path: root/scripts
Mode  Name  Size
-rw-r--r--  .gitignore  139
-rw-r--r--  Kbuild.include  12485
-rw-r--r--  Kconfig.include  2055
-rwxr-xr-x  Lindent  502
-rw-r--r--  Makefile  1236
-rw-r--r--  Makefile.asm-generic  1842
-rw-r--r--  Makefile.build  17155
-rw-r--r--  Makefile.clean  2202
-rw-r--r--  Makefile.dtbinst  933
-rw-r--r--  Makefile.extrawarn  2806
-rw-r--r--  Makefile.gcc-plugins  2505
-rw-r--r--  Makefile.headersinst  2939
-rw-r--r--  Makefile.host  6047
-rw-r--r--  Makefile.kasan  1475
-rw-r--r--  Makefile.kcov  359
-rw-r--r--  Makefile.lib  15338
-rw-r--r--  Makefile.modfinal  1829
-rw-r--r--  Makefile.modinst  1079
-rw-r--r--  Makefile.modpost  3407
-rw-r--r--  Makefile.modsign  791
-rw-r--r--  Makefile.package  6767
-rw-r--r--  Makefile.ubsan  1035
-rwxr-xr-x  adjust_autoksyms.sh  2207
-rw-r--r--  asn1_compiler.c  36169
d---------  atomic  352
d---------  basic  110
-rw-r--r--  bin2c.c  743
-rwxr-xr-x  bloat-o-meter  3362
-rwxr-xr-x  bootgraph.pl  5780
-rwxr-xr-x  bpf_helpers_doc.py  20572
-rwxr-xr-x  cc-can-link.sh  166
-rwxr-xr-x  check-sysctl-docs  4472
-rwxr-xr-x  check_extable.sh  5049
-rwxr-xr-x  checkincludes.pl  1988
-rwxr-xr-x  checkkconfigsymbols.py  15867
-rwxr-xr-x  checkpatch.pl  207336
-rwxr-xr-x  checkstack.pl  5399
-rwxr-xr-x  checksyscalls.sh  7443
-rwxr-xr-x  checkversion.pl  1943
-rwxr-xr-x  clang-version.sh  527
-rwxr-xr-x  cleanfile  3542
-rwxr-xr-x  cleanpatch  5182
-rwxr-xr-x  coccicheck  7419
d---------  coccinelle  223
-rwxr-xr-x  config  4635
-rw-r--r--  const_structs.checkpatch  964
-rwxr-xr-x  decode_stacktrace.sh  3984
-rwxr-xr-x  decodecode  2629
-rwxr-xr-x  depmod.sh  1367
-rwxr-xr-x  diffconfig  3807
-rwxr-xr-x  documentation-file-ref-check  5687
d---------  dtc  969
d---------  dummy-tools  126
-rwxr-xr-x  export_report.pl  4605
-rw-r--r--  extract-cert.c  3541
-rwxr-xr-x  extract-ikconfig  1734
-rwxr-xr-x  extract-module-sig.pl  3746
-rwxr-xr-x  extract-sys-certs.pl  3836
-rwxr-xr-x  extract-vmlinux  1695
-rwxr-xr-x  extract_xc3028.pl  45686
-rwxr-xr-x  faddr2line  6301
-rwxr-xr-x  file-size.sh  86
-rwxr-xr-x  find-unused-docs.sh  1304
-rwxr-xr-x  gcc-goto.sh  511
-rwxr-xr-x  gcc-ld  711
-rwxr-xr-x  gcc-plugin.sh  406
d---------  gcc-plugins  753
-rwxr-xr-x  gcc-version.sh  588
-rwxr-xr-x  gcc-x86_32-has-stack-protector.sh  173
-rwxr-xr-x  gcc-x86_64-has-stack-protector.sh  198
d---------  gdb  110
-rwxr-xr-x  gen_autoksyms.sh  1329
-rwxr-xr-x  gen_compile_commands.py  5695
-rwxr-xr-x  gen_ksymdeps.sh  399
d---------  genksyms  256
-rwxr-xr-x  get_abi.pl  10335
-rwxr-xr-x  get_dvb_firmware  25128
-rwxr-xr-x  get_maintainer.pl  68226
-rwxr-xr-x  gfp-translate  1732
-rwxr-xr-x  headerdep.pl  3587
-rwxr-xr-x  headers_check.pl  3819
-rwxr-xr-x  headers_install.sh  3641
-rw-r--r--  insert-sys-cert.c  9084
-rwxr-xr-x  jobserver-exec  2212
-rw-r--r--  kallsyms.c  17848
d---------  kconfig  1214
-rwxr-xr-x  kernel-doc  62833
d---------  ksymoops  34
-rwxr-xr-x  ld-version.sh  269
-rwxr-xr-x  leaking_addresses.pl  13105
-rwxr-xr-x  link-vmlinux.sh  8566
-rwxr-xr-x  makelst  808
-rwxr-xr-x  markup_oops.pl  8110
-rwxr-xr-x  mkcompile_h  2392
-rwxr-xr-x  mkmakefile  426
-rwxr-xr-x  mksysmap  1359
-rwxr-xr-x  mkuboot.sh  414
d---------  mod  354
-rw-r--r--  module-common.lds  901
-rwxr-xr-x  modules-check.sh  303
-rwxr-xr-x  namespace.pl  13492
-rw-r--r--  nsdeps  1896
-rwxr-xr-x  objdiff  2905
d---------  package  188
-rwxr-xr-x  parse-maintainers.pl  4646
-rwxr-xr-x  patch-kernel  10185
-rwxr-xr-x  profile2linkerlist.pl  414
-rwxr-xr-x  prune-kernel  708
-rw-r--r--  recordmcount.c  16902
-rw-r--r--  recordmcount.h  17549
-rwxr-xr-x  recordmcount.pl  19249
d---------  selinux  182
-rwxr-xr-x  setlocalversion  4404
-rwxr-xr-x  show_delta  3083
-rw-r--r--  sign-file.c  9994
-rw-r--r--  sorttable.c  7665
-rw-r--r--  sorttable.h  9884
-rw-r--r--  spdxcheck-test.sh  323
-rwxr-xr-x  spdxcheck.py  10157
-rw-r--r--  spelling.txt  30656
-rwxr-xr-x  sphinx-pre-install  18738
-rwxr-xr-x  split-man.pl  600
-rwxr-xr-x  stackdelta  1887
-rwxr-xr-x  stackusage  794
-rw-r--r--  subarch.include  641
-rwxr-xr-x  tags.sh  9484
-rwxr-xr-x  tools-support-relr.sh  518
d---------  tracing  89
-rw-r--r--  unifdef.c  35637
-rwxr-xr-x  ver_linux  2767
-rw-r--r--  xen-hypercalls.sh  386
-rwxr-xr-x  xz_wrap.sh  562
-rw-r--r--Documentation/devicetree/bindings/iio/adc/adi,ad7606.yaml1
-rw-r--r--Documentation/devicetree/bindings/iio/adc/xlnx,zynqmp-ams.yaml3
-rw-r--r--Documentation/devicetree/bindings/input/touchscreen/imagis,ist3038c.yaml1
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/allwinner,sun7i-a20-sc-nmi.yaml1
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/amlogic,meson-gpio-intc.yaml19
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/renesas,rzv2h-icu.yaml6
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/riscv,aplic.yaml8
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/sophgo,sg2042-msi.yaml61
-rw-r--r--Documentation/devicetree/bindings/iommu/arm,smmu.yaml3
-rw-r--r--Documentation/devicetree/bindings/iommu/qcom,iommu.yaml1
-rw-r--r--Documentation/devicetree/bindings/media/aspeed,video-engine.yaml70
-rw-r--r--Documentation/devicetree/bindings/media/aspeed-video.txt33
-rw-r--r--Documentation/devicetree/bindings/media/i2c/adv7180.yaml4
-rw-r--r--Documentation/devicetree/bindings/media/i2c/st,st-mipid02.yaml2
-rw-r--r--Documentation/devicetree/bindings/media/mediatek,vcodec-subdev-decoder.yaml100
-rw-r--r--Documentation/devicetree/bindings/media/mediatek-jpeg-decoder.yaml3
-rw-r--r--Documentation/devicetree/bindings/media/mediatek-jpeg-encoder.yaml2
-rw-r--r--Documentation/devicetree/bindings/media/qcom,sc7280-camss.yaml10
-rw-r--r--Documentation/devicetree/bindings/media/qcom,sdm670-camss.yaml318
-rw-r--r--Documentation/devicetree/bindings/media/qcom,sm8550-camss.yaml597
-rw-r--r--Documentation/devicetree/bindings/media/qcom,sm8550-iris.yaml158
-rw-r--r--Documentation/devicetree/bindings/media/snps,dw-hdmi-rx.yaml132
-rw-r--r--Documentation/devicetree/bindings/media/st,stm32mp25-csi.yaml5
-rw-r--r--Documentation/devicetree/bindings/mfd/motorola-cpcap.txt6
-rw-r--r--Documentation/devicetree/bindings/mfd/syscon.yaml2
-rw-r--r--Documentation/devicetree/bindings/misc/atmel-ssc.txt50
-rw-r--r--Documentation/devicetree/bindings/mmc/allwinner,sun4i-a10-mmc.yaml38
-rw-r--r--Documentation/devicetree/bindings/mmc/amlogic,meson-mx-sdio.yaml3
-rw-r--r--Documentation/devicetree/bindings/mmc/atmel,hsmci.yaml106
-rw-r--r--Documentation/devicetree/bindings/mmc/atmel-hsmci.txt73
-rw-r--r--Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.yaml1
-rw-r--r--Documentation/devicetree/bindings/mmc/mmc-controller.yaml2
-rw-r--r--Documentation/devicetree/bindings/mmc/mmc-slot.yaml1
-rw-r--r--Documentation/devicetree/bindings/mmc/renesas,sdhi.yaml16
-rw-r--r--Documentation/devicetree/bindings/mmc/rockchip-dw-mshc.yaml2
-rw-r--r--Documentation/devicetree/bindings/mmc/samsung,exynos-dw-mshc.yaml2
-rw-r--r--Documentation/devicetree/bindings/mmc/snps,dwcmshc-sdhci.yaml5
-rw-r--r--Documentation/devicetree/bindings/mtd/arasan,nand-controller.yaml2
-rw-r--r--Documentation/devicetree/bindings/mtd/atmel,dataflash.yaml55
-rw-r--r--Documentation/devicetree/bindings/mtd/atmel-dataflash.txt17
-rw-r--r--Documentation/devicetree/bindings/mtd/gpmi-nand.yaml7
-rw-r--r--Documentation/devicetree/bindings/mtd/mtd-physmap.yaml5
-rw-r--r--Documentation/devicetree/bindings/mtd/mxc-nand.yaml8
-rw-r--r--Documentation/devicetree/bindings/net/airoha,en7581-eth.yaml10
-rw-r--r--Documentation/devicetree/bindings/net/airoha,en7581-npu.yaml84
-rw-r--r--Documentation/devicetree/bindings/net/amlogic,meson-dwmac.yaml6
-rw-r--r--Documentation/devicetree/bindings/net/bluetooth/nxp,88w8987-bt.yaml18
-rw-r--r--Documentation/devicetree/bindings/net/bluetooth/qualcomm-bluetooth.yaml2
-rw-r--r--Documentation/devicetree/bindings/net/can/fsl,flexcan.yaml57
-rw-r--r--Documentation/devicetree/bindings/net/can/microchip,mcp251xfd.yaml2
-rw-r--r--Documentation/devicetree/bindings/net/can/renesas,rcar-canfd.yaml2
-rw-r--r--Documentation/devicetree/bindings/net/cdns,macb.yaml7
-rw-r--r--Documentation/devicetree/bindings/net/dsa/brcm,b53.yaml2
-rw-r--r--Documentation/devicetree/bindings/net/ethernet-phy.yaml6
-rw-r--r--Documentation/devicetree/bindings/net/fsl,gianfar-mdio.yaml112
-rw-r--r--Documentation/devicetree/bindings/net/fsl,gianfar.yaml248
-rw-r--r--Documentation/devicetree/bindings/net/fsl-tsec-phy.txt80
-rw-r--r--Documentation/devicetree/bindings/net/ieee802154/ca8210.txt2
-rw-r--r--Documentation/devicetree/bindings/net/intel,dwmac-plat.yaml6
-rw-r--r--Documentation/devicetree/bindings/net/mediatek-dwmac.yaml6
-rw-r--r--Documentation/devicetree/bindings/net/nxp,dwmac-imx.yaml8
-rw-r--r--Documentation/devicetree/bindings/net/qcom,ipa.yaml128
-rw-r--r--Documentation/devicetree/bindings/net/realtek,rtl9301-mdio.yaml86
-rw-r--r--Documentation/devicetree/bindings/net/realtek,rtl9301-switch.yaml (renamed from Documentation/devicetree/bindings/mfd/realtek,rtl9301-switch.yaml)63
-rw-r--r--Documentation/devicetree/bindings/net/rfkill-gpio.yaml5
-rw-r--r--Documentation/devicetree/bindings/net/rockchip-dwmac.yaml47
-rw-r--r--Documentation/devicetree/bindings/net/snps,dwmac.yaml10
-rw-r--r--Documentation/devicetree/bindings/net/sophgo,sg2044-dwmac.yaml126
-rw-r--r--Documentation/devicetree/bindings/net/stm32-dwmac.yaml10
-rw-r--r--Documentation/devicetree/bindings/net/tesla,fsd-ethqos.yaml118
-rw-r--r--Documentation/devicetree/bindings/net/toshiba,visconti-dwmac.yaml6
-rw-r--r--Documentation/devicetree/bindings/net/wireless/qcom,ath10k.yaml21
-rw-r--r--Documentation/devicetree/bindings/net/wireless/qcom,ath11k-pci.yaml9
-rw-r--r--Documentation/devicetree/bindings/net/wireless/qcom,ath11k.yaml7
-rw-r--r--Documentation/devicetree/bindings/net/wireless/qcom,ath12k-wsi.yaml13
-rw-r--r--Documentation/devicetree/bindings/platform/huawei,gaokun-ec.yaml124
-rw-r--r--Documentation/devicetree/bindings/power/allwinner,sun20i-d1-ppu.yaml1
-rw-r--r--Documentation/devicetree/bindings/power/apple,pmgr-pwrstate.yaml5
-rw-r--r--Documentation/devicetree/bindings/power/qcom,kpss-acc-v2.yaml4
-rw-r--r--Documentation/devicetree/bindings/power/rockchip,power-controller.yaml3
-rw-r--r--Documentation/devicetree/bindings/pwm/imx-tpm-pwm.yaml11
-rw-r--r--Documentation/devicetree/bindings/pwm/pwm-nexus-node.yaml65
-rw-r--r--Documentation/devicetree/bindings/pwm/pwm-rockchip.yaml2
-rw-r--r--Documentation/devicetree/bindings/pwm/sophgo,sg2042-pwm.yaml58
-rw-r--r--Documentation/devicetree/bindings/regulator/nxp,pca9450-regulator.yaml51
-rw-r--r--Documentation/devicetree/bindings/regulator/richtek,rtq2208.yaml3
-rw-r--r--Documentation/devicetree/bindings/reset/atmel,at91sam9260-reset.yaml4
-rw-r--r--Documentation/devicetree/bindings/riscv/spacemit.yaml1
-rw-r--r--Documentation/devicetree/bindings/soc/qcom/qcom,geni-se.yaml5
-rw-r--r--Documentation/devicetree/bindings/soc/qcom/qcom,pmic-glink.yaml1
-rw-r--r--Documentation/devicetree/bindings/soc/renesas/renesas,r9a09g057-sys.yaml5
-rw-r--r--Documentation/devicetree/bindings/soc/renesas/renesas.yaml16
-rw-r--r--Documentation/devicetree/bindings/soc/rockchip/grf.yaml3
-rw-r--r--Documentation/devicetree/bindings/soc/samsung/exynos-pmu.yaml2
-rw-r--r--Documentation/devicetree/bindings/soc/samsung/exynos-usi.yaml100
-rw-r--r--Documentation/devicetree/bindings/soc/samsung/samsung,exynos-sysreg.yaml5
-rw-r--r--Documentation/devicetree/bindings/soc/xilinx/xilinx.yaml11
-rw-r--r--Documentation/devicetree/bindings/sound/allwinner,sun4i-a10-codec.yaml6
-rw-r--r--Documentation/devicetree/bindings/sound/atmel,at91-ssc.yaml104
-rw-r--r--Documentation/devicetree/bindings/sound/atmel,at91sam9g20ek-wm8731.yaml72
-rw-r--r--Documentation/devicetree/bindings/sound/atmel-at91sam9g20ek-wm8731-audio.txt26
-rw-r--r--Documentation/devicetree/bindings/sound/audio-graph-card2.yaml4
-rw-r--r--Documentation/devicetree/bindings/sound/awinic,aw88395.yaml1
-rw-r--r--Documentation/devicetree/bindings/sound/dmic-codec.yaml3
-rw-r--r--Documentation/devicetree/bindings/sound/everest,es8328.yaml11
-rw-r--r--Documentation/devicetree/bindings/sound/fsl,audmix.yaml61
-rw-r--r--Documentation/devicetree/bindings/sound/fsl,easrc.yaml5
-rw-r--r--Documentation/devicetree/bindings/sound/fsl,imx-asrc.yaml15
-rw-r--r--Documentation/devicetree/bindings/sound/fsl,imx95-cm7-sof.yaml64
-rw-r--r--Documentation/devicetree/bindings/sound/fsl,sai.yaml55
-rw-r--r--Documentation/devicetree/bindings/sound/fsl,sof-cpu.yaml27
-rw-r--r--Documentation/devicetree/bindings/sound/ics43432.txt19
-rw-r--r--Documentation/devicetree/bindings/sound/imx-audio-card.yaml14
-rw-r--r--Documentation/devicetree/bindings/sound/invensense,ics43432.yaml51
-rw-r--r--Documentation/devicetree/bindings/sound/mediatek,mt8188-mt6359.yaml9
-rw-r--r--Documentation/devicetree/bindings/sound/nvidia,tegra30-hda.yaml1
-rw-r--r--Documentation/devicetree/bindings/sound/qcom,wcd937x-sdw.yaml36
-rw-r--r--Documentation/devicetree/bindings/sound/rockchip-spdif.yaml4
-rw-r--r--Documentation/devicetree/bindings/sound/ti,tas2770.yaml1
-rw-r--r--Documentation/devicetree/bindings/sound/ti,tas27xx.yaml1
-rw-r--r--Documentation/devicetree/bindings/sound/wlf,wm8904.yaml129
-rw-r--r--Documentation/devicetree/bindings/sound/wlf,wm8960.yaml4
-rw-r--r--Documentation/devicetree/bindings/sound/xlnx,audio-formatter.txt29
-rw-r--r--Documentation/devicetree/bindings/sound/xlnx,audio-formatter.yaml72
-rw-r--r--Documentation/devicetree/bindings/sound/xlnx,i2s.txt28
-rw-r--r--Documentation/devicetree/bindings/sound/xlnx,i2s.yaml65
-rw-r--r--Documentation/devicetree/bindings/sound/xlnx,spdif.txt28
-rw-r--r--Documentation/devicetree/bindings/sound/xlnx,spdif.yaml77
-rw-r--r--Documentation/devicetree/bindings/spi/adi,axi-spi-engine.yaml24
-rw-r--r--Documentation/devicetree/bindings/spi/cdns,qspi-nor.yaml25
-rw-r--r--Documentation/devicetree/bindings/spi/fsl,espi.yaml65
-rw-r--r--Documentation/devicetree/bindings/spi/fsl,spi.yaml74
-rw-r--r--Documentation/devicetree/bindings/spi/fsl-spi.txt62
-rw-r--r--Documentation/devicetree/bindings/spi/mediatek,spi-mt65xx.yaml2
-rw-r--r--Documentation/devicetree/bindings/spi/qcom,spi-qpic-snand.yaml83
-rw-r--r--Documentation/devicetree/bindings/spi/spi-fsl-lpspi.yaml1
-rw-r--r--Documentation/devicetree/bindings/spi/spi-rockchip.yaml1
-rw-r--r--Documentation/devicetree/bindings/spi/spi-sg2044-nor.yaml52
-rw-r--r--Documentation/devicetree/bindings/spi/spi-zynqmp-qspi.yaml3
-rw-r--r--Documentation/devicetree/bindings/spi/st,stm32mp25-ospi.yaml105
-rw-r--r--Documentation/devicetree/bindings/timer/arm,twd-timer.yaml6
-rw-r--r--Documentation/devicetree/bindings/timer/renesas,cmt.yaml44
-rw-r--r--Documentation/devicetree/bindings/timer/renesas,em-sti.yaml10
-rw-r--r--Documentation/devicetree/bindings/timer/renesas,mtu2.yaml14
-rw-r--r--Documentation/devicetree/bindings/timer/renesas,ostm.yaml10
-rw-r--r--Documentation/devicetree/bindings/timer/renesas,tmu.yaml22
-rw-r--r--Documentation/devicetree/bindings/timer/renesas,tpu.yaml8
-rw-r--r--Documentation/devicetree/bindings/timer/samsung,exynos4210-mct.yaml4
-rw-r--r--Documentation/devicetree/bindings/timer/sifive,clint.yaml24
-rw-r--r--Documentation/devicetree/bindings/trigger-source/pwm-trigger.yaml37
-rw-r--r--Documentation/devicetree/bindings/trivial-devices.yaml2
-rw-r--r--Documentation/devicetree/bindings/ufs/renesas,ufs.yaml12
-rw-r--r--Documentation/devicetree/bindings/ufs/rockchip,rk3576-ufshc.yaml105
-rw-r--r--Documentation/devicetree/bindings/usb/dwc3-xilinx.yaml3
-rw-r--r--Documentation/devicetree/bindings/usb/mediatek,mtk-xhci.yaml4
-rw-r--r--Documentation/devicetree/bindings/usb/mediatek,mtu3.yaml12
-rw-r--r--Documentation/devicetree/bindings/vendor-prefixes.yaml12
-rw-r--r--Documentation/driver-api/firmware/firmware-usage-guidelines.rst5
-rw-r--r--Documentation/driver-api/generic-counter.rst4
-rw-r--r--Documentation/driver-api/iio/core.rst2
-rw-r--r--Documentation/driver-api/infiniband.rst16
-rw-r--r--Documentation/driver-api/media/drivers/zoran.rst2
-rw-r--r--Documentation/driver-api/media/maintainer-entry-profile.rst2
-rw-r--r--Documentation/driver-api/media/tx-rx.rst26
-rw-r--r--Documentation/driver-api/nvdimm/nvdimm.rst6
-rw-r--r--Documentation/driver-api/pm/devices.rst2
-rw-r--r--Documentation/edac/features.rst103
-rw-r--r--Documentation/edac/index.rst12
-rw-r--r--Documentation/edac/memory_repair.rst121
-rw-r--r--Documentation/edac/scrub.rst266
-rw-r--r--Documentation/features/debug/kprobes-on-ftrace/arch-support.txt2
-rwxr-xr-xDocumentation/features/list-arch.sh2
-rw-r--r--Documentation/filesystems/9p.rst2
-rw-r--r--Documentation/filesystems/bcachefs/SubmittingPatches.rst47
-rw-r--r--Documentation/filesystems/bcachefs/casefolding.rst90
-rw-r--r--Documentation/filesystems/bcachefs/index.rst20
-rw-r--r--Documentation/filesystems/coda.rst2
-rw-r--r--Documentation/filesystems/debugfs.rst2
-rw-r--r--Documentation/filesystems/f2fs.rst3
-rw-r--r--Documentation/filesystems/fscrypt.rst8
-rw-r--r--Documentation/filesystems/fsverity.rst16
-rw-r--r--Documentation/filesystems/idmappings.rst4
-rw-r--r--Documentation/filesystems/index.rst1
-rw-r--r--Documentation/filesystems/iomap/design.rst9
-rw-r--r--Documentation/filesystems/iomap/operations.rst42
-rw-r--r--Documentation/filesystems/journalling.rst4
-rw-r--r--Documentation/filesystems/locking.rst2
-rw-r--r--Documentation/filesystems/netfs_library.rst2
-rw-r--r--Documentation/filesystems/overlayfs.rst24
-rw-r--r--Documentation/filesystems/porting.rst48
-rw-r--r--Documentation/filesystems/sysv-fs.rst264
-rw-r--r--Documentation/filesystems/vfs.rst23
-rw-r--r--Documentation/filesystems/xfs/xfs-delayed-logging-design.rst2
-rw-r--r--Documentation/filesystems/xfs/xfs-maintainer-entry-profile.rst2
-rw-r--r--Documentation/filesystems/xfs/xfs-online-fsck-design.rst4
-rw-r--r--Documentation/hwmon/abituguru-datasheet.rst8
-rw-r--r--Documentation/hwmon/abituguru.rst2
-rw-r--r--Documentation/hwmon/asus_ec_sensors.rst1
-rw-r--r--Documentation/hwmon/cgbc-hwmon.rst63
-rw-r--r--Documentation/hwmon/dell-smm-hwmon.rst14
-rw-r--r--Documentation/hwmon/htu31.rst37
-rw-r--r--Documentation/hwmon/ina233.rst75
-rw-r--r--Documentation/hwmon/index.rst3
-rw-r--r--Documentation/hwmon/lm90.rst43
-rw-r--r--Documentation/hwmon/ltc2978.rst36
-rw-r--r--Documentation/hwmon/nct6683.rst3
-rw-r--r--Documentation/iio/iio_devbuf.rst2
-rw-r--r--Documentation/input/devices/elantech.rst2
-rw-r--r--Documentation/input/input-programming.rst19
-rw-r--r--Documentation/livepatch/module-elf-format.rst13
-rw-r--r--Documentation/mm/split_page_table_lock.rst2
-rw-r--r--Documentation/netlink/genetlink-c.yaml2
-rw-r--r--Documentation/netlink/genetlink-legacy.yaml5
-rw-r--r--Documentation/netlink/genetlink.yaml2
-rw-r--r--Documentation/netlink/specs/conntrack.yaml643
-rw-r--r--Documentation/netlink/specs/devlink.yaml1
-rw-r--r--Documentation/netlink/specs/netdev.yaml23
-rw-r--r--Documentation/netlink/specs/nl80211.yaml2000
-rw-r--r--Documentation/netlink/specs/rt_addr.yaml23
-rw-r--r--Documentation/netlink/specs/rt_link.yaml19
-rw-r--r--Documentation/netlink/specs/rt_rule.yaml15
-rw-r--r--Documentation/networking/batman-adv.rst2
-rw-r--r--Documentation/networking/device_drivers/cable/index.rst18
-rw-r--r--Documentation/networking/device_drivers/cable/sb1000.rst222
-rw-r--r--Documentation/networking/device_drivers/ethernet/freescale/dpaa2/switch-driver.rst2
-rw-r--r--Documentation/networking/device_drivers/ethernet/index.rst1
-rw-r--r--Documentation/networking/device_drivers/ethernet/mellanox/mlx5/counters.rst5
-rw-r--r--Documentation/networking/device_drivers/ethernet/toshiba/spider_net.rst202
-rw-r--r--Documentation/networking/device_drivers/index.rst1
-rw-r--r--Documentation/networking/devlink/bnxt.rst2
-rw-r--r--Documentation/networking/devlink/ice.rst11
-rw-r--r--Documentation/networking/devlink/mlx5.rst4
-rw-r--r--Documentation/networking/devlink/sfc.rst16
-rw-r--r--Documentation/networking/devmem.rst5
-rw-r--r--Documentation/networking/ethtool-netlink.rst2
-rw-r--r--Documentation/networking/ip-sysctl.rst17
-rw-r--r--Documentation/networking/j1939.rst675
-rw-r--r--Documentation/networking/kcm.rst2
-rw-r--r--Documentation/networking/mptcp-sysctl.rst23
-rw-r--r--Documentation/networking/napi.rst33
-rw-r--r--Documentation/networking/net_cachelines/inet_connection_sock.rst5
-rw-r--r--Documentation/networking/net_cachelines/net_device.rst2
-rw-r--r--Documentation/networking/net_cachelines/netns_ipv4_sysctl.rst1
-rw-r--r--Documentation/networking/net_cachelines/snmp.rst1
-rw-r--r--Documentation/networking/net_cachelines/tcp_sock.rst1
-rw-r--r--Documentation/networking/netconsole.rst104
-rw-r--r--Documentation/networking/netdevices.rst71
-rw-r--r--Documentation/networking/scaling.rst21
-rw-r--r--Documentation/networking/statistics.rst2
-rw-r--r--Documentation/networking/strparser.rst2
-rw-r--r--Documentation/networking/switchdev.rst2
-rw-r--r--Documentation/networking/timestamping.rst8
-rw-r--r--Documentation/networking/xfrm_device.rst3
-rw-r--r--Documentation/networking/xsk-tx-metadata.rst62
-rw-r--r--Documentation/nvme/nvme-pci-endpoint-target.rst2
-rw-r--r--Documentation/process/5.Posting.rst13
-rw-r--r--Documentation/process/changes.rst4
-rw-r--r--Documentation/process/code-of-conduct-interpretation.rst17
-rw-r--r--Documentation/process/kernel-docs.rst11
-rw-r--r--Documentation/process/maintainer-netdev.rst8
-rw-r--r--Documentation/process/submit-checklist.rst12
-rw-r--r--Documentation/process/submitting-patches.rst45
-rw-r--r--Documentation/rust/quick-start.rst2
-rw-r--r--Documentation/rust/testing.rst2
-rw-r--r--Documentation/scheduler/sched-bwc.rst2
-rw-r--r--Documentation/scheduler/sched-debug.rst2
-rw-r--r--Documentation/scheduler/sched-design-CFS.rst2
-rw-r--r--Documentation/scheduler/sched-domains.rst5
-rw-r--r--Documentation/scheduler/sched-ext.rst39
-rw-r--r--Documentation/scheduler/sched-rt-group.rst3
-rw-r--r--Documentation/scheduler/sched-stats.rst2
-rw-r--r--Documentation/scsi/st.rst5
-rw-r--r--Documentation/security/landlock.rst13
-rw-r--r--Documentation/security/tpm/index.rst1
-rw-r--r--Documentation/security/tpm/tpm_ffa_crb.rst65
-rw-r--r--Documentation/sound/alsa-configuration.rst2
-rw-r--r--Documentation/sound/designs/powersave.rst6
-rw-r--r--Documentation/sound/soc/codec-to-codec.rst4
-rw-r--r--Documentation/sound/soc/dpcm.rst21
-rw-r--r--Documentation/sound/soc/machine.rst2
-rw-r--r--Documentation/sphinx/automarkup.py82
-rw-r--r--Documentation/sphinx/cdomain.py7
-rw-r--r--Documentation/sphinx/kernel_abi.py162
-rw-r--r--Documentation/sphinx/kernel_feat.py4
-rwxr-xr-xDocumentation/sphinx/kernel_include.py4
-rw-r--r--Documentation/sphinx/kerneldoc.py19
-rw-r--r--Documentation/sphinx/kernellog.py22
-rw-r--r--Documentation/sphinx/kfigure.py91
-rw-r--r--Documentation/sphinx/load_config.py2
-rwxr-xr-xDocumentation/sphinx/maintainers_include.py4
-rwxr-xr-xDocumentation/sphinx/rstFlatTable.py10
-rw-r--r--Documentation/tools/rv/rv-mon-sched.rst69
-rw-r--r--Documentation/trace/postprocess/decode_msr.py2
-rw-r--r--Documentation/trace/rv/monitor_sched.rst171
-rw-r--r--Documentation/translations/it_IT/process/submit-checklist.rst7
-rw-r--r--Documentation/translations/ja_JP/SubmitChecklist105
-rw-r--r--Documentation/translations/ja_JP/disclaimer-ja_JP.rst24
-rw-r--r--Documentation/translations/ja_JP/index.rst2
-rw-r--r--Documentation/translations/ja_JP/process/howto.rst37
-rw-r--r--Documentation/translations/ja_JP/process/submit-checklist.rst163
-rw-r--r--Documentation/translations/sp_SP/process/submit-checklist.rst7
-rw-r--r--Documentation/translations/sp_SP/scheduler/sched-design-CFS.rst2
-rw-r--r--Documentation/translations/zh_CN/admin-guide/README.rst2
-rw-r--r--Documentation/translations/zh_CN/dev-tools/ubsan.rst33
-rw-r--r--Documentation/translations/zh_CN/disclaimer-zh_CN.rst8
-rw-r--r--Documentation/translations/zh_CN/index.rst8
-rw-r--r--Documentation/translations/zh_CN/mm/balance.rst2
-rw-r--r--Documentation/translations/zh_CN/process/submit-checklist.rst4
-rw-r--r--Documentation/translations/zh_CN/security/credentials.rst479
-rw-r--r--Documentation/translations/zh_CN/security/index.rst8
-rw-r--r--Documentation/translations/zh_CN/security/keys/index.rst22
-rw-r--r--Documentation/translations/zh_CN/security/secrets/index.rst17
-rw-r--r--Documentation/translations/zh_CN/security/self-protection.rst271
-rw-r--r--Documentation/translations/zh_CN/security/snp-tdx-threat-model.rst209
-rw-r--r--Documentation/translations/zh_CN/security/tpm/index.rst20
-rw-r--r--Documentation/translations/zh_CN/security/tpm/tpm-security.rst151
-rw-r--r--Documentation/translations/zh_CN/security/tpm/tpm_event_log.rst49
-rw-r--r--Documentation/translations/zh_CN/security/tpm/tpm_ftpm_tee.rst31
-rw-r--r--Documentation/translations/zh_CN/security/tpm/tpm_tis.rst43
-rw-r--r--Documentation/translations/zh_CN/security/tpm/tpm_vtpm_proxy.rst51
-rw-r--r--Documentation/translations/zh_CN/security/tpm/xen-tpmfront.rst114
-rw-r--r--Documentation/translations/zh_TW/admin-guide/README.rst2
-rw-r--r--Documentation/translations/zh_TW/process/submit-checklist.rst4
-rw-r--r--Documentation/usb/gadget-testing.rst2
-rw-r--r--Documentation/userspace-api/accelerators/ocxl.rst7
-rw-r--r--Documentation/userspace-api/dma-buf-heaps.rst25
-rw-r--r--Documentation/userspace-api/index.rst1
-rw-r--r--Documentation/userspace-api/ioctl/ioctl-number.rst6
-rw-r--r--Documentation/userspace-api/landlock.rst72
-rw-r--r--Documentation/userspace-api/media/drivers/uvcvideo.rst64
-rw-r--r--Documentation/userspace-api/media/rc/rc-sysfs-nodes.rst2
-rw-r--r--Documentation/userspace-api/media/v4l/vidioc-g-ext-ctrls.rst26
-rw-r--r--Documentation/userspace-api/media/v4l/vidioc-queryctrl.rst14
-rw-r--r--Documentation/userspace-api/media/videodev2.h.rst.exceptions4
-rw-r--r--Documentation/virt/kvm/api.rst22
-rw-r--r--Documentation/virt/kvm/arm/fw-pseudo-registers.rst15
-rw-r--r--Documentation/virt/kvm/arm/hypercalls.rst59
-rw-r--r--Documentation/virt/kvm/devices/arm-vgic-its.rst5
-rw-r--r--Documentation/virt/kvm/devices/arm-vgic-v3.rst12
-rw-r--r--Documentation/virt/kvm/locking.rst4
-rw-r--r--Documentation/wmi/acpi-interface.rst3
-rw-r--r--Documentation/wmi/driver-development-guide.rst4
-rw-r--r--MAINTAINERS339
-rw-r--r--Makefile10
-rw-r--r--arch/Kconfig4
-rw-r--r--arch/alpha/include/asm/io.h31
-rw-r--r--arch/alpha/kernel/syscalls/syscall.tbl1
-rw-r--r--arch/arm/boot/dts/allwinner/Makefile2
-rw-r--r--arch/arm/boot/dts/allwinner/sun8i-v3s-netcube-kumquat.dts276
-rw-r--r--arch/arm/boot/dts/allwinner/sun8i-v3s.dtsi6
-rw-r--r--arch/arm/boot/dts/amlogic/meson8.dtsi18
-rw-r--r--arch/arm/boot/dts/amlogic/meson8b-ec100.dts2
-rw-r--r--arch/arm/boot/dts/amlogic/meson8b-mxq.dts2
-rw-r--r--arch/arm/boot/dts/amlogic/meson8b-odroidc1.dts2
-rw-r--r--arch/arm/boot/dts/amlogic/meson8b.dtsi18
-rw-r--r--arch/arm/boot/dts/broadcom/bcm2711-rpi.dtsi5
-rw-r--r--arch/arm/boot/dts/broadcom/bcm2711.dtsi12
-rw-r--r--arch/arm/boot/dts/broadcom/bcm4709-asus-rt-ac3200.dts8
-rw-r--r--arch/arm/boot/dts/broadcom/bcm47094-asus-rt-ac5300.dts8
-rw-r--r--arch/arm/boot/dts/cirrus/ep7211-edb7211.dts2
-rw-r--r--arch/arm/boot/dts/intel/ixp/intel-ixp42x-netgear-wg302v1.dts40
-rw-r--r--arch/arm/boot/dts/intel/ixp/intel-ixp4xx.dtsi2
-rw-r--r--arch/arm/boot/dts/marvell/armada-385-clearfog-gtr.dtsi8
-rw-r--r--arch/arm/boot/dts/marvell/armada-388-clearfog-base.dts2
-rw-r--r--arch/arm/boot/dts/marvell/kirkwood-openrd.dtsi4
-rw-r--r--arch/arm/boot/dts/microchip/aks-cdu.dts2
-rw-r--r--arch/arm/boot/dts/microchip/animeo_ip.dts2
-rw-r--r--arch/arm/boot/dts/microchip/at91-foxg20.dts2
-rw-r--r--arch/arm/boot/dts/microchip/at91-qil_a9260.dts2
-rw-r--r--arch/arm/boot/dts/microchip/at91-sam9_l9260.dts2
-rw-r--r--arch/arm/boot/dts/microchip/at91-sama5d27_som1_ek.dts4
-rw-r--r--arch/arm/boot/dts/microchip/at91-sama5d2_ptc_ek.dts4
-rw-r--r--arch/arm/boot/dts/microchip/at91-sama5d2_xplained.dts4
-rw-r--r--arch/arm/boot/dts/microchip/at91-sama5d3_xplained.dts4
-rw-r--r--arch/arm/boot/dts/microchip/at91-sama5d4_ma5d4evk.dts4
-rw-r--r--arch/arm/boot/dts/microchip/at91-sama5d4_xplained.dts4
-rw-r--r--arch/arm/boot/dts/microchip/at91-sama5d4ek.dts4
-rw-r--r--arch/arm/boot/dts/microchip/at91-sama7d65_curiosity.dts73
-rw-r--r--arch/arm/boot/dts/microchip/at91-sama7g5ek.dts1
-rw-r--r--arch/arm/boot/dts/microchip/at91-vinco.dts4
-rw-r--r--arch/arm/boot/dts/microchip/at91rm9200.dtsi2
-rw-r--r--arch/arm/boot/dts/microchip/at91rm9200ek.dts2
-rw-r--r--arch/arm/boot/dts/microchip/at91sam9260.dtsi2
-rw-r--r--arch/arm/boot/dts/microchip/at91sam9260ek.dts2
-rw-r--r--arch/arm/boot/dts/microchip/at91sam9261.dtsi2
-rw-r--r--arch/arm/boot/dts/microchip/at91sam9261ek.dts2
-rw-r--r--arch/arm/boot/dts/microchip/at91sam9263.dtsi2
-rw-r--r--arch/arm/boot/dts/microchip/at91sam9263ek.dts2
-rw-r--r--arch/arm/boot/dts/microchip/at91sam9g20ek_common.dtsi2
-rw-r--r--arch/arm/boot/dts/microchip/at91sam9g45.dtsi4
-rw-r--r--arch/arm/boot/dts/microchip/at91sam9m10g45ek.dts4
-rw-r--r--arch/arm/boot/dts/microchip/at91sam9n12.dtsi2
-rw-r--r--arch/arm/boot/dts/microchip/at91sam9n12ek.dts2
-rw-r--r--arch/arm/boot/dts/microchip/at91sam9x5.dtsi4
-rw-r--r--arch/arm/boot/dts/microchip/ethernut5.dts2
-rw-r--r--arch/arm/boot/dts/microchip/evk-pro3.dts2
-rw-r--r--arch/arm/boot/dts/microchip/mpa1600.dts2
-rw-r--r--arch/arm/boot/dts/microchip/pm9g45.dts4
-rw-r--r--arch/arm/boot/dts/microchip/sam9x60.dtsi4
-rw-r--r--arch/arm/boot/dts/microchip/sama5d2.dtsi4
-rw-r--r--arch/arm/boot/dts/microchip/sama5d3.dtsi4
-rw-r--r--arch/arm/boot/dts/microchip/sama5d3xmb.dtsi4
-rw-r--r--arch/arm/boot/dts/microchip/sama5d4.dtsi4
-rw-r--r--arch/arm/boot/dts/microchip/sama7d65.dtsi95
-rw-r--r--arch/arm/boot/dts/microchip/tny_a9260.dts2
-rw-r--r--arch/arm/boot/dts/microchip/tny_a9260_common.dtsi2
-rw-r--r--arch/arm/boot/dts/microchip/tny_a9263.dts2
-rw-r--r--arch/arm/boot/dts/microchip/tny_a9g20.dts2
-rw-r--r--arch/arm/boot/dts/microchip/usb_a9260.dts2
-rw-r--r--arch/arm/boot/dts/microchip/usb_a9260_common.dtsi11
-rw-r--r--arch/arm/boot/dts/microchip/usb_a9263.dts11
-rw-r--r--arch/arm/boot/dts/microchip/usb_a9g20-dab-mmx.dtsi10
-rw-r--r--arch/arm/boot/dts/microchip/usb_a9g20.dts2
-rw-r--r--arch/arm/boot/dts/microchip/usb_a9g20_common.dtsi2
-rw-r--r--arch/arm/boot/dts/microchip/usb_a9g20_lpw.dts4
-rw-r--r--arch/arm/boot/dts/nvidia/tegra114.dtsi34
-rw-r--r--arch/arm/boot/dts/nvidia/tegra124.dtsi40
-rw-r--r--arch/arm/boot/dts/nvidia/tegra20-asus-tf101.dts11
-rw-r--r--arch/arm/boot/dts/nxp/imx/Makefile10
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx31.dtsi2
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx50.dtsi2
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx51.dtsi2
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx53-mba53.dts2
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx53-ppd.dts2
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx53.dtsi2
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx6dl-colibri-v1.2-aster.dts11
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx6dl-colibri-v1.2-eval-v3.dts11
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx6dl-colibri-v1.2-iris-v2.dts11
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx6dl-colibri-v1.2-iris.dts11
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx6q-apalis-v1.2-eval-v1.2.dts11
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx6q-apalis-v1.2-eval.dts11
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx6q-apalis-v1.2-ixora-v1.1.dts11
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx6q-apalis-v1.2-ixora-v1.2.dts11
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx6q-apalis-v1.2-ixora.dts11
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx6qdl-apalis-v1.2.dtsi57
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx6qdl-apalis.dtsi12
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx6qdl-colibri-v1.2.dtsi57
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx6qdl-colibri.dtsi2
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx6qdl-mba6.dtsi2
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx6qdl-tqma6.dtsi25
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx6qdl-tqma6a.dtsi4
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx6qdl-tqma6b.dtsi4
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx6ul-14x14-evk.dtsi32
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx6ul-tqma6ul-common.dtsi11
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx6ul-tqma6ul1-mba6ulx.dts3
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx6ul-tqma6ul1.dtsi2
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx6ul-var-som-concerto.dts320
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx6ul-var-som.dtsi233
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx7-mba7.dtsi2
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx7-tqma7.dtsi7
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx7d-sdb.dts32
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx7s.dtsi56
-rw-r--r--arch/arm/boot/dts/nxp/imx/mba6ulx.dtsi2
-rw-r--r--arch/arm/boot/dts/nxp/mxs/Makefile3
-rw-r--r--arch/arm/boot/dts/nxp/mxs/imx28-btt3-0.dts12
-rw-r--r--arch/arm/boot/dts/nxp/mxs/imx28-btt3-1.dts8
-rw-r--r--arch/arm/boot/dts/nxp/mxs/imx28-btt3-2.dts39
-rw-r--r--arch/arm/boot/dts/nxp/mxs/imx28-btt3.dtsi313
-rw-r--r--arch/arm/boot/dts/nxp/mxs/imx28-sps1.dts13
-rw-r--r--arch/arm/boot/dts/nxp/vf/vf610-bk4.dts4
-rw-r--r--arch/arm/boot/dts/nxp/vf/vf610-colibri.dtsi1
-rw-r--r--arch/arm/boot/dts/nxp/vf/vf610-zii-dev-rev-c.dts4
-rw-r--r--arch/arm/boot/dts/nxp/vf/vfxxx.dtsi21
-rw-r--r--arch/arm/boot/dts/renesas/r8a7790-lager.dts1
-rw-r--r--arch/arm/boot/dts/renesas/r8a7790-stout.dts1
-rw-r--r--arch/arm/boot/dts/renesas/r8a7790.dtsi7
-rw-r--r--arch/arm/boot/dts/renesas/r8a7791-koelsch.dts1
-rw-r--r--arch/arm/boot/dts/renesas/r8a7791-porter.dts1
-rw-r--r--arch/arm/boot/dts/renesas/r8a7791.dtsi7
-rw-r--r--arch/arm/boot/dts/renesas/r8a7792-blanche.dts1
-rw-r--r--arch/arm/boot/dts/renesas/r8a7792-wheat.dts1
-rw-r--r--arch/arm/boot/dts/renesas/r8a7792.dtsi6
-rw-r--r--arch/arm/boot/dts/renesas/r8a7793-gose.dts1
-rw-r--r--arch/arm/boot/dts/renesas/r8a7793.dtsi7
-rw-r--r--arch/arm/boot/dts/renesas/r8a7794-alt.dts1
-rw-r--r--arch/arm/boot/dts/renesas/r8a7794-silk.dts1
-rw-r--r--arch/arm/boot/dts/renesas/r8a7794.dtsi7
-rw-r--r--arch/arm/boot/dts/renesas/r9a06g032.dtsi20
-rw-r--r--arch/arm/boot/dts/st/Makefile4
-rw-r--r--arch/arm/boot/dts/st/stm32f746-disco.dts18
-rw-r--r--arch/arm/boot/dts/st/stm32f769-disco.dts7
-rw-r--r--arch/arm/boot/dts/st/stm32mp131.dtsi35
-rw-r--r--arch/arm/boot/dts/st/stm32mp133c-prihmb.dts496
-rw-r--r--arch/arm/boot/dts/st/stm32mp135f-dhcor-dhsbc.dts30
-rw-r--r--arch/arm/boot/dts/st/stm32mp15-pinctrl.dtsi292
-rw-r--r--arch/arm/boot/dts/st/stm32mp151.dtsi1
-rw-r--r--arch/arm/boot/dts/st/stm32mp151c-plyaqm.dts376
-rw-r--r--arch/arm/boot/dts/st/stm32mp153c-lxa-fairytux2-gen1.dts103
-rw-r--r--arch/arm/boot/dts/st/stm32mp153c-lxa-fairytux2-gen2.dts147
-rw-r--r--arch/arm/boot/dts/st/stm32mp153c-lxa-fairytux2.dtsi397
-rw-r--r--arch/arm/boot/dts/st/stm32mp157c-dk2.dts2
-rw-r--r--arch/arm/boot/dts/ti/davinci/da850-lego-ev3.dts10
-rw-r--r--arch/arm/boot/dts/ti/omap/omap3-evm-processor-common.dtsi2
-rw-r--r--arch/arm/boot/dts/ti/omap/omap4-l4.dtsi8
-rw-r--r--arch/arm/boot/dts/ti/omap/omap4-panda-a4.dts5
-rw-r--r--arch/arm/boot/dts/xilinx/zynq-7000.dtsi33
-rw-r--r--arch/arm/boot/dts/xilinx/zynq-cc108.dts41
-rw-r--r--arch/arm/boot/dts/xilinx/zynq-ebaz4205.dts2
-rw-r--r--arch/arm/boot/dts/xilinx/zynq-microzed.dts10
-rw-r--r--arch/arm/boot/dts/xilinx/zynq-parallella.dts1
-rw-r--r--arch/arm/boot/dts/xilinx/zynq-zc702.dts85
-rw-r--r--arch/arm/boot/dts/xilinx/zynq-zc706.dts67
-rw-r--r--arch/arm/boot/dts/xilinx/zynq-zc770-xm010.dts39
-rw-r--r--arch/arm/boot/dts/xilinx/zynq-zc770-xm011.dts31
-rw-r--r--arch/arm/boot/dts/xilinx/zynq-zc770-xm012.dts35
-rw-r--r--arch/arm/boot/dts/xilinx/zynq-zc770-xm013.dts41
-rw-r--r--arch/arm/boot/dts/xilinx/zynq-zed.dts43
-rw-r--r--arch/arm/boot/dts/xilinx/zynq-zturn-common.dtsi8
-rw-r--r--arch/arm/boot/dts/xilinx/zynq-zybo-z7.dts10
-rw-r--r--arch/arm/boot/dts/xilinx/zynq-zybo.dts9
-rw-r--r--arch/arm/configs/bcm2835_defconfig1
-rw-r--r--arch/arm/configs/dove_defconfig1
-rw-r--r--arch/arm/configs/ep93xx_defconfig1
-rw-r--r--arch/arm/configs/imx_v6_v7_defconfig2
-rw-r--r--arch/arm/configs/lpc18xx_defconfig1
-rw-r--r--arch/arm/configs/moxart_defconfig1
-rw-r--r--arch/arm/configs/multi_v5_defconfig1
-rw-r--r--arch/arm/configs/mvebu_v5_defconfig1
-rw-r--r--arch/arm/configs/mxs_defconfig1
-rw-r--r--arch/arm/configs/omap1_defconfig1
-rw-r--r--arch/arm/configs/omap2plus_defconfig3
-rw-r--r--arch/arm/configs/shmobile_defconfig1
-rw-r--r--arch/arm/configs/spitz_defconfig1
-rw-r--r--arch/arm/configs/stm32_defconfig1
-rw-r--r--arch/arm/configs/tegra_defconfig1
-rw-r--r--arch/arm/configs/wpcm450_defconfig1
-rw-r--r--arch/arm/include/asm/io.h2
-rw-r--r--arch/arm/include/asm/vdso.h2
-rw-r--r--arch/arm/include/asm/vdso/gettimeofday.h7
-rw-r--r--arch/arm/include/asm/vdso/vsyscall.h12
-rw-r--r--arch/arm/kernel/asm-offsets.c4
-rw-r--r--arch/arm/kernel/traps.c11
-rw-r--r--arch/arm/kernel/vdso.c34
-rw-r--r--arch/arm/lib/crc-t10dif-glue.c10
-rw-r--r--arch/arm/lib/crc32-glue.c12
-rw-r--r--arch/arm/mach-at91/pm.c47
-rw-r--r--arch/arm/mach-at91/pm.h1
-rw-r--r--arch/arm/mach-at91/pm_data-offsets.c2
-rw-r--r--arch/arm/mach-at91/pm_suspend.S101
-rw-r--r--arch/arm/mach-davinci/Kconfig2
-rw-r--r--arch/arm/mach-davinci/da830.c1
-rw-r--r--arch/arm/mach-imx/common.h2
-rw-r--r--arch/arm/mach-imx/mmdc.c5
-rw-r--r--arch/arm/mach-omap1/Kconfig1
-rw-r--r--arch/arm/mach-s3c/devs.c1
-rw-r--r--arch/arm/mach-s3c/setup-fb-24bpp-s3c64xx.c1
-rw-r--r--arch/arm/mach-shmobile/headsmp.S1
-rw-r--r--arch/arm/mach-shmobile/setup-rcar-gen2.c76
-rw-r--r--arch/arm/mm/Kconfig1
-rw-r--r--arch/arm/mm/cache-l2x0-pmu.c3
-rw-r--r--arch/arm/mm/fault-armv.c37
-rw-r--r--arch/arm/mm/ioremap.c2
-rw-r--r--arch/arm/mm/nommu.c2
-rw-r--r--arch/arm/tools/syscall.tbl1
-rw-r--r--arch/arm/vdso/Makefile2
-rw-r--r--arch/arm/vdso/vdso.lds.S4
-rw-r--r--arch/arm64/Kconfig14
-rw-r--r--arch/arm64/Kconfig.platforms9
-rw-r--r--arch/arm64/boot/dts/airoha/en7581-evb.dts44
-rw-r--r--arch/arm64/boot/dts/airoha/en7581.dtsi91
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-a100-allwinner-perf1.dts5
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-a100-cpu-opp.dtsi90
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-a100.dtsi8
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-h700-anbernic-rg35xx-2024.dts18
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-h700-anbernic-rg35xx-h.dts23
-rw-r--r--arch/arm64/boot/dts/amd/amd-overdrive-rev-b0.dts2
-rw-r--r--arch/arm64/boot/dts/amd/amd-overdrive-rev-b1.dts61
-rw-r--r--arch/arm64/boot/dts/amd/amd-seattle-clks.dtsi24
-rw-r--r--arch/arm64/boot/dts/amd/amd-seattle-soc.dtsi8
-rw-r--r--arch/arm64/boot/dts/amd/amd-seattle-xgbe-b.dtsi32
-rw-r--r--arch/arm64/boot/dts/amlogic/amlogic-a4.dtsi21
-rw-r--r--arch/arm64/boot/dts/amlogic/amlogic-a5.dtsi12
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-axg.dtsi24
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi33
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-g12a-fbx8am.dts4
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-g12a-radxa-zero.dts4
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-g12a-sei510.dts4
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-g12a-u200.dts2
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-g12a-x96-max.dts4
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-g12b-a311d-libretech-cc.dts2
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-g12b-bananapi-cm4.dtsi4
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-g12b-bananapi.dtsi4
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-g12b-khadas-vim3.dtsi4
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-g12b-odroid.dtsi4
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-g12b-radxa-zero2.dts8
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-g12b-w400.dtsi6
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gx-libretech-pc.dtsi6
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gx-p23x-q20x.dtsi2
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gx.dtsi8
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxbb-nanopi-k2.dts2
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxbb-nexbox-a95x.dts2
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxbb-p20x.dtsi2
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxbb-vega-s95.dtsi2
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxbb-wetek.dtsi2
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi25
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxl-s805x-p241.dts2
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxl-s905w-jethome-jethub-j80.dts2
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxl-s905x-hwacom-amazetv.dts2
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxl-s905x-khadas-vim.dts2
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxl-s905x-nexbox-a95x.dts2
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dtsi2
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxl.dtsi25
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dts4
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxm-rbox-pro.dts2
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-libretech-cottonwood.dtsi6
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-sm1-ac2xx.dtsi6
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-sm1-bananapi.dtsi2
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-sm1-khadas-vim3l.dts2
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-sm1-odroid.dtsi2
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-sm1-sei610.dts6
-rw-r--r--arch/arm64/boot/dts/apple/Makefile16
-rw-r--r--arch/arm64/boot/dts/apple/s5l8960x-5s.dtsi9
-rw-r--r--arch/arm64/boot/dts/apple/s5l8960x-air1.dtsi5
-rw-r--r--arch/arm64/boot/dts/apple/s5l8960x-mini2.dtsi5
-rw-r--r--arch/arm64/boot/dts/apple/s5l8960x-opp.dtsi45
-rw-r--r--arch/arm64/boot/dts/apple/s5l8960x-pmgr.dtsi610
-rw-r--r--arch/arm64/boot/dts/apple/s5l8960x.dtsi30
-rw-r--r--arch/arm64/boot/dts/apple/s5l8965x-opp.dtsi45
-rw-r--r--arch/arm64/boot/dts/apple/s800-0-3-common.dtsi4
-rw-r--r--arch/arm64/boot/dts/apple/s800-0-3-pmgr.dtsi757
-rw-r--r--arch/arm64/boot/dts/apple/s800-0-3.dtsi179
-rw-r--r--arch/arm64/boot/dts/apple/s8000.dtsi162
-rw-r--r--arch/arm64/boot/dts/apple/s8001-common.dtsi1
-rw-r--r--arch/arm64/boot/dts/apple/s8001-j98a-j99a.dtsi26
-rw-r--r--arch/arm64/boot/dts/apple/s8001-j98a.dts1
-rw-r--r--arch/arm64/boot/dts/apple/s8001-j99a.dts1
-rw-r--r--arch/arm64/boot/dts/apple/s8001-pmgr.dtsi822
-rw-r--r--arch/arm64/boot/dts/apple/s8001.dtsi81
-rw-r--r--arch/arm64/boot/dts/apple/s8003.dtsi63
-rw-r--r--arch/arm64/boot/dts/apple/s800x-6s.dtsi4
-rw-r--r--arch/arm64/boot/dts/apple/s800x-ipad5.dtsi4
-rw-r--r--arch/arm64/boot/dts/apple/s800x-se.dtsi4
-rw-r--r--arch/arm64/boot/dts/apple/spi1-nvram.dtsi39
-rw-r--r--arch/arm64/boot/dts/apple/t600x-common.dtsi7
-rw-r--r--arch/arm64/boot/dts/apple/t600x-die0.dtsi28
-rw-r--r--arch/arm64/boot/dts/apple/t600x-gpio-pins.dtsi14
-rw-r--r--arch/arm64/boot/dts/apple/t600x-j314-j316.dtsi2
-rw-r--r--arch/arm64/boot/dts/apple/t600x-j375.dtsi2
-rw-r--r--arch/arm64/boot/dts/apple/t7000-6.dtsi8
-rw-r--r--arch/arm64/boot/dts/apple/t7000-handheld.dtsi4
-rw-r--r--arch/arm64/boot/dts/apple/t7000-j42d.dts5
-rw-r--r--arch/arm64/boot/dts/apple/t7000-mini4.dtsi12
-rw-r--r--arch/arm64/boot/dts/apple/t7000-n102.dts4
-rw-r--r--arch/arm64/boot/dts/apple/t7000-pmgr.dtsi641
-rw-r--r--arch/arm64/boot/dts/apple/t7000.dtsi73
-rw-r--r--arch/arm64/boot/dts/apple/t7001-air2.dtsi1
-rw-r--r--arch/arm64/boot/dts/apple/t7001-pmgr.dtsi650
-rw-r--r--arch/arm64/boot/dts/apple/t7001.dtsi65
-rw-r--r--arch/arm64/boot/dts/apple/t8010-7.dtsi12
-rw-r--r--arch/arm64/boot/dts/apple/t8010-common.dtsi4
-rw-r--r--arch/arm64/boot/dts/apple/t8010-ipad6.dtsi12
-rw-r--r--arch/arm64/boot/dts/apple/t8010-n112.dts4
-rw-r--r--arch/arm64/boot/dts/apple/t8010-pmgr.dtsi772
-rw-r--r--arch/arm64/boot/dts/apple/t8010.dtsi115
-rw-r--r--arch/arm64/boot/dts/apple/t8011-common.dtsi1
-rw-r--r--arch/arm64/boot/dts/apple/t8011-pmgr.dtsi806
-rw-r--r--arch/arm64/boot/dts/apple/t8011-pro2.dtsi8
-rw-r--r--arch/arm64/boot/dts/apple/t8011.dtsi101
-rw-r--r--arch/arm64/boot/dts/apple/t8012-j132.dts14
-rw-r--r--arch/arm64/boot/dts/apple/t8012-j137.dts14
-rw-r--r--arch/arm64/boot/dts/apple/t8012-j140a.dts14
-rw-r--r--arch/arm64/boot/dts/apple/t8012-j140k.dts14
-rw-r--r--arch/arm64/boot/dts/apple/t8012-j152f.dts15
-rw-r--r--arch/arm64/boot/dts/apple/t8012-j160.dts14
-rw-r--r--arch/arm64/boot/dts/apple/t8012-j174.dts14
-rw-r--r--arch/arm64/boot/dts/apple/t8012-j185.dts14
-rw-r--r--arch/arm64/boot/dts/apple/t8012-j185f.dts14
-rw-r--r--arch/arm64/boot/dts/apple/t8012-j213.dts15
-rw-r--r--arch/arm64/boot/dts/apple/t8012-j214k.dts15
-rw-r--r--arch/arm64/boot/dts/apple/t8012-j215.dts15
-rw-r--r--arch/arm64/boot/dts/apple/t8012-j223.dts15
-rw-r--r--arch/arm64/boot/dts/apple/t8012-j230k.dts14
-rw-r--r--arch/arm64/boot/dts/apple/t8012-j680.dts15
-rw-r--r--arch/arm64/boot/dts/apple/t8012-j780.dts15
-rw-r--r--arch/arm64/boot/dts/apple/t8012-jxxx.dtsi44
-rw-r--r--arch/arm64/boot/dts/apple/t8012-pmgr.dtsi837
-rw-r--r--arch/arm64/boot/dts/apple/t8012-touchbar.dtsi20
-rw-r--r--arch/arm64/boot/dts/apple/t8012.dtsi281
-rw-r--r--arch/arm64/boot/dts/apple/t8015-8.dtsi4
-rw-r--r--arch/arm64/boot/dts/apple/t8015-common.dtsi1
-rw-r--r--arch/arm64/boot/dts/apple/t8015-pmgr.dtsi931
-rw-r--r--arch/arm64/boot/dts/apple/t8015.dtsi151
-rw-r--r--arch/arm64/boot/dts/apple/t8103-j293.dts58
-rw-r--r--arch/arm64/boot/dts/apple/t8103-jxxx.dtsi2
-rw-r--r--arch/arm64/boot/dts/apple/t8103-pmgr.dtsi18
-rw-r--r--arch/arm64/boot/dts/apple/t8103.dtsi137
-rw-r--r--arch/arm64/boot/dts/apple/t8112-j493.dts54
-rw-r--r--arch/arm64/boot/dts/apple/t8112-jxxx.dtsi2
-rw-r--r--arch/arm64/boot/dts/apple/t8112.dtsi105
-rw-r--r--arch/arm64/boot/dts/arm/Makefile1
-rw-r--r--arch/arm64/boot/dts/arm/corstone1000-fvp.dts26
-rw-r--r--arch/arm64/boot/dts/arm/corstone1000.dtsi3
-rw-r--r--arch/arm64/boot/dts/arm/morello-fvp.dts77
-rw-r--r--arch/arm64/boot/dts/arm/morello-sdp.dts157
-rw-r--r--arch/arm64/boot/dts/arm/morello.dtsi323
-rw-r--r--arch/arm64/boot/dts/broadcom/bcm2712.dtsi2
-rw-r--r--arch/arm64/boot/dts/exynos/exynos8895-dreamlte.dts72
-rw-r--r--arch/arm64/boot/dts/exynos/exynos8895.dtsi956
-rw-r--r--arch/arm64/boot/dts/exynos/exynos990.dtsi92
-rw-r--r--arch/arm64/boot/dts/exynos/exynosautov920.dtsi138
-rw-r--r--arch/arm64/boot/dts/exynos/google/Makefile1
-rw-r--r--arch/arm64/boot/dts/exynos/google/gs101-oriole.dts267
-rw-r--r--arch/arm64/boot/dts/exynos/google/gs101-pixel-common.dtsi294
-rw-r--r--arch/arm64/boot/dts/exynos/google/gs101-raven.dts29
-rw-r--r--arch/arm64/boot/dts/exynos/google/gs101.dtsi66
-rw-r--r--arch/arm64/boot/dts/freescale/Makefile21
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls1088a-ten64.dts44
-rw-r--r--arch/arm64/boot/dts/freescale/imx8-apalis-v1.1.dtsi16
-rw-r--r--arch/arm64/boot/dts/freescale/imx8-ss-hsio.dtsi30
-rw-r--r--arch/arm64/boot/dts/freescale/imx8dxl-evk.dts42
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mm-kontron-bl.dts10
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mm-kontron-osm-s.dtsi7
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mm-phyboard-polis-peb-av-10.dtso237
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mm-phyboard-polis-peb-eval-01.dtso72
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mm-phyboard-polis-rdk.dts17
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mm-phycore-no-eth.dtso12
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mm-phycore-no-spiflash.dtso16
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mm-phycore-rpmsg.dtso58
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mm-phycore-som.dtsi22
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mm-phygate-tauri-l.dts11
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mm-tqma8mqml.dtsi1
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mm-verdin-dahlia.dtsi6
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mm-verdin.dtsi21
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mn-bsh-smm-s2pro.dts1
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mn-tqma8mqnl.dtsi1
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mp-evk.dts22
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mp-kontron-osm-s.dtsi7
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mp-nominal.dtsi64
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mp-skov-basic.dts10
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mp-skov-reva.dtsi131
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mp-skov-revb-hdmi.dts45
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mp-skov-revb-lt6.dts60
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mp-skov-revb-mi1010ait-1cp1.dts30
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mp-skov-revc-bd500.dts91
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mp-skov-revc-tian-g07017.dts81
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mp-tqma8mpql-mba8mpxl.dts2
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mp-tqma8mpql.dtsi17
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mp-verdin-dahlia.dtsi6
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mp.dtsi23
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mq-librem5-devkit.dts25
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mq-librem5.dtsi26
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mq-tqma8mq.dtsi1
-rw-r--r--arch/arm64/boot/dts/freescale/imx8qm-apalis-v1.1.dtsi2
-rw-r--r--arch/arm64/boot/dts/freescale/imx8qm-apalis.dtsi2
-rw-r--r--arch/arm64/boot/dts/freescale/imx8qm-mek.dts112
-rw-r--r--arch/arm64/boot/dts/freescale/imx8qm-ss-hsio.dtsi24
-rw-r--r--arch/arm64/boot/dts/freescale/imx8qxp-mek-pcie-ep.dtso22
-rw-r--r--arch/arm64/boot/dts/freescale/imx8qxp-mek.dts32
-rw-r--r--arch/arm64/boot/dts/freescale/imx8x-colibri.dtsi4
-rw-r--r--arch/arm64/boot/dts/freescale/imx93-kontron-osm-s.dtsi5
-rw-r--r--arch/arm64/boot/dts/freescale/imx93-tqma9352-mba93xxca.dts54
-rw-r--r--arch/arm64/boot/dts/freescale/imx93-tqma9352-mba93xxla.dts6
-rw-r--r--arch/arm64/boot/dts/freescale/imx93.dtsi8
-rw-r--r--arch/arm64/boot/dts/freescale/imx95-15x15-evk.dts1130
-rw-r--r--arch/arm64/boot/dts/freescale/imx95-19x19-evk.dts83
-rw-r--r--arch/arm64/boot/dts/freescale/imx95.dtsi108
-rw-r--r--arch/arm64/boot/dts/freescale/mba8mx.dtsi2
-rw-r--r--arch/arm64/boot/dts/freescale/mba8xx.dtsi31
-rw-r--r--arch/arm64/boot/dts/freescale/s32g2.dtsi141
-rw-r--r--arch/arm64/boot/dts/freescale/s32g274a-evb.dts1
-rw-r--r--arch/arm64/boot/dts/freescale/s32g274a-rdb2.dts1
-rw-r--r--arch/arm64/boot/dts/freescale/s32g3.dtsi150
-rw-r--r--arch/arm64/boot/dts/freescale/s32g399a-rdb3.dts9
-rw-r--r--arch/arm64/boot/dts/freescale/s32gxxxa-evb.dtsi222
-rw-r--r--arch/arm64/boot/dts/freescale/s32gxxxa-rdb.dtsi170
-rw-r--r--arch/arm64/boot/dts/freescale/tqma8xx.dtsi12
-rw-r--r--arch/arm64/boot/dts/hisilicon/hi3660-coresight.dtsi8
-rw-r--r--arch/arm64/boot/dts/marvell/ac5-98dx25xx.dtsi2
-rw-r--r--arch/arm64/boot/dts/marvell/armada-371x.dtsi17
-rw-r--r--arch/arm64/boot/dts/marvell/armada-3720-db.dts2
-rw-r--r--arch/arm64/boot/dts/marvell/armada-3720-espressobin-emmc.dts2
-rw-r--r--arch/arm64/boot/dts/marvell/armada-3720-espressobin-ultra.dts2
-rw-r--r--arch/arm64/boot/dts/marvell/armada-3720-espressobin-v7-emmc.dts2
-rw-r--r--arch/arm64/boot/dts/marvell/armada-3720-espressobin-v7.dts2
-rw-r--r--arch/arm64/boot/dts/marvell/armada-3720-espressobin.dts2
-rw-r--r--arch/arm64/boot/dts/marvell/armada-3720-gl-mv1000.dts2
-rw-r--r--arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts2
-rw-r--r--arch/arm64/boot/dts/marvell/armada-372x.dtsi3
-rw-r--r--arch/arm64/boot/dts/marvell/armada-37xx.dtsi4
-rw-r--r--arch/arm64/boot/dts/marvell/armada-7020.dtsi6
-rw-r--r--arch/arm64/boot/dts/marvell/armada-7040.dtsi6
-rw-r--r--arch/arm64/boot/dts/marvell/armada-8020.dtsi6
-rw-r--r--arch/arm64/boot/dts/marvell/armada-8040-clearfog-gt-8k.dts8
-rw-r--r--arch/arm64/boot/dts/marvell/armada-8040-puzzle-m801.dts2
-rw-r--r--arch/arm64/boot/dts/marvell/armada-8040.dtsi6
-rw-r--r--arch/arm64/boot/dts/marvell/armada-8080.dtsi6
-rw-r--r--arch/arm64/boot/dts/marvell/armada-ap806-dual.dtsi3
-rw-r--r--arch/arm64/boot/dts/marvell/armada-ap806-quad.dtsi3
-rw-r--r--arch/arm64/boot/dts/marvell/armada-ap806.dtsi6
-rw-r--r--arch/arm64/boot/dts/marvell/armada-ap807-quad.dtsi3
-rw-r--r--arch/arm64/boot/dts/marvell/armada-ap807.dtsi6
-rw-r--r--arch/arm64/boot/dts/marvell/armada-ap80x.dtsi33
-rw-r--r--arch/arm64/boot/dts/marvell/armada-ap810-ap0-octa-core.dtsi1
-rw-r--r--arch/arm64/boot/dts/marvell/armada-ap810-ap0.dtsi24
-rw-r--r--arch/arm64/boot/dts/marvell/armada-cp110.dtsi4
-rw-r--r--arch/arm64/boot/dts/marvell/armada-cp115.dtsi4
-rw-r--r--arch/arm64/boot/dts/marvell/armada-cp11x.dtsi4
-rw-r--r--arch/arm64/boot/dts/marvell/cn9130-sr-som.dtsi3
-rw-r--r--arch/arm64/boot/dts/mediatek/Makefile3
-rw-r--r--arch/arm64/boot/dts/mediatek/mt6359.dtsi3
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8173-elm.dtsi2
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8173.dtsi12
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-damu.dts9
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-fennel-sku1.dts8
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-fennel-sku6.dts8
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-fennel-sku7.dts8
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8188-geralt.dtsi173
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8188.dtsi307
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8195-cherry.dtsi184
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8195.dtsi129
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8365-evk.dts245
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8365.dtsi336
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8370-genio-510-evk.dts19
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8370.dtsi64
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8390-genio-700-evk.dts1033
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8390-genio-common.dtsi1223
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8395-genio-1200-evk.dts102
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8395-radxa-nio-12l-8-hd-panel.dtso84
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8395-radxa-nio-12l.dts71
-rw-r--r--arch/arm64/boot/dts/nvidia/tegra210-p2180.dtsi13
-rw-r--r--arch/arm64/boot/dts/nvidia/tegra210-p2597.dtsi20
-rw-r--r--arch/arm64/boot/dts/nvidia/tegra210-p3450-0000.dts10
-rw-r--r--arch/arm64/boot/dts/nvidia/tegra210.dtsi10
-rw-r--r--arch/arm64/boot/dts/nvidia/tegra234-p3740-0002+p3701-0008.dts10
-rw-r--r--arch/arm64/boot/dts/nvidia/tegra234-p3768-0000+p3767.dtsi7
-rw-r--r--arch/arm64/boot/dts/qcom/sc8280xp-huawei-gaokun3.dts163
-rw-r--r--arch/arm64/boot/dts/qcom/sdm845.dtsi1
-rw-r--r--arch/arm64/boot/dts/renesas/Makefile5
-rw-r--r--arch/arm64/boot/dts/renesas/beacon-renesom-som.dtsi1
-rw-r--r--arch/arm64/boot/dts/renesas/condor-common.dtsi1
-rw-r--r--arch/arm64/boot/dts/renesas/draak.dtsi1
-rw-r--r--arch/arm64/boot/dts/renesas/ebisu.dtsi1
-rw-r--r--arch/arm64/boot/dts/renesas/hihope-common.dtsi1
-rw-r--r--arch/arm64/boot/dts/renesas/r8a774a1.dtsi8
-rw-r--r--arch/arm64/boot/dts/renesas/r8a774b1.dtsi8
-rw-r--r--arch/arm64/boot/dts/renesas/r8a774c0-cat874.dts1
-rw-r--r--arch/arm64/boot/dts/renesas/r8a774c0.dtsi11
-rw-r--r--arch/arm64/boot/dts/renesas/r8a774e1.dtsi8
-rw-r--r--arch/arm64/boot/dts/renesas/r8a77951.dtsi7
-rw-r--r--arch/arm64/boot/dts/renesas/r8a77960.dtsi8
-rw-r--r--arch/arm64/boot/dts/renesas/r8a77961.dtsi8
-rw-r--r--arch/arm64/boot/dts/renesas/r8a77965.dtsi8
-rw-r--r--arch/arm64/boot/dts/renesas/r8a77970-eagle-function-expansion.dtso2
-rw-r--r--arch/arm64/boot/dts/renesas/r8a77970-eagle.dts1
-rw-r--r--arch/arm64/boot/dts/renesas/r8a77970-v3msk.dts1
-rw-r--r--arch/arm64/boot/dts/renesas/r8a77970.dtsi7
-rw-r--r--arch/arm64/boot/dts/renesas/r8a77980-v3hsk.dts1
-rw-r--r--arch/arm64/boot/dts/renesas/r8a77980.dtsi7
-rw-r--r--arch/arm64/boot/dts/renesas/r8a77990.dtsi11
-rw-r--r--arch/arm64/boot/dts/renesas/r8a77995.dtsi7
-rw-r--r--arch/arm64/boot/dts/renesas/r8a779a0-falcon-cpu.dtsi1
-rw-r--r--arch/arm64/boot/dts/renesas/r8a779a0.dtsi88
-rw-r--r--arch/arm64/boot/dts/renesas/r8a779f0-spider-cpu.dtsi1
-rw-r--r--arch/arm64/boot/dts/renesas/r8a779f0-spider-ethernet.dtsi104
-rw-r--r--arch/arm64/boot/dts/renesas/r8a779f0.dtsi17
-rw-r--r--arch/arm64/boot/dts/renesas/r8a779f4-s4sk.dts69
-rw-r--r--arch/arm64/boot/dts/renesas/r8a779g0.dtsi88
-rw-r--r--arch/arm64/boot/dts/renesas/r8a779h0-gray-hawk-single.dts154
-rw-r--r--arch/arm64/boot/dts/renesas/r8a779h0.dtsi34
-rw-r--r--arch/arm64/boot/dts/renesas/r9a07g044l2-remi-pi.dts339
-rw-r--r--arch/arm64/boot/dts/renesas/r9a08g045.dtsi29
-rw-r--r--arch/arm64/boot/dts/renesas/r9a08g045s33-smarc-pmod1-type-3a.dtso48
-rw-r--r--arch/arm64/boot/dts/renesas/r9a09g047.dtsi37
-rw-r--r--arch/arm64/boot/dts/renesas/r9a09g057.dtsi52
-rw-r--r--arch/arm64/boot/dts/renesas/r9a09g057h44-rzv2h-evk.dts15
-rw-r--r--arch/arm64/boot/dts/renesas/r9a09g057h48-kakip.dts136
-rw-r--r--arch/arm64/boot/dts/renesas/rzg3e-smarc-som.dtsi4
-rw-r--r--arch/arm64/boot/dts/renesas/rzg3s-smarc-som.dtsi20
-rw-r--r--arch/arm64/boot/dts/renesas/rzg3s-smarc-switches.h40
-rw-r--r--arch/arm64/boot/dts/renesas/rzg3s-smarc.dtsi13
-rw-r--r--arch/arm64/boot/dts/renesas/salvator-common.dtsi1
-rw-r--r--arch/arm64/boot/dts/renesas/ulcb-kf-audio-graph-card-mix+split.dtsi73
-rw-r--r--arch/arm64/boot/dts/renesas/ulcb-kf-audio-graph-card2-mix+split.dtsi66
-rw-r--r--arch/arm64/boot/dts/renesas/ulcb-kf-simple-audio-card-mix+split.dtsi53
-rw-r--r--arch/arm64/boot/dts/renesas/ulcb.dtsi1
-rw-r--r--arch/arm64/boot/dts/renesas/white-hawk-cpu-common.dtsi1
-rw-r--r--arch/arm64/boot/dts/renesas/white-hawk-csi-dsi.dtsi8
-rw-r--r--arch/arm64/boot/dts/rockchip/Makefile63
-rw-r--r--arch/arm64/boot/dts/rockchip/px30-ringneck-haikou-lvds-9904379.dtso130
-rw-r--r--arch/arm64/boot/dts/rockchip/px30-ringneck-haikou-video-demo.dtso190
-rw-r--r--arch/arm64/boot/dts/rockchip/px30-ringneck-haikou.dts14
-rw-r--r--arch/arm64/boot/dts/rockchip/px30-ringneck.dtsi4
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3308-roc-cc.dts2
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3318-a95x-z2.dts4
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3328-rock-pi-e.dts8
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-nanopi-r4s.dtsi2
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-nanopi4.dtsi2
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-puma-haikou-video-demo.dtso166
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-puma-haikou.dts26
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi10
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-roc-pc-plus.dts2
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-rockpro64.dtsi14
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3528-pinctrl.dtsi1397
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3528-radxa-e20c.dts133
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3528.dtsi378
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3566-lubancat-1.dts1
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3566-pinenote.dtsi2
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3566-quartz64-a.dts8
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3566-quartz64-b.dts8
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3568-photonicat.dts588
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3568-rock-3a.dts14
-rw-r--r--arch/arm64/boot/dts/rockchip/rk356x-base.dtsi44
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3576-armsom-sige5.dts50
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3576-evb1-v10.dts47
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3576-roc-pc.dts736
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3576-rock-4d.dts751
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3576.dtsi229
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3588-armsom-lm7.dtsi4
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3588-armsom-sige7.dts75
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3588-base.dtsi110
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3588-coolpi-cm5-evb.dts2
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3588-coolpi-cm5-genbook.dts2
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3588-coolpi-cm5.dtsi4
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3588-edgeble-neu6a-common.dtsi4
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3588-edgeble-neu6a-io.dtsi47
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3588-evb1-v10.dts50
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3588-extra.dtsi186
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3588-fet3588-c.dtsi4
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3588-firefly-core-3588j.dtsi4
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3588-firefly-icore-3588q.dtsi443
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3588-firefly-itx-3588j.dts2
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3588-friendlyelec-cm3588-nas.dts2
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3588-friendlyelec-cm3588.dtsi4
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3588-h96-max-v58.dts30
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3588-jaguar-pre-ict-tester.dtso171
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3588-jaguar.dts225
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3588-mnt-reform2.dts336
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3588-nanopc-t6.dtsi6
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3588-ok3588-c.dts4
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3588-orangepi-5-compact.dtsi11
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3588-orangepi-5-max.dts67
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3588-orangepi-5-plus.dts56
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3588-orangepi-5-ultra.dts83
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3588-orangepi-5.dtsi6
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3588-quartzpro64.dts6
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3588-rock-5-itx.dts56
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3588-rock-5b.dts83
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3588-tiger-haikou.dts4
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3588-tiger.dtsi6
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3588-toybrick-x0.dts4
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3588-turing-rk1.dtsi4
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3588s-coolpi-4b.dts31
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3588s-evb1-v10.dts6
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3588s-gameforce-ace.dts6
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3588s-indiedroid-nova.dts8
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3588s-khadas-edge2.dts4
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3588s-nanopi-r6.dtsi6
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3588s-odroid-m2.dts6
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3588s-orangepi-5.dtsi16
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3588s-rock-5a.dts6
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3588s-rock-5c.dts42
-rw-r--r--arch/arm64/boot/dts/st/Makefile6
-rw-r--r--arch/arm64/boot/dts/st/stm32mp211.dtsi128
-rw-r--r--arch/arm64/boot/dts/st/stm32mp213.dtsi9
-rw-r--r--arch/arm64/boot/dts/st/stm32mp215.dtsi9
-rw-r--r--arch/arm64/boot/dts/st/stm32mp215f-dk.dts49
-rw-r--r--arch/arm64/boot/dts/st/stm32mp21xc.dtsi8
-rw-r--r--arch/arm64/boot/dts/st/stm32mp21xf.dtsi8
-rw-r--r--arch/arm64/boot/dts/st/stm32mp231.dtsi1214
-rw-r--r--arch/arm64/boot/dts/st/stm32mp233.dtsi94
-rw-r--r--arch/arm64/boot/dts/st/stm32mp235.dtsi16
-rw-r--r--arch/arm64/boot/dts/st/stm32mp235f-dk.dts113
-rw-r--r--arch/arm64/boot/dts/st/stm32mp23xc.dtsi8
-rw-r--r--arch/arm64/boot/dts/st/stm32mp23xf.dtsi8
-rw-r--r--arch/arm64/boot/dts/st/stm32mp257f-dk.dts113
-rw-r--r--arch/arm64/boot/dts/tesla/fsd.dtsi26
-rw-r--r--arch/arm64/boot/dts/ti/Makefile3
-rw-r--r--arch/arm64/boot/dts/ti/k3-am62-phycore-som.dtsi25
-rw-r--r--arch/arm64/boot/dts/ti/k3-am62-verdin-dahlia.dtsi6
-rw-r--r--arch/arm64/boot/dts/ti/k3-am625-beagleplay.dts8
-rw-r--r--arch/arm64/boot/dts/ti/k3-am62a-mcu.dtsi1
-rw-r--r--arch/arm64/boot/dts/ti/k3-am62a-phycore-som.dtsi19
-rw-r--r--arch/arm64/boot/dts/ti/k3-am62a7-sk.dts4
-rw-r--r--arch/arm64/boot/dts/ti/k3-am62p-j722s-common-mcu.dtsi8
-rw-r--r--arch/arm64/boot/dts/ti/k3-am62p-j722s-common-wakeup.dtsi36
-rw-r--r--arch/arm64/boot/dts/ti/k3-am62p-main.dtsi26
-rw-r--r--arch/arm64/boot/dts/ti/k3-am62p5-sk.dts4
-rw-r--r--arch/arm64/boot/dts/ti/k3-am62x-phyboard-lyra.dtsi9
-rw-r--r--arch/arm64/boot/dts/ti/k3-am62x-sk-common.dtsi2
-rw-r--r--arch/arm64/boot/dts/ti/k3-am64-phycore-som.dtsi33
-rw-r--r--arch/arm64/boot/dts/ti/k3-am642-phyboard-electra-rdk.dts19
-rw-r--r--arch/arm64/boot/dts/ti/k3-am642-phyboard-electra-x27-gpio1-spi1-uart3.dtso63
-rw-r--r--arch/arm64/boot/dts/ti/k3-j721e-common-proc-board.dts1
-rw-r--r--arch/arm64/boot/dts/ti/k3-j721e-sk.dts1
-rw-r--r--arch/arm64/boot/dts/ti/k3-j721s2-som-p0.dtsi41
-rw-r--r--arch/arm64/boot/dts/ti/k3-j722s-evm.dts30
-rw-r--r--arch/arm64/boot/dts/ti/k3-j722s-main.dtsi208
-rw-r--r--arch/arm64/boot/dts/ti/k3-j784s4-evm-quad-port-eth-exp1.dtso7
-rw-r--r--arch/arm64/boot/dts/ti/k3-j784s4-j742s2-main-common.dtsi6
-rw-r--r--arch/arm64/boot/dts/xilinx/Makefile2
-rw-r--r--arch/arm64/boot/dts/xilinx/versal-net-clk.dtsi231
-rw-r--r--arch/arm64/boot/dts/xilinx/versal-net-vn-x-b2197-01-revA.dts116
-rw-r--r--arch/arm64/boot/dts/xilinx/versal-net.dtsi752
-rw-r--r--arch/arm64/boot/dts/xilinx/xlnx-zynqmp-clk.h126
-rw-r--r--arch/arm64/boot/dts/xilinx/zynqmp-clk-ccf.dtsi17
-rw-r--r--arch/arm64/configs/defconfig14
-rw-r--r--arch/arm64/hyperv/hv_core.c17
-rw-r--r--arch/arm64/hyperv/mshyperv.c6
-rw-r--r--arch/arm64/include/asm/apple_m1_pmu.h1
-rw-r--r--arch/arm64/include/asm/asm-extable.h10
-rw-r--r--arch/arm64/include/asm/asm-uaccess.h4
-rw-r--r--arch/arm64/include/asm/cache.h4
-rw-r--r--arch/arm64/include/asm/cpucaps.h2
-rw-r--r--arch/arm64/include/asm/cpufeature.h28
-rw-r--r--arch/arm64/include/asm/cputype.h54
-rw-r--r--arch/arm64/include/asm/el2_setup.h56
-rw-r--r--arch/arm64/include/asm/extable.h4
-rw-r--r--arch/arm64/include/asm/fpsimd.h1
-rw-r--r--arch/arm64/include/asm/hypervisor.h1
-rw-r--r--arch/arm64/include/asm/kernel-pgtable.h8
-rw-r--r--arch/arm64/include/asm/kvm_arm.h4
-rw-r--r--arch/arm64/include/asm/kvm_emulate.h37
-rw-r--r--arch/arm64/include/asm/kvm_host.h67
-rw-r--r--arch/arm64/include/asm/kvm_hyp.h2
-rw-r--r--arch/arm64/include/asm/kvm_nested.h1
-rw-r--r--arch/arm64/include/asm/kvm_pkvm.h1
-rw-r--r--arch/arm64/include/asm/mem_encrypt.h11
-rw-r--r--arch/arm64/include/asm/mmu.h3
-rw-r--r--arch/arm64/include/asm/mshyperv.h13
-rw-r--r--arch/arm64/include/asm/pgtable-hwdef.h35
-rw-r--r--arch/arm64/include/asm/pgtable-prot.h36
-rw-r--r--arch/arm64/include/asm/pgtable.h80
-rw-r--r--arch/arm64/include/asm/por.h11
-rw-r--r--arch/arm64/include/asm/spectre.h1
-rw-r--r--arch/arm64/include/asm/sysreg.h45
-rw-r--r--arch/arm64/include/asm/tlbflush.h22
-rw-r--r--arch/arm64/include/asm/vdso.h2
-rw-r--r--arch/arm64/include/asm/vdso/compat_gettimeofday.h38
-rw-r--r--arch/arm64/include/asm/vdso/getrandom.h12
-rw-r--r--arch/arm64/include/asm/vdso/gettimeofday.h16
-rw-r--r--arch/arm64/include/asm/vdso/vsyscall.h29
-rw-r--r--arch/arm64/include/uapi/asm/kvm.h14
-rw-r--r--arch/arm64/kernel/cpu_errata.c117
-rw-r--r--arch/arm64/kernel/cpufeature.c53
-rw-r--r--arch/arm64/kernel/elfcore.c3
-rw-r--r--arch/arm64/kernel/head.S22
-rw-r--r--arch/arm64/kernel/image-vars.h6
-rw-r--r--arch/arm64/kernel/pi/map_range.c6
-rw-r--r--arch/arm64/kernel/proton-pack.c224
-rw-r--r--arch/arm64/kernel/signal.c2
-rw-r--r--arch/arm64/kernel/topology.c182
-rw-r--r--arch/arm64/kernel/traps.c10
-rw-r--r--arch/arm64/kernel/vdso.c90
-rw-r--r--arch/arm64/kernel/vdso/Makefile2
-rw-r--r--arch/arm64/kernel/vdso/vdso.lds.S7
-rw-r--r--arch/arm64/kernel/vdso32/Makefile2
-rw-r--r--arch/arm64/kernel/vdso32/vdso.lds.S7
-rw-r--r--arch/arm64/kvm/Makefile2
-rw-r--r--arch/arm64/kvm/arch_timer.c7
-rw-r--r--arch/arm64/kvm/arm.c76
-rw-r--r--arch/arm64/kvm/at.c8
-rw-r--r--arch/arm64/kvm/emulate-nested.c24
-rw-r--r--arch/arm64/kvm/handle_exit.c6
-rw-r--r--arch/arm64/kvm/hyp/include/hyp/switch.h4
-rw-r--r--arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h14
-rw-r--r--arch/arm64/kvm/hyp/include/nvhe/mem_protect.h2
-rw-r--r--arch/arm64/kvm/hyp/include/nvhe/pkvm.h6
-rw-r--r--arch/arm64/kvm/hyp/nvhe/hyp-init.S10
-rw-r--r--arch/arm64/kvm/hyp/nvhe/mem_protect.c2
-rw-r--r--arch/arm64/kvm/hyp/nvhe/pkvm.c79
-rw-r--r--arch/arm64/kvm/hyp/nvhe/psci-relay.c3
-rw-r--r--arch/arm64/kvm/hyp/nvhe/sysreg-sr.c4
-rw-r--r--arch/arm64/kvm/hyp/vgic-v3-sr.c16
-rw-r--r--arch/arm64/kvm/hyp/vhe/switch.c22
-rw-r--r--arch/arm64/kvm/hyp/vhe/sysreg-sr.c28
-rw-r--r--arch/arm64/kvm/hypercalls.c13
-rw-r--r--arch/arm64/kvm/mmu.c22
-rw-r--r--arch/arm64/kvm/nested.c298
-rw-r--r--arch/arm64/kvm/pkvm.c75
-rw-r--r--arch/arm64/kvm/pmu-emul.c194
-rw-r--r--arch/arm64/kvm/pmu.c10
-rw-r--r--arch/arm64/kvm/ptdump.c4
-rw-r--r--arch/arm64/kvm/reset.c3
-rw-r--r--arch/arm64/kvm/sys_regs.c478
-rw-r--r--arch/arm64/kvm/sys_regs.h10
-rw-r--r--arch/arm64/kvm/vgic-sys-reg-v3.c8
-rw-r--r--arch/arm64/kvm/vgic/vgic-init.c29
-rw-r--r--arch/arm64/kvm/vgic/vgic-kvm-device.c29
-rw-r--r--arch/arm64/kvm/vgic/vgic-v3-nested.c409
-rw-r--r--arch/arm64/kvm/vgic/vgic-v3.c46
-rw-r--r--arch/arm64/kvm/vgic/vgic-v4.c35
-rw-r--r--arch/arm64/kvm/vgic/vgic.c38
-rw-r--r--arch/arm64/kvm/vgic/vgic.h6
-rw-r--r--arch/arm64/lib/clear_user.S25
-rw-r--r--arch/arm64/lib/copy_from_user.S10
-rw-r--r--arch/arm64/lib/copy_template.S10
-rw-r--r--arch/arm64/lib/copy_to_user.S10
-rw-r--r--arch/arm64/lib/crc-t10dif-glue.c10
-rw-r--r--arch/arm64/lib/crc32-glue.c10
-rw-r--r--arch/arm64/mm/extable.c40
-rw-r--r--arch/arm64/mm/fault.c4
-rw-r--r--arch/arm64/mm/hugetlbpage.c20
-rw-r--r--arch/arm64/mm/kasan_init.c6
-rw-r--r--arch/arm64/mm/mmu.c15
-rw-r--r--arch/arm64/mm/physaddr.c2
-rw-r--r--arch/arm64/mm/ptdump.c4
-rw-r--r--arch/arm64/tools/cpucaps2
-rwxr-xr-xarch/arm64/tools/gen-sysreg.awk31
-rw-r--r--arch/arm64/tools/syscall_32.tbl1
-rw-r--r--arch/arm64/tools/sysreg153
-rw-r--r--arch/csky/abiv1/alignment.c2
-rw-r--r--arch/csky/kernel/vdso/Makefile2
-rw-r--r--arch/hexagon/configs/comet_defconfig1
-rw-r--r--arch/loongarch/Kconfig5
-rw-r--r--arch/loongarch/Makefile6
-rw-r--r--arch/loongarch/boot/dts/loongson-2k1000-ref.dts5
-rw-r--r--arch/loongarch/configs/loongson3_defconfig1
-rw-r--r--arch/loongarch/include/asm/kvm_host.h7
-rw-r--r--arch/loongarch/include/asm/vdso.h1
-rw-r--r--arch/loongarch/include/asm/vdso/arch_data.h25
-rw-r--r--arch/loongarch/include/asm/vdso/getrandom.h5
-rw-r--r--arch/loongarch/include/asm/vdso/gettimeofday.h14
-rw-r--r--arch/loongarch/include/asm/vdso/vdso.h38
-rw-r--r--arch/loongarch/include/asm/vdso/vsyscall.h17
-rw-r--r--arch/loongarch/kernel/acpi.c12
-rw-r--r--arch/loongarch/kernel/asm-offsets.c3
-rw-r--r--arch/loongarch/kernel/machine_kexec.c4
-rw-r--r--arch/loongarch/kernel/setup.c3
-rw-r--r--arch/loongarch/kernel/smp.c47
-rw-r--r--arch/loongarch/kernel/vdso.c92
-rw-r--r--arch/loongarch/kvm/Kconfig1
-rw-r--r--arch/loongarch/kvm/Makefile2
-rw-r--r--arch/loongarch/kvm/exit.c6
-rw-r--r--arch/loongarch/kvm/main.c10
-rw-r--r--arch/loongarch/kvm/switch.S12
-rw-r--r--arch/loongarch/kvm/vcpu.c43
-rw-r--r--arch/loongarch/kvm/vm.c6
-rw-r--r--arch/loongarch/lib/crc32-loongarch.c6
-rw-r--r--arch/loongarch/mm/mmap.c6
-rw-r--r--arch/loongarch/vdso/Makefile2
-rw-r--r--arch/loongarch/vdso/vdso.lds.S8
-rw-r--r--arch/loongarch/vdso/vgetcpu.c12
-rw-r--r--arch/m68k/Kconfig3
-rw-r--r--arch/m68k/configs/amiga_defconfig3
-rw-r--r--arch/m68k/configs/apollo_defconfig3
-rw-r--r--arch/m68k/configs/atari_defconfig3
-rw-r--r--arch/m68k/configs/bvme6000_defconfig3
-rw-r--r--arch/m68k/configs/hp300_defconfig3
-rw-r--r--arch/m68k/configs/mac_defconfig3
-rw-r--r--arch/m68k/configs/multi_defconfig3
-rw-r--r--arch/m68k/configs/mvme147_defconfig3
-rw-r--r--arch/m68k/configs/mvme16x_defconfig3
-rw-r--r--arch/m68k/configs/q40_defconfig3
-rw-r--r--arch/m68k/configs/sun3_defconfig3
-rw-r--r--arch/m68k/configs/sun3x_defconfig3
-rw-r--r--arch/m68k/include/asm/io_no.h4
-rw-r--r--arch/m68k/include/asm/processor.h14
-rw-r--r--arch/m68k/include/asm/sun3_pgalloc.h6
-rw-r--r--arch/m68k/kernel/setup_mm.c6
-rw-r--r--arch/m68k/kernel/setup_no.c1
-rw-r--r--arch/m68k/kernel/syscalls/syscall.tbl1
-rw-r--r--arch/m68k/kernel/uboot.c1
-rw-r--r--arch/m68k/sun3/mmu_emu.c7
-rw-r--r--arch/microblaze/kernel/syscalls/syscall.tbl1
-rw-r--r--arch/mips/Kconfig3
-rw-r--r--arch/mips/configs/bcm47xx_defconfig1
-rw-r--r--arch/mips/configs/bigsur_defconfig1
-rw-r--r--arch/mips/configs/cobalt_defconfig1
-rw-r--r--arch/mips/configs/db1xxx_defconfig1
-rw-r--r--arch/mips/configs/decstation_64_defconfig1
-rw-r--r--arch/mips/configs/decstation_defconfig1
-rw-r--r--arch/mips/configs/decstation_r4k_defconfig1
-rw-r--r--arch/mips/configs/fuloong2e_defconfig1
-rw-r--r--arch/mips/configs/ip32_defconfig1
-rw-r--r--arch/mips/configs/malta_defconfig1
-rw-r--r--arch/mips/configs/malta_kvm_defconfig1
-rw-r--r--arch/mips/configs/maltaup_xpa_defconfig1
-rw-r--r--arch/mips/configs/rm200_defconfig1
-rw-r--r--arch/mips/configs/rt305x_defconfig1
-rw-r--r--arch/mips/configs/xway_defconfig1
-rw-r--r--arch/mips/include/asm/io.h25
-rw-r--r--arch/mips/include/asm/kvm_host.h1
-rw-r--r--arch/mips/include/asm/mach-loongson64/spaces.h5
-rw-r--r--arch/mips/include/asm/mach-ralink/spaces.h2
-rw-r--r--arch/mips/include/asm/vdso/gettimeofday.h9
-rw-r--r--arch/mips/include/asm/vdso/vdso.h19
-rw-r--r--arch/mips/include/asm/vdso/vsyscall.h14
-rw-r--r--arch/mips/kernel/ptrace.c20
-rw-r--r--arch/mips/kernel/syscalls/syscall_n32.tbl1
-rw-r--r--arch/mips/kernel/syscalls/syscall_n64.tbl1
-rw-r--r--arch/mips/kernel/syscalls/syscall_o32.tbl1
-rw-r--r--arch/mips/kernel/vdso.c47
-rw-r--r--arch/mips/kvm/mips.c5
-rw-r--r--arch/mips/lib/crc32-mips.c15
-rw-r--r--arch/mips/lib/iomap-pci.c10
-rw-r--r--arch/mips/loongson64/init.c4
-rw-r--r--arch/mips/vdso/Makefile2
-rw-r--r--arch/mips/vdso/vdso.lds.S5
-rw-r--r--arch/parisc/configs/generic-64bit_defconfig2
-rw-r--r--arch/parisc/include/asm/io.h36
-rw-r--r--arch/parisc/include/asm/vdso.h2
-rw-r--r--arch/parisc/include/uapi/asm/socket.h12
-rw-r--r--arch/parisc/kernel/syscalls/syscall.tbl1
-rw-r--r--arch/parisc/kernel/vdso32/Makefile2
-rw-r--r--arch/parisc/kernel/vdso64/Makefile2
-rw-r--r--arch/powerpc/Kconfig13
-rw-r--r--arch/powerpc/Kconfig.debug7
-rw-r--r--arch/powerpc/boot/Makefile2
-rw-r--r--arch/powerpc/boot/dts/microwatt.dts107
-rw-r--r--arch/powerpc/configs/85xx/ge_imp3a_defconfig1
-rw-r--r--arch/powerpc/configs/adder875_defconfig1
-rw-r--r--arch/powerpc/configs/cell_defconfig2
-rw-r--r--arch/powerpc/configs/ep88xc_defconfig1
-rw-r--r--arch/powerpc/configs/fsl-emb-nonhw.config1
-rw-r--r--arch/powerpc/configs/mpc866_ads_defconfig1
-rw-r--r--arch/powerpc/configs/mpc885_ads_defconfig1
-rw-r--r--arch/powerpc/configs/ppc64_defconfig2
-rw-r--r--arch/powerpc/configs/ppc6xx_defconfig2
-rw-r--r--arch/powerpc/configs/skiroot_defconfig1
-rw-r--r--arch/powerpc/configs/tqm8xx_defconfig1
-rw-r--r--arch/powerpc/crypto/Makefile1
-rw-r--r--arch/powerpc/include/asm/cell-pmu.h56
-rw-r--r--arch/powerpc/include/asm/cell-regs.h296
-rw-r--r--arch/powerpc/include/asm/dcr-generic.h36
-rw-r--r--arch/powerpc/include/asm/dcr-mmio.h44
-rw-r--r--arch/powerpc/include/asm/dcr.h32
-rw-r--r--arch/powerpc/include/asm/hvcall.h34
-rw-r--r--arch/powerpc/include/asm/io-defs.h70
-rw-r--r--arch/powerpc/include/asm/io-workarounds.h55
-rw-r--r--arch/powerpc/include/asm/io.h459
-rw-r--r--arch/powerpc/include/asm/iommu.h6
-rw-r--r--arch/powerpc/include/asm/kvm_host.h1
-rw-r--r--arch/powerpc/include/asm/mmzone.h1
-rw-r--r--arch/powerpc/include/asm/plpar_wrappers.h21
-rw-r--r--arch/powerpc/include/asm/pmi.h53
-rw-r--r--arch/powerpc/include/asm/prom.h2
-rw-r--r--arch/powerpc/include/asm/spu_priv1.h2
-rw-r--r--arch/powerpc/include/asm/static_call.h2
-rw-r--r--arch/powerpc/include/asm/time.h3
-rw-r--r--arch/powerpc/include/asm/topology.h1
-rw-r--r--arch/powerpc/include/asm/vdso.h1
-rw-r--r--arch/powerpc/include/asm/vdso/arch_data.h37
-rw-r--r--arch/powerpc/include/asm/vdso/getrandom.h11
-rw-r--r--arch/powerpc/include/asm/vdso/gettimeofday.h29
-rw-r--r--arch/powerpc/include/asm/vdso/vsyscall.h13
-rw-r--r--arch/powerpc/include/asm/vdso_datapage.h44
-rw-r--r--arch/powerpc/include/asm/xics.h1
-rw-r--r--arch/powerpc/include/asm/xmon.h2
-rw-r--r--arch/powerpc/kernel/Makefile4
-rw-r--r--arch/powerpc/kernel/asm-offsets.c1
-rw-r--r--arch/powerpc/kernel/dma-iommu.c2
-rw-r--r--arch/powerpc/kernel/exceptions-64s.S55
-rw-r--r--arch/powerpc/kernel/fadump.c23
-rw-r--r--arch/powerpc/kernel/io-workarounds.c197
-rw-r--r--arch/powerpc/kernel/io.c58
-rw-r--r--arch/powerpc/kernel/of_platform.c102
-rw-r--r--arch/powerpc/kernel/prom_init.c6
-rw-r--r--arch/powerpc/kernel/ptrace/ptrace.c2
-rw-r--r--arch/powerpc/kernel/rtas.c75
-rw-r--r--arch/powerpc/kernel/setup_64.c2
-rw-r--r--arch/powerpc/kernel/static_call.c58
-rw-r--r--arch/powerpc/kernel/switch.S1
-rw-r--r--arch/powerpc/kernel/syscalls/syscall.tbl1
-rw-r--r--arch/powerpc/kernel/time.c67
-rw-r--r--arch/powerpc/kernel/traps.c3
-rw-r--r--arch/powerpc/kernel/udbg.c3
-rw-r--r--arch/powerpc/kernel/vdso.c115
-rw-r--r--arch/powerpc/kernel/vdso/Makefile2
-rw-r--r--arch/powerpc/kernel/vdso/cacheflush.S2
-rw-r--r--arch/powerpc/kernel/vdso/datapage.S4
-rw-r--r--arch/powerpc/kernel/vdso/gettimeofday.S4
-rw-r--r--arch/powerpc/kernel/vdso/vdso32.lds.S4
-rw-r--r--arch/powerpc/kernel/vdso/vdso64.lds.S4
-rw-r--r--arch/powerpc/kernel/vdso/vgettimeofday.c14
-rw-r--r--arch/powerpc/kernel/vmlinux.lds.S9
-rw-r--r--arch/powerpc/kernel/watchdog.c3
-rw-r--r--arch/powerpc/kexec/relocate_32.S7
-rw-r--r--arch/powerpc/kvm/book3s_32_mmu_host.c2
-rw-r--r--arch/powerpc/kvm/book3s_hv_rmhandlers.S2
-rw-r--r--arch/powerpc/kvm/powerpc.c9
-rw-r--r--arch/powerpc/lib/crc-t10dif-glue.c6
-rw-r--r--arch/powerpc/lib/crc32-glue.c10
-rw-r--r--arch/powerpc/mm/book3s64/hash_utils.c12
-rw-r--r--arch/powerpc/mm/book3s64/pgtable.c4
-rw-r--r--arch/powerpc/mm/ioremap.c9
-rw-r--r--arch/powerpc/mm/ioremap_64.c2
-rw-r--r--arch/powerpc/mm/mem.c22
-rw-r--r--arch/powerpc/mm/numa.c2
-rw-r--r--arch/powerpc/perf/core-book3s.c28
-rw-r--r--arch/powerpc/perf/isa207-common.c18
-rw-r--r--arch/powerpc/perf/vpa-pmu.c1
-rw-r--r--arch/powerpc/platforms/44x/uic.c2
-rw-r--r--arch/powerpc/platforms/Kconfig10
-rw-r--r--arch/powerpc/platforms/Kconfig.cputype13
-rw-r--r--arch/powerpc/platforms/cell/Kconfig80
-rw-r--r--arch/powerpc/platforms/cell/Makefile23
-rw-r--r--arch/powerpc/platforms/cell/axon_msi.c481
-rw-r--r--arch/powerpc/platforms/cell/cbe_powerbutton.c106
-rw-r--r--arch/powerpc/platforms/cell/cbe_regs.c298
-rw-r--r--arch/powerpc/platforms/cell/cbe_thermal.c387
-rw-r--r--arch/powerpc/platforms/cell/cell.h15
-rw-r--r--arch/powerpc/platforms/cell/cpufreq_spudemand.c134
-rw-r--r--arch/powerpc/platforms/cell/interrupt.c390
-rw-r--r--arch/powerpc/platforms/cell/interrupt.h90
-rw-r--r--arch/powerpc/platforms/cell/iommu.c1060
-rw-r--r--arch/powerpc/platforms/cell/pervasive.c125
-rw-r--r--arch/powerpc/platforms/cell/pervasive.h26
-rw-r--r--arch/powerpc/platforms/cell/pmu.c412
-rw-r--r--arch/powerpc/platforms/cell/ras.c352
-rw-r--r--arch/powerpc/platforms/cell/ras.h13
-rw-r--r--arch/powerpc/platforms/cell/setup.c274
-rw-r--r--arch/powerpc/platforms/cell/smp.c162
-rw-r--r--arch/powerpc/platforms/cell/spider-pci.c170
-rw-r--r--arch/powerpc/platforms/cell/spider-pic.c344
-rw-r--r--arch/powerpc/platforms/cell/spu_base.c2
-rw-r--r--arch/powerpc/platforms/cell/spu_manage.c530
-rw-r--r--arch/powerpc/platforms/cell/spu_priv1_mmio.c167
-rw-r--r--arch/powerpc/platforms/cell/spu_priv1_mmio.h14
-rw-r--r--arch/powerpc/platforms/microwatt/Kconfig3
-rw-r--r--arch/powerpc/platforms/microwatt/Makefile1
-rw-r--r--arch/powerpc/platforms/microwatt/microwatt.h1
-rw-r--r--arch/powerpc/platforms/microwatt/setup.c18
-rw-r--r--arch/powerpc/platforms/microwatt/smp.c80
-rw-r--r--arch/powerpc/platforms/powernv/Kconfig1
-rw-r--r--arch/powerpc/platforms/powernv/opal-core.c8
-rw-r--r--arch/powerpc/platforms/pseries/Kconfig10
-rw-r--r--arch/powerpc/platforms/pseries/Makefile1
-rw-r--r--arch/powerpc/platforms/pseries/htmdump.c121
-rw-r--r--arch/powerpc/platforms/pseries/iommu.c139
-rw-r--r--arch/powerpc/platforms/pseries/papr-vpd.c7
-rw-r--r--arch/powerpc/sysdev/Makefile1
-rw-r--r--arch/powerpc/sysdev/dcr.c182
-rw-r--r--arch/powerpc/sysdev/ipic.c3
-rw-r--r--arch/powerpc/sysdev/pmi.c267
-rw-r--r--arch/powerpc/sysdev/xics/icp-native.c21
-rw-r--r--arch/powerpc/xmon/Makefile5
-rw-r--r--arch/powerpc/xmon/spu-dis.c237
-rw-r--r--arch/powerpc/xmon/spu-insns.h399
-rw-r--r--arch/powerpc/xmon/spu-opc.c34
-rw-r--r--arch/powerpc/xmon/spu.h115
-rw-r--r--arch/powerpc/xmon/xmon.c279
-rw-r--r--arch/riscv/Kconfig6
-rw-r--r--arch/riscv/Kconfig.socs1
-rw-r--r--arch/riscv/boot/dts/microchip/mpfs-icicle-kit-fabric.dtsi5
-rw-r--r--arch/riscv/boot/dts/microchip/mpfs-m100pfs-fabric.dtsi5
-rw-r--r--arch/riscv/boot/dts/microchip/mpfs-polarberry-fabric.dtsi5
-rw-r--r--arch/riscv/boot/dts/sophgo/sg2042-milkv-pioneer.dts36
-rw-r--r--arch/riscv/boot/dts/sophgo/sg2042.dtsi19
-rw-r--r--arch/riscv/boot/dts/spacemit/Makefile1
-rw-r--r--arch/riscv/boot/dts/spacemit/k1-milkv-jupiter.dts27
-rw-r--r--arch/riscv/boot/dts/starfive/jh7110-common.dtsi8
-rw-r--r--arch/riscv/boot/dts/starfive/jh7110-deepcomputing-fml13v01.dts34
-rw-r--r--arch/riscv/boot/dts/starfive/jh7110-pine64-star64.dts5
-rw-r--r--arch/riscv/boot/dts/starfive/jh7110-pinfunc.h2
-rw-r--r--arch/riscv/boot/dts/starfive/jh7110.dtsi4
-rw-r--r--arch/riscv/include/asm/bitops.h4
-rw-r--r--arch/riscv/include/asm/io.h2
-rw-r--r--arch/riscv/include/asm/kvm_host.h2
-rw-r--r--arch/riscv/include/asm/vdso.h2
-rw-r--r--arch/riscv/include/asm/vdso/arch_data.h (renamed from arch/riscv/include/asm/vdso/time_data.h)8
-rw-r--r--arch/riscv/include/asm/vdso/gettimeofday.h14
-rw-r--r--arch/riscv/include/asm/vdso/vsyscall.h9
-rw-r--r--arch/riscv/kernel/sys_hwprobe.c3
-rw-r--r--arch/riscv/kernel/vdso.c90
-rw-r--r--arch/riscv/kernel/vdso/Makefile2
-rw-r--r--arch/riscv/kernel/vdso/hwprobe.c6
-rw-r--r--arch/riscv/kernel/vdso/vdso.lds.S7
-rw-r--r--arch/riscv/kvm/main.c4
-rw-r--r--arch/riscv/kvm/vcpu_onereg.c2
-rw-r--r--arch/riscv/kvm/vcpu_pmu.c1
-rw-r--r--arch/riscv/kvm/vcpu_timer.c7
-rw-r--r--arch/riscv/lib/Makefile5
-rw-r--r--arch/riscv/lib/crc-clmul-consts.h122
-rw-r--r--arch/riscv/lib/crc-clmul-template.h265
-rw-r--r--arch/riscv/lib/crc-clmul.h23
-rw-r--r--arch/riscv/lib/crc-t10dif.c24
-rw-r--r--arch/riscv/lib/crc16_msb.c18
-rw-r--r--arch/riscv/lib/crc32-riscv.c311
-rw-r--r--arch/riscv/lib/crc32.c53
-rw-r--r--arch/riscv/lib/crc32_lsb.c18
-rw-r--r--arch/riscv/lib/crc32_msb.c18
-rw-r--r--arch/riscv/lib/crc64.c34
-rw-r--r--arch/riscv/lib/crc64_lsb.c18
-rw-r--r--arch/riscv/lib/crc64_msb.c18
-rw-r--r--arch/s390/Kconfig1
-rw-r--r--arch/s390/configs/debug_defconfig3
-rw-r--r--arch/s390/configs/defconfig3
-rw-r--r--arch/s390/include/asm/gmap.h1
-rw-r--r--arch/s390/include/asm/kvm_host.h1
-rw-r--r--arch/s390/include/asm/pci.h4
-rw-r--r--arch/s390/include/asm/pci_clp.h4
-rw-r--r--arch/s390/include/asm/uv.h2
-rw-r--r--arch/s390/include/asm/vdso.h4
-rw-r--r--arch/s390/include/asm/vdso/getrandom.h12
-rw-r--r--arch/s390/include/asm/vdso/gettimeofday.h15
-rw-r--r--arch/s390/include/asm/vdso/vsyscall.h20
-rw-r--r--arch/s390/kernel/crash_dump.c62
-rw-r--r--arch/s390/kernel/dumpstack.c7
-rw-r--r--arch/s390/kernel/ftrace.c3
-rw-r--r--arch/s390/kernel/perf_pai_crypto.c3
-rw-r--r--arch/s390/kernel/perf_pai_ext.c3
-rw-r--r--arch/s390/kernel/processor.c2
-rw-r--r--arch/s390/kernel/syscalls/syscall.tbl1
-rw-r--r--arch/s390/kernel/time.c11
-rw-r--r--arch/s390/kernel/traps.c6
-rw-r--r--arch/s390/kernel/uv.c136
-rw-r--r--arch/s390/kernel/vdso.c97
-rw-r--r--arch/s390/kernel/vdso32/Makefile2
-rw-r--r--arch/s390/kernel/vdso32/vdso32.lds.S7
-rw-r--r--arch/s390/kernel/vdso64/Makefile2
-rw-r--r--arch/s390/kernel/vdso64/vdso64.lds.S8
-rw-r--r--arch/s390/kvm/gmap.c103
-rw-r--r--arch/s390/kvm/interrupt.c3
-rw-r--r--arch/s390/kvm/kvm-s390.c29
-rw-r--r--arch/s390/kvm/pci.c17
-rw-r--r--arch/s390/lib/crc32-glue.c2
-rw-r--r--arch/s390/mm/gmap.c28
-rw-r--r--arch/s390/pci/pci.c35
-rw-r--r--arch/s390/pci/pci_bus.c25
-rw-r--r--arch/s390/pci/pci_clp.c1
-rw-r--r--arch/s390/pci/pci_sysfs.c11
-rw-r--r--arch/sh/configs/se7206_defconfig2
-rw-r--r--arch/sh/configs/sh2007_defconfig1
-rw-r--r--arch/sh/configs/titan_defconfig1
-rw-r--r--arch/sh/include/asm/io.h30
-rw-r--r--arch/sh/kernel/Makefile3
-rw-r--r--arch/sh/kernel/iomap.c162
-rw-r--r--arch/sh/kernel/ioport.c5
-rw-r--r--arch/sh/kernel/syscalls/syscall.tbl1
-rw-r--r--arch/sh/kernel/vsyscall/vsyscall.c21
-rw-r--r--arch/sh/lib/io.c4
-rw-r--r--arch/sparc/configs/sparc32_defconfig1
-rw-r--r--arch/sparc/configs/sparc64_defconfig1
-rw-r--r--arch/sparc/kernel/syscalls/syscall.tbl1
-rw-r--r--arch/sparc/lib/crc32_glue.c10
-rw-r--r--arch/sparc/vdso/Makefile7
-rw-r--r--arch/sparc/vdso/checkundef.sh10
-rw-r--r--arch/um/kernel/um_arch.c11
-rw-r--r--arch/x86/Kbuild4
-rw-r--r--arch/x86/Kconfig214
-rw-r--r--arch/x86/Kconfig.cpu105
-rw-r--r--arch/x86/Kconfig.cpufeatures201
-rw-r--r--arch/x86/Makefile55
-rw-r--r--arch/x86/Makefile_32.cpu5
-rw-r--r--arch/x86/boot/.gitignore1
-rw-r--r--arch/x86/boot/Makefile7
-rw-r--r--arch/x86/boot/boot.h4
-rw-r--r--arch/x86/boot/compressed/Makefile1
-rw-r--r--arch/x86/boot/compressed/head_64.S103
-rw-r--r--arch/x86/boot/compressed/la57toggle.S112
-rw-r--r--arch/x86/boot/compressed/misc.c14
-rw-r--r--arch/x86/boot/compressed/pgtable_64.c2
-rw-r--r--arch/x86/boot/compressed/vmlinux.lds.S2
-rw-r--r--arch/x86/boot/cpucheck.c3
-rw-r--r--arch/x86/boot/cpuflags.c27
-rw-r--r--arch/x86/boot/cpuflags.h7
-rw-r--r--arch/x86/boot/genimage.sh5
-rw-r--r--arch/x86/boot/mkcpustr.c3
-rw-r--r--arch/x86/boot/setup.ld2
-rw-r--r--arch/x86/boot/tools/build.c247
-rw-r--r--arch/x86/coco/sev/core.c28
-rw-r--r--arch/x86/coco/tdx/tdx.c4
-rw-r--r--arch/x86/configs/xen.config2
-rw-r--r--arch/x86/crypto/aesni-intel_asm.S2
-rw-r--r--arch/x86/crypto/aesni-intel_glue.c22
-rw-r--r--arch/x86/crypto/camellia-aesni-avx-asm_64.S7
-rw-r--r--arch/x86/crypto/camellia-aesni-avx2-asm_64.S1
-rw-r--r--arch/x86/crypto/camellia-x86_64-asm_64.S9
-rw-r--r--arch/x86/crypto/serpent-avx-x86_64-asm_64.S7
-rw-r--r--arch/x86/crypto/twofish-x86_64-asm_64-3way.S5
-rw-r--r--arch/x86/crypto/twofish-x86_64-asm_64.S5
-rw-r--r--arch/x86/entry/Makefile8
-rw-r--r--arch/x86/entry/calling.h1
-rw-r--r--arch/x86/entry/common.c524
-rw-r--r--arch/x86/entry/entry.S6
-rw-r--r--arch/x86/entry/entry_32.S4
-rw-r--r--arch/x86/entry/entry_64.S11
-rw-r--r--arch/x86/entry/entry_64_compat.S4
-rw-r--r--arch/x86/entry/entry_64_fred.S1
-rw-r--r--arch/x86/entry/syscall_32.c332
-rw-r--r--arch/x86/entry/syscall_64.c111
-rw-r--r--arch/x86/entry/syscall_x32.c25
-rw-r--r--arch/x86/entry/syscalls/syscall_32.tbl3
-rw-r--r--arch/x86/entry/syscalls/syscall_64.tbl1
-rw-r--r--arch/x86/entry/vdso/Makefile10
-rwxr-xr-xarch/x86/entry/vdso/checkundef.sh10
-rw-r--r--arch/x86/entry/vdso/extable.h2
-rw-r--r--arch/x86/entry/vdso/vdso-layout.lds.S10
-rw-r--r--arch/x86/entry/vdso/vdso2c.c21
-rw-r--r--arch/x86/entry/vdso/vdso2c.h20
-rw-r--r--arch/x86/entry/vdso/vdso32-setup.c16
-rw-r--r--arch/x86/entry/vdso/vma.c128
-rw-r--r--arch/x86/events/amd/brs.c3
-rw-r--r--arch/x86/events/amd/ibs.c282
-rw-r--r--arch/x86/events/amd/iommu.c2
-rw-r--r--arch/x86/events/amd/lbr.c3
-rw-r--r--arch/x86/events/core.c29
-rw-r--r--arch/x86/events/intel/bts.c45
-rw-r--r--arch/x86/events/intel/core.c160
-rw-r--r--arch/x86/events/intel/ds.c204
-rw-r--r--arch/x86/events/intel/lbr.c73
-rw-r--r--arch/x86/events/intel/p4.c9
-rw-r--r--arch/x86/events/intel/p6.c26
-rw-r--r--arch/x86/events/intel/uncore.c3
-rw-r--r--arch/x86/events/perf_event.h61
-rw-r--r--arch/x86/events/perf_event_flags.h2
-rw-r--r--arch/x86/events/rapl.c10
-rw-r--r--arch/x86/hyperv/Makefile2
-rw-r--r--arch/x86/hyperv/hv_apic.c5
-rw-r--r--arch/x86/hyperv/hv_init.c35
-rw-r--r--arch/x86/hyperv/hv_vtl.c35
-rw-r--r--arch/x86/hyperv/irqdomain.c6
-rw-r--r--arch/x86/hyperv/ivm.c5
-rw-r--r--arch/x86/hyperv/mmu.c5
-rw-r--r--arch/x86/include/asm/Kbuild1
-rw-r--r--arch/x86/include/asm/alternative.h47
-rw-r--r--arch/x86/include/asm/amd-ibs.h3
-rw-r--r--arch/x86/include/asm/amd_nb.h1
-rw-r--r--arch/x86/include/asm/amd_node.h24
-rw-r--r--arch/x86/include/asm/apic.h4
-rw-r--r--arch/x86/include/asm/arch_hweight.h14
-rw-r--r--arch/x86/include/asm/asm-prototypes.h4
-rw-r--r--arch/x86/include/asm/asm.h21
-rw-r--r--arch/x86/include/asm/atomic.h14
-rw-r--r--arch/x86/include/asm/atomic64_32.h98
-rw-r--r--arch/x86/include/asm/atomic64_64.h14
-rw-r--r--arch/x86/include/asm/barrier.h8
-rw-r--r--arch/x86/include/asm/bitops.h14
-rw-r--r--arch/x86/include/asm/boot.h2
-rw-r--r--arch/x86/include/asm/bug.h8
-rw-r--r--arch/x86/include/asm/cfi.h26
-rw-r--r--arch/x86/include/asm/cmpxchg.h28
-rw-r--r--arch/x86/include/asm/cmpxchg_32.h42
-rw-r--r--arch/x86/include/asm/cmpxchg_64.h4
-rw-r--r--arch/x86/include/asm/coco.h10
-rw-r--r--arch/x86/include/asm/cpu.h15
-rw-r--r--arch/x86/include/asm/cpu_device_id.h130
-rw-r--r--arch/x86/include/asm/cpufeature.h81
-rw-r--r--arch/x86/include/asm/cpufeatures.h17
-rw-r--r--arch/x86/include/asm/cpuid.h216
-rw-r--r--arch/x86/include/asm/cpuid/api.h210
-rw-r--r--arch/x86/include/asm/cpuid/types.h32
-rw-r--r--arch/x86/include/asm/cpumask.h4
-rw-r--r--arch/x86/include/asm/current.h40
-rw-r--r--arch/x86/include/asm/desc.h1
-rw-r--r--arch/x86/include/asm/desc_defs.h4
-rw-r--r--arch/x86/include/asm/disabled-features.h161
-rw-r--r--arch/x86/include/asm/dwarf2.h2
-rw-r--r--arch/x86/include/asm/e820/api.h1
-rw-r--r--arch/x86/include/asm/e820/types.h9
-rw-r--r--arch/x86/include/asm/edac.h2
-rw-r--r--arch/x86/include/asm/elf.h5
-rw-r--r--arch/x86/include/asm/fixmap.h4
-rw-r--r--arch/x86/include/asm/fpu/api.h17
-rw-r--r--arch/x86/include/asm/frame.h10
-rw-r--r--arch/x86/include/asm/fred.h4
-rw-r--r--arch/x86/include/asm/fsgsbase.h4
-rw-r--r--arch/x86/include/asm/ftrace.h24
-rw-r--r--arch/x86/include/asm/hardirq.h4
-rw-r--r--arch/x86/include/asm/hw_irq.h4
-rw-r--r--arch/x86/include/asm/ibt.h25
-rw-r--r--arch/x86/include/asm/idtentry.h6
-rw-r--r--arch/x86/include/asm/init.h4
-rw-r--r--arch/x86/include/asm/inst.h2
-rw-r--r--arch/x86/include/asm/intel-family.h50
-rw-r--r--arch/x86/include/asm/io.h3
-rw-r--r--arch/x86/include/asm/irq_stack.h12
-rw-r--r--arch/x86/include/asm/irqflags.h10
-rw-r--r--arch/x86/include/asm/jump_label.h4
-rw-r--r--arch/x86/include/asm/kasan.h2
-rw-r--r--arch/x86/include/asm/kexec.h62
-rw-r--r--arch/x86/include/asm/kvm_host.h22
-rw-r--r--arch/x86/include/asm/linkage.h24
-rw-r--r--arch/x86/include/asm/mce.h2
-rw-r--r--arch/x86/include/asm/mem_encrypt.h4
-rw-r--r--arch/x86/include/asm/mmu.h12
-rw-r--r--arch/x86/include/asm/mmu_context.h10
-rw-r--r--arch/x86/include/asm/mshyperv.h26
-rw-r--r--arch/x86/include/asm/msr-index.h21
-rw-r--r--arch/x86/include/asm/msr.h4
-rw-r--r--arch/x86/include/asm/nmi.h2
-rw-r--r--arch/x86/include/asm/nops.h2
-rw-r--r--arch/x86/include/asm/nospec-branch.h53
-rw-r--r--arch/x86/include/asm/orc_types.h4
-rw-r--r--arch/x86/include/asm/page.h4
-rw-r--r--arch/x86/include/asm/page_32.h4
-rw-r--r--arch/x86/include/asm/page_32_types.h8
-rw-r--r--arch/x86/include/asm/page_64.h9
-rw-r--r--arch/x86/include/asm/page_64_types.h2
-rw-r--r--arch/x86/include/asm/page_types.h4
-rw-r--r--arch/x86/include/asm/paravirt.h19
-rw-r--r--arch/x86/include/asm/paravirt_types.h20
-rw-r--r--arch/x86/include/asm/percpu.h153
-rw-r--r--arch/x86/include/asm/perf_event.h20
-rw-r--r--arch/x86/include/asm/pgalloc.h5
-rw-r--r--arch/x86/include/asm/pgtable-2level_types.h12
-rw-r--r--arch/x86/include/asm/pgtable-3level_types.h4
-rw-r--r--arch/x86/include/asm/pgtable-invert.h4
-rw-r--r--arch/x86/include/asm/pgtable.h12
-rw-r--r--arch/x86/include/asm/pgtable_32.h4
-rw-r--r--arch/x86/include/asm/pgtable_32_areas.h2
-rw-r--r--arch/x86/include/asm/pgtable_64.h6
-rw-r--r--arch/x86/include/asm/pgtable_64_types.h4
-rw-r--r--arch/x86/include/asm/pgtable_types.h14
-rw-r--r--arch/x86/include/asm/preempt.h25
-rw-r--r--arch/x86/include/asm/processor.h67
-rw-r--r--arch/x86/include/asm/prom.h4
-rw-r--r--arch/x86/include/asm/proto.h3
-rw-r--r--arch/x86/include/asm/pti.h4
-rw-r--r--arch/x86/include/asm/ptrace.h4
-rw-r--r--arch/x86/include/asm/purgatory.h4
-rw-r--r--arch/x86/include/asm/pvclock-abi.h4
-rw-r--r--arch/x86/include/asm/realmode.h4
-rw-r--r--arch/x86/include/asm/required-features.h105
-rw-r--r--arch/x86/include/asm/resctrl.h36
-rw-r--r--arch/x86/include/asm/rmwcc.h2
-rw-r--r--arch/x86/include/asm/runtime-const.h13
-rw-r--r--arch/x86/include/asm/segment.h8
-rw-r--r--arch/x86/include/asm/set_memory.h2
-rw-r--r--arch/x86/include/asm/setup.h7
-rw-r--r--arch/x86/include/asm/setup_data.h4
-rw-r--r--arch/x86/include/asm/sev-common.h12
-rw-r--r--arch/x86/include/asm/sev.h6
-rw-r--r--arch/x86/include/asm/shared/tdx.h4
-rw-r--r--arch/x86/include/asm/shstk.h4
-rw-r--r--arch/x86/include/asm/signal.h8
-rw-r--r--arch/x86/include/asm/smap.h6
-rw-r--r--arch/x86/include/asm/smp.h24
-rw-r--r--arch/x86/include/asm/special_insns.h22
-rw-r--r--arch/x86/include/asm/sta2x11.h13
-rw-r--r--arch/x86/include/asm/stackprotector.h36
-rw-r--r--arch/x86/include/asm/string_64.h2
-rw-r--r--arch/x86/include/asm/svm.h5
-rw-r--r--arch/x86/include/asm/sync_bitops.h12
-rw-r--r--arch/x86/include/asm/tdx.h4
-rw-r--r--arch/x86/include/asm/thread_info.h12
-rw-r--r--arch/x86/include/asm/tlb.h138
-rw-r--r--arch/x86/include/asm/tlbbatch.h5
-rw-r--r--arch/x86/include/asm/tlbflush.h72
-rw-r--r--arch/x86/include/asm/topology.h2
-rw-r--r--arch/x86/include/asm/traps.h2
-rw-r--r--arch/x86/include/asm/unwind_hints.h4
-rw-r--r--arch/x86/include/asm/vdso.h6
-rw-r--r--arch/x86/include/asm/vdso/getrandom.h14
-rw-r--r--arch/x86/include/asm/vdso/gettimeofday.h41
-rw-r--r--arch/x86/include/asm/vdso/processor.h4
-rw-r--r--arch/x86/include/asm/vdso/vsyscall.h27
-rw-r--r--arch/x86/include/asm/vermagic.h4
-rw-r--r--arch/x86/include/asm/vmx.h28
-rw-r--r--arch/x86/include/asm/xen/interface.h10
-rw-r--r--arch/x86/include/asm/xen/interface_32.h4
-rw-r--r--arch/x86/include/asm/xen/interface_64.h4
-rw-r--r--arch/x86/include/uapi/asm/bootparam.h4
-rw-r--r--arch/x86/include/uapi/asm/e820.h4
-rw-r--r--arch/x86/include/uapi/asm/kvm.h3
-rw-r--r--arch/x86/include/uapi/asm/ldt.h4
-rw-r--r--arch/x86/include/uapi/asm/msr.h4
-rw-r--r--arch/x86/include/uapi/asm/ptrace-abi.h6
-rw-r--r--arch/x86/include/uapi/asm/ptrace.h4
-rw-r--r--arch/x86/include/uapi/asm/setup_data.h4
-rw-r--r--arch/x86/include/uapi/asm/signal.h8
-rw-r--r--arch/x86/include/uapi/asm/svm.h2
-rw-r--r--arch/x86/kernel/Makefile2
-rw-r--r--arch/x86/kernel/acpi/cppc.c4
-rw-r--r--arch/x86/kernel/acpi/cstate.c19
-rw-r--r--arch/x86/kernel/acpi/madt_playdead.S1
-rw-r--r--arch/x86/kernel/acpi/madt_wakeup.c73
-rw-r--r--arch/x86/kernel/acpi/wakeup_64.S1
-rw-r--r--arch/x86/kernel/alternative.c645
-rw-r--r--arch/x86/kernel/amd_nb.c10
-rw-r--r--arch/x86/kernel/amd_node.c149
-rw-r--r--arch/x86/kernel/apic/Makefile3
-rw-r--r--arch/x86/kernel/apic/apic.c7
-rw-r--r--arch/x86/kernel/apic/bigsmp_32.c105
-rw-r--r--arch/x86/kernel/apic/ipi.c33
-rw-r--r--arch/x86/kernel/apic/local.h13
-rw-r--r--arch/x86/kernel/apic/probe_32.c29
-rw-r--r--arch/x86/kernel/apic/vector.c231
-rw-r--r--arch/x86/kernel/asm-offsets.c5
-rw-r--r--arch/x86/kernel/asm-offsets_64.c6
-rw-r--r--arch/x86/kernel/bootflag.c29
-rw-r--r--arch/x86/kernel/callthunks.c13
-rw-r--r--arch/x86/kernel/cfi.c26
-rw-r--r--arch/x86/kernel/cpu/amd.c30
-rw-r--r--arch/x86/kernel/cpu/aperfmperf.c2
-rw-r--r--arch/x86/kernel/cpu/bugs.c121
-rw-r--r--arch/x86/kernel/cpu/bus_lock.c20
-rw-r--r--arch/x86/kernel/cpu/cacheinfo.c33
-rw-r--r--arch/x86/kernel/cpu/common.c241
-rw-r--r--arch/x86/kernel/cpu/cpu.h8
-rw-r--r--arch/x86/kernel/cpu/cpuid-deps.c35
-rw-r--r--arch/x86/kernel/cpu/debugfs.c4
-rw-r--r--arch/x86/kernel/cpu/hygon.c16
-rw-r--r--arch/x86/kernel/cpu/intel.c359
-rw-r--r--arch/x86/kernel/cpu/match.c30
-rw-r--r--arch/x86/kernel/cpu/mce/core.c44
-rw-r--r--arch/x86/kernel/cpu/mce/inject.c1
-rw-r--r--arch/x86/kernel/cpu/microcode/amd.c274
-rw-r--r--arch/x86/kernel/cpu/microcode/amd_shas.c444
-rw-r--r--arch/x86/kernel/cpu/microcode/intel.c2
-rw-r--r--arch/x86/kernel/cpu/microcode/internal.h2
-rw-r--r--arch/x86/kernel/cpu/mshyperv.c40
-rw-r--r--arch/x86/kernel/cpu/mtrr/generic.c17
-rw-r--r--arch/x86/kernel/cpu/mtrr/if.c6
-rw-r--r--arch/x86/kernel/cpu/proc.c7
-rw-r--r--arch/x86/kernel/cpu/resctrl/Makefile5
-rw-r--r--arch/x86/kernel/cpu/resctrl/core.c181
-rw-r--r--arch/x86/kernel/cpu/resctrl/ctrlmondata.c93
-rw-r--r--arch/x86/kernel/cpu/resctrl/internal.h201
-rw-r--r--arch/x86/kernel/cpu/resctrl/monitor.c119
-rw-r--r--arch/x86/kernel/cpu/resctrl/pseudo_lock.c55
-rw-r--r--arch/x86/kernel/cpu/resctrl/rdtgroup.c284
-rw-r--r--arch/x86/kernel/cpu/sgx/driver.c10
-rw-r--r--arch/x86/kernel/cpu/sgx/ioctl.c7
-rw-r--r--arch/x86/kernel/cpu/vmware.c4
-rw-r--r--arch/x86/kernel/crash.c2
-rw-r--r--arch/x86/kernel/dumpstack.c9
-rw-r--r--arch/x86/kernel/dumpstack_32.c4
-rw-r--r--arch/x86/kernel/dumpstack_64.c2
-rw-r--r--arch/x86/kernel/e820.c115
-rw-r--r--arch/x86/kernel/early_printk.c49
-rw-r--r--arch/x86/kernel/fpu/core.c23
-rw-r--r--arch/x86/kernel/fpu/internal.h2
-rw-r--r--arch/x86/kernel/fpu/signal.c11
-rw-r--r--arch/x86/kernel/fpu/xstate.c30
-rw-r--r--arch/x86/kernel/fpu/xstate.h31
-rw-r--r--arch/x86/kernel/ftrace.c30
-rw-r--r--arch/x86/kernel/ftrace_64.S5
-rw-r--r--arch/x86/kernel/head64.c4
-rw-r--r--arch/x86/kernel/head_64.S24
-rw-r--r--arch/x86/kernel/i8259.c1
-rw-r--r--arch/x86/kernel/ioport.c2
-rw-r--r--arch/x86/kernel/irq.c5
-rw-r--r--arch/x86/kernel/irq_32.c51
-rw-r--r--arch/x86/kernel/irq_64.c8
-rw-r--r--arch/x86/kernel/irqflags.S1
-rw-r--r--arch/x86/kernel/kprobes/core.c11
-rw-r--r--arch/x86/kernel/kvm.c1
-rw-r--r--arch/x86/kernel/module.c81
-rw-r--r--arch/x86/kernel/nmi.c42
-rw-r--r--arch/x86/kernel/paravirt.c48
-rw-r--r--arch/x86/kernel/process.c17
-rw-r--r--arch/x86/kernel/process_32.c11
-rw-r--r--arch/x86/kernel/process_64.c31
-rw-r--r--arch/x86/kernel/quirks.c2
-rw-r--r--arch/x86/kernel/reboot.c12
-rw-r--r--arch/x86/kernel/relocate_kernel_64.S23
-rw-r--r--arch/x86/kernel/setup.c200
-rw-r--r--arch/x86/kernel/setup_percpu.c15
-rw-r--r--arch/x86/kernel/signal_32.c62
-rw-r--r--arch/x86/kernel/smpboot.c92
-rw-r--r--arch/x86/kernel/static_call.c2
-rw-r--r--arch/x86/kernel/tboot.c3
-rw-r--r--arch/x86/kernel/traps.c150
-rw-r--r--arch/x86/kernel/tsc.c4
-rw-r--r--arch/x86/kernel/tsc_msr.c2
-rw-r--r--arch/x86/kernel/uprobes.c14
-rw-r--r--arch/x86/kernel/verify_cpu.S4
-rw-r--r--arch/x86/kernel/vmlinux.lds.S43
-rw-r--r--arch/x86/kvm/Kconfig1
-rw-r--r--arch/x86/kvm/cpuid.c72
-rw-r--r--arch/x86/kvm/cpuid.h9
-rw-r--r--arch/x86/kvm/emulate.c5
-rw-r--r--arch/x86/kvm/hyperv.c3
-rw-r--r--arch/x86/kvm/i8254.c3
-rw-r--r--arch/x86/kvm/i8259.c2
-rw-r--r--arch/x86/kvm/kvm_emulate.h7
-rw-r--r--arch/x86/kvm/lapic.c22
-rw-r--r--arch/x86/kvm/mmu/mmu.c365
-rw-r--r--arch/x86/kvm/mmu/paging_tmpl.h3
-rw-r--r--arch/x86/kvm/mmu/spte.c31
-rw-r--r--arch/x86/kvm/mmu/spte.h2
-rw-r--r--arch/x86/kvm/mmu/tdp_iter.h34
-rw-r--r--arch/x86/kvm/mmu/tdp_mmu.c45
-rw-r--r--arch/x86/kvm/smm.c2
-rw-r--r--arch/x86/kvm/svm/nested.c2
-rw-r--r--arch/x86/kvm/svm/sev.c397
-rw-r--r--arch/x86/kvm/svm/svm.c114
-rw-r--r--arch/x86/kvm/svm/svm.h41
-rw-r--r--arch/x86/kvm/svm/vmenter.S10
-rw-r--r--arch/x86/kvm/trace.h14
-rw-r--r--arch/x86/kvm/vmx/nested.c23
-rw-r--r--arch/x86/kvm/vmx/nested.h22
-rw-r--r--arch/x86/kvm/vmx/vmx.c235
-rw-r--r--arch/x86/kvm/vmx/vmx.h2
-rw-r--r--arch/x86/kvm/vmx/vmx_ops.h18
-rw-r--r--arch/x86/kvm/x86.c344
-rw-r--r--arch/x86/kvm/x86.h8
-rw-r--r--arch/x86/kvm/xen.c125
-rw-r--r--arch/x86/kvm/xen.h30
-rw-r--r--arch/x86/lib/Makefile10
-rw-r--r--arch/x86/lib/bhi.S147
-rw-r--r--arch/x86/lib/clear_page_64.S9
-rw-r--r--arch/x86/lib/cmpxchg8b_emu.S2
-rw-r--r--arch/x86/lib/copy_page_64.S3
-rw-r--r--arch/x86/lib/copy_user_64.S3
-rw-r--r--arch/x86/lib/copy_user_uncached_64.S2
-rw-r--r--arch/x86/lib/crc-pclmul-consts.h195
-rw-r--r--arch/x86/lib/crc-pclmul-template.S582
-rw-r--r--arch/x86/lib/crc-pclmul-template.h76
-rw-r--r--arch/x86/lib/crc-t10dif-glue.c29
-rw-r--r--arch/x86/lib/crc16-msb-pclmul.S6
-rw-r--r--arch/x86/lib/crc32-glue.c57
-rw-r--r--arch/x86/lib/crc32-pclmul.S219
-rw-r--r--arch/x86/lib/crc64-glue.c50
-rw-r--r--arch/x86/lib/crc64-pclmul.S7
-rw-r--r--arch/x86/lib/crct10dif-pcl-asm_64.S332
-rw-r--r--arch/x86/lib/delay.c2
-rw-r--r--arch/x86/lib/getuser.S16
-rw-r--r--arch/x86/lib/hweight.S3
-rw-r--r--arch/x86/lib/memmove_64.S3
-rw-r--r--arch/x86/lib/memset_64.S3
-rw-r--r--arch/x86/lib/msr-reg.S3
-rw-r--r--arch/x86/lib/msr.c2
-rw-r--r--arch/x86/lib/putuser.S9
-rw-r--r--arch/x86/lib/retpoline.S3
-rw-r--r--arch/x86/lib/usercopy_64.c2
-rw-r--r--arch/x86/math-emu/control_w.h2
-rw-r--r--arch/x86/math-emu/exception.h6
-rw-r--r--arch/x86/math-emu/fpu_emu.h6
-rw-r--r--arch/x86/math-emu/status_w.h6
-rw-r--r--arch/x86/mm/ident_map.c14
-rw-r--r--arch/x86/mm/init.c9
-rw-r--r--arch/x86/mm/init_32.c9
-rw-r--r--arch/x86/mm/init_64.c8
-rw-r--r--arch/x86/mm/ioremap.c8
-rw-r--r--arch/x86/mm/kasan_init_64.c1
-rw-r--r--arch/x86/mm/kaslr.c10
-rw-r--r--arch/x86/mm/mem_encrypt_amd.c2
-rw-r--r--arch/x86/mm/mem_encrypt_boot.S1
-rw-r--r--arch/x86/mm/mem_encrypt_identity.c6
-rw-r--r--arch/x86/mm/mmap.c9
-rw-r--r--arch/x86/mm/pat/cpa-test.c2
-rw-r--r--arch/x86/mm/pat/memtype.c6
-rw-r--r--arch/x86/mm/pat/set_memory.c243
-rw-r--r--arch/x86/mm/pgtable.c54
-rw-r--r--arch/x86/mm/tlb.c432
-rw-r--r--arch/x86/net/bpf_jit_comp.c34
-rw-r--r--arch/x86/pci/Makefile2
-rw-r--r--arch/x86/pci/sta2x11-fixup.c233
-rw-r--r--arch/x86/pci/xen.c8
-rw-r--r--arch/x86/platform/olpc/olpc-xo1-sci.c1
-rw-r--r--arch/x86/platform/olpc/olpc_dt.c3
-rw-r--r--arch/x86/platform/pvh/head.S14
-rw-r--r--arch/x86/power/hibernate_asm_64.S2
-rw-r--r--arch/x86/realmode/rm/realmode.h4
-rw-r--r--arch/x86/realmode/rm/wakeup.h2
-rwxr-xr-xarch/x86/tools/cpufeaturemasks.awk88
-rw-r--r--arch/x86/tools/insn_decoder_test.c3
-rw-r--r--arch/x86/tools/relocs.c147
-rw-r--r--arch/x86/virt/svm/sev.c1
-rw-r--r--arch/x86/xen/Kconfig2
-rw-r--r--arch/x86/xen/enlighten_pv.c69
-rw-r--r--arch/x86/xen/mmu_pv.c1
-rw-r--r--arch/x86/xen/smp_pv.c2
-rw-r--r--arch/x86/xen/xen-asm.S5
-rw-r--r--arch/x86/xen/xen-head.S12
-rw-r--r--arch/xtensa/kernel/syscalls/syscall.tbl1
-rw-r--r--arch/xtensa/kernel/traps.c6
-rw-r--r--block/Kconfig2
-rw-r--r--block/Makefile3
-rw-r--r--block/badblocks.c327
-rw-r--r--block/bdev.c13
-rw-r--r--block/bfq-iosched.c5
-rw-r--r--block/bio-integrity-auto.c191
-rw-r--r--block/bio-integrity.c266
-rw-r--r--block/bio.c17
-rw-r--r--block/blk-cgroup.c81
-rw-r--r--block/blk-cgroup.h2
-rw-r--r--block/blk-core.c7
-rw-r--r--block/blk-crypto-fallback.c7
-rw-r--r--block/blk-crypto-internal.h10
-rw-r--r--block/blk-crypto-profile.c101
-rw-r--r--block/blk-crypto-sysfs.c35
-rw-r--r--block/blk-crypto.c204
-rw-r--r--block/blk-flush.c10
-rw-r--r--block/blk-iocost.c23
-rw-r--r--block/blk-ioprio.c23
-rw-r--r--block/blk-merge.c4
-rw-r--r--block/blk-mq-debugfs.c41
-rw-r--r--block/blk-mq-sched.c2
-rw-r--r--block/blk-mq-sysfs.c4
-rw-r--r--block/blk-mq-tag.c3
-rw-r--r--block/blk-mq.c22
-rw-r--r--block/blk-mq.h4
-rw-r--r--block/blk-rq-qos.c82
-rw-r--r--block/blk-settings.c58
-rw-r--r--block/blk-sysfs.c304
-rw-r--r--block/blk-throttle.c82
-rw-r--r--block/blk-throttle.h7
-rw-r--r--block/blk-wbt.c17
-rw-r--r--block/blk.h2
-rw-r--r--block/bounce.c2
-rw-r--r--block/bsg-lib.c2
-rw-r--r--block/elevator.c43
-rw-r--r--block/elevator.h2
-rw-r--r--block/genhd.c9
-rw-r--r--block/ioctl.c5
-rw-r--r--block/kyber-iosched.c2
-rw-r--r--block/partitions/efi.c2
-rw-r--r--block/partitions/sgi.c2
-rw-r--r--block/partitions/sun.c2
-rw-r--r--block/t10-pi.c8
-rw-r--r--crypto/Kconfig26
-rw-r--r--crypto/Makefile4
-rw-r--r--crypto/crc32c_generic.c8
-rw-r--r--crypto/crc64_rocksoft_generic.c89
-rw-r--r--crypto/crct10dif_generic.c168
-rw-r--r--crypto/hkdf.c573
-rw-r--r--crypto/tcrypt.c8
-rw-r--r--crypto/testmgr.c14
-rw-r--r--crypto/testmgr.h303
-rw-r--r--drivers/accel/qaic/qaic_data.c9
-rw-r--r--drivers/acpi/Kconfig2
-rw-r--r--drivers/acpi/Makefile4
-rw-r--r--drivers/acpi/acpi_pnp.c2
-rw-r--r--drivers/acpi/acpi_video.c7
-rw-r--r--drivers/acpi/arm64/dma.c5
-rw-r--r--drivers/acpi/button.c10
-rw-r--r--drivers/acpi/device_pm.c4
-rw-r--r--drivers/acpi/fan.h1
-rw-r--r--drivers/acpi/fan_attr.c37
-rw-r--r--drivers/acpi/fan_core.c25
-rw-r--r--drivers/acpi/fan_hwmon.c8
-rw-r--r--drivers/acpi/hed.c7
-rw-r--r--drivers/acpi/numa/srat.c1
-rw-r--r--drivers/acpi/platform_profile.c139
-rw-r--r--drivers/acpi/power.c5
-rw-r--r--drivers/acpi/processor_idle.c2
-rw-r--r--drivers/acpi/scan.c7
-rw-r--r--drivers/acpi/thermal.c2
-rw-r--r--drivers/amba/bus.c3
-rw-r--r--drivers/android/binderfs.c1
-rw-r--r--drivers/ata/ahci.c34
-rw-r--r--drivers/ata/ahci.h1
-rw-r--r--drivers/ata/libahci.c4
-rw-r--r--drivers/ata/libata-core.c56
-rw-r--r--drivers/ata/libata-eh.c11
-rw-r--r--drivers/ata/pata_octeon_cf.c5
-rw-r--r--drivers/ata/sata_via.c3
-rw-r--r--drivers/auxdisplay/Kconfig1
-rw-r--r--drivers/auxdisplay/charlcd.c5
-rw-r--r--drivers/auxdisplay/charlcd.h5
-rw-r--r--drivers/auxdisplay/hd44780.c19
-rw-r--r--drivers/auxdisplay/hd44780_common.c24
-rw-r--r--drivers/auxdisplay/hd44780_common.h4
-rw-r--r--drivers/auxdisplay/lcd2s.c12
-rw-r--r--drivers/auxdisplay/panel.c17
-rw-r--r--drivers/auxdisplay/seg-led-gpio.c3
-rw-r--r--drivers/base/arch_topology.c26
-rw-r--r--drivers/base/core.c1
-rw-r--r--drivers/base/devtmpfs.c153
-rw-r--r--drivers/base/platform.c3
-rw-r--r--drivers/base/power/clock_ops.c73
-rw-r--r--drivers/base/power/generic_ops.c24
-rw-r--r--drivers/base/power/main.c165
-rw-r--r--drivers/base/power/runtime.c91
-rw-r--r--drivers/base/regmap/regcache.c31
-rw-r--r--drivers/block/loop.c106
-rw-r--r--drivers/block/mtip32xx/mtip32xx.c2
-rw-r--r--drivers/block/null_blk/main.c188
-rw-r--r--drivers/block/null_blk/null_blk.h6
-rw-r--r--drivers/block/null_blk/zoned.c20
-rw-r--r--drivers/block/rnbd/rnbd-clt.c2
-rw-r--r--drivers/block/sunvdc.c2
-rw-r--r--drivers/block/ublk_drv.c122
-rw-r--r--drivers/block/virtio_blk.c7
-rw-r--r--drivers/block/xen-blkfront.c2
-rw-r--r--drivers/bluetooth/Kconfig12
-rw-r--r--drivers/bluetooth/bfusb.c3
-rw-r--r--drivers/bluetooth/btintel.c341
-rw-r--r--drivers/bluetooth/btintel.h24
-rw-r--r--drivers/bluetooth/btintel_pcie.c582
-rw-r--r--drivers/bluetooth/btintel_pcie.h93
-rw-r--r--drivers/bluetooth/btmtk.c10
-rw-r--r--drivers/bluetooth/btmtksdio.c3
-rw-r--r--drivers/bluetooth/btnxpuart.c407
-rw-r--r--drivers/bluetooth/btqca.c27
-rw-r--r--drivers/bluetooth/btqca.h4
-rw-r--r--drivers/bluetooth/btusb.c78
-rw-r--r--drivers/bluetooth/hci_ldisc.c19
-rw-r--r--drivers/bluetooth/hci_qca.c27
-rw-r--r--drivers/bluetooth/hci_uart.h1
-rw-r--r--drivers/bluetooth/hci_vhci.c5
-rw-r--r--drivers/bus/fsl-mc/fsl-mc-bus.c3
-rw-r--r--drivers/bus/mhi/host/pci_generic.c5
-rw-r--r--drivers/bus/qcom-ssc-block-bus.c34
-rw-r--r--drivers/bus/simple-pm-bus.c22
-rw-r--r--drivers/cdx/cdx.c9
-rw-r--r--drivers/char/hw_random/timeriomem-rng.c3
-rw-r--r--drivers/char/misc.c2
-rw-r--r--drivers/char/random.c6
-rw-r--r--drivers/char/sonypi.c11
-rw-r--r--drivers/char/tpm/Kconfig9
-rw-r--r--drivers/char/tpm/Makefile1
-rw-r--r--drivers/char/tpm/tpm-chip.c6
-rw-r--r--drivers/char/tpm/tpm-interface.c37
-rw-r--r--drivers/char/tpm/tpm2-cmd.c1
-rw-r--r--drivers/char/tpm/tpm2-sessions.c2
-rw-r--r--drivers/char/tpm/tpm_crb.c105
-rw-r--r--drivers/char/tpm/tpm_crb_ffa.c348
-rw-r--r--drivers/char/tpm/tpm_crb_ffa.h25
-rw-r--r--drivers/char/tpm/tpm_ftpm_tee.c22
-rw-r--r--drivers/char/tpm/tpm_ftpm_tee.h1
-rw-r--r--drivers/char/tpm/tpm_tis_core.c20
-rw-r--r--drivers/char/tpm/tpm_tis_core.h1
-rw-r--r--drivers/char/virtio_console.c4
-rw-r--r--drivers/clk/qcom/dispcc-sm8750.c2
-rw-r--r--drivers/clk/samsung/clk-gs101.c8
-rw-r--r--drivers/clk/samsung/clk-pll.c7
-rw-r--r--drivers/clocksource/arm_arch_timer.c2
-rw-r--r--drivers/clocksource/exynos_mct.c2
-rw-r--r--drivers/clocksource/hyperv_timer.c4
-rw-r--r--drivers/clocksource/timer-stm32-lp.c36
-rw-r--r--drivers/cpufreq/Kconfig.arm2
-rw-r--r--drivers/cpufreq/Kconfig.powerpc18
-rw-r--r--drivers/cpufreq/Kconfig.x8612
-rw-r--r--drivers/cpufreq/Makefile3
-rw-r--r--drivers/cpufreq/acpi-cpufreq.c4
-rw-r--r--drivers/cpufreq/amd-pstate-trace.h57
-rw-r--r--drivers/cpufreq/amd-pstate-ut.c211
-rw-r--r--drivers/cpufreq/amd-pstate.c670
-rw-r--r--drivers/cpufreq/amd-pstate.h65
-rw-r--r--drivers/cpufreq/apple-soc-cpufreq.c18
-rw-r--r--drivers/cpufreq/armada-37xx-cpufreq.c6
-rw-r--r--drivers/cpufreq/armada-8k-cpufreq.c2
-rw-r--r--drivers/cpufreq/bmips-cpufreq.c1
-rw-r--r--drivers/cpufreq/brcmstb-avs-cpufreq.c1
-rw-r--r--drivers/cpufreq/cppc_cpufreq.c9
-rw-r--r--drivers/cpufreq/cpufreq-dt.c24
-rw-r--r--drivers/cpufreq/cpufreq.c84
-rw-r--r--drivers/cpufreq/cpufreq_governor.c45
-rw-r--r--drivers/cpufreq/davinci-cpufreq.c1
-rw-r--r--drivers/cpufreq/e_powersaver.c1
-rw-r--r--drivers/cpufreq/elanfreq.c1
-rw-r--r--drivers/cpufreq/freq_table.c15
-rw-r--r--drivers/cpufreq/imx6q-cpufreq.c1
-rw-r--r--drivers/cpufreq/intel_pstate.c43
-rw-r--r--drivers/cpufreq/kirkwood-cpufreq.c1
-rw-r--r--drivers/cpufreq/longhaul.c1
-rw-r--r--drivers/cpufreq/loongson2_cpufreq.c1
-rw-r--r--drivers/cpufreq/loongson3_cpufreq.c11
-rw-r--r--drivers/cpufreq/mediatek-cpufreq-hw.c3
-rw-r--r--drivers/cpufreq/mediatek-cpufreq.c3
-rw-r--r--drivers/cpufreq/mvebu-cpufreq.c2
-rw-r--r--drivers/cpufreq/omap-cpufreq.c1
-rw-r--r--drivers/cpufreq/p4-clockmod.c1
-rw-r--r--drivers/cpufreq/pasemi-cpufreq.c1
-rw-r--r--drivers/cpufreq/pmac32-cpufreq.c1
-rw-r--r--drivers/cpufreq/pmac64-cpufreq.c1
-rw-r--r--drivers/cpufreq/powernow-k6.c1
-rw-r--r--drivers/cpufreq/powernow-k7.c1
-rw-r--r--drivers/cpufreq/powernow-k8.c1
-rw-r--r--drivers/cpufreq/powernv-cpufreq.c11
-rw-r--r--drivers/cpufreq/ppc_cbe_cpufreq.c173
-rw-r--r--drivers/cpufreq/ppc_cbe_cpufreq.h33
-rw-r--r--drivers/cpufreq/ppc_cbe_cpufreq_pervasive.c102
-rw-r--r--drivers/cpufreq/ppc_cbe_cpufreq_pmi.c150
-rw-r--r--drivers/cpufreq/qcom-cpufreq-hw.c16
-rw-r--r--drivers/cpufreq/qcom-cpufreq-nvmem.c8
-rw-r--r--drivers/cpufreq/qoriq-cpufreq.c1
-rw-r--r--drivers/cpufreq/sc520_freq.c1
-rw-r--r--drivers/cpufreq/scmi-cpufreq.c21
-rw-r--r--drivers/cpufreq/scpi-cpufreq.c8
-rw-r--r--drivers/cpufreq/sh-cpufreq.c1
-rw-r--r--drivers/cpufreq/spear-cpufreq.c1
-rw-r--r--drivers/cpufreq/speedstep-centrino.c1
-rw-r--r--drivers/cpufreq/speedstep-ich.c1
-rw-r--r--drivers/cpufreq/speedstep-smi.c1
-rw-r--r--drivers/cpufreq/sun50i-cpufreq-nvmem.c6
-rw-r--r--drivers/cpufreq/tegra186-cpufreq.c8
-rw-r--r--drivers/cpufreq/tegra194-cpufreq.c1
-rw-r--r--drivers/cpufreq/vexpress-spc-cpufreq.c1
-rw-r--r--drivers/cpufreq/virtual-cpufreq.c3
-rw-r--r--drivers/cpuidle/Makefile3
-rw-r--r--drivers/cpuidle/cpuidle-arm.c8
-rw-r--r--drivers/cpuidle/cpuidle-big_little.c2
-rw-r--r--drivers/cpuidle/cpuidle-psci.c7
-rw-r--r--drivers/cpuidle/cpuidle-qcom-spm.c2
-rw-r--r--drivers/cpuidle/cpuidle-riscv-sbi.c4
-rw-r--r--drivers/cpuidle/governors/menu.c129
-rw-r--r--drivers/crypto/stm32/stm32-crc32.c2
-rw-r--r--drivers/devfreq/event/rockchip-dfi.c3
-rw-r--r--drivers/dpll/dpll_core.c7
-rw-r--r--drivers/edac/Kconfig30
-rw-r--r--drivers/edac/Makefile3
-rw-r--r--drivers/edac/amd64_edac.c52
-rw-r--r--drivers/edac/debugfs.c5
-rwxr-xr-xdrivers/edac/ecs.c205
-rw-r--r--drivers/edac/edac_device.c185
-rw-r--r--drivers/edac/i10nm_base.c2
-rw-r--r--drivers/edac/i5400_edac.c3
-rw-r--r--drivers/edac/i7300_edac.c7
-rw-r--r--drivers/edac/ie31200_edac.c641
-rw-r--r--drivers/edac/igen6_edac.c41
-rwxr-xr-xdrivers/edac/mem_repair.c359
-rw-r--r--drivers/edac/pnd2_edac.c4
-rwxr-xr-xdrivers/edac/scrub.c209
-rw-r--r--drivers/edac/skx_common.c33
-rw-r--r--drivers/edac/skx_common.h11
-rw-r--r--drivers/edac/xgene_edac.c17
-rw-r--r--drivers/firmware/Kconfig12
-rw-r--r--drivers/firmware/Makefile2
-rw-r--r--drivers/firmware/arm_ffa/bus.c14
-rw-r--r--drivers/firmware/arm_ffa/driver.c532
-rw-r--r--drivers/firmware/arm_scmi/bus.c69
-rw-r--r--drivers/firmware/arm_scmi/driver.c10
-rw-r--r--drivers/firmware/cirrus/test/cs_dsp_test_bin_error.c13
-rw-r--r--drivers/firmware/cirrus/test/cs_dsp_test_control_parse.c51
-rw-r--r--drivers/firmware/efi/libstub/randomalloc.c4
-rw-r--r--drivers/firmware/imx/imx-scu.c1
-rw-r--r--drivers/firmware/qcom/qcom_qseecom_uefisecapp.c18
-rw-r--r--drivers/firmware/qcom/qcom_scm.c4
-rw-r--r--drivers/firmware/samsung/Kconfig14
-rw-r--r--drivers/firmware/samsung/Makefile4
-rw-r--r--drivers/firmware/samsung/exynos-acpm-pmic.c224
-rw-r--r--drivers/firmware/samsung/exynos-acpm-pmic.h29
-rw-r--r--drivers/firmware/samsung/exynos-acpm.c769
-rw-r--r--drivers/firmware/samsung/exynos-acpm.h23
-rw-r--r--drivers/firmware/smccc/kvm_guest.c66
-rw-r--r--drivers/firmware/smccc/soc_id.c80
-rw-r--r--drivers/firmware/thead,th1520-aon.c250
-rw-r--r--drivers/firmware/xilinx/zynqmp.c6
-rw-r--r--drivers/gpio/Kconfig9
-rw-r--r--drivers/gpio/Makefile3
-rw-r--r--drivers/gpio/TODO89
-rw-r--r--drivers/gpio/dev-sync-probe.c97
-rw-r--r--drivers/gpio/dev-sync-probe.h25
-rw-r--r--drivers/gpio/gpio-74x164.c92
-rw-r--r--drivers/gpio/gpio-adnp.c138
-rw-r--r--drivers/gpio/gpio-adp5520.c12
-rw-r--r--drivers/gpio/gpio-adp5585.c10
-rw-r--r--drivers/gpio/gpio-aggregator.c58
-rw-r--r--drivers/gpio/gpio-altera-a10sr.c14
-rw-r--r--drivers/gpio/gpio-altera.c6
-rw-r--r--drivers/gpio/gpio-amd-fch.c7
-rw-r--r--drivers/gpio/gpio-amd8111.c6
-rw-r--r--drivers/gpio/gpio-arizona.c9
-rw-r--r--drivers/gpio/gpio-aspeed-sgpio.c86
-rw-r--r--drivers/gpio/gpio-aspeed.c108
-rw-r--r--drivers/gpio/gpio-bcm-kona.c69
-rw-r--r--drivers/gpio/gpio-bd71815.c15
-rw-r--r--drivers/gpio/gpio-bd71828.c15
-rw-r--r--drivers/gpio/gpio-bd9571mwv.c8
-rw-r--r--drivers/gpio/gpio-brcmstb.c3
-rw-r--r--drivers/gpio/gpio-bt8xx.c48
-rw-r--r--drivers/gpio/gpio-cgbc.c24
-rw-r--r--drivers/gpio/gpio-creg-snps.c10
-rw-r--r--drivers/gpio/gpio-cros-ec.c13
-rw-r--r--drivers/gpio/gpio-crystalcove.c15
-rw-r--r--drivers/gpio/gpio-cs5535.c6
-rw-r--r--drivers/gpio/gpio-da9052.c34
-rw-r--r--drivers/gpio/gpio-da9055.c14
-rw-r--r--drivers/gpio/gpio-davinci.c6
-rw-r--r--drivers/gpio/gpio-grgpio.c3
-rw-r--r--drivers/gpio/gpio-latch.c68
-rw-r--r--drivers/gpio/gpio-loongson-64bit.c55
-rw-r--r--drivers/gpio/gpio-max3191x.c18
-rw-r--r--drivers/gpio/gpio-max77650.c14
-rw-r--r--drivers/gpio/gpio-mmio.c37
-rw-r--r--drivers/gpio/gpio-mockup.c14
-rw-r--r--drivers/gpio/gpio-mvebu.c15
-rw-r--r--drivers/gpio/gpio-nomadik.c3
-rw-r--r--drivers/gpio/gpio-pca953x.c17
-rw-r--r--drivers/gpio/gpio-pcf857x.c29
-rw-r--r--drivers/gpio/gpio-rcar.c44
-rw-r--r--drivers/gpio/gpio-regmap.c73
-rw-r--r--drivers/gpio/gpio-sim.c98
-rw-r--r--drivers/gpio/gpio-stmpe.c6
-rw-r--r--drivers/gpio/gpio-vf610.c105
-rw-r--r--drivers/gpio/gpio-virtio.c29
-rw-r--r--drivers/gpio/gpio-virtuser.c73
-rw-r--r--drivers/gpio/gpio-wcove.c3
-rw-r--r--drivers/gpio/gpio-wm831x.c3
-rw-r--r--drivers/gpio/gpio-xilinx.c102
-rw-r--r--drivers/gpio/gpio-xra1403.c3
-rw-r--r--drivers/gpio/gpiolib-acpi.c2
-rw-r--r--drivers/gpio/gpiolib-cdev.c15
-rw-r--r--drivers/gpio/gpiolib-of.c137
-rw-r--r--drivers/gpio/gpiolib.c390
-rw-r--r--drivers/gpio/gpiolib.h44
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c11
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c21
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nv.c20
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc15.c21
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vce_v2_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vi.c43
-rw-r--r--drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h677
-rw-r--r--drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx12.asm82
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c8
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_queue.c16
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_svm.c8
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c19
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c1
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c64
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_resource.c10
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c11
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce60/dce60_timing_generator.c1
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c2
-rw-r--r--drivers/gpu/drm/amd/pm/amdgpu_pm.c2
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c12
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c94
-rw-r--r--drivers/gpu/drm/clients/drm_log.c4
-rw-r--r--drivers/gpu/drm/display/drm_dp_mst_topology.c40
-rw-r--r--drivers/gpu/drm/drm_atomic_uapi.c4
-rw-r--r--drivers/gpu/drm/drm_connector.c4
-rw-r--r--drivers/gpu/drm/drm_draw.c2
-rw-r--r--drivers/gpu/drm/drm_panic_qr.rs16
-rw-r--r--drivers/gpu/drm/gma500/mid_bios.c5
-rw-r--r--drivers/gpu/drm/hyperv/hyperv_drm_drv.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_display.c5
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_mst.c3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_mman.c5
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_huc.c4
-rw-r--r--drivers/gpu/drm/i915/gvt/display.c3
-rw-r--r--drivers/gpu/drm/i915/gvt/sched_policy.c3
-rw-r--r--drivers/gpu/drm/i915/i915_perf.c5
-rw-r--r--drivers/gpu/drm/i915/i915_pmu.c3
-rw-r--r--drivers/gpu/drm/i915/i915_request.c3
-rw-r--r--drivers/gpu/drm/i915/intel_uncore.c3
-rw-r--r--drivers/gpu/drm/imagination/pvr_fw_meta.c6
-rw-r--r--drivers/gpu/drm/imagination/pvr_fw_trace.c4
-rw-r--r--drivers/gpu/drm/imagination/pvr_queue.c18
-rw-r--r--drivers/gpu/drm/imagination/pvr_queue.h4
-rw-r--r--drivers/gpu/drm/imagination/pvr_vm.c134
-rw-r--r--drivers/gpu/drm/imagination/pvr_vm.h3
-rw-r--r--drivers/gpu/drm/msm/msm_fence.c3
-rw-r--r--drivers/gpu/drm/msm/msm_io_utils.c3
-rw-r--r--drivers/gpu/drm/nouveau/Kconfig1
-rw-r--r--drivers/gpu/drm/radeon/r300.c3
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.h1
-rw-r--r--drivers/gpu/drm/radeon/radeon_vce.c2
-rw-r--r--drivers/gpu/drm/radeon/rs400.c18
-rw-r--r--drivers/gpu/drm/scheduler/gpu_scheduler_trace.h4
-rw-r--r--drivers/gpu/drm/scheduler/sched_entity.c11
-rw-r--r--drivers/gpu/drm/tiny/bochs.c5
-rw-r--r--drivers/gpu/drm/v3d/v3d_sched.c23
-rw-r--r--drivers/gpu/drm/vkms/vkms_crtc.c4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_vkms.c4
-rw-r--r--drivers/gpu/drm/xe/display/xe_plane_initial.c10
-rw-r--r--drivers/gpu/drm/xe/xe_bo.h2
-rw-r--r--drivers/gpu/drm/xe/xe_dma_buf.c2
-rw-r--r--drivers/gpu/drm/xe/xe_gt.c4
-rw-r--r--drivers/gpu/drm/xe/xe_guc_pc.c53
-rw-r--r--drivers/gpu/drm/xe/xe_guc_submit.c2
-rw-r--r--drivers/gpu/drm/xe/xe_hmm.c192
-rw-r--r--drivers/gpu/drm/xe/xe_hmm.h7
-rw-r--r--drivers/gpu/drm/xe/xe_oa.c4
-rw-r--r--drivers/gpu/drm/xe/xe_pm.c13
-rw-r--r--drivers/gpu/drm/xe/xe_pt.c96
-rw-r--r--drivers/gpu/drm/xe/xe_pt_walk.c3
-rw-r--r--drivers/gpu/drm/xe/xe_pt_walk.h4
-rw-r--r--drivers/gpu/drm/xe/xe_vm.c103
-rw-r--r--drivers/gpu/drm/xe/xe_vm.h10
-rw-r--r--drivers/gpu/drm/xe/xe_vm_types.h8
-rw-r--r--drivers/gpu/host1x/dev.c6
-rw-r--r--drivers/hid/Kconfig40
-rw-r--r--drivers/hid/Makefile4
-rw-r--r--drivers/hid/amd-sfh-hid/amd_sfh_common.h1
-rw-r--r--drivers/hid/amd-sfh-hid/amd_sfh_pcie.c58
-rw-r--r--drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.c50
-rw-r--r--drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.h3
-rw-r--r--drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_interface.c2
-rw-r--r--drivers/hid/bpf/progs/Huion__Kamvas-Pro-19.bpf.c75
-rw-r--r--drivers/hid/bpf/progs/Huion__KeydialK20.bpf.c531
-rw-r--r--drivers/hid/bpf/progs/TUXEDO__Sirius-16-Gen1-and-Gen2.bpf.c47
-rw-r--r--drivers/hid/bpf/progs/XPPen__ACK05.bpf.c330
-rw-r--r--drivers/hid/bpf/progs/XPPen__ArtistPro16Gen2.bpf.c44
-rw-r--r--drivers/hid/bpf/progs/hid_bpf_async.h219
-rw-r--r--drivers/hid/bpf/progs/hid_bpf_helpers.h19
-rw-r--r--drivers/hid/hid-apple.c11
-rw-r--r--drivers/hid/hid-appleir.c2
-rw-r--r--drivers/hid/hid-appletb-bl.c204
-rw-r--r--drivers/hid/hid-appletb-kbd.c507
-rw-r--r--drivers/hid/hid-core.c6
-rw-r--r--drivers/hid/hid-corsair-void.c83
-rw-r--r--drivers/hid/hid-debug.c2
-rw-r--r--drivers/hid/hid-google-hammer.c3
-rw-r--r--drivers/hid/hid-ids.h37
-rw-r--r--drivers/hid/hid-lenovo.c8
-rw-r--r--drivers/hid/hid-lg-g15.c146
-rw-r--r--drivers/hid/hid-nintendo.c14
-rw-r--r--drivers/hid/hid-plantronics.c144
-rw-r--r--drivers/hid/hid-quirks.c24
-rw-r--r--drivers/hid/hid-steam.c9
-rw-r--r--drivers/hid/hid-universal-pidff.c202
-rw-r--r--drivers/hid/i2c-hid/i2c-hid-core.c2
-rw-r--r--drivers/hid/intel-ish-hid/ishtp-hid-client.c2
-rw-r--r--drivers/hid/intel-ish-hid/ishtp-hid.c4
-rw-r--r--drivers/hid/intel-thc-hid/intel-quicki2c/pci-quicki2c.c14
-rw-r--r--drivers/hid/intel-thc-hid/intel-quickspi/pci-quickspi.c18
-rw-r--r--drivers/hid/intel-thc-hid/intel-quickspi/quickspi-dev.h4
-rw-r--r--drivers/hid/intel-thc-hid/intel-quickspi/quickspi-protocol.c4
-rw-r--r--drivers/hid/intel-thc-hid/intel-thc/intel-thc-dma.c2
-rw-r--r--drivers/hid/usbhid/hid-core.c1
-rw-r--r--drivers/hid/usbhid/hid-pidff.c569
-rw-r--r--drivers/hid/usbhid/hid-pidff.h33
-rw-r--r--drivers/hid/usbhid/usbkbd.c2
-rw-r--r--drivers/hid/wacom_sys.c35
-rw-r--r--drivers/hid/wacom_wac.c8
-rw-r--r--drivers/hid/wacom_wac.h7
-rw-r--r--drivers/hv/Kconfig17
-rw-r--r--drivers/hv/Makefile4
-rw-r--r--drivers/hv/hv.c94
-rw-r--r--drivers/hv/hv_common.c198
-rw-r--r--drivers/hv/hv_proc.c (renamed from arch/x86/hyperv/hv_proc.c)27
-rw-r--r--drivers/hv/mshv.h30
-rw-r--r--drivers/hv/mshv_common.c161
-rw-r--r--drivers/hv/mshv_eventfd.c833
-rw-r--r--drivers/hv/mshv_eventfd.h71
-rw-r--r--drivers/hv/mshv_irq.c124
-rw-r--r--drivers/hv/mshv_portid_table.c83
-rw-r--r--drivers/hv/mshv_root.h311
-rw-r--r--drivers/hv/mshv_root_hv_call.c849
-rw-r--r--drivers/hv/mshv_root_main.c2307
-rw-r--r--drivers/hv/mshv_synic.c665
-rw-r--r--drivers/hv/vmbus_drv.c67
-rw-r--r--drivers/hwmon/Kconfig25
-rw-r--r--drivers/hwmon/Makefile2
-rw-r--r--drivers/hwmon/acpi_power_meter.c856
-rw-r--r--drivers/hwmon/ad7314.c10
-rw-r--r--drivers/hwmon/asus-ec-sensors.c10
-rw-r--r--drivers/hwmon/cgbc-hwmon.c304
-rw-r--r--drivers/hwmon/dell-smm-hwmon.c5
-rw-r--r--drivers/hwmon/emc2305.c38
-rw-r--r--drivers/hwmon/gpio-fan.c16
-rw-r--r--drivers/hwmon/gsc-hwmon.c1
-rw-r--r--drivers/hwmon/hp-wmi-sensors.c4
-rw-r--r--drivers/hwmon/htu31.c350
-rw-r--r--drivers/hwmon/hwmon.c4
-rw-r--r--drivers/hwmon/ina3221.c9
-rw-r--r--drivers/hwmon/isl28022.c44
-rw-r--r--drivers/hwmon/k10temp.c2
-rw-r--r--drivers/hwmon/lm90.c82
-rw-r--r--drivers/hwmon/ltc4282.c44
-rw-r--r--drivers/hwmon/nct6683.c3
-rw-r--r--drivers/hwmon/nct6775-core.c4
-rw-r--r--drivers/hwmon/ntc_thermistor.c81
-rw-r--r--drivers/hwmon/peci/dimmtemp.c10
-rw-r--r--drivers/hwmon/pmbus/Kconfig15
-rw-r--r--drivers/hwmon/pmbus/Makefile1
-rw-r--r--drivers/hwmon/pmbus/ina233.c191
-rw-r--r--drivers/hwmon/pmbus/ltc2978.c69
-rw-r--r--drivers/hwmon/pmbus/pmbus.c2
-rw-r--r--drivers/hwmon/pmbus/pmbus_core.c371
-rw-r--r--drivers/hwmon/pt5161l.c46
-rw-r--r--drivers/hwmon/sg2042-mcu.c42
-rw-r--r--drivers/hwmon/sht3x.c67
-rw-r--r--drivers/hwmon/tps23861.c31
-rw-r--r--drivers/hwmon/xgene-hwmon.c4
-rw-r--r--drivers/hwtracing/coresight/coresight-etm4x-core.c2
-rw-r--r--drivers/hwtracing/intel_th/msu.c13
-rw-r--r--drivers/hwtracing/intel_th/pci.c15
-rw-r--r--drivers/hwtracing/stm/heartbeat.c6
-rw-r--r--drivers/i2c/busses/i2c-ali1535.c12
-rw-r--r--drivers/i2c/busses/i2c-ali15x3.c12
-rw-r--r--drivers/i2c/busses/i2c-amd-mp2-pci.c5
-rw-r--r--drivers/i2c/busses/i2c-imx.c4
-rw-r--r--drivers/i2c/busses/i2c-omap.c26
-rw-r--r--drivers/i2c/busses/i2c-sis630.c12
-rw-r--r--drivers/idle/Makefile5
-rw-r--r--drivers/idle/intel_idle.c49
-rw-r--r--drivers/iio/adc/ad7192.c2
-rw-r--r--drivers/iio/adc/ad7606.c2
-rw-r--r--drivers/iio/adc/at91-sama5d2_adc.c68
-rw-r--r--drivers/iio/adc/pac1921.c2
-rw-r--r--drivers/iio/adc/ti-tsc2046.c4
-rw-r--r--drivers/iio/dac/ad3552r.c6
-rw-r--r--drivers/iio/filter/admv8818.c14
-rw-r--r--drivers/iio/imu/st_lsm9ds0/st_lsm9ds0_i2c.c2
-rw-r--r--drivers/iio/imu/st_lsm9ds0/st_lsm9ds0_spi.c2
-rw-r--r--drivers/iio/light/apds9306.c4
-rw-r--r--drivers/iio/light/hid-sensor-prox.c7
-rw-r--r--drivers/iio/proximity/hx9023s.c3
-rw-r--r--drivers/iio/trigger/iio-trig-hrtimer.c4
-rw-r--r--drivers/infiniband/hw/bnxt_re/bnxt_re.h6
-rw-r--r--drivers/infiniband/hw/bnxt_re/main.c3
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_fp.c2
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_rcfw.c10
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_rcfw.h6
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_res.c9
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_res.h12
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_sp.c3
-rw-r--r--drivers/infiniband/hw/bnxt_re/roce_hsi.h3
-rw-r--r--drivers/infiniband/hw/hfi1/init.c5
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_alloc.c4
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_cq.c1
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hem.c16
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_main.c2
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_qp.c20
-rw-r--r--drivers/infiniband/hw/irdma/hw.c2
-rw-r--r--drivers/infiniband/hw/irdma/main.c46
-rw-r--r--drivers/infiniband/hw/irdma/main.h3
-rw-r--r--drivers/infiniband/hw/mlx5/ah.c14
-rw-r--r--drivers/infiniband/sw/rdmavt/qp.c5
-rw-r--r--drivers/infiniband/sw/rxe/rxe.c25
-rw-r--r--drivers/infiniband/sw/siw/siw.h4
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_netlink.c9
-rw-r--r--drivers/input/joystick/xpad.c39
-rw-r--r--drivers/input/misc/iqs7222.c50
-rw-r--r--drivers/input/serio/i8042-acpipnpio.h111
-rw-r--r--drivers/input/touchscreen/ads7846.c2
-rw-r--r--drivers/input/touchscreen/goodix_berlin_core.c26
-rw-r--r--drivers/input/touchscreen/imagis.c9
-rw-r--r--drivers/input/touchscreen/wdt87xx_i2c.c2
-rw-r--r--drivers/iommu/Kconfig4
-rw-r--r--drivers/iommu/amd/amd_iommu.h8
-rw-r--r--drivers/iommu/amd/amd_iommu_types.h30
-rw-r--r--drivers/iommu/amd/init.c65
-rw-r--r--drivers/iommu/amd/io_pgtable.c7
-rw-r--r--drivers/iommu/amd/io_pgtable_v2.c2
-rw-r--r--drivers/iommu/amd/iommu.c91
-rw-r--r--drivers/iommu/amd/pasid.c2
-rw-r--r--drivers/iommu/apple-dart.c22
-rw-r--r--drivers/iommu/arm/arm-smmu/arm-smmu.c11
-rw-r--r--drivers/iommu/dma-iommu.c82
-rw-r--r--drivers/iommu/hyperv-iommu.c8
-rw-r--r--drivers/iommu/intel/iommu.c239
-rw-r--r--drivers/iommu/intel/iommu.h28
-rw-r--r--drivers/iommu/intel/irq_remapping.c42
-rw-r--r--drivers/iommu/intel/pasid.c43
-rw-r--r--drivers/iommu/intel/prq.c2
-rw-r--r--drivers/iommu/intel/svm.c43
-rw-r--r--drivers/iommu/io-pgtable-dart.c2
-rw-r--r--drivers/iommu/iommu-priv.h5
-rw-r--r--drivers/iommu/iommu.c220
-rw-r--r--drivers/iommu/iommufd/device.c266
-rw-r--r--drivers/iommu/iommufd/fault.c130
-rw-r--r--drivers/iommu/iommufd/hw_pagetable.c5
-rw-r--r--drivers/iommu/iommufd/iommufd_private.h64
-rw-r--r--drivers/iommu/iommufd/main.c9
-rw-r--r--drivers/iommu/mtk_iommu_v1.c25
-rw-r--r--drivers/iommu/of_iommu.c13
-rw-r--r--drivers/iommu/rockchip-iommu.c61
-rw-r--r--drivers/iommu/s390-iommu.c138
-rw-r--r--drivers/iommu/tegra-smmu.c1
-rw-r--r--drivers/irqchip/Kconfig24
-rw-r--r--drivers/irqchip/Makefile1
-rw-r--r--drivers/irqchip/irq-apple-aic.c8
-rw-r--r--drivers/irqchip/irq-davinci-cp-intc.c57
-rw-r--r--drivers/irqchip/irq-gic-v2m.c6
-rw-r--r--drivers/irqchip/irq-gic-v3-its-msi-parent.c1
-rw-r--r--drivers/irqchip/irq-gic-v3-its.c36
-rw-r--r--drivers/irqchip/irq-gic-v3-mbi.c13
-rw-r--r--drivers/irqchip/irq-imx-irqsteer.c14
-rw-r--r--drivers/irqchip/irq-imx-mu-msi.c1
-rw-r--r--drivers/irqchip/irq-loongson-pch-msi.c1
-rw-r--r--drivers/irqchip/irq-ls-scfg-msi.c5
-rw-r--r--drivers/irqchip/irq-meson-gpio.c48
-rw-r--r--drivers/irqchip/irq-msi-lib.c11
-rw-r--r--drivers/irqchip/irq-mvebu-gicp.c1
-rw-r--r--drivers/irqchip/irq-mvebu-odmi.c1
-rw-r--r--drivers/irqchip/irq-mvebu-sei.c1
-rw-r--r--drivers/irqchip/irq-renesas-rzg2l.c53
-rw-r--r--drivers/irqchip/irq-renesas-rzv2h.c198
-rw-r--r--drivers/irqchip/irq-riscv-aplic-direct.c24
-rw-r--r--drivers/irqchip/irq-riscv-imsic-early.c14
-rw-r--r--drivers/irqchip/irq-riscv-imsic-platform.c215
-rw-r--r--drivers/irqchip/irq-riscv-imsic-state.c151
-rw-r--r--drivers/irqchip/irq-riscv-imsic-state.h12
-rw-r--r--drivers/irqchip/irq-sg2042-msi.c249
-rw-r--r--drivers/irqchip/irq-sunxi-nmi.c85
-rw-r--r--drivers/leds/leds-aw200xx.c2
-rw-r--r--drivers/leds/leds-st1202.c21
-rw-r--r--drivers/leds/trigger/ledtrig-pattern.c4
-rw-r--r--drivers/mailbox/mailbox.c4
-rw-r--r--drivers/md/dm-flakey.c2
-rw-r--r--drivers/md/dm-integrity.c12
-rw-r--r--drivers/md/dm-table.c7
-rw-r--r--drivers/md/md-bitmap.c14
-rw-r--r--drivers/md/md-cluster.c18
-rw-r--r--drivers/md/md-cluster.h6
-rw-r--r--drivers/md/md-linear.c15
-rw-r--r--drivers/md/md.c356
-rw-r--r--drivers/md/md.h62
-rw-r--r--drivers/md/raid0.c18
-rw-r--r--drivers/md/raid1-10.c6
-rw-r--r--drivers/md/raid1.c56
-rw-r--r--drivers/md/raid10.c66
-rw-r--r--drivers/md/raid5-cache.c31
-rw-r--r--drivers/md/raid5-ppl.c16
-rw-r--r--drivers/md/raid5.c91
-rw-r--r--drivers/media/cec/core/cec-api.c2
-rw-r--r--drivers/media/cec/core/cec-pin.c14
-rw-r--r--drivers/media/common/siano/smsdvb-main.c2
-rw-r--r--drivers/media/common/v4l2-tpg/v4l2-tpg-core.c8
-rw-r--r--drivers/media/dvb-frontends/dibx000_common.c10
-rw-r--r--drivers/media/dvb-frontends/rtl2832_sdr.c2
-rw-r--r--drivers/media/dvb-frontends/stv0299.c2
-rw-r--r--drivers/media/dvb-frontends/tda10048.c8
-rw-r--r--drivers/media/i2c/Kconfig12
-rw-r--r--drivers/media/i2c/Makefile1
-rw-r--r--drivers/media/i2c/adv7180.c34
-rw-r--r--drivers/media/i2c/adv748x/adv748x.h2
-rw-r--r--drivers/media/i2c/adv7511-v4l2.c6
-rw-r--r--drivers/media/i2c/ccs-pll.c16
-rw-r--r--drivers/media/i2c/ccs/ccs-core.c6
-rw-r--r--drivers/media/i2c/dw9719.c113
-rw-r--r--drivers/media/i2c/hi556.c46
-rw-r--r--drivers/media/i2c/imx214.c1287
-rw-r--r--drivers/media/i2c/imx219.c320
-rw-r--r--drivers/media/i2c/imx283.c37
-rw-r--r--drivers/media/i2c/imx319.c9
-rw-r--r--drivers/media/i2c/imx335.c21
-rw-r--r--drivers/media/i2c/imx415.c183
-rw-r--r--drivers/media/i2c/lt6911uxe.c707
-rw-r--r--drivers/media/i2c/ov08x40.c168
-rw-r--r--drivers/media/i2c/ov2740.c27
-rw-r--r--drivers/media/i2c/ov7251.c4
-rw-r--r--drivers/media/i2c/ov9282.c23
-rw-r--r--drivers/media/i2c/st-mipid02.c5
-rw-r--r--drivers/media/i2c/tc358746.c235
-rw-r--r--drivers/media/i2c/tda1997x.c7
-rw-r--r--drivers/media/i2c/vgxy61.c4
-rw-r--r--drivers/media/i2c/video-i2c.c12
-rw-r--r--drivers/media/pci/cx23885/cx23885-cards.c30
-rw-r--r--drivers/media/pci/cx23885/cx23885-core.c33
-rw-r--r--drivers/media/pci/cx23885/cx23885-video.c3
-rw-r--r--drivers/media/pci/cx23885/cx23885.h1
-rw-r--r--drivers/media/pci/cx23885/netup-eeprom.c29
-rw-r--r--drivers/media/pci/cx23885/netup-eeprom.h1
-rw-r--r--drivers/media/pci/cx88/cx88-input.c3
-rw-r--r--drivers/media/pci/intel/ipu3/ipu3-cio2.c11
-rw-r--r--drivers/media/pci/intel/ipu6/ipu6-dma.c33
-rw-r--r--drivers/media/pci/intel/ipu6/ipu6-dma.h3
-rw-r--r--drivers/media/pci/intel/ipu6/ipu6-isys-csi2.c12
-rw-r--r--drivers/media/pci/intel/ipu6/ipu6-isys-queue.c2
-rw-r--r--drivers/media/pci/intel/ipu6/ipu6-isys-video.c1
-rw-r--r--drivers/media/pci/intel/ivsc/mei_csi.c78
-rw-r--r--drivers/media/pci/mgb4/mgb4_cmt.c8
-rw-r--r--drivers/media/pci/mgb4/mgb4_core.c13
-rw-r--r--drivers/media/pci/mgb4/mgb4_core.h8
-rw-r--r--drivers/media/pci/mgb4/mgb4_regs.c1
-rw-r--r--drivers/media/platform/Kconfig1
-rw-r--r--drivers/media/platform/Makefile1
-rw-r--r--drivers/media/platform/allegro-dvt/allegro-core.c1
-rw-r--r--drivers/media/platform/cadence/cdns-csi2rx.c4
-rw-r--r--drivers/media/platform/chips-media/coda/coda-common.c1
-rw-r--r--drivers/media/platform/chips-media/wave5/wave5-hw.c2
-rw-r--r--drivers/media/platform/chips-media/wave5/wave5-vpu-dec.c31
-rw-r--r--drivers/media/platform/chips-media/wave5/wave5-vpu.c8
-rw-r--r--drivers/media/platform/chips-media/wave5/wave5-vpuapi.c10
-rw-r--r--drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_fw_scp.c5
-rw-r--r--drivers/media/platform/mediatek/vcodec/encoder/venc/venc_h264_if.c6
-rw-r--r--drivers/media/platform/nuvoton/npcm-video.c6
-rw-r--r--drivers/media/platform/nxp/imx-mipi-csis.c5
-rw-r--r--drivers/media/platform/nxp/imx8mq-mipi-csi2.c11
-rw-r--r--drivers/media/platform/qcom/Kconfig1
-rw-r--r--drivers/media/platform/qcom/Makefile1
-rw-r--r--drivers/media/platform/qcom/camss/Makefile2
-rw-r--r--drivers/media/platform/qcom/camss/camss-csid-4-1.c19
-rw-r--r--drivers/media/platform/qcom/camss/camss-csid-4-7.c42
-rw-r--r--drivers/media/platform/qcom/camss/camss-csid-780.c337
-rw-r--r--drivers/media/platform/qcom/camss/camss-csid-780.h25
-rw-r--r--drivers/media/platform/qcom/camss/camss-csid-gen2.c60
-rw-r--r--drivers/media/platform/qcom/camss/camss-csid.c258
-rw-r--r--drivers/media/platform/qcom/camss/camss-csid.h54
-rw-r--r--drivers/media/platform/qcom/camss/camss-csiphy-2ph-1-0.c6
-rw-r--r--drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c794
-rw-r--r--drivers/media/platform/qcom/camss/camss-csiphy.c4
-rw-r--r--drivers/media/platform/qcom/camss/camss-csiphy.h8
-rw-r--r--drivers/media/platform/qcom/camss/camss-vfe-17x.c112
-rw-r--r--drivers/media/platform/qcom/camss/camss-vfe-4-1.c9
-rw-r--r--drivers/media/platform/qcom/camss/camss-vfe-4-7.c11
-rw-r--r--drivers/media/platform/qcom/camss/camss-vfe-4-8.c11
-rw-r--r--drivers/media/platform/qcom/camss/camss-vfe-480.c274
-rw-r--r--drivers/media/platform/qcom/camss/camss-vfe-780.c159
-rw-r--r--drivers/media/platform/qcom/camss/camss-vfe-gen1.c9
-rw-r--r--drivers/media/platform/qcom/camss/camss-vfe.c275
-rw-r--r--drivers/media/platform/qcom/camss/camss-vfe.h59
-rw-r--r--drivers/media/platform/qcom/camss/camss.c595
-rw-r--r--drivers/media/platform/qcom/camss/camss.h6
-rw-r--r--drivers/media/platform/qcom/iris/Kconfig13
-rw-r--r--drivers/media/platform/qcom/iris/Makefile31
-rw-r--r--drivers/media/platform/qcom/iris/iris_buffer.c623
-rw-r--r--drivers/media/platform/qcom/iris/iris_buffer.h117
-rw-r--r--drivers/media/platform/qcom/iris/iris_core.c96
-rw-r--r--drivers/media/platform/qcom/iris/iris_core.h111
-rw-r--r--drivers/media/platform/qcom/iris/iris_ctrls.c259
-rw-r--r--drivers/media/platform/qcom/iris/iris_ctrls.h22
-rw-r--r--drivers/media/platform/qcom/iris/iris_firmware.c116
-rw-r--r--drivers/media/platform/qcom/iris/iris_firmware.h15
-rw-r--r--drivers/media/platform/qcom/iris/iris_hfi_common.c176
-rw-r--r--drivers/media/platform/qcom/iris/iris_hfi_common.h155
-rw-r--r--drivers/media/platform/qcom/iris/iris_hfi_gen1.h16
-rw-r--r--drivers/media/platform/qcom/iris/iris_hfi_gen1_command.c826
-rw-r--r--drivers/media/platform/qcom/iris/iris_hfi_gen1_defines.h448
-rw-r--r--drivers/media/platform/qcom/iris/iris_hfi_gen1_response.c666
-rw-r--r--drivers/media/platform/qcom/iris/iris_hfi_gen2.h41
-rw-r--r--drivers/media/platform/qcom/iris/iris_hfi_gen2_command.c957
-rw-r--r--drivers/media/platform/qcom/iris/iris_hfi_gen2_defines.h161
-rw-r--r--drivers/media/platform/qcom/iris/iris_hfi_gen2_packet.c292
-rw-r--r--drivers/media/platform/qcom/iris/iris_hfi_gen2_packet.h125
-rw-r--r--drivers/media/platform/qcom/iris/iris_hfi_gen2_response.c934
-rw-r--r--drivers/media/platform/qcom/iris/iris_hfi_queue.c318
-rw-r--r--drivers/media/platform/qcom/iris/iris_hfi_queue.h182
-rw-r--r--drivers/media/platform/qcom/iris/iris_instance.h77
-rw-r--r--drivers/media/platform/qcom/iris/iris_platform_common.h186
-rw-r--r--drivers/media/platform/qcom/iris/iris_platform_sm8250.c149
-rw-r--r--drivers/media/platform/qcom/iris/iris_platform_sm8550.c266
-rw-r--r--drivers/media/platform/qcom/iris/iris_power.c140
-rw-r--r--drivers/media/platform/qcom/iris/iris_power.h13
-rw-r--r--drivers/media/platform/qcom/iris/iris_probe.c349
-rw-r--r--drivers/media/platform/qcom/iris/iris_resources.c131
-rw-r--r--drivers/media/platform/qcom/iris/iris_resources.h18
-rw-r--r--drivers/media/platform/qcom/iris/iris_state.c276
-rw-r--r--drivers/media/platform/qcom/iris/iris_state.h144
-rw-r--r--drivers/media/platform/qcom/iris/iris_utils.c90
-rw-r--r--drivers/media/platform/qcom/iris/iris_utils.h53
-rw-r--r--drivers/media/platform/qcom/iris/iris_vb2.c335
-rw-r--r--drivers/media/platform/qcom/iris/iris_vb2.h19
-rw-r--r--drivers/media/platform/qcom/iris/iris_vdec.c659
-rw-r--r--drivers/media/platform/qcom/iris/iris_vdec.h25
-rw-r--r--drivers/media/platform/qcom/iris/iris_vidc.c453
-rw-r--r--drivers/media/platform/qcom/iris/iris_vidc.h15
-rw-r--r--drivers/media/platform/qcom/iris/iris_vpu2.c38
-rw-r--r--drivers/media/platform/qcom/iris/iris_vpu3.c122
-rw-r--r--drivers/media/platform/qcom/iris/iris_vpu_buffer.c270
-rw-r--r--drivers/media/platform/qcom/iris/iris_vpu_buffer.h91
-rw-r--r--drivers/media/platform/qcom/iris/iris_vpu_common.c369
-rw-r--r--drivers/media/platform/qcom/iris/iris_vpu_common.h28
-rw-r--r--drivers/media/platform/qcom/iris/iris_vpu_register_defines.h17
-rw-r--r--drivers/media/platform/qcom/venus/Kconfig2
-rw-r--r--drivers/media/platform/qcom/venus/hfi_parser.c100
-rw-r--r--drivers/media/platform/qcom/venus/hfi_venus.c18
-rw-r--r--drivers/media/platform/qcom/venus/venc_ctrls.c9
-rw-r--r--drivers/media/platform/raspberrypi/rp1-cfe/cfe.c6
-rw-r--r--drivers/media/platform/renesas/rcar-csi2.c118
-rw-r--r--drivers/media/platform/renesas/rcar-isp.c162
-rw-r--r--drivers/media/platform/renesas/rcar-vin/rcar-dma.c4
-rw-r--r--drivers/media/platform/rockchip/rga/rga-buf.c7
-rw-r--r--drivers/media/platform/rockchip/rga/rga-hw.c2
-rw-r--r--drivers/media/platform/samsung/s5p-mfc/s5p_mfc_opr_v6.c5
-rw-r--r--drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-core.c3
-rw-r--r--drivers/media/platform/st/stm32/dma2d/dma2d.c3
-rw-r--r--drivers/media/platform/st/stm32/stm32-csi.c106
-rw-r--r--drivers/media/platform/st/stm32/stm32-dcmipp/dcmipp-core.c23
-rw-r--r--drivers/media/platform/synopsys/Kconfig3
-rw-r--r--drivers/media/platform/synopsys/Makefile2
-rw-r--r--drivers/media/platform/synopsys/hdmirx/Kconfig35
-rw-r--r--drivers/media/platform/synopsys/hdmirx/Makefile4
-rw-r--r--drivers/media/platform/synopsys/hdmirx/snps_hdmirx.c2746
-rw-r--r--drivers/media/platform/synopsys/hdmirx/snps_hdmirx.h394
-rw-r--r--drivers/media/platform/synopsys/hdmirx/snps_hdmirx_cec.c275
-rw-r--r--drivers/media/platform/synopsys/hdmirx/snps_hdmirx_cec.h43
-rw-r--r--drivers/media/platform/ti/cal/cal-camerarx.c3
-rw-r--r--drivers/media/platform/ti/cal/cal.c4
-rw-r--r--drivers/media/platform/ti/cal/cal.h1
-rw-r--r--drivers/media/platform/ti/omap3isp/isp.c54
-rw-r--r--drivers/media/platform/ti/omap3isp/isp.h2
-rw-r--r--drivers/media/platform/verisilicon/hantro_g2_hevc_dec.c1
-rw-r--r--drivers/media/platform/verisilicon/hantro_g2_vp9_dec.c8
-rw-r--r--drivers/media/platform/xilinx/xilinx-tpg.c2
-rw-r--r--drivers/media/radio/radio-aztech.c2
-rw-r--r--drivers/media/radio/radio-wl1273.c4
-rw-r--r--drivers/media/rc/keymaps/Makefile1
-rw-r--r--drivers/media/rc/keymaps/rc-siemens-gigaset-rc20.c71
-rw-r--r--drivers/media/rc/pwm-ir-tx.c3
-rw-r--r--drivers/media/rc/rc-core-priv.h4
-rw-r--r--drivers/media/rc/streamzap.c70
-rw-r--r--drivers/media/test-drivers/vim2m.c6
-rw-r--r--drivers/media/test-drivers/vimc/vimc-streamer.c6
-rw-r--r--drivers/media/test-drivers/visl/visl-core.c12
-rw-r--r--drivers/media/test-drivers/vivid/Kconfig12
-rw-r--r--drivers/media/test-drivers/vivid/Makefile5
-rw-r--r--drivers/media/test-drivers/vivid/vivid-core.c14
-rw-r--r--drivers/media/test-drivers/vivid/vivid-core.h2
-rw-r--r--drivers/media/test-drivers/vivid/vivid-ctrls.c36
-rw-r--r--drivers/media/test-drivers/vivid/vivid-kthread-cap.c11
-rw-r--r--drivers/media/test-drivers/vivid/vivid-kthread-out.c11
-rw-r--r--drivers/media/test-drivers/vivid/vivid-kthread-touch.c11
-rw-r--r--drivers/media/test-drivers/vivid/vivid-osd.c24
-rw-r--r--drivers/media/test-drivers/vivid/vivid-osd.h19
-rw-r--r--drivers/media/test-drivers/vivid/vivid-sdr-cap.c11
-rw-r--r--drivers/media/test-drivers/vivid/vivid-vid-out.c3
-rw-r--r--drivers/media/tuners/tuner-simple.c20
-rw-r--r--drivers/media/tuners/tuner-types.c296
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-417.c17
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-pcb-cfg.h18
-rw-r--r--drivers/media/usb/dvb-usb-v2/af9015.c2
-rw-r--r--drivers/media/usb/dvb-usb-v2/af9035.c2
-rw-r--r--drivers/media/usb/dvb-usb-v2/anysee.c2
-rw-r--r--drivers/media/usb/dvb-usb-v2/au6610.c2
-rw-r--r--drivers/media/usb/dvb-usb-v2/az6007.c2
-rw-r--r--drivers/media/usb/dvb-usb-v2/ce6230.c2
-rw-r--r--drivers/media/usb/dvb-usb-v2/dvb_usb.h2
-rw-r--r--drivers/media/usb/dvb-usb-v2/dvbsky.c2
-rw-r--r--drivers/media/usb/dvb-usb-v2/ec168.c2
-rw-r--r--drivers/media/usb/dvb-usb-v2/gl861.c2
-rw-r--r--drivers/media/usb/dvb-usb-v2/lmedm04.c2
-rw-r--r--drivers/media/usb/dvb-usb-v2/mxl111sf.c2
-rw-r--r--drivers/media/usb/dvb-usb-v2/rtl28xxu.c2
-rw-r--r--drivers/media/usb/dvb-usb/a800.c2
-rw-r--r--drivers/media/usb/dvb-usb/af9005.c4
-rw-r--r--drivers/media/usb/dvb-usb/az6027.c4
-rw-r--r--drivers/media/usb/dvb-usb/cinergyT2-core.c2
-rw-r--r--drivers/media/usb/dvb-usb/cxusb-analog.c4
-rw-r--r--drivers/media/usb/dvb-usb/cxusb.c6
-rw-r--r--drivers/media/usb/dvb-usb/dibusb-mb.c2
-rw-r--r--drivers/media/usb/dvb-usb/dibusb-mc.c2
-rw-r--r--drivers/media/usb/dvb-usb/digitv.c4
-rw-r--r--drivers/media/usb/dvb-usb/dtt200u.c2
-rw-r--r--drivers/media/usb/dvb-usb/dtv5100.c4
-rw-r--r--drivers/media/usb/dvb-usb/dvb-usb.h6
-rw-r--r--drivers/media/usb/dvb-usb/dw2102.c16
-rw-r--r--drivers/media/usb/dvb-usb/gp8psk.c2
-rw-r--r--drivers/media/usb/dvb-usb/m920x.c4
-rw-r--r--drivers/media/usb/dvb-usb/nova-t-usb2.c2
-rw-r--r--drivers/media/usb/dvb-usb/opera1.c4
-rw-r--r--drivers/media/usb/dvb-usb/pctv452e.c6
-rw-r--r--drivers/media/usb/dvb-usb/technisat-usb2.c4
-rw-r--r--drivers/media/usb/dvb-usb/ttusb2.c4
-rw-r--r--drivers/media/usb/dvb-usb/umt-010.c2
-rw-r--r--drivers/media/usb/dvb-usb/vp702x.c2
-rw-r--r--drivers/media/usb/dvb-usb/vp7045.c2
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-v4l2.c40
-rw-r--r--drivers/media/usb/pwc/pwc-if.c1
-rw-r--r--drivers/media/usb/uvc/uvc_ctrl.c799
-rw-r--r--drivers/media/usb/uvc/uvc_driver.c44
-rw-r--r--drivers/media/usb/uvc/uvc_v4l2.c74
-rw-r--r--drivers/media/usb/uvc/uvcvideo.h32
-rw-r--r--drivers/media/v4l2-core/v4l2-common.c32
-rw-r--r--drivers/media/v4l2-core/v4l2-ctrls-api.c105
-rw-r--r--drivers/media/v4l2-core/v4l2-ctrls-core.c169
-rw-r--r--drivers/media/v4l2-core/v4l2-dev.c6
-rw-r--r--drivers/media/v4l2-core/v4l2-dv-timings.c40
-rw-r--r--drivers/media/v4l2-core/v4l2-ioctl.c43
-rw-r--r--drivers/media/v4l2-core/v4l2-subdev.c2
-rw-r--r--drivers/memory/mtk-smi.c33
-rw-r--r--drivers/memory/omap-gpmc.c20
-rw-r--r--drivers/memory/tegra/tegra20-emc.c4
-rw-r--r--drivers/memstick/core/ms_block.c2
-rw-r--r--drivers/memstick/core/mspro_block.c4
-rw-r--r--drivers/memstick/host/rtsx_usb_ms.c1
-rw-r--r--drivers/message/fusion/mptsas.c8
-rw-r--r--drivers/message/fusion/mptscsih.c64
-rw-r--r--drivers/message/fusion/mptscsih.h1
-rw-r--r--drivers/mfd/intel-lpss.c2
-rw-r--r--drivers/misc/cardreader/rtsx_usb.c15
-rw-r--r--drivers/misc/eeprom/digsy_mtc_eeprom.c2
-rw-r--r--drivers/misc/mei/Kconfig2
-rw-r--r--drivers/misc/mei/hw-me-regs.h2
-rw-r--r--drivers/misc/mei/pci-me.c2
-rw-r--r--drivers/misc/mei/vsc-tp.c2
-rw-r--r--drivers/misc/ntsync.c7
-rw-r--r--drivers/misc/vcpu_stall_detector.c3
-rw-r--r--drivers/mmc/core/core.c2
-rw-r--r--drivers/mmc/core/mmc.c6
-rw-r--r--drivers/mmc/core/pwrseq_simple.c3
-rw-r--r--drivers/mmc/core/queue.c2
-rw-r--r--drivers/mmc/core/sd.c4
-rw-r--r--drivers/mmc/core/sdio_uart.c2
-rw-r--r--drivers/mmc/core/slot-gpio.c12
-rw-r--r--drivers/mmc/host/atmel-mci.c4
-rw-r--r--drivers/mmc/host/cqhci-crypto.c8
-rw-r--r--drivers/mmc/host/dw_mmc-exynos.c41
-rw-r--r--drivers/mmc/host/dw_mmc.c97
-rw-r--r--drivers/mmc/host/dw_mmc.h27
-rw-r--r--drivers/mmc/host/omap.c19
-rw-r--r--drivers/mmc/host/renesas_sdhi.h1
-rw-r--r--drivers/mmc/host/renesas_sdhi_core.c131
-rw-r--r--drivers/mmc/host/sdhci-brcmstb.c10
-rw-r--r--drivers/mmc/host/sdhci-msm.c5
-rw-r--r--drivers/mmc/host/sdhci-of-dwcmshc.c72
-rw-r--r--drivers/mmc/host/sdhci-omap.c4
-rw-r--r--drivers/mmc/host/sdhci-pci-core.c6
-rw-r--r--drivers/mmc/host/sdhci-pxav3.c1
-rw-r--r--drivers/mmc/host/sdhci.c9
-rw-r--r--drivers/mmc/host/tmio_mmc.h10
-rw-r--r--drivers/mtd/devices/mchp48l640.c9
-rw-r--r--drivers/mtd/mtdcore.c14
-rw-r--r--drivers/mtd/mtdpart.c3
-rw-r--r--drivers/mtd/mtdpstore.c12
-rw-r--r--drivers/mtd/nand/Makefile4
-rw-r--r--drivers/mtd/nand/raw/brcmnand/brcmnand.c2
-rw-r--r--drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c5
-rw-r--r--drivers/mtd/nand/raw/nand_base.c4
-rw-r--r--drivers/mtd/nand/raw/qcom_nandc.c36
-rw-r--r--drivers/mtd/nand/spi/Makefile3
-rw-r--r--drivers/mtd/nand/spi/core.c85
-rw-r--r--drivers/mtd/nand/spi/esmt.c90
-rw-r--r--drivers/mtd/nand/spi/macronix.c79
-rw-r--r--drivers/mtd/nand/spi/micron.c135
-rw-r--r--drivers/mtd/nand/spi/otp.c362
-rw-r--r--drivers/mtd/spi-nor/core.c77
-rw-r--r--drivers/mtd/spi-nor/macronix.c31
-rw-r--r--drivers/mtd/spi-nor/otp.c1
-rw-r--r--drivers/mtd/spi-nor/swp.c1
-rw-r--r--drivers/mtd/spi-nor/winbond.c88
-rw-r--r--drivers/mtd/ubi/block.c2
-rw-r--r--drivers/mux/gpio.c4
-rw-r--r--drivers/net/Kconfig24
-rw-r--r--drivers/net/Makefile1
-rw-r--r--drivers/net/amt.c13
-rw-r--r--drivers/net/bareudp.c9
-rw-r--r--drivers/net/bonding/bond_main.c47
-rw-r--r--drivers/net/bonding/bond_netlink.c6
-rw-r--r--drivers/net/bonding/bond_options.c58
-rw-r--r--drivers/net/caif/caif_serial.c14
-rw-r--r--drivers/net/caif/caif_virtio.c2
-rw-r--r--drivers/net/can/c_can/c_can_platform.c51
-rw-r--r--drivers/net/can/dev/netlink.c4
-rw-r--r--drivers/net/can/flexcan/flexcan-core.c80
-rw-r--r--drivers/net/can/flexcan/flexcan.h6
-rw-r--r--drivers/net/can/m_can/m_can.c9
-rw-r--r--drivers/net/can/rcar/rcar_canfd.c28
-rw-r--r--drivers/net/can/rockchip/rockchip_canfd-core.c5
-rw-r--r--drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c8
-rw-r--r--drivers/net/can/usb/gs_usb.c5
-rw-r--r--drivers/net/can/usb/ucan.c43
-rw-r--r--drivers/net/can/vxcan.c7
-rw-r--r--drivers/net/dsa/Kconfig1
-rw-r--r--drivers/net/dsa/b53/b53_common.c14
-rw-r--r--drivers/net/dsa/b53/b53_mdio.c1
-rw-r--r--drivers/net/dsa/b53/b53_priv.h2
-rw-r--r--drivers/net/dsa/b53/b53_serdes.c1
-rw-r--r--drivers/net/dsa/microchip/ksz8.c11
-rw-r--r--drivers/net/dsa/microchip/ksz_dcb.c231
-rw-r--r--drivers/net/dsa/mt7530.c318
-rw-r--r--drivers/net/dsa/mt7530.h8
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.c103
-rw-r--r--drivers/net/dsa/mv88e6xxx/pcs-6185.c1
-rw-r--r--drivers/net/dsa/mv88e6xxx/pcs-6352.c1
-rw-r--r--drivers/net/dsa/mv88e6xxx/pcs-639x.c4
-rw-r--r--drivers/net/dsa/qca/qca8k-8xxx.c1
-rw-r--r--drivers/net/dsa/realtek/Kconfig2
-rw-r--r--drivers/net/dsa/rzn1_a5psw.c8
-rw-r--r--drivers/net/dsa/sja1105/sja1105_ethtool.c9
-rw-r--r--drivers/net/dsa/sja1105/sja1105_mdio.c6
-rw-r--r--drivers/net/dsa/sja1105/sja1105_ptp.c20
-rw-r--r--drivers/net/dsa/sja1105/sja1105_static_config.c6
-rw-r--r--drivers/net/dummy.c1
-rw-r--r--drivers/net/ethernet/Kconfig1
-rw-r--r--drivers/net/ethernet/Makefile1
-rw-r--r--drivers/net/ethernet/actions/owl-emac.c7
-rw-r--r--drivers/net/ethernet/adi/adin1110.c2
-rw-r--r--drivers/net/ethernet/airoha/Kconfig27
-rw-r--r--drivers/net/ethernet/airoha/Makefile9
-rw-r--r--drivers/net/ethernet/airoha/airoha_eth.c (renamed from drivers/net/ethernet/mediatek/airoha_eth.c)1370
-rw-r--r--drivers/net/ethernet/airoha/airoha_eth.h552
-rw-r--r--drivers/net/ethernet/airoha/airoha_npu.c520
-rw-r--r--drivers/net/ethernet/airoha/airoha_npu.h34
-rw-r--r--drivers/net/ethernet/airoha/airoha_ppe.c910
-rw-r--r--drivers/net/ethernet/airoha/airoha_ppe_debugfs.c181
-rw-r--r--drivers/net/ethernet/airoha/airoha_regs.h803
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_netdev.c47
-rw-r--r--drivers/net/ethernet/amd/au1000_eth.c2
-rw-r--r--drivers/net/ethernet/apm/xgene-v2/main.c4
-rw-r--r--drivers/net/ethernet/apm/xgene-v2/mdio.c18
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c10
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_drvinfo.c14
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c749
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.h15
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c9
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c112
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c85
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h143
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c6
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c16
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c18
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c20
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h3
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.c1089
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.h52
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c89
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmmii.c6
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad.c1
-rw-r--r--drivers/net/ethernet/cadence/macb.h132
-rw-r--r--drivers/net/ethernet/cadence/macb_main.c231
-rw-r--r--drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c76
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c21
-rw-r--r--drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c16
-rw-r--r--drivers/net/ethernet/cisco/enic/Kconfig1
-rw-r--r--drivers/net/ethernet/cisco/enic/Makefile2
-rw-r--r--drivers/net/ethernet/cisco/enic/cq_desc.h25
-rw-r--r--drivers/net/ethernet/cisco/enic/cq_enet_desc.h142
-rw-r--r--drivers/net/ethernet/cisco/enic/enic.h17
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_ethtool.c51
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_main.c343
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_res.c87
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_res.h11
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_rq.c436
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_rq.h8
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_wq.c117
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_wq.h7
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_cq.h45
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_devcmd.h19
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_enet.h5
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_rq.h4
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_wq.h2
-rw-r--r--drivers/net/ethernet/cortina/gemini.c6
-rw-r--r--drivers/net/ethernet/dec/tulip/tulip_core.c7
-rw-r--r--drivers/net/ethernet/ec_bhf.c3
-rw-r--r--drivers/net/ethernet/emulex/benet/be.h2
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.c197
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c2
-rw-r--r--drivers/net/ethernet/engleder/tsnep_main.c25
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c52
-rw-r--r--drivers/net/ethernet/freescale/fec_ptp.c5
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_dtsec.c1
-rw-r--r--drivers/net/ethernet/freescale/gianfar.c14
-rw-r--r--drivers/net/ethernet/freescale/ucc_geth.c2
-rw-r--r--drivers/net/ethernet/freescale/ucc_geth.h2
-rw-r--r--drivers/net/ethernet/google/gve/gve.h94
-rw-r--r--drivers/net/ethernet/google/gve/gve_adminq.c70
-rw-r--r--drivers/net/ethernet/google/gve/gve_buffer_mgmt_dqo.c45
-rw-r--r--drivers/net/ethernet/google/gve/gve_ethtool.c90
-rw-r--r--drivers/net/ethernet/google/gve/gve_main.c384
-rw-r--r--drivers/net/ethernet/google/gve/gve_rx.c30
-rw-r--r--drivers/net/ethernet/google/gve/gve_rx_dqo.c110
-rw-r--r--drivers/net/ethernet/google/gve/gve_tx.c41
-rw-r--r--drivers/net/ethernet/google/gve/gve_tx_dqo.c31
-rw-r--r--drivers/net/ethernet/google/gve/gve_utils.c6
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/Makefile2
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_common.h122
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_debugfs.c7
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_diagnose.c348
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_diagnose.h11
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_err.c58
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_err.h1
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.c298
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.h5
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.c10
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_irq.c55
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_main.c103
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.c22
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.h2
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_reg.h105
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_txrx.c181
-rw-r--r--drivers/net/ethernet/hisilicon/hip04_eth.c4
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_ethtool.c4
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c24
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c3
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c63
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c14
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c5
-rw-r--r--drivers/net/ethernet/hisilicon/hns_mdio.c2
-rw-r--r--drivers/net/ethernet/ibm/emac/core.c7
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.c48
-rw-r--r--drivers/net/ethernet/intel/Kconfig3
-rw-r--r--drivers/net/ethernet/intel/e1000e/mac.c15
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_xsk.c4
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_xsk.h10
-rw-r--r--drivers/net/ethernet/intel/iavf/Makefile2
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf.h35
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_ethtool.c2
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_main.c245
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_ptp.c485
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_ptp.h47
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_trace.h6
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_txrx.c433
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_txrx.h24
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_type.h239
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_types.h34
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_virtchnl.c203
-rw-r--r--drivers/net/ethernet/intel/ice/devlink/devlink.c102
-rw-r--r--drivers/net/ethernet/intel/ice/devlink/health.c6
-rw-r--r--drivers/net/ethernet/intel/ice/ice.h30
-rw-r--r--drivers/net/ethernet/intel/ice/ice_arfs.c35
-rw-r--r--drivers/net/ethernet/intel/ice/ice_arfs.h2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_base.c20
-rw-r--r--drivers/net/ethernet/intel/ice/ice_common.c211
-rw-r--r--drivers/net/ethernet/intel/ice/ice_common.h7
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ddp.c4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dpll.c14
-rw-r--r--drivers/net/ethernet/intel/ice/ice_eswitch.c6
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ethtool.c11
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c21
-rw-r--r--drivers/net/ethernet/intel/ice/ice_gnss.c29
-rw-r--r--drivers/net/ethernet/intel/ice/ice_gnss.h4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_hw_autogen.h12
-rw-r--r--drivers/net/ethernet/intel/ice/ice_idc.c64
-rw-r--r--drivers/net/ethernet/intel/ice/ice_irq.c275
-rw-r--r--drivers/net/ethernet/intel/ice/ice_irq.h13
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lag.c27
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h9
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lib.c84
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lib.h4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_main.c102
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp.c515
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp.h17
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp_consts.h75
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp_hw.c430
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp_hw.h63
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sriov.c154
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx.c31
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx.h2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx_lib.c26
-rw-r--r--drivers/net/ethernet/intel/ice/ice_type.h9
-rw-r--r--drivers/net/ethernet/intel/ice/ice_vf_lib.h3
-rw-r--r--drivers/net/ethernet/intel/ice/ice_virtchnl.c119
-rw-r--r--drivers/net/ethernet/intel/ice/ice_virtchnl.h6
-rw-r--r--drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c7
-rw-r--r--drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c24
-rw-r--r--drivers/net/ethernet/intel/ice/ice_xsk.c4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_xsk.h8
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_lib.c32
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c51
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_txrx.c38
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_txrx.h25
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ptp.c6
-rw-r--r--drivers/net/ethernet/intel/igc/igc.h1
-rw-r--r--drivers/net/ethernet/intel/igc/igc_main.c150
-rw-r--r--drivers/net/ethernet/intel/igc/igc_xdp.c19
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c1
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c21
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c4
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c13
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_type_e610.h3
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ipsec.c21
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c6
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c15
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/cgx.c14
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c1
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/ptp.c3
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu.h2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/Makefile2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c7
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c15
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c122
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h17
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c6
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c34
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c201
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h9
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c14
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_xsk.c225
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_xsk.h24
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/qos_sq.c2
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_main.c3
-rw-r--r--drivers/net/ethernet/mediatek/Kconfig8
-rw-r--r--drivers/net/ethernet/mediatek/Makefile1
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.c81
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.h11
-rw-r--r--drivers/net/ethernet/mediatek/mtk_ppe_offload.c22
-rw-r--r--drivers/net/ethernet/mediatek/mtk_star_emac.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/alloc.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c119
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_tx.c19
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4_en.h15
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/Kconfig5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/cmd.c120
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/devlink.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/diag/reporter_vnic.c46
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/dpll.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en.h14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/fs.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/params.c16
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/params.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/port.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c119
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rss.c28
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rss.h7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c20
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ct.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.h13
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c29
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tir.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tir.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h17
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c52
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c97
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h40
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c715
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c128
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c51
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rx.c22
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.c133
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.h8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tx.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eq.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/acl/helper.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c15
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.h5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c146
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/qos.h12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c36
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/events.c36
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.c178
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.h20
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_ft_pool.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_ft_pool.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fw.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c15
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/health.c15
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/hwmon.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/hwmon.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c49
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.c19
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c16
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h94
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c13
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/pci_irq.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/port.c168
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.c231
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.h24
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws_pools.c41
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pat_arg.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v1.c52
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v1.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v2.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v3.c42
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/vport.c25
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/pci.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/pci_hw.h5
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.c30
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c27
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c66
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/trap.h5
-rw-r--r--drivers/net/ethernet/meta/fbnic/Makefile3
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic.h9
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_csr.c1
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_csr.h84
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_debugfs.c138
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c882
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_fw.c109
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_fw.h8
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_netdev.c48
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_netdev.h9
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_phylink.c1
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_rpc.c356
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_rpc.h35
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_tlv.c55
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_tlv.h39
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_txrx.c269
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_txrx.h33
-rw-r--r--drivers/net/ethernet/micrel/ks8851_spi.c2
-rw-r--r--drivers/net/ethernet/microchip/lan743x_ptp.c6
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_main.c1
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_main.c1
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_packet.c4
-rw-r--r--drivers/net/ethernet/microsoft/mana/gdma_main.c75
-rw-r--r--drivers/net/ethernet/microsoft/mana/hw_channel.c6
-rw-r--r--drivers/net/ethernet/microsoft/mana/mana_bpf.c2
-rw-r--r--drivers/net/ethernet/microsoft/mana/mana_en.c78
-rw-r--r--drivers/net/ethernet/netronome/nfp/crypto/ipsec.c11
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_hwmon.c40
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_main.c2
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c8
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c9
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c4
-rw-r--r--drivers/net/ethernet/realtek/r8169_main.c81
-rw-r--r--drivers/net/ethernet/realtek/rtase/rtase_main.c10
-rw-r--r--drivers/net/ethernet/renesas/ravb_ptp.c3
-rw-r--r--drivers/net/ethernet/renesas/rcar_gen4_ptp.c2
-rw-r--r--drivers/net/ethernet/renesas/rswitch.c7
-rw-r--r--drivers/net/ethernet/rocker/rocker_main.c2
-rw-r--r--drivers/net/ethernet/sfc/Kconfig5
-rw-r--r--drivers/net/ethernet/sfc/Makefile2
-rw-r--r--drivers/net/ethernet/sfc/ef10.c8
-rw-r--r--drivers/net/ethernet/sfc/ef100_netdev.c1
-rw-r--r--drivers/net/ethernet/sfc/efx.c24
-rw-r--r--drivers/net/ethernet/sfc/efx_common.c1
-rw-r--r--drivers/net/ethernet/sfc/efx_devlink.c13
-rw-r--r--drivers/net/ethernet/sfc/efx_reflash.c522
-rw-r--r--drivers/net/ethernet/sfc/efx_reflash.h20
-rw-r--r--drivers/net/ethernet/sfc/fw_formats.h114
-rw-r--r--drivers/net/ethernet/sfc/mae.c2
-rw-r--r--drivers/net/ethernet/sfc/mcdi.c115
-rw-r--r--drivers/net/ethernet/sfc/mcdi.h22
-rw-r--r--drivers/net/ethernet/sfc/mcdi_pcol.h13842
-rw-r--r--drivers/net/ethernet/sfc/mcdi_port.c59
-rw-r--r--drivers/net/ethernet/sfc/mcdi_port_common.c11
-rw-r--r--drivers/net/ethernet/sfc/net_driver.h11
-rw-r--r--drivers/net/ethernet/sfc/tc.c6
-rw-r--r--drivers/net/ethernet/smsc/smsc911x.c1
-rw-r--r--drivers/net/ethernet/socionext/netsec.c7
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Kconfig12
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Makefile1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/common.h18
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c21
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c178
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c29
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c24
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c233
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-intel.h29
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c27
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c37
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c8
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c9
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c6
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c33
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c564
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-s32.c22
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c20
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sophgo.c75
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c27
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c20
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c8
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c8
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-thead.c46
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000.h13
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c33
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4.h12
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c96
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h9
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c49
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/hwif.h21
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac.h16
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c344
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c24
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c22
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c8
-rw-r--r--drivers/net/ethernet/tehuti/tn40.c9
-rw-r--r--drivers/net/ethernet/tehuti/tn40.h33
-rw-r--r--drivers/net/ethernet/tehuti/tn40_mdio.c84
-rw-r--r--drivers/net/ethernet/ti/Kconfig1
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-nuss.c252
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-nuss.h8
-rw-r--r--drivers/net/ethernet/ti/cpsw.c6
-rw-r--r--drivers/net/ethernet/ti/cpsw_new.c9
-rw-r--r--drivers/net/ethernet/ti/icssg/icss_iep.c63
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_common.c422
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_prueth.c137
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_prueth.h49
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c58
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_stats.c4
-rw-r--r--drivers/net/ethernet/toshiba/Kconfig11
-rw-r--r--drivers/net/ethernet/toshiba/Makefile2
-rw-r--r--drivers/net/ethernet/toshiba/spider_net.c2556
-rw-r--r--drivers/net/ethernet/toshiba/spider_net.h475
-rw-r--r--drivers/net/ethernet/toshiba/spider_net_ethtool.c174
-rw-r--r--drivers/net/ethernet/wangxun/Kconfig3
-rw-r--r--drivers/net/ethernet/wangxun/libwx/Makefile2
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_ethtool.c105
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_ethtool.h4
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_hw.c236
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_hw.h1
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_lib.c142
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_ptp.c883
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_ptp.h20
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_type.h135
-rw-r--r--drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c2
-rw-r--r--drivers/net/ethernet/wangxun/ngbe/ngbe_main.c20
-rw-r--r--drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c11
-rw-r--r--drivers/net/ethernet/wangxun/ngbe/ngbe_type.h5
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c2
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c6
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_irq.c7
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_main.c56
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c16
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_type.h14
-rw-r--r--drivers/net/ethernet/xilinx/Kconfig1
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet.h29
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_main.c315
-rw-r--r--drivers/net/geneve.c62
-rw-r--r--drivers/net/gtp.c10
-rw-r--r--drivers/net/hamradio/baycom_par.c4
-rw-r--r--drivers/net/hamradio/baycom_ser_fdx.c2
-rw-r--r--drivers/net/hamradio/baycom_ser_hdx.c4
-rw-r--r--drivers/net/hamradio/bpqether.c25
-rw-r--r--drivers/net/hyperv/hyperv_net.h2
-rw-r--r--drivers/net/hyperv/netvsc_drv.c16
-rw-r--r--drivers/net/hyperv/rndis_filter.c13
-rw-r--r--drivers/net/ieee802154/at86rf230.c4
-rw-r--r--drivers/net/ieee802154/ca8210.c78
-rw-r--r--drivers/net/ipa/data/ipa_data-v4.7.c18
-rw-r--r--drivers/net/ipvlan/ipvlan.h3
-rw-r--r--drivers/net/ipvlan/ipvlan_l3s.c1
-rw-r--r--drivers/net/ipvlan/ipvlan_main.c9
-rw-r--r--drivers/net/ipvlan/ipvtap.c6
-rw-r--r--drivers/net/loopback.c3
-rw-r--r--drivers/net/macsec.c10
-rw-r--r--drivers/net/macvlan.c22
-rw-r--r--drivers/net/macvtap.c6
-rw-r--r--drivers/net/mctp/Kconfig10
-rw-r--r--drivers/net/mctp/Makefile1
-rw-r--r--drivers/net/mctp/mctp-i2c.c7
-rw-r--r--drivers/net/mctp/mctp-i3c.c8
-rw-r--r--drivers/net/mctp/mctp-usb.c385
-rw-r--r--drivers/net/mdio/mdio-i2c.c79
-rw-r--r--drivers/net/net_failover.c2
-rw-r--r--drivers/net/netconsole.c393
-rw-r--r--drivers/net/netdevsim/bpf.c3
-rw-r--r--drivers/net/netdevsim/ethtool.c2
-rw-r--r--drivers/net/netdevsim/ipsec.c11
-rw-r--r--drivers/net/netdevsim/netdev.c78
-rw-r--r--drivers/net/netdevsim/netdevsim.h2
-rw-r--r--drivers/net/netkit.c15
-rw-r--r--drivers/net/pcs/pcs-lynx.c1
-rw-r--r--drivers/net/pcs/pcs-mtk-lynxi.c1
-rw-r--r--drivers/net/pcs/pcs-rzn1-miic.c22
-rw-r--r--drivers/net/pcs/pcs-xpcs.c105
-rw-r--r--drivers/net/pcs/pcs-xpcs.h26
-rw-r--r--drivers/net/pfcp.c9
-rw-r--r--drivers/net/phy/Kconfig2
-rw-r--r--drivers/net/phy/Makefile3
-rw-r--r--drivers/net/phy/adin1100.c5
-rw-r--r--drivers/net/phy/aquantia/aquantia_firmware.c7
-rw-r--r--drivers/net/phy/aquantia/aquantia_hwmon.c32
-rw-r--r--drivers/net/phy/aquantia/aquantia_main.c240
-rw-r--r--drivers/net/phy/bcm-phy-ptp.c3
-rw-r--r--drivers/net/phy/bcm54140.c1
-rw-r--r--drivers/net/phy/broadcom.c2
-rw-r--r--drivers/net/phy/dp83822.c38
-rw-r--r--drivers/net/phy/dp83867.c5
-rw-r--r--drivers/net/phy/dp83td510.c187
-rw-r--r--drivers/net/phy/dp83tg720.c78
-rw-r--r--drivers/net/phy/fixed_phy.c16
-rw-r--r--drivers/net/phy/marvell-88q2xxx.c280
-rw-r--r--drivers/net/phy/marvell.c92
-rw-r--r--drivers/net/phy/marvell10g.c24
-rw-r--r--drivers/net/phy/mdio_bus.c14
-rw-r--r--drivers/net/phy/mediatek/mtk-ge-soc.c303
-rw-r--r--drivers/net/phy/mediatek/mtk-ge.c78
-rw-r--r--drivers/net/phy/mediatek/mtk-phy-lib.c77
-rw-r--r--drivers/net/phy/mediatek/mtk.h15
-rw-r--r--drivers/net/phy/micrel.c33
-rw-r--r--drivers/net/phy/mscc/mscc_main.c2
-rw-r--r--drivers/net/phy/mscc/mscc_ptp.c14
-rw-r--r--drivers/net/phy/mxl-gpy.c19
-rw-r--r--drivers/net/phy/nxp-c45-tja11xx.c162
-rw-r--r--drivers/net/phy/nxp-tja11xx.c57
-rw-r--r--drivers/net/phy/phy-c45.c55
-rw-r--r--drivers/net/phy/phy-caps.h63
-rw-r--r--drivers/net/phy/phy-core.c334
-rw-r--r--drivers/net/phy/phy.c157
-rw-r--r--drivers/net/phy/phy_caps.c359
-rw-r--r--drivers/net/phy/phy_device.c414
-rw-r--r--drivers/net/phy/phy_led_triggers.c2
-rw-r--r--drivers/net/phy/phy_link_topology.c2
-rw-r--r--drivers/net/phy/phy_package.c350
-rw-r--r--drivers/net/phy/phylib-internal.h27
-rw-r--r--drivers/net/phy/phylib.h34
-rw-r--r--drivers/net/phy/phylink.c561
-rw-r--r--drivers/net/phy/qcom/qca807x.c16
-rw-r--r--drivers/net/phy/qt2025.rs2
-rw-r--r--drivers/net/phy/realtek/realtek_hwmon.c7
-rw-r--r--drivers/net/phy/realtek/realtek_main.c121
-rw-r--r--drivers/net/phy/sfp.c95
-rw-r--r--drivers/net/phy/xilinx_gmii2rgmii.c7
-rw-r--r--drivers/net/ppp/ppp_generic.c42
-rw-r--r--drivers/net/ppp/pppoe.c1
-rw-r--r--drivers/net/ppp/pptp.c1
-rw-r--r--drivers/net/sb1000.c1179
-rw-r--r--drivers/net/tap.c166
-rw-r--r--drivers/net/team/team_core.c9
-rw-r--r--drivers/net/tun.c221
-rw-r--r--drivers/net/tun_vnet.h186
-rw-r--r--drivers/net/usb/asix_devices.c17
-rw-r--r--drivers/net/usb/ax88172a.c12
-rw-r--r--drivers/net/usb/cdc_ether.c7
-rw-r--r--drivers/net/usb/cdc_mbim.c4
-rw-r--r--drivers/net/usb/cdc_ncm.c3
-rw-r--r--drivers/net/usb/lan78xx.c4
-rw-r--r--drivers/net/usb/qmi_wwan.c6
-rw-r--r--drivers/net/usb/r8152.c7
-rw-r--r--drivers/net/usb/r8153_ecm.c6
-rw-r--r--drivers/net/veth.c11
-rw-r--r--drivers/net/virtio_net.c277
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c10
-rw-r--r--drivers/net/vrf.c14
-rw-r--r--drivers/net/vxlan/vxlan_core.c36
-rw-r--r--drivers/net/wireguard/device.c7
-rw-r--r--drivers/net/wireless/ath/ath10k/core.c13
-rw-r--r--drivers/net/wireless/ath/ath11k/Makefile1
-rw-r--r--drivers/net/wireless/ath/ath11k/ahb.c4
-rw-r--r--drivers/net/wireless/ath/ath11k/core.c11
-rw-r--r--drivers/net/wireless/ath/ath11k/core.h13
-rw-r--r--drivers/net/wireless/ath/ath11k/coredump.c52
-rw-r--r--drivers/net/wireless/ath/ath11k/coredump.h79
-rw-r--r--drivers/net/wireless/ath/ath11k/dp.c35
-rw-r--r--drivers/net/wireless/ath/ath11k/dp.h6
-rw-r--r--drivers/net/wireless/ath/ath11k/dp_rx.c133
-rw-r--r--drivers/net/wireless/ath/ath11k/fw.c3
-rw-r--r--drivers/net/wireless/ath/ath11k/hif.h7
-rw-r--r--drivers/net/wireless/ath/ath11k/mac.c145
-rw-r--r--drivers/net/wireless/ath/ath11k/mhi.c5
-rw-r--r--drivers/net/wireless/ath/ath11k/mhi.h1
-rw-r--r--drivers/net/wireless/ath/ath11k/pci.c195
-rw-r--r--drivers/net/wireless/ath/ath11k/qmi.c17
-rw-r--r--drivers/net/wireless/ath/ath11k/qmi.h10
-rw-r--r--drivers/net/wireless/ath/ath11k/reg.c107
-rw-r--r--drivers/net/wireless/ath/ath11k/reg.h3
-rw-r--r--drivers/net/wireless/ath/ath11k/testmode.c80
-rw-r--r--drivers/net/wireless/ath/ath11k/wmi.c11
-rw-r--r--drivers/net/wireless/ath/ath11k/wmi.h11
-rw-r--r--drivers/net/wireless/ath/ath12k/Makefile3
-rw-r--r--drivers/net/wireless/ath/ath12k/acpi.c202
-rw-r--r--drivers/net/wireless/ath/ath12k/acpi.h40
-rw-r--r--drivers/net/wireless/ath/ath12k/core.c103
-rw-r--r--drivers/net/wireless/ath/ath12k/core.h139
-rw-r--r--drivers/net/wireless/ath/ath12k/debug.c6
-rw-r--r--drivers/net/wireless/ath/ath12k/debug.h10
-rw-r--r--drivers/net/wireless/ath/ath12k/debugfs.c1191
-rw-r--r--drivers/net/wireless/ath/ath12k/debugfs.h115
-rw-r--r--drivers/net/wireless/ath/ath12k/debugfs_htt_stats.c1238
-rw-r--r--drivers/net/wireless/ath/ath12k/debugfs_htt_stats.h453
-rw-r--r--drivers/net/wireless/ath/ath12k/debugfs_sta.c337
-rw-r--r--drivers/net/wireless/ath/ath12k/debugfs_sta.h24
-rw-r--r--drivers/net/wireless/ath/ath12k/dp.c5
-rw-r--r--drivers/net/wireless/ath/ath12k/dp.h82
-rw-r--r--drivers/net/wireless/ath/ath12k/dp_mon.c1419
-rw-r--r--drivers/net/wireless/ath/ath12k/dp_mon.h11
-rw-r--r--drivers/net/wireless/ath/ath12k/dp_rx.c66
-rw-r--r--drivers/net/wireless/ath/ath12k/dp_rx.h8
-rw-r--r--drivers/net/wireless/ath/ath12k/dp_tx.c253
-rw-r--r--drivers/net/wireless/ath/ath12k/dp_tx.h4
-rw-r--r--drivers/net/wireless/ath/ath12k/hal_desc.h5
-rw-r--r--drivers/net/wireless/ath/ath12k/hal_rx.h442
-rw-r--r--drivers/net/wireless/ath/ath12k/hal_tx.h10
-rw-r--r--drivers/net/wireless/ath/ath12k/hw.c8
-rw-r--r--drivers/net/wireless/ath/ath12k/mac.c870
-rw-r--r--drivers/net/wireless/ath/ath12k/mac.h10
-rw-r--r--drivers/net/wireless/ath/ath12k/pci.c22
-rw-r--r--drivers/net/wireless/ath/ath12k/qmi.c23
-rw-r--r--drivers/net/wireless/ath/ath12k/reg.h5
-rw-r--r--drivers/net/wireless/ath/ath12k/rx_desc.h12
-rw-r--r--drivers/net/wireless/ath/ath12k/testmode.c395
-rw-r--r--drivers/net/wireless/ath/ath12k/testmode.h40
-rw-r--r--drivers/net/wireless/ath/ath12k/wmi.c1176
-rw-r--r--drivers/net/wireless/ath/ath12k/wmi.h290
-rw-r--r--drivers/net/wireless/ath/ath12k/wow.c3
-rw-r--r--drivers/net/wireless/ath/ath9k/ath9k.h3
-rw-r--r--drivers/net/wireless/ath/ath9k/common-spectral.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/init.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/xmit.c9
-rw-r--r--drivers/net/wireless/ath/testmode_i.h (renamed from drivers/net/wireless/ath/ath11k/testmode_i.h)54
-rw-r--r--drivers/net/wireless/broadcom/b43/main.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c20
-rw-r--r--drivers/net/wireless/intel/ipw2x00/ipw2200.c2
-rw-r--r--drivers/net/wireless/intel/ipw2x00/libipw.h2
-rw-r--r--drivers/net/wireless/intel/ipw2x00/libipw_rx.c91
-rw-r--r--drivers/net/wireless/intel/iwlegacy/4965-rs.c18
-rw-r--r--drivers/net/wireless/intel/iwlegacy/common.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/Kconfig15
-rw-r--r--drivers/net/wireless/intel/iwlwifi/Makefile5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/22000.c1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/ax210.c1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/bz.c21
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/dr.c8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/sc.c14
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/commands.h154
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/tt.c11
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/tt.h1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/alive.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/commands.h7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/context.h6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/d3.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h19
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h9
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/debug.h42
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/dhc.h226
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/location.h66
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h95
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/phy.h22
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/rs.h52
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/sta.h1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h42
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/dbg.c194
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/dbg.h3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/dhc-utils.h75
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/dump.c3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/regulatory.c56
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/regulatory.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/runtime.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/uefi.c13
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/uefi.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-config.h30
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-debug.h9
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-drv.c38
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c34
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-trans.c12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-trans.h11
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/Makefile16
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/agg.c670
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/agg.h127
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/ap.c344
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/ap.h45
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/coex.c40
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/coex.h15
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/constants.h88
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/d3.c1998
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/d3.h51
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/debugfs.c1082
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/debugfs.h244
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/ftm-initiator.c451
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/ftm-initiator.h29
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/fw.c536
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/hcmd.h56
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/iface.c671
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/iface.h233
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/key.c358
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/key.h39
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/led.c100
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/led.h29
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/link.c1213
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/link.h153
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/low_latency.c339
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/low_latency.h68
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/mac80211.c2670
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/mac80211.h13
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/mcc.c329
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/mcc.h17
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/mld.c720
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/mld.h582
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/mlo.c1076
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/mlo.h167
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/notif.c759
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/notif.h35
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/phy.c155
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/phy.h55
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/power.c396
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/power.h33
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/ptp.c321
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/ptp.h45
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/regulatory.c393
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/regulatory.h23
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/roc.c224
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/roc.h20
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/rx.c2060
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/rx.h72
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/scan.c2008
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/scan.h136
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/session-protect.c222
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/session-protect.h102
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/sta.c1289
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/sta.h266
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/stats.c513
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/stats.h22
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/tests/Makefile5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/tests/agg.c663
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/tests/hcmd.c62
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/tests/link-selection.c303
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/tests/link.c110
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/tests/module.c11
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/tests/rx.c353
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/tests/utils.c474
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/tests/utils.h134
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/thermal.c438
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/thermal.h36
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/time_sync.c240
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/time_sync.h26
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/tlc.c700
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/tlc.h23
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/tx.c1374
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/tx.h77
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/d3.c79
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c45
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c123
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c86
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw.c60
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c23
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c24
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mvm.h19
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ops.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.c61
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/time-event.c8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tt.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/drv.c261
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/internal.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/tx.c23
-rw-r--r--drivers/net/wireless/intel/iwlwifi/tests/devinfo.c15
-rw-r--r--drivers/net/wireless/marvell/libertas/cmd.c143
-rw-r--r--drivers/net/wireless/marvell/libertas/cmd.h10
-rw-r--r--drivers/net/wireless/marvell/libertas/cmdresp.c1
-rw-r--r--drivers/net/wireless/marvell/libertas/decl.h4
-rw-r--r--drivers/net/wireless/marvell/libertas/dev.h4
-rw-r--r--drivers/net/wireless/marvell/libertas/main.c88
-rw-r--r--drivers/net/wireless/marvell/mwifiex/11n.c6
-rw-r--r--drivers/net/wireless/marvell/mwifiex/cfp.c2
-rw-r--r--drivers/net/wireless/marvell/mwifiex/fw.h14
-rw-r--r--drivers/net/wireless/marvell/mwifiex/main.c8
-rw-r--r--drivers/net/wireless/marvell/mwifiex/main.h4
-rw-r--r--drivers/net/wireless/marvell/mwifiex/pcie.c4
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sdio.c4
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_cmd.c18
-rw-r--r--drivers/net/wireless/marvell/mwifiex/uap_event.c16
-rw-r--r--drivers/net/wireless/marvell/mwifiex/usb.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/channel.c3
-rw-r--r--drivers/net/wireless/mediatek/mt76/eeprom.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mac80211.c16
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76.h8
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.h3
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/pci.c3
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/usb.c3
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/pci.c3
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/usb.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c53
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mcu.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mcu.h12
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/main.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/init.c96
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/main.c164
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/mcu.c274
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/mcu.h6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h36
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt792x.h19
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt792x_acpi_sar.c123
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt792x_acpi_sar.h18
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt792x_core.c3
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/debugfs.c56
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/mac.c306
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/main.c976
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/mcu.c636
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/mcu.h47
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/mmio.c5
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h79
-rw-r--r--drivers/net/wireless/mediatek/mt76/scan.c21
-rw-r--r--drivers/net/wireless/mediatek/mt76/tx.c3
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/qlink.h21
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800mmio.c2
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800usb.c2
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00dev.c4
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/8192c.c2
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/core.c17
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c6
-rw-r--r--drivers/net/wireless/realtek/rtw88/Kconfig25
-rw-r--r--drivers/net/wireless/realtek/rtw88/Makefile9
-rw-r--r--drivers/net/wireless/realtek/rtw88/debug.c57
-rw-r--r--drivers/net/wireless/realtek/rtw88/fw.c15
-rw-r--r--drivers/net/wireless/realtek/rtw88/fw.h1
-rw-r--r--drivers/net/wireless/realtek/rtw88/mac.c7
-rw-r--r--drivers/net/wireless/realtek/rtw88/main.c58
-rw-r--r--drivers/net/wireless/realtek/rtw88/main.h45
-rw-r--r--drivers/net/wireless/realtek/rtw88/pci.c4
-rw-r--r--drivers/net/wireless/realtek/rtw88/phy.c215
-rw-r--r--drivers/net/wireless/realtek/rtw88/phy.h20
-rw-r--r--drivers/net/wireless/realtek/rtw88/reg.h69
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8723d.c4
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8814a.c2257
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8814a.h62
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8814a_table.c23930
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8814a_table.h40
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8814ae.c31
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8814au.c54
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8821c.c16
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822b.c16
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822bu.c4
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822c.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw88xxa.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/rx.c6
-rw-r--r--drivers/net/wireless/realtek/rtw88/sar.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/sdio.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/usb.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/util.c3
-rw-r--r--drivers/net/wireless/realtek/rtw89/Kconfig2
-rw-r--r--drivers/net/wireless/realtek/rtw89/cam.c6
-rw-r--r--drivers/net/wireless/realtek/rtw89/chan.c38
-rw-r--r--drivers/net/wireless/realtek/rtw89/chan.h2
-rw-r--r--drivers/net/wireless/realtek/rtw89/coex.c2959
-rw-r--r--drivers/net/wireless/realtek/rtw89/coex.h18
-rw-r--r--drivers/net/wireless/realtek/rtw89/core.c240
-rw-r--r--drivers/net/wireless/realtek/rtw89/core.h208
-rw-r--r--drivers/net/wireless/realtek/rtw89/debug.c2063
-rw-r--r--drivers/net/wireless/realtek/rtw89/fw.c369
-rw-r--r--drivers/net/wireless/realtek/rtw89/fw.h103
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac.c80
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac.h5
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac80211.c282
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac_be.c8
-rw-r--r--drivers/net/wireless/realtek/rtw89/pci.h56
-rw-r--r--drivers/net/wireless/realtek/rtw89/pci_be.c2
-rw-r--r--drivers/net/wireless/realtek/rtw89/phy.c789
-rw-r--r--drivers/net/wireless/realtek/rtw89/phy.h22
-rw-r--r--drivers/net/wireless/realtek/rtw89/ps.c6
-rw-r--r--drivers/net/wireless/realtek/rtw89/reg.h44
-rw-r--r--drivers/net/wireless/realtek/rtw89/regd.c601
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8851b.c26
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852a.c26
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852b.c26
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852b_common.c6
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852b_rfk.c13
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852bt.c26
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk.c13
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852c.c28
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8922a.c74
-rw-r--r--drivers/net/wireless/realtek/rtw89/sar.c422
-rw-r--r--drivers/net/wireless/realtek/rtw89/sar.h10
-rw-r--r--drivers/net/wireless/realtek/rtw89/ser.c17
-rw-r--r--drivers/net/wireless/realtek/rtw89/util.c220
-rw-r--r--drivers/net/wireless/realtek/rtw89/util.h13
-rw-r--r--drivers/net/wireless/realtek/rtw89/wow.c7
-rw-r--r--drivers/net/wireless/silabs/wfx/bus.h1
-rw-r--r--drivers/net/wireless/silabs/wfx/bus_sdio.c54
-rw-r--r--drivers/net/wireless/silabs/wfx/bus_spi.c45
-rw-r--r--drivers/net/wireless/silabs/wfx/main.c14
-rw-r--r--drivers/net/wireless/silabs/wfx/sta.c25
-rw-r--r--drivers/net/wireless/silabs/wfx/sta.h3
-rw-r--r--drivers/net/wireless/virtual/mac80211_hwsim.c21
-rw-r--r--drivers/net/wireless/virtual/virt_wifi.c10
-rw-r--r--drivers/net/wireless/zydas/zd1211rw/zd_mac.c2
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_imem.c24
-rw-r--r--drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c1
-rw-r--r--drivers/net/wwan/t7xx/t7xx_pci.c1
-rw-r--r--drivers/net/wwan/wwan_core.c16
-rw-r--r--drivers/ntb/test/ntb_pingpong.c3
-rw-r--r--drivers/nvdimm/badrange.c2
-rw-r--r--drivers/nvdimm/nd.h2
-rw-r--r--drivers/nvdimm/pfn_devs.c7
-rw-r--r--drivers/nvdimm/pmem.c2
-rw-r--r--drivers/nvme/common/Kconfig1
-rw-r--r--drivers/nvme/common/auth.c337
-rw-r--r--drivers/nvme/common/keyring.c65
-rw-r--r--drivers/nvme/host/Kconfig2
-rw-r--r--drivers/nvme/host/apple.c7
-rw-r--r--drivers/nvme/host/auth.c115
-rw-r--r--drivers/nvme/host/core.c15
-rw-r--r--drivers/nvme/host/fabrics.c34
-rw-r--r--drivers/nvme/host/fabrics.h3
-rw-r--r--drivers/nvme/host/fc.c6
-rw-r--r--drivers/nvme/host/ioctl.c12
-rw-r--r--drivers/nvme/host/multipath.c138
-rw-r--r--drivers/nvme/host/nvme.h22
-rw-r--r--drivers/nvme/host/pci.c44
-rw-r--r--drivers/nvme/host/rdma.c3
-rw-r--r--drivers/nvme/host/sysfs.c24
-rw-r--r--drivers/nvme/host/tcp.c112
-rw-r--r--drivers/nvme/host/zns.c10
-rw-r--r--drivers/nvme/target/auth.c72
-rw-r--r--drivers/nvme/target/core.c9
-rw-r--r--drivers/nvme/target/debugfs.c27
-rw-r--r--drivers/nvme/target/fabrics-cmd-auth.c60
-rw-r--r--drivers/nvme/target/fabrics-cmd.c25
-rw-r--r--drivers/nvme/target/fc.c14
-rw-r--r--drivers/nvme/target/loop.c2
-rw-r--r--drivers/nvme/target/nvmet.h41
-rw-r--r--drivers/nvme/target/pci-epf.c40
-rw-r--r--drivers/nvme/target/tcp.c47
-rw-r--r--drivers/nvmem/brcm_nvram.c2
-rw-r--r--drivers/nvmem/layouts/u-boot-env.c2
-rw-r--r--drivers/of/base.c27
-rw-r--r--drivers/of/device.c7
-rw-r--r--drivers/of/of_reserved_mem.c4
-rw-r--r--drivers/of/property.c33
-rw-r--r--drivers/pci/Kconfig6
-rw-r--r--drivers/pci/controller/pci-hyperv.c3
-rw-r--r--drivers/pci/controller/vmd.c20
-rw-r--r--drivers/pci/msi/msi.c35
-rw-r--r--drivers/pci/pci-driver.c9
-rw-r--r--drivers/perf/apple_m1_cpu_pmu.c105
-rw-r--r--drivers/perf/arm-ccn.c5
-rw-r--r--drivers/perf/arm-cmn.c5
-rw-r--r--drivers/perf/arm_cspmu/ampere_cspmu.c32
-rw-r--r--drivers/perf/arm_cspmu/arm_cspmu.c81
-rw-r--r--drivers/perf/arm_cspmu/arm_cspmu.h57
-rw-r--r--drivers/perf/arm_cspmu/nvidia_cspmu.c22
-rw-r--r--drivers/perf/arm_pmu.c8
-rw-r--r--drivers/perf/arm_pmuv3.c11
-rw-r--r--drivers/perf/arm_spe_pmu.c4
-rw-r--r--drivers/perf/arm_v7_pmu.c50
-rw-r--r--drivers/perf/dwc_pcie_pmu.c51
-rw-r--r--drivers/perf/marvell_cn10k_ddr_pmu.c4
-rw-r--r--drivers/perf/thunderx2_pmu.c5
-rw-r--r--drivers/phy/motorola/phy-mapphone-mdm6600.c4
-rw-r--r--drivers/pinctrl/bcm/pinctrl-bcm281xx.c2
-rw-r--r--drivers/pinctrl/nuvoton/pinctrl-npcm8xx.c3
-rw-r--r--drivers/pinctrl/spacemit/Kconfig3
-rw-r--r--drivers/pinctrl/spacemit/pinctrl-k1.c2
-rw-r--r--drivers/platform/arm64/Kconfig21
-rw-r--r--drivers/platform/arm64/Makefile1
-rw-r--r--drivers/platform/arm64/huawei-gaokun-ec.c825
-rw-r--r--drivers/platform/chrome/cros_ec_lpc.c22
-rw-r--r--drivers/platform/chrome/cros_ec_sysfs.c71
-rw-r--r--drivers/platform/chrome/cros_ec_typec.c28
-rw-r--r--drivers/platform/chrome/cros_ec_typec.h1
-rw-r--r--drivers/platform/mellanox/Kconfig13
-rw-r--r--drivers/platform/mellanox/Makefile1
-rw-r--r--drivers/platform/mellanox/mlx-platform.c (renamed from drivers/platform/x86/mlx-platform.c)17
-rw-r--r--drivers/platform/mellanox/mlxbf-bootctl.c20
-rw-r--r--drivers/platform/mellanox/mlxbf-bootctl.h5
-rw-r--r--drivers/platform/surface/surface_aggregator_registry.c5
-rw-r--r--drivers/platform/x86/Kconfig41
-rw-r--r--drivers/platform/x86/Makefile7
-rw-r--r--drivers/platform/x86/amd/Makefile2
-rw-r--r--drivers/platform/x86/amd/hsmp/Kconfig2
-rw-r--r--drivers/platform/x86/amd/hsmp/Makefile6
-rw-r--r--drivers/platform/x86/amd/hsmp/acpi.c7
-rw-r--r--drivers/platform/x86/amd/hsmp/hsmp.c1
-rw-r--r--drivers/platform/x86/amd/hsmp/hsmp.h3
-rw-r--r--drivers/platform/x86/amd/hsmp/plat.c36
-rw-r--r--drivers/platform/x86/amd/pmc/Makefile6
-rw-r--r--drivers/platform/x86/amd/pmc/pmc.c113
-rw-r--r--drivers/platform/x86/amd/pmc/pmc.h82
-rw-r--r--drivers/platform/x86/amd/pmf/Makefile8
-rw-r--r--drivers/platform/x86/amd/pmf/acpi.c2
-rw-r--r--drivers/platform/x86/amd/pmf/core.c2
-rw-r--r--drivers/platform/x86/amd/pmf/pmf.h5
-rw-r--r--drivers/platform/x86/amd/pmf/spc.c2
-rw-r--r--drivers/platform/x86/amd/pmf/sps.c11
-rw-r--r--drivers/platform/x86/amd/pmf/tee-if.c82
-rw-r--r--drivers/platform/x86/asus-tf103c-dock.c2
-rw-r--r--drivers/platform/x86/compal-laptop.c1
-rw-r--r--drivers/platform/x86/dell/Kconfig30
-rw-r--r--drivers/platform/x86/dell/Makefile45
-rw-r--r--drivers/platform/x86/dell/alienware-wmi-base.c491
-rw-r--r--drivers/platform/x86/dell/alienware-wmi-legacy.c95
-rw-r--r--drivers/platform/x86/dell/alienware-wmi-wmax.c768
-rw-r--r--drivers/platform/x86/dell/alienware-wmi.c1249
-rw-r--r--drivers/platform/x86/dell/alienware-wmi.h117
-rw-r--r--drivers/platform/x86/dell/dell-uart-backlight.c2
-rw-r--r--drivers/platform/x86/dell/dell-wmi-ddv.c84
-rw-r--r--drivers/platform/x86/dell/dell-wmi-sysman/Makefile2
-rw-r--r--drivers/platform/x86/hp/hp-bioscfg/Makefile2
-rw-r--r--drivers/platform/x86/hp/hp-bioscfg/bioscfg.c15
-rw-r--r--drivers/platform/x86/ideapad-laptop.c23
-rw-r--r--drivers/platform/x86/intel/hid.c7
-rw-r--r--drivers/platform/x86/intel/ifs/Makefile2
-rw-r--r--drivers/platform/x86/intel/int3472/discrete.c48
-rw-r--r--drivers/platform/x86/intel/pmc/Makefile2
-rw-r--r--drivers/platform/x86/intel/pmc/adl.c56
-rw-r--r--drivers/platform/x86/intel/pmc/arl.c137
-rw-r--r--drivers/platform/x86/intel/pmc/cnp.c29
-rw-r--r--drivers/platform/x86/intel/pmc/core.c115
-rw-r--r--drivers/platform/x86/intel/pmc/core.h199
-rw-r--r--drivers/platform/x86/intel/pmc/icl.c24
-rw-r--r--drivers/platform/x86/intel/pmc/lnl.c67
-rw-r--r--drivers/platform/x86/intel/pmc/mtl.c109
-rw-r--r--drivers/platform/x86/intel/pmc/ptl.c550
-rw-r--r--drivers/platform/x86/intel/pmc/spt.c45
-rw-r--r--drivers/platform/x86/intel/pmc/tgl.c59
-rw-r--r--drivers/platform/x86/intel/vsec.c7
-rw-r--r--drivers/platform/x86/lenovo-wmi-hotkey-utilities.c212
-rw-r--r--drivers/platform/x86/lenovo-yoga-tab2-pro-1380-fastcharger.c2
-rw-r--r--drivers/platform/x86/samsung-galaxybook.c1425
-rw-r--r--drivers/platform/x86/think-lmi.c51
-rw-r--r--drivers/platform/x86/think-lmi.h2
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c182
-rw-r--r--drivers/platform/x86/wmi.c143
-rw-r--r--drivers/platform/x86/x86-android-tablets/Kconfig1
-rw-r--r--drivers/pmdomain/Kconfig1
-rw-r--r--drivers/pmdomain/Makefile1
-rw-r--r--drivers/pmdomain/amlogic/meson-secure-pwrc.c2
-rw-r--r--drivers/pmdomain/arm/scmi_pm_domain.c11
-rw-r--r--drivers/pmdomain/bcm/bcm2835-power.c1
-rw-r--r--drivers/pmdomain/core.c35
-rw-r--r--drivers/pmdomain/imx/gpcv2.c2
-rw-r--r--drivers/pmdomain/renesas/rcar-sysc.c2
-rw-r--r--drivers/pmdomain/rockchip/Kconfig2
-rw-r--r--drivers/pmdomain/rockchip/pm-domains.c205
-rw-r--r--drivers/pmdomain/sunxi/sun20i-ppu.c15
-rw-r--r--drivers/pmdomain/thead/Kconfig12
-rw-r--r--drivers/pmdomain/thead/Makefile2
-rw-r--r--drivers/pmdomain/thead/th1520-pm-domains.c218
-rw-r--r--drivers/pmdomain/ti/omap_prm.c2
-rw-r--r--drivers/pnp/base.h4
-rw-r--r--drivers/pnp/card.c32
-rw-r--r--drivers/pnp/core.c16
-rw-r--r--drivers/power/reset/ltc2952-poweroff.c8
-rw-r--r--drivers/power/supply/ab8500_chargalg.c9
-rw-r--r--drivers/powercap/Kconfig2
-rw-r--r--drivers/powercap/idle_inject.c3
-rw-r--r--drivers/powercap/intel_rapl_common.c5
-rw-r--r--drivers/pps/generators/pps_gen_parport.c3
-rw-r--r--drivers/ptp/ptp_chardev.c16
-rw-r--r--drivers/ptp/ptp_ocp.c7
-rw-r--r--drivers/pwm/Kconfig14
-rw-r--r--drivers/pwm/Makefile1
-rw-r--r--drivers/pwm/core.c19
-rw-r--r--drivers/pwm/pwm-clps711x.c4
-rw-r--r--drivers/pwm/pwm-gpio.c5
-rw-r--r--drivers/pwm/pwm-lpss.c1
-rw-r--r--drivers/pwm/pwm-lpss.h1
-rw-r--r--drivers/pwm/pwm-pca9685.c9
-rw-r--r--drivers/pwm/pwm-sophgo-sg2042.c194
-rw-r--r--drivers/pwm/pwm-stmpe.c25
-rw-r--r--drivers/rapidio/devices/rio_mport_cdev.c3
-rw-r--r--drivers/rapidio/rio-scan.c5
-rw-r--r--drivers/regulator/Kconfig14
-rw-r--r--drivers/regulator/Makefile2
-rw-r--r--drivers/regulator/ad5398.c30
-rw-r--r--drivers/regulator/axp20x-regulator.c14
-rw-r--r--drivers/regulator/core.c88
-rw-r--r--drivers/regulator/cros-ec-regulator.c4
-rw-r--r--drivers/regulator/devres.c22
-rw-r--r--drivers/regulator/dummy.c2
-rw-r--r--drivers/regulator/of_regulator.c21
-rw-r--r--drivers/regulator/pca9450-regulator.c91
-rw-r--r--drivers/regulator/pcf50633-regulator.c124
-rw-r--r--drivers/regulator/pf9453-regulator.c879
-rw-r--r--drivers/regulator/rtq2208-regulator.c216
-rw-r--r--drivers/regulator/rtq6752-regulator.c2
-rw-r--r--drivers/reset/Kconfig7
-rw-r--r--drivers/reset/Makefile1
-rw-r--r--drivers/reset/reset-imx-scu.c101
-rw-r--r--drivers/reset/reset-microchip-sparx5.c19
-rw-r--r--drivers/rtc/class.c3
-rw-r--r--drivers/s390/crypto/ap_bus.c3
-rw-r--r--drivers/scsi/Kconfig3
-rw-r--r--drivers/scsi/Makefile1
-rw-r--r--drivers/scsi/aacraid/aachba.c4
-rw-r--r--drivers/scsi/aacraid/linit.c2
-rw-r--r--drivers/scsi/arm/acornscsi.c2
-rw-r--r--drivers/scsi/be2iscsi/be_main.c2
-rw-r--r--drivers/scsi/bfa/bfad.c2
-rw-r--r--drivers/scsi/csiostor/csio_init.c2
-rw-r--r--drivers/scsi/cxlflash/Kconfig15
-rw-r--r--drivers/scsi/cxlflash/Makefile5
-rw-r--r--drivers/scsi/cxlflash/backend.h48
-rw-r--r--drivers/scsi/cxlflash/common.h340
-rw-r--r--drivers/scsi/cxlflash/cxl_hw.c177
-rw-r--r--drivers/scsi/cxlflash/lunmgt.c278
-rw-r--r--drivers/scsi/cxlflash/main.c3970
-rw-r--r--drivers/scsi/cxlflash/main.h129
-rw-r--r--drivers/scsi/cxlflash/ocxl_hw.c1399
-rw-r--r--drivers/scsi/cxlflash/ocxl_hw.h72
-rw-r--r--drivers/scsi/cxlflash/sislite.h560
-rw-r--r--drivers/scsi/cxlflash/superpipe.c2218
-rw-r--r--drivers/scsi/cxlflash/superpipe.h150
-rw-r--r--drivers/scsi/cxlflash/vlun.c1336
-rw-r--r--drivers/scsi/cxlflash/vlun.h82
-rw-r--r--drivers/scsi/elx/efct/efct_driver.c2
-rw-r--r--drivers/scsi/fnic/fdls_disc.c57
-rw-r--r--drivers/scsi/fnic/fnic_main.c5
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas.h3
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_main.c28
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v1_hw.c2
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v2_hw.c6
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v3_hw.c4
-rw-r--r--drivers/scsi/hpsa.c19
-rw-r--r--drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c4
-rw-r--r--drivers/scsi/ips.c8
-rw-r--r--drivers/scsi/isci/init.c14
-rw-r--r--drivers/scsi/isci/isci.h7
-rw-r--r--drivers/scsi/isci/remote_device.h2
-rw-r--r--drivers/scsi/iscsi_tcp.c60
-rw-r--r--drivers/scsi/iscsi_tcp.h4
-rw-r--r--drivers/scsi/libiscsi_tcp.c91
-rw-r--r--drivers/scsi/lpfc/lpfc.h26
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c23
-rw-r--r--drivers/scsi/lpfc/lpfc_hbadisc.c35
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c23
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c12
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c43
-rw-r--r--drivers/scsi/lpfc/lpfc_version.h6
-rw-r--r--drivers/scsi/lpfc/lpfc_vport.c2
-rw-r--r--drivers/scsi/megaraid.c10
-rw-r--r--drivers/scsi/megaraid/megaraid_mbox.c10
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_base.c10
-rw-r--r--drivers/scsi/mpi3mr/mpi/mpi30_cnfg.h4
-rw-r--r--drivers/scsi/mpi3mr/mpi/mpi30_image.h8
-rw-r--r--drivers/scsi/mpi3mr/mpi/mpi30_init.h11
-rw-r--r--drivers/scsi/mpi3mr/mpi/mpi30_ioc.h21
-rw-r--r--drivers/scsi/mpi3mr/mpi/mpi30_tool.h1
-rw-r--r--drivers/scsi/mpi3mr/mpi/mpi30_transport.h20
-rw-r--r--drivers/scsi/mpi3mr/mpi3mr.h34
-rw-r--r--drivers/scsi/mpi3mr/mpi3mr_app.c129
-rw-r--r--drivers/scsi/mpi3mr/mpi3mr_fw.c159
-rw-r--r--drivers/scsi/mpi3mr/mpi3mr_os.c101
-rw-r--r--drivers/scsi/mpi3mr/mpi3mr_transport.c8
-rw-r--r--drivers/scsi/mpt3sas/mpi/mpi2.h9
-rw-r--r--drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h7
-rw-r--r--drivers/scsi/mpt3sas/mpi/mpi2_ioc.h54
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.c23
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.h10
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_config.c79
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_ctl.c279
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_ctl.h49
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_scsih.c4
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_transport.c8
-rw-r--r--drivers/scsi/mvsas/mv_sas.c10
-rw-r--r--drivers/scsi/mvsas/mv_sas.h1
-rw-r--r--drivers/scsi/qedi/qedi_main.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_mr.h4
-rw-r--r--drivers/scsi/qla2xxx/qla_sup.c4
-rw-r--r--drivers/scsi/scsi.c28
-rw-r--r--drivers/scsi/scsi_debug.c928
-rw-r--r--drivers/scsi/scsi_error.c19
-rw-r--r--drivers/scsi/scsi_lib.c2
-rw-r--r--drivers/scsi/scsi_scan.c5
-rw-r--r--drivers/scsi/scsi_sysctl.c4
-rw-r--r--drivers/scsi/st.c80
-rw-r--r--drivers/scsi/st.h6
-rw-r--r--drivers/scsi/storvsc_drv.c4
-rw-r--r--drivers/sh/clk/cpg.c25
-rw-r--r--drivers/slimbus/messaging.c5
-rw-r--r--drivers/soc/apple/rtkit-internal.h1
-rw-r--r--drivers/soc/apple/rtkit.c112
-rw-r--r--drivers/soc/atmel/soc.c5
-rw-r--r--drivers/soc/atmel/soc.h3
-rw-r--r--drivers/soc/hisilicon/kunpeng_hccs.c4
-rw-r--r--drivers/soc/imx/soc-imx8m.c26
-rw-r--r--drivers/soc/mediatek/mt8167-mmsys.h31
-rw-r--r--drivers/soc/mediatek/mt8173-mmsys.h99
-rw-r--r--drivers/soc/mediatek/mt8183-mmsys.h50
-rw-r--r--drivers/soc/mediatek/mt8186-mmsys.h88
-rw-r--r--drivers/soc/mediatek/mt8188-mmsys.h266
-rw-r--r--drivers/soc/mediatek/mt8192-mmsys.h71
-rw-r--r--drivers/soc/mediatek/mt8195-mmsys.h632
-rw-r--r--drivers/soc/mediatek/mt8365-mmsys.h84
-rw-r--r--drivers/soc/mediatek/mtk-mmsys.h14
-rw-r--r--drivers/soc/mediatek/mtk-mutex.c6
-rw-r--r--drivers/soc/mediatek/mtk-socinfo.c22
-rw-r--r--drivers/soc/qcom/ice.c51
-rw-r--r--drivers/soc/qcom/pdr_interface.c8
-rw-r--r--drivers/soc/qcom/pdr_internal.h1
-rw-r--r--drivers/soc/qcom/pmic_glink.c2
-rw-r--r--drivers/soc/qcom/qcom_aoss.c3
-rw-r--r--drivers/soc/qcom/qcom_pd_mapper.c12
-rw-r--r--drivers/soc/qcom/qcom_pdr_msg.c3
-rw-r--r--drivers/soc/renesas/Kconfig18
-rw-r--r--drivers/soc/renesas/Makefile4
-rw-r--r--drivers/soc/renesas/r9a08g045-sysc.c23
-rw-r--r--drivers/soc/renesas/r9a09g047-sys.c67
-rw-r--r--drivers/soc/renesas/r9a09g057-sys.c67
-rw-r--r--drivers/soc/renesas/renesas-soc.c33
-rw-r--r--drivers/soc/renesas/rz-sysc.c137
-rw-r--r--drivers/soc/renesas/rz-sysc.h46
-rw-r--r--drivers/soc/samsung/exynos-asv.c1
-rw-r--r--drivers/soc/samsung/exynos-chipid.c5
-rw-r--r--drivers/soc/samsung/exynos-pmu.c1
-rw-r--r--drivers/soc/samsung/exynos-usi.c108
-rw-r--r--drivers/soc/samsung/exynos3250-pmu.c1
-rw-r--r--drivers/soc/samsung/exynos5250-pmu.c1
-rw-r--r--drivers/soc/samsung/exynos5420-pmu.c1
-rw-r--r--drivers/soc/tegra/pmc.c3
-rw-r--r--drivers/soc/ti/k3-socinfo.c13
-rw-r--r--drivers/soundwire/qcom.c26
-rw-r--r--drivers/spi/Kconfig44
-rw-r--r--drivers/spi/Makefile7
-rw-r--r--drivers/spi/atmel-quadspi.c5
-rw-r--r--drivers/spi/spi-aspeed-smc.c7
-rw-r--r--drivers/spi/spi-axi-spi-engine.c315
-rw-r--r--drivers/spi/spi-cadence-quadspi.c8
-rw-r--r--drivers/spi/spi-fsi.c13
-rw-r--r--drivers/spi/spi-fsl-lpspi.c2
-rw-r--r--drivers/spi/spi-gpio.c45
-rw-r--r--drivers/spi/spi-imx.c2
-rw-r--r--drivers/spi/spi-mem.c11
-rw-r--r--drivers/spi/spi-microchip-core.c41
-rw-r--r--drivers/spi/spi-mt65xx.c17
-rw-r--r--drivers/spi/spi-mtk-snfi.c3
-rw-r--r--drivers/spi/spi-mux.c4
-rw-r--r--drivers/spi/spi-npcm-fiu.c5
-rw-r--r--drivers/spi/spi-offload-trigger-pwm.c169
-rw-r--r--drivers/spi/spi-offload.c468
-rw-r--r--drivers/spi/spi-qpic-snand.c1633
-rw-r--r--drivers/spi/spi-realtek-rtl-snand.c1
-rw-r--r--drivers/spi/spi-s3c64xx.c4
-rw-r--r--drivers/spi/spi-sg2044-nor.c488
-rw-r--r--drivers/spi/spi-stm32-ospi.c1063
-rw-r--r--drivers/spi/spi-stm32-qspi.c5
-rw-r--r--drivers/spi/spi-zynq-qspi.c4
-rw-r--r--drivers/spi/spi-zynqmp-gqspi.c173
-rw-r--r--drivers/spi/spi.c117
-rw-r--r--drivers/spi/spidev.c2
-rw-r--r--drivers/staging/media/atomisp/include/linux/atomisp_platform.h4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_ioctl.c35
-rw-r--r--drivers/staging/media/atomisp/pci/isp/kernels/sdis/sdis_1.0/ia_css_sdis.host.c9
-rw-r--r--drivers/staging/media/imx/imx-media-vdic.c54
-rw-r--r--drivers/staging/media/ipu3/include/uapi/intel-ipu3.h3
-rw-r--r--drivers/target/iscsi/iscsi_target_nego.c6
-rw-r--r--drivers/target/loopback/tcm_loop.c5
-rw-r--r--drivers/target/target_core_configfs.c6
-rw-r--r--drivers/target/target_core_device.c8
-rw-r--r--drivers/target/target_core_iblock.c12
-rw-r--r--drivers/target/target_core_pr.c6
-rw-r--r--drivers/target/target_core_spc.c36
-rw-r--r--drivers/thermal/hisi_thermal.c4
-rw-r--r--drivers/thermal/intel/int340x_thermal/int3402_thermal.c3
-rw-r--r--drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c6
-rw-r--r--drivers/thermal/intel/intel_tcc.c2
-rw-r--r--drivers/thermal/k3_j72xx_bandgap.c4
-rw-r--r--drivers/thermal/thermal_core.c20
-rw-r--r--drivers/thermal/thermal_debugfs.c4
-rw-r--r--drivers/thermal/thermal_of.c2
-rw-r--r--drivers/thunderbolt/ctl.c2
-rw-r--r--drivers/thunderbolt/eeprom.c2
-rw-r--r--drivers/thunderbolt/tunnel.c11
-rw-r--r--drivers/thunderbolt/tunnel.h2
-rw-r--r--drivers/tty/Kconfig19
-rw-r--r--drivers/tty/serial/8250/8250_bcm7271.c3
-rw-r--r--drivers/tty/serial/8250/8250_port.c10
-rw-r--r--drivers/tty/serial/amba-pl011.c9
-rw-r--r--drivers/tty/serial/imx.c8
-rw-r--r--drivers/tty/serial/serial_core.c6
-rw-r--r--drivers/tty/serial/sh-sci.c3
-rw-r--r--drivers/tty/serial/xilinx_uartps.c8
-rw-r--r--drivers/ufs/core/ufs-sysfs.c10
-rw-r--r--drivers/ufs/core/ufs_trace.h135
-rw-r--r--drivers/ufs/core/ufshcd-crypto.c7
-rw-r--r--drivers/ufs/core/ufshcd-priv.h21
-rw-r--r--drivers/ufs/core/ufshcd.c148
-rw-r--r--drivers/ufs/host/Kconfig12
-rw-r--r--drivers/ufs/host/Makefile1
-rw-r--r--drivers/ufs/host/ufs-exynos.c13
-rw-r--r--drivers/ufs/host/ufs-exynos.h2
-rw-r--r--drivers/ufs/host/ufs-hisi.c6
-rw-r--r--drivers/ufs/host/ufs-mediatek.c11
-rw-r--r--drivers/ufs/host/ufs-qcom.c131
-rw-r--r--drivers/ufs/host/ufs-qcom.h39
-rw-r--r--drivers/ufs/host/ufs-renesas.c723
-rw-r--r--drivers/ufs/host/ufs-rockchip.c354
-rw-r--r--drivers/ufs/host/ufs-rockchip.h90
-rw-r--r--drivers/ufs/host/ufs-sprd.c6
-rw-r--r--drivers/ufs/host/ufshcd-pci.c2
-rw-r--r--drivers/usb/atm/cxacru.c13
-rw-r--r--drivers/usb/chipidea/otg_fsm.c3
-rw-r--r--drivers/usb/core/hub.c33
-rw-r--r--drivers/usb/core/quirks.c4
-rw-r--r--drivers/usb/dwc2/hcd_queue.c3
-rw-r--r--drivers/usb/dwc3/core.c85
-rw-r--r--drivers/usb/dwc3/core.h2
-rw-r--r--drivers/usb/dwc3/drd.c4
-rw-r--r--drivers/usb/dwc3/gadget.c10
-rw-r--r--drivers/usb/fotg210/fotg210-hcd.c3
-rw-r--r--drivers/usb/gadget/composite.c17
-rw-r--r--drivers/usb/gadget/function/f_mass_storage.c4
-rw-r--r--drivers/usb/gadget/function/f_ncm.c3
-rw-r--r--drivers/usb/gadget/function/u_ether.c4
-rw-r--r--drivers/usb/gadget/udc/dummy_hcd.c6
-rw-r--r--drivers/usb/host/ehci-hcd.c3
-rw-r--r--drivers/usb/host/xhci-hub.c8
-rw-r--r--drivers/usb/host/xhci-mem.c3
-rw-r--r--drivers/usb/host/xhci-pci.c10
-rw-r--r--drivers/usb/host/xhci.c6
-rw-r--r--drivers/usb/host/xhci.h2
-rw-r--r--drivers/usb/musb/musb_cppi41.c4
-rw-r--r--drivers/usb/renesas_usbhs/common.c6
-rw-r--r--drivers/usb/renesas_usbhs/mod_gadget.c2
-rw-r--r--drivers/usb/serial/ftdi_sio.c14
-rw-r--r--drivers/usb/serial/ftdi_sio_ids.h13
-rw-r--r--drivers/usb/serial/option.c48
-rw-r--r--drivers/usb/storage/debug.c4
-rw-r--r--drivers/usb/typec/tcpm/tcpci_rt1711h.c11
-rw-r--r--drivers/usb/typec/tcpm/tcpm.c24
-rw-r--r--drivers/usb/typec/ucsi/ucsi.c25
-rw-r--r--drivers/usb/typec/ucsi/ucsi.h2
-rw-r--r--drivers/usb/typec/ucsi/ucsi_acpi.c21
-rw-r--r--drivers/usb/typec/ucsi/ucsi_ccg.c1
-rw-r--r--drivers/usb/typec/ucsi/ucsi_glink.c1
-rw-r--r--drivers/usb/typec/ucsi/ucsi_stm32g0.c1
-rw-r--r--drivers/usb/typec/ucsi/ucsi_yoga_c630.c1
-rw-r--r--drivers/vfio/group.c16
-rw-r--r--drivers/video/fbdev/hyperv_fb.c52
-rw-r--r--drivers/virt/acrn/hsm.c6
-rw-r--r--drivers/virt/coco/sev-guest/sev-guest.c59
-rw-r--r--drivers/virt/vboxguest/Kconfig3
-rw-r--r--drivers/virtio/virtio_mem.c4
-rw-r--r--drivers/watchdog/softdog.c8
-rw-r--r--drivers/watchdog/watchdog_dev.c4
-rw-r--r--drivers/watchdog/watchdog_hrtimer_pretimeout.c4
-rw-r--r--drivers/xen/pci.c32
-rw-r--r--drivers/xen/platform-pci.c4
-rw-r--r--drivers/xen/xen-pciback/pci_stub.c20
-rw-r--r--drivers/xen/xen-pciback/pciback.h2
-rw-r--r--drivers/xen/xenfs/xensyms.c4
-rw-r--r--fs/9p/vfs_inode.c7
-rw-r--r--fs/9p/vfs_inode_dotl.c8
-rw-r--r--fs/Kconfig1
-rw-r--r--fs/Makefile1
-rw-r--r--fs/affs/affs.h2
-rw-r--r--fs/affs/file.c9
-rw-r--r--fs/affs/namei.c8
-rw-r--r--fs/afs/addr_list.c50
-rw-r--r--fs/afs/cell.c446
-rw-r--r--fs/afs/cmservice.c82
-rw-r--r--fs/afs/dir.c17
-rw-r--r--fs/afs/dynroot.c501
-rw-r--r--fs/afs/fs_probe.c32
-rw-r--r--fs/afs/fsclient.c4
-rw-r--r--fs/afs/internal.h100
-rw-r--r--fs/afs/main.c16
-rw-r--r--fs/afs/mntpt.c5
-rw-r--r--fs/afs/proc.c19
-rw-r--r--fs/afs/rxrpc.c8
-rw-r--r--fs/afs/server.c601
-rw-r--r--fs/afs/server_list.c6
-rw-r--r--fs/afs/super.c25
-rw-r--r--fs/afs/vl_alias.c7
-rw-r--r--fs/afs/vl_rotate.c2
-rw-r--r--fs/afs/volume.c15
-rw-r--r--fs/autofs/autofs_i.h2
-rw-r--r--fs/autofs/dev-ioctl.c3
-rw-r--r--fs/autofs/root.c14
-rw-r--r--fs/bad_inode.c6
-rw-r--r--fs/bcachefs/Kconfig2
-rw-r--r--fs/bcachefs/Makefile3
-rw-r--r--fs/bcachefs/alloc_background.c190
-rw-r--r--fs/bcachefs/alloc_background.h2
-rw-r--r--fs/bcachefs/alloc_foreground.c31
-rw-r--r--fs/bcachefs/alloc_foreground.h19
-rw-r--r--fs/bcachefs/alloc_types.h2
-rw-r--r--fs/bcachefs/backpointers.c151
-rw-r--r--fs/bcachefs/backpointers.h26
-rw-r--r--fs/bcachefs/bcachefs.h20
-rw-r--r--fs/bcachefs/bcachefs_format.h16
-rw-r--r--fs/bcachefs/bcachefs_ioctl.h29
-rw-r--r--fs/bcachefs/btree_cache.c1
-rw-r--r--fs/bcachefs/btree_gc.c18
-rw-r--r--fs/bcachefs/btree_io.c261
-rw-r--r--fs/bcachefs/btree_io.h4
-rw-r--r--fs/bcachefs/btree_iter.c14
-rw-r--r--fs/bcachefs/btree_iter.h9
-rw-r--r--fs/bcachefs/btree_locking.c8
-rw-r--r--fs/bcachefs/btree_node_scan.c29
-rw-r--r--fs/bcachefs/btree_trans_commit.c120
-rw-r--r--fs/bcachefs/btree_types.h13
-rw-r--r--fs/bcachefs/btree_update.c5
-rw-r--r--fs/bcachefs/btree_update.h10
-rw-r--r--fs/bcachefs/btree_update_interior.c150
-rw-r--r--fs/bcachefs/btree_update_interior.h7
-rw-r--r--fs/bcachefs/btree_write_buffer.c21
-rw-r--r--fs/bcachefs/buckets.c80
-rw-r--r--fs/bcachefs/buckets.h31
-rw-r--r--fs/bcachefs/buckets_types.h27
-rw-r--r--fs/bcachefs/chardev.c38
-rw-r--r--fs/bcachefs/checksum.c25
-rw-r--r--fs/bcachefs/checksum.h2
-rw-r--r--fs/bcachefs/compress.c65
-rw-r--r--fs/bcachefs/data_update.c237
-rw-r--r--fs/bcachefs/data_update.h17
-rw-r--r--fs/bcachefs/debug.c34
-rw-r--r--fs/bcachefs/dirent.c274
-rw-r--r--fs/bcachefs/dirent.h17
-rw-r--r--fs/bcachefs/dirent_format.h20
-rw-r--r--fs/bcachefs/disk_accounting.h18
-rw-r--r--fs/bcachefs/disk_accounting_format.h12
-rw-r--r--fs/bcachefs/ec.c482
-rw-r--r--fs/bcachefs/ec.h46
-rw-r--r--fs/bcachefs/ec_types.h12
-rw-r--r--fs/bcachefs/errcode.h65
-rw-r--r--fs/bcachefs/error.c88
-rw-r--r--fs/bcachefs/error.h57
-rw-r--r--fs/bcachefs/extents.c249
-rw-r--r--fs/bcachefs/extents.h24
-rw-r--r--fs/bcachefs/extents_format.h24
-rw-r--r--fs/bcachefs/extents_types.h11
-rw-r--r--fs/bcachefs/eytzinger.c76
-rw-r--r--fs/bcachefs/eytzinger.h95
-rw-r--r--fs/bcachefs/fs-io-buffered.c38
-rw-r--r--fs/bcachefs/fs-io-direct.c20
-rw-r--r--fs/bcachefs/fs-ioctl.c34
-rw-r--r--fs/bcachefs/fs-ioctl.h20
-rw-r--r--fs/bcachefs/fs.c147
-rw-r--r--fs/bcachefs/fsck.c231
-rw-r--r--fs/bcachefs/inode.c25
-rw-r--r--fs/bcachefs/inode.h1
-rw-r--r--fs/bcachefs/inode_format.h3
-rw-r--r--fs/bcachefs/io_misc.c3
-rw-r--r--fs/bcachefs/io_read.c762
-rw-r--r--fs/bcachefs/io_read.h92
-rw-r--r--fs/bcachefs/io_write.c414
-rw-r--r--fs/bcachefs/io_write.h38
-rw-r--r--fs/bcachefs/io_write_types.h2
-rw-r--r--fs/bcachefs/journal.c250
-rw-r--r--fs/bcachefs/journal.h42
-rw-r--r--fs/bcachefs/journal_io.c99
-rw-r--r--fs/bcachefs/journal_reclaim.c10
-rw-r--r--fs/bcachefs/journal_seq_blacklist.c7
-rw-r--r--fs/bcachefs/journal_types.h37
-rw-r--r--fs/bcachefs/lru.c100
-rw-r--r--fs/bcachefs/lru.h22
-rw-r--r--fs/bcachefs/lru_format.h6
-rw-r--r--fs/bcachefs/migrate.c26
-rw-r--r--fs/bcachefs/move.c456
-rw-r--r--fs/bcachefs/move_types.h20
-rw-r--r--fs/bcachefs/movinggc.c40
-rw-r--r--fs/bcachefs/namei.c (renamed from fs/bcachefs/fs-common.c)210
-rw-r--r--fs/bcachefs/namei.h (renamed from fs/bcachefs/fs-common.h)31
-rw-r--r--fs/bcachefs/opts.c115
-rw-r--r--fs/bcachefs/opts.h69
-rw-r--r--fs/bcachefs/progress.c63
-rw-r--r--fs/bcachefs/progress.h29
-rw-r--r--fs/bcachefs/rebalance.c46
-rw-r--r--fs/bcachefs/recovery.c4
-rw-r--r--fs/bcachefs/recovery_passes_types.h2
-rw-r--r--fs/bcachefs/reflink.c23
-rw-r--r--fs/bcachefs/sb-counters.c90
-rw-r--r--fs/bcachefs/sb-counters.h4
-rw-r--r--fs/bcachefs/sb-counters_format.h31
-rw-r--r--fs/bcachefs/sb-downgrade.c8
-rw-r--r--fs/bcachefs/sb-errors_format.h5
-rw-r--r--fs/bcachefs/sb-members.h16
-rw-r--r--fs/bcachefs/sb-members_format.h1
-rw-r--r--fs/bcachefs/snapshot.c7
-rw-r--r--fs/bcachefs/snapshot.h1
-rw-r--r--fs/bcachefs/str_hash.c2
-rw-r--r--fs/bcachefs/str_hash.h12
-rw-r--r--fs/bcachefs/super-io.c106
-rw-r--r--fs/bcachefs/super-io.h17
-rw-r--r--fs/bcachefs/super.c152
-rw-r--r--fs/bcachefs/super.h2
-rw-r--r--fs/bcachefs/super_types.h8
-rw-r--r--fs/bcachefs/sysfs.c141
-rw-r--r--fs/bcachefs/sysfs.h5
-rw-r--r--fs/bcachefs/trace.h101
-rw-r--r--fs/bcachefs/util.c259
-rw-r--r--fs/bcachefs/util.h18
-rw-r--r--fs/bcachefs/xattr.c2
-rw-r--r--fs/binfmt_elf.c21
-rw-r--r--fs/binfmt_elf_fdpic.c13
-rw-r--r--fs/btrfs/accessors.h1
-rw-r--r--fs/btrfs/acl.h2
-rw-r--r--fs/btrfs/async-thread.c11
-rw-r--r--fs/btrfs/backref.c4
-rw-r--r--fs/btrfs/bio.c38
-rw-r--r--fs/btrfs/block-group.c155
-rw-r--r--fs/btrfs/btrfs_inode.h17
-rw-r--r--fs/btrfs/compression.c31
-rw-r--r--fs/btrfs/compression.h26
-rw-r--r--fs/btrfs/ctree.c18
-rw-r--r--fs/btrfs/ctree.h2
-rw-r--r--fs/btrfs/defrag.c78
-rw-r--r--fs/btrfs/defrag.h4
-rw-r--r--fs/btrfs/delayed-inode.c101
-rw-r--r--fs/btrfs/delayed-inode.h2
-rw-r--r--fs/btrfs/delayed-ref.h2
-rw-r--r--fs/btrfs/dev-replace.c33
-rw-r--r--fs/btrfs/dir-item.c24
-rw-r--r--fs/btrfs/dir-item.h1
-rw-r--r--fs/btrfs/direct-io.c19
-rw-r--r--fs/btrfs/direct-io.h2
-rw-r--r--fs/btrfs/discard.c34
-rw-r--r--fs/btrfs/discard.h1
-rw-r--r--fs/btrfs/disk-io.c109
-rw-r--r--fs/btrfs/export.c51
-rw-r--r--fs/btrfs/extent-io-tree.c8
-rw-r--r--fs/btrfs/extent-tree.c63
-rw-r--r--fs/btrfs/extent-tree.h1
-rw-r--r--fs/btrfs/extent_io.c589
-rw-r--r--fs/btrfs/extent_io.h9
-rw-r--r--fs/btrfs/file-item.c30
-rw-r--r--fs/btrfs/file-item.h2
-rw-r--r--fs/btrfs/file.c28
-rw-r--r--fs/btrfs/file.h2
-rw-r--r--fs/btrfs/free-space-cache.c57
-rw-r--r--fs/btrfs/free-space-tree.c45
-rw-r--r--fs/btrfs/fs.c1
-rw-r--r--fs/btrfs/fs.h26
-rw-r--r--fs/btrfs/inode-item.c6
-rw-r--r--fs/btrfs/inode.c606
-rw-r--r--fs/btrfs/ioctl.c217
-rw-r--r--fs/btrfs/ioctl.h4
-rw-r--r--fs/btrfs/locking.c1
-rw-r--r--fs/btrfs/ordered-data.c23
-rw-r--r--fs/btrfs/ordered-data.h9
-rw-r--r--fs/btrfs/print-tree.h2
-rw-r--r--fs/btrfs/props.c66
-rw-r--r--fs/btrfs/props.h8
-rw-r--r--fs/btrfs/qgroup.c2
-rw-r--r--fs/btrfs/qgroup.h3
-rw-r--r--fs/btrfs/raid-stripe-tree.h1
-rw-r--r--fs/btrfs/reflink.c100
-rw-r--r--fs/btrfs/relocation.c30
-rw-r--r--fs/btrfs/scrub.c4
-rw-r--r--fs/btrfs/send.c544
-rw-r--r--fs/btrfs/send.h4
-rw-r--r--fs/btrfs/space-info.c2
-rw-r--r--fs/btrfs/subpage.c224
-rw-r--r--fs/btrfs/subpage.h52
-rw-r--r--fs/btrfs/super.c6
-rw-r--r--fs/btrfs/sysfs.c18
-rw-r--r--fs/btrfs/sysfs.h1
-rw-r--r--fs/btrfs/tests/extent-io-tests.c6
-rw-r--r--fs/btrfs/tests/extent-map-tests.c1
-rw-r--r--fs/btrfs/transaction.c39
-rw-r--r--fs/btrfs/tree-log.c392
-rw-r--r--fs/btrfs/verity.c4
-rw-r--r--fs/btrfs/volumes.c17
-rw-r--r--fs/btrfs/volumes.h4
-rw-r--r--fs/btrfs/xattr.h2
-rw-r--r--fs/btrfs/zlib.c85
-rw-r--r--fs/btrfs/zoned.c9
-rw-r--r--fs/btrfs/zstd.c66
-rw-r--r--fs/buffer.c58
-rw-r--r--fs/cachefiles/namei.c16
-rw-r--r--fs/cachefiles/ondemand.c7
-rw-r--r--fs/ceph/addr.c1259
-rw-r--r--fs/ceph/dir.c45
-rw-r--r--fs/ceph/inode.c31
-rw-r--r--fs/ceph/mds_client.c2
-rw-r--r--fs/ceph/mds_client.h3
-rw-r--r--fs/ceph/super.c11
-rw-r--r--fs/ceph/super.h2
-rw-r--r--fs/coda/dir.c14
-rw-r--r--fs/configfs/dir.c6
-rw-r--r--fs/coredump.c57
-rw-r--r--fs/crypto/Kconfig21
-rw-r--r--fs/crypto/crypto.c22
-rw-r--r--fs/crypto/hkdf.c85
-rw-r--r--fs/crypto/inline_crypt.c4
-rw-r--r--fs/dax.c111
-rw-r--r--fs/dcache.c70
-rw-r--r--fs/devpts/inode.c251
-rw-r--r--fs/dlm/config.h2
-rw-r--r--fs/dlm/lock.c2
-rw-r--r--fs/dlm/lockspace.c2
-rw-r--r--fs/dlm/lowcomms.c4
-rw-r--r--fs/drop_caches.c23
-rw-r--r--fs/ecryptfs/inode.c20
-rw-r--r--fs/ecryptfs/super.c1
-rw-r--r--fs/efivarfs/super.c52
-rw-r--r--fs/erofs/Kconfig14
-rw-r--r--fs/erofs/compress.h2
-rw-r--r--fs/erofs/data.c148
-rw-r--r--fs/erofs/decompressor.c95
-rw-r--r--fs/erofs/decompressor_deflate.c8
-rw-r--r--fs/erofs/decompressor_lzma.c8
-rw-r--r--fs/erofs/decompressor_zstd.c8
-rw-r--r--fs/erofs/dir.c9
-rw-r--r--fs/erofs/erofs_fs.h191
-rw-r--r--fs/erofs/fileio.c2
-rw-r--r--fs/erofs/fscache.c2
-rw-r--r--fs/erofs/inode.c125
-rw-r--r--fs/erofs/internal.h47
-rw-r--r--fs/erofs/namei.c2
-rw-r--r--fs/erofs/super.c85
-rw-r--r--fs/erofs/sysfs.c2
-rw-r--r--fs/erofs/xattr.c12
-rw-r--r--fs/erofs/zdata.c102
-rw-r--r--fs/erofs/zmap.c286
-rw-r--r--fs/eventfd.c5
-rw-r--r--fs/eventpoll.c16
-rw-r--r--fs/exec.c2
-rw-r--r--fs/exfat/balloc.c10
-rw-r--r--fs/exfat/exfat_fs.h2
-rw-r--r--fs/exfat/fatent.c11
-rw-r--r--fs/exfat/file.c2
-rw-r--r--fs/exfat/namei.c15
-rw-r--r--fs/exportfs/expfs.c2
-rw-r--r--fs/ext2/namei.c9
-rw-r--r--fs/ext4/balloc.c4
-rw-r--r--fs/ext4/bitmap.c8
-rw-r--r--fs/ext4/dir.c7
-rw-r--r--fs/ext4/ext4.h94
-rw-r--r--fs/ext4/ext4_jbd2.c12
-rw-r--r--fs/ext4/ext4_jbd2.h113
-rw-r--r--fs/ext4/extents.c531
-rw-r--r--fs/ext4/extents_status.c1
-rw-r--r--fs/ext4/file.c30
-rw-r--r--fs/ext4/fsync.c12
-rw-r--r--fs/ext4/hash.c2
-rw-r--r--fs/ext4/ialloc.c9
-rw-r--r--fs/ext4/inline.c205
-rw-r--r--fs/ext4/inode.c292
-rw-r--r--fs/ext4/ioctl.c13
-rw-r--r--fs/ext4/mballoc-test.c2
-rw-r--r--fs/ext4/mballoc.c8
-rw-r--r--fs/ext4/mmp.c6
-rw-r--r--fs/ext4/namei.c127
-rw-r--r--fs/ext4/orphan.c2
-rw-r--r--fs/ext4/page-io.c77
-rw-r--r--fs/ext4/resize.c4
-rw-r--r--fs/ext4/super.c267
-rw-r--r--fs/ext4/sysfs.c4
-rw-r--r--fs/ext4/xattr.c47
-rw-r--r--fs/ext4/xattr.h10
-rw-r--r--fs/f2fs/checkpoint.c71
-rw-r--r--fs/f2fs/compress.c1
-rw-r--r--fs/f2fs/data.c192
-rw-r--r--fs/f2fs/debug.c3
-rw-r--r--fs/f2fs/dir.c2
-rw-r--r--fs/f2fs/f2fs.h155
-rw-r--r--fs/f2fs/file.c126
-rw-r--r--fs/f2fs/gc.c42
-rw-r--r--fs/f2fs/inline.c22
-rw-r--r--fs/f2fs/inode.c33
-rw-r--r--fs/f2fs/namei.c22
-rw-r--r--fs/f2fs/node.c450
-rw-r--r--fs/f2fs/node.h13
-rw-r--r--fs/f2fs/segment.c55
-rw-r--r--fs/f2fs/segment.h9
-rw-r--r--fs/f2fs/shrinker.c92
-rw-r--r--fs/f2fs/super.c191
-rw-r--r--fs/f2fs/sysfs.c139
-rw-r--r--fs/f2fs/xattr.c8
-rw-r--r--fs/fat/namei_msdos.c8
-rw-r--r--fs/fat/namei_vfat.c8
-rw-r--r--fs/file.c133
-rw-r--r--fs/file_table.c73
-rw-r--r--fs/fs-writeback.c30
-rw-r--r--fs/fsopen.c2
-rw-r--r--fs/fuse/dev.c17
-rw-r--r--fs/fuse/dev_uring.c4
-rw-r--r--fs/fuse/dir.c50
-rw-r--r--fs/gfs2/bmap.c3
-rw-r--r--fs/gfs2/file.c8
-rw-r--r--fs/gfs2/glock.c124
-rw-r--r--fs/gfs2/incore.h4
-rw-r--r--fs/gfs2/inode.c9
-rw-r--r--fs/gfs2/lops.c78
-rw-r--r--fs/gfs2/meta_io.c15
-rw-r--r--fs/gfs2/super.c24
-rw-r--r--fs/gfs2/trace_gfs2.h10
-rw-r--r--fs/gfs2/trans.c4
-rw-r--r--fs/hfs/dir.c10
-rw-r--r--fs/hfsplus/dir.c6
-rw-r--r--fs/hostfs/hostfs_kern.c16
-rw-r--r--fs/hpfs/namei.c10
-rw-r--r--fs/hugetlbfs/inode.c6
-rw-r--r--fs/init.c7
-rw-r--r--fs/inode.c127
-rw-r--r--fs/internal.h11
-rw-r--r--fs/ioctl.c10
-rw-r--r--fs/iomap/Makefile1
-rw-r--r--fs/iomap/buffered-io.c356
-rw-r--r--fs/iomap/direct-io.c279
-rw-r--r--fs/iomap/fiemap.c21
-rw-r--r--fs/iomap/internal.h10
-rw-r--r--fs/iomap/ioend.c216
-rw-r--r--fs/iomap/iter.c97
-rw-r--r--fs/iomap/seek.c16
-rw-r--r--fs/iomap/swapfile.c7
-rw-r--r--fs/iomap/trace.h8
-rw-r--r--fs/jbd2/commit.c10
-rw-r--r--fs/jbd2/journal.c34
-rw-r--r--fs/jbd2/recovery.c80
-rw-r--r--fs/jbd2/revoke.c21
-rw-r--r--fs/jbd2/transaction.c21
-rw-r--r--fs/jffs2/dir.c18
-rw-r--r--fs/jfs/inode.c2
-rw-r--r--fs/jfs/jfs_dmap.c39
-rw-r--r--fs/jfs/jfs_dtree.c3
-rw-r--r--fs/jfs/jfs_extent.c10
-rw-r--r--fs/jfs/jfs_imap.c17
-rw-r--r--fs/jfs/namei.c8
-rw-r--r--fs/jfs/super.c6
-rw-r--r--fs/jfs/xattr.c15
-rw-r--r--fs/kernfs/dir.c12
-rw-r--r--fs/libfs.c6
-rw-r--r--fs/minix/namei.c8
-rw-r--r--fs/mnt_idmapping.c51
-rw-r--r--fs/mount.h37
-rw-r--r--fs/mpage.c49
-rw-r--r--fs/namei.c149
-rw-r--r--fs/namespace.c852
-rw-r--r--fs/netfs/direct_read.c6
-rw-r--r--fs/netfs/read_collect.c18
-rw-r--r--fs/netfs/rolling_buffer.c4
-rw-r--r--fs/netfs/write_collect.c3
-rw-r--r--fs/nfs/dir.c20
-rw-r--r--fs/nfs/file.c3
-rw-r--r--fs/nfs/internal.h4
-rw-r--r--fs/nfs/nfs3proc.c29
-rw-r--r--fs/nfs/nfs4proc.c47
-rw-r--r--fs/nfs/proc.c12
-rw-r--r--fs/nfsd/nfs4recover.c7
-rw-r--r--fs/nfsd/vfs.c34
-rw-r--r--fs/nilfs2/namei.c8
-rw-r--r--fs/notify/fanotify/fanotify.c38
-rw-r--r--fs/notify/fanotify/fanotify.h18
-rw-r--r--fs/notify/fanotify/fanotify_user.c89
-rw-r--r--fs/notify/fdinfo.c5
-rw-r--r--fs/notify/fsnotify.c47
-rw-r--r--fs/notify/fsnotify.h11
-rw-r--r--fs/notify/mark.c14
-rw-r--r--fs/nsfs.c32
-rw-r--r--fs/ntfs3/namei.c8
-rw-r--r--fs/ocfs2/dlmfs/dlmfs.c10
-rw-r--r--fs/ocfs2/namei.c10
-rw-r--r--fs/omfs/dir.c6
-rw-r--r--fs/open.c44
-rw-r--r--fs/orangefs/file.c4
-rw-r--r--fs/orangefs/inode.c149
-rw-r--r--fs/orangefs/namei.c8
-rw-r--r--fs/orangefs/orangefs-bufmap.c25
-rw-r--r--fs/orangefs/orangefs-bufmap.h3
-rw-r--r--fs/orangefs/orangefs-debug.h43
-rw-r--r--fs/orangefs/orangefs-debugfs.c43
-rw-r--r--fs/overlayfs/dir.c46
-rw-r--r--fs/overlayfs/overlayfs.h15
-rw-r--r--fs/overlayfs/params.c25
-rw-r--r--fs/overlayfs/super.c23
-rw-r--r--fs/pidfs.c247
-rw-r--r--fs/pipe.c213
-rw-r--r--fs/pnode.c14
-rw-r--r--fs/pnode.h2
-rw-r--r--fs/proc/base.c55
-rw-r--r--fs/proc/generic.c10
-rw-r--r--fs/proc/inode.c6
-rw-r--r--fs/proc/internal.h14
-rw-r--r--fs/proc/kcore.c12
-rw-r--r--fs/pstore/inode.c111
-rw-r--r--fs/pstore/internal.h4
-rw-r--r--fs/pstore/platform.c11
-rw-r--r--fs/ramfs/inode.c6
-rw-r--r--fs/read_write.c13
-rw-r--r--fs/signalfd.c7
-rw-r--r--fs/smb/client/cifsacl.c34
-rw-r--r--fs/smb/client/cifsfs.h4
-rw-r--r--fs/smb/client/cifssmb.c46
-rw-r--r--fs/smb/client/connect.c16
-rw-r--r--fs/smb/client/file.c2
-rw-r--r--fs/smb/client/fs_context.c18
-rw-r--r--fs/smb/client/inode.c10
-rw-r--r--fs/smb/client/smb2pdu.c96
-rw-r--r--fs/smb/common/smbacl.h3
-rw-r--r--fs/smb/server/connection.c20
-rw-r--r--fs/smb/server/connection.h2
-rw-r--r--fs/smb/server/ksmbd_work.c3
-rw-r--r--fs/smb/server/ksmbd_work.h1
-rw-r--r--fs/smb/server/oplock.c43
-rw-r--r--fs/smb/server/oplock.h1
-rw-r--r--fs/smb/server/server.c14
-rw-r--r--fs/smb/server/smb2pdu.c8
-rw-r--r--fs/smb/server/smbacl.c52
-rw-r--r--fs/smb/server/smbacl.h2
-rw-r--r--fs/smb/server/transport_ipc.c1
-rw-r--r--fs/smb/server/vfs.c58
-rw-r--r--fs/splice.c60
-rw-r--r--fs/squashfs/cache.c2
-rw-r--r--fs/super.c57
-rw-r--r--fs/sysv/Kconfig38
-rw-r--r--fs/sysv/Makefile9
-rw-r--r--fs/sysv/balloc.c240
-rw-r--r--fs/sysv/dir.c378
-rw-r--r--fs/sysv/file.c59
-rw-r--r--fs/sysv/ialloc.c235
-rw-r--r--fs/sysv/inode.c354
-rw-r--r--fs/sysv/itree.c511
-rw-r--r--fs/sysv/namei.c280
-rw-r--r--fs/sysv/super.c595
-rw-r--r--fs/sysv/sysv.h245
-rw-r--r--fs/timerfd.c11
-rw-r--r--fs/tracefs/inode.c10
-rw-r--r--fs/ubifs/dir.c10
-rw-r--r--fs/ubifs/io.c3
-rw-r--r--fs/udf/namei.c12
-rw-r--r--fs/ufs/namei.c8
-rw-r--r--fs/unicode/Kconfig5
-rw-r--r--fs/unicode/Makefile2
-rw-r--r--fs/unicode/tests/.kunitconfig3
-rw-r--r--fs/unicode/tests/utf8_kunit.c (renamed from fs/unicode/utf8-selftest.c)153
-rw-r--r--fs/unicode/utf8-norm.c2
-rw-r--r--fs/vboxsf/dir.c8
-rw-r--r--fs/vboxsf/super.c3
-rw-r--r--fs/verity/Kconfig8
-rw-r--r--fs/xfs/Makefile7
-rw-r--r--fs/xfs/libxfs/xfs_ag.c2
-rw-r--r--fs/xfs/libxfs/xfs_alloc.c8
-rw-r--r--fs/xfs/libxfs/xfs_bmap.c316
-rw-r--r--fs/xfs/libxfs/xfs_bmap.h7
-rw-r--r--fs/xfs/libxfs/xfs_format.h20
-rw-r--r--fs/xfs/libxfs/xfs_fs.h14
-rw-r--r--fs/xfs/libxfs/xfs_group.h31
-rw-r--r--fs/xfs/libxfs/xfs_ialloc.c4
-rw-r--r--fs/xfs/libxfs/xfs_inode_buf.c23
-rw-r--r--fs/xfs/libxfs/xfs_inode_util.c1
-rw-r--r--fs/xfs/libxfs/xfs_log_format.h7
-rw-r--r--fs/xfs/libxfs/xfs_metafile.c167
-rw-r--r--fs/xfs/libxfs/xfs_metafile.h6
-rw-r--r--fs/xfs/libxfs/xfs_ondisk.h6
-rw-r--r--fs/xfs/libxfs/xfs_rtbitmap.c11
-rw-r--r--fs/xfs/libxfs/xfs_rtgroup.c39
-rw-r--r--fs/xfs/libxfs/xfs_rtgroup.h50
-rw-r--r--fs/xfs/libxfs/xfs_rtrmap_btree.c19
-rw-r--r--fs/xfs/libxfs/xfs_rtrmap_btree.h2
-rw-r--r--fs/xfs/libxfs/xfs_sb.c81
-rw-r--r--fs/xfs/libxfs/xfs_types.h28
-rw-r--r--fs/xfs/libxfs/xfs_zones.c186
-rw-r--r--fs/xfs/libxfs/xfs_zones.h35
-rw-r--r--fs/xfs/scrub/agheader.c2
-rw-r--r--fs/xfs/scrub/bmap.c4
-rw-r--r--fs/xfs/scrub/fscounters.c22
-rw-r--r--fs/xfs/scrub/fscounters_repair.c12
-rw-r--r--fs/xfs/scrub/inode.c7
-rw-r--r--fs/xfs/scrub/inode_repair.c7
-rw-r--r--fs/xfs/scrub/newbt.c2
-rw-r--r--fs/xfs/scrub/orphanage.c9
-rw-r--r--fs/xfs/scrub/reap.c9
-rw-r--r--fs/xfs/scrub/repair.c37
-rw-r--r--fs/xfs/scrub/rtbitmap.c11
-rw-r--r--fs/xfs/scrub/rtrefcount_repair.c34
-rw-r--r--fs/xfs/scrub/rtrmap_repair.c29
-rw-r--r--fs/xfs/scrub/scrub.c2
-rw-r--r--fs/xfs/xfs_aops.c194
-rw-r--r--fs/xfs/xfs_aops.h3
-rw-r--r--fs/xfs/xfs_bmap_util.c32
-rw-r--r--fs/xfs/xfs_bmap_util.h12
-rw-r--r--fs/xfs/xfs_buf.c732
-rw-r--r--fs/xfs/xfs_buf.h36
-rw-r--r--fs/xfs/xfs_buf_item.c114
-rw-r--r--fs/xfs/xfs_buf_item_recover.c8
-rw-r--r--fs/xfs/xfs_buf_mem.c45
-rw-r--r--fs/xfs/xfs_buf_mem.h6
-rw-r--r--fs/xfs/xfs_discard.c3
-rw-r--r--fs/xfs/xfs_extent_busy.c2
-rw-r--r--fs/xfs/xfs_extfree_item.c35
-rw-r--r--fs/xfs/xfs_file.c364
-rw-r--r--fs/xfs/xfs_fsmap.c86
-rw-r--r--fs/xfs/xfs_fsops.c67
-rw-r--r--fs/xfs/xfs_fsops.h3
-rw-r--r--fs/xfs/xfs_icache.c6
-rw-r--r--fs/xfs/xfs_inode.c6
-rw-r--r--fs/xfs/xfs_inode.h28
-rw-r--r--fs/xfs/xfs_inode_item.c1
-rw-r--r--fs/xfs/xfs_inode_item_recover.c1
-rw-r--r--fs/xfs/xfs_ioctl.c12
-rw-r--r--fs/xfs/xfs_iomap.c532
-rw-r--r--fs/xfs/xfs_iomap.h7
-rw-r--r--fs/xfs/xfs_iops.c35
-rw-r--r--fs/xfs/xfs_log.c4
-rw-r--r--fs/xfs/xfs_log_recover.c2
-rw-r--r--fs/xfs/xfs_message.c4
-rw-r--r--fs/xfs/xfs_message.h1
-rw-r--r--fs/xfs/xfs_mount.c217
-rw-r--r--fs/xfs/xfs_mount.h131
-rw-r--r--fs/xfs/xfs_qm.c3
-rw-r--r--fs/xfs/xfs_reflink.c18
-rw-r--r--fs/xfs/xfs_rtalloc.c244
-rw-r--r--fs/xfs/xfs_rtalloc.h5
-rw-r--r--fs/xfs/xfs_super.c168
-rw-r--r--fs/xfs/xfs_sysfs.c75
-rw-r--r--fs/xfs/xfs_sysfs.h5
-rw-r--r--fs/xfs/xfs_trace.c2
-rw-r--r--fs/xfs/xfs_trace.h219
-rw-r--r--fs/xfs/xfs_zone_alloc.c1220
-rw-r--r--fs/xfs/xfs_zone_alloc.h70
-rw-r--r--fs/xfs/xfs_zone_gc.c1165
-rw-r--r--fs/xfs/xfs_zone_info.c105
-rw-r--r--fs/xfs/xfs_zone_priv.h119
-rw-r--r--fs/xfs/xfs_zone_space_resv.c263
-rw-r--r--fs/zonefs/file.c2
-rw-r--r--include/acpi/actbl3.h1
-rw-r--r--include/acpi/processor.h5
-rw-r--r--include/asm-generic/io.h6
-rw-r--r--include/asm-generic/iomap.h36
-rw-r--r--include/asm-generic/mshyperv.h72
-rw-r--r--include/asm-generic/rwonce.h10
-rw-r--r--include/asm-generic/sections.h2
-rw-r--r--include/asm-generic/vdso/vsyscall.h27
-rw-r--r--include/asm-generic/vmlinux.lds.h49
-rw-r--r--include/crypto/hkdf.h20
-rw-r--r--include/drm/drm_util.h16
-rw-r--r--include/dt-bindings/clock/rockchip,rk3528-cru.h453
-rw-r--r--include/dt-bindings/clock/rockchip,rk3576-cru.h5
-rw-r--r--include/dt-bindings/clock/samsung,exynos990.h21
-rw-r--r--include/dt-bindings/clock/xlnx-zynqmp-clk.h7
-rw-r--r--include/dt-bindings/power/allwinner,sun8i-v853-ppu.h10
-rw-r--r--include/dt-bindings/power/qcom-rpmpd.h2
-rw-r--r--include/dt-bindings/power/thead,th1520-power.h19
-rw-r--r--include/dt-bindings/reset/rockchip,rk3528-cru.h241
-rw-r--r--include/dt-bindings/soc/samsung,exynos-usi.h17
-rw-r--r--include/dt-bindings/sound/qcom,wcd934x.h16
-rw-r--r--include/hyperv/hvgdk_mini.h83
-rw-r--r--include/hyperv/hvhdk.h132
-rw-r--r--include/hyperv/hvhdk_mini.h91
-rw-r--r--include/kunit/test.h20
-rw-r--r--include/kvm/arm_pmu.h17
-rw-r--r--include/kvm/arm_vgic.h10
-rw-r--r--include/linux/acpi.h1
-rw-r--r--include/linux/align.h10
-rw-r--r--include/linux/arm-smccc.h55
-rw-r--r--include/linux/arm_ffa.h22
-rw-r--r--include/linux/avf/virtchnl.h139
-rw-r--r--include/linux/badblocks.h10
-rw-r--r--include/linux/binfmts.h2
-rw-r--r--include/linux/bio-integrity.h25
-rw-r--r--include/linux/bio.h4
-rw-r--r--include/linux/bitmap.h8
-rw-r--r--include/linux/bits.h2
-rw-r--r--include/linux/blk-crypto-profile.h73
-rw-r--r--include/linux/blk-crypto.h73
-rw-r--r--include/linux/blk-mq.h27
-rw-r--r--include/linux/blkdev.h23
-rw-r--r--include/linux/buffer_head.h1
-rw-r--r--include/linux/cache.h9
-rw-r--r--include/linux/capability.h5
-rw-r--r--include/linux/cfi.h2
-rw-r--r--include/linux/cgroup-defs.h5
-rw-r--r--include/linux/cgroup.h3
-rw-r--r--include/linux/cleanup.h24
-rw-r--r--include/linux/compaction.h5
-rw-r--r--include/linux/compiler.h28
-rw-r--r--include/linux/compiler_types.h23
-rw-r--r--include/linux/console.h8
-rw-r--r--include/linux/cpu_rmap.h1
-rw-r--r--include/linux/cpufreq.h26
-rw-r--r--include/linux/cpumask.h71
-rw-r--r--include/linux/cpuset.h11
-rw-r--r--include/linux/crc-t10dif.h12
-rw-r--r--include/linux/crc32.h55
-rw-r--r--include/linux/crc32c.h8
-rw-r--r--include/linux/crc64.h38
-rw-r--r--include/linux/crc7.h7
-rw-r--r--include/linux/cred.h10
-rw-r--r--include/linux/damon.h5
-rw-r--r--include/linux/dcache.h46
-rw-r--r--include/linux/device.h128
-rw-r--r--include/linux/device/devres.h129
-rw-r--r--include/linux/dma-direct.h13
-rw-r--r--include/linux/edac.h215
-rw-r--r--include/linux/energy_model.h22
-rw-r--r--include/linux/err.h3
-rw-r--r--include/linux/ethtool.h13
-rw-r--r--include/linux/execmem.h31
-rw-r--r--include/linux/fanotify.h12
-rw-r--r--include/linux/file_ref.h48
-rw-r--r--include/linux/filter.h1
-rw-r--r--include/linux/firmware/samsung/exynos-acpm-protocol.h49
-rw-r--r--include/linux/firmware/thead/thead,th1520-aon.h200
-rw-r--r--include/linux/fs.h72
-rw-r--r--include/linux/fs_context.h2
-rw-r--r--include/linux/fscrypt.h12
-rw-r--r--include/linux/fsnotify.h41
-rw-r--r--include/linux/fsnotify_backend.h42
-rw-r--r--include/linux/ftrace_regs.h5
-rw-r--r--include/linux/gpio.h4
-rw-r--r--include/linux/gpio/consumer.h80
-rw-r--r--include/linux/gpio/driver.h92
-rw-r--r--include/linux/gpio/regmap.h4
-rw-r--r--include/linux/hid.h8
-rw-r--r--include/linux/hrtimer.h8
-rw-r--r--include/linux/hugetlb.h5
-rw-r--r--include/linux/hyperv.h57
-rw-r--r--include/linux/idr.h17
-rw-r--r--include/linux/ieee80211.h12
-rw-r--r--include/linux/if_bridge.h6
-rw-r--r--include/linux/if_ether.h3
-rw-r--r--include/linux/if_macvlan.h6
-rw-r--r--include/linux/interrupt.h16
-rw-r--r--include/linux/io-64-nonatomic-hi-lo.h16
-rw-r--r--include/linux/io-64-nonatomic-lo-hi.h16
-rw-r--r--include/linux/io.h5
-rw-r--r--include/linux/iomap.h116
-rw-r--r--include/linux/iommu.h61
-rw-r--r--include/linux/ipv6.h1
-rw-r--r--include/linux/irq.h7
-rw-r--r--include/linux/irqchip/irq-davinci-cp-intc.h25
-rw-r--r--include/linux/irqdomain.h137
-rw-r--r--include/linux/jbd2.h24
-rw-r--r--include/linux/kexec.h2
-rw-r--r--include/linux/key.h1
-rw-r--r--include/linux/kstrtox.h1
-rw-r--r--include/linux/kvm_host.h2
-rw-r--r--include/linux/libata.h2
-rw-r--r--include/linux/log2.h2
-rw-r--r--include/linux/lsm_audit.h10
-rw-r--r--include/linux/lsm_hook_defs.h3
-rw-r--r--include/linux/mem_encrypt.h23
-rw-r--r--include/linux/misc_cgroup.h6
-rw-r--r--include/linux/mlx4/device.h2
-rw-r--r--include/linux/mlx5/device.h13
-rw-r--r--include/linux/mlx5/driver.h6
-rw-r--r--include/linux/mlx5/eswitch.h2
-rw-r--r--include/linux/mlx5/fs.h13
-rw-r--r--include/linux/mlx5/mlx5_ifc.h64
-rw-r--r--include/linux/mlx5/port.h85
-rw-r--r--include/linux/mm.h34
-rw-r--r--include/linux/mman.h2
-rw-r--r--include/linux/mmc/slot-gpio.h1
-rw-r--r--include/linux/mnt_idmapping.h5
-rw-r--r--include/linux/mod_devicetable.h2
-rw-r--r--include/linux/module.h16
-rw-r--r--include/linux/moduleloader.h4
-rw-r--r--include/linux/msi.h69
-rw-r--r--include/linux/mtd/nand-qpic-common.h13
-rw-r--r--include/linux/mtd/nand.h2
-rw-r--r--include/linux/mtd/spinand.h129
-rw-r--r--include/linux/namei.h45
-rw-r--r--include/linux/net/intel/iidc.h2
-rw-r--r--include/linux/netdev_features.h8
-rw-r--r--include/linux/netdevice.h179
-rw-r--r--include/linux/netpoll.h7
-rw-r--r--include/linux/nfs_xdr.h2
-rw-r--r--include/linux/nmi.h4
-rw-r--r--include/linux/nodemask.h8
-rw-r--r--include/linux/nodemask_types.h11
-rw-r--r--include/linux/numa.h17
-rw-r--r--include/linux/nvme-auth.h7
-rw-r--r--include/linux/nvme-keyring.h12
-rw-r--r--include/linux/nvme.h7
-rw-r--r--include/linux/objpool.h7
-rw-r--r--include/linux/objtool.h4
-rw-r--r--include/linux/of.h18
-rw-r--r--include/linux/page-flags.h18
-rw-r--r--include/linux/pagemap.h57
-rw-r--r--include/linux/pci_ids.h1
-rw-r--r--include/linux/pcs/pcs-xpcs.h3
-rw-r--r--include/linux/percpu-defs.h17
-rw-r--r--include/linux/percpu-rwsem.h8
-rw-r--r--include/linux/perf/arm_pmu.h17
-rw-r--r--include/linux/perf_event.h102
-rw-r--r--include/linux/phy.h265
-rw-r--r--include/linux/phylink.h49
-rw-r--r--include/linux/pid.h7
-rw-r--r--include/linux/pidfs.h1
-rw-r--r--include/linux/pipe_fs_i.h90
-rw-r--r--include/linux/platform_data/huawei-gaokun-ec.h79
-rw-r--r--include/linux/platform_data/x86/intel_pmc_ipc.h94
-rw-r--r--include/linux/platform_profile.h5
-rw-r--r--include/linux/pm.h9
-rw-r--r--include/linux/pm_clock.h5
-rw-r--r--include/linux/pm_domain.h7
-rw-r--r--include/linux/pm_runtime.h33
-rw-r--r--include/linux/pm_wakeup.h6
-rw-r--r--include/linux/pnp.h2
-rw-r--r--include/linux/posix-clock.h6
-rw-r--r--include/linux/posix-timers.h30
-rw-r--r--include/linux/ppp_channel.h3
-rw-r--r--include/linux/preempt.h3
-rw-r--r--include/linux/printk.h6
-rw-r--r--include/linux/proc_fs.h7
-rw-r--r--include/linux/pwm.h4
-rw-r--r--include/linux/qed/qed_ll2_if.h2
-rw-r--r--include/linux/rcupdate.h58
-rw-r--r--include/linux/rcupdate_wait.h3
-rw-r--r--include/linux/rcutiny.h36
-rw-r--r--include/linux/rcutree.h5
-rw-r--r--include/linux/regmap.h7
-rw-r--r--include/linux/regulator/consumer.h6
-rw-r--r--include/linux/resctrl.h212
-rw-r--r--include/linux/resctrl_types.h54
-rw-r--r--include/linux/rv.h4
-rw-r--r--include/linux/sched.h25
-rw-r--r--include/linux/sched/deadline.h4
-rw-r--r--include/linux/sched/debug.h2
-rw-r--r--include/linux/sched/ext.h1
-rw-r--r--include/linux/sched/idle.h23
-rw-r--r--include/linux/sched/mm.h7
-rw-r--r--include/linux/sched/signal.h3
-rw-r--r--include/linux/sched/topology.h14
-rw-r--r--include/linux/sctp.h2
-rw-r--r--include/linux/seccomp.h12
-rw-r--r--include/linux/security.h10
-rw-r--r--include/linux/sizes.h8
-rw-r--r--include/linux/skbuff.h50
-rw-r--r--include/linux/slab.h16
-rw-r--r--include/linux/soc/apple/rtkit.h2
-rw-r--r--include/linux/spi/offload/consumer.h39
-rw-r--r--include/linux/spi/offload/provider.h47
-rw-r--r--include/linux/spi/offload/types.h100
-rw-r--r--include/linux/spi/spi.h56
-rw-r--r--include/linux/srcu.h102
-rw-r--r--include/linux/srcutiny.h29
-rw-r--r--include/linux/srcutree.h98
-rw-r--r--include/linux/stmmac.h15
-rw-r--r--include/linux/string.h16
-rw-r--r--include/linux/string_choices.h24
-rw-r--r--include/linux/swap.h9
-rw-r--r--include/linux/swap_cgroup.h4
-rw-r--r--include/linux/syscalls.h8
-rw-r--r--include/linux/sysv_fs.h214
-rw-r--r--include/linux/tcp.h5
-rw-r--r--include/linux/thread_info.h48
-rw-r--r--include/linux/time_namespace.h2
-rw-r--r--include/linux/topology.h53
-rw-r--r--include/linux/torture.h1
-rw-r--r--include/linux/tpm.h1
-rw-r--r--include/linux/trace_events.h18
-rw-r--r--include/linux/uaccess.h2
-rw-r--r--include/linux/ucopysize.h63
-rw-r--r--include/linux/uidgid.h6
-rw-r--r--include/linux/uio.h2
-rw-r--r--include/linux/unroll.h44
-rw-r--r--include/linux/uprobes.h3
-rw-r--r--include/linux/usb/mctp-usb.h30
-rw-r--r--include/linux/usb/r8152.h1
-rw-r--r--include/linux/util_macros.h15
-rw-r--r--include/linux/vdso_datastore.h10
-rw-r--r--include/linux/vfsdebug.h45
-rw-r--r--include/linux/vm_event_item.h2
-rw-r--r--include/linux/vmcore_info.h3
-rw-r--r--include/linux/vmstat.h11
-rw-r--r--include/linux/wait.h9
-rw-r--r--include/linux/writeback.h4
-rw-r--r--include/linux/zstd.h87
-rw-r--r--include/linux/zstd_errors.h30
-rw-r--r--include/linux/zstd_lib.h1123
-rw-r--r--include/media/rc-core.h46
-rw-r--r--include/media/rc-map.h1
-rw-r--r--include/media/tuner-types.h6
-rw-r--r--include/media/v4l2-common.h19
-rw-r--r--include/media/v4l2-ctrls.h50
-rw-r--r--include/media/v4l2-dv-timings.h1
-rw-r--r--include/media/v4l2-ioctl.h12
-rw-r--r--include/media/v4l2-mediabus.h2
-rw-r--r--include/media/v4l2-subdev.h4
-rw-r--r--include/net/af_rxrpc.h2
-rw-r--r--include/net/af_unix.h81
-rw-r--r--include/net/ax25.h1
-rw-r--r--include/net/bluetooth/bluetooth.h1
-rw-r--r--include/net/bluetooth/hci.h36
-rw-r--r--include/net/bluetooth/hci_core.h135
-rw-r--r--include/net/bluetooth/l2cap.h7
-rw-r--r--include/net/bluetooth/mgmt.h1
-rw-r--r--include/net/bonding.h1
-rw-r--r--include/net/busy_poll.h21
-rw-r--r--include/net/cfg80211.h88
-rw-r--r--include/net/dropreason-core.h9
-rw-r--r--include/net/dropreason.h6
-rw-r--r--include/net/dst_metadata.h7
-rw-r--r--include/net/fib_rules.h27
-rw-r--r--include/net/gro.h38
-rw-r--r--include/net/hotdata.h1
-rw-r--r--include/net/inet6_connection_sock.h2
-rw-r--r--include/net/inet6_hashtables.h2
-rw-r--r--include/net/inet_connection_sock.h33
-rw-r--r--include/net/inet_frag.h6
-rw-r--r--include/net/inet_hashtables.h11
-rw-r--r--include/net/ip.h26
-rw-r--r--include/net/ip_fib.h2
-rw-r--r--include/net/ip_tunnels.h12
-rw-r--r--include/net/ipv6.h22
-rw-r--r--include/net/ipv6_frag.h5
-rw-r--r--include/net/libeth/rx.h47
-rw-r--r--include/net/lwtunnel.h12
-rw-r--r--include/net/mac80211.h38
-rw-r--r--include/net/mana/gdma.h11
-rw-r--r--include/net/mctp.h2
-rw-r--r--include/net/mptcp.h19
-rw-r--r--include/net/net_namespace.h3
-rw-r--r--include/net/netdev_lock.h101
-rw-r--r--include/net/netdev_netlink.h12
-rw-r--r--include/net/netdev_queues.h5
-rw-r--r--include/net/netdev_rx_queue.h2
-rw-r--r--include/net/netfilter/nf_tables.h4
-rw-r--r--include/net/netfilter/nft_fib.h21
-rw-r--r--include/net/netlink.h15
-rw-r--r--include/net/netns/ipv4.h4
-rw-r--r--include/net/rps.h2
-rw-r--r--include/net/rtnetlink.h40
-rw-r--r--include/net/sctp/checksum.h7
-rw-r--r--include/net/sock.h35
-rw-r--r--include/net/tcp.h139
-rw-r--r--include/net/xdp.h1
-rw-r--r--include/net/xdp_sock.h10
-rw-r--r--include/net/xdp_sock_drv.h44
-rw-r--r--include/net/xfrm.h21
-rw-r--r--include/net/xsk_buff_pool.h8
-rw-r--r--include/scsi/libiscsi_tcp.h16
-rw-r--r--include/scsi/scsi_device.h9
-rw-r--r--include/scsi/scsi_proto.h4
-rw-r--r--include/soc/qcom/ice.h3
-rw-r--r--include/soc/rockchip/rockchip_sip.h3
-rw-r--r--include/soc/tegra/bpmp-abi.h2
-rw-r--r--include/sound/hda-mlink.h25
-rw-r--r--include/sound/hda_codec.h1
-rw-r--r--include/sound/pcm_drm_eld.h91
-rw-r--r--include/sound/sdca.h22
-rw-r--r--include/sound/sdca_function.h1179
-rw-r--r--include/sound/sdca_regmap.h31
-rw-r--r--include/sound/simple_card_utils.h8
-rw-r--r--include/sound/soc-dai.h8
-rw-r--r--include/sound/soc-dapm.h61
-rw-r--r--include/sound/soc-dpcm.h8
-rw-r--r--include/sound/soc.h138
-rw-r--r--include/sound/sof/ipc4/header.h13
-rw-r--r--include/sound/tas2781-dsp.h30
-rw-r--r--include/sound/tas2781.h33
-rw-r--r--include/sound/wm8904.h3
-rw-r--r--include/trace/define_trace.h7
-rw-r--r--include/trace/events/afs.h83
-rw-r--r--include/trace/events/erofs.h2
-rw-r--r--include/trace/events/mmflags.h41
-rw-r--r--include/trace/events/osnoise.h96
-rw-r--r--include/trace/events/power.h37
-rw-r--r--include/trace/events/rcu.h36
-rw-r--r--include/trace/events/sched.h15
-rw-r--r--include/trace/events/sched_ext.h19
-rw-r--r--include/trace/events/scsi.h4
-rw-r--r--include/trace/events/target.h4
-rw-r--r--include/trace/events/tcp.h6
-rw-r--r--include/uapi/asm-generic/mman-common.h1
-rw-r--r--include/uapi/asm-generic/unistd.h4
-rw-r--r--include/uapi/linux/audit.h4
-rw-r--r--include/uapi/linux/batman_adv.h18
-rw-r--r--include/uapi/linux/bits.h8
-rw-r--r--include/uapi/linux/blk-crypto.h44
-rw-r--r--include/uapi/linux/bpf.h30
-rw-r--r--include/uapi/linux/btrfs.h16
-rw-r--r--include/uapi/linux/can.h3
-rw-r--r--include/uapi/linux/const.h2
-rw-r--r--include/uapi/linux/elf.h137
-rw-r--r--include/uapi/linux/errqueue.h1
-rw-r--r--include/uapi/linux/ethtool.h4
-rw-r--r--include/uapi/linux/f2fs.h7
-rw-r--r--include/uapi/linux/fanotify.h10
-rw-r--r--include/uapi/linux/fib_rules.h3
-rw-r--r--include/uapi/linux/firewire-cdev.h3
-rw-r--r--include/uapi/linux/fs.h6
-rw-r--r--include/uapi/linux/if_cablemodem.h23
-rw-r--r--include/uapi/linux/if_link.h7
-rw-r--r--include/uapi/linux/if_xdp.h10
-rw-r--r--include/uapi/linux/io_uring.h2
-rw-r--r--include/uapi/linux/kvm.h1
-rw-r--r--include/uapi/linux/landlock.h35
-rw-r--r--include/uapi/linux/mount.h10
-rw-r--r--include/uapi/linux/mshv.h291
-rw-r--r--include/uapi/linux/net_tstamp.h6
-rw-r--r--include/uapi/linux/netdev.h9
-rw-r--r--include/uapi/linux/nilfs2_ondisk.h3
-rw-r--r--include/uapi/linux/nl80211.h72
-rw-r--r--include/uapi/linux/perf_event.h2
-rw-r--r--include/uapi/linux/pidfd.h31
-rw-r--r--include/uapi/linux/prctl.h11
-rw-r--r--include/uapi/linux/rkisp1-config.h2
-rw-r--r--include/uapi/linux/rtnetlink.h1
-rw-r--r--include/uapi/linux/snmp.h13
-rw-r--r--include/uapi/linux/stddef.h6
-rw-r--r--include/uapi/linux/tcp.h12
-rw-r--r--include/uapi/linux/ublk_cmd.h7
-rw-r--r--include/uapi/linux/usb/ch9.h1
-rw-r--r--include/uapi/linux/usb/video.h1
-rw-r--r--include/uapi/linux/uvcvideo.h13
-rw-r--r--include/uapi/linux/v4l2-controls.h7
-rw-r--r--include/uapi/linux/videodev2.h5
-rw-r--r--include/uapi/linux/virtio_net.h13
-rw-r--r--include/uapi/scsi/cxlflash_ioctl.h276
-rw-r--r--include/uapi/sound/intel/avs/tokens.h11
-rw-r--r--include/ufs/ufs.h1
-rw-r--r--include/ufs/ufshcd.h22
-rw-r--r--include/vdso/align.h15
-rw-r--r--include/vdso/cache.h15
-rw-r--r--include/vdso/datapage.h120
-rw-r--r--include/vdso/helpers.h24
-rw-r--r--include/xen/interface/xen-mca.h2
-rw-r--r--init/.kunitconfig3
-rw-r--r--init/Kconfig22
-rw-r--r--init/Makefile1
-rw-r--r--init/initramfs.c66
-rw-r--r--init/initramfs_internal.h8
-rw-r--r--init/initramfs_test.c407
-rw-r--r--init/main.c1
-rw-r--r--io_uring/io_uring.c55
-rw-r--r--io_uring/io_uring.h1
-rw-r--r--io_uring/kbuf.c56
-rw-r--r--io_uring/napi.c4
-rw-r--r--io_uring/net.c5
-rw-r--r--io_uring/rsrc.c4
-rw-r--r--io_uring/rw.c7
-rw-r--r--io_uring/timeout.c13
-rw-r--r--kernel/Makefile5
-rw-r--r--kernel/audit_watch.c12
-rw-r--r--kernel/auditsc.c12
-rw-r--r--kernel/bpf/btf.c1
-rw-r--r--kernel/bpf/cpumap.c146
-rw-r--r--kernel/bpf/helpers.c3
-rw-r--r--kernel/bpf/inode.c8
-rw-r--r--kernel/bpf/offload.c11
-rw-r--r--kernel/bpf/verifier.c4
-rw-r--r--kernel/capability.c16
-rw-r--r--kernel/cfi.c4
-rw-r--r--kernel/cgroup/cgroup-internal.h1
-rw-r--r--kernel/cgroup/cgroup-v1.c7
-rw-r--r--kernel/cgroup/cgroup.c6
-rw-r--r--kernel/cgroup/cpuset-v1.c49
-rw-r--r--kernel/cgroup/cpuset.c79
-rw-r--r--kernel/cgroup/legacy_freezer.c6
-rw-r--r--kernel/cgroup/misc.c16
-rw-r--r--kernel/cgroup/rstat.c116
-rw-r--r--kernel/configs/hardening.config2
-rw-r--r--kernel/context_tracking.c9
-rw-r--r--kernel/cpu.c6
-rw-r--r--kernel/crash_core.c2
-rw-r--r--kernel/dma/direct.c28
-rw-r--r--kernel/entry/Makefile3
-rw-r--r--kernel/entry/common.c2
-rw-r--r--kernel/events/callchain.c38
-rw-r--r--kernel/events/core.c1097
-rw-r--r--kernel/events/hw_breakpoint.c5
-rw-r--r--kernel/events/ring_buffer.c3
-rw-r--r--kernel/events/uprobes.c12
-rw-r--r--kernel/exit.c64
-rw-r--r--kernel/fork.c25
-rw-r--r--kernel/futex/core.c21
-rw-r--r--kernel/iomem.c5
-rw-r--r--kernel/irq/Kconfig5
-rw-r--r--kernel/irq/chip.c77
-rw-r--r--kernel/irq/internals.h11
-rw-r--r--kernel/irq/irqdesc.c2
-rw-r--r--kernel/irq/irqdomain.c5
-rw-r--r--kernel/irq/manage.c7
-rw-r--r--kernel/irq/migration.c20
-rw-r--r--kernel/irq/msi.c21
-rw-r--r--kernel/kallsyms.c12
-rw-r--r--kernel/kcmp.c2
-rw-r--r--kernel/kexec_core.c4
-rw-r--r--kernel/livepatch/core.c9
-rw-r--r--kernel/locking/Makefile3
-rw-r--r--kernel/locking/lock_events_list.h28
-rw-r--r--kernel/locking/lockdep.c17
-rw-r--r--kernel/locking/mutex.c2
-rw-r--r--kernel/locking/rtmutex.c29
-rw-r--r--kernel/locking/rtmutex_common.h4
-rw-r--r--kernel/locking/semaphore.c13
-rw-r--r--kernel/module/main.c81
-rw-r--r--kernel/module/strict_rwx.c9
-rw-r--r--kernel/padata.c2
-rw-r--r--kernel/pid.c106
-rw-r--r--kernel/pid_namespace.c2
-rw-r--r--kernel/power/Kconfig3
-rw-r--r--kernel/power/energy_model.c67
-rw-r--r--kernel/power/hibernate.c22
-rw-r--r--kernel/power/snapshot.c16
-rw-r--r--kernel/power/suspend.c22
-rw-r--r--kernel/printk/internal.h1
-rw-r--r--kernel/printk/printk.c59
-rw-r--r--kernel/printk/printk_ringbuffer.c13
-rw-r--r--kernel/rcu/Kconfig35
-rw-r--r--kernel/rcu/Kconfig.debug18
-rw-r--r--kernel/rcu/rcu.h13
-rw-r--r--kernel/rcu/rcutorture.c124
-rw-r--r--kernel/rcu/refscale.c32
-rw-r--r--kernel/rcu/srcutiny.c20
-rw-r--r--kernel/rcu/srcutree.c207
-rw-r--r--kernel/rcu/tasks.h5
-rw-r--r--kernel/rcu/tiny.c29
-rw-r--r--kernel/rcu/tree.c73
-rw-r--r--kernel/rcu/tree_exp.h6
-rw-r--r--kernel/rcu/tree_nocb.h20
-rw-r--r--kernel/rcu/tree_plugin.h22
-rw-r--r--kernel/reboot.c1
-rw-r--r--kernel/rseq.c140
-rw-r--r--kernel/sched/Makefile5
-rw-r--r--kernel/sched/build_policy.c1
-rw-r--r--kernel/sched/build_utility.c4
-rw-r--r--kernel/sched/core.c172
-rw-r--r--kernel/sched/core_sched.c2
-rw-r--r--kernel/sched/cputime.c8
-rw-r--r--kernel/sched/deadline.c59
-rw-r--r--kernel/sched/debug.c18
-rw-r--r--kernel/sched/ext.c1084
-rw-r--r--kernel/sched/ext.h10
-rw-r--r--kernel/sched/ext_idle.c1171
-rw-r--r--kernel/sched/ext_idle.h35
-rw-r--r--kernel/sched/fair.c145
-rw-r--r--kernel/sched/rt.c14
-rw-r--r--kernel/sched/sched.h132
-rw-r--r--kernel/sched/stats.h2
-rw-r--r--kernel/sched/syscalls.c12
-rw-r--r--kernel/sched/topology.c45
-rw-r--r--kernel/seccomp.c49
-rw-r--r--kernel/signal.c110
-rw-r--r--kernel/static_call_inline.c2
-rw-r--r--kernel/stop_machine.c1
-rw-r--r--kernel/sys.c19
-rw-r--r--kernel/sysctl.c341
-rw-r--r--kernel/time/Makefile6
-rw-r--r--kernel/time/clocksource.c2
-rw-r--r--kernel/time/hrtimer.c34
-rw-r--r--kernel/time/namespace.c24
-rw-r--r--kernel/time/ntp.c3
-rw-r--r--kernel/time/posix-clock.c27
-rw-r--r--kernel/time/posix-timers.c558
-rw-r--r--kernel/time/sched_clock.c3
-rw-r--r--kernel/time/tick-broadcast-hrtimer.c3
-rw-r--r--kernel/time/tick-sched.c6
-rw-r--r--kernel/time/timekeeping.c94
-rw-r--r--kernel/time/timer_list.c4
-rw-r--r--kernel/time/vsyscall.c66
-rw-r--r--kernel/torture.c12
-rw-r--r--kernel/trace/Kconfig12
-rw-r--r--kernel/trace/bpf_trace.c21
-rw-r--r--kernel/trace/fgraph.c2
-rw-r--r--kernel/trace/ftrace.c57
-rw-r--r--kernel/trace/ring_buffer.c2
-rw-r--r--kernel/trace/rv/Kconfig7
-rw-r--r--kernel/trace/rv/Makefile7
-rw-r--r--kernel/trace/rv/monitors/sched/Kconfig11
-rw-r--r--kernel/trace/rv/monitors/sched/sched.c38
-rw-r--r--kernel/trace/rv/monitors/sched/sched.h3
-rw-r--r--kernel/trace/rv/monitors/sco/Kconfig14
-rw-r--r--kernel/trace/rv/monitors/sco/sco.c88
-rw-r--r--kernel/trace/rv/monitors/sco/sco.h47
-rw-r--r--kernel/trace/rv/monitors/sco/sco_trace.h15
-rw-r--r--kernel/trace/rv/monitors/scpd/Kconfig15
-rw-r--r--kernel/trace/rv/monitors/scpd/scpd.c96
-rw-r--r--kernel/trace/rv/monitors/scpd/scpd.h49
-rw-r--r--kernel/trace/rv/monitors/scpd/scpd_trace.h15
-rw-r--r--kernel/trace/rv/monitors/sncid/Kconfig15
-rw-r--r--kernel/trace/rv/monitors/sncid/sncid.c96
-rw-r--r--kernel/trace/rv/monitors/sncid/sncid.h49
-rw-r--r--kernel/trace/rv/monitors/sncid/sncid_trace.h15
-rw-r--r--kernel/trace/rv/monitors/snep/Kconfig15
-rw-r--r--kernel/trace/rv/monitors/snep/snep.c96
-rw-r--r--kernel/trace/rv/monitors/snep/snep.h49
-rw-r--r--kernel/trace/rv/monitors/snep/snep_trace.h15
-rw-r--r--kernel/trace/rv/monitors/snroc/Kconfig14
-rw-r--r--kernel/trace/rv/monitors/snroc/snroc.c85
-rw-r--r--kernel/trace/rv/monitors/snroc/snroc.h47
-rw-r--r--kernel/trace/rv/monitors/snroc/snroc_trace.h15
-rw-r--r--kernel/trace/rv/monitors/tss/Kconfig14
-rw-r--r--kernel/trace/rv/monitors/tss/tss.c91
-rw-r--r--kernel/trace/rv/monitors/tss/tss.h47
-rw-r--r--kernel/trace/rv/monitors/tss/tss_trace.h15
-rw-r--r--kernel/trace/rv/monitors/wip/Kconfig2
-rw-r--r--kernel/trace/rv/monitors/wip/wip.c2
-rw-r--r--kernel/trace/rv/monitors/wip/wip.h1
-rw-r--r--kernel/trace/rv/monitors/wwnr/Kconfig2
-rw-r--r--kernel/trace/rv/monitors/wwnr/wwnr.c2
-rw-r--r--kernel/trace/rv/monitors/wwnr/wwnr.h1
-rw-r--r--kernel/trace/rv/rv.c154
-rw-r--r--kernel/trace/rv/rv.h4
-rw-r--r--kernel/trace/rv/rv_reactors.c28
-rw-r--r--kernel/trace/rv/rv_trace.h6
-rw-r--r--kernel/trace/trace.c32
-rw-r--r--kernel/trace/trace.h9
-rw-r--r--kernel/trace/trace_entries.h12
-rw-r--r--kernel/trace/trace_eprobe.c8
-rw-r--r--kernel/trace/trace_event_perf.c4
-rw-r--r--kernel/trace/trace_events.c4
-rw-r--r--kernel/trace/trace_events_hist.c44
-rw-r--r--kernel/trace/trace_events_synth.c40
-rw-r--r--kernel/trace/trace_events_trigger.c38
-rw-r--r--kernel/trace/trace_events_user.c7
-rw-r--r--kernel/trace/trace_fprobe.c55
-rw-r--r--kernel/trace/trace_functions.c46
-rw-r--r--kernel/trace/trace_functions_graph.c176
-rw-r--r--kernel/trace/trace_irqsoff.c14
-rw-r--r--kernel/trace/trace_kprobe.c5
-rw-r--r--kernel/trace/trace_osnoise.c94
-rw-r--r--kernel/trace/trace_output.c122
-rw-r--r--kernel/trace/trace_output.h9
-rw-r--r--kernel/trace/trace_probe.c28
-rw-r--r--kernel/trace/trace_probe.h6
-rw-r--r--kernel/trace/trace_sched_wakeup.c6
-rw-r--r--kernel/trace/trace_uprobe.c9
-rw-r--r--kernel/tracepoint.c2
-rw-r--r--kernel/user_namespace.c26
-rw-r--r--kernel/watch_queue.c16
-rw-r--r--kernel/watchdog.c28
-rw-r--r--kernel/watchdog_perf.c29
-rw-r--r--lib/Kconfig45
-rw-r--r--lib/Kconfig.debug139
-rw-r--r--lib/Kconfig.ubsan25
-rw-r--r--lib/Makefile48
-rw-r--r--lib/atomic64_test.c2
-rw-r--r--lib/cpu_rmap.c2
-rw-r--r--lib/cpumask.c37
-rw-r--r--lib/crc32.c21
-rw-r--r--lib/crc64-rocksoft.c126
-rw-r--r--lib/crc64.c49
-rw-r--r--lib/crc7.c6
-rw-r--r--lib/dump_stack.c4
-rw-r--r--lib/dynamic_queue_limits.c2
-rw-r--r--lib/gen_crc64table.c10
-rw-r--r--lib/iomap.c40
-rw-r--r--lib/iov_iter.c8
-rw-r--r--lib/math/Makefile5
-rw-r--r--lib/math/prime_numbers.c91
-rw-r--r--lib/math/prime_numbers_private.h16
-rw-r--r--lib/math/tests/Makefile8
-rw-r--r--lib/math/tests/gcd_kunit.c56
-rw-r--r--lib/math/tests/int_log_kunit.c74
-rw-r--r--lib/math/tests/prime_numbers_kunit.c59
-rw-r--r--lib/math/tests/rational_kunit.c (renamed from lib/math/rational-test.c)0
-rw-r--r--lib/net_utils.c4
-rw-r--r--lib/test_bitmap.c28
-rw-r--r--lib/test_objpool.c3
-rw-r--r--lib/test_ubsan.c18
-rw-r--r--lib/tests/Makefile48
-rw-r--r--lib/tests/bitfield_kunit.c (renamed from lib/bitfield_kunit.c)0
-rw-r--r--lib/tests/blackhole_dev_kunit.c (renamed from lib/test_blackhole_dev.c)47
-rw-r--r--lib/tests/checksum_kunit.c (renamed from lib/checksum_kunit.c)0
-rw-r--r--lib/tests/cmdline_kunit.c (renamed from lib/cmdline_kunit.c)0
-rw-r--r--lib/tests/cpumask_kunit.c (renamed from lib/cpumask_kunit.c)0
-rw-r--r--lib/tests/crc_kunit.c (renamed from lib/crc_kunit.c)68
-rw-r--r--lib/tests/fortify_kunit.c (renamed from lib/fortify_kunit.c)156
-rw-r--r--lib/tests/hashtable_test.c (renamed from lib/hashtable_test.c)0
-rw-r--r--lib/tests/is_signed_type_kunit.c (renamed from lib/is_signed_type_kunit.c)0
-rw-r--r--lib/tests/kfifo_kunit.c224
-rw-r--r--lib/tests/kunit_iov_iter.c (renamed from lib/kunit_iov_iter.c)0
-rw-r--r--lib/tests/list-test.c (renamed from lib/list-test.c)0
-rw-r--r--lib/tests/longest_symbol_kunit.c82
-rw-r--r--lib/tests/memcpy_kunit.c (renamed from lib/memcpy_kunit.c)0
-rw-r--r--lib/tests/overflow_kunit.c (renamed from lib/overflow_kunit.c)38
-rw-r--r--lib/tests/printf_kunit.c (renamed from lib/test_printf.c)442
-rw-r--r--lib/tests/scanf_kunit.c (renamed from lib/test_scanf.c)295
-rw-r--r--lib/tests/siphash_kunit.c (renamed from lib/siphash_kunit.c)0
-rw-r--r--lib/tests/slub_kunit.c (renamed from lib/slub_kunit.c)59
-rw-r--r--lib/tests/stackinit_kunit.c (renamed from lib/stackinit_kunit.c)30
-rw-r--r--lib/tests/string_helpers_kunit.c (renamed from lib/string_helpers_kunit.c)0
-rw-r--r--lib/tests/string_kunit.c (renamed from lib/string_kunit.c)4
-rw-r--r--lib/tests/test_bits.c (renamed from lib/test_bits.c)0
-rw-r--r--lib/tests/test_fprobe.c (renamed from lib/test_fprobe.c)0
-rw-r--r--lib/tests/test_hash.c (renamed from lib/test_hash.c)0
-rw-r--r--lib/tests/test_kprobes.c (renamed from lib/test_kprobes.c)0
-rw-r--r--lib/tests/test_linear_ranges.c (renamed from lib/test_linear_ranges.c)0
-rw-r--r--lib/tests/test_list_sort.c (renamed from lib/test_list_sort.c)0
-rw-r--r--lib/tests/test_sort.c (renamed from lib/test_sort.c)0
-rw-r--r--lib/tests/usercopy_kunit.c (renamed from lib/usercopy_kunit.c)0
-rw-r--r--lib/tests/util_macros_kunit.c (renamed from lib/util_macros_kunit.c)0
-rw-r--r--lib/ubsan.c28
-rw-r--r--lib/ubsan.h8
-rw-r--r--lib/vdso/Kconfig5
-rw-r--r--lib/vdso/Makefile19
-rw-r--r--lib/vdso/Makefile.include18
-rw-r--r--lib/vdso/datastore.c129
-rw-r--r--lib/vdso/getrandom.c8
-rw-r--r--lib/vdso/gettimeofday.c196
-rw-r--r--lib/vsprintf.c7
-rw-r--r--lib/zstd/Makefile3
-rw-r--r--lib/zstd/common/allocations.h56
-rw-r--r--lib/zstd/common/bits.h150
-rw-r--r--lib/zstd/common/bitstream.h155
-rw-r--r--lib/zstd/common/compiler.h151
-rw-r--r--lib/zstd/common/cpu.h3
-rw-r--r--lib/zstd/common/debug.c9
-rw-r--r--lib/zstd/common/debug.h37
-rw-r--r--lib/zstd/common/entropy_common.c42
-rw-r--r--lib/zstd/common/error_private.c13
-rw-r--r--lib/zstd/common/error_private.h88
-rw-r--r--lib/zstd/common/fse.h103
-rw-r--r--lib/zstd/common/fse_decompress.c132
-rw-r--r--lib/zstd/common/huf.h240
-rw-r--r--lib/zstd/common/mem.h3
-rw-r--r--lib/zstd/common/portability_macros.h47
-rw-r--r--lib/zstd/common/zstd_common.c38
-rw-r--r--lib/zstd/common/zstd_deps.h16
-rw-r--r--lib/zstd/common/zstd_internal.h153
-rw-r--r--lib/zstd/compress/clevels.h3
-rw-r--r--lib/zstd/compress/fse_compress.c74
-rw-r--r--lib/zstd/compress/hist.c13
-rw-r--r--lib/zstd/compress/hist.h10
-rw-r--r--lib/zstd/compress/huf_compress.c441
-rw-r--r--lib/zstd/compress/zstd_compress.c3293
-rw-r--r--lib/zstd/compress/zstd_compress_internal.h621
-rw-r--r--lib/zstd/compress/zstd_compress_literals.c157
-rw-r--r--lib/zstd/compress/zstd_compress_literals.h25
-rw-r--r--lib/zstd/compress/zstd_compress_sequences.c21
-rw-r--r--lib/zstd/compress/zstd_compress_sequences.h16
-rw-r--r--lib/zstd/compress/zstd_compress_superblock.c394
-rw-r--r--lib/zstd/compress/zstd_compress_superblock.h3
-rw-r--r--lib/zstd/compress/zstd_cwksp.h222
-rw-r--r--lib/zstd/compress/zstd_double_fast.c245
-rw-r--r--lib/zstd/compress/zstd_double_fast.h27
-rw-r--r--lib/zstd/compress/zstd_fast.c703
-rw-r--r--lib/zstd/compress/zstd_fast.h16
-rw-r--r--lib/zstd/compress/zstd_lazy.c840
-rw-r--r--lib/zstd/compress/zstd_lazy.h195
-rw-r--r--lib/zstd/compress/zstd_ldm.c102
-rw-r--r--lib/zstd/compress/zstd_ldm.h17
-rw-r--r--lib/zstd/compress/zstd_ldm_geartab.h3
-rw-r--r--lib/zstd/compress/zstd_opt.c571
-rw-r--r--lib/zstd/compress/zstd_opt.h55
-rw-r--r--lib/zstd/compress/zstd_preSplit.c239
-rw-r--r--lib/zstd/compress/zstd_preSplit.h34
-rw-r--r--lib/zstd/decompress/huf_decompress.c887
-rw-r--r--lib/zstd/decompress/zstd_ddict.c9
-rw-r--r--lib/zstd/decompress/zstd_ddict.h3
-rw-r--r--lib/zstd/decompress/zstd_decompress.c375
-rw-r--r--lib/zstd/decompress/zstd_decompress_block.c724
-rw-r--r--lib/zstd/decompress/zstd_decompress_block.h10
-rw-r--r--lib/zstd/decompress/zstd_decompress_internal.h19
-rw-r--r--lib/zstd/decompress_sources.h2
-rw-r--r--lib/zstd/zstd_common_module.c5
-rw-r--r--lib/zstd/zstd_compress_module.c75
-rw-r--r--lib/zstd/zstd_decompress_module.c4
-rw-r--r--mm/Kconfig4
-rw-r--r--mm/compaction.c3
-rw-r--r--mm/damon/core.c7
-rw-r--r--mm/damon/paddr.c3
-rw-r--r--mm/execmem.c39
-rw-r--r--mm/filemap.c153
-rw-r--r--mm/folio-compat.c14
-rw-r--r--mm/gup.c6
-rw-r--r--mm/huge_memory.c2
-rw-r--r--mm/hugetlb.c16
-rw-r--r--mm/internal.h15
-rw-r--r--mm/kmsan/hooks.c1
-rw-r--r--mm/memcontrol-v1.c6
-rw-r--r--mm/memcontrol.c13
-rw-r--r--mm/memory-failure.c63
-rw-r--r--mm/memory.c42
-rw-r--r--mm/memory_hotplug.c26
-rw-r--r--mm/mempolicy.c31
-rw-r--r--mm/migrate.c10
-rw-r--r--mm/mmap.c54
-rw-r--r--mm/nommu.c22
-rw-r--r--mm/page_alloc.c18
-rw-r--r--mm/page_isolation.c10
-rw-r--r--mm/percpu.c4
-rw-r--r--mm/readahead.c14
-rw-r--r--mm/shmem.c47
-rw-r--r--mm/slab.h34
-rw-r--r--mm/slab_common.c62
-rw-r--r--mm/slub.c336
-rw-r--r--mm/swap.c16
-rw-r--r--mm/swap.h1
-rw-r--r--mm/swap_cgroup.c7
-rw-r--r--mm/swapfile.c12
-rw-r--r--mm/usercopy.c18
-rw-r--r--mm/userfaultfd.c107
-rw-r--r--mm/util.c232
-rw-r--r--mm/vma.c15
-rw-r--r--mm/vmalloc.c4
-rw-r--r--mm/vmscan.c23
-rw-r--r--mm/vmstat.c46
-rw-r--r--mm/zswap.c2
-rw-r--r--net/8021q/vlan.c3
-rw-r--r--net/8021q/vlan_dev.c36
-rw-r--r--net/8021q/vlan_netlink.c9
-rw-r--r--net/atm/lec.c3
-rw-r--r--net/atm/mpc.c2
-rw-r--r--net/ax25/af_ax25.c30
-rw-r--r--net/ax25/ax25_route.c74
-rw-r--r--net/batman-adv/Makefile2
-rw-r--r--net/batman-adv/bat_algo.c8
-rw-r--r--net/batman-adv/bat_iv_ogm.c108
-rw-r--r--net/batman-adv/bat_v.c28
-rw-r--r--net/batman-adv/bat_v_elp.c16
-rw-r--r--net/batman-adv/bat_v_ogm.c45
-rw-r--r--net/batman-adv/bitarray.c2
-rw-r--r--net/batman-adv/bridge_loop_avoidance.c106
-rw-r--r--net/batman-adv/distributed-arp-table.c68
-rw-r--r--net/batman-adv/distributed-arp-table.h4
-rw-r--r--net/batman-adv/fragmentation.c2
-rw-r--r--net/batman-adv/gateway_client.c38
-rw-r--r--net/batman-adv/gateway_common.c8
-rw-r--r--net/batman-adv/hard-interface.c158
-rw-r--r--net/batman-adv/hard-interface.h12
-rw-r--r--net/batman-adv/log.c2
-rw-r--r--net/batman-adv/log.h10
-rw-r--r--net/batman-adv/main.c42
-rw-r--r--net/batman-adv/main.h24
-rw-r--r--net/batman-adv/mesh-interface.c (renamed from net/batman-adv/soft-interface.c)206
-rw-r--r--net/batman-adv/mesh-interface.h (renamed from net/batman-adv/soft-interface.h)22
-rw-r--r--net/batman-adv/multicast.c182
-rw-r--r--net/batman-adv/multicast_forw.c30
-rw-r--r--net/batman-adv/netlink.c180
-rw-r--r--net/batman-adv/netlink.h2
-rw-r--r--net/batman-adv/network-coding.c64
-rw-r--r--net/batman-adv/originator.c58
-rw-r--r--net/batman-adv/routing.c42
-rw-r--r--net/batman-adv/send.c36
-rw-r--r--net/batman-adv/send.h4
-rw-r--r--net/batman-adv/tp_meter.c30
-rw-r--r--net/batman-adv/trace.h2
-rw-r--r--net/batman-adv/translation-table.c198
-rw-r--r--net/batman-adv/translation-table.h4
-rw-r--r--net/batman-adv/tvlv.c26
-rw-r--r--net/batman-adv/types.h78
-rw-r--r--net/bluetooth/6lowpan.c10
-rw-r--r--net/bluetooth/coredump.c28
-rw-r--r--net/bluetooth/hci_conn.c122
-rw-r--r--net/bluetooth/hci_core.c87
-rw-r--r--net/bluetooth/hci_event.c69
-rw-r--r--net/bluetooth/hci_sync.c32
-rw-r--r--net/bluetooth/iso.c30
-rw-r--r--net/bluetooth/l2cap_core.c57
-rw-r--r--net/bluetooth/l2cap_sock.c15
-rw-r--r--net/bluetooth/mgmt.c57
-rw-r--r--net/bluetooth/mgmt_util.c17
-rw-r--r--net/bluetooth/mgmt_util.h4
-rw-r--r--net/bluetooth/rfcomm/core.c6
-rw-r--r--net/bluetooth/sco.c44
-rw-r--r--net/bluetooth/smp.c4
-rw-r--r--net/bridge/br_device.c4
-rw-r--r--net/bridge/br_ioctl.c36
-rw-r--r--net/bridge/br_netlink.c6
-rw-r--r--net/bridge/br_private.h3
-rw-r--r--net/caif/chnl_net.c5
-rw-r--r--net/can/af_can.c14
-rw-r--r--net/can/af_can.h12
-rw-r--r--net/can/bcm.c21
-rw-r--r--net/can/isotp.c11
-rw-r--r--net/can/j1939/bus.c4
-rw-r--r--net/can/j1939/transport.c8
-rw-r--r--net/can/proc.c46
-rw-r--r--net/can/raw.c7
-rw-r--r--net/core/Makefile2
-rw-r--r--net/core/dev.c619
-rw-r--r--net/core/dev.h32
-rw-r--r--net/core/dev_api.c335
-rw-r--r--net/core/dev_ioctl.c87
-rw-r--r--net/core/devmem.c5
-rw-r--r--net/core/dst.c6
-rw-r--r--net/core/fib_rules.c221
-rw-r--r--net/core/filter.c125
-rw-r--r--net/core/flow_dissector.c10
-rw-r--r--net/core/gro.c103
-rw-r--r--net/core/hotdata.c1
-rw-r--r--net/core/lwtunnel.c88
-rw-r--r--net/core/neighbour.c5
-rw-r--r--net/core/net-procfs.c28
-rw-r--r--net/core/net-sysfs.c24
-rw-r--r--net/core/net_namespace.c2
-rw-r--r--net/core/netdev-genl-gen.c4
-rw-r--r--net/core/netdev-genl-gen.h6
-rw-r--r--net/core/netdev-genl.c76
-rw-r--r--net/core/netdev_rx_queue.c51
-rw-r--r--net/core/netpoll.c76
-rw-r--r--net/core/page_pool.c22
-rw-r--r--net/core/page_pool_user.c2
-rw-r--r--net/core/pktgen.c344
-rw-r--r--net/core/rtnetlink.c97
-rw-r--r--net/core/secure_seq.c2
-rw-r--r--net/core/selftests.c4
-rw-r--r--net/core/skbuff.c117
-rw-r--r--net/core/sock.c41
-rw-r--r--net/core/xdp.c10
-rw-r--r--net/dccp/ipv4.c5
-rw-r--r--net/dccp/ipv6.c13
-rw-r--r--net/dccp/output.c5
-rw-r--r--net/dccp/timer.c8
-rw-r--r--net/devlink/core.c2
-rw-r--r--net/dsa/conduit.c17
-rw-r--r--net/dsa/user.c27
-rw-r--r--net/ethtool/cabletest.c29
-rw-r--r--net/ethtool/cmis_fw_update.c8
-rw-r--r--net/ethtool/common.c3
-rw-r--r--net/ethtool/common.h7
-rw-r--r--net/ethtool/features.c8
-rw-r--r--net/ethtool/ioctl.c31
-rw-r--r--net/ethtool/linkstate.c2
-rw-r--r--net/ethtool/module.c9
-rw-r--r--net/ethtool/netlink.c19
-rw-r--r--net/ethtool/netlink.h5
-rw-r--r--net/ethtool/phy.c23
-rw-r--r--net/ethtool/plca.c6
-rw-r--r--net/ethtool/pse-pd.c4
-rw-r--r--net/ethtool/rss.c4
-rw-r--r--net/ethtool/stats.c2
-rw-r--r--net/ethtool/strset.c2
-rw-r--r--net/ethtool/tsinfo.c13
-rw-r--r--net/hsr/Kconfig18
-rw-r--r--net/hsr/Makefile2
-rw-r--r--net/hsr/hsr_device.c4
-rw-r--r--net/hsr/hsr_forward.c4
-rw-r--r--net/hsr/hsr_framereg.c99
-rw-r--r--net/hsr/hsr_framereg.h8
-rw-r--r--net/hsr/hsr_main.h2
-rw-r--r--net/hsr/hsr_netlink.c12
-rw-r--r--net/hsr/prp_dup_discard_test.c212
-rw-r--r--net/ieee802154/6lowpan/core.c10
-rw-r--r--net/ieee802154/6lowpan/reassembly.c27
-rw-r--r--net/ieee802154/core.c10
-rw-r--r--net/ipv4/af_inet.c2
-rw-r--r--net/ipv4/arp.c12
-rw-r--r--net/ipv4/bpf_tcp_ca.c2
-rw-r--r--net/ipv4/devinet.c77
-rw-r--r--net/ipv4/fib_frontend.c78
-rw-r--r--net/ipv4/fib_rules.c59
-rw-r--r--net/ipv4/fib_semantics.c206
-rw-r--r--net/ipv4/fib_trie.c22
-rw-r--r--net/ipv4/icmp.c39
-rw-r--r--net/ipv4/igmp.c14
-rw-r--r--net/ipv4/igmp_internal.h17
-rw-r--r--net/ipv4/inet_connection_sock.c97
-rw-r--r--net/ipv4/inet_diag.c6
-rw-r--r--net/ipv4/inet_fragment.c31
-rw-r--r--net/ipv4/inet_hashtables.c122
-rw-r--r--net/ipv4/inet_timewait_sock.c2
-rw-r--r--net/ipv4/inetpeer.c8
-rw-r--r--net/ipv4/ip_fragment.c48
-rw-r--r--net/ipv4/ip_gre.c22
-rw-r--r--net/ipv4/ip_output.c3
-rw-r--r--net/ipv4/ip_tunnel.c10
-rw-r--r--net/ipv4/ip_vti.c9
-rw-r--r--net/ipv4/ipip.c9
-rw-r--r--net/ipv4/ipmr.c2
-rw-r--r--net/ipv4/netfilter/nft_fib_ipv4.c11
-rw-r--r--net/ipv4/nexthop.c184
-rw-r--r--net/ipv4/ping.c26
-rw-r--r--net/ipv4/proc.c1
-rw-r--r--net/ipv4/raw.c6
-rw-r--r--net/ipv4/syncookies.c9
-rw-r--r--net/ipv4/sysctl_net_ipv4.c10
-rw-r--r--net/ipv4/tcp.c155
-rw-r--r--net/ipv4/tcp_dctcp.c2
-rw-r--r--net/ipv4/tcp_dctcp.h2
-rw-r--r--net/ipv4/tcp_diag.c21
-rw-r--r--net/ipv4/tcp_fastopen.c8
-rw-r--r--net/ipv4/tcp_input.c191
-rw-r--r--net/ipv4/tcp_ipv4.c114
-rw-r--r--net/ipv4/tcp_metrics.c6
-rw-r--r--net/ipv4/tcp_minisocks.c61
-rw-r--r--net/ipv4/tcp_offload.c23
-rw-r--r--net/ipv4/tcp_output.c69
-rw-r--r--net/ipv4/tcp_timer.c82
-rw-r--r--net/ipv4/udp.c75
-rw-r--r--net/ipv4/udp_offload.c10
-rw-r--r--net/ipv6/exthdrs.c3
-rw-r--r--net/ipv6/fib6_rules.c57
-rw-r--r--net/ipv6/icmp.c7
-rw-r--r--net/ipv6/ila/ila_lwt.c4
-rw-r--r--net/ipv6/inet6_connection_sock.c14
-rw-r--r--net/ipv6/inet6_hashtables.c40
-rw-r--r--net/ipv6/ioam6_iptunnel.c8
-rw-r--r--net/ipv6/ip6_gre.c29
-rw-r--r--net/ipv6/ip6_output.c11
-rw-r--r--net/ipv6/ip6_tunnel.c21
-rw-r--r--net/ipv6/ip6_vti.c15
-rw-r--r--net/ipv6/ip6mr.c2
-rw-r--r--net/ipv6/ndisc.c8
-rw-r--r--net/ipv6/netfilter/nf_conntrack_reasm.c27
-rw-r--r--net/ipv6/netfilter/nf_socket_ipv6.c23
-rw-r--r--net/ipv6/netfilter/nft_fib_ipv6.c19
-rw-r--r--net/ipv6/ping.c3
-rw-r--r--net/ipv6/raw.c15
-rw-r--r--net/ipv6/reassembly.c29
-rw-r--r--net/ipv6/route.c11
-rw-r--r--net/ipv6/sit.c23
-rw-r--r--net/ipv6/tcp_ipv6.c69
-rw-r--r--net/ipv6/tcpv6_offload.c23
-rw-r--r--net/ipv6/udp.c12
-rw-r--r--net/ipv6/udp_offload.c2
-rw-r--r--net/l2tp/l2tp_eth.c1
-rw-r--r--net/l2tp/l2tp_ip6.c8
-rw-r--r--net/l2tp/l2tp_ppp.c1
-rw-r--r--net/llc/llc_s_ac.c49
-rw-r--r--net/mac80211/agg-rx.c22
-rw-r--r--net/mac80211/agg-tx.c9
-rw-r--r--net/mac80211/cfg.c46
-rw-r--r--net/mac80211/chan.c20
-rw-r--r--net/mac80211/debugfs.c44
-rw-r--r--net/mac80211/debugfs_sta.c7
-rw-r--r--net/mac80211/driver-ops.c10
-rw-r--r--net/mac80211/driver-ops.h3
-rw-r--r--net/mac80211/drop.h21
-rw-r--r--net/mac80211/eht.c9
-rw-r--r--net/mac80211/ethtool.c2
-rw-r--r--net/mac80211/ieee80211_i.h43
-rw-r--r--net/mac80211/iface.c87
-rw-r--r--net/mac80211/main.c16
-rw-r--r--net/mac80211/mesh_hwmp.c14
-rw-r--r--net/mac80211/mlme.c721
-rw-r--r--net/mac80211/parse.c135
-rw-r--r--net/mac80211/rx.c229
-rw-r--r--net/mac80211/sta_info.c84
-rw-r--r--net/mac80211/status.c34
-rw-r--r--net/mac80211/tests/Makefile2
-rw-r--r--net/mac80211/tests/chan-mode.c254
-rw-r--r--net/mac80211/tests/util.c6
-rw-r--r--net/mac80211/tx.c5
-rw-r--r--net/mac80211/util.c16
-rw-r--r--net/mac80211/wbrf.c3
-rw-r--r--net/mac802154/main.c4
-rw-r--r--net/mctp/route.c10
-rw-r--r--net/mctp/test/route-test.c109
-rw-r--r--net/mptcp/Makefile2
-rw-r--r--net/mptcp/ctrl.c143
-rw-r--r--net/mptcp/diag.c42
-rw-r--r--net/mptcp/fastopen.c27
-rw-r--r--net/mptcp/options.c7
-rw-r--r--net/mptcp/pm.c666
-rw-r--r--net/mptcp/pm_kernel.c1412
-rw-r--r--net/mptcp/pm_netlink.c1915
-rw-r--r--net/mptcp/pm_userspace.c269
-rw-r--r--net/mptcp/protocol.c332
-rw-r--r--net/mptcp/protocol.h99
-rw-r--r--net/mptcp/sched.c39
-rw-r--r--net/mptcp/sockopt.c28
-rw-r--r--net/mptcp/subflow.c36
-rw-r--r--net/netfilter/ipvs/ip_vs_ctl.c8
-rw-r--r--net/netfilter/nf_conncount.c6
-rw-r--r--net/netfilter/nf_conntrack_standalone.c12
-rw-r--r--net/netfilter/nf_log_syslog.c8
-rw-r--r--net/netfilter/nf_tables_api.c24
-rw-r--r--net/netfilter/nf_tables_core.c11
-rw-r--r--net/netfilter/nfnetlink_queue.c2
-rw-r--r--net/netfilter/nft_compat.c8
-rw-r--r--net/netfilter/nft_ct.c6
-rw-r--r--net/netfilter/nft_exthdr.c10
-rw-r--r--net/netfilter/xt_hashlimit.c12
-rw-r--r--net/netfilter/xt_repldata.h2
-rw-r--r--net/netlink/af_netlink.c1
-rw-r--r--net/nfc/hci/llc.c11
-rw-r--r--net/nfc/hci/llc.h1
-rw-r--r--net/openvswitch/conntrack.c30
-rw-r--r--net/openvswitch/datapath.h19
-rw-r--r--net/openvswitch/flow_netlink.c15
-rw-r--r--net/openvswitch/vport-internal_dev.c2
-rw-r--r--net/openvswitch/vport.h9
-rw-r--r--net/packet/af_packet.c9
-rw-r--r--net/rds/stats.c3
-rw-r--r--net/rfkill/rfkill-gpio.c3
-rw-r--r--net/rxrpc/ar-internal.h1
-rw-r--r--net/rxrpc/peer_object.c30
-rw-r--r--net/sched/act_gate.c3
-rw-r--r--net/sched/act_tunnel_key.c8
-rw-r--r--net/sched/em_meta.c2
-rw-r--r--net/sched/sch_api.c225
-rw-r--r--net/sched/sch_gred.c3
-rw-r--r--net/sched/sch_qfq.c2
-rw-r--r--net/sched/sch_taprio.c6
-rw-r--r--net/sctp/protocol.c7
-rw-r--r--net/smc/smc_pnet.c8
-rw-r--r--net/socket.c35
-rw-r--r--net/sunrpc/auth.c2
-rw-r--r--net/switchdev/switchdev.c25
-rw-r--r--net/tipc/link.c3
-rw-r--r--net/tls/tls_device.c8
-rw-r--r--net/tls/tls_main.c4
-rw-r--r--net/unix/af_unix.c56
-rw-r--r--net/unix/af_unix.h72
-rw-r--r--net/unix/diag.c18
-rw-r--r--net/unix/garbage.c33
-rw-r--r--net/unix/sysctl_net_unix.c6
-rw-r--r--net/unix/unix_bpf.c5
-rw-r--r--net/wireless/chan.c13
-rw-r--r--net/wireless/core.c26
-rw-r--r--net/wireless/core.h7
-rw-r--r--net/wireless/mlme.c17
-rw-r--r--net/wireless/nl80211.c77
-rw-r--r--net/wireless/rdev-ops.h10
-rw-r--r--net/wireless/reg.c7
-rw-r--r--net/wireless/scan.c8
-rw-r--r--net/wireless/trace.h19
-rw-r--r--net/wireless/util.c4
-rw-r--r--net/xdp/xsk.c9
-rw-r--r--net/xdp/xsk_buff_pool.c51
-rw-r--r--net/xfrm/xfrm_device.c46
-rw-r--r--net/xfrm/xfrm_interface_core.c15
-rw-r--r--net/xfrm/xfrm_iptfs.c6
-rw-r--r--net/xfrm/xfrm_output.c49
-rw-r--r--net/xfrm/xfrm_policy.c2
-rw-r--r--net/xfrm/xfrm_state.c58
-rw-r--r--net/xfrm/xfrm_user.c14
-rw-r--r--rust/bindings/bindings_helper.h1
-rw-r--r--rust/helpers/cpumask.c45
-rw-r--r--rust/helpers/helpers.c2
-rw-r--r--rust/helpers/sync.c13
-rw-r--r--rust/kernel/alloc/allocator_test.rs18
-rw-r--r--rust/kernel/cred.rs5
-rw-r--r--rust/kernel/error.rs2
-rw-r--r--rust/kernel/fs/file.rs4
-rw-r--r--rust/kernel/init.rs23
-rw-r--r--rust/kernel/init/macros.rs6
-rw-r--r--rust/kernel/lib.rs2
-rw-r--r--rust/kernel/security.rs12
-rw-r--r--rust/kernel/seq_file.rs1
-rw-r--r--rust/kernel/sync.rs63
-rw-r--r--rust/kernel/sync/condvar.rs28
-rw-r--r--rust/kernel/sync/lock.rs35
-rw-r--r--rust/kernel/sync/lock/global.rs5
-rw-r--r--rust/kernel/sync/locked_by.rs2
-rw-r--r--rust/kernel/sync/poll.rs2
-rw-r--r--rust/kernel/task.rs4
-rw-r--r--rust/kernel/workqueue.rs26
-rwxr-xr-xsamples/check-exec/run-script-ask.sh (renamed from samples/check-exec/run-script-ask.inc)0
-rw-r--r--samples/landlock/sandboxer.c37
-rw-r--r--samples/vfs/samples-vfs.h14
-rw-r--r--samples/vfs/test-list-all-mounts.c35
-rw-r--r--scripts/Makefile.clang2
-rw-r--r--scripts/Makefile.lib5
-rw-r--r--scripts/Makefile.ubsan10
-rw-r--r--scripts/coccinelle/misc/newline_in_nl_msg.cocci13
-rwxr-xr-xscripts/documentation-file-ref-check2
-rwxr-xr-xscripts/gcc-x86_32-has-stack-protector.sh8
-rwxr-xr-xscripts/gcc-x86_64-has-stack-protector.sh4
-rw-r--r--scripts/gdb/linux/cpus.py2
-rwxr-xr-xscripts/gen-crc-consts.py291
-rwxr-xr-xscripts/generate_rust_analyzer.py71
-rwxr-xr-xscripts/get_abi.pl1103
-rwxr-xr-xscripts/get_abi.py214
-rwxr-xr-xscripts/get_feat.pl4
-rw-r--r--scripts/integer-wrap-ignore.scl3
-rw-r--r--scripts/kallsyms.c72
-rwxr-xr-xscripts/kernel-doc163
-rw-r--r--scripts/lib/abi/abi_parser.py628
-rw-r--r--scripts/lib/abi/abi_regex.py234
-rw-r--r--scripts/lib/abi/helpers.py38
-rw-r--r--scripts/lib/abi/system_symbols.py378
-rwxr-xr-xscripts/link-vmlinux.sh8
-rwxr-xr-xscripts/min-tool-version.sh4
-rwxr-xr-xscripts/package/install-extmod-build2
-rw-r--r--scripts/rustdoc_test_gen.rs4
-rwxr-xr-xscripts/selinux/install_policy.sh15
-rw-r--r--scripts/sorttable.c411
-rw-r--r--scripts/syscall.tbl1
-rwxr-xr-xscripts/tracing/draw_functrace.py129
-rw-r--r--security/Kconfig21
-rw-r--r--security/Kconfig.hardening33
-rw-r--r--security/apparmor/apparmorfs.c8
-rw-r--r--security/commoncap.c9
-rw-r--r--security/integrity/ima/ima.h3
-rw-r--r--security/integrity/ima/ima_main.c18
-rw-r--r--security/ipe/policy_fs.c8
-rw-r--r--security/keys/gc.c4
-rw-r--r--security/keys/key.c2
-rw-r--r--security/landlock/.kunitconfig2
-rw-r--r--security/landlock/Makefile5
-rw-r--r--security/landlock/access.h25
-rw-r--r--security/landlock/audit.c522
-rw-r--r--security/landlock/audit.h76
-rw-r--r--security/landlock/cred.c28
-rw-r--r--security/landlock/cred.h92
-rw-r--r--security/landlock/domain.c264
-rw-r--r--security/landlock/domain.h174
-rw-r--r--security/landlock/errata.h99
-rw-r--r--security/landlock/errata/abi-4.h15
-rw-r--r--security/landlock/errata/abi-6.h19
-rw-r--r--security/landlock/fs.c323
-rw-r--r--security/landlock/fs.h40
-rw-r--r--security/landlock/id.c251
-rw-r--r--security/landlock/id.h25
-rw-r--r--security/landlock/limits.h7
-rw-r--r--security/landlock/net.c78
-rw-r--r--security/landlock/ruleset.c30
-rw-r--r--security/landlock/ruleset.h48
-rw-r--r--security/landlock/setup.c40
-rw-r--r--security/landlock/setup.h3
-rw-r--r--security/landlock/syscalls.c99
-rw-r--r--security/landlock/task.c257
-rw-r--r--security/loadpin/Kconfig2
-rw-r--r--security/lsm_audit.c29
-rw-r--r--security/min_addr.c11
-rw-r--r--security/security.c17
-rw-r--r--security/selinux/avc.c2
-rw-r--r--security/selinux/hooks.c77
-rw-r--r--security/selinux/include/classmap.h8
-rw-r--r--security/selinux/include/policycap.h1
-rw-r--r--security/selinux/include/policycap_names.h1
-rw-r--r--security/selinux/include/security.h8
-rw-r--r--security/selinux/ss/services.c15
-rw-r--r--security/smack/smack.h20
-rw-r--r--security/smack/smack_access.c6
-rw-r--r--security/smack/smack_lsm.c40
-rw-r--r--security/smack/smackfs.c29
-rw-r--r--security/yama/yama_lsm.c9
-rw-r--r--sound/ac97/bus.c9
-rw-r--r--sound/arm/aaci.c2
-rw-r--r--sound/atmel/ac97c.c9
-rw-r--r--sound/core/pcm.c4
-rw-r--r--sound/core/pcm_drm_eld.c387
-rw-r--r--sound/core/pcm_misc.c4
-rw-r--r--sound/core/seq/oss/seq_oss_event.c8
-rw-r--r--sound/core/seq/seq_clientmgr.c53
-rw-r--r--sound/core/seq/seq_memory.c1
-rw-r--r--sound/core/timer.c147
-rw-r--r--sound/hda/intel-dsp-config.c4
-rw-r--r--sound/isa/es18xx.c8
-rw-r--r--sound/isa/opti9xx/opti92x-ad1848.c2
-rw-r--r--sound/pci/ctxfi/ctdaio.c4
-rw-r--r--sound/pci/echoaudio/echoaudio.c6
-rw-r--r--sound/pci/emu10k1/memory.c2
-rw-r--r--sound/pci/hda/Kconfig2
-rw-r--r--sound/pci/hda/cs35l56_hda.c6
-rw-r--r--sound/pci/hda/hda_beep.c15
-rw-r--r--sound/pci/hda/hda_bind.c2
-rw-r--r--sound/pci/hda/hda_codec.c3
-rw-r--r--sound/pci/hda/hda_eld.c385
-rw-r--r--sound/pci/hda/hda_intel.c58
-rw-r--r--sound/pci/hda/hda_local.h49
-rw-r--r--sound/pci/hda/hda_tegra.c16
-rw-r--r--sound/pci/hda/patch_hdmi.c6
-rw-r--r--sound/pci/hda/patch_realtek.c228
-rw-r--r--sound/pci/hda/tas2781-spi.h1
-rw-r--r--sound/pci/hda/tas2781_hda_i2c.c13
-rw-r--r--sound/pci/hda/tas2781_hda_spi.c5
-rw-r--r--sound/pci/hda/tas2781_spi_fwlib.c8
-rw-r--r--sound/pci/lola/lola.h2
-rw-r--r--sound/pci/lola/lola_mixer.c43
-rw-r--r--sound/pci/oxygen/oxygen.c4
-rw-r--r--sound/pci/oxygen/oxygen.h2
-rw-r--r--sound/pci/oxygen/oxygen_lib.c5
-rw-r--r--sound/pci/oxygen/se6x.c4
-rw-r--r--sound/pci/oxygen/virtuoso.c4
-rw-r--r--sound/pci/vx222/vx222.c9
-rw-r--r--sound/soc/Kconfig7
-rw-r--r--sound/soc/Makefile4
-rw-r--r--sound/soc/amd/Kconfig10
-rw-r--r--sound/soc/amd/acp/Kconfig12
-rw-r--r--sound/soc/amd/acp/Makefile4
-rw-r--r--sound/soc/amd/acp/acp-i2s.c185
-rw-r--r--sound/soc/amd/acp/acp-legacy-common.c308
-rw-r--r--sound/soc/amd/acp/acp-mach-common.c2
-rw-r--r--sound/soc/amd/acp/acp-pci.c220
-rw-r--r--sound/soc/amd/acp/acp-pdm.c53
-rw-r--r--sound/soc/amd/acp/acp-platform.c145
-rw-r--r--sound/soc/amd/acp/acp-rembrandt.c149
-rw-r--r--sound/soc/amd/acp/acp-renoir.c116
-rw-r--r--sound/soc/amd/acp/acp-sdw-legacy-mach.c45
-rw-r--r--sound/soc/amd/acp/acp-sdw-mach-common.c34
-rw-r--r--sound/soc/amd/acp/acp3x-es83xx/acp3x-es83xx.c5
-rw-r--r--sound/soc/amd/acp/acp63.c163
-rw-r--r--sound/soc/amd/acp/acp70.c132
-rw-r--r--sound/soc/amd/acp/amd-acp70-acpi-match.c160
-rw-r--r--sound/soc/amd/acp/amd-acpi-mach.c93
-rw-r--r--sound/soc/amd/acp/amd.h168
-rw-r--r--sound/soc/amd/acp/chip_offset_byte.h12
-rw-r--r--sound/soc/amd/acp/soc_amd_sdw_common.h12
-rw-r--r--sound/soc/amd/mach-config.h1
-rw-r--r--sound/soc/amd/ps/Makefile2
-rw-r--r--sound/soc/amd/ps/acp63.h246
-rw-r--r--sound/soc/amd/ps/pci-ps.c434
-rw-r--r--sound/soc/amd/ps/ps-common.c475
-rw-r--r--sound/soc/amd/ps/ps-pdm-dma.c18
-rw-r--r--sound/soc/amd/ps/ps-sdw-dma.c381
-rw-r--r--sound/soc/amd/rpl/rpl-pci-acp6x.c10
-rw-r--r--sound/soc/amd/vangogh/acp5x-pcm-dma.c11
-rw-r--r--sound/soc/amd/yc/acp6x-mach.c21
-rw-r--r--sound/soc/amd/yc/acp6x-pdm-dma.c12
-rw-r--r--sound/soc/amd/yc/pci-acp6x.c10
-rw-r--r--sound/soc/atmel/atmel-classd.c3
-rw-r--r--sound/soc/atmel/tse850-pcm5142.c11
-rw-r--r--sound/soc/au1x/i2sc.c17
-rw-r--r--sound/soc/au1x/psc-ac97.c17
-rw-r--r--sound/soc/au1x/psc-i2s.c17
-rw-r--r--sound/soc/codecs/Kconfig13
-rw-r--r--sound/soc/codecs/Makefile2
-rw-r--r--sound/soc/codecs/adau1701.c4
-rw-r--r--sound/soc/codecs/adau17x1.c2
-rw-r--r--sound/soc/codecs/ak4375.c11
-rw-r--r--sound/soc/codecs/ak4458.c13
-rw-r--r--sound/soc/codecs/ak5558.c11
-rw-r--r--sound/soc/codecs/arizona.c12
-rw-r--r--sound/soc/codecs/aw88166.c1933
-rw-r--r--sound/soc/codecs/aw88166.h534
-rw-r--r--sound/soc/codecs/aw88395/aw88395_device.c2
-rw-r--r--sound/soc/codecs/cpcap.c200
-rw-r--r--sound/soc/codecs/cros_ec_codec.c3
-rw-r--r--sound/soc/codecs/cs35l32.c11
-rw-r--r--sound/soc/codecs/cs35l33.c14
-rw-r--r--sound/soc/codecs/cs35l34.c14
-rw-r--r--sound/soc/codecs/cs35l41-spi.c5
-rw-r--r--sound/soc/codecs/cs35l56.c1
-rw-r--r--sound/soc/codecs/cs4234.c12
-rw-r--r--sound/soc/codecs/cs4265.c4
-rw-r--r--sound/soc/codecs/cs4270.c4
-rw-r--r--sound/soc/codecs/cs4271.c4
-rw-r--r--sound/soc/codecs/cs42l42-i2c.c6
-rw-r--r--sound/soc/codecs/cs42l42-sdw.c16
-rw-r--r--sound/soc/codecs/cs42l42.c4
-rw-r--r--sound/soc/codecs/cs42l43-jack.c13
-rw-r--r--sound/soc/codecs/cs42l43.c17
-rw-r--r--sound/soc/codecs/cs42l43.h3
-rw-r--r--sound/soc/codecs/cs42l51-i2c.c2
-rw-r--r--sound/soc/codecs/cs42l51.c8
-rw-r--r--sound/soc/codecs/cs42l52.c4
-rw-r--r--sound/soc/codecs/cs42l56.c4
-rw-r--r--sound/soc/codecs/cs42l73.c4
-rw-r--r--sound/soc/codecs/cs42l83-i2c.c6
-rw-r--r--sound/soc/codecs/cs42xx8-i2c.c2
-rw-r--r--sound/soc/codecs/cs42xx8.c14
-rw-r--r--sound/soc/codecs/cs43130.c33
-rw-r--r--sound/soc/codecs/cs4341.c2
-rw-r--r--sound/soc/codecs/cs4349.c7
-rw-r--r--sound/soc/codecs/cs530x.c4
-rw-r--r--sound/soc/codecs/cs53l30.c11
-rw-r--r--sound/soc/codecs/cx2072x.c12
-rw-r--r--sound/soc/codecs/da7210.c4
-rw-r--r--sound/soc/codecs/da7213.c10
-rw-r--r--sound/soc/codecs/da7218.c4
-rw-r--r--sound/soc/codecs/da7219.c4
-rw-r--r--sound/soc/codecs/da732x.c4
-rw-r--r--sound/soc/codecs/da9055.c4
-rw-r--r--sound/soc/codecs/dmic.c27
-rw-r--r--sound/soc/codecs/hdac_hdmi.c18
-rw-r--r--sound/soc/codecs/hdmi-codec.c68
-rw-r--r--sound/soc/codecs/jz4760.c32
-rw-r--r--sound/soc/codecs/jz4770.c40
-rw-r--r--sound/soc/codecs/lochnagar-sc.c4
-rw-r--r--sound/soc/codecs/lpass-rx-macro.c8
-rw-r--r--sound/soc/codecs/lpass-tx-macro.c8
-rw-r--r--sound/soc/codecs/lpass-va-macro.c8
-rw-r--r--sound/soc/codecs/lpass-wsa-macro.c8
-rw-r--r--sound/soc/codecs/madera.c12
-rw-r--r--sound/soc/codecs/max98090.c11
-rw-r--r--sound/soc/codecs/max98373-i2c.c6
-rw-r--r--sound/soc/codecs/max98373-sdw.c10
-rw-r--r--sound/soc/codecs/max98390.c6
-rw-r--r--sound/soc/codecs/max98396.c6
-rw-r--r--sound/soc/codecs/max98520.c8
-rw-r--r--sound/soc/codecs/max9860.c6
-rw-r--r--sound/soc/codecs/max98927.c6
-rw-r--r--sound/soc/codecs/mt6358.c104
-rw-r--r--sound/soc/codecs/mt6358.h4
-rw-r--r--sound/soc/codecs/mt6359-accdet.h9
-rw-r--r--sound/soc/codecs/mt6359.c9
-rw-r--r--sound/soc/codecs/mt6660.c9
-rw-r--r--sound/soc/codecs/nau8540.c4
-rw-r--r--sound/soc/codecs/nau8810.c4
-rw-r--r--sound/soc/codecs/nau8822.c4
-rw-r--r--sound/soc/codecs/nau8824.c4
-rw-r--r--sound/soc/codecs/nau8825.c4
-rw-r--r--sound/soc/codecs/ntp8918.c1
-rw-r--r--sound/soc/codecs/pcm1681.c1
-rw-r--r--sound/soc/codecs/pcm3008.c61
-rw-r--r--sound/soc/codecs/pcm3008.h19
-rw-r--r--sound/soc/codecs/pcm3168a-i2c.c11
-rw-r--r--sound/soc/codecs/pcm3168a-spi.c2
-rw-r--r--sound/soc/codecs/pcm3168a.c18
-rw-r--r--sound/soc/codecs/pcm512x-i2c.c2
-rw-r--r--sound/soc/codecs/pcm512x-spi.c2
-rw-r--r--sound/soc/codecs/pcm512x.c7
-rw-r--r--sound/soc/codecs/pcm6240.c28
-rw-r--r--sound/soc/codecs/pcm6240.h7
-rw-r--r--sound/soc/codecs/rk817_codec.c4
-rw-r--r--sound/soc/codecs/rt1011.c8
-rw-r--r--sound/soc/codecs/rt1015.c10
-rw-r--r--sound/soc/codecs/rt1016.c10
-rw-r--r--sound/soc/codecs/rt1017-sdca-sdw.c10
-rw-r--r--sound/soc/codecs/rt1019.c6
-rw-r--r--sound/soc/codecs/rt1305.c12
-rw-r--r--sound/soc/codecs/rt1308-sdw.c10
-rw-r--r--sound/soc/codecs/rt1308.c8
-rw-r--r--sound/soc/codecs/rt1316-sdw.c10
-rw-r--r--sound/soc/codecs/rt1318-sdw.c10
-rw-r--r--sound/soc/codecs/rt1318.c6
-rw-r--r--sound/soc/codecs/rt1320-sdw.c13
-rw-r--r--sound/soc/codecs/rt274.c12
-rw-r--r--sound/soc/codecs/rt286.c9
-rw-r--r--sound/soc/codecs/rt298.c9
-rw-r--r--sound/soc/codecs/rt5514-spi.c8
-rw-r--r--sound/soc/codecs/rt5514.c12
-rw-r--r--sound/soc/codecs/rt5616.c4
-rw-r--r--sound/soc/codecs/rt5631.c4
-rw-r--r--sound/soc/codecs/rt5640.c18
-rw-r--r--sound/soc/codecs/rt5645.c24
-rw-r--r--sound/soc/codecs/rt5651.c12
-rw-r--r--sound/soc/codecs/rt5659.c12
-rw-r--r--sound/soc/codecs/rt5660.c12
-rw-r--r--sound/soc/codecs/rt5663.c10
-rw-r--r--sound/soc/codecs/rt5665.c12
-rw-r--r--sound/soc/codecs/rt5668.c10
-rw-r--r--sound/soc/codecs/rt5670.c12
-rw-r--r--sound/soc/codecs/rt5677-spi.c3
-rw-r--r--sound/soc/codecs/rt5677.c5
-rw-r--r--sound/soc/codecs/rt5682-i2c.c6
-rw-r--r--sound/soc/codecs/rt5682-sdw.c12
-rw-r--r--sound/soc/codecs/rt5682.c4
-rw-r--r--sound/soc/codecs/rt5682s.c4
-rw-r--r--sound/soc/codecs/rt700-sdw.c12
-rw-r--r--sound/soc/codecs/rt711-sdca-sdw.c12
-rw-r--r--sound/soc/codecs/rt711-sdw.c12
-rw-r--r--sound/soc/codecs/rt712-sdca-dmic.c12
-rw-r--r--sound/soc/codecs/rt712-sdca-sdw.c12
-rw-r--r--sound/soc/codecs/rt712-sdca.c80
-rw-r--r--sound/soc/codecs/rt712-sdca.h3
-rw-r--r--sound/soc/codecs/rt715-sdca-sdw.c10
-rw-r--r--sound/soc/codecs/rt715-sdca.c8
-rw-r--r--sound/soc/codecs/rt715-sdw.c10
-rw-r--r--sound/soc/codecs/rt715.c8
-rw-r--r--sound/soc/codecs/rt721-sdca-sdw.c12
-rw-r--r--sound/soc/codecs/rt722-sdca-sdw.c146
-rw-r--r--sound/soc/codecs/rt722-sdca-sdw.h99
-rw-r--r--sound/soc/codecs/rt722-sdca.c135
-rw-r--r--sound/soc/codecs/rt722-sdca.h4
-rw-r--r--sound/soc/codecs/rt9120.c8
-rw-r--r--sound/soc/codecs/rtq9128.c8
-rw-r--r--sound/soc/codecs/sgtl5000.c4
-rw-r--r--sound/soc/codecs/sma1307.c39
-rw-r--r--sound/soc/codecs/src4xxx.c4
-rw-r--r--sound/soc/codecs/tas2552.c9
-rw-r--r--sound/soc/codecs/tas2562.c14
-rw-r--r--sound/soc/codecs/tas2764.c113
-rw-r--r--sound/soc/codecs/tas2764.h7
-rw-r--r--sound/soc/codecs/tas2770.c57
-rw-r--r--sound/soc/codecs/tas2781-fmwlib.c197
-rw-r--r--sound/soc/codecs/tas2781-i2c.c215
-rw-r--r--sound/soc/codecs/tlv320adc3xxx.c3
-rw-r--r--sound/soc/codecs/tlv320dac33.c6
-rw-r--r--sound/soc/codecs/ts3a227e.c6
-rw-r--r--sound/soc/codecs/tscs454.c5
-rw-r--r--sound/soc/codecs/twl4030.c2
-rw-r--r--sound/soc/codecs/uda1380.c6
-rw-r--r--sound/soc/codecs/wcd934x.c16
-rw-r--r--sound/soc/codecs/wcd937x-sdw.c49
-rw-r--r--sound/soc/codecs/wcd937x.c57
-rw-r--r--sound/soc/codecs/wcd937x.h7
-rw-r--r--sound/soc/codecs/wcd938x-sdw.c10
-rw-r--r--sound/soc/codecs/wcd938x.c11
-rw-r--r--sound/soc/codecs/wcd939x-sdw.c10
-rw-r--r--sound/soc/codecs/wcd939x.c2
-rw-r--r--sound/soc/codecs/wm0010.c13
-rw-r--r--sound/soc/codecs/wm2200.c15
-rw-r--r--sound/soc/codecs/wm5100.c15
-rw-r--r--sound/soc/codecs/wm5110.c4
-rw-r--r--sound/soc/codecs/wm8350.c4
-rw-r--r--sound/soc/codecs/wm8400.c4
-rw-r--r--sound/soc/codecs/wm8510.c4
-rw-r--r--sound/soc/codecs/wm8523.c4
-rw-r--r--sound/soc/codecs/wm8524.c2
-rw-r--r--sound/soc/codecs/wm8580.c4
-rw-r--r--sound/soc/codecs/wm8711.c4
-rw-r--r--sound/soc/codecs/wm8728.c2
-rw-r--r--sound/soc/codecs/wm8737.c4
-rw-r--r--sound/soc/codecs/wm8741.c2
-rw-r--r--sound/soc/codecs/wm8750.c4
-rw-r--r--sound/soc/codecs/wm8753.c12
-rw-r--r--sound/soc/codecs/wm8770.c4
-rw-r--r--sound/soc/codecs/wm8776.c4
-rw-r--r--sound/soc/codecs/wm8804-i2c.c2
-rw-r--r--sound/soc/codecs/wm8804-spi.c2
-rw-r--r--sound/soc/codecs/wm8804.c11
-rw-r--r--sound/soc/codecs/wm8900.c8
-rw-r--r--sound/soc/codecs/wm8903.c8
-rw-r--r--sound/soc/codecs/wm8904.c325
-rw-r--r--sound/soc/codecs/wm8940.c4
-rw-r--r--sound/soc/codecs/wm8955.c4
-rw-r--r--sound/soc/codecs/wm8960.c4
-rw-r--r--sound/soc/codecs/wm8961.c4
-rw-r--r--sound/soc/codecs/wm8962.c12
-rw-r--r--sound/soc/codecs/wm8971.c4
-rw-r--r--sound/soc/codecs/wm8974.c4
-rw-r--r--sound/soc/codecs/wm8978.c4
-rw-r--r--sound/soc/codecs/wm8983.c4
-rw-r--r--sound/soc/codecs/wm8985.c4
-rw-r--r--sound/soc/codecs/wm8988.c4
-rw-r--r--sound/soc/codecs/wm8990.c4
-rw-r--r--sound/soc/codecs/wm8991.c4
-rw-r--r--sound/soc/codecs/wm8993.c8
-rw-r--r--sound/soc/codecs/wm8994.c10
-rw-r--r--sound/soc/codecs/wm8995.c4
-rw-r--r--sound/soc/codecs/wm8996.c8
-rw-r--r--sound/soc/codecs/wm9081.c8
-rw-r--r--sound/soc/codecs/wm9712.c10
-rw-r--r--sound/soc/codecs/wm9713.c18
-rw-r--r--sound/soc/codecs/wm_hubs.c3
-rw-r--r--sound/soc/codecs/wsa881x.c17
-rw-r--r--sound/soc/codecs/wsa883x.c202
-rw-r--r--sound/soc/codecs/wsa884x.c12
-rw-r--r--sound/soc/dwc/dwc-i2s.c6
-rw-r--r--sound/soc/fsl/fsl-asoc-card.c4
-rw-r--r--sound/soc/fsl/fsl_aud2htx.c3
-rw-r--r--sound/soc/fsl/fsl_audmix.c16
-rw-r--r--sound/soc/fsl/fsl_esai.c9
-rw-r--r--sound/soc/fsl/fsl_micfil.c58
-rw-r--r--sound/soc/fsl/fsl_micfil.h1
-rw-r--r--sound/soc/fsl/fsl_qmc_audio.c4
-rw-r--r--sound/soc/fsl/fsl_sai.c8
-rw-r--r--sound/soc/fsl/fsl_xcvr.c3
-rw-r--r--sound/soc/fsl/imx-card.c5
-rw-r--r--sound/soc/fsl/imx-pcm-fiq.c3
-rw-r--r--sound/soc/generic/audio-graph-card.c97
-rw-r--r--sound/soc/generic/audio-graph-card2-custom-sample.dtsi702
-rw-r--r--sound/soc/generic/audio-graph-card2-custom-sample1.dtsi396
-rw-r--r--sound/soc/generic/audio-graph-card2-custom-sample2.dtsi382
-rw-r--r--sound/soc/generic/audio-graph-card2.c95
-rw-r--r--sound/soc/generic/simple-card-utils.c177
-rw-r--r--sound/soc/generic/simple-card.c132
-rw-r--r--sound/soc/img/img-i2s-in.c9
-rw-r--r--sound/soc/img/img-i2s-out.c9
-rw-r--r--sound/soc/img/img-parallel-out.c5
-rw-r--r--sound/soc/img/img-spdif-in.c9
-rw-r--r--sound/soc/img/img-spdif-out.c10
-rw-r--r--sound/soc/img/pistachio-internal-dac.c8
-rw-r--r--sound/soc/intel/avs/avs.h1
-rw-r--r--sound/soc/intel/avs/board_selection.c17
-rw-r--r--sound/soc/intel/avs/boards/Kconfig10
-rw-r--r--sound/soc/intel/avs/boards/Makefile2
-rw-r--r--sound/soc/intel/avs/boards/da7219.c5
-rw-r--r--sound/soc/intel/avs/boards/es8336.c3
-rw-r--r--sound/soc/intel/avs/boards/max98357a.c2
-rw-r--r--sound/soc/intel/avs/boards/max98373.c2
-rw-r--r--sound/soc/intel/avs/boards/max98927.c4
-rw-r--r--sound/soc/intel/avs/boards/nau8825.c5
-rw-r--r--sound/soc/intel/avs/boards/pcm3168a.c143
-rw-r--r--sound/soc/intel/avs/boards/rt274.c5
-rw-r--r--sound/soc/intel/avs/boards/rt286.c5
-rw-r--r--sound/soc/intel/avs/boards/rt298.c7
-rw-r--r--sound/soc/intel/avs/boards/rt5514.c2
-rw-r--r--sound/soc/intel/avs/boards/rt5663.c3
-rw-r--r--sound/soc/intel/avs/boards/rt5682.c3
-rw-r--r--sound/soc/intel/avs/boards/ssm4567.c4
-rw-r--r--sound/soc/intel/avs/control.c182
-rw-r--r--sound/soc/intel/avs/control.h12
-rw-r--r--sound/soc/intel/avs/core.c24
-rw-r--r--sound/soc/intel/avs/dsp.c3
-rw-r--r--sound/soc/intel/avs/loader.c64
-rw-r--r--sound/soc/intel/avs/messages.c149
-rw-r--r--sound/soc/intel/avs/messages.h46
-rw-r--r--sound/soc/intel/avs/path.c375
-rw-r--r--sound/soc/intel/avs/path.h5
-rw-r--r--sound/soc/intel/avs/pcm.c2
-rw-r--r--sound/soc/intel/avs/tgl.c33
-rw-r--r--sound/soc/intel/avs/topology.c89
-rw-r--r--sound/soc/intel/avs/topology.h16
-rw-r--r--sound/soc/intel/boards/sof_sdw.c2
-rw-r--r--sound/soc/intel/catpt/device.c14
-rw-r--r--sound/soc/intel/common/soc-acpi-intel-adl-match.c29
-rw-r--r--sound/soc/intel/common/soc-acpi-intel-lnl-match.c137
-rw-r--r--sound/soc/intel/common/soc-acpi-intel-ptl-match.c284
-rw-r--r--sound/soc/intel/common/soc-acpi-intel-sdw-mockup-match.c42
-rw-r--r--sound/soc/intel/common/soc-acpi-intel-sdw-mockup-match.h1
-rw-r--r--sound/soc/kirkwood/armada-370-db.c6
-rw-r--r--sound/soc/mediatek/common/mtk-afe-fe-dai.c20
-rw-r--r--sound/soc/mediatek/common/mtk-afe-fe-dai.h2
-rw-r--r--sound/soc/mediatek/common/mtk-soc-card.h1
-rw-r--r--sound/soc/mediatek/common/mtk-soundcard-driver.c19
-rw-r--r--sound/soc/mediatek/mt2701/mt2701-afe-pcm.c6
-rw-r--r--sound/soc/mediatek/mt2701/mt2701-cs42448.c8
-rw-r--r--sound/soc/mediatek/mt2701/mt2701-wm8960.c2
-rw-r--r--sound/soc/mediatek/mt6797/mt6797-afe-pcm.c6
-rw-r--r--sound/soc/mediatek/mt7986/mt7986-afe-pcm.c6
-rw-r--r--sound/soc/mediatek/mt7986/mt7986-dai-etdm.c4
-rw-r--r--sound/soc/mediatek/mt7986/mt7986-wm8960.c2
-rw-r--r--sound/soc/mediatek/mt8173/mt8173-afe-pcm.c6
-rw-r--r--sound/soc/mediatek/mt8173/mt8173-max98090.c2
-rw-r--r--sound/soc/mediatek/mt8173/mt8173-rt5650-rt5514.c2
-rw-r--r--sound/soc/mediatek/mt8173/mt8173-rt5650-rt5676.c4
-rw-r--r--sound/soc/mediatek/mt8173/mt8173-rt5650.c2
-rw-r--r--sound/soc/mediatek/mt8183/mt8183-afe-pcm.c6
-rw-r--r--sound/soc/mediatek/mt8183/mt8183-da7219-max98357.c2
-rw-r--r--sound/soc/mediatek/mt8183/mt8183-mt6358-ts3a227-max98357.c4
-rw-r--r--sound/soc/mediatek/mt8186/mt8186-afe-clk.c55
-rw-r--r--sound/soc/mediatek/mt8186/mt8186-afe-clk.h2
-rw-r--r--sound/soc/mediatek/mt8186/mt8186-afe-pcm.c6
-rw-r--r--sound/soc/mediatek/mt8186/mt8186-mt6366.c2
-rw-r--r--sound/soc/mediatek/mt8188/Makefile1
-rw-r--r--sound/soc/mediatek/mt8188/mt8188-afe-clk.c8
-rw-r--r--sound/soc/mediatek/mt8188/mt8188-afe-clk.h8
-rw-r--r--sound/soc/mediatek/mt8188/mt8188-afe-common.h1
-rw-r--r--sound/soc/mediatek/mt8188/mt8188-afe-pcm.c34
-rw-r--r--sound/soc/mediatek/mt8188/mt8188-audsys-clk.c4
-rw-r--r--sound/soc/mediatek/mt8188/mt8188-audsys-clkid.h4
-rw-r--r--sound/soc/mediatek/mt8188/mt8188-dai-dmic.c683
-rw-r--r--sound/soc/mediatek/mt8188/mt8188-mt6359.c57
-rw-r--r--sound/soc/mediatek/mt8188/mt8188-reg.h17
-rw-r--r--sound/soc/mediatek/mt8192/mt8192-afe-pcm.c6
-rw-r--r--sound/soc/mediatek/mt8192/mt8192-mt6359-rt1015-rt5682.c2
-rw-r--r--sound/soc/mediatek/mt8195/mt8195-afe-pcm.c6
-rw-r--r--sound/soc/mediatek/mt8195/mt8195-mt6359.c12
-rw-r--r--sound/soc/mediatek/mt8365/mt8365-afe-clk.c3
-rw-r--r--sound/soc/mediatek/mt8365/mt8365-afe-pcm.c19
-rw-r--r--sound/soc/mediatek/mt8365/mt8365-dai-i2s.c12
-rw-r--r--sound/soc/mediatek/mt8365/mt8365-dai-pcm.c4
-rw-r--r--sound/soc/mediatek/mt8365/mt8365-mt6357.c2
-rw-r--r--sound/soc/meson/axg-tdm-interface.c9
-rw-r--r--sound/soc/meson/meson-card-utils.c4
-rw-r--r--sound/soc/meson/t9015.c4
-rw-r--r--sound/soc/mxs/mxs-sgtl5000.c2
-rw-r--r--sound/soc/pxa/spitz.c2
-rw-r--r--sound/soc/qcom/lpass-sc7180.c8
-rw-r--r--sound/soc/qcom/lpass-sc7280.c8
-rw-r--r--sound/soc/qcom/qdsp6/q6apm.c2
-rw-r--r--sound/soc/qcom/sc7180.c2
-rw-r--r--sound/soc/qcom/sc7280.c4
-rw-r--r--sound/soc/qcom/sdw.c34
-rw-r--r--sound/soc/qcom/sm8250.c3
-rw-r--r--sound/soc/renesas/migor.c2
-rw-r--r--sound/soc/renesas/rcar/core.c8
-rw-r--r--sound/soc/rockchip/rk3288_hdmi_analog.c2
-rw-r--r--sound/soc/rockchip/rk3399_gru_sound.c10
-rw-r--r--sound/soc/rockchip/rockchip_i2s.c5
-rw-r--r--sound/soc/rockchip/rockchip_i2s_tdm.c16
-rw-r--r--sound/soc/rockchip/rockchip_max98090.c8
-rw-r--r--sound/soc/rockchip/rockchip_pdm.c10
-rw-r--r--sound/soc/rockchip/rockchip_rt5645.c2
-rw-r--r--sound/soc/rockchip/rockchip_spdif.c9
-rw-r--r--sound/soc/samsung/aries_wm8994.c6
-rw-r--r--sound/soc/samsung/arndale.c4
-rw-r--r--sound/soc/samsung/bells.c21
-rw-r--r--sound/soc/samsung/i2s.c10
-rw-r--r--sound/soc/samsung/littlemill.c5
-rw-r--r--sound/soc/samsung/lowland.c7
-rw-r--r--sound/soc/samsung/midas_wm1811.c2
-rw-r--r--sound/soc/samsung/odroid.c2
-rw-r--r--sound/soc/samsung/smdk_wm8994.c4
-rw-r--r--sound/soc/samsung/smdk_wm8994pcm.c2
-rw-r--r--sound/soc/samsung/snow.c2
-rw-r--r--sound/soc/samsung/speyside.c53
-rw-r--r--sound/soc/samsung/tm2_wm5110.c9
-rw-r--r--sound/soc/samsung/tobermory.c3
-rw-r--r--sound/soc/sdca/Makefile2
-rw-r--r--sound/soc/sdca/sdca_device.c3
-rw-r--r--sound/soc/sdca/sdca_functions.c1697
-rw-r--r--sound/soc/sdca/sdca_regmap.c321
-rw-r--r--sound/soc/sdw_utils/soc_sdw_utils.c14
-rw-r--r--sound/soc/soc-card.c14
-rw-r--r--sound/soc/soc-component.c38
-rw-r--r--sound/soc/soc-compress.c2
-rw-r--r--sound/soc/soc-core.c50
-rw-r--r--sound/soc/soc-dai.c38
-rw-r--r--sound/soc/soc-dapm.c51
-rw-r--r--sound/soc/soc-link.c18
-rw-r--r--sound/soc/soc-ops-test.c541
-rw-r--r--sound/soc/soc-ops.c730
-rw-r--r--sound/soc/soc-pcm.c281
-rw-r--r--sound/soc/soc-topology.c59
-rw-r--r--sound/soc/soc-utils.c61
-rw-r--r--sound/soc/sof/amd/Kconfig5
-rw-r--r--sound/soc/sof/amd/acp.c56
-rw-r--r--sound/soc/sof/amd/acp.h2
-rw-r--r--sound/soc/sof/amd/pci-acp63.c3
-rw-r--r--sound/soc/sof/amd/pci-acp70.c3
-rw-r--r--sound/soc/sof/amd/pci-rmb.c1
-rw-r--r--sound/soc/sof/amd/pci-rn.c3
-rw-r--r--sound/soc/sof/amd/pci-vangogh.c3
-rw-r--r--sound/soc/sof/imx/Kconfig17
-rw-r--r--sound/soc/sof/imx/Makefile6
-rw-r--r--sound/soc/sof/imx/imx-common.c430
-rw-r--r--sound/soc/sof/imx/imx-common.h151
-rw-r--r--sound/soc/sof/imx/imx8.c755
-rw-r--r--sound/soc/sof/imx/imx8m.c567
-rw-r--r--sound/soc/sof/imx/imx8ulp.c520
-rw-r--r--sound/soc/sof/imx/imx9.c137
-rw-r--r--sound/soc/sof/intel/Makefile2
-rw-r--r--sound/soc/sof/intel/bdw.c3
-rw-r--r--sound/soc/sof/intel/byt.c3
-rw-r--r--sound/soc/sof/intel/hda-dai.c2
-rw-r--r--sound/soc/sof/intel/hda-mlink.c127
-rw-r--r--sound/soc/sof/intel/hda.c34
-rw-r--r--sound/soc/sof/intel/hda.h4
-rw-r--r--sound/soc/sof/intel/lnl.c117
-rw-r--r--sound/soc/sof/intel/lnl.h6
-rw-r--r--sound/soc/sof/intel/mtl.c81
-rw-r--r--sound/soc/sof/intel/mtl.h15
-rw-r--r--sound/soc/sof/intel/pci-apl.c2
-rw-r--r--sound/soc/sof/intel/pci-cnl.c2
-rw-r--r--sound/soc/sof/intel/pci-icl.c2
-rw-r--r--sound/soc/sof/intel/pci-lnl.c14
-rw-r--r--sound/soc/sof/intel/pci-mtl.c10
-rw-r--r--sound/soc/sof/intel/pci-ptl.c19
-rw-r--r--sound/soc/sof/intel/pci-skl.c2
-rw-r--r--sound/soc/sof/intel/pci-tgl.c2
-rw-r--r--sound/soc/sof/intel/pci-tng.c3
-rw-r--r--sound/soc/sof/intel/ptl.c106
-rw-r--r--sound/soc/sof/intel/ptl.h19
-rw-r--r--sound/soc/sof/intel/shim.h2
-rw-r--r--sound/soc/sof/ipc3-pcm.c13
-rw-r--r--sound/soc/sof/ipc3-topology.c20
-rw-r--r--sound/soc/sof/ipc3.c12
-rw-r--r--sound/soc/sof/ipc4-loader.c176
-rw-r--r--sound/soc/sof/ipc4-pcm.c21
-rw-r--r--sound/soc/sof/ipc4-priv.h6
-rw-r--r--sound/soc/sof/ipc4-topology.c65
-rw-r--r--sound/soc/sof/ipc4.c26
-rw-r--r--sound/soc/sof/mediatek/mt8186/mt8186.c107
-rw-r--r--sound/soc/sof/mediatek/mt8195/mt8195-clk.c3
-rw-r--r--sound/soc/sof/mediatek/mt8195/mt8195.c107
-rw-r--r--sound/soc/sof/mediatek/mtk-adsp-common.c130
-rw-r--r--sound/soc/sof/mediatek/mtk-adsp-common.h10
-rw-r--r--sound/soc/sof/pcm.c169
-rw-r--r--sound/soc/sof/sof-acpi-dev.c9
-rw-r--r--sound/soc/sof/sof-audio.c49
-rw-r--r--sound/soc/sof/sof-audio.h17
-rw-r--r--sound/soc/sof/sof-of-dev.c8
-rw-r--r--sound/soc/sof/sof-pci-dev.c9
-rw-r--r--sound/soc/sof/topology.c4
-rw-r--r--sound/soc/stm/stm32_i2s.c6
-rw-r--r--sound/soc/stm/stm32_sai.c6
-rw-r--r--sound/soc/stm/stm32_sai_sub.c6
-rw-r--r--sound/soc/stm/stm32_spdifrx.c6
-rw-r--r--sound/soc/sunxi/sun4i-codec.c57
-rw-r--r--sound/soc/sunxi/sun4i-spdif.c6
-rw-r--r--sound/soc/sunxi/sun50i-dmic.c6
-rw-r--r--sound/soc/sunxi/sun8i-codec.c6
-rw-r--r--sound/soc/tegra/Makefile2
-rw-r--r--sound/soc/tegra/tegra186_asrc.c13
-rw-r--r--sound/soc/tegra/tegra186_dspk.c13
-rw-r--r--sound/soc/tegra/tegra20_i2s.c13
-rw-r--r--sound/soc/tegra/tegra20_spdif.c13
-rw-r--r--sound/soc/tegra/tegra210_admaif.c37
-rw-r--r--sound/soc/tegra/tegra210_admaif.h9
-rw-r--r--sound/soc/tegra/tegra210_adx.c23
-rw-r--r--sound/soc/tegra/tegra210_ahub.c13
-rw-r--r--sound/soc/tegra/tegra210_amx.c13
-rw-r--r--sound/soc/tegra/tegra210_dmic.c13
-rw-r--r--sound/soc/tegra/tegra210_i2s.c13
-rw-r--r--sound/soc/tegra/tegra210_mixer.c13
-rw-r--r--sound/soc/tegra/tegra210_mvc.c13
-rw-r--r--sound/soc/tegra/tegra210_ope.c13
-rw-r--r--sound/soc/tegra/tegra210_sfc.c13
-rw-r--r--sound/soc/tegra/tegra30_ahub.c13
-rw-r--r--sound/soc/tegra/tegra30_i2s.c13
-rw-r--r--sound/soc/tegra/tegra_asoc_machine.c18
-rw-r--r--sound/soc/tegra/tegra_isomgr_bw.c129
-rw-r--r--sound/soc/tegra/tegra_isomgr_bw.h31
-rw-r--r--sound/soc/tegra/tegra_wm8903.c2
-rw-r--r--sound/soc/ti/ams-delta.c2
-rw-r--r--sound/soc/ti/davinci-evm.c2
-rw-r--r--sound/soc/ti/davinci-i2s.c6
-rw-r--r--sound/soc/ti/j721e-evm.c9
-rw-r--r--sound/soc/ti/n810.c2
-rw-r--r--sound/soc/ti/omap-twl4030.c6
-rw-r--r--sound/soc/ti/omap3pandora.c4
-rw-r--r--sound/soc/ti/osk5912.c2
-rw-r--r--sound/soc/ti/rx51.c2
-rw-r--r--sound/soc/uniphier/aio-cpu.c8
-rw-r--r--sound/soc/ux500/mop500_ab8500.c4
-rw-r--r--sound/soc/xtensa/xtfpga-i2s.c6
-rw-r--r--sound/usb/format.c4
-rw-r--r--sound/usb/midi.c80
-rw-r--r--sound/usb/mixer_quirks.c61
-rw-r--r--sound/usb/quirks.c4
-rw-r--r--sound/usb/usx2y/usbusx2y.c11
-rw-r--r--sound/usb/usx2y/usbusx2y.h26
-rw-r--r--sound/usb/usx2y/usbusx2yaudio.c27
-rw-r--r--sound/x86/intel_hdmi_audio.c8
-rw-r--r--tools/arch/arm/include/uapi/asm/kvm.h1
-rw-r--r--tools/arch/arm64/include/asm/sysreg.h30
-rw-r--r--tools/arch/arm64/include/uapi/asm/kvm.h12
-rw-r--r--tools/arch/x86/include/asm/amd-ibs.h3
-rw-r--r--tools/arch/x86/include/asm/asm.h8
-rw-r--r--tools/arch/x86/include/asm/cpufeatures.h9
-rw-r--r--tools/arch/x86/include/asm/disabled-features.h161
-rw-r--r--tools/arch/x86/include/asm/msr-index.h2
-rw-r--r--tools/arch/x86/include/asm/nops.h2
-rw-r--r--tools/arch/x86/include/asm/orc_types.h4
-rw-r--r--tools/arch/x86/include/asm/pvclock-abi.h4
-rw-r--r--tools/arch/x86/include/asm/required-features.h105
-rw-r--r--tools/build/Makefile.feature3
-rw-r--r--tools/build/feature/Makefile3
-rw-r--r--tools/include/linux/bits.h2
-rw-r--r--tools/include/nolibc/Makefile2
-rw-r--r--tools/include/nolibc/arch-mips.h1
-rw-r--r--tools/include/nolibc/arch-s390.h9
-rw-r--r--tools/include/nolibc/arch.h2
-rw-r--r--tools/include/nolibc/crt.h2
-rw-r--r--tools/include/nolibc/dirent.h98
-rw-r--r--tools/include/nolibc/errno.h2
-rw-r--r--tools/include/nolibc/limits.h7
-rw-r--r--tools/include/nolibc/nolibc.h4
-rw-r--r--tools/include/nolibc/signal.h1
-rw-r--r--tools/include/nolibc/stackprotector.h2
-rw-r--r--tools/include/nolibc/stdio.h98
-rw-r--r--tools/include/nolibc/stdlib.h1
-rw-r--r--tools/include/nolibc/string.h4
-rw-r--r--tools/include/nolibc/sys.h83
-rw-r--r--tools/include/uapi/asm-generic/socket.h21
-rw-r--r--tools/include/uapi/linux/bpf.h30
-rw-r--r--tools/include/uapi/linux/const.h2
-rw-r--r--tools/include/uapi/linux/elf.h524
-rw-r--r--tools/include/uapi/linux/if_xdp.h10
-rw-r--r--tools/include/uapi/linux/netdev.h9
-rw-r--r--tools/memory-model/Documentation/glossary.txt32
-rw-r--r--tools/memory-model/Documentation/herd-representation.txt49
-rw-r--r--tools/memory-model/README4
-rw-r--r--tools/memory-model/linux-kernel.bell33
-rw-r--r--tools/memory-model/linux-kernel.cat10
-rw-r--r--tools/memory-model/linux-kernel.cfg1
-rw-r--r--tools/memory-model/linux-kernel.def169
-rw-r--r--tools/net/ynl/Makefile.deps1
-rw-r--r--tools/net/ynl/pyynl/lib/ynl.py46
-rwxr-xr-xtools/net/ynl/pyynl/ynl_gen_c.py28
-rw-r--r--tools/objtool/Documentation/objtool.txt105
-rw-r--r--tools/objtool/Makefile2
-rw-r--r--tools/objtool/arch/loongarch/decode.c28
-rw-r--r--tools/objtool/arch/loongarch/include/arch/elf.h7
-rw-r--r--tools/objtool/arch/loongarch/special.c159
-rw-r--r--tools/objtool/arch/powerpc/decode.c24
-rw-r--r--tools/objtool/arch/x86/decode.c14
-rw-r--r--tools/objtool/builtin-check.c208
-rw-r--r--tools/objtool/check.c95
-rw-r--r--tools/objtool/elf.c3
-rw-r--r--tools/objtool/include/objtool/arch.h3
-rw-r--r--tools/objtool/include/objtool/builtin.h3
-rw-r--r--tools/objtool/include/objtool/elf.h2
-rw-r--r--tools/objtool/include/objtool/warn.h20
-rw-r--r--tools/objtool/noreturns.h2
-rw-r--r--tools/objtool/objtool.c78
-rw-r--r--tools/objtool/orc_dump.c7
-rwxr-xr-xtools/perf/check-headers.sh2
-rw-r--r--tools/power/cpupower/Makefile19
-rw-r--r--tools/power/cpupower/bench/parse.c4
-rw-r--r--tools/power/cpupower/lib/cpupower.c48
-rw-r--r--tools/power/cpupower/lib/cpupower.h3
-rw-r--r--tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c48
-rw-r--r--tools/power/pm-graph/config/custom-timeline-functions.cfg4
-rwxr-xr-xtools/power/pm-graph/sleepgraph.py6
-rw-r--r--tools/power/x86/intel-speed-select/Makefile2
-rw-r--r--tools/power/x86/intel-speed-select/isst-config.c22
-rw-r--r--tools/power/x86/intel-speed-select/isst-display.c11
-rw-r--r--tools/power/x86/turbostat/turbostat.c2
-rw-r--r--tools/sched_ext/include/scx/common.bpf.h34
-rw-r--r--tools/sched_ext/include/scx/common.h1
-rw-r--r--tools/sched_ext/include/scx/compat.bpf.h95
-rw-r--r--tools/sched_ext/include/scx/compat.h16
-rw-r--r--tools/sched_ext/include/scx/enum_defs.autogen.h120
-rw-r--r--tools/sched_ext/scx_central.c15
-rw-r--r--tools/sched_ext/scx_qmap.bpf.c23
-rw-r--r--tools/scripts/Makefile.include3
-rwxr-xr-xtools/testing/ktest/ktest.pl8
-rw-r--r--tools/testing/kunit/configs/all_tests.config2
-rw-r--r--tools/testing/kunit/kunit_kernel.py4
-rw-r--r--tools/testing/kunit/kunit_parser.py9
-rwxr-xr-xtools/testing/kunit/kunit_tool_test.py11
-rw-r--r--tools/testing/kunit/qemu_configs/sparc.py5
-rw-r--r--tools/testing/kunit/qemu_configs/x86_64.py4
-rw-r--r--tools/testing/selftests/Makefile1
-rw-r--r--tools/testing/selftests/arm64/fp/kernel-test.c1
-rw-r--r--tools/testing/selftests/arm64/mte/check_hugetlb_options.c19
-rw-r--r--tools/testing/selftests/bpf/network_helpers.c28
-rw-r--r--tools/testing/selftests/bpf/network_helpers.h3
-rw-r--r--tools/testing/selftests/bpf/prog_tests/lwt_helpers.h29
-rw-r--r--tools/testing/selftests/bpf/prog_tests/net_timestamping.c239
-rw-r--r--tools/testing/selftests/bpf/prog_tests/xdp_context_test_run.c145
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_iter_tcp4.c4
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_iter_tcp6.c4
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_tracing_net.h1
-rw-r--r--tools/testing/selftests/bpf/progs/net_timestamping.c248
-rw-r--r--tools/testing/selftests/bpf/progs/setget_sockopt.c3
-rw-r--r--tools/testing/selftests/bpf/progs/test_xdp_meta.c53
-rw-r--r--tools/testing/selftests/bpf/xdp_hw_metadata.c168
-rwxr-xr-xtools/testing/selftests/damon/damon_nr_regions.py2
-rwxr-xr-xtools/testing/selftests/damon/damos_quota.py9
-rwxr-xr-xtools/testing/selftests/damon/damos_quota_goal.py3
-rw-r--r--tools/testing/selftests/drivers/net/.gitignore2
-rw-r--r--tools/testing/selftests/drivers/net/Makefile4
-rw-r--r--tools/testing/selftests/drivers/net/README.rst4
-rwxr-xr-xtools/testing/selftests/drivers/net/bonding/bond_options.sh4
-rw-r--r--tools/testing/selftests/drivers/net/config1
-rwxr-xr-xtools/testing/selftests/drivers/net/hds.py3
-rw-r--r--tools/testing/selftests/drivers/net/hw/Makefile6
-rwxr-xr-xtools/testing/selftests/drivers/net/hw/csum.py50
-rwxr-xr-xtools/testing/selftests/drivers/net/hw/devmem.py6
-rwxr-xr-xtools/testing/selftests/drivers/net/hw/irq.py99
-rw-r--r--tools/testing/selftests/drivers/net/hw/ncdevmem.c1
-rwxr-xr-xtools/testing/selftests/drivers/net/hw/rss_ctx.py48
-rwxr-xr-xtools/testing/selftests/drivers/net/hw/rss_input_xfrm.py87
-rwxr-xr-xtools/testing/selftests/drivers/net/hw/tso.py241
-rw-r--r--tools/testing/selftests/drivers/net/hw/xdp_dummy.bpf.c13
-rw-r--r--tools/testing/selftests/drivers/net/lib/py/env.py139
-rw-r--r--tools/testing/selftests/drivers/net/lib/sh/lib_netcons.sh17
-rwxr-xr-xtools/testing/selftests/drivers/net/netcons_sysdata.sh242
-rwxr-xr-xtools/testing/selftests/drivers/net/ping.py208
-rwxr-xr-xtools/testing/selftests/drivers/net/queues.py45
-rw-r--r--tools/testing/selftests/drivers/net/xdp_helper.c151
-rw-r--r--tools/testing/selftests/filesystems/mount-notify/.gitignore (renamed from arch/x86/boot/tools/.gitignore)2
-rw-r--r--tools/testing/selftests/filesystems/mount-notify/Makefile6
-rw-r--r--tools/testing/selftests/filesystems/mount-notify/mount-notify_test.c516
-rw-r--r--tools/testing/selftests/filesystems/nsfs/iterate_mntns.c14
-rw-r--r--tools/testing/selftests/filesystems/overlayfs/Makefile11
-rw-r--r--tools/testing/selftests/filesystems/overlayfs/set_layers_via_fds.c507
-rw-r--r--tools/testing/selftests/filesystems/overlayfs/wrappers.h17
-rw-r--r--tools/testing/selftests/filesystems/statmount/statmount.h2
-rw-r--r--tools/testing/selftests/filesystems/statmount/statmount_test.c13
-rw-r--r--tools/testing/selftests/filesystems/utils.c501
-rw-r--r--tools/testing/selftests/filesystems/utils.h45
-rw-r--r--tools/testing/selftests/ftrace/.gitignore1
-rw-r--r--tools/testing/selftests/ftrace/test.d/dynevent/add_remove_tprobe.tc14
-rw-r--r--tools/testing/selftests/ftrace/test.d/dynevent/add_remove_uprobe.tc10
-rw-r--r--tools/testing/selftests/ftrace/test.d/dynevent/dynevent_limitations.tc42
-rw-r--r--tools/testing/selftests/ftrace/test.d/dynevent/fprobe_syntax_errors.tc1
-rw-r--r--tools/testing/selftests/ftrace/test.d/functions8
-rw-r--r--tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-action-hist-xfail.tc1
-rw-r--r--tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-onchange-action-hist.tc3
-rw-r--r--tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-snapshot-action-hist.tc3
-rw-r--r--tools/testing/selftests/ftrace/test.d/trigger/trigger-hist-expressions.tc1
-rw-r--r--tools/testing/selftests/kselftest.h5
-rwxr-xr-xtools/testing/selftests/kselftest/module.sh2
-rw-r--r--tools/testing/selftests/kvm/Makefile.kvm1
-rw-r--r--tools/testing/selftests/kvm/access_tracking_perf_test.c2
-rw-r--r--tools/testing/selftests/kvm/arm64/get-reg-list.c1
-rw-r--r--tools/testing/selftests/kvm/arm64/hypercalls.c46
-rw-r--r--tools/testing/selftests/kvm/arm64/set_id_regs.c40
-rw-r--r--tools/testing/selftests/kvm/dirty_log_test.c523
-rw-r--r--tools/testing/selftests/kvm/include/kvm_util.h33
-rw-r--r--tools/testing/selftests/kvm/include/test_util.h2
-rw-r--r--tools/testing/selftests/kvm/include/x86/processor.h50
-rw-r--r--tools/testing/selftests/kvm/kvm_create_max_vcpus.c28
-rw-r--r--tools/testing/selftests/kvm/lib/kvm_util.c114
-rw-r--r--tools/testing/selftests/kvm/lib/userfaultfd_util.c2
-rw-r--r--tools/testing/selftests/kvm/mmu_stress_test.c21
-rw-r--r--tools/testing/selftests/kvm/riscv/sbi_pmu_test.c81
-rw-r--r--tools/testing/selftests/kvm/x86/dirty_log_page_splitting_test.c6
-rw-r--r--tools/testing/selftests/kvm/x86/hyperv_ipi.c6
-rw-r--r--tools/testing/selftests/kvm/x86/nested_emulation_test.c146
-rw-r--r--tools/testing/selftests/kvm/x86/nested_exceptions_test.c2
-rw-r--r--tools/testing/selftests/kvm/x86/nx_huge_pages_test.c4
-rw-r--r--tools/testing/selftests/kvm/x86/pmu_counters_test.c158
-rw-r--r--tools/testing/selftests/kvm/x86/sev_smoke_test.c3
-rw-r--r--tools/testing/selftests/kvm/x86/svm_int_ctl_test.c5
-rw-r--r--tools/testing/selftests/kvm/x86/ucna_injection_test.c2
-rw-r--r--tools/testing/selftests/kvm/x86/xapic_ipi_test.c16
-rw-r--r--tools/testing/selftests/kvm/x86/xapic_state_test.c4
-rw-r--r--tools/testing/selftests/kvm/x86/xen_shinfo_test.c5
-rw-r--r--tools/testing/selftests/landlock/.gitignore1
-rw-r--r--tools/testing/selftests/landlock/Makefile6
-rw-r--r--tools/testing/selftests/landlock/audit.h472
-rw-r--r--tools/testing/selftests/landlock/audit_test.c551
-rw-r--r--tools/testing/selftests/landlock/base_test.c130
-rw-r--r--tools/testing/selftests/landlock/common.h20
-rw-r--r--tools/testing/selftests/landlock/config1
-rw-r--r--tools/testing/selftests/landlock/fs_test.c594
-rw-r--r--tools/testing/selftests/landlock/net_test.c132
-rw-r--r--tools/testing/selftests/landlock/ptrace_test.c140
-rw-r--r--tools/testing/selftests/landlock/scoped_abstract_unix_test.c111
-rw-r--r--tools/testing/selftests/landlock/scoped_signal_test.c108
-rw-r--r--tools/testing/selftests/landlock/wait-pipe-sandbox.c131
-rw-r--r--tools/testing/selftests/lib.mk5
-rw-r--r--tools/testing/selftests/lib/Makefile2
-rw-r--r--tools/testing/selftests/lib/config3
-rwxr-xr-xtools/testing/selftests/lib/prime_numbers.sh4
-rwxr-xr-xtools/testing/selftests/lib/printf.sh4
-rwxr-xr-xtools/testing/selftests/lib/scanf.sh4
-rw-r--r--tools/testing/selftests/livepatch/functions.sh49
-rwxr-xr-xtools/testing/selftests/livepatch/test-ftrace.sh34
-rwxr-xr-xtools/testing/selftests/livepatch/test-kprobe.sh2
-rw-r--r--tools/testing/selftests/mm/guard-pages.c16
-rw-r--r--tools/testing/selftests/mm/hugepage-mremap.c2
-rw-r--r--tools/testing/selftests/mm/ksm_functional_tests.c8
-rw-r--r--tools/testing/selftests/mm/memfd_secret.c14
-rw-r--r--tools/testing/selftests/mm/mkdirty.c8
-rw-r--r--tools/testing/selftests/mm/mlock2.h1
-rw-r--r--tools/testing/selftests/mm/mseal_test.c6
-rw-r--r--tools/testing/selftests/mm/pkey-helpers.h3
-rw-r--r--tools/testing/selftests/mm/pkey_sighandler_tests.c4
-rw-r--r--tools/testing/selftests/mm/protection_keys.c4
-rwxr-xr-xtools/testing/selftests/mm/run_vmtests.sh4
-rw-r--r--tools/testing/selftests/mm/uffd-common.c4
-rw-r--r--tools/testing/selftests/mm/uffd-stress.c15
-rw-r--r--tools/testing/selftests/mm/uffd-unit-tests.c14
-rw-r--r--tools/testing/selftests/mount_setattr/mount_setattr_test.c652
-rw-r--r--tools/testing/selftests/net/.gitignore2
-rw-r--r--tools/testing/selftests/net/Makefile10
-rwxr-xr-xtools/testing/selftests/net/bpf_offload.py5
-rwxr-xr-xtools/testing/selftests/net/cmsg_ip.sh187
-rwxr-xr-xtools/testing/selftests/net/cmsg_ipv6.sh154
-rw-r--r--tools/testing/selftests/net/cmsg_sender.c114
-rw-r--r--tools/testing/selftests/net/config10
-rwxr-xr-xtools/testing/selftests/net/fcnal-test.sh4
-rwxr-xr-xtools/testing/selftests/net/fdb_flush.sh2
-rwxr-xr-xtools/testing/selftests/net/fib_nexthops.sh9
-rwxr-xr-xtools/testing/selftests/net/fib_rule_tests.sh74
-rw-r--r--tools/testing/selftests/net/forwarding/README2
-rw-r--r--tools/testing/selftests/net/forwarding/lib.sh10
-rwxr-xr-xtools/testing/selftests/net/forwarding/vxlan_bridge_1d.sh8
-rwxr-xr-xtools/testing/selftests/net/forwarding/vxlan_bridge_1q.sh15
-rw-r--r--tools/testing/selftests/net/gro.c8
-rwxr-xr-xtools/testing/selftests/net/gro.sh7
-rwxr-xr-xtools/testing/selftests/net/ip_local_port_range.sh4
-rw-r--r--tools/testing/selftests/net/lib.sh19
-rw-r--r--tools/testing/selftests/net/lib/py/__init__.py4
-rw-r--r--tools/testing/selftests/net/lib/py/ksft.py7
-rw-r--r--tools/testing/selftests/net/lib/py/netns.py18
-rw-r--r--tools/testing/selftests/net/lib/py/utils.py89
-rw-r--r--tools/testing/selftests/net/lib/py/ynl.py4
-rw-r--r--tools/testing/selftests/net/lib/xdp_dummy.bpf.c6
-rwxr-xr-xtools/testing/selftests/net/link_netns.py141
-rwxr-xr-xtools/testing/selftests/net/lwt_dst_cache_ref_loop.sh246
-rw-r--r--tools/testing/selftests/net/mptcp/Makefile2
-rwxr-xr-xtools/testing/selftests/net/mptcp/diag.sh27
-rw-r--r--tools/testing/selftests/net/mptcp/mptcp_diag.c272
-rwxr-xr-xtools/testing/selftests/net/mptcp/simult_flows.sh2
-rwxr-xr-xtools/testing/selftests/net/mptcp/userspace_pm.sh30
-rwxr-xr-xtools/testing/selftests/net/netfilter/br_netfilter.sh7
-rwxr-xr-xtools/testing/selftests/net/netfilter/br_netfilter_queue.sh7
-rwxr-xr-xtools/testing/selftests/net/netfilter/nft_queue.sh1
-rwxr-xr-xtools/testing/selftests/net/netns-name.sh10
-rwxr-xr-xtools/testing/selftests/net/nl_netdev.py18
-rwxr-xr-xtools/testing/selftests/net/openvswitch/openvswitch.sh11
-rw-r--r--tools/testing/selftests/net/proc_net_pktgen.c690
-rw-r--r--tools/testing/selftests/net/psock_tpacket.c2
-rw-r--r--tools/testing/selftests/net/reuseaddr_ports_exhausted.c2
-rwxr-xr-xtools/testing/selftests/net/rtnetlink.py30
-rw-r--r--tools/testing/selftests/net/setup_veth.sh3
-rw-r--r--tools/testing/selftests/net/so_rcv_listener.c168
-rw-r--r--tools/testing/selftests/net/tcp_ao/connect-deny.c58
-rw-r--r--tools/testing/selftests/net/tcp_ao/connect.c22
-rw-r--r--tools/testing/selftests/net/tcp_ao/icmps-discard.c17
-rw-r--r--tools/testing/selftests/net/tcp_ao/key-management.c76
-rw-r--r--tools/testing/selftests/net/tcp_ao/lib/aolib.h114
-rw-r--r--tools/testing/selftests/net/tcp_ao/lib/ftrace-tcp.c7
-rw-r--r--tools/testing/selftests/net/tcp_ao/lib/sock.c315
-rw-r--r--tools/testing/selftests/net/tcp_ao/restore.c75
-rw-r--r--tools/testing/selftests/net/tcp_ao/rst.c47
-rw-r--r--tools/testing/selftests/net/tcp_ao/self-connect.c18
-rw-r--r--tools/testing/selftests/net/tcp_ao/seq-ext.c30
-rw-r--r--tools/testing/selftests/net/tcp_ao/unsigned-md5.c118
-rwxr-xr-xtools/testing/selftests/net/test_blackhole_dev.sh11
-rwxr-xr-xtools/testing/selftests/net/test_so_rcv.sh73
-rwxr-xr-xtools/testing/selftests/net/test_vxlan_fdb_changelink.sh111
-rw-r--r--tools/testing/selftests/nolibc/Makefile30
-rw-r--r--tools/testing/selftests/nolibc/nolibc-test-linkage.c6
-rw-r--r--tools/testing/selftests/nolibc/nolibc-test.c138
-rwxr-xr-xtools/testing/selftests/nolibc/run-tests.sh26
-rw-r--r--tools/testing/selftests/pidfd/.gitignore2
-rw-r--r--tools/testing/selftests/pidfd/Makefile4
-rw-r--r--tools/testing/selftests/pidfd/pidfd.h109
-rw-r--r--tools/testing/selftests/pidfd/pidfd_exec_helper.c12
-rw-r--r--tools/testing/selftests/pidfd/pidfd_fdinfo_test.c1
-rw-r--r--tools/testing/selftests/pidfd/pidfd_info_test.c692
-rw-r--r--tools/testing/selftests/pidfd/pidfd_open_test.c30
-rw-r--r--tools/testing/selftests/pidfd/pidfd_setns_test.c45
-rw-r--r--tools/testing/selftests/pidfd/pidfd_test.c76
-rw-r--r--tools/testing/selftests/powerpc/include/pkeys.h5
-rw-r--r--tools/testing/selftests/powerpc/mm/pkey_exec_prot.c2
-rw-r--r--tools/testing/selftests/powerpc/mm/pkey_siginfo.c2
-rw-r--r--tools/testing/selftests/powerpc/pmu/event_code_tests/event_alternatives_tests_p10.c3
-rw-r--r--tools/testing/selftests/powerpc/pmu/event_code_tests/generic_events_valid_test.c3
-rw-r--r--tools/testing/selftests/powerpc/pmu/event_code_tests/group_constraint_l2l3_sel_test.c2
-rw-r--r--tools/testing/selftests/powerpc/pmu/event_code_tests/group_constraint_radix_scope_qual_test.c2
-rw-r--r--tools/testing/selftests/powerpc/pmu/event_code_tests/group_constraint_thresh_cmp_test.c2
-rw-r--r--tools/testing/selftests/powerpc/pmu/event_code_tests/invalid_event_code_test.c4
-rw-r--r--tools/testing/selftests/powerpc/pmu/event_code_tests/reserved_bits_mmcra_sample_elig_mode_test.c5
-rw-r--r--tools/testing/selftests/powerpc/pmu/sampling_tests/Makefile3
-rw-r--r--tools/testing/selftests/powerpc/pmu/sampling_tests/bhrb_filter_map_test.c17
-rw-r--r--tools/testing/selftests/powerpc/pmu/sampling_tests/check_extended_reg_test.c35
-rw-r--r--tools/testing/selftests/powerpc/pmu/sampling_tests/misc.c20
-rw-r--r--tools/testing/selftests/powerpc/pmu/sampling_tests/misc.h12
-rw-r--r--tools/testing/selftests/powerpc/pmu/sampling_tests/mmcra_bhrb_cond_test.c2
-rw-r--r--tools/testing/selftests/powerpc/pmu/sampling_tests/mmcra_bhrb_disable_no_branch_test.c2
-rw-r--r--tools/testing/selftests/powerpc/pmu/sampling_tests/mmcra_bhrb_disable_test.c2
-rw-r--r--tools/testing/selftests/powerpc/pmu/sampling_tests/mmcra_bhrb_ind_call_test.c2
-rw-r--r--tools/testing/selftests/powerpc/ptrace/core-pkey.c6
-rw-r--r--tools/testing/selftests/powerpc/ptrace/ptrace-pkey.c6
-rw-r--r--tools/testing/selftests/ptp/testptp.c37
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/srcu_lockdep.sh2
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/SRCU-P.boot1
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/TREE05.boot6
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/TREE073
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/TREE103
-rw-r--r--tools/testing/selftests/rseq/.gitignore1
-rw-r--r--tools/testing/selftests/rseq/Makefile9
-rw-r--r--tools/testing/selftests/rseq/rseq.c27
-rw-r--r--tools/testing/selftests/rseq/rseq.h5
-rwxr-xr-xtools/testing/selftests/rseq/run_syscall_errors_test.sh5
-rw-r--r--tools/testing/selftests/rseq/syscall_errors_test.c124
-rw-r--r--tools/testing/selftests/sched/config2
-rw-r--r--tools/testing/selftests/sched_ext/Makefile1
-rw-r--r--tools/testing/selftests/sched_ext/config1
-rw-r--r--tools/testing/selftests/sched_ext/numa.bpf.c100
-rw-r--r--tools/testing/selftests/sched_ext/numa.c59
-rw-r--r--tools/testing/selftests/seccomp/seccomp_bpf.c6
-rwxr-xr-xtools/testing/selftests/sysctl/sysctl.sh10
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/actions/police.json10
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/qdiscs/drr.json25
-rw-r--r--tools/testing/selftests/timers/posix_timers.c73
-rw-r--r--tools/testing/selftests/timers/skew_consistency.c2
-rw-r--r--tools/testing/selftests/ublk/Makefile15
-rw-r--r--tools/testing/selftests/ublk/common.c55
-rw-r--r--tools/testing/selftests/ublk/file_backed.c175
-rw-r--r--tools/testing/selftests/ublk/kublk.c106
-rw-r--r--tools/testing/selftests/ublk/kublk.h88
-rw-r--r--tools/testing/selftests/ublk/null.c72
-rw-r--r--tools/testing/selftests/ublk/stripe.c318
-rwxr-xr-xtools/testing/selftests/ublk/test_common.sh195
-rwxr-xr-xtools/testing/selftests/ublk/test_generic_01.sh44
-rwxr-xr-xtools/testing/selftests/ublk/test_loop_01.sh13
-rwxr-xr-xtools/testing/selftests/ublk/test_loop_02.sh14
-rwxr-xr-xtools/testing/selftests/ublk/test_loop_03.sh16
-rwxr-xr-xtools/testing/selftests/ublk/test_loop_04.sh14
-rwxr-xr-xtools/testing/selftests/ublk/test_null_01.sh9
-rwxr-xr-xtools/testing/selftests/ublk/test_null_02.sh20
-rwxr-xr-xtools/testing/selftests/ublk/test_stress_01.sh47
-rwxr-xr-xtools/testing/selftests/ublk/test_stress_02.sh47
-rwxr-xr-xtools/testing/selftests/ublk/test_stripe_01.sh34
-rwxr-xr-xtools/testing/selftests/ublk/test_stripe_02.sh24
-rw-r--r--tools/testing/selftests/ublk/trace/seq_io.bt25
-rw-r--r--tools/testing/selftests/ublk/ublk_dep.h18
-rw-r--r--tools/testing/selftests/user_events/dyn_test.c2
-rw-r--r--tools/testing/selftests/vDSO/Makefile11
-rw-r--r--tools/testing/selftests/vDSO/parse_vdso.c29
-rw-r--r--tools/testing/selftests/vDSO/parse_vdso.h1
-rw-r--r--tools/testing/selftests/vDSO/vdso_standalone_test_x86.c150
-rw-r--r--tools/testing/selftests/vDSO/vdso_test_gettimeofday.c4
-rw-r--r--tools/testing/selftests/wireguard/qemu/debug.config1
-rw-r--r--tools/testing/selftests/x86/Makefile6
-rw-r--r--tools/testing/selftests/x86/amx.c442
-rw-r--r--tools/testing/selftests/x86/avx.c12
-rw-r--r--tools/testing/selftests/x86/corrupt_xstate_header.c14
-rw-r--r--tools/testing/selftests/x86/entry_from_vm86.c24
-rw-r--r--tools/testing/selftests/x86/fsgsbase.c24
-rw-r--r--tools/testing/selftests/x86/helpers.h28
-rw-r--r--tools/testing/selftests/x86/ioperm.c25
-rw-r--r--tools/testing/selftests/x86/iopl.c25
-rw-r--r--tools/testing/selftests/x86/lam.c151
-rw-r--r--tools/testing/selftests/x86/ldt_gdt.c18
-rw-r--r--tools/testing/selftests/x86/mov_ss_trap.c14
-rw-r--r--tools/testing/selftests/x86/ptrace_syscall.c24
-rw-r--r--tools/testing/selftests/x86/sigaltstack.c26
-rw-r--r--tools/testing/selftests/x86/sigreturn.c24
-rw-r--r--tools/testing/selftests/x86/single_step_syscall.c22
-rw-r--r--tools/testing/selftests/x86/syscall_arg_fault.c12
-rw-r--r--tools/testing/selftests/x86/syscall_nt.c12
-rw-r--r--tools/testing/selftests/x86/syscall_numbering.c3
-rw-r--r--tools/testing/selftests/x86/sysret_rip.c24
-rw-r--r--tools/testing/selftests/x86/test_vsyscall.c13
-rw-r--r--tools/testing/selftests/x86/unwind_vdso.c12
-rw-r--r--tools/testing/selftests/x86/xstate.c477
-rw-r--r--tools/testing/selftests/x86/xstate.h195
-rw-r--r--tools/tracing/rtla/.gitignore1
-rw-r--r--tools/tracing/rtla/Makefile20
-rw-r--r--tools/tracing/rtla/Makefile.config42
-rw-r--r--tools/tracing/rtla/Makefile.rtla17
-rw-r--r--tools/tracing/rtla/src/Build1
-rw-r--r--tools/tracing/rtla/src/osnoise.c86
-rw-r--r--tools/tracing/rtla/src/osnoise.h50
-rw-r--r--tools/tracing/rtla/src/osnoise_hist.c124
-rw-r--r--tools/tracing/rtla/src/osnoise_top.c126
-rw-r--r--tools/tracing/rtla/src/timerlat.bpf.c149
-rw-r--r--tools/tracing/rtla/src/timerlat.c106
-rw-r--r--tools/tracing/rtla/src/timerlat.h54
-rw-r--r--tools/tracing/rtla/src/timerlat_aa.c2
-rw-r--r--tools/tracing/rtla/src/timerlat_bpf.c166
-rw-r--r--tools/tracing/rtla/src/timerlat_bpf.h59
-rw-r--r--tools/tracing/rtla/src/timerlat_hist.c354
-rw-r--r--tools/tracing/rtla/src/timerlat_top.c482
-rw-r--r--tools/tracing/rtla/src/trace.c4
-rw-r--r--tools/tracing/rtla/tests/engine.sh66
-rw-r--r--tools/tracing/rtla/tests/osnoise.t6
-rw-r--r--tools/tracing/rtla/tests/timerlat.t14
-rw-r--r--tools/verification/dot2/dot2k27
-rw-r--r--tools/verification/dot2/dot2k.py80
-rw-r--r--tools/verification/dot2/dot2k_templates/Kconfig3
-rw-r--r--tools/verification/dot2/dot2k_templates/main.c4
-rw-r--r--tools/verification/dot2/dot2k_templates/main_container.c38
-rw-r--r--tools/verification/dot2/dot2k_templates/main_container.h3
-rw-r--r--tools/verification/models/sched/sco.dot18
-rw-r--r--tools/verification/models/sched/scpd.dot18
-rw-r--r--tools/verification/models/sched/sncid.dot18
-rw-r--r--tools/verification/models/sched/snep.dot18
-rw-r--r--tools/verification/models/sched/snroc.dot18
-rw-r--r--tools/verification/models/sched/tss.dot18
-rw-r--r--tools/verification/rv/Makefile.rv2
-rw-r--r--tools/verification/rv/include/in_kernel.h2
-rw-r--r--tools/verification/rv/include/rv.h3
-rw-r--r--tools/verification/rv/src/in_kernel.c256
-rw-r--r--tools/verification/rv/src/rv.c38
-rw-r--r--usr/include/Makefile2
-rw-r--r--virt/kvm/Kconfig4
-rw-r--r--virt/kvm/kvm_main.c75
7060 files changed, 334590 insertions, 135955 deletions
diff --git a/.mailmap b/.mailmap
index 77bcfcfd471c..05a3889ae2d3 100644
--- a/.mailmap
+++ b/.mailmap
@@ -88,7 +88,6 @@ Antonio Quartulli <antonio@mandelbit.com> <antonio@open-mesh.com>
Antonio Quartulli <antonio@mandelbit.com> <antonio.quartulli@open-mesh.com>
Antonio Quartulli <antonio@mandelbit.com> <ordex@autistici.org>
Antonio Quartulli <antonio@mandelbit.com> <ordex@ritirata.org>
-Antonio Quartulli <antonio@mandelbit.com> <antonio@openvpn.net>
Antonio Quartulli <antonio@mandelbit.com> <a@unstable.cc>
Anup Patel <anup@brainfault.org> <anup.patel@wdc.com>
Archit Taneja <archit@ti.com>
@@ -282,6 +281,7 @@ Henrik Rydberg <rydberg@bitmath.org>
Herbert Xu <herbert@gondor.apana.org.au>
Huacai Chen <chenhuacai@kernel.org> <chenhc@lemote.com>
Huacai Chen <chenhuacai@kernel.org> <chenhuacai@loongson.cn>
+Ike Panhc <ikepanhc@gmail.com> <ike.pan@canonical.com>
J. Bruce Fields <bfields@fieldses.org> <bfields@redhat.com>
J. Bruce Fields <bfields@fieldses.org> <bfields@citi.umich.edu>
Jacob Shin <Jacob.Shin@amd.com>
@@ -685,6 +685,8 @@ Stephen Hemminger <stephen@networkplumber.org> <shemminger@linux-foundation.org>
Stephen Hemminger <stephen@networkplumber.org> <shemminger@osdl.org>
Stephen Hemminger <stephen@networkplumber.org> <sthemmin@microsoft.com>
Stephen Hemminger <stephen@networkplumber.org> <sthemmin@vyatta.com>
+Stephen Smalley <stephen.smalley.work@gmail.com> <sds@epoch.ncsc.mil>
+Stephen Smalley <stephen.smalley.work@gmail.com> <sds@tycho.nsa.gov>
Steve Wise <larrystevenwise@gmail.com> <swise@chelsio.com>
Steve Wise <larrystevenwise@gmail.com> <swise@opengridcomputing.com>
Subash Abhinov Kasiviswanathan <quic_subashab@quicinc.com> <subashab@codeaurora.org>
@@ -692,6 +694,7 @@ Subbaraman Narayanamurthy <quic_subbaram@quicinc.com> <subbaram@codeaurora.org>
Subhash Jadavani <subhashj@codeaurora.org>
Sudarshan Rajagopalan <quic_sudaraja@quicinc.com> <sudaraja@codeaurora.org>
Sudeep Holla <sudeep.holla@arm.com> Sudeep KarkadaNagesha <sudeep.karkadanagesha@arm.com>
+Sumit Garg <sumit.garg@kernel.org> <sumit.garg@linaro.org>
Sumit Semwal <sumit.semwal@ti.com>
Surabhi Vishnoi <quic_svishnoi@quicinc.com> <svishnoi@codeaurora.org>
Sven Eckelmann <sven@narfation.org> <seckelmann@datto.com>
diff --git a/CREDITS b/CREDITS
index 53d11a46fd69..3d2b81bdf1a6 100644
--- a/CREDITS
+++ b/CREDITS
@@ -2187,6 +2187,10 @@ D: Various ACPI fixes, keeping correct battery state through suspend
D: various lockdep annotations, autofs and other random bugfixes
S: Prague, Czech Republic
+N: Ishizaki Kou
+E: kou.ishizaki@toshiba.co.jp
+D: Spidernet driver for PowerPC Cell platforms
+
N: Gene Kozin
E: 74604.152@compuserve.com
W: https://www.sangoma.com
@@ -2197,6 +2201,9 @@ S: Markham, Ontario
S: L3R 8B2
S: Canada
+N: Christian Krafft
+D: PowerPC Cell support
+
N: Maxim Krasnyansky
E: maxk@qualcomm.com
W: http://vtun.sf.net
@@ -2389,6 +2396,10 @@ S: ICP vortex GmbH
S: Neckarsulm
S: Germany
+N: Geoff Levand
+E: geoff@infradead.org
+D: Spidernet driver for PowerPC Cell platforms
+
N: Phil Lewis
E: beans@bucket.ualr.edu
D: Promised to send money if I would put his name in the source tree.
@@ -3233,6 +3244,10 @@ N: Rui Prior
E: rprior@inescn.pt
D: ATM device driver for NICStAR based cards
+N: Roopa Prabhu
+E: roopa@nvidia.com
+D: Bridge co-maintainer, vxlan and networking contributor
+
N: Stefan Probst
E: sp@caldera.de
D: The Linux Support Team Erlangen, 1993-97
diff --git a/Documentation/ABI/README b/Documentation/ABI/README
index 8bac9cb09a6d..ef0e6d11e919 100644
--- a/Documentation/ABI/README
+++ b/Documentation/ABI/README
@@ -1,4 +1,5 @@
-This directory attempts to document the ABI between the Linux kernel and
+This part of the documentation inside Documentation/ABI directory
+attempts to document the ABI between the Linux kernel and
userspace, and the relative stability of these interfaces. Due to the
everchanging nature of Linux, and the differing maturity levels, these
interfaces should be used by userspace programs in different ways.
diff --git a/Documentation/ABI/removed/sysfs-class-rfkill b/Documentation/ABI/removed/sysfs-class-rfkill
index f25174eafd55..20cb688af173 100644
--- a/Documentation/ABI/removed/sysfs-class-rfkill
+++ b/Documentation/ABI/removed/sysfs-class-rfkill
@@ -4,7 +4,7 @@ For details to this subsystem look at Documentation/driver-api/rfkill.rst.
What: /sys/class/rfkill/rfkill[0-9]+/claim
Date: 09-Jul-2007
-KernelVersion v2.6.22
+KernelVersion: v2.6.22
Contact: linux-wireless@vger.kernel.org
Description: This file was deprecated because there no longer was a way to
claim just control over a single rfkill instance.
diff --git a/Documentation/ABI/stable/sysfs-block b/Documentation/ABI/stable/sysfs-block
index 0cceb2badc83..3879963f0f01 100644
--- a/Documentation/ABI/stable/sysfs-block
+++ b/Documentation/ABI/stable/sysfs-block
@@ -109,6 +109,10 @@ Contact: Martin K. Petersen <martin.petersen@oracle.com>
Description:
Indicates whether a storage device is capable of storing
integrity metadata. Set if the device is T10 PI-capable.
+ This flag is set to 1 if the storage media is formatted
+ with T10 Protection Information. If the storage media is
+ not formatted with T10 Protection Information, this flag
+ is set to 0.
What: /sys/block/<disk>/integrity/format
@@ -117,6 +121,13 @@ Contact: Martin K. Petersen <martin.petersen@oracle.com>
Description:
Metadata format for integrity capable block device.
E.g. T10-DIF-TYPE1-CRC.
+ This field describes the type of T10 Protection Information
+ that the block device can send and receive.
+ If the device can store application integrity metadata but
+ no T10 Protection Information profile is used, this field
+ contains "nop".
+ If the device does not support integrity metadata, this
+ field contains "none".
What: /sys/block/<disk>/integrity/protection_interval_bytes
@@ -142,7 +153,17 @@ Date: June 2008
Contact: Martin K. Petersen <martin.petersen@oracle.com>
Description:
Number of bytes of integrity tag space available per
- 512 bytes of data.
+ protection_interval_bytes, which is typically
+ the device's logical block size.
+ This field describes the size of the application tag
+ if the storage device is formatted with T10 Protection
+ Information and permits use of the application tag.
+ The tag_size is reported in bytes and indicates the
+ space available for adding an opaque tag to each block
+ (protection_interval_bytes).
+ If the device does not support T10 Protection Information
+ (even if the device provides application integrity
+ metadata space), this field is set to 0.
What: /sys/block/<disk>/integrity/write_generate
@@ -229,6 +250,17 @@ Description:
encryption, refer to Documentation/block/inline-encryption.rst.
+What: /sys/block/<disk>/queue/crypto/hw_wrapped_keys
+Date: February 2025
+Contact: linux-block@vger.kernel.org
+Description:
+ [RO] The presence of this file indicates that the device
+ supports hardware-wrapped inline encryption keys, i.e. key blobs
+ that can only be unwrapped and used by dedicated hardware. For
+ more information about hardware-wrapped inline encryption keys,
+ see Documentation/block/inline-encryption.rst.
+
+
What: /sys/block/<disk>/queue/crypto/max_dun_bits
Date: February 2022
Contact: linux-block@vger.kernel.org
@@ -267,6 +299,15 @@ Description:
use with inline encryption.
+What: /sys/block/<disk>/queue/crypto/raw_keys
+Date: February 2025
+Contact: linux-block@vger.kernel.org
+Description:
+ [RO] The presence of this file indicates that the device
+ supports raw inline encryption keys, i.e. keys that are managed
+ in raw, plaintext form in software.
+
+
What: /sys/block/<disk>/queue/dax
Date: June 2016
Contact: linux-block@vger.kernel.org
diff --git a/Documentation/ABI/stable/sysfs-class-rfkill b/Documentation/ABI/stable/sysfs-class-rfkill
index 037979f7dc4b..67b605e3dd16 100644
--- a/Documentation/ABI/stable/sysfs-class-rfkill
+++ b/Documentation/ABI/stable/sysfs-class-rfkill
@@ -16,7 +16,7 @@ Description: The rfkill class subsystem folder.
What: /sys/class/rfkill/rfkill[0-9]+/name
Date: 09-Jul-2007
-KernelVersion v2.6.22
+KernelVersion: v2.6.22
Contact: linux-wireless@vger.kernel.org
Description: Name assigned by driver to this key (interface or driver name).
Values: arbitrary string.
@@ -24,7 +24,7 @@ Values: arbitrary string.
What: /sys/class/rfkill/rfkill[0-9]+/type
Date: 09-Jul-2007
-KernelVersion v2.6.22
+KernelVersion: v2.6.22
Contact: linux-wireless@vger.kernel.org
Description: Driver type string ("wlan", "bluetooth", etc).
Values: See include/linux/rfkill.h.
@@ -32,7 +32,7 @@ Values: See include/linux/rfkill.h.
What: /sys/class/rfkill/rfkill[0-9]+/persistent
Date: 09-Jul-2007
-KernelVersion v2.6.22
+KernelVersion: v2.6.22
Contact: linux-wireless@vger.kernel.org
Description: Whether the soft blocked state is initialised from non-volatile
storage at startup.
@@ -44,7 +44,7 @@ Values: A numeric value:
What: /sys/class/rfkill/rfkill[0-9]+/state
Date: 09-Jul-2007
-KernelVersion v2.6.22
+KernelVersion: v2.6.22
Contact: linux-wireless@vger.kernel.org
Description: Current state of the transmitter.
This file was scheduled to be removed in 2014, but due to its
@@ -67,7 +67,7 @@ Values: A numeric value.
What: /sys/class/rfkill/rfkill[0-9]+/hard
Date: 12-March-2010
-KernelVersion v2.6.34
+KernelVersion: v2.6.34
Contact: linux-wireless@vger.kernel.org
Description: Current hardblock state. This file is read only.
Values: A numeric value.
@@ -81,7 +81,7 @@ Values: A numeric value.
What: /sys/class/rfkill/rfkill[0-9]+/soft
Date: 12-March-2010
-KernelVersion v2.6.34
+KernelVersion: v2.6.34
Contact: linux-wireless@vger.kernel.org
Description: Current softblock state. This file is read and write.
Values: A numeric value.
diff --git a/Documentation/ABI/stable/sysfs-devices-system-cpu b/Documentation/ABI/stable/sysfs-devices-system-cpu
index 902392d7eddf..cf78bd99f6c8 100644
--- a/Documentation/ABI/stable/sysfs-devices-system-cpu
+++ b/Documentation/ABI/stable/sysfs-devices-system-cpu
@@ -24,12 +24,6 @@ Description: Default value for the Data Stream Control Register (DSCR) on
If set by a process it will be inherited by child processes.
Values: 64 bit unsigned integer (bit field)
-What: /sys/devices/system/cpu/cpuX/topology/physical_package_id
-Description: physical package id of cpuX. Typically corresponds to a physical
- socket number, but the actual value is architecture and platform
- dependent.
-Values: integer
-
What: /sys/devices/system/cpu/cpuX/topology/die_id
Description: the CPU die ID of cpuX. Typically it is the hardware platform's
identifier (rather than the kernel's). The actual value is
@@ -86,10 +80,6 @@ What: /sys/devices/system/cpu/cpuX/topology/die_cpus
Description: internal kernel map of CPUs within the same die.
Values: hexadecimal bitmask.
-What: /sys/devices/system/cpu/cpuX/topology/ppin
-Description: per-socket protected processor inventory number
-Values: hexadecimal.
-
What: /sys/devices/system/cpu/cpuX/topology/die_cpus_list
Description: human-readable list of CPUs within the same die.
The format is like 0-3, 8-11, 14,17.
diff --git a/Documentation/ABI/stable/sysfs-driver-dma-idxd b/Documentation/ABI/stable/sysfs-driver-dma-idxd
index f2ec42949a54..4a355e6747ae 100644
--- a/Documentation/ABI/stable/sysfs-driver-dma-idxd
+++ b/Documentation/ABI/stable/sysfs-driver-dma-idxd
@@ -246,14 +246,14 @@ Description: Controls whether PRS disable is turned on for the workqueue.
capability.
What: /sys/bus/dsa/devices/wq<m>.<n>/occupancy
-Date May 25, 2021
+Date: May 25, 2021
KernelVersion: 5.14.0
Contact: dmaengine@vger.kernel.org
Description: Show the current number of entries in this WQ if WQ Occupancy
Support bit WQ capabilities is 1.
What: /sys/bus/dsa/devices/wq<m>.<n>/enqcmds_retries
-Date Oct 29, 2021
+Date: Oct 29, 2021
KernelVersion: 5.17.0
Contact: dmaengine@vger.kernel.org
Description: Indicate the number of retires for an enqcmds submission on a sharedwq.
diff --git a/Documentation/ABI/testing/configfs-usb-gadget-midi2 b/Documentation/ABI/testing/configfs-usb-gadget-midi2
index 0eac3aaba137..d76a52e2ca7f 100644
--- a/Documentation/ABI/testing/configfs-usb-gadget-midi2
+++ b/Documentation/ABI/testing/configfs-usb-gadget-midi2
@@ -47,7 +47,7 @@ Description:
midi1_first_group The first UMP Group number for MIDI 1.0 (0-15)
midi1_num_groups The number of groups for MIDI 1.0 (0-16)
ui_hint 0: unknown, 1: receiver, 2: sender, 3: both
- midi_ci_verison Supported MIDI-CI version number (8 bit)
+ midi_ci_version Supported MIDI-CI version number (8 bit)
is_midi1 Legacy MIDI 1.0 device (0, 1 or 2)
sysex8_streams Max number of SysEx8 streams (8 bit)
active Active FB flag (0 or 1)
diff --git a/Documentation/ABI/testing/sysfs-bus-coresight-devices-cti b/Documentation/ABI/testing/sysfs-bus-coresight-devices-cti
index bf2869c413e7..a97b70f588da 100644
--- a/Documentation/ABI/testing/sysfs-bus-coresight-devices-cti
+++ b/Documentation/ABI/testing/sysfs-bus-coresight-devices-cti
@@ -1,241 +1,241 @@
What: /sys/bus/coresight/devices/<cti-name>/enable
Date: March 2020
-KernelVersion 5.7
+KernelVersion: 5.7
Contact: Mike Leach or Mathieu Poirier
Description: (RW) Enable/Disable the CTI hardware.
What: /sys/bus/coresight/devices/<cti-name>/powered
Date: March 2020
-KernelVersion 5.7
+KernelVersion: 5.7
Contact: Mike Leach or Mathieu Poirier
Description: (Read) Indicate if the CTI hardware is powered.
What: /sys/bus/coresight/devices/<cti-name>/ctmid
Date: March 2020
-KernelVersion 5.7
+KernelVersion: 5.7
Contact: Mike Leach or Mathieu Poirier
Description: (Read) Display the associated CTM ID
What: /sys/bus/coresight/devices/<cti-name>/nr_trigger_cons
Date: March 2020
-KernelVersion 5.7
+KernelVersion: 5.7
Contact: Mike Leach or Mathieu Poirier
Description: (Read) Number of devices connected to triggers on this CTI
What: /sys/bus/coresight/devices/<cti-name>/triggers<N>/name
Date: March 2020
-KernelVersion 5.7
+KernelVersion: 5.7
Contact: Mike Leach or Mathieu Poirier
Description: (Read) Name of connected device <N>
What: /sys/bus/coresight/devices/<cti-name>/triggers<N>/in_signals
Date: March 2020
-KernelVersion 5.7
+KernelVersion: 5.7
Contact: Mike Leach or Mathieu Poirier
Description: (Read) Input trigger signals from connected device <N>
What: /sys/bus/coresight/devices/<cti-name>/triggers<N>/in_types
Date: March 2020
-KernelVersion 5.7
+KernelVersion: 5.7
Contact: Mike Leach or Mathieu Poirier
Description: (Read) Functional types for the input trigger signals
from connected device <N>
What: /sys/bus/coresight/devices/<cti-name>/triggers<N>/out_signals
Date: March 2020
-KernelVersion 5.7
+KernelVersion: 5.7
Contact: Mike Leach or Mathieu Poirier
Description: (Read) Output trigger signals to connected device <N>
What: /sys/bus/coresight/devices/<cti-name>/triggers<N>/out_types
Date: March 2020
-KernelVersion 5.7
+KernelVersion: 5.7
Contact: Mike Leach or Mathieu Poirier
Description: (Read) Functional types for the output trigger signals
to connected device <N>
What: /sys/bus/coresight/devices/<cti-name>/regs/inout_sel
Date: March 2020
-KernelVersion 5.7
+KernelVersion: 5.7
Contact: Mike Leach or Mathieu Poirier
Description: (RW) Select the index for inen and outen registers.
What: /sys/bus/coresight/devices/<cti-name>/regs/inen
Date: March 2020
-KernelVersion 5.7
+KernelVersion: 5.7
Contact: Mike Leach or Mathieu Poirier
Description: (RW) Read or write the CTIINEN register selected by inout_sel.
What: /sys/bus/coresight/devices/<cti-name>/regs/outen
Date: March 2020
-KernelVersion 5.7
+KernelVersion: 5.7
Contact: Mike Leach or Mathieu Poirier
Description: (RW) Read or write the CTIOUTEN register selected by inout_sel.
What: /sys/bus/coresight/devices/<cti-name>/regs/gate
Date: March 2020
-KernelVersion 5.7
+KernelVersion: 5.7
Contact: Mike Leach or Mathieu Poirier
Description: (RW) Read or write CTIGATE register.
What: /sys/bus/coresight/devices/<cti-name>/regs/asicctl
Date: March 2020
-KernelVersion 5.7
+KernelVersion: 5.7
Contact: Mike Leach or Mathieu Poirier
Description: (RW) Read or write ASICCTL register.
What: /sys/bus/coresight/devices/<cti-name>/regs/intack
Date: March 2020
-KernelVersion 5.7
+KernelVersion: 5.7
Contact: Mike Leach or Mathieu Poirier
Description: (Write) Write the INTACK register.
What: /sys/bus/coresight/devices/<cti-name>/regs/appset
Date: March 2020
-KernelVersion 5.7
+KernelVersion: 5.7
Contact: Mike Leach or Mathieu Poirier
Description: (RW) Set CTIAPPSET register to activate channel. Read back to
determine current value of register.
What: /sys/bus/coresight/devices/<cti-name>/regs/appclear
Date: March 2020
-KernelVersion 5.7
+KernelVersion: 5.7
Contact: Mike Leach or Mathieu Poirier
Description: (Write) Write APPCLEAR register to deactivate channel.
What: /sys/bus/coresight/devices/<cti-name>/regs/apppulse
Date: March 2020
-KernelVersion 5.7
+KernelVersion: 5.7
Contact: Mike Leach or Mathieu Poirier
Description: (Write) Write APPPULSE to pulse a channel active for one clock
cycle.
What: /sys/bus/coresight/devices/<cti-name>/regs/chinstatus
Date: March 2020
-KernelVersion 5.7
+KernelVersion: 5.7
Contact: Mike Leach or Mathieu Poirier
Description: (Read) Read current status of channel inputs.
What: /sys/bus/coresight/devices/<cti-name>/regs/choutstatus
Date: March 2020
-KernelVersion 5.7
+KernelVersion: 5.7
Contact: Mike Leach or Mathieu Poirier
Description: (Read) read current status of channel outputs.
What: /sys/bus/coresight/devices/<cti-name>/regs/triginstatus
Date: March 2020
-KernelVersion 5.7
+KernelVersion: 5.7
Contact: Mike Leach or Mathieu Poirier
Description: (Read) read current status of input trigger signals
What: /sys/bus/coresight/devices/<cti-name>/regs/trigoutstatus
Date: March 2020
-KernelVersion 5.7
+KernelVersion: 5.7
Contact: Mike Leach or Mathieu Poirier
Description: (Read) read current status of output trigger signals.
What: /sys/bus/coresight/devices/<cti-name>/channels/trigin_attach
Date: March 2020
-KernelVersion 5.7
+KernelVersion: 5.7
Contact: Mike Leach or Mathieu Poirier
Description: (Write) Attach a CTI input trigger to a CTM channel.
What: /sys/bus/coresight/devices/<cti-name>/channels/trigin_detach
Date: March 2020
-KernelVersion 5.7
+KernelVersion: 5.7
Contact: Mike Leach or Mathieu Poirier
Description: (Write) Detach a CTI input trigger from a CTM channel.
What: /sys/bus/coresight/devices/<cti-name>/channels/trigout_attach
Date: March 2020
-KernelVersion 5.7
+KernelVersion: 5.7
Contact: Mike Leach or Mathieu Poirier
Description: (Write) Attach a CTI output trigger to a CTM channel.
What: /sys/bus/coresight/devices/<cti-name>/channels/trigout_detach
Date: March 2020
-KernelVersion 5.7
+KernelVersion: 5.7
Contact: Mike Leach or Mathieu Poirier
Description: (Write) Detach a CTI output trigger from a CTM channel.
What: /sys/bus/coresight/devices/<cti-name>/channels/chan_gate_enable
Date: March 2020
-KernelVersion 5.7
+KernelVersion: 5.7
Contact: Mike Leach or Mathieu Poirier
Description: (RW) Enable CTIGATE for single channel (Write) or list enabled
channels through the gate (R).
What: /sys/bus/coresight/devices/<cti-name>/channels/chan_gate_disable
Date: March 2020
-KernelVersion 5.7
+KernelVersion: 5.7
Contact: Mike Leach or Mathieu Poirier
Description: (Write) Disable CTIGATE for single channel.
What: /sys/bus/coresight/devices/<cti-name>/channels/chan_set
Date: March 2020
-KernelVersion 5.7
+KernelVersion: 5.7
Contact: Mike Leach or Mathieu Poirier
Description: (Write) Activate a single channel.
What: /sys/bus/coresight/devices/<cti-name>/channels/chan_clear
Date: March 2020
-KernelVersion 5.7
+KernelVersion: 5.7
Contact: Mike Leach or Mathieu Poirier
Description: (Write) Deactivate a single channel.
What: /sys/bus/coresight/devices/<cti-name>/channels/chan_pulse
Date: March 2020
-KernelVersion 5.7
+KernelVersion: 5.7
Contact: Mike Leach or Mathieu Poirier
Description: (Write) Pulse a single channel - activate for a single clock cycle.
What: /sys/bus/coresight/devices/<cti-name>/channels/trigout_filtered
Date: March 2020
-KernelVersion 5.7
+KernelVersion: 5.7
Contact: Mike Leach or Mathieu Poirier
Description: (Read) List of output triggers filtered across all connections.
What: /sys/bus/coresight/devices/<cti-name>/channels/trig_filter_enable
Date: March 2020
-KernelVersion 5.7
+KernelVersion: 5.7
Contact: Mike Leach or Mathieu Poirier
Description: (RW) Enable or disable trigger output signal filtering.
What: /sys/bus/coresight/devices/<cti-name>/channels/chan_inuse
Date: March 2020
-KernelVersion 5.7
+KernelVersion: 5.7
Contact: Mike Leach or Mathieu Poirier
Description: (Read) show channels with at least one attached trigger signal.
What: /sys/bus/coresight/devices/<cti-name>/channels/chan_free
Date: March 2020
-KernelVersion 5.7
+KernelVersion: 5.7
Contact: Mike Leach or Mathieu Poirier
Description: (Read) show channels with no attached trigger signals.
What: /sys/bus/coresight/devices/<cti-name>/channels/chan_xtrigs_sel
Date: March 2020
-KernelVersion 5.7
+KernelVersion: 5.7
Contact: Mike Leach or Mathieu Poirier
Description: (RW) Write channel number to select a channel to view, read to
see selected channel number.
What: /sys/bus/coresight/devices/<cti-name>/channels/chan_xtrigs_in
Date: March 2020
-KernelVersion 5.7
+KernelVersion: 5.7
Contact: Mike Leach or Mathieu Poirier
Description: (Read) Read to see input triggers connected to selected view
channel.
What: /sys/bus/coresight/devices/<cti-name>/channels/chan_xtrigs_out
Date: March 2020
-KernelVersion 5.7
+KernelVersion: 5.7
Contact: Mike Leach or Mathieu Poirier
Description: (Read) Read to see output triggers connected to selected view
channel.
What: /sys/bus/coresight/devices/<cti-name>/channels/chan_xtrigs_reset
Date: March 2020
-KernelVersion 5.7
+KernelVersion: 5.7
Contact: Mike Leach or Mathieu Poirier
Description: (Write) Clear all channel / trigger programming.
diff --git a/Documentation/ABI/testing/sysfs-bus-coresight-devices-tpdm b/Documentation/ABI/testing/sysfs-bus-coresight-devices-tpdm
index bf710ea6e0ef..53cb454b60d0 100644
--- a/Documentation/ABI/testing/sysfs-bus-coresight-devices-tpdm
+++ b/Documentation/ABI/testing/sysfs-bus-coresight-devices-tpdm
@@ -1,6 +1,6 @@
What: /sys/bus/coresight/devices/<tpdm-name>/integration_test
Date: January 2023
-KernelVersion 6.2
+KernelVersion: 6.2
Contact: Jinlong Mao (QUIC) <quic_jinlmao@quicinc.com>, Tao Zhang (QUIC) <quic_taozha@quicinc.com>
Description:
(Write) Run integration test for tpdm. Integration test
@@ -14,7 +14,7 @@ Description:
What: /sys/bus/coresight/devices/<tpdm-name>/reset_dataset
Date: March 2023
-KernelVersion 6.7
+KernelVersion: 6.7
Contact: Jinlong Mao (QUIC) <quic_jinlmao@quicinc.com>, Tao Zhang (QUIC) <quic_taozha@quicinc.com>
Description:
(Write) Reset the dataset of the tpdm.
@@ -24,7 +24,7 @@ Description:
What: /sys/bus/coresight/devices/<tpdm-name>/dsb_trig_type
Date: March 2023
-KernelVersion 6.7
+KernelVersion: 6.7
Contact: Jinlong Mao (QUIC) <quic_jinlmao@quicinc.com>, Tao Zhang (QUIC) <quic_taozha@quicinc.com>
Description:
(RW) Set/Get the trigger type of the DSB for tpdm.
@@ -35,7 +35,7 @@ Description:
What: /sys/bus/coresight/devices/<tpdm-name>/dsb_trig_ts
Date: March 2023
-KernelVersion 6.7
+KernelVersion: 6.7
Contact: Jinlong Mao (QUIC) <quic_jinlmao@quicinc.com>, Tao Zhang (QUIC) <quic_taozha@quicinc.com>
Description:
(RW) Set/Get the trigger timestamp of the DSB for tpdm.
@@ -46,7 +46,7 @@ Description:
What: /sys/bus/coresight/devices/<tpdm-name>/dsb_mode
Date: March 2023
-KernelVersion 6.7
+KernelVersion: 6.7
Contact: Jinlong Mao (QUIC) <quic_jinlmao@quicinc.com>, Tao Zhang (QUIC) <quic_taozha@quicinc.com>
Description:
(RW) Set/Get the programming mode of the DSB for tpdm.
@@ -60,7 +60,7 @@ Description:
What: /sys/bus/coresight/devices/<tpdm-name>/dsb_edge/ctrl_idx
Date: March 2023
-KernelVersion 6.7
+KernelVersion: 6.7
Contact: Jinlong Mao (QUIC) <quic_jinlmao@quicinc.com>, Tao Zhang (QUIC) <quic_taozha@quicinc.com>
Description:
(RW) Set/Get the index number of the edge detection for the DSB
@@ -69,7 +69,7 @@ Description:
What: /sys/bus/coresight/devices/<tpdm-name>/dsb_edge/ctrl_val
Date: March 2023
-KernelVersion 6.7
+KernelVersion: 6.7
Contact: Jinlong Mao (QUIC) <quic_jinlmao@quicinc.com>, Tao Zhang (QUIC) <quic_taozha@quicinc.com>
Description:
Write a data to control the edge detection corresponding to
@@ -85,7 +85,7 @@ Description:
What: /sys/bus/coresight/devices/<tpdm-name>/dsb_edge/ctrl_mask
Date: March 2023
-KernelVersion 6.7
+KernelVersion: 6.7
Contact: Jinlong Mao (QUIC) <quic_jinlmao@quicinc.com>, Tao Zhang (QUIC) <quic_taozha@quicinc.com>
Description:
Write a data to mask the edge detection corresponding to the index
@@ -97,21 +97,21 @@ Description:
What: /sys/bus/coresight/devices/<tpdm-name>/dsb_edge/edcr[0:15]
Date: March 2023
-KernelVersion 6.7
+KernelVersion: 6.7
Contact: Jinlong Mao (QUIC) <quic_jinlmao@quicinc.com>, Tao Zhang (QUIC) <quic_taozha@quicinc.com>
Description:
Read a set of the edge control value of the DSB in TPDM.
What: /sys/bus/coresight/devices/<tpdm-name>/dsb_edge/edcmr[0:7]
Date: March 2023
-KernelVersion 6.7
+KernelVersion: 6.7
Contact: Jinlong Mao (QUIC) <quic_jinlmao@quicinc.com>, Tao Zhang (QUIC) <quic_taozha@quicinc.com>
Description:
Read a set of the edge control mask of the DSB in TPDM.
What: /sys/bus/coresight/devices/<tpdm-name>/dsb_trig_patt/xpr[0:7]
Date: March 2023
-KernelVersion 6.7
+KernelVersion: 6.7
Contact: Jinlong Mao (QUIC) <quic_jinlmao@quicinc.com>, Tao Zhang (QUIC) <quic_taozha@quicinc.com>
Description:
(RW) Set/Get the value of the trigger pattern for the DSB
@@ -119,7 +119,7 @@ Description:
What: /sys/bus/coresight/devices/<tpdm-name>/dsb_trig_patt/xpmr[0:7]
Date: March 2023
-KernelVersion 6.7
+KernelVersion: 6.7
Contact: Jinlong Mao (QUIC) <quic_jinlmao@quicinc.com>, Tao Zhang (QUIC) <quic_taozha@quicinc.com>
Description:
(RW) Set/Get the mask of the trigger pattern for the DSB
@@ -127,21 +127,21 @@ Description:
What: /sys/bus/coresight/devices/<tpdm-name>/dsb_patt/tpr[0:7]
Date: March 2023
-KernelVersion 6.7
+KernelVersion: 6.7
Contact: Jinlong Mao (QUIC) <quic_jinlmao@quicinc.com>, Tao Zhang (QUIC) <quic_taozha@quicinc.com>
Description:
(RW) Set/Get the value of the pattern for the DSB subunit TPDM.
What: /sys/bus/coresight/devices/<tpdm-name>/dsb_patt/tpmr[0:7]
Date: March 2023
-KernelVersion 6.7
+KernelVersion: 6.7
Contact: Jinlong Mao (QUIC) <quic_jinlmao@quicinc.com>, Tao Zhang (QUIC) <quic_taozha@quicinc.com>
Description:
(RW) Set/Get the mask of the pattern for the DSB subunit TPDM.
What: /sys/bus/coresight/devices/<tpdm-name>/dsb_patt/enable_ts
Date: March 2023
-KernelVersion 6.7
+KernelVersion: 6.7
Contact: Jinlong Mao (QUIC) <quic_jinlmao@quicinc.com>, Tao Zhang (QUIC) <quic_taozha@quicinc.com>
Description:
(Write) Set the pattern timestamp of DSB tpdm. Read
@@ -153,7 +153,7 @@ Description:
What: /sys/bus/coresight/devices/<tpdm-name>/dsb_patt/set_type
Date: March 2023
-KernelVersion 6.7
+KernelVersion: 6.7
Contact: Jinlong Mao (QUIC) <quic_jinlmao@quicinc.com>, Tao Zhang (QUIC) <quic_taozha@quicinc.com>
Description:
(Write) Set the pattern type of DSB tpdm. Read
@@ -165,7 +165,7 @@ Description:
What: /sys/bus/coresight/devices/<tpdm-name>/dsb_msr/msr[0:31]
Date: March 2023
-KernelVersion 6.7
+KernelVersion: 6.7
Contact: Jinlong Mao (QUIC) <quic_jinlmao@quicinc.com>, Tao Zhang (QUIC) <quic_taozha@quicinc.com>
Description:
(RW) Set/Get the MSR(mux select register) for the DSB subunit
@@ -173,7 +173,7 @@ Description:
What: /sys/bus/coresight/devices/<tpdm-name>/cmb_mode
Date: January 2024
-KernelVersion 6.9
+KernelVersion: 6.9
Contact: Jinlong Mao (QUIC) <quic_jinlmao@quicinc.com>, Tao Zhang (QUIC) <quic_taozha@quicinc.com>
Description: (Write) Set the data collection mode of CMB tpdm. Continuous
change creates CMB data set elements on every CMBCLK edge.
@@ -187,7 +187,7 @@ Description: (Write) Set the data collection mode of CMB tpdm. Continuous
What: /sys/bus/coresight/devices/<tpdm-name>/cmb_trig_patt/xpr[0:1]
Date: January 2024
-KernelVersion 6.9
+KernelVersion: 6.9
Contact: Jinlong Mao (QUIC) <quic_jinlmao@quicinc.com>, Tao Zhang (QUIC) <quic_taozha@quicinc.com>
Description:
(RW) Set/Get the value of the trigger pattern for the CMB
@@ -195,7 +195,7 @@ Description:
What: /sys/bus/coresight/devices/<tpdm-name>/cmb_trig_patt/xpmr[0:1]
Date: January 2024
-KernelVersion 6.9
+KernelVersion: 6.9
Contact: Jinlong Mao (QUIC) <quic_jinlmao@quicinc.com>, Tao Zhang (QUIC) <quic_taozha@quicinc.com>
Description:
(RW) Set/Get the mask of the trigger pattern for the CMB
@@ -203,21 +203,21 @@ Description:
What: /sys/bus/coresight/devices/<tpdm-name>/dsb_patt/tpr[0:1]
Date: January 2024
-KernelVersion 6.9
+KernelVersion: 6.9
Contact: Jinlong Mao (QUIC) <quic_jinlmao@quicinc.com>, Tao Zhang (QUIC) <quic_taozha@quicinc.com>
Description:
(RW) Set/Get the value of the pattern for the CMB subunit TPDM.
What: /sys/bus/coresight/devices/<tpdm-name>/dsb_patt/tpmr[0:1]
Date: January 2024
-KernelVersion 6.9
+KernelVersion: 6.9
Contact: Jinlong Mao (QUIC) <quic_jinlmao@quicinc.com>, Tao Zhang (QUIC) <quic_taozha@quicinc.com>
Description:
(RW) Set/Get the mask of the pattern for the CMB subunit TPDM.
What: /sys/bus/coresight/devices/<tpdm-name>/cmb_patt/enable_ts
Date: January 2024
-KernelVersion 6.9
+KernelVersion: 6.9
Contact: Jinlong Mao (QUIC) <quic_jinlmao@quicinc.com>, Tao Zhang (QUIC) <quic_taozha@quicinc.com>
Description:
(Write) Set the pattern timestamp of CMB tpdm. Read
@@ -229,7 +229,7 @@ Description:
What: /sys/bus/coresight/devices/<tpdm-name>/cmb_trig_ts
Date: January 2024
-KernelVersion 6.9
+KernelVersion: 6.9
Contact: Jinlong Mao (QUIC) <quic_jinlmao@quicinc.com>, Tao Zhang (QUIC) <quic_taozha@quicinc.com>
Description:
(RW) Set/Get the trigger timestamp of the CMB for tpdm.
@@ -240,7 +240,7 @@ Description:
What: /sys/bus/coresight/devices/<tpdm-name>/cmb_ts_all
Date: January 2024
-KernelVersion 6.9
+KernelVersion: 6.9
Contact: Jinlong Mao (QUIC) <quic_jinlmao@quicinc.com>, Tao Zhang (QUIC) <quic_taozha@quicinc.com>
Description:
(RW) Read or write the status of timestamp upon all interface.
@@ -252,7 +252,7 @@ Description:
What: /sys/bus/coresight/devices/<tpdm-name>/cmb_msr/msr[0:31]
Date: January 2024
-KernelVersion 6.9
+KernelVersion: 6.9
Contact: Jinlong Mao (QUIC) <quic_jinlmao@quicinc.com>, Tao Zhang (QUIC) <quic_taozha@quicinc.com>
Description:
(RW) Set/Get the MSR(mux select register) for the CMB subunit
diff --git a/Documentation/ABI/testing/sysfs-class-chromeos b/Documentation/ABI/testing/sysfs-class-chromeos
index 74ece942722e..7fa5be6cc774 100644
--- a/Documentation/ABI/testing/sysfs-class-chromeos
+++ b/Documentation/ABI/testing/sysfs-class-chromeos
@@ -31,3 +31,23 @@ Date: August 2015
KernelVersion: 4.2
Description:
Show the information about the EC software and hardware.
+
+What: /sys/class/chromeos/cros_ec/usbpdmuxinfo
+Date: February 2025
+Description:
+ Show PD mux status for each typec port with the following flags:
+ - "USB": USB connected
+ - "DP": DP connected
+ - "POLARITY": CC line Polarity inverted
+ - "HPD_IRQ": Hot Plug Detect interrupt is asserted
+ - "HPD_LVL": Hot Plug Detect level is asserted
+ - "SAFE": DP is in safe mode
+ - "TBT": TBT enabled
+ - "USB4": USB4 enabled
+
+What: /sys/class/chromeos/cros_ec/ap_mode_entry
+Date: February 2025
+Description:
+ Show if the AP mode entry EC feature is supported.
+ It indicates whether the EC waits for direction from the AP
+ to enter Type-C altmodes or USB4 mode.
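+
+ A minimal sketch of reading these two attributes from user space
+ (Python used only for illustration; the class path is taken from the
+ entries above and the existence check is purely defensive)::
+
+     base = "/sys/class/chromeos/cros_ec"
+     for attr in ("usbpdmuxinfo", "ap_mode_entry"):
+         try:
+             with open(f"{base}/{attr}") as f:
+                 print(f"{attr}:\n{f.read()}")
+         except FileNotFoundError:
+             print(f"{attr}: not exported on this system")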
diff --git a/Documentation/ABI/testing/sysfs-driver-amd-sfh b/Documentation/ABI/testing/sysfs-driver-amd-sfh
new file mode 100644
index 000000000000..c053126a83bb
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-driver-amd-sfh
@@ -0,0 +1,13 @@
+What: /sys/bus/pci/drivers/pcie_mp2_amd/*/hpd
+Date: April 2025
+Contact: mario.limonciello@amd.com
+Description:
+ Human presence detection (HPD) enable/disable.
+ When HPD is enabled, the device will be able to detect the
+ presence of a human and will send an interrupt that can be
+ used to wake the system from a low power state.
+ When HPD is disabled, the device will not be able to detect
+ the presence of a human.
+
+ Access: Read/Write
+ Valid values: enabled/disabled
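+
+ A rough user-space sketch (Python for illustration only; the glob
+ pattern and the single-device assumption are illustrative, not part
+ of the ABI)::
+
+     import glob
+
+     # Find the hpd attribute of the (assumed single) device bound to
+     # the pcie_mp2_amd driver and toggle it.
+     matches = glob.glob("/sys/bus/pci/drivers/pcie_mp2_amd/*/hpd")
+     if matches:
+         hpd = matches[0]
+         with open(hpd) as f:
+             print("hpd is currently:", f.read().strip())  # enabled/disabled
+         with open(hpd, "w") as f:
+             f.write("enabled")  # valid values: enabled / disabled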
diff --git a/Documentation/ABI/testing/sysfs-driver-hid-appletb-kbd b/Documentation/ABI/testing/sysfs-driver-hid-appletb-kbd
new file mode 100644
index 000000000000..2a19584d091e
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-driver-hid-appletb-kbd
@@ -0,0 +1,13 @@
+What: /sys/bus/hid/drivers/hid-appletb-kbd/<dev>/mode
+Date: September, 2023
+KernelVersion: 6.5
+Contact: linux-input@vger.kernel.org
+Description:
+ The set of keys displayed on the Touch Bar.
+ Valid values are:
+ == =================
+ 0 Escape key only
+ 1 Function keys
+ 2 Media/brightness keys
+ 3 None
+ == =================
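+
+ A small sketch of reading the current mode and mapping it to the
+ table above (Python for illustration; discovering the device
+ instance via a glob is itself an assumption)::
+
+     import glob
+
+     MODES = {0: "Escape key only", 1: "Function keys",
+              2: "Media/brightness keys", 3: "None"}
+     for path in glob.glob("/sys/bus/hid/drivers/hid-appletb-kbd/*/mode"):
+         with open(path) as f:
+             mode = int(f.read().strip())
+         print(path, "->", MODES.get(mode, "unknown"))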
diff --git a/Documentation/ABI/testing/sysfs-driver-ufs b/Documentation/ABI/testing/sysfs-driver-ufs
index 5fa6655aee84..ae0191295d29 100644
--- a/Documentation/ABI/testing/sysfs-driver-ufs
+++ b/Documentation/ABI/testing/sysfs-driver-ufs
@@ -1559,3 +1559,48 @@ Description:
Symbol - HCMID. This file shows the UFSHCD manufacturer id.
The Manufacturer ID is defined by JEDEC in JEDEC-JEP106.
The file is read only.
+
+What: /sys/bus/platform/drivers/ufshcd/*/critical_health
+What: /sys/bus/platform/devices/*.ufs/critical_health
+Date: February 2025
+Contact: Avri Altman <avri.altman@wdc.com>
+Description: Report the number of times a critical health event has been
+ reported by a UFS device. Further insight into the specific
+ issue can be gained by reading one of: bPreEOLInfo,
+ bDeviceLifeTimeEstA, bDeviceLifeTimeEstB,
+ bWriteBoosterBufferLifeTimeEst, and bRPMBLifeTimeEst.
+
+ The file is read only.
+
+What: /sys/bus/platform/drivers/ufshcd/*/clkscale_enable
+What: /sys/bus/platform/devices/*.ufs/clkscale_enable
+Date: January 2025
+Contact: Ziqi Chen <quic_ziqichen@quicinc.com>
+Description:
+ This attribute shows whether UFS clock scaling is enabled or not.
+ It can also be used to enable/disable clock scaling by writing
+ 1 or 0 to this attribute.
+
+ The attribute is read/write.
+
+What: /sys/bus/platform/drivers/ufshcd/*/clkgate_enable
+What: /sys/bus/platform/devices/*.ufs/clkgate_enable
+Date: January 2025
+Contact: Ziqi Chen <quic_ziqichen@quicinc.com>
+Description:
+ This attribute shows whether UFS clock gating is enabled or not.
+ It can also be used to enable/disable clock gating by writing
+ 1 or 0 to this attribute.
+
+ The attribute is read/write.
+
+What: /sys/bus/platform/drivers/ufshcd/*/clkgate_delay_ms
+What: /sys/bus/platform/devices/*.ufs/clkgate_delay_ms
+Date: January 2025
+Contact: Ziqi Chen <quic_ziqichen@quicinc.com>
+Description:
+ This attribute shows and sets the number of milliseconds of idle time
+ before the UFS driver starts to perform clock gating. This can
+ prevent the UFS from frequently performing clock gating/ungating.
+
+ The attribute is read/write.
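+
+ A hedged sketch tying the attributes above together (Python for
+ illustration; the platform-device glob and the 50 ms delay are
+ assumptions, not recommendations)::
+
+     import glob
+
+     hosts = glob.glob("/sys/bus/platform/devices/*.ufs")
+     if hosts:
+         host = hosts[0]
+         with open(f"{host}/clkgate_enable", "w") as f:
+             f.write("1")              # 1 enables clock gating, 0 disables it
+         with open(f"{host}/clkgate_delay_ms", "w") as f:
+             f.write("50")             # idle time in ms before gating starts
+         with open(f"{host}/critical_health") as f:
+             print("critical health events:", f.read().strip())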
diff --git a/Documentation/ABI/testing/sysfs-edac-ecs b/Documentation/ABI/testing/sysfs-edac-ecs
new file mode 100644
index 000000000000..87c885c4eb1a
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-edac-ecs
@@ -0,0 +1,74 @@
+What: /sys/bus/edac/devices/<dev-name>/ecs_fruX
+Date: March 2025
+KernelVersion: 6.15
+Contact: linux-edac@vger.kernel.org
+Description:
+ The sysfs EDAC bus devices /<dev-name>/ecs_fruX subdirectory
+ pertains to the memory media ECS (Error Check Scrub) control
+ feature, where <dev-name> directory corresponds to a device
+ registered with the EDAC device driver for the ECS feature.
+ /ecs_fruX belongs to the media FRUs (Field Replaceable Unit)
+ under the memory device.
+
+ The sysfs ECS attr nodes are only present if the parent
+ driver has implemented the corresponding attr callback
+ function and provided the necessary operations to the EDAC
+ device driver during registration.
+
+What: /sys/bus/edac/devices/<dev-name>/ecs_fruX/log_entry_type
+Date: March 2025
+KernelVersion: 6.15
+Contact: linux-edac@vger.kernel.org
+Description:
+ (RW) The log entry type of how the DDR5 ECS log is reported.
+
+ - 0 - per DRAM.
+
+ - 1 - per memory media FRU.
+
+ - All other values are reserved.
+
+What: /sys/bus/edac/devices/<dev-name>/ecs_fruX/mode
+Date: March 2025
+KernelVersion: 6.15
+Contact: linux-edac@vger.kernel.org
+Description:
+ (RW) The mode of how the DDR5 ECS counts the errors.
+ Error count is tracked based on two different modes
+ selected by DDR5 ECS Control Feature - Codeword mode and
+ Row Count mode. If the ECS is under Codeword mode, then
+ the error count increments each time a codeword with check
+ bit errors is detected. If the ECS is under Row Count mode,
+ then the error counter increments each time a row with
+ check bit errors is detected.
+
+ - 0 - ECS counts rows in the memory media that have ECC errors.
+
+ - 1 - ECS counts codewords with errors, specifically, it counts
+ the number of ECC-detected errors in the memory media.
+
+ - All other values are reserved.
+
+What: /sys/bus/edac/devices/<dev-name>/ecs_fruX/reset
+Date: March 2025
+KernelVersion: 6.15
+Contact: linux-edac@vger.kernel.org
+Description:
+ (WO) ECS reset ECC counter.
+
+ - 1 - reset ECC counter to the default value.
+
+ - All other values are reserved.
+
+What: /sys/bus/edac/devices/<dev-name>/ecs_fruX/threshold
+Date: March 2025
+KernelVersion: 6.15
+Contact: linux-edac@vger.kernel.org
+Description:
+ (RW) DDR5 ECS threshold count per gigabits of memory cells.
+ The ECS error count is subject to the ECS Threshold count
+ per Gbit, which masks error counts less than the Threshold.
+
+ Supported values are 256, 1024 and 4096.
+
+ All other values are reserved.
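+
+ A configuration sketch for one FRU (Python for illustration;
+ "cxl_mem0" and "ecs_fru0" are placeholder names, not real devices)::
+
+     base = "/sys/bus/edac/devices/cxl_mem0/ecs_fru0"
+
+     def write_attr(name, value):
+         with open(f"{base}/{name}", "w") as f:
+             f.write(str(value))
+
+     write_attr("mode", 1)          # 1: count codewords with errors
+     write_attr("threshold", 1024)  # supported values: 256, 1024, 4096
+     write_attr("reset", 1)         # reset the ECC counter to its default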
diff --git a/Documentation/ABI/testing/sysfs-edac-memory-repair b/Documentation/ABI/testing/sysfs-edac-memory-repair
new file mode 100644
index 000000000000..0434a3b23ff3
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-edac-memory-repair
@@ -0,0 +1,206 @@
+What: /sys/bus/edac/devices/<dev-name>/mem_repairX
+Date: March 2025
+KernelVersion: 6.15
+Contact: linux-edac@vger.kernel.org
+Description:
+ The sysfs EDAC bus devices /<dev-name>/mem_repairX subdirectory
+ pertains to the memory media repair features control, such as
+ PPR (Post Package Repair), memory sparing etc, where <dev-name>
+ directory corresponds to a device registered with the EDAC
+ device driver for the memory repair features.
+
+ Post Package Repair is a maintenance operation that requests the memory
+ device to perform a repair operation on its media. It is a memory
+ self-healing feature that fixes a failing memory location by
+ replacing it with a spare row in a DRAM device. For example, a
+ CXL memory device with DRAM components that support PPR features may
+ implement PPR maintenance operations. DRAM components may support
+ two types of PPR functions: hard PPR, for a permanent row repair, and
+ soft PPR, for a temporary row repair. Soft PPR may be much faster
+ than hard PPR, but the repair is lost with a power cycle.
+
+ The sysfs attributes nodes for a repair feature are only
+ present if the parent driver has implemented the corresponding
+ attr callback function and provided the necessary operations
+ to the EDAC device driver during registration.
+
+ In some states of system configuration (e.g. before address
+ decoders have been configured), memory devices (e.g. CXL)
+ may not have an active mapping in the main host
+ physical address map. As such, the memory to repair must be
+ identified by a device-specific physical addressing scheme
+ using a device physical address (DPA). The DPA and other control
+ attributes to use will be presented in related error records.
+
+What: /sys/bus/edac/devices/<dev-name>/mem_repairX/repair_type
+Date: March 2025
+KernelVersion: 6.15
+Contact: linux-edac@vger.kernel.org
+Description:
+ (RO) Memory repair type, e.g. post package repair,
+ memory sparing, etc. Valid values are:
+
+ - ppr - Post package repair.
+
+ - cacheline-sparing
+
+ - row-sparing
+
+ - bank-sparing
+
+ - rank-sparing
+
+ - All other values are reserved.
+
+What: /sys/bus/edac/devices/<dev-name>/mem_repairX/persist_mode
+Date: March 2025
+KernelVersion: 6.15
+Contact: linux-edac@vger.kernel.org
+Description:
+ (RW) Get/Set the current persist repair mode set for a
+ repair function. Depending on the memory repair function, the
+ persist repair modes supported by the device are either temporary,
+ which is lost with a power cycle, or permanent. Valid values are:
+
+ - 0 - Soft memory repair (temporary repair).
+
+ - 1 - Hard memory repair (permanent repair).
+
+ - All other values are reserved.
+
+What: /sys/bus/edac/devices/<dev-name>/mem_repairX/repair_safe_when_in_use
+Date: March 2025
+KernelVersion: 6.15
+Contact: linux-edac@vger.kernel.org
+Description:
+ (RO) True if memory media is accessible and data is retained
+ during the memory repair operation.
+ The data may not be retained and memory requests may not be
+ correctly processed during a repair operation. In such a case,
+ the repair operation cannot be executed at runtime. The memory
+ must be taken offline.
+
+What: /sys/bus/edac/devices/<dev-name>/mem_repairX/hpa
+Date: March 2025
+KernelVersion: 6.15
+Contact: linux-edac@vger.kernel.org
+Description:
+ (RW) Host Physical Address (HPA) of the memory to repair.
+ The HPA to use will be provided in related error records.
+
+What: /sys/bus/edac/devices/<dev-name>/mem_repairX/dpa
+Date: March 2025
+KernelVersion: 6.15
+Contact: linux-edac@vger.kernel.org
+Description:
+ (RW) Device Physical Address (DPA) of the memory to repair.
+ The specific DPA to use will be provided in related error
+ records.
+
+ In some states of system configuration (e.g. before address
+ decoders have been configured), memory devices (e.g. CXL)
+ may not have an active mapping in the main host
+ physical address map. As such, the memory to repair must be
+ identified by a device-specific physical addressing scheme
+ using a DPA. The device physical address (DPA) to use will be
+ presented in related error records.
+
+What: /sys/bus/edac/devices/<dev-name>/mem_repairX/nibble_mask
+Date: March 2025
+KernelVersion: 6.15
+Contact: linux-edac@vger.kernel.org
+Description:
+ (RW) Read/Write Nibble mask of the memory to repair.
+ Nibble mask identifies one or more nibbles in error on the
+ memory bus that produced the error event. Nibble Mask bit 0
+ shall be set if nibble 0 on the memory bus produced the
+ event, etc. For example, for CXL PPR and sparing, a nibble mask
+ bit set to 1 indicates the request to perform the repair
+ operation in that specific device. Setting all nibble mask bits
+ to 1 indicates the request to perform the operation in all
+ devices. E.g. for CXL memory repair, the specific value of
+ nibble mask to use will be provided in related error records.
+ For more details, See nibble mask field in CXL spec ver 3.1,
+ section 8.2.9.7.1.2 Table 8-103 soft PPR and section
+ 8.2.9.7.1.3 Table 8-104 hard PPR, section 8.2.9.7.1.4
+ Table 8-105 memory sparing.
+
+What: /sys/bus/edac/devices/<dev-name>/mem_repairX/min_hpa
+What: /sys/bus/edac/devices/<dev-name>/mem_repairX/max_hpa
+What: /sys/bus/edac/devices/<dev-name>/mem_repairX/min_dpa
+What: /sys/bus/edac/devices/<dev-name>/mem_repairX/max_dpa
+Date: March 2025
+KernelVersion: 6.15
+Contact: linux-edac@vger.kernel.org
+Description:
+ (RW) The supported range of memory addresses that are to be
+ repaired. The memory device may give the supported range of
+ attributes to use and it will depend on the memory device
+ and the portion of memory to repair.
+ The userspace may receive the specific value of attributes
+ to use for a repair operation from the memory device via
+ related error records and trace events, e.g. CXL DRAM
+ and CXL general media error records in CXL memory devices.
+
+What: /sys/bus/edac/devices/<dev-name>/mem_repairX/bank_group
+What: /sys/bus/edac/devices/<dev-name>/mem_repairX/bank
+What: /sys/bus/edac/devices/<dev-name>/mem_repairX/rank
+What: /sys/bus/edac/devices/<dev-name>/mem_repairX/row
+What: /sys/bus/edac/devices/<dev-name>/mem_repairX/column
+What: /sys/bus/edac/devices/<dev-name>/mem_repairX/channel
+What: /sys/bus/edac/devices/<dev-name>/mem_repairX/sub_channel
+Date: March 2025
+KernelVersion: 6.15
+Contact: linux-edac@vger.kernel.org
+Description:
+ (RW) The control attributes for the memory to be repaired.
+ The specific value of attributes to use depends on the
+ portion of memory to repair and will be reported to the host
+ in related error records and be available to userspace
+ in trace events, such as CXL DRAM and CXL general media
+ error records of CXL memory devices.
+
+ When reading back these attributes, they return the current
+ value of the memory requested to be repaired.
+
+ bank_group - The bank group of the memory to repair.
+
+ bank - The bank number of the memory to repair.
+
+ rank - The rank of the memory to repair. Rank is defined as a
+ set of memory devices on a channel that together execute a
+ transaction.
+
+ row - The row number of the memory to repair.
+
+ column - The column number of the memory to repair.
+
+ channel - The channel of the memory to repair. Channel is
+ defined as an interface that can be independently accessed
+ for a transaction.
+
+ sub_channel - The subchannel of the memory to repair.
+
+ The requirement to set these attributes varies based on the
+ repair function. The attributes in sysfs are not present
+ unless required for a repair function.
+
+ For example, for the soft PPR (CXL spec ver 3.1, Section
+ 8.2.9.7.1.2 Table 8-103) and hard PPR (Section 8.2.9.7.1.3
+ Table 8-104) operations, these attributes do not need to be set.
+ For memory sparing (Section 8.2.9.7.1.4 Table 8-105), these
+ attributes must be set based on the memory sparing granularity.
+
+What: /sys/bus/edac/devices/<dev-name>/mem_repairX/repair
+Date: March 2025
+KernelVersion: 6.15
+Contact: linux-edac@vger.kernel.org
+Description:
+ (WO) Issue the memory repair operation for the specified
+ memory repair attributes. The operation may fail if resources
+ are insufficient based on the requirements of the memory
+ device and repair function.
+
+ - 1 - Issue the repair operation.
+
+ - All other values are reserved.
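+
+ A sketch of a soft PPR request sequence (Python for illustration;
+ "cxl_mem0", "mem_repair0" and the DPA value are placeholders - a
+ real DPA comes from a reported error record, and the attribute set
+ depends on what the parent driver exposes)::
+
+     base = "/sys/bus/edac/devices/cxl_mem0/mem_repair0"
+
+     def write_attr(name, value):
+         with open(f"{base}/{name}", "w") as f:
+             f.write(str(value))
+
+     with open(f"{base}/repair_type") as f:
+         assert f.read().strip() == "ppr"  # this instance must be a PPR feature
+
+     write_attr("persist_mode", 0)         # 0: soft (temporary) repair
+     write_attr("dpa", "0x40000000")       # DPA taken from the error record
+     write_attr("repair", 1)               # issue the repair operation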
diff --git a/Documentation/ABI/testing/sysfs-edac-scrub b/Documentation/ABI/testing/sysfs-edac-scrub
new file mode 100644
index 000000000000..c43be90deab4
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-edac-scrub
@@ -0,0 +1,69 @@
+What: /sys/bus/edac/devices/<dev-name>/scrubX
+Date: March 2025
+KernelVersion: 6.15
+Contact: linux-edac@vger.kernel.org
+Description:
+ The sysfs EDAC bus devices /<dev-name>/scrubX subdirectory
+ belongs to an instance of memory scrub control feature,
+ where <dev-name> directory corresponds to a device/memory
+ region registered with the EDAC device driver for the
+ scrub control feature.
+
+ The sysfs scrub attr nodes are only present if the parent
+ driver has implemented the corresponding attr callback
+ function and provided the necessary operations to the EDAC
+ device driver during registration.
+
+What: /sys/bus/edac/devices/<dev-name>/scrubX/addr
+Date: March 2025
+KernelVersion: 6.15
+Contact: linux-edac@vger.kernel.org
+Description:
+ (RW) The base address of the memory region to be scrubbed
+ for on-demand scrubbing. Setting the address starts scrubbing.
+ The size must be set before that.
+
+ The readback addr value is non-zero if the requested
+ on-demand scrubbing is in progress, zero otherwise.
+
+What: /sys/bus/edac/devices/<dev-name>/scrubX/size
+Date: March 2025
+KernelVersion: 6.15
+Contact: linux-edac@vger.kernel.org
+Description:
+ (RW) The size of the memory region to be scrubbed
+ (on-demand scrubbing).
+
+What: /sys/bus/edac/devices/<dev-name>/scrubX/enable_background
+Date: March 2025
+KernelVersion: 6.15
+Contact: linux-edac@vger.kernel.org
+Description:
+ (RW) Start/Stop background (patrol) scrubbing if supported.
+
+What: /sys/bus/edac/devices/<dev-name>/scrubX/min_cycle_duration
+Date: March 2025
+KernelVersion: 6.15
+Contact: linux-edac@vger.kernel.org
+Description:
+ (RO) Minimum scrub cycle duration in seconds supported
+ by the memory scrubber.
+
+What: /sys/bus/edac/devices/<dev-name>/scrubX/max_cycle_duration
+Date: March 2025
+KernelVersion: 6.15
+Contact: linux-edac@vger.kernel.org
+Description:
+ (RO) Maximum scrub cycle duration in seconds supported
+ by the memory scrubber.
+
+What: /sys/bus/edac/devices/<dev-name>/scrubX/current_cycle_duration
+Date: March 2025
+KernelVersion: 6.15
+Contact: linux-edac@vger.kernel.org
+Description:
+ (RW) The current scrub cycle duration in seconds. It must be
+ within the range supported by the memory scrubber.
+
+ Scrubbing has a runtime overhead, which can be reduced by
+ allowing the scrub cycle to take longer.
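+
+ An on-demand scrub sketch honoring the "size before addr" ordering
+ noted above (Python for illustration; "cxl_mem0", "scrub0", the
+ address and the size are placeholders)::
+
+     import time
+
+     base = "/sys/bus/edac/devices/cxl_mem0/scrub0"
+
+     def write_attr(name, value):
+         with open(f"{base}/{name}", "w") as f:
+             f.write(str(value))
+
+     write_attr("size", 0x100000)      # size must be written first
+     write_attr("addr", 0x2000000000)  # writing addr starts the scrub
+
+     # addr reads back non-zero while the requested scrub is in progress.
+     while True:
+         with open(f"{base}/addr") as f:
+             if int(f.read().strip(), 0) == 0:
+                 break
+         time.sleep(1)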
diff --git a/Documentation/ABI/testing/sysfs-fs-f2fs b/Documentation/ABI/testing/sysfs-fs-f2fs
index 3e1630c70d8a..59adb7dc6f9e 100644
--- a/Documentation/ABI/testing/sysfs-fs-f2fs
+++ b/Documentation/ABI/testing/sysfs-fs-f2fs
@@ -347,7 +347,7 @@ Description: Used to control configure extension list:
- [c] means add/del cold file extension
What: /sys/fs/f2fs/<disk>/unusable
-Date April 2019
+Date: April 2019
Contact: "Daniel Rosenberg" <drosen@google.com>
Description: If checkpoint=disable, it displays the number of blocks that
are unusable.
@@ -355,7 +355,7 @@ Description: If checkpoint=disable, it displays the number of blocks that
would be unusable if checkpoint=disable were to be set.
What: /sys/fs/f2fs/<disk>/encoding
-Date July 2019
+Date: July 2019
Contact: "Daniel Rosenberg" <drosen@google.com>
Description: Displays name and version of the encoding set for the filesystem.
If no encoding is set, displays (none)
@@ -734,6 +734,7 @@ Description: Support configuring fault injection type, should be
FAULT_BLKADDR_VALIDITY 0x000040000
FAULT_BLKADDR_CONSISTENCE 0x000080000
FAULT_NO_SEGMENT 0x000100000
+ FAULT_INCONSISTENT_FOOTER 0x000200000
=========================== ===========
What: /sys/fs/f2fs/<disk>/discard_io_aware_gran
@@ -828,3 +829,20 @@ Date: November 2024
Contact: "Chao Yu" <chao@kernel.org>
Description: It controls max read extent count for per-inode, the value of threshold
is 10240 by default.
+
+What: /sys/fs/f2fs/tuning/reclaim_caches_kb
+Date: February 2025
+Contact: "Jaegeuk Kim" <jaegeuk@kernel.org>
+Description: It reclaims the given KBs of file-backed pages registered by
+ ioctl(F2FS_IOC_DONATE_RANGE).
+ For example, writing N tries to drop N KB of space from the LRU.
+
+What: /sys/fs/f2fs/<disk>/carve_out
+Date: March 2025
+Contact: "Daeho Jeong" <daehojeong@google.com>
+Description: For several zoned storage devices, vendors provide extra space beyond the
+ spec that is used for device-level GC, and F2FS can use this space for
+ filesystem-level GC. To do that, we can reserve the space using
+ reserved_blocks. However, that is not enough, since this extra space should
+ not be shown to users. So, with this new sysfs node, we can hide the space
+ by subtracting reserved_blocks from total bytes.
diff --git a/Documentation/ABI/testing/sysfs-kernel-fadump b/Documentation/ABI/testing/sysfs-kernel-fadump
index 2f9daa7ca55b..b64b7622e6fc 100644
--- a/Documentation/ABI/testing/sysfs-kernel-fadump
+++ b/Documentation/ABI/testing/sysfs-kernel-fadump
@@ -55,4 +55,5 @@ Date: May 2024
Contact: linuxppc-dev@lists.ozlabs.org
Description: read/write
This is a special sysfs file available to setup additional
- parameters to be passed to capture kernel.
+ parameters to be passed to the capture kernel. For Hash MMU it
+ is exported only if the RMA size is higher than 768MB.
diff --git a/Documentation/ABI/testing/sysfs-platform-mellanox-bootctl b/Documentation/ABI/testing/sysfs-platform-mellanox-bootctl
index 65ed3865da62..09f783fa0a53 100644
--- a/Documentation/ABI/testing/sysfs-platform-mellanox-bootctl
+++ b/Documentation/ABI/testing/sysfs-platform-mellanox-bootctl
@@ -150,3 +150,13 @@ Description:
The "mfg_lock" sysfs attribute is write-only.
A successful write to this attribute will latch the
board-level attributes into EEPROM, making them read-only.
+
+What: /sys/bus/platform/devices/MLNXBF04:00/rtc_battery
+Date: June 2025
+KernelVersion: 6.15
+Contact: "Xiangrong Li <xiangrongl@nvidia.com>"
+Description:
+ The "rtc_battery" sysfs attribute is read-only.
+ A successful read from this attribute returns the status of
+ the board's RTC battery. The RTC battery status register is
+ also cleared upon successful read operation.
diff --git a/Documentation/ABI/testing/sysfs-power b/Documentation/ABI/testing/sysfs-power
index a3942b1036e2..2192478e83cf 100644
--- a/Documentation/ABI/testing/sysfs-power
+++ b/Documentation/ABI/testing/sysfs-power
@@ -131,7 +131,7 @@ Description:
CAUTION: Using it will cause your machine's real-time (CMOS)
clock to be set to a random invalid time after a resume.
-What; /sys/power/pm_trace_dev_match
+What: /sys/power/pm_trace_dev_match
Date: October 2010
Contact: James Hogan <jhogan@kernel.org>
Description:
diff --git a/Documentation/Makefile b/Documentation/Makefile
index 52c6c5a3efa9..63094646df28 100644
--- a/Documentation/Makefile
+++ b/Documentation/Makefile
@@ -12,7 +12,7 @@ endif
# Check for broken ABI files
ifeq ($(CONFIG_WARN_ABI_ERRORS),y)
-$(shell $(srctree)/scripts/get_abi.pl validate --dir $(srctree)/Documentation/ABI)
+$(shell $(srctree)/scripts/get_abi.py --dir $(srctree)/Documentation/ABI validate)
endif
# You can set these variables from the command line.
diff --git a/Documentation/RCU/rcubarrier.rst b/Documentation/RCU/rcubarrier.rst
index 6da7f66da2a8..12a7b059654f 100644
--- a/Documentation/RCU/rcubarrier.rst
+++ b/Documentation/RCU/rcubarrier.rst
@@ -329,10 +329,7 @@ Answer:
was first added back in 2005. This is because on_each_cpu()
disables preemption, which acted as an RCU read-side critical
section, thus preventing CPU 0's grace period from completing
- until on_each_cpu() had dealt with all of the CPUs. However,
- with the advent of preemptible RCU, rcu_barrier() no longer
- waited on nonpreemptible regions of code in preemptible kernels,
- that being the job of the new rcu_barrier_sched() function.
+ until on_each_cpu() had dealt with all of the CPUs.
However, with the RCU flavor consolidation around v4.20, this
possibility was once again ruled out, because the consolidated
diff --git a/Documentation/RCU/stallwarn.rst b/Documentation/RCU/stallwarn.rst
index 30080ff6f406..d1ccd6039a8c 100644
--- a/Documentation/RCU/stallwarn.rst
+++ b/Documentation/RCU/stallwarn.rst
@@ -96,6 +96,13 @@ warnings:
the ``rcu_.*timer wakeup didn't happen for`` console-log message,
which will include additional debugging information.
+- A timer issue causes time to appear to jump forward, so that RCU
+ believes that the RCU CPU stall-warning timeout has been exceeded
+ when in fact much less time has passed. This could be due to
+ timer hardware bugs, timer driver bugs, or even corruption of
+ the "jiffies" global variable. These sorts of timer hardware
+ and driver bugs are not uncommon when testing new hardware.
+
- A low-level kernel issue that either fails to invoke one of the
variants of rcu_eqs_enter(true), rcu_eqs_exit(true), ct_idle_enter(),
ct_idle_exit(), ct_irq_enter(), or ct_irq_exit() on the one
diff --git a/Documentation/admin-guide/LSM/index.rst b/Documentation/admin-guide/LSM/index.rst
index ce63be6d64ad..b44ef68f6e4d 100644
--- a/Documentation/admin-guide/LSM/index.rst
+++ b/Documentation/admin-guide/LSM/index.rst
@@ -48,3 +48,4 @@ subdirectories.
Yama
SafeSetID
ipe
+ landlock
diff --git a/Documentation/admin-guide/LSM/landlock.rst b/Documentation/admin-guide/LSM/landlock.rst
new file mode 100644
index 000000000000..9e61607def08
--- /dev/null
+++ b/Documentation/admin-guide/LSM/landlock.rst
@@ -0,0 +1,158 @@
+.. SPDX-License-Identifier: GPL-2.0
+.. Copyright © 2025 Microsoft Corporation
+
+================================
+Landlock: system-wide management
+================================
+
+:Author: Mickaël Salaün
+:Date: March 2025
+
+Landlock can leverage the audit framework to log events.
+
+User space documentation can be found here:
+Documentation/userspace-api/landlock.rst.
+
+Audit
+=====
+
+Denied access requests are logged by default for a sandboxed program if `audit`
+is enabled. This default behavior can be changed with the
+sys_landlock_restrict_self() flags (cf.
+Documentation/userspace-api/landlock.rst). Landlock logs can also be masked
+thanks to audit rules. Landlock can generate 2 audit record types.
+
+Record types
+------------
+
+AUDIT_LANDLOCK_ACCESS
+ This record type identifies a denied access request to a kernel resource.
+ The ``domain`` field indicates the ID of the domain that blocked the
+ request. The ``blockers`` field indicates the cause(s) of this denial
+ (separated by a comma), and the following fields identify the kernel object
+ (similar to SELinux). There may be more than one of this record type per
+ audit event.
+
+ Example with a file link request generating two records in the same event::
+
+ domain=195ba459b blockers=fs.refer path="/usr/bin" dev="vda2" ino=351
+ domain=195ba459b blockers=fs.make_reg,fs.refer path="/usr/local" dev="vda2" ino=365
+
+AUDIT_LANDLOCK_DOMAIN
+ This record type describes the status of a Landlock domain. The ``status``
+ field can be either ``allocated`` or ``deallocated``.
+
+ The ``allocated`` status is part of the same audit event and follows
+ the first logged ``AUDIT_LANDLOCK_ACCESS`` record of a domain. It identifies
+ Landlock domain information at the time of the sys_landlock_restrict_self()
+ call with the following fields:
+
+ - the ``domain`` ID
+ - the enforcement ``mode``
+ - the domain creator's ``pid``
+ - the domain creator's ``uid``
+ - the domain creator's executable path (``exe``)
+ - the domain creator's command line (``comm``)
+
+ Example::
+
+ domain=195ba459b status=allocated mode=enforcing pid=300 uid=0 exe="/root/sandboxer" comm="sandboxer"
+
+ The ``deallocated`` status is an event on its own and it identifies a
+ Landlock domain release. After such an event, it is guaranteed that the
+ related domain ID will never be reused during the lifetime of the system.
+ The ``domain`` field indicates the ID of the domain which is released, and
+ the ``denials`` field indicates the total number of denied access requests,
+ which might not have been logged according to the audit rules and
+ sys_landlock_restrict_self()'s flags.
+
+ Example::
+
+ domain=195ba459b status=deallocated denials=3
+
+
+Event samples
+--------------
+
+Here are two examples of log events (see serial numbers).
+
+In this example a sandboxed program (``kill``) tries to send a signal to the
+init process, which is denied because of the signal scoping restriction
+(``LL_SCOPED=s``)::
+
+ $ LL_FS_RO=/ LL_FS_RW=/ LL_SCOPED=s LL_FORCE_LOG=1 ./sandboxer kill 1
+
+This command generates two events, each identified with a unique serial
+number following a timestamp (``msg=audit(1729738800.268:30)``). The first
+event (serial ``30``) contains 4 records. The first record
+(``type=LANDLOCK_ACCESS``) shows an access denied by the domain `1a6fdc66f`.
+The cause of this denial is the signal scoping restriction
+(``blockers=scope.signal``). The process that would have received this signal
+is the init process (``opid=1 ocomm="systemd"``).
+
+The second record (``type=LANDLOCK_DOMAIN``) describes (``status=allocated``)
+domain `1a6fdc66f`. This domain was created by process ``286`` executing the
+``/root/sandboxer`` program launched by the root user.
+
+The third record (``type=SYSCALL``) describes the syscall, its provided
+arguments, its result (``success=no exit=-1``), and the process that called it.
+
+The fourth record (``type=PROCTITLE``) shows the command's name as a
+hexadecimal value. This can be translated with ``python -c
+'print(bytes.fromhex("6B696C6C0031"))'``.
+
+Finally, the last record (``type=LANDLOCK_DOMAIN``) is also the only one from
+the second event (serial ``31``). It is not tied to a direct user space action
+but an asynchronous one to free resources tied to a Landlock domain
+(``status=deallocated``). This can be useful to know that the following logs
+will not concern the domain ``1a6fdc66f`` anymore. This record also summarizes
+the number of requests this domain denied (``denials=1``), whether they were
+logged or not.
+
+.. code-block::
+
+ type=LANDLOCK_ACCESS msg=audit(1729738800.268:30): domain=1a6fdc66f blockers=scope.signal opid=1 ocomm="systemd"
+ type=LANDLOCK_DOMAIN msg=audit(1729738800.268:30): domain=1a6fdc66f status=allocated mode=enforcing pid=286 uid=0 exe="/root/sandboxer" comm="sandboxer"
+ type=SYSCALL msg=audit(1729738800.268:30): arch=c000003e syscall=62 success=no exit=-1 [..] ppid=272 pid=286 auid=0 uid=0 gid=0 [...] comm="kill" [...]
+ type=PROCTITLE msg=audit(1729738800.268:30): proctitle=6B696C6C0031
+ type=LANDLOCK_DOMAIN msg=audit(1729738800.324:31): domain=1a6fdc66f status=deallocated denials=1
+
+Here is another example showcasing filesystem access control::
+
+ $ LL_FS_RO=/ LL_FS_RW=/tmp LL_FORCE_LOG=1 ./sandboxer sh -c "echo > /etc/passwd"
+
+The related audit logs contain 8 records from 3 different events (serials 33,
+34 and 35) created by the same domain `1a6fdc679`::
+
+ type=LANDLOCK_ACCESS msg=audit(1729738800.221:33): domain=1a6fdc679 blockers=fs.write_file path="/dev/tty" dev="devtmpfs" ino=9
+ type=LANDLOCK_DOMAIN msg=audit(1729738800.221:33): domain=1a6fdc679 status=allocated mode=enforcing pid=289 uid=0 exe="/root/sandboxer" comm="sandboxer"
+ type=SYSCALL msg=audit(1729738800.221:33): arch=c000003e syscall=257 success=no exit=-13 [...] ppid=272 pid=289 auid=0 uid=0 gid=0 [...] comm="sh" [...]
+ type=PROCTITLE msg=audit(1729738800.221:33): proctitle=7368002D63006563686F203E202F6574632F706173737764
+ type=LANDLOCK_ACCESS msg=audit(1729738800.221:34): domain=1a6fdc679 blockers=fs.write_file path="/etc/passwd" dev="vda2" ino=143821
+ type=SYSCALL msg=audit(1729738800.221:34): arch=c000003e syscall=257 success=no exit=-13 [...] ppid=272 pid=289 auid=0 uid=0 gid=0 [...] comm="sh" [...]
+ type=PROCTITLE msg=audit(1729738800.221:34): proctitle=7368002D63006563686F203E202F6574632F706173737764
+ type=LANDLOCK_DOMAIN msg=audit(1729738800.261:35): domain=1a6fdc679 status=deallocated denials=2
+
+
+Event filtering
+---------------
+
+If you get spammed with audit logs related to Landlock, this is either an
+attack attempt or a bug in the security policy. We can put some filters in
+place to limit the noise in two complementary ways:
+
+- with sys_landlock_restrict_self()'s flags if we can fix the sandboxed
+ programs,
+- or with audit rules (see :manpage:`auditctl(8)`).
+
+Additional documentation
+========================
+
+* `Linux Audit Documentation`_
+* Documentation/userspace-api/landlock.rst
+* Documentation/security/landlock.rst
+* https://landlock.io
+
+.. Links
+.. _Linux Audit Documentation:
+ https://github.com/linux-audit/audit-documentation/wiki
diff --git a/Documentation/admin-guide/README.rst b/Documentation/admin-guide/README.rst
index eb9452668909..70b02f30013a 100644
--- a/Documentation/admin-guide/README.rst
+++ b/Documentation/admin-guide/README.rst
@@ -165,7 +165,7 @@ Configuring the kernel
"make xconfig" Qt based configuration tool.
- "make gconfig" GTK+ based configuration tool.
+ "make gconfig" GTK based configuration tool.
"make oldconfig" Default all questions based on the contents of
your existing ./.config file and asking about
@@ -176,7 +176,7 @@ Configuring the kernel
values without prompting.
"make defconfig" Create a ./.config file by using the default
- symbol values from either arch/$ARCH/defconfig
+ symbol values from either arch/$ARCH/configs/defconfig
or arch/$ARCH/configs/${PLATFORM}_defconfig,
depending on the architecture.
diff --git a/Documentation/admin-guide/abi-obsolete-files.rst b/Documentation/admin-guide/abi-obsolete-files.rst
new file mode 100644
index 000000000000..3061a916b4b5
--- /dev/null
+++ b/Documentation/admin-guide/abi-obsolete-files.rst
@@ -0,0 +1,7 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+Obsolete ABI Files
+==================
+
+.. kernel-abi:: obsolete
+ :no-symbols:
diff --git a/Documentation/admin-guide/abi-obsolete.rst b/Documentation/admin-guide/abi-obsolete.rst
index 594e697aa1b2..640f3903e847 100644
--- a/Documentation/admin-guide/abi-obsolete.rst
+++ b/Documentation/admin-guide/abi-obsolete.rst
@@ -1,3 +1,5 @@
+.. SPDX-License-Identifier: GPL-2.0
+
ABI obsolete symbols
====================
@@ -7,5 +9,5 @@ marked to be removed at some later point in time.
The description of the interface will document the reason why it is
obsolete and when it can be expected to be removed.
-.. kernel-abi:: ABI/obsolete
- :rst:
+.. kernel-abi:: obsolete
+ :no-files:
diff --git a/Documentation/admin-guide/abi-removed-files.rst b/Documentation/admin-guide/abi-removed-files.rst
new file mode 100644
index 000000000000..f1bdfadd2ec4
--- /dev/null
+++ b/Documentation/admin-guide/abi-removed-files.rst
@@ -0,0 +1,7 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+Removed ABI Files
+=================
+
+.. kernel-abi:: removed
+ :no-symbols:
diff --git a/Documentation/admin-guide/abi-removed.rst b/Documentation/admin-guide/abi-removed.rst
index f9e000c81828..88832d3eacd6 100644
--- a/Documentation/admin-guide/abi-removed.rst
+++ b/Documentation/admin-guide/abi-removed.rst
@@ -1,5 +1,7 @@
+.. SPDX-License-Identifier: GPL-2.0
+
ABI removed symbols
===================
-.. kernel-abi:: ABI/removed
- :rst:
+.. kernel-abi:: removed
+ :no-files:
diff --git a/Documentation/admin-guide/abi-stable-files.rst b/Documentation/admin-guide/abi-stable-files.rst
new file mode 100644
index 000000000000..f867738fc178
--- /dev/null
+++ b/Documentation/admin-guide/abi-stable-files.rst
@@ -0,0 +1,7 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+Stable ABI Files
+================
+
+.. kernel-abi:: stable
+ :no-symbols:
diff --git a/Documentation/admin-guide/abi-stable.rst b/Documentation/admin-guide/abi-stable.rst
index fc3361d847b1..528c68401f4b 100644
--- a/Documentation/admin-guide/abi-stable.rst
+++ b/Documentation/admin-guide/abi-stable.rst
@@ -1,3 +1,5 @@
+.. SPDX-License-Identifier: GPL-2.0
+
ABI stable symbols
==================
@@ -10,5 +12,5 @@ for at least 2 years.
Most interfaces (like syscalls) are expected to never change and always
be available.
-.. kernel-abi:: ABI/stable
- :rst:
+.. kernel-abi:: stable
+ :no-files:
diff --git a/Documentation/admin-guide/abi-testing-files.rst b/Documentation/admin-guide/abi-testing-files.rst
new file mode 100644
index 000000000000..1da868e42fdb
--- /dev/null
+++ b/Documentation/admin-guide/abi-testing-files.rst
@@ -0,0 +1,7 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+Testing ABI Files
+=================
+
+.. kernel-abi:: testing
+ :no-symbols:
diff --git a/Documentation/admin-guide/abi-testing.rst b/Documentation/admin-guide/abi-testing.rst
index 19767926b344..6153ebd38e2d 100644
--- a/Documentation/admin-guide/abi-testing.rst
+++ b/Documentation/admin-guide/abi-testing.rst
@@ -1,3 +1,5 @@
+.. SPDX-License-Identifier: GPL-2.0
+
ABI testing symbols
===================
@@ -16,5 +18,5 @@ Programs that use these interfaces are strongly encouraged to add their
name to the description of these interfaces, so that the kernel
developers can easily notify them if any changes occur.
-.. kernel-abi:: ABI/testing
- :rst:
+.. kernel-abi:: testing
+ :no-files:
diff --git a/Documentation/admin-guide/abi.rst b/Documentation/admin-guide/abi.rst
index bcab3ef2597c..c6039359e585 100644
--- a/Documentation/admin-guide/abi.rst
+++ b/Documentation/admin-guide/abi.rst
@@ -1,7 +1,14 @@
+.. SPDX-License-Identifier: GPL-2.0
+
=====================
Linux ABI description
=====================
+.. kernel-abi:: README
+
+ABI symbols
+-----------
+
.. toctree::
:maxdepth: 2
@@ -9,3 +16,14 @@ Linux ABI description
abi-testing
abi-obsolete
abi-removed
+
+ABI files
+---------
+
+.. toctree::
+ :maxdepth: 2
+
+ abi-stable-files
+ abi-testing-files
+ abi-obsolete-files
+ abi-removed-files
diff --git a/Documentation/admin-guide/cgroup-v1/freezer-subsystem.rst b/Documentation/admin-guide/cgroup-v1/freezer-subsystem.rst
index 582d3427de3f..a964aff373b1 100644
--- a/Documentation/admin-guide/cgroup-v1/freezer-subsystem.rst
+++ b/Documentation/admin-guide/cgroup-v1/freezer-subsystem.rst
@@ -125,3 +125,7 @@ to unfreeze all tasks in the container::
This is the basic mechanism which should do the right thing for user space task
in a simple scenario.
+
+This freezer implementation is affected by shortcomings (see commit
+76f969e8948d8 ("cgroup: cgroup v2 freezer")) and cgroup v2 freezer is
+recommended.
diff --git a/Documentation/admin-guide/cgroup-v1/memory.rst b/Documentation/admin-guide/cgroup-v1/memory.rst
index 286d16fc22eb..02b8206a3594 100644
--- a/Documentation/admin-guide/cgroup-v1/memory.rst
+++ b/Documentation/admin-guide/cgroup-v1/memory.rst
@@ -90,6 +90,7 @@ Brief summary of control files.
used.
memory.swappiness set/show swappiness parameter of vmscan
(See sysctl's vm.swappiness)
+ Per memcg knob does not exist in cgroup v2.
memory.move_charge_at_immigrate This knob is deprecated.
memory.oom_control set/show oom controls.
This knob is deprecated and shouldn't be
diff --git a/Documentation/admin-guide/cgroup-v2.rst b/Documentation/admin-guide/cgroup-v2.rst
index cb1b4e759b7e..f293a13b42ed 100644
--- a/Documentation/admin-guide/cgroup-v2.rst
+++ b/Documentation/admin-guide/cgroup-v2.rst
@@ -1076,15 +1076,20 @@ cpufreq governor about the minimum desired frequency which should always be
provided by a CPU, as well as the maximum desired frequency, which should not
be exceeded by a CPU.
-WARNING: cgroup2 doesn't yet support control of realtime processes. For
-a kernel built with the CONFIG_RT_GROUP_SCHED option enabled for group
-scheduling of realtime processes, the cpu controller can only be enabled
-when all RT processes are in the root cgroup. This limitation does
-not apply if CONFIG_RT_GROUP_SCHED is disabled. Be aware that system
-management software may already have placed RT processes into nonroot
-cgroups during the system boot process, and these processes may need
-to be moved to the root cgroup before the cpu controller can be enabled
-with a CONFIG_RT_GROUP_SCHED enabled kernel.
+WARNING: cgroup2 cpu controller doesn't yet fully support the control of
+realtime processes. For a kernel built with the CONFIG_RT_GROUP_SCHED option
+enabled for group scheduling of realtime processes, the cpu controller can only
+be enabled when all RT processes are in the root cgroup. Be aware that system
+management software may already have placed RT processes into non-root cgroups
+during the system boot process, and these processes may need to be moved to the
+root cgroup before the cpu controller can be enabled with a
+CONFIG_RT_GROUP_SCHED enabled kernel.
+
+With CONFIG_RT_GROUP_SCHED disabled, this limitation does not apply and some of
+the interface files either affect realtime processes or account for them. See
+the following section for details. Only the cpu controller is affected by
+CONFIG_RT_GROUP_SCHED. Other controllers can be used for the resource control of
+realtime processes irrespective of CONFIG_RT_GROUP_SCHED.
CPU Interface Files
diff --git a/Documentation/admin-guide/ext4.rst b/Documentation/admin-guide/ext4.rst
index 2418b0c2d3df..b857eb6ca1b6 100644
--- a/Documentation/admin-guide/ext4.rst
+++ b/Documentation/admin-guide/ext4.rst
@@ -238,11 +238,10 @@ When mounting an ext4 filesystem, the following option are accepted:
configured using tune2fs)
data_err=ignore(*)
- Just print an error message if an error occurs in a file data buffer in
- ordered mode.
+ Just print an error message if an error occurs in a file data buffer.
+
data_err=abort
- Abort the journal if an error occurs in a file data buffer in ordered
- mode.
+ Abort the journal if an error occurs in a file data buffer.
grpid | bsdgroups
New objects have the group ID of their parent.
diff --git a/Documentation/admin-guide/gpio/gpio-sim.rst b/Documentation/admin-guide/gpio/gpio-sim.rst
index 1cc5567a4bbe..35d49ccd49e0 100644
--- a/Documentation/admin-guide/gpio/gpio-sim.rst
+++ b/Documentation/admin-guide/gpio/gpio-sim.rst
@@ -71,7 +71,7 @@ specific lines. The name of those subdirectories must take the form of:
``'line<offset>'`` (e.g. ``'line0'``, ``'line20'``, etc.) as the name will be
used by the module to assign the config to the specific line at given offset.
-Once the confiuration is complete, the ``'live'`` attribute must be set to 1 in
+Once the configuration is complete, the ``'live'`` attribute must be set to 1 in
order to instantiate the chip. It can be set back to 0 to destroy the simulated
chip. The module will synchronously wait for the new simulated device to be
successfully probed and if this doesn't happen, writing to ``'live'`` will
diff --git a/Documentation/admin-guide/gpio/gpio-virtuser.rst b/Documentation/admin-guide/gpio/gpio-virtuser.rst
index 2aca70db9f3b..7e7c0df51640 100644
--- a/Documentation/admin-guide/gpio/gpio-virtuser.rst
+++ b/Documentation/admin-guide/gpio/gpio-virtuser.rst
@@ -92,7 +92,7 @@ struct. The first two take string values as arguments:
Activating GPIO consumers
-------------------------
-Once the confiuration is complete, the ``'live'`` attribute must be set to 1 in
+Once the configuration is complete, the ``'live'`` attribute must be set to 1 in
order to instantiate the consumer. It can be set back to 0 to destroy the
virtual device. The module will synchronously wait for the new simulated device
to be successfully probed and if this doesn't happen, writing to ``'live'`` will
diff --git a/Documentation/admin-guide/highuid.rst b/Documentation/admin-guide/highuid.rst
deleted file mode 100644
index 6ee70465c0ea..000000000000
--- a/Documentation/admin-guide/highuid.rst
+++ /dev/null
@@ -1,80 +0,0 @@
-===================================================
-Notes on the change from 16-bit UIDs to 32-bit UIDs
-===================================================
-
-:Author: Chris Wing <wingc@umich.edu>
-:Last updated: January 11, 2000
-
-- kernel code MUST take into account __kernel_uid_t and __kernel_uid32_t
- when communicating between user and kernel space in an ioctl or data
- structure.
-
-- kernel code should use uid_t and gid_t in kernel-private structures and
- code.
-
-What's left to be done for 32-bit UIDs on all Linux architectures:
-
-- Disk quotas have an interesting limitation that is not related to the
- maximum UID/GID. They are limited by the maximum file size on the
- underlying filesystem, because quota records are written at offsets
- corresponding to the UID in question.
- Further investigation is needed to see if the quota system can cope
- properly with huge UIDs. If it can deal with 64-bit file offsets on all
- architectures, this should not be a problem.
-
-- Decide whether or not to keep backwards compatibility with the system
- accounting file, or if we should break it as the comments suggest
- (currently, the old 16-bit UID and GID are still written to disk, and
- part of the former pad space is used to store separate 32-bit UID and
- GID)
-
-- Need to validate that OS emulation calls the 16-bit UID
- compatibility syscalls, if the OS being emulated used 16-bit UIDs, or
- uses the 32-bit UID system calls properly otherwise.
-
- This affects at least:
-
- - iBCS on Intel
-
- - sparc32 emulation on sparc64
- (need to support whatever new 32-bit UID system calls are added to
- sparc32)
-
-- Validate that all filesystems behave properly.
-
- At present, 32-bit UIDs _should_ work for:
-
- - ext2
- - ufs
- - isofs
- - nfs
- - coda
- - udf
-
- Ioctl() fixups have been made for:
-
- - ncpfs
- - smbfs
-
- Filesystems with simple fixups to prevent 16-bit UID wraparound:
-
- - minix
- - sysv
- - qnx4
-
- Other filesystems have not been checked yet.
-
-- The ncpfs and smpfs filesystems cannot presently use 32-bit UIDs in
- all ioctl()s. Some new ioctl()s have been added with 32-bit UIDs, but
- more are needed. (as well as new user<->kernel data structures)
-
-- The ELF core dump format only supports 16-bit UIDs on arm, i386, m68k,
- sh, and sparc32. Fixing this is probably not that important, but would
- require adding a new ELF section.
-
-- The ioctl()s used to control the in-kernel NFS server only support
- 16-bit UIDs on arm, i386, m68k, sh, and sparc32.
-
-- make sure that the UID mapping feature of AX25 networking works properly
- (it should be safe because it's always used a 32-bit integer to
- communicate between user and kernel)
diff --git a/Documentation/admin-guide/hw-vuln/reg-file-data-sampling.rst b/Documentation/admin-guide/hw-vuln/reg-file-data-sampling.rst
index 0585d02b9a6c..ad15417d39f9 100644
--- a/Documentation/admin-guide/hw-vuln/reg-file-data-sampling.rst
+++ b/Documentation/admin-guide/hw-vuln/reg-file-data-sampling.rst
@@ -29,14 +29,6 @@ Below is the list of affected Intel processors [#f1]_:
RAPTORLAKE_S 06_BFH
=================== ============
-As an exception to this table, Intel Xeon E family parts ALDERLAKE(06_97H) and
-RAPTORLAKE(06_B7H) codenamed Catlow are not affected. They are reported as
-vulnerable in Linux because they share the same family/model with an affected
-part. Unlike their affected counterparts, they do not enumerate RFDS_CLEAR or
-CPUID.HYBRID. This information could be used to distinguish between the
-affected and unaffected parts, but it is deemed not worth adding complexity as
-the reporting is fixed automatically when these parts enumerate RFDS_NO.
-
Mitigation
==========
Intel released a microcode update that enables software to clear sensitive
diff --git a/Documentation/admin-guide/hw-vuln/srso.rst b/Documentation/admin-guide/hw-vuln/srso.rst
index 2ad1c05b8c88..66af95251a3d 100644
--- a/Documentation/admin-guide/hw-vuln/srso.rst
+++ b/Documentation/admin-guide/hw-vuln/srso.rst
@@ -104,7 +104,20 @@ The possible values in this file are:
(spec_rstack_overflow=ibpb-vmexit)
+ * 'Mitigation: Reduced Speculation':
+   This mitigation is enabled automatically when the "IBPB on VMEXIT"
+   mitigation above has been selected and the CPU supports the BpSpecReduce bit.
+
+ It gets automatically enabled on machines which have the
+ SRSO_USER_KERNEL_NO=1 CPUID bit. In that case, the code logic is to switch
+ to the above =ibpb-vmexit mitigation because the user/kernel boundary is
+ not affected anymore and thus "safe RET" is not needed.
+
+   After the IBPB on VMEXIT mitigation option has been enabled, the
+   BpSpecReduce bit is detected (the functionality is present on all such
+   machines) and it practically overrides IBPB on VMEXIT, as it has much less
+   performance impact and also covers the guest->host attack vector.
In order to exploit vulnerability, an attacker needs to:
diff --git a/Documentation/admin-guide/index.rst b/Documentation/admin-guide/index.rst
index c8af32a8f800..259d79fbeb94 100644
--- a/Documentation/admin-guide/index.rst
+++ b/Documentation/admin-guide/index.rst
@@ -187,7 +187,6 @@ A few hard-to-categorize and generally obsolete documents.
.. toctree::
:maxdepth: 1
- highuid
ldm
unicode
diff --git a/Documentation/admin-guide/iostats.rst b/Documentation/admin-guide/iostats.rst
index 609a3201fd4e..9453196ade51 100644
--- a/Documentation/admin-guide/iostats.rst
+++ b/Documentation/admin-guide/iostats.rst
@@ -2,62 +2,39 @@
I/O statistics fields
=====================
-Since 2.4.20 (and some versions before, with patches), and 2.5.45,
-more extensive disk statistics have been introduced to help measure disk
-activity. Tools such as ``sar`` and ``iostat`` typically interpret these and do
-the work for you, but in case you are interested in creating your own
-tools, the fields are explained here.
-
-In 2.4 now, the information is found as additional fields in
-``/proc/partitions``. In 2.6 and upper, the same information is found in two
-places: one is in the file ``/proc/diskstats``, and the other is within
-the sysfs file system, which must be mounted in order to obtain
-the information. Throughout this document we'll assume that sysfs
-is mounted on ``/sys``, although of course it may be mounted anywhere.
-Both ``/proc/diskstats`` and sysfs use the same source for the information
-and so should not differ.
-
-Here are examples of these different formats::
-
- 2.4:
- 3 0 39082680 hda 446216 784926 9550688 4382310 424847 312726 5922052 19310380 0 3376340 23705160
- 3 1 9221278 hda1 35486 0 35496 38030 0 0 0 0 0 38030 38030
-
- 2.6+ sysfs:
- 446216 784926 9550688 4382310 424847 312726 5922052 19310380 0 3376340 23705160
- 35486 38030 38030 38030
-
- 2.6+ diskstats:
- 3 0 hda 446216 784926 9550688 4382310 424847 312726 5922052 19310380 0 3376340 23705160
- 3 1 hda1 35486 38030 38030 38030
-
- 4.18+ diskstats:
- 3 0 hda 446216 784926 9550688 4382310 424847 312726 5922052 19310380 0 3376340 23705160 0 0 0 0
-
-On 2.4 you might execute ``grep 'hda ' /proc/partitions``. On 2.6+, you have
-a choice of ``cat /sys/block/hda/stat`` or ``grep 'hda ' /proc/diskstats``.
-
-The advantage of one over the other is that the sysfs choice works well
-if you are watching a known, small set of disks. ``/proc/diskstats`` may
-be a better choice if you are watching a large number of disks because
-you'll avoid the overhead of 50, 100, or 500 or more opens/closes with
-each snapshot of your disk statistics.
-
-In 2.4, the statistics fields are those after the device name. In
-the above example, the first field of statistics would be 446216.
-By contrast, in 2.6+ if you look at ``/sys/block/hda/stat``, you'll
-find just the 15 fields, beginning with 446216. If you look at
-``/proc/diskstats``, the 15 fields will be preceded by the major and
-minor device numbers, and device name. Each of these formats provides
-15 fields of statistics, each meaning exactly the same things.
-All fields except field 9 are cumulative since boot. Field 9 should
-go to zero as I/Os complete; all others only increase (unless they
-overflow and wrap). Wrapping might eventually occur on a very busy
-or long-lived system; so applications should be prepared to deal with
-it. Regarding wrapping, the types of the fields are either unsigned
-int (32 bit) or unsigned long (32-bit or 64-bit, depending on your
-machine) as noted per-field below. Unless your observations are very
-spread in time, these fields should not wrap twice before you notice it.
+The kernel exposes disk statistics via ``/proc/diskstats`` and
+``/sys/block/<device>/stat``. These stats are usually accessed via tools
+such as ``sar`` and ``iostat``.
+
+Here are examples using a disk with two partitions::
+
+ /proc/diskstats:
+ 259 0 nvme0n1 255999 814 12369153 47919 996852 81 36123024 425995 0 301795 580470 0 0 0 0 60602 106555
+ 259 1 nvme0n1p1 492 813 17572 96 848 81 108288 210 0 76 307 0 0 0 0 0 0
+ 259 2 nvme0n1p2 255401 1 12343477 47799 996004 0 36014736 425784 0 344336 473584 0 0 0 0 0 0
+
+ /sys/block/nvme0n1/stat:
+ 255999 814 12369153 47919 996858 81 36123056 426009 0 301809 580491 0 0 0 0 60605 106562
+
+ /sys/block/nvme0n1/nvme0n1p1/stat:
+ 492 813 17572 96 848 81 108288 210 0 76 307 0 0 0 0 0 0
+
+Both files contain the same 17 statistics. ``/sys/block/<device>/stat``
+contains the fields for ``<device>``. In ``/proc/diskstats`` the fields
+are prefixed with the major and minor device numbers and the device
+name. In the example above, the first stat value for ``nvme0n1`` is
+255999 in both files.
+
+The sysfs ``stat`` file is efficient for monitoring a small, known set
+of disks. If you're tracking a large number of devices,
+``/proc/diskstats`` is often the better choice since it avoids the
+overhead of opening and closing multiple files for each snapshot.
+
+All fields are cumulative, monotonic counters, except for field 9, which
+resets to zero as I/Os complete. The remaining fields reset at boot, on
+device reattachment or reinitialization, or when the underlying counter
+overflows. Applications reading these counters should detect and handle
+resets when comparing stat snapshots.
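+
+As a rough sketch of such handling (device name and interval are only
+illustrative), two snapshots of the third field of the sysfs ``stat`` file
+(sectors read) could be compared while treating a decrease as a reset::
+
+  s1=$(awk '{print $3}' /sys/block/nvme0n1/stat)
+  sleep 10
+  s2=$(awk '{print $3}' /sys/block/nvme0n1/stat)
+  if [ "$s2" -ge "$s1" ]; then
+      echo "sectors read during the interval: $((s2 - s1))"
+  else
+      echo "counter reset detected, discarding this sample"
+  fi
+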
Each set of stats only applies to the indicated device; if you want
system-wide stats you'll have to find all the devices and sum them all up.
diff --git a/Documentation/admin-guide/kdump/kdump.rst b/Documentation/admin-guide/kdump/kdump.rst
index 5376890adbeb..1f7f14c6e184 100644
--- a/Documentation/admin-guide/kdump/kdump.rst
+++ b/Documentation/admin-guide/kdump/kdump.rst
@@ -180,10 +180,6 @@ Dump-capture kernel config options (Arch Dependent, i386 and x86_64)
1) On i386, enable high memory support under "Processor type and
features"::
- CONFIG_HIGHMEM64G=y
-
- or::
-
CONFIG_HIGHMEM4G
2) With CONFIG_SMP=y, usually nr_cpus=1 need specified on the kernel
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index fb8752b42ec8..5e351ac52cca 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -416,10 +416,6 @@
Format: { quiet (default) | verbose | debug }
Change the amount of debugging information output
when initialising the APIC and IO-APIC components.
- For X86-32, this can also be used to specify an APIC
- driver name.
- Format: apic=driver_name
- Examples: apic=bigsmp
apic_extnmi= [APIC,X86,EARLY] External NMI delivery setting
Format: { bsp (default) | all | none }
@@ -1785,7 +1781,9 @@
allocation boundaries as a proactive defense
against bounds-checking flaws in the kernel's
copy_to_user()/copy_from_user() interface.
- on Perform hardened usercopy checks (default).
+ The default is determined by
+ CONFIG_HARDENED_USERCOPY_DEFAULT_ON.
+ on Perform hardened usercopy checks.
off Disable hardened usercopy checks.
hardlockup_all_cpu_backtrace=
@@ -2316,6 +2314,9 @@
per_cpu_perf_limits
Allow per-logical-CPU P-State performance control limits using
cpufreq sysfs interface
+ no_cas
+ Do not enable capacity-aware scheduling (CAS) on
+ hybrid systems
intremap= [X86-64,Intel-IOMMU,EARLY]
on enable Interrupt Remapping (default)
@@ -3116,6 +3117,8 @@
* max_sec_lba48: Set or clear transfer size limit to
65535 sectors.
+ * external: Mark port as external (hotplug-capable).
+
* [no]lpm: Enable or disable link power management.
* [no]setxfer: Indicate if transfer speed mode setting
@@ -5017,6 +5020,14 @@
Format: <bool>
default: 0 (auto_verbose is enabled)
+ printk.debug_non_panic_cpus=
+ Allows storing messages from non-panic CPUs into
+ the printk log buffer during panic(). They are
+ flushed to consoles by the panic-CPU on
+ a best-effort basis.
+ Format: <bool> (1/Y/y=enable, 0/N/n=disable)
+ Default: disabled
+
printk.devkmsg={on,off,ratelimit}
Control writing to /dev/kmsg.
on - unlimited logging to /dev/kmsg from userspace
@@ -5758,6 +5769,11 @@
rcutorture.test_boost_duration= [KNL]
Duration (s) of each individual boost test.
+ rcutorture.test_boost_holdoff= [KNL]
+ Holdoff time (s) from start of test to the start
+ of RCU priority-boost testing. Defaults to zero,
+ that is, no holdoff.
+
rcutorture.test_boost_interval= [KNL]
Interval (s) between each boost test.
@@ -6082,7 +6098,7 @@
is assumed to be I/O ports; otherwise it is memory.
reserve_mem= [RAM]
- Format: nn[KNG]:<align>:<label>
+ Format: nn[KMG]:<align>:<label>
Reserve physical memory and label it with a name that
other subsystems can use to access it. This is typically
used for systems that do not wipe the RAM, and this command
@@ -6582,6 +6598,8 @@
Selecting 'on' will also enable the mitigation
against user space to user space task attacks.
+			Selecting a specific mitigation does not force-enable
+			the user space protections.
Selecting 'off' will disable both the kernel and
the user space protections.
@@ -7279,6 +7297,15 @@
See also "Event triggers" in Documentation/trace/events.rst
+ traceoff_after_boot
+ [FTRACE] Sometimes tracing is used to debug issues
+ during the boot process. Since the trace buffer has a
+ limited amount of storage, it may be prudent to
+ disable tracing after the boot is finished, otherwise
+ the critical information may be overwritten. With this
+ option, the main tracing buffer will be turned off at
+ the end of the boot process.
+
traceoff_on_warning
[FTRACE] enable this option to disable tracing when a
warning is hit. This turns off "tracing_on". Tracing can
@@ -7672,13 +7699,6 @@
16 - SIGBUS faults
Example: user_debug=31
- userpte=
- [X86,EARLY] Flags controlling user PTE allocations.
-
- nohigh = do not allocate PTE pages in
- HIGHMEM regardless of setting
- of CONFIG_HIGHPTE.
-
vdso= [X86,SH,SPARC]
On X86_32, this is an alias for vdso32=. Otherwise:
diff --git a/Documentation/admin-guide/kernel-per-CPU-kthreads.rst b/Documentation/admin-guide/kernel-per-CPU-kthreads.rst
index ea7fa2a8bbf0..ee9a6c94f383 100644
--- a/Documentation/admin-guide/kernel-per-CPU-kthreads.rst
+++ b/Documentation/admin-guide/kernel-per-CPU-kthreads.rst
@@ -278,12 +278,7 @@ To reduce its OS jitter, do any of the following:
due to the rtas_event_scan() function.
WARNING: Please check your CPU specifications to
make sure that this is safe on your particular system.
- e. If running on Cell Processor, build your kernel with
- CBE_CPUFREQ_SPU_GOVERNOR=n to avoid OS jitter from
- spu_gov_work().
- WARNING: Please check your CPU specifications to
- make sure that this is safe on your particular system.
- f. If running on PowerMAC, build your kernel with
+ e. If running on PowerMAC, build your kernel with
CONFIG_PMAC_RACKMETER=n to disable the CPU-meter,
avoiding OS jitter from rackmeter_do_timer().
diff --git a/Documentation/admin-guide/laptops/index.rst b/Documentation/admin-guide/laptops/index.rst
index cd9a1c2695fd..e71c8984c23e 100644
--- a/Documentation/admin-guide/laptops/index.rst
+++ b/Documentation/admin-guide/laptops/index.rst
@@ -11,6 +11,7 @@ Laptop Drivers
disk-shock-protection
laptop-mode
lg-laptop
+ samsung-galaxybook
sony-laptop
sonypi
thinkpad-acpi
diff --git a/Documentation/admin-guide/laptops/samsung-galaxybook.rst b/Documentation/admin-guide/laptops/samsung-galaxybook.rst
new file mode 100644
index 000000000000..752b8f1a4a74
--- /dev/null
+++ b/Documentation/admin-guide/laptops/samsung-galaxybook.rst
@@ -0,0 +1,174 @@
+.. SPDX-License-Identifier: GPL-2.0-or-later
+
+==========================
+Samsung Galaxy Book Driver
+==========================
+
+Joshua Grisham <josh@joshuagrisham.com>
+
+This is a Linux x86 platform driver for Samsung Galaxy Book series notebook
+devices which utilizes Samsung's ``SCAI`` ACPI device in order to control
+extra features and receive various notifications.
+
+Supported devices
+=================
+
+Any device with one of the supported ACPI device IDs should be supported. This
+covers most of the "Samsung Galaxy Book" series notebooks that are currently
+available as of this writing, and could include other Samsung notebook devices
+as well.
+
+Status
+======
+
+The following features are currently supported:
+
+- :ref:`Keyboard backlight <keyboard-backlight>` control
+- :ref:`Performance mode <performance-mode>` control implemented using the
+ platform profile interface
+- :ref:`Battery charge control end threshold
+ <battery-charge-control-end-threshold>` (stop charging battery at given
+ percentage value) implemented as a battery hook
+- :ref:`Firmware Attributes <firmware-attributes>` to allow control of various
+ device settings
+- :ref:`Handling of Fn hotkeys <keyboard-hotkey-actions>` for various actions
+- :ref:`Handling of ACPI notifications and hotkeys
+ <acpi-notifications-and-hotkey-actions>`
+
+Because different models of these devices can vary in their features, there is
+logic built within the driver which attempts to test each implemented feature
+for a valid response before enabling its support (registering additional devices
+or extensions, adding sysfs attributes, etc.). Note, therefore, that not all
+features may be supported on your particular device.
+
+The following features might be possible to implement but will require
+additional investigation and are therefore not supported at this time:
+
+- "Dolby Atmos" mode for the speakers
+- "Outdoor Mode" for increasing screen brightness on models with ``SAM0427``
+- "Silent Mode" on models with ``SAM0427``
+
+.. _keyboard-backlight:
+
+Keyboard backlight
+==================
+
+A new LED class named ``samsung-galaxybook::kbd_backlight`` is created which
+will then expose the device using the standard sysfs-based LED interface at
+``/sys/class/leds/samsung-galaxybook::kbd_backlight``. Brightness can be
+controlled by writing the desired value to the ``brightness`` sysfs attribute or
+with any other desired userspace utility.
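+
+For example, the maximum supported level could be read from the standard
+``max_brightness`` attribute and an intermediate level (shown here purely for
+illustration) selected by writing it to ``brightness``::
+
+  cat /sys/class/leds/samsung-galaxybook::kbd_backlight/max_brightness
+  echo 2 > /sys/class/leds/samsung-galaxybook::kbd_backlight/brightness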
+
+.. note::
+ Most of these devices have an ambient light sensor which also turns
+ off the keyboard backlight under well-lit conditions. This behavior does not
+ seem possible to control at this time, but can be good to be aware of.
+
+.. _performance-mode:
+
+Performance mode
+================
+
+This driver implements the
+Documentation/userspace-api/sysfs-platform_profile.rst interface for working
+with the "performance mode" function of the Samsung ACPI device.
+
+Mapping of each Samsung "performance mode" to its respective platform profile is
+performed dynamically by the driver, as not all models support all of the same
+performance modes. Your device might have one or more of the following mappings:
+
+- "Silent" maps to ``low-power``
+- "Quiet" maps to ``quiet``
+- "Optimized" maps to ``balanced``
+- "High performance" maps to ``performance``
+
+The result of the mapping can be printed in the kernel log when the module is
+loaded. Supported profiles can also be retrieved from
+``/sys/firmware/acpi/platform_profile_choices``, while
+``/sys/firmware/acpi/platform_profile`` can be used to read or write the
+currently selected profile.
+
+The ``balanced`` platform profile will be set during module load if no profile
+has been previously set.
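+
+For instance, the supported profiles could be inspected and a different one
+selected from a shell (the available profile names depend on the model, as
+described above)::
+
+  cat /sys/firmware/acpi/platform_profile_choices
+  cat /sys/firmware/acpi/platform_profile
+  echo quiet > /sys/firmware/acpi/platform_profile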
+
+.. _battery-charge-control-end-threshold:
+
+Battery charge control end threshold
+====================================
+
+This platform driver will add the ability to set the battery's charge control
+end threshold, but does not have the ability to set a start threshold.
+
+This feature is typically called "Battery Saver" by the various Samsung
+applications in Windows, but in Linux we have implemented the standardized
+"charge control threshold" sysfs interface on the battery device to allow for
+controlling this functionality from the userspace.
+
+The sysfs attribute
+``/sys/class/power_supply/BAT1/charge_control_end_threshold`` can be used to
+read or set the desired charge end threshold.
+
+If you wish to maintain interoperability with the Samsung Settings application
+in Windows, then you should set the value to 100 to represent "off", or enable
+the feature using only one of the following values: 50, 60, 70, 80, or 90.
+Otherwise, the driver will accept any value between 1 and 100 as the percentage
+that you wish the battery to stop charging at.
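+
+As an illustration, charging could be capped at 80% (one of the values also
+recognized by the Samsung Settings application) and the setting verified::
+
+  echo 80 > /sys/class/power_supply/BAT1/charge_control_end_threshold
+  cat /sys/class/power_supply/BAT1/charge_control_end_threshold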
+
+.. note::
+ Some devices have been observed as automatically "turning off" the charge
+ control end threshold if an input value of less than 30 is given.
+
+.. _firmware-attributes:
+
+Firmware Attributes
+===================
+
+The following enumeration-typed firmware attributes are set up by this driver
+and should be accessible under
+``/sys/class/firmware-attributes/samsung-galaxybook/attributes/`` if your device
+supports them:
+
+- ``power_on_lid_open`` (device should power on when the lid is opened)
+- ``usb_charging`` (USB ports can deliver power to connected devices even when
+ the device is powered off or in a low sleep state)
+- ``block_recording`` (blocks access to camera and microphone)
+
+All of these attributes are simple boolean-like enumeration values which use 0
+to represent "off" and 1 to represent "on". Use the ``current_value`` attribute
+to get or change the setting on the device.
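+
+For example, assuming the ``usb_charging`` attribute is exposed on a given
+model, it could be read and then enabled like this::
+
+  cd /sys/class/firmware-attributes/samsung-galaxybook/attributes
+  cat usb_charging/current_value
+  echo 1 > usb_charging/current_value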
+
+Note that when ``block_recording`` is updated, the input device "Samsung Galaxy
+Book Lens Cover" will receive a ``SW_CAMERA_LENS_COVER`` switch event which
+reflects the current state.
+
+.. _keyboard-hotkey-actions:
+
+Keyboard hotkey actions (i8042 filter)
+======================================
+
+The i8042 filter will swallow the keyboard events for the Fn+F9 hotkey (Multi-
+level keyboard backlight toggle) and Fn+F10 hotkey (Block recording toggle)
+and instead execute their actions within the driver itself.
+
+Fn+F9 will cycle through the brightness levels of the keyboard backlight. A
+notification will be sent using ``led_classdev_notify_brightness_hw_changed``
+so that the userspace can be aware of the change. This mimics the behavior of
+other existing devices where the brightness level is cycled internally by the
+embedded controller and then reported via a notification.
+
+Fn+F10 will toggle the value of the "block recording" setting, which blocks
+or allows usage of the built-in camera and microphone (and generates the same
+Lens Cover switch event mentioned above).
+
+.. _acpi-notifications-and-hotkey-actions:
+
+ACPI notifications and hotkey actions
+=====================================
+
+ACPI notifications will generate ACPI netlink events under the device class
+``samsung-galaxybook`` and bus ID matching the Samsung ACPI device ID found on
+your device. The events can be received using userspace tools such as
+``acpi_listen`` and ``acpid``.
+
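+A simple way to observe these notifications is to run ``acpi_listen``
+(typically shipped with ``acpid``) and press one of the handled hotkeys;
+events from this driver are reported under the ``samsung-galaxybook`` class::
+
+  acpi_listen | grep samsung-galaxybook
+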
+The Fn+F11 Performance mode hotkey will be handled by the driver; each keypress
+will cycle to the next available platform profile.
diff --git a/Documentation/admin-guide/media/cec.rst b/Documentation/admin-guide/media/cec.rst
index 92690e1f2183..b2e7a300494a 100644
--- a/Documentation/admin-guide/media/cec.rst
+++ b/Documentation/admin-guide/media/cec.rst
@@ -451,7 +451,7 @@ configure the CEC devices for HDMI Input and the HDMI Outputs manually.
---------------------
A three character manufacturer name that is used in the EDID for the HDMI
-Input. If not set, then userspace is reponsible for configuring an EDID.
+Input. If not set, then userspace is responsible for configuring an EDID.
If set, then the driver will update the EDID automatically based on the
resolutions supported by the connected displays, and it will not be possible
anymore to manually set the EDID for the HDMI Input.
diff --git a/Documentation/admin-guide/media/mgb4.rst b/Documentation/admin-guide/media/mgb4.rst
index b9da127c074d..f69d331e3cb1 100644
--- a/Documentation/admin-guide/media/mgb4.rst
+++ b/Documentation/admin-guide/media/mgb4.rst
@@ -22,7 +22,9 @@ Global (PCI card) parameters
| 0 - No module present
| 1 - FPDL3
- | 2 - GMSL
+ | 2 - GMSL (one serializer, two daisy chained deserializers)
+ | 3 - GMSL (one serializer, two deserializers)
+ | 4 - GMSL (two deserializers with two daisy chain outputs)
**module_version** (R):
Module version number. Zero in case of a missing module.
diff --git a/Documentation/admin-guide/pm/cpufreq.rst b/Documentation/admin-guide/pm/cpufreq.rst
index a21369eba034..3950583f2b15 100644
--- a/Documentation/admin-guide/pm/cpufreq.rst
+++ b/Documentation/admin-guide/pm/cpufreq.rst
@@ -248,6 +248,20 @@ are the following:
If that frequency cannot be determined, this attribute should not
be present.
+``cpuinfo_avg_freq``
+	An average frequency (in kHz) of all CPUs belonging to a given policy,
+	derived from hardware-provided feedback and reported over a time frame
+	spanning at most a few milliseconds.
+
+ This is expected to be based on the frequency the hardware actually runs
+ at and, as such, might require specialised hardware support (such as AMU
+ extension on ARM). If one cannot be determined, this attribute should
+ not be present.
+
+	Note that a failed attempt to retrieve the current frequency for a given
+	CPU(s) will result in an appropriate error, e.g. EAGAIN for a CPU that
+	remains idle (raised on ARM).
+
``cpuinfo_max_freq``
Maximum possible operating frequency the CPUs belonging to this policy
can run at (in kHz).
@@ -293,7 +307,8 @@ are the following:
Some architectures (e.g. ``x86``) may attempt to provide information
more precisely reflecting the current CPU frequency through this
attribute, but that still may not be the exact current CPU frequency as
- seen by the hardware at the moment.
+	seen by the hardware at the moment. This behavior, though, is only
+	available via the :c:macro:`CPUFREQ_ARCH_CUR_FREQ` option.
``scaling_driver``
The scaling driver currently in use.
diff --git a/Documentation/admin-guide/pm/cpuidle.rst b/Documentation/admin-guide/pm/cpuidle.rst
index eb58d7a5affd..0c090b076224 100644
--- a/Documentation/admin-guide/pm/cpuidle.rst
+++ b/Documentation/admin-guide/pm/cpuidle.rst
@@ -275,20 +275,25 @@ values and, when predicting the idle duration next time, it computes the average
and variance of them. If the variance is small (smaller than 400 square
milliseconds) or it is small relative to the average (the average is greater
than 6 times the standard deviation), the average is regarded as the "typical
-interval" value. Otherwise, the longest of the saved observed idle duration
+interval" value. Otherwise, either the longest or the shortest (depending on
+which one is farther from the average) of the saved observed idle duration
values is discarded and the computation is repeated for the remaining ones.
+
Again, if the variance of them is small (in the above sense), the average is
taken as the "typical interval" value and so on, until either the "typical
-interval" is determined or too many data points are disregarded, in which case
-the "typical interval" is assumed to equal "infinity" (the maximum unsigned
-integer value).
-
-If the "typical interval" computed this way is long enough, the governor obtains
-the time until the closest timer event with the assumption that the scheduler
-tick will be stopped. That time, referred to as the *sleep length* in what follows,
-is the upper bound on the time before the next CPU wakeup. It is used to determine
-the sleep length range, which in turn is needed to get the sleep length correction
-factor.
+interval" is determined or too many data points are disregarded. In the latter
+case, if the size of the set of data points still under consideration is
+sufficiently large, the next idle duration is not likely to be above the largest
+idle duration value still in that set, so that value is taken as the predicted
+next idle duration. Finally, if the set of data points still under
+consideration is too small, no prediction is made.
+
+If the preliminary prediction of the next idle duration computed this way is
+long enough, the governor obtains the time until the closest timer event with
+the assumption that the scheduler tick will be stopped. That time, referred to
+as the *sleep length* in what follows, is the upper bound on the time before the
+next CPU wakeup. It is used to determine the sleep length range, which in turn
+is needed to get the sleep length correction factor.
The ``menu`` governor maintains an array containing several correction factor
values that correspond to different sleep length ranges organized so that each
@@ -302,7 +307,7 @@ to 1 the correction factor becomes (it must fall between 0 and 1 inclusive).
The sleep length is multiplied by the correction factor for the range that it
falls into to obtain an approximation of the predicted idle duration that is
compared to the "typical interval" determined previously and the minimum of
-the two is taken as the idle duration prediction.
+the two is taken as the final idle duration prediction.
If the "typical interval" value is small, which means that the CPU is likely
to be woken up soon enough, the sleep length computation is skipped as it may
diff --git a/Documentation/admin-guide/pm/intel_idle.rst b/Documentation/admin-guide/pm/intel_idle.rst
index 39bd6ecce7de..5940528146eb 100644
--- a/Documentation/admin-guide/pm/intel_idle.rst
+++ b/Documentation/admin-guide/pm/intel_idle.rst
@@ -192,11 +192,19 @@ even if they have been enumerated (see :ref:`cpu-pm-qos` in
Documentation/admin-guide/pm/cpuidle.rst).
Setting ``max_cstate`` to 0 causes the ``intel_idle`` initialization to fail.
-The ``no_acpi`` and ``use_acpi`` module parameters (recognized by ``intel_idle``
-if the kernel has been configured with ACPI support) can be set to make the
-driver ignore the system's ACPI tables entirely or use them for all of the
-recognized processor models, respectively (they both are unset by default and
-``use_acpi`` has no effect if ``no_acpi`` is set).
+The ``no_acpi``, ``use_acpi`` and ``no_native`` module parameters are
+recognized by ``intel_idle`` if the kernel has been configured with ACPI
+support. If ACPI is not configured, these flags have no impact on
+functionality.
+
+``no_acpi`` - Do not use ACPI at all. Only native mode is available, no
+ACPI mode.
+
+``use_acpi`` - No-op in ACPI mode; in native mode, the driver will consult
+ACPI tables for the C-states' on/off status.
+
+``no_native`` - Work only in ACPI mode, no native mode available (ignore
+all custom tables).
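+
+For example, on a system where ``intel_idle`` is built into the kernel, ACPI
+tables could be ignored entirely by booting with the following on the kernel
+command line (a sketch; the parameter prefix assumes the built-in case)::
+
+  intel_idle.no_acpi=1
+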
The value of the ``states_off`` module parameter (0 by default) represents a
list of idle states to be disabled by default in the form of a bitmask.
diff --git a/Documentation/admin-guide/pm/intel_pstate.rst b/Documentation/admin-guide/pm/intel_pstate.rst
index bf13ad25a32f..78fc83ed2a7e 100644
--- a/Documentation/admin-guide/pm/intel_pstate.rst
+++ b/Documentation/admin-guide/pm/intel_pstate.rst
@@ -696,6 +696,9 @@ of them have to be prepended with the ``intel_pstate=`` prefix.
Use per-logical-CPU P-State limits (see `Coordination of P-state
Limits`_ for details).
+``no_cas``
+ Do not enable capacity-aware scheduling (CAS) which is enabled by
+ default on hybrid systems.
Diagnostics and Tuning
======================
diff --git a/Documentation/admin-guide/pnp.rst b/Documentation/admin-guide/pnp.rst
index 3eda08191d13..24d80e3eb309 100644
--- a/Documentation/admin-guide/pnp.rst
+++ b/Documentation/admin-guide/pnp.rst
@@ -129,9 +129,6 @@ pnp_put_protocol
pnp_register_protocol
use this to register a new PnP protocol
-pnp_unregister_protocol
- use this function to remove a PnP protocol from the Plug and Play Layer
-
pnp_register_driver
adds a PnP driver to the Plug and Play Layer
diff --git a/Documentation/admin-guide/serial-console.rst b/Documentation/admin-guide/serial-console.rst
index a3dfc2c66e01..1609e7479249 100644
--- a/Documentation/admin-guide/serial-console.rst
+++ b/Documentation/admin-guide/serial-console.rst
@@ -78,7 +78,9 @@ If no console device is specified, the first device found capable of
acting as a system console will be used. At this time, the system
first looks for a VGA card and then for a serial port. So if you don't
have a VGA card in your system the first serial port will automatically
-become the console.
+become the console, unless the kernel is configured with the
+CONFIG_NULL_TTY_DEFAULT_CONSOLE option, in which case it will default to
+using the ttynull device.
You will need to create a new device to use ``/dev/console``. The official
``/dev/console`` is now character device 5,1.
diff --git a/Documentation/admin-guide/sysctl/kernel.rst b/Documentation/admin-guide/sysctl/kernel.rst
index a43b78b4b646..dd49a89a62d3 100644
--- a/Documentation/admin-guide/sysctl/kernel.rst
+++ b/Documentation/admin-guide/sysctl/kernel.rst
@@ -212,6 +212,17 @@ pid>/``).
This value defaults to 0.
+core_sort_vma
+=============
+
+The default coredump writes VMAs in address order. By setting
+``core_sort_vma`` to 1, VMAs will be written from smallest size
+to largest size. This is known to break at least elfutils, but
+can be handy when dealing with very large (and truncated)
+coredumps where the more useful debugging details are included
+in the smaller VMAs.
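+
+For example, size-ordered dumping could be enabled at run time through either
+the sysctl interface or procfs (a sketch, assuming the knob is present on the
+running kernel)::
+
+  sysctl -w kernel.core_sort_vma=1
+  # equivalently:
+  echo 1 > /proc/sys/kernel/core_sort_vma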
+
+
core_uses_pid
=============
diff --git a/Documentation/admin-guide/thunderbolt.rst b/Documentation/admin-guide/thunderbolt.rst
index 2ed79f41a411..d0502691dfa1 100644
--- a/Documentation/admin-guide/thunderbolt.rst
+++ b/Documentation/admin-guide/thunderbolt.rst
@@ -28,7 +28,7 @@ should be a userspace tool that handles all the low-level details, keeps
a database of the authorized devices and prompts users for new connections.
More details about the sysfs interface for Thunderbolt devices can be
-found in ``Documentation/ABI/testing/sysfs-bus-thunderbolt``.
+found in Documentation/ABI/testing/sysfs-bus-thunderbolt.
Those users who just want to connect any device without any sort of
manual work can add following line to
diff --git a/Documentation/admin-guide/workload-tracing.rst b/Documentation/admin-guide/workload-tracing.rst
index 6be38c1b9c5b..d6313890ee41 100644
--- a/Documentation/admin-guide/workload-tracing.rst
+++ b/Documentation/admin-guide/workload-tracing.rst
@@ -82,7 +82,7 @@ Install tools to build Linux kernel and tools in kernel repository.
scripts/ver_linux is a good way to check if your system already has
the necessary tools::
- sudo apt-get build-essentials flex bison yacc
+ sudo apt-get install build-essential flex bison yacc
sudo apt install libelf-dev systemtap-sdt-dev libslang2-dev libperl-dev libdw-dev
cscope is a good tool to browse kernel sources. Let's install it now::
diff --git a/Documentation/arch/arm64/amu.rst b/Documentation/arch/arm64/amu.rst
index 01f2de2b0450..ac1b3f0e211d 100644
--- a/Documentation/arch/arm64/amu.rst
+++ b/Documentation/arch/arm64/amu.rst
@@ -80,7 +80,7 @@ bypass the setting of AMUSERENR_EL0 to trap accesses from EL0 (userspace) to
EL1 (kernel). Therefore, firmware should still ensure accesses to AMU registers
are not trapped in EL2/EL3.
-The fixed counters of AMUv1 are accessible though the following system
+The fixed counters of AMUv1 are accessible through the following system
register definitions:
- SYS_AMEVCNTR0_CORE_EL0
diff --git a/Documentation/arch/arm64/asymmetric-32bit.rst b/Documentation/arch/arm64/asymmetric-32bit.rst
index 1ca2b359a907..57b8d7476f71 100644
--- a/Documentation/arch/arm64/asymmetric-32bit.rst
+++ b/Documentation/arch/arm64/asymmetric-32bit.rst
@@ -55,7 +55,7 @@ sysfs
The subset of CPUs capable of running 32-bit tasks is described in
``/sys/devices/system/cpu/aarch32_el0`` and is documented further in
-``Documentation/ABI/testing/sysfs-devices-system-cpu``.
+Documentation/ABI/testing/sysfs-devices-system-cpu.
**Note:** CPUs are advertised by this file as they are detected and so
late-onlining of 32-bit-capable CPUs can result in the file contents
diff --git a/Documentation/arch/arm64/booting.rst b/Documentation/arch/arm64/booting.rst
index cad6fdc96b98..dee7b6de864f 100644
--- a/Documentation/arch/arm64/booting.rst
+++ b/Documentation/arch/arm64/booting.rst
@@ -288,6 +288,12 @@ Before jumping into the kernel, the following conditions must be met:
- SCR_EL3.FGTEn (bit 27) must be initialised to 0b1.
+ For CPUs with the Fine Grained Traps 2 (FEAT_FGT2) extension present:
+
+ - If EL3 is present and the kernel is entered at EL2:
+
+ - SCR_EL3.FGTEn2 (bit 59) must be initialised to 0b1.
+
For CPUs with support for HCRX_EL2 (FEAT_HCX) present:
- If EL3 is present and the kernel is entered at EL2:
@@ -382,6 +388,22 @@ Before jumping into the kernel, the following conditions must be met:
- SMCR_EL2.EZT0 (bit 30) must be initialised to 0b1.
+ For CPUs with the Performance Monitors Extension (FEAT_PMUv3p9):
+
+ - If EL3 is present:
+
+ - MDCR_EL3.EnPM2 (bit 7) must be initialised to 0b1.
+
+ - If the kernel is entered at EL1 and EL2 is present:
+
+ - HDFGRTR2_EL2.nPMICNTR_EL0 (bit 2) must be initialised to 0b1.
+ - HDFGRTR2_EL2.nPMICFILTR_EL0 (bit 3) must be initialised to 0b1.
+ - HDFGRTR2_EL2.nPMUACR_EL1 (bit 4) must be initialised to 0b1.
+
+ - HDFGWTR2_EL2.nPMICNTR_EL0 (bit 2) must be initialised to 0b1.
+ - HDFGWTR2_EL2.nPMICFILTR_EL0 (bit 3) must be initialised to 0b1.
+ - HDFGWTR2_EL2.nPMUACR_EL1 (bit 4) must be initialised to 0b1.
+
For CPUs with Memory Copy and Memory Set instructions (FEAT_MOPS):
- If the kernel is entered at EL1 and EL2 is present:
diff --git a/Documentation/arch/arm64/silicon-errata.rst b/Documentation/arch/arm64/silicon-errata.rst
index f074f6219f5c..f968c13b46a7 100644
--- a/Documentation/arch/arm64/silicon-errata.rst
+++ b/Documentation/arch/arm64/silicon-errata.rst
@@ -284,6 +284,8 @@ stable kernels.
+----------------+-----------------+-----------------+-----------------------------+
| Rockchip | RK3588 | #3588001 | ROCKCHIP_ERRATUM_3588001 |
+----------------+-----------------+-----------------+-----------------------------+
+| Rockchip | RK3568 | #3568002 | ROCKCHIP_ERRATUM_3568002 |
++----------------+-----------------+-----------------+-----------------------------+
+----------------+-----------------+-----------------+-----------------------------+
| Fujitsu | A64FX | E#010001 | FUJITSU_ERRATUM_010001 |
+----------------+-----------------+-----------------+-----------------------------+
diff --git a/Documentation/arch/powerpc/cxlflash.rst b/Documentation/arch/powerpc/cxlflash.rst
deleted file mode 100644
index e8f488acfa41..000000000000
--- a/Documentation/arch/powerpc/cxlflash.rst
+++ /dev/null
@@ -1,433 +0,0 @@
-================================
-Coherent Accelerator (CXL) Flash
-================================
-
-Introduction
-============
-
- The IBM Power architecture provides support for CAPI (Coherent
- Accelerator Power Interface), which is available to certain PCIe slots
- on Power 8 systems. CAPI can be thought of as a special tunneling
- protocol through PCIe that allow PCIe adapters to look like special
- purpose co-processors which can read or write an application's
- memory and generate page faults. As a result, the host interface to
- an adapter running in CAPI mode does not require the data buffers to
- be mapped to the device's memory (IOMMU bypass) nor does it require
- memory to be pinned.
-
- On Linux, Coherent Accelerator (CXL) kernel services present CAPI
- devices as a PCI device by implementing a virtual PCI host bridge.
- This abstraction simplifies the infrastructure and programming
- model, allowing for drivers to look similar to other native PCI
- device drivers.
-
- CXL provides a mechanism by which user space applications can
- directly talk to a device (network or storage) bypassing the typical
- kernel/device driver stack. The CXL Flash Adapter Driver enables a
- user space application direct access to Flash storage.
-
- The CXL Flash Adapter Driver is a kernel module that sits in the
- SCSI stack as a low level device driver (below the SCSI disk and
- protocol drivers) for the IBM CXL Flash Adapter. This driver is
- responsible for the initialization of the adapter, setting up the
- special path for user space access, and performing error recovery. It
- communicates directly the Flash Accelerator Functional Unit (AFU)
- as described in Documentation/arch/powerpc/cxl.rst.
-
- The cxlflash driver supports two, mutually exclusive, modes of
- operation at the device (LUN) level:
-
- - Any flash device (LUN) can be configured to be accessed as a
- regular disk device (i.e.: /dev/sdc). This is the default mode.
-
- - Any flash device (LUN) can be configured to be accessed from
- user space with a special block library. This mode further
- specifies the means of accessing the device and provides for
- either raw access to the entire LUN (referred to as direct
- or physical LUN access) or access to a kernel/AFU-mediated
- partition of the LUN (referred to as virtual LUN access). The
- segmentation of a disk device into virtual LUNs is assisted
- by special translation services provided by the Flash AFU.
-
-Overview
-========
-
- The Coherent Accelerator Interface Architecture (CAIA) introduces a
- concept of a master context. A master typically has special privileges
- granted to it by the kernel or hypervisor allowing it to perform AFU
- wide management and control. The master may or may not be involved
- directly in each user I/O, but at the minimum is involved in the
- initial setup before the user application is allowed to send requests
- directly to the AFU.
-
- The CXL Flash Adapter Driver establishes a master context with the
- AFU. It uses memory mapped I/O (MMIO) for this control and setup. The
- Adapter Problem Space Memory Map looks like this::
-
- +-------------------------------+
- | 512 * 64 KB User MMIO |
- | (per context) |
- | User Accessible |
- +-------------------------------+
- | 512 * 128 B per context |
- | Provisioning and Control |
- | Trusted Process accessible |
- +-------------------------------+
- | 64 KB Global |
- | Trusted Process accessible |
- +-------------------------------+
-
- This driver configures itself into the SCSI software stack as an
- adapter driver. The driver is the only entity that is considered a
- Trusted Process to program the Provisioning and Control and Global
- areas in the MMIO Space shown above. The master context driver
- discovers all LUNs attached to the CXL Flash adapter and instantiates
- scsi block devices (/dev/sdb, /dev/sdc etc.) for each unique LUN
- seen from each path.
-
- Once these scsi block devices are instantiated, an application
- written to a specification provided by the block library may get
- access to the Flash from user space (without requiring a system call).
-
- This master context driver also provides a series of ioctls for this
- block library to enable this user space access. The driver supports
- two modes for accessing the block device.
-
- The first mode is called a virtual mode. In this mode a single scsi
- block device (/dev/sdb) may be carved up into any number of distinct
- virtual LUNs. The virtual LUNs may be resized as long as the sum of
- the sizes of all the virtual LUNs, along with the meta-data associated
- with it does not exceed the physical capacity.
-
- The second mode is called the physical mode. In this mode a single
- block device (/dev/sdb) may be opened directly by the block library
- and the entire space for the LUN is available to the application.
-
- Only the physical mode provides persistence of the data. i.e. The
- data written to the block device will survive application exit and
- restart and also reboot. The virtual LUNs do not persist (i.e. do
- not survive after the application terminates or the system reboots).
-
-
-Block library API
-=================
-
- Applications intending to get access to the CXL Flash from user
- space should use the block library, as it abstracts the details of
- interfacing directly with the cxlflash driver that are necessary for
- performing administrative actions (i.e.: setup, tear down, resize).
- The block library can be thought of as a 'user' of services,
- implemented as IOCTLs, that are provided by the cxlflash driver
- specifically for devices (LUNs) operating in user space access
- mode. While it is not a requirement that applications understand
- the interface between the block library and the cxlflash driver,
- a high-level overview of each supported service (IOCTL) is provided
- below.
-
- The block library can be found on GitHub:
- http://github.com/open-power/capiflash
-
-
-CXL Flash Driver LUN IOCTLs
-===========================
-
- Users, such as the block library, that wish to interface with a flash
- device (LUN) via user space access need to use the services provided
- by the cxlflash driver. As these services are implemented as ioctls,
- a file descriptor handle must first be obtained in order to establish
- the communication channel between a user and the kernel. This file
- descriptor is obtained by opening the device special file associated
- with the scsi disk device (/dev/sdb) that was created during LUN
- discovery. As per the location of the cxlflash driver within the
- SCSI protocol stack, this open is actually not seen by the cxlflash
- driver. Upon successful open, the user receives a file descriptor
- (herein referred to as fd1) that should be used for issuing the
- subsequent ioctls listed below.
-
- The structure definitions for these IOCTLs are available in:
- uapi/scsi/cxlflash_ioctl.h
-
-DK_CXLFLASH_ATTACH
-------------------
-
- This ioctl obtains, initializes, and starts a context using the CXL
- kernel services. These services specify a context id (u16) by which
- to uniquely identify the context and its allocated resources. The
- services additionally provide a second file descriptor (herein
- referred to as fd2) that is used by the block library to initiate
- memory mapped I/O (via mmap()) to the CXL flash device and poll for
- completion events. This file descriptor is intentionally installed by
- this driver and not the CXL kernel services to allow for intermediary
- notification and access in the event of a non-user-initiated close(),
- such as a killed process. This design point is described in further
- detail in the description for the DK_CXLFLASH_DETACH ioctl.
-
- There are a few important aspects regarding the "tokens" (context id
- and fd2) that are provided back to the user:
-
- - These tokens are only valid for the process under which they
- were created. The child of a forked process cannot continue
- to use the context id or file descriptor created by its parent
- (see DK_CXLFLASH_VLUN_CLONE for further details).
-
- - These tokens are only valid for the lifetime of the context and
- the process under which they were created. Once either is
- destroyed, the tokens are to be considered stale and subsequent
- usage will result in errors.
-
- - A valid adapter file descriptor (fd2 >= 0) is only returned on
- the initial attach for a context. Subsequent attaches to an
- existing context (DK_CXLFLASH_ATTACH_REUSE_CONTEXT flag present)
- do not provide the adapter file descriptor as it was previously
- made known to the application.
-
- - When a context is no longer needed, the user shall detach from
- the context via the DK_CXLFLASH_DETACH ioctl. When this ioctl
- returns with a valid adapter file descriptor and the return flag
- DK_CXLFLASH_APP_CLOSE_ADAP_FD is present, the application _must_
- close the adapter file descriptor following a successful detach.
-
- - When this ioctl returns with a valid fd2 and the return flag
- DK_CXLFLASH_APP_CLOSE_ADAP_FD is present, the application _must_
- close fd2 in the following circumstances:
-
- + Following a successful detach of the last user of the context
- + Following a successful recovery on the context's original fd2
- + In the child process of a fork(), following a clone ioctl,
- on the fd2 associated with the source context
-
- - At any time, a close on fd2 will invalidate the tokens. Applications
- should exercise caution to only close fd2 when appropriate (outlined
- in the previous bullet) to avoid premature loss of I/O.
-
-DK_CXLFLASH_USER_DIRECT
------------------------
- This ioctl is responsible for transitioning the LUN to direct
- (physical) mode access and configuring the AFU for direct access from
- user space on a per-context basis. Additionally, the block size and
- last logical block address (LBA) are returned to the user.
-
- As mentioned previously, when operating in user space access mode,
- LUNs may be accessed in whole or in part. Only one mode is allowed
- at a time and if one mode is active (outstanding references exist),
- requests to use the LUN in a different mode are denied.
-
- The AFU is configured for direct access from user space by adding an
- entry to the AFU's resource handle table. The index of the entry is
- treated as a resource handle that is returned to the user. The user
- is then able to use the handle to reference the LUN during I/O.
-
-DK_CXLFLASH_USER_VIRTUAL
-------------------------
- This ioctl is responsible for transitioning the LUN to virtual mode
- of access and configuring the AFU for virtual access from user space
- on a per-context basis. Additionally, the block size and last logical
- block address (LBA) are returned to the user.
-
- As mentioned previously, when operating in user space access mode,
- LUNs may be accessed in whole or in part. Only one mode is allowed
- at a time and if one mode is active (outstanding references exist),
- requests to use the LUN in a different mode are denied.
-
- The AFU is configured for virtual access from user space by adding
- an entry to the AFU's resource handle table. The index of the entry
- is treated as a resource handle that is returned to the user. The
- user is then able to use the handle to reference the LUN during I/O.
-
- By default, the virtual LUN is created with a size of 0. The user
- would need to use the DK_CXLFLASH_VLUN_RESIZE ioctl to adjust the grow
- the virtual LUN to a desired size. To avoid having to perform this
- resize for the initial creation of the virtual LUN, the user has the
- option of specifying a size as part of the DK_CXLFLASH_USER_VIRTUAL
- ioctl, such that when success is returned to the user, the
- resource handle that is provided is already referencing provisioned
- storage. This is reflected by the last LBA being a non-zero value.
-
- When a LUN is accessible from more than one port, this ioctl will
- return with the DK_CXLFLASH_ALL_PORTS_ACTIVE return flag set. This
- provides the user with a hint that I/O can be retried in the event
- of an I/O error as the LUN can be reached over multiple paths.
-
-DK_CXLFLASH_VLUN_RESIZE
------------------------
- This ioctl is responsible for resizing a previously created virtual
- LUN and will fail if invoked upon a LUN that is not in virtual
- mode. Upon success, an updated last LBA is returned to the user
- indicating the new size of the virtual LUN associated with the
- resource handle.
-
- The partitioning of virtual LUNs is jointly mediated by the cxlflash
- driver and the AFU. An allocation table is kept for each LUN that is
- operating in the virtual mode and used to program a LUN translation
- table that the AFU references when provided with a resource handle.
-
- This ioctl can return -EAGAIN if an AFU sync operation takes too long.
- In addition to returning a failure to user, cxlflash will also schedule
- an asynchronous AFU reset. Should the user choose to retry the operation,
- it is expected to succeed. If this ioctl fails with -EAGAIN, the user
- can either retry the operation or treat it as a failure.
-
-DK_CXLFLASH_RELEASE
--------------------
- This ioctl is responsible for releasing a previously obtained
- reference to either a physical or virtual LUN. This can be
- thought of as the inverse of the DK_CXLFLASH_USER_DIRECT or
- DK_CXLFLASH_USER_VIRTUAL ioctls. Upon success, the resource handle
- is no longer valid and the entry in the resource handle table is
- made available to be used again.
-
- As part of the release process for virtual LUNs, the virtual LUN
- is first resized to 0 to clear out and free the translation tables
- associated with the virtual LUN reference.
-
-DK_CXLFLASH_DETACH
-------------------
- This ioctl is responsible for unregistering a context with the
- cxlflash driver and release outstanding resources that were
- not explicitly released via the DK_CXLFLASH_RELEASE ioctl. Upon
- success, all "tokens" which had been provided to the user from the
- DK_CXLFLASH_ATTACH onward are no longer valid.
-
- When the DK_CXLFLASH_APP_CLOSE_ADAP_FD flag was returned on a successful
- attach, the application _must_ close the fd2 associated with the context
- following the detach of the final user of the context.
-
-DK_CXLFLASH_VLUN_CLONE
-----------------------
- This ioctl is responsible for cloning a previously created
- context to a more recently created context. It exists solely to
- support maintaining user space access to storage after a process
- forks. Upon success, the child process (which invoked the ioctl)
- will have access to the same LUNs via the same resource handle(s)
- as the parent, but under a different context.
-
- Context sharing across processes is not supported with CXL and
- therefore each fork must be met with establishing a new context
- for the child process. This ioctl simplifies the state management
- and playback required by a user in such a scenario. When a process
- forks, child process can clone the parents context by first creating
- a context (via DK_CXLFLASH_ATTACH) and then using this ioctl to
- perform the clone from the parent to the child.
-
- The clone itself is fairly simple. The resource handle and lun
- translation tables are copied from the parent context to the child's
- and then synced with the AFU.
-
- When the DK_CXLFLASH_APP_CLOSE_ADAP_FD flag was returned on a successful
- attach, the application _must_ close the fd2 associated with the source
- context (still resident/accessible in the parent process) following the
- clone. This is to avoid a stale entry in the file descriptor table of the
- child process.
-
- This ioctl can return -EAGAIN if an AFU sync operation takes too long.
- In addition to returning a failure to user, cxlflash will also schedule
- an asynchronous AFU reset. Should the user choose to retry the operation,
- it is expected to succeed. If this ioctl fails with -EAGAIN, the user
- can either retry the operation or treat it as a failure.
-
-DK_CXLFLASH_VERIFY
-------------------
- This ioctl is used to detect various changes such as the capacity of
- the disk changing, the number of LUNs visible changing, etc. In cases
- where the changes affect the application (such as a LUN resize), the
- cxlflash driver will report the changed state to the application.
-
- The user calls in when they want to validate that a LUN hasn't been
- changed in response to a check condition. As the user is operating out
- of band from the kernel, they will see these types of events without
- the kernel's knowledge. When encountered, the user's architected
- behavior is to call in to this ioctl, indicating what they want to
- verify and passing along any appropriate information. For now, only
- verifying a LUN change (ie: size different) with sense data is
- supported.
-
-DK_CXLFLASH_RECOVER_AFU
------------------------
- This ioctl is used to drive recovery (if such an action is warranted)
- of a specified user context. Any state associated with the user context
- is re-established upon successful recovery.
-
- User contexts are put into an error condition when the device needs to
- be reset or is terminating. Users are notified of this error condition
- by seeing all 0xF's on an MMIO read. Upon encountering this, the
- architected behavior for a user is to call into this ioctl to recover
- their context. A user may also call into this ioctl at any time to
- check if the device is operating normally. If a failure is returned
- from this ioctl, the user is expected to gracefully clean up their
- context via release/detach ioctls. Until they do, the context they
- hold is not relinquished. The user may also optionally exit the process
- at which time the context/resources they held will be freed as part of
- the release fop.
-
- When the DK_CXLFLASH_APP_CLOSE_ADAP_FD flag was returned on a successful
- attach, the application _must_ unmap and close the fd2 associated with the
- original context following this ioctl returning success and indicating that
- the context was recovered (DK_CXLFLASH_RECOVER_AFU_CONTEXT_RESET).
-
-DK_CXLFLASH_MANAGE_LUN
-----------------------
- This ioctl is used to switch a LUN from a mode where it is available
- for file-system access (legacy), to a mode where it is set aside for
- exclusive user space access (superpipe). In case a LUN is visible
- across multiple ports and adapters, this ioctl is used to uniquely
- identify each LUN by its World Wide Node Name (WWNN).
-
-
-CXL Flash Driver Host IOCTLs
-============================
-
- Each host adapter instance that is supported by the cxlflash driver
- has a special character device associated with it to enable a set of
- host management function. These character devices are hosted in a
- class dedicated for cxlflash and can be accessed via `/dev/cxlflash/*`.
-
- Applications can be written to perform various functions using the
- host ioctl APIs below.
-
- The structure definitions for these IOCTLs are available in:
- uapi/scsi/cxlflash_ioctl.h
-
-HT_CXLFLASH_LUN_PROVISION
--------------------------
- This ioctl is used to create and delete persistent LUNs on cxlflash
- devices that lack an external LUN management interface. It is only
- valid when used with AFUs that support the LUN provision capability.
-
- When sufficient space is available, LUNs can be created by specifying
- the target port to host the LUN and a desired size in 4K blocks. Upon
- success, the LUN ID and WWID of the created LUN will be returned and
- the SCSI bus can be scanned to detect the change in LUN topology. Note
- that partial allocations are not supported. Should a creation fail due
- to a space issue, the target port can be queried for its current LUN
- geometry.
-
- To remove a LUN, the device must first be disassociated from the Linux
- SCSI subsystem. The LUN deletion can then be initiated by specifying a
- target port and LUN ID. Upon success, the LUN geometry associated with
- the port will be updated to reflect new number of provisioned LUNs and
- available capacity.
-
- To query the LUN geometry of a port, the target port is specified and
- upon success, the following information is presented:
-
- - Maximum number of provisioned LUNs allowed for the port
- - Current number of provisioned LUNs for the port
- - Maximum total capacity of provisioned LUNs for the port (4K blocks)
- - Current total capacity of provisioned LUNs for the port (4K blocks)
-
- With this information, the number of available LUNs and capacity can be
- can be calculated.
-
-HT_CXLFLASH_AFU_DEBUG
----------------------
- This ioctl is used to debug AFUs by supporting a command pass-through
- interface. It is only valid when used with AFUs that support the AFU
- debug capability.
-
- With exception of buffer management, AFU debug commands are opaque to
- cxlflash and treated as pass-through. For debug commands that do require
- data transfer, the user supplies an adequately sized data buffer and must
- specify the data transfer direction with respect to the host. There is a
- maximum transfer size of 256K imposed. Note that partial read completions
- are not supported - when errors are experienced with a host read data
- transfer, the data buffer is not copied back to the user.
diff --git a/Documentation/arch/powerpc/firmware-assisted-dump.rst b/Documentation/arch/powerpc/firmware-assisted-dump.rst
index 7e37aadd1f77..7e266e749cd5 100644
--- a/Documentation/arch/powerpc/firmware-assisted-dump.rst
+++ b/Documentation/arch/powerpc/firmware-assisted-dump.rst
@@ -120,6 +120,28 @@ to ensure that crash data is preserved to process later.
e.g.
# echo 1 > /sys/firmware/opal/mpipl/release_core
+-- Support for Additional Kernel Arguments in Fadump
+ Fadump has a feature that allows passing additional kernel arguments
+ to the fadump kernel. This feature was primarily designed to disable
+ kernel functionalities that are not required for the fadump kernel
+ and to reduce its memory footprint while collecting the dump.
+
+ Command to Add Additional Kernel Parameters to Fadump:
+ e.g.
+ # echo "nr_cpus=16" > /sys/kernel/fadump/bootargs_append
+
+ The above command is sufficient to add additional arguments to fadump.
+ An explicit service restart is not required.
+
+ Command to Retrieve the Additional Fadump Arguments:
+ e.g.
+ # cat /sys/kernel/fadump/bootargs_append
+
+Note: Additional kernel arguments for fadump with HASH MMU are only
+      supported if the RMA size is greater than 768 MB. If the RMA
+      size is less than 768 MB, the kernel does not export the
+      /sys/kernel/fadump/bootargs_append sysfs node.
+
Implementation details:
-----------------------
diff --git a/Documentation/arch/powerpc/index.rst b/Documentation/arch/powerpc/index.rst
index 9749f6dc258f..995268530f21 100644
--- a/Documentation/arch/powerpc/index.rst
+++ b/Documentation/arch/powerpc/index.rst
@@ -13,7 +13,6 @@ powerpc
cpu_families
cpu_features
cxl
- cxlflash
dawr-power9
dexcr
dscr
diff --git a/Documentation/arch/powerpc/papr_hcalls.rst b/Documentation/arch/powerpc/papr_hcalls.rst
index 80d2c0aadab5..805e1cb9bab9 100644
--- a/Documentation/arch/powerpc/papr_hcalls.rst
+++ b/Documentation/arch/powerpc/papr_hcalls.rst
@@ -289,6 +289,17 @@ to be issued multiple times in order to be completely serviced. The
subsequent hcalls to the hypervisor until the hcall is completely serviced
at which point H_SUCCESS or other error is returned by the hypervisor.
+**H_HTM**
+
+| Input: flags, target, operation (op), op-param1, op-param2, op-param3
+| Out: *dumphtmbufferdata*
+| Return Value: *H_Success,H_Busy,H_LongBusyOrder,H_Partial,H_Parameter,
+ H_P2,H_P3,H_P4,H_P5,H_P6,H_State,H_Not_Available,H_Authority*
+
+H_HTM supports setup, configuration, control and dumping of the Hardware Trace
+Macro (HTM) function and its data. The HTM buffer stores tracing data for
+functions such as core instruction, core LLAT and nest.
+
References
==========
.. [1] "Power Architecture Platform Reference"
diff --git a/Documentation/arch/x86/boot.rst b/Documentation/arch/x86/boot.rst
index 76f53d3450e7..77e6163288db 100644
--- a/Documentation/arch/x86/boot.rst
+++ b/Documentation/arch/x86/boot.rst
@@ -1038,16 +1038,6 @@ Offset/size: 0x000c/4
This field contains maximal allowed type for setup_data and setup_indirect structs.
-The Image Checksum
-==================
-
-From boot protocol version 2.08 onwards the CRC-32 is calculated over
-the entire file using the characteristic polynomial 0x04C11DB7 and an
-initial remainder of 0xffffffff. The checksum is appended to the
-file; therefore the CRC of the file up to the limit specified in the
-syssize field of the header is always 0.
-
-
The Kernel Command Line
=======================
diff --git a/Documentation/arch/x86/usb-legacy-support.rst b/Documentation/arch/x86/usb-legacy-support.rst
index e01c08b7c981..b17bf122270a 100644
--- a/Documentation/arch/x86/usb-legacy-support.rst
+++ b/Documentation/arch/x86/usb-legacy-support.rst
@@ -20,11 +20,7 @@ It has several drawbacks, though:
features (wheel, extra buttons, touchpad mode) of the real PS/2 mouse may
not be available.
-2) If CONFIG_HIGHMEM64G is enabled, the PS/2 mouse emulation can cause
- system crashes, because the SMM BIOS is not expecting to be in PAE mode.
- The Intel E7505 is a typical machine where this happens.
-
-3) If AMD64 64-bit mode is enabled, again system crashes often happen,
+2) If AMD64 64-bit mode is enabled, again system crashes often happen,
because the SMM BIOS isn't expecting the CPU to be in 64-bit mode. The
BIOS manufacturers only test with Windows, and Windows doesn't do 64-bit
yet.
@@ -38,11 +34,6 @@ Problem 1)
compiled-in, too.
Problem 2)
- can currently only be solved by either disabling HIGHMEM64G
- in the kernel config or USB Legacy support in the BIOS. A BIOS update
- could help, but so far no such update exists.
-
-Problem 3)
is usually fixed by a BIOS update. Check the board
manufacturers web site. If an update is not available, disable USB
Legacy support in the BIOS. If this alone doesn't help, try also adding
diff --git a/Documentation/block/inline-encryption.rst b/Documentation/block/inline-encryption.rst
index 90b733422ed4..6380e6ab492b 100644
--- a/Documentation/block/inline-encryption.rst
+++ b/Documentation/block/inline-encryption.rst
@@ -77,10 +77,10 @@ Basic design
============
We introduce ``struct blk_crypto_key`` to represent an inline encryption key and
-how it will be used. This includes the actual bytes of the key; the size of the
-key; the algorithm and data unit size the key will be used with; and the number
-of bytes needed to represent the maximum data unit number the key will be used
-with.
+how it will be used. This includes the type of the key (raw or
+hardware-wrapped); the actual bytes of the key; the size of the key; the
+algorithm and data unit size the key will be used with; and the number of bytes
+needed to represent the maximum data unit number the key will be used with.
We introduce ``struct bio_crypt_ctx`` to represent an encryption context. It
contains a data unit number and a pointer to a blk_crypto_key. We add pointers
@@ -301,3 +301,250 @@ kernel will pretend that the device does not support hardware inline encryption
When the crypto API fallback is enabled, this means that all bios with an
encryption context will use the fallback, and IO will complete as usual. When
the fallback is disabled, a bio with an encryption context will be failed.
+
+.. _hardware_wrapped_keys:
+
+Hardware-wrapped keys
+=====================
+
+Motivation and threat model
+---------------------------
+
+Linux storage encryption (dm-crypt, fscrypt, eCryptfs, etc.) traditionally
+relies on the raw encryption key(s) being present in kernel memory so that the
+encryption can be performed. This traditionally isn't seen as a problem because
+the key(s) won't be present during an offline attack, which is the main type of
+attack that storage encryption is intended to protect from.
+
+However, there is an increasing desire to also protect users' data from other
+types of attacks (to the extent possible), including:
+
+- Cold boot attacks, where an attacker with physical access to a system suddenly
+ powers it off, then immediately dumps the system memory to extract recently
+ in-use encryption keys, then uses these keys to decrypt user data on-disk.
+
+- Online attacks where the attacker is able to read kernel memory without fully
+ compromising the system, followed by an offline attack where any extracted
+ keys can be used to decrypt user data on-disk. An example of such an online
+ attack would be if the attacker is able to run some code on the system that
+ exploits a Meltdown-like vulnerability but is unable to escalate privileges.
+
+- Online attacks where the attacker fully compromises the system, but their data
+ exfiltration is significantly time-limited and/or bandwidth-limited, so in
+ order to completely exfiltrate the data they need to extract the encryption
+ keys to use in a later offline attack.
+
+Hardware-wrapped keys are a feature of inline encryption hardware that is
+designed to protect users' data from the above attacks (to the extent possible),
+without introducing limitations such as a maximum number of keys.
+
+Note that it is impossible to **fully** protect users' data from these attacks.
+Even in the attacks where the attacker "just" gets read access to kernel memory,
+they can still extract any user data that is present in memory, including
+plaintext pagecache pages of encrypted files. The focus here is just on
+protecting the encryption keys, as those instantly give access to **all** user
+data in any following offline attack, rather than just some of it (where which
+data is included in that "some" might not be controlled by the attacker).
+
+Solution overview
+-----------------
+
+Inline encryption hardware typically has "keyslots" into which software can
+program keys for the hardware to use; the contents of keyslots typically can't
+be read back by software. As such, the above security goals could be achieved
+if the kernel simply erased its copy of the key(s) after programming them into
+keyslot(s) and thereafter only referred to them via keyslot number.
+
+However, that naive approach runs into a couple of problems:
+
+- It limits the number of unlocked keys to the number of keyslots, which
+ typically is a small number. In cases where there is only one encryption key
+ system-wide (e.g., a full-disk encryption key), that can be tolerable.
+ However, in general there can be many logged-in users with many different
+ keys, and/or many running applications with application-specific encrypted
+ storage areas. This is especially true if file-based encryption (e.g.
+ fscrypt) is being used.
+
+- Inline crypto engines typically lose the contents of their keyslots if the
+ storage controller (usually UFS or eMMC) is reset. Resetting the storage
+ controller is a standard error recovery procedure that is executed if certain
+ types of storage errors occur, and such errors can occur at any time.
+ Therefore, when inline crypto is being used, the operating system must always
+ be ready to reprogram the keyslots without user intervention.
+
+Thus, it is important for the kernel to still have a way to "remind" the
+hardware about a key, without actually having the raw key itself.
+
+Somewhat less importantly, it is also desirable that the raw keys are never
+visible to software at all, even while being initially unlocked. This would
+ensure that a read-only compromise of system memory will never allow a key to be
+extracted to be used off-system, even if it occurs when a key is being unlocked.
+
+To solve all these problems, some vendors of inline encryption hardware have
+made their hardware support *hardware-wrapped keys*. Hardware-wrapped keys
+are encrypted keys that can only be unwrapped (decrypted) and used by hardware
+-- either by the inline encryption hardware itself, or by a dedicated hardware
+block that can directly provision keys to the inline encryption hardware.
+
+(We refer to them as "hardware-wrapped keys" rather than simply "wrapped keys"
+to add some clarity in cases where there could be other types of wrapped keys,
+such as in file-based encryption. Key wrapping is a commonly used technique.)
+
+The key which wraps (encrypts) hardware-wrapped keys is a hardware-internal key
+that is never exposed to software; it is either a persistent key (a "long-term
+wrapping key") or a per-boot key (an "ephemeral wrapping key"). The long-term
+wrapped form of the key is what is initially unlocked, but it is erased from
+memory as soon as it is converted into an ephemerally-wrapped key. In-use
+hardware-wrapped keys are always ephemerally-wrapped, not long-term wrapped.
+
+As inline encryption hardware can only be used to encrypt/decrypt data on-disk,
+the hardware also includes a level of indirection; it doesn't use the unwrapped
+key directly for inline encryption, but rather derives both an inline encryption
+key and a "software secret" from it. Software can use the "software secret" for
+tasks that can't use the inline encryption hardware, such as filenames
+encryption. The software secret is not protected from memory compromise.
+
+Key hierarchy
+-------------
+
+Here is the key hierarchy for a hardware-wrapped key::
+
+ Hardware-wrapped key
+ |
+ |
+ <Hardware KDF>
+ |
+ -----------------------------
+ | |
+ Inline encryption key Software secret
+
+The components are:
+
+- *Hardware-wrapped key*: a key for the hardware's KDF (Key Derivation
+ Function), in ephemerally-wrapped form. The key wrapping algorithm is a
+ hardware implementation detail that doesn't impact kernel operation, but a
+ strong authenticated encryption algorithm such as AES-256-GCM is recommended.
+
+- *Hardware KDF*: a KDF (Key Derivation Function) which the hardware uses to
+ derive subkeys after unwrapping the wrapped key. The hardware's choice of KDF
+ doesn't impact kernel operation, but it does need to be known for testing
+ purposes, and it's also assumed to have at least a 256-bit security strength.
+ All known hardware uses the SP800-108 KDF in Counter Mode with AES-256-CMAC,
+ with a particular choice of labels and contexts; new hardware should use this
+ already-vetted KDF.
+
+- *Inline encryption key*: a derived key which the hardware directly provisions
+ to a keyslot of the inline encryption hardware, without exposing it to
+ software. In all known hardware, this will always be an AES-256-XTS key.
+ However, in principle other encryption algorithms could be supported too.
+ Hardware must derive distinct subkeys for each supported encryption algorithm.
+
+- *Software secret*: a derived key which the hardware returns to software so
+ that software can use it for cryptographic tasks that can't use inline
+ encryption. This value is cryptographically isolated from the inline
+ encryption key, i.e. knowing one doesn't reveal the other. (The KDF ensures
+ this.) Currently, the software secret is always 32 bytes and thus is suitable
+ for cryptographic applications that require up to a 256-bit security strength.
+ Some use cases (e.g. full-disk encryption) won't require the software secret.
+
+Example: in the case of fscrypt, the fscrypt master key (the key that protects a
+particular set of encrypted directories) is made hardware-wrapped. The inline
+encryption key is used as the file contents encryption key, while the software
+secret (rather than the master key directly) is used to key fscrypt's KDF
+(HKDF-SHA512) to derive other subkeys such as filenames encryption keys.
+
+Note that currently this design assumes a single inline encryption key per
+hardware-wrapped key, without any further key derivation. Thus, in the case of
+fscrypt, currently hardware-wrapped keys are only compatible with the "inline
+encryption optimized" settings, which use one file contents encryption key per
+encryption policy rather than one per file. This design could be extended to
+make the hardware derive per-file keys using per-file nonces passed down the
+storage stack, and in fact some hardware already supports this; future work is
+planned to remove this limitation by adding the corresponding kernel support.
+
+Kernel support
+--------------
+
+The inline encryption support of the kernel's block layer ("blk-crypto") has
+been extended to support hardware-wrapped keys as an alternative to raw keys,
+when hardware support is available. This works in the following way:
+
+- A ``key_types_supported`` field is added to the crypto capabilities in
+ ``struct blk_crypto_profile``. This allows device drivers to declare that
+ they support raw keys, hardware-wrapped keys, or both.
+
+- ``struct blk_crypto_key`` can now contain a hardware-wrapped key as an
+ alternative to a raw key; a ``key_type`` field is added to
+ ``struct blk_crypto_config`` to distinguish between the different key types.
+ This allows users of blk-crypto to en/decrypt data using a hardware-wrapped
+ key in a way very similar to using a raw key.
+
+- A new method ``blk_crypto_ll_ops::derive_sw_secret`` is added. Device drivers
+ that support hardware-wrapped keys must implement this method. Users of
+ blk-crypto can call ``blk_crypto_derive_sw_secret()`` to access this method.
+
+- The programming and eviction of hardware-wrapped keys happens via
+ ``blk_crypto_ll_ops::keyslot_program`` and
+ ``blk_crypto_ll_ops::keyslot_evict``, just like it does for raw keys. If a
+ driver supports hardware-wrapped keys, then it must handle hardware-wrapped
+ keys being passed to these methods.
+
+blk-crypto-fallback doesn't support hardware-wrapped keys. Therefore,
+hardware-wrapped keys can only be used with actual inline encryption hardware.
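+
+As a rough illustration, a driver that supports hardware-wrapped keys might
+register its capabilities as sketched below. The ``myhw_*`` names are
+hypothetical, and the exact spelling of the key-type flags and of the
+``derive_sw_secret`` prototype is an assumption here; see
+``include/linux/blk-crypto-profile.h`` in the kernel tree for the
+authoritative definitions::
+
+    /* Hypothetical driver snippet -- not a real driver. */
+    static int myhw_derive_sw_secret(struct blk_crypto_profile *profile,
+                                     const u8 *eph_key, size_t eph_key_size,
+                                     u8 sw_secret[BLK_CRYPTO_SW_SECRET_SIZE])
+    {
+        /* Hand the ephemerally-wrapped key to the hardware and receive
+         * the derived software secret back. */
+        return myhw_hw_derive_sw_secret(eph_key, eph_key_size, sw_secret);
+    }
+
+    static const struct blk_crypto_ll_ops myhw_crypto_ll_ops = {
+        .keyslot_program   = myhw_keyslot_program,
+        .keyslot_evict     = myhw_keyslot_evict,
+        .derive_sw_secret  = myhw_derive_sw_secret,
+    };
+
+    /* During probe, after initializing the crypto profile: */
+    profile->ll_ops = myhw_crypto_ll_ops;
+    profile->key_types_supported = BLK_CRYPTO_KEY_TYPE_RAW |
+                                   BLK_CRYPTO_KEY_TYPE_HW_WRAPPED;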
+
+All the above deals with hardware-wrapped keys in ephemerally-wrapped form only.
+To get such keys in the first place, new block device ioctls have been added to
+provide a generic interface to creating and preparing such keys:
+
+- ``BLKCRYPTOIMPORTKEY`` converts a raw key to long-term wrapped form. It takes
+ in a pointer to a ``struct blk_crypto_import_key_arg``. The caller must set
+ ``raw_key_ptr`` and ``raw_key_size`` to the pointer and size (in bytes) of the
+ raw key to import. On success, ``BLKCRYPTOIMPORTKEY`` returns 0 and writes
+ the resulting long-term wrapped key blob to the buffer pointed to by
+ ``lt_key_ptr``, which is of maximum size ``lt_key_size``. It also updates
+ ``lt_key_size`` to be the actual size of the key. On failure, it returns -1
+ and sets errno. An errno of ``EOPNOTSUPP`` indicates that the block device
+ does not support hardware-wrapped keys. An errno of ``EOVERFLOW`` indicates
+ that the output buffer did not have enough space for the key blob.
+
+- ``BLKCRYPTOGENERATEKEY`` is like ``BLKCRYPTOIMPORTKEY``, but it has the
+ hardware generate the key instead of importing one. It takes in a pointer to
+ a ``struct blk_crypto_generate_key_arg``.
+
+- ``BLKCRYPTOPREPAREKEY`` converts a key from long-term wrapped form to
+ ephemerally-wrapped form. It takes in a pointer to a ``struct
+ blk_crypto_prepare_key_arg``. The caller must set ``lt_key_ptr`` and
+ ``lt_key_size`` to the pointer and size (in bytes) of the long-term wrapped
+ key blob to convert. On success, ``BLKCRYPTOPREPAREKEY`` returns 0 and writes
+ the resulting ephemerally-wrapped key blob to the buffer pointed to by
+ ``eph_key_ptr``, which is of maximum size ``eph_key_size``. It also updates
+ ``eph_key_size`` to be the actual size of the key. On failure, it returns -1
+ and sets errno. Errno values of ``EOPNOTSUPP`` and ``EOVERFLOW`` mean the
+ same as they do for ``BLKCRYPTOIMPORTKEY``. An errno of ``EBADMSG`` indicates
+ that the long-term wrapped key is invalid.
+
+Userspace needs to use either ``BLKCRYPTOIMPORTKEY`` or ``BLKCRYPTOGENERATEKEY``
+once to create a key, and then ``BLKCRYPTOPREPAREKEY`` each time the key is
+unlocked and added to the kernel. Note that these ioctls have no relevance for
+raw keys; they are only for hardware-wrapped keys.
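+
+As a minimal userspace sketch, unlocking a previously-imported key could look
+roughly like the following. Error handling is trimmed, and the header that
+exposes these ioctl numbers and structs is an assumption here; consult the
+UAPI headers of the running kernel for the real definitions::
+
+    /* Hypothetical example -- convert a long-term wrapped key blob into
+     * ephemerally-wrapped form so it can be used with blk-crypto. */
+    #include <stdint.h>
+    #include <sys/ioctl.h>
+    #include <linux/blk-crypto.h>   /* assumed location of the definitions */
+
+    int prepare_key(int blkdev_fd, const void *lt_key, uint64_t lt_key_size,
+                    void *eph_key, uint64_t *eph_key_size)
+    {
+        struct blk_crypto_prepare_key_arg arg = {
+            .lt_key_ptr = (uintptr_t)lt_key,
+            .lt_key_size = lt_key_size,
+            .eph_key_ptr = (uintptr_t)eph_key,
+            .eph_key_size = *eph_key_size,
+        };
+
+        if (ioctl(blkdev_fd, BLKCRYPTOPREPAREKEY, &arg) != 0)
+            return -1;              /* errno describes the failure */
+
+        *eph_key_size = arg.eph_key_size;   /* actual key blob size */
+        return 0;
+    }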
+
+Testability
+-----------
+
+Both the hardware KDF and the inline encryption itself are well-defined
+algorithms that don't depend on any secrets other than the unwrapped key.
+Therefore, if the unwrapped key is known to software, these algorithms can be
+reproduced in software in order to verify the ciphertext that is written to disk
+by the inline encryption hardware.
+
+However, the unwrapped key will only be known to software for testing if the
+"import" functionality is used. Proper testing is not possible in the
+"generate" case where the hardware generates the key itself. The correct
+operation of the "generate" mode thus relies on the security and correctness of
+the hardware RNG and its use to generate the key, as well as the testing of the
+"import" mode as that should cover all parts other than the key generation.
+
+For an example of a test that verifies the ciphertext written to disk in the
+"import" mode, see the fscrypt hardware-wrapped key tests in xfstests, or
+`Android's vts_kernel_encryption_test
+<https://android.googlesource.com/platform/test/vts-testcase/kernel/+/refs/heads/main/encryption/>`_.
diff --git a/Documentation/conf.py b/Documentation/conf.py
index 0c2205d536b3..3dad1f90b098 100644
--- a/Documentation/conf.py
+++ b/Documentation/conf.py
@@ -47,7 +47,7 @@ from load_config import loadConfig
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
-needs_sphinx = '2.4.4'
+needs_sphinx = '3.4.3'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
diff --git a/Documentation/core-api/min_heap.rst b/Documentation/core-api/min_heap.rst
index 683bc6d09f00..9f57766581df 100644
--- a/Documentation/core-api/min_heap.rst
+++ b/Documentation/core-api/min_heap.rst
@@ -47,8 +47,8 @@ Example:
#define MIN_HEAP_PREALLOCATED(_type, _name, _nr)
struct _name {
- int nr; /* Number of elements in the heap */
- int size; /* Maximum number of elements that can be held */
+ size_t nr; /* Number of elements in the heap */
+ size_t size; /* Maximum number of elements that can be held */
_type *data; /* Pointer to the heap data */
_type preallocated[_nr]; /* Static preallocated array */
}
diff --git a/Documentation/core-api/printk-formats.rst b/Documentation/core-api/printk-formats.rst
index ecccc0473da9..4bdc394e86af 100644
--- a/Documentation/core-api/printk-formats.rst
+++ b/Documentation/core-api/printk-formats.rst
@@ -661,7 +661,7 @@ Do *not* use it from C.
Thanks
======
-If you add other %p extensions, please extend <lib/test_printf.c> with
-one or more test cases, if at all feasible.
+If you add other %p extensions, please extend <lib/tests/printf_kunit.c>
+with one or more test cases, if at all feasible.
Thank you for your cooperation and attention.
diff --git a/Documentation/core-api/this_cpu_ops.rst b/Documentation/core-api/this_cpu_ops.rst
index 91acbcf30e9b..533ac5dd5750 100644
--- a/Documentation/core-api/this_cpu_ops.rst
+++ b/Documentation/core-api/this_cpu_ops.rst
@@ -138,12 +138,22 @@ get_cpu/put_cpu sequence requires. No processor number is
available. Instead, the offset of the local per cpu area is simply
added to the per cpu offset.
-Note that this operation is usually used in a code segment when
-preemption has been disabled. The pointer is then used to
-access local per cpu data in a critical section. When preemption
-is re-enabled this pointer is usually no longer useful since it may
-no longer point to per cpu data of the current processor.
-
+Note that this operation can only be used in code segments where
+smp_processor_id() may be used, for example, where preemption has been
+disabled. The pointer is then used to access local per cpu data in a
+critical section. When preemption is re-enabled this pointer is usually
+no longer useful since it may no longer point to per cpu data of the
+current processor.
+
+The special cases where it makes sense to obtain a per-CPU pointer in
+preemptible code are addressed by raw_cpu_ptr(), but such use cases need
+to handle cases where two different CPUs are accessing the same per cpu
+variable, which might well be that of a third CPU. These use cases are
+typically performance optimizations. For example, SRCU implements a pair
+of counters as a pair of per-CPU variables, and srcu_read_lock_nmisafe()
+uses raw_cpu_ptr() to get a pointer to some CPU's counter, and uses
+atomic_long_inc() to handle migration between the raw_cpu_ptr() and
+the atomic_long_inc().
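+
+A minimal sketch of this pattern (the names below are illustrative and are not
+taken from the actual SRCU implementation)::
+
+    /* One counter per CPU; any CPU may increment any CPU's counter. */
+    static DEFINE_PER_CPU(atomic_long_t, my_counter);
+
+    void my_count_event(void)
+    {
+        /*
+         * Preemption may be enabled here, so by the time the increment
+         * happens this pointer may refer to some other CPU's counter.
+         * That is tolerated because the increment itself is atomic.
+         */
+        atomic_long_t *c = raw_cpu_ptr(&my_counter);
+
+        atomic_long_inc(c);
+    }
+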
Per cpu variables and offsets
-----------------------------
diff --git a/Documentation/dev-tools/kcsan.rst b/Documentation/dev-tools/kcsan.rst
index d81c42d1063e..8575178aa87f 100644
--- a/Documentation/dev-tools/kcsan.rst
+++ b/Documentation/dev-tools/kcsan.rst
@@ -203,7 +203,7 @@ they happen concurrently in different threads, and at least one of them is a
least one is a write. For a more thorough discussion and definition, see `"Plain
Accesses and Data Races" in the LKMM`_.
-.. _"Plain Accesses and Data Races" in the LKMM: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/tools/memory-model/Documentation/explanation.txt#n1922
+.. _"Plain Accesses and Data Races" in the LKMM: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/tools/memory-model/Documentation/explanation.txt?id=8f6629c004b193d23612641c3607e785819e97ab#n2164
Relationship with the Linux-Kernel Memory Consistency Model (LKMM)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
diff --git a/Documentation/dev-tools/kselftest.rst b/Documentation/dev-tools/kselftest.rst
index fdb1df86783a..18c2da67fae4 100644
--- a/Documentation/dev-tools/kselftest.rst
+++ b/Documentation/dev-tools/kselftest.rst
@@ -347,7 +347,7 @@ kselftest. We use kselftests for lib/ as an example.
1. Create the test module
2. Create the test script that will run (load/unload) the module
- e.g. ``tools/testing/selftests/lib/printf.sh``
+ e.g. ``tools/testing/selftests/lib/bitmap.sh``
3. Add line to config file e.g. ``tools/testing/selftests/lib/config``
diff --git a/Documentation/devicetree/bindings/arm/apple.yaml b/Documentation/devicetree/bindings/arm/apple.yaml
index dc9aab19ff11..da60e9de1cfb 100644
--- a/Documentation/devicetree/bindings/arm/apple.yaml
+++ b/Documentation/devicetree/bindings/arm/apple.yaml
@@ -57,6 +57,25 @@ description: |
- iPad Pro (2nd Generation) (10.5 Inch)
- iPad Pro (2nd Generation) (12.9 Inch)
+ Devices based on the "T2" SoC:
+
+ - Apple T2 MacBookPro15,2 (j132)
+ - Apple T2 iMacPro1,1 (j137)
+ - Apple T2 MacBookAir8,2 (j140a)
+ - Apple T2 MacBookAir8,1 (j140k)
+ - Apple T2 MacBookPro16,1 (j152f)
+ - Apple T2 MacPro7,1 (j160)
+ - Apple T2 Macmini8,1 (j174)
+ - Apple T2 iMac20,1 (j185)
+ - Apple T2 iMac20,2 (j185f)
+ - Apple T2 MacBookPro15,4 (j213)
+ - Apple T2 MacBookPro16,2 (j214k)
+ - Apple T2 MacBookPro16,4 (j215)
+ - Apple T2 MacBookPro16,3 (j223)
+ - Apple T2 MacBookAir9,1 (j230k)
+ - Apple T2 MacBookPro15,1 (j680)
+ - Apple T2 MacBookPro15,3 (j780)
+
Devices based on the "A11" SoC:
- iPhone 8
@@ -211,6 +230,28 @@ properties:
- const: apple,t8011
- const: apple,arm-platform
+ - description: Apple T2 SoC based platforms
+ items:
+ - enum:
+ - apple,j132 # Apple T2 MacBookPro15,2 (j132)
+ - apple,j137 # Apple T2 iMacPro1,1 (j137)
+ - apple,j140a # Apple T2 MacBookAir8,2 (j140a)
+ - apple,j140k # Apple T2 MacBookAir8,1 (j140k)
+ - apple,j152f # Apple T2 MacBookPro16,1 (j152f)
+ - apple,j160 # Apple T2 MacPro7,1 (j160)
+ - apple,j174 # Apple T2 Macmini8,1 (j174)
+ - apple,j185 # Apple T2 iMac20,1 (j185)
+ - apple,j185f # Apple T2 iMac20,2 (j185f)
+ - apple,j213 # Apple T2 MacBookPro15,4 (j213)
+ - apple,j214k # Apple T2 MacBookPro16,2 (j214k)
+ - apple,j215 # Apple T2 MacBookPro16,4 (j215)
+ - apple,j223 # Apple T2 MacBookPro16,3 (j223)
+ - apple,j230k # Apple T2 MacBookAir9,1 (j230k)
+ - apple,j680 # Apple T2 MacBookPro15,1 (j680)
+ - apple,j780 # Apple T2 MacBookPro15,3 (j780)
+ - const: apple,t8012
+ - const: apple,arm-platform
+
- description: Apple A11 SoC based platforms
items:
- enum:
diff --git a/Documentation/devicetree/bindings/arm/apple/apple,pmgr.yaml b/Documentation/devicetree/bindings/arm/apple/apple,pmgr.yaml
index 673277a7a224..5001f4d5a0dc 100644
--- a/Documentation/devicetree/bindings/arm/apple/apple,pmgr.yaml
+++ b/Documentation/devicetree/bindings/arm/apple/apple,pmgr.yaml
@@ -22,6 +22,11 @@ properties:
compatible:
items:
- enum:
+ - apple,s5l8960x-pmgr
+ - apple,t7000-pmgr
+ - apple,s8000-pmgr
+ - apple,t8010-pmgr
+ - apple,t8015-pmgr
- apple,t8103-pmgr
- apple,t8112-pmgr
- apple,t6000-pmgr
diff --git a/Documentation/devicetree/bindings/arm/arm,morello.yaml b/Documentation/devicetree/bindings/arm/arm,morello.yaml
new file mode 100644
index 000000000000..e843b97fa485
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/arm,morello.yaml
@@ -0,0 +1,35 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/arm/arm,morello.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: ARM Morello Platforms
+
+maintainers:
+ - Vincenzo Frascino <vincenzo.frascino@arm.com>
+
+description: |+
+ The Morello architecture is an experimental extension to Armv8.2-A,
+ which extends the AArch64 state with the principles proposed in
+ version 7 of the Capability Hardware Enhanced RISC Instructions
+ (CHERI) ISA.
+
+  ARM's Morello Platforms are built as a research project to explore
+  capability architectures based on the ARM architecture.
+
+properties:
+ $nodename:
+ const: '/'
+ compatible:
+ oneOf:
+ - description: Arm Morello System Platforms
+ items:
+ - enum:
+ - arm,morello-sdp
+ - arm,morello-fvp
+ - const: arm,morello
+
+additionalProperties: true
+
+...
diff --git a/Documentation/devicetree/bindings/arm/atmel-at91.yaml b/Documentation/devicetree/bindings/arm/atmel-at91.yaml
index 0ec29366e6c2..3a34b7a2e8d4 100644
--- a/Documentation/devicetree/bindings/arm/atmel-at91.yaml
+++ b/Documentation/devicetree/bindings/arm/atmel-at91.yaml
@@ -23,8 +23,6 @@ properties:
- const: atmel,at91rm9200
- items:
- enum:
- - olimex,sam9-l9260
- - enum:
- atmel,at91sam9260
- atmel,at91sam9261
- atmel,at91sam9263
@@ -36,6 +34,37 @@ properties:
- atmel,at91sam9x60
- const: atmel,at91sam9
+ - description: Olimex SAM9-L9260
+ items:
+ - const: olimex,sam9-l9260
+ - const: atmel,at91sam9260
+ - const: atmel,at91sam9
+
+ - description: Calao USB A9260
+ items:
+ - const: calao,usb-a9260
+ - const: atmel,at91sam9260
+ - const: atmel,at91sam9
+
+ - description: Calao USB A9263
+ items:
+ - const: calao,usb-a9263
+ - const: atmel,at91sam9263
+ - const: atmel,at91sam9
+
+ - description: Calao USB A9G20
+ items:
+ - const: calao,usb-a9g20
+ - const: atmel,at91sam9g20
+ - const: atmel,at91sam9
+
+ - description: Calao USB A9G20-LPW
+ items:
+ - const: calao,usb-a9g20-lpw
+ - const: calao,usb-a9g20
+ - const: atmel,at91sam9g20
+ - const: atmel,at91sam9
+
- items:
- enum:
- overkiz,kizboxmini-base # Overkiz kizbox Mini Base Board
diff --git a/Documentation/devicetree/bindings/arm/atmel-sysregs.txt b/Documentation/devicetree/bindings/arm/atmel-sysregs.txt
index 1a173e92bb13..d3821f651e72 100644
--- a/Documentation/devicetree/bindings/arm/atmel-sysregs.txt
+++ b/Documentation/devicetree/bindings/arm/atmel-sysregs.txt
@@ -2,6 +2,7 @@ Atmel system registers
Chipid required properties:
- compatible: Should be "atmel,sama5d2-chipid" or "microchip,sama7g5-chipid"
+ "microchip,sama7d65-chipid"
- reg : Should contain registers location and length
PIT Timer required properties:
diff --git a/Documentation/devicetree/bindings/arm/cpus.yaml b/Documentation/devicetree/bindings/arm/cpus.yaml
index 73dd73d2d4fa..2e666b2a4dcd 100644
--- a/Documentation/devicetree/bindings/arm/cpus.yaml
+++ b/Documentation/devicetree/bindings/arm/cpus.yaml
@@ -177,6 +177,7 @@ properties:
- arm,neoverse-v2
- arm,neoverse-v3
- arm,neoverse-v3ae
+ - arm,rainier
- brcm,brahma-b15
- brcm,brahma-b53
- brcm,vulcan
diff --git a/Documentation/devicetree/bindings/arm/fsl.yaml b/Documentation/devicetree/bindings/arm/fsl.yaml
index 0db2cbd7891f..1b90870958a2 100644
--- a/Documentation/devicetree/bindings/arm/fsl.yaml
+++ b/Documentation/devicetree/bindings/arm/fsl.yaml
@@ -97,6 +97,7 @@ properties:
- i2se,duckbill
- i2se,duckbill-2
- karo,tx28 # Ka-Ro electronics TX28 module
+ - lwn,imx28-btt3
- lwn,imx28-xea
- msr,m28cu3 # M28 SoM with custom base board
- schulercontrol,imx28-sps1
@@ -296,7 +297,6 @@ properties:
- technexion,imx6q-pico-pi # TechNexion i.MX6Q Pico-Pi
- technologic,imx6q-ts4900
- technologic,imx6q-ts7970
- - toradex,apalis_imx6q # Apalis iMX6 Modules
- udoo,imx6q-udoo # Udoo i.MX6 Quad Board
- uniwest,imx6q-evi # Uniwest Evi
- variscite,dt6customboard
@@ -490,7 +490,6 @@ properties:
- technexion,imx6dl-pico-pi # TechNexion i.MX6DL Pico-Pi
- technologic,imx6dl-ts4900
- technologic,imx6dl-ts7970
- - toradex,colibri_imx6dl # Colibri iMX6 Modules
- udoo,imx6dl-udoo # Udoo i.MX6 Dual-lite Board
- vdl,lanmcu # Van der Laan LANMCU board
- wand,imx6dl-wandboard # Wandboard i.MX6 Dual Lite Board
@@ -688,6 +687,12 @@ properties:
- const: phytec,imx6ul-pcl063 # PHYTEC phyCORE-i.MX 6UL
- const: fsl,imx6ul
+ - description: i.MX6UL Variscite VAR-SOM-MX6 Boards
+ items:
+ - const: variscite,mx6ulconcerto
+ - const: variscite,var-som-imx6ul
+ - const: fsl,imx6ul
+
- description: Kontron BL i.MX6UL (N631X S) Board
items:
- const: kontron,bl-imx6ul # Kontron BL i.MX6UL Carrier Board
@@ -730,9 +735,6 @@ properties:
- joz,jozacp # JOZ Access Point
- kontron,sl-imx6ull # Kontron SL i.MX6ULL SoM
- myir,imx6ull-mys-6ulx-eval # MYiR Tech iMX6ULL Evaluation Board
- - toradex,colibri-imx6ull # Colibri iMX6ULL Modules
- - toradex,colibri-imx6ull-emmc # Colibri iMX6ULL 1GB (eMMC) Module
- - toradex,colibri-imx6ull-wifi # Colibri iMX6ULL Wi-Fi / BT Modules
- uni-t,uti260b # UNI-T UTi260B Thermal Camera
- const: fsl,imx6ull
@@ -891,8 +893,6 @@ properties:
- technexion,imx7d-pico-hobbit # TechNexion i.MX7D Pico-Hobbit
- technexion,imx7d-pico-nymph # TechNexion i.MX7D Pico-Nymph
- technexion,imx7d-pico-pi # TechNexion i.MX7D Pico-Pi
- - toradex,colibri-imx7d # Colibri iMX7D Module
- - toradex,colibri-imx7d-emmc # Colibri iMX7D 1GB (eMMC) Module
- zii,imx7d-rmu2 # ZII RMU2 Board
- zii,imx7d-rpu2 # ZII RPU2 Board
- const: fsl,imx7d
@@ -962,9 +962,6 @@ properties:
- innocomm,wb15-evk # i.MX8MM Innocomm EVK board with WB15 SoM
- kontron,imx8mm-sl # i.MX8MM Kontron SL (N801X) SOM
- kontron,imx8mm-osm-s # i.MX8MM Kontron OSM-S (N802X) SOM
- - toradex,verdin-imx8mm # Verdin iMX8M Mini Modules
- - toradex,verdin-imx8mm-nonwifi # Verdin iMX8M Mini Modules without Wi-Fi / BT
- - toradex,verdin-imx8mm-wifi # Verdin iMX8M Mini Wi-Fi / BT Modules
- prt,prt8mm # i.MX8MM Protonic PRT8MM Board
- const: fsl,imx8mm
@@ -1098,12 +1095,12 @@ properties:
- gateworks,imx8mp-gw74xx # i.MX8MP Gateworks Board
- gateworks,imx8mp-gw75xx-2x # i.MX8MP Gateworks Board
- gateworks,imx8mp-gw82xx-2x # i.MX8MP Gateworks Board
+ - skov,imx8mp-skov-basic # SKOV i.MX8MP baseboard without frontplate
- skov,imx8mp-skov-revb-hdmi # SKOV i.MX8MP climate control without panel
- skov,imx8mp-skov-revb-lt6 # SKOV i.MX8MP climate control with 7†panel
- skov,imx8mp-skov-revb-mi1010ait-1cp1 # SKOV i.MX8MP climate control with 10.1" panel
- - toradex,verdin-imx8mp # Verdin iMX8M Plus Modules
- - toradex,verdin-imx8mp-nonwifi # Verdin iMX8M Plus Modules without Wi-Fi / BT
- - toradex,verdin-imx8mp-wifi # Verdin iMX8M Plus Wi-Fi / BT Modules
+ - skov,imx8mp-skov-revc-bd500 # SKOV i.MX8MP climate control with LED frontplate
+ - skov,imx8mp-skov-revc-tian-g07017 # SKOV i.MX8MP climate control with 7" panel
- ysoft,imx8mp-iota2-lumpy # Y Soft i.MX8MP IOTA2 Lumpy Board
- const: fsl,imx8mp
@@ -1273,8 +1270,6 @@ properties:
- enum:
- fsl,imx8qm-mek # i.MX8QM MEK Board
- fsl,imx8qm-mek-revd # i.MX8QM MEK Rev D Board
- - toradex,apalis-imx8 # Apalis iMX8 Modules
- - toradex,apalis-imx8-v1.1 # Apalis iMX8 V1.1 Modules
- const: fsl,imx8qm
- description: i.MX8QM Boards with Toradex Apalis iMX8 Modules
@@ -1355,6 +1350,7 @@ properties:
- description: i.MX95 based Boards
items:
- enum:
+ - fsl,imx95-15x15-evk # i.MX95 15x15 EVK Board
- fsl,imx95-19x19-evk # i.MX95 19x19 EVK Board
- const: fsl,imx95
@@ -1435,7 +1431,6 @@ properties:
- fsl,vf610-twr # VF610 Tower Board
- lwn,bk4 # Liebherr BK4 controller
- phytec,vf610-cosmic # PHYTEC Cosmic/Cosmic+ Board
- - toradex,vf610-colibri_vf61 # Colibri VF61 Modules
- const: fsl,vf610
- description: Toradex Colibri VF61 Module on Colibri Evaluation Board
diff --git a/Documentation/devicetree/bindings/arm/google.yaml b/Documentation/devicetree/bindings/arm/google.yaml
index e20b5c9b16bc..99961e5282e5 100644
--- a/Documentation/devicetree/bindings/arm/google.yaml
+++ b/Documentation/devicetree/bindings/arm/google.yaml
@@ -34,10 +34,11 @@ properties:
const: '/'
compatible:
oneOf:
- - description: Google Pixel 6 / Oriole
+ - description: Google Pixel 6 or 6 Pro (Oriole or Raven)
items:
- enum:
- google,gs101-oriole
+ - google,gs101-raven
- const: google,gs101
# Bootloader requires empty ect node to be present
diff --git a/Documentation/devicetree/bindings/arm/marvell/armada-37xx.yaml b/Documentation/devicetree/bindings/arm/marvell/armada-37xx.yaml
index 6905d29f3108..51e1386f0e01 100644
--- a/Documentation/devicetree/bindings/arm/marvell/armada-37xx.yaml
+++ b/Documentation/devicetree/bindings/arm/marvell/armada-37xx.yaml
@@ -18,6 +18,7 @@ properties:
items:
- enum:
- cznic,turris-mox
+ - glinet,gl-mv1000
- globalscale,espressobin
- marvell,armada-3720-db
- methode,edpu
diff --git a/Documentation/devicetree/bindings/arm/marvell/armada-7k-8k.yaml b/Documentation/devicetree/bindings/arm/marvell/armada-7k-8k.yaml
index 538d91be8857..4bc7454a5d3a 100644
--- a/Documentation/devicetree/bindings/arm/marvell/armada-7k-8k.yaml
+++ b/Documentation/devicetree/bindings/arm/marvell/armada-7k-8k.yaml
@@ -23,6 +23,9 @@ properties:
- description: Armada 7040 SoC
items:
+ - enum:
+ - globalscale,mochabin
+ - marvell,armada7040-db
- const: marvell,armada7040
- const: marvell,armada-ap806-quad
- const: marvell,armada-ap806
@@ -35,10 +38,32 @@ properties:
- description: Armada 8040 SoC
items:
+ - enum:
+ - iei,puzzle-m801
+ - marvell,armada8040-db
+ - solidrun,clearfog-gt-8k
- const: marvell,armada8040
- const: marvell,armada-ap806-quad
- const: marvell,armada-ap806
+ - description: Armada 8040 SoC MACCHIATOBin Boards
+ items:
+ - enum:
+ - marvell,armada8040-mcbin-doubleshot
+ - marvell,armada8040-mcbin-singleshot
+ - const: marvell,armada8040-mcbin
+ - const: marvell,armada8040
+ - const: marvell,armada-ap806-quad
+ - const: marvell,armada-ap806
+
+ - description: Armada 8080 SoC
+ items:
+ - enum:
+ - marvell,armada-8080-db
+ - const: marvell,armada-8080
+ - const: marvell,armada-ap810-octa
+ - const: marvell,armada-ap810
+
- description: Armada CN9130 SoC with no external CP
items:
- const: marvell,cn9130
diff --git a/Documentation/devicetree/bindings/arm/marvell/armada-8kp.txt b/Documentation/devicetree/bindings/arm/marvell/armada-8kp.txt
deleted file mode 100644
index f3e9624534c6..000000000000
--- a/Documentation/devicetree/bindings/arm/marvell/armada-8kp.txt
+++ /dev/null
@@ -1,15 +0,0 @@
-Marvell Armada 8KPlus Platforms Device Tree Bindings
-----------------------------------------------------
-
-Boards using a SoC of the Marvell Armada 8KP families must carry
-the following root node property:
-
- - compatible, with one of the following values:
-
- - "marvell,armada-8080", "marvell,armada-ap810-octa", "marvell,armada-ap810"
- when the SoC being used is the Armada 8080
-
-Example:
-
-compatible = "marvell,armada-8080-db", "marvell,armada-8080",
- "marvell,armada-ap810-octa", "marvell,armada-ap810"
diff --git a/Documentation/devicetree/bindings/arm/mediatek.yaml b/Documentation/devicetree/bindings/arm/mediatek.yaml
index 3ce34d68c213..108ae5e0185d 100644
--- a/Documentation/devicetree/bindings/arm/mediatek.yaml
+++ b/Documentation/devicetree/bindings/arm/mediatek.yaml
@@ -414,6 +414,11 @@ properties:
- const: mediatek,mt8365
- items:
- enum:
+ - mediatek,mt8370-evk
+ - const: mediatek,mt8370
+ - const: mediatek,mt8188
+ - items:
+ - enum:
- mediatek,mt8390-evk
- const: mediatek,mt8390
- const: mediatek,mt8188
diff --git a/Documentation/devicetree/bindings/arm/pmu.yaml b/Documentation/devicetree/bindings/arm/pmu.yaml
index a148ff54f2b8..295963a3cae7 100644
--- a/Documentation/devicetree/bindings/arm/pmu.yaml
+++ b/Documentation/devicetree/bindings/arm/pmu.yaml
@@ -67,6 +67,7 @@ properties:
- arm,neoverse-v2-pmu
- arm,neoverse-v3-pmu
- arm,neoverse-v3ae-pmu
+ - arm,rainier-pmu
- brcm,vulcan-pmu
- cavium,thunder-pmu
- nvidia,denver-pmu
diff --git a/Documentation/devicetree/bindings/arm/rockchip.yaml b/Documentation/devicetree/bindings/arm/rockchip.yaml
index 522a6f0450ea..650fb833d96e 100644
--- a/Documentation/devicetree/bindings/arm/rockchip.yaml
+++ b/Documentation/devicetree/bindings/arm/rockchip.yaml
@@ -49,6 +49,11 @@ properties:
- anbernic,rg-arc-s
- const: rockchip,rk3566
+ - description: Ariaboard Photonicat
+ items:
+ - const: ariaboard,photonicat
+ - const: rockchip,rk3568
+
- description: ArmSoM Sige5 board
items:
- const: armsom,sige5
@@ -178,6 +183,13 @@ properties:
- const: engicam,px30-core
- const: rockchip,px30
+ - description: Firefly iCore-3588Q-based boards
+ items:
+ - enum:
+ - mntre,reform2-rcore
+ - const: firefly,icore-3588q
+ - const: rockchip,rk3588
+
- description: Firefly Core-3588J-based boards
items:
- enum:
@@ -236,6 +248,11 @@ properties:
- firefly,roc-rk3399-pc-plus
- const: rockchip,rk3399
+ - description: Firefly ROC-RK3576-PC
+ items:
+ - const: firefly,roc-rk3576-pc
+ - const: rockchip,rk3576
+
- description: Firefly Station M2
items:
- const: firefly,rk3566-roc-pc
@@ -862,6 +879,11 @@ properties:
- const: radxa,rock-4c-plus
- const: rockchip,rk3399
+ - description: Radxa ROCK 4D
+ items:
+ - const: radxa,rock-4d
+ - const: rockchip,rk3576
+
- description: Radxa ROCK 4SE
items:
- const: radxa,rock-4se
@@ -1136,11 +1158,12 @@ properties:
- const: xunlong,orangepi-3b
- const: rockchip,rk3566
- - description: Xunlong Orange Pi 5 Max/Plus
+ - description: Xunlong Orange Pi 5 Max/Plus/Ultra
items:
- enum:
- xunlong,orangepi-5-max
- xunlong,orangepi-5-plus
+ - xunlong,orangepi-5-ultra
- const: rockchip,rk3588
- description: Xunlong Orange Pi R1 Plus / LTS
diff --git a/Documentation/devicetree/bindings/arm/stm32/st,stm32-syscon.yaml b/Documentation/devicetree/bindings/arm/stm32/st,stm32-syscon.yaml
index d083d8ad48b7..ed97652c8492 100644
--- a/Documentation/devicetree/bindings/arm/stm32/st,stm32-syscon.yaml
+++ b/Documentation/devicetree/bindings/arm/stm32/st,stm32-syscon.yaml
@@ -21,6 +21,8 @@ properties:
- st,stm32f4-gcan
- st,stm32mp151-pwr-mcu
- st,stm32mp157-syscfg
+ - st,stm32mp21-syscfg
+ - st,stm32mp23-syscfg
- st,stm32mp25-syscfg
- const: syscon
- items:
diff --git a/Documentation/devicetree/bindings/arm/stm32/stm32.yaml b/Documentation/devicetree/bindings/arm/stm32/stm32.yaml
index b6c56d4ce6b9..5fee2f38ff25 100644
--- a/Documentation/devicetree/bindings/arm/stm32/stm32.yaml
+++ b/Documentation/devicetree/bindings/arm/stm32/stm32.yaml
@@ -51,9 +51,16 @@ properties:
- st,stm32mp135f-dk
- const: st,stm32mp135
+ - description: ST STM32MP133 based Boards
+ items:
+ - enum:
+ - pri,prihmb # Priva E-Measuringbox board
+ - const: st,stm32mp133
+
- description: ST STM32MP151 based Boards
items:
- enum:
+ - ply,plyaqm # Plymovent AQM board
- prt,mecio1r0 # Protonic MECIO1r0
- prt,mect1s # Protonic MECT1S
- prt,prtt1a # Protonic PRTT1A
@@ -94,6 +101,8 @@ properties:
- description: Octavo OSD32MP153 System-in-Package based boards
items:
- enum:
+ - lxa,stm32mp153c-fairytux2-gen1 # Linux Automation FairyTux 2 (Generation 1)
+ - lxa,stm32mp153c-fairytux2-gen2 # Linux Automation FairyTux 2 (Generation 2)
- lxa,stm32mp153c-tac-gen3 # Linux Automation TAC (Generation 3)
- const: oct,stm32mp153x-osd32
- const: st,stm32mp153
@@ -178,9 +187,22 @@ properties:
- description: ST STM32MP257 based Boards
items:
- enum:
+ - st,stm32mp257f-dk
- st,stm32mp257f-ev1
- const: st,stm32mp257
+ - description: ST STM32MP235 based Boards
+ items:
+ - enum:
+ - st,stm32mp235f-dk
+ - const: st,stm32mp235
+
+ - description: ST STM32MP215 based Boards
+ items:
+ - enum:
+ - st,stm32mp215f-dk
+ - const: st,stm32mp215
+
additionalProperties: true
...
diff --git a/Documentation/devicetree/bindings/arm/sunxi.yaml b/Documentation/devicetree/bindings/arm/sunxi.yaml
index 046536d02706..f536cdd2c1a6 100644
--- a/Documentation/devicetree/bindings/arm/sunxi.yaml
+++ b/Documentation/devicetree/bindings/arm/sunxi.yaml
@@ -589,6 +589,11 @@ properties:
- const: emlid,neutis-n5h3
- const: allwinner,sun8i-h3
+ - description: NetCube Systems Kumquat
+ items:
+ - const: netcube,kumquat
+ - const: allwinner,sun8i-v3s
+
- description: NextThing Co. CHIP
items:
- const: nextthing,chip
diff --git a/Documentation/devicetree/bindings/arm/ti/omap.yaml b/Documentation/devicetree/bindings/arm/ti/omap.yaml
index 93e04a109a12..3603edd7361d 100644
--- a/Documentation/devicetree/bindings/arm/ti/omap.yaml
+++ b/Documentation/devicetree/bindings/arm/ti/omap.yaml
@@ -141,6 +141,13 @@ properties:
- const: ti,omap4430
- const: ti,omap4
+ - description: OMAP4 PandaBoard Revision A4 and later
+ items:
+ - const: ti,omap4-panda-a4
+ - const: ti,omap4-panda
+ - const: ti,omap4430
+ - const: ti,omap4
+
- description: OMAP4 DuoVero with Parlor expansion board/daughter board
items:
- const: gumstix,omap4-duovero-parlor
diff --git a/Documentation/devicetree/bindings/ata/ceva,ahci-1v84.yaml b/Documentation/devicetree/bindings/ata/ceva,ahci-1v84.yaml
index 9952e0ef7767..6ad78429dc74 100644
--- a/Documentation/devicetree/bindings/ata/ceva,ahci-1v84.yaml
+++ b/Documentation/devicetree/bindings/ata/ceva,ahci-1v84.yaml
@@ -163,11 +163,9 @@ additionalProperties: false
examples:
- |
- #include <dt-bindings/clock/xlnx-zynqmp-clk.h>
#include <dt-bindings/interrupt-controller/irq.h>
#include <dt-bindings/power/xlnx-zynqmp-power.h>
#include <dt-bindings/reset/xlnx-zynqmp-resets.h>
- #include <dt-bindings/clock/xlnx-zynqmp-clk.h>
#include <dt-bindings/phy/phy.h>
sata: ahci@fd0c0000 {
@@ -175,7 +173,7 @@ examples:
reg = <0xfd0c0000 0x200>;
interrupt-parent = <&gic>;
interrupts = <0 133 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&zynqmp_clk SATA_REF>;
+ clocks = <&zynqmp_clk 22>;
ceva,p0-cominit-params = /bits/ 8 <0x0F 0x25 0x18 0x29>;
ceva,p0-comwake-params = /bits/ 8 <0x04 0x0B 0x08 0x0F>;
ceva,p0-burst-params = /bits/ 8 <0x0A 0x08 0x4A 0x06>;
diff --git a/Documentation/devicetree/bindings/ata/fsl,pq-sata.yaml b/Documentation/devicetree/bindings/ata/fsl,pq-sata.yaml
new file mode 100644
index 000000000000..1d19ee832f0c
--- /dev/null
+++ b/Documentation/devicetree/bindings/ata/fsl,pq-sata.yaml
@@ -0,0 +1,60 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/ata/fsl,pq-sata.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Freescale 8xxx/3.0 Gb/s SATA nodes
+
+maintainers:
+ - J. Neuschäfer <j.ne@posteo.net>
+
+description:
+ SATA nodes are defined to describe on-chip Serial ATA controllers.
+ Each SATA controller should have its own node.
+
+properties:
+ compatible:
+ oneOf:
+ - items:
+ - enum:
+ - fsl,mpc8377-sata
+ - fsl,mpc8536-sata
+ - fsl,mpc8315-sata
+ - fsl,mpc8379-sata
+ - const: fsl,pq-sata
+ - const: fsl,pq-sata-v2
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ cell-index:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [1, 2, 3, 4]
+ description: |
+ 1 for controller @ 0x18000
+ 2 for controller @ 0x19000
+ 3 for controller @ 0x1a000
+ 4 for controller @ 0x1b000
+
+required:
+ - compatible
+ - interrupts
+ - cell-index
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+ sata@18000 {
+ compatible = "fsl,mpc8379-sata", "fsl,pq-sata";
+ reg = <0x18000 0x1000>;
+ cell-index = <1>;
+ interrupts = <44 IRQ_TYPE_LEVEL_LOW>;
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/ata/fsl-sata.txt b/Documentation/devicetree/bindings/ata/fsl-sata.txt
deleted file mode 100644
index fd63bb3becc9..000000000000
--- a/Documentation/devicetree/bindings/ata/fsl-sata.txt
+++ /dev/null
@@ -1,28 +0,0 @@
-* Freescale 8xxx/3.0 Gb/s SATA nodes
-
-SATA nodes are defined to describe on-chip Serial ATA controllers.
-Each SATA port should have its own node.
-
-Required properties:
-- compatible : compatible list, contains 2 entries, first is
- "fsl,CHIP-sata", where CHIP is the processor
- (mpc8315, mpc8379, etc.) and the second is
- "fsl,pq-sata"
-- interrupts : <interrupt mapping for SATA IRQ>
-- cell-index : controller index.
- 1 for controller @ 0x18000
- 2 for controller @ 0x19000
- 3 for controller @ 0x1a000
- 4 for controller @ 0x1b000
-
-Optional properties:
-- reg : <registers mapping>
-
-Example:
- sata@18000 {
- compatible = "fsl,mpc8379-sata", "fsl,pq-sata";
- reg = <0x18000 0x1000>;
- cell-index = <1>;
- interrupts = <2c 8>;
- interrupt-parent = < &ipic >;
- };
diff --git a/Documentation/devicetree/bindings/clock/rockchip,rk3528-cru.yaml b/Documentation/devicetree/bindings/clock/rockchip,rk3528-cru.yaml
new file mode 100644
index 000000000000..5a3ec902351c
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/rockchip,rk3528-cru.yaml
@@ -0,0 +1,64 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/rockchip,rk3528-cru.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Rockchip RK3528 Clock and Reset Controller
+
+maintainers:
+ - Yao Zi <ziyao@disroot.org>
+
+description: |
+ The RK3528 clock controller generates the clock and also implements a reset
+ controller for SoC peripherals. For example, it provides SCLK_UART0 and
+ PCLK_UART0 as well as SRST_P_UART0 and SRST_S_UART0 for the first UART
+ module.
+ Each clock is assigned an identifier, consumer nodes can use it to specify
+ the clock. All available clock and reset IDs are defined in dt-binding
+ headers.
+
+properties:
+ compatible:
+ const: rockchip,rk3528-cru
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ items:
+ - description: External 24MHz oscillator clock
+ - description: >
+ 50MHz clock generated by PHY module, for generating GMAC0 clocks only.
+
+ clock-names:
+ items:
+ - const: xin24m
+ - const: gmac0
+
+ "#clock-cells":
+ const: 1
+
+ "#reset-cells":
+ const: 1
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - clock-names
+ - "#clock-cells"
+ - "#reset-cells"
+
+additionalProperties: false
+
+examples:
+ - |
+ clock-controller@ff4a0000 {
+ compatible = "rockchip,rk3528-cru";
+ reg = <0xff4a0000 0x30000>;
+ clocks = <&xin24m>, <&gmac0_clk>;
+ clock-names = "xin24m", "gmac0";
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ };
diff --git a/Documentation/devicetree/bindings/clock/samsung,exynos990-clock.yaml b/Documentation/devicetree/bindings/clock/samsung,exynos990-clock.yaml
index 9e7944b5f13b..c15cc1752b02 100644
--- a/Documentation/devicetree/bindings/clock/samsung,exynos990-clock.yaml
+++ b/Documentation/devicetree/bindings/clock/samsung,exynos990-clock.yaml
@@ -31,6 +31,7 @@ properties:
compatible:
enum:
- samsung,exynos990-cmu-hsi0
+ - samsung,exynos990-cmu-peris
- samsung,exynos990-cmu-top
clocks:
@@ -83,6 +84,24 @@ allOf:
properties:
compatible:
contains:
+ const: samsung,exynos990-cmu-peris
+
+ then:
+ properties:
+ clocks:
+ items:
+ - description: External reference clock (26 MHz)
+ - description: CMU_PERIS BUS clock (from CMU_TOP)
+
+ clock-names:
+ items:
+ - const: oscclk
+ - const: bus
+
+ - if:
+ properties:
+ compatible:
+ contains:
const: samsung,exynos990-cmu-top
then:
diff --git a/Documentation/devicetree/bindings/connector/gocontroll,moduline-module-slot.yaml b/Documentation/devicetree/bindings/connector/gocontroll,moduline-module-slot.yaml
new file mode 100644
index 000000000000..a16ae2762d16
--- /dev/null
+++ b/Documentation/devicetree/bindings/connector/gocontroll,moduline-module-slot.yaml
@@ -0,0 +1,88 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/connector/gocontroll,moduline-module-slot.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: GOcontroll Moduline Module slot
+
+maintainers:
+ - Maud Spierings <maudspierings@gocontroll.com>
+
+description:
+  The GOcontroll Moduline module slot represents a connector that fulfills the
+ Moduline slot specification, and can thus house any IO module that is also
+ built to this spec.
+
+properties:
+ compatible:
+ const: gocontroll,moduline-module-slot
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ description: indicates readiness, high means busy.
+ maxItems: 1
+ reset-gpios:
+ description: resets the module, active low.
+ maxItems: 1
+ sync-gpios:
+ description: sync line between all module slots.
+ maxItems: 1
+
+ vdd-supply:
+ description: low power 3v3 supply generally for the microcontroller.
+ vddp-supply:
+ description: medium power 5v0 supply for on module low power peripherals.
+ vddhpp-supply:
+ description: high power 6v-8v supply for on module high power peripherals.
+ power-supply:
+ description: high power 6v-30v supply for high power module circuits.
+
+ i2c-bus:
+ description: i2c bus shared between module slots and the SoC
+ $ref: /schemas/types.yaml#/definitions/phandle
+
+ slot-number:
+ description:
+ The number of the module slot, representing its location on the PCB.
+ This enables access to the modules based on slot location.
+ $ref: /schemas/types.yaml#/definitions/uint32
+
+ spi-max-frequency: true
+
+required:
+ - compatible
+ - reg
+ - reset-gpios
+ - interrupts
+ - sync-gpios
+ - i2c-bus
+ - slot-number
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
+
+ spi {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ connector@0 {
+ reg = <0>;
+ compatible = "gocontroll,moduline-module-slot";
+ reset-gpios = <&gpio5 10 GPIO_ACTIVE_LOW>;
+ sync-gpios = <&gpio4 16 GPIO_ACTIVE_HIGH>;
+ interrupt-parent = <&gpio4>;
+ interrupts = <5 IRQ_TYPE_EDGE_FALLING>;
+ vdd-supply = <&reg_3v3_per>;
+ vddp-supply = <&reg_5v0>;
+ vddhpp-supply = <&reg_6v4>;
+ i2c-bus = <&i2c2>;
+ slot-number = <1>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/cpufreq/cpufreq-qcom-hw.yaml b/Documentation/devicetree/bindings/cpufreq/cpufreq-qcom-hw.yaml
index e937eb7355e7..e0242bed3342 100644
--- a/Documentation/devicetree/bindings/cpufreq/cpufreq-qcom-hw.yaml
+++ b/Documentation/devicetree/bindings/cpufreq/cpufreq-qcom-hw.yaml
@@ -34,6 +34,7 @@ properties:
- description: v2 of CPUFREQ HW (EPSS)
items:
- enum:
+ - qcom,qcs8300-cpufreq-epss
- qcom,qdu1000-cpufreq-epss
- qcom,sa8255p-cpufreq-epss
- qcom,sa8775p-cpufreq-epss
@@ -111,22 +112,20 @@ allOf:
enum:
- qcom,qcm2290-cpufreq-hw
- qcom,sar2130p-cpufreq-epss
+ - qcom,sdx75-cpufreq-epss
then:
properties:
reg:
- minItems: 1
maxItems: 1
reg-names:
- minItems: 1
maxItems: 1
interrupts:
- minItems: 1
maxItems: 1
interrupt-names:
- minItems: 1
+ maxItems: 1
- if:
properties:
@@ -135,6 +134,7 @@ allOf:
enum:
- qcom,qdu1000-cpufreq-epss
- qcom,sa8255p-cpufreq-epss
+ - qcom,sa8775p-cpufreq-epss
- qcom,sc7180-cpufreq-hw
- qcom,sc8180x-cpufreq-hw
- qcom,sc8280xp-cpufreq-epss
@@ -160,12 +160,14 @@ allOf:
interrupt-names:
minItems: 2
+ maxItems: 2
- if:
properties:
compatible:
contains:
enum:
+ - qcom,qcs8300-cpufreq-epss
- qcom,sc7280-cpufreq-epss
- qcom,sm8250-cpufreq-epss
- qcom,sm8350-cpufreq-epss
@@ -187,6 +189,7 @@ allOf:
interrupt-names:
minItems: 3
+ maxItems: 3
- if:
properties:
@@ -211,7 +214,31 @@ allOf:
interrupt-names:
minItems: 2
+ maxItems: 2
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - qcom,sm8650-cpufreq-epss
+ then:
+ properties:
+ reg:
+ minItems: 4
+ maxItems: 4
+
+ reg-names:
+ minItems: 4
+ maxItems: 4
+
+ interrupts:
+ minItems: 4
+ maxItems: 4
+
+ interrupt-names:
+ minItems: 4
+ maxItems: 4
examples:
- |
diff --git a/Documentation/devicetree/bindings/display/tegra/nvidia,tegra114-mipi.yaml b/Documentation/devicetree/bindings/display/tegra/nvidia,tegra114-mipi.yaml
index f448624dd779..193ddb105283 100644
--- a/Documentation/devicetree/bindings/display/tegra/nvidia,tegra114-mipi.yaml
+++ b/Documentation/devicetree/bindings/display/tegra/nvidia,tegra114-mipi.yaml
@@ -17,6 +17,7 @@ properties:
compatible:
enum:
- nvidia,tegra114-mipi
+ - nvidia,tegra124-mipi
- nvidia,tegra210-mipi
- nvidia,tegra186-mipi
diff --git a/Documentation/devicetree/bindings/dma/xilinx/xlnx,zynqmp-dma-1.0.yaml b/Documentation/devicetree/bindings/dma/xilinx/xlnx,zynqmp-dma-1.0.yaml
index ac3198953b8e..b5399c65a731 100644
--- a/Documentation/devicetree/bindings/dma/xilinx/xlnx,zynqmp-dma-1.0.yaml
+++ b/Documentation/devicetree/bindings/dma/xilinx/xlnx,zynqmp-dma-1.0.yaml
@@ -75,7 +75,6 @@ additionalProperties: false
examples:
- |
- #include <dt-bindings/clock/xlnx-zynqmp-clk.h>
fpd_dma_chan1: dma-controller@fd500000 {
compatible = "xlnx,zynqmp-dma-1.0";
@@ -84,7 +83,7 @@ examples:
interrupts = <0 117 0x4>;
#dma-cells = <1>;
clock-names = "clk_main", "clk_apb";
- clocks = <&zynqmp_clk GDMA_REF>, <&zynqmp_clk LPD_LSBUS>;
+ clocks = <&zynqmp_clk 19>, <&zynqmp_clk 31>;
xlnx,bus-width = <128>;
dma-coherent;
};
diff --git a/Documentation/devicetree/bindings/firmware/fsl,scu.yaml b/Documentation/devicetree/bindings/firmware/fsl,scu.yaml
index 557e524786c2..f9ba18f06369 100644
--- a/Documentation/devicetree/bindings/firmware/fsl,scu.yaml
+++ b/Documentation/devicetree/bindings/firmware/fsl,scu.yaml
@@ -45,6 +45,18 @@ properties:
Keys provided by the SCU
$ref: /schemas/input/fsl,scu-key.yaml
+ reset-controller:
+ type: object
+ properties:
+ compatible:
+ const: fsl,imx-scu-reset
+ '#reset-cells':
+ const: 1
+ required:
+ - compatible
+ - '#reset-cells'
+ additionalProperties: false
+
mboxes:
description:
A list of phandles of TX MU channels followed by a list of phandles of
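The reset-controller child added above is a plain subnode of the SCU firmware node; a minimal sketch of that subnode, with the parent firmware node omitted, is:

    reset-controller {
        compatible = "fsl,imx-scu-reset";
        #reset-cells = <1>;
    };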
diff --git a/Documentation/devicetree/bindings/firmware/google,gs101-acpm-ipc.yaml b/Documentation/devicetree/bindings/firmware/google,gs101-acpm-ipc.yaml
new file mode 100644
index 000000000000..2cdad1bbae73
--- /dev/null
+++ b/Documentation/devicetree/bindings/firmware/google,gs101-acpm-ipc.yaml
@@ -0,0 +1,50 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+# Copyright 2024 Linaro Ltd.
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/firmware/google,gs101-acpm-ipc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Samsung Exynos ACPM mailbox protocol
+
+maintainers:
+ - Tudor Ambarus <tudor.ambarus@linaro.org>
+
+description: |
+ ACPM (Alive Clock and Power Manager) is a firmware that operates on the
+ APM (Active Power Management) module that handles overall power management
+ activities. ACPM and masters regard each other as independent hardware
+ components and communicate with each other using mailbox messages and
+ shared memory.
+
+ This binding is intended to define the interface the firmware implementing
+ ACPM provides for OSPM in the device tree.
+
+properties:
+ compatible:
+ const: google,gs101-acpm-ipc
+
+ mboxes:
+ maxItems: 1
+
+ shmem:
+ description:
+ Phandle pointing to the shared memory (SHM) area. The memory
+ contains channel configuration data and the TX/RX ring buffers that
+ are used for passing messages to/from the ACPM firmware.
+ maxItems: 1
+
+required:
+ - compatible
+ - mboxes
+ - shmem
+
+additionalProperties: false
+
+examples:
+ - |
+ power-management {
+ compatible = "google,gs101-acpm-ipc";
+ mboxes = <&ap2apm_mailbox>;
+ shmem = <&apm_sram>;
+ };
diff --git a/Documentation/devicetree/bindings/firmware/thead,th1520-aon.yaml b/Documentation/devicetree/bindings/firmware/thead,th1520-aon.yaml
new file mode 100644
index 000000000000..bbc183200400
--- /dev/null
+++ b/Documentation/devicetree/bindings/firmware/thead,th1520-aon.yaml
@@ -0,0 +1,53 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/firmware/thead,th1520-aon.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: T-HEAD TH1520 AON (Always-On) Firmware
+
+description: |
+ The Always-On (AON) subsystem in the TH1520 SoC is responsible for managing
+ low-power states, system wakeup events, and power management tasks. It is
+ designed to operate independently in a dedicated power domain, allowing it to
+ remain functional even during the SoC's deep sleep states.
+
+ At the heart of the AON subsystem is the E902, a low-power core that executes
+ firmware responsible for coordinating tasks such as power domain control,
+ clock management, and system wakeup signaling. Communication between the main
+ SoC and the AON subsystem is handled through a mailbox interface, which
+ enables message-based interactions with the AON firmware.
+
+maintainers:
+ - Michal Wilczynski <m.wilczynski@samsung.com>
+
+properties:
+ compatible:
+ const: thead,th1520-aon
+
+ mboxes:
+ maxItems: 1
+
+ mbox-names:
+ items:
+ - const: aon
+
+ "#power-domain-cells":
+ const: 1
+
+required:
+ - compatible
+ - mboxes
+ - mbox-names
+ - "#power-domain-cells"
+
+additionalProperties: false
+
+examples:
+ - |
+ aon: aon {
+ compatible = "thead,th1520-aon";
+ mboxes = <&mbox_910t 1>;
+ mbox-names = "aon";
+ #power-domain-cells = <1>;
+ };
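Devices whose power domains are managed by the AON firmware reference this node through the standard power-domains property. A minimal sketch, assuming a placeholder consumer node and a placeholder domain index (real indexes come from the TH1520 power-domain dt-binding header):

    gpu@ffe00000 {
        /* node name, address and domain index are placeholders */
        power-domains = <&aon 2>;
    };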
diff --git a/Documentation/devicetree/bindings/gpio/aspeed,ast2400-gpio.yaml b/Documentation/devicetree/bindings/gpio/aspeed,ast2400-gpio.yaml
index b9afd07a9d24..b16273e69dfe 100644
--- a/Documentation/devicetree/bindings/gpio/aspeed,ast2400-gpio.yaml
+++ b/Documentation/devicetree/bindings/gpio/aspeed,ast2400-gpio.yaml
@@ -46,6 +46,12 @@ properties:
minimum: 12
maximum: 232
+patternProperties:
+ "-hog(-[0-9]+)?$":
+ type: object
+ required:
+ - gpio-hog
+
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/gpio/gpio-mvebu.yaml b/Documentation/devicetree/bindings/gpio/gpio-mvebu.yaml
index 33d4e4716516..7ed5f9c4dde9 100644
--- a/Documentation/devicetree/bindings/gpio/gpio-mvebu.yaml
+++ b/Documentation/devicetree/bindings/gpio/gpio-mvebu.yaml
@@ -72,6 +72,9 @@ properties:
"#gpio-cells":
const: 2
+ gpio-ranges:
+ maxItems: 1
+
marvell,pwm-offset:
$ref: /schemas/types.yaml#/definitions/uint32
description: Offset in the register map for the pwm registers (in bytes)
@@ -96,6 +99,13 @@ properties:
- const: axi
minItems: 1
+patternProperties:
+ "^(.+-hog(-[0-9]+)?)$":
+ type: object
+
+ required:
+ - gpio-hog
+
required:
- compatible
- gpio-controller
diff --git a/Documentation/devicetree/bindings/gpio/gpio-vf610.yaml b/Documentation/devicetree/bindings/gpio/gpio-vf610.yaml
index cabda2eab4a2..4fb32e9aec0a 100644
--- a/Documentation/devicetree/bindings/gpio/gpio-vf610.yaml
+++ b/Documentation/devicetree/bindings/gpio/gpio-vf610.yaml
@@ -28,6 +28,7 @@ properties:
- items:
- enum:
- fsl,imx93-gpio
+ - fsl,imx94-gpio
- fsl,imx95-gpio
- const: fsl,imx8ulp-gpio
diff --git a/Documentation/devicetree/bindings/gpio/loongson,ls-gpio.yaml b/Documentation/devicetree/bindings/gpio/loongson,ls-gpio.yaml
index cf3b1b270aa8..b68159600e2b 100644
--- a/Documentation/devicetree/bindings/gpio/loongson,ls-gpio.yaml
+++ b/Documentation/devicetree/bindings/gpio/loongson,ls-gpio.yaml
@@ -20,7 +20,10 @@ properties:
- loongson,ls2k2000-gpio1
- loongson,ls2k2000-gpio2
- loongson,ls3a5000-gpio
+ - loongson,ls3a6000-gpio # Loongson-3A6000 node GPIO
- loongson,ls7a-gpio
+ - loongson,ls7a2000-gpio1 # LS7A2000 chipset GPIO
+ - loongson,ls7a2000-gpio2 # LS7A2000 ACPI GPIO
- items:
- const: loongson,ls2k1000-gpio
- const: loongson,ls2k-gpio
diff --git a/Documentation/devicetree/bindings/gpio/nxp,pcf8575.yaml b/Documentation/devicetree/bindings/gpio/nxp,pcf8575.yaml
index 3718103e966a..8bca574bb66d 100644
--- a/Documentation/devicetree/bindings/gpio/nxp,pcf8575.yaml
+++ b/Documentation/devicetree/bindings/gpio/nxp,pcf8575.yaml
@@ -73,6 +73,43 @@ properties:
wakeup-source: true
+ reset-gpios:
+ maxItems: 1
+ description:
+ GPIO controlling the (reset active LOW) RESET# pin.
+
+ The active polarity of the GPIO must translate to the low state of the
+ RESET# pin on the IC, i.e. if a GPIO is directly routed to the RESET# pin
+ without any inverter, GPIO_ACTIVE_LOW is expected.
+
+ Performing a reset makes all lines initialized to their input (pulled-up)
+ state.
+
+allOf:
+ - if:
+ properties:
+ compatible:
+ not:
+ contains:
+ enum:
+ - nxp,pca9670
+ - nxp,pca9671
+ - nxp,pca9672
+ - nxp,pca9673
+ then:
+ properties:
+ reset-gpios: false
+
+ # lines-initial-states XOR reset-gpios
+ # Performing a reset reinitializes all lines to a known state which
+ # may not match passed lines-initial-states
+ - if:
+ required:
+ - lines-initial-states
+ then:
+ properties:
+ reset-gpios: false
+
patternProperties:
"^(.+-hog(-[0-9]+)?)$":
type: object
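Putting the new constraints together: reset-gpios is only accepted on the PCA967x parts and never together with lines-initial-states. A hedged sketch of a conforming node (I2C address and GPIO routing are placeholders):

    gpio@20 {
        compatible = "nxp,pca9670";
        reg = <0x20>;
        gpio-controller;
        #gpio-cells = <2>;
        /* GPIO wired straight to RESET#, hence GPIO_ACTIVE_LOW */
        reset-gpios = <&gpio1 4 GPIO_ACTIVE_LOW>;
    };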
diff --git a/Documentation/devicetree/bindings/hwinfo/samsung,exynos-chipid.yaml b/Documentation/devicetree/bindings/hwinfo/samsung,exynos-chipid.yaml
index 385aac7161a0..383020450d78 100644
--- a/Documentation/devicetree/bindings/hwinfo/samsung,exynos-chipid.yaml
+++ b/Documentation/devicetree/bindings/hwinfo/samsung,exynos-chipid.yaml
@@ -19,9 +19,11 @@ properties:
- enum:
- samsung,exynos5433-chipid
- samsung,exynos7-chipid
+ - samsung,exynos7870-chipid
- const: samsung,exynos4210-chipid
- items:
- enum:
+ - samsung,exynos2200-chipid
- samsung,exynos7885-chipid
- samsung,exynos8895-chipid
- samsung,exynos9810-chipid
diff --git a/Documentation/devicetree/bindings/hwmon/adi,ad741x.yaml b/Documentation/devicetree/bindings/hwmon/adi,ad741x.yaml
index ce7f8ce9da0a..236d8b52ef85 100644
--- a/Documentation/devicetree/bindings/hwmon/adi,ad741x.yaml
+++ b/Documentation/devicetree/bindings/hwmon/adi,ad741x.yaml
@@ -1,7 +1,6 @@
# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
%YAML 1.2
---
-
$id: http://devicetree.org/schemas/hwmon/adi,ad741x.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
diff --git a/Documentation/devicetree/bindings/hwmon/adi,adm1275.yaml b/Documentation/devicetree/bindings/hwmon/adi,adm1275.yaml
index fd79bf2e0d16..ddb72857c846 100644
--- a/Documentation/devicetree/bindings/hwmon/adi,adm1275.yaml
+++ b/Documentation/devicetree/bindings/hwmon/adi,adm1275.yaml
@@ -1,7 +1,6 @@
# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
%YAML 1.2
---
-
$id: http://devicetree.org/schemas/hwmon/adi,adm1275.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
diff --git a/Documentation/devicetree/bindings/hwmon/adi,ltc2991.yaml b/Documentation/devicetree/bindings/hwmon/adi,ltc2991.yaml
index 011e5b65c79c..1ff44cb22ef4 100644
--- a/Documentation/devicetree/bindings/hwmon/adi,ltc2991.yaml
+++ b/Documentation/devicetree/bindings/hwmon/adi,ltc2991.yaml
@@ -1,7 +1,6 @@
# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
%YAML 1.2
---
-
$id: http://devicetree.org/schemas/hwmon/adi,ltc2991.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
diff --git a/Documentation/devicetree/bindings/hwmon/gpio-fan.yaml b/Documentation/devicetree/bindings/hwmon/gpio-fan.yaml
index 7f30cfc87350..4faebbb4c7ab 100644
--- a/Documentation/devicetree/bindings/hwmon/gpio-fan.yaml
+++ b/Documentation/devicetree/bindings/hwmon/gpio-fan.yaml
@@ -23,6 +23,9 @@ properties:
alarm-gpios:
maxItems: 1
+ fan-supply:
+ description: Power supply for fan
+
gpio-fan,speed-map:
$ref: /schemas/types.yaml#/definitions/uint32-matrix
minItems: 2
diff --git a/Documentation/devicetree/bindings/hwmon/lltc,ltc2978.yaml b/Documentation/devicetree/bindings/hwmon/lltc,ltc2978.yaml
index 37e1dc9c7dd3..aa801ef1640b 100644
--- a/Documentation/devicetree/bindings/hwmon/lltc,ltc2978.yaml
+++ b/Documentation/devicetree/bindings/hwmon/lltc,ltc2978.yaml
@@ -12,6 +12,8 @@ maintainers:
properties:
compatible:
enum:
+ - lltc,lt7170
+ - lltc,lt7171
- lltc,ltc2972
- lltc,ltc2974
- lltc,ltc2975
@@ -30,6 +32,7 @@ properties:
- lltc,ltc7880
- lltc,ltm2987
- lltc,ltm4664
+ - lltc,ltm4673
- lltc,ltm4675
- lltc,ltm4676
- lltc,ltm4677
@@ -46,6 +49,7 @@ properties:
description: |
list of regulators provided by this controller.
Valid names of regulators depend on number of supplies supported per device:
+ * lt7170, lt7171 : vout0
* ltc2972 vout0 - vout1
* ltc2974, ltc2975 : vout0 - vout3
* ltc2977, ltc2979, ltc2980, ltm2987 : vout0 - vout7
@@ -55,6 +59,7 @@ properties:
* ltc7880 : vout0 - vout1
* ltc3883 : vout0
* ltm4664 : vout0 - vout1
+ * ltm4673 : vout0 - vout3
* ltm4675, ltm4676, ltm4677, ltm4678 : vout0 - vout1
* ltm4680, ltm4686 : vout0 - vout1
* ltm4700 : vout0 - vout1
diff --git a/Documentation/devicetree/bindings/hwmon/maxim,max20730.yaml b/Documentation/devicetree/bindings/hwmon/maxim,max20730.yaml
index 93e86e3b4602..8af0d7458e62 100644
--- a/Documentation/devicetree/bindings/hwmon/maxim,max20730.yaml
+++ b/Documentation/devicetree/bindings/hwmon/maxim,max20730.yaml
@@ -1,7 +1,6 @@
# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
%YAML 1.2
---
-
$id: http://devicetree.org/schemas/hwmon/maxim,max20730.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
diff --git a/Documentation/devicetree/bindings/hwmon/maxim,max6639.yaml b/Documentation/devicetree/bindings/hwmon/maxim,max6639.yaml
index 4f5837a30773..139a95e00fe5 100644
--- a/Documentation/devicetree/bindings/hwmon/maxim,max6639.yaml
+++ b/Documentation/devicetree/bindings/hwmon/maxim,max6639.yaml
@@ -1,7 +1,6 @@
# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
%YAML 1.2
---
-
$id: http://devicetree.org/schemas/hwmon/maxim,max6639.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
diff --git a/Documentation/devicetree/bindings/hwmon/maxim,max6650.yaml b/Documentation/devicetree/bindings/hwmon/maxim,max6650.yaml
index 2c26104a5e16..24c7697fdc1a 100644
--- a/Documentation/devicetree/bindings/hwmon/maxim,max6650.yaml
+++ b/Documentation/devicetree/bindings/hwmon/maxim,max6650.yaml
@@ -1,7 +1,6 @@
# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
%YAML 1.2
---
-
$id: http://devicetree.org/schemas/hwmon/maxim,max6650.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
diff --git a/Documentation/devicetree/bindings/hwmon/microchip,emc2305.yaml b/Documentation/devicetree/bindings/hwmon/microchip,emc2305.yaml
new file mode 100644
index 000000000000..d3f06ebc19fa
--- /dev/null
+++ b/Documentation/devicetree/bindings/hwmon/microchip,emc2305.yaml
@@ -0,0 +1,111 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/hwmon/microchip,emc2305.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Microchip EMC2305 SMBus compliant PWM fan controller
+
+maintainers:
+ - Michael Shych <michaelsh@nvidia.com>
+
+description:
+ Microchip EMC2301/2/3/5 PWM controller, which supports up to five programmable
+ fan control circuits.
+
+properties:
+ compatible:
+ oneOf:
+ - enum:
+ - microchip,emc2305
+ - items:
+ - enum:
+ - microchip,emc2303
+ - microchip,emc2302
+ - microchip,emc2301
+ - const: microchip,emc2305
+
+ reg:
+ maxItems: 1
+
+ '#address-cells':
+ const: 1
+
+ '#size-cells':
+ const: 0
+
+ '#pwm-cells':
+ const: 3
+ description: |
+ Number of cells in a PWM specifier.
+ - cell 0: The PWM frequency
+ - cell 1: The PWM polarity: 0 or PWM_POLARITY_INVERTED
+ - cell 2: The PWM output config:
+ - 0 (Open-Drain)
+ - 1 (Push-Pull)
+
+patternProperties:
+ '^fan@[0-4]$':
+ $ref: fan-common.yaml#
+ unevaluatedProperties: false
+ properties:
+ reg:
+ description:
+ The fan number used to determine the associated PWM channel.
+ maxItems: 1
+
+ required:
+ - reg
+
+required:
+ - compatible
+ - reg
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/pwm/pwm.h>
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ fan_controller: fan-controller@2f {
+ compatible = "microchip,emc2305";
+ reg = <0x2f>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ #pwm-cells = <3>;
+
+ fan@0 {
+ reg = <0x0>;
+ pwms = <&fan_controller 26000 PWM_POLARITY_INVERTED 1>;
+ #cooling-cells = <2>;
+ };
+
+ fan@1 {
+ reg = <0x1>;
+ pwms = <&fan_controller 26000 0 1>;
+ #cooling-cells = <2>;
+ };
+
+ fan@2 {
+ reg = <0x2>;
+ pwms = <&fan_controller 26000 0 1>;
+ #cooling-cells = <2>;
+ };
+
+ fan@3 {
+ reg = <0x3>;
+ pwms = <&fan_controller 26000 0 1>;
+ #cooling-cells = <2>;
+ };
+
+ fan@4 {
+ reg = <0x4>;
+ pwms = <&fan_controller 26000 0 1>;
+ #cooling-cells = <2>;
+ };
+ };
+ };
+...
diff --git a/Documentation/devicetree/bindings/hwmon/national,lm90.yaml b/Documentation/devicetree/bindings/hwmon/national,lm90.yaml
index 6e59c8fdef30..4feb76919404 100644
--- a/Documentation/devicetree/bindings/hwmon/national,lm90.yaml
+++ b/Documentation/devicetree/bindings/hwmon/national,lm90.yaml
@@ -32,6 +32,9 @@ properties:
- national,lm89
- national,lm90
- national,lm99
+ - nuvoton,nct7716
+ - nuvoton,nct7717
+ - nuvoton,nct7718
- nxp,sa56004
- onnn,nct1008
- ti,tmp451
@@ -120,6 +123,8 @@ allOf:
- dallas,max6659
- dallas,max6695
- dallas,max6696
+ - nuvoton,nct7716
+ - nuvoton,nct7717
then:
patternProperties:
"^channel@([0-2])$":
@@ -155,6 +160,7 @@ allOf:
- national,lm89
- national,lm90
- national,lm99
+ - nuvoton,nct7718
- nxp,sa56004
- winbond,w83l771
then:
diff --git a/Documentation/devicetree/bindings/hwmon/ntc-thermistor.yaml b/Documentation/devicetree/bindings/hwmon/ntc-thermistor.yaml
index 3d0146e20d3e..b8e500e6cd9f 100644
--- a/Documentation/devicetree/bindings/hwmon/ntc-thermistor.yaml
+++ b/Documentation/devicetree/bindings/hwmon/ntc-thermistor.yaml
@@ -76,7 +76,7 @@ properties:
- const: murata,ncp03wf104
- const: murata,ncp15xh103
- const: samsung,1404-001221
- # Deprecated "ntp," compatible strings
+ # Deprecated "ntc," compatible strings
- const: ntc,ncp15wb473
deprecated: true
- const: ntc,ncp18wb473
diff --git a/Documentation/devicetree/bindings/hwmon/nuvoton,nct6775.yaml b/Documentation/devicetree/bindings/hwmon/nuvoton,nct6775.yaml
index e3db642878d4..244470282890 100644
--- a/Documentation/devicetree/bindings/hwmon/nuvoton,nct6775.yaml
+++ b/Documentation/devicetree/bindings/hwmon/nuvoton,nct6775.yaml
@@ -1,7 +1,6 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
-
$id: http://devicetree.org/schemas/hwmon/nuvoton,nct6775.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
diff --git a/Documentation/devicetree/bindings/hwmon/nuvoton,nct7363.yaml b/Documentation/devicetree/bindings/hwmon/nuvoton,nct7363.yaml
index c1e5dedc2f6a..625fcf5d3b54 100644
--- a/Documentation/devicetree/bindings/hwmon/nuvoton,nct7363.yaml
+++ b/Documentation/devicetree/bindings/hwmon/nuvoton,nct7363.yaml
@@ -1,7 +1,6 @@
# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
%YAML 1.2
---
-
$id: http://devicetree.org/schemas/hwmon/nuvoton,nct7363.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
diff --git a/Documentation/devicetree/bindings/hwmon/nuvoton,nct7802.yaml b/Documentation/devicetree/bindings/hwmon/nuvoton,nct7802.yaml
index cd8dcd797031..c16a33227e94 100644
--- a/Documentation/devicetree/bindings/hwmon/nuvoton,nct7802.yaml
+++ b/Documentation/devicetree/bindings/hwmon/nuvoton,nct7802.yaml
@@ -1,7 +1,6 @@
# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
%YAML 1.2
---
-
$id: http://devicetree.org/schemas/hwmon/nuvoton,nct7802.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
diff --git a/Documentation/devicetree/bindings/hwmon/pmbus/ti,ucd90320.yaml b/Documentation/devicetree/bindings/hwmon/pmbus/ti,ucd90320.yaml
index e8feee38c76c..f8bea1c0e94a 100644
--- a/Documentation/devicetree/bindings/hwmon/pmbus/ti,ucd90320.yaml
+++ b/Documentation/devicetree/bindings/hwmon/pmbus/ti,ucd90320.yaml
@@ -28,6 +28,15 @@ properties:
reg:
maxItems: 1
+ gpio-controller: true
+
+ gpio-line-names:
+ minItems: 84
+ maxItems: 84
+
+ '#gpio-cells':
+ const: 1
+
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/hwmon/ti,adc128d818.yaml b/Documentation/devicetree/bindings/hwmon/ti,adc128d818.yaml
index a32035409cee..78e3d97e2ae5 100644
--- a/Documentation/devicetree/bindings/hwmon/ti,adc128d818.yaml
+++ b/Documentation/devicetree/bindings/hwmon/ti,adc128d818.yaml
@@ -1,7 +1,6 @@
# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
%YAML 1.2
---
-
$id: http://devicetree.org/schemas/hwmon/ti,adc128d818.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
diff --git a/Documentation/devicetree/bindings/hwmon/ti,ads7828.yaml b/Documentation/devicetree/bindings/hwmon/ti,ads7828.yaml
index 926be9a29044..fb80456120e1 100644
--- a/Documentation/devicetree/bindings/hwmon/ti,ads7828.yaml
+++ b/Documentation/devicetree/bindings/hwmon/ti,ads7828.yaml
@@ -1,7 +1,6 @@
# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
%YAML 1.2
---
-
$id: http://devicetree.org/schemas/hwmon/ti,ads7828.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
diff --git a/Documentation/devicetree/bindings/hwmon/ti,ina2xx.yaml b/Documentation/devicetree/bindings/hwmon/ti,ina2xx.yaml
index 05a9cb36cd82..bc03781342c0 100644
--- a/Documentation/devicetree/bindings/hwmon/ti,ina2xx.yaml
+++ b/Documentation/devicetree/bindings/hwmon/ti,ina2xx.yaml
@@ -1,7 +1,6 @@
# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
%YAML 1.2
---
-
$id: http://devicetree.org/schemas/hwmon/ti,ina2xx.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
@@ -27,6 +26,7 @@ properties:
- ti,ina226
- ti,ina230
- ti,ina231
+ - ti,ina233
- ti,ina237
- ti,ina238
- ti,ina260
@@ -75,12 +75,41 @@ properties:
the alert polarity to active-high.
$ref: /schemas/types.yaml#/definitions/flag
+ ti,maximum-expected-current-microamp:
+ description: |
+ This value indicates the maximum current in microamps that you can
+ expect to measure with the INA233 in your circuit.
+
+ This value will be used to calculate the Current_LSB and current/power
+ coefficients for PMBus and to calibrate the IC.
+ minimum: 32768
+ maximum: 4294967295
+ default: 32768000
+
required:
- compatible
- reg
allOf:
- $ref: hwmon-common.yaml#
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - silergy,sy24655
+ - ti,ina209
+ - ti,ina219
+ - ti,ina220
+ - ti,ina226
+ - ti,ina230
+ - ti,ina231
+ - ti,ina237
+ - ti,ina238
+ - ti,ina260
+ then:
+ properties:
+ ti,maximum-expected-current-microamp: false
unevaluatedProperties: false
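As a worked illustration of the new ti,maximum-expected-current-microamp property, assuming the usual INA233 convention of Current_LSB = maximum expected current / 2^15: the default of 32768000 uA gives a Current_LSB of 1000 uA, and the minimum of 32768 uA gives 1 uA. A hedged sketch of an INA233 node follows; the I2C address and shunt value are placeholders.

    i2c {
        #address-cells = <1>;
        #size-cells = <0>;

        power-sensor@40 {
            compatible = "ti,ina233";
            reg = <0x40>;
            shunt-resistor = <2000>; /* micro-ohms, placeholder */
            /* 10 A full scale -> Current_LSB of roughly 305 uA */
            ti,maximum-expected-current-microamp = <10000000>;
        };
    };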
diff --git a/Documentation/devicetree/bindings/hwmon/ti,lm87.yaml b/Documentation/devicetree/bindings/hwmon/ti,lm87.yaml
index f553235a7321..63d8cf467806 100644
--- a/Documentation/devicetree/bindings/hwmon/ti,lm87.yaml
+++ b/Documentation/devicetree/bindings/hwmon/ti,lm87.yaml
@@ -1,7 +1,6 @@
# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
%YAML 1.2
---
-
$id: http://devicetree.org/schemas/hwmon/ti,lm87.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
diff --git a/Documentation/devicetree/bindings/hwmon/ti,tmp513.yaml b/Documentation/devicetree/bindings/hwmon/ti,tmp513.yaml
index 227858e76058..cba5b4a1b81f 100644
--- a/Documentation/devicetree/bindings/hwmon/ti,tmp513.yaml
+++ b/Documentation/devicetree/bindings/hwmon/ti,tmp513.yaml
@@ -1,7 +1,6 @@
# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
%YAML 1.2
---
-
$id: http://devicetree.org/schemas/hwmon/ti,tmp513.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
diff --git a/Documentation/devicetree/bindings/hwmon/ti,tps23861.yaml b/Documentation/devicetree/bindings/hwmon/ti,tps23861.yaml
index f58248c29e22..ee7de53e1918 100644
--- a/Documentation/devicetree/bindings/hwmon/ti,tps23861.yaml
+++ b/Documentation/devicetree/bindings/hwmon/ti,tps23861.yaml
@@ -1,7 +1,6 @@
# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
%YAML 1.2
---
-
$id: http://devicetree.org/schemas/hwmon/ti,tps23861.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
diff --git a/Documentation/devicetree/bindings/hwmon/winbond,w83781d.yaml b/Documentation/devicetree/bindings/hwmon/winbond,w83781d.yaml
index 31ce77a4b087..6971ecb314eb 100644
--- a/Documentation/devicetree/bindings/hwmon/winbond,w83781d.yaml
+++ b/Documentation/devicetree/bindings/hwmon/winbond,w83781d.yaml
@@ -1,7 +1,6 @@
# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
%YAML 1.2
---
-
$id: http://devicetree.org/schemas/hwmon/winbond,w83781d.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
diff --git a/Documentation/devicetree/bindings/iio/adc/adi,ad7606.yaml b/Documentation/devicetree/bindings/iio/adc/adi,ad7606.yaml
index ab5881d0d017..52d3f1ce3367 100644
--- a/Documentation/devicetree/bindings/iio/adc/adi,ad7606.yaml
+++ b/Documentation/devicetree/bindings/iio/adc/adi,ad7606.yaml
@@ -146,6 +146,7 @@ properties:
maxItems: 2
pwm-names:
+ minItems: 1
items:
- const: convst1
- const: convst2
diff --git a/Documentation/devicetree/bindings/iio/adc/xlnx,zynqmp-ams.yaml b/Documentation/devicetree/bindings/iio/adc/xlnx,zynqmp-ams.yaml
index 8cbad7e792b6..a403392fb263 100644
--- a/Documentation/devicetree/bindings/iio/adc/xlnx,zynqmp-ams.yaml
+++ b/Documentation/devicetree/bindings/iio/adc/xlnx,zynqmp-ams.yaml
@@ -193,7 +193,6 @@ additionalProperties: false
examples:
- |
- #include <dt-bindings/clock/xlnx-zynqmp-clk.h>
bus {
#address-cells = <2>;
@@ -204,7 +203,7 @@ examples:
interrupt-parent = <&gic>;
interrupts = <0 56 4>;
reg = <0x0 0xffa50000 0x0 0x800>;
- clocks = <&zynqmp_clk AMS_REF>;
+ clocks = <&zynqmp_clk 70>;
#address-cells = <1>;
#size-cells = <1>;
#io-channel-cells = <1>;
diff --git a/Documentation/devicetree/bindings/input/touchscreen/imagis,ist3038c.yaml b/Documentation/devicetree/bindings/input/touchscreen/imagis,ist3038c.yaml
index e24cbd960993..bd8ede3a4ad8 100644
--- a/Documentation/devicetree/bindings/input/touchscreen/imagis,ist3038c.yaml
+++ b/Documentation/devicetree/bindings/input/touchscreen/imagis,ist3038c.yaml
@@ -19,6 +19,7 @@ properties:
- imagis,ist3038
- imagis,ist3038b
- imagis,ist3038c
+ - imagis,ist3038h
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/interrupt-controller/allwinner,sun7i-a20-sc-nmi.yaml b/Documentation/devicetree/bindings/interrupt-controller/allwinner,sun7i-a20-sc-nmi.yaml
index f49b43f45f3d..06e3621a8c06 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/allwinner,sun7i-a20-sc-nmi.yaml
+++ b/Documentation/devicetree/bindings/interrupt-controller/allwinner,sun7i-a20-sc-nmi.yaml
@@ -26,6 +26,7 @@ properties:
deprecated: true
- const: allwinner,sun7i-a20-sc-nmi
- const: allwinner,sun9i-a80-nmi
+ - const: allwinner,sun55i-a523-nmi
- items:
- enum:
- allwinner,sun8i-v3s-nmi
diff --git a/Documentation/devicetree/bindings/interrupt-controller/amlogic,meson-gpio-intc.yaml b/Documentation/devicetree/bindings/interrupt-controller/amlogic,meson-gpio-intc.yaml
index a93744763787..3d60d9e9e208 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/amlogic,meson-gpio-intc.yaml
+++ b/Documentation/devicetree/bindings/interrupt-controller/amlogic,meson-gpio-intc.yaml
@@ -35,6 +35,9 @@ properties:
- amlogic,meson-sm1-gpio-intc
- amlogic,meson-a1-gpio-intc
- amlogic,meson-s4-gpio-intc
+ - amlogic,a4-gpio-intc
+ - amlogic,a4-gpio-ao-intc
+ - amlogic,a5-gpio-intc
- amlogic,c3-gpio-intc
- amlogic,t7-gpio-intc
- const: amlogic,meson-gpio-intc
@@ -49,7 +52,7 @@ properties:
amlogic,channel-interrupts:
description: Array with the upstream hwirq numbers
- minItems: 8
+ minItems: 2
maxItems: 12
$ref: /schemas/types.yaml#/definitions/uint32-array
@@ -60,6 +63,20 @@ required:
- "#interrupt-cells"
- amlogic,channel-interrupts
+if:
+ properties:
+ compatible:
+ contains:
+ const: amlogic,a4-gpio-ao-intc
+then:
+ properties:
+ amlogic,channel-interrupts:
+ maxItems: 2
+else:
+ properties:
+ amlogic,channel-interrupts:
+ minItems: 8
+
additionalProperties: false
examples:
diff --git a/Documentation/devicetree/bindings/interrupt-controller/renesas,rzv2h-icu.yaml b/Documentation/devicetree/bindings/interrupt-controller/renesas,rzv2h-icu.yaml
index d7ef4f1323a7..3f99c8645767 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/renesas,rzv2h-icu.yaml
+++ b/Documentation/devicetree/bindings/interrupt-controller/renesas,rzv2h-icu.yaml
@@ -4,7 +4,7 @@
$id: http://devicetree.org/schemas/interrupt-controller/renesas,rzv2h-icu.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
-title: Renesas RZ/V2H(P) Interrupt Control Unit
+title: Renesas RZ/{G3E,V2H(P)} Interrupt Control Unit
maintainers:
- Fabrizio Castro <fabrizio.castro.jz@renesas.com>
@@ -20,7 +20,9 @@ description:
properties:
compatible:
- const: renesas,r9a09g057-icu # RZ/V2H(P)
+ enum:
+ - renesas,r9a09g047-icu # RZ/G3E
+ - renesas,r9a09g057-icu # RZ/V2H(P)
'#interrupt-cells':
description: The first cell is the SPI number of the NMI or the
diff --git a/Documentation/devicetree/bindings/interrupt-controller/riscv,aplic.yaml b/Documentation/devicetree/bindings/interrupt-controller/riscv,aplic.yaml
index 190a6499c932..bef00521d5da 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/riscv,aplic.yaml
+++ b/Documentation/devicetree/bindings/interrupt-controller/riscv,aplic.yaml
@@ -91,6 +91,14 @@ properties:
Firmware must configure interrupt delegation registers based on
interrupt delegation list.
+ riscv,hart-indexes:
+ $ref: /schemas/types.yaml#/definitions/uint32-array
+ minItems: 1
+ maxItems: 16384
+ description:
+ A list of hart indexes that APLIC should use to address each hart
+ that is mentioned in the "interrupts-extended" property.
+
dependencies:
riscv,delegation: [ "riscv,children" ]
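A hedged sketch of how riscv,hart-indexes pairs up with interrupts-extended on a direct-mode APLIC with two hart contexts; the register address, source count, CPU intc labels and hart indexes are placeholders.

    interrupt-controller@d000000 {
        compatible = "qemu,aplic", "riscv,aplic";
        reg = <0xd000000 0x8000>;
        interrupt-controller;
        #interrupt-cells = <2>;
        riscv,num-sources = <96>;
        interrupts-extended = <&cpu0_intc 9>, <&cpu1_intc 9>;
        /* first context uses hart index 0, second context uses hart index 4 */
        riscv,hart-indexes = <0 4>;
    };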
diff --git a/Documentation/devicetree/bindings/interrupt-controller/sophgo,sg2042-msi.yaml b/Documentation/devicetree/bindings/interrupt-controller/sophgo,sg2042-msi.yaml
new file mode 100644
index 000000000000..e1ffd55fa7bf
--- /dev/null
+++ b/Documentation/devicetree/bindings/interrupt-controller/sophgo,sg2042-msi.yaml
@@ -0,0 +1,61 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/interrupt-controller/sophgo,sg2042-msi.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Sophgo SG2042 MSI Controller
+
+maintainers:
+ - Chen Wang <unicorn_wang@outlook.com>
+
+description:
+ This interrupt controller, present in the Sophgo SG2042, transforms PCIe MSI
+ interrupts into PLIC interrupts.
+
+allOf:
+ - $ref: /schemas/interrupt-controller/msi-controller.yaml#
+
+properties:
+ compatible:
+ const: sophgo,sg2042-msi
+
+ reg:
+ items:
+ - description: clear register
+ - description: msi doorbell address
+
+ reg-names:
+ items:
+ - const: clr
+ - const: doorbell
+
+ msi-controller: true
+
+ msi-ranges:
+ maxItems: 1
+
+ "#msi-cells":
+ const: 0
+
+required:
+ - compatible
+ - reg
+ - reg-names
+ - msi-controller
+ - msi-ranges
+ - "#msi-cells"
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+ msi-controller@30000000 {
+ compatible = "sophgo,sg2042-msi";
+ reg = <0x30000000 0x4>, <0x30000008 0x4>;
+ reg-names = "clr", "doorbell";
+ msi-controller;
+ #msi-cells = <0>;
+ msi-ranges = <&plic 64 IRQ_TYPE_LEVEL_HIGH 32>;
+ };
diff --git a/Documentation/devicetree/bindings/iommu/arm,smmu.yaml b/Documentation/devicetree/bindings/iommu/arm,smmu.yaml
index 032fdc27127b..7b9d5507d6cc 100644
--- a/Documentation/devicetree/bindings/iommu/arm,smmu.yaml
+++ b/Documentation/devicetree/bindings/iommu/arm,smmu.yaml
@@ -90,6 +90,7 @@ properties:
- enum:
- qcom,qcm2290-smmu-500
- qcom,qcs615-smmu-500
+ - qcom,qcs8300-smmu-500
- qcom,sa8255p-smmu-500
- qcom,sa8775p-smmu-500
- qcom,sar2130p-smmu-500
@@ -397,6 +398,7 @@ allOf:
compatible:
contains:
enum:
+ - qcom,qcs8300-smmu-500
- qcom,sa8775p-smmu-500
- qcom,sc7280-smmu-500
- qcom,sc8280xp-smmu-500
@@ -581,7 +583,6 @@ allOf:
- cavium,smmu-v2
- marvell,ap806-smmu-500
- nvidia,smmu-500
- - qcom,qcs8300-smmu-500
- qcom,qdu1000-smmu-500
- qcom,sa8255p-smmu-500
- qcom,sc7180-smmu-500
diff --git a/Documentation/devicetree/bindings/iommu/qcom,iommu.yaml b/Documentation/devicetree/bindings/iommu/qcom,iommu.yaml
index 5ae9a628261f..3e5623edd207 100644
--- a/Documentation/devicetree/bindings/iommu/qcom,iommu.yaml
+++ b/Documentation/devicetree/bindings/iommu/qcom,iommu.yaml
@@ -22,6 +22,7 @@ properties:
- enum:
- qcom,msm8916-iommu
- qcom,msm8917-iommu
+ - qcom,msm8937-iommu
- qcom,msm8953-iommu
- const: qcom,msm-iommu-v1
- items:
diff --git a/Documentation/devicetree/bindings/media/aspeed,video-engine.yaml b/Documentation/devicetree/bindings/media/aspeed,video-engine.yaml
new file mode 100644
index 000000000000..682bba20778c
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/aspeed,video-engine.yaml
@@ -0,0 +1,70 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/media/aspeed,video-engine.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: ASPEED Video Engine
+
+maintainers:
+ - Eddie James <eajames@linux.ibm.com>
+
+description:
+ The Video Engine (VE) embedded in the ASPEED SoCs can be configured to
+ capture and compress video data from digital or analog sources.
+
+properties:
+ compatible:
+ enum:
+ - aspeed,ast2400-video-engine
+ - aspeed,ast2500-video-engine
+ - aspeed,ast2600-video-engine
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ maxItems: 2
+
+ clock-names:
+ items:
+ - const: vclk
+ - const: eclk
+
+ resets:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ memory-region:
+ maxItems: 1
+ description: |
+ Phandle to the reserved memory node to be associated with the
+ VE. The VE acquires memory space for three purposes:
+ 1. JPEG header
+ 2. Compressed result
+ 3. Temporary transformed image data
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - clock-names
+ - interrupts
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/clock/ast2600-clock.h>
+
+ video@1e700000 {
+ compatible = "aspeed,ast2600-video-engine";
+ reg = <0x1e700000 0x1000>;
+ clocks = <&syscon ASPEED_CLK_GATE_VCLK>,
+ <&syscon ASPEED_CLK_GATE_ECLK>;
+ clock-names = "vclk", "eclk";
+ interrupts = <GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>;
+ };
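The example above leaves out the optional resets and memory-region properties. Carried over from the text binding removed below, they would be wired roughly as follows; the reserved-memory label is a placeholder defined elsewhere and the reset macro is the one the old binding used.

    video@1e700000 {
        compatible = "aspeed,ast2600-video-engine";
        reg = <0x1e700000 0x1000>;
        clocks = <&syscon ASPEED_CLK_GATE_VCLK>,
                 <&syscon ASPEED_CLK_GATE_ECLK>;
        clock-names = "vclk", "eclk";
        interrupts = <GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>;
        resets = <&syscon ASPEED_RESET_VIDEO>;
        memory-region = <&video_engine_memory>;
    };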
diff --git a/Documentation/devicetree/bindings/media/aspeed-video.txt b/Documentation/devicetree/bindings/media/aspeed-video.txt
deleted file mode 100644
index d2ca32512272..000000000000
--- a/Documentation/devicetree/bindings/media/aspeed-video.txt
+++ /dev/null
@@ -1,33 +0,0 @@
-* Device tree bindings for Aspeed Video Engine
-
-The Video Engine (VE) embedded in the Aspeed AST2400/2500/2600 SOCs can
-capture and compress video data from digital or analog sources.
-
-Required properties:
- - compatible: "aspeed,ast2400-video-engine" or
- "aspeed,ast2500-video-engine" or
- "aspeed,ast2600-video-engine"
- - reg: contains the offset and length of the VE memory region
- - clocks: clock specifiers for the syscon clocks associated with
- the VE (ordering must match the clock-names property)
- - clock-names: "vclk" and "eclk"
- - resets: reset specifier for the syscon reset associated with
- the VE
- - interrupts: the interrupt associated with the VE on this platform
-
-Optional properties:
- - memory-region:
- phandle to a memory region to allocate from, as defined in
- Documentation/devicetree/bindings/reserved-memory/reserved-memory.txt
-
-Example:
-
-video-engine@1e700000 {
- compatible = "aspeed,ast2500-video-engine";
- reg = <0x1e700000 0x20000>;
- clocks = <&syscon ASPEED_CLK_GATE_VCLK>, <&syscon ASPEED_CLK_GATE_ECLK>;
- clock-names = "vclk", "eclk";
- resets = <&syscon ASPEED_RESET_VIDEO>;
- interrupts = <7>;
- memory-region = <&video_engine_memory>;
-};
diff --git a/Documentation/devicetree/bindings/media/i2c/adv7180.yaml b/Documentation/devicetree/bindings/media/i2c/adv7180.yaml
index 4371a0ef2761..9ee1483775f6 100644
--- a/Documentation/devicetree/bindings/media/i2c/adv7180.yaml
+++ b/Documentation/devicetree/bindings/media/i2c/adv7180.yaml
@@ -49,6 +49,10 @@ properties:
Indicates that the output is a BT.656-4 compatible stream.
type: boolean
+ interrupts:
+ items:
+ - description: The GPIO connected to the INTRQ pin.
+
port:
$ref: /schemas/graph.yaml#/$defs/port-base
unevaluatedProperties: false
diff --git a/Documentation/devicetree/bindings/media/i2c/st,st-mipid02.yaml b/Documentation/devicetree/bindings/media/i2c/st,st-mipid02.yaml
index b68141264c0e..4d40e75b4e1e 100644
--- a/Documentation/devicetree/bindings/media/i2c/st,st-mipid02.yaml
+++ b/Documentation/devicetree/bindings/media/i2c/st,st-mipid02.yaml
@@ -71,7 +71,7 @@ properties:
description:
Any lane can be inverted or not.
minItems: 1
- maxItems: 2
+ maxItems: 3
required:
- data-lanes
diff --git a/Documentation/devicetree/bindings/media/mediatek,vcodec-subdev-decoder.yaml b/Documentation/devicetree/bindings/media/mediatek,vcodec-subdev-decoder.yaml
index 5865e6f0be89..bf8082d87ac0 100644
--- a/Documentation/devicetree/bindings/media/mediatek,vcodec-subdev-decoder.yaml
+++ b/Documentation/devicetree/bindings/media/mediatek,vcodec-subdev-decoder.yaml
@@ -4,52 +4,70 @@
$id: http://devicetree.org/schemas/media/mediatek,vcodec-subdev-decoder.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
-title: Mediatek Video Decode Accelerator With Multi Hardware
+title: MediaTek Video Decode Accelerator With Multi Hardware
maintainers:
- Yunfei Dong <yunfei.dong@mediatek.com>
description: |
- Mediatek Video Decode is the video decode hardware present in Mediatek
- SoCs which supports high resolution decoding functionalities. Required
- parent and child device node.
-
- About the Decoder Hardware Block Diagram, please check below:
-
- +------------------------------------------------+-------------------------------------+
- | | |
- | input -> lat soc HW -> lat HW -> lat buffer --|--> lat buffer -> core HW -> output |
- | || || | || |
- +------------||-------------||-------------------+---------------------||--------------+
- || lat || | core workqueue <parent>
- -------------||-------------||-------------------|---------------------||---------------
- ||<------------||----------------HW index---------------->|| <child>
- \/ \/ \/
- +-------------------------------------------------------------+
- | enable/disable |
- | clk power irq iommu |
- | (lat/lat soc/core0/core1) |
- +-------------------------------------------------------------+
-
- As above, there are parent and child devices, child mean each hardware. The child device
- controls the information of each hardware independent which include clk/power/irq.
-
- There are two workqueues in parent device: lat workqueue and core workqueue. They are used
- to lat and core hardware decoder. Lat workqueue need to get input bitstream and lat buffer,
- then enable lat to decode, writing the result to lat buffer, dislabe hardware when lat decode
- done. Core workqueue need to get lat buffer and output buffer, then enable core to decode,
- writing the result to output buffer, disable hardware when core decode done. These two
- hardwares will decode each frame cyclically.
-
- For the smi common may not the same for each hardware, can't combine all hardware in one node,
- or leading to iommu fault when access dram data.
-
- Lat soc is a hardware which is related with some larb(local arbiter) ports. For mt8195
- platform, there are some ports like RDMA, UFO in lat soc larb, need to enable its power and
- clock when lat start to work, don't have interrupt.
-
- mt8195: lat soc HW + lat HW + core HW
- mt8192: lat HW + core HW
+ MediaTek Video Decode Accelerator is the video decoding hardware present in
+ MediaTek SoCs that supports high-resolution decoding functionalities.
+ It consists of parent and child nodes.
+
+ The decoder hardware block diagram is shown below:
+
+ +------------------------------------------------+------------------------------+
+ | | |
+ | input -> LAT-SoC HW -> LAT HW -> LAT buffer --|--> Core HW -> output buffer |
+ | || || | || |
+ +--------------||-----------||-------------------+-------||---------------------+
+ LAT Workqueue | Core Workqueue <parent>
+ ---------------||-----------||-------------------|-------||----------------------
+ ||<----------||---------HW index--------->|| <child>
+ \/ \/ \/
+ +-------------------------------------------------------------+
+ | enable/disable |
+ | clk power irq iommu |
+ | (lat/lat-soc/core0/core1) |
+ +-------------------------------------------------------------+
+
+ The child nodes represent the individual hardware blocks within the decoding
+ pipeline, such as LAT-SoC, LAT and Core.
+ Each child node is responsible for managing the dedicated resources of the
+ hardware, such as clocks, power domains, interrupts and IOMMUs.
+
+ The parent node is a central point of control for the child nodes.
+ It identifies the specific video decoding pipeline architecture used by the
+ SoC, manages the shared resources like workqueues and platform data, and
+ handles V4L2 API calls on behalf of the underlying hardware.
+
+ The parent utilizes two workqueues to manage the decoding process.
+ 1. LAT Workqueue, for LAT-SoC and LAT decoder:
+ Its workers take input bitstream and LAT buffer, enable the hardware for
+ decoding tasks, write the result to LAT buffer, and disable the hardware
+ after the LAT decoding is done.
+ 2. Core Workqueue, for Core decoder:
+ Its workers take LAT buffer and output buffer, enable the hardware for
+ decoding tasks, write the result to output buffer, and disable the hardware
+ after the Core decoding is done.
+
+ These hardware blocks decode each frame cyclically.
+
+ The hardware might be associated with different SMI-common devices.
+ To prevent IOMMU faults during DRAM access in such cases, each hardware block
+ with a unique SMI-common device must be placed under a separate parent node in
+ the device tree.
+
+ LAT-SoC refers to another hardware block that is connected to additional LARB
+ (local arbiter) ports, such as RDMA and UFO.
+ It requires independent power and clock control to work with the LAT decoder, and
+ it doesn't have a dedicated interrupt.
+
+ The video decoding pipeline architectures used across various MediaTek SoCs:
+ MT8195: LAT-SoC + LAT + Core
+ MT8192: LAT + Core
+ MT8188: LAT + Core
+ MT8186: Core
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/media/mediatek-jpeg-decoder.yaml b/Documentation/devicetree/bindings/media/mediatek-jpeg-decoder.yaml
index cfabf360f278..a4aacd3eb189 100644
--- a/Documentation/devicetree/bindings/media/mediatek-jpeg-decoder.yaml
+++ b/Documentation/devicetree/bindings/media/mediatek-jpeg-decoder.yaml
@@ -44,7 +44,8 @@ properties:
maxItems: 1
iommus:
- maxItems: 2
+ minItems: 2
+ maxItems: 32
description: |
Points to the respective IOMMU block with master port as argument, see
Documentation/devicetree/bindings/iommu/mediatek,iommu.yaml for details.
diff --git a/Documentation/devicetree/bindings/media/mediatek-jpeg-encoder.yaml b/Documentation/devicetree/bindings/media/mediatek-jpeg-encoder.yaml
index 83c020a673d6..5b15f8977f67 100644
--- a/Documentation/devicetree/bindings/media/mediatek-jpeg-encoder.yaml
+++ b/Documentation/devicetree/bindings/media/mediatek-jpeg-encoder.yaml
@@ -39,7 +39,7 @@ properties:
iommus:
minItems: 2
- maxItems: 4
+ maxItems: 32
description: |
Points to the respective IOMMU block with master port as argument, see
Documentation/devicetree/bindings/iommu/mediatek,iommu.yaml for details.
diff --git a/Documentation/devicetree/bindings/media/qcom,sc7280-camss.yaml b/Documentation/devicetree/bindings/media/qcom,sc7280-camss.yaml
index e11141b812a0..ee35e3bc97ff 100644
--- a/Documentation/devicetree/bindings/media/qcom,sc7280-camss.yaml
+++ b/Documentation/devicetree/bindings/media/qcom,sc7280-camss.yaml
@@ -55,8 +55,8 @@ properties:
- const: csiphy3_timer
- const: csiphy4
- const: csiphy4_timer
- - const: gcc_camera_ahb
- - const: gcc_cam_hf_axi
+ - const: gcc_axi_hf
+ - const: gcc_axi_sf
- const: icp_ahb
- const: vfe0
- const: vfe0_axi
@@ -310,8 +310,8 @@ examples:
<&camcc CAM_CC_CSI3PHYTIMER_CLK>,
<&camcc CAM_CC_CSIPHY4_CLK>,
<&camcc CAM_CC_CSI4PHYTIMER_CLK>,
- <&gcc GCC_CAMERA_AHB_CLK>,
<&gcc GCC_CAMERA_HF_AXI_CLK>,
+ <&gcc GCC_CAMERA_SF_AXI_CLK>,
<&camcc CAM_CC_ICP_AHB_CLK>,
<&camcc CAM_CC_IFE_0_CLK>,
<&camcc CAM_CC_IFE_0_AXI_CLK>,
@@ -343,8 +343,8 @@ examples:
"csiphy3_timer",
"csiphy4",
"csiphy4_timer",
- "gcc_camera_ahb",
- "gcc_cam_hf_axi",
+ "gcc_axi_hf",
+ "gcc_axi_sf",
"icp_ahb",
"vfe0",
"vfe0_axi",
diff --git a/Documentation/devicetree/bindings/media/qcom,sdm670-camss.yaml b/Documentation/devicetree/bindings/media/qcom,sdm670-camss.yaml
new file mode 100644
index 000000000000..35c40fe22376
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/qcom,sdm670-camss.yaml
@@ -0,0 +1,318 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/media/qcom,sdm670-camss.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm SDM670 Camera Subsystem (CAMSS)
+
+maintainers:
+ - Richard Acayan <mailingradian@gmail.com>
+
+description:
+ The CAMSS IP is a CSI decoder and ISP present on Qualcomm platforms.
+
+properties:
+ compatible:
+ const: qcom,sdm670-camss
+
+ reg:
+ maxItems: 9
+
+ reg-names:
+ items:
+ - const: csid0
+ - const: csid1
+ - const: csid2
+ - const: csiphy0
+ - const: csiphy1
+ - const: csiphy2
+ - const: vfe0
+ - const: vfe1
+ - const: vfe_lite
+
+ interrupts:
+ maxItems: 9
+
+ interrupt-names:
+ items:
+ - const: csid0
+ - const: csid1
+ - const: csid2
+ - const: csiphy0
+ - const: csiphy1
+ - const: csiphy2
+ - const: vfe0
+ - const: vfe1
+ - const: vfe_lite
+
+ clocks:
+ maxItems: 22
+
+ clock-names:
+ items:
+ - const: camnoc_axi
+ - const: cpas_ahb
+ - const: csi0
+ - const: csi1
+ - const: csi2
+ - const: csiphy0
+ - const: csiphy0_timer
+ - const: csiphy1
+ - const: csiphy1_timer
+ - const: csiphy2
+ - const: csiphy2_timer
+ - const: gcc_camera_ahb
+ - const: gcc_camera_axi
+ - const: soc_ahb
+ - const: vfe0
+ - const: vfe0_axi
+ - const: vfe0_cphy_rx
+ - const: vfe1
+ - const: vfe1_axi
+ - const: vfe1_cphy_rx
+ - const: vfe_lite
+ - const: vfe_lite_cphy_rx
+
+ iommus:
+ maxItems: 4
+
+ power-domains:
+ items:
+ - description: IFE0 GDSC - Image Front End, Global Distributed Switch Controller.
+ - description: IFE1 GDSC - Image Front End, Global Distributed Switch Controller.
+ - description: Titan Top GDSC - Titan ISP Block, Global Distributed Switch Controller.
+
+ power-domain-names:
+ items:
+ - const: ife0
+ - const: ife1
+ - const: top
+
+ vdda-phy-supply:
+ description:
+ Phandle to a regulator supply to PHY core block.
+
+ vdda-pll-supply:
+ description:
+ Phandle to 1.8V regulator supply to PHY refclk pll block.
+
+ ports:
+ $ref: /schemas/graph.yaml#/properties/ports
+
+ description:
+ CSI input ports.
+
+ properties:
+ port@0:
+ $ref: /schemas/graph.yaml#/$defs/port-base
+ unevaluatedProperties: false
+ description:
+ Input port for receiving CSI data from CSIPHY0.
+
+ properties:
+ endpoint:
+ $ref: video-interfaces.yaml#
+ unevaluatedProperties: false
+
+ properties:
+ clock-lanes:
+ maxItems: 1
+
+ data-lanes:
+ minItems: 1
+ maxItems: 4
+
+ required:
+ - clock-lanes
+ - data-lanes
+
+ port@1:
+ $ref: /schemas/graph.yaml#/$defs/port-base
+ unevaluatedProperties: false
+ description:
+ Input port for receiving CSI data from CSIPHY1.
+
+ properties:
+ endpoint:
+ $ref: video-interfaces.yaml#
+ unevaluatedProperties: false
+
+ properties:
+ clock-lanes:
+ maxItems: 1
+
+ data-lanes:
+ minItems: 1
+ maxItems: 4
+
+ required:
+ - clock-lanes
+ - data-lanes
+
+ port@2:
+ $ref: /schemas/graph.yaml#/$defs/port-base
+ unevaluatedProperties: false
+ description:
+ Input port for receiving CSI data from CSIPHY2.
+
+ properties:
+ endpoint:
+ $ref: video-interfaces.yaml#
+ unevaluatedProperties: false
+
+ properties:
+ clock-lanes:
+ maxItems: 1
+
+ data-lanes:
+ minItems: 1
+ maxItems: 4
+
+ required:
+ - clock-lanes
+ - data-lanes
+
+required:
+ - compatible
+ - reg
+ - reg-names
+ - interrupts
+ - interrupt-names
+ - clocks
+ - clock-names
+ - iommus
+ - power-domains
+ - power-domain-names
+ - vdda-phy-supply
+ - vdda-pll-supply
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/qcom,camcc-sdm845.h>
+ #include <dt-bindings/clock/qcom,gcc-sdm845.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+
+ soc {
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ isp@acb3000 {
+ compatible = "qcom,sdm670-camss";
+
+ reg = <0 0x0acb3000 0 0x1000>,
+ <0 0x0acba000 0 0x1000>,
+ <0 0x0acc8000 0 0x1000>,
+ <0 0x0ac65000 0 0x1000>,
+ <0 0x0ac66000 0 0x1000>,
+ <0 0x0ac67000 0 0x1000>,
+ <0 0x0acaf000 0 0x4000>,
+ <0 0x0acb6000 0 0x4000>,
+ <0 0x0acc4000 0 0x4000>;
+ reg-names = "csid0",
+ "csid1",
+ "csid2",
+ "csiphy0",
+ "csiphy1",
+ "csiphy2",
+ "vfe0",
+ "vfe1",
+ "vfe_lite";
+
+ interrupts = <GIC_SPI 464 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 466 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 468 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 477 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 478 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 479 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 465 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 467 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 469 IRQ_TYPE_EDGE_RISING>;
+ interrupt-names = "csid0",
+ "csid1",
+ "csid2",
+ "csiphy0",
+ "csiphy1",
+ "csiphy2",
+ "vfe0",
+ "vfe1",
+ "vfe_lite";
+
+ clocks = <&camcc CAM_CC_CAMNOC_AXI_CLK>,
+ <&camcc CAM_CC_CPAS_AHB_CLK>,
+ <&camcc CAM_CC_IFE_0_CSID_CLK>,
+ <&camcc CAM_CC_IFE_1_CSID_CLK>,
+ <&camcc CAM_CC_IFE_LITE_CSID_CLK>,
+ <&camcc CAM_CC_CSIPHY0_CLK>,
+ <&camcc CAM_CC_CSI0PHYTIMER_CLK>,
+ <&camcc CAM_CC_CSIPHY1_CLK>,
+ <&camcc CAM_CC_CSI1PHYTIMER_CLK>,
+ <&camcc CAM_CC_CSIPHY2_CLK>,
+ <&camcc CAM_CC_CSI2PHYTIMER_CLK>,
+ <&gcc GCC_CAMERA_AHB_CLK>,
+ <&gcc GCC_CAMERA_AXI_CLK>,
+ <&camcc CAM_CC_SOC_AHB_CLK>,
+ <&camcc CAM_CC_IFE_0_CLK>,
+ <&camcc CAM_CC_IFE_0_AXI_CLK>,
+ <&camcc CAM_CC_IFE_0_CPHY_RX_CLK>,
+ <&camcc CAM_CC_IFE_1_CLK>,
+ <&camcc CAM_CC_IFE_1_AXI_CLK>,
+ <&camcc CAM_CC_IFE_1_CPHY_RX_CLK>,
+ <&camcc CAM_CC_IFE_LITE_CLK>,
+ <&camcc CAM_CC_IFE_LITE_CPHY_RX_CLK>;
+ clock-names = "camnoc_axi",
+ "cpas_ahb",
+ "csi0",
+ "csi1",
+ "csi2",
+ "csiphy0",
+ "csiphy0_timer",
+ "csiphy1",
+ "csiphy1_timer",
+ "csiphy2",
+ "csiphy2_timer",
+ "gcc_camera_ahb",
+ "gcc_camera_axi",
+ "soc_ahb",
+ "vfe0",
+ "vfe0_axi",
+ "vfe0_cphy_rx",
+ "vfe1",
+ "vfe1_axi",
+ "vfe1_cphy_rx",
+ "vfe_lite",
+ "vfe_lite_cphy_rx";
+
+ iommus = <&apps_smmu 0x808 0x0>,
+ <&apps_smmu 0x810 0x8>,
+ <&apps_smmu 0xc08 0x0>,
+ <&apps_smmu 0xc10 0x8>;
+
+ power-domains = <&camcc IFE_0_GDSC>,
+ <&camcc IFE_1_GDSC>,
+ <&camcc TITAN_TOP_GDSC>;
+ power-domain-names = "ife0",
+ "ife1",
+ "top";
+
+ vdda-phy-supply = <&vreg_l1a_1p225>;
+ vdda-pll-supply = <&vreg_l8a_1p8>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ csiphy_ep0: endpoint {
+ clock-lanes = <7>;
+ data-lanes = <0 1 2 3>;
+ remote-endpoint = <&front_sensor_ep>;
+ };
+ };
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/media/qcom,sm8550-camss.yaml b/Documentation/devicetree/bindings/media/qcom,sm8550-camss.yaml
new file mode 100644
index 000000000000..cd34f14916b4
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/qcom,sm8550-camss.yaml
@@ -0,0 +1,597 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/media/qcom,sm8550-camss.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm SM8550 Camera Subsystem (CAMSS)
+
+maintainers:
+ - Depeng Shao <quic_depengs@quicinc.com>
+
+description:
+ The CAMSS IP is a CSI decoder and ISP present on Qualcomm platforms.
+
+properties:
+ compatible:
+ const: qcom,sm8550-camss
+
+ reg:
+ maxItems: 19
+
+ reg-names:
+ items:
+ - const: csid0
+ - const: csid1
+ - const: csid2
+ - const: csid_lite0
+ - const: csid_lite1
+ - const: csid_wrapper
+ - const: csiphy0
+ - const: csiphy1
+ - const: csiphy2
+ - const: csiphy3
+ - const: csiphy4
+ - const: csiphy5
+ - const: csiphy6
+ - const: csiphy7
+ - const: vfe0
+ - const: vfe1
+ - const: vfe2
+ - const: vfe_lite0
+ - const: vfe_lite1
+
+ clocks:
+ maxItems: 36
+
+ clock-names:
+ items:
+ - const: camnoc_axi
+ - const: cpas_ahb
+ - const: cpas_fast_ahb_clk
+ - const: cpas_ife_lite
+ - const: cpas_vfe0
+ - const: cpas_vfe1
+ - const: cpas_vfe2
+ - const: csid
+ - const: csiphy0
+ - const: csiphy0_timer
+ - const: csiphy1
+ - const: csiphy1_timer
+ - const: csiphy2
+ - const: csiphy2_timer
+ - const: csiphy3
+ - const: csiphy3_timer
+ - const: csiphy4
+ - const: csiphy4_timer
+ - const: csiphy5
+ - const: csiphy5_timer
+ - const: csiphy6
+ - const: csiphy6_timer
+ - const: csiphy7
+ - const: csiphy7_timer
+ - const: csiphy_rx
+ - const: gcc_axi_hf
+ - const: vfe0
+ - const: vfe0_fast_ahb
+ - const: vfe1
+ - const: vfe1_fast_ahb
+ - const: vfe2
+ - const: vfe2_fast_ahb
+ - const: vfe_lite
+ - const: vfe_lite_ahb
+ - const: vfe_lite_cphy_rx
+ - const: vfe_lite_csid
+
+ interrupts:
+ maxItems: 18
+
+ interrupt-names:
+ items:
+ - const: csid0
+ - const: csid1
+ - const: csid2
+ - const: csid_lite0
+ - const: csid_lite1
+ - const: csiphy0
+ - const: csiphy1
+ - const: csiphy2
+ - const: csiphy3
+ - const: csiphy4
+ - const: csiphy5
+ - const: csiphy6
+ - const: csiphy7
+ - const: vfe0
+ - const: vfe1
+ - const: vfe2
+ - const: vfe_lite0
+ - const: vfe_lite1
+
+ interconnects:
+ maxItems: 2
+
+ interconnect-names:
+ items:
+ - const: ahb
+ - const: hf_0_mnoc
+
+ iommus:
+ maxItems: 1
+
+ power-domains:
+ items:
+ - description: IFE0 GDSC - Image Front End, Global Distributed Switch Controller.
+ - description: IFE1 GDSC - Image Front End, Global Distributed Switch Controller.
+ - description: IFE2 GDSC - Image Front End, Global Distributed Switch Controller.
+ - description: Titan GDSC - Titan ISP Block, Global Distributed Switch Controller.
+
+ power-domain-names:
+ items:
+ - const: ife0
+ - const: ife1
+ - const: ife2
+ - const: top
+
+ vdda-phy-supply:
+ description:
+ Phandle to a regulator supply to PHY core block.
+
+ vdda-pll-supply:
+ description:
+ Phandle to 1.2V regulator supply to PHY refclk pll block.
+
+ ports:
+ $ref: /schemas/graph.yaml#/properties/ports
+
+ description:
+ CSI input ports.
+
+ properties:
+ port@0:
+ $ref: /schemas/graph.yaml#/$defs/port-base
+ unevaluatedProperties: false
+ description:
+ Input port for receiving CSI data on CSI0.
+
+ properties:
+ endpoint:
+ $ref: video-interfaces.yaml#
+ unevaluatedProperties: false
+
+ properties:
+ clock-lanes:
+ maxItems: 1
+
+ data-lanes:
+ minItems: 1
+ maxItems: 4
+
+ bus-type:
+ enum:
+ - 1 # MEDIA_BUS_TYPE_CSI2_CPHY
+ - 4 # MEDIA_BUS_TYPE_CSI2_DPHY
+
+ required:
+ - clock-lanes
+ - data-lanes
+
+ port@1:
+ $ref: /schemas/graph.yaml#/$defs/port-base
+ unevaluatedProperties: false
+ description:
+ Input port for receiving CSI data on CSI1.
+
+ properties:
+ endpoint:
+ $ref: video-interfaces.yaml#
+ unevaluatedProperties: false
+
+ properties:
+ clock-lanes:
+ maxItems: 1
+
+ data-lanes:
+ minItems: 1
+ maxItems: 4
+
+ bus-type:
+ enum:
+ - 1 # MEDIA_BUS_TYPE_CSI2_CPHY
+ - 4 # MEDIA_BUS_TYPE_CSI2_DPHY
+
+ required:
+ - clock-lanes
+ - data-lanes
+
+ port@2:
+ $ref: /schemas/graph.yaml#/$defs/port-base
+ unevaluatedProperties: false
+ description:
+ Input port for receiving CSI data on CSI2.
+
+ properties:
+ endpoint:
+ $ref: video-interfaces.yaml#
+ unevaluatedProperties: false
+
+ properties:
+ clock-lanes:
+ maxItems: 1
+
+ data-lanes:
+ minItems: 1
+ maxItems: 4
+
+ bus-type:
+ enum:
+ - 1 # MEDIA_BUS_TYPE_CSI2_CPHY
+ - 4 # MEDIA_BUS_TYPE_CSI2_DPHY
+
+ required:
+ - clock-lanes
+ - data-lanes
+
+ port@3:
+ $ref: /schemas/graph.yaml#/$defs/port-base
+ unevaluatedProperties: false
+ description:
+ Input port for receiving CSI data on CSI3.
+
+ properties:
+ endpoint:
+ $ref: video-interfaces.yaml#
+ unevaluatedProperties: false
+
+ properties:
+ clock-lanes:
+ maxItems: 1
+
+ data-lanes:
+ minItems: 1
+ maxItems: 4
+
+ bus-type:
+ enum:
+ - 1 # MEDIA_BUS_TYPE_CSI2_CPHY
+ - 4 # MEDIA_BUS_TYPE_CSI2_DPHY
+
+ required:
+ - clock-lanes
+ - data-lanes
+
+ port@4:
+ $ref: /schemas/graph.yaml#/$defs/port-base
+ unevaluatedProperties: false
+ description:
+ Input port for receiving CSI data on CSI4.
+
+ properties:
+ endpoint:
+ $ref: video-interfaces.yaml#
+ unevaluatedProperties: false
+
+ properties:
+ clock-lanes:
+ maxItems: 1
+
+ data-lanes:
+ minItems: 1
+ maxItems: 4
+
+ bus-type:
+ enum:
+ - 1 # MEDIA_BUS_TYPE_CSI2_CPHY
+ - 4 # MEDIA_BUS_TYPE_CSI2_DPHY
+
+ required:
+ - clock-lanes
+ - data-lanes
+
+ port@5:
+ $ref: /schemas/graph.yaml#/$defs/port-base
+ unevaluatedProperties: false
+ description:
+ Input port for receiving CSI data on CSI5.
+
+ properties:
+ endpoint:
+ $ref: video-interfaces.yaml#
+ unevaluatedProperties: false
+
+ properties:
+ clock-lanes:
+ maxItems: 1
+
+ data-lanes:
+ minItems: 1
+ maxItems: 4
+
+ bus-type:
+ enum:
+ - 1 # MEDIA_BUS_TYPE_CSI2_CPHY
+ - 4 # MEDIA_BUS_TYPE_CSI2_DPHY
+
+ required:
+ - clock-lanes
+ - data-lanes
+
+ port@6:
+ $ref: /schemas/graph.yaml#/$defs/port-base
+ unevaluatedProperties: false
+ description:
+ Input port for receiving CSI data on CSI6.
+
+ properties:
+ endpoint:
+ $ref: video-interfaces.yaml#
+ unevaluatedProperties: false
+
+ properties:
+ clock-lanes:
+ maxItems: 1
+
+ data-lanes:
+ minItems: 1
+ maxItems: 4
+
+ bus-type:
+ enum:
+ - 1 # MEDIA_BUS_TYPE_CSI2_CPHY
+ - 4 # MEDIA_BUS_TYPE_CSI2_DPHY
+
+ required:
+ - clock-lanes
+ - data-lanes
+
+ port@7:
+ $ref: /schemas/graph.yaml#/$defs/port-base
+ unevaluatedProperties: false
+ description:
+ Input port for receiving CSI data on CSI7.
+
+ properties:
+ endpoint:
+ $ref: video-interfaces.yaml#
+ unevaluatedProperties: false
+
+ properties:
+ clock-lanes:
+ maxItems: 1
+
+ data-lanes:
+ minItems: 1
+ maxItems: 4
+
+ bus-type:
+ enum:
+ - 1 # MEDIA_BUS_TYPE_CSI2_CPHY
+ - 4 # MEDIA_BUS_TYPE_CSI2_DPHY
+
+ required:
+ - clock-lanes
+ - data-lanes
+
+required:
+ - compatible
+ - reg
+ - reg-names
+ - clocks
+ - clock-names
+ - interrupts
+ - interrupt-names
+ - interconnects
+ - interconnect-names
+ - iommus
+ - power-domains
+ - power-domain-names
+ - vdda-phy-supply
+ - vdda-pll-supply
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/qcom,rpmh.h>
+ #include <dt-bindings/clock/qcom,sm8550-camcc.h>
+ #include <dt-bindings/clock/qcom,sm8550-gcc.h>
+ #include <dt-bindings/interconnect/qcom,icc.h>
+ #include <dt-bindings/interconnect/qcom,sm8550-rpmh.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/power/qcom-rpmpd.h>
+
+ soc {
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ isp@acb7000 {
+ compatible = "qcom,sm8550-camss";
+
+ reg = <0 0x0acb7000 0 0xd00>,
+ <0 0x0acb9000 0 0xd00>,
+ <0 0x0acbb000 0 0xd00>,
+ <0 0x0acca000 0 0xa00>,
+ <0 0x0acce000 0 0xa00>,
+ <0 0x0acb6000 0 0x1000>,
+ <0 0x0ace4000 0 0x2000>,
+ <0 0x0ace6000 0 0x2000>,
+ <0 0x0ace8000 0 0x2000>,
+ <0 0x0acea000 0 0x2000>,
+ <0 0x0acec000 0 0x2000>,
+ <0 0x0acee000 0 0x2000>,
+ <0 0x0acf0000 0 0x2000>,
+ <0 0x0acf2000 0 0x2000>,
+ <0 0x0ac62000 0 0xf000>,
+ <0 0x0ac71000 0 0xf000>,
+ <0 0x0ac80000 0 0xf000>,
+ <0 0x0accb000 0 0x1800>,
+ <0 0x0accf000 0 0x1800>;
+ reg-names = "csid0",
+ "csid1",
+ "csid2",
+ "csid_lite0",
+ "csid_lite1",
+ "csid_wrapper",
+ "csiphy0",
+ "csiphy1",
+ "csiphy2",
+ "csiphy3",
+ "csiphy4",
+ "csiphy5",
+ "csiphy6",
+ "csiphy7",
+ "vfe0",
+ "vfe1",
+ "vfe2",
+ "vfe_lite0",
+ "vfe_lite1";
+
+ clocks = <&camcc CAM_CC_CAMNOC_AXI_CLK>,
+ <&camcc CAM_CC_CPAS_AHB_CLK>,
+ <&camcc CAM_CC_CPAS_FAST_AHB_CLK>,
+ <&camcc CAM_CC_CPAS_IFE_LITE_CLK>,
+ <&camcc CAM_CC_CPAS_IFE_0_CLK>,
+ <&camcc CAM_CC_CPAS_IFE_1_CLK>,
+ <&camcc CAM_CC_CPAS_IFE_2_CLK>,
+ <&camcc CAM_CC_CSID_CLK>,
+ <&camcc CAM_CC_CSIPHY0_CLK>,
+ <&camcc CAM_CC_CSI0PHYTIMER_CLK>,
+ <&camcc CAM_CC_CSIPHY1_CLK>,
+ <&camcc CAM_CC_CSI1PHYTIMER_CLK>,
+ <&camcc CAM_CC_CSIPHY2_CLK>,
+ <&camcc CAM_CC_CSI2PHYTIMER_CLK>,
+ <&camcc CAM_CC_CSIPHY3_CLK>,
+ <&camcc CAM_CC_CSI3PHYTIMER_CLK>,
+ <&camcc CAM_CC_CSIPHY4_CLK>,
+ <&camcc CAM_CC_CSI4PHYTIMER_CLK>,
+ <&camcc CAM_CC_CSIPHY5_CLK>,
+ <&camcc CAM_CC_CSI5PHYTIMER_CLK>,
+ <&camcc CAM_CC_CSIPHY6_CLK>,
+ <&camcc CAM_CC_CSI6PHYTIMER_CLK>,
+ <&camcc CAM_CC_CSIPHY7_CLK>,
+ <&camcc CAM_CC_CSI7PHYTIMER_CLK>,
+ <&camcc CAM_CC_CSID_CSIPHY_RX_CLK>,
+ <&gcc GCC_CAMERA_HF_AXI_CLK>,
+ <&camcc CAM_CC_IFE_0_CLK>,
+ <&camcc CAM_CC_IFE_0_FAST_AHB_CLK>,
+ <&camcc CAM_CC_IFE_1_CLK>,
+ <&camcc CAM_CC_IFE_1_FAST_AHB_CLK>,
+ <&camcc CAM_CC_IFE_2_CLK>,
+ <&camcc CAM_CC_IFE_2_FAST_AHB_CLK>,
+ <&camcc CAM_CC_IFE_LITE_CLK>,
+ <&camcc CAM_CC_IFE_LITE_AHB_CLK>,
+ <&camcc CAM_CC_IFE_LITE_CPHY_RX_CLK>,
+ <&camcc CAM_CC_IFE_LITE_CSID_CLK>;
+ clock-names = "camnoc_axi",
+ "cpas_ahb",
+ "cpas_fast_ahb_clk",
+ "cpas_ife_lite",
+ "cpas_vfe0",
+ "cpas_vfe1",
+ "cpas_vfe2",
+ "csid",
+ "csiphy0",
+ "csiphy0_timer",
+ "csiphy1",
+ "csiphy1_timer",
+ "csiphy2",
+ "csiphy2_timer",
+ "csiphy3",
+ "csiphy3_timer",
+ "csiphy4",
+ "csiphy4_timer",
+ "csiphy5",
+ "csiphy5_timer",
+ "csiphy6",
+ "csiphy6_timer",
+ "csiphy7",
+ "csiphy7_timer",
+ "csiphy_rx",
+ "gcc_axi_hf",
+ "vfe0",
+ "vfe0_fast_ahb",
+ "vfe1",
+ "vfe1_fast_ahb",
+ "vfe2",
+ "vfe2_fast_ahb",
+ "vfe_lite",
+ "vfe_lite_ahb",
+ "vfe_lite_cphy_rx",
+ "vfe_lite_csid";
+
+ interrupts = <GIC_SPI 601 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 603 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 431 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 605 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 376 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 477 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 478 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 479 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 448 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 122 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 89 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 278 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 277 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 602 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 604 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 688 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 606 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 377 IRQ_TYPE_EDGE_RISING>;
+ interrupt-names = "csid0",
+ "csid1",
+ "csid2",
+ "csid_lite0",
+ "csid_lite1",
+ "csiphy0",
+ "csiphy1",
+ "csiphy2",
+ "csiphy3",
+ "csiphy4",
+ "csiphy5",
+ "csiphy6",
+ "csiphy7",
+ "vfe0",
+ "vfe1",
+ "vfe2",
+ "vfe_lite0",
+ "vfe_lite1";
+
+ interconnects = <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
+ &config_noc SLAVE_CAMERA_CFG QCOM_ICC_TAG_ACTIVE_ONLY>,
+ <&mmss_noc MASTER_CAMNOC_HF QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "ahb",
+ "hf_0_mnoc";
+
+ iommus = <&apps_smmu 0x800 0x20>;
+
+ power-domains = <&camcc CAM_CC_IFE_0_GDSC>,
+ <&camcc CAM_CC_IFE_1_GDSC>,
+ <&camcc CAM_CC_IFE_2_GDSC>,
+ <&camcc CAM_CC_TITAN_TOP_GDSC>;
+ power-domain-names = "ife0",
+ "ife1",
+ "ife2",
+ "top";
+
+ vdda-phy-supply = <&vreg_l1e_0p88>;
+ vdda-pll-supply = <&vreg_l3e_1p2>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ csiphy_ep0: endpoint@0 {
+ reg = <0>;
+ clock-lanes = <7>;
+ data-lanes = <0 1>;
+ remote-endpoint = <&sensor_ep>;
+ };
+ };
+ };
+ };
+ };
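The example above wires up only port@0; boards using further CSI inputs add the
corresponding port@N nodes under the same ports container. A minimal sketch of a
second, D-PHY attached input follows, assuming a hypothetical rear_sensor_ep
endpoint label on the sensor side:

    port@1 {
        reg = <1>;

        csiphy_ep1: endpoint {
            /* MEDIA_BUS_TYPE_CSI2_DPHY */
            bus-type = <4>;
            clock-lanes = <7>;
            data-lanes = <0 1 2 3>;
            remote-endpoint = <&rear_sensor_ep>;
        };
    };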
diff --git a/Documentation/devicetree/bindings/media/qcom,sm8550-iris.yaml b/Documentation/devicetree/bindings/media/qcom,sm8550-iris.yaml
new file mode 100644
index 000000000000..e424ea84c211
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/qcom,sm8550-iris.yaml
@@ -0,0 +1,158 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/media/qcom,sm8550-iris.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm iris video encode and decode accelerators
+
+maintainers:
+ - Vikash Garodia <quic_vgarodia@quicinc.com>
+ - Dikshita Agarwal <quic_dikshita@quicinc.com>
+
+description:
+ The iris video processing unit is a video encode and decode accelerator
+ present on Qualcomm platforms.
+
+allOf:
+ - $ref: qcom,venus-common.yaml#
+
+properties:
+ compatible:
+ const: qcom,sm8550-iris
+
+ power-domains:
+ maxItems: 4
+
+ power-domain-names:
+ items:
+ - const: venus
+ - const: vcodec0
+ - const: mxc
+ - const: mmcx
+
+ clocks:
+ maxItems: 3
+
+ clock-names:
+ items:
+ - const: iface
+ - const: core
+ - const: vcodec0_core
+
+ interconnects:
+ maxItems: 2
+
+ interconnect-names:
+ items:
+ - const: cpu-cfg
+ - const: video-mem
+
+ resets:
+ maxItems: 1
+
+ reset-names:
+ items:
+ - const: bus
+
+ iommus:
+ maxItems: 2
+
+ dma-coherent: true
+
+ operating-points-v2: true
+
+ opp-table:
+ type: object
+
+required:
+ - compatible
+ - power-domain-names
+ - interconnects
+ - interconnect-names
+ - resets
+ - reset-names
+ - iommus
+ - dma-coherent
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/qcom,rpmh.h>
+ #include <dt-bindings/clock/qcom,sm8550-gcc.h>
+ #include <dt-bindings/clock/qcom,sm8450-videocc.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/interconnect/qcom,icc.h>
+ #include <dt-bindings/interconnect/qcom,sm8550-rpmh.h>
+ #include <dt-bindings/power/qcom-rpmpd.h>
+ #include <dt-bindings/power/qcom,rpmhpd.h>
+
+ video-codec@aa00000 {
+ compatible = "qcom,sm8550-iris";
+ reg = <0x0aa00000 0xf0000>;
+ interrupts = <GIC_SPI 174 IRQ_TYPE_LEVEL_HIGH>;
+
+ power-domains = <&videocc VIDEO_CC_MVS0C_GDSC>,
+ <&videocc VIDEO_CC_MVS0_GDSC>,
+ <&rpmhpd RPMHPD_MXC>,
+ <&rpmhpd RPMHPD_MMCX>;
+ power-domain-names = "venus", "vcodec0", "mxc", "mmcx";
+
+ clocks = <&gcc GCC_VIDEO_AXI0_CLK>,
+ <&videocc VIDEO_CC_MVS0C_CLK>,
+ <&videocc VIDEO_CC_MVS0_CLK>;
+ clock-names = "iface", "core", "vcodec0_core";
+
+ interconnects = <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
+ &config_noc SLAVE_VENUS_CFG QCOM_ICC_TAG_ALWAYS>,
+ <&mmss_noc MASTER_VIDEO QCOM_ICC_TAG_ALWAYS
+ &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "cpu-cfg", "video-mem";
+
+ memory-region = <&video_mem>;
+
+ resets = <&gcc GCC_VIDEO_AXI0_CLK_ARES>;
+ reset-names = "bus";
+
+ iommus = <&apps_smmu 0x1940 0x0000>,
+ <&apps_smmu 0x1947 0x0000>;
+ dma-coherent;
+
+ operating-points-v2 = <&iris_opp_table>;
+
+ iris_opp_table: opp-table {
+ compatible = "operating-points-v2";
+
+ opp-240000000 {
+ opp-hz = /bits/ 64 <240000000>;
+ required-opps = <&rpmhpd_opp_svs>,
+ <&rpmhpd_opp_low_svs>;
+ };
+
+ opp-338000000 {
+ opp-hz = /bits/ 64 <338000000>;
+ required-opps = <&rpmhpd_opp_svs>,
+ <&rpmhpd_opp_svs>;
+ };
+
+ opp-366000000 {
+ opp-hz = /bits/ 64 <366000000>;
+ required-opps = <&rpmhpd_opp_svs_l1>,
+ <&rpmhpd_opp_svs_l1>;
+ };
+
+ opp-444000000 {
+ opp-hz = /bits/ 64 <444000000>;
+ required-opps = <&rpmhpd_opp_turbo>,
+ <&rpmhpd_opp_turbo>;
+ };
+
+ opp-533333334 {
+ opp-hz = /bits/ 64 <533333334>;
+ required-opps = <&rpmhpd_opp_turbo_l1>,
+ <&rpmhpd_opp_turbo_l1>;
+ };
+ };
+ };
+...
diff --git a/Documentation/devicetree/bindings/media/snps,dw-hdmi-rx.yaml b/Documentation/devicetree/bindings/media/snps,dw-hdmi-rx.yaml
new file mode 100644
index 000000000000..510e94e9ca3a
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/snps,dw-hdmi-rx.yaml
@@ -0,0 +1,132 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+# Device Tree bindings for Synopsys DesignWare HDMI RX Controller
+
+---
+$id: http://devicetree.org/schemas/media/snps,dw-hdmi-rx.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Synopsys DesignWare HDMI RX Controller
+
+maintainers:
+ - Shreeya Patel <shreeya.patel@collabora.com>
+
+description:
+ Synopsys DesignWare HDMI Input Controller present on RK3588 SoCs,
+ allowing devices to receive and decode high-resolution video streams
+ from external sources like media players, cameras, laptops, etc.
+
+properties:
+ compatible:
+ items:
+ - const: rockchip,rk3588-hdmirx-ctrler
+ - const: snps,dw-hdmi-rx
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 3
+
+ interrupt-names:
+ items:
+ - const: cec
+ - const: hdmi
+ - const: dma
+
+ clocks:
+ maxItems: 7
+
+ clock-names:
+ items:
+ - const: aclk
+ - const: audio
+ - const: cr_para
+ - const: pclk
+ - const: ref
+ - const: hclk_s_hdmirx
+ - const: hclk_vo1
+
+ power-domains:
+ maxItems: 1
+
+ resets:
+ maxItems: 4
+
+ reset-names:
+ items:
+ - const: axi
+ - const: apb
+ - const: ref
+ - const: biu
+
+ memory-region:
+ maxItems: 1
+
+ hpd-gpios:
+ description: GPIO specifier for HPD.
+ maxItems: 1
+
+ rockchip,grf:
+ $ref: /schemas/types.yaml#/definitions/phandle
+ description:
+ The phandle of the syscon node for the general register file
+ containing HDMIRX PHY status bits.
+
+ rockchip,vo1-grf:
+ $ref: /schemas/types.yaml#/definitions/phandle
+ description:
+ The phandle of the syscon node for the Video Output GRF register
+ to enable EDID transfer through SDAIN and SCLIN.
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - interrupt-names
+ - clocks
+ - clock-names
+ - power-domains
+ - resets
+ - pinctrl-0
+ - hpd-gpios
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/rockchip,rk3588-cru.h>
+ #include <dt-bindings/gpio/gpio.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
+ #include <dt-bindings/power/rk3588-power.h>
+ #include <dt-bindings/reset/rockchip,rk3588-cru.h>
+ hdmi_receiver: hdmi-receiver@fdee0000 {
+ compatible = "rockchip,rk3588-hdmirx-ctrler", "snps,dw-hdmi-rx";
+ reg = <0xfdee0000 0x6000>;
+ interrupts = <GIC_SPI 177 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 178 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 179 IRQ_TYPE_LEVEL_HIGH 0>;
+ interrupt-names = "cec", "hdmi", "dma";
+ clocks = <&cru ACLK_HDMIRX>,
+ <&cru CLK_HDMIRX_AUD>,
+ <&cru CLK_CR_PARA>,
+ <&cru PCLK_HDMIRX>,
+ <&cru CLK_HDMIRX_REF>,
+ <&cru PCLK_S_HDMIRX>,
+ <&cru HCLK_VO1>;
+ clock-names = "aclk",
+ "audio",
+ "cr_para",
+ "pclk",
+ "ref",
+ "hclk_s_hdmirx",
+ "hclk_vo1";
+ power-domains = <&power RK3588_PD_VO1>;
+ resets = <&cru SRST_A_HDMIRX>, <&cru SRST_P_HDMIRX>,
+ <&cru SRST_HDMIRX_REF>, <&cru SRST_A_HDMIRX_BIU>;
+ reset-names = "axi", "apb", "ref", "biu";
+ memory-region = <&hdmi_receiver_cma>;
+ pinctrl-0 = <&hdmim1_rx_cec &hdmim1_rx_hpdin &hdmim1_rx_scl &hdmim1_rx_sda &hdmirx_5v_detection>;
+ pinctrl-names = "default";
+ hpd-gpios = <&gpio1 22 GPIO_ACTIVE_LOW>;
+ };
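The memory-region in the example above points at a CMA pool defined under
reserved-memory; the binding only requires the phandle, so the node below is an
illustrative sketch (the label matches the example, but size and alignment are
assumptions, not taken from the binding):

    reserved-memory {
        #address-cells = <2>;
        #size-cells = <2>;
        ranges;

        hdmi_receiver_cma: hdmirx-pool {
            compatible = "shared-dma-pool";
            reusable;
            /* illustrative pool size for high-resolution capture buffers */
            size = <0x0 0x10000000>;
            alignment = <0x0 0x400000>;
        };
    };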
diff --git a/Documentation/devicetree/bindings/media/st,stm32mp25-csi.yaml b/Documentation/devicetree/bindings/media/st,stm32mp25-csi.yaml
index 33bedfe41924..e9fa3cfea5d2 100644
--- a/Documentation/devicetree/bindings/media/st,stm32mp25-csi.yaml
+++ b/Documentation/devicetree/bindings/media/st,stm32mp25-csi.yaml
@@ -7,8 +7,8 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: STMicroelectronics STM32 CSI controller
description:
- The STM32 CSI controller allows connecting a CSI based
- camera to the DCMIPP camera pipeline.
+ The STM32 CSI controller, coupled with a D-PHY allows connecting a CSI-2
+ based camera to the DCMIPP camera pipeline.
maintainers:
- Alain Volmat <alain.volmat@foss.st.com>
@@ -109,7 +109,6 @@ examples:
endpoint {
remote-endpoint = <&imx335_ep>;
data-lanes = <1 2>;
- bus-type = <MEDIA_BUS_TYPE_CSI2_DPHY>;
};
};
diff --git a/Documentation/devicetree/bindings/mfd/motorola-cpcap.txt b/Documentation/devicetree/bindings/mfd/motorola-cpcap.txt
index 190230216de8..f00827c9b67f 100644
--- a/Documentation/devicetree/bindings/mfd/motorola-cpcap.txt
+++ b/Documentation/devicetree/bindings/mfd/motorola-cpcap.txt
@@ -31,6 +31,10 @@ node must be named "audio-codec".
Required properties for the audio-codec subnode:
- #sound-dai-cells = <1>;
+- interrupts : should contain jack detection interrupts, with headset
+ detect interrupt matching "hs" and microphone bias 2
+ detect interrupt matching "mb2" in interrupt-names.
+- interrupt-names : Contains "hs", "mb2"
The audio-codec provides two DAIs. The first one is connected to the
Stereo HiFi DAC and the second one is connected to the Voice DAC.
@@ -52,6 +56,8 @@ Example:
audio-codec {
#sound-dai-cells = <1>;
+ interrupts-extended = <&cpcap 9 0>, <&cpcap 10 0>;
+ interrupt-names = "hs", "mb2";
/* HiFi */
port@0 {
diff --git a/Documentation/devicetree/bindings/mfd/syscon.yaml b/Documentation/devicetree/bindings/mfd/syscon.yaml
index 4d67ff26d445..2836e4793afc 100644
--- a/Documentation/devicetree/bindings/mfd/syscon.yaml
+++ b/Documentation/devicetree/bindings/mfd/syscon.yaml
@@ -103,6 +103,7 @@ select:
- rockchip,rk3288-qos
- rockchip,rk3368-qos
- rockchip,rk3399-qos
+ - rockchip,rk3528-qos
- rockchip,rk3562-qos
- rockchip,rk3568-qos
- rockchip,rk3576-qos
@@ -202,6 +203,7 @@ properties:
- rockchip,rk3288-qos
- rockchip,rk3368-qos
- rockchip,rk3399-qos
+ - rockchip,rk3528-qos
- rockchip,rk3562-qos
- rockchip,rk3568-qos
- rockchip,rk3576-qos
diff --git a/Documentation/devicetree/bindings/misc/atmel-ssc.txt b/Documentation/devicetree/bindings/misc/atmel-ssc.txt
deleted file mode 100644
index f9fb412642fe..000000000000
--- a/Documentation/devicetree/bindings/misc/atmel-ssc.txt
+++ /dev/null
@@ -1,50 +0,0 @@
-* Atmel SSC driver.
-
-Required properties:
-- compatible: "atmel,at91rm9200-ssc" or "atmel,at91sam9g45-ssc"
- - atmel,at91rm9200-ssc: support pdc transfer
- - atmel,at91sam9g45-ssc: support dma transfer
-- reg: Should contain SSC registers location and length
-- interrupts: Should contain SSC interrupt
-- clock-names: tuple listing input clock names.
- Required elements: "pclk"
-- clocks: phandles to input clocks.
-
-
-Required properties for devices compatible with "atmel,at91sam9g45-ssc":
-- dmas: DMA specifier, consisting of a phandle to DMA controller node,
- the memory interface and SSC DMA channel ID (for tx and rx).
- See Documentation/devicetree/bindings/dma/atmel-dma.txt for details.
-- dma-names: Must be "tx", "rx".
-
-Optional properties:
- - atmel,clk-from-rk-pin: bool property.
- - When SSC works in slave mode, according to the hardware design, the
- clock can get from TK pin, and also can get from RK pin. So, add
- this parameter to choose where the clock from.
- - By default the clock is from TK pin, if the clock from RK pin, this
- property is needed.
- - #sound-dai-cells: Should contain <0>.
- - This property makes the SSC into an automatically registered DAI.
-
-Examples:
-- PDC transfer:
-ssc0: ssc@fffbc000 {
- compatible = "atmel,at91rm9200-ssc";
- reg = <0xfffbc000 0x4000>;
- interrupts = <14 4 5>;
- clocks = <&ssc0_clk>;
- clock-names = "pclk";
-};
-
-- DMA transfer:
-ssc0: ssc@f0010000 {
- compatible = "atmel,at91sam9g45-ssc";
- reg = <0xf0010000 0x4000>;
- interrupts = <28 4 5>;
- dmas = <&dma0 1 13>,
- <&dma0 1 14>;
- dma-names = "tx", "rx";
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_ssc0_tx &pinctrl_ssc0_rx>;
-};
diff --git a/Documentation/devicetree/bindings/mmc/allwinner,sun4i-a10-mmc.yaml b/Documentation/devicetree/bindings/mmc/allwinner,sun4i-a10-mmc.yaml
index 0ccd632d5620..9f3b1edacaa0 100644
--- a/Documentation/devicetree/bindings/mmc/allwinner,sun4i-a10-mmc.yaml
+++ b/Documentation/devicetree/bindings/mmc/allwinner,sun4i-a10-mmc.yaml
@@ -30,38 +30,34 @@ properties:
- const: allwinner,sun50i-a100-emmc
- const: allwinner,sun50i-a100-mmc
- items:
- - const: allwinner,sun8i-a83t-mmc
+ - enum:
+ - allwinner,sun8i-a83t-mmc
+ - allwinner,suniv-f1c100s-mmc
- const: allwinner,sun7i-a20-mmc
- items:
- - const: allwinner,sun8i-r40-emmc
+ - enum:
+ - allwinner,sun8i-r40-emmc
+ - allwinner,sun50i-h5-emmc
+ - allwinner,sun50i-h6-emmc
- const: allwinner,sun50i-a64-emmc
- items:
- - const: allwinner,sun8i-r40-mmc
+ - enum:
+ - allwinner,sun8i-r40-mmc
+ - allwinner,sun50i-h5-mmc
+ - allwinner,sun50i-h6-mmc
- const: allwinner,sun50i-a64-mmc
- items:
- - const: allwinner,sun50i-h5-emmc
- - const: allwinner,sun50i-a64-emmc
- - items:
- - const: allwinner,sun50i-h5-mmc
- - const: allwinner,sun50i-a64-mmc
- - items:
- - const: allwinner,sun50i-h6-emmc
- - const: allwinner,sun50i-a64-emmc
- - items:
- - const: allwinner,sun50i-h6-mmc
- - const: allwinner,sun50i-a64-mmc
- - items:
- - const: allwinner,sun20i-d1-emmc
- - const: allwinner,sun50i-a100-emmc
- - items:
- - const: allwinner,sun50i-h616-emmc
+ - enum:
+ - allwinner,sun20i-d1-emmc
+ - allwinner,sun50i-h616-emmc
+ - allwinner,sun55i-a523-emmc
- const: allwinner,sun50i-a100-emmc
- items:
- const: allwinner,sun50i-h616-mmc
- const: allwinner,sun50i-a100-mmc
- items:
- - const: allwinner,suniv-f1c100s-mmc
- - const: allwinner,sun7i-a20-mmc
+ - const: allwinner,sun55i-a523-mmc
+ - const: allwinner,sun20i-d1-mmc
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/mmc/amlogic,meson-mx-sdio.yaml b/Documentation/devicetree/bindings/mmc/amlogic,meson-mx-sdio.yaml
index 022682a977c6..0d4d9ca6a8d9 100644
--- a/Documentation/devicetree/bindings/mmc/amlogic,meson-mx-sdio.yaml
+++ b/Documentation/devicetree/bindings/mmc/amlogic,meson-mx-sdio.yaml
@@ -60,6 +60,9 @@ patternProperties:
bus-width:
enum: [1, 4]
+ required:
+ - compatible
+
unevaluatedProperties: false
required:
diff --git a/Documentation/devicetree/bindings/mmc/atmel,hsmci.yaml b/Documentation/devicetree/bindings/mmc/atmel,hsmci.yaml
new file mode 100644
index 000000000000..151b414b9d27
--- /dev/null
+++ b/Documentation/devicetree/bindings/mmc/atmel,hsmci.yaml
@@ -0,0 +1,106 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/mmc/atmel,hsmci.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Atmel High-Speed MultiMedia Card Interface (HSMCI)
+
+description:
+ The Atmel HSMCI controller provides an interface for MMC, SD, and SDIO memory
+ cards.
+
+maintainers:
+ - Nicolas Ferre <nicolas.ferre@microchip.com>
+ - Aubin Constans <aubin.constans@microchip.com>
+
+allOf:
+ - $ref: mmc-controller.yaml
+
+properties:
+ compatible:
+ const: atmel,hsmci
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ dmas:
+ maxItems: 1
+
+ dma-names:
+ const: rxtx
+
+ clocks:
+ maxItems: 1
+
+ clock-names:
+ const: mci_clk
+
+ "#address-cells":
+ const: 1
+ description: Used for slot IDs.
+
+ "#size-cells":
+ const: 0
+
+patternProperties:
+ "slot@[0-2]$":
+ $ref: mmc-slot.yaml
+ description: A slot node representing an MMC, SD, or SDIO slot.
+
+ properties:
+ reg:
+ enum: [0, 1]
+
+ required:
+ - reg
+ - bus-width
+
+ unevaluatedProperties: false
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+ - clock-names
+ - "#address-cells"
+ - "#size-cells"
+
+anyOf:
+ - required:
+ - slot@0
+ - required:
+ - slot@1
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+ #include <dt-bindings/clock/at91.h>
+ mmc@f0008000 {
+ compatible = "atmel,hsmci";
+ reg = <0xf0008000 0x600>;
+ interrupts = <12 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&mci0_clk>;
+ clock-names = "mci_clk";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ slot@0 {
+ reg = <0>;
+ bus-width = <4>;
+ cd-gpios = <&pioD 15 0>;
+ cd-inverted;
+ };
+
+ slot@1 {
+ reg = <1>;
+ bus-width = <4>;
+ };
+ };
+...
diff --git a/Documentation/devicetree/bindings/mmc/atmel-hsmci.txt b/Documentation/devicetree/bindings/mmc/atmel-hsmci.txt
deleted file mode 100644
index 07ad02075a93..000000000000
--- a/Documentation/devicetree/bindings/mmc/atmel-hsmci.txt
+++ /dev/null
@@ -1,73 +0,0 @@
-* Atmel High Speed MultiMedia Card Interface
-
-This controller on atmel products provides an interface for MMC, SD and SDIO
-types of memory cards.
-
-This file documents differences between the core properties described
-by mmc.txt and the properties used by the atmel-mci driver.
-
-1) MCI node
-
-Required properties:
-- compatible: should be "atmel,hsmci"
-- #address-cells: should be one. The cell is the slot id.
-- #size-cells: should be zero.
-- at least one slot node
-- clock-names: tuple listing input clock names.
- Required elements: "mci_clk"
-- clocks: phandles to input clocks.
-
-The node contains child nodes for each slot that the platform uses
-
-Example MCI node:
-
-mmc0: mmc@f0008000 {
- compatible = "atmel,hsmci";
- reg = <0xf0008000 0x600>;
- interrupts = <12 4>;
- #address-cells = <1>;
- #size-cells = <0>;
- clock-names = "mci_clk";
- clocks = <&mci0_clk>;
-
- [ child node definitions...]
-};
-
-2) slot nodes
-
-Required properties:
-- reg: should contain the slot id.
-- bus-width: number of data lines connected to the controller
-
-Optional properties:
-- cd-gpios: specify GPIOs for card detection
-- cd-inverted: invert the value of external card detect gpio line
-- wp-gpios: specify GPIOs for write protection
-
-Example slot node:
-
-slot@0 {
- reg = <0>;
- bus-width = <4>;
- cd-gpios = <&pioD 15 0>
- cd-inverted;
-};
-
-Example full MCI node:
-mmc0: mmc@f0008000 {
- compatible = "atmel,hsmci";
- reg = <0xf0008000 0x600>;
- interrupts = <12 4>;
- #address-cells = <1>;
- #size-cells = <0>;
- slot@0 {
- reg = <0>;
- bus-width = <4>;
- cd-gpios = <&pioD 15 0>
- cd-inverted;
- };
- slot@1 {
- reg = <1>;
- bus-width = <4>;
- };
-};
diff --git a/Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.yaml b/Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.yaml
index b9b999570529..b98a84f93277 100644
--- a/Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.yaml
+++ b/Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.yaml
@@ -57,6 +57,7 @@ properties:
- fsl,imx8mp-usdhc
- fsl,imx8ulp-usdhc
- fsl,imx93-usdhc
+ - fsl,imx94-usdhc
- fsl,imx95-usdhc
- const: fsl,imx8mm-usdhc
- items:
diff --git a/Documentation/devicetree/bindings/mmc/mmc-controller.yaml b/Documentation/devicetree/bindings/mmc/mmc-controller.yaml
index 9d7a1298c455..26e4f0f8dc1c 100644
--- a/Documentation/devicetree/bindings/mmc/mmc-controller.yaml
+++ b/Documentation/devicetree/bindings/mmc/mmc-controller.yaml
@@ -24,7 +24,7 @@ properties:
$nodename:
pattern: "^mmc(@.*)?$"
-unevaluatedProperties: true
+additionalProperties: true
examples:
- |
diff --git a/Documentation/devicetree/bindings/mmc/mmc-slot.yaml b/Documentation/devicetree/bindings/mmc/mmc-slot.yaml
index 1f0667828063..ca3d0114bfc6 100644
--- a/Documentation/devicetree/bindings/mmc/mmc-slot.yaml
+++ b/Documentation/devicetree/bindings/mmc/mmc-slot.yaml
@@ -29,7 +29,6 @@ properties:
maxItems: 1
required:
- - compatible
- reg
unevaluatedProperties: false
diff --git a/Documentation/devicetree/bindings/mmc/renesas,sdhi.yaml b/Documentation/devicetree/bindings/mmc/renesas,sdhi.yaml
index af378b9ff3f4..773baa6c2656 100644
--- a/Documentation/devicetree/bindings/mmc/renesas,sdhi.yaml
+++ b/Documentation/devicetree/bindings/mmc/renesas,sdhi.yaml
@@ -68,6 +68,9 @@ properties:
- renesas,sdhi-r9a08g045 # RZ/G3S
- renesas,sdhi-r9a09g011 # RZ/V2M
- const: renesas,rzg2l-sdhi
+ - items:
+ - const: renesas,sdhi-r9a09g047 # RZ/G3E
+ - const: renesas,sdhi-r9a09g057 # RZ/V2H(P)
reg:
maxItems: 1
@@ -211,6 +214,19 @@ allOf:
sectioned off to be run by a separate second clock source to allow
the main core clock to be turned off to save power.
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: renesas,sdhi-r9a09g057
+ then:
+ properties:
+ vqmmc-regulator:
+ type: object
+ description: VQMMC SD regulator
+ $ref: /schemas/regulator/regulator.yaml#
+ unevaluatedProperties: false
+
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/mmc/rockchip-dw-mshc.yaml b/Documentation/devicetree/bindings/mmc/rockchip-dw-mshc.yaml
index 06df1269f247..bf273115235b 100644
--- a/Documentation/devicetree/bindings/mmc/rockchip-dw-mshc.yaml
+++ b/Documentation/devicetree/bindings/mmc/rockchip-dw-mshc.yaml
@@ -38,6 +38,8 @@ properties:
- rockchip,rk3328-dw-mshc
- rockchip,rk3368-dw-mshc
- rockchip,rk3399-dw-mshc
+ - rockchip,rk3528-dw-mshc
+ - rockchip,rk3562-dw-mshc
- rockchip,rk3568-dw-mshc
- rockchip,rk3588-dw-mshc
- rockchip,rv1108-dw-mshc
diff --git a/Documentation/devicetree/bindings/mmc/samsung,exynos-dw-mshc.yaml b/Documentation/devicetree/bindings/mmc/samsung,exynos-dw-mshc.yaml
index ef2d1d7c92fc..e8bd49d46794 100644
--- a/Documentation/devicetree/bindings/mmc/samsung,exynos-dw-mshc.yaml
+++ b/Documentation/devicetree/bindings/mmc/samsung,exynos-dw-mshc.yaml
@@ -24,6 +24,8 @@ properties:
- samsung,exynos5420-dw-mshc-smu
- samsung,exynos7-dw-mshc
- samsung,exynos7-dw-mshc-smu
+ - samsung,exynos7870-dw-mshc
+ - samsung,exynos7870-dw-mshc-smu
- items:
- enum:
- samsung,exynos5433-dw-mshc-smu
diff --git a/Documentation/devicetree/bindings/mmc/snps,dwcmshc-sdhci.yaml b/Documentation/devicetree/bindings/mmc/snps,dwcmshc-sdhci.yaml
index c3d5e0230af1..e6e604072d3c 100644
--- a/Documentation/devicetree/bindings/mmc/snps,dwcmshc-sdhci.yaml
+++ b/Documentation/devicetree/bindings/mmc/snps,dwcmshc-sdhci.yaml
@@ -14,7 +14,10 @@ properties:
compatible:
oneOf:
- items:
- - const: rockchip,rk3576-dwcmshc
+ - enum:
+ - rockchip,rk3528-dwcmshc
+ - rockchip,rk3562-dwcmshc
+ - rockchip,rk3576-dwcmshc
- const: rockchip,rk3588-dwcmshc
- enum:
- rockchip,rk3568-dwcmshc
diff --git a/Documentation/devicetree/bindings/mtd/arasan,nand-controller.yaml b/Documentation/devicetree/bindings/mtd/arasan,nand-controller.yaml
index 15b63bbb82a2..b90d3b48c2f2 100644
--- a/Documentation/devicetree/bindings/mtd/arasan,nand-controller.yaml
+++ b/Documentation/devicetree/bindings/mtd/arasan,nand-controller.yaml
@@ -42,7 +42,7 @@ required:
- clock-names
- interrupts
-unevaluatedProperties: true
+unevaluatedProperties: false
examples:
- |
diff --git a/Documentation/devicetree/bindings/mtd/atmel,dataflash.yaml b/Documentation/devicetree/bindings/mtd/atmel,dataflash.yaml
new file mode 100644
index 000000000000..8c72fa346e36
--- /dev/null
+++ b/Documentation/devicetree/bindings/mtd/atmel,dataflash.yaml
@@ -0,0 +1,55 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/mtd/atmel,dataflash.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Atmel DataFlash
+
+maintainers:
+ - Nayab Sayed <nayabbasha.sayed@microchip.com>
+
+description:
+ The Atmel DataFlash is a low pin-count serial interface sequential access
+ Flash memory, compatible with SPI standard. The device tree may optionally
+ contain sub-nodes describing partitions of the address space.
+
+properties:
+ compatible:
+ oneOf:
+ - items:
+ - enum:
+ - atmel,at45db321d
+ - atmel,at45db041e
+ - atmel,at45db642d
+ - atmel,at45db021d
+ - const: atmel,at45
+ - const: atmel,dataflash
+ - items:
+ - const: atmel,at45
+ - const: atmel,dataflash
+
+ reg:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+
+allOf:
+ - $ref: mtd.yaml#
+ - $ref: /schemas/spi/spi-peripheral-props.yaml#
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ spi {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ flash@1 {
+ compatible = "atmel,at45db321d", "atmel,at45", "atmel,dataflash";
+ reg = <1>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/mtd/atmel-dataflash.txt b/Documentation/devicetree/bindings/mtd/atmel-dataflash.txt
deleted file mode 100644
index 1889a4db5b7c..000000000000
--- a/Documentation/devicetree/bindings/mtd/atmel-dataflash.txt
+++ /dev/null
@@ -1,17 +0,0 @@
-* Atmel Data Flash
-
-Required properties:
-- compatible : "atmel,<model>", "atmel,<series>", "atmel,dataflash".
-
-The device tree may optionally contain sub-nodes describing partitions of the
-address space. See partition.txt for more detail.
-
-Example:
-
-flash@1 {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "atmel,at45db321d", "atmel,at45", "atmel,dataflash";
- spi-max-frequency = <25000000>;
- reg = <1>;
-};
diff --git a/Documentation/devicetree/bindings/mtd/gpmi-nand.yaml b/Documentation/devicetree/bindings/mtd/gpmi-nand.yaml
index f9eb1868ca1f..0badb2e978c7 100644
--- a/Documentation/devicetree/bindings/mtd/gpmi-nand.yaml
+++ b/Documentation/devicetree/bindings/mtd/gpmi-nand.yaml
@@ -29,7 +29,14 @@ properties:
- enum:
- fsl,imx8mm-gpmi-nand
- fsl,imx8mn-gpmi-nand
+ - fsl,imx8mp-gpmi-nand
+ - fsl,imx8mq-gpmi-nand
- const: fsl,imx7d-gpmi-nand
+ - items:
+ - enum:
+ - fsl,imx8dxl-gpmi-nand
+ - fsl,imx8qm-gpmi-nand
+ - const: fsl,imx8qxp-gpmi-nand
reg:
items:
diff --git a/Documentation/devicetree/bindings/mtd/mtd-physmap.yaml b/Documentation/devicetree/bindings/mtd/mtd-physmap.yaml
index 18f6733408b4..1b375dee83b0 100644
--- a/Documentation/devicetree/bindings/mtd/mtd-physmap.yaml
+++ b/Documentation/devicetree/bindings/mtd/mtd-physmap.yaml
@@ -122,6 +122,8 @@ properties:
'#size-cells':
const: 1
+ ranges: true
+
big-endian: true
little-endian: true
@@ -143,8 +145,7 @@ then:
required:
- syscon
-# FIXME: A parent bus may define timing properties
-additionalProperties: true
+unevaluatedProperties: false
examples:
- |
diff --git a/Documentation/devicetree/bindings/mtd/mxc-nand.yaml b/Documentation/devicetree/bindings/mtd/mxc-nand.yaml
index cf4198e43d7f..bd8f7b683953 100644
--- a/Documentation/devicetree/bindings/mtd/mxc-nand.yaml
+++ b/Documentation/devicetree/bindings/mtd/mxc-nand.yaml
@@ -14,8 +14,12 @@ allOf:
properties:
compatible:
- const: fsl,imx27-nand
-
+ oneOf:
+ - const: fsl,imx27-nand
+ - items:
+ - enum:
+ - fsl,imx31-nand
+ - const: fsl,imx27-nand
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/net/airoha,en7581-eth.yaml b/Documentation/devicetree/bindings/net/airoha,en7581-eth.yaml
index c578637c5826..0fdd11265417 100644
--- a/Documentation/devicetree/bindings/net/airoha,en7581-eth.yaml
+++ b/Documentation/devicetree/bindings/net/airoha,en7581-eth.yaml
@@ -63,6 +63,14 @@ properties:
"#size-cells":
const: 0
+ airoha,npu:
+ $ref: /schemas/types.yaml#/definitions/phandle
+ description:
+ Phandle to the node used to configure the NPU module.
+ The Airoha Network Processor Unit (NPU) provides a configuration
+ interface used to implement hardware flow offloading by programming the
+ Packet Processor Engine (PPE) flow table.
+
patternProperties:
"^ethernet@[1-4]$":
type: object
@@ -132,6 +140,8 @@ examples:
<GIC_SPI 49 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 64 IRQ_TYPE_LEVEL_HIGH>;
+ airoha,npu = <&npu>;
+
#address-cells = <1>;
#size-cells = <0>;
diff --git a/Documentation/devicetree/bindings/net/airoha,en7581-npu.yaml b/Documentation/devicetree/bindings/net/airoha,en7581-npu.yaml
new file mode 100644
index 000000000000..76dd97c3fb40
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/airoha,en7581-npu.yaml
@@ -0,0 +1,84 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/net/airoha,en7581-npu.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Airoha Network Processor Unit for EN7581 SoC
+
+maintainers:
+ - Lorenzo Bianconi <lorenzo@kernel.org>
+
+description:
+ The Airoha Network Processor Unit (NPU) provides a configuration interface
+ to implement wired and wireless hardware flow offloading by programming
+ the Packet Processor Engine (PPE) flow table.
+
+properties:
+ compatible:
+ enum:
+ - airoha,en7581-npu
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ items:
+ - description: mbox host irq line
+ - description: watchdog0 irq line
+ - description: watchdog1 irq line
+ - description: watchdog2 irq line
+ - description: watchdog3 irq line
+ - description: watchdog4 irq line
+ - description: watchdog5 irq line
+ - description: watchdog6 irq line
+ - description: watchdog7 irq line
+ - description: wlan irq line0
+ - description: wlan irq line1
+ - description: wlan irq line2
+ - description: wlan irq line3
+ - description: wlan irq line4
+ - description: wlan irq line5
+
+ memory-region:
+ maxItems: 1
+ description:
+ Memory used to store NPU firmware binary.
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - memory-region
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
+ soc {
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ npu@1e900000 {
+ compatible = "airoha,en7581-npu";
+ reg = <0 0x1e900000 0 0x313000>;
+ interrupts = <GIC_SPI 125 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 103 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 117 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 134 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 135 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 136 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 137 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 118 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 119 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>;
+ memory-region = <&npu_binary>;
+ };
+ };
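The memory-region above refers to a carveout holding the NPU firmware; a minimal
reserved-memory sketch is shown below, with the label matching the example and an
assumed, purely illustrative base address and size:

    reserved-memory {
        #address-cells = <2>;
        #size-cells = <2>;
        ranges;

        npu_binary: npu-firmware@84000000 {
            /* illustrative placement; the real carveout is board specific */
            reg = <0x0 0x84000000 0x0 0xa00000>;
            no-map;
        };
    };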
diff --git a/Documentation/devicetree/bindings/net/amlogic,meson-dwmac.yaml b/Documentation/devicetree/bindings/net/amlogic,meson-dwmac.yaml
index 798a4c19f18c..0cd78d71768c 100644
--- a/Documentation/devicetree/bindings/net/amlogic,meson-dwmac.yaml
+++ b/Documentation/devicetree/bindings/net/amlogic,meson-dwmac.yaml
@@ -152,6 +152,12 @@ properties:
 The second range is for the Amlogic specific configuration
(for example the PRG_ETHERNET register range on Meson8b and newer)
+ interrupts:
+ maxItems: 1
+
+ interrupt-names:
+ const: macirq
+
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/net/bluetooth/nxp,88w8987-bt.yaml b/Documentation/devicetree/bindings/net/bluetooth/nxp,88w8987-bt.yaml
index 0a2d7baf5db3..d02e9dd847ef 100644
--- a/Documentation/devicetree/bindings/net/bluetooth/nxp,88w8987-bt.yaml
+++ b/Documentation/devicetree/bindings/net/bluetooth/nxp,88w8987-bt.yaml
@@ -17,6 +17,9 @@ description:
maintainers:
- Neeraj Sanjay Kale <neeraj.sanjaykale@nxp.com>
+allOf:
+ - $ref: bluetooth-controller.yaml#
+
properties:
compatible:
enum:
@@ -40,10 +43,20 @@ properties:
Host-To-Chip power save mechanism is driven by this GPIO
connected to BT_WAKE_IN pin of the NXP chipset.
+ nxp,wakein-pin:
+ $ref: /schemas/types.yaml#/definitions/uint8
+ description:
+ The GPIO number of the NXP chipset used for BT_WAKE_IN.
+
+ nxp,wakeout-pin:
+ $ref: /schemas/types.yaml#/definitions/uint8
+ description:
+ The GPIO number of the NXP chipset used for BT_WAKE_OUT.
+
required:
- compatible
-additionalProperties: false
+unevaluatedProperties: false
examples:
- |
@@ -54,5 +67,8 @@ examples:
fw-init-baudrate = <3000000>;
firmware-name = "uartuart8987_bt_v0.bin";
device-wakeup-gpios = <&gpio 11 GPIO_ACTIVE_HIGH>;
+ nxp,wakein-pin = /bits/ 8 <18>;
+ nxp,wakeout-pin = /bits/ 8 <19>;
+ local-bd-address = [66 55 44 33 22 11];
};
};
diff --git a/Documentation/devicetree/bindings/net/bluetooth/qualcomm-bluetooth.yaml b/Documentation/devicetree/bindings/net/bluetooth/qualcomm-bluetooth.yaml
index a72152f7e29b..6353a336f382 100644
--- a/Documentation/devicetree/bindings/net/bluetooth/qualcomm-bluetooth.yaml
+++ b/Documentation/devicetree/bindings/net/bluetooth/qualcomm-bluetooth.yaml
@@ -19,6 +19,7 @@ properties:
- qcom,qca2066-bt
- qcom,qca6174-bt
- qcom,qca9377-bt
+ - qcom,wcn3950-bt
- qcom,wcn3988-bt
- qcom,wcn3990-bt
- qcom,wcn3991-bt
@@ -138,6 +139,7 @@ allOf:
compatible:
contains:
enum:
+ - qcom,wcn3950-bt
- qcom,wcn3988-bt
- qcom,wcn3990-bt
- qcom,wcn3991-bt
diff --git a/Documentation/devicetree/bindings/net/can/fsl,flexcan.yaml b/Documentation/devicetree/bindings/net/can/fsl,flexcan.yaml
index 97dd1a7c5ed2..f81d56f7c12a 100644
--- a/Documentation/devicetree/bindings/net/can/fsl,flexcan.yaml
+++ b/Documentation/devicetree/bindings/net/can/fsl,flexcan.yaml
@@ -10,9 +10,6 @@ title:
maintainers:
- Marc Kleine-Budde <mkl@pengutronix.de>
-allOf:
- - $ref: can-controller.yaml#
-
properties:
compatible:
oneOf:
@@ -28,6 +25,7 @@ properties:
- fsl,vf610-flexcan
- fsl,ls1021ar2-flexcan
- fsl,lx2160ar1-flexcan
+ - nxp,s32g2-flexcan
- items:
- enum:
- fsl,imx53-flexcan
@@ -43,12 +41,25 @@ properties:
- enum:
- fsl,ls1028ar1-flexcan
- const: fsl,lx2160ar1-flexcan
+ - items:
+ - enum:
+ - nxp,s32g3-flexcan
+ - const: nxp,s32g2-flexcan
+ - items:
+ - enum:
+ - fsl,imx94-flexcan
+ - const: fsl,imx95-flexcan
reg:
maxItems: 1
interrupts:
- maxItems: 1
+ minItems: 1
+ maxItems: 4
+
+ interrupt-names:
+ minItems: 1
+ maxItems: 4
clocks:
maxItems: 2
@@ -70,6 +81,9 @@ properties:
xceiver-supply:
description: Regulator that powers the CAN transceiver.
+ phys:
+ maxItems: 1
+
big-endian:
$ref: /schemas/types.yaml#/definitions/flag
description: |
@@ -136,6 +150,41 @@ required:
- reg
- interrupts
+allOf:
+ - $ref: can-controller.yaml#
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: nxp,s32g2-flexcan
+ then:
+ properties:
+ interrupts:
+ items:
+ - description: Message Buffer interrupt for mailboxes 0-7 and Enhanced RX FIFO
+ - description: Device state change
+ - description: Bus Error detection
+ - description: Message Buffer interrupt for mailboxes 8-127
+ interrupt-names:
+ items:
+ - const: mb-0
+ - const: state
+ - const: berr
+ - const: mb-1
+ required:
+ - interrupt-names
+ else:
+ properties:
+ interrupts:
+ maxItems: 1
+ interrupt-names: false
+ - if:
+ required:
+ - xceiver-supply
+ then:
+ properties:
+ phys: false
+
additionalProperties: false
examples:
diff --git a/Documentation/devicetree/bindings/net/can/microchip,mcp251xfd.yaml b/Documentation/devicetree/bindings/net/can/microchip,mcp251xfd.yaml
index 2a98b26630cb..c155c9c6db39 100644
--- a/Documentation/devicetree/bindings/net/can/microchip,mcp251xfd.yaml
+++ b/Documentation/devicetree/bindings/net/can/microchip,mcp251xfd.yaml
@@ -40,7 +40,7 @@ properties:
microchip,rx-int-gpios:
description:
- GPIO phandle of GPIO connected to to INT1 pin of the MCP251XFD, which
+ GPIO phandle of GPIO connected to INT1 pin of the MCP251XFD, which
signals a pending RX interrupt.
maxItems: 1
diff --git a/Documentation/devicetree/bindings/net/can/renesas,rcar-canfd.yaml b/Documentation/devicetree/bindings/net/can/renesas,rcar-canfd.yaml
index 7c5ac5d2e880..f6884f6e59e7 100644
--- a/Documentation/devicetree/bindings/net/can/renesas,rcar-canfd.yaml
+++ b/Documentation/devicetree/bindings/net/can/renesas,rcar-canfd.yaml
@@ -170,7 +170,7 @@ allOf:
const: renesas,r8a779h0-canfd
then:
patternProperties:
- "^channel[5-7]$": false
+ "^channel[4-7]$": false
else:
if:
not:
diff --git a/Documentation/devicetree/bindings/net/cdns,macb.yaml b/Documentation/devicetree/bindings/net/cdns,macb.yaml
index 3c30dd23cd4e..8d69846b2e09 100644
--- a/Documentation/devicetree/bindings/net/cdns,macb.yaml
+++ b/Documentation/devicetree/bindings/net/cdns,macb.yaml
@@ -197,7 +197,6 @@ examples:
};
- |
- #include <dt-bindings/clock/xlnx-zynqmp-clk.h>
#include <dt-bindings/power/xlnx-zynqmp-power.h>
#include <dt-bindings/reset/xlnx-zynqmp-resets.h>
#include <dt-bindings/phy/phy.h>
@@ -210,9 +209,9 @@ examples:
interrupt-parent = <&gic>;
interrupts = <0 59 4>, <0 59 4>;
reg = <0x0 0xff0c0000 0x0 0x1000>;
- clocks = <&zynqmp_clk LPD_LSBUS>, <&zynqmp_clk GEM1_REF>,
- <&zynqmp_clk GEM1_TX>, <&zynqmp_clk GEM1_RX>,
- <&zynqmp_clk GEM_TSU>;
+ clocks = <&zynqmp_clk 31>, <&zynqmp_clk 105>,
+ <&zynqmp_clk 51>, <&zynqmp_clk 50>,
+ <&zynqmp_clk 44>;
clock-names = "pclk", "hclk", "tx_clk", "rx_clk", "tsu_clk";
#address-cells = <1>;
#size-cells = <0>;
diff --git a/Documentation/devicetree/bindings/net/dsa/brcm,b53.yaml b/Documentation/devicetree/bindings/net/dsa/brcm,b53.yaml
index 4c78c546343f..d6c957a33b48 100644
--- a/Documentation/devicetree/bindings/net/dsa/brcm,b53.yaml
+++ b/Documentation/devicetree/bindings/net/dsa/brcm,b53.yaml
@@ -16,6 +16,7 @@ properties:
compatible:
oneOf:
- const: brcm,bcm5325
+ - const: brcm,bcm53101
- const: brcm,bcm53115
- const: brcm,bcm53125
- const: brcm,bcm53128
@@ -77,6 +78,7 @@ allOf:
contains:
enum:
- brcm,bcm5325
+ - brcm,bcm53101
- brcm,bcm53115
- brcm,bcm53125
- brcm,bcm53128
diff --git a/Documentation/devicetree/bindings/net/ethernet-phy.yaml b/Documentation/devicetree/bindings/net/ethernet-phy.yaml
index 2c71454ae8e3..824bbe4333b7 100644
--- a/Documentation/devicetree/bindings/net/ethernet-phy.yaml
+++ b/Documentation/devicetree/bindings/net/ethernet-phy.yaml
@@ -232,6 +232,12 @@ properties:
 PHYs that have configurable TX internal delays. If this property is
present then the PHY applies the TX delay.
+ tx-amplitude-100base-tx-percent:
+ description:
+ Transmit amplitude gain applied for 100BASE-TX. 100% matches 2V
+ peak-to-peak as specified in ANSI X3.263. When omitted, the PHY's default
+ will be left as is.
+
leds:
type: object
diff --git a/Documentation/devicetree/bindings/net/fsl,gianfar-mdio.yaml b/Documentation/devicetree/bindings/net/fsl,gianfar-mdio.yaml
new file mode 100644
index 000000000000..03c819bc701b
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/fsl,gianfar-mdio.yaml
@@ -0,0 +1,112 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/net/fsl,gianfar-mdio.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Freescale Gianfar (TSEC) MDIO Device
+
+description:
+ This binding describes the MDIO bus, to which the PHY devices are
+ connected. For each device that exists on this bus, a child node should be
+ created.
+
+ Some TSECs are associated with an internal Ten-Bit Interface (TBI) PHY. This
+ PHY is accessed through the local MDIO bus. These buses are defined similarly
+ to the mdio buses, except they are compatible with "fsl,gianfar-tbi". The TBI
+ PHYs underneath them are similar to normal PHYs, but the reg property is
+ considered instructive, rather than descriptive. The reg property should be
+ chosen so it doesn't interfere with other PHYs on the bus.
+
+maintainers:
+ - J. Neuschäfer <j.ne@posteo.net>
+
+# This is needed to distinguish gianfar.yaml and gianfar-mdio.yaml, because
+# both use compatible = "gianfar" (with different device_type values)
+select:
+ oneOf:
+ - properties:
+ compatible:
+ contains:
+ const: gianfar
+ device_type:
+ const: mdio
+ required:
+ - device_type
+
+ - properties:
+ compatible:
+ contains:
+ enum:
+ - fsl,gianfar-tbi
+ - fsl,gianfar-mdio
+ - fsl,etsec2-tbi
+ - fsl,etsec2-mdio
+ - fsl,ucc-mdio
+ - ucc_geth_phy
+
+ required:
+ - compatible
+
+properties:
+ compatible:
+ enum:
+ - fsl,gianfar-tbi
+ - fsl,gianfar-mdio
+ - fsl,etsec2-tbi
+ - fsl,etsec2-mdio
+ - fsl,ucc-mdio
+ - gianfar
+ - ucc_geth_phy
+
+ reg:
+ minItems: 1
+ items:
+ - description:
+ Offset and length of the register set for the device
+
+ - description:
+ Optionally, the offset and length of the TBIPA register (TBI PHY
+ address register). If the TBIPA register is not specified, the driver
+ will attempt to infer it from the register set specified (your
+ mileage may vary).
+
+ device_type:
+ const: mdio
+
+required:
+ - reg
+ - "#address-cells"
+ - "#size-cells"
+
+allOf:
+ - $ref: mdio.yaml#
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: ucc_geth_phy
+ then:
+ required:
+ - device_type
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ soc {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ mdio@24520 {
+ reg = <0x24520 0x20>;
+ compatible = "fsl,gianfar-mdio";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ethernet-phy@0 {
+ reg = <0>;
+ };
+ };
+ };
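For the TBI buses described above, the reg of the PHY child is instructive rather
than descriptive, so any address free on the bus may be picked. A minimal sketch,
assuming the conventional tbi-phy naming from existing device trees and a free
address of 0x11:

    mdio@25520 {
        compatible = "fsl,gianfar-tbi";
        reg = <0x25520 0x20>;
        #address-cells = <1>;
        #size-cells = <0>;

        tbi0: tbi-phy@11 {
            /* address chosen so it does not clash with other PHYs on the bus */
            reg = <0x11>;
            device_type = "tbi-phy";
        };
    };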
diff --git a/Documentation/devicetree/bindings/net/fsl,gianfar.yaml b/Documentation/devicetree/bindings/net/fsl,gianfar.yaml
new file mode 100644
index 000000000000..f92f284aa05b
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/fsl,gianfar.yaml
@@ -0,0 +1,248 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/net/fsl,gianfar.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Freescale Three-Speed Ethernet Controller (TSEC), "Gianfar"
+
+maintainers:
+ - J. Neuschäfer <j.ne@posteo.net>
+
+# This is needed to distinguish gianfar.yaml and gianfar-mdio.yaml, because
+# both use compatible = "gianfar" (with different device_type values)
+select:
+ oneOf:
+ - properties:
+ compatible:
+ contains:
+ const: gianfar
+ device_type:
+ const: network
+ required:
+ - device_type
+
+ - properties:
+ compatible:
+ const: fsl,etsec2
+
+ required:
+ - compatible
+
+properties:
+ compatible:
+ enum:
+ - gianfar
+ - fsl,etsec2
+
+ device_type:
+ const: network
+
+ model:
+ enum:
+ - FEC
+ - TSEC
+ - eTSEC
+
+ reg:
+ maxItems: 1
+
+ ranges: true
+
+ "#address-cells":
+ enum: [ 1, 2 ]
+
+ "#size-cells":
+ enum: [ 1, 2 ]
+
+ cell-index:
+ $ref: /schemas/types.yaml#/definitions/uint32
+
+ interrupts:
+ minItems: 1
+ items:
+ - description: Transmit interrupt or single combined interrupt
+ - description: Receive interrupt
+ - description: Error interrupt
+
+ dma-coherent: true
+
+ fsl,magic-packet:
+ type: boolean
+ description:
+ If present, indicates that the hardware supports waking up via magic packet.
+
+ fsl,wake-on-filer:
+ type: boolean
+ description:
+ If present, indicates that the hardware supports waking up by Filer
+ General Purpose Interrupt (FGPI) asserted on the Rx int line. This is
+ an advanced power management capability allowing certain packet types
+ (user) defined by filer rules to wake up the system.
+
+ bd-stash:
+ type: boolean
+ description:
+ If present, indicates that the hardware supports stashing buffer
+ descriptors in the L2.
+
+ rx-stash-len:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description:
+ Denotes the number of bytes of a received buffer to stash in the L2.
+
+ rx-stash-idx:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description:
+ Denotes the index of the first byte from the received buffer to stash in
+ the L2.
+
+ fsl,num_rx_queues:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: Number of receive queues
+ const: 8
+
+ fsl,num_tx_queues:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: Number of transmit queues
+ const: 8
+
+ tbi-handle:
+ $ref: /schemas/types.yaml#/definitions/phandle
+ description: Reference (phandle) to the TBI node
+
+required:
+ - compatible
+ - model
+
+patternProperties:
+ "^mdio@[0-9a-f]+$":
+ $ref: /schemas/net/fsl,gianfar-mdio.yaml#
+
+allOf:
+ - $ref: ethernet-controller.yaml#
+
+ # eTSEC2 controller nodes have "queue group" subnodes and don't need a "reg"
+ # property.
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: fsl,etsec2
+ then:
+ patternProperties:
+ "^queue-group@[0-9a-f]+$":
+ type: object
+
+ properties:
+ reg:
+ maxItems: 1
+
+ interrupts:
+ items:
+ - description: Transmit interrupt
+ - description: Receive interrupt
+ - description: Error interrupt
+
+ required:
+ - reg
+ - interrupts
+
+ additionalProperties: false
+ else:
+ required:
+ - reg
+
+ # TSEC and eTSEC devices require three interrupts
+ - if:
+ properties:
+ model:
+ contains:
+ enum: [ TSEC, eTSEC ]
+ then:
+ properties:
+ interrupts:
+ items:
+ - description: Transmit interrupt
+ - description: Receive interrupt
+ - description: Error interrupt
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ ethernet@24000 {
+ device_type = "network";
+ model = "TSEC";
+ compatible = "gianfar";
+ reg = <0x24000 0x1000>;
+ local-mac-address = [ 00 00 00 00 00 00 ];
+ interrupts = <29 2>, <30 2>, <34 2>;
+ interrupt-parent = <&mpic>;
+ phy-handle = <&phy0>;
+ };
+
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+
+ ethernet@24000 {
+ compatible = "gianfar";
+ reg = <0x24000 0x1000>;
+ ranges = <0x0 0x24000 0x1000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ cell-index = <0>;
+ device_type = "network";
+ model = "eTSEC";
+ local-mac-address = [ 00 00 00 00 00 00 ];
+ interrupts = <32 IRQ_TYPE_LEVEL_LOW>,
+ <33 IRQ_TYPE_LEVEL_LOW>,
+ <34 IRQ_TYPE_LEVEL_LOW>;
+ interrupt-parent = <&ipic>;
+
+ mdio@520 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,gianfar-mdio";
+ reg = <0x520 0x20>;
+ };
+ };
+
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+
+ bus {
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ ethernet {
+ compatible = "fsl,etsec2";
+ ranges;
+ device_type = "network";
+ #address-cells = <2>;
+ #size-cells = <2>;
+ interrupt-parent = <&gic>;
+ model = "eTSEC";
+ fsl,magic-packet;
+ dma-coherent;
+
+ queue-group@2d10000 {
+ reg = <0x0 0x2d10000 0x0 0x1000>;
+ interrupts = <GIC_SPI 144 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 145 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 146 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ queue-group@2d14000 {
+ reg = <0x0 0x2d14000 0x0 0x1000>;
+ interrupts = <GIC_SPI 147 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 148 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH>;
+ };
+ };
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/net/fsl-tsec-phy.txt b/Documentation/devicetree/bindings/net/fsl-tsec-phy.txt
index 9c9668c1b6a2..b18bb4c997ea 100644
--- a/Documentation/devicetree/bindings/net/fsl-tsec-phy.txt
+++ b/Documentation/devicetree/bindings/net/fsl-tsec-phy.txt
@@ -1,88 +1,14 @@
* MDIO IO device
-The MDIO is a bus to which the PHY devices are connected. For each
-device that exists on this bus, a child node should be created. See
-the definition of the PHY node in booting-without-of.txt for an example
-of how to define a PHY.
-
-Required properties:
- - reg : Offset and length of the register set for the device, and optionally
- the offset and length of the TBIPA register (TBI PHY address
- register). If TBIPA register is not specified, the driver will
- attempt to infer it from the register set specified (your mileage may
- vary).
- - compatible : Should define the compatible device type for the
- mdio. Currently supported strings/devices are:
- - "fsl,gianfar-tbi"
- - "fsl,gianfar-mdio"
- - "fsl,etsec2-tbi"
- - "fsl,etsec2-mdio"
- - "fsl,ucc-mdio"
- - "fsl,fman-mdio"
- When device_type is "mdio", the following strings are also considered:
- - "gianfar"
- - "ucc_geth_phy"
-
-Example:
-
- mdio@24520 {
- reg = <24520 20>;
- compatible = "fsl,gianfar-mdio";
-
- ethernet-phy@0 {
- ......
- };
- };
+Refer to Documentation/devicetree/bindings/net/fsl,gianfar-mdio.yaml
* TBI Internal MDIO bus
-As of this writing, every tsec is associated with an internal TBI PHY.
-This PHY is accessed through the local MDIO bus. These buses are defined
-similarly to the mdio buses, except they are compatible with "fsl,gianfar-tbi".
-The TBI PHYs underneath them are similar to normal PHYs, but the reg property
-is considered instructive, rather than descriptive. The reg property should
-be chosen so it doesn't interfere with other PHYs on the bus.
+Refer to Documentation/devicetree/bindings/net/fsl,gianfar-mdio.yaml
* Gianfar-compatible ethernet nodes
-Properties:
-
- - device_type : Should be "network"
- - model : Model of the device. Can be "TSEC", "eTSEC", or "FEC"
- - compatible : Should be "gianfar"
- - reg : Offset and length of the register set for the device
- - interrupts : For FEC devices, the first interrupt is the device's
- interrupt. For TSEC and eTSEC devices, the first interrupt is
- transmit, the second is receive, and the third is error.
- - phy-handle : See ethernet.txt file in the same directory.
- - fixed-link : See fixed-link.txt in the same directory.
- - phy-connection-type : See ethernet.txt file in the same directory.
- This property is only really needed if the connection is of type
- "rgmii-id", as all other connection types are detected by hardware.
- - fsl,magic-packet : If present, indicates that the hardware supports
- waking up via magic packet.
- - fsl,wake-on-filer : If present, indicates that the hardware supports
- waking up by Filer General Purpose Interrupt (FGPI) asserted on the
- Rx int line. This is an advanced power management capability allowing
- certain packet types (user) defined by filer rules to wake up the system.
- - bd-stash : If present, indicates that the hardware supports stashing
- buffer descriptors in the L2.
- - rx-stash-len : Denotes the number of bytes of a received buffer to stash
- in the L2.
- - rx-stash-idx : Denotes the index of the first byte from the received
- buffer to stash in the L2.
-
-Example:
- ethernet@24000 {
- device_type = "network";
- model = "TSEC";
- compatible = "gianfar";
- reg = <0x24000 0x1000>;
- local-mac-address = [ 00 E0 0C 00 73 00 ];
- interrupts = <29 2 30 2 34 2>;
- interrupt-parent = <&mpic>;
- phy-handle = <&phy0>
- };
+Refer to Documentation/devicetree/bindings/net/fsl,gianfar.yaml
* Gianfar PTP clock nodes
diff --git a/Documentation/devicetree/bindings/net/ieee802154/ca8210.txt b/Documentation/devicetree/bindings/net/ieee802154/ca8210.txt
index a1046e636fa1..f1bd07a0097d 100644
--- a/Documentation/devicetree/bindings/net/ieee802154/ca8210.txt
+++ b/Documentation/devicetree/bindings/net/ieee802154/ca8210.txt
@@ -20,7 +20,7 @@ Example:
reg = <0>;
spi-max-frequency = <3000000>;
spi-cpol;
- reset-gpio = <&gpio1 1 GPIO_ACTIVE_HIGH>;
+ reset-gpio = <&gpio1 1 GPIO_ACTIVE_LOW>;
irq-gpio = <&gpio1 2 GPIO_ACTIVE_HIGH>;
extclock-enable;
extclock-freq = 16000000;
diff --git a/Documentation/devicetree/bindings/net/intel,dwmac-plat.yaml b/Documentation/devicetree/bindings/net/intel,dwmac-plat.yaml
index 42a0bc94312c..62c1da36a2b5 100644
--- a/Documentation/devicetree/bindings/net/intel,dwmac-plat.yaml
+++ b/Documentation/devicetree/bindings/net/intel,dwmac-plat.yaml
@@ -41,6 +41,12 @@ properties:
- const: ptp_ref
- const: tx_clk
+ interrupts:
+ maxItems: 1
+
+ interrupt-names:
+ const: macirq
+
required:
- compatible
- clocks
diff --git a/Documentation/devicetree/bindings/net/mediatek-dwmac.yaml b/Documentation/devicetree/bindings/net/mediatek-dwmac.yaml
index ed9d845f6008..3aab21b8e8de 100644
--- a/Documentation/devicetree/bindings/net/mediatek-dwmac.yaml
+++ b/Documentation/devicetree/bindings/net/mediatek-dwmac.yaml
@@ -64,6 +64,12 @@ properties:
- const: rmii_internal
- const: mac_cg
+ interrupts:
+ maxItems: 1
+
+ interrupt-names:
+ const: macirq
+
power-domains:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/net/nxp,dwmac-imx.yaml b/Documentation/devicetree/bindings/net/nxp,dwmac-imx.yaml
index 87bc4416eadf..e5db346beca9 100644
--- a/Documentation/devicetree/bindings/net/nxp,dwmac-imx.yaml
+++ b/Documentation/devicetree/bindings/net/nxp,dwmac-imx.yaml
@@ -56,6 +56,14 @@ properties:
- tx
- mem
+ interrupts:
+ maxItems: 2
+
+ interrupt-names:
+ items:
+ - const: macirq
+ - const: eth_wake_irq
+
intf_mode:
$ref: /schemas/types.yaml#/definitions/phandle-array
items:
diff --git a/Documentation/devicetree/bindings/net/qcom,ipa.yaml b/Documentation/devicetree/bindings/net/qcom,ipa.yaml
index 1a46d80a66e8..b4a79912d473 100644
--- a/Documentation/devicetree/bindings/net/qcom,ipa.yaml
+++ b/Documentation/devicetree/bindings/net/qcom,ipa.yaml
@@ -210,70 +210,70 @@ additionalProperties: false
examples:
- |
- #include <dt-bindings/interrupt-controller/arm-gic.h>
- #include <dt-bindings/clock/qcom,rpmh.h>
- #include <dt-bindings/interconnect/qcom,sdm845.h>
-
- smp2p-mpss {
- compatible = "qcom,smp2p";
- interrupts = <GIC_SPI 576 IRQ_TYPE_EDGE_RISING>;
- mboxes = <&apss_shared 6>;
- qcom,smem = <94>, <432>;
- qcom,local-pid = <0>;
- qcom,remote-pid = <5>;
-
- ipa_smp2p_out: ipa-ap-to-modem {
- qcom,entry-name = "ipa";
- #qcom,smem-state-cells = <1>;
- };
-
- ipa_smp2p_in: ipa-modem-to-ap {
- qcom,entry-name = "ipa";
- interrupt-controller;
- #interrupt-cells = <2>;
- };
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/clock/qcom,rpmh.h>
+ #include <dt-bindings/interconnect/qcom,sdm845.h>
+
+ smp2p-mpss {
+ compatible = "qcom,smp2p";
+ interrupts = <GIC_SPI 576 IRQ_TYPE_EDGE_RISING>;
+ mboxes = <&apss_shared 6>;
+ qcom,smem = <94>, <432>;
+ qcom,local-pid = <0>;
+ qcom,remote-pid = <5>;
+
+ ipa_smp2p_out: ipa-ap-to-modem {
+ qcom,entry-name = "ipa";
+ #qcom,smem-state-cells = <1>;
};
- ipa@1e40000 {
- compatible = "qcom,sc7180-ipa";
-
- qcom,gsi-loader = "self";
- memory-region = <&ipa_fw_mem>;
- firmware-name = "qcom/sc7180-trogdor/modem/modem.mbn";
-
- iommus = <&apps_smmu 0x440 0x0>,
- <&apps_smmu 0x442 0x0>;
- reg = <0x1e40000 0x7000>,
- <0x1e47000 0x2000>,
- <0x1e04000 0x2c000>;
- reg-names = "ipa-reg",
- "ipa-shared",
- "gsi";
-
- interrupts-extended = <&intc GIC_SPI 311 IRQ_TYPE_EDGE_RISING>,
- <&intc GIC_SPI 432 IRQ_TYPE_LEVEL_HIGH>,
- <&ipa_smp2p_in 0 IRQ_TYPE_EDGE_RISING>,
- <&ipa_smp2p_in 1 IRQ_TYPE_EDGE_RISING>;
- interrupt-names = "ipa",
- "gsi",
- "ipa-clock-query",
- "ipa-setup-ready";
-
- clocks = <&rpmhcc RPMH_IPA_CLK>;
- clock-names = "core";
-
- interconnects =
- <&aggre2_noc MASTER_IPA 0 &mc_virt SLAVE_EBI1 0>,
- <&aggre2_noc MASTER_IPA 0 &system_noc SLAVE_IMEM 0>,
- <&gem_noc MASTER_APPSS_PROC 0 &config_noc SLAVE_IPA_CFG 0>;
- interconnect-names = "memory",
- "imem",
- "config";
-
- qcom,qmp = <&aoss_qmp>;
-
- qcom,smem-states = <&ipa_smp2p_out 0>,
- <&ipa_smp2p_out 1>;
- qcom,smem-state-names = "ipa-clock-enabled-valid",
- "ipa-clock-enabled";
+ ipa_smp2p_in: ipa-modem-to-ap {
+ qcom,entry-name = "ipa";
+ interrupt-controller;
+ #interrupt-cells = <2>;
};
+ };
+
+ ipa@1e40000 {
+ compatible = "qcom,sc7180-ipa";
+
+ qcom,gsi-loader = "self";
+ memory-region = <&ipa_fw_mem>;
+ firmware-name = "qcom/sc7180-trogdor/modem/modem.mbn";
+
+ iommus = <&apps_smmu 0x440 0x0>,
+ <&apps_smmu 0x442 0x0>;
+ reg = <0x1e40000 0x7000>,
+ <0x1e47000 0x2000>,
+ <0x1e04000 0x2c000>;
+ reg-names = "ipa-reg",
+ "ipa-shared",
+ "gsi";
+
+ interrupts-extended = <&intc GIC_SPI 311 IRQ_TYPE_EDGE_RISING>,
+ <&intc GIC_SPI 432 IRQ_TYPE_LEVEL_HIGH>,
+ <&ipa_smp2p_in 0 IRQ_TYPE_EDGE_RISING>,
+ <&ipa_smp2p_in 1 IRQ_TYPE_EDGE_RISING>;
+ interrupt-names = "ipa",
+ "gsi",
+ "ipa-clock-query",
+ "ipa-setup-ready";
+
+ clocks = <&rpmhcc RPMH_IPA_CLK>;
+ clock-names = "core";
+
+ interconnects =
+ <&aggre2_noc MASTER_IPA 0 &mc_virt SLAVE_EBI1 0>,
+ <&aggre2_noc MASTER_IPA 0 &system_noc SLAVE_IMEM 0>,
+ <&gem_noc MASTER_APPSS_PROC 0 &config_noc SLAVE_IPA_CFG 0>;
+ interconnect-names = "memory",
+ "imem",
+ "config";
+
+ qcom,qmp = <&aoss_qmp>;
+
+ qcom,smem-states = <&ipa_smp2p_out 0>,
+ <&ipa_smp2p_out 1>;
+ qcom,smem-state-names = "ipa-clock-enabled-valid",
+ "ipa-clock-enabled";
+ };
diff --git a/Documentation/devicetree/bindings/net/realtek,rtl9301-mdio.yaml b/Documentation/devicetree/bindings/net/realtek,rtl9301-mdio.yaml
new file mode 100644
index 000000000000..02e4e33e9969
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/realtek,rtl9301-mdio.yaml
@@ -0,0 +1,86 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/net/realtek,rtl9301-mdio.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Realtek RTL9300 MDIO Controller
+
+maintainers:
+ - Chris Packham <chris.packham@alliedtelesis.co.nz>
+
+properties:
+ compatible:
+ oneOf:
+ - items:
+ - enum:
+ - realtek,rtl9302b-mdio
+ - realtek,rtl9302c-mdio
+ - realtek,rtl9303-mdio
+ - const: realtek,rtl9301-mdio
+ - const: realtek,rtl9301-mdio
+
+ '#address-cells':
+ const: 1
+
+ '#size-cells':
+ const: 0
+
+ reg:
+ maxItems: 1
+
+patternProperties:
+ '^mdio-bus@[0-3]$':
+ $ref: mdio.yaml#
+
+ properties:
+ reg:
+ maxItems: 1
+
+ required:
+ - reg
+
+ patternProperties:
+ '^ethernet-phy@[a-f0-9]+$':
+ type: object
+ $ref: ethernet-phy.yaml#
+ unevaluatedProperties: false
+
+ unevaluatedProperties: false
+
+required:
+ - compatible
+ - reg
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ mdio-controller@ca00 {
+ compatible = "realtek,rtl9301-mdio";
+ reg = <0xca00 0x200>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ mdio-bus@0 {
+ reg = <0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ethernet-phy@0 {
+ compatible = "ethernet-phy-ieee802.3-c45";
+ reg = <0>;
+ };
+ };
+
+ mdio-bus@1 {
+ reg = <1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ethernet-phy@0 {
+ compatible = "ethernet-phy-ieee802.3-c45";
+ reg = <0>;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/mfd/realtek,rtl9301-switch.yaml b/Documentation/devicetree/bindings/net/realtek,rtl9301-switch.yaml
index f053303ab1e6..80eabc170669 100644
--- a/Documentation/devicetree/bindings/mfd/realtek,rtl9301-switch.yaml
+++ b/Documentation/devicetree/bindings/net/realtek,rtl9301-switch.yaml
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
-$id: http://devicetree.org/schemas/mfd/realtek,rtl9301-switch.yaml#
+$id: http://devicetree.org/schemas/net/realtek,rtl9301-switch.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Realtek Switch with Internal CPU
@@ -14,6 +14,8 @@ description:
number of different peripherals are accessed through a common register block,
represented here as a syscon node.
+$ref: ethernet-switch.yaml#/$defs/ethernet-ports
+
properties:
compatible:
items:
@@ -28,12 +30,23 @@ properties:
reg:
maxItems: 1
+ interrupts:
+ maxItems: 2
+
+ interrupt-names:
+ items:
+ - const: switch
+ - const: nic
+
'#address-cells':
const: 1
'#size-cells':
const: 1
+ ethernet-ports:
+ type: object
+
patternProperties:
'reboot@[0-9a-f]+$':
$ref: /schemas/power/reset/syscon-reboot.yaml#
@@ -41,9 +54,14 @@ patternProperties:
'i2c@[0-9a-f]+$':
$ref: /schemas/i2c/realtek,rtl9301-i2c.yaml#
+ 'mdio-controller@[0-9a-f]+$':
+ $ref: realtek,rtl9301-mdio.yaml#
+
required:
- compatible
- reg
+ - interrupts
+ - interrupt-names
additionalProperties: false
@@ -52,6 +70,9 @@ examples:
ethernet-switch@1b000000 {
compatible = "realtek,rtl9301-switch", "syscon", "simple-mfd";
reg = <0x1b000000 0x10000>;
+ interrupt-parent = <&intc>;
+ interrupts = <23>, <24>;
+ interrupt-names = "switch", "nic";
#address-cells = <1>;
#size-cells = <1>;
@@ -110,5 +131,45 @@ examples:
};
};
};
+
+ mdio-controller@ca00 {
+ compatible = "realtek,rtl9301-mdio";
+ reg = <0xca00 0x200>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ mdio-bus@0 {
+ reg = <0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ phy1: ethernet-phy@0 {
+ reg = <0>;
+ };
+ };
+ mdio-bus@1 {
+ reg = <1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ phy2: ethernet-phy@0 {
+ reg = <0>;
+ };
+ };
+ };
+
+ ethernet-ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ phy-handle = <&phy1>;
+ };
+ port@1 {
+ reg = <1>;
+ phy-handle = <&phy2>;
+ };
+ };
};
diff --git a/Documentation/devicetree/bindings/net/rfkill-gpio.yaml b/Documentation/devicetree/bindings/net/rfkill-gpio.yaml
index 9630c8466fac..4a706a41ab38 100644
--- a/Documentation/devicetree/bindings/net/rfkill-gpio.yaml
+++ b/Documentation/devicetree/bindings/net/rfkill-gpio.yaml
@@ -32,6 +32,10 @@ properties:
shutdown-gpios:
maxItems: 1
+ default-blocked:
+ $ref: /schemas/types.yaml#/definitions/flag
+ description: configure rfkill state as blocked at boot
+
required:
- compatible
- radio-type
@@ -48,4 +52,5 @@ examples:
label = "rfkill-pcie-wlan";
radio-type = "wlan";
shutdown-gpios = <&gpio2 25 GPIO_ACTIVE_HIGH>;
+ default-blocked;
};
diff --git a/Documentation/devicetree/bindings/net/rockchip-dwmac.yaml b/Documentation/devicetree/bindings/net/rockchip-dwmac.yaml
index f8a576611d6c..0ac7c4b47d6b 100644
--- a/Documentation/devicetree/bindings/net/rockchip-dwmac.yaml
+++ b/Documentation/devicetree/bindings/net/rockchip-dwmac.yaml
@@ -24,6 +24,7 @@ select:
- rockchip,rk3366-gmac
- rockchip,rk3368-gmac
- rockchip,rk3399-gmac
+ - rockchip,rk3528-gmac
- rockchip,rk3568-gmac
- rockchip,rk3576-gmac
- rockchip,rk3588-gmac
@@ -32,9 +33,6 @@ select:
required:
- compatible
-allOf:
- - $ref: snps,dwmac.yaml#
-
properties:
compatible:
oneOf:
@@ -52,14 +50,25 @@ properties:
- rockchip,rv1108-gmac
- items:
- enum:
+ - rockchip,rk3528-gmac
- rockchip,rk3568-gmac
- rockchip,rk3576-gmac
- rockchip,rk3588-gmac
- rockchip,rv1126-gmac
- const: snps,dwmac-4.20a
+ interrupts:
+ minItems: 1
+ maxItems: 2
+
+ interrupt-names:
+ minItems: 1
+ items:
+ - const: macirq
+ - const: eth_wake_irq
+
clocks:
- minItems: 5
+ minItems: 4
maxItems: 8
clock-names:
@@ -114,6 +123,36 @@ required:
- compatible
- clocks
- clock-names
+ - rockchip,grf
+
+allOf:
+ - $ref: snps,dwmac.yaml#
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - rockchip,rk3576-gmac
+ - rockchip,rk3588-gmac
+ then:
+ required:
+ - rockchip,php-grf
+ else:
+ properties:
+ rockchip,php-grf: false
+
+ - if:
+ not:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - rockchip,rk3528-gmac
+ then:
+ properties:
+ clocks:
+ minItems: 5
unevaluatedProperties: false
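The interrupts/interrupt-names constraints added above (and mirrored in the other dwmac glue bindings in this series) describe the usual stmmac pair: the MAC interrupt plus an optional wake-on-LAN interrupt. A minimal sketch of how a node might express that pair; the unit address, interrupt parent and SPI numbers are placeholders rather than values taken from this patch, and the other required properties (clocks, clock-names, rockchip,grf, ...) are omitted for brevity:

    gmac1: ethernet@fe010000 {
        compatible = "rockchip,rk3568-gmac", "snps,dwmac-4.20a";
        reg = <0xfe010000 0x10000>;
        /* first entry is "macirq", second the optional "eth_wake_irq" */
        interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>,
                     <GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>;
        interrupt-names = "macirq", "eth_wake_irq";
    };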
diff --git a/Documentation/devicetree/bindings/net/snps,dwmac.yaml b/Documentation/devicetree/bindings/net/snps,dwmac.yaml
index 91e75eb3f329..78b3030dc56d 100644
--- a/Documentation/devicetree/bindings/net/snps,dwmac.yaml
+++ b/Documentation/devicetree/bindings/net/snps,dwmac.yaml
@@ -32,6 +32,7 @@ select:
- snps,dwmac-4.20a
- snps,dwmac-5.10a
- snps,dwmac-5.20
+ - snps,dwmac-5.30a
- snps,dwxgmac
- snps,dwxgmac-2.10
@@ -98,10 +99,13 @@ properties:
- snps,dwmac-4.20a
- snps,dwmac-5.10a
- snps,dwmac-5.20
+ - snps,dwmac-5.30a
- snps,dwxgmac
- snps,dwxgmac-2.10
+ - sophgo,sg2044-dwmac
- starfive,jh7100-dwmac
- starfive,jh7110-dwmac
+ - tesla,fsd-ethqos
- thead,th1520-gmac
reg:
@@ -126,7 +130,7 @@ properties:
clocks:
minItems: 1
- maxItems: 8
+ maxItems: 10
additionalItems: true
items:
- description: GMAC main clock
@@ -138,7 +142,7 @@ properties:
clock-names:
minItems: 1
- maxItems: 8
+ maxItems: 10
additionalItems: true
contains:
enum:
@@ -490,6 +494,7 @@ properties:
snps,en-tx-lpi-clockgating:
$ref: /schemas/types.yaml#/definitions/flag
+ deprecated: true
description:
Enable gating of the MAC TX clock during TX low-power mode
@@ -631,6 +636,7 @@ allOf:
- snps,dwmac-4.20a
- snps,dwmac-5.10a
- snps,dwmac-5.20
+ - snps,dwmac-5.30a
- snps,dwxgmac
- snps,dwxgmac-2.10
- st,spear600-gmac
diff --git a/Documentation/devicetree/bindings/net/sophgo,sg2044-dwmac.yaml b/Documentation/devicetree/bindings/net/sophgo,sg2044-dwmac.yaml
new file mode 100644
index 000000000000..4dd2dc9c678b
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/sophgo,sg2044-dwmac.yaml
@@ -0,0 +1,126 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/net/sophgo,sg2044-dwmac.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Sophgo SG2044 DWMAC glue layer
+
+maintainers:
+ - Inochi Amaoto <inochiama@gmail.com>
+
+select:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - sophgo,sg2044-dwmac
+ required:
+ - compatible
+
+properties:
+ compatible:
+ items:
+ - const: sophgo,sg2044-dwmac
+ - const: snps,dwmac-5.30a
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ items:
+ - description: GMAC main clock
+ - description: PTP clock
+ - description: TX clock
+
+ clock-names:
+ items:
+ - const: stmmaceth
+ - const: ptp_ref
+ - const: tx
+
+ dma-noncoherent: true
+
+ interrupts:
+ maxItems: 1
+
+ interrupt-names:
+ maxItems: 1
+
+ resets:
+ maxItems: 1
+
+ reset-names:
+ const: stmmaceth
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - clock-names
+ - interrupts
+ - interrupt-names
+ - resets
+ - reset-names
+
+allOf:
+ - $ref: snps,dwmac.yaml#
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+
+ ethernet@30006000 {
+ compatible = "sophgo,sg2044-dwmac", "snps,dwmac-5.30a";
+ reg = <0x30006000 0x4000>;
+ clocks = <&clk 151>, <&clk 152>, <&clk 154>;
+ clock-names = "stmmaceth", "ptp_ref", "tx";
+ interrupt-parent = <&intc>;
+ interrupts = <296 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "macirq";
+ resets = <&rst 30>;
+ reset-names = "stmmaceth";
+ snps,multicast-filter-bins = <0>;
+ snps,perfect-filter-entries = <1>;
+ snps,aal;
+ snps,tso;
+ snps,txpbl = <32>;
+ snps,rxpbl = <32>;
+ snps,mtl-rx-config = <&gmac0_mtl_rx_setup>;
+ snps,mtl-tx-config = <&gmac0_mtl_tx_setup>;
+ snps,axi-config = <&gmac0_stmmac_axi_setup>;
+ status = "disabled";
+
+ gmac0_mtl_rx_setup: rx-queues-config {
+ snps,rx-queues-to-use = <8>;
+ snps,rx-sched-wsp;
+ queue0 {};
+ queue1 {};
+ queue2 {};
+ queue3 {};
+ queue4 {};
+ queue5 {};
+ queue6 {};
+ queue7 {};
+ };
+
+ gmac0_mtl_tx_setup: tx-queues-config {
+ snps,tx-queues-to-use = <8>;
+ queue0 {};
+ queue1 {};
+ queue2 {};
+ queue3 {};
+ queue4 {};
+ queue5 {};
+ queue6 {};
+ queue7 {};
+ };
+
+ gmac0_stmmac_axi_setup: stmmac-axi-config {
+ snps,blen = <16 8 4 0 0 0 0>;
+ snps,wr_osr_lmt = <1>;
+ snps,rd_osr_lmt = <2>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/net/stm32-dwmac.yaml b/Documentation/devicetree/bindings/net/stm32-dwmac.yaml
index 85cea9966a27..987254900d0d 100644
--- a/Documentation/devicetree/bindings/net/stm32-dwmac.yaml
+++ b/Documentation/devicetree/bindings/net/stm32-dwmac.yaml
@@ -54,6 +54,16 @@ properties:
items:
- const: stmmaceth
+ interrupts:
+ minItems: 1
+ maxItems: 2
+
+ interrupt-names:
+ minItems: 1
+ items:
+ - const: macirq
+ - const: eth_wake_irq
+
clocks:
minItems: 3
items:
diff --git a/Documentation/devicetree/bindings/net/tesla,fsd-ethqos.yaml b/Documentation/devicetree/bindings/net/tesla,fsd-ethqos.yaml
new file mode 100644
index 000000000000..dd7481bb16e5
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/tesla,fsd-ethqos.yaml
@@ -0,0 +1,118 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/net/tesla,fsd-ethqos.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: FSD Ethernet Quality of Service
+
+maintainers:
+ - Swathi K S <swathi.ks@samsung.com>
+
+description:
+ Tesla Ethernet devices based on dwmac support Gigabit Ethernet.
+
+allOf:
+ - $ref: snps,dwmac.yaml#
+
+properties:
+ compatible:
+ const: tesla,fsd-ethqos
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ interrupt-names:
+ items:
+ - const: macirq
+
+ clocks:
+ minItems: 5
+ items:
+ - description: PTP clock
+ - description: Master bus clock
+ - description: Slave bus clock
+ - description: MAC TX clock
+ - description: MAC RX clock
+ - description: Master2 bus clock
+ - description: Slave2 bus clock
+ - description: RX MUX clock
+ - description: PHY RX clock
+ - description: PERIC RGMII clock
+
+ clock-names:
+ minItems: 5
+ items:
+ - const: ptp_ref
+ - const: master_bus
+ - const: slave_bus
+ - const: tx
+ - const: rx
+ - const: master2_bus
+ - const: slave2_bus
+ - const: eqos_rxclk_mux
+ - const: eqos_phyrxclk
+ - const: dout_peric_rgmii_clk
+
+ iommus:
+ maxItems: 1
+
+ phy-mode:
+ enum:
+ - rgmii
+ - rgmii-id
+ - rgmii-rxid
+ - rgmii-txid
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+ - clock-names
+ - iommus
+ - phy-mode
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/fsd-clk.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ soc {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ethernet1: ethernet@14300000 {
+ compatible = "tesla,fsd-ethqos";
+ reg = <0x0 0x14300000 0x0 0x10000>;
+ interrupts = <GIC_SPI 176 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "macirq";
+ clocks = <&clock_peric PERIC_EQOS_TOP_IPCLKPORT_CLK_PTP_REF_I>,
+ <&clock_peric PERIC_EQOS_TOP_IPCLKPORT_ACLK_I>,
+ <&clock_peric PERIC_EQOS_TOP_IPCLKPORT_HCLK_I>,
+ <&clock_peric PERIC_EQOS_TOP_IPCLKPORT_RGMII_CLK_I>,
+ <&clock_peric PERIC_EQOS_TOP_IPCLKPORT_CLK_RX_I>,
+ <&clock_peric PERIC_BUS_D_PERIC_IPCLKPORT_EQOSCLK>,
+ <&clock_peric PERIC_BUS_P_PERIC_IPCLKPORT_EQOSCLK>,
+ <&clock_peric PERIC_EQOS_PHYRXCLK_MUX>,
+ <&clock_peric PERIC_EQOS_PHYRXCLK>,
+ <&clock_peric PERIC_DOUT_RGMII_CLK>;
+ clock-names = "ptp_ref", "master_bus", "slave_bus","tx",
+ "rx", "master2_bus", "slave2_bus", "eqos_rxclk_mux",
+ "eqos_phyrxclk","dout_peric_rgmii_clk";
+ assigned-clocks = <&clock_peric PERIC_EQOS_PHYRXCLK_MUX>,
+ <&clock_peric PERIC_EQOS_PHYRXCLK>;
+ assigned-clock-parents = <&clock_peric PERIC_EQOS_PHYRXCLK>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&eth1_tx_clk>, <&eth1_tx_data>, <&eth1_tx_ctrl>,
+ <&eth1_phy_intr>, <&eth1_rx_clk>, <&eth1_rx_data>,
+ <&eth1_rx_ctrl>, <&eth1_mdio>;
+ iommus = <&smmu_peric 0x0 0x1>;
+ phy-mode = "rgmii-id";
+ };
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/net/toshiba,visconti-dwmac.yaml b/Documentation/devicetree/bindings/net/toshiba,visconti-dwmac.yaml
index 052f636158b3..f0f32e18fc85 100644
--- a/Documentation/devicetree/bindings/net/toshiba,visconti-dwmac.yaml
+++ b/Documentation/devicetree/bindings/net/toshiba,visconti-dwmac.yaml
@@ -42,6 +42,12 @@ properties:
- const: stmmaceth
- const: phy_ref_clk
+ interrupts:
+ maxItems: 1
+
+ interrupt-names:
+ const: macirq
+
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/net/wireless/qcom,ath10k.yaml b/Documentation/devicetree/bindings/net/wireless/qcom,ath10k.yaml
index aace072e2d52..f2440d39b7eb 100644
--- a/Documentation/devicetree/bindings/net/wireless/qcom,ath10k.yaml
+++ b/Documentation/devicetree/bindings/net/wireless/qcom,ath10k.yaml
@@ -92,20 +92,41 @@ properties:
ieee80211-freq-limit: true
+ qcom,calibration-data:
+ $ref: /schemas/types.yaml#/definitions/uint8-array
+ description:
+ Calibration data + board-specific data as a byte array. The length
+ can vary between hardware versions.
+
qcom,ath10k-calibration-data:
$ref: /schemas/types.yaml#/definitions/uint8-array
+ deprecated: true
description:
Calibration data + board-specific data as a byte array. The length
can vary between hardware versions.
+ qcom,calibration-variant:
+ $ref: /schemas/types.yaml#/definitions/string
+ description:
+ Unique variant identifier of the calibration data in board-2.bin
+ for designs with colliding bus and device specific ids
+
qcom,ath10k-calibration-variant:
$ref: /schemas/types.yaml#/definitions/string
+ deprecated: true
description:
Unique variant identifier of the calibration data in board-2.bin
for designs with colliding bus and device specific ids
+ qcom,pre-calibration-data:
+ $ref: /schemas/types.yaml#/definitions/uint8-array
+ description:
+ Pre-calibration data as a byte array. The length can vary between
+ hardware versions.
+
qcom,ath10k-pre-calibration-data:
$ref: /schemas/types.yaml#/definitions/uint8-array
+ deprecated: true
description:
Pre-calibration data as a byte array. The length can vary between
hardware versions.
diff --git a/Documentation/devicetree/bindings/net/wireless/qcom,ath11k-pci.yaml b/Documentation/devicetree/bindings/net/wireless/qcom,ath11k-pci.yaml
index a4425cf196ab..653b319fee88 100644
--- a/Documentation/devicetree/bindings/net/wireless/qcom,ath11k-pci.yaml
+++ b/Documentation/devicetree/bindings/net/wireless/qcom,ath11k-pci.yaml
@@ -22,8 +22,15 @@ properties:
reg:
maxItems: 1
+ qcom,calibration-variant:
+ $ref: /schemas/types.yaml#/definitions/string
+ description: |
+ string to uniquely identify variant of the calibration data for designs
+ with colliding bus and device ids
+
qcom,ath11k-calibration-variant:
$ref: /schemas/types.yaml#/definitions/string
+ deprecated: true
description: |
string to uniquely identify variant of the calibration data for designs
with colliding bus and device ids
@@ -127,7 +134,7 @@ examples:
vddrfa1p2-supply = <&vreg_pmu_rfa_1p2>;
vddrfa1p8-supply = <&vreg_pmu_rfa_1p7>;
- qcom,ath11k-calibration-variant = "LE_X13S";
+ qcom,calibration-variant = "LE_X13S";
};
};
};
diff --git a/Documentation/devicetree/bindings/net/wireless/qcom,ath11k.yaml b/Documentation/devicetree/bindings/net/wireless/qcom,ath11k.yaml
index a69ffb7b3cb8..c089677702cf 100644
--- a/Documentation/devicetree/bindings/net/wireless/qcom,ath11k.yaml
+++ b/Documentation/devicetree/bindings/net/wireless/qcom,ath11k.yaml
@@ -41,8 +41,15 @@ properties:
* reg
* reg-names
+ qcom,calibration-variant:
+ $ref: /schemas/types.yaml#/definitions/string
+ description:
+ string to uniquely identify variant of the calibration data in the
+ board-2.bin for designs with colliding bus and device specific ids
+
qcom,ath11k-calibration-variant:
$ref: /schemas/types.yaml#/definitions/string
+ deprecated: true
description:
string to uniquely identify variant of the calibration data in the
board-2.bin for designs with colliding bus and device specific ids
diff --git a/Documentation/devicetree/bindings/net/wireless/qcom,ath12k-wsi.yaml b/Documentation/devicetree/bindings/net/wireless/qcom,ath12k-wsi.yaml
index 318f305405e3..589960144fe1 100644
--- a/Documentation/devicetree/bindings/net/wireless/qcom,ath12k-wsi.yaml
+++ b/Documentation/devicetree/bindings/net/wireless/qcom,ath12k-wsi.yaml
@@ -52,8 +52,15 @@ properties:
reg:
maxItems: 1
+ qcom,calibration-variant:
+ $ref: /schemas/types.yaml#/definitions/string
+ description:
+ String to uniquely identify variant of the calibration data for designs
+ with colliding bus and device ids
+
qcom,ath12k-calibration-variant:
$ref: /schemas/types.yaml#/definitions/string
+ deprecated: true
description:
String to uniquely identify variant of the calibration data for designs
with colliding bus and device ids
@@ -103,7 +110,7 @@ examples:
compatible = "pci17cb,1109";
reg = <0x0 0x0 0x0 0x0 0x0>;
- qcom,ath12k-calibration-variant = "RDP433_1";
+ qcom,calibration-variant = "RDP433_1";
ports {
#address-cells = <1>;
@@ -139,7 +146,7 @@ examples:
compatible = "pci17cb,1109";
reg = <0x0 0x0 0x0 0x0 0x0>;
- qcom,ath12k-calibration-variant = "RDP433_2";
+ qcom,calibration-variant = "RDP433_2";
qcom,wsi-controller;
ports {
@@ -176,7 +183,7 @@ examples:
compatible = "pci17cb,1109";
reg = <0x0 0x0 0x0 0x0 0x0>;
- qcom,ath12k-calibration-variant = "RDP433_3";
+ qcom,calibration-variant = "RDP433_3";
ports {
#address-cells = <1>;
diff --git a/Documentation/devicetree/bindings/platform/huawei,gaokun-ec.yaml b/Documentation/devicetree/bindings/platform/huawei,gaokun-ec.yaml
new file mode 100644
index 000000000000..4a03b0ee3149
--- /dev/null
+++ b/Documentation/devicetree/bindings/platform/huawei,gaokun-ec.yaml
@@ -0,0 +1,124 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/platform/huawei,gaokun-ec.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Huawei Matebook E Go Embedded Controller
+
+maintainers:
+ - Pengyu Luo <mitltlatltl@gmail.com>
+
+description:
+ Unlike other Qualcomm Snapdragon sc8180x- and sc8280xp-based machines,
+ which use a system called PMIC GLink to handle battery, UCSI and USB
+ Type-C DP Alt Mode, the Huawei Matebook E Go tablets use an embedded
+ controller. In addition, Huawei's implementation also handles extra
+ features such as charging thresholds, FN lock, smart charging, tablet
+ lid status, thermal sensors, and more.
+
+properties:
+ compatible:
+ enum:
+ - huawei,gaokun3-ec
+
+ reg:
+ const: 0x38
+
+ '#address-cells':
+ const: 1
+
+ '#size-cells':
+ const: 0
+
+ interrupts:
+ maxItems: 1
+
+patternProperties:
+ '^connector@[01]$':
+ $ref: /schemas/connector/usb-connector.yaml#
+
+ properties:
+ reg:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - interrupts
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ embedded-controller@38 {
+ compatible = "huawei,gaokun3-ec";
+ reg = <0x38>;
+
+ interrupts-extended = <&tlmm 107 IRQ_TYPE_LEVEL_LOW>;
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ connector@0 {
+ compatible = "usb-c-connector";
+ reg = <0>;
+ power-role = "dual";
+ data-role = "dual";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ ucsi0_ss_in: endpoint {
+ remote-endpoint = <&usb_0_qmpphy_out>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ ucsi0_sbu: endpoint {
+ remote-endpoint = <&usb0_sbu_mux>;
+ };
+ };
+ };
+ };
+
+ connector@1 {
+ compatible = "usb-c-connector";
+ reg = <1>;
+ power-role = "dual";
+ data-role = "dual";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ ucsi1_ss_in: endpoint {
+ remote-endpoint = <&usb_1_qmpphy_out>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ ucsi1_sbu: endpoint {
+ remote-endpoint = <&usb1_sbu_mux>;
+ };
+ };
+ };
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/power/allwinner,sun20i-d1-ppu.yaml b/Documentation/devicetree/bindings/power/allwinner,sun20i-d1-ppu.yaml
index 46e2647a5d72..f578be6a3bc8 100644
--- a/Documentation/devicetree/bindings/power/allwinner,sun20i-d1-ppu.yaml
+++ b/Documentation/devicetree/bindings/power/allwinner,sun20i-d1-ppu.yaml
@@ -17,6 +17,7 @@ properties:
compatible:
enum:
- allwinner,sun20i-d1-ppu
+ - allwinner,sun8i-v853-ppu
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/power/apple,pmgr-pwrstate.yaml b/Documentation/devicetree/bindings/power/apple,pmgr-pwrstate.yaml
index 59a6af735a21..6e9a670eaf56 100644
--- a/Documentation/devicetree/bindings/power/apple,pmgr-pwrstate.yaml
+++ b/Documentation/devicetree/bindings/power/apple,pmgr-pwrstate.yaml
@@ -31,6 +31,11 @@ properties:
compatible:
items:
- enum:
+ - apple,s5l8960x-pmgr-pwrstate
+ - apple,t7000-pmgr-pwrstate
+ - apple,s8000-pmgr-pwrstate
+ - apple,t8010-pmgr-pwrstate
+ - apple,t8015-pmgr-pwrstate
- apple,t8103-pmgr-pwrstate
- apple,t8112-pmgr-pwrstate
- apple,t6000-pmgr-pwrstate
diff --git a/Documentation/devicetree/bindings/power/qcom,kpss-acc-v2.yaml b/Documentation/devicetree/bindings/power/qcom,kpss-acc-v2.yaml
index 202a5d51ee88..3fa77fe14c87 100644
--- a/Documentation/devicetree/bindings/power/qcom,kpss-acc-v2.yaml
+++ b/Documentation/devicetree/bindings/power/qcom,kpss-acc-v2.yaml
@@ -18,7 +18,9 @@ description:
properties:
compatible:
- const: qcom,kpss-acc-v2
+ enum:
+ - qcom,kpss-acc-v2
+ - qcom,msm8916-acc
reg:
items:
diff --git a/Documentation/devicetree/bindings/power/rockchip,power-controller.yaml b/Documentation/devicetree/bindings/power/rockchip,power-controller.yaml
index 650dc0aae6f5..ebab98987e49 100644
--- a/Documentation/devicetree/bindings/power/rockchip,power-controller.yaml
+++ b/Documentation/devicetree/bindings/power/rockchip,power-controller.yaml
@@ -132,6 +132,9 @@ $defs:
A number of phandles to clocks that need to be enabled
while power domain switches state.
+ domain-supply:
+ description: domain regulator supply.
+
pm_qos:
$ref: /schemas/types.yaml#/definitions/phandle-array
items:
diff --git a/Documentation/devicetree/bindings/pwm/imx-tpm-pwm.yaml b/Documentation/devicetree/bindings/pwm/imx-tpm-pwm.yaml
index ac0a35bf8648..d5a9340ff920 100644
--- a/Documentation/devicetree/bindings/pwm/imx-tpm-pwm.yaml
+++ b/Documentation/devicetree/bindings/pwm/imx-tpm-pwm.yaml
@@ -23,8 +23,15 @@ properties:
const: 3
compatible:
- enum:
- - fsl,imx7ulp-pwm
+ oneOf:
+ - enum:
+ - fsl,imx7ulp-pwm
+ - items:
+ - enum:
+ - fsl,imx93-pwm
+ - fsl,imx94-pwm
+ - fsl,imx95-pwm
+ - const: fsl,imx7ulp-pwm
reg:
maxItems: 1
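With the compatible list extended above, i.MX93/94/95 TPM PWM instances reuse the i.MX7ULP programming model through a fallback compatible. A sketch of the resulting compatible pair only; the node name and unit address are illustrative assumptions, and the remaining required properties of the binding are omitted:

    tpm_pwm: pwm@44310000 {
        compatible = "fsl,imx93-pwm", "fsl,imx7ulp-pwm";
        reg = <0x44310000 0x1000>;
        #pwm-cells = <3>;
    };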
diff --git a/Documentation/devicetree/bindings/pwm/pwm-nexus-node.yaml b/Documentation/devicetree/bindings/pwm/pwm-nexus-node.yaml
new file mode 100644
index 000000000000..3b40e271fe8d
--- /dev/null
+++ b/Documentation/devicetree/bindings/pwm/pwm-nexus-node.yaml
@@ -0,0 +1,65 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/pwm/pwm-nexus-node.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: PWM Nexus node properties
+
+description: >
+ Platforms can have a standardized connector/expansion slot that exposes PWM
+ signals to expansion boards.
+
+ A nexus node allows a phandle list in a consumer node to be remapped through a
+ connector node in a generic way. With this remapping, the consumer node needs
+ to know only about the nexus node. Resources behind the nexus node are
+ decoupled by the nexus node itself.
+
+maintainers:
+ - Herve Codina <herve.codina@bootlin.com>
+
+select: true
+
+properties:
+ '#pwm-cells': true
+
+ pwm-map:
+ $ref: /schemas/types.yaml#/definitions/uint32-matrix
+
+ pwm-map-mask:
+ $ref: /schemas/types.yaml#/definitions/uint32-array
+
+ pwm-map-pass-thru:
+ $ref: /schemas/types.yaml#/definitions/uint32-array
+
+dependentRequired:
+ pwm-map: ['#pwm-cells']
+ pwm-map-mask: [ pwm-map ]
+ pwm-map-pass-thru: [ pwm-map ]
+
+additionalProperties: true
+
+examples:
+ - |
+ pwm1: pwm@100 {
+ reg = <0x100 0x10>;
+ #pwm-cells = <3>;
+ };
+
+ pwm2: pwm@200 {
+ reg = <0x200 0x10>;
+ #pwm-cells = <3>;
+ };
+
+ connector: connector {
+ #pwm-cells = <3>;
+ pwm-map = <0 0 0 &pwm1 1 0 0>,
+ <1 0 0 &pwm2 4 0 0>,
+ <2 0 0 &pwm1 3 0 0>;
+ pwm-map-mask = <0xffffffff 0x0 0x0>;
+ pwm-map-pass-thru = <0x0 0xffffffff 0xffffffff>;
+ };
+
+ device {
+ pwms = <&connector 1 57000 0>;
+ };
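As a reading aid for the example above: pwm-map-mask = <0xffffffff 0x0 0x0> means only the first consumer cell participates in matching, so the request <&connector 1 57000 0> selects the map entry <1 0 0 &pwm2 4 0 0>, and pwm-map-pass-thru copies the period and flags cells through unchanged. The consumer line is therefore equivalent to this hypothetical direct request, shown only for illustration:

    device {
        /* what the nexus remapping resolves the request above to */
        pwms = <&pwm2 4 57000 0>;
    };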
diff --git a/Documentation/devicetree/bindings/pwm/pwm-rockchip.yaml b/Documentation/devicetree/bindings/pwm/pwm-rockchip.yaml
index 65bfb492b3a4..c8cdfb723336 100644
--- a/Documentation/devicetree/bindings/pwm/pwm-rockchip.yaml
+++ b/Documentation/devicetree/bindings/pwm/pwm-rockchip.yaml
@@ -30,6 +30,8 @@ properties:
- enum:
- rockchip,px30-pwm
- rockchip,rk3308-pwm
+ - rockchip,rk3528-pwm
+ - rockchip,rk3562-pwm
- rockchip,rk3568-pwm
- rockchip,rk3588-pwm
- rockchip,rv1126-pwm
diff --git a/Documentation/devicetree/bindings/pwm/sophgo,sg2042-pwm.yaml b/Documentation/devicetree/bindings/pwm/sophgo,sg2042-pwm.yaml
new file mode 100644
index 000000000000..bbb6326d47d7
--- /dev/null
+++ b/Documentation/devicetree/bindings/pwm/sophgo,sg2042-pwm.yaml
@@ -0,0 +1,58 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/pwm/sophgo,sg2042-pwm.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Sophgo SG2042 PWM controller
+
+maintainers:
+ - Chen Wang <unicorn_wang@outlook.com>
+
+description:
+ This controller contains 4 channels which can generate PWM waveforms.
+
+allOf:
+ - $ref: pwm.yaml#
+
+properties:
+ compatible:
+ const: sophgo,sg2042-pwm
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ clock-names:
+ items:
+ - const: apb
+
+ resets:
+ maxItems: 1
+
+ "#pwm-cells":
+ const: 3
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - clock-names
+ - resets
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/reset/sophgo,sg2042-reset.h>
+
+ pwm@7f006000 {
+ compatible = "sophgo,sg2042-pwm";
+ reg = <0x7f006000 0x1000>;
+ #pwm-cells = <3>;
+ clocks = <&clock 67>;
+ clock-names = "apb";
+ resets = <&rstgen RST_PWM>;
+ };
diff --git a/Documentation/devicetree/bindings/regulator/nxp,pca9450-regulator.yaml b/Documentation/devicetree/bindings/regulator/nxp,pca9450-regulator.yaml
index 68709a7dc43f..4ffe5c3faea0 100644
--- a/Documentation/devicetree/bindings/regulator/nxp,pca9450-regulator.yaml
+++ b/Documentation/devicetree/bindings/regulator/nxp,pca9450-regulator.yaml
@@ -17,6 +17,9 @@ description: |
Datasheet is available at
https://www.nxp.com/docs/en/data-sheet/PCA9450DS.pdf
+ The PF9453 is also supported; its datasheet is available at
+ https://www.nxp.com/docs/en/data-sheet/PF9453_SDS.pdf
+
# The valid names for PCA9450 regulator nodes are:
# BUCK1, BUCK2, BUCK3, BUCK4, BUCK5, BUCK6,
# LDO1, LDO2, LDO3, LDO4, LDO5
@@ -30,6 +33,7 @@ properties:
- nxp,pca9450c
- nxp,pca9451a
- nxp,pca9452
+ - nxp,pf9453
reg:
maxItems: 1
@@ -42,8 +46,30 @@ properties:
description: |
list of regulators provided by this controller
+ properties:
+ LDO5:
+ type: object
+ $ref: regulator.yaml#
+ description:
+ Properties for single LDO5 regulator.
+
+ properties:
+ nxp,sd-vsel-fixed-low:
+ type: boolean
+ description:
+ Let the driver know that SD_VSEL is hardwired to low level and
+ there is no GPIO to get the actual value from.
+
+ sd-vsel-gpios:
+ description:
+ GPIO that can be used to read the current status of the SD_VSEL
+ signal in order for the driver to know if LDO5CTRL_L or LDO5CTRL_H
+ is used by the hardware.
+
+ unevaluatedProperties: false
+
patternProperties:
- "^LDO[1-5]$":
+ "^LDO([1-4]|-SNVS)$":
type: object
$ref: regulator.yaml#
description:
@@ -78,11 +104,6 @@ properties:
additionalProperties: false
- sd-vsel-gpios:
- description: GPIO that is used to switch LDO5 between being configured by
- LDO5CTRL_L or LDO5CTRL_H register. Use this if the SD_VSEL signal is
- connected to a host GPIO.
-
nxp,i2c-lt-enable:
type: boolean
description:
@@ -101,6 +122,24 @@ required:
additionalProperties: false
+allOf:
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: nxp,pf9453
+ then:
+ properties:
+ regulators:
+ patternProperties:
+ "^LDO[3-4]$": false
+ "^BUCK[5-6]$": false
+ else:
+ properties:
+ regulators:
+ properties:
+ LDO-SNVS: false
+
examples:
- |
#include <dt-bindings/interrupt-controller/irq.h>
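The allOf clause added above encodes that LDO3, LDO4, BUCK5 and BUCK6 are not valid on the PF9453, while the new LDO-SNVS name is only valid there. A minimal sketch of a PF9453 node under these rules; the I2C address, regulator names and any omitted required properties are illustrative assumptions, not taken from this patch:

    pmic@32 {
        compatible = "nxp,pf9453";
        reg = <0x32>;

        regulators {
            BUCK1 {
                regulator-name = "buck1";
            };

            LDO-SNVS {
                regulator-name = "ldo-snvs";
            };
        };
    };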
diff --git a/Documentation/devicetree/bindings/regulator/richtek,rtq2208.yaml b/Documentation/devicetree/bindings/regulator/richtek,rtq2208.yaml
index 87accc6f13b8..022c1f197364 100644
--- a/Documentation/devicetree/bindings/regulator/richtek,rtq2208.yaml
+++ b/Documentation/devicetree/bindings/regulator/richtek,rtq2208.yaml
@@ -39,7 +39,7 @@ properties:
interrupts:
maxItems: 1
-
+
richtek,mtp-sel-high:
type: boolean
description:
@@ -77,6 +77,7 @@ properties:
properties:
richtek,fixed-microvolt:
+ deprecated: true
description: |
This property can be used to set a fixed operating voltage that lies outside
the range of the regulator's adjustable mode.
diff --git a/Documentation/devicetree/bindings/reset/atmel,at91sam9260-reset.yaml b/Documentation/devicetree/bindings/reset/atmel,at91sam9260-reset.yaml
index 98465d26949e..c3b33bbc7319 100644
--- a/Documentation/devicetree/bindings/reset/atmel,at91sam9260-reset.yaml
+++ b/Documentation/devicetree/bindings/reset/atmel,at91sam9260-reset.yaml
@@ -26,6 +26,10 @@ properties:
- items:
- const: atmel,sama5d3-rstc
- const: atmel,at91sam9g45-rstc
+ - items:
+ - enum:
+ - microchip,sam9x7-rstc
+ - const: microchip,sam9x60-rstc
reg:
minItems: 1
diff --git a/Documentation/devicetree/bindings/riscv/spacemit.yaml b/Documentation/devicetree/bindings/riscv/spacemit.yaml
index 52e55077af1a..077b94f10dca 100644
--- a/Documentation/devicetree/bindings/riscv/spacemit.yaml
+++ b/Documentation/devicetree/bindings/riscv/spacemit.yaml
@@ -21,6 +21,7 @@ properties:
- items:
- enum:
- bananapi,bpi-f3
+ - milkv,jupiter
- const: spacemit,k1
additionalProperties: true
diff --git a/Documentation/devicetree/bindings/soc/qcom/qcom,geni-se.yaml b/Documentation/devicetree/bindings/soc/qcom/qcom,geni-se.yaml
index 7b031ef09669..54cd585f19e3 100644
--- a/Documentation/devicetree/bindings/soc/qcom/qcom,geni-se.yaml
+++ b/Documentation/devicetree/bindings/soc/qcom/qcom,geni-se.yaml
@@ -54,6 +54,10 @@ properties:
dma-coherent: true
+ firmware-name:
+ maxItems: 1
+ description: Specify the name of the QUP firmware to load.
+
required:
- compatible
- reg
@@ -135,6 +139,7 @@ examples:
#address-cells = <2>;
#size-cells = <2>;
ranges;
+ firmware-name = "qcom/sa8775p/qupv3fw.elf";
i2c0: i2c@a94000 {
compatible = "qcom,geni-i2c";
diff --git a/Documentation/devicetree/bindings/soc/qcom/qcom,pmic-glink.yaml b/Documentation/devicetree/bindings/soc/qcom/qcom,pmic-glink.yaml
index 2d3fe0b54243..4c9e78f29523 100644
--- a/Documentation/devicetree/bindings/soc/qcom/qcom,pmic-glink.yaml
+++ b/Documentation/devicetree/bindings/soc/qcom/qcom,pmic-glink.yaml
@@ -38,6 +38,7 @@ properties:
- items:
- enum:
- qcom,sm8650-pmic-glink
+ - qcom,sm8750-pmic-glink
- qcom,x1e80100-pmic-glink
- const: qcom,sm8550-pmic-glink
- const: qcom,pmic-glink
diff --git a/Documentation/devicetree/bindings/soc/renesas/renesas,r9a09g057-sys.yaml b/Documentation/devicetree/bindings/soc/renesas/renesas,r9a09g057-sys.yaml
index ebbf0c9109ce..e0f7503a9f35 100644
--- a/Documentation/devicetree/bindings/soc/renesas/renesas,r9a09g057-sys.yaml
+++ b/Documentation/devicetree/bindings/soc/renesas/renesas,r9a09g057-sys.yaml
@@ -22,7 +22,10 @@ description: |
properties:
compatible:
- const: renesas,r9a09g057-sys
+ items:
+ - enum:
+ - renesas,r9a09g047-sys # RZ/G3E
+ - renesas,r9a09g057-sys # RZ/V2H
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/soc/renesas/renesas.yaml b/Documentation/devicetree/bindings/soc/renesas/renesas.yaml
index 225c0f07ae94..51a4c48eea6d 100644
--- a/Documentation/devicetree/bindings/soc/renesas/renesas.yaml
+++ b/Documentation/devicetree/bindings/soc/renesas/renesas.yaml
@@ -493,6 +493,13 @@ properties:
- renesas,r9a07g044l2 # Dual Cortex-A55 RZ/G2L
- const: renesas,r9a07g044
+ - items:
+ - enum:
+ # MYIR Remi Pi SBC (MYB-YG2LX-REMI)
+ - myir,remi-pi
+ - const: renesas,r9a07g044l2
+ - const: renesas,r9a07g044
+
- description: RZ/V2L (R9A07G054)
items:
- enum:
@@ -552,6 +559,15 @@ properties:
- renesas,r9a09g057h41 # RZ/V2H
- renesas,r9a09g057h42 # RZ/V2H with Mali-G31 support
- renesas,r9a09g057h44 # RZ/V2HP with Mali-G31 + Mali-C55 support
+ - renesas,r9a09g057h45 # RZ/V2H with cryptographic extension support
+ - renesas,r9a09g057h46 # RZ/V2H with Mali-G31 + cryptographic extension support
+ - renesas,r9a09g057h48 # RZ/V2HP with Mali-G31 + Mali-C55 + cryptographic extension support
+ - const: renesas,r9a09g057
+
+ - description: Yuridenki-Shokai RZ/V2H Kakip
+ items:
+ - const: yuridenki,kakip
+ - const: renesas,r9a09g057h48
- const: renesas,r9a09g057
additionalProperties: true
diff --git a/Documentation/devicetree/bindings/soc/rockchip/grf.yaml b/Documentation/devicetree/bindings/soc/rockchip/grf.yaml
index 61f38b68a4a3..2f61c1b95fea 100644
--- a/Documentation/devicetree/bindings/soc/rockchip/grf.yaml
+++ b/Documentation/devicetree/bindings/soc/rockchip/grf.yaml
@@ -15,6 +15,9 @@ properties:
- items:
- enum:
- rockchip,rk3288-sgrf
+ - rockchip,rk3528-ioc-grf
+ - rockchip,rk3528-vo-grf
+ - rockchip,rk3528-vpu-grf
- rockchip,rk3566-pipe-grf
- rockchip,rk3568-pcie3-phy-grf
- rockchip,rk3568-pipe-grf
diff --git a/Documentation/devicetree/bindings/soc/samsung/exynos-pmu.yaml b/Documentation/devicetree/bindings/soc/samsung/exynos-pmu.yaml
index 8e6d051d8c97..204da6fe458d 100644
--- a/Documentation/devicetree/bindings/soc/samsung/exynos-pmu.yaml
+++ b/Documentation/devicetree/bindings/soc/samsung/exynos-pmu.yaml
@@ -52,6 +52,8 @@ properties:
- const: syscon
- items:
- enum:
+ - samsung,exynos2200-pmu
+ - samsung,exynos7870-pmu
- samsung,exynos7885-pmu
- samsung,exynos8895-pmu
- samsung,exynos9810-pmu
diff --git a/Documentation/devicetree/bindings/soc/samsung/exynos-usi.yaml b/Documentation/devicetree/bindings/soc/samsung/exynos-usi.yaml
index 5b046932fbc3..cb22637091e8 100644
--- a/Documentation/devicetree/bindings/soc/samsung/exynos-usi.yaml
+++ b/Documentation/devicetree/bindings/soc/samsung/exynos-usi.yaml
@@ -11,11 +11,21 @@ maintainers:
- Krzysztof Kozlowski <krzk@kernel.org>
description: |
- USI IP-core provides selectable serial protocol (UART, SPI or High-Speed I2C).
- USI shares almost all internal circuits within each protocol, so only one
- protocol can be chosen at a time. USI is modeled as a node with zero or more
- child nodes, each representing a serial sub-node device. The mode setting
- selects which particular function will be used.
+ The USI IP-core provides configurable support for serial protocols, enabling
+ different serial communication modes depending on the version.
+
+ In USIv1, one or two protocols can be enabled at a time, in a fixed set of
+ combinations: High-Speed I2C0, High-Speed I2C1, SPI, UART, both High-Speed
+ I2C0 and I2C1, or both High-Speed I2C1 and UART.
+
+ In USIv2, only one protocol can be active at a time, either UART, SPI, or
+ High-Speed I2C.
+
+ The USI core shares internal circuits across protocols, meaning only the
+ selected configuration is active at any given time. USI is modeled as a node
+ with zero or more child nodes, each representing a serial sub-node device. The
+ mode setting selects which particular function will be used.
properties:
$nodename:
@@ -31,6 +41,7 @@ properties:
- const: samsung,exynos850-usi
- enum:
- samsung,exynos850-usi
+ - samsung,exynos8895-usi
reg:
maxItems: 1
@@ -64,7 +75,7 @@ properties:
samsung,mode:
$ref: /schemas/types.yaml#/definitions/uint32
- enum: [0, 1, 2, 3]
+ enum: [0, 1, 2, 3, 4, 5, 6]
description:
Selects USI function (which serial protocol to use). Refer to
<include/dt-bindings/soc/samsung,exynos-usi.h> for valid USI mode values.
@@ -101,37 +112,59 @@ required:
- samsung,sysreg
- samsung,mode
-if:
- properties:
- compatible:
- contains:
- enum:
- - samsung,exynos850-usi
+allOf:
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - samsung,exynos850-usi
+
+ then:
+ properties:
+ reg:
+ maxItems: 1
+
+ clocks:
+ items:
+ - description: Bus (APB) clock
+ - description: Operating clock for UART/SPI/I2C protocol
-then:
- properties:
- reg:
- maxItems: 1
+ clock-names:
+ maxItems: 2
- clocks:
- items:
- - description: Bus (APB) clock
- - description: Operating clock for UART/SPI/I2C protocol
+ samsung,mode:
+ enum: [0, 1, 2, 3]
- clock-names:
- maxItems: 2
+ required:
+ - reg
+ - clocks
+ - clock-names
- required:
- - reg
- - clocks
- - clock-names
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - samsung,exynos8895-usi
-else:
- properties:
- reg: false
- clocks: false
- clock-names: false
- samsung,clkreq-on: false
+ then:
+ properties:
+ reg: false
+
+ clocks:
+ items:
+ - description: Bus (APB) clock
+ - description: Operating clock for UART/SPI protocol
+
+ clock-names:
+ maxItems: 2
+
+ samsung,clkreq-on: false
+
+ required:
+ - clocks
+ - clock-names
additionalProperties: false
@@ -144,7 +177,7 @@ examples:
compatible = "samsung,exynos850-usi";
reg = <0x138200c0 0x20>;
samsung,sysreg = <&sysreg_peri 0x1010>;
- samsung,mode = <USI_V2_UART>;
+ samsung,mode = <USI_MODE_UART>;
samsung,clkreq-on; /* needed for UART mode */
#address-cells = <1>;
#size-cells = <1>;
@@ -158,7 +191,6 @@ examples:
interrupts = <GIC_SPI 227 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cmu_peri 32>, <&cmu_peri 31>;
clock-names = "uart", "clk_uart_baud0";
- status = "disabled";
};
hsi2c_0: i2c@13820000 {
diff --git a/Documentation/devicetree/bindings/soc/samsung/samsung,exynos-sysreg.yaml b/Documentation/devicetree/bindings/soc/samsung/samsung,exynos-sysreg.yaml
index a75aef240629..d27ed6c9d61e 100644
--- a/Documentation/devicetree/bindings/soc/samsung/samsung,exynos-sysreg.yaml
+++ b/Documentation/devicetree/bindings/soc/samsung/samsung,exynos-sysreg.yaml
@@ -18,6 +18,11 @@ properties:
- google,gs101-hsi2-sysreg
- google,gs101-peric0-sysreg
- google,gs101-peric1-sysreg
+ - samsung,exynos2200-cmgp-sysreg
+ - samsung,exynos2200-peric0-sysreg
+ - samsung,exynos2200-peric1-sysreg
+ - samsung,exynos2200-peric2-sysreg
+ - samsung,exynos2200-ufs-sysreg
- samsung,exynos3-sysreg
- samsung,exynos4-sysreg
- samsung,exynos5-sysreg
diff --git a/Documentation/devicetree/bindings/soc/xilinx/xilinx.yaml b/Documentation/devicetree/bindings/soc/xilinx/xilinx.yaml
index 131aba5ed9f4..fb5c39c79d28 100644
--- a/Documentation/devicetree/bindings/soc/xilinx/xilinx.yaml
+++ b/Documentation/devicetree/bindings/soc/xilinx/xilinx.yaml
@@ -9,8 +9,8 @@ title: Xilinx Zynq Platforms
maintainers:
- Michal Simek <michal.simek@amd.com>
-description: |
- Xilinx boards with Zynq-7000 SOC or Zynq UltraScale+ MPSoC
+description:
+ AMD/Xilinx boards with ARM 32/64-bit cores
properties:
$nodename:
@@ -187,6 +187,13 @@ properties:
- const: qemu,mbv
- const: amd,mbv
+ - description: Xilinx Versal NET VN-X revA platform
+ items:
+ enum:
+ - xlnx,versal-net-vnx-revA
+ - xlnx,versal-net-vnx
+ - xlnx,versal-net
+
additionalProperties: true
...
diff --git a/Documentation/devicetree/bindings/sound/allwinner,sun4i-a10-codec.yaml b/Documentation/devicetree/bindings/sound/allwinner,sun4i-a10-codec.yaml
index ccae64ce3071..b4eca702febc 100644
--- a/Documentation/devicetree/bindings/sound/allwinner,sun4i-a10-codec.yaml
+++ b/Documentation/devicetree/bindings/sound/allwinner,sun4i-a10-codec.yaml
@@ -102,6 +102,10 @@ properties:
maxItems: 1
description: GPIO to enable the external amplifier
+ hp-det-gpios:
+ maxItems: 1
+ description: GPIO for headphone/line-out detection
+
required:
- "#sound-dai-cells"
- compatible
@@ -251,8 +255,10 @@ allOf:
allwinner,audio-routing:
items:
enum:
+ - Headphone
- LINEOUT
- Line Out
+ - Speaker
dmas:
items:
diff --git a/Documentation/devicetree/bindings/sound/atmel,at91-ssc.yaml b/Documentation/devicetree/bindings/sound/atmel,at91-ssc.yaml
new file mode 100644
index 000000000000..a05e61431824
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/atmel,at91-ssc.yaml
@@ -0,0 +1,104 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/sound/atmel,at91-ssc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Atmel Synchronous Serial Controller (SSC)
+
+maintainers:
+ - Andrei Simion <andrei.simion@microchip.com>
+
+description:
+ The Atmel Synchronous Serial Controller (SSC) provides a versatile
+ synchronous communication link for audio and telecom applications,
+ supporting protocols like I2S, Short Frame Sync, and Long Frame Sync.
+
+properties:
+ compatible:
+ enum:
+ - atmel,at91rm9200-ssc
+ - atmel,at91sam9g45-ssc
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ clock-names:
+ items:
+ - const: pclk
+
+ dmas:
+ items:
+ - description: TX DMA Channel
+ - description: RX DMA Channel
+
+ dma-names:
+ items:
+ - const: tx
+ - const: rx
+
+ atmel,clk-from-rk-pin:
+ description:
+ Specify the clock source for the SSC (Synchronous Serial Controller)
+ when operating in slave mode. By default, the clock is sourced from
+ the TK pin.
+ type: boolean
+
+ "#sound-dai-cells":
+ const: 0
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+ - clock-names
+
+allOf:
+ - $ref: dai-common.yaml#
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - atmel,at91sam9g45-ssc
+ then:
+ required:
+ - dmas
+ - dma-names
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/at91.h>
+ #include <dt-bindings/dma/at91.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
+
+ ssc@100000 {
+ compatible = "atmel,at91sam9g45-ssc";
+ reg = <0x100000 0x4000>;
+ interrupts = <28 IRQ_TYPE_LEVEL_HIGH 5>;
+ dmas = <&dma0 (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1) |
+ AT91_XDMAC_DT_PERID(38))>,
+ <&dma0 (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1) |
+ AT91_XDMAC_DT_PERID(39))>;
+ dma-names = "tx", "rx";
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 28>;
+ clock-names = "pclk";
+ #sound-dai-cells = <0>;
+ };
+
+ ssc@c00000 {
+ compatible = "atmel,at91rm9200-ssc";
+ reg = <0xc00000 0x4000>;
+ interrupts = <14 IRQ_TYPE_LEVEL_HIGH 5>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 14>;
+ clock-names = "pclk";
+ };
diff --git a/Documentation/devicetree/bindings/sound/atmel,at91sam9g20ek-wm8731.yaml b/Documentation/devicetree/bindings/sound/atmel,at91sam9g20ek-wm8731.yaml
new file mode 100644
index 000000000000..627da2d890b2
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/atmel,at91sam9g20ek-wm8731.yaml
@@ -0,0 +1,72 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/sound/atmel,at91sam9g20ek-wm8731.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Atmel at91sam9g20ek wm8731 audio complex
+
+maintainers:
+ - Balakrishnan Sambath <balakrishnan.s@microchip.com>
+
+description:
+ The audio complex configuration for Atmel at91sam9g20ek with WM8731 audio codec.
+
+properties:
+ compatible:
+ const: atmel,at91sam9g20ek-wm8731-audio
+
+ atmel,model:
+ $ref: /schemas/types.yaml#/definitions/string
+ description: The user-visible name of this sound complex.
+
+ atmel,audio-routing:
+ $ref: /schemas/types.yaml#/definitions/non-unique-string-array
+ description: A list of the connections between audio components.
+ minItems: 2
+ maxItems: 4
+ items:
+ enum:
+ # Board Connectors
+ - Ext Spk
+ - Int Mic
+
+ # CODEC Pins
+ - LOUT
+ - ROUT
+ - LHPOUT
+ - RHPOUT
+ - LLINEIN
+ - RLINEIN
+ - MICIN
+
+ atmel,ssc-controller:
+ $ref: /schemas/types.yaml#/definitions/phandle
+ description: The phandle of the SSC controller.
+
+ atmel,audio-codec:
+ $ref: /schemas/types.yaml#/definitions/phandle
+ description: The phandle of WM8731 audio codec.
+
+required:
+ - compatible
+ - atmel,model
+ - atmel,audio-routing
+ - atmel,ssc-controller
+ - atmel,audio-codec
+
+additionalProperties: false
+
+examples:
+ - |
+ sound {
+ compatible = "atmel,at91sam9g20ek-wm8731-audio";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_pck0_as_mck>;
+ atmel,model = "wm8731 @ AT91SAMG20EK";
+ atmel,audio-routing =
+ "Ext Spk", "LHPOUT",
+ "Int Mic", "MICIN";
+ atmel,ssc-controller = <&ssc0>;
+ atmel,audio-codec = <&wm8731>;
+ };
diff --git a/Documentation/devicetree/bindings/sound/atmel-at91sam9g20ek-wm8731-audio.txt b/Documentation/devicetree/bindings/sound/atmel-at91sam9g20ek-wm8731-audio.txt
deleted file mode 100644
index 9c5a9947b64d..000000000000
--- a/Documentation/devicetree/bindings/sound/atmel-at91sam9g20ek-wm8731-audio.txt
+++ /dev/null
@@ -1,26 +0,0 @@
-* Atmel at91sam9g20ek wm8731 audio complex
-
-Required properties:
- - compatible: "atmel,at91sam9g20ek-wm8731-audio"
- - atmel,model: The user-visible name of this sound complex.
- - atmel,audio-routing: A list of the connections between audio components.
- - atmel,ssc-controller: The phandle of the SSC controller
- - atmel,audio-codec: The phandle of the WM8731 audio codec
-Optional properties:
- - pinctrl-names, pinctrl-0: Please refer to pinctrl-bindings.txt
-
-Example:
-sound {
- compatible = "atmel,at91sam9g20ek-wm8731-audio";
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_pck0_as_mck>;
-
- atmel,model = "wm8731 @ AT91SAMG20EK";
-
- atmel,audio-routing =
- "Ext Spk", "LHPOUT",
- "Int MIC", "MICIN";
-
- atmel,ssc-controller = <&ssc0>;
- atmel,audio-codec = <&wm8731>;
-};
diff --git a/Documentation/devicetree/bindings/sound/audio-graph-card2.yaml b/Documentation/devicetree/bindings/sound/audio-graph-card2.yaml
index f943f90d8b15..94588353f852 100644
--- a/Documentation/devicetree/bindings/sound/audio-graph-card2.yaml
+++ b/Documentation/devicetree/bindings/sound/audio-graph-card2.yaml
@@ -37,6 +37,10 @@ properties:
codec2codec:
type: object
description: Codec to Codec node
+ hp-det-gpios:
+ $ref: audio-graph.yaml#/properties/hp-det-gpios
+ widgets:
+ $ref: audio-graph.yaml#/properties/widgets
required:
- compatible
diff --git a/Documentation/devicetree/bindings/sound/awinic,aw88395.yaml b/Documentation/devicetree/bindings/sound/awinic,aw88395.yaml
index 6676406bf2de..bb92d6ca3144 100644
--- a/Documentation/devicetree/bindings/sound/awinic,aw88395.yaml
+++ b/Documentation/devicetree/bindings/sound/awinic,aw88395.yaml
@@ -19,6 +19,7 @@ properties:
enum:
- awinic,aw88081
- awinic,aw88083
+ - awinic,aw88166
- awinic,aw88261
- awinic,aw88395
- awinic,aw88399
diff --git a/Documentation/devicetree/bindings/sound/dmic-codec.yaml b/Documentation/devicetree/bindings/sound/dmic-codec.yaml
index 59ef0cf6b6e5..cc3c84dd4c26 100644
--- a/Documentation/devicetree/bindings/sound/dmic-codec.yaml
+++ b/Documentation/devicetree/bindings/sound/dmic-codec.yaml
@@ -19,6 +19,9 @@ properties:
'#sound-dai-cells':
const: 0
+ vref-supply:
+ description: Phandle to the digital microphone reference supply
+
dmicen-gpios:
description: GPIO specifier for DMIC to control start and stop
maxItems: 1
diff --git a/Documentation/devicetree/bindings/sound/everest,es8328.yaml b/Documentation/devicetree/bindings/sound/everest,es8328.yaml
index ed18e40dcaac..ddddd7b143ab 100644
--- a/Documentation/devicetree/bindings/sound/everest,es8328.yaml
+++ b/Documentation/devicetree/bindings/sound/everest,es8328.yaml
@@ -24,9 +24,13 @@ maintainers:
properties:
compatible:
- enum:
- - everest,es8328
- - everest,es8388
+ oneOf:
+ - enum:
+ - everest,es8328
+ - items:
+ - enum:
+ - everest,es8388
+ - const: everest,es8328
reg:
maxItems: 1
@@ -56,6 +60,7 @@ properties:
required:
- compatible
+ - reg
- clocks
- DVDD-supply
- AVDD-supply
diff --git a/Documentation/devicetree/bindings/sound/fsl,audmix.yaml b/Documentation/devicetree/bindings/sound/fsl,audmix.yaml
index 9413b901cf77..3ad197b3c82c 100644
--- a/Documentation/devicetree/bindings/sound/fsl,audmix.yaml
+++ b/Documentation/devicetree/bindings/sound/fsl,audmix.yaml
@@ -61,13 +61,26 @@ properties:
- description: serial audio input 2
maxItems: 1
+ ports:
+ $ref: /schemas/graph.yaml#/properties/ports
+ patternProperties:
+ '^port@[0-1]':
+ $ref: audio-graph-port.yaml#
+ unevaluatedProperties: false
+ description: Input port from SAI TX
+
+ properties:
+ port@2:
+ $ref: audio-graph-port.yaml#
+ unevaluatedProperties: false
+ description: Output port to SAI RX
+
required:
- compatible
- reg
- clocks
- clock-names
- power-domains
- - dais
unevaluatedProperties: false
@@ -80,4 +93,50 @@ examples:
clock-names = "ipg";
power-domains = <&pd_audmix>;
dais = <&sai4>, <&sai5>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ playback-only;
+
+ amix_endpoint0: endpoint {
+ dai-tdm-slot-num = <8>;
+ dai-tdm-slot-width = <32>;
+ dai-tdm-slot-width-map = <32 8 32>;
+ dai-format = "dsp_a";
+ remote-endpoint = <&be00_ep>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ playback-only;
+
+ amix_endpoint1: endpoint {
+ dai-tdm-slot-num = <8>;
+ dai-tdm-slot-width = <32>;
+ dai-tdm-slot-width-map = <32 8 32>;
+ dai-format = "dsp_a";
+ remote-endpoint = <&be01_ep>;
+ };
+ };
+
+ port@2 {
+ reg = <2>;
+ capture-only;
+
+ amix_endpoint2: endpoint {
+ dai-tdm-slot-num = <8>;
+ dai-tdm-slot-width = <32>;
+ dai-tdm-slot-width-map = <32 8 32>;
+ dai-format = "dsp_a";
+ bitclock-master;
+ frame-master;
+ remote-endpoint = <&be02_ep>;
+ };
+ };
+ };
};
diff --git a/Documentation/devicetree/bindings/sound/fsl,easrc.yaml b/Documentation/devicetree/bindings/sound/fsl,easrc.yaml
index c454110f4281..8f1108e7e14e 100644
--- a/Documentation/devicetree/bindings/sound/fsl,easrc.yaml
+++ b/Documentation/devicetree/bindings/sound/fsl,easrc.yaml
@@ -80,7 +80,10 @@ required:
- fsl,asrc-rate
- fsl,asrc-format
-additionalProperties: false
+allOf:
+ - $ref: dai-common.yaml#
+
+unevaluatedProperties: false
examples:
- |
diff --git a/Documentation/devicetree/bindings/sound/fsl,imx-asrc.yaml b/Documentation/devicetree/bindings/sound/fsl,imx-asrc.yaml
index 76aa1f248488..85799f83e65f 100644
--- a/Documentation/devicetree/bindings/sound/fsl,imx-asrc.yaml
+++ b/Documentation/devicetree/bindings/sound/fsl,imx-asrc.yaml
@@ -77,6 +77,10 @@ properties:
power-domains:
maxItems: 1
+ port:
+ $ref: audio-graph-port.yaml#
+ unevaluatedProperties: false
+
fsl,asrc-rate:
$ref: /schemas/types.yaml#/definitions/uint32
description: The mutual sample rate used by DPCM Back Ends
@@ -120,6 +124,7 @@ required:
- fsl,asrc-width
allOf:
+ - $ref: dai-common.yaml#
- if:
properties:
compatible:
@@ -145,7 +150,7 @@ allOf:
required:
- power-domains
-additionalProperties: false
+unevaluatedProperties: false
examples:
- |
@@ -173,4 +178,12 @@ examples:
"txa", "txb", "txc";
fsl,asrc-rate = <48000>;
fsl,asrc-width = <16>;
+
+ port {
+ playback-only;
+
+ asrc_endpoint: endpoint {
+ remote-endpoint = <&fe00_ep>;
+ };
+ };
};
diff --git a/Documentation/devicetree/bindings/sound/fsl,imx95-cm7-sof.yaml b/Documentation/devicetree/bindings/sound/fsl,imx95-cm7-sof.yaml
new file mode 100644
index 000000000000..f00ae3219e15
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/fsl,imx95-cm7-sof.yaml
@@ -0,0 +1,64 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/sound/fsl,imx95-cm7-sof.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: NXP imx95 CM7 core
+
+maintainers:
+ - Daniel Baluta <daniel.baluta@nxp.com>
+
+description: NXP imx95 CM7 core used for audio processing
+
+properties:
+ compatible:
+ const: fsl,imx95-cm7-sof
+
+ reg:
+ maxItems: 1
+
+ reg-names:
+ const: sram
+
+ memory-region:
+ maxItems: 1
+
+ memory-region-names:
+ const: dma
+
+ port:
+ description: SAI3 port
+ $ref: audio-graph-port.yaml#
+ unevaluatedProperties: false
+
+required:
+ - compatible
+ - reg
+ - reg-names
+ - memory-region
+ - memory-region-names
+ - port
+
+allOf:
+ - $ref: fsl,sof-cpu.yaml#
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ cm7-cpu@80000000 {
+ compatible = "fsl,imx95-cm7-sof";
+ reg = <0x80000000 0x6100000>;
+ reg-names = "sram";
+ mboxes = <&mu7 2 0>, <&mu7 2 1>, <&mu7 3 0>, <&mu7 3 1>;
+ mbox-names = "txdb0", "txdb1", "rxdb0", "rxdb1";
+ memory-region = <&adma_res>;
+ memory-region-names = "dma";
+ port {
+ /* SAI3-WM8962 link */
+ endpoint {
+ remote-endpoint = <&wm8962_ep>;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/sound/fsl,sai.yaml b/Documentation/devicetree/bindings/sound/fsl,sai.yaml
index a5d9c246cc47..0d733e5b08a4 100644
--- a/Documentation/devicetree/bindings/sound/fsl,sai.yaml
+++ b/Documentation/devicetree/bindings/sound/fsl,sai.yaml
@@ -41,6 +41,10 @@ properties:
- fsl,imx93-sai
- fsl,imx95-sai
- fsl,vf610-sai
+ - items:
+ - enum:
+ - fsl,imx94-sai
+ - const: fsl,imx95-sai
reg:
maxItems: 1
@@ -93,6 +97,24 @@ properties:
items:
- description: receive and transmit interrupt
+ ports:
+ $ref: /schemas/graph.yaml#/properties/ports
+ properties:
+ port@0:
+ $ref: audio-graph-port.yaml#
+ unevaluatedProperties: false
+ description: port for TX and RX
+
+ port@1:
+ $ref: audio-graph-port.yaml#
+ unevaluatedProperties: false
+ description: port for TX only
+
+ port@2:
+ $ref: audio-graph-port.yaml#
+ unevaluatedProperties: false
+ description: port for RX only
+
big-endian:
description: |
required if all the SAI registers are big-endian rather than little-endian.
@@ -204,4 +226,37 @@ examples:
dma-names = "rx", "tx";
fsl,dataline = <1 0xff 0xff 2 0xff 0x11>;
#sound-dai-cells = <0>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@1 {
+ reg = <1>;
+ playback-only;
+
+ sai1_endpoint0: endpoint {
+ dai-tdm-slot-num = <8>;
+ dai-tdm-slot-width = <32>;
+ dai-tdm-slot-width-map = <32 8 32>;
+ dai-format = "dsp_a";
+ bitclock-master;
+ frame-master;
+ remote-endpoint = <&mcodec01_ep>;
+ };
+ };
+
+ port@2 {
+ reg = <2>;
+ capture-only;
+
+ sai1_endpoint1: endpoint {
+ dai-tdm-slot-num = <8>;
+ dai-tdm-slot-width = <32>;
+ dai-tdm-slot-width-map = <32 8 32>;
+ dai-format = "dsp_a";
+ remote-endpoint = <&fe02_ep>;
+ };
+ };
+ };
};
diff --git a/Documentation/devicetree/bindings/sound/fsl,sof-cpu.yaml b/Documentation/devicetree/bindings/sound/fsl,sof-cpu.yaml
new file mode 100644
index 000000000000..31863932dbc3
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/fsl,sof-cpu.yaml
@@ -0,0 +1,27 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/sound/fsl,sof-cpu.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: NXP audio processor common properties
+
+maintainers:
+ - Daniel Baluta <daniel.baluta@nxp.com>
+
+properties:
+ mboxes:
+ maxItems: 4
+
+ mbox-names:
+ items:
+ - const: txdb0
+ - const: txdb1
+ - const: rxdb0
+ - const: rxdb1
+
+required:
+ - mboxes
+ - mbox-names
+
+additionalProperties: true
diff --git a/Documentation/devicetree/bindings/sound/ics43432.txt b/Documentation/devicetree/bindings/sound/ics43432.txt
deleted file mode 100644
index e6f05f2f6c4e..000000000000
--- a/Documentation/devicetree/bindings/sound/ics43432.txt
+++ /dev/null
@@ -1,19 +0,0 @@
-Invensense ICS-43432-compatible MEMS microphone with I2S output.
-
-There are no software configuration options for this device, indeed, the only
-host connection is the I2S interface. Apart from requirements on clock
-frequency (460 kHz to 3.379 MHz according to the data sheet) there must be
-64 clock cycles in each stereo output frame; 24 of the 32 available bits
-contain audio data. A hardware pin determines if the device outputs data
-on the left or right channel of the I2S frame.
-
-Required properties:
- - compatible: should be one of the following.
- "invensense,ics43432": For the Invensense ICS43432
- "cui,cmm-4030d-261": For the CUI CMM-4030D-261-I2S-TR
-
-Example:
-
- ics43432: ics43432 {
- compatible = "invensense,ics43432";
- };
diff --git a/Documentation/devicetree/bindings/sound/imx-audio-card.yaml b/Documentation/devicetree/bindings/sound/imx-audio-card.yaml
index f7ad5ea2491e..3c75c8c78987 100644
--- a/Documentation/devicetree/bindings/sound/imx-audio-card.yaml
+++ b/Documentation/devicetree/bindings/sound/imx-audio-card.yaml
@@ -46,6 +46,14 @@ patternProperties:
description: see tdm-slot.txt.
$ref: /schemas/types.yaml#/definitions/uint32
+ playback-only:
+ description: link is used only for playback
+ $ref: /schemas/types.yaml#/definitions/flag
+
+ capture-only:
+ description: link is used only for capture
+ $ref: /schemas/types.yaml#/definitions/flag
+
cpu:
description: Holds subnode which indicates cpu dai.
type: object
@@ -71,6 +79,12 @@ patternProperties:
- link-name
- cpu
+ allOf:
+ - not:
+ required:
+ - playback-only
+ - capture-only
+
additionalProperties: false
required:
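A minimal sketch of a DAI link using the new playback-only flag is shown below; the node name, link name and phandles are assumptions, and the constraint added above forbids setting playback-only and capture-only on the same link.

    sound {
        compatible = "fsl,imx-audio-card";
        model = "imx-audio-card";        /* assumed model string */
        dai-link@0 {                     /* node name assumed from the binding's pattern */
            link-name = "HiFi";          /* assumed link name */
            playback-only;
            cpu {
                sound-dai = <&sai1>;     /* assumed CPU DAI */
            };
            codec {
                sound-dai = <&codec>;    /* assumed codec DAI */
            };
        };
    };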
diff --git a/Documentation/devicetree/bindings/sound/invensense,ics43432.yaml b/Documentation/devicetree/bindings/sound/invensense,ics43432.yaml
new file mode 100644
index 000000000000..7bd984817aa9
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/invensense,ics43432.yaml
@@ -0,0 +1,51 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/sound/invensense,ics43432.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Invensense ICS-43432-compatible MEMS Microphone with I2S Output
+
+maintainers:
+ - Oleksij Rempel <o.rempel@pengutronix.de>
+
+description:
+ The ICS-43432 and compatible MEMS microphones output audio over an I2S
+ interface and require no software configuration. The only host connection
+ is the I2S bus. The microphone requires an I2S clock frequency between
+ 460 kHz and 3.379 MHz and 64 clock cycles per stereo frame. Each frame
+ contains 32-bit slots per channel, with 24 bits carrying audio data.
+ A hardware pin determines whether the microphone outputs audio on the
+ left or right channel of the I2S frame.
+
+allOf:
+ - $ref: dai-common.yaml#
+
+properties:
+ compatible:
+ enum:
+ - invensense,ics43432
+ - cui,cmm-4030d-261
+
+ port:
+ $ref: audio-graph-port.yaml#
+ unevaluatedProperties: false
+
+required:
+ - compatible
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ ics43432: ics43432 {
+ compatible = "invensense,ics43432";
+
+ port {
+ endpoint {
+ remote-endpoint = <&i2s1_endpoint>;
+ dai-format = "i2s";
+ };
+ };
+
+ };
diff --git a/Documentation/devicetree/bindings/sound/mediatek,mt8188-mt6359.yaml b/Documentation/devicetree/bindings/sound/mediatek,mt8188-mt6359.yaml
index 362e729b51b4..76d5a437dc8f 100644
--- a/Documentation/devicetree/bindings/sound/mediatek,mt8188-mt6359.yaml
+++ b/Documentation/devicetree/bindings/sound/mediatek,mt8188-mt6359.yaml
@@ -40,6 +40,14 @@ properties:
hardware that provides additional audio functionalities if present.
The AFE will link to ADSP when the phandle is provided.
+ mediatek,accdet:
+ $ref: /schemas/types.yaml#/definitions/phandle
+ description:
+ The phandle to the MT6359 accessory detection block, which detects audio
+ jack insertion and removal. This property should only be present if the
+ accdet block is actually wired to the audio jack pins and is to be used for
+ jack detection.
+
patternProperties:
"^dai-link-[0-9]+$":
type: object
@@ -62,6 +70,7 @@ patternProperties:
- PCM1_BE
- DL_SRC_BE
- UL_SRC_BE
+ - DMIC_BE
codec:
description: Holds subnode which indicates codec dai.
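A short sketch of how the new phandle would appear on the sound card node is given below; the board compatible and phandle labels are placeholders, and the property should only be added when the accdet block is actually wired to the jack.

    sound {
        compatible = "mediatek,mt8188-mt6359-evb";  /* assumed board compatible */
        mediatek,platform = <&afe>;                 /* assumed AFE phandle */
        mediatek,accdet = <&accdet>;                /* only when accdet is wired to the jack */
    };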
diff --git a/Documentation/devicetree/bindings/sound/nvidia,tegra30-hda.yaml b/Documentation/devicetree/bindings/sound/nvidia,tegra30-hda.yaml
index 12c31b4b99e1..3ca9affb79a2 100644
--- a/Documentation/devicetree/bindings/sound/nvidia,tegra30-hda.yaml
+++ b/Documentation/devicetree/bindings/sound/nvidia,tegra30-hda.yaml
@@ -28,6 +28,7 @@ properties:
- nvidia,tegra186-hda
- nvidia,tegra210-hda
- nvidia,tegra124-hda
+ - nvidia,tegra114-hda
- const: nvidia,tegra30-hda
- items:
- const: nvidia,tegra132-hda
diff --git a/Documentation/devicetree/bindings/sound/qcom,wcd937x-sdw.yaml b/Documentation/devicetree/bindings/sound/qcom,wcd937x-sdw.yaml
index d3cf8f59cb23..c8543f969ebb 100644
--- a/Documentation/devicetree/bindings/sound/qcom,wcd937x-sdw.yaml
+++ b/Documentation/devicetree/bindings/sound/qcom,wcd937x-sdw.yaml
@@ -58,6 +58,40 @@ properties:
items:
enum: [1, 2, 3, 4, 5]
+ qcom,tx-channel-mapping:
+ description: |
+ Specifies static channel mapping between slave and master tx port
+ channels.
+ In the order of slave port channels, which are adc1, adc2, adc3,
+ dmic0, dmic1, mbhc, dmic2, dmic3, dmic4, dmic5, dmic6, dmic7.
+ $ref: /schemas/types.yaml#/definitions/uint8-array
+ minItems: 12
+ maxItems: 12
+ additionalItems: false
+ items:
+ enum:
+ - 1 # WCD9370_SWRM_CH1
+ - 2 # WCD9370_SWRM_CH2
+ - 3 # WCD9370_SWRM_CH3
+ - 4 # WCD9370_SWRM_CH4
+
+ qcom,rx-channel-mapping:
+ description: |
+ Specifies static channels mapping between slave and master rx port
+ channels.
+ In the order of slave port channels, which are
+ hph_l, hph_r, clsh, comp_l, comp_r, lo, dsd_r, dsd_l.
+ $ref: /schemas/types.yaml#/definitions/uint8-array
+ minItems: 8
+ maxItems: 8
+ additionalItems: false
+ items:
+ enum:
+ - 1 # WCD9370_SWRM_CH1
+ - 2 # WCD9370_SWRM_CH2
+ - 3 # WCD9370_SWRM_CH3
+ - 4 # WCD9370_SWRM_CH4
+
required:
- compatible
- reg
@@ -74,6 +108,7 @@ examples:
compatible = "sdw20217010a00";
reg = <0 4>;
qcom,rx-port-mapping = <1 2 3 4 5>;
+ qcom,rx-channel-mapping = /bits/ 8 <1 2 1 1 2 1 1 2>;
};
};
@@ -85,6 +120,7 @@ examples:
compatible = "sdw20217010a00";
reg = <0 3>;
qcom,tx-port-mapping = <2 2 3 4>;
+ qcom,tx-channel-mapping = /bits/ 8 <1 2 1 1 2 3 3 4 1 2 3 4>;
};
};
diff --git a/Documentation/devicetree/bindings/sound/rockchip-spdif.yaml b/Documentation/devicetree/bindings/sound/rockchip-spdif.yaml
index c3c989ef2a2c..32dea7392e8d 100644
--- a/Documentation/devicetree/bindings/sound/rockchip-spdif.yaml
+++ b/Documentation/devicetree/bindings/sound/rockchip-spdif.yaml
@@ -31,6 +31,10 @@ properties:
- rockchip,rk3288-spdif
- rockchip,rk3308-spdif
- const: rockchip,rk3066-spdif
+ - items:
+ - enum:
+ - rockchip,rk3588-spdif
+ - const: rockchip,rk3568-spdif
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/sound/ti,tas2770.yaml b/Documentation/devicetree/bindings/sound/ti,tas2770.yaml
index 5e7aea43aced..8eab98a0f7a2 100644
--- a/Documentation/devicetree/bindings/sound/ti,tas2770.yaml
+++ b/Documentation/devicetree/bindings/sound/ti,tas2770.yaml
@@ -23,6 +23,7 @@ properties:
compatible:
enum:
- ti,tas2770
+ - ti,tas5770l # Apple variant
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/sound/ti,tas27xx.yaml b/Documentation/devicetree/bindings/sound/ti,tas27xx.yaml
index 5447482179c1..fcaae848e78a 100644
--- a/Documentation/devicetree/bindings/sound/ti,tas27xx.yaml
+++ b/Documentation/devicetree/bindings/sound/ti,tas27xx.yaml
@@ -24,6 +24,7 @@ properties:
enum:
- ti,tas2764
- ti,tas2780
+ - ti,sn012776 # Apple variant of TAS2764
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/sound/wlf,wm8904.yaml b/Documentation/devicetree/bindings/sound/wlf,wm8904.yaml
index 329260cf0fa0..3029a868e5e1 100644
--- a/Documentation/devicetree/bindings/sound/wlf,wm8904.yaml
+++ b/Documentation/devicetree/bindings/sound/wlf,wm8904.yaml
@@ -38,6 +38,82 @@ properties:
DCVDD-supply: true
MICVDD-supply: true
+ wlf,in1l-as-dmicdat1:
+ type: boolean
+ description:
+ Use IN1L/DMICDAT1 as DMICDAT1, enabling the DMIC input path.
+ Can be used separately or together with wlf,in1r-as-dmicdat2.
+
+ wlf,in1r-as-dmicdat2:
+ type: boolean
+ description:
+ Use IN1R/DMICDAT2 as DMICDAT2, enabling the DMIC input path.
+ Can be used separately or together with wlf,in1l-as-dmicdat1.
+
+ wlf,gpio-cfg:
+ $ref: /schemas/types.yaml#/definitions/uint32-array
+ minItems: 4
+ maxItems: 4
+ description:
+ Default register values for R121/122/123/124 (GPIO Control).
+ If any entry has the value 0xFFFF, the related register won't be set.
+ default: [0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF]
+
+ wlf,micbias-cfg:
+ $ref: /schemas/types.yaml#/definitions/uint32-array
+ minItems: 2
+ maxItems: 2
+ description:
+ Default register values for R6/R7 (Mic Bias Control).
+ default: [0, 0]
+
+ wlf,drc-cfg-names:
+ $ref: /schemas/types.yaml#/definitions/string-array
+ description:
+ List of strings for the available DRC modes.
+ If absent, DRC is disabled.
+
+ wlf,drc-cfg-regs:
+ $ref: /schemas/types.yaml#/definitions/uint16-matrix
+ description:
+ Sets of default register values for R40/41/42/43 (DRC).
+ Each set corresponds to a DRC mode, so the number of sets should equal
+ the length of wlf,drc-cfg-names.
+ If absent, DRC is disabled.
+ items:
+ minItems: 4
+ maxItems: 4
+
+ wlf,retune-mobile-cfg-names:
+ $ref: /schemas/types.yaml#/definitions/non-unique-string-array
+ description:
+ List of strings for the available retune modes.
+ If absent, retune is disabled.
+
+ wlf,retune-mobile-cfg-hz:
+ description:
+ Sample rate (in Hz) at which each retune mode applies.
+ The list must be the same length as wlf,retune-mobile-cfg-names.
+ If absent, retune is disabled.
+
+ wlf,retune-mobile-cfg-regs:
+ $ref: /schemas/types.yaml#/definitions/uint16-matrix
+ description:
+ Sets of default register values for R134/.../157 (EQ).
+ Each set corresponds to a retune mode, so the number of sets should equal
+ the length of wlf,retune-mobile-cfg-names.
+ If absent, retune is disabled.
+ items:
+ minItems: 24
+ maxItems: 24
+
+dependencies:
+ wlf,drc-cfg-names: [ 'wlf,drc-cfg-regs' ]
+ wlf,drc-cfg-regs: [ 'wlf,drc-cfg-names' ]
+
+ wlf,retune-mobile-cfg-names: [ 'wlf,retune-mobile-cfg-hz', 'wlf,retune-mobile-cfg-regs' ]
+ wlf,retune-mobile-cfg-regs: [ 'wlf,retune-mobile-cfg-names', 'wlf,retune-mobile-cfg-hz' ]
+ wlf,retune-mobile-cfg-hz: [ 'wlf,retune-mobile-cfg-names', 'wlf,retune-mobile-cfg-regs' ]
+
required:
- compatible
- reg
@@ -70,5 +146,58 @@ examples:
DBVDD-supply = <&reg_1p8v>;
DCVDD-supply = <&reg_1p8v>;
MICVDD-supply = <&reg_1p8v>;
+
+ wlf,drc-cfg-names = "default", "peaklimiter", "tradition", "soft",
+ "music";
+ /*
+ * Config registers per name, respectively:
+ * KNEE_IP = 0, KNEE_OP = 0, HI_COMP = 1, LO_COMP = 1
+ * KNEE_IP = -24, KNEE_OP = -6, HI_COMP = 1/4, LO_COMP = 1
+ * KNEE_IP = -42, KNEE_OP = -3, HI_COMP = 0, LO_COMP = 1
+ * KNEE_IP = -45, KNEE_OP = -9, HI_COMP = 1/8, LO_COMP = 1
+ * KNEE_IP = -30, KNEE_OP = -10.5, HI_COMP = 1/4, LO_COMP = 1
+ */
+ wlf,drc-cfg-regs = /bits/ 16 <0x01af 0x3248 0x0000 0x0000>,
+ /bits/ 16 <0x04af 0x324b 0x0010 0x0408>,
+ /bits/ 16 <0x04af 0x324b 0x0028 0x0704>,
+ /bits/ 16 <0x04af 0x324b 0x0018 0x078c>,
+ /bits/ 16 <0x04af 0x324b 0x0010 0x050e>;
+
+ /* GPIO1 = DMIC_CLK, don't touch others */
+ wlf,gpio-cfg = <0x0018>, <0xffff>, <0xffff>, <0xffff>;
+
+ /* Use IN1R as DMICDAT2, leave IN1L as an analog input path */
+ wlf,in1r-as-dmicdat2;
+
+ wlf,retune-mobile-cfg-names = "bassboost", "bassboost", "treble";
+ wlf,retune-mobile-cfg-hz = <48000>, <44100>, <48000>;
+ /*
+ * Config registers per name, respectively:
+ * EQ_ENA, 100 Hz, 300 Hz, 875 Hz, 2400 Hz, 6900 Hz
+ * 1, +6 dB, +3 dB, 0 dB, 0 dB, 0 dB
+ * 1, +6 dB, +3 dB, 0 dB, 0 dB, 0 dB
+ * 1, -2 dB, -2 dB, 0 dB, 0 dB, +3 dB
+ * Each one uses the defaults for ReTune Mobile registers 140-157
+ */
+ wlf,retune-mobile-cfg-regs = /bits/ 16 <0x1 0x12 0xf 0xc 0xc 0xc
+ 0x0fca 0x0400 0x00d8 0x1eb5
+ 0xf145 0x0bd5 0x0075 0x1c58
+ 0xf3d3 0x0a54 0x0568 0x168e
+ 0xf829 0x07ad 0x1103 0x0564
+ 0x0559 0x4000>,
+
+ /bits/ 16 <0x1 0x12 0xf 0xc 0xc 0xc
+ 0x0fca 0x0400 0x00d8 0x1eb5
+ 0xf145 0x0bd5 0x0075 0x1c58
+ 0xf3d3 0x0a54 0x0568 0x168e
+ 0xf829 0x07ad 0x1103 0x0564
+ 0x0559 0x4000>,
+
+ /bits/ 16 <0x1 0xa 0xa 0xc 0xc 0xf
+ 0x0fca 0x0400 0x00d8 0x1eb5
+ 0xf145 0x0bd5 0x0075 0x1c58
+ 0xf3d3 0x0a54 0x0568 0x168e
+ 0xf829 0x07ad 0x1103 0x0564
+ 0x0559 0x4000>;
};
};
diff --git a/Documentation/devicetree/bindings/sound/wlf,wm8960.yaml b/Documentation/devicetree/bindings/sound/wlf,wm8960.yaml
index 62e62c335d07..3c2b9790ffcf 100644
--- a/Documentation/devicetree/bindings/sound/wlf,wm8960.yaml
+++ b/Documentation/devicetree/bindings/sound/wlf,wm8960.yaml
@@ -75,6 +75,10 @@ properties:
enable DACLRC pin. If shared-lrclk is present, no need to enable DAC for
capture.
+ port:
+ $ref: audio-graph-port.yaml#
+ unevaluatedProperties: false
+
required:
- compatible
- reg
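Since the example in this file is not extended, a minimal sketch of a wm8960 node using the new port is added here; the I2C address is the device's usual one, while the remote endpoint label is an assumption.

    codec@1a {
        compatible = "wlf,wm8960";
        reg = <0x1a>;
        port {
            wm8960_ep: endpoint {
                remote-endpoint = <&sai_ep>;   /* assumed CPU DAI endpoint */
            };
        };
    };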
diff --git a/Documentation/devicetree/bindings/sound/xlnx,audio-formatter.txt b/Documentation/devicetree/bindings/sound/xlnx,audio-formatter.txt
deleted file mode 100644
index cbc93c8f4963..000000000000
--- a/Documentation/devicetree/bindings/sound/xlnx,audio-formatter.txt
+++ /dev/null
@@ -1,29 +0,0 @@
-Device-Tree bindings for Xilinx PL audio formatter
-
-The IP core supports DMA, data formatting(AES<->PCM conversion)
-of audio samples.
-
-Required properties:
- - compatible: "xlnx,audio-formatter-1.0"
- - interrupt-names: Names specified to list of interrupts in same
- order mentioned under "interrupts".
- List of supported interrupt names are:
- "irq_mm2s" : interrupt from MM2S block
- "irq_s2mm" : interrupt from S2MM block
- - interrupts-parent: Phandle for interrupt controller.
- - interrupts: List of Interrupt numbers.
- - reg: Base address and size of the IP core instance.
- - clock-names: List of input clocks.
- Required elements: "s_axi_lite_aclk", "aud_mclk"
- - clocks: Input clock specifier. Refer to common clock bindings.
-
-Example:
- audio_ss_0_audio_formatter_0: audio_formatter@80010000 {
- compatible = "xlnx,audio-formatter-1.0";
- interrupt-names = "irq_mm2s", "irq_s2mm";
- interrupt-parent = <&gic>;
- interrupts = <0 104 4>, <0 105 4>;
- reg = <0x0 0x80010000 0x0 0x1000>;
- clock-names = "s_axi_lite_aclk", "aud_mclk";
- clocks = <&clk 71>, <&clk_wiz_1 0>;
- };
diff --git a/Documentation/devicetree/bindings/sound/xlnx,audio-formatter.yaml b/Documentation/devicetree/bindings/sound/xlnx,audio-formatter.yaml
new file mode 100644
index 000000000000..82fa448bd2e9
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/xlnx,audio-formatter.yaml
@@ -0,0 +1,72 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/sound/xlnx,audio-formatter.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Xilinx PL audio formatter
+
+description:
+ The IP core supports DMA and data formatting (AES<->PCM conversion)
+ of audio samples.
+
+maintainers:
+ - Vincenzo Frascino <vincenzo.frascino@arm.com>
+
+allOf:
+ - $ref: dai-common.yaml#
+
+properties:
+ compatible:
+ enum:
+ - xlnx,audio-formatter-1.0
+
+ reg:
+ maxItems: 1
+
+ interrupt-names:
+ minItems: 1
+ items:
+ - const: irq_mm2s
+ - const: irq_s2mm
+
+ interrupts:
+ minItems: 1
+ items:
+ - description: interrupt from MM2S block
+ - description: interrupt from S2MM block
+
+ clock-names:
+ minItems: 1
+ items:
+ - const: s_axi_lite_aclk
+ - const: aud_mclk
+
+ clocks:
+ minItems: 1
+ items:
+ - description: clock for the axi data stream
+ - description: clock for the MEMS microphone data stream
+
+required:
+ - compatible
+ - reg
+ - interrupt-names
+ - interrupts
+ - clock-names
+ - clocks
+
+additionalProperties: false
+
+examples:
+ - |
+ audio_formatter@80010000 {
+ compatible = "xlnx,audio-formatter-1.0";
+ reg = <0x80010000 0x1000>;
+ interrupt-names = "irq_mm2s", "irq_s2mm";
+ interrupt-parent = <&gic>;
+ interrupts = <0 104 4>, <0 105 4>;
+ clock-names = "s_axi_lite_aclk", "aud_mclk";
+ clocks = <&clk 71>, <&clk_wiz_1 0>;
+ };
+...
diff --git a/Documentation/devicetree/bindings/sound/xlnx,i2s.txt b/Documentation/devicetree/bindings/sound/xlnx,i2s.txt
deleted file mode 100644
index 5e7c7d5bb60a..000000000000
--- a/Documentation/devicetree/bindings/sound/xlnx,i2s.txt
+++ /dev/null
@@ -1,28 +0,0 @@
-Device-Tree bindings for Xilinx I2S PL block
-
-The IP supports I2S based playback/capture audio
-
-Required property:
- - compatible: "xlnx,i2s-transmitter-1.0" for playback and
- "xlnx,i2s-receiver-1.0" for capture
-
-Required property common to both I2S playback and capture:
- - reg: Base address and size of the IP core instance.
- - xlnx,dwidth: sample data width. Can be any of 16, 24.
- - xlnx,num-channels: Number of I2S streams. Can be any of 1, 2, 3, 4.
- supported channels = 2 * xlnx,num-channels
-
-Example:
-
- i2s_receiver@a0080000 {
- compatible = "xlnx,i2s-receiver-1.0";
- reg = <0x0 0xa0080000 0x0 0x10000>;
- xlnx,dwidth = <0x18>;
- xlnx,num-channels = <1>;
- };
- i2s_transmitter@a0090000 {
- compatible = "xlnx,i2s-transmitter-1.0";
- reg = <0x0 0xa0090000 0x0 0x10000>;
- xlnx,dwidth = <0x18>;
- xlnx,num-channels = <1>;
- };
diff --git a/Documentation/devicetree/bindings/sound/xlnx,i2s.yaml b/Documentation/devicetree/bindings/sound/xlnx,i2s.yaml
new file mode 100644
index 000000000000..3c2b0be07c53
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/xlnx,i2s.yaml
@@ -0,0 +1,65 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/sound/xlnx,i2s.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Xilinx I2S PL block
+
+description:
+ The IP supports I2S based playback/capture audio.
+
+maintainers:
+ - Vincenzo Frascino <vincenzo.frascino@arm.com>
+
+allOf:
+ - $ref: dai-common.yaml#
+
+properties:
+ compatible:
+ enum:
+ - xlnx,i2s-receiver-1.0
+ - xlnx,i2s-transmitter-1.0
+
+ reg:
+ maxItems: 1
+
+ xlnx,dwidth:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum:
+ - 16
+ - 24
+ description: |
+ Sample data width.
+
+ xlnx,num-channels:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 1
+ maximum: 4
+ description: |
+ Number of I2S streams. The number of supported channels is
+ 2 * xlnx,num-channels.
+
+required:
+ - compatible
+ - reg
+ - xlnx,dwidth
+ - xlnx,num-channels
+
+additionalProperties: false
+
+examples:
+ - |
+ i2s@a0080000 {
+ compatible = "xlnx,i2s-receiver-1.0";
+ reg = <0xa0080000 0x10000>;
+ xlnx,dwidth = <0x18>;
+ xlnx,num-channels = <1>;
+ };
+ i2s@a0090000 {
+ compatible = "xlnx,i2s-transmitter-1.0";
+ reg = <0xa0090000 0x10000>;
+ xlnx,dwidth = <0x18>;
+ xlnx,num-channels = <1>;
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/sound/xlnx,spdif.txt b/Documentation/devicetree/bindings/sound/xlnx,spdif.txt
deleted file mode 100644
index 15c2d64d247c..000000000000
--- a/Documentation/devicetree/bindings/sound/xlnx,spdif.txt
+++ /dev/null
@@ -1,28 +0,0 @@
-Device-Tree bindings for Xilinx SPDIF IP
-
-The IP supports playback and capture of SPDIF audio
-
-Required properties:
- - compatible: "xlnx,spdif-2.0"
- - clock-names: List of input clocks.
- Required elements: "s_axi_aclk", "aud_clk_i"
- - clocks: Input clock specifier. Refer to common clock bindings.
- - reg: Base address and address length of the IP core instance.
- - interrupts-parent: Phandle for interrupt controller.
- - interrupts: List of Interrupt numbers.
- - xlnx,spdif-mode: 0 :- receiver mode
- 1 :- transmitter mode
- - xlnx,aud_clk_i: input audio clock value.
-
-Example:
- spdif_0: spdif@80010000 {
- clock-names = "aud_clk_i", "s_axi_aclk";
- clocks = <&misc_clk_0>, <&clk 71>;
- compatible = "xlnx,spdif-2.0";
- interrupt-names = "spdif_interrupt";
- interrupt-parent = <&gic>;
- interrupts = <0 91 4>;
- reg = <0x0 0x80010000 0x0 0x10000>;
- xlnx,spdif-mode = <1>;
- xlnx,aud_clk_i = <49152913>;
- };
diff --git a/Documentation/devicetree/bindings/sound/xlnx,spdif.yaml b/Documentation/devicetree/bindings/sound/xlnx,spdif.yaml
new file mode 100644
index 000000000000..a45d8a0755fe
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/xlnx,spdif.yaml
@@ -0,0 +1,77 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/sound/xlnx,spdif.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Xilinx SPDIF IP
+
+description:
+ The IP supports playback and capture of SPDIF audio.
+
+maintainers:
+ - Vincenzo Frascino <vincenzo.frascino@arm.com>
+
+allOf:
+ - $ref: dai-common.yaml#
+
+properties:
+ compatible:
+ enum:
+ - xlnx,spdif-2.0
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ items:
+ - description: SPDIF audio interrupt
+
+ clock-names:
+ items:
+ - const: aud_clk_i
+ - const: s_axi_aclk
+
+ clocks:
+ minItems: 1
+ items:
+ - description: input audio clock
+ - description: clock for the AXI data stream
+
+ xlnx,spdif-mode:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum:
+ - 0
+ - 1
+ description: |
+ 0 - receiver
+ 1 - transmitter
+
+ xlnx,aud_clk_i:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description:
+ Input audio clock frequency. It affects the sampling rate.
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clock-names
+ - clocks
+
+additionalProperties: false
+
+examples:
+ - |
+ spdif@80010000 {
+ compatible = "xlnx,spdif-2.0";
+ reg = <0x80010000 0x10000>;
+ clock-names = "aud_clk_i", "s_axi_aclk";
+ clocks = <&misc_clk_0>, <&clk 71>;
+ interrupt-parent = <&gic>;
+ interrupts = <0 91 4>;
+ xlnx,spdif-mode = <1>;
+ xlnx,aud_clk_i = <49152913>;
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/spi/adi,axi-spi-engine.yaml b/Documentation/devicetree/bindings/spi/adi,axi-spi-engine.yaml
index d48faa42d025..4b3828eda6cb 100644
--- a/Documentation/devicetree/bindings/spi/adi,axi-spi-engine.yaml
+++ b/Documentation/devicetree/bindings/spi/adi,axi-spi-engine.yaml
@@ -41,6 +41,26 @@ properties:
- const: s_axi_aclk
- const: spi_clk
+ trigger-sources:
+ description:
+ An array of trigger source phandles for offload instances. The index in
+ the array corresponds to the offload instance number.
+ minItems: 1
+ maxItems: 32
+
+ dmas:
+ description:
+ DMA channels connected to the input or output stream interface of an
+ offload instance.
+ minItems: 1
+ maxItems: 32
+
+ dma-names:
+ items:
+ pattern: "^offload(?:[12]?[0-9]|3[01])-[tr]x$"
+ minItems: 1
+ maxItems: 32
+
required:
- compatible
- reg
@@ -59,6 +79,10 @@ examples:
clocks = <&clkc 15>, <&clkc 15>;
clock-names = "s_axi_aclk", "spi_clk";
+ trigger-sources = <&trigger_clock>;
+ dmas = <&dma 0>;
+ dma-names = "offload0-rx";
+
#address-cells = <1>;
#size-cells = <0>;
diff --git a/Documentation/devicetree/bindings/spi/cdns,qspi-nor.yaml b/Documentation/devicetree/bindings/spi/cdns,qspi-nor.yaml
index b6bc71d19286..53a52fb8b819 100644
--- a/Documentation/devicetree/bindings/spi/cdns,qspi-nor.yaml
+++ b/Documentation/devicetree/bindings/spi/cdns,qspi-nor.yaml
@@ -4,7 +4,7 @@
$id: http://devicetree.org/schemas/spi/cdns,qspi-nor.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
-title: Cadence Quad SPI controller
+title: Cadence Quad/Octal SPI controller
maintainers:
- Vaishnav Achath <vaishnav.a@ti.com>
@@ -76,8 +76,12 @@ properties:
- ti,am654-ospi
- ti,k2g-qspi
- xlnx,versal-ospi-1.0
+ # The compatible is qspi-nor for historical reasons but such
+ # controllers are meant to be used with flashes of all kinds,
+ # i.e. also NAND flashes, not only NOR flashes.
- const: cdns,qspi-nor
- const: cdns,qspi-nor
+ deprecated: true
reg:
items:
@@ -142,6 +146,18 @@ properties:
items:
enum: [ qspi, qspi-ocp, rstc_ref ]
+patternProperties:
+ "^flash@[0-9a-f]+$":
+ type: object
+ $ref: cdns,qspi-nor-peripheral-props.yaml
+ additionalProperties: true
+ required:
+ - cdns,read-delay
+ - cdns,tshsl-ns
+ - cdns,tsd2d-ns
+ - cdns,tchsh-ns
+ - cdns,tslch-ns
+
required:
- compatible
- reg
@@ -157,7 +173,7 @@ unevaluatedProperties: false
examples:
- |
qspi: spi@ff705000 {
- compatible = "cdns,qspi-nor";
+ compatible = "intel,socfpga-qspi", "cdns,qspi-nor";
#address-cells = <1>;
#size-cells = <0>;
reg = <0xff705000 0x1000>,
@@ -173,5 +189,10 @@ examples:
flash@0 {
compatible = "jedec,spi-nor";
reg = <0x0>;
+ cdns,read-delay = <4>;
+ cdns,tshsl-ns = <60>;
+ cdns,tsd2d-ns = <60>;
+ cdns,tchsh-ns = <60>;
+ cdns,tslch-ns = <60>;
};
};
diff --git a/Documentation/devicetree/bindings/spi/fsl,espi.yaml b/Documentation/devicetree/bindings/spi/fsl,espi.yaml
new file mode 100644
index 000000000000..d267bbfaf02f
--- /dev/null
+++ b/Documentation/devicetree/bindings/spi/fsl,espi.yaml
@@ -0,0 +1,65 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/spi/fsl,espi.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Freescale eSPI (Enhanced Serial Peripheral Interface) controller
+
+maintainers:
+ - J. Neuschäfer <j.ne@posteo.net>
+
+properties:
+ compatible:
+ const: fsl,mpc8536-espi
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ fsl,espi-num-chipselects:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [ 1, 4 ]
+ description: The number of the chipselect signals.
+
+ fsl,csbef:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 0
+ maximum: 15
+ description: Chip select assertion time in bits before frame starts
+
+ fsl,csaft:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 0
+ maximum: 15
+ description: Chip select negation time in bits after frame ends
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - fsl,espi-num-chipselects
+
+allOf:
+ - $ref: spi-controller.yaml#
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+
+ spi@110000 {
+ compatible = "fsl,mpc8536-espi";
+ reg = <0x110000 0x1000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interrupts = <53 IRQ_TYPE_EDGE_FALLING>;
+ fsl,espi-num-chipselects = <4>;
+ fsl,csbef = <1>;
+ fsl,csaft = <1>;
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/spi/fsl,spi.yaml b/Documentation/devicetree/bindings/spi/fsl,spi.yaml
new file mode 100644
index 000000000000..d74792fc9bf2
--- /dev/null
+++ b/Documentation/devicetree/bindings/spi/fsl,spi.yaml
@@ -0,0 +1,74 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/spi/fsl,spi.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Freescale SPI (Serial Peripheral Interface) controller
+
+maintainers:
+ - J. Neuschäfer <j.ne@posteo.net>
+
+properties:
+ compatible:
+ enum:
+ - fsl,spi
+ - aeroflexgaisler,spictrl
+
+ reg:
+ maxItems: 1
+
+ cell-index:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: |
+ QE SPI subblock index.
+ 0: QE subblock SPI1
+ 1: QE subblock SPI2
+
+ mode:
+ description: SPI operation mode
+ enum:
+ - cpu
+ - cpu-qe
+
+ interrupts:
+ maxItems: 1
+
+ clock-frequency:
+ description: input clock frequency to non FSL_SOC cores
+
+ cs-gpios: true
+
+ fsl,spisel_boot:
+ $ref: /schemas/types.yaml#/definitions/flag
+ description:
+ For the MPC8306 and MPC8309, specifies that the SPISEL_BOOT signal is used
+ as chip select for a slave device. Use reg = <number of gpios> in the
+ corresponding child node, i.e. 0 if the cs-gpios property is not present.
+
+required:
+ - compatible
+ - reg
+ - mode
+ - interrupts
+
+allOf:
+ - $ref: spi-controller.yaml#
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+
+ spi@4c0 {
+ compatible = "fsl,spi";
+ reg = <0x4c0 0x40>;
+ cell-index = <0>;
+ interrupts = <82 0>;
+ mode = "cpu";
+ cs-gpios = <&gpio 18 IRQ_TYPE_EDGE_RISING // device reg=<0>
+ &gpio 19 IRQ_TYPE_EDGE_RISING>; // device reg=<1>
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/spi/fsl-spi.txt b/Documentation/devicetree/bindings/spi/fsl-spi.txt
deleted file mode 100644
index 0654380eb751..000000000000
--- a/Documentation/devicetree/bindings/spi/fsl-spi.txt
+++ /dev/null
@@ -1,62 +0,0 @@
-* SPI (Serial Peripheral Interface)
-
-Required properties:
-- cell-index : QE SPI subblock index.
- 0: QE subblock SPI1
- 1: QE subblock SPI2
-- compatible : should be "fsl,spi" or "aeroflexgaisler,spictrl".
-- mode : the SPI operation mode, it can be "cpu" or "cpu-qe".
-- reg : Offset and length of the register set for the device
-- interrupts : <a b> where a is the interrupt number and b is a
- field that represents an encoding of the sense and level
- information for the interrupt. This should be encoded based on
- the information in section 2) depending on the type of interrupt
- controller you have.
-- clock-frequency : input clock frequency to non FSL_SOC cores
-
-Optional properties:
-- cs-gpios : specifies the gpio pins to be used for chipselects.
- The gpios will be referred to as reg = <index> in the SPI child nodes.
- If unspecified, a single SPI device without a chip select can be used.
-- fsl,spisel_boot : for the MPC8306 and MPC8309, specifies that the
- SPISEL_BOOT signal is used as chip select for a slave device. Use
- reg = <number of gpios> in the corresponding child node, i.e. 0 if
- the cs-gpios property is not present.
-
-Example:
- spi@4c0 {
- cell-index = <0>;
- compatible = "fsl,spi";
- reg = <4c0 40>;
- interrupts = <82 0>;
- interrupt-parent = <700>;
- mode = "cpu";
- cs-gpios = <&gpio 18 1 // device reg=<0>
- &gpio 19 1>; // device reg=<1>
- };
-
-
-* eSPI (Enhanced Serial Peripheral Interface)
-
-Required properties:
-- compatible : should be "fsl,mpc8536-espi".
-- reg : Offset and length of the register set for the device.
-- interrupts : should contain eSPI interrupt, the device has one interrupt.
-- fsl,espi-num-chipselects : the number of the chipselect signals.
-
-Optional properties:
-- fsl,csbef: chip select assertion time in bits before frame starts
-- fsl,csaft: chip select negation time in bits after frame ends
-
-Example:
- spi@110000 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "fsl,mpc8536-espi";
- reg = <0x110000 0x1000>;
- interrupts = <53 0x2>;
- interrupt-parent = <&mpic>;
- fsl,espi-num-chipselects = <4>;
- fsl,csbef = <1>;
- fsl,csaft = <1>;
- };
diff --git a/Documentation/devicetree/bindings/spi/mediatek,spi-mt65xx.yaml b/Documentation/devicetree/bindings/spi/mediatek,spi-mt65xx.yaml
index e1f5bfa4433c..ed17815263a8 100644
--- a/Documentation/devicetree/bindings/spi/mediatek,spi-mt65xx.yaml
+++ b/Documentation/devicetree/bindings/spi/mediatek,spi-mt65xx.yaml
@@ -35,6 +35,8 @@ properties:
- enum:
- mediatek,mt7981-spi-ipm
- mediatek,mt7986-spi-ipm
+ - mediatek,mt7988-spi-quad
+ - mediatek,mt7988-spi-single
- mediatek,mt8188-spi-ipm
- const: mediatek,spi-ipm
- items:
diff --git a/Documentation/devicetree/bindings/spi/qcom,spi-qpic-snand.yaml b/Documentation/devicetree/bindings/spi/qcom,spi-qpic-snand.yaml
new file mode 100644
index 000000000000..aa3f93319203
--- /dev/null
+++ b/Documentation/devicetree/bindings/spi/qcom,spi-qpic-snand.yaml
@@ -0,0 +1,83 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/spi/qcom,spi-qpic-snand.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm QPIC NAND controller
+
+maintainers:
+ - Md sadre Alam <quic_mdalam@quicinc.com>
+
+description:
+ The QCOM QPIC-SPI-NAND flash controller is an extended version of
+ the QCOM QPIC NAND flash controller. It can work both in serial
+ and parallel mode. It supports typical SPI-NAND page cache
+ operations in single, dual or quad IO mode with pipelined ECC
+ encoding/decoding using the QPIC ECC HW engine.
+
+allOf:
+ - $ref: /schemas/spi/spi-controller.yaml#
+
+properties:
+ compatible:
+ enum:
+ - qcom,ipq9574-snand
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ maxItems: 3
+
+ clock-names:
+ items:
+ - const: core
+ - const: aon
+ - const: iom
+
+ dmas:
+ items:
+ - description: tx DMA channel
+ - description: rx DMA channel
+ - description: cmd DMA channel
+
+ dma-names:
+ items:
+ - const: tx
+ - const: rx
+ - const: cmd
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - clock-names
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/qcom,ipq9574-gcc.h>
+ spi@1ac00000 {
+ compatible = "qcom,ipq9574-snand";
+ reg = <0x1ac00000 0x800>;
+
+ clocks = <&gcc GCC_QPIC_CLK>,
+ <&gcc GCC_QPIC_AHB_CLK>,
+ <&gcc GCC_QPIC_IO_MACRO_CLK>;
+ clock-names = "core", "aon", "iom";
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ flash@0 {
+ compatible = "spi-nand";
+ reg = <0>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ nand-ecc-engine = <&qpic_nand>;
+ nand-ecc-strength = <4>;
+ nand-ecc-step-size = <512>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/spi/spi-fsl-lpspi.yaml b/Documentation/devicetree/bindings/spi/spi-fsl-lpspi.yaml
index ed1d4aa41b8c..a65a42ccaafe 100644
--- a/Documentation/devicetree/bindings/spi/spi-fsl-lpspi.yaml
+++ b/Documentation/devicetree/bindings/spi/spi-fsl-lpspi.yaml
@@ -24,6 +24,7 @@ properties:
- enum:
- fsl,imx8ulp-spi
- fsl,imx93-spi
+ - fsl,imx94-spi
- fsl,imx95-spi
- const: fsl,imx7ulp-spi
reg:
diff --git a/Documentation/devicetree/bindings/spi/spi-rockchip.yaml b/Documentation/devicetree/bindings/spi/spi-rockchip.yaml
index 46d9d6ee0923..104f5ffdd04e 100644
--- a/Documentation/devicetree/bindings/spi/spi-rockchip.yaml
+++ b/Documentation/devicetree/bindings/spi/spi-rockchip.yaml
@@ -34,6 +34,7 @@ properties:
- rockchip,rk3328-spi
- rockchip,rk3368-spi
- rockchip,rk3399-spi
+ - rockchip,rk3562-spi
- rockchip,rk3568-spi
- rockchip,rk3576-spi
- rockchip,rk3588-spi
diff --git a/Documentation/devicetree/bindings/spi/spi-sg2044-nor.yaml b/Documentation/devicetree/bindings/spi/spi-sg2044-nor.yaml
new file mode 100644
index 000000000000..948ff7a09643
--- /dev/null
+++ b/Documentation/devicetree/bindings/spi/spi-sg2044-nor.yaml
@@ -0,0 +1,52 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/spi/spi-sg2044-nor.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: SG2044 SPI NOR controller
+
+maintainers:
+ - Longbin Li <looong.bin@gmail.com>
+
+allOf:
+ - $ref: spi-controller.yaml#
+
+properties:
+ compatible:
+ const: sophgo,sg2044-spifmc-nor
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ resets:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - interrupts
+ - resets
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+
+ spi@1000000 {
+ compatible = "sophgo,sg2044-spifmc-nor";
+ reg = <0x1000000 0x4000000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clocks = <&clk 0>;
+ interrupts = <37 IRQ_TYPE_LEVEL_HIGH>;
+ resets = <&rst 0>;
+ };
diff --git a/Documentation/devicetree/bindings/spi/spi-zynqmp-qspi.yaml b/Documentation/devicetree/bindings/spi/spi-zynqmp-qspi.yaml
index 04d4d3b4916d..02cf1314367b 100644
--- a/Documentation/devicetree/bindings/spi/spi-zynqmp-qspi.yaml
+++ b/Documentation/devicetree/bindings/spi/spi-zynqmp-qspi.yaml
@@ -65,14 +65,13 @@ allOf:
examples:
- |
- #include <dt-bindings/clock/xlnx-zynqmp-clk.h>
soc {
#address-cells = <2>;
#size-cells = <2>;
qspi: spi@ff0f0000 {
compatible = "xlnx,zynqmp-qspi-1.0";
- clocks = <&zynqmp_clk QSPI_REF>, <&zynqmp_clk LPD_LSBUS>;
+ clocks = <&zynqmp_clk 53>, <&zynqmp_clk 82>;
clock-names = "ref_clk", "pclk";
interrupts = <0 15 4>;
interrupt-parent = <&gic>;
diff --git a/Documentation/devicetree/bindings/spi/st,stm32mp25-ospi.yaml b/Documentation/devicetree/bindings/spi/st,stm32mp25-ospi.yaml
new file mode 100644
index 000000000000..5f276f27dc4c
--- /dev/null
+++ b/Documentation/devicetree/bindings/spi/st,stm32mp25-ospi.yaml
@@ -0,0 +1,105 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/spi/st,stm32mp25-ospi.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: STMicroelectronics STM32 Octal Serial Peripheral Interface (OSPI)
+
+maintainers:
+ - Patrice Chotard <patrice.chotard@foss.st.com>
+
+allOf:
+ - $ref: spi-controller.yaml#
+
+properties:
+ compatible:
+ const: st,stm32mp25-ospi
+
+ reg:
+ maxItems: 1
+
+ memory-region:
+ description:
+ Memory region to be used for memory-map read access.
+ In memory-mapped mode, read access are performed from the memory
+ device using the direct mapping.
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ resets:
+ items:
+ - description: phandle to OSPI block reset
+ - description: phandle to delay block reset
+
+ dmas:
+ maxItems: 2
+
+ dma-names:
+ items:
+ - const: tx
+ - const: rx
+
+ st,syscfg-dlyb:
+ description: configure OCTOSPI delay block.
+ $ref: /schemas/types.yaml#/definitions/phandle-array
+ items:
+ - description: phandle to syscfg
+ - description: register offset within syscfg
+
+ access-controllers:
+ description: phandle to the rifsc device to check access rights
+ and in some cases, an additional phandle to the rcc device for
+ secure clock control.
+ items:
+ - description: phandle to bus controller
+ - description: phandle to clock controller
+ minItems: 1
+
+ power-domains:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - interrupts
+ - st,syscfg-dlyb
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/st,stm32mp25-rcc.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/reset/st,stm32mp25-rcc.h>
+
+ spi@40430000 {
+ compatible = "st,stm32mp25-ospi";
+ reg = <0x40430000 0x400>;
+ memory-region = <&mm_ospi1>;
+ interrupts = <GIC_SPI 163 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&hpdma 2 0x62 0x00003121 0x0>,
+ <&hpdma 2 0x42 0x00003112 0x0>;
+ dma-names = "tx", "rx";
+ clocks = <&scmi_clk CK_SCMI_OSPI1>;
+ resets = <&scmi_reset RST_SCMI_OSPI1>, <&scmi_reset RST_SCMI_OSPI1DLL>;
+ access-controllers = <&rifsc 74>;
+ power-domains = <&CLUSTER_PD>;
+ st,syscfg-dlyb = <&syscfg 0x1000>;
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ flash@0 {
+ compatible = "jedec,spi-nor";
+ reg = <0>;
+ spi-rx-bus-width = <4>;
+ spi-max-frequency = <108000000>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/timer/arm,twd-timer.yaml b/Documentation/devicetree/bindings/timer/arm,twd-timer.yaml
index 5684df6448ef..eb1127352c7b 100644
--- a/Documentation/devicetree/bindings/timer/arm,twd-timer.yaml
+++ b/Documentation/devicetree/bindings/timer/arm,twd-timer.yaml
@@ -50,7 +50,7 @@ examples:
#include <dt-bindings/interrupt-controller/arm-gic.h>
timer@2c000600 {
- compatible = "arm,arm11mp-twd-timer";
- reg = <0x2c000600 0x20>;
- interrupts = <GIC_PPI 13 0xf01>;
+ compatible = "arm,arm11mp-twd-timer";
+ reg = <0x2c000600 0x20>;
+ interrupts = <GIC_PPI 13 0xf01>;
};
diff --git a/Documentation/devicetree/bindings/timer/renesas,cmt.yaml b/Documentation/devicetree/bindings/timer/renesas,cmt.yaml
index 5e09c04da30e..260b05f213e6 100644
--- a/Documentation/devicetree/bindings/timer/renesas,cmt.yaml
+++ b/Documentation/devicetree/bindings/timer/renesas,cmt.yaml
@@ -178,29 +178,29 @@ examples:
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/power/r8a7790-sysc.h>
cmt0: timer@ffca0000 {
- compatible = "renesas,r8a7790-cmt0", "renesas,rcar-gen2-cmt0";
- reg = <0xffca0000 0x1004>;
- interrupts = <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 143 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&cpg CPG_MOD 124>;
- clock-names = "fck";
- power-domains = <&sysc R8A7790_PD_ALWAYS_ON>;
- resets = <&cpg 124>;
+ compatible = "renesas,r8a7790-cmt0", "renesas,rcar-gen2-cmt0";
+ reg = <0xffca0000 0x1004>;
+ interrupts = <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 143 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 124>;
+ clock-names = "fck";
+ power-domains = <&sysc R8A7790_PD_ALWAYS_ON>;
+ resets = <&cpg 124>;
};
cmt1: timer@e6130000 {
- compatible = "renesas,r8a7790-cmt1", "renesas,rcar-gen2-cmt1";
- reg = <0xe6130000 0x1004>;
- interrupts = <GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 124 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 125 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 126 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 127 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&cpg CPG_MOD 329>;
- clock-names = "fck";
- power-domains = <&sysc R8A7790_PD_ALWAYS_ON>;
- resets = <&cpg 329>;
+ compatible = "renesas,r8a7790-cmt1", "renesas,rcar-gen2-cmt1";
+ reg = <0xe6130000 0x1004>;
+ interrupts = <GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 124 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 125 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 126 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 127 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 329>;
+ clock-names = "fck";
+ power-domains = <&sysc R8A7790_PD_ALWAYS_ON>;
+ resets = <&cpg 329>;
};
diff --git a/Documentation/devicetree/bindings/timer/renesas,em-sti.yaml b/Documentation/devicetree/bindings/timer/renesas,em-sti.yaml
index 233d74d5402c..a7385d865bca 100644
--- a/Documentation/devicetree/bindings/timer/renesas,em-sti.yaml
+++ b/Documentation/devicetree/bindings/timer/renesas,em-sti.yaml
@@ -38,9 +38,9 @@ examples:
- |
#include <dt-bindings/interrupt-controller/arm-gic.h>
timer@e0180000 {
- compatible = "renesas,em-sti";
- reg = <0xe0180000 0x54>;
- interrupts = <GIC_SPI 125 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&sti_sclk>;
- clock-names = "sclk";
+ compatible = "renesas,em-sti";
+ reg = <0xe0180000 0x54>;
+ interrupts = <GIC_SPI 125 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&sti_sclk>;
+ clock-names = "sclk";
};
diff --git a/Documentation/devicetree/bindings/timer/renesas,mtu2.yaml b/Documentation/devicetree/bindings/timer/renesas,mtu2.yaml
index 15d8dddf4ae9..e56c12f03f72 100644
--- a/Documentation/devicetree/bindings/timer/renesas,mtu2.yaml
+++ b/Documentation/devicetree/bindings/timer/renesas,mtu2.yaml
@@ -66,11 +66,11 @@ examples:
#include <dt-bindings/clock/r7s72100-clock.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
mtu2: timer@fcff0000 {
- compatible = "renesas,mtu2-r7s72100", "renesas,mtu2";
- reg = <0xfcff0000 0x400>;
- interrupts = <GIC_SPI 107 IRQ_TYPE_LEVEL_HIGH>;
- interrupt-names = "tgi0a";
- clocks = <&mstp3_clks R7S72100_CLK_MTU2>;
- clock-names = "fck";
- power-domains = <&cpg_clocks>;
+ compatible = "renesas,mtu2-r7s72100", "renesas,mtu2";
+ reg = <0xfcff0000 0x400>;
+ interrupts = <GIC_SPI 107 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "tgi0a";
+ clocks = <&mstp3_clks R7S72100_CLK_MTU2>;
+ clock-names = "fck";
+ power-domains = <&cpg_clocks>;
};
diff --git a/Documentation/devicetree/bindings/timer/renesas,ostm.yaml b/Documentation/devicetree/bindings/timer/renesas,ostm.yaml
index e8c642166462..9ba858f094ab 100644
--- a/Documentation/devicetree/bindings/timer/renesas,ostm.yaml
+++ b/Documentation/devicetree/bindings/timer/renesas,ostm.yaml
@@ -71,9 +71,9 @@ examples:
#include <dt-bindings/clock/r7s72100-clock.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
ostm0: timer@fcfec000 {
- compatible = "renesas,r7s72100-ostm", "renesas,ostm";
- reg = <0xfcfec000 0x30>;
- interrupts = <GIC_SPI 102 IRQ_TYPE_EDGE_RISING>;
- clocks = <&mstp5_clks R7S72100_CLK_OSTM0>;
- power-domains = <&cpg_clocks>;
+ compatible = "renesas,r7s72100-ostm", "renesas,ostm";
+ reg = <0xfcfec000 0x30>;
+ interrupts = <GIC_SPI 102 IRQ_TYPE_EDGE_RISING>;
+ clocks = <&mstp5_clks R7S72100_CLK_OSTM0>;
+ power-domains = <&cpg_clocks>;
};
diff --git a/Documentation/devicetree/bindings/timer/renesas,tmu.yaml b/Documentation/devicetree/bindings/timer/renesas,tmu.yaml
index 75b0e7c70b62..b1229595acfb 100644
--- a/Documentation/devicetree/bindings/timer/renesas,tmu.yaml
+++ b/Documentation/devicetree/bindings/timer/renesas,tmu.yaml
@@ -122,15 +122,15 @@ examples:
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/power/r8a7779-sysc.h>
tmu0: timer@ffd80000 {
- compatible = "renesas,tmu-r8a7779", "renesas,tmu";
- reg = <0xffd80000 0x30>;
- interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>;
- interrupt-names = "tuni0", "tuni1", "tuni2", "ticpi2";
- clocks = <&mstp0_clks R8A7779_CLK_TMU0>;
- clock-names = "fck";
- power-domains = <&sysc R8A7779_PD_ALWAYS_ON>;
- #renesas,channels = <3>;
+ compatible = "renesas,tmu-r8a7779", "renesas,tmu";
+ reg = <0xffd80000 0x30>;
+ interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "tuni0", "tuni1", "tuni2", "ticpi2";
+ clocks = <&mstp0_clks R8A7779_CLK_TMU0>;
+ clock-names = "fck";
+ power-domains = <&sysc R8A7779_PD_ALWAYS_ON>;
+ #renesas,channels = <3>;
};
diff --git a/Documentation/devicetree/bindings/timer/renesas,tpu.yaml b/Documentation/devicetree/bindings/timer/renesas,tpu.yaml
index 01554dff23d8..7a473b302775 100644
--- a/Documentation/devicetree/bindings/timer/renesas,tpu.yaml
+++ b/Documentation/devicetree/bindings/timer/renesas,tpu.yaml
@@ -49,8 +49,8 @@ additionalProperties: false
examples:
- |
tpu: tpu@ffffe0 {
- compatible = "renesas,tpu";
- reg = <0xffffe0 16>, <0xfffff0 12>;
- clocks = <&pclk>;
- clock-names = "fck";
+ compatible = "renesas,tpu";
+ reg = <0xffffe0 16>, <0xfffff0 12>;
+ clocks = <&pclk>;
+ clock-names = "fck";
};
diff --git a/Documentation/devicetree/bindings/timer/samsung,exynos4210-mct.yaml b/Documentation/devicetree/bindings/timer/samsung,exynos4210-mct.yaml
index 02d1c355808e..10578f544581 100644
--- a/Documentation/devicetree/bindings/timer/samsung,exynos4210-mct.yaml
+++ b/Documentation/devicetree/bindings/timer/samsung,exynos4210-mct.yaml
@@ -27,6 +27,7 @@ properties:
- enum:
- axis,artpec8-mct
- google,gs101-mct
+ - samsung,exynos2200-mct-peris
- samsung,exynos3250-mct
- samsung,exynos5250-mct
- samsung,exynos5260-mct
@@ -34,6 +35,7 @@ properties:
- samsung,exynos5433-mct
- samsung,exynos850-mct
- samsung,exynos8895-mct
+ - samsung,exynos990-mct
- tesla,fsd-mct
- const: samsung,exynos4210-mct
@@ -130,11 +132,13 @@ allOf:
enum:
- axis,artpec8-mct
- google,gs101-mct
+ - samsung,exynos2200-mct-peris
- samsung,exynos5260-mct
- samsung,exynos5420-mct
- samsung,exynos5433-mct
- samsung,exynos850-mct
- samsung,exynos8895-mct
+ - samsung,exynos990-mct
then:
properties:
interrupts:
diff --git a/Documentation/devicetree/bindings/timer/sifive,clint.yaml b/Documentation/devicetree/bindings/timer/sifive,clint.yaml
index 76d83aea4e2b..653e2e0ca878 100644
--- a/Documentation/devicetree/bindings/timer/sifive,clint.yaml
+++ b/Documentation/devicetree/bindings/timer/sifive,clint.yaml
@@ -37,6 +37,12 @@ properties:
- starfive,jh8100-clint # StarFive JH8100
- const: sifive,clint0 # SiFive CLINT v0 IP block
- items:
+ - {}
+ - const: sifive,clint2 # SiFive CLINT v2 IP block
+ description:
+ SiFive CLINT v2 is the HRT that supports the Zicntr extension. The control of sifive,clint2
+ differs from that of sifive,clint0, making them incompatible.
+ - items:
- enum:
- allwinner,sun20i-d1-clint
- sophgo,cv1800b-clint
@@ -62,6 +68,22 @@ properties:
minItems: 1
maxItems: 4095
+ sifive,fine-ctr-bits:
+ maximum: 15
+ description: The width in bits of the fine counter.
+
+if:
+ properties:
+ compatible:
+ contains:
+ const: sifive,clint2
+then:
+ required:
+ - sifive,fine-ctr-bits
+else:
+ properties:
+ sifive,fine-ctr-bits: false
+
additionalProperties: false
required:
@@ -77,6 +99,6 @@ examples:
<&cpu2intc 3>, <&cpu2intc 7>,
<&cpu3intc 3>, <&cpu3intc 7>,
<&cpu4intc 3>, <&cpu4intc 7>;
- reg = <0x2000000 0x10000>;
+ reg = <0x2000000 0x10000>;
};
...
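A sketch of a CLINT v2 node satisfying the new constraints follows; the SoC-specific compatible, register range, interrupt routing and counter width are placeholders.

    clint@2000000 {
        compatible = "vendor,soc-clint", "sifive,clint2";  /* first compatible is a placeholder */
        reg = <0x2000000 0x10000>;
        interrupts-extended = <&cpu1intc 3>, <&cpu1intc 7>;
        sifive,fine-ctr-bits = <4>;   /* required with sifive,clint2; value assumed */
    };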
diff --git a/Documentation/devicetree/bindings/trigger-source/pwm-trigger.yaml b/Documentation/devicetree/bindings/trigger-source/pwm-trigger.yaml
new file mode 100644
index 000000000000..1eac20031dc3
--- /dev/null
+++ b/Documentation/devicetree/bindings/trigger-source/pwm-trigger.yaml
@@ -0,0 +1,37 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/trigger-source/pwm-trigger.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Generic trigger source using PWM
+
+description: Remaps a PWM channel as a trigger source.
+
+maintainers:
+ - David Lechner <dlechner@baylibre.com>
+
+properties:
+ compatible:
+ const: pwm-trigger
+
+ '#trigger-source-cells':
+ const: 0
+
+ pwms:
+ maxItems: 1
+
+required:
+ - compatible
+ - '#trigger-source-cells'
+ - pwms
+
+additionalProperties: false
+
+examples:
+ - |
+ trigger {
+ compatible = "pwm-trigger";
+ #trigger-source-cells = <0>;
+ pwms = <&pwm 0 1000000 0>;
+ };
diff --git a/Documentation/devicetree/bindings/trivial-devices.yaml b/Documentation/devicetree/bindings/trivial-devices.yaml
index fadbd3c041c8..30e8f89fa032 100644
--- a/Documentation/devicetree/bindings/trivial-devices.yaml
+++ b/Documentation/devicetree/bindings/trivial-devices.yaml
@@ -189,6 +189,8 @@ properties:
- mcube,mc3230
# Measurement Specialities I2C temperature and humidity sensor
- meas,htu21
+ # Measurement Specialities I2C temperature and humidity sensor
+ - meas,htu31
# Measurement Specialities I2C pressure and temperature sensor
- meas,ms5637
# Measurement Specialities I2C pressure and temperature sensor
diff --git a/Documentation/devicetree/bindings/ufs/renesas,ufs.yaml b/Documentation/devicetree/bindings/ufs/renesas,ufs.yaml
index 1949a15e73d2..ac11ac7d1d12 100644
--- a/Documentation/devicetree/bindings/ufs/renesas,ufs.yaml
+++ b/Documentation/devicetree/bindings/ufs/renesas,ufs.yaml
@@ -33,6 +33,16 @@ properties:
resets:
maxItems: 1
+ nvmem-cells:
+ maxItems: 1
+
+ nvmem-cell-names:
+ items:
+ - const: calibration
+
+dependencies:
+ nvmem-cells: [ nvmem-cell-names ]
+
required:
- compatible
- reg
@@ -58,4 +68,6 @@ examples:
freq-table-hz = <200000000 200000000>, <38400000 38400000>;
power-domains = <&sysc R8A779F0_PD_ALWAYS_ON>;
resets = <&cpg 1514>;
+ nvmem-cells = <&ufs_tune>;
+ nvmem-cell-names = "calibration";
};
diff --git a/Documentation/devicetree/bindings/ufs/rockchip,rk3576-ufshc.yaml b/Documentation/devicetree/bindings/ufs/rockchip,rk3576-ufshc.yaml
new file mode 100644
index 000000000000..c7d17cf4dc42
--- /dev/null
+++ b/Documentation/devicetree/bindings/ufs/rockchip,rk3576-ufshc.yaml
@@ -0,0 +1,105 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/ufs/rockchip,rk3576-ufshc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Rockchip UFS Host Controller
+
+maintainers:
+ - Shawn Lin <shawn.lin@rock-chips.com>
+
+allOf:
+ - $ref: ufs-common.yaml
+
+properties:
+ compatible:
+ const: rockchip,rk3576-ufshc
+
+ reg:
+ maxItems: 5
+
+ reg-names:
+ items:
+ - const: hci
+ - const: mphy
+ - const: hci_grf
+ - const: mphy_grf
+ - const: hci_apb
+
+ clocks:
+ maxItems: 4
+
+ clock-names:
+ items:
+ - const: core
+ - const: pclk
+ - const: pclk_mphy
+ - const: ref_out
+
+ power-domains:
+ maxItems: 1
+
+ resets:
+ maxItems: 4
+
+ reset-names:
+ items:
+ - const: biu
+ - const: sys
+ - const: ufs
+ - const: grf
+
+ reset-gpios:
+ maxItems: 1
+ description: |
+ GPIO specifiers for the host to reset the whole UFS device including PHY and
+ memory. This GPIO is active low; per the UFS spec, choose a GPIO whose high
+ output voltage is lower than 1.5V.
+
+required:
+ - compatible
+ - reg
+ - reg-names
+ - clocks
+ - clock-names
+ - interrupts
+ - power-domains
+ - resets
+ - reset-names
+ - reset-gpios
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/rockchip,rk3576-cru.h>
+ #include <dt-bindings/reset/rockchip,rk3576-cru.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/power/rockchip,rk3576-power.h>
+ #include <dt-bindings/pinctrl/rockchip.h>
+ #include <dt-bindings/gpio/gpio.h>
+
+ soc {
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ ufshc: ufshc@2a2d0000 {
+ compatible = "rockchip,rk3576-ufshc";
+ reg = <0x0 0x2a2d0000 0x0 0x10000>,
+ <0x0 0x2b040000 0x0 0x10000>,
+ <0x0 0x2601f000 0x0 0x1000>,
+ <0x0 0x2603c000 0x0 0x1000>,
+ <0x0 0x2a2e0000 0x0 0x10000>;
+ reg-names = "hci", "mphy", "hci_grf", "mphy_grf", "hci_apb";
+ clocks = <&cru ACLK_UFS_SYS>, <&cru PCLK_USB_ROOT>, <&cru PCLK_MPHY>,
+ <&cru CLK_REF_UFS_CLKOUT>;
+ clock-names = "core", "pclk", "pclk_mphy", "ref_out";
+ interrupts = <GIC_SPI 361 IRQ_TYPE_LEVEL_HIGH>;
+ power-domains = <&power RK3576_PD_USB>;
+ resets = <&cru SRST_A_UFS_BIU>, <&cru SRST_A_UFS_SYS>, <&cru SRST_A_UFS>,
+ <&cru SRST_P_UFS_GRF>;
+ reset-names = "biu", "sys", "ufs", "grf";
+ reset-gpios = <&gpio4 RK_PD0 GPIO_ACTIVE_LOW>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/usb/dwc3-xilinx.yaml b/Documentation/devicetree/bindings/usb/dwc3-xilinx.yaml
index 00f87a558c7d..b5843f4d17d8 100644
--- a/Documentation/devicetree/bindings/usb/dwc3-xilinx.yaml
+++ b/Documentation/devicetree/bindings/usb/dwc3-xilinx.yaml
@@ -101,7 +101,6 @@ examples:
#include <dt-bindings/dma/xlnx-zynqmp-dpdma.h>
#include <dt-bindings/power/xlnx-zynqmp-power.h>
#include <dt-bindings/reset/xlnx-zynqmp-resets.h>
- #include <dt-bindings/clock/xlnx-zynqmp-clk.h>
#include <dt-bindings/reset/xlnx-zynqmp-resets.h>
#include <dt-bindings/phy/phy.h>
axi {
@@ -113,7 +112,7 @@ examples:
#size-cells = <0x2>;
compatible = "xlnx,zynqmp-dwc3";
reg = <0x0 0xff9d0000 0x0 0x100>;
- clocks = <&zynqmp_clk USB0_BUS_REF>, <&zynqmp_clk USB3_DUAL_REF>;
+ clocks = <&zynqmp_clk 32>, <&zynqmp_clk 34>;
clock-names = "bus_clk", "ref_clk";
power-domains = <&zynqmp_firmware PD_USB_0>;
resets = <&zynqmp_reset ZYNQMP_RESET_USB1_CORERESET>,
diff --git a/Documentation/devicetree/bindings/usb/mediatek,mtk-xhci.yaml b/Documentation/devicetree/bindings/usb/mediatek,mtk-xhci.yaml
index ef3143f4b794..004d3ebec091 100644
--- a/Documentation/devicetree/bindings/usb/mediatek,mtk-xhci.yaml
+++ b/Documentation/devicetree/bindings/usb/mediatek,mtk-xhci.yaml
@@ -106,6 +106,10 @@ properties:
- description: USB3/SS(P) PHY
- description: USB2/HS PHY
+ port:
+ $ref: /schemas/graph.yaml#/properties/port
+ description: Super Speed (SS) Output endpoint to a Type-C connector
+
vusb33-supply:
description: Regulator of USB AVDD3.3v
diff --git a/Documentation/devicetree/bindings/usb/mediatek,mtu3.yaml b/Documentation/devicetree/bindings/usb/mediatek,mtu3.yaml
index d4e187c78a0b..21fc6bbe954f 100644
--- a/Documentation/devicetree/bindings/usb/mediatek,mtu3.yaml
+++ b/Documentation/devicetree/bindings/usb/mediatek,mtu3.yaml
@@ -155,6 +155,18 @@ properties:
property is used. See graph.txt
$ref: /schemas/graph.yaml#/properties/port
+ ports:
+ $ref: /schemas/graph.yaml#/properties/ports
+
+ properties:
+ port@0:
+ $ref: /schemas/graph.yaml#/properties/port
+ description: High Speed (HS) data bus.
+
+ port@1:
+ $ref: /schemas/graph.yaml#/properties/port
+ description: Super Speed (SS) data bus.
+
enable-manual-drd:
$ref: /schemas/types.yaml#/definitions/flag
description:
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.yaml b/Documentation/devicetree/bindings/vendor-prefixes.yaml
index 5079ca6ce1d1..0ef0bc6449ec 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.yaml
+++ b/Documentation/devicetree/bindings/vendor-prefixes.yaml
@@ -147,6 +147,8 @@ patternProperties:
description: Arctic Sand
"^arcx,.*":
description: arcx Inc. / Archronix Inc.
+ "^ariaboard,.*":
+ description: Shanghai Novotech Co., Ltd. (Ariaboard)
"^aries,.*":
description: Aries Embedded GmbH
"^arm,.*":
@@ -593,6 +595,8 @@ patternProperties:
description: GlobalTop Technology, Inc.
"^gmt,.*":
description: Global Mixed-mode Technology, Inc.
+ "^gocontroll,.*":
+ description: GOcontroll Modular Embedded Electronics B.V.
"^goldelico,.*":
description: Golden Delicious Computers GmbH & Co. KG
"^goodix,.*":
@@ -1031,6 +1035,8 @@ patternProperties:
description: Neofidelity Inc.
"^neonode,.*":
description: Neonode Inc.
+ "^netcube,.*":
+ description: NetCube Systems Austria
"^netgear,.*":
description: NETGEAR
"^netlogic,.*":
@@ -1202,6 +1208,8 @@ patternProperties:
description: Primux Trading, S.L.
"^probox2,.*":
description: PROBOX2 (by W2COMP Co., Ltd.)
+ "^pri,.*":
+ description: Priva
"^prt,.*":
description: Protonic Holland
"^pulsedlight,.*":
@@ -1267,7 +1275,7 @@ patternProperties:
"^riscv,.*":
description: RISC-V Foundation
"^rockchip,.*":
- description: Fuzhou Rockchip Electronics Co., Ltd
+ description: Rockchip Electronics Co., Ltd.
"^rocktech,.*":
description: ROCKTECH DISPLAYS LIMITED
"^rohm,.*":
@@ -1737,6 +1745,8 @@ patternProperties:
description: Shenzhen Yashi Changhua Intelligent Technology Co., Ltd.
"^ysoft,.*":
description: Y Soft Corporation a.s.
+ "^yuridenki,.*":
+ description: Yuridenki-Shokai Co. Ltd.
"^zarlink,.*":
description: Zarlink Semiconductor
"^zealz,.*":
diff --git a/Documentation/driver-api/firmware/firmware-usage-guidelines.rst b/Documentation/driver-api/firmware/firmware-usage-guidelines.rst
index fdcfce42c6d2..336e912281c3 100644
--- a/Documentation/driver-api/firmware/firmware-usage-guidelines.rst
+++ b/Documentation/driver-api/firmware/firmware-usage-guidelines.rst
@@ -42,3 +42,8 @@ then of course these rules will not apply strictly.)
deprecating old major versions, then this should only be done as a
last option, and be stated clearly in all communications.
+* Firmware files that affect the User API (UAPI) shall not introduce
+ changes that break existing userspace programs. Updates to such firmware
+ must ensure backward compatibility with existing userspace applications.
+ This includes maintaining consistent interfaces and behaviors that
+ userspace programs rely on.
diff --git a/Documentation/driver-api/generic-counter.rst b/Documentation/driver-api/generic-counter.rst
index 71ccc30e586b..e826f16ea43d 100644
--- a/Documentation/driver-api/generic-counter.rst
+++ b/Documentation/driver-api/generic-counter.rst
@@ -467,7 +467,7 @@ Counter sysfs
Translates counter data to the standard Counter sysfs interface format
and vice versa.
-Please refer to the ``Documentation/ABI/testing/sysfs-bus-counter`` file
+Please refer to the Documentation/ABI/testing/sysfs-bus-counter file
for a detailed breakdown of the available Generic Counter interface
sysfs attributes.
@@ -483,7 +483,7 @@ Sysfs Interface
Several sysfs attributes are generated by the Generic Counter interface,
and reside under the ``/sys/bus/counter/devices/counterX`` directory,
where ``X`` is to the respective counter device id. Please see
-``Documentation/ABI/testing/sysfs-bus-counter`` for detailed information
+Documentation/ABI/testing/sysfs-bus-counter for detailed information
on each Generic Counter interface sysfs attribute.
Through these sysfs attributes, programs and scripts may interact with
diff --git a/Documentation/driver-api/iio/core.rst b/Documentation/driver-api/iio/core.rst
index dfe438dc91a7..42b580fb2989 100644
--- a/Documentation/driver-api/iio/core.rst
+++ b/Documentation/driver-api/iio/core.rst
@@ -60,7 +60,7 @@ directory. Common attributes are:
* :file:`sampling_frequency_available`, available discrete set of sampling
frequency values for device.
* Available standard attributes for IIO devices are described in the
- :file:`Documentation/ABI/testing/sysfs-bus-iio` file in the Linux kernel
+ Documentation/ABI/testing/sysfs-bus-iio file in the Linux kernel
sources.
IIO device channels
diff --git a/Documentation/driver-api/infiniband.rst b/Documentation/driver-api/infiniband.rst
index 30e142ccbee9..10d8be9e74fe 100644
--- a/Documentation/driver-api/infiniband.rst
+++ b/Documentation/driver-api/infiniband.rst
@@ -77,14 +77,14 @@ iSCSI Extensions for RDMA (iSER)
:internal:
.. kernel-doc:: drivers/infiniband/ulp/iser/iscsi_iser.c
- :functions: iscsi_iser_pdu_alloc iser_initialize_task_headers \
- iscsi_iser_task_init iscsi_iser_mtask_xmit iscsi_iser_task_xmit \
- iscsi_iser_cleanup_task iscsi_iser_check_protection \
- iscsi_iser_conn_create iscsi_iser_conn_bind \
- iscsi_iser_conn_start iscsi_iser_conn_stop \
- iscsi_iser_session_destroy iscsi_iser_session_create \
- iscsi_iser_set_param iscsi_iser_ep_connect iscsi_iser_ep_poll \
- iscsi_iser_ep_disconnect
+ :functions: iscsi_iser_pdu_alloc iser_initialize_task_headers
+ iscsi_iser_task_init iscsi_iser_mtask_xmit iscsi_iser_task_xmit
+ iscsi_iser_cleanup_task iscsi_iser_check_protection
+ iscsi_iser_conn_create iscsi_iser_conn_bind
+ iscsi_iser_conn_start iscsi_iser_conn_stop
+ iscsi_iser_session_destroy iscsi_iser_session_create
+ iscsi_iser_set_param iscsi_iser_ep_connect iscsi_iser_ep_poll
+ iscsi_iser_ep_disconnect
.. kernel-doc:: drivers/infiniband/ulp/iser/iser_initiator.c
:internal:
diff --git a/Documentation/driver-api/media/drivers/zoran.rst b/Documentation/driver-api/media/drivers/zoran.rst
index b205e10c3154..3e05b7f0442a 100644
--- a/Documentation/driver-api/media/drivers/zoran.rst
+++ b/Documentation/driver-api/media/drivers/zoran.rst
@@ -222,7 +222,7 @@ The CCIR - I uses the PAL colorsystem, and is used in Great Britain, Hong Kong,
Ireland, Nigeria, South Africa.
The CCIR - N uses the PAL colorsystem and PAL frame size but the NTSC framerate,
-and is used in Argentinia, Uruguay, an a few others
+and is used in Argentina, Uruguay, and a few others
We do not talk about how the audio is broadcast !
diff --git a/Documentation/driver-api/media/maintainer-entry-profile.rst b/Documentation/driver-api/media/maintainer-entry-profile.rst
index ffc712a5f632..ad96a89ee916 100644
--- a/Documentation/driver-api/media/maintainer-entry-profile.rst
+++ b/Documentation/driver-api/media/maintainer-entry-profile.rst
@@ -116,7 +116,7 @@ CEC drivers ``cec-compliance``
.. [3] The ``v4l2-compliance`` also covers the media controller usage inside
V4L2 drivers.
-Other compilance tools are under development to check other parts of the
+Other compliance tools are under development to check other parts of the
subsystem.
Those tests need to pass before the patches go upstream.
diff --git a/Documentation/driver-api/media/tx-rx.rst b/Documentation/driver-api/media/tx-rx.rst
index c71003f74b1c..0b8c9cde8ee4 100644
--- a/Documentation/driver-api/media/tx-rx.rst
+++ b/Documentation/driver-api/media/tx-rx.rst
@@ -49,6 +49,13 @@ Link frequency
The :ref:`V4L2_CID_LINK_FREQ <v4l2-cid-link-freq>` control is used to tell the
receiver the frequency of the bus (i.e. it is not the same as the symbol rate).
+Drivers that do not have user-configurable link frequency should report it
+through the ``.get_mbus_config()`` subdev pad operation, in the ``link_freq``
+field of struct v4l2_mbus_config, instead of through controls.
+
+Receiver drivers should use the :c:func:`v4l2_get_link_freq` helper to obtain the
+link frequency from the transmitter sub-device.
+
``.enable_streams()`` and ``.disable_streams()`` callbacks
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -58,6 +65,15 @@ to control the transmitter driver's streaming state. These callbacks may not be
called directly, but by using ``v4l2_subdev_enable_streams()`` and
``v4l2_subdev_disable_streams()``.
+Stopping the transmitter
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+A transmitter stops sending the stream of images as a result of
+calling the ``.disable_streams()`` callback. Some transmitters may stop the
+stream at a frame boundary whereas others stop immediately,
+effectively leaving the current frame unfinished. The receiver driver
+should not make assumptions either way, but function properly in both
+cases.
CSI-2 transmitter drivers
-------------------------
@@ -126,13 +142,3 @@ device, so this should be only done when it is needed.
Receiver drivers that do not need explicit LP-11 or LP-111 state setup are
waived from calling the two callbacks.
-
-Stopping the transmitter
-^^^^^^^^^^^^^^^^^^^^^^^^
-
-A transmitter stops sending the stream of images as a result of
-calling the ``.disable_streams()`` callback. Some transmitters may stop the
-stream at a frame boundary whereas others stop immediately,
-effectively leaving the current frame unfinished. The receiver driver
-should not make assumptions either way, but function properly in both
-cases.
diff --git a/Documentation/driver-api/nvdimm/nvdimm.rst b/Documentation/driver-api/nvdimm/nvdimm.rst
index ca16b5acbf30..c205efa4d45b 100644
--- a/Documentation/driver-api/nvdimm/nvdimm.rst
+++ b/Documentation/driver-api/nvdimm/nvdimm.rst
@@ -535,12 +535,12 @@ internally with a static identifier::
char devname[50];
snprintf(devname, sizeof(devname), "namespace%d.%d",
- ndctl_region_get_id(region), paramaters->id);
+ ndctl_region_get_id(region), parameters->id);
ndctl_namespace_set_alt_name(ndns, devname);
/* 'uuid' must be set prior to setting size! */
- ndctl_namespace_set_uuid(ndns, paramaters->uuid);
- ndctl_namespace_set_size(ndns, paramaters->size);
+ ndctl_namespace_set_uuid(ndns, parameters->uuid);
+ ndctl_namespace_set_size(ndns, parameters->size);
/* unlike pmem namespaces, blk namespaces have a sector size */
if (parameters->lbasize)
ndctl_namespace_set_sector_size(ndns, parameters->lbasize);
diff --git a/Documentation/driver-api/pm/devices.rst b/Documentation/driver-api/pm/devices.rst
index d448cb57df86..8d86d5da4023 100644
--- a/Documentation/driver-api/pm/devices.rst
+++ b/Documentation/driver-api/pm/devices.rst
@@ -358,7 +358,7 @@ the phases are: ``prepare``, ``suspend``, ``suspend_late``, ``suspend_noirq``.
is probed against the device in question by passing them to the
:c:func:`dev_pm_set_driver_flags` helper function.] If the first of
these flags is set, the PM core will not apply the direct-complete
- procedure described above to the given device and, consequenty, to any
+ procedure described above to the given device and, consequently, to any
of its ancestors. The second flag, when set, informs the middle layer
code (bus types, device types, PM domains, classes) that it should take
the return value of the ``->prepare`` callback provided by the driver
diff --git a/Documentation/edac/features.rst b/Documentation/edac/features.rst
new file mode 100644
index 000000000000..3f283de297c7
--- /dev/null
+++ b/Documentation/edac/features.rst
@@ -0,0 +1,103 @@
+.. SPDX-License-Identifier: GPL-2.0 OR GFDL-1.2-no-invariants-or-later
+
+=================
+EDAC/RAS features
+=================
+
+Copyright (c) 2024-2025 HiSilicon Limited.
+
+:Author: Shiju Jose <shiju.jose@huawei.com>
+:License: The GNU Free Documentation License, Version 1.2 without
+ Invariant Sections, Front-Cover Texts nor Back-Cover Texts.
+ (dual licensed under the GPL v2)
+
+- Written for: 6.15
+
+Introduction
+------------
+
+EDAC/RAS components plugging and high-level design:
+
+1. Scrub control
+
+2. Error Check Scrub (ECS) control
+
+3. ACPI RAS2 features
+
+4. Post Package Repair (PPR) control
+
+5. Memory Sparing Repair control
+
+High level design is illustrated in the following diagram::
+
+ +-----------------------------------------------+
+ | Userspace - Rasdaemon |
+ | +-------------+ |
+ | | RAS CXL mem | +---------------+ |
+ | |error handler|---->| | |
+ | +-------------+ | RAS dynamic | |
+ | +-------------+ | scrub, memory | |
+ | | RAS memory |---->| repair control| |
+ | |error handler| +----|----------+ |
+ | +-------------+ | |
+ +--------------------------|--------------------+
+ |
+ |
+ +-------------------------------|------------------------------+
+ | Kernel EDAC extension for | controlling RAS Features |
+ |+------------------------------|----------------------------+ |
+ || EDAC Core Sysfs EDAC| Bus | |
+ || +--------------------------|---------------------------+| |
+ || |/sys/bus/edac/devices/<dev>/scrubX/ | | EDAC device || |
+ || |/sys/bus/edac/devices/<dev>/ecsX/ |<->| EDAC MC || |
+ || |/sys/bus/edac/devices/<dev>/repairX | | EDAC sysfs || |
+ || +---------------------------|--------------------------+| |
+ || EDAC|Bus | |
+ || | | |
+ || +----------+ Get feature | Get feature | |
+ || | | desc +---------|------+ desc +----------+ | |
+ || |EDAC scrub|<-----| EDAC device | | | | |
+ || +----------+ | driver- RAS |----->| EDAC mem | | |
+ || +----------+ | feature control| | repair | | |
+ || | |<-----| | +----------+ | |
+ || |EDAC ECS | +---------|------+ | |
+ || +----------+ Register RAS|features | |
+ || ______________________|_____________ | |
+ |+---------|---------------|------------------|--------------+ |
+ | +-------|----+ +-------|-------+ +----|----------+ |
+ | | | | CXL mem driver| | Client driver | |
+ | | ACPI RAS2 | | scrub, ECS, | | memory repair | |
+ | | driver | | sparing, PPR | | features | |
+ | +-----|------+ +-------|-------+ +------|--------+ |
+ | | | | |
+ +--------|-----------------|--------------------|--------------+
+ | | |
+ +--------|-----------------|--------------------|--------------+
+ | +---|-----------------|--------------------|-------+ |
+ | | | |
+ | | Platform HW and Firmware | |
+ | +--------------------------------------------------+ |
+ +--------------------------------------------------------------+
+
+
+1. EDAC Features components - Create feature-specific descriptors. For
+ example: scrub, ECS, memory repair in the above diagram.
+
+2. EDAC device driver for controlling RAS Features - Gets the feature's
+ attribute descriptors from the EDAC RAS feature component, registers the
+ device's RAS features with the EDAC bus, and exposes the feature control
+ attributes via sysfs. For example, /sys/bus/edac/devices/<dev-name>/<feature>X/
+
+3. RAS dynamic feature controller - Userspace sample modules in rasdaemon for
+ dynamic scrub/repair control, issuing scrubbing/repair when an excessive
+ number of corrected memory errors is reported in a short span of time.
+
+RAS features
+------------
+1. Memory Scrub
+
+Memory scrub features are documented in `Documentation/edac/scrub.rst`.
+
+2. Memory Repair
+
+Memory repair features are documented in `Documentation/edac/memory_repair.rst`.
diff --git a/Documentation/edac/index.rst b/Documentation/edac/index.rst
new file mode 100644
index 000000000000..420c6601dbae
--- /dev/null
+++ b/Documentation/edac/index.rst
@@ -0,0 +1,12 @@
+.. SPDX-License-Identifier: GPL-2.0 OR GFDL-1.2-no-invariants-or-later
+
+==============
+EDAC Subsystem
+==============
+
+.. toctree::
+ :maxdepth: 1
+
+ features
+ memory_repair
+ scrub
diff --git a/Documentation/edac/memory_repair.rst b/Documentation/edac/memory_repair.rst
new file mode 100644
index 000000000000..52162a422864
--- /dev/null
+++ b/Documentation/edac/memory_repair.rst
@@ -0,0 +1,121 @@
+.. SPDX-License-Identifier: GPL-2.0 OR GFDL-1.2-no-invariants-or-later
+
+==========================
+EDAC Memory Repair Control
+==========================
+
+Copyright (c) 2024-2025 HiSilicon Limited.
+
+:Author: Shiju Jose <shiju.jose@huawei.com>
+:License: The GNU Free Documentation License, Version 1.2 without
+ Invariant Sections, Front-Cover Texts nor Back-Cover Texts.
+ (dual licensed under the GPL v2)
+:Original Reviewers:
+
+- Written for: 6.15
+
+Introduction
+------------
+
+Some memory devices support repair operations to address issues in their
+memory media. Post Package Repair (PPR) and memory sparing are examples of
+such features.
+
+Post Package Repair (PPR)
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Post Package Repair is a maintenance operation which requests the memory
+device to perform repair operation on its media. It is a memory self-healing
+feature that fixes a failing memory location by replacing it with a spare row
+in a DRAM device.
+
+For example, a CXL memory device with DRAM components that support PPR
+ features implements maintenance operations. DRAM components support two
+ types of PPR functions:
+
+ - hard PPR, for a permanent row repair, and
+ - soft PPR, for a temporary row repair.
+
+Soft PPR is much faster than hard PPR, but the repair is lost after a power
+cycle.
+
+The data may not be retained and memory requests may not be correctly
+processed during a repair operation. In that case, the repair operation should
+not be executed at runtime.
+
+For example, for CXL memory devices, see CXL spec rev 3.1 [1]_ sections
+8.2.9.7.1.1 PPR Maintenance Operations, 8.2.9.7.1.2 sPPR Maintenance Operation
+and 8.2.9.7.1.3 hPPR Maintenance Operation for more details.
+
+Memory Sparing
+~~~~~~~~~~~~~~
+
+Memory sparing is a repair function that replaces a portion of memory with
+a portion of functional memory at a particular granularity. Memory
+sparing has cacheline/row/bank/rank sparing granularities. For example, in
+rank memory-sparing mode, one memory rank serves as a spare for other ranks on
+the same channel in case they fail.
+
+The spare rank is held in reserve and not used as active memory until
+a failure is indicated, with reserved capacity subtracted from the total
+available memory in the system.
+
+After an error threshold is surpassed in a system protected by memory sparing,
+the content of a failing rank of DIMMs is copied to the spare rank. The
+failing rank is then taken offline and the spare rank placed online for use as
+active memory in place of the failed rank.
+
+For example, CXL memory devices can support various subclasses of sparing
+operations, which vary in the scope of the sparing being performed.
+
+Cacheline sparing subclass refers to a sparing action that can replace a full
+cacheline. Row sparing is provided as an alternative to PPR sparing functions
+and its scope is that of a single DDR row. Bank sparing allows an entire bank
+to be replaced. Rank sparing is defined as an operation in which an entire DDR
+rank is replaced.
+
+See CXL spec 3.1 [1]_ section 8.2.9.7.1.4 Memory Sparing Maintenance
+Operations for more details.
+
+.. [1] https://computeexpresslink.org/cxl-specification/
+
+Use cases of generic memory repair features control
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+1. The soft PPR, hard PPR and memory-sparing features share similar control
+ attributes. Therefore, there is a need for a standardized, generic sysfs
+ repair control that is exposed to userspace and used by administrators,
+ scripts and tools.
+
+2. When a CXL device detects an error in a memory component, it informs the
+ host of the need for a repair maintenance operation by using an event
+ record where the "maintenance needed" flag is set. The event record
+ specifies the device physical address (DPA) and attributes of the memory
+ that requires repair. The kernel reports the corresponding CXL general
+ media or DRAM trace event to userspace, and userspace tools (e.g.
+ rasdaemon) initiate a repair maintenance operation in response to the
+ device request using the sysfs repair control.
+
+3. Userspace tools, such as rasdaemon, request a repair operation on a memory
+ region when the maintenance-needed flag is set, when an uncorrected memory
+ error or an excess of corrected memory errors above a threshold value is
+ reported, or when the exceed-corrected-errors-threshold flag is set for that
+ memory.
+
+4. Multiple PPR/sparing instances may be present per memory device.
+
+5. Drivers should enforce that live repair is safe. In systems where memory
+ mapping functions can change between boots, one approach is to log memory
+ errors seen during the current boot and check live memory repair requests
+ against that log.
+
+The File System
+---------------
+
+The control attributes of a registered memory repair instance can be
+accessed in the /sys/bus/edac/devices/<dev-name>/mem_repairX/ directory.
+
+sysfs
+-----
+
+Sysfs files are documented in
+`Documentation/ABI/testing/sysfs-edac-memory-repair`.
diff --git a/Documentation/edac/scrub.rst b/Documentation/edac/scrub.rst
new file mode 100644
index 000000000000..daab929cdba1
--- /dev/null
+++ b/Documentation/edac/scrub.rst
@@ -0,0 +1,266 @@
+.. SPDX-License-Identifier: GPL-2.0 OR GFDL-1.2-no-invariants-or-later
+
+=============
+Scrub Control
+=============
+
+Copyright (c) 2024-2025 HiSilicon Limited.
+
+:Author: Shiju Jose <shiju.jose@huawei.com>
+:License: The GNU Free Documentation License, Version 1.2 without
+ Invariant Sections, Front-Cover Texts nor Back-Cover Texts.
+ (dual licensed under the GPL v2)
+
+- Written for: 6.15
+
+Introduction
+------------
+
+Increasing DRAM size and cost have made memory subsystem reliability an
+important concern. These modules are used where potentially corrupted data
+could cause expensive or fatal issues. Memory errors are among the top
+hardware failures that cause server and workload crashes.
+
+Memory scrubbing is a feature where an ECC (Error-Correcting Code) engine
+reads data from each memory media location, corrects if necessary and writes
+the corrected data back to the same memory media location.
+
+DIMMs can be scrubbed at a configurable rate to detect uncorrected memory
+errors and attempt recovery from detected errors, providing the following
+benefits:
+
+1. Proactively scrubbing DIMMs reduces the chance of a correctable error
+ becoming uncorrectable.
+
+2. When detected, uncorrected errors caught in unallocated memory pages are
+ isolated and prevented from being allocated to an application or the OS.
+
+3. This reduces the likelihood of software or hardware products encountering
+ memory errors.
+
+4. The additional data on failures in memory may be used to build up
+ statistics that are later used to decide whether to use memory repair
+ technologies such as Post Package Repair or Sparing.
+
+There are 2 types of memory scrubbing:
+
+1. Background (patrol) scrubbing while the DRAM is otherwise idle.
+
+2. On-demand scrubbing for a specific address range or region of memory.
+
+Several types of interfaces to hardware memory scrubbers have been
+identified, such as CXL memory device patrol scrub, CXL DDR5 ECS, ACPI
+RAS2 memory scrubbing, and ACPI NVDIMM ARS (Address Range Scrub).
+
+The control mechanisms vary across different memory scrubbers. To enable
+standardized userspace tooling, there is a need to present these controls
+through a standardized ABI.
+
+A generic memory EDAC scrub control allows users to manage underlying
+scrubbers in the system through a standardized sysfs control interface. It
+abstracts the management of various scrubbing functionalities into a unified
+set of functions.
+
+Use cases of common scrub control feature
+-----------------------------------------
+
+1. Several types of interfaces for hardware memory scrubbers have been
+ identified, including the CXL memory device patrol scrub, CXL DDR5 ECS,
+ ACPI RAS2 memory scrubbing features, ACPI NVDIMM ARS (Address Range Scrub),
+ and software-based memory scrubbers.
+
+ Of the identified interfaces to hardware memory scrubbers, some support
+ control over patrol (background) scrubbing (e.g., ACPI RAS2, CXL) and/or
+ on-demand scrubbing (e.g., ACPI RAS2, ACPI ARS). However, the scrub control
+ interfaces vary between memory scrubbers, highlighting the need for
+ a standardized, generic sysfs scrub control interface that is accessible to
+ userspace for administration and use by scripts/tools.
+
+2. User-space scrub controls allow users to disable scrubbing if necessary,
+ for example, to disable background patrol scrubbing or adjust the scrub
+ rate for performance-aware operations where background activities need to
+ be minimized or disabled.
+
+3. User-space tools enable on-demand scrubbing for specific address ranges,
+ provided that the scrubber supports this functionality.
+
+4. User-space tools can also control memory DIMM scrubbing at a configurable
+ scrub rate via sysfs scrub controls. This approach offers several benefits:
+
+ 4.1. Detects uncorrectable memory errors early, before user access to affected
+ memory, helping facilitate recovery.
+
+ 4.2. Reduces the likelihood of correctable errors developing into uncorrectable
+ errors.
+
+5. Policy control for hotplugged memory is necessary because there may not
+ be a system-wide BIOS or similar control to manage scrub settings for a CXL
+ device added after boot. Determining these settings is a policy decision,
+ balancing reliability against performance, so userspace should control it.
+ Therefore, a unified interface is recommended for handling this function in
+ a way that aligns with other similar interfaces, rather than creating a
+ separate one.
+
+Scrubbing features
+------------------
+
+CXL Memory Scrubbing features
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+CXL spec r3.1 [1]_ section 8.2.9.9.11.1 describes the memory device patrol
+scrub control feature. The device patrol scrub proactively locates and
+corrects errors on a regular cycle. The patrol scrub control allows userspace
+to request changes to the CXL patrol scrubber's configuration.
+
+The patrol scrub control allows the requester to specify the number of
+hours in which a patrol scrub cycle must be completed, provided that
+the requested scrub rate is within the range the device supports. In the
+CXL driver, the number of seconds per scrub cycle that the user requests
+via sysfs is rescaled to hours per scrub cycle.
+
+In addition, the controls allow the host to disable the feature in case it
+interferes with performance-sensitive operations that require background
+operations to be turned off.
+
+Error Check Scrub (ECS)
+~~~~~~~~~~~~~~~~~~~~~~~
+
+CXL spec r3.1 [1]_ section 8.2.9.9.11.2 describes Error Check Scrub (ECS)
+- a feature defined in the JEDEC DDR5 SDRAM Specification (JESD79-5) and
+allowing DRAM to internally read, correct single-bit errors, and write back
+corrected data bits to the DRAM array while providing transparency to error
+counts.
+
+A DDR5 device contains a number of memory media Field Replaceable Units (FRUs).
+The DDR5 ECS feature, and thus the ECS control driver, supports configuring the
+ECS parameters per FRU.
+
+ACPI RAS2 Hardware-based Memory Scrubbing
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ACPI spec 6.5 [2]_ section 5.2.21 ACPI RAS2 describes an ACPI RAS2 table
+which provides interfaces for platform RAS features and supports independent
+RAS controls and capabilities for a given RAS feature for multiple instances
+of the same component in a given system.
+
+Memory RAS features apply to RAS capabilities, controls and operations that
+are specific to memory. RAS2 PCC sub-spaces for memory-specific RAS features
+have a Feature Type of 0x00 (Memory).
+
+The platform can use the hardware-based memory scrubbing feature to expose
+controls and capabilities associated with hardware-based memory scrub
+engines. As per the specification, the RAS2 memory scrubbing feature supports:
+
+1. Independent memory scrubbing controls for each NUMA domain, identified
+ using its proximity domain.
+
+2. Provision for background (patrol) scrubbing of the entire memory system,
+ as well as on-demand scrubbing for a specific region of memory.
+
+ACPI Address Range Scrubbing (ARS)
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ACPI spec 6.5 [2]_ section 9.19.7.2 describes Address Range Scrubbing (ARS).
+ARS allows the platform to communicate memory errors to system software.
+This capability allows system software to prevent accesses to addresses with
+uncorrectable errors in memory. ARS functions manage all NVDIMMs present in
+the system. Only one scrub can be in progress system wide at any given time.
+
+The following functions are supported as per the specification:
+
+1. Query ARS Capabilities for a given address range. This indicates whether the
+ platform supports the ACPI NVDIMM Root Device Unconsumed Error Notification.
+
+2. Start ARS triggers an Address Range Scrub for the given memory range.
+ Address scrubbing can be done for volatile or persistent memory, or both.
+
+3. Query ARS Status command allows software to get the status of ARS,
+ including the progress of ARS and ARS error record.
+
+4. Clear Uncorrectable Error.
+
+5. Translate SPA
+
+6. ARS Error Inject etc.
+
+The kernel already has an existing control interface for ARS, and ARS is
+currently not supported in EDAC.
+
+.. [1] https://computeexpresslink.org/cxl-specification/
+
+.. [2] https://uefi.org/specs/ACPI/6.5/
+
+Comparison of various scrubbing features
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ +--------------+-----------+-----------+-----------+-----------+
+ | | ACPI | CXL patrol| CXL ECS | ARS |
+ | Name | RAS2 | scrub | | |
+ +--------------+-----------+-----------+-----------+-----------+
+ | | | | | |
+ | On-demand | Supported | No | No | Supported |
+ | Scrubbing | | | | |
+ | | | | | |
+ +--------------+-----------+-----------+-----------+-----------+
+ | | | | | |
+ | Background | Supported | Supported | Supported | No |
+ | scrubbing | | | | |
+ | | | | | |
+ +--------------+-----------+-----------+-----------+-----------+
+ | | | | | |
+ | Mode of | Scrub ctrl| per device| per memory| Unknown |
+ | scrubbing | per NUMA | | media | |
+ | | domain. | | | |
+ +--------------+-----------+-----------+-----------+-----------+
+ | | | | | |
+ | Query scrub | Supported | Supported | Supported | Supported |
+ | capabilities | | | | |
+ | | | | | |
+ +--------------+-----------+-----------+-----------+-----------+
+ | | | | | |
+ | Setting | Supported | No | No | Supported |
+ | address range| | | | |
+ | | | | | |
+ +--------------+-----------+-----------+-----------+-----------+
+ | | | | | |
+ | Setting | Supported | Supported | No | No |
+ | scrub rate | | | | |
+ | | | | | |
+ +--------------+-----------+-----------+-----------+-----------+
+ | | | | | |
+ | Unit for | Not | in hours | No | No |
+ | scrub rate | Defined | | | |
+ | | | | | |
+ +--------------+-----------+-----------+-----------+-----------+
+ | | Supported | | | |
+ | Scrub | on-demand | No | No | Supported |
+ | status/ | scrubbing | | | |
+ | Completion | only | | | |
+ +--------------+-----------+-----------+-----------+-----------+
+ | UC error | |CXL general|CXL general| ACPI UCE |
+ | reporting | Exception |media/DRAM |media/DRAM | notify and|
+ | | |event/media|event/media| query |
+ | | |scan? |scan? | ARS status|
+ +--------------+-----------+-----------+-----------+-----------+
+ | | | | | |
+ | Support for | Supported | Supported | Supported | No |
+ | EDAC control | | | | |
+ | | | | | |
+ +--------------+-----------+-----------+-----------+-----------+
+
+The File System
+---------------
+
+The control attributes of a registered scrubber instance can be
+accessed in:
+
+/sys/bus/edac/devices/<dev-name>/scrubX/
+
+sysfs
+-----
+
+Sysfs files are documented in
+`Documentation/ABI/testing/sysfs-edac-scrub`
+
+`Documentation/ABI/testing/sysfs-edac-ecs`
diff --git a/Documentation/features/debug/kprobes-on-ftrace/arch-support.txt b/Documentation/features/debug/kprobes-on-ftrace/arch-support.txt
index 02febc883588..d937b7a03575 100644
--- a/Documentation/features/debug/kprobes-on-ftrace/arch-support.txt
+++ b/Documentation/features/debug/kprobes-on-ftrace/arch-support.txt
@@ -20,7 +20,7 @@
| openrisc: | TODO |
| parisc: | ok |
| powerpc: | ok |
- | riscv: | ok |
+ | riscv: | TODO |
| s390: | ok |
| sh: | TODO |
| sparc: | TODO |
diff --git a/Documentation/features/list-arch.sh b/Documentation/features/list-arch.sh
index e73aa35848f0..ac8ff7f6f859 100755
--- a/Documentation/features/list-arch.sh
+++ b/Documentation/features/list-arch.sh
@@ -6,6 +6,6 @@
# (If no arguments are given then it will print the host architecture's status.)
#
-ARCH=${1:-$(uname -m | sed 's/x86_64/x86/' | sed 's/i386/x86/')}
+ARCH=${1:-$(uname -m | sed 's/x86_64/x86/' | sed 's/i386/x86/' | sed 's/s390x/s390/')}
$(dirname $0)/../../scripts/get_feat.pl list --arch $ARCH
diff --git a/Documentation/filesystems/9p.rst b/Documentation/filesystems/9p.rst
index 2bbf68b56b0d..3078f3c9256a 100644
--- a/Documentation/filesystems/9p.rst
+++ b/Documentation/filesystems/9p.rst
@@ -90,7 +90,7 @@ Just start the 9pfs capable network server like diod/nfs-ganesha e.g.::
$ diod -f -n -d 0 -S -l 0.0.0.0:9999 -e $PWD
-Optionaly scan your bus if there are more then one usbg gadgets to find their path::
+Optionally, if there is more than one usbg gadget, scan your bus to find their paths::
$ python $kernel_dir/tools/usb/p9_fwd.py list
diff --git a/Documentation/filesystems/bcachefs/SubmittingPatches.rst b/Documentation/filesystems/bcachefs/SubmittingPatches.rst
index 026b12ae0d6a..18c79d548391 100644
--- a/Documentation/filesystems/bcachefs/SubmittingPatches.rst
+++ b/Documentation/filesystems/bcachefs/SubmittingPatches.rst
@@ -1,8 +1,13 @@
-Submitting patches to bcachefs:
-===============================
+Submitting patches to bcachefs
+==============================
+
+Here are suggestions for submitting patches to the bcachefs subsystem.
+
+Submission checklist
+--------------------
Patches must be tested before being submitted, either with the xfstests suite
-[0], or the full bcachefs test suite in ktest [1], depending on what's being
+[0]_, or the full bcachefs test suite in ktest [1]_, depending on what's being
touched. Note that ktest wraps xfstests and will be an easier method to running
it for most users; it includes single-command wrappers for all the mainstream
in-kernel local filesystems.
@@ -26,21 +31,21 @@ considered out of date), but try not to deviate too much without reason.
Focus on writing code that reads well and is organized well; code should be
aesthetically pleasing.
-CI:
-===
+CI
+--
Instead of running your tests locally, when running the full test suite it's
-prefereable to let a server farm do it in parallel, and then have the results
+preferable to let a server farm do it in parallel, and then have the results
in a nice test dashboard (which can tell you which failures are new, and
presents results in a git log view, avoiding the need for most bisecting).
-That exists [2], and community members may request an account. If you work for
+That exists [2]_, and community members may request an account. If you work for
a big tech company, you'll need to help out with server costs to get access -
but the CI is not restricted to running bcachefs tests: it runs any ktest test
(which generally makes it easy to wrap other tests that can run in qemu).
-Other things to think about:
-============================
+Other things to think about
+---------------------------
- How will we debug this code? Is there sufficient introspection to diagnose
when something starts acting wonky on a user machine?
@@ -68,7 +73,7 @@ Other things to think about:
land - use them. Use them judiciously, and not as a replacement for proper
error handling, but use them.
-- Does it need to be performance tested? Should we add new peformance counters?
+- Does it need to be performance tested? Should we add new performance counters?
bcachefs has a set of persistent runtime counters which can be viewed with
the 'bcachefs fs top' command; this should give users a basic idea of what
@@ -79,20 +84,22 @@ Other things to think about:
tested? (Automated tests exists but aren't in the CI, due to the hassle of
disk image management; coordinate to have them run.)
-Mailing list, IRC:
-==================
+Mailing list, IRC
+-----------------
-Patches should hit the list [3], but much discussion and code review happens on
-IRC as well [4]; many people appreciate the more conversational approach and
-quicker feedback.
+Patches should hit the list [3]_, but much discussion and code review happens
+on IRC as well [4]_; many people appreciate the more conversational approach
+and quicker feedback.
Additionally, we have a lively user community doing excellent QA work, which
exists primarily on IRC. Please make use of that resource; user feedback is
important for any nontrivial feature, and documenting it in commit messages
would be a good idea.
-[0]: git://git.kernel.org/pub/scm/fs/xfs/xfstests-dev.git
-[1]: https://evilpiepirate.org/git/ktest.git/
-[2]: https://evilpiepirate.org/~testdashboard/ci/
-[3]: linux-bcachefs@vger.kernel.org
-[4]: irc.oftc.net#bcache, #bcachefs-dev
+.. rubric:: References
+
+.. [0] git://git.kernel.org/pub/scm/fs/xfs/xfstests-dev.git
+.. [1] https://evilpiepirate.org/git/ktest.git/
+.. [2] https://evilpiepirate.org/~testdashboard/ci/
+.. [3] linux-bcachefs@vger.kernel.org
+.. [4] irc.oftc.net#bcache, #bcachefs-dev
diff --git a/Documentation/filesystems/bcachefs/casefolding.rst b/Documentation/filesystems/bcachefs/casefolding.rst
new file mode 100644
index 000000000000..ba5de97d155f
--- /dev/null
+++ b/Documentation/filesystems/bcachefs/casefolding.rst
@@ -0,0 +1,90 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+Casefolding
+===========
+
+bcachefs has support for case-insensitive file and directory
+lookups using the regular `chattr +F` (`S_CASEFOLD`, `FS_CASEFOLD_FL`)
+casefolding attributes.
+
+The main use case for casefolding is compatibility with software written
+against other filesystems that rely on casefolded lookups
+(e.g. NTFS and Wine/Proton).
+Taking advantage of file-system level casefolding can lead to great
+loading time gains in many applications and games.
+
+Casefolding support requires a kernel with `CONFIG_UNICODE` enabled.
+Once a directory has been flagged for casefolding, a feature bit
+is enabled on the superblock which marks the filesystem as using
+casefolding.
+When the feature bit for casefolding is enabled, it is no longer possible
+to mount that filesystem on kernels without `CONFIG_UNICODE` enabled.
+
+On the lookup/query side: casefolding is implemented by allocating a new
+string of `BCH_NAME_MAX` length using the `utf8_casefold` function to
+casefold the query string.
+
+On the dirent side: casefolding is implemented by ensuring the `bkey`'s
+hash is made from the casefolded string and storing the cached casefolded
+name with the regular name in the dirent.
+
+The structure looks like this:
+
+* Regular: [dirent data][regular name][nul][nul]...
+* Casefolded: [dirent data][reg len][cf len][regular name][casefolded name][nul][nul]...
+
+(Do note, the number of NULs here is merely for illustration; their count can
+vary per-key, and they may not even be present if the key is aligned to
+`sizeof(u64)`.)
+
+This is efficient as it means that for all file lookups that require casefolding,
+it has identical performance to a regular lookup:
+a hash comparison and a `memcmp` of the name.
+
+Rationale
+---------
+
+Several designs were considered for this system:
+One was to introduce a dirent_v2; however, that would be painful, especially as
+the hash system only supports a single key type. This would also need
+`BCH_NAME_MAX` to change between versions, and a new feature bit.
+
+Another option was to store the names without the two lengths, and just take
+the combined length of the contiguous regular and casefolded names divided by
+2 as each length. This would assume that the regular length == casefolded
+length, but that could potentially
+not be true, if the uppercase unicode glyph had a different UTF-8 encoding than
+the lowercase unicode glyph.
+It would be possible to disregard the casefold cache for those cases, but it was
+decided to simply encode the two string lengths in the key to avoid random
+performance issues if this edgecase was ever hit.
+
+The option settled on was to use a free bit in d_type to mark a dirent as having
+a casefold cache, and then treat the first 4 bytes of the name block as lengths.
+You can see this in the `d_cf_name_block` member of the union in `bch_dirent`.
+
+The feature bit was used so that casefolding support can be enabled for the
+majority of users, while still allowing users who have no need for the feature
+to use bcachefs without it, as `CONFIG_UNICODE` can increase the kernel size by
+a significant amount due to the tables used, which may be the decider against
+using bcachefs on e.g. embedded platforms.
+
+Other filesystems like ext4 and f2fs have a superblock-level option for the
+casefolding encoding, but bcachefs currently does not provide this. ext4 and
+f2fs do not expose any encodings other than a single UTF-8 version. When future
+encodings are desirable,
+they will be added trivially using the opts mechanism.
+
+dentry/dcache considerations
+----------------------------
+
+Currently, in casefolded directories, bcachefs (like other filesystems) will
+not cache negative dentries.
+
+This is because currently doing so presents a problem in the following scenario:
+
+ - Lookup file "blAH" in a casefolded directory
+ - Creation of file "BLAH" in a casefolded directory
+ - Lookup file "blAH" in a casefolded directory
+
+This would fail if negative dentries were cached.
+
+This is slightly suboptimal, but could be fixed in future with some vfs work.
+
diff --git a/Documentation/filesystems/bcachefs/index.rst b/Documentation/filesystems/bcachefs/index.rst
index 7db4d7ceab58..3864d0ae89c1 100644
--- a/Documentation/filesystems/bcachefs/index.rst
+++ b/Documentation/filesystems/bcachefs/index.rst
@@ -4,10 +4,28 @@
bcachefs Documentation
======================
+Subsystem-specific development process notes
+--------------------------------------------
+
+Development notes specific to bcachefs. These are intended to supplement the
+:doc:`general kernel development handbook </process/index>`.
+
.. toctree::
- :maxdepth: 2
+ :maxdepth: 1
:numbered:
CodingStyle
SubmittingPatches
+
+Filesystem implementation
+-------------------------
+
+Documentation for filesystem features and their implementation details.
+At this moment, only a few of these are described here.
+
+.. toctree::
+ :maxdepth: 1
+ :numbered:
+
+ casefolding
errorcodes
diff --git a/Documentation/filesystems/coda.rst b/Documentation/filesystems/coda.rst
index bdde7e4e010b..0db3c83a50e5 100644
--- a/Documentation/filesystems/coda.rst
+++ b/Documentation/filesystems/coda.rst
@@ -141,7 +141,7 @@ kernel support.
a process P which accessing a Coda file. It makes a system call which
traps to the OS kernel. Examples of such calls trapping to the kernel
are ``read``, ``write``, ``open``, ``close``, ``create``, ``mkdir``,
- ``rmdir``, ``chmod`` in a Unix ontext. Similar calls exist in the Win32
+ ``rmdir``, ``chmod`` in a Unix context. Similar calls exist in the Win32
environment, and are named ``CreateFile``.
Generally the operating system handles the request in a virtual
diff --git a/Documentation/filesystems/debugfs.rst b/Documentation/filesystems/debugfs.rst
index f7f977ffbf8d..610f718ef8b5 100644
--- a/Documentation/filesystems/debugfs.rst
+++ b/Documentation/filesystems/debugfs.rst
@@ -220,7 +220,7 @@ There are a couple of other directory-oriented helper functions::
A call to debugfs_change_name() will give a new name to an existing debugfs
file, always in the same directory. The new_name must not exist prior
-to the call; the return value is 0 on success and -E... on failuer.
+to the call; the return value is 0 on success and -E... on failure.
Symbolic links can be created with debugfs_create_symlink().
There is one important thing that all debugfs users must take into account:
diff --git a/Documentation/filesystems/f2fs.rst b/Documentation/filesystems/f2fs.rst
index fb7d2ee022bc..e15c4275862a 100644
--- a/Documentation/filesystems/f2fs.rst
+++ b/Documentation/filesystems/f2fs.rst
@@ -206,6 +206,7 @@ fault_type=%d Support configuring fault injection type, should be
FAULT_BLKADDR_VALIDITY 0x000040000
FAULT_BLKADDR_CONSISTENCE 0x000080000
FAULT_NO_SEGMENT 0x000100000
+ FAULT_INCONSISTENT_FOOTER 0x000200000
=========================== ===========
mode=%s Control block allocation mode which supports "adaptive"
and "lfs". In "lfs" mode, there should be no random
@@ -365,6 +366,8 @@ errors=%s Specify f2fs behavior on critical errors. This supports modes:
pending node write drop keep N/A
pending meta write keep keep N/A
====================== =============== =============== ========
+nat_bits Enable nat_bits feature to enhance full/empty nat blocks access,
+ by default it's disabled.
======================== ============================================================
Debugfs Entries
diff --git a/Documentation/filesystems/fscrypt.rst b/Documentation/filesystems/fscrypt.rst
index 04eaab01314b..e80329908549 100644
--- a/Documentation/filesystems/fscrypt.rst
+++ b/Documentation/filesystems/fscrypt.rst
@@ -137,9 +137,8 @@ However, these ioctls have some limitations:
- In general, decrypted contents and filenames in the kernel VFS
caches are freed but not wiped. Therefore, portions thereof may be
recoverable from freed memory, even after the corresponding key(s)
- were wiped. To partially solve this, you can set
- CONFIG_PAGE_POISONING=y in your kernel config and add page_poison=1
- to your kernel command line. However, this has a performance cost.
+ were wiped. To partially solve this, you can add init_on_free=1 to
+ your kernel command line. However, this has a performance cost.
- Secret keys might still exist in CPU registers, in crypto
accelerator hardware (if used by the crypto API to implement any of
@@ -428,11 +427,8 @@ API, but the filenames mode still does.
- Mandatory:
- CONFIG_CRYPTO_ADIANTUM
- Recommended:
- - arm32: CONFIG_CRYPTO_CHACHA20_NEON
- arm32: CONFIG_CRYPTO_NHPOLY1305_NEON
- - arm64: CONFIG_CRYPTO_CHACHA20_NEON
- arm64: CONFIG_CRYPTO_NHPOLY1305_NEON
- - x86: CONFIG_CRYPTO_CHACHA20_X86_64
- x86: CONFIG_CRYPTO_NHPOLY1305_SSE2
- x86: CONFIG_CRYPTO_NHPOLY1305_AVX2
diff --git a/Documentation/filesystems/fsverity.rst b/Documentation/filesystems/fsverity.rst
index 76e538217868..dacdbc1149e6 100644
--- a/Documentation/filesystems/fsverity.rst
+++ b/Documentation/filesystems/fsverity.rst
@@ -248,11 +248,17 @@ FS_IOC_READ_VERITY_METADATA
The FS_IOC_READ_VERITY_METADATA ioctl reads verity metadata from a
verity file. This ioctl is available since Linux v5.12.
-This ioctl allows writing a server program that takes a verity file
-and serves it to a client program, such that the client can do its own
-fs-verity compatible verification of the file. This only makes sense
-if the client doesn't trust the server and if the server needs to
-provide the storage for the client.
+This ioctl is useful for cases where the verity verification should be
+performed somewhere other than the currently running kernel.
+
+One example is a server program that takes a verity file and serves it
+to a client program, such that the client can do its own fs-verity
+compatible verification of the file. This only makes sense if the
+client doesn't trust the server and if the server needs to provide the
+storage for the client.
+
+Another example is copying verity metadata when creating filesystem
+images in userspace (such as with ``mkfs.ext4 -d``).
This is a fairly specialized use case, and most fs-verity users won't
need this ioctl.
diff --git a/Documentation/filesystems/idmappings.rst b/Documentation/filesystems/idmappings.rst
index 77930c77fcfe..2a206129f828 100644
--- a/Documentation/filesystems/idmappings.rst
+++ b/Documentation/filesystems/idmappings.rst
@@ -63,8 +63,8 @@ what id ``k11000`` corresponds to in the second or third idmapping. The
straightforward algorithm to use is to apply the inverse of the first idmapping,
mapping ``k11000`` up to ``u1000``. Afterwards, we can map ``u1000`` down using
either the second idmapping mapping or third idmapping mapping. The second
-idmapping would map ``u1000`` down to ``21000``. The third idmapping would map
-``u1000`` down to ``u31000``.
+idmapping would map ``u1000`` down to ``k21000``. The third idmapping would map
+``u1000`` down to ``k31000``.
If we were given the same task for the following three idmappings::
diff --git a/Documentation/filesystems/index.rst b/Documentation/filesystems/index.rst
index 2636f2a41bd3..a9cf8e950b15 100644
--- a/Documentation/filesystems/index.rst
+++ b/Documentation/filesystems/index.rst
@@ -118,7 +118,6 @@ Documentation for filesystem implementations.
spufs/index
squashfs
sysfs
- sysv-fs
tmpfs
ubifs
ubifs-authentication
diff --git a/Documentation/filesystems/iomap/design.rst b/Documentation/filesystems/iomap/design.rst
index b0d0188a095e..e29651a42eec 100644
--- a/Documentation/filesystems/iomap/design.rst
+++ b/Documentation/filesystems/iomap/design.rst
@@ -246,6 +246,10 @@ The fields are as follows:
* **IOMAP_F_PRIVATE**: Starting with this value, the upper bits can
be set by the filesystem for its own purposes.
+ * **IOMAP_F_ANON_WRITE**: Indicates that (write) I/O does not have a target
+ block assigned to it yet and the file system will do that in the bio
+ submission handler, splitting the I/O as needed.
+
These flags can be set by iomap itself during file operations.
The filesystem should supply an ``->iomap_end`` function if it needs
to observe these flags:
@@ -352,6 +356,11 @@ operations:
``IOMAP_NOWAIT`` is often set on behalf of ``IOCB_NOWAIT`` or
``RWF_NOWAIT``.
+ * ``IOMAP_DONTCACHE`` is set when the caller wishes to perform a
+ buffered file I/O and would like the kernel to drop the pagecache
+ after the I/O completes, if it isn't already being used by another
+ thread.
+
If it is necessary to read existing file contents from a `different
<https://lore.kernel.org/all/20191008071527.29304-9-hch@lst.de/>`_
device or address range on a device, the filesystem should return that
diff --git a/Documentation/filesystems/iomap/operations.rst b/Documentation/filesystems/iomap/operations.rst
index 2c7f5df9d8b0..3b628e370d88 100644
--- a/Documentation/filesystems/iomap/operations.rst
+++ b/Documentation/filesystems/iomap/operations.rst
@@ -131,6 +131,8 @@ These ``struct kiocb`` flags are significant for buffered I/O with iomap:
* ``IOCB_NOWAIT``: Turns on ``IOMAP_NOWAIT``.
+ * ``IOCB_DONTCACHE``: Turns on ``IOMAP_DONTCACHE``.
+
Internal per-Folio State
------------------------
@@ -283,7 +285,7 @@ The ``ops`` structure must be specified and is as follows:
struct iomap_writeback_ops {
int (*map_blocks)(struct iomap_writepage_ctx *wpc, struct inode *inode,
loff_t offset, unsigned len);
- int (*prepare_ioend)(struct iomap_ioend *ioend, int status);
+ int (*submit_ioend)(struct iomap_writepage_ctx *wpc, int status);
void (*discard_folio)(struct folio *folio, loff_t pos);
};
@@ -306,13 +308,12 @@ The fields are as follows:
purpose.
This function must be supplied by the filesystem.
- - ``prepare_ioend``: Enables filesystems to transform the writeback
- ioend or perform any other preparatory work before the writeback I/O
- is submitted.
+ - ``submit_ioend``: Allows the file systems to hook into writeback bio
+ submission.
This might include pre-write space accounting updates, or installing
a custom ``->bi_end_io`` function for internal purposes, such as
deferring the ioend completion to a workqueue to run metadata update
- transactions from process context.
+ transactions from process context before submitting the bio.
This function is optional.
- ``discard_folio``: iomap calls this function after ``->map_blocks``
@@ -341,7 +342,7 @@ This can happen in interrupt or process context, depending on the
storage device.
Filesystems that need to update internal bookkeeping (e.g. unwritten
-extent conversions) should provide a ``->prepare_ioend`` function to
+extent conversions) should provide a ``->submit_ioend`` function to
set ``struct iomap_end::bio::bi_end_io`` to its own function.
This function should call ``iomap_finish_ioends`` after finishing its
own work (e.g. unwritten extent conversion).
@@ -515,18 +516,33 @@ IOMAP_WRITE`` with any combination of the following enhancements:
* ``IOMAP_ATOMIC``: This write is being issued with torn-write
protection.
- Only a single bio can be created for the write, and the write must
- not be split into multiple I/O requests, i.e. flag REQ_ATOMIC must be
- set.
+ Torn-write protection may be provided based on HW-offload or by a
+ software mechanism provided by the filesystem.
+
+ For HW-offload based support, only a single bio can be created for the
+ write, and the write must not be split into multiple I/O requests, i.e.
+ flag REQ_ATOMIC must be set.
The file range to write must be aligned to satisfy the requirements
of both the filesystem and the underlying block device's atomic
commit capabilities.
If filesystem metadata updates are required (e.g. unwritten extent
- conversion or copy on write), all updates for the entire file range
+ conversion or copy-on-write), all updates for the entire file range
must be committed atomically as well.
- Only one space mapping is allowed per untorn write.
- Untorn writes must be aligned to, and must not be longer than, a
- single file block.
+ Untorn-writes may be longer than a single file block. In all cases,
+ the mapping start disk block must have at least the same alignment as
+ the write offset.
+ The filesystem must set IOMAP_F_ATOMIC_BIO to inform the iomap core of an
+ untorn-write based on HW-offload.
+
+ For untorn-writes based on a software mechanism provided by the
+ filesystem, the disk block alignment and single-bio restrictions
+ which apply to HW-offload based untorn-writes do not apply.
+ The mechanism would typically be used as a fallback when HW-offload
+ based untorn-writes cannot be issued, e.g. when the range of the
+ write covers multiple extents, meaning that it is not possible to
+ issue a single bio.
+ All filesystem metadata updates for the entire file range must be
+ committed atomically as well.
Callers commonly hold ``i_rwsem`` in shared or exclusive mode before
calling this function.
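
As a rough, hedged sketch of how a filesystem's ``->iomap_begin`` might
advertise HW-offload untorn-write support (``fs_mapping_is_contiguous()`` is a
hypothetical helper standing in for the filesystem's own extent lookup; no
in-tree implementation is implied)::

    #include <linux/iomap.h>

    static int fs_iomap_begin(struct inode *inode, loff_t pos, loff_t length,
                              unsigned flags, struct iomap *iomap,
                              struct iomap *srcmap)
    {
            /* normal extent lookup filling *iomap would go here */

            if (flags & IOMAP_ATOMIC) {
                    /*
                     * Claim HW-offload torn-write protection only when a
                     * single, suitably aligned mapping covers the whole
                     * write; otherwise leave the flag clear and rely on the
                     * filesystem's software fallback.
                     */
                    if (fs_mapping_is_contiguous(inode, pos, length))
                            iomap->flags |= IOMAP_F_ATOMIC_BIO;
            }
            return 0;
    }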
diff --git a/Documentation/filesystems/journalling.rst b/Documentation/filesystems/journalling.rst
index 0254f7d57429..863e93e623f7 100644
--- a/Documentation/filesystems/journalling.rst
+++ b/Documentation/filesystems/journalling.rst
@@ -111,9 +111,7 @@ a callback function when the transaction is finally committed to disk,
so that you can do some of your own management. You ask the journalling
layer for calling the callback by simply setting
``journal->j_commit_callback`` function pointer and that function is
-called after each transaction commit. You can also use
-``transaction->t_private_list`` for attaching entries to a transaction
-that need processing when the transaction commits.
+called after each transaction commit.
JBD2 also provides a way to block all transaction updates via
jbd2_journal_lock_updates() /
diff --git a/Documentation/filesystems/locking.rst b/Documentation/filesystems/locking.rst
index d20a32b77b60..0ec0bb6eb0fb 100644
--- a/Documentation/filesystems/locking.rst
+++ b/Documentation/filesystems/locking.rst
@@ -66,7 +66,7 @@ prototypes::
int (*link) (struct dentry *,struct inode *,struct dentry *);
int (*unlink) (struct inode *,struct dentry *);
int (*symlink) (struct mnt_idmap *, struct inode *,struct dentry *,const char *);
- int (*mkdir) (struct mnt_idmap *, struct inode *,struct dentry *,umode_t);
+ struct dentry *(*mkdir) (struct mnt_idmap *, struct inode *,struct dentry *,umode_t);
int (*rmdir) (struct inode *,struct dentry *);
int (*mknod) (struct mnt_idmap *, struct inode *,struct dentry *,umode_t,dev_t);
int (*rename) (struct mnt_idmap *, struct inode *, struct dentry *,
diff --git a/Documentation/filesystems/netfs_library.rst b/Documentation/filesystems/netfs_library.rst
index 73f0bfd7e903..3886c14f89f4 100644
--- a/Documentation/filesystems/netfs_library.rst
+++ b/Documentation/filesystems/netfs_library.rst
@@ -515,7 +515,7 @@ The methods defined in the table are:
the cache to expand a request in either direction. This allows the cache to
size the request appropriately for the cache granularity.
- The function is passed poiners to the start and length in its parameters,
+ The function is passed pointers to the start and length in its parameters,
plus the size of the file for reference, and adjusts the start and length
appropriately. It should return one of:
diff --git a/Documentation/filesystems/overlayfs.rst b/Documentation/filesystems/overlayfs.rst
index 6245b67ae9e0..2db379b4b31e 100644
--- a/Documentation/filesystems/overlayfs.rst
+++ b/Documentation/filesystems/overlayfs.rst
@@ -292,13 +292,27 @@ rename or unlink will of course be noticed and handled).
Permission model
----------------
+An overlay filesystem stashes credentials that will be used when
+accessing lower or upper filesystems.
+
+In the old mount api the credentials of the task calling mount(2) are
+stashed. In the new mount api the credentials of the task creating the
+superblock through the FSCONFIG_CMD_CREATE command of fsconfig(2) are
+stashed.
+
+Starting with kernel v6.15 it is possible to use the "override_creds"
+mount option which will cause the credentials of the calling task to be
+recorded. Note that "override_creds" is only meaningful when used with
+the new mount api as the old mount api combines setting options and
+superblock creation in a single mount(2) syscall.
+
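For illustration, a minimal sketch of the new mount api sequence in C, assuming
a libc that provides the fsopen()/fsconfig()/fsmount()/move_mount() wrappers;
error handling is omitted, and whether "override_creds" is accepted as a plain
flag option here is an assumption to verify against the overlayfs option
parsing::

    #include <sys/mount.h>
    #include <fcntl.h>

    int fsfd = fsopen("overlay", FSOPEN_CLOEXEC);

    fsconfig(fsfd, FSCONFIG_SET_STRING, "lowerdir", "/lower", 0);
    fsconfig(fsfd, FSCONFIG_SET_STRING, "upperdir", "/upper", 0);
    fsconfig(fsfd, FSCONFIG_SET_STRING, "workdir",  "/work",  0);
    /* assumption: the option can be set as a boolean flag */
    fsconfig(fsfd, FSCONFIG_SET_FLAG, "override_creds", NULL, 0);
    /* the credentials of the task issuing this command are stashed */
    fsconfig(fsfd, FSCONFIG_CMD_CREATE, NULL, NULL, 0);

    int mntfd = fsmount(fsfd, FSMOUNT_CLOEXEC, 0);
    move_mount(mntfd, "", AT_FDCWD, "/merged", MOVE_MOUNT_F_EMPTY_PATH);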
Permission checking in the overlay filesystem follows these principles:
1) permission check SHOULD return the same result before and after copy up
2) task creating the overlay mount MUST NOT gain additional privileges
- 3) non-mounting task MAY gain additional privileges through the overlay,
+ 3) task[*] MAY gain additional privileges through the overlay,
compared to direct access on underlying lower or upper filesystems
This is achieved by performing two permission checks on each access:
@@ -306,7 +320,7 @@ This is achieved by performing two permission checks on each access:
a) check if current task is allowed access based on local DAC (owner,
group, mode and posix acl), as well as MAC checks
- b) check if mounting task would be allowed real operation on lower or
+ b) check if stashed credentials would be allowed real operation on lower or
upper layer based on underlying filesystem permissions, again including
MAC checks
@@ -315,10 +329,10 @@ are copied up. On the other hand it can result in server enforced
permissions (used by NFS, for example) being ignored (3).
Check (b) ensures that no task gains permissions to underlying layers that
-the mounting task does not have (2). This also means that it is possible
+the stashed credentials do not have (2). This also means that it is possible
to create setups where the consistency rule (1) does not hold; normally,
-however, the mounting task will have sufficient privileges to perform all
-operations.
+however, the stashed credentials will have sufficient privileges to
+perform all operations.
Another way to demonstrate this model is drawing parallels between::
diff --git a/Documentation/filesystems/porting.rst b/Documentation/filesystems/porting.rst
index 1639e78e3146..767b2927c762 100644
--- a/Documentation/filesystems/porting.rst
+++ b/Documentation/filesystems/porting.rst
@@ -1144,7 +1144,7 @@ and it *must* be opened exclusive.
---
-** mandatory**
+**mandatory**
->d_revalidate() gets two extra arguments - inode of parent directory and
name our dentry is expected to have. Both are stable (dir is pinned in
@@ -1157,3 +1157,49 @@ in normal case it points into the pathname being looked up.
NOTE: if you need something like full path from the root of filesystem,
you are still on your own - this assists with simple cases, but it's not
magic.
+
+---
+
+**recommended**
+
+kern_path_locked() and user_path_locked() no longer return a negative
+dentry so this doesn't need to be checked. If the name cannot be found,
+ERR_PTR(-ENOENT) is returned.
+
+---
+
+**recommended**
+
+lookup_one_qstr_excl() is changed to return errors in more cases, so
+these conditions don't require explicit checks:
+
+ - if LOOKUP_CREATE is NOT given, then the dentry won't be negative,
+ ERR_PTR(-ENOENT) is returned instead
+ - if LOOKUP_EXCL IS given, then the dentry won't be positive,
+ ERR_PTR(-EEXIST) is returned instead
+
+LOOKUP_EXCL now means "target must not exist". It can be combined with
+LOOKUP_CREATE or LOOKUP_RENAME_TARGET.
+
+---
+
+**mandatory**
+
+invalidate_inodes() is gone; use evict_inodes() instead.
+
+---
+
+**mandatory**
+
+->mkdir() now returns a dentry. If the created inode is found to
+already be in cache and have a dentry (often IS_ROOT()), it will need to
+be spliced into the given name in place of the given dentry. That dentry
+now needs to be returned. If the original dentry is used, NULL should
+be returned. Any error should be returned with ERR_PTR().
+
+In general, filesystems which use d_instantiate_new() to install the new
+inode can safely return NULL. Filesystems which may not have an I_NEW inode
+should use d_drop();d_splice_alias() and return the result of the latter.
+
+If a positive dentry cannot be returned for some reason, in-kernel
+clients such as cachefiles, nfsd, smb/server may not perform ideally but
+will fail-safe.
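
A minimal sketch of the convention described above (``fs_new_inode()`` is a
hypothetical stand-in for the filesystem's own inode allocation path)::

    static struct dentry *fs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
                                   struct dentry *dentry, umode_t mode)
    {
            struct inode *inode = fs_new_inode(dir, S_IFDIR | mode);

            if (IS_ERR(inode))
                    return ERR_CAST(inode);

            /*
             * The inode may already have a dentry (e.g. found via a file
             * handle on another mount), so drop the passed-in dentry and let
             * d_splice_alias() find or create the right one; returning its
             * result (possibly NULL) follows the new rule.
             */
            d_drop(dentry);
            return d_splice_alias(inode, dentry);
    }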
diff --git a/Documentation/filesystems/sysv-fs.rst b/Documentation/filesystems/sysv-fs.rst
deleted file mode 100644
index 89e40911ad7c..000000000000
--- a/Documentation/filesystems/sysv-fs.rst
+++ /dev/null
@@ -1,264 +0,0 @@
-.. SPDX-License-Identifier: GPL-2.0
-
-==================
-SystemV Filesystem
-==================
-
-It implements all of
- - Xenix FS,
- - SystemV/386 FS,
- - Coherent FS.
-
-To install:
-
-* Answer the 'System V and Coherent filesystem support' question with 'y'
- when configuring the kernel.
-* To mount a disk or a partition, use::
-
- mount [-r] -t sysv device mountpoint
-
- The file system type names::
-
- -t sysv
- -t xenix
- -t coherent
-
- may be used interchangeably, but the last two will eventually disappear.
-
-Bugs in the present implementation:
-
-- Coherent FS:
-
- - The "free list interleave" n:m is currently ignored.
- - Only file systems with no filesystem name and no pack name are recognized.
- (See Coherent "man mkfs" for a description of these features.)
-
-- SystemV Release 2 FS:
-
- The superblock is only searched in the blocks 9, 15, 18, which
- corresponds to the beginning of track 1 on floppy disks. No support
- for this FS on hard disk yet.
-
-
-These filesystems are rather similar. Here is a comparison with Minix FS:
-
-* Linux fdisk reports on partitions
-
- - Minix FS 0x81 Linux/Minix
- - Xenix FS ??
- - SystemV FS ??
- - Coherent FS 0x08 AIX bootable
-
-* Size of a block or zone (data allocation unit on disk)
-
- - Minix FS 1024
- - Xenix FS 1024 (also 512 ??)
- - SystemV FS 1024 (also 512 and 2048)
- - Coherent FS 512
-
-* General layout: all have one boot block, one super block and
- separate areas for inodes and for directories/data.
- On SystemV Release 2 FS (e.g. Microport) the first track is reserved and
- all the block numbers (including the super block) are offset by one track.
-
-* Byte ordering of "short" (16 bit entities) on disk:
-
- - Minix FS little endian 0 1
- - Xenix FS little endian 0 1
- - SystemV FS little endian 0 1
- - Coherent FS little endian 0 1
-
- Of course, this affects only the file system, not the data of files on it!
-
-* Byte ordering of "long" (32 bit entities) on disk:
-
- - Minix FS little endian 0 1 2 3
- - Xenix FS little endian 0 1 2 3
- - SystemV FS little endian 0 1 2 3
- - Coherent FS PDP-11 2 3 0 1
-
- Of course, this affects only the file system, not the data of files on it!
-
-* Inode on disk: "short", 0 means non-existent, the root dir ino is:
-
- ================================= ==
- Minix FS 1
- Xenix FS, SystemV FS, Coherent FS 2
- ================================= ==
-
-* Maximum number of hard links to a file:
-
- =========== =========
- Minix FS 250
- Xenix FS ??
- SystemV FS ??
- Coherent FS >=10000
- =========== =========
-
-* Free inode management:
-
- - Minix FS
- a bitmap
- - Xenix FS, SystemV FS, Coherent FS
- There is a cache of a certain number of free inodes in the super-block.
- When it is exhausted, new free inodes are found using a linear search.
-
-* Free block management:
-
- - Minix FS
- a bitmap
- - Xenix FS, SystemV FS, Coherent FS
- Free blocks are organized in a "free list". Maybe a misleading term,
- since it is not true that every free block contains a pointer to
- the next free block. Rather, the free blocks are organized in chunks
- of limited size, and every now and then a free block contains pointers
- to the free blocks pertaining to the next chunk; the first of these
- contains pointers and so on. The list terminates with a "block number"
- 0 on Xenix FS and SystemV FS, with a block zeroed out on Coherent FS.
-
-* Super-block location:
-
- =========== ==========================
- Minix FS block 1 = bytes 1024..2047
- Xenix FS block 1 = bytes 1024..2047
- SystemV FS bytes 512..1023
- Coherent FS block 1 = bytes 512..1023
- =========== ==========================
-
-* Super-block layout:
-
- - Minix FS::
-
- unsigned short s_ninodes;
- unsigned short s_nzones;
- unsigned short s_imap_blocks;
- unsigned short s_zmap_blocks;
- unsigned short s_firstdatazone;
- unsigned short s_log_zone_size;
- unsigned long s_max_size;
- unsigned short s_magic;
-
- - Xenix FS, SystemV FS, Coherent FS::
-
- unsigned short s_firstdatazone;
- unsigned long s_nzones;
- unsigned short s_fzone_count;
- unsigned long s_fzones[NICFREE];
- unsigned short s_finode_count;
- unsigned short s_finodes[NICINOD];
- char s_flock;
- char s_ilock;
- char s_modified;
- char s_rdonly;
- unsigned long s_time;
- short s_dinfo[4]; -- SystemV FS only
- unsigned long s_free_zones;
- unsigned short s_free_inodes;
- short s_dinfo[4]; -- Xenix FS only
- unsigned short s_interleave_m,s_interleave_n; -- Coherent FS only
- char s_fname[6];
- char s_fpack[6];
-
- then they differ considerably:
-
- Xenix FS::
-
- char s_clean;
- char s_fill[371];
- long s_magic;
- long s_type;
-
- SystemV FS::
-
- long s_fill[12 or 14];
- long s_state;
- long s_magic;
- long s_type;
-
- Coherent FS::
-
- unsigned long s_unique;
-
- Note that Coherent FS has no magic.
-
-* Inode layout:
-
- - Minix FS::
-
- unsigned short i_mode;
- unsigned short i_uid;
- unsigned long i_size;
- unsigned long i_time;
- unsigned char i_gid;
- unsigned char i_nlinks;
- unsigned short i_zone[7+1+1];
-
- - Xenix FS, SystemV FS, Coherent FS::
-
- unsigned short i_mode;
- unsigned short i_nlink;
- unsigned short i_uid;
- unsigned short i_gid;
- unsigned long i_size;
- unsigned char i_zone[3*(10+1+1+1)];
- unsigned long i_atime;
- unsigned long i_mtime;
- unsigned long i_ctime;
-
-
-* Regular file data blocks are organized as
-
- - Minix FS:
-
- - 7 direct blocks
- - 1 indirect block (pointers to blocks)
- - 1 double-indirect block (pointer to pointers to blocks)
-
- - Xenix FS, SystemV FS, Coherent FS:
-
- - 10 direct blocks
- - 1 indirect block (pointers to blocks)
- - 1 double-indirect block (pointer to pointers to blocks)
- - 1 triple-indirect block (pointer to pointers to pointers to blocks)
-
-
- =========== ========== ================
- Inode size inodes per block
- =========== ========== ================
- Minix FS 32 32
- Xenix FS 64 16
- SystemV FS 64 16
- Coherent FS 64 8
- =========== ========== ================
-
-* Directory entry on disk
-
- - Minix FS::
-
- unsigned short inode;
- char name[14/30];
-
- - Xenix FS, SystemV FS, Coherent FS::
-
- unsigned short inode;
- char name[14];
-
- =========== ============== =====================
- Dir entry size dir entries per block
- =========== ============== =====================
- Minix FS 16/32 64/32
- Xenix FS 16 64
- SystemV FS 16 64
- Coherent FS 16 32
- =========== ============== =====================
-
-* How to implement symbolic links such that the host fsck doesn't scream:
-
- - Minix FS normal
- - Xenix FS kludge: as regular files with chmod 1000
- - SystemV FS ??
- - Coherent FS kludge: as regular files with chmod 1000
-
-
-Notation: We often speak of a "block" but mean a zone (the allocation unit)
-and not the disk driver's notion of "block".
diff --git a/Documentation/filesystems/vfs.rst b/Documentation/filesystems/vfs.rst
index 31eea688609a..ae79c30b6c0c 100644
--- a/Documentation/filesystems/vfs.rst
+++ b/Documentation/filesystems/vfs.rst
@@ -495,7 +495,7 @@ As of kernel 2.6.22, the following members are defined:
int (*link) (struct dentry *,struct inode *,struct dentry *);
int (*unlink) (struct inode *,struct dentry *);
int (*symlink) (struct mnt_idmap *, struct inode *,struct dentry *,const char *);
- int (*mkdir) (struct mnt_idmap *, struct inode *,struct dentry *,umode_t);
+ struct dentry *(*mkdir) (struct mnt_idmap *, struct inode *,struct dentry *,umode_t);
int (*rmdir) (struct inode *,struct dentry *);
int (*mknod) (struct mnt_idmap *, struct inode *,struct dentry *,umode_t,dev_t);
int (*rename) (struct mnt_idmap *, struct inode *, struct dentry *,
@@ -562,7 +562,26 @@ otherwise noted.
``mkdir``
called by the mkdir(2) system call. Only required if you want
to support creating subdirectories. You will probably need to
- call d_instantiate() just as you would in the create() method
+ call d_instantiate_new() just as you would in the create() method.
+
+ If d_instantiate_new() is not used and if the fh_to_dentry()
+ export operation is provided, or if the storage might be
+ accessible by another path (e.g. with a network filesystem)
+ then more care may be needed. Importantly, d_instantiate()
+ should not be used with an inode that is no longer I_NEW if there
+ is any chance that the inode could already be attached to a dentry.
+ This is because of a hard rule in the VFS that a directory must
+ only ever have one dentry.
+
+ For example, if an NFS filesystem is mounted twice the new directory
+ could be visible on the other mount before it is on the original
+ mount, and a pair of name_to_handle_at(), open_by_handle_at()
+ calls could instantiate the directory inode with an IS_ROOT()
+ dentry before the first mkdir returns.
+
+ If there is any chance this could happen, then the passed-in dentry
+ should be d_drop()ed and the inode attached with d_splice_alias(). The
+ returned dentry (if any) should be returned by ->mkdir().
``rmdir``
called by the rmdir(2) system call. Only required if you want
diff --git a/Documentation/filesystems/xfs/xfs-delayed-logging-design.rst b/Documentation/filesystems/xfs/xfs-delayed-logging-design.rst
index 6402ab8e370c..2a2705e975e8 100644
--- a/Documentation/filesystems/xfs/xfs-delayed-logging-design.rst
+++ b/Documentation/filesystems/xfs/xfs-delayed-logging-design.rst
@@ -219,7 +219,7 @@ The log is circular, so the positions in the log are defined by the combination
of a cycle number - the number of times the log has been overwritten - and the
offset into the log. A LSN carries the cycle in the upper 32 bits and the
offset in the lower 32 bits. The offset is in units of "basic blocks" (512
-bytes). Hence we can do realtively simple LSN based math to keep track of
+bytes). Hence we can do relatively simple LSN based math to keep track of
available space in the log.
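
As a sketch of that math (illustrative helpers only, not the actual XFS
macros)::

    /* cycle in the upper 32 bits, basic-block offset in the lower 32 bits */
    static inline u64 lsn_pack(u32 cycle, u32 block_offset)
    {
            return ((u64)cycle << 32) | block_offset;
    }

    static inline u32 lsn_cycle(u64 lsn)  { return lsn >> 32; }
    static inline u32 lsn_blocks(u64 lsn) { return (u32)lsn; }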
Log space accounting is done via a pair of constructs called "grant heads". The
diff --git a/Documentation/filesystems/xfs/xfs-maintainer-entry-profile.rst b/Documentation/filesystems/xfs/xfs-maintainer-entry-profile.rst
index 32b6ac4ca9d6..ce4584fb3103 100644
--- a/Documentation/filesystems/xfs/xfs-maintainer-entry-profile.rst
+++ b/Documentation/filesystems/xfs/xfs-maintainer-entry-profile.rst
@@ -93,7 +93,7 @@ others on a regular basis about burnout.
sponsoring work on any part of XFS.
- **LTS Maintainer**: Someone who backports and tests bug fixes from
- uptream to the LTS kernels.
+ upstream to the LTS kernels.
There tend to be six separate LTS trees at any given time.
The maintainer for a given LTS release should identify themselves with an
diff --git a/Documentation/filesystems/xfs/xfs-online-fsck-design.rst b/Documentation/filesystems/xfs/xfs-online-fsck-design.rst
index 12aa63840830..e231d127cd40 100644
--- a/Documentation/filesystems/xfs/xfs-online-fsck-design.rst
+++ b/Documentation/filesystems/xfs/xfs-online-fsck-design.rst
@@ -4521,8 +4521,8 @@ Both online and offline repair can use this strategy.
| For this second effort, the ondisk parent pointer format as originally |
| proposed was ``(parent_inum, parent_gen, dirent_pos) → (dirent_name)``. |
| The format was changed during development to eliminate the requirement |
-| of repair tools needing to to ensure that the ``dirent_pos`` field |
-| always matched when reconstructing a directory. |
+| of repair tools needing to ensure that the ``dirent_pos`` field always |
+| matched when reconstructing a directory. |
| |
| There were a few other ways to have solved that problem: |
| |
diff --git a/Documentation/hwmon/abituguru-datasheet.rst b/Documentation/hwmon/abituguru-datasheet.rst
index 0cd61471d2a2..19ba4b4cd034 100644
--- a/Documentation/hwmon/abituguru-datasheet.rst
+++ b/Documentation/hwmon/abituguru-datasheet.rst
@@ -6,9 +6,9 @@ First of all, what I know about uGuru is no fact based on any help, hints or
datasheet from Abit. The data I have got on uGuru have I assembled through
my weak knowledge in "backwards engineering".
And just for the record, you may have noticed uGuru isn't a chip developed by
-Abit, as they claim it to be. It's really just an microprocessor (uC) created by
+Abit, as they claim it to be. It's really just a microprocessor (uC) created by
Winbond (W83L950D). And no, reading the manual for this specific uC or
-mailing Windbond for help won't give any useful data about uGuru, as it is
+mailing Winbond for help won't give any useful data about uGuru, as it is
the program inside the uC that is responding to calls.
Olle Sandberg <ollebull@gmail.com>, 2005-05-25
@@ -35,7 +35,7 @@ As far as known the uGuru is always placed at and using the (ISA) I/O-ports
ports are holding for detection. We will refer to 0xE0 as CMD (command-port)
and 0xE4 as DATA because Abit refers to them with these names.
-If DATA holds 0x00 or 0x08 and CMD holds 0x00 or 0xAC an uGuru could be
+If DATA holds 0x00 or 0x08 and CMD holds 0x00 or 0xAC a uGuru could be
present. We have to check for two different values at data-port, because
after a reboot uGuru will hold 0x00 here, but if the driver is removed and
later on attached again data-port will hold 0x08, more about this later.
@@ -46,7 +46,7 @@ have to test CMD for two different values. On these uGuru's DATA will initially
hold 0x09 and will only hold 0x08 after reading CMD first, so CMD must be read
first!
-To be really sure an uGuru is present a test read of one or more register
+To be really sure a uGuru is present a test read of one or more register
sets should be done.
diff --git a/Documentation/hwmon/abituguru.rst b/Documentation/hwmon/abituguru.rst
index cfda60b757ce..4a5ee16b1048 100644
--- a/Documentation/hwmon/abituguru.rst
+++ b/Documentation/hwmon/abituguru.rst
@@ -40,7 +40,7 @@ Supported chips:
.. [2] There is a separate abituguru3 driver for these motherboards,
the abituguru (without the 3 !) driver will not work on these
- motherboards (and visa versa)!
+ motherboards (and vice versa)!
Authors:
- Hans de Goede <j.w.r.degoede@hhs.nl>,
diff --git a/Documentation/hwmon/asus_ec_sensors.rst b/Documentation/hwmon/asus_ec_sensors.rst
index 739636cf7994..d2be9db29614 100644
--- a/Documentation/hwmon/asus_ec_sensors.rst
+++ b/Documentation/hwmon/asus_ec_sensors.rst
@@ -6,6 +6,7 @@ Kernel driver asus_ec_sensors
Supported boards:
* PRIME X470-PRO
* PRIME X570-PRO
+ * PRIME X670E-PRO WIFI
* Pro WS X570-ACE
* ProArt X570-CREATOR WIFI
* ProArt X670E-CREATOR WIFI
diff --git a/Documentation/hwmon/cgbc-hwmon.rst b/Documentation/hwmon/cgbc-hwmon.rst
new file mode 100644
index 000000000000..3a5e6e6e8639
--- /dev/null
+++ b/Documentation/hwmon/cgbc-hwmon.rst
@@ -0,0 +1,63 @@
+.. SPDX-License-Identifier: GPL-2.0-or-later
+
+Kernel driver cgbc-hwmon
+========================
+
+Supported chips:
+
+ * Congatec Board Controller.
+
+ Prefix: 'cgbc-hwmon'
+
+Author: Thomas Richard <thomas.richard@bootlin.com>
+
+Description
+-----------
+
+This driver enables monitoring support for the Congatec Board Controller.
+This controller is embedded on the x86 SoMs of Congatec.
+
+Sysfs entries
+-------------
+
+The following list of sysfs entries contains all sensors defined in the Board
+Controller. The available sensors in sysfs depend on the SoM and the
+system.
+
+============= ======================
+Name Description
+============= ======================
+temp1_input CPU temperature
+temp2_input Box temperature
+temp3_input Ambient temperature
+temp4_input Board temperature
+temp5_input Carrier temperature
+temp6_input Chipset temperature
+temp7_input Video temperature
+temp8_input Other temperature
+temp9_input TOPDIM temperature
+temp10_input BOTTOMDIM temperature
+in0_input CPU voltage
+in1_input DC Runtime voltage
+in2_input DC Standby voltage
+in3_input CMOS Battery voltage
+in4_input Battery voltage
+in5_input AC voltage
+in6_input Other voltage
+in7_input 5V voltage
+in8_input 5V Standby voltage
+in9_input 3V3 voltage
+in10_input 3V3 Standby voltage
+in11_input VCore A voltage
+in12_input VCore B voltage
+in13_input 12V voltage
+curr1_input DC current
+curr2_input 5V current
+curr3_input 12V current
+fan1_input CPU fan
+fan2_input Box fan
+fan3_input Ambient fan
+fan4_input Chipset fan
+fan5_input Video fan
+fan6_input Other fan
+============= ======================
diff --git a/Documentation/hwmon/dell-smm-hwmon.rst b/Documentation/hwmon/dell-smm-hwmon.rst
index 74905675d71f..5a4edb6565cf 100644
--- a/Documentation/hwmon/dell-smm-hwmon.rst
+++ b/Documentation/hwmon/dell-smm-hwmon.rst
@@ -32,12 +32,12 @@ Temperature sensors and fans can be queried and set via the standard
=============================== ======= =======================================
Name Perm Description
=============================== ======= =======================================
-fan[1-3]_input RO Fan speed in RPM.
-fan[1-3]_label RO Fan label.
-fan[1-3]_min RO Minimal Fan speed in RPM
-fan[1-3]_max RO Maximal Fan speed in RPM
-fan[1-3]_target RO Expected Fan speed in RPM
-pwm[1-3] RW Control the fan PWM duty-cycle.
+fan[1-4]_input RO Fan speed in RPM.
+fan[1-4]_label RO Fan label.
+fan[1-4]_min RO Minimal Fan speed in RPM
+fan[1-4]_max RO Maximal Fan speed in RPM
+fan[1-4]_target RO Expected Fan speed in RPM
+pwm[1-4] RW Control the fan PWM duty-cycle.
pwm1_enable WO Enable or disable automatic BIOS fan
control (not supported on all laptops,
see below for details).
@@ -93,7 +93,7 @@ Again, when you find new codes, we'd be happy to have your patches!
---------------------------
The driver also exports the fans as thermal cooling devices with
-``type`` set to ``dell-smm-fan[1-3]``. This allows for easy fan control
+``type`` set to ``dell-smm-fan[1-4]``. This allows for easy fan control
using one of the thermal governors.
Module parameters
diff --git a/Documentation/hwmon/htu31.rst b/Documentation/hwmon/htu31.rst
new file mode 100644
index 000000000000..ccde84264643
--- /dev/null
+++ b/Documentation/hwmon/htu31.rst
@@ -0,0 +1,37 @@
+.. SPDX-License-Identifier: GPL-2.0-or-later
+
+Kernel driver HTU31
+====================
+
+Supported chips:
+
+ * Measurement Specialties HTU31
+
+ Prefix: 'htu31'
+
+ Addresses scanned: -
+
+ Datasheet: Publicly available from https://www.te.com/en/product-CAT-HSC0007.html
+
+Author:
+
+ - Andrei Lalaev <andrey.lalaev@gmail.com>
+
+Description
+-----------
+
+HTU31 is a humidity and temperature sensor.
+
+Supported temperature range is from -40 to 125 degrees Celsius.
+
+Communication with the device is performed via the I2C protocol. The sensor's default
+address is 0x40.
+
+sysfs-Interface
+---------------
+
+=================== =================
+temp1_input: temperature input
+humidity1_input: humidity input
+heater_enable: heater control
+=================== =================
diff --git a/Documentation/hwmon/ina233.rst b/Documentation/hwmon/ina233.rst
new file mode 100644
index 000000000000..42323162e6db
--- /dev/null
+++ b/Documentation/hwmon/ina233.rst
@@ -0,0 +1,75 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+Kernel driver ina233
+====================
+
+Supported chips:
+
+ * TI INA233
+
+ Prefix: 'ina233'
+
+ * Datasheet
+
+ Publicly available at the TI website: https://www.ti.com/lit/ds/symlink/ina233.pdf
+
+Author: Leo Yang <leo.yang.sy0@gmail.com>
+
+Usage Notes
+-----------
+
+The shunt resistor value can be configured by a device tree property;
+see Documentation/devicetree/bindings/hwmon/ti,ina2xx.yaml for details.
+
+
+Description
+-----------
+
+This driver supports hardware monitoring for TI INA233.
+
+The driver is a client driver to the core PMBus driver. Please see
+Documentation/hwmon/pmbus.rst for details on PMBus client drivers.
+
+The driver provides the following attributes for input voltage:
+
+**in1_input**
+
+**in1_label**
+
+**in1_max**
+
+**in1_max_alarm**
+
+**in1_min**
+
+**in1_min_alarm**
+
+The driver provides the following attributes for shunt voltage:
+
+**in2_input**
+
+**in2_label**
+
+The driver provides the following attributes for output voltage:
+
+**in3_input**
+
+**in3_label**
+
+**in3_alarm**
+
+The driver provides the following attributes for output current:
+
+**curr1_input**
+
+**curr1_label**
+
+**curr1_max**
+
+**curr1_max_alarm**
+
+The driver provides the following attributes for input power:
+
+**power1_input**
+
+**power1_label**
diff --git a/Documentation/hwmon/index.rst b/Documentation/hwmon/index.rst
index 874f8fd26325..f0ddf6222c44 100644
--- a/Documentation/hwmon/index.rst
+++ b/Documentation/hwmon/index.rst
@@ -53,6 +53,7 @@ Hardware Monitoring Kernel Drivers
bel-pfe
bpa-rs600
bt1-pvt
+ cgbc-hwmon
chipcap2
coretemp
corsair-cpro
@@ -85,11 +86,13 @@ Hardware Monitoring Kernel Drivers
hih6130
hp-wmi-sensors
hs3001
+ htu31
ibmaem
ibm-cffps
ibmpowernv
ina209
ina2xx
+ ina233
ina238
ina3221
inspur-ipsps1
diff --git a/Documentation/hwmon/lm90.rst b/Documentation/hwmon/lm90.rst
index 23af17a0ab44..98452eed16d5 100644
--- a/Documentation/hwmon/lm90.rst
+++ b/Documentation/hwmon/lm90.rst
@@ -365,6 +365,34 @@ Supported chips:
Datasheet: Not publicly available, can be requested from Nuvoton
+ * Nuvoton NCT7716
+
+ Prefix: 'nct7716'
+
+ Addresses scanned: I2C 0x48, 0x49
+
+ Datasheet: Not publicly available, can be requested from Nuvoton
+
+ * Nuvoton NCT7717
+
+ Prefix: 'nct7717'
+
+ Addresses scanned: I2C 0x48
+
+ Datasheet: Publicly available at Nuvoton website
+
+ https://www.nuvoton.com/resource-files/Nuvoton_NCT7717U_Datasheet_V111.pdf
+
+ * Nuvoton NCT7718
+
+ Prefix: 'nct7718'
+
+ Addresses scanned: I2C 0x4c
+
+ Datasheet: Publicly available at Nuvoton website
+
+ https://www.nuvoton.com/resource-files/Nuvoton_NCT7718W_Datasheet_V11.pdf
+
* Philips/NXP SA56004X
Prefix: 'sa56004'
@@ -573,6 +601,21 @@ W83L771AWG/ASG
* The AWG and ASG variants only differ in package format.
* Diode ideality factor configuration (remote sensor) at 0xE3
+NCT7716:
+ * 8 bit sensor resolution
+ * Selectable address
+ * Configurable conversion rate
+
+NCT7717:
+ * 8 bit sensor resolution
+ * Configurable conversion rate
+
+NCT7718:
+ * Temperature offset register for remote temperature sensor
+ * 11 bit resolution for remote temperature sensor
+ * Low temperature limits
+ * Configurable conversion rate
+
SA56004X:
* Better local resolution
diff --git a/Documentation/hwmon/ltc2978.rst b/Documentation/hwmon/ltc2978.rst
index 651ca4904c66..0b72d566cac7 100644
--- a/Documentation/hwmon/ltc2978.rst
+++ b/Documentation/hwmon/ltc2978.rst
@@ -5,6 +5,22 @@ Kernel driver ltc2978
Supported chips:
+ * Analog Devices LT7170
+
+ Prefix: 'lt7170'
+
+ Addresses scanned: -
+
+ Datasheet: https://www.analog.com/en/products/lt7170.html
+
+ * Analog Devices LT7171
+
+ Prefix: 'lt7171'
+
+ Addresses scanned: -
+
+ Datasheet: https://www.analog.com/en/products/lt7171.html
+
* Linear Technology LTC2972
Prefix: 'ltc2972'
@@ -151,6 +167,14 @@ Supported chips:
Datasheet: https://www.analog.com/en/products/ltm4644
+ * Linear Technology LTM4673
+
+ Prefix: 'ltm4673'
+
+ Addresses scanned: -
+
+ Datasheet: https://www.analog.com/en/products/ltm4673
+
* Linear Technology LTM4675
Prefix: 'ltm4675'
@@ -215,6 +239,8 @@ Author: Guenter Roeck <linux@roeck-us.net>
Description
-----------
+- LT7170 and LT7171 are 20 A, 16 V, single- or dual-phase Silent Switcher
+  step-down regulators with Digital Power System Management.
- LTC2974 and LTC2975 are quad digital power supply managers.
- LTC2978 is an octal power supply monitor.
- LTC2977 is a pin compatible replacement for LTC2978.
@@ -292,6 +318,7 @@ in1_reset_history Reset input voltage history.
in[N]_label "vout[1-8]".
+ - LT7170, LT7171: N=2
- LTC2972: N=2-3
- LTC2974, LTC2975: N=2-5
- LTC2977, LTC2979, LTC2980, LTM2987: N=2-9
@@ -330,6 +357,8 @@ in[N]_reset_history Reset output voltage history.
temp[N]_input Measured temperature.
+ - On LT7170 and LT7171, temp1 reports the chip
+ temperature.
- On LTC2972, temp[1-2] report external temperatures,
and temp 3 reports the chip temperature.
- On LTC2974 and LTC2975, temp[1-4] report external
@@ -403,9 +432,9 @@ power[N]_input Measured output power.
curr1_label "iin".
- LTC3880, LTC3883, LTC3884, LTC3886, LTC3887, LTC3889,
- LTM4644, LTM4675, LTM4676, LTM4677, LTM4678, LTM4680,
- and LTM4700 only.
+ LT7170, LT7171, LTC3880, LTC3883, LTC3884, LTC3886,
+ LTC3887, LTC3889, LTM4644, LTM4675, LTM4676, LTM4677,
+ LTM4678, LTM4680, and LTM4700 only.
curr1_input Measured input current.
@@ -423,6 +452,7 @@ curr1_reset_history Reset input current history.
curr[N]_label "iout[1-4]".
+ - LT7170, LT7171: N=1
- LTC2972: N-1-2
- LTC2974, LTC2975: N=1-4
- LTC2977, LTC2979, LTC2980, LTM2987: not supported
diff --git a/Documentation/hwmon/nct6683.rst b/Documentation/hwmon/nct6683.rst
index 8d4a20d99e59..3e549ba95a15 100644
--- a/Documentation/hwmon/nct6683.rst
+++ b/Documentation/hwmon/nct6683.rst
@@ -3,7 +3,7 @@ Kernel driver nct6683
Supported chips:
- * Nuvoton NCT6683D/NCT6687D
+ * Nuvoton NCT6683D/NCT6686D/NCT6687D
Prefix: 'nct6683'
@@ -61,6 +61,7 @@ Board Firmware version
Intel DH87RL NCT6683D EC firmware version 1.0 build 04/03/13
Intel DH87MC NCT6683D EC firmware version 1.0 build 04/03/13
Intel DB85FL NCT6683D EC firmware version 1.0 build 04/03/13
+AMD BC-250 NCT6686D EC firmware version 1.0 build 07/28/21
ASRock X570 NCT6683D EC firmware version 1.0 build 06/28/19
ASRock X670E NCT6686D EC firmware version 1.0 build 05/19/22
ASRock B650 Steel Legend WiFi NCT6686D EC firmware version 1.0 build 11/09/23
diff --git a/Documentation/iio/iio_devbuf.rst b/Documentation/iio/iio_devbuf.rst
index 9919e4792d0e..dca1f0200b0d 100644
--- a/Documentation/iio/iio_devbuf.rst
+++ b/Documentation/iio/iio_devbuf.rst
@@ -148,5 +148,5 @@ applied), however there are corner cases in which the buffered data may be found
in a processed form. Please note that these corner cases are not addressed by
this documentation.
-Please see ``Documentation/ABI/testing/sysfs-bus-iio`` for a complete
+Please see Documentation/ABI/testing/sysfs-bus-iio for a complete
description of the attributes.
diff --git a/Documentation/input/devices/elantech.rst b/Documentation/input/devices/elantech.rst
index c3374a7ce7af..98163a258b83 100644
--- a/Documentation/input/devices/elantech.rst
+++ b/Documentation/input/devices/elantech.rst
@@ -556,7 +556,7 @@ Note on debounce:
In case the box has unstable power supply or other electricity issues, or
when number of finger changes, F/W would send "debounce packet" to inform
driver that the hardware is in debounce status.
-The debouce packet has the following signature::
+The debounce packet has the following signature::
byte 0: 0xc4
byte 1: 0xff
diff --git a/Documentation/input/input-programming.rst b/Documentation/input/input-programming.rst
index c9264814c7aa..2b3e6a34e34b 100644
--- a/Documentation/input/input-programming.rst
+++ b/Documentation/input/input-programming.rst
@@ -346,3 +346,22 @@ driver can handle these events, it has to set the respective bits in evbit,
This callback routine can be called from an interrupt or a BH (although that
isn't a rule), and thus must not sleep, and must not take too long to finish.
+
+Polled input devices
+~~~~~~~~~~~~~~~~~~~~
+
+Input polling is set up by passing an input device struct and a callback to
+the function::
+
+ int input_setup_polling(struct input_dev *dev,
+ void (*poll_fn)(struct input_dev *dev))
+
+Within the callback, drivers should use the regular input_report_* functions
+and input_sync(), just as they would for any other input device.
+
+There is also the function::
+
+ void input_set_poll_interval(struct input_dev *dev, unsigned int interval)
+
+which is used to configure the interval, in milliseconds, at which the device
+will be polled.
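
A minimal polled-driver sketch putting both calls together (``my_read_button()``
and the surrounding platform-device glue are hypothetical)::

    #include <linux/input.h>
    #include <linux/platform_device.h>

    static void my_poll(struct input_dev *input)
    {
            int pressed = my_read_button();         /* hypothetical hardware read */

            input_report_key(input, KEY_POWER, pressed);
            input_sync(input);
    }

    static int my_probe(struct platform_device *pdev)
    {
            struct input_dev *input = devm_input_allocate_device(&pdev->dev);
            int error;

            if (!input)
                    return -ENOMEM;

            input_set_capability(input, EV_KEY, KEY_POWER);

            error = input_setup_polling(input, my_poll);
            if (error)
                    return error;

            input_set_poll_interval(input, 100);    /* poll every 100 ms */

            return input_register_device(input);
    }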
diff --git a/Documentation/livepatch/module-elf-format.rst b/Documentation/livepatch/module-elf-format.rst
index a03ed02ec57e..5d48778d4dfc 100644
--- a/Documentation/livepatch/module-elf-format.rst
+++ b/Documentation/livepatch/module-elf-format.rst
@@ -217,16 +217,19 @@ livepatch relocation section refer to their respective symbols with their symbol
indices, and the original symbol indices (and thus the symtab ordering) must be
preserved in order for apply_relocate_add() to find the right symbol.
-For example, take this particular rela from a livepatch module:::
+For example, take this particular rela from a livepatch module::
Relocation section '.klp.rela.btrfs.text.btrfs_feature_attr_show' at offset 0x2ba0 contains 4 entries:
Offset Info Type Symbol's Value Symbol's Name + Addend
000000000000001f 0000005e00000002 R_X86_64_PC32 0000000000000000 .klp.sym.vmlinux.printk,0 - 4
- This rela refers to the symbol '.klp.sym.vmlinux.printk,0', and the symbol index is encoded
- in 'Info'. Here its symbol index is 0x5e, which is 94 in decimal, which refers to the
- symbol index 94.
- And in this patch module's corresponding symbol table, symbol index 94 refers to that very symbol:
+This rela refers to the symbol '.klp.sym.vmlinux.printk,0', and the symbol
+index is encoded in 'Info'. Here its symbol index is 0x5e, which is 94 in
+decimal, which refers to the symbol index 94.
+
+And in this patch module's corresponding symbol table, symbol index 94 refers
+to that very symbol::
+
[ snip ]
94: 0000000000000000 0 NOTYPE GLOBAL DEFAULT OS [0xff20] .klp.sym.vmlinux.printk,0
[ snip ]
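
To double-check the decoding by hand, the standard ELF macros can be used from
a small userspace program (a standalone sketch, unrelated to the livepatch
tooling itself)::

    #include <elf.h>
    #include <stdio.h>

    int main(void)
    {
            Elf64_Xword info = 0x0000005e00000002ULL;

            /* upper 32 bits: symbol index, lower 32 bits: relocation type */
            printf("symbol index: %llu\n", (unsigned long long)ELF64_R_SYM(info));  /* 94 */
            printf("type:         %llu\n", (unsigned long long)ELF64_R_TYPE(info)); /* 2 = R_X86_64_PC32 */
            return 0;
    }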
diff --git a/Documentation/mm/split_page_table_lock.rst b/Documentation/mm/split_page_table_lock.rst
index 8e1ceb0a6619..cc3cd46abd1b 100644
--- a/Documentation/mm/split_page_table_lock.rst
+++ b/Documentation/mm/split_page_table_lock.rst
@@ -4,7 +4,7 @@ Split page table lock
Originally, mm->page_table_lock spinlock protected all page tables of the
mm_struct. But this approach leads to poor page fault scalability of
-multi-threaded applications due high contention on the lock. To improve
+multi-threaded applications due to high contention on the lock. To improve
scalability, split page table lock was introduced.
With split page table lock we have separate per-table lock to serialize
diff --git a/Documentation/netlink/genetlink-c.yaml b/Documentation/netlink/genetlink-c.yaml
index 44f2226160ca..96fa1f1522ed 100644
--- a/Documentation/netlink/genetlink-c.yaml
+++ b/Documentation/netlink/genetlink-c.yaml
@@ -161,7 +161,7 @@ properties:
type: string
type: &attr-type
enum: [ unused, pad, flag, binary,
- uint, sint, u8, u16, u32, u64, s32, s64,
+ uint, sint, u8, u16, u32, u64, s8, s16, s32, s64,
string, nest, indexed-array, nest-type-value ]
doc:
description: Documentation of the attribute.
diff --git a/Documentation/netlink/genetlink-legacy.yaml b/Documentation/netlink/genetlink-legacy.yaml
index ed64acf1bef7..a8c5b521937d 100644
--- a/Documentation/netlink/genetlink-legacy.yaml
+++ b/Documentation/netlink/genetlink-legacy.yaml
@@ -152,6 +152,9 @@ properties:
the right formatting mechanism when displaying values of this
type.
enum: [ hex, mac, fddi, ipv4, ipv6, uuid ]
+ struct:
+ description: Name of the nested struct type.
+ type: string
# End genetlink-legacy
attribute-sets:
@@ -204,7 +207,7 @@ properties:
type: &attr-type
description: The netlink attribute type
enum: [ unused, pad, flag, binary, bitfield32,
- uint, sint, u8, u16, u32, u64, s32, s64,
+ uint, sint, u8, u16, u32, u64, s8, s16, s32, s64,
string, nest, indexed-array, nest-type-value ]
doc:
description: Documentation of the attribute.
diff --git a/Documentation/netlink/genetlink.yaml b/Documentation/netlink/genetlink.yaml
index e43e50dba2e4..40efbbad76ab 100644
--- a/Documentation/netlink/genetlink.yaml
+++ b/Documentation/netlink/genetlink.yaml
@@ -124,7 +124,7 @@ properties:
type: string
type: &attr-type
enum: [ unused, pad, flag, binary,
- uint, sint, u8, u16, u32, u64, s32, s64,
+ uint, sint, u8, u16, u32, u64, s8, s16, s32, s64,
string, nest, indexed-array, nest-type-value ]
doc:
description: Documentation of the attribute.
diff --git a/Documentation/netlink/specs/conntrack.yaml b/Documentation/netlink/specs/conntrack.yaml
new file mode 100644
index 000000000000..840dc4504216
--- /dev/null
+++ b/Documentation/netlink/specs/conntrack.yaml
@@ -0,0 +1,643 @@
+# SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)
+
+name: conntrack
+protocol: netlink-raw
+protonum: 12
+
+doc:
+ Netfilter connection tracking subsystem over nfnetlink
+
+definitions:
+ -
+ name: nfgenmsg
+ type: struct
+ members:
+ -
+ name: nfgen-family
+ type: u8
+ -
+ name: version
+ type: u8
+ -
+ name: res-id
+ byte-order: big-endian
+ type: u16
+ -
+ name: nf-ct-tcp-flags-mask
+ type: struct
+ members:
+ -
+ name: flags
+ type: u8
+ enum: nf-ct-tcp-flags
+ enum-as-flags: true
+ -
+ name: mask
+ type: u8
+ enum: nf-ct-tcp-flags
+ enum-as-flags: true
+ -
+ name: nf-ct-tcp-flags
+ type: flags
+ entries:
+ - window-scale
+ - sack-perm
+ - close-init
+ - be-liberal
+ - unacked
+ - maxack
+ - challenge-ack
+ - simultaneous-open
+ -
+ name: nf-ct-tcp-state
+ type: enum
+ entries:
+ - none
+ - syn-sent
+ - syn-recv
+ - established
+ - fin-wait
+ - close-wait
+ - last-ack
+ - time-wait
+ - close
+ - syn-sent2
+ - max
+ - ignore
+ - retrans
+ - unack
+ - timeout-max
+ -
+ name: nf-ct-sctp-state
+ type: enum
+ entries:
+ - none
+ - closed
+ - cookie-wait
+ - cookie-echoed
+ - established
+ - shutdown-sent
+ - shutdown-received
+ - shutdown-ack-sent
+ - shutdown-heartbeat-sent
+ -
+ name: nf-ct-status
+ type: flags
+ entries:
+ - expected
+ - seen-reply
+ - assured
+ - confirmed
+ - src-nat
+ - dst-nat
+ - seq-adj
+ - src-nat-done
+ - dst-nat-done
+ - dying
+ - fixed-timeout
+ - template
+ - nat-clash
+ - helper
+ - offload
+ - hw-offload
+
+attribute-sets:
+ -
+ name: counter-attrs
+ attributes:
+ -
+ name: packets
+ type: u64
+ byte-order: big-endian
+ -
+ name: bytes
+ type: u64
+ byte-order: big-endian
+ -
+ name: packets-old
+ type: u32
+ -
+ name: bytes-old
+ type: u32
+ -
+ name: pad
+ type: pad
+ -
+ name: tuple-proto-attrs
+ attributes:
+ -
+ name: proto-num
+ type: u8
+ doc: l4 protocol number
+ -
+ name: proto-src-port
+ type: u16
+ byte-order: big-endian
+ doc: l4 source port
+ -
+ name: proto-dst-port
+ type: u16
+ byte-order: big-endian
+ doc: l4 destination port
+ -
+ name: proto-icmp-id
+ type: u16
+ byte-order: big-endian
+ doc: l4 icmp id
+ -
+ name: proto-icmp-type
+ type: u8
+ -
+ name: proto-icmp-code
+ type: u8
+ -
+ name: proto-icmpv6-id
+ type: u16
+ byte-order: big-endian
+ doc: l4 icmp id
+ -
+ name: proto-icmpv6-type
+ type: u8
+ -
+ name: proto-icmpv6-code
+ type: u8
+ -
+ name: tuple-ip-attrs
+ attributes:
+ -
+ name: ip-v4-src
+ type: u32
+ byte-order: big-endian
+ display-hint: ipv4
+ doc: ipv4 source address
+ -
+ name: ip-v4-dst
+ type: u32
+ byte-order: big-endian
+ display-hint: ipv4
+ doc: ipv4 destination address
+ -
+ name: ip-v6-src
+ type: binary
+ checks:
+ min-len: 16
+ byte-order: big-endian
+ display-hint: ipv6
+ doc: ipv6 source address
+ -
+ name: ip-v6-dst
+ type: binary
+ checks:
+ min-len: 16
+ byte-order: big-endian
+ display-hint: ipv6
+ doc: ipv6 destination address
+ -
+ name: tuple-attrs
+ attributes:
+ -
+ name: tuple-ip
+ type: nest
+ nested-attributes: tuple-ip-attrs
+ doc: conntrack l3 information
+ -
+ name: tuple-proto
+ type: nest
+ nested-attributes: tuple-proto-attrs
+ doc: conntrack l4 information
+ -
+ name: tuple-zone
+ type: u16
+ byte-order: big-endian
+ doc: conntrack zone id
+ -
+ name: protoinfo-tcp-attrs
+ attributes:
+ -
+ name: tcp-state
+ type: u8
+ enum: nf-ct-tcp-state
+ doc: tcp connection state
+ -
+ name: tcp-wscale-original
+ type: u8
+ doc: window scaling factor in original direction
+ -
+ name: tcp-wscale-reply
+ type: u8
+ doc: window scaling factor in reply direction
+ -
+ name: tcp-flags-original
+ type: binary
+ struct: nf-ct-tcp-flags-mask
+ -
+ name: tcp-flags-reply
+ type: binary
+ struct: nf-ct-tcp-flags-mask
+ -
+ name: protoinfo-dccp-attrs
+ attributes:
+ -
+ name: dccp-state
+ type: u8
+ doc: dccp connection state
+ -
+ name: dccp-role
+ type: u8
+ -
+ name: dccp-handshake-seq
+ type: u64
+ byte-order: big-endian
+ -
+ name: dccp-pad
+ type: pad
+ -
+ name: protoinfo-sctp-attrs
+ attributes:
+ -
+ name: sctp-state
+ type: u8
+ doc: sctp connection state
+ enum: nf-ct-sctp-state
+ -
+ name: vtag-original
+ type: u32
+ byte-order: big-endian
+ -
+ name: vtag-reply
+ type: u32
+ byte-order: big-endian
+ -
+ name: protoinfo-attrs
+ attributes:
+ -
+ name: protoinfo-tcp
+ type: nest
+ nested-attributes: protoinfo-tcp-attrs
+ doc: conntrack tcp state information
+ -
+ name: protoinfo-dccp
+ type: nest
+ nested-attributes: protoinfo-dccp-attrs
+ doc: conntrack dccp state information
+ -
+ name: protoinfo-sctp
+ type: nest
+ nested-attributes: protoinfo-sctp-attrs
+ doc: conntrack sctp state information
+ -
+ name: help-attrs
+ attributes:
+ -
+ name: help-name
+ type: string
+ doc: helper name
+ -
+ name: nat-proto-attrs
+ attributes:
+ -
+ name: nat-port-min
+ type: u16
+ byte-order: big-endian
+ -
+ name: nat-port-max
+ type: u16
+ byte-order: big-endian
+ -
+ name: nat-attrs
+ attributes:
+ -
+ name: nat-v4-minip
+ type: u32
+ byte-order: big-endian
+ -
+ name: nat-v4-maxip
+ type: u32
+ byte-order: big-endian
+ -
+ name: nat-v6-minip
+ type: binary
+ -
+ name: nat-v6-maxip
+ type: binary
+ -
+ name: nat-proto
+ type: nest
+ nested-attributes: nat-proto-attrs
+ -
+ name: seqadj-attrs
+ attributes:
+ -
+ name: correction-pos
+ type: u32
+ byte-order: big-endian
+ -
+ name: offset-before
+ type: u32
+ byte-order: big-endian
+ -
+ name: offset-after
+ type: u32
+ byte-order: big-endian
+ -
+ name: secctx-attrs
+ attributes:
+ -
+ name: secctx-name
+ type: string
+ -
+ name: synproxy-attrs
+ attributes:
+ -
+ name: isn
+ type: u32
+ byte-order: big-endian
+ -
+ name: its
+ type: u32
+ byte-order: big-endian
+ -
+ name: tsoff
+ type: u32
+ byte-order: big-endian
+ -
+ name: conntrack-attrs
+ attributes:
+ -
+ name: tuple-orig
+ type: nest
+ nested-attributes: tuple-attrs
+ doc: conntrack l3+l4 protocol information, original direction
+ -
+ name: tuple-reply
+ type: nest
+ nested-attributes: tuple-attrs
+ doc: conntrack l3+l4 protocol information, reply direction
+ -
+ name: status
+ type: u32
+ byte-order: big-endian
+ enum: nf-ct-status
+ enum-as-flags: true
+ doc: conntrack flag bits
+ -
+ name: protoinfo
+ type: nest
+ nested-attributes: protoinfo-attrs
+ -
+ name: help
+ type: nest
+ nested-attributes: help-attrs
+ -
+ name: nat-src
+ type: nest
+ nested-attributes: nat-attrs
+ -
+ name: timeout
+ type: u32
+ byte-order: big-endian
+ -
+ name: mark
+ type: u32
+ byte-order: big-endian
+ -
+ name: counters-orig
+ type: nest
+ nested-attributes: counter-attrs
+ -
+ name: counters-reply
+ type: nest
+ nested-attributes: counter-attrs
+ -
+ name: use
+ type: u32
+ byte-order: big-endian
+ -
+ name: id
+ type: u32
+ byte-order: big-endian
+ -
+ name: nat-dst
+ type: nest
+ nested-attributes: nat-attrs
+ -
+ name: tuple-master
+ type: nest
+ nested-attributes: tuple-attrs
+ -
+ name: seq-adj-orig
+ type: nest
+ nested-attributes: seqadj-attrs
+ -
+ name: seq-adj-reply
+ type: nest
+ nested-attributes: seqadj-attrs
+ -
+ name: secmark
+ type: binary
+ doc: obsolete
+ -
+ name: zone
+ type: u16
+ byte-order: big-endian
+ doc: conntrack zone id
+ -
+ name: secctx
+ type: nest
+ nested-attributes: secctx-attrs
+ -
+ name: timestamp
+ type: u64
+ byte-order: big-endian
+ -
+ name: mark-mask
+ type: u32
+ byte-order: big-endian
+ -
+ name: labels
+ type: binary
+ -
+ name: labels-mask
+ type: binary
+ -
+ name: synproxy
+ type: nest
+ nested-attributes: synproxy-attrs
+ -
+ name: filter
+ type: nest
+ nested-attributes: tuple-attrs
+ -
+ name: status-mask
+ type: u32
+ byte-order: big-endian
+ enum: nf-ct-status
+ enum-as-flags: true
+ doc: conntrack flag bits to change
+ -
+ name: timestamp-event
+ type: u64
+ byte-order: big-endian
+ -
+ name: conntrack-stats-attrs
+ attributes:
+ -
+ name: searched
+ type: u32
+ byte-order: big-endian
+ doc: obsolete
+ -
+ name: found
+ type: u32
+ byte-order: big-endian
+ -
+ name: new
+ type: u32
+ byte-order: big-endian
+ doc: obsolete
+ -
+ name: invalid
+ type: u32
+ byte-order: big-endian
+ doc: obsolete
+ -
+ name: ignore
+ type: u32
+ byte-order: big-endian
+ doc: obsolete
+ -
+ name: delete
+ type: u32
+ byte-order: big-endian
+ doc: obsolete
+ -
+ name: delete-list
+ type: u32
+ byte-order: big-endian
+ doc: obsolete
+ -
+ name: insert
+ type: u32
+ byte-order: big-endian
+ -
+ name: insert-failed
+ type: u32
+ byte-order: big-endian
+ -
+ name: drop
+ type: u32
+ byte-order: big-endian
+ -
+ name: early-drop
+ type: u32
+ byte-order: big-endian
+ -
+ name: error
+ type: u32
+ byte-order: big-endian
+ -
+ name: search-restart
+ type: u32
+ byte-order: big-endian
+ -
+ name: clash-resolve
+ type: u32
+ byte-order: big-endian
+ -
+ name: chain-toolong
+ type: u32
+ byte-order: big-endian
+
+operations:
+ enum-model: directional
+ list:
+ -
+ name: get
+ doc: get / dump entries
+ attribute-set: conntrack-attrs
+ fixed-header: nfgenmsg
+ do:
+ request:
+ value: 0x101
+ attributes:
+ - tuple-orig
+ - tuple-reply
+ - zone
+ reply:
+ value: 0x100
+ attributes:
+ - tuple-orig
+ - tuple-reply
+ - status
+ - protoinfo
+ - help
+ - nat-src
+ - nat-dst
+ - timeout
+ - mark
+ - counters-orig
+ - counters-reply
+ - use
+ - id
+ - nat-dst
+ - tuple-master
+ - seq-adj-orig
+ - seq-adj-reply
+ - zone
+ - secctx
+ - labels
+ - synproxy
+ dump:
+ request:
+ value: 0x101
+ attributes:
+ - nfgen-family
+ - mark
+ - filter
+ - status
+ - zone
+ reply:
+ value: 0x100
+ attributes:
+ - tuple-orig
+ - tuple-reply
+ - status
+ - protoinfo
+ - help
+ - nat-src
+ - nat-dst
+ - timeout
+ - mark
+ - counters-orig
+ - counters-reply
+ - use
+ - id
+ - nat-dst
+ - tuple-master
+ - seq-adj-orig
+ - seq-adj-reply
+ - zone
+ - secctx
+ - labels
+ - synproxy
+ -
+ name: get-stats
+ doc: dump pcpu conntrack stats
+ attribute-set: conntrack-stats-attrs
+ fixed-header: nfgenmsg
+ dump:
+ request:
+ value: 0x104
+ reply:
+ value: 0x104
+ attributes:
+ - searched
+ - found
+ - insert
+ - insert-failed
+ - drop
+ - early-drop
+ - error
+ - search-restart
+ - clash-resolve
+ - chain-toolong
diff --git a/Documentation/netlink/specs/devlink.yaml b/Documentation/netlink/specs/devlink.yaml
index 09fbb4c03fc8..bd9726269b4f 100644
--- a/Documentation/netlink/specs/devlink.yaml
+++ b/Documentation/netlink/specs/devlink.yaml
@@ -1868,6 +1868,7 @@ operations:
- info-version-fixed
- info-version-running
- info-version-stored
+ - info-board-serial-number
dump:
reply: *info-get-reply
diff --git a/Documentation/netlink/specs/netdev.yaml b/Documentation/netlink/specs/netdev.yaml
index 288923e965ae..f5e0750ab71d 100644
--- a/Documentation/netlink/specs/netdev.yaml
+++ b/Documentation/netlink/specs/netdev.yaml
@@ -70,6 +70,10 @@ definitions:
name: tx-checksum
doc:
L3 checksum HW offload is supported by the driver.
+ -
+ name: tx-launch-time-fifo
+ doc:
+ Launch time HW offload is supported by the driver.
-
name: queue-type
type: enum
@@ -277,6 +281,9 @@ attribute-sets:
processing, if event polling finds events
type: uint
-
+ name: xsk-info
+ attributes: []
+ -
name: queue
attributes:
-
@@ -294,6 +301,9 @@ attribute-sets:
-
name: type
doc: Queue type as rx, tx. Each queue type defines a separate ID space.
+ XDP TX queues allocated in the kernel are not linked to NAPIs and
+ thus not listed. AF_XDP queues will have more information set in
+ the xsk attribute.
type: u32
enum: queue-type
-
@@ -309,7 +319,11 @@ attribute-sets:
doc: io_uring memory provider information.
type: nest
nested-attributes: io-uring-provider-info
-
+ -
+ name: xsk
+ doc: XSK information for this queue, if any.
+ type: nest
+ nested-attributes: xsk-info
-
name: qstats
doc: |
@@ -457,6 +471,8 @@ attribute-sets:
name: tx-needs-csum
doc: |
Number of packets that required the device to calculate the checksum.
+ This counter includes the number of GSO wire packets for which the device
+ calculated the L4 checksum.
type: uint
-
name: tx-hw-gso-packets
@@ -652,6 +668,7 @@ operations:
- ifindex
- dmabuf
- io-uring
+ - xsk
dump:
request:
attributes:
@@ -728,8 +745,8 @@ operations:
- irq-suspend-timeout
kernel-family:
- headers: [ "linux/list.h"]
- sock-priv: struct list_head
+ headers: [ "net/netdev_netlink.h"]
+ sock-priv: struct netdev_nl_sock
mcast-groups:
list:
diff --git a/Documentation/netlink/specs/nl80211.yaml b/Documentation/netlink/specs/nl80211.yaml
new file mode 100644
index 000000000000..1ec49c3562cd
--- /dev/null
+++ b/Documentation/netlink/specs/nl80211.yaml
@@ -0,0 +1,2000 @@
+# SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)
+
+name: nl80211
+protocol: genetlink-legacy
+
+doc:
+ Netlink API for 802.11 wireless devices
+
+definitions:
+ -
+ name: commands
+ type: enum
+ entries:
+ - unspec
+ - get-wiphy
+ - set-wiphy
+ - new-wiphy
+ - del-wiphy
+ - get-interface
+ - set-interface
+ - new-interface
+ - del-interface
+ - get-key
+ - set-key
+ - new-key
+ - del-key
+ - get-beacon
+ - set-beacon
+ - new-beacon
+ - del-beacon
+ - get-station
+ - set-station
+ - new-station
+ - del-station
+ - get-mpath
+ - set-mpath
+ - new-mpath
+ - del-mpath
+ - set-bss
+ - set-reg
+ - req-set-reg
+ - get-mesh-config
+ - set-mesh-config
+ - set-mgmt-extra-ie
+ - get-reg
+ - get-scan
+ - trigger-scan
+ - new-scan-results
+ - scan-aborted
+ - reg-change
+ - authenticate
+ - associate
+ - deauthenticate
+ - disassociate
+ - michael-mic-failure
+ - reg-beacon-hint
+ - join-ibss
+ - leave-ibss
+ - testmode
+ - connect
+ - roam
+ - disconnect
+ - set-wiphy-netns
+ - get-survey
+ - new-survey-results
+ - set-pmksa
+ - del-pmksa
+ - flush-pmksa
+ - remain-on-channel
+ - cancel-remain-on-channel
+ - set-tx-bitrate-mask
+ - register-action
+ - action
+ - action-tx-status
+ - set-power-save
+ - get-power-save
+ - set-cqm
+ - notify-cqm
+ - set-channel
+ - set-wds-peer
+ - frame-wait-cancel
+ - join-mesh
+ - leave-mesh
+ - unprot-deauthenticate
+ - unprot-disassociate
+ - new-peer-candidate
+ - get-wowlan
+ - set-wowlan
+ - start-sched-scan
+ - stop-sched-scan
+ - sched-scan-results
+ - sched-scan-stopped
+ - set-rekey-offload
+ - pmksa-candidate
+ - tdls-oper
+ - tdls-mgmt
+ - unexpected-frame
+ - probe-client
+ - register-beacons
+ - unexpected-4-addr-frame
+ - set-noack-map
+ - ch-switch-notify
+ - start-p2p-device
+ - stop-p2p-device
+ - conn-failed
+ - set-mcast-rate
+ - set-mac-acl
+ - radar-detect
+ - get-protocol-features
+ - update-ft-ies
+ - ft-event
+ - crit-protocol-start
+ - crit-protocol-stop
+ - get-coalesce
+ - set-coalesce
+ - channel-switch
+ - vendor
+ - set-qos-map
+ - add-tx-ts
+ - del-tx-ts
+ - get-mpp
+ - join-ocb
+ - leave-ocb
+ - ch-switch-started-notify
+ - tdls-channel-switch
+ - tdls-cancel-channel-switch
+ - wiphy-reg-change
+ - abort-scan
+ - start-nan
+ - stop-nan
+ - add-nan-function
+ - del-nan-function
+ - change-nan-config
+ - nan-match
+ - set-multicast-to-unicast
+ - update-connect-params
+ - set-pmk
+ - del-pmk
+ - port-authorized
+ - reload-regdb
+ - external-auth
+ - sta-opmode-changed
+ - control-port-frame
+ - get-ftm-responder-stats
+ - peer-measurement-start
+ - peer-measurement-result
+ - peer-measurement-complete
+ - notify-radar
+ - update-owe-info
+ - probe-mesh-link
+ - set-tid-config
+ - unprot-beacon
+ - control-port-frame-tx-status
+ - set-sar-specs
+ - obss-color-collision
+ - color-change-request
+ - color-change-started
+ - color-change-aborted
+ - color-change-completed
+ - set-fils-aad
+ - assoc-comeback
+ - add-link
+ - remove-link
+ - add-link-sta
+ - modify-link-sta
+ - remove-link-sta
+ - set-hw-timestamp
+ - links-removed
+ - set-tid-to-link-mapping
+ -
+ name: feature-flags
+ type: flags
+ entries:
+ - sk-tx-status
+ - ht-ibss
+ - inactivity-timer
+ - cell-base-reg-hints
+ - p2p-device-needs-channel
+ - sae
+ - low-priority-scan
+ - scan-flush
+ - ap-scan
+ - vif-txpower
+ - need-obss-scan
+ - p2p-go-ctwin
+ - p2p-go-oppps
+ - reserved
+ - advertise-chan-limits
+ - full-ap-client-state
+ - userspace-mpm
+ - active-monitor
+ - ap-mode-chan-width-change
+ - ds-param-set-ie-in-probes
+ - wfa-tpc-ie-in-probes
+ - quiet
+ - tx-power-insertion
+ - ackto-estimation
+ - static-smps
+ - dynamic-smps
+ - supports-wmm-admission
+ - mac-on-create
+ - tdls-channel-switch
+ - scan-random-mac-addr
+ - sched-scan-random-mac-addr
+ - no-random-mac-addr
+ -
+ name: ieee80211-mcs-info
+ type: struct
+ members:
+ -
+ name: rx-mask
+ type: binary
+ len: 10
+ -
+ name: rx-highest
+ type: u16
+ byte-order: little-endian
+ -
+ name: tx-params
+ type: u8
+ -
+ name: reserved
+ type: binary
+ len: 3
+ -
+ name: ieee80211-vht-mcs-info
+ type: struct
+ members:
+ -
+ name: rx-mcs-map
+ type: u16
+ byte-order: little-endian
+ -
+ name: rx-highest
+ type: u16
+ byte-order: little-endian
+ -
+ name: tx-mcs-map
+ type: u16
+ byte-order: little-endian
+ -
+ name: tx-highest
+ type: u16
+ byte-order: little-endian
+ -
+ name: ieee80211-ht-cap
+ type: struct
+ members:
+ -
+ name: cap-info
+ type: u16
+ byte-order: little-endian
+ -
+ name: ampdu-params-info
+ type: u8
+ -
+ name: mcs
+ type: binary
+ struct: ieee80211-mcs-info
+ -
+ name: extended-ht-cap-info
+ type: u16
+ byte-order: little-endian
+ -
+ name: tx-bf-cap-info
+ type: u32
+ byte-order: little-endian
+ -
+ name: antenna-selection-info
+ type: u8
+ -
+ name: channel-type
+ type: enum
+ entries:
+ - no-ht
+ - ht20
+ - ht40minus
+ - ht40plus
+ -
+ name: sta-flag-update
+ type: struct
+ members:
+ -
+ name: mask
+ type: u32
+ -
+ name: set
+ type: u32
+ -
+ name: protocol-features
+ type: flags
+ entries:
+ - split-wiphy-dump
+
+attribute-sets:
+ -
+ name: nl80211-attrs
+ name-prefix: nl80211-attr-
+ enum-name: nl80211-attrs
+ attr-max-name: num-nl80211-attr
+ attributes:
+ -
+ name: wiphy
+ type: u32
+ -
+ name: wiphy-name
+ type: string
+ -
+ name: ifindex
+ type: u32
+ -
+ name: ifname
+ type: string
+ -
+ name: iftype
+ type: u32
+ -
+ name: mac
+ type: binary
+ display-hint: mac
+ -
+ name: key-data
+ type: binary
+ -
+ name: key-idx
+ type: u8
+ -
+ name: key-cipher
+ type: u32
+ -
+ name: key-seq
+ type: binary
+ -
+ name: key-default
+ type: flag
+ -
+ name: beacon-interval
+ type: u32
+ -
+ name: dtim-period
+ type: u32
+ -
+ name: beacon-head
+ type: binary
+ -
+ name: beacon-tail
+ type: binary
+ -
+ name: sta-aid
+ type: u16
+ -
+ name: sta-flags
+ type: binary # TODO: nest
+ -
+ name: sta-listen-interval
+ type: u16
+ -
+ name: sta-supported-rates
+ type: binary
+ -
+ name: sta-vlan
+ type: u32
+ -
+ name: sta-info
+ type: binary # TODO: nest
+ -
+ name: wiphy-bands
+ type: nest
+ nested-attributes: wiphy-bands
+ -
+ name: mntr-flags
+ type: binary # TODO: nest
+ -
+ name: mesh-id
+ type: binary
+ -
+ name: sta-plink-action
+ type: u8
+ -
+ name: mpath-next-hop
+ type: binary
+ display-hint: mac
+ -
+ name: mpath-info
+ type: binary # TODO: nest
+ -
+ name: bss-cts-prot
+ type: u8
+ -
+ name: bss-short-preamble
+ type: u8
+ -
+ name: bss-short-slot-time
+ type: u8
+ -
+ name: ht-capability
+ type: binary
+ -
+ name: supported-iftypes
+ type: nest
+ nested-attributes: supported-iftypes
+ -
+ name: reg-alpha2
+ type: binary
+ -
+ name: reg-rules
+ type: binary # TODO: nest
+ -
+ name: mesh-config
+ type: binary # TODO: nest
+ -
+ name: bss-basic-rates
+ type: binary
+ -
+ name: wiphy-txq-params
+ type: binary # TODO: nest
+ -
+ name: wiphy-freq
+ type: u32
+ -
+ name: wiphy-channel-type
+ type: u32
+ enum: channel-type
+ -
+ name: key-default-mgmt
+ type: flag
+ -
+ name: mgmt-subtype
+ type: u8
+ -
+ name: ie
+ type: binary
+ -
+ name: max-num-scan-ssids
+ type: u8
+ -
+ name: scan-frequencies
+ type: binary # TODO: nest
+ -
+ name: scan-ssids
+ type: binary # TODO: nest
+ -
+ name: generation
+ type: u32
+ -
+ name: bss
+ type: binary # TODO: nest
+ -
+ name: reg-initiator
+ type: u8
+ -
+ name: reg-type
+ type: u8
+ -
+ name: supported-commands
+ type: indexed-array
+ sub-type: u32
+ enum: commands
+ -
+ name: frame
+ type: binary
+ -
+ name: ssid
+ type: binary
+ -
+ name: auth-type
+ type: u32
+ -
+ name: reason-code
+ type: u16
+ -
+ name: key-type
+ type: u32
+ -
+ name: max-scan-ie-len
+ type: u16
+ -
+ name: cipher-suites
+ type: binary
+ sub-type: u32
+ display-hint: hex
+ -
+ name: freq-before
+ type: binary # TODO: nest
+ -
+ name: freq-after
+ type: binary # TODO: nest
+ -
+ name: freq-fixed
+ type: flag
+ -
+ name: wiphy-retry-short
+ type: u8
+ -
+ name: wiphy-retry-long
+ type: u8
+ -
+ name: wiphy-frag-threshold
+ type: u32
+ -
+ name: wiphy-rts-threshold
+ type: u32
+ -
+ name: timed-out
+ type: flag
+ -
+ name: use-mfp
+ type: u32
+ -
+ name: sta-flags2
+ type: binary
+ struct: sta-flag-update
+ -
+ name: control-port
+ type: flag
+ -
+ name: testdata
+ type: binary
+ -
+ name: privacy
+ type: flag
+ -
+ name: disconnected-by-ap
+ type: flag
+ -
+ name: status-code
+ type: u16
+ -
+ name: cipher-suites-pairwise
+ type: binary
+ -
+ name: cipher-suite-group
+ type: u32
+ -
+ name: wpa-versions
+ type: u32
+ -
+ name: akm-suites
+ type: binary
+ -
+ name: req-ie
+ type: binary
+ -
+ name: resp-ie
+ type: binary
+ -
+ name: prev-bssid
+ type: binary
+ -
+ name: key
+ type: binary # TODO: nest
+ -
+ name: keys
+ type: binary # TODO: nest
+ -
+ name: pid
+ type: u32
+ -
+ name: 4addr
+ type: u8
+ -
+ name: survey-info
+ type: binary # TODO: nest
+ -
+ name: pmkid
+ type: binary
+ -
+ name: max-num-pmkids
+ type: u8
+ -
+ name: duration
+ type: u32
+ -
+ name: cookie
+ type: u64
+ -
+ name: wiphy-coverage-class
+ type: u8
+ -
+ name: tx-rates
+ type: binary # TODO: nest
+ -
+ name: frame-match
+ type: binary
+ -
+ name: ack
+ type: flag
+ -
+ name: ps-state
+ type: u32
+ -
+ name: cqm
+ type: binary # TODO: nest
+ -
+ name: local-state-change
+ type: flag
+ -
+ name: ap-isolate
+ type: u8
+ -
+ name: wiphy-tx-power-setting
+ type: u32
+ -
+ name: wiphy-tx-power-level
+ type: u32
+ -
+ name: tx-frame-types
+ type: nest
+ nested-attributes: iftype-attrs
+ -
+ name: rx-frame-types
+ type: nest
+ nested-attributes: iftype-attrs
+ -
+ name: frame-type
+ type: u16
+ -
+ name: control-port-ethertype
+ type: flag
+ -
+ name: control-port-no-encrypt
+ type: flag
+ -
+ name: support-ibss-rsn
+ type: flag
+ -
+ name: wiphy-antenna-tx
+ type: u32
+ -
+ name: wiphy-antenna-rx
+ type: u32
+ -
+ name: mcast-rate
+ type: u32
+ -
+ name: offchannel-tx-ok
+ type: flag
+ -
+ name: bss-ht-opmode
+ type: u16
+ -
+ name: key-default-types
+ type: binary # TODO: nest
+ -
+ name: max-remain-on-channel-duration
+ type: u32
+ -
+ name: mesh-setup
+ type: binary # TODO: nest
+ -
+ name: wiphy-antenna-avail-tx
+ type: u32
+ -
+ name: wiphy-antenna-avail-rx
+ type: u32
+ -
+ name: support-mesh-auth
+ type: flag
+ -
+ name: sta-plink-state
+ type: u8
+ -
+ name: wowlan-triggers
+ type: binary # TODO: nest
+ -
+ name: wowlan-triggers-supported
+ type: nest
+ nested-attributes: wowlan-triggers-attrs
+ -
+ name: sched-scan-interval
+ type: u32
+ -
+ name: interface-combinations
+ type: indexed-array
+ sub-type: nest
+ nested-attributes: if-combination-attributes
+ -
+ name: software-iftypes
+ type: nest
+ nested-attributes: supported-iftypes
+ -
+ name: rekey-data
+ type: binary # TODO: nest
+ -
+ name: max-num-sched-scan-ssids
+ type: u8
+ -
+ name: max-sched-scan-ie-len
+ type: u16
+ -
+ name: scan-supp-rates
+ type: binary # TODO: nest
+ -
+ name: hidden-ssid
+ type: u32
+ -
+ name: ie-probe-resp
+ type: binary
+ -
+ name: ie-assoc-resp
+ type: binary
+ -
+ name: sta-wme
+ type: binary # TODO: nest
+ -
+ name: support-ap-uapsd
+ type: flag
+ -
+ name: roam-support
+ type: flag
+ -
+ name: sched-scan-match
+ type: binary # TODO: nest
+ -
+ name: max-match-sets
+ type: u8
+ -
+ name: pmksa-candidate
+ type: binary # TODO: nest
+ -
+ name: tx-no-cck-rate
+ type: flag
+ -
+ name: tdls-action
+ type: u8
+ -
+ name: tdls-dialog-token
+ type: u8
+ -
+ name: tdls-operation
+ type: u8
+ -
+ name: tdls-support
+ type: flag
+ -
+ name: tdls-external-setup
+ type: flag
+ -
+ name: device-ap-sme
+ type: u32
+ -
+ name: dont-wait-for-ack
+ type: flag
+ -
+ name: feature-flags
+ type: u32
+ enum: feature-flags
+ enum-as-flags: True
+ -
+ name: probe-resp-offload
+ type: u32
+ -
+ name: probe-resp
+ type: binary
+ -
+ name: dfs-region
+ type: u8
+ -
+ name: disable-ht
+ type: flag
+ -
+ name: ht-capability-mask
+ type: binary
+ struct: ieee80211-ht-cap
+ -
+ name: noack-map
+ type: u16
+ -
+ name: inactivity-timeout
+ type: u16
+ -
+ name: rx-signal-dbm
+ type: u32
+ -
+ name: bg-scan-period
+ type: u16
+ -
+ name: wdev
+ type: u64
+ -
+ name: user-reg-hint-type
+ type: u32
+ -
+ name: conn-failed-reason
+ type: u32
+ -
+ name: auth-data
+ type: binary
+ -
+ name: vht-capability
+ type: binary
+ -
+ name: scan-flags
+ type: u32
+ -
+ name: channel-width
+ type: u32
+ -
+ name: center-freq1
+ type: u32
+ -
+ name: center-freq2
+ type: u32
+ -
+ name: p2p-ctwindow
+ type: u8
+ -
+ name: p2p-oppps
+ type: u8
+ -
+ name: local-mesh-power-mode
+ type: u32
+ -
+ name: acl-policy
+ type: u32
+ -
+ name: mac-addrs
+ type: binary # TODO: nest
+ -
+ name: mac-acl-max
+ type: u32
+ -
+ name: radar-event
+ type: u32
+ -
+ name: ext-capa
+ type: binary
+ -
+ name: ext-capa-mask
+ type: binary
+ -
+ name: sta-capability
+ type: u16
+ -
+ name: sta-ext-capability
+ type: binary
+ -
+ name: protocol-features
+ type: u32
+ enum: protocol-features
+ -
+ name: split-wiphy-dump
+ type: flag
+ -
+ name: disable-vht
+ type: flag
+ -
+ name: vht-capability-mask
+ type: binary
+ -
+ name: mdid
+ type: u16
+ -
+ name: ie-ric
+ type: binary
+ -
+ name: crit-prot-id
+ type: u16
+ -
+ name: max-crit-prot-duration
+ type: u16
+ -
+ name: peer-aid
+ type: u16
+ -
+ name: coalesce-rule
+ type: binary # TODO: nest
+ -
+ name: ch-switch-count
+ type: u32
+ -
+ name: ch-switch-block-tx
+ type: flag
+ -
+ name: csa-ies
+ type: binary # TODO: nest
+ -
+ name: cntdwn-offs-beacon
+ type: binary
+ -
+ name: cntdwn-offs-presp
+ type: binary
+ -
+ name: rxmgmt-flags
+ type: binary
+ -
+ name: sta-supported-channels
+ type: binary
+ -
+ name: sta-supported-oper-classes
+ type: binary
+ -
+ name: handle-dfs
+ type: flag
+ -
+ name: support-5-mhz
+ type: flag
+ -
+ name: support-10-mhz
+ type: flag
+ -
+ name: opmode-notif
+ type: u8
+ -
+ name: vendor-id
+ type: u32
+ -
+ name: vendor-subcmd
+ type: u32
+ -
+ name: vendor-data
+ type: binary
+ -
+ name: vendor-events
+ type: binary
+ -
+ name: qos-map
+ type: binary
+ -
+ name: mac-hint
+ type: binary
+ display-hint: mac
+ -
+ name: wiphy-freq-hint
+ type: u32
+ -
+ name: max-ap-assoc-sta
+ type: u32
+ -
+ name: tdls-peer-capability
+ type: u32
+ -
+ name: socket-owner
+ type: flag
+ -
+ name: csa-c-offsets-tx
+ type: binary
+ -
+ name: max-csa-counters
+ type: u8
+ -
+ name: tdls-initiator
+ type: flag
+ -
+ name: use-rrm
+ type: flag
+ -
+ name: wiphy-dyn-ack
+ type: flag
+ -
+ name: tsid
+ type: u8
+ -
+ name: user-prio
+ type: u8
+ -
+ name: admitted-time
+ type: u16
+ -
+ name: smps-mode
+ type: u8
+ -
+ name: oper-class
+ type: u8
+ -
+ name: mac-mask
+ type: binary
+ display-hint: mac
+ -
+ name: wiphy-self-managed-reg
+ type: flag
+ -
+ name: ext-features
+ type: binary
+ -
+ name: survey-radio-stats
+ type: binary
+ -
+ name: netns-fd
+ type: u32
+ -
+ name: sched-scan-delay
+ type: u32
+ -
+ name: reg-indoor
+ type: flag
+ -
+ name: max-num-sched-scan-plans
+ type: u32
+ -
+ name: max-scan-plan-interval
+ type: u32
+ -
+ name: max-scan-plan-iterations
+ type: u32
+ -
+ name: sched-scan-plans
+ type: binary # TODO: nest
+ -
+ name: pbss
+ type: flag
+ -
+ name: bss-select
+ type: binary # TODO: nest
+ -
+ name: sta-support-p2p-ps
+ type: u8
+ -
+ name: pad
+ type: binary
+ -
+ name: iftype-ext-capa
+ type: binary # TODO: nest
+ -
+ name: mu-mimo-group-data
+ type: binary
+ -
+ name: mu-mimo-follow-mac-addr
+ type: binary
+ display-hint: mac
+ -
+ name: scan-start-time-tsf
+ type: u64
+ -
+ name: scan-start-time-tsf-bssid
+ type: binary
+ -
+ name: measurement-duration
+ type: u16
+ -
+ name: measurement-duration-mandatory
+ type: flag
+ -
+ name: mesh-peer-aid
+ type: u16
+ -
+ name: nan-master-pref
+ type: u8
+ -
+ name: bands
+ type: u32
+ -
+ name: nan-func
+ type: binary # TODO: nest
+ -
+ name: nan-match
+ type: binary # TODO: nest
+ -
+ name: fils-kek
+ type: binary
+ -
+ name: fils-nonces
+ type: binary
+ -
+ name: multicast-to-unicast-enabled
+ type: flag
+ -
+ name: bssid
+ type: binary
+ display-hint: mac
+ -
+ name: sched-scan-relative-rssi
+ type: s8
+ -
+ name: sched-scan-rssi-adjust
+ type: binary
+ -
+ name: timeout-reason
+ type: u32
+ -
+ name: fils-erp-username
+ type: binary
+ -
+ name: fils-erp-realm
+ type: binary
+ -
+ name: fils-erp-next-seq-num
+ type: u16
+ -
+ name: fils-erp-rrk
+ type: binary
+ -
+ name: fils-cache-id
+ type: binary
+ -
+ name: pmk
+ type: binary
+ -
+ name: sched-scan-multi
+ type: flag
+ -
+ name: sched-scan-max-reqs
+ type: u32
+ -
+ name: want-1x-4way-hs
+ type: flag
+ -
+ name: pmkr0-name
+ type: binary
+ -
+ name: port-authorized
+ type: binary
+ -
+ name: external-auth-action
+ type: u32
+ -
+ name: external-auth-support
+ type: flag
+ -
+ name: nss
+ type: u8
+ -
+ name: ack-signal
+ type: s32
+ -
+ name: control-port-over-nl80211
+ type: flag
+ -
+ name: txq-stats
+ type: nest
+ nested-attributes: txq-stats-attrs
+ -
+ name: txq-limit
+ type: u32
+ -
+ name: txq-memory-limit
+ type: u32
+ -
+ name: txq-quantum
+ type: u32
+ -
+ name: he-capability
+ type: binary
+ -
+ name: ftm-responder
+ type: binary # TODO: nest
+ -
+ name: ftm-responder-stats
+ type: binary # TODO: nest
+ -
+ name: timeout
+ type: u32
+ -
+ name: peer-measurements
+ type: binary # TODO: nest
+ -
+ name: airtime-weight
+ type: u16
+ -
+ name: sta-tx-power-setting
+ type: u8
+ -
+ name: sta-tx-power
+ type: s16
+ -
+ name: sae-password
+ type: binary
+ -
+ name: twt-responder
+ type: flag
+ -
+ name: he-obss-pd
+ type: binary # TODO: nest
+ -
+ name: wiphy-edmg-channels
+ type: u8
+ -
+ name: wiphy-edmg-bw-config
+ type: u8
+ -
+ name: vlan-id
+ type: u16
+ -
+ name: he-bss-color
+ type: binary # TODO: nest
+ -
+ name: iftype-akm-suites
+ type: binary # TODO: nest
+ -
+ name: tid-config
+ type: binary # TODO: nest
+ -
+ name: control-port-no-preauth
+ type: flag
+ -
+ name: pmk-lifetime
+ type: u32
+ -
+ name: pmk-reauth-threshold
+ type: u8
+ -
+ name: receive-multicast
+ type: flag
+ -
+ name: wiphy-freq-offset
+ type: u32
+ -
+ name: center-freq1-offset
+ type: u32
+ -
+ name: scan-freq-khz
+ type: binary # TODO: nest
+ -
+ name: he-6ghz-capability
+ type: binary
+ -
+ name: fils-discovery
+ type: binary # TODO: nest
+ -
+ name: unsol-bcast-probe-resp
+ type: binary # TODO: nest
+ -
+ name: s1g-capability
+ type: binary
+ -
+ name: s1g-capability-mask
+ type: binary
+ -
+ name: sae-pwe
+ type: u8
+ -
+ name: reconnect-requested
+ type: binary
+ -
+ name: sar-spec
+ type: nest
+ nested-attributes: sar-attributes
+ -
+ name: disable-he
+ type: flag
+ -
+ name: obss-color-bitmap
+ type: u64
+ -
+ name: color-change-count
+ type: u8
+ -
+ name: color-change-color
+ type: u8
+ -
+ name: color-change-elems
+ type: binary # TODO: nest
+ -
+ name: mbssid-config
+ type: binary # TODO: nest
+ -
+ name: mbssid-elems
+ type: binary # TODO: nest
+ -
+ name: radar-background
+ type: flag
+ -
+ name: ap-settings-flags
+ type: u32
+ -
+ name: eht-capability
+ type: binary
+ -
+ name: disable-eht
+ type: flag
+ -
+ name: mlo-links
+ type: binary # TODO: nest
+ -
+ name: mlo-link-id
+ type: u8
+ -
+ name: mld-addr
+ type: binary
+ display-hint: mac
+ -
+ name: mlo-support
+ type: flag
+ -
+ name: max-num-akm-suites
+ type: binary
+ -
+ name: eml-capability
+ type: u16
+ -
+ name: mld-capa-and-ops
+ type: u16
+ -
+ name: tx-hw-timestamp
+ type: u64
+ -
+ name: rx-hw-timestamp
+ type: u64
+ -
+ name: td-bitmap
+ type: binary
+ -
+ name: punct-bitmap
+ type: u32
+ -
+ name: max-hw-timestamp-peers
+ type: u16
+ -
+ name: hw-timestamp-enabled
+ type: flag
+ -
+ name: ema-rnr-elems
+ type: binary # TODO: nest
+ -
+ name: mlo-link-disabled
+ type: flag
+ -
+ name: bss-dump-include-use-data
+ type: flag
+ -
+ name: mlo-ttlm-dlink
+ type: u16
+ -
+ name: mlo-ttlm-ulink
+ type: u16
+ -
+ name: assoc-spp-amsdu
+ type: flag
+ -
+ name: wiphy-radios
+ type: binary # TODO: nest
+ -
+ name: wiphy-interface-combinations
+ type: binary # TODO: nest
+ -
+ name: vif-radio-mask
+ type: u32
+ -
+ name: frame-type-attrs
+ subset-of: nl80211-attrs
+ attributes:
+ -
+ name: frame-type
+ -
+ name: wiphy-bands
+ name-prefix: nl80211-band-
+ attr-max-name: num-nl80211-bands
+ attributes:
+ -
+ name: 2ghz
+ doc: 2.4 GHz ISM band
+ value: 0
+ type: nest
+ nested-attributes: band-attrs
+ -
+ name: 5ghz
+ doc: around 5 GHz band (4.9 - 5.7 GHz)
+ type: nest
+ nested-attributes: band-attrs
+ -
+ name: 60ghz
+ doc: around 60 GHz band (58.32 - 69.12 GHz)
+ type: nest
+ nested-attributes: band-attrs
+ -
+ name: 6ghz
+ type: nest
+ nested-attributes: band-attrs
+ -
+ name: s1ghz
+ type: nest
+ nested-attributes: band-attrs
+ -
+ name: lc
+ type: nest
+ nested-attributes: band-attrs
+ -
+ name: band-attrs
+ enum-name: nl80211-band-attr
+ name-prefix: nl80211-band-attr-
+ attributes:
+ -
+ name: freqs
+ type: indexed-array
+ sub-type: nest
+ nested-attributes: frequency-attrs
+ -
+ name: rates
+ type: indexed-array
+ sub-type: nest
+ nested-attributes: bitrate-attrs
+ -
+ name: ht-mcs-set
+ type: binary
+ struct: ieee80211-mcs-info
+ -
+ name: ht-capa
+ type: u16
+ -
+ name: ht-ampdu-factor
+ type: u8
+ -
+ name: ht-ampdu-density
+ type: u8
+ -
+ name: vht-mcs-set
+ type: binary
+ struct: ieee80211-vht-mcs-info
+ -
+ name: vht-capa
+ type: u32
+ -
+ name: iftype-data
+ type: indexed-array
+ sub-type: nest
+ nested-attributes: iftype-data-attrs
+ -
+ name: edmg-channels
+ type: binary
+ -
+ name: edmg-bw-config
+ type: binary
+ -
+ name: s1g-mcs-nss-set
+ type: binary
+ -
+ name: s1g-capa
+ type: binary
+ -
+ name: bitrate-attrs
+ name-prefix: nl80211-bitrate-attr-
+ attributes:
+ -
+ name: rate
+ type: u32
+ -
+ name: 2ghz-shortpreamble
+ type: flag
+ -
+ name: frequency-attrs
+ name-prefix: nl80211-frequency-attr-
+ attributes:
+ -
+ name: freq
+ type: u32
+ -
+ name: disabled
+ type: flag
+ -
+ name: no-ir
+ type: flag
+ -
+ name: no-ibss
+ name-prefix: __nl80211-frequency-attr-
+ type: flag
+ -
+ name: radar
+ type: flag
+ -
+ name: max-tx-power
+ type: u32
+ -
+ name: dfs-state
+ type: u32
+ -
+ name: dfs-time
+ type: binary
+ -
+ name: no-ht40-minus
+ type: binary
+ -
+ name: no-ht40-plus
+ type: binary
+ -
+ name: no-80mhz
+ type: binary
+ -
+ name: no-160mhz
+ type: binary
+ -
+ name: dfs-cac-time
+ type: binary
+ -
+ name: indoor-only
+ type: binary
+ -
+ name: ir-concurrent
+ type: binary
+ -
+ name: no-20mhz
+ type: binary
+ -
+ name: no-10mhz
+ type: binary
+ -
+ name: wmm
+ type: indexed-array
+ sub-type: nest
+ nested-attributes: wmm-attrs
+ -
+ name: no-he
+ type: binary
+ -
+ name: offset
+ type: u32
+ -
+ name: 1mhz
+ type: binary
+ -
+ name: 2mhz
+ type: binary
+ -
+ name: 4mhz
+ type: binary
+ -
+ name: 8mhz
+ type: binary
+ -
+ name: 16mhz
+ type: binary
+ -
+ name: no-320mhz
+ type: binary
+ -
+ name: no-eht
+ type: binary
+ -
+ name: psd
+ type: binary
+ -
+ name: dfs-concurrent
+ type: binary
+ -
+ name: no-6ghz-vlp-client
+ type: binary
+ -
+ name: no-6ghz-afc-client
+ type: binary
+ -
+ name: can-monitor
+ type: binary
+ -
+ name: allow-6ghz-vlp-ap
+ type: binary
+ -
+ name: if-combination-attributes
+ enum-name: nl80211-if-combination-attrs
+ name-prefix: nl80211-iface-comb-
+ attr-max-name: max-nl80211-iface-comb
+ attributes:
+ -
+ name: limits
+ type: indexed-array
+ sub-type: nest
+ nested-attributes: iface-limit-attributes
+ -
+ name: maxnum
+ type: u32
+ -
+ name: sta-ap-bi-match
+ type: flag
+ -
+ name: num-channels
+ type: u32
+ -
+ name: radar-detect-widths
+ type: u32
+ -
+ name: radar-detect-regions
+ type: u32
+ -
+ name: bi-min-gcd
+ type: u32
+ -
+ name: iface-limit-attributes
+ enum-name: nl80211-iface-limit-attrs
+ name-prefix: nl80211-iface-limit-
+ attr-max-name: max-nl80211-iface-limit
+ attributes:
+ -
+ name: max
+ type: u32
+ -
+ name: types
+ type: nest
+ nested-attributes: supported-iftypes
+ -
+ name: iftype-data-attrs
+ name-prefix: nl80211-band-iftype-attr-
+ attributes:
+ -
+ name: iftypes
+ type: binary
+ -
+ name: he-cap-mac
+ type: binary
+ -
+ name: he-cap-phy
+ type: binary
+ -
+ name: he-cap-mcs-set
+ type: binary
+ -
+ name: he-cap-ppe
+ type: binary
+ -
+ name: he-6ghz-capa
+ type: binary
+ -
+ name: vendor-elems
+ type: binary
+ -
+ name: eht-cap-mac
+ type: binary
+ -
+ name: eht-cap-phy
+ type: binary
+ -
+ name: eht-cap-mcs-set
+ type: binary
+ -
+ name: eht-cap-ppe
+ type: binary
+ -
+ name: iftype-attrs
+ enum-name: nl80211-iftype
+ name-prefix: nl80211-iftype-
+ attributes:
+ -
+ name: unspecified
+ type: nest
+ value: 0
+ nested-attributes: frame-type-attrs
+ -
+ name: adhoc
+ type: nest
+ nested-attributes: frame-type-attrs
+ -
+ name: station
+ type: nest
+ nested-attributes: frame-type-attrs
+ -
+ name: ap
+ type: nest
+ nested-attributes: frame-type-attrs
+ -
+ name: ap-vlan
+ type: nest
+ nested-attributes: frame-type-attrs
+ -
+ name: wds
+ type: nest
+ nested-attributes: frame-type-attrs
+ -
+ name: monitor
+ type: nest
+ nested-attributes: frame-type-attrs
+ -
+ name: mesh-point
+ type: nest
+ nested-attributes: frame-type-attrs
+ -
+ name: p2p-client
+ type: nest
+ nested-attributes: frame-type-attrs
+ -
+ name: p2p-go
+ type: nest
+ nested-attributes: frame-type-attrs
+ -
+ name: p2p-device
+ type: nest
+ nested-attributes: frame-type-attrs
+ -
+ name: ocb
+ type: nest
+ nested-attributes: frame-type-attrs
+ -
+ name: nan
+ type: nest
+ nested-attributes: frame-type-attrs
+ -
+ name: sar-attributes
+ enum-name: nl80211-sar-attrs
+ name-prefix: nl80211-sar-attr-
+ attributes:
+ -
+ name: type
+ type: u32
+ -
+ name: specs
+ type: indexed-array
+ sub-type: nest
+ nested-attributes: sar-specs
+ -
+ name: sar-specs
+ enum-name: nl80211-sar-specs-attrs
+ name-prefix: nl80211-sar-attr-specs-
+ attributes:
+ -
+ name: power
+ type: s32
+ -
+ name: range-index
+ type: u32
+ -
+ name: start-freq
+ type: u32
+ -
+ name: end-freq
+ type: u32
+ -
+ name: supported-iftypes
+ enum-name: nl80211-iftype
+ name-prefix: nl80211-iftype-
+ attributes:
+ -
+ name: adhoc
+ type: flag
+ -
+ name: station
+ type: flag
+ -
+ name: ap
+ type: flag
+ -
+ name: ap-vlan
+ type: flag
+ -
+ name: wds
+ type: flag
+ -
+ name: monitor
+ type: flag
+ -
+ name: mesh-point
+ type: flag
+ -
+ name: p2p-client
+ type: flag
+ -
+ name: p2p-go
+ type: flag
+ -
+ name: p2p-device
+ type: flag
+ -
+ name: ocb
+ type: flag
+ -
+ name: nan
+ type: flag
+ -
+ name: txq-stats-attrs
+ name-prefix: nl80211-txq-stats-
+ attributes:
+ -
+ name: backlog-bytes
+ type: u32
+ -
+ name: backlog-packets
+ type: u32
+ -
+ name: flows
+ type: u32
+ -
+ name: drops
+ type: u32
+ -
+ name: ecn-marks
+ type: u32
+ -
+ name: overlimit
+ type: u32
+ -
+ name: overmemory
+ type: u32
+ -
+ name: collisions
+ type: u32
+ -
+ name: tx-bytes
+ type: u32
+ -
+ name: tx-packets
+ type: u32
+ -
+ name: max-flows
+ type: u32
+ -
+ name: wmm-attrs
+ enum-name: nl80211-wmm-rule
+ name-prefix: nl80211-wmmr-
+ attributes:
+ -
+ name: cw-min
+ type: u16
+ -
+ name: cw-max
+ type: u16
+ -
+ name: aifsn
+ type: u8
+ -
+ name: txop
+ type: u16
+ -
+ name: wowlan-triggers-attrs
+ enum-name: nl80211-wowlan-triggers
+ name-prefix: nl80211-wowlan-trig-
+ attr-max-name: max-nl80211-wowlan-trig
+ attributes:
+ -
+ name: any
+ type: flag
+ -
+ name: disconnect
+ type: flag
+ -
+ name: magic-pkt
+ type: flag
+ -
+ name: pkt-pattern
+ type: flag
+ -
+ name: gtk-rekey-supported
+ type: flag
+ -
+ name: gtk-rekey-failure
+ type: flag
+ -
+ name: eap-ident-request
+ type: flag
+ -
+ name: 4way-handshake
+ type: flag
+ -
+ name: rfkill-release
+ type: flag
+ -
+ name: wakeup-pkt-80211
+ type: flag
+ -
+ name: wakeup-pkt-80211-len
+ type: flag
+ -
+ name: wakeup-pkt-8023
+ type: flag
+ -
+ name: wakeup-pkt-8023-len
+ type: flag
+ -
+ name: tcp-connection
+ type: flag
+ -
+ name: wakeup-tcp-match
+ type: flag
+ -
+ name: wakeup-tcp-connlost
+ type: flag
+ -
+ name: wakeup-tcp-nomoretokens
+ type: flag
+ -
+ name: net-detect
+ type: flag
+ -
+ name: net-detect-results
+ type: flag
+ -
+ name: unprotected-deauth-disassoc
+ type: flag
+
+operations:
+ enum-model: directional
+ list:
+ -
+ name: get-wiphy
+ doc: |
+ Get information about a wiphy or dump a list of all wiphys. Requests to dump get-wiphy
+ should unconditionally include the split-wiphy-dump flag in the request.
+ attribute-set: nl80211-attrs
+ do:
+ request:
+ value: 1
+ attributes:
+ - wiphy
+ - wdev
+ - ifindex
+ reply:
+ value: 3
+ attributes: &wiphy-reply-attrs
+ - bands
+ - cipher-suites
+ - control-port-ethertype
+ - ext-capa
+ - ext-capa-mask
+ - ext-features
+ - feature-flags
+ - generation
+ - ht-capability-mask
+ - interface-combinations
+ - mac
+ - max-csa-counters
+ - max-match-sets
+ - max-num-akm-suites
+ - max-num-pmkids
+ - max-num-scan-ssids
+ - max-num-sched-scan-plans
+ - max-num-sched-scan-ssids
+ - max-remain-on-channel-duration
+ - max-scan-ie-len
+ - max-scan-plan-interval
+ - max-scan-plan-iterations
+ - max-sched-scan-ie-len
+ - offchannel-tx-ok
+ - rx-frame-types
+ - sar-spec
+ - sched-scan-max-reqs
+ - software-iftypes
+ - support-ap-uapsd
+ - supported-commands
+ - supported-iftypes
+ - tdls-external-setup
+ - tdls-support
+ - tx-frame-types
+ - txq-limit
+ - txq-memory-limit
+ - txq-quantum
+ - txq-stats
+ - vht-capability-mask
+ - wiphy
+ - wiphy-antenna-avail-rx
+ - wiphy-antenna-avail-tx
+ - wiphy-antenna-rx
+ - wiphy-antenna-tx
+ - wiphy-bands
+ - wiphy-coverage-class
+ - wiphy-frag-threshold
+ - wiphy-name
+ - wiphy-retry-long
+ - wiphy-retry-short
+ - wiphy-rts-threshold
+ - wowlan-triggers-supported
+ dump:
+ request:
+ attributes:
+ - wiphy
+ - wdev
+ - ifindex
+ - split-wiphy-dump
+ reply:
+ attributes: *wiphy-reply-attrs
+ -
+ name: get-interface
+ doc: Get information about an interface or dump a list of all interfaces
+ attribute-set: nl80211-attrs
+ do:
+ request:
+ value: 5
+ attributes:
+ - ifname
+ reply:
+ value: 7
+ attributes: &interface-reply-attrs
+ - ifname
+ - iftype
+ - ifindex
+ - wiphy
+ - wdev
+ - mac
+ - generation
+ - txq-stats
+ - 4addr
+ dump:
+ request:
+ attributes:
+ - ifname
+ reply:
+ attributes: *interface-reply-attrs
+ -
+ name: get-protocol-features
+ doc: Get information about supported protocol features
+ attribute-set: nl80211-attrs
+ do:
+ request:
+ value: 95
+ attributes:
+ - protocol-features
+ reply:
+ value: 95
+ attributes:
+ - protocol-features
+
+mcast-groups:
+ list:
+ -
+ name: config
+ -
+ name: scan
+ -
+ name: regulatory
+ -
+ name: mlme
+ -
+ name: vendor
+ -
+ name: nan
+ -
+ name: testmode
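+
+# Illustrative usage note (not part of the spec proper): assuming the YNL CLI
+# at tools/net/ynl/cli.py and its --spec/--dump/--json options, a split wiphy
+# dump against this spec might look roughly like:
+#
+#   ./tools/net/ynl/cli.py --spec Documentation/netlink/specs/nl80211.yaml \
+#       --dump get-wiphy --json '{"split-wiphy-dump": true}'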
diff --git a/Documentation/netlink/specs/rt_addr.yaml b/Documentation/netlink/specs/rt_addr.yaml
index cbee1cedb177..5dd5469044c7 100644
--- a/Documentation/netlink/specs/rt_addr.yaml
+++ b/Documentation/netlink/specs/rt_addr.yaml
@@ -168,6 +168,29 @@ operations:
reply:
value: 20
attributes: *ifaddr-all
+ -
+ name: getmaddrs
+ doc: Get / dump IPv4/IPv6 multicast addresses.
+ attribute-set: addr-attrs
+ fixed-header: ifaddrmsg
+ do:
+ request:
+ value: 58
+ attributes:
+ - ifa-family
+ - ifa-index
+ reply:
+ value: 58
+ attributes: &mcaddr-attrs
+ - ifa-multicast
+ - ifa-cacheinfo
+ dump:
+ request:
+ value: 58
+ attributes:
+ - ifa-family
+ reply:
+ value: 58
+ attributes: *mcaddr-attrs
mcast-groups:
list:
diff --git a/Documentation/netlink/specs/rt_link.yaml b/Documentation/netlink/specs/rt_link.yaml
index 0d492500c7e5..31238455f8e9 100644
--- a/Documentation/netlink/specs/rt_link.yaml
+++ b/Documentation/netlink/specs/rt_link.yaml
@@ -771,6 +771,18 @@ definitions:
name: to
type: u32
-
+ name: ifla-geneve-port-range
+ type: struct
+ members:
+ -
+ name: low
+ type: u16
+ byte-order: big-endian
+ -
+ name: high
+ type: u16
+ byte-order: big-endian
+ -
name: ifla-vf-mac
type: struct
members:
@@ -1148,6 +1160,9 @@ attribute-sets:
name: max-pacing-offload-horizon
type: uint
doc: EDT offload horizon supported by the device (in nsec).
+ -
+ name: netns-immutable
+ type: u8
-
name: af-spec-attrs
attributes:
@@ -1915,6 +1930,10 @@ attribute-sets:
-
name: inner-proto-inherit
type: flag
+ -
+ name: port-range
+ type: binary
+ struct: ifla-geneve-port-range
-
name: linkinfo-iptun-attrs
name-prefix: ifla-iptun-
diff --git a/Documentation/netlink/specs/rt_rule.yaml b/Documentation/netlink/specs/rt_rule.yaml
index a9debac3058a..de0938d36541 100644
--- a/Documentation/netlink/specs/rt_rule.yaml
+++ b/Documentation/netlink/specs/rt_rule.yaml
@@ -182,6 +182,18 @@ attribute-sets:
type: u32
byte-order: big-endian
display-hint: hex
+ -
+ name: sport-mask
+ type: u16
+ display-hint: hex
+ -
+ name: dport-mask
+ type: u16
+ display-hint: hex
+ -
+ name: dscp-mask
+ type: u8
+ display-hint: hex
operations:
enum-model: directional
@@ -215,6 +227,9 @@ operations:
- dscp
- flowlabel
- flowlabel-mask
+ - sport-mask
+ - dport-mask
+ - dscp-mask
-
name: newrule-ntf
doc: Notify a rule creation
diff --git a/Documentation/networking/batman-adv.rst b/Documentation/networking/batman-adv.rst
index 44b9b5cc0e24..ec53a42499c1 100644
--- a/Documentation/networking/batman-adv.rst
+++ b/Documentation/networking/batman-adv.rst
@@ -27,7 +27,7 @@ Load the batman-adv module into your kernel::
$ insmod batman-adv.ko
The module is now waiting for activation. You must add some interfaces on which
-batman-adv can operate. The batman-adv soft-interface can be created using the
+batman-adv can operate. The batman-adv mesh-interface can be created using the
iproute2 tool ``ip``::
$ ip link add name bat0 type batadv
diff --git a/Documentation/networking/device_drivers/cable/index.rst b/Documentation/networking/device_drivers/cable/index.rst
deleted file mode 100644
index cce3c4392972..000000000000
--- a/Documentation/networking/device_drivers/cable/index.rst
+++ /dev/null
@@ -1,18 +0,0 @@
-.. SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
-
-Cable Modem Device Drivers
-==========================
-
-Contents:
-
-.. toctree::
- :maxdepth: 2
-
- sb1000
-
-.. only:: subproject and html
-
- Indices
- =======
-
- * :ref:`genindex`
diff --git a/Documentation/networking/device_drivers/cable/sb1000.rst b/Documentation/networking/device_drivers/cable/sb1000.rst
deleted file mode 100644
index c8582ca4034d..000000000000
--- a/Documentation/networking/device_drivers/cable/sb1000.rst
+++ /dev/null
@@ -1,222 +0,0 @@
-.. SPDX-License-Identifier: GPL-2.0
-
-===================
-SB100 device driver
-===================
-
-sb1000 is a module network device driver for the General Instrument (also known
-as NextLevel) SURFboard1000 internal cable modem board. This is an ISA card
-which is used by a number of cable TV companies to provide cable modem access.
-It's a one-way downstream-only cable modem, meaning that your upstream net link
-is provided by your regular phone modem.
-
-This driver was written by Franco Venturi <fventuri@mediaone.net>. He deserves
-a great deal of thanks for this wonderful piece of code!
-
-Needed tools
-============
-
-Support for this device is now a part of the standard Linux kernel. The
-driver source code file is drivers/net/sb1000.c. In addition to this
-you will need:
-
-1. The "cmconfig" program. This is a utility which supplements "ifconfig"
- to configure the cable modem and network interface (usually called "cm0");
-
-2. Several PPP scripts which live in /etc/ppp to make connecting via your
- cable modem easy.
-
- These utilities can be obtained from:
-
- http://www.jacksonville.net/~fventuri/
-
- in Franco's original source code distribution .tar.gz file. Support for
- the sb1000 driver can be found at:
-
- - http://web.archive.org/web/%2E/http://home.adelphia.net/~siglercm/sb1000.html
- - http://web.archive.org/web/%2E/http://linuxpower.cx/~cable/
-
- along with these utilities.
-
-3. The standard isapnp tools. These are necessary to configure your SB1000
- card at boot time (or afterwards by hand) since it's a PnP card.
-
- If you don't have these installed as a standard part of your Linux
- distribution, you can find them at:
-
- http://www.roestock.demon.co.uk/isapnptools/
-
- or check your Linux distribution binary CD or their web site. For help with
- isapnp, pnpdump, or /etc/isapnp.conf, go to:
-
- http://www.roestock.demon.co.uk/isapnptools/isapnpfaq.html
-
-Using the driver
-================
-
-To make the SB1000 card work, follow these steps:
-
-1. Run ``make config``, or ``make menuconfig``, or ``make xconfig``, whichever
- you prefer, in the top kernel tree directory to set up your kernel
- configuration. Make sure to say "Y" to "Prompt for development drivers"
- and to say "M" to the sb1000 driver. Also say "Y" or "M" to all the standard
- networking questions to get TCP/IP and PPP networking support.
-
-2. **BEFORE** you build the kernel, edit drivers/net/sb1000.c. Make sure
- to redefine the value of READ_DATA_PORT to match the I/O address used
- by isapnp to access your PnP cards. This is the value of READPORT in
- /etc/isapnp.conf or given by the output of pnpdump.
-
-3. Build and install the kernel and modules as usual.
-
-4. Boot your new kernel following the usual procedures.
-
-5. Set up to configure the new SB1000 PnP card by capturing the output
- of "pnpdump" to a file and editing this file to set the correct I/O ports,
- IRQ, and DMA settings for all your PnP cards. Make sure none of the settings
- conflict with one another. Then test this configuration by running the
- "isapnp" command with your new config file as the input. Check for
- errors and fix as necessary. (As an aside, I use I/O ports 0x110 and
- 0x310 and IRQ 11 for my SB1000 card and these work well for me. YMMV.)
- Then save the finished config file as /etc/isapnp.conf for proper
- configuration on subsequent reboots.
-
-6. Download the original file sb1000-1.1.2.tar.gz from Franco's site or one of
- the others referenced above. As root, unpack it into a temporary directory
- and do a ``make cmconfig`` and then ``install -c cmconfig /usr/local/sbin``.
- Don't do ``make install`` because it expects to find all the utilities built
- and ready for installation, not just cmconfig.
-
-7. As root, copy all the files under the ppp/ subdirectory in Franco's
- tar file into /etc/ppp, being careful not to overwrite any files that are
- already in there. Then modify ppp@gi-on to set the correct login name,
- phone number, and frequency for the cable modem. Also edit pap-secrets
- to specify your login name and password and any site-specific information
- you need.
-
-8. Be sure to modify /etc/ppp/firewall to use ipchains instead of
- the older ipfwadm commands from the 2.0.x kernels. There's a neat utility to
- convert ipfwadm commands to ipchains commands:
-
- http://users.dhp.com/~whisper/ipfwadm2ipchains/
-
- You may also wish to modify the firewall script to implement a different
- firewalling scheme.
-
-9. Start the PPP connection via the script /etc/ppp/ppp@gi-on. You must be
- root to do this. It's better to use a utility like sudo to execute
- frequently used commands like this with root permissions if possible. If you
- connect successfully the cable modem interface will come up and you'll see a
- driver message like this at the console::
-
- cm0: sb1000 at (0x110,0x310), csn 1, S/N 0x2a0d16d8, IRQ 11.
- sb1000.c:v1.1.2 6/01/98 (fventuri@mediaone.net)
-
- The "ifconfig" command should show two new interfaces, ppp0 and cm0.
-
- The command "cmconfig cm0" will give you information about the cable modem
- interface.
-
-10. Try pinging a site via ``ping -c 5 www.yahoo.com``, for example. You should
- see packets received.
-
-11. If you can't get site names (like www.yahoo.com) to resolve into
- IP addresses (like 204.71.200.67), be sure your /etc/resolv.conf file
- has no syntax errors and has the right nameserver IP addresses in it.
- If this doesn't help, try something like ``ping -c 5 204.71.200.67`` to
- see if the networking is running but the DNS resolution is where the
- problem lies.
-
-12. If you still have problems, go to the support web sites mentioned above
- and read the information and documentation there.
-
-Common problems
-===============
-
-1. Packets go out on the ppp0 interface but don't come back on the cm0
- interface. It looks like I'm connected but I can't even ping any
- numerical IP addresses. (This happens predominantly on Debian systems due
- to a default boot-time configuration script.)
-
-Solution
- As root ``echo 0 > /proc/sys/net/ipv4/conf/cm0/rp_filter`` so it
- can share the same IP address as the ppp0 interface. Note that this
- command should probably be added to the /etc/ppp/cablemodem script
- *right*between* the "/sbin/ifconfig" and "/sbin/cmconfig" commands.
- You may need to do this to /proc/sys/net/ipv4/conf/ppp0/rp_filter as well.
- If you do this to /proc/sys/net/ipv4/conf/default/rp_filter on each reboot
- (in rc.local or some such) then any interfaces can share the same IP
- addresses.
-
-2. I get "unresolved symbol" error messages on executing ``insmod sb1000.o``.
-
-Solution
- You probably have a non-matching kernel source tree and
- /usr/include/linux and /usr/include/asm header files. Make sure you
- install the correct versions of the header files in these two directories.
- Then rebuild and reinstall the kernel.
-
-3. When isapnp runs it reports an error, and my SB1000 card isn't working.
-
-Solution
- There's a problem with later versions of isapnp using the "(CHECK)"
- option in the lines that allocate the two I/O addresses for the SB1000 card.
- This first popped up on RH 6.0. Delete "(CHECK)" for the SB1000 I/O addresses.
- Make sure they don't conflict with any other pieces of hardware first! Then
- rerun isapnp and go from there.
-
-4. I can't execute the /etc/ppp/ppp@gi-on file.
-
-Solution
- As root do ``chmod ug+x /etc/ppp/ppp@gi-on``.
-
-5. The firewall script isn't working (with 2.2.x and higher kernels).
-
-Solution
- Use the ipfwadm2ipchains script referenced above to convert the
- /etc/ppp/firewall script from the deprecated ipfwadm commands to ipchains.
-
-6. I'm getting *tons* of firewall deny messages in the /var/kern.log,
- /var/messages, and/or /var/syslog files, and they're filling up my /var
- partition!!!
-
-Solution
- First, tell your ISP that you're receiving DoS (Denial of Service)
- and/or portscanning (UDP connection attempts) attacks! Look over the deny
- messages to figure out what the attack is and where it's coming from. Next,
- edit /etc/ppp/cablemodem and make sure the ",nobroadcast" option is turned on
- to the "cmconfig" command (uncomment that line). If you're not receiving these
- denied packets on your broadcast interface (IP address xxx.yyy.zzz.255
- typically), then someone is attacking your machine in particular. Be careful
- out there....
-
-7. Everything seems to work fine but my computer locks up after a while
- (and typically during a lengthy download through the cable modem)!
-
-Solution
- You may need to add a short delay in the driver to 'slow down' the
- SURFboard because your PC might not be able to keep up with the transfer rate
- of the SB1000. To do this, it's probably best to download Franco's
- sb1000-1.1.2.tar.gz archive and build and install sb1000.o manually. You'll
- want to edit the 'Makefile' and look for the 'SB1000_DELAY'
- define. Uncomment those 'CFLAGS' lines (and comment out the default ones)
- and try setting the delay to something like 60 microseconds with:
- '-DSB1000_DELAY=60'. Then do ``make`` and as root ``make install`` and try
- it out. If it still doesn't work or you like playing with the driver, you may
- try other numbers. Remember though that the higher the delay, the slower the
- driver (which slows down the rest of the PC too when it is actively
- used). Thanks to Ed Daiga for this tip!
-
-Credits
-=======
-
-This README came from Franco Venturi's original README file which is
-still supplied with his driver .tar.gz archive. I and all other sb1000 users
-owe Franco a tremendous "Thank you!" Additional thanks goes to Carl Patten
-and Ralph Bonnell who are now managing the Linux SB1000 web site, and to
-the SB1000 users who reported and helped debug the common problems listed
-above.
-
-
- Clemmitt Sigler
- csigler@vt.edu
diff --git a/Documentation/networking/device_drivers/ethernet/freescale/dpaa2/switch-driver.rst b/Documentation/networking/device_drivers/ethernet/freescale/dpaa2/switch-driver.rst
index 8bf411b857d4..5f3885e56f58 100644
--- a/Documentation/networking/device_drivers/ethernet/freescale/dpaa2/switch-driver.rst
+++ b/Documentation/networking/device_drivers/ethernet/freescale/dpaa2/switch-driver.rst
@@ -70,7 +70,7 @@ the DPSW object that it will probe:
Besides the configuration of the actual DPSW object, the dpaa2-switch driver
will need the following DPAA2 objects:
- * 1 DPMCP - A Management Command Portal object is needed for any interraction
+ * 1 DPMCP - A Management Command Portal object is needed for any interaction
with the MC firmware.
* 1 DPBP - A Buffer Pool is used for seeding buffers intended for the Rx path
diff --git a/Documentation/networking/device_drivers/ethernet/index.rst b/Documentation/networking/device_drivers/ethernet/index.rst
index 6fc1961492b7..05d822b904b4 100644
--- a/Documentation/networking/device_drivers/ethernet/index.rst
+++ b/Documentation/networking/device_drivers/ethernet/index.rst
@@ -55,7 +55,6 @@ Contents:
ti/cpsw_switchdev
ti/am65_nuss_cpsw_switchdev
ti/tlan
- toshiba/spider_net
wangxun/txgbe
wangxun/ngbe
diff --git a/Documentation/networking/device_drivers/ethernet/mellanox/mlx5/counters.rst b/Documentation/networking/device_drivers/ethernet/mellanox/mlx5/counters.rst
index 99d95be4d159..43d72c8b713b 100644
--- a/Documentation/networking/device_drivers/ethernet/mellanox/mlx5/counters.rst
+++ b/Documentation/networking/device_drivers/ethernet/mellanox/mlx5/counters.rst
@@ -1082,6 +1082,11 @@ like flow control, FEC and more.
need to replace the cable/transceiver.
- Error
+ * - `total_success_recovery_phy`
+ - The total number of successful recovery events of any type during the
+ port's reset cycle.
+ - Error
+
* - `rx_out_of_buffer`
- Number of times receive queue had no software buffers allocated for the
adapter's incoming traffic.
diff --git a/Documentation/networking/device_drivers/ethernet/toshiba/spider_net.rst b/Documentation/networking/device_drivers/ethernet/toshiba/spider_net.rst
deleted file mode 100644
index fe5b32be15cd..000000000000
--- a/Documentation/networking/device_drivers/ethernet/toshiba/spider_net.rst
+++ /dev/null
@@ -1,202 +0,0 @@
-.. SPDX-License-Identifier: GPL-2.0
-
-===========================
-The Spidernet Device Driver
-===========================
-
-Written by Linas Vepstas <linas@austin.ibm.com>
-
-Version of 7 June 2007
-
-Abstract
-========
-This document sketches the structure of portions of the spidernet
-device driver in the Linux kernel tree. The spidernet is a gigabit
-ethernet device built into the Toshiba southbridge commonly used
-in the SONY Playstation 3 and the IBM QS20 Cell blade.
-
-The Structure of the RX Ring.
-=============================
-The receive (RX) ring is a circular linked list of RX descriptors,
-together with three pointers into the ring that are used to manage its
-contents.
-
-The elements of the ring are called "descriptors" or "descrs"; they
-describe the received data. This includes a pointer to a buffer
-containing the received data, the buffer size, and various status bits.
-
-There are three primary states that a descriptor can be in: "empty",
-"full" and "not-in-use". An "empty" or "ready" descriptor is ready
-to receive data from the hardware. A "full" descriptor has data in it,
-and is waiting to be emptied and processed by the OS. A "not-in-use"
-descriptor is neither empty or full; it is simply not ready. It may
-not even have a data buffer in it, or is otherwise unusable.
-
-During normal operation, on device startup, the OS (specifically, the
-spidernet device driver) allocates a set of RX descriptors and RX
-buffers. These are all marked "empty", ready to receive data. This
-ring is handed off to the hardware, which sequentially fills in the
-buffers, and marks them "full". The OS follows up, taking the full
-buffers, processing them, and re-marking them empty.
-
-This filling and emptying is managed by three pointers, the "head"
-and "tail" pointers, managed by the OS, and a hardware current
-descriptor pointer (GDACTDPA). The GDACTDPA points at the descr
-currently being filled. When this descr is filled, the hardware
-marks it full, and advances the GDACTDPA by one. Thus, when there is
-flowing RX traffic, every descr behind it should be marked "full",
-and everything in front of it should be "empty". If the hardware
-discovers that the current descr is not empty, it will signal an
-interrupt, and halt processing.
-
-The tail pointer tails or trails the hardware pointer. When the
-hardware is ahead, the tail pointer will be pointing at a "full"
-descr. The OS will process this descr, and then mark it "not-in-use",
-and advance the tail pointer. Thus, when there is flowing RX traffic,
-all of the descrs in front of the tail pointer should be "full", and
-all of those behind it should be "not-in-use". When RX traffic is not
-flowing, then the tail pointer can catch up to the hardware pointer.
-The OS will then note that the current tail is "empty", and halt
-processing.
-
-The head pointer (somewhat mis-named) follows after the tail pointer.
-When traffic is flowing, then the head pointer will be pointing at
-a "not-in-use" descr. The OS will perform various housekeeping duties
-on this descr. This includes allocating a new data buffer and
-dma-mapping it so as to make it visible to the hardware. The OS will
-then mark the descr as "empty", ready to receive data. Thus, when there
-is flowing RX traffic, everything in front of the head pointer should
-be "not-in-use", and everything behind it should be "empty". If no
-RX traffic is flowing, then the head pointer can catch up to the tail
-pointer, at which point the OS will notice that the head descr is
-"empty", and it will halt processing.
-
-Thus, in an idle system, the GDACTDPA, tail and head pointers will
-all be pointing at the same descr, which should be "empty". All of the
-other descrs in the ring should be "empty" as well.
-
-The show_rx_chain() routine will print out the locations of the
-GDACTDPA, tail and head pointers. It will also summarize the contents
-of the ring, starting at the tail pointer, and listing the status
-of the descrs that follow.
-
-A typical example of the output, for a nearly idle system, might be::
-
- net eth1: Total number of descrs=256
- net eth1: Chain tail located at descr=20
- net eth1: Chain head is at 20
- net eth1: HW curr desc (GDACTDPA) is at 21
- net eth1: Have 1 descrs with stat=x40800101
- net eth1: HW next desc (GDACNEXTDA) is at 22
- net eth1: Last 255 descrs with stat=xa0800000
-
-In the above, the hardware has filled in one descr, number 20. Both
-head and tail are pointing at 20, because it has not yet been emptied.
-Meanwhile, hw is pointing at 21, which is free.
-
-The "Have nnn decrs" refers to the descr starting at the tail: in this
-case, nnn=1 descr, starting at descr 20. The "Last nnn descrs" refers
-to all of the rest of the descrs, from the last status change. The "nnn"
-is a count of how many descrs have exactly the same status.
-
-The status x4... corresponds to "full" and status xa... corresponds
-to "empty". The actual value printed is RXCOMST_A.
-
-In the device driver source code, a different set of names are
-used for these same concepts, so that::
-
- "empty" == SPIDER_NET_DESCR_CARDOWNED == 0xa
- "full" == SPIDER_NET_DESCR_FRAME_END == 0x4
- "not in use" == SPIDER_NET_DESCR_NOT_IN_USE == 0xf
-
-
-The RX RAM full bug/feature
-===========================
-
-As long as the OS can empty out the RX buffers at a rate faster than
-the hardware can fill them, there is no problem. If, for some reason,
-the OS fails to empty the RX ring fast enough, the hardware GDACTDPA
-pointer will catch up to the head, notice the not-empty condition,
-ad stop. However, RX packets may still continue arriving on the wire.
-The spidernet chip can save some limited number of these in local RAM.
-When this local ram fills up, the spider chip will issue an interrupt
-indicating this (GHIINT0STS will show ERRINT, and the GRMFLLINT bit
-will be set in GHIINT1STS). When the RX ram full condition occurs,
-a certain bug/feature is triggered that has to be specially handled.
-This section describes the special handling for this condition.
-
-When the OS finally has a chance to run, it will empty out the RX ring.
-In particular, it will clear the descriptor on which the hardware had
-stopped. However, once the hardware has decided that a certain
-descriptor is invalid, it will not restart at that descriptor; instead
-it will restart at the next descr. This potentially will lead to a
-deadlock condition, as the tail pointer will be pointing at this descr,
-which, from the OS point of view, is empty; the OS will be waiting for
-this descr to be filled. However, the hardware has skipped this descr,
-and is filling the next descrs. Since the OS doesn't see this, there
-is a potential deadlock, with the OS waiting for one descr to fill,
-while the hardware is waiting for a different set of descrs to become
-empty.
-
-A call to show_rx_chain() at this point indicates the nature of the
-problem. A typical print when the network is hung shows the following::
-
- net eth1: Spider RX RAM full, incoming packets might be discarded!
- net eth1: Total number of descrs=256
- net eth1: Chain tail located at descr=255
- net eth1: Chain head is at 255
- net eth1: HW curr desc (GDACTDPA) is at 0
- net eth1: Have 1 descrs with stat=xa0800000
- net eth1: HW next desc (GDACNEXTDA) is at 1
- net eth1: Have 127 descrs with stat=x40800101
- net eth1: Have 1 descrs with stat=x40800001
- net eth1: Have 126 descrs with stat=x40800101
- net eth1: Last 1 descrs with stat=xa0800000
-
-Both the tail and head pointers are pointing at descr 255, which is
-marked xa... which is "empty". Thus, from the OS point of view, there
-is nothing to be done. In particular, there is the implicit assumption
-that everything in front of the "empty" descr must surely also be empty,
-as explained in the last section. The OS is waiting for descr 255 to
-become non-empty, which, in this case, will never happen.
-
-The HW pointer is at descr 0. This descr is marked 0x4.. or "full".
-Since its already full, the hardware can do nothing more, and thus has
-halted processing. Notice that descrs 0 through 254 are all marked
-"full", while descr 254 and 255 are empty. (The "Last 1 descrs" is
-descr 254, since tail was at 255.) Thus, the system is deadlocked,
-and there can be no forward progress; the OS thinks there's nothing
-to do, and the hardware has nowhere to put incoming data.
-
-This bug/feature is worked around with the spider_net_resync_head_ptr()
-routine. When the driver receives RX interrupts, but an examination
-of the RX chain seems to show it is empty, then it is probable that
-the hardware has skipped a descr or two (sometimes dozens under heavy
-network conditions). The spider_net_resync_head_ptr() subroutine will
-search the ring for the next full descr, and the driver will resume
-operations there. Since this will leave "holes" in the ring, there
-is also a spider_net_resync_tail_ptr() that will skip over such holes.
-
-As of this writing, the spider_net_resync() strategy seems to work very
-well, even under heavy network loads.
-
-
-The TX ring
-===========
-The TX ring uses a low-watermark interrupt scheme to make sure that
-the TX queue is appropriately serviced for large packet sizes.
-
-For packet sizes greater than about 1KBytes, the kernel can fill
-the TX ring quicker than the device can drain it. Once the ring
-is full, the netdev is stopped. When there is room in the ring,
-the netdev needs to be reawakened, so that more TX packets are placed
-in the ring. The hardware can empty the ring about four times per jiffy,
-so its not appropriate to wait for the poll routine to refill, since
-the poll routine runs only once per jiffy. The low-watermark mechanism
-marks a descr about 1/4th of the way from the bottom of the queue, so
-that an interrupt is generated when the descr is processed. This
-interrupt wakes up the netdev, which can then refill the queue.
-For large packets, this mechanism generates a relatively small number
-of interrupts, about 1K/sec. For smaller packets, this will drop to zero
-interrupts, as the hardware can empty the queue faster than the kernel
-can fill it.
diff --git a/Documentation/networking/device_drivers/index.rst b/Documentation/networking/device_drivers/index.rst
index 0dd30a84ce25..a254af25b7ef 100644
--- a/Documentation/networking/device_drivers/index.rst
+++ b/Documentation/networking/device_drivers/index.rst
@@ -9,7 +9,6 @@ Contents:
:maxdepth: 2
atm/index
- cable/index
can/index
cellular/index
ethernet/index
diff --git a/Documentation/networking/devlink/bnxt.rst b/Documentation/networking/devlink/bnxt.rst
index a4fb27663cd6..9a8b3d76d11f 100644
--- a/Documentation/networking/devlink/bnxt.rst
+++ b/Documentation/networking/devlink/bnxt.rst
@@ -24,6 +24,8 @@ Parameters
- Permanent
* - ``enable_remote_dev_reset``
- Runtime
+ * - ``enable_roce``
+ - Permanent
The ``bnxt`` driver also implements the following driver-specific
parameters.
diff --git a/Documentation/networking/devlink/ice.rst b/Documentation/networking/devlink/ice.rst
index e3972d03cea0..792e9f8c846a 100644
--- a/Documentation/networking/devlink/ice.rst
+++ b/Documentation/networking/devlink/ice.rst
@@ -69,6 +69,17 @@ Parameters
To verify that value has been set:
$ devlink dev param show pci/0000:16:00.0 name tx_scheduling_layers
+ * - ``msix_vec_per_pf_max``
+ - driverinit
+ - Set the max MSI-X that can be used by the PF; the rest can be utilized
+ for SR-IOV. The range is from the min value set in msix_vec_per_pf_min
+ to 2k / number of ports.
+ * - ``msix_vec_per_pf_min``
+ - driverinit
+ - Set the min MSI-X that will be used by the PF. This value informs how
+ many MSI-X vectors will be allocated statically. The range is from 2 to
+ the value set in msix_vec_per_pf_max (see the example after this table).
+
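+ As an illustrative sketch (the PCI address and values are examples only),
+ the range could be set and applied with a driverinit reload::
+
+ $ devlink dev param set pci/0000:16:00.0 name msix_vec_per_pf_min value 2 cmode driverinit
+ $ devlink dev param set pci/0000:16:00.0 name msix_vec_per_pf_max value 128 cmode driverinit
+ $ devlink dev reload pci/0000:16:00.0 action driver_reinit
+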
.. list-table:: Driver specific parameters implemented
:widths: 5 5 90
diff --git a/Documentation/networking/devlink/mlx5.rst b/Documentation/networking/devlink/mlx5.rst
index 41618538fc70..7febe0aecd53 100644
--- a/Documentation/networking/devlink/mlx5.rst
+++ b/Documentation/networking/devlink/mlx5.rst
@@ -280,6 +280,10 @@ Description of the vnic counters:
number of packets handled by the VNIC experiencing unexpected steering
failure (at any point in steering flow owned by the VNIC, including the FDB
for the eswitch owner).
+- icm_consumption
+ amount of Interconnect Host Memory (ICM) consumed by the vnic at a
+ granularity of 4KB. ICM is host memory allocated by SW upon HCA request
+ and is used for storing data structures that control HCA operation.
User commands examples:
diff --git a/Documentation/networking/devlink/sfc.rst b/Documentation/networking/devlink/sfc.rst
index db64a1bd9733..0398d59ea184 100644
--- a/Documentation/networking/devlink/sfc.rst
+++ b/Documentation/networking/devlink/sfc.rst
@@ -5,7 +5,7 @@ sfc devlink support
===================
This document describes the devlink features implemented by the ``sfc``
-device driver for the ef100 device.
+device driver for the ef10 and ef100 devices.
Info versions
=============
@@ -18,6 +18,10 @@ The ``sfc`` driver reports the following versions
* - Name
- Type
- Description
+ * - ``fw.bundle_id``
+ - stored
+ - Version of the firmware "bundle" image that was last used to update
+ multiple components.
* - ``fw.mgmt.suc``
- running
- For boards where the management function is split between multiple
@@ -55,3 +59,13 @@ The ``sfc`` driver reports the following versions
* - ``fw.uefi``
- running
- UEFI driver version (No UNDI support).
+
+Flash Update
+============
+
+The ``sfc`` driver implements support for flash update using the
+``devlink-flash`` interface. It supports updating the device flash using a
+combined flash image ("bundle") that contains multiple components (on ef10,
+typically ``fw.mgmt``, ``fw.app``, ``fw.exprom`` and ``fw.uefi``).
+
+The driver does not support any overwrite mask flags.
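+
+As an illustrative sketch only (the device address and image name are
+examples), a bundle image placed under /lib/firmware could be written with::
+
+  $ devlink dev flash pci/0000:03:00.0 file sfc_bundle.bin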
diff --git a/Documentation/networking/devmem.rst b/Documentation/networking/devmem.rst
index d95363645331..eb678ca45496 100644
--- a/Documentation/networking/devmem.rst
+++ b/Documentation/networking/devmem.rst
@@ -256,7 +256,7 @@ Testing
=======
More realistic example code can be found in the kernel source under
-``tools/testing/selftests/net/ncdevmem.c``
+``tools/testing/selftests/drivers/net/hw/ncdevmem.c``
ncdevmem is a devmem TCP netcat. It works very similarly to netcat, but
receives data directly into a udmabuf.
@@ -268,8 +268,7 @@ ncdevmem has a validation mode as well that expects a repeating pattern of
incoming data and validates it as such. For example, you can launch
ncdevmem on the server by::
- ncdevmem -s <server IP> -c <client IP> -f eth1 -d 3 -n 0000:06:00.0 -l \
- -p 5201 -v 7
+ ncdevmem -s <server IP> -c <client IP> -f <ifname> -l -p 5201 -v 7
On client side, use regular netcat to send TX data to ncdevmem process
on the server::
diff --git a/Documentation/networking/ethtool-netlink.rst b/Documentation/networking/ethtool-netlink.rst
index 3770a2294509..b6e9af4d0f1b 100644
--- a/Documentation/networking/ethtool-netlink.rst
+++ b/Documentation/networking/ethtool-netlink.rst
@@ -1934,7 +1934,7 @@ ETHTOOL_A_RSS_INDIR attribute returns RSS indirection table where each byte
indicates queue number.
ETHTOOL_A_RSS_INPUT_XFRM attribute is a bitmap indicating the type of
transformation applied to the input protocol fields before given to the RSS
-hfunc. Current supported option is symmetric-xor.
+hfunc. Currently supported options are symmetric-xor and symmetric-or-xor.
PLCA_GET_CFG
============
diff --git a/Documentation/networking/ip-sysctl.rst b/Documentation/networking/ip-sysctl.rst
index 363b4950d542..5c63ab928b97 100644
--- a/Documentation/networking/ip-sysctl.rst
+++ b/Documentation/networking/ip-sysctl.rst
@@ -705,6 +705,8 @@ tcp_retries2 - INTEGER
seconds and is a lower bound for the effective timeout.
TCP will effectively time out at the first RTO which exceeds the
hypothetical timeout.
+ If tcp_rto_max_ms is decreased, it is recommended to also
+ change tcp_retries2.
RFC 1122 recommends at least 100 seconds for the timeout,
which corresponds to a value of at least 8.
@@ -1227,8 +1229,8 @@ tcp_pingpong_thresh - INTEGER
tcp_rto_min_us - INTEGER
Minimal TCP retransmission timeout (in microseconds). Note that the
rto_min route option has the highest precedence for configuring this
- setting, followed by the TCP_BPF_RTO_MIN socket option, followed by
- this tcp_rto_min_us sysctl.
+ setting, followed by the TCP_BPF_RTO_MIN and TCP_RTO_MIN_US socket
+ options, followed by this tcp_rto_min_us sysctl.
The recommended practice is to use a value less or equal to 200000
microseconds.
@@ -1237,6 +1239,17 @@ tcp_rto_min_us - INTEGER
Default: 200000
+tcp_rto_max_ms - INTEGER
+ Maximal TCP retransmission timeout (in ms).
+	Note that the TCP_RTO_MAX_MS socket option has higher precedence.
+
+ When changing tcp_rto_max_ms, it is important to understand
+ that tcp_retries2 might need a change.
+
+ Possible Values: 1000 - 120,000
+
+ Default: 120,000
+
UDP variables
=============
diff --git a/Documentation/networking/j1939.rst b/Documentation/networking/j1939.rst
index 544bad175aae..45f02efe3df5 100644
--- a/Documentation/networking/j1939.rst
+++ b/Documentation/networking/j1939.rst
@@ -66,6 +66,90 @@ the library exclusively, or by the in-kernel system exclusively.
J1939 concepts
==============
+Data Sent to the J1939 Stack
+----------------------------
+
+The data buffers sent to the J1939 stack from user space are not CAN frames
+themselves. Instead, they are payloads that the J1939 stack converts into
+proper CAN frames based on the size of the buffer and the type of transfer. The
+size of the buffer influences how the stack processes the data and determines
+the internal code path used for the transfer.
+
+**Handling of Different Buffer Sizes:**
+
+- **Buffers with a size of 8 bytes or less:**
+
+ - These are handled as simple sessions internally within the stack.
+
+ - The stack converts the buffer directly into a single CAN frame without
+ fragmentation.
+
+ - This type of transfer does not require an actual client (receiver) on the
+ receiving side.
+
+- **Buffers up to 1785 bytes:**
+
+ - These are automatically handled as J1939 Transport Protocol (TP) transfers.
+
+ - Internally, the stack splits the buffer into multiple 8-byte CAN frames.
+
+ - TP transfers can be unicast or broadcast.
+
+ - **Broadcast TP:** Does not require a receiver on the other side and can be
+ used in broadcast scenarios.
+
+ - **Unicast TP:** Requires an active receiver (client) on the other side to
+ acknowledge the transfer.
+
+- **Buffers from 1786 bytes up to 111 MiB:**
+
+ - These are handled as ISO 11783 Extended Transport Protocol (ETP) transfers.
+
+ - ETP transfers are used for larger payloads and are split into multiple CAN
+ frames internally.
+
+ - **ETP transfers (unicast):** Require a receiver on the other side to
+ process the incoming data and acknowledge each step of the transfer.
+
+ - ETP transfers cannot be broadcast like TP transfers, and always require a
+ receiver for operation.
+
+**Non-Blocking Operation with `MSG_DONTWAIT`:**
+
+The J1939 stack supports non-blocking operation when used in combination with
+the `MSG_DONTWAIT` flag. In this mode, the stack attempts to take as much data
+as the available memory for the socket allows. It returns the amount of data
+that was successfully taken, and it is the responsibility of user space to
+monitor this value and handle partial transfers.
+
+- If the stack cannot take the entire buffer, it returns the number of bytes
+ successfully taken, and user space should handle the remainder.
+
+- **Error handling:** When using `MSG_DONTWAIT`, the user must rely on the
+ error queue to detect transfer errors. See the **SO_J1939_ERRQUEUE** section
+ for details on how to subscribe to error notifications. Without the error
+ queue, there is no other way for user space to be notified of transfer errors
+ during non-blocking operations.
+
+**Behavior and Requirements:**
+
+- **Simple transfers (<= 8 bytes):** Do not require a receiver on the other
+ side, making them easy to send without needing address claiming or
+ coordination with a destination.
+
+- **Unicast TP/ETP:** Requires a receiver on the other side to complete the
+ transfer. The receiver must acknowledge the transfer for the session to
+ proceed successfully.
+
+- **Broadcast TP:** Allows sending data without a receiver, but only works for
+ TP transfers. ETP cannot be broadcast and always needs a receiving client.
+
+These different behaviors depend heavily on the size of the buffer provided to
+the stack, and the appropriate transport mechanism (TP or ETP) is selected
+based on the payload size. The stack automatically manages the fragmentation
+and reassembly of large payloads and ensures that the correct CAN frames are
+generated and transmitted for each session.
+
PGN
---
@@ -338,6 +422,459 @@ with ``cmsg_level == SOL_J1939 && cmsg_type == SCM_J1939_DEST_ADDR``,
}
}
+setsockopt(2)
+^^^^^^^^^^^^^
+
+The ``setsockopt(2)`` function is used to configure various socket-level
+options for J1939 communication. The following options are supported:
+
+``SO_J1939_FILTER``
+~~~~~~~~~~~~~~~~~~~
+
+The ``SO_J1939_FILTER`` option is essential when the default behavior of
+``bind(2)`` and ``connect(2)`` is insufficient for specific use cases. By
+default, ``bind(2)`` and ``connect(2)`` allow a socket to be associated with a
+single unicast or broadcast address. However, there are scenarios where finer
+control over the incoming messages is required, such as filtering by Parameter
+Group Number (PGN) rather than by addresses.
+
+For example, in a system where multiple types of J1939 messages are being
+transmitted, a process might only be interested in a subset of those messages,
+such as specific PGNs, and not want to receive all messages destined for its
+address or broadcast to the bus.
+
+By applying the ``SO_J1939_FILTER`` option, you can filter messages based on:
+
+- **Source Address (SA)**: Filter messages coming from specific source
+ addresses.
+
+- **Source Name**: Filter messages coming from ECUs with specific NAME
+ identifiers.
+
+- **Parameter Group Number (PGN)**: Focus on receiving messages with specific
+ PGNs, filtering out irrelevant ones.
+
+This filtering mechanism is particularly useful when:
+
+- You want to receive a subset of messages based on their PGNs, even if the
+ address is the same.
+
+- You need to handle both broadcast and unicast messages but only care about
+ certain message types or parameters.
+
+- The ``bind(2)`` and ``connect(2)`` functions only allow binding to a single
+ address, which might not be sufficient if the process needs to handle multiple
+ PGNs but does not want to open multiple sockets.
+
+To remove existing filters, you can pass ``optval == NULL`` or ``optlen == 0``
+to ``setsockopt(2)``. This will clear all currently set filters. If you want to
+**update** the set of filters, you must pass the updated filter set to
+``setsockopt(2)``, as the new filter set will **replace** the old one entirely.
+This behavior ensures that any previous filter configuration is discarded and
+only the new set is applied.
+
+Example of removing all filters:
+
+.. code-block:: c
+
+ setsockopt(sock, SOL_CAN_J1939, SO_J1939_FILTER, NULL, 0);
+
+**Maximum number of filters:** The maximum number of filters that can be
+applied using ``SO_J1939_FILTER`` is defined by ``J1939_FILTER_MAX``, which is
+set to 512. This means you can configure up to 512 individual filters to match
+your specific filtering needs.
+
+Practical use case: **Monitoring Address Claiming**
+
+One practical use case is monitoring the J1939 address claiming process by
+filtering for specific PGNs related to address claiming. This allows a process
+to monitor and handle address claims without processing unrelated messages.
+
+Example:
+
+.. code-block:: c
+
+ struct j1939_filter filt[] = {
+ {
+ .pgn = J1939_PGN_ADDRESS_CLAIMED,
+ .pgn_mask = J1939_PGN_PDU1_MAX,
+ }, {
+ .pgn = J1939_PGN_REQUEST,
+ .pgn_mask = J1939_PGN_PDU1_MAX,
+ }, {
+ .pgn = J1939_PGN_ADDRESS_COMMANDED,
+ .pgn_mask = J1939_PGN_MAX,
+ },
+ };
+ setsockopt(sock, SOL_CAN_J1939, SO_J1939_FILTER, &filt, sizeof(filt));
+
+In this example, the socket will only receive messages with the PGNs related to
+address claiming: ``J1939_PGN_ADDRESS_CLAIMED``, ``J1939_PGN_REQUEST``, and
+``J1939_PGN_ADDRESS_COMMANDED``. This is particularly useful in scenarios where
+you want to monitor and process address claims without being overwhelmed by
+other traffic on the J1939 network.
+
+``SO_J1939_PROMISC``
+~~~~~~~~~~~~~~~~~~~~
+
+The ``SO_J1939_PROMISC`` option enables socket-level promiscuous mode. When
+this option is enabled, the socket will receive all J1939 traffic, regardless
+of any filters set by ``bind()`` or ``connect()``. This is analogous to
+enabling promiscuous mode for an Ethernet interface, where all traffic on the
+network segment is captured.
+
+However, **`SO_J1939_FILTER` has a higher priority** compared to
+``SO_J1939_PROMISC``. This means that even in promiscuous mode, you can reduce
+the number of packets received by applying specific filters with
+`SO_J1939_FILTER`. The filters will limit which packets are passed to the
+socket, allowing for more refined traffic selection while promiscuous mode is
+active.
+
+The acceptable value size for this option is ``sizeof(int)``, and the value is
+only differentiated between `0` and non-zero. A value of `0` disables
+promiscuous mode, while any non-zero value enables it.
+
+This combination can be useful for debugging or monitoring specific types of
+traffic while still capturing a broad set of messages.
+
+Example:
+
+.. code-block:: c
+
+ int value = 1;
+ setsockopt(sock, SOL_CAN_J1939, SO_J1939_PROMISC, &value, sizeof(value));
+
+In this example, setting ``value`` to any non-zero value (e.g., `1`) enables
+promiscuous mode, allowing the socket to receive all J1939 traffic on the
+network.
+
+``SO_BROADCAST``
+~~~~~~~~~~~~~~~~
+
+The ``SO_BROADCAST`` option enables the sending and receiving of broadcast
+messages. By default, broadcast messages are disabled for J1939 sockets. When
+this option is enabled, the socket will be allowed to send and receive
+broadcast packets on the J1939 network.
+
+Due to the nature of the CAN bus as a shared medium, all messages transmitted
+on the bus are visible to all participants. In the context of J1939,
+broadcasting refers to using a specific destination address field, where the
+destination address is set to a value that indicates the message is intended
+for all participants (usually a global address such as 0xFF). Enabling the
+broadcast option allows the socket to send and receive such broadcast messages.
+
+The acceptable value size for this option is ``sizeof(int)``, and the value is
+only differentiated between `0` and non-zero. A value of `0` disables the
+ability to send and receive broadcast messages, while any non-zero value
+enables it.
+
+Example:
+
+.. code-block:: c
+
+ int value = 1;
+ setsockopt(sock, SOL_SOCKET, SO_BROADCAST, &value, sizeof(value));
+
+In this example, setting ``value`` to any non-zero value (e.g., `1`) enables
+the socket to send and receive broadcast messages.
+
+``SO_J1939_SEND_PRIO``
+~~~~~~~~~~~~~~~~~~~~~~
+
+The ``SO_J1939_SEND_PRIO`` option sets the priority of outgoing J1939 messages
+for the socket. In J1939, messages can have different priorities, and lower
+numerical values indicate higher priority. This option allows the user to
+control the priority of messages sent from the socket by adjusting the priority
+bits in the CAN identifier.
+
+The acceptable value **size** for this option is ``sizeof(int)``, and the value
+is expected to be in the range of 0 to 7, where `0` is the highest priority,
+and `7` is the lowest. By default, the priority is set to `6` if this option is
+not explicitly configured.
+
+Note that the priority values `0` and `1` can only be set if the process has
+the `CAP_NET_ADMIN` capability. These are reserved for high-priority traffic
+and require administrative privileges.
+
+Example:
+
+.. code-block:: c
+
+ int prio = 3; // Priority value between 0 (highest) and 7 (lowest)
+ setsockopt(sock, SOL_CAN_J1939, SO_J1939_SEND_PRIO, &prio, sizeof(prio));
+
+In this example, the priority is set to `3`, meaning the outgoing messages will
+be sent with a moderate priority level.
+
+``SO_J1939_ERRQUEUE``
+~~~~~~~~~~~~~~~~~~~~~
+
+The ``SO_J1939_ERRQUEUE`` option enables the socket to receive error messages
+from the error queue, providing diagnostic information about transmission
+failures, protocol violations, or other issues that occur during J1939
+communication. Once this option is set, user space is required to handle
+``MSG_ERRQUEUE`` messages.
+
+Setting ``SO_J1939_ERRQUEUE`` to ``0`` will purge any currently present error
+messages in the error queue. When enabled, error messages can be retrieved
+using the ``recvmsg(2)`` system call.
+
+When subscribing to the error queue, the following error events can be
+accessed:
+
+- **``J1939_EE_INFO_TX_ABORT``**: Transmission abort errors.
+- **``J1939_EE_INFO_RX_RTS``**: Reception of RTS (Request to Send) control
+ frames.
+- **``J1939_EE_INFO_RX_DPO``**: Reception of data packets with Data Page Offset
+ (DPO).
+- **``J1939_EE_INFO_RX_ABORT``**: Reception abort errors.
+
+The error queue can be used to correlate errors with specific message transfer
+sessions using the session ID (``tskey``). The session ID is assigned via the
+``SOF_TIMESTAMPING_OPT_ID`` flag, which is set by enabling the
+``SO_TIMESTAMPING`` option.
+
+If ``SO_J1939_ERRQUEUE`` is activated, the user is required to pull messages
+from the error queue, meaning that using plain ``recv(2)`` is not sufficient
+anymore. The user must use ``recvmsg(2)`` with appropriate flags to handle
+error messages. Failure to do so can result in the socket becoming blocked with
+unprocessed error messages in the queue.
+
+It is **recommended** that ``SO_J1939_ERRQUEUE`` be used in combination with
+``SO_TIMESTAMPING`` in most cases. This enables proper error handling along
+with session tracking and timestamping, providing a more detailed analysis of
+message transfers and errors.
+
+The acceptable value **size** for this option is ``sizeof(int)``, and the value
+is only differentiated between ``0`` and non-zero. A value of ``0`` disables
+error queue reception and purges any existing error messages, while any
+non-zero value enables it.
+
+Example:
+
+.. code-block:: c
+
+ int enable = 1; // Enable error queue reception
+ setsockopt(sock, SOL_CAN_J1939, SO_J1939_ERRQUEUE, &enable, sizeof(enable));
+
+ // Enable timestamping with session tracking via tskey
+ int timestamping = SOF_TIMESTAMPING_OPT_ID | SOF_TIMESTAMPING_TX_ACK |
+ SOF_TIMESTAMPING_TX_SCHED |
+ SOF_TIMESTAMPING_RX_SOFTWARE | SOF_TIMESTAMPING_OPT_CMSG;
+ setsockopt(sock, SOL_SOCKET, SO_TIMESTAMPING, &timestamping,
+ sizeof(timestamping));
+
+When enabled, error messages can be retrieved using ``recvmsg(2)``. By
+combining ``SO_J1939_ERRQUEUE`` with ``SO_TIMESTAMPING`` (with
+``SOF_TIMESTAMPING_OPT_ID`` and ``SOF_TIMESTAMPING_OPT_CMSG`` enabled), the
+user can track message transfers, retrieve precise timestamps, and correlate
+errors with specific sessions.
+
+For more information on enabling timestamps and session tracking, refer to the
+`SO_TIMESTAMPING` section.
+
+``SO_TIMESTAMPING``
+~~~~~~~~~~~~~~~~~~~
+
+The ``SO_TIMESTAMPING`` option allows the socket to receive timestamps for
+various events related to message transmissions and receptions in J1939. This
+option is often used in combination with ``SO_J1939_ERRQUEUE`` to provide
+detailed diagnostic information, session tracking, and precise timing data for
+message transfers.
+
+In J1939, all payloads provided by user space, regardless of size, are
+processed by the kernel as **sessions**. This includes both single-frame
+messages (up to 8 bytes) and multi-frame protocols such as the Transport
+Protocol (TP) and Extended Transport Protocol (ETP). Even for small,
+single-frame messages, the kernel creates a session to manage the transmission
+and reception. The concept of sessions allows the kernel to manage various
+aspects of the protocol, such as reassembling multi-frame messages and tracking
+the status of transmissions.
+
+When receiving extended error messages from the error queue, the error
+information is delivered through a `struct sock_extended_err`, accessible via
+the control message (``cmsg``) retrieved using the ``recvmsg(2)`` system call.
+
+There are two typical origins for the extended error messages in J1939:
+
+1. ``serr->ee_origin == SO_EE_ORIGIN_TIMESTAMPING``:
+
+ In this case, the `serr->ee_info` field will contain one of the following
+ timestamp types:
+
+ - ``SCM_TSTAMP_SCHED``: This timestamp is valid for Extended Transport
+ Protocol (ETP) transfers and simple transfers (8 bytes or less). It
+ indicates when a message or set of frames has been scheduled for
+ transmission.
+
+ - For simple transfers (8 bytes or less), it marks the point when the
+ message is queued and ready to be sent onto the CAN bus.
+
+ - For ETP transfers, it is sent after receiving a CTS (Clear to Send)
+ frame on the sender side, indicating that a new set of frames has been
+ scheduled for transmission.
+
+ - The Transport Protocol (TP) case is currently not implemented for this
+ timestamp.
+
+ - On the receiver side, the counterpart to this event for ETP is
+ represented by the ``J1939_EE_INFO_RX_DPO`` message, which indicates the
+ reception of a Data Page Offset (DPO) control frame.
+
+ - ``SCM_TSTAMP_ACK``: This timestamp indicates the acknowledgment of the
+ message or session.
+
+ - For simple transfers (8 bytes or less), it marks when the message has
+ been sent and an echo confirmation has been received from the CAN
+ controller, indicating that the frame was transmitted onto the bus.
+
+ - For multi-frame transfers (TP or ETP), it signifies that the entire
+ session has been acknowledged, typically after receiving the End of
+ Message Acknowledgment (EOMA) packet.
+
+2. ``serr->ee_origin == SO_EE_ORIGIN_LOCAL``:
+
+ In this case, the `serr->ee_info` field will contain one of the following
+ J1939 stack-specific message types:
+
+ - ``J1939_EE_INFO_TX_ABORT``: This message indicates that the transmission
+ of a message or session was aborted. The cause of the abort can come from
+ various sources:
+
+ - **CAN stack failure**: The J1939 stack was unable to pass the frame to
+ the CAN framework for transmission.
+
+ - **Echo failure**: The J1939 stack did not receive an echo confirmation
+ from the CAN controller, meaning the frame may not have been successfully
+ transmitted to the CAN bus.
+
+ - **Protocol-level issues**: For multi-frame transfers (TP/ETP), this
+ could include protocol-related errors, such as an abort signaled by the
+ receiver or a timeout at the protocol level, which causes the session to
+ terminate prematurely.
+
+     - The corresponding error code is stored in ``serr->ee_errno``
+       (``session->err`` on the kernel side), providing additional details about
+ the specific reason for the abort.
+
+ - ``J1939_EE_INFO_RX_RTS``: This message indicates that the J1939 stack has
+ received a Request to Send (RTS) control frame, signaling the start of a
+ multi-frame transfer using the Transport Protocol (TP) or Extended
+ Transport Protocol (ETP).
+
+ - It informs the receiver that the sender is ready to transmit a
+ multi-frame message and includes details about the total message size
+ and the number of frames to be sent.
+
+ - Statistics such as ``J1939_NLA_TOTAL_SIZE``, ``J1939_NLA_PGN``,
+ ``J1939_NLA_SRC_NAME``, and ``J1939_NLA_DEST_NAME`` are provided along
+ with the ``J1939_EE_INFO_RX_RTS`` message, giving detailed information
+ about the incoming transfer.
+
+ - ``J1939_EE_INFO_RX_DPO``: This message indicates that the J1939 stack has
+ received a Data Page Offset (DPO) control frame, which is part of the
+ Extended Transport Protocol (ETP).
+
+ - The DPO frame signals the continuation of an ETP multi-frame message by
+ indicating the offset position in the data being transferred. It helps
+ the receiver manage large data sets by identifying which portion of the
+ message is being received.
+
+ - It is typically paired with a corresponding ``SCM_TSTAMP_SCHED`` event
+ on the sender side, which indicates when the next set of frames is
+ scheduled for transmission.
+
+ - This event includes statistics such as ``J1939_NLA_BYTES_ACKED``, which
+ tracks the number of bytes acknowledged up to that point in the session.
+
+ - ``J1939_EE_INFO_RX_ABORT``: This message indicates that the reception of a
+ multi-frame message (Transport Protocol or Extended Transport Protocol) has
+ been aborted.
+
+ - The abort can be triggered by protocol-level errors such as timeouts, an
+ unexpected frame, or a specific abort request from the sender.
+
+ - This message signals that the receiver cannot continue processing the
+ transfer, and the session is terminated.
+
+     - The corresponding error code is stored in ``serr->ee_errno``
+       (``session->err`` on the kernel side), providing further details about the
+ reason for the abort, such as protocol violations or timeouts.
+
+ - After receiving this message, the receiver discards the partially received
+ frames, and the multi-frame session is considered incomplete.
+
+In both cases, if ``SOF_TIMESTAMPING_OPT_ID`` is enabled, ``serr->ee_data``
+will be set to the session’s unique identifier (``session->tskey``). This
+allows user space to track message transfers by their session identifier across
+multiple frames or stages.
+
+In all other cases, ``serr->ee_errno`` will be set to ``ENOMSG``, except for
+the ``J1939_EE_INFO_TX_ABORT`` and ``J1939_EE_INFO_RX_ABORT`` cases, where the
+kernel sets ``serr->ee_errno`` to the error stored in ``session->err``. All
+protocol-specific errors are converted to standard kernel error values and
+stored in ``session->err``. These error values are unified across system calls
+and ``serr->ee_errno``. Some of the known error values are described in the
+`Error Codes in the J1939 Stack` section.
+
+When the `J1939_EE_INFO_RX_RTS` message is provided, it will include the
+following statistics for multi-frame messages (TP and ETP):
+
+ - ``J1939_NLA_TOTAL_SIZE``: Total size of the message in the session.
+ - ``J1939_NLA_PGN``: Parameter Group Number (PGN) identifying the message type.
+ - ``J1939_NLA_SRC_NAME``: 64-bit name of the source ECU.
+ - ``J1939_NLA_DEST_NAME``: 64-bit name of the destination ECU.
+ - ``J1939_NLA_SRC_ADDR``: 8-bit source address of the sending ECU.
+ - ``J1939_NLA_DEST_ADDR``: 8-bit destination address of the receiving ECU.
+
+- For other messages (including single-frame messages), only the following
+ statistic is included:
+
+ - ``J1939_NLA_BYTES_ACKED``: Number of bytes successfully acknowledged in the
+ session.
+
+The key flags for ``SO_TIMESTAMPING`` include:
+
+- ``SOF_TIMESTAMPING_OPT_ID``: Enables the use of a unique session identifier
+ (``tskey``) for each transfer. This identifier helps track message transfers
+ and errors as distinct sessions in user space. When this option is enabled,
+ ``serr->ee_data`` will be set to ``session->tskey``.
+
+- ``SOF_TIMESTAMPING_OPT_CMSG``: Sends timestamp information through control
+ messages (``struct scm_timestamping``), allowing the application to retrieve
+ timestamps alongside the data.
+
+- ``SOF_TIMESTAMPING_TX_SCHED``: Provides the timestamp for when a message is
+ scheduled for transmission (``SCM_TSTAMP_SCHED``).
+
+- ``SOF_TIMESTAMPING_TX_ACK``: Provides the timestamp for when a message
+ transmission is fully acknowledged (``SCM_TSTAMP_ACK``).
+
+- ``SOF_TIMESTAMPING_RX_SOFTWARE``: Provides timestamps for reception-related
+ events (e.g., ``J1939_EE_INFO_RX_RTS``, ``J1939_EE_INFO_RX_DPO``,
+ ``J1939_EE_INFO_RX_ABORT``).
+
+These flags enable detailed monitoring of message lifecycles, including
+transmission scheduling, acknowledgments, reception timestamps, and gathering
+detailed statistics about the communication session, especially for multi-frame
+payloads like TP and ETP.
+
+Example:
+
+.. code-block:: c
+
+ // Enable timestamping with various options, including session tracking and
+ // statistics
+ int sock_opt = SOF_TIMESTAMPING_OPT_CMSG |
+ SOF_TIMESTAMPING_TX_ACK |
+ SOF_TIMESTAMPING_TX_SCHED |
+ SOF_TIMESTAMPING_OPT_ID |
+ SOF_TIMESTAMPING_RX_SOFTWARE;
+
+ setsockopt(sock, SOL_SOCKET, SO_TIMESTAMPING, &sock_opt, sizeof(sock_opt));
+
+
+
Dynamic Addressing
------------------
@@ -458,3 +995,141 @@ Send:
};
sendto(sock, dat, sizeof(dat), 0, (const struct sockaddr *)&saddr, sizeof(saddr));
+
+
+Error Codes in the J1939 Stack
+------------------------------
+
+This section lists all potential kernel error codes that can be exposed to user
+space when interacting with the J1939 stack. It includes both standard error
+codes and those derived from protocol-specific abort codes.
+
+- ``EAGAIN``: Operation would block; retry may succeed. One common reason is
+ that an active TP or ETP session exists, and an attempt was made to start a
+ new overlapping TP or ETP session between the same peers.
+
+- ``ENETDOWN``: Network is down. This occurs when the CAN interface is switched
+ to the "down" state.
+
+- ``ENOBUFS``: No buffer space available. This error occurs when the CAN
+ interface's transmit (TX) queue is full, and no more messages can be queued.
+
+- ``EOVERFLOW``: Value too large for defined data type. In J1939, this can
+ happen if the requested data lies outside of the queued buffer. For example,
+ if a CTS (Clear to Send) requests an offset not available in the kernel buffer
+ because user space did not provide enough data.
+
+- ``EBUSY``: Device or resource is busy. For example, this occurs if an
+ identical session is already active and the stack is unable to recover from
+ the condition.
+
+- ``EACCES``: Permission denied. This error can occur, for example, when
+ attempting to send broadcast messages, but the socket is not configured with
+ ``SO_BROADCAST``.
+
+- ``EADDRNOTAVAIL``: Address not available. This error occurs in cases such as:
+
+ - When attempting to use ``getsockname(2)`` to retrieve the peer's address,
+ but the socket is not connected.
+
+ - When trying to send data to or from a NAME, but address claiming for the
+ NAME was not performed or detected by the stack.
+
+- ``EBADFD``: File descriptor in bad state. This error can occur if:
+
+ - Attempting to send data to an unbound socket.
+
+ - The socket is bound but has no source name, and the source address is
+ ``J1939_NO_ADDR``.
+
+ - The ``can_ifindex`` is incorrect.
+
+- ``EFAULT``: Bad address. Occurs mostly when the stack can't copy from or to a
+ sockptr, when there is insufficient data from user space, or when the buffer
+ provided by user space is not large enough for the requested data.
+
+- ``EINTR``: A signal occurred before any data was transmitted; see ``signal(7)``.
+
+- ``EINVAL``: Invalid argument passed. For example:
+
+ - ``msg->msg_namelen`` is less than ``J1939_MIN_NAMELEN``.
+
+ - ``addr->can_family`` is not equal to ``AF_CAN``.
+
+ - An incorrect PGN was provided.
+
+- ``ENODEV``: No such device. This happens when the CAN network device cannot
+ be found for the provided ``can_ifindex`` or if ``can_ifindex`` is 0.
+
+- ``ENOMEM``: Out of memory. Typically related to issues with memory allocation
+ in the stack.
+
+- ``ENOPROTOOPT``: Protocol not available. This can occur when using
+ ``getsockopt(2)`` or ``setsockopt(2)`` if the requested socket option is not
+ available.
+
+- ``EDESTADDRREQ``: Destination address required. This error occurs:
+
+ - In the case of ``connect(2)``, if the ``struct sockaddr *uaddr`` is ``NULL``.
+
+ - In the case of ``send*(2)``, if there is an attempt to send an ETP message
+ to a broadcast address.
+
+- ``EDOM``: Argument out of domain. This error may happen if attempting to send
+ a TP or ETP message to a PGN that is reserved for control PGNs for TP or ETP
+ operations.
+
+- ``EIO``: I/O error. This can occur if the amount of data provided to the
+ socket for a TP or ETP session does not match the announced amount of data for
+ the session.
+
+- ``ENOENT``: No such file or directory. This can happen when the stack
+ attempts to transfer CTS or EOMA but cannot find a matching receiving socket
+ anymore.
+
+- ``ENOIOCTLCMD``: No ioctls are available for the socket layer.
+
+- ``EPERM``: Operation not permitted. For example, this can occur if a
+ requested action requires ``CAP_NET_ADMIN`` privileges.
+
+- ``ENETUNREACH``: Network unreachable. Most likely, this occurs when frames
+ cannot be transmitted to the CAN bus.
+
+- ``ETIME``: Timer expired. This can happen if a timeout occurs while
+ attempting to send a simple message, for example, when an echo message from
+ the controller is not received.
+
+- ``EPROTO``: Protocol error.
+
+ - Used for various protocol-level errors in J1939, including:
+
+ - Duplicate sequence number.
+
+ - Unexpected EDPO or ECTS packet.
+
+ - Invalid PGN or offset in EDPO/ECTS.
+
+ - Number of EDPO packets exceeded CTS allowance.
+
+ - Any other protocol-level error.
+
+- ``EMSGSIZE``: Message too long.
+
+- ``ENOMSG``: No message available.
+
+- ``EALREADY``: The ECU is already engaged in one or more connection-managed
+ sessions and cannot support another.
+
+- ``EHOSTUNREACH``: A timeout occurred, and the session was aborted.
+
+- ``EBADMSG``: CTS (Clear to Send) messages were received during an active data
+ transfer, causing an abort.
+
+- ``ENOTRECOVERABLE``: The maximum retransmission request limit was reached,
+ and the session cannot recover.
+
+- ``ENOTCONN``: An unexpected data transfer packet was received.
+
+- ``EILSEQ``: A bad sequence number was received, and the software could not
+ recover.
+
diff --git a/Documentation/networking/kcm.rst b/Documentation/networking/kcm.rst
index db0f5560ac1c..71f44d0beaa3 100644
--- a/Documentation/networking/kcm.rst
+++ b/Documentation/networking/kcm.rst
@@ -200,7 +200,7 @@ while. Example use::
setsockopt(kcmfd, SOL_KCM, KCM_RECV_DISABLE, &val, sizeof(val))
-BFP programs for message delineation
+BPF programs for message delineation
------------------------------------
BPF programs can be compiled using the BPF LLVM backend. For example,
diff --git a/Documentation/networking/mptcp-sysctl.rst b/Documentation/networking/mptcp-sysctl.rst
index 03e1d3610333..5bfab01eff5a 100644
--- a/Documentation/networking/mptcp-sysctl.rst
+++ b/Documentation/networking/mptcp-sysctl.rst
@@ -30,6 +30,10 @@ allow_join_initial_addr_port - BOOLEAN
Default: 1
+available_path_managers - STRING
+ Shows the available path managers choices that are registered. More
+ path managers may be available, but not loaded.
+
available_schedulers - STRING
Shows the available schedulers choices that are registered. More packet
schedulers may be available, but not loaded.
@@ -72,6 +76,23 @@ enabled - BOOLEAN
Default: 1 (enabled)
+path_manager - STRING
+ Set the default path manager name to use for each new MPTCP
+ socket. In-kernel path management will control subflow
+ connections and address advertisements according to
+ per-namespace values configured over the MPTCP netlink
+ API. Userspace path management puts per-MPTCP-connection subflow
+ connection decisions and address advertisements under control of
+ a privileged userspace program, at the cost of more netlink
+ traffic to propagate all of the related events and commands.
+
+ This is a per-namespace sysctl.
+
+ * "kernel" - In-kernel path manager
+ * "userspace" - Userspace path manager
+
+ Default: "kernel"
+
pm_type - INTEGER
Set the default path manager type to use for each new MPTCP
socket. In-kernel path management will control subflow
@@ -84,6 +105,8 @@ pm_type - INTEGER
This is a per-namespace sysctl.
+ Deprecated since v6.15, use path_manager instead.
+
* 0 - In-kernel path manager
* 1 - Userspace path manager
diff --git a/Documentation/networking/napi.rst b/Documentation/networking/napi.rst
index f970a2be271a..d0e3953cae6a 100644
--- a/Documentation/networking/napi.rst
+++ b/Documentation/networking/napi.rst
@@ -171,12 +171,43 @@ a channel as an IRQ/NAPI which services queues of a given type. For example,
a configuration of 1 ``rx``, 1 ``tx`` and 1 ``combined`` channel is expected
to utilize 3 interrupts, 2 Rx and 2 Tx queues.
+Persistent NAPI config
+----------------------
+
+Drivers often allocate and free NAPI instances dynamically. This leads to loss
+of NAPI-related user configuration each time NAPI instances are reallocated.
+The netif_napi_add_config() API prevents this loss of configuration by
+associating each NAPI instance with a persistent NAPI configuration based on
+a driver-defined index value, like a queue number.
+
+Using this API allows for persistent NAPI IDs (among other settings), which can
+be beneficial to userspace programs using ``SO_INCOMING_NAPI_ID``. See the
+sections below for other NAPI configuration settings.
+
+Drivers should try to use netif_napi_add_config() whenever possible.
+
User API
========
User interactions with NAPI depend on NAPI instance ID. The instance IDs
are only visible to the user thru the ``SO_INCOMING_NAPI_ID`` socket option.
-It's not currently possible to query IDs used by a given device.
+
+Users can query NAPI IDs for a device or device queue using netlink. This can
+be done programmatically in a user application or by using a script included in
+the kernel source tree: ``tools/net/ynl/pyynl/cli.py``.
+
+For example, using the script to dump all of the queues for a device (which
+will reveal each queue's NAPI ID):
+
+.. code-block:: bash
+
+ $ kernel-source/tools/net/ynl/pyynl/cli.py \
+ --spec Documentation/netlink/specs/netdev.yaml \
+ --dump queue-get \
+ --json='{"ifindex": 2}'
+
+See ``Documentation/netlink/specs/netdev.yaml`` for more details on
+available operations and attributes.
Software IRQ coalescing
-----------------------
diff --git a/Documentation/networking/net_cachelines/inet_connection_sock.rst b/Documentation/networking/net_cachelines/inet_connection_sock.rst
index 4a15627fc93b..8fae85ebb773 100644
--- a/Documentation/networking/net_cachelines/inet_connection_sock.rst
+++ b/Documentation/networking/net_cachelines/inet_connection_sock.rst
@@ -12,11 +12,11 @@ struct inet_sock icsk_inet read_mostly r
struct request_sock_queue icsk_accept_queue
struct inet_bind_bucket icsk_bind_hash read_mostly tcp_set_state
struct inet_bind2_bucket icsk_bind2_hash read_mostly tcp_set_state,inet_put_port
-unsigned_long icsk_timeout read_mostly inet_csk_reset_xmit_timer,tcp_connect
-struct timer_list icsk_retransmit_timer read_mostly inet_csk_reset_xmit_timer,tcp_connect
+struct timer_list icsk_retransmit_timer read_write inet_csk_reset_xmit_timer,tcp_connect
struct timer_list icsk_delack_timer read_mostly inet_csk_reset_xmit_timer,tcp_connect
u32 icsk_rto read_write tcp_cwnd_validate,tcp_schedule_loss_probe,tcp_connect_init,tcp_connect,tcp_write_xmit,tcp_push_one
u32 icsk_rto_min
+u32 icsk_rto_max read_mostly tcp_reset_xmit_timer
u32 icsk_delack_max
u32 icsk_pmtu_cookie read_write tcp_sync_mss,tcp_current_mss,tcp_send_syn_data,tcp_connect_init,tcp_connect
struct tcp_congestion_ops icsk_ca_ops read_write tcp_cwnd_validate,tcp_tso_segs,tcp_ca_dst_init,tcp_connect_init,tcp_connect,tcp_write_xmit
@@ -38,7 +38,6 @@ struct icsk_ack_u8 quick read_write w
struct icsk_ack_u8 pingpong
struct icsk_ack_u8 retry write_mostly read_write inet_csk_clear_xmit_timer,tcp_rearm_rto,tcp_event_new_data_sent,tcp_write_xmit,__tcp_send_ack,tcp_send_ack,
struct icsk_ack_u8 ato read_mostly write_mostly tcp_dec_quickack_mode,tcp_event_ack_sent,__tcp_transmit_skb,__tcp_send_ack,tcp_send_ack
-struct icsk_ack_unsigned_long timeout read_write read_write inet_csk_reset_xmit_timer,tcp_connect
struct icsk_ack_u32 lrcvtime read_write tcp_finish_connect,tcp_connect,tcp_event_data_sent,__tcp_transmit_skb
struct icsk_ack_u16 rcv_mss write_mostly read_mostly __tcp_select_window,__tcp_cleanup_rbuf,tcp_initialize_rcv_mss,tcp_connect_init
struct icsk_mtup_int search_high read_write tcp_mtup_init,tcp_sync_mss,tcp_connect_init,tcp_mtu_check_reprobe,tcp_write_xmit
diff --git a/Documentation/networking/net_cachelines/net_device.rst b/Documentation/networking/net_cachelines/net_device.rst
index 15e31ece675f..6327e689e8a8 100644
--- a/Documentation/networking/net_cachelines/net_device.rst
+++ b/Documentation/networking/net_cachelines/net_device.rst
@@ -167,7 +167,7 @@ unsigned:1 wol_enabled
unsigned:1 threaded napi_poll(napi_enable,dev_set_threaded)
unsigned_long:1 see_all_hwtstamp_requests
unsigned_long:1 change_proto_down
-unsigned_long:1 netns_local
+unsigned_long:1 netns_immutable
unsigned_long:1 fcoe_mtu
struct list_head net_notifier_list
struct macsec_ops* macsec_ops
diff --git a/Documentation/networking/net_cachelines/netns_ipv4_sysctl.rst b/Documentation/networking/net_cachelines/netns_ipv4_sysctl.rst
index de0263302f16..6e7b20afd2d4 100644
--- a/Documentation/networking/net_cachelines/netns_ipv4_sysctl.rst
+++ b/Documentation/networking/net_cachelines/netns_ipv4_sysctl.rst
@@ -86,6 +86,7 @@ u8 sysctl_tcp_sack
u8 sysctl_tcp_window_scaling tcp_syn_options,tcp_parse_options
u8 sysctl_tcp_timestamps
u8 sysctl_tcp_early_retrans read_mostly tcp_schedule_loss_probe(tcp_write_xmit)
+u32 sysctl_tcp_rto_max_ms
u8 sysctl_tcp_recovery tcp_fastretrans_alert
u8 sysctl_tcp_thin_linear_timeouts tcp_retrans_timer(on_thin_streams)
u8 sysctl_tcp_slow_start_after_idle unlikely(tcp_cwnd_validate-network-not-starved)
diff --git a/Documentation/networking/net_cachelines/snmp.rst b/Documentation/networking/net_cachelines/snmp.rst
index 90ca2d92547d..bc96efc92cf5 100644
--- a/Documentation/networking/net_cachelines/snmp.rst
+++ b/Documentation/networking/net_cachelines/snmp.rst
@@ -36,6 +36,7 @@ unsigned_long LINUX_MIB_TIMEWAITRECYCLED
unsigned_long LINUX_MIB_TIMEWAITKILLED
unsigned_long LINUX_MIB_PAWSACTIVEREJECTED
unsigned_long LINUX_MIB_PAWSESTABREJECTED
+unsigned_long LINUX_MIB_TSECR_REJECTED
unsigned_long LINUX_MIB_DELAYEDACKLOST
unsigned_long LINUX_MIB_LISTENOVERFLOWS
unsigned_long LINUX_MIB_LISTENDROPS
diff --git a/Documentation/networking/net_cachelines/tcp_sock.rst b/Documentation/networking/net_cachelines/tcp_sock.rst
index 1f79765072b1..bc9b2131bf7a 100644
--- a/Documentation/networking/net_cachelines/tcp_sock.rst
+++ b/Documentation/networking/net_cachelines/tcp_sock.rst
@@ -27,6 +27,7 @@ u32 dsack_dups
u32 snd_una read_mostly read_write tcp_wnd_end,tcp_urg_mode,tcp_minshall_check,tcp_cwnd_validate(tx);tcp_ack,tcp_may_update_window,tcp_clean_rtx_queue(write),tcp_ack_tstamp(rx)
u32 snd_sml read_write tcp_minshall_check,tcp_minshall_update
u32 rcv_tstamp read_mostly tcp_ack
+void * tcp_clean_acked read_mostly tcp_ack
u32 lsndtime read_write tcp_slow_start_after_idle_check,tcp_event_data_sent
u32 last_oow_ack_time
u32 compressed_ack_rcv_nxt
diff --git a/Documentation/networking/netconsole.rst b/Documentation/networking/netconsole.rst
index 94c4680fdf3e..a0076b542e9c 100644
--- a/Documentation/networking/netconsole.rst
+++ b/Documentation/networking/netconsole.rst
@@ -17,6 +17,8 @@ Release prepend support by Breno Leitao <leitao@debian.org>, Jul 7 2023
Userdata append support by Matthew Wood <thepacketgeek@gmail.com>, Jan 22 2024
+Sysdata append support by Breno Leitao <leitao@debian.org>, Jan 15 2025
+
Please send bug reports to Matt Mackall <mpm@selenic.com>
Satyam Sharma <satyam.sharma@gmail.com>, and Cong Wang <xiyou.wangcong@gmail.com>
@@ -45,7 +47,7 @@ following format::
r if present, prepend kernel version (release) to the message
src-port source for UDP packets (defaults to 6665)
src-ip source IP to use (interface address)
- dev network interface (eth0)
+ dev network interface name (eth0) or MAC address
tgt-port port for logging agent (6666)
tgt-ip IP address for logging agent
tgt-macaddr ethernet MAC address for logging agent (broadcast)
@@ -62,6 +64,10 @@ or using IPv6::
insmod netconsole netconsole=@/,@fd00:1:2:3::1/
+or using a MAC address to select the egress interface::
+
+ linux netconsole=4444@10.0.0.1/22:33:44:55:66:77,9353@10.0.0.2/12:34:56:78:9a:bc
+
It also supports logging to multiple remote agents by specifying
parameters for the multiple agents separated by semicolons and the
complete string enclosed in "quotes", thusly::
@@ -238,6 +244,102 @@ Delete `userdata` entries with `rmdir`::
It is recommended to not write user data values with newlines.
+Task name auto population in userdata
+-------------------------------------
+
+Inside the netconsole configfs hierarchy, there is a file called
+`taskname_enabled` under the `userdata` directory. This file is used to enable
+or disable the automatic task name population feature. This feature
+automatically populates the current task name that is scheduled in the CPU
+sneding the message.
+
+To enable task name auto-population::
+
+ echo 1 > /sys/kernel/config/netconsole/target1/userdata/taskname_enabled
+
+When this option is enabled, the netconsole messages will include an additional
+line in the userdata field with the format `taskname=<task name>`. This allows
+the receiver of the netconsole messages to easily find which application was
+scheduled when that message was generated, providing extra context
+for kernel messages and helping to categorize them.
+
+Example::
+
+ echo "This is a message" > /dev/kmsg
+ 12,607,22085407756,-;This is a message
+ taskname=echo
+
+In this example, the message was generated while "echo" was the current
+scheduled process.
+
+Kernel release auto population in userdata
+------------------------------------------
+
+Within the netconsole configfs hierarchy, there is a file named `release_enabled`
+located in the `userdata` directory. This file controls the kernel release
+(version) auto-population feature, which appends the kernel release information
+to the userdata dictionary in every message sent.
+
+To enable the release auto-population::
+
+ echo 1 > /sys/kernel/config/netconsole/target1/userdata/release_enabled
+
+Example::
+
+ echo "This is a message" > /dev/kmsg
+ 12,607,22085407756,-;This is a message
+ release=6.14.0-rc6-01219-g3c027fbd941d
+
+.. note::
+
+ This feature provides the same data as the "release prepend" feature.
+ However, in this case, the release information is appended to the userdata
+ dictionary rather than being included in the message header.
+
+
+CPU number auto population in userdata
+--------------------------------------
+
+Inside the netconsole configfs hierarchy, there is a file called
+`cpu_nr` under the `userdata` directory. This file is used to enable or disable
+the automatic CPU number population feature. This feature automatically
+populates the number of the CPU that is sending the message.
+
+To enable the CPU number auto-population::
+
+ echo 1 > /sys/kernel/config/netconsole/target1/userdata/cpu_nr
+
+When this option is enabled, the netconsole messages will include an additional
+line in the userdata field with the format `cpu=<cpu_number>`. This allows the
+receiver of the netconsole messages to easily differentiate and demultiplex
+messages originating from different CPUs, which is particularly useful when
+dealing with parallel log output.
+
+Example::
+
+ echo "This is a message" > /dev/kmsg
+ 12,607,22085407756,-;This is a message
+ cpu=42
+
+In this example, the message was sent by CPU 42.
+
+.. note::
+
+ If the user has set a conflicting `cpu` key in the userdata dictionary,
+ both keys will be reported, with the kernel-populated entry appearing after
+ the user one. For example::
+
+ # User-defined CPU entry
+ mkdir -p /sys/kernel/config/netconsole/target1/userdata/cpu
+ echo "1" > /sys/kernel/config/netconsole/target1/userdata/cpu/value
+
+ Output might look like::
+
+ 12,607,22085407756,-;This is a message
+ cpu=1
+ cpu=42 # kernel-populated value
+
+
Extended console:
=================
diff --git a/Documentation/networking/netdevices.rst b/Documentation/networking/netdevices.rst
index 1d37038e9fbe..ebb868f50ac2 100644
--- a/Documentation/networking/netdevices.rst
+++ b/Documentation/networking/netdevices.rst
@@ -210,49 +210,55 @@ packets is preferred.
struct net_device synchronization rules
=======================================
ndo_open:
- Synchronization: rtnl_lock() semaphore.
+ Synchronization: rtnl_lock() semaphore. In addition, netdev instance
+ lock if the driver implements queue management or shaper API.
Context: process
ndo_stop:
- Synchronization: rtnl_lock() semaphore.
+ Synchronization: rtnl_lock() semaphore. In addition, netdev instance
+ lock if the driver implements queue management or shaper API.
Context: process
Note: netif_running() is guaranteed false
ndo_do_ioctl:
Synchronization: rtnl_lock() semaphore.
- Context: process
- This is only called by network subsystems internally,
- not by user space calling ioctl as it was in before
- linux-5.14.
+ This is only called by network subsystems internally,
+ not by user space calling ioctl as it was in before
+ linux-5.14.
ndo_siocbond:
- Synchronization: rtnl_lock() semaphore.
+ Synchronization: rtnl_lock() semaphore. In addition, netdev instance
+ lock if the driver implements queue management or shaper API.
Context: process
- Used by the bonding driver for the SIOCBOND family of
- ioctl commands.
+ Used by the bonding driver for the SIOCBOND family of
+ ioctl commands.
ndo_siocwandev:
- Synchronization: rtnl_lock() semaphore.
+ Synchronization: rtnl_lock() semaphore. In addition, netdev instance
+ lock if the driver implements queue management or shaper API.
Context: process
Used by the drivers/net/wan framework to handle
the SIOCWANDEV ioctl with the if_settings structure.
ndo_siocdevprivate:
- Synchronization: rtnl_lock() semaphore.
+ Synchronization: rtnl_lock() semaphore. In addition, netdev instance
+ lock if the driver implements queue management or shaper API.
Context: process
This is used to implement SIOCDEVPRIVATE ioctl helpers.
These should not be added to new drivers, so don't use.
ndo_eth_ioctl:
- Synchronization: rtnl_lock() semaphore.
+ Synchronization: rtnl_lock() semaphore. In addition, netdev instance
+ lock if the driver implements queue management or shaper API.
Context: process
ndo_get_stats:
- Synchronization: rtnl_lock() semaphore, or RCU.
+ Synchronization: RCU (can be called concurrently with the stats
+ update path).
Context: atomic (can't sleep under RCU)
ndo_start_xmit:
@@ -284,6 +290,16 @@ ndo_set_rx_mode:
Synchronization: netif_addr_lock spinlock.
Context: BHs disabled
+ndo_setup_tc:
+ ``TC_SETUP_BLOCK`` and ``TC_SETUP_FT`` are running under NFT locks
+ (i.e. no ``rtnl_lock`` and no device instance lock). The rest of
+ ``tc_setup_type`` types run under netdev instance lock if the driver
+ implements queue management or shaper API.
+
+Most ndo callbacks not specified in the list above are running
+under ``rtnl_lock``. In addition, netdev instance lock is taken as well if
+the driver implements queue management or shaper API.
+
struct napi_struct synchronization rules
========================================
napi->poll:
@@ -298,6 +314,35 @@ napi->poll:
softirq
will be called with interrupts disabled by netconsole.
+struct netdev_queue_mgmt_ops synchronization rules
+==================================================
+
+All queue management ndo callbacks are called with the netdev instance lock
+held.
+
+RTNL and netdev instance lock
+=============================
+
+Historically, all networking control operations were protected by a single
+global lock known as ``rtnl_lock``. There is an ongoing effort to replace this
+global lock with separate locks for each network namespace. Additionally,
+properties of individual netdev are increasingly protected by per-netdev locks.
+
+For device drivers that implement shaping or queue management APIs, all control
+operations will be performed under the netdev instance lock. Currently, this
+instance lock is acquired within the context of ``rtnl_lock``. Drivers
+can also explicitly request that the instance lock be acquired via
+``request_ops_lock``. In the future, there will be an option for individual
+drivers to opt out of using ``rtnl_lock`` and instead perform their control
+operations directly under the netdev instance lock.
+
+Device drivers are encouraged to rely on the instance lock where possible.
+
+For the (mostly software) drivers that need to interact with the core stack,
+there are two sets of interfaces: ``dev_xxx`` and ``netif_xxx`` (e.g.,
+``dev_set_mtu`` and ``netif_set_mtu``). The ``dev_xxx`` functions handle
+acquiring the instance lock themselves, while the ``netif_xxx`` functions
+assume that the driver has already acquired the instance lock.
+
NETDEV_INTERNAL symbol namespace
================================
diff --git a/Documentation/networking/scaling.rst b/Documentation/networking/scaling.rst
index 4eb50bcb9d42..99b6a61e5e31 100644
--- a/Documentation/networking/scaling.rst
+++ b/Documentation/networking/scaling.rst
@@ -49,14 +49,21 @@ destination address) and TCP/UDP (source port, destination port) tuples
are swapped, the computed hash is the same. This is beneficial in some
applications that monitor TCP/IP flows (IDS, firewalls, ...etc) and need
both directions of the flow to land on the same Rx queue (and CPU). The
-"Symmetric-XOR" is a type of RSS algorithms that achieves this hash
-symmetry by XORing the input source and destination fields of the IP
-and/or L4 protocols. This, however, results in reduced input entropy and
-could potentially be exploited. Specifically, the algorithm XORs the input
+"Symmetric-XOR" and "Symmetric-OR-XOR" are types of RSS algorithms that
+achieve this hash symmetry by XOR/ORing the input source and destination
+fields of the IP and/or L4 protocols. This, however, results in reduced
+input entropy and could potentially be exploited.
+
+Specifically, the "Symmetric-XOR" algorithm XORs the input
as follows::
# (SRC_IP ^ DST_IP, SRC_IP ^ DST_IP, SRC_PORT ^ DST_PORT, SRC_PORT ^ DST_PORT)
+The "Symmetric-OR-XOR" algorithm, on the other hand, transforms the input as
+follows::
+
+ # (SRC_IP | DST_IP, SRC_IP ^ DST_IP, SRC_PORT | DST_PORT, SRC_PORT ^ DST_PORT)
+
The result is then fed to the underlying RSS algorithm.
Some advanced NICs allow steering packets to queues based on
@@ -427,8 +434,10 @@ rps_dev_flow_table. The stack consults a CPU to hardware queue map which
is maintained by the NIC driver. This is an auto-generated reverse map of
the IRQ affinity table shown by /proc/interrupts. Drivers can use
functions in the cpu_rmap (“CPU affinity reverse map”) kernel library
-to populate the map. For each CPU, the corresponding queue in the map is
-set to be one whose processing CPU is closest in cache locality.
+to populate the map. Alternatively, drivers can delegate the cpu_rmap
+management to the kernel by calling netif_enable_cpu_rmap(). For each CPU,
+the corresponding queue in the map is set to be one whose processing CPU is
+closest in cache locality.
Accelerated RFS Configuration
diff --git a/Documentation/networking/statistics.rst b/Documentation/networking/statistics.rst
index 75e017dfa825..518284e287b0 100644
--- a/Documentation/networking/statistics.rst
+++ b/Documentation/networking/statistics.rst
@@ -143,7 +143,7 @@ reading multiple stats as it internally performs a full dump of
and reports only the stat corresponding to the accessed file.
Sysfs files are documented in
-`Documentation/ABI/testing/sysfs-class-net-statistics`.
+Documentation/ABI/testing/sysfs-class-net-statistics.
netlink
diff --git a/Documentation/networking/strparser.rst b/Documentation/networking/strparser.rst
index 7f623d1db72a..8dc6bb04c710 100644
--- a/Documentation/networking/strparser.rst
+++ b/Documentation/networking/strparser.rst
@@ -180,7 +180,7 @@ There are seven callbacks:
struct contains two fields: offset and full_len. Offset is
where the message starts in the skb, and full_len is the
the length of the message. skb->len - offset may be greater
- then full_len since strparser does not trim the skb.
+ than full_len since strparser does not trim the skb.
::
diff --git a/Documentation/networking/switchdev.rst b/Documentation/networking/switchdev.rst
index f355f0166f1b..2966b7122f05 100644
--- a/Documentation/networking/switchdev.rst
+++ b/Documentation/networking/switchdev.rst
@@ -137,7 +137,7 @@ would be sub-port 0 on port 1 on switch 1.
Port Features
^^^^^^^^^^^^^
-dev->netns_local
+dev->netns_immutable
If the switchdev driver (and device) only supports offloading of the default
network namespace (netns), the driver should set this private flag to prevent
diff --git a/Documentation/networking/timestamping.rst b/Documentation/networking/timestamping.rst
index 61ef9da10e28..b8fef8101176 100644
--- a/Documentation/networking/timestamping.rst
+++ b/Documentation/networking/timestamping.rst
@@ -140,6 +140,14 @@ SOF_TIMESTAMPING_TX_ACK:
cumulative acknowledgment. The mechanism ignores SACK and FACK.
This flag can be enabled via both socket options and control messages.
+SOF_TIMESTAMPING_TX_COMPLETION:
+ Request tx timestamps on packet tx completion. The completion
+  timestamp is generated by the kernel when it receives a packet
+ completion report from the hardware. Hardware may report multiple
+ packets at once, and completion timestamps reflect the timing of the
+ report and not actual tx time. This flag can be enabled via both
+ socket options and control messages.
+
1.3.2 Timestamp Reporting
^^^^^^^^^^^^^^^^^^^^^^^^^
diff --git a/Documentation/networking/xfrm_device.rst b/Documentation/networking/xfrm_device.rst
index 66f6e9a9b59a..7f24c09f2694 100644
--- a/Documentation/networking/xfrm_device.rst
+++ b/Documentation/networking/xfrm_device.rst
@@ -126,7 +126,8 @@ been setup for offload, it first calls into xdo_dev_offload_ok() with
the skb and the intended offload state to ask the driver if the offload
will serviceable. This can check the packet information to be sure the
offload can be supported (e.g. IPv4 or IPv6, no IPv4 options, etc) and
-return true of false to signify its support.
+return true or false to signify its support. If the driver doesn't implement
+this callback, the stack provides reasonable defaults.
Crypto offload mode:
When ready to send, the driver needs to inspect the Tx packet for the
diff --git a/Documentation/networking/xsk-tx-metadata.rst b/Documentation/networking/xsk-tx-metadata.rst
index e76b0cfc32f7..df53a10ccac3 100644
--- a/Documentation/networking/xsk-tx-metadata.rst
+++ b/Documentation/networking/xsk-tx-metadata.rst
@@ -50,6 +50,10 @@ The flags field enables the particular offload:
checksum. ``csum_start`` specifies byte offset of where the checksumming
should start and ``csum_offset`` specifies byte offset where the
device should store the computed checksum.
+- ``XDP_TXMD_FLAGS_LAUNCH_TIME``: requests the device to schedule the
+ packet for transmission at a pre-determined time called launch time. The
+  value of the launch time is indicated by the ``launch_time`` field of
+ ``union xsk_tx_metadata``.
Besides the flags above, in order to trigger the offloads, the first
packet's ``struct xdp_desc`` descriptor should set ``XDP_TX_METADATA``
@@ -65,6 +69,63 @@ In this case, when running in ``XDK_COPY`` mode, the TX checksum
is calculated on the CPU. Do not enable this option in production because
it will negatively affect performance.
+Launch Time
+===========
+
+The value of the requested launch time should be based on the device's PTP
+Hardware Clock (PHC) to ensure accuracy. AF_XDP takes a different data path
+compared to the ETF queuing discipline, which organizes packets and delays
+their transmission. Instead, AF_XDP immediately hands off the packets to
+the device driver without rearranging their order or holding them prior to
+transmission. Since the driver maintains FIFO behavior and does not perform
+packet reordering, a packet with a launch time request will block other
+packets in the same Tx Queue until it is sent. Therefore, it is recommended
+to allocate a separate queue for scheduling traffic that is intended for
+future transmission.
+
+In scenarios where the launch time offload feature is disabled, the device
+driver is expected to disregard the launch time request. For correct
+interpretation and meaningful operation, the launch time should never be
+set to a value larger than the farthest programmable time in the future
+(the horizon). Different devices have different hardware limitations on the
+launch time offload feature.
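+
+A minimal user space sketch of requesting a launch time (assuming
+``launch_time`` sits in the ``request`` part of the Tx metadata as in the
+current uAPI header, and that ``meta``, ``desc``, ``phc_now_ns`` and
+``delay_ns`` have been set up by the caller)::
+
+  /* meta points at the xsk_tx_metadata area reserved in front of the frame */
+  meta->flags |= XDP_TXMD_FLAGS_LAUNCH_TIME;
+  meta->request.launch_time = phc_now_ns + delay_ns;  /* PHC-based nanoseconds */
+
+  /* the descriptor itself must also carry XDP_TX_METADATA */
+  desc->options |= XDP_TX_METADATA;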
+
+stmmac driver
+-------------
+
+For stmmac, TSO and launch time (TBS) features are mutually exclusive for
+each individual Tx Queue. By default, the driver configures Tx Queue 0 to
+support TSO and the rest of the Tx Queues to support TBS. The launch time
+hardware offload feature can be enabled or disabled by using the tc-etf
+command to call the driver's ndo_setup_tc() callback.
+
+The value of the launch time that is programmed in the Enhanced Normal
+Transmit Descriptors is a 32-bit value, where the most significant 8 bits
+represent the time in seconds and the remaining 24 bits represent the time
+in 256 ns increments. The programmed launch time is compared against the
+PTP time (bits[39:8]) and rolls over after 256 seconds. Therefore, the
+horizon of the launch time for dwmac4 and dwxlgmac2 is 128 seconds in the
+future.
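+
+As a rough illustration of that encoding (the register-level details are
+driver internal; ``launch_ns`` is an illustrative PHC time in nanoseconds)::
+
+  u32 sec  = (launch_ns / 1000000000ULL) & 0xff;  /* 8 bits of seconds  */
+  u32 frac = (launch_ns % 1000000000ULL) / 256;   /* 24 bits of 256 ns  */
+  u32 tbs  = (sec << 24) | (frac & 0xffffff);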
+
+igc driver
+----------
+
+For igc, all four Tx Queues support the launch time feature. The launch
+time hardware offload feature can be enabled or disabled by using the
+tc-etf command to call the driver's ndo_setup_tc() callback. When entering
+TSN mode, the igc driver will reset the device and create a default Qbv
+schedule with a 1-second cycle time, with all Tx Queues open at all times.
+
+The value of the launch time that is programmed in the Advanced Transmit
+Context Descriptor is a relative offset to the starting time of the Qbv
+transmission window of the queue. The Frst flag of the descriptor can be
+set to schedule the packet for the next Qbv cycle. Therefore, the horizon
+of the launch time for i225 and i226 is the ending time of the next cycle
+of the Qbv transmission window of the queue. For example, when the Qbv
+cycle time is set to 1 second, the horizon of the launch time ranges
+from 1 second to 2 seconds, depending on where the Qbv cycle is currently
+running.
+
Querying Device Capabilities
============================
@@ -74,6 +135,7 @@ Refer to ``xsk-flags`` features bitmask in
- ``tx-timestamp``: device supports ``XDP_TXMD_FLAGS_TIMESTAMP``
- ``tx-checksum``: device supports ``XDP_TXMD_FLAGS_CHECKSUM``
+- ``tx-launch-time-fifo``: device supports ``XDP_TXMD_FLAGS_LAUNCH_TIME``
See ``tools/net/ynl/samples/netdev.c`` on how to query this information.
diff --git a/Documentation/nvme/nvme-pci-endpoint-target.rst b/Documentation/nvme/nvme-pci-endpoint-target.rst
index 66e7b7d869b4..b699595d1762 100644
--- a/Documentation/nvme/nvme-pci-endpoint-target.rst
+++ b/Documentation/nvme/nvme-pci-endpoint-target.rst
@@ -86,7 +86,7 @@ configurable through configfs before starting the controller. To avoid issues
with excessive local memory usage for executing commands, MDTS defaults to 512
KB and is limited to a maximum of 2 MB (arbitrary limit).
-Mimimum number of PCI Address Mapping Windows Required
+Minimum number of PCI Address Mapping Windows Required
------------------------------------------------------
Most PCI endpoint controllers provide a limited number of mapping windows for
diff --git a/Documentation/process/5.Posting.rst b/Documentation/process/5.Posting.rst
index dbb763a8de90..22fa925353cf 100644
--- a/Documentation/process/5.Posting.rst
+++ b/Documentation/process/5.Posting.rst
@@ -268,10 +268,15 @@ The tags in common use are:
- Cc: the named person received a copy of the patch and had the
opportunity to comment on it.
-Be careful in the addition of tags to your patches, as only Cc: is appropriate
-for addition without the explicit permission of the person named; using
-Reported-by: is fine most of the time as well, but ask for permission if
-the bug was reported in private.
+Be careful in the addition of the aforementioned tags to your patches, as all
+except for Cc:, Reported-by:, and Suggested-by: need explicit permission of the
+person named. For those three, implicit permission is sufficient if the person
+contributed to the Linux kernel using that name and email address according
+to the lore archives or the commit history -- and in case of Reported-by:
+and Suggested-by: did the reporting or suggestion in public. Note,
+bugzilla.kernel.org is a public place in this sense, but email addresses
+used there are private; so do not expose them in tags, unless the person
+used them in earlier contributions.
Sending the patch
diff --git a/Documentation/process/changes.rst b/Documentation/process/changes.rst
index a0beca805362..d564362773b5 100644
--- a/Documentation/process/changes.rst
+++ b/Documentation/process/changes.rst
@@ -58,11 +58,11 @@ mcelog 0.6 mcelog --version
iptables 1.4.2 iptables -V
openssl & libcrypto 1.0.0 openssl version
bc 1.06.95 bc --version
-Sphinx\ [#f1]_ 2.4.4 sphinx-build --version
+Sphinx\ [#f1]_ 3.4.3 sphinx-build --version
GNU tar 1.28 tar --version
gtags (optional) 6.6.5 gtags --version
mkimage (optional) 2017.01 mkimage --version
-Python (optional) 3.5.x python3 --version
+Python (optional) 3.9.x python3 --version
GNU AWK (optional) 5.1.0 gawk --version
====================== =============== ========================================
diff --git a/Documentation/process/code-of-conduct-interpretation.rst b/Documentation/process/code-of-conduct-interpretation.rst
index 1d1150954be3..4cdef8360698 100644
--- a/Documentation/process/code-of-conduct-interpretation.rst
+++ b/Documentation/process/code-of-conduct-interpretation.rst
@@ -145,13 +145,16 @@ kernel community.
Any decisions regarding enforcement recommendations will be brought to
the TAB for implementation of enforcement with the relevant maintainers
-if needed. A decision by the Code of Conduct Committee can be overturned
-by the TAB by a two-thirds vote.
+if needed. Once the TAB approves one or more of the measures outlined
+in the scope of the ban by two-thirds of the members voting for the
+measures, the Code of Conduct Committee will enforce the TAB approved
+measures. Any Code of Conduct Committee members serving on the TAB will
+not vote on the measures.
At quarterly intervals, the Code of Conduct Committee and TAB will
provide a report summarizing the anonymised reports that the Code of
Conduct committee has received and their status, as well details of any
-overridden decisions including complete and identifiable voting details.
+TAB approved decisions including complete and identifiable voting details.
Because how we interpret and enforce the Code of Conduct will evolve over
time, this document will be updated when necessary to reflect any
@@ -227,9 +230,11 @@ The scope of the ban for a period of time could include:
such as mailing lists and social media sites
Once the TAB approves one or more of the measures outlined in the scope of
-the ban by a two-thirds vote, the Code of Conduct Committee will enforce
-the TAB approved measure(s) in collaboration with the community, maintainers,
-sub-maintainers, and kernel.org administrators.
+the ban by two-thirds of the members voting for the measures, the Code of
+Conduct Committee will enforce the TAB approved measure(s) in collaboration
+with the community, maintainers, sub-maintainers, and kernel.org
+administrators. Any Code of Conduct Committee members serving on the TAB
+will not vote on the measures.
The Code of Conduct Committee is mindful of the negative impact of seeking
public apology and instituting ban could have on individuals. It is also
diff --git a/Documentation/process/kernel-docs.rst b/Documentation/process/kernel-docs.rst
index 3b5b5983fea8..c67ac12cf789 100644
--- a/Documentation/process/kernel-docs.rst
+++ b/Documentation/process/kernel-docs.rst
@@ -75,6 +75,17 @@ On-line docs
Published books
---------------
+ * Title: **The Linux Memory Manager**
+
+ :Author: Lorenzo Stoakes
+ :Publisher: No Starch Press
+ :Date: February 2025
+ :Pages: 1300
+ :ISBN: 978-1718504462
+ :Notes: Memory management. Full draft available as early access for
+ pre-order, full release scheduled for Fall 2025. See
+ https://nostarch.com/linux-memory-manager for further info.
+
* Title: **Practical Linux System Administration: A Guide to Installation, Configuration, and Management, 1st Edition**
:Author: Kenneth Hess
diff --git a/Documentation/process/maintainer-netdev.rst b/Documentation/process/maintainer-netdev.rst
index e497729525d5..1ac62dc3a66f 100644
--- a/Documentation/process/maintainer-netdev.rst
+++ b/Documentation/process/maintainer-netdev.rst
@@ -311,6 +311,14 @@ to the mailing list, e.g.::
Posting as one thread is discouraged because it confuses patchwork
(as of patchwork 2.2.2).
+Co-posting selftests
+--------------------
+
+Selftests should be part of the same series as the code changes.
+Specifically for fixes, both the code change and the related test should go
+into the same tree (the tests may lack a Fixes tag, which is expected).
+Mixing code changes and test changes in a single commit is discouraged.
+
Preparing changes
-----------------
diff --git a/Documentation/process/submit-checklist.rst b/Documentation/process/submit-checklist.rst
index e531dd504b6c..beb7f94279fd 100644
--- a/Documentation/process/submit-checklist.rst
+++ b/Documentation/process/submit-checklist.rst
@@ -52,7 +52,8 @@ Provide documentation
4) All new module parameters are documented with ``MODULE_PARM_DESC()``
5) All new userspace interfaces are documented in ``Documentation/ABI/``.
- See ``Documentation/ABI/README`` for more information.
+ See Documentation/admin-guide/abi.rst (or ``Documentation/ABI/README``)
+ for more information.
Patches that change userspace interfaces should be CCed to
linux-api@vger.kernel.org.
@@ -91,9 +92,12 @@ Build your code
fix any issues.
2) Builds on multiple CPU architectures by using local cross-compile tools
- or some other build farm. Note that ppc64 is a good architecture for
- cross-compilation checking because it tends to use ``unsigned long`` for
- 64-bit quantities.
+ or some other build farm.
+ Note that testing against architectures of different word sizes
+ (32- and 64-bit) and different endianness (big- and little-) is effective
+ in catching various portability issues due to false assumptions on
+ representable quantity range, data alignment, or endianness, among
+ others.
3) Newly-added code has been compiled with ``gcc -W`` (use
``make KCFLAGS=-W``). This will generate lots of noise, but is good
diff --git a/Documentation/process/submitting-patches.rst b/Documentation/process/submitting-patches.rst
index 8fdc0ef3e604..cede4e7b29af 100644
--- a/Documentation/process/submitting-patches.rst
+++ b/Documentation/process/submitting-patches.rst
@@ -495,10 +495,10 @@ list archives. A "# Suffix" may also be used in this case to clarify.
If a person has had the opportunity to comment on a patch, but has not
provided such comments, you may optionally add a ``Cc:`` tag to the patch.
-This is the only tag which might be added without an explicit action by the
-person it names - but it should indicate that this person was copied on the
-patch. This tag documents that potentially interested parties
-have been included in the discussion.
+This tag documents that potentially interested parties have been included in
+the discussion. Note, this is one of only three tags you might be able to use
+without explicit permission of the person named (see 'Tagging people requires
+permission' below for details).
Co-developed-by: states that the patch was co-created by multiple developers;
it is used to give attribution to co-authors (in addition to the author
@@ -544,9 +544,9 @@ hopefully inspires them to help us again in the future. The tag is intended for
bugs; please do not use it to credit feature requests. The tag should be
followed by a Closes: tag pointing to the report, unless the report is not
available on the web. The Link: tag can be used instead of Closes: if the patch
-fixes a part of the issue(s) being reported. Please note that if the bug was
-reported in private, then ask for permission first before using the Reported-by
-tag.
+fixes a part of the issue(s) being reported. Note, the Reported-by tag is one
+of only three tags you might be able to use without explicit permission of the
+person named (see 'Tagging people requires permission' below for details).
A Tested-by: tag indicates that the patch has been successfully tested (in
some environment) by the person named. This tag informs maintainers that
@@ -596,11 +596,11 @@ Usually removal of someone's Tested-by or Reviewed-by tags should be mentioned
in the patch changelog (after the '---' separator).
A Suggested-by: tag indicates that the patch idea is suggested by the person
-named and ensures credit to the person for the idea. Please note that this
-tag should not be added without the reporter's permission, especially if the
-idea was not posted in a public forum. That said, if we diligently credit our
-idea reporters, they will, hopefully, be inspired to help us again in the
-future.
+named and ensures credit to the person for the idea: if we diligently credit
+our idea reporters, they will, hopefully, be inspired to help us again in the
+future. Note, this is one of only three tags you might be able to use without
+explicit permission of the person named (see 'Tagging people requires
+permission' below for details).
A Fixes: tag indicates that the patch fixes an issue in a previous commit. It
is used to make it easy to determine where a bug originated, which can help
@@ -618,6 +618,21 @@ Finally, while providing tags is welcome and typically very appreciated, please
note that signers (i.e. submitters and maintainers) may use their discretion in
applying offered tags.
+.. _tagging_people:
+
+Tagging people requires permission
+----------------------------------
+
+Be careful in the addition of the aforementioned tags to your patches, as all
+except for Cc:, Reported-by:, and Suggested-by: need explicit permission of the
+person named. For those three, implicit permission is sufficient if the person
+contributed to the Linux kernel using that name and email address according
+to the lore archives or the commit history -- and in case of Reported-by:
+and Suggested-by: did the reporting or suggestion in public. Note,
+bugzilla.kernel.org is a public place in this sense, but email addresses
+used there are private; so do not expose them in tags, unless the person
+used them in earlier contributions.
+
.. _the_canonical_patch_format:
The canonical patch format
@@ -717,6 +732,12 @@ patch in the permanent changelog. If the ``from`` line is missing,
then the ``From:`` line from the email header will be used to determine
the patch author in the changelog.
+The author may indicate their affiliation or the sponsor of the work
+by adding the name of an organization to the ``from`` and ``SoB`` lines,
+e.g.:
+
+ From: Patch Author (Company) <author@example.com>
+
Explanation Body
^^^^^^^^^^^^^^^^
diff --git a/Documentation/rust/quick-start.rst b/Documentation/rust/quick-start.rst
index 4aa50e5fcb8c..6d2607870ba4 100644
--- a/Documentation/rust/quick-start.rst
+++ b/Documentation/rust/quick-start.rst
@@ -145,7 +145,7 @@ Rust standard library source
****************************
The Rust standard library source is required because the build system will
-cross-compile ``core`` and ``alloc``.
+cross-compile ``core``.
If ``rustup`` is being used, run::
diff --git a/Documentation/rust/testing.rst b/Documentation/rust/testing.rst
index 568b71b415a4..180b886e0f1e 100644
--- a/Documentation/rust/testing.rst
+++ b/Documentation/rust/testing.rst
@@ -97,7 +97,7 @@ operator are also supported as usual, e.g.:
/// ```
/// # use kernel::{spawn_work_item, workqueue};
- /// spawn_work_item!(workqueue::system(), || pr_info!("x"))?;
+ /// spawn_work_item!(workqueue::system(), || pr_info!("x\n"))?;
/// # Ok::<(), Error>(())
/// ```
diff --git a/Documentation/scheduler/sched-bwc.rst b/Documentation/scheduler/sched-bwc.rst
index 41ed2ceafc92..e881a945c188 100644
--- a/Documentation/scheduler/sched-bwc.rst
+++ b/Documentation/scheduler/sched-bwc.rst
@@ -59,7 +59,7 @@ At the same time, we can say that the worst case deadline miss, will be
\Sum e_i; that is, there is a bounded tardiness (under the assumption
that x+e is indeed WCET).
-The interferenece when using burst is valued by the possibilities for
+The interference when using burst is valued by the possibilities for
missing the deadline and the average WCET. Test results showed that when
there many cgroups or CPU is under utilized, the interference is
limited. More details are shown in:
diff --git a/Documentation/scheduler/sched-debug.rst b/Documentation/scheduler/sched-debug.rst
index 4d3d24f2a439..b5a92a39eccd 100644
--- a/Documentation/scheduler/sched-debug.rst
+++ b/Documentation/scheduler/sched-debug.rst
@@ -2,7 +2,7 @@
Scheduler debugfs
=================
-Booting a kernel with CONFIG_SCHED_DEBUG=y will give access to
+Booting a kernel with debugfs enabled will give access to
scheduler specific debug files under /sys/kernel/debug/sched. Some of
those files are described below.
diff --git a/Documentation/scheduler/sched-design-CFS.rst b/Documentation/scheduler/sched-design-CFS.rst
index 8786f219fc73..b574a2644c77 100644
--- a/Documentation/scheduler/sched-design-CFS.rst
+++ b/Documentation/scheduler/sched-design-CFS.rst
@@ -96,7 +96,7 @@ picked and the current task is preempted.
CFS uses nanosecond granularity accounting and does not rely on any jiffies or
other HZ detail. Thus the CFS scheduler has no notion of "timeslices" in the
way the previous scheduler had, and has no heuristics whatsoever. There is
-only one central tunable (you have to switch on CONFIG_SCHED_DEBUG):
+only one central tunable:
/sys/kernel/debug/sched/base_slice_ns
diff --git a/Documentation/scheduler/sched-domains.rst b/Documentation/scheduler/sched-domains.rst
index 5e996fe973b1..15e3a4cb304a 100644
--- a/Documentation/scheduler/sched-domains.rst
+++ b/Documentation/scheduler/sched-domains.rst
@@ -73,9 +73,8 @@ Architectures may override the generic domain builder and the default SD flags
for a given topology level by creating a sched_domain_topology_level array and
calling set_sched_topology() with this array as the parameter.
-The sched-domains debugging infrastructure can be enabled by enabling
-CONFIG_SCHED_DEBUG and adding 'sched_verbose' to your cmdline. If you
-forgot to tweak your cmdline, you can also flip the
+The sched-domains debugging infrastructure can be enabled by adding 'sched_verbose'
+to your cmdline. If you forgot to tweak your cmdline, you can also flip the
/sys/kernel/debug/sched/verbose knob. This enables an error checking parse of
the sched domains which should catch most possible errors (described above). It
also prints out the domain structure in a visual format.
diff --git a/Documentation/scheduler/sched-ext.rst b/Documentation/scheduler/sched-ext.rst
index c4672d7df2f7..0b2654e2164b 100644
--- a/Documentation/scheduler/sched-ext.rst
+++ b/Documentation/scheduler/sched-ext.rst
@@ -107,8 +107,7 @@ detailed information:
nr_rejected : 0
enable_seq : 1
-If ``CONFIG_SCHED_DEBUG`` is set, whether a given task is on sched_ext can
-be determined as follows:
+Whether a given task is on sched_ext can be determined as follows:
.. code-block:: none
@@ -294,6 +293,42 @@ dispatching, and must be dispatched to with ``scx_bpf_dsq_insert()``. See
the function documentation and usage in ``tools/sched_ext/scx_simple.bpf.c``
for more information.
+Task Lifecycle
+--------------
+
+The following pseudo-code summarizes the entire lifecycle of a task managed
+by a sched_ext scheduler:
+
+.. code-block:: c
+
+ ops.init_task(); /* A new task is created */
+ ops.enable(); /* Enable BPF scheduling for the task */
+
+ while (task in SCHED_EXT) {
+ if (task can migrate)
+ ops.select_cpu(); /* Called on wakeup (optimization) */
+
+ ops.runnable(); /* Task becomes ready to run */
+
+ while (task is runnable) {
+ if (task is not in a DSQ) {
+ ops.enqueue(); /* Task can be added to a DSQ */
+
+ /* A CPU becomes available */
+
+ ops.dispatch(); /* Task is moved to a local DSQ */
+ }
+ ops.running(); /* Task starts running on its assigned CPU */
+ ops.tick(); /* Called every 1/HZ seconds */
+ ops.stopping(); /* Task stops running (time slice expires or wait) */
+ }
+
+ ops.quiescent(); /* Task releases its assigned CPU (wait) */
+ }
+
+ ops.disable(); /* Disable BPF scheduling for the task */
+ ops.exit_task(); /* Task is destroyed */
+
Where to Look
=============
diff --git a/Documentation/scheduler/sched-rt-group.rst b/Documentation/scheduler/sched-rt-group.rst
index 80b05a3009ea..ab464335d320 100644
--- a/Documentation/scheduler/sched-rt-group.rst
+++ b/Documentation/scheduler/sched-rt-group.rst
@@ -102,6 +102,9 @@ The system wide settings are configured under the /proc virtual file system:
* sched_rt_period_us takes values from 1 to INT_MAX.
* sched_rt_runtime_us takes values from -1 to sched_rt_period_us.
* A run time of -1 specifies runtime == period, ie. no limit.
+ * sched_rt_runtime_us/sched_rt_period_us > 0.05 in order to preserve
+   bandwidth for fair dl_server. For an accurate value, check the average of
+   runtime/period in /sys/kernel/debug/sched/fair_server/cpuX/
2.2 Default behaviour
diff --git a/Documentation/scheduler/sched-stats.rst b/Documentation/scheduler/sched-stats.rst
index caea83d91c67..08b6bc9a315c 100644
--- a/Documentation/scheduler/sched-stats.rst
+++ b/Documentation/scheduler/sched-stats.rst
@@ -88,7 +88,7 @@ One of these is produced per domain for each cpu described. (Note that if
CONFIG_SMP is not defined, *no* domains are utilized and these lines
will not appear in the output. <name> is an extension to the domain field
that prints the name of the corresponding sched domain. It can appear in
-schedstat version 17 and above, and requires CONFIG_SCHED_DEBUG.)
+schedstat version 17 and above.
domain<N> <name> <cpumask> 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45
diff --git a/Documentation/scsi/st.rst b/Documentation/scsi/st.rst
index d3b28c28d74c..b4a092faa9c8 100644
--- a/Documentation/scsi/st.rst
+++ b/Documentation/scsi/st.rst
@@ -157,6 +157,11 @@ enabled driver and mode options. The value in the file is a bit mask where the
bit definitions are the same as those used with MTSETDRVBUFFER in setting the
options.
+Each directory contains the entry 'position_lost_in_reset'. If this value is
+one, reading and writing to the device is blocked after device reset. Most
+devices rewind the tape after reset and the writes/reads don't access the
+tape position the user expects.
+
A link named 'tape' is made from the SCSI device directory to the class
directory corresponding to the mode 0 auto-rewind device (e.g., st0).
diff --git a/Documentation/security/landlock.rst b/Documentation/security/landlock.rst
index 59ecdb1c0d4d..e0fc54aff09e 100644
--- a/Documentation/security/landlock.rst
+++ b/Documentation/security/landlock.rst
@@ -7,7 +7,7 @@ Landlock LSM: kernel documentation
==================================
:Author: Mickaël Salaün
-:Date: December 2022
+:Date: March 2025
Landlock's goal is to create scoped access-control (i.e. sandboxing). To
harden a whole system, this feature should be available to any process,
@@ -45,6 +45,10 @@ Guiding principles for safe access controls
sandboxed process shall retain their scoped accesses (at the time of resource
acquisition) whatever process uses them.
Cf. `File descriptor access rights`_.
+* Access denials shall be logged according to system and Landlock domain
+ configurations. Log entries must contain information about the cause of the
+ denial and the owner of the related security policy. Such log generation
+ should have a negligible performance and memory impact on allowed requests.
Design choices
==============
@@ -124,6 +128,13 @@ makes the reasoning much easier and helps avoid pitfalls.
.. kernel-doc:: security/landlock/ruleset.h
:identifiers:
+Additional documentation
+========================
+
+* Documentation/userspace-api/landlock.rst
+* Documentation/admin-guide/LSM/landlock.rst
+* https://landlock.io
+
.. Links
.. _tools/testing/selftests/landlock/:
https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/tree/tools/testing/selftests/landlock/
diff --git a/Documentation/security/tpm/index.rst b/Documentation/security/tpm/index.rst
index fa593d960040..deda952eacbe 100644
--- a/Documentation/security/tpm/index.rst
+++ b/Documentation/security/tpm/index.rst
@@ -10,3 +10,4 @@ Trusted Platform Module documentation
tpm_vtpm_proxy
xen-tpmfront
tpm_ftpm_tee
+ tpm_ffa_crb
diff --git a/Documentation/security/tpm/tpm_ffa_crb.rst b/Documentation/security/tpm/tpm_ffa_crb.rst
new file mode 100644
index 000000000000..0184193da3c7
--- /dev/null
+++ b/Documentation/security/tpm/tpm_ffa_crb.rst
@@ -0,0 +1,65 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+========================
+TPM CRB over FF-A Driver
+========================
+
+The TPM Command Response Buffer (CRB) interface is a standard TPM interface
+defined in the TCG PC Client Platform TPM Profile (PTP) Specification [1]_.
+The CRB provides a structured set of control registers a client uses when
+interacting with a TPM as well as a data buffer for storing TPM commands and
+responses. A CRB interface can be implemented in:
+
+- hardware registers in a discrete TPM chip
+
+- in memory for a TPM running in isolated environment where shared memory
+ allows a client to interact with the TPM
+
+The Firmware Framework for Arm A-profile (FF-A) [2]_ is a specification
+that defines interfaces and protocols for the following purposes:
+
+- Compartmentalize firmware into software partitions that run in the Arm
+ Secure world environment (also known as TrustZone)
+
+- Provide a standard interface for software components in the Non-secure
+ state, for example OS and Hypervisors, to communicate with this firmware.
+
+A TPM can be implemented as an FF-A secure service. This could be a firmware
+TPM or could potentially be a TPM service that acts as a proxy to a discrete
+TPM chip. An FF-A based TPM abstracts hardware details (e.g. bus controller
+and chip selects) away from the OS and can protect locality 4 from access
+by an OS. The TCG-defined CRB interface is used by clients to interact
+with the TPM service.
+
+The Arm TPM Service Command Response Buffer Interface Over FF-A [3]_
+specification defines FF-A messages that can be used by a client to signal
+when updates have been made to the CRB.
+
+How the Linux CRB driver interacts with FF-A is summarized below:
+
+- The tpm_crb_ffa driver registers with the FF-A subsystem in the kernel
+ with an architected TPM service UUID defined in the CRB over FF-A spec.
+
+- If a TPM service is discovered by FF-A, the probe() function in the
+ tpm_crb_ffa driver runs, and the driver initializes.
+
+- The probing and initialization of the Linux CRB driver is triggered
+ by the discovery of a TPM advertised via ACPI. The CRB driver can
+ detect the type of TPM through the ACPI 'start' method. The start
+ method for Arm FF-A was defined in TCG ACPI v1.4 [4]_.
+
+- When the CRB driver performs its normal functions such as signaling 'start'
+ and locality request/relinquish, it invokes the tpm_crb_ffa_start() function
+ in the tpm_crb_ffa driver which handles the FF-A messaging to the TPM.
+
+References
+==========
+
+.. [1] **TCG PC Client Platform TPM Profile (PTP) Specification**
+ https://trustedcomputinggroup.org/resource/pc-client-platform-tpm-profile-ptp-specification/
+.. [2] **Arm Firmware Framework for Arm A-profile (FF-A)**
+ https://developer.arm.com/documentation/den0077/latest/
+.. [3] **Arm TPM Service Command Response Buffer Interface Over FF-A**
+ https://developer.arm.com/documentation/den0138/latest/
+.. [4] **TCG ACPI Specification**
+ https://trustedcomputinggroup.org/resource/tcg-acpi-specification/
diff --git a/Documentation/sound/alsa-configuration.rst b/Documentation/sound/alsa-configuration.rst
index 04254474fa04..a45174d165eb 100644
--- a/Documentation/sound/alsa-configuration.rst
+++ b/Documentation/sound/alsa-configuration.rst
@@ -58,7 +58,7 @@ debug
2 = verbose debug messages);
This option appears only when ``CONFIG_SND_DEBUG=y``.
This option can be dynamically changed via sysfs
- /sys/modules/snd/parameters/debug file.
+ /sys/module/snd/parameters/debug file.
Module snd-pcm-oss
------------------
diff --git a/Documentation/sound/designs/powersave.rst b/Documentation/sound/designs/powersave.rst
index 138157452eb9..ca7d1e838b4d 100644
--- a/Documentation/sound/designs/powersave.rst
+++ b/Documentation/sound/designs/powersave.rst
@@ -25,15 +25,15 @@ operations.
The ``power_save`` option is exported as writable. This means you can
adjust the value via sysfs on the fly. For example, to turn on the
automatic power-save mode with 10 seconds, write to
-``/sys/modules/snd_ac97_codec/parameters/power_save`` (usually as root):
+``/sys/module/snd_ac97_codec/parameters/power_save`` (usually as root):
::
- # echo 10 > /sys/modules/snd_ac97_codec/parameters/power_save
+ # echo 10 > /sys/module/snd_ac97_codec/parameters/power_save
Note that you might hear click noise/pop when changing the power
state. Also, it often takes certain time to wake up from the
-power-down to the active state. These are often hardly to fix, so
+power-down to the active state. These are often hard to fix, so
don't report extra bug reports unless you have a fix patch ;-)
For HD-audio interface, there is another module option,
diff --git a/Documentation/sound/soc/codec-to-codec.rst b/Documentation/sound/soc/codec-to-codec.rst
index 0418521b6e03..973c147d9d82 100644
--- a/Documentation/sound/soc/codec-to-codec.rst
+++ b/Documentation/sound/soc/codec-to-codec.rst
@@ -68,7 +68,7 @@ file:
.codec_dai_name = "codec-2-dai_name",
.platform_name = "samsung-i2s.0",
.dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF
- | SND_SOC_DAIFMT_CBM_CFM,
+ | SND_SOC_DAIFMT_CBP_CFP,
.ignore_suspend = 1,
.c2c_params = &dsp_codec_params,
.num_c2c_params = 1,
@@ -80,7 +80,7 @@ file:
.codec_name = "codec-3,
.codec_dai_name = "codec-3-dai_name",
.dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF
- | SND_SOC_DAIFMT_CBM_CFM,
+ | SND_SOC_DAIFMT_CBP_CFP,
.ignore_suspend = 1,
.c2c_params = &dsp_codec_params,
.num_c2c_params = 1,
diff --git a/Documentation/sound/soc/dpcm.rst b/Documentation/sound/soc/dpcm.rst
index 02419a6f8213..7b6aeab3c207 100644
--- a/Documentation/sound/soc/dpcm.rst
+++ b/Documentation/sound/soc/dpcm.rst
@@ -147,14 +147,16 @@ For the example above we have to define 4 FE DAI links and 6 BE DAI links. The
FE DAI links are defined as follows :-
::
+ SND_SOC_DAILINK_DEFS(pcm0,
+ DAILINK_COMP_ARRAY(COMP_CPU("System Pin")),
+ DAILINK_COMP_ARRAY(COMP_DUMMY()),
+ DAILINK_COMP_ARRAY(COMP_PLATFORM("dsp-audio")));
+
static struct snd_soc_dai_link machine_dais[] = {
{
.name = "PCM0 System",
.stream_name = "System Playback",
- .cpu_dai_name = "System Pin",
- .platform_name = "dsp-audio",
- .codec_name = "snd-soc-dummy",
- .codec_dai_name = "snd-soc-dummy-dai",
+ SND_SOC_DAILINK_REG(pcm0),
.dynamic = 1,
.trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
},
@@ -174,15 +176,16 @@ dynamic and will change depending on runtime config.
The BE DAIs are configured as follows :-
::
+ SND_SOC_DAILINK_DEFS(headset,
+ DAILINK_COMP_ARRAY(COMP_CPU("ssp-dai.0")),
+ DAILINK_COMP_ARRAY(COMP_CODEC("rt5640.0-001c", "rt5640-aif1")));
+
static struct snd_soc_dai_link machine_dais[] = {
.....< FE DAI links here >
{
.name = "Codec Headset",
- .cpu_dai_name = "ssp-dai.0",
- .platform_name = "snd-soc-dummy",
+ SND_SOC_DAILINK_REG(headset),
.no_pcm = 1,
- .codec_name = "rt5640.0-001c",
- .codec_dai_name = "rt5640-aif1",
.ignore_suspend = 1,
.ignore_pmdown_time = 1,
.be_hw_params_fixup = hswult_ssp0_fixup,
@@ -362,7 +365,7 @@ The machine driver sets some additional parameters to the DAI link i.e.
.codec_dai_name = "modem-aif1",
.codec_name = "modem",
.dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF
- | SND_SOC_DAIFMT_CBM_CFM,
+ | SND_SOC_DAIFMT_CBP_CFP,
.c2c_params = &dai_params,
.num_c2c_params = 1,
}
diff --git a/Documentation/sound/soc/machine.rst b/Documentation/sound/soc/machine.rst
index 9db132bc0070..1828f5edca3e 100644
--- a/Documentation/sound/soc/machine.rst
+++ b/Documentation/sound/soc/machine.rst
@@ -75,7 +75,7 @@ In the above struct, dai’s are registered using names but you can pass
either dai name or device tree node but not both. Also, names used here
for cpu/codec/platform dais should be globally unique.
-Additionaly below example macro can be used to register cpu, codec and
+Additionally below example macro can be used to register cpu, codec and
platform dai::
SND_SOC_DAILINK_DEFS(wm2200_cpu_dsp,
diff --git a/Documentation/sphinx/automarkup.py b/Documentation/sphinx/automarkup.py
index a413f8dd5115..ecf54d22e9dc 100644
--- a/Documentation/sphinx/automarkup.py
+++ b/Documentation/sphinx/automarkup.py
@@ -11,13 +11,7 @@ from sphinx.errors import NoUri
import re
from itertools import chain
-#
-# Python 2 lacks re.ASCII...
-#
-try:
- ascii_p3 = re.ASCII
-except AttributeError:
- ascii_p3 = 0
+from kernel_abi import get_kernel_abi
#
# Regex nastiness. Of course.
@@ -26,28 +20,30 @@ except AttributeError:
# :c:func: block (i.e. ":c:func:`mmap()`s" flakes out), so the last
# bit tries to restrict matches to things that won't create trouble.
#
-RE_function = re.compile(r'\b(([a-zA-Z_]\w+)\(\))', flags=ascii_p3)
+RE_function = re.compile(r'\b(([a-zA-Z_]\w+)\(\))', flags=re.ASCII)
#
# Sphinx 2 uses the same :c:type role for struct, union, enum and typedef
#
RE_generic_type = re.compile(r'\b(struct|union|enum|typedef)\s+([a-zA-Z_]\w+)',
- flags=ascii_p3)
+ flags=re.ASCII)
#
# Sphinx 3 uses a different C role for each one of struct, union, enum and
# typedef
#
-RE_struct = re.compile(r'\b(struct)\s+([a-zA-Z_]\w+)', flags=ascii_p3)
-RE_union = re.compile(r'\b(union)\s+([a-zA-Z_]\w+)', flags=ascii_p3)
-RE_enum = re.compile(r'\b(enum)\s+([a-zA-Z_]\w+)', flags=ascii_p3)
-RE_typedef = re.compile(r'\b(typedef)\s+([a-zA-Z_]\w+)', flags=ascii_p3)
+RE_struct = re.compile(r'\b(struct)\s+([a-zA-Z_]\w+)', flags=re.ASCII)
+RE_union = re.compile(r'\b(union)\s+([a-zA-Z_]\w+)', flags=re.ASCII)
+RE_enum = re.compile(r'\b(enum)\s+([a-zA-Z_]\w+)', flags=re.ASCII)
+RE_typedef = re.compile(r'\b(typedef)\s+([a-zA-Z_]\w+)', flags=re.ASCII)
#
# Detects a reference to a documentation page of the form Documentation/... with
# an optional extension
#
RE_doc = re.compile(r'(\bDocumentation/)?((\.\./)*[\w\-/]+)\.(rst|txt)')
+RE_abi_file = re.compile(r'(\bDocumentation/ABI/[\w\-/]+)')
+RE_abi_symbol = re.compile(r'(\b/(sys|config|proc)/[\w\-/]+)')
RE_namespace = re.compile(r'^\s*..\s*c:namespace::\s*(\S+)\s*$')
@@ -83,11 +79,10 @@ def markup_refs(docname, app, node):
#
# Associate each regex with the function that will markup its matches
#
- markup_func_sphinx2 = {RE_doc: markup_doc_ref,
- RE_function: markup_c_ref,
- RE_generic_type: markup_c_ref}
- markup_func_sphinx3 = {RE_doc: markup_doc_ref,
+ markup_func = {RE_doc: markup_doc_ref,
+ RE_abi_file: markup_abi_file_ref,
+ RE_abi_symbol: markup_abi_ref,
RE_function: markup_func_ref_sphinx3,
RE_struct: markup_c_ref,
RE_union: markup_c_ref,
@@ -95,11 +90,6 @@ def markup_refs(docname, app, node):
RE_typedef: markup_c_ref,
RE_git: markup_git}
- if sphinx.version_info[0] >= 3:
- markup_func = markup_func_sphinx3
- else:
- markup_func = markup_func_sphinx2
-
match_iterators = [regex.finditer(t) for regex in markup_func]
#
# Sort all references by the starting position in text
@@ -270,6 +260,54 @@ def markup_doc_ref(docname, app, match):
else:
return nodes.Text(match.group(0))
+#
+# Try to replace a documentation reference for ABI symbols and files
+# with a cross reference to that page
+#
+def markup_abi_ref(docname, app, match, warning=False):
+ stddom = app.env.domains['std']
+ #
+ # Go through the dance of getting an xref out of the std domain
+ #
+ kernel_abi = get_kernel_abi()
+
+ fname = match.group(1)
+ target = kernel_abi.xref(fname)
+
+ # Kernel ABI doesn't describe such file or symbol
+ if not target:
+ if warning:
+ kernel_abi.log.warning("%s not found", fname)
+ return nodes.Text(match.group(0))
+
+ pxref = addnodes.pending_xref('', refdomain = 'std', reftype = 'ref',
+ reftarget = target, modname = None,
+ classname = None, refexplicit = False)
+
+ #
+ # XXX The Latex builder will throw NoUri exceptions here,
+ # work around that by ignoring them.
+ #
+ try:
+ xref = stddom.resolve_xref(app.env, docname, app.builder, 'ref',
+ target, pxref, None)
+ except NoUri:
+ xref = None
+ #
+ # Return the xref if we got it; otherwise just return the plain text.
+ #
+ if xref:
+ return xref
+ else:
+ return nodes.Text(match.group(0))
+
+#
+# Variant of markup_abi_ref() that warns when a reference is not found
+#
+def markup_abi_file_ref(docname, app, match):
+ return markup_abi_ref(docname, app, match, warning=True)
+
+
def get_c_namespace(app, docname):
source = app.env.doc2path(docname)
with open(source) as f:
diff --git a/Documentation/sphinx/cdomain.py b/Documentation/sphinx/cdomain.py
index e6959af25402..e8ea80d4324c 100644
--- a/Documentation/sphinx/cdomain.py
+++ b/Documentation/sphinx/cdomain.py
@@ -1,6 +1,6 @@
# -*- coding: utf-8; mode: python -*-
# pylint: disable=W0141,C0113,C0103,C0325
-u"""
+"""
cdomain
~~~~~~~
@@ -45,9 +45,6 @@ import re
__version__ = '1.1'
-# Get Sphinx version
-major, minor, patch = sphinx.version_info[:3]
-
# Namespace to be prepended to the full name
namespace = None
@@ -145,7 +142,7 @@ class CObject(Base_CObject):
}
def handle_func_like_macro(self, sig, signode):
- u"""Handles signatures of function-like macros.
+ """Handles signatures of function-like macros.
If the objtype is 'function' and the signature ``sig`` is a
function-like macro, the name of the macro is returned. Otherwise
diff --git a/Documentation/sphinx/kernel_abi.py b/Documentation/sphinx/kernel_abi.py
index 5911bd0d7965..db6f0380de94 100644
--- a/Documentation/sphinx/kernel_abi.py
+++ b/Documentation/sphinx/kernel_abi.py
@@ -2,7 +2,7 @@
# coding=utf-8
# SPDX-License-Identifier: GPL-2.0
#
-u"""
+"""
kernel-abi
~~~~~~~~~~
@@ -14,7 +14,7 @@ u"""
:license: GPL Version 2, June 1991 see Linux/COPYING for details.
The ``kernel-abi`` (:py:class:`KernelCmd`) directive calls the
- scripts/get_abi.pl script to parse the Kernel ABI files.
+ scripts/get_abi.py script to parse the Kernel ABI files.
Overview of directive's argument and options.
@@ -32,107 +32,137 @@ u"""
"""
-import codecs
import os
-import subprocess
-import sys
import re
-import kernellog
+import sys
from docutils import nodes, statemachine
from docutils.statemachine import ViewList
from docutils.parsers.rst import directives, Directive
-from docutils.utils.error_reporting import ErrorString
from sphinx.util.docutils import switch_source_input
+from sphinx.util import logging
+
+srctree = os.path.abspath(os.environ["srctree"])
+sys.path.insert(0, os.path.join(srctree, "scripts/lib/abi"))
+
+from abi_parser import AbiParser
+
+__version__ = "1.0"
+
+logger = logging.getLogger('kernel_abi')
+path = os.path.join(srctree, "Documentation/ABI")
-__version__ = '1.0'
+_kernel_abi = None
+
+def get_kernel_abi():
+ """
+ Initialize kernel_abi global var, if not initialized yet.
+
+ This is needed to avoid warnings during Sphinx module initialization.
+ """
+ global _kernel_abi
+
+ if not _kernel_abi:
+ # Parse ABI symbols only once
+ _kernel_abi = AbiParser(path, logger=logger)
+ _kernel_abi.parse_abi()
+ _kernel_abi.check_issues()
+
+ return _kernel_abi
def setup(app):
app.add_directive("kernel-abi", KernelCmd)
- return dict(
- version = __version__
- , parallel_read_safe = True
- , parallel_write_safe = True
- )
+ return {
+ "version": __version__,
+ "parallel_read_safe": True,
+ "parallel_write_safe": True
+ }
-class KernelCmd(Directive):
- u"""KernelABI (``kernel-abi``) directive"""
+class KernelCmd(Directive):
+ """KernelABI (``kernel-abi``) directive"""
required_arguments = 1
- optional_arguments = 2
+ optional_arguments = 3
has_content = False
final_argument_whitespace = True
+ parser = None
option_spec = {
- "debug" : directives.flag,
- "rst" : directives.unchanged
+ "debug": directives.flag,
+ "no-symbols": directives.flag,
+ "no-files": directives.flag,
}
def run(self):
+ kernel_abi = get_kernel_abi()
+
doc = self.state.document
if not doc.settings.file_insertion_enabled:
raise self.warning("docutils: file insertion disabled")
- srctree = os.path.abspath(os.environ["srctree"])
-
- args = [
- os.path.join(srctree, 'scripts/get_abi.pl'),
- 'rest',
- '--enable-lineno',
- '--dir', os.path.join(srctree, 'Documentation', self.arguments[0]),
- ]
-
- if 'rst' in self.options:
- args.append('--rst-source')
-
- lines = subprocess.check_output(args, cwd=os.path.dirname(doc.current_source)).decode('utf-8')
- nodeList = self.nestedParse(lines, self.arguments[0])
- return nodeList
-
- def nestedParse(self, lines, fname):
env = self.state.document.settings.env
content = ViewList()
node = nodes.section()
- if "debug" in self.options:
- code_block = "\n\n.. code-block:: rst\n :linenos:\n"
- for l in lines.split("\n"):
- code_block += "\n " + l
- lines = code_block + "\n\n"
+ abi_type = self.arguments[0]
- line_regex = re.compile(r"^\.\. LINENO (\S+)\#([0-9]+)$")
- ln = 0
- n = 0
- f = fname
+ if "no-symbols" in self.options:
+ show_symbols = False
+ else:
+ show_symbols = True
- for line in lines.split("\n"):
- n = n + 1
- match = line_regex.search(line)
- if match:
- new_f = match.group(1)
+ if "no-files" in self.options:
+ show_file = False
+ else:
+ show_file = True
- # Sphinx parser is lazy: it stops parsing contents in the
- # middle, if it is too big. So, handle it per input file
- if new_f != f and content:
- self.do_parse(content, node)
- content = ViewList()
+ tab_width = self.options.get('tab-width',
+ self.state.document.settings.tab_width)
- # Add the file to Sphinx build dependencies
- env.note_dependency(os.path.abspath(f))
-
- f = new_f
-
- # sphinx counts lines from 0
- ln = int(match.group(2)) - 1
+ old_f = None
+ n = 0
+ n_sym = 0
+ for msg, f, ln in kernel_abi.doc(show_file=show_file,
+ show_symbols=show_symbols,
+ filter_path=abi_type):
+ n_sym += 1
+ msg_list = statemachine.string2lines(msg, tab_width,
+ convert_whitespace=True)
+ if "debug" in self.options:
+ lines = [
+ "", "", ".. code-block:: rst",
+ " :linenos:", ""
+ ]
+ for m in msg_list:
+ lines.append(" " + m)
else:
- content.append(line, f, ln)
-
- kernellog.info(self.state.document.settings.env.app, "%s: parsed %i lines" % (fname, n))
+ lines = msg_list
- if content:
- self.do_parse(content, node)
+ for line in lines:
+ # sphinx counts lines from 0
+ content.append(line, f, ln - 1)
+ n += 1
+
+ if f != old_f:
+ # Add the file to Sphinx build dependencies
+ env.note_dependency(os.path.abspath(f))
+
+ old_f = f
+
+ # Sphinx doesn't like to parse big messages. So, let's
+ # add content symbol by symbol
+ if content:
+ self.do_parse(content, node)
+ content = ViewList()
+
+ if show_symbols and not show_file:
+ logger.verbose("%s ABI: %i symbols (%i ReST lines)" % (abi_type, n_sym, n))
+ elif not show_symbols and show_file:
+ logger.verbose("%s ABI: %i files (%i ReST lines)" % (abi_type, n_sym, n))
+ else:
+ logger.verbose("%s ABI: %i data (%i ReST lines)" % (abi_type, n_sym, n))
return node.children
diff --git a/Documentation/sphinx/kernel_feat.py b/Documentation/sphinx/kernel_feat.py
index 03ace5f01b5c..e3a51867f27b 100644
--- a/Documentation/sphinx/kernel_feat.py
+++ b/Documentation/sphinx/kernel_feat.py
@@ -1,7 +1,7 @@
# coding=utf-8
# SPDX-License-Identifier: GPL-2.0
#
-u"""
+"""
kernel-feat
~~~~~~~~~~~
@@ -56,7 +56,7 @@ def setup(app):
class KernelFeat(Directive):
- u"""KernelFeat (``kernel-feat``) directive"""
+ """KernelFeat (``kernel-feat``) directive"""
required_arguments = 1
optional_arguments = 2
diff --git a/Documentation/sphinx/kernel_include.py b/Documentation/sphinx/kernel_include.py
index 638762442336..8db176045bc5 100755
--- a/Documentation/sphinx/kernel_include.py
+++ b/Documentation/sphinx/kernel_include.py
@@ -2,7 +2,7 @@
# -*- coding: utf-8; mode: python -*-
# pylint: disable=R0903, C0330, R0914, R0912, E0401
-u"""
+"""
kernel-include
~~~~~~~~~~~~~~
@@ -56,7 +56,7 @@ def setup(app):
class KernelInclude(Include):
# ==============================================================================
- u"""KernelInclude (``kernel-include``) directive"""
+ """KernelInclude (``kernel-include``) directive"""
def run(self):
env = self.state.document.settings.env
diff --git a/Documentation/sphinx/kerneldoc.py b/Documentation/sphinx/kerneldoc.py
index ec1ddfff1863..39ddae6ae7dd 100644
--- a/Documentation/sphinx/kerneldoc.py
+++ b/Documentation/sphinx/kerneldoc.py
@@ -39,7 +39,7 @@ from docutils.statemachine import ViewList
from docutils.parsers.rst import directives, Directive
import sphinx
from sphinx.util.docutils import switch_source_input
-import kernellog
+from sphinx.util import logging
__version__ = '1.0'
@@ -56,16 +56,12 @@ class KernelDocDirective(Directive):
'functions': directives.unchanged,
}
has_content = False
+ logger = logging.getLogger('kerneldoc')
def run(self):
env = self.state.document.settings.env
cmd = [env.config.kerneldoc_bin, '-rst', '-enable-lineno']
- # Pass the version string to kernel-doc, as it needs to use a different
- # dialect, depending what the C domain supports for each specific
- # Sphinx versions
- cmd += ['-sphinx-version', sphinx.__version__]
-
filename = env.config.kerneldoc_srctree + '/' + self.arguments[0]
export_file_patterns = []
@@ -109,8 +105,7 @@ class KernelDocDirective(Directive):
cmd += [filename]
try:
- kernellog.verbose(env.app,
- 'calling kernel-doc \'%s\'' % (" ".join(cmd)))
+ self.logger.verbose("calling kernel-doc '%s'" % (" ".join(cmd)))
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = p.communicate()
@@ -120,8 +115,8 @@ class KernelDocDirective(Directive):
if p.returncode != 0:
sys.stderr.write(err)
- kernellog.warn(env.app,
- 'kernel-doc \'%s\' failed with return code %d' % (" ".join(cmd), p.returncode))
+ self.logger.warning("kernel-doc '%s' failed with return code %d"
+ % (" ".join(cmd), p.returncode))
return [nodes.error(None, nodes.paragraph(text = "kernel-doc missing"))]
elif env.config.kerneldoc_verbosity > 0:
sys.stderr.write(err)
@@ -148,8 +143,8 @@ class KernelDocDirective(Directive):
return node.children
except Exception as e: # pylint: disable=W0703
- kernellog.warn(env.app, 'kernel-doc \'%s\' processing failed with: %s' %
- (" ".join(cmd), str(e)))
+ self.logger.warning("kernel-doc '%s' processing failed with: %s" %
+ (" ".join(cmd), str(e)))
return [nodes.error(None, nodes.paragraph(text = "kernel-doc missing"))]
def do_parse(self, result, node):
diff --git a/Documentation/sphinx/kernellog.py b/Documentation/sphinx/kernellog.py
deleted file mode 100644
index 0bc00c138cad..000000000000
--- a/Documentation/sphinx/kernellog.py
+++ /dev/null
@@ -1,22 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-#
-# Sphinx has deprecated its older logging interface, but the replacement
-# only goes back to 1.6. So here's a wrapper layer to keep around for
-# as long as we support 1.4.
-#
-# We don't support 1.4 anymore, but we'll keep the wrappers around until
-# we change all the code to not use them anymore :)
-#
-import sphinx
-from sphinx.util import logging
-
-logger = logging.getLogger('kerneldoc')
-
-def warn(app, message):
- logger.warning(message)
-
-def verbose(app, message):
- logger.verbose(message)
-
-def info(app, message):
- logger.info(message)
diff --git a/Documentation/sphinx/kfigure.py b/Documentation/sphinx/kfigure.py
index 97166333b727..f1a7f13c9c60 100644
--- a/Documentation/sphinx/kfigure.py
+++ b/Documentation/sphinx/kfigure.py
@@ -1,6 +1,6 @@
# -*- coding: utf-8; mode: python -*-
# pylint: disable=C0103, R0903, R0912, R0915
-u"""
+"""
scalable figure and image handling
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -59,12 +59,14 @@ from docutils.parsers.rst import directives
from docutils.parsers.rst.directives import images
import sphinx
from sphinx.util.nodes import clean_astext
-import kernellog
+from sphinx.util import logging
Figure = images.Figure
__version__ = '1.0.0'
+logger = logging.getLogger('kfigure')
+
# simple helper
# -------------
@@ -163,14 +165,14 @@ def setup(app):
def setupTools(app):
- u"""
+ """
Check available build tools and log some *verbose* messages.
This function is called once, when the builder is initiated.
"""
global dot_cmd, dot_Tpdf, convert_cmd, rsvg_convert_cmd # pylint: disable=W0603
global inkscape_cmd, inkscape_ver_one # pylint: disable=W0603
- kernellog.verbose(app, "kfigure: check installed tools ...")
+ logger.verbose("kfigure: check installed tools ...")
dot_cmd = which('dot')
convert_cmd = which('convert')
@@ -178,7 +180,7 @@ def setupTools(app):
inkscape_cmd = which('inkscape')
if dot_cmd:
- kernellog.verbose(app, "use dot(1) from: " + dot_cmd)
+ logger.verbose("use dot(1) from: " + dot_cmd)
try:
dot_Thelp_list = subprocess.check_output([dot_cmd, '-Thelp'],
@@ -190,10 +192,11 @@ def setupTools(app):
dot_Tpdf_ptn = b'pdf'
dot_Tpdf = re.search(dot_Tpdf_ptn, dot_Thelp_list)
else:
- kernellog.warn(app, "dot(1) not found, for better output quality install "
- "graphviz from https://www.graphviz.org")
+ logger.warning(
+ "dot(1) not found, for better output quality install graphviz from https://www.graphviz.org"
+ )
if inkscape_cmd:
- kernellog.verbose(app, "use inkscape(1) from: " + inkscape_cmd)
+ logger.verbose("use inkscape(1) from: " + inkscape_cmd)
inkscape_ver = subprocess.check_output([inkscape_cmd, '--version'],
stderr=subprocess.DEVNULL)
ver_one_ptn = b'Inkscape 1'
@@ -204,26 +207,27 @@ def setupTools(app):
else:
if convert_cmd:
- kernellog.verbose(app, "use convert(1) from: " + convert_cmd)
+ logger.verbose("use convert(1) from: " + convert_cmd)
else:
- kernellog.verbose(app,
+ logger.verbose(
"Neither inkscape(1) nor convert(1) found.\n"
- "For SVG to PDF conversion, "
- "install either Inkscape (https://inkscape.org/) (preferred) or\n"
- "ImageMagick (https://www.imagemagick.org)")
+ "For SVG to PDF conversion, install either Inkscape (https://inkscape.org/) (preferred) or\n"
+ "ImageMagick (https://www.imagemagick.org)"
+ )
if rsvg_convert_cmd:
- kernellog.verbose(app, "use rsvg-convert(1) from: " + rsvg_convert_cmd)
- kernellog.verbose(app, "use 'dot -Tsvg' and rsvg-convert(1) for DOT -> PDF conversion")
+ logger.verbose("use rsvg-convert(1) from: " + rsvg_convert_cmd)
+ logger.verbose("use 'dot -Tsvg' and rsvg-convert(1) for DOT -> PDF conversion")
dot_Tpdf = False
else:
- kernellog.verbose(app,
+ logger.verbose(
"rsvg-convert(1) not found.\n"
- " SVG rendering of convert(1) is done by ImageMagick-native renderer.")
+ " SVG rendering of convert(1) is done by ImageMagick-native renderer."
+ )
if dot_Tpdf:
- kernellog.verbose(app, "use 'dot -Tpdf' for DOT -> PDF conversion")
+ logger.verbose("use 'dot -Tpdf' for DOT -> PDF conversion")
else:
- kernellog.verbose(app, "use 'dot -Tsvg' and convert(1) for DOT -> PDF conversion")
+ logger.verbose("use 'dot -Tsvg' and convert(1) for DOT -> PDF conversion")
# integrate conversion tools
@@ -257,13 +261,12 @@ def convert_image(img_node, translator, src_fname=None):
# in kernel builds, use 'make SPHINXOPTS=-v' to see verbose messages
- kernellog.verbose(app, 'assert best format for: ' + img_node['uri'])
+ logger.verbose('assert best format for: ' + img_node['uri'])
if in_ext == '.dot':
if not dot_cmd:
- kernellog.verbose(app,
- "dot from graphviz not available / include DOT raw.")
+ logger.verbose("dot from graphviz not available / include DOT raw.")
img_node.replace_self(file2literal(src_fname))
elif translator.builder.format == 'latex':
@@ -290,10 +293,11 @@ def convert_image(img_node, translator, src_fname=None):
if translator.builder.format == 'latex':
if not inkscape_cmd and convert_cmd is None:
- kernellog.warn(app,
- "no SVG to PDF conversion available / include SVG raw."
- "\nIncluding large raw SVGs can cause xelatex error."
- "\nInstall Inkscape (preferred) or ImageMagick.")
+ logger.warning(
+ "no SVG to PDF conversion available / include SVG raw.\n"
+ "Including large raw SVGs can cause xelatex error.\n"
+ "Install Inkscape (preferred) or ImageMagick."
+ )
img_node.replace_self(file2literal(src_fname))
else:
dst_fname = path.join(translator.builder.outdir, fname + '.pdf')
@@ -306,15 +310,14 @@ def convert_image(img_node, translator, src_fname=None):
_name = dst_fname[len(str(translator.builder.outdir)) + 1:]
if isNewer(dst_fname, src_fname):
- kernellog.verbose(app,
- "convert: {out}/%s already exists and is newer" % _name)
+ logger.verbose("convert: {out}/%s already exists and is newer" % _name)
else:
ok = False
mkdir(path.dirname(dst_fname))
if in_ext == '.dot':
- kernellog.verbose(app, 'convert DOT to: {out}/' + _name)
+ logger.verbose('convert DOT to: {out}/' + _name)
if translator.builder.format == 'latex' and not dot_Tpdf:
svg_fname = path.join(translator.builder.outdir, fname + '.svg')
ok1 = dot2format(app, src_fname, svg_fname)
@@ -325,7 +328,7 @@ def convert_image(img_node, translator, src_fname=None):
ok = dot2format(app, src_fname, dst_fname)
elif in_ext == '.svg':
- kernellog.verbose(app, 'convert SVG to: {out}/' + _name)
+ logger.verbose('convert SVG to: {out}/' + _name)
ok = svg2pdf(app, src_fname, dst_fname)
if not ok:
@@ -354,7 +357,7 @@ def dot2format(app, dot_fname, out_fname):
with open(out_fname, "w") as out:
exit_code = subprocess.call(cmd, stdout = out)
if exit_code != 0:
- kernellog.warn(app,
+ logger.warning(
"Error #%d when calling: %s" % (exit_code, " ".join(cmd)))
return bool(exit_code == 0)
@@ -388,13 +391,14 @@ def svg2pdf(app, svg_fname, pdf_fname):
pass
if exit_code != 0:
- kernellog.warn(app, "Error #%d when calling: %s" % (exit_code, " ".join(cmd)))
+ logger.warning("Error #%d when calling: %s" %
+ (exit_code, " ".join(cmd)))
if warning_msg:
- kernellog.warn(app, "Warning msg from %s: %s"
- % (cmd_name, str(warning_msg, 'utf-8')))
+ logger.warning( "Warning msg from %s: %s" %
+ (cmd_name, str(warning_msg, 'utf-8')))
elif warning_msg:
- kernellog.verbose(app, "Warning msg from %s (likely harmless):\n%s"
- % (cmd_name, str(warning_msg, 'utf-8')))
+ logger.verbose("Warning msg from %s (likely harmless):\n%s" %
+ (cmd_name, str(warning_msg, 'utf-8')))
return bool(exit_code == 0)
@@ -418,7 +422,8 @@ def svg2pdf_by_rsvg(app, svg_fname, pdf_fname):
# use stdout and stderr from parent
exit_code = subprocess.call(cmd)
if exit_code != 0:
- kernellog.warn(app, "Error #%d when calling: %s" % (exit_code, " ".join(cmd)))
+ logger.warning("Error #%d when calling: %s" %
+ (exit_code, " ".join(cmd)))
ok = bool(exit_code == 0)
return ok
@@ -440,7 +445,7 @@ class kernel_image(nodes.image):
pass
class KernelImage(images.Image):
- u"""KernelImage directive
+ """KernelImage directive
Earns everything from ``.. image::`` directive, except *remote URI* and
*glob* pattern. The KernelImage wraps a image node into a
@@ -476,7 +481,7 @@ class kernel_figure(nodes.figure):
"""Node for ``kernel-figure`` directive."""
class KernelFigure(Figure):
- u"""KernelImage directive
+ """KernelImage directive
Earns everything from ``.. figure::`` directive, except *remote URI* and
*glob* pattern. The KernelFigure wraps a figure node into a kernel_figure
@@ -513,15 +518,15 @@ def visit_kernel_render(self, node):
app = self.builder.app
srclang = node.get('srclang')
- kernellog.verbose(app, 'visit kernel-render node lang: "%s"' % (srclang))
+ logger.verbose('visit kernel-render node lang: "%s"' % srclang)
tmp_ext = RENDER_MARKUP_EXT.get(srclang, None)
if tmp_ext is None:
- kernellog.warn(app, 'kernel-render: "%s" unknown / include raw.' % (srclang))
+ logger.warning( 'kernel-render: "%s" unknown / include raw.' % srclang)
return
if not dot_cmd and tmp_ext == '.dot':
- kernellog.verbose(app, "dot from graphviz not available / include raw.")
+ logger.verbose("dot from graphviz not available / include raw.")
return
literal_block = node[0]
@@ -552,7 +557,7 @@ class kernel_render(nodes.General, nodes.Inline, nodes.Element):
pass
class KernelRender(Figure):
- u"""KernelRender directive
+ """KernelRender directive
Render content by external tool. Has all the options known from the
*figure* directive, plus option ``caption``. If ``caption`` has a
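
The kfigure.py hunks above replace the old kernellog helpers with the standard
Sphinx logging adapter. A minimal sketch of the resulting pattern, assuming only
that sphinx.util.logging is available at build time (the function and messages
below are illustrative, not copied from the patch)::

    from sphinx.util import logging

    logger = logging.getLogger(__name__)

    def report_conversion(exit_code, cmd):
        # warning() feeds the normal Sphinx warning stream (fatal with -W);
        # verbose() is only shown when sphinx-build runs with -v.
        if exit_code != 0:
            logger.warning("Error #%d when calling: %s" % (exit_code, " ".join(cmd)))
        else:
            logger.verbose("finished: %s", " ".join(cmd))

One practical consequence is that the app argument threaded through the old
kernellog calls is no longer needed, which is why the converted call sites drop it.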
diff --git a/Documentation/sphinx/load_config.py b/Documentation/sphinx/load_config.py
index 8b416bfd75ac..ec50e1ee5223 100644
--- a/Documentation/sphinx/load_config.py
+++ b/Documentation/sphinx/load_config.py
@@ -9,7 +9,7 @@ from sphinx.util.osutil import fs_encoding
def loadConfig(namespace):
# ------------------------------------------------------------------------------
- u"""Load an additional configuration file into *namespace*.
+ """Load an additional configuration file into *namespace*.
The name of the configuration file is taken from the environment
``SPHINX_CONF``. The external configuration file extends (or overwrites) the
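
For context, loadConfig's mechanism is straightforward: when the SPHINX_CONF
environment variable names a second configuration file, that file is executed
into the same namespace so it can extend or overwrite the defaults. A rough
sketch of the idea, with names of my own choosing rather than the kernel's
exact implementation::

    import os
    from os import path

    def load_extra_config(namespace):
        # Overlay an extra Sphinx config taken from $SPHINX_CONF, if any.
        config_file = os.environ.get("SPHINX_CONF")
        if not config_file or not path.exists(config_file):
            return
        config_file = path.abspath(config_file)
        with open(config_file, encoding="utf-8") as f:
            source = f.read()
        # Assignments made by the extra file (extensions, html_theme, ...)
        # land directly in the caller's namespace.
        exec(compile(source, config_file, "exec"), namespace)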
diff --git a/Documentation/sphinx/maintainers_include.py b/Documentation/sphinx/maintainers_include.py
index dcad0fff4723..d31cff867436 100755
--- a/Documentation/sphinx/maintainers_include.py
+++ b/Documentation/sphinx/maintainers_include.py
@@ -3,7 +3,7 @@
# -*- coding: utf-8; mode: python -*-
# pylint: disable=R0903, C0330, R0914, R0912, E0401
-u"""
+"""
maintainers-include
~~~~~~~~~~~~~~~~~~~
@@ -37,7 +37,7 @@ def setup(app):
)
class MaintainersInclude(Include):
- u"""MaintainersInclude (``maintainers-include``) directive"""
+ """MaintainersInclude (``maintainers-include``) directive"""
required_arguments = 0
def parse_maintainers(self, path):
diff --git a/Documentation/sphinx/rstFlatTable.py b/Documentation/sphinx/rstFlatTable.py
index 16bea0632555..180fbb50c337 100755
--- a/Documentation/sphinx/rstFlatTable.py
+++ b/Documentation/sphinx/rstFlatTable.py
@@ -2,7 +2,7 @@
# -*- coding: utf-8; mode: python -*-
# pylint: disable=C0330, R0903, R0912
-u"""
+"""
flat-table
~~~~~~~~~~
@@ -99,7 +99,7 @@ class colSpan(nodes.General, nodes.Element): pass # pylint: disable=C0103,C0321
class FlatTable(Table):
# ==============================================================================
- u"""FlatTable (``flat-table``) directive"""
+ """FlatTable (``flat-table``) directive"""
option_spec = {
'name': directives.unchanged
@@ -135,7 +135,7 @@ class FlatTable(Table):
class ListTableBuilder(object):
# ==============================================================================
- u"""Builds a table from a double-stage list"""
+ """Builds a table from a double-stage list"""
def __init__(self, directive):
self.directive = directive
@@ -212,7 +212,7 @@ class ListTableBuilder(object):
raise SystemMessagePropagation(error)
def parseFlatTableNode(self, node):
- u"""parses the node from a :py:class:`FlatTable` directive's body"""
+ """parses the node from a :py:class:`FlatTable` directive's body"""
if len(node) != 1 or not isinstance(node[0], nodes.bullet_list):
self.raiseError(
@@ -225,7 +225,7 @@ class ListTableBuilder(object):
self.roundOffTableDefinition()
def roundOffTableDefinition(self):
- u"""Round off the table definition.
+ """Round off the table definition.
This method rounds off the table definition in :py:member:`rows`.
diff --git a/Documentation/tools/rv/rv-mon-sched.rst b/Documentation/tools/rv/rv-mon-sched.rst
new file mode 100644
index 000000000000..da0fe4c79ae5
--- /dev/null
+++ b/Documentation/tools/rv/rv-mon-sched.rst
@@ -0,0 +1,69 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+============
+rv-mon-sched
+============
+-----------------------------
+Scheduler monitors collection
+-----------------------------
+
+:Manual section: 1
+
+SYNOPSIS
+========
+
+**rv mon sched** [*OPTIONS*]
+
+**rv mon <NESTED_MONITOR>** [*OPTIONS*]
+
+**rv mon sched:<NESTED_MONITOR>** [*OPTIONS*]
+
+DESCRIPTION
+===========
+
+The scheduler monitor collection is a container for several monitors to model
+the behaviour of the scheduler. Each monitor describes a specification that
+the scheduler should follow.
+
+As a monitor container, it will enable all nested monitors and set them
+according to OPTIONS.
+Nevertheless, nested monitors can also be activated independently, either by
+name or by specifying the sched: prefix; e.g. to enable only the tss monitor
+you can do any of:
+
+ # rv mon sched:tss
+
+ # rv mon tss
+
+See kernel documentation for further information about this monitor:
+<https://docs.kernel.org/trace/rv/monitor_sched.html>
+
+OPTIONS
+=======
+
+.. include:: common_ikm.rst
+
+NESTED MONITOR
+==============
+
+The available nested monitors are:
+ * scpd: schedule called with preemption disabled
+ * snep: schedule does not enable preempt
+ * sncid: schedule not called with interrupt disabled
+ * snroc: set non runnable on its own context
+ * sco: scheduling context operations
+ * tss: task switch while scheduling
+
+SEE ALSO
+========
+
+**rv**\(1), **rv-mon**\(1)
+
+Linux kernel *RV* documentation:
+<https://www.kernel.org/doc/html/latest/trace/rv/index.html>
+
+AUTHOR
+======
+
+Written by Gabriele Monaco <gmonaco@redhat.com>
+
+.. include:: common_appendix.rst
diff --git a/Documentation/trace/postprocess/decode_msr.py b/Documentation/trace/postprocess/decode_msr.py
index aa9cc7abd5c2..f5609b16f589 100644
--- a/Documentation/trace/postprocess/decode_msr.py
+++ b/Documentation/trace/postprocess/decode_msr.py
@@ -32,6 +32,6 @@ for j in sys.stdin:
break
if r:
j = j.replace(" " + m.group(2), " " + r + "(" + m.group(2) + ")")
- print j,
+ print(j, end="")
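
A note on this conversion: j is read from sys.stdin and still carries its
trailing newline, and the old Python 2 form "print j," did not add another one.
The faithful Python 3 port therefore passes end="", otherwise the decoded
output gains blank lines. A minimal sketch of the loop::

    import sys

    # stdin lines keep their '\n'; suppress print()'s own newline.
    for j in sys.stdin:
        print(j, end="")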
diff --git a/Documentation/trace/rv/monitor_sched.rst b/Documentation/trace/rv/monitor_sched.rst
new file mode 100644
index 000000000000..24b2c62a3bc2
--- /dev/null
+++ b/Documentation/trace/rv/monitor_sched.rst
@@ -0,0 +1,171 @@
+Scheduler monitors
+==================
+
+- Name: sched
+- Type: container for multiple monitors
+- Author: Gabriele Monaco <gmonaco@redhat.com>, Daniel Bristot de Oliveira <bristot@kernel.org>
+
+Description
+-----------
+
+Monitors describing complex systems, such as the scheduler, can easily grow to
+the point where they are just hard to understand because of the many possible
+state transitions.
+Often it is possible to break such descriptions into smaller monitors,
+sharing some or all events. Enabling those smaller monitors concurrently is,
+in fact, testing the system as if we had one single larger monitor.
+Splitting models into multiple specifications is not only easier to
+understand, but also gives more clues when we see errors.
+
+The sched monitor is a set of specifications to describe the scheduler behaviour.
+It includes several per-cpu and per-task monitors that work independently to verify
+different specifications the scheduler should follow.
+
+To make this system as straightforward as possible, sched specifications are *nested*
+monitors, whereas sched itself is a *container*.
+From the interface perspective, sched includes other monitors as sub-directories;
+enabling/disabling sched or setting reactors on it propagates the change to all
+nested monitors, although single monitors can still be used independently.
+
+It is important that future modules are built after their container (sched, in
+this case), otherwise the linker would not respect the order and the nesting
+wouldn't work as expected.
+To do so, simply add them after sched in the Makefile.
+
+Specifications
+--------------
+
+The specifications included in sched are currently a work in progress, adapting
+the ones defined by Daniel Bristot in [1].
+
+Currently we include the following:
+
+Monitor tss
+~~~~~~~~~~~
+
+The task switch while scheduling (tss) monitor ensures a task switch happens
+only in scheduling context, that is inside a call to `__schedule`::
+
+ |
+ |
+ v
+ +-----------------+
+ | thread | <+
+ +-----------------+ |
+ | |
+ | schedule_entry | schedule_exit
+ v |
+ sched_switch |
+ +--------------- |
+ | sched |
+ +--------------> -+
+
+Monitor sco
+~~~~~~~~~~~
+
+The scheduling context operations (sco) monitor ensures changes in a task state
+happen only in thread context::
+
+
+ |
+ |
+ v
+ sched_set_state +------------------+
+ +------------------ | |
+ | | thread_context |
+ +-----------------> | | <+
+ +------------------+ |
+ | |
+ | schedule_entry | schedule_exit
+ v |
+ |
+ scheduling_context -+
+
+Monitor snroc
+~~~~~~~~~~~~~
+
+The set non runnable on its own context (snroc) monitor ensures changes in a
+task state happen only in the respective task's context. This is a per-task
+monitor::
+
+ |
+ |
+ v
+ +------------------+
+ | other_context | <+
+ +------------------+ |
+ | |
+ | sched_switch_in | sched_switch_out
+ v |
+ sched_set_state |
+ +------------------ |
+ | own_context |
+ +-----------------> -+
+
+Monitor scpd
+~~~~~~~~~~~~
+
+The schedule called with preemption disabled (scpd) monitor ensures schedule is
+called with preemption disabled::
+
+ |
+ |
+ v
+ +------------------+
+ | cant_sched | <+
+ +------------------+ |
+ | |
+ | preempt_disable | preempt_enable
+ v |
+ schedule_entry |
+ schedule_exit |
+ +----------------- can_sched |
+ | |
+ +----------------> -+
+
+Monitor snep
+~~~~~~~~~~~~
+
+The schedule does not enable preempt (snep) monitor ensures a schedule call
+does not enable preemption::
+
+ |
+ |
+ v
+ preempt_disable +------------------------+
+ preempt_enable | |
+ +------------------ | non_scheduling_context |
+ | | |
+ +-----------------> | | <+
+ +------------------------+ |
+ | |
+ | schedule_entry | schedule_exit
+ v |
+ |
+ scheduling_contex -+
+
+Monitor sncid
+~~~~~~~~~~~~~
+
+The schedule not called with interrupt disabled (sncid) monitor ensures
+schedule is not called with interrupt disabled::
+
+ |
+ |
+ v
+ schedule_entry +--------------+
+ schedule_exit | |
+ +----------------- | can_sched |
+ | | |
+ +----------------> | | <+
+ +--------------+ |
+ | |
+ | irq_disable | irq_enable
+ v |
+ |
+ cant_sched -+
+
+References
+----------
+
+[1] - https://bristot.me/linux-task-model
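
To make the state-machine reading of these diagrams concrete, here is a small
illustrative model of the tss specification in Python. It is only a sketch of
what the tss diagram earlier in this file encodes (the in-kernel monitors are
generated C automata), with names chosen here for clarity::

    # States and events taken from the tss diagram: a task switch is only
    # legal while inside __schedule().
    TSS_TRANSITIONS = {
        ("thread", "schedule_entry"): "sched",
        ("sched", "schedule_exit"): "thread",
        ("sched", "sched_switch"): "sched",
    }

    def tss_accepts(events, state="thread"):
        """Return True if the event trace respects the tss specification."""
        for event in events:
            key = (state, event)
            if key not in TSS_TRANSITIONS:
                return False  # e.g. sched_switch outside scheduling context
            state = TSS_TRANSITIONS[key]
        return True

    assert tss_accepts(["schedule_entry", "sched_switch", "schedule_exit"])
    assert not tss_accepts(["sched_switch"])

Each of the other monitors above can be read the same way: the boxes are
states, the labelled edges are the traced events, and an event with no edge
from the current state is a specification violation that RV reports.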
diff --git a/Documentation/translations/it_IT/process/submit-checklist.rst b/Documentation/translations/it_IT/process/submit-checklist.rst
index 692be4af9c9b..5bf1b4adebc1 100644
--- a/Documentation/translations/it_IT/process/submit-checklist.rst
+++ b/Documentation/translations/it_IT/process/submit-checklist.rst
@@ -58,9 +58,10 @@ Fornite documentazione
4) Tutti i nuovi parametri dei moduli sono documentati con ``MODULE_PARM_DESC()``.
5) Tutte le nuove interfacce verso lo spazio utente sono documentate in
- ``Documentation/ABI/``. Leggete ``Documentation/ABI/README`` per maggiori
- informazioni. Le patch che modificano le interfacce utente dovrebbero
- essere inviate in copia anche a linux-api@vger.kernel.org.
+ ``Documentation/ABI/``. Leggete Documentation/admin-guide/abi.rst
+ (o ``Documentation/ABI/README``) per maggiori informazioni.
+ Le patch che modificano le interfacce utente dovrebbero essere inviate
+ in copia anche a linux-api@vger.kernel.org.
6) Se la patch aggiunge nuove chiamate ioctl, allora aggiornate
``Documentation/userspace-api/ioctl/ioctl-number.rst``.
diff --git a/Documentation/translations/ja_JP/SubmitChecklist b/Documentation/translations/ja_JP/SubmitChecklist
deleted file mode 100644
index 1759c6b452d6..000000000000
--- a/Documentation/translations/ja_JP/SubmitChecklist
+++ /dev/null
@@ -1,105 +0,0 @@
-NOTE:
-This is a version of Documentation/process/submit-checklist.rst into Japanese.
-This document is maintained by Takenori Nagano <t-nagano@ah.jp.nec.com>
-and the JF Project team <http://www.linux.or.jp/JF/>.
-If you find any difference between this document and the original file
-or a problem with the translation,
-please contact the maintainer of this file or JF project.
-
-Please also note that the purpose of this file is to be easier to read
-for non English (read: Japanese) speakers and is not intended as a
-fork. So if you have any comments or updates of this file, please try
-to update the original English file first.
-
-Last Updated: 2008/07/14
-==================================
-ã“れã¯ã€
-linux-2.6.26/Documentation/process/submit-checklist.rst ã®å’Œè¨³ã§ã™ã€‚
-
-翻訳団体: JF プロジェクト < http://www.linux.or.jp/JF/ >
-翻訳日: 2008/07/14
-翻訳者: Takenori Nagano <t-nagano at ah dot jp dot nec dot com>
-校正者: Masanori Kobayashi ã•ã‚“ <zap03216 at nifty dot ne dot jp>
-==================================
-
-
-Linux カーãƒãƒ«ãƒ‘ãƒƒãƒæŠ•ç¨¿è€…å‘ã‘ãƒã‚§ãƒƒã‚¯ãƒªã‚¹ãƒˆ
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-本書ã§ã¯ã€ãƒ‘ッãƒã‚’より素早ãå–り込んã§ã‚‚らã„ãŸã„開発者ãŒå®Ÿè·µã™ã¹ã基本的ãªäº‹æŸ„
-ã‚’ã„ãã¤ã‹ç´¹ä»‹ã—ã¾ã™ã€‚ã“ã“ã«ã‚ã‚‹å…¨ã¦ã®äº‹æŸ„ã¯ã€Documentation/process/submitting-patches.rst
-ãªã©ã®Linuxカーãƒãƒ«ãƒ‘ãƒƒãƒæŠ•ç¨¿ã«éš›ã—ã¦ã®å¿ƒå¾—を補足ã™ã‚‹ã‚‚ã®ã§ã™ã€‚
-
- 1: 妥当ãªCONFIGオプションや変更ã•れãŸCONFIGオプションã€ã¤ã¾ã‚Š =y, =m, =n
- å…¨ã¦ã§æ­£ã—ãビルドã§ãã‚‹ã“ã¨ã‚’確èªã—ã¦ãã ã•ã„。ãã®éš›ã€gccåŠã³ãƒªãƒ³ã‚«ãŒ
- warningã‚„errorを出ã—ã¦ã„ãªã„ã“ã¨ã‚‚確èªã—ã¦ãã ã•ã„。
-
- 2: allnoconfig, allmodconfig オプションを用ã„ã¦æ­£ã—ãビルドã§ãã‚‹ã“ã¨ã‚’
- 確èªã—ã¦ãã ã•ã„。
-
- 3: 手許ã®ã‚¯ãƒ­ã‚¹ã‚³ãƒ³ãƒ‘イルツールやOSDLã®PLMã®ã‚ˆã†ãªã‚‚ã®ã‚’用ã„ã¦ã€è¤‡æ•°ã®
- アーキテクãƒãƒ£ã«ãŠã„ã¦ã‚‚æ­£ã—ãビルドã§ãã‚‹ã“ã¨ã‚’確èªã—ã¦ãã ã•ã„。
-
- 4: 64bité•·ã®'unsigned long'を使用ã—ã¦ã„ã‚‹ppc64ã¯ã€ã‚¯ãƒ­ã‚¹ã‚³ãƒ³ãƒ‘イルã§ã®
- ãƒã‚§ãƒƒã‚¯ã«é©å½“ãªã‚¢ãƒ¼ã‚­ãƒ†ã‚¯ãƒãƒ£ã§ã™ã€‚
-
- 5: カーãƒãƒ«ã‚³ãƒ¼ãƒ‡ã‚£ãƒ³ã‚°ã‚¹ã‚¿ã‚¤ãƒ«ã«æº–æ‹ ã—ã¦ã„ã‚‹ã‹ã©ã†ã‹ç¢ºèªã—ã¦ãã ã•ã„(!)
-
- 6: CONFIGオプションã®è¿½åŠ ãƒ»å¤‰æ›´ã‚’ã—ãŸå ´åˆã«ã¯ã€CONFIGメニューãŒå£Šã‚Œã¦ã„ãªã„
- ã“ã¨ã‚’確èªã—ã¦ãã ã•ã„。
-
- 7: æ–°ã—ãKconfigã®ã‚ªãƒ—ションを追加ã™ã‚‹éš›ã«ã¯ã€å¿…ãšãã®helpも記述ã—ã¦ãã ã•ã„。
-
- 8: é©åˆ‡ãªKconfigã®ä¾å­˜é–¢ä¿‚を考ãˆãªãŒã‚‰æ…Žé‡ã«ãƒã‚§ãƒƒã‚¯ã—ã¦ãã ã•ã„。
- ãŸã ã—ã€ã“ã®ä½œæ¥­ã¯ãƒžã‚·ãƒ³ã‚’使ã£ãŸãƒ†ã‚¹ãƒˆã§ãã¡ã‚“ã¨è¡Œã†ã®ãŒã¨ã¦ã‚‚困難ã§ã™ã€‚
- ã†ã¾ãã‚„ã‚‹ã«ã¯ã€è‡ªåˆ†ã®é ­ã§è€ƒãˆã‚‹ã“ã¨ã§ã™ã€‚
-
- 9: sparseを利用ã—ã¦ã¡ã‚ƒã‚“ã¨ã—ãŸã‚³ãƒ¼ãƒ‰ãƒã‚§ãƒƒã‚¯ã‚’ã—ã¦ãã ã•ã„。
-
-10: 'make checkstack' を利用ã—ã€å•題ãŒç™ºè¦‹ã•れãŸã‚‰ä¿®æ­£ã—ã¦ãã ã•ã„。
- 'make checkstack' ã¯æ˜Žç¤ºçš„ã«å•題を示ã—ã¾ã›ã‚“ãŒã€ã©ã‚Œã‹
- 1ã¤ã®é–¢æ•°ãŒ512ãƒã‚¤ãƒˆã‚ˆã‚Šå¤§ãã„スタックを使ã£ã¦ã„れã°ã€ä¿®æ­£ã™ã¹ã候補ã¨
- ãªã‚Šã¾ã™ã€‚
-
-11: グローãƒãƒ«ãªkernel API を説明ã™ã‚‹ kernel-doc をソースã®ä¸­ã«å«ã‚ã¦ãã ã•ã„。
- ( staticãªé–¢æ•°ã«ãŠã„ã¦ã¯å¿…é ˆã§ã¯ã‚りã¾ã›ã‚“ãŒã€å«ã‚ã¦ã‚‚らã£ã¦ã‚‚çµæ§‹ã§ã™ )
- ãã—ã¦ã€'make htmldocs' ã‚‚ã—ã㯠'make mandocs' を利用ã—ã¦è¿½è¨˜ã—ãŸ
- ドキュメントã®ãƒã‚§ãƒƒã‚¯ã‚’行ã„ã€å•題ãŒè¦‹ã¤ã‹ã£ãŸå ´åˆã«ã¯ä¿®æ­£ã‚’行ã£ã¦ãã ã•ã„。
-
-12: CONFIG_PREEMPT, CONFIG_DEBUG_PREEMPT, CONFIG_DEBUG_SLAB,
- CONFIG_DEBUG_PAGEALLOC, CONFIG_DEBUG_MUTEXES, CONFIG_DEBUG_SPINLOCK,
- CONFIG_DEBUG_ATOMIC_SLEEP ã“れら全ã¦ã‚’åŒæ™‚ã«æœ‰åйã«ã—ã¦å‹•作確èªã‚’
- 行ã£ã¦ãã ã•ã„。
-
-13: CONFIG_SMP, CONFIG_PREEMPT を有効ã«ã—ãŸå ´åˆã¨ç„¡åйã«ã—ãŸå ´åˆã®ä¸¡æ–¹ã§
- ビルドã—ãŸä¸Šã€å‹•作確èªã‚’行ã£ã¦ãã ã•ã„。
-
-14: lockdepã®æ©Ÿèƒ½ã‚’å…¨ã¦æœ‰åйã«ã—ãŸä¸Šã§ã€å…¨ã¦ã®ã‚³ãƒ¼ãƒ‰ãƒ‘スを評価ã—ã¦ãã ã•ã„。
-
-15: /proc ã«æ–°ã—ã„エントリを追加ã—ãŸå ´åˆã«ã¯ã€Documentation/ é…下ã«
- å¿…ãšãƒ‰ã‚­ãƒ¥ãƒ¡ãƒ³ãƒˆã‚’追加ã—ã¦ãã ã•ã„。
-
-16: æ–°ã—ã„ブートパラメータを追加ã—ãŸå ´åˆã«ã¯ã€
- å¿…ãšDocumentation/admin-guide/kernel-parameters.rst ã«èª¬æ˜Žã‚’追加ã—ã¦ãã ã•ã„。
-
-17: æ–°ã—ãmoduleã«ãƒ‘ラメータを追加ã—ãŸå ´åˆã«ã¯ã€MODULE_PARM_DESC()ã‚’
- 利用ã—ã¦å¿…ãšãã®èª¬æ˜Žã‚’記述ã—ã¦ãã ã•ã„。
-
-18: æ–°ã—ã„userspaceインタフェースを作æˆã—ãŸå ´åˆã«ã¯ã€Documentation/ABI/ ã«
- Documentation/ABI/README ã‚’å‚考ã«ã—ã¦å¿…ãšãƒ‰ã‚­ãƒ¥ãƒ¡ãƒ³ãƒˆã‚’追加ã—ã¦ãã ã•ã„。
-
-19: å°‘ãªãã¨ã‚‚slabアロケーションã¨pageアロケーションã«å¤±æ•—ã—ãŸå ´åˆã®
- 挙動ã«ã¤ã„ã¦ã€fault-injectionを利用ã—ã¦ç¢ºèªã—ã¦ãã ã•ã„。
- Documentation/fault-injection/ ã‚’å‚ç…§ã—ã¦ãã ã•ã„。
-
- 追加ã—ãŸã‚³ãƒ¼ãƒ‰ãŒã‹ãªã‚Šã®é‡ã§ã‚ã£ãŸãªã‚‰ã°ã€ã‚µãƒ–システム特有ã®
- fault-injectionを追加ã—ãŸã»ã†ãŒè‰¯ã„ã‹ã‚‚ã—れã¾ã›ã‚“。
-
-20: æ–°ãŸã«è¿½åŠ ã—ãŸã‚³ãƒ¼ãƒ‰ã¯ã€`gcc -W'ã§ã‚³ãƒ³ãƒ‘イルã—ã¦ãã ã•ã„。
- ã“ã®ã‚ªãƒ—ションã¯å¤§é‡ã®ä¸è¦ãªãƒ¡ãƒƒã‚»ãƒ¼ã‚¸ã‚’出力ã—ã¾ã™ãŒã€
- "warning: comparison between signed and unsigned" ã®ã‚ˆã†ãªãƒ¡ãƒƒã‚»ãƒ¼ã‚¸ã¯ã€
- ãƒã‚°ã‚’見ã¤ã‘ã‚‹ã®ã«å½¹ã«ç«‹ã¡ã¾ã™ã€‚
-
-21: 投稿ã—ãŸãƒ‘ッãƒãŒ -mm パッãƒã‚»ãƒƒãƒˆã«ãƒžãƒ¼ã‚¸ã•れãŸå¾Œã€å…¨ã¦ã®æ—¢å­˜ã®ãƒ‘ッãƒã‚„
- VM, VFS ãŠã‚ˆã³ãã®ä»–ã®ã‚µãƒ–システムã«é–¢ã™ã‚‹æ§˜ã€…ãªå¤‰æ›´ã¨ã€ç¾æ™‚点ã§ã‚‚共存
- ã§ãã‚‹ã“ã¨ã‚’確èªã™ã‚‹ãƒ†ã‚¹ãƒˆã‚’行ã£ã¦ãã ã•ã„。
diff --git a/Documentation/translations/ja_JP/disclaimer-ja_JP.rst b/Documentation/translations/ja_JP/disclaimer-ja_JP.rst
new file mode 100644
index 000000000000..46a026000407
--- /dev/null
+++ b/Documentation/translations/ja_JP/disclaimer-ja_JP.rst
@@ -0,0 +1,24 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+.. _translations_ja_JP_disclaimer:
+
+==========================
+å…責æ¡é … (Disclaimer) 抄訳
+==========================
+
+.. note:: ã€è¨³è¨»ã€‘
+ ã“ã®æ–‡æ›¸ã¯ã€
+ :ref:`Disclaimer (英語版) <translations_disclaimer>`
+ ã®ä¸€éƒ¨ã‚’翻訳ã—ãŸã‚‚ã®ã§ã™ã€‚全文ã¯è‹±èªžç‰ˆã‚’å‚照願ã„ã¾ã™ã€‚
+
+Documentation/translations/ja_JP/ 以下ã®ãƒ•ァイルã¯ã€å¯¾å¿œã™ã‚‹
+Documentation/ 以下ã®ãƒ•ァイル (原文) ã®æ—¥æœ¬èªžè¨³ã§ã™ã€‚
+翻訳ã¨åŽŸæ–‡ã¨ã®é•ã„や翻訳上ã®å•題を見ã¤ã‘ãŸã‚‰ã€
+MAINTAINERS ã«è¨˜è¼‰ã®ç¶­æŒç®¡ç†è€…ã«çŸ¥ã‚‰ã›ã¦ãã ã•ã„。
+翻訳ãŒåŽŸæ–‡ã®æ›´æ–°ã«è¿½ã„ã¤ã„ã¦ã„ãªã„å ´åˆã¯ã€ãれを日本語版ã«å映ã™ã‚‹ãƒ‘ッãƒã®
+投稿も歓迎ã§ã™ã€‚
+
+ãªãŠã€ã“ã®ç¿»è¨³ã®ç›®çš„ã¯éžè‹±èªž (ã“ã“ã§ã¯æ—¥æœ¬èªž) 話者ã¸ã®ä¾¿å®œæä¾›ã§ã‚りã€
+フォークをæ„図ã—ãŸã‚‚ã®ã§ã¯ãªã„事を念頭ã«ãŠã„ã¦ãã ã•ã„。ã—ãŸãŒã£ã¦ã€
+ã“ã®ãƒ•ァイルã®å†…容ã«å¯¾ã™ã‚‹ã‚³ãƒ¡ãƒ³ãƒˆã‚„æ›´æ–°ã™ã¹ãã“ã¨ãŒã‚れã°ã€å…ˆã«åŽŸæ–‡ã®
+更新を検討ã—ã¦ãã ã•ã„。
diff --git a/Documentation/translations/ja_JP/index.rst b/Documentation/translations/ja_JP/index.rst
index 0b476b429e3b..4159b417bfdd 100644
--- a/Documentation/translations/ja_JP/index.rst
+++ b/Documentation/translations/ja_JP/index.rst
@@ -11,7 +11,9 @@
.. toctree::
:maxdepth: 1
+ disclaimer-ja_JP
process/howto
+ process/submit-checklist
.. raw:: latex
diff --git a/Documentation/translations/ja_JP/process/howto.rst b/Documentation/translations/ja_JP/process/howto.rst
index d9ba40588e46..5e307f90982c 100644
--- a/Documentation/translations/ja_JP/process/howto.rst
+++ b/Documentation/translations/ja_JP/process/howto.rst
@@ -1,35 +1,18 @@
-.. raw:: latex
+.. SPDX-License-Identifier: GPL-2.0
- \kerneldocCJKoff
-
-NOTE:
-This is a version of Documentation/process/howto.rst translated into Japanese.
-This document is maintained by Tsugikazu Shibata <tshibata@ab.jp.nec.com>
-If you find any difference between this document and the original file or
-a problem with the translation, please contact the maintainer of this file.
-
-Please also note that the purpose of this file is to be easier to
-read for non English (read: Japanese) speakers and is not intended as
-a fork. So if you have any comments or updates for this file, please
-try to update the original English file first.
-
-----------------------------------
-
-.. raw:: latex
-
- \kerneldocCJKon
-
-ã“ã®æ–‡æ›¸ã¯ã€
-Documentation/process/howto.rst
-ã®å’Œè¨³ã§ã™ã€‚
-
-翻訳者: Tsugikazu Shibata <tshibata@ab.jp.nec.com>
-
-----------------------------------
+.. Originally contributed by Tsugikazu Shibata
Linux カーãƒãƒ«é–‹ç™ºã®ã‚„り方
==========================
+.. note:: ã€è¨³è¨»ã€‘
+ ã“ã®æ–‡æ›¸ã¯ã€
+ Documentation/process/howto.rst
+ ã®ç¿»è¨³ã§ã™ã€‚
+ å…責æ¡é …ã«ã¤ã„ã¦ã¯ã€
+ :ref:`å…責æ¡é …ã®æŠ„è¨³ <translations_ja_JP_disclaimer>` ãŠã‚ˆã³ã€
+ :ref:`Disclaimer (英語版) <translations_disclaimer>` ã‚’å‚ç…§ã—ã¦ãã ã•ã„。
+
ã“れã¯ä¸Šã®ãƒˆãƒ”ック( Linux カーãƒãƒ«é–‹ç™ºã®ã‚„り方)ã®é‡è¦ãªäº‹æŸ„を網羅ã—ãŸ
ドキュメントã§ã™ã€‚ã“ã“ã«ã¯ Linux カーãƒãƒ«é–‹ç™ºè€…ã«ãªã‚‹ãŸã‚ã®æ–¹æ³•ã¨Linux
カーãƒãƒ«é–‹ç™ºã‚³ãƒŸãƒ¥ãƒ‹ãƒ†ã‚£ã¨å…±ã«æ´»å‹•ã™ã‚‹ã‚„ã‚Šæ–¹ã‚’å­¦ã¶æ–¹æ³•ãŒå«ã¾ã‚Œã¦ã„ã¾ã™ã€‚
diff --git a/Documentation/translations/ja_JP/process/submit-checklist.rst b/Documentation/translations/ja_JP/process/submit-checklist.rst
new file mode 100644
index 000000000000..fb3b9e3bd8ee
--- /dev/null
+++ b/Documentation/translations/ja_JP/process/submit-checklist.rst
@@ -0,0 +1,163 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+.. Translated by Akira Yokosawa <akiyks@gmail.com>
+
+.. An old translation of this document of a different origin was at
+ Documentation/translations/ja_JP/SubmitChecklist, which can be found
+ in the pre-v6.14 tree if you are interested.
+ Please note that this translation is independent of the previous one.
+
+======================================
+Linux カーãƒãƒ«ãƒ‘ãƒƒãƒæŠ•ç¨¿ãƒã‚§ãƒƒã‚¯ãƒªã‚¹ãƒˆ
+======================================
+
+.. note:: ã€è¨³è¨»ã€‘
+ ã“ã®æ–‡æ›¸ã¯ã€
+ Documentation/process/submit-checklist.rst
+ ã®ç¿»è¨³ã§ã™ã€‚
+ å…責æ¡é …ã«ã¤ã„ã¦ã¯ã€
+ :ref:`å…責æ¡é …ã®æŠ„è¨³ <translations_ja_JP_disclaimer>` ãŠã‚ˆã³ã€
+ :ref:`Disclaimer (英語版) <translations_disclaimer>` ã‚’å‚ç…§ã—ã¦ãã ã•ã„。
+
+以下ã¯ã€ã‚«ãƒ¼ãƒãƒ«ãƒ‘ッãƒã®æŠ•稿時ã«ã€ãã®ã‚¹ãƒ ãƒ¼ã‚ºãªå—ã‘入れã®ãŸã‚ã«å¿ƒãŒã‘ã‚‹
+ã¹ã基本的ãªäº‹é …ã§ã™ã€‚
+
+ã“れã¯ã€ Documentation/process/submitting-patches.rst ãŠã‚ˆã³ãã®ä»–ã®
+Linux カーãƒãƒ«ãƒ‘ãƒƒãƒæŠ•ç¨¿ã«é–¢ã™ã‚‹æ–‡æ›¸ã‚’è¸ã¾ãˆã€ãれを補足ã™ã‚‹ã‚‚ã®ã§ã™ã€‚
+
+.. note:: ã€è¨³è¨»ã€‘
+ å¯èƒ½ãªé …ç›®ã«ã¤ã„ã¦ã¯ã€ãƒ‘ッãƒã‚‚ã—ãã¯ãƒ‘ッãƒå†…ã®æ›´æ–°ã‚’æš—é»™ã®ä¸»èªžã¨ã—ã¦ã€
+ ãã®æœ›ã¾ã—ã„çŠ¶æ…‹ã‚’è¡¨ã™æ–‡ä½“ã¨ã—ã¾ã™ã€‚ãã®ä»–ã€åŽŸç¾©ã‚’æãªã‚ãªã„範囲ã§
+ 係りçµã³ã‚’調整ã™ã‚‹ãªã©ã€ç°¡æ½”ã§æŠŠæ¡ã—ã‚„ã™ã„ç®‡æ¡æ›¸ãを目指ã—ã¾ã™ã€‚
+
+
+コードã®ãƒ¬ãƒ“ュー
+================
+
+1) 利用ã™ã‚‹æ©Ÿèƒ½ã«ã¤ã„ã¦ã€ãã®æ©Ÿèƒ½ã‚’定義・宣言ã—ã¦ã„るファイルを
+ ``#include`` ã—ã¦ã„る。
+ ä»–ã®ãƒ˜ãƒƒãƒ€ãƒ¼ãƒ•ァイル経由ã§ã®å–り込ã¿ã«ä¾å­˜ã—ãªã„。
+
+2) Documentation/process/coding-style.rst ã«è©³è¿°ã•れã¦ã„る一般的ãªã‚¹ã‚¿ã‚¤ãƒ«
+ ã«ã¤ã„ã¦ãƒã‚§ãƒƒã‚¯æ¸ˆã¿ã€‚
+
+3) メモリãƒãƒªã‚¢ãƒ¼ (例, ``barrier()``, ``rmb()``, ``wmb()``) ã«ã¤ã„ã¦ã€
+ ãã®ã™ã¹ã¦ã«ã€ä½œç”¨ã¨ç›®çš„ã€åŠã³å¿…è¦ç†ç”±ã«ã¤ã„ã¦ã®èª¬æ˜ŽãŒã‚½ãƒ¼ã‚¹ã‚³ãƒ¼ãƒ‰å†…ã®
+ コメントã¨ã—ã¦è¨˜è¿°ã•れã¦ã„る。
+
+
+Kconfig 変更ã®ãƒ¬ãƒ“ュー
+======================
+
+1) æ–°è¦ã®ã€ã‚‚ã—ãã¯å¤‰æ›´ã•れ㟠``CONFIG`` オプションã«ã¤ã„ã¦ã€ãれãŒé–¢ä¿‚ã™ã‚‹
+ コンフィグメニューã¸ã®æ‚ªå½±éŸ¿ãŒãªã„。ã¾ãŸã€
+ Documentation/kbuild/kconfig-language.rst ã®
+ "Menu attibutes: default value" ã«è¨˜è¼‰ã®ä¾‹å¤–æ¡ä»¶ã‚’満ãŸã™å ´åˆã‚’除ãã€
+ ãã®ãƒ‡ãƒ•ォルトãŒç„¡åйã«ãªã£ã¦ã„る。
+
+2) æ–°è¦ã® ``Kconfig`` オプションã«ãƒ˜ãƒ«ãƒ—テキストãŒã‚る。
+
+3) 妥当㪠``Kconfig`` ã®çµ„ã¿åˆã‚ã›ã«ã¤ã„ã¦æ³¨æ„æ·±ãレビューã•れã¦ã„る。
+ ã“れをテストã§ã‚„り切るã®ã¯å›°é›£ã§ã€çŸ¥åŠ›ãŒæ±ºã‚手ã¨ãªã‚‹ã€‚
+
+ドキュメンテーションã®ä½œæˆ
+==========================
+
+1) グローãƒãƒ«ãªã‚«ãƒ¼ãƒãƒ« API ㌠:ref:`kernel-doc <kernel_doc>` ã®å½¢å¼ã§
+ ドキュメント化ã•れã¦ã„ã‚‹ (é™çš„関数ã«ã¯æ±‚ã‚られãªã„ãŒã€ä»˜ã‘ã¦ã‚‚よã„)。
+
+2) æ–°è¦ ``/proc`` エントリーãŒã€ã™ã¹ã¦ ``Documentation/`` 以下ã«è¨˜è¼‰ã•れã¦
+ ã„る。
+
+3) æ–°è¦ã‚«ãƒ¼ãƒãƒ«ãƒ»ãƒ–ート・パラメータãŒã€ã™ã¹ã¦
+ ``Documentation/admin-guide/kernel-parameters.rst`` ã«è¨˜è¼‰ã•れã¦ã„る。
+
+4) æ–°è¦ãƒ¢ã‚¸ãƒ¥ãƒ¼ãƒ«ãƒ»ãƒ‘ラメータãŒã€ã™ã¹ã¦ ``MODULE_PARM_DESC()`` ã«ã‚ˆã£ã¦è¨˜è¿°
+ ã•れã¦ã„る。
+
+5) æ–°è¦ãƒ¦ãƒ¼ã‚¶ãƒ¼ã‚¹ãƒšãƒ¼ã‚¹ãƒ»ã‚¤ãƒ³ã‚¿ãƒ¼ãƒ•ェースãŒã€ã™ã¹ã¦ ``Documentaion/ABI/``
+ 以下ã«è¨˜è¼‰ã•れã¦ã„る。詳ã—ãã¯ã€ Documentation/admin-guide/abi.rst
+ (ã‚‚ã—ã㯠``Documentation/ABI/README``) ã‚’å‚照。
+ ユーザースペース・インターフェースを変更ã™ã‚‹ãƒ‘ッãƒã¯ã€
+ linux-api@vger.kernel.org ã«ã‚‚ CC ã™ã¹ã—。
+
+6) ãªã‚“らã‹ã® ioctl を追加ã™ã‚‹ãƒ‘ッãƒã¯ã€
+ ``Documentation/userspace-api/ioctl/ioctl-number.rst``
+ ã®æ›´æ–°ã‚’ä¼´ã†ã€‚
+
+ツールã«ã‚ˆã‚‹ã‚³ãƒ¼ãƒ‰ã®ãƒã‚§ãƒƒã‚¯
+============================
+
+1) スタイル・ãƒã‚§ãƒƒã‚«ãƒ¼ (``scripts/checkpatch.pl``) ã«ã‚ˆã£ã¦ã€çНã—ãŒã¡ãª
+ パッãƒãƒ»ã‚¹ã‚¿ã‚¤ãƒ«ã®é•åãŒãªã„ã“ã¨ã‚’ç¢ºèªæ¸ˆã¿ã€‚
+ 指摘ã•れるé•åを残ã™å ´åˆã¯ã€ãれを正当化ã§ãã‚‹ã“ã¨ã€‚
+
+2) sparse ã«ã‚ˆã‚Šå…¥å¿µã«ãƒã‚§ãƒƒã‚¯æ¸ˆã¿ã€‚
+
+3) ``make checkstack`` ã§æŒ‡æ‘˜ã•れるå•題ãŒã‚れã°ã€ãれãŒä¿®æ­£æ¸ˆã¿ã€‚
+ ``checkstack`` ã¯å•題点を明示的ã«ã¯æŒ‡æ‘˜ã—ãªã„ãŒã€ スタック消費ãŒ
+ 512 ãƒã‚¤ãƒˆã‚’è¶Šãˆã‚‹é–¢æ•°ã¯è¦‹ç›´ã—ã®å€™è£œã€‚
+
+コードã®ãƒ“ルド
+==============
+
+1) ä»¥ä¸‹ã®æ¡ä»¶ã§ã‚¯ãƒªãƒ¼ãƒ³ã«ãƒ“ルドã§ãる。
+
+ a) é©ç”¨å¯èƒ½ãªã€ãŠã‚ˆã³ ``=y``, ``=m``, ``=n`` を変更ã—㟠``CONFIG``
+ オプションã§ã®ãƒ“ルド。
+ ``gcc`` ãŠã‚ˆã³ãƒªãƒ³ã‚«ãƒ¼ã‹ã‚‰ã®è­¦å‘Šãƒ»ã‚¨ãƒ©ãƒ¼ãŒãªã„ã“ã¨ã€‚
+
+ b) ``allnoconfig`` 㨠``allmodconfig`` ãŒãƒ‘ス
+
+ c) ``O=builddir`` を指定ã—ã¦ã®ãƒ“ルド
+
+ d) Documentation/ 以下ã®å¤‰æ›´ã«é–¢ã—ã¦ã€ãƒ‰ã‚­ãƒ¥ãƒ¡ãƒ³ãƒˆã®ãƒ“ãƒ«ãƒ‰ã§æ–°ãŸãªè­¦å‘Šã‚„
+ エラーãŒå‡ºãªã„。
+ ``make htmldocs`` ã¾ãŸã¯ ``make pdfdocs`` ã§ãƒ“ルドã—ã€å•題ãŒã‚れã°ä¿®æ­£ã€‚
+
+2) ローカルã®ã‚¯ãƒ­ã‚¹ãƒ»ã‚³ãƒ³ãƒ‘イル・ツールã€ãã®ä»–ã®ãƒ“ルド環境 (訳註: build farm)
+ を使ã£ã¦ã€è¤‡æ•°ã® CPU アーキテクãƒãƒ£å‘ã‘ã«ãƒ“ルドã§ãる。
+ 特ã«ã€ãƒ¯ãƒ¼ãƒ‰ã‚µã‚¤ã‚º (32 ビット㨠64 ビット) やエンディアン (ビッグã¨ãƒªãƒˆãƒ«)
+ ã®ç•°ãªã‚‹ã‚¢ãƒ¼ã‚­ãƒ†ã‚¯ãƒãƒ£ã‚’対象ã¨ã™ã‚‹ãƒ†ã‚¹ãƒˆã¯ã€è¡¨ç¾å¯èƒ½æ•°å€¤ç¯„囲・データ整列・
+ エンディアンãªã©ã«ã¤ã„ã¦ã®èª¤ã£ãŸä»®å®šã«èµ·å› ã™ã‚‹æ§˜ã€…ãªç§»æ¤ä¸Šã®å•題をæ•ãˆã‚‹
+ ã®ã«åŠ¹æžœçš„ã€‚
+
+3) æ–°è¦ã«è¿½åŠ ã•れãŸã‚³ãƒ¼ãƒ‰ã«ã¤ã„㦠(``make KCFLAGS=-W`` を使ã£ã¦)
+ ``gcc -W`` ã§ã‚³ãƒ³ãƒ‘イル。
+ ã“れã¯å¤šãã®ãƒŽã‚¤ã‚ºã‚’ä¼´ã†ãŒã€
+ ``warning: comparison between signed and unsigned``
+ ã®é¡žã„ã®ãƒã‚°ã‚’ã‚ã¶ã‚Šå‡ºã™ã®ã«åŠ¹æžœçš„ã€‚
+
+4) 変更ã•れるソースコードãŒã€ä¸‹è¨˜ã® ``Kconfig`` シンボルã«é–¢é€£ã™ã‚‹ã‚«ãƒ¼ãƒãƒ«
+ API や機能ã«ä¾å­˜ (ã‚‚ã—ãã¯åˆ©ç”¨) ã™ã‚‹å ´åˆã€ãれら㮠``Kconfig`` シンボルãŒã€
+ 無効ã€ãŠã‚ˆã³ (å¯èƒ½ãªã‚‰) ``=m`` ã®å ´åˆã‚’組ã¿åˆã‚ã›ãŸè¤‡æ•°ã®ãƒ“ルドを
+ (全部ã¾ã¨ã‚ã¦ã§ã¯ãªãã€ã„ã‚ã„ã‚ãªãƒ©ãƒ³ãƒ€ãƒ ã®çµ„ã¿åˆã‚ã›ã§) テスト済ã¿ã€‚
+
+ ``CONFIG_SMP``, ``CONFIG_SYSFS``, ``CONFIG_PROC_FS``, ``CONFIG_INPUT``,
+ ``CONFIG_PCI``, ``CONFIG_BLOCK``, ``CONFIG_PM``, ``CONFIG_MAGIC_SYSRQ``,
+ ``CONFIG_NET``, ``CONFIG_INET=n`` (ãŸã ã—ã€å¾Œè€…㯠``CONFIG_NET=y``
+ ã¨ã®çµ„ã¿åˆã‚ã›)。
+
+コードã®ãƒ†ã‚¹ãƒˆ
+==============
+
+1) ``CONFIG_PREEMPT``, ``CONFIG_DEBUG_PREEMPT``,
+ ``CONFIG_SLUB_DEBUG``, ``CONFIG_DEBUG_PAGEALLOC``, ``CONFIG_DEBUG_MUTEXES``,
+ ``CONFIG_DEBUG_SPINLOCK``, ``CONFIG_DEBUG_ATOMIC_SLEEP``,
+ ``CONFIG_PROVE_RCU`` ãŠã‚ˆã³ ``CONFIG_DEBUG_OBJECTS_RCU_HEAD`` ã‚’ã™ã¹ã¦
+ åŒæ™‚ã«æœ‰åйã«ã—ã¦ã®ãƒ†ã‚¹ãƒˆæ¸ˆã¿ã€‚
+
+2) ``CONFIG_SMP`` 㨠``CONFIG_PREEMPT`` ãŒæœ‰åйã¨ç„¡åйã®å ´åˆã«ã¤ã„ã¦ã€ãƒ“ルドã¨
+ ランタイムã®ãƒ†ã‚¹ãƒˆæ¸ˆã¿ã€‚
+
+3) lockdep ã®æ©Ÿèƒ½ã‚’ã™ã¹ã¦æœ‰åйã«ã—ã¦ã®å®Ÿè¡Œã§ã€ã™ã¹ã¦ã®ã‚³ãƒ¼ãƒ‰çµŒè·¯ãŒç¢ºèªæ¸ˆã¿ã€‚
+
+4) 最低é™ã€slab 㨠ページ・アロケーションã®å¤±æ•—ã«é–¢ã™ã‚‹èª¤ã‚Šæ³¨å…¥
+ (訳註: fault injection) ã«ã‚ˆã‚‹ãƒã‚§ãƒƒã‚¯æ¸ˆã¿ã€‚
+ 詳ã—ãã¯ã€ Documentation/fault-injection/index.rst ã‚’å‚照。
+ æ–°è¦ã®ã‚³ãƒ¼ãƒ‰ãŒå¤šã„å ´åˆã¯ã€ã‚µãƒ–システム対象ã®èª¤ã‚Šæ³¨å…¥ã‚’追加ã™ã‚‹ã®ãŒæœ›ã¾ã—ã„
+ å¯èƒ½æ€§ã‚り。
+
+5) linux-next ã®æœ€æ–°ã‚¿ã‚°ã«å¯¾ã™ã‚‹ãƒ†ã‚¹ãƒˆã«ã‚ˆã‚Šã€ä»–ã§ã‚­ãƒ¥ãƒ¼ã‚¤ãƒ³ã‚°ã•れã¦ã„ã‚‹
+ パッãƒã‚„ã€VMã€VFSã€ãã®ä»–ã®ã‚µãƒ–システム内ã®ã™ã¹ã¦ã®å¤‰æ›´ã¨çµ„ã¿åˆã‚ã›ã¦ã®
+ å‹•ä½œã‚’ç¢ºèªæ¸ˆã¿ã€‚
diff --git a/Documentation/translations/sp_SP/process/submit-checklist.rst b/Documentation/translations/sp_SP/process/submit-checklist.rst
index 0d6651f9d871..e7107cc97001 100644
--- a/Documentation/translations/sp_SP/process/submit-checklist.rst
+++ b/Documentation/translations/sp_SP/process/submit-checklist.rst
@@ -97,9 +97,10 @@ y en otros lugares con respecto al envío de parches del kernel de Linux.
``MODULE_PARM_DESC()``.
18) Todas las nuevas interfaces de espacio de usuario están documentadas
- en ``Documentation/ABI/``. Consulte ``Documentation/ABI/README`` para
- obtener más información. Los parches que cambian las interfaces del
- espacio de usuario deben ser CCed a linux-api@vger.kernel.org.
+ en ``Documentation/ABI/``. Consulte Documentation/admin-guide/abi.rst
+ (o ``Documentation/ABI/README``) para obtener más información.
+ Los parches que cambian las interfaces del espacio de usuario deben
+ ser CCed a linux-api@vger.kernel.org.
19) Se ha comprobado con la inyección de al menos errores de asignación
de slab y página. Consulte ``Documentation/fault-injection/``.
diff --git a/Documentation/translations/sp_SP/scheduler/sched-design-CFS.rst b/Documentation/translations/sp_SP/scheduler/sched-design-CFS.rst
index dc728c739e28..b35d24464be9 100644
--- a/Documentation/translations/sp_SP/scheduler/sched-design-CFS.rst
+++ b/Documentation/translations/sp_SP/scheduler/sched-design-CFS.rst
@@ -112,7 +112,7 @@ CFS usa una granularidad de nanosegundos y no depende de ningún
jiffy o detalles como HZ. De este modo, el gestor de tareas CFS no tiene
noción de "ventanas de tiempo" de la forma en que tenía el gestor de
tareas previo, y tampoco tiene heurísticos. Únicamente hay un parámetro
-central ajustable (se ha de cambiar en CONFIG_SCHED_DEBUG):
+central ajustable:
/sys/kernel/debug/sched/base_slice_ns
diff --git a/Documentation/translations/zh_CN/admin-guide/README.rst b/Documentation/translations/zh_CN/admin-guide/README.rst
index e679cbc3c89d..1bdafdc4c8e2 100644
--- a/Documentation/translations/zh_CN/admin-guide/README.rst
+++ b/Documentation/translations/zh_CN/admin-guide/README.rst
@@ -146,7 +146,7 @@ Linux内核6.x版本 <http://kernel.org/>
"make xconfig" 基于Qtçš„é…置工具。
- "make gconfig" 基于GTK+çš„é…置工具。
+ "make gconfig" 基于GTKçš„é…置工具。
"make oldconfig" 基于现有的 ./.config 文件选择所有选项,并询问
æ–°é…置选项。
diff --git a/Documentation/translations/zh_CN/dev-tools/ubsan.rst b/Documentation/translations/zh_CN/dev-tools/ubsan.rst
index 2487696b3772..81ef6f77caeb 100644
--- a/Documentation/translations/zh_CN/dev-tools/ubsan.rst
+++ b/Documentation/translations/zh_CN/dev-tools/ubsan.rst
@@ -3,7 +3,14 @@
.. include:: ../disclaimer-zh_CN.rst
:Original: Documentation/dev-tools/ubsan.rst
-:Translator: Dongliang Mu <dzm91@hust.edu.cn>
+
+:翻译:
+
+ 慕冬亮 Dongliang Mu <dzm91@hust.edu.cn>
+
+:校译:
+
+ 王昱力 WangYuli <wangyuli@uniontech.com>
未定义行为消毒剂 - UBSAN
====================================
@@ -55,30 +62,20 @@ GCC自4.9.x [1_] ï¼ˆè¯¦è§ ``-fsanitize=undefined`` 选项åŠå…¶å­é€‰é¡¹ï¼‰ç‰ˆæ
使用如下内核é…ç½®å¯ç”¨UBSAN::
- CONFIG_UBSAN=y
-
-使用如下内核é…置检查整个内核::
-
- CONFIG_UBSAN_SANITIZE_ALL=y
-
-为了在特定文件或目录å¯åŠ¨ä»£ç æ’桩,需è¦åœ¨ç›¸åº”的内核Makefile中添加一行类似内容:
+ CONFIG_UBSAN=y
-- 啿–‡ä»¶ï¼ˆå¦‚main.o)::
-
- UBSAN_SANITIZE_main.o := y
-
-- 一个目录中的所有文件::
-
- UBSAN_SANITIZE := y
-
-å³ä½¿è®¾ç½®äº†``CONFIG_UBSAN_SANITIZE_ALL=y``,为了é¿å…æ–‡ä»¶è¢«æ’æ¡©ï¼Œå¯ä½¿ç”¨::
+排除è¦è¢«æ£€æµ‹çš„æ–‡ä»¶::
UBSAN_SANITIZE_main.o := n
-与::
+排除一个目录中的所有文件::
UBSAN_SANITIZE := n
+当全部文件都被ç¦ç”¨ï¼Œå¯é€šè¿‡å¦‚下方å¼ä¸ºç‰¹å®šæ–‡ä»¶å¯ç”¨::
+
+ UBSAN_SANITIZE_main.o := y
+
未对é½çš„内存访问检测å¯é€šè¿‡å¼€å¯ç‹¬ç«‹é€‰é¡¹ - CONFIG_UBSAN_ALIGNMENT 检测。
è¯¥é€‰é¡¹åœ¨æ”¯æŒæœªå¯¹é½è®¿é—®çš„æž¶æž„上(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y)
默认为关闭。该选项ä»å¯é€šè¿‡å†…æ ¸é…ç½®å¯ç”¨ï¼Œä½†å®ƒå°†äº§ç”Ÿå¤§é‡çš„UBSAN报告。
diff --git a/Documentation/translations/zh_CN/disclaimer-zh_CN.rst b/Documentation/translations/zh_CN/disclaimer-zh_CN.rst
index 3c6db094a63c..37681c0b2a01 100644
--- a/Documentation/translations/zh_CN/disclaimer-zh_CN.rst
+++ b/Documentation/translations/zh_CN/disclaimer-zh_CN.rst
@@ -1,9 +1,7 @@
:orphan:
-.. warning::
+.. note::
此文件的目的是为让中文读者更容易阅读和ç†è§£ï¼Œè€Œä¸æ˜¯ä½œä¸ºä¸€ä¸ªåˆ†æ”¯ã€‚ 因此,
如果您对此文件有任何æ„è§æˆ–更新,请先å°è¯•更新原始英文文件。
-
-.. note::
- 如果您å‘现本文档与原始文件有任何ä¸åŒæˆ–者有翻译问题,请è”系该文件的译者,
- 或者请求时奎亮的帮助:<alexs@kernel.org>。
+ 如果您å‘现本文档与原始文件有任何ä¸åŒæˆ–者有翻译问题,请å‘建议或者补ä¸ç»™
+ 该文件的译者,或者请求中文文档维护者和审阅者的帮助。
diff --git a/Documentation/translations/zh_CN/index.rst b/Documentation/translations/zh_CN/index.rst
index 7574e1673180..cc512ca54172 100644
--- a/Documentation/translations/zh_CN/index.rst
+++ b/Documentation/translations/zh_CN/index.rst
@@ -26,7 +26,13 @@
顺便说下,中文文档也需è¦éµå®ˆå†…核编ç é£Žæ ¼ï¼Œé£Žæ ¼ä¸­ä¸­æ–‡å’Œè‹±æ–‡çš„主è¦ä¸åŒå°±æ˜¯ä¸­æ–‡
的字符标点å ç”¨ä¸¤ä¸ªè‹±æ–‡å­—ç¬¦å®½åº¦ï¼Œæ‰€ä»¥ï¼Œå½“è‹±æ–‡è¦æ±‚ä¸è¦è¶…过æ¯è¡Œ100个字符时,
中文就ä¸è¦è¶…过50个字符。å¦å¤–ï¼Œä¹Ÿè¦æ³¨æ„'-','='等符å·ä¸Žç›¸å…³æ ‡é¢˜çš„对é½ã€‚在将
-è¡¥ä¸æäº¤åˆ°ç¤¾åŒºä¹‹å‰ï¼Œä¸€å®šè¦è¿›è¡Œå¿…è¦çš„ ``checkpatch.pl`` 检查和编译测试。
+è¡¥ä¸æäº¤åˆ°ç¤¾åŒºä¹‹å‰ï¼Œä¸€å®šè¦è¿›è¡Œå¿…è¦çš„ ``checkpatch.pl`` 检查和编译测试,确ä¿
+在 ``make htmldocs/pdfdocs`` 中ä¸å¢žåŠ æ–°çš„å‘Šè­¦ï¼Œæœ€åŽï¼Œå®‰è£…检查你生æˆçš„
+html/pdf æ–‡ä»¶ï¼Œç¡®è®¤å®ƒä»¬çœ‹èµ·æ¥æ˜¯æ­£å¸¸çš„。
+
+æäº¤ä¹‹å‰è¯·ç¡®è®¤ä½ çš„è¡¥ä¸å¯ä»¥æ­£å¸¸æäº¤åˆ°ä¸­æ–‡æ–‡æ¡£ç»´æŠ¤åº“:
+https://git.kernel.org/pub/scm/linux/kernel/git/alexs/linux.git/
+如果你的补ä¸ä¾èµ–于其他人的补ä¸, å¯ä»¥ä¸Žå…¶ä»–人商é‡åŽç”±æŸä¸€ä¸ªäººåˆå¹¶æäº¤ã€‚
与Linux 内核社区一起工作
------------------------
diff --git a/Documentation/translations/zh_CN/mm/balance.rst b/Documentation/translations/zh_CN/mm/balance.rst
index 6fd79209c307..f877c0cfa39a 100644
--- a/Documentation/translations/zh_CN/mm/balance.rst
+++ b/Documentation/translations/zh_CN/mm/balance.rst
@@ -64,7 +64,7 @@ kswapdå¹¶ä¸çœŸæ­£éœ€è¦å¹³è¡¡é«˜å†…存区,因为中断上下文并ä¸è¯·æ±‚é«
如果从进程内存和shm中å·å–页é¢å¯ä»¥å‡è½»è¯¥é¡µé¢èŠ‚ç‚¹ä¸­ä»»ä½•åŒºçš„å†…å­˜åŽ‹åŠ›ï¼Œè€Œè¯¥åŒºçš„å†…å­˜åŽ‹åŠ›
å·²ç»ä½ŽäºŽå…¶æ°´ä½ï¼Œåˆ™ä¼šè¿›è¡Œå·å–。
-watemark[WMARK_MIN/WMARK_LOW/WMARK_HIGH]/low_on_memory/zone_wake_kswapd:
+watermark[WMARK_MIN/WMARK_LOW/WMARK_HIGH]/low_on_memory/zone_wake_kswapd:
这些是æ¯ä¸ªåŒºçš„字段,用于确定一个区何时需è¦å¹³è¡¡ã€‚当页颿•°ä½ŽäºŽæ°´ä½[WMARK_MIN]时,
hysteric 的字段low_on_memoryè¢«è®¾ç½®ã€‚è¿™ä¸ªå­—æ®µä¼šä¸€ç›´è¢«è®¾ç½®ï¼Œç›´åˆ°ç©ºé—²é¡µæ•°å˜æˆæ°´ä½
[WMARK_HIGH]。当low_on_memory被设置时,页é¢åˆ†é…请求将å°è¯•释放该区域的一些页é¢ï¼ˆå¦‚æžœ
diff --git a/Documentation/translations/zh_CN/process/submit-checklist.rst b/Documentation/translations/zh_CN/process/submit-checklist.rst
index 10536b74aeec..0e524f1c1af5 100644
--- a/Documentation/translations/zh_CN/process/submit-checklist.rst
+++ b/Documentation/translations/zh_CN/process/submit-checklist.rst
@@ -82,8 +82,8 @@ Linuxå†…æ ¸è¡¥ä¸æäº¤æ£€æŸ¥å•
17) 所有新的模å—傿•°éƒ½è®°å½•在 ``MODULE_PARM_DESC()``
18) 所有新的用户空间接å£éƒ½è®°å½•在 ``Documentation/ABI/`` 中。有关详细信æ¯ï¼Œ
- 请å‚阅 ``Documentation/ABI/README`` 。更改用户空间接å£çš„è¡¥ä¸åº”该抄é€
- linux-api@vger.kernel.org。
+ 请å‚阅 Documentation/admin-guide/abi.rst (或 ``Documentation/ABI/README``)。
+ 更改用户空间接å£çš„è¡¥ä¸åº”è¯¥æŠ„é€ linux-api@vger.kernel.org\ 。
19) 已通过至少注入slabå’Œpage分é…失败进行检查。请å‚阅 ``Documentation/fault-injection/`` 。
å¦‚æžœæ–°ä»£ç æ˜¯å®žè´¨æ€§çš„,那么添加å­ç³»ç»Ÿç‰¹å®šçš„æ•…障注入å¯èƒ½æ˜¯åˆé€‚的。
diff --git a/Documentation/translations/zh_CN/security/credentials.rst b/Documentation/translations/zh_CN/security/credentials.rst
new file mode 100644
index 000000000000..91c353dfb622
--- /dev/null
+++ b/Documentation/translations/zh_CN/security/credentials.rst
@@ -0,0 +1,479 @@
+.. SPDX-License-Identifier: GPL-2.0
+.. include:: ../disclaimer-zh_CN.rst
+
+:Original: Documentation/security/credentials.rst
+
+:翻译:
+ 赵硕 Shuo Zhao <zhaoshuo@cqsoftware.com.cn>
+
+=============
+Linux中的凭æ®
+=============
+
+作者: David Howells <dhowells@redhat.com>
+
+.. contents:: :local:
+
+概述
+====
+
+当一个对象对å¦ä¸€ä¸ªå¯¹è±¡è¿›è¡Œæ“作时,Linux执行的安全检查包å«å‡ ä¸ªéƒ¨åˆ†ï¼š
+
+ 1. 对象
+
+ 对象是å¯ä»¥ç›´æŽ¥ç”±ç”¨æˆ·ç©ºé—´ç¨‹åºæ“作的系统中的实体。Linux具有多ç§å¯æ“作
+ 的对象,包括:
+
+ - 任务
+ - 文件/索引节点
+ - 套接字
+ - 消æ¯é˜Ÿåˆ—
+ - 共享内存段
+ - ä¿¡å·é‡
+ - 密钥
+
+ 所有这些对象的æè¿°çš„一部分是一组凭æ®ã€‚集åˆä¸­çš„内容å–决于对象的类型。
+
+ 2. 对象所有æƒ
+
+ 大多数对象的凭æ®ä¸­ä¼šæœ‰ä¸€ä¸ªå­é›†ç”¨æ¥è¡¨ç¤ºè¯¥å¯¹è±¡çš„æ‰€æœ‰æƒã€‚
+ è¿™ç”¨äºŽèµ„æºæ ¸ç®—å’Œé™åˆ¶ï¼ˆå¦‚ç£ç›˜é…é¢å’Œä»»åŠ¡èµ„æºé™åˆ¶ï¼‰ã€‚
+
+ 例如,在标准的UNIX文件系统中,这将由标记在索引节点上的UID定义。
+
+ 3. 对象上下文
+
+ 此外在这些对象的凭æ®ä¸­ï¼Œå°†æœ‰ä¸€ä¸ªå­é›†è¡¨ç¤ºå¯¹è±¡çš„“对象上下文â€ã€‚
+ è¿™å¯èƒ½ä¸Žï¼ˆ2)中相åŒï¼Œä¹Ÿå¯èƒ½ä¸åŒ —— 例如,在标准的UNIX文件中,
+ 这是由标记在索引节点上的UID和GID定义的。
+
+ 对象上下文是进行安全计算的一部分,当对象被æ“作时会用到。
+
+ 4. 主体
+
+ 主体是正在对其他对象执行æ“作的对象。
+
+ ç³»ç»Ÿä¸­çš„å¤§å¤šæ•°å¯¹è±¡æ˜¯ä¸æ´»åŠ¨çš„ï¼šä»–ä»¬ä¸ä¼šå¯¹ç³»ç»Ÿä¸­çš„其他对象起作用。
+ 进程/任务是明显的例外:它们å¯ä»¥è®¿é—®å’Œæ“纵其他对象。
+
+ 任务之外的其他对象在æŸäº›æƒ…况下也å¯ä»¥æ˜¯ä¸»ä½“。例如,打开的文件å¯ä»¥ä½¿ç”¨
+ å为 ``fcntl(F_SETOWN)`` 的任务给它的UIDå’ŒEUIDå‘一个任务å‘é€SIGIO
+ ä¿¡å·ã€‚åœ¨è¿™ç§æƒ…况下,文件结构也会有一个主体上下文。
+
+ 5. 主体上下文
+
+ ä¸»ä½“å¯¹å…¶å‡­æ®æœ‰ä¸€ä¸ªé¢å¤–的解释。其凭æ®çš„一个å­é›†å½¢æˆäº†â€œä¸»ä½“上下文â€ã€‚主体
+ 上下文在主体执行æ“作时作为安全计算的一部分使用。
+
+ 例如,Linux任务在æ“作文件时会有FSUIDã€FSGID和附加组列表 —— 这些凭æ®
+ 与通常构æˆä»»åŠ¡çš„å¯¹è±¡ä¸Šä¸‹æ–‡çš„çœŸå®žUIDå’ŒGID是相互独立的。
+
+ 6. æ“作
+
+ Linuxæä¾›è®¸å¤šæ“作,主体å¯ä»¥å¯¹å¯¹è±¡æ‰§è¡Œè¿™äº›æ“作。å¯ç”¨çš„æ“ä½œé›†å–决于主体
+ 和对象的性质。
+
+
+ æ“作包括读å–ã€å†™å…¥ã€åˆ›å»ºå’Œåˆ é™¤æ–‡ä»¶ï¼Œä»¥åŠæ´¾ç”Ÿï¼ˆforking)或å‘é€
+ ä¿¡å·ï¼ˆsignalling)和跟踪(tracing)任务等。
+
+ 7. 规则,访问控制列表和安全计算
+
+ 当主体对对象进行æ“作时,会进行安全计算。这涉åŠåˆ°ä½¿ç”¨ä¸»ä½“上下文ã€å¯¹è±¡
+ 上下文和æ“作,并æœç´¢ä¸€ä¸ªæˆ–多个规则集,以确定在给定这些上下文的情况下,
+ 主体是å¦è¢«æŽˆäºˆæˆ–æ‹’ç»ä»¥æ‰€éœ€æ–¹å¼å¯¹å¯¹è±¡è¿›è¡Œæ“作的æƒé™ã€‚
+
+ ä¸»è¦æœ‰ä¸¤ä¸ªè§„åˆ™æ¥æºï¼š
+
+ a. 自主访问控制(DAC):
+
+ 有时,对象的æè¿°ä¸­ä¼šåŒ…å«ä¸€ç»„è§„åˆ™ã€‚è¿™å°±æ˜¯æ‰€è°“çš„â€œè®¿é—®æŽ§åˆ¶åˆ—è¡¨â€æˆ–‘ACL’。
+ 一个Linux文件å¯ä»¥æä¾›å¤šä¸ªACL。
+
+ 例如,传统的UNIX文件包括一个æƒé™æŽ©ç ï¼Œå®ƒæ˜¯ä¸€ä¸ªç®€åŒ–çš„ACL,具有三个固定的
+ 主体类别(“用户â€ã€â€œç»„â€å’Œâ€œå…¶ä»–â€ï¼‰ï¼Œæ¯ä¸€ä¸ªéƒ½å¯ä»¥è¢«æŽˆäºˆä¸€å®šçš„特æƒï¼ˆå¦‚“读å–â€ã€
+ “写入â€å’Œâ€œæ‰§è¡Œâ€ —— 无论这些映射对于对象æ„味ç€ä»€ä¹ˆï¼‰ã€‚然而,UNIX文件æƒé™ä¸
+ å…è®¸ä»»æ„æŒ‡å®šä¸»ä½“,因此用途有é™ã€‚
+
+ Linux文件还å¯ä»¥æ”¯æŒPOSIX ACL。这是一个规则列表,为任æ„主体授予å„ç§æƒé™ã€‚
+
+ b. 强制访问控制(MAC):
+
+ 整个系统å¯èƒ½æœ‰ä¸€ä¸ªæˆ–多个规则集,适用于所有主体和对象,ä¸è€ƒè™‘å®ƒä»¬çš„æ¥æºã€‚
+ SELinuxå’ŒSmackå°±æ˜¯è¿™ç§æƒ…况的例å­ã€‚
+
+ 在SELinuxå’ŒSmack的情况下,æ¯ä¸ªå¯¹è±¡åœ¨å…¶å‡­æ®ä¸­éƒ½è¢«èµ‹äºˆä¸€ä¸ªæ ‡ç­¾ã€‚当请求执
+ 行æ“作时,它们使用主体标签ã€å¯¹è±¡æ ‡ç­¾å’Œæ“作,寻找一个规则,该规则表示此æ“
+ 作是授予还是拒ç»çš„。
+
+
+凭æ®ç±»åž‹
+========
+
+Linux内核支æŒä»¥ä¸‹ç±»åž‹çš„凭æ®ï¼š
+
+ 1. 传统的UNIX凭æ®ã€‚
+
+ - 真实用户ID
+ - 真实组ID
+
+ UIDå’ŒGIDç”±å¤§å¤šæ•°ï¼ˆå¦‚æžœä¸æ˜¯å…¨éƒ¨ï¼‰Linux对象æºå¸¦ï¼Œå³ä½¿æœ‰æ—¶å®ƒä»¬éœ€è¦è¢«è™šæž„出
+ æ¥ï¼ˆä¾‹å¦‚FAT或CIFSæ–‡ä»¶ï¼Œè¿™äº›æ–‡ä»¶æ¥æºäºŽWindows)。这些(通常)定义了该对象
+ 的对象上下文,但任务在æŸäº›æƒ…况下略有ä¸åŒã€‚
+
+ - 有效用户ID,ä¿å­˜ç”¨æˆ·IDå’ŒFS用户ID
+ - 有效组ID,ä¿å­˜ç»„IDå’ŒFS组ID
+ - 补充组
+
+ 这些是仅由任务使用的é¢å¤–凭æ®ã€‚通常,一个EUID/EGID/GROUPS 被用作主体上下文,
+ 而真实UID/GID è¢«ç”¨ä½œå¯¹è±¡ä¸Šä¸‹æ–‡ã€‚å¯¹äºŽä»»åŠ¡ï¼Œè¿™å¹¶ä¸æ€»æ˜¯æ­£ç¡®çš„。
+
+ 2. 能力
+
+ - å…许的能力集åˆ
+ - å¯ç»§æ‰¿çš„能力集åˆ
+ - 有效的能力集åˆ
+ - 能力边界集åˆ
+
+ 这些仅由任务æºå¸¦ï¼Œè¡¨ç¤ºæŽˆäºˆä»»åŠ¡çš„è¶…å‡ºæ™®é€šä»»åŠ¡æƒé™çš„能力。这些å¯ä»¥é€šè¿‡ä¼ ç»Ÿ
+ UNIX凭æ®çš„æ›´æ”¹è¿›è¡Œéšå¼æ“作,但也å¯ä»¥é€šè¿‡ ``capset()`` 系统调用直接æ“作。
+
+ å…许的能力是指进程å¯ä»¥é€šè¿‡ ``capset()`` 将其添加到其有效或å…许集åˆä¸­çš„
+ 那些能力。这个å¯ç»§æ‰¿çš„集åˆä¹Ÿå¯èƒ½å—到这样的é™åˆ¶ã€‚
+
+ 有效能力是任务本身实际å¯ä»¥ä½¿ç”¨çš„能力。
+
+ å¯ç»§æ‰¿èƒ½åŠ›æ˜¯é‚£äº›å¯ä»¥é€šè¿‡ ``execve()`` 传递的能力。
+
+ 边界集é™åˆ¶äº†é€šè¿‡ ``execve()`` 继承的能力,特别是在以UID 0执行二进制文件时。
+
+ 3. å®‰å…¨ç®¡ç†æ ‡è®°ï¼ˆsecurebits)
+
+ 它们用于控制上述凭æ®åœ¨ç‰¹å®šæ“作如execve()中的æ“作和继承方å¼ã€‚它们并ä¸ç›´æŽ¥
+ 用作对象或主体凭æ®ä½¿ç”¨ã€‚
+
+ 4. 密钥和密钥环
+
+ 这些仅由任务æºå¸¦ã€‚它们用于æºå¸¦å’Œç¼“å­˜ä¸é€‚åˆæ”¾å…¥å…¶ä»–标准UNIX凭æ®ä¸­çš„安全令牌。
+ 它们用诸如使网络文件系统密钥在进程执行的文件访问时å¯ç”¨ï¼Œè€Œæ— éœ€è®©æ™®é€šç¨‹åºäº†è§£
+ 涉åŠçš„安全细节。
+
+ 密钥环是一ç§ç‰¹æ®Šç±»åž‹çš„密钥。它们æºå¸¦ä¸€ç»„其他密钥,并å¯ä»¥æœç´¢æ¥æŸ¥æ‰¾æ‰€éœ€çš„密钥。
+ æ¯ä¸ªè¿›ç¨‹å¯ä»¥è®¢é˜…多个密钥环:
+
+ æ¯çº¿ç¨‹å¯†é’¥
+ æ¯è¿›ç¨‹å¯†é’¥çޝ
+ æ¯ä¼šè¯å¯†é’¥çޝ
+
+ 当进程访问一个密钥时,若尚ä¸å­˜åœ¨ï¼Œåˆ™é€šå¸¸ä¼šå°†å…¶ç¼“存在一个密钥环中,以便将æ¥çš„
+ 访问时找到该密钥。
+
+ 有关密钥的更多信æ¯ï¼Œè¯·å‚è§ ``Documentation/translations/zh_CN/security/keys/*`` 。
+
+ 5. LSM
+
+ Linux安全模å—å…许在任务执行æ“作时施加é¢å¤–的控制。目å‰ï¼ŒLinux支æŒå‡ ç§LSM选项。
+
+ 一些工作通过标记系统中的对象,并应用一组规则(策略)说明æŸä¸ªæ ‡ç­¾çš„任务å¯ä»¥å¯¹
+ å¦ä¸€æ ‡ç­¾çš„对象执行哪些æ“作。
+
+ 6. AF_KEY
+
+ 这是一ç§åŸºäºŽå¥—接字网络å议栈中的凭æ®ç®¡ç†[RFC 2367]。本文档中没有讨论它,因为ä¸
+ 直接与任务和文件凭æ®è¿›è¡Œäº¤äº’,而是ä¿ç•™äº†ç³»ç»Ÿçº§çš„凭æ®ã€‚
+
+
+当打开一个文件时,打开任务的主体上下文的一部分会记录在创建的文件结构中。
+这使得使用该文件结构的æ“作å¯ä»¥ä½¿ç”¨è¿™äº›å‡­æ®ï¼Œè€Œä¸æ˜¯å‘出æ“作的任务的主体上下文。
+ä¸€ä¸ªä¾‹å­æ˜¯åœ¨ç½‘络文件系统上打开的文件,打开文件的凭æ®åº”该被呈现给æœåŠ¡å™¨ï¼Œè€Œä¸ç®¡
+å®žé™…è¿›è¡Œè¯»å–æˆ–写入æ“作的是è°ã€‚
+
+
+文件标记
+========
+
+存储在ç£ç›˜ä¸Šæˆ–通过网络获å–的文件å¯èƒ½å…·æœ‰æ³¨é‡Šï¼Œæž„æˆè¯¥æ–‡ä»¶çš„对象安全上下文。
+æ ¹æ®æ–‡ä»¶ç³»ç»Ÿçš„类型,这些注释å¯èƒ½åŒ…括以下一项或多项:
+
+ * UNIX UID, GID, mode;
+ * Windows user ID;
+ * Access control list;
+ * LSM security label;
+ * UNIX exec privilege escalation bits (SUID/SGID);
+ * File capabilities exec privilege escalation bits.
+
+å°†è¿™äº›ä¸Žä»»åŠ¡çš„ä¸»ä½“å®‰å…¨ä¸Šä¸‹æ–‡è¿›è¡Œæ¯”è¾ƒï¼Œå¹¶æ ¹æ®æ¯”较结果å…è®¸æˆ–ç¦æ­¢æ‰§è¡ŒæŸäº›æ“作。
+在execve()çš„æƒ…å†µä¸‹ï¼Œç‰¹æƒæå‡ä½èµ·ä½œç”¨ï¼Œå¹¶ä¸”å¯èƒ½å…è®¸ç”±å¯æ‰§è¡Œæ–‡ä»¶çš„æ³¨é‡Šå†³å®šçš„
+进程获得é¢å¤–的特æƒã€‚
+
+
+任务凭æ®
+========
+
+在Linux中,一个任务的所有凭æ®éƒ½ä¿å­˜åœ¨ä¸€ä¸ªå¼•用计数结构体‘struct cred’中,
+通过(uid, gid)或(groups, keys, LSM security)进行访问。æ¯ä¸ªä»»åŠ¡åœ¨å…¶
+task_struct中通过一个å为‘cred’的指针指å‘其凭æ®ã€‚
+
+一旦一组凭æ®å·²ç»å‡†å¤‡å¥½å¹¶æäº¤ï¼Œé™¤éžä»¥ä¸‹å‡ ç§æƒ…况,å¦åˆ™ä¸èƒ½æ›´æ”¹ï¼š
+
+ 1. 其引用计数å¯ä»¥æ›´æ”¹ï¼›
+
+ 2. 它所指å‘çš„ group_info 结构体的引用计数å¯ä»¥æ›´æ”¹ï¼›
+
+ 3. 它所指å‘的安全数æ®çš„引用计数å¯ä»¥æ›´æ”¹ï¼›
+
+ 4. 它所指å‘的任何密钥环的引用计数å¯ä»¥æ›´æ”¹ï¼›
+
+ 5. 它所指å‘的任何密钥环å¯ä»¥è¢«æ’¤é”€ã€è¿‡æœŸæˆ–其安全属性å¯ä»¥æ›´æ”¹ï¼›
+
+ 6. 它所指å‘的任何密钥环的内容å¯ä»¥æ›´æ”¹ï¼ˆå¯†é’¥çŽ¯çš„æ•´ä¸ªç›®çš„å°±æ˜¯ä½œä¸ºä¸€ç»„å…±äº«å‡­æ®ï¼Œ
+ å¯ç”±å…·æœ‰é€‚当访问æƒé™çš„任何人修改)。
+
+è¦æ›´æ”¹cred结构体中的任何内容,必须éµå¾ªå¤åˆ¶å’Œæ›¿æ¢çš„原则。首先进行å¤åˆ¶ï¼Œç„¶åŽä¿®
+改副本,最åŽä½¿ç”¨RCU(读-å¤åˆ¶-æ›´æ–°ï¼‰å°†ä»»åŠ¡æŒ‡é’ˆæ›´æ”¹ä¸ºæŒ‡å‘æ–°çš„副本。有一些å°è£…å¯
+用于帮助执行这个过程(è§ä¸‹æ–‡ï¼‰ã€‚
+
+一个任务åªèƒ½ä¿®æ”¹è‡ªå·±çš„凭æ®ï¼›ä¸å†å…许一个任务修改å¦ä¸€ä¸ªä»»åŠ¡çš„å‡­æ®ã€‚
+è¿™æ„å‘³ç€ ``capset()`` 系统调用ä¸å†å…许使用除当å‰è¿›ç¨‹ä¹‹å¤–的任何PID。
+此外, ``keyctl_instantiate()`` å’Œ ``keyctl_negate()`` 函数也ä¸å†
+å…许在请求进程中附加到特定于进程的密钥环,因为实例化进程å¯èƒ½éœ€è¦åˆ›å»ºå®ƒä»¬ã€‚
+
+
+ä¸å¯å˜å‡­æ®
+----------
+
+一旦一组凭æ®å·²ç»è¢«å…¬å¼€ï¼ˆä¾‹å¦‚通过调用 ``commit_creds()`` ),必须将其视为
+ä¸å¯å˜çš„,除了两个例外情况:
+
+ 1. 引用计数å¯ä»¥è¢«ä¿®æ”¹ã€‚
+
+ 2. 虽然无法更改一组凭æ®çš„密钥环订阅,但订阅的密钥环的内容å¯ä»¥è¢«æ›´æ”¹ã€‚
+
+为了在编译时æ•获æ„外的凭æ®ä¿®æ”¹ï¼Œstruct task_struct具有_const_指针指å‘其凭æ®é›†ï¼Œ
+struct file也是如此。此外,æŸäº›å‡½æ•°å¦‚ ``get_cred()`` å’Œ ``put_cred()`` 在
+const指针上æ“作,因此ä¸éœ€è¦è¿›è¡Œç±»åž‹è½¬æ¢ï¼Œä½†éœ€è¦ä¸´æ—¶æ”¾å¼ƒconsté™å®šï¼Œä»¥ä¾¿èƒ½å¤Ÿä¿®æ”¹
+引用计数。
+
+
+访问任务凭æ®
+------------
+
+任务åªèƒ½ä¿®æ”¹è‡ªå·±çš„凭æ®ï¼Œå…许当å‰è¿›ç¨‹å¯ä»¥è¯»å–或替æ¢è‡ªå·±çš„凭æ®ï¼Œæ— éœ€ä»»ä½•å½¢å¼é”定的
+情况下 —— è¿™æžå¤§ç®€åŒ–了事情。它å¯ä»¥è°ƒç”¨::
+
+ const struct cred *current_cred()
+
+èŽ·å–æŒ‡å‘其凭æ®ç»“构的指针,并且之åŽä¸å¿…释放它。
+
+有一些方便的å°è£…用于检索任务凭æ®çš„特定方é¢ï¼ˆåœ¨æ¯ç§æƒ…况下都åªè¿”回值)::
+
+ uid_t current_uid(void) Current's real UID
+ gid_t current_gid(void) Current's real GID
+ uid_t current_euid(void) Current's effective UID
+ gid_t current_egid(void) Current's effective GID
+ uid_t current_fsuid(void) Current's file access UID
+ gid_t current_fsgid(void) Current's file access GID
+ kernel_cap_t current_cap(void) Current's effective capabilities
+ struct user_struct *current_user(void) Current's user account
+
+还有一些方便的å°è£…,用于检索任务凭æ®çš„特定关è”对::
+
+ void current_uid_gid(uid_t *, gid_t *);
+ void current_euid_egid(uid_t *, gid_t *);
+ void current_fsuid_fsgid(uid_t *, gid_t *);
+
+在从当å‰ä»»åŠ¡çš„å‡­æ®ä¸­æ£€ç´¢åŽï¼Œé€šè¿‡å…¶å‚数返回这些值对。
+
+
+此外,还有一个函数用于获å–当å‰è¿›ç¨‹çš„当å‰å‡­æ®é›†çš„引用::
+
+ const struct cred *get_current_cred(void);
+
+以åŠç”¨äºŽèŽ·å–对一个实际上ä¸å­˜åœ¨äºŽstruct cred中的凭æ®çš„引用的函数::
+
+ struct user_struct *get_current_user(void);
+ struct group_info *get_current_groups(void);
+
+分别获得对当å‰è¿›ç¨‹çš„ user accounting structure 和补充组列表的引用。
+
+一旦获得引用,就必须使用 ``put_cred()``, ``free_uid()`` 或
+``put_group_info()`` æ¥é€‚当释放它。
+
+
+访问其他任务的凭æ®
+------------------
+
+虽然一个任务å¯ä»¥åœ¨ä¸éœ€è¦é”定的情况下访问自己的凭æ®ï¼Œä½†æƒ³è¦è®¿é—®å¦ä¸€ä¸ªä»»åŠ¡
+的凭æ®çš„任务并éžå¦‚此。它必须使用RCU读é”å’Œ ``rcu_dereference()``。
+
+``rcu_dereference()`` 是由::
+
+ const struct cred *__task_cred(struct task_struct *task);
+
+这应该在RCU读é”中使用,如下例所示::
+
+ void foo(struct task_struct *t, struct foo_data *f)
+ {
+ const struct cred *tcred;
+ ...
+ rcu_read_lock();
+ tcred = __task_cred(t);
+ f->uid = tcred->uid;
+ f->gid = tcred->gid;
+ f->groups = get_group_info(tcred->groups);
+ rcu_read_unlock();
+ ...
+ }
+
+如果需è¦é•¿æ—¶é—´æŒæœ‰å¦ä¸€ä¸ªä»»åŠ¡çš„å‡­æ®ï¼Œå¹¶ä¸”å¯èƒ½åœ¨æ­¤è¿‡ç¨‹ä¸­ä¼‘眠,则调用方
+应该使用以下函数æ¥èŽ·å–对这些凭æ®çš„引用::
+
+ const struct cred *get_task_cred(struct task_struct *task);
+
+这个函数内部完æˆäº†æ‰€æœ‰çš„RCUæ“ä½œã€‚å½“ä½¿ç”¨å®Œè¿™äº›å‡­æ®æ—¶ï¼Œè°ƒç”¨æ–¹å¿…须调用put_cred()
+函数释放它们。
+
+.. note::
+ ``__task_cred()`` 的结果ä¸åº”直接传递给 ``get_cred()`` ,
+ 因为这å¯èƒ½ä¸Ž ``commit_cred()`` å‘生竞争æ¡ä»¶ã€‚
+
+还有一些方便的函数å¯ä»¥è®¿é—®å¦ä¸€ä¸ªä»»åС凭æ®çš„特定部分,将RCUæ“作对调用方éšè—èµ·æ¥::
+
+ uid_t task_uid(task) Task's real UID
+ uid_t task_euid(task) Task's effective UID
+
+å¦‚æžœè°ƒç”¨æ–¹åœ¨æ­¤æ—¶å·²ç»æŒæœ‰RCU读é”,则应使用::
+
+ __task_cred(task)->uid
+ __task_cred(task)->euid
+
+类似地,如果需è¦è®¿é—®ä»»åС凭æ®çš„多个方é¢ï¼Œåº”使用RCU读é”,调用 ``__task_cred()``
+函数,将结果存储在临时指针中,然åŽä»Žä¸´æ—¶æŒ‡é’ˆä¸­è°ƒç”¨å‡­æ®çš„å„个方é¢ï¼Œæœ€åŽé‡Šæ”¾é”。
+这样å¯ä»¥é˜²æ­¢å¤šæ¬¡è°ƒç”¨æ˜‚贵的RCUæ“作。
+
+如果需è¦è®¿é—®å¦ä¸€ä¸ªä»»åС凭æ®çš„å…¶ä»–å•个方é¢ï¼Œå¯ä»¥ä½¿ç”¨::
+
+ task_cred_xxx(task, member)
+
+这里的‘member’是credç»“æž„ä½“çš„éžæŒ‡é’ˆæˆå‘˜ã€‚例如::
+
+ uid_t task_cred_xxx(task, suid);
+
+将从任务中检索‘struct cred::suid’,并执行适当的RCUæ“作。对于指针æˆå‘˜ï¼Œ
+ä¸èƒ½ä½¿ç”¨è¿™ç§å½¢å¼ï¼Œå› ä¸ºå®ƒä»¬æŒ‡å‘的内容å¯èƒ½åœ¨é‡Šæ”¾RCU读é”的瞬间消失。
+
+
+修改凭æ®
+--------
+
+å¦‚å…ˆå‰æåˆ°çš„ï¼Œä¸€ä¸ªä»»åŠ¡åªèƒ½ä¿®æ”¹è‡ªå·±çš„凭æ®ï¼Œä¸èƒ½ä¿®æ”¹å…¶ä»–任务的凭æ®ã€‚è¿™æ„味
+ç€å®ƒä¸éœ€è¦ä½¿ç”¨ä»»ä½•锿¥ä¿®æ”¹è‡ªå·±çš„凭æ®ã€‚
+
+è¦ä¿®æ”¹å½“å‰è¿›ç¨‹çš„凭æ®ï¼Œå‡½æ•°åº”首先调用::
+
+ struct cred *prepare_creds(void);
+
+这将é”定current->cred_replace_mutex,然åŽåˆ†é…并构建当å‰è¿›ç¨‹å‡­æ®çš„副本。
+如果æˆåŠŸï¼Œå‡½æ•°è¿”å›žæ—¶ä»ç„¶ä¿æŒäº’æ–¥é”ã€‚å¦‚æžœä¸æˆåŠŸï¼ˆå†…å­˜ä¸è¶³ï¼‰ï¼Œåˆ™è¿”回NULL。
+
+互斥é”防止 ``ptrace()`` åœ¨è¿›è¡Œå‡­æ®æž„建和更改的安全检查时更改进程的ptrace
+状æ€ï¼Œå› ä¸ºptrace状æ€å¯èƒ½ä¼šæ”¹å˜ç»“果,特别是在 ``execve()`` 的情况下。
+
+新的凭æ®é›†åº”适当地进行修改,并进行任何安全检查和挂钩。在此时,当å‰å’Œå»ºè®®çš„
+凭æ®é›†éƒ½å¯ç”¨ï¼Œå› ä¸ºcurrent_cred()将返回当å‰çš„凭æ®é›†ã€‚
+
+在替æ¢ç»„列表时,必须在将其添加到凭æ®ä¹‹å‰å¯¹æ–°åˆ—表进行排åºï¼Œå› ä¸ºä½¿ç”¨äºŒåˆ†æŸ¥æ‰¾
+测试æˆå‘˜èµ„格。实际上,这æ„味ç€åœ¨set_groups()或set_current_groups()之
+å‰åº”调用groups_sort()。groups_sort()ä¸èƒ½åœ¨å…±äº«çš„ ``struct group_list``
+上调用,因为å³ä½¿æ•°ç»„å·²ç»æŽ’åºï¼Œå®ƒä¹Ÿå¯èƒ½ä½œä¸ºæŽ’åºè¿‡ç¨‹çš„一部分对元素进行排列。
+
+当凭æ®é›†å‡†å¤‡å¥½æ—¶ï¼Œåº”通过调用以下函数将其æäº¤ç»™å½“å‰è¿›ç¨‹::
+
+ int commit_creds(struct cred *new);
+
+这将修改凭æ®å’Œè¿›ç¨‹çš„å„个方é¢ï¼Œç»™LSMæä¾›æœºä¼šåšåŒæ ·çš„修改,然åŽä½¿ç”¨
+``rcu_assign_pointer()`` 将新的凭æ®å®žé™…æäº¤ç»™ ``current->cred`` ,
+释放 ``current->cred_replace_mutex`` 以å…许 ``ptrace()`` 进行æ“
+作,并通知调度程åºå’Œå…¶ä»–组件有关更改的情况。
+
+该函数ä¿è¯è¿”回0,以便å¯ä»¥åœ¨è¯¸å¦‚ ``sys_setresuid()`` 函数的末尾进行尾调用。
+
+请注æ„,该函数会消耗调用者对新凭æ®çš„引用。调用者在此之åŽä¸åº”调用
+``put_cred()`` 释放新凭æ®ã€‚
+
+此外,一旦新的凭æ®ä¸Šè°ƒç”¨äº†è¯¥å‡½æ•°ï¼Œå°±ä¸èƒ½è¿›ä¸€æ­¥æ›´æ”¹è¿™äº›å‡­æ®ã€‚
+
+
+如果在调用 ``prepare_creds()`` 之åŽå®‰å…¨æ£€æŸ¥å¤±è´¥æˆ–å‘生其他错误,
+则应调用以下函数::
+
+ void abort_creds(struct cred *new);
+
+这将释放 ``prepare_creds()`` 获å–çš„ ``current->cred_replace_mutex`` çš„é”,
+并释放新的凭æ®ã€‚
+
+一个典型的凭æ®ä¿®æ”¹å‡½æ•°çœ‹èµ·æ¥åƒè¿™æ ·::
+
+ int alter_suid(uid_t suid)
+ {
+ struct cred *new;
+ int ret;
+
+ new = prepare_creds();
+ if (!new)
+ return -ENOMEM;
+
+ new->suid = suid;
+ ret = security_alter_suid(new);
+ if (ret < 0) {
+ abort_creds(new);
+ return ret;
+ }
+
+ return commit_creds(new);
+ }
+
+
+管ç†å‡­æ®
+--------
+
+有一些函数用æ¥è¾…助凭æ®ç®¡ç†:
+
+ - ``void put_cred(const struct cred *cred);``
+
+ 这将释放对给定凭æ®é›†çš„引用。如果引用计数为零,凭æ®é›†å°†ç”±
+ RCU系统安排进行销æ¯ã€‚
+
+ - ``const struct cred *get_cred(const struct cred *cred);``
+
+ 这将获å–对活动凭æ®é›†çš„引用。返回指å‘凭æ®é›†çš„æŒ‡é’ˆã€‚
+
+ - ``struct cred *get_new_cred(struct cred *cred);``
+
+ 这将获å–坹当剿­£åœ¨æž„建且å¯å˜çš„凭æ®é›†çš„引用。返回指å‘凭æ®é›†çš„æŒ‡é’ˆã€‚
+
+打开文件凭æ®
+============
+
+当打开新文件时,会获å–对打开任务凭æ®çš„引用,并将其附加到文件结构体的
+``f_cred`` 字段中,替代原æ¥çš„ ``f_uid`` å’Œ ``f_gid`` 。原æ¥è®¿é—®
+``file->f_uid`` å’Œ ``file->f_gid`` 的代ç çŽ°åœ¨åº”è®¿é—® ``file->f_cred->fsuid``
+和 ``file->f_cred->fsgid`` 。
+
+安全访问 ``f_cred`` 的情况下å¯ä»¥ä¸ä½¿ç”¨RCU或加é”,因为指å‘凭æ®çš„æŒ‡é’ˆ
+ä»¥åŠæŒ‡å‘的凭æ®ç»“æž„çš„å†…å®¹åœ¨æ–‡ä»¶ç»“æž„çš„æ•´ä¸ªç”Ÿå‘½å‘¨æœŸä¸­ä¿æŒä¸å˜ï¼Œé™¤éžæ˜¯
+上述列出的例外情况(å‚阅任务凭æ®éƒ¨åˆ†ï¼‰ã€‚
+
+为了é¿å…“混淆代ç†â€æƒé™æå‡æ”»å‡»ï¼Œåœ¨æ‰“开的文件åŽç»­æ“作时,访问控制检查
+应该使用这些凭æ®ï¼Œè€Œä¸æ˜¯ä½¿ç”¨â€œå½“å‰â€çš„凭æ®ï¼Œå› ä¸ºè¯¥æ–‡ä»¶å¯èƒ½å·²ç»è¢«ä¼ é€’ç»™
+一个更具特æƒçš„进程。
+
+覆盖VFS对凭æ®çš„使用
+===================
+
+在æŸäº›æƒ…况下,需è¦è¦†ç›–VFS使用的凭æ®ï¼Œå¯ä»¥é€šè¿‡ä½¿ç”¨ä¸åŒçš„凭æ®é›†è°ƒç”¨
+如 ``vfs_mkdir()`` æ¥å®žçŽ°ã€‚ä»¥ä¸‹æ˜¯ä¸€äº›è¿›è¡Œæ­¤æ“作的ä½ç½®:
+
+ * ``sys_faccessat()``.
+ * ``do_coredump()``.
+ * nfs4recover.c.
diff --git a/Documentation/translations/zh_CN/security/index.rst b/Documentation/translations/zh_CN/security/index.rst
index d8aacd1930d9..78d9d4b36dca 100644
--- a/Documentation/translations/zh_CN/security/index.rst
+++ b/Documentation/translations/zh_CN/security/index.rst
@@ -15,20 +15,20 @@
.. toctree::
:maxdepth: 1
+ credentials
+ snp-tdx-threat-model
lsm
sak
+ self-protection
siphash
+ tpm/index
digsig
landlock
TODOLIST:
-* credentials
-* snp-tdx-threat-model
* IMA-templates
* keys/index
* lsm-development
* SCTP
-* self-protection
-* tpm/index
* secrets/index
* ipe
diff --git a/Documentation/translations/zh_CN/security/keys/index.rst b/Documentation/translations/zh_CN/security/keys/index.rst
new file mode 100644
index 000000000000..7c28d003fb0a
--- /dev/null
+++ b/Documentation/translations/zh_CN/security/keys/index.rst
@@ -0,0 +1,22 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+.. include:: ../../disclaimer-zh_CN.rst
+
+:Original: Documentation/security/keys/index.rst
+
+:翻译:
+
+
+========
+内核密钥
+========
+
+.. toctree::
+ :maxdepth: 1
+
+
+TODOLIST:
+* core
+* ecryptfs
+* request-key
+* trusted-encrypted
diff --git a/Documentation/translations/zh_CN/security/secrets/index.rst b/Documentation/translations/zh_CN/security/secrets/index.rst
new file mode 100644
index 000000000000..5ea78713f10e
--- /dev/null
+++ b/Documentation/translations/zh_CN/security/secrets/index.rst
@@ -0,0 +1,17 @@
+.. SPDX-License-Identifier: GPL-2.0
+.. include:: ../../disclaimer-zh_CN.rst
+
+:Original: Documentation/security/secrets/index.rst
+
+:翻译:
+
+=====================
+密钥文档
+=====================
+
+.. toctree::
+
+
+TODOLIST:
+
+* coco
diff --git a/Documentation/translations/zh_CN/security/self-protection.rst b/Documentation/translations/zh_CN/security/self-protection.rst
new file mode 100644
index 000000000000..3c8a68b1e1be
--- /dev/null
+++ b/Documentation/translations/zh_CN/security/self-protection.rst
@@ -0,0 +1,271 @@
+.. SPDX-License-Identifier: GPL-2.0
+.. include:: ../disclaimer-zh_CN.rst
+:Original: Documentation/security/self-protection.rst
+
+:翻译:
+
+ å¼ å· zhangwei <zhangwei@cqsoftware.com.cn>
+
+============
+å†…æ ¸è‡ªæˆ‘ä¿æŠ¤
+============
+
+å†…æ ¸è‡ªæˆ‘ä¿æŠ¤æ˜¯æŒ‡åœ¨Linux内核中设计与实现的å„ç§ç³»ç»Ÿä¸Žç»“æž„
+ä»¥é˜²æ­¢å†…æ ¸æœ¬èº«çš„å®‰å…¨æ¼æ´žé—®é¢˜ã€‚它涵盖了广泛问题,包括去除
+æ•´ä¸ªç±»çš„æ¼æ´žï¼Œé˜»æ­¢å®‰å…¨æ¼æ´žåˆ©ç”¨æ–¹æ³•,以åŠä¸»åŠ¨æ£€æµ‹æ”»å‡»å°
+è¯•ã€‚å¹¶éžæ‰€æœ‰çš„è¯é¢˜éƒ½åœ¨æœ¬æ–‡ä¸­æ¶‰åŠï¼Œä½†å®ƒåº”该为了解内核自我
+ä¿æŠ¤æä¾›ä¸€ä¸ªåˆç†çš„起点,并解答常è§çš„问题。(当然,欢迎æ
+交补ä¸ï¼ï¼‰
+
+在最å的情况下,我们å‡è®¾ä¸€ä¸ªéžç‰¹æƒçš„æœ¬åœ°æ”»å‡»è€…对内核内存
+有任æ„读写访问æƒé™ã€‚è™½ç„¶åœ¨è®¸å¤šæƒ…å†µä¸‹ï¼Œæ¼æ´žè¢«åˆ©ç”¨æ—¶å¹¶ä¸ä¼š
+æä¾›æ­¤çº§åˆ«çš„访问æƒé™ï¼Œä½†å¦‚æžœæˆ‘ä»¬èƒ½é˜²å¾¡æœ€åæƒ…况,也能应对
+æƒé™è¾ƒä½Žçš„æ”»å‡»ã€‚一个更高的标准,且需è¦ç‰¢è®°çš„æ˜¯ä¿æŠ¤å†…æ ¸å…
+å—具有特æƒçš„æœ¬åœ°æ”»å‡»è€…的攻击,因为root用户å¯ä»¥æœ‰æ›´å¤šæƒé™ã€‚
+(尤其是当他们能够加载任æ„å†…æ ¸æ¨¡å—æ—¶ï¼‰
+
+æˆåŠŸçš„è‡ªæˆ‘ä¿æŠ¤çš„ç›®æ ‡æ˜¯ï¼šæœ‰æ•ˆã€é»˜è®¤å¼€å¯ã€ä¸éœ€è¦å¼€å‘者主动
+é€‰æ‹©ã€æ²¡æœ‰æ€§èƒ½å½±å“ã€ä¸å¦¨ç¢å†…核调试ã€å¹¶ä¸”没有测试。虽然很
+难满足所有的这些目标,但明确æåˆ°è¿™äº›ç›®æ ‡éžå¸¸é‡è¦ï¼Œå› ä¸ºè¿™
+些方é¢éœ€è¦è¢«æŽ¢ç´¢ã€è§£å†³æˆ–接å—。
+
+==========
+攻击é¢ç¼©å‡
+==========
+
+é˜²æ­¢å®‰å…¨æ¼æ´žæœ€åŸºæœ¬çš„é˜²å¾¡æ–¹å¼æ˜¯å‡å°‘å¯ä»¥è¢«ç”¨æ¥é‡å®šå‘执行的
+内核区域。这包括é™åˆ¶ç”¨æˆ·å…¬å¼€ä½¿ç”¨çš„APIã€ä½¿å†…æ ¸API更难被错
+è¯¯ä½¿ç”¨ã€æœ€å°åŒ–å¯å†™å†…核内存区域等。
+
+严格的内核内存æƒé™
+-------------------
+
+当所有内核内存都是å¯å†™çš„,攻击者å¯ä»¥è½»æ¾åœ°é‡å®šå‘执行æµã€‚
+为了å‡å°‘è¿™ç§æ”»å‡»ç›®æ ‡çš„å¯ç”¨æ€§ï¼Œå†…æ ¸éœ€è¦æ›´ä¸¥æ ¼çš„æƒé™é›†æ¥
+ä¿æŠ¤å…¶å†…å­˜ã€‚
+
+坿‰§è¡Œä»£ç å’Œåªè¯»æ•°æ®å¿…é¡»ä¸å¯å†™
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ä»»ä½•å…·æœ‰å¯æ‰§è¡Œå†…存的区域必须ä¸å¯å†™ï¼Œæ˜¾ç„¶è¿™ä¹ŸåŒ…括内核文本
+本身。我们还必须考虑其他地方:内核模å—ã€JIT内存等,(在
+æŸäº›æƒ…况下,为了支æŒåƒæŒ‡ä»¤æ›¿ä»£ã€æ–­ç‚¹ã€kprobes等功能,这些
+区域会暂时被设置为å¯å†™ã€‚如果这些功能必须存在于内核中,它
+ä»¬çš„å®žçŽ°æ–¹å¼æ˜¯ï¼šåœ¨æ›´æ–°æœŸé—´å°†å†…存临时设置å¯å†™ï¼Œç„¶åŽå†æ¢å¤
+为原始æƒé™ã€‚)
+
+为了支æŒè¿™ä¸€ç‚¹ï¼ŒCONFIG_STRICT_KERNEL_RWX å’Œ
+CONFIG_STRICT_MODULE_RWX 的设计旨在确ä¿ä»£ç ä¸å¯å†™ï¼Œæ•°æ®ä¸
+坿‰§è¡Œï¼Œä»¥åŠåªè¯»æ•°æ®æ—¢ä¸å¯å†™ä¹Ÿä¸å¯æ‰§è¡Œã€‚
+
+大多数架构默认支æŒè¿™äº›é€‰é¡¹ï¼Œä¸”用户无法选择。对于一些åƒarm
+è¿™ç§å¸Œæœ›èƒ½å¤Ÿé€‰æ‹©è¿™äº›é€‰é¡¹çš„æž¶æž„,å¯ä»¥åœ¨æž¶æž„Kconfig中选择
+ARCH_OPTIONAL_KERNEL_RWX以å¯ç”¨Kconfigæç¤ºã€‚
+CONFIG_ARCH_OPTIONAL_KERNEL_RWX_DEFAULT决定在å¯ç”¨
+ARCH_OPTIONAL_KERNEL_RWX时的默认设置。
+
+å‡½æ•°æŒ‡é’ˆå’Œæ•æ„Ÿå˜é‡å¿…é¡»ä¸å¯å†™
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+内核内存中有大é‡çš„函数指针,这些指针被内核查找并用于继续执行
+(例如,æè¿°ç¬¦/å‘é‡è¡¨ã€æ–‡ä»¶/网络等æ“作结构等)。这些å˜é‡çš„æ•°
+é‡å¿…é¡»å‡å°‘到最低é™åº¦
+
+许多åƒè¿™æ ·çš„å˜é‡å¯ä»¥é€šè¿‡è®¾ç½®ä¸º"const"æ¥å®žçްåªè¯»ï¼Œä»Žè€Œä½¿å®ƒä»¬
+存放在内核的.rodata段而éž.data段,从而获得内核严格内存æƒé™çš„
+ä¿æŠ¤ã€‚
+
+对于在_init是仅åˆå§‹åŒ–一次的å˜é‡ï¼Œå¯ä»¥ä½¿ç”¨_ro_after_init属性
+进行标记。
+
+剩下的å˜é‡é€šå¸¸æ˜¯é‚£äº›æ›´æ–°é¢‘率较低的(例如GDT)。这些å˜é‡éœ€è¦å¦
+一个机制(类似于上述æåˆ°çš„å¯¹å†…æ ¸ä»£ç æ‰€åšçš„临时例外),以便在
+å…¶ä½™ç”Ÿå‘½å‘¨æœŸå†…ä¿æŒåªè¯»çжæ€ã€‚ï¼ˆä¾‹å¦‚ï¼Œåœ¨è¿›è¡Œæ›´æ–°æ—¶ï¼Œåªæœ‰æ‰§è¡Œ
+æ›´æ–°çš„CPU线程会被授予对内存的ä¸å¯ä¸­æ–­å†™å…¥è®¿é—®æƒé™ã€‚)
+
+将内核内存与用户空间内存分隔开
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+内核ç»å¯¹ä¸å¯ä»¥æ‰§è¡Œç”¨æˆ·ç©ºé—´å†…å­˜ï¼ŒåŒæ—¶ï¼Œå†…核也ä¸å¾—在没有明确预
+期的情况下访问用户内存空间。这些规则å¯ä»¥é€šè¿‡ä¸€äº›ç¡¬ä»¶é™åˆ¶æ¥æ”¯
+æŒï¼ˆå¦‚x86çš„SMEP/SMAP,ARMçš„PXN/PAN)或通过仿真(如ARM的内存
+域)æ¥å¼ºåˆ¶æ‰§è¡Œã€‚é€šè¿‡è¿™ç§æ–¹å¼é˜»æ­¢ç”¨æˆ·ç©ºé—´å†…存的访问,攻击者就
+无法将执行和数æ®è§£æžè½¬ç§»åˆ°æ˜“于控制的用户空间内存,从而迫使攻
+击完全在内核中进行。
+
+å‡å°‘对系统调用的访问
+--------------------
+
+对于64ä½ç³»ç»Ÿï¼Œä¸€ç§æ¶ˆé™¤è®¸å¤šç³»ç»Ÿè°ƒç”¨æœ€ç®€å•的方法是构建时ä¸å¯ç”¨
+CONFIG_CONPATã€‚ç„¶è€Œï¼Œè¿™ç§æƒ…况通常ä¸å¯è¡Œã€‚
+
+“seccompâ€ç³»ç»Ÿä¸ºç”¨æˆ·ç©ºé—´æä¾›äº†ä¸€ç§å¯é€‰åŠŸèƒ½ï¼Œæä¾›äº†ä¸€ç§å‡å°‘å¯ä¾›
+è¿è¡Œä¸­è¿›ç¨‹ä½¿ç”¨å†…核入å£ç‚¹æ•°é‡çš„æ–¹æ³•。这é™åˆ¶äº†å¯ä»¥è®¿é—®å†…核代ç 
+的范围,å¯èƒ½é™ä½Žäº†æŸä¸ªç‰¹å®šæ¼æ´žè¢«æ”»å‡»è€…利用的å¯èƒ½æ€§ã€‚
+
+ä¸€ä¸ªæ”¹è¿›çš„æ–¹å‘æ˜¯åˆ›å»ºæœ‰æ•ˆçš„æ–¹æ³•,仅å…许å—信任的进程访问例如兼
+容模å¼ã€ç”¨æˆ·å‘½å空间ã€BPF创建和性能分æžç­‰åŠŸèƒ½ã€‚è¿™å°†æŠŠå†…æ ¸å…¥å£
+点范围é™åˆ¶åœ¨é€šå¸¸å¯ä»¥è¢«éžç‰¹æƒç”¨æˆ·ç©ºé—´è¿›ç¨‹è®¿é—®çš„较常è§é›†åˆä¸­
+
+é™åˆ¶å¯¹å†…核模å—的访问
+--------------------
+
+内核ç»ä¸åº”å…许éžç‰¹æƒç”¨æˆ·åŠ è½½ç‰¹å®šçš„å†…æ ¸æ¨¡å—,因为这å¯èƒ½ä¸ºæ”»å‡»è€…
+æä¾›ä¸€ä¸ªæ„外扩展的å¯ç”¨æ”»å‡»é¢çš„æ–¹æ³•。(通过已预定义å­ç³»ç»ŸæŒ‰éœ€åŠ 
+载模å—,如MODULE_ALIAS_*,被认为是“预期的â€ï¼Œä½†å³ä¾¿å¦‚此,也应对
+这些情况给予更多的关注。)例如,通过éžç‰¹æƒçš„套接字API加载文件
+ç³»ç»Ÿæ¨¡å—æ˜¯æ²¡æœ‰æ„ä¹‰çš„ï¼šåªæœ‰rootç”¨æˆ·æˆ–ç‰©ç†æœ¬åœ°ç”¨æˆ·åº”è¯¥è§¦å‘æ–‡ä»¶ç³»
+统模å—的加载。(在æŸäº›æƒ…况下,这甚至å¯èƒ½å­˜åœ¨äº‰è®®ã€‚)
+
+为了防止特æƒç”¨æˆ·çš„æ”»å‡»ï¼Œç³»ç»Ÿå¯èƒ½éœ€è¦å®Œå…¨ç¦æ­¢æ¨¡å—加载(例如,通
+过å•体内核构建或modules_disabled sysctlï¼‰ï¼Œæˆ–è€…ä½¿ç”¨ç­¾åæ¨¡å—(例
+如,CONFIG_MODULE_SIG_FORCE或通过LoadPinä¿æŠ¤çš„dm-crypt),以防
+æ­¢root用户通过模å—加载器加载任æ„内核代ç ã€‚
+
+内存完整性
+----------
+
+å†…æ ¸ä¸­æœ‰è®¸å¤šå†…å­˜ç»“æž„åœ¨æ”»å‡»è¿‡ç¨‹ä¸­è¢«å®šæœŸæ³›æ»¥ç”¨ä»¥èŽ·å–æ‰§è¡ŒæŽ§åˆ¶ï¼Œè¿„今
+为止,最常è§çš„æ˜¯å †æ ˆç¼“å†²åŒºæº¢å‡ºï¼Œåœ¨è¿™ç§æ”»å‡»ä¸­ï¼Œå †æ ˆä¸Šå­˜å‚¨çš„返回地
+å€è¢«è¦†ç›–。除此之外,还有许多其他类型的攻击,防护措施也应è¿è€Œç”Ÿã€‚
+
+堆栈缓冲区溢出
+--------------
+
+ç»å…¸çš„堆栈缓冲区溢出攻击是指超出栈上分é…çš„å˜é‡é¢„期大å°ï¼Œä»Žè€Œå°†ä¸€
+ä¸ªå—æŽ§å€¼å†™å…¥æ ˆå¸§çš„è¿”å›žåœ°å€ã€‚最常è§çš„é˜²å¾¡æŽªæ–½æ˜¯å †æ ˆä¿æŠ¤
+(CONFIG_STACKPROTECTOR),它在函数返回å‰ä¼šéªŒè¯æ ˆä¸Šçš„“stack canaryâ€ã€‚
+其他防御措施还包括影å­å †æ ˆç­‰ã€‚
+
+堆栈深度溢出
+------------
+
+一个ä¸å¤ªå®¹æ˜“被ç†è§£çš„æ”»å‡»æ–¹å¼æ˜¯åˆ©ç”¨bug触å‘内核通过深度函数调用或
+å¤§çš„å †æ ˆåˆ†é…æ¥æ¶ˆè€—å †æ ˆå†…å­˜ã€‚é€šè¿‡è¿™ç§æ”»å‡»ï¼Œæ”»å‡»è€…å¯ä»¥å°†æ•°æ®å†™å…¥å†…
+核预分é…å †æ ˆç©ºé—´ä¹‹å¤–çš„æ•æ„Ÿç»“æž„ã€‚ä¸ºäº†æ›´å¥½çš„é˜²æŠ¤è¿™ç§æ”»å‡»ï¼Œå¿…须进行
+两项é‡è¦çš„æ›´æ”¹ï¼šå°†æ•感的线程信æ¯ç»“构转移到其他地方,并在堆栈底部
+添加一个故障内存洞,以æ•获这些溢出
+
+栈内存完整性
+------------
+
+用于跟踪堆空闲列表的结构å¯ä»¥åœ¨åˆ†é…和释放时进行完整性检查,以确ä¿å®ƒ
+们ä¸ä¼šè¢«ç”¨æ¥æ“作其它内存区域。
+
+计算器完整性
+------------
+
+内核中的许多地方使用原å­è®¡æ•°å™¨æ¥è·Ÿè¸ªå¯¹è±¡å¼•用或执行类似的生命周期管
+ç†ã€‚当这些计数器å¯èƒ½å‘生溢出时(无论是上溢还是下溢),这通常会暴露
+出使用åŽé‡Šæ”¾ï¼ˆuse-after-freeï¼‰æ¼æ´žã€‚é€šè¿‡æ•æ‰åŽŸå­è®¡æ•°å™¨æº¢å‡ºï¼Œè¿™ç±»æ¼
+æ´žå°±å¯ä»¥æ¶ˆå¤±ã€‚
+
+大å°è®¡ç®—溢出检测
+----------------
+
+与计算器溢出类似,整数溢出(通常是大å°è®¡ç®—)需è¦åœ¨è¿è¡Œæ—¶è¿›è¡Œæ£€æµ‹ï¼Œ
+ä»¥é˜²æ­¢è¿™ç±»åœ¨ä¼ ç»Ÿä¸Šä¼šå¯¼è‡´èƒ½å¤Ÿå†™å…¥å†…æ ¸ç¼“å†²åŒºæœ«å°¾ä¹‹å¤–çš„æ¼æ´žã€‚
+
+概率性防御
+----------
+
+尽管许多防御措施å¯ä»¥è¢«è®¤å®šæ˜¯ç¡®å®šçš„(例如,åªè¯»å†…å­˜ä¸èƒ½å†™å…¥ï¼‰ï¼Œä½†
+æœ‰äº›ç¡®ä¿æŽªæ–½ä»…æä¾›ç»Ÿè®¡é˜²å¾¡ï¼Œå³æ”»å‡»è€…必须收集足够的关于è¿è¡Œç³»ç»Ÿçš„
+ä¿¡æ¯æ‰èƒ½çªç ´é˜²å¾¡ã€‚尽管这些防御并ä¸å®Œç¾Žï¼Œä½†å®ƒä»¬ç¡®å®žæä¾›äº†æœ‰æ„义的
+ä¿æŠ¤ã€‚
+
+æ ˆä¿æŠ¤ã€è¿·æƒ‘技术和其他秘密
+--------------------------
+
+值得注æ„的是,åƒä¹‹å‰è®¨è®ºçš„æ ˆä¿æŠ¤è¿™æ ·çš„æŠ€æœ¯ï¼Œä»ŽæŠ€æœ¯ä¸Šæ¥è¯´æ˜¯ç»Ÿè®¡æ€§é˜²
+御,因为它们ä¾èµ–于一个秘密值,而这样的值å¯èƒ½ä¼šé€šè¿‡ä¿¡æ¯æ³„éœ²æ¼æ´žè€Œè¢«
+å‘现。
+
+对于想JITï¼ˆåŠæ—¶ç¿»è¯‘å™¨ï¼‰è¿™æ ·çš„æƒ…å†µï¼Œå…¶ä¸­å¯æ‰§è¡Œå†…容å¯èƒ½éƒ¨åˆ†ç”±ç”¨æˆ·ç©ºé—´
+控制,也需è¦ç±»ä¼¼çš„秘密之æ¥è¿·æƒ‘。
+
+至关é‡è¦çš„æ˜¯ï¼Œæ‰€ä½¿ç”¨çš„秘密值必须是独立的(例如,æ¯ä¸ªæ ˆä½¿ç”¨ä¸åŒçš„æ ˆ
+ä¿æŠ¤å€¼ï¼‰ï¼Œå¹¶ä¸”å…·æœ‰é«˜ç†µï¼ˆä¾‹å¦‚ï¼Œéšæœºæ•°ç”Ÿæˆå™¨ï¼ˆRNGï¼‰æ˜¯å¦æ­£å¸¸å·¥ä½œï¼Ÿï¼‰ï¼Œ
+以最大é™åº¦åœ°æé«˜å…¶æˆåŠŸçŽ‡ã€‚
+
+内核地å€ç©ºé—´å¸ƒå±€éšæœºåŒ–ï¼ˆKASLR)
+-------------------------------
+
+由于内核内存的ä½ç½®å‡ ä¹Žæ€»æ˜¯æ”»å‡»æˆåŠŸçš„å…³é”®å› ç´ ï¼Œå› æ­¤ä½¿å†…æ ¸å†…å­˜ä½ç½®å˜
+å¾—éžç¡®å®šæ€§ä¼šå¢žåŠ æ”»å‡»çš„éš¾åº¦ã€‚ï¼ˆè¯·æ³¨æ„,这åè¿‡æ¥æé«˜äº†ä¿¡æ¯æ³„露的价
+值,因为泄露的信æ¯å¯ä»¥ç”¨æ¥å‘现目标内存ä½ç½®ã€‚)
+
+文本和模å—基å€
+--------------
+
+通过在å¯åŠ¨æ—¶é‡æ–°è®¾å®šå†…核的物ç†åŸºåœ°å€å’Œè™šæ‹ŸåŸºåœ°å€
+(CONFIG_RANDOMIZE_BASE),那些需è¦åˆ©ç”¨å†…核代ç çš„æ”»å‡»å°†ä¼šå—阻。此外
+通过å移模å—加载基地å€ï¼Œæ„味ç€å³ä½¿ç³»ç»Ÿæ¯æ¬¡å¯åŠ¨æ—¶æŒ‰ç›¸åŒé¡ºåºåŠ è½½åŒä¸€
+组模å—,这些模å—也ä¸ä¼šä¸Žå†…核文本的其余部分公用一个基地å€ã€‚
+
+堆栈基地å€
+----------
+
+如果进程之间内核堆栈的基地å€ä¸ç›¸åŒï¼Œç”šè‡³åœ¨ä¸åŒç³»ç»Ÿè°ƒç”¨ä¹‹é—´ä¹Ÿä¸ç›¸åŒï¼Œ
+那么栈上或超出栈的目标ä½ç½®å°±ä¼šå˜å¾—更加难以确定。
+
+动æ€å†…存基å€
+------------
+
+很多内核的动æ€å†…存(例如kmalloc,vmalloc等)由于早期å¯åЍåˆå§‹åŒ–的顺
+åºï¼Œæœ€ç»ˆå¸ƒå±€æ˜¯ç›¸å¯¹ç¡®å®šçš„。如果这些区域的基地å€åœ¨å¯åЍ之间ä¸ç›¸åŒï¼Œæ”»
+击者就无法轻易定ä½å®ƒä»¬ï¼Œå¿…é¡»ä¾èµ–äºŽé’ˆå¯¹è¯¥åŒºåŸŸçš„ä¿¡æ¯æ³„露æ‰èƒ½æˆåŠŸã€‚
+
+结构布局
+--------
+
+é€šè¿‡åœ¨æ¯æ¬¡æž„å»ºæ—¶å¯¹æ•æ„Ÿç»“æž„çš„å¸ƒå±€è¿›è¡ŒéšæœºåŒ–处ç†ï¼Œæ”»å‡»è¿™å¿…须将攻击调
+节到已知的内核版本,或者泄露足够的内核内存æ¥ç¡®å®šç»“æž„å¸ƒå±€ï¼Œç„¶åŽæ‰èƒ½
+对其进行æ“作。
+
+é˜²æ­¢ä¿¡æ¯æ³„露
+------------
+
+ç”±äºŽæ•æ„Ÿç»“构的ä½ç½®æ˜¯æ”»å‡»çš„主è¦ç›®æ ‡ï¼Œå› æ­¤é˜²æ­¢å†…核内存地å€å’Œå†…核内存
+内容泄露éžå¸¸é‡è¦ï¼ˆå› ä¸ºå®ƒä»¬å¯èƒ½åŒ…å«å†…æ ¸åœ°å€æˆ–è€…å…¶ä»–æ•æ„Ÿæ•°æ®ï¼Œä¾‹å¦‚
+æ ˆä¿æŠ¤å€¼ï¼‰ã€‚
+
+内核地å€
+--------
+
+å°†å†…æ ¸åœ°å€æ‰“å°åˆ°ç”¨æˆ·ç©ºé—´ä¼šæ³„éœ²æœ‰å…³å†…æ ¸å†…å­˜å¸ƒå±€çš„æ•æ„Ÿä¿¡æ¯ã€‚在使用任
+何打å°ç¬¦å·æ‰“å°åŽŸå§‹åœ°å€æ—¶ï¼Œç›®å‰%px,%p[ad](和在æŸäº›æƒ…况下的%p[sSb])
+时。使用这些格å¼ç¬¦å†™å…¥çš„æ–‡ä»¶éœ€è¦é™åˆ¶ä¸ºåªæœ‰ç‰¹æƒè¿›ç¨‹å¯è¯»ã€‚
+
+在4.14åŠä»¥å‰çš„内核版本中,使用%pæ ¼å¼ç¬¦æ‰“å°çš„æ˜¯åŽŸå§‹åœ°å€ã€‚从4.15-rcl
+版本开始,使用%pæ ¼å¼ç¬¦æ‰“å°çš„地å€ä¼šåœ¨æ‰“å°å‰è¿›è¡Œå“ˆå¸Œå¤„ç†ã€‚
+
+[*]如果å¯ç”¨KALLSYMSå¹¶ä¸”ç¬¦å·æŸ¥æ‰¾å¤±è´¥ï¼Œåˆ™æ‰“å°åŽŸå§‹åœ°å€ï¼›å¦‚果没有å¯ç”¨
+KALLSYSM,则会直接打å°åŽŸå§‹åœ°å€ã€‚
+
+唯一标识符
+----------
+
+内核内存地å€ç»ä¸å¯èƒ½ç”¨ä½œå‘用户空间公开的标识符。相å,应该使用原å­
+计数器,IDR(ID映射表)或类似的唯一标识符。
+
+内存åˆå§‹åŒ–
+----------
+
+å¤åˆ¶åˆ°ç”¨æˆ·ç©ºé—´çš„内存必须始终被完全åˆå§‹åŒ–,如果没有显å¼åœ°ä½¿ç”¨memset()
+函数进行åˆå§‹åŒ–,那就需è¦ä¿®æ”¹ç¼–è¯‘å™¨ï¼Œç¡®ä¿æ¸…除结构中的空洞。
+
+内存清除
+--------
+
+在释放内存时,最好对内存内容进行清除处ç†ï¼Œä»¥é˜²æ­¢æ”»å‡»è€…é‡ç”¨å†…存中以å‰
+的内容。例如,在系统调用返回时清除堆栈(CONFIG_GCC_PLUGIN_STACKLEAK),
+在释放堆内容是清除其内容。这有助于防止许多未åˆå§‹åŒ–å˜é‡æ”»å‡»ã€å †æ ˆå†…容
+泄露ã€å †å†…容泄露以åŠä½¿ç”¨åŽé‡Šæ”¾æ”»å‡»ï¼ˆuser-after-free)。
+
+目标追踪
+--------
+
+为了帮助消除导致内核地å€è¢«å†™å…¥ç”¨æˆ·ç©ºé—´çš„å„ç§é”™è¯¯ï¼Œéœ€è¦è·Ÿè¸ªå†™å…¥çš„目标。
+如果缓冲区的目标是用户空间(例如,基于seq_file的/proc文件),则应该自
+åŠ¨å®¡æŸ¥æ•æ„Ÿå€¼ã€‚
diff --git a/Documentation/translations/zh_CN/security/snp-tdx-threat-model.rst b/Documentation/translations/zh_CN/security/snp-tdx-threat-model.rst
new file mode 100644
index 000000000000..b51eeaebab67
--- /dev/null
+++ b/Documentation/translations/zh_CN/security/snp-tdx-threat-model.rst
@@ -0,0 +1,209 @@
+.. SPDX-License-Identifier: GPL-2.0
+.. include:: ../disclaimer-zh_CN.rst
+
+:Original: Documentation/security/snp-tdx-threat-model.rst
+
+:翻译:
+
+ 毛玉贤 Yuxian Mao <maoyuxian@cqsoftware.com.cn>
+
+==========================
+Linux中x86虚拟化的机密计算
+==========================
+
+.. contents:: :local:
+
+By: Elena Reshetova <elena.reshetova@intel.com> and Carlos Bilbao <carlos.bilbao.osdev@gmail.com>
+
+动机
+====
+
+在x86虚拟环境中从事机密计算工作的内核开å‘人员,是基于一组与传统Linux内核
+å¨èƒæ¨¡åž‹æœ‰æ‰€ä¸åŒçš„å‡è®¾æ¡ä»¶ä¸‹å¼€å±•工作的。传统æ„义上,Linuxå¨èƒæ¨¡åž‹æ‰¿è®¤æ”»
+击者å¯ä»¥å­˜åœ¨äºŽç”¨æˆ·ç©ºé—´ï¼Œä»¥åŠä¸€å°éƒ¨åˆ†èƒ½å¤Ÿé€šè¿‡å„ç§ç½‘ç»œæŽ¥å£æˆ–有é™çš„硬件特定
+暴露接å£ï¼ˆå¦‚USBã€Thunderbolt)与内核交互的外部攻击者。本文档的目的是解释
+在机密计算领域中出现的é¢å¤–攻击å‘é‡ï¼Œå¹¶è®¨è®ºä¸º Linux 内核æå‡ºçš„ä¿æŠ¤æœºåˆ¶ã€‚
+
+概述与术语
+==========
+
+机密计算(Confidential Computing,简称CoCo)是一个广泛的术语,涵盖了多ç§
+æ—¨åœ¨ä¿æŠ¤æ•°æ®åœ¨ä½¿ç”¨è¿‡ç¨‹ä¸­ï¼ˆä¸Žé™æ€æ•°æ®æˆ–传输数æ®ç›¸æ¯”)的机密性和完整性的安
+全技术。从本质上讲,机密计算(CoCo)解决方案æä¾›äº†ä¸€ä¸ªå—信任执行环境(TEE),
+在该环境中å¯ä»¥è¿›è¡Œå®‰å…¨çš„æ•°æ®å¤„ç†ï¼Œå› æ­¤ï¼Œå®ƒä»¬é€šå¸¸æ ¹æ®é¢„期在TEE中è¿è¡Œçš„软件
+æ¥è¿›ä¸€æ­¥åˆ’分为ä¸åŒçš„å­ç±»åž‹ã€‚本文档专注于一类针对虚拟化环境的机密计算技术
+(Confidential Computing, CoCo),这些技术å…许在å¯ä¿¡æ‰§è¡ŒçŽ¯å¢ƒ
+(Trusted Execution Environment, TEE)中è¿è¡Œè™šæ‹Ÿæœºï¼ˆVM)。从现在起,本文档
+将把这一类机密计算(CoCo)技术称为“虚拟化环境(VE)中的机密计算(CoCo)â€ã€‚
+
+在虚拟化环境中,机密计算(CoCo)指的是一组硬件和/或软件技术,这些技术能够
+为在CoCo虚拟机(VM)内è¿è¡Œçš„软件æä¾›æ›´å¼ºçš„安全ä¿éšœã€‚具体æ¥è¯´ï¼Œæœºå¯†è®¡ç®—å…许
+其用户确认所有软件组件的å¯ä¿¡åº¦ï¼Œä»Žè€Œå°†å…¶åŒ…å«åœ¨ç²¾ç®€çš„å—信任计算基(TCB)中,
+这是基于机密计算具备验è¯è¿™äº›å—信组件状æ€çš„能力。
+
+虽然ä¸åŒæŠ€æœ¯ä¹‹é—´çš„具体实现细节有所ä¸åŒï¼Œä½†æ‰€æœ‰çŽ°æœ‰æœºåˆ¶éƒ½æ—¨åœ¨ä¸ºè™šæ‹Ÿæœºçš„å®¢æˆ·
+内存和执行状æ€ï¼ˆvCPU寄存器)æä¾›æ›´é«˜çš„æœºå¯†æ€§å’Œå®Œæ•´æ€§ï¼Œæ›´ä¸¥æ ¼åœ°æŽ§åˆ¶å®¢æˆ·ä¸­æ–­
+注入,并æä¾›ä¸€äº›é¢å¤–æœºåˆ¶æ¥æŽ§åˆ¶å®¢æˆ·ä¸Žå®¿ä¸»æœºä¹‹é—´çš„é¡µæ˜ å°„ã€‚æœ‰å…³x86特定解决方案
+的更多细节,å¯ä»¥å‚考
+:doc:`Intel Trust Domain Extensions (TDX) </arch/x86/tdx>` 和
+`AMD Memory Encryption <https://www.amd.com/system/files/techdocs/sev-snp-strengthening-vm-isolation-with-integrity-protection-and-more.pdf>`_.
+
+基本的机密计算(CoCo)客户布局包括宿主机ã€å®¢æˆ·æœºã€ç”¨äºŽå®¢æˆ·æœºä¸Žå®¿ä¸»æœºä¹‹é—´é€šä¿¡
+的接å£ã€èƒ½å¤Ÿæ”¯æŒCoCo虚拟机(VM)的平å°ï¼Œä»¥åŠä¸€ä¸ªåœ¨å®¢æˆ·VM和底层平å°ä¹‹é—´å……当安
+全管ç†å‘˜çš„å¯ä¿¡ä¸­ä»‹ã€‚宿主机侧的虚拟机监视器(VMM)通常由传统VMM功能的一个å­é›†
+组æˆï¼Œå¹¶ä»ç„¶è´Ÿè´£å®¢æˆ·æœºç”Ÿå‘½å‘¨æœŸçš„管ç†ï¼Œå³åˆ›å»ºæˆ–销æ¯CoCo虚拟机ã€ç®¡ç†å…¶å¯¹ç³»ç»Ÿèµ„
+æºçš„访问等。然而,由于它通常ä¸åœ¨CoCo VMçš„å¯ä¿¡è®¡ç®—基(TCB)内,其访问æƒé™å—到
+é™åˆ¶ï¼Œä»¥ç¡®ä¿å®žçŽ°å®‰å…¨ç›®æ ‡ã€‚
+
+在下图中,"<--->" 线表示机密计算(CoCo)安全管ç†å‘˜ä¸Žå…¶ä½™ç»„件之间的åŒå‘通信通
+铿ˆ–接å£ï¼Œè¿™äº›ç»„件包括客户机ã€å®¿ä¸»æœºå’Œç¡¬ä»¶ï¼ˆæ•°æ®æµï¼‰::
+
+ +-------------------+ +-----------------------+
+ | CoCo guest VM |<---->| |
+ +-------------------+ | |
+ | Interfaces | | CoCo security manager |
+ +-------------------+ | |
+ | Host VMM |<---->| |
+ +-------------------+ | |
+ | |
+ +--------------------+ | |
+ | CoCo platform |<--->| |
+ +--------------------+ +-----------------------+
+
+机密计算(CoCo)安全管ç†å™¨çš„具体细节在在ä¸åŒæŠ€æœ¯ä¹‹é—´å­˜åœ¨æ˜¾è‘—差异。例如,在æŸ
+些情况下,它å¯èƒ½é€šè¿‡ç¡¬ä»¶ï¼ˆHW)实现,而在其他情况下,它å¯èƒ½æ˜¯çº¯è½¯ä»¶ï¼ˆSW)实现。
+
+现有的Linux内核å¨èƒæ¨¡åž‹
+=======================
+
+当å‰Linux内核å¨èƒæ¨¡åž‹çš„æ€»ä½“组件包括::
+
+ +-----------------------+ +-------------------+
+ | |<---->| Userspace |
+ | | +-------------------+
+ | External attack | | Interfaces |
+ | vectors | +-------------------+
+ | |<---->| Linux Kernel |
+ | | +-------------------+
+ +-----------------------+ +-------------------+
+ | Bootloader/BIOS |
+ +-------------------+
+ +-------------------+
+ | HW platform |
+ +-------------------+
+
+在å¯åŠ¨è¿‡ç¨‹ä¸­ï¼Œå¼•å¯¼åŠ è½½ç¨‹åºï¼ˆbootloader)和内核之间也存在通信,但本图并未明确
+表示这一点。“接å£â€æ¡†è¡¨ç¤ºå…许内核与用户空间之间通信的å„ç§æŽ¥å£ã€‚ 这包括系统调用ã€
+内核 APIã€è®¾å¤‡é©±åŠ¨ç¨‹åºç­‰ã€‚
+
+现有的 Linux 内核å¨èƒæ¨¡åž‹é€šå¸¸å‡è®¾å…¶åœ¨ä¸€ä¸ªå—信任的硬件平å°ä¸Šæ‰§è¡Œï¼Œå¹¶ä¸”所有固件
+å’Œå¯åŠ¨åŠ è½½ç¨‹åºéƒ½åŒ…å«åœ¨è¯¥å¹³å°çš„å—信任计算基(TCBï¼‰ä¸­ã€‚ä¸»è¦æ”»å‡»è€…驻留在用户空间
+中,æ¥è‡ªç”¨æˆ·ç©ºé—´çš„æ‰€æœ‰æ•°æ®é€šå¸¸è¢«è®¤ä¸ºæ˜¯ä¸å¯ä¿¡çš„,除éžç”¨æˆ·ç©ºé—´å…·æœ‰è¶³å¤Ÿçš„ç‰¹æƒæ¥
+执行å—信任的æ“作。此外,通常还会考虑外部攻击者,包括那些能够访问å¯ç”¨çš„外部网络
+ï¼ˆä¾‹å¦‚ä»¥å¤ªç½‘ã€æ— çº¿ç½‘络ã€è“ç‰™ï¼‰ã€æš´éœ²çš„硬件接å£ï¼ˆä¾‹å¦‚ USBã€Thunderbolt),以åŠ
+能够离线修改ç£ç›˜å†…容的攻击者。
+
+关于外部攻击途径,值得注æ„的是,在大多数情况下,外部攻击者会首先å°è¯•利用用户空
+é—´çš„æ¼æ´žï¼Œä½†æ”»å‡»è€…也å¯èƒ½ç›´æŽ¥é’ˆå¯¹å†…核,特别是在宿主机具有物ç†è®¿é—®æƒé™çš„æƒ…况下。直
+接攻击内核的例å­åŒ…æ‹¬æ¼æ´ž CVE-2019-19524ã€CVE-2022-0435 å’Œ CVE-2020-24490。
+
+机密计算å¨èƒæ¨¡åž‹åŠå…¶å®‰å…¨ç›®æ ‡
+============================
+
+æœºå¯†è®¡ç®—åœ¨ä¸Šè¿°æ”»å‡»è€…åˆ—è¡¨ä¸­å¢žåŠ äº†ä¸€ç§æ–°çš„æ”»å‡»è€…类型:å¯èƒ½å­˜åœ¨è¡Œä¸ºä¸å½“的宿主机
+(这å¯èƒ½åŒ…括传统虚拟机监视器VMM的部分组件或全部),由于其较大的软件攻击é¢ï¼Œ
+通常被置于CoCo VM TCBä¹‹å¤–ã€‚éœ€è¦æ³¨æ„çš„æ˜¯ï¼Œè¿™å¹¶ä¸æ„味ç€å®¿ä¸»æœºæˆ–VMMæ˜¯æ•…æ„æ¶æ„的,
+而是强调拥有一个较å°çš„CoCo VM TCBå…·æœ‰å®‰å…¨ä»·å€¼ã€‚è¿™ç§æ–°åž‹çš„æ”»å‡»è€…å¯ä»¥è¢«è§†ä¸ºä¸€ç§
+更强大的外部攻击者,因为它ä½äºŽåŒä¸€ç‰©ç†æœºå™¨ä¸Šï¼ˆä¸Žè¿œç¨‹ç½‘络攻击者ä¸åŒï¼‰ï¼Œå¹¶ä¸”对
+客户机内核与大部分硬件的通信具有控制æƒ::
+
+ +------------------------+
+ | CoCo guest VM |
+ +-----------------------+ | +-------------------+ |
+ | |<--->| | Userspace | |
+ | | | +-------------------+ |
+ | External attack | | | Interfaces | |
+ | vectors | | +-------------------+ |
+ | |<--->| | Linux Kernel | |
+ | | | +-------------------+ |
+ +-----------------------+ | +-------------------+ |
+ | | Bootloader/BIOS | |
+ +-----------------------+ | +-------------------+ |
+ | |<--->+------------------------+
+ | | | Interfaces |
+ | | +------------------------+
+ | CoCo security |<--->| Host/Host-side VMM |
+ | manager | +------------------------+
+ | | +------------------------+
+ | |<--->| CoCo platform |
+ +-----------------------+ +------------------------+
+
+ä¼ ç»Ÿä¸Šï¼Œå®¿ä¸»æœºå¯¹å®¢æˆ·æœºæ•°æ®æ‹¥æœ‰æ— é™è®¿é—®æƒé™ï¼Œå¹¶å¯ä»¥åˆ©ç”¨è¿™ç§è®¿é—®æƒé™æ¥æ”»å‡»å®¢æˆ·è™š
+拟机。然而,机密计算(CoCo)系统通过添加诸如客户数æ®ä¿å¯†æ€§å’Œå®Œæ•´æ€§ä¿æŠ¤ç­‰å®‰å…¨
+特性æ¥ç¼“解此类攻击。该å¨èƒæ¨¡åž‹å‡è®¾è¿™äº›å®‰å…¨ç‰¹æ€§æ˜¯å¯ç”¨ä¸”完好的。
+
+这个 **Linux内核机密计算虚拟机(CoCo VM)的安全目标** å¯ä»¥æ€»ç»“如下:
+
+1. ä¿æŠ¤CoCoå®¢æˆ·æœºç§æœ‰å†…存和寄存器的机密性和完整性。
+
+2. 防止宿主机特æƒå‡çº§åˆ°CoCo客户机Linux内核。虽然宿主机(åŠä¸»æœºç«¯è™šæ‹Ÿæœºç®¡ç†ç¨‹åºï¼‰
+ 确实需è¦ä¸€å®šçš„ç‰¹æƒæ¥åˆ›å»ºã€é”€æ¯æˆ–æš‚åœè®¿å®¢ï¼Œä½†é˜²æ­¢ç‰¹æƒå‡çº§çš„部分目标是确ä¿è¿™äº›
+ æ“作ä¸ä¼šä¸ºæ”»å‡»è€…æä¾›èŽ·å–客户机内核访问æƒé™çš„途径。
+
+上述安全目标导致了两个主è¦çš„**Linux内核机密计算虚拟机(CoCo VM)资产**:
+
+1. 客户机内核执行上下文。
+2. å®¢æˆ·æœºå†…æ ¸ç§æœ‰å†…存。
+
+宿主机对CoCo客户机资æºå…·æœ‰å®Œå…¨æŽ§åˆ¶æƒï¼Œå¹¶å¯ä»¥éšæ—¶æ‹’ç»è®¿é—®è¿™äº›èµ„æºã€‚资æºçš„示例包
+括CPUæ—¶é—´ã€å®¢æˆ·æœºå¯ä»¥æ¶ˆè€—的内存ã€ç½‘络带宽等。因此,宿主机对CoCoå®¢æˆ·æœºçš„æ‹’ç»æœåŠ¡
+(DoS)攻击超出了此å¨èƒæ¨¡åž‹çš„范围。
+
+Linux CoCoè™šæ‹Ÿæœºæ”»å‡»é¢æ˜¯æŒ‡ä»ŽCoCo客户机Linux内核暴露到ä¸å—信任的主机的任何接å£ï¼Œ
+è¿™äº›æŽ¥å£æœªè¢«CoCoæŠ€æœ¯çš„è½¯ç¡¬ä»¶ä¿æŠ¤æ‰€è¦†ç›–ã€‚è¿™åŒ…æ‹¬æ‰€æœ‰å¯èƒ½çš„ä¾§ä¿¡é“æ”»å‡»ä»¥åŠçž¬æ€æ‰§
+è¡Œä¾§ä¿¡é“æ”»å‡»ã€‚显å¼ï¼ˆéžæ—é“)接å£çš„示例包括访问端å£I/Oã€å†…存映射I/O(MMIO)和
+直接内存访问(DMA)接å£ã€è®¿é—®PCIé…置空间ã€ç‰¹å®šäºŽè™šæ‹Ÿæœºç®¡ç†ç¨‹åºï¼ˆVMM)的超调用
+(指å‘主机端VMM)ã€è®¿é—®å…±äº«å†…存页ã€ä¸»æœºå…许注入到访客内核的中断,以åŠç‰¹å®šäºŽ
+CoCo技术的超调用(如果存在)。此外,在CoCo系统中,宿主机通常控制创建CoCo客户机
+的过程:它有方法将固件和引导程åºé•œåƒã€å†…核镜åƒä»¥åŠå†…核命令行加载到客户机中。所有
+这些数æ®åœ¨é€šè¿‡è¯æ˜Žæœºåˆ¶ç¡®è®¤å…¶å®Œæ•´æ€§å’ŒçœŸå®žæ€§ä¹‹å‰ï¼Œéƒ½åº”视为ä¸å¯ä¿¡çš„。
+
+下表显示了针对CoCo客户机Linux内核的å¨èƒçŸ©é˜µï¼Œä½†å¹¶æœªè®¨è®ºæ½œåœ¨çš„缓解策略。该矩阵涉
+åŠçš„æ˜¯CoCo特定版本的客户机ã€å®¿ä¸»æœºå’Œå¹³å°ã€‚
+
+.. list-table:: CoCo Linux客户机内核å¨èƒçŸ©é˜µ
+ :widths: auto
+ :align: center
+ :header-rows: 1
+
+ * - å¨èƒåç§°
+ - å¨èƒæè¿°
+
+ * - å®¢æˆ·æœºæ¶æ„é…ç½®
+ - 一个行为ä¸å½“的主机修改了以下其中一个客户机的é…置:
+
+ 1. 客户机固件或引导加载程åº
+
+ 2. 客户机内核或模å—二进制文件
+
+ 3. å®¢æˆ·æœºå‘½ä»¤è¡Œå‚æ•°
+
+ 这使得宿主机能够破å在CoCo客户虚拟机内部è¿è¡Œä»£ç çš„完整性,从而è¿å了机密计算
+ (CoCo)的安全目标。
+
+ * - CoCoå®¢æˆ·æœºæ•°æ®æ”»å‡»
+ - 一个行为ä¸å½“的宿主机对CoCo客户虚拟机与宿主机管ç†çš„ç‰©ç†æˆ–虚拟设备之间传输的数
+ æ®æ‹¥æœ‰å®Œå…¨æŽ§åˆ¶æƒã€‚这使得宿主机å¯ä»¥å¯¹è¿™ç±»æ•°æ®çš„ä¿å¯†æ€§ã€å®Œæ•´æ€§å’Œæ–°é²œæ€§è¿›è¡Œä»»ä½•攻击。
+
+ * - æ ¼å¼é”™è¯¯çš„è¿è¡Œæ—¶è¾“å…¥
+ - 一个行为ä¸å½“的宿主机通过客户机内核代ç ä½¿ç”¨çš„ä»»æ„é€šä¿¡æŽ¥å£æ³¨å…¥æ ¼å¼é”™è¯¯çš„输入。
+ å¦‚æžœä»£ç æ²¡æœ‰æ­£ç¡®å¤„ç†è¿™äº›è¾“入,这å¯èƒ½å¯¼è‡´ä»Žå®¿ä¸»æœºåˆ°å®¢æˆ·æœºå†…æ ¸çš„ç‰¹æƒæå‡ã€‚这包
+ æ‹¬ä¼ ç»Ÿçš„ä¾§ä¿¡é“æ”»å‡»å’Œ/æˆ–çž¬æ€æ‰§è¡Œæ”»å‡»è·¯å¾„。
+
+ * - æ¶æ„è¿è¡Œæ—¶è¾“å…¥
+ - 一个行为ä¸å½“的宿主机通过客户机内核代ç ä½¿ç”¨çš„ä»»æ„é€šä¿¡æŽ¥å£æ³¨å…¥ç‰¹å®šçš„输入值。与之å‰
+ 的攻击å‘é‡ï¼ˆæ ¼å¼é”™è¯¯çš„è¿è¡Œæ—¶è¾“入)ä¸åŒï¼Œè¿™ä¸ªè¾“å…¥å¹¶éžæ ¼å¼é”™è¯¯ï¼Œè€Œæ˜¯å…¶å€¼è¢«ç²¾å¿ƒè®¾
+ 计以影å“客户机内核的安全性。这类输入的例å­åŒ…括å‘客户机æä¾›æ¶æ„的时间或å‘客户机
+ çš„éšæœºæ•°ç”Ÿæˆå™¨æä¾›ç†µå€¼ã€‚此外,如果它导致客户机内核执行特定æ“作(例如处ç†ä¸»æœºæ³¨
+ å…¥çš„ä¸­æ–­ï¼‰ï¼Œæ­¤ç±»äº‹ä»¶çš„æ—¶åºæœ¬èº«ä¹Ÿå¯èƒ½æˆä¸ºä¸€ç§æ”»å‡»è·¯å¾„ã€‚è¿™ç§æ”»å‡»æ˜¯å¯¹æä¾›çš„宿主机输
+ å…¥å…·æœ‰æŠµæŠ—æ€§çš„ä¸€ç§æ–¹å¼ã€‚
diff --git a/Documentation/translations/zh_CN/security/tpm/index.rst b/Documentation/translations/zh_CN/security/tpm/index.rst
new file mode 100644
index 000000000000..707646590647
--- /dev/null
+++ b/Documentation/translations/zh_CN/security/tpm/index.rst
@@ -0,0 +1,20 @@
+.. SPDX-License-Identifier: GPL-2.0
+.. include:: ../../disclaimer-zh_CN.rst
+
+:Original: Documentation/security/tpm/index.rst
+
+:翻译:
+ 赵硕 Shuo Zhao <zhaoshuo@cqsoftware.com.cn>
+
+================
+å¯ä¿¡å¹³å°æ¨¡å—文档
+================
+
+.. toctree::
+
+ tpm_event_log
+ tpm-security
+ tpm_tis
+ tpm_vtpm_proxy
+ xen-tpmfront
+ tpm_ftpm_tee
diff --git a/Documentation/translations/zh_CN/security/tpm/tpm-security.rst b/Documentation/translations/zh_CN/security/tpm/tpm-security.rst
new file mode 100644
index 000000000000..26818d28c98f
--- /dev/null
+++ b/Documentation/translations/zh_CN/security/tpm/tpm-security.rst
@@ -0,0 +1,151 @@
+.. SPDX-License-Identifier: GPL-2.0
+.. include:: ../../disclaimer-zh_CN.rst
+
+:Original: Documentation/security/tpm/tpm-security.rst
+
+:翻译:
+ 赵硕 Shuo Zhao <zhaoshuo@cqsoftware.com.cn>
+
+TPM安全
+=======
+
+本文档的目的是æè¿°æˆ‘们如何使内核使用TPM在é¢å¯¹å¤–部窥探和数æ®åŒ…篡改
+æ”»å‡»ï¼ˆæ–‡çŒ®ä¸­ç§°ä¸ºè¢«åŠ¨å’Œä¸»åŠ¨ä¸­é—´äººæ”»å‡»ï¼‰æ—¶ä¿æŒåˆç†çš„ç¨³å¥æ€§ã€‚当å‰çš„
+安全文档适用于TPM2.0。
+
+介ç»
+----
+
+TPM通常是一个通过æŸç§ä½Žå¸¦å®½æ€»çº¿è¿žæŽ¥åˆ°PC的独立芯片。虽然有一些
+例外,例如Intel PTT,它是è¿è¡Œåœ¨é è¿‘CPU的软件环境中的软件TPM,
+容易å—到ä¸åŒç±»åž‹çš„æ”»å‡»ï¼Œä½†ç›®å‰å¤§å¤šæ•°å¼ºåŒ–çš„å®‰å…¨çŽ¯å¢ƒè¦æ±‚使用独立
+硬件TPM,这是本文讨论的使用场景。
+
+总线上的窥探和篡改攻击
+----------------------
+
+当å‰çš„æŠ€æœ¯çжæ€å…许使用 `TPM Genie`_ 硬件中间人,这是一ç§ç®€å•的外部设备,å¯ä»¥åœ¨
+任何系统或笔记本电脑上几秒钟内安装。最近æˆåŠŸæ¼”ç¤ºäº†é’ˆå¯¹ `Windows Bitlocker TPM`_
+ç³»ç»Ÿçš„æ”»å‡»ã€‚æœ€è¿‘åŒæ ·çš„æ”»å‡»é’ˆå¯¹ `基于TPMçš„Linuxç£ç›˜åР坆`_ 方案也é­åˆ°äº†åŒæ ·çš„æ”»å‡»ã€‚
+下一阶段的研究似乎是入侵总线上现有的设备以充当中间人,因此攻击者需è¦ç‰©ç†è®¿é—®å‡ 
+ç§’é’Ÿçš„è¦æ±‚å¯èƒ½ä¸å†å­˜åœ¨ã€‚然而,本文档的目标是尽å¯èƒ½åœ¨è¿™ç§çŽ¯å¢ƒä¸‹ä¿æŠ¤TPM的机密性和
+完整性,并å°è¯•ç¡®ä¿å³ä½¿æˆ‘们ä¸èƒ½é˜²æ­¢æ”»å‡»ï¼Œè‡³å°‘å¯ä»¥æ£€æµ‹åˆ°å®ƒã€‚
+
+ä¸å¹¸çš„æ˜¯ï¼Œå¤§å¤šæ•°TPM功能,包括硬件é‡ç½®åŠŸèƒ½ï¼Œéƒ½èƒ½è¢«èƒ½å¤Ÿè®¿é—®æ€»çº¿çš„æ”»å‡»
+è€…æŽ§åˆ¶ï¼Œå› æ­¤ä¸‹é¢æˆ‘们将讨论一些å¯èƒ½å‡ºçŽ°çš„å¹²æ‰°æƒ…å†µã€‚
+
+测é‡ï¼ˆPCR)完整性
+-----------------
+
+由于攻击者å¯ä»¥å‘TPMå‘é€è‡ªå·±çš„命令,他们å¯ä»¥å‘é€ä»»æ„çš„PCR扩展,从而破
+åæµ‹é‡ç³»ç»Ÿï¼Œè¿™å°†æ˜¯ä¸€ç§çƒ¦äººçš„æ‹’ç»æœåŠ¡æ”»å‡»ã€‚ç„¶è€Œï¼Œé’ˆå¯¹å¯†å°åˆ°ä¿¡ä»»æµ‹é‡ä¸­
+的实体,有两类更严é‡çš„æ”»å‡»ã€‚
+
+1. 攻击者å¯ä»¥æ‹¦æˆªæ¥è‡ªç³»ç»Ÿçš„æ‰€æœ‰PCR扩展,并完全替æ¢ä¸ºè‡ªå·±çš„值,产生
+ 一个未篡改状æ€çš„é‡çŽ°ï¼Œè¿™ä¼šä½¿PCR测é‡è¯æ˜ŽçŠ¶æ€æ˜¯å¯ä¿¡çš„,并释放密钥。
+
+2. 攻击者å¯èƒ½ä¼šåœ¨æŸä¸ªæ—¶åˆ»é‡ç½®TPM,清除PCR,然åŽå‘é€è‡ªå·±çš„æµ‹é‡ï¼Œä»Žè€Œ
+ 有效地覆盖TPMå·²ç»å®Œæˆçš„å¯åŠ¨æ—¶é—´æµ‹é‡ã€‚
+
+ç¬¬ä¸€ç§æ”»å‡»å¯ä»¥é€šè¿‡å§‹ç»ˆå¯¹PCR扩展和读å–命令进行HMACä¿æŠ¤æ¥é˜²æ­¢ï¼Œè¿™æ„味ç€
+如果没有在å“åº”ä¸­äº§ç”Ÿå¯æ£€æµ‹çš„HMAC失败,则测é‡å€¼æ— æ³•被替æ¢ã€‚然而第二ç§
+攻击åªèƒ½é€šè¿‡ä¾èµ–æŸç§æœºåˆ¶æ¥æ£€æµ‹ï¼Œè¿™ç§æœºåˆ¶ä¼šåœ¨TPMé‡ç½®åŽå‘生å˜åŒ–。
+
+ç§˜å¯†ä¿æŠ¤
+--------
+
+æŸäº›è¿›å‡ºTPM的信æ¯,如密钥密å°ã€ç§é’¥å¯¼å…¥å’Œéšæœºæ•°ç”Ÿæˆå®¹æ˜“被拦截,而仅仅
+使用HMACä¿æŠ¤æ— æ³•é˜²æ­¢è¿™ç§æƒ…况。因此,对于这些类型的命令,我们必须使用
+请求和å“应加密æ¥é˜²æ­¢ç§˜å¯†ä¿¡æ¯çš„æ³„露。
+
+与TPM建立åˆå§‹ä¿¡ä»»
+-----------------
+
+为了从一开始就æä¾›å®‰å…¨æ€§ï¼Œå¿…须建立一个åˆå§‹çš„共享或éžå¯¹ç§°ç§˜å¯†ï¼Œå¹¶ä¸”该
+秘钥必须对攻击者ä¸å¯çŸ¥ã€‚最明显的途径是使用背书和存储ç§å­ï¼Œè¿™äº›å¯ä»¥ç”¨
+æ¥æ´¾ç”Ÿéžå¯¹ç§°å¯†é’¥ã€‚然而,使用这些密钥很困难,因为将它们传递给内核的唯
+一方法是通过命令行,这需è¦åœ¨å¯åŠ¨ç³»ç»Ÿä¸­è¿›è¡Œå¹¿æ³›çš„æ”¯æŒï¼Œè€Œä¸”无法ä¿è¯ä»»
+何一个层次ä¸ä¼šæœ‰ä»»ä½•å½¢å¼çš„æŽˆæƒã€‚
+
+Linux内核选择的机制是从空ç§å­ä½¿ç”¨æ ‡å‡†çš„存储ç§å­å‚数派生出主椭圆曲线
+密钥。空ç§å­æœ‰ä¸¤ä¸ªä¼˜åŠ¿ï¼šé¦–å…ˆè¯¥å±‚æ¬¡ç‰©ç†ä¸Šæ— æ³•具有授æƒï¼Œå› æ­¤æˆ‘们始终å¯
+以使用它;其次空ç§å­åœ¨TPMé‡ç½®æ—¶ä¼šå‘生å˜åŒ–,这æ„味ç€å¦‚果我们在当天开始
+时基于空ç§å­å»ºç«‹ä¿¡ä»»ï¼Œå¦‚æžœTPMé‡ç½®ä¸”ç§å­å˜åŒ–,则所有派生的密钥进行加ç›
+处ç†çš„会è¯éƒ½å°†å¤±è´¥ã€‚
+
+显然,在没有任何其他共享秘密的情况下使用空ç§å­ï¼Œæˆ‘们必须创建并读å–åˆå§‹
+公钥,这当然å¯èƒ½ä¼šè¢«æ€»çº¿ä¸­é—´äººæ‹¦æˆªå¹¶æ›¿æ¢ã€‚然而,TPMæœ‰ä¸€ä¸ªå¯†é’¥è®¤è¯æœºåˆ¶
+(使用EK背书è¯ä¹¦ï¼Œåˆ›å»ºè®¤è¯èº«ä»½å¯†é’¥ï¼Œå¹¶ç”¨è¯¥å¯†é’¥è®¤è¯ç©ºç§å­ä¸»å¯†é’¥ï¼‰ï¼Œä½†ç”±
+äºŽå®ƒè¿‡äºŽå¤æ‚,无法在内核中è¿è¡Œï¼Œå› æ­¤æˆ‘们ä¿ç•™ç©ºä¸»å¯†é’¥å称的副本,通过
+sysfs导出,以便用户空间在å¯åŠ¨æ—¶è¿›è¡Œå®Œæ•´çš„è®¤è¯ã€‚这里的明确ä¿è¯æ˜¯ï¼Œå¦‚果空
+ä¸»å¯†é’¥è®¤è¯æˆåŠŸï¼Œé‚£ä¹ˆä»Žå½“å¤©å¼€å§‹çš„æ‰€æœ‰TPM交易都是安全的;如果认è¯å¤±è´¥ï¼Œåˆ™
+说明系统上有中间人(并且任何在å¯åŠ¨æœŸé—´ä½¿ç”¨çš„ç§˜å¯†å¯èƒ½å·²è¢«æ³„露)。
+
+信任堆å 
+--------
+
+在当å‰çš„空主密钥场景中,TPM必须在交给下一个使用者之å‰å®Œå…¨æ¸…除。然而,
+内核将派生出的空ç§å­å¯†é’¥çš„å称传递给用户空间,用户空间å¯ä»¥é€šè¿‡è®¤è¯æ¥
+验è¯è¯¥å称。因此,这ç§å称传递链也å¯ä»¥ç”¨äºŽå„个å¯åŠ¨ç»„ä»¶ä¹‹é—´ï¼ˆé€šè¿‡æœªæŒ‡
+定的机制)。例如grubå¯ä»¥ä½¿ç”¨ç©ºç§å­æ–¹æ¡ˆæ¥å®žçŽ°å®‰å…¨æ€§ï¼Œå¹¶å°†å称交给内核。
+内核å¯ä»¥æ´¾ç”Ÿå‡ºå¯†é’¥å’Œå称,并确定如果它们与交接的版本ä¸åŒï¼Œåˆ™è¡¨ç¤ºå‘生
+了篡改。因此å¯ä»¥é€šè¿‡å称传递将任æ„å¯åŠ¨ç»„ä»¶ï¼ˆä»ŽUEFI到grub到内核)串è”
+èµ·æ¥ï¼Œåªè¦æ¯ä¸ªåŽç»­ç»„件知é“如何收集该åç§°,å¹¶æ ¹æ®å…¶æ´¾ç”Ÿçš„密钥进行验è¯ã€‚
+
+会è¯å±žæ€§
+--------
+
+所有内核使用的TPM命令都å…许会è¯ã€‚HMAC会è¯å¯ç”¨äºŽæ£€æŸ¥è¯·æ±‚å’Œå“应的完整性,
+而解密和加密标志å¯ç”¨äºŽä¿æŠ¤å‚æ•°å’Œå“应。HMAC和加密密钥通常是从共享授æƒç§˜
+钥推导出æ¥çš„,但对于许多内核æ“作æ¥è¯´ï¼Œè¿™äº›å¯†é’¥æ˜¯å·²çŸ¥çš„(通常为空)。因
+此内核使用空主密钥作为ç›å¯†é’¥æ¥åˆ›å»ºæ¯ä¸ªHMAC会è¯ï¼Œè¿™æ ·å°±ä¸ºä¼šè¯å¯†é’¥çš„æ´¾ç”Ÿ
+æä¾›äº†åŠ å¯†è¾“å…¥ã€‚å› æ­¤å†…æ ¸ä»…éœ€åˆ›å»ºä¸€æ¬¡ç©ºä¸»å¯†é’¥ï¼ˆä½œä¸ºä¸€ä¸ªæ˜“å¤±çš„TPM奿Ÿ„),
+并将其ä¿å­˜åœ¨tpm_chipä¸­ï¼Œç”¨äºŽæ¯æ¬¡åœ¨å†…核中使用TPM时。由于内核资æºç®¡ç†å™¨ç¼º
+ä¹åŽ»é—´éš™åŒ–ï¼Œå½“å‰æ¯æ¬¡æ“作都需è¦åˆ›å»ºå’Œé”€æ¯ä¼šè¯ï¼Œä½†æœªæ¥å¯èƒ½ä¼šå°†å•个会è¯é‡ç”¨
+用于内核中的HMACã€åŠ å¯†å’Œè§£å¯†ä¼šè¯ã€‚
+
+ä¿æŠ¤ç±»åž‹
+--------
+
+对于æ¯ä¸ªå†…æ ¸æ“作,我们使用空主密钥加ç›çš„HMACæ¥ä¿æŠ¤å®Œæ•´æ€§ã€‚æ­¤å¤–æˆ‘ä»¬ä½¿ç”¨å‚æ•°
+加密æ¥ä¿æŠ¤å¯†é’¥å¯†å°ï¼Œå¹¶ä½¿ç”¨å‚数解密æ¥ä¿æŠ¤å¯†é’¥è§£å°å’Œéšæœºæ•°ç”Ÿæˆã€‚
+
+空主密钥认è¯åœ¨ç”¨æˆ·ç©ºé—´çš„实现
+============================
+
+æ¯ä¸ªTPM都会附带几个X.509è¯ä¹¦ï¼Œé€šå¸¸ç”¨äºŽä¸»èƒŒä¹¦å¯†é’¥ã€‚本文档å‡è®¾å­˜åœ¨æ¤­åœ†æ›²çº¿
+版本的è¯ä¹¦ï¼Œä½äºŽ01C00002ï¼Œä½†ä¹ŸåŒæ ·é€‚用于RSAè¯ä¹¦(ä½äºŽ01C00001)。
+
+认è¯çš„第一步是使用 `TCG EK Credential Profile`_ 模æ¿è¿›è¡Œä¸»å¯†é’¥çš„创建,该
+模æ¿å…许将生æˆçš„主密钥与è¯ä¹¦ä¸­çš„主密钥进行比较(公钥必须匹é…ï¼‰ã€‚éœ€è¦æ³¨æ„
+的是,生æˆEK主密钥需è¦EK层级密ç ï¼Œä½†EC主密钥的预生æˆç‰ˆæœ¬åº”ä½äºŽ81010002,
+并且å¯ä»¥æ— éœ€å¯†é’¥æŽˆæƒå¯¹å…¶æ‰§è¡ŒTPM2_ReadPublic()æ“作。接下æ¥ï¼Œè¯ä¹¦æœ¬èº«å¿…é¡»
+ç»è¿‡éªŒè¯ï¼Œä»¥ç¡®ä¿å…¶å¯ä»¥è¿½æº¯åˆ°åˆ¶é€ å•†æ ¹è¯ä¹¦ï¼ˆè¯¥æ ¹è¯ä¹¦åº”公开在制造商网站上)。
+å®Œæˆæ­¤æ­¥éª¤åŽå°†åœ¨TPM内部生æˆä¸€ä¸ªè®¤è¯å¯†é’¥ï¼ˆAK),并使用TPM2_MakeCredentialã€
+AKçš„åç§°å’ŒEK公钥加密一个秘密。然åŽTPM执行TPM2_ActivateCredentialï¼Œåªæœ‰åœ¨
+TPMã€EKå’ŒAK之间的绑定关系æˆç«‹æ—¶ï¼Œæ‰èƒ½æ¢å¤ç§˜å¯†ã€‚现在,生æˆçš„AKå¯ä»¥ç”¨äºŽå¯¹ç”±
+内核导出的空主密钥进行认è¯ã€‚由于TPM2_MakeCredential/ActivateCredentialæ“作
+ç›¸å¯¹å¤æ‚,下é¢å°†æè¿°ä¸€ç§æ¶‰åŠå¤–部生æˆç§é’¥çš„简化过程。
+
+这个过程是通常基于éšç§CA认è¯è¿‡ç¨‹çš„简化缩写。å‡è®¾æ­¤æ—¶è®¤è¯ç”±TPM所有者进行,
+所有者åªèƒ½è®¿é—®æ‰€æœ‰è€…层次。所有者创建一个外部公/ç§é’¥å¯¹ï¼ˆå‡è®¾æ˜¯æ¤­åœ†æ›²çº¿ï¼‰ï¼Œ
+并使用内部包装过程将ç§é’¥è¿›è¡Œå°è£…以便导入,该ç§é’¥è¢«å…¶çˆ¶çº§ç”±EC派生的存储主密
+é’¥ä¿æŠ¤ã€‚TPM2_Import()æ“作使用一个以EK主密钥为ç›å€¼çš„傿•°è§£å¯†HMAC会è¯ï¼ˆè¿™ä¹Ÿä¸
+需è¦EK密钥授æƒï¼‰ï¼Œæ„味ç€å†…部å°è£…å¯†é’¥æ˜¯åŠ å¯†å‚æ•°ï¼Œå› æ­¤é™¤éžTPM拥有认è¯çš„EK,å¦
+则无法执行导入æ“作。如果该命令æˆåŠŸæ‰§è¡Œå¹¶ä¸”HMAC在返回时通过验è¯ï¼Œæˆ‘们就知é“
+我们有一个åªä¸ºè®¤è¯TPM加载的ç§é’¥å‰¯æœ¬ã€‚现在该密钥已加载到TPM中,并且存储主密
+钥已被清除(以释放空间用于生æˆç©ºå¯†é’¥ï¼‰ã€‚
+
+çŽ°åœ¨æ ¹æ® `TCG TPM v2.0 Provisioning Guidance`_ 中的存储é…置生æˆç©ºEC主密钥;
+该密钥的å称(å³å…¬é’¥åŒºåŸŸçš„哈希值)被计算出æ¥å¹¶ä¸Žå†…核在/sys/class/tpm/tpm0/null_name
+中æä¾›çš„空ç§å­å称进行比较。如果åç§°ä¸åŒ¹é…,TPMå°±è¢«è®¤ä¸ºæ˜¯å—æŸçš„。如果å称匹é…,
+用户执行TPM2_Certify()ï¼Œä½¿ç”¨ç©ºä¸»å¯†é’¥ä½œä¸ºå¯¹è±¡å¥æŸ„,使用加载的ç§é’¥ä½œä¸ºç­¾å奿Ÿ„,
+å¹¶æä¾›éšæœºçš„åˆæ ¼æ•°æ®ã€‚返回的certifyInfo的签å将与加载的ç§é’¥çš„公钥部分进行验è¯ï¼Œ
+å¹¶æ£€æŸ¥åˆæ ¼æ•°æ®ä»¥é˜²æ­¢é‡æ”¾ã€‚如果所有测试都通过,用户就å¯ä»¥ç¡®ä¿¡TPM的完整性和éšç§
+性在整个内核å¯åŠ¨è¿‡ç¨‹ä¸­å¾—åˆ°äº†ä¿æŠ¤ã€‚
+
+.. _TPM Genie: https://www.nccgroup.trust/globalassets/about-us/us/documents/tpm-genie.pdf
+.. _Windows Bitlocker TPM: https://dolosgroup.io/blog/2021/7/9/from-stolen-laptop-to-inside-the-company-network
+.. _基于TPMçš„Linuxç£ç›˜åР坆: https://www.secura.com/blog/tpm-sniffing-attacks-against-non-bitlocker-targets
+.. _TCG EK Credential Profile: https://trustedcomputinggroup.org/resource/tcg-ek-credential-profile-for-tpm-family-2-0/
+.. _TCG TPM v2.0 Provisioning Guidance: https://trustedcomputinggroup.org/resource/tcg-tpm-v2-0-provisioning-guidance/
diff --git a/Documentation/translations/zh_CN/security/tpm/tpm_event_log.rst b/Documentation/translations/zh_CN/security/tpm/tpm_event_log.rst
new file mode 100644
index 000000000000..9c173291ac3e
--- /dev/null
+++ b/Documentation/translations/zh_CN/security/tpm/tpm_event_log.rst
@@ -0,0 +1,49 @@
+.. SPDX-License-Identifier: GPL-2.0
+.. include:: ../../disclaimer-zh_CN.rst
+
+:Original: Documentation/security/tpm/tpm_event_log.rst
+
+:翻译:
+ 赵硕 Shuo Zhao <zhaoshuo@cqsoftware.com.cn>
+
+===========
+TPM事件日志
+===========
+
+本文档简è¦ä»‹ç»äº†ä»€ä¹ˆæ˜¯TPM日志,以åŠå®ƒæ˜¯å¦‚何从预å¯åŠ¨å›ºä»¶ç§»äº¤åˆ°æ“作系统的。
+
+介ç»
+====
+
+预å¯åŠ¨å›ºä»¶ç»´æŠ¤ä¸€ä¸ªäº‹ä»¶æ—¥å¿—ï¼Œæ¯å½“它将æŸäº›å†…容哈希到任何一个PCR寄存器时,该
+日志会添加新æ¡ç›®ã€‚这些事件按类型分类,并包å«å“ˆå¸ŒåŽçš„PCR寄存器值。通常,预
+å¯åŠ¨å›ºä»¶ä¼šå“ˆå¸Œé‚£äº›å³å°†ç§»äº¤æ‰§è¡Œæƒæˆ–与å¯åŠ¨è¿‡ç¨‹ç›¸å…³çš„ç»„ä»¶ã€‚
+
+其主è¦åº”用是远程认è¯ï¼Œè€Œå®ƒä¹‹æ‰€ä»¥æœ‰ç”¨çš„原因在[1]中第一部分很好地é˜è¿°äº†ï¼š
+
+认è¯ç”¨äºŽå‘挑战者æä¾›æœ‰å…³å¹³å°çжæ€çš„ä¿¡æ¯ã€‚然而,PCR的内容难以解读;因此,当
+PCRå†…å®¹é™„æœ‰æµ‹é‡æ—¥å¿—时,认è¯é€šå¸¸ä¼šæ›´æœ‰ç”¨ã€‚å°½ç®¡æµ‹é‡æ—¥å¿—本身并ä¸å¯ä¿¡ï¼Œä½†å®ƒä»¬
+åŒ…å«æ¯”PCR内容更为丰富的信æ¯é›†ã€‚PCRå†…å®¹ç”¨äºŽå¯¹æµ‹é‡æ—¥å¿—进行验è¯ã€‚
+
+UEFI事件日志
+============
+
+UEFIæä¾›çš„事件日志有一些比较奇怪的特性。
+
+在调用ExitBootServices()之å‰ï¼ŒLinux EFI引导加载程åºä¼šå°†äº‹ä»¶æ—¥å¿—å¤åˆ¶åˆ°ç”±
+引导加载程åºè‡ªå®šä¹‰çš„é…置表中。ä¸å¹¸çš„æ˜¯ï¼Œé€šè¿‡ExitBootServices()生æˆçš„事件
+å¹¶ä¸ä¼šå‡ºçŽ°åœ¨è¿™ä¸ªè¡¨é‡Œã€‚
+
+固件æä¾›äº†ä¸€ä¸ªæ‰€è°“的最终事件é…ç½®è¡¨æŽ’åºæ¥è§£å†³è¿™ä¸ªé—®é¢˜ã€‚事件会在第一次调用
+EFI_TCG2_PROTOCOL.GetEventLog()åŽè¢«é•œåƒåˆ°è¿™ä¸ªè¡¨ä¸­ã€‚
+
+这引出了å¦ä¸€ä¸ªé—®é¢˜ï¼šæ— æ³•ä¿è¯å®ƒä¸ä¼šåœ¨ Linux EFI stub 开始è¿è¡Œä¹‹å‰è¢«è°ƒç”¨ã€‚
+因此,在 stub è¿è¡Œæ—¶ï¼Œå®ƒéœ€è¦è®¡ç®—并将最终事件表的大å°ä¿å­˜åˆ°è‡ªå®šä¹‰é…置表中,
+以便TPM驱动程åºå¯ä»¥åœ¨ç¨åŽè¿žæŽ¥æ¥è‡ªè‡ªå®šä¹‰é…置表和最终事件表的两个事件日志时
+跳过这些事件。
+
+å‚考文献
+========
+
+- [1] https://trustedcomputinggroup.org/resource/pc-client-specific-platform-firmware-profile-specification/
+- [2] The final concatenation is done in drivers/char/tpm/eventlog/efi.c
diff --git a/Documentation/translations/zh_CN/security/tpm/tpm_ftpm_tee.rst b/Documentation/translations/zh_CN/security/tpm/tpm_ftpm_tee.rst
new file mode 100644
index 000000000000..5901eee32563
--- /dev/null
+++ b/Documentation/translations/zh_CN/security/tpm/tpm_ftpm_tee.rst
@@ -0,0 +1,31 @@
+.. SPDX-License-Identifier: GPL-2.0
+.. include:: ../../disclaimer-zh_CN.rst
+
+:Original: Documentation/security/tpm/tpm_ftpm_tee.rst
+
+:翻译:
+ 赵硕 Shuo Zhao <zhaoshuo@cqsoftware.com.cn>
+
+===========
+固件TPM驱动
+===========
+
+本文档æè¿°äº†å›ºä»¶å¯ä¿¡å¹³å°æ¨¡å—(fTPM)设备驱动。
+
+介ç»
+====
+
+è¯¥é©±åŠ¨ç¨‹åºæ˜¯ç”¨äºŽARMçš„TrustZone环境中实现的固件的适é…器。该驱动
+程åºå…许程åºä»¥ä¸Žç¡¬ä»¶TPM相åŒçš„æ–¹å¼ä¸ŽTPM进行交互。
+
+设计
+====
+
+该驱动程åºå……当一个薄层,传递命令到固件实现的TPM并接收其å“应。驱动
+ç¨‹åºæœ¬èº«å¹¶ä¸åŒ…å«å¤ªå¤šé€»è¾‘ï¼Œæ›´åƒæ˜¯å›ºä»¶ä¸Žå†…æ ¸/用户空间之间的一个管é“。
+
+固件本身基于以下论文:
+https://www.microsoft.com/en-us/research/wp-content/uploads/2017/06/ftpm1.pdf
+
+当驱动程åºè¢«åŠ è½½æ—¶ï¼Œå®ƒä¼šå‘用户空间暴露 ``/dev/tpmX`` 字符设备,å…许
+用户空间通过该设备与固件TPM进行通信。
diff --git a/Documentation/translations/zh_CN/security/tpm/tpm_tis.rst b/Documentation/translations/zh_CN/security/tpm/tpm_tis.rst
new file mode 100644
index 000000000000..0fb009f93e10
--- /dev/null
+++ b/Documentation/translations/zh_CN/security/tpm/tpm_tis.rst
@@ -0,0 +1,43 @@
+.. SPDX-License-Identifier: GPL-2.0
+.. include:: ../../disclaimer-zh_CN.rst
+
+:Original: Documentation/security/tpm/tpm_tis.rst
+
+:翻译:
+ 赵硕 Shuo Zhao <zhaoshuo@cqsoftware.com.cn>
+
+TPM FIFO接å£é©±åЍ
+================
+
+TCG PTPè§„èŒƒå®šä¹‰äº†ä¸¤ç§æŽ¥å£ç±»åž‹ï¼šFIFOå’ŒCRB。å‰è€…基于顺åºçš„读写æ“作,
+åŽè€…基于包å«å®Œæ•´å‘½ä»¤æˆ–å“应的缓冲区。
+
+FIFO(先进先出)接å£è¢«tpm_tis_coreä¾èµ–的驱动程åºä½¿ç”¨ã€‚最åˆï¼ŒLinuxåª
+有一个å为tpm_tisçš„é©±åŠ¨ï¼Œè¦†ç›–äº†å†…å­˜æ˜ å°„ï¼ˆå³ MMIO)接å£ï¼Œä½†åŽæ¥å®ƒè¢«
+扩展以支æŒTCG标准所支æŒçš„å…¶ä»–ç‰©ç†æŽ¥å£ã€‚
+
+由于历å²åŽŸå› ï¼Œæœ€åˆçš„MMIO驱动被称为tpm_tis,而FIFO驱动的框架被命å为
+tpm_tis_core。在tpm_tis中的“tisâ€åŽç¼€æ¥è‡ªTPM接å£è§„范,这是针对TPM1.x
+芯片的硬件接å£è§„范。
+
+通信基于一个由TPM芯片通过硬件总线或内存映射共享的20KiB 缓冲区,具体
+å–å†³äºŽç‰©ç†æŽ¥çº¿ã€‚è¯¥ç¼“å†²åŒºè¿›ä¸€æ­¥åˆ†ä¸ºäº”ä¸ªç›¸ç­‰å¤§å°çš„4KiB缓冲区,为CPUå’Œ
+TPM之间的通信æä¾›ç­‰æ•ˆçš„寄存器集。这些通信端点在TCG术语中称为localities。
+
+当内核想è¦å‘TPM芯片å‘é€å‘½ä»¤æ—¶ï¼Œå®ƒé¦–先通过在TPM_ACCESS寄存器中设置
+requestUse使¥ä¿ç•™locality0。当访问被授予时,该ä½ç”±èŠ¯ç‰‡æ¸…é™¤ã€‚ä¸€æ—¦å®Œæˆ
+通信,内核会写入TPM_ACCESS.activeLocalityä½ã€‚这告诉芯片该本地性已被
+释放。
+
+待处ç†çš„æœ¬åœ°æ€§ç”±èŠ¯ç‰‡æŒ‰ä¼˜å…ˆçº§é™åºé€ä¸ªæœåŠ¡ï¼Œä¸€æ¬¡ä¸€ä¸ªï¼š
+
+- Locality0优先级最低。
+- Locality5优先级最高。
+
+关于localities的更多信æ¯å’Œå«ä¹‰ï¼Œè¯·å‚阅TCG PC客户端平å°TPM é…置文件规范的第3.2节。
+
+å‚考文献
+========
+
+TCG PC客户端平å°TPMé…置文件(PTP)规范
+https://trustedcomputinggroup.org/resource/pc-client-platform-tpm-profile-ptp-specification/
diff --git a/Documentation/translations/zh_CN/security/tpm/tpm_vtpm_proxy.rst b/Documentation/translations/zh_CN/security/tpm/tpm_vtpm_proxy.rst
new file mode 100644
index 000000000000..bc92cfb684c3
--- /dev/null
+++ b/Documentation/translations/zh_CN/security/tpm/tpm_vtpm_proxy.rst
@@ -0,0 +1,51 @@
+.. SPDX-License-Identifier: GPL-2.0
+.. include:: ../../disclaimer-zh_CN.rst
+
+:Original: Documentation/security/tpm/tpm_vtpm_proxy.rst
+
+:翻译:
+ 赵硕 Shuo Zhao <zhaoshuo@cqsoftware.com.cn>
+
+==========================
+Linux容器的虚拟TPM代ç†é©±åЍ
+==========================
+
+| 作者:
+| Stefan Berger <stefanb@linux.vnet.ibm.com>
+
+本文档æè¿°äº†ç”¨äºŽLinux容器的虚拟å¯ä¿¡å¹³å°æ¨¡å—(vTPM)代ç†è®¾å¤‡é©±åŠ¨ã€‚
+
+介ç»
+====
+
+这项工作的目标是为æ¯ä¸ªLinux容器æä¾›TPM功能。这使得程åºèƒ½å¤Ÿåƒä¸Žç‰©ç†ç³»ç»Ÿ
+上的TPM交互一样,与容器中的TPM进行交互。æ¯ä¸ªå®¹å™¨éƒ½ä¼šèŽ·å¾—ä¸€ä¸ªå”¯ä¸€çš„ã€æ¨¡
+拟的软件TPM。
+
+设计
+====
+
+为了使æ¯ä¸ªå®¹å™¨éƒ½èƒ½ä½¿ç”¨æ¨¡æ‹Ÿçš„软件TPMï¼Œå®¹å™¨ç®¡ç†æ ˆéœ€è¦åˆ›å»ºä¸€å¯¹è®¾å¤‡ï¼Œå…¶ä¸­
+包括一个客户端TPM字符设备 ``/dev/tpmX`` (X=0,1,2...)和一个‘æœåŠ¡å™¨ç«¯â€™
+文件æè¿°ç¬¦ã€‚当文件æè¿°ç¬¦ä¼ è¢«é€’ç»™TPM模拟器时,å‰è€…通过创建具有适当主次
+设备å·çš„字符设备被移入容器,然åŽï¼Œå®¹å™¨å†…的软件å¯ä»¥ä½¿ç”¨å­—符设备å‘é€TPM
+命令,模拟器将通过文件æè¿°ç¬¦æŽ¥æ”¶è¿™äº›å‘½ä»¤ï¼Œå¹¶ç”¨å®ƒæ¥å‘é€å“应。
+
+为了支æŒè¿™ä¸€ç‚¹ï¼Œè™šæ‹ŸTPM代ç†é©±åŠ¨ç¨‹åºæä¾›äº†ä¸€ä¸ªè®¾å¤‡ ``/dev/vtpmx`` ,该设备
+用于通过ioctl创建设备对。ioctl将其作为é…置设备的输入标志,例如这些标志指示
+TPMæ¨¡æ‹Ÿå™¨æ˜¯å¦æ”¯æŒTPM1.2或TPM2功能。ioctl的结果是返回‘æœåŠ¡å™¨ç«¯â€™çš„æ–‡ä»¶æè¿°ç¬¦
+以åŠåˆ›å»ºçš„字符设备的主次设备å·ã€‚此外,还会返回TPM字符设备的编å·ã€‚例如,如果
+创建了 ``/dev/tpm10`` ,则返回编å·ï¼ˆ ``dev_num`` )10。
+
+一旦设备被创建,驱动程åºå°†ç«‹å³å°è¯•与TPM进行通信。æ¥è‡ªé©±åŠ¨ç¨‹åºçš„æ‰€æœ‰å‘½ä»¤
+都å¯ä»¥ä»Žioctl返回的文件æè¿°ç¬¦ä¸­è¯»å–。这些命令应该立å³å¾—到å“应。
+
+UAPI
+====
+
+该API在以下内核代ç ä¸­ï¼š
+
+include/uapi/linux/vtpm_proxy.h
+drivers/char/tpm/tpm_vtpm_proxy.c
+
+函数:vtpmx_ioc_new_dev
diff --git a/Documentation/translations/zh_CN/security/tpm/xen-tpmfront.rst b/Documentation/translations/zh_CN/security/tpm/xen-tpmfront.rst
new file mode 100644
index 000000000000..fa085d98a99b
--- /dev/null
+++ b/Documentation/translations/zh_CN/security/tpm/xen-tpmfront.rst
@@ -0,0 +1,114 @@
+.. SPDX-License-Identifier: GPL-2.0
+.. include:: ../../disclaimer-zh_CN.rst
+
+:Original: Documentation/security/tpm/xen-tpmfront.rst
+
+:翻译:
+ 赵硕 Shuo Zhao <zhaoshuo@cqsoftware.com.cn>
+
+================
+Xen的虚拟TPM接å£
+================
+
+作者:Matthew Fioravante (JHUAPL), Daniel De Graaf (NSA)
+
+本文档æè¿°äº†ç”¨äºŽXen的虚拟å¯ä¿¡å¹³å°æ¨¡å—(vTPM)å­ç³»ç»Ÿã€‚å‡å®šè¯»è€…熟悉
+Xenå’ŒLinux的构建和安装,并对TPMå’ŒvTPM概念有基本的ç†è§£ã€‚
+
+介ç»
+----
+
+这项工作的目标是为虚拟客户æ“作系统(在Xen中称为DomU)æä¾›TPM功能。这使得
+程åºèƒ½å¤Ÿåƒä¸Žç‰©ç†ç³»ç»Ÿä¸Šçš„TPM交互一样,与虚拟系统中的TPM进行交互。æ¯ä¸ªå®¢æˆ·
+æ“ä½œç³»ç»Ÿéƒ½ä¼šèŽ·å¾—ä¸€ä¸ªå”¯ä¸€çš„ã€æ¨¡æ‹Ÿçš„软件TPM。然而,vTPM的所有秘密(如密钥ã€
+NVRAM 等)由vTPM管ç†åŸŸè¿›è¡Œç®¡ç†ï¼Œè¯¥åŸŸå°†è¿™äº›ç§˜å¯†å°å­˜åˆ°ç‰©ç†TPM中。如果创建这
+些域(管ç†åŸŸã€vTPM域和客户域)的过程是å¯ä¿¡çš„,vTPMå­ç³»ç»Ÿå°±èƒ½å°†æ ¹æ¤äºŽç¡¬ä»¶
+TPM的信任链扩展到Xen中的虚拟机。vTPMçš„æ¯ä¸ªä¸»è¦ç»„件都作为一个独立的域实现,
+从而通过虚拟机监控程åºï¼ˆhypervisor)æä¾›å®‰å…¨éš”离。
+
+这个mini-os vTPM å­ç³»ç»Ÿæ˜¯å»ºç«‹åœ¨IBMå’ŒIntelå…¬å¸ä¹‹å‰çš„vTPM工作基础上的。
+
+
+设计概述
+--------
+
+vTPM的架构æè¿°å¦‚下::
+
+ +------------------+
+ | Linux DomU | ...
+ | | ^ |
+ | v | |
+ | xen-tpmfront |
+ +------------------+
+ | ^
+ v |
+ +------------------+
+ | mini-os/tpmback |
+ | | ^ |
+ | v | |
+ | vtpm-stubdom | ...
+ | | ^ |
+ | v | |
+ | mini-os/tpmfront |
+ +------------------+
+ | ^
+ v |
+ +------------------+
+ | mini-os/tpmback |
+ | | ^ |
+ | v | |
+ | vtpmmgr-stubdom |
+ | | ^ |
+ | v | |
+ | mini-os/tpm_tis |
+ +------------------+
+ | ^
+ v |
+ +------------------+
+ | Hardware TPM |
+ +------------------+
+
+* Linux DomU:
+ 希望使用vTPM的基于Linux的客户机。å¯èƒ½æœ‰å¤šä¸ªè¿™æ ·çš„实例。
+
+* xen-tpmfront.ko:
+ Linux内核虚拟TPMå‰ç«¯é©±åŠ¨ç¨‹åºã€‚该驱动程åºä¸ºåŸºäºŽLinuxçš„DomUæä¾›
+ vTPM访问。
+
+* mini-os/tpmback:
+ Mini-os TPMåŽç«¯é©±åŠ¨ç¨‹åºã€‚Linuxå‰ç«¯é©±åŠ¨ç¨‹åºé€šè¿‡è¯¥åŽç«¯é©±åŠ¨ç¨‹åºè¿ž
+ 接,以便在Linux DomU和其vTPM之间进行通信。该驱动程åºè¿˜è¢«
+ vtpmmgr-stubdom用于与vtpm-stubdom通信。
+
+* vtpm-stubdom:
+ 一个实现vTPMçš„mini-os存根域。æ¯ä¸ªæ­£åœ¨è¿è¡Œçš„vtpm-stubdom实例与系统
+ 上的逻辑vTPM之间有一一对应的关系。vTPMå¹³å°é…置寄存器(PCRs)通常都
+ åˆå§‹åŒ–为零。
+
+* mini-os/tpmfront:
+ Mini-os TPMå‰ç«¯é©±åŠ¨ç¨‹åºã€‚vTPM mini-os域vtpm-stubdom使用该驱动程åº
+ 与vtpmmgr-stubdom通信。此驱动程åºè¿˜ç”¨äºŽä¸ŽvTPM域通信的mini-os域,例
+ 如 pv-grub。
+
+* vtpmmgr-stubdom:
+ 一个实现vTPM管ç†å™¨çš„mini-osåŸŸã€‚ç³»ç»Ÿä¸­åªæœ‰ä¸€ä¸ªvTPM管ç†å™¨ï¼Œå¹¶ä¸”在整个
+ 机器生命周期内应一直è¿è¡Œã€‚此域调节对系统中物ç†TPMçš„è®¿é—®ï¼Œå¹¶ç¡®ä¿æ¯ä¸ª
+ vTPMçš„æŒä¹…状æ€ã€‚
+
+* mini-os/tpm_tis:
+ Mini-osTPM1.2版本TPM 接å£è§„范(TIS)驱动程åºã€‚该驱动程åºç”±vtpmmgr-stubdom
+ 用于直接与硬件TPM通信。通信通过将硬件内存页映射到vtpmmgr-stubdomæ¥å®žçŽ°ã€‚
+
+* 硬件TPM:
+ 固定在主æ¿ä¸Šçš„ç‰©ç† TPM。
+
+与Xen的集æˆ
+-----------
+
+vTPM驱动程åºçš„æ”¯æŒå·²åœ¨Xen4.3中通过libxl工具堆栈添加。有关设置vTPMå’ŒvTPM
+管ç†å™¨å­˜æ ¹åŸŸçš„详细信æ¯ï¼Œè¯·å‚è§Xen文档(docs/misc/vtpm.txt)。一旦存根域
+è¿è¡Œï¼Œä¸Žç£ç›˜æˆ–网络设备相åŒï¼ŒvTPM设备将在域的é…置文件中进行设置
+
+为了使用诸如IMAï¼ˆå®Œæ•´æ€§æµ‹é‡æž¶æž„)等需è¦åœ¨initrd之å‰åŠ è½½TPM的功能,必须将
+xen-tpmfront驱动程åºç¼–译到内核中。如果ä¸ä½¿ç”¨è¿™äº›åŠŸèƒ½ï¼Œé©±åŠ¨ç¨‹åºå¯ä»¥ä½œä¸º
+模å—编译,并åƒå¾€å¸¸ä¸€æ ·åŠ è½½ã€‚
diff --git a/Documentation/translations/zh_TW/admin-guide/README.rst b/Documentation/translations/zh_TW/admin-guide/README.rst
index a6e34c200ea3..0b038074d9d1 100644
--- a/Documentation/translations/zh_TW/admin-guide/README.rst
+++ b/Documentation/translations/zh_TW/admin-guide/README.rst
@@ -149,7 +149,7 @@ Linux內核6.x版本 <http://kernel.org/>
"make xconfig" 基於Qtçš„é…置工具。
- "make gconfig" 基於GTK+çš„é…置工具。
+ "make gconfig" 基於GTKçš„é…置工具。
"make oldconfig" åŸºæ–¼ç¾æœ‰çš„ ./.config æ–‡ä»¶é¸æ“‡æ‰€æœ‰é¸é …,並詢å•
æ–°é…ç½®é¸é …。
diff --git a/Documentation/translations/zh_TW/process/submit-checklist.rst b/Documentation/translations/zh_TW/process/submit-checklist.rst
index 0ecb187753e4..a0cb91a6945f 100644
--- a/Documentation/translations/zh_TW/process/submit-checklist.rst
+++ b/Documentation/translations/zh_TW/process/submit-checklist.rst
@@ -85,8 +85,8 @@ Linuxå…§æ ¸è£œä¸æäº¤æª¢æŸ¥å–®
17) æ‰€æœ‰æ–°çš„æ¨¡å¡Šåƒæ•¸éƒ½è¨˜éŒ„在 ``MODULE_PARM_DESC()``
18) 所有新的用戶空間接å£éƒ½è¨˜éŒ„在 ``Documentation/ABI/`` 中。有關詳細信æ¯ï¼Œ
- è«‹åƒé–± ``Documentation/ABI/README`` 。更改用戶空間接å£çš„è£œä¸æ‡‰è©²æŠ„é€
- linux-api@vger.kernel.org。
+ è«‹åƒé–± Documentation/admin-guide/abi.rst (或 ``Documentation/ABI/README``)。
+ 更改用戶空間接å£çš„è£œä¸æ‡‰è©²æŠ„é€ linux-api@vger.kernel.org\ 。
19) 已通éŽè‡³å°‘注入slabå’Œpage分é…失敗進行檢查。請åƒé–± ``Documentation/fault-injection/`` 。
如果新代碼是實質性的,那麼添加å­ç³»çµ±ç‰¹å®šçš„æ•…障注入å¯èƒ½æ˜¯åˆé©çš„。
diff --git a/Documentation/usb/gadget-testing.rst b/Documentation/usb/gadget-testing.rst
index bf555c2270f5..1998dc146c56 100644
--- a/Documentation/usb/gadget-testing.rst
+++ b/Documentation/usb/gadget-testing.rst
@@ -1050,7 +1050,7 @@ Its attributes are:
midi1_num_groups The number of groups for MIDI 1.0 (0-16)
ui_hint UI-hint of this FB
0: unknown, 1: receiver, 2: sender, 3: both
- midi_ci_verison Supported MIDI-CI version number (8 bit)
+ midi_ci_version Supported MIDI-CI version number (8 bit)
is_midi1 Legacy MIDI 1.0 device (0-2)
0: MIDI 2.0 device,
1: MIDI 1.0 without restriction, or
diff --git a/Documentation/userspace-api/accelerators/ocxl.rst b/Documentation/userspace-api/accelerators/ocxl.rst
index db7570d5e50d..4e213af70237 100644
--- a/Documentation/userspace-api/accelerators/ocxl.rst
+++ b/Documentation/userspace-api/accelerators/ocxl.rst
@@ -3,8 +3,11 @@ OpenCAPI (Open Coherent Accelerator Processor Interface)
========================================================
OpenCAPI is an interface between processors and accelerators. It aims
-at being low-latency and high-bandwidth. The specification is
-developed by the `OpenCAPI Consortium <http://opencapi.org/>`_.
+at being low-latency and high-bandwidth.
+
+The specification was developed by the OpenCAPI Consortium, and is now
+available from the `Compute Express Link Consortium
+<https://computeexpresslink.org/resource/opencapi-specification-archive/>`_.
It allows an accelerator (which could be an FPGA, ASICs, ...) to access
the host memory coherently, using virtual addresses. An OpenCAPI
diff --git a/Documentation/userspace-api/dma-buf-heaps.rst b/Documentation/userspace-api/dma-buf-heaps.rst
new file mode 100644
index 000000000000..535f49047ce6
--- /dev/null
+++ b/Documentation/userspace-api/dma-buf-heaps.rst
@@ -0,0 +1,25 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+==============================
+Allocating dma-buf using heaps
+==============================
+
+Dma-buf Heaps are a way for userspace to allocate dma-buf objects. They are
+typically used to allocate buffers from a specific allocation pool, or to share
+buffers across frameworks.
+
+Heaps
+=====
+
+A heap represents a specific allocator. The Linux kernel currently supports the
+following heaps:
+
+ - The ``system`` heap allocates virtually contiguous, cacheable, buffers.
+
+ - The ``cma`` heap allocates physically contiguous, cacheable,
+ buffers. Only present if a CMA region is present. Such a region is
+ usually created either through the `cma` kernel command line
+ parameter, a memory region Device-Tree node with the
+ `linux,cma-default` property set, or through the `CMA_SIZE_MBYTES` or
+ `CMA_SIZE_PERCENTAGE` Kconfig options. Depending on the platform, it
+ might be called ``reserved``, ``linux,cma``, or ``default-pool``.
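+
+As a rough illustration of the allocation flow, each heap is exposed to
+userspace as a character device (typically under ``/dev/dma_heap/``), and a
+buffer is allocated with the ``DMA_HEAP_IOCTL_ALLOC`` ioctl declared in
+``<linux/dma-heap.h>``. The sketch below is a minimal example rather than part
+of the ABI description; it assumes a ``system`` heap node exists on the
+running system and keeps error handling to a minimum::
+
+  #include <fcntl.h>
+  #include <string.h>
+  #include <sys/ioctl.h>
+  #include <unistd.h>
+  #include <linux/dma-heap.h>
+
+  /* Returns a dma-buf fd on success, -1 on failure. */
+  int alloc_from_system_heap(unsigned long len)
+  {
+      struct dma_heap_allocation_data data;
+      int heap_fd, ret;
+
+      heap_fd = open("/dev/dma_heap/system", O_RDONLY | O_CLOEXEC);
+      if (heap_fd < 0)
+          return -1;
+
+      memset(&data, 0, sizeof(data));
+      data.len = len;                     /* requested buffer size in bytes */
+      data.fd_flags = O_RDWR | O_CLOEXEC; /* flags for the new dma-buf fd */
+
+      ret = ioctl(heap_fd, DMA_HEAP_IOCTL_ALLOC, &data);
+      close(heap_fd);
+
+      /* data.fd now holds the dma-buf, ready to be mmap()ed or shared. */
+      return ret < 0 ? -1 : (int)data.fd;
+  }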
diff --git a/Documentation/userspace-api/index.rst b/Documentation/userspace-api/index.rst
index b1395d94b3fd..9cbe4390c872 100644
--- a/Documentation/userspace-api/index.rst
+++ b/Documentation/userspace-api/index.rst
@@ -44,6 +44,7 @@ Devices and I/O
:maxdepth: 1
accelerators/ocxl
+ dma-buf-heaps
dma-buf-alloc-exchange
gpio/index
iommufd
diff --git a/Documentation/userspace-api/ioctl/ioctl-number.rst b/Documentation/userspace-api/ioctl/ioctl-number.rst
index 6d1465315df3..3cf4183b767e 100644
--- a/Documentation/userspace-api/ioctl/ioctl-number.rst
+++ b/Documentation/userspace-api/ioctl/ioctl-number.rst
@@ -85,6 +85,8 @@ Code Seq# Include File Comments
0x10 20-2F arch/s390/include/uapi/asm/hypfs.h
0x12 all linux/fs.h BLK* ioctls
linux/blkpg.h
+ linux/blkzoned.h
+ linux/blk-crypto.h
0x15 all linux/fs.h FS_IOC_* ioctls
0x1b all InfiniBand Subsystem
<http://infiniband.sourceforge.net/>
@@ -370,10 +372,12 @@ Code Seq# Include File Comments
0xB7 all uapi/linux/remoteproc_cdev.h <mailto:linux-remoteproc@vger.kernel.org>
0xB7 all uapi/linux/nsfs.h <mailto:Andrei Vagin <avagin@openvz.org>>
0xB8 01-02 uapi/misc/mrvl_cn10k_dpi.h Marvell CN10K DPI driver
+0xB8 all uapi/linux/mshv.h Microsoft Hyper-V /dev/mshv driver
+ <mailto:linux-hyperv@vger.kernel.org>
0xC0 00-0F linux/usb/iowarrior.h
0xCA 00-0F uapi/misc/cxl.h
0xCA 10-2F uapi/misc/ocxl.h
-0xCA 80-BF uapi/scsi/cxlflash_ioctl.h
+0xCA 80-BF uapi/scsi/cxlflash_ioctl.h Dead since 6.14
0xCB 00-1F CBM serial IEC bus in development:
<mailto:michael.klein@puffin.lb.shuttle.de>
0xCC 00-0F drivers/misc/ibmvmc.h pseries VMC driver
diff --git a/Documentation/userspace-api/landlock.rst b/Documentation/userspace-api/landlock.rst
index ad587f53fe41..1d0c2c15c22e 100644
--- a/Documentation/userspace-api/landlock.rst
+++ b/Documentation/userspace-api/landlock.rst
@@ -8,7 +8,7 @@ Landlock: unprivileged access control
=====================================
:Author: Mickaël Salaün
-:Date: January 2025
+:Date: March 2025
The goal of Landlock is to enable restriction of ambient rights (e.g. global
filesystem or network access) for a set of processes. Because Landlock
@@ -317,33 +317,32 @@ IPC scoping
-----------
Similar to the implicit `Ptrace restrictions`_, we may want to further restrict
-interactions between sandboxes. Each Landlock domain can be explicitly scoped
-for a set of actions by specifying it on a ruleset. For example, if a
-sandboxed process should not be able to :manpage:`connect(2)` to a
-non-sandboxed process through abstract :manpage:`unix(7)` sockets, we can
-specify such a restriction with ``LANDLOCK_SCOPE_ABSTRACT_UNIX_SOCKET``.
-Moreover, if a sandboxed process should not be able to send a signal to a
-non-sandboxed process, we can specify this restriction with
-``LANDLOCK_SCOPE_SIGNAL``.
-
-A sandboxed process can connect to a non-sandboxed process when its domain is
-not scoped. If a process's domain is scoped, it can only connect to sockets
-created by processes in the same scope.
-Moreover, if a process is scoped to send signal to a non-scoped process, it can
-only send signals to processes in the same scope.
-
-A connected datagram socket behaves like a stream socket when its domain is
-scoped, meaning if the domain is scoped after the socket is connected, it can
-still :manpage:`send(2)` data just like a stream socket. However, in the same
-scenario, a non-connected datagram socket cannot send data (with
-:manpage:`sendto(2)`) outside its scope.
-
-A process with a scoped domain can inherit a socket created by a non-scoped
-process. The process cannot connect to this socket since it has a scoped
-domain.
-
-IPC scoping does not support exceptions, so if a domain is scoped, no rules can
-be added to allow access to resources or processes outside of the scope.
+interactions between sandboxes. Therefore, at ruleset creation time, each
+Landlock domain can restrict the scope for certain operations, so that these
+operations can only reach out to processes within the same Landlock domain or in
+a nested Landlock domain (the "scope").
+
+The operations which can be scoped are:
+
+``LANDLOCK_SCOPE_SIGNAL``
+ This limits the sending of signals to target processes which run within the
+ same or a nested Landlock domain.
+
+``LANDLOCK_SCOPE_ABSTRACT_UNIX_SOCKET``
+ This limits :manpage:`connect(2)` on abstract :manpage:`unix(7)` sockets to
+ socket addresses which were created by a process in the same or a nested
+ Landlock domain.
+
+ A :manpage:`sendto(2)` on a non-connected datagram socket is treated as if
+ it were doing an implicit :manpage:`connect(2)` and will be blocked if the
+ remote end does not stem from the same or a nested Landlock domain.
+
+ A :manpage:`sendto(2)` on a socket which was previously connected will not
+ be restricted. This works for both datagram and stream sockets.
+
+IPC scoping does not support exceptions via :manpage:`landlock_add_rule(2)`.
+If an operation is scoped within a domain, no rules can be added to allow access
+to resources or processes outside of the scope.
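+
+As a brief, non-normative sketch of how a process might opt in to both scoped
+operations (assuming the definitions above from ``<linux/landlock.h>`` and the
+syscall numbers from ``<sys/syscall.h>``; error handling omitted)::
+
+  #include <linux/landlock.h>
+  #include <sys/prctl.h>
+  #include <sys/syscall.h>
+  #include <unistd.h>
+
+  static void scope_self(void)
+  {
+      const struct landlock_ruleset_attr attr = {
+          /* Restrict signals and abstract unix sockets to this domain. */
+          .scoped = LANDLOCK_SCOPE_SIGNAL |
+                    LANDLOCK_SCOPE_ABSTRACT_UNIX_SOCKET,
+      };
+      const int ruleset_fd = syscall(SYS_landlock_create_ruleset, &attr,
+                                     sizeof(attr), 0);
+
+      /* Required for unprivileged callers before restricting ourselves. */
+      prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
+      syscall(SYS_landlock_restrict_self, ruleset_fd, 0);
+      close(ruleset_fd);
+  }
+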
Truncating files
----------------
@@ -595,6 +594,16 @@ Starting with the Landlock ABI version 6, it is possible to restrict
:manpage:`signal(7)` sending by setting ``LANDLOCK_SCOPE_SIGNAL`` to the
``scoped`` ruleset attribute.
+Logging (ABI < 7)
+-----------------
+
+Starting with the Landlock ABI version 7, it is possible to control logging of
+Landlock audit events with the ``LANDLOCK_RESTRICT_SELF_LOG_SAME_EXEC_OFF``,
+``LANDLOCK_RESTRICT_SELF_LOG_NEW_EXEC_ON``, and
+``LANDLOCK_RESTRICT_SELF_LOG_SUBDOMAINS_OFF`` flags passed to
+sys_landlock_restrict_self(). See Documentation/admin-guide/LSM/landlock.rst
+for more details on audit.
+
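+As a small illustrative fragment (assuming the flag names above and a ruleset
+file descriptor obtained as in the examples earlier in this document), the
+flags are passed as the second argument of the restriction call::
+
+  /* Enforce the ruleset, but do not log denials for the current exec. */
+  syscall(SYS_landlock_restrict_self, ruleset_fd,
+          LANDLOCK_RESTRICT_SELF_LOG_SAME_EXEC_OFF);
+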
.. _kernel_support:
Kernel support
@@ -683,9 +692,16 @@ fine-grained restrictions). Moreover, their complexity can lead to security
issues, especially when untrusted processes can manipulate them (cf.
`Controlling access to user namespaces <https://lwn.net/Articles/673597/>`_).
+How to disable Landlock audit records?
+--------------------------------------
+
+You might want to put in place filters as explained here:
+Documentation/admin-guide/LSM/landlock.rst
+
Additional documentation
========================
+* Documentation/admin-guide/LSM/landlock.rst
* Documentation/security/landlock.rst
* https://landlock.io
diff --git a/Documentation/userspace-api/media/drivers/uvcvideo.rst b/Documentation/userspace-api/media/drivers/uvcvideo.rst
index a290f9fadae9..dbb30ad389ae 100644
--- a/Documentation/userspace-api/media/drivers/uvcvideo.rst
+++ b/Documentation/userspace-api/media/drivers/uvcvideo.rst
@@ -181,6 +181,7 @@ Argument: struct uvc_xu_control_mapping
UVC_CTRL_DATA_TYPE_BOOLEAN Boolean
UVC_CTRL_DATA_TYPE_ENUM Enumeration
UVC_CTRL_DATA_TYPE_BITMASK Bitmask
+ UVC_CTRL_DATA_TYPE_RECT Rectangular area
UVCIOC_CTRL_QUERY - Query a UVC XU control
@@ -255,3 +256,66 @@ Argument: struct uvc_xu_control_query
__u8 query Request code to send to the device
__u16 size Control data size (in bytes)
__u8 *data Control value
+
+
+Driver-specific V4L2 controls
+-----------------------------
+
+The uvcvideo driver implements the following UVC-specific controls:
+
+``V4L2_CID_UVC_REGION_OF_INTEREST_RECT (struct)``
+ This control determines the region of interest (ROI). ROI is a
+ rectangular area represented by a struct :c:type:`v4l2_rect`. The
+ rectangle is in global sensor coordinates using pixel units. It is
+ independent of the field of view, not impacted by any cropping or
+ scaling.
+
+ Use ``V4L2_CTRL_WHICH_MIN_VAL`` and ``V4L2_CTRL_WHICH_MAX_VAL`` to query
+ the range of rectangle sizes.
+
+ Setting a ROI allows the camera to optimize the capture for the region.
+ The value of the ``V4L2_CID_UVC_REGION_OF_INTEREST_AUTO`` control determines
+ the detailed behavior.
+
+ An example of the use of this control can be found in the
+ `Chrome OS USB camera HAL
+ <https://chromium.googlesource.com/chromiumos/platform2/+/refs/heads/release-R121-15699.B/camera/hal/usb/>`__.
+
+
+``V4L2_CID_UVC_REGION_OF_INTEREST_AUTO (bitmask)``
+ This determines which, if any, on-board features should track to the
+ Region of Interest specified by the current value of
+ ``V4L2_CID_UVC_REGION_OF_INTEREST_RECT``.
+
+ Max value is a mask indicating all supported Auto Controls.
+
+.. flat-table::
+ :header-rows: 0
+ :stub-columns: 0
+
+ * - ``V4L2_UVC_REGION_OF_INTEREST_AUTO_EXPOSURE``
+ - Setting this bit causes automatic exposure to track the region of
+ interest instead of the whole image.
+ * - ``V4L2_UVC_REGION_OF_INTEREST_AUTO_IRIS``
+ - Setting this bit causes automatic iris to track the region of interest
+ instead of the whole image.
+ * - ``V4L2_UVC_REGION_OF_INTEREST_AUTO_WHITE_BALANCE``
+ - Setting this bit causes automatic white balance to track the region
+ of interest instead of the whole image.
+ * - ``V4L2_UVC_REGION_OF_INTEREST_AUTO_FOCUS``
+ - Setting this bit causes automatic focus adjustment to track the region
+ of interest instead of the whole image.
+ * - ``V4L2_UVC_REGION_OF_INTEREST_AUTO_FACE_DETECT``
+ - Setting this bit causes automatic face detection to track the region of
+ interest instead of the whole image.
+ * - ``V4L2_UVC_REGION_OF_INTEREST_AUTO_DETECT_AND_TRACK``
+ - Setting this bit enables automatic face detection and tracking. The
+ current value of ``V4L2_CID_UVC_REGION_OF_INTEREST_RECT`` may be updated by
+ the driver.
+ * - ``V4L2_UVC_REGION_OF_INTEREST_AUTO_IMAGE_STABILIZATION``
+ - Setting this bit enables automatic image stabilization. The
+ current value of ``V4L2_CID_UVC_REGION_OF_INTEREST_RECT`` may be updated by
+ the driver.
+ * - ``V4L2_UVC_REGION_OF_INTEREST_AUTO_HIGHER_QUALITY``
+ - Setting this bit enables automatic capture of the specified region
+ with higher quality if possible.
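+
+As a rough, non-authoritative sketch of driving these controls through the
+extended-control API (assuming ``V4L2_CID_UVC_REGION_OF_INTEREST_RECT`` and
+the ``p_rect`` union member are available in the UAPI headers of the running
+kernel; error handling omitted)::
+
+  #include <string.h>
+  #include <sys/ioctl.h>
+  #include <linux/videodev2.h>
+  #include <linux/uvcvideo.h>
+
+  static int set_uvc_roi(int video_fd, struct v4l2_rect roi)
+  {
+      struct v4l2_ext_control ctrl;
+      struct v4l2_ext_controls ctrls;
+
+      memset(&ctrl, 0, sizeof(ctrl));
+      ctrl.id = V4L2_CID_UVC_REGION_OF_INTEREST_RECT;
+      ctrl.size = sizeof(roi);
+      ctrl.p_rect = &roi;          /* global sensor coordinates, in pixels */
+
+      memset(&ctrls, 0, sizeof(ctrls));
+      ctrls.which = V4L2_CTRL_WHICH_CUR_VAL;
+      ctrls.count = 1;
+      ctrls.controls = &ctrl;
+
+      return ioctl(video_fd, VIDIOC_S_EXT_CTRLS, &ctrls);
+  }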
diff --git a/Documentation/userspace-api/media/rc/rc-sysfs-nodes.rst b/Documentation/userspace-api/media/rc/rc-sysfs-nodes.rst
index 34d6a0a1f4d3..70b5966aaff8 100644
--- a/Documentation/userspace-api/media/rc/rc-sysfs-nodes.rst
+++ b/Documentation/userspace-api/media/rc/rc-sysfs-nodes.rst
@@ -6,7 +6,7 @@
Remote Controller's sysfs nodes
*******************************
-As defined at ``Documentation/ABI/testing/sysfs-class-rc``, those are
+As defined at Documentation/ABI/testing/sysfs-class-rc, those are
the sysfs nodes that control the Remote Controllers:
diff --git a/Documentation/userspace-api/media/v4l/vidioc-g-ext-ctrls.rst b/Documentation/userspace-api/media/v4l/vidioc-g-ext-ctrls.rst
index 4d56c0528ad7..b8698b85bd80 100644
--- a/Documentation/userspace-api/media/v4l/vidioc-g-ext-ctrls.rst
+++ b/Documentation/userspace-api/media/v4l/vidioc-g-ext-ctrls.rst
@@ -199,6 +199,10 @@ still cause this situation.
- ``p_area``
- A pointer to a struct :c:type:`v4l2_area`. Valid if this control is
of type ``V4L2_CTRL_TYPE_AREA``.
+ * - struct :c:type:`v4l2_rect` *
+ - ``p_rect``
+ - A pointer to a struct :c:type:`v4l2_rect`. Valid if this control is
+ of type ``V4L2_CTRL_TYPE_RECT``.
* - struct :c:type:`v4l2_ctrl_h264_sps` *
- ``p_h264_sps``
- A pointer to a struct :c:type:`v4l2_ctrl_h264_sps`. Valid if this control is
@@ -334,14 +338,26 @@ still cause this situation.
- Which value of the control to get/set/try.
* - :cspan:`2` ``V4L2_CTRL_WHICH_CUR_VAL`` will return the current value of
the control, ``V4L2_CTRL_WHICH_DEF_VAL`` will return the default
- value of the control and ``V4L2_CTRL_WHICH_REQUEST_VAL`` indicates that
- these controls have to be retrieved from a request or tried/set for
- a request. In the latter case the ``request_fd`` field contains the
+ value of the control, ``V4L2_CTRL_WHICH_MIN_VAL`` will return the minimum
+ value of the control, and ``V4L2_CTRL_WHICH_MAX_VAL`` will return the maximum
+ value of the control. ``V4L2_CTRL_WHICH_REQUEST_VAL`` indicates that
+ the control value has to be retrieved from a request or tried/set for
+ a request. In that case the ``request_fd`` field contains the
file descriptor of the request that should be used. If the device
does not support requests, then ``EACCES`` will be returned.
- When using ``V4L2_CTRL_WHICH_DEF_VAL`` be aware that you can only
- get the default value of the control, you cannot set or try it.
+ When using ``V4L2_CTRL_WHICH_DEF_VAL``, ``V4L2_CTRL_WHICH_MIN_VAL``
+ or ``V4L2_CTRL_WHICH_MAX_VAL`` be aware that you can only get the
+ default/minimum/maximum value of the control, you cannot set or try it.
+
+ Whether a control supports querying the minimum and maximum values using
+ ``V4L2_CTRL_WHICH_MIN_VAL`` and ``V4L2_CTRL_WHICH_MAX_VAL`` is indicated
+ by the ``V4L2_CTRL_FLAG_HAS_WHICH_MIN_MAX`` flag. Most non-compound
+ control types support this. For controls with compound types, the
+ definition of minimum/maximum values is provided by
+ the control documentation. If a compound control does not document the
+ meaning of minimum/maximum value, then querying the minimum or maximum
+ value will result in the error code -EINVAL.
For backwards compatibility you can also use a control class here
(see :ref:`ctrl-class`). In that case all controls have to
diff --git a/Documentation/userspace-api/media/v4l/vidioc-queryctrl.rst b/Documentation/userspace-api/media/v4l/vidioc-queryctrl.rst
index 4d38acafe8e1..3549417c7feb 100644
--- a/Documentation/userspace-api/media/v4l/vidioc-queryctrl.rst
+++ b/Documentation/userspace-api/media/v4l/vidioc-queryctrl.rst
@@ -441,6 +441,16 @@ See also the examples in :ref:`control`.
- n/a
- A struct :c:type:`v4l2_area`, containing the width and the height
of a rectangular area. Units depend on the use case.
+ * - ``V4L2_CTRL_TYPE_RECT``
+ - n/a
+ - n/a
+ - n/a
+ - A struct :c:type:`v4l2_rect`, containing a rectangle described by
+ the position of its top-left corner, the width and the height. Units
+ depend on the use case. Support for ``V4L2_CTRL_WHICH_MIN_VAL`` and
+ ``V4L2_CTRL_WHICH_MAX_VAL`` is optional and depends on the
+ ``V4L2_CTRL_FLAG_HAS_WHICH_MIN_MAX`` flag. See the documentation of
+ the specific control on how to interpret the minimum and maximum values.
* - ``V4L2_CTRL_TYPE_H264_SPS``
- n/a
- n/a
@@ -657,6 +667,10 @@ See also the examples in :ref:`control`.
``dims[0]``. So setting the control with a differently sized
array will change the ``elems`` field when the control is
queried afterwards.
+ * - ``V4L2_CTRL_FLAG_HAS_WHICH_MIN_MAX``
+ - 0x1000
+ - This control supports getting minimum and maximum values using
+ vidioc_g_ext_ctrls with V4L2_CTRL_WHICH_MIN/MAX_VAL.
Return Value
============
diff --git a/Documentation/userspace-api/media/videodev2.h.rst.exceptions b/Documentation/userspace-api/media/videodev2.h.rst.exceptions
index 429b5cdf05c3..35d3456cc812 100644
--- a/Documentation/userspace-api/media/videodev2.h.rst.exceptions
+++ b/Documentation/userspace-api/media/videodev2.h.rst.exceptions
@@ -150,6 +150,7 @@ replace symbol V4L2_CTRL_TYPE_HEVC_SPS :c:type:`v4l2_ctrl_type`
replace symbol V4L2_CTRL_TYPE_HEVC_PPS :c:type:`v4l2_ctrl_type`
replace symbol V4L2_CTRL_TYPE_HEVC_SLICE_PARAMS :c:type:`v4l2_ctrl_type`
replace symbol V4L2_CTRL_TYPE_AREA :c:type:`v4l2_ctrl_type`
+replace symbol V4L2_CTRL_TYPE_RECT :c:type:`v4l2_ctrl_type`
replace symbol V4L2_CTRL_TYPE_FWHT_PARAMS :c:type:`v4l2_ctrl_type`
replace symbol V4L2_CTRL_TYPE_VP8_FRAME :c:type:`v4l2_ctrl_type`
replace symbol V4L2_CTRL_TYPE_VP9_COMPRESSED_HDR :c:type:`v4l2_ctrl_type`
@@ -395,6 +396,7 @@ replace define V4L2_CTRL_FLAG_HAS_PAYLOAD control-flags
replace define V4L2_CTRL_FLAG_EXECUTE_ON_WRITE control-flags
replace define V4L2_CTRL_FLAG_MODIFY_LAYOUT control-flags
replace define V4L2_CTRL_FLAG_DYNAMIC_ARRAY control-flags
+replace define V4L2_CTRL_FLAG_HAS_WHICH_MIN_MAX control-flags
replace define V4L2_CTRL_FLAG_NEXT_CTRL control
replace define V4L2_CTRL_FLAG_NEXT_COMPOUND control
@@ -569,6 +571,8 @@ ignore define V4L2_CTRL_DRIVER_PRIV
ignore define V4L2_CTRL_MAX_DIMS
ignore define V4L2_CTRL_WHICH_CUR_VAL
ignore define V4L2_CTRL_WHICH_DEF_VAL
+ignore define V4L2_CTRL_WHICH_MIN_VAL
+ignore define V4L2_CTRL_WHICH_MAX_VAL
ignore define V4L2_CTRL_WHICH_REQUEST_VAL
ignore define V4L2_OUT_CAP_CUSTOM_TIMINGS
ignore define V4L2_CID_MAX_CTRLS
diff --git a/Documentation/virt/kvm/api.rst b/Documentation/virt/kvm/api.rst
index 2b52eb77e29c..1f8625b7646a 100644
--- a/Documentation/virt/kvm/api.rst
+++ b/Documentation/virt/kvm/api.rst
@@ -1000,6 +1000,10 @@ blobs in userspace. When the guest writes the MSR, kvm copies one
page of a blob (32- or 64-bit, depending on the vcpu mode) to guest
memory.
+The MSR index must be in the range [0x40000000, 0x4fffffff], i.e. must reside
+in the range that is unofficially reserved for use by hypervisors. The min/max
+values are enumerated via KVM_XEN_MSR_MIN_INDEX and KVM_XEN_MSR_MAX_INDEX.
+
::
struct kvm_xen_hvm_config {
@@ -8258,6 +8262,24 @@ KVM exits with the register state of either the L1 or L2 guest
depending on which executed at the time of an exit. Userspace must
take care to differentiate between these cases.
+7.37 KVM_CAP_ARM_WRITABLE_IMP_ID_REGS
+-------------------------------------
+
+:Architectures: arm64
+:Target: VM
+:Parameters: None
+:Returns: 0 on success, -EINVAL if vCPUs have been created before enabling this
+ capability.
+
+This capability changes the behavior of the registers that identify a PE
+implementation of the Arm architecture: MIDR_EL1, REVIDR_EL1, and AIDR_EL1.
+By default, these registers are visible to userspace but treated as invariant.
+
+When this capability is enabled, KVM allows userspace to change the
+aforementioned registers before the first KVM_RUN. These registers are VM
+scoped, meaning that the same set of values is presented on all vCPUs in a
+given VM.
+
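+As a hedged fragment (not taken from the KVM sources) showing how a VMM might
+use the capability, assuming ``vm_fd`` and ``vcpu_fd`` are already set up and
+using the ``ARM64_SYS_REG()`` encoding helper from the arm64 KVM UAPI headers;
+the MIDR value is purely illustrative::
+
+  #include <linux/kvm.h>
+  #include <sys/ioctl.h>
+
+  struct kvm_enable_cap cap = { .cap = KVM_CAP_ARM_WRITABLE_IMP_ID_REGS };
+  __u64 midr = 0x410fd490;                  /* illustrative value only */
+  struct kvm_one_reg reg = {
+      .id = ARM64_SYS_REG(3, 0, 0, 0, 0),   /* MIDR_EL1 (op0,op1,CRn,CRm,op2) */
+      .addr = (__u64)(unsigned long)&midr,
+  };
+
+  /* Must be enabled on the VM before any vCPU is created. */
+  ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
+  /* ... create vCPUs ..., then, before the first KVM_RUN: */
+  ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
+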
8. Other capabilities.
======================
diff --git a/Documentation/virt/kvm/arm/fw-pseudo-registers.rst b/Documentation/virt/kvm/arm/fw-pseudo-registers.rst
index b90fd0b0fa66..d78b53b05dfc 100644
--- a/Documentation/virt/kvm/arm/fw-pseudo-registers.rst
+++ b/Documentation/virt/kvm/arm/fw-pseudo-registers.rst
@@ -116,7 +116,7 @@ The pseudo-firmware bitmap register are as follows:
ARM DEN0057A.
* KVM_REG_ARM_VENDOR_HYP_BMAP:
- Controls the bitmap of the Vendor specific Hypervisor Service Calls.
+ Controls the bitmap of the Vendor specific Hypervisor Service Calls[0-63].
The following bits are accepted:
@@ -127,6 +127,19 @@ The pseudo-firmware bitmap register are as follows:
Bit-1: KVM_REG_ARM_VENDOR_HYP_BIT_PTP:
The bit represents the Precision Time Protocol KVM service.
+* KVM_REG_ARM_VENDOR_HYP_BMAP_2:
+ Controls the bitmap of the Vendor specific Hypervisor Service Calls[64-127].
+
+ The following bits are accepted:
+
+ Bit-0: KVM_REG_ARM_VENDOR_HYP_BIT_DISCOVER_IMPL_VER
+ This represents the ARM_SMCCC_VENDOR_HYP_KVM_DISCOVER_IMPL_VER_FUNC_ID
+ function-id. This is reset to 0.
+
+ Bit-1: KVM_REG_ARM_VENDOR_HYP_BIT_DISCOVER_IMPL_CPUS
+ This represents the ARM_SMCCC_VENDOR_HYP_KVM_DISCOVER_IMPL_CPUS_FUNC_ID
+ function-id. This is reset to 0.
+
Errors:
======= =============================================================
diff --git a/Documentation/virt/kvm/arm/hypercalls.rst b/Documentation/virt/kvm/arm/hypercalls.rst
index af7bc2c2e0cb..7400a89aabf8 100644
--- a/Documentation/virt/kvm/arm/hypercalls.rst
+++ b/Documentation/virt/kvm/arm/hypercalls.rst
@@ -142,3 +142,62 @@ region is equal to the memory protection granule advertised by
| | | +---------------------------------------------+
| | | | ``INVALID_PARAMETER (-3)`` |
+---------------------+----------+----+---------------------------------------------+
+
+``ARM_SMCCC_VENDOR_HYP_KVM_DISCOVER_IMPL_VER_FUNC_ID``
+-------------------------------------------------------
+Request the target CPU implementation version information and the number of target
+implementations for the Guest VM.
+
++---------------------+-------------------------------------------------------------+
+| Presence: | Optional; KVM/ARM64 Guests only |
++---------------------+-------------------------------------------------------------+
+| Calling convention: | HVC64 |
++---------------------+----------+--------------------------------------------------+
+| Function ID: | (uint32) | 0xC6000040 |
++---------------------+----------+--------------------------------------------------+
+| Arguments: | None |
++---------------------+----------+----+---------------------------------------------+
+| Return Values: | (int64) | R0 | ``SUCCESS (0)`` |
+| | | +---------------------------------------------+
+| | | | ``NOT_SUPPORTED (-1)`` |
+| +----------+----+---------------------------------------------+
+| | (uint64) | R1 | Bits [63:32] Reserved/Must be zero |
+| | | +---------------------------------------------+
+| | | | Bits [31:16] Major version |
+| | | +---------------------------------------------+
+| | | | Bits [15:0] Minor version |
+| +----------+----+---------------------------------------------+
+| | (uint64) | R2 | Number of target implementations |
+| +----------+----+---------------------------------------------+
+| | (uint64) | R3 | Reserved / Must be zero |
++---------------------+----------+----+---------------------------------------------+
+
+``ARM_SMCCC_VENDOR_HYP_KVM_DISCOVER_IMPL_CPUS_FUNC_ID``
+-------------------------------------------------------
+
+Request the target CPU implementation information for the Guest VM. The Guest kernel
+will use this information to enable the associated errata.
+
++---------------------+-------------------------------------------------------------+
+| Presence: | Optional; KVM/ARM64 Guests only |
++---------------------+-------------------------------------------------------------+
+| Calling convention: | HVC64 |
++---------------------+----------+--------------------------------------------------+
+| Function ID: | (uint32) | 0xC6000041 |
++---------------------+----------+----+---------------------------------------------+
+| Arguments: | (uint64) | R1 | selected implementation index |
+| +----------+----+---------------------------------------------+
+| | (uint64) | R2 | Reserved / Must be zero |
+| +----------+----+---------------------------------------------+
+| | (uint64) | R3 | Reserved / Must be zero |
++---------------------+----------+----+---------------------------------------------+
+| Return Values: | (int64) | R0 | ``SUCCESS (0)`` |
+| | | +---------------------------------------------+
+| | | | ``INVALID_PARAMETER (-3)`` |
+| +----------+----+---------------------------------------------+
+| | (uint64) | R1 | MIDR_EL1 of the selected implementation |
+| +----------+----+---------------------------------------------+
+| | (uint64) | R2 | REVIDR_EL1 of the selected implementation |
+| +----------+----+---------------------------------------------+
+| | (uint64) | R3 | AIDR_EL1 of the selected implementation |
++---------------------+----------+----+---------------------------------------------+
diff --git a/Documentation/virt/kvm/devices/arm-vgic-its.rst b/Documentation/virt/kvm/devices/arm-vgic-its.rst
index e053124f77c4..0f971f83205a 100644
--- a/Documentation/virt/kvm/devices/arm-vgic-its.rst
+++ b/Documentation/virt/kvm/devices/arm-vgic-its.rst
@@ -126,7 +126,8 @@ KVM_DEV_ARM_VGIC_GRP_ITS_REGS
ITS Restore Sequence:
---------------------
-The following ordering must be followed when restoring the GIC and the ITS:
+The following ordering must be followed when restoring the GIC, ITS, and
+KVM_IRQFD assignments:
a) restore all guest memory and create vcpus
b) restore all redistributors
@@ -139,6 +140,8 @@ d) restore the ITS in the following order:
3. Load the ITS table data (KVM_DEV_ARM_ITS_RESTORE_TABLES)
4. Restore GITS_CTLR
+e) restore KVM_IRQFD assignments for MSIs
+
Then vcpus can be started.
ITS Table ABI REV0:
diff --git a/Documentation/virt/kvm/devices/arm-vgic-v3.rst b/Documentation/virt/kvm/devices/arm-vgic-v3.rst
index 5817edb4e046..e860498b1e35 100644
--- a/Documentation/virt/kvm/devices/arm-vgic-v3.rst
+++ b/Documentation/virt/kvm/devices/arm-vgic-v3.rst
@@ -291,8 +291,18 @@ Groups:
| Aff3 | Aff2 | Aff1 | Aff0 |
Errors:
-
======= =============================================
-EINVAL vINTID is not multiple of 32 or info field is
not VGIC_LEVEL_INFO_LINE_LEVEL
======= =============================================
+
+ KVM_DEV_ARM_VGIC_GRP_MAINT_IRQ
+ Attributes:
+
+ The attr field of kvm_device_attr encodes the following values:
+
+ bits: | 31 .... 5 | 4 .... 0 |
+ values: | RES0 | vINTID |
+
+ The vINTID specifies which interrupt is generated when the vGIC
+ must generate a maintenance interrupt. This must be a PPI.
diff --git a/Documentation/virt/kvm/locking.rst b/Documentation/virt/kvm/locking.rst
index c56d5f26c750..ae8bce7fecbe 100644
--- a/Documentation/virt/kvm/locking.rst
+++ b/Documentation/virt/kvm/locking.rst
@@ -196,7 +196,7 @@ writable between reading spte and updating spte. Like below case:
The Dirty bit is lost in this case.
In order to avoid this kind of issue, we always treat the spte as "volatile"
-if it can be updated out of mmu-lock [see spte_has_volatile_bits()]; it means
+if it can be updated out of mmu-lock [see spte_needs_atomic_update()]; it means
the spte is always atomically updated in this case.
3) flush tlbs due to spte updated
@@ -212,7 +212,7 @@ function to update spte (present -> present).
Since the spte is "volatile" if it can be updated out of mmu-lock, we always
atomically update the spte and the race caused by fast page fault can be avoided.
-See the comments in spte_has_volatile_bits() and mmu_spte_update().
+See the comments in spte_needs_atomic_update() and mmu_spte_update().
Lockless Access Tracking:
diff --git a/Documentation/wmi/acpi-interface.rst b/Documentation/wmi/acpi-interface.rst
index 06fb7fcf4413..f1b28835d23c 100644
--- a/Documentation/wmi/acpi-interface.rst
+++ b/Documentation/wmi/acpi-interface.rst
@@ -89,6 +89,9 @@ Similar to the ``WExx`` ACPI methods, except that it controls data collection
instead of events and thus the last two characters of the ACPI method name are
the method ID of the data block to enable/disable.
+Those ACPI methods are also called before setting data blocks to match the
+behaviour of the Windows driver.
+
_WED ACPI method
----------------
diff --git a/Documentation/wmi/driver-development-guide.rst b/Documentation/wmi/driver-development-guide.rst
index f7e1089a0559..99ef21fc1c1e 100644
--- a/Documentation/wmi/driver-development-guide.rst
+++ b/Documentation/wmi/driver-development-guide.rst
@@ -96,6 +96,10 @@ on a given machine.
Because of this, WMI drivers should use the state container design pattern as described in
Documentation/driver-api/driver-model/design-patterns.rst.
+.. warning:: Using both GUID-based and non-GUID-based functions for querying WMI data blocks and
+ handling WMI events simultaneously on the same device is guaranteed to corrupt the
+ WMI device state and might lead to erratic behaviour.
+
WMI method drivers
------------------
diff --git a/MAINTAINERS b/MAINTAINERS
index 8865ae596dd4..7ebde7c59321 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -124,6 +124,7 @@ F: include/net/ieee80211_radiotap.h
F: include/net/iw_handler.h
F: include/net/wext.h
F: include/uapi/linux/nl80211.h
+N: include/uapi/linux/nl80211-.*
F: include/uapi/linux/wireless.h
F: net/wireless/
@@ -355,7 +356,7 @@ ACPI PMIC DRIVERS
M: "Rafael J. Wysocki" <rafael@kernel.org>
M: Len Brown <lenb@kernel.org>
R: Andy Shevchenko <andy@kernel.org>
-R: Mika Westerberg <mika.westerberg@linux.intel.com>
+R: Mika Westerberg <westeri@kernel.org>
L: linux-acpi@vger.kernel.org
S: Supported
Q: https://patchwork.kernel.org/project/linux-acpi/list/
@@ -514,7 +515,7 @@ F: drivers/hwmon/adm1029.c
ADM8211 WIRELESS DRIVER
L: linux-wireless@vger.kernel.org
S: Orphan
-F: drivers/net/wireless/admtek/adm8211.*
+F: drivers/net/wireless/admtek/
ADP1050 HARDWARE MONITOR DRIVER
M: Radu Sabau <radu.sabau@analog.com>
@@ -726,7 +727,7 @@ L: linux-mediatek@lists.infradead.org (moderated for non-subscribers)
L: netdev@vger.kernel.org
S: Maintained
F: Documentation/devicetree/bindings/net/airoha,en7581-eth.yaml
-F: drivers/net/ethernet/mediatek/airoha_eth.c
+F: drivers/net/ethernet/airoha/
AIROHA PCIE PHY DRIVER
M: Lorenzo Bianconi <lorenzo@kernel.org>
@@ -792,10 +793,12 @@ F: Documentation/admin-guide/perf/alibaba_pmu.rst
F: drivers/perf/alibaba_uncore_drw_pmu.c
ALIENWARE WMI DRIVER
+M: Kurt Borja <kuurtb@gmail.com>
+L: platform-driver-x86@vger.kernel.org
L: Dell.Client.Kernel@dell.com
S: Maintained
F: Documentation/wmi/devices/alienware-wmi.rst
-F: drivers/platform/x86/dell/alienware-wmi.c
+F: drivers/platform/x86/dell/alienware-wmi*
ALLEGRO DVT VIDEO IP CORE DRIVER
M: Michael Tretter <m.tretter@pengutronix.de>
@@ -2212,6 +2215,7 @@ ARM/APPLE MACHINE SUPPORT
M: Sven Peter <sven@svenpeter.dev>
M: Janne Grunau <j@jannau.net>
R: Alyssa Rosenzweig <alyssa@rosenzweig.io>
+R: Neal Gompa <neal@gompa.dev>
L: asahi@lists.linux.dev
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
@@ -2236,6 +2240,7 @@ F: Documentation/devicetree/bindings/pci/apple,pcie.yaml
F: Documentation/devicetree/bindings/pinctrl/apple,pinctrl.yaml
F: Documentation/devicetree/bindings/power/apple*
F: Documentation/devicetree/bindings/pwm/apple,s5l-fpwm.yaml
+F: Documentation/devicetree/bindings/spi/apple,spi.yaml
F: Documentation/devicetree/bindings/watchdog/apple,wdt.yaml
F: arch/arm64/boot/dts/apple/
F: drivers/bluetooth/hci_bcm4377.c
@@ -2253,6 +2258,7 @@ F: drivers/nvmem/apple-efuses.c
F: drivers/pinctrl/pinctrl-apple-gpio.c
F: drivers/pwm/pwm-apple.c
F: drivers/soc/apple/*
+F: drivers/spi/spi-apple.c
F: drivers/watchdog/apple_wdt.c
F: include/dt-bindings/interrupt-controller/apple-aic.h
F: include/dt-bindings/pinctrl/apple.h
@@ -2475,6 +2481,7 @@ X: arch/arm64/boot/dts/freescale/qoriq-*
X: drivers/media/i2c/
N: imx
N: mxs
+N: \bmxc[^\d]
ARM/FREESCALE LAYERSCAPE ARM ARCHITECTURE
M: Shawn Guo <shawnguo@kernel.org>
@@ -2751,6 +2758,13 @@ F: arch/arm/boot/dts/socionext/milbeaut*
F: arch/arm/mach-milbeaut/
N: milbeaut
+ARM/MORELLO PLATFORM
+M: Vincenzo Frascino <vincenzo.frascino@arm.com>
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+S: Maintained
+F: Documentation/devicetree/bindings/arm/arm,morello.yaml
+F: arch/arm64/boot/dts/arm/morello*
+
ARM/MOXA ART SOC
M: Krzysztof Kozlowski <krzk@kernel.org>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -3070,6 +3084,7 @@ F: drivers/*/*s3c24*
F: drivers/*/*s3c64xx*
F: drivers/*/*s5pv210*
F: drivers/clocksource/samsung_pwm_timer.c
+F: drivers/firmware/samsung/
F: drivers/mailbox/exynos-mailbox.c
F: drivers/memory/samsung/
F: drivers/pwm/pwm-samsung.c
@@ -3549,7 +3564,7 @@ M: Eddie James <eajames@linux.ibm.com>
L: linux-media@vger.kernel.org
L: openbmc@lists.ozlabs.org (moderated for non-subscribers)
S: Maintained
-F: Documentation/devicetree/bindings/media/aspeed-video.txt
+F: Documentation/devicetree/bindings/media/aspeed,video-engine.yaml
F: drivers/media/platform/aspeed/
ASUS EC HARDWARE MONITOR DRIVER
@@ -4010,10 +4025,10 @@ F: include/vdso/bits.h
F: lib/bitmap-str.c
F: lib/bitmap.c
F: lib/cpumask.c
-F: lib/cpumask_kunit.c
F: lib/find_bit.c
F: lib/find_bit_benchmark.c
F: lib/test_bitmap.c
+F: lib/tests/cpumask_kunit.c
F: tools/include/linux/bitfield.h
F: tools/include/linux/bitmap.h
F: tools/include/linux/bits.h
@@ -4022,6 +4037,11 @@ F: tools/include/vdso/bits.h
F: tools/lib/bitmap.c
F: tools/lib/find_bit.c
+BITMAP API BINDINGS [RUST]
+M: Yury Norov <yury.norov@gmail.com>
+S: Maintained
+F: rust/helpers/cpumask.c
+
BITOPS API
M: Yury Norov <yury.norov@gmail.com>
R: Rasmus Villemoes <linux@rasmusvillemoes.dk>
@@ -5398,6 +5418,8 @@ F: Documentation/dev-tools/checkpatch.rst
CHINESE DOCUMENTATION
M: Alex Shi <alexs@kernel.org>
M: Yanteng Si <siyanteng@loongson.cn>
+R: Dongliang Mu <dzm91@hust.edu.cn>
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/alexs/linux.git
S: Maintained
F: Documentation/translations/zh_CN/
@@ -5474,16 +5496,19 @@ S: Maintained
F: drivers/platform/chrome/cros_ec_uart.c
CHROMEOS EC USB PD NOTIFY DRIVER
-M: Prashant Malani <pmalani@chromium.org>
+M: Łukasz Bartosik <ukaszb@chromium.org>
+M: Andrei Kuchynski <akuchynski@chromium.org>
+M: Jameson Thies <jthies@google.com>
L: chrome-platform@lists.linux.dev
S: Maintained
F: drivers/platform/chrome/cros_usbpd_notify.c
F: include/linux/platform_data/cros_usbpd_notify.h
CHROMEOS EC USB TYPE-C DRIVER
-M: Prashant Malani <pmalani@chromium.org>
M: Benson Leung <bleung@chromium.org>
M: Abhishek Pandit-Subedi <abhishekpandit@chromium.org>
+M: Jameson Thies <jthies@google.com>
+M: Andrei Kuchynski <akuchynski@chromium.org>
L: chrome-platform@lists.linux.dev
S: Maintained
F: drivers/platform/chrome/cros_ec_typec.*
@@ -5506,6 +5531,8 @@ F: drivers/watchdog/cros_ec_wdt.c
CHROMEOS UCSI DRIVER
M: Abhishek Pandit-Subedi <abhishekpandit@chromium.org>
M: Łukasz Bartosik <ukaszb@chromium.org>
+M: Jameson Thies <jthies@google.com>
+M: Andrei Kuchynski <akuchynski@chromium.org>
L: chrome-platform@lists.linux.dev
S: Maintained
F: drivers/usb/typec/ucsi/cros_ec_ucsi.c
@@ -5775,6 +5802,7 @@ X: drivers/clk/clkdev.c
COMMON INTERNET FILE SYSTEM CLIENT (CIFS and SMB3)
M: Steve French <sfrench@samba.org>
+M: Steve French <smfrench@gmail.com>
R: Paulo Alcantara <pc@manguebit.com> (DFS, global name space)
R: Ronnie Sahlberg <ronniesahlberg@gmail.com> (directory leases, sparse files)
R: Shyam Prasad N <sprasad@microsoft.com> (multichannel)
@@ -5866,6 +5894,7 @@ CONGATEC BOARD CONTROLLER MFD DRIVER
M: Thomas Richard <thomas.richard@bootlin.com>
S: Maintained
F: drivers/gpio/gpio-cgbc.c
+F: drivers/hwmon/cgbc-hwmon.c
F: drivers/i2c/busses/i2c-cgbc.c
F: drivers/mfd/cgbc-core.c
F: drivers/watchdog/cgbc_wdt.c
@@ -6090,9 +6119,11 @@ F: include/linux/platform_data/cpuidle-exynos.h
CPUIDLE DRIVER - ARM PSCI
M: Lorenzo Pieralisi <lpieralisi@kernel.org>
M: Sudeep Holla <sudeep.holla@arm.com>
+M: Ulf Hansson <ulf.hansson@linaro.org>
L: linux-pm@vger.kernel.org
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Supported
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/ulfh/linux-pm.git
F: drivers/cpuidle/cpuidle-psci.c
CPUIDLE DRIVER - ARM PSCI PM DOMAIN
@@ -6135,6 +6166,7 @@ F: Documentation/staging/crc*
F: arch/*/lib/crc*
F: include/linux/crc*
F: lib/crc*
+F: scripts/gen-crc-consts.py
CREATIVE SB0540
M: Bastien Nocera <hadess@hadess.net>
@@ -6142,6 +6174,16 @@ L: linux-input@vger.kernel.org
S: Maintained
F: drivers/hid/hid-creative-sb0540.c
+CREDENTIALS
+M: Paul Moore <paul@paul-moore.com>
+R: Serge Hallyn <sergeh@kernel.org>
+L: linux-security-module@vger.kernel.org
+S: Supported
+T: git https://git.kernel.org/pub/scm/linux/kernel/git/pcmoore/lsm.git
+F: include/linux/cred.h
+F: kernel/cred.c
+F: Documentation/security/credentials.rst
+
INTEL CRPS COMMON REDUNDANT PSU DRIVER
M: Ninad Palsule <ninad@linux.ibm.com>
L: linux-hwmon@vger.kernel.org
@@ -6206,7 +6248,7 @@ F: Documentation/process/cve.rst
CW1200 WLAN driver
S: Orphan
-F: drivers/net/wireless/st/cw1200/
+F: drivers/net/wireless/st/
F: include/linux/platform_data/net-cw1200.h
CX18 VIDEO4LINUX DRIVER
@@ -6321,15 +6363,6 @@ F: drivers/misc/cxl/
F: include/misc/cxl*
F: include/uapi/misc/cxl.h
-CXLFLASH (IBM Coherent Accelerator Processor Interface CAPI Flash) SCSI DRIVER
-M: Manoj N. Kumar <manoj@linux.ibm.com>
-M: Uma Krishnan <ukrishn@linux.ibm.com>
-L: linux-scsi@vger.kernel.org
-S: Obsolete
-F: Documentation/arch/powerpc/cxlflash.rst
-F: drivers/scsi/cxlflash/
-F: include/uapi/scsi/cxlflash_ioctl.h
-
CYBERPRO FB DRIVER
M: Russell King <linux@armlinux.org.uk>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -6913,6 +6946,7 @@ L: dri-devel@lists.freedesktop.org
L: linaro-mm-sig@lists.linaro.org (moderated for non-subscribers)
S: Maintained
T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
+F: Documentation/userspace-api/dma-buf-heaps.rst
F: drivers/dma-buf/dma-heap.c
F: drivers/dma-buf/heaps/*
F: include/linux/dma-heap.h
@@ -8223,6 +8257,7 @@ F: drivers/edac/aspeed_edac.c
EDAC-BLUEFIELD
M: Shravan Kumar Ramani <shravankr@nvidia.com>
+M: David Thompson <davthompson@nvidia.com>
S: Supported
F: drivers/edac/bluefield_edac.c
@@ -8527,6 +8562,15 @@ M: Maxim Levitsky <maximlevitsky@gmail.com>
S: Maintained
F: drivers/media/rc/ene_ir.*
+ENERGY MODEL
+M: Lukasz Luba <lukasz.luba@arm.com>
+M: "Rafael J. Wysocki" <rafael@kernel.org>
+L: linux-pm@vger.kernel.org
+S: Maintained
+F: kernel/power/energy_model.c
+F: include/linux/energy_model.h
+F: Documentation/power/energy-model.rst
+
EPAPR HYPERVISOR BYTE CHANNEL DEVICE DRIVER
M: Laurentiu Tudor <laurentiu.tudor@nxp.com>
L: linuxppc-dev@lists.ozlabs.org
@@ -8588,12 +8632,14 @@ F: Documentation/networking/devlink/etas_es58x.rst
F: drivers/net/can/usb/etas_es58x/
ETHERNET BRIDGE
-M: Roopa Prabhu <roopa@nvidia.com>
M: Nikolay Aleksandrov <razor@blackwall.org>
+M: Ido Schimmel <idosch@nvidia.com>
L: bridge@lists.linux.dev
L: netdev@vger.kernel.org
S: Maintained
W: http://www.linuxfoundation.org/en/Net:Bridge
+F: include/linux/if_bridge.h
+F: include/uapi/linux/if_bridge.h
F: include/linux/netfilter_bridge/
F: net/bridge/
@@ -8642,7 +8688,6 @@ F: rust/kernel/net/phy/reg.rs
EXEC & BINFMT API, ELF
M: Kees Cook <kees@kernel.org>
-R: Eric Biederman <ebiederm@xmission.com>
L: linux-mm@kvack.org
S: Supported
T: git git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux.git for-next/execve
@@ -8899,6 +8944,9 @@ F: include/linux/fs.h
F: include/linux/fs_types.h
F: include/uapi/linux/fs.h
F: include/uapi/linux/openat2.h
+F: Documentation/driver-api/early-userspace/buffer-format.rst
+F: init/do_mounts*
+F: init/*initramfs*
FILESYSTEMS [EXPORTFS]
M: Chuck Lever <chuck.lever@oracle.com>
@@ -9068,9 +9116,9 @@ L: linux-hardening@vger.kernel.org
S: Supported
T: git git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux.git for-next/hardening
F: include/linux/fortify-string.h
-F: lib/fortify_kunit.c
-F: lib/memcpy_kunit.c
F: lib/test_fortify/*
+F: lib/tests/fortify_kunit.c
+F: lib/tests/memcpy_kunit.c
K: \bunsafe_memcpy\b
K: \b__NO_FORTIFY\b
@@ -9442,14 +9490,11 @@ F: include/linux/fscrypt.h
F: include/uapi/linux/fscrypt.h
FSI SUBSYSTEM
-M: Jeremy Kerr <jk@ozlabs.org>
-M: Joel Stanley <joel@jms.id.au>
-R: Alistar Popple <alistair@popple.id.au>
-R: Eddie James <eajames@linux.ibm.com>
+M: Eddie James <eajames@linux.ibm.com>
+R: Ninad Palsule <ninad@linux.ibm.com>
L: linux-fsi@lists.ozlabs.org
S: Supported
Q: http://patchwork.ozlabs.org/project/linux-fsi/list/
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/joel/fsi.git
F: drivers/fsi/
F: include/linux/fsi*.h
F: include/trace/events/fsi*.h
@@ -9758,9 +9803,9 @@ F: include/linux/string.h
F: include/linux/string_choices.h
F: include/linux/string_helpers.h
F: lib/string.c
-F: lib/string_kunit.c
F: lib/string_helpers.c
-F: lib/string_helpers_kunit.c
+F: lib/tests/string_helpers_kunit.c
+F: lib/tests/string_kunit.c
F: scripts/coccinelle/api/string_choices.cocci
GENERIC UIO DRIVER FOR PCI DEVICES
@@ -9780,6 +9825,7 @@ F: include/asm-generic/vdso/vsyscall.h
F: include/vdso/
F: kernel/time/vsyscall.c
F: lib/vdso/
+F: tools/testing/selftests/vDSO/
GENWQE (IBM Generic Workqueue Card)
M: Frank Haverkamp <haver@linux.ibm.com>
@@ -9829,8 +9875,12 @@ L: linux-media@vger.kernel.org
S: Maintained
F: drivers/media/usb/go7007/
+GOCONTROLL MODULINE MODULE SLOT
+M: Maud Spierings <maudspierings@gocontroll.com>
+S: Maintained
+F: Documentation/devicetree/bindings/connector/gocontroll,moduline-module-slot.yaml
+
GOODIX TOUCHSCREEN
-M: Bastien Nocera <hadess@hadess.net>
M: Hans de Goede <hdegoede@redhat.com>
L: linux-input@vger.kernel.org
S: Maintained
@@ -10316,6 +10366,14 @@ F: drivers/hid/hid-sensor-*
F: drivers/iio/*/hid-*
F: include/linux/hid-sensor-*
+HID UNIVERSAL PIDFF DRIVER
+M: Tomasz Pakuła <tomasz.pakula.oficjalny@gmail.com>
+M: Oleg Makarenko <oleg@makarenk.ooo>
+L: linux-input@vger.kernel.org
+S: Maintained
+B: https://github.com/JacKeTUs/universal-pidff/issues
+F: drivers/hid/hid-universal-pidff.c
+
HID VRC-2 CAR CONTROLLER DRIVER
M: Marcus Folkesson <marcus.folkesson@gmail.com>
L: linux-input@vger.kernel.org
@@ -10691,6 +10749,12 @@ W: http://www.st.com/
F: Documentation/devicetree/bindings/iio/humidity/st,hts221.yaml
F: drivers/iio/humidity/hts221*
+HTU31 Hardware Temperature and Humidity Sensor
+M: Andrei Lalaev <andrey.lalaev@gmail.com>
+L: linux-hwmon@vger.kernel.org
+S: Maintained
+F: drivers/hwmon/htu31.c
+
HUAWEI ETHERNET DRIVER
M: Cai Huoqing <cai.huoqing@linux.dev>
L: netdev@vger.kernel.org
@@ -10698,6 +10762,13 @@ S: Maintained
F: Documentation/networking/device_drivers/ethernet/huawei/hinic.rst
F: drivers/net/ethernet/huawei/hinic/
+HUAWEI MATEBOOK E GO EMBEDDED CONTROLLER DRIVER
+M: Pengyu Luo <mitltlatltl@gmail.com>
+S: Maintained
+F: Documentation/devicetree/bindings/platform/huawei,gaokun-ec.yaml
+F: drivers/platform/arm64/huawei-gaokun-ec.c
+F: include/linux/platform_data/huawei-gaokun-ec.h
+
HUGETLB SUBSYSTEM
M: Muchun Song <muchun.song@linux.dev>
L: linux-mm@kvack.org
@@ -11142,7 +11213,7 @@ S: Maintained
F: drivers/i2c/busses/i2c-icy.c
IDEAPAD LAPTOP EXTRAS DRIVER
-M: Ike Panhc <ike.pan@canonical.com>
+M: Ike Panhc <ikepanhc@gmail.com>
L: platform-driver-x86@vger.kernel.org
S: Maintained
W: http://launchpad.net/ideapad-laptop
@@ -11332,6 +11403,13 @@ L: linux-fbdev@vger.kernel.org
S: Orphan
F: drivers/video/fbdev/imsttfb.c
+INA233 HARDWARE MONITOR DRIVERS
+M: Leo Yang <leo.yang.sy0@gmail.com>
+L: linux-hwmon@vger.kernel.org
+S: Maintained
+F: Documentation/hwmon/ina233.rst
+F: drivers/hwmon/pmbus/ina233.c
+
INDEX OF FURTHER KERNEL DOCUMENTATION
M: Carlos Bilbao <carlos.bilbao@kernel.org>
S: Maintained
@@ -11677,12 +11755,14 @@ F: Documentation/driver-api/crypto/iaa/iaa-crypto.rst
F: drivers/crypto/intel/iaa/*
INTEL IDLE DRIVER
-M: Jacob Pan <jacob.jun.pan@linux.intel.com>
-M: Len Brown <lenb@kernel.org>
+M: Rafael J. Wysocki <rafael@kernel.org>
+M: Artem Bityutskiy <artem.bityutskiy@linux.intel.com>
+M: Artem Bityutskiy <dedekind1@gmail.com>
+R: Len Brown <lenb@kernel.org>
L: linux-pm@vger.kernel.org
S: Supported
B: https://bugzilla.kernel.org
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/lenb/linux.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm.git
F: drivers/idle/intel_idle.c
INTEL IDXD DRIVER
@@ -11873,6 +11953,7 @@ L: platform-driver-x86@vger.kernel.org
S: Maintained
F: Documentation/ABI/testing/sysfs-platform-intel-pmc
F: drivers/platform/x86/intel/pmc/
+F: include/linux/platform_data/x86/intel_pmc_ipc.h
INTEL PMIC GPIO DRIVERS
M: Andy Shevchenko <andy@kernel.org>
@@ -12588,8 +12669,9 @@ F: Documentation/ABI/testing/sysfs-kernel-warn_count
F: arch/*/configs/hardening.config
F: include/linux/overflow.h
F: include/linux/randomize_kstack.h
+F: include/linux/ucopysize.h
F: kernel/configs/hardening.config
-F: lib/usercopy_kunit.c
+F: lib/tests/usercopy_kunit.c
F: mm/usercopy.c
F: security/Kconfig.hardening
K: \b(add|choose)_random_kstack_offset\b
@@ -12655,7 +12737,9 @@ F: tools/testing/selftests/
KERNEL SMB3 SERVER (KSMBD)
M: Namjae Jeon <linkinjeon@kernel.org>
+M: Namjae Jeon <linkinjeon@samba.org>
M: Steve French <sfrench@samba.org>
+M: Steve French <smfrench@gmail.com>
R: Sergey Senozhatsky <senozhatsky@chromium.org>
R: Tom Talpey <tom@talpey.com>
L: linux-cifs@vger.kernel.org
@@ -12824,9 +12908,7 @@ F: fs/kernfs/
F: include/linux/kernfs.h
KEXEC
-M: Eric Biederman <ebiederm@xmission.com>
L: kexec@lists.infradead.org
-S: Maintained
W: http://kernel.org/pub/linux/utils/kernel/kexec/
F: include/linux/kexec.h
F: include/uapi/linux/kexec.h
@@ -12872,7 +12954,7 @@ F: include/keys/trusted_dcp.h
F: security/keys/trusted-keys/trusted_dcp.c
KEYS-TRUSTED-TEE
-M: Sumit Garg <sumit.garg@linaro.org>
+M: Sumit Garg <sumit.garg@kernel.org>
L: linux-integrity@vger.kernel.org
L: keyrings@vger.kernel.org
S: Supported
@@ -12987,7 +13069,7 @@ F: Documentation/trace/kprobes.rst
F: include/asm-generic/kprobes.h
F: include/linux/kprobes.h
F: kernel/kprobes.c
-F: lib/test_kprobes.c
+F: lib/tests/test_kprobes.c
F: samples/kprobes
KS0108 LCD CONTROLLER DRIVER
@@ -13075,6 +13157,7 @@ L: linux-security-module@vger.kernel.org
S: Supported
W: https://landlock.io
T: git https://git.kernel.org/pub/scm/linux/kernel/git/mic/linux.git
+F: Documentation/admin-guide/LSM/landlock.rst
F: Documentation/security/landlock.rst
F: Documentation/userspace-api/landlock.rst
F: fs/ioctl.c
@@ -13151,6 +13234,12 @@ S: Maintained
W: http://legousb.sourceforge.net/
F: drivers/usb/misc/legousbtower.c
+LENOVO WMI HOTKEY UTILITIES DRIVER
+M: Jackie Dong <xy-jackie@139.com>
+L: platform-driver-x86@vger.kernel.org
+S: Maintained
+F: drivers/platform/x86/lenovo-wmi-hotkey-utilities.c
+
LETSKETCH HID TABLET DRIVER
M: Hans de Goede <hdegoede@redhat.com>
L: linux-input@vger.kernel.org
@@ -13317,7 +13406,7 @@ M: Mark Brown <broonie@kernel.org>
R: Matti Vaittinen <mazziesaccount@gmail.com>
F: include/linux/linear_range.h
F: lib/linear_ranges.c
-F: lib/test_linear_ranges.c
+F: lib/tests/test_linear_ranges.c
LINUX FOR POWER MACINTOSH
L: linuxppc-dev@lists.ozlabs.org
@@ -13445,7 +13534,7 @@ M: David Gow <davidgow@google.com>
L: linux-kselftest@vger.kernel.org
L: kunit-dev@googlegroups.com
S: Maintained
-F: lib/list-test.c
+F: lib/tests/list-test.c
LITEX PLATFORM
M: Karol Gugala <kgugala@antmicro.com>
@@ -13752,12 +13841,10 @@ F: drivers/hwmon/ltc4282.c
LTC4286 HARDWARE MONITOR DRIVER
M: Delphine CC Chiu <Delphine_CC_Chiu@Wiwynn.com>
-L: linux-i2c@vger.kernel.org
+L: linux-hwmon@vger.kernel.org
S: Maintained
F: Documentation/devicetree/bindings/hwmon/lltc,ltc4286.yaml
F: Documentation/hwmon/ltc4286.rst
-F: drivers/hwmon/pmbus/Kconfig
-F: drivers/hwmon/pmbus/Makefile
F: drivers/hwmon/pmbus/ltc4286.c
LTC4306 I2C MULTIPLEXER DRIVER
@@ -13905,6 +13992,7 @@ L: netdev@vger.kernel.org
S: Maintained
F: Documentation/networking/mctp.rst
F: drivers/net/mctp/
+F: include/linux/usb/mctp-usb.h
F: include/net/mctp.h
F: include/net/mctpdevice.h
F: include/net/netns/mctp.h
@@ -13994,6 +14082,7 @@ MARVELL LIBERTAS WIRELESS DRIVER
L: libertas-dev@lists.infradead.org
S: Orphan
F: drivers/net/wireless/marvell/libertas/
+F: drivers/net/wireless/marvell/libertas_tf/
MARVELL MACCHIATOBIN SUPPORT
M: Russell King <linux@armlinux.org.uk>
@@ -15030,6 +15119,7 @@ M: Ilpo Järvinen <ilpo.jarvinen@linux.intel.com>
M: Vadim Pasternak <vadimp@nvidia.com>
L: platform-driver-x86@vger.kernel.org
S: Supported
+F: Documentation/ABI/stable/sysfs-driver-mlxreg-io
F: Documentation/ABI/testing/sysfs-platform-mellanox-bootctl
F: drivers/platform/mellanox/
F: include/linux/platform_data/mlxreg.h
@@ -15100,12 +15190,6 @@ F: Documentation/leds/leds-mlxcpld.rst
F: drivers/leds/leds-mlxcpld.c
F: drivers/leds/leds-mlxreg.c
-MELLANOX PLATFORM DRIVER
-M: Vadim Pasternak <vadimp@nvidia.com>
-L: platform-driver-x86@vger.kernel.org
-S: Supported
-F: drivers/platform/x86/mlx-platform.c
-
MEMBARRIER SUPPORT
M: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
M: "Paul E. McKenney" <paulmck@kernel.org>
@@ -15634,7 +15718,7 @@ M: Claudiu Beznea <claudiu.beznea@tuxon.dev>
M: Andrei Simion <andrei.simion@microchip.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Supported
-F: Documentation/devicetree/bindings/misc/atmel-ssc.txt
+F: Documentation/devicetree/bindings/sound/atmel,at91-ssc.yaml
F: drivers/misc/atmel-ssc.c
F: include/linux/atmel-ssc.h
@@ -15663,7 +15747,7 @@ M: Ajay Singh <ajay.kathat@microchip.com>
M: Claudiu Beznea <claudiu.beznea@tuxon.dev>
L: linux-wireless@vger.kernel.org
S: Supported
-F: drivers/net/wireless/microchip/wilc1000/
+F: drivers/net/wireless/microchip/
MICROSEMI MIPS SOCS
M: Alexandre Belloni <alexandre.belloni@bootlin.com>
@@ -16449,6 +16533,23 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/wireless/wireless.git
T: git git://git.kernel.org/pub/scm/linux/kernel/git/wireless/wireless-next.git
F: Documentation/devicetree/bindings/net/wireless/
F: drivers/net/wireless/
+X: drivers/net/wireless/ath/
+X: drivers/net/wireless/broadcom/
+X: drivers/net/wireless/intel/
+X: drivers/net/wireless/intersil/
+X: drivers/net/wireless/marvell/
+X: drivers/net/wireless/mediatek/mt76/
+X: drivers/net/wireless/mediatek/mt7601u/
+X: drivers/net/wireless/microchip/
+X: drivers/net/wireless/purelifi/
+X: drivers/net/wireless/quantenna/
+X: drivers/net/wireless/ralink/
+X: drivers/net/wireless/realtek/
+X: drivers/net/wireless/rsi/
+X: drivers/net/wireless/silabs/
+X: drivers/net/wireless/st/
+X: drivers/net/wireless/ti/
+X: drivers/net/wireless/zydas/
NETWORKING [DSA]
M: Andrew Lunn <andrew@lunn.ch>
@@ -16644,6 +16745,17 @@ F: net/mptcp/
F: tools/testing/selftests/bpf/*/*mptcp*.[ch]
F: tools/testing/selftests/net/mptcp/
+NETWORKING [SRv6]
+M: Andrea Mayer <andrea.mayer@uniroma2.it>
+L: netdev@vger.kernel.org
+S: Maintained
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net.git
+F: include/linux/seg6*
+F: include/net/seg6*
+F: include/uapi/linux/seg6*
+F: net/ipv6/seg6*
+F: tools/testing/selftests/net/srv6*
+
NETWORKING [TCP]
M: Eric Dumazet <edumazet@google.com>
M: Neal Cardwell <ncardwell@google.com>
@@ -17672,7 +17784,7 @@ F: Documentation/ABI/testing/sysfs-bus-optee-devices
F: drivers/tee/optee/
OP-TEE RANDOM NUMBER GENERATOR (RNG) DRIVER
-M: Sumit Garg <sumit.garg@linaro.org>
+M: Sumit Garg <sumit.garg@kernel.org>
L: op-tee@lists.trustedfirmware.org
S: Maintained
F: drivers/char/hw_random/optee-rng.c
@@ -17833,7 +17945,7 @@ M: Christian Lamparter <chunkeey@googlemail.com>
L: linux-wireless@vger.kernel.org
S: Maintained
W: https://wireless.wiki.kernel.org/en/users/Drivers/p54
-F: drivers/net/wireless/intersil/p54/
+F: drivers/net/wireless/intersil/
PACKET SOCKETS
M: Willem de Bruijn <willemdebruijn.kernel@gmail.com>
@@ -18983,9 +19095,10 @@ S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/sysctl/sysctl.git sysctl-next
F: fs/proc/proc_sysctl.c
F: include/linux/sysctl.h
-F: kernel/sysctl-test.c
-F: kernel/sysctl.c
-F: tools/testing/selftests/sysctl/
+F: kernel/sysctl*
+F: tools/testing/selftests/sysctl/*
+F: lib/test_sysctl.c
+F: scripts/check-sysctl-docs
PS3 NETWORK SUPPORT
M: Geoff Levand <geoff@infradead.org>
@@ -19110,7 +19223,7 @@ PURELIFI PLFXLC DRIVER
M: Srinivasan Raju <srini.raju@purelifi.com>
L: linux-wireless@vger.kernel.org
S: Supported
-F: drivers/net/wireless/purelifi/plfxlc/
+F: drivers/net/wireless/purelifi/
PVRUSB2 VIDEO4LINUX DRIVER
M: Mike Isely <isely@pobox.com>
@@ -19202,6 +19315,7 @@ F: Documentation/devicetree/bindings/soc/qcom/qcom,apr*
F: Documentation/devicetree/bindings/sound/qcom,*
F: drivers/soc/qcom/apr.c
F: include/dt-bindings/sound/qcom,wcd9335.h
+F: include/dt-bindings/sound/qcom,wcd934x.h
F: sound/soc/codecs/lpass-rx-macro.*
F: sound/soc/codecs/lpass-tx-macro.*
F: sound/soc/codecs/lpass-va-macro.c
@@ -19584,6 +19698,16 @@ S: Maintained
F: Documentation/devicetree/bindings/regulator/vqmmc-ipq4019-regulator.yaml
F: drivers/regulator/vqmmc-ipq4019-regulator.c
+QUALCOMM IRIS VIDEO ACCELERATOR DRIVER
+M: Vikash Garodia <quic_vgarodia@quicinc.com>
+M: Dikshita Agarwal <quic_dikshita@quicinc.com>
+R: Abhinav Kumar <quic_abhinavk@quicinc.com>
+L: linux-media@vger.kernel.org
+L: linux-arm-msm@vger.kernel.org
+S: Maintained
+F: Documentation/devicetree/bindings/media/qcom,*-iris.yaml
+F: drivers/media/platform/qcom/iris/
+
QUALCOMM NAND CONTROLLER DRIVER
M: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
L: linux-mtd@lists.infradead.org
@@ -19661,7 +19785,7 @@ M: Igor Mitsyanko <imitsyanko@quantenna.com>
R: Sergey Matyukevich <geomatsi@gmail.com>
L: linux-wireless@vger.kernel.org
S: Maintained
-F: drivers/net/wireless/quantenna
+F: drivers/net/wireless/quantenna/
RADEON and AMDGPU DRM DRIVERS
M: Alex Deucher <alexander.deucher@amd.com>
@@ -19741,7 +19865,7 @@ RALINK RT2X00 WIRELESS LAN DRIVER
M: Stanislaw Gruszka <stf_xl@wp.pl>
L: linux-wireless@vger.kernel.org
S: Maintained
-F: drivers/net/wireless/ralink/rt2x00/
+F: drivers/net/wireless/ralink/
RAMDISK RAM BLOCK DEVICE DRIVER
M: Jens Axboe <axboe@kernel.dk>
@@ -19894,6 +20018,7 @@ S: Supported
F: Documentation/arch/x86/resctrl*
F: arch/x86/include/asm/resctrl.h
F: arch/x86/kernel/cpu/resctrl/
+F: include/linux/resctrl*.h
F: tools/testing/selftests/resctrl/
READ-COPY UPDATE (RCU)
@@ -20413,7 +20538,10 @@ F: drivers/perf/riscv_pmu_sbi.c
RISC-V SPACEMIT SoC Support
M: Yixun Lan <dlan@gentoo.org>
L: linux-riscv@lists.infradead.org
+L: spacemit@lists.linux.dev
S: Maintained
+W: https://github.com/spacemit-com/linux/wiki
+C: irc://irc.libera.chat/spacemit
T: git https://github.com/spacemit-com/linux
F: arch/riscv/boot/dts/spacemit/
N: spacemit
@@ -20427,15 +20555,20 @@ L: linux-riscv@lists.infradead.org
S: Maintained
T: git https://github.com/pdp7/linux.git
F: Documentation/devicetree/bindings/clock/thead,th1520-clk-ap.yaml
+F: Documentation/devicetree/bindings/firmware/thead,th1520-aon.yaml
F: Documentation/devicetree/bindings/mailbox/thead,th1520-mbox.yaml
F: Documentation/devicetree/bindings/net/thead,th1520-gmac.yaml
F: Documentation/devicetree/bindings/pinctrl/thead,th1520-pinctrl.yaml
F: arch/riscv/boot/dts/thead/
F: drivers/clk/thead/clk-th1520-ap.c
+F: drivers/firmware/thead,th1520-aon.c
F: drivers/mailbox/mailbox-th1520.c
F: drivers/net/ethernet/stmicro/stmmac/dwmac-thead.c
F: drivers/pinctrl/pinctrl-th1520.c
+F: drivers/pmdomain/thead/
F: include/dt-bindings/clock/thead,th1520-clk-ap.h
+F: include/dt-bindings/power/thead,th1520-power.h
+F: include/linux/firmware/thead/thead,th1520-aon.h
RNBD BLOCK DRIVERS
M: Md. Haris Iqbal <haris.iqbal@ionos.com>
@@ -20958,6 +21091,15 @@ F: arch/arm64/boot/dts/exynos/exynos850*
F: drivers/clk/samsung/clk-exynos850.c
F: include/dt-bindings/clock/exynos850.h
+SAMSUNG EXYNOS ACPM MAILBOX PROTOCOL
+M: Tudor Ambarus <tudor.ambarus@linaro.org>
+L: linux-kernel@vger.kernel.org
+L: linux-samsung-soc@vger.kernel.org
+S: Supported
+F: Documentation/devicetree/bindings/firmware/google,gs101-acpm-ipc.yaml
+F: drivers/firmware/samsung/exynos-acpm*
+F: include/linux/firmware/samsung/exynos-acpm-protocol.h
+
SAMSUNG EXYNOS MAILBOX DRIVER
M: Tudor Ambarus <tudor.ambarus@linaro.org>
L: linux-kernel@vger.kernel.org
@@ -20988,6 +21130,13 @@ L: linux-fbdev@vger.kernel.org
S: Maintained
F: drivers/video/fbdev/s3c-fb.c
+SAMSUNG GALAXY BOOK DRIVER
+M: Joshua Grisham <josh@joshuagrisham.com>
+L: platform-driver-x86@vger.kernel.org
+S: Maintained
+F: Documentation/admin-guide/laptops/samsung-galaxybook.rst
+F: drivers/platform/x86/samsung-galaxybook.c
+
SAMSUNG INTERCONNECT DRIVERS
M: Sylwester Nawrocki <s.nawrocki@samsung.com>
M: Artur Świgoń <a.swigon@samsung.com>
@@ -21089,6 +21238,7 @@ F: include/linux/clk/samsung.h
SAMSUNG SPI DRIVERS
M: Andi Shyti <andi.shyti@kernel.org>
+R: Tudor Ambarus <tudor.ambarus@linaro.org>
L: linux-spi@vger.kernel.org
L: linux-samsung-soc@vger.kernel.org
S: Maintained
@@ -21164,8 +21314,7 @@ S: Maintained
W: https://github.com/sched-ext/scx
T: git://git.kernel.org/pub/scm/linux/kernel/git/tj/sched_ext.git
F: include/linux/sched/ext.h
-F: kernel/sched/ext.h
-F: kernel/sched/ext.c
+F: kernel/sched/ext*
F: tools/sched_ext/
F: tools/testing/selftests/sched_ext
@@ -21499,7 +21648,6 @@ F: include/linux/slimbus.h
SFC NETWORK DRIVER
M: Edward Cree <ecree.xilinx@gmail.com>
-M: Martin Habets <habetsm.xilinx@gmail.com>
L: netdev@vger.kernel.org
L: linux-net-drivers@amd.com
S: Maintained
@@ -21708,7 +21856,7 @@ SILICON LABS WIRELESS DRIVERS (for WFxxx series)
M: Jérôme Pouiller <jerome.pouiller@silabs.com>
S: Supported
F: Documentation/devicetree/bindings/net/wireless/silabs,wfx.yaml
-F: drivers/net/wireless/silabs/wfx/
+F: drivers/net/wireless/silabs/
SILICON MOTION SM712 FRAME BUFFER DRIVER
M: Sudip Mukherjee <sudipm.mukherjee@gmail.com>
@@ -21748,7 +21896,7 @@ M: Jason A. Donenfeld <Jason@zx2c4.com>
S: Maintained
F: include/linux/siphash.h
F: lib/siphash.c
-F: lib/siphash_kunit.c
+F: lib/tests/siphash_kunit.c
SIS 190 ETHERNET DRIVER
M: Francois Romieu <romieu@fr.zoreil.com>
@@ -22159,7 +22307,9 @@ F: include/linux/sony-laptop.h
SOPHGO DEVICETREES and DRIVERS
M: Chen Wang <unicorn_wang@outlook.com>
-M: Inochi Amaoto <inochiama@outlook.com>
+M: Inochi Amaoto <inochiama@gmail.com>
+L: sophgo@lists.linux.dev
+W: https://github.com/sophgo/linux/wiki
T: git https://github.com/sophgo/linux.git
S: Maintained
N: sophgo
@@ -22357,6 +22507,13 @@ F: Documentation/devicetree/bindings/mtd/jedec,spi-nor.yaml
F: drivers/mtd/spi-nor/
F: include/linux/mtd/spi-nor.h
+SPI OFFLOAD
+R: David Lechner <dlechner@baylibre.com>
+F: drivers/spi/spi-offload-trigger-pwm.c
+F: drivers/spi/spi-offload.c
+F: include/linux/spi/offload/
+K: spi_offload
+
SPI SUBSYSTEM
M: Mark Brown <broonie@kernel.org>
L: linux-spi@vger.kernel.org
@@ -22370,15 +22527,6 @@ F: include/linux/spi/
F: include/uapi/linux/spi/
F: tools/spi/
-SPIDERNET NETWORK DRIVER for CELL
-M: Ishizaki Kou <kou.ishizaki@toshiba.co.jp>
-M: Geoff Levand <geoff@infradead.org>
-L: netdev@vger.kernel.org
-L: linuxppc-dev@lists.ozlabs.org
-S: Maintained
-F: Documentation/networking/device_drivers/ethernet/toshiba/spider_net.rst
-F: drivers/net/ethernet/toshiba/spider_net*
-
SPMI SUBSYSTEM
M: Stephen Boyd <sboyd@kernel.org>
L: linux-kernel@vger.kernel.org
@@ -23004,6 +23152,14 @@ F: drivers/net/pcs/pcs-xpcs.c
F: drivers/net/pcs/pcs-xpcs.h
F: include/linux/pcs/pcs-xpcs.h
+SYNOPSYS DESIGNWARE HDMI RX CONTROLLER DRIVER
+M: Shreeya Patel <shreeya.patel@collabora.com>
+L: linux-media@vger.kernel.org
+L: kernel@collabora.com
+S: Maintained
+F: Documentation/devicetree/bindings/media/snps,dw-hdmi-rx.yaml
+F: drivers/media/platform/synopsys/hdmirx/*
+
SYNOPSYS DESIGNWARE I2C DRIVER
M: Jarkko Nikula <jarkko.nikula@linux.intel.com>
R: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
@@ -23092,12 +23248,6 @@ L: platform-driver-x86@vger.kernel.org
S: Maintained
F: drivers/platform/x86/system76_acpi.c
-SYSV FILESYSTEM
-S: Orphan
-F: Documentation/filesystems/sysv-fs.rst
-F: fs/sysv/
-F: include/linux/sysv_fs.h
-
TASKSTATS STATISTICS INTERFACE
M: Balbir Singh <bsingharora@gmail.com>
S: Maintained
@@ -23285,7 +23435,7 @@ F: include/media/i2c/tw9910.h
TEE SUBSYSTEM
M: Jens Wiklander <jens.wiklander@linaro.org>
-R: Sumit Garg <sumit.garg@linaro.org>
+R: Sumit Garg <sumit.garg@kernel.org>
L: op-tee@lists.trustedfirmware.org
S: Maintained
F: Documentation/ABI/testing/sysfs-class-tee
@@ -24037,6 +24187,8 @@ Q: https://patchwork.kernel.org/project/linux-integrity/list/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/jarkko/linux-tpmdd.git
F: Documentation/devicetree/bindings/tpm/
F: drivers/char/tpm/
+F: include/linux/tpm*.h
+F: include/uapi/linux/vtpm_proxy.h
F: tools/testing/selftests/tpm2/
TPS546D24 DRIVER
@@ -24075,6 +24227,7 @@ F: fs/tracefs/
F: include/linux/trace*.h
F: include/trace/
F: kernel/trace/
+F: kernel/tracepoint.c
F: scripts/tracing/
F: tools/testing/selftests/ftrace/
@@ -24111,6 +24264,11 @@ W: https://github.com/srcres258/linux-doc
T: git git://github.com/srcres258/linux-doc.git doc-zh-tw
F: Documentation/translations/zh_TW/
+TRIGGER SOURCE - PWM
+M: David Lechner <dlechner@baylibre.com>
+S: Maintained
+F: Documentation/devicetree/bindings/trigger-source/pwm-trigger.yaml
+
TRUSTED SECURITY MODULE (TSM) ATTESTATION REPORTS
M: Dan Williams <dan.j.williams@intel.com>
L: linux-coco@lists.linux.dev
@@ -24170,7 +24328,7 @@ W: http://vtun.sourceforge.net/tun
F: Documentation/networking/tuntap.rst
F: arch/um/os-Linux/drivers/
F: drivers/net/tap.c
-F: drivers/net/tun.c
+F: drivers/net/tun*
TURBOCHANNEL SUBSYSTEM
M: "Maciej W. Rozycki" <macro@orcam.me.uk>
@@ -25430,8 +25588,8 @@ R: Sergey Senozhatsky <senozhatsky@chromium.org>
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/printk/linux.git
F: Documentation/core-api/printk-formats.rst
-F: lib/test_printf.c
-F: lib/test_scanf.c
+F: lib/tests/printf_kunit.c
+F: lib/tests/scanf_kunit.c
F: lib/vsprintf.c
VT1211 HARDWARE MONITOR DRIVER
@@ -25964,7 +26122,6 @@ F: include/xen/swiotlb-xen.h
XFS FILESYSTEM
M: Carlos Maiolino <cem@kernel.org>
-R: Darrick J. Wong <djwong@kernel.org>
L: linux-xfs@vger.kernel.org
S: Supported
W: http://xfs.org/
@@ -26025,6 +26182,14 @@ S: Maintained
F: drivers/pwm/pwm-xilinx.c
F: include/clocksource/timer-xilinx.h
+XILINX SOUND DRIVERS
+M: Vincenzo Frascino <vincenzo.frascino@arm.com>
+S: Maintained
+F: Documentation/devicetree/bindings/sound/xlnx,i2s.yaml
+F: Documentation/devicetree/bindings/sound/xlnx,audio-formatter.yaml
+F: Documentation/devicetree/bindings/sound/xlnx,spdif.yaml
+F: sound/soc/xilinx/*
+
XILINX SD-FEC IP CORES
M: Derek Kiernan <derek.kiernan@amd.com>
M: Dragan Cvetic <dragan.cvetic@amd.com>
@@ -26209,7 +26374,7 @@ F: mm/zbud.c
ZD1211RW WIRELESS DRIVER
L: linux-wireless@vger.kernel.org
S: Orphan
-F: drivers/net/wireless/zydas/zd1211rw/
+F: drivers/net/wireless/zydas/
ZD1301 MEDIA DRIVER
L: linux-media@vger.kernel.org
diff --git a/Makefile b/Makefile
index 70bdbf2218fc..d138b17b8840 100644
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
VERSION = 6
PATCHLEVEL = 14
SUBLEVEL = 0
-EXTRAVERSION = -rc5
+EXTRAVERSION =
NAME = Baby Opossum Posse
# *DOCUMENTATION*
@@ -1014,6 +1014,9 @@ CC_FLAGS_CFI := -fsanitize=kcfi
ifdef CONFIG_CFI_ICALL_NORMALIZE_INTEGERS
CC_FLAGS_CFI += -fsanitize-cfi-icall-experimental-normalize-integers
endif
+ifdef CONFIG_FINEIBT_BHI
+ CC_FLAGS_CFI += -fsanitize-kcfi-arity
+endif
ifdef CONFIG_RUST
# Always pass -Zsanitizer-cfi-normalize-integers as CONFIG_RUST selects
# CONFIG_CFI_ICALL_NORMALIZE_INTEGERS.
@@ -1123,6 +1126,11 @@ endif
KBUILD_USERCFLAGS += $(filter -m32 -m64 --target=%, $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS))
KBUILD_USERLDFLAGS += $(filter -m32 -m64 --target=%, $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS))
+# userspace programs are linked via the compiler, use the correct linker
+ifeq ($(CONFIG_CC_IS_CLANG)$(CONFIG_LD_IS_LLD),yy)
+KBUILD_USERLDFLAGS += --ld-path=$(LD)
+endif
+
# make the checker run with the right architecture
CHECKFLAGS += --arch=$(ARCH)
diff --git a/arch/Kconfig b/arch/Kconfig
index b8a4ff365582..9f6eb09ef12d 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -1584,6 +1584,10 @@ config HAVE_SPARSE_SYSCALL_NR
entries at 4000, 5000 and 6000 locations. This option turns on syscall
related optimizations for a given architecture.
+config ARCH_HAS_VDSO_ARCH_DATA
+ depends on GENERIC_VDSO_DATA_STORE
+ bool
+
config ARCH_HAS_VDSO_TIME_DATA
bool
diff --git a/arch/alpha/include/asm/io.h b/arch/alpha/include/asm/io.h
index 65fe1e54c6da..fa3e4c246cda 100644
--- a/arch/alpha/include/asm/io.h
+++ b/arch/alpha/include/asm/io.h
@@ -10,10 +10,6 @@
#include <asm/machvec.h>
#include <asm/hwrpb.h>
-/* The generic header contains only prototypes. Including it ensures that
- the implementation we have here matches that interface. */
-#include <asm-generic/iomap.h>
-
/*
* Virtual -> physical identity mapping starts at this offset
*/
@@ -276,13 +272,24 @@ extern void __raw_writeq(u64 b, volatile void __iomem *addr);
#define __raw_writel __raw_writel
#define __raw_writeq __raw_writeq
-/*
- * Mapping from port numbers to __iomem space is pretty easy.
- */
+extern unsigned int ioread8(const void __iomem *);
+extern unsigned int ioread16(const void __iomem *);
+extern unsigned int ioread32(const void __iomem *);
+extern u64 ioread64(const void __iomem *);
+
+extern void iowrite8(u8, void __iomem *);
+extern void iowrite16(u16, void __iomem *);
+extern void iowrite32(u32, void __iomem *);
+extern void iowrite64(u64, void __iomem *);
+
+extern void ioread8_rep(const void __iomem *port, void *buf, unsigned long count);
+extern void ioread16_rep(const void __iomem *port, void *buf, unsigned long count);
+extern void ioread32_rep(const void __iomem *port, void *buf, unsigned long count);
+
+extern void iowrite8_rep(void __iomem *port, const void *buf, unsigned long count);
+extern void iowrite16_rep(void __iomem *port, const void *buf, unsigned long count);
+extern void iowrite32_rep(void __iomem *port, const void *buf, unsigned long count);
-/* These two have to be extern inline because of the extern prototype from
- <asm-generic/iomap.h>. It is not legal to mix "extern" and "static" for
- the same declaration. */
extern inline void __iomem *ioport_map(unsigned long port, unsigned int size)
{
return IO_CONCAT(__IO_PREFIX,ioportmap) (port);
@@ -629,10 +636,6 @@ extern void outsl (unsigned long port, const void *src, unsigned long count);
#define RTC_PORT(x) (0x70 + (x))
#define RTC_ALWAYS_BCD 0
-/*
- * These get provided from <asm-generic/iomap.h> since alpha does not
- * select GENERIC_IOMAP.
- */
#define ioread64 ioread64
#define iowrite64 iowrite64
#define ioread8_rep ioread8_rep
diff --git a/arch/alpha/kernel/syscalls/syscall.tbl b/arch/alpha/kernel/syscalls/syscall.tbl
index c59d53d6d3f3..2dd6340de6b4 100644
--- a/arch/alpha/kernel/syscalls/syscall.tbl
+++ b/arch/alpha/kernel/syscalls/syscall.tbl
@@ -506,3 +506,4 @@
574 common getxattrat sys_getxattrat
575 common listxattrat sys_listxattrat
576 common removexattrat sys_removexattrat
+577 common open_tree_attr sys_open_tree_attr
diff --git a/arch/arm/boot/dts/allwinner/Makefile b/arch/arm/boot/dts/allwinner/Makefile
index 48666f73e638..d799ad153b37 100644
--- a/arch/arm/boot/dts/allwinner/Makefile
+++ b/arch/arm/boot/dts/allwinner/Makefile
@@ -199,6 +199,7 @@ DTC_FLAGS_sun8i-h3-nanopi-r1 := -@
DTC_FLAGS_sun8i-h3-orangepi-pc := -@
DTC_FLAGS_sun8i-h3-bananapi-m2-plus-v1.2 := -@
DTC_FLAGS_sun8i-h3-orangepi-pc-plus := -@
+DTC_FLAGS_sun8i-v3s-netcube-kumquat := -@
dtb-$(CONFIG_MACH_SUN8I) += \
sun8i-a23-evb.dtb \
sun8i-a23-gt90h-v4.dtb \
@@ -261,6 +262,7 @@ dtb-$(CONFIG_MACH_SUN8I) += \
sun8i-v3s-anbernic-rg-nano.dtb \
sun8i-v3s-licheepi-zero.dtb \
sun8i-v3s-licheepi-zero-dock.dtb \
+ sun8i-v3s-netcube-kumquat.dtb \
sun8i-v40-bananapi-m2-berry.dtb
dtb-$(CONFIG_MACH_SUN9I) += \
sun9i-a80-optimus.dtb \
diff --git a/arch/arm/boot/dts/allwinner/sun8i-v3s-netcube-kumquat.dts b/arch/arm/boot/dts/allwinner/sun8i-v3s-netcube-kumquat.dts
new file mode 100644
index 000000000000..5143cb4e7b78
--- /dev/null
+++ b/arch/arm/boot/dts/allwinner/sun8i-v3s-netcube-kumquat.dts
@@ -0,0 +1,276 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright (C) 2025 Lukas Schmid <lukas.schmid@netcube.li>
+ */
+
+/dts-v1/;
+#include "sun8i-v3s.dtsi"
+
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/leds/common.h>
+#include <dt-bindings/gpio/gpio.h>
+
+/{
+ model = "NetCube Systems Kumquat";
+ compatible = "netcube,kumquat", "allwinner,sun8i-v3s";
+
+ aliases {
+ serial0 = &uart0;
+ ethernet0 = &emac;
+ rtc0 = &ds3232;
+ rtc1 = &rtc; /* not battery backed */
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+
+ /* 40 MHz Crystal Oscillator on PCB */
+ clk_can0: clock-can0 {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <40000000>;
+ };
+
+ gpio-keys {
+ compatible = "gpio-keys";
+ autorepeat;
+
+ key-user {
+ label = "GPIO Key User";
+ linux,code = <KEY_PROG1>;
+ gpios = <&pio 1 2 (GPIO_ACTIVE_LOW | GPIO_PULL_UP)>; /* PB2 */
+ };
+ };
+
+ leds {
+ compatible = "gpio-leds";
+
+ led-heartbeat {
+ gpios = <&pio 4 4 GPIO_ACTIVE_HIGH>; /* PE4 */
+ linux,default-trigger = "heartbeat";
+ color = <LED_COLOR_ID_GREEN>;
+ function = LED_FUNCTION_HEARTBEAT;
+ };
+
+ led-mmc0-act {
+ gpios = <&pio 5 6 GPIO_ACTIVE_HIGH>; /* PF6 */
+ linux,default-trigger = "mmc0";
+ color = <LED_COLOR_ID_GREEN>;
+ function = LED_FUNCTION_DISK;
+ };
+ };
+
+ /* EA3036C Switching 3 Channel Regulator - Channel 2 */
+ reg_vcc3v3: regulator-3v3 {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc3v3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ vin-supply = <&reg_vcc5v0>;
+ };
+
+ /* K7805-1000R3 Switching Regulator supplied from main 12/24V terminal block */
+ reg_vcc5v0: regulator-5v0 {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc5v0";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ };
+};
+
+&codec {
+ allwinner,audio-routing =
+ "Headphone", "HP",
+ "Headphone", "HPCOM",
+ "MIC1", "Mic",
+ "Mic", "HBIAS";
+ status = "okay";
+};
+
+&ehci {
+ status = "okay";
+};
+
+&emac {
+ allwinner,leds-active-low;
+ nvmem-cells = <&eth0_macaddress>;
+ nvmem-cell-names = "mac-address";
+ status = "okay";
+};
+
+&i2c0 {
+ status = "okay";
+
+ eeprom0: eeprom@50 {
+ compatible = "atmel,24c02"; /* actually it's a 24AA02E48 */
+ reg = <0x50>;
+ pagesize = <16>;
+ read-only;
+ vcc-supply = <&reg_vcc3v3>;
+
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ eth0_macaddress: macaddress@fa {
+ reg = <0xfa 0x06>;
+ };
+ };
+
+ tusb320: typec@60 {
+ compatible = "ti,tusb320";
+ reg = <0x60>;
+ interrupts-extended = <&pio 1 5 IRQ_TYPE_LEVEL_LOW>; /* PB5 */
+ };
+
+ ds3232: rtc@68 {
+ compatible = "dallas,ds3232";
+ reg = <0x68>;
+ };
+};
+
+/* Exposed as the Flash/SD Header on the board */
+&mmc0 {
+ vmmc-supply = <&reg_vcc3v3>;
+ bus-width = <4>;
+ broken-cd;
+ status = "okay";
+};
+
+/* Connected to the on-board ESP32 */
+&mmc1 {
+ vmmc-supply = <&reg_vcc3v3>;
+ bus-width = <4>;
+ broken-cd;
+ status = "okay";
+};
+
+&ohci {
+ status = "okay";
+};
+
+/* Disable external 32k osc as it is broken on current revision */
+&osc32k {
+ status = "disabled";
+};
+
+&pio {
+ vcc-pb-supply = <&reg_vcc3v3>;
+ vcc-pc-supply = <&reg_vcc3v3>;
+ vcc-pe-supply = <&reg_vcc3v3>;
+ vcc-pf-supply = <&reg_vcc3v3>;
+ vcc-pg-supply = <&reg_vcc3v3>;
+
+ gpio-line-names = "", "", "", "", // PA
+ "", "", "", "",
+ "", "", "", "",
+ "", "", "", "",
+ "", "", "", "",
+ "", "", "", "",
+ "", "", "", "",
+ "", "", "", "",
+ "CAN_nCS", "CAN_nINT", "USER_SW", "PB3", // PB
+ "USB_ID", "USBC_nINT", "I2C0_SCL", "I2C0_SDA",
+ "UART0_TX", "UART0_RX", "", "",
+ "", "", "", "",
+ "", "", "", "",
+ "", "", "", "",
+ "", "", "", "",
+ "", "", "", "",
+ "SPI_MISO", "SPI_SCK", "FLASH_nCS", "SPI_MOSI", // PC
+ "", "", "", "",
+ "", "", "", "",
+ "", "", "", "",
+ "", "", "", "",
+ "", "", "", "",
+ "", "", "", "",
+ "", "", "", "",
+ "", "", "", "", // PD
+ "", "", "", "",
+ "", "", "", "",
+ "", "", "", "",
+ "", "", "", "",
+ "", "", "", "",
+ "", "", "", "",
+ "", "", "", "",
+ "Q12", "Q11", "Q10", "Q9", // PE
+ "LED_SYS0", "I1", "Q1", "Q2",
+ "I2", "I3", "Q3", "Q4",
+ "I4", "I5", "Q5", "Q6",
+ "I6", "I7", "Q7", "Q8",
+ "I8", "UART1_TXD", "UART1_RXD", "ESP_nRST",
+ "ESP_nBOOT", "", "", "",
+ "", "", "", "",
+ "SD_D1", "SD_D0", "SD_CLK", "SD_CMD", // PF
+ "SD_D3", "SD_D2", "LED_SYS1", "",
+ "", "", "", "",
+ "", "", "", "",
+ "", "", "", "",
+ "", "", "", "",
+ "", "", "", "",
+ "", "", "", "",
+ "ESP_CLK", "ESP_CMD", "ESP_D0", "ESP_D1", // PG
+ "ESP_D2", "ESP_D3", "", "",
+ "", "", "", "",
+ "", "", "", "",
+ "", "", "", "",
+ "", "", "", "",
+ "", "", "", "",
+ "", "", "", "";
+};
+
+/* Disable external 32k osc as it is broken on current revision */
+&rtc {
+ /delete-property/ clocks;
+};
+
+/* Exposed as a USB-C connector with USB-Serial converter */
+&uart0 {
+ pinctrl-0 = <&uart0_pb_pins>;
+ pinctrl-names = "default";
+ status = "okay";
+};
+
+/* Connected to the Bootloader/Console of the ESP32 */
+&uart1 {
+ pinctrl-0 = <&uart1_pe_pins>;
+ pinctrl-names = "default";
+ status = "okay";
+};
+
+&usb_otg {
+ extcon = <&tusb320 0>;
+ dr_mode = "otg";
+ status = "okay";
+};
+
+&usbphy {
+ usb0_id_det-gpios = <&pio 1 4 GPIO_ACTIVE_HIGH>; /* PB4 */
+ status = "okay";
+};
+
+&spi0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ cs-gpios = <0>, <&pio 1 0 GPIO_ACTIVE_LOW>; /* PB0 */
+ status = "okay";
+
+ flash@0 {
+ compatible = "jedec,spi-nor";
+ reg = <0>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ label = "firmware";
+ spi-max-frequency = <40000000>;
+ };
+
+ can@1 {
+ compatible = "microchip,mcp2518fd";
+ reg = <1>;
+ clocks = <&clk_can0>;
+ interrupts-extended = <&pio 1 1 IRQ_TYPE_LEVEL_LOW>; /* PB1 */
+ spi-max-frequency = <20000000>;
+ vdd-supply = <&reg_vcc3v3>;
+ xceiver-supply = <&reg_vcc3v3>;
+ };
+};
diff --git a/arch/arm/boot/dts/allwinner/sun8i-v3s.dtsi b/arch/arm/boot/dts/allwinner/sun8i-v3s.dtsi
index 9e13c2aa8911..f909b1d4dbca 100644
--- a/arch/arm/boot/dts/allwinner/sun8i-v3s.dtsi
+++ b/arch/arm/boot/dts/allwinner/sun8i-v3s.dtsi
@@ -416,6 +416,12 @@
function = "uart0";
};
+ /omit-if-no-ref/
+ uart1_pe_pins: uart1-pe-pins {
+ pins = "PE21", "PE22";
+ function = "uart1";
+ };
+
uart2_pins: uart2-pins {
pins = "PB0", "PB1";
function = "uart2";
diff --git a/arch/arm/boot/dts/amlogic/meson8.dtsi b/arch/arm/boot/dts/amlogic/meson8.dtsi
index 9ff142d9fe3f..847f7b1f1e96 100644
--- a/arch/arm/boot/dts/amlogic/meson8.dtsi
+++ b/arch/arm/boot/dts/amlogic/meson8.dtsi
@@ -449,7 +449,11 @@
};
pwm_ef: pwm@86c0 {
- compatible = "amlogic,meson8-pwm", "amlogic,meson8b-pwm";
+ compatible = "amlogic,meson8-pwm-v2";
+ clocks = <&xtal>,
+ <>, /* unknown/untested, the datasheet calls it "Video PLL" */
+ <&clkc CLKID_FCLK_DIV4>,
+ <&clkc CLKID_FCLK_DIV3>;
reg = <0x86c0 0x10>;
#pwm-cells = <3>;
status = "disabled";
@@ -699,11 +703,19 @@
};
&pwm_ab {
- compatible = "amlogic,meson8-pwm", "amlogic,meson8b-pwm";
+ compatible = "amlogic,meson8-pwm-v2";
+ clocks = <&xtal>,
+ <>, /* unknown/untested, the datasheet calls it "Video PLL" */
+ <&clkc CLKID_FCLK_DIV4>,
+ <&clkc CLKID_FCLK_DIV3>;
};
&pwm_cd {
- compatible = "amlogic,meson8-pwm", "amlogic,meson8b-pwm";
+ compatible = "amlogic,meson8-pwm-v2";
+ clocks = <&xtal>,
+ <>, /* unknown/untested, the datasheet calls it "Video PLL" */
+ <&clkc CLKID_FCLK_DIV4>,
+ <&clkc CLKID_FCLK_DIV3>;
};
&rtc {
diff --git a/arch/arm/boot/dts/amlogic/meson8b-ec100.dts b/arch/arm/boot/dts/amlogic/meson8b-ec100.dts
index 18ea6592b7d7..236999548094 100644
--- a/arch/arm/boot/dts/amlogic/meson8b-ec100.dts
+++ b/arch/arm/boot/dts/amlogic/meson8b-ec100.dts
@@ -443,8 +443,6 @@
status = "okay";
pinctrl-0 = <&pwm_c1_pins>, <&pwm_d_pins>;
pinctrl-names = "default";
- clocks = <&xtal>, <&xtal>;
- clock-names = "clkin0", "clkin1";
};
&rtc {
diff --git a/arch/arm/boot/dts/amlogic/meson8b-mxq.dts b/arch/arm/boot/dts/amlogic/meson8b-mxq.dts
index fb28cb330f17..0bca0b33eea2 100644
--- a/arch/arm/boot/dts/amlogic/meson8b-mxq.dts
+++ b/arch/arm/boot/dts/amlogic/meson8b-mxq.dts
@@ -162,8 +162,6 @@
status = "okay";
pinctrl-0 = <&pwm_c1_pins>, <&pwm_d_pins>;
pinctrl-names = "default";
- clocks = <&xtal>, <&xtal>;
- clock-names = "clkin0", "clkin1";
};
&uart_AO {
diff --git a/arch/arm/boot/dts/amlogic/meson8b-odroidc1.dts b/arch/arm/boot/dts/amlogic/meson8b-odroidc1.dts
index 2aa012f38a3b..1cd2093202ca 100644
--- a/arch/arm/boot/dts/amlogic/meson8b-odroidc1.dts
+++ b/arch/arm/boot/dts/amlogic/meson8b-odroidc1.dts
@@ -347,8 +347,6 @@
status = "okay";
pinctrl-0 = <&pwm_c1_pins>, <&pwm_d_pins>;
pinctrl-names = "default";
- clocks = <&xtal>, <&xtal>;
- clock-names = "clkin0", "clkin1";
};
&rtc {
diff --git a/arch/arm/boot/dts/amlogic/meson8b.dtsi b/arch/arm/boot/dts/amlogic/meson8b.dtsi
index 9e02a97f86a0..0876611ce26a 100644
--- a/arch/arm/boot/dts/amlogic/meson8b.dtsi
+++ b/arch/arm/boot/dts/amlogic/meson8b.dtsi
@@ -403,8 +403,12 @@
};
pwm_ef: pwm@86c0 {
- compatible = "amlogic,meson8b-pwm";
+ compatible = "amlogic,meson8b-pwm-v2", "amlogic,meson8-pwm-v2";
reg = <0x86c0 0x10>;
+ clocks = <&xtal>,
+ <>, /* unknown/untested, the datasheet calls it "Video PLL" */
+ <&clkc CLKID_FCLK_DIV4>,
+ <&clkc CLKID_FCLK_DIV3>;
#pwm-cells = <3>;
status = "disabled";
};
@@ -674,11 +678,19 @@
};
&pwm_ab {
- compatible = "amlogic,meson8b-pwm";
+ compatible = "amlogic,meson8b-pwm-v2", "amlogic,meson8-pwm-v2";
+ clocks = <&xtal>,
+ <>, /* unknown/untested, the datasheet calls it "Video PLL" */
+ <&clkc CLKID_FCLK_DIV4>,
+ <&clkc CLKID_FCLK_DIV3>;
};
&pwm_cd {
- compatible = "amlogic,meson8b-pwm";
+ compatible = "amlogic,meson8b-pwm-v2", "amlogic,meson8-pwm-v2";
+ clocks = <&xtal>,
+ <>, /* unknown/untested, the datasheet calls it "Video PLL" */
+ <&clkc CLKID_FCLK_DIV4>,
+ <&clkc CLKID_FCLK_DIV3>;
};
&rtc {
diff --git a/arch/arm/boot/dts/broadcom/bcm2711-rpi.dtsi b/arch/arm/boot/dts/broadcom/bcm2711-rpi.dtsi
index 6bf4241fe3b7..c78ed064d166 100644
--- a/arch/arm/boot/dts/broadcom/bcm2711-rpi.dtsi
+++ b/arch/arm/boot/dts/broadcom/bcm2711-rpi.dtsi
@@ -1,7 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
#include "bcm2835-rpi.dtsi"
-#include <dt-bindings/power/raspberrypi-power.h>
#include <dt-bindings/reset/raspberrypi,firmware-reset.h>
/ {
@@ -101,7 +100,3 @@
&vchiq {
interrupts = <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>;
};
-
-&xhci {
- power-domains = <&power RPI_POWER_DOMAIN_USB>;
-};
diff --git a/arch/arm/boot/dts/broadcom/bcm2711.dtsi b/arch/arm/boot/dts/broadcom/bcm2711.dtsi
index e4e42af21ef3..c06d9f5e53c8 100644
--- a/arch/arm/boot/dts/broadcom/bcm2711.dtsi
+++ b/arch/arm/boot/dts/broadcom/bcm2711.dtsi
@@ -134,7 +134,7 @@
clocks = <&clocks BCM2835_CLOCK_UART>,
<&clocks BCM2835_CLOCK_VPU>;
clock-names = "uartclk", "apb_pclk";
- arm,primecell-periphid = <0x00241011>;
+ arm,primecell-periphid = <0x00341011>;
status = "disabled";
};
@@ -145,7 +145,7 @@
clocks = <&clocks BCM2835_CLOCK_UART>,
<&clocks BCM2835_CLOCK_VPU>;
clock-names = "uartclk", "apb_pclk";
- arm,primecell-periphid = <0x00241011>;
+ arm,primecell-periphid = <0x00341011>;
status = "disabled";
};
@@ -156,7 +156,7 @@
clocks = <&clocks BCM2835_CLOCK_UART>,
<&clocks BCM2835_CLOCK_VPU>;
clock-names = "uartclk", "apb_pclk";
- arm,primecell-periphid = <0x00241011>;
+ arm,primecell-periphid = <0x00341011>;
status = "disabled";
};
@@ -167,7 +167,7 @@
clocks = <&clocks BCM2835_CLOCK_UART>,
<&clocks BCM2835_CLOCK_VPU>;
clock-names = "uartclk", "apb_pclk";
- arm,primecell-periphid = <0x00241011>;
+ arm,primecell-periphid = <0x00341011>;
status = "disabled";
};
@@ -451,8 +451,6 @@
IRQ_TYPE_LEVEL_LOW)>,
<GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(4) |
IRQ_TYPE_LEVEL_LOW)>;
- /* This only applies to the ARMv7 stub */
- arm,cpu-registers-not-fw-configured;
};
cpus: cpus {
@@ -610,6 +608,7 @@
#address-cells = <1>;
#size-cells = <0>;
interrupts = <GIC_SPI 176 IRQ_TYPE_LEVEL_HIGH>;
+ power-domains = <&pm BCM2835_POWER_DOMAIN_USB>;
/* DWC2 and this IP block share the same USB PHY,
* enabling both at the same time results in lockups.
* So keep this node disabled and let the bootloader
@@ -1177,6 +1176,7 @@
};
&uart0 {
+ arm,primecell-periphid = <0x00341011>;
interrupts = <GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH>;
};
diff --git a/arch/arm/boot/dts/broadcom/bcm4709-asus-rt-ac3200.dts b/arch/arm/boot/dts/broadcom/bcm4709-asus-rt-ac3200.dts
index 53cb0c58f6d0..3da2daee0c84 100644
--- a/arch/arm/boot/dts/broadcom/bcm4709-asus-rt-ac3200.dts
+++ b/arch/arm/boot/dts/broadcom/bcm4709-asus-rt-ac3200.dts
@@ -124,19 +124,19 @@
};
port@1 {
- label = "lan1";
+ label = "lan4";
};
port@2 {
- label = "lan2";
+ label = "lan3";
};
port@3 {
- label = "lan3";
+ label = "lan2";
};
port@4 {
- label = "lan4";
+ label = "lan1";
};
};
};
diff --git a/arch/arm/boot/dts/broadcom/bcm47094-asus-rt-ac5300.dts b/arch/arm/boot/dts/broadcom/bcm47094-asus-rt-ac5300.dts
index 6c666dc7ad23..01ec8c03686a 100644
--- a/arch/arm/boot/dts/broadcom/bcm47094-asus-rt-ac5300.dts
+++ b/arch/arm/boot/dts/broadcom/bcm47094-asus-rt-ac5300.dts
@@ -126,11 +126,11 @@
ports {
port@0 {
- label = "lan4";
+ label = "wan";
};
port@1 {
- label = "lan3";
+ label = "lan1";
};
port@2 {
@@ -138,11 +138,11 @@
};
port@3 {
- label = "lan1";
+ label = "lan3";
};
port@4 {
- label = "wan";
+ label = "lan4";
};
};
};
diff --git a/arch/arm/boot/dts/cirrus/ep7211-edb7211.dts b/arch/arm/boot/dts/cirrus/ep7211-edb7211.dts
index 808cd5778e27..adc74243ed19 100644
--- a/arch/arm/boot/dts/cirrus/ep7211-edb7211.dts
+++ b/arch/arm/boot/dts/cirrus/ep7211-edb7211.dts
@@ -88,7 +88,7 @@
};
&portd {
- lcden {
+ lcden-hog {
gpio-hog;
gpios = <2 GPIO_ACTIVE_HIGH>;
output-high;
diff --git a/arch/arm/boot/dts/intel/ixp/intel-ixp42x-netgear-wg302v1.dts b/arch/arm/boot/dts/intel/ixp/intel-ixp42x-netgear-wg302v1.dts
index 19d56e9aec9d..a351a97d257e 100644
--- a/arch/arm/boot/dts/intel/ixp/intel-ixp42x-netgear-wg302v1.dts
+++ b/arch/arm/boot/dts/intel/ixp/intel-ixp42x-netgear-wg302v1.dts
@@ -8,6 +8,7 @@
#include "intel-ixp42x.dtsi"
#include <dt-bindings/input/input.h>
+#include <dt-bindings/leds/common.h>
/ {
model = "Netgear WG302 v1";
@@ -32,6 +33,35 @@
serial0 = &uart1;
};
+ leds {
+ compatible = "gpio-leds";
+ test_led: led-test {
+ color = <LED_COLOR_ID_AMBER>;
+ function = "test";
+ gpios = <&gpio0 4 GPIO_ACTIVE_LOW>;
+ default-state = "off";
+ };
+ wlan_led: led-wlan {
+ color = <LED_COLOR_ID_GREEN>;
+ function = LED_FUNCTION_WLAN;
+ gpios = <&gpio0 5 GPIO_ACTIVE_LOW>;
+ default-state = "off";
+ linux,default-trigger = "phy0tx";
+ };
+ };
+
+ gpio_keys {
+ /* RESET is on GPIO13 which can't fire interrupts */
+ compatible = "gpio-keys-polled";
+ poll-interval = <100>;
+
+ button-reset {
+ linux,code = <KEY_RESTART>;
+ label = "reset";
+ gpios = <&gpio0 13 GPIO_ACTIVE_LOW>;
+ };
+ };
+
soc {
bus@c4000000 {
flash@0,0 {
@@ -57,7 +87,7 @@
status = "okay";
/*
- * Taken from WG302 v2 PCI boardfile (wg302v2-pci.c)
+ * Taken from WG302 v1 PCI boardfile (wg302v1-pci.c)
* We have slots (IDSEL) 1 and 2 with one assigned IRQ
* each handling all IRQs.
*/
@@ -70,10 +100,10 @@
<0x0800 0 0 3 &gpio0 8 IRQ_TYPE_LEVEL_LOW>, /* INT C on slot 1 is irq 8 */
<0x0800 0 0 4 &gpio0 8 IRQ_TYPE_LEVEL_LOW>, /* INT D on slot 1 is irq 8 */
/* IDSEL 2 */
- <0x1000 0 0 1 &gpio0 9 IRQ_TYPE_LEVEL_LOW>, /* INT A on slot 2 is irq 9 */
- <0x1000 0 0 2 &gpio0 9 IRQ_TYPE_LEVEL_LOW>, /* INT B on slot 2 is irq 9 */
- <0x1000 0 0 3 &gpio0 9 IRQ_TYPE_LEVEL_LOW>, /* INT C on slot 2 is irq 9 */
- <0x1000 0 0 4 &gpio0 9 IRQ_TYPE_LEVEL_LOW>; /* INT D on slot 2 is irq 9 */
+ <0x1000 0 0 1 &gpio0 10 IRQ_TYPE_LEVEL_LOW>, /* INT A on slot 2 is irq 10 */
+ <0x1000 0 0 2 &gpio0 10 IRQ_TYPE_LEVEL_LOW>, /* INT B on slot 2 is irq 10 */
+ <0x1000 0 0 3 &gpio0 10 IRQ_TYPE_LEVEL_LOW>, /* INT C on slot 2 is irq 10 */
+ <0x1000 0 0 4 &gpio0 10 IRQ_TYPE_LEVEL_LOW>; /* INT D on slot 2 is irq 10 */
};
ethernet@c8009000 {
diff --git a/arch/arm/boot/dts/intel/ixp/intel-ixp4xx.dtsi b/arch/arm/boot/dts/intel/ixp/intel-ixp4xx.dtsi
index 51a716c59669..0adeccabd4fe 100644
--- a/arch/arm/boot/dts/intel/ixp/intel-ixp4xx.dtsi
+++ b/arch/arm/boot/dts/intel/ixp/intel-ixp4xx.dtsi
@@ -193,10 +193,10 @@
compatible = "intel,ixp4xx-ethernet";
reg = <0xc800c000 0x1000>;
status = "disabled";
- intel,npe = <0>;
/* Dummy values that depend on firmware */
queue-rx = <&qmgr 0>;
queue-txready = <&qmgr 0>;
+ intel,npe-handle = <&npe 0>;
};
};
};
diff --git a/arch/arm/boot/dts/marvell/armada-385-clearfog-gtr.dtsi b/arch/arm/boot/dts/marvell/armada-385-clearfog-gtr.dtsi
index 8208c6a9627a..7aa71a9aa1bb 100644
--- a/arch/arm/boot/dts/marvell/armada-385-clearfog-gtr.dtsi
+++ b/arch/arm/boot/dts/marvell/armada-385-clearfog-gtr.dtsi
@@ -453,7 +453,7 @@
pinctrl-0 = <&cf_gtr_fan_pwm &cf_gtr_wifi_disable_pins>;
pinctrl-names = "default";
- wifi-disable {
+ wifi-disable-hog {
gpio-hog;
gpios = <30 GPIO_ACTIVE_LOW>, <31 GPIO_ACTIVE_LOW>;
output-low;
@@ -465,7 +465,7 @@
pinctrl-0 = <&cf_gtr_isolation_pins &cf_gtr_poe_reset_pins &cf_gtr_lte_disable_pins>;
pinctrl-names = "default";
- lte-disable {
+ lte-disable-hog {
gpio-hog;
gpios = <2 GPIO_ACTIVE_LOW>;
output-low;
@@ -476,14 +476,14 @@
* This signal, when asserted, isolates Armada 38x sample at reset pins
* from control of external devices. Should be de-asserted after reset.
*/
- sar-isolation {
+ sar-isolation-hog {
gpio-hog;
gpios = <15 GPIO_ACTIVE_LOW>;
output-low;
line-name = "sar-isolation";
};
- poe-reset {
+ poe-reset-hog {
gpio-hog;
gpios = <16 GPIO_ACTIVE_LOW>;
output-low;
diff --git a/arch/arm/boot/dts/marvell/armada-388-clearfog-base.dts b/arch/arm/boot/dts/marvell/armada-388-clearfog-base.dts
index f7daa3bc707e..cf32ba9b4e8e 100644
--- a/arch/arm/boot/dts/marvell/armada-388-clearfog-base.dts
+++ b/arch/arm/boot/dts/marvell/armada-388-clearfog-base.dts
@@ -34,7 +34,7 @@
};
&gpio0 {
- phy1_reset {
+ phy1-reset-hog {
gpio-hog;
gpios = <19 GPIO_ACTIVE_LOW>;
output-low;
diff --git a/arch/arm/boot/dts/marvell/kirkwood-openrd.dtsi b/arch/arm/boot/dts/marvell/kirkwood-openrd.dtsi
index 47f03c69c55a..9d7cff4feada 100644
--- a/arch/arm/boot/dts/marvell/kirkwood-openrd.dtsi
+++ b/arch/arm/boot/dts/marvell/kirkwood-openrd.dtsi
@@ -53,7 +53,7 @@
cd-gpios = <&gpio0 29 9>;
};
gpio@10100 {
- p28 {
+ p28-hog {
gpio-hog;
gpios = <28 GPIO_ACTIVE_HIGH>;
/*
@@ -71,7 +71,7 @@
};
};
gpio@10140 {
- p2 {
+ p2-hog {
gpio-hog;
gpios = <2 GPIO_ACTIVE_HIGH>;
/*
diff --git a/arch/arm/boot/dts/microchip/aks-cdu.dts b/arch/arm/boot/dts/microchip/aks-cdu.dts
index b65f80e1ef05..302cb872efa1 100644
--- a/arch/arm/boot/dts/microchip/aks-cdu.dts
+++ b/arch/arm/boot/dts/microchip/aks-cdu.dts
@@ -56,7 +56,7 @@
};
};
- usb0: ohci@500000 {
+ usb0: usb@500000 {
num-ports = <2>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/microchip/animeo_ip.dts b/arch/arm/boot/dts/microchip/animeo_ip.dts
index 7f527622d3f2..c11f4f7dac94 100644
--- a/arch/arm/boot/dts/microchip/animeo_ip.dts
+++ b/arch/arm/boot/dts/microchip/animeo_ip.dts
@@ -136,7 +136,7 @@
};
};
- usb0: ohci@500000 {
+ usb0: usb@500000 {
num-ports = <2>;
atmel,vbus-gpio = <&pioB 15 GPIO_ACTIVE_LOW>;
status = "okay";
diff --git a/arch/arm/boot/dts/microchip/at91-foxg20.dts b/arch/arm/boot/dts/microchip/at91-foxg20.dts
index 9dfd5de808d1..8e9e87665045 100644
--- a/arch/arm/boot/dts/microchip/at91-foxg20.dts
+++ b/arch/arm/boot/dts/microchip/at91-foxg20.dts
@@ -131,7 +131,7 @@
};
};
- usb0: ohci@500000 {
+ usb0: usb@500000 {
num-ports = <2>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/microchip/at91-qil_a9260.dts b/arch/arm/boot/dts/microchip/at91-qil_a9260.dts
index 5ccb3c139592..892dbd8dbbed 100644
--- a/arch/arm/boot/dts/microchip/at91-qil_a9260.dts
+++ b/arch/arm/boot/dts/microchip/at91-qil_a9260.dts
@@ -114,7 +114,7 @@
};
};
- usb0: ohci@500000 {
+ usb0: usb@500000 {
num-ports = <2>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/microchip/at91-sam9_l9260.dts b/arch/arm/boot/dts/microchip/at91-sam9_l9260.dts
index 2fb51b9aca2a..49dc1a4ccb36 100644
--- a/arch/arm/boot/dts/microchip/at91-sam9_l9260.dts
+++ b/arch/arm/boot/dts/microchip/at91-sam9_l9260.dts
@@ -105,7 +105,7 @@
status = "okay";
};
- usb0: ohci@500000 {
+ usb0: usb@500000 {
status = "okay";
};
diff --git a/arch/arm/boot/dts/microchip/at91-sama5d27_som1_ek.dts b/arch/arm/boot/dts/microchip/at91-sama5d27_som1_ek.dts
index f3ffb8f01d8a..45edf6214cf7 100644
--- a/arch/arm/boot/dts/microchip/at91-sama5d27_som1_ek.dts
+++ b/arch/arm/boot/dts/microchip/at91-sama5d27_som1_ek.dts
@@ -37,7 +37,7 @@
status = "okay";
};
- usb1: ohci@400000 {
+ usb1: usb@400000 {
num-ports = <3>;
atmel,vbus-gpio = <0 /* &pioA PIN_PD20 GPIO_ACTIVE_HIGH */
&pioA PIN_PA27 GPIO_ACTIVE_HIGH
@@ -48,7 +48,7 @@
status = "okay";
};
- usb2: ehci@500000 {
+ usb2: usb@500000 {
status = "okay";
};
diff --git a/arch/arm/boot/dts/microchip/at91-sama5d2_ptc_ek.dts b/arch/arm/boot/dts/microchip/at91-sama5d2_ptc_ek.dts
index e4ae60ef5f8a..10d69f6957cf 100644
--- a/arch/arm/boot/dts/microchip/at91-sama5d2_ptc_ek.dts
+++ b/arch/arm/boot/dts/microchip/at91-sama5d2_ptc_ek.dts
@@ -47,7 +47,7 @@
status = "okay";
};
- usb1: ohci@400000 {
+ usb1: usb@400000 {
num-ports = <3>;
atmel,vbus-gpio = <0
&pioA PIN_PB12 GPIO_ACTIVE_HIGH
@@ -58,7 +58,7 @@
status = "okay";
};
- usb2: ehci@500000 {
+ usb2: usb@500000 {
status = "okay";
};
diff --git a/arch/arm/boot/dts/microchip/at91-sama5d2_xplained.dts b/arch/arm/boot/dts/microchip/at91-sama5d2_xplained.dts
index 4bab3f25b855..7e77a55ed41d 100644
--- a/arch/arm/boot/dts/microchip/at91-sama5d2_xplained.dts
+++ b/arch/arm/boot/dts/microchip/at91-sama5d2_xplained.dts
@@ -46,7 +46,7 @@
status = "okay";
};
- usb1: ohci@400000 {
+ usb1: usb@400000 {
num-ports = <3>;
atmel,vbus-gpio = <0 /* &pioA PIN_PB9 GPIO_ACTIVE_HIGH */
&pioA PIN_PB10 GPIO_ACTIVE_HIGH
@@ -57,7 +57,7 @@
status = "okay";
};
- usb2: ehci@500000 {
+ usb2: usb@500000 {
status = "okay";
};
diff --git a/arch/arm/boot/dts/microchip/at91-sama5d3_xplained.dts b/arch/arm/boot/dts/microchip/at91-sama5d3_xplained.dts
index 5662992cf213..d2c43957497d 100644
--- a/arch/arm/boot/dts/microchip/at91-sama5d3_xplained.dts
+++ b/arch/arm/boot/dts/microchip/at91-sama5d3_xplained.dts
@@ -283,7 +283,7 @@
status = "okay";
};
- usb1: ohci@600000 {
+ usb1: usb@600000 {
num-ports = <3>;
atmel,vbus-gpio = <0
&pioE 3 GPIO_ACTIVE_LOW
@@ -294,7 +294,7 @@
status = "okay";
};
- usb2: ehci@700000 {
+ usb2: usb@700000 {
status = "okay";
};
diff --git a/arch/arm/boot/dts/microchip/at91-sama5d4_ma5d4evk.dts b/arch/arm/boot/dts/microchip/at91-sama5d4_ma5d4evk.dts
index 8adf567f2f0f..b9725e400501 100644
--- a/arch/arm/boot/dts/microchip/at91-sama5d4_ma5d4evk.dts
+++ b/arch/arm/boot/dts/microchip/at91-sama5d4_ma5d4evk.dts
@@ -22,7 +22,7 @@
status = "okay";
};
- usb1: ohci@500000 {
+ usb1: usb@500000 {
num-ports = <3>;
atmel,vbus-gpio = <0
&pioE 11 GPIO_ACTIVE_LOW
@@ -31,7 +31,7 @@
status = "okay";
};
- usb2: ehci@600000 {
+ usb2: usb@600000 {
status = "okay";
};
diff --git a/arch/arm/boot/dts/microchip/at91-sama5d4_xplained.dts b/arch/arm/boot/dts/microchip/at91-sama5d4_xplained.dts
index 95d701d13fef..0ecccb9a809d 100644
--- a/arch/arm/boot/dts/microchip/at91-sama5d4_xplained.dts
+++ b/arch/arm/boot/dts/microchip/at91-sama5d4_xplained.dts
@@ -164,7 +164,7 @@
status = "okay";
};
- usb1: ohci@500000 {
+ usb1: usb@500000 {
num-ports = <3>;
atmel,vbus-gpio = <0
&pioE 11 GPIO_ACTIVE_HIGH
@@ -175,7 +175,7 @@
status = "okay";
};
- usb2: ehci@600000 {
+ usb2: usb@600000 {
status = "okay";
};
diff --git a/arch/arm/boot/dts/microchip/at91-sama5d4ek.dts b/arch/arm/boot/dts/microchip/at91-sama5d4ek.dts
index 20ac775059ca..69107d6cd26c 100644
--- a/arch/arm/boot/dts/microchip/at91-sama5d4ek.dts
+++ b/arch/arm/boot/dts/microchip/at91-sama5d4ek.dts
@@ -198,7 +198,7 @@
status = "okay";
};
- usb1: ohci@500000 {
+ usb1: usb@500000 {
num-ports = <3>;
atmel,vbus-gpio = <0 /* &pioE 10 GPIO_ACTIVE_LOW */
&pioE 11 GPIO_ACTIVE_LOW
@@ -207,7 +207,7 @@
status = "okay";
};
- usb2: ehci@600000 {
+ usb2: usb@600000 {
status = "okay";
};
diff --git a/arch/arm/boot/dts/microchip/at91-sama7d65_curiosity.dts b/arch/arm/boot/dts/microchip/at91-sama7d65_curiosity.dts
index 0f86360fb733..30fdc4f55a3b 100644
--- a/arch/arm/boot/dts/microchip/at91-sama7d65_curiosity.dts
+++ b/arch/arm/boot/dts/microchip/at91-sama7d65_curiosity.dts
@@ -32,6 +32,18 @@
};
};
+&dma0 {
+ status = "okay";
+};
+
+&dma1 {
+ status = "okay";
+};
+
+&dma2 {
+ status = "okay";
+};
+
&flx6 {
atmel,flexcom-mode = <ATMEL_FLEXCOM_MODE_USART>;
status = "okay";
@@ -43,11 +55,63 @@
status = "okay";
};
+&flx10 {
+ atmel,flexcom-mode = <ATMEL_FLEXCOM_MODE_TWI>;
+ status = "okay";
+};
+
+&i2c10 {
+ dmas = <0>, <0>;
+ i2c-analog-filter;
+ i2c-digital-filter;
+ i2c-digital-filter-width-ns = <35>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c10_default>;
+ status = "okay";
+
+ power-monitor@10 {
+ compatible = "microchip,pac1934";
+ reg = <0x10>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ channel@1 {
+ reg = <0x1>;
+ shunt-resistor-micro-ohms = <47000>;
+ label = "VDD3V3";
+ };
+
+ channel@2 {
+ reg = <0x2>;
+ shunt-resistor-micro-ohms = <47000>;
+ label = "VDDIODDR";
+ };
+
+ channel@3 {
+ reg = <0x3>;
+ shunt-resistor-micro-ohms = <47000>;
+ label = "VDDCORE";
+ };
+
+ channel@4 {
+ reg = <0x4>;
+ shunt-resistor-micro-ohms = <47000>;
+ label = "VDDCPU";
+ };
+ };
+};
+
&main_xtal {
clock-frequency = <24000000>;
};
&pioa {
+ pinctrl_i2c10_default: i2c10-default {
+ pinmux = <PIN_PB19__FLEXCOM10_IO1>,
+ <PIN_PB20__FLEXCOM10_IO0>;
+ bias-pull-up;
+ };
+
pinctrl_sdmmc1_default: sdmmc1-default {
cmd-data {
pinmux = <PIN_PB22__SDMMC1_CMD>,
@@ -84,6 +148,15 @@
status = "okay";
};
+&shdwc {
+ debounce-delay-us = <976>;
+ status = "okay";
+
+ input@0 {
+ reg = <0>;
+ };
+};
+
&slow_xtal {
clock-frequency = <32768>;
};
diff --git a/arch/arm/boot/dts/microchip/at91-sama7g5ek.dts b/arch/arm/boot/dts/microchip/at91-sama7g5ek.dts
index 0f5e6ad438dd..2543599013b1 100644
--- a/arch/arm/boot/dts/microchip/at91-sama7g5ek.dts
+++ b/arch/arm/boot/dts/microchip/at91-sama7g5ek.dts
@@ -137,6 +137,7 @@
vref-supply = <&vddout25>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_mikrobus1_an_default &pinctrl_mikrobus2_an_default>;
+ atmel,trigger-edge-type = <IRQ_TYPE_EDGE_RISING>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/microchip/at91-vinco.dts b/arch/arm/boot/dts/microchip/at91-vinco.dts
index ecbdacf48708..c5fc51667066 100644
--- a/arch/arm/boot/dts/microchip/at91-vinco.dts
+++ b/arch/arm/boot/dts/microchip/at91-vinco.dts
@@ -162,7 +162,7 @@
status = "disabled";
};
- usb1: ohci@500000 {
+ usb1: usb@500000 {
num-ports = <3>;
atmel,vbus-gpio = <0
&pioE 11 GPIO_ACTIVE_LOW
@@ -171,7 +171,7 @@
status = "disabled";
};
- usb2: ehci@600000 {
+ usb2: usb@600000 {
/* 4G Modem */
status = "okay";
};
diff --git a/arch/arm/boot/dts/microchip/at91rm9200.dtsi b/arch/arm/boot/dts/microchip/at91rm9200.dtsi
index 02a838541dc3..2a4c83d88733 100644
--- a/arch/arm/boot/dts/microchip/at91rm9200.dtsi
+++ b/arch/arm/boot/dts/microchip/at91rm9200.dtsi
@@ -702,7 +702,7 @@
status = "disabled";
};
- usb0: ohci@300000 {
+ usb0: usb@300000 {
compatible = "atmel,at91rm9200-ohci", "usb-ohci";
reg = <0x00300000 0x100000>;
interrupts = <23 IRQ_TYPE_LEVEL_HIGH 2>;
diff --git a/arch/arm/boot/dts/microchip/at91rm9200ek.dts b/arch/arm/boot/dts/microchip/at91rm9200ek.dts
index 0bf472b157a5..ce691c4692b9 100644
--- a/arch/arm/boot/dts/microchip/at91rm9200ek.dts
+++ b/arch/arm/boot/dts/microchip/at91rm9200ek.dts
@@ -89,7 +89,7 @@
};
};
- usb0: ohci@300000 {
+ usb0: usb@300000 {
num-ports = <2>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/microchip/at91sam9260.dtsi b/arch/arm/boot/dts/microchip/at91sam9260.dtsi
index 0038183e9a53..ec973f07a961 100644
--- a/arch/arm/boot/dts/microchip/at91sam9260.dtsi
+++ b/arch/arm/boot/dts/microchip/at91sam9260.dtsi
@@ -742,7 +742,7 @@
};
};
- usb0: ohci@500000 {
+ usb0: usb@500000 {
compatible = "atmel,at91rm9200-ohci", "usb-ohci";
reg = <0x00500000 0x100000>;
interrupts = <20 IRQ_TYPE_LEVEL_HIGH 2>;
diff --git a/arch/arm/boot/dts/microchip/at91sam9260ek.dts b/arch/arm/boot/dts/microchip/at91sam9260ek.dts
index e8e65e60564d..8522a210b484 100644
--- a/arch/arm/boot/dts/microchip/at91sam9260ek.dts
+++ b/arch/arm/boot/dts/microchip/at91sam9260ek.dts
@@ -131,7 +131,7 @@
};
};
- usb0: ohci@500000 {
+ usb0: usb@500000 {
num-ports = <2>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/microchip/at91sam9261.dtsi b/arch/arm/boot/dts/microchip/at91sam9261.dtsi
index b57a7fd67197..0b556c234557 100644
--- a/arch/arm/boot/dts/microchip/at91sam9261.dtsi
+++ b/arch/arm/boot/dts/microchip/at91sam9261.dtsi
@@ -77,7 +77,7 @@
#size-cells = <1>;
ranges;
- usb0: ohci@500000 {
+ usb0: usb@500000 {
compatible = "atmel,at91rm9200-ohci", "usb-ohci";
reg = <0x00500000 0x100000>;
interrupts = <20 IRQ_TYPE_LEVEL_HIGH 2>;
diff --git a/arch/arm/boot/dts/microchip/at91sam9261ek.dts b/arch/arm/boot/dts/microchip/at91sam9261ek.dts
index a8f523131cd6..313bc2797fde 100644
--- a/arch/arm/boot/dts/microchip/at91sam9261ek.dts
+++ b/arch/arm/boot/dts/microchip/at91sam9261ek.dts
@@ -31,7 +31,7 @@
};
ahb {
- usb0: ohci@500000 {
+ usb0: usb@500000 {
status = "okay";
};
diff --git a/arch/arm/boot/dts/microchip/at91sam9263.dtsi b/arch/arm/boot/dts/microchip/at91sam9263.dtsi
index b95d4016ae9f..3e9e5ce7c6c8 100644
--- a/arch/arm/boot/dts/microchip/at91sam9263.dtsi
+++ b/arch/arm/boot/dts/microchip/at91sam9263.dtsi
@@ -768,7 +768,7 @@
status = "disabled";
};
- usb0: ohci@a00000 {
+ usb0: usb@a00000 {
compatible = "atmel,at91rm9200-ohci", "usb-ohci";
reg = <0x00a00000 0x100000>;
interrupts = <29 IRQ_TYPE_LEVEL_HIGH 2>;
diff --git a/arch/arm/boot/dts/microchip/at91sam9263ek.dts b/arch/arm/boot/dts/microchip/at91sam9263ek.dts
index f25692543d71..471ea25296aa 100644
--- a/arch/arm/boot/dts/microchip/at91sam9263ek.dts
+++ b/arch/arm/boot/dts/microchip/at91sam9263ek.dts
@@ -207,7 +207,7 @@
};
};
- usb0: ohci@a00000 {
+ usb0: usb@a00000 {
num-ports = <2>;
status = "okay";
atmel,vbus-gpio = <&pioA 24 GPIO_ACTIVE_HIGH
diff --git a/arch/arm/boot/dts/microchip/at91sam9g20ek_common.dtsi b/arch/arm/boot/dts/microchip/at91sam9g20ek_common.dtsi
index 4e7cfbbd4241..84a7287107f8 100644
--- a/arch/arm/boot/dts/microchip/at91sam9g20ek_common.dtsi
+++ b/arch/arm/boot/dts/microchip/at91sam9g20ek_common.dtsi
@@ -211,7 +211,7 @@
};
};
- usb0: ohci@500000 {
+ usb0: usb@500000 {
num-ports = <2>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/microchip/at91sam9g45.dtsi b/arch/arm/boot/dts/microchip/at91sam9g45.dtsi
index 157d306ef5c9..535e26e05e99 100644
--- a/arch/arm/boot/dts/microchip/at91sam9g45.dtsi
+++ b/arch/arm/boot/dts/microchip/at91sam9g45.dtsi
@@ -964,7 +964,7 @@
status = "disabled";
};
- usb0: ohci@700000 {
+ usb0: usb@700000 {
compatible = "atmel,at91rm9200-ohci", "usb-ohci";
reg = <0x00700000 0x100000>;
interrupts = <22 IRQ_TYPE_LEVEL_HIGH 2>;
@@ -973,7 +973,7 @@
status = "disabled";
};
- usb1: ehci@800000 {
+ usb1: usb@800000 {
compatible = "atmel,at91sam9g45-ehci", "usb-ehci";
reg = <0x00800000 0x100000>;
interrupts = <22 IRQ_TYPE_LEVEL_HIGH 2>;
diff --git a/arch/arm/boot/dts/microchip/at91sam9m10g45ek.dts b/arch/arm/boot/dts/microchip/at91sam9m10g45ek.dts
index 071db4f16313..2a31b2f14893 100644
--- a/arch/arm/boot/dts/microchip/at91sam9m10g45ek.dts
+++ b/arch/arm/boot/dts/microchip/at91sam9m10g45ek.dts
@@ -303,14 +303,14 @@
};
};
- usb0: ohci@700000 {
+ usb0: usb@700000 {
status = "okay";
num-ports = <2>;
atmel,vbus-gpio = <&pioD 1 GPIO_ACTIVE_LOW
&pioD 3 GPIO_ACTIVE_LOW>;
};
- usb1: ehci@800000 {
+ usb1: usb@800000 {
status = "okay";
};
};
diff --git a/arch/arm/boot/dts/microchip/at91sam9n12.dtsi b/arch/arm/boot/dts/microchip/at91sam9n12.dtsi
index 844bd50943fc..2f930c39ce4d 100644
--- a/arch/arm/boot/dts/microchip/at91sam9n12.dtsi
+++ b/arch/arm/boot/dts/microchip/at91sam9n12.dtsi
@@ -748,7 +748,7 @@
};
};
- usb0: ohci@500000 {
+ usb0: usb@500000 {
compatible = "atmel,at91rm9200-ohci", "usb-ohci";
reg = <0x00500000 0x00100000>;
interrupts = <22 IRQ_TYPE_LEVEL_HIGH 2>;
diff --git a/arch/arm/boot/dts/microchip/at91sam9n12ek.dts b/arch/arm/boot/dts/microchip/at91sam9n12ek.dts
index 643c3b2ab97e..b06a54e8e237 100644
--- a/arch/arm/boot/dts/microchip/at91sam9n12ek.dts
+++ b/arch/arm/boot/dts/microchip/at91sam9n12ek.dts
@@ -180,7 +180,7 @@
};
};
- usb0: ohci@500000 {
+ usb0: usb@500000 {
num-ports = <1>;
atmel,vbus-gpio = <&pioB 7 GPIO_ACTIVE_LOW>;
status = "okay";
diff --git a/arch/arm/boot/dts/microchip/at91sam9x5.dtsi b/arch/arm/boot/dts/microchip/at91sam9x5.dtsi
index 27c1f2861cc3..17bdf1e4db01 100644
--- a/arch/arm/boot/dts/microchip/at91sam9x5.dtsi
+++ b/arch/arm/boot/dts/microchip/at91sam9x5.dtsi
@@ -886,7 +886,7 @@
};
};
- usb0: ohci@600000 {
+ usb0: usb@600000 {
compatible = "atmel,at91rm9200-ohci", "usb-ohci";
reg = <0x00600000 0x100000>;
interrupts = <22 IRQ_TYPE_LEVEL_HIGH 2>;
@@ -895,7 +895,7 @@
status = "disabled";
};
- usb1: ehci@700000 {
+ usb1: usb@700000 {
compatible = "atmel,at91sam9g45-ehci", "usb-ehci";
reg = <0x00700000 0x100000>;
interrupts = <22 IRQ_TYPE_LEVEL_HIGH 2>;
diff --git a/arch/arm/boot/dts/microchip/ethernut5.dts b/arch/arm/boot/dts/microchip/ethernut5.dts
index ad7a0850252a..52ccef31b391 100644
--- a/arch/arm/boot/dts/microchip/ethernut5.dts
+++ b/arch/arm/boot/dts/microchip/ethernut5.dts
@@ -101,7 +101,7 @@
};
};
- usb0: ohci@500000 {
+ usb0: usb@500000 {
num-ports = <2>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/microchip/evk-pro3.dts b/arch/arm/boot/dts/microchip/evk-pro3.dts
index 6d519d02d190..40c5111c2f0a 100644
--- a/arch/arm/boot/dts/microchip/evk-pro3.dts
+++ b/arch/arm/boot/dts/microchip/evk-pro3.dts
@@ -45,7 +45,7 @@
};
};
- usb0: ohci@500000 {
+ usb0: usb@500000 {
num-ports = <2>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/microchip/mpa1600.dts b/arch/arm/boot/dts/microchip/mpa1600.dts
index 005c2758e229..2a97e2c0b894 100644
--- a/arch/arm/boot/dts/microchip/mpa1600.dts
+++ b/arch/arm/boot/dts/microchip/mpa1600.dts
@@ -57,7 +57,7 @@
};
};
- usb0: ohci@300000 {
+ usb0: usb@300000 {
num-ports = <1>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/microchip/pm9g45.dts b/arch/arm/boot/dts/microchip/pm9g45.dts
index c349fd3758a6..2258e62f5864 100644
--- a/arch/arm/boot/dts/microchip/pm9g45.dts
+++ b/arch/arm/boot/dts/microchip/pm9g45.dts
@@ -139,12 +139,12 @@
};
};
- usb0: ohci@700000 {
+ usb0: usb@700000 {
status = "okay";
num-ports = <2>;
};
- usb1: ehci@800000 {
+ usb1: usb@800000 {
status = "okay";
};
};
diff --git a/arch/arm/boot/dts/microchip/sam9x60.dtsi b/arch/arm/boot/dts/microchip/sam9x60.dtsi
index b8b2c1ddf3f1..b075865e6a76 100644
--- a/arch/arm/boot/dts/microchip/sam9x60.dtsi
+++ b/arch/arm/boot/dts/microchip/sam9x60.dtsi
@@ -88,7 +88,7 @@
status = "disabled";
};
- usb1: ohci@600000 {
+ usb1: usb@600000 {
compatible = "atmel,at91rm9200-ohci", "usb-ohci";
reg = <0x00600000 0x100000>;
interrupts = <22 IRQ_TYPE_LEVEL_HIGH 2>;
@@ -97,7 +97,7 @@
status = "disabled";
};
- usb2: ehci@700000 {
+ usb2: usb@700000 {
compatible = "atmel,at91sam9g45-ehci", "usb-ehci";
reg = <0x00700000 0x100000>;
interrupts = <22 IRQ_TYPE_LEVEL_HIGH 2>;
diff --git a/arch/arm/boot/dts/microchip/sama5d2.dtsi b/arch/arm/boot/dts/microchip/sama5d2.dtsi
index 3f99451aef83..dc22fb679333 100644
--- a/arch/arm/boot/dts/microchip/sama5d2.dtsi
+++ b/arch/arm/boot/dts/microchip/sama5d2.dtsi
@@ -136,7 +136,7 @@
status = "disabled";
};
- usb1: ohci@400000 {
+ usb1: usb@400000 {
compatible = "atmel,at91rm9200-ohci", "usb-ohci";
reg = <0x00400000 0x100000>;
interrupts = <41 IRQ_TYPE_LEVEL_HIGH 2>;
@@ -145,7 +145,7 @@
status = "disabled";
};
- usb2: ehci@500000 {
+ usb2: usb@500000 {
compatible = "atmel,at91sam9g45-ehci", "usb-ehci";
reg = <0x00500000 0x100000>;
interrupts = <41 IRQ_TYPE_LEVEL_HIGH 2>;
diff --git a/arch/arm/boot/dts/microchip/sama5d3.dtsi b/arch/arm/boot/dts/microchip/sama5d3.dtsi
index 70f380c399ce..e95799c17fdb 100644
--- a/arch/arm/boot/dts/microchip/sama5d3.dtsi
+++ b/arch/arm/boot/dts/microchip/sama5d3.dtsi
@@ -1074,7 +1074,7 @@
status = "disabled";
};
- usb1: ohci@600000 {
+ usb1: usb@600000 {
compatible = "atmel,at91rm9200-ohci", "usb-ohci";
reg = <0x00600000 0x100000>;
interrupts = <32 IRQ_TYPE_LEVEL_HIGH 2>;
@@ -1083,7 +1083,7 @@
status = "disabled";
};
- usb2: ehci@700000 {
+ usb2: usb@700000 {
compatible = "atmel,at91sam9g45-ehci", "usb-ehci";
reg = <0x00700000 0x100000>;
interrupts = <32 IRQ_TYPE_LEVEL_HIGH 2>;
diff --git a/arch/arm/boot/dts/microchip/sama5d3xmb.dtsi b/arch/arm/boot/dts/microchip/sama5d3xmb.dtsi
index 3652c9e24124..90da04b84b39 100644
--- a/arch/arm/boot/dts/microchip/sama5d3xmb.dtsi
+++ b/arch/arm/boot/dts/microchip/sama5d3xmb.dtsi
@@ -172,7 +172,7 @@
status = "okay";
};
- usb1: ohci@600000 {
+ usb1: usb@600000 {
num-ports = <3>;
atmel,vbus-gpio = <&pioD 25 GPIO_ACTIVE_HIGH
&pioD 26 GPIO_ACTIVE_LOW
@@ -181,7 +181,7 @@
status = "okay";
};
- usb2: ehci@700000 {
+ usb2: usb@700000 {
status = "okay";
};
};
diff --git a/arch/arm/boot/dts/microchip/sama5d4.dtsi b/arch/arm/boot/dts/microchip/sama5d4.dtsi
index 355132628604..59a7d557c7cb 100644
--- a/arch/arm/boot/dts/microchip/sama5d4.dtsi
+++ b/arch/arm/boot/dts/microchip/sama5d4.dtsi
@@ -119,7 +119,7 @@
status = "disabled";
};
- usb1: ohci@500000 {
+ usb1: usb@500000 {
compatible = "atmel,at91rm9200-ohci", "usb-ohci";
reg = <0x00500000 0x100000>;
interrupts = <46 IRQ_TYPE_LEVEL_HIGH 2>;
@@ -128,7 +128,7 @@
status = "disabled";
};
- usb2: ehci@600000 {
+ usb2: usb@600000 {
compatible = "atmel,at91sam9g45-ehci", "usb-ehci";
reg = <0x00600000 0x100000>;
interrupts = <46 IRQ_TYPE_LEVEL_HIGH 2>;
diff --git a/arch/arm/boot/dts/microchip/sama7d65.dtsi b/arch/arm/boot/dts/microchip/sama7d65.dtsi
index 854b30d15dcd..b6710ccd4c36 100644
--- a/arch/arm/boot/dts/microchip/sama7d65.dtsi
+++ b/arch/arm/boot/dts/microchip/sama7d65.dtsi
@@ -9,6 +9,7 @@
*/
#include <dt-bindings/clock/at91.h>
+#include <dt-bindings/dma/at91.h>
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/interrupt-controller/irq.h>
@@ -52,6 +53,11 @@
#address-cells = <1>;
#size-cells = <1>;
+ sfrbu: sfr@e0008000 {
+ compatible ="microchip,sama7d65-sfrbu", "atmel,sama5d2-sfrbu", "syscon";
+ reg = <0xe0008000 0x20>;
+ };
+
pioa: pinctrl@e0014000 {
compatible = "microchip,sama7d65-pinctrl", "microchip,sama7g5-pinctrl";
reg = <0xe0014000 0x800>;
@@ -76,6 +82,31 @@
clock-names = "td_slck", "md_slck", "main_xtal";
};
+ ps_wdt: watchdog@e001d000 {
+ compatible = "microchip,sama7d65-wdt", "microchip,sama7g5-wdt";
+ reg = <0xe001d000 0x30>;
+ interrupts = <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk32k 0>;
+ };
+
+ reset_controller: reset-controller@e001d100 {
+ compatible = "microchip,sama7d65-rstc", "microchip,sama7g5-rstc";
+ reg = <0xe001d100 0xc>, <0xe001d1e4 0x4>;
+ #reset-cells = <1>;
+ clocks = <&clk32k 0>;
+ };
+
+ shdwc: poweroff@e001d200 {
+ compatible = "microchip,sama7d65-shdwc", "microchip,sama7g5-shdwc", "syscon";
+ reg = <0xe001d200 0x20>;
+ clocks = <&clk32k 0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ atmel,wakeup-rtc-timer;
+ atmel,wakeup-rtt-timer;
+ status = "disabled";
+ };
+
clk32k: clock-controller@e001d500 {
compatible = "microchip,sama7d65-sckc", "microchip,sam9x60-sckc";
reg = <0xe001d500 0x4>;
@@ -83,6 +114,29 @@
#clock-cells = <1>;
};
+ rtc: rtc@e001d800 {
+ compatible = "microchip,sama7d65-rtc", "microchip,sam9x60-rtc";
+ reg = <0xe001d800 0x30>;
+ interrupts = <GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk32k 1>;
+ };
+
+ chipid@e0020000 {
+ compatible = "microchip,sama7d65-chipid";
+ reg = <0xe0020000 0x8>;
+ };
+
+ dma2: dma-controller@e1200000 {
+ compatible = "microchip,sama7d65-dma", "microchip,sama7g5-dma";
+ reg = <0xe1200000 0x1000>;
+ interrupts = <GIC_SPI 111 IRQ_TYPE_LEVEL_HIGH>;
+ #dma-cells = <1>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 23>;
+ clock-names = "dma_clk";
+ dma-requests = <0>;
+ status = "disabled";
+ };
+
sdmmc1: mmc@e1208000 {
compatible = "microchip,sama7d65-sdhci", "microchip,sam9x60-sdhci";
reg = <0xe1208000 0x400>;
@@ -95,6 +149,26 @@
status = "disabled";
};
+ dma0: dma-controller@e1610000 {
+ compatible = "microchip,sama7d65-dma", "microchip,sama7g5-dma";
+ reg = <0xe1610000 0x1000>;
+ interrupts = <GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH>;
+ #dma-cells = <1>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 21>;
+ clock-names = "dma_clk";
+ status = "disabled";
+ };
+
+ dma1: dma-controller@e1614000 {
+ compatible = "microchip,sama7d65-dma", "microchip,sama7g5-dma";
+ reg = <0xe1614000 0x1000>;
+ interrupts = <GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>;
+ #dma-cells = <1>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 22>;
+ clock-names = "dma_clk";
+ status = "disabled";
+ };
+
pit64b0: timer@e1800000 {
compatible = "microchip,sama7d65-pit64b", "microchip,sam9x60-pit64b";
reg = <0xe1800000 0x100>;
@@ -132,6 +206,27 @@
};
};
+ flx10: flexcom@e2824000 {
+ compatible = "microchip,sama7d65-flexcom", "atmel,sama5d2-flexcom";
+ reg = <0xe2824000 0x200>;
+ ranges = <0x0 0xe2824000 0x800>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 44>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ status = "disabled";
+
+ i2c10: i2c@600 {
+ compatible = "microchip,sama7d65-i2c", "microchip,sam9x60-i2c";
+ reg = <0x600 0x200>;
+ interrupts = <GIC_SPI 44 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 44>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ atmel,fifo-size = <32>;
+ status = "disabled";
+ };
+ };
+
gic: interrupt-controller@e8c11000 {
compatible = "arm,cortex-a7-gic";
reg = <0xe8c11000 0x1000>,
diff --git a/arch/arm/boot/dts/microchip/tny_a9260.dts b/arch/arm/boot/dts/microchip/tny_a9260.dts
index ef6d586ce887..f0f2a787d669 100644
--- a/arch/arm/boot/dts/microchip/tny_a9260.dts
+++ b/arch/arm/boot/dts/microchip/tny_a9260.dts
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * tny_a9260.dts - Device Tree file for Caloa TNY A9260 board
+ * tny_a9260.dts - Device Tree file for Calao TNY A9260 board
*
* Copyright (C) 2012 Jean-Christophe PLAGNIOL-VILLARD <plagnioj@jcrosoft.com>
*/
diff --git a/arch/arm/boot/dts/microchip/tny_a9260_common.dtsi b/arch/arm/boot/dts/microchip/tny_a9260_common.dtsi
index 70e5635c78ed..4d4377f51bec 100644
--- a/arch/arm/boot/dts/microchip/tny_a9260_common.dtsi
+++ b/arch/arm/boot/dts/microchip/tny_a9260_common.dtsi
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * tny_a9260_common.dtsi - Device Tree file for Caloa TNY A926x board
+ * tny_a9260_common.dtsi - Device Tree file for Calao TNY A926x board
*
* Copyright (C) 2012 Jean-Christophe PLAGNIOL-VILLARD <plagnioj@jcrosoft.com>
*/
diff --git a/arch/arm/boot/dts/microchip/tny_a9263.dts b/arch/arm/boot/dts/microchip/tny_a9263.dts
index 62b7d9f9a926..3dd48b3e06da 100644
--- a/arch/arm/boot/dts/microchip/tny_a9263.dts
+++ b/arch/arm/boot/dts/microchip/tny_a9263.dts
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * usb_a9263.dts - Device Tree file for Caloa USB A9293 board
+ * usb_a9263.dts - Device Tree file for Calao USB A9293 board
*
* Copyright (C) 2012 Jean-Christophe PLAGNIOL-VILLARD <plagnioj@jcrosoft.com>
*/
diff --git a/arch/arm/boot/dts/microchip/tny_a9g20.dts b/arch/arm/boot/dts/microchip/tny_a9g20.dts
index 118d766a1265..cebd5696a2c1 100644
--- a/arch/arm/boot/dts/microchip/tny_a9g20.dts
+++ b/arch/arm/boot/dts/microchip/tny_a9g20.dts
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * tny_a9g20.dts - Device Tree file for Caloa TNY A9G20 board
+ * tny_a9g20.dts - Device Tree file for Calao TNY A9G20 board
*
* Copyright (C) 2012 Jean-Christophe PLAGNIOL-VILLARD <plagnioj@jcrosoft.com>
*/
diff --git a/arch/arm/boot/dts/microchip/usb_a9260.dts b/arch/arm/boot/dts/microchip/usb_a9260.dts
index 66f8da89007d..e7f7b259ccf3 100644
--- a/arch/arm/boot/dts/microchip/usb_a9260.dts
+++ b/arch/arm/boot/dts/microchip/usb_a9260.dts
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
- * usb_a9260.dts - Device Tree file for Caloa USB A9260 board
+ * usb_a9260.dts - Device Tree file for Calao USB A9260 board
*
* Copyright (C) 2011-2012 Jean-Christophe PLAGNIOL-VILLARD <plagnioj@jcrosoft.com>
*/
diff --git a/arch/arm/boot/dts/microchip/usb_a9260_common.dtsi b/arch/arm/boot/dts/microchip/usb_a9260_common.dtsi
index 8744b5f6f792..8c3530638c6d 100644
--- a/arch/arm/boot/dts/microchip/usb_a9260_common.dtsi
+++ b/arch/arm/boot/dts/microchip/usb_a9260_common.dtsi
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
- * usb_a926x.dts - Device Tree file for Caloa USB A926x board
+ * usb_a926x.dts - Device Tree file for Calao USB A926x board
*
* Copyright (C) 2011-2012 Jean-Christophe PLAGNIOL-VILLARD <plagnioj@jcrosoft.com>
*/
@@ -111,7 +111,7 @@
};
};
- usb0: ohci@500000 {
+ usb0: usb@500000 {
num-ports = <2>;
status = "okay";
};
@@ -122,17 +122,14 @@
user_led {
label = "user_led";
- gpios = <&pioB 21 GPIO_ACTIVE_LOW>;
- linux,default-trigger = "heartbeat";
+ gpios = <&pioB 21 GPIO_ACTIVE_HIGH>;
};
};
gpio_keys {
compatible = "gpio-keys";
- #address-cells = <1>;
- #size-cells = <0>;
- user_pb {
+ button-user-pb {
label = "user_pb";
gpios = <&pioB 10 GPIO_ACTIVE_LOW>;
linux,code = <28>;
diff --git a/arch/arm/boot/dts/microchip/usb_a9263.dts b/arch/arm/boot/dts/microchip/usb_a9263.dts
index 45745915b2e1..60d7936dc562 100644
--- a/arch/arm/boot/dts/microchip/usb_a9263.dts
+++ b/arch/arm/boot/dts/microchip/usb_a9263.dts
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * usb_a9263.dts - Device Tree file for Caloa USB A9293 board
+ * usb_a9263.dts - Device Tree file for Calao USB A9293 board
*
* Copyright (C) 2012 Jean-Christophe PLAGNIOL-VILLARD <plagnioj@jcrosoft.com>
*/
@@ -9,7 +9,7 @@
/ {
model = "Calao USB A9263";
- compatible = "atmel,usb-a9263", "atmel,at91sam9263", "atmel,at91sam9";
+ compatible = "calao,usb-a9263", "atmel,at91sam9263", "atmel,at91sam9";
chosen {
bootargs = "mem=64M console=ttyS0,115200 root=/dev/mtdblock5 rw rootfstype=ubifs";
@@ -139,7 +139,7 @@
};
};
- usb0: ohci@a00000 {
+ usb0: usb@a00000 {
num-ports = <2>;
status = "okay";
};
@@ -151,16 +151,13 @@
user_led {
label = "user_led";
gpios = <&pioB 21 GPIO_ACTIVE_HIGH>;
- linux,default-trigger = "heartbeat";
};
};
gpio_keys {
compatible = "gpio-keys";
- #address-cells = <1>;
- #size-cells = <0>;
- user_pb {
+ button-user-pb {
label = "user_pb";
gpios = <&pioB 10 GPIO_ACTIVE_LOW>;
linux,code = <28>;
diff --git a/arch/arm/boot/dts/microchip/usb_a9g20-dab-mmx.dtsi b/arch/arm/boot/dts/microchip/usb_a9g20-dab-mmx.dtsi
index 08d58081201a..5b1d80c0ab26 100644
--- a/arch/arm/boot/dts/microchip/usb_a9g20-dab-mmx.dtsi
+++ b/arch/arm/boot/dts/microchip/usb_a9g20-dab-mmx.dtsi
@@ -65,28 +65,26 @@
gpio_keys {
compatible = "gpio-keys";
- #address-cells = <1>;
- #size-cells = <0>;
- user_pb1 {
+ button-user-pb1 {
label = "user_pb1";
gpios = <&pioB 25 GPIO_ACTIVE_LOW>;
linux,code = <0x100>;
};
- user_pb2 {
+ button-user-pb2 {
label = "user_pb2";
gpios = <&pioB 13 GPIO_ACTIVE_LOW>;
linux,code = <0x101>;
};
- user_pb3 {
+ button-user-pb3 {
label = "user_pb3";
gpios = <&pioA 26 GPIO_ACTIVE_LOW>;
linux,code = <0x102>;
};
- user_pb4 {
+ button-user-pb4 {
label = "user_pb4";
gpios = <&pioC 9 GPIO_ACTIVE_LOW>;
linux,code = <0x103>;
diff --git a/arch/arm/boot/dts/microchip/usb_a9g20.dts b/arch/arm/boot/dts/microchip/usb_a9g20.dts
index 2f667b083e81..a2f748141d4b 100644
--- a/arch/arm/boot/dts/microchip/usb_a9g20.dts
+++ b/arch/arm/boot/dts/microchip/usb_a9g20.dts
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
- * usb_a9g20.dts - Device Tree file for Caloa USB A9G20 board
+ * usb_a9g20.dts - Device Tree file for Calao USB A9G20 board
*
* Copyright (C) 2011 Jean-Christophe PLAGNIOL-VILLARD <plagnioj@jcrosoft.com>
*/
diff --git a/arch/arm/boot/dts/microchip/usb_a9g20_common.dtsi b/arch/arm/boot/dts/microchip/usb_a9g20_common.dtsi
index 7d10b36db1ee..f1946e0996b7 100644
--- a/arch/arm/boot/dts/microchip/usb_a9g20_common.dtsi
+++ b/arch/arm/boot/dts/microchip/usb_a9g20_common.dtsi
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
- * usb_a9g20.dts - Device Tree file for Caloa USB A9G20 board
+ * usb_a9g20.dts - Device Tree file for Calao USB A9G20 board
*
* Copyright (C) 2011 Jean-Christophe PLAGNIOL-VILLARD <plagnioj@jcrosoft.com>
*/
diff --git a/arch/arm/boot/dts/microchip/usb_a9g20_lpw.dts b/arch/arm/boot/dts/microchip/usb_a9g20_lpw.dts
index f65712015d40..4d104797176c 100644
--- a/arch/arm/boot/dts/microchip/usb_a9g20_lpw.dts
+++ b/arch/arm/boot/dts/microchip/usb_a9g20_lpw.dts
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
- * usb_a9g20_lpw.dts - Device Tree file for Caloa USB A9G20 Low Power board
+ * usb_a9g20_lpw.dts - Device Tree file for Calao USB A9G20 Low Power board
*
* Copyright (C) 2013 Jean-Christophe PLAGNIOL-VILLARD <plagnioj@jcrosoft.com>
*/
@@ -16,7 +16,7 @@
spi1: spi@fffcc000 {
cs-gpios = <&pioB 3 GPIO_ACTIVE_HIGH>;
status = "okay";
- mmc-slot@0 {
+ mmc@0 {
compatible = "mmc-spi-slot";
reg = <0>;
voltage-ranges = <3200 3400>;
diff --git a/arch/arm/boot/dts/nvidia/tegra114.dtsi b/arch/arm/boot/dts/nvidia/tegra114.dtsi
index 86f14e2fd29f..4caf2073c556 100644
--- a/arch/arm/boot/dts/nvidia/tegra114.dtsi
+++ b/arch/arm/boot/dts/nvidia/tegra114.dtsi
@@ -139,7 +139,7 @@
reg = <0x54400000 0x00040000>;
clocks = <&tegra_car TEGRA114_CLK_DSIB>,
<&tegra_car TEGRA114_CLK_DSIBLP>,
- <&tegra_car TEGRA114_CLK_PLL_D2_OUT0>;
+ <&tegra_car TEGRA114_CLK_PLL_D_OUT0>;
clock-names = "dsi", "lp", "parent";
resets = <&tegra_car 82>;
reset-names = "dsi";
@@ -577,6 +577,21 @@
#iommu-cells = <1>;
};
+ hda@70030000 {
+ compatible = "nvidia,tegra114-hda", "nvidia,tegra30-hda";
+ reg = <0x70030000 0x10000>;
+ interrupts = <GIC_SPI 81 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&tegra_car TEGRA114_CLK_HDA>,
+ <&tegra_car TEGRA114_CLK_HDA2HDMI>,
+ <&tegra_car TEGRA114_CLK_HDA2CODEC_2X>;
+ clock-names = "hda", "hda2hdmi", "hda2codec_2x";
+ resets = <&tegra_car 125>, /* hda */
+ <&tegra_car 128>, /* hda2hdmi */
+ <&tegra_car 111>; /* hda2codec_2x */
+ reset-names = "hda", "hda2hdmi", "hda2codec_2x";
+ status = "disabled";
+ };
+
ahub@70080000 {
compatible = "nvidia,tegra114-ahub";
reg = <0x70080000 0x200>,
@@ -805,31 +820,40 @@
#address-cells = <1>;
#size-cells = <0>;
- cpu@0 {
+ cpu0: cpu@0 {
device_type = "cpu";
compatible = "arm,cortex-a15";
reg = <0>;
};
- cpu@1 {
+ cpu1: cpu@1 {
device_type = "cpu";
compatible = "arm,cortex-a15";
reg = <1>;
};
- cpu@2 {
+ cpu2: cpu@2 {
device_type = "cpu";
compatible = "arm,cortex-a15";
reg = <2>;
};
- cpu@3 {
+ cpu3: cpu@3 {
device_type = "cpu";
compatible = "arm,cortex-a15";
reg = <3>;
};
};
+ pmu {
+ compatible = "arm,cortex-a15-pmu";
+ interrupts = <GIC_SPI 144 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 145 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 146 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 147 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-affinity = <&cpu0>, <&cpu1>, <&cpu2>, <&cpu3>;
+ };
+
timer {
compatible = "arm,armv7-timer";
interrupts =
diff --git a/arch/arm/boot/dts/nvidia/tegra124.dtsi b/arch/arm/boot/dts/nvidia/tegra124.dtsi
index 8f1fff373461..ec4f0e346b2b 100644
--- a/arch/arm/boot/dts/nvidia/tegra124.dtsi
+++ b/arch/arm/boot/dts/nvidia/tegra124.dtsi
@@ -165,6 +165,22 @@
status = "disabled";
};
+ dsia: dsi@54300000 {
+ compatible = "nvidia,tegra124-dsi";
+ reg = <0x0 0x54300000 0x0 0x00040000>;
+ clocks = <&tegra_car TEGRA124_CLK_DSIA>,
+ <&tegra_car TEGRA124_CLK_DSIALP>,
+ <&tegra_car TEGRA124_CLK_PLL_D_OUT0>;
+ clock-names = "dsi", "lp", "parent";
+ resets = <&tegra_car 48>;
+ reset-names = "dsi";
+ nvidia,mipi-calibrate = <&mipi 0x060>; /* DSIA & DSIB pads */
+ status = "disabled";
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
vic@54340000 {
compatible = "nvidia,tegra124-vic";
reg = <0x0 0x54340000 0x0 0x00040000>;
@@ -177,6 +193,22 @@
iommus = <&mc TEGRA_SWGROUP_VIC>;
};
+ dsib: dsi@54400000 {
+ compatible = "nvidia,tegra124-dsi";
+ reg = <0x0 0x54400000 0x0 0x00040000>;
+ clocks = <&tegra_car TEGRA124_CLK_DSIB>,
+ <&tegra_car TEGRA124_CLK_DSIBLP>,
+ <&tegra_car TEGRA124_CLK_PLL_D_OUT0>;
+ clock-names = "dsi", "lp", "parent";
+ resets = <&tegra_car 82>;
+ reset-names = "dsi";
+ nvidia,mipi-calibrate = <&mipi 0x180>; /* DSIC & DSID pads */
+ status = "disabled";
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
sor@54540000 {
compatible = "nvidia,tegra124-sor";
reg = <0x0 0x54540000 0x0 0x00040000>;
@@ -938,6 +970,14 @@
};
};
+ mipi: mipi@700e3000 {
+ compatible = "nvidia,tegra124-mipi";
+ reg = <0x0 0x700e3000 0x0 0x100>;
+ clocks = <&tegra_car TEGRA124_CLK_MIPI_CAL>;
+ clock-names = "mipi-cal";
+ #nvidia,mipi-calibrate-cells = <1>;
+ };
+
dfll: clock@70110000 {
compatible = "nvidia,tegra124-dfll";
reg = <0 0x70110000 0 0x100>, /* DFLL control */
diff --git a/arch/arm/boot/dts/nvidia/tegra20-asus-tf101.dts b/arch/arm/boot/dts/nvidia/tegra20-asus-tf101.dts
index e118809dc6d9..67764afeb013 100644
--- a/arch/arm/boot/dts/nvidia/tegra20-asus-tf101.dts
+++ b/arch/arm/boot/dts/nvidia/tegra20-asus-tf101.dts
@@ -1085,6 +1085,17 @@
sbs,poll-retry-count = <10>;
power-supplies = <&mains>;
};
+
+ /* Dynaimage ambient light sensor */
+ light-sensor@1c {
+ compatible = "dynaimage,al3000a";
+ reg = <0x1c>;
+
+ interrupt-parent = <&gpio>;
+ interrupts = <TEGRA_GPIO(Z, 2) IRQ_TYPE_LEVEL_HIGH>;
+
+ vdd-supply = <&vdd_1v8_sys>;
+ };
};
};
diff --git a/arch/arm/boot/dts/nxp/imx/Makefile b/arch/arm/boot/dts/nxp/imx/Makefile
index 39a153536d2a..8b3abe817e12 100644
--- a/arch/arm/boot/dts/nxp/imx/Makefile
+++ b/arch/arm/boot/dts/nxp/imx/Makefile
@@ -69,6 +69,10 @@ dtb-$(CONFIG_SOC_IMX6Q) += \
imx6dl-colibri-eval-v3.dtb \
imx6dl-colibri-iris.dtb \
imx6dl-colibri-iris-v2.dtb \
+ imx6dl-colibri-v1.2-aster.dtb \
+ imx6dl-colibri-v1.2-eval-v3.dtb \
+ imx6dl-colibri-v1.2-iris.dtb \
+ imx6dl-colibri-v1.2-iris-v2.dtb \
imx6dl-cubox-i.dtb \
imx6dl-cubox-i-emmc-som-v15.dtb \
imx6dl-cubox-i-som-v15.dtb \
@@ -158,6 +162,11 @@ dtb-$(CONFIG_SOC_IMX6Q) += \
imx6q-apalis-ixora.dtb \
imx6q-apalis-ixora-v1.1.dtb \
imx6q-apalis-ixora-v1.2.dtb \
+ imx6q-apalis-v1.2-eval.dtb \
+ imx6q-apalis-v1.2-eval-v1.2.dtb \
+ imx6q-apalis-v1.2-ixora.dtb \
+ imx6q-apalis-v1.2-ixora-v1.1.dtb \
+ imx6q-apalis-v1.2-ixora-v1.2.dtb \
imx6q-apf6dev.dtb \
imx6q-arm2.dtb \
imx6q-b450v3.dtb \
@@ -329,6 +338,7 @@ dtb-$(CONFIG_SOC_IMX6UL) += \
imx6ul-tx6ul-0010.dtb \
imx6ul-tx6ul-0011.dtb \
imx6ul-tx6ul-mainboard.dtb \
+ imx6ul-var-som-concerto.dtb \
imx6ull-14x14-evk.dtb \
imx6ull-colibri-aster.dtb \
imx6ull-colibri-emmc-aster.dtb \
diff --git a/arch/arm/boot/dts/nxp/imx/imx31.dtsi b/arch/arm/boot/dts/nxp/imx/imx31.dtsi
index 00006c90d9a7..813a81558c40 100644
--- a/arch/arm/boot/dts/nxp/imx/imx31.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/imx31.dtsi
@@ -340,7 +340,7 @@
#address-cells = <1>;
#size-cells = <1>;
- nfc: nand@b8000000 {
+ nfc: nand-controller@b8000000 {
compatible = "fsl,imx31-nand", "fsl,imx27-nand";
reg = <0xb8000000 0x1000>;
interrupts = <33>;
diff --git a/arch/arm/boot/dts/nxp/imx/imx50.dtsi b/arch/arm/boot/dts/nxp/imx/imx50.dtsi
index 1b6f444443dd..d76c496b3f71 100644
--- a/arch/arm/boot/dts/nxp/imx/imx50.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/imx50.dtsi
@@ -338,7 +338,7 @@
clks: ccm@53fd4000 {
compatible = "fsl,imx50-ccm";
reg = <0x53fd4000 0x4000>;
- interrupts = <0 71 0x04 0 72 0x04>;
+ interrupts = <71>, <72>;
#clock-cells = <1>;
};
diff --git a/arch/arm/boot/dts/nxp/imx/imx51.dtsi b/arch/arm/boot/dts/nxp/imx/imx51.dtsi
index cc88da4d7785..8323e3a56a1f 100644
--- a/arch/arm/boot/dts/nxp/imx/imx51.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/imx51.dtsi
@@ -458,7 +458,7 @@
clks: ccm@73fd4000 {
compatible = "fsl,imx51-ccm";
reg = <0x73fd4000 0x4000>;
- interrupts = <0 71 0x04 0 72 0x04>;
+ interrupts = <71>, <72>;
#clock-cells = <1>;
};
};
diff --git a/arch/arm/boot/dts/nxp/imx/imx53-mba53.dts b/arch/arm/boot/dts/nxp/imx/imx53-mba53.dts
index c14eb7280f09..3cdb87ac1d7c 100644
--- a/arch/arm/boot/dts/nxp/imx/imx53-mba53.dts
+++ b/arch/arm/boot/dts/nxp/imx/imx53-mba53.dts
@@ -162,7 +162,7 @@
};
expander: pca9554@20 {
- compatible = "pca9554";
+ compatible = "nxp,pca9554";
reg = <0x20>;
interrupts = <109>;
#gpio-cells = <2>;
diff --git a/arch/arm/boot/dts/nxp/imx/imx53-ppd.dts b/arch/arm/boot/dts/nxp/imx/imx53-ppd.dts
index e939acc1c88b..2892e457fea7 100644
--- a/arch/arm/boot/dts/nxp/imx/imx53-ppd.dts
+++ b/arch/arm/boot/dts/nxp/imx/imx53-ppd.dts
@@ -593,7 +593,7 @@
touchscreen@4b {
compatible = "atmel,maxtouch";
- reset-gpio = <&gpio5 19 GPIO_ACTIVE_LOW>;
+ reset-gpios = <&gpio5 19 GPIO_ACTIVE_LOW>;
reg = <0x4b>;
interrupt-parent = <&gpio5>;
interrupts = <4 IRQ_TYPE_LEVEL_LOW>;
diff --git a/arch/arm/boot/dts/nxp/imx/imx53.dtsi b/arch/arm/boot/dts/nxp/imx/imx53.dtsi
index 845e2bf8460a..faac7cc249d0 100644
--- a/arch/arm/boot/dts/nxp/imx/imx53.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/imx53.dtsi
@@ -598,7 +598,7 @@
clks: ccm@53fd4000 {
compatible = "fsl,imx53-ccm";
reg = <0x53fd4000 0x4000>;
- interrupts = <0 71 0x04 0 72 0x04>;
+ interrupts = <71>, <72>;
#clock-cells = <1>;
};
diff --git a/arch/arm/boot/dts/nxp/imx/imx6dl-colibri-v1.2-aster.dts b/arch/arm/boot/dts/nxp/imx/imx6dl-colibri-v1.2-aster.dts
new file mode 100644
index 000000000000..44c78c07f431
--- /dev/null
+++ b/arch/arm/boot/dts/nxp/imx/imx6dl-colibri-v1.2-aster.dts
@@ -0,0 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0-or-later OR MIT
+/* Copyright (c) 2025 Toradex */
+
+/dts-v1/;
+
+#include "imx6dl-colibri-aster.dts"
+#include "imx6qdl-colibri-v1.2.dtsi"
+
+/ {
+ model = "Toradex Colibri iMX6DL/S V1.2+ on Colibri Aster Board";
+};
diff --git a/arch/arm/boot/dts/nxp/imx/imx6dl-colibri-v1.2-eval-v3.dts b/arch/arm/boot/dts/nxp/imx/imx6dl-colibri-v1.2-eval-v3.dts
new file mode 100644
index 000000000000..93fd0af53a3c
--- /dev/null
+++ b/arch/arm/boot/dts/nxp/imx/imx6dl-colibri-v1.2-eval-v3.dts
@@ -0,0 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0-or-later OR MIT
+/* Copyright (c) 2025 Toradex */
+
+/dts-v1/;
+
+#include "imx6dl-colibri-eval-v3.dts"
+#include "imx6qdl-colibri-v1.2.dtsi"
+
+/ {
+ model = "Toradex Colibri iMX6DL/S V1.2+ on Colibri Evaluation Board V3";
+};
diff --git a/arch/arm/boot/dts/nxp/imx/imx6dl-colibri-v1.2-iris-v2.dts b/arch/arm/boot/dts/nxp/imx/imx6dl-colibri-v1.2-iris-v2.dts
new file mode 100644
index 000000000000..92d41fc9a13f
--- /dev/null
+++ b/arch/arm/boot/dts/nxp/imx/imx6dl-colibri-v1.2-iris-v2.dts
@@ -0,0 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0-or-later OR MIT
+/* Copyright (c) 2025 Toradex */
+
+/dts-v1/;
+
+#include "imx6dl-colibri-iris-v2.dts"
+#include "imx6qdl-colibri-v1.2.dtsi"
+
+/ {
+ model = "Toradex Colibri iMX6DL/S V1.2+ on Colibri Iris V2 Board";
+};
diff --git a/arch/arm/boot/dts/nxp/imx/imx6dl-colibri-v1.2-iris.dts b/arch/arm/boot/dts/nxp/imx/imx6dl-colibri-v1.2-iris.dts
new file mode 100644
index 000000000000..c8957948c887
--- /dev/null
+++ b/arch/arm/boot/dts/nxp/imx/imx6dl-colibri-v1.2-iris.dts
@@ -0,0 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0-or-later OR MIT
+/* Copyright (c) 2025 Toradex */
+
+/dts-v1/;
+
+#include "imx6dl-colibri-iris.dts"
+#include "imx6qdl-colibri-v1.2.dtsi"
+
+/ {
+ model = "Toradex Colibri iMX6DL/S V1.2+ on Colibri Iris Board";
+};
diff --git a/arch/arm/boot/dts/nxp/imx/imx6q-apalis-v1.2-eval-v1.2.dts b/arch/arm/boot/dts/nxp/imx/imx6q-apalis-v1.2-eval-v1.2.dts
new file mode 100644
index 000000000000..908dab57fd87
--- /dev/null
+++ b/arch/arm/boot/dts/nxp/imx/imx6q-apalis-v1.2-eval-v1.2.dts
@@ -0,0 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0-or-later OR MIT
+/* Copyright (c) 2025 Toradex */
+
+/dts-v1/;
+
+#include "imx6q-apalis-eval-v1.2.dts"
+#include "imx6qdl-apalis-v1.2.dtsi"
+
+/ {
+ model = "Toradex Apalis iMX6Q/D Module V1.2+ on Apalis Evaluation Board v1.2";
+};
diff --git a/arch/arm/boot/dts/nxp/imx/imx6q-apalis-v1.2-eval.dts b/arch/arm/boot/dts/nxp/imx/imx6q-apalis-v1.2-eval.dts
new file mode 100644
index 000000000000..5463d4127382
--- /dev/null
+++ b/arch/arm/boot/dts/nxp/imx/imx6q-apalis-v1.2-eval.dts
@@ -0,0 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0-or-later OR MIT
+/* Copyright (c) 2025 Toradex */
+
+/dts-v1/;
+
+#include "imx6q-apalis-eval.dts"
+#include "imx6qdl-apalis-v1.2.dtsi"
+
+/ {
+ model = "Toradex Apalis iMX6Q/D Module V1.2+ on Apalis Evaluation Board";
+};
diff --git a/arch/arm/boot/dts/nxp/imx/imx6q-apalis-v1.2-ixora-v1.1.dts b/arch/arm/boot/dts/nxp/imx/imx6q-apalis-v1.2-ixora-v1.1.dts
new file mode 100644
index 000000000000..84eabf81ba84
--- /dev/null
+++ b/arch/arm/boot/dts/nxp/imx/imx6q-apalis-v1.2-ixora-v1.1.dts
@@ -0,0 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0-or-later OR MIT
+/* Copyright (c) 2025 Toradex */
+
+/dts-v1/;
+
+#include "imx6q-apalis-ixora-v1.1.dts"
+#include "imx6qdl-apalis-v1.2.dtsi"
+
+/ {
+ model = "Toradex Apalis iMX6Q/D Module V1.2+ on Ixora Carrier Board V1.1";
+};
diff --git a/arch/arm/boot/dts/nxp/imx/imx6q-apalis-v1.2-ixora-v1.2.dts b/arch/arm/boot/dts/nxp/imx/imx6q-apalis-v1.2-ixora-v1.2.dts
new file mode 100644
index 000000000000..d7cfab4de457
--- /dev/null
+++ b/arch/arm/boot/dts/nxp/imx/imx6q-apalis-v1.2-ixora-v1.2.dts
@@ -0,0 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0-or-later OR MIT
+/* Copyright (c) 2025 Toradex */
+
+/dts-v1/;
+
+#include "imx6q-apalis-ixora-v1.2.dts"
+#include "imx6qdl-apalis-v1.2.dtsi"
+
+/ {
+ model = "Toradex Apalis iMX6Q/D Module V1.2+ on Ixora Carrier Board V1.2";
+};
diff --git a/arch/arm/boot/dts/nxp/imx/imx6q-apalis-v1.2-ixora.dts b/arch/arm/boot/dts/nxp/imx/imx6q-apalis-v1.2-ixora.dts
new file mode 100644
index 000000000000..189b074e31ce
--- /dev/null
+++ b/arch/arm/boot/dts/nxp/imx/imx6q-apalis-v1.2-ixora.dts
@@ -0,0 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0-or-later OR MIT
+/* Copyright (c) 2025 Toradex */
+
+/dts-v1/;
+
+#include "imx6q-apalis-ixora.dts"
+#include "imx6qdl-apalis-v1.2.dtsi"
+
+/ {
+ model = "Toradex Apalis iMX6Q/D Module V1.2+ on Ixora Carrier Board";
+};
diff --git a/arch/arm/boot/dts/nxp/imx/imx6qdl-apalis-v1.2.dtsi b/arch/arm/boot/dts/nxp/imx/imx6qdl-apalis-v1.2.dtsi
new file mode 100644
index 000000000000..83fa04fc9f18
--- /dev/null
+++ b/arch/arm/boot/dts/nxp/imx/imx6qdl-apalis-v1.2.dtsi
@@ -0,0 +1,57 @@
+// SPDX-License-Identifier: GPL-2.0-or-later OR MIT
+/* Copyright (c) 2025 Toradex */
+
+&i2c2 {
+ /delete-node/ stmpe811@41;
+
+ ad7879_ts: touchscreen@2c {
+ compatible = "adi,ad7879-1";
+ reg = <0x2c>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_touch_int>;
+ interrupts = <10 IRQ_TYPE_LEVEL_LOW>;
+ interrupt-parent = <&gpio4>;
+ touchscreen-max-pressure = <4096>;
+ adi,resistance-plate-x = <120>;
+ adi,first-conversion-delay = /bits/ 8 <3>;
+ adi,acquisition-time = /bits/ 8 <1>;
+ adi,median-filter-size = /bits/ 8 <2>;
+ adi,averaging = /bits/ 8 <1>;
+ adi,conversion-interval = /bits/ 8 <255>;
+ };
+
+ tla2024_adc: adc@49 {
+ compatible = "ti,tla2024";
+ reg = <0x49>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ /* Apalis AN1_ADC0 */
+ channel@4 {
+ reg = <4>;
+ ti,datarate = <4>;
+ ti,gain = <1>;
+ };
+
+ /* Apalis AN1_ADC1 */
+ channel@5 {
+ reg = <5>;
+ ti,datarate = <4>;
+ ti,gain = <1>;
+ };
+
+ /* Apalis AN1_ADC2 */
+ channel@6 {
+ reg = <6>;
+ ti,datarate = <4>;
+ ti,gain = <1>;
+ };
+
+ /* Apalis AN1_TSWIP_ADC3 */
+ channel@7 {
+ reg = <7>;
+ ti,datarate = <4>;
+ ti,gain = <1>;
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/nxp/imx/imx6qdl-apalis.dtsi b/arch/arm/boot/dts/nxp/imx/imx6qdl-apalis.dtsi
index dffab5aa8b9c..b13000a62a7b 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6qdl-apalis.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/imx6qdl-apalis.dtsi
@@ -10,7 +10,6 @@
/ {
model = "Toradex Apalis iMX6Q/D Module";
- compatible = "toradex,apalis_imx6q", "fsl,imx6q";
aliases {
mmc0 = &usdhc3; /* eMMC */
@@ -108,6 +107,11 @@
};
};
+ poweroff {
+ compatible = "regulator-poweroff";
+ cpu-supply = <&vgen2_reg>;
+ };
+
reg_module_3v3: regulator-module-3v3 {
compatible = "regulator-fixed";
regulator-always-on;
@@ -236,10 +240,6 @@
status = "disabled";
};
-&clks {
- fsl,pmic-stby-poweroff;
-};
-
/* Apalis SPI1 */
&ecspi1 {
cs-gpios = <&gpio5 25 GPIO_ACTIVE_LOW>;
@@ -527,7 +527,6 @@
pmic: pmic@8 {
compatible = "fsl,pfuze100";
- fsl,pmic-stby-poweroff;
reg = <0x08>;
regulators {
@@ -664,7 +663,6 @@
st,settling = <3>;
/* 5 ms touch detect interrupt delay */
st,touch-det-delay = <5>;
- status = "disabled";
};
stmpe_adc: stmpe_adc {
diff --git a/arch/arm/boot/dts/nxp/imx/imx6qdl-colibri-v1.2.dtsi b/arch/arm/boot/dts/nxp/imx/imx6qdl-colibri-v1.2.dtsi
new file mode 100644
index 000000000000..d11bf911b728
--- /dev/null
+++ b/arch/arm/boot/dts/nxp/imx/imx6qdl-colibri-v1.2.dtsi
@@ -0,0 +1,57 @@
+// SPDX-License-Identifier: GPL-2.0-or-later OR MIT
+/* Copyright (c) 2025 Toradex */
+
+&i2c2 {
+ /delete-node/ stmpe811@41;
+
+ ad7879_ts: touchscreen@2c {
+ compatible = "adi,ad7879-1";
+ reg = <0x2c>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_touch_int>;
+ interrupts = <20 IRQ_TYPE_LEVEL_LOW>;
+ interrupt-parent = <&gpio6>;
+ touchscreen-max-pressure = <4096>;
+ adi,resistance-plate-x = <120>;
+ adi,first-conversion-delay = /bits/ 8 <3>;
+ adi,acquisition-time = /bits/ 8 <1>;
+ adi,median-filter-size = /bits/ 8 <2>;
+ adi,averaging = /bits/ 8 <1>;
+ adi,conversion-interval = /bits/ 8 <255>;
+ };
+
+ tla2024_adc: adc@49 {
+ compatible = "ti,tla2024";
+ reg = <0x49>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ /* Colibri AIN0 */
+ channel@4 {
+ reg = <4>;
+ ti,datarate = <4>;
+ ti,gain = <1>;
+ };
+
+ /* Colibri AIN1 */
+ channel@5 {
+ reg = <5>;
+ ti,datarate = <4>;
+ ti,gain = <1>;
+ };
+
+ /* Colibri AIN2 */
+ channel@6 {
+ reg = <6>;
+ ti,datarate = <4>;
+ ti,gain = <1>;
+ };
+
+ /* Colibri AIN3 */
+ channel@7 {
+ reg = <7>;
+ ti,datarate = <4>;
+ ti,gain = <1>;
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/nxp/imx/imx6qdl-colibri.dtsi b/arch/arm/boot/dts/nxp/imx/imx6qdl-colibri.dtsi
index 9f33419c260b..3525cbcda57f 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6qdl-colibri.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/imx6qdl-colibri.dtsi
@@ -10,7 +10,6 @@
/ {
model = "Toradex Colibri iMX6DL/S Module";
- compatible = "toradex,colibri_imx6dl", "fsl,imx6dl";
aliases {
mmc0 = &usdhc3; /* eMMC */
@@ -588,7 +587,6 @@
st,settling = <3>;
/* 5 ms touch detect interrupt delay */
st,touch-det-delay = <5>;
- status = "disabled";
};
stmpe_adc: stmpe_adc {
diff --git a/arch/arm/boot/dts/nxp/imx/imx6qdl-mba6.dtsi b/arch/arm/boot/dts/nxp/imx/imx6qdl-mba6.dtsi
index 8cefda70db63..ee2c6bec92e8 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6qdl-mba6.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/imx6qdl-mba6.dtsi
@@ -124,7 +124,7 @@
compatible = "fsl,imx-audio-tlv320aic32x4";
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_audmux>;
- model = "imx-audio-tlv320aic32x4";
+ model = "tqm-tlv320aic32";
ssi-controller = <&ssi1>;
audio-codec = <&tlv320aic32x4>;
audio-asrc = <&asrc>;
diff --git a/arch/arm/boot/dts/nxp/imx/imx6qdl-tqma6.dtsi b/arch/arm/boot/dts/nxp/imx/imx6qdl-tqma6.dtsi
index 6152a9ed4768..07492f63a1f8 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6qdl-tqma6.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/imx6qdl-tqma6.dtsi
@@ -7,16 +7,6 @@
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/interrupt-controller/irq.h>
-/ {
- reg_3p3v: regulator-3p3v {
- compatible = "regulator-fixed";
- regulator-name = "supply-3p3v";
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
- regulator-always-on;
- };
-};
-
&ecspi1 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_ecspi1>;
@@ -25,11 +15,16 @@
m25p80: flash@0 {
compatible = "jedec,spi-nor";
- spi-max-frequency = <50000000>;
reg = <0>;
- #address-cells = <1>;
- #size-cells = <1>;
+ spi-max-frequency = <50000000>;
+ vcc-supply = <&sw4_reg>;
m25p,fast-read;
+
+ partitions {
+ compatible = "fixed-partitions";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ };
};
};
@@ -119,7 +114,7 @@
};
sw4_reg: sw4 {
- regulator-min-microvolt = <800000>;
+ regulator-min-microvolt = <3300000>;
regulator-max-microvolt = <3300000>;
regulator-always-on;
};
@@ -183,7 +178,7 @@
&usdhc3 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usdhc3>;
- vmmc-supply = <&reg_3p3v>;
+ vmmc-supply = <&sw4_reg>;
non-removable;
disable-wp;
no-sd;
diff --git a/arch/arm/boot/dts/nxp/imx/imx6qdl-tqma6a.dtsi b/arch/arm/boot/dts/nxp/imx/imx6qdl-tqma6a.dtsi
index 828996382f24..e8fd37dd8835 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6qdl-tqma6a.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/imx6qdl-tqma6a.dtsi
@@ -30,14 +30,14 @@
temperature-sensor@48 {
compatible = "national,lm75a";
reg = <0x48>;
- vs-supply = <&reg_3p3v>;
+ vs-supply = <&sw4_reg>;
};
eeprom@50 {
compatible = "st,24c64", "atmel,24c64";
reg = <0x50>;
pagesize = <32>;
- vcc-supply = <&reg_3p3v>;
+ vcc-supply = <&sw4_reg>;
};
};
diff --git a/arch/arm/boot/dts/nxp/imx/imx6qdl-tqma6b.dtsi b/arch/arm/boot/dts/nxp/imx/imx6qdl-tqma6b.dtsi
index 1d0966b8d99e..0e404c1f62f2 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6qdl-tqma6b.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/imx6qdl-tqma6b.dtsi
@@ -23,14 +23,14 @@
temperature-sensor@48 {
compatible = "national,lm75a";
reg = <0x48>;
- vs-supply = <&reg_3p3v>;
+ vs-supply = <&sw4_reg>;
};
eeprom@50 {
compatible = "st,24c64", "atmel,24c64";
reg = <0x50>;
pagesize = <32>;
- vcc-supply = <&reg_3p3v>;
+ vcc-supply = <&sw4_reg>;
};
};
diff --git a/arch/arm/boot/dts/nxp/imx/imx6ul-14x14-evk.dtsi b/arch/arm/boot/dts/nxp/imx/imx6ul-14x14-evk.dtsi
index 0e839bbfea08..911ccbd132cf 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6ul-14x14-evk.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/imx6ul-14x14-evk.dtsi
@@ -62,6 +62,33 @@
gpios = <&gpio_spi 3 GPIO_ACTIVE_LOW>;
};
+ reg_audio_5v: regulator-audio-pwr {
+ compatible = "regulator-fixed";
+ regulator-name = "audio-5v";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ reg_audio_3v3: regulator-audio-3v3 {
+ compatible = "regulator-fixed";
+ regulator-name = "audio-3v3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ reg_audio_1v8: regulator-audio-1v8 {
+ compatible = "regulator-fixed";
+ regulator-name = "audio-1v8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
sound-wm8960 {
compatible = "fsl,imx-audio-wm8960";
model = "wm8960-audio";
@@ -139,6 +166,11 @@
wlf,gpio-cfg = <1 3>;
clocks = <&clks IMX6UL_CLK_SAI2>;
clock-names = "mclk";
+ AVDD-supply = <&reg_audio_3v3>;
+ DBVDD-supply = <&reg_audio_1v8>;
+ DCVDD-supply = <&reg_audio_1v8>;
+ SPKVDD1-supply = <&reg_audio_5v>;
+ SPKVDD2-supply = <&reg_audio_5v>;
};
camera@3c {
diff --git a/arch/arm/boot/dts/nxp/imx/imx6ul-tqma6ul-common.dtsi b/arch/arm/boot/dts/nxp/imx/imx6ul-tqma6ul-common.dtsi
index c9c0794f01a2..2dd635a615cb 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6ul-tqma6ul-common.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/imx6ul-tqma6ul-common.dtsi
@@ -162,13 +162,18 @@
status = "okay";
flash0: flash@0 {
- #address-cells = <1>;
- #size-cells = <1>;
compatible = "jedec,spi-nor";
+ reg = <0>;
spi-max-frequency = <33000000>;
spi-rx-bus-width = <4>;
spi-tx-bus-width = <1>;
- reg = <0>;
+ vcc-supply = <&reg_vldo4>;
+
+ partitions {
+ compatible = "fixed-partitions";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ };
};
};
diff --git a/arch/arm/boot/dts/nxp/imx/imx6ul-tqma6ul1-mba6ulx.dts b/arch/arm/boot/dts/nxp/imx/imx6ul-tqma6ul1-mba6ulx.dts
index f2a5f17f312e..2e7b96e7b791 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6ul-tqma6ul1-mba6ulx.dts
+++ b/arch/arm/boot/dts/nxp/imx/imx6ul-tqma6ul1-mba6ulx.dts
@@ -6,8 +6,9 @@
/dts-v1/;
-#include "imx6ul-tqma6ul1.dtsi"
+#include "imx6ul-tqma6ul2.dtsi"
#include "mba6ulx.dtsi"
+#include "imx6ul-tqma6ul1.dtsi"
/ {
model = "TQ-Systems TQMa6UL1 SoM on MBa6ULx board";
diff --git a/arch/arm/boot/dts/nxp/imx/imx6ul-tqma6ul1.dtsi b/arch/arm/boot/dts/nxp/imx/imx6ul-tqma6ul1.dtsi
index 24192d012ef7..79c8c5529135 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6ul-tqma6ul1.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/imx6ul-tqma6ul1.dtsi
@@ -4,8 +4,6 @@
* Author: Markus Niebel <Markus.Niebel@tq-group.com>
*/
-#include "imx6ul-tqma6ul2.dtsi"
-
/ {
model = "TQ-Systems TQMa6UL1 SoM";
compatible = "tq,imx6ul-tqma6ul1", "fsl,imx6ul";
diff --git a/arch/arm/boot/dts/nxp/imx/imx6ul-var-som-concerto.dts b/arch/arm/boot/dts/nxp/imx/imx6ul-var-som-concerto.dts
new file mode 100644
index 000000000000..9ff3b374a2b3
--- /dev/null
+++ b/arch/arm/boot/dts/nxp/imx/imx6ul-var-som-concerto.dts
@@ -0,0 +1,320 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Support for Variscite MX6 Concerto Carrier board with the VAR-SOM-MX6UL
+ * Variscite SoM mounted on it
+ *
+ * Copyright 2019 Variscite Ltd.
+ * Copyright 2025 Bootlin
+ */
+
+#include "imx6ul-var-som.dtsi"
+#include <dt-bindings/leds/common.h>
+
+/ {
+ model = "Variscite VAR-SOM-MX6UL Concerto Board";
+ compatible = "variscite,mx6ulconcerto", "variscite,var-som-imx6ul", "fsl,imx6ul";
+
+ chosen {
+ stdout-path = &uart1;
+ };
+
+ gpio-keys {
+ compatible = "gpio-keys";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_gpio_key_back>, <&pinctrl_gpio_key_wakeup>;
+
+ key-back {
+ gpios = <&gpio4 14 GPIO_ACTIVE_LOW>;
+ linux,code = <KEY_BACK>;
+ };
+
+ key-wakeup {
+ gpios = <&gpio5 8 GPIO_ACTIVE_LOW>;
+ linux,code = <KEY_WAKEUP>;
+ wakeup-source;
+ };
+ };
+
+ leds {
+ compatible = "gpio-leds";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_gpio_leds>;
+
+ led-0 {
+ function = LED_FUNCTION_STATUS;
+ color = <LED_COLOR_ID_GREEN>;
+ label = "gpled2";
+ gpios = <&gpio1 25 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "heartbeat";
+ };
+ };
+};
+
+&can1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_flexcan1>;
+ status = "okay";
+};
+
+&fec1 {
+ status = "disabled";
+};
+
+&fec2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_enet2>, <&pinctrl_enet2_gpio>, <&pinctrl_enet2_mdio>;
+ phy-mode = "rmii";
+ phy-handle = <&ethphy1>;
+ status = "okay";
+
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ethphy1: ethernet-phy@3 {
+ compatible = "ethernet-phy-ieee802.3-c22";
+ reg = <3>;
+ clocks = <&rmii_ref_clk>;
+ clock-names = "rmii-ref";
+ reset-gpios = <&gpio5 5 GPIO_ACTIVE_LOW>;
+ reset-assert-us = <100000>;
+ micrel,led-mode = <0>;
+ micrel,rmii-reference-clock-select-25-mhz = <1>;
+ };
+ };
+};
+
+&i2c1 {
+ clock-frequency = <100000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c1>;
+ status = "okay";
+
+ rtc@68 {
+ /*
+ * To actually use this interrupt
+ * connect pins J14.8 & J14.10 on the Concerto-Board.
+ */
+ compatible = "dallas,ds1337";
+ reg = <0x68>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_rtc>;
+ interrupt-parent = <&gpio1>;
+ interrupts = <10 IRQ_TYPE_EDGE_FALLING>;
+ };
+};
+
+&iomuxc {
+ pinctrl_enet2: enet2grp {
+ fsl,pins = <
+ MX6UL_PAD_ENET2_RX_EN__ENET2_RX_EN 0x1b0b0
+ MX6UL_PAD_ENET2_RX_ER__ENET2_RX_ER 0x1b0b0
+ MX6UL_PAD_ENET2_RX_DATA0__ENET2_RDATA00 0x1b0b0
+ MX6UL_PAD_ENET2_RX_DATA1__ENET2_RDATA01 0x1b0b0
+ MX6UL_PAD_ENET2_TX_EN__ENET2_TX_EN 0x1b0b0
+ MX6UL_PAD_ENET2_TX_DATA0__ENET2_TDATA00 0x1b0b0
+ MX6UL_PAD_ENET2_TX_DATA1__ENET2_TDATA01 0x1b0b0
+ MX6UL_PAD_ENET2_TX_CLK__ENET2_REF_CLK2 0x4001b031
+ >;
+ };
+
+ pinctrl_enet2_gpio: enet2-gpiogrp {
+ fsl,pins = <
+ MX6UL_PAD_SNVS_TAMPER5__GPIO5_IO05 0x1b0b0 /* fec2 reset */
+ >;
+ };
+
+ pinctrl_enet2_mdio: enet2-mdiogrp {
+ fsl,pins = <
+ MX6UL_PAD_GPIO1_IO06__ENET2_MDIO 0x1b0b0
+ MX6UL_PAD_GPIO1_IO07__ENET2_MDC 0x1b0b0
+ >;
+ };
+
+ pinctrl_flexcan1: flexcan1grp {
+ fsl,pins = <
+ MX6UL_PAD_UART3_RTS_B__FLEXCAN1_RX 0x1b020
+ MX6UL_PAD_UART3_CTS_B__FLEXCAN1_TX 0x1b020
+ >;
+ };
+
+ pinctrl_gpio_key_back: gpio-key-backgrp {
+ fsl,pins = <
+ MX6UL_PAD_NAND_CE1_B__GPIO4_IO14 0x17059
+ >;
+ };
+
+ pinctrl_gpio_leds: gpio-ledsgrp {
+ fsl,pins = <
+ MX6UL_PAD_UART3_RX_DATA__GPIO1_IO25 0x1b0b0 /* GPLED2 */
+ >;
+ };
+
+ pinctrl_gpio_key_wakeup: gpio-keys-wakeupgrp {
+ fsl,pins = <
+ MX6UL_PAD_SNVS_TAMPER8__GPIO5_IO08 0x17059
+ >;
+ };
+
+ pinctrl_i2c1: i2c1grp {
+ fsl,pins = <
+ MX6UL_PAD_CSI_PIXCLK__I2C1_SCL 0x4001b8b0
+ MX6UL_PAD_CSI_MCLK__I2C1_SDA 0x4001b8b0
+ >;
+ };
+
+ pinctrl_pwm4: pwm4grp {
+ fsl,pins = <
+ MX6UL_PAD_GPIO1_IO05__PWM4_OUT 0x110b0
+ >;
+ };
+
+ pinctrl_rtc: rtcgrp {
+ fsl,pins = <
+ MX6UL_PAD_JTAG_MOD__GPIO1_IO10 0x1b0b0 /* RTC alarm IRQ */
+ >;
+ };
+
+ pinctrl_uart1: uart1grp {
+ fsl,pins = <
+ MX6UL_PAD_UART1_TX_DATA__UART1_DCE_TX 0x1b0b1
+ MX6UL_PAD_UART1_RX_DATA__UART1_DCE_RX 0x1b0b1
+ >;
+ };
+
+ pinctrl_uart5: uart5grp {
+ fsl,pins = <
+ MX6UL_PAD_CSI_DATA00__UART5_DCE_TX 0x1b0b1
+ MX6UL_PAD_CSI_DATA01__UART5_DCE_RX 0x1b0b1
+ MX6UL_PAD_GPIO1_IO09__UART5_DCE_CTS 0x1b0b1
+ MX6UL_PAD_GPIO1_IO08__UART5_DCE_RTS 0x1b0b1
+ >;
+ };
+
+ pinctrl_usb_otg1_id: usbotg1idgrp {
+ fsl,pins = <
+ MX6UL_PAD_UART3_TX_DATA__ANATOP_OTG1_ID 0x17059
+ >;
+ };
+
+ pinctrl_usdhc1: usdhc1grp {
+ fsl,pins = <
+ MX6UL_PAD_SD1_CMD__USDHC1_CMD 0x17059
+ MX6UL_PAD_SD1_CLK__USDHC1_CLK 0x17059
+ MX6UL_PAD_SD1_DATA0__USDHC1_DATA0 0x17059
+ MX6UL_PAD_SD1_DATA1__USDHC1_DATA1 0x17059
+ MX6UL_PAD_SD1_DATA2__USDHC1_DATA2 0x17059
+ MX6UL_PAD_SD1_DATA3__USDHC1_DATA3 0x17059
+ >;
+ };
+
+ pinctrl_usdhc1_100mhz: usdhc1-100mhzgrp {
+ fsl,pins = <
+ MX6UL_PAD_SD1_CMD__USDHC1_CMD 0x170b9
+ MX6UL_PAD_SD1_CLK__USDHC1_CLK 0x100b9
+ MX6UL_PAD_SD1_DATA0__USDHC1_DATA0 0x170b9
+ MX6UL_PAD_SD1_DATA1__USDHC1_DATA1 0x170b9
+ MX6UL_PAD_SD1_DATA2__USDHC1_DATA2 0x170b9
+ MX6UL_PAD_SD1_DATA3__USDHC1_DATA3 0x170b9
+ >;
+ };
+
+ pinctrl_usdhc1_200mhz: usdhc1-200mhzgrp {
+ fsl,pins = <
+ MX6UL_PAD_SD1_CMD__USDHC1_CMD 0x170f9
+ MX6UL_PAD_SD1_CLK__USDHC1_CLK 0x100f9
+ MX6UL_PAD_SD1_DATA0__USDHC1_DATA0 0x170f9
+ MX6UL_PAD_SD1_DATA1__USDHC1_DATA1 0x170f9
+ MX6UL_PAD_SD1_DATA2__USDHC1_DATA2 0x170f9
+ MX6UL_PAD_SD1_DATA3__USDHC1_DATA3 0x170f9
+ >;
+ };
+
+ pinctrl_usdhc1_gpio: usdhc1-gpiogrp {
+ fsl,pins = <
+ MX6UL_PAD_GPIO1_IO00__GPIO1_IO00 0x1b0b1 /* CD */
+ >;
+ };
+
+ pinctrl_wdog: wdoggrp {
+ fsl,pins = <
+ MX6UL_PAD_GPIO1_IO01__WDOG1_WDOG_B 0x78b0
+ >;
+ };
+};
+
+&pwm4 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_pwm4>;
+ status = "okay";
+};
+
+&snvs_pwrkey {
+ status = "disabled";
+};
+
+&snvs_rtc {
+ status = "disabled";
+};
+
+&tsc {
+ /*
+	 * Conflicts with wdog1 ext-reset-output & SD CD pins,
+ * so we keep it disabled by default.
+ */
+ status = "disabled";
+};
+
+/* Console UART */
+&uart1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart1>;
+ status = "okay";
+};
+
+/* ttymxc4 UART */
+&uart5 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart5>;
+ uart-has-rtscts;
+ status = "okay";
+};
+
+&usbotg1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usb_otg1_id>;
+ dr_mode = "otg";
+ disable-over-current;
+ srp-disable;
+ hnp-disable;
+ adp-disable;
+ status = "okay";
+};
+
+&usbotg2 {
+ dr_mode = "host";
+ disable-over-current;
+ status = "okay";
+};
+
+&usdhc1 {
+ pinctrl-names = "default", "state_100mhz", "state_200mhz";
+ pinctrl-0 = <&pinctrl_usdhc1>, <&pinctrl_usdhc1_gpio>;
+ pinctrl-1 = <&pinctrl_usdhc1_100mhz>, <&pinctrl_usdhc1_gpio>;
+ pinctrl-2 = <&pinctrl_usdhc1_200mhz>, <&pinctrl_usdhc1_gpio>;
+ cd-gpios = <&gpio1 0 GPIO_ACTIVE_LOW>;
+ no-1-8-v;
+ keep-power-in-suspend;
+ wakeup-source;
+ status = "okay";
+};
+
+&wdog1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_wdog>;
+ /*
+	 * To actually use ext-reset-output,
+	 * connect pins J17.3 & J17.8 on the Concerto-Board.
+ */
+ fsl,ext-reset-output;
+};
diff --git a/arch/arm/boot/dts/nxp/imx/imx6ul-var-som.dtsi b/arch/arm/boot/dts/nxp/imx/imx6ul-var-som.dtsi
new file mode 100644
index 000000000000..4e536e0252de
--- /dev/null
+++ b/arch/arm/boot/dts/nxp/imx/imx6ul-var-som.dtsi
@@ -0,0 +1,233 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Support for Variscite VAR-SOM-MX6UL Module
+ *
+ * Copyright 2019 Variscite Ltd.
+ * Copyright 2025 Bootlin
+ */
+
+/dts-v1/;
+
+#include "imx6ul.dtsi"
+#include <dt-bindings/clock/imx6ul-clock.h>
+#include <dt-bindings/gpio/gpio.h>
+
+/ {
+ model = "Variscite VAR-SOM-MX6UL module";
+ compatible = "variscite,var-som-imx6ul", "fsl,imx6ul";
+
+ memory@80000000 {
+ device_type = "memory";
+ reg = <0x80000000 0x20000000>;
+ };
+
+ reg_gpio_dvfs: reg-gpio-dvfs {
+ compatible = "regulator-gpio";
+ regulator-min-microvolt = <1300000>;
+ regulator-max-microvolt = <1400000>;
+ regulator-name = "gpio_dvfs";
+ regulator-type = "voltage";
+ gpios = <&gpio4 13 GPIO_ACTIVE_HIGH>;
+ states = <1300000 0x1
+ 1400000 0x0>;
+ };
+
+ rmii_ref_clk: rmii-ref-clk {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <25000000>;
+ clock-output-names = "rmii-ref";
+ };
+};
+
+&clks {
+ assigned-clocks = <&clks IMX6UL_CLK_PLL4_AUDIO_DIV>;
+ assigned-clock-rates = <786432000>;
+};
+
+&cpu0 {
+ dc-supply = <&reg_gpio_dvfs>;
+};
+
+&fec1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_enet1>, <&pinctrl_enet1_gpio>, <&pinctrl_enet1_mdio>;
+ phy-mode = "rmii";
+ phy-handle = <&ethphy0>;
+ status = "okay";
+
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ethphy0: ethernet-phy@1 {
+ compatible = "ethernet-phy-ieee802.3-c22";
+ reg = <1>;
+ clocks = <&rmii_ref_clk>;
+ clock-names = "rmii-ref";
+ reset-gpios = <&gpio5 0 GPIO_ACTIVE_LOW>;
+ reset-assert-us = <100000>;
+ micrel,led-mode = <1>;
+ micrel,rmii-reference-clock-select-25-mhz = <1>;
+ };
+ };
+};
+
+&iomuxc {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_hog>;
+
+ pinctrl_enet1: enet1grp {
+ fsl,pins = <
+ MX6UL_PAD_ENET1_RX_EN__ENET1_RX_EN 0x1b0b0
+ MX6UL_PAD_ENET1_RX_ER__ENET1_RX_ER 0x1b0b0
+ MX6UL_PAD_ENET1_RX_DATA0__ENET1_RDATA00 0x1b0b0
+ MX6UL_PAD_ENET1_RX_DATA1__ENET1_RDATA01 0x1b0b0
+ MX6UL_PAD_ENET1_TX_EN__ENET1_TX_EN 0x1b0b0
+ MX6UL_PAD_ENET1_TX_DATA0__ENET1_TDATA00 0x1b0b0
+ MX6UL_PAD_ENET1_TX_DATA1__ENET1_TDATA01 0x1b0b0
+ MX6UL_PAD_ENET1_TX_CLK__ENET1_REF_CLK1 0x4001b031
+ >;
+ };
+
+ pinctrl_enet1_gpio: enet1-gpiogrp {
+ fsl,pins = <
+ MX6UL_PAD_SNVS_TAMPER0__GPIO5_IO00 0x1b0b0 /* fec1 reset */
+ >;
+ };
+
+ pinctrl_enet1_mdio: enet1-mdiogrp {
+ fsl,pins = <
+ MX6UL_PAD_GPIO1_IO06__ENET1_MDIO 0x1b0b0
+ MX6UL_PAD_GPIO1_IO07__ENET1_MDC 0x1b0b0
+ >;
+ };
+
+ pinctrl_hog: hoggrp {
+ fsl,pins = <
+ MX6UL_PAD_SNVS_TAMPER4__GPIO5_IO04 0x1b0b0 /* BT Enable */
+ MX6UL_PAD_SNVS_TAMPER6__GPIO5_IO06 0x03029 /* WLAN Enable */
+ >;
+ };
+
+ pinctrl_sai2: sai2grp {
+ fsl,pins = <
+ MX6UL_PAD_JTAG_TDI__SAI2_TX_BCLK 0x17088
+ MX6UL_PAD_JTAG_TDO__SAI2_TX_SYNC 0x17088
+ MX6UL_PAD_JTAG_TRST_B__SAI2_TX_DATA 0x11088
+ MX6UL_PAD_JTAG_TCK__SAI2_RX_DATA 0x11088
+ MX6UL_PAD_JTAG_TMS__SAI2_MCLK 0x17088
+ >;
+ };
+
+ pinctrl_tsc: tscgrp {
+ fsl,pins = <
+ MX6UL_PAD_GPIO1_IO01__GPIO1_IO01 0xb0
+ MX6UL_PAD_GPIO1_IO02__GPIO1_IO02 0xb0
+ MX6UL_PAD_GPIO1_IO03__GPIO1_IO03 0xb0
+ MX6UL_PAD_GPIO1_IO04__GPIO1_IO04 0xb0
+ >;
+ };
+
+ pinctrl_uart2: uart2grp {
+ fsl,pins = <
+ MX6UL_PAD_UART2_TX_DATA__UART2_DCE_TX 0x1b0b1
+ MX6UL_PAD_UART2_RX_DATA__UART2_DCE_RX 0x1b0b1
+ MX6UL_PAD_UART2_CTS_B__UART2_DCE_CTS 0x1b0b1
+ MX6UL_PAD_UART2_RTS_B__UART2_DCE_RTS 0x1b0b1
+ >;
+ };
+
+ pinctrl_usdhc2: usdhc2grp {
+ fsl,pins = <
+ MX6UL_PAD_NAND_RE_B__USDHC2_CLK 0x10069
+ MX6UL_PAD_NAND_WE_B__USDHC2_CMD 0x17059
+ MX6UL_PAD_NAND_DATA00__USDHC2_DATA0 0x17059
+ MX6UL_PAD_NAND_DATA01__USDHC2_DATA1 0x17059
+ MX6UL_PAD_NAND_DATA02__USDHC2_DATA2 0x17059
+ MX6UL_PAD_NAND_DATA03__USDHC2_DATA3 0x17059
+ MX6UL_PAD_NAND_DATA04__USDHC2_DATA4 0x17059
+ MX6UL_PAD_NAND_DATA05__USDHC2_DATA5 0x17059
+ MX6UL_PAD_NAND_DATA06__USDHC2_DATA6 0x17059
+ MX6UL_PAD_NAND_DATA07__USDHC2_DATA7 0x17059
+ >;
+ };
+
+ pinctrl_usdhc2_100mhz: usdhc2-100mhzgrp {
+ fsl,pins = <
+ MX6UL_PAD_NAND_RE_B__USDHC2_CLK 0x100b9
+ MX6UL_PAD_NAND_WE_B__USDHC2_CMD 0x170b9
+ MX6UL_PAD_NAND_DATA00__USDHC2_DATA0 0x170b9
+ MX6UL_PAD_NAND_DATA01__USDHC2_DATA1 0x170b9
+ MX6UL_PAD_NAND_DATA02__USDHC2_DATA2 0x170b9
+ MX6UL_PAD_NAND_DATA03__USDHC2_DATA3 0x170b9
+ MX6UL_PAD_NAND_DATA04__USDHC2_DATA4 0x170b9
+ MX6UL_PAD_NAND_DATA05__USDHC2_DATA5 0x170b9
+ MX6UL_PAD_NAND_DATA06__USDHC2_DATA6 0x170b9
+ MX6UL_PAD_NAND_DATA07__USDHC2_DATA7 0x170b9
+ >;
+ };
+
+ pinctrl_usdhc2_200mhz: usdhc2-200mhzgrp {
+ fsl,pins = <
+ MX6UL_PAD_NAND_RE_B__USDHC2_CLK 0x100f9
+ MX6UL_PAD_NAND_WE_B__USDHC2_CMD 0x170f9
+ MX6UL_PAD_NAND_DATA00__USDHC2_DATA0 0x170f9
+ MX6UL_PAD_NAND_DATA01__USDHC2_DATA1 0x170f9
+ MX6UL_PAD_NAND_DATA02__USDHC2_DATA2 0x170f9
+ MX6UL_PAD_NAND_DATA03__USDHC2_DATA3 0x170f9
+ MX6UL_PAD_NAND_DATA04__USDHC2_DATA4 0x170f9
+ MX6UL_PAD_NAND_DATA05__USDHC2_DATA5 0x170f9
+ MX6UL_PAD_NAND_DATA06__USDHC2_DATA6 0x170f9
+ MX6UL_PAD_NAND_DATA07__USDHC2_DATA7 0x170f9
+ >;
+ };
+};
+
+&pxp {
+ status = "okay";
+};
+
+&sai2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_sai2>;
+ assigned-clocks = <&clks IMX6UL_CLK_SAI2_SEL>,
+ <&clks IMX6UL_CLK_SAI2>;
+ assigned-clock-parents = <&clks IMX6UL_CLK_PLL4_AUDIO_DIV>;
+ assigned-clock-rates = <0>, <12288000>;
+ fsl,sai-mclk-direction-output;
+ status = "okay";
+};
+
+&snvs_poweroff {
+ status = "okay";
+};
+
+&tsc {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_tsc>;
+ xnur-gpios = <&gpio1 3 GPIO_ACTIVE_LOW>;
+ measure-delay-time = <0xffff>;
+ pre-charge-time = <0xfff>;
+ status = "okay";
+};
+
+&uart2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart2>;
+ uart-has-rtscts;
+ status = "okay";
+};
+
+&usdhc2 {
+ pinctrl-names = "default", "state_100mhz", "state_200mhz";
+ pinctrl-0 = <&pinctrl_usdhc2>;
+ pinctrl-1 = <&pinctrl_usdhc2_100mhz>;
+ pinctrl-2 = <&pinctrl_usdhc2_200mhz>;
+ bus-width = <8>;
+ no-1-8-v;
+ non-removable;
+ keep-power-in-suspend;
+ wakeup-source;
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/nxp/imx/imx7-mba7.dtsi b/arch/arm/boot/dts/nxp/imx/imx7-mba7.dtsi
index 576a7df505d3..4d948a9757f9 100644
--- a/arch/arm/boot/dts/nxp/imx/imx7-mba7.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/imx7-mba7.dtsi
@@ -170,7 +170,7 @@
sound {
compatible = "fsl,imx-audio-tlv320aic32x4";
- model = "imx-audio-tlv320aic32x4";
+ model = "tqm-tlv320aic32";
ssi-controller = <&sai1>;
audio-codec = <&tlv320aic32x4>;
audio-routing =
diff --git a/arch/arm/boot/dts/nxp/imx/imx7-tqma7.dtsi b/arch/arm/boot/dts/nxp/imx/imx7-tqma7.dtsi
index aa8f65cd4adf..2966a33bc528 100644
--- a/arch/arm/boot/dts/nxp/imx/imx7-tqma7.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/imx7-tqma7.dtsi
@@ -265,6 +265,13 @@
spi-max-frequency = <29000000>;
spi-rx-bus-width = <4>;
spi-tx-bus-width = <4>;
+ vcc-supply = <&vgen4_reg>;
+
+ partitions {
+ compatible = "fixed-partitions";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ };
};
};
diff --git a/arch/arm/boot/dts/nxp/imx/imx7d-sdb.dts b/arch/arm/boot/dts/nxp/imx/imx7d-sdb.dts
index 6cde84636900..17236f90ab33 100644
--- a/arch/arm/boot/dts/nxp/imx/imx7d-sdb.dts
+++ b/arch/arm/boot/dts/nxp/imx/imx7d-sdb.dts
@@ -143,6 +143,33 @@
gpio = <&gpio1 4 GPIO_ACTIVE_LOW>;
};
+ reg_audio_5v: regulator-audio-pwr {
+ compatible = "regulator-fixed";
+ regulator-name = "audio-5v";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ reg_audio_3v3: regulator-audio-3v3 {
+ compatible = "regulator-fixed";
+ regulator-name = "audio-3v3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ reg_audio_1v8: regulator-audio-1v8 {
+ compatible = "regulator-fixed";
+ regulator-name = "audio-1v8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
backlight: backlight {
compatible = "pwm-backlight";
pwms = <&pwm1 0 5000000 0>;
@@ -406,6 +433,11 @@
<&clks IMX7D_AUDIO_MCLK_ROOT_DIV>;
assigned-clock-parents = <&clks IMX7D_PLL_AUDIO_POST_DIV>;
assigned-clock-rates = <0>, <884736000>, <12288000>;
+ AVDD-supply = <&reg_audio_3v3>;
+ DBVDD-supply = <&reg_audio_1v8>;
+ DCVDD-supply = <&reg_audio_1v8>;
+ SPKVDD1-supply = <&reg_audio_5v>;
+ SPKVDD2-supply = <&reg_audio_5v>;
};
};
diff --git a/arch/arm/boot/dts/nxp/imx/imx7s.dtsi b/arch/arm/boot/dts/nxp/imx/imx7s.dtsi
index 22dd72499ef2..2629968001a7 100644
--- a/arch/arm/boot/dts/nxp/imx/imx7s.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/imx7s.dtsi
@@ -176,6 +176,34 @@
<GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(1) | IRQ_TYPE_LEVEL_LOW)>;
};
+ video_mux: csi-mux {
+ compatible = "video-mux";
+ mux-controls = <&mux 0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+
+ port@0 {
+ reg = <0>;
+ };
+
+ port@1 {
+ reg = <1>;
+
+ csi_mux_from_mipi_vc0: endpoint {
+ remote-endpoint = <&mipi_vc0_to_csi_mux>;
+ };
+ };
+
+ port@2 {
+ reg = <2>;
+
+ csi_mux_to_csi: endpoint {
+ remote-endpoint = <&csi_from_csi_mux>;
+ };
+ };
+ };
+
soc: soc {
#address-cells = <1>;
#size-cells = <1>;
@@ -529,34 +557,6 @@
#mux-control-cells = <1>;
mux-reg-masks = <0x14 0x00000010>;
};
-
- video_mux: csi-mux {
- compatible = "video-mux";
- mux-controls = <&mux 0>;
- #address-cells = <1>;
- #size-cells = <0>;
- status = "disabled";
-
- port@0 {
- reg = <0>;
- };
-
- port@1 {
- reg = <1>;
-
- csi_mux_from_mipi_vc0: endpoint {
- remote-endpoint = <&mipi_vc0_to_csi_mux>;
- };
- };
-
- port@2 {
- reg = <2>;
-
- csi_mux_to_csi: endpoint {
- remote-endpoint = <&csi_from_csi_mux>;
- };
- };
- };
};
ocotp: efuse@30350000 {
diff --git a/arch/arm/boot/dts/nxp/imx/mba6ulx.dtsi b/arch/arm/boot/dts/nxp/imx/mba6ulx.dtsi
index 941d9860218e..67a3d484bc9f 100644
--- a/arch/arm/boot/dts/nxp/imx/mba6ulx.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/mba6ulx.dtsi
@@ -142,7 +142,7 @@
sound {
compatible = "fsl,imx-audio-tlv320aic32x4";
- model = "imx-audio-tlv320aic32x4";
+ model = "tqm-tlv320aic32";
ssi-controller = <&sai1>;
audio-codec = <&tlv320aic32x4>;
audio-asrc = <&asrc>;
diff --git a/arch/arm/boot/dts/nxp/mxs/Makefile b/arch/arm/boot/dts/nxp/mxs/Makefile
index a430d04f9c69..96dd31ea19ba 100644
--- a/arch/arm/boot/dts/nxp/mxs/Makefile
+++ b/arch/arm/boot/dts/nxp/mxs/Makefile
@@ -8,6 +8,9 @@ dtb-$(CONFIG_ARCH_MXS) += \
imx28-apf28.dtb \
imx28-apf28dev.dtb \
imx28-apx4devkit.dtb \
+ imx28-btt3-0.dtb \
+ imx28-btt3-1.dtb \
+ imx28-btt3-2.dtb \
imx28-cfa10036.dtb \
imx28-cfa10037.dtb \
imx28-cfa10049.dtb \
diff --git a/arch/arm/boot/dts/nxp/mxs/imx28-btt3-0.dts b/arch/arm/boot/dts/nxp/mxs/imx28-btt3-0.dts
new file mode 100644
index 000000000000..6ac46e4b21bb
--- /dev/null
+++ b/arch/arm/boot/dts/nxp/mxs/imx28-btt3-0.dts
@@ -0,0 +1,12 @@
+// SPDX-License-Identifier: GPL-2.0-or-later OR MIT
+/*
+ * Copyright 2024
+ * Lukasz Majewski, DENX Software Engineering, lukma@denx.de
+ */
+
+/dts-v1/;
+#include "imx28-btt3.dtsi"
+
+&hog_pins_rev {
+ fsl,pull-up = <MXS_PULL_ENABLE>;
+};
diff --git a/arch/arm/boot/dts/nxp/mxs/imx28-btt3-1.dts b/arch/arm/boot/dts/nxp/mxs/imx28-btt3-1.dts
new file mode 100644
index 000000000000..213fe931c58b
--- /dev/null
+++ b/arch/arm/boot/dts/nxp/mxs/imx28-btt3-1.dts
@@ -0,0 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-or-later OR MIT
+/*
+ * Copyright 2024
+ * Lukasz Majewski, DENX Software Engineering, lukma@denx.de
+ */
+
+/dts-v1/;
+#include "imx28-btt3.dtsi"
diff --git a/arch/arm/boot/dts/nxp/mxs/imx28-btt3-2.dts b/arch/arm/boot/dts/nxp/mxs/imx28-btt3-2.dts
new file mode 100644
index 000000000000..4bccd784d065
--- /dev/null
+++ b/arch/arm/boot/dts/nxp/mxs/imx28-btt3-2.dts
@@ -0,0 +1,39 @@
+// SPDX-License-Identifier: GPL-2.0-or-later OR MIT
+/*
+ * Copyright 2024
+ * Lukasz Majewski, DENX Software Engineering, lukma@denx.de
+ */
+
+/dts-v1/;
+#include "imx28-btt3.dtsi"
+
+/ {
+ panel {
+ compatible = "powertip,st7272", "panel-dpi";
+ power-supply = <&reg_3v3>;
+ width-mm = <70>;
+ height-mm = <52>;
+
+ panel-timing {
+ clock-frequency = <6500000>;
+ hactive = <320>;
+ vactive = <240>;
+ hfront-porch = <20>;
+ hback-porch = <68>;
+ hsync-len = <30>;
+ vfront-porch = <4>;
+ vback-porch = <14>;
+ vsync-len = <4>;
+ hsync-active = <0>;
+ vsync-active = <0>;
+ de-active = <1>;
+ pixelclk-active = <1>;
+ };
+
+ port {
+ panel_in: endpoint {
+ remote-endpoint = <&display_out>;
+ };
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/nxp/mxs/imx28-btt3.dtsi b/arch/arm/boot/dts/nxp/mxs/imx28-btt3.dtsi
new file mode 100644
index 000000000000..2c52e67e5c14
--- /dev/null
+++ b/arch/arm/boot/dts/nxp/mxs/imx28-btt3.dtsi
@@ -0,0 +1,313 @@
+// SPDX-License-Identifier: GPL-2.0-or-later OR MIT
+/*
+ * Copyright 2024
+ * Lukasz Majewski, DENX Software Engineering, lukma@denx.de
+ */
+/dts-v1/;
+#include "imx28-lwe.dtsi"
+
+/ {
+ model = "BTT3";
+
+ compatible = "lwn,imx28-btt3", "fsl,imx28";
+
+ chosen {
+ bootargs = "root=/dev/mmcblk0p2 rootfstype=ext4 ro rootwait console=ttyAMA0,115200 panic=1 quiet";
+ };
+
+ memory@40000000 {
+ reg = <0x40000000 0x10000000>;
+ device_type = "memory";
+ };
+
+ panel {
+ compatible = "powertip,hx8238a", "panel-dpi";
+ power-supply = <&reg_3v3>;
+ width-mm = <70>;
+ height-mm = <52>;
+
+ panel-timing {
+ clock-frequency = <6500000>;
+ hactive = <320>;
+ vactive = <240>;
+ hfront-porch = <20>;
+ hback-porch = <38>;
+ hsync-len = <30>;
+ vfront-porch = <4>;
+ vback-porch = <14>;
+ vsync-len = <4>;
+ hsync-active = <0>;
+ vsync-active = <0>;
+ de-active = <0>;
+ pixelclk-active = <1>;
+ };
+
+ port {
+ panel_in: endpoint {
+ remote-endpoint = <&display_out>;
+ };
+ };
+ };
+
+ poweroff {
+ compatible = "gpio-poweroff";
+ gpios = <&gpio0 24 GPIO_ACTIVE_HIGH>;
+ };
+
+ sound {
+ compatible = "simple-audio-card";
+ simple-audio-card,name = "BTTC Audio";
+ simple-audio-card,widgets = "Speaker", "BTTC Speaker";
+ simple-audio-card,routing = "BTTC Speaker", "SPKOUTN", "BTTC Speaker", "SPKOUTP";
+
+ simple-audio-card,dai-link@0 {
+ format = "left_j";
+ bitclock-master = <&dai0_master>;
+ frame-master = <&dai0_master>;
+ mclk-fs = <256>;
+
+ dai0_master: cpu {
+ sound-dai = <&saif0>;
+ };
+
+ codec {
+ sound-dai = <&wm89xx>;
+ clocks = <&saif0>;
+ };
+ };
+ };
+
+ wifi_pwrseq: sdio-pwrseq {
+ compatible = "mmc-pwrseq-simple";
+ pinctrl-names = "default";
+ pinctrl-0 = <&wifi_en_pin_bttc>;
+ reset-gpios = <&gpio0 27 GPIO_ACTIVE_LOW>;
+		/* The W1-163 needs WL_EN held low for 60us, and 150ms after */
+		/* it goes high before FW download is possible. */
+ post-power-on-delay-ms = <200>;
+ power-off-delay-us = <100>;
+ };
+};
+
+&auart0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&auart0_2pins_a>;
+ status = "okay";
+};
+
+&auart3 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&auart3_pins_a>;
+ uart-has-rtscts;
+ status = "okay";
+};
+
+&i2c0 {
+ wm89xx: audio-codec@1a {
+ compatible = "wlf,wm8940";
+ reg = <0x1a>;
+ #sound-dai-cells = <0>;
+ };
+};
+
+&lcdif {
+ pinctrl-names = "default";
+ pinctrl-0 = <&lcdif_24bit_pins_a>, <&lcdif_sync_pins_bttc>,
+ <&lcdif_reset_pins_bttc>;
+ status = "okay";
+
+ port {
+ display_out: endpoint {
+ remote-endpoint = <&panel_in>;
+ };
+ };
+};
+
+&mac0 {
+ clocks = <&clks 57>, <&clks 57>, <&clks 64>;
+ clock-names = "ipg", "ahb", "enet_out";
+ phy-handle = <&mac0_phy>;
+ phy-mode = "rmii";
+ phy-supply = <&reg_3v3>;
+ /*
+ * This MAC address is adjusted during production.
+	 * The value specified below is used as a fallback during recovery.
+ */
+ local-mac-address = [ 00 11 B8 00 BF 8A ];
+ status = "okay";
+
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ mac0_phy: ethernet-phy@0 {
+ /* LAN8720Ai - PHY ID */
+ compatible = "ethernet-phy-id0007.c0f0","ethernet-phy-ieee802.3-c22";
+ reg = <0>;
+ smsc,disable-energy-detect;
+ max-speed = <100>;
+ reset-gpios = <&gpio4 12 GPIO_ACTIVE_LOW>;
+ reset-assert-us = <1000>;
+ reset-deassert-us = <1000>;
+ };
+ };
+};
+
+&pinctrl {
+ pinctrl-names = "default";
+ pinctrl-0 = <&hog_pins_a>, <&hog_pins_rev>;
+
+ hog_pins_a: hog@0 {
+ reg = <0>;
+ fsl,pinmux-ids = <
+ MX28_PAD_GPMI_RDY2__GPIO_0_22
+ MX28_PAD_GPMI_RDY3__GPIO_0_23
+ MX28_PAD_GPMI_RDN__GPIO_0_24
+ MX28_PAD_LCD_VSYNC__GPIO_1_28
+ MX28_PAD_SSP2_SS1__GPIO_2_20
+ MX28_PAD_SSP2_SS2__GPIO_2_21
+ MX28_PAD_AUART2_CTS__GPIO_3_10
+ MX28_PAD_AUART2_RTS__GPIO_3_11
+ MX28_PAD_GPMI_WRN__GPIO_0_25
+ MX28_PAD_ENET0_RXD2__GPIO_4_9
+ MX28_PAD_ENET0_TXD2__GPIO_4_11
+ >;
+ fsl,drive-strength = <MXS_DRIVE_4mA>;
+ fsl,voltage = <MXS_VOLTAGE_HIGH>;
+ fsl,pull-up = <MXS_PULL_DISABLE>;
+ };
+
+ hog_pins_rev: hog@1 {
+ reg = <1>;
+ fsl,pinmux-ids = <
+ MX28_PAD_ENET0_RXD3__GPIO_4_10
+ MX28_PAD_ENET0_TX_CLK__GPIO_4_5
+ MX28_PAD_ENET0_COL__GPIO_4_14
+ MX28_PAD_ENET0_CRS__GPIO_4_15
+ >;
+ fsl,drive-strength = <MXS_DRIVE_4mA>;
+ fsl,voltage = <MXS_VOLTAGE_HIGH>;
+ fsl,pull-up = <MXS_PULL_DISABLE>;
+ };
+
+ keypad_pins_bttc: keypad-bttc@0 {
+ reg = <0>;
+ fsl,pinmux-ids = <
+ MX28_PAD_GPMI_D00__GPIO_0_0
+ MX28_PAD_AUART0_CTS__GPIO_3_2
+ MX28_PAD_AUART0_RTS__GPIO_3_3
+ MX28_PAD_GPMI_D03__GPIO_0_3
+ MX28_PAD_GPMI_D04__GPIO_0_4
+ MX28_PAD_GPMI_D05__GPIO_0_5
+ MX28_PAD_GPMI_D06__GPIO_0_6
+ MX28_PAD_GPMI_D07__GPIO_0_7
+ MX28_PAD_GPMI_CE1N__GPIO_0_17
+ MX28_PAD_GPMI_CE2N__GPIO_0_18
+ MX28_PAD_GPMI_CE3N__GPIO_0_19
+ MX28_PAD_GPMI_RDY0__GPIO_0_20
+ >;
+ fsl,drive-strength = <MXS_DRIVE_4mA>;
+ fsl,voltage = <MXS_VOLTAGE_HIGH>;
+ fsl,pull-up = <MXS_PULL_DISABLE>;
+ };
+
+ lcdif_sync_pins_bttc: lcdif-bttc@0 {
+ reg = <0>;
+ fsl,pinmux-ids = <
+ MX28_PAD_LCD_DOTCLK__LCD_DOTCLK
+ MX28_PAD_LCD_ENABLE__LCD_ENABLE
+ MX28_PAD_LCD_HSYNC__LCD_HSYNC
+ MX28_PAD_LCD_RD_E__LCD_VSYNC
+ >;
+ fsl,drive-strength = <MXS_DRIVE_4mA>;
+ fsl,voltage = <MXS_VOLTAGE_HIGH>;
+ fsl,pull-up = <MXS_PULL_DISABLE>;
+ };
+
+ lcdif_reset_pins_bttc: lcdif-bttc@1 {
+ reg = <1>;
+ fsl,pinmux-ids = <
+ MX28_PAD_LCD_RESET__GPIO_3_30
+ >;
+ fsl,drive-strength = <MXS_DRIVE_4mA>;
+ fsl,voltage = <MXS_VOLTAGE_HIGH>;
+ fsl,pull-up = <MXS_PULL_ENABLE>;
+ };
+
+ ssp1_sdio_pins_a: ssp1-sdio@0 {
+ reg = <0>;
+ fsl,pinmux-ids = <
+ MX28_PAD_SSP1_DATA0__SSP1_D0
+ MX28_PAD_GPMI_D01__SSP1_D1
+ MX28_PAD_GPMI_D02__SSP1_D2
+ MX28_PAD_SSP1_DATA3__SSP1_D3
+ MX28_PAD_SSP1_CMD__SSP1_CMD
+ MX28_PAD_SSP1_SCK__SSP1_SCK
+ >;
+ fsl,drive-strength = <MXS_DRIVE_8mA>;
+ fsl,voltage = <MXS_VOLTAGE_HIGH>;
+ fsl,pull-up = <MXS_PULL_ENABLE>;
+ };
+
+ wifi_en_pin_bttc: wifi-en-pin@0 {
+ reg = <0>;
+ fsl,pinmux-ids = <
+ MX28_PAD_GPMI_CLE__GPIO_0_27
+ >;
+ fsl,drive-strength = <MXS_DRIVE_8mA>;
+ fsl,voltage = <MXS_VOLTAGE_HIGH>;
+ fsl,pull-up = <MXS_PULL_ENABLE>;
+ };
+};
+
+&pwm {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pwm3_pins_a>;
+ status = "okay";
+};
+
+&reg_usb_5v {
+ gpio = <&gpio1 28 GPIO_ACTIVE_HIGH>;
+};
+
+&saif0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&saif0_pins_a>;
+ #sound-dai-cells = <0>;
+ assigned-clocks = <&clks 53>;
+ assigned-clock-rates = <12000000>;
+ status = "okay";
+};
+
+&saif1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&saif1_pins_a>;
+ #sound-dai-cells = <0>;
+ fsl,saif-master = <&saif0>;
+ status = "okay";
+};
+
+&ssp1 {
+ compatible = "fsl,imx28-mmc";
+ pinctrl-names = "default";
+ pinctrl-0 = <&ssp1_sdio_pins_a>;
+ bus-width = <4>;
+ no-1-8-v; /* force 3.3V VIO */
+ non-removable;
+ vmmc-supply = <&reg_3v3>;
+ mmc-pwrseq = <&wifi_pwrseq>;
+ keep-power-in-suspend;
+ status = "okay";
+
+ wlan@1 {
+ reg = <1>;
+ compatible = "brcm,bcm4329-fmac";
+ };
+};
+
+&ssp2 {
+ compatible = "fsl,imx28-spi";
+ pinctrl-names = "default";
+ pinctrl-0 = <&spi2_pins_a>;
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/nxp/mxs/imx28-sps1.dts b/arch/arm/boot/dts/nxp/mxs/imx28-sps1.dts
index 0f01dded4e3d..ca62e7933116 100644
--- a/arch/arm/boot/dts/nxp/mxs/imx28-sps1.dts
+++ b/arch/arm/boot/dts/nxp/mxs/imx28-sps1.dts
@@ -24,30 +24,25 @@
};
leds {
- #address-cells = <1>;
- #size-cells = <0>;
compatible = "gpio-leds";
status = "okay";
- led@1 {
+ led-1 {
label = "sps1-1:yellow:user";
gpios = <&gpio0 6 0>;
linux,default-trigger = "heartbeat";
- reg = <0>;
};
- led@2 {
+ led-2 {
label = "sps1-2:red:user";
gpios = <&gpio0 3 0>;
linux,default-trigger = "heartbeat";
- reg = <1>;
};
- led@3 {
+ led-3 {
label = "sps1-3:red:user";
gpios = <&gpio0 0 0>;
- default-trigger = "heartbeat";
- reg = <2>;
+ linux,default-trigger = "heartbeat";
};
};
diff --git a/arch/arm/boot/dts/nxp/vf/vf610-bk4.dts b/arch/arm/boot/dts/nxp/vf/vf610-bk4.dts
index 722182f5fd17..2492fb99956c 100644
--- a/arch/arm/boot/dts/nxp/vf/vf610-bk4.dts
+++ b/arch/arm/boot/dts/nxp/vf/vf610-bk4.dts
@@ -119,7 +119,7 @@
status = "okay";
spidev0@0 {
- compatible = "lwn,bk4";
+ compatible = "lwn,bk4-spi";
spi-max-frequency = <30000000>;
reg = <0>;
fsl,spi-cs-sck-delay = <200>;
@@ -136,7 +136,7 @@
#address-cells = <0>;
slave {
- compatible = "lwn,bk4";
+ compatible = "lwn,bk4-spi";
spi-max-frequency = <30000000>;
};
};
diff --git a/arch/arm/boot/dts/nxp/vf/vf610-colibri.dtsi b/arch/arm/boot/dts/nxp/vf/vf610-colibri.dtsi
index 607cec2df861..20aed3946214 100644
--- a/arch/arm/boot/dts/nxp/vf/vf610-colibri.dtsi
+++ b/arch/arm/boot/dts/nxp/vf/vf610-colibri.dtsi
@@ -8,7 +8,6 @@
/ {
model = "Toradex Colibri VF61 COM";
- compatible = "toradex,vf610-colibri_vf61", "fsl,vf610";
memory@80000000 {
device_type = "memory";
diff --git a/arch/arm/boot/dts/nxp/vf/vf610-zii-dev-rev-c.dts b/arch/arm/boot/dts/nxp/vf/vf610-zii-dev-rev-c.dts
index 6f9878f124c4..4f99044837f8 100644
--- a/arch/arm/boot/dts/nxp/vf/vf610-zii-dev-rev-c.dts
+++ b/arch/arm/boot/dts/nxp/vf/vf610-zii-dev-rev-c.dts
@@ -392,7 +392,7 @@
};
&gpio0 {
- eth0_intrp {
+ eth0-intrp-hog {
gpio-hog;
gpios = <23 GPIO_ACTIVE_HIGH>;
input;
@@ -401,7 +401,7 @@
};
&gpio3 {
- eth0_intrp {
+ eth0-intrp-hog {
gpio-hog;
gpios = <2 GPIO_ACTIVE_HIGH>;
input;
diff --git a/arch/arm/boot/dts/nxp/vf/vfxxx.dtsi b/arch/arm/boot/dts/nxp/vf/vfxxx.dtsi
index acccf9a3c898..597f20be82f1 100644
--- a/arch/arm/boot/dts/nxp/vf/vfxxx.dtsi
+++ b/arch/arm/boot/dts/nxp/vf/vfxxx.dtsi
@@ -158,8 +158,8 @@
clocks = <&clks VF610_CLK_DSPI0>;
clock-names = "dspi";
spi-num-chipselects = <6>;
- dmas = <&edma1 1 12>, <&edma1 1 13>;
- dma-names = "rx", "tx";
+ dmas = <&edma1 1 13>, <&edma1 1 12>;
+ dma-names = "tx", "rx";
status = "disabled";
};
@@ -172,8 +172,8 @@
clocks = <&clks VF610_CLK_DSPI1>;
clock-names = "dspi";
spi-num-chipselects = <4>;
- dmas = <&edma1 1 14>, <&edma1 1 15>;
- dma-names = "rx", "tx";
+ dmas = <&edma1 1 15>, <&edma1 1 14>;
+ dma-names = "tx", "rx";
status = "disabled";
};
@@ -529,9 +529,8 @@
clocks = <&clks VF610_CLK_DSPI2>;
clock-names = "dspi";
spi-num-chipselects = <2>;
- dmas = <&edma1 0 10>,
- <&edma1 0 11>;
- dma-names = "rx", "tx";
+ dmas = <&edma1 0 11>, <&edma1 0 10>;
+ dma-names = "tx", "rx";
status = "disabled";
};
@@ -544,8 +543,8 @@
clocks = <&clks VF610_CLK_DSPI3>;
clock-names = "dspi";
spi-num-chipselects = <2>;
- dmas = <&edma1 0 12>, <&edma1 0 13>;
- dma-names = "rx", "tx";
+ dmas = <&edma1 0 13>, <&edma1 0 12>;
+ dma-names = "tx", "rx";
status = "disabled";
};
@@ -725,13 +724,13 @@
clocks = <&clks VF610_CLK_CAAM>;
clock-names = "ipg";
- sec_jr0: jr0@1000 {
+ sec_jr0: jr@1000 {
compatible = "fsl,sec-v4.0-job-ring";
reg = <0x1000 0x1000>;
interrupts = <102 IRQ_TYPE_LEVEL_HIGH>;
};
- sec_jr1: jr1@2000 {
+ sec_jr1: jr@2000 {
compatible = "fsl,sec-v4.0-job-ring";
reg = <0x2000 0x1000>;
interrupts = <102 IRQ_TYPE_LEVEL_HIGH>;
diff --git a/arch/arm/boot/dts/renesas/r8a7790-lager.dts b/arch/arm/boot/dts/renesas/r8a7790-lager.dts
index 3bce5876a9d8..4f002aa7fbaf 100644
--- a/arch/arm/boot/dts/renesas/r8a7790-lager.dts
+++ b/arch/arm/boot/dts/renesas/r8a7790-lager.dts
@@ -754,6 +754,7 @@
&scif0 {
pinctrl-0 = <&scif0_pins>;
pinctrl-names = "default";
+ bootph-all;
status = "okay";
};
diff --git a/arch/arm/boot/dts/renesas/r8a7790-stout.dts b/arch/arm/boot/dts/renesas/r8a7790-stout.dts
index d7c0a9574ce8..b1e20579e071 100644
--- a/arch/arm/boot/dts/renesas/r8a7790-stout.dts
+++ b/arch/arm/boot/dts/renesas/r8a7790-stout.dts
@@ -268,6 +268,7 @@
&scifa0 {
pinctrl-0 = <&scifa0_pins>;
pinctrl-names = "default";
+ bootph-all;
status = "okay";
};
diff --git a/arch/arm/boot/dts/renesas/r8a7790.dtsi b/arch/arm/boot/dts/renesas/r8a7790.dtsi
index f746f0b9e686..4f97c09dbc9f 100644
--- a/arch/arm/boot/dts/renesas/r8a7790.dtsi
+++ b/arch/arm/boot/dts/renesas/r8a7790.dtsi
@@ -227,6 +227,7 @@
#clock-cells = <0>;
/* This value must be overridden by the board. */
clock-frequency = <0>;
+ bootph-all;
};
/* External PCIe clock - can be overridden by the board */
@@ -265,6 +266,7 @@
soc {
compatible = "simple-bus";
interrupt-parent = <&gic>;
+ bootph-all;
#address-cells = <2>;
#size-cells = <2>;
@@ -374,6 +376,7 @@
pfc: pinctrl@e6060000 {
compatible = "renesas,pfc-r8a7790";
reg = <0 0xe6060000 0 0x250>;
+ bootph-all;
};
tpu: pwm@e60f0000 {
@@ -395,6 +398,7 @@
#clock-cells = <2>;
#power-domain-cells = <0>;
#reset-cells = <1>;
+ bootph-all;
};
apmu@e6151000 {
@@ -412,6 +416,7 @@
rst: reset-controller@e6160000 {
compatible = "renesas,r8a7790-rst";
reg = <0 0xe6160000 0 0x0100>;
+ bootph-all;
};
sysc: system-controller@e6180000 {
@@ -1948,6 +1953,7 @@
prr: chipid@ff000044 {
compatible = "renesas,prr";
reg = <0 0xff000044 0 4>;
+ bootph-all;
};
cmt0: timer@ffca0000 {
@@ -2018,5 +2024,6 @@
compatible = "fixed-clock";
#clock-cells = <0>;
clock-frequency = <48000000>;
+ bootph-all;
};
};
diff --git a/arch/arm/boot/dts/renesas/r8a7791-koelsch.dts b/arch/arm/boot/dts/renesas/r8a7791-koelsch.dts
index e4e1d9c98c61..e9f90fa44d55 100644
--- a/arch/arm/boot/dts/renesas/r8a7791-koelsch.dts
+++ b/arch/arm/boot/dts/renesas/r8a7791-koelsch.dts
@@ -679,6 +679,7 @@
&scif0 {
pinctrl-0 = <&scif0_pins>;
pinctrl-names = "default";
+ bootph-all;
status = "okay";
};
diff --git a/arch/arm/boot/dts/renesas/r8a7791-porter.dts b/arch/arm/boot/dts/renesas/r8a7791-porter.dts
index 08381498350a..f518eadd8b9c 100644
--- a/arch/arm/boot/dts/renesas/r8a7791-porter.dts
+++ b/arch/arm/boot/dts/renesas/r8a7791-porter.dts
@@ -312,6 +312,7 @@
&scif0 {
pinctrl-0 = <&scif0_pins>;
pinctrl-names = "default";
+ bootph-all;
status = "okay";
};
diff --git a/arch/arm/boot/dts/renesas/r8a7791.dtsi b/arch/arm/boot/dts/renesas/r8a7791.dtsi
index e57567adff55..5023b41c28b3 100644
--- a/arch/arm/boot/dts/renesas/r8a7791.dtsi
+++ b/arch/arm/boot/dts/renesas/r8a7791.dtsi
@@ -125,6 +125,7 @@
#clock-cells = <0>;
/* This value must be overridden by the board. */
clock-frequency = <0>;
+ bootph-all;
};
/* External PCIe clock - can be overridden by the board */
@@ -152,6 +153,7 @@
soc {
compatible = "simple-bus";
interrupt-parent = <&gic>;
+ bootph-all;
#address-cells = <2>;
#size-cells = <2>;
@@ -291,6 +293,7 @@
pfc: pinctrl@e6060000 {
compatible = "renesas,pfc-r8a7791";
reg = <0 0xe6060000 0 0x250>;
+ bootph-all;
};
tpu: pwm@e60f0000 {
@@ -312,6 +315,7 @@
#clock-cells = <2>;
#power-domain-cells = <0>;
#reset-cells = <1>;
+ bootph-all;
};
apmu@e6152000 {
@@ -323,6 +327,7 @@
rst: reset-controller@e6160000 {
compatible = "renesas,r8a7791-rst";
reg = <0 0xe6160000 0 0x0100>;
+ bootph-all;
};
sysc: system-controller@e6180000 {
@@ -1875,6 +1880,7 @@
prr: chipid@ff000044 {
compatible = "renesas,prr";
reg = <0 0xff000044 0 4>;
+ bootph-all;
};
cmt0: timer@ffca0000 {
@@ -1945,5 +1951,6 @@
compatible = "fixed-clock";
#clock-cells = <0>;
clock-frequency = <48000000>;
+ bootph-all;
};
};
diff --git a/arch/arm/boot/dts/renesas/r8a7792-blanche.dts b/arch/arm/boot/dts/renesas/r8a7792-blanche.dts
index a3986076d8e3..23ec0f8a6651 100644
--- a/arch/arm/boot/dts/renesas/r8a7792-blanche.dts
+++ b/arch/arm/boot/dts/renesas/r8a7792-blanche.dts
@@ -301,6 +301,7 @@
&scif0 {
pinctrl-0 = <&scif0_pins>;
pinctrl-names = "default";
+ bootph-all;
status = "okay";
};
diff --git a/arch/arm/boot/dts/renesas/r8a7792-wheat.dts b/arch/arm/boot/dts/renesas/r8a7792-wheat.dts
index bfc780f7e396..93bd81723c8f 100644
--- a/arch/arm/boot/dts/renesas/r8a7792-wheat.dts
+++ b/arch/arm/boot/dts/renesas/r8a7792-wheat.dts
@@ -183,6 +183,7 @@
&scif0 {
pinctrl-0 = <&scif0_pins>;
pinctrl-names = "default";
+ bootph-all;
status = "okay";
};
diff --git a/arch/arm/boot/dts/renesas/r8a7792.dtsi b/arch/arm/boot/dts/renesas/r8a7792.dtsi
index 08cbe6c13cee..7513afc1c958 100644
--- a/arch/arm/boot/dts/renesas/r8a7792.dtsi
+++ b/arch/arm/boot/dts/renesas/r8a7792.dtsi
@@ -82,6 +82,7 @@
#clock-cells = <0>;
/* This value must be overridden by the board. */
clock-frequency = <0>;
+ bootph-all;
};
lbsc: bus {
@@ -109,6 +110,7 @@
soc {
compatible = "simple-bus";
interrupt-parent = <&gic>;
+ bootph-all;
#address-cells = <2>;
#size-cells = <2>;
@@ -308,6 +310,7 @@
pfc: pinctrl@e6060000 {
compatible = "renesas,pfc-r8a7792";
reg = <0 0xe6060000 0 0x144>;
+ bootph-all;
};
cpg: clock-controller@e6150000 {
@@ -318,6 +321,7 @@
#clock-cells = <2>;
#power-domain-cells = <0>;
#reset-cells = <1>;
+ bootph-all;
};
apmu@e6152000 {
@@ -329,6 +333,7 @@
rst: reset-controller@e6160000 {
compatible = "renesas,r8a7792-rst";
reg = <0 0xe6160000 0 0x0100>;
+ bootph-all;
};
sysc: system-controller@e6180000 {
@@ -947,6 +952,7 @@
prr: chipid@ff000044 {
compatible = "renesas,prr";
reg = <0 0xff000044 0 4>;
+ bootph-all;
};
cmt0: timer@ffca0000 {
diff --git a/arch/arm/boot/dts/renesas/r8a7793-gose.dts b/arch/arm/boot/dts/renesas/r8a7793-gose.dts
index 2c05d7c2b377..45b267ec2679 100644
--- a/arch/arm/boot/dts/renesas/r8a7793-gose.dts
+++ b/arch/arm/boot/dts/renesas/r8a7793-gose.dts
@@ -642,6 +642,7 @@
&scif0 {
pinctrl-0 = <&scif0_pins>;
pinctrl-names = "default";
+ bootph-all;
status = "okay";
};
diff --git a/arch/arm/boot/dts/renesas/r8a7793.dtsi b/arch/arm/boot/dts/renesas/r8a7793.dtsi
index e48e43cc6b03..fc6d3bcca296 100644
--- a/arch/arm/boot/dts/renesas/r8a7793.dtsi
+++ b/arch/arm/boot/dts/renesas/r8a7793.dtsi
@@ -117,6 +117,7 @@
#clock-cells = <0>;
/* This value must be overridden by the board. */
clock-frequency = <0>;
+ bootph-all;
};
pmu {
@@ -137,6 +138,7 @@
soc {
compatible = "simple-bus";
interrupt-parent = <&gic>;
+ bootph-all;
#address-cells = <2>;
#size-cells = <2>;
@@ -276,6 +278,7 @@
pfc: pinctrl@e6060000 {
compatible = "renesas,pfc-r8a7793";
reg = <0 0xe6060000 0 0x250>;
+ bootph-all;
};
/* Special CPG clocks */
@@ -287,6 +290,7 @@
#clock-cells = <2>;
#power-domain-cells = <0>;
#reset-cells = <1>;
+ bootph-all;
};
apmu@e6152000 {
@@ -298,6 +302,7 @@
rst: reset-controller@e6160000 {
compatible = "renesas,r8a7793-rst";
reg = <0 0xe6160000 0 0x0100>;
+ bootph-all;
};
sysc: system-controller@e6180000 {
@@ -1454,6 +1459,7 @@
prr: chipid@ff000044 {
compatible = "renesas,prr";
reg = <0 0xff000044 0 4>;
+ bootph-all;
};
cmt0: timer@ffca0000 {
@@ -1524,5 +1530,6 @@
compatible = "fixed-clock";
#clock-cells = <0>;
clock-frequency = <48000000>;
+ bootph-all;
};
};
diff --git a/arch/arm/boot/dts/renesas/r8a7794-alt.dts b/arch/arm/boot/dts/renesas/r8a7794-alt.dts
index f70e26aa83a0..3f06a7f67d62 100644
--- a/arch/arm/boot/dts/renesas/r8a7794-alt.dts
+++ b/arch/arm/boot/dts/renesas/r8a7794-alt.dts
@@ -479,6 +479,7 @@
&scif2 {
pinctrl-0 = <&scif2_pins>;
pinctrl-names = "default";
+ bootph-all;
status = "okay";
};
diff --git a/arch/arm/boot/dts/renesas/r8a7794-silk.dts b/arch/arm/boot/dts/renesas/r8a7794-silk.dts
index 2a0819311a3c..342825605768 100644
--- a/arch/arm/boot/dts/renesas/r8a7794-silk.dts
+++ b/arch/arm/boot/dts/renesas/r8a7794-silk.dts
@@ -394,6 +394,7 @@
&scif2 {
pinctrl-0 = <&scif2_pins>;
pinctrl-names = "default";
+ bootph-all;
status = "okay";
};
diff --git a/arch/arm/boot/dts/renesas/r8a7794.dtsi b/arch/arm/boot/dts/renesas/r8a7794.dtsi
index bc16c896c0f9..92010d09f6c4 100644
--- a/arch/arm/boot/dts/renesas/r8a7794.dtsi
+++ b/arch/arm/boot/dts/renesas/r8a7794.dtsi
@@ -99,6 +99,7 @@
#clock-cells = <0>;
/* This value must be overridden by the board. */
clock-frequency = <0>;
+ bootph-all;
};
pmu {
@@ -119,6 +120,7 @@
soc {
compatible = "simple-bus";
interrupt-parent = <&gic>;
+ bootph-all;
#address-cells = <2>;
#size-cells = <2>;
@@ -243,6 +245,7 @@
pfc: pinctrl@e6060000 {
compatible = "renesas,pfc-r8a7794";
reg = <0 0xe6060000 0 0x11c>;
+ bootph-all;
};
cpg: clock-controller@e6150000 {
@@ -253,6 +256,7 @@
#clock-cells = <2>;
#power-domain-cells = <0>;
#reset-cells = <1>;
+ bootph-all;
};
apmu@e6151000 {
@@ -264,6 +268,7 @@
rst: reset-controller@e6160000 {
compatible = "renesas,r8a7794-rst";
reg = <0 0xe6160000 0 0x0100>;
+ bootph-all;
};
sysc: system-controller@e6180000 {
@@ -1440,6 +1445,7 @@
prr: chipid@ff000044 {
compatible = "renesas,prr";
reg = <0 0xff000044 0 4>;
+ bootph-all;
};
cmt0: timer@ffca0000 {
@@ -1491,5 +1497,6 @@
compatible = "fixed-clock";
#clock-cells = <0>;
clock-frequency = <48000000>;
+ bootph-all;
};
};
diff --git a/arch/arm/boot/dts/renesas/r9a06g032.dtsi b/arch/arm/boot/dts/renesas/r9a06g032.dtsi
index 7548291c8d7e..87e03446fb4d 100644
--- a/arch/arm/boot/dts/renesas/r9a06g032.dtsi
+++ b/arch/arm/boot/dts/renesas/r9a06g032.dtsi
@@ -211,8 +211,8 @@
reg-io-width = <4>;
clocks = <&sysctrl R9A06G032_CLK_UART3>, <&sysctrl R9A06G032_HCLK_UART3>;
clock-names = "baudclk", "apb_pclk";
- dmas = <&dmamux 0 0 0 0 0 1>, <&dmamux 1 0 0 0 1 1>;
- dma-names = "rx", "tx";
+ dmas = <&dmamux 1 0 0 0 1 1>, <&dmamux 0 0 0 0 0 1>;
+ dma-names = "tx", "rx";
status = "disabled";
};
@@ -224,8 +224,8 @@
reg-io-width = <4>;
clocks = <&sysctrl R9A06G032_CLK_UART4>, <&sysctrl R9A06G032_HCLK_UART4>;
clock-names = "baudclk", "apb_pclk";
- dmas = <&dmamux 2 0 0 0 2 1>, <&dmamux 3 0 0 0 3 1>;
- dma-names = "rx", "tx";
+ dmas = <&dmamux 3 0 0 0 3 1>, <&dmamux 2 0 0 0 2 1>;
+ dma-names = "tx", "rx";
status = "disabled";
};
@@ -237,8 +237,8 @@
reg-io-width = <4>;
clocks = <&sysctrl R9A06G032_CLK_UART5>, <&sysctrl R9A06G032_HCLK_UART5>;
clock-names = "baudclk", "apb_pclk";
- dmas = <&dmamux 4 0 0 0 4 1>, <&dmamux 5 0 0 0 5 1>;
- dma-names = "rx", "tx";
+ dmas = <&dmamux 5 0 0 0 5 1>, <&dmamux 4 0 0 0 4 1>;
+ dma-names = "tx", "rx";
status = "disabled";
};
@@ -250,8 +250,8 @@
reg-io-width = <4>;
clocks = <&sysctrl R9A06G032_CLK_UART6>, <&sysctrl R9A06G032_HCLK_UART6>;
clock-names = "baudclk", "apb_pclk";
- dmas = <&dmamux 6 0 0 0 6 1>, <&dmamux 7 0 0 0 7 1>;
- dma-names = "rx", "tx";
+ dmas = <&dmamux 7 0 0 0 7 1>, <&dmamux 6 0 0 0 6 1>;
+ dma-names = "tx", "rx";
status = "disabled";
};
@@ -263,8 +263,8 @@
reg-io-width = <4>;
clocks = <&sysctrl R9A06G032_CLK_UART7>, <&sysctrl R9A06G032_HCLK_UART7>;
clock-names = "baudclk", "apb_pclk";
- dmas = <&dmamux 4 0 0 0 20 1>, <&dmamux 5 0 0 0 21 1>;
- dma-names = "rx", "tx";
+ dmas = <&dmamux 5 0 0 0 21 1>, <&dmamux 4 0 0 0 20 1>;
+ dma-names = "tx", "rx";
status = "disabled";
};
diff --git a/arch/arm/boot/dts/st/Makefile b/arch/arm/boot/dts/st/Makefile
index b7d5d305cbbe..60d55516f723 100644
--- a/arch/arm/boot/dts/st/Makefile
+++ b/arch/arm/boot/dts/st/Makefile
@@ -29,6 +29,7 @@ dtb-$(CONFIG_ARCH_STM32) += \
stm32h743i-eval.dtb \
stm32h743i-disco.dtb \
stm32h750i-art-pi.dtb \
+ stm32mp133c-prihmb.dtb \
stm32mp135f-dhcor-dhsbc.dtb \
stm32mp135f-dk.dtb \
stm32mp151a-prtt1a.dtb \
@@ -37,8 +38,11 @@ dtb-$(CONFIG_ARCH_STM32) += \
stm32mp151a-dhcor-testbench.dtb \
stm32mp151c-mecio1r0.dtb \
stm32mp151c-mect1s.dtb \
+ stm32mp151c-plyaqm.dtb \
stm32mp153c-dhcom-drc02.dtb \
stm32mp153c-dhcor-drc-compact.dtb \
+ stm32mp153c-lxa-fairytux2-gen1.dtb \
+ stm32mp153c-lxa-fairytux2-gen2.dtb \
stm32mp153c-lxa-tac-gen3.dtb \
stm32mp153c-mecio1r1.dtb \
stm32mp157a-avenger96.dtb \
diff --git a/arch/arm/boot/dts/st/stm32f746-disco.dts b/arch/arm/boot/dts/st/stm32f746-disco.dts
index 087de6f09629..b57dbdce2f40 100644
--- a/arch/arm/boot/dts/st/stm32f746-disco.dts
+++ b/arch/arm/boot/dts/st/stm32f746-disco.dts
@@ -78,6 +78,24 @@
serial0 = &usart1;
};
+ leds {
+ compatible = "gpio-leds";
+ led-usr {
+ gpios = <&gpioi 1 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "heartbeat";
+ };
+ };
+
+ gpio-keys {
+ compatible = "gpio-keys";
+ autorepeat;
+ button-0 {
+ label = "User";
+ linux,code = <KEY_HOME>;
+ gpios = <&gpioi 11 GPIO_ACTIVE_HIGH>;
+ };
+ };
+
usbotg_hs_phy: usb-phy {
#phy-cells = <0>;
compatible = "usb-nop-xceiv";
diff --git a/arch/arm/boot/dts/st/stm32f769-disco.dts b/arch/arm/boot/dts/st/stm32f769-disco.dts
index 52c5baf58ab9..535cfdc4681c 100644
--- a/arch/arm/boot/dts/st/stm32f769-disco.dts
+++ b/arch/arm/boot/dts/st/stm32f769-disco.dts
@@ -79,13 +79,16 @@
leds {
compatible = "gpio-leds";
- led-green {
+ led-usr2 {
gpios = <&gpioj 5 GPIO_ACTIVE_HIGH>;
linux,default-trigger = "heartbeat";
};
- led-red {
+ led-usr1 {
gpios = <&gpioj 13 GPIO_ACTIVE_HIGH>;
};
+ led-usr3 {
+ gpios = <&gpioa 12 GPIO_ACTIVE_HIGH>;
+ };
};
gpio-keys {
diff --git a/arch/arm/boot/dts/st/stm32mp131.dtsi b/arch/arm/boot/dts/st/stm32mp131.dtsi
index 0019d12c3d3d..8512a6e46b33 100644
--- a/arch/arm/boot/dts/st/stm32mp131.dtsi
+++ b/arch/arm/boot/dts/st/stm32mp131.dtsi
@@ -100,6 +100,31 @@
always-on;
};
+ thermal-zones {
+ cpu_thermal: cpu-thermal {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-sensors = <&dts>;
+
+ trips {
+ cpu_alert1: cpu-alert1 {
+ temperature = <85000>;
+ hysteresis = <0>;
+ type = "passive";
+ };
+
+ cpu-crit {
+ temperature = <120000>;
+ hysteresis = <0>;
+ type = "critical";
+ };
+ };
+
+ cooling-maps {
+ };
+ };
+ };
+
soc {
compatible = "simple-bus";
#address-cells = <1>;
@@ -919,6 +944,16 @@
};
};
+ dts: thermal@50028000 {
+ compatible = "st,stm32-thermal";
+ reg = <0x50028000 0x100>;
+ interrupts = <GIC_SPI 147 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&rcc DTS>;
+ clock-names = "pclk";
+ #thermal-sensor-cells = <0>;
+ status = "disabled";
+ };
+
mdma: dma-controller@58000000 {
compatible = "st,stm32h7-mdma";
reg = <0x58000000 0x1000>;
diff --git a/arch/arm/boot/dts/st/stm32mp133c-prihmb.dts b/arch/arm/boot/dts/st/stm32mp133c-prihmb.dts
new file mode 100644
index 000000000000..663b6de1b814
--- /dev/null
+++ b/arch/arm/boot/dts/st/stm32mp133c-prihmb.dts
@@ -0,0 +1,496 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/dts-v1/;
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/leds/common.h>
+#include <dt-bindings/regulator/st,stm32mp13-regulator.h>
+#include "stm32mp133.dtsi"
+#include "stm32mp13xc.dtsi"
+#include "stm32mp13-pinctrl.dtsi"
+
+/ {
+ model = "Priva E-Measuringbox board";
+ compatible = "pri,prihmb", "st,stm32mp133";
+
+ aliases {
+ ethernet0 = &ethernet1;
+ mdio-gpio0 = &mdio0;
+ mmc0 = &sdmmc1;
+ mmc1 = &sdmmc2;
+ serial0 = &uart4;
+ serial1 = &usart6;
+ serial2 = &uart7;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+
+ counter-0 {
+ compatible = "interrupt-counter";
+ gpios = <&gpioa 11 GPIO_ACTIVE_HIGH>;
+ };
+
+ gpio-keys {
+ compatible = "gpio-keys";
+ autorepeat;
+
+ button-reset {
+ label = "reset-button";
+ linux,code = <BTN_1>;
+ gpios = <&gpioi 7 GPIO_ACTIVE_LOW>;
+ };
+ };
+
+ leds {
+ compatible = "gpio-leds";
+
+ led-blue {
+ function = LED_FUNCTION_HEARTBEAT;
+ color = <LED_COLOR_ID_BLUE>;
+ gpios = <&gpioa 14 GPIO_ACTIVE_LOW>;
+ linux,default-trigger = "heartbeat";
+ default-state = "off";
+ };
+ };
+
+ led-controller-0 {
+ compatible = "pwm-leds-multicolor";
+
+ multi-led {
+ color = <LED_COLOR_ID_RGB>;
+ function = LED_FUNCTION_STATUS;
+ max-brightness = <255>;
+
+ led-red {
+ active-low;
+ color = <LED_COLOR_ID_RED>;
+ pwms = <&pwm2 2 1000000 1>;
+ };
+
+ led-green {
+ active-low;
+ color = <LED_COLOR_ID_GREEN>;
+ pwms = <&pwm1 1 1000000 1>;
+ };
+
+ led-blue {
+ active-low;
+ color = <LED_COLOR_ID_BLUE>;
+ pwms = <&pwm1 2 1000000 1>;
+ };
+ };
+ };
+
+ led-controller-1 {
+ compatible = "pwm-leds-multicolor";
+
+ multi-led {
+ color = <LED_COLOR_ID_RGB>;
+ function = LED_FUNCTION_STATUS;
+ max-brightness = <255>;
+
+ led-red {
+ active-low;
+ color = <LED_COLOR_ID_RED>;
+ pwms = <&pwm1 0 1000000 1>;
+ };
+
+ led-green {
+ active-low;
+ color = <LED_COLOR_ID_GREEN>;
+ pwms = <&pwm2 0 1000000 1>;
+ };
+
+ led-blue {
+ active-low;
+ color = <LED_COLOR_ID_BLUE>;
+ pwms = <&pwm2 1 1000000 1>;
+ };
+ };
+ };
+
+	/* DP83TD510E PHYs have a max MDC rate of 1.75 MHz. Since we can't
+	 * reduce the stmmac MDC clock without reducing the system bus rate,
+	 * we need to use a GPIO-based MDIO bus.
+ */
+ mdio0: mdio {
+ compatible = "virtual,mdio-gpio";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ gpios = <&gpiog 2 GPIO_ACTIVE_HIGH
+ &gpioa 2 GPIO_ACTIVE_HIGH>;
+
+ /* TI DP83TD510E */
+ phy0: ethernet-phy@0 {
+ compatible = "ethernet-phy-id2000.0181";
+ reg = <0>;
+ interrupts-extended = <&gpioa 4 IRQ_TYPE_LEVEL_LOW>;
+ reset-gpios = <&gpioa 3 GPIO_ACTIVE_LOW>;
+ reset-assert-us = <10>;
+ reset-deassert-us = <35>;
+ };
+ };
+
+ memory@c0000000 {
+ device_type = "memory";
+ reg = <0xc0000000 0x10000000>;
+ };
+
+ reg_3v3: regulator-3v3 {
+ compatible = "regulator-fixed";
+ regulator-name = "3v3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ };
+
+ reserved-memory {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ optee@ce000000 {
+ reg = <0xce000000 0x02000000>;
+ no-map;
+ };
+ };
+};
+
+&adc_1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&adc_1_pins_a>;
+ vdda-supply = <&reg_3v3>;
+ vref-supply = <&reg_3v3>;
+ status = "okay";
+};
+
+&adc1 {
+ status = "okay";
+
+	channel@0 { /* Fan current PC0 */
+ reg = <0>;
+ st,min-sample-time-ns = <10000>; /* 10µs sampling time */
+ };
+ channel@11 { /* Fan voltage */
+ reg = <11>;
+ st,min-sample-time-ns = <10000>; /* 10µs sampling time */
+ };
+ channel@15 { /* Supply voltage */
+ reg = <15>;
+ st,min-sample-time-ns = <10000>; /* 10µs sampling time */
+ };
+};
+
+&dts {
+ status = "okay";
+};
+
+&ethernet1 {
+ status = "okay";
+ pinctrl-0 = <&ethernet1_rmii_pins_a>;
+ pinctrl-1 = <&ethernet1_rmii_sleep_pins_a>;
+ pinctrl-names = "default", "sleep";
+ phy-mode = "rmii";
+ phy-handle = <&phy0>;
+};
+
+&i2c1 {
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&i2c1_pins_a>;
+ pinctrl-1 = <&i2c1_sleep_pins_a>;
+ clock-frequency = <100000>;
+ /delete-property/dmas;
+ /delete-property/dma-names;
+ status = "okay";
+
+ board-sensor@48 {
+ compatible = "ti,tmp1075";
+ reg = <0x48>;
+ vs-supply = <&reg_3v3>;
+ };
+};
+
+&{i2c1_pins_a/pins} {
+ pinmux = <STM32_PINMUX('D', 3, AF5)>, /* I2C1_SCL */
+ <STM32_PINMUX('B', 8, AF4)>; /* I2C1_SDA */
+ bias-disable;
+ drive-open-drain;
+ slew-rate = <0>;
+};
+
+&{i2c1_sleep_pins_a/pins} {
+ pinmux = <STM32_PINMUX('D', 3, ANALOG)>, /* I2C1_SCL */
+ <STM32_PINMUX('B', 8, ANALOG)>; /* I2C1_SDA */
+};
+
+&iwdg2 {
+ timeout-sec = <32>;
+ status = "okay";
+};
+
+/* SD card without Card-detect */
+&sdmmc1 {
+ pinctrl-names = "default", "opendrain", "sleep";
+ pinctrl-0 = <&sdmmc1_b4_pins_a &sdmmc1_clk_pins_a>;
+ pinctrl-1 = <&sdmmc1_b4_od_pins_a &sdmmc1_clk_pins_a>;
+ pinctrl-2 = <&sdmmc1_b4_sleep_pins_a>;
+ broken-cd;
+ no-sdio;
+ no-1-8-v;
+ st,neg-edge;
+ bus-width = <4>;
+ vmmc-supply = <&reg_3v3>;
+ status = "okay";
+};
+
+/* EMMC */
+&sdmmc2 {
+ pinctrl-names = "default", "opendrain", "sleep";
+ pinctrl-0 = <&sdmmc2_b4_pins_a &sdmmc2_d47_pins_a &sdmmc2_clk_pins_a>;
+ pinctrl-1 = <&sdmmc2_b4_od_pins_a &sdmmc2_d47_pins_a &sdmmc2_clk_pins_a>;
+ pinctrl-2 = <&sdmmc2_b4_sleep_pins_a &sdmmc2_d47_sleep_pins_a>;
+ non-removable;
+ no-sd;
+ no-sdio;
+ no-1-8-v;
+ st,neg-edge;
+ mmc-ddr-3_3v;
+ bus-width = <8>;
+ vmmc-supply = <&reg_3v3>;
+ status = "okay";
+};
+
+&timers1 {
+ status = "okay";
+ /delete-property/dmas;
+ /delete-property/dma-names;
+
+ pwm1: pwm {
+ pinctrl-0 = <&pwm1_pins_a>;
+ pinctrl-1 = <&pwm1_sleep_pins_a>;
+ pinctrl-names = "default", "sleep";
+ status = "okay";
+ };
+};
+
+&timers4 {
+ status = "okay";
+ /delete-property/dmas;
+ /delete-property/dma-names;
+
+ pwm2: pwm {
+ pinctrl-0 = <&pwm4_pins_a>;
+ pinctrl-1 = <&pwm4_sleep_pins_a>;
+ pinctrl-names = "default", "sleep";
+ status = "okay";
+ };
+};
+
+/* Fan PWM */
+&timers5 {
+ status = "okay";
+
+ pwm3: pwm {
+ pinctrl-0 = <&pwm5_pins_a>;
+ pinctrl-1 = <&pwm5_sleep_pins_a>;
+ pinctrl-names = "default", "sleep";
+ status = "okay";
+ };
+};
+
+&timers2 {
+ status = "okay";
+
+ timer@1 {
+ status = "okay";
+ };
+};
+
+&uart4 {
+ pinctrl-names = "default", "sleep", "idle";
+ pinctrl-0 = <&uart4_pins_a>;
+ pinctrl-1 = <&uart4_sleep_pins_a>;
+ pinctrl-2 = <&uart4_idle_pins_a>;
+ /delete-property/dmas;
+ /delete-property/dma-names;
+ status = "okay";
+};
+
+&uart7 {
+ pinctrl-names = "default", "sleep", "idle";
+ pinctrl-0 = <&uart7_pins_a>;
+ pinctrl-1 = <&uart7_sleep_pins_a>;
+ pinctrl-2 = <&uart7_idle_pins_a>;
+ /delete-property/dmas;
+ /delete-property/dma-names;
+ status = "okay";
+};
+
+&usart6 {
+ pinctrl-names = "default", "sleep", "idle";
+ pinctrl-0 = <&usart6_pins_a>;
+ pinctrl-1 = <&usart6_sleep_pins_a>;
+ pinctrl-2 = <&usart6_idle_pins_a>;
+ linux,rs485-enabled-at-boot-time;
+ /delete-property/dmas;
+ /delete-property/dma-names;
+ status = "okay";
+};
+
+&pinctrl {
+ adc_1_pins_a: adc1-0 {
+ pins {
+ pinmux = <STM32_PINMUX('C', 0, ANALOG)>, /* ADC1 in0 */
+ <STM32_PINMUX('C', 2, ANALOG)>, /* ADC1 in15 */
+ <STM32_PINMUX('F', 13, ANALOG)>; /* ADC1 in11 */
+ };
+ };
+
+ ethernet1_rmii_pins_a: rmii-0 {
+ pins1 {
+ pinmux = <STM32_PINMUX('G', 13, AF11)>, /* ETH1_RMII_TXD0 */
+ <STM32_PINMUX('G', 14, AF11)>, /* ETH1_RMII_TXD1 */
+ <STM32_PINMUX('B', 11, AF11)>, /* ETH1_RMII_TX_EN */
+ <STM32_PINMUX('A', 1, AF11)>; /* ETH1_RMII_REF_CLK */
+ bias-disable;
+ drive-push-pull;
+ slew-rate = <2>;
+ };
+ pins2 {
+ pinmux = <STM32_PINMUX('C', 4, AF11)>, /* ETH1_RMII_RXD0 */
+ <STM32_PINMUX('C', 5, AF11)>, /* ETH1_RMII_RXD1 */
+ <STM32_PINMUX('A', 7, AF11)>; /* ETH1_RMII_CRS_DV */
+ bias-disable;
+ };
+ };
+
+ ethernet1_rmii_sleep_pins_a: rmii-sleep-0 {
+ pins1 {
+ pinmux = <STM32_PINMUX('G', 13, ANALOG)>, /* ETH1_RMII_TXD0 */
+ <STM32_PINMUX('G', 14, ANALOG)>, /* ETH1_RMII_TXD1 */
+ <STM32_PINMUX('B', 11, ANALOG)>, /* ETH1_RMII_TX_EN */
+ <STM32_PINMUX('C', 4, ANALOG)>, /* ETH1_RMII_RXD0 */
+ <STM32_PINMUX('C', 5, ANALOG)>, /* ETH1_RMII_RXD1 */
+ <STM32_PINMUX('A', 1, ANALOG)>, /* ETH1_RMII_REF_CLK */
+ <STM32_PINMUX('A', 7, ANALOG)>; /* ETH1_RMII_CRS_DV */
+ };
+ };
+
+ pwm1_pins_a: pwm1-0 {
+ pins {
+ pinmux = <STM32_PINMUX('E', 9, AF1)>, /* TIM1_CH1 */
+ <STM32_PINMUX('E', 11, AF1)>, /* TIM1_CH2 */
+ <STM32_PINMUX('E', 13, AF1)>; /* TIM1_CH3 */
+ bias-pull-down;
+ drive-push-pull;
+ slew-rate = <0>;
+ };
+ };
+
+ pwm1_sleep_pins_a: pwm1-sleep-0 {
+ pins {
+ pinmux = <STM32_PINMUX('E', 9, ANALOG)>, /* TIM1_CH1 */
+ <STM32_PINMUX('E', 11, ANALOG)>, /* TIM1_CH2 */
+ <STM32_PINMUX('E', 13, ANALOG)>; /* TIM1_CH3 */
+ };
+ };
+
+ pwm4_pins_a: pwm4-0 {
+ pins {
+ pinmux = <STM32_PINMUX('D', 12, AF2)>, /* TIM4_CH1 */
+ <STM32_PINMUX('B', 7, AF2)>, /* TIM4_CH2 */
+ <STM32_PINMUX('D', 14, AF2)>; /* TIM4_CH3 */
+ bias-pull-down;
+ drive-push-pull;
+ slew-rate = <0>;
+ };
+ };
+
+ pwm4_sleep_pins_a: pwm4-sleep-0 {
+ pins {
+ pinmux = <STM32_PINMUX('D', 12, ANALOG)>, /* TIM4_CH1 */
+ <STM32_PINMUX('B', 7, ANALOG)>, /* TIM4_CH2 */
+ <STM32_PINMUX('D', 14, ANALOG)>; /* TIM4_CH3 */
+ };
+ };
+ pwm5_pins_a: pwm5-0 {
+ pins {
+ pinmux = <STM32_PINMUX('A', 0, AF2)>; /* TIM5_CH1 */
+ };
+ };
+
+ pwm5_sleep_pins_a: pwm5-sleep-0 {
+ pins {
+ pinmux = <STM32_PINMUX('A', 0, ANALOG)>; /* TIM5_CH1 */
+ };
+ };
+
+ uart7_pins_a: uart7-0 {
+ pins1 {
+			pinmux = <STM32_PINMUX('E', 8, AF7)>; /* UART7_TX */
+ bias-disable;
+ drive-push-pull;
+ slew-rate = <0>;
+ };
+ pins2 {
+ pinmux = <STM32_PINMUX('E', 10, AF7)>; /* UART7_RX */
+ bias-pull-up;
+ };
+ };
+
+ uart7_idle_pins_a: uart7-idle-0 {
+ pins1 {
+ pinmux = <STM32_PINMUX('E', 8, ANALOG)>; /* UART7_TX */
+ };
+ pins2 {
+ pinmux = <STM32_PINMUX('E', 10, AF7)>; /* UART7_RX */
+ bias-pull-up;
+ };
+ };
+
+ uart7_sleep_pins_a: uart7-sleep-0 {
+ pins {
+ pinmux = <STM32_PINMUX('E', 8, ANALOG)>, /* UART7_TX */
+ <STM32_PINMUX('E', 10, ANALOG)>; /* UART7_RX */
+ };
+ };
+
+ usart6_pins_a: usart6-0 {
+ pins1 {
+ pinmux = <STM32_PINMUX('F', 8, AF7)>, /* USART6_TX */
+ <STM32_PINMUX('F', 10, AF7)>; /* USART6_DE */
+ bias-disable;
+ drive-push-pull;
+ slew-rate = <0>;
+ };
+ pins2 {
+ pinmux = <STM32_PINMUX('H', 11, AF7)>; /* USART6_RX */
+ bias-disable;
+ };
+ };
+
+ usart6_idle_pins_a: usart6-idle-0 {
+ pins1 {
+ pinmux = <STM32_PINMUX('F', 8, ANALOG)>; /* USART6_TX */
+ };
+ pins2 {
+ pinmux = <STM32_PINMUX('F', 10, AF7)>; /* USART6_DE */
+ bias-disable;
+ drive-push-pull;
+ slew-rate = <0>;
+ };
+ pins3 {
+ pinmux = <STM32_PINMUX('H', 11, AF7)>; /* USART6_RX */
+ bias-disable;
+ };
+ };
+
+ usart6_sleep_pins_a: usart6-sleep-0 {
+ pins {
+ pinmux = <STM32_PINMUX('F', 8, ANALOG)>, /* USART6_TX */
+ <STM32_PINMUX('F', 10, ANALOG)>, /* USART6_DE */
+ <STM32_PINMUX('H', 11, ANALOG)>; /* USART6_RX */
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/st/stm32mp135f-dhcor-dhsbc.dts b/arch/arm/boot/dts/st/stm32mp135f-dhcor-dhsbc.dts
index 853dc21449d9..9902849ed040 100644
--- a/arch/arm/boot/dts/st/stm32mp135f-dhcor-dhsbc.dts
+++ b/arch/arm/boot/dts/st/stm32mp135f-dhcor-dhsbc.dts
@@ -176,7 +176,7 @@
gpio-line-names = "", "", "", "",
"", "DHSBC_USB_PWR_CC1", "", "",
"", "", "", "DHSBC_nETH1_RST",
- "", "DHCOR_HW-CODING_0", "", "";
+ "", "DHCOR_HW-CODING_0", "", "DHSBC_HW-CODE_2";
};
&gpiob {
@@ -197,7 +197,7 @@
gpio-line-names = "", "", "", "",
"", "DHCOR_RAM-CODING_0", "", "",
"", "DHCOR_RAM-CODING_1", "", "",
- "", "", "", "";
+ "", "DHSBC_HW-CODE_1", "", "";
};
&gpioe {
@@ -221,6 +221,13 @@
"DHSBC_ETH1_INTB", "", "", "DHSBC_ETH2_INTB";
};
+&gpioh {
+ gpio-line-names = "", "", "", "DHSBC_HW-CODE_0",
+ "", "", "", "",
+ "", "", "", "",
+ "", "", "", "";
+};
+
&gpioi {
gpio-line-names = "DHCOR_RTC_nINT", "DHCOR_HW-CODING_1",
"DHCOR_BT_REG_ON", "DHCOR_PMIC_nINT",
@@ -296,6 +303,9 @@
st33htph: tpm@0 {
compatible = "st,st33htpm-spi", "tcg,tpm_tis-spi";
reg = <0>;
+ interrupt-parent = <&gpioe>;
+ interrupts = <9 IRQ_TYPE_LEVEL_LOW>;
+ reset-gpios = <&gpioe 12 GPIO_ACTIVE_LOW>;
spi-max-frequency = <24000000>;
};
};
@@ -419,3 +429,19 @@
type = "micro";
};
};
+
+/* LDO2 is expansion connector 3V3 supply on STM32MP13xx DHCOR DHSBC rev.200 */
+&vdd_ldo2 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+};
+
+/* LDO5 is carrier board 3V3 supply on STM32MP13xx DHCOR DHSBC rev.200 */
+&vdd_sd {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+};
diff --git a/arch/arm/boot/dts/st/stm32mp15-pinctrl.dtsi b/arch/arm/boot/dts/st/stm32mp15-pinctrl.dtsi
index 95fafc51a1c8..40605ea85ee1 100644
--- a/arch/arm/boot/dts/st/stm32mp15-pinctrl.dtsi
+++ b/arch/arm/boot/dts/st/stm32mp15-pinctrl.dtsi
@@ -26,6 +26,13 @@
};
/omit-if-no-ref/
+ adc1_in10_pins_a: adc1-in10-0 {
+ pins {
+ pinmux = <STM32_PINMUX('C', 0, ANALOG)>;
+ };
+ };
+
+ /omit-if-no-ref/
adc12_ain_pins_a: adc12-ain-0 {
pins {
pinmux = <STM32_PINMUX('C', 3, ANALOG)>, /* ADC1 in13 */
@@ -585,6 +592,43 @@
};
/omit-if-no-ref/
+ ethernet0_rmii_pins_d: rmii-3 {
+ pins1 {
+ pinmux = <STM32_PINMUX('B', 12, AF11)>, /* ETH1_RMII_TXD0 */
+ <STM32_PINMUX('B', 13, AF11)>, /* ETH1_RMII_TXD1 */
+ <STM32_PINMUX('B', 11, AF11)>, /* ETH1_RMII_TX_EN */
+ <STM32_PINMUX('A', 1, AF11)>, /* ETH1_RMII_REF_CLK */
+ <STM32_PINMUX('A', 2, AF11)>, /* ETH1_MDIO */
+ <STM32_PINMUX('C', 1, AF11)>; /* ETH1_MDC */
+ bias-disable;
+ drive-push-pull;
+ slew-rate = <2>;
+ };
+
+ pins2 {
+ pinmux = <STM32_PINMUX('C', 4, AF11)>, /* ETH1_RMII_RXD0 */
+ <STM32_PINMUX('C', 5, AF11)>, /* ETH1_RMII_RXD1 */
+ <STM32_PINMUX('A', 7, AF11)>; /* ETH1_RMII_CRS_DV */
+ bias-disable;
+ };
+ };
+
+ /omit-if-no-ref/
+ ethernet0_rmii_sleep_pins_d: rmii-sleep-3 {
+ pins1 {
+ pinmux = <STM32_PINMUX('B', 12, ANALOG)>, /* ETH1_RMII_TXD0 */
+ <STM32_PINMUX('B', 13, ANALOG)>, /* ETH1_RMII_TXD1 */
+ <STM32_PINMUX('B', 11, ANALOG)>, /* ETH1_RMII_TX_EN */
+ <STM32_PINMUX('A', 2, ANALOG)>, /* ETH1_MDIO */
+ <STM32_PINMUX('C', 1, ANALOG)>, /* ETH1_MDC */
+ <STM32_PINMUX('C', 4, ANALOG)>, /* ETH1_RMII_RXD0 */
+ <STM32_PINMUX('C', 5, ANALOG)>, /* ETH1_RMII_RXD1 */
+ <STM32_PINMUX('A', 1, ANALOG)>, /* ETH1_RMII_REF_CLK */
+ <STM32_PINMUX('A', 7, ANALOG)>; /* ETH1_RMII_CRS_DV */
+ };
+ };
+
+ /omit-if-no-ref/
fmc_pins_a: fmc-0 {
pins1 {
pinmux = <STM32_PINMUX('D', 4, AF12)>, /* FMC_NOE */
@@ -726,6 +770,25 @@
};
/omit-if-no-ref/
+ i2c1_pins_c: i2c1-2 {
+ pins {
+ pinmux = <STM32_PINMUX('D', 12, AF5)>, /* I2C1_SCL */
+ <STM32_PINMUX('D', 13, AF5)>; /* I2C1_SDA */
+ bias-disable;
+ drive-open-drain;
+ slew-rate = <0>;
+ };
+ };
+
+ /omit-if-no-ref/
+ i2c1_sleep_pins_c: i2c1-sleep-2 {
+ pins {
+ pinmux = <STM32_PINMUX('D', 12, ANALOG)>, /* I2C1_SCL */
+ <STM32_PINMUX('D', 13, ANALOG)>; /* I2C1_SDA */
+ };
+ };
+
+ /omit-if-no-ref/
i2c2_pins_a: i2c2-0 {
pins {
pinmux = <STM32_PINMUX('H', 4, AF4)>, /* I2C2_SCL */
@@ -820,6 +883,27 @@
};
/omit-if-no-ref/
+ i2s1_pins_a: i2s1-0 {
+ pins {
+ pinmux = <STM32_PINMUX('A', 6, AF5)>, /* I2S2_SDI */
+ <STM32_PINMUX('A', 4, AF5)>, /* I2S2_WS */
+ <STM32_PINMUX('A', 5, AF5)>; /* I2S2_CK */
+ slew-rate = <0>;
+ drive-push-pull;
+ bias-disable;
+ };
+ };
+
+ /omit-if-no-ref/
+ i2s1_sleep_pins_a: i2s1-sleep-0 {
+ pins {
+ pinmux = <STM32_PINMUX('A', 6, ANALOG)>, /* I2S1_SDI */
+ <STM32_PINMUX('A', 4, ANALOG)>, /* I2S1_WS */
+ <STM32_PINMUX('A', 5, ANALOG)>; /* I2S1_CK */
+ };
+ };
+
+ /omit-if-no-ref/
i2s2_pins_a: i2s2-0 {
pins {
pinmux = <STM32_PINMUX('I', 3, AF5)>, /* I2S2_SDO */
@@ -1419,6 +1503,23 @@
};
/omit-if-no-ref/
+ pwm1_pins_d: pwm1-3 {
+ pins {
+ pinmux = <STM32_PINMUX('A', 0, AF2)>; /* TIM5_CH1 */
+ bias-pull-down;
+ drive-push-pull;
+ slew-rate = <0>;
+ };
+ };
+
+ /omit-if-no-ref/
+ pwm1_sleep_pins_d: pwm1-sleep-3 {
+ pins {
+ pinmux = <STM32_PINMUX('A', 0, ANALOG)>;
+ };
+ };
+
+ /omit-if-no-ref/
pwm2_pins_a: pwm2-0 {
pins {
pinmux = <STM32_PINMUX('A', 3, AF1)>; /* TIM2_CH4 */
@@ -2161,6 +2262,66 @@
};
/omit-if-no-ref/
+ sdmmc2_b4_pins_c: sdmmc2-b4-2 {
+ pins1 {
+ pinmux = <STM32_PINMUX('B', 14, AF9)>, /* SDMMC2_D0 */
+ <STM32_PINMUX('B', 7, AF10)>, /* SDMMC2_D1 */
+ <STM32_PINMUX('B', 3, AF9)>, /* SDMMC2_D2 */
+ <STM32_PINMUX('B', 4, AF9)>, /* SDMMC2_D3 */
+ <STM32_PINMUX('G', 6, AF10)>; /* SDMMC2_CMD */
+ slew-rate = <1>;
+ drive-push-pull;
+ bias-pull-up;
+ };
+
+ pins2 {
+ pinmux = <STM32_PINMUX('E', 3, AF9)>; /* SDMMC2_CK */
+ slew-rate = <2>;
+ drive-push-pull;
+ bias-pull-up;
+ };
+ };
+
+ /omit-if-no-ref/
+ sdmmc2_b4_od_pins_c: sdmmc2-b4-od-2 {
+ pins1 {
+ pinmux = <STM32_PINMUX('B', 14, AF9)>, /* SDMMC2_D0 */
+ <STM32_PINMUX('B', 7, AF10)>, /* SDMMC2_D1 */
+ <STM32_PINMUX('B', 3, AF9)>, /* SDMMC2_D2 */
+ <STM32_PINMUX('B', 4, AF9)>; /* SDMMC2_D3 */
+ slew-rate = <1>;
+ drive-push-pull;
+ bias-pull-up;
+ };
+
+ pins2 {
+ pinmux = <STM32_PINMUX('E', 3, AF9)>; /* SDMMC2_CK */
+ slew-rate = <2>;
+ drive-push-pull;
+ bias-pull-up;
+ };
+
+ pins3 {
+ pinmux = <STM32_PINMUX('G', 6, AF10)>; /* SDMMC2_CMD */
+ slew-rate = <1>;
+ drive-open-drain;
+ bias-pull-up;
+ };
+ };
+
+ /omit-if-no-ref/
+ sdmmc2_b4_sleep_pins_c: sdmmc2-b4-sleep-2 {
+ pins {
+ pinmux = <STM32_PINMUX('B', 14, ANALOG)>, /* SDMMC2_D0 */
+ <STM32_PINMUX('B', 7, ANALOG)>, /* SDMMC2_D1 */
+ <STM32_PINMUX('B', 3, ANALOG)>, /* SDMMC2_D2 */
+ <STM32_PINMUX('B', 4, ANALOG)>, /* SDMMC2_D3 */
+ <STM32_PINMUX('E', 3, ANALOG)>, /* SDMMC2_CK */
+ <STM32_PINMUX('G', 6, ANALOG)>; /* SDMMC2_CMD */
+ };
+ };
+
+ /omit-if-no-ref/
sdmmc2_d47_pins_a: sdmmc2-d47-0 {
pins {
pinmux = <STM32_PINMUX('A', 8, AF9)>, /* SDMMC2_D4 */
@@ -2390,6 +2551,66 @@
};
/omit-if-no-ref/
+ sdmmc3_b4_pins_c: sdmmc3-b4-2 {
+ pins1 {
+ pinmux = <STM32_PINMUX('D', 1, AF10)>, /* SDMMC3_D0 */
+ <STM32_PINMUX('D', 4, AF10)>, /* SDMMC3_D1 */
+ <STM32_PINMUX('D', 5, AF10)>, /* SDMMC3_D2 */
+ <STM32_PINMUX('D', 7, AF10)>, /* SDMMC3_D3 */
+ <STM32_PINMUX('D', 0, AF10)>; /* SDMMC3_CMD */
+ slew-rate = <1>;
+ drive-push-pull;
+ bias-pull-up;
+ };
+
+ pins2 {
+ pinmux = <STM32_PINMUX('G', 15, AF10)>; /* SDMMC3_CK */
+ slew-rate = <2>;
+ drive-push-pull;
+ bias-pull-up;
+ };
+ };
+
+ /omit-if-no-ref/
+ sdmmc3_b4_od_pins_c: sdmmc3-b4-od-2 {
+ pins1 {
+ pinmux = <STM32_PINMUX('D', 1, AF10)>, /* SDMMC3_D0 */
+ <STM32_PINMUX('D', 4, AF10)>, /* SDMMC3_D1 */
+ <STM32_PINMUX('D', 5, AF10)>, /* SDMMC3_D2 */
+ <STM32_PINMUX('D', 7, AF10)>; /* SDMMC3_D3 */
+ slew-rate = <1>;
+ drive-push-pull;
+ bias-pull-up;
+ };
+
+ pins2 {
+ pinmux = <STM32_PINMUX('G', 15, AF10)>; /* SDMMC3_CK */
+ slew-rate = <2>;
+ drive-push-pull;
+ bias-pull-up;
+ };
+
+ pins3 {
+ pinmux = <STM32_PINMUX('D', 0, AF10)>; /* SDMMC3_CMD */
+ slew-rate = <1>;
+ drive-open-drain;
+ bias-pull-up;
+ };
+ };
+
+ /omit-if-no-ref/
+ sdmmc3_b4_sleep_pins_c: sdmmc3-b4-sleep-2 {
+ pins {
+ pinmux = <STM32_PINMUX('D', 1, ANALOG)>, /* SDMMC3_D0 */
+ <STM32_PINMUX('D', 4, ANALOG)>, /* SDMMC3_D1 */
+ <STM32_PINMUX('D', 5, ANALOG)>, /* SDMMC3_D2 */
+ <STM32_PINMUX('D', 7, ANALOG)>, /* SDMMC3_D3 */
+ <STM32_PINMUX('G', 15, ANALOG)>, /* SDMMC3_CK */
+ <STM32_PINMUX('D', 0, ANALOG)>; /* SDMMC3_CMD */
+ };
+ };
+
+ /omit-if-no-ref/
spdifrx_pins_a: spdifrx-0 {
pins {
pinmux = <STM32_PINMUX('G', 12, AF8)>; /* SPDIF_IN1 */
@@ -2601,6 +2822,41 @@
};
/omit-if-no-ref/
+ uart4_pins_e: uart4-4 {
+ pins1 {
+ pinmux = <STM32_PINMUX('G', 11, AF6)>; /* UART4_TX */
+ bias-disable;
+ drive-push-pull;
+ slew-rate = <0>;
+ };
+
+ pins2 {
+ pinmux = <STM32_PINMUX('B', 8, AF8)>; /* UART4_RX */
+ bias-disable;
+ };
+ };
+
+ /omit-if-no-ref/
+ uart4_idle_pins_e: uart4-idle-4 {
+ pins1 {
+ pinmux = <STM32_PINMUX('G', 11, ANALOG)>; /* UART4_TX */
+ };
+
+ pins2 {
+ pinmux = <STM32_PINMUX('B', 8, AF8)>; /* UART4_RX */
+ bias-disable;
+ };
+ };
+
+ /omit-if-no-ref/
+ uart4_sleep_pins_e: uart4-sleep-4 {
+ pins {
+ pinmux = <STM32_PINMUX('G', 11, ANALOG)>, /* UART4_TX */
+ <STM32_PINMUX('B', 8, ANALOG)>; /* UART4_RX */
+ };
+ };
+
+ /omit-if-no-ref/
uart5_pins_a: uart5-0 {
pins1 {
pinmux = <STM32_PINMUX('B', 13, AF14)>; /* UART5_TX */
@@ -2678,6 +2934,23 @@
};
/omit-if-no-ref/
+ uart7_pins_d: uart7-3 {
+ pins1 {
+ pinmux = <STM32_PINMUX('F', 7, AF7)>, /* UART7_TX */
+ <STM32_PINMUX('F', 8, AF7)>; /* UART7_RTS */
+ bias-disable;
+ drive-push-pull;
+ slew-rate = <0>;
+ };
+
+ pins2 {
+ pinmux = <STM32_PINMUX('E', 7, AF7)>, /* UART7_RX */
+ <STM32_PINMUX('F', 9, AF7)>; /* UART7_CTS */
+ bias-disable;
+ };
+ };
+
+ /omit-if-no-ref/
uart8_pins_a: uart8-0 {
pins1 {
pinmux = <STM32_PINMUX('E', 1, AF8)>; /* UART8_TX */
@@ -3119,6 +3392,25 @@
};
/omit-if-no-ref/
+ i2c6_pins_b: i2c6-1 {
+ pins {
+ pinmux = <STM32_PINMUX('A', 11, AF2)>, /* I2C6_SCL */
+ <STM32_PINMUX('A', 12, AF2)>; /* I2C6_SDA */
+ bias-disable;
+ drive-open-drain;
+ slew-rate = <0>;
+ };
+ };
+
+ /omit-if-no-ref/
+ i2c6_sleep_pins_b: i2c6-sleep-1 {
+ pins {
+ pinmux = <STM32_PINMUX('A', 11, ANALOG)>, /* I2C6_SCL */
+ <STM32_PINMUX('A', 12, ANALOG)>; /* I2C6_SDA */
+ };
+ };
+
+ /omit-if-no-ref/
spi1_pins_a: spi1-0 {
pins1 {
pinmux = <STM32_PINMUX('Z', 0, AF5)>, /* SPI1_SCK */
diff --git a/arch/arm/boot/dts/st/stm32mp151.dtsi b/arch/arm/boot/dts/st/stm32mp151.dtsi
index b9a87fbe971d..0daa8ffe2ff5 100644
--- a/arch/arm/boot/dts/st/stm32mp151.dtsi
+++ b/arch/arm/boot/dts/st/stm32mp151.dtsi
@@ -1781,7 +1781,6 @@
st,syscon = <&syscfg 0x4>;
snps,mixed-burst;
snps,pbl = <2>;
- snps,en-tx-lpi-clockgating;
snps,axi-config = <&stmmac_axi_config_0>;
snps,tso;
access-controllers = <&etzpc 94>;
diff --git a/arch/arm/boot/dts/st/stm32mp151c-plyaqm.dts b/arch/arm/boot/dts/st/stm32mp151c-plyaqm.dts
new file mode 100644
index 000000000000..39a3211c6133
--- /dev/null
+++ b/arch/arm/boot/dts/st/stm32mp151c-plyaqm.dts
@@ -0,0 +1,376 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/dts-v1/;
+
+#include <arm/st/stm32mp151.dtsi>
+#include <arm/st/stm32mp15xc.dtsi>
+#include <arm/st/stm32mp15-pinctrl.dtsi>
+#include <arm/st/stm32mp15xxad-pinctrl.dtsi>
+#include <arm/st/stm32mp15-scmi.dtsi>
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/leds/common.h>
+
+/ {
+ model = "Plymovent AQM board";
+ compatible = "ply,plyaqm", "st,stm32mp151";
+
+ aliases {
+ ethernet0 = &ethernet0;
+ serial0 = &uart4;
+ serial1 = &uart7;
+ };
+
+ codec {
+ compatible = "invensense,ics43432";
+
+ port {
+ codec_endpoint: endpoint {
+ remote-endpoint = <&i2s1_endpoint>;
+ dai-format = "i2s";
+ };
+ };
+ };
+
+ firmware {
+ optee {
+ compatible = "linaro,optee-tz";
+ method = "smc";
+ };
+ };
+
+ leds {
+ compatible = "gpio-leds";
+
+ led-0 {
+ gpios = <&gpioa 3 GPIO_ACTIVE_HIGH>; /* WHITE_EN */
+ color = <LED_COLOR_ID_WHITE>;
+ default-state = "on";
+ };
+ };
+
+ v3v3: fixed-regulator-v3v3 {
+ compatible = "regulator-fixed";
+ regulator-name = "v3v3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ };
+
+ v5v_sw: fixed-regulator-v5sw {
+ compatible = "regulator-fixed";
+ regulator-name = "5v-switched";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ gpio = <&gpioe 10 GPIO_ACTIVE_HIGH>; /* 5V_SWITCHED_EN */
+ startup-delay-us = <100000>;
+ enable-active-high;
+ regulator-boot-on;
+ };
+
+ reserved-memory {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ optee@cfd00000 {
+ reg = <0xcfd00000 0x300000>;
+ no-map;
+ };
+ };
+
+ sound {
+ compatible = "audio-graph-card";
+ label = "STM32MP15";
+ dais = <&i2s1_port>;
+ };
+
+ wifi_pwrseq: wifi-pwrseq {
+ compatible = "mmc-pwrseq-simple";
+ reset-gpios = <&gpioe 12 GPIO_ACTIVE_LOW>; /* WLAN_REG_ON */
+ };
+};
+
+&adc {
+ pinctrl-names = "default";
+ pinctrl-0 = <&adc1_in10_pins_a>;
+ vdda-supply = <&v3v3>;
+ vref-supply = <&v3v3>;
+ status = "okay";
+
+ adc@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "okay";
+
+ channel@10 { /* NTC */
+ reg = <10>;
+ st,min-sample-time-ns = <10000>; /* 10µs sampling time */
+ };
+ };
+};
+
+&cpu0 {
+ clocks = <&scmi_clk CK_SCMI_MPU>;
+};
+
+&cryp1 {
+ clocks = <&scmi_clk CK_SCMI_CRYP1>;
+ resets = <&scmi_reset RST_SCMI_CRYP1>;
+ status = "okay";
+};
+
+&ethernet0 {
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&ethernet0_rmii_pins_d>;
+ pinctrl-1 = <&ethernet0_rmii_sleep_pins_d>;
+ phy-mode = "rmii";
+ max-speed = <100>;
+ phy-handle = <&ethphy0>;
+ status = "okay";
+
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "snps,dwmac-mdio";
+
+ /* KSZ8081RNA PHY */
+ ethphy0: ethernet-phy@0 {
+ reg = <0>;
+ interrupts-extended = <&gpiob 0 IRQ_TYPE_LEVEL_LOW>;
+ reset-gpios = <&gpiob 1 GPIO_ACTIVE_LOW>;
+ reset-assert-us = <10000>;
+ reset-deassert-us = <300>;
+ };
+ };
+};
+
+&gpioa {
+ gpio-line-names =
+ "", "", "", "", "", "", "", "",
+ "", "", "", "", "", "HWID_PL_N", "HWID_CP", "";
+};
+
+&gpiob {
+ gpio-line-names =
+ "", "", "", "", "", "", "LED_LATCH", "",
+ "", "RELAY1_EN", "", "", "", "", "", "";
+};
+
+&gpioc {
+ gpio-line-names =
+ "", "", "", "", "", "", "", "",
+ "", "", "", "", "", "HWID_Q7", "", "";
+};
+
+&gpioe {
+ gpio-line-names =
+ "", "", "", "", "RELAY2_EN", "", "", "",
+ "", "", "", "", "", "", "", "";
+};
+
+&gpiog {
+ gpio-line-names =
+ "", "", "", "", "", "", "", "SW1",
+ "", "", "", "", "", "", "", "";
+};
+
+&gpioz {
+ clocks = <&scmi_clk CK_SCMI_GPIOZ>;
+};
+
+&hash1 {
+ clocks = <&scmi_clk CK_SCMI_HASH1>;
+ resets = <&scmi_reset RST_SCMI_HASH1>;
+};
+
+&i2c1 {
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&i2c1_pins_c>;
+ pinctrl-1 = <&i2c1_sleep_pins_c>;
+ i2c-scl-rising-time-ns = <185>;
+ i2c-scl-falling-time-ns = <20>;
+ status = "okay";
+ /delete-property/dmas;
+ /delete-property/dma-names;
+};
+
+&i2c4 {
+ clocks = <&scmi_clk CK_SCMI_I2C4>;
+ resets = <&scmi_reset RST_SCMI_I2C4>;
+};
+
+&i2c6 {
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&i2c6_pins_b>;
+ pinctrl-1 = <&i2c6_sleep_pins_b>;
+ i2c-scl-rising-time-ns = <185>;
+ i2c-scl-falling-time-ns = <20>;
+ clocks = <&scmi_clk CK_SCMI_I2C6>;
+ resets = <&scmi_reset RST_SCMI_I2C6>;
+ status = "okay";
+ /delete-property/dmas;
+ /delete-property/dma-names;
+
+ pressure-sensor@47 {
+ compatible = "bosch,bmp580";
+ reg = <0x47>;
+ vdda-supply = <&v5v_sw>;
+ vddd-supply = <&v5v_sw>;
+ };
+
+ co2-sensor@62 {
+ compatible = "sensirion,scd41";
+ reg = <0x62>;
+ vdd-supply = <&v5v_sw>;
+ };
+
+ pm-sensor@69 {
+ compatible = "sensirion,sps30";
+ reg = <0x69>;
+ };
+};
+
+&i2s1 {
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&i2s1_pins_a>;
+ pinctrl-1 = <&i2s1_sleep_pins_a>;
+ clocks = <&rcc SPI1>, <&rcc SPI1_K>, <&rcc PLL3_Q>, <&rcc PLL3_R>;
+ clock-names = "pclk", "i2sclk", "x8k", "x11k";
+ #clock-cells = <0>; /* Set I2S1 as master clock provider */
+ status = "okay";
+
+ i2s1_port: port {
+ i2s1_endpoint: endpoint {
+ format = "i2s";
+ mclk-fs = <256>;
+ remote-endpoint = <&codec_endpoint>;
+ };
+ };
+};
+
+&iwdg2 {
+ clocks = <&rcc IWDG2>, <&scmi_clk CK_SCMI_LSI>;
+ status = "okay";
+};
+
+&m4_rproc {
+ /delete-property/ st,syscfg-holdboot;
+ resets = <&scmi_reset RST_SCMI_MCU>,
+ <&scmi_reset RST_SCMI_MCU_HOLD_BOOT>;
+ reset-names = "mcu_rst", "hold_boot";
+};
+
+&mdma1 {
+ resets = <&scmi_reset RST_SCMI_MDMA>;
+};
+
+&rcc {
+ compatible = "st,stm32mp1-rcc-secure", "syscon";
+ clock-names = "hse", "hsi", "csi", "lse", "lsi";
+ clocks = <&scmi_clk CK_SCMI_HSE>,
+ <&scmi_clk CK_SCMI_HSI>,
+ <&scmi_clk CK_SCMI_CSI>,
+ <&scmi_clk CK_SCMI_LSE>,
+ <&scmi_clk CK_SCMI_LSI>;
+};
+
+&rng1 {
+ clocks = <&scmi_clk CK_SCMI_RNG1>;
+ resets = <&scmi_reset RST_SCMI_RNG1>;
+ status = "okay";
+};
+
+&rtc {
+ clocks = <&scmi_clk CK_SCMI_RTCAPB>, <&scmi_clk CK_SCMI_RTC>;
+};
+
+/* SD card without Card-detect */
+&sdmmc1 {
+ pinctrl-names = "default", "opendrain", "sleep";
+ pinctrl-0 = <&sdmmc1_b4_pins_a>;
+ pinctrl-1 = <&sdmmc1_b4_od_pins_a>;
+ pinctrl-2 = <&sdmmc1_b4_sleep_pins_a>;
+ broken-cd;
+ no-sdio;
+ no-1-8-v;
+ st,neg-edge;
+ bus-width = <4>;
+ vmmc-supply = <&v3v3>;
+ status = "okay";
+};
+
+/* EMMC */
+&sdmmc2 {
+ pinctrl-names = "default", "opendrain", "sleep";
+ pinctrl-0 = <&sdmmc2_b4_pins_c &sdmmc2_d47_pins_b>;
+ pinctrl-1 = <&sdmmc2_b4_od_pins_c &sdmmc2_d47_pins_b>;
+ pinctrl-2 = <&sdmmc2_b4_sleep_pins_c &sdmmc2_d47_sleep_pins_b>;
+ non-removable;
+ no-sd;
+ no-sdio;
+ no-1-8-v;
+ st,neg-edge;
+ bus-width = <8>;
+ vmmc-supply = <&v3v3>;
+ status = "okay";
+};
+
+/* Wifi */
+&sdmmc3 {
+ pinctrl-names = "default", "opendrain", "sleep";
+ pinctrl-0 = <&sdmmc3_b4_pins_c>;
+ pinctrl-1 = <&sdmmc3_b4_od_pins_c>;
+ pinctrl-2 = <&sdmmc3_b4_sleep_pins_c>;
+ non-removable;
+ st,neg-edge;
+ bus-width = <4>;
+ vmmc-supply = <&v3v3>;
+ mmc-pwrseq = <&wifi_pwrseq>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "okay";
+
+ wifi@1 {
+ reg = <1>;
+ compatible = "brcm,bcm4329-fmac";
+ };
+};
+
+&timers5 {
+ status = "okay";
+ /delete-property/dmas;
+ /delete-property/dma-names;
+
+ pwm {
+ pinctrl-0 = <&pwm1_pins_d>;
+ pinctrl-1 = <&pwm1_sleep_pins_d>;
+ pinctrl-names = "default", "sleep";
+ status = "okay";
+ };
+};
+
+&uart4 {
+ pinctrl-names = "default", "sleep", "idle";
+ pinctrl-0 = <&uart4_pins_e>;
+ pinctrl-1 = <&uart4_sleep_pins_e>;
+ pinctrl-2 = <&uart4_idle_pins_e>;
+ /delete-property/dmas;
+ /delete-property/dma-names;
+ status = "okay";
+};
+
+&uart7 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart7_pins_d>;
+ uart-has-rtscts;
+ status = "okay";
+
+ bluetooth {
+ compatible = "brcm,bcm43438-bt";
+ shutdown-gpios = <&gpioe 11 GPIO_ACTIVE_HIGH>; /* BT_REG_ON */
+ max-speed = <4000000>;
+ vbat-supply = <&v3v3>;
+ vddio-supply = <&v3v3>;
+ interrupt-parent = <&gpiog>;
+ interrupts = <12 IRQ_TYPE_EDGE_RISING>; /* BT_HOST_WAKE */
+ interrupt-names = "host-wakeup";
+ };
+};
diff --git a/arch/arm/boot/dts/st/stm32mp153c-lxa-fairytux2-gen1.dts b/arch/arm/boot/dts/st/stm32mp153c-lxa-fairytux2-gen1.dts
new file mode 100644
index 000000000000..3a0e84262424
--- /dev/null
+++ b/arch/arm/boot/dts/st/stm32mp153c-lxa-fairytux2-gen1.dts
@@ -0,0 +1,103 @@
+// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause)
+/*
+ * Copyright (C) 2024 Leonard Göhrs, Pengutronix
+ */
+
+/dts-v1/;
+
+#include "stm32mp153c-lxa-fairytux2.dtsi"
+
+/ {
+ model = "Linux Automation GmbH FairyTux 2 Gen 1";
+ compatible = "lxa,stm32mp153c-fairytux2-gen1", "oct,stm32mp153x-osd32", "st,stm32mp153";
+
+ gpio-keys {
+ compatible = "gpio-keys";
+
+ button-left {
+ label = "USER_BTN1";
+ linux,code = <KEY_ESC>;
+ gpios = <&gpioi 11 (GPIO_ACTIVE_LOW | GPIO_PULL_UP)>;
+ };
+
+ button-right {
+ label = "USER_BTN2";
+ linux,code = <KEY_HOME>;
+ gpios = <&gpioe 9 (GPIO_ACTIVE_LOW | GPIO_PULL_UP)>;
+ };
+ };
+};
+
+&gpiof {
+ gpio-line-names = "GPIO1", "GPIO2", "", "", "", /* 0 */
+ "", "", "", "", "", /* 5 */
+ "", "", "", "", "", /* 10 */
+ ""; /* 15 */
+};
+
+&gpioh {
+ gpio-line-names = "", "", "", "", "LCD_RESET", /* 0 */
+ "", "", "", "", "", /* 5 */
+ "", "", "", "GPIO3", "", /* 10 */
+ ""; /* 15 */
+};
+
+&gpioi {
+ gpio-line-names = "", "", "", "", "", /* 0 */
+ "", "", "", "ETH_", "", /* 5 */
+ "", "USER_BTN1"; /* 10 */
+};
+
+&i2c1 {
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&i2c1_pins_b>;
+ pinctrl-1 = <&i2c1_sleep_pins_b>;
+ status = "okay";
+
+ io_board_gpio: gpio@20 {
+ compatible = "ti,tca6408";
+ reg = <0x20>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ vcc-supply = <&v3v3_hdmi>;
+ gpio-line-names = "LED1_GA_YK", "LED2_GA_YK", "LED1_GK_YA", "LED2_GK_YA",
+ "RS485_EN", "RS485_120R", "", "CAN_120R";
+ };
+};
+
+&led_controller_io {
+ /*
+ * led-2 and led-3 are internally connected antiparallel to one
+ * another inside the ethernet jack like this:
+ * GPIO1 ---+---|led-2|>--+--- GPIO3
+ * +--<|led-3|---+
+ * I.e. only one of the LEDs can be illuminated at a time while
+ * the other output must be driven low.
+ * This should likely be implemented using a multi color LED
+ * driver for antiparallel LEDs.
+ */
+ led-2 {
+ color = <LED_COLOR_ID_GREEN>;
+ function = LED_FUNCTION_ACTIVITY;
+ gpios = <&io_board_gpio 1 GPIO_ACTIVE_HIGH>;
+ };
+
+ led-3 {
+ color = <LED_COLOR_ID_ORANGE>;
+ function = LED_FUNCTION_ACTIVITY;
+ gpios = <&io_board_gpio 3 GPIO_ACTIVE_HIGH>;
+ };
+};
+
+&usart3 {
+ /*
+ * On Gen 1 FairyTux 2 only RTS can be used, not CTS as well,
+ * because pins PD11 (CTS) and PI11 (USER_BTN1) share the same
+ * interrupt and only one of them can be used at a time.
+ */
+ rts-gpios = <&gpiod 12 GPIO_ACTIVE_LOW>;
+};
+
+&usbotg_hs {
+ dr_mode = "peripheral";
+};
diff --git a/arch/arm/boot/dts/st/stm32mp153c-lxa-fairytux2-gen2.dts b/arch/arm/boot/dts/st/stm32mp153c-lxa-fairytux2-gen2.dts
new file mode 100644
index 000000000000..66e6da912508
--- /dev/null
+++ b/arch/arm/boot/dts/st/stm32mp153c-lxa-fairytux2-gen2.dts
@@ -0,0 +1,147 @@
+// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause)
+/*
+ * Copyright (C) 2024 Leonard Göhrs, Pengutronix
+ */
+
+/dts-v1/;
+
+#include "stm32mp153c-lxa-fairytux2.dtsi"
+
+/ {
+ model = "Linux Automation GmbH FairyTux 2 Gen 2";
+ compatible = "lxa,stm32mp153c-fairytux2-gen2", "oct,stm32mp153x-osd32", "st,stm32mp153";
+
+ gpio-keys {
+ compatible = "gpio-keys";
+
+ button-left {
+ label = "USER_BTN1";
+ linux,code = <KEY_ESC>;
+ gpios = <&gpioi 10 (GPIO_ACTIVE_LOW | GPIO_PULL_UP)>;
+ };
+
+ button-right {
+ label = "USER_BTN2";
+ linux,code = <KEY_HOME>;
+ gpios = <&gpioe 9 (GPIO_ACTIVE_LOW | GPIO_PULL_UP)>;
+ };
+ };
+};
+
+&gpiof {
+ gpio-line-names = "", "", "", "", "", /* 0 */
+ "", "", "", "", "", /* 5 */
+ "", "", "", "", "", /* 10 */
+ ""; /* 15 */
+};
+
+&gpioh {
+ gpio-line-names = "", "", "", "", "LCD_RESET", /* 0 */
+ "", "", "", "", "", /* 5 */
+ "", "", "GPIO1", "GPIO_INT", "", /* 10 */
+ ""; /* 15 */
+};
+
+&gpioi {
+ gpio-line-names = "GPIO2", "", "", "", "", /* 0 */
+ "", "", "", "ETH_", "", /* 5 */
+ "", "USER_BTN1"; /* 10 */
+};
+
+&i2c1 {
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&i2c1_pins_b>;
+ pinctrl-1 = <&i2c1_sleep_pins_b>;
+ status = "okay";
+
+ io_board_gpio: gpio@20 {
+ compatible = "ti,tca6408";
+ reg = <0x20>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-parent = <&gpioh>;
+ interrupts = <13 IRQ_TYPE_LEVEL_LOW>;
+ interrupt-controller;
+ pinctrl-names = "default";
+ pinctrl-0 = <&board_tca6408_pins>;
+ #interrupt-cells = <2>;
+ vcc-supply = <&v3v3_hdmi>;
+ gpio-line-names = "LED1_GA_YK", "LED2_GA_YK", "LED1_GK_YA", "USB_CC_ALERT",
+ "RS485_EN", "RS485_120R", "USB_CC_RESET", "CAN_120R";
+ };
+
+ usb_c: typec@28 {
+ compatible = "st,stusb1600";
+ reg = <0x28>;
+ interrupt-parent = <&io_board_gpio>;
+ interrupts = <3 IRQ_TYPE_EDGE_FALLING>;
+ vdd-supply = <&reg_5v>;
+ vsys-supply = <&v3v3_hdmi>;
+
+ connector {
+ compatible = "usb-c-connector";
+ label = "USB-C";
+ power-role = "dual";
+ typec-power-opmode = "default";
+
+ port {
+ con_usbotg_hs_ep: endpoint {
+ remote-endpoint = <&usbotg_hs_ep>;
+ };
+ };
+ };
+ };
+
+ temperature-sensor@48 {
+ compatible = "national,lm75a";
+ reg = <0x48>;
+ /*
+ * The sensor itself is powered by a voltage divider from the
+ * always-on 5V supply.
+ * The required pull-up resistors however are on v3v3_hdmi.
+ */
+ vs-supply = <&v3v3_hdmi>;
+ };
+
+ io_board_eeprom: eeprom@56 {
+ compatible = "atmel,24c04";
+ reg = <0x56>;
+ vcc-supply = <&v3v3_hdmi>;
+ };
+};
+
+&rtc {
+ status = "okay";
+};
+
+&led_controller_io {
+ led-2 {
+ color = <LED_COLOR_ID_ORANGE>;
+ function = LED_FUNCTION_ACTIVITY;
+ gpios = <&io_board_gpio 1 GPIO_ACTIVE_LOW>;
+ };
+};
+
+&usart3 {
+ rts-gpios = <&gpiod 12 GPIO_ACTIVE_LOW>;
+ cts-gpios = <&gpiod 11 GPIO_ACTIVE_LOW>;
+};
+
+&usbotg_hs {
+ usb-role-switch;
+
+ port {
+ usbotg_hs_ep: endpoint {
+ remote-endpoint = <&con_usbotg_hs_ep>;
+ };
+ };
+};
+
+&pinctrl {
+ board_tca6408_pins: tca6408-0 {
+ pins {
+ pinmux = <STM32_PINMUX('H', 13, GPIO)>;
+ bias-pull-up;
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/st/stm32mp153c-lxa-fairytux2.dtsi b/arch/arm/boot/dts/st/stm32mp153c-lxa-fairytux2.dtsi
new file mode 100644
index 000000000000..9eeb9d6b5eb0
--- /dev/null
+++ b/arch/arm/boot/dts/st/stm32mp153c-lxa-fairytux2.dtsi
@@ -0,0 +1,397 @@
+// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause)
+/*
+ * Copyright (C) 2020 STMicroelectronics - All Rights Reserved
+ * Copyright (C) 2021 Rouven Czerwinski, Pengutronix
+ * Copyright (C) 2023, 2024 Leonard Göhrs, Pengutronix
+ */
+
+#include "stm32mp153.dtsi"
+#include "stm32mp15xc.dtsi"
+#include "stm32mp15xx-osd32.dtsi"
+#include "stm32mp15xxac-pinctrl.dtsi"
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/leds/common.h>
+#include <dt-bindings/pwm/pwm.h>
+
+/ {
+ aliases {
+ can0 = &m_can1;
+ ethernet0 = &ethernet0;
+ i2c0 = &i2c1;
+ i2c1 = &i2c4;
+ mmc1 = &sdmmc2;
+ serial0 = &uart4;
+ serial1 = &usart3;
+ spi0 = &spi4;
+ };
+
+ chosen {
+ stdout-path = &uart4;
+ };
+
+ backlight: backlight {
+ compatible = "pwm-backlight";
+ power-supply = <&v3v3>;
+
+ brightness-levels = <0 31 63 95 127 159 191 223 255>;
+ default-brightness-level = <7>;
+ pwms = <&led_pwm 3 1000000 0>;
+ };
+
+ led-controller-cpu {
+ compatible = "gpio-leds";
+
+ led-0 {
+ color = <LED_COLOR_ID_GREEN>;
+ function = LED_FUNCTION_HEARTBEAT;
+ gpios = <&gpioa 13 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "heartbeat";
+ };
+ };
+
+ led_controller_io: led-controller-io {
+ compatible = "gpio-leds";
+
+ /*
+ * led-0 and led-1 are internally connected antiparallel to one
+ * another inside the ethernet jack like this:
+ * GPIO0 ---+---|led-0|>--+--- GPIO2
+ * +--<|led-1|---+
+ * I.e. only one of the LEDs can be illuminated at a time while
+ * the other output must be driven low.
+ * This should likely be implemented using a multi color LED
+ * driver for antiparallel LEDs.
+ */
+ led-0 {
+ color = <LED_COLOR_ID_GREEN>;
+ function = LED_FUNCTION_LAN;
+ gpios = <&io_board_gpio 0 GPIO_ACTIVE_HIGH>;
+ };
+
+ led-1 {
+ color = <LED_COLOR_ID_ORANGE>;
+ function = LED_FUNCTION_LAN;
+ gpios = <&io_board_gpio 2 GPIO_ACTIVE_HIGH>;
+ };
+ };
+
+ reg_5v: regulator-5v {
+ compatible = "regulator-fixed";
+ regulator-name = "5V";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ regulator-always-on;
+ };
+
+ reg_1v2: regulator-1v2 {
+ compatible = "regulator-fixed";
+ regulator-name = "1V2";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-always-on;
+ vin-supply = <&reg_5v>;
+ };
+};
+
+baseboard_eeprom: &sip_eeprom {
+};
+
+&crc1 {
+ status = "okay";
+};
+
+&cryp1 {
+ status = "okay";
+};
+
+&dts {
+ status = "okay";
+};
+
+&ethernet0 {
+ assigned-clocks = <&rcc ETHCK_K>, <&rcc PLL4_P>;
+ assigned-clock-parents = <&rcc PLL4_P>;
+ assigned-clock-rates = <125000000>; /* Clock PLL4 to 750 MHz in ATF */
+
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&ethernet0_rgmii_pins_b>;
+ pinctrl-1 = <&ethernet0_rgmii_sleep_pins_b>;
+
+ st,eth-clk-sel;
+ phy-mode = "rgmii-id";
+ phy-handle = <&ethphy>;
+ status = "okay";
+
+ mdio {
+ compatible = "snps,dwmac-mdio";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ethphy: ethernet-phy@3 { /* KSZ9031RN */
+ reg = <3>;
+ reset-gpios = <&gpioe 11 GPIO_ACTIVE_LOW>; /* ETH_RST# */
+ interrupt-parent = <&gpioa>;
+ interrupts = <6 IRQ_TYPE_EDGE_FALLING>; /* ETH_MDINT# */
+ reset-assert-us = <10000>;
+ reset-deassert-us = <300>;
+ micrel,force-master;
+ };
+ };
+};
+
+&gpioa {
+ gpio-line-names = "", "", "", "", "", /* 0 */
+ "", "ETH_INT", "", "", "", /* 5 */
+ "", "", "", "BOOTROM_LED", "", /* 10 */
+ ""; /* 15 */
+};
+
+&gpiob {
+ gpio-line-names = "", "", "", "", "", /* 0 */
+ "", "", "", "", "", /* 5 */
+ "", "", "", "", "", /* 10 */
+ ""; /* 15 */
+};
+
+&gpioc {
+ gpio-line-names = "", "", "", "", "", /* 0 */
+ "", "", "", "", "", /* 5 */
+ "", ""; /* 10 */
+};
+
+&gpiod {
+ gpio-line-names = "", "", "", "", "", /* 0 */
+ "", "", "LCD_TE", "", "", /* 5 */
+ "LCD_DC", "", "", "", "", /* 10 */
+ ""; /* 15 */
+};
+
+&gpioe {
+ gpio-line-names = "LCD_CS", "", "", "", "", /* 0 */
+ "", "", "", "", "", /* 5 */
+ "", "", "", "", "", /* 10 */
+ ""; /* 15 */
+};
+
+&gpiof {
+ gpio-line-names = "GPIO1", "GPIO2", "", "", "", /* 0 */
+ "", "", "", "", "", /* 5 */
+ "", "", "", "", "", /* 10 */
+ ""; /* 15 */
+};
+
+&gpiog {
+ gpio-line-names = "", "", "", "", "", /* 0 */
+ "", "", "", "", "", /* 5 */
+ "", "", "", "", "", /* 10 */
+ ""; /* 15 */
+};
+
+&gpioz {
+ gpio-line-names = "HWID0", "HWID1", "HWID2", "HWID3", "", /* 0 */
+ "", "HWID4", "HWID5"; /* 5 */
+};
+
+&hash1 {
+ status = "okay";
+};
+
+&iwdg2 {
+ timeout-sec = <8>;
+ status = "okay";
+};
+
+&m_can1 {
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&m_can1_pins_b>;
+ pinctrl-1 = <&m_can1_sleep_pins_b>;
+ status = "okay";
+ termination-gpios = <&io_board_gpio 7 GPIO_ACTIVE_HIGH>;
+ termination-ohms = <120>;
+};
+
+&pmic {
+ regulators {
+ buck1-supply = <&reg_5v>; /* VIN */
+ buck2-supply = <&reg_5v>; /* VIN */
+ buck3-supply = <&reg_5v>; /* VIN */
+ buck4-supply = <&reg_5v>; /* VIN */
+ ldo2-supply = <&reg_5v>; /* PMIC_LDO25IN */
+ ldo4-supply = <&reg_5v>; /* VIN */
+ ldo5-supply = <&reg_5v>; /* PMIC_LDO25IN */
+ vref_ddr-supply = <&reg_5v>; /* VIN */
+ boost-supply = <&reg_5v>; /* PMIC_BSTIN */
+ pwr_sw2-supply = <&bst_out>; /* PMIC_SWIN */
+ };
+};
+
+&pwr_regulators {
+ vdd-supply = <&vdd>;
+ vdd_3v3_usbfs-supply = <&vdd_usb>;
+};
+
+&sdmmc2 {
+ pinctrl-names = "default", "opendrain", "sleep";
+ pinctrl-0 = <&sdmmc2_b4_pins_a &sdmmc2_d47_pins_b>;
+ pinctrl-1 = <&sdmmc2_b4_od_pins_a &sdmmc2_d47_pins_b>;
+ pinctrl-2 = <&sdmmc2_b4_sleep_pins_a &sdmmc2_d47_sleep_pins_b>;
+ vmmc-supply = <&v3v3>;
+
+ bus-width = <8>;
+ mmc-ddr-3_3v;
+ no-1-8-v;
+ non-removable;
+ no-sd;
+ no-sdio;
+ st,neg-edge;
+
+ status = "okay";
+};
+
+&spi4 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&spi4_pins_a>;
+ cs-gpios = <&gpioe 0 GPIO_ACTIVE_LOW>;
+ status = "okay";
+
+ lcd: display@0 {
+ compatible = "shineworld,lh133k", "panel-mipi-dbi-spi";
+ reg = <0>;
+ power-supply = <&v3v3>;
+ io-supply = <&v3v3>;
+ backlight = <&backlight>;
+ dc-gpios = <&gpiod 10 GPIO_ACTIVE_HIGH>;
+ reset-gpios = <&gpioh 4 GPIO_ACTIVE_HIGH>;
+ spi-3wire;
+ spi-max-frequency = <32000000>;
+
+ width-mm = <23>;
+ height-mm = <23>;
+ rotation = <180>;
+
+ panel-timing {
+ hactive = <240>;
+ vactive = <240>;
+ hback-porch = <0>;
+ vback-porch = <0>;
+
+ clock-frequency = <0>;
+ hfront-porch = <0>;
+ hsync-len = <0>;
+ vfront-porch = <0>;
+ vsync-len = <0>;
+ };
+ };
+};
+
+&timers2 {
+ /* spare dmas for other usage */
+ /delete-property/dmas;
+ /delete-property/dma-names;
+
+ status = "okay";
+
+ timer@1 {
+ status = "okay";
+ };
+};
+
+&timers3 {
+ /* spare dmas for other usage */
+ /delete-property/dmas;
+ /delete-property/dma-names;
+
+ status = "okay";
+
+ timer@2 {
+ status = "okay";
+ };
+};
+
+&timers4 {
+ /* spare dmas for other usage */
+ /delete-property/dmas;
+ /delete-property/dma-names;
+
+ status = "okay";
+
+ timer@3 {
+ status = "okay";
+ };
+};
+
+&timers8 {
+ /* spare dmas for other usage */
+ /delete-property/dmas;
+ /delete-property/dma-names;
+
+ status = "okay";
+
+ led_pwm: pwm {
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&pwm8_pins_b>;
+ pinctrl-1 = <&pwm8_sleep_pins_b>;
+ status = "okay";
+ };
+};
+
+&uart4 {
+ label = "debug";
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart4_pins_a>;
+
+ /* spare dmas for other usage */
+ /delete-property/dmas;
+ /delete-property/dma-names;
+
+ status = "okay";
+};
+
+&usart3 {
+ label = "external";
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&usart3_pins_a>;
+
+ /* spare dmas for other usage */
+ /delete-property/dmas;
+ /delete-property/dma-names;
+
+ status = "okay";
+};
+
+&usbh_ehci {
+ phys = <&usbphyc_port0>;
+ phy-names = "usb";
+
+ status = "okay";
+};
+
+&usbotg_hs {
+ phys = <&usbphyc_port1 0>;
+ phy-names = "usb2-phy";
+
+ vusb_d-supply = <&vdd_usb>;
+ vusb_a-supply = <&reg18>;
+
+ status = "okay";
+};
+
+&usbphyc {
+ status = "okay";
+};
+
+&usbphyc_port0 {
+ phy-supply = <&vdd_usb>;
+};
+
+&usbphyc_port1 {
+ phy-supply = <&vdd_usb>;
+};
+
+&v3v3_hdmi {
+ regulator-enable-ramp-delay = <1000>;
+};
diff --git a/arch/arm/boot/dts/st/stm32mp157c-dk2.dts b/arch/arm/boot/dts/st/stm32mp157c-dk2.dts
index 5f9c0160a9c4..324f7bb988d1 100644
--- a/arch/arm/boot/dts/st/stm32mp157c-dk2.dts
+++ b/arch/arm/boot/dts/st/stm32mp157c-dk2.dts
@@ -67,7 +67,7 @@
touchscreen@38 {
compatible = "focaltech,ft6236";
reg = <0x38>;
- interrupts = <2 2>;
+ interrupts = <2 IRQ_TYPE_EDGE_FALLING>;
interrupt-parent = <&gpiof>;
touchscreen-size-x = <480>;
touchscreen-size-y = <800>;
diff --git a/arch/arm/boot/dts/ti/davinci/da850-lego-ev3.dts b/arch/arm/boot/dts/ti/davinci/da850-lego-ev3.dts
index 4df10379ff22..173401c58d53 100644
--- a/arch/arm/boot/dts/ti/davinci/da850-lego-ev3.dts
+++ b/arch/arm/boot/dts/ti/davinci/da850-lego-ev3.dts
@@ -412,14 +412,14 @@
status = "okay";
/* Don't pull down battery voltage adc io channel */
- batt_volt_en {
+ batt-volt-en-hog {
gpio-hog;
gpios = <6 GPIO_ACTIVE_HIGH>;
output-high;
};
/* Don't impede Bluetooth clock signal */
- bt_clock_en {
+ bt-clock-en-hog {
gpio-hog;
gpios = <5 GPIO_ACTIVE_HIGH>;
input;
@@ -433,19 +433,19 @@
* anything, but they are present in the source code from LEGO.
*/
- bt_pic_en {
+ bt-pic-en-hog {
gpio-hog;
gpios = <51 GPIO_ACTIVE_HIGH>;
output-low;
};
- bt_pic_rst {
+ bt-pic-rst-hog {
gpio-hog;
gpios = <78 GPIO_ACTIVE_HIGH>;
output-high;
};
- bt_pic_cts {
+ bt-pic-cts-hog {
gpio-hog;
gpios = <87 GPIO_ACTIVE_HIGH>;
input;
diff --git a/arch/arm/boot/dts/ti/omap/omap3-evm-processor-common.dtsi b/arch/arm/boot/dts/ti/omap/omap3-evm-processor-common.dtsi
index e27837093e43..70e33cdd519a 100644
--- a/arch/arm/boot/dts/ti/omap/omap3-evm-processor-common.dtsi
+++ b/arch/arm/boot/dts/ti/omap/omap3-evm-processor-common.dtsi
@@ -205,7 +205,7 @@
/* T2_GPIO_2 low to route GPIO_61 to on-board devices */
&twl_gpio {
- en_on_board_gpio_61 {
+ en-on-board-gpio-61-hog {
gpio-hog;
gpios = <2 GPIO_ACTIVE_HIGH>;
output-low;
diff --git a/arch/arm/boot/dts/ti/omap/omap4-l4.dtsi b/arch/arm/boot/dts/ti/omap/omap4-l4.dtsi
index 3fcef3080eae..150dd84c9e0f 100644
--- a/arch/arm/boot/dts/ti/omap/omap4-l4.dtsi
+++ b/arch/arm/boot/dts/ti/omap/omap4-l4.dtsi
@@ -1414,7 +1414,7 @@
uart3: serial@0 {
compatible = "ti,omap4-uart";
reg = <0x0 0x100>;
- interrupts = <GIC_SPI 74 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts-extended = <&wakeupgen GIC_SPI 74 IRQ_TYPE_LEVEL_HIGH>;
clock-frequency = <48000000>;
};
};
@@ -1765,7 +1765,7 @@
uart1: serial@0 {
compatible = "ti,omap4-uart";
reg = <0x0 0x100>;
- interrupts = <GIC_SPI 72 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts-extended = <&wakeupgen GIC_SPI 72 IRQ_TYPE_LEVEL_HIGH>;
clock-frequency = <48000000>;
};
};
@@ -1794,7 +1794,7 @@
uart2: serial@0 {
compatible = "ti,omap4-uart";
reg = <0x0 0x100>;
- interrupts = <GIC_SPI 73 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts-extended = <&wakeupgen GIC_SPI 73 IRQ_TYPE_LEVEL_HIGH>;
clock-frequency = <48000000>;
};
};
@@ -1823,7 +1823,7 @@
uart4: serial@0 {
compatible = "ti,omap4-uart";
reg = <0x0 0x100>;
- interrupts = <GIC_SPI 70 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts-extended = <&wakeupgen GIC_SPI 70 IRQ_TYPE_LEVEL_HIGH>;
clock-frequency = <48000000>;
};
};
diff --git a/arch/arm/boot/dts/ti/omap/omap4-panda-a4.dts b/arch/arm/boot/dts/ti/omap/omap4-panda-a4.dts
index 8fd076e5d1b0..4b8bfd0188ad 100644
--- a/arch/arm/boot/dts/ti/omap/omap4-panda-a4.dts
+++ b/arch/arm/boot/dts/ti/omap/omap4-panda-a4.dts
@@ -7,6 +7,11 @@
#include "omap443x.dtsi"
#include "omap4-panda-common.dtsi"
+/ {
+ model = "TI OMAP4 PandaBoard (A4)";
+ compatible = "ti,omap4-panda-a4", "ti,omap4-panda", "ti,omap4430", "ti,omap4";
+};
+
/* Pandaboard Rev A4+ have external pullups on SCL & SDA */
&dss_hdmi_pins {
pinctrl-single,pins = <
diff --git a/arch/arm/boot/dts/xilinx/zynq-7000.dtsi b/arch/arm/boot/dts/xilinx/zynq-7000.dtsi
index a7db3f3009f2..153b8d93cbee 100644
--- a/arch/arm/boot/dts/xilinx/zynq-7000.dtsi
+++ b/arch/arm/boot/dts/xilinx/zynq-7000.dtsi
@@ -8,6 +8,13 @@
#size-cells = <1>;
compatible = "xlnx,zynq-7000";
+ options {
+ u-boot {
+ compatible = "u-boot,config";
+ bootscr-address = /bits/ 64 <0x3000000>;
+ };
+ };
+
cpus {
#address-cells = <1>;
#size-cells = <0>;
@@ -34,7 +41,7 @@
};
};
- fpga_full: fpga-full {
+ fpga_full: fpga-region {
compatible = "fpga-region";
fpga-mgr = <&devcfg>;
#address-cells = <1>;
@@ -93,6 +100,7 @@
};
amba: axi {
+ bootph-all;
compatible = "simple-bus";
#address-cells = <1>;
#size-cells = <1>;
@@ -190,6 +198,17 @@
reg = <0xf8006000 0x1000>;
};
+ ocm: sram@fffc0000 {
+ compatible = "mmio-sram";
+ reg = <0xfffc0000 0x10000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 0xfffc0000 0x10000>;
+ ocm-sram@0 {
+ reg = <0x0 0x10000>;
+ };
+ };
+
uart0: serial@e0000000 {
compatible = "xlnx,xuartps", "cdns,uart-r1p8";
status = "disabled";
@@ -277,13 +296,18 @@
0x2 0x0 0xe4000000 0x2000000>; /* SRAM/NOR CS1 region */
#address-cells = <2>;
#size-cells = <1>;
+ interrupt-parent = <&intc>;
+ interrupts = <0 18 4>;
nfc0: nand-controller@0,0 {
compatible = "arm,pl353-nand-r2p1";
reg = <0 0 0x1000000>;
status = "disabled";
- #address-cells = <1>;
- #size-cells = <0>;
+ };
+ nor0: flash@1,0 {
+ status = "disabled";
+ compatible = "cfi-flash";
+ reg = <1 0 0x2000000>;
};
};
@@ -308,12 +332,14 @@
};
slcr: slcr@f8000000 {
+ bootph-all;
#address-cells = <1>;
#size-cells = <1>;
compatible = "xlnx,zynq-slcr", "syscon", "simple-mfd";
reg = <0xF8000000 0x1000>;
ranges;
clkc: clkc@100 {
+ bootph-all;
#clock-cells = <1>;
compatible = "xlnx,ps7-clkc";
fclk-enable = <0>;
@@ -398,6 +424,7 @@
};
scutimer: timer@f8f00600 {
+ bootph-all;
interrupt-parent = <&intc>;
interrupts = <1 13 0x301>;
compatible = "arm,cortex-a9-twd-timer";
diff --git a/arch/arm/boot/dts/xilinx/zynq-cc108.dts b/arch/arm/boot/dts/xilinx/zynq-cc108.dts
index 8b9ab9bba23b..f5525c048426 100644
--- a/arch/arm/boot/dts/xilinx/zynq-cc108.dts
+++ b/arch/arm/boot/dts/xilinx/zynq-cc108.dts
@@ -18,6 +18,7 @@
aliases {
ethernet0 = &gem0;
serial0 = &uart0;
+ spi0 = &qspi;
};
chosen {
@@ -48,7 +49,44 @@
ethernet_phy: ethernet-phy@1 {
reg = <1>;
- device_type = "ethernet-phy";
+ };
+};
+
+&qspi {
+ status = "okay";
+ num-cs = <1>;
+ flash@0 { /* 16 MB */
+ compatible = "jedec,spi-nor";
+ reg = <0x0>;
+ spi-max-frequency = <50000000>;
+ spi-tx-bus-width = <1>;
+ spi-rx-bus-width = <4>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ partition@0 {
+ label = "qspi-fsbl-uboot-bs";
+ reg = <0x0 0x400000>; /* 4MB */
+ };
+ partition@400000 {
+ label = "qspi-linux";
+ reg = <0x400000 0x400000>; /* 4MB */
+ };
+ partition@800000 {
+ label = "qspi-rootfs";
+ reg = <0x800000 0x400000>; /* 4MB */
+ };
+ partition@c00000 {
+ label = "qspi-devicetree";
+ reg = <0xc00000 0x100000>; /* 1MB */
+ };
+ partition@d00000 {
+ label = "qspi-scratch";
+ reg = <0xd00000 0x200000>; /* 2MB */
+ };
+ partition@f00000 {
+ label = "qspi-uboot-env";
+ reg = <0xf00000 0x100000>; /* 1MB */
+ };
};
};
@@ -59,6 +97,7 @@
};
&uart0 {
+ bootph-all;
status = "okay";
};
diff --git a/arch/arm/boot/dts/xilinx/zynq-ebaz4205.dts b/arch/arm/boot/dts/xilinx/zynq-ebaz4205.dts
index 53fa6dbfd8fd..14f644156a6f 100644
--- a/arch/arm/boot/dts/xilinx/zynq-ebaz4205.dts
+++ b/arch/arm/boot/dts/xilinx/zynq-ebaz4205.dts
@@ -51,6 +51,8 @@
&nfc0 {
status = "okay";
+ #address-cells = <1>;
+ #size-cells = <0>;
nand@0 {
reg = <0>;
};
diff --git a/arch/arm/boot/dts/xilinx/zynq-microzed.dts b/arch/arm/boot/dts/xilinx/zynq-microzed.dts
index 6ed84fb15902..68b867e8369e 100644
--- a/arch/arm/boot/dts/xilinx/zynq-microzed.dts
+++ b/arch/arm/boot/dts/xilinx/zynq-microzed.dts
@@ -11,8 +11,9 @@
compatible = "avnet,zynq-microzed", "xlnx,zynq-microzed", "xlnx,zynq-7000";
aliases {
- ethernet0 = &gem0;
serial0 = &uart1;
+ spi0 = &qspi;
+ mmc0 = &sdhci0;
};
memory@0 {
@@ -35,6 +36,11 @@
ps-clk-frequency = <33333333>;
};
+&qspi {
+ bootph-all;
+ status = "okay";
+};
+
&gem0 {
status = "okay";
phy-mode = "rgmii-id";
@@ -46,10 +52,12 @@
};
&sdhci0 {
+ bootph-all;
status = "okay";
};
&uart1 {
+ bootph-all;
status = "okay";
};
diff --git a/arch/arm/boot/dts/xilinx/zynq-parallella.dts b/arch/arm/boot/dts/xilinx/zynq-parallella.dts
index 54592aeb92b9..366af4fcf8d9 100644
--- a/arch/arm/boot/dts/xilinx/zynq-parallella.dts
+++ b/arch/arm/boot/dts/xilinx/zynq-parallella.dts
@@ -46,7 +46,6 @@
compatible = "ethernet-phy-id0141.0e90",
"ethernet-phy-ieee802.3-c22";
reg = <0>;
- device_type = "ethernet-phy";
marvell,reg-init = <0x3 0x10 0xff00 0x1e>,
<0x3 0x11 0xfff0 0xa>;
};
diff --git a/arch/arm/boot/dts/xilinx/zynq-zc702.dts b/arch/arm/boot/dts/xilinx/zynq-zc702.dts
index 6efdbca9d3ef..6955637c5b1a 100644
--- a/arch/arm/boot/dts/xilinx/zynq-zc702.dts
+++ b/arch/arm/boot/dts/xilinx/zynq-zc702.dts
@@ -15,7 +15,10 @@
ethernet0 = &gem0;
i2c0 = &i2c0;
serial0 = &uart1;
+ spi0 = &qspi;
mmc0 = &sdhci0;
+ nvmem0 = &eeprom;
+ rtc0 = &rtc;
};
memory@0 {
@@ -63,19 +66,6 @@
};
};
-&amba {
- ocm: sram@fffc0000 {
- compatible = "mmio-sram";
- reg = <0xfffc0000 0x10000>;
- #address-cells = <1>;
- #size-cells = <1>;
- ranges = <0 0xfffc0000 0x10000>;
- ocm-sram@0 {
- reg = <0x0 0x10000>;
- };
- };
-};
-
&can0 {
status = "okay";
pinctrl-names = "default";
@@ -95,7 +85,6 @@
ethernet_phy: ethernet-phy@7 {
reg = <7>;
- device_type = "ethernet-phy";
};
};
@@ -152,7 +141,7 @@
#address-cells = <1>;
#size-cells = <0>;
reg = <2>;
- eeprom@54 {
+ eeprom: eeprom@54 {
compatible = "atmel,24c08";
reg = <0x54>;
};
@@ -174,7 +163,7 @@
#address-cells = <1>;
#size-cells = <0>;
reg = <4>;
- rtc@51 {
+ rtc: rtc@51 {
compatible = "nxp,pcf8563";
reg = <0x51>;
};
@@ -210,7 +199,7 @@
conf {
groups = "can0_9_grp";
slew-rate = <0>;
- io-standard = <1>;
+ power-source = <1>;
};
conf-rx {
@@ -233,7 +222,7 @@
conf {
groups = "ethernet0_0_grp";
slew-rate = <0>;
- io-standard = <4>;
+ power-source = <4>;
};
conf-rx {
@@ -256,7 +245,7 @@
conf-mdio {
groups = "mdio0_0_grp";
slew-rate = <0>;
- io-standard = <1>;
+ power-source = <1>;
bias-disable;
};
};
@@ -274,7 +263,7 @@
"gpio0_10_grp", "gpio0_11_grp", "gpio0_12_grp",
"gpio0_13_grp", "gpio0_14_grp";
slew-rate = <0>;
- io-standard = <1>;
+ power-source = <1>;
};
conf-pull-up {
@@ -298,11 +287,11 @@
groups = "i2c0_10_grp";
bias-pull-up;
slew-rate = <0>;
- io-standard = <1>;
+ power-source = <1>;
};
};
- pinctrl_i2c0_gpio: i2c0-gpio {
+ pinctrl_i2c0_gpio: i2c0-gpio-grp {
mux {
groups = "gpio0_50_grp", "gpio0_51_grp";
function = "gpio0";
@@ -311,7 +300,7 @@
conf {
groups = "gpio0_50_grp", "gpio0_51_grp";
slew-rate = <0>;
- io-standard = <1>;
+ power-source = <1>;
};
};
@@ -324,7 +313,7 @@
conf {
groups = "sdio0_2_grp";
slew-rate = <0>;
- io-standard = <1>;
+ power-source = <1>;
bias-disable;
};
@@ -338,7 +327,7 @@
bias-high-impedance;
bias-pull-up;
slew-rate = <0>;
- io-standard = <1>;
+ power-source = <1>;
};
mux-wp {
@@ -351,7 +340,7 @@
bias-high-impedance;
bias-pull-up;
slew-rate = <0>;
- io-standard = <1>;
+ power-source = <1>;
};
};
@@ -364,7 +353,7 @@
conf {
groups = "uart1_10_grp";
slew-rate = <0>;
- io-standard = <1>;
+ power-source = <1>;
};
conf-rx {
@@ -387,7 +376,7 @@
conf {
groups = "usb0_0_grp";
slew-rate = <0>;
- io-standard = <1>;
+ power-source = <1>;
};
conf-rx {
@@ -403,13 +392,53 @@
};
};
+&qspi {
+ bootph-all;
+ status = "okay";
+ num-cs = <1>;
+ flash@0 {
+ compatible = "jedec,spi-nor";
+ reg = <0x0>;
+ spi-tx-bus-width = <1>;
+ spi-rx-bus-width = <4>;
+ spi-max-frequency = <50000000>;
+ partitions {
+ compatible = "fixed-partitions";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ partition@0 {
+ label = "qspi-fsbl-uboot";
+ reg = <0x0 0x100000>;
+ };
+ partition@100000 {
+ label = "qspi-linux";
+ reg = <0x100000 0x500000>;
+ };
+ partition@600000 {
+ label = "qspi-device-tree";
+ reg = <0x600000 0x20000>;
+ };
+ partition@620000 {
+ label = "qspi-rootfs";
+ reg = <0x620000 0x5e0000>;
+ };
+ partition@c00000 {
+ label = "qspi-bitstream";
+ reg = <0xc00000 0x400000>;
+ };
+ };
+ };
+};
+
&sdhci0 {
+ bootph-all;
status = "okay";
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_sdhci0_default>;
};
&uart1 {
+ bootph-all;
status = "okay";
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_uart1_default>;
diff --git a/arch/arm/boot/dts/xilinx/zynq-zc706.dts b/arch/arm/boot/dts/xilinx/zynq-zc706.dts
index 77943c16d33f..3b803c698473 100644
--- a/arch/arm/boot/dts/xilinx/zynq-zc706.dts
+++ b/arch/arm/boot/dts/xilinx/zynq-zc706.dts
@@ -14,7 +14,10 @@
ethernet0 = &gem0;
i2c0 = &i2c0;
serial0 = &uart1;
+ spi0 = &qspi;
mmc0 = &sdhci0;
+ nvmem0 = &eeprom;
+ rtc0 = &rtc;
};
memory@0 {
@@ -46,7 +49,6 @@
ethernet_phy: ethernet-phy@7 {
reg = <7>;
- device_type = "ethernet-phy";
};
};
@@ -100,7 +102,7 @@
#address-cells = <1>;
#size-cells = <0>;
reg = <2>;
- eeprom@54 {
+ eeprom: eeprom@54 {
compatible = "atmel,24c08";
reg = <0x54>;
};
@@ -122,7 +124,7 @@
#address-cells = <1>;
#size-cells = <0>;
reg = <4>;
- rtc@51 {
+ rtc: rtc@51 {
compatible = "nxp,pcf8563";
reg = <0x51>;
};
@@ -150,7 +152,7 @@
conf {
groups = "ethernet0_0_grp";
slew-rate = <0>;
- io-standard = <4>;
+ power-source = <4>;
};
conf-rx {
@@ -173,7 +175,7 @@
conf-mdio {
groups = "mdio0_0_grp";
slew-rate = <0>;
- io-standard = <1>;
+ power-source = <1>;
bias-disable;
};
};
@@ -187,7 +189,7 @@
conf {
groups = "gpio0_7_grp", "gpio0_46_grp", "gpio0_47_grp";
slew-rate = <0>;
- io-standard = <1>;
+ power-source = <1>;
};
conf-pull-up {
@@ -211,7 +213,7 @@
groups = "i2c0_10_grp";
bias-pull-up;
slew-rate = <0>;
- io-standard = <1>;
+ power-source = <1>;
};
};
@@ -224,7 +226,7 @@
conf {
groups = "sdio0_2_grp";
slew-rate = <0>;
- io-standard = <1>;
+ power-source = <1>;
bias-disable;
};
@@ -238,7 +240,7 @@
bias-high-impedance;
bias-pull-up;
slew-rate = <0>;
- io-standard = <1>;
+ power-source = <1>;
};
mux-wp {
@@ -251,7 +253,7 @@
bias-high-impedance;
bias-pull-up;
slew-rate = <0>;
- io-standard = <1>;
+ power-source = <1>;
};
};
@@ -264,7 +266,7 @@
conf {
groups = "uart1_10_grp";
slew-rate = <0>;
- io-standard = <1>;
+ power-source = <1>;
};
conf-rx {
@@ -287,7 +289,7 @@
conf {
groups = "usb0_0_grp";
slew-rate = <0>;
- io-standard = <1>;
+ power-source = <1>;
};
conf-rx {
@@ -303,13 +305,54 @@
};
};
+&qspi {
+ bootph-all;
+ status = "okay";
+ num-cs = <2>;
+ flash@0 {
+ compatible = "jedec,spi-nor";
+ reg = <0>, <1>;
+ parallel-memories = /bits/ 64 <0x1000000 0x1000000>; /* 16MB */
+ spi-tx-bus-width = <1>;
+ spi-rx-bus-width = <4>;
+ spi-max-frequency = <50000000>;
+ partitions {
+ compatible = "fixed-partitions";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ partition@0 {
+ label = "qspi-fsbl-uboot";
+ reg = <0x0 0x100000>;
+ };
+ partition@100000 {
+ label = "qspi-linux";
+ reg = <0x100000 0x500000>;
+ };
+ partition@600000 {
+ label = "qspi-device-tree";
+ reg = <0x600000 0x20000>;
+ };
+ partition@620000 {
+ label = "qspi-rootfs";
+ reg = <0x620000 0x5e0000>;
+ };
+ partition@c00000 {
+ label = "qspi-bitstream";
+ reg = <0xc00000 0x400000>;
+ };
+ };
+ };
+};
+
&sdhci0 {
+ bootph-all;
status = "okay";
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_sdhci0_default>;
};
&uart1 {
+ bootph-all;
status = "okay";
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_uart1_default>;
diff --git a/arch/arm/boot/dts/xilinx/zynq-zc770-xm010.dts b/arch/arm/boot/dts/xilinx/zynq-zc770-xm010.dts
index 0dd352289a45..5fe799c3c7cf 100644
--- a/arch/arm/boot/dts/xilinx/zynq-zc770-xm010.dts
+++ b/arch/arm/boot/dts/xilinx/zynq-zc770-xm010.dts
@@ -15,6 +15,7 @@
ethernet0 = &gem0;
i2c0 = &i2c0;
serial0 = &uart1;
+ spi0 = &qspi;
spi1 = &spi1;
};
@@ -45,7 +46,6 @@
ethernet_phy: ethernet-phy@7 {
reg = <7>;
- device_type = "ethernet-phy";
};
};
@@ -57,7 +57,43 @@
compatible = "atmel,24c02";
reg = <0x52>;
};
+};
+&qspi {
+ status = "okay";
+ num-cs = <1>;
+ flash@0 {
+ compatible = "jedec,spi-nor";
+ reg = <0x0>;
+ spi-tx-bus-width = <1>;
+ spi-rx-bus-width = <4>;
+ spi-max-frequency = <50000000>;
+ partitions {
+ compatible = "fixed-partitions";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ partition@0 {
+ label = "qspi-fsbl-uboot";
+ reg = <0x0 0x100000>;
+ };
+ partition@100000 {
+ label = "qspi-linux";
+ reg = <0x100000 0x500000>;
+ };
+ partition@600000 {
+ label = "qspi-device-tree";
+ reg = <0x600000 0x20000>;
+ };
+ partition@620000 {
+ label = "qspi-rootfs";
+ reg = <0x620000 0x5e0000>;
+ };
+ partition@c00000 {
+ label = "qspi-bitstream";
+ reg = <0xc00000 0x400000>;
+ };
+ };
+ };
};
&sdhci0 {
@@ -85,6 +121,7 @@
};
&uart1 {
+ bootph-all;
status = "okay";
};
diff --git a/arch/arm/boot/dts/xilinx/zynq-zc770-xm011.dts b/arch/arm/boot/dts/xilinx/zynq-zc770-xm011.dts
index 56732e8f6ca1..f9a086fe66d3 100644
--- a/arch/arm/boot/dts/xilinx/zynq-zc770-xm011.dts
+++ b/arch/arm/boot/dts/xilinx/zynq-zc770-xm011.dts
@@ -47,6 +47,36 @@
};
};
+&nfc0 {
+ status = "okay";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ nand@0 {
+ reg = <0>;
+ partitions {
+ compatible = "fixed-partitions";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ partition@0 {
+ label = "nand-fsbl-uboot";
+ reg = <0x0 0x1000000>;
+ };
+ partition@1000000 {
+ label = "nand-linux";
+ reg = <0x1000000 0x2000000>;
+ };
+ partition@3000000 {
+ label = "nand-rootfs";
+ reg = <0x3000000 0x200000>;
+ };
+ };
+ };
+};
+
+&smcc {
+ status = "okay";
+};
+
&spi0 {
status = "okay";
num-cs = <4>;
@@ -54,6 +84,7 @@
};
&uart1 {
+ bootph-all;
status = "okay";
};
diff --git a/arch/arm/boot/dts/xilinx/zynq-zc770-xm012.dts b/arch/arm/boot/dts/xilinx/zynq-zc770-xm012.dts
index d2359b789eb8..24520e7d3965 100644
--- a/arch/arm/boot/dts/xilinx/zynq-zc770-xm012.dts
+++ b/arch/arm/boot/dts/xilinx/zynq-zc770-xm012.dts
@@ -53,6 +53,40 @@
};
};
+&nor0 {
+ status = "okay";
+ bank-width = <1>;
+ partitions {
+ compatible = "fixed-partitions";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ partition@0 {
+ label = "nor-fsbl-uboot";
+ reg = <0x0 0x100000>;
+ };
+ partition@100000 {
+ label = "nor-linux";
+ reg = <0x100000 0x500000>;
+ };
+ partition@600000 {
+ label = "nor-device-tree";
+ reg = <0x600000 0x20000>;
+ };
+ partition@620000 {
+ label = "nor-rootfs";
+ reg = <0x620000 0x5e0000>;
+ };
+ partition@c00000 {
+ label = "nor-bitstream";
+ reg = <0xc00000 0x400000>;
+ };
+ };
+};
+
+&smcc {
+ status = "okay";
+};
+
&spi1 {
status = "okay";
num-cs = <4>;
@@ -60,5 +94,6 @@
};
&uart1 {
+ bootph-all;
status = "okay";
};
diff --git a/arch/arm/boot/dts/xilinx/zynq-zc770-xm013.dts b/arch/arm/boot/dts/xilinx/zynq-zc770-xm013.dts
index 38d96adc870c..103e87ea7253 100644
--- a/arch/arm/boot/dts/xilinx/zynq-zc770-xm013.dts
+++ b/arch/arm/boot/dts/xilinx/zynq-zc770-xm013.dts
@@ -15,6 +15,7 @@
ethernet0 = &gem1;
i2c0 = &i2c1;
serial0 = &uart0;
+ spi0 = &qspi;
spi1 = &spi0;
};
@@ -40,7 +41,6 @@
ethernet_phy: ethernet-phy@7 {
reg = <7>;
- device_type = "ethernet-phy";
};
};
@@ -58,6 +58,44 @@
};
};
+&qspi {
+ status = "okay";
+ num-cs = <2>;
+ flash@0 {
+ compatible = "jedec,spi-nor";
+ reg = <0>, <1>;
+ parallel-memories = /bits/ 64 <0x1000000 0x1000000>; /* 16MB */
+ spi-tx-bus-width = <1>;
+ spi-rx-bus-width = <4>;
+ spi-max-frequency = <50000000>;
+ partitions {
+ compatible = "fixed-partitions";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ partition@0 {
+ label = "qspi-fsbl-uboot";
+ reg = <0x0 0x100000>;
+ };
+ partition@100000 {
+ label = "qspi-linux";
+ reg = <0x100000 0x500000>;
+ };
+ partition@600000 {
+ label = "qspi-device-tree";
+ reg = <0x600000 0x20000>;
+ };
+ partition@620000 {
+ label = "qspi-rootfs";
+ reg = <0x620000 0x5e0000>;
+ };
+ partition@c00000 {
+ label = "qspi-bitstream";
+ reg = <0xc00000 0x400000>;
+ };
+ };
+ };
+};
+
&spi0 {
status = "okay";
num-cs = <4>;
@@ -73,5 +111,6 @@
};
&uart0 {
+ bootph-all;
status = "okay";
};
diff --git a/arch/arm/boot/dts/xilinx/zynq-zed.dts b/arch/arm/boot/dts/xilinx/zynq-zed.dts
index 6a5a93aa6552..52ba569b2b9f 100644
--- a/arch/arm/boot/dts/xilinx/zynq-zed.dts
+++ b/arch/arm/boot/dts/xilinx/zynq-zed.dts
@@ -13,6 +13,7 @@
aliases {
ethernet0 = &gem0;
serial0 = &uart1;
+ spi0 = &qspi;
mmc0 = &sdhci0;
};
@@ -43,15 +44,55 @@
ethernet_phy: ethernet-phy@0 {
reg = <0>;
- device_type = "ethernet-phy";
+ };
+};
+
+&qspi {
+ bootph-all;
+ status = "okay";
+ num-cs = <1>;
+ flash@0 {
+ compatible = "jedec,spi-nor";
+ reg = <0>;
+ spi-tx-bus-width = <1>;
+ spi-rx-bus-width = <4>;
+ spi-max-frequency = <50000000>;
+ m25p,fast-read;
+ partitions {
+ compatible = "fixed-partitions";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ partition@0 {
+ label = "qspi-fsbl-uboot";
+ reg = <0x0 0x100000>;
+ };
+ partition@100000 {
+ label = "qspi-linux";
+ reg = <0x100000 0x500000>;
+ };
+ partition@600000 {
+ label = "qspi-device-tree";
+ reg = <0x600000 0x20000>;
+ };
+ partition@620000 {
+ label = "qspi-rootfs";
+ reg = <0x620000 0x5e0000>;
+ };
+ partition@c00000 {
+ label = "qspi-bitstream";
+ reg = <0xc00000 0x400000>;
+ };
+ };
};
};
&sdhci0 {
+ bootph-all;
status = "okay";
};
&uart1 {
+ bootph-all;
status = "okay";
};
diff --git a/arch/arm/boot/dts/xilinx/zynq-zturn-common.dtsi b/arch/arm/boot/dts/xilinx/zynq-zturn-common.dtsi
index 33b02e05ce82..defef9c8da13 100644
--- a/arch/arm/boot/dts/xilinx/zynq-zturn-common.dtsi
+++ b/arch/arm/boot/dts/xilinx/zynq-zturn-common.dtsi
@@ -63,6 +63,11 @@
ps-clk-frequency = <33333333>;
};
+&qspi {
+ bootph-all;
+ status = "okay";
+};
+
&gem0 {
status = "okay";
phy-mode = "rgmii-id";
@@ -73,14 +78,17 @@
};
&sdhci0 {
+ bootph-all;
status = "okay";
};
&uart0 {
+ bootph-all;
status = "okay";
};
&uart1 {
+ bootph-all;
status = "okay";
};
diff --git a/arch/arm/boot/dts/xilinx/zynq-zybo-z7.dts b/arch/arm/boot/dts/xilinx/zynq-zybo-z7.dts
index 7b87e10d3953..56b917eec783 100644
--- a/arch/arm/boot/dts/xilinx/zynq-zybo-z7.dts
+++ b/arch/arm/boot/dts/xilinx/zynq-zybo-z7.dts
@@ -10,6 +10,8 @@
aliases {
ethernet0 = &gem0;
serial0 = &uart1;
+ spi0 = &qspi;
+ mmc0 = &sdhci0;
};
memory@0 {
@@ -49,15 +51,21 @@
ethernet_phy: ethernet-phy@0 {
reg = <0>;
- device_type = "ethernet-phy";
};
};
+&qspi {
+ bootph-all;
+ status = "okay";
+};
+
&sdhci0 {
+ bootph-all;
status = "okay";
};
&uart1 {
+ bootph-all;
status = "okay";
};
diff --git a/arch/arm/boot/dts/xilinx/zynq-zybo.dts b/arch/arm/boot/dts/xilinx/zynq-zybo.dts
index 755f6f109d5a..fbc7d1b12e94 100644
--- a/arch/arm/boot/dts/xilinx/zynq-zybo.dts
+++ b/arch/arm/boot/dts/xilinx/zynq-zybo.dts
@@ -13,6 +13,7 @@
aliases {
ethernet0 = &gem0;
serial0 = &uart1;
+ spi0 = &qspi;
mmc0 = &sdhci0;
};
@@ -44,15 +45,21 @@
ethernet_phy: ethernet-phy@0 {
reg = <0>;
- device_type = "ethernet-phy";
};
};
+&qspi {
+ bootph-all;
+ status = "okay";
+};
+
&sdhci0 {
+ bootph-all;
status = "okay";
};
&uart1 {
+ bootph-all;
status = "okay";
};
diff --git a/arch/arm/configs/bcm2835_defconfig b/arch/arm/configs/bcm2835_defconfig
index b5f0bd8dd536..27dc3bf6b124 100644
--- a/arch/arm/configs/bcm2835_defconfig
+++ b/arch/arm/configs/bcm2835_defconfig
@@ -7,7 +7,6 @@ CONFIG_BSD_PROCESS_ACCT=y
CONFIG_BSD_PROCESS_ACCT_V3=y
CONFIG_LOG_BUF_SHIFT=18
CONFIG_CFS_BANDWIDTH=y
-CONFIG_RT_GROUP_SCHED=y
CONFIG_CGROUP_FREEZER=y
CONFIG_CPUSETS=y
CONFIG_CGROUP_DEVICE=y
diff --git a/arch/arm/configs/dove_defconfig b/arch/arm/configs/dove_defconfig
index 2849d17f5856..b382a2e175fb 100644
--- a/arch/arm/configs/dove_defconfig
+++ b/arch/arm/configs/dove_defconfig
@@ -129,7 +129,6 @@ CONFIG_CRYPTO_LZO=y
# CONFIG_CRYPTO_ANSI_CPRNG is not set
CONFIG_CRYPTO_DEV_MARVELL_CESA=y
CONFIG_CRC_CCITT=y
-CONFIG_LIBCRC32C=y
CONFIG_PRINTK_TIME=y
# CONFIG_DEBUG_BUGVERBOSE is not set
CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
diff --git a/arch/arm/configs/ep93xx_defconfig b/arch/arm/configs/ep93xx_defconfig
index 7dece9d98828..2248afaf35b5 100644
--- a/arch/arm/configs/ep93xx_defconfig
+++ b/arch/arm/configs/ep93xx_defconfig
@@ -113,7 +113,6 @@ CONFIG_NFS_FS=y
CONFIG_ROOT_NFS=y
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ISO8859_1=y
-CONFIG_LIBCRC32C=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_SLAB=y
CONFIG_DEBUG_SPINLOCK=y
diff --git a/arch/arm/configs/imx_v6_v7_defconfig b/arch/arm/configs/imx_v6_v7_defconfig
index 87841e5cafe2..297c6a7b978a 100644
--- a/arch/arm/configs/imx_v6_v7_defconfig
+++ b/arch/arm/configs/imx_v6_v7_defconfig
@@ -483,8 +483,6 @@ CONFIG_CRYPTO_DEV_SAHARA=y
CONFIG_CRYPTO_DEV_MXS_DCP=y
CONFIG_CRC_CCITT=m
CONFIG_CRC_T10DIF=y
-CONFIG_CRC7=m
-CONFIG_LIBCRC32C=m
CONFIG_CMA_SIZE_MBYTES=64
CONFIG_FONTS=y
CONFIG_FONT_8x8=y
diff --git a/arch/arm/configs/lpc18xx_defconfig b/arch/arm/configs/lpc18xx_defconfig
index f55c231e0870..2aa2ac8c6507 100644
--- a/arch/arm/configs/lpc18xx_defconfig
+++ b/arch/arm/configs/lpc18xx_defconfig
@@ -148,7 +148,6 @@ CONFIG_EXT2_FS=y
CONFIG_JFFS2_FS=y
# CONFIG_NETWORK_FILESYSTEMS is not set
CONFIG_CRC_ITU_T=y
-CONFIG_CRC7=y
CONFIG_PRINTK_TIME=y
# CONFIG_ENABLE_MUST_CHECK is not set
# CONFIG_DEBUG_BUGVERBOSE is not set
diff --git a/arch/arm/configs/moxart_defconfig b/arch/arm/configs/moxart_defconfig
index 34d079e03b3c..fa06d98e43fc 100644
--- a/arch/arm/configs/moxart_defconfig
+++ b/arch/arm/configs/moxart_defconfig
@@ -118,7 +118,6 @@ CONFIG_TMPFS=y
CONFIG_CONFIGFS_FS=y
CONFIG_JFFS2_FS=y
CONFIG_KEYS=y
-CONFIG_CRC32_BIT=y
CONFIG_DMA_API_DEBUG=y
CONFIG_PRINTK_TIME=y
CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
diff --git a/arch/arm/configs/multi_v5_defconfig b/arch/arm/configs/multi_v5_defconfig
index 3f4ddcf49ec7..db81862bdb93 100644
--- a/arch/arm/configs/multi_v5_defconfig
+++ b/arch/arm/configs/multi_v5_defconfig
@@ -290,7 +290,6 @@ CONFIG_CRYPTO_CBC=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_DEV_MARVELL_CESA=y
CONFIG_CRC_CCITT=y
-CONFIG_LIBCRC32C=y
CONFIG_DEBUG_KERNEL=y
CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
CONFIG_MAGIC_SYSRQ=y
diff --git a/arch/arm/configs/mvebu_v5_defconfig b/arch/arm/configs/mvebu_v5_defconfig
index 2467afd32146..a518d4a2581e 100644
--- a/arch/arm/configs/mvebu_v5_defconfig
+++ b/arch/arm/configs/mvebu_v5_defconfig
@@ -188,7 +188,6 @@ CONFIG_CRYPTO_CBC=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_DEV_MARVELL_CESA=y
CONFIG_CRC_CCITT=y
-CONFIG_LIBCRC32C=y
CONFIG_DEBUG_KERNEL=y
CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
CONFIG_MAGIC_SYSRQ=y
diff --git a/arch/arm/configs/mxs_defconfig b/arch/arm/configs/mxs_defconfig
index 43bc1255a5db..d8a6e43c401e 100644
--- a/arch/arm/configs/mxs_defconfig
+++ b/arch/arm/configs/mxs_defconfig
@@ -161,7 +161,6 @@ CONFIG_NLS_ISO8859_1=y
CONFIG_NLS_ISO8859_15=y
CONFIG_CRYPTO_DEV_MXS_DCP=y
CONFIG_CRC_ITU_T=m
-CONFIG_CRC7=m
CONFIG_FONTS=y
CONFIG_PRINTK_TIME=y
CONFIG_DEBUG_KERNEL=y
diff --git a/arch/arm/configs/omap1_defconfig b/arch/arm/configs/omap1_defconfig
index 025b595dd837..661e5d6894bd 100644
--- a/arch/arm/configs/omap1_defconfig
+++ b/arch/arm/configs/omap1_defconfig
@@ -221,7 +221,6 @@ CONFIG_CRYPTO_PCBC=y
CONFIG_CRYPTO_DEFLATE=y
CONFIG_CRYPTO_LZO=y
# CONFIG_CRYPTO_ANSI_CPRNG is not set
-CONFIG_LIBCRC32C=y
CONFIG_FONTS=y
CONFIG_FONT_8x8=y
CONFIG_FONT_8x16=y
diff --git a/arch/arm/configs/omap2plus_defconfig b/arch/arm/configs/omap2plus_defconfig
index 6de45d7f6078..113d6dfe5243 100644
--- a/arch/arm/configs/omap2plus_defconfig
+++ b/arch/arm/configs/omap2plus_defconfig
@@ -13,7 +13,6 @@ CONFIG_MEMCG=y
CONFIG_BLK_CGROUP=y
CONFIG_CGROUP_SCHED=y
CONFIG_CFS_BANDWIDTH=y
-CONFIG_RT_GROUP_SCHED=y
CONFIG_CGROUP_FREEZER=y
CONFIG_CPUSETS=y
CONFIG_CGROUP_DEVICE=y
@@ -710,8 +709,6 @@ CONFIG_CRYPTO_DEV_OMAP_DES=m
CONFIG_CRC_CCITT=y
CONFIG_CRC_T10DIF=y
CONFIG_CRC_ITU_T=y
-CONFIG_CRC7=y
-CONFIG_LIBCRC32C=y
CONFIG_DMA_CMA=y
CONFIG_FONTS=y
CONFIG_FONT_8x8=y
diff --git a/arch/arm/configs/shmobile_defconfig b/arch/arm/configs/shmobile_defconfig
index fd28f3176c6b..8c30ed14e52c 100644
--- a/arch/arm/configs/shmobile_defconfig
+++ b/arch/arm/configs/shmobile_defconfig
@@ -14,6 +14,7 @@ CONFIG_SCHED_MC=y
CONFIG_NR_CPUS=8
CONFIG_HIGHMEM=y
CONFIG_ARM_APPENDED_DTB=y
+CONFIG_ARM_ATAG_DTB_COMPAT=y
CONFIG_CPU_FREQ=y
CONFIG_CPU_FREQ_STAT=y
CONFIG_CPU_FREQ_GOV_POWERSAVE=y
diff --git a/arch/arm/configs/spitz_defconfig b/arch/arm/configs/spitz_defconfig
index 294d16ddeb18..ac5b7a5aaff6 100644
--- a/arch/arm/configs/spitz_defconfig
+++ b/arch/arm/configs/spitz_defconfig
@@ -235,7 +235,6 @@ CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_SHA512=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRC_CCITT=y
-CONFIG_LIBCRC32C=m
CONFIG_FONTS=y
CONFIG_FONT_8x8=y
CONFIG_FONT_8x16=y
diff --git a/arch/arm/configs/stm32_defconfig b/arch/arm/configs/stm32_defconfig
index 77048b5e1ec4..423bb41c4225 100644
--- a/arch/arm/configs/stm32_defconfig
+++ b/arch/arm/configs/stm32_defconfig
@@ -75,7 +75,6 @@ CONFIG_EXT3_FS=y
# CONFIG_INOTIFY_USER is not set
CONFIG_NLS=y
CONFIG_CRC_ITU_T=y
-CONFIG_CRC7=y
CONFIG_PRINTK_TIME=y
# CONFIG_DEBUG_BUGVERBOSE is not set
CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
diff --git a/arch/arm/configs/tegra_defconfig b/arch/arm/configs/tegra_defconfig
index d2a094ad360c..3a9bda2bf422 100644
--- a/arch/arm/configs/tegra_defconfig
+++ b/arch/arm/configs/tegra_defconfig
@@ -6,7 +6,6 @@ CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_CGROUPS=y
CONFIG_CGROUP_SCHED=y
-CONFIG_RT_GROUP_SCHED=y
CONFIG_CGROUP_FREEZER=y
CONFIG_CGROUP_CPUACCT=y
CONFIG_CGROUP_DEBUG=y
diff --git a/arch/arm/configs/wpcm450_defconfig b/arch/arm/configs/wpcm450_defconfig
index 45483deab034..5e4397f7f828 100644
--- a/arch/arm/configs/wpcm450_defconfig
+++ b/arch/arm/configs/wpcm450_defconfig
@@ -193,7 +193,6 @@ CONFIG_PKCS7_MESSAGE_PARSER=y
CONFIG_SYSTEM_TRUSTED_KEYRING=y
CONFIG_CRC_CCITT=y
CONFIG_CRC_ITU_T=m
-CONFIG_LIBCRC32C=y
CONFIG_PRINTK_TIME=y
CONFIG_DEBUG_KERNEL=y
CONFIG_MAGIC_SYSRQ=y
diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h
index 1815748f5d2a..bae5edf348ef 100644
--- a/arch/arm/include/asm/io.h
+++ b/arch/arm/include/asm/io.h
@@ -381,7 +381,7 @@ void __iomem *ioremap_wc(resource_size_t res_cookie, size_t size);
void iounmap(volatile void __iomem *io_addr);
#define iounmap iounmap
-void *arch_memremap_wb(phys_addr_t phys_addr, size_t size);
+void *arch_memremap_wb(phys_addr_t phys_addr, size_t size, unsigned long flags);
#define arch_memremap_wb arch_memremap_wb
/*
diff --git a/arch/arm/include/asm/vdso.h b/arch/arm/include/asm/vdso.h
index 5b85889f82ee..88364a6727ff 100644
--- a/arch/arm/include/asm/vdso.h
+++ b/arch/arm/include/asm/vdso.h
@@ -4,6 +4,8 @@
#ifdef __KERNEL__
+#define __VDSO_PAGES 4
+
#ifndef __ASSEMBLY__
struct mm_struct;
diff --git a/arch/arm/include/asm/vdso/gettimeofday.h b/arch/arm/include/asm/vdso/gettimeofday.h
index 592d3d015ca7..1e9f81639c88 100644
--- a/arch/arm/include/asm/vdso/gettimeofday.h
+++ b/arch/arm/include/asm/vdso/gettimeofday.h
@@ -112,7 +112,7 @@ static inline bool arm_vdso_hres_capable(void)
#define __arch_vdso_hres_capable arm_vdso_hres_capable
static __always_inline u64 __arch_get_hw_counter(int clock_mode,
- const struct vdso_data *vd)
+ const struct vdso_time_data *vd)
{
#ifdef CONFIG_ARM_ARCH_TIMER
u64 cycle_now;
@@ -135,11 +135,6 @@ static __always_inline u64 __arch_get_hw_counter(int clock_mode,
#endif
}
-static __always_inline const struct vdso_data *__arch_get_vdso_data(void)
-{
- return _vdso_data;
-}
-
#endif /* !__ASSEMBLY__ */
#endif /* __ASM_VDSO_GETTIMEOFDAY_H */
diff --git a/arch/arm/include/asm/vdso/vsyscall.h b/arch/arm/include/asm/vdso/vsyscall.h
index 705414710dcd..4e7226ad02ec 100644
--- a/arch/arm/include/asm/vdso/vsyscall.h
+++ b/arch/arm/include/asm/vdso/vsyscall.h
@@ -7,22 +7,14 @@
#include <vdso/datapage.h>
#include <asm/cacheflush.h>
-extern struct vdso_data *vdso_data;
extern bool cntvct_ok;
static __always_inline
-struct vdso_data *__arm_get_k_vdso_data(void)
-{
- return vdso_data;
-}
-#define __arch_get_k_vdso_data __arm_get_k_vdso_data
-
-static __always_inline
-void __arm_sync_vdso_data(struct vdso_data *vdata)
+void __arch_sync_vdso_time_data(struct vdso_time_data *vdata)
{
flush_dcache_page(virt_to_page(vdata));
}
-#define __arch_sync_vdso_data __arm_sync_vdso_data
+#define __arch_sync_vdso_time_data __arch_sync_vdso_time_data
/* The asm-generic header needs to be included after the definitions above */
#include <asm-generic/vdso/vsyscall.h>
diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c
index 4853875740d0..123f4a8ef446 100644
--- a/arch/arm/kernel/asm-offsets.c
+++ b/arch/arm/kernel/asm-offsets.c
@@ -153,10 +153,6 @@ int main(void)
DEFINE(CACHE_WRITEBACK_ORDER, __CACHE_WRITEBACK_ORDER);
DEFINE(CACHE_WRITEBACK_GRANULE, __CACHE_WRITEBACK_GRANULE);
BLANK();
-#ifdef CONFIG_VDSO
- DEFINE(VDSO_DATA_SIZE, sizeof(union vdso_data_store));
-#endif
- BLANK();
#ifdef CONFIG_ARM_MPU
DEFINE(MPU_RNG_INFO_RNGS, offsetof(struct mpu_rgn_info, rgns));
DEFINE(MPU_RNG_INFO_USED, offsetof(struct mpu_rgn_info, used));
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index 6ea645939573..afbd2ebe5c39 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -258,13 +258,6 @@ void show_stack(struct task_struct *tsk, unsigned long *sp, const char *loglvl)
barrier();
}
-#ifdef CONFIG_PREEMPT
-#define S_PREEMPT " PREEMPT"
-#elif defined(CONFIG_PREEMPT_RT)
-#define S_PREEMPT " PREEMPT_RT"
-#else
-#define S_PREEMPT ""
-#endif
#ifdef CONFIG_SMP
#define S_SMP " SMP"
#else
@@ -282,8 +275,8 @@ static int __die(const char *str, int err, struct pt_regs *regs)
static int die_counter;
int ret;
- pr_emerg("Internal error: %s: %x [#%d]" S_PREEMPT S_SMP S_ISA "\n",
- str, err, ++die_counter);
+ pr_emerg("Internal error: %s: %x [#%d]" S_SMP S_ISA "\n",
+ str, err, ++die_counter);
/* trap and error numbers are mostly meaningless on ARM */
ret = notify_die(DIE_OOPS, str, regs, err, tsk->thread.trap_no, SIGSEGV);
diff --git a/arch/arm/kernel/vdso.c b/arch/arm/kernel/vdso.c
index 29dd2f3c62fe..325448ffbba0 100644
--- a/arch/arm/kernel/vdso.c
+++ b/arch/arm/kernel/vdso.c
@@ -7,6 +7,7 @@
*/
#include <linux/cache.h>
+#include <linux/vdso_datastore.h>
#include <linux/elf.h>
#include <linux/err.h>
#include <linux/kernel.h>
@@ -33,15 +34,6 @@ extern char vdso_start[], vdso_end[];
/* Total number of pages needed for the data and text portions of the VDSO. */
unsigned int vdso_total_pages __ro_after_init;
-static union vdso_data_store vdso_data_store __page_aligned_data;
-struct vdso_data *vdso_data = vdso_data_store.data;
-
-static struct page *vdso_data_page __ro_after_init;
-static const struct vm_special_mapping vdso_data_mapping = {
- .name = "[vvar]",
- .pages = &vdso_data_page,
-};
-
static int vdso_mremap(const struct vm_special_mapping *sm,
struct vm_area_struct *new_vma)
{
@@ -192,9 +184,6 @@ static int __init vdso_init(void)
if (vdso_text_pagelist == NULL)
return -ENOMEM;
- /* Grab the VDSO data page. */
- vdso_data_page = virt_to_page(vdso_data);
-
/* Grab the VDSO text pages. */
for (i = 0; i < text_pages; i++) {
struct page *page;
@@ -205,7 +194,7 @@ static int __init vdso_init(void)
vdso_text_mapping.pages = vdso_text_pagelist;
- vdso_total_pages = 1; /* for the data/vvar page */
+ vdso_total_pages = VDSO_NR_PAGES; /* for the data/vvar pages */
vdso_total_pages += text_pages;
cntvct_ok = cntvct_functional();
@@ -216,16 +205,7 @@ static int __init vdso_init(void)
}
arch_initcall(vdso_init);
-static int install_vvar(struct mm_struct *mm, unsigned long addr)
-{
- struct vm_area_struct *vma;
-
- vma = _install_special_mapping(mm, addr, PAGE_SIZE,
- VM_READ | VM_MAYREAD,
- &vdso_data_mapping);
-
- return PTR_ERR_OR_ZERO(vma);
-}
+static_assert(__VDSO_PAGES == VDSO_NR_PAGES);
/* assumes mmap_lock is write-locked */
void arm_install_vdso(struct mm_struct *mm, unsigned long addr)
@@ -238,12 +218,12 @@ void arm_install_vdso(struct mm_struct *mm, unsigned long addr)
if (vdso_text_pagelist == NULL)
return;
- if (install_vvar(mm, addr))
+ if (IS_ERR(vdso_install_vvar_mapping(mm, addr)))
return;
- /* Account for vvar page. */
- addr += PAGE_SIZE;
- len = (vdso_total_pages - 1) << PAGE_SHIFT;
+ /* Account for vvar pages. */
+ addr += VDSO_NR_PAGES * PAGE_SIZE;
+ len = (vdso_total_pages - VDSO_NR_PAGES) << PAGE_SHIFT;
vma = _install_special_mapping(mm, addr, len,
VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
diff --git a/arch/arm/lib/crc-t10dif-glue.c b/arch/arm/lib/crc-t10dif-glue.c
index d24dee62670e..6efad3d78284 100644
--- a/arch/arm/lib/crc-t10dif-glue.c
+++ b/arch/arm/lib/crc-t10dif-glue.c
@@ -44,9 +44,7 @@ u16 crc_t10dif_arch(u16 crc, const u8 *data, size_t length)
crc_t10dif_pmull8(crc, data, length, buf);
kernel_neon_end();
- crc = 0;
- data = buf;
- length = sizeof(buf);
+ return crc_t10dif_generic(0, buf, sizeof(buf));
}
}
return crc_t10dif_generic(crc, data, length);
@@ -69,12 +67,6 @@ static void __exit crc_t10dif_arm_exit(void)
}
module_exit(crc_t10dif_arm_exit);
-bool crc_t10dif_is_optimized(void)
-{
- return static_key_enabled(&have_neon);
-}
-EXPORT_SYMBOL(crc_t10dif_is_optimized);
-
MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
MODULE_DESCRIPTION("Accelerated CRC-T10DIF using ARM NEON and Crypto Extensions");
MODULE_LICENSE("GPL v2");
diff --git a/arch/arm/lib/crc32-glue.c b/arch/arm/lib/crc32-glue.c
index 2c30ba3d80e6..4340351dbde8 100644
--- a/arch/arm/lib/crc32-glue.c
+++ b/arch/arm/lib/crc32-glue.c
@@ -59,14 +59,14 @@ u32 crc32_le_arch(u32 crc, const u8 *p, size_t len)
}
EXPORT_SYMBOL(crc32_le_arch);
-static u32 crc32c_le_scalar(u32 crc, const u8 *p, size_t len)
+static u32 crc32c_scalar(u32 crc, const u8 *p, size_t len)
{
if (static_branch_likely(&have_crc32))
return crc32c_armv8_le(crc, p, len);
- return crc32c_le_base(crc, p, len);
+ return crc32c_base(crc, p, len);
}
-u32 crc32c_le_arch(u32 crc, const u8 *p, size_t len)
+u32 crc32c_arch(u32 crc, const u8 *p, size_t len)
{
if (len >= PMULL_MIN_LEN + 15 &&
static_branch_likely(&have_pmull) && crypto_simd_usable()) {
@@ -74,7 +74,7 @@ u32 crc32c_le_arch(u32 crc, const u8 *p, size_t len)
/* align p to 16-byte boundary */
if (n) {
- crc = crc32c_le_scalar(crc, p, n);
+ crc = crc32c_scalar(crc, p, n);
p += n;
len -= n;
}
@@ -85,9 +85,9 @@ u32 crc32c_le_arch(u32 crc, const u8 *p, size_t len)
p += n;
len -= n;
}
- return crc32c_le_scalar(crc, p, len);
+ return crc32c_scalar(crc, p, len);
}
-EXPORT_SYMBOL(crc32c_le_arch);
+EXPORT_SYMBOL(crc32c_arch);
u32 crc32_be_arch(u32 crc, const u8 *p, size_t len)
{
diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c
index 05a1547642b6..3aa20038ad93 100644
--- a/arch/arm/mach-at91/pm.c
+++ b/arch/arm/mach-at91/pm.c
@@ -222,13 +222,16 @@ static const struct of_device_id sam9x60_ws_ids[] = {
{ /* sentinel */ }
};
-static const struct of_device_id sama7g5_ws_ids[] = {
+static const struct of_device_id sama7_ws_ids[] = {
+ { .compatible = "microchip,sama7d65-rtc", .data = &ws_info[1] },
{ .compatible = "microchip,sama7g5-rtc", .data = &ws_info[1] },
{ .compatible = "microchip,sama7g5-ohci", .data = &ws_info[2] },
{ .compatible = "usb-ohci", .data = &ws_info[2] },
{ .compatible = "atmel,at91sam9g45-ehci", .data = &ws_info[2] },
{ .compatible = "usb-ehci", .data = &ws_info[2] },
+ { .compatible = "microchip,sama7d65-sdhci", .data = &ws_info[3] },
{ .compatible = "microchip,sama7g5-sdhci", .data = &ws_info[3] },
+ { .compatible = "microchip,sama7d65-rtt", .data = &ws_info[4] },
{ .compatible = "microchip,sama7g5-rtt", .data = &ws_info[4] },
{ /* sentinel */ }
};
@@ -545,11 +548,12 @@ extern u32 at91_pm_suspend_in_sram_sz;
static int at91_suspend_finish(unsigned long val)
{
- unsigned char modified_gray_code[] = {
- 0x00, 0x01, 0x02, 0x03, 0x06, 0x07, 0x04, 0x05, 0x0c, 0x0d,
- 0x0e, 0x0f, 0x0a, 0x0b, 0x08, 0x09, 0x18, 0x19, 0x1a, 0x1b,
- 0x1e, 0x1f, 0x1c, 0x1d, 0x14, 0x15, 0x16, 0x17, 0x12, 0x13,
- 0x10, 0x11,
+ /* SYNOPSYS workaround to fix a bug in the calibration logic */
+ unsigned char modified_fix_code[] = {
+ 0x00, 0x01, 0x01, 0x06, 0x07, 0x0c, 0x06, 0x07, 0x0b, 0x18,
+ 0x0a, 0x0b, 0x0c, 0x0d, 0x0d, 0x0a, 0x13, 0x13, 0x12, 0x13,
+ 0x14, 0x15, 0x15, 0x12, 0x18, 0x19, 0x19, 0x1e, 0x1f, 0x14,
+ 0x1e, 0x1f,
};
unsigned int tmp, index;
int i;
@@ -560,25 +564,25 @@ static int at91_suspend_finish(unsigned long val)
* restore the ZQ0SR0 with the value saved here. But the
* calibration is buggy and restoring some values from ZQ0SR0
* is forbidden and risky thus we need to provide processed
- * values for these (modified gray code values).
+ * values for these.
*/
tmp = readl(soc_pm.data.ramc_phy + DDR3PHY_ZQ0SR0);
/* Store pull-down output impedance select. */
index = (tmp >> DDR3PHY_ZQ0SR0_PDO_OFF) & 0x1f;
- soc_pm.bu->ddr_phy_calibration[0] = modified_gray_code[index];
+ soc_pm.bu->ddr_phy_calibration[0] = modified_fix_code[index] << DDR3PHY_ZQ0SR0_PDO_OFF;
/* Store pull-up output impedance select. */
index = (tmp >> DDR3PHY_ZQ0SR0_PUO_OFF) & 0x1f;
- soc_pm.bu->ddr_phy_calibration[0] |= modified_gray_code[index];
+ soc_pm.bu->ddr_phy_calibration[0] |= modified_fix_code[index] << DDR3PHY_ZQ0SR0_PUO_OFF;
/* Store pull-down on-die termination impedance select. */
index = (tmp >> DDR3PHY_ZQ0SR0_PDODT_OFF) & 0x1f;
- soc_pm.bu->ddr_phy_calibration[0] |= modified_gray_code[index];
+ soc_pm.bu->ddr_phy_calibration[0] |= modified_fix_code[index] << DDR3PHY_ZQ0SR0_PDODT_OFF;
/* Store pull-up on-die termination impedance select. */
index = (tmp >> DDR3PHY_ZQ0SRO_PUODT_OFF) & 0x1f;
- soc_pm.bu->ddr_phy_calibration[0] |= modified_gray_code[index];
+ soc_pm.bu->ddr_phy_calibration[0] |= modified_fix_code[index] << DDR3PHY_ZQ0SRO_PUODT_OFF;
/*
* The 1st 8 words of memory might get corrupted in the process
@@ -643,6 +647,11 @@ static void at91_pm_suspend(suspend_state_t state)
at91_suspend_sram_fn = fncpy(at91_suspend_sram_fn,
&at91_pm_suspend_in_sram,
at91_pm_suspend_in_sram_sz);
+
+ if (IS_ENABLED(CONFIG_SOC_SAMA7D65)) {
+ /* SHDWC.SR */
+ readl(soc_pm.data.shdwc + 0x08);
+ }
} else {
at91_suspend_finish(0);
}
@@ -1061,7 +1070,8 @@ static int __init at91_pm_backup_init(void)
int ret = -ENODEV, located = 0;
if (!IS_ENABLED(CONFIG_SOC_SAMA5D2) &&
- !IS_ENABLED(CONFIG_SOC_SAMA7G5))
+ !IS_ENABLED(CONFIG_SOC_SAMA7G5) &&
+ !IS_ENABLED(CONFIG_SOC_SAMA7D65))
return -EPERM;
if (!at91_is_pm_mode_active(AT91_PM_BACKUP))
@@ -1329,6 +1339,7 @@ struct pmc_info {
unsigned long uhp_udp_mask;
unsigned long mckr;
unsigned long version;
+ unsigned long mcks;
};
static const struct pmc_info pmc_infos[] __initconst = {
@@ -1360,8 +1371,14 @@ static const struct pmc_info pmc_infos[] __initconst = {
{
.mckr = 0x28,
.version = AT91_PMC_V2,
+ .mcks = 4,
+ },
+ {
+ .uhp_udp_mask = AT91SAM926x_PMC_UHP,
+ .mckr = 0x28,
+ .version = AT91_PMC_V2,
+ .mcks = 9,
},
-
};
static const struct of_device_id atmel_pmc_ids[] __initconst = {
@@ -1378,6 +1395,7 @@ static const struct of_device_id atmel_pmc_ids[] __initconst = {
{ .compatible = "atmel,sama5d2-pmc", .data = &pmc_infos[1] },
{ .compatible = "microchip,sam9x60-pmc", .data = &pmc_infos[4] },
{ .compatible = "microchip,sam9x7-pmc", .data = &pmc_infos[4] },
+ { .compatible = "microchip,sama7d65-pmc", .data = &pmc_infos[6] },
{ .compatible = "microchip,sama7g5-pmc", .data = &pmc_infos[5] },
{ /* sentinel */ },
};
@@ -1448,6 +1466,7 @@ static void __init at91_pm_init(void (*pm_idle)(void))
soc_pm.data.uhp_udp_mask = pmc->uhp_udp_mask;
soc_pm.data.pmc_mckr_offset = pmc->mckr;
soc_pm.data.pmc_version = pmc->version;
+ soc_pm.data.pmc_mcks = pmc->mcks;
if (pm_idle)
arm_pm_idle = pm_idle;
@@ -1671,7 +1690,7 @@ void __init sama7_pm_init(void)
at91_pm_modes_init(iomaps, ARRAY_SIZE(iomaps));
at91_pm_init(NULL);
- soc_pm.ws_ids = sama7g5_ws_ids;
+ soc_pm.ws_ids = sama7_ws_ids;
soc_pm.config_pmc_ws = at91_sam9x60_config_pmc_ws;
soc_pm.sfrbu_regs.pswbu.key = (0x4BD20C << 8);
diff --git a/arch/arm/mach-at91/pm.h b/arch/arm/mach-at91/pm.h
index 53bdc9000e44..50c3a425d140 100644
--- a/arch/arm/mach-at91/pm.h
+++ b/arch/arm/mach-at91/pm.h
@@ -39,6 +39,7 @@ struct at91_pm_data {
unsigned int suspend_mode;
unsigned int pmc_mckr_offset;
unsigned int pmc_version;
+ unsigned int pmc_mcks;
};
#endif
diff --git a/arch/arm/mach-at91/pm_data-offsets.c b/arch/arm/mach-at91/pm_data-offsets.c
index 40bd4e8fe40a..0ca5da66dc26 100644
--- a/arch/arm/mach-at91/pm_data-offsets.c
+++ b/arch/arm/mach-at91/pm_data-offsets.c
@@ -18,6 +18,8 @@ int main(void)
pmc_mckr_offset));
DEFINE(PM_DATA_PMC_VERSION, offsetof(struct at91_pm_data,
pmc_version));
+ DEFINE(PM_DATA_PMC_MCKS, offsetof(struct at91_pm_data,
+ pmc_mcks));
return 0;
}
diff --git a/arch/arm/mach-at91/pm_suspend.S b/arch/arm/mach-at91/pm_suspend.S
index e5869cca5e79..e23b86834096 100644
--- a/arch/arm/mach-at91/pm_suspend.S
+++ b/arch/arm/mach-at91/pm_suspend.S
@@ -814,18 +814,20 @@ sr_dis_exit:
.endm
/**
- * at91_mckx_ps_enable: save MCK1..4 settings and switch it to main clock
+ * at91_mckx_ps_enable: save MCK settings and switch it to main clock
*
- * Side effects: overwrites tmp1, tmp2
+ * Side effects: overwrites tmp1, tmp2, tmp3
*/
.macro at91_mckx_ps_enable
#ifdef CONFIG_SOC_SAMA7
ldr pmc, .pmc_base
+ ldr tmp3, .mcks
- /* There are 4 MCKs we need to handle: MCK1..4 */
+ /* Start at MCK1 and go until MCKs */
mov tmp1, #1
-e_loop: cmp tmp1, #5
- beq e_done
+e_loop:
+ cmp tmp1, tmp3
+ bgt e_done
/* Write MCK ID to retrieve the settings. */
str tmp1, [pmc, #AT91_PMC_MCR_V2]
@@ -850,7 +852,37 @@ e_save_mck3:
b e_ps
e_save_mck4:
+ cmp tmp1, #4
+ bne e_save_mck5
str tmp2, .saved_mck4
+ b e_ps
+
+e_save_mck5:
+ cmp tmp1, #5
+ bne e_save_mck6
+ str tmp2, .saved_mck5
+ b e_ps
+
+e_save_mck6:
+ cmp tmp1, #6
+ bne e_save_mck7
+ str tmp2, .saved_mck6
+ b e_ps
+
+e_save_mck7:
+ cmp tmp1, #7
+ bne e_save_mck8
+ str tmp2, .saved_mck7
+ b e_ps
+
+e_save_mck8:
+ cmp tmp1, #8
+ bne e_save_mck9
+ str tmp2, .saved_mck8
+ b e_ps
+
+e_save_mck9:
+ str tmp2, .saved_mck9
e_ps:
/* Use CSS=MAINCK and DIV=1. */
@@ -870,18 +902,20 @@ e_done:
.endm
/**
- * at91_mckx_ps_restore: restore MCK1..4 settings
+ * at91_mckx_ps_restore: restore MCKx settings
*
* Side effects: overwrites tmp1, tmp2
*/
.macro at91_mckx_ps_restore
#ifdef CONFIG_SOC_SAMA7
ldr pmc, .pmc_base
+ ldr tmp2, .mcks
- /* There are 4 MCKs we need to handle: MCK1..4 */
+ /* Start from MCK1 and go up to MCKs */
mov tmp1, #1
-r_loop: cmp tmp1, #5
- beq r_done
+r_loop:
+ cmp tmp1, tmp2
+ bgt r_done
r_save_mck1:
cmp tmp1, #1
@@ -902,7 +936,37 @@ r_save_mck3:
b r_ps
r_save_mck4:
+ cmp tmp1, #4
+ bne r_save_mck5
ldr tmp2, .saved_mck4
+ b r_ps
+
+r_save_mck5:
+ cmp tmp1, #5
+ bne r_save_mck6
+ ldr tmp2, .saved_mck5
+ b r_ps
+
+r_save_mck6:
+ cmp tmp1, #6
+ bne r_save_mck7
+ ldr tmp2, .saved_mck6
+ b r_ps
+
+r_save_mck7:
+ cmp tmp1, #7
+ bne r_save_mck8
+ ldr tmp2, .saved_mck7
+ b r_ps
+
+r_save_mck8:
+ cmp tmp1, #8
+ bne r_save_mck9
+ ldr tmp2, .saved_mck8
+ b r_ps
+
+r_save_mck9:
+ ldr tmp2, .saved_mck9
r_ps:
/* Write MCK ID to retrieve the settings. */
@@ -921,6 +985,7 @@ r_ps:
wait_mckrdy tmp1
add tmp1, tmp1, #1
+ ldr tmp2, .mcks
b r_loop
r_done:
#endif
@@ -1045,6 +1110,10 @@ ENTRY(at91_pm_suspend_in_sram)
str tmp1, .memtype
ldr tmp1, [r0, #PM_DATA_MODE]
str tmp1, .pm_mode
+#ifdef CONFIG_SOC_SAMA7
+ ldr tmp1, [r0, #PM_DATA_PMC_MCKS]
+ str tmp1, .mcks
+#endif
/*
* ldrne below are here to preload their address in the TLB as access
@@ -1132,6 +1201,10 @@ ENDPROC(at91_pm_suspend_in_sram)
.word 0
.pmc_version:
.word 0
+#ifdef CONFIG_SOC_SAMA7
+.mcks:
+ .word 0
+#endif
.saved_mckr:
.word 0
.saved_pllar:
@@ -1155,6 +1228,16 @@ ENDPROC(at91_pm_suspend_in_sram)
.word 0
.saved_mck4:
.word 0
+.saved_mck5:
+ .word 0
+.saved_mck6:
+ .word 0
+.saved_mck7:
+ .word 0
+.saved_mck8:
+ .word 0
+.saved_mck9:
+ .word 0
#endif
ENTRY(at91_pm_suspend_in_sram_sz)
diff --git a/arch/arm/mach-davinci/Kconfig b/arch/arm/mach-davinci/Kconfig
index 2a8a9fe46586..8f66de0405d9 100644
--- a/arch/arm/mach-davinci/Kconfig
+++ b/arch/arm/mach-davinci/Kconfig
@@ -4,6 +4,7 @@ menuconfig ARCH_DAVINCI
bool "TI DaVinci"
depends on ARCH_MULTI_V5
depends on CPU_LITTLE_ENDIAN
+ select ARCH_DAVINCI_DA850
select CPU_ARM926T
select DAVINCI_TIMER
select ZONE_DMA
@@ -27,6 +28,7 @@ config ARCH_DAVINCI_DA830
config ARCH_DAVINCI_DA850
bool "DA850/OMAP-L138/AM18x based system"
+ select ARCH_DAVINCI_DA8XX
select DAVINCI_CP_INTC
config ARCH_DAVINCI_DA8XX
diff --git a/arch/arm/mach-davinci/da830.c b/arch/arm/mach-davinci/da830.c
index 2e497745b624..a044ea5cb4f1 100644
--- a/arch/arm/mach-davinci/da830.c
+++ b/arch/arm/mach-davinci/da830.c
@@ -11,7 +11,6 @@
#include <linux/gpio.h>
#include <linux/init.h>
#include <linux/io.h>
-#include <linux/irqchip/irq-davinci-cp-intc.h>
#include <clocksource/timer-davinci.h>
diff --git a/arch/arm/mach-imx/common.h b/arch/arm/mach-imx/common.h
index 13f3068e9845..45c1a2a7b35f 100644
--- a/arch/arm/mach-imx/common.h
+++ b/arch/arm/mach-imx/common.h
@@ -98,7 +98,7 @@ extern const u32 imx53_suspend_sz;
void imx6_suspend(void __iomem *ocram_vbase);
#else
static inline void imx53_suspend(void __iomem *ocram_vbase) {}
-static const u32 imx53_suspend_sz;
+static __maybe_unused const u32 imx53_suspend_sz;
static inline void imx6_suspend(void __iomem *ocram_vbase) {}
#endif
diff --git a/arch/arm/mach-imx/mmdc.c b/arch/arm/mach-imx/mmdc.c
index e898f7c2733e..94e4f4a2f73f 100644
--- a/arch/arm/mach-imx/mmdc.c
+++ b/arch/arm/mach-imx/mmdc.c
@@ -509,9 +509,8 @@ static int imx_mmdc_perf_init(struct platform_device *pdev, void __iomem *mmdc_b
pmu_mmdc->mmdc_ipg_clk = mmdc_ipg_clk;
pmu_mmdc->devtype_data = device_get_match_data(&pdev->dev);
- hrtimer_init(&pmu_mmdc->hrtimer, CLOCK_MONOTONIC,
- HRTIMER_MODE_REL);
- pmu_mmdc->hrtimer.function = mmdc_pmu_timer_handler;
+ hrtimer_setup(&pmu_mmdc->hrtimer, mmdc_pmu_timer_handler, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
cpumask_set_cpu(raw_smp_processor_id(), &pmu_mmdc->cpu);
diff --git a/arch/arm/mach-omap1/Kconfig b/arch/arm/mach-omap1/Kconfig
index a643b71e30a3..08ec6bd84ada 100644
--- a/arch/arm/mach-omap1/Kconfig
+++ b/arch/arm/mach-omap1/Kconfig
@@ -8,6 +8,7 @@ menuconfig ARCH_OMAP1
select ARCH_OMAP
select CLKSRC_MMIO
select FORCE_PCI if PCCARD
+ select GENERIC_IRQ_CHIP
select GPIOLIB
help
Support for older TI OMAP1 (omap7xx, omap15xx or omap16xx)
diff --git a/arch/arm/mach-s3c/devs.c b/arch/arm/mach-s3c/devs.c
index 8c26d592d2a3..bab2abd8a34a 100644
--- a/arch/arm/mach-s3c/devs.c
+++ b/arch/arm/mach-s3c/devs.c
@@ -19,7 +19,6 @@
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/dma-mapping.h>
-#include <linux/fb.h>
#include <linux/gfp.h>
#include <linux/mmc/host.h>
#include <linux/ioport.h>
diff --git a/arch/arm/mach-s3c/setup-fb-24bpp-s3c64xx.c b/arch/arm/mach-s3c/setup-fb-24bpp-s3c64xx.c
index cfa34b55ca21..c3269cd6a848 100644
--- a/arch/arm/mach-s3c/setup-fb-24bpp-s3c64xx.c
+++ b/arch/arm/mach-s3c/setup-fb-24bpp-s3c64xx.c
@@ -9,7 +9,6 @@
#include <linux/kernel.h>
#include <linux/types.h>
-#include <linux/fb.h>
#include <linux/gpio.h>
#include "fb.h"
diff --git a/arch/arm/mach-shmobile/headsmp.S b/arch/arm/mach-shmobile/headsmp.S
index a956b489b6ea..2bc7e73a8582 100644
--- a/arch/arm/mach-shmobile/headsmp.S
+++ b/arch/arm/mach-shmobile/headsmp.S
@@ -136,6 +136,7 @@ ENDPROC(shmobile_smp_sleep)
.long shmobile_smp_arg - 1b
.bss
+ .align 2
.globl shmobile_smp_mpidr
shmobile_smp_mpidr:
.space NR_CPUS * 4
diff --git a/arch/arm/mach-shmobile/setup-rcar-gen2.c b/arch/arm/mach-shmobile/setup-rcar-gen2.c
index c38367a10c79..3cd34a42e39b 100644
--- a/arch/arm/mach-shmobile/setup-rcar-gen2.c
+++ b/arch/arm/mach-shmobile/setup-rcar-gen2.c
@@ -8,19 +8,15 @@
*/
#include <linux/clocksource.h>
-#include <linux/device.h>
-#include <linux/dma-map-ops.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/memblock.h>
#include <linux/of.h>
#include <linux/of_clk.h>
-#include <linux/of_fdt.h>
#include <linux/psci.h>
#include <asm/mach/arch.h>
#include <asm/secure_cntvoff.h>
#include "common.h"
-#include "rcar-gen2.h"
static const struct of_device_id cpg_matches[] __initconst = {
{ .compatible = "renesas,r8a7742-cpg-mssr", .data = "extal" },
@@ -122,76 +118,6 @@ skip_update:
timer_probe();
}
-struct memory_reserve_config {
- u64 reserved;
- u64 base, size;
-};
-
-static int __init rcar_gen2_scan_mem(unsigned long node, const char *uname,
- int depth, void *data)
-{
- const char *type = of_get_flat_dt_prop(node, "device_type", NULL);
- const __be32 *reg, *endp;
- int l;
- struct memory_reserve_config *mrc = data;
- u64 lpae_start = 1ULL << 32;
-
- /* We are scanning "memory" nodes only */
- if (type == NULL || strcmp(type, "memory"))
- return 0;
-
- reg = of_get_flat_dt_prop(node, "linux,usable-memory", &l);
- if (reg == NULL)
- reg = of_get_flat_dt_prop(node, "reg", &l);
- if (reg == NULL)
- return 0;
-
- endp = reg + (l / sizeof(__be32));
- while ((endp - reg) >= (dt_root_addr_cells + dt_root_size_cells)) {
- u64 base, size;
-
- base = dt_mem_next_cell(dt_root_addr_cells, &reg);
- size = dt_mem_next_cell(dt_root_size_cells, &reg);
-
- if (base >= lpae_start)
- continue;
-
- if ((base + size) >= lpae_start)
- size = lpae_start - base;
-
- if (size < mrc->reserved)
- continue;
-
- if (base < mrc->base)
- continue;
-
- /* keep the area at top near the 32-bit legacy limit */
- mrc->base = base + size - mrc->reserved;
- mrc->size = mrc->reserved;
- }
-
- return 0;
-}
-
-static void __init rcar_gen2_reserve(void)
-{
- struct memory_reserve_config mrc;
-
- /* reserve 256 MiB at the top of the physical legacy 32-bit space */
- memset(&mrc, 0, sizeof(mrc));
- mrc.reserved = SZ_256M;
-
- of_scan_flat_dt(rcar_gen2_scan_mem, &mrc);
-#ifdef CONFIG_DMA_CMA
- if (mrc.size && memblock_is_region_memory(mrc.base, mrc.size)) {
- static struct cma *rcar_gen2_dma_contiguous;
-
- dma_contiguous_reserve_area(mrc.size, mrc.base, 0,
- &rcar_gen2_dma_contiguous, true);
- }
-#endif
-}
-
static const char * const rcar_gen2_boards_compat_dt[] __initconst = {
"renesas,r8a7790",
"renesas,r8a7791",
@@ -204,7 +130,6 @@ static const char * const rcar_gen2_boards_compat_dt[] __initconst = {
DT_MACHINE_START(RCAR_GEN2_DT, "Generic R-Car Gen2 (Flattened Device Tree)")
.init_late = shmobile_init_late,
.init_time = rcar_gen2_timer_init,
- .reserve = rcar_gen2_reserve,
.dt_compat = rcar_gen2_boards_compat_dt,
MACHINE_END
@@ -220,6 +145,5 @@ static const char * const rz_g1_boards_compat_dt[] __initconst = {
DT_MACHINE_START(RZ_G1_DT, "Generic RZ/G1 (Flattened Device Tree)")
.init_late = shmobile_init_late,
.init_time = rcar_gen2_timer_init,
- .reserve = rcar_gen2_reserve,
.dt_compat = rz_g1_boards_compat_dt,
MACHINE_END
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 2b6f50dd5478..5c1023a6d78c 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -928,6 +928,7 @@ config VDSO
select GENERIC_TIME_VSYSCALL
select GENERIC_VDSO_32
select GENERIC_GETTIMEOFDAY
+ select GENERIC_VDSO_DATA_STORE
help
Place in the process address space an ELF shared object
providing fast implementations of gettimeofday and
diff --git a/arch/arm/mm/cache-l2x0-pmu.c b/arch/arm/mm/cache-l2x0-pmu.c
index 993fefdc167a..93ef0502b7ff 100644
--- a/arch/arm/mm/cache-l2x0-pmu.c
+++ b/arch/arm/mm/cache-l2x0-pmu.c
@@ -539,8 +539,7 @@ static __init int l2x0_pmu_init(void)
* at higher frequencies.
*/
l2x0_pmu_poll_period = ms_to_ktime(1000);
- hrtimer_init(&l2x0_pmu_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- l2x0_pmu_hrtimer.function = l2x0_pmu_poll;
+ hrtimer_setup(&l2x0_pmu_hrtimer, l2x0_pmu_poll, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
cpumask_set_cpu(0, &pmu_cpu);
ret = cpuhp_setup_state_nocalls(CPUHP_AP_PERF_ARM_L2X0_ONLINE,
diff --git a/arch/arm/mm/fault-armv.c b/arch/arm/mm/fault-armv.c
index 2bec87c3327d..39fd5df73317 100644
--- a/arch/arm/mm/fault-armv.c
+++ b/arch/arm/mm/fault-armv.c
@@ -62,7 +62,7 @@ static int do_adjust_pte(struct vm_area_struct *vma, unsigned long address,
}
static int adjust_pte(struct vm_area_struct *vma, unsigned long address,
- unsigned long pfn, struct vm_fault *vmf)
+ unsigned long pfn, bool need_lock)
{
spinlock_t *ptl;
pgd_t *pgd;
@@ -99,12 +99,11 @@ again:
if (!pte)
return 0;
- /*
- * If we are using split PTE locks, then we need to take the page
- * lock here. Otherwise we are using shared mm->page_table_lock
- * which is already locked, thus cannot take it.
- */
- if (ptl != vmf->ptl) {
+ if (need_lock) {
+ /*
+ * Use nested version here to indicate that we are already
+ * holding one similar spinlock.
+ */
spin_lock_nested(ptl, SINGLE_DEPTH_NESTING);
if (unlikely(!pmd_same(pmdval, pmdp_get_lockless(pmd)))) {
pte_unmap_unlock(pte, ptl);
@@ -114,7 +113,7 @@ again:
ret = do_adjust_pte(vma, address, pfn, pte);
- if (ptl != vmf->ptl)
+ if (need_lock)
spin_unlock(ptl);
pte_unmap(pte);
@@ -123,9 +122,10 @@ again:
static void
make_coherent(struct address_space *mapping, struct vm_area_struct *vma,
- unsigned long addr, pte_t *ptep, unsigned long pfn,
- struct vm_fault *vmf)
+ unsigned long addr, pte_t *ptep, unsigned long pfn)
{
+ const unsigned long pmd_start_addr = ALIGN_DOWN(addr, PMD_SIZE);
+ const unsigned long pmd_end_addr = pmd_start_addr + PMD_SIZE;
struct mm_struct *mm = vma->vm_mm;
struct vm_area_struct *mpnt;
unsigned long offset;
@@ -142,6 +142,14 @@ make_coherent(struct address_space *mapping, struct vm_area_struct *vma,
flush_dcache_mmap_lock(mapping);
vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) {
/*
+ * If we are using split PTE locks, then we need to take the pte
+ * lock. Otherwise we are using shared mm->page_table_lock which
+ * is already locked, thus cannot take it.
+ */
+ bool need_lock = IS_ENABLED(CONFIG_SPLIT_PTE_PTLOCKS);
+ unsigned long mpnt_addr;
+
+ /*
* If this VMA is not in our MM, we can ignore it.
* Note that we intentionally mask out the VMA
* that we are fixing up.
@@ -151,7 +159,12 @@ make_coherent(struct address_space *mapping, struct vm_area_struct *vma,
if (!(mpnt->vm_flags & VM_MAYSHARE))
continue;
offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
- aliases += adjust_pte(mpnt, mpnt->vm_start + offset, pfn, vmf);
+ mpnt_addr = mpnt->vm_start + offset;
+
+ /* Avoid deadlocks by not grabbing the same PTE lock again. */
+ if (mpnt_addr >= pmd_start_addr && mpnt_addr < pmd_end_addr)
+ need_lock = false;
+ aliases += adjust_pte(mpnt, mpnt_addr, pfn, need_lock);
}
flush_dcache_mmap_unlock(mapping);
if (aliases)
@@ -194,7 +207,7 @@ void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma,
__flush_dcache_folio(mapping, folio);
if (mapping) {
if (cache_is_vivt())
- make_coherent(mapping, vma, addr, ptep, pfn, vmf);
+ make_coherent(mapping, vma, addr, ptep, pfn);
else if (vma->vm_flags & VM_EXEC)
__flush_icache_all();
}
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index 89f1c97f3079..748698e91a4b 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -436,7 +436,7 @@ void __arm_iomem_set_ro(void __iomem *ptr, size_t size)
set_memory_ro((unsigned long)ptr, PAGE_ALIGN(size) / PAGE_SIZE);
}
-void *arch_memremap_wb(phys_addr_t phys_addr, size_t size)
+void *arch_memremap_wb(phys_addr_t phys_addr, size_t size, unsigned long flags)
{
return (__force void *)arch_ioremap_caller(phys_addr, size,
MT_MEMORY_RW,
diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c
index 1a8f6914ee59..d638cc87807e 100644
--- a/arch/arm/mm/nommu.c
+++ b/arch/arm/mm/nommu.c
@@ -248,7 +248,7 @@ void __iomem *pci_remap_cfgspace(resource_size_t res_cookie, size_t size)
EXPORT_SYMBOL_GPL(pci_remap_cfgspace);
#endif
-void *arch_memremap_wb(phys_addr_t phys_addr, size_t size)
+void *arch_memremap_wb(phys_addr_t phys_addr, size_t size, unsigned long flags)
{
return (void *)phys_addr;
}
diff --git a/arch/arm/tools/syscall.tbl b/arch/arm/tools/syscall.tbl
index 49eeb2ad8dbd..27c1d5ebcd91 100644
--- a/arch/arm/tools/syscall.tbl
+++ b/arch/arm/tools/syscall.tbl
@@ -481,3 +481,4 @@
464 common getxattrat sys_getxattrat
465 common listxattrat sys_listxattrat
466 common removexattrat sys_removexattrat
+467 common open_tree_attr sys_open_tree_attr
diff --git a/arch/arm/vdso/Makefile b/arch/arm/vdso/Makefile
index 8a306bbec4a0..cb044bfd145d 100644
--- a/arch/arm/vdso/Makefile
+++ b/arch/arm/vdso/Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
# Include the generic Makefile to check the built vdso.
-include $(srctree)/lib/vdso/Makefile
+include $(srctree)/lib/vdso/Makefile.include
hostprogs := vdsomunge
diff --git a/arch/arm/vdso/vdso.lds.S b/arch/arm/vdso/vdso.lds.S
index 9bfa0f52923c..7c08371f4400 100644
--- a/arch/arm/vdso/vdso.lds.S
+++ b/arch/arm/vdso/vdso.lds.S
@@ -11,16 +11,16 @@
*/
#include <linux/const.h>
-#include <asm/asm-offsets.h>
#include <asm/page.h>
#include <asm/vdso.h>
+#include <vdso/datapage.h>
OUTPUT_FORMAT("elf32-littlearm", "elf32-bigarm", "elf32-littlearm")
OUTPUT_ARCH(arm)
SECTIONS
{
- PROVIDE(_vdso_data = . - VDSO_DATA_SIZE);
+ VDSO_VVAR_SYMS
. = SIZEOF_HEADERS;
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 940343beb3d4..62dc903ecc7f 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -162,6 +162,7 @@ config ARM64
select GENERIC_SMP_IDLE_THREAD
select GENERIC_TIME_VSYSCALL
select GENERIC_GETTIMEOFDAY
+ select GENERIC_VDSO_DATA_STORE
select GENERIC_VDSO_TIME_NS
select HARDIRQS_SW_RESEND
select HAS_IOPORT
@@ -217,6 +218,7 @@ config ARM64
if DYNAMIC_FTRACE_WITH_ARGS
select HAVE_SAMPLE_FTRACE_DIRECT
select HAVE_SAMPLE_FTRACE_DIRECT_MULTI
+ select HAVE_BUILDTIME_MCOUNT_SORT
select HAVE_EFFICIENT_UNALIGNED_ACCESS
select HAVE_GUP_FAST
select HAVE_FTRACE_GRAPH_FUNC
@@ -250,6 +252,7 @@ config ARM64
select HAVE_KRETPROBES
select HAVE_GENERIC_VDSO
select HOTPLUG_CORE_SYNC_DEAD if HOTPLUG_CPU
+ select HOTPLUG_SMT if HOTPLUG_CPU
select IRQ_DOMAIN
select IRQ_FORCED_THREADING
select KASAN_VMALLOC if KASAN
@@ -323,7 +326,7 @@ config ARCH_MMAP_RND_BITS_MIN
default 18
# max bits determined by the following formula:
-# VA_BITS - PAGE_SHIFT - 3
+# VA_BITS - PTDESC_TABLE_SHIFT
config ARCH_MMAP_RND_BITS_MAX
default 19 if ARM64_VA_BITS=36
default 24 if ARM64_VA_BITS=39
@@ -1302,6 +1305,15 @@ config NVIDIA_CARMEL_CNP_ERRATUM
If unsure, say Y.
+config ROCKCHIP_ERRATUM_3568002
+ bool "Rockchip 3568002: GIC600 can not access physical addresses higher than 4GB"
+ default y
+ help
+ The Rockchip RK3566 and RK3568 GIC600 SoC integrations have AXI
+ addressing limited to the first 32bit of physical address space.
+
+ If unsure, say Y.
+
config ROCKCHIP_ERRATUM_3588001
bool "Rockchip 3588001: GIC600 can not support shareability attributes"
default y
diff --git a/arch/arm64/Kconfig.platforms b/arch/arm64/Kconfig.platforms
index 02f9248f7c84..8b76821f190f 100644
--- a/arch/arm64/Kconfig.platforms
+++ b/arch/arm64/Kconfig.platforms
@@ -321,6 +321,10 @@ config ARCH_STM32
help
This enables support for ARMv8 based STMicroelectronics
STM32 family, including:
+ - STM32MP21:
+ - STM32MP211, STM32MP213, STM32MP215.
+ - STM32MP23:
+ - STM32MP231, STM32MP233, STM32MP235.
- STM32MP25:
- STM32MP251, STM32MP253, STM32MP255 and STM32MP257.
@@ -374,13 +378,12 @@ config ARCH_UNIPHIER
This enables support for Socionext UniPhier SoC family.
config ARCH_VEXPRESS
- bool "ARMv8 software model (Versatile Express)"
+ bool "ARM Ltd Platforms"
select GPIOLIB
select PM
select PM_GENERIC_DOMAINS
help
- This enables support for the ARMv8 software model (Versatile
- Express).
+ This enables support for the ARM Ltd Platforms.
config ARCH_VISCONTI
bool "Toshiba Visconti SoC Family"
diff --git a/arch/arm64/boot/dts/airoha/en7581-evb.dts b/arch/arm64/boot/dts/airoha/en7581-evb.dts
index cf58e43dd5b2..d53b72d18242 100644
--- a/arch/arm64/boot/dts/airoha/en7581-evb.dts
+++ b/arch/arm64/boot/dts/airoha/en7581-evb.dts
@@ -24,3 +24,47 @@
reg = <0x0 0x80000000 0x2 0x00000000>;
};
};
+
+&spi_nand {
+ partitions {
+ compatible = "fixed-partitions";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ bootloader@0 {
+ label = "bootloader";
+ reg = <0x00000000 0x00080000>;
+ read-only;
+ };
+
+ art@200000 {
+ label = "art";
+ reg = <0x00200000 0x00400000>;
+ };
+
+ tclinux@600000 {
+ label = "tclinux";
+ reg = <0x00600000 0x03200000>;
+ };
+
+ tclinux_slave@3800000 {
+ label = "tclinux_alt";
+ reg = <0x03800000 0x03200000>;
+ };
+
+ rootfs_data@6a00000 {
+ label = "rootfs_data";
+ reg = <0x06a00000 0x01400000>;
+ };
+
+ reserved_bmt@7e00000 {
+ label = "reserved_bmt";
+ reg = <0x07e00000 0x00200000>;
+ read-only;
+ };
+ };
+};
+
+&i2c0 {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/airoha/en7581.dtsi b/arch/arm64/boot/dts/airoha/en7581.dtsi
index 55eb1762fb11..26b136940917 100644
--- a/arch/arm64/boot/dts/airoha/en7581.dtsi
+++ b/arch/arm64/boot/dts/airoha/en7581.dtsi
@@ -2,6 +2,8 @@
#include <dt-bindings/interrupt-controller/irq.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/clock/en7523-clk.h>
+#include <dt-bindings/reset/airoha,en7581-reset.h>
/ {
interrupt-parent = <&gic>;
@@ -122,6 +124,12 @@
<GIC_PPI 10 IRQ_TYPE_LEVEL_LOW>;
};
+ clk20m: clock-20000000 {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <20000000>;
+ };
+
soc {
compatible = "simple-bus";
#address-cells = <2>;
@@ -142,6 +150,36 @@
interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_LOW>;
};
+ spi@1fa10000 {
+ compatible = "airoha,en7581-snand";
+ reg = <0x0 0x1fa10000 0x0 0x140>,
+ <0x0 0x1fa11000 0x0 0x160>;
+
+ clocks = <&scuclk EN7523_CLK_SPI>;
+ clock-names = "spi";
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ status = "disabled";
+
+ spi_nand: nand@0 {
+ compatible = "spi-nand";
+ reg = <0>;
+
+ spi-max-frequency = <50000000>;
+ spi-tx-bus-width = <1>;
+ spi-rx-bus-width = <2>;
+ };
+ };
+
+ scuclk: clock-controller@1fb00000 {
+ compatible = "airoha,en7581-scu";
+ reg = <0x0 0x1fb00000 0x0 0x970>;
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ };
+
uart1: serial@1fbf0000 {
compatible = "ns16550";
reg = <0x0 0x1fbf0000 0x0 0x30>;
@@ -150,5 +188,58 @@
interrupts = <GIC_SPI 18 IRQ_TYPE_LEVEL_HIGH>;
clock-frequency = <1843200>;
};
+
+ rng@1faa1000 {
+ compatible = "airoha,en7581-trng";
+ reg = <0x0 0x1faa1000 0x0 0xc04>;
+ interrupts = <GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ system-controller@1fbf0200 {
+ compatible = "airoha,en7581-gpio-sysctl", "syscon",
+ "simple-mfd";
+ reg = <0x0 0x1fbf0200 0x0 0xc0>;
+
+ en7581_pinctrl: pinctrl {
+ compatible = "airoha,en7581-pinctrl";
+
+ interrupt-parent = <&gic>;
+ interrupts = <GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>;
+
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+ };
+
+ i2c0: i2c@1fbf8000 {
+ compatible = "mediatek,mt7621-i2c";
+ reg = <0x0 0x1fbf8000 0x0 0x100>;
+
+ resets = <&scuclk EN7581_I2C2_RST>;
+
+ clocks = <&clk20m>;
+ clock-frequency = <100000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ status = "disabled";
+ };
+
+ i2c1: i2c@1fbf8100 {
+ compatible = "mediatek,mt7621-i2c";
+ reg = <0x0 0x1fbf8100 0x0 0x100>;
+
+ resets = <&scuclk EN7581_I2C_MASTER_RST>;
+
+ clocks = <&clk20m>;
+ clock-frequency = <100000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ status = "disabled";
+ };
};
};
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a100-allwinner-perf1.dts b/arch/arm64/boot/dts/allwinner/sun50i-a100-allwinner-perf1.dts
index a387bccdcefd..a7e3be0155a8 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-a100-allwinner-perf1.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-a100-allwinner-perf1.dts
@@ -6,6 +6,7 @@
/dts-v1/;
#include "sun50i-a100.dtsi"
+#include "sun50i-a100-cpu-opp.dtsi"
#include <dt-bindings/gpio/gpio.h>
@@ -38,6 +39,10 @@
status = "okay";
};
+&cpu0 {
+ cpu-supply = <&reg_dcdc2>;
+};
+
&pio {
vcc-pb-supply = <&reg_dcdc1>;
vcc-pc-supply = <&reg_eldo1>;
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a100-cpu-opp.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-a100-cpu-opp.dtsi
new file mode 100644
index 000000000000..c6a2efa037dc
--- /dev/null
+++ b/arch/arm64/boot/dts/allwinner/sun50i-a100-cpu-opp.dtsi
@@ -0,0 +1,90 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+// Copyright (c) 2020 Yangtao Li <frank@allwinnertech.com>
+// Copyright (c) 2020 ShuoSheng Huang <huangshuosheng@allwinnertech.com>
+
+/ {
+ cpu_opp_table: opp-table-cpu {
+ compatible = "allwinner,sun50i-a100-operating-points";
+ nvmem-cells = <&cpu_speed_grade>;
+ opp-shared;
+
+ opp-408000000 {
+ clock-latency-ns = <244144>; /* 8 32k periods */
+ opp-hz = /bits/ 64 <408000000>;
+
+ opp-microvolt-speed0 = <900000>;
+ opp-microvolt-speed1 = <900000>;
+ opp-microvolt-speed2 = <900000>;
+ };
+
+ opp-600000000 {
+ clock-latency-ns = <244144>; /* 8 32k periods */
+ opp-hz = /bits/ 64 <600000000>;
+
+ opp-microvolt-speed0 = <900000>;
+ opp-microvolt-speed1 = <900000>;
+ opp-microvolt-speed2 = <900000>;
+ };
+
+ opp-816000000 {
+ clock-latency-ns = <244144>; /* 8 32k periods */
+ opp-hz = /bits/ 64 <816000000>;
+
+ opp-microvolt-speed0 = <940000>;
+ opp-microvolt-speed1 = <900000>;
+ opp-microvolt-speed2 = <900000>;
+ };
+
+ opp-1080000000 {
+ clock-latency-ns = <244144>; /* 8 32k periods */
+ opp-hz = /bits/ 64 <1080000000>;
+
+ opp-microvolt-speed0 = <1020000>;
+ opp-microvolt-speed1 = <980000>;
+ opp-microvolt-speed2 = <950000>;
+ };
+
+ opp-1200000000 {
+ clock-latency-ns = <244144>; /* 8 32k periods */
+ opp-hz = /bits/ 64 <1200000000>;
+
+ opp-microvolt-speed0 = <1100000>;
+ opp-microvolt-speed1 = <1020000>;
+ opp-microvolt-speed2 = <1000000>;
+ };
+
+ opp-1320000000 {
+ clock-latency-ns = <244144>; /* 8 32k periods */
+ opp-hz = /bits/ 64 <1320000000>;
+
+ opp-microvolt-speed0 = <1160000>;
+ opp-microvolt-speed1 = <1060000>;
+ opp-microvolt-speed2 = <1030000>;
+ };
+
+ opp-1464000000 {
+ clock-latency-ns = <244144>; /* 8 32k periods */
+ opp-hz = /bits/ 64 <1464000000>;
+
+ opp-microvolt-speed0 = <1180000>;
+ opp-microvolt-speed1 = <1180000>;
+ opp-microvolt-speed2 = <1130000>;
+ };
+ };
+};
+
+&cpu0 {
+ operating-points-v2 = <&cpu_opp_table>;
+};
+
+&cpu1 {
+ operating-points-v2 = <&cpu_opp_table>;
+};
+
+&cpu2 {
+ operating-points-v2 = <&cpu_opp_table>;
+};
+
+&cpu3 {
+ operating-points-v2 = <&cpu_opp_table>;
+};
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a100.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-a100.dtsi
index a24adba201af..f9f6fea03b74 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-a100.dtsi
+++ b/arch/arm64/boot/dts/allwinner/sun50i-a100.dtsi
@@ -23,6 +23,7 @@
device_type = "cpu";
reg = <0x0>;
enable-method = "psci";
+ clocks = <&ccu CLK_CPUX>;
};
cpu1: cpu@1 {
@@ -30,6 +31,7 @@
device_type = "cpu";
reg = <0x1>;
enable-method = "psci";
+ clocks = <&ccu CLK_CPUX>;
};
cpu2: cpu@2 {
@@ -37,6 +39,7 @@
device_type = "cpu";
reg = <0x2>;
enable-method = "psci";
+ clocks = <&ccu CLK_CPUX>;
};
cpu3: cpu@3 {
@@ -44,6 +47,7 @@
device_type = "cpu";
reg = <0x3>;
enable-method = "psci";
+ clocks = <&ccu CLK_CPUX>;
};
};
@@ -175,6 +179,10 @@
ths_calibration: calib@14 {
reg = <0x14 8>;
};
+
+ cpu_speed_grade: cpu-speed-grade@1c {
+ reg = <0x1c 0x2>;
+ };
};
watchdog@30090a0 {
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h700-anbernic-rg35xx-2024.dts b/arch/arm64/boot/dts/allwinner/sun50i-h700-anbernic-rg35xx-2024.dts
index a231abf1684a..7e17ca07892d 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h700-anbernic-rg35xx-2024.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h700-anbernic-rg35xx-2024.dts
@@ -167,6 +167,12 @@
gpios = <&pio 8 12 GPIO_ACTIVE_HIGH>; /* PI12 */
default-state = "on";
};
+
+ led-1 {
+ function = LED_FUNCTION_STATUS;
+ color = <LED_COLOR_ID_GREEN>;
+ gpios = <&pio 8 11 GPIO_ACTIVE_HIGH>; /* PI11 */
+ };
};
reg_vcc5v: regulator-vcc5v { /* USB-C power input */
@@ -237,6 +243,7 @@
battery_power: battery-power {
compatible = "x-powers,axp717-battery-power-supply";
monitored-battery = <&battery>;
+ x-powers,no-thermistor;
};
regulators {
@@ -328,8 +335,17 @@
regulator-name = "boost";
};
+ /*
+ * Regulator function is unknown, but reading
+ * GPIO values in bootloader is inconsistent
+ * on reboot if this is disabled. Setting to
+ * default value from regulator OTP mem.
+ */
reg_cpusldo: cpusldo {
- /* unused */
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <900000>;
+ regulator-max-microvolt = <900000>;
};
};
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h700-anbernic-rg35xx-h.dts b/arch/arm64/boot/dts/allwinner/sun50i-h700-anbernic-rg35xx-h.dts
index ff453336eab1..bef4d107482f 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h700-anbernic-rg35xx-h.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h700-anbernic-rg35xx-h.dts
@@ -71,6 +71,25 @@
<&pio 8 2 GPIO_ACTIVE_LOW>;
#mux-control-cells = <0>;
};
+
+ reg_vcc3v8_usb: regulator-vcc3v8-usb {
+ compatible = "regulator-fixed";
+ enable-active-high;
+ gpio = <&pio 4 5 GPIO_ACTIVE_HIGH>; /* PE5 */
+ regulator-min-microvolt = <3800000>;
+ regulator-max-microvolt = <3800000>;
+ regulator-name = "vcc3v8-usb";
+ };
+
+ reg_vcc5v0_usb: regulator-vcc5v0-usb {
+ compatible = "regulator-fixed";
+ enable-active-high;
+ gpio = <&pio 8 7 GPIO_ACTIVE_HIGH>; /* PI7 */
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ regulator-name = "vcc5v0-usb";
+ vin-supply = <&reg_vcc3v8_usb>;
+ };
};
&gpadc {
@@ -113,3 +132,7 @@
function = "gpio_out";
};
};
+
+&usbphy {
+ usb1_vbus-supply = <&reg_vcc5v0_usb>;
+};
diff --git a/arch/arm64/boot/dts/amd/amd-overdrive-rev-b0.dts b/arch/arm64/boot/dts/amd/amd-overdrive-rev-b0.dts
index 7c82d90e940d..8862adae44e9 100644
--- a/arch/arm64/boot/dts/amd/amd-overdrive-rev-b0.dts
+++ b/arch/arm64/boot/dts/amd/amd-overdrive-rev-b0.dts
@@ -58,7 +58,7 @@
&spi1 {
status = "okay";
- sdcard0: sdcard@0 {
+ sdcard0: mmc@0 {
compatible = "mmc-spi-slot";
reg = <0>;
spi-max-frequency = <20000000>;
diff --git a/arch/arm64/boot/dts/amd/amd-overdrive-rev-b1.dts b/arch/arm64/boot/dts/amd/amd-overdrive-rev-b1.dts
index 58e2b0a6f841..b34dd8d5d1b1 100644
--- a/arch/arm64/boot/dts/amd/amd-overdrive-rev-b1.dts
+++ b/arch/arm64/boot/dts/amd/amd-overdrive-rev-b1.dts
@@ -8,32 +8,10 @@
/dts-v1/;
-/include/ "amd-seattle-soc.dtsi"
-/include/ "amd-seattle-cpus.dtsi"
+/include/ "amd-overdrive-rev-b0.dts"
/ {
model = "AMD Seattle (Rev.B1) Development Board (Overdrive)";
- compatible = "amd,seattle-overdrive", "amd,seattle";
-
- chosen {
- stdout-path = &serial0;
- };
-
- psci {
- compatible = "arm,psci-0.2";
- method = "smc";
- };
-};
-
-&ccp0 {
- status = "okay";
-};
-
-/**
- * NOTE: In Rev.B, gpio0 is reserved.
- */
-&gpio1 {
- status = "okay";
};
&gpio2 {
@@ -44,48 +22,11 @@
status = "okay";
};
-&gpio4 {
- status = "okay";
-};
-
-&i2c0 {
- status = "okay";
-};
-
-&i2c1 {
- status = "okay";
-};
-
-&pcie0 {
- status = "okay";
-};
-
&sata1 {
status = "okay";
};
-&spi0 {
- status = "okay";
-};
-
-&spi1 {
- status = "okay";
- sdcard0: sdcard@0 {
- compatible = "mmc-spi-slot";
- reg = <0>;
- spi-max-frequency = <20000000>;
- voltage-ranges = <3200 3400>;
- pl022,interface = <0>;
- pl022,com-mode = <0x0>;
- pl022,rx-level-trig = <0>;
- pl022,tx-level-trig = <0>;
- };
-};
-
&ipmi_kcs {
status = "okay";
};
-&smb0 {
- /include/ "amd-seattle-xgbe-b.dtsi"
-};
diff --git a/arch/arm64/boot/dts/amd/amd-seattle-clks.dtsi b/arch/arm64/boot/dts/amd/amd-seattle-clks.dtsi
index 2dd2c28171ee..73f687773ce6 100644
--- a/arch/arm64/boot/dts/amd/amd-seattle-clks.dtsi
+++ b/arch/arm64/boot/dts/amd/amd-seattle-clks.dtsi
@@ -5,51 +5,39 @@
* Copyright (C) 2014 Advanced Micro Devices, Inc.
*/
- adl3clk_100mhz: clk100mhz_0 {
+ adl3clk_100mhz: uartspiclk_100mhz: clock-100000000 {
compatible = "fixed-clock";
#clock-cells = <0>;
clock-frequency = <100000000>;
clock-output-names = "adl3clk_100mhz";
};
- ccpclk_375mhz: clk375mhz {
+ ccpclk_375mhz: clock-375000000 {
compatible = "fixed-clock";
#clock-cells = <0>;
clock-frequency = <375000000>;
clock-output-names = "ccpclk_375mhz";
};
- sataclk_333mhz: clk333mhz {
+ sataclk_333mhz: clock-333000000 {
compatible = "fixed-clock";
#clock-cells = <0>;
clock-frequency = <333000000>;
clock-output-names = "sataclk_333mhz";
};
- pcieclk_500mhz: clk500mhz_0 {
+ dmaclk_500mhz: pcieclk_500mhz: clock-500000000 {
compatible = "fixed-clock";
#clock-cells = <0>;
clock-frequency = <500000000>;
clock-output-names = "pcieclk_500mhz";
};
- dmaclk_500mhz: clk500mhz_1 {
- compatible = "fixed-clock";
- #clock-cells = <0>;
- clock-frequency = <500000000>;
- clock-output-names = "dmaclk_500mhz";
- };
-
- miscclk_250mhz: clk250mhz_4 {
+ xgmacclk0_dma_250mhz: xgmacclk0_ptp_250mhz: xgmacclk1_dma_250mhz: xgmacclk1_ptp_250mhz:
+ miscclk_250mhz: clock-250000000 {
compatible = "fixed-clock";
#clock-cells = <0>;
clock-frequency = <250000000>;
clock-output-names = "miscclk_250mhz";
};
- uartspiclk_100mhz: clk100mhz_1 {
- compatible = "fixed-clock";
- #clock-cells = <0>;
- clock-frequency = <100000000>;
- clock-output-names = "uartspiclk_100mhz";
- };
diff --git a/arch/arm64/boot/dts/amd/amd-seattle-soc.dtsi b/arch/arm64/boot/dts/amd/amd-seattle-soc.dtsi
index d3d931eb7677..a611f8288b3e 100644
--- a/arch/arm64/boot/dts/amd/amd-seattle-soc.dtsi
+++ b/arch/arm64/boot/dts/amd/amd-seattle-soc.dtsi
@@ -11,6 +11,8 @@
#address-cells = <2>;
#size-cells = <2>;
+ /include/ "amd-seattle-clks.dtsi"
+
gic0: interrupt-controller@e1101000 {
compatible = "arm,gic-400", "arm,cortex-a15-gic";
interrupt-controller;
@@ -38,7 +40,7 @@
<1 10 0xff04>;
};
- smb0: smb {
+ smb0: bus {
compatible = "simple-bus";
#address-cells = <2>;
#size-cells = <2>;
@@ -51,8 +53,6 @@
*/
dma-ranges = <0x0 0x0 0x0 0x0 0x100 0x0>;
- /include/ "amd-seattle-clks.dtsi"
-
sata0: sata@e0300000 {
compatible = "snps,dwc-ahci";
reg = <0 0xe0300000 0 0xf0000>;
@@ -121,7 +121,6 @@
status = "disabled";
compatible = "arm,pl022", "arm,primecell";
reg = <0 0xe1020000 0 0x1000>;
- spi-controller;
interrupts = <0 330 4>;
clocks = <&uartspiclk_100mhz>, <&uartspiclk_100mhz>;
clock-names = "sspclk", "apb_pclk";
@@ -131,7 +130,6 @@
status = "disabled";
compatible = "arm,pl022", "arm,primecell";
reg = <0 0xe1030000 0 0x1000>;
- spi-controller;
interrupts = <0 329 4>;
clocks = <&uartspiclk_100mhz>, <&uartspiclk_100mhz>;
clock-names = "sspclk", "apb_pclk";
diff --git a/arch/arm64/boot/dts/amd/amd-seattle-xgbe-b.dtsi b/arch/arm64/boot/dts/amd/amd-seattle-xgbe-b.dtsi
index 9259e547e2e8..18b0c2dd1b2d 100644
--- a/arch/arm64/boot/dts/amd/amd-seattle-xgbe-b.dtsi
+++ b/arch/arm64/boot/dts/amd/amd-seattle-xgbe-b.dtsi
@@ -5,35 +5,7 @@
* Copyright (C) 2015 Advanced Micro Devices, Inc.
*/
- xgmacclk0_dma_250mhz: clk250mhz_0 {
- compatible = "fixed-clock";
- #clock-cells = <0>;
- clock-frequency = <250000000>;
- clock-output-names = "xgmacclk0_dma_250mhz";
- };
-
- xgmacclk0_ptp_250mhz: clk250mhz_1 {
- compatible = "fixed-clock";
- #clock-cells = <0>;
- clock-frequency = <250000000>;
- clock-output-names = "xgmacclk0_ptp_250mhz";
- };
-
- xgmacclk1_dma_250mhz: clk250mhz_2 {
- compatible = "fixed-clock";
- #clock-cells = <0>;
- clock-frequency = <250000000>;
- clock-output-names = "xgmacclk1_dma_250mhz";
- };
-
- xgmacclk1_ptp_250mhz: clk250mhz_3 {
- compatible = "fixed-clock";
- #clock-cells = <0>;
- clock-frequency = <250000000>;
- clock-output-names = "xgmacclk1_ptp_250mhz";
- };
-
- xgmac0: xgmac@e0700000 {
+ xgmac0: ethernet@e0700000 {
compatible = "amd,xgbe-seattle-v1a";
reg = <0 0xe0700000 0 0x80000>,
<0 0xe0780000 0 0x80000>,
@@ -59,7 +31,7 @@
dma-coherent;
};
- xgmac1: xgmac@e0900000 {
+ xgmac1: ethernet@e0900000 {
compatible = "amd,xgbe-seattle-v1a";
reg = <0 0xe0900000 0 0x80000>,
<0 0xe0980000 0 0x80000>,
diff --git a/arch/arm64/boot/dts/amlogic/amlogic-a4.dtsi b/arch/arm64/boot/dts/amlogic/amlogic-a4.dtsi
index de10e7aebf21..a06838552f21 100644
--- a/arch/arm64/boot/dts/amlogic/amlogic-a4.dtsi
+++ b/arch/arm64/boot/dts/amlogic/amlogic-a4.dtsi
@@ -48,3 +48,24 @@
};
};
};
+
+&apb {
+ gpio_intc: interrupt-controller@4080 {
+ compatible = "amlogic,a4-gpio-intc",
+ "amlogic,meson-gpio-intc";
+ reg = <0x0 0x4080 0x0 0x20>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ amlogic,channel-interrupts =
+ <10 11 12 13 14 15 16 17 18 19 20 21>;
+ };
+
+ gpio_ao_intc: interrupt-controller@8e72c {
+ compatible = "amlogic,a4-gpio-ao-intc",
+ "amlogic,meson-gpio-intc";
+ reg = <0x0 0x8e72c 0x0 0x0c>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ amlogic,channel-interrupts = <140 141>;
+ };
+};
diff --git a/arch/arm64/boot/dts/amlogic/amlogic-a5.dtsi b/arch/arm64/boot/dts/amlogic/amlogic-a5.dtsi
index 17a6316de891..32ed1776891b 100644
--- a/arch/arm64/boot/dts/amlogic/amlogic-a5.dtsi
+++ b/arch/arm64/boot/dts/amlogic/amlogic-a5.dtsi
@@ -48,3 +48,15 @@
};
};
};
+
+&apb {
+ gpio_intc: interrupt-controller@4080 {
+ compatible = "amlogic,a5-gpio-intc",
+ "amlogic,meson-gpio-intc";
+ reg = <0x0 0x4080 0x0 0x20>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ amlogic,channel-interrupts =
+ <10 11 12 13 14 15 16 17 18 19 20 21>;
+ };
+};
diff --git a/arch/arm64/boot/dts/amlogic/meson-axg.dtsi b/arch/arm64/boot/dts/amlogic/meson-axg.dtsi
index e9b22868983d..a6924d246bb1 100644
--- a/arch/arm64/boot/dts/amlogic/meson-axg.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-axg.dtsi
@@ -1693,8 +1693,12 @@
};
pwm_AO_cd: pwm@2000 {
- compatible = "amlogic,meson-axg-ao-pwm";
+ compatible = "amlogic,meson-axg-pwm-v2", "amlogic,meson8-pwm-v2";
reg = <0x0 0x02000 0x0 0x20>;
+ clocks = <&xtal>,
+ <&clkc_AO CLKID_AO_CLK81>,
+ <&clkc CLKID_FCLK_DIV4>,
+ <&clkc CLKID_FCLK_DIV5>;
#pwm-cells = <3>;
status = "disabled";
};
@@ -1728,8 +1732,12 @@
};
pwm_AO_ab: pwm@7000 {
- compatible = "amlogic,meson-axg-ao-pwm";
+ compatible = "amlogic,meson-axg-pwm-v2", "amlogic,meson8-pwm-v2";
reg = <0x0 0x07000 0x0 0x20>;
+ clocks = <&xtal>,
+ <&clkc_AO CLKID_AO_CLK81>,
+ <&clkc CLKID_FCLK_DIV4>,
+ <&clkc CLKID_FCLK_DIV5>;
#pwm-cells = <3>;
status = "disabled";
};
@@ -1806,15 +1814,23 @@
};
pwm_ab: pwm@1b000 {
- compatible = "amlogic,meson-axg-ee-pwm";
+ compatible = "amlogic,meson-axg-pwm-v2", "amlogic,meson8-pwm-v2";
reg = <0x0 0x1b000 0x0 0x20>;
+ clocks = <&xtal>,
+ <&clkc CLKID_FCLK_DIV5>,
+ <&clkc CLKID_FCLK_DIV4>,
+ <&clkc CLKID_FCLK_DIV3>;
#pwm-cells = <3>;
status = "disabled";
};
pwm_cd: pwm@1a000 {
- compatible = "amlogic,meson-axg-ee-pwm";
+ compatible = "amlogic,meson-axg-pwm-v2", "amlogic,meson8-pwm-v2";
reg = <0x0 0x1a000 0x0 0x20>;
+ clocks = <&xtal>,
+ <&clkc CLKID_FCLK_DIV5>,
+ <&clkc CLKID_FCLK_DIV4>,
+ <&clkc CLKID_FCLK_DIV3>;
#pwm-cells = <3>;
status = "disabled";
};
diff --git a/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi b/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi
index 49b51c54013f..ab2b3f15ef19 100644
--- a/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi
@@ -2060,8 +2060,11 @@
};
pwm_AO_cd: pwm@2000 {
- compatible = "amlogic,meson-g12a-ao-pwm-cd";
+ compatible = "amlogic,meson-g12-pwm-v2",
+ "amlogic,meson8-pwm-v2";
reg = <0x0 0x2000 0x0 0x20>;
+ clocks = <&xtal>,
+ <&clkc_AO CLKID_AO_CLK81>;
#pwm-cells = <3>;
status = "disabled";
};
@@ -2099,8 +2102,13 @@
};
pwm_AO_ab: pwm@7000 {
- compatible = "amlogic,meson-g12a-ao-pwm-ab";
+ compatible = "amlogic,meson-g12-pwm-v2",
+ "amlogic,meson8-pwm-v2";
reg = <0x0 0x7000 0x0 0x20>;
+ clocks = <&xtal>,
+ <&clkc_AO CLKID_AO_CLK81>,
+ <&clkc CLKID_FCLK_DIV4>,
+ <&clkc CLKID_FCLK_DIV5>;
#pwm-cells = <3>;
status = "disabled";
};
@@ -2301,22 +2309,37 @@
};
pwm_ef: pwm@19000 {
- compatible = "amlogic,meson-g12a-ee-pwm";
+ compatible = "amlogic,meson-g12-pwm-v2",
+ "amlogic,meson8-pwm-v2";
reg = <0x0 0x19000 0x0 0x20>;
+ clocks = <&xtal>,
+ <>, /* unknown/untested, the datasheet calls it "vid_pll" */
+ <&clkc CLKID_FCLK_DIV4>,
+ <&clkc CLKID_FCLK_DIV3>;
#pwm-cells = <3>;
status = "disabled";
};
pwm_cd: pwm@1a000 {
- compatible = "amlogic,meson-g12a-ee-pwm";
+ compatible = "amlogic,meson-g12-pwm-v2",
+ "amlogic,meson8-pwm-v2";
reg = <0x0 0x1a000 0x0 0x20>;
+ clocks = <&xtal>,
+ <>, /* unknown/untested, the datasheet calls it "vid_pll" */
+ <&clkc CLKID_FCLK_DIV4>,
+ <&clkc CLKID_FCLK_DIV3>;
#pwm-cells = <3>;
status = "disabled";
};
pwm_ab: pwm@1b000 {
- compatible = "amlogic,meson-g12a-ee-pwm";
+ compatible = "amlogic,meson-g12-pwm-v2",
+ "amlogic,meson8-pwm-v2";
reg = <0x0 0x1b000 0x0 0x20>;
+ clocks = <&xtal>,
+ <>, /* unknown/untested, the datasheet calls it "vid_pll" */
+ <&clkc CLKID_FCLK_DIV4>,
+ <&clkc CLKID_FCLK_DIV3>;
#pwm-cells = <3>;
status = "disabled";
};
diff --git a/arch/arm64/boot/dts/amlogic/meson-g12a-fbx8am.dts b/arch/arm64/boot/dts/amlogic/meson-g12a-fbx8am.dts
index a457b3f4397b..9aa36f17ffa2 100644
--- a/arch/arm64/boot/dts/amlogic/meson-g12a-fbx8am.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-g12a-fbx8am.dts
@@ -346,8 +346,6 @@
&pwm_AO_cd {
pinctrl-0 = <&pwm_ao_d_e_pins>;
pinctrl-names = "default";
- clocks = <&xtal>;
- clock-names = "clkin1";
status = "okay";
};
@@ -355,8 +353,6 @@
status = "okay";
pinctrl-0 = <&pwm_e_pins>;
pinctrl-names = "default";
- clocks = <&xtal>;
- clock-names = "clkin0";
};
&pdm {
diff --git a/arch/arm64/boot/dts/amlogic/meson-g12a-radxa-zero.dts b/arch/arm64/boot/dts/amlogic/meson-g12a-radxa-zero.dts
index c779a5da7d1e..952b8d02e5c2 100644
--- a/arch/arm64/boot/dts/amlogic/meson-g12a-radxa-zero.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-g12a-radxa-zero.dts
@@ -284,8 +284,6 @@
&pwm_AO_cd {
pinctrl-0 = <&pwm_ao_d_e_pins>;
pinctrl-names = "default";
- clocks = <&xtal>;
- clock-names = "clkin1";
status = "okay";
};
@@ -293,8 +291,6 @@
status = "okay";
pinctrl-0 = <&pwm_e_pins>;
pinctrl-names = "default";
- clocks = <&xtal>;
- clock-names = "clkin0";
};
&saradc {
diff --git a/arch/arm64/boot/dts/amlogic/meson-g12a-sei510.dts b/arch/arm64/boot/dts/amlogic/meson-g12a-sei510.dts
index ea51341f031b..52fbc5103e45 100644
--- a/arch/arm64/boot/dts/amlogic/meson-g12a-sei510.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-g12a-sei510.dts
@@ -389,8 +389,6 @@
&pwm_AO_cd {
pinctrl-0 = <&pwm_ao_d_e_pins>;
pinctrl-names = "default";
- clocks = <&xtal>;
- clock-names = "clkin1";
status = "okay";
};
@@ -398,8 +396,6 @@
status = "okay";
pinctrl-0 = <&pwm_e_pins>;
pinctrl-names = "default";
- clocks = <&xtal>;
- clock-names = "clkin0";
};
&pdm {
diff --git a/arch/arm64/boot/dts/amlogic/meson-g12a-u200.dts b/arch/arm64/boot/dts/amlogic/meson-g12a-u200.dts
index f70a46967e2b..5407049d2647 100644
--- a/arch/arm64/boot/dts/amlogic/meson-g12a-u200.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-g12a-u200.dts
@@ -502,8 +502,6 @@
&pwm_AO_cd {
pinctrl-0 = <&pwm_ao_d_e_pins>;
pinctrl-names = "default";
- clocks = <&xtal>;
- clock-names = "clkin1";
status = "okay";
};
diff --git a/arch/arm64/boot/dts/amlogic/meson-g12a-x96-max.dts b/arch/arm64/boot/dts/amlogic/meson-g12a-x96-max.dts
index 32f98a192494..01da83658ae3 100644
--- a/arch/arm64/boot/dts/amlogic/meson-g12a-x96-max.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-g12a-x96-max.dts
@@ -328,8 +328,6 @@
&pwm_AO_cd {
pinctrl-0 = <&pwm_ao_d_e_pins>;
pinctrl-names = "default";
- clocks = <&xtal>;
- clock-names = "clkin1";
status = "okay";
};
@@ -363,8 +361,6 @@
status = "okay";
pinctrl-0 = <&pwm_e_pins>;
pinctrl-names = "default";
- clocks = <&xtal>;
- clock-names = "clkin0";
};
&uart_A {
diff --git a/arch/arm64/boot/dts/amlogic/meson-g12b-a311d-libretech-cc.dts b/arch/arm64/boot/dts/amlogic/meson-g12b-a311d-libretech-cc.dts
index 65b963d794cd..adedc1340c78 100644
--- a/arch/arm64/boot/dts/amlogic/meson-g12b-a311d-libretech-cc.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-g12b-a311d-libretech-cc.dts
@@ -116,6 +116,4 @@
&pwm_ab {
pinctrl-0 = <&pwm_a_e_pins>, <&pwm_b_x7_pins>;
- clocks = <&xtal>, <&xtal>;
- clock-names = "clkin0", "clkin1";
};
diff --git a/arch/arm64/boot/dts/amlogic/meson-g12b-bananapi-cm4.dtsi b/arch/arm64/boot/dts/amlogic/meson-g12b-bananapi-cm4.dtsi
index 08c33ec7e9f1..92e8b26ecccc 100644
--- a/arch/arm64/boot/dts/amlogic/meson-g12b-bananapi-cm4.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-g12b-bananapi-cm4.dtsi
@@ -257,8 +257,6 @@
&pwm_ab {
pinctrl-0 = <&pwm_a_e_pins>;
pinctrl-names = "default";
- clocks = <&xtal>;
- clock-names = "clkin0";
status = "okay";
};
@@ -273,8 +271,6 @@
&pwm_AO_cd {
pinctrl-0 = <&pwm_ao_d_e_pins>;
pinctrl-names = "default";
- clocks = <&xtal>;
- clock-names = "clkin1";
status = "okay";
};
diff --git a/arch/arm64/boot/dts/amlogic/meson-g12b-bananapi.dtsi b/arch/arm64/boot/dts/amlogic/meson-g12b-bananapi.dtsi
index d4e1990b5f26..54663c55a20e 100644
--- a/arch/arm64/boot/dts/amlogic/meson-g12b-bananapi.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-g12b-bananapi.dtsi
@@ -367,8 +367,6 @@
status = "okay";
pinctrl-0 = <&pwm_a_e_pins>;
pinctrl-names = "default";
- clocks = <&xtal>;
- clock-names = "clkin0";
};
&pwm_ef {
@@ -380,8 +378,6 @@
&pwm_AO_cd {
pinctrl-0 = <&pwm_ao_d_e_pins>;
pinctrl-names = "default";
- clocks = <&xtal>;
- clock-names = "clkin1";
status = "okay";
};
diff --git a/arch/arm64/boot/dts/amlogic/meson-g12b-khadas-vim3.dtsi b/arch/arm64/boot/dts/amlogic/meson-g12b-khadas-vim3.dtsi
index 16dd409051b4..48650bad230d 100644
--- a/arch/arm64/boot/dts/amlogic/meson-g12b-khadas-vim3.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-g12b-khadas-vim3.dtsi
@@ -92,16 +92,12 @@
&pwm_ab {
pinctrl-0 = <&pwm_a_e_pins>;
pinctrl-names = "default";
- clocks = <&xtal>;
- clock-names = "clkin0";
status = "okay";
};
&pwm_AO_cd {
pinctrl-0 = <&pwm_ao_d_e_pins>;
pinctrl-names = "default";
- clocks = <&xtal>;
- clock-names = "clkin1";
status = "okay";
};
diff --git a/arch/arm64/boot/dts/amlogic/meson-g12b-odroid.dtsi b/arch/arm64/boot/dts/amlogic/meson-g12b-odroid.dtsi
index 09d959aefb18..7e8964bacfce 100644
--- a/arch/arm64/boot/dts/amlogic/meson-g12b-odroid.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-g12b-odroid.dtsi
@@ -327,16 +327,12 @@
&pwm_ab {
pinctrl-0 = <&pwm_a_e_pins>;
pinctrl-names = "default";
- clocks = <&xtal>;
- clock-names = "clkin0";
status = "okay";
};
&pwm_AO_cd {
pinctrl-0 = <&pwm_ao_d_e_pins>;
pinctrl-names = "default";
- clocks = <&xtal>;
- clock-names = "clkin1";
status = "okay";
};
diff --git a/arch/arm64/boot/dts/amlogic/meson-g12b-radxa-zero2.dts b/arch/arm64/boot/dts/amlogic/meson-g12b-radxa-zero2.dts
index 39feba7f2d08..fc05ecf90714 100644
--- a/arch/arm64/boot/dts/amlogic/meson-g12b-radxa-zero2.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-g12b-radxa-zero2.dts
@@ -379,32 +379,24 @@
&pwm_ab {
pinctrl-0 = <&pwm_a_e_pins>;
pinctrl-names = "default";
- clocks = <&xtal>;
- clock-names = "clkin0";
status = "okay";
};
&pwm_ef {
pinctrl-0 = <&pwm_e_pins>;
pinctrl-names = "default";
- clocks = <&xtal>;
- clock-names = "clkin0";
status = "okay";
};
&pwm_AO_ab {
pinctrl-0 = <&pwm_ao_a_pins>;
pinctrl-names = "default";
- clocks = <&xtal>;
- clock-names = "clkin0";
status = "okay";
};
&pwm_AO_cd {
pinctrl-0 = <&pwm_ao_d_e_pins>;
pinctrl-names = "default";
- clocks = <&xtal>;
- clock-names = "clkin1";
status = "okay";
};
diff --git a/arch/arm64/boot/dts/amlogic/meson-g12b-w400.dtsi b/arch/arm64/boot/dts/amlogic/meson-g12b-w400.dtsi
index 4cb6930ffb19..a7a0fc264cdc 100644
--- a/arch/arm64/boot/dts/amlogic/meson-g12b-w400.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-g12b-w400.dtsi
@@ -304,24 +304,18 @@
&pwm_ab {
pinctrl-0 = <&pwm_a_e_pins>;
pinctrl-names = "default";
- clocks = <&xtal>;
- clock-names = "clkin0";
status = "okay";
};
&pwm_AO_cd {
pinctrl-0 = <&pwm_ao_d_e_pins>;
pinctrl-names = "default";
- clocks = <&xtal>;
- clock-names = "clkin1";
status = "okay";
};
&pwm_ef {
pinctrl-0 = <&pwm_e_pins>;
pinctrl-names = "default";
- clocks = <&xtal>;
- clock-names = "clkin0";
status = "okay";
};
diff --git a/arch/arm64/boot/dts/amlogic/meson-gx-libretech-pc.dtsi b/arch/arm64/boot/dts/amlogic/meson-gx-libretech-pc.dtsi
index d38c3a224fbe..2da49cfbde77 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gx-libretech-pc.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-gx-libretech-pc.dtsi
@@ -345,24 +345,18 @@
&pwm_AO_ab {
pinctrl-0 = <&pwm_ao_a_3_pins>;
pinctrl-names = "default";
- clocks = <&clkc CLKID_FCLK_DIV4>;
- clock-names = "clkin0";
status = "okay";
};
&pwm_ab {
pinctrl-0 = <&pwm_b_pins>;
pinctrl-names = "default";
- clocks = <&clkc CLKID_FCLK_DIV4>;
- clock-names = "clkin0";
status = "okay";
};
&pwm_ef {
pinctrl-0 = <&pwm_e_pins>, <&pwm_f_clk_pins>;
pinctrl-names = "default";
- clocks = <&clkc CLKID_FCLK_DIV4>;
- clock-names = "clkin0";
status = "okay";
};
diff --git a/arch/arm64/boot/dts/amlogic/meson-gx-p23x-q20x.dtsi b/arch/arm64/boot/dts/amlogic/meson-gx-p23x-q20x.dtsi
index 45ccddd1aaf0..6da1316d97c6 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gx-p23x-q20x.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-gx-p23x-q20x.dtsi
@@ -240,8 +240,6 @@
status = "okay";
pinctrl-0 = <&pwm_e_pins>;
pinctrl-names = "default";
- clocks = <&clkc CLKID_FCLK_DIV4>;
- clock-names = "clkin0";
};
&saradc {
diff --git a/arch/arm64/boot/dts/amlogic/meson-gx.dtsi b/arch/arm64/boot/dts/amlogic/meson-gx.dtsi
index 2673f0dbafe7..7d99ca44e660 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gx.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-gx.dtsi
@@ -329,14 +329,14 @@
};
pwm_ab: pwm@8550 {
- compatible = "amlogic,meson-gx-pwm", "amlogic,meson-gxbb-pwm";
+ compatible = "amlogic,meson-gxbb-pwm-v2", "amlogic,meson8-pwm-v2";
reg = <0x0 0x08550 0x0 0x10>;
#pwm-cells = <3>;
status = "disabled";
};
pwm_cd: pwm@8650 {
- compatible = "amlogic,meson-gx-pwm", "amlogic,meson-gxbb-pwm";
+ compatible = "amlogic,meson-gxbb-pwm-v2", "amlogic,meson8-pwm-v2";
reg = <0x0 0x08650 0x0 0x10>;
#pwm-cells = <3>;
status = "disabled";
@@ -351,7 +351,7 @@
};
pwm_ef: pwm@86c0 {
- compatible = "amlogic,meson-gx-pwm", "amlogic,meson-gxbb-pwm";
+ compatible = "amlogic,meson-gxbb-pwm-v2", "amlogic,meson8-pwm-v2";
reg = <0x0 0x086c0 0x0 0x10>;
#pwm-cells = <3>;
status = "disabled";
@@ -498,7 +498,7 @@
};
pwm_AO_ab: pwm@550 {
- compatible = "amlogic,meson-gx-ao-pwm", "amlogic,meson-gxbb-ao-pwm";
+ compatible = "amlogic,meson-gxbb-pwm-v2", "amlogic,meson8-pwm-v2";
reg = <0x0 0x00550 0x0 0x10>;
#pwm-cells = <3>;
status = "disabled";
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-nanopi-k2.dts b/arch/arm64/boot/dts/amlogic/meson-gxbb-nanopi-k2.dts
index cf2e2ef81680..2ecc6ebd5a43 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxbb-nanopi-k2.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-nanopi-k2.dts
@@ -298,8 +298,6 @@
status = "okay";
pinctrl-0 = <&pwm_e_pins>;
pinctrl-names = "default";
- clocks = <&clkc CLKID_FCLK_DIV4>;
- clock-names = "clkin0";
};
&saradc {
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-nexbox-a95x.dts b/arch/arm64/boot/dts/amlogic/meson-gxbb-nexbox-a95x.dts
index 7d7dde93fff3..c09da40ff7b0 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxbb-nexbox-a95x.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-nexbox-a95x.dts
@@ -241,8 +241,6 @@
status = "okay";
pinctrl-0 = <&pwm_e_pins>;
pinctrl-names = "default";
- clocks = <&clkc CLKID_FCLK_DIV4>;
- clock-names = "clkin0";
};
/* Wireless SDIO Module */
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-p20x.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxbb-p20x.dtsi
index 1736bd2e96e2..6f67364fd63f 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxbb-p20x.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-p20x.dtsi
@@ -150,8 +150,6 @@
status = "okay";
pinctrl-0 = <&pwm_e_pins>;
pinctrl-names = "default";
- clocks = <&clkc CLKID_FCLK_DIV4>;
- clock-names = "clkin0";
};
/* Wireless SDIO Module */
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-vega-s95.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxbb-vega-s95.dtsi
index 3807a184810b..6ff567225fee 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxbb-vega-s95.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-vega-s95.dtsi
@@ -222,8 +222,6 @@
status = "okay";
pinctrl-0 = <&pwm_e_pins>;
pinctrl-names = "default";
- clocks = <&clkc CLKID_FCLK_DIV4>;
- clock-names = "clkin0";
};
&saradc {
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-wetek.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxbb-wetek.dtsi
index deb295227189..bfedfc1472ec 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxbb-wetek.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-wetek.dtsi
@@ -185,8 +185,6 @@
status = "okay";
pinctrl-0 = <&pwm_e_pins>;
pinctrl-names = "default";
- clocks = <&clkc CLKID_FCLK_DIV4>;
- clock-names = "clkin0";
};
&saradc {
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi
index ed00e67e6923..8ebce7114a60 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi
@@ -739,6 +739,31 @@
};
};
+&pwm_ab {
+ clocks = <&xtal>,
+ <>, /* unknown/untested, the datasheet calls it "vid_pll" */
+ <&clkc CLKID_FCLK_DIV4>,
+ <&clkc CLKID_FCLK_DIV3>;
+};
+
+&pwm_AO_ab {
+ clocks = <&xtal>, <&clkc CLKID_CLK81>;
+};
+
+&pwm_cd {
+ clocks = <&xtal>,
+ <>, /* unknown/untested, the datasheet calls it "vid_pll" */
+ <&clkc CLKID_FCLK_DIV4>,
+ <&clkc CLKID_FCLK_DIV3>;
+};
+
+&pwm_ef {
+ clocks = <&xtal>,
+ <>, /* unknown/untested, the datasheet calls it "vid_pll" */
+ <&clkc CLKID_FCLK_DIV4>,
+ <&clkc CLKID_FCLK_DIV3>;
+};
+
&pwrc {
resets = <&reset RESET_VIU>,
<&reset RESET_VENC>,
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s805x-p241.dts b/arch/arm64/boot/dts/amlogic/meson-gxl-s805x-p241.dts
index c5e2306ad7a4..ca7c4e8e7cac 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxl-s805x-p241.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s805x-p241.dts
@@ -280,8 +280,6 @@
status = "okay";
pinctrl-0 = <&pwm_e_pins>;
pinctrl-names = "default";
- clocks = <&clkc CLKID_FCLK_DIV4>;
- clock-names = "clkin0";
};
/* This is connected to the Bluetooth module: */
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905w-jethome-jethub-j80.dts b/arch/arm64/boot/dts/amlogic/meson-gxl-s905w-jethome-jethub-j80.dts
index 2b94b6e5285e..4ca90ac947b7 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxl-s905w-jethome-jethub-j80.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905w-jethome-jethub-j80.dts
@@ -116,8 +116,6 @@
status = "okay";
pinctrl-0 = <&pwm_e_pins>;
pinctrl-names = "default";
- clocks = <&clkc CLKID_FCLK_DIV4>;
- clock-names = "clkin0";
};
&saradc {
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-hwacom-amazetv.dts b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-hwacom-amazetv.dts
index 89fe5110f7a2..62a2da766a00 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-hwacom-amazetv.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-hwacom-amazetv.dts
@@ -115,8 +115,6 @@
status = "okay";
pinctrl-0 = <&pwm_e_pins>;
pinctrl-names = "default";
- clocks = <&clkc CLKID_FCLK_DIV4>;
- clock-names = "clkin0";
};
/* SD card */
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-khadas-vim.dts b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-khadas-vim.dts
index a80f0ea2773b..4e89d6f6bb57 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-khadas-vim.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-khadas-vim.dts
@@ -211,8 +211,6 @@
status = "okay";
pinctrl-0 = <&pwm_ao_a_3_pins>, <&pwm_ao_b_pins>;
pinctrl-names = "default";
- clocks = <&xtal> , <&xtal>;
- clock-names = "clkin0", "clkin1" ;
};
&pwm_ef {
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-nexbox-a95x.dts b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-nexbox-a95x.dts
index c79f9f2099bf..236cedec9f19 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-nexbox-a95x.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-nexbox-a95x.dts
@@ -145,8 +145,6 @@
status = "okay";
pinctrl-0 = <&pwm_e_pins>;
pinctrl-names = "default";
- clocks = <&clkc CLKID_FCLK_DIV4>;
- clock-names = "clkin0";
};
/* Wireless SDIO Module */
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dtsi
index b52a830efcce..05a0d4de3ad7 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dtsi
@@ -101,8 +101,6 @@
status = "okay";
pinctrl-0 = <&pwm_e_pins>;
pinctrl-names = "default";
- clocks = <&clkc CLKID_FCLK_DIV4>;
- clock-names = "clkin0";
};
&saradc {
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi
index f58d1790de1c..2dc2fdaecf9f 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi
@@ -809,6 +809,31 @@
};
};
+&pwm_ab {
+ clocks = <&xtal>,
+ <>, /* unknown/untested, the datasheet calls it "vid_pll" */
+ <&clkc CLKID_FCLK_DIV4>,
+ <&clkc CLKID_FCLK_DIV3>;
+};
+
+&pwm_AO_ab {
+ clocks = <&xtal>, <&clkc CLKID_CLK81>;
+};
+
+&pwm_cd {
+ clocks = <&xtal>,
+ <>, /* unknown/untested, the datasheet calls it "vid_pll" */
+ <&clkc CLKID_FCLK_DIV4>,
+ <&clkc CLKID_FCLK_DIV3>;
+};
+
+&pwm_ef {
+ clocks = <&xtal>,
+ <>, /* unknown/untested, the datasheet calls it "vid_pll" */
+ <&clkc CLKID_FCLK_DIV4>,
+ <&clkc CLKID_FCLK_DIV3>;
+};
+
&pwrc {
resets = <&reset RESET_VIU>,
<&reset RESET_VENC>,
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dts b/arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dts
index 96a3dd2d8a99..2a09b3d550e2 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dts
@@ -289,16 +289,12 @@
status = "okay";
pinctrl-0 = <&pwm_ao_a_3_pins>, <&pwm_ao_b_pins>;
pinctrl-names = "default";
- clocks = <&clkc CLKID_FCLK_DIV4>;
- clock-names = "clkin0";
};
&pwm_ef {
status = "okay";
pinctrl-0 = <&pwm_e_pins>, <&pwm_f_clk_pins>;
pinctrl-names = "default";
- clocks = <&clkc CLKID_FCLK_DIV4>;
- clock-names = "clkin0";
};
&sd_emmc_a {
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxm-rbox-pro.dts b/arch/arm64/boot/dts/amlogic/meson-gxm-rbox-pro.dts
index 7356d3b628b1..ecaf678b23dd 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxm-rbox-pro.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxm-rbox-pro.dts
@@ -192,8 +192,6 @@
status = "okay";
pinctrl-0 = <&pwm_e_pins>;
pinctrl-names = "default";
- clocks = <&clkc CLKID_FCLK_DIV4>;
- clock-names = "clkin0";
};
/* Wireless SDIO Module */
diff --git a/arch/arm64/boot/dts/amlogic/meson-libretech-cottonwood.dtsi b/arch/arm64/boot/dts/amlogic/meson-libretech-cottonwood.dtsi
index 929e4720ae76..ac9c4c2673b1 100644
--- a/arch/arm64/boot/dts/amlogic/meson-libretech-cottonwood.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-libretech-cottonwood.dtsi
@@ -458,24 +458,18 @@
status = "okay";
pinctrl-0 = <&pwm_ao_d_e_pins>;
pinctrl-names = "default";
- clocks = <&xtal>;
- clock-names = "clkin1";
};
&pwm_ab {
status = "okay";
pinctrl-0 = <&pwm_b_x7_pins>;
pinctrl-names = "default";
- clocks = <&xtal>;
- clock-names = "clkin1";
};
&pwm_cd {
status = "okay";
pinctrl-0 = <&pwm_d_x3_pins>;
pinctrl-names = "default";
- clocks = <&xtal>;
- clock-names = "clkin1";
};
&saradc {
diff --git a/arch/arm64/boot/dts/amlogic/meson-sm1-ac2xx.dtsi b/arch/arm64/boot/dts/amlogic/meson-sm1-ac2xx.dtsi
index d1fa8b8bf795..a3463149db3d 100644
--- a/arch/arm64/boot/dts/amlogic/meson-sm1-ac2xx.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-sm1-ac2xx.dtsi
@@ -199,15 +199,11 @@
status = "okay";
pinctrl-0 = <&pwm_ao_a_pins>;
pinctrl-names = "default";
- clocks = <&xtal>;
- clock-names = "clkin0";
};
&pwm_AO_cd {
pinctrl-0 = <&pwm_ao_d_e_pins>;
pinctrl-names = "default";
- clocks = <&xtal>;
- clock-names = "clkin1";
status = "okay";
};
@@ -215,8 +211,6 @@
status = "okay";
pinctrl-0 = <&pwm_e_pins>;
pinctrl-names = "default";
- clocks = <&xtal>;
- clock-names = "clkin0";
};
&saradc {
diff --git a/arch/arm64/boot/dts/amlogic/meson-sm1-bananapi.dtsi b/arch/arm64/boot/dts/amlogic/meson-sm1-bananapi.dtsi
index 81dce862902a..40db95f64636 100644
--- a/arch/arm64/boot/dts/amlogic/meson-sm1-bananapi.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-sm1-bananapi.dtsi
@@ -367,8 +367,6 @@
&pwm_AO_cd {
pinctrl-0 = <&pwm_ao_d_e_pins>;
pinctrl-names = "default";
- clocks = <&xtal>;
- clock-names = "clkin1";
status = "okay";
};
diff --git a/arch/arm64/boot/dts/amlogic/meson-sm1-khadas-vim3l.dts b/arch/arm64/boot/dts/amlogic/meson-sm1-khadas-vim3l.dts
index 9c0b544e2209..5d75ad3f3e46 100644
--- a/arch/arm64/boot/dts/amlogic/meson-sm1-khadas-vim3l.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-sm1-khadas-vim3l.dts
@@ -78,8 +78,6 @@
&pwm_AO_cd {
pinctrl-0 = <&pwm_ao_d_e_pins>;
pinctrl-names = "default";
- clocks = <&xtal>;
- clock-names = "clkin1";
status = "okay";
};
diff --git a/arch/arm64/boot/dts/amlogic/meson-sm1-odroid.dtsi b/arch/arm64/boot/dts/amlogic/meson-sm1-odroid.dtsi
index 7b0e9817a615..ad8d07883760 100644
--- a/arch/arm64/boot/dts/amlogic/meson-sm1-odroid.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-sm1-odroid.dtsi
@@ -392,8 +392,6 @@
&pwm_AO_cd {
pinctrl-0 = <&pwm_ao_d_e_pins>;
pinctrl-names = "default";
- clocks = <&xtal>;
- clock-names = "clkin1";
status = "okay";
};
diff --git a/arch/arm64/boot/dts/amlogic/meson-sm1-sei610.dts b/arch/arm64/boot/dts/amlogic/meson-sm1-sei610.dts
index 2e3397e55da2..37d7f64b6d5d 100644
--- a/arch/arm64/boot/dts/amlogic/meson-sm1-sei610.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-sm1-sei610.dts
@@ -435,15 +435,11 @@
status = "okay";
pinctrl-0 = <&pwm_ao_a_pins>;
pinctrl-names = "default";
- clocks = <&xtal>;
- clock-names = "clkin0";
};
&pwm_AO_cd {
pinctrl-0 = <&pwm_ao_d_e_pins>;
pinctrl-names = "default";
- clocks = <&xtal>;
- clock-names = "clkin1";
status = "okay";
};
@@ -451,8 +447,6 @@
status = "okay";
pinctrl-0 = <&pwm_e_pins>;
pinctrl-names = "default";
- clocks = <&xtal>;
- clock-names = "clkin0";
};
&saradc {
diff --git a/arch/arm64/boot/dts/apple/Makefile b/arch/arm64/boot/dts/apple/Makefile
index ab6ebb53218a..4f337bff36cd 100644
--- a/arch/arm64/boot/dts/apple/Makefile
+++ b/arch/arm64/boot/dts/apple/Makefile
@@ -46,6 +46,22 @@ dtb-$(CONFIG_ARCH_APPLE) += t8011-j120.dtb
dtb-$(CONFIG_ARCH_APPLE) += t8011-j121.dtb
dtb-$(CONFIG_ARCH_APPLE) += t8011-j207.dtb
dtb-$(CONFIG_ARCH_APPLE) += t8011-j208.dtb
+dtb-$(CONFIG_ARCH_APPLE) += t8012-j132.dtb
+dtb-$(CONFIG_ARCH_APPLE) += t8012-j137.dtb
+dtb-$(CONFIG_ARCH_APPLE) += t8012-j140a.dtb
+dtb-$(CONFIG_ARCH_APPLE) += t8012-j140k.dtb
+dtb-$(CONFIG_ARCH_APPLE) += t8012-j152f.dtb
+dtb-$(CONFIG_ARCH_APPLE) += t8012-j160.dtb
+dtb-$(CONFIG_ARCH_APPLE) += t8012-j174.dtb
+dtb-$(CONFIG_ARCH_APPLE) += t8012-j185.dtb
+dtb-$(CONFIG_ARCH_APPLE) += t8012-j185f.dtb
+dtb-$(CONFIG_ARCH_APPLE) += t8012-j213.dtb
+dtb-$(CONFIG_ARCH_APPLE) += t8012-j214k.dtb
+dtb-$(CONFIG_ARCH_APPLE) += t8012-j215.dtb
+dtb-$(CONFIG_ARCH_APPLE) += t8012-j223.dtb
+dtb-$(CONFIG_ARCH_APPLE) += t8012-j230k.dtb
+dtb-$(CONFIG_ARCH_APPLE) += t8012-j680.dtb
+dtb-$(CONFIG_ARCH_APPLE) += t8012-j780.dtb
dtb-$(CONFIG_ARCH_APPLE) += t8015-d201.dtb
dtb-$(CONFIG_ARCH_APPLE) += t8015-d20.dtb
dtb-$(CONFIG_ARCH_APPLE) += t8015-d211.dtb
diff --git a/arch/arm64/boot/dts/apple/s5l8960x-5s.dtsi b/arch/arm64/boot/dts/apple/s5l8960x-5s.dtsi
index 0b16adf07f79..8868df1538d6 100644
--- a/arch/arm64/boot/dts/apple/s5l8960x-5s.dtsi
+++ b/arch/arm64/boot/dts/apple/s5l8960x-5s.dtsi
@@ -8,6 +8,7 @@
#include "s5l8960x.dtsi"
#include "s5l8960x-common.dtsi"
+#include "s5l8960x-opp.dtsi"
#include <dt-bindings/input/input.h>
/ {
@@ -49,3 +50,11 @@
};
};
};
+
+&dwi_bl {
+ status = "okay";
+};
+
+&framebuffer0 {
+ power-domains = <&ps_disp0 &ps_mipi_dsi>;
+};
diff --git a/arch/arm64/boot/dts/apple/s5l8960x-air1.dtsi b/arch/arm64/boot/dts/apple/s5l8960x-air1.dtsi
index 741c5a9f21dd..dd57eb1d34c0 100644
--- a/arch/arm64/boot/dts/apple/s5l8960x-air1.dtsi
+++ b/arch/arm64/boot/dts/apple/s5l8960x-air1.dtsi
@@ -8,6 +8,7 @@
#include "s5l8960x.dtsi"
#include "s5l8960x-common.dtsi"
+#include "s5l8965x-opp.dtsi"
#include <dt-bindings/input/input.h>
/ {
@@ -49,3 +50,7 @@
};
};
};
+
+&framebuffer0 {
+ power-domains = <&ps_disp0 &ps_dp>;
+};
diff --git a/arch/arm64/boot/dts/apple/s5l8960x-mini2.dtsi b/arch/arm64/boot/dts/apple/s5l8960x-mini2.dtsi
index b27ef5680626..f3696d22e71c 100644
--- a/arch/arm64/boot/dts/apple/s5l8960x-mini2.dtsi
+++ b/arch/arm64/boot/dts/apple/s5l8960x-mini2.dtsi
@@ -8,6 +8,7 @@
#include "s5l8960x.dtsi"
#include "s5l8960x-common.dtsi"
+#include "s5l8960x-opp.dtsi"
#include <dt-bindings/input/input.h>
/ {
@@ -49,3 +50,7 @@
};
};
};
+
+&framebuffer0 {
+ power-domains = <&ps_disp0 &ps_dp>;
+};
diff --git a/arch/arm64/boot/dts/apple/s5l8960x-opp.dtsi b/arch/arm64/boot/dts/apple/s5l8960x-opp.dtsi
new file mode 100644
index 000000000000..e4d568c4a119
--- /dev/null
+++ b/arch/arm64/boot/dts/apple/s5l8960x-opp.dtsi
@@ -0,0 +1,45 @@
+// SPDX-License-Identifier: GPL-2.0+ OR MIT
+/*
+ * Operating points for Apple S5L8960X "A7" SoC, Up to 1296 MHz
+ *
+ * target-type: N51, N53, J85, J86, J87, J85m, J86m, J87m
+ *
+ * Copyright (c) 2024, Nick Chan <towinchenmi@gmail.com>
+ */
+
+/ {
+ cyclone_opp: opp-table {
+ compatible = "operating-points-v2";
+
+ opp01 {
+ opp-hz = /bits/ 64 <300000000>;
+ opp-level = <1>;
+ clock-latency-ns = <15500>;
+ };
+ opp02 {
+ opp-hz = /bits/ 64 <396000000>;
+ opp-level = <2>;
+ clock-latency-ns = <43000>;
+ };
+ opp03 {
+ opp-hz = /bits/ 64 <600000000>;
+ opp-level = <3>;
+ clock-latency-ns = <26000>;
+ };
+ opp04 {
+ opp-hz = /bits/ 64 <840000000>;
+ opp-level = <4>;
+ clock-latency-ns = <30000>;
+ };
+ opp05 {
+ opp-hz = /bits/ 64 <1128000000>;
+ opp-level = <5>;
+ clock-latency-ns = <39500>;
+ };
+ opp06 {
+ opp-hz = /bits/ 64 <1296000000>;
+ opp-level = <6>;
+ clock-latency-ns = <45500>;
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/apple/s5l8960x-pmgr.dtsi b/arch/arm64/boot/dts/apple/s5l8960x-pmgr.dtsi
new file mode 100644
index 000000000000..da265f484307
--- /dev/null
+++ b/arch/arm64/boot/dts/apple/s5l8960x-pmgr.dtsi
@@ -0,0 +1,610 @@
+// SPDX-License-Identifier: GPL-2.0+ OR MIT
+/*
+ * PMGR Power domains for the Apple S5L8960X "A7" SoC
+ *
+ * Copyright (c) 2024 Nick Chan <towinchenmi@gmail.com>
+ */
+
+&pmgr {
+ ps_cpu0: power-controller@20000 {
+ compatible = "apple,s5l8960x-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x20000 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "cpu0";
+ apple,always-on; /* Core device */
+ };
+
+ ps_cpu1: power-controller@20008 {
+ compatible = "apple,s5l8960x-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x20008 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "cpu1";
+ apple,always-on; /* Core device */
+ };
+
+ ps_secuart0: power-controller@200f0 {
+ compatible = "apple,s5l8960x-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x200f0 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "secuart0";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_secuart1: power-controller@200f8 {
+ compatible = "apple,s5l8960x-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x200f8 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "secuart1";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_cpm: power-controller@20010 {
+ compatible = "apple,s5l8960x-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x20010 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "cpm";
+ apple,always-on; /* Core device */
+ };
+
+ ps_lio: power-controller@20018 {
+ compatible = "apple,s5l8960x-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x20018 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "lio";
+ apple,always-on; /* Core device */
+ };
+
+ ps_iomux: power-controller@20020 {
+ compatible = "apple,s5l8960x-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x20020 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "iomux";
+ apple,always-on; /* Core device */
+ };
+
+ ps_aic: power-controller@20028 {
+ compatible = "apple,s5l8960x-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x20028 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "aic";
+ apple,always-on; /* Core device */
+ };
+
+ ps_debug: power-controller@20030 {
+ compatible = "apple,s5l8960x-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x20030 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "debug";
+ };
+
+ ps_dwi: power-controller@20038 {
+ compatible = "apple,s5l8960x-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x20038 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "dwi";
+ };
+
+ ps_gpio: power-controller@20040 {
+ compatible = "apple,s5l8960x-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x20040 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "gpio";
+ };
+
+ ps_mca0: power-controller@20048 {
+ compatible = "apple,s5l8960x-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x20048 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "mca0";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_mca1: power-controller@20050 {
+ compatible = "apple,s5l8960x-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x20050 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "mca1";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_mca2: power-controller@20058 {
+ compatible = "apple,s5l8960x-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x20058 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "mca2";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_mca3: power-controller@20060 {
+ compatible = "apple,s5l8960x-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x20060 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "mca3";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_mca4: power-controller@20068 {
+ compatible = "apple,s5l8960x-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x20068 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "mca4";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_pwm0: power-controller@20070 {
+ compatible = "apple,s5l8960x-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x20070 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "pwm0";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_i2c0: power-controller@20078 {
+ compatible = "apple,s5l8960x-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x20078 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "i2c0";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_i2c1: power-controller@20080 {
+ compatible = "apple,s5l8960x-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x20080 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "i2c1";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_i2c2: power-controller@20088 {
+ compatible = "apple,s5l8960x-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x20088 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "i2c2";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_i2c3: power-controller@20090 {
+ compatible = "apple,s5l8960x-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x20090 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "i2c3";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_spi0: power-controller@20098 {
+ compatible = "apple,s5l8960x-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x20098 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "spi0";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_spi1: power-controller@200a0 {
+ compatible = "apple,s5l8960x-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x200a0 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "spi1";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_spi2: power-controller@200a8 {
+ compatible = "apple,s5l8960x-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x200a8 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "spi2";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_spi3: power-controller@200b0 {
+ compatible = "apple,s5l8960x-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x200b0 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "spi3";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_uart0: power-controller@200b8 {
+ compatible = "apple,s5l8960x-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x200b8 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "uart0";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_uart1: power-controller@200c0 {
+ compatible = "apple,s5l8960x-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x200c0 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "uart1";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_uart2: power-controller@200c8 {
+ compatible = "apple,s5l8960x-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x200c8 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "uart2";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_uart3: power-controller@200d0 {
+ compatible = "apple,s5l8960x-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x200d0 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "uart3";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_uart4: power-controller@200d8 {
+ compatible = "apple,s5l8960x-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x200d8 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "uart4";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_uart5: power-controller@200e0 {
+ compatible = "apple,s5l8960x-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x200e0 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "uart5";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_uart6: power-controller@200e8 {
+ compatible = "apple,s5l8960x-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x200e8 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "uart6";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_sio_p: power-controller@20110 {
+ compatible = "apple,s5l8960x-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x20110 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "sio_p";
+ };
+
+ ps_usb: power-controller@20158 {
+ compatible = "apple,s5l8960x-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x20158 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "usb";
+ };
+
+ ps_usbctrl: power-controller@20160 {
+ compatible = "apple,s5l8960x-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x20160 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "usbctrl";
+ power-domains = <&ps_usb>;
+ };
+
+ ps_usb2host0: power-controller@20170 {
+ compatible = "apple,s5l8960x-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x20170 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "usb2host0";
+ power-domains = <&ps_usbctrl>;
+ };
+
+ ps_usb2host1: power-controller@20180 {
+ compatible = "apple,s5l8960x-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x20180 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "usb2host1";
+ power-domains = <&ps_usbctrl>;
+ };
+
+ ps_disp_busmux: power-controller@201a8 {
+ compatible = "apple,s5l8960x-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x201a8 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "disp_busmux";
+ };
+
+ ps_media: power-controller@201d8 {
+ compatible = "apple,s5l8960x-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x201d8 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "media";
+ };
+
+ ps_isp: power-controller@201d0 {
+ compatible = "apple,s5l8960x-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x201d0 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "isp";
+ };
+
+ ps_msr: power-controller@201e0 {
+ compatible = "apple,s5l8960x-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x201e0 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "msr";
+ power-domains = <&ps_media>;
+ };
+
+ ps_jpg: power-controller@201e8 {
+ compatible = "apple,s5l8960x-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x201e8 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "jpg";
+ power-domains = <&ps_media>;
+ };
+
+ ps_disp0: power-controller@201b0 {
+ compatible = "apple,s5l8960x-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x201b0 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "disp0";
+ power-domains = <&ps_disp_busmux>;
+ };
+
+ ps_aes0: power-controller@20100 {
+ compatible = "apple,s5l8960x-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x20100 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "aes0";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_sio: power-controller@20108 {
+ compatible = "apple,s5l8960x-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x20108 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "sio";
+ power-domains = <&ps_sio_p>;
+ apple,always-on; /* Core device */
+ };
+
+ ps_hsic0_phy: power-controller@20118 {
+ compatible = "apple,s5l8960x-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x20118 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "hsic0_phy";
+ power-domains = <&ps_usb2host0>;
+ };
+
+ ps_hsic1_phy: power-controller@20120 {
+ compatible = "apple,s5l8960x-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x20120 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "hsic1_phy";
+ power-domains = <&ps_usb2host0>;
+ };
+
+ ps_hsic2_phy: power-controller@20128 {
+ compatible = "apple,s5l8960x-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x20128 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "hsic2_phy";
+ power-domains = <&ps_usb2host1>;
+ };
+
+ ps_ispsens0: power-controller@20130 {
+ compatible = "apple,s5l8960x-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x20130 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "ispsens0";
+ };
+
+ ps_ispsens1: power-controller@20138 {
+ compatible = "apple,s5l8960x-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x20138 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "ispsens1";
+ };
+
+ ps_mcc: power-controller@20140 {
+ compatible = "apple,s5l8960x-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x20140 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "mcc";
+ apple,always-on; /* Core device */
+ };
+
+ ps_mcu: power-controller@20148 {
+ compatible = "apple,s5l8960x-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x20148 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "mcu";
+ apple,always-on; /* Core device */
+ };
+
+ ps_amp: power-controller@20150 {
+ compatible = "apple,s5l8960x-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x20150 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "amp";
+ apple,always-on; /* Core device */
+ };
+
+ ps_usb2host0_ohci: power-controller@20168 {
+ compatible = "apple,s5l8960x-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x20168 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "usb2host0_ohci";
+ power-domains = <&ps_usb2host0>;
+ };
+
+ ps_usb2host1_ohci: power-controller@20178 {
+ compatible = "apple,s5l8960x-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x20178 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "usb2host1_ohci";
+ power-domains = <&ps_usb2host1>;
+ };
+
+ ps_usbotg: power-controller@20188 {
+ compatible = "apple,s5l8960x-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x20188 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "usbotg";
+ power-domains = <&ps_usbctrl>;
+ };
+
+ ps_smx: power-controller@20190 {
+ compatible = "apple,s5l8960x-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x20190 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "smx";
+ apple,always-on; /* Apple fabric, critical block */
+ };
+
+ ps_sf: power-controller@20198 {
+ compatible = "apple,s5l8960x-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x20198 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "sf";
+ apple,always-on; /* Apple fabric, critical block */
+ };
+
+ ps_cp: power-controller@201a0 {
+ compatible = "apple,s5l8960x-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x201a0 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "cp";
+ apple,always-on; /* Core device */
+ };
+
+ ps_mipi_dsi: power-controller@201b8 {
+ compatible = "apple,s5l8960x-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x201b8 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "mipi_dsi";
+ power-domains = <&ps_disp_busmux>;
+ };
+
+ ps_dp: power-controller@201c0 {
+ compatible = "apple,s5l8960x-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x201c0 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "dp";
+ power-domains = <&ps_disp0>;
+ };
+
+ ps_disp1: power-controller@201c8 {
+ compatible = "apple,s5l8960x-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x201c8 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "disp1";
+ power-domains = <&ps_disp_busmux>;
+ };
+
+ ps_vdec: power-controller@201f0 {
+ compatible = "apple,s5l8960x-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x201f0 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "vdec";
+ power-domains = <&ps_media>;
+ };
+
+ ps_venc: power-controller@201f8 {
+ compatible = "apple,s5l8960x-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x201f8 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "venc";
+ power-domains = <&ps_media>;
+ };
+
+ ps_ans: power-controller@20200 {
+ compatible = "apple,s5l8960x-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x20200 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "ans";
+ };
+
+ ps_ans_dll: power-controller@20208 {
+ compatible = "apple,s5l8960x-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x20208 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "ans_dll";
+ power-domains = <&ps_ans>;
+ };
+
+ ps_gfx: power-controller@20218 {
+ compatible = "apple,s5l8960x-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x20218 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "gfx";
+ };
+
+ ps_sep: power-controller@20268 {
+ compatible = "apple,s5l8960x-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x20268 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "sep";
+ power-domains = <&ps_secuart1>, <&ps_secuart0>;
+ apple,always-on; /* Locked on */
+ };
+};
diff --git a/arch/arm64/boot/dts/apple/s5l8960x.dtsi b/arch/arm64/boot/dts/apple/s5l8960x.dtsi
index 0218ecac1d83..d820b0e43050 100644
--- a/arch/arm64/boot/dts/apple/s5l8960x.dtsi
+++ b/arch/arm64/boot/dts/apple/s5l8960x.dtsi
@@ -33,6 +33,8 @@
compatible = "apple,cyclone";
reg = <0x0 0x0>;
cpu-release-addr = <0 0>; /* To be filled by loader */
+ operating-points-v2 = <&cyclone_opp>;
+ performance-domains = <&cpufreq>;
enable-method = "spin-table";
device_type = "cpu";
};
@@ -41,6 +43,8 @@
compatible = "apple,cyclone";
reg = <0x0 0x1>;
cpu-release-addr = <0 0>; /* To be filled by loader */
+ operating-points-v2 = <&cyclone_opp>;
+ performance-domains = <&cpufreq>;
enable-method = "spin-table";
device_type = "cpu";
};
@@ -53,6 +57,12 @@
nonposted-mmio;
ranges;
+ cpufreq: performance-controller@202220000 {
+ compatible = "apple,s5l8960x-cluster-cpufreq";
+ reg = <0x2 0x02220000 0 0x1000>;
+ #performance-domain-cells = <0>;
+ };
+
serial0: serial@20a0a0000 {
compatible = "apple,s5l-uart";
reg = <0x2 0x0a0a0000 0x0 0x4000>;
@@ -62,9 +72,18 @@
/* Use the bootloader-enabled clocks for now. */
clocks = <&clkref>, <&clkref>;
clock-names = "uart", "clk_uart_baud0";
+ power-domains = <&ps_uart0>;
status = "disabled";
};
+ pmgr: power-management@20e000000 {
+ compatible = "apple,s5l8960x-pmgr", "apple,pmgr", "syscon", "simple-mfd";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ reg = <0x2 0xe000000 0 0x24000>;
+ };
+
wdt: watchdog@20e027000 {
compatible = "apple,s5l8960x-wdt", "apple,wdt";
reg = <0x2 0x0e027000 0x0 0x1000>;
@@ -78,11 +97,20 @@
reg = <0x2 0x0e100000 0x0 0x100000>;
#interrupt-cells = <3>;
interrupt-controller;
+ power-domains = <&ps_aic>;
+ };
+
+ dwi_bl: backlight@20e200010 {
+ compatible = "apple,s5l8960x-dwi-bl", "apple,dwi-bl";
+ reg = <0x2 0x0e200010 0x0 0x8>;
+ power-domains = <&ps_dwi>;
+ status = "disabled";
};
pinctrl: pinctrl@20e300000 {
compatible = "apple,s5l8960x-pinctrl", "apple,pinctrl";
reg = <0x2 0x0e300000 0x0 0x100000>;
+ power-domains = <&ps_gpio>;
gpio-controller;
#gpio-cells = <2>;
@@ -111,3 +139,5 @@
<AIC_FIQ AIC_TMR_GUEST_VIRT IRQ_TYPE_LEVEL_HIGH>;
};
};
+
+#include "s5l8960x-pmgr.dtsi"
diff --git a/arch/arm64/boot/dts/apple/s5l8965x-opp.dtsi b/arch/arm64/boot/dts/apple/s5l8965x-opp.dtsi
new file mode 100644
index 000000000000..d34dae74a90c
--- /dev/null
+++ b/arch/arm64/boot/dts/apple/s5l8965x-opp.dtsi
@@ -0,0 +1,45 @@
+// SPDX-License-Identifier: GPL-2.0+ OR MIT
+/*
+ * Operating points for Apple S5L8965X "A7" Rev A SoC, Up to 1392 MHz
+ *
+ * target-type: J71, J72, J73
+ *
+ * Copyright (c) 2024, Nick Chan <towinchenmi@gmail.com>
+ */
+
+/ {
+ cyclone_opp: opp-table {
+ compatible = "operating-points-v2";
+
+ opp01 {
+ opp-hz = /bits/ 64 <300000000>;
+ opp-level = <1>;
+ clock-latency-ns = <10000>;
+ };
+ opp02 {
+ opp-hz = /bits/ 64 <600000000>;
+ opp-level = <2>;
+ clock-latency-ns = <49000>;
+ };
+ opp03 {
+ opp-hz = /bits/ 64 <840000000>;
+ opp-level = <3>;
+ clock-latency-ns = <30000>;
+ };
+ opp04 {
+ opp-hz = /bits/ 64 <1128000000>;
+ opp-level = <4>;
+ clock-latency-ns = <39500>;
+ };
+ opp05 {
+ opp-hz = /bits/ 64 <1296000000>;
+ opp-level = <5>;
+ clock-latency-ns = <45500>;
+ };
+ opp06 {
+ opp-hz = /bits/ 64 <1392000000>;
+ opp-level = <6>;
+ clock-latency-ns = <46500>;
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/apple/s800-0-3-common.dtsi b/arch/arm64/boot/dts/apple/s800-0-3-common.dtsi
index 4276bd890e81..cb42c5f2c1b6 100644
--- a/arch/arm64/boot/dts/apple/s800-0-3-common.dtsi
+++ b/arch/arm64/boot/dts/apple/s800-0-3-common.dtsi
@@ -43,6 +43,10 @@
};
};
+&dwi_bl {
+ status = "okay";
+};
+
&serial0 {
status = "okay";
};
diff --git a/arch/arm64/boot/dts/apple/s800-0-3-pmgr.dtsi b/arch/arm64/boot/dts/apple/s800-0-3-pmgr.dtsi
new file mode 100644
index 000000000000..196b8e745a95
--- /dev/null
+++ b/arch/arm64/boot/dts/apple/s800-0-3-pmgr.dtsi
@@ -0,0 +1,757 @@
+// SPDX-License-Identifier: GPL-2.0+ OR MIT
+/*
+ * PMGR Power domains for the Apple S8000/3 "A9" SoC
+ *
+ * Copyright (c) 2024 Nick Chan <towinchenmi@gmail.com>
+ */
+
+&pmgr {
+ ps_cpu0: power-controller@80000 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80000 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "cpu0";
+ apple,always-on; /* Core device */
+ };
+
+ ps_cpu1: power-controller@80008 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80008 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "cpu1";
+ apple,always-on; /* Core device */
+ };
+
+ ps_cpm: power-controller@80040 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80040 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "cpm";
+ apple,always-on; /* Core device */
+ };
+
+ ps_sio_busif: power-controller@80150 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80150 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "sio_busif";
+ };
+
+ ps_sio_p: power-controller@80158 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80158 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "sio_p";
+ power-domains = <&ps_sio_busif>;
+ };
+
+ ps_sbr: power-controller@80100 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80100 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "sbr";
+ apple,always-on; /* Apple fabric, critical block */
+ };
+
+ ps_aic: power-controller@80108 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80108 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "aic";
+ apple,always-on; /* Core device */
+ };
+
+ ps_dwi: power-controller@80110 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80110 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "dwi";
+ };
+
+ ps_gpio: power-controller@80118 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80118 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "gpio";
+ };
+
+ ps_pms: power-controller@80120 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80120 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "pms";
+ apple,always-on; /* Core device */
+ };
+
+ ps_pcie_ref: power-controller@80148 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80148 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "pcie_ref";
+ };
+
+ ps_mca0: power-controller@80168 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80168 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "mca0";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_mca1: power-controller@80170 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80170 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "mca1";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_mca2: power-controller@80178 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80178 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "mca2";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_mca3: power-controller@80180 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80180 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "mca3";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_mca4: power-controller@80188 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80188 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "mca4";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_pwm0: power-controller@80190 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80190 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "pwm0";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_i2c0: power-controller@80198 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80198 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "i2c0";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_i2c1: power-controller@801a0 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x801a0 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "i2c1";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_i2c2: power-controller@801a8 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x801a8 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "i2c2";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_i2c3: power-controller@801b0 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x801b0 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "i2c3";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_spi0: power-controller@801b8 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x801b8 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "spi0";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_spi1: power-controller@801c0 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x801c0 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "spi1";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_spi2: power-controller@801c8 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x801c8 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "spi2";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_spi3: power-controller@801d0 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x801d0 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "spi3";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_uart0: power-controller@801d8 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x801d8 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "uart0";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_uart1: power-controller@801e0 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x801e0 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "uart1";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_uart2: power-controller@801e8 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x801e8 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "uart2";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_uart3: power-controller@801f0 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x801f0 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "uart3";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_uart4: power-controller@801f8 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x801f8 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "uart4";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_sio: power-controller@80160 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80160 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "sio";
+ power-domains = <&ps_sio_p>;
+ apple,always-on; /* Core device */
+ };
+
+ ps_hsic0_phy: power-controller@80128 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80128 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "hsic0_phy";
+ power-domains = <&ps_usb2host1>;
+ };
+
+ ps_hsic1_phy: power-controller@80130 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80130 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "hsic1_phy";
+ power-domains = <&ps_usb2host2>;
+ };
+
+ ps_isp_sens0: power-controller@80138 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80138 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "isp_sens0";
+ };
+
+ ps_isp_sens1: power-controller@80140 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80140 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "isp_sens1";
+ };
+
+ ps_usb: power-controller@80250 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80250 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "usb";
+ };
+
+ ps_usbctrl: power-controller@80258 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80258 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "usbctrl";
+ power-domains = <&ps_usb>;
+ };
+
+ ps_usb2host0: power-controller@80260 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80260 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "usb2host0";
+ power-domains = <&ps_usbctrl>;
+ };
+
+ ps_usb2host1: power-controller@80270 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80270 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "usb2host1";
+ power-domains = <&ps_usbctrl>;
+ };
+
+ ps_usb2host2: power-controller@80280 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80280 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "usb2host2";
+ power-domains = <&ps_usbctrl>;
+ };
+
+ ps_rtmux: power-controller@802a8 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x802a8 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "rtmux";
+ };
+
+ ps_media: power-controller@802d0 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x802d0 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "media";
+ };
+
+ ps_isp: power-controller@802c8 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x802c8 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "isp";
+ power-domains = <&ps_rtmux>;
+ };
+
+ ps_msr: power-controller@802e0 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x802e0 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "msr";
+ power-domains = <&ps_media>;
+ };
+
+ ps_jpg: power-controller@802d8 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x802d8 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "jpg";
+ power-domains = <&ps_media>;
+ };
+
+ ps_disp0: power-controller@802b0 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x802b0 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "disp0";
+ power-domains = <&ps_rtmux>;
+ };
+
+ ps_pmp: power-controller@802e8 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x802e8 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "pmp";
+ };
+
+ ps_pms_sram: power-controller@802f0 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x802f0 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "pms_sram";
+ };
+
+ ps_uart5: power-controller@80200 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80200 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "uart5";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_uart6: power-controller@80208 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80208 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "uart6";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_uart7: power-controller@80210 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80210 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "uart7";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_uart8: power-controller@80218 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80218 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "uart8";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_aes0: power-controller@80220 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80220 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "aes0";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_mcc: power-controller@80228 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80228 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "mcc";
+ apple,always-on; /* Memory cache controller */
+ };
+
+ ps_dcs0: power-controller@80230 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80230 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "dcs0";
+ apple,always-on; /* LPDDR4 interface */
+ };
+
+ ps_dcs1: power-controller@80238 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80238 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "dcs1";
+ apple,always-on; /* LPDDR4 interface */
+ };
+
+ ps_dcs2: power-controller@80240 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80240 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "dcs2";
+ apple,always-on; /* LPDDR4 interface */
+ };
+
+ ps_dcs3: power-controller@80248 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80248 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "dcs3";
+ apple,always-on; /* LPDDR4 interface */
+ };
+
+ ps_usb2host0_ohci: power-controller@80268 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80268 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "usb2host0_ohci";
+ power-domains = <&ps_usb2host0>;
+ };
+
+ ps_usb2host1_ohci: power-controller@80278 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80278 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "usb2host1_ohci";
+ power-domains = <&ps_usb2host1>;
+ };
+
+ ps_usb2host2_ohci: power-controller@80288 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80288 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "usb2host2_ohci";
+ power-domains = <&ps_usb2host2>;
+ };
+
+ ps_usbotg: power-controller@80290 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80290 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "usbotg";
+ power-domains = <&ps_usbctrl>;
+ };
+
+ ps_smx: power-controller@80298 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80298 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "smx";
+ apple,always-on; /* Apple fabric, critical block */
+ };
+
+ ps_sf: power-controller@802a0 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x802a0 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "sf";
+ apple,always-on; /* Apple fabric, critical block */
+ };
+
+ ps_mipi_dsi: power-controller@802b8 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x802b8 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "mipi_dsi";
+ power-domains = <&ps_rtmux>;
+ };
+
+ ps_dp: power-controller@802c0 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x802c0 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "dp";
+ power-domains = <&ps_disp0>;
+ };
+
+ ps_vdec: power-controller@802f8 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x802f8 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "vdec";
+ power-domains = <&ps_media>;
+ };
+
+ ps_venc: power-controller@80308 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80308 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "venc";
+ power-domains = <&ps_media>;
+ };
+
+ ps_pcie: power-controller@80310 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80310 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "pcie";
+ };
+
+ ps_pcie_aux: power-controller@80318 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80318 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "pcie_aux";
+ };
+
+ ps_pcie_link0: power-controller@80320 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80320 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "pcie_link0";
+ power-domains = <&ps_pcie>;
+ };
+
+ ps_pcie_link1: power-controller@80328 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80328 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "pcie_link1";
+ power-domains = <&ps_pcie>;
+ };
+
+ ps_pcie_link2: power-controller@80330 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80330 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "pcie_link2";
+ power-domains = <&ps_pcie>;
+ };
+
+ ps_pcie_link3: power-controller@80338 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80338 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "pcie_link3";
+ power-domains = <&ps_pcie>;
+ };
+
+ ps_gfx: power-controller@80340 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80340 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "gfx";
+ };
+
+ ps_sep: power-controller@80400 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80400 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "sep";
+ apple,always-on; /* Locked on */
+ };
+
+ ps_venc_pipe: power-controller@88000 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x88000 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "venc_pipe";
+ power-domains = <&ps_venc>;
+ };
+
+ ps_venc_me0: power-controller@88008 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x88008 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "venc_me0";
+ };
+
+ ps_venc_me1: power-controller@88010 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x88010 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "venc_me1";
+ };
+};
+
+&pmgr_mini {
+ ps_aop: power-controller@80000 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80000 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "aop";
+ power-domains = <&ps_aop_busif &ps_aop_cpu &ps_aop_filter>;
+ apple,always-on; /* Always on processor */
+ };
+
+ ps_debug: power-controller@80008 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80008 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "debug";
+ };
+
+ ps_aop_gpio: power-controller@80010 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80010 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "aop_gpio";
+ power-domains = <&ps_aop>;
+ };
+
+ ps_aop_cpu: power-controller@80040 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80040 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "aop_cpu";
+ };
+
+ ps_aop_filter: power-controller@80048 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80048 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "aop_filter";
+ };
+
+ ps_aop_busif: power-controller@80050 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80050 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "aop_busif";
+ };
+};
diff --git a/arch/arm64/boot/dts/apple/s800-0-3.dtsi b/arch/arm64/boot/dts/apple/s800-0-3.dtsi
new file mode 100644
index 000000000000..c0e9ae45627c
--- /dev/null
+++ b/arch/arm64/boot/dts/apple/s800-0-3.dtsi
@@ -0,0 +1,179 @@
+// SPDX-License-Identifier: GPL-2.0+ OR MIT
+/*
+ * Apple S8000/S8003 "A9" SoC
+ *
+ * This file contains parts common to both variants of the A9
+ *
+ * Copyright (c) 2022, Konrad Dybcio <konradybcio@kernel.org>
+ */
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/interrupt-controller/apple-aic.h>
+#include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/pinctrl/apple.h>
+
+/ {
+ interrupt-parent = <&aic>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ clkref: clock-ref {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <24000000>;
+ clock-output-names = "clkref";
+ };
+
+ cpus {
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ cpu0: cpu@0 {
+ compatible = "apple,twister";
+ reg = <0x0 0x0>;
+ cpu-release-addr = <0 0>; /* To be filled in by loader */
+ operating-points-v2 = <&twister_opp>;
+ performance-domains = <&cpufreq>;
+ enable-method = "spin-table";
+ device_type = "cpu";
+ };
+
+ cpu1: cpu@1 {
+ compatible = "apple,twister";
+ reg = <0x0 0x1>;
+ cpu-release-addr = <0 0>; /* To be filled in by loader */
+ operating-points-v2 = <&twister_opp>;
+ performance-domains = <&cpufreq>;
+ enable-method = "spin-table";
+ device_type = "cpu";
+ };
+ };
+
+ soc {
+ compatible = "simple-bus";
+ #address-cells = <2>;
+ #size-cells = <2>;
+ nonposted-mmio;
+ ranges;
+
+ cpufreq: performance-controller@202220000 {
+ compatible = "apple,s8000-cluster-cpufreq", "apple,t8103-cluster-cpufreq", "apple,cluster-cpufreq";
+ reg = <0x2 0x02220000 0 0x1000>;
+ #performance-domain-cells = <0>;
+ };
+
+ serial0: serial@20a0c0000 {
+ compatible = "apple,s5l-uart";
+ reg = <0x2 0x0a0c0000 0x0 0x4000>;
+ reg-io-width = <4>;
+ interrupt-parent = <&aic>;
+ interrupts = <AIC_IRQ 192 IRQ_TYPE_LEVEL_HIGH>;
+ /* Use the bootloader-enabled clocks for now. */
+ clocks = <&clkref>, <&clkref>;
+ clock-names = "uart", "clk_uart_baud0";
+ power-domains = <&ps_uart0>;
+ status = "disabled";
+ };
+
+ pmgr: power-management@20e000000 {
+ compatible = "apple,s8000-pmgr", "apple,pmgr", "syscon", "simple-mfd";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ reg = <0x2 0xe000000 0 0x8c000>;
+ };
+
+ aic: interrupt-controller@20e100000 {
+ compatible = "apple,s8000-aic", "apple,aic";
+ reg = <0x2 0x0e100000 0x0 0x100000>;
+ #interrupt-cells = <3>;
+ interrupt-controller;
+ power-domains = <&ps_aic>;
+ };
+
+ dwi_bl: backlight@20e200080 {
+ compatible = "apple,s8000-dwi-bl", "apple,dwi-bl";
+ reg = <0x2 0x0e200080 0x0 0x8>;
+ power-domains = <&ps_dwi>;
+ status = "disabled";
+ };
+
+ pinctrl_ap: pinctrl@20f100000 {
+ compatible = "apple,s8000-pinctrl", "apple,pinctrl";
+ reg = <0x2 0x0f100000 0x0 0x100000>;
+ power-domains = <&ps_gpio>;
+
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-ranges = <&pinctrl_ap 0 0 208>;
+ apple,npins = <208>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ interrupt-parent = <&aic>;
+ interrupts = <AIC_IRQ 42 IRQ_TYPE_LEVEL_HIGH>,
+ <AIC_IRQ 43 IRQ_TYPE_LEVEL_HIGH>,
+ <AIC_IRQ 44 IRQ_TYPE_LEVEL_HIGH>,
+ <AIC_IRQ 45 IRQ_TYPE_LEVEL_HIGH>,
+ <AIC_IRQ 46 IRQ_TYPE_LEVEL_HIGH>,
+ <AIC_IRQ 47 IRQ_TYPE_LEVEL_HIGH>,
+ <AIC_IRQ 48 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ pinctrl_aop: pinctrl@2100f0000 {
+ compatible = "apple,s8000-pinctrl", "apple,pinctrl";
+ reg = <0x2 0x100f0000 0x0 0x100000>;
+ power-domains = <&ps_aop_gpio>;
+
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-ranges = <&pinctrl_aop 0 0 42>;
+ apple,npins = <42>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ interrupt-parent = <&aic>;
+ interrupts = <AIC_IRQ 113 IRQ_TYPE_LEVEL_HIGH>,
+ <AIC_IRQ 114 IRQ_TYPE_LEVEL_HIGH>,
+ <AIC_IRQ 115 IRQ_TYPE_LEVEL_HIGH>,
+ <AIC_IRQ 116 IRQ_TYPE_LEVEL_HIGH>,
+ <AIC_IRQ 117 IRQ_TYPE_LEVEL_HIGH>,
+ <AIC_IRQ 118 IRQ_TYPE_LEVEL_HIGH>,
+ <AIC_IRQ 119 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ pmgr_mini: power-management@210200000 {
+ compatible = "apple,s8000-pmgr", "apple,pmgr", "syscon", "simple-mfd";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ reg = <0x2 0x10200000 0 0x84000>;
+ };
+
+ wdt: watchdog@2102b0000 {
+ compatible = "apple,s8000-wdt", "apple,wdt";
+ reg = <0x2 0x102b0000 0x0 0x4000>;
+ clocks = <&clkref>;
+ interrupt-parent = <&aic>;
+ interrupts = <AIC_IRQ 4 IRQ_TYPE_LEVEL_HIGH>;
+ };
+ };
+
+ timer {
+ compatible = "arm,armv8-timer";
+ interrupt-parent = <&aic>;
+ interrupt-names = "phys", "virt";
+ /* Note that A9 doesn't actually have a hypervisor (EL2 is not implemented). */
+ interrupts = <AIC_FIQ AIC_TMR_GUEST_PHYS IRQ_TYPE_LEVEL_HIGH>,
+ <AIC_FIQ AIC_TMR_GUEST_VIRT IRQ_TYPE_LEVEL_HIGH>;
+ };
+};
+
+#include "s800-0-3-pmgr.dtsi"
+
+/*
+ * The A9 was made by two separate fabs on two different process
+ * nodes: Samsung made the S8000 (APL0898) on 14nm and TSMC made
+ * the S8003 (APL1022) on 16nm. There are some minor differences
+ * such as timing in cpufreq state transitions.
+ */
diff --git a/arch/arm64/boot/dts/apple/s8000.dtsi b/arch/arm64/boot/dts/apple/s8000.dtsi
index 6e9046ea106c..72322f5677ab 100644
--- a/arch/arm64/boot/dts/apple/s8000.dtsi
+++ b/arch/arm64/boot/dts/apple/s8000.dtsi
@@ -4,141 +4,65 @@
*
* Other names: H8P, "Maui"
*
- * Copyright (c) 2022, Konrad Dybcio <konradybcio@kernel.org>
+ * Copyright (c) 2024, Nick Chan <towinchenmi@gmail.com>
*/
-#include <dt-bindings/gpio/gpio.h>
-#include <dt-bindings/interrupt-controller/apple-aic.h>
-#include <dt-bindings/interrupt-controller/irq.h>
-#include <dt-bindings/pinctrl/apple.h>
+#include "s800-0-3.dtsi"
/ {
- interrupt-parent = <&aic>;
- #address-cells = <2>;
- #size-cells = <2>;
+ twister_opp: opp-table {
+ compatible = "operating-points-v2";
- clkref: clock-ref {
- compatible = "fixed-clock";
- #clock-cells = <0>;
- clock-frequency = <24000000>;
- clock-output-names = "clkref";
- };
-
- cpus {
- #address-cells = <2>;
- #size-cells = <0>;
-
- cpu0: cpu@0 {
- compatible = "apple,twister";
- reg = <0x0 0x0>;
- cpu-release-addr = <0 0>; /* To be filled in by loader */
- enable-method = "spin-table";
- device_type = "cpu";
+ opp01 {
+ opp-hz = /bits/ 64 <300000000>;
+ opp-level = <1>;
+ clock-latency-ns = <650>;
};
-
- cpu1: cpu@1 {
- compatible = "apple,twister";
- reg = <0x0 0x1>;
- cpu-release-addr = <0 0>; /* To be filled in by loader */
- enable-method = "spin-table";
- device_type = "cpu";
+ opp02 {
+ opp-hz = /bits/ 64 <396000000>;
+ opp-level = <2>;
+ clock-latency-ns = <75000>;
};
- };
-
- soc {
- compatible = "simple-bus";
- #address-cells = <2>;
- #size-cells = <2>;
- nonposted-mmio;
- ranges;
-
- serial0: serial@20a0c0000 {
- compatible = "apple,s5l-uart";
- reg = <0x2 0x0a0c0000 0x0 0x4000>;
- reg-io-width = <4>;
- interrupt-parent = <&aic>;
- interrupts = <AIC_IRQ 192 IRQ_TYPE_LEVEL_HIGH>;
- /* Use the bootloader-enabled clocks for now. */
- clocks = <&clkref>, <&clkref>;
- clock-names = "uart", "clk_uart_baud0";
- status = "disabled";
+ opp03 {
+ opp-hz = /bits/ 64 <600000000>;
+ opp-level = <3>;
+ clock-latency-ns = <27000>;
};
-
- aic: interrupt-controller@20e100000 {
- compatible = "apple,s8000-aic", "apple,aic";
- reg = <0x2 0x0e100000 0x0 0x100000>;
- #interrupt-cells = <3>;
- interrupt-controller;
+ opp04 {
+ opp-hz = /bits/ 64 <912000000>;
+ opp-level = <4>;
+ clock-latency-ns = <32000>;
};
-
- pinctrl_ap: pinctrl@20f100000 {
- compatible = "apple,s8000-pinctrl", "apple,pinctrl";
- reg = <0x2 0x0f100000 0x0 0x100000>;
-
- gpio-controller;
- #gpio-cells = <2>;
- gpio-ranges = <&pinctrl_ap 0 0 208>;
- apple,npins = <208>;
-
- interrupt-controller;
- #interrupt-cells = <2>;
- interrupt-parent = <&aic>;
- interrupts = <AIC_IRQ 42 IRQ_TYPE_LEVEL_HIGH>,
- <AIC_IRQ 43 IRQ_TYPE_LEVEL_HIGH>,
- <AIC_IRQ 44 IRQ_TYPE_LEVEL_HIGH>,
- <AIC_IRQ 45 IRQ_TYPE_LEVEL_HIGH>,
- <AIC_IRQ 46 IRQ_TYPE_LEVEL_HIGH>,
- <AIC_IRQ 47 IRQ_TYPE_LEVEL_HIGH>,
- <AIC_IRQ 48 IRQ_TYPE_LEVEL_HIGH>;
+ opp05 {
+ opp-hz = /bits/ 64 <1200000000>;
+ opp-level = <5>;
+ clock-latency-ns = <35000>;
};
-
- pinctrl_aop: pinctrl@2100f0000 {
- compatible = "apple,s8000-pinctrl", "apple,pinctrl";
- reg = <0x2 0x100f0000 0x0 0x100000>;
-
- gpio-controller;
- #gpio-cells = <2>;
- gpio-ranges = <&pinctrl_aop 0 0 42>;
- apple,npins = <42>;
-
- interrupt-controller;
- #interrupt-cells = <2>;
- interrupt-parent = <&aic>;
- interrupts = <AIC_IRQ 113 IRQ_TYPE_LEVEL_HIGH>,
- <AIC_IRQ 114 IRQ_TYPE_LEVEL_HIGH>,
- <AIC_IRQ 115 IRQ_TYPE_LEVEL_HIGH>,
- <AIC_IRQ 116 IRQ_TYPE_LEVEL_HIGH>,
- <AIC_IRQ 117 IRQ_TYPE_LEVEL_HIGH>,
- <AIC_IRQ 118 IRQ_TYPE_LEVEL_HIGH>,
- <AIC_IRQ 119 IRQ_TYPE_LEVEL_HIGH>;
+ opp06 {
+ opp-hz = /bits/ 64 <1512000000>;
+ opp-level = <6>;
+ clock-latency-ns = <45000>;
};
-
- wdt: watchdog@2102b0000 {
- compatible = "apple,s8000-wdt", "apple,wdt";
- reg = <0x2 0x102b0000 0x0 0x4000>;
- clocks = <&clkref>;
- interrupt-parent = <&aic>;
- interrupts = <AIC_IRQ 4 IRQ_TYPE_LEVEL_HIGH>;
+ opp07 {
+ opp-hz = /bits/ 64 <1800000000>;
+ opp-level = <7>;
+ clock-latency-ns = <58000>;
};
- };
-
- timer {
- compatible = "arm,armv8-timer";
- interrupt-parent = <&aic>;
- interrupt-names = "phys", "virt";
- /* Note that A9 doesn't actually have a hypervisor (EL2 is not implemented). */
- interrupts = <AIC_FIQ AIC_TMR_GUEST_PHYS IRQ_TYPE_LEVEL_HIGH>,
- <AIC_FIQ AIC_TMR_GUEST_VIRT IRQ_TYPE_LEVEL_HIGH>;
+#if 0
+ /* Not available until CPU deep sleep is implemented */
+ opp08 {
+ opp-hz = /bits/ 64 <1844000000>;
+ opp-level = <8>;
+ clock-latency-ns = <58000>;
+ turbo-mode;
+ };
+#endif
};
};
/*
* The A9 was made by two separate fabs on two different process
* nodes: Samsung made the S8000 (APL0898) on 14nm and TSMC made
- * the S8003 (APL1022) on 16nm. While they are seemingly the same,
- * they do have distinct part numbers and devices using them have
- * distinct model names. There are currently no known differences
- * between these as far as Linux is concerned, but let's keep things
- * structured properly to make it easier to alter the behaviour of
- * one of the chips if need be.
+ * the S8003 (APL1022) on 16nm. There are some minor differences
+ * such as timing in cpufreq state transitions.
*/
diff --git a/arch/arm64/boot/dts/apple/s8001-common.dtsi b/arch/arm64/boot/dts/apple/s8001-common.dtsi
index e94d0e77653a..91b06e113894 100644
--- a/arch/arm64/boot/dts/apple/s8001-common.dtsi
+++ b/arch/arm64/boot/dts/apple/s8001-common.dtsi
@@ -24,6 +24,7 @@
framebuffer0: framebuffer@0 {
compatible = "apple,simple-framebuffer", "simple-framebuffer";
reg = <0 0 0 0>; /* To be filled by loader */
+ power-domains = <&ps_disp0 &ps_dp0>;
/* Format properties will be added by loader */
status = "disabled";
};
diff --git a/arch/arm64/boot/dts/apple/s8001-j98a-j99a.dtsi b/arch/arm64/boot/dts/apple/s8001-j98a-j99a.dtsi
new file mode 100644
index 000000000000..e66a4c1c138f
--- /dev/null
+++ b/arch/arm64/boot/dts/apple/s8001-j98a-j99a.dtsi
@@ -0,0 +1,26 @@
+// SPDX-License-Identifier: GPL-2.0+ OR MIT
+/*
+ * Apple iPad Pro (12.9-inch)
+ *
+ * This file contains parts common to the iPad Pro (12.9-inch).
+ *
+ * target-type: J98a, J99a
+ *
+ * Copyright (c) 2024, Nick Chan <towinchenmi@gmail.com>
+ */
+
+&ps_dcs4 {
+ apple,always-on; /* LPDDR4 interface */
+};
+
+&ps_dcs5 {
+ apple,always-on; /* LPDDR4 interface */
+};
+
+&ps_dcs6 {
+ apple,always-on; /* LPDDR4 interface */
+};
+
+&ps_dcs7 {
+ apple,always-on; /* LPDDR4 interface */
+};
diff --git a/arch/arm64/boot/dts/apple/s8001-j98a.dts b/arch/arm64/boot/dts/apple/s8001-j98a.dts
index 6d6b841e7ab0..162eca05c2d9 100644
--- a/arch/arm64/boot/dts/apple/s8001-j98a.dts
+++ b/arch/arm64/boot/dts/apple/s8001-j98a.dts
@@ -7,6 +7,7 @@
/dts-v1/;
#include "s8001-pro.dtsi"
+#include "s8001-j98a-j99a.dtsi"
/ {
compatible = "apple,j98a", "apple,s8001", "apple,arm-platform";
diff --git a/arch/arm64/boot/dts/apple/s8001-j99a.dts b/arch/arm64/boot/dts/apple/s8001-j99a.dts
index d20194b1cae7..7b765820c69e 100644
--- a/arch/arm64/boot/dts/apple/s8001-j99a.dts
+++ b/arch/arm64/boot/dts/apple/s8001-j99a.dts
@@ -7,6 +7,7 @@
/dts-v1/;
#include "s8001-pro.dtsi"
+#include "s8001-j98a-j99a.dtsi"
/ {
compatible = "apple,j99a", "apple,s8001", "apple,arm-platform";
diff --git a/arch/arm64/boot/dts/apple/s8001-pmgr.dtsi b/arch/arm64/boot/dts/apple/s8001-pmgr.dtsi
new file mode 100644
index 000000000000..859ab77ae92b
--- /dev/null
+++ b/arch/arm64/boot/dts/apple/s8001-pmgr.dtsi
@@ -0,0 +1,822 @@
+// SPDX-License-Identifier: GPL-2.0+ OR MIT
+/*
+ * PMGR Power domains for the Apple S8001 "A9X" SoC
+ *
+ * Copyright (c) 2024 Nick Chan <towinchenmi@gmail.com>
+ */
+
+&pmgr {
+ ps_cpu0: power-controller@80000 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80000 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "cpu0";
+ apple,always-on; /* Core device */
+ };
+
+ ps_cpu1: power-controller@80008 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80008 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "cpu1";
+ apple,always-on; /* Core device */
+ };
+
+ ps_cpm: power-controller@80040 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80040 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "cpm";
+ apple,always-on; /* Core device */
+ };
+
+ ps_sio_busif: power-controller@80148 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80148 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "sio_busif";
+ };
+
+ ps_sio_p: power-controller@80150 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80150 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "sio_p";
+ power-domains = <&ps_sio_busif>;
+ };
+
+ ps_sbr: power-controller@80100 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80100 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "sbr";
+ apple,always-on; /* Apple fabric, critical block */
+ };
+
+ ps_aic: power-controller@80108 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80108 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "aic";
+ apple,always-on; /* Core device */
+ };
+
+ ps_dwi: power-controller@80110 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80110 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "dwi";
+ };
+
+ ps_gpio: power-controller@80118 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80118 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "gpio";
+ };
+
+ ps_pcie_ref: power-controller@80140 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80140 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "pcie_ref";
+ };
+
+ ps_mca0: power-controller@80160 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80160 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "mca0";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_mca1: power-controller@80168 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80168 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "mca1";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_mca2: power-controller@80170 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80170 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "mca2";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_mca3: power-controller@80178 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80178 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "mca3";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_mca4: power-controller@80180 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80180 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "mca4";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_pwm0: power-controller@80188 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80188 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "pwm0";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_i2c0: power-controller@80190 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80190 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "i2c0";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_i2c1: power-controller@80198 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80198 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "i2c1";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_i2c2: power-controller@801a0 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x801a0 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "i2c2";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_i2c3: power-controller@801a8 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x801a8 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "i2c3";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_spi0: power-controller@801b0 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x801b0 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "spi0";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_spi1: power-controller@801b8 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x801b8 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "spi1";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_spi2: power-controller@801c0 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x801c0 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "spi2";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_spi3: power-controller@801c8 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x801c8 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "spi3";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_uart0: power-controller@801d0 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x801d0 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "uart0";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_uart1: power-controller@801d8 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x801d8 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "uart1";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_uart2: power-controller@801e0 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x801e0 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "uart2";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_uart3: power-controller@801e8 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x801e8 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "uart3";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_uart4: power-controller@801f0 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x801f0 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "uart4";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_uart5: power-controller@801f8 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x801f8 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "uart5";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_sio: power-controller@80158 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80158 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "sio";
+ power-domains = <&ps_sio_p>;
+ apple,always-on; /* Core device */
+ };
+
+ ps_hsic0_phy: power-controller@80128 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80128 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "hsic0_phy";
+ power-domains = <&ps_usb2host1>;
+ };
+
+ ps_isp_sens0: power-controller@80130 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80130 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "isp_sens0";
+ };
+
+ ps_isp_sens1: power-controller@80138 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80138 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "isp_sens1";
+ };
+
+ ps_pms: power-controller@80120 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80120 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "pms";
+ apple,always-on; /* Core device */
+ };
+
+ ps_usb: power-controller@80278 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80278 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "usb";
+ };
+
+ ps_usbctrl: power-controller@80280 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80280 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "usbctrl";
+ power-domains = <&ps_usb>;
+ };
+
+ ps_usb2host0: power-controller@80288 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80288 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "usb2host0";
+ power-domains = <&ps_usbctrl>;
+ };
+
+ ps_usb2host1: power-controller@80298 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80298 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "usb2host1";
+ power-domains = <&ps_usbctrl>;
+ };
+
+ ps_usb2host2: power-controller@802a8 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x802a8 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "usb2host2";
+ power-domains = <&ps_usbctrl>;
+ };
+
+ ps_rtmux: power-controller@802d0 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x802d0 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "rtmux";
+ apple,always-on; /* Core device */
+ };
+
+ ps_disp1mux: power-controller@802e8 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x802e8 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "disp1mux";
+ };
+
+ ps_disp0: power-controller@802d8 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x802d8 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "disp0";
+ power-domains = <&ps_rtmux>;
+ };
+
+ ps_disp1: power-controller@802f0 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x802f0 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "disp1";
+ power-domains = <&ps_disp1mux>;
+ };
+
+ ps_uart6: power-controller@80200 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80200 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "uart6";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_uart7: power-controller@80208 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80208 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "uart7";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_uart8: power-controller@80210 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80210 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "uart8";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_aes0: power-controller@80218 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80218 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "aes0";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_mcc: power-controller@80230 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80230 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "mcc";
+ apple,always-on; /* Memory cache controller */
+ };
+
+ ps_dcs0: power-controller@80238 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80238 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "dcs0";
+ apple,always-on; /* LPDDR4 interface */
+ };
+
+ ps_dcs1: power-controller@80240 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80240 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "dcs1";
+ apple,always-on; /* LPDDR4 interface */
+ };
+
+ ps_dcs2: power-controller@80248 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80248 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "dcs2";
+ apple,always-on; /* LPDDR4 interface */
+ };
+
+ ps_dcs3: power-controller@80250 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80250 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "dcs3";
+ apple,always-on; /* LPDDR4 interface */
+ };
+
+ ps_dcs4: power-controller@80258 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80258 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "dcs4";
+ };
+
+ ps_dcs5: power-controller@80260 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80260 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "dcs5";
+ };
+
+ ps_dcs6: power-controller@80268 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80268 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "dcs6";
+ };
+
+ ps_dcs7: power-controller@80270 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80270 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "dcs7";
+ };
+
+ ps_usb2host0_ohci: power-controller@80290 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80290 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "usb2host0_ohci";
+ power-domains = <&ps_usb2host0>;
+ };
+
+ ps_usbotg: power-controller@802b8 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x802b8 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "usbotg";
+ power-domains = <&ps_usbctrl>;
+ };
+
+ ps_smx: power-controller@802c0 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x802c0 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "smx";
+ apple,always-on; /* Apple fabric, critical block */
+ };
+
+ ps_sf: power-controller@802c8 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x802c8 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "sf";
+ apple,always-on; /* Apple fabric, critical block */
+ };
+
+ ps_dp0: power-controller@802e0 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x802e0 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "dp0";
+ power-domains = <&ps_disp0>;
+ };
+
+ ps_dp1: power-controller@802f8 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x802f8 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "dp1";
+ power-domains = <&ps_disp1>;
+ };
+
+ ps_dpa0: power-controller@80220 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80220 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "dpa0";
+ };
+
+ ps_dpa1: power-controller@80228 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80228 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "dpa1";
+ };
+
+ ps_media: power-controller@80308 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80308 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "media";
+ };
+
+ ps_isp: power-controller@80300 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80300 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "isp";
+ power-domains = <&ps_rtmux>;
+ };
+
+ ps_msr: power-controller@80318 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80318 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "msr";
+ power-domains = <&ps_media>;
+ };
+
+ ps_jpg: power-controller@80310 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80310 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "jpg";
+ power-domains = <&ps_media>;
+ };
+
+ ps_venc: power-controller@80340 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80340 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "venc";
+ power-domains = <&ps_media>;
+ };
+
+ ps_pcie: power-controller@80348 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80348 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "pcie";
+ };
+
+ ps_srs: power-controller@80390 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80390 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "srs";
+ power-domains = <&ps_media>;
+ };
+
+ ps_pcie_aux: power-controller@80350 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80350 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "pcie_aux";
+ };
+
+ ps_pcie_link0: power-controller@80358 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80358 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "pcie_link0";
+ power-domains = <&ps_pcie>;
+ };
+
+ ps_pcie_link1: power-controller@80360 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80360 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "pcie_link1";
+ power-domains = <&ps_pcie>;
+ };
+
+ ps_pcie_link2: power-controller@80368 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80368 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "pcie_link2";
+ power-domains = <&ps_pcie>;
+ };
+
+ ps_pcie_link3: power-controller@80370 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80370 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "pcie_link3";
+ power-domains = <&ps_pcie>;
+ };
+
+ ps_pcie_link4: power-controller@80378 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80378 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "pcie_link4";
+ power-domains = <&ps_pcie>;
+ };
+
+ ps_pcie_link5: power-controller@80380 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80380 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "pcie_link5";
+ power-domains = <&ps_pcie>;
+ };
+
+ ps_vdec: power-controller@80330 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80330 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "vdec";
+ power-domains = <&ps_media>;
+ };
+
+ ps_gfx: power-controller@80388 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80388 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "gfx";
+ };
+
+ ps_pmp: power-controller@80320 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80320 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "pmp";
+ };
+
+ ps_pms_sram: power-controller@80328 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80328 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "pms_sram";
+ };
+
+ ps_sep: power-controller@80400 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80400 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "sep";
+ apple,always-on; /* Locked on */
+ };
+
+ ps_venc_pipe: power-controller@88000 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x88000 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "venc_pipe";
+ power-domains = <&ps_venc>;
+ };
+
+ ps_venc_me0: power-controller@88008 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x88008 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "venc_me0";
+ };
+
+ ps_venc_me1: power-controller@88010 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x88010 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "venc_me1";
+ };
+};
+
+&pmgr_mini {
+ ps_aop: power-controller@80000 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80000 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "aop";
+ power-domains = <&ps_aop_cpu &ps_aop_filter &ps_aop_busif>;
+ apple,always-on; /* Always on processor */
+ };
+
+ ps_debug: power-controller@80008 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80008 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "debug";
+ };
+
+ ps_aop_gpio: power-controller@80010 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80010 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "aop_gpio";
+ };
+
+ ps_aop_cpu: power-controller@80040 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80040 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "aop_cpu";
+ };
+
+ ps_aop_filter: power-controller@80048 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80048 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "aop_filter";
+ };
+
+ ps_aop_busif: power-controller@80050 {
+ compatible = "apple,s8000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80050 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "aop_busif";
+ };
+};
diff --git a/arch/arm64/boot/dts/apple/s8001.dtsi b/arch/arm64/boot/dts/apple/s8001.dtsi
index 23ee3238844d..d56d49c048bb 100644
--- a/arch/arm64/boot/dts/apple/s8001.dtsi
+++ b/arch/arm64/boot/dts/apple/s8001.dtsi
@@ -32,6 +32,8 @@
compatible = "apple,twister";
reg = <0x0 0x0>;
cpu-release-addr = <0 0>; /* To be filled in by loader */
+ operating-points-v2 = <&twister_opp>;
+ performance-domains = <&cpufreq>;
enable-method = "spin-table";
device_type = "cpu";
};
@@ -40,11 +42,62 @@
compatible = "apple,twister";
reg = <0x0 0x1>;
cpu-release-addr = <0 0>; /* To be filled in by loader */
+ operating-points-v2 = <&twister_opp>;
+ performance-domains = <&cpufreq>;
enable-method = "spin-table";
device_type = "cpu";
};
};
+ twister_opp: opp-table {
+ compatible = "operating-points-v2";
+
+ opp01 {
+ opp-hz = /bits/ 64 <300000000>;
+ opp-level = <1>;
+ clock-latency-ns = <800>;
+ };
+ opp02 {
+ opp-hz = /bits/ 64 <396000000>;
+ opp-level = <2>;
+ clock-latency-ns = <53000>;
+ };
+ opp03 {
+ opp-hz = /bits/ 64 <792000000>;
+ opp-level = <3>;
+ clock-latency-ns = <18000>;
+ };
+ opp04 {
+ opp-hz = /bits/ 64 <1080000000>;
+ opp-level = <4>;
+ clock-latency-ns = <21000>;
+ };
+ opp05 {
+ opp-hz = /bits/ 64 <1440000000>;
+ opp-level = <5>;
+ clock-latency-ns = <25000>;
+ };
+ opp06 {
+ opp-hz = /bits/ 64 <1800000000>;
+ opp-level = <6>;
+ clock-latency-ns = <33000>;
+ };
+ opp07 {
+ opp-hz = /bits/ 64 <2160000000>;
+ opp-level = <7>;
+ clock-latency-ns = <45000>;
+ };
+#if 0
+ /* Not available until CPU deep sleep is implemented */
+ opp08 {
+ opp-hz = /bits/ 64 <2160000000>;
+ opp-level = <8>;
+ clock-latency-ns = <45000>;
+ turbo-mode;
+ };
+#endif
+ };
+
soc {
compatible = "simple-bus";
#address-cells = <2>;
@@ -52,6 +105,12 @@
nonposted-mmio;
ranges;
+ cpufreq: performance-controller@202220000 {
+ compatible = "apple,s8000-cluster-cpufreq", "apple,t8103-cluster-cpufreq", "apple,cluster-cpufreq";
+ reg = <0x2 0x02220000 0 0x1000>;
+ #performance-domain-cells = <0>;
+ };
+
serial0: serial@20a0c0000 {
compatible = "apple,s5l-uart";
reg = <0x2 0x0a0c0000 0x0 0x4000>;
@@ -61,19 +120,30 @@
/* Use the bootloader-enabled clocks for now. */
clocks = <&clkref>, <&clkref>;
clock-names = "uart", "clk_uart_baud0";
+ power-domains = <&ps_uart0>;
status = "disabled";
};
+ pmgr: power-management@20e000000 {
+ compatible = "apple,s8000-pmgr", "apple,pmgr", "syscon", "simple-mfd";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ reg = <0x2 0xe000000 0 0x8c000>;
+ };
+
aic: interrupt-controller@20e100000 {
compatible = "apple,s8000-aic", "apple,aic";
reg = <0x2 0x0e100000 0x0 0x100000>;
#interrupt-cells = <3>;
interrupt-controller;
+ power-domains = <&ps_aic>;
};
pinctrl_ap: pinctrl@20f100000 {
compatible = "apple,s8000-pinctrl", "apple,pinctrl";
reg = <0x2 0x0f100000 0x0 0x100000>;
+ power-domains = <&ps_gpio>;
gpio-controller;
#gpio-cells = <2>;
@@ -95,6 +165,7 @@
pinctrl_aop: pinctrl@2100f0000 {
compatible = "apple,s8000-pinctrl", "apple,pinctrl";
reg = <0x2 0x100f0000 0x0 0x100000>;
+ power-domains = <&ps_aop_gpio>;
gpio-controller;
#gpio-cells = <2>;
@@ -113,6 +184,14 @@
<AIC_IRQ 134 IRQ_TYPE_LEVEL_HIGH>;
};
+ pmgr_mini: power-management@210200000 {
+ compatible = "apple,s8000-pmgr", "apple,pmgr", "syscon", "simple-mfd";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ reg = <0x2 0x10200000 0 0x84000>;
+ };
+
wdt: watchdog@2102b0000 {
compatible = "apple,s8000-wdt", "apple,wdt";
reg = <0x2 0x102b0000 0x0 0x4000>;
@@ -131,3 +210,5 @@
<AIC_FIQ AIC_TMR_GUEST_VIRT IRQ_TYPE_LEVEL_HIGH>;
};
};
+
+#include "s8001-pmgr.dtsi"
diff --git a/arch/arm64/boot/dts/apple/s8003.dtsi b/arch/arm64/boot/dts/apple/s8003.dtsi
index 7e4ad4f7e499..79df5c783260 100644
--- a/arch/arm64/boot/dts/apple/s8003.dtsi
+++ b/arch/arm64/boot/dts/apple/s8003.dtsi
@@ -4,18 +4,65 @@
*
* Other names: H8P, "Malta"
*
- * Copyright (c) 2022, Konrad Dybcio <konradybcio@kernel.org>
+ * Copyright (c) 2024, Nick Chan <towinchenmi@gmail.com>
*/
-#include "s8000.dtsi"
+#include "s800-0-3.dtsi"
+
+/ {
+ twister_opp: opp-table {
+ compatible = "operating-points-v2";
+
+ opp01 {
+ opp-hz = /bits/ 64 <300000000>;
+ opp-level = <1>;
+ clock-latency-ns = <500>;
+ };
+ opp02 {
+ opp-hz = /bits/ 64 <396000000>;
+ opp-level = <2>;
+ clock-latency-ns = <45000>;
+ };
+ opp03 {
+ opp-hz = /bits/ 64 <600000000>;
+ opp-level = <3>;
+ clock-latency-ns = <22000>;
+ };
+ opp04 {
+ opp-hz = /bits/ 64 <912000000>;
+ opp-level = <4>;
+ clock-latency-ns = <25000>;
+ };
+ opp05 {
+ opp-hz = /bits/ 64 <1200000000>;
+ opp-level = <5>;
+ clock-latency-ns = <28000>;
+ };
+ opp06 {
+ opp-hz = /bits/ 64 <1512000000>;
+ opp-level = <6>;
+ clock-latency-ns = <35000>;
+ };
+ opp07 {
+ opp-hz = /bits/ 64 <1800000000>;
+ opp-level = <7>;
+ clock-latency-ns = <38000>;
+ };
+#if 0
+ /* Not available until CPU deep sleep is implemented */
+ opp08 {
+ opp-hz = /bits/ 64 <1844000000>;
+ opp-level = <8>;
+ clock-latency-ns = <38000>;
+ turbo-mode;
+ };
+#endif
+ };
+};
/*
* The A9 was made by two separate fabs on two different process
* nodes: Samsung made the S8000 (APL0898) on 14nm and TSMC made
- * the S8003 (APL1022) on 16nm. While they are seemingly the same,
- * they do have distinct part numbers and devices using them have
- * distinct model names. There are currently no known differences
- * between these as far as Linux is concerned, but let's keep things
- * structured properly to make it easier to alter the behaviour of
- * one of the chips if need be.
+ * the S8003 (APL1022) on 16nm. There are some minor differences
+ * such as timing in cpufreq state transitions.
*/
diff --git a/arch/arm64/boot/dts/apple/s800x-6s.dtsi b/arch/arm64/boot/dts/apple/s800x-6s.dtsi
index 49b04db310c6..1dcf80cc2920 100644
--- a/arch/arm64/boot/dts/apple/s800x-6s.dtsi
+++ b/arch/arm64/boot/dts/apple/s800x-6s.dtsi
@@ -47,3 +47,7 @@
};
};
};
+
+&framebuffer0 {
+ power-domains = <&ps_disp0 &ps_mipi_dsi>;
+};
diff --git a/arch/arm64/boot/dts/apple/s800x-ipad5.dtsi b/arch/arm64/boot/dts/apple/s800x-ipad5.dtsi
index 32570ed3cdf0..c1701e81f0c1 100644
--- a/arch/arm64/boot/dts/apple/s800x-ipad5.dtsi
+++ b/arch/arm64/boot/dts/apple/s800x-ipad5.dtsi
@@ -41,3 +41,7 @@
};
};
};
+
+&framebuffer0 {
+ power-domains = <&ps_disp0 &ps_dp>;
+};
diff --git a/arch/arm64/boot/dts/apple/s800x-se.dtsi b/arch/arm64/boot/dts/apple/s800x-se.dtsi
index a1a5690e8371..deb7c7cc90f6 100644
--- a/arch/arm64/boot/dts/apple/s800x-se.dtsi
+++ b/arch/arm64/boot/dts/apple/s800x-se.dtsi
@@ -47,3 +47,7 @@
};
};
};
+
+&framebuffer0 {
+ power-domains = <&ps_disp0 &ps_mipi_dsi>;
+};
diff --git a/arch/arm64/boot/dts/apple/spi1-nvram.dtsi b/arch/arm64/boot/dts/apple/spi1-nvram.dtsi
new file mode 100644
index 000000000000..3df2fd3993b5
--- /dev/null
+++ b/arch/arm64/boot/dts/apple/spi1-nvram.dtsi
@@ -0,0 +1,39 @@
+// SPDX-License-Identifier: GPL-2.0+ OR MIT
+//
+// Devicetree include for common spi-nor nvram flash.
+//
+// Apple uses a consistent configuration for the nvram on all known M1* and
+// M2* devices.
+//
+// Copyright The Asahi Linux Contributors
+
+/ {
+ aliases {
+ nvram = &nvram;
+ };
+};
+
+&spi1 {
+ status = "okay";
+
+ flash@0 {
+ compatible = "jedec,spi-nor";
+ reg = <0x0>;
+ spi-max-frequency = <25000000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ partitions {
+ compatible = "fixed-partitions";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ nvram: partition@700000 {
+ label = "nvram";
+ /* To be filled by the loader */
+ reg = <0x0 0x0>;
+ status = "disabled";
+ };
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/apple/t600x-common.dtsi b/arch/arm64/boot/dts/apple/t600x-common.dtsi
index fa8ead699363..87dfc13d7417 100644
--- a/arch/arm64/boot/dts/apple/t600x-common.dtsi
+++ b/arch/arm64/boot/dts/apple/t600x-common.dtsi
@@ -362,6 +362,13 @@
clock-output-names = "clkref";
};
+ clk_200m: clock-200m {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <200000000>;
+ clock-output-names = "clk_200m";
+ };
+
/*
* This is a fabulated representation of the input clock
* to NCO since we don't know the true clock tree.
diff --git a/arch/arm64/boot/dts/apple/t600x-die0.dtsi b/arch/arm64/boot/dts/apple/t600x-die0.dtsi
index b1c875e692c8..e9b3140ba1a9 100644
--- a/arch/arm64/boot/dts/apple/t600x-die0.dtsi
+++ b/arch/arm64/boot/dts/apple/t600x-die0.dtsi
@@ -163,6 +163,34 @@
status = "disabled";
};
+ spi1: spi@39b104000 {
+ compatible = "apple,t6000-spi", "apple,spi";
+ reg = <0x3 0x9b104000 0x0 0x4000>;
+ interrupt-parent = <&aic>;
+ interrupts = <AIC_IRQ 0 1107 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clocks = <&clk_200m>;
+ pinctrl-0 = <&spi1_pins>;
+ pinctrl-names = "default";
+ power-domains = <&ps_spi1>;
+ status = "disabled";
+ };
+
+ spi3: spi@39b10c000 {
+ compatible = "apple,t6000-spi", "apple,spi";
+ reg = <0x3 0x9b10c000 0x0 0x4000>;
+ interrupt-parent = <&aic>;
+ interrupts = <AIC_IRQ 0 1109 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clocks = <&clkref>;
+ pinctrl-0 = <&spi3_pins>;
+ pinctrl-names = "default";
+ power-domains = <&ps_spi3>;
+ status = "disabled";
+ };
+
serial0: serial@39b200000 {
compatible = "apple,s5l-uart";
reg = <0x3 0x9b200000 0x0 0x1000>;
diff --git a/arch/arm64/boot/dts/apple/t600x-gpio-pins.dtsi b/arch/arm64/boot/dts/apple/t600x-gpio-pins.dtsi
index b31f1a7a2b3f..1a994c3c1b79 100644
--- a/arch/arm64/boot/dts/apple/t600x-gpio-pins.dtsi
+++ b/arch/arm64/boot/dts/apple/t600x-gpio-pins.dtsi
@@ -36,6 +36,20 @@
<APPLE_PINMUX(101, 1)>;
};
+ spi1_pins: spi1-pins {
+ pinmux = <APPLE_PINMUX(10, 1)>,
+ <APPLE_PINMUX(11, 1)>,
+ <APPLE_PINMUX(32, 1)>,
+ <APPLE_PINMUX(33, 1)>;
+ };
+
+ spi3_pins: spi3-pins {
+ pinmux = <APPLE_PINMUX(52, 1)>,
+ <APPLE_PINMUX(53, 1)>,
+ <APPLE_PINMUX(54, 1)>,
+ <APPLE_PINMUX(55, 1)>;
+ };
+
pcie_pins: pcie-pins {
pinmux = <APPLE_PINMUX(0, 1)>,
<APPLE_PINMUX(1, 1)>,
diff --git a/arch/arm64/boot/dts/apple/t600x-j314-j316.dtsi b/arch/arm64/boot/dts/apple/t600x-j314-j316.dtsi
index 2e471dfe43cf..22ebc78e120b 100644
--- a/arch/arm64/boot/dts/apple/t600x-j314-j316.dtsi
+++ b/arch/arm64/boot/dts/apple/t600x-j314-j316.dtsi
@@ -119,3 +119,5 @@
&fpwm0 {
status = "okay";
};
+
+#include "spi1-nvram.dtsi"
diff --git a/arch/arm64/boot/dts/apple/t600x-j375.dtsi b/arch/arm64/boot/dts/apple/t600x-j375.dtsi
index 1e5a19e49b08..d5b985ad5679 100644
--- a/arch/arm64/boot/dts/apple/t600x-j375.dtsi
+++ b/arch/arm64/boot/dts/apple/t600x-j375.dtsi
@@ -126,3 +126,5 @@
&pcie0_dart_3 {
status = "okay";
};
+
+#include "spi1-nvram.dtsi"
diff --git a/arch/arm64/boot/dts/apple/t7000-6.dtsi b/arch/arm64/boot/dts/apple/t7000-6.dtsi
index f60ea4a4a387..7048d7383982 100644
--- a/arch/arm64/boot/dts/apple/t7000-6.dtsi
+++ b/arch/arm64/boot/dts/apple/t7000-6.dtsi
@@ -48,3 +48,11 @@
};
};
};
+
+&framebuffer0 {
+ power-domains = <&ps_disp0 &ps_mipi_dsi>;
+};
+
+&typhoon_opp06 {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/apple/t7000-handheld.dtsi b/arch/arm64/boot/dts/apple/t7000-handheld.dtsi
index 8984c9ec6cc8..7b58aa648b53 100644
--- a/arch/arm64/boot/dts/apple/t7000-handheld.dtsi
+++ b/arch/arm64/boot/dts/apple/t7000-handheld.dtsi
@@ -22,6 +22,10 @@
};
};
+&dwi_bl {
+ status = "okay";
+};
+
&serial0 {
status = "okay";
};
diff --git a/arch/arm64/boot/dts/apple/t7000-j42d.dts b/arch/arm64/boot/dts/apple/t7000-j42d.dts
index 2231db6a739d..2ec9e06cc63f 100644
--- a/arch/arm64/boot/dts/apple/t7000-j42d.dts
+++ b/arch/arm64/boot/dts/apple/t7000-j42d.dts
@@ -20,6 +20,7 @@
framebuffer0: framebuffer@0 {
compatible = "apple,simple-framebuffer", "simple-framebuffer";
reg = <0 0 0 0>; /* To be filled by loader */
+ power-domains = <&ps_disp0 &ps_dp>;
/* Format properties will be added by loader */
status = "disabled";
};
@@ -29,3 +30,7 @@
&serial6 {
status = "okay";
};
+
+&typhoon_opp06 {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/apple/t7000-mini4.dtsi b/arch/arm64/boot/dts/apple/t7000-mini4.dtsi
index c64ddc402fda..cc235c5a0c43 100644
--- a/arch/arm64/boot/dts/apple/t7000-mini4.dtsi
+++ b/arch/arm64/boot/dts/apple/t7000-mini4.dtsi
@@ -49,3 +49,15 @@
};
};
};
+
+&framebuffer0 {
+ power-domains = <&ps_disp0 &ps_dp>;
+};
+
+&typhoon_opp06 {
+ status = "okay";
+};
+
+&typhoon_opp07 {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/apple/t7000-n102.dts b/arch/arm64/boot/dts/apple/t7000-n102.dts
index 9c55d339ba4e..99eb8a2b8c73 100644
--- a/arch/arm64/boot/dts/apple/t7000-n102.dts
+++ b/arch/arm64/boot/dts/apple/t7000-n102.dts
@@ -46,3 +46,7 @@
};
};
};
+
+&framebuffer0 {
+ power-domains = <&ps_disp0 &ps_mipi_dsi>;
+};
diff --git a/arch/arm64/boot/dts/apple/t7000-pmgr.dtsi b/arch/arm64/boot/dts/apple/t7000-pmgr.dtsi
new file mode 100644
index 000000000000..5948fa7afffc
--- /dev/null
+++ b/arch/arm64/boot/dts/apple/t7000-pmgr.dtsi
@@ -0,0 +1,641 @@
+// SPDX-License-Identifier: GPL-2.0+ OR MIT
+/*
+ * PMGR Power domains for the Apple T7000 "A8" SoC
+ *
+ * Copyright (c) 2024, Nick Chan <towinchenmi@gmail.com>
+ */
+&pmgr {
+ ps_cpu0: power-controller@20000 {
+ compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x20000 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "cpu0";
+ apple,always-on; /* Core device */
+ };
+
+ ps_cpu1: power-controller@20008 {
+ compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x20008 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "cpu1";
+ apple,always-on; /* Core device */
+ };
+
+ ps_cpm: power-controller@20040 {
+ compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x20040 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "cpm";
+ apple,always-on; /* Core device */
+ };
+
+ ps_sio_p: power-controller@201f8 {
+ compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x201f8 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "sio_p";
+ };
+
+ ps_lio: power-controller@20100 {
+ compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x20100 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "lio";
+ apple,always-on; /* Core device */
+ };
+
+ ps_iomux: power-controller@20108 {
+ compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x20108 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "iomux";
+ apple,always-on; /* Core device */
+ };
+
+ ps_aic: power-controller@20110 {
+ compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x20110 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "aic";
+ apple,always-on; /* Core device */
+ };
+
+ ps_debug: power-controller@20118 {
+ compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x20118 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "debug";
+ };
+
+ ps_dwi: power-controller@20120 {
+ compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x20120 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "dwi";
+ };
+
+ ps_gpio: power-controller@20128 {
+ compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x20128 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "gpio";
+ };
+
+ ps_mca0: power-controller@20130 {
+ compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x20130 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "mca0";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_mca1: power-controller@20138 {
+ compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x20138 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "mca1";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_mca2: power-controller@20140 {
+ compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x20140 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "mca2";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_mca3: power-controller@20148 {
+ compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x20148 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "mca3";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_mca4: power-controller@20150 {
+ compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x20150 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "mca4";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_pwm0: power-controller@20158 {
+ compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x20158 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "pwm0";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_i2c0: power-controller@20160 {
+ compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x20160 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "i2c0";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_i2c1: power-controller@20168 {
+ compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x20168 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "i2c1";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_i2c2: power-controller@20170 {
+ compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x20170 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "i2c2";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_i2c3: power-controller@20178 {
+ compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x20178 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "i2c3";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_spi0: power-controller@20180 {
+ compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x20180 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "spi0";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_spi1: power-controller@20188 {
+ compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x20188 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "spi1";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_spi2: power-controller@20190 {
+ compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x20190 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "spi2";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_spi3: power-controller@20198 {
+ compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x20198 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "spi3";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_uart0: power-controller@201a0 {
+ compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x201a0 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "uart0";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_uart1: power-controller@201a8 {
+ compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x201a8 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "uart1";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_uart2: power-controller@201b0 {
+ compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x201b0 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "uart2";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_uart3: power-controller@201b8 {
+ compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x201b8 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "uart3";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_uart4: power-controller@201c0 {
+ compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x201c0 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "uart4";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_uart5: power-controller@201c8 {
+ compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x201c8 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "uart5";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_uart6: power-controller@201d0 {
+ compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x201d0 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "uart6";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_uart7: power-controller@201d8 {
+ compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x201d8 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "uart7";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_uart8: power-controller@201e0 {
+ compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x201e0 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "uart8";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_aes0: power-controller@201e8 {
+ compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x201e8 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "aes0";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_sio: power-controller@201f0 {
+ compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x201f0 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "sio";
+ power-domains = <&ps_sio_p>;
+ apple,always-on; /* Core device */
+ };
+
+ ps_usb: power-controller@20248 {
+ compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x20248 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "usb";
+ };
+
+ ps_usbctrl: power-controller@20250 {
+ compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x20250 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "usbctrl";
+ power-domains = <&ps_usb>;
+ };
+
+ ps_usb2host0: power-controller@20258 {
+ compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x20258 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "usb2host0";
+ power-domains = <&ps_usbctrl>;
+ };
+
+ ps_usb2host1: power-controller@20268 {
+ compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x20268 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "usb2host1";
+ power-domains = <&ps_usbctrl>;
+ };
+
+ ps_usb2host2: power-controller@20278 {
+ compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x20278 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "usb2host2";
+ power-domains = <&ps_usbctrl>;
+ };
+
+ ps_disp_busmux: power-controller@202a8 {
+ compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x202a8 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "disp_busmux";
+ };
+
+ ps_media: power-controller@202d8 {
+ compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x202d8 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "media";
+ };
+
+ ps_isp: power-controller@202d0 {
+ compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x202d0 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "isp";
+ };
+
+ ps_msr: power-controller@202e0 {
+ compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x202e0 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "msr";
+ power-domains = <&ps_media>;
+ };
+
+ ps_jpg: power-controller@202e8 {
+ compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x202e8 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "jpg";
+ power-domains = <&ps_media>;
+ };
+
+ ps_disp0: power-controller@202b0 {
+ compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x202b0 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "disp0";
+ power-domains = <&ps_disp_busmux>;
+ };
+
+ ps_disp1: power-controller@202c8 {
+ compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x202c8 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "disp1";
+ power-domains = <&ps_disp_busmux>;
+ };
+
+ ps_pcie_ref: power-controller@20220 {
+ compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x20220 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "pcie_ref";
+ };
+
+ ps_hsic0_phy: power-controller@20200 {
+ compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x20200 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "hsic0_phy";
+ power-domains = <&ps_usb2host1>;
+ };
+
+ ps_hsic1_phy: power-controller@20208 {
+ compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x20208 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "hsic1_phy";
+ power-domains = <&ps_usb2host2>;
+ };
+
+ ps_ispsens0: power-controller@20210 {
+ compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x20210 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "ispsens0";
+ };
+
+ ps_ispsens1: power-controller@20218 {
+ compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x20218 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "ispsens1";
+ };
+
+ ps_mcc: power-controller@20230 {
+ compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x20230 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "mcc";
+ apple,always-on; /* Memory cache controller */
+ };
+
+ ps_mcu: power-controller@20238 {
+ compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x20238 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "mcu";
+ apple,always-on; /* Core device */
+ };
+
+ ps_amp: power-controller@20240 {
+ compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x20240 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "amp";
+ apple,always-on; /* Core device */
+ };
+
+ ps_usb2host0_ohci: power-controller@20260 {
+ compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x20260 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "usb2host0_ohci";
+ power-domains = <&ps_usb2host0>;
+ };
+
+ ps_usbotg: power-controller@20288 {
+ compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x20288 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "usbotg";
+ power-domains = <&ps_usbctrl>;
+ };
+
+ ps_smx: power-controller@20290 {
+ compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x20290 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "smx";
+ apple,always-on; /* Apple Fabric, critical block */
+ };
+
+ ps_sf: power-controller@20298 {
+ compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x20298 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "sf";
+ apple,always-on; /* Apple Fabric, critical block */
+ };
+
+ ps_cp: power-controller@202a0 {
+ compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x202a0 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "cp";
+ apple,always-on; /* Core device */
+ };
+
+ ps_mipi_dsi: power-controller@202b8 {
+ compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x202b8 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "mipi_dsi";
+ power-domains = <&ps_disp_busmux>;
+ };
+
+ ps_dp: power-controller@202c0 {
+ compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x202c0 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "dp";
+ power-domains = <&ps_disp0>;
+ };
+
+ ps_vdec: power-controller@202f0 {
+ compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x202f0 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "vdec";
+ power-domains = <&ps_media>;
+ };
+
+ ps_ans: power-controller@20318 {
+ compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x20318 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "ans";
+ };
+
+ ps_venc: power-controller@20300 {
+ compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x20300 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "venc";
+ power-domains = <&ps_media>;
+ };
+
+ ps_pcie: power-controller@20308 {
+ compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x20308 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "pcie";
+ };
+
+ ps_pcie_aux: power-controller@20310 {
+ compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x20310 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "pcie_aux";
+ };
+
+ ps_gfx: power-controller@20320 {
+ compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x20320 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "gfx";
+ };
+
+ ps_sep: power-controller@20400 {
+ compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x20400 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "sep";
+ apple,always-on; /* Locked on */
+ };
+
+ ps_venc_pipe: power-controller@21000 {
+ compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x21000 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "venc_pipe";
+ power-domains = <&ps_venc>;
+ };
+
+ ps_venc_me0: power-controller@21008 {
+ compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x21008 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "venc_me0";
+ power-domains = <&ps_venc>;
+ };
+
+ ps_venc_me1: power-controller@21010 {
+ compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x21010 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "venc_me1";
+ power-domains = <&ps_venc>;
+ };
+};
diff --git a/arch/arm64/boot/dts/apple/t7000.dtsi b/arch/arm64/boot/dts/apple/t7000.dtsi
index a7cc29e84c84..85a34dc7bc01 100644
--- a/arch/arm64/boot/dts/apple/t7000.dtsi
+++ b/arch/arm64/boot/dts/apple/t7000.dtsi
@@ -33,6 +33,8 @@
compatible = "apple,typhoon";
reg = <0x0 0x0>;
cpu-release-addr = <0 0>; /* To be filled in by loader */
+ performance-domains = <&cpufreq>;
+ operating-points-v2 = <&typhoon_opp>;
enable-method = "spin-table";
device_type = "cpu";
};
@@ -41,11 +43,55 @@
compatible = "apple,typhoon";
reg = <0x0 0x1>;
cpu-release-addr = <0 0>; /* To be filled in by loader */
+ performance-domains = <&cpufreq>;
+ operating-points-v2 = <&typhoon_opp>;
enable-method = "spin-table";
device_type = "cpu";
};
};
+ typhoon_opp: opp-table {
+ compatible = "operating-points-v2";
+
+ opp01 {
+ opp-hz = /bits/ 64 <300000000>;
+ opp-level = <1>;
+ clock-latency-ns = <300>;
+ };
+ opp02 {
+ opp-hz = /bits/ 64 <396000000>;
+ opp-level = <2>;
+ clock-latency-ns = <50000>;
+ };
+ opp03 {
+ opp-hz = /bits/ 64 <600000000>;
+ opp-level = <3>;
+ clock-latency-ns = <29000>;
+ };
+ opp04 {
+ opp-hz = /bits/ 64 <840000000>;
+ opp-level = <4>;
+ clock-latency-ns = <29000>;
+ };
+ opp05 {
+ opp-hz = /bits/ 64 <1128000000>;
+ opp-level = <5>;
+ clock-latency-ns = <36000>;
+ };
+ typhoon_opp06: opp06 {
+ opp-hz = /bits/ 64 <1392000000>;
+ opp-level = <6>;
+ clock-latency-ns = <42000>;
+ status = "disabled"; /* Not available on N102 */
+ };
+ typhoon_opp07: opp07 {
+ opp-hz = /bits/ 64 <1512000000>;
+ opp-level = <7>;
+ clock-latency-ns = <49000>;
+ status = "disabled"; /* J96 and J97 only */
+ };
+ };
+
soc {
compatible = "simple-bus";
#address-cells = <2>;
@@ -53,6 +99,12 @@
nonposted-mmio;
ranges;
+ cpufreq: performance-controller@202220000 {
+ compatible = "apple,t7000-cluster-cpufreq", "apple,s5l8960x-cluster-cpufreq";
+ reg = <0x2 0x02220000 0 0x1000>;
+ #performance-domain-cells = <0>;
+ };
+
serial0: serial@20a0c0000 {
compatible = "apple,s5l-uart";
reg = <0x2 0x0a0c0000 0x0 0x4000>;
@@ -62,6 +114,7 @@
/* Use the bootloader-enabled clocks for now. */
clocks = <&clkref>, <&clkref>;
clock-names = "uart", "clk_uart_baud0";
+ power-domains = <&ps_uart0>;
status = "disabled";
};
@@ -74,9 +127,18 @@
/* Use the bootloader-enabled clocks for now. */
clocks = <&clkref>, <&clkref>;
clock-names = "uart", "clk_uart_baud0";
+ power-domains = <&ps_uart6>;
status = "disabled";
};
+ pmgr: power-management@20e000000 {
+ compatible = "apple,t7000-pmgr", "apple,pmgr", "syscon", "simple-mfd";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ reg = <0x2 0xe000000 0 0x24000>;
+ };
+
wdt: watchdog@20e027000 {
compatible = "apple,t7000-wdt", "apple,wdt";
reg = <0x2 0x0e027000 0x0 0x1000>;
@@ -90,11 +152,20 @@
reg = <0x2 0x0e100000 0x0 0x100000>;
#interrupt-cells = <3>;
interrupt-controller;
+ power-domains = <&ps_aic>;
+ };
+
+ dwi_bl: backlight@20e200010 {
+ compatible = "apple,t7000-dwi-bl", "apple,dwi-bl";
+ reg = <0x2 0x0e200010 0x0 0x8>;
+ power-domains = <&ps_dwi>;
+ status = "disabled";
};
pinctrl: pinctrl@20e300000 {
compatible = "apple,t7000-pinctrl", "apple,pinctrl";
reg = <0x2 0x0e300000 0x0 0x100000>;
+ power-domains = <&ps_gpio>;
gpio-controller;
#gpio-cells = <2>;
@@ -123,3 +194,5 @@
<AIC_FIQ AIC_TMR_GUEST_VIRT IRQ_TYPE_LEVEL_HIGH>;
};
};
+
+#include "t7000-pmgr.dtsi"
diff --git a/arch/arm64/boot/dts/apple/t7001-air2.dtsi b/arch/arm64/boot/dts/apple/t7001-air2.dtsi
index 19fabd425c52..e4ec8c1977de 100644
--- a/arch/arm64/boot/dts/apple/t7001-air2.dtsi
+++ b/arch/arm64/boot/dts/apple/t7001-air2.dtsi
@@ -20,6 +20,7 @@
framebuffer0: framebuffer@0 {
compatible = "apple,simple-framebuffer", "simple-framebuffer";
reg = <0 0 0 0>; /* To be filled by loader */
+ power-domains = <&ps_disp0 &ps_dp>;
/* Format properties will be added by loader */
status = "disabled";
};
diff --git a/arch/arm64/boot/dts/apple/t7001-pmgr.dtsi b/arch/arm64/boot/dts/apple/t7001-pmgr.dtsi
new file mode 100644
index 000000000000..7321cfdcd189
--- /dev/null
+++ b/arch/arm64/boot/dts/apple/t7001-pmgr.dtsi
@@ -0,0 +1,650 @@
+// SPDX-License-Identifier: GPL-2.0+ OR MIT
+/*
+ * PMGR Power domains for the Apple T7001 "A8X" SoC
+ *
+ * Copyright (c) 2024, Nick Chan <towinchenmi@gmail.com>
+ */
+
+&pmgr {
+ ps_cpu0: power-controller@20000 {
+ compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x20000 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "cpu0";
+ apple,always-on; /* Core device */
+ };
+
+ ps_cpu1: power-controller@20008 {
+ compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x20008 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "cpu1";
+ apple,always-on; /* Core device */
+ };
+
+ ps_cpu2: power-controller@20010 {
+ compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x20010 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "cpu2";
+ apple,always-on; /* Core device */
+ };
+
+ ps_cpm: power-controller@20040 {
+ compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x20040 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "cpm";
+ apple,always-on; /* Core device */
+ };
+
+ ps_sio_p: power-controller@201f8 {
+ compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x201f8 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "sio_p";
+ };
+
+ ps_lio: power-controller@20100 {
+ compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x20100 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "lio";
+ apple,always-on; /* Core device */
+ };
+
+ ps_iomux: power-controller@20108 {
+ compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x20108 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "iomux";
+ apple,always-on; /* Core device */
+ };
+
+ ps_aic: power-controller@20110 {
+ compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x20110 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "aic";
+ apple,always-on; /* Core device */
+ };
+
+ ps_debug: power-controller@20118 {
+ compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x20118 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "debug";
+ };
+
+ ps_dwi: power-controller@20120 {
+ compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x20120 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "dwi";
+ };
+
+ ps_gpio: power-controller@20128 {
+ compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x20128 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "gpio";
+ };
+
+ ps_mca0: power-controller@20130 {
+ compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x20130 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "mca0";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_mca1: power-controller@20138 {
+ compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x20138 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "mca1";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_mca2: power-controller@20140 {
+ compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x20140 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "mca2";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_mca3: power-controller@20148 {
+ compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x20148 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "mca3";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_mca4: power-controller@20150 {
+ compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x20150 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "mca4";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_pwm0: power-controller@20158 {
+ compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x20158 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "pwm0";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_i2c0: power-controller@20160 {
+ compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x20160 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "i2c0";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_i2c1: power-controller@20168 {
+ compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x20168 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "i2c1";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_i2c2: power-controller@20170 {
+ compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x20170 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "i2c2";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_i2c3: power-controller@20178 {
+ compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x20178 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "i2c3";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_spi0: power-controller@20180 {
+ compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x20180 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "spi0";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_spi1: power-controller@20188 {
+ compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x20188 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "spi1";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_spi2: power-controller@20190 {
+ compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x20190 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "spi2";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_spi3: power-controller@20198 {
+ compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x20198 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "spi3";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_uart0: power-controller@201a0 {
+ compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x201a0 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "uart0";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_uart1: power-controller@201a8 {
+ compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x201a8 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "uart1";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_uart2: power-controller@201b0 {
+ compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x201b0 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "uart2";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_uart3: power-controller@201b8 {
+ compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x201b8 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "uart3";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_uart4: power-controller@201c0 {
+ compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x201c0 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "uart4";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_uart5: power-controller@201c8 {
+ compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x201c8 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "uart5";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_uart6: power-controller@201d0 {
+ compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x201d0 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "uart6";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_uart7: power-controller@201d8 {
+ compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x201d8 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "uart7";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_uart8: power-controller@201e0 {
+ compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x201e0 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "uart8";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_aes0: power-controller@201e8 {
+ compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x201e8 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "aes0";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_sio: power-controller@201f0 {
+ compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x201f0 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "sio";
+ power-domains = <&ps_sio_p>;
+ apple,always-on; /* Core device */
+ };
+
+ ps_usb: power-controller@20248 {
+ compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x20248 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "usb";
+ };
+
+ ps_usbctrl: power-controller@20250 {
+ compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x20250 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "usbctrl";
+ power-domains = <&ps_usb>;
+ };
+
+ ps_usb2host0: power-controller@20258 {
+ compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x20258 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "usb2host0";
+ power-domains = <&ps_usbctrl>;
+ };
+
+ ps_usb2host1: power-controller@20268 {
+ compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x20268 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "usb2host1";
+ power-domains = <&ps_usbctrl>;
+ };
+
+ ps_usb2host2: power-controller@20278 {
+ compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x20278 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "usb2host2";
+ power-domains = <&ps_usbctrl>;
+ };
+
+ ps_disp_busmux: power-controller@202a8 {
+ compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x202a8 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "disp_busmux";
+ };
+
+ ps_disp1_busmux: power-controller@202c0 {
+ compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x202c0 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "disp1_busmux";
+ };
+
+ ps_media: power-controller@202d8 {
+ compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x202d8 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "media";
+ };
+
+ ps_isp: power-controller@202d0 {
+ compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x202d0 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "isp";
+ };
+
+ ps_msr: power-controller@202e0 {
+ compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x202e0 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "msr";
+ power-domains = <&ps_media>;
+ };
+
+ ps_jpg: power-controller@202e8 {
+ compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x202e8 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "jpg";
+ power-domains = <&ps_media>;
+ };
+
+ ps_disp0: power-controller@202b0 {
+ compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x202b0 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "disp0";
+ power-domains = <&ps_disp_busmux>;
+ };
+
+ ps_disp1: power-controller@202c8 {
+ compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x202c8 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "disp1";
+ power-domains = <&ps_disp1_busmux>;
+ };
+
+ ps_pcie_ref: power-controller@20220 {
+ compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x20220 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "pcie_ref";
+ };
+
+ ps_hsic0_phy: power-controller@20200 {
+ compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x20200 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "hsic0_phy";
+ power-domains = <&ps_usb2host1>;
+ };
+
+ ps_hsic1_phy: power-controller@20208 {
+ compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x20208 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "hsic1_phy";
+ power-domains = <&ps_usb2host2>;
+ };
+
+ ps_ispsens0: power-controller@20210 {
+ compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x20210 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "ispsens0";
+ };
+
+ ps_ispsens1: power-controller@20218 {
+ compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x20218 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "ispsens1";
+ };
+
+ ps_mcc: power-controller@20230 {
+ compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x20230 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "mcc";
+ apple,always-on; /* Memory cache controller */
+ };
+
+ ps_mcu: power-controller@20238 {
+ compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x20238 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "mcu";
+ apple,always-on; /* Core device */
+ };
+
+ ps_amp: power-controller@20240 {
+ compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x20240 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "amp";
+ apple,always-on; /* Core device */
+ };
+
+ ps_usb2host0_ohci: power-controller@20260 {
+ compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x20260 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "usb2host0_ohci";
+ power-domains = <&ps_usb2host0>;
+ };
+
+ ps_usbotg: power-controller@20288 {
+ compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x20288 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "usbotg";
+ power-domains = <&ps_usbctrl>;
+ };
+
+ ps_smx: power-controller@20290 {
+ compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x20290 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "smx";
+ apple,always-on; /* Apple fabric, critical block */
+ };
+
+ ps_sf: power-controller@20298 {
+ compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x20298 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "sf";
+ apple,always-on; /* Apple fabric, critical block */
+ };
+
+ ps_cp: power-controller@202a0 {
+ compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x202a0 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "cp";
+ apple,always-on; /* Core device */
+ };
+
+ ps_dp: power-controller@202b8 {
+ compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x202b8 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "dp";
+ power-domains = <&ps_disp0>;
+ };
+
+ ps_vdec: power-controller@202f0 {
+ compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x202f0 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "vdec";
+ power-domains = <&ps_media>;
+ };
+
+ ps_ans: power-controller@20318 {
+ compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x20318 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "ans";
+ };
+
+ ps_venc: power-controller@20300 {
+ compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x20300 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "venc";
+ power-domains = <&ps_media>;
+ };
+
+ ps_pcie: power-controller@20308 {
+ compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x20308 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "pcie";
+ };
+
+ ps_pcie_aux: power-controller@20310 {
+ compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x20310 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "pcie_aux";
+ };
+
+ ps_gfx: power-controller@20320 {
+ compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x20320 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "gfx";
+ };
+
+ ps_sep: power-controller@20400 {
+ compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x20400 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "sep";
+ apple,always-on; /* Locked on */
+ };
+
+ ps_venc_pipe: power-controller@21000 {
+ compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x21000 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "venc_pipe";
+ power-domains = <&ps_venc>;
+ };
+
+ ps_venc_me0: power-controller@21008 {
+ compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x21008 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "venc_me0";
+ power-domains = <&ps_venc>;
+ };
+
+ ps_venc_me1: power-controller@21010 {
+ compatible = "apple,t7000-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x21010 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "venc_me1";
+ power-domains = <&ps_venc>;
+ };
+};
diff --git a/arch/arm64/boot/dts/apple/t7001.dtsi b/arch/arm64/boot/dts/apple/t7001.dtsi
index a76e034c85e3..8e2c67e19c41 100644
--- a/arch/arm64/boot/dts/apple/t7001.dtsi
+++ b/arch/arm64/boot/dts/apple/t7001.dtsi
@@ -35,6 +35,8 @@
compatible = "apple,typhoon";
reg = <0x0 0x0>;
cpu-release-addr = <0 0>; /* To be filled in by loader */
+ performance-domains = <&cpufreq>;
+ operating-points-v2 = <&typhoon_opp>;
enable-method = "spin-table";
device_type = "cpu";
};
@@ -43,6 +45,8 @@
compatible = "apple,typhoon";
reg = <0x0 0x1>;
cpu-release-addr = <0 0>; /* To be filled in by loader */
+ performance-domains = <&cpufreq>;
+ operating-points-v2 = <&typhoon_opp>;
enable-method = "spin-table";
device_type = "cpu";
};
@@ -51,11 +55,53 @@
compatible = "apple,typhoon";
reg = <0x0 0x2>;
cpu-release-addr = <0 0>; /* To be filled by loader */
+ performance-domains = <&cpufreq>;
+ operating-points-v2 = <&typhoon_opp>;
enable-method = "spin-table";
device_type = "cpu";
};
};
+ typhoon_opp: opp-table {
+ compatible = "operating-points-v2";
+
+ opp01 {
+ opp-hz = /bits/ 64 <300000000>;
+ opp-level = <1>;
+ clock-latency-ns = <300>;
+ };
+ opp02 {
+ opp-hz = /bits/ 64 <396000000>;
+ opp-level = <2>;
+ clock-latency-ns = <49000>;
+ };
+ opp03 {
+ opp-hz = /bits/ 64 <600000000>;
+ opp-level = <3>;
+ clock-latency-ns = <31000>;
+ };
+ opp04 {
+ opp-hz = /bits/ 64 <840000000>;
+ opp-level = <4>;
+ clock-latency-ns = <32000>;
+ };
+ opp05 {
+ opp-hz = /bits/ 64 <1128000000>;
+ opp-level = <5>;
+ clock-latency-ns = <32000>;
+ };
+ opp06 {
+ opp-hz = /bits/ 64 <1392000000>;
+ opp-level = <6>;
+ clock-latency-ns = <37000>;
+ };
+ opp07 {
+ opp-hz = /bits/ 64 <1512000000>;
+ opp-level = <7>;
+ clock-latency-ns = <41000>;
+ };
+ };
+
soc {
compatible = "simple-bus";
#address-cells = <2>;
@@ -63,6 +109,12 @@
nonposted-mmio;
ranges;
+ cpufreq: performance-controller@202220000 {
+ compatible = "apple,t7000-cluster-cpufreq", "apple,s5l8960x-cluster-cpufreq";
+ reg = <0x2 0x02220000 0 0x1000>;
+ #performance-domain-cells = <0>;
+ };
+
serial0: serial@20a0c0000 {
compatible = "apple,s5l-uart";
reg = <0x2 0x0a0c0000 0x0 0x4000>;
@@ -72,9 +124,18 @@
/* Use the bootloader-enabled clocks for now. */
clocks = <&clkref>, <&clkref>;
clock-names = "uart", "clk_uart_baud0";
+ power-domains = <&ps_uart0>;
status = "disabled";
};
+ pmgr: power-management@20e000000 {
+ compatible = "apple,t7000-pmgr", "apple,pmgr", "syscon", "simple-mfd";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ reg = <0x2 0xe000000 0 0x24000>;
+ };
+
wdt: watchdog@20e027000 {
compatible = "apple,t7000-wdt", "apple,wdt";
reg = <0x2 0x0e027000 0x0 0x1000>;
@@ -88,11 +149,13 @@
reg = <0x2 0x0e100000 0x0 0x100000>;
#interrupt-cells = <3>;
interrupt-controller;
+ power-domains = <&ps_aic>;
};
pinctrl: pinctrl@20e300000 {
compatible = "apple,t7000-pinctrl", "apple,pinctrl";
reg = <0x2 0x0e300000 0x0 0x100000>;
+ power-domains = <&ps_gpio>;
gpio-controller;
#gpio-cells = <2>;
@@ -121,3 +184,5 @@
<AIC_FIQ AIC_TMR_GUEST_VIRT IRQ_TYPE_LEVEL_HIGH>;
};
};
+
+#include "t7001-pmgr.dtsi"
diff --git a/arch/arm64/boot/dts/apple/t8010-7.dtsi b/arch/arm64/boot/dts/apple/t8010-7.dtsi
index 1332fd73f50f..1913b7b2c1fe 100644
--- a/arch/arm64/boot/dts/apple/t8010-7.dtsi
+++ b/arch/arm64/boot/dts/apple/t8010-7.dtsi
@@ -41,3 +41,15 @@
};
};
};
+
+&framebuffer0 {
+ power-domains = <&ps_disp0_fe &ps_disp0_be &ps_mipi_dsi>;
+};
+
+&hurricane_opp09 {
+ status = "okay";
+};
+
+&hurricane_opp10 {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/apple/t8010-common.dtsi b/arch/arm64/boot/dts/apple/t8010-common.dtsi
index 6613fb57c92f..44dc968638b1 100644
--- a/arch/arm64/boot/dts/apple/t8010-common.dtsi
+++ b/arch/arm64/boot/dts/apple/t8010-common.dtsi
@@ -43,6 +43,10 @@
};
};
+&dwi_bl {
+ status = "okay";
+};
+
&serial0 {
status = "okay";
};
diff --git a/arch/arm64/boot/dts/apple/t8010-ipad6.dtsi b/arch/arm64/boot/dts/apple/t8010-ipad6.dtsi
index 81696c6e302c..1e46e4a3a7f4 100644
--- a/arch/arm64/boot/dts/apple/t8010-ipad6.dtsi
+++ b/arch/arm64/boot/dts/apple/t8010-ipad6.dtsi
@@ -42,3 +42,15 @@
};
};
};
+
+&framebuffer0 {
+ power-domains = <&ps_disp0_fe &ps_disp0_be &ps_dp>;
+};
+
+&hurricane_opp09 {
+ status = "okay";
+};
+
+&hurricane_opp10 {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/apple/t8010-n112.dts b/arch/arm64/boot/dts/apple/t8010-n112.dts
index 6e71c3cb5d92..48fdbedf74da 100644
--- a/arch/arm64/boot/dts/apple/t8010-n112.dts
+++ b/arch/arm64/boot/dts/apple/t8010-n112.dts
@@ -45,3 +45,7 @@
};
};
};
+
+&framebuffer0 {
+ power-domains = <&ps_disp0_fe &ps_disp0_be &ps_mipi_dsi>;
+};
diff --git a/arch/arm64/boot/dts/apple/t8010-pmgr.dtsi b/arch/arm64/boot/dts/apple/t8010-pmgr.dtsi
new file mode 100644
index 000000000000..6d451088616a
--- /dev/null
+++ b/arch/arm64/boot/dts/apple/t8010-pmgr.dtsi
@@ -0,0 +1,772 @@
+// SPDX-License-Identifier: GPL-2.0+ OR MIT
+/*
+ * PMGR Power domains for the Apple T8010 "A10" SoC
+ *
+ * Copyright (c) 2024 Nick Chan <towinchenmi@gmail.com>
+ */
+
+&pmgr {
+ ps_cpu0: power-controller@80000 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80000 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "cpu0";
+ apple,always-on; /* Core device */
+ };
+
+ ps_cpu1: power-controller@80008 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80008 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "cpu1";
+ apple,always-on; /* Core device */
+ };
+
+ ps_cpm: power-controller@80040 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80040 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "cpm";
+ apple,always-on; /* Core device */
+ };
+
+ ps_sio_busif: power-controller@80160 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80160 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "sio_busif";
+ };
+
+ ps_sio_p: power-controller@80168 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80168 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "sio_p";
+ power-domains = <&ps_sio_busif>;
+ };
+
+ ps_sbr: power-controller@80100 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80100 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "sbr";
+ apple,always-on; /* Apple fabric, critical block */
+ };
+
+ ps_aic: power-controller@80108 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80108 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "aic";
+ apple,always-on; /* Core device */
+ };
+
+ ps_dwi: power-controller@80110 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80110 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "dwi";
+ };
+
+ ps_gpio: power-controller@80118 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80118 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "gpio";
+ };
+
+ ps_pms: power-controller@80120 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80120 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "pms";
+ apple,always-on; /* Core device */
+ };
+
+ ps_pcie_ref: power-controller@80148 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80148 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "pcie_ref";
+ };
+
+ ps_socuvd: power-controller@80150 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80150 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "socuvd";
+ };
+
+ ps_mca0: power-controller@80178 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80178 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "mca0";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_mca1: power-controller@80180 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80180 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "mca1";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_mca2: power-controller@80188 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80188 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "mca2";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_mca3: power-controller@80190 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80190 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "mca3";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_mca4: power-controller@80198 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80198 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "mca4";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_pwm0: power-controller@801a0 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x801a0 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "pwm0";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_i2c0: power-controller@801a8 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x801a8 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "i2c0";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_i2c1: power-controller@801b0 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x801b0 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "i2c1";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_i2c2: power-controller@801b8 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x801b8 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "i2c2";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_i2c3: power-controller@801c0 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x801c0 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "i2c3";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_spi0: power-controller@801c8 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x801c8 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "spi0";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_spi1: power-controller@801d0 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x801d0 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "spi1";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_spi2: power-controller@801d8 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x801d8 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "spi2";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_spi3: power-controller@801e0 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x801e0 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "spi3";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_uart0: power-controller@801e8 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x801e8 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "uart0";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_uart1: power-controller@801f0 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x801f0 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "uart1";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_uart2: power-controller@801f8 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x801f8 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "uart2";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_sio: power-controller@80170 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80170 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "sio";
+ power-domains = <&ps_sio_p>;
+ apple,always-on; /* Core device */
+ };
+
+ ps_hsic0_phy: power-controller@80128 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80128 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "hsic0_phy";
+ power-domains = <&ps_usb2host1>;
+ };
+
+ ps_isp_sens0: power-controller@80130 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80130 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "isp_sens0";
+ };
+
+ ps_isp_sens1: power-controller@80138 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80138 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "isp_sens1";
+ };
+
+ ps_isp_sens2: power-controller@80140 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80140 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "isp_sens2";
+ };
+
+ ps_usb: power-controller@80268 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80268 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "usb";
+ };
+
+ ps_usbctrl: power-controller@80270 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80270 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "usbctrl";
+ power-domains = <&ps_usb>;
+ };
+
+ ps_usb2host0: power-controller@80278 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80278 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "usb2host0";
+ power-domains = <&ps_usbctrl>;
+ };
+
+ ps_usb2host1: power-controller@80288 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80288 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "usb2host1";
+ power-domains = <&ps_usbctrl>;
+ };
+
+ ps_rtmux: power-controller@802a8 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x802a8 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "rtmux";
+ };
+
+ ps_media: power-controller@802d8 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x802d8 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "media";
+ };
+
+ ps_isp_sys: power-controller@802d0 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x802d0 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "isp_sys";
+ power-domains = <&ps_rtmux>;
+ };
+
+ ps_msr: power-controller@802e8 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x802e8 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "msr";
+ power-domains = <&ps_media>;
+ };
+
+ ps_jpg: power-controller@802e0 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x802e0 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "jpg";
+ power-domains = <&ps_media>;
+ };
+
+ ps_disp0_fe: power-controller@802b0 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x802b0 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "disp0_fe";
+ power-domains = <&ps_rtmux>;
+ };
+
+ ps_disp0_be: power-controller@802b8 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x802b8 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "disp0_be";
+ power-domains = <&ps_disp0_fe>;
+ };
+
+ ps_pmp: power-controller@802f0 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x802f0 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "pmp";
+ };
+
+ ps_pms_sram: power-controller@802f8 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x802f8 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "pms_sram";
+ };
+
+ ps_uart3: power-controller@80200 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80200 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "uart3";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_uart4: power-controller@80208 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80208 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "uart4";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_uart5: power-controller@80210 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80210 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "uart5";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_uart6: power-controller@80218 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80218 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "uart6";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_uart7: power-controller@80220 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80220 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "uart7";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_uart8: power-controller@80228 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80228 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "uart8";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_hfd0: power-controller@80238 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80238 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "hfd0";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_mcc: power-controller@80240 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80240 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "mcc";
+ apple,always-on; /* Memory cache controller */
+ };
+
+ ps_dcs0: power-controller@80248 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80248 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "dcs0";
+ apple,always-on; /* LPDDR4 interface */
+ };
+
+ ps_dcs1: power-controller@80250 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80250 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "dcs1";
+ apple,always-on; /* LPDDR4 interface */
+ };
+
+ ps_dcs2: power-controller@80258 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80258 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "dcs2";
+ apple,always-on; /* LPDDR4 interface */
+ };
+
+ ps_dcs3: power-controller@80260 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80260 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "dcs3";
+ apple,always-on; /* LPDDR4 interface */
+ };
+
+ ps_usb2host0_ohci: power-controller@80280 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80280 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "usb2host0_ohci";
+ power-domains = <&ps_usb2host0>;
+ };
+
+ ps_usbotg: power-controller@80290 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80290 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "usbotg";
+ power-domains = <&ps_usbctrl>;
+ };
+
+ ps_smx: power-controller@80298 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80298 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "smx";
+ apple,always-on; /* Apple fabric, critical block */
+ };
+
+ ps_sf: power-controller@802a0 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x802a0 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "sf";
+ apple,always-on; /* Apple fabric, critical block */
+ };
+
+ ps_mipi_dsi: power-controller@802c0 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x802c0 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "mipi_dsi";
+ power-domains = <&ps_disp0_be>;
+ };
+
+ ps_dp: power-controller@802c8 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x802c8 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "dp";
+ power-domains = <&ps_disp0_be>;
+ };
+
+ ps_venc_sys: power-controller@80310 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80310 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "venc_sys";
+ power-domains = <&ps_media>;
+ };
+
+ ps_pcie: power-controller@80318 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80318 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "pcie";
+ };
+
+ ps_pcie_aux: power-controller@80320 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80320 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "pcie_aux";
+ };
+
+ ps_vdec0: power-controller@80300 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80300 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "vdec0";
+ power-domains = <&ps_media>;
+ };
+
+ ps_gfx: power-controller@80328 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80328 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "gfx";
+ };
+
+ ps_sep: power-controller@80400 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80400 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "sep";
+ apple,always-on; /* Locked on */
+ };
+
+ ps_isp_rsts0: power-controller@84000 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x84000 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "isp_rsts0";
+ power-domains = <&ps_isp_sys>;
+ };
+
+ ps_isp_rsts1: power-controller@84008 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x84008 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "isp_rsts1";
+ power-domains = <&ps_isp_sys>;
+ };
+
+ ps_isp_vis: power-controller@84010 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x84010 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "isp_vis";
+ power-domains = <&ps_isp_sys>;
+ };
+
+ ps_isp_be: power-controller@84018 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x84018 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "isp_be";
+ power-domains = <&ps_isp_sys>;
+ };
+
+ ps_isp_pearl: power-controller@84020 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x84020 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "isp_pearl";
+ power-domains = <&ps_isp_sys>;
+ };
+
+ ps_dprx: power-controller@84028 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x84028 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "dprx";
+ power-domains = <&ps_isp_sys>;
+ };
+
+ ps_venc_pipe4: power-controller@88000 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x88000 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "venc_pipe4";
+ power-domains = <&ps_venc_sys>;
+ };
+
+ ps_venc_pipe5: power-controller@88008 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x88008 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "venc_pipe5";
+ power-domains = <&ps_venc_sys>;
+ };
+
+ ps_venc_me0: power-controller@88010 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x88010 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "venc_me0";
+ };
+
+ ps_venc_me1: power-controller@88018 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x88018 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "venc_me1";
+ };
+};
+
+&pmgr_mini {
+ ps_aop: power-controller@80000 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80000 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "aop";
+ power-domains = <&ps_aop_cpu &ps_aop_busif &ps_aop_filter>;
+ apple,always-on; /* Always on processor */
+ };
+
+ ps_debug: power-controller@80008 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80008 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "debug";
+ };
+
+ ps_aop_gpio: power-controller@80010 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80010 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "aop_gpio";
+ };
+
+ ps_aop_cpu: power-controller@80048 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80048 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "aop_cpu";
+ };
+
+ ps_aop_filter: power-controller@80050 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80050 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "aop_filter";
+ };
+
+ ps_aop_busif: power-controller@80058 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80058 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "aop_busif";
+ };
+};
diff --git a/arch/arm64/boot/dts/apple/t8010.dtsi b/arch/arm64/boot/dts/apple/t8010.dtsi
index e3d6a8354103..17e294bd7c44 100644
--- a/arch/arm64/boot/dts/apple/t8010.dtsi
+++ b/arch/arm64/boot/dts/apple/t8010.dtsi
@@ -32,6 +32,8 @@
compatible = "apple,hurricane-zephyr";
reg = <0x0 0x0>;
cpu-release-addr = <0 0>; /* To be filled by loader */
+ operating-points-v2 = <&fusion_opp>;
+ performance-domains = <&cpufreq>;
enable-method = "spin-table";
device_type = "cpu";
};
@@ -40,11 +42,89 @@
compatible = "apple,hurricane-zephyr";
reg = <0x0 0x1>;
cpu-release-addr = <0 0>; /* To be filled by loader */
+ operating-points-v2 = <&fusion_opp>;
+ performance-domains = <&cpufreq>;
enable-method = "spin-table";
device_type = "cpu";
};
};
+ fusion_opp: opp-table {
+ compatible = "operating-points-v2";
+
+ /*
+ * Apple Fusion Architecture: Hardware big.LITTLE switcher
+ * that uses p-state transitions to switch between cores.
+ * Only one type of core can be active at a given time.
+ *
+ * The E-core frequencies are adjusted so performance scales
+ * linearly with reported clock speed.
+ */
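+
+ /*
+ * For illustration, assuming the frequency mapping described
+ * above: opp04 reports 637 MHz while the E-cores actually run
+ * at 1092 MHz, i.e. 637 / 1092 ~= 0.58, roughly the implied
+ * E-core to P-core performance-per-clock ratio.
+ */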
+
+ opp01 {
+ opp-hz = /bits/ 64 <172000000>; /* 300 MHz, E-core */
+ opp-level = <1>;
+ clock-latency-ns = <11000>;
+ };
+ opp02 {
+ opp-hz = /bits/ 64 <230000000>; /* 396 MHz, E-core */
+ opp-level = <2>;
+ clock-latency-ns = <49000>;
+ };
+ opp03 {
+ opp-hz = /bits/ 64 <425000000>; /* 732 MHz, E-core */
+ opp-level = <3>;
+ clock-latency-ns = <13000>;
+ };
+ opp04 {
+ opp-hz = /bits/ 64 <637000000>; /* 1092 MHz, E-core */
+ opp-level = <4>;
+ clock-latency-ns = <18000>;
+ };
+ opp05 {
+ opp-hz = /bits/ 64 <756000000>;
+ opp-level = <5>;
+ clock-latency-ns = <35000>;
+ };
+ opp06 {
+ opp-hz = /bits/ 64 <1056000000>;
+ opp-level = <6>;
+ clock-latency-ns = <31000>;
+ };
+ opp07 {
+ opp-hz = /bits/ 64 <1356000000>;
+ opp-level = <7>;
+ clock-latency-ns = <37000>;
+ };
+ opp08 {
+ opp-hz = /bits/ 64 <1644000000>;
+ opp-level = <8>;
+ clock-latency-ns = <39500>;
+ };
+ hurricane_opp09: opp09 {
+ opp-hz = /bits/ 64 <1944000000>;
+ opp-level = <9>;
+ clock-latency-ns = <46000>;
+ status = "disabled"; /* Not available on N112 */
+ };
+ hurricane_opp10: opp10 {
+ opp-hz = /bits/ 64 <2244000000>;
+ opp-level = <10>;
+ clock-latency-ns = <56000>;
+ status = "disabled"; /* Not available on N112 */
+ };
+#if 0
+ /* Not available until CPU deep sleep is implemented */
+ hurricane_opp11: opp11 {
+ opp-hz = /bits/ 64 <2340000000>;
+ opp-level = <11>;
+ clock-latency-ns = <56000>;
+ turbo-mode;
+ status = "disabled"; /* Not available on N112 */
+ };
+#endif
+ };
+
soc {
compatible = "simple-bus";
#address-cells = <2>;
@@ -52,6 +132,12 @@
nonposted-mmio;
ranges;
+ cpufreq: performance-controller@202f20000 {
+ compatible = "apple,t8010-cluster-cpufreq", "apple,t8103-cluster-cpufreq", "apple,cluster-cpufreq";
+ reg = <0x2 0x02f20000 0 0x1000>;
+ #performance-domain-cells = <0>;
+ };
+
serial0: serial@20a0c0000 {
compatible = "apple,s5l-uart";
reg = <0x2 0x0a0c0000 0x0 0x4000>;
@@ -61,19 +147,37 @@
/* Use the bootloader-enabled clocks for now. */
clocks = <&clkref>, <&clkref>;
clock-names = "uart", "clk_uart_baud0";
+ power-domains = <&ps_uart0>;
status = "disabled";
};
+ pmgr: power-management@20e000000 {
+ compatible = "apple,t8010-pmgr", "apple,pmgr", "syscon", "simple-mfd";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ reg = <0x2 0xe000000 0 0x8c000>;
+ };
+
aic: interrupt-controller@20e100000 {
compatible = "apple,t8010-aic", "apple,aic";
reg = <0x2 0x0e100000 0x0 0x100000>;
#interrupt-cells = <3>;
interrupt-controller;
+ power-domains = <&ps_aic>;
+ };
+
+ dwi_bl: backlight@20e200080 {
+ compatible = "apple,t8010-dwi-bl", "apple,dwi-bl";
+ reg = <0x2 0x0e200080 0x0 0x8>;
+ power-domains = <&ps_dwi>;
+ status = "disabled";
};
pinctrl_ap: pinctrl@20f100000 {
compatible = "apple,t8010-pinctrl", "apple,pinctrl";
reg = <0x2 0x0f100000 0x0 0x100000>;
+ power-domains = <&ps_gpio>;
gpio-controller;
#gpio-cells = <2>;
@@ -95,6 +199,7 @@
pinctrl_aop: pinctrl@2100f0000 {
compatible = "apple,t8010-pinctrl", "apple,pinctrl";
reg = <0x2 0x100f0000 0x0 0x100000>;
+ power-domains = <&ps_aop_gpio>;
gpio-controller;
#gpio-cells = <2>;
@@ -113,6 +218,14 @@
<AIC_IRQ 134 IRQ_TYPE_LEVEL_HIGH>;
};
+ pmgr_mini: power-management@210200000 {
+ compatible = "apple,t8010-pmgr", "apple,pmgr", "syscon", "simple-mfd";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ reg = <0x2 0x10200000 0 0x84000>;
+ };
+
wdt: watchdog@2102b0000 {
compatible = "apple,t8010-wdt", "apple,wdt";
reg = <0x2 0x102b0000 0x0 0x4000>;
@@ -131,3 +244,5 @@
<AIC_FIQ AIC_TMR_GUEST_VIRT IRQ_TYPE_LEVEL_HIGH>;
};
};
+
+#include "t8010-pmgr.dtsi"
diff --git a/arch/arm64/boot/dts/apple/t8011-common.dtsi b/arch/arm64/boot/dts/apple/t8011-common.dtsi
index 44a0d0ea2ee3..2010b56246f1 100644
--- a/arch/arm64/boot/dts/apple/t8011-common.dtsi
+++ b/arch/arm64/boot/dts/apple/t8011-common.dtsi
@@ -22,6 +22,7 @@
framebuffer0: framebuffer@0 {
compatible = "apple,simple-framebuffer", "simple-framebuffer";
reg = <0 0 0 0>; /* To be filled by loader */
+ power-domains = <&ps_disp0_fe &ps_disp0_be &ps_dp>;
/* Format properties will be added by loader */
status = "disabled";
};
diff --git a/arch/arm64/boot/dts/apple/t8011-pmgr.dtsi b/arch/arm64/boot/dts/apple/t8011-pmgr.dtsi
new file mode 100644
index 000000000000..c44e3f9d7087
--- /dev/null
+++ b/arch/arm64/boot/dts/apple/t8011-pmgr.dtsi
@@ -0,0 +1,806 @@
+// SPDX-License-Identifier: GPL-2.0+ OR MIT
+/*
+ * PMGR Power domains for the Apple T8011 "A10X" SoC
+ *
+ * Copyright (c) 2024 Nick Chan <towinchenmi@gmail.com>
+ */
+
+&pmgr {
+ ps_cpu0: power-controller@80000 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80000 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "cpu0";
+ apple,always-on; /* Core device */
+ };
+
+ ps_cpu1: power-controller@80008 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80008 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "cpu1";
+ apple,always-on; /* Core device */
+ };
+
+ ps_cpu2: power-controller@80010 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80010 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "cpu2";
+ apple,always-on; /* Core device */
+ };
+
+ ps_cpm: power-controller@80040 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80040 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "cpm";
+ apple,always-on; /* Core device */
+ };
+
+ ps_sio_busif: power-controller@80158 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80158 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "sio_busif";
+ };
+
+ ps_sio_p: power-controller@80160 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80160 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "sio_p";
+ power-domains = <&ps_sio_busif>;
+ };
+
+ ps_sbr: power-controller@80100 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80100 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "sbr";
+ apple,always-on; /* Apple fabric, critical block */
+ };
+
+ ps_aic: power-controller@80108 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80108 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "aic";
+ apple,always-on; /* Core device */
+ };
+
+ ps_dwi: power-controller@80110 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80110 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "dwi";
+ };
+
+ ps_gpio: power-controller@80118 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80118 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "gpio";
+ };
+
+ ps_pms: power-controller@80120 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80120 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "pms";
+ apple,always-on; /* Core device */
+ };
+
+ ps_pcie_ref: power-controller@80148 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80148 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "pcie_ref";
+ };
+
+ ps_mca0: power-controller@80170 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80170 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "mca0";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_mca1: power-controller@80178 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80178 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "mca1";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_mca2: power-controller@80180 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80180 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "mca2";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_mca3: power-controller@80188 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80188 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "mca3";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_mca4: power-controller@80190 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80190 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "mca4";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_pwm0: power-controller@80198 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80198 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "pwm0";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_i2c0: power-controller@801a0 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x801a0 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "i2c0";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_i2c1: power-controller@801a8 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x801a8 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "i2c1";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_i2c2: power-controller@801b0 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x801b0 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "i2c2";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_i2c3: power-controller@801b8 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x801b8 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "i2c3";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_spi0: power-controller@801c0 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x801c0 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "spi0";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_spi1: power-controller@801c8 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x801c8 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "spi1";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_spi2: power-controller@801d0 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x801d0 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "spi2";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_spi3: power-controller@801d8 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x801d8 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "spi3";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_uart0: power-controller@801e0 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x801e0 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "uart0";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_uart1: power-controller@801e8 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x801e8 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "uart1";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_uart2: power-controller@801f0 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x801f0 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "uart2";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_uart3: power-controller@801f8 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x801f8 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "uart3";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_sio: power-controller@80168 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80168 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "sio";
+ power-domains = <&ps_sio_p>;
+ apple,always-on; /* Core device */
+ };
+
+ ps_hsic0_phy: power-controller@80128 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80128 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "hsic0_phy";
+ power-domains = <&ps_usb3host>;
+ };
+
+ ps_isp_sens0: power-controller@80130 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80130 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "isp_sens0";
+ };
+
+ ps_isp_sens1: power-controller@80138 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80138 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "isp_sens1";
+ };
+
+ ps_isp_sens2: power-controller@80140 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80140 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "isp_sens2";
+ };
+
+ ps_usb: power-controller@80288 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80288 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "usb";
+ };
+
+ ps_usbctrl: power-controller@80290 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80290 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "usbctrl";
+ power-domains = <&ps_usb>;
+ };
+
+ ps_usb2host: power-controller@80298 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80298 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "usb2host";
+ power-domains = <&ps_usbctrl>;
+ };
+
+ ps_usb2dev: power-controller@802a0 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x802a0 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "usb2dev";
+ power-domains = <&ps_usbctrl>;
+ };
+
+ ps_usb3host: power-controller@802a8 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x802a8 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "usb3host";
+ power-domains = <&ps_usbctrl>;
+ };
+
+ ps_usb3dev: power-controller@802b0 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x802b0 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "usb3dev";
+ power-domains = <&ps_usbctrl>;
+ };
+
+ ps_media: power-controller@802e8 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x802e8 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "media";
+ };
+
+ ps_isp_sys: power-controller@802e0 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x802e0 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "isp_sys";
+ };
+
+ ps_msr: power-controller@802f8 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x802f8 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "msr";
+ power-domains = <&ps_media>;
+ };
+
+ ps_jpg: power-controller@802f0 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x802f0 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "jpg";
+ power-domains = <&ps_media>;
+ };
+
+ ps_disp0_fe: power-controller@802c8 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x802c8 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "disp0_fe";
+ };
+
+ ps_disp0_be: power-controller@802d0 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x802d0 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "disp0_be";
+ power-domains = <&ps_disp0_fe>;
+ };
+
+ ps_dpa: power-controller@80230 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80230 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "dpa";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_uart4: power-controller@80200 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80200 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "uart4";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_uart5: power-controller@80208 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80208 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "uart5";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_uart6: power-controller@80210 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80210 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "uart6";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_uart7: power-controller@80218 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80218 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "uart7";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_uart8: power-controller@80220 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80220 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "uart8";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_hfd0: power-controller@80238 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80238 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "hfd0";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_mcc: power-controller@80240 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80240 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "mcc";
+ apple,always-on; /* Memory cache controller */
+ };
+
+ ps_dcs0: power-controller@80248 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80248 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "dcs0";
+ apple,always-on; /* LPDDR4 interface */
+ };
+
+ ps_dcs1: power-controller@80250 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80250 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "dcs1";
+ apple,always-on; /* LPDDR4 interface */
+ };
+
+ ps_dcs2: power-controller@80258 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80258 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "dcs2";
+ apple,always-on; /* LPDDR4 interface */
+ };
+
+ ps_dcs3: power-controller@80260 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80260 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "dcs3";
+ apple,always-on; /* LPDDR4 interface */
+ };
+
+ ps_dcs4: power-controller@80268 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80268 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "dcs4";
+ apple,always-on; /* LPDDR4 interface */
+ };
+
+ ps_dcs5: power-controller@80270 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80270 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "dcs5";
+ apple,always-on; /* LPDDR4 interface */
+ };
+
+ ps_dcs6: power-controller@80278 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80278 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "dcs6";
+ };
+
+ ps_dcs7: power-controller@80280 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80280 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "dcs7";
+ };
+
+ ps_smx: power-controller@802b8 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x802b8 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "smx";
+ apple,always-on; /* Apple fabric, critical block */
+ };
+
+ ps_sf: power-controller@802c0 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x802c0 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "sf";
+ apple,always-on; /* Apple fabric, critical block */
+ };
+
+ ps_dp: power-controller@802d8 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x802d8 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "dp";
+ power-domains = <&ps_disp0_be>;
+ };
+
+ ps_venc_sys: power-controller@80320 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80320 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "venc_sys";
+ power-domains = <&ps_media>;
+ };
+
+ ps_srs: power-controller@80390 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80390 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "srs";
+ power-domains = <&ps_media>;
+ };
+
+ ps_pms_sram: power-controller@80308 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80308 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "pms_sram";
+ };
+
+ ps_pmp: power-controller@80300 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80300 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "pmp";
+ };
+
+ ps_pcie: power-controller@80328 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80328 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "pcie";
+ };
+
+ ps_pcie_aux: power-controller@80330 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80330 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "pcie_aux";
+ };
+
+ ps_vdec0: power-controller@80310 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80310 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "vdec0";
+ power-domains = <&ps_media>;
+ };
+
+ ps_gfx: power-controller@80338 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80338 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "gfx";
+ };
+
+ ps_sep: power-controller@80400 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80400 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "sep";
+ apple,always-on; /* Locked on */
+ };
+
+ ps_isp_rsts0: power-controller@84000 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x84000 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "isp_rsts0";
+ power-domains = <&ps_isp_sys>;
+ };
+
+ ps_isp_rsts1: power-controller@84008 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x84008 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "isp_rsts1";
+ power-domains = <&ps_isp_sys>;
+ };
+
+ ps_isp_vis: power-controller@84010 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x84010 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "isp_vis";
+ power-domains = <&ps_isp_sys>;
+ };
+
+ ps_isp_be: power-controller@84018 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x84018 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "isp_be";
+ power-domains = <&ps_isp_sys>;
+ };
+
+ ps_isp_pearl: power-controller@84020 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x84020 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "isp_pearl";
+ power-domains = <&ps_isp_sys>;
+ };
+
+ ps_dprx: power-controller@84028 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x84028 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "dprx";
+ power-domains = <&ps_isp_sys>;
+ };
+
+ ps_venc_pipe4: power-controller@88000 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x88000 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "venc_pipe4";
+ power-domains = <&ps_venc_sys>;
+ };
+
+ ps_venc_pipe5: power-controller@88008 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x88008 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "venc_pipe5";
+ power-domains = <&ps_venc_sys>;
+ };
+
+ ps_venc_me0: power-controller@88010 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x88010 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "venc_me0";
+ };
+
+ ps_venc_me1: power-controller@88018 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x88018 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "venc_me1";
+ };
+};
+
+&pmgr_mini {
+ ps_aop: power-controller@80000 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80000 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "aop";
+ power-domains = <&ps_aop_cpu &ps_aop_filter &ps_aop_busif>;
+ apple,always-on; /* Always on processor */
+ };
+
+ ps_debug: power-controller@80008 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80008 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "debug";
+ };
+
+ ps_aop_gpio: power-controller@80010 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80010 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "aop_gpio";
+ };
+
+ ps_aop_cpu: power-controller@80048 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80048 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "aop_cpu";
+ };
+
+ ps_aop_filter: power-controller@80050 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80050 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "aop_filter";
+ };
+
+ ps_aop_busif: power-controller@80058 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80058 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "aop_busif";
+ };
+};
diff --git a/arch/arm64/boot/dts/apple/t8011-pro2.dtsi b/arch/arm64/boot/dts/apple/t8011-pro2.dtsi
index f4e707415003..5eaa0a73350f 100644
--- a/arch/arm64/boot/dts/apple/t8011-pro2.dtsi
+++ b/arch/arm64/boot/dts/apple/t8011-pro2.dtsi
@@ -40,3 +40,11 @@
};
};
};
+
+&ps_dcs6 {
+ apple,always-on; /* LPDDR4 interface */
+};
+
+&ps_dcs7 {
+ apple,always-on; /* LPDDR4 interface */
+};
diff --git a/arch/arm64/boot/dts/apple/t8011.dtsi b/arch/arm64/boot/dts/apple/t8011.dtsi
index 6c4ed9dc4a50..5b280c896b76 100644
--- a/arch/arm64/boot/dts/apple/t8011.dtsi
+++ b/arch/arm64/boot/dts/apple/t8011.dtsi
@@ -32,6 +32,8 @@
compatible = "apple,hurricane-zephyr";
reg = <0x0 0x0>;
cpu-release-addr = <0 0>; /* To be filled by loader */
+ operating-points-v2 = <&fusion_opp>;
+ performance-domains = <&cpufreq>;
enable-method = "spin-table";
device_type = "cpu";
};
@@ -40,6 +42,8 @@
compatible = "apple,hurricane-zephyr";
reg = <0x0 0x1>;
cpu-release-addr = <0 0>; /* To be filled by loader */
+ operating-points-v2 = <&fusion_opp>;
+ performance-domains = <&cpufreq>;
enable-method = "spin-table";
device_type = "cpu";
};
@@ -48,11 +52,80 @@
compatible = "apple,hurricane-zephyr";
reg = <0x0 0x2>;
cpu-release-addr = <0 0>; /* To be filled by loader */
+ operating-points-v2 = <&fusion_opp>;
+ performance-domains = <&cpufreq>;
enable-method = "spin-table";
device_type = "cpu";
};
};
+ fusion_opp: opp-table {
+ compatible = "operating-points-v2";
+
+ /*
+ * Apple Fusion Architecture: Hardware big.LITTLE switcher
+ * that uses p-state transitions to switch between cores.
+ *
+ * The E-core frequencies are adjusted so performance scales
+ * linearly with reported clock speed.
+ */
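+
+ /*
+ * For illustration, assuming the frequency mapping described
+ * above: opp03 reports 448 MHz while the E-cores actually run
+ * at 768 MHz, i.e. 448 / 768 ~= 0.58, roughly the implied
+ * E-core to P-core performance-per-clock ratio.
+ */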
+
+ opp01 {
+ opp-hz = /bits/ 64 <172000000>; /* 300 MHz, E-core */
+ opp-level = <1>;
+ clock-latency-ns = <12000>;
+ };
+ opp02 {
+ opp-hz = /bits/ 64 <230000000>; /* 396 MHz, E-core */
+ opp-level = <2>;
+ clock-latency-ns = <135000>;
+ };
+ opp03 {
+ opp-hz = /bits/ 64 <448000000>; /* 768 MHz, E-core */
+ opp-level = <3>;
+ clock-latency-ns = <105000>;
+ };
+ opp04 {
+ opp-hz = /bits/ 64 <662000000>; /* 1152 MHz, E-core */
+ opp-level = <4>;
+ clock-latency-ns = <115000>;
+ };
+ opp05 {
+ opp-hz = /bits/ 64 <804000000>;
+ opp-level = <5>;
+ clock-latency-ns = <122000>;
+ };
+ opp06 {
+ opp-hz = /bits/ 64 <1140000000>;
+ opp-level = <6>;
+ clock-latency-ns = <120000>;
+ };
+ opp07 {
+ opp-hz = /bits/ 64 <1548000000>;
+ opp-level = <7>;
+ clock-latency-ns = <125000>;
+ };
+ opp08 {
+ opp-hz = /bits/ 64 <1956000000>;
+ opp-level = <8>;
+ clock-latency-ns = <135000>;
+ };
+ opp09 {
+ opp-hz = /bits/ 64 <2316000000>;
+ opp-level = <9>;
+ clock-latency-ns = <140000>;
+ };
+#if 0
+ /* Not available until CPU deep sleep is implemented */
+ opp10 {
+ opp-hz = /bits/ 64 <2400000000>;
+ opp-level = <10>;
+ clock-latency-ns = <140000>;
+ turbo-mode;
+ };
+#endif
+ };
+
soc {
compatible = "simple-bus";
#address-cells = <2>;
@@ -60,6 +133,12 @@
nonposted-mmio;
ranges;
+ cpufreq: performance-controller@202f20000 {
+ compatible = "apple,t8010-cluster-cpufreq", "apple,t8103-cluster-cpufreq", "apple,cluster-cpufreq";
+ reg = <0x2 0x02f20000 0 0x1000>;
+ #performance-domain-cells = <0>;
+ };
+
serial0: serial@20a0c0000 {
compatible = "apple,s5l-uart";
reg = <0x2 0x0a0c0000 0x0 0x4000>;
@@ -69,19 +148,30 @@
/* Use the bootloader-enabled clocks for now. */
clocks = <&clkref>, <&clkref>;
clock-names = "uart", "clk_uart_baud0";
+ power-domains = <&ps_uart0>;
status = "disabled";
};
+ pmgr: power-management@20e000000 {
+ compatible = "apple,t8010-pmgr", "apple,pmgr", "syscon", "simple-mfd";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ reg = <0x2 0xe000000 0 0x8c000>;
+ };
+
aic: interrupt-controller@20e100000 {
compatible = "apple,t8010-aic", "apple,aic";
reg = <0x2 0x0e100000 0x0 0x100000>;
#interrupt-cells = <3>;
interrupt-controller;
+ power-domains = <&ps_aic>;
};
pinctrl_ap: pinctrl@20f100000 {
compatible = "apple,t8010-pinctrl", "apple,pinctrl";
reg = <0x2 0x0f100000 0x0 0x100000>;
+ power-domains = <&ps_gpio>;
gpio-controller;
#gpio-cells = <2>;
@@ -103,6 +193,7 @@
pinctrl_aop: pinctrl@2100f0000 {
compatible = "apple,t8010-pinctrl", "apple,pinctrl";
reg = <0x2 0x100f0000 0x0 0x100000>;
+ power-domains = <&ps_aop_gpio>;
gpio-controller;
#gpio-cells = <2>;
@@ -121,6 +212,14 @@
<AIC_IRQ 131 IRQ_TYPE_LEVEL_HIGH>;
};
+ pmgr_mini: power-management@210200000 {
+ compatible = "apple,t8010-pmgr", "apple,pmgr", "syscon", "simple-mfd";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ reg = <0x2 0x10200000 0 0x84000>;
+ };
+
wdt: watchdog@2102b0000 {
compatible = "apple,t8010-wdt", "apple,wdt";
reg = <0x2 0x102b0000 0x0 0x4000>;
@@ -139,3 +238,5 @@
<AIC_FIQ AIC_TMR_GUEST_VIRT IRQ_TYPE_LEVEL_HIGH>;
};
};
+
+#include "t8011-pmgr.dtsi"
diff --git a/arch/arm64/boot/dts/apple/t8012-j132.dts b/arch/arm64/boot/dts/apple/t8012-j132.dts
new file mode 100644
index 000000000000..778a69be18dd
--- /dev/null
+++ b/arch/arm64/boot/dts/apple/t8012-j132.dts
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0+ OR MIT
+/*
+ * Apple T2 MacBookPro15,2 (j132), J132, iBridge2,4
+ * Copyright (c) 2024, Nick Chan <towinchenmi@gmail.com>
+ */
+
+/dts-v1/;
+
+#include "t8012-jxxx.dtsi"
+
+/ {
+ model = "Apple T2 MacBookPro15,2 (j132)";
+ compatible = "apple,j132", "apple,t8012", "apple,arm-platform";
+};
diff --git a/arch/arm64/boot/dts/apple/t8012-j137.dts b/arch/arm64/boot/dts/apple/t8012-j137.dts
new file mode 100644
index 000000000000..dbde1ad7ce14
--- /dev/null
+++ b/arch/arm64/boot/dts/apple/t8012-j137.dts
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0+ OR MIT
+/*
+ * Apple T2 iMacPro1,1 (j137), J137, iBridge2,1
+ * Copyright (c) 2024, Nick Chan <towinchenmi@gmail.com>
+ */
+
+/dts-v1/;
+
+#include "t8012-jxxx.dtsi"
+
+/ {
+ model = "Apple T2 iMacPro1,1 (j137)";
+ compatible = "apple,j137", "apple,t8012", "apple,arm-platform";
+};
diff --git a/arch/arm64/boot/dts/apple/t8012-j140a.dts b/arch/arm64/boot/dts/apple/t8012-j140a.dts
new file mode 100644
index 000000000000..5df1ff74d2df
--- /dev/null
+++ b/arch/arm64/boot/dts/apple/t8012-j140a.dts
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0+ OR MIT
+/*
+ * Apple T2 MacBookAir8,2 (j140a), J140a, iBridge2,12
+ * Copyright (c) 2024, Nick Chan <towinchenmi@gmail.com>
+ */
+
+/dts-v1/;
+
+#include "t8012-jxxx.dtsi"
+
+/ {
+ model = "Apple T2 MacBookAir8,2 (j140a)";
+ compatible = "apple,j140a", "apple,t8012", "apple,arm-platform";
+};
diff --git a/arch/arm64/boot/dts/apple/t8012-j140k.dts b/arch/arm64/boot/dts/apple/t8012-j140k.dts
new file mode 100644
index 000000000000..a0ef1585e5c2
--- /dev/null
+++ b/arch/arm64/boot/dts/apple/t8012-j140k.dts
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0+ OR MIT
+/*
+ * Apple T2 MacBookAir8,1 (j140k), J140k, iBridge2,8
+ * Copyright (c) 2024, Nick Chan <towinchenmi@gmail.com>
+ */
+
+/dts-v1/;
+
+#include "t8012-jxxx.dtsi"
+
+/ {
+ model = "Apple T2 MacBookAir8,1 (j140k)";
+ compatible = "apple,j140k", "apple,t8012", "apple,arm-platform";
+};
diff --git a/arch/arm64/boot/dts/apple/t8012-j152f.dts b/arch/arm64/boot/dts/apple/t8012-j152f.dts
new file mode 100644
index 000000000000..261416eaf97e
--- /dev/null
+++ b/arch/arm64/boot/dts/apple/t8012-j152f.dts
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0+ OR MIT
+/*
+ * Apple T2 MacBookPro16,1 (j152f), J152f, iBridge2,14
+ * Copyright (c) 2024, Nick Chan <towinchenmi@gmail.com>
+ */
+
+/dts-v1/;
+
+#include "t8012-jxxx.dtsi"
+#include "t8012-touchbar.dtsi"
+
+/ {
+ model = "Apple T2 MacBookPro16,1 (j152f)";
+ compatible = "apple,j152f", "apple,t8012", "apple,arm-platform";
+};
diff --git a/arch/arm64/boot/dts/apple/t8012-j160.dts b/arch/arm64/boot/dts/apple/t8012-j160.dts
new file mode 100644
index 000000000000..fbcc0604f4a0
--- /dev/null
+++ b/arch/arm64/boot/dts/apple/t8012-j160.dts
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0+ OR MIT
+/*
+ * Apple T2 MacPro7,1 (j160), J160, iBridge2,6
+ * Copyright (c) 2024, Nick Chan <towinchenmi@gmail.com>
+ */
+
+/dts-v1/;
+
+#include "t8012-jxxx.dtsi"
+
+/ {
+ model = "Apple T2 MacPro7,1 (j160)";
+ compatible = "apple,j160", "apple,t8012", "apple,arm-platform";
+};
diff --git a/arch/arm64/boot/dts/apple/t8012-j174.dts b/arch/arm64/boot/dts/apple/t8012-j174.dts
new file mode 100644
index 000000000000..d11c70f84a71
--- /dev/null
+++ b/arch/arm64/boot/dts/apple/t8012-j174.dts
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0+ OR MIT
+/*
+ * Apple T2 Macmini8,1 (j174), J174, iBridge2,5
+ * Copyright (c) 2024, Nick Chan <towinchenmi@gmail.com>
+ */
+
+/dts-v1/;
+
+#include "t8012-jxxx.dtsi"
+
+/ {
+ model = "Apple T2 Macmini8,1 (j174)";
+ compatible = "apple,j174", "apple,t8012", "apple,arm-platform";
+};
diff --git a/arch/arm64/boot/dts/apple/t8012-j185.dts b/arch/arm64/boot/dts/apple/t8012-j185.dts
new file mode 100644
index 000000000000..33492f5db46d
--- /dev/null
+++ b/arch/arm64/boot/dts/apple/t8012-j185.dts
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0+ OR MIT
+/*
+ * Apple T2 iMac20,1 (j185), J185, iBridge2,19
+ * Copyright (c) 2024, Nick Chan <towinchenmi@gmail.com>
+ */
+
+/dts-v1/;
+
+#include "t8012-jxxx.dtsi"
+
+/ {
+ model = "Apple T2 iMac20,1 (j185)";
+ compatible = "apple,j185", "apple,t8012", "apple,arm-platform";
+};
diff --git a/arch/arm64/boot/dts/apple/t8012-j185f.dts b/arch/arm64/boot/dts/apple/t8012-j185f.dts
new file mode 100644
index 000000000000..3a4abdd8f7d7
--- /dev/null
+++ b/arch/arm64/boot/dts/apple/t8012-j185f.dts
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0+ OR MIT
+/*
+ * Apple T2 iMac20,2 (j185f), J185f, iBridge2,20
+ * Copyright (c) 2024, Nick Chan <towinchenmi@gmail.com>
+ */
+
+/dts-v1/;
+
+#include "t8012-jxxx.dtsi"
+
+/ {
+ model = "Apple T2 iMac20,2 (j185f)";
+ compatible = "apple,j185f", "apple,t8012", "apple,arm-platform";
+};
diff --git a/arch/arm64/boot/dts/apple/t8012-j213.dts b/arch/arm64/boot/dts/apple/t8012-j213.dts
new file mode 100644
index 000000000000..8270812b9a68
--- /dev/null
+++ b/arch/arm64/boot/dts/apple/t8012-j213.dts
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0+ OR MIT
+/*
+ * Apple T2 MacBookPro15,4 (j213), J213, iBridge2,10
+ * Copyright (c) 2024, Nick Chan <towinchenmi@gmail.com>
+ */
+
+/dts-v1/;
+
+#include "t8012-jxxx.dtsi"
+#include "t8012-touchbar.dtsi"
+
+/ {
+ model = "Apple T2 MacBookPro15,4 (j213)";
+ compatible = "apple,j213", "apple,t8012", "apple,arm-platform";
+};
diff --git a/arch/arm64/boot/dts/apple/t8012-j214k.dts b/arch/arm64/boot/dts/apple/t8012-j214k.dts
new file mode 100644
index 000000000000..5b8e42512060
--- /dev/null
+++ b/arch/arm64/boot/dts/apple/t8012-j214k.dts
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0+ OR MIT
+/*
+ * Apple T2 MacBookPro16,2 (j214k), J214k, iBridge2,16
+ * Copyright (c) 2024, Nick Chan <towinchenmi@gmail.com>
+ */
+
+/dts-v1/;
+
+#include "t8012-jxxx.dtsi"
+#include "t8012-touchbar.dtsi"
+
+/ {
+ model = "Apple T2 MacBookPro16,2 (j214k)";
+ compatible = "apple,j214k", "apple,t8012", "apple,arm-platform";
+};
diff --git a/arch/arm64/boot/dts/apple/t8012-j215.dts b/arch/arm64/boot/dts/apple/t8012-j215.dts
new file mode 100644
index 000000000000..ad574fbf7f92
--- /dev/null
+++ b/arch/arm64/boot/dts/apple/t8012-j215.dts
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0+ OR MIT
+/*
+ * Apple T2 MacBookPro16,4 (j215), J215, iBridge2,22
+ * Copyright (c) 2024, Nick Chan <towinchenmi@gmail.com>
+ */
+
+/dts-v1/;
+
+#include "t8012-jxxx.dtsi"
+#include "t8012-touchbar.dtsi"
+
+/ {
+ model = "Apple T2 MacBookPro16,4 (j215)";
+ compatible = "apple,j215", "apple,t8012", "apple,arm-platform";
+};
diff --git a/arch/arm64/boot/dts/apple/t8012-j223.dts b/arch/arm64/boot/dts/apple/t8012-j223.dts
new file mode 100644
index 000000000000..de75d775aac5
--- /dev/null
+++ b/arch/arm64/boot/dts/apple/t8012-j223.dts
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0+ OR MIT
+/*
+ * Apple T2 MacBookPro16,3 (j223), J223, iBridge2,21
+ * Copyright (c) 2024, Nick Chan <towinchenmi@gmail.com>
+ */
+
+/dts-v1/;
+
+#include "t8012-jxxx.dtsi"
+#include "t8012-touchbar.dtsi"
+
+/ {
+ model = "Apple T2 MacBookPro16,3 (j223)";
+ compatible = "apple,j223", "apple,t8012", "apple,arm-platform";
+};
diff --git a/arch/arm64/boot/dts/apple/t8012-j230k.dts b/arch/arm64/boot/dts/apple/t8012-j230k.dts
new file mode 100644
index 000000000000..4b19bc70ab0f
--- /dev/null
+++ b/arch/arm64/boot/dts/apple/t8012-j230k.dts
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0+ OR MIT
+/*
+ * Apple T2 MacBookAir9,1 (j230k), J230k, iBridge2,15
+ * Copyright (c) 2024, Nick Chan <towinchenmi@gmail.com>
+ */
+
+/dts-v1/;
+
+#include "t8012-jxxx.dtsi"
+
+/ {
+ model = "Apple T2 MacBookAir9,1 (j230k)";
+ compatible = "apple,j230k", "apple,t8012", "apple,arm-platform";
+};
diff --git a/arch/arm64/boot/dts/apple/t8012-j680.dts b/arch/arm64/boot/dts/apple/t8012-j680.dts
new file mode 100644
index 000000000000..aa5a72e07d3f
--- /dev/null
+++ b/arch/arm64/boot/dts/apple/t8012-j680.dts
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0+ OR MIT
+/*
+ * Apple T2 MacBookPro15,1 (j680), J680, iBridge2,3
+ * Copyright (c) 2024, Nick Chan <towinchenmi@gmail.com>
+ */
+
+/dts-v1/;
+
+#include "t8012-jxxx.dtsi"
+#include "t8012-touchbar.dtsi"
+
+/ {
+ model = "Apple T2 MacBookPro15,1 (j680)";
+ compatible = "apple,j680", "apple,t8012", "apple,arm-platform";
+};
diff --git a/arch/arm64/boot/dts/apple/t8012-j780.dts b/arch/arm64/boot/dts/apple/t8012-j780.dts
new file mode 100644
index 000000000000..9cee891cb16d
--- /dev/null
+++ b/arch/arm64/boot/dts/apple/t8012-j780.dts
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0+ OR MIT
+/*
+ * Apple T2 MacBookPro15,3 (j780), J780, iBridge2,7
+ * Copyright (c) 2024, Nick Chan <towinchenmi@gmail.com>
+ */
+
+/dts-v1/;
+
+#include "t8012-jxxx.dtsi"
+#include "t8012-touchbar.dtsi"
+
+/ {
+ model = "Apple T2 MacBookPro15,3 (j780)";
+ compatible = "apple,j780", "apple,t8012", "apple,arm-platform";
+};
diff --git a/arch/arm64/boot/dts/apple/t8012-jxxx.dtsi b/arch/arm64/boot/dts/apple/t8012-jxxx.dtsi
new file mode 100644
index 000000000000..36e82633bc52
--- /dev/null
+++ b/arch/arm64/boot/dts/apple/t8012-jxxx.dtsi
@@ -0,0 +1,44 @@
+// SPDX-License-Identifier: GPL-2.0+ OR MIT
+/*
+ * Common Device Tree for all T2 devices
+ *
+ * target-type: J132, J137, J140a, J140k, J152f, J160, J174, J185, J185f
+ * J213, J214k, J215, J223, J230k, J680, J780
+ *
+ * Copyright (c) 2024, Nick Chan <towinchenmi@gmail.com>
+ */
+
+#include "t8012.dtsi"
+
+/ {
+ chassis-type = "embedded";
+
+ aliases {
+ serial0 = &serial0;
+ };
+
+ chosen {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ stdout-path = "serial0";
+ };
+
+ memory@800000000 {
+ device_type = "memory";
+ reg = <0x8 0 0 0>; /* To be filled by loader */
+ };
+
+ reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ /* To be filled by loader */
+ };
+};
+
+&serial0 {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/apple/t8012-pmgr.dtsi b/arch/arm64/boot/dts/apple/t8012-pmgr.dtsi
new file mode 100644
index 000000000000..35a462edd4af
--- /dev/null
+++ b/arch/arm64/boot/dts/apple/t8012-pmgr.dtsi
@@ -0,0 +1,837 @@
+// SPDX-License-Identifier: GPL-2.0+ OR MIT
+/*
+ * PMGR Power domains for the Apple T8012 "T2" SoC
+ *
+ * Copyright (c) 2024 Nick Chan <towinchenmi@gmail.com>
+ */
+
+&pmgr {
+ ps_cpu0: power-controller@80000 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80000 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "cpu0";
+ apple,always-on; /* Core device */
+ };
+
+ ps_cpu1: power-controller@80008 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80008 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "cpu1";
+ apple,always-on; /* Core device */
+ };
+
+ ps_cpm: power-controller@80040 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80040 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "cpm";
+ apple,always-on; /* Core device */
+ };
+
+ ps_sio_busif: power-controller@80158 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80158 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "sio_busif";
+ };
+
+ ps_sio_p: power-controller@80160 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80160 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "sio_p";
+ power-domains = <&ps_sio_busif>;
+ };
+
+ ps_iomux: power-controller@80150 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80150 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "iomux";
+ };
+
+ ps_sbr: power-controller@80100 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80100 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "sbr";
+ apple,always-on; /* Apple fabric, critical block */
+ };
+
+ ps_aic: power-controller@80108 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80108 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "aic";
+ apple,always-on; /* Core device */
+ };
+
+ ps_gpio: power-controller@80110 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80110 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "gpio";
+ };
+
+ ps_pcie_down_ref: power-controller@80138 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80138 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "pcie_down_ref";
+ };
+
+ ps_pcie_stg0_ref: power-controller@80140 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80140 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "pcie_stg0_ref";
+ };
+
+ ps_pcie_stg1_ref: power-controller@80148 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80148 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "pcie_stg1_ref";
+ };
+
+ ps_mca0: power-controller@80170 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80170 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "mca0";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_mca1: power-controller@80178 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80178 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "mca1";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_mca2: power-controller@80180 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80180 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "mca2";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_mca3: power-controller@80188 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80188 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "mca3";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_mca4: power-controller@80190 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80190 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "mca4";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_mca5: power-controller@80198 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80198 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "mca5";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_i2c0: power-controller@801a8 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x801a8 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "i2c0";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_i2c1: power-controller@801b0 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x801b0 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "i2c1";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_i2c2: power-controller@801b8 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x801b8 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "i2c2";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_i2c3: power-controller@801c0 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x801c0 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "i2c3";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_spi0: power-controller@801e0 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x801e0 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "spi0";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_spi1: power-controller@801e8 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x801e8 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "spi1";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_spi2: power-controller@801f0 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x801f0 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "spi2";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_spi3: power-controller@801f8 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x801f8 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "spi3";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_pwm0: power-controller@801a0 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x801a0 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "pwm0";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_sio: power-controller@80168 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80168 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "sio";
+ power-domains = <&ps_sio_p>;
+ apple,always-on; /* Core device */
+ };
+
+ ps_isp_sens0: power-controller@80120 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80120 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "isp_sens0";
+ };
+
+ ps_isp_sens1: power-controller@80128 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80128 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "isp_sens1";
+ };
+
+ ps_isp_sens2: power-controller@80130 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80130 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "isp_sens2";
+ };
+
+ ps_pms: power-controller@80118 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80118 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "pms";
+ apple,always-on; /* Core device */
+ };
+
+ ps_i2c4: power-controller@801c8 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x801c8 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "i2c4";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_i2c5: power-controller@801d0 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x801d0 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "i2c5";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_i2c6: power-controller@801d8 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x801d8 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "i2c6";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_usb: power-controller@80268 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80268 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "usb";
+ };
+
+ ps_usbctrl: power-controller@80270 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80270 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "usbctrl";
+ power-domains = <&ps_usb>;
+ };
+
+ ps_usb2host0: power-controller@80278 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80278 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "usb2host0";
+ power-domains = <&ps_usbctrl>;
+ };
+
+ ps_usb2host1: power-controller@80288 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80288 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "usb2host1";
+ power-domains = <&ps_usbctrl>;
+ };
+
+ ps_rtmux: power-controller@802a8 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x802a8 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "rtmux";
+ };
+
+ ps_media: power-controller@802d8 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x802d8 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "media";
+ };
+
+ ps_isp_sys: power-controller@802d0 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x802d0 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "isp_sys";
+ power-domains = <&ps_rtmux>;
+ };
+
+ ps_msr: power-controller@802e8 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x802e8 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "msr";
+ power-domains = <&ps_media>;
+ };
+
+ ps_jpg: power-controller@802e0 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x802e0 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "jpg";
+ power-domains = <&ps_media>;
+ };
+
+ ps_disp0_fe: power-controller@802b0 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x802b0 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "disp0_fe";
+ power-domains = <&ps_rtmux>;
+ };
+
+ ps_disp0_be: power-controller@802b8 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x802b8 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "disp0_be";
+ power-domains = <&ps_disp0_fe>;
+ };
+
+ ps_uart0: power-controller@80200 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80200 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "uart0";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_uart1: power-controller@80208 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80208 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "uart1";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_uart2: power-controller@80210 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80210 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "uart2";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_uart3: power-controller@80218 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80218 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "uart3";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_uart4: power-controller@80220 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80220 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "uart4";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_dpa: power-controller@80228 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80228 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "dpa";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_hfd0: power-controller@80230 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80230 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "hfd0";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_mcc: power-controller@80240 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80240 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "mcc";
+ apple,always-on; /* Memory cache controller */
+ };
+
+ ps_dcs0: power-controller@80248 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80248 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "dcs0";
+ apple,always-on; /* LPDDR4 interface */
+ };
+
+ ps_dcs1: power-controller@80250 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80250 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "dcs1";
+ apple,always-on; /* LPDDR4 interface */
+ };
+
+ ps_dcs2: power-controller@80258 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80258 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "dcs2";
+ /* Not used on some devices, to be disabled by loader */
+ apple,always-on; /* LPDDR4 interface */
+ };
+
+ ps_dcs3: power-controller@80260 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80260 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "dcs3";
+ /* Not used on some devices, to be disabled by loader */
+ apple,always-on; /* LPDDR4 interface */
+ };
+
+ ps_usb2host0_ohci: power-controller@80280 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80280 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "usb2host0_ohci";
+ power-domains = <&ps_usb2host0>;
+ };
+
+ ps_usbotg: power-controller@80290 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80290 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "usbotg";
+ power-domains = <&ps_usbctrl>;
+ };
+
+ ps_smx: power-controller@80298 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80298 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "smx";
+ apple,always-on; /* Apple fabric, critical block */
+ };
+
+ ps_sf: power-controller@802a0 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x802a0 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "sf";
+ apple,always-on; /* Apple fabric, critical block */
+ };
+
+ ps_mipi_dsi: power-controller@802c8 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x802c8 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "mipi_dsi";
+ power-domains = <&ps_disp0_be>;
+ };
+
+ ps_pmp: power-controller@802f0 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x802f0 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "pmp";
+ };
+
+ ps_pms_sram: power-controller@802f8 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x802f8 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "pms_sram";
+ };
+
+ ps_pcie_up_af: power-controller@80320 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80320 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "pcie_up_af";
+ power-domains = <&ps_iomux>;
+ };
+
+ ps_pcie_up: power-controller@80328 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80328 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "pcie_up";
+ power-domains = <&ps_pcie_up_af>;
+ };
+
+ ps_venc_sys: power-controller@80300 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80300 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "venc_sys";
+ power-domains = <&ps_media>;
+ };
+
+ ps_ans2: power-controller@80308 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80308 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "ans2";
+ power-domains = <&ps_iomux>;
+ };
+
+ ps_pcie_down: power-controller@80310 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80310 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "pcie_down";
+ power-domains = <&ps_iomux>;
+ };
+
+ ps_pcie_down_aux: power-controller@80318 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80318 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "pcie_down_aux";
+ };
+
+ ps_pcie_up_aux: power-controller@80330 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80330 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "pcie_up_aux";
+ power-domains = <&ps_pcie_up>;
+ };
+
+ ps_pcie_stg0: power-controller@80338 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80338 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "pcie_stg0";
+ power-domains = <&ps_ans2>;
+ };
+
+ ps_pcie_stg0_aux: power-controller@80340 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80340 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "pcie_stg0_aux";
+ };
+
+ ps_pcie_stg1: power-controller@80348 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80348 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "pcie_stg1";
+ power-domains = <&ps_ans2>;
+ };
+
+ ps_pcie_stg1_aux: power-controller@80350 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80350 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "pcie_stg1_aux";
+ };
+
+ ps_sep: power-controller@80400 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80400 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "sep";
+ apple,always-on; /* Locked on */
+ };
+
+ ps_isp_rsts0: power-controller@84000 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x84000 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "isp_rsts0";
+ power-domains = <&ps_isp_sys>;
+ };
+
+ ps_isp_rsts1: power-controller@84008 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x84008 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "isp_rsts1";
+ power-domains = <&ps_isp_sys>;
+ };
+
+ ps_isp_vis: power-controller@84010 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x84010 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "isp_vis";
+ power-domains = <&ps_isp_sys>;
+ };
+
+ ps_isp_be: power-controller@84018 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x84018 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "isp_be";
+ power-domains = <&ps_isp_sys>;
+ };
+
+ ps_isp_pearl: power-controller@84020 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x84020 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "isp_pearl";
+ power-domains = <&ps_isp_sys>;
+ };
+
+ ps_venc_pipe4: power-controller@88000 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x88000 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "venc_pipe4";
+ };
+
+ ps_venc_pipe5: power-controller@88008 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x88008 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "venc_pipe5";
+ };
+
+ ps_venc_me0: power-controller@88010 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x88010 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "venc_me0";
+ };
+
+ ps_venc_me1: power-controller@88018 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x88018 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "venc_me1";
+ };
+};
+
+&pmgr_mini {
+ ps_spmi: power-controller@80058 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80058 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "spmi";
+ apple,always-on; /* Core AON device */
+ };
+
+ ps_nub_aon: power-controller@80060 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80060 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "nub_aon";
+ apple,always-on; /* Core AON device */
+ };
+
+ ps_smc_fabric: power-controller@80030 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80030 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "smc_fabric";
+ apple,always-on; /* Core AON device */
+ };
+
+ ps_smc_aon: power-controller@80088 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80088 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "smc_aon";
+ apple,always-on; /* Core AON device */
+ };
+
+ ps_debug: power-controller@80050 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80050 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "debug";
+ };
+
+ ps_nub_sram: power-controller@801a0 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x801a0 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "nub_sram";
+ apple,always-on; /* Core AON device */
+ };
+
+ ps_nub_fabric: power-controller@80198 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80198 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "nub_fabric";
+ apple,always-on; /* Core AON device */
+ };
+
+ ps_smc_cpu: power-controller@801a8 {
+ compatible = "apple,t8010-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x801a8 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "smc_cpu";
+ power-domains = <&ps_smc_fabric &ps_smc_aon>;
+ };
+};
diff --git a/arch/arm64/boot/dts/apple/t8012-touchbar.dtsi b/arch/arm64/boot/dts/apple/t8012-touchbar.dtsi
new file mode 100644
index 000000000000..fc4a80d0c787
--- /dev/null
+++ b/arch/arm64/boot/dts/apple/t8012-touchbar.dtsi
@@ -0,0 +1,20 @@
+// SPDX-License-Identifier: GPL-2.0+ OR MIT
+/*
+ * Common Device Tree for T2 devices with a Touch Bar
+ *
+ * target-type: J152f, J213, J214k, J215, J223, J680, J780
+ *
+ * Copyright (c) 2024, Nick Chan <towinchenmi@gmail.com>
+ */
+
+/ {
+ chosen {
+ framebuffer0: framebuffer@0 {
+ compatible = "apple,simple-framebuffer", "simple-framebuffer";
+ reg = <0 0 0 0>; /* To be filled by loader */
+ power-domains = <&ps_disp0_fe &ps_disp0_be &ps_mipi_dsi>;
+ /* Format properties will be added by loader */
+ status = "disabled";
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/apple/t8012.dtsi b/arch/arm64/boot/dts/apple/t8012.dtsi
new file mode 100644
index 000000000000..42df2f51ad7b
--- /dev/null
+++ b/arch/arm64/boot/dts/apple/t8012.dtsi
@@ -0,0 +1,281 @@
+// SPDX-License-Identifier: GPL-2.0+ OR MIT
+/*
+ * Apple T8012 "T2" SoC
+ *
+ * Other names: H9M, "Gibraltar"
+ *
+ * Copyright (c) 2024, Nick Chan <towinchenmi@gmail.com>
+ */
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/interrupt-controller/apple-aic.h>
+#include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/pinctrl/apple.h>
+
+/ {
+ interrupt-parent = <&aic>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ clkref: clock-ref {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <24000000>;
+ clock-output-names = "clkref";
+ };
+
+ cpus {
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ cpu0: cpu@10000 {
+ compatible = "apple,hurricane-zephyr";
+ reg = <0x0 0x10000>;
+ cpu-release-addr = <0 0>; /* To be filled by loader */
+ operating-points-v2 = <&fusion_opp>;
+ performance-domains = <&cpufreq>;
+ enable-method = "spin-table";
+ device_type = "cpu";
+ };
+
+ cpu1: cpu@10001 {
+ compatible = "apple,hurricane-zephyr";
+ reg = <0x0 0x10001>;
+ cpu-release-addr = <0 0>; /* To be filled by loader */
+ operating-points-v2 = <&fusion_opp>;
+ performance-domains = <&cpufreq>;
+ enable-method = "spin-table";
+ device_type = "cpu";
+ };
+ };
+
+ fusion_opp: opp-table {
+ compatible = "operating-points-v2";
+
+ /*
+ * Apple Fusion Architecture: Hardware big.LITTLE switcher
+ * that uses p-state transitions to switch between cores.
+ * Only one type of core can be active at a given time.
+ *
+ * The E-core frequencies are adjusted so performance scales
+ * linearly with reported clock speed.
+ */
+
+ opp01 {
+ opp-hz = /bits/ 64 <172000000>; /* 300 MHz, E-core */
+ opp-level = <1>;
+ clock-latency-ns = <11000>;
+ };
+ opp02 {
+ opp-hz = /bits/ 64 <230000000>; /* 396 MHz, E-core */
+ opp-level = <2>;
+ clock-latency-ns = <140000>;
+ };
+ opp03 {
+ opp-hz = /bits/ 64 <425000000>; /* 732 MHz, E-core */
+ opp-level = <3>;
+ clock-latency-ns = <110000>;
+ };
+ opp04 {
+ opp-hz = /bits/ 64 <637000000>; /* 1092 MHz, E-core */
+ opp-level = <4>;
+ clock-latency-ns = <130000>;
+ };
+ opp05 {
+ opp-hz = /bits/ 64 <756000000>;
+ opp-level = <5>;
+ clock-latency-ns = <130000>;
+ };
+ opp06 {
+ opp-hz = /bits/ 64 <1056000000>;
+ opp-level = <6>;
+ clock-latency-ns = <130000>;
+ };
+ opp07 {
+ opp-hz = /bits/ 64 <1356000000>;
+ opp-level = <7>;
+ clock-latency-ns = <130000>;
+ };
+ opp08 {
+ opp-hz = /bits/ 64 <1644000000>;
+ opp-level = <8>;
+ clock-latency-ns = <135000>;
+ };
+ opp09 {
+ opp-hz = /bits/ 64 <1944000000>;
+ opp-level = <9>;
+ clock-latency-ns = <140000>;
+ };
+ opp10 {
+ opp-hz = /bits/ 64 <2244000000>;
+ opp-level = <10>;
+ clock-latency-ns = <150000>;
+ };
+#if 0
+ /* Not available until CPU deep sleep is implemented */
+ opp11 {
+ opp-hz = /bits/ 64 <2340000000>;
+ opp-level = <11>;
+ clock-latency-ns = <150000>;
+ turbo-mode;
+ };
+#endif
+ };
+
+ soc {
+ compatible = "simple-bus";
+ #address-cells = <2>;
+ #size-cells = <2>;
+ nonposted-mmio;
+ ranges;
+
+ cpufreq: performance-controller@202f20000 {
+ compatible = "apple,t8010-cluster-cpufreq", "apple,t8103-cluster-cpufreq", "apple,cluster-cpufreq";
+ reg = <0x2 0x02f20000 0 0x1000>;
+ #performance-domain-cells = <0>;
+ };
+
+ serial0: serial@20a600000 {
+ compatible = "apple,s5l-uart";
+ reg = <0x2 0x0a600000 0x0 0x4000>;
+ reg-io-width = <4>;
+ interrupt-parent = <&aic>;
+ interrupts = <AIC_IRQ 271 IRQ_TYPE_LEVEL_HIGH>;
+ /* Use the bootloader-enabled clocks for now. */
+ clocks = <&clkref>, <&clkref>;
+ clock-names = "uart", "clk_uart_baud0";
+ power-domains = <&ps_uart0>;
+ status = "disabled";
+ };
+
+ pmgr: power-management@20e000000 {
+ compatible = "apple,t8010-pmgr", "apple,pmgr", "syscon", "simple-mfd";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ reg = <0x2 0xe000000 0 0x8c000>;
+ };
+
+ aic: interrupt-controller@20e100000 {
+ compatible = "apple,t8010-aic", "apple,aic";
+ reg = <0x2 0x0e100000 0x0 0x100000>;
+ #interrupt-cells = <3>;
+ interrupt-controller;
+ power-domains = <&ps_aic>;
+ };
+
+ pinctrl_ap: pinctrl@20f100000 {
+ compatible = "apple,t8010-pinctrl", "apple,pinctrl";
+ reg = <0x2 0x0f100000 0x0 0x100000>;
+ power-domains = <&ps_gpio>;
+
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-ranges = <&pinctrl_ap 0 0 221>;
+ apple,npins = <221>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ interrupt-parent = <&aic>;
+ interrupts = <AIC_IRQ 45 IRQ_TYPE_LEVEL_HIGH>,
+ <AIC_IRQ 46 IRQ_TYPE_LEVEL_HIGH>,
+ <AIC_IRQ 47 IRQ_TYPE_LEVEL_HIGH>,
+ <AIC_IRQ 48 IRQ_TYPE_LEVEL_HIGH>,
+ <AIC_IRQ 49 IRQ_TYPE_LEVEL_HIGH>,
+ <AIC_IRQ 50 IRQ_TYPE_LEVEL_HIGH>,
+ <AIC_IRQ 51 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ pinctrl_aop: pinctrl@2100f0000 {
+ compatible = "apple,t8010-pinctrl", "apple,pinctrl";
+ reg = <0x2 0x100f0000 0x0 0x10000>;
+
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-ranges = <&pinctrl_aop 0 0 41>;
+ apple,npins = <41>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ interrupt-parent = <&aic>;
+ interrupts = <AIC_IRQ 131 IRQ_TYPE_LEVEL_HIGH>,
+ <AIC_IRQ 132 IRQ_TYPE_LEVEL_HIGH>,
+ <AIC_IRQ 133 IRQ_TYPE_LEVEL_HIGH>,
+ <AIC_IRQ 134 IRQ_TYPE_LEVEL_HIGH>,
+ <AIC_IRQ 135 IRQ_TYPE_LEVEL_HIGH>,
+ <AIC_IRQ 136 IRQ_TYPE_LEVEL_HIGH>,
+ <AIC_IRQ 137 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ pinctrl_nub: pinctrl@2111f0000 {
+ compatible = "apple,t8010-pinctrl", "apple,pinctrl";
+ reg = <0x2 0x111f0000 0x0 0x1000>;
+
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-ranges = <&pinctrl_nub 0 0 19>;
+ apple,npins = <19>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ interrupt-parent = <&aic>;
+ interrupts = <AIC_IRQ 164 IRQ_TYPE_LEVEL_HIGH>,
+ <AIC_IRQ 165 IRQ_TYPE_LEVEL_HIGH>,
+ <AIC_IRQ 166 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ pmgr_mini: power-management@211200000 {
+ compatible = "apple,t8010-pmgr", "apple,pmgr", "syscon", "simple-mfd";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ reg = <0x2 0x11200000 0 0x84000>;
+ };
+
+ wdt: watchdog@2112b0000 {
+ compatible = "apple,t8010-wdt", "apple,wdt";
+ reg = <0x2 0x112b0000 0x0 0x4000>;
+ clocks = <&clkref>;
+ interrupt-parent = <&aic>;
+ interrupts = <AIC_IRQ 168 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ pinctrl_smc: pinctrl@212024000 {
+ compatible = "apple,t8010-pinctrl", "apple,pinctrl";
+ reg = <0x2 0x12024000 0x0 0x1000>;
+ power-domains = <&ps_smc_cpu>;
+
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-ranges = <&pinctrl_smc 0 0 81>;
+ apple,npins = <81>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ interrupt-parent = <&aic>;
+ interrupts = <AIC_IRQ 195 IRQ_TYPE_LEVEL_HIGH>,
+ <AIC_IRQ 196 IRQ_TYPE_LEVEL_HIGH>,
+ <AIC_IRQ 197 IRQ_TYPE_LEVEL_HIGH>,
+ <AIC_IRQ 198 IRQ_TYPE_LEVEL_HIGH>,
+ <AIC_IRQ 199 IRQ_TYPE_LEVEL_HIGH>,
+ <AIC_IRQ 200 IRQ_TYPE_LEVEL_HIGH>,
+ <AIC_IRQ 201 IRQ_TYPE_LEVEL_HIGH>;
+ /*
+ * SMC is not yet supported and accessing this pinctrl while SMC is
+ * suspended results in a hang.
+ */
+ status = "disabled";
+ };
+ };
+
+ timer {
+ compatible = "arm,armv8-timer";
+ interrupt-parent = <&aic>;
+ interrupt-names = "phys", "virt";
+ /* Note that T2 doesn't actually have a hypervisor (EL2 is not implemented). */
+ interrupts = <AIC_FIQ AIC_TMR_GUEST_PHYS IRQ_TYPE_LEVEL_HIGH>,
+ <AIC_FIQ AIC_TMR_GUEST_VIRT IRQ_TYPE_LEVEL_HIGH>;
+ };
+};
+
+#include "t8012-pmgr.dtsi"
diff --git a/arch/arm64/boot/dts/apple/t8015-8.dtsi b/arch/arm64/boot/dts/apple/t8015-8.dtsi
index b6505b5185bd..0300ee1a2ffb 100644
--- a/arch/arm64/boot/dts/apple/t8015-8.dtsi
+++ b/arch/arm64/boot/dts/apple/t8015-8.dtsi
@@ -11,3 +11,7 @@
/ {
chassis-type = "handset";
};
+
+&dwi_bl {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/apple/t8015-common.dtsi b/arch/arm64/boot/dts/apple/t8015-common.dtsi
index 69258a33ea50..498f58fb9715 100644
--- a/arch/arm64/boot/dts/apple/t8015-common.dtsi
+++ b/arch/arm64/boot/dts/apple/t8015-common.dtsi
@@ -24,6 +24,7 @@
framebuffer0: framebuffer@0 {
compatible = "apple,simple-framebuffer", "simple-framebuffer";
reg = <0 0 0 0>; /* To be filled by loader */
+ power-domains = <&ps_disp0_be &ps_mipi_dsi &ps_disp0_hilo &ps_disp0_ppp>;
/* Format properties will be added by loader */
status = "disabled";
};
diff --git a/arch/arm64/boot/dts/apple/t8015-pmgr.dtsi b/arch/arm64/boot/dts/apple/t8015-pmgr.dtsi
new file mode 100644
index 000000000000..e238c2d2732f
--- /dev/null
+++ b/arch/arm64/boot/dts/apple/t8015-pmgr.dtsi
@@ -0,0 +1,931 @@
+// SPDX-License-Identifier: GPL-2.0+ OR MIT
+/*
+ * PMGR Power domains for the Apple T8015 "A11" SoC
+ *
+ * Copyright (c) 2024, Nick Chan <towinchenmi@gmail.com>
+ */
+
+&pmgr {
+ ps_cpu0: power-controller@80000 {
+ compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80000 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "cpu0";
+ apple,always-on; /* Core device */
+ };
+
+ ps_cpu1: power-controller@80008 {
+ compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80008 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "cpu1";
+ apple,always-on; /* Core device */
+ };
+
+ ps_cpu2: power-controller@80010 {
+ compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80010 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "cpu2";
+ apple,always-on; /* Core device */
+ };
+
+ ps_cpu3: power-controller@80018 {
+ compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80018 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "cpu3";
+ apple,always-on; /* Core device */
+ };
+
+ ps_cpu4: power-controller@80020 {
+ compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80020 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "cpu4";
+ apple,always-on; /* Core device */
+ };
+
+ ps_cpu5: power-controller@80028 {
+ compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80028 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "cpu5";
+ apple,always-on; /* Core device */
+ };
+
+ ps_cpm: power-controller@80040 {
+ compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80040 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "cpm";
+ apple,always-on; /* Core device */
+ };
+
+ ps_sio_busif: power-controller@80158 {
+ compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80158 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "sio_busif";
+ };
+
+ ps_sio_p: power-controller@80160 {
+ compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80160 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "sio_p";
+ power-domains = <&ps_sio_busif>;
+ };
+
+ ps_sbr: power-controller@80100 {
+ compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80100 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "sbr";
+ apple,always-on; /* Apple fabric, critical block */
+ };
+
+ ps_aic: power-controller@80108 {
+ compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80108 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "aic";
+ apple,always-on; /* Core device */
+ };
+
+ ps_dwi: power-controller@80110 {
+ compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80110 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "dwi";
+ };
+
+ ps_gpio: power-controller@80118 {
+ compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80118 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "gpio";
+ };
+
+ ps_pms: power-controller@80120 {
+ compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80120 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "pms";
+ apple,always-on; /* Core device */
+ };
+
+ ps_pcie_ref: power-controller@80148 {
+ compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80148 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "pcie_ref";
+ };
+
+ ps_mca0: power-controller@80170 {
+ compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80170 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "mca0";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_mca1: power-controller@80178 {
+ compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80178 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "mca1";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_mca2: power-controller@80180 {
+ compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80180 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "mca2";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_mca3: power-controller@80188 {
+ compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80188 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "mca3";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_mca4: power-controller@80190 {
+ compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80190 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "mca4";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_pwm0: power-controller@801a0 {
+ compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x801a0 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "pwm0";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_i2c0: power-controller@801a8 {
+ compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x801a8 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "i2c0";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_i2c1: power-controller@801b0 {
+ compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x801b0 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "i2c1";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_i2c2: power-controller@801b8 {
+ compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x801b8 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "i2c2";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_i2c3: power-controller@801c0 {
+ compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x801c0 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "i2c3";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_spi0: power-controller@801c8 {
+ compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x801c8 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "spi0";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_spi1: power-controller@801d0 {
+ compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x801d0 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "spi1";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_spi2: power-controller@801d8 {
+ compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x801d8 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "spi2";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_spi3: power-controller@801e0 {
+ compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x801e0 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "spi3";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_uart0: power-controller@801e8 {
+ compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x801e8 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "uart0";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_uart1: power-controller@801f0 {
+ compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x801f0 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "uart1";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_uart2: power-controller@801f8 {
+ compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x801f8 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "uart2";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_sio: power-controller@80168 {
+ compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80168 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "sio";
+ power-domains = <&ps_sio_p>;
+ apple,always-on; /* Core device */
+ };
+
+ ps_hsicphy: power-controller@80128 {
+ compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80128 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "hsicphy";
+ power-domains = <&ps_usb2host1>;
+ };
+
+ ps_ispsens0: power-controller@80130 {
+ compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80130 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "ispsens0";
+ };
+
+ ps_ispsens1: power-controller@80138 {
+ compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80138 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "ispsens1";
+ };
+
+ ps_ispsens2: power-controller@80140 {
+ compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80140 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "ispsens2";
+ };
+
+ ps_mca5: power-controller@80198 {
+ compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80198 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "mca5";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_usb: power-controller@80270 {
+ compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80270 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "usb";
+ };
+
+ ps_usbctlreg: power-controller@80278 {
+ compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80278 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "usbctlreg";
+ power-domains = <&ps_usb>;
+ };
+
+ ps_usb2host0: power-controller@80280 {
+ compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80280 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "usb2host0";
+ power-domains = <&ps_usbctlreg>;
+ };
+
+ ps_usb2host1: power-controller@80290 {
+ compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80290 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "usb2host1";
+ power-domains = <&ps_usbctlreg>;
+ };
+
+ ps_rtmux: power-controller@802b0 {
+ compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x802b0 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "rtmux";
+ };
+
+ ps_media: power-controller@802f0 {
+ compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x802f0 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "media";
+ };
+
+ ps_jpg: power-controller@802f8 {
+ compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x802f8 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "jpg";
+ power-domains = <&ps_media>;
+ };
+
+ ps_disp0_fe: power-controller@802b8 {
+ compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x802b8 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "disp0_fe";
+ power-domains = <&ps_rtmux>;
+ };
+
+ ps_disp0_be: power-controller@802c0 {
+ compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x802c0 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "disp0_be";
+ power-domains = <&ps_disp0_fe>;
+ };
+
+ ps_disp0_gp: power-controller@802c8 {
+ compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x802c8 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "disp0_gp";
+ power-domains = <&ps_disp0_be>;
+ status = "disabled";
+ };
+
+ ps_uart3: power-controller@80200 {
+ compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80200 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "uart3";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_uart4: power-controller@80208 {
+ compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80208 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "uart4";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_uart5: power-controller@80210 {
+ compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80210 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "uart5";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_uart6: power-controller@80218 {
+ compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80218 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "uart6";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_uart7: power-controller@80220 {
+ compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80220 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "uart7";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_uart8: power-controller@80228 {
+ compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80228 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "uart8";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_hfd0: power-controller@80238 {
+ compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80238 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "hfd0";
+ power-domains = <&ps_sio_p>;
+ };
+
+ ps_mcc: power-controller@80248 {
+ compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80248 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "mcc";
+ apple,always-on; /* Memory cache controller */
+ };
+
+ ps_dcs0: power-controller@80250 {
+ compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80250 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "dcs0";
+ apple,always-on; /* LPDDR4X interface */
+ };
+
+ ps_dcs1: power-controller@80258 {
+ compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80258 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "dcs1";
+ apple,always-on; /* LPDDR4X interface */
+ };
+
+ ps_dcs2: power-controller@80260 {
+ compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80260 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "dcs2";
+ apple,always-on; /* LPDDR4X interface */
+ };
+
+ ps_dcs3: power-controller@80268 {
+ compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80268 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "dcs3";
+ apple,always-on; /* LPDDR4X interface */
+ };
+
+ ps_usb2host0_ohci: power-controller@80288 {
+ compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80288 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "usb2host0_ohci";
+ power-domains = <&ps_usb2host0>;
+ };
+
+ ps_usb2dev: power-controller@80298 {
+ compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80298 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "usb2dev";
+ power-domains = <&ps_usbctlreg>;
+ };
+
+ ps_smx: power-controller@802a0 {
+ compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x802a0 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "smx";
+ apple,always-on; /* Apple fabric, critical block */
+ };
+
+ ps_sf: power-controller@802a8 {
+ compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x802a8 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "sf";
+ apple,always-on; /* Apple fabric, critical block */
+ };
+
+ ps_mipi_dsi: power-controller@802d8 {
+ compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x802d8 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "mipi_dsi";
+ power-domains = <&ps_rtmux>;
+ };
+
+ ps_dp: power-controller@802e0 {
+ compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x802e0 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "dp";
+ power-domains = <&ps_disp0_be>;
+ };
+
+ ps_dpa: power-controller@80230 {
+ compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80230 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "dpa";
+ };
+
+ ps_disp0_be_2x: power-controller@802d0 {
+ compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x802d0 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "disp0_be_2x";
+ power-domains = <&ps_disp0_be>;
+ };
+
+ ps_isp_sys: power-controller@80350 {
+ compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80350 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "isp_sys";
+ power-domains = <&ps_rtmux>;
+ };
+
+ ps_msr: power-controller@80300 {
+ compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80300 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "msr";
+ power-domains = <&ps_media>;
+ };
+
+ ps_venc_sys: power-controller@80398 {
+ compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80398 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "venc_sys";
+ power-domains = <&ps_media>;
+ };
+
+ ps_pmp: power-controller@80308 {
+ compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80308 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "pmp";
+ };
+
+ ps_pms_sram: power-controller@80310 {
+ compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80310 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "pms_sram";
+ };
+
+ ps_pcie: power-controller@80318 {
+ compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80318 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "pcie";
+ };
+
+ ps_pcie_aux: power-controller@80320 {
+ compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80320 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "pcie_aux";
+ };
+
+ ps_vdec0: power-controller@80388 {
+ compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80388 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "vdec0";
+ power-domains = <&ps_media>;
+ };
+
+ ps_gfx: power-controller@80338 {
+ compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80338 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "gfx";
+ };
+
+ ps_ans2: power-controller@80328 {
+ compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80328 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "ans2";
+ apple,always-on;
+ };
+
+ ps_pcie_direct: power-controller@80330 {
+ compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80330 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "pcie_direct";
+ apple,always-on;
+ };
+
+ ps_avd_sys: power-controller@803a8 {
+ compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x803a8 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "avd_sys";
+ power-domains = <&ps_media>;
+ };
+
+ ps_sep: power-controller@80400 {
+ compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80400 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "sep";
+ apple,always-on; /* Locked on */
+ };
+
+ ps_disp0_gp0: power-controller@80830 {
+ compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80830 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "disp0_gp0";
+ power-domains = <&ps_disp0_gp>;
+ status = "disabled";
+ };
+
+ ps_disp0_gp1: power-controller@80838 {
+ compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80838 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "disp0_gp1";
+ status = "disabled";
+ };
+
+ ps_disp0_ppp: power-controller@80840 {
+ compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80840 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "disp0_ppp";
+ };
+
+ ps_disp0_hilo: power-controller@80848 {
+ compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80848 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "disp0_hilo";
+ };
+
+ ps_isp_rsts0: power-controller@84000 {
+ compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x84000 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "isp_rsts0";
+ power-domains = <&ps_isp_sys>;
+ };
+
+ ps_isp_rsts1: power-controller@84008 {
+ compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x84008 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "isp_rsts1";
+ power-domains = <&ps_isp_sys>;
+ };
+
+ ps_isp_vis: power-controller@84010 {
+ compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x84010 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "isp_vis";
+ power-domains = <&ps_isp_sys>;
+ };
+
+ ps_isp_be: power-controller@84018 {
+ compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x84018 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "isp_be";
+ power-domains = <&ps_isp_sys>;
+ };
+
+ ps_isp_pearl: power-controller@84020 {
+ compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x84020 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "isp_pearl";
+ power-domains = <&ps_isp_sys>;
+ };
+
+ ps_dprx: power-controller@84028 {
+ compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x84028 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "dprx";
+ power-domains = <&ps_isp_sys>;
+ };
+
+ ps_isp_cnv: power-controller@84030 {
+ compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x84030 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "isp_cnv";
+ power-domains = <&ps_isp_sys>;
+ };
+
+ ps_venc_dma: power-controller@88000 {
+ compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x88000 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "venc_dma";
+ };
+
+ ps_venc_pipe4: power-controller@88010 {
+ compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x88010 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "venc_pipe4";
+ };
+
+ ps_venc_pipe5: power-controller@88018 {
+ compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x88018 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "venc_pipe5";
+ };
+
+ ps_venc_me0: power-controller@88020 {
+ compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x88020 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "venc_me0";
+ };
+
+ ps_venc_me1: power-controller@88028 {
+ compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x88028 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "venc_me1";
+ };
+};
+
+&pmgr_mini {
+ ps_aop_base: power-controller@80008 {
+ compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80008 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "aop_base";
+ power-domains = <&ps_aop_cpu &ps_aop_filter>;
+ apple,always-on; /* Always on processor */
+ };
+
+ ps_debug: power-controller@80050 {
+ compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80050 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "debug";
+ };
+
+ ps_aop_cpu: power-controller@80020 {
+ compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80020 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "aop_cpu";
+ };
+
+ ps_aop_filter: power-controller@80000 {
+ compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80000 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "aop_filter";
+ };
+
+ ps_spmi: power-controller@80058 {
+ compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80058 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "spmi";
+ apple,always-on; /* System Power Management Interface */
+ };
+
+ ps_smc_i2cm1: power-controller@800a8 {
+ compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x800a8 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "smc_i2cm1";
+ };
+
+ ps_smc_fabric: power-controller@80030 {
+ compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80030 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "smc_fabric";
+ };
+
+ ps_smc_cpu: power-controller@80140 {
+ compatible = "apple,t8015-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x80140 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "smc_cpu";
+ power-domains = <&ps_smc_fabric &ps_smc_i2cm1>;
+ };
+};
diff --git a/arch/arm64/boot/dts/apple/t8015.dtsi b/arch/arm64/boot/dts/apple/t8015.dtsi
index 8828d830e5be..4d54afcecd50 100644
--- a/arch/arm64/boot/dts/apple/t8015.dtsi
+++ b/arch/arm64/boot/dts/apple/t8015.dtsi
@@ -58,6 +58,9 @@
compatible = "apple,mistral";
reg = <0x0 0x0>;
cpu-release-addr = <0 0>; /* To be filled by loader */
+ performance-domains = <&cpufreq_e>;
+ operating-points-v2 = <&mistral_opp>;
+ capacity-dmips-mhz = <633>;
enable-method = "spin-table";
device_type = "cpu";
};
@@ -66,6 +69,9 @@
compatible = "apple,mistral";
reg = <0x0 0x1>;
cpu-release-addr = <0 0>; /* To be filled by loader */
+ performance-domains = <&cpufreq_e>;
+ operating-points-v2 = <&mistral_opp>;
+ capacity-dmips-mhz = <633>;
enable-method = "spin-table";
device_type = "cpu";
};
@@ -74,6 +80,9 @@
compatible = "apple,mistral";
reg = <0x0 0x2>;
cpu-release-addr = <0 0>; /* To be filled by loader */
+ performance-domains = <&cpufreq_e>;
+ operating-points-v2 = <&mistral_opp>;
+ capacity-dmips-mhz = <633>;
enable-method = "spin-table";
device_type = "cpu";
};
@@ -82,6 +91,9 @@
compatible = "apple,mistral";
reg = <0x0 0x3>;
cpu-release-addr = <0 0>; /* To be filled by loader */
+ performance-domains = <&cpufreq_e>;
+ operating-points-v2 = <&mistral_opp>;
+ capacity-dmips-mhz = <633>;
enable-method = "spin-table";
device_type = "cpu";
};
@@ -90,6 +102,9 @@
compatible = "apple,monsoon";
reg = <0x0 0x10004>;
cpu-release-addr = <0 0>; /* To be filled by loader */
+ performance-domains = <&cpufreq_p>;
+ operating-points-v2 = <&monsoon_opp>;
+ capacity-dmips-mhz = <1024>;
enable-method = "spin-table";
device_type = "cpu";
};
@@ -98,11 +113,107 @@
compatible = "apple,monsoon";
reg = <0x0 0x10005>;
cpu-release-addr = <0 0>; /* To be filled by loader */
+ performance-domains = <&cpufreq_p>;
+ operating-points-v2 = <&monsoon_opp>;
+ capacity-dmips-mhz = <1024>;
enable-method = "spin-table";
device_type = "cpu";
};
};
+ mistral_opp: opp-table-0 {
+ compatible = "operating-points-v2";
+
+ opp01 {
+ opp-hz = /bits/ 64 <300000000>;
+ opp-level = <1>;
+ clock-latency-ns = <1800>;
+ };
+ opp02 {
+ opp-hz = /bits/ 64 <453000000>;
+ opp-level = <2>;
+ clock-latency-ns = <140000>;
+ };
+ opp03 {
+ opp-hz = /bits/ 64 <672000000>;
+ opp-level = <3>;
+ clock-latency-ns = <105000>;
+ };
+ opp04 {
+ opp-hz = /bits/ 64 <972000000>;
+ opp-level = <4>;
+ clock-latency-ns = <115000>;
+ };
+ opp05 {
+ opp-hz = /bits/ 64 <1272000000>;
+ opp-level = <5>;
+ clock-latency-ns = <125000>;
+ };
+ opp06 {
+ opp-hz = /bits/ 64 <1572000000>;
+ opp-level = <6>;
+ clock-latency-ns = <135000>;
+ };
+#if 0
+ /* Not available until CPU deep sleep is implemented */
+ opp07 {
+ opp-hz = /bits/ 64 <1680000000>;
+ opp-level = <7>;
+ clock-latency-ns = <135000>;
+ turbo-mode;
+ };
+#endif
+ };
+
+ monsoon_opp: opp-table-1 {
+ compatible = "operating-points-v2";
+
+ opp01 {
+ opp-hz = /bits/ 64 <300000000>;
+ opp-level = <1>;
+ clock-latency-ns = <1400>;
+ };
+ opp02 {
+ opp-hz = /bits/ 64 <453000000>;
+ opp-level = <2>;
+ clock-latency-ns = <140000>;
+ };
+ opp03 {
+ opp-hz = /bits/ 64 <853000000>;
+ opp-level = <3>;
+ clock-latency-ns = <110000>;
+ };
+ opp04 {
+ opp-hz = /bits/ 64 <1332000000>;
+ opp-level = <4>;
+ clock-latency-ns = <110000>;
+ };
+ opp05 {
+ opp-hz = /bits/ 64 <1812000000>;
+ opp-level = <5>;
+ clock-latency-ns = <125000>;
+ };
+ opp06 {
+ opp-hz = /bits/ 64 <2064000000>;
+ opp-level = <6>;
+ clock-latency-ns = <130000>;
+ };
+ opp07 {
+ opp-hz = /bits/ 64 <2304000000>;
+ opp-level = <7>;
+ clock-latency-ns = <140000>;
+ };
+#if 0
+ /* Not available until CPU deep sleep is implemented */
+ opp08 {
+ opp-hz = /bits/ 64 <2376000000>;
+ opp-level = <8>;
+ clock-latency-ns = <140000>;
+ turbo-mode;
+ };
+#endif
+ };
+
soc {
compatible = "simple-bus";
#address-cells = <2>;
@@ -110,6 +221,18 @@
nonposted-mmio;
ranges;
+ cpufreq_e: performance-controller@208e20000 {
+ compatible = "apple,t8015-cluster-cpufreq", "apple,t8103-cluster-cpufreq", "apple,cluster-cpufreq";
+ reg = <0x2 0x08e20000 0 0x1000>;
+ #performance-domain-cells = <0>;
+ };
+
+ cpufreq_p: performance-controller@208ea0000 {
+ compatible = "apple,t8015-cluster-cpufreq", "apple,t8103-cluster-cpufreq", "apple,cluster-cpufreq";
+ reg = <0x2 0x08ea0000 0 0x1000>;
+ #performance-domain-cells = <0>;
+ };
+
serial0: serial@22e600000 {
compatible = "apple,s5l-uart";
reg = <0x2 0x2e600000 0x0 0x4000>;
@@ -119,6 +242,7 @@
/* Use the bootloader-enabled clocks for now. */
clocks = <&clkref>, <&clkref>;
clock-names = "uart", "clk_uart_baud0";
+ power-domains = <&ps_uart0>;
status = "disabled";
};
@@ -127,11 +251,28 @@
reg = <0x2 0x32100000 0x0 0x8000>;
#interrupt-cells = <3>;
interrupt-controller;
+ power-domains = <&ps_aic>;
+ };
+
+ pmgr: power-management@232000000 {
+ compatible = "apple,t8015-pmgr", "apple,pmgr", "syscon", "simple-mfd";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ reg = <0x2 0x32000000 0 0x8c000>;
+ };
+
+ dwi_bl: backlight@232200080 {
+ compatible = "apple,t8015-dwi-bl", "apple,dwi-bl";
+ reg = <0x2 0x32200080 0x0 0x8>;
+ power-domains = <&ps_dwi>;
+ status = "disabled";
};
pinctrl_ap: pinctrl@233100000 {
compatible = "apple,t8015-pinctrl", "apple,pinctrl";
reg = <0x2 0x33100000 0x0 0x1000>;
+ power-domains = <&ps_gpio>;
gpio-controller;
#gpio-cells = <2>;
@@ -188,6 +329,14 @@
<AIC_IRQ 170 IRQ_TYPE_LEVEL_HIGH>;
};
+ pmgr_mini: power-management@235200000 {
+ compatible = "apple,t8015-pmgr", "apple,pmgr", "syscon", "simple-mfd";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ reg = <0x2 0x35200000 0 0x84000>;
+ };
+
wdt: watchdog@2352b0000 {
compatible = "apple,t8015-wdt", "apple,wdt";
reg = <0x2 0x352b0000 0x0 0x4000>;
@@ -232,3 +381,5 @@
<AIC_FIQ AIC_TMR_GUEST_VIRT IRQ_TYPE_LEVEL_HIGH>;
};
};
+
+#include "t8015-pmgr.dtsi"
diff --git a/arch/arm64/boot/dts/apple/t8103-j293.dts b/arch/arm64/boot/dts/apple/t8103-j293.dts
index 56b0c67bfcda..2dfe7b895b2b 100644
--- a/arch/arm64/boot/dts/apple/t8103-j293.dts
+++ b/arch/arm64/boot/dts/apple/t8103-j293.dts
@@ -17,6 +17,14 @@
compatible = "apple,j293", "apple,t8103", "apple,arm-platform";
model = "Apple MacBook Pro (13-inch, M1, 2020)";
+ /*
+ * All of these are used by the bootloader to pass calibration
+ * blobs and other device-specific properties
+ */
+ aliases {
+ touchbar0 = &touchbar0;
+ };
+
led-controller {
compatible = "pwm-leds";
led-0 {
@@ -49,3 +57,53 @@
&fpwm1 {
status = "okay";
};
+
+&spi0 {
+ cs-gpios = <&pinctrl_ap 109 GPIO_ACTIVE_LOW>;
+ status = "okay";
+
+ touchbar0: touchbar@0 {
+ compatible = "apple,j293-touchbar";
+ reg = <0>;
+ spi-max-frequency = <11500000>;
+ spi-cs-setup-delay-ns = <2000>;
+ spi-cs-hold-delay-ns = <2000>;
+ reset-gpios = <&pinctrl_ap 139 GPIO_ACTIVE_LOW>;
+ interrupts-extended = <&pinctrl_ap 194 IRQ_TYPE_EDGE_FALLING>;
+ firmware-name = "apple/dfrmtfw-j293.bin";
+ touchscreen-size-x = <23045>;
+ touchscreen-size-y = <640>;
+ touchscreen-inverted-y;
+ };
+};
+
+&display_dfr {
+ status = "okay";
+};
+
+&dfr_mipi_out {
+ dfr_mipi_out_panel: endpoint@0 {
+ reg = <0>;
+ remote-endpoint = <&dfr_panel_in>;
+ };
+};
+
+&displaydfr_mipi {
+ status = "okay";
+
+ dfr_panel: panel@0 {
+ compatible = "apple,j293-summit", "apple,summit";
+ reg = <0>;
+ max-brightness = <255>;
+
+ port {
+ dfr_panel_in: endpoint {
+ remote-endpoint = <&dfr_mipi_out_panel>;
+ };
+ };
+ };
+};
+
+&displaydfr_dart {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/apple/t8103-jxxx.dtsi b/arch/arm64/boot/dts/apple/t8103-jxxx.dtsi
index 5988a4eb6efa..8e82231acab5 100644
--- a/arch/arm64/boot/dts/apple/t8103-jxxx.dtsi
+++ b/arch/arm64/boot/dts/apple/t8103-jxxx.dtsi
@@ -90,3 +90,5 @@
&nco_clkref {
clock-frequency = <900000000>;
};
+
+#include "spi1-nvram.dtsi"
diff --git a/arch/arm64/boot/dts/apple/t8103-pmgr.dtsi b/arch/arm64/boot/dts/apple/t8103-pmgr.dtsi
index 9645861a858c..c41c57d63997 100644
--- a/arch/arm64/boot/dts/apple/t8103-pmgr.dtsi
+++ b/arch/arm64/boot/dts/apple/t8103-pmgr.dtsi
@@ -387,6 +387,15 @@
power-domains = <&ps_sio>, <&ps_spi_p>;
};
+ ps_spi4: power-controller@260 {
+ compatible = "apple,t8103-pmgr-pwrstate", "apple,pmgr-pwrstate";
+ reg = <0x260 4>;
+ #power-domain-cells = <0>;
+ #reset-cells = <0>;
+ label = "spi4";
+ power-domains = <&ps_sio>, <&ps_spi_p>;
+ };
+
ps_uart_n: power-controller@268 {
compatible = "apple,t8103-pmgr-pwrstate", "apple,pmgr-pwrstate";
reg = <0x268 4>;
@@ -558,15 +567,6 @@
apple,always-on; /* Memory controller */
};
- ps_spi4: power-controller@260 {
- compatible = "apple,t8103-pmgr-pwrstate", "apple,pmgr-pwrstate";
- reg = <0x260 4>;
- #power-domain-cells = <0>;
- #reset-cells = <0>;
- label = "spi4";
- power-domains = <&ps_sio>, <&ps_spi_p>;
- };
-
ps_dcs0: power-controller@300 {
compatible = "apple,t8103-pmgr-pwrstate", "apple,pmgr-pwrstate";
reg = <0x300 4>;
diff --git a/arch/arm64/boot/dts/apple/t8103.dtsi b/arch/arm64/boot/dts/apple/t8103.dtsi
index 9b0dad6b6184..97b6a067394e 100644
--- a/arch/arm64/boot/dts/apple/t8103.dtsi
+++ b/arch/arm64/boot/dts/apple/t8103.dtsi
@@ -326,6 +326,20 @@
clock-output-names = "clkref";
};
+ clk_120m: clock-120m {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <120000000>;
+ clock-output-names = "clk_120m";
+ };
+
+ clk_200m: clock-200m {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <200000000>;
+ clock-output-names = "clk_200m";
+ };
+
/*
* This is a fabulated representation of the input clock
* to NCO since we don't know the true clock tree.
@@ -356,6 +370,67 @@
#performance-domain-cells = <0>;
};
+ display_dfr: display-pipe@228200000 {
+ compatible = "apple,t8103-display-pipe", "apple,h7-display-pipe";
+ reg = <0x2 0x28200000 0x0 0xc000>,
+ <0x2 0x28400000 0x0 0x4000>;
+ reg-names = "be", "fe";
+ power-domains = <&ps_dispdfr_fe>, <&ps_dispdfr_be>;
+ interrupt-parent = <&aic>;
+ interrupts = <AIC_IRQ 502 IRQ_TYPE_LEVEL_HIGH>,
+ <AIC_IRQ 506 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "be", "fe";
+ iommus = <&displaydfr_dart 0>;
+ status = "disabled";
+
+ port {
+ dfr_adp_out_mipi: endpoint {
+ remote-endpoint = <&dfr_mipi_in_adp>;
+ };
+ };
+ };
+
+ displaydfr_dart: iommu@228304000 {
+ compatible = "apple,t8103-dart";
+ reg = <0x2 0x28304000 0x0 0x4000>;
+ interrupt-parent = <&aic>;
+ interrupts = <AIC_IRQ 504 IRQ_TYPE_LEVEL_HIGH>;
+ #iommu-cells = <1>;
+ power-domains = <&ps_dispdfr_fe>;
+ status = "disabled";
+ };
+
+ displaydfr_mipi: dsi@228600000 {
+ compatible = "apple,t8103-display-pipe-mipi", "apple,h7-display-pipe-mipi";
+ reg = <0x2 0x28600000 0x0 0x100000>;
+ power-domains = <&ps_mipi_dsi>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ dfr_mipi_in: port@0 {
+ reg = <0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ dfr_mipi_in_adp: endpoint@0 {
+ reg = <0>;
+ remote-endpoint = <&dfr_adp_out_mipi>;
+ };
+ };
+
+ dfr_mipi_out: port@1 {
+ reg = <1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+ };
+ };
+
sio_dart: iommu@235004000 {
compatible = "apple,t8103-dart";
reg = <0x2 0x35004000 0x0 0x4000>;
@@ -441,6 +516,48 @@
status = "disabled";
};
+ spi0: spi@235100000 {
+ compatible = "apple,t8103-spi", "apple,spi";
+ reg = <0x2 0x35100000 0x0 0x4000>;
+ interrupt-parent = <&aic>;
+ interrupts = <AIC_IRQ 614 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk_200m>;
+ pinctrl-0 = <&spi0_pins>;
+ pinctrl-names = "default";
+ power-domains = <&ps_spi0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ spi1: spi@235104000 {
+ compatible = "apple,t8103-spi", "apple,spi";
+ reg = <0x2 0x35104000 0x0 0x4000>;
+ interrupt-parent = <&aic>;
+ interrupts = <AIC_IRQ 615 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk_200m>;
+ pinctrl-0 = <&spi1_pins>;
+ pinctrl-names = "default";
+ power-domains = <&ps_spi1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ spi3: spi@23510c000 {
+ compatible = "apple,t8103-spi", "apple,spi";
+ reg = <0x2 0x3510c000 0x0 0x4000>;
+ interrupt-parent = <&aic>;
+ interrupts = <AIC_IRQ 617 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk_120m>;
+ pinctrl-0 = <&spi3_pins>;
+ pinctrl-names = "default";
+ power-domains = <&ps_spi3>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
serial0: serial@235200000 {
compatible = "apple,s5l-uart";
reg = <0x2 0x35200000 0x0 0x1000>;
@@ -597,6 +714,26 @@
<APPLE_PINMUX(134, 1)>;
};
+ spi0_pins: spi0-pins {
+ pinmux = <APPLE_PINMUX(67, 1)>, /* CLK */
+ <APPLE_PINMUX(68, 1)>, /* MOSI */
+ <APPLE_PINMUX(69, 1)>; /* MISO */
+ };
+
+ spi1_pins: spi1-pins {
+ pinmux = <APPLE_PINMUX(42, 1)>,
+ <APPLE_PINMUX(43, 1)>,
+ <APPLE_PINMUX(44, 1)>,
+ <APPLE_PINMUX(45, 1)>;
+ };
+
+ spi3_pins: spi3-pins {
+ pinmux = <APPLE_PINMUX(46, 1)>,
+ <APPLE_PINMUX(47, 1)>,
+ <APPLE_PINMUX(48, 1)>,
+ <APPLE_PINMUX(49, 1)>;
+ };
+
pcie_pins: pcie-pins {
pinmux = <APPLE_PINMUX(150, 1)>,
<APPLE_PINMUX(151, 1)>,
diff --git a/arch/arm64/boot/dts/apple/t8112-j493.dts b/arch/arm64/boot/dts/apple/t8112-j493.dts
index 0ad908349f55..3d73f9ee2f46 100644
--- a/arch/arm64/boot/dts/apple/t8112-j493.dts
+++ b/arch/arm64/boot/dts/apple/t8112-j493.dts
@@ -17,8 +17,13 @@
compatible = "apple,j493", "apple,t8112", "apple,arm-platform";
model = "Apple MacBook Pro (13-inch, M2, 2022)";
+ /*
+ * All of these are used by the bootloader to pass calibration
+ * blobs and other device-specific properties
+ */
aliases {
bluetooth0 = &bluetooth0;
+ touchbar0 = &touchbar0;
wifi0 = &wifi0;
};
@@ -35,6 +40,37 @@
};
};
+&display_dfr {
+ status = "okay";
+};
+
+&dfr_mipi_out {
+ dfr_mipi_out_panel: endpoint@0 {
+ reg = <0>;
+ remote-endpoint = <&dfr_panel_in>;
+ };
+};
+
+&displaydfr_mipi {
+ status = "okay";
+
+ dfr_panel: panel@0 {
+ compatible = "apple,j493-summit", "apple,summit";
+ reg = <0>;
+ max-brightness = <255>;
+
+ port {
+ dfr_panel_in: endpoint {
+ remote-endpoint = <&dfr_mipi_out_panel>;
+ };
+ };
+ };
+};
+
+&displaydfr_dart {
+ status = "okay";
+};
+
/*
* Force the bus number assignments so that we can declare some of the
* on-board devices and properties that are populated by the bootloader
@@ -67,3 +103,21 @@
&fpwm1 {
status = "okay";
};
+
+&spi3 {
+ status = "okay";
+
+ touchbar0: touchbar@0 {
+ compatible = "apple,j493-touchbar";
+ reg = <0>;
+ spi-max-frequency = <8000000>;
+ spi-cs-setup-delay-ns = <2000>;
+ spi-cs-hold-delay-ns = <2000>;
+ reset-gpios = <&pinctrl_ap 170 GPIO_ACTIVE_LOW>;
+ interrupts-extended = <&pinctrl_ap 174 IRQ_TYPE_EDGE_FALLING>;
+ firmware-name = "apple/dfrmtfw-j493.bin";
+ touchscreen-size-x = <23045>;
+ touchscreen-size-y = <640>;
+ touchscreen-inverted-y;
+ };
+};
diff --git a/arch/arm64/boot/dts/apple/t8112-jxxx.dtsi b/arch/arm64/boot/dts/apple/t8112-jxxx.dtsi
index f5edf61113e7..6da35496a4c8 100644
--- a/arch/arm64/boot/dts/apple/t8112-jxxx.dtsi
+++ b/arch/arm64/boot/dts/apple/t8112-jxxx.dtsi
@@ -79,3 +79,5 @@
&nco_clkref {
clock-frequency = <900000000>;
};
+
+#include "spi1-nvram.dtsi"
diff --git a/arch/arm64/boot/dts/apple/t8112.dtsi b/arch/arm64/boot/dts/apple/t8112.dtsi
index 1666e6ab250b..d9b966d68e4f 100644
--- a/arch/arm64/boot/dts/apple/t8112.dtsi
+++ b/arch/arm64/boot/dts/apple/t8112.dtsi
@@ -349,6 +349,13 @@
clock-output-names = "clkref";
};
+ clk_200m: clock-200m {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <200000000>;
+ clock-output-names = "clk_200m";
+ };
+
/*
* This is a fabulated representation of the input clock
* to NCO since we don't know the true clock tree.
@@ -379,6 +386,67 @@
#performance-domain-cells = <0>;
};
+ display_dfr: display-pipe@228200000 {
+ compatible = "apple,t8112-display-pipe", "apple,h7-display-pipe";
+ reg = <0x2 0x28200000 0x0 0xc000>,
+ <0x2 0x28400000 0x0 0x4000>;
+ reg-names = "be", "fe";
+ power-domains = <&ps_dispdfr_fe>, <&ps_dispdfr_be>;
+ interrupt-parent = <&aic>;
+ interrupts = <AIC_IRQ 614 IRQ_TYPE_LEVEL_HIGH>,
+ <AIC_IRQ 618 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "be", "fe";
+ iommus = <&displaydfr_dart 0>;
+ status = "disabled";
+
+ port {
+ dfr_adp_out_mipi: endpoint {
+ remote-endpoint = <&dfr_mipi_in_adp>;
+ };
+ };
+ };
+
+ displaydfr_dart: iommu@228304000 {
+ compatible = "apple,t8110-dart";
+ reg = <0x2 0x28304000 0x0 0x4000>;
+ interrupt-parent = <&aic>;
+ interrupts = <AIC_IRQ 616 IRQ_TYPE_LEVEL_HIGH>;
+ #iommu-cells = <1>;
+ power-domains = <&ps_dispdfr_fe>;
+ status = "disabled";
+ };
+
+ displaydfr_mipi: dsi@228600000 {
+ compatible = "apple,t8112-display-pipe-mipi", "apple,h7-display-pipe-mipi";
+ reg = <0x2 0x28600000 0x0 0x100000>;
+ power-domains = <&ps_mipi_dsi>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ dfr_mipi_in: port@0 {
+ reg = <0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ dfr_mipi_in_adp: endpoint@0 {
+ reg = <0>;
+ remote-endpoint = <&dfr_adp_out_mipi>;
+ };
+ };
+
+ dfr_mipi_out: port@1 {
+ reg = <1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+ };
+ };
+
sio_dart: iommu@235004000 {
compatible = "apple,t8110-dart";
reg = <0x2 0x35004000 0x0 0x4000>;
@@ -467,6 +535,34 @@
status = "disabled";
};
+ spi1: spi@235104000 {
+ compatible = "apple,t8112-spi", "apple,spi";
+ reg = <0x2 0x35104000 0x0 0x4000>;
+ interrupt-parent = <&aic>;
+ interrupts = <AIC_IRQ 749 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk_200m>;
+ pinctrl-0 = <&spi1_pins>;
+ pinctrl-names = "default";
+ power-domains = <&ps_spi1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ spi3: spi@23510c000 {
+ compatible = "apple,t8112-spi", "apple,spi";
+ reg = <0x2 0x3510c000 0x0 0x4000>;
+ interrupt-parent = <&aic>;
+ interrupts = <AIC_IRQ 751 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clkref>;
+ pinctrl-0 = <&spi3_pins>;
+ pinctrl-names = "default";
+ power-domains = <&ps_spi3>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled"; /* only used in J493 */
+ };
+
serial0: serial@235200000 {
compatible = "apple,s5l-uart";
reg = <0x2 0x35200000 0x0 0x1000>;
@@ -626,13 +722,20 @@
<APPLE_PINMUX(130, 1)>;
};
- spi3_pins: spi3-pins {
+ spi1_pins: spi1-pins {
pinmux = <APPLE_PINMUX(46, 1)>,
<APPLE_PINMUX(47, 1)>,
<APPLE_PINMUX(48, 1)>,
<APPLE_PINMUX(49, 1)>;
};
+ spi3_pins: spi3-pins {
+ pinmux = <APPLE_PINMUX(93, 1)>,
+ <APPLE_PINMUX(94, 1)>,
+ <APPLE_PINMUX(95, 1)>,
+ <APPLE_PINMUX(96, 1)>;
+ };
+
pcie_pins: pcie-pins {
pinmux = <APPLE_PINMUX(162, 1)>,
<APPLE_PINMUX(163, 1)>,
diff --git a/arch/arm64/boot/dts/arm/Makefile b/arch/arm64/boot/dts/arm/Makefile
index d908e96d7ddc..f30ee045dc95 100644
--- a/arch/arm64/boot/dts/arm/Makefile
+++ b/arch/arm64/boot/dts/arm/Makefile
@@ -7,3 +7,4 @@ dtb-$(CONFIG_ARCH_VEXPRESS) += rtsm_ve-aemv8a.dtb
dtb-$(CONFIG_ARCH_VEXPRESS) += vexpress-v2f-1xv7-ca53x2.dtb
dtb-$(CONFIG_ARCH_VEXPRESS) += fvp-base-revc.dtb
dtb-$(CONFIG_ARCH_VEXPRESS) += corstone1000-fvp.dtb corstone1000-mps3.dtb
+dtb-$(CONFIG_ARCH_VEXPRESS) += morello-sdp.dtb morello-fvp.dtb
diff --git a/arch/arm64/boot/dts/arm/corstone1000-fvp.dts b/arch/arm64/boot/dts/arm/corstone1000-fvp.dts
index abd013562995..66ba6b027193 100644
--- a/arch/arm64/boot/dts/arm/corstone1000-fvp.dts
+++ b/arch/arm64/boot/dts/arm/corstone1000-fvp.dts
@@ -49,3 +49,29 @@
clock-names = "smclk", "apb_pclk";
};
};
+
+&cpus {
+ cpu1: cpu@1 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a35";
+ reg = <0x1>;
+ enable-method = "psci";
+ next-level-cache = <&L2_0>;
+ };
+
+ cpu2: cpu@2 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a35";
+ reg = <0x2>;
+ enable-method = "psci";
+ next-level-cache = <&L2_0>;
+ };
+
+ cpu3: cpu@3 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a35";
+ reg = <0x3>;
+ enable-method = "psci";
+ next-level-cache = <&L2_0>;
+ };
+};
diff --git a/arch/arm64/boot/dts/arm/corstone1000.dtsi b/arch/arm64/boot/dts/arm/corstone1000.dtsi
index bb9b96fb5314..56ada8728b60 100644
--- a/arch/arm64/boot/dts/arm/corstone1000.dtsi
+++ b/arch/arm64/boot/dts/arm/corstone1000.dtsi
@@ -21,7 +21,7 @@
stdout-path = "serial0:115200n8";
};
- cpus {
+ cpus: cpus {
#address-cells = <1>;
#size-cells = <0>;
@@ -29,6 +29,7 @@
device_type = "cpu";
compatible = "arm,cortex-a35";
reg = <0>;
+ enable-method = "psci";
next-level-cache = <&L2_0>;
};
};
diff --git a/arch/arm64/boot/dts/arm/morello-fvp.dts b/arch/arm64/boot/dts/arm/morello-fvp.dts
new file mode 100644
index 000000000000..2072c0b72325
--- /dev/null
+++ b/arch/arm64/boot/dts/arm/morello-fvp.dts
@@ -0,0 +1,77 @@
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+/*
+ * Copyright (c) 2021-2024, Arm Limited. All rights reserved.
+ */
+
+/dts-v1/;
+#include "morello.dtsi"
+
+/ {
+ model = "Arm Morello Fixed Virtual Platform";
+ compatible = "arm,morello-fvp", "arm,morello";
+
+ aliases {
+ serial0 = &uart0;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+
+ bp_refclock24mhz: clock-24000000 {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <24000000>;
+ clock-output-names = "bp:clock24mhz";
+ };
+
+ block_0: virtio_block@1c170000 {
+ compatible = "virtio,mmio";
+ reg = <0x0 0x1c170000 0x0 0x200>;
+ interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ net_0: virtio_net@1c180000 {
+ compatible = "virtio,mmio";
+ reg = <0x0 0x1c180000 0x0 0x200>;
+ interrupts = <GIC_SPI 102 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ rng_0: virtio_rng@1c190000 {
+ compatible = "virtio,mmio";
+ reg = <0x0 0x1c190000 0x0 0x200>;
+ interrupts = <GIC_SPI 101 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ p9_0: virtio_p9@1c1a0000 {
+ compatible = "virtio,mmio";
+ reg = <0x0 0x1c1a0000 0x0 0x200>;
+ interrupts = <GIC_SPI 103 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ kmi_0: kmi@1c150000 {
+ compatible = "arm,pl050", "arm,primecell";
+ reg = <0x0 0x1c150000 0x0 0x1000>;
+ interrupts = <GIC_SPI 99 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&bp_refclock24mhz>, <&bp_refclock24mhz>;
+ clock-names = "KMIREFCLK", "apb_pclk";
+ };
+
+ kmi_1: kmi@1c160000 {
+ compatible = "arm,pl050", "arm,primecell";
+ reg = <0x0 0x1c160000 0x0 0x1000>;
+ interrupts = <GIC_SPI 100 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&bp_refclock24mhz>, <&bp_refclock24mhz>;
+ clock-names = "KMIREFCLK", "apb_pclk";
+ };
+
+ eth_0: ethernet@1d100000 {
+ compatible = "smsc,lan91c111";
+ reg = <0x0 0x1d100000 0x0 0x10000>;
+ interrupts = <GIC_SPI 98 IRQ_TYPE_LEVEL_HIGH>;
+ };
+};
+
+&uart0 {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/arm/morello-sdp.dts b/arch/arm64/boot/dts/arm/morello-sdp.dts
new file mode 100644
index 000000000000..cee49dee7571
--- /dev/null
+++ b/arch/arm64/boot/dts/arm/morello-sdp.dts
@@ -0,0 +1,157 @@
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+/*
+ * Copyright (c) 2021-2024, Arm Limited. All rights reserved.
+ */
+
+/dts-v1/;
+#include "morello.dtsi"
+
+/ {
+ model = "Arm Morello System Development Platform";
+ compatible = "arm,morello-sdp", "arm,morello";
+
+ aliases {
+ serial0 = &uart0;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+
+ dpu_aclk: clock-350000000 {
+ /* 77.1 MHz derived from 24 MHz reference clock */
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <350000000>;
+ clock-output-names = "aclk";
+ };
+
+ dpu_pixel_clk: clock-148500000 {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <148500000>;
+ clock-output-names = "pxclk";
+ };
+
+ i2c0: i2c@1c0f0000 {
+ compatible = "cdns,i2c-r1p14";
+ reg = <0x0 0x1c0f0000 0x0 0x1000>;
+ interrupts = <GIC_SPI 138 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&dpu_aclk>;
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ clock-frequency = <100000>;
+
+ hdmi_tx: hdmi-transmitter@70 {
+ compatible = "nxp,tda998x";
+ reg = <0x70>;
+ video-ports = <0x234501>;
+ port {
+ tda998x_0_input: endpoint {
+ remote-endpoint = <&dp_pl0_out0>;
+ };
+ };
+ };
+ };
+
+ dp0: display@2cc00000 {
+ compatible = "arm,mali-d32", "arm,mali-d71";
+ reg = <0x0 0x2cc00000 0x0 0x20000>;
+ interrupts = <0 69 4>;
+ clocks = <&dpu_aclk>;
+ clock-names = "aclk";
+ iommus = <&smmu_dp 0>, <&smmu_dp 1>, <&smmu_dp 2>, <&smmu_dp 3>,
+ <&smmu_dp 8>;
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ pl0: pipeline@0 {
+ reg = <0>;
+ clocks = <&dpu_pixel_clk>;
+ clock-names = "pxclk";
+ port {
+ dp_pl0_out0: endpoint {
+ remote-endpoint = <&tda998x_0_input>;
+ };
+ };
+ };
+ };
+
+ smmu_ccix: iommu@4f000000 {
+ compatible = "arm,smmu-v3";
+ reg = <0x0 0x4f000000 0x0 0x40000>;
+
+ interrupts = <GIC_SPI 228 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 230 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 41 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 229 IRQ_TYPE_EDGE_RISING>;
+ interrupt-names = "eventq", "gerror", "priq", "cmdq-sync";
+ msi-parent = <&its1 0>;
+ #iommu-cells = <1>;
+ dma-coherent;
+ };
+
+ smmu_pcie: iommu@4f400000 {
+ compatible = "arm,smmu-v3";
+ reg = <0x0 0x4f400000 0x0 0x40000>;
+
+ interrupts = <GIC_SPI 235 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 237 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 40 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 236 IRQ_TYPE_EDGE_RISING>;
+ interrupt-names = "eventq", "gerror", "priq", "cmdq-sync";
+ msi-parent = <&its2 0>;
+ #iommu-cells = <1>;
+ dma-coherent;
+ };
+
+ pcie_ctlr: pcie@28c0000000 {
+ device_type = "pci";
+ compatible = "pci-host-ecam-generic";
+ reg = <0x28 0xC0000000 0 0x10000000>;
+ ranges = <0x01000000 0x00 0x00000000 0x00 0x6f000000 0x00 0x00800000>,
+ <0x02000000 0x00 0x60000000 0x00 0x60000000 0x00 0x0f000000>,
+ <0x42000000 0x09 0x00000000 0x09 0x00000000 0x1f 0xc0000000>;
+ bus-range = <0 255>;
+ linux,pci-domain = <0>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+ dma-coherent;
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 0 7>;
+ interrupt-map = <0 0 0 1 &gic 0 0 0 169 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 0 2 &gic 0 0 0 170 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 0 3 &gic 0 0 0 171 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 0 4 &gic 0 0 0 172 IRQ_TYPE_LEVEL_HIGH>;
+ msi-map = <0 &its_pcie 0 0x10000>;
+ iommu-map = <0 &smmu_pcie 0 0x10000>;
+ };
+
+ ccix_pcie_ctlr: pcie@4fc0000000 {
+ device_type = "pci";
+ compatible = "pci-host-ecam-generic";
+ reg = <0x4f 0xC0000000 0 0x10000000>;
+ ranges = <0x01000000 0x00 0x00000000 0x00 0x7f000000 0x00 0x00800000>,
+ <0x02000000 0x00 0x70000000 0x00 0x70000000 0x00 0x0f000000>,
+ <0x42000000 0x30 0x00000000 0x30 0x00000000 0x1f 0xc0000000>;
+ linux,pci-domain = <1>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+ dma-coherent;
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 0 7>;
+ interrupt-map = <0 0 0 1 &gic 0 0 0 201 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 0 2 &gic 0 0 0 202 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 0 3 &gic 0 0 0 203 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 0 4 &gic 0 0 0 204 IRQ_TYPE_LEVEL_HIGH>;
+ msi-map = <0 &its_ccix 0 0x10000>;
+ iommu-map = <0 &smmu_ccix 0 0x10000>;
+ };
+};
+
+&uart0 {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/arm/morello.dtsi b/arch/arm64/boot/dts/arm/morello.dtsi
new file mode 100644
index 000000000000..0bab0b3ea969
--- /dev/null
+++ b/arch/arm64/boot/dts/arm/morello.dtsi
@@ -0,0 +1,323 @@
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+/*
+ * Copyright (c) 2020-2024, Arm Limited. All rights reserved.
+ */
+
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+/ {
+ interrupt-parent = <&gic>;
+
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ soc_refclk50mhz: clock-50000000 {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <50000000>;
+ clock-output-names = "apb_pclk";
+ };
+
+ soc_refclk85mhz: clock-85000000 {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <85000000>;
+ clock-output-names = "iofpga:aclk";
+ };
+
+ cpus {
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ cpu0: cpu@0 {
+ compatible = "arm,rainier";
+ reg = <0x0 0x0>;
+ device_type = "cpu";
+ enable-method = "psci";
+ /* 4 ways set associative */
+ i-cache-size = <0x10000>;
+ i-cache-line-size = <64>;
+ i-cache-sets = <512>;
+ d-cache-size = <0x10000>;
+ d-cache-line-size = <64>;
+ d-cache-sets = <512>;
+ next-level-cache = <&l2_0>;
+ clocks = <&scmi_dvfs 0>;
+
+ l2_0: l2-cache-0 {
+ compatible = "cache";
+ cache-level = <2>;
+ /* 8 ways set associative */
+ cache-size = <0x100000>;
+ cache-line-size = <64>;
+ cache-sets = <2048>;
+ cache-unified;
+ next-level-cache = <&l3_0>;
+
+ l3_0: l3-cache {
+ compatible = "cache";
+ cache-level = <3>;
+ cache-size = <0x100000>;
+ cache-unified;
+ };
+ };
+ };
+
+ cpu1: cpu@100 {
+ compatible = "arm,rainier";
+ reg = <0x0 0x100>;
+ device_type = "cpu";
+ enable-method = "psci";
+ /* 4 ways set associative */
+ i-cache-size = <0x10000>;
+ i-cache-line-size = <64>;
+ i-cache-sets = <512>;
+ d-cache-size = <0x10000>;
+ d-cache-line-size = <64>;
+ d-cache-sets = <512>;
+ next-level-cache = <&l2_1>;
+ clocks = <&scmi_dvfs 0>;
+
+ l2_1: l2-cache-1 {
+ compatible = "cache";
+ cache-level = <2>;
+ /* 8 ways set associative */
+ cache-size = <0x100000>;
+ cache-line-size = <64>;
+ cache-sets = <2048>;
+ cache-unified;
+ next-level-cache = <&l3_0>;
+ };
+ };
+
+ cpu2: cpu@10000 {
+ compatible = "arm,rainier";
+ reg = <0x0 0x10000>;
+ device_type = "cpu";
+ enable-method = "psci";
+ /* 4 ways set associative */
+ i-cache-size = <0x10000>;
+ i-cache-line-size = <64>;
+ i-cache-sets = <512>;
+ d-cache-size = <0x10000>;
+ d-cache-line-size = <64>;
+ d-cache-sets = <512>;
+ next-level-cache = <&l2_2>;
+ clocks = <&scmi_dvfs 1>;
+
+ l2_2: l2-cache-2 {
+ compatible = "cache";
+ cache-level = <2>;
+ /* 8 ways set associative */
+ cache-size = <0x100000>;
+ cache-line-size = <64>;
+ cache-sets = <2048>;
+ cache-unified;
+ next-level-cache = <&l3_0>;
+ };
+ };
+
+ cpu3: cpu@10100 {
+ compatible = "arm,rainier";
+ reg = <0x0 0x10100>;
+ device_type = "cpu";
+ enable-method = "psci";
+ /* 4 ways set associative */
+ i-cache-size = <0x10000>;
+ i-cache-line-size = <64>;
+ i-cache-sets = <512>;
+ d-cache-size = <0x10000>;
+ d-cache-line-size = <64>;
+ d-cache-sets = <512>;
+ next-level-cache = <&l2_3>;
+ clocks = <&scmi_dvfs 1>;
+
+ l2_3: l2-cache-3 {
+ compatible = "cache";
+ cache-level = <2>;
+ /* 8 ways set associative */
+ cache-size = <0x100000>;
+ cache-line-size = <64>;
+ cache-sets = <2048>;
+ cache-unified;
+ next-level-cache = <&l3_0>;
+ };
+ };
+ };
+
+ firmware {
+ interrupt-parent = <&gic>;
+
+ scmi {
+ compatible = "arm,scmi";
+ mbox-names = "tx", "rx";
+ mboxes = <&mailbox 1 0>, <&mailbox 1 1>;
+ shmem = <&cpu_scp_hpri0>, <&cpu_scp_hpri1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ scmi_dvfs: protocol@13 {
+ reg = <0x13>;
+ #clock-cells = <1>;
+ };
+
+ scmi_clk: protocol@14 {
+ reg = <0x14>;
+ #clock-cells = <1>;
+ };
+ };
+ };
+
+ /* The first bank of memory; the memory map is actually provided by UEFI. */
+ memory@80000000 {
+ device_type = "memory";
+ /* [0x80000000-0xffffffff] */
+ reg = <0x00000000 0x80000000 0x0 0x7f000000>;
+ };
+
+ memory@8080000000 {
+ device_type = "memory";
+ /* [0x8080000000-0x83f7ffffff] */
+ reg = <0x00000080 0x80000000 0x3 0x78000000>;
+ };
+
+ pmu {
+ compatible = "arm,rainier-pmu";
+ interrupts = <GIC_PPI 7 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ psci {
+ compatible = "arm,psci-0.2";
+ method = "smc";
+ };
+
+ reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ secure-firmware@ff000000 {
+ reg = <0x0 0xff000000 0x0 0x01000000>;
+ no-map;
+ };
+ };
+
+ spe-pmu {
+ compatible = "arm,statistical-profiling-extension-v1";
+ interrupts = <GIC_PPI 5 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ soc: soc {
+ compatible = "simple-bus";
+ #address-cells = <2>;
+ #size-cells = <2>;
+ interrupt-parent = <&gic>;
+ ranges;
+
+ uart0: serial@2a400000 {
+ compatible = "arm,pl011", "arm,primecell";
+ reg = <0x0 0x2a400000 0x0 0x1000>;
+ interrupts = <GIC_SPI 63 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&soc_refclk50mhz>, <&soc_refclk50mhz>;
+ clock-names = "uartclk", "apb_pclk";
+
+ status = "disabled";
+ };
+
+ gic: interrupt-controller@30000000 {
+ compatible = "arm,gic-v3";
+ reg = <0x0 0x30000000 0x0 0x10000>, /* GICD */
+ <0x0 0x300c0000 0x0 0x80000>; /* GICR */
+
+ interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_HIGH>;
+
+ #interrupt-cells = <3>;
+ interrupt-controller;
+
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ its1: msi-controller@30040000 {
+ compatible = "arm,gic-v3-its";
+ reg = <0x0 0x30040000 0x0 0x20000>;
+
+ msi-controller;
+ #msi-cells = <1>;
+ };
+
+ its2: msi-controller@30060000 {
+ compatible = "arm,gic-v3-its";
+ reg = <0x0 0x30060000 0x0 0x20000>;
+
+ msi-controller;
+ #msi-cells = <1>;
+ };
+
+ its_ccix: msi-controller@30080000 {
+ compatible = "arm,gic-v3-its";
+ reg = <0x0 0x30080000 0x0 0x20000>;
+
+ msi-controller;
+ #msi-cells = <1>;
+ };
+
+ its_pcie: msi-controller@300a0000 {
+ compatible = "arm,gic-v3-its";
+ reg = <0x0 0x300a0000 0x0 0x20000>;
+
+ msi-controller;
+ #msi-cells = <1>;
+ };
+ };
+
+ smmu_dp: iommu@2ce00000 {
+ compatible = "arm,smmu-v3";
+ reg = <0x0 0x2ce00000 0x0 0x40000>;
+
+ interrupts = <GIC_SPI 76 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 80 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 78 IRQ_TYPE_EDGE_RISING>;
+ interrupt-names = "eventq", "gerror", "cmdq-sync";
+ #iommu-cells = <1>;
+ };
+
+ mailbox: mhu@45000000 {
+ compatible = "arm,mhu-doorbell", "arm,primecell";
+ reg = <0x0 0x45000000 0x0 0x1000>;
+
+ interrupts = <GIC_SPI 318 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 316 IRQ_TYPE_LEVEL_HIGH>;
+ #mbox-cells = <2>;
+ clocks = <&soc_refclk50mhz>;
+ clock-names = "apb_pclk";
+ };
+
+ sram: sram@6000000 {
+ compatible = "mmio-sram";
+ reg = <0x0 0x06000000 0x0 0x8000>;
+ ranges = <0 0x0 0x06000000 0x8000>;
+
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ cpu_scp_hpri0: scp-sram@0 {
+ compatible = "arm,scmi-shmem";
+ reg = <0x0 0x80>;
+ };
+
+ cpu_scp_hpri1: scp-sram@80 {
+ compatible = "arm,scmi-shmem";
+ reg = <0x80 0x80>;
+ };
+ };
+ };
+
+ timer {
+ compatible = "arm,armv8-timer";
+ interrupts = <GIC_PPI 13 IRQ_TYPE_LEVEL_LOW>,
+ <GIC_PPI 14 IRQ_TYPE_LEVEL_LOW>,
+ <GIC_PPI 11 IRQ_TYPE_LEVEL_LOW>,
+ <GIC_PPI 10 IRQ_TYPE_LEVEL_LOW>;
+ };
+};
diff --git a/arch/arm64/boot/dts/broadcom/bcm2712.dtsi b/arch/arm64/boot/dts/broadcom/bcm2712.dtsi
index 689c82b7f596..9e610a89a337 100644
--- a/arch/arm64/boot/dts/broadcom/bcm2712.dtsi
+++ b/arch/arm64/boot/dts/broadcom/bcm2712.dtsi
@@ -227,7 +227,7 @@
interrupts = <GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clk_uart>, <&clk_vpu>;
clock-names = "uartclk", "apb_pclk";
- arm,primecell-periphid = <0x00241011>;
+ arm,primecell-periphid = <0x00341011>;
status = "disabled";
};
diff --git a/arch/arm64/boot/dts/exynos/exynos8895-dreamlte.dts b/arch/arm64/boot/dts/exynos/exynos8895-dreamlte.dts
index 3a376ab2bb9e..61e064af3337 100644
--- a/arch/arm64/boot/dts/exynos/exynos8895-dreamlte.dts
+++ b/arch/arm64/boot/dts/exynos/exynos8895-dreamlte.dts
@@ -10,12 +10,17 @@
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/input/input.h>
#include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/soc/samsung,exynos-usi.h>
/ {
model = "Samsung Galaxy S8 (SM-G950F)";
compatible = "samsung,dreamlte", "samsung,exynos8895";
chassis-type = "handset";
+ aliases {
+ mmc0 = &mmc;
+ };
+
chosen {
#address-cells = <2>;
#size-cells = <1>;
@@ -89,12 +94,60 @@
wakeup-source;
};
};
+
+ /* TODO: Remove once PMIC is implemented */
+ reg_placeholder: regulator-0 {
+ compatible = "regulator-fixed";
+ regulator-name = "reg-placeholder";
+ };
+};
+
+&hsi2c_23 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "okay";
+
+ touchscreen@48 {
+ compatible = "samsung,s6sy761";
+ reg = <0x48>;
+
+ /* TODO: Update once PMIC is implemented */
+ avdd-supply = <&reg_placeholder>;
+ vdd-supply = <&reg_placeholder>;
+
+ interrupt-parent = <&gpa1>;
+ interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
+
+ pinctrl-0 = <&ts_int>;
+ pinctrl-names = "default";
+ };
};
&oscclk {
clock-frequency = <26000000>;
};
+&mmc {
+ pinctrl-0 = <&sd2_clk &sd2_cmd &sd2_bus1 &sd2_bus4 &sd2_cd>;
+ pinctrl-names = "default";
+
+ bus-width = <4>;
+ card-detect-delay = <200>;
+ cd-gpios = <&gpa1 5 GPIO_ACTIVE_LOW>;
+ clock-frequency = <800000000>;
+ disable-wp;
+ sd-uhs-sdr50;
+ sd-uhs-sdr104;
+
+ /* TODO: Add regulators once PMIC is implemented */
+
+ samsung,dw-mshc-ciu-div = <3>;
+ samsung,dw-mshc-ddr-timing = <1 2>;
+ samsung,dw-mshc-sdr-timing = <0 3>;
+
+ status = "okay";
+};
+
&pinctrl_alive {
key_power: key-power-pins {
samsung,pins = "gpa2-4";
@@ -123,4 +176,23 @@
samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
samsung,pin-drv = <EXYNOS7_PIN_DRV_LV1>;
};
+
+ sd2_cd: sd2-cd-pins {
+ samsung,pins = "gpa1-5";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_EINT>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
+ samsung,pin-drv = <EXYNOS7_PIN_DRV_LV4>;
+ };
+
+ ts_int: ts-int-pins {
+ samsung,pins = "gpa1-0";
+ samsung,pin-function = <EXYNOS_PIN_FUNC_EINT>;
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ samsung,pin-drv = <EXYNOS7_PIN_DRV_LV1>;
+ };
+};
+
+&usi9 {
+ samsung,mode = <USI_MODE_I2C0_1>;
+ status = "okay";
};
diff --git a/arch/arm64/boot/dts/exynos/exynos8895.dtsi b/arch/arm64/boot/dts/exynos/exynos8895.dtsi
index 36657abfc615..f92d2a8a20a2 100644
--- a/arch/arm64/boot/dts/exynos/exynos8895.dtsi
+++ b/arch/arm64/boot/dts/exynos/exynos8895.dtsi
@@ -26,30 +26,6 @@
pinctrl7 = &pinctrl_peric1;
};
- arm-a53-pmu {
- compatible = "arm,cortex-a53-pmu";
- interrupts = <GIC_SPI 24 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 25 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 27 IRQ_TYPE_LEVEL_HIGH>;
- interrupt-affinity = <&cpu0>,
- <&cpu1>,
- <&cpu2>,
- <&cpu3>;
- };
-
- mongoose-m2-pmu {
- compatible = "samsung,mongoose-pmu";
- interrupts = <GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 41 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 42 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 43 IRQ_TYPE_LEVEL_HIGH>;
- interrupt-affinity = <&cpu4>,
- <&cpu5>,
- <&cpu6>,
- <&cpu7>;
- };
-
cpus {
#address-cells = <1>;
#size-cells = <0>;
@@ -149,6 +125,30 @@
clock-output-names = "oscclk";
};
+ pmu-a53 {
+ compatible = "arm,cortex-a53-pmu";
+ interrupts = <GIC_SPI 24 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 25 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 27 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-affinity = <&cpu0>,
+ <&cpu1>,
+ <&cpu2>,
+ <&cpu3>;
+ };
+
+ pmu-mongoose-m2 {
+ compatible = "samsung,mongoose-pmu";
+ interrupts = <GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 41 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 42 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 43 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-affinity = <&cpu4>,
+ <&cpu5>,
+ <&cpu6>,
+ <&cpu7>;
+ };
+
psci {
compatible = "arm,psci";
method = "smc";
@@ -228,6 +228,12 @@
"usi1", "usi2", "usi3";
};
+ syscon_peric0: syscon@10420000 {
+ compatible = "samsung,exynos8895-peric0-sysreg", "syscon";
+ reg = <0x10420000 0x2000>;
+ clocks = <&cmu_peric0 CLK_GOUT_PERIC0_SYSREG_PERIC0_PCLK>;
+ };
+
serial_0: serial@10430000 {
compatible = "samsung,exynos8895-uart";
reg = <0x10430000 0x100>;
@@ -241,6 +247,254 @@
status = "disabled";
};
+ usi0: usi@10440000 {
+ compatible = "samsung,exynos8895-usi";
+ ranges = <0x0 0x10440000 0x11000>;
+ clocks = <&cmu_peric0 CLK_GOUT_PERIC0_USI00_I_PCLK>,
+ <&cmu_peric0 CLK_GOUT_PERIC0_USI00_I_SCLK_USI>;
+ clock-names = "pclk", "ipclk";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ samsung,sysreg = <&syscon_peric0 0x1000>;
+ status = "disabled";
+
+ hsi2c_5: i2c@0 {
+ compatible = "samsung,exynos8895-hsi2c";
+ reg = <0x0 0x1000>;
+ clocks = <&cmu_peric0 CLK_GOUT_PERIC0_USI00_I_PCLK>;
+ clock-names = "hsi2c";
+ interrupts = <GIC_SPI 364 IRQ_TYPE_LEVEL_HIGH>;
+ pinctrl-0 = <&hsi2c5_bus>;
+ pinctrl-names = "default";
+ status = "disabled";
+ };
+
+ serial_2: serial@0 {
+ compatible = "samsung,exynos8895-uart";
+ reg = <0x0 0x100>;
+ clocks = <&cmu_peric0 CLK_GOUT_PERIC0_USI00_I_PCLK>,
+ <&cmu_peric0 CLK_GOUT_PERIC0_USI00_I_SCLK_USI>;
+ clock-names = "uart", "clk_uart_baud0";
+ interrupts = <GIC_SPI 366 IRQ_TYPE_LEVEL_HIGH>;
+ pinctrl-0 = <&uart2_bus>;
+ pinctrl-names = "default";
+ samsung,uart-fifosize = <64>;
+ status = "disabled";
+ };
+
+ spi_2: spi@0 {
+ compatible = "samsung,exynos8895-spi",
+ "samsung,exynos850-spi";
+ reg = <0x0 0x100>;
+ clocks = <&cmu_peric0 CLK_GOUT_PERIC0_USI00_I_PCLK>,
+ <&cmu_peric0 CLK_GOUT_PERIC0_USI00_I_SCLK_USI>;
+ clock-names = "spi", "spi_busclk0";
+ interrupts = <GIC_SPI 367 IRQ_TYPE_LEVEL_HIGH>;
+ pinctrl-0 = <&spi2_bus>;
+ pinctrl-names = "default";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ hsi2c_6: i2c@10000 {
+ compatible = "samsung,exynos8895-hsi2c";
+ reg = <0x10000 0x1000>;
+ clocks = <&cmu_peric0 CLK_GOUT_PERIC0_USI00_I_PCLK>;
+ clock-names = "hsi2c";
+ interrupts = <GIC_SPI 365 IRQ_TYPE_LEVEL_HIGH>;
+ pinctrl-0 = <&hsi2c6_bus>;
+ pinctrl-names = "default";
+ status = "disabled";
+ };
+ };
+
+ usi1: usi@10460000 {
+ compatible = "samsung,exynos8895-usi";
+ ranges = <0x0 0x10460000 0x11000>;
+ clocks = <&cmu_peric0 CLK_GOUT_PERIC0_USI01_I_PCLK>,
+ <&cmu_peric0 CLK_GOUT_PERIC0_USI01_I_SCLK_USI>;
+ clock-names = "pclk", "ipclk";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ samsung,sysreg = <&syscon_peric0 0x1004>;
+ status = "disabled";
+
+ hsi2c_7: i2c@0 {
+ compatible = "samsung,exynos8895-hsi2c";
+ reg = <0x0 0x1000>;
+ clocks = <&cmu_peric0 CLK_GOUT_PERIC0_USI01_I_PCLK>;
+ clock-names = "hsi2c";
+ interrupts = <GIC_SPI 368 IRQ_TYPE_LEVEL_HIGH>;
+ pinctrl-0 = <&hsi2c5_bus>;
+ pinctrl-names = "default";
+ status = "disabled";
+ };
+
+ serial_3: serial@0 {
+ compatible = "samsung,exynos8895-uart";
+ reg = <0x0 0x100>;
+ clocks = <&cmu_peric0 CLK_GOUT_PERIC0_USI01_I_PCLK>,
+ <&cmu_peric0 CLK_GOUT_PERIC0_USI01_I_SCLK_USI>;
+ clock-names = "uart", "clk_uart_baud0";
+ interrupts = <GIC_SPI 370 IRQ_TYPE_LEVEL_HIGH>;
+ pinctrl-0 = <&uart3_bus>;
+ pinctrl-names = "default";
+ samsung,uart-fifosize = <64>;
+ status = "disabled";
+ };
+
+ spi_3: spi@0 {
+ compatible = "samsung,exynos8895-spi",
+ "samsung,exynos850-spi";
+ reg = <0x0 0x100>;
+ clocks = <&cmu_peric0 CLK_GOUT_PERIC0_USI01_I_PCLK>,
+ <&cmu_peric0 CLK_GOUT_PERIC0_USI01_I_SCLK_USI>;
+ clock-names = "spi", "spi_busclk0";
+ interrupts = <GIC_SPI 371 IRQ_TYPE_LEVEL_HIGH>;
+ pinctrl-0 = <&spi3_bus>;
+ pinctrl-names = "default";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ hsi2c_8: i2c@10000 {
+ compatible = "samsung,exynos8895-hsi2c";
+ reg = <0x10000 0x1000>;
+ clocks = <&cmu_peric0 CLK_GOUT_PERIC0_USI01_I_PCLK>;
+ clock-names = "hsi2c";
+ interrupts = <GIC_SPI 369 IRQ_TYPE_LEVEL_HIGH>;
+ pinctrl-0 = <&hsi2c8_bus>;
+ pinctrl-names = "default";
+ status = "disabled";
+ };
+ };
+
+ usi2: usi@10480000 {
+ compatible = "samsung,exynos8895-usi";
+ ranges = <0x0 0x10480000 0x11000>;
+ clocks = <&cmu_peric0 CLK_GOUT_PERIC0_USI02_I_PCLK>,
+ <&cmu_peric0 CLK_GOUT_PERIC0_USI02_I_SCLK_USI>;
+ clock-names = "pclk", "ipclk";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ samsung,sysreg = <&syscon_peric0 0x1008>;
+ status = "disabled";
+
+ hsi2c_9: i2c@0 {
+ compatible = "samsung,exynos8895-hsi2c";
+ reg = <0x0 0x1000>;
+ clocks = <&cmu_peric0 CLK_GOUT_PERIC0_USI02_I_PCLK>;
+ clock-names = "hsi2c";
+ interrupts = <GIC_SPI 372 IRQ_TYPE_LEVEL_HIGH>;
+ pinctrl-0 = <&hsi2c9_bus>;
+ pinctrl-names = "default";
+ status = "disabled";
+ };
+
+ serial_4: serial@0 {
+ compatible = "samsung,exynos8895-uart";
+ reg = <0x0 0x100>;
+ clocks = <&cmu_peric0 CLK_GOUT_PERIC0_USI02_I_PCLK>,
+ <&cmu_peric0 CLK_GOUT_PERIC0_USI02_I_SCLK_USI>;
+ clock-names = "uart", "clk_uart_baud0";
+ interrupts = <GIC_SPI 374 IRQ_TYPE_LEVEL_HIGH>;
+ pinctrl-0 = <&uart4_bus>;
+ pinctrl-names = "default";
+ samsung,uart-fifosize = <64>;
+ status = "disabled";
+ };
+
+ spi_4: spi@0 {
+ compatible = "samsung,exynos8895-spi",
+ "samsung,exynos850-spi";
+ reg = <0x0 0x100>;
+ clocks = <&cmu_peric0 CLK_GOUT_PERIC0_USI02_I_PCLK>,
+ <&cmu_peric0 CLK_GOUT_PERIC0_USI02_I_SCLK_USI>;
+ clock-names = "spi", "spi_busclk0";
+ interrupts = <GIC_SPI 375 IRQ_TYPE_LEVEL_HIGH>;
+ pinctrl-0 = <&spi4_bus>;
+ pinctrl-names = "default";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ hsi2c_10: i2c@10000 {
+ compatible = "samsung,exynos8895-hsi2c";
+ reg = <0x10000 0x1000>;
+ clocks = <&cmu_peric0 CLK_GOUT_PERIC0_USI02_I_PCLK>;
+ clock-names = "hsi2c";
+ interrupts = <GIC_SPI 373 IRQ_TYPE_LEVEL_HIGH>;
+ pinctrl-0 = <&hsi2c10_bus>;
+ pinctrl-names = "default";
+ status = "disabled";
+ };
+ };
+
+ usi3: usi@104a0000 {
+ compatible = "samsung,exynos8895-usi";
+ ranges = <0x0 0x104a0000 0x11000>;
+ clocks = <&cmu_peric0 CLK_GOUT_PERIC0_USI03_I_PCLK>,
+ <&cmu_peric0 CLK_GOUT_PERIC0_USI03_I_SCLK_USI>;
+ clock-names = "pclk", "ipclk";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ samsung,sysreg = <&syscon_peric0 0x100c>;
+ status = "disabled";
+
+ hsi2c_11: i2c@0 {
+ compatible = "samsung,exynos8895-hsi2c";
+ reg = <0x0 0x1000>;
+ clocks = <&cmu_peric0 CLK_GOUT_PERIC0_USI03_I_PCLK>;
+ clock-names = "hsi2c";
+ interrupts = <GIC_SPI 376 IRQ_TYPE_LEVEL_HIGH>;
+ pinctrl-0 = <&hsi2c11_bus>;
+ pinctrl-names = "default";
+ status = "disabled";
+ };
+
+ serial_5: serial@0 {
+ compatible = "samsung,exynos8895-uart";
+ reg = <0x0 0x100>;
+ clocks = <&cmu_peric0 CLK_GOUT_PERIC0_USI03_I_PCLK>,
+ <&cmu_peric0 CLK_GOUT_PERIC0_USI03_I_SCLK_USI>;
+ clock-names = "uart", "clk_uart_baud0";
+ interrupts = <GIC_SPI 378 IRQ_TYPE_LEVEL_HIGH>;
+ pinctrl-0 = <&uart5_bus>;
+ pinctrl-names = "default";
+ samsung,uart-fifosize = <64>;
+ status = "disabled";
+ };
+
+ spi_5: spi@0 {
+ compatible = "samsung,exynos8895-spi",
+ "samsung,exynos850-spi";
+ reg = <0x0 0x100>;
+ clocks = <&cmu_peric0 CLK_GOUT_PERIC0_USI03_I_PCLK>,
+ <&cmu_peric0 CLK_GOUT_PERIC0_USI03_I_SCLK_USI>;
+ clock-names = "spi", "spi_busclk0";
+ interrupts = <GIC_SPI 379 IRQ_TYPE_LEVEL_HIGH>;
+ pinctrl-0 = <&spi5_bus>;
+ pinctrl-names = "default";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ hsi2c_12: i2c@10000 {
+ compatible = "samsung,exynos8895-hsi2c";
+ reg = <0x10000 0x1000>;
+ clocks = <&cmu_peric0 CLK_GOUT_PERIC0_USI03_I_PCLK>;
+ clock-names = "hsi2c";
+ interrupts = <GIC_SPI 377 IRQ_TYPE_LEVEL_HIGH>;
+ pinctrl-0 = <&hsi2c12_bus>;
+ pinctrl-names = "default";
+ status = "disabled";
+ };
+ };
+
pinctrl_peric0: pinctrl@104d0000 {
compatible = "samsung,exynos8895-pinctrl";
reg = <0x104d0000 0x1000>;
@@ -273,6 +527,12 @@
"usi10", "usi11", "usi12", "usi13";
};
+ syscon_peric1: syscon@10820000 {
+ compatible = "samsung,exynos8895-peric1-sysreg", "syscon";
+ reg = <0x10820000 0x2000>;
+ clocks = <&cmu_peric1 CLK_GOUT_PERIC1_SYSREG_PERIC1_PCLK>;
+ };
+
serial_1: serial@10830000 {
compatible = "samsung,exynos8895-uart";
reg = <0x10830000 0x100>;
@@ -286,6 +546,626 @@
status = "disabled";
};
+ usi4: usi@10840000 {
+ compatible = "samsung,exynos8895-usi";
+ ranges = <0x0 0x10840000 0x11000>;
+ clocks = <&cmu_peric1 CLK_GOUT_PERIC1_USI04_I_PCLK>,
+ <&cmu_peric1 CLK_GOUT_PERIC1_USI04_I_SCLK_USI>;
+ clock-names = "pclk", "ipclk";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ samsung,sysreg = <&syscon_peric1 0x1008>;
+ status = "disabled";
+
+ hsi2c_13: i2c@0 {
+ compatible = "samsung,exynos8895-hsi2c";
+ reg = <0x0 0x1000>;
+ clocks = <&cmu_peric1 CLK_GOUT_PERIC1_USI04_I_PCLK>;
+ clock-names = "hsi2c";
+ interrupts = <GIC_SPI 390 IRQ_TYPE_LEVEL_HIGH>;
+ pinctrl-0 = <&hsi2c13_bus>;
+ pinctrl-names = "default";
+ status = "disabled";
+ };
+
+ serial_6: serial@0 {
+ compatible = "samsung,exynos8895-uart";
+ reg = <0x0 0x100>;
+ clocks = <&cmu_peric1 CLK_GOUT_PERIC1_USI04_I_PCLK>,
+ <&cmu_peric1 CLK_GOUT_PERIC1_USI04_I_SCLK_USI>;
+ clock-names = "uart", "clk_uart_baud0";
+ interrupts = <GIC_SPI 392 IRQ_TYPE_LEVEL_HIGH>;
+ pinctrl-0 = <&uart6_bus>;
+ pinctrl-names = "default";
+ samsung,uart-fifosize = <64>;
+ status = "disabled";
+ };
+
+ spi_6: spi@0 {
+ compatible = "samsung,exynos8895-spi",
+ "samsung,exynos850-spi";
+ reg = <0x0 0x100>;
+ clocks = <&cmu_peric1 CLK_GOUT_PERIC1_USI04_I_PCLK>,
+ <&cmu_peric1 CLK_GOUT_PERIC1_USI04_I_SCLK_USI>;
+ clock-names = "spi", "spi_busclk0";
+ interrupts = <GIC_SPI 393 IRQ_TYPE_LEVEL_HIGH>;
+ pinctrl-0 = <&spi6_bus>;
+ pinctrl-names = "default";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ hsi2c_14: i2c@10000 {
+ compatible = "samsung,exynos8895-hsi2c";
+ reg = <0x10000 0x1000>;
+ clocks = <&cmu_peric1 CLK_GOUT_PERIC1_USI04_I_PCLK>;
+ clock-names = "hsi2c";
+ interrupts = <GIC_SPI 391 IRQ_TYPE_LEVEL_HIGH>;
+ pinctrl-0 = <&hsi2c14_bus>;
+ pinctrl-names = "default";
+ status = "disabled";
+ };
+ };
+
+ usi5: usi@10860000 {
+ compatible = "samsung,exynos8895-usi";
+ ranges = <0x0 0x10860000 0x11000>;
+ clocks = <&cmu_peric1 CLK_GOUT_PERIC1_USI05_I_PCLK>,
+ <&cmu_peric1 CLK_GOUT_PERIC1_USI05_I_SCLK_USI>;
+ clock-names = "pclk", "ipclk";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ samsung,sysreg = <&syscon_peric1 0x100c>;
+ status = "disabled";
+
+ hsi2c_15: i2c@0 {
+ compatible = "samsung,exynos8895-hsi2c";
+ reg = <0x0 0x1000>;
+ clocks = <&cmu_peric1 CLK_GOUT_PERIC1_USI05_I_PCLK>;
+ clock-names = "hsi2c";
+ interrupts = <GIC_SPI 394 IRQ_TYPE_LEVEL_HIGH>;
+ pinctrl-0 = <&hsi2c15_bus>;
+ pinctrl-names = "default";
+ status = "disabled";
+ };
+
+ serial_7: serial@0 {
+ compatible = "samsung,exynos8895-uart";
+ reg = <0x0 0x100>;
+ clocks = <&cmu_peric1 CLK_GOUT_PERIC1_USI05_I_PCLK>,
+ <&cmu_peric1 CLK_GOUT_PERIC1_USI05_I_SCLK_USI>;
+ clock-names = "uart", "clk_uart_baud0";
+ interrupts = <GIC_SPI 396 IRQ_TYPE_LEVEL_HIGH>;
+ pinctrl-0 = <&uart7_bus>;
+ pinctrl-names = "default";
+ samsung,uart-fifosize = <64>;
+ status = "disabled";
+ };
+
+ spi_7: spi@0 {
+ compatible = "samsung,exynos8895-spi",
+ "samsung,exynos850-spi";
+ reg = <0x0 0x100>;
+ clocks = <&cmu_peric1 CLK_GOUT_PERIC1_USI05_I_PCLK>,
+ <&cmu_peric1 CLK_GOUT_PERIC1_USI05_I_SCLK_USI>;
+ clock-names = "spi", "spi_busclk0";
+ interrupts = <GIC_SPI 397 IRQ_TYPE_LEVEL_HIGH>;
+ pinctrl-0 = <&spi7_bus>;
+ pinctrl-names = "default";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ hsi2c_16: i2c@10000 {
+ compatible = "samsung,exynos8895-hsi2c";
+ reg = <0x10000 0x1000>;
+ clocks = <&cmu_peric1 CLK_GOUT_PERIC1_USI05_I_PCLK>;
+ clock-names = "hsi2c";
+ interrupts = <GIC_SPI 395 IRQ_TYPE_LEVEL_HIGH>;
+ pinctrl-0 = <&hsi2c16_bus>;
+ pinctrl-names = "default";
+ status = "disabled";
+ };
+ };
+
+ usi6: usi@10880000 {
+ compatible = "samsung,exynos8895-usi";
+ ranges = <0x0 0x10880000 0x11000>;
+ clocks = <&cmu_peric1 CLK_GOUT_PERIC1_USI06_I_PCLK>,
+ <&cmu_peric1 CLK_GOUT_PERIC1_USI06_I_SCLK_USI>;
+ clock-names = "pclk", "ipclk";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ samsung,sysreg = <&syscon_peric1 0x1010>;
+ status = "disabled";
+
+ hsi2c_17: i2c@0 {
+ compatible = "samsung,exynos8895-hsi2c";
+ reg = <0x0 0x1000>;
+ clocks = <&cmu_peric1 CLK_GOUT_PERIC1_USI06_I_PCLK>;
+ clock-names = "hsi2c";
+ interrupts = <GIC_SPI 398 IRQ_TYPE_LEVEL_HIGH>;
+ pinctrl-0 = <&hsi2c17_bus>;
+ pinctrl-names = "default";
+ status = "disabled";
+ };
+
+ serial_8: serial@0 {
+ compatible = "samsung,exynos8895-uart";
+ reg = <0x0 0x100>;
+ clocks = <&cmu_peric1 CLK_GOUT_PERIC1_USI06_I_PCLK>,
+ <&cmu_peric1 CLK_GOUT_PERIC1_USI06_I_SCLK_USI>;
+ clock-names = "uart", "clk_uart_baud0";
+ interrupts = <GIC_SPI 400 IRQ_TYPE_LEVEL_HIGH>;
+ pinctrl-0 = <&uart8_bus>;
+ pinctrl-names = "default";
+ samsung,uart-fifosize = <64>;
+ status = "disabled";
+ };
+
+ spi_8: spi@0 {
+ compatible = "samsung,exynos8895-spi",
+ "samsung,exynos850-spi";
+ reg = <0x0 0x100>;
+ clocks = <&cmu_peric1 CLK_GOUT_PERIC1_USI06_I_PCLK>,
+ <&cmu_peric1 CLK_GOUT_PERIC1_USI06_I_SCLK_USI>;
+ clock-names = "spi", "spi_busclk0";
+ interrupts = <GIC_SPI 401 IRQ_TYPE_LEVEL_HIGH>;
+ pinctrl-0 = <&spi8_bus>;
+ pinctrl-names = "default";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ hsi2c_18: i2c@10000 {
+ compatible = "samsung,exynos8895-hsi2c";
+ reg = <0x10000 0x1000>;
+ clocks = <&cmu_peric1 CLK_GOUT_PERIC1_USI06_I_PCLK>;
+ clock-names = "hsi2c";
+ interrupts = <GIC_SPI 399 IRQ_TYPE_LEVEL_HIGH>;
+ pinctrl-0 = <&hsi2c18_bus>;
+ pinctrl-names = "default";
+ status = "disabled";
+ };
+ };
+
+ usi7: usi@108a0000 {
+ compatible = "samsung,exynos8895-usi";
+ ranges = <0x0 0x108a0000 0x11000>;
+ clocks = <&cmu_peric1 CLK_GOUT_PERIC1_USI07_I_PCLK>,
+ <&cmu_peric1 CLK_GOUT_PERIC1_USI07_I_SCLK_USI>;
+ clock-names = "pclk", "ipclk";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ samsung,sysreg = <&syscon_peric1 0x1014>;
+ status = "disabled";
+
+ hsi2c_19: i2c@0 {
+ compatible = "samsung,exynos8895-hsi2c";
+ reg = <0x0 0x1000>;
+ clocks = <&cmu_peric1 CLK_GOUT_PERIC1_USI07_I_PCLK>;
+ clock-names = "hsi2c";
+ interrupts = <GIC_SPI 402 IRQ_TYPE_LEVEL_HIGH>;
+ pinctrl-0 = <&hsi2c19_bus>;
+ pinctrl-names = "default";
+ status = "disabled";
+ };
+
+ serial_9: serial@0 {
+ compatible = "samsung,exynos8895-uart";
+ reg = <0x0 0x100>;
+ clocks = <&cmu_peric1 CLK_GOUT_PERIC1_USI07_I_PCLK>,
+ <&cmu_peric1 CLK_GOUT_PERIC1_USI07_I_SCLK_USI>;
+ clock-names = "uart", "clk_uart_baud0";
+ interrupts = <GIC_SPI 404 IRQ_TYPE_LEVEL_HIGH>;
+ pinctrl-0 = <&uart9_bus>;
+ pinctrl-names = "default";
+ samsung,uart-fifosize = <64>;
+ status = "disabled";
+ };
+
+ spi_9: spi@0 {
+ compatible = "samsung,exynos8895-spi",
+ "samsung,exynos850-spi";
+ reg = <0x0 0x100>;
+ clocks = <&cmu_peric1 CLK_GOUT_PERIC1_USI07_I_PCLK>,
+ <&cmu_peric1 CLK_GOUT_PERIC1_USI07_I_SCLK_USI>;
+ clock-names = "spi", "spi_busclk0";
+ interrupts = <GIC_SPI 405 IRQ_TYPE_LEVEL_HIGH>;
+ pinctrl-0 = <&spi9_bus>;
+ pinctrl-names = "default";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ hsi2c_20: i2c@10000 {
+ compatible = "samsung,exynos8895-hsi2c";
+ reg = <0x10000 0x1000>;
+ clocks = <&cmu_peric1 CLK_GOUT_PERIC1_USI07_I_PCLK>;
+ clock-names = "hsi2c";
+ interrupts = <GIC_SPI 403 IRQ_TYPE_LEVEL_HIGH>;
+ pinctrl-0 = <&hsi2c20_bus>;
+ pinctrl-names = "default";
+ status = "disabled";
+ };
+ };
+
+ usi8: usi@108c0000 {
+ compatible = "samsung,exynos8895-usi";
+ ranges = <0x0 0x108c0000 0x11000>;
+ clocks = <&cmu_peric1 CLK_GOUT_PERIC1_USI08_I_PCLK>,
+ <&cmu_peric1 CLK_GOUT_PERIC1_USI08_I_SCLK_USI>;
+ clock-names = "pclk", "ipclk";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ samsung,sysreg = <&syscon_peric1 0x1018>;
+ status = "disabled";
+
+ hsi2c_21: i2c@0 {
+ compatible = "samsung,exynos8895-hsi2c";
+ reg = <0x0 0x1000>;
+ clocks = <&cmu_peric1 CLK_GOUT_PERIC1_USI08_I_PCLK>;
+ clock-names = "hsi2c";
+ interrupts = <GIC_SPI 406 IRQ_TYPE_LEVEL_HIGH>;
+ pinctrl-0 = <&hsi2c21_bus>;
+ pinctrl-names = "default";
+ status = "disabled";
+ };
+
+ serial_10: serial@0 {
+ compatible = "samsung,exynos8895-uart";
+ reg = <0x0 0x100>;
+ clocks = <&cmu_peric1 CLK_GOUT_PERIC1_USI08_I_PCLK>,
+ <&cmu_peric1 CLK_GOUT_PERIC1_USI08_I_SCLK_USI>;
+ clock-names = "uart", "clk_uart_baud0";
+ interrupts = <GIC_SPI 408 IRQ_TYPE_LEVEL_HIGH>;
+ pinctrl-0 = <&uart10_bus>;
+ pinctrl-names = "default";
+ samsung,uart-fifosize = <64>;
+ status = "disabled";
+ };
+
+ spi_10: spi@0 {
+ compatible = "samsung,exynos8895-spi",
+ "samsung,exynos850-spi";
+ reg = <0x0 0x100>;
+ clocks = <&cmu_peric1 CLK_GOUT_PERIC1_USI08_I_PCLK>,
+ <&cmu_peric1 CLK_GOUT_PERIC1_USI08_I_SCLK_USI>;
+ clock-names = "spi", "spi_busclk0";
+ interrupts = <GIC_SPI 409 IRQ_TYPE_LEVEL_HIGH>;
+ pinctrl-0 = <&spi10_bus>;
+ pinctrl-names = "default";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ hsi2c_22: i2c@10000 {
+ compatible = "samsung,exynos8895-hsi2c";
+ reg = <0x10000 0x1000>;
+ clocks = <&cmu_peric1 CLK_GOUT_PERIC1_USI08_I_PCLK>;
+ clock-names = "hsi2c";
+ interrupts = <GIC_SPI 407 IRQ_TYPE_LEVEL_HIGH>;
+ pinctrl-0 = <&hsi2c22_bus>;
+ pinctrl-names = "default";
+ status = "disabled";
+ };
+ };
+
+ usi9: usi@108e0000 {
+ compatible = "samsung,exynos8895-usi";
+ ranges = <0x0 0x108e0000 0x11000>;
+ clocks = <&cmu_peric1 CLK_GOUT_PERIC1_USI09_I_PCLK>,
+ <&cmu_peric1 CLK_GOUT_PERIC1_USI09_I_SCLK_USI>;
+ clock-names = "pclk", "ipclk";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ samsung,sysreg = <&syscon_peric1 0x101c>;
+ status = "disabled";
+
+ hsi2c_23: i2c@0 {
+ compatible = "samsung,exynos8895-hsi2c";
+ reg = <0x0 0x1000>;
+ clocks = <&cmu_peric1 CLK_GOUT_PERIC1_USI09_I_PCLK>;
+ clock-names = "hsi2c";
+ interrupts = <GIC_SPI 410 IRQ_TYPE_LEVEL_HIGH>;
+ pinctrl-0 = <&hsi2c23_bus>;
+ pinctrl-names = "default";
+ status = "disabled";
+ };
+
+ serial_11: serial@0 {
+ compatible = "samsung,exynos8895-uart";
+ reg = <0 0x100>;
+ clocks = <&cmu_peric1 CLK_GOUT_PERIC1_USI09_I_PCLK>,
+ <&cmu_peric1 CLK_GOUT_PERIC1_USI09_I_SCLK_USI>;
+ clock-names = "uart", "clk_uart_baud0";
+ interrupts = <GIC_SPI 412 IRQ_TYPE_LEVEL_HIGH>;
+ pinctrl-0 = <&uart11_bus>;
+ pinctrl-names = "default";
+ samsung,uart-fifosize = <64>;
+ status = "disabled";
+ };
+
+ spi_11: spi@0 {
+ compatible = "samsung,exynos8895-spi",
+ "samsung,exynos850-spi";
+ reg = <0 0x100>;
+ clocks = <&cmu_peric1 CLK_GOUT_PERIC1_USI09_I_PCLK>,
+ <&cmu_peric1 CLK_GOUT_PERIC1_USI09_I_SCLK_USI>;
+ clock-names = "spi", "spi_busclk0";
+ interrupts = <GIC_SPI 413 IRQ_TYPE_LEVEL_HIGH>;
+ pinctrl-0 = <&spi11_bus>;
+ pinctrl-names = "default";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ hsi2c_24: i2c@10000 {
+ compatible = "samsung,exynos8895-hsi2c";
+ reg = <0x10000 0x1000>;
+ clocks = <&cmu_peric1 CLK_GOUT_PERIC1_USI09_I_PCLK>;
+ clock-names = "hsi2c";
+ interrupts = <GIC_SPI 411 IRQ_TYPE_LEVEL_HIGH>;
+ pinctrl-0 = <&hsi2c24_bus>;
+ pinctrl-names = "default";
+ status = "disabled";
+ };
+ };
+
+ usi10: usi@10900000 {
+ compatible = "samsung,exynos8895-usi";
+ ranges = <0x0 0x10900000 0x11000>;
+ clocks = <&cmu_peric1 CLK_GOUT_PERIC1_USI10_I_PCLK>,
+ <&cmu_peric1 CLK_GOUT_PERIC1_USI10_I_SCLK_USI>;
+ clock-names = "pclk", "ipclk";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ samsung,sysreg = <&syscon_peric1 0x1020>;
+ status = "disabled";
+
+ hsi2c_25: i2c@0 {
+ compatible = "samsung,exynos8895-hsi2c";
+ reg = <0x0 0x1000>;
+ clocks = <&cmu_peric1 CLK_GOUT_PERIC1_USI10_I_PCLK>;
+ clock-names = "hsi2c";
+ interrupts = <GIC_SPI 414 IRQ_TYPE_LEVEL_HIGH>;
+ pinctrl-0 = <&hsi2c25_bus>;
+ pinctrl-names = "default";
+ status = "disabled";
+ };
+
+ serial_12: serial@0 {
+ compatible = "samsung,exynos8895-uart";
+ reg = <0 0x100>;
+ clocks = <&cmu_peric1 CLK_GOUT_PERIC1_USI10_I_PCLK>,
+ <&cmu_peric1 CLK_GOUT_PERIC1_USI10_I_SCLK_USI>;
+ clock-names = "uart", "clk_uart_baud0";
+ interrupts = <GIC_SPI 416 IRQ_TYPE_LEVEL_HIGH>;
+ pinctrl-0 = <&uart12_bus>;
+ pinctrl-names = "default";
+ samsung,uart-fifosize = <64>;
+ status = "disabled";
+ };
+
+ spi_12: spi@0 {
+ compatible = "samsung,exynos8895-spi",
+ "samsung,exynos850-spi";
+ reg = <0 0x100>;
+ clocks = <&cmu_peric1 CLK_GOUT_PERIC1_USI10_I_PCLK>,
+ <&cmu_peric1 CLK_GOUT_PERIC1_USI10_I_SCLK_USI>;
+ clock-names = "spi", "spi_busclk0";
+ interrupts = <GIC_SPI 417 IRQ_TYPE_LEVEL_HIGH>;
+ pinctrl-0 = <&spi12_bus>;
+ pinctrl-names = "default";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ hsi2c_26: i2c@10000 {
+ compatible = "samsung,exynos8895-hsi2c";
+ reg = <0x10000 0x1000>;
+ clocks = <&cmu_peric1 CLK_GOUT_PERIC1_USI10_I_PCLK>;
+ clock-names = "hsi2c";
+ interrupts = <GIC_SPI 415 IRQ_TYPE_LEVEL_HIGH>;
+ pinctrl-0 = <&hsi2c26_bus>;
+ pinctrl-names = "default";
+ status = "disabled";
+ };
+ };
+
+ usi11: usi@10920000 {
+ compatible = "samsung,exynos8895-usi";
+ ranges = <0x0 0x10920000 0x11000>;
+ clocks = <&cmu_peric1 CLK_GOUT_PERIC1_USI11_I_PCLK>,
+ <&cmu_peric1 CLK_GOUT_PERIC1_USI11_I_SCLK_USI>;
+ clock-names = "pclk", "ipclk";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ samsung,sysreg = <&syscon_peric1 0x1024>;
+ status = "disabled";
+
+ hsi2c_27: i2c@0 {
+ compatible = "samsung,exynos8895-hsi2c";
+ reg = <0x0 0x1000>;
+ clocks = <&cmu_peric1 CLK_GOUT_PERIC1_USI11_I_PCLK>;
+ clock-names = "hsi2c";
+ interrupts = <GIC_SPI 418 IRQ_TYPE_LEVEL_HIGH>;
+ pinctrl-0 = <&hsi2c27_bus>;
+ pinctrl-names = "default";
+ status = "disabled";
+ };
+
+ serial_13: serial@0 {
+ compatible = "samsung,exynos8895-uart";
+ reg = <0 0x100>;
+ clocks = <&cmu_peric1 CLK_GOUT_PERIC1_USI11_I_PCLK>,
+ <&cmu_peric1 CLK_GOUT_PERIC1_USI11_I_SCLK_USI>;
+ clock-names = "uart", "clk_uart_baud0";
+ interrupts = <GIC_SPI 420 IRQ_TYPE_LEVEL_HIGH>;
+ pinctrl-0 = <&uart13_bus>;
+ pinctrl-names = "default";
+ samsung,uart-fifosize = <64>;
+ status = "disabled";
+ };
+
+ spi_13: spi@0 {
+ compatible = "samsung,exynos8895-spi",
+ "samsung,exynos850-spi";
+ reg = <0 0x100>;
+ clocks = <&cmu_peric1 CLK_GOUT_PERIC1_USI11_I_PCLK>,
+ <&cmu_peric1 CLK_GOUT_PERIC1_USI11_I_SCLK_USI>;
+ clock-names = "spi", "spi_busclk0";
+ interrupts = <GIC_SPI 421 IRQ_TYPE_LEVEL_HIGH>;
+ pinctrl-0 = <&spi13_bus>;
+ pinctrl-names = "default";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ hsi2c_28: i2c@10000 {
+ compatible = "samsung,exynos8895-hsi2c";
+ reg = <0x10000 0x1000>;
+ clocks = <&cmu_peric1 CLK_GOUT_PERIC1_USI11_I_PCLK>;
+ clock-names = "hsi2c";
+ interrupts = <GIC_SPI 419 IRQ_TYPE_LEVEL_HIGH>;
+ pinctrl-0 = <&hsi2c28_bus>;
+ pinctrl-names = "default";
+ status = "disabled";
+ };
+ };
+
+ usi12: usi@10940000 {
+ compatible = "samsung,exynos8895-usi";
+ ranges = <0x0 0x10940000 0x11000>;
+ clocks = <&cmu_peric1 CLK_GOUT_PERIC1_USI12_I_PCLK>,
+ <&cmu_peric1 CLK_GOUT_PERIC1_USI12_I_SCLK_USI>;
+ clock-names = "pclk", "ipclk";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ samsung,sysreg = <&syscon_peric1 0x1028>;
+ status = "disabled";
+
+ hsi2c_29: i2c@0 {
+ compatible = "samsung,exynos8895-hsi2c";
+ reg = <0x0 0x1000>;
+ clocks = <&cmu_peric1 CLK_GOUT_PERIC1_USI12_I_PCLK>;
+ clock-names = "hsi2c";
+ interrupts = <GIC_SPI 422 IRQ_TYPE_LEVEL_HIGH>;
+ pinctrl-0 = <&hsi2c29_bus>;
+ pinctrl-names = "default";
+ status = "disabled";
+ };
+
+ serial_14: serial@0 {
+ compatible = "samsung,exynos8895-uart";
+ reg = <0 0x100>;
+ clocks = <&cmu_peric1 CLK_GOUT_PERIC1_USI12_I_PCLK>,
+ <&cmu_peric1 CLK_GOUT_PERIC1_USI12_I_SCLK_USI>;
+ clock-names = "uart", "clk_uart_baud0";
+ interrupts = <GIC_SPI 424 IRQ_TYPE_LEVEL_HIGH>;
+ pinctrl-0 = <&uart14_bus>;
+ pinctrl-names = "default";
+ samsung,uart-fifosize = <64>;
+ status = "disabled";
+ };
+
+ spi_14: spi@0 {
+ compatible = "samsung,exynos8895-spi",
+ "samsung,exynos850-spi";
+ reg = <0 0x100>;
+ clocks = <&cmu_peric1 CLK_GOUT_PERIC1_USI12_I_PCLK>,
+ <&cmu_peric1 CLK_GOUT_PERIC1_USI12_I_SCLK_USI>;
+ clock-names = "spi", "spi_busclk0";
+ interrupts = <GIC_SPI 425 IRQ_TYPE_LEVEL_HIGH>;
+ pinctrl-0 = <&spi14_bus>;
+ pinctrl-names = "default";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ hsi2c_30: i2c@10000 {
+ compatible = "samsung,exynos8895-hsi2c";
+ reg = <0x10000 0x1000>;
+ clocks = <&cmu_peric1 CLK_GOUT_PERIC1_USI12_I_PCLK>;
+ clock-names = "hsi2c";
+ interrupts = <GIC_SPI 423 IRQ_TYPE_LEVEL_HIGH>;
+ pinctrl-0 = <&hsi2c30_bus>;
+ pinctrl-names = "default";
+ status = "disabled";
+ };
+ };
+
+ usi13: usi@10960000 {
+ compatible = "samsung,exynos8895-usi";
+ ranges = <0x0 0x10960000 0x11000>;
+ clocks = <&cmu_peric1 CLK_GOUT_PERIC1_USI13_I_PCLK>,
+ <&cmu_peric1 CLK_GOUT_PERIC1_USI13_I_SCLK_USI>;
+ clock-names = "pclk", "ipclk";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ samsung,sysreg = <&syscon_peric1 0x102c>;
+ status = "disabled";
+
+ hsi2c_31: i2c@0 {
+ compatible = "samsung,exynos8895-hsi2c";
+ reg = <0x0 0x1000>;
+ clocks = <&cmu_peric1 CLK_GOUT_PERIC1_USI13_I_PCLK>;
+ clock-names = "hsi2c";
+ interrupts = <GIC_SPI 426 IRQ_TYPE_LEVEL_HIGH>;
+ pinctrl-0 = <&hsi2c31_bus>;
+ pinctrl-names = "default";
+ status = "disabled";
+ };
+
+ serial_15: serial@0 {
+ compatible = "samsung,exynos8895-uart";
+ reg = <0 0x100>;
+ clocks = <&cmu_peric1 CLK_GOUT_PERIC1_USI13_I_PCLK>,
+ <&cmu_peric1 CLK_GOUT_PERIC1_USI13_I_SCLK_USI>;
+ clock-names = "uart", "clk_uart_baud0";
+ interrupts = <GIC_SPI 428 IRQ_TYPE_LEVEL_HIGH>;
+ pinctrl-0 = <&uart15_bus>;
+ pinctrl-names = "default";
+ samsung,uart-fifosize = <64>;
+ status = "disabled";
+ };
+
+ spi_15: spi@0 {
+ compatible = "samsung,exynos8895-spi",
+ "samsung,exynos850-spi";
+ reg = <0 0x100>;
+ clocks = <&cmu_peric1 CLK_GOUT_PERIC1_USI13_I_PCLK>,
+ <&cmu_peric1 CLK_GOUT_PERIC1_USI13_I_SCLK_USI>;
+ clock-names = "spi", "spi_busclk0";
+ interrupts = <GIC_SPI 429 IRQ_TYPE_LEVEL_HIGH>;
+ pinctrl-0 = <&spi15_bus>;
+ pinctrl-names = "default";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ hsi2c_32: i2c@10000 {
+ compatible = "samsung,exynos8895-hsi2c";
+ reg = <0x10000 0x1000>;
+ clocks = <&cmu_peric1 CLK_GOUT_PERIC1_USI13_I_PCLK>;
+ clock-names = "hsi2c";
+ interrupts = <GIC_SPI 427 IRQ_TYPE_LEVEL_HIGH>;
+ pinctrl-0 = <&hsi2c32_bus>;
+ pinctrl-names = "default";
+ status = "disabled";
+ };
+ };
+
pinctrl_peric1: pinctrl@10980000 {
compatible = "samsung,exynos8895-pinctrl";
reg = <0x10980000 0x1000>;
@@ -380,6 +1260,12 @@
"ufs", "usbdrd30";
};
+ syscon_fsys0: syscon@11020000 {
+ compatible = "samsung,exynos8895-fsys0-sysreg", "syscon";
+ reg = <0x11020000 0x2000>;
+ clocks = <&cmu_fsys0 CLK_GOUT_FSYS0_SYSREG_FSYS0_PCLK>;
+ };
+
pinctrl_fsys0: pinctrl@11050000 {
compatible = "samsung,exynos8895-pinctrl";
reg = <0x11050000 0x1000>;
@@ -398,12 +1284,34 @@
clock-names = "oscclk", "bus", "pcie", "ufs", "mmc";
};
+ syscon_fsys1: syscon@11420000 {
+ compatible = "samsung,exynos8895-fsys1-sysreg", "syscon";
+ reg = <0x11420000 0x2000>;
+ clocks = <&cmu_fsys1 CLK_GOUT_FSYS1_SYSREG_FSYS1_PCLK>;
+ };
+
pinctrl_fsys1: pinctrl@11430000 {
compatible = "samsung,exynos8895-pinctrl";
reg = <0x11430000 0x1000>;
interrupts = <GIC_SPI 342 IRQ_TYPE_LEVEL_HIGH>;
};
+ mmc: mmc@11500000 {
+ compatible = "samsung,exynos8895-dw-mshc-smu",
+ "samsung,exynos7-dw-mshc-smu";
+ reg = <0x11500000 0x2000>;
+ assigned-clocks = <&cmu_top CLK_MOUT_CMU_FSYS1_MMC_CARD>;
+ assigned-clock-parents = <&cmu_top CLK_FOUT_SHARED4_PLL>;
+ clocks = <&cmu_fsys1 CLK_GOUT_FSYS1_MMC_CARD_I_ACLK>,
+ <&cmu_fsys1 CLK_GOUT_FSYS1_MMC_CARD_SDCLKIN>;
+ clock-names = "biu", "ciu";
+ fifo-depth = <64>;
+ interrupts = <GIC_SPI 341 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
pinctrl_abox: pinctrl@13e60000 {
compatible = "samsung,exynos8895-pinctrl";
reg = <0x13e60000 0x1000>;
diff --git a/arch/arm64/boot/dts/exynos/exynos990.dtsi b/arch/arm64/boot/dts/exynos/exynos990.dtsi
index 9d017dbed952..dd7f99f51a75 100644
--- a/arch/arm64/boot/dts/exynos/exynos990.dtsi
+++ b/arch/arm64/boot/dts/exynos/exynos990.dtsi
@@ -25,37 +25,6 @@
pinctrl6 = &pinctrl_vts;
};
- arm-a55-pmu {
- compatible = "arm,cortex-a55-pmu";
- interrupts = <GIC_SPI 163 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 164 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 165 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 166 IRQ_TYPE_LEVEL_HIGH>;
-
- interrupt-affinity = <&cpu0>,
- <&cpu1>,
- <&cpu2>,
- <&cpu3>;
- };
-
- arm-a76-pmu {
- compatible = "arm,cortex-a76-pmu";
- interrupts = <GIC_SPI 167 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 168 IRQ_TYPE_LEVEL_HIGH>;
-
- interrupt-affinity = <&cpu4>,
- <&cpu5>;
- };
-
- mongoose-m5-pmu {
- compatible = "samsung,mongoose-pmu";
- interrupts = <GIC_SPI 180 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 181 IRQ_TYPE_LEVEL_HIGH>;
-
- interrupt-affinity = <&cpu6>,
- <&cpu7>;
- };
-
cpus {
#address-cells = <1>;
#size-cells = <0>;
@@ -163,6 +132,37 @@
clock-output-names = "oscclk";
};
+ pmu-a55 {
+ compatible = "arm,cortex-a55-pmu";
+ interrupts = <GIC_SPI 163 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 164 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 165 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 166 IRQ_TYPE_LEVEL_HIGH>;
+
+ interrupt-affinity = <&cpu0>,
+ <&cpu1>,
+ <&cpu2>,
+ <&cpu3>;
+ };
+
+ pmu-a76 {
+ compatible = "arm,cortex-a76-pmu";
+ interrupts = <GIC_SPI 167 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 168 IRQ_TYPE_LEVEL_HIGH>;
+
+ interrupt-affinity = <&cpu4>,
+ <&cpu5>;
+ };
+
+ pmu-mongoose-m5 {
+ compatible = "samsung,mongoose-pmu";
+ interrupts = <GIC_SPI 180 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 181 IRQ_TYPE_LEVEL_HIGH>;
+
+ interrupt-affinity = <&cpu6>,
+ <&cpu7>;
+ };
+
psci {
compatible = "arm,psci-0.2";
method = "hvc";
@@ -181,6 +181,36 @@
reg = <0x10000000 0x100>;
};
+ cmu_peris: clock-controller@10020000 {
+ compatible = "samsung,exynos990-cmu-peris";
+ reg = <0x10020000 0x8000>;
+ #clock-cells = <1>;
+
+ clocks = <&oscclk>,
+ <&cmu_top CLK_DOUT_CMU_PERIS_BUS>;
+ clock-names = "oscclk", "bus";
+ };
+
+ timer@10040000 {
+ compatible = "samsung,exynos990-mct",
+ "samsung,exynos4210-mct";
+ reg = <0x10040000 0x800>;
+ clocks = <&oscclk>, <&cmu_peris CLK_GOUT_PERIS_MCT_PCLK>;
+ clock-names = "fin_pll", "mct";
+ interrupts = <GIC_SPI 466 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 467 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 468 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 469 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 470 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 471 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 472 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 473 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 474 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 475 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 476 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 477 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
gic: interrupt-controller@10101000 {
compatible = "arm,gic-400";
reg = <0x10101000 0x1000>,
diff --git a/arch/arm64/boot/dts/exynos/exynosautov920.dtsi b/arch/arm64/boot/dts/exynos/exynosautov920.dtsi
index eb446cdc4ab6..fc6ac531d597 100644
--- a/arch/arm64/boot/dts/exynos/exynosautov920.dtsi
+++ b/arch/arm64/boot/dts/exynos/exynosautov920.dtsi
@@ -89,6 +89,13 @@
compatible = "arm,cortex-a78ae";
reg = <0x0 0x0>;
enable-method = "psci";
+ i-cache-size = <0x10000>;
+ i-cache-line-size = <64>;
+ i-cache-sets = <256>;
+ d-cache-size = <0x10000>;
+ d-cache-line-size = <64>;
+ d-cache-sets = <256>;
+ next-level-cache = <&l2_cache_cl0>;
};
cpu1: cpu@100 {
@@ -96,6 +103,13 @@
compatible = "arm,cortex-a78ae";
reg = <0x0 0x100>;
enable-method = "psci";
+ i-cache-size = <0x10000>;
+ i-cache-line-size = <64>;
+ i-cache-sets = <256>;
+ d-cache-size = <0x10000>;
+ d-cache-line-size = <64>;
+ d-cache-sets = <256>;
+ next-level-cache = <&l2_cache_cl0>;
};
cpu2: cpu@200 {
@@ -103,6 +117,13 @@
compatible = "arm,cortex-a78ae";
reg = <0x0 0x200>;
enable-method = "psci";
+ i-cache-size = <0x10000>;
+ i-cache-line-size = <64>;
+ i-cache-sets = <256>;
+ d-cache-size = <0x10000>;
+ d-cache-line-size = <64>;
+ d-cache-sets = <256>;
+ next-level-cache = <&l2_cache_cl0>;
};
cpu3: cpu@300 {
@@ -110,6 +131,13 @@
compatible = "arm,cortex-a78ae";
reg = <0x0 0x300>;
enable-method = "psci";
+ i-cache-size = <0x10000>;
+ i-cache-line-size = <64>;
+ i-cache-sets = <256>;
+ d-cache-size = <0x10000>;
+ d-cache-line-size = <64>;
+ d-cache-sets = <256>;
+ next-level-cache = <&l2_cache_cl0>;
};
cpu4: cpu@10000 {
@@ -117,6 +145,13 @@
compatible = "arm,cortex-a78ae";
reg = <0x0 0x10000>;
enable-method = "psci";
+ i-cache-size = <0x10000>;
+ i-cache-line-size = <64>;
+ i-cache-sets = <256>;
+ d-cache-size = <0x10000>;
+ d-cache-line-size = <64>;
+ d-cache-sets = <256>;
+ next-level-cache = <&l2_cache_cl1>;
};
cpu5: cpu@10100 {
@@ -124,6 +159,13 @@
compatible = "arm,cortex-a78ae";
reg = <0x0 0x10100>;
enable-method = "psci";
+ i-cache-size = <0x10000>;
+ i-cache-line-size = <64>;
+ i-cache-sets = <256>;
+ d-cache-size = <0x10000>;
+ d-cache-line-size = <64>;
+ d-cache-sets = <256>;
+ next-level-cache = <&l2_cache_cl1>;
};
cpu6: cpu@10200 {
@@ -131,6 +173,13 @@
compatible = "arm,cortex-a78ae";
reg = <0x0 0x10200>;
enable-method = "psci";
+ i-cache-size = <0x10000>;
+ i-cache-line-size = <64>;
+ i-cache-sets = <256>;
+ d-cache-size = <0x10000>;
+ d-cache-line-size = <64>;
+ d-cache-sets = <256>;
+ next-level-cache = <&l2_cache_cl1>;
};
cpu7: cpu@10300 {
@@ -138,6 +187,13 @@
compatible = "arm,cortex-a78ae";
reg = <0x0 0x10300>;
enable-method = "psci";
+ i-cache-size = <0x10000>;
+ i-cache-line-size = <64>;
+ i-cache-sets = <256>;
+ d-cache-size = <0x10000>;
+ d-cache-line-size = <64>;
+ d-cache-sets = <256>;
+ next-level-cache = <&l2_cache_cl1>;
};
cpu8: cpu@20000 {
@@ -145,6 +201,13 @@
compatible = "arm,cortex-a78ae";
reg = <0x0 0x20000>;
enable-method = "psci";
+ i-cache-size = <0x10000>;
+ i-cache-line-size = <64>;
+ i-cache-sets = <256>;
+ d-cache-size = <0x10000>;
+ d-cache-line-size = <64>;
+ d-cache-sets = <256>;
+ next-level-cache = <&l2_cache_cl2>;
};
cpu9: cpu@20100 {
@@ -152,6 +215,70 @@
compatible = "arm,cortex-a78ae";
reg = <0x0 0x20100>;
enable-method = "psci";
+ i-cache-size = <0x10000>;
+ i-cache-line-size = <64>;
+ i-cache-sets = <256>;
+ d-cache-size = <0x10000>;
+ d-cache-line-size = <64>;
+ d-cache-sets = <256>;
+ next-level-cache = <&l2_cache_cl2>;
+ };
+
+ l2_cache_cl0: l2-cache0 {
+ compatible = "cache";
+ cache-level = <2>;
+ cache-unified;
+ cache-size = <0x40000>;
+ cache-line-size = <64>;
+ cache-sets = <512>;
+ next-level-cache = <&l3_cache_cl0>;
+ };
+
+ l2_cache_cl1: l2-cache1 {
+ compatible = "cache";
+ cache-level = <2>;
+ cache-unified;
+ cache-size = <0x40000>;
+ cache-line-size = <64>;
+ cache-sets = <512>;
+ next-level-cache = <&l3_cache_cl1>;
+ };
+
+ l2_cache_cl2: l2-cache2 {
+ compatible = "cache";
+ cache-level = <2>;
+ cache-unified;
+ cache-size = <0x40000>;
+ cache-line-size = <64>;
+ cache-sets = <512>;
+ next-level-cache = <&l3_cache_cl2>;
+ };
+
+ l3_cache_cl0: l3-cache0 {
+ compatible = "cache";
+ cache-level = <3>;
+ cache-unified;
+ cache-size = <0x200000>; /* 2MB L3 cache for cpu cluster-0 */
+ cache-line-size = <64>;
+ cache-sets = <2048>;
+ };
+
+ l3_cache_cl1: l3-cache1 {
+ compatible = "cache";
+ cache-level = <3>;
+ cache-unified;
+ cache-size = <0x200000>; /* 2MB L3 cache for cpu cluster-1 */
+ cache-line-size = <64>;
+ cache-sets = <2048>;
+ };
+
+ l3_cache_cl2: l3-cache2 {
+ compatible = "cache";
+ cache-level = <3>;
+ cache-unified;
+ cache-size = <0x100000>; /* 1MB L3 cache for cpu cluster-2 */
+ cache-line-size = <64>;
+ cache-sets = <1365>;
};
};
@@ -440,6 +567,17 @@
interrupts = <GIC_SPI 603 IRQ_TYPE_LEVEL_HIGH>;
};
+ ufs_0_phy: phy@16e04000 {
+ compatible = "samsung,exynosautov920-ufs-phy";
+ reg = <0x16e04000 0x4000>;
+ reg-names = "phy-pma";
+ clocks = <&xtcxo>;
+ clock-names = "ref_clk";
+ samsung,pmu-syscon = <&pmu_system_controller>;
+ #phy-cells = <0>;
+ status = "disabled";
+ };
+
pinctrl_aud: pinctrl@1a460000 {
compatible = "samsung,exynosautov920-pinctrl";
reg = <0x1a460000 0x10000>;
diff --git a/arch/arm64/boot/dts/exynos/google/Makefile b/arch/arm64/boot/dts/exynos/google/Makefile
index 0a6d5e1fe4ee..7385f82b03c9 100644
--- a/arch/arm64/boot/dts/exynos/google/Makefile
+++ b/arch/arm64/boot/dts/exynos/google/Makefile
@@ -2,3 +2,4 @@
dtb-$(CONFIG_ARCH_EXYNOS) += \
gs101-oriole.dtb \
+ gs101-raven.dtb
diff --git a/arch/arm64/boot/dts/exynos/google/gs101-oriole.dts b/arch/arm64/boot/dts/exynos/google/gs101-oriole.dts
index e58881c61d53..8df42bedbc03 100644
--- a/arch/arm64/boot/dts/exynos/google/gs101-oriole.dts
+++ b/arch/arm64/boot/dts/exynos/google/gs101-oriole.dts
@@ -8,273 +8,22 @@
/dts-v1/;
-#include <dt-bindings/gpio/gpio.h>
-#include <dt-bindings/input/input.h>
-#include <dt-bindings/usb/pd.h>
-#include "gs101-pinctrl.h"
-#include "gs101.dtsi"
+#include "gs101-pixel-common.dtsi"
/ {
model = "Oriole";
compatible = "google,gs101-oriole", "google,gs101";
-
- aliases {
- serial0 = &serial_0;
- };
-
- chosen {
- /* Bootloader expects bootargs specified otherwise it crashes */
- bootargs = "";
- stdout-path = &serial_0;
- };
-
- gpio-keys {
- compatible = "gpio-keys";
- pinctrl-0 = <&key_voldown>, <&key_volup>, <&key_power>;
- pinctrl-names = "default";
-
- button-vol-down {
- label = "KEY_VOLUMEDOWN";
- linux,code = <KEY_VOLUMEDOWN>;
- gpios = <&gpa7 3 GPIO_ACTIVE_LOW>;
- wakeup-source;
- };
-
- button-vol-up {
- label = "KEY_VOLUMEUP";
- linux,code = <KEY_VOLUMEUP>;
- gpios = <&gpa8 1 GPIO_ACTIVE_LOW>;
- wakeup-source;
- };
-
- button-power {
- label = "KEY_POWER";
- linux,code = <KEY_POWER>;
- gpios = <&gpa10 1 GPIO_ACTIVE_LOW>;
- wakeup-source;
- };
- };
-
- /* TODO: Remove this once PMIC is implemented */
- reg_placeholder: regulator-0 {
- compatible = "regulator-fixed";
- regulator-name = "placeholder_reg";
- };
-
- /* TODO: Remove this once S2MPG11 slave PMIC is implemented */
- ufs_0_fixed_vcc_reg: regulator-1 {
- compatible = "regulator-fixed";
- regulator-name = "ufs-vcc";
- gpio = <&gpp0 1 GPIO_ACTIVE_HIGH>;
- regulator-boot-on;
- enable-active-high;
- };
-};
-
-&ext_24_5m {
- clock-frequency = <24576000>;
-};
-
-&ext_200m {
- clock-frequency = <200000000>;
-};
-
-&hsi2c_8 {
- status = "okay";
-
- eeprom: eeprom@50 {
- compatible = "atmel,24c08";
- reg = <0x50>;
- };
-};
-
-&hsi2c_12 {
- status = "okay";
- /* TODO: add the devices once drivers exist */
-
- usb-typec@25 {
- compatible = "maxim,max77759-tcpci", "maxim,max33359";
- reg = <0x25>;
- interrupts-extended = <&gpa8 2 IRQ_TYPE_LEVEL_LOW>;
- pinctrl-0 = <&typec_int>;
- pinctrl-names = "default";
-
- connector {
- compatible = "usb-c-connector";
- label = "USB-C";
- data-role = "dual";
- power-role = "dual";
- self-powered;
- try-power-role = "sink";
- op-sink-microwatt = <2600000>;
- slow-charger-loop;
- /*
- * max77759 operating in reverse boost mode (0xA) can
- * source up to 1.5A while extboost can only do ~1A.
- * Since extboost is the primary path, advertise 900mA.
- */
- source-pdos = <PDO_FIXED(5000, 900,
- (PDO_FIXED_SUSPEND
- | PDO_FIXED_USB_COMM
- | PDO_FIXED_DATA_SWAP
- | PDO_FIXED_DUAL_ROLE))>;
- sink-pdos = <PDO_FIXED(5000, 3000,
- (PDO_FIXED_DATA_SWAP
- | PDO_FIXED_USB_COMM
- | PDO_FIXED_HIGHER_CAP
- | PDO_FIXED_DUAL_ROLE))
- PDO_FIXED(9000, 2200, 0)
- PDO_PPS_APDO(5000, 11000, 3000)>;
- sink-vdos = <VDO_IDH(1, 1, IDH_PTYPE_PERIPH, 0,
- IDH_PTYPE_DFP_HOST, 2, 0x18d1)
- VDO_CERT(0x0)
- VDO_PRODUCT(0x4ee1, 0x0)
- VDO_UFP(UFP_VDO_VER1_2,
- (DEV_USB2_CAPABLE
- | DEV_USB3_CAPABLE),
- UFP_RECEPTACLE, 0,
- AMA_VCONN_NOT_REQ, 0,
- UFP_ALTMODE_NOT_SUPP,
- UFP_USB32_GEN1)
- /* padding */ 0
- VDO_DFP(DFP_VDO_VER1_1,
- (HOST_USB2_CAPABLE
- | HOST_USB3_CAPABLE),
- DFP_RECEPTACLE, 0)>;
- sink-vdos-v1 = <VDO_IDH(1, 1, IDH_PTYPE_PERIPH, 0,
- 0, 0, 0x18d1)
- VDO_CERT(0x0)
- VDO_PRODUCT(0x4ee1, 0x0)>;
- /*
- * Until bootloader is updated to set those two when
- * console is enabled, we disable PD here.
- */
- pd-disable;
- typec-power-opmode = "default";
-
- ports {
- #address-cells = <1>;
- #size-cells = <0>;
-
- port@0 {
- reg = <0>;
-
- usbc0_orien_sw: endpoint {
- remote-endpoint = <&usbdrd31_phy_orien_switch>;
- };
- };
-
- port@1 {
- reg = <1>;
-
- usbc0_role_sw: endpoint {
- remote-endpoint = <&usbdrd31_dwc3_role_switch>;
- };
- };
- };
- };
- };
-};
-
-&pinctrl_far_alive {
- key_voldown: key-voldown-pins {
- samsung,pins = "gpa7-3";
- samsung,pin-function = <GS101_PIN_FUNC_EINT>;
- samsung,pin-pud = <GS101_PIN_PULL_NONE>;
- samsung,pin-drv = <GS101_PIN_DRV_2_5_MA>;
- };
-
- key_volup: key-volup-pins {
- samsung,pins = "gpa8-1";
- samsung,pin-function = <GS101_PIN_FUNC_EINT>;
- samsung,pin-pud = <GS101_PIN_PULL_NONE>;
- samsung,pin-drv = <GS101_PIN_DRV_2_5_MA>;
- };
-
- typec_int: typec-int-pins {
- samsung,pins = "gpa8-2";
- samsung,pin-function = <GS101_PIN_FUNC_EINT>;
- samsung,pin-pud = <GS101_PIN_PULL_UP>;
- samsung,pin-drv = <GS101_PIN_DRV_2_5_MA>;
- };
-};
-
-&pinctrl_gpio_alive {
- key_power: key-power-pins {
- samsung,pins = "gpa10-1";
- samsung,pin-function = <GS101_PIN_FUNC_EINT>;
- samsung,pin-pud = <GS101_PIN_PULL_NONE>;
- samsung,pin-drv = <GS101_PIN_DRV_2_5_MA>;
- };
-};
-
-&serial_0 {
- status = "okay";
-};
-
-&ufs_0 {
- status = "okay";
- vcc-supply = <&ufs_0_fixed_vcc_reg>;
-};
-
-&ufs_0_phy {
- status = "okay";
-};
-
-&usbdrd31 {
- vdd10-supply = <&reg_placeholder>;
- vdd33-supply = <&reg_placeholder>;
- status = "okay";
-};
-
-&usbdrd31_dwc3 {
- dr_mode = "otg";
- usb-role-switch;
- role-switch-default-mode = "peripheral";
- maximum-speed = "super-speed-plus";
- status = "okay";
-
- port {
- usbdrd31_dwc3_role_switch: endpoint {
- remote-endpoint = <&usbc0_role_sw>;
- };
- };
-};
-
-&usbdrd31_phy {
- orientation-switch;
- /* TODO: Update these once PMIC is implemented */
- pll-supply = <&reg_placeholder>;
- dvdd-usb20-supply = <&reg_placeholder>;
- vddh-usb20-supply = <&reg_placeholder>;
- vdd33-usb20-supply = <&reg_placeholder>;
- vdda-usbdp-supply = <&reg_placeholder>;
- vddh-usbdp-supply = <&reg_placeholder>;
- status = "okay";
-
- port {
- usbdrd31_phy_orien_switch: endpoint {
- remote-endpoint = <&usbc0_orien_sw>;
- };
- };
-};
-
-&usi_uart {
- samsung,clkreq-on; /* needed for UART mode */
- status = "okay";
-};
-
-&usi8 {
- samsung,mode = <USI_V2_I2C>;
- status = "okay";
};
-&usi12 {
- samsung,mode = <USI_V2_I2C>;
+&cont_splash_mem {
+ reg = <0x0 0xfac00000 (1080 * 2400 * 4)>;
status = "okay";
};
-&watchdog_cl0 {
- timeout-sec = <30>;
+&framebuffer0 {
+ width = <1080>;
+ height = <2400>;
+ stride = <(1080 * 4)>;
+ format = "a8r8g8b8";
status = "okay";
};
diff --git a/arch/arm64/boot/dts/exynos/google/gs101-pixel-common.dtsi b/arch/arm64/boot/dts/exynos/google/gs101-pixel-common.dtsi
new file mode 100644
index 000000000000..b25230495c64
--- /dev/null
+++ b/arch/arm64/boot/dts/exynos/google/gs101-pixel-common.dtsi
@@ -0,0 +1,294 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Device Tree nodes common to all GS101-based Pixel devices
+ *
+ * Copyright 2021-2023 Google LLC
+ * Copyright 2023 Linaro Ltd - <peter.griffin@linaro.org>
+ */
+
+/dts-v1/;
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/usb/pd.h>
+#include "gs101-pinctrl.h"
+#include "gs101.dtsi"
+
+/ {
+ aliases {
+ serial0 = &serial_0;
+ };
+
+ chosen {
+ /* Bootloader expects bootargs specified otherwise it crashes */
+ bootargs = "";
+ stdout-path = &serial_0;
+
+ /* Use the display framebuffer as set up by the bootloader */
+ framebuffer0: framebuffer-0 {
+ compatible = "simple-framebuffer";
+ memory-region = <&cont_splash_mem>;
+ /* format properties to be added by the actual board */
+ status = "disabled";
+ };
+ };
+
+ gpio-keys {
+ compatible = "gpio-keys";
+ pinctrl-0 = <&key_voldown>, <&key_volup>, <&key_power>;
+ pinctrl-names = "default";
+
+ button-vol-down {
+ label = "KEY_VOLUMEDOWN";
+ linux,code = <KEY_VOLUMEDOWN>;
+ gpios = <&gpa7 3 GPIO_ACTIVE_LOW>;
+ wakeup-source;
+ };
+
+ button-vol-up {
+ label = "KEY_VOLUMEUP";
+ linux,code = <KEY_VOLUMEUP>;
+ gpios = <&gpa8 1 GPIO_ACTIVE_LOW>;
+ wakeup-source;
+ };
+
+ button-power {
+ label = "KEY_POWER";
+ linux,code = <KEY_POWER>;
+ gpios = <&gpa10 1 GPIO_ACTIVE_LOW>;
+ wakeup-source;
+ };
+ };
+
+ /* TODO: Remove this once PMIC is implemented */
+ reg_placeholder: regulator-0 {
+ compatible = "regulator-fixed";
+ regulator-name = "placeholder_reg";
+ };
+
+ /* TODO: Remove this once S2MPG11 slave PMIC is implemented */
+ ufs_0_fixed_vcc_reg: regulator-1 {
+ compatible = "regulator-fixed";
+ regulator-name = "ufs-vcc";
+ gpio = <&gpp0 1 GPIO_ACTIVE_HIGH>;
+ regulator-boot-on;
+ enable-active-high;
+ };
+
+ reserved-memory {
+ cont_splash_mem: splash@fac00000 {
+ /* size to be updated by the actual board */
+ reg = <0x0 0xfac00000 0x0>;
+ no-map;
+ status = "disabled";
+ };
+ };
+};
+
+&ext_24_5m {
+ clock-frequency = <24576000>;
+};
+
+&ext_200m {
+ clock-frequency = <200000000>;
+};
+
+&hsi2c_8 {
+ status = "okay";
+
+ eeprom: eeprom@50 {
+ compatible = "atmel,24c08";
+ reg = <0x50>;
+ };
+};
+
+&hsi2c_12 {
+ status = "okay";
+ /* TODO: add the devices once drivers exist */
+
+ usb-typec@25 {
+ compatible = "maxim,max77759-tcpci", "maxim,max33359";
+ reg = <0x25>;
+ interrupts-extended = <&gpa8 2 IRQ_TYPE_LEVEL_LOW>;
+ pinctrl-0 = <&typec_int>;
+ pinctrl-names = "default";
+
+ connector {
+ compatible = "usb-c-connector";
+ label = "USB-C";
+ data-role = "dual";
+ power-role = "dual";
+ self-powered;
+ try-power-role = "sink";
+ op-sink-microwatt = <2600000>;
+ slow-charger-loop;
+ /*
+ * max77759 operating in reverse boost mode (0xA) can
+ * source up to 1.5A while extboost can only do ~1A.
+ * Since extboost is the primary path, advertise 900mA.
+ */
+ source-pdos = <PDO_FIXED(5000, 900,
+ (PDO_FIXED_SUSPEND
+ | PDO_FIXED_USB_COMM
+ | PDO_FIXED_DATA_SWAP
+ | PDO_FIXED_DUAL_ROLE))>;
+ sink-pdos = <PDO_FIXED(5000, 3000,
+ (PDO_FIXED_DATA_SWAP
+ | PDO_FIXED_USB_COMM
+ | PDO_FIXED_HIGHER_CAP
+ | PDO_FIXED_DUAL_ROLE))
+ PDO_FIXED(9000, 2200, 0)
+ PDO_PPS_APDO(5000, 11000, 3000)>;
+ sink-vdos = <VDO_IDH(1, 1, IDH_PTYPE_PERIPH, 0,
+ IDH_PTYPE_DFP_HOST, 2, 0x18d1)
+ VDO_CERT(0x0)
+ VDO_PRODUCT(0x4ee1, 0x0)
+ VDO_UFP(UFP_VDO_VER1_2,
+ (DEV_USB2_CAPABLE
+ | DEV_USB3_CAPABLE),
+ UFP_RECEPTACLE, 0,
+ AMA_VCONN_NOT_REQ, 0,
+ UFP_ALTMODE_NOT_SUPP,
+ UFP_USB32_GEN1)
+ /* padding */ 0
+ VDO_DFP(DFP_VDO_VER1_1,
+ (HOST_USB2_CAPABLE
+ | HOST_USB3_CAPABLE),
+ DFP_RECEPTACLE, 0)>;
+ sink-vdos-v1 = <VDO_IDH(1, 1, IDH_PTYPE_PERIPH, 0,
+ 0, 0, 0x18d1)
+ VDO_CERT(0x0)
+ VDO_PRODUCT(0x4ee1, 0x0)>;
+ /*
+ * Until bootloader is updated to set those two when
+ * console is enabled, we disable PD here.
+ */
+ pd-disable;
+ typec-power-opmode = "default";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ usbc0_orien_sw: endpoint {
+ remote-endpoint = <&usbdrd31_phy_orien_switch>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ usbc0_role_sw: endpoint {
+ remote-endpoint = <&usbdrd31_dwc3_role_switch>;
+ };
+ };
+ };
+ };
+ };
+};
+
+&pinctrl_far_alive {
+ key_voldown: key-voldown-pins {
+ samsung,pins = "gpa7-3";
+ samsung,pin-function = <GS101_PIN_FUNC_EINT>;
+ samsung,pin-pud = <GS101_PIN_PULL_NONE>;
+ samsung,pin-drv = <GS101_PIN_DRV_2_5_MA>;
+ };
+
+ key_volup: key-volup-pins {
+ samsung,pins = "gpa8-1";
+ samsung,pin-function = <GS101_PIN_FUNC_EINT>;
+ samsung,pin-pud = <GS101_PIN_PULL_NONE>;
+ samsung,pin-drv = <GS101_PIN_DRV_2_5_MA>;
+ };
+
+ typec_int: typec-int-pins {
+ samsung,pins = "gpa8-2";
+ samsung,pin-function = <GS101_PIN_FUNC_EINT>;
+ samsung,pin-pud = <GS101_PIN_PULL_UP>;
+ samsung,pin-drv = <GS101_PIN_DRV_2_5_MA>;
+ };
+};
+
+&pinctrl_gpio_alive {
+ key_power: key-power-pins {
+ samsung,pins = "gpa10-1";
+ samsung,pin-function = <GS101_PIN_FUNC_EINT>;
+ samsung,pin-pud = <GS101_PIN_PULL_NONE>;
+ samsung,pin-drv = <GS101_PIN_DRV_2_5_MA>;
+ };
+};
+
+&serial_0 {
+ status = "okay";
+};
+
+&ufs_0 {
+ status = "okay";
+ vcc-supply = <&ufs_0_fixed_vcc_reg>;
+};
+
+&ufs_0_phy {
+ status = "okay";
+};
+
+&usbdrd31 {
+ vdd10-supply = <&reg_placeholder>;
+ vdd33-supply = <&reg_placeholder>;
+ status = "okay";
+};
+
+&usbdrd31_dwc3 {
+ dr_mode = "otg";
+ usb-role-switch;
+ role-switch-default-mode = "peripheral";
+ maximum-speed = "super-speed-plus";
+ status = "okay";
+
+ port {
+ usbdrd31_dwc3_role_switch: endpoint {
+ remote-endpoint = <&usbc0_role_sw>;
+ };
+ };
+};
+
+&usbdrd31_phy {
+ orientation-switch;
+ /* TODO: Update these once PMIC is implemented */
+ pll-supply = <&reg_placeholder>;
+ dvdd-usb20-supply = <&reg_placeholder>;
+ vddh-usb20-supply = <&reg_placeholder>;
+ vdd33-usb20-supply = <&reg_placeholder>;
+ vdda-usbdp-supply = <&reg_placeholder>;
+ vddh-usbdp-supply = <&reg_placeholder>;
+ status = "okay";
+
+ port {
+ usbdrd31_phy_orien_switch: endpoint {
+ remote-endpoint = <&usbc0_orien_sw>;
+ };
+ };
+};
+
+&usi_uart {
+ samsung,clkreq-on; /* needed for UART mode */
+ status = "okay";
+};
+
+&usi8 {
+ samsung,mode = <USI_V2_I2C>;
+ status = "okay";
+};
+
+&usi12 {
+ samsung,mode = <USI_V2_I2C>;
+ status = "okay";
+};
+
+&watchdog_cl0 {
+ timeout-sec = <30>;
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/exynos/google/gs101-raven.dts b/arch/arm64/boot/dts/exynos/google/gs101-raven.dts
new file mode 100644
index 000000000000..1e7e6b34b864
--- /dev/null
+++ b/arch/arm64/boot/dts/exynos/google/gs101-raven.dts
@@ -0,0 +1,29 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Raven Device Tree
+ *
+ * Copyright 2021-2023 Google LLC
+ * Copyright 2023-2025 Linaro Ltd
+ */
+
+/dts-v1/;
+
+#include "gs101-pixel-common.dtsi"
+
+/ {
+ model = "Raven";
+ compatible = "google,gs101-raven", "google,gs101";
+};
+
+&cont_splash_mem {
+ reg = <0x0 0xfac00000 (1440 * 3120 * 4)>;
+ status = "okay";
+};
+
+&framebuffer0 {
+ width = <1440>;
+ height = <3120>;
+ stride = <(1440 * 4)>;
+ format = "a8r8g8b8";
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/exynos/google/gs101.dtsi b/arch/arm64/boot/dts/exynos/google/gs101.dtsi
index c5335dd59dfe..3de3a758f113 100644
--- a/arch/arm64/boot/dts/exynos/google/gs101.dtsi
+++ b/arch/arm64/boot/dts/exynos/google/gs101.dtsi
@@ -73,7 +73,7 @@
compatible = "arm,cortex-a55";
reg = <0x0000>;
enable-method = "psci";
- cpu-idle-states = <&ANANKE_CPU_SLEEP>;
+ cpu-idle-states = <&ananke_cpu_sleep>;
capacity-dmips-mhz = <250>;
dynamic-power-coefficient = <70>;
};
@@ -83,7 +83,7 @@
compatible = "arm,cortex-a55";
reg = <0x0100>;
enable-method = "psci";
- cpu-idle-states = <&ANANKE_CPU_SLEEP>;
+ cpu-idle-states = <&ananke_cpu_sleep>;
capacity-dmips-mhz = <250>;
dynamic-power-coefficient = <70>;
};
@@ -93,7 +93,7 @@
compatible = "arm,cortex-a55";
reg = <0x0200>;
enable-method = "psci";
- cpu-idle-states = <&ANANKE_CPU_SLEEP>;
+ cpu-idle-states = <&ananke_cpu_sleep>;
capacity-dmips-mhz = <250>;
dynamic-power-coefficient = <70>;
};
@@ -103,7 +103,7 @@
compatible = "arm,cortex-a55";
reg = <0x0300>;
enable-method = "psci";
- cpu-idle-states = <&ANANKE_CPU_SLEEP>;
+ cpu-idle-states = <&ananke_cpu_sleep>;
capacity-dmips-mhz = <250>;
dynamic-power-coefficient = <70>;
};
@@ -113,7 +113,7 @@
compatible = "arm,cortex-a76";
reg = <0x0400>;
enable-method = "psci";
- cpu-idle-states = <&ENYO_CPU_SLEEP>;
+ cpu-idle-states = <&enyo_cpu_sleep>;
capacity-dmips-mhz = <620>;
dynamic-power-coefficient = <284>;
};
@@ -123,7 +123,7 @@
compatible = "arm,cortex-a76";
reg = <0x0500>;
enable-method = "psci";
- cpu-idle-states = <&ENYO_CPU_SLEEP>;
+ cpu-idle-states = <&enyo_cpu_sleep>;
capacity-dmips-mhz = <620>;
dynamic-power-coefficient = <284>;
};
@@ -133,7 +133,7 @@
compatible = "arm,cortex-x1";
reg = <0x0600>;
enable-method = "psci";
- cpu-idle-states = <&HERA_CPU_SLEEP>;
+ cpu-idle-states = <&hera_cpu_sleep>;
capacity-dmips-mhz = <1024>;
dynamic-power-coefficient = <650>;
};
@@ -143,7 +143,7 @@
compatible = "arm,cortex-x1";
reg = <0x0700>;
enable-method = "psci";
- cpu-idle-states = <&HERA_CPU_SLEEP>;
+ cpu-idle-states = <&hera_cpu_sleep>;
capacity-dmips-mhz = <1024>;
dynamic-power-coefficient = <650>;
};
@@ -151,7 +151,7 @@
idle-states {
entry-method = "psci";
- ANANKE_CPU_SLEEP: cpu-ananke-sleep {
+ ananke_cpu_sleep: cpu-ananke-sleep {
idle-state-name = "c2";
compatible = "arm,idle-state";
arm,psci-suspend-param = <0x0010000>;
@@ -160,7 +160,7 @@
min-residency-us = <2000>;
};
- ENYO_CPU_SLEEP: cpu-enyo-sleep {
+ enyo_cpu_sleep: cpu-enyo-sleep {
idle-state-name = "c2";
compatible = "arm,idle-state";
arm,psci-suspend-param = <0x0010000>;
@@ -169,7 +169,7 @@
min-residency-us = <2500>;
};
- HERA_CPU_SLEEP: cpu-hera-sleep {
+ hera_cpu_sleep: cpu-hera-sleep {
idle-state-name = "c2";
compatible = "arm,idle-state";
arm,psci-suspend-param = <0x0010000>;
@@ -196,6 +196,14 @@
clock-output-names = "ext-200m";
};
+ firmware {
+ acpm_ipc: power-management {
+ compatible = "google,gs101-acpm-ipc";
+ mboxes = <&ap2apm_mailbox>;
+ shmem = <&apm_sram>;
+ };
+ };
+
pmu-0 {
compatible = "arm,cortex-a55-pmu";
interrupts = <GIC_PPI 7 IRQ_TYPE_LEVEL_HIGH &ppi_cluster0>;
@@ -1400,18 +1408,30 @@
poweroff: syscon-poweroff {
compatible = "syscon-poweroff";
- regmap = <&pmu_system_controller>;
offset = <0x3e9c>; /* PAD_CTRL_PWR_HOLD */
- mask = <0x100>; /* reset value */
+ mask = <0x00000100>;
+ value = <0x0>;
};
reboot: syscon-reboot {
compatible = "syscon-reboot";
- regmap = <&pmu_system_controller>;
offset = <0x3a00>; /* SYSTEM_CONFIGURATION */
mask = <0x2>; /* SWRESET_SYSTEM */
value = <0x2>; /* reset value */
};
+
+ reboot-mode {
+ compatible = "syscon-reboot-mode";
+ offset = <0x0810>; /* EXYNOS_PMU_SYSIP_DAT0 */
+ mode-bootloader = <0xfc>;
+ mode-charge = <0x0a>;
+ mode-fastboot = <0xfa>;
+ mode-reboot-ab-update = <0x52>;
+ mode-recovery = <0xff>;
+ mode-rescue = <0xf9>;
+ mode-shutdown-thermal = <0x51>;
+ mode-shutdown-thermal-battery = <0x51>;
+ };
};
pinctrl_gpio_alive: pinctrl@174d0000 {
@@ -1440,6 +1460,15 @@
};
};
+ ap2apm_mailbox: mailbox@17610000 {
+ compatible = "google,gs101-mbox";
+ reg = <0x17610000 0x1000>;
+ clocks = <&cmu_apm CLK_GOUT_APM_MAILBOX_APM_AP_PCLK>;
+ clock-names = "pclk";
+ interrupts = <GIC_SPI 73 IRQ_TYPE_LEVEL_HIGH 0>;
+ #mbox-cells = <0>;
+ };
+
pinctrl_gsactrl: pinctrl@17940000 {
compatible = "google,gs101-pinctrl";
reg = <0x17940000 0x00001000>;
@@ -1454,6 +1483,7 @@
/* TODO: update once support for this CMU exists */
clocks = <0>;
clock-names = "pclk";
+ status = "disabled";
};
cmu_top: clock-controller@1e080000 {
@@ -1466,6 +1496,14 @@
};
};
+ apm_sram: sram@2039000 {
+ compatible = "mmio-sram";
+ reg = <0x0 0x2039000 0x40000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x0 0x2039000 0x40000>;
+ };
+
timer {
compatible = "arm,armv8-timer";
interrupts =
diff --git a/arch/arm64/boot/dts/freescale/Makefile b/arch/arm64/boot/dts/freescale/Makefile
index 839432153cc7..b6d3fe26d621 100644
--- a/arch/arm64/boot/dts/freescale/Makefile
+++ b/arch/arm64/boot/dts/freescale/Makefile
@@ -122,6 +122,19 @@ dtb-$(CONFIG_ARCH_MXC) += imx8mm-mx8menlo.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mm-nitrogen-r2.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mm-phg.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mm-phyboard-polis-rdk.dtb
+
+imx8mm-phyboard-polis-peb-av-10-dtbs += imx8mm-phyboard-polis-rdk.dtb imx8mm-phyboard-polis-peb-av-10.dtbo
+imx8mm-phyboard-polis-peb-eval-01-dtbs += imx8mm-phyboard-polis-rdk.dtb imx8mm-phyboard-polis-peb-eval-01.dtbo
+imx8mm-phycore-no-eth-dtbs += imx8mm-phyboard-polis-rdk.dtb imx8mm-phycore-no-eth.dtbo
+imx8mm-phycore-no-spiflash-dtbs += imx8mm-phyboard-polis-rdk.dtb imx8mm-phycore-no-spiflash.dtbo
+imx8mm-phycore-rpmsg-dtbs += imx8mm-phyboard-polis-rdk.dtb imx8mm-phycore-rpmsg.dtbo
+
+dtb-$(CONFIG_ARCH_MXC) += imx8mm-phyboard-polis-peb-av-10.dtb
+dtb-$(CONFIG_ARCH_MXC) += imx8mm-phyboard-polis-peb-eval-01.dtb
+dtb-$(CONFIG_ARCH_MXC) += imx8mm-phycore-no-eth.dtb
+dtb-$(CONFIG_ARCH_MXC) += imx8mm-phycore-no-spiflash.dtb
+dtb-$(CONFIG_ARCH_MXC) += imx8mm-phycore-rpmsg.dtb
+
dtb-$(CONFIG_ARCH_MXC) += imx8mm-phygate-tauri-l.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mm-prt8mm.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mm-tqma8mqml-mba8mx.dtb
@@ -193,9 +206,12 @@ dtb-$(CONFIG_ARCH_MXC) += imx8mp-nitrogen-smarc-universal-board.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mp-phyboard-pollux-rdk.dtb
imx8mp-phyboard-pollux-rdk-no-eth-dtbs += imx8mp-phyboard-pollux-rdk.dtb imx8mp-phycore-no-eth.dtbo
dtb-$(CONFIG_ARCH_MXC) += imx8mp-phyboard-pollux-rdk-no-eth.dtb
+dtb-$(CONFIG_ARCH_MXC) += imx8mp-skov-basic.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mp-skov-revb-hdmi.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mp-skov-revb-lt6.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mp-skov-revb-mi1010ait-1cp1.dtb
+dtb-$(CONFIG_ARCH_MXC) += imx8mp-skov-revc-bd500.dtb
+dtb-$(CONFIG_ARCH_MXC) += imx8mp-skov-revc-tian-g07017.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mp-tqma8mpql-mba8mpxl.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mp-tqma8mpql-mba8mp-ras314.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mp-var-som-symphony.dtb
@@ -267,6 +283,10 @@ dtb-$(CONFIG_ARCH_MXC) += imx8qxp-colibri-eval-v3.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8qxp-colibri-iris.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8qxp-colibri-iris-v2.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8qxp-mek.dtb
+
+imx8qxp-mek-pcie-ep-dtbs += imx8qxp-mek.dtb imx8qxp-mek-pcie-ep.dtbo
+dtb-$(CONFIG_ARCH_MXC) += imx8qxp-mek-pcie-ep.dtb
+
dtb-$(CONFIG_ARCH_MXC) += imx8qxp-tqma8xqp-mba8xx.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8ulp-evk.dtb
dtb-$(CONFIG_ARCH_MXC) += imx93-9x9-qsb.dtb
@@ -281,6 +301,7 @@ dtb-$(CONFIG_ARCH_MXC) += imx93-phyboard-segin.dtb
dtb-$(CONFIG_ARCH_MXC) += imx93-tqma9352-mba93xxca.dtb
dtb-$(CONFIG_ARCH_MXC) += imx93-tqma9352-mba93xxla.dtb
dtb-$(CONFIG_ARCH_MXC) += imx93-var-som-symphony.dtb
+dtb-$(CONFIG_ARCH_MXC) += imx95-15x15-evk.dtb
dtb-$(CONFIG_ARCH_MXC) += imx95-19x19-evk.dtb
imx8mm-kontron-dl-dtbs := imx8mm-kontron-bl.dtb imx8mm-kontron-dl.dtbo
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1088a-ten64.dts b/arch/arm64/boot/dts/freescale/fsl-ls1088a-ten64.dts
index bc0d89427fbe..3a11068f2212 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1088a-ten64.dts
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1088a-ten64.dts
@@ -87,6 +87,22 @@
los-gpios = <&sfpgpio 7 GPIO_ACTIVE_HIGH>;
maximum-power-milliwatt = <2000>;
};
+
+ usb1v2_supply: regulator-usbhub-1v2 {
+ compatible = "regulator-fixed";
+ regulator-name = "usbhub_1v2";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-always-on;
+ };
+
+ system3v3_supply: regulator-system-3v3 {
+ compatible = "regulator-fixed";
+ regulator-name = "system_3v3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
};
/* XG1 - Upper SFP */
@@ -231,6 +247,12 @@
compatible = "atmel,at97sc3204t";
reg = <0x29>;
};
+
+ usbhub: usb-hub@2d {
+ compatible = "microchip,usb5744";
+ reg = <0x2d>;
+ };
+
};
&i2c2 {
@@ -378,10 +400,32 @@
};
};
+/* LS1088A USB Port 0 - direct to bottom USB-A port */
&usb0 {
status = "okay";
};
+/* LS1088A USB Port 1 - to Microchip USB5744 USB Hub */
&usb1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
status = "okay";
+
+ hub_2_0: hub@1 {
+ compatible = "usb424,2744";
+ reg = <1>;
+ peer-hub = <&hub_3_0>;
+ i2c-bus = <&usbhub>;
+ vdd-supply = <&system3v3_supply>;
+ vdd2-supply = <&usb1v2_supply>;
+ };
+
+ hub_3_0: hub@2 {
+ compatible = "usb424,5744";
+ reg = <2>;
+ peer-hub = <&hub_2_0>;
+ i2c-bus = <&usbhub>;
+ vdd-supply = <&system3v3_supply>;
+ vdd2-supply = <&usb1v2_supply>;
+ };
};
diff --git a/arch/arm64/boot/dts/freescale/imx8-apalis-v1.1.dtsi b/arch/arm64/boot/dts/freescale/imx8-apalis-v1.1.dtsi
index a3fc945aea16..dbea1eefdeec 100644
--- a/arch/arm64/boot/dts/freescale/imx8-apalis-v1.1.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8-apalis-v1.1.dtsi
@@ -790,6 +790,22 @@
status = "okay";
};
+/* Apalis HDMI Audio */
+&sai5 {
+ assigned-clocks = <&acm IMX_ADMA_ACM_SAI5_MCLK_SEL>,
+ <&acm IMX_ADMA_ACM_AUD_CLK1_SEL>,
+ <&clk IMX_SC_R_AUDIO_PLL_0 IMX_SC_PM_CLK_PLL>,
+ <&clk IMX_SC_R_AUDIO_PLL_0 IMX_SC_PM_CLK_SLV_BUS>,
+ <&clk IMX_SC_R_AUDIO_PLL_0 IMX_SC_PM_CLK_MST_BUS>,
+ <&clk IMX_SC_R_AUDIO_PLL_1 IMX_SC_PM_CLK_PLL>,
+ <&clk IMX_SC_R_AUDIO_PLL_1 IMX_SC_PM_CLK_SLV_BUS>,
+ <&clk IMX_SC_R_AUDIO_PLL_1 IMX_SC_PM_CLK_MST_BUS>,
+ <&sai5_lpcg 0>;
+ assigned-clock-parents = <&aud_pll_div0_lpcg 0>, <&aud_rec1_lpcg 0>;
+ assigned-clock-rates = <0>, <0>, <786432000>, <49152000>, <12288000>,
+ <722534400>, <45158400>, <11289600>, <49152000>;
+};
+
/* TODO: Apalis SATA1 */
/* Apalis SPDIF1 */
diff --git a/arch/arm64/boot/dts/freescale/imx8-ss-hsio.dtsi b/arch/arm64/boot/dts/freescale/imx8-ss-hsio.dtsi
index 70a8aa1a6791..9b8b1380c4c2 100644
--- a/arch/arm64/boot/dts/freescale/imx8-ss-hsio.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8-ss-hsio.dtsi
@@ -57,8 +57,9 @@ hsio_subsys: bus@5f000000 {
ranges = <0x81000000 0 0x00000000 0x8ff80000 0 0x00010000>,
<0x82000000 0 0x80000000 0x80000000 0 0x0ff00000>;
#interrupt-cells = <1>;
- interrupts = <GIC_SPI 102 IRQ_TYPE_LEVEL_HIGH>;
- interrupt-names = "msi";
+ interrupts = <GIC_SPI 102 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 104 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "msi", "dma";
#address-cells = <3>;
#size-cells = <2>;
clocks = <&pcieb_lpcg IMX_LPCG_CLK_6>,
@@ -68,9 +69,9 @@ hsio_subsys: bus@5f000000 {
bus-range = <0x00 0xff>;
device_type = "pci";
interrupt-map = <0 0 0 1 &gic 0 105 4>,
- <0 0 0 2 &gic 0 106 4>,
- <0 0 0 3 &gic 0 107 4>,
- <0 0 0 4 &gic 0 108 4>;
+ <0 0 0 2 &gic 0 106 4>,
+ <0 0 0 3 &gic 0 107 4>,
+ <0 0 0 4 &gic 0 108 4>;
interrupt-map-mask = <0 0 0 0x7>;
num-lanes = <1>;
num-viewport = <4>;
@@ -79,6 +80,25 @@ hsio_subsys: bus@5f000000 {
status = "disabled";
};
+ pcieb_ep: pcie-ep@5f010000 {
+ compatible = "fsl,imx8q-pcie-ep";
+ reg = <0x5f010000 0x00010000>,
+ <0x80000000 0x10000000>;
+ reg-names = "dbi", "addr_space";
+ num-lanes = <1>;
+ interrupts = <GIC_SPI 104 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "dma";
+ clocks = <&pcieb_lpcg IMX_LPCG_CLK_6>,
+ <&pcieb_lpcg IMX_LPCG_CLK_4>,
+ <&pcieb_lpcg IMX_LPCG_CLK_5>;
+ clock-names = "dbi", "mstr", "slv";
+ power-domains = <&pd IMX_SC_R_PCIE_B>;
+ fsl,max-link-speed = <3>;
+ num-ib-windows = <6>;
+ num-ob-windows = <6>;
+ status = "disabled";
+ };
+
pcieb_lpcg: clock-controller@5f060000 {
compatible = "fsl,imx8qxp-lpcg";
reg = <0x5f060000 0x10000>;
diff --git a/arch/arm64/boot/dts/freescale/imx8dxl-evk.dts b/arch/arm64/boot/dts/freescale/imx8dxl-evk.dts
index 6259186cd4d9..5f3b4014e152 100644
--- a/arch/arm64/boot/dts/freescale/imx8dxl-evk.dts
+++ b/arch/arm64/boot/dts/freescale/imx8dxl-evk.dts
@@ -191,6 +191,33 @@
enable-active-high;
};
+ reg_audio_5v: regulator-audio-pwr {
+ compatible = "regulator-fixed";
+ regulator-name = "audio-5v";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ reg_audio_3v3: regulator-audio-3v3 {
+ compatible = "regulator-fixed";
+ regulator-name = "audio-3v3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ reg_audio_1v8: regulator-audio-1v8 {
+ compatible = "regulator-fixed";
+ regulator-name = "audio-1v8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
bt_sco_codec: audio-codec-bt {
compatible = "linux,bt-sco";
#sound-dai-cells = <1>;
@@ -420,6 +447,11 @@
wlf,shared-lrclk;
wlf,hp-cfg = <2 2 3>;
wlf,gpio-cfg = <1 3>;
+ AVDD-supply = <&reg_audio_3v3>;
+ DBVDD-supply = <&reg_audio_1v8>;
+ DCVDD-supply = <&reg_audio_1v8>;
+ SPKVDD1-supply = <&reg_audio_5v>;
+ SPKVDD2-supply = <&reg_audio_5v>;
};
};
@@ -444,6 +476,11 @@
wlf,shared-lrclk;
wlf,hp-cfg = <2 2 3>;
wlf,gpio-cfg = <1 3>;
+ AVDD-supply = <&reg_audio_3v3>;
+ DBVDD-supply = <&reg_audio_1v8>;
+ DCVDD-supply = <&reg_audio_1v8>;
+ SPKVDD1-supply = <&reg_audio_5v>;
+ SPKVDD2-supply = <&reg_audio_5v>;
};
};
@@ -468,6 +505,11 @@
wlf,shared-lrclk;
wlf,hp-cfg = <2 2 3>;
wlf,gpio-cfg = <1 3>;
+ AVDD-supply = <&reg_audio_3v3>;
+ DBVDD-supply = <&reg_audio_1v8>;
+ DCVDD-supply = <&reg_audio_1v8>;
+ SPKVDD1-supply = <&reg_audio_5v>;
+ SPKVDD2-supply = <&reg_audio_5v>;
};
};
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-kontron-bl.dts b/arch/arm64/boot/dts/freescale/imx8mm-kontron-bl.dts
index a8ef4fba16a9..d16490d87687 100644
--- a/arch/arm64/boot/dts/freescale/imx8mm-kontron-bl.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mm-kontron-bl.dts
@@ -254,6 +254,10 @@
status = "okay";
};
+&reg_nvcc_sd {
+ sd-vsel-gpios = <&gpio1 4 GPIO_ACTIVE_HIGH>;
+};
+
&uart1 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_uart1>;
@@ -454,7 +458,7 @@
MX8MM_IOMUXC_SD2_DATA2_USDHC2_DATA2 0x1d0
MX8MM_IOMUXC_SD2_DATA3_USDHC2_DATA3 0x1d0
MX8MM_IOMUXC_SD2_CD_B_GPIO2_IO12 0x19
- MX8MM_IOMUXC_GPIO1_IO04_USDHC2_VSELECT 0xd0
+ MX8MM_IOMUXC_GPIO1_IO04_USDHC2_VSELECT 0x40000d0
>;
};
@@ -467,7 +471,7 @@
MX8MM_IOMUXC_SD2_DATA2_USDHC2_DATA2 0x1d4
MX8MM_IOMUXC_SD2_DATA3_USDHC2_DATA3 0x1d4
MX8MM_IOMUXC_SD2_CD_B_GPIO2_IO12 0x19
- MX8MM_IOMUXC_GPIO1_IO04_USDHC2_VSELECT 0xd0
+ MX8MM_IOMUXC_GPIO1_IO04_USDHC2_VSELECT 0x40000d0
>;
};
@@ -480,7 +484,7 @@
MX8MM_IOMUXC_SD2_DATA2_USDHC2_DATA2 0x1d6
MX8MM_IOMUXC_SD2_DATA3_USDHC2_DATA3 0x1d6
MX8MM_IOMUXC_SD2_CD_B_GPIO2_IO12 0x19
- MX8MM_IOMUXC_GPIO1_IO04_USDHC2_VSELECT 0xd0
+ MX8MM_IOMUXC_GPIO1_IO04_USDHC2_VSELECT 0x40000d0
>;
};
};
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-kontron-osm-s.dtsi b/arch/arm64/boot/dts/freescale/imx8mm-kontron-osm-s.dtsi
index 663ae52b4852..d45542965230 100644
--- a/arch/arm64/boot/dts/freescale/imx8mm-kontron-osm-s.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mm-kontron-osm-s.dtsi
@@ -342,6 +342,7 @@
regulator-name = "NVCC_SD (LDO5)";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <3300000>;
+ sd-vsel-gpios = <&gpio1 4 GPIO_ACTIVE_HIGH>;
};
};
};
@@ -794,7 +795,7 @@
MX8MM_IOMUXC_SD2_DATA2_USDHC2_DATA2 0x1d0 /* SDIO_A_D2 */
MX8MM_IOMUXC_SD2_DATA3_USDHC2_DATA3 0x1d0 /* SDIO_A_D3 */
MX8MM_IOMUXC_SD2_WP_USDHC2_WP 0x400000d6 /* SDIO_A_WP */
- MX8MM_IOMUXC_GPIO1_IO04_USDHC2_VSELECT 0x90
+ MX8MM_IOMUXC_GPIO1_IO04_USDHC2_VSELECT 0x40000090
>;
};
@@ -807,7 +808,7 @@
MX8MM_IOMUXC_SD2_DATA2_USDHC2_DATA2 0x1d4 /* SDIO_A_D2 */
MX8MM_IOMUXC_SD2_DATA3_USDHC2_DATA3 0x1d4 /* SDIO_A_D3 */
MX8MM_IOMUXC_SD2_WP_USDHC2_WP 0x400000d6 /* SDIO_A_WP */
- MX8MM_IOMUXC_GPIO1_IO04_USDHC2_VSELECT 0x90
+ MX8MM_IOMUXC_GPIO1_IO04_USDHC2_VSELECT 0x40000090
>;
};
@@ -820,7 +821,7 @@
MX8MM_IOMUXC_SD2_DATA2_USDHC2_DATA2 0x1d6 /* SDIO_A_D2 */
MX8MM_IOMUXC_SD2_DATA3_USDHC2_DATA3 0x1d6 /* SDIO_A_D3 */
MX8MM_IOMUXC_SD2_WP_USDHC2_WP 0x400000d6 /* SDIO_A_WP */
- MX8MM_IOMUXC_GPIO1_IO04_USDHC2_VSELECT 0x90
+ MX8MM_IOMUXC_GPIO1_IO04_USDHC2_VSELECT 0x40000090
>;
};
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-phyboard-polis-peb-av-10.dtso b/arch/arm64/boot/dts/freescale/imx8mm-phyboard-polis-peb-av-10.dtso
new file mode 100644
index 000000000000..840f83293452
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/imx8mm-phyboard-polis-peb-av-10.dtso
@@ -0,0 +1,237 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2025 PHYTEC Messtechnik GmbH
+ * Author: Teresa Remmet <t.remmet@phytec.de>
+ */
+
+/dts-v1/;
+/plugin/;
+
+#include <dt-bindings/clock/imx8mm-clock.h>
+#include <dt-bindings/gpio/gpio.h>
+#include "imx8mm-pinfunc.h"
+
+&{/} {
+ backlight: backlight {
+ compatible = "pwm-backlight";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_lcd>;
+ default-brightness-level = <6>;
+ pwms = <&pwm4 0 50000 0>;
+ power-supply = <&reg_vdd_3v3_s>;
+ enable-gpios = <&gpio5 1 GPIO_ACTIVE_HIGH>;
+ brightness-levels = <0 4 8 16 32 64 128 255>;
+ };
+
+ panel {
+ compatible = "edt,etml1010g3dra";
+ backlight = <&backlight>;
+ power-supply = <&reg_vcc_3v3>;
+
+ port {
+ panel_in: endpoint {
+ remote-endpoint = <&bridge_out>;
+ };
+ };
+ };
+
+ reg_sound_1v8: regulator-1v8 {
+ compatible = "regulator-fixed";
+ regulator-name = "VCC_1V8_Audio";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+
+ reg_sound_3v3: regulator-3v3 {
+ compatible = "regulator-fixed";
+ regulator-name = "VCC_3V3_Analog";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ };
+
+ sound-peb-av-10 {
+ compatible = "simple-audio-card";
+ simple-audio-card,name = "snd-peb-av-10";
+ simple-audio-card,format = "i2s";
+ simple-audio-card,bitclock-master = <&dailink_master>;
+ simple-audio-card,frame-master = <&dailink_master>;
+ simple-audio-card,mclk-fs = <32>;
+ simple-audio-card,widgets =
+ "Line", "Line In",
+ "Speaker", "Speaker",
+ "Microphone", "Microphone Jack",
+ "Headphone", "Headphone Jack";
+ simple-audio-card,routing =
+ "Speaker", "SPOP",
+ "Speaker", "SPOM",
+ "Headphone Jack", "HPLOUT",
+ "Headphone Jack", "HPROUT",
+ "LINE1L", "Line In",
+ "LINE1R", "Line In",
+ "MIC3R", "Microphone Jack",
+ "Microphone Jack", "Mic Bias";
+
+ simple-audio-card,cpu {
+ sound-dai = <&sai5>;
+ };
+
+ dailink_master: simple-audio-card,codec {
+ sound-dai = <&codec>;
+ clocks = <&clk IMX8MM_CLK_SAI5>;
+ };
+ };
+};
+
+&i2c3 {
+ clock-frequency = <400000>;
+ pinctrl-names = "default", "gpio";
+ pinctrl-0 = <&pinctrl_i2c3>;
+ pinctrl-1 = <&pinctrl_i2c3_gpio>;
+ sda-gpios = <&gpio5 19 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
+ scl-gpios = <&gpio5 18 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "okay";
+
+ codec: codec@18 {
+ compatible = "ti,tlv320aic3007";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_tlv320>;
+ #sound-dai-cells = <0>;
+ reg = <0x18>;
+ reset-gpios = <&gpio4 28 GPIO_ACTIVE_LOW>;
+ ai3x-gpio-func = <0xd 0x0>;
+ ai3x-micbias-vg = <2>;
+ AVDD-supply = <&reg_sound_3v3>;
+ IOVDD-supply = <&reg_sound_3v3>;
+ DRVDD-supply = <&reg_sound_3v3>;
+ DVDD-supply = <&reg_sound_1v8>;
+ };
+
+ eeprom@57 {
+ compatible = "atmel,24c32";
+ pagesize = <32>;
+ reg = <0x57>;
+ vcc-supply = <&reg_vdd_3v3_s>;
+ };
+
+ eeprom@5f {
+ compatible = "atmel,24c32";
+ pagesize = <32>;
+ reg = <0x5f>;
+ size = <32>;
+ vcc-supply = <&reg_vdd_3v3_s>;
+ };
+};
+
+&lcdif {
+ status = "okay";
+};
+
+&mipi_dsi {
+ samsung,esc-clock-frequency = <10000000>;
+ status = "okay";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ port@1 {
+ reg = <1>;
+ dsi_out: endpoint {
+ remote-endpoint = <&bridge_in>;
+ };
+ };
+ };
+};
+
+&pwm4 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_pwm4>;
+ status = "okay";
+};
+
+&sai5 {
+ assigned-clocks = <&clk IMX8MM_CLK_SAI5>;
+ assigned-clock-parents = <&clk IMX8MM_AUDIO_PLL2_OUT>;
+ assigned-clock-rates = <11289600>;
+ clocks = <&clk IMX8MM_CLK_SAI5_IPG>, <&clk IMX8MM_CLK_DUMMY>,
+ <&clk IMX8MM_CLK_SAI5_ROOT>, <&clk IMX8MM_CLK_DUMMY>,
+ <&clk IMX8MM_CLK_DUMMY>, <&clk IMX8MM_AUDIO_PLL1_OUT>,
+ <&clk IMX8MM_AUDIO_PLL2_OUT>;
+ clock-names = "bus", "mclk0", "mclk1", "mclk2", "mclk3", "pll8k",
+ "pll11k";
+ fsl,sai-mclk-direction-output;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_sai5>;
+ #sound-dai-cells = <0>;
+ status = "okay";
+};
+
+&sn65dsi83 {
+ status = "okay";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ bridge_in: endpoint {
+ remote-endpoint = <&dsi_out>;
+ data-lanes = <1 2 3 4>;
+ };
+ };
+
+ port@2 {
+ reg = <2>;
+ bridge_out: endpoint {
+ remote-endpoint = <&panel_in>;
+ };
+ };
+ };
+};
+
+&iomuxc {
+ pinctrl_i2c3: i2c3grp {
+ fsl,pins = <
+ MX8MM_IOMUXC_I2C3_SCL_I2C3_SCL 0x400001c2
+ MX8MM_IOMUXC_I2C3_SDA_I2C3_SDA 0x400001c2
+ >;
+ };
+
+ pinctrl_i2c3_gpio: i2c3gpiogrp {
+ fsl,pins = <
+ MX8MM_IOMUXC_I2C3_SCL_GPIO5_IO18 0x1e2
+ MX8MM_IOMUXC_I2C3_SDA_GPIO5_IO19 0x1e2
+ >;
+ };
+
+ pinctrl_lcd: lcd0grp {
+ fsl,pins = <
+ MX8MM_IOMUXC_SAI3_TXD_GPIO5_IO1 0x12
+ >;
+ };
+
+ pinctrl_pwm4: pwm4grp {
+ fsl,pins = <
+ MX8MM_IOMUXC_SAI3_MCLK_PWM4_OUT 0x12
+ >;
+ };
+
+ pinctrl_sai5: sai5grp {
+ fsl,pins = <
+ MX8MM_IOMUXC_SAI5_MCLK_SAI5_MCLK 0xd6
+ MX8MM_IOMUXC_SAI5_RXD0_SAI5_RX_DATA0 0xd6
+ MX8MM_IOMUXC_SAI5_RXD1_SAI5_TX_SYNC 0xd6
+ MX8MM_IOMUXC_SAI5_RXD2_SAI5_TX_BCLK 0xd6
+ MX8MM_IOMUXC_SAI5_RXD3_SAI5_TX_DATA0 0xd6
+ >;
+ };
+
+ pinctrl_tlv320: tlv320grp {
+ fsl,pins = <
+ MX8MM_IOMUXC_SAI3_RXFS_GPIO4_IO28 0x16
+ MX8MM_IOMUXC_SAI5_RXC_GPIO3_IO20 0x16
+ >;
+ };
+};
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-phyboard-polis-peb-eval-01.dtso b/arch/arm64/boot/dts/freescale/imx8mm-phyboard-polis-peb-eval-01.dtso
new file mode 100644
index 000000000000..a28f51ece93b
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/imx8mm-phyboard-polis-peb-eval-01.dtso
@@ -0,0 +1,72 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2025 PHYTEC Messtechnik GmbH
+ * Author: Janine Hagemann <j.hagemann@phytec.de>
+ */
+
+/dts-v1/;
+/plugin/;
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/linux-event-codes.h>
+#include "imx8mm-pinfunc.h"
+
+&{/} {
+ gpio-keys {
+ compatible = "gpio-keys";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_gpio_keys>;
+
+ button-0 {
+ label = "home";
+ linux,code = <KEY_HOME>;
+ gpios = <&gpio4 17 GPIO_ACTIVE_LOW>;
+ wakeup-source;
+ };
+
+ button-1 {
+ label = "menu";
+ linux,code = <KEY_MENU>;
+ gpios = <&gpio5 29 GPIO_ACTIVE_LOW>;
+ wakeup-source;
+ };
+ };
+
+ user-leds {
+ compatible = "gpio-leds";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_user_leds>;
+
+ user-led1 {
+ gpios = <&gpio4 14 GPIO_ACTIVE_HIGH>;
+ default-state = "on";
+ };
+
+ user-led2 {
+ gpios = <&gpio4 15 GPIO_ACTIVE_HIGH>;
+ default-state = "on";
+ };
+
+ user-led3 {
+ gpios = <&gpio5 28 GPIO_ACTIVE_HIGH>;
+ default-state = "on";
+ };
+ };
+};
+
+&iomuxc {
+ pinctrl_gpio_keys: gpio_keysgrp {
+ fsl,pins = <
+ MX8MM_IOMUXC_SAI1_TXD5_GPIO4_IO17 0x16
+ MX8MM_IOMUXC_UART4_TXD_GPIO5_IO29 0x16
+ >;
+ };
+
+ pinctrl_user_leds: user_ledsgrp {
+ fsl,pins = <
+ MX8MM_IOMUXC_SAI1_TXD3_GPIO4_IO15 0x16
+ MX8MM_IOMUXC_UART4_RXD_GPIO5_IO28 0x16
+ MX8MM_IOMUXC_SAI1_TXD2_GPIO4_IO14 0x16
+ >;
+ };
+};
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-phyboard-polis-rdk.dts b/arch/arm64/boot/dts/freescale/imx8mm-phyboard-polis-rdk.dts
index 5eacbd9611ee..be470cfb03d7 100644
--- a/arch/arm64/boot/dts/freescale/imx8mm-phyboard-polis-rdk.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mm-phyboard-polis-rdk.dts
@@ -219,9 +219,15 @@
status = "okay";
};
+/* RTC */
&rv3028 {
+ interrupt-parent = <&gpio1>;
+ interrupts = <3 IRQ_TYPE_LEVEL_LOW>;
+ pinctrl-0 = <&pinctrl_rtc>;
+ pinctrl-names = "default";
aux-voltage-chargeable = <1>;
trickle-resistor-ohms = <3000>;
+ wakeup-source;
};
&snvs_pwrkey {
@@ -255,11 +261,12 @@
device-wakeup-gpios = <&gpio2 8 GPIO_ACTIVE_HIGH>;
interrupt-names = "host-wakeup";
interrupt-parent = <&gpio2>;
- interrupts = <9 IRQ_TYPE_EDGE_BOTH>;
+ interrupts = <9 IRQ_TYPE_EDGE_FALLING>;
max-speed = <2000000>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_bt>;
shutdown-gpios = <&gpio2 6 GPIO_ACTIVE_HIGH>;
+ vbat-supply = <&reg_vcc_3v3>;
vddio-supply = <&reg_vcc_3v3>;
};
};
@@ -332,7 +339,7 @@
fsl,pins = <
MX8MM_IOMUXC_SD1_DATA4_GPIO2_IO6 0x00
MX8MM_IOMUXC_SD1_DATA6_GPIO2_IO8 0x00
- MX8MM_IOMUXC_SD1_DATA7_GPIO2_IO9 0x00
+ MX8MM_IOMUXC_SD1_DATA7_GPIO2_IO9 0x140
>;
};
@@ -408,6 +415,12 @@
>;
};
+ pinctrl_rtc: rtcgrp {
+ fsl,pins = <
+ MX8MM_IOMUXC_GPIO1_IO03_GPIO1_IO3 0x1c0
+ >;
+ };
+
pinctrl_tpm: tpmgrp {
fsl,pins = <
MX8MM_IOMUXC_SD1_STROBE_GPIO2_IO11 0x140
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-phycore-no-eth.dtso b/arch/arm64/boot/dts/freescale/imx8mm-phycore-no-eth.dtso
new file mode 100644
index 000000000000..0fb4b6da6c10
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/imx8mm-phycore-no-eth.dtso
@@ -0,0 +1,12 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2025 PHYTEC Messtechnik GmbH
+ * Author: Teresa Remmet <t.remmet@phytec.de>
+ */
+
+/dts-v1/;
+/plugin/;
+
+&ethphy0 {
+ status = "disabled";
+};
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-phycore-no-spiflash.dtso b/arch/arm64/boot/dts/freescale/imx8mm-phycore-no-spiflash.dtso
new file mode 100644
index 000000000000..7bfc366c1689
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/imx8mm-phycore-no-spiflash.dtso
@@ -0,0 +1,16 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2025 PHYTEC Messtechnik GmbH
+ * Author: Teresa Remmet <t.remmet@phytec.de>
+ */
+
+/dts-v1/;
+/plugin/;
+
+&flexspi {
+ status = "disabled";
+};
+
+&som_flash {
+ status = "disabled";
+};
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-phycore-rpmsg.dtso b/arch/arm64/boot/dts/freescale/imx8mm-phycore-rpmsg.dtso
new file mode 100644
index 000000000000..43d5905f3d72
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/imx8mm-phycore-rpmsg.dtso
@@ -0,0 +1,58 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2025 PHYTEC Messtechnik GmbH
+ * Author: Dominik Haller <d.haller@phytec.de>
+ */
+
+/dts-v1/;
+/plugin/;
+
+#include <dt-bindings/clock/imx8mm-clock.h>
+
+&{/} {
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ m4_reserved: m4@80000000 {
+ reg = <0 0x80000000 0 0x1000000>;
+ no-map;
+ };
+
+ vdev0vring0: vdev0vring0@b8000000 {
+ reg = <0 0xb8000000 0 0x8000>;
+ no-map;
+ };
+
+ vdev0vring1: vdev0vring1@b8008000 {
+ reg = <0 0xb8008000 0 0x8000>;
+ no-map;
+ };
+
+ rsc_table: rsc_table@b80ff000 {
+ reg = <0 0xb80ff000 0 0x1000>;
+ no-map;
+ };
+
+ vdevbuffer: vdevbuffer@b8400000 {
+ compatible = "shared-dma-pool";
+ reg = <0 0xb8400000 0 0x100000>;
+ no-map;
+ };
+ };
+
+ core-m4 {
+ compatible = "fsl,imx8mm-cm4";
+ clocks = <&clk IMX8MM_CLK_M4_DIV>;
+ mboxes = <&mu 0 1
+ &mu 1 1
+ &mu 3 1>;
+ mbox-names = "tx", "rx", "rxdb";
+ memory-region = <&vdevbuffer>, <&vdev0vring0>, <&vdev0vring1>, <&rsc_table>;
+ syscon = <&src>;
+ };
+};
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-phycore-som.dtsi b/arch/arm64/boot/dts/freescale/imx8mm-phycore-som.dtsi
index 6069678244f3..672baba4c8d0 100644
--- a/arch/arm64/boot/dts/freescale/imx8mm-phycore-som.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mm-phycore-som.dtsi
@@ -69,7 +69,6 @@
/* Ethernet */
&fec1 {
- fsl,magic-packet;
phy-mode = "rgmii-id";
phy-handle = <&ethphy0>;
pinctrl-names = "default";
@@ -161,11 +160,13 @@
regulator-always-on;
regulator-boot-on;
regulator-max-microvolt = <2500000>;
- regulator-min-microvolt = <1500000>;
+ regulator-min-microvolt = <2500000>;
regulator-name = "VCC_ENET_2V5 (LDO3)";
regulator-state-mem {
- regulator-off-in-suspend;
+ regulator-on-in-suspend;
+ regulator-suspend-max-microvolt = <2500000>;
+ regulator-suspend-min-microvolt = <2500000>;
};
};
@@ -285,9 +286,11 @@
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_sn65dsi83>;
reg = <0x2d>;
+ vcc-supply = <&reg_vdd_1v8>;
status = "disabled";
};
+ /* EEPROM */
eeprom@51 {
compatible = "atmel,24c32";
pagesize = <32>;
@@ -295,17 +298,14 @@
vcc-supply = <&reg_vdd_3v3_s>;
};
+ /* RTC */
rv3028: rtc@52 {
compatible = "microcrystal,rv3028";
- interrupts = <3 IRQ_TYPE_LEVEL_LOW>;
- interrupt-parent = <&gpio1>;
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_rtc>;
reg = <0x52>;
};
};
-/* EMMC */
+/* eMMC */
&usdhc3 {
assigned-clocks = <&clk IMX8MM_CLK_USDHC3_ROOT>;
assigned-clock-rates = <400000000>;
@@ -373,12 +373,6 @@
>;
};
- pinctrl_rtc: rtcgrp {
- fsl,pins = <
- MX8MM_IOMUXC_GPIO1_IO03_GPIO1_IO3 0x1c0
- >;
- };
-
pinctrl_sn65dsi83: sn65dsi83grp {
fsl,pins = <
MX8MM_IOMUXC_GPIO1_IO10_GPIO1_IO10 0x0
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-phygate-tauri-l.dts b/arch/arm64/boot/dts/freescale/imx8mm-phygate-tauri-l.dts
index c3835b2d860a..755cf9cacd22 100644
--- a/arch/arm64/boot/dts/freescale/imx8mm-phygate-tauri-l.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mm-phygate-tauri-l.dts
@@ -215,8 +215,13 @@
/* RTC */
&rv3028 {
+ interrupt-parent = <&gpio1>;
+ interrupts = <3 IRQ_TYPE_LEVEL_LOW>;
+ pinctrl-0 = <&pinctrl_rtc>;
+ pinctrl-names = "default";
aux-voltage-chargeable = <1>;
trickle-resistor-ohms = <3000>;
+ wakeup-source;
};
&uart1 {
@@ -394,6 +399,12 @@
>;
};
+ pinctrl_rtc: rtcgrp {
+ fsl,pins = <
+ MX8MM_IOMUXC_GPIO1_IO03_GPIO1_IO3 0x1c0
+ >;
+ };
+
pinctrl_tempsense: tempsensegrp {
fsl,pins = <
MX8MM_IOMUXC_SAI3_TXFS_GPIO4_IO31 0x00
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-tqma8mqml.dtsi b/arch/arm64/boot/dts/freescale/imx8mm-tqma8mqml.dtsi
index 8f58c84e14c8..b82e9790ea20 100644
--- a/arch/arm64/boot/dts/freescale/imx8mm-tqma8mqml.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mm-tqma8mqml.dtsi
@@ -65,6 +65,7 @@
spi-max-frequency = <84000000>;
spi-tx-bus-width = <1>;
spi-rx-bus-width = <4>;
+ vcc-supply = <&buck5_reg>;
partitions {
compatible = "fixed-partitions";
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-verdin-dahlia.dtsi b/arch/arm64/boot/dts/freescale/imx8mm-verdin-dahlia.dtsi
index ce20de259805..3d0b14968131 100644
--- a/arch/arm64/boot/dts/freescale/imx8mm-verdin-dahlia.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mm-verdin-dahlia.dtsi
@@ -16,10 +16,10 @@
"Headphone Jack", "HPOUTR",
"IN2L", "Line In Jack",
"IN2R", "Line In Jack",
- "Headphone Jack", "MICBIAS",
- "IN1L", "Headphone Jack";
+ "Microphone Jack", "MICBIAS",
+ "IN1L", "Microphone Jack";
simple-audio-card,widgets =
- "Microphone", "Headphone Jack",
+ "Microphone", "Microphone Jack",
"Headphone", "Headphone Jack",
"Line", "Line In Jack";
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-verdin.dtsi b/arch/arm64/boot/dts/freescale/imx8mm-verdin.dtsi
index c528594ac442..7251ad3a0017 100644
--- a/arch/arm64/boot/dts/freescale/imx8mm-verdin.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mm-verdin.dtsi
@@ -18,20 +18,6 @@
rtc1 = &snvs_rtc;
};
- backlight: backlight {
- compatible = "pwm-backlight";
- brightness-levels = <0 45 63 88 119 158 203 255>;
- default-brightness-level = <4>;
- /* Verdin I2S_2_D_OUT (DSI_1_BKL_EN/DSI_1_BKL_EN_LVDS, SODIMM 46) */
- enable-gpios = <&gpio3 24 GPIO_ACTIVE_HIGH>;
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_i2s_2_d_out_dsi_1_bkl_en>;
- power-supply = <&reg_3p3v>;
- /* Verdin PWM_3_DSI/PWM_3_DSI_LVDS (SODIMM 19) */
- pwms = <&pwm1 0 6666667 PWM_POLARITY_INVERTED>;
- status = "disabled";
- };
-
/* Fixed clock dedicated to SPI CAN controller */
clk40m: oscillator {
compatible = "fixed-clock";
@@ -66,13 +52,6 @@
status = "disabled";
};
- panel_lvds: panel-lvds {
- compatible = "panel-lvds";
- backlight = <&backlight>;
- data-mapping = "vesa-24";
- status = "disabled";
- };
-
/* Carrier Board Supplies */
reg_1p8v: regulator-1p8v {
compatible = "regulator-fixed";
diff --git a/arch/arm64/boot/dts/freescale/imx8mn-bsh-smm-s2pro.dts b/arch/arm64/boot/dts/freescale/imx8mn-bsh-smm-s2pro.dts
index c6ad65becc97..475cbf9e0d1e 100644
--- a/arch/arm64/boot/dts/freescale/imx8mn-bsh-smm-s2pro.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mn-bsh-smm-s2pro.dts
@@ -64,7 +64,6 @@
DVDD-supply = <&buck5_reg>;
reset-gpios = <&gpio1 6 GPIO_ACTIVE_LOW>;
ai31xx-micbias-vg = <MICBIAS_AVDDV>;
- clocks = <&clk IMX8MN_CLK_SAI3_ROOT>;
};
};
diff --git a/arch/arm64/boot/dts/freescale/imx8mn-tqma8mqnl.dtsi b/arch/arm64/boot/dts/freescale/imx8mn-tqma8mqnl.dtsi
index e68a3fd73e17..640c41b51af9 100644
--- a/arch/arm64/boot/dts/freescale/imx8mn-tqma8mqnl.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mn-tqma8mqnl.dtsi
@@ -63,6 +63,7 @@
spi-max-frequency = <84000000>;
spi-tx-bus-width = <1>;
spi-rx-bus-width = <4>;
+ vcc-supply = <&buck5_reg>;
partitions {
compatible = "fixed-partitions";
diff --git a/arch/arm64/boot/dts/freescale/imx8mp-evk.dts b/arch/arm64/boot/dts/freescale/imx8mp-evk.dts
index 68e12a752edd..c26954e5a605 100644
--- a/arch/arm64/boot/dts/freescale/imx8mp-evk.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mp-evk.dts
@@ -74,6 +74,24 @@
clock-frequency = <100000000>;
};
+ reg_audio_3v3: regulator-audio-3v3 {
+ compatible = "regulator-fixed";
+ regulator-name = "audio-3v3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ reg_audio_1v8: regulator-audio-1v8 {
+ compatible = "regulator-fixed";
+ regulator-name = "audio-1v8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
reg_audio_pwr: regulator-audio-pwr {
compatible = "regulator-fixed";
pinctrl-names = "default";
@@ -586,7 +604,11 @@
wlf,shared-lrclk;
wlf,hp-cfg = <3 2 3>;
wlf,gpio-cfg = <1 3>;
+ AVDD-supply = <&reg_audio_3v3>;
+ DBVDD-supply = <&reg_audio_1v8>;
+ DCVDD-supply = <&reg_audio_1v8>;
SPKVDD1-supply = <&reg_audio_pwr>;
+ SPKVDD2-supply = <&reg_audio_pwr>;
};
pca6416: gpio@20 {
diff --git a/arch/arm64/boot/dts/freescale/imx8mp-kontron-osm-s.dtsi b/arch/arm64/boot/dts/freescale/imx8mp-kontron-osm-s.dtsi
index e0e9f6f7616d..b97bfeb1c30f 100644
--- a/arch/arm64/boot/dts/freescale/imx8mp-kontron-osm-s.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mp-kontron-osm-s.dtsi
@@ -311,6 +311,7 @@
regulator-name = "NVCC_SD (LDO5)";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <3300000>;
+ sd-vsel-gpios = <&gpio1 4 GPIO_ACTIVE_HIGH>;
};
};
};
@@ -808,7 +809,7 @@
MX8MP_IOMUXC_SD2_DATA1__USDHC2_DATA1 0x1d0 /* SDIO_A_D1 */
MX8MP_IOMUXC_SD2_DATA2__USDHC2_DATA2 0x1d0 /* SDIO_A_D2 */
MX8MP_IOMUXC_SD2_DATA3__USDHC2_DATA3 0x1d0 /* SDIO_A_D3 */
- MX8MP_IOMUXC_GPIO1_IO04__USDHC2_VSELECT 0x1d0
+ MX8MP_IOMUXC_GPIO1_IO04__USDHC2_VSELECT 0x400001d0
>;
};
@@ -820,7 +821,7 @@
MX8MP_IOMUXC_SD2_DATA1__USDHC2_DATA1 0x1d4 /* SDIO_A_D1 */
MX8MP_IOMUXC_SD2_DATA2__USDHC2_DATA2 0x1d4 /* SDIO_A_D2 */
MX8MP_IOMUXC_SD2_DATA3__USDHC2_DATA3 0x1d4 /* SDIO_A_D3 */
- MX8MP_IOMUXC_GPIO1_IO04__USDHC2_VSELECT 0x1d0
+ MX8MP_IOMUXC_GPIO1_IO04__USDHC2_VSELECT 0x400001d0
>;
};
@@ -832,7 +833,7 @@
MX8MP_IOMUXC_SD2_DATA1__USDHC2_DATA1 0x1d6 /* SDIO_A_D1 */
MX8MP_IOMUXC_SD2_DATA2__USDHC2_DATA2 0x1d6 /* SDIO_A_D2 */
MX8MP_IOMUXC_SD2_DATA3__USDHC2_DATA3 0x1d6 /* SDIO_A_D3 */
- MX8MP_IOMUXC_GPIO1_IO04__USDHC2_VSELECT 0x1d0
+ MX8MP_IOMUXC_GPIO1_IO04__USDHC2_VSELECT 0x400001d0
>;
};
diff --git a/arch/arm64/boot/dts/freescale/imx8mp-nominal.dtsi b/arch/arm64/boot/dts/freescale/imx8mp-nominal.dtsi
new file mode 100644
index 000000000000..a1b75c9068b2
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/imx8mp-nominal.dtsi
@@ -0,0 +1,64 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright (C) 2024 Pengutronix, Ahmad Fatoum <kernel@pengutronix.de>
+ */
+
+&clk {
+ assigned-clocks = <&clk IMX8MP_CLK_A53_SRC>,
+ <&clk IMX8MP_CLK_A53_CORE>,
+ <&clk IMX8MP_SYS_PLL3>,
+ <&clk IMX8MP_CLK_NOC>,
+ <&clk IMX8MP_CLK_NOC_IO>,
+ <&clk IMX8MP_CLK_GIC>;
+ assigned-clock-parents = <&clk IMX8MP_SYS_PLL1_800M>,
+ <&clk IMX8MP_ARM_PLL_OUT>,
+ <0>,
+ <&clk IMX8MP_SYS_PLL1_800M>,
+ <&clk IMX8MP_SYS_PLL3_OUT>,
+ <&clk IMX8MP_SYS_PLL1_800M>;
+ assigned-clock-rates = <0>, <0>,
+ <600000000>,
+ <800000000>,
+ <600000000>,
+ <400000000>;
+ fsl,operating-mode = "nominal";
+};
+
+&pgc_hdmimix {
+ assigned-clocks = <&clk IMX8MP_CLK_HDMI_AXI>,
+ <&clk IMX8MP_CLK_HDMI_APB>;
+ assigned-clock-parents = <&clk IMX8MP_SYS_PLL1_800M>,
+ <&clk IMX8MP_SYS_PLL1_133M>;
+ assigned-clock-rates = <400000000>, <133000000>;
+};
+
+&pgc_hsiomix {
+ assigned-clocks = <&clk IMX8MP_CLK_HSIO_AXI>;
+ assigned-clock-parents = <&clk IMX8MP_SYS_PLL1_800M>;
+ assigned-clock-rates = <400000000>;
+};
+
+&pgc_gpumix {
+ assigned-clocks = <&clk IMX8MP_CLK_GPU_AXI>,
+ <&clk IMX8MP_CLK_GPU_AHB>;
+ assigned-clock-parents = <&clk IMX8MP_SYS_PLL3_OUT>,
+ <&clk IMX8MP_SYS_PLL3_OUT>;
+ assigned-clock-rates = <600000000>, <300000000>;
+};
+
+&media_blk_ctrl {
+ assigned-clocks = <&clk IMX8MP_CLK_MEDIA_AXI>,
+ <&clk IMX8MP_CLK_MEDIA_APB>,
+ <&clk IMX8MP_CLK_MEDIA_DISP1_PIX>,
+ <&clk IMX8MP_CLK_MEDIA_DISP2_PIX>,
+ <&clk IMX8MP_CLK_MEDIA_ISP>,
+ <&clk IMX8MP_VIDEO_PLL1>;
+ assigned-clock-parents = <&clk IMX8MP_SYS_PLL1_800M>,
+ <&clk IMX8MP_SYS_PLL1_800M>,
+ <&clk IMX8MP_VIDEO_PLL1_OUT>,
+ <&clk IMX8MP_VIDEO_PLL1_OUT>,
+ <&clk IMX8MP_SYS_PLL1_800M>;
+ assigned-clock-rates = <400000000>, <200000000>,
+ <0>, <0>, <400000000>,
+ <1039500000>;
+};
diff --git a/arch/arm64/boot/dts/freescale/imx8mp-skov-basic.dts b/arch/arm64/boot/dts/freescale/imx8mp-skov-basic.dts
new file mode 100644
index 000000000000..5a2629f3567c
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/imx8mp-skov-basic.dts
@@ -0,0 +1,10 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+
+/dts-v1/;
+
+#include "imx8mp-skov-reva.dtsi"
+
+/ {
+ model = "SKOV IMX8MP CPU basic/fallback";
+ compatible = "skov,imx8mp-skov-basic", "fsl,imx8mp";
+};
diff --git a/arch/arm64/boot/dts/freescale/imx8mp-skov-reva.dtsi b/arch/arm64/boot/dts/freescale/imx8mp-skov-reva.dtsi
index 59813ef8e2bb..020f20c8ce66 100644
--- a/arch/arm64/boot/dts/freescale/imx8mp-skov-reva.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mp-skov-reva.dtsi
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: (GPL-2.0 OR MIT)
#include "imx8mp.dtsi"
+#include "imx8mp-nominal.dtsi"
#include <dt-bindings/leds/common.h>
@@ -116,6 +117,11 @@
regulator-name = "24V";
regulator-min-microvolt = <24000000>;
regulator-max-microvolt = <24000000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_reg24v>;
+ interrupts-extended = <&gpio4 23 IRQ_TYPE_EDGE_FALLING>;
+ system-critical-regulator;
+ regulator-uv-less-critical-window-ms = <50>;
};
reg_can2rs: regulator-can2rs {
@@ -163,6 +169,19 @@
};
};
+/*
+ * Board is passively cooled and heatsink is specced for continuous operation
+ * at 1.2 GHz only. Short bouts of 1.6 GHz are ok, but these should be done
+ * intentionally, not as part of suspend/resume cycles.
+ */
+&{/opp-table/opp-1600000000} {
+ /delete-property/ opp-suspend;
+};
+
+&{/opp-table/opp-1800000000} {
+ /delete-property/ opp-suspend;
+};
+
&A53_0 {
cpu-supply = <&reg_vdd_arm>;
};
@@ -197,7 +216,7 @@
&eqos {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_eqos>;
- phy-mode = "rgmii-txid";
+ phy-mode = "rgmii-rxid";
status = "okay";
fixed-link {
@@ -222,8 +241,11 @@
&i2c1 {
clock-frequency = <100000>;
- pinctrl-names = "default";
+ pinctrl-names = "default", "gpio";
pinctrl-0 = <&pinctrl_i2c1>;
+ pinctrl-1 = <&pinctrl_i2c1_gpio>;
+ scl-gpios = <&gpio5 14 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
+ sda-gpios = <&gpio5 15 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
status = "okay";
pmic@25 {
@@ -232,13 +254,12 @@
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_pmic>;
interrupts-extended = <&gpio1 3 IRQ_TYPE_EDGE_RISING>;
- sd-vsel-gpios = <&gpio1 4 GPIO_ACTIVE_HIGH>;
regulators {
reg_vdd_soc: BUCK1 {
regulator-name = "VDD_SOC";
- regulator-min-microvolt = <600000>;
- regulator-max-microvolt = <2187500>;
+ regulator-min-microvolt = <850000>;
+ regulator-max-microvolt = <850000>;
vin-supply = <&reg_5v_p>;
regulator-boot-on;
regulator-always-on;
@@ -247,20 +268,20 @@
reg_vdd_arm: BUCK2 {
regulator-name = "VDD_ARM";
- regulator-min-microvolt = <600000>;
- regulator-max-microvolt = <2187500>;
+ regulator-min-microvolt = <850000>;
+ regulator-max-microvolt = <1000000>;
vin-supply = <&reg_5v_p>;
regulator-boot-on;
regulator-always-on;
regulator-ramp-delay = <3125>;
- nxp,dvs-run-voltage = <950000>;
+ nxp,dvs-run-voltage = <850000>;
nxp,dvs-standby-voltage = <850000>;
};
reg_vdd_3v3: BUCK4 {
regulator-name = "VDD_3V3";
- regulator-min-microvolt = <600000>;
- regulator-max-microvolt = <3400000>;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
vin-supply = <&reg_5v_p>;
regulator-boot-on;
regulator-always-on;
@@ -268,8 +289,8 @@
reg_vdd_1v8: BUCK5 {
regulator-name = "VDD_1V8";
- regulator-min-microvolt = <600000>;
- regulator-max-microvolt = <3400000>;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
vin-supply = <&reg_5v_p>;
regulator-boot-on;
regulator-always-on;
@@ -277,8 +298,8 @@
reg_nvcc_dram_1v1: BUCK6 {
regulator-name = "NVCC_DRAM_1V1";
- regulator-min-microvolt = <600000>;
- regulator-max-microvolt = <3400000>;
+ regulator-min-microvolt = <1100000>;
+ regulator-max-microvolt = <1100000>;
vin-supply = <&reg_5v_p>;
regulator-boot-on;
regulator-always-on;
@@ -286,8 +307,8 @@
reg_nvcc_snvs_1v8: LDO1 {
regulator-name = "NVCC_SNVS_1V8";
- regulator-min-microvolt = <1600000>;
- regulator-max-microvolt = <3300000>;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
vin-supply = <&reg_5v_p>;
regulator-boot-on;
regulator-always-on;
@@ -295,8 +316,8 @@
reg_vdda_1v8: LDO3 {
regulator-name = "VDDA_1V8";
- regulator-min-microvolt = <800000>;
- regulator-max-microvolt = <3300000>;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
vin-supply = <&reg_5v_p>;
regulator-boot-on;
regulator-always-on;
@@ -314,10 +335,21 @@
};
};
+&i2c2 {
+ pinctrl-names = "default", "gpio";
+ pinctrl-0 = <&pinctrl_i2c2>;
+ pinctrl-1 = <&pinctrl_i2c2_gpio>;
+ scl-gpios = <&gpio5 16 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
+ sda-gpios = <&gpio5 17 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
+};
+
&i2c3 {
- clock-frequency = <100000>;
- pinctrl-names = "default";
+ clock-frequency = <400000>;
+ pinctrl-names = "default", "gpio";
pinctrl-0 = <&pinctrl_i2c3>;
+ pinctrl-1 = <&pinctrl_i2c3_gpio>;
+ scl-gpios = <&gpio5 18 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
+ sda-gpios = <&gpio5 19 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
status = "okay";
i2c_rtc: rtc@51 {
@@ -332,8 +364,11 @@
&i2c4 {
clock-frequency = <380000>;
- pinctrl-names = "default";
+ pinctrl-names = "default", "gpio";
pinctrl-0 = <&pinctrl_i2c4>;
+ pinctrl-1 = <&pinctrl_i2c4_gpio>;
+ scl-gpios = <&gpio5 20 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
+ sda-gpios = <&gpio5 21 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
status = "okay";
switch: switch@5f {
@@ -391,6 +426,13 @@
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_uart1>;
status = "okay";
+ /*
+	 * While there is no CTS line, the "uart-has-rtscts" property is still
+	 * the right way to enable RS485 operation on this UART. In RS485 mode
+	 * CTS isn't used anyway and there is no dedicated
+	 * "uart-has-rts-but-no-cts" property.
+ */
+ uart-has-rtscts;
};
&uart2 {
@@ -538,6 +580,27 @@
>;
};
+ pinctrl_i2c1_gpio: i2c1gpiogrp {
+ fsl,pins = <
+ MX8MP_IOMUXC_I2C1_SCL__GPIO5_IO14 0x400001c2
+ MX8MP_IOMUXC_I2C1_SDA__GPIO5_IO15 0x400001c2
+ >;
+ };
+
+ pinctrl_i2c2: i2c2grp {
+ fsl,pins = <
+ MX8MP_IOMUXC_I2C2_SCL__I2C2_SCL 0x400001c2
+ MX8MP_IOMUXC_I2C2_SDA__I2C2_SDA 0x400001c2
+ >;
+ };
+
+ pinctrl_i2c2_gpio: i2c2gpiogrp {
+ fsl,pins = <
+ MX8MP_IOMUXC_I2C2_SCL__GPIO5_IO16 0x400001c2
+ MX8MP_IOMUXC_I2C2_SDA__GPIO5_IO17 0x400001c2
+ >;
+ };
+
pinctrl_i2c3: i2c3grp {
fsl,pins = <
MX8MP_IOMUXC_I2C3_SCL__I2C3_SCL 0x400001c2
@@ -545,6 +608,13 @@
>;
};
+ pinctrl_i2c3_gpio: i2c3gpiogrp {
+ fsl,pins = <
+ MX8MP_IOMUXC_I2C3_SCL__GPIO5_IO18 0x400001c2
+ MX8MP_IOMUXC_I2C3_SDA__GPIO5_IO19 0x400001c2
+ >;
+ };
+
pinctrl_i2c4: i2c4grp {
fsl,pins = <
MX8MP_IOMUXC_I2C4_SCL__I2C4_SCL 0x400001c3
@@ -552,10 +622,16 @@
>;
};
+ pinctrl_i2c4_gpio: i2c4gpiogrp {
+ fsl,pins = <
+ MX8MP_IOMUXC_I2C4_SCL__GPIO5_IO20 0x400001c3
+ MX8MP_IOMUXC_I2C4_SDA__GPIO5_IO21 0x400001c3
+ >;
+ };
+
pinctrl_pmic: pmicirqgrp {
fsl,pins = <
MX8MP_IOMUXC_GPIO1_IO03__GPIO1_IO03 0x41
- MX8MP_IOMUXC_GPIO1_IO04__GPIO1_IO04 0x41
>;
};
@@ -571,6 +647,12 @@
>;
};
+ pinctrl_reg24v: reg24vgrp {
+ fsl,pins = <
+ MX8MP_IOMUXC_SAI2_RXD0__GPIO4_IO23 0x154
+ >;
+ };
+
pinctrl_reg_vsd_3v3: regvsd3v3grp {
fsl,pins = <
MX8MP_IOMUXC_SD2_RESET_B__GPIO2_IO19 0x40
@@ -605,6 +687,8 @@
MX8MP_IOMUXC_UART1_RXD__UART1_DCE_RX 0x140
MX8MP_IOMUXC_UART1_TXD__UART1_DCE_TX 0x140
MX8MP_IOMUXC_UART3_RXD__UART1_DTE_RTS 0x140
+ /* CTS pin is not connected, but needed as workaround */
+ MX8MP_IOMUXC_UART3_TXD__UART1_DTE_CTS 0x140
>;
};
@@ -623,6 +707,7 @@
MX8MP_IOMUXC_SD2_DATA1__USDHC2_DATA1 0x1d0
MX8MP_IOMUXC_SD2_DATA2__USDHC2_DATA2 0x1d0
MX8MP_IOMUXC_SD2_DATA3__USDHC2_DATA3 0x1d0
+ MX8MP_IOMUXC_GPIO1_IO04__USDHC2_VSELECT 0xc0
>;
};
@@ -634,6 +719,7 @@
MX8MP_IOMUXC_SD2_DATA1__USDHC2_DATA1 0x1d4
MX8MP_IOMUXC_SD2_DATA2__USDHC2_DATA2 0x1d4
MX8MP_IOMUXC_SD2_DATA3__USDHC2_DATA3 0x1d4
+ MX8MP_IOMUXC_GPIO1_IO04__USDHC2_VSELECT 0xc0
>;
};
@@ -645,6 +731,7 @@
MX8MP_IOMUXC_SD2_DATA1__USDHC2_DATA1 0x1d6
MX8MP_IOMUXC_SD2_DATA2__USDHC2_DATA2 0x1d6
MX8MP_IOMUXC_SD2_DATA3__USDHC2_DATA3 0x1d6
+ MX8MP_IOMUXC_GPIO1_IO04__USDHC2_VSELECT 0xc0
>;
};
diff --git a/arch/arm64/boot/dts/freescale/imx8mp-skov-revb-hdmi.dts b/arch/arm64/boot/dts/freescale/imx8mp-skov-revb-hdmi.dts
index c1ca69da3cb8..32a429437cbd 100644
--- a/arch/arm64/boot/dts/freescale/imx8mp-skov-revb-hdmi.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mp-skov-revb-hdmi.dts
@@ -9,12 +9,53 @@
compatible = "skov,imx8mp-skov-revb-hdmi", "fsl,imx8mp";
};
+&hdmi_pvi {
+ status = "okay";
+};
+
+&hdmi_tx {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_hdmi>;
+ ddc-i2c-bus = <&i2c5>;
+ status = "okay";
+};
+
+&hdmi_tx_phy {
+ status = "okay";
+};
+
+&i2c5 {
+ pinctrl-names = "default", "gpio";
+ pinctrl-0 = <&pinctrl_i2c5>;
+ pinctrl-1 = <&pinctrl_i2c5_gpio>;
+ scl-gpios = <&gpio3 26 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
+ sda-gpios = <&gpio3 27 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
+ clock-frequency = <100000>;
+ status = "okay";
+};
+
+&lcdif3 {
+ status = "okay";
+};
+
&iomuxc {
pinctrl_hdmi: hdmigrp {
fsl,pins = <
- MX8MP_IOMUXC_HDMI_DDC_SCL__HDMIMIX_HDMI_SCL 0x1c3
- MX8MP_IOMUXC_HDMI_DDC_SDA__HDMIMIX_HDMI_SDA 0x1c3
MX8MP_IOMUXC_HDMI_HPD__HDMIMIX_HDMI_HPD 0x19
>;
};
+
+ pinctrl_i2c5: i2c5grp {
+ fsl,pins = <
+ MX8MP_IOMUXC_HDMI_DDC_SCL__I2C5_SCL 0x400001c2
+ MX8MP_IOMUXC_HDMI_DDC_SDA__I2C5_SDA 0x400001c2
+ >;
+ };
+
+ pinctrl_i2c5_gpio: i2c5gpiogrp {
+ fsl,pins = <
+ MX8MP_IOMUXC_HDMI_DDC_SCL__GPIO3_IO26 0x400001c2
+ MX8MP_IOMUXC_HDMI_DDC_SDA__GPIO3_IO27 0x400001c2
+ >;
+ };
};
diff --git a/arch/arm64/boot/dts/freescale/imx8mp-skov-revb-lt6.dts b/arch/arm64/boot/dts/freescale/imx8mp-skov-revb-lt6.dts
index ccbd3abedd69..baecf768a2ee 100644
--- a/arch/arm64/boot/dts/freescale/imx8mp-skov-revb-lt6.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mp-skov-revb-lt6.dts
@@ -8,6 +8,45 @@
model = "SKOV IMX8MP CPU revB - LT6";
compatible = "skov,imx8mp-skov-revb-lt6", "fsl,imx8mp";
+ lvds-decoder {
+ compatible = "ti,sn65lvds822", "lvds-decoder";
+ power-supply = <&reg_3v3>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ in_lvds1: endpoint {
+ data-mapping = "vesa-24";
+ remote-endpoint = <&ldb_lvds_ch1>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ lvds_decoder_out: endpoint {
+ remote-endpoint = <&panel_in>;
+ };
+ };
+ };
+ };
+
+ panel {
+ compatible = "logictechno,lttd800480070-l6wh-rt";
+ backlight = <&backlight>;
+ power-supply = <&reg_tft_vcom>;
+
+ port {
+ panel_in: endpoint {
+ remote-endpoint = <&lvds_decoder_out>;
+ };
+ };
+ };
+
touchscreen {
compatible = "resistive-adc-touch";
io-channels = <&adc_ts 1>, <&adc_ts 3>, <&adc_ts 4>, <&adc_ts 5>;
@@ -78,6 +117,27 @@
};
};
+&lcdif2 {
+ status = "okay";
+};
+
+&lvds_bridge {
+ assigned-clocks = <&clk IMX8MP_CLK_MEDIA_LDB>,
+ <&clk IMX8MP_VIDEO_PLL1>;
+ assigned-clock-parents = <&clk IMX8MP_VIDEO_PLL1_OUT>;
+ /* IMX8MP_VIDEO_PLL1 = IMX8MP_CLK_MEDIA_DISP2_PIX * 2 * 7 */
+ assigned-clock-rates = <0>, <462000000>;
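+	/*
+	 * Worked example of the relation above: a VIDEO_PLL1 rate of
+	 * 462000000 Hz corresponds to a pixel clock of
+	 * 462000000 / (2 * 7) = 33 MHz for the LT6 panel.
+	 */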
+ status = "okay";
+
+ ports {
+ port@2 {
+ ldb_lvds_ch1: endpoint {
+ remote-endpoint = <&in_lvds1>;
+ };
+ };
+ };
+};
+
&pwm1 {
status = "okay";
};
diff --git a/arch/arm64/boot/dts/freescale/imx8mp-skov-revb-mi1010ait-1cp1.dts b/arch/arm64/boot/dts/freescale/imx8mp-skov-revb-mi1010ait-1cp1.dts
index 2c75da5f064f..45c9a6d55bc9 100644
--- a/arch/arm64/boot/dts/freescale/imx8mp-skov-revb-mi1010ait-1cp1.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mp-skov-revb-mi1010ait-1cp1.dts
@@ -27,8 +27,6 @@
&i2c2 {
clock-frequency = <100000>;
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_i2c2>;
status = "okay";
touchscreen@38 {
@@ -51,8 +49,11 @@
};
&lvds_bridge {
- /* IMX8MP_CLK_MEDIA_LDB = IMX8MP_CLK_MEDIA_DISP2_PIX * 7 */
- assigned-clock-rates = <490000000>;
+ assigned-clocks = <&clk IMX8MP_CLK_MEDIA_LDB>,
+ <&clk IMX8MP_VIDEO_PLL1>;
+ assigned-clock-parents = <&clk IMX8MP_VIDEO_PLL1_OUT>;
+ /* IMX8MP_VIDEO_PLL1 = IMX8MP_CLK_MEDIA_DISP2_PIX * 2 * 7 */
+ assigned-clock-rates = <0>, <980000000>;
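+	/*
+	 * Worked example of the relation above: a VIDEO_PLL1 rate of
+	 * 980000000 Hz corresponds to a pixel clock of 980000000 / (2 * 7)
+	 * = 70 MHz, the panel pixel clock previously set via media_blk_ctrl.
+	 */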
status = "okay";
ports {
@@ -64,18 +65,6 @@
};
};
-&media_blk_ctrl {
- /* currently it is not possible to let display clocks confugure
- * automatically, so we need to set them manually
- */
- assigned-clock-rates = <500000000>, <200000000>, <0>,
- /* IMX8MP_CLK_MEDIA_DISP2_PIX = pixelclk of lvds panel */
- <70000000>,
- <500000000>,
- /* IMX8MP_VIDEO_PLL1 = IMX8MP_CLK_MEDIA_LDB */
- <490000000>;
-};
-
&pwm4 {
status = "okay";
};
@@ -90,12 +79,3 @@
voltage-table = <3160000 73>;
status = "okay";
};
-
-&iomuxc {
- pinctrl_i2c2: i2c2grp {
- fsl,pins = <
- MX8MP_IOMUXC_I2C2_SCL__I2C2_SCL 0x400001c2
- MX8MP_IOMUXC_I2C2_SDA__I2C2_SDA 0x400001c2
- >;
- };
-};
diff --git a/arch/arm64/boot/dts/freescale/imx8mp-skov-revc-bd500.dts b/arch/arm64/boot/dts/freescale/imx8mp-skov-revc-bd500.dts
new file mode 100644
index 000000000000..b816c6cd3bca
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/imx8mp-skov-revc-bd500.dts
@@ -0,0 +1,91 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+
+/dts-v1/;
+
+#include "imx8mp-skov-reva.dtsi"
+
+/ {
+ model = "SKOV IMX8MP CPU revC - bd500";
+ compatible = "skov,imx8mp-skov-revc-bd500", "fsl,imx8mp";
+
+ leds {
+ led_system_red: led-3 {
+ label = "bd500:system:red";
+ color = <LED_COLOR_ID_RED>;
+ /* Inverted compared to others due to NMOS inverter */
+ gpios = <&gpioexp 3 GPIO_ACTIVE_HIGH>;
+ default-state = "off";
+ };
+
+ led_system_green: led-4 {
+ label = "bd500:system:green";
+ color = <LED_COLOR_ID_GREEN>;
+ gpios = <&gpioexp 2 GPIO_ACTIVE_LOW>;
+ default-state = "on";
+ };
+
+ led_lan1_red: led-5 {
+ label = "bd500:lan1:act";
+ color = <LED_COLOR_ID_RED>;
+ linux,default-trigger = "netdev";
+ gpios = <&gpioexp 1 GPIO_ACTIVE_LOW>;
+ };
+
+ led_lan1_green: led-6 {
+ label = "bd500:lan1:link";
+ color = <LED_COLOR_ID_GREEN>;
+ linux,default-trigger = "netdev";
+ gpios = <&gpioexp 0 GPIO_ACTIVE_LOW>;
+ };
+
+ led_lan2_red: led-7 {
+ label = "bd500:lan2:act";
+ color = <LED_COLOR_ID_RED>;
+ linux,default-trigger = "netdev";
+ gpios = <&gpioexp 6 GPIO_ACTIVE_LOW>;
+ };
+
+ led_lan2_green: led-8 {
+ label = "bd500:lan2:link";
+ color = <LED_COLOR_ID_GREEN>;
+ linux,default-trigger = "netdev";
+ gpios = <&gpioexp 7 GPIO_ACTIVE_LOW>;
+ };
+ };
+
+ gpio-keys {
+ compatible = "gpio-keys";
+
+ button-1 {
+ label = "S1";
+ linux,code = <KEY_CONFIG>;
+ gpios = <&gpioexp 5 GPIO_ACTIVE_LOW>;
+ };
+ };
+};
+
+&i2c2 {
+ clock-frequency = <100000>;
+ status = "okay";
+
+ gpioexp: gpio@20 {
+ compatible = "nxp,pca6408";
+ reg = <0x20>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_gpio_exp>;
+ interrupts-extended = <&gpio4 28 IRQ_TYPE_EDGE_FALLING>;
+ reset-gpios = <&gpio4 29 GPIO_ACTIVE_LOW>;
+ vcc-supply = <&reg_vdd_3v3>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+};
+
+&iomuxc {
+ pinctrl_gpio_exp: gpioexpgrp {
+ fsl,pins = <
+ MX8MP_IOMUXC_SAI3_RXFS__GPIO4_IO28 0x0
+ MX8MP_IOMUXC_SAI3_RXC__GPIO4_IO29 0x0
+ >;
+ };
+};
diff --git a/arch/arm64/boot/dts/freescale/imx8mp-skov-revc-tian-g07017.dts b/arch/arm64/boot/dts/freescale/imx8mp-skov-revc-tian-g07017.dts
new file mode 100644
index 000000000000..9a562c011f2c
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/imx8mp-skov-revc-tian-g07017.dts
@@ -0,0 +1,81 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+
+/dts-v1/;
+
+#include "imx8mp-skov-reva.dtsi"
+
+/ {
+ model = "SKOV IMX8MP CPU revC - TIAN G07017";
+ compatible = "skov,imx8mp-skov-revc-tian-g07017", "fsl,imx8mp";
+
+ panel {
+ compatible = "topland,tian-g07017-01";
+ backlight = <&backlight>;
+ power-supply = <&reg_tft_vcom>;
+
+ port {
+ in_lvds0: endpoint {
+ remote-endpoint = <&ldb_lvds_ch0>;
+ };
+ };
+ };
+};
+
+&backlight {
+ status = "okay";
+};
+
+&i2c2 {
+ clock-frequency = <100000>;
+ status = "okay";
+
+ touchscreen@38 {
+ compatible = "edt,edt-ft5506";
+ reg = <0x38>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_touchscreen>;
+ interrupts-extended = <&gpio4 28 IRQ_TYPE_EDGE_FALLING>;
+ reset-gpios = <&gpio4 29 GPIO_ACTIVE_LOW>;
+ touchscreen-size-x = <1024>;
+ touchscreen-size-y = <600>;
+ vcc-supply = <&reg_vdd_3v3>;
+ iovcc-supply = <&reg_vdd_3v3>;
+ wakeup-source;
+ };
+};
+
+&lcdif2 {
+ status = "okay";
+};
+
+&lvds_bridge {
+ assigned-clocks = <&clk IMX8MP_CLK_MEDIA_LDB>,
+ <&clk IMX8MP_VIDEO_PLL1>;
+ assigned-clock-parents = <&clk IMX8MP_VIDEO_PLL1_OUT>;
+ /* IMX8MP_VIDEO_PLL1 = IMX8MP_CLK_MEDIA_DISP2_PIX * 2 * 7 */
+ assigned-clock-rates = <0>, <358400000>;
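+	/*
+	 * Worked example of the relation above: a VIDEO_PLL1 rate of
+	 * 358400000 Hz corresponds to a pixel clock of 358400000 / (2 * 7)
+	 * = 25.6 MHz for the TIAN G07017 panel.
+	 */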
+ status = "okay";
+
+ ports {
+ port@1 {
+ ldb_lvds_ch0: endpoint {
+ remote-endpoint = <&in_lvds0>;
+ };
+ };
+ };
+};
+
+&pwm4 {
+ status = "okay";
+};
+
+&pwm1 {
+ status = "okay";
+};
+
+&reg_tft_vcom {
+ regulator-min-microvolt = <3160000>;
+ regulator-max-microvolt = <3160000>;
+ voltage-table = <3160000 73>;
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/freescale/imx8mp-tqma8mpql-mba8mpxl.dts b/arch/arm64/boot/dts/freescale/imx8mp-tqma8mpql-mba8mpxl.dts
index ae64731266f3..23c612e80dd3 100644
--- a/arch/arm64/boot/dts/freescale/imx8mp-tqma8mpql-mba8mpxl.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mp-tqma8mpql-mba8mpxl.dts
@@ -234,7 +234,7 @@
sound {
compatible = "fsl,imx-audio-tlv320aic32x4";
- model = "tq-tlv320aic32x";
+ model = "tqm-tlv320aic32";
audio-cpu = <&sai3>;
audio-codec = <&tlv320aic3x04>;
};
diff --git a/arch/arm64/boot/dts/freescale/imx8mp-tqma8mpql.dtsi b/arch/arm64/boot/dts/freescale/imx8mp-tqma8mpql.dtsi
index 336785a9fba8..6067ca3be814 100644
--- a/arch/arm64/boot/dts/freescale/imx8mp-tqma8mpql.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mp-tqma8mpql.dtsi
@@ -1,7 +1,8 @@
// SPDX-License-Identifier: GPL-2.0-or-later OR MIT
/*
- * Copyright 2021-2022 TQ-Systems GmbH
- * Author: Alexander Stein <alexander.stein@tq-group.com>
+ * Copyright 2021-2025 TQ-Systems GmbH <linux@ew.tq-group.com>,
+ * D-82229 Seefeld, Germany.
+ * Author: Alexander Stein
*/
#include "imx8mp.dtsi"
@@ -23,15 +24,6 @@
regulator-max-microvolt = <3300000>;
regulator-always-on;
};
-
- /* e-MMC IO, needed for HS modes */
- reg_vcc1v8: regulator-vcc1v8 {
- compatible = "regulator-fixed";
- regulator-name = "VCC1V8";
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <1800000>;
- regulator-always-on;
- };
};
&A53_0 {
@@ -49,6 +41,7 @@
spi-max-frequency = <80000000>;
spi-tx-bus-width = <1>;
spi-rx-bus-width = <4>;
+ vcc-supply = <&buck5_reg>;
partitions {
compatible = "fixed-partitions";
@@ -197,7 +190,7 @@
no-sd;
no-sdio;
vmmc-supply = <&reg_vcc3v3>;
- vqmmc-supply = <&reg_vcc1v8>;
+ vqmmc-supply = <&buck5_reg>;
status = "okay";
};
diff --git a/arch/arm64/boot/dts/freescale/imx8mp-verdin-dahlia.dtsi b/arch/arm64/boot/dts/freescale/imx8mp-verdin-dahlia.dtsi
index da8902c5f7e5..1493319aa748 100644
--- a/arch/arm64/boot/dts/freescale/imx8mp-verdin-dahlia.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mp-verdin-dahlia.dtsi
@@ -28,10 +28,10 @@
"Headphone Jack", "HPOUTR",
"IN2L", "Line In Jack",
"IN2R", "Line In Jack",
- "Headphone Jack", "MICBIAS",
- "IN1L", "Headphone Jack";
+ "Microphone Jack", "MICBIAS",
+ "IN1L", "Microphone Jack";
simple-audio-card,widgets =
- "Microphone", "Headphone Jack",
+ "Microphone", "Microphone Jack",
"Headphone", "Headphone Jack",
"Line", "Line In Jack";
diff --git a/arch/arm64/boot/dts/freescale/imx8mp.dtsi b/arch/arm64/boot/dts/freescale/imx8mp.dtsi
index e0d3b8cba221..ce6793b2d57e 100644
--- a/arch/arm64/boot/dts/freescale/imx8mp.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mp.dtsi
@@ -816,12 +816,12 @@
assigned-clocks = <&clk IMX8MP_CLK_ML_CORE>,
<&clk IMX8MP_CLK_ML_AXI>,
<&clk IMX8MP_CLK_ML_AHB>;
- assigned-clock-parents = <&clk IMX8MP_SYS_PLL1_800M>,
+ assigned-clock-parents = <&clk IMX8MP_SYS_PLL2_1000M>,
<&clk IMX8MP_SYS_PLL1_800M>,
<&clk IMX8MP_SYS_PLL1_800M>;
- assigned-clock-rates = <800000000>,
+ assigned-clock-rates = <1000000000>,
<800000000>,
- <300000000>;
+ <400000000>;
};
pgc_audio: power-domain@5 {
@@ -834,7 +834,7 @@
assigned-clock-parents = <&clk IMX8MP_SYS_PLL1_800M>,
<&clk IMX8MP_SYS_PLL1_800M>;
assigned-clock-rates = <400000000>,
- <600000000>;
+ <800000000>;
};
pgc_gpu2d: power-domain@6 {
@@ -1619,10 +1619,11 @@
<&clk IMX8MP_CLK_SAI3>,
<&clk IMX8MP_CLK_SAI5>,
<&clk IMX8MP_CLK_SAI6>,
- <&clk IMX8MP_CLK_SAI7>;
+ <&clk IMX8MP_CLK_SAI7>,
+ <&clk IMX8MP_CLK_AUDIO_AXI_ROOT>;
clock-names = "ahb",
"sai1", "sai2", "sai3",
- "sai5", "sai6", "sai7";
+ "sai5", "sai6", "sai7", "axi";
power-domains = <&pgc_audio>;
assigned-clocks = <&clk IMX8MP_AUDIO_PLL1>,
<&clk IMX8MP_AUDIO_PLL2>;
@@ -2232,9 +2233,9 @@
clock-names = "core", "shader", "bus", "reg";
assigned-clocks = <&clk IMX8MP_CLK_GPU3D_CORE>,
<&clk IMX8MP_CLK_GPU3D_SHADER_CORE>;
- assigned-clock-parents = <&clk IMX8MP_SYS_PLL1_800M>,
- <&clk IMX8MP_SYS_PLL1_800M>;
- assigned-clock-rates = <800000000>, <800000000>;
+ assigned-clock-parents = <&clk IMX8MP_SYS_PLL2_1000M>,
+ <&clk IMX8MP_SYS_PLL2_1000M>;
+ assigned-clock-rates = <1000000000>, <1000000000>;
power-domains = <&pgc_gpu3d>;
};
@@ -2247,8 +2248,8 @@
<&clk IMX8MP_CLK_GPU_AHB>;
clock-names = "core", "bus", "reg";
assigned-clocks = <&clk IMX8MP_CLK_GPU2D_CORE>;
- assigned-clock-parents = <&clk IMX8MP_SYS_PLL1_800M>;
- assigned-clock-rates = <800000000>;
+ assigned-clock-parents = <&clk IMX8MP_SYS_PLL2_1000M>;
+ assigned-clock-rates = <1000000000>;
power-domains = <&pgc_gpu2d>;
};
diff --git a/arch/arm64/boot/dts/freescale/imx8mq-librem5-devkit.dts b/arch/arm64/boot/dts/freescale/imx8mq-librem5-devkit.dts
index 9d8e7231b7c6..d9f203c79519 100644
--- a/arch/arm64/boot/dts/freescale/imx8mq-librem5-devkit.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mq-librem5-devkit.dts
@@ -979,24 +979,27 @@
};
&usb_dwc3_0 {
- #address-cells = <1>;
- #size-cells = <0>;
dr_mode = "otg";
status = "okay";
- port@0 {
- reg = <0>;
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
- typec_hs: endpoint {
- remote-endpoint = <&usb_con_hs>;
+ port@0 {
+ reg = <0>;
+
+ typec_hs: endpoint {
+ remote-endpoint = <&usb_con_hs>;
+ };
};
- };
- port@1 {
- reg = <1>;
+ port@1 {
+ reg = <1>;
- typec_ss: endpoint {
- remote-endpoint = <&usb_con_ss>;
+ typec_ss: endpoint {
+ remote-endpoint = <&usb_con_ss>;
+ };
};
};
};
diff --git a/arch/arm64/boot/dts/freescale/imx8mq-librem5.dtsi b/arch/arm64/boot/dts/freescale/imx8mq-librem5.dtsi
index bb37a32ce461..9e0e2d7271ef 100644
--- a/arch/arm64/boot/dts/freescale/imx8mq-librem5.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mq-librem5.dtsi
@@ -794,7 +794,6 @@
interrupt-parent = <&gpio1>;
interrupts = <10 IRQ_TYPE_LEVEL_LOW>;
interrupt-names = "irq";
- extcon = <&usb3_phy0>;
wakeup-source;
connector {
@@ -1322,25 +1321,28 @@
};
&usb_dwc3_0 {
- #address-cells = <1>;
- #size-cells = <0>;
dr_mode = "otg";
usb-role-switch;
status = "okay";
- port@0 {
- reg = <0>;
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
- typec_hs: endpoint {
- remote-endpoint = <&usb_con_hs>;
+ port@0 {
+ reg = <0>;
+
+ typec_hs: endpoint {
+ remote-endpoint = <&usb_con_hs>;
+ };
};
- };
- port@1 {
- reg = <1>;
+ port@1 {
+ reg = <1>;
- typec_ss: endpoint {
- remote-endpoint = <&usb_con_ss>;
+ typec_ss: endpoint {
+ remote-endpoint = <&usb_con_ss>;
+ };
};
};
};
diff --git a/arch/arm64/boot/dts/freescale/imx8mq-tqma8mq.dtsi b/arch/arm64/boot/dts/freescale/imx8mq-tqma8mq.dtsi
index 01e5092e4c40..c92001c80f11 100644
--- a/arch/arm64/boot/dts/freescale/imx8mq-tqma8mq.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mq-tqma8mq.dtsi
@@ -254,6 +254,7 @@
spi-max-frequency = <84000000>;
spi-tx-bus-width = <1>;
spi-rx-bus-width = <4>;
+ vcc-supply = <&nvcc_1v8_reg>;
partitions {
compatible = "fixed-partitions";
diff --git a/arch/arm64/boot/dts/freescale/imx8qm-apalis-v1.1.dtsi b/arch/arm64/boot/dts/freescale/imx8qm-apalis-v1.1.dtsi
index 81ba8b2831ac..b1c3f331c4ed 100644
--- a/arch/arm64/boot/dts/freescale/imx8qm-apalis-v1.1.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8qm-apalis-v1.1.dtsi
@@ -9,8 +9,6 @@
/ {
model = "Toradex Apalis iMX8QM V1.1";
- compatible = "toradex,apalis-imx8-v1.1",
- "fsl,imx8qm";
};
/* TODO: Cooling Maps */
diff --git a/arch/arm64/boot/dts/freescale/imx8qm-apalis.dtsi b/arch/arm64/boot/dts/freescale/imx8qm-apalis.dtsi
index 4d6427fbe875..c18f57039f6e 100644
--- a/arch/arm64/boot/dts/freescale/imx8qm-apalis.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8qm-apalis.dtsi
@@ -7,8 +7,6 @@
/ {
model = "Toradex Apalis iMX8QM";
- compatible = "toradex,apalis-imx8",
- "fsl,imx8qm";
};
&ethphy0 {
diff --git a/arch/arm64/boot/dts/freescale/imx8qm-mek.dts b/arch/arm64/boot/dts/freescale/imx8qm-mek.dts
index 50fd3370f7dc..353f825a8ac5 100644
--- a/arch/arm64/boot/dts/freescale/imx8qm-mek.dts
+++ b/arch/arm64/boot/dts/freescale/imx8qm-mek.dts
@@ -155,6 +155,13 @@
enable-active-high;
};
+ reg_audio: regulator-audio {
+ compatible = "regulator-fixed";
+ regulator-name = "cs42888_supply";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ };
+
reg_fec2_supply: regulator-fec2-nvcc {
compatible = "regulator-fixed";
regulator-name = "fec2_nvcc";
@@ -220,6 +227,33 @@
regulator-max-microvolt = <1800000>;
};
+ reg_audio_5v: regulator-audio-pwr {
+ compatible = "regulator-fixed";
+ regulator-name = "audio-5v";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ reg_audio_3v3: regulator-audio-3v3 {
+ compatible = "regulator-fixed";
+ regulator-name = "audio-3v3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ reg_audio_1v8: regulator-audio-1v8 {
+ compatible = "regulator-fixed";
+ regulator-name = "audio-1v8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
bt_sco_codec: audio-codec-bt {
compatible = "linux,bt-sco";
#sound-dai-cells = <1>;
@@ -244,6 +278,26 @@
};
};
+ sound-cs42888 {
+ compatible = "fsl,imx-audio-cs42888";
+ model = "imx-cs42888";
+ audio-cpu = <&esai0>;
+ audio-codec = <&cs42888>;
+ audio-asrc = <&asrc0>;
+ audio-routing = "Line Out Jack", "AOUT1L",
+ "Line Out Jack", "AOUT1R",
+ "Line Out Jack", "AOUT2L",
+ "Line Out Jack", "AOUT2R",
+ "Line Out Jack", "AOUT3L",
+ "Line Out Jack", "AOUT3R",
+ "Line Out Jack", "AOUT4L",
+ "Line Out Jack", "AOUT4R",
+ "AIN1L", "Line In Jack",
+ "AIN1R", "Line In Jack",
+ "AIN2L", "Line In Jack",
+ "AIN2R", "Line In Jack";
+ };
+
sound-wm8960 {
compatible = "fsl,imx-audio-wm8960";
model = "wm8960-audio";
@@ -322,12 +376,44 @@
gpio-controller;
#gpio-cells = <2>;
};
+
+ cs42888: audio-codec@48 {
+ compatible = "cirrus,cs42888";
+ reg = <0x48>;
+ clocks = <&mclkout0_lpcg IMX_LPCG_CLK_0>;
+ clock-names = "mclk";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_cs42888_reset>;
+ VA-supply = <&reg_audio>;
+ VD-supply = <&reg_audio>;
+ VLS-supply = <&reg_audio>;
+ VLC-supply = <&reg_audio>;
+ reset-gpios = <&lsio_gpio4 25 GPIO_ACTIVE_LOW>;
+ assigned-clocks = <&clk IMX_SC_R_AUDIO_PLL_0 IMX_SC_PM_CLK_PLL>,
+ <&clk IMX_SC_R_AUDIO_PLL_0 IMX_SC_PM_CLK_SLV_BUS>,
+ <&clk IMX_SC_R_AUDIO_PLL_0 IMX_SC_PM_CLK_MST_BUS>,
+ <&mclkout0_lpcg IMX_LPCG_CLK_0>;
+ assigned-clock-rates = <786432000>, <49152000>, <12288000>, <12288000>;
+ };
};
&cm41_intmux {
status = "okay";
};
+&esai0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_esai0>;
+ assigned-clocks = <&acm IMX_ADMA_ACM_ESAI0_MCLK_SEL>,
+ <&clk IMX_SC_R_AUDIO_PLL_0 IMX_SC_PM_CLK_PLL>,
+ <&clk IMX_SC_R_AUDIO_PLL_0 IMX_SC_PM_CLK_SLV_BUS>,
+ <&clk IMX_SC_R_AUDIO_PLL_0 IMX_SC_PM_CLK_MST_BUS>,
+ <&esai0_lpcg IMX_LPCG_CLK_4>;
+ assigned-clock-parents = <&aud_pll_div0_lpcg IMX_LPCG_CLK_0>;
+ assigned-clock-rates = <0>, <786432000>, <49152000>, <12288000>, <49152000>;
+ status = "okay";
+};
+
&hsio_phy {
fsl,hsio-cfg = "pciea-pcieb-sata";
fsl,refclk-pad-mode = "input";
@@ -439,6 +525,11 @@
wlf,shared-lrclk;
wlf,hp-cfg = <2 2 3>;
wlf,gpio-cfg = <1 3>;
+ AVDD-supply = <&reg_audio_3v3>;
+ DBVDD-supply = <&reg_audio_1v8>;
+ DCVDD-supply = <&reg_audio_1v8>;
+ SPKVDD1-supply = <&reg_audio_5v>;
+ SPKVDD2-supply = <&reg_audio_5v>;
};
};
@@ -718,6 +809,12 @@
>;
};
+ pinctrl_cs42888_reset: cs42888_resetgrp {
+ fsl,pins = <
+ IMX8QM_QSPI1A_DATA1_LSIO_GPIO4_IO25 0x0600004c
+ >;
+ };
+
pinctrl_i2c0: i2c0grp {
fsl,pins = <
IMX8QM_HDMI_TX0_TS_SCL_DMA_I2C0_SCL 0x06000021
@@ -752,6 +849,21 @@
>;
};
+ pinctrl_esai0: esai0grp {
+ fsl,pins = <
+ IMX8QM_ESAI0_FSR_AUD_ESAI0_FSR 0xc6000040
+ IMX8QM_ESAI0_FST_AUD_ESAI0_FST 0xc6000040
+ IMX8QM_ESAI0_SCKR_AUD_ESAI0_SCKR 0xc6000040
+ IMX8QM_ESAI0_SCKT_AUD_ESAI0_SCKT 0xc6000040
+ IMX8QM_ESAI0_TX0_AUD_ESAI0_TX0 0xc6000040
+ IMX8QM_ESAI0_TX1_AUD_ESAI0_TX1 0xc6000040
+ IMX8QM_ESAI0_TX2_RX3_AUD_ESAI0_TX2_RX3 0xc6000040
+ IMX8QM_ESAI0_TX3_RX2_AUD_ESAI0_TX3_RX2 0xc6000040
+ IMX8QM_ESAI0_TX4_RX1_AUD_ESAI0_TX4_RX1 0xc6000040
+ IMX8QM_ESAI0_TX5_RX0_AUD_ESAI0_TX5_RX0 0xc6000040
+ >;
+ };
+
pinctrl_fec1: fec1grp {
fsl,pins = <
IMX8QM_ENET0_MDC_CONN_ENET0_MDC 0x06000020
diff --git a/arch/arm64/boot/dts/freescale/imx8qm-ss-hsio.dtsi b/arch/arm64/boot/dts/freescale/imx8qm-ss-hsio.dtsi
index b1d0189a1725..e80f722dbe65 100644
--- a/arch/arm64/boot/dts/freescale/imx8qm-ss-hsio.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8qm-ss-hsio.dtsi
@@ -42,6 +42,25 @@
status = "disabled";
};
+ pciea_ep: pcie-ep@5f000000 {
+ compatible = "fsl,imx8q-pcie-ep";
+ reg = <0x5f000000 0x00010000>,
+ <0x40000000 0x10000000>;
+ reg-names = "dbi", "addr_space";
+ num-lanes = <1>;
+ interrupts = <GIC_SPI 72 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "dma";
+ clocks = <&pciea_lpcg IMX_LPCG_CLK_6>,
+ <&pciea_lpcg IMX_LPCG_CLK_4>,
+ <&pciea_lpcg IMX_LPCG_CLK_5>;
+ clock-names = "dbi", "mstr", "slv";
+ power-domains = <&pd IMX_SC_R_PCIE_A>;
+ fsl,max-link-speed = <3>;
+ num-ib-windows = <6>;
+ num-ob-windows = <6>;
+ status = "disabled";
+ };
+
pcieb: pcie@5f010000 {
compatible = "fsl,imx8q-pcie";
reg = <0x5f010000 0x10000>,
@@ -50,8 +69,9 @@
ranges = <0x81000000 0 0x00000000 0x8ff80000 0 0x00010000>,
<0x82000000 0 0x80000000 0x80000000 0 0x0ff00000>;
#interrupt-cells = <1>;
- interrupts = <GIC_SPI 102 IRQ_TYPE_LEVEL_HIGH>;
- interrupt-names = "msi";
+ interrupts = <GIC_SPI 102 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 104 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "msi", "dma";
#address-cells = <3>;
#size-cells = <2>;
clocks = <&pcieb_lpcg IMX_LPCG_CLK_6>,
diff --git a/arch/arm64/boot/dts/freescale/imx8qxp-mek-pcie-ep.dtso b/arch/arm64/boot/dts/freescale/imx8qxp-mek-pcie-ep.dtso
new file mode 100644
index 000000000000..4f562eb5c5b1
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/imx8qxp-mek-pcie-ep.dtso
@@ -0,0 +1,22 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright 2025 NXP
+ */
+
+#include <dt-bindings/phy/phy.h>
+
+/dts-v1/;
+/plugin/;
+
+&pcieb {
+ status = "disabled";
+};
+
+&pcieb_ep {
+ phys = <&hsio_phy 0 PHY_TYPE_PCIE 0>;
+ phy-names = "pcie-phy";
+ pinctrl-0 = <&pinctrl_pcieb>;
+ pinctrl-names = "default";
+ vpcie-supply = <&reg_pcieb>;
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/freescale/imx8qxp-mek.dts b/arch/arm64/boot/dts/freescale/imx8qxp-mek.dts
index be79c793213a..a669a5d500d3 100644
--- a/arch/arm64/boot/dts/freescale/imx8qxp-mek.dts
+++ b/arch/arm64/boot/dts/freescale/imx8qxp-mek.dts
@@ -98,6 +98,33 @@
regulator-name = "cs42888_supply";
};
+ reg_audio_5v: regulator-audio-pwr {
+ compatible = "regulator-fixed";
+ regulator-name = "audio-5v";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ reg_audio_3v3: regulator-audio-3v3 {
+ compatible = "regulator-fixed";
+ regulator-name = "audio-3v3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ reg_audio_1v8: regulator-audio-1v8 {
+ compatible = "regulator-fixed";
+ regulator-name = "audio-1v8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
reg_can_en: regulator-can-en {
compatible = "regulator-fixed";
regulator-max-microvolt = <3300000>;
@@ -418,6 +445,11 @@
wlf,shared-lrclk;
wlf,hp-cfg = <2 2 3>;
wlf,gpio-cfg = <1 3>;
+ AVDD-supply = <&reg_audio_3v3>;
+ DBVDD-supply = <&reg_audio_1v8>;
+ DCVDD-supply = <&reg_audio_1v8>;
+ SPKVDD1-supply = <&reg_audio_5v>;
+ SPKVDD2-supply = <&reg_audio_5v>;
};
pca6416: gpio@20 {
diff --git a/arch/arm64/boot/dts/freescale/imx8x-colibri.dtsi b/arch/arm64/boot/dts/freescale/imx8x-colibri.dtsi
index d5abfdb8ede2..ecb35c6b67f5 100644
--- a/arch/arm64/boot/dts/freescale/imx8x-colibri.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8x-colibri.dtsi
@@ -295,8 +295,8 @@
"",
"SODIMM_61",
"SODIMM_103",
- "",
- "",
+ "SODIMM_79",
+ "SODIMM_97",
"",
"SODIMM_25",
"SODIMM_27",
diff --git a/arch/arm64/boot/dts/freescale/imx93-kontron-osm-s.dtsi b/arch/arm64/boot/dts/freescale/imx93-kontron-osm-s.dtsi
index 47c1363a2f99..119a16207059 100644
--- a/arch/arm64/boot/dts/freescale/imx93-kontron-osm-s.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx93-kontron-osm-s.dtsi
@@ -189,6 +189,7 @@
regulator-name = "NVCC_SD (LDO5)";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <3300000>;
+ nxp,sd-vsel-fixed-low;
};
};
};
@@ -282,6 +283,7 @@
pinctrl-1 = <&pinctrl_usdhc2_100mhz>, <&pinctrl_usdhc2_gpio>;
pinctrl-2 = <&pinctrl_usdhc2_200mhz>, <&pinctrl_usdhc2_gpio>;
vmmc-supply = <&reg_usdhc2_vcc>;
+ vqmmc-supply = <&reg_nvcc_sd>;
cd-gpios = <&gpio3 0 GPIO_ACTIVE_LOW>;
};
@@ -553,7 +555,6 @@
MX93_PAD_SD2_DATA1__USDHC2_DATA1 0x40001382 /* SDIO_A_D1 */
MX93_PAD_SD2_DATA2__USDHC2_DATA2 0x40001382 /* SDIO_A_D2 */
MX93_PAD_SD2_DATA3__USDHC2_DATA3 0x40001382 /* SDIO_A_D3 */
- MX93_PAD_SD2_VSELECT__USDHC2_VSELECT 0x1d0
>;
};
@@ -565,7 +566,6 @@
MX93_PAD_SD2_DATA1__USDHC2_DATA1 0x4000138e /* SDIO_A_D1 */
MX93_PAD_SD2_DATA2__USDHC2_DATA2 0x4000138e /* SDIO_A_D2 */
MX93_PAD_SD2_DATA3__USDHC2_DATA3 0x4000138e /* SDIO_A_D3 */
- MX93_PAD_SD2_VSELECT__USDHC2_VSELECT 0x1d0
>;
};
@@ -577,7 +577,6 @@
MX93_PAD_SD2_DATA1__USDHC2_DATA1 0x400013fe /* SDIO_A_D1 */
MX93_PAD_SD2_DATA2__USDHC2_DATA2 0x400013fe /* SDIO_A_D2 */
MX93_PAD_SD2_DATA3__USDHC2_DATA3 0x400013fe /* SDIO_A_D3 */
- MX93_PAD_SD2_VSELECT__USDHC2_VSELECT 0x1d0
>;
};
diff --git a/arch/arm64/boot/dts/freescale/imx93-tqma9352-mba93xxca.dts b/arch/arm64/boot/dts/freescale/imx93-tqma9352-mba93xxca.dts
index 8e939d716aac..ebbac5f8d2b2 100644
--- a/arch/arm64/boot/dts/freescale/imx93-tqma9352-mba93xxca.dts
+++ b/arch/arm64/boot/dts/freescale/imx93-tqma9352-mba93xxca.dts
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: (GPL-2.0-or-later OR MIT)
/*
- * Copyright (c) 2022-2023 TQ-Systems GmbH <linux@ew.tq-group.com>,
+ * Copyright (c) 2022-2024 TQ-Systems GmbH <linux@ew.tq-group.com>,
* D-82229 Seefeld, Germany.
* Author: Markus Niebel
* Author: Alexander Stein
@@ -26,8 +26,8 @@
aliases {
eeprom0 = &eeprom0;
- ethernet0 = &fec;
- ethernet1 = &eqos;
+ ethernet0 = &eqos;
+ ethernet1 = &fec;
rtc0 = &pcf85063;
rtc1 = &bbnsm_rtc;
};
@@ -448,38 +448,38 @@
"WLAN_PERST#", "12V_EN";
/*
- * Controls the WiFi card PD pin which is low active
- * as power down signal. The output-high states, the signal
- * is active, e.g. card is powered down
+	 * Controls the WiFi card's low-active power-down pin. The output-low
+	 * setting leaves the signal inactive, resulting in a high level at
+	 * the power-down pin, i.e. the card stays powered.
*/
wlan-pd-hog {
gpio-hog;
gpios = <4 GPIO_ACTIVE_LOW>;
- output-high;
+ output-low;
line-name = "WLAN_PD#";
};
/*
- * Controls the WiFi card disable pin which is low active
- * as disable signal. The output-high states, the signal
- * is active, e.g. card is disabled
+	 * Controls the WiFi card's low-active disable pin. The output-low
+	 * setting leaves the signal inactive, resulting in a high level at
+	 * the disable pin, i.e. the card stays enabled.
*/
wlan-wdisable-hog {
gpio-hog;
gpios = <5 GPIO_ACTIVE_LOW>;
- output-high;
+ output-low;
line-name = "WLAN_W_DISABLE#";
};
/*
- * Controls the WiFi card reset pin which is low active
- * as reset signal. The output-high states, the signal
- * is active, e.g. card in reset
+	 * Controls the WiFi card's low-active reset pin. The output-low
+	 * setting leaves the signal inactive, resulting in a high level at
+	 * the reset pin, i.e. the card is not held in reset.
*/
wlan-perst-hog {
gpio-hog;
gpios = <6 GPIO_ACTIVE_LOW>;
- output-high;
+ output-low;
line-name = "WLAN_PERST#";
};
};
@@ -755,12 +755,6 @@
>;
};
- pinctrl_pcf85063: pcf85063grp {
- fsl,pins = <
- MX93_PAD_SAI1_RXD0__GPIO1_IO14 0x1000
- >;
- };
-
pinctrl_mipi_csi: mipicsigrp {
fsl,pins = <
MX93_PAD_CCM_CLKO3__CCMSRCGPCMIX_CLKO3 0x051e /* MCLK */
@@ -769,6 +763,12 @@
>;
};
+ pinctrl_pcf85063: pcf85063grp {
+ fsl,pins = <
+ MX93_PAD_SAI1_RXD0__GPIO1_IO14 0x1000
+ >;
+ };
+
pinctrl_pexp_irq: pexpirqgrp {
fsl,pins = <
/* HYS | FSEL_0 | No DSE */
@@ -783,17 +783,17 @@
>;
};
- pinctrl_temp_sensor_som: tempsensorsomgrp {
+ pinctrl_tc9595: tc9595-grp {
fsl,pins = <
- /* HYS | FSEL_0 | no DSE */
- MX93_PAD_SAI1_TXFS__GPIO1_IO11 0x1000
+ /* HYS | PD | FSEL_0 | no DSE */
+ MX93_PAD_CCM_CLKO4__GPIO4_IO29 0x1400
>;
};
- pinctrl_tc9595: tc9595-grp {
+ pinctrl_temp_sensor_som: tempsensorsomgrp {
fsl,pins = <
- /* HYS | PD | FSEL_0 | no DSE */
- MX93_PAD_CCM_CLKO4__GPIO4_IO29 0x1400
+ /* HYS | FSEL_0 | no DSE */
+ MX93_PAD_SAI1_TXFS__GPIO1_IO11 0x1000
>;
};
diff --git a/arch/arm64/boot/dts/freescale/imx93-tqma9352-mba93xxla.dts b/arch/arm64/boot/dts/freescale/imx93-tqma9352-mba93xxla.dts
index 2e953a05c590..9e88c42c3d17 100644
--- a/arch/arm64/boot/dts/freescale/imx93-tqma9352-mba93xxla.dts
+++ b/arch/arm64/boot/dts/freescale/imx93-tqma9352-mba93xxla.dts
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: (GPL-2.0-or-later OR MIT)
/*
- * Copyright (c) 2022-2023 TQ-Systems GmbH <linux@ew.tq-group.com>,
+ * Copyright (c) 2022-2024 TQ-Systems GmbH <linux@ew.tq-group.com>,
* D-82229 Seefeld, Germany.
* Author: Markus Niebel
* Author: Alexander Stein
@@ -26,8 +26,8 @@
aliases {
eeprom0 = &eeprom0;
- ethernet0 = &fec;
- ethernet1 = &eqos;
+ ethernet0 = &eqos;
+ ethernet1 = &fec;
rtc0 = &pcf85063;
rtc1 = &bbnsm_rtc;
};
diff --git a/arch/arm64/boot/dts/freescale/imx93.dtsi b/arch/arm64/boot/dts/freescale/imx93.dtsi
index 56766fdb0b1e..64cd0776b43d 100644
--- a/arch/arm64/boot/dts/freescale/imx93.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx93.dtsi
@@ -1334,6 +1334,14 @@
#index-cells = <1>;
};
+ memory-controller@4e300000 {
+ compatible = "nxp,imx9-memory-controller";
+ reg = <0x4e300000 0x800>, <0x4e301000 0x1000>;
+ reg-names = "ctrl", "inject";
+ interrupts = <GIC_SPI 91 IRQ_TYPE_LEVEL_HIGH>;
+ little-endian;
+ };
+
ddr-pmu@4e300dc0 {
compatible = "fsl,imx93-ddr-pmu";
reg = <0x4e300dc0 0x200>;
diff --git a/arch/arm64/boot/dts/freescale/imx95-15x15-evk.dts b/arch/arm64/boot/dts/freescale/imx95-15x15-evk.dts
new file mode 100644
index 000000000000..514f2429dcbc
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/imx95-15x15-evk.dts
@@ -0,0 +1,1130 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright 2025 NXP
+ */
+
+/dts-v1/;
+
+#include <dt-bindings/i3c/i3c.h>
+#include <dt-bindings/leds/common.h>
+#include <dt-bindings/phy/phy-imx8-pcie.h>
+#include <dt-bindings/pwm/pwm.h>
+#include <dt-bindings/usb/pd.h>
+#include "imx95.dtsi"
+
+#define FALLING_EDGE BIT(0)
+#define RISING_EDGE BIT(1)
+
+#define BRD_SM_CTRL_SD3_WAKE 0x8000
+#define BRD_SM_CTRL_PCIE1_WAKE 0x8001
+#define BRD_SM_CTRL_BT_WAKE 0x8002
+#define BRD_SM_CTRL_PCIE2_WAKE 0x8003
+#define BRD_SM_CTRL_BUTTON 0x8004
+
+/ {
+ compatible = "fsl,imx95-15x15-evk", "fsl,imx95";
+ model = "NXP i.MX95 15X15 board";
+
+ aliases {
+ ethernet0 = &enetc_port0;
+ ethernet1 = &enetc_port1;
+ serial0 = &lpuart1;
+ };
+
+ bt_sco_codec: bt-sco-codec {
+ compatible = "linux,bt-sco";
+ #sound-dai-cells = <1>;
+ };
+
+ chosen {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ stdout-path = &lpuart1;
+ };
+
+ fan0: pwm-fan {
+ compatible = "pwm-fan";
+ #cooling-cells = <2>;
+ cooling-levels = <64 128 192 255>;
+ pwms = <&tpm6 0 4000000 PWM_POLARITY_INVERTED>;
+ };
+
+ reg_1p8v: regulator-1p8v {
+ compatible = "regulator-fixed";
+ regulator-max-microvolt = <1800000>;
+ regulator-min-microvolt = <1800000>;
+ regulator-name = "+V1.8_SW";
+ };
+
+ reg_3p3v: regulator-3p3v {
+ compatible = "regulator-fixed";
+ regulator-max-microvolt = <3300000>;
+ regulator-min-microvolt = <3300000>;
+ regulator-name = "+V3.3_SW";
+ };
+
+ reg_vref_1v8: regulator-adc-vref {
+ compatible = "regulator-fixed";
+ regulator-max-microvolt = <1800000>;
+ regulator-min-microvolt = <1800000>;
+ regulator-name = "vref_1v8";
+ };
+
+ reg_audio_pwr: regulator-audio-pwr {
+ compatible = "regulator-fixed";
+ regulator-always-on;
+ regulator-max-microvolt = <3300000>;
+ regulator-min-microvolt = <3300000>;
+ regulator-name = "audio-pwr";
+ gpio = <&pcal6524 13 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ };
+
+ reg_audio_switch1: regulator-audio-switch1 {
+ compatible = "regulator-fixed";
+ regulator-always-on;
+ regulator-max-microvolt = <3300000>;
+ regulator-min-microvolt = <3300000>;
+ regulator-name = "audio-switch1";
+ gpio = <&pcal6524 0 GPIO_ACTIVE_LOW>;
+ };
+
+ reg_can2_stby: regulator-can2-stby {
+ compatible = "regulator-fixed";
+ regulator-max-microvolt = <3300000>;
+ regulator-min-microvolt = <3300000>;
+ regulator-name = "can2-stby";
+ gpio = <&pcal6524 14 GPIO_ACTIVE_LOW>;
+ };
+
+ reg_m2_pwr: regulator-m2-pwr {
+ compatible = "regulator-fixed";
+ regulator-always-on;
+ regulator-max-microvolt = <3300000>;
+ regulator-min-microvolt = <3300000>;
+ regulator-name = "M.2-power";
+ gpio = <&pcal6524 10 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ };
+
+ reg_usdhc2_vmmc: regulator-usdhc2 {
+ compatible = "regulator-fixed";
+ off-on-delay-us = <12000>;
+ pinctrl-0 = <&pinctrl_reg_usdhc2_vmmc>;
+ pinctrl-names = "default";
+ regulator-max-microvolt = <3300000>;
+ regulator-min-microvolt = <3300000>;
+ regulator-name = "VDD_SD2_3V3";
+ gpio = <&gpio3 7 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ };
+
+ reg_usdhc3_vmmc: regulator-usdhc3 {
+ compatible = "regulator-fixed";
+ regulator-max-microvolt = <3300000>;
+ regulator-min-microvolt = <3300000>;
+ regulator-name = "WLAN_EN";
+ vin-supply = <&reg_m2_pwr>;
+ gpio = <&pcal6524 11 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ /*
+		 * The IW612 wifi chip needs more delay than other wifi chips to
+		 * complete host interface initialization after power-up; otherwise
+		 * its internal state may be unstable and the switch to the SDIO 3.0
+		 * signalling voltage may fail.
+ */
+ startup-delay-us = <20000>;
+ };
+
+ reg_vcc_12v: regulator-vcc-12v {
+ compatible = "regulator-fixed";
+ regulator-max-microvolt = <12000000>;
+ regulator-min-microvolt = <12000000>;
+ regulator-name = "VCC_12V";
+ gpio = <&pcal6524 1 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ };
+
+ reserved-memory {
+ ranges;
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ linux_cma: linux,cma {
+ compatible = "shared-dma-pool";
+ alloc-ranges = <0 0x80000000 0 0x7F000000>;
+ reusable;
+ size = <0 0x3c000000>;
+ linux,cma-default;
+ };
+
+ vdev0vring0: vdev0vring0@88000000 {
+ reg = <0 0x88000000 0 0x8000>;
+ no-map;
+ };
+
+ vdev0vring1: vdev0vring1@88008000 {
+ reg = <0 0x88008000 0 0x8000>;
+ no-map;
+ };
+
+ vdev1vring0: vdev1vring0@88010000 {
+ reg = <0 0x88010000 0 0x8000>;
+ no-map;
+ };
+
+ vdev1vring1: vdev1vring1@88018000 {
+ reg = <0 0x88018000 0 0x8000>;
+ no-map;
+ };
+
+ vdevbuffer: vdevbuffer@88020000 {
+ compatible = "shared-dma-pool";
+ reg = <0 0x88020000 0 0x100000>;
+ no-map;
+ };
+
+ rsc_table: rsc-table@88220000 {
+ reg = <0 0x88220000 0 0x1000>;
+ no-map;
+ };
+
+ vpu_boot: vpu_boot@a0000000 {
+ reg = <0 0xa0000000 0 0x100000>;
+ no-map;
+ };
+ };
+
+ sound-bt-sco {
+ compatible = "simple-audio-card";
+ simple-audio-card,bitclock-inversion;
+ simple-audio-card,bitclock-master = <&btcpu>;
+ simple-audio-card,format = "dsp_a";
+ simple-audio-card,frame-master = <&btcpu>;
+ simple-audio-card,name = "bt-sco-audio";
+
+ simple-audio-card,codec {
+ sound-dai = <&bt_sco_codec 1>;
+ };
+
+ btcpu: simple-audio-card,cpu {
+ dai-tdm-slot-num = <2>;
+ dai-tdm-slot-width = <16>;
+ sound-dai = <&sai1>;
+ };
+ };
+
+ sound-micfil {
+ compatible = "fsl,imx-audio-card";
+ model = "micfil-audio";
+
+ pri-dai-link {
+ format = "i2s";
+ link-name = "micfil hifi";
+
+ cpu {
+ sound-dai = <&micfil>;
+ };
+ };
+ };
+
+ sound-wm8962 {
+ compatible = "fsl,imx-audio-wm8962";
+ audio-codec = <&wm8962>;
+ audio-cpu = <&sai3>;
+ audio-routing = "Headphone Jack", "HPOUTL", "Headphone Jack", "HPOUTR",
+ "Ext Spk", "SPKOUTL", "Ext Spk", "SPKOUTR", "AMIC", "MICBIAS",
+ "IN3R", "AMIC", "IN1R", "AMIC";
+ hp-det-gpio = <&gpio2 21 GPIO_ACTIVE_HIGH>;
+ model = "wm8962-audio";
+ pinctrl-0 = <&pinctrl_hp>;
+ pinctrl-names = "default";
+ };
+
+ sound-xcvr {
+ compatible = "fsl,imx-audio-card";
+ model = "imx-audio-xcvr";
+
+ pri-dai-link {
+ link-name = "XCVR PCM";
+
+ cpu {
+ sound-dai = <&xcvr>;
+ };
+ };
+ };
+
+ usdhc3_pwrseq: usdhc3-pwrseq {
+ compatible = "mmc-pwrseq-simple";
+ pinctrl-0 = <&pinctrl_usdhc3_pwrseq>;
+ pinctrl-names = "default";
+ reset-gpios = <&gpio5 9 GPIO_ACTIVE_LOW>;
+ };
+
+ memory@80000000 {
+ reg = <0x0 0x80000000 0 0x80000000>;
+ device_type = "memory";
+ };
+};
+
+&adc1 {
+ vref-supply = <&reg_vref_1v8>;
+ status = "okay";
+};
+
+&enetc_port0 {
+ phy-handle = <&ethphy0>;
+ phy-mode = "rgmii-id";
+ pinctrl-0 = <&pinctrl_enetc0>;
+ pinctrl-names = "default";
+ status = "okay";
+};
+
+&enetc_port1 {
+ phy-handle = <&ethphy1>;
+ phy-mode = "rgmii-id";
+ pinctrl-0 = <&pinctrl_enetc1>;
+ pinctrl-names = "default";
+ status = "okay";
+};
+
+&flexcan2 {
+ pinctrl-0 = <&pinctrl_flexcan2>;
+ pinctrl-names = "default";
+ xceiver-supply = <&reg_can2_stby>;
+ status = "okay";
+};
+
+&i3c2 {
+ i2c-scl-hz = <400000>;
+ pinctrl-0 = <&pinctrl_i3c2>;
+ pinctrl-names = "default";
+ status = "okay";
+
+ pca9570: gpio@24 {
+ compatible = "nxp,pca9570";
+ reg = <0x24 0 (I2C_FILTER)>;
+ #gpio-cells = <2>;
+ gpio-controller;
+ gpio-line-names = "OUT1", "OUT2", "OUT3", "OUT4";
+ };
+};
+
+&lpi2c2 {
+ clock-frequency = <400000>;
+ pinctrl-0 = <&pinctrl_lpi2c2>;
+ pinctrl-names = "default";
+ status = "okay";
+
+ wm8962: codec@1a {
+ compatible = "wlf,wm8962";
+ reg = <0x1a>;
+ clocks = <&scmi_clk IMX95_CLK_SAI3>;
+ AVDD-supply = <&reg_audio_pwr>;
+ CPVDD-supply = <&reg_audio_pwr>;
+ DBVDD-supply = <&reg_audio_pwr>;
+ DCVDD-supply = <&reg_audio_pwr>;
+ gpio-cfg = <
+ 0x0000
+ 0x0000
+ 0x0000
+ 0x0000
+ 0x0000
+ 0x0000
+ >;
+ MICVDD-supply = <&reg_audio_pwr>;
+ PLLVDD-supply = <&reg_audio_pwr>;
+ SPKVDD1-supply = <&reg_audio_pwr>;
+ SPKVDD2-supply = <&reg_audio_pwr>;
+ };
+
+ pcal6524: gpio@22 {
+ compatible = "nxp,pcal6524";
+ reg = <0x22>;
+ #interrupt-cells = <2>;
+ interrupt-controller;
+ interrupt-parent = <&gpio5>;
+ interrupts = <14 IRQ_TYPE_LEVEL_LOW>;
+ #gpio-cells = <2>;
+ gpio-controller;
+ pinctrl-0 = <&pinctrl_pcal6524>;
+ pinctrl-names = "default";
+ };
+};
+
+&lpi2c3 {
+ clock-frequency = <400000>;
+ pinctrl-0 = <&pinctrl_lpi2c3>;
+ pinctrl-names = "default";
+ status = "okay";
+
+ ptn5110: tcpc@50 {
+ compatible = "nxp,ptn5110", "tcpci";
+ reg = <0x50>;
+ interrupt-parent = <&gpio5>;
+ interrupts = <11 IRQ_TYPE_LEVEL_LOW>;
+ pinctrl-0 = <&pinctrl_ptn5110>;
+ pinctrl-names = "default";
+
+ typec_con: connector {
+ compatible = "usb-c-connector";
+ data-role = "dual";
+ label = "USB-C";
+ op-sink-microwatt = <15000000>;
+ power-role = "dual";
+ self-powered;
+ sink-pdos = <PDO_FIXED(5000, 3000, PDO_FIXED_USB_COMM)
+ PDO_VAR(5000, 20000, 3000)>;
+ source-pdos = <PDO_FIXED(5000, 3000, PDO_FIXED_USB_COMM)>;
+ try-power-role = "sink";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ typec_con_hs: endpoint {
+ remote-endpoint = <&usb3_data_hs>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ typec_con_ss: endpoint {
+ remote-endpoint = <&usb3_data_ss>;
+ };
+ };
+ };
+ };
+ };
+
+ pca9632: led-controller@62 {
+ compatible = "nxp,pca9632";
+ reg = <0x62>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ nxp,inverted-out;
+
+ led_backlight0: led@0 {
+ reg = <0>;
+ color = <LED_COLOR_ID_WHITE>;
+ function = LED_FUNCTION_BACKLIGHT;
+ function-enumerator = <0>;
+ };
+
+ led_backlight1: led@1 {
+ reg = <1>;
+ color = <LED_COLOR_ID_WHITE>;
+ function = LED_FUNCTION_BACKLIGHT;
+ function-enumerator = <1>;
+ };
+ };
+};
+
+&lpi2c4 {
+ clock-frequency = <400000>;
+ pinctrl-0 = <&pinctrl_lpi2c4>;
+ pinctrl-names = "default";
+ status = "okay";
+};
+
+&lpi2c6 {
+ clock-frequency = <100000>;
+ pinctrl-0 = <&pinctrl_lpi2c6>;
+ pinctrl-names = "default";
+ status = "okay";
+};
+
+&lpuart1 {
+ pinctrl-0 = <&pinctrl_uart1>;
+ pinctrl-names = "default";
+ status = "okay";
+};
+
+&lpuart5 {
+ pinctrl-0 = <&pinctrl_uart5>;
+ pinctrl-names = "default";
+ status = "okay";
+
+ bluetooth {
+ compatible = "nxp,88w8987-bt";
+ };
+};
+
+&micfil {
+ assigned-clocks = <&scmi_clk IMX95_CLK_AUDIOPLL1_VCO>,
+ <&scmi_clk IMX95_CLK_AUDIOPLL2_VCO>,
+ <&scmi_clk IMX95_CLK_AUDIOPLL1>,
+ <&scmi_clk IMX95_CLK_AUDIOPLL2>,
+ <&scmi_clk IMX95_CLK_PDM>;
+ assigned-clock-parents = <0>, <0>, <0>, <0>, <&scmi_clk IMX95_CLK_AUDIOPLL1>;
+ assigned-clock-rates = <3932160000>, <3612672000>, <393216000>, <361267200>, <49152000>;
+ #sound-dai-cells = <0>;
+ pinctrl-0 = <&pinctrl_pdm>;
+ pinctrl-names = "default";
+ status = "okay";
+};
+
+&mu7 {
+ status = "okay";
+};
+
+&netc_blk_ctrl {
+ status = "okay";
+};
+
+&netc_bus0 {
+ msi-map = <0x00 &its 0x60 0x1>, //ENETC0 PF
+ <0x10 &its 0x61 0x1>, //ENETC0 VF0
+ <0x20 &its 0x62 0x1>, //ENETC0 VF1
+ <0x40 &its 0x63 0x1>, //ENETC1 PF
+ <0x50 &its 0x65 0x1>, //ENETC1 VF0
+ <0x60 &its 0x66 0x1>, //ENETC1 VF1
+ <0x80 &its 0x64 0x1>, //ENETC2 PF
+ <0xc0 &its 0x67 0x1>;
+};
+
+&netc_emdio {
+ pinctrl-0 = <&pinctrl_emdio>;
+ pinctrl-names = "default";
+ status = "okay";
+
+ ethphy0: ethernet-phy@1 {
+ reg = <1>;
+ reset-assert-us = <10000>;
+ reset-deassert-us = <80000>;
+ reset-gpios = <&pcal6524 4 GPIO_ACTIVE_LOW>;
+ realtek,clkout-disable;
+ };
+
+ ethphy1: ethernet-phy@2 {
+ reg = <2>;
+ reset-assert-us = <10000>;
+ reset-deassert-us = <80000>;
+ reset-gpios = <&pcal6524 5 GPIO_ACTIVE_LOW>;
+ realtek,clkout-disable;
+ };
+};
+
+&netc_timer {
+ status = "okay";
+};
+
+&netcmix_blk_ctrl {
+ status = "okay";
+};
+
+&pcie0 {
+ pinctrl-0 = <&pinctrl_pcie0>;
+ pinctrl-names = "default";
+ reset-gpio = <&gpio5 13 GPIO_ACTIVE_LOW>;
+ vpcie-supply = <&reg_m2_pwr>;
+ status = "okay";
+};
+
+&sai1 {
+ assigned-clocks = <&scmi_clk IMX95_CLK_AUDIOPLL1_VCO>,
+ <&scmi_clk IMX95_CLK_AUDIOPLL2_VCO>,
+ <&scmi_clk IMX95_CLK_AUDIOPLL1>,
+ <&scmi_clk IMX95_CLK_AUDIOPLL2>,
+ <&scmi_clk IMX95_CLK_SAI1>;
+ assigned-clock-parents = <0>, <0>, <0>, <0>, <&scmi_clk IMX95_CLK_AUDIOPLL1>;
+ assigned-clock-rates = <3932160000>, <3612672000>, <393216000>, <361267200>, <12288000>;
+ #sound-dai-cells = <0>;
+ pinctrl-0 = <&pinctrl_sai1>;
+ pinctrl-names = "default";
+ fsl,sai-mclk-direction-output;
+ status = "okay";
+};
+
+&sai3 {
+ assigned-clocks = <&scmi_clk IMX95_CLK_AUDIOPLL1_VCO>,
+ <&scmi_clk IMX95_CLK_AUDIOPLL2_VCO>,
+ <&scmi_clk IMX95_CLK_AUDIOPLL1>,
+ <&scmi_clk IMX95_CLK_AUDIOPLL2>,
+ <&scmi_clk IMX95_CLK_SAI3>;
+ assigned-clock-parents = <0>, <0>, <0>, <0>, <&scmi_clk IMX95_CLK_AUDIOPLL1>;
+ assigned-clock-rates = <3932160000>, <3612672000>, <393216000>, <361267200>, <12288000>;
+ #sound-dai-cells = <0>;
+ pinctrl-0 = <&pinctrl_sai3>;
+ pinctrl-names = "default";
+ fsl,sai-mclk-direction-output;
+ status = "okay";
+};
+
+&scmi_iomuxc {
+ pinctrl_emdio: emdiogrp {
+ fsl,pins = <
+ IMX95_PAD_ENET2_MDC__NETCMIX_TOP_NETC_MDC 0x57e
+ IMX95_PAD_ENET2_MDIO__NETCMIX_TOP_NETC_MDIO 0x97e
+ >;
+ };
+
+ pinctrl_enetc0: enetc0grp {
+ fsl,pins = <
+ IMX95_PAD_ENET1_TD3__NETCMIX_TOP_ETH0_RGMII_TD3 0x57e
+ IMX95_PAD_ENET1_TD2__NETCMIX_TOP_ETH0_RGMII_TD2 0x57e
+ IMX95_PAD_ENET1_TD1__NETCMIX_TOP_ETH0_RGMII_TD1 0x57e
+ IMX95_PAD_ENET1_TD0__NETCMIX_TOP_ETH0_RGMII_TD0 0x57e
+ IMX95_PAD_ENET1_TX_CTL__NETCMIX_TOP_ETH0_RGMII_TX_CTL 0x57e
+ IMX95_PAD_ENET1_TXC__NETCMIX_TOP_ETH0_RGMII_TX_CLK 0x58e
+ IMX95_PAD_ENET1_RX_CTL__NETCMIX_TOP_ETH0_RGMII_RX_CTL 0x57e
+ IMX95_PAD_ENET1_RXC__NETCMIX_TOP_ETH0_RGMII_RX_CLK 0x58e
+ IMX95_PAD_ENET1_RD0__NETCMIX_TOP_ETH0_RGMII_RD0 0x57e
+ IMX95_PAD_ENET1_RD1__NETCMIX_TOP_ETH0_RGMII_RD1 0x57e
+ IMX95_PAD_ENET1_RD2__NETCMIX_TOP_ETH0_RGMII_RD2 0x57e
+ IMX95_PAD_ENET1_RD3__NETCMIX_TOP_ETH0_RGMII_RD3 0x57e
+ >;
+ };
+
+ pinctrl_enetc1: enetc1grp {
+ fsl,pins = <
+ IMX95_PAD_ENET2_TD3__NETCMIX_TOP_ETH1_RGMII_TD3 0x57e
+ IMX95_PAD_ENET2_TD2__NETCMIX_TOP_ETH1_RGMII_TD2 0x57e
+ IMX95_PAD_ENET2_TD1__NETCMIX_TOP_ETH1_RGMII_TD1 0x57e
+ IMX95_PAD_ENET2_TD0__NETCMIX_TOP_ETH1_RGMII_TD0 0x57e
+ IMX95_PAD_ENET2_TX_CTL__NETCMIX_TOP_ETH1_RGMII_TX_CTL 0x57e
+ IMX95_PAD_ENET2_TXC__NETCMIX_TOP_ETH1_RGMII_TX_CLK 0x58e
+ IMX95_PAD_ENET2_RX_CTL__NETCMIX_TOP_ETH1_RGMII_RX_CTL 0x57e
+ IMX95_PAD_ENET2_RXC__NETCMIX_TOP_ETH1_RGMII_RX_CLK 0x58e
+ IMX95_PAD_ENET2_RD0__NETCMIX_TOP_ETH1_RGMII_RD0 0x57e
+ IMX95_PAD_ENET2_RD1__NETCMIX_TOP_ETH1_RGMII_RD1 0x57e
+ IMX95_PAD_ENET2_RD2__NETCMIX_TOP_ETH1_RGMII_RD2 0x57e
+ IMX95_PAD_ENET2_RD3__NETCMIX_TOP_ETH1_RGMII_RD3 0x57e
+ >;
+ };
+
+ pinctrl_flexcan2: flexcan2grp {
+ fsl,pins = <
+ IMX95_PAD_GPIO_IO25__CAN2_TX 0x39e
+ IMX95_PAD_GPIO_IO27__CAN2_RX 0x39e
+ >;
+ };
+
+ pinctrl_hp: hpgrp {
+ fsl,pins = <
+ IMX95_PAD_GPIO_IO21__GPIO2_IO_BIT21 0x31e
+ >;
+ };
+
+ pinctrl_i3c2: i3c2grp {
+ fsl,pins = <
+ IMX95_PAD_ENET1_MDC__I3C2_SCL 0x40000186
+ IMX95_PAD_ENET1_MDIO__I3C2_SDA 0x40000186
+ >;
+ };
+
+ pinctrl_lpi2c1: lpi2c1grp {
+ fsl,pins = <
+ IMX95_PAD_I2C1_SCL__AONMIX_TOP_LPI2C1_SCL 0x40000b9e
+ IMX95_PAD_I2C1_SDA__AONMIX_TOP_LPI2C1_SDA 0x40000b9e
+ >;
+ };
+
+ pinctrl_lpi2c2: lpi2c2grp {
+ fsl,pins = <
+ IMX95_PAD_I2C2_SCL__AONMIX_TOP_LPI2C2_SCL 0x40000b9e
+ IMX95_PAD_I2C2_SDA__AONMIX_TOP_LPI2C2_SDA 0x40000b9e
+ >;
+ };
+
+ pinctrl_lpi2c3: lpi2c3grp {
+ fsl,pins = <
+ IMX95_PAD_GPIO_IO28__LPI2C3_SDA 0x40000b9e
+ IMX95_PAD_GPIO_IO29__LPI2C3_SCL 0x40000b9e
+ >;
+ };
+
+ pinctrl_lpi2c4: lpi2c4grp {
+ fsl,pins = <
+ IMX95_PAD_GPIO_IO30__LPI2C4_SDA 0x40000b9e
+ IMX95_PAD_GPIO_IO31__LPI2C4_SCL 0x40000b9e
+ >;
+ };
+
+ pinctrl_lpi2c6: lpi2c6grp {
+ fsl,pins = <
+ IMX95_PAD_GPIO_IO02__LPI2C6_SDA 0x40000b9e
+ IMX95_PAD_GPIO_IO03__LPI2C6_SCL 0x40000b9e
+ >;
+ };
+
+ pinctrl_mipi_dsi_csi: mipidsigrp {
+ fsl,pins = <
+ IMX95_PAD_XSPI1_DATA6__GPIO5_IO_BIT6 0x31e
+ >;
+ };
+
+ pinctrl_pcal6524: pcal6524grp {
+ fsl,pins = <
+ IMX95_PAD_GPIO_IO34__GPIO5_IO_BIT14 0x31e
+ >;
+ };
+
+ pinctrl_pcie0: pcie0grp {
+ fsl,pins = <
+ IMX95_PAD_GPIO_IO32__HSIOMIX_TOP_PCIE1_CLKREQ_B 0x40000b1e
+ IMX95_PAD_GPIO_IO33__GPIO5_IO_BIT13 0x31e
+ >;
+ };
+
+ pinctrl_pdm: pdmgrp {
+ fsl,pins = <
+ IMX95_PAD_PDM_CLK__AONMIX_TOP_PDM_CLK 0x31e
+ IMX95_PAD_PDM_BIT_STREAM0__AONMIX_TOP_PDM_BIT_STREAM_BIT0 0x31e
+ >;
+ };
+
+ pinctrl_ptn5110: ptn5110grp {
+ fsl,pins = <
+ IMX95_PAD_XSPI1_SS1_B__GPIO5_IO_BIT11 0x31e
+ >;
+ };
+
+ pinctrl_reg_usdhc2_vmmc: regusdhc2vmmcgrp {
+ fsl,pins = <
+ IMX95_PAD_SD2_RESET_B__GPIO3_IO_BIT7 0x31e
+ >;
+ };
+
+ pinctrl_sai1: sai1grp {
+ fsl,pins = <
+ IMX95_PAD_SAI1_RXD0__AONMIX_TOP_SAI1_RX_DATA_BIT0 0x31e
+ IMX95_PAD_SAI1_TXC__AONMIX_TOP_SAI1_TX_BCLK 0x31e
+ IMX95_PAD_SAI1_TXFS__AONMIX_TOP_SAI1_TX_SYNC 0x31e
+ IMX95_PAD_SAI1_TXD0__AONMIX_TOP_SAI1_TX_DATA_BIT0 0x31e
+ >;
+ };
+
+ pinctrl_sai2: sai2grp {
+ fsl,pins = <
+ IMX95_PAD_ENET2_MDIO__NETCMIX_TOP_SAI2_RX_BCLK 0x31e
+ IMX95_PAD_ENET2_MDC__NETCMIX_TOP_SAI2_RX_SYNC 0x31e
+ IMX95_PAD_ENET2_TD3__NETCMIX_TOP_SAI2_RX_DATA_BIT0 0x31e
+ IMX95_PAD_ENET2_TD2__NETCMIX_TOP_SAI2_RX_DATA_BIT1 0x31e
+ IMX95_PAD_ENET2_TXC__NETCMIX_TOP_SAI2_TX_BCLK 0x31e
+ IMX95_PAD_ENET2_TX_CTL__NETCMIX_TOP_SAI2_TX_SYNC 0x31e
+ IMX95_PAD_ENET2_RX_CTL__NETCMIX_TOP_SAI2_TX_DATA_BIT0 0x31e
+ IMX95_PAD_ENET2_RXC__NETCMIX_TOP_SAI2_TX_DATA_BIT1 0x31e
+ IMX95_PAD_ENET2_RD0__NETCMIX_TOP_SAI2_TX_DATA_BIT2 0x31e
+ IMX95_PAD_ENET2_RD1__NETCMIX_TOP_SAI2_TX_DATA_BIT3 0x31e
+ IMX95_PAD_ENET2_RD2__NETCMIX_TOP_SAI2_MCLK 0x31e
+ >;
+ };
+
+ pinctrl_sai3: sai3grp {
+ fsl,pins = <
+ IMX95_PAD_GPIO_IO17__SAI3_MCLK 0x31e
+ IMX95_PAD_GPIO_IO16__SAI3_TX_BCLK 0x31e
+ IMX95_PAD_GPIO_IO26__SAI3_TX_SYNC 0x31e
+ IMX95_PAD_GPIO_IO20__SAI3_RX_DATA_BIT0 0x31e
+ IMX95_PAD_GPIO_IO19__SAI3_TX_DATA_BIT0 0x31e
+ >;
+ };
+
+ pinctrl_spdif: spdifgrp {
+ fsl,pins = <
+ IMX95_PAD_GPIO_IO22__SPDIF_IN 0x3fe
+ IMX95_PAD_GPIO_IO23__SPDIF_OUT 0x3fe
+ >;
+ };
+
+ pinctrl_tpm3: tpm3grp {
+ fsl,pins = <
+ IMX95_PAD_CCM_CLKO2__GPIO3_IO_BIT27 0x51e
+ >;
+ };
+
+ pinctrl_tpm6: tpm6grp {
+ fsl,pins = <
+ IMX95_PAD_GPIO_IO08__TPM6_CH0 0x51e
+ >;
+ };
+
+ pinctrl_uart1: uart1grp {
+ fsl,pins = <
+ IMX95_PAD_UART1_RXD__AONMIX_TOP_LPUART1_RX 0x31e
+ IMX95_PAD_UART1_TXD__AONMIX_TOP_LPUART1_TX 0x31e
+ >;
+ };
+
+ pinctrl_uart5: uart5grp {
+ fsl,pins = <
+ IMX95_PAD_DAP_TDO_TRACESWO__LPUART5_TX 0x31e
+ IMX95_PAD_DAP_TDI__LPUART5_RX 0x31e
+ IMX95_PAD_DAP_TMS_SWDIO__LPUART5_RTS_B 0x31e
+ IMX95_PAD_DAP_TCLK_SWCLK__LPUART5_CTS_B 0x31e
+ >;
+ };
+
+ pinctrl_usdhc1: usdhc1grp {
+ fsl,pins = <
+ IMX95_PAD_SD1_CLK__USDHC1_CLK 0x158e
+ IMX95_PAD_SD1_CMD__USDHC1_CMD 0x138e
+ IMX95_PAD_SD1_DATA0__USDHC1_DATA0 0x138e
+ IMX95_PAD_SD1_DATA1__USDHC1_DATA1 0x138e
+ IMX95_PAD_SD1_DATA2__USDHC1_DATA2 0x138e
+ IMX95_PAD_SD1_DATA3__USDHC1_DATA3 0x138e
+ IMX95_PAD_SD1_DATA4__USDHC1_DATA4 0x138e
+ IMX95_PAD_SD1_DATA5__USDHC1_DATA5 0x138e
+ IMX95_PAD_SD1_DATA6__USDHC1_DATA6 0x138e
+ IMX95_PAD_SD1_DATA7__USDHC1_DATA7 0x138e
+ IMX95_PAD_SD1_STROBE__USDHC1_STROBE 0x158e
+ >;
+ };
+
+ pinctrl_usdhc1_100mhz: usdhc1-100mhzgrp {
+ fsl,pins = <
+ IMX95_PAD_SD1_CLK__USDHC1_CLK 0x158e
+ IMX95_PAD_SD1_CMD__USDHC1_CMD 0x138e
+ IMX95_PAD_SD1_DATA0__USDHC1_DATA0 0x138e
+ IMX95_PAD_SD1_DATA1__USDHC1_DATA1 0x138e
+ IMX95_PAD_SD1_DATA2__USDHC1_DATA2 0x138e
+ IMX95_PAD_SD1_DATA3__USDHC1_DATA3 0x138e
+ IMX95_PAD_SD1_DATA4__USDHC1_DATA4 0x138e
+ IMX95_PAD_SD1_DATA5__USDHC1_DATA5 0x138e
+ IMX95_PAD_SD1_DATA6__USDHC1_DATA6 0x138e
+ IMX95_PAD_SD1_DATA7__USDHC1_DATA7 0x138e
+ IMX95_PAD_SD1_STROBE__USDHC1_STROBE 0x158e
+ >;
+ };
+
+ pinctrl_usdhc1_200mhz: usdhc1-200mhzgrp {
+ fsl,pins = <
+ IMX95_PAD_SD1_CLK__USDHC1_CLK 0x15fe
+ IMX95_PAD_SD1_CMD__USDHC1_CMD 0x13fe
+ IMX95_PAD_SD1_DATA0__USDHC1_DATA0 0x13fe
+ IMX95_PAD_SD1_DATA1__USDHC1_DATA1 0x13fe
+ IMX95_PAD_SD1_DATA2__USDHC1_DATA2 0x13fe
+ IMX95_PAD_SD1_DATA3__USDHC1_DATA3 0x13fe
+ IMX95_PAD_SD1_DATA4__USDHC1_DATA4 0x13fe
+ IMX95_PAD_SD1_DATA5__USDHC1_DATA5 0x13fe
+ IMX95_PAD_SD1_DATA6__USDHC1_DATA6 0x13fe
+ IMX95_PAD_SD1_DATA7__USDHC1_DATA7 0x13fe
+ IMX95_PAD_SD1_STROBE__USDHC1_STROBE 0x15fe
+ >;
+ };
+
+ pinctrl_usdhc2_gpio: usdhc2gpiogrp {
+ fsl,pins = <
+ IMX95_PAD_SD2_CD_B__GPIO3_IO_BIT0 0x31e
+ >;
+ };
+
+ pinctrl_usdhc2: usdhc2grp {
+ fsl,pins = <
+ IMX95_PAD_SD2_CLK__USDHC2_CLK 0x158e
+ IMX95_PAD_SD2_CMD__USDHC2_CMD 0x138e
+ IMX95_PAD_SD2_DATA0__USDHC2_DATA0 0x138e
+ IMX95_PAD_SD2_DATA1__USDHC2_DATA1 0x138e
+ IMX95_PAD_SD2_DATA2__USDHC2_DATA2 0x138e
+ IMX95_PAD_SD2_DATA3__USDHC2_DATA3 0x138e
+ IMX95_PAD_SD2_VSELECT__USDHC2_VSELECT 0x51e
+ >;
+ };
+
+ pinctrl_usdhc2_100mhz: usdhc2-100mhzgrp {
+ fsl,pins = <
+ IMX95_PAD_SD2_CLK__USDHC2_CLK 0x158e
+ IMX95_PAD_SD2_CMD__USDHC2_CMD 0x138e
+ IMX95_PAD_SD2_DATA0__USDHC2_DATA0 0x138e
+ IMX95_PAD_SD2_DATA1__USDHC2_DATA1 0x138e
+ IMX95_PAD_SD2_DATA2__USDHC2_DATA2 0x138e
+ IMX95_PAD_SD2_DATA3__USDHC2_DATA3 0x138e
+ IMX95_PAD_SD2_VSELECT__USDHC2_VSELECT 0x51e
+ >;
+ };
+
+ pinctrl_usdhc2_200mhz: usdhc2-200mhzgrp {
+ fsl,pins = <
+ IMX95_PAD_SD2_CLK__USDHC2_CLK 0x15fe
+ IMX95_PAD_SD2_CMD__USDHC2_CMD 0x13fe
+ IMX95_PAD_SD2_DATA0__USDHC2_DATA0 0x13fe
+ IMX95_PAD_SD2_DATA1__USDHC2_DATA1 0x13fe
+ IMX95_PAD_SD2_DATA2__USDHC2_DATA2 0x13fe
+ IMX95_PAD_SD2_DATA3__USDHC2_DATA3 0x13fe
+ IMX95_PAD_SD2_VSELECT__USDHC2_VSELECT 0x51e
+ >;
+ };
+
+ pinctrl_usdhc3: usdhc3grp {
+ fsl,pins = <
+ IMX95_PAD_SD3_CLK__USDHC3_CLK 0x158e
+ IMX95_PAD_SD3_CMD__USDHC3_CMD 0x138e
+ IMX95_PAD_SD3_DATA0__USDHC3_DATA0 0x138e
+ IMX95_PAD_SD3_DATA1__USDHC3_DATA1 0x138e
+ IMX95_PAD_SD3_DATA2__USDHC3_DATA2 0x138e
+ IMX95_PAD_SD3_DATA3__USDHC3_DATA3 0x138e
+ >;
+ };
+
+ pinctrl_usdhc3_pwrseq: usdhc3pwrseqgrp {
+ fsl,pins = <
+ IMX95_PAD_XSPI1_SCLK__GPIO5_IO_BIT9 0x31e
+ >;
+ };
+
+ pinctrl_usdhc3_100mhz: usdhc3-100mhzgrp {
+ fsl,pins = <
+ IMX95_PAD_SD3_CLK__USDHC3_CLK 0x158e
+ IMX95_PAD_SD3_CMD__USDHC3_CMD 0x138e
+ IMX95_PAD_SD3_DATA0__USDHC3_DATA0 0x138e
+ IMX95_PAD_SD3_DATA1__USDHC3_DATA1 0x138e
+ IMX95_PAD_SD3_DATA2__USDHC3_DATA2 0x138e
+ IMX95_PAD_SD3_DATA3__USDHC3_DATA3 0x138e
+ >;
+ };
+
+ pinctrl_usdhc3_200mhz: usdhc3-200mhzgrp {
+ fsl,pins = <
+ IMX95_PAD_SD3_CLK__USDHC3_CLK 0x15fe
+ IMX95_PAD_SD3_CMD__USDHC3_CMD 0x13fe
+ IMX95_PAD_SD3_DATA0__USDHC3_DATA0 0x13fe
+ IMX95_PAD_SD3_DATA1__USDHC3_DATA1 0x13fe
+ IMX95_PAD_SD3_DATA2__USDHC3_DATA2 0x13fe
+ IMX95_PAD_SD3_DATA3__USDHC3_DATA3 0x13fe
+ >;
+ };
+};
+
+&scmi_misc {
+ nxp,ctrl-ids = <BRD_SM_CTRL_SD3_WAKE 1>,
+ <BRD_SM_CTRL_PCIE1_WAKE 1>,
+ <BRD_SM_CTRL_BT_WAKE 1>,
+ <BRD_SM_CTRL_PCIE2_WAKE 1>,
+ <BRD_SM_CTRL_BUTTON 1>;
+};
+
+&thermal_zones {
+ a55-thermal {
+ cooling-maps {
+ map1 {
+ cooling-device = <&fan0 0 1>;
+ trip = <&atrip2>;
+ };
+
+ map2 {
+ cooling-device = <&fan0 1 2>;
+ trip = <&atrip3>;
+ };
+
+ map3 {
+ cooling-device = <&fan0 2 3>;
+ trip = <&atrip4>;
+ };
+ };
+
+ trips {
+ atrip2: trip2 {
+ hysteresis = <2000>;
+ temperature = <55000>;
+ type = "active";
+ };
+
+ atrip3: trip3 {
+ hysteresis = <2000>;
+ temperature = <65000>;
+ type = "active";
+ };
+
+ atrip4: trip4 {
+ hysteresis = <2000>;
+ temperature = <75000>;
+ type = "active";
+ };
+ };
+ };
+
+ pf09-thermal {
+ polling-delay = <2000>;
+ polling-delay-passive = <250>;
+ thermal-sensors = <&scmi_sensor 2>;
+
+ trips {
+ pf09_alert: trip0 {
+ hysteresis = <2000>;
+ temperature = <140000>;
+ type = "passive";
+ };
+
+ pf09_crit: trip1 {
+ hysteresis = <2000>;
+ temperature = <155000>;
+ type = "critical";
+ };
+ };
+ };
+
+ pf53arm-thermal {
+ polling-delay = <2000>;
+ polling-delay-passive = <250>;
+ thermal-sensors = <&scmi_sensor 4>;
+
+ cooling-maps {
+ map0 {
+ cooling-device = <&A55_0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&A55_1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&A55_2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&A55_3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&A55_4 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&A55_5 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ trip = <&pf5301_alert>;
+ };
+ };
+
+ trips {
+ pf5301_alert: trip0 {
+ hysteresis = <2000>;
+ temperature = <140000>;
+ type = "passive";
+ };
+
+ pf5301_crit: trip1 {
+ hysteresis = <2000>;
+ temperature = <155000>;
+ type = "critical";
+ };
+ };
+ };
+
+ pf53soc-thermal {
+ polling-delay = <2000>;
+ polling-delay-passive = <250>;
+ thermal-sensors = <&scmi_sensor 3>;
+
+ trips {
+ pf5302_alert: trip0 {
+ hysteresis = <2000>;
+ temperature = <140000>;
+ type = "passive";
+ };
+
+ pf5302_crit: trip1 {
+ hysteresis = <2000>;
+ temperature = <155000>;
+ type = "critical";
+ };
+ };
+ };
+};
+
+&tpm3 {
+ pinctrl-0 = <&pinctrl_tpm3>;
+ pinctrl-names = "default";
+ status = "okay";
+};
+
+&tpm6 {
+ pinctrl-0 = <&pinctrl_tpm6>;
+ pinctrl-names = "default";
+ status = "okay";
+};
+
+&usb3 {
+ status = "okay";
+};
+
+&usb3_dwc3 {
+ adp-disable;
+ dr_mode = "otg";
+ hnp-disable;
+ role-switch-default-mode = "peripheral";
+ srp-disable;
+ usb-role-switch;
+ snps,dis-u1-entry-quirk;
+ snps,dis-u2-entry-quirk;
+ status = "okay";
+
+ port {
+ usb3_data_hs: endpoint {
+ remote-endpoint = <&typec_con_hs>;
+ };
+ };
+};
+
+&usb3_phy {
+ orientation-switch;
+ fsl,phy-tx-preemp-amp-tune-microamp = <600>;
+ status = "okay";
+
+ port {
+ usb3_data_ss: endpoint {
+ remote-endpoint = <&typec_con_ss>;
+ };
+ };
+};
+
+&usdhc1 {
+ bus-width = <8>;
+ non-removable;
+ no-sd;
+ no-sdio;
+ pinctrl-0 = <&pinctrl_usdhc1>;
+ pinctrl-1 = <&pinctrl_usdhc1_100mhz>;
+ pinctrl-2 = <&pinctrl_usdhc1_200mhz>;
+ pinctrl-3 = <&pinctrl_usdhc1>;
+ pinctrl-names = "default", "state_100mhz", "state_200mhz", "sleep";
+ fsl,tuning-step = <1>;
+ status = "okay";
+};
+
+&usdhc2 {
+ bus-width = <4>;
+ cd-gpios = <&gpio3 00 GPIO_ACTIVE_LOW>;
+ pinctrl-0 = <&pinctrl_usdhc2>, <&pinctrl_usdhc2_gpio>;
+ pinctrl-1 = <&pinctrl_usdhc2_100mhz>, <&pinctrl_usdhc2_gpio>;
+ pinctrl-2 = <&pinctrl_usdhc2_200mhz>, <&pinctrl_usdhc2_gpio>;
+ pinctrl-3 = <&pinctrl_usdhc2>, <&pinctrl_usdhc2_gpio>;
+ pinctrl-names = "default", "state_100mhz", "state_200mhz", "sleep";
+ vmmc-supply = <&reg_usdhc2_vmmc>;
+ fsl,tuning-step = <1>;
+ status = "okay";
+};
+
+&usdhc3 {
+ bus-width = <4>;
+ keep-power-in-suspend;
+ mmc-pwrseq = <&usdhc3_pwrseq>;
+ non-removable;
+ pinctrl-0 = <&pinctrl_usdhc3>;
+ pinctrl-1 = <&pinctrl_usdhc3_100mhz>;
+ pinctrl-2 = <&pinctrl_usdhc3_200mhz>;
+ pinctrl-3 = <&pinctrl_usdhc3>;
+ pinctrl-names = "default", "state_100mhz", "state_200mhz", "sleep";
+ vmmc-supply = <&reg_usdhc3_vmmc>;
+ wakeup-source;
+ status = "okay";
+};
+
+&wdog3 {
+ status = "okay";
+};
+
+&xcvr {
+ clocks = <&scmi_clk IMX95_CLK_BUSWAKEUP>,
+ <&scmi_clk IMX95_CLK_SPDIF>,
+ <&dummy>,
+ <&scmi_clk IMX95_CLK_AUDIOXCVR>,
+ <&scmi_clk IMX95_CLK_AUDIOPLL1>,
+ <&scmi_clk IMX95_CLK_AUDIOPLL2>;
+ clock-names = "ipg", "phy", "spba", "pll_ipg", "pll8k", "pll11k";
+ assigned-clocks = <&scmi_clk IMX95_CLK_AUDIOPLL1_VCO>,
+ <&scmi_clk IMX95_CLK_AUDIOPLL2_VCO>,
+ <&scmi_clk IMX95_CLK_AUDIOPLL1>,
+ <&scmi_clk IMX95_CLK_AUDIOPLL2>,
+ <&scmi_clk IMX95_CLK_SPDIF>,
+ <&scmi_clk IMX95_CLK_AUDIOXCVR>;
+ assigned-clock-parents = <0>, <0>, <0>, <0>,
+ <&scmi_clk IMX95_CLK_AUDIOPLL1>,
+ <&scmi_clk IMX95_CLK_SYSPLL1_PFD1_DIV2>;
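+ /* AUDIOPLL1/AUDIOPLL2 rates serve the 8 kHz (pll8k) and 11.025 kHz (pll11k) sample-rate families */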
+ assigned-clock-rates = <3932160000>, <3612672000>,
+ <393216000>, <361267200>,
+ <12288000>, <0>;
+ #sound-dai-cells = <0>;
+ pinctrl-0 = <&pinctrl_spdif>;
+ pinctrl-names = "default";
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/freescale/imx95-19x19-evk.dts b/arch/arm64/boot/dts/freescale/imx95-19x19-evk.dts
index 8bc066c3760c..25ac331f0318 100644
--- a/arch/arm64/boot/dts/freescale/imx95-19x19-evk.dts
+++ b/arch/arm64/boot/dts/freescale/imx95-19x19-evk.dts
@@ -6,6 +6,7 @@
/dts-v1/;
#include <dt-bindings/pwm/pwm.h>
+#include <dt-bindings/usb/pd.h>
#include "imx95.dtsi"
#define FALLING_EDGE 1
@@ -317,6 +318,48 @@
interrupt-parent = <&gpio5>;
interrupts = <16 IRQ_TYPE_LEVEL_LOW>;
};
+
+ ptn5110: tcpc@50 {
+ compatible = "nxp,ptn5110", "tcpci";
+ reg = <0x50>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_typec>;
+ interrupt-parent = <&gpio5>;
+ interrupts = <14 IRQ_TYPE_LEVEL_LOW>;
+
+ typec_con: connector {
+ compatible = "usb-c-connector";
+ label = "USB-C";
+ power-role = "dual";
+ data-role = "dual";
+ try-power-role = "sink";
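+ /* PDO_FIXED(mV, mA, flags): advertise a 5 V / 3 A fixed source PDO and a 5 V sink PDO */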
+ source-pdos = <PDO_FIXED(5000, 3000, PDO_FIXED_USB_COMM)>;
+ sink-pdos = <PDO_FIXED(5000, 0, PDO_FIXED_USB_COMM)>;
+ op-sink-microwatt = <0>;
+ self-powered;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ typec_con_hs: endpoint {
+ remote-endpoint = <&usb3_data_hs>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ typec_con_ss: endpoint {
+ remote-endpoint = <&usb3_data_ss>;
+ };
+ };
+ };
+ };
+ };
};
&lpuart1 {
@@ -418,6 +461,40 @@
status = "okay";
};
+&usb3 {
+ status = "okay";
+};
+
+&usb3_dwc3 {
+ dr_mode = "otg";
+ hnp-disable;
+ srp-disable;
+ adp-disable;
+ usb-role-switch;
+ role-switch-default-mode = "peripheral";
+ snps,dis-u1-entry-quirk;
+ snps,dis-u2-entry-quirk;
+ status = "okay";
+
+ port {
+ usb3_data_hs: endpoint {
+ remote-endpoint = <&typec_con_hs>;
+ };
+ };
+};
+
+&usb3_phy {
+ fsl,phy-tx-preemp-amp-tune-microamp = <600>;
+ orientation-switch;
+ status = "okay";
+
+ port {
+ usb3_data_ss: endpoint {
+ remote-endpoint = <&typec_con_ss>;
+ };
+ };
+};
+
&usdhc1 {
pinctrl-names = "default", "state_100mhz", "state_200mhz", "sleep";
pinctrl-0 = <&pinctrl_usdhc1>;
@@ -676,6 +753,12 @@
>;
};
+ pinctrl_typec: typecgrp {
+ fsl,pins = <
+ IMX95_PAD_GPIO_IO34__GPIO5_IO_BIT14 0x31e
+ >;
+ };
+
pinctrl_usdhc2_gpio: usdhc2gpiogrp {
fsl,pins = <
IMX95_PAD_SD2_CD_B__GPIO3_IO_BIT0 0x31e
diff --git a/arch/arm64/boot/dts/freescale/imx95.dtsi b/arch/arm64/boot/dts/freescale/imx95.dtsi
index 6b8470cb3461..9bb26b466a06 100644
--- a/arch/arm64/boot/dts/freescale/imx95.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx95.dtsi
@@ -291,6 +291,13 @@
clock-output-names = "sai5_mclk";
};
+ clk_sys100m: clock-sys100m {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <100000000>;
+ clock-output-names = "clk_sys100m";
+ };
+
osc_24m: clock-24m {
compatible = "fixed-clock";
#clock-cells = <0>;
@@ -673,6 +680,19 @@
status = "disabled";
};
+ i3c2: i3c@42520000 {
+ compatible = "silvaco,i3c-master-v1";
+ reg = <0x42520000 0x10000>;
+ interrupts = <GIC_SPI 57 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <3>;
+ #size-cells = <0>;
+ clocks = <&scmi_clk IMX95_CLK_BUSAON>,
+ <&scmi_clk IMX95_CLK_I3C2>,
+ <&scmi_clk IMX95_CLK_I3C2SLOW>;
+ clock-names = "pclk", "fast_clk", "slow_clk";
+ status = "disabled";
+ };
+
lpi2c3: i2c@42530000 {
compatible = "fsl,imx95-lpi2c", "fsl,imx7ulp-lpi2c";
reg = <0x42530000 0x10000>;
@@ -1245,6 +1265,19 @@
status = "disabled";
};
+ i3c1: i3c@44330000 {
+ compatible = "silvaco,i3c-master-v1";
+ reg = <0x44330000 0x10000>;
+ interrupts = <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <3>;
+ #size-cells = <0>;
+ clocks = <&scmi_clk IMX95_CLK_BUSAON>,
+ <&scmi_clk IMX95_CLK_I3C1>,
+ <&scmi_clk IMX95_CLK_I3C1SLOW>;
+ clock-names = "pclk", "fast_clk", "slow_clk";
+ status = "disabled";
+ };
+
lpi2c1: i2c@44340000 {
compatible = "fsl,imx95-lpi2c", "fsl,imx7ulp-lpi2c";
reg = <0x44340000 0x10000>;
@@ -1379,6 +1412,7 @@
<GIC_SPI 201 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&scmi_clk IMX95_CLK_ADC>;
clock-names = "ipg";
+ #io-channel-cells = <1>;
status = "disabled";
};
@@ -1537,6 +1571,56 @@
};
};
+ usb3: usb@4c010010 {
+ compatible = "fsl,imx95-dwc3", "fsl,imx8mp-dwc3";
+ reg = <0x0 0x4c010010 0x0 0x04>,
+ <0x0 0x4c1f0000 0x0 0x20>;
+ clocks = <&scmi_clk IMX95_CLK_HSIO>,
+ <&scmi_clk IMX95_CLK_32K>;
+ clock-names = "hsio", "suspend";
+ interrupts = <GIC_SPI 173 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+ power-domains = <&scmi_devpd IMX95_PD_HSIO_TOP>;
+ dma-ranges = <0x0 0x0 0x0 0x0 0x10 0x0>;
+ status = "disabled";
+
+ usb3_dwc3: usb@4c100000 {
+ compatible = "snps,dwc3";
+ reg = <0x0 0x4c100000 0x0 0x10000>;
+ clocks = <&scmi_clk IMX95_CLK_HSIO>,
+ <&scmi_clk IMX95_CLK_24M>,
+ <&scmi_clk IMX95_CLK_32K>;
+ clock-names = "bus_early", "ref", "suspend";
+ interrupts = <GIC_SPI 175 IRQ_TYPE_LEVEL_HIGH>;
+ phys = <&usb3_phy>, <&usb3_phy>;
+ phy-names = "usb2-phy", "usb3-phy";
+ snps,gfladj-refclk-lpm-sel-quirk;
+ snps,parkmode-disable-ss-quirk;
+ iommus = <&smmu 0xe>;
+ };
+ };
+
+ hsio_blk_ctl: syscon@4c0100c0 {
+ compatible = "nxp,imx95-hsio-blk-ctl", "syscon";
+ reg = <0x0 0x4c0100c0 0x0 0x1>;
+ #clock-cells = <1>;
+ clocks = <&clk_sys100m>;
+ power-domains = <&scmi_devpd IMX95_PD_HSIO_TOP>;
+ };
+
+ usb3_phy: phy@4c1f0040 {
+ compatible = "fsl,imx95-usb-phy", "fsl,imx8mp-usb-phy";
+ reg = <0x0 0x4c1f0040 0x0 0x40>,
+ <0x0 0x4c1fc000 0x0 0x100>;
+ clocks = <&scmi_clk IMX95_CLK_HSIO>;
+ clock-names = "phy";
+ #phy-cells = <0>;
+ power-domains = <&scmi_devpd IMX95_PD_HSIO_TOP>;
+ status = "disabled";
+ };
+
pcie0: pcie@4c300000 {
compatible = "fsl,imx95-pcie";
reg = <0 0x4c300000 0 0x10000>,
@@ -1564,8 +1648,9 @@
clocks = <&scmi_clk IMX95_CLK_HSIO>,
<&scmi_clk IMX95_CLK_HSIOPLL>,
<&scmi_clk IMX95_CLK_HSIOPLL_VCO>,
- <&scmi_clk IMX95_CLK_HSIOPCIEAUX>;
- clock-names = "pcie", "pcie_bus", "pcie_phy", "pcie_aux";
+ <&scmi_clk IMX95_CLK_HSIOPCIEAUX>,
+ <&hsio_blk_ctl 0>;
+ clock-names = "pcie", "pcie_bus", "pcie_phy", "pcie_aux", "ref";
assigned-clocks =<&scmi_clk IMX95_CLK_HSIOPLL_VCO>,
<&scmi_clk IMX95_CLK_HSIOPLL>,
<&scmi_clk IMX95_CLK_HSIOPCIEAUX>;
@@ -1573,6 +1658,12 @@
assigned-clock-parents = <0>, <0>,
<&scmi_clk IMX95_CLK_SYSPLL1_PFD1_DIV2>;
power-domains = <&scmi_devpd IMX95_PD_HSIO_TOP>;
+ /* pcie0's Devid (BIT[7:6]) is 0x00; its stream IDs (BIT[5:0]) are 0x10~0x17 */
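+ /* msi-map/iommu-map entries follow the generic <rid-base controller id-base length> format */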
+ msi-map = <0x0 &its 0x10 0x1>,
+ <0x100 &its 0x11 0x7>;
+ iommu-map = <0x000 &smmu 0x10 0x1>,
+ <0x100 &smmu 0x11 0x7>;
+ iommu-map-mask = <0x1ff>;
fsl,max-link-speed = <3>;
status = "disabled";
};
@@ -1631,8 +1722,9 @@
clocks = <&scmi_clk IMX95_CLK_HSIO>,
<&scmi_clk IMX95_CLK_HSIOPLL>,
<&scmi_clk IMX95_CLK_HSIOPLL_VCO>,
- <&scmi_clk IMX95_CLK_HSIOPCIEAUX>;
- clock-names = "pcie", "pcie_bus", "pcie_phy", "pcie_aux";
+ <&scmi_clk IMX95_CLK_HSIOPCIEAUX>,
+ <&hsio_blk_ctl 0>;
+ clock-names = "pcie", "pcie_bus", "pcie_phy", "pcie_aux", "ref";
assigned-clocks =<&scmi_clk IMX95_CLK_HSIOPLL_VCO>,
<&scmi_clk IMX95_CLK_HSIOPLL>,
<&scmi_clk IMX95_CLK_HSIOPCIEAUX>;
@@ -1640,6 +1732,14 @@
assigned-clock-parents = <0>, <0>,
<&scmi_clk IMX95_CLK_SYSPLL1_PFD1_DIV2>;
power-domains = <&scmi_devpd IMX95_PD_HSIO_TOP>;
+ /* pcie1's Devid (BIT[7:6]) is 0x10; its stream IDs (BIT[5:0]) are 0x18~0x1f */
+ msi-map = <0x0 &its 0x98 0x1>,
+ <0x100 &its 0x99 0x7>;
+ msi-map-mask = <0x1ff>;
+ /* the SMMU stream IDs do not include the Devid field (BIT[7:6]) */
+ iommu-map = <0x000 &smmu 0x18 0x1>,
+ <0x100 &smmu 0x19 0x7>;
+ iommu-map-mask = <0x1ff>;
fsl,max-link-speed = <3>;
status = "disabled";
};
diff --git a/arch/arm64/boot/dts/freescale/mba8mx.dtsi b/arch/arm64/boot/dts/freescale/mba8mx.dtsi
index 58e3865c2889..7ee1228a50f4 100644
--- a/arch/arm64/boot/dts/freescale/mba8mx.dtsi
+++ b/arch/arm64/boot/dts/freescale/mba8mx.dtsi
@@ -138,7 +138,7 @@
sound {
compatible = "fsl,imx-audio-tlv320aic32x4";
- model = "imx-audio-tlv320aic32x4";
+ model = "tqm-tlv320aic32";
ssi-controller = <&sai3>;
audio-codec = <&tlv320aic3x04>;
};
diff --git a/arch/arm64/boot/dts/freescale/mba8xx.dtsi b/arch/arm64/boot/dts/freescale/mba8xx.dtsi
index 276d1683b03b..c4b5663949ad 100644
--- a/arch/arm64/boot/dts/freescale/mba8xx.dtsi
+++ b/arch/arm64/boot/dts/freescale/mba8xx.dtsi
@@ -36,6 +36,13 @@
stdout-path = &lpuart1;
};
+ /* Non-controllable PCIe reference clock generator */
+ pcie_refclk: clock-pcie-ref {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <100000000>;
+ };
+
gpio-keys {
compatible = "gpio-keys";
pinctrl-names = "default";
@@ -208,6 +215,12 @@
status = "okay";
};
+&hsio_phy {
+ fsl,hsio-cfg = "pciea-x2-pcieb";
+ fsl,refclk-pad-mode = "input";
+ status = "okay";
+};
+
&i2c1 {
tlv320aic3x04: audio-codec@18 {
compatible = "ti,tlv320aic32x4";
@@ -309,7 +322,15 @@
"", "", "", "";
};
-/* TODO: Mini-PCIe */
+&pcieb {
+ phys = <&hsio_phy 0 PHY_TYPE_PCIE 0>;
+ phy-names = "pcie-phy";
+ pinctrl-0 = <&pinctrl_pcieb>;
+ pinctrl-names = "default";
+ reset-gpios = <&lsio_gpio4 0 GPIO_ACTIVE_LOW>;
+ vpcie-supply = <&reg_pcie_1v5>;
+ status = "okay";
+};
&sai1 {
assigned-clocks = <&clk IMX_SC_R_AUDIO_PLL_0 IMX_SC_PM_CLK_PLL>,
@@ -467,10 +488,10 @@
fsl,pins = <IMX8QXP_USDHC1_RESET_B_LSIO_GPIO4_IO19 0x00000020>;
};
- pinctrl_pcieb: pcieagrp {
- fsl,pins = <IMX8QXP_PCIE_CTRL0_PERST_B_LSIO_GPIO4_IO00 0x06000041>,
- <IMX8QXP_PCIE_CTRL0_CLKREQ_B_LSIO_GPIO4_IO01 0x06000041>,
- <IMX8QXP_PCIE_CTRL0_WAKE_B_LSIO_GPIO4_IO02 0x04000041>;
+ pinctrl_pcieb: pciebgrp {
+ fsl,pins = <IMX8QXP_PCIE_CTRL0_PERST_B_LSIO_GPIO4_IO00 0x06000041>,
+ <IMX8QXP_PCIE_CTRL0_CLKREQ_B_HSIO_PCIE0_CLKREQ_B 0x06000041>,
+ <IMX8QXP_PCIE_CTRL0_WAKE_B_LSIO_GPIO4_IO02 0x04000041>;
};
pinctrl_reg_pcie_1v5: regpcie1v5grp {
diff --git a/arch/arm64/boot/dts/freescale/s32g2.dtsi b/arch/arm64/boot/dts/freescale/s32g2.dtsi
index 7be430b78c83..ea1456d361a3 100644
--- a/arch/arm64/boot/dts/freescale/s32g2.dtsi
+++ b/arch/arm64/boot/dts/freescale/s32g2.dtsi
@@ -317,6 +317,49 @@
};
};
+ edma0: dma-controller@40144000 {
+ compatible = "nxp,s32g2-edma";
+ reg = <0x40144000 0x24000>,
+ <0x4012c000 0x3000>,
+ <0x40130000 0x3000>;
+ #dma-cells = <2>;
+ dma-channels = <32>;
+ interrupts = <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "tx-0-15",
+ "tx-16-31",
+ "err";
+ clocks = <&clks 63>, <&clks 64>;
+ clock-names = "dmamux0", "dmamux1";
+ };
+
+ can0: can@401b4000 {
+ compatible = "nxp,s32g2-flexcan";
+ reg = <0x401b4000 0xa000>;
+ interrupts = <GIC_SPI 39 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "mb-0", "state", "berr", "mb-1";
+ clocks = <&clks 9>, <&clks 11>;
+ clock-names = "ipg", "per";
+ status = "disabled";
+ };
+
+ can1: can@401be000 {
+ compatible = "nxp,s32g2-flexcan";
+ reg = <0x401be000 0xa000>;
+ interrupts = <GIC_SPI 43 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 41 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 42 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 44 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "mb-0", "state", "berr", "mb-1";
+ clocks = <&clks 9>, <&clks 11>;
+ clock-names = "ipg", "per";
+ status = "disabled";
+ };
+
uart0: serial@401c8000 {
compatible = "nxp,s32g2-linflexuart",
"fsl,s32v234-linflexuart";
@@ -333,6 +376,82 @@
status = "disabled";
};
+ i2c0: i2c@401e4000 {
+ compatible = "nxp,s32g2-i2c";
+ reg = <0x401e4000 0x1000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interrupts = <GIC_SPI 92 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks 40>;
+ clock-names = "ipg";
+ status = "disabled";
+ };
+
+ i2c1: i2c@401e8000 {
+ compatible = "nxp,s32g2-i2c";
+ reg = <0x401e8000 0x1000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interrupts = <GIC_SPI 93 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks 40>;
+ clock-names = "ipg";
+ status = "disabled";
+ };
+
+ i2c2: i2c@401ec000 {
+ compatible = "nxp,s32g2-i2c";
+ reg = <0x401ec000 0x1000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interrupts = <GIC_SPI 94 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks 40>;
+ clock-names = "ipg";
+ status = "disabled";
+ };
+
+ edma1: dma-controller@40244000 {
+ compatible = "nxp,s32g2-edma";
+ reg = <0x40244000 0x24000>,
+ <0x4022c000 0x3000>,
+ <0x40230000 0x3000>;
+ #dma-cells = <2>;
+ dma-channels = <32>;
+ interrupts = <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "tx-0-15",
+ "tx-16-31",
+ "err";
+ clocks = <&clks 63>, <&clks 64>;
+ clock-names = "dmamux0", "dmamux1";
+ };
+
+ can2: can@402a8000 {
+ compatible = "nxp,s32g2-flexcan";
+ reg = <0x402a8000 0xa000>;
+ interrupts = <GIC_SPI 47 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 45 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 46 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 48 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "mb-0", "state", "berr", "mb-1";
+ clocks = <&clks 9>, <&clks 11>;
+ clock-names = "ipg", "per";
+ status = "disabled";
+ };
+
+ can3: can@402b2000 {
+ compatible = "nxp,s32g2-flexcan";
+ reg = <0x402b2000 0xa000>;
+ interrupts = <GIC_SPI 51 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 49 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 50 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 52 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "mb-0", "state", "berr", "mb-1";
+ clocks = <&clks 9>, <&clks 11>;
+ clock-names = "ipg", "per";
+ status = "disabled";
+ };
+
uart2: serial@402bc000 {
compatible = "nxp,s32g2-linflexuart",
"fsl,s32v234-linflexuart";
@@ -341,6 +460,28 @@
status = "disabled";
};
+ i2c3: i2c@402d8000 {
+ compatible = "nxp,s32g2-i2c";
+ reg = <0x402d8000 0x1000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interrupts = <GIC_SPI 95 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks 40>;
+ clock-names = "ipg";
+ status = "disabled";
+ };
+
+ i2c4: i2c@402dc000 {
+ compatible = "nxp,s32g2-i2c";
+ reg = <0x402dc000 0x1000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks 40>;
+ clock-names = "ipg";
+ status = "disabled";
+ };
+
usdhc0: mmc@402f0000 {
compatible = "nxp,s32g2-usdhc";
reg = <0x402f0000 0x1000>;
diff --git a/arch/arm64/boot/dts/freescale/s32g274a-evb.dts b/arch/arm64/boot/dts/freescale/s32g274a-evb.dts
index b9a119eea2b7..c4a195dd67bf 100644
--- a/arch/arm64/boot/dts/freescale/s32g274a-evb.dts
+++ b/arch/arm64/boot/dts/freescale/s32g274a-evb.dts
@@ -7,6 +7,7 @@
/dts-v1/;
#include "s32g2.dtsi"
+#include "s32gxxxa-evb.dtsi"
/ {
model = "NXP S32G2 Evaluation Board (S32G-VNP-EVB)";
diff --git a/arch/arm64/boot/dts/freescale/s32g274a-rdb2.dts b/arch/arm64/boot/dts/freescale/s32g274a-rdb2.dts
index aaa61a8ad0da..b5ba51696f43 100644
--- a/arch/arm64/boot/dts/freescale/s32g274a-rdb2.dts
+++ b/arch/arm64/boot/dts/freescale/s32g274a-rdb2.dts
@@ -7,6 +7,7 @@
/dts-v1/;
#include "s32g2.dtsi"
+#include "s32gxxxa-rdb.dtsi"
/ {
model = "NXP S32G2 Reference Design Board 2 (S32G-VNP-RDB2)";
diff --git a/arch/arm64/boot/dts/freescale/s32g3.dtsi b/arch/arm64/boot/dts/freescale/s32g3.dtsi
index 6c572ffe37ca..991dbfbfa203 100644
--- a/arch/arm64/boot/dts/freescale/s32g3.dtsi
+++ b/arch/arm64/boot/dts/freescale/s32g3.dtsi
@@ -374,6 +374,51 @@
};
};
+ edma0: dma-controller@40144000 {
+ compatible = "nxp,s32g3-edma", "nxp,s32g2-edma";
+ reg = <0x40144000 0x24000>,
+ <0x4012c000 0x3000>,
+ <0x40130000 0x3000>;
+ #dma-cells = <2>;
+ dma-channels = <32>;
+ interrupts = <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "tx-0-15",
+ "tx-16-31",
+ "err";
+ clocks = <&clks 63>, <&clks 64>;
+ clock-names = "dmamux0", "dmamux1";
+ };
+
+ can0: can@401b4000 {
+ compatible = "nxp,s32g3-flexcan",
+ "nxp,s32g2-flexcan";
+ reg = <0x401b4000 0xa000>;
+ interrupts = <GIC_SPI 39 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "mb-0", "state", "berr", "mb-1";
+ clocks = <&clks 9>, <&clks 11>;
+ clock-names = "ipg", "per";
+ status = "disabled";
+ };
+
+ can1: can@401be000 {
+ compatible = "nxp,s32g3-flexcan",
+ "nxp,s32g2-flexcan";
+ reg = <0x401be000 0xa000>;
+ interrupts = <GIC_SPI 43 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 41 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 42 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 44 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "mb-0", "state", "berr", "mb-1";
+ clocks = <&clks 9>, <&clks 11>;
+ clock-names = "ipg", "per";
+ status = "disabled";
+ };
+
uart0: serial@401c8000 {
compatible = "nxp,s32g3-linflexuart",
"fsl,s32v234-linflexuart";
@@ -390,6 +435,87 @@
status = "disabled";
};
+ i2c0: i2c@401e4000 {
+ compatible = "nxp,s32g3-i2c",
+ "nxp,s32g2-i2c";
+ reg = <0x401e4000 0x1000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interrupts = <GIC_SPI 92 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks 40>;
+ clock-names = "ipg";
+ status = "disabled";
+ };
+
+ i2c1: i2c@401e8000 {
+ compatible = "nxp,s32g3-i2c",
+ "nxp,s32g2-i2c";
+ reg = <0x401e8000 0x1000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interrupts = <GIC_SPI 93 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks 40>;
+ clock-names = "ipg";
+ status = "disabled";
+ };
+
+ i2c2: i2c@401ec000 {
+ compatible = "nxp,s32g3-i2c",
+ "nxp,s32g2-i2c";
+ reg = <0x401ec000 0x1000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interrupts = <GIC_SPI 94 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks 40>;
+ clock-names = "ipg";
+ status = "disabled";
+ };
+
+ edma1: dma-controller@40244000 {
+ compatible = "nxp,s32g3-edma", "nxp,s32g2-edma";
+ reg = <0x40244000 0x24000>,
+ <0x4022c000 0x3000>,
+ <0x40230000 0x3000>;
+ #dma-cells = <2>;
+ dma-channels = <32>;
+ interrupts = <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "tx-0-15",
+ "tx-16-31",
+ "err";
+ clocks = <&clks 63>, <&clks 64>;
+ clock-names = "dmamux0", "dmamux1";
+ };
+
+ can2: can@402a8000 {
+ compatible = "nxp,s32g3-flexcan",
+ "nxp,s32g2-flexcan";
+ reg = <0x402a8000 0xa000>;
+ interrupts = <GIC_SPI 47 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 45 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 46 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 48 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "mb-0", "state", "berr", "mb-1";
+ clocks = <&clks 9>, <&clks 11>;
+ clock-names = "ipg", "per";
+ status = "disabled";
+ };
+
+ can3: can@402b2000 {
+ compatible = "nxp,s32g3-flexcan",
+ "nxp,s32g2-flexcan";
+ reg = <0x402b2000 0xa000>;
+ interrupts = <GIC_SPI 51 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 49 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 50 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 52 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "mb-0", "state", "berr", "mb-1";
+ clocks = <&clks 9>, <&clks 11>;
+ clock-names = "ipg", "per";
+ status = "disabled";
+ };
+
uart2: serial@402bc000 {
compatible = "nxp,s32g3-linflexuart",
"fsl,s32v234-linflexuart";
@@ -398,6 +524,30 @@
status = "disabled";
};
+ i2c3: i2c@402d8000 {
+ compatible = "nxp,s32g3-i2c",
+ "nxp,s32g2-i2c";
+ reg = <0x402d8000 0x1000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interrupts = <GIC_SPI 95 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks 40>;
+ clock-names = "ipg";
+ status = "disabled";
+ };
+
+ i2c4: i2c@402dc000 {
+ compatible = "nxp,s32g3-i2c",
+ "nxp,s32g2-i2c";
+ reg = <0x402dc000 0x1000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks 40>;
+ clock-names = "ipg";
+ status = "disabled";
+ };
+
usdhc0: mmc@402f0000 {
compatible = "nxp,s32g3-usdhc",
"nxp,s32g2-usdhc";
diff --git a/arch/arm64/boot/dts/freescale/s32g399a-rdb3.dts b/arch/arm64/boot/dts/freescale/s32g399a-rdb3.dts
index 828e353455b5..802f543cae4a 100644
--- a/arch/arm64/boot/dts/freescale/s32g399a-rdb3.dts
+++ b/arch/arm64/boot/dts/freescale/s32g399a-rdb3.dts
@@ -8,6 +8,7 @@
/dts-v1/;
#include "s32g3.dtsi"
+#include "s32gxxxa-rdb.dtsi"
/ {
model = "NXP S32G3 Reference Design Board 3 (S32G-VNP-RDB3)";
@@ -39,6 +40,14 @@
status = "okay";
};
+&i2c4 {
+ current-sensor@40 {
+ compatible = "ti,ina231";
+ reg = <0x40>;
+ shunt-resistor = <1000>;
+ };
+};
+
&usdhc0 {
pinctrl-names = "default", "state_100mhz", "state_200mhz";
pinctrl-0 = <&pinctrl_usdhc0>;
diff --git a/arch/arm64/boot/dts/freescale/s32gxxxa-evb.dtsi b/arch/arm64/boot/dts/freescale/s32gxxxa-evb.dtsi
new file mode 100644
index 000000000000..d26af0fb8be7
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/s32gxxxa-evb.dtsi
@@ -0,0 +1,222 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/*
+ * Copyright 2024 NXP
+ *
+ * Authors: Ciprian Marian Costea <ciprianmarian.costea@oss.nxp.com>
+ * Ghennadi Procopciuc <ghennadi.procopciuc@oss.nxp.com>
+ * Larisa Grigore <larisa.grigore@nxp.com>
+ */
+
+&pinctrl {
+ can0_pins: can0-pins {
+ can0-grp0 {
+ pinmux = <0x2c1>;
+ output-enable;
+ slew-rate = <133>;
+ };
+
+ can0-grp1 {
+ pinmux = <0x2b0>;
+ input-enable;
+ slew-rate = <133>;
+ };
+
+ can0-grp2 {
+ pinmux = <0x2012>;
+ };
+ };
+
+ can2_pins: can2-pins {
+ can2-grp0 {
+ pinmux = <0x1b2>;
+ output-enable;
+ slew-rate = <133>;
+ };
+
+ can2-grp1 {
+ pinmux = <0x1c0>;
+ input-enable;
+ slew-rate = <133>;
+ };
+
+ can2-grp2 {
+ pinmux = <0x2782>;
+ };
+ };
+
+ can3_pins: can3-pins {
+ can3-grp0 {
+ pinmux = <0x192>;
+ output-enable;
+ slew-rate = <133>;
+ };
+
+ can3-grp1 {
+ pinmux = <0x1a0>;
+ input-enable;
+ slew-rate = <133>;
+ };
+
+ can3-grp2 {
+ pinmux = <0x2792>;
+ };
+ };
+
+ i2c0_pins: i2c0-pins {
+ i2c0-grp0 {
+ pinmux = <0x101>, <0x111>;
+ drive-open-drain;
+ output-enable;
+ input-enable;
+ slew-rate = <133>;
+ };
+
+ i2c0-grp1 {
+ pinmux = <0x2352>, <0x2362>;
+ };
+ };
+
+ i2c0_gpio_pins: i2c0-gpio-pins {
+ i2c0-gpio-grp0 {
+ pinmux = <0x100>, <0x110>;
+ drive-open-drain;
+ output-enable;
+ input-enable;
+ slew-rate = <133>;
+ };
+
+ i2c0-gpio-grp1 {
+ pinmux = <0x2350>, <0x2360>;
+ };
+ };
+
+ i2c1_pins: i2c1-pins {
+ i2c1-grp0 {
+ pinmux = <0x131>, <0x141>;
+ drive-open-drain;
+ output-enable;
+ input-enable;
+ slew-rate = <133>;
+ };
+
+ i2c1-grp1 {
+ pinmux = <0x2cd2>, <0x2ce2>;
+ };
+ };
+
+ i2c1_gpio_pins: i2c1-gpio-pins {
+ i2c1-gpio-grp0 {
+ pinmux = <0x130>, <0x140>;
+ drive-open-drain;
+ output-enable;
+ input-enable;
+ slew-rate = <133>;
+ };
+
+ i2c1-gpio-grp1 {
+ pinmux = <0x2cd0>, <0x2ce0>;
+ };
+ };
+
+ i2c2_pins: i2c2-pins {
+ i2c2-grp0 {
+ pinmux = <0x151>, <0x161>;
+ drive-open-drain;
+ output-enable;
+ input-enable;
+ slew-rate = <133>;
+ };
+
+ i2c2-grp1 {
+ pinmux = <0x2cf2>, <0x2d02>;
+ };
+ };
+
+ i2c2_gpio_pins: i2c2-gpio-pins {
+ i2c2-gpio-grp0 {
+ pinmux = <0x150>, <0x160>;
+ drive-open-drain;
+ output-enable;
+ input-enable;
+ slew-rate = <133>;
+ };
+
+ i2c2-gpio-grp1 {
+ pinmux = <0x2cf0>, <0x2d00>;
+ };
+ };
+
+ i2c4_pins: i2c4-pins {
+ i2c4-grp0 {
+ pinmux = <0x211>, <0x222>;
+ drive-open-drain;
+ output-enable;
+ input-enable;
+ slew-rate = <133>;
+ };
+
+ i2c4-grp1 {
+ pinmux = <0x2d43>, <0x2d33>;
+ };
+ };
+
+ i2c4_gpio_pins: i2c4-gpio-pins {
+ i2c4-gpio-grp0 {
+ pinmux = <0x210>, <0x220>;
+ drive-open-drain;
+ output-enable;
+ input-enable;
+ slew-rate = <133>;
+ };
+
+ i2c4-gpio-grp1 {
+ pinmux = <0x2d40>, <0x2d30>;
+ };
+ };
+};
+
+&can0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&can0_pins>;
+ status = "okay";
+};
+
+&can2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&can2_pins>;
+ status = "okay";
+};
+
+&can3 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&can3_pins>;
+ status = "okay";
+};
+
+&i2c0 {
+ pinctrl-names = "default", "gpio";
+ pinctrl-0 = <&i2c0_pins>;
+ pinctrl-1 = <&i2c0_gpio_pins>;
+ status = "okay";
+};
+
+&i2c1 {
+ pinctrl-names = "default", "gpio";
+ pinctrl-0 = <&i2c1_pins>;
+ pinctrl-1 = <&i2c1_gpio_pins>;
+ status = "okay";
+};
+
+&i2c2 {
+ pinctrl-names = "default", "gpio";
+ pinctrl-0 = <&i2c2_pins>;
+ pinctrl-1 = <&i2c2_gpio_pins>;
+ status = "okay";
+};
+
+&i2c4 {
+ pinctrl-names = "default", "gpio";
+ pinctrl-0 = <&i2c4_pins>;
+ pinctrl-1 = <&i2c4_gpio_pins>;
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/freescale/s32gxxxa-rdb.dtsi b/arch/arm64/boot/dts/freescale/s32gxxxa-rdb.dtsi
new file mode 100644
index 000000000000..ba53ec622f0b
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/s32gxxxa-rdb.dtsi
@@ -0,0 +1,170 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/*
+ * Copyright 2024 NXP
+ *
+ * Authors: Ciprian Marian Costea <ciprianmarian.costea@oss.nxp.com>
+ * Ghennadi Procopciuc <ghennadi.procopciuc@oss.nxp.com>
+ * Larisa Grigore <larisa.grigore@nxp.com>
+ */
+
+&pinctrl {
+ can0_pins: can0-pins {
+ can0-grp0 {
+ pinmux = <0x112>;
+ output-enable;
+ slew-rate = <133>;
+ };
+
+ can0-grp1 {
+ pinmux = <0x120>;
+ input-enable;
+ slew-rate = <133>;
+ };
+
+ can0-grp2 {
+ pinmux = <0x2013>;
+ };
+ };
+
+ can1_pins: can1-pins {
+ can1-grp0 {
+ pinmux = <0x132>;
+ output-enable;
+ slew-rate = <133>;
+ };
+
+ can1-grp1 {
+ pinmux = <0x140>;
+ input-enable;
+ slew-rate = <133>;
+ };
+
+ can1-grp2 {
+ pinmux = <0x2772>;
+ };
+ };
+
+ i2c0_pins: i2c0-pins {
+ i2c0-grp0 {
+ pinmux = <0x1f2>, <0x201>;
+ drive-open-drain;
+ output-enable;
+ input-enable;
+ slew-rate = <133>;
+ };
+
+ i2c0-grp1 {
+ pinmux = <0x2353>, <0x2363>;
+ };
+ };
+
+ i2c0_gpio_pins: i2c0-gpio-pins {
+ i2c0-gpio-grp0 {
+ pinmux = <0x1f0>, <0x200>;
+ drive-open-drain;
+ output-enable;
+ input-enable;
+ slew-rate = <133>;
+ };
+
+ i2c0-gpio-grp1 {
+ pinmux = <0x2350>, <0x2360>;
+ };
+ };
+
+ i2c2_pins: i2c2-pins {
+ i2c2-grp0 {
+ pinmux = <0x151>, <0x161>;
+ drive-open-drain;
+ output-enable;
+ input-enable;
+ slew-rate = <133>;
+ };
+
+ i2c2-grp1 {
+ pinmux = <0x2cf2>, <0x2d02>;
+ };
+ };
+
+ i2c2_gpio_pins: i2c2-gpio-pins {
+ i2c2-gpio-grp0 {
+ pinmux = <0x2cf0>, <0x2d00>;
+ };
+
+ i2c2-gpio-grp1 {
+ pinmux = <0x150>, <0x160>;
+ drive-open-drain;
+ output-enable;
+ input-enable;
+ slew-rate = <133>;
+ };
+ };
+
+ i2c4_pins: i2c4-pins {
+ i2c4-grp0 {
+ pinmux = <0x211>, <0x222>;
+ drive-open-drain;
+ output-enable;
+ input-enable;
+ slew-rate = <133>;
+ };
+
+ i2c4-grp1 {
+ pinmux = <0x2d43>, <0x2d33>;
+ };
+ };
+
+ i2c4_gpio_pins: i2c4-gpio-pins {
+ i2c4-gpio-grp0 {
+ pinmux = <0x210>, <0x220>;
+ drive-open-drain;
+ output-enable;
+ input-enable;
+ slew-rate = <133>;
+ };
+
+ i2c4-gpio-grp1 {
+ pinmux = <0x2d40>, <0x2d30>;
+ };
+ };
+};
+
+&can0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&can0_pins>;
+ status = "okay";
+};
+
+&can1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&can1_pins>;
+ status = "okay";
+};
+
+&i2c0 {
+ pinctrl-names = "default", "gpio";
+ pinctrl-0 = <&i2c0_pins>;
+ pinctrl-1 = <&i2c0_gpio_pins>;
+ status = "okay";
+
+ pcal6524: gpio-expander@22 {
+ compatible = "nxp,pcal6524";
+ reg = <0x22>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+};
+
+&i2c2 {
+ pinctrl-names = "default", "gpio";
+ pinctrl-0 = <&i2c2_pins>;
+ pinctrl-1 = <&i2c2_gpio_pins>;
+ status = "okay";
+};
+
+&i2c4 {
+ pinctrl-names = "default", "gpio";
+ pinctrl-0 = <&i2c4_pins>;
+ pinctrl-1 = <&i2c4_gpio_pins>;
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/freescale/tqma8xx.dtsi b/arch/arm64/boot/dts/freescale/tqma8xx.dtsi
index 366912bf3d5e..58693b774d4c 100644
--- a/arch/arm64/boot/dts/freescale/tqma8xx.dtsi
+++ b/arch/arm64/boot/dts/freescale/tqma8xx.dtsi
@@ -65,6 +65,7 @@
spi-max-frequency = <66000000>;
spi-tx-bus-width = <1>;
spi-rx-bus-width = <4>;
+ vcc-supply = <&reg_1v8>;
partitions {
compatible = "fixed-partitions";
@@ -74,8 +75,6 @@
};
};
-/* TODO GPU */
-
&i2c1 {
#address-cells = <1>;
#size-cells = <0>;
@@ -114,6 +113,15 @@
};
};
+&jpegdec {
+ status = "okay";
+};
+
+&jpegenc {
+ status = "okay";
+};
+
&mu_m0 {
status = "okay";
};
diff --git a/arch/arm64/boot/dts/hisilicon/hi3660-coresight.dtsi b/arch/arm64/boot/dts/hisilicon/hi3660-coresight.dtsi
index 79a55a0fa2f1..4c6a075908d1 100644
--- a/arch/arm64/boot/dts/hisilicon/hi3660-coresight.dtsi
+++ b/arch/arm64/boot/dts/hisilicon/hi3660-coresight.dtsi
@@ -17,6 +17,7 @@
clocks = <&crg_ctrl HI3660_PCLK>;
clock-names = "apb_pclk";
cpu = <&cpu0>;
+ arm,coresight-loses-context-with-cpu;
out-ports {
port {
@@ -34,6 +35,7 @@
clocks = <&crg_ctrl HI3660_PCLK>;
clock-names = "apb_pclk";
cpu = <&cpu1>;
+ arm,coresight-loses-context-with-cpu;
out-ports {
port {
@@ -51,6 +53,7 @@
clocks = <&crg_ctrl HI3660_PCLK>;
clock-names = "apb_pclk";
cpu = <&cpu2>;
+ arm,coresight-loses-context-with-cpu;
out-ports {
port {
@@ -68,6 +71,7 @@
clocks = <&crg_ctrl HI3660_PCLK>;
clock-names = "apb_pclk";
cpu = <&cpu3>;
+ arm,coresight-loses-context-with-cpu;
out-ports {
port {
@@ -160,6 +164,7 @@
clocks = <&crg_ctrl HI3660_PCLK>;
clock-names = "apb_pclk";
cpu = <&cpu4>;
+ arm,coresight-loses-context-with-cpu;
out-ports {
port {
@@ -177,6 +182,7 @@
clocks = <&crg_ctrl HI3660_PCLK>;
clock-names = "apb_pclk";
cpu = <&cpu5>;
+ arm,coresight-loses-context-with-cpu;
out-ports {
port {
@@ -194,6 +200,7 @@
clocks = <&crg_ctrl HI3660_PCLK>;
clock-names = "apb_pclk";
cpu = <&cpu6>;
+ arm,coresight-loses-context-with-cpu;
out-ports {
port {
@@ -211,6 +218,7 @@
clocks = <&crg_ctrl HI3660_PCLK>;
clock-names = "apb_pclk";
cpu = <&cpu7>;
+ arm,coresight-loses-context-with-cpu;
out-ports {
port {
diff --git a/arch/arm64/boot/dts/marvell/ac5-98dx25xx.dtsi b/arch/arm64/boot/dts/marvell/ac5-98dx25xx.dtsi
index 75377c292bcb..605f5be1538c 100644
--- a/arch/arm64/boot/dts/marvell/ac5-98dx25xx.dtsi
+++ b/arch/arm64/boot/dts/marvell/ac5-98dx25xx.dtsi
@@ -78,7 +78,7 @@
#size-cells = <2>;
ranges;
- internal-regs@7f000000 {
+ bus@7f000000 {
#address-cells = <1>;
#size-cells = <1>;
compatible = "simple-bus";
diff --git a/arch/arm64/boot/dts/marvell/armada-371x.dtsi b/arch/arm64/boot/dts/marvell/armada-371x.dtsi
deleted file mode 100644
index dc1182ec9fa1..000000000000
--- a/arch/arm64/boot/dts/marvell/armada-371x.dtsi
+++ /dev/null
@@ -1,17 +0,0 @@
-// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
-/*
- * Device Tree Include file for Marvell Armada 371x family of SoCs
- * (also named 88F3710)
- *
- * Copyright (C) 2016 Marvell
- *
- * Gregory CLEMENT <gregory.clement@free-electrons.com>
- *
- */
-
-#include "armada-37xx.dtsi"
-
-/ {
- model = "Marvell Armada 3710 SoC";
- compatible = "marvell,armada3710", "marvell,armada3700";
-};
diff --git a/arch/arm64/boot/dts/marvell/armada-3720-db.dts b/arch/arm64/boot/dts/marvell/armada-3720-db.dts
index 0cfb38492021..bd4e61d5448e 100644
--- a/arch/arm64/boot/dts/marvell/armada-3720-db.dts
+++ b/arch/arm64/boot/dts/marvell/armada-3720-db.dts
@@ -18,7 +18,7 @@
/ {
model = "Marvell Armada 3720 Development Board DB-88F3720-DDR3";
- compatible = "marvell,armada-3720-db", "marvell,armada3720", "marvell,armada3700";
+ compatible = "marvell,armada-3720-db", "marvell,armada3720", "marvell,armada3710";
chosen {
stdout-path = "serial0:115200n8";
diff --git a/arch/arm64/boot/dts/marvell/armada-3720-espressobin-emmc.dts b/arch/arm64/boot/dts/marvell/armada-3720-espressobin-emmc.dts
index 6715a19c1483..5c4d8f379704 100644
--- a/arch/arm64/boot/dts/marvell/armada-3720-espressobin-emmc.dts
+++ b/arch/arm64/boot/dts/marvell/armada-3720-espressobin-emmc.dts
@@ -18,7 +18,7 @@
/ {
model = "Globalscale Marvell ESPRESSOBin Board (eMMC)";
compatible = "globalscale,espressobin-emmc", "globalscale,espressobin",
- "marvell,armada3720", "marvell,armada3700";
+ "marvell,armada3720", "marvell,armada3710";
};
&sdhci0 {
diff --git a/arch/arm64/boot/dts/marvell/armada-3720-espressobin-ultra.dts b/arch/arm64/boot/dts/marvell/armada-3720-espressobin-ultra.dts
index b3cc2b7b5d19..97a180c8dcd9 100644
--- a/arch/arm64/boot/dts/marvell/armada-3720-espressobin-ultra.dts
+++ b/arch/arm64/boot/dts/marvell/armada-3720-espressobin-ultra.dts
@@ -13,7 +13,7 @@
/ {
model = "Globalscale Marvell ESPRESSOBin Ultra Board";
compatible = "globalscale,espressobin-ultra", "globalscale,espressobin",
- "marvell,armada3720", "marvell,armada3700";
+ "marvell,armada3720", "marvell,armada3710";
aliases {
/* ethernet1 is WAN port */
diff --git a/arch/arm64/boot/dts/marvell/armada-3720-espressobin-v7-emmc.dts b/arch/arm64/boot/dts/marvell/armada-3720-espressobin-v7-emmc.dts
index 2a8aa3901a9f..75401eab4d42 100644
--- a/arch/arm64/boot/dts/marvell/armada-3720-espressobin-v7-emmc.dts
+++ b/arch/arm64/boot/dts/marvell/armada-3720-espressobin-v7-emmc.dts
@@ -19,7 +19,7 @@
model = "Globalscale Marvell ESPRESSOBin Board V7 (eMMC)";
compatible = "globalscale,espressobin-v7-emmc", "globalscale,espressobin-v7",
"globalscale,espressobin", "marvell,armada3720",
- "marvell,armada3700";
+ "marvell,armada3710";
aliases {
/* ethernet1 is wan port */
diff --git a/arch/arm64/boot/dts/marvell/armada-3720-espressobin-v7.dts b/arch/arm64/boot/dts/marvell/armada-3720-espressobin-v7.dts
index b03af87611a9..48a7f50fb427 100644
--- a/arch/arm64/boot/dts/marvell/armada-3720-espressobin-v7.dts
+++ b/arch/arm64/boot/dts/marvell/armada-3720-espressobin-v7.dts
@@ -18,7 +18,7 @@
/ {
model = "Globalscale Marvell ESPRESSOBin Board V7";
compatible = "globalscale,espressobin-v7", "globalscale,espressobin",
- "marvell,armada3720", "marvell,armada3700";
+ "marvell,armada3720", "marvell,armada3710";
aliases {
/* ethernet1 is wan port */
diff --git a/arch/arm64/boot/dts/marvell/armada-3720-espressobin.dts b/arch/arm64/boot/dts/marvell/armada-3720-espressobin.dts
index c5a834b33b77..1542d836c090 100644
--- a/arch/arm64/boot/dts/marvell/armada-3720-espressobin.dts
+++ b/arch/arm64/boot/dts/marvell/armada-3720-espressobin.dts
@@ -16,5 +16,5 @@
/ {
model = "Globalscale Marvell ESPRESSOBin Board";
- compatible = "globalscale,espressobin", "marvell,armada3720", "marvell,armada3700";
+ compatible = "globalscale,espressobin", "marvell,armada3720", "marvell,armada3710";
};
diff --git a/arch/arm64/boot/dts/marvell/armada-3720-gl-mv1000.dts b/arch/arm64/boot/dts/marvell/armada-3720-gl-mv1000.dts
index 56930f2ce481..9f4bafeddd82 100644
--- a/arch/arm64/boot/dts/marvell/armada-3720-gl-mv1000.dts
+++ b/arch/arm64/boot/dts/marvell/armada-3720-gl-mv1000.dts
@@ -7,7 +7,7 @@
/ {
model = "GL.iNet GL-MV1000";
- compatible = "glinet,gl-mv1000", "marvell,armada3720";
+ compatible = "glinet,gl-mv1000", "marvell,armada3720", "marvell,armada3710";
aliases {
led-boot = &led_power;
diff --git a/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts b/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts
index 54453b0a91f9..f4d73c8b1a6d 100644
--- a/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts
+++ b/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts
@@ -14,7 +14,7 @@
/ {
model = "CZ.NIC Turris Mox Board";
compatible = "cznic,turris-mox", "marvell,armada3720",
- "marvell,armada3700";
+ "marvell,armada3710";
aliases {
spi0 = &spi0;
diff --git a/arch/arm64/boot/dts/marvell/armada-372x.dtsi b/arch/arm64/boot/dts/marvell/armada-372x.dtsi
index 02ae1e153288..b99ac4c03a48 100644
--- a/arch/arm64/boot/dts/marvell/armada-372x.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-372x.dtsi
@@ -12,9 +12,6 @@
#include "armada-37xx.dtsi"
/ {
- model = "Marvell Armada 3720 SoC";
- compatible = "marvell,armada3720", "marvell,armada3700";
-
cpus {
cpu1: cpu@1 {
device_type = "cpu";
diff --git a/arch/arm64/boot/dts/marvell/armada-37xx.dtsi b/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
index 9603223dd761..75b0fdc3efb2 100644
--- a/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
@@ -11,8 +11,6 @@
#include <dt-bindings/interrupt-controller/arm-gic.h>
/ {
- model = "Marvell Armada 37xx SoC";
- compatible = "marvell,armada3700";
interrupt-parent = <&gic>;
#address-cells = <2>;
#size-cells = <2>;
@@ -78,7 +76,7 @@
#size-cells = <2>;
ranges;
- internal-regs@d0000000 {
+ bus@d0000000 {
#address-cells = <1>;
#size-cells = <1>;
compatible = "simple-bus";
diff --git a/arch/arm64/boot/dts/marvell/armada-7020.dtsi b/arch/arm64/boot/dts/marvell/armada-7020.dtsi
index 4e46326dd123..570f901b4f4a 100644
--- a/arch/arm64/boot/dts/marvell/armada-7020.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-7020.dtsi
@@ -8,9 +8,3 @@
#include "armada-ap806-dual.dtsi"
#include "armada-70x0.dtsi"
-
-/ {
- model = "Marvell Armada 7020";
- compatible = "marvell,armada7020", "marvell,armada-ap806-dual",
- "marvell,armada-ap806";
-};
diff --git a/arch/arm64/boot/dts/marvell/armada-7040.dtsi b/arch/arm64/boot/dts/marvell/armada-7040.dtsi
index 2f440711d21d..710ac44870bd 100644
--- a/arch/arm64/boot/dts/marvell/armada-7040.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-7040.dtsi
@@ -9,12 +9,6 @@
#include "armada-ap806-quad.dtsi"
#include "armada-70x0.dtsi"
-/ {
- model = "Marvell Armada 7040";
- compatible = "marvell,armada7040", "marvell,armada-ap806-quad",
- "marvell,armada-ap806";
-};
-
&cp0_pcie0 {
iommu-map =
<0x0 &smmu 0x480 0x20>,
diff --git a/arch/arm64/boot/dts/marvell/armada-8020.dtsi b/arch/arm64/boot/dts/marvell/armada-8020.dtsi
index ba1307c0fadb..b6fc18876093 100644
--- a/arch/arm64/boot/dts/marvell/armada-8020.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-8020.dtsi
@@ -9,12 +9,6 @@
#include "armada-ap806-dual.dtsi"
#include "armada-80x0.dtsi"
-/ {
- model = "Marvell Armada 8020";
- compatible = "marvell,armada8020", "marvell,armada-ap806-dual",
- "marvell,armada-ap806";
-};
-
/* The RTC requires an external oscillator. But on Armada 80x0, the RTC clock
* in CP master is not connected (by package) to the oscillator. So
* disable it. However, the RTC clock in CP slave is connected to the
diff --git a/arch/arm64/boot/dts/marvell/armada-8040-clearfog-gt-8k.dts b/arch/arm64/boot/dts/marvell/armada-8040-clearfog-gt-8k.dts
index 225a54ab688d..90ae93274a16 100644
--- a/arch/arm64/boot/dts/marvell/armada-8040-clearfog-gt-8k.dts
+++ b/arch/arm64/boot/dts/marvell/armada-8040-clearfog-gt-8k.dts
@@ -371,25 +371,25 @@
};
&cp0_gpio2 {
- sata_reset {
+ sata-reset-hog {
gpio-hog;
gpios = <1 GPIO_ACTIVE_HIGH>;
output-high;
};
- lte_reset {
+ lte-reset-hog {
gpio-hog;
gpios = <2 GPIO_ACTIVE_LOW>;
output-low;
};
- wlan_disable {
+ wlan-disable-hog {
gpio-hog;
gpios = <19 GPIO_ACTIVE_LOW>;
output-low;
};
- lte_disable {
+ lte-disable-hog {
gpio-hog;
gpios = <21 GPIO_ACTIVE_LOW>;
output-low;
diff --git a/arch/arm64/boot/dts/marvell/armada-8040-puzzle-m801.dts b/arch/arm64/boot/dts/marvell/armada-8040-puzzle-m801.dts
index 9c25a88581e4..def25d51c4bf 100644
--- a/arch/arm64/boot/dts/marvell/armada-8040-puzzle-m801.dts
+++ b/arch/arm64/boot/dts/marvell/armada-8040-puzzle-m801.dts
@@ -13,7 +13,7 @@
/ {
model = "IEI-Puzzle-M801";
- compatible = "marvell,armada8040", "marvell,armada-ap806-quad", "marvell,armada-ap806";
+ compatible = "iei,puzzle-m801", "marvell,armada8040", "marvell,armada-ap806-quad", "marvell,armada-ap806";
aliases {
ethernet0 = &cp0_eth0;
diff --git a/arch/arm64/boot/dts/marvell/armada-8040.dtsi b/arch/arm64/boot/dts/marvell/armada-8040.dtsi
index 22c2d6ebf381..3efd9b9e6892 100644
--- a/arch/arm64/boot/dts/marvell/armada-8040.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-8040.dtsi
@@ -9,12 +9,6 @@
#include "armada-ap806-quad.dtsi"
#include "armada-80x0.dtsi"
-/ {
- model = "Marvell Armada 8040";
- compatible = "marvell,armada8040", "marvell,armada-ap806-quad",
- "marvell,armada-ap806";
-};
-
&cp0_pcie0 {
iommu-map =
<0x0 &smmu 0x480 0x20>,
diff --git a/arch/arm64/boot/dts/marvell/armada-8080.dtsi b/arch/arm64/boot/dts/marvell/armada-8080.dtsi
index 299e814d1ded..32bb56f2fe3f 100644
--- a/arch/arm64/boot/dts/marvell/armada-8080.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-8080.dtsi
@@ -6,9 +6,3 @@
*/
#include "armada-ap810-ap0-octa-core.dtsi"
-
-/ {
- model = "Marvell 8080 board";
- compatible = "marvell,armada-8080", "marvell,armada-ap810-octa",
- "marvell,armada-ap810";
-};
diff --git a/arch/arm64/boot/dts/marvell/armada-ap806-dual.dtsi b/arch/arm64/boot/dts/marvell/armada-ap806-dual.dtsi
index 3ed6fba1f438..82f4dedfc25e 100644
--- a/arch/arm64/boot/dts/marvell/armada-ap806-dual.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-ap806-dual.dtsi
@@ -8,9 +8,6 @@
#include "armada-ap806.dtsi"
/ {
- model = "Marvell Armada AP806 Dual";
- compatible = "marvell,armada-ap806-dual", "marvell,armada-ap806";
-
cpus {
#address-cells = <1>;
#size-cells = <0>;
diff --git a/arch/arm64/boot/dts/marvell/armada-ap806-quad.dtsi b/arch/arm64/boot/dts/marvell/armada-ap806-quad.dtsi
index cf6a96ddcf40..f37f49c79a50 100644
--- a/arch/arm64/boot/dts/marvell/armada-ap806-quad.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-ap806-quad.dtsi
@@ -8,9 +8,6 @@
#include "armada-ap806.dtsi"
/ {
- model = "Marvell Armada AP806 Quad";
- compatible = "marvell,armada-ap806-quad", "marvell,armada-ap806";
-
cpus {
#address-cells = <1>;
#size-cells = <0>;
diff --git a/arch/arm64/boot/dts/marvell/armada-ap806.dtsi b/arch/arm64/boot/dts/marvell/armada-ap806.dtsi
index 866628679ac7..73a570cf1010 100644
--- a/arch/arm64/boot/dts/marvell/armada-ap806.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-ap806.dtsi
@@ -5,14 +5,8 @@
* Device Tree file for Marvell Armada AP806.
*/
-#define AP_NAME ap806
#include "armada-ap80x.dtsi"
-/ {
- model = "Marvell Armada AP806";
- compatible = "marvell,armada-ap806";
-};
-
&ap_syscon0 {
ap_clk: clock {
compatible = "marvell,ap806-clock";
diff --git a/arch/arm64/boot/dts/marvell/armada-ap807-quad.dtsi b/arch/arm64/boot/dts/marvell/armada-ap807-quad.dtsi
index 8848238f9565..e8af7546e893 100644
--- a/arch/arm64/boot/dts/marvell/armada-ap807-quad.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-ap807-quad.dtsi
@@ -8,9 +8,6 @@
#include "armada-ap807.dtsi"
/ {
- model = "Marvell Armada AP807 Quad";
- compatible = "marvell,armada-ap807-quad", "marvell,armada-ap807";
-
cpus {
#address-cells = <1>;
#size-cells = <0>;
diff --git a/arch/arm64/boot/dts/marvell/armada-ap807.dtsi b/arch/arm64/boot/dts/marvell/armada-ap807.dtsi
index a3328d05fc94..196793d8715c 100644
--- a/arch/arm64/boot/dts/marvell/armada-ap807.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-ap807.dtsi
@@ -5,14 +5,8 @@
* Copyright (C) 2019 Marvell Technology Group Ltd.
*/
-#define AP_NAME ap807
#include "armada-ap80x.dtsi"
-/ {
- model = "Marvell Armada AP807";
- compatible = "marvell,armada-ap807";
-};
-
&ap_syscon0 {
ap_clk: clock {
compatible = "marvell,ap807-clock";
diff --git a/arch/arm64/boot/dts/marvell/armada-ap80x.dtsi b/arch/arm64/boot/dts/marvell/armada-ap80x.dtsi
index fdf88cd0eb02..40e146982921 100644
--- a/arch/arm64/boot/dts/marvell/armada-ap80x.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-ap80x.dtsi
@@ -48,14 +48,29 @@
};
};
- AP_NAME {
+ timer {
+ compatible = "arm,armv8-timer";
+ interrupt-parent = <&gic>;
+ interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>;
+ };
+
+ pmu {
+ compatible = "arm,cortex-a72-pmu";
+ interrupt-parent = <&pic>;
+ interrupts = <17>;
+ };
+
+ soc {
#address-cells = <2>;
#size-cells = <2>;
compatible = "simple-bus";
interrupt-parent = <&gic>;
ranges;
- config-space@f0000000 {
+ bus@f0000000 {
#address-cells = <1>;
#size-cells = <1>;
compatible = "simple-bus";
@@ -122,20 +137,6 @@
};
};
- timer {
- compatible = "arm,armv8-timer";
- interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
- <GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
- <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
- <GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>;
- };
-
- pmu {
- compatible = "arm,cortex-a72-pmu";
- interrupt-parent = <&pic>;
- interrupts = <17>;
- };
-
odmi: odmi@300000 {
compatible = "marvell,odmi-controller";
msi-controller;
diff --git a/arch/arm64/boot/dts/marvell/armada-ap810-ap0-octa-core.dtsi b/arch/arm64/boot/dts/marvell/armada-ap810-ap0-octa-core.dtsi
index d1a7143ef3d4..2e719ffc8289 100644
--- a/arch/arm64/boot/dts/marvell/armada-ap810-ap0-octa-core.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-ap810-ap0-octa-core.dtsi
@@ -11,7 +11,6 @@
cpus {
#address-cells = <1>;
#size-cells = <0>;
- compatible = "marvell,armada-ap810-octa";
cpu0: cpu@0 {
device_type = "cpu";
diff --git a/arch/arm64/boot/dts/marvell/armada-ap810-ap0.dtsi b/arch/arm64/boot/dts/marvell/armada-ap810-ap0.dtsi
index 2f9ab6b4a2c9..abb37e5fc2c0 100644
--- a/arch/arm64/boot/dts/marvell/armada-ap810-ap0.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-ap810-ap0.dtsi
@@ -10,10 +10,9 @@
/dts-v1/;
/ {
- model = "Marvell Armada AP810";
- compatible = "marvell,armada-ap810";
#address-cells = <2>;
#size-cells = <2>;
+ interrupt-parent = <&gic>;
aliases {
serial0 = &uart0_ap0;
@@ -25,14 +24,21 @@
method = "smc";
};
- ap810-ap0 {
+ timer {
+ compatible = "arm,armv8-timer";
+ interrupts = <GIC_PPI 13 IRQ_TYPE_LEVEL_LOW>,
+ <GIC_PPI 14 IRQ_TYPE_LEVEL_LOW>,
+ <GIC_PPI 11 IRQ_TYPE_LEVEL_LOW>,
+ <GIC_PPI 10 IRQ_TYPE_LEVEL_LOW>;
+ };
+
+ soc {
#address-cells = <2>;
#size-cells = <2>;
compatible = "simple-bus";
- interrupt-parent = <&gic>;
ranges;
- config-space@e8000000 {
+ bus@e8000000 {
#address-cells = <1>;
#size-cells = <1>;
compatible = "simple-bus";
@@ -62,14 +68,6 @@
};
};
- timer {
- compatible = "arm,armv8-timer";
- interrupts = <GIC_PPI 13 IRQ_TYPE_LEVEL_LOW>,
- <GIC_PPI 14 IRQ_TYPE_LEVEL_LOW>,
- <GIC_PPI 11 IRQ_TYPE_LEVEL_LOW>,
- <GIC_PPI 10 IRQ_TYPE_LEVEL_LOW>;
- };
-
xor@400000 {
compatible = "marvell,armada-7k-xor", "marvell,xor-v2";
reg = <0x400000 0x1000>,
diff --git a/arch/arm64/boot/dts/marvell/armada-cp110.dtsi b/arch/arm64/boot/dts/marvell/armada-cp110.dtsi
index 4fd33b0fa56e..e3cfd168becc 100644
--- a/arch/arm64/boot/dts/marvell/armada-cp110.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-cp110.dtsi
@@ -5,8 +5,4 @@
* Device Tree file for Marvell Armada CP110.
*/
-#define CP11X_TYPE cp110
-
#include "armada-cp11x.dtsi"
-
-#undef CP11X_TYPE
diff --git a/arch/arm64/boot/dts/marvell/armada-cp115.dtsi b/arch/arm64/boot/dts/marvell/armada-cp115.dtsi
index 1d0a9653e681..ec6432c8db7c 100644
--- a/arch/arm64/boot/dts/marvell/armada-cp115.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-cp115.dtsi
@@ -5,8 +5,4 @@
* Device Tree file for Marvell Armada CP115.
*/
-#define CP11X_TYPE cp115
-
#include "armada-cp11x.dtsi"
-
-#undef CP11X_TYPE
diff --git a/arch/arm64/boot/dts/marvell/armada-cp11x.dtsi b/arch/arm64/boot/dts/marvell/armada-cp11x.dtsi
index 161beec0b6b0..a057e119492f 100644
--- a/arch/arm64/boot/dts/marvell/armada-cp11x.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-cp11x.dtsi
@@ -17,7 +17,7 @@
* The contents of the node are defined below, in order to
* save one indentation level
*/
- CP11X_NAME: CP11X_NAME { };
+ CP11X_NAME: CP11X_NODE_NAME(bus) { };
/*
* CPs only have one sensor in the thermal IC.
@@ -51,7 +51,7 @@
interrupt-parent = <&CP11X_LABEL(icu_nsr)>;
ranges;
- config-space@CP11X_BASE {
+ bus@CP11X_BASE {
#address-cells = <1>;
#size-cells = <1>;
compatible = "simple-bus";
diff --git a/arch/arm64/boot/dts/marvell/cn9130-sr-som.dtsi b/arch/arm64/boot/dts/marvell/cn9130-sr-som.dtsi
index cb8d54895a77..a997bbabedd8 100644
--- a/arch/arm64/boot/dts/marvell/cn9130-sr-som.dtsi
+++ b/arch/arm64/boot/dts/marvell/cn9130-sr-som.dtsi
@@ -7,9 +7,6 @@
#include <dt-bindings/gpio/gpio.h>
/ {
- model = "SolidRun CN9130 SoM";
- compatible = "solidrun,cn9130-sr-som", "marvell,cn9130";
-
aliases {
ethernet0 = &cp0_eth0;
ethernet1 = &cp0_eth1;
diff --git a/arch/arm64/boot/dts/mediatek/Makefile b/arch/arm64/boot/dts/mediatek/Makefile
index b763b73788a4..58484e830063 100644
--- a/arch/arm64/boot/dts/mediatek/Makefile
+++ b/arch/arm64/boot/dts/mediatek/Makefile
@@ -95,13 +95,16 @@ dtb-$(CONFIG_ARCH_MEDIATEK) += mt8195-cherry-tomato-r3.dtb
dtb-$(CONFIG_ARCH_MEDIATEK) += mt8195-demo.dtb
dtb-$(CONFIG_ARCH_MEDIATEK) += mt8195-evb.dtb
dtb-$(CONFIG_ARCH_MEDIATEK) += mt8365-evk.dtb
+dtb-$(CONFIG_ARCH_MEDIATEK) += mt8370-genio-510-evk.dtb
dtb-$(CONFIG_ARCH_MEDIATEK) += mt8395-genio-1200-evk.dtb
dtb-$(CONFIG_ARCH_MEDIATEK) += mt8390-genio-700-evk.dtb
dtb-$(CONFIG_ARCH_MEDIATEK) += mt8395-kontron-3-5-sbc-i1200.dtb
dtb-$(CONFIG_ARCH_MEDIATEK) += mt8395-radxa-nio-12l.dtb
+dtb-$(CONFIG_ARCH_MEDIATEK) += mt8395-radxa-nio-12l-8-hd-panel.dtbo
dtb-$(CONFIG_ARCH_MEDIATEK) += mt8516-pumpkin.dtb
# Device tree overlays support
DTC_FLAGS_mt7986a-bananapi-bpi-r3 := -@
DTC_FLAGS_mt7986a-bananapi-bpi-r3-mini := -@
DTC_FLAGS_mt7988a-bananapi-bpi-r4 := -@
+DTC_FLAGS_mt8395-radxa-nio-12l := -@
diff --git a/arch/arm64/boot/dts/mediatek/mt6359.dtsi b/arch/arm64/boot/dts/mediatek/mt6359.dtsi
index 150ad84d5d2b..7b10f9c59819 100644
--- a/arch/arm64/boot/dts/mediatek/mt6359.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt6359.dtsi
@@ -15,7 +15,8 @@
#io-channel-cells = <1>;
};
- mt6359codec: mt6359codec {
+ mt6359codec: audio-codec {
+ compatible = "mediatek,mt6359-codec";
};
regulators {
diff --git a/arch/arm64/boot/dts/mediatek/mt8173-elm.dtsi b/arch/arm64/boot/dts/mediatek/mt8173-elm.dtsi
index b5d4b5baf478..0d995b342d46 100644
--- a/arch/arm64/boot/dts/mediatek/mt8173-elm.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt8173-elm.dtsi
@@ -925,8 +925,6 @@
&pwrap {
pmic: pmic {
compatible = "mediatek,mt6397";
- #address-cells = <1>;
- #size-cells = <1>;
interrupts-extended = <&pio 11 IRQ_TYPE_LEVEL_HIGH>;
interrupt-controller;
#interrupt-cells = <2>;
diff --git a/arch/arm64/boot/dts/mediatek/mt8173.dtsi b/arch/arm64/boot/dts/mediatek/mt8173.dtsi
index 3458be7f7f61..6d1d8877b43f 100644
--- a/arch/arm64/boot/dts/mediatek/mt8173.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt8173.dtsi
@@ -352,14 +352,14 @@
#clock-cells = <1>;
};
- infracfg: power-controller@10001000 {
+ infracfg: clock-controller@10001000 {
compatible = "mediatek,mt8173-infracfg", "syscon";
reg = <0 0x10001000 0 0x1000>;
#clock-cells = <1>;
#reset-cells = <1>;
};
- pericfg: power-controller@10003000 {
+ pericfg: clock-controller@10003000 {
compatible = "mediatek,mt8173-pericfg", "syscon";
reg = <0 0x10003000 0 0x1000>;
#clock-cells = <1>;
@@ -564,7 +564,7 @@
memory-region = <&vpu_dma_reserved>;
};
- sysirq: intpol-controller@10200620 {
+ sysirq: interrupt-controller@10200620 {
compatible = "mediatek,mt8173-sysirq",
"mediatek,mt6577-sysirq";
interrupt-controller;
@@ -1255,8 +1255,7 @@
};
pwm0: pwm@1401e000 {
- compatible = "mediatek,mt8173-disp-pwm",
- "mediatek,mt6595-disp-pwm";
+ compatible = "mediatek,mt8173-disp-pwm";
reg = <0 0x1401e000 0 0x1000>;
#pwm-cells = <2>;
clocks = <&mmsys CLK_MM_DISP_PWM026M>,
@@ -1266,8 +1265,7 @@
};
pwm1: pwm@1401f000 {
- compatible = "mediatek,mt8173-disp-pwm",
- "mediatek,mt6595-disp-pwm";
+ compatible = "mediatek,mt8173-disp-pwm";
reg = <0 0x1401f000 0 0x1000>;
#pwm-cells = <2>;
clocks = <&mmsys CLK_MM_DISP_PWM126M>,
diff --git a/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-damu.dts b/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-damu.dts
index 3935d83a047e..7bc7c2687d6f 100644
--- a/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-damu.dts
+++ b/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-damu.dts
@@ -14,16 +14,13 @@
};
&touchscreen {
- status = "okay";
+ compatible = "elan,ekth6a12nay";
- compatible = "hid-over-i2c";
- reg = <0x10>;
- interrupts-extended = <&pio 155 IRQ_TYPE_LEVEL_LOW>;
pinctrl-names = "default";
pinctrl-0 = <&touchscreen_pins>;
- post-power-on-delay-ms = <10>;
- hid-descr-addr = <0x0001>;
+ vcc33-supply = <&pp3300_alw>;
+ vccio-supply = <&pp1800_alw>;
};
&mt6358codec {
diff --git a/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-fennel-sku1.dts b/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-fennel-sku1.dts
index 72852b760038..863f3e403de8 100644
--- a/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-fennel-sku1.dts
+++ b/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-fennel-sku1.dts
@@ -27,16 +27,12 @@
};
&touchscreen {
- status = "okay";
+ compatible = "elan,ekth6a12nay";
- compatible = "hid-over-i2c";
- reg = <0x10>;
- interrupts-extended = <&pio 155 IRQ_TYPE_LEVEL_LOW>;
pinctrl-names = "default";
pinctrl-0 = <&touchscreen_pins>;
- post-power-on-delay-ms = <10>;
- hid-descr-addr = <0x0001>;
+ vcc33-supply = <&pp3300_alw>;
};
&qca_wifi {
diff --git a/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-fennel-sku6.dts b/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-fennel-sku6.dts
index 757d0afd14fb..e0a583ce4a0b 100644
--- a/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-fennel-sku6.dts
+++ b/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-fennel-sku6.dts
@@ -14,16 +14,12 @@
};
&touchscreen {
- status = "okay";
+ compatible = "elan,ekth6a12nay";
- compatible = "hid-over-i2c";
- reg = <0x10>;
- interrupts-extended = <&pio 155 IRQ_TYPE_LEVEL_LOW>;
pinctrl-names = "default";
pinctrl-0 = <&touchscreen_pins>;
- post-power-on-delay-ms = <10>;
- hid-descr-addr = <0x0001>;
+ vcc33-supply = <&pp3300_alw>;
};
diff --git a/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-fennel-sku7.dts b/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-fennel-sku7.dts
index 6641b087e7c5..7874c9a20e12 100644
--- a/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-fennel-sku7.dts
+++ b/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-fennel-sku7.dts
@@ -14,16 +14,12 @@
};
&touchscreen {
- status = "okay";
+ compatible = "elan,ekth6a12nay";
- compatible = "hid-over-i2c";
- reg = <0x10>;
- interrupts-extended = <&pio 155 IRQ_TYPE_LEVEL_LOW>;
pinctrl-names = "default";
pinctrl-0 = <&touchscreen_pins>;
- post-power-on-delay-ms = <10>;
- hid-descr-addr = <0x0001>;
+ vcc33-supply = <&pp3300_alw>;
};
diff --git a/arch/arm64/boot/dts/mediatek/mt8188-geralt.dtsi b/arch/arm64/boot/dts/mediatek/mt8188-geralt.dtsi
index b6abecbcfa81..c5254ae0bb99 100644
--- a/arch/arm64/boot/dts/mediatek/mt8188-geralt.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt8188-geralt.dtsi
@@ -9,6 +9,7 @@
/ {
aliases {
+ dsi0 = &disp_dsi0;
i2c0 = &i2c0;
i2c1 = &i2c1;
i2c2 = &i2c2;
@@ -273,14 +274,27 @@
port {
dsi_panel_in: endpoint {
- remote-endpoint = <&dsi_out>;
+ remote-endpoint = <&dsi0_out>;
};
};
};
- port {
- dsi_out: endpoint {
- remote-endpoint = <&dsi_panel_in>;
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ dsi0_in: endpoint {
+ remote-endpoint = <&dither0_out>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ dsi0_out: endpoint {
+ remote-endpoint = <&dsi_panel_in>;
+ };
};
};
};
@@ -296,12 +310,74 @@
pinctrl-0 = <&disp_pwm1_pins>;
};
+&dither0_in {
+ remote-endpoint = <&postmask0_out>;
+};
+
+&dither0_out {
+ remote-endpoint = <&dsi0_in>;
+};
+
+&ethdr0 {
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0>;
+
+ ethdr0_in: endpoint@1 {
+ reg = <1>;
+ remote-endpoint = <&vdosys1_ep_ext>;
+ };
+ };
+
+ port@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <1>;
+
+ ethdr0_out: endpoint@1 {
+ reg = <1>;
+ remote-endpoint = <&merge5_in>;
+ };
+ };
+ };
+};
+
+&gamma0_out {
+ remote-endpoint = <&postmask0_in>;
+};
+
&dp_intf1 {
status = "okay";
- port {
- dp_intf1_out: endpoint {
- remote-endpoint = <&dptx_in>;
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0>;
+
+ dp_intf1_in: endpoint@1 {
+ reg = <1>;
+ remote-endpoint = <&merge5_out>;
+ };
+ };
+
+ port@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <1>;
+
+ dp_intf1_out: endpoint@1 {
+ reg = <1>;
+ remote-endpoint = <&dptx_in>;
+ };
};
};
};
@@ -394,6 +470,35 @@
status = "okay";
};
+&merge5 {
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0>;
+
+ merge5_in: endpoint@1 {
+ reg = <1>;
+ remote-endpoint = <&ethdr0_out>;
+ };
+ };
+
+ port@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <1>;
+
+ merge5_out: endpoint@1 {
+ reg = <1>;
+ remote-endpoint = <&dp_intf1_in>;
+ };
+ };
+ };
+};
+
&mfg0 {
domain-supply = <&mt6359_vproc2_buck_reg>;
};
@@ -513,6 +618,10 @@
};
};
+&ovl0_in {
+ remote-endpoint = <&vdosys0_ep_main>;
+};
+
&pcie {
pinctrl-names = "default";
pinctrl-0 = <&pcie_pins>;
@@ -1029,6 +1138,14 @@
interrupts-extended = <&pio 222 IRQ_TYPE_LEVEL_HIGH>;
};
+&postmask0_in {
+ remote-endpoint = <&gamma0_out>;
+};
+
+&postmask0_out {
+ remote-endpoint = <&dither0_in>;
+};
+
&sound {
pinctrl-names = "aud_etdm_hp_on", "aud_etdm_hp_off",
"aud_etdm_spk_on", "aud_etdm_spk_off",
@@ -1103,6 +1220,12 @@
};
/* USB detachable base */
+&ssusb0 {
+ dr_mode = "host";
+ vusb33-supply = <&pp3300_s3>;
+ status = "okay";
+};
+
&xhci0 {
/* controlled by EC */
vbus-supply = <&pp3300_z1>;
@@ -1110,6 +1233,12 @@
};
/* USB3 hub */
+&ssusb1 {
+ dr_mode = "host";
+ vusb33-supply = <&pp3300_s3>;
+ status = "okay";
+};
+
&xhci1 {
vusb33-supply = <&pp3300_s3>;
vbus-supply = <&pp5000_usb_vbus>;
@@ -1117,6 +1246,36 @@
};
/* USB BT */
+&ssusb2 {
+ dr_mode = "host";
+ vusb33-supply = <&pp3300_s3>;
+ status = "okay";
+};
+
+&vdosys0 {
+ port {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ vdosys0_ep_main: endpoint@0 {
+ reg = <0>;
+ remote-endpoint = <&ovl0_in>;
+ };
+ };
+};
+
+&vdosys1 {
+ port {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ vdosys1_ep_ext: endpoint@1 {
+ reg = <1>;
+ remote-endpoint = <&ethdr0_in>;
+ };
+ };
+};
+
&xhci2 {
/* no power supply since MT7921's power is controlled by PCIe */
/* MT7921's USB BT has issues with USB2 LPM */
diff --git a/arch/arm64/boot/dts/mediatek/mt8188.dtsi b/arch/arm64/boot/dts/mediatek/mt8188.dtsi
index 338120930b81..69a8423d3858 100644
--- a/arch/arm64/boot/dts/mediatek/mt8188.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt8188.dtsi
@@ -26,9 +26,11 @@
aliases {
dp-intf0 = &dp_intf0;
dp-intf1 = &dp_intf1;
+ dsc0 = &dsc0;
ethdr0 = &ethdr0;
gce0 = &gce0;
gce1 = &gce1;
+ merge0 = &merge0;
merge1 = &merge1;
merge2 = &merge2;
merge3 = &merge3;
@@ -492,7 +494,7 @@
};
cooling-maps {
- map0 {
+ cpu_little0_cooling_map0: map0 {
trip = <&cpu_little0_alert0>;
cooling-device = <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
<&cpu1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
@@ -530,7 +532,7 @@
};
cooling-maps {
- map0 {
+ cpu_little1_cooling_map0: map0 {
trip = <&cpu_little1_alert0>;
cooling-device = <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
<&cpu1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
@@ -568,7 +570,7 @@
};
cooling-maps {
- map0 {
+ cpu_little2_cooling_map0: map0 {
trip = <&cpu_little2_alert0>;
cooling-device = <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
<&cpu1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
@@ -606,7 +608,7 @@
};
cooling-maps {
- map0 {
+ cpu_little3_cooling_map0: map0 {
trip = <&cpu_little3_alert0>;
cooling-device = <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
<&cpu1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
@@ -1392,7 +1394,7 @@
compatible = "mediatek,mt8188-afe";
reg = <0 0x10b10000 0 0x10000>;
assigned-clocks = <&topckgen CLK_TOP_A1SYS_HP>;
- assigned-clock-parents = <&clk26m>;
+ assigned-clock-parents = <&topckgen CLK_TOP_APLL1_D4>;
clocks = <&clk26m>,
<&apmixedsys CLK_APMIXED_APLL1>,
<&apmixedsys CLK_APMIXED_APLL2>,
@@ -1647,6 +1649,38 @@
status = "disabled";
};
+ ssusb1: usb@11201000 {
+ compatible = "mediatek,mt8188-mtu3", "mediatek,mtu3";
+ reg = <0 0x11201000 0 0x2dff>, <0 0x11203e00 0 0x0100>;
+ reg-names = "mac", "ippc";
+ ranges = <0 0 0 0x11200000 0 0x3f00>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+ interrupts = <GIC_SPI 128 IRQ_TYPE_LEVEL_HIGH 0>;
+ assigned-clocks = <&topckgen CLK_TOP_USB_TOP>;
+ assigned-clock-parents = <&topckgen CLK_TOP_UNIVPLL_D5_D4>;
+ clocks = <&pericfg_ao CLK_PERI_AO_SSUSB_BUS>,
+ <&topckgen CLK_TOP_SSUSB_TOP_REF>,
+ <&pericfg_ao CLK_PERI_AO_SSUSB_XHCI>;
+ clock-names = "sys_ck", "ref_ck", "mcu_ck";
+ phys = <&u2port1 PHY_TYPE_USB2>, <&u3port1 PHY_TYPE_USB3>;
+ wakeup-source;
+ mediatek,syscon-wakeup = <&pericfg 0x468 2>;
+ status = "disabled";
+
+ xhci1: usb@0 {
+ compatible = "mediatek,mt8188-xhci", "mediatek,mtk-xhci";
+ reg = <0 0 0 0x1000>;
+ reg-names = "mac";
+ interrupts = <GIC_SPI 129 IRQ_TYPE_LEVEL_HIGH 0>;
+ assigned-clocks = <&topckgen CLK_TOP_SSUSB_XHCI>;
+ assigned-clock-parents = <&topckgen CLK_TOP_UNIVPLL_D5_D4>;
+ clocks = <&pericfg_ao CLK_PERI_AO_SSUSB_XHCI>;
+ clock-names = "sys_ck";
+ status = "disabled";
+ };
+ };
+
eth: ethernet@11021000 {
compatible = "mediatek,mt8188-gmac", "mediatek,mt8195-gmac",
"snps,dwmac-5.10a";
@@ -1744,27 +1778,6 @@
};
};
- xhci1: usb@11200000 {
- compatible = "mediatek,mt8188-xhci", "mediatek,mtk-xhci";
- reg = <0 0x11200000 0 0x1000>,
- <0 0x11203e00 0 0x0100>;
- reg-names = "mac", "ippc";
- interrupts = <GIC_SPI 129 IRQ_TYPE_LEVEL_HIGH 0>;
- phys = <&u2port1 PHY_TYPE_USB2>,
- <&u3port1 PHY_TYPE_USB3>;
- assigned-clocks = <&topckgen CLK_TOP_USB_TOP>,
- <&topckgen CLK_TOP_SSUSB_XHCI>;
- assigned-clock-parents = <&topckgen CLK_TOP_UNIVPLL_D5_D4>,
- <&topckgen CLK_TOP_UNIVPLL_D5_D4>;
- clocks = <&pericfg_ao CLK_PERI_AO_SSUSB_BUS>,
- <&topckgen CLK_TOP_SSUSB_TOP_REF>,
- <&pericfg_ao CLK_PERI_AO_SSUSB_XHCI>;
- clock-names = "sys_ck", "ref_ck", "mcu_ck";
- mediatek,syscon-wakeup = <&pericfg 0x468 2>;
- wakeup-source;
- status = "disabled";
- };
-
mmc0: mmc@11230000 {
compatible = "mediatek,mt8188-mmc", "mediatek,mt8183-mmc";
reg = <0 0x11230000 0 0x10000>,
@@ -1792,6 +1805,20 @@
status = "disabled";
};
+ mmc2: mmc@11250000 {
+ compatible = "mediatek,mt8188-mmc", "mediatek,mt8183-mmc";
+ reg = <0 0x11250000 0 0x1000>,
+ <0 0x11e60000 0 0x1000>;
+ interrupts = <GIC_SPI 136 IRQ_TYPE_LEVEL_HIGH 0>;
+ clocks = <&topckgen CLK_TOP_MSDC30_2>,
+ <&infracfg_ao CLK_INFRA_AO_MSDC2>,
+ <&infracfg_ao CLK_INFRA_AO_MSDC30_2>;
+ clock-names = "source", "hclk", "source_cg";
+ assigned-clocks = <&topckgen CLK_TOP_MSDC30_2>;
+ assigned-clock-parents = <&topckgen CLK_TOP_MSDCPLL_D2>;
+ status = "disabled";
+ };
+
lvts_mcu: thermal-sensor@11278000 {
compatible = "mediatek,mt8188-lvts-mcu";
reg = <0 0x11278000 0 0x1000>;
@@ -1851,42 +1878,68 @@
#clock-cells = <1>;
};
- xhci2: usb@112a0000 {
- compatible = "mediatek,mt8188-xhci", "mediatek,mtk-xhci";
- reg = <0 0x112a0000 0 0x1000>,
- <0 0x112a3e00 0 0x0100>;
+ ssusb2: usb@112a1000 {
+ compatible = "mediatek,mt8188-mtu3", "mediatek,mtu3";
+ reg = <0 0x112a1000 0 0x2dff>, <0 0x112a3e00 0 0x0100>;
reg-names = "mac", "ippc";
- interrupts = <GIC_SPI 536 IRQ_TYPE_LEVEL_HIGH 0>;
- phys = <&u2port2 PHY_TYPE_USB2>;
- assigned-clocks = <&topckgen CLK_TOP_SSUSB_XHCI_3P>,
- <&topckgen CLK_TOP_USB_TOP_3P>;
- assigned-clock-parents = <&topckgen CLK_TOP_UNIVPLL_D5_D4>,
- <&topckgen CLK_TOP_UNIVPLL_D5_D4>;
+ ranges = <0 0 0 0x112a0000 0 0x3f00>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+ interrupts = <GIC_SPI 535 IRQ_TYPE_LEVEL_HIGH 0>;
+ assigned-clocks = <&topckgen CLK_TOP_USB_TOP_3P>;
+ assigned-clock-parents = <&topckgen CLK_TOP_UNIVPLL_D5_D4>;
clocks = <&pericfg_ao CLK_PERI_AO_SSUSB_3P_BUS>,
<&topckgen CLK_TOP_SSUSB_TOP_P3_REF>,
<&pericfg_ao CLK_PERI_AO_SSUSB_3P_XHCI>;
clock-names = "sys_ck", "ref_ck", "mcu_ck";
+ phys = <&u2port2 PHY_TYPE_USB2>;
+ wakeup-source;
+ mediatek,syscon-wakeup = <&pericfg 0x470 2>;
status = "disabled";
+
+ xhci2: usb@0 {
+ compatible = "mediatek,mt8188-xhci", "mediatek,mtk-xhci";
+ reg = <0 0 0 0x1000>;
+ reg-names = "mac";
+ interrupts = <GIC_SPI 536 IRQ_TYPE_LEVEL_HIGH 0>;
+ assigned-clocks = <&topckgen CLK_TOP_SSUSB_XHCI_3P>;
+ assigned-clock-parents = <&topckgen CLK_TOP_UNIVPLL_D5_D4>;
+ clocks = <&pericfg_ao CLK_PERI_AO_SSUSB_3P_XHCI>;
+ clock-names = "sys_ck";
+ status = "disabled";
+ };
};
- xhci0: usb@112b0000 {
- compatible = "mediatek,mt8188-xhci", "mediatek,mtk-xhci";
- reg = <0 0x112b0000 0 0x1000>,
- <0 0x112b3e00 0 0x0100>;
+ ssusb0: usb@112b1000 {
+ compatible = "mediatek,mt8188-mtu3", "mediatek,mtu3";
+ reg = <0 0x112b1000 0 0x2dff>, <0 0x112b3e00 0 0x0100>;
reg-names = "mac", "ippc";
- interrupts = <GIC_SPI 533 IRQ_TYPE_LEVEL_HIGH 0>;
- phys = <&u2port0 PHY_TYPE_USB2>;
- assigned-clocks = <&topckgen CLK_TOP_SSUSB_XHCI_2P>,
- <&topckgen CLK_TOP_USB_TOP_2P>;
- assigned-clock-parents = <&topckgen CLK_TOP_UNIVPLL_D5_D4>,
- <&topckgen CLK_TOP_UNIVPLL_D5_D4>;
+ ranges = <0 0 0 0x112b0000 0 0x3f00>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+ interrupts = <GIC_SPI 532 IRQ_TYPE_LEVEL_HIGH 0>;
+ assigned-clocks = <&topckgen CLK_TOP_SSUSB_XHCI_2P>;
+ assigned-clock-parents = <&topckgen CLK_TOP_UNIVPLL_D5_D4>;
clocks = <&pericfg_ao CLK_PERI_AO_SSUSB_2P_BUS>,
<&topckgen CLK_TOP_SSUSB_TOP_P2_REF>,
<&pericfg_ao CLK_PERI_AO_SSUSB_2P_XHCI>;
clock-names = "sys_ck", "ref_ck", "mcu_ck";
- mediatek,syscon-wakeup = <&pericfg 0x460 2>;
+ phys = <&u2port0 PHY_TYPE_USB2>;
wakeup-source;
+ mediatek,syscon-wakeup = <&pericfg 0x460 2>;
status = "disabled";
+
+ xhci0: usb@0 {
+ compatible = "mediatek,mt8188-xhci", "mediatek,mtk-xhci";
+ reg = <0 0 0 0x1000>;
+ reg-names = "mac";
+ interrupts = <GIC_SPI 533 IRQ_TYPE_LEVEL_HIGH 0>;
+ assigned-clocks = <&topckgen CLK_TOP_USB_TOP_2P>;
+ assigned-clock-parents = <&topckgen CLK_TOP_UNIVPLL_D5_D4>;
+ clocks = <&pericfg_ao CLK_PERI_AO_SSUSB_2P_XHCI>;
+ clock-names = "sys_ck";
+ status = "disabled";
+ };
};
pcie: pcie@112f0000 {
@@ -2502,6 +2555,23 @@
iommus = <&vdo_iommu M4U_PORT_L0_DISP_OVL0_RDMA0>;
power-domains = <&spm MT8188_POWER_DOMAIN_VDOSYS0>;
mediatek,gce-client-reg = <&gce0 SUBSYS_1c00XXXX 0x0000 0x1000>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ ovl0_in: endpoint { };
+ };
+
+ port@1 {
+ reg = <1>;
+ ovl0_out: endpoint {
+ remote-endpoint = <&rdma0_in>;
+ };
+ };
+ };
};
rdma0: rdma@1c002000 {
@@ -2512,6 +2582,25 @@
iommus = <&vdo_iommu M4U_PORT_L1_DISP_RDMA0>;
power-domains = <&spm MT8188_POWER_DOMAIN_VDOSYS0>;
mediatek,gce-client-reg = <&gce0 SUBSYS_1c00XXXX 0x2000 0x1000>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ rdma0_in: endpoint {
+ remote-endpoint = <&ovl0_out>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ rdma0_out: endpoint {
+ remote-endpoint = <&color0_in>;
+ };
+ };
+ };
};
color0: color@1c003000 {
@@ -2521,6 +2610,25 @@
interrupts = <GIC_SPI 639 IRQ_TYPE_LEVEL_HIGH 0>;
power-domains = <&spm MT8188_POWER_DOMAIN_VDOSYS0>;
mediatek,gce-client-reg = <&gce0 SUBSYS_1c00XXXX 0x3000 0x1000>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ color0_in: endpoint {
+ remote-endpoint = <&rdma0_out>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ color0_out: endpoint {
+ remote-endpoint = <&ccorr0_in>;
+ };
+ };
+ };
};
ccorr0: ccorr@1c004000 {
@@ -2530,6 +2638,25 @@
interrupts = <GIC_SPI 640 IRQ_TYPE_LEVEL_HIGH 0>;
power-domains = <&spm MT8188_POWER_DOMAIN_VDOSYS0>;
mediatek,gce-client-reg = <&gce0 SUBSYS_1c00XXXX 0x4000 0x1000>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ ccorr0_in: endpoint {
+ remote-endpoint = <&color0_out>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ ccorr0_out: endpoint {
+ remote-endpoint = <&aal0_in>;
+ };
+ };
+ };
};
aal0: aal@1c005000 {
@@ -2539,6 +2666,25 @@
interrupts = <GIC_SPI 641 IRQ_TYPE_LEVEL_HIGH 0>;
power-domains = <&spm MT8188_POWER_DOMAIN_VDOSYS0>;
mediatek,gce-client-reg = <&gce0 SUBSYS_1c00XXXX 0x5000 0x1000>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ aal0_in: endpoint {
+ remote-endpoint = <&ccorr0_out>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ aal0_out: endpoint {
+ remote-endpoint = <&gamma0_in>;
+ };
+ };
+ };
};
gamma0: gamma@1c006000 {
@@ -2548,6 +2694,23 @@
interrupts = <GIC_SPI 642 IRQ_TYPE_LEVEL_HIGH 0>;
power-domains = <&spm MT8188_POWER_DOMAIN_VDOSYS0>;
mediatek,gce-client-reg = <&gce0 SUBSYS_1c00XXXX 0x6000 0x1000>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ gamma0_in: endpoint {
+ remote-endpoint = <&aal0_out>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ gamma0_out: endpoint { };
+ };
+ };
};
dither0: dither@1c007000 {
@@ -2557,6 +2720,21 @@
interrupts = <GIC_SPI 643 IRQ_TYPE_LEVEL_HIGH 0>;
power-domains = <&spm MT8188_POWER_DOMAIN_VDOSYS0>;
mediatek,gce-client-reg = <&gce0 SUBSYS_1c00XXXX 0x7000 0x1000>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ dither0_in: endpoint { };
+ };
+
+ port@1 {
+ reg = <1>;
+ dither0_out: endpoint { };
+ };
+ };
};
disp_dsi0: dsi@1c008000 {
@@ -2574,6 +2752,15 @@
status = "disabled";
};
+ dsc0: dsc@1c009000 {
+ compatible = "mediatek,mt8188-disp-dsc", "mediatek,mt8195-disp-dsc";
+ reg = <0 0x1c009000 0 0x1000>;
+ clocks = <&vdosys0 CLK_VDO0_DSC_WRAP0>;
+ interrupts = <GIC_SPI 645 IRQ_TYPE_LEVEL_HIGH 0>;
+ power-domains = <&spm MT8188_POWER_DOMAIN_VDOSYS0>;
+ mediatek,gce-client-reg = <&gce0 SUBSYS_1c00XXXX 0x9000 0x1000>;
+ };
+
disp_dsi1: dsi@1c012000 {
compatible = "mediatek,mt8188-dsi";
reg = <0 0x1c012000 0 0x1000>;
@@ -2589,6 +2776,17 @@
status = "disabled";
};
+ merge0: merge0@1c014000 {
+ compatible = "mediatek,mt8188-disp-merge", "mediatek,mt8195-disp-merge";
+ reg = <0 0x1c014000 0 0x1000>;
+ clocks = <&vdosys0 CLK_VDO0_VPP_MERGE0>,
+ <&vdosys1 CLK_VDO1_MERGE_VDO1_DL_ASYNC>;
+ clock-names = "merge", "merge_async";
+ interrupts = <GIC_SPI 656 IRQ_TYPE_LEVEL_HIGH 0>;
+ power-domains = <&spm MT8188_POWER_DOMAIN_VDOSYS0>;
+ mediatek,gce-client-reg = <&gce0 SUBSYS_1c01XXXX 0x4000 0x1000>;
+ };
+
dp_intf0: dp-intf@1c015000 {
compatible = "mediatek,mt8188-dp-intf";
reg = <0 0x1c015000 0 0x1000>;
@@ -2619,6 +2817,21 @@
interrupts = <GIC_SPI 661 IRQ_TYPE_LEVEL_HIGH 0>;
power-domains = <&spm MT8188_POWER_DOMAIN_VDOSYS0>;
mediatek,gce-client-reg = <&gce0 SUBSYS_1c01XXXX 0xa000 0x1000>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ postmask0_in: endpoint { };
+ };
+
+ port@1 {
+ reg = <1>;
+ postmask0_out: endpoint { };
+ };
+ };
};
vdosys0: syscon@1c01d000 {
diff --git a/arch/arm64/boot/dts/mediatek/mt8195-cherry.dtsi b/arch/arm64/boot/dts/mediatek/mt8195-cherry.dtsi
index 5056e07399e2..e70599807bb1 100644
--- a/arch/arm64/boot/dts/mediatek/mt8195-cherry.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt8195-cherry.dtsi
@@ -297,12 +297,29 @@
cpu-supply = <&mt6315_6_vbuck1>;
};
+&dither0_out {
+ remote-endpoint = <&dsc0_in>;
+};
+
&dp_intf0 {
status = "okay";
- port {
- dp_intf0_out: endpoint {
- remote-endpoint = <&edp_in>;
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ dp_intf0_in: endpoint {
+ remote-endpoint = <&merge0_out>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ dp_intf0_out: endpoint {
+ remote-endpoint = <&edp_in>;
+ };
};
};
};
@@ -310,9 +327,51 @@
&dp_intf1 {
status = "okay";
- port {
- dp_intf1_out: endpoint {
- remote-endpoint = <&dptx_in>;
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0>;
+
+ dp_intf1_in: endpoint@1 {
+ reg = <1>;
+ remote-endpoint = <&merge5_out>;
+ };
+ };
+
+ port@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <1>;
+
+ dp_intf1_out: endpoint@1 {
+ reg = <1>;
+ remote-endpoint = <&dptx_in>;
+ };
+ };
+ };
+};
+
+&dsc0 {
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ dsc0_in: endpoint {
+ remote-endpoint = <&dither0_out>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ dsc0_out: endpoint {
+ remote-endpoint = <&merge0_in>;
+ };
};
};
};
@@ -357,6 +416,35 @@
};
};
+&ethdr0 {
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0>;
+
+ ethdr0_in: endpoint@1 {
+ reg = <1>;
+ remote-endpoint = <&vdosys1_ep_ext>;
+ };
+ };
+
+ port@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <1>;
+
+ ethdr0_out: endpoint@1 {
+ reg = <1>;
+ remote-endpoint = <&merge5_in>;
+ };
+ };
+ };
+};
+
&disp_pwm0 {
status = "okay";
@@ -376,8 +464,12 @@
#size-cells = <0>;
port@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
reg = <0>;
- dptx_in: endpoint {
+
+ dptx_in: endpoint@1 {
+ reg = <1>;
remote-endpoint = <&dp_intf1_out>;
};
};
@@ -511,6 +603,56 @@
};
};
+&merge0 {
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ merge0_in: endpoint {
+ remote-endpoint = <&dsc0_out>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ merge0_out: endpoint {
+ remote-endpoint = <&dp_intf0_in>;
+ };
+ };
+ };
+};
+
+&merge5 {
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0>;
+
+ merge5_in: endpoint@1 {
+ reg = <1>;
+ remote-endpoint = <&ethdr0_out>;
+ };
+ };
+
+ port@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <1>;
+
+ merge5_out: endpoint@1 {
+ reg = <1>;
+ remote-endpoint = <&dp_intf1_in>;
+ };
+ };
+ };
+};
+
&mfg0 {
domain-supply = <&mt6315_7_vbuck1>;
};
@@ -612,6 +754,10 @@
};
};
+&ovl0_in {
+ remote-endpoint = <&vdosys0_ep_main>;
+};
+
&pcie1 {
status = "okay";
@@ -1363,6 +1509,18 @@
status = "okay";
};
+&vdosys0 {
+ port {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ vdosys0_ep_main: endpoint@0 {
+ reg = <0>;
+ remote-endpoint = <&ovl0_in>;
+ };
+ };
+};
+
/*
* For the USB Type-C ports the role and alternate modes switching is
* done by the EC so we set dr_mode to host to avoid interfering.
@@ -1385,6 +1543,18 @@
status = "okay";
};
+&vdosys1 {
+ port {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ vdosys1_ep_ext: endpoint@1 {
+ reg = <1>;
+ remote-endpoint = <&ethdr0_in>;
+ };
+ };
+};
+
&xhci0 {
status = "okay";
diff --git a/arch/arm64/boot/dts/mediatek/mt8195.dtsi b/arch/arm64/boot/dts/mediatek/mt8195.dtsi
index f013dbad9dc4..4f2dc0a75566 100644
--- a/arch/arm64/boot/dts/mediatek/mt8195.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt8195.dtsi
@@ -3142,6 +3142,23 @@
clocks = <&vdosys0 CLK_VDO0_DISP_OVL0>;
iommus = <&iommu_vdo M4U_PORT_L0_DISP_OVL0_RDMA0>;
mediatek,gce-client-reg = <&gce0 SUBSYS_1c00XXXX 0x0000 0x1000>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ ovl0_in: endpoint { };
+ };
+
+ port@1 {
+ reg = <1>;
+ ovl0_out: endpoint {
+ remote-endpoint = <&rdma0_in>;
+ };
+ };
+ };
};
rdma0: rdma@1c002000 {
@@ -3152,6 +3169,25 @@
clocks = <&vdosys0 CLK_VDO0_DISP_RDMA0>;
iommus = <&iommu_vdo M4U_PORT_L0_DISP_RDMA0>;
mediatek,gce-client-reg = <&gce0 SUBSYS_1c00XXXX 0x2000 0x1000>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ rdma0_in: endpoint {
+ remote-endpoint = <&ovl0_out>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ rdma0_out: endpoint {
+ remote-endpoint = <&color0_in>;
+ };
+ };
+ };
};
color0: color@1c003000 {
@@ -3161,6 +3197,25 @@
power-domains = <&spm MT8195_POWER_DOMAIN_VDOSYS0>;
clocks = <&vdosys0 CLK_VDO0_DISP_COLOR0>;
mediatek,gce-client-reg = <&gce0 SUBSYS_1c00XXXX 0x3000 0x1000>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ color0_in: endpoint {
+ remote-endpoint = <&rdma0_out>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ color0_out: endpoint {
+ remote-endpoint = <&ccorr0_in>;
+ };
+ };
+ };
};
ccorr0: ccorr@1c004000 {
@@ -3170,6 +3225,25 @@
power-domains = <&spm MT8195_POWER_DOMAIN_VDOSYS0>;
clocks = <&vdosys0 CLK_VDO0_DISP_CCORR0>;
mediatek,gce-client-reg = <&gce0 SUBSYS_1c00XXXX 0x4000 0x1000>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ ccorr0_in: endpoint {
+ remote-endpoint = <&color0_out>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ ccorr0_out: endpoint {
+ remote-endpoint = <&aal0_in>;
+ };
+ };
+ };
};
aal0: aal@1c005000 {
@@ -3179,6 +3253,25 @@
power-domains = <&spm MT8195_POWER_DOMAIN_VDOSYS0>;
clocks = <&vdosys0 CLK_VDO0_DISP_AAL0>;
mediatek,gce-client-reg = <&gce0 SUBSYS_1c00XXXX 0x5000 0x1000>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ aal0_in: endpoint {
+ remote-endpoint = <&ccorr0_out>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ aal0_out: endpoint {
+ remote-endpoint = <&gamma0_in>;
+ };
+ };
+ };
};
gamma0: gamma@1c006000 {
@@ -3188,6 +3281,25 @@
power-domains = <&spm MT8195_POWER_DOMAIN_VDOSYS0>;
clocks = <&vdosys0 CLK_VDO0_DISP_GAMMA0>;
mediatek,gce-client-reg = <&gce0 SUBSYS_1c00XXXX 0x6000 0x1000>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ gamma0_in: endpoint {
+ remote-endpoint = <&aal0_out>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ gamma0_out: endpoint {
+ remote-endpoint = <&dither0_in>;
+ };
+ };
+ };
};
dither0: dither@1c007000 {
@@ -3197,6 +3309,23 @@
power-domains = <&spm MT8195_POWER_DOMAIN_VDOSYS0>;
clocks = <&vdosys0 CLK_VDO0_DISP_DITHER0>;
mediatek,gce-client-reg = <&gce0 SUBSYS_1c00XXXX 0x7000 0x1000>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ dither0_in: endpoint {
+ remote-endpoint = <&gamma0_out>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ dither0_out: endpoint { };
+ };
+ };
};
dsi0: dsi@1c008000 {
diff --git a/arch/arm64/boot/dts/mediatek/mt8365-evk.dts b/arch/arm64/boot/dts/mediatek/mt8365-evk.dts
index 44c61094c4d5..1f8584bd66c3 100644
--- a/arch/arm64/boot/dts/mediatek/mt8365-evk.dts
+++ b/arch/arm64/boot/dts/mediatek/mt8365-evk.dts
@@ -28,6 +28,21 @@
stdout-path = "serial0:921600n8";
};
+ connector {
+ compatible = "hdmi-connector";
+ label = "hdmi";
+ type = "d";
+
+ port {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ hdmi_connector_in: endpoint@0 {
+ reg = <0>;
+ remote-endpoint = <&hdmi_connector_out>;
+ };
+ };
+ };
+
firmware {
optee {
compatible = "linaro,optee-tz";
@@ -105,6 +120,16 @@
pinctrl-5 = <&aud_mosi_on_pins>;
mediatek,platform = <&afe>;
};
+
+ vsys_lcm_reg: regulator-vsys-lcm {
+ compatible = "regulator-fixed";
+ enable-active-high;
+ gpio = <&pio 129 GPIO_ACTIVE_HIGH>;
+ regulator-max-microvolt = <5000000>;
+ regulator-min-microvolt = <5000000>;
+ regulator-name = "vsys_lcm";
+ };
+
};
&afe {
@@ -132,13 +157,102 @@
sram-supply = <&mt6357_vsram_proc_reg>;
};
+&dither0_out {
+ remote-endpoint = <&dsi0_in>;
+};
+
+&dpi0 {
+ pinctrl-0 = <&dpi_default_pins>;
+ pinctrl-1 = <&dpi_idle_pins>;
+ pinctrl-names = "default", "sleep";
+ /*
+ * Ethernet and HDMI (DPI0) are sharing pins.
+ * Only one can be enabled at a time and requires the physical switch
+ * SW2101 to be set to the LAN position.
+ */
+ status = "disabled";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0>;
+ dpi0_in: endpoint@1 {
+ reg = <1>;
+ remote-endpoint = <&rdma1_out>;
+ };
+ };
+
+ port@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <1>;
+ dpi0_out: endpoint@1 {
+ reg = <1>;
+ remote-endpoint = <&it66121_in>;
+ };
+ };
+ };
+};
+
+&dsi0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "okay";
+
+ panel@0 {
+ compatible = "startek,kd070fhfid015";
+ reg = <0>;
+ enable-gpios = <&pio 67 GPIO_ACTIVE_HIGH>;
+ reset-gpios = <&pio 20 GPIO_ACTIVE_HIGH>;
+ iovcc-supply = <&mt6357_vsim1_reg>;
+ power-supply = <&vsys_lcm_reg>;
+
+ port {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ panel_in: endpoint@0 {
+ reg = <0>;
+ remote-endpoint = <&dsi0_out>;
+ };
+ };
+ };
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0>;
+ dsi0_in: endpoint@0 {
+ reg = <0>;
+ remote-endpoint = <&dither0_out>;
+ };
+ };
+
+ port@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <1>;
+ dsi0_out: endpoint@0 {
+ reg = <0>;
+ remote-endpoint = <&panel_in>;
+ };
+ };
+ };
+};
+
&ethernet {
pinctrl-0 = <&ethernet_pins>;
pinctrl-names = "default";
phy-handle = <&eth_phy>;
phy-mode = "rmii";
/*
- * Ethernet and HDMI (DSI0) are sharing pins.
+ * Ethernet and HDMI (DPI0) are sharing pins.
* Only one can be enabled at a time and require the physical switch
* SW2101 to be set on LAN position
* mt6357_vibr_reg and mt6357_vsim2_reg are needed to supply ethernet
@@ -162,6 +276,56 @@
status = "okay";
};
+&i2c1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clock-div = <2>;
+ clock-frequency = <100000>;
+ pinctrl-0 = <&i2c1_pins>;
+ pinctrl-names = "default";
+ status = "okay";
+
+ it66121_hdmi: hdmi@4c {
+ compatible = "ite,it66121";
+ reg = <0x4c>;
+ #sound-dai-cells = <0>;
+ interrupt-parent = <&pio>;
+ interrupts = <68 IRQ_TYPE_LEVEL_LOW>;
+ pinctrl-0 = <&ite_pins>;
+ pinctrl-names = "default";
+ reset-gpios = <&pio 69 GPIO_ACTIVE_LOW>;
+ vcn18-supply = <&mt6357_vsim2_reg>;
+ vcn33-supply = <&mt6357_vibr_reg>;
+ vrf12-supply = <&mt6357_vrf12_reg>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0>;
+ it66121_in: endpoint@0 {
+ reg = <0>;
+ bus-width = <12>;
+ remote-endpoint = <&dpi0_out>;
+ };
+ };
+
+ port@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <1>;
+ hdmi_connector_out: endpoint@0 {
+ reg = <0>;
+ remote-endpoint = <&hdmi_connector_in>;
+ };
+ };
+ };
+ };
+};
+
&mmc0 {
assigned-clock-parents = <&topckgen CLK_TOP_MSDCPLL>;
assigned-clocks = <&topckgen CLK_TOP_MSDC50_0_SEL>;
@@ -206,6 +370,11 @@
mediatek,micbias1-microvolt = <1700000>;
};
+&mt6357_vsim1_reg {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+};
+
&pio {
aud_default_pins: audiodefault-pins {
clk-dat-pins {
@@ -268,6 +437,49 @@
};
};
+ dpi_default_pins: dpi-default-pins {
+ pins {
+ pinmux = <MT8365_PIN_0_GPIO0__FUNC_DPI_D0>,
+ <MT8365_PIN_1_GPIO1__FUNC_DPI_D1>,
+ <MT8365_PIN_2_GPIO2__FUNC_DPI_D2>,
+ <MT8365_PIN_3_GPIO3__FUNC_DPI_D3>,
+ <MT8365_PIN_4_GPIO4__FUNC_DPI_D4>,
+ <MT8365_PIN_5_GPIO5__FUNC_DPI_D5>,
+ <MT8365_PIN_6_GPIO6__FUNC_DPI_D6>,
+ <MT8365_PIN_7_GPIO7__FUNC_DPI_D7>,
+ <MT8365_PIN_8_GPIO8__FUNC_DPI_D8>,
+ <MT8365_PIN_9_GPIO9__FUNC_DPI_D9>,
+ <MT8365_PIN_10_GPIO10__FUNC_DPI_D10>,
+ <MT8365_PIN_11_GPIO11__FUNC_DPI_D11>,
+ <MT8365_PIN_12_GPIO12__FUNC_DPI_DE>,
+ <MT8365_PIN_13_GPIO13__FUNC_DPI_VSYNC>,
+ <MT8365_PIN_14_GPIO14__FUNC_DPI_CK>,
+ <MT8365_PIN_15_GPIO15__FUNC_DPI_HSYNC>;
+ drive-strength = <4>;
+ };
+ };
+
+ dpi_idle_pins: dpi-idle-pins {
+ pins {
+ pinmux = <MT8365_PIN_0_GPIO0__FUNC_GPIO0>,
+ <MT8365_PIN_1_GPIO1__FUNC_GPIO1>,
+ <MT8365_PIN_2_GPIO2__FUNC_GPIO2>,
+ <MT8365_PIN_3_GPIO3__FUNC_GPIO3>,
+ <MT8365_PIN_4_GPIO4__FUNC_GPIO4>,
+ <MT8365_PIN_5_GPIO5__FUNC_GPIO5>,
+ <MT8365_PIN_6_GPIO6__FUNC_GPIO6>,
+ <MT8365_PIN_7_GPIO7__FUNC_GPIO7>,
+ <MT8365_PIN_8_GPIO8__FUNC_GPIO8>,
+ <MT8365_PIN_9_GPIO9__FUNC_GPIO9>,
+ <MT8365_PIN_10_GPIO10__FUNC_GPIO10>,
+ <MT8365_PIN_11_GPIO11__FUNC_GPIO11>,
+ <MT8365_PIN_12_GPIO12__FUNC_GPIO12>,
+ <MT8365_PIN_13_GPIO13__FUNC_GPIO13>,
+ <MT8365_PIN_14_GPIO14__FUNC_GPIO14>,
+ <MT8365_PIN_15_GPIO15__FUNC_GPIO15>;
+ };
+ };
+
ethernet_pins: ethernet-pins {
phy_reset_pins {
pinmux = <MT8365_PIN_133_TDM_TX_DATA1__FUNC_GPIO133>;
@@ -309,6 +521,33 @@
};
};
+ i2c1_pins: i2c1-pins {
+ pins {
+ pinmux = <MT8365_PIN_59_SDA1__FUNC_SDA1_0>,
+ <MT8365_PIN_60_SCL1__FUNC_SCL1_0>;
+ bias-pull-up;
+ };
+ };
+
+ ite_pins: ite-pins {
+ irq_ite_pins {
+ pinmux = <MT8365_PIN_68_CMDAT0__FUNC_GPIO68>;
+ input-enable;
+ bias-pull-up;
+ };
+
+ pwr_pins {
+ pinmux = <MT8365_PIN_70_CMDAT2__FUNC_GPIO70>,
+ <MT8365_PIN_71_CMDAT3__FUNC_GPIO71>;
+ output-high;
+ };
+
+ rst_ite_pins {
+ pinmux = <MT8365_PIN_69_CMDAT1__FUNC_GPIO69>;
+ output-high;
+ };
+ };
+
mmc0_default_pins: mmc0-default-pins {
clk-pins {
pinmux = <MT8365_PIN_99_MSDC0_CLK__FUNC_MSDC0_CLK>;
@@ -464,6 +703,10 @@
status = "okay";
};
+&rdma1_out {
+ remote-endpoint = <&dpi0_in>;
+};
+
&ssusb {
dr_mode = "otg";
maximum-speed = "high-speed";
diff --git a/arch/arm64/boot/dts/mediatek/mt8365.dtsi b/arch/arm64/boot/dts/mediatek/mt8365.dtsi
index 2bf8c9d02b6e..e6d2b3221a3b 100644
--- a/arch/arm64/boot/dts/mediatek/mt8365.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt8365.dtsi
@@ -10,6 +10,7 @@
#include <dt-bindings/clock/mediatek,mt8365-clk.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/memory/mediatek,mt8365-larb-port.h>
#include <dt-bindings/phy/phy.h>
#include <dt-bindings/power/mediatek,mt8365-power.h>
@@ -19,6 +20,19 @@
#address-cells = <2>;
#size-cells = <2>;
+ aliases {
+ aal0 = &aal0;
+ ccorr0 = &ccorr0;
+ color0 = &color0;
+ dither0 = &dither0;
+ dpi0 = &dpi0;
+ dsi0 = &dsi0;
+ gamma0 = &gamma0;
+ ovl0 = &ovl0;
+ rdma0 = &rdma0;
+ rdma1 = &rdma1;
+ };
+
cpus {
#address-cells = <1>;
#size-cells = <0>;
@@ -609,6 +623,15 @@
status = "disabled";
};
+ disp_pwm: pwm@1100e000 {
+ compatible = "mediatek,mt8365-disp-pwm", "mediatek,mt8183-disp-pwm";
+ reg = <0 0x1100e000 0 0x1000>;
+ clock-names = "main", "mm";
+ clocks = <&topckgen CLK_TOP_DISP_PWM_SEL>, <&infracfg CLK_IFR_DISP_PWM>;
+ power-domains = <&spm MT8365_POWER_DOMAIN_MM>;
+ #pwm-cells = <2>;
+ };
+
i2c3: i2c@1100f000 {
compatible = "mediatek,mt8365-i2c", "mediatek,mt8168-i2c";
reg = <0 0x1100f000 0 0xa0>, <0 0x11000200 0 0x80>;
@@ -705,6 +728,15 @@
status = "disabled";
};
+ mipi_tx0: dsi-phy@11c00000 {
+ compatible = "mediatek,mt8365-mipi-tx", "mediatek,mt8183-mipi-tx";
+ reg = <0 0x11c00000 0 0x800>;
+ clock-output-names = "mipi_tx0_pll";
+ clocks = <&clk26m>;
+ #clock-cells = <0>;
+ #phy-cells = <0>;
+ };
+
u3phy: t-phy@11cc0000 {
compatible = "mediatek,mt8365-tphy", "mediatek,generic-tphy-v2";
#address-cells = <1>;
@@ -732,6 +764,26 @@
compatible = "mediatek,mt8365-mmsys", "syscon";
reg = <0 0x14000000 0 0x1000>;
#clock-cells = <1>;
+ port {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ mmsys_main: endpoint@0 {
+ reg = <0>;
+ remote-endpoint = <&ovl0_in>;
+ };
+ mmsys_ext: endpoint@1 {
+ reg = <1>;
+ remote-endpoint = <&rdma1_in>;
+ };
+ };
+ };
+
+ mutex: mutex@14001000 {
+ compatible = "mediatek,mt8365-disp-mutex";
+ reg = <0 0x14001000 0 0x1000>;
+ interrupts = <GIC_SPI 154 IRQ_TYPE_LEVEL_LOW>;
+ power-domains = <&spm MT8365_POWER_DOMAIN_MM>;
};
smi_common: smi@14002000 {
@@ -757,6 +809,290 @@
mediatek,larb-id = <0>;
};
+ ovl0: ovl@1400b000 {
+ compatible = "mediatek,mt8365-disp-ovl", "mediatek,mt8192-disp-ovl";
+ reg = <0 0x1400b000 0 0x1000>;
+ clocks = <&mmsys CLK_MM_MM_DISP_OVL0>;
+ interrupts = <GIC_SPI 161 IRQ_TYPE_LEVEL_LOW>;
+ iommus = <&iommu M4U_PORT_DISP_OVL0>;
+ power-domains = <&spm MT8365_POWER_DOMAIN_MM>;
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0>;
+ ovl0_in: endpoint@0 {
+ reg = <0>;
+ remote-endpoint = <&mmsys_main>;
+ };
+ };
+
+ port@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <1>;
+ ovl0_out: endpoint@0 {
+ reg = <0>;
+ remote-endpoint = <&rdma0_in>;
+ };
+ };
+ };
+ };
+
+ rdma0: rdma@1400d000 {
+ compatible = "mediatek,mt8365-disp-rdma", "mediatek,mt8183-disp-rdma";
+ reg = <0 0x1400d000 0 0x1000>;
+ clocks = <&mmsys CLK_MM_MM_DISP_RDMA0>;
+ interrupts = <GIC_SPI 162 IRQ_TYPE_LEVEL_LOW>;
+ iommus = <&iommu M4U_PORT_DISP_RDMA0>;
+ mediatek,rdma-fifo-size = <5120>;
+ power-domains = <&spm MT8365_POWER_DOMAIN_MM>;
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0>;
+ rdma0_in: endpoint@0 {
+ reg = <0>;
+ remote-endpoint = <&ovl0_out>;
+ };
+ };
+
+ port@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <1>;
+ rdma0_out: endpoint@0 {
+ reg = <0>;
+ remote-endpoint = <&color0_in>;
+ };
+ };
+ };
+ };
+
+ color0: color@1400f000 {
+ compatible = "mediatek,mt8365-disp-color", "mediatek,mt8173-disp-color";
+ reg = <0 0x1400f000 0 0x1000>;
+ clocks = <&mmsys CLK_MM_MM_DISP_COLOR0>;
+ interrupts = <GIC_SPI 164 IRQ_TYPE_LEVEL_LOW>;
+ power-domains = <&spm MT8365_POWER_DOMAIN_MM>;
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0>;
+ color0_in: endpoint@0 {
+ reg = <0>;
+ remote-endpoint = <&rdma0_out>;
+ };
+ };
+
+ port@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <1>;
+ color0_out: endpoint@0 {
+ reg = <0>;
+ remote-endpoint = <&ccorr0_in>;
+ };
+ };
+ };
+ };
+
+ ccorr0: ccorr@14010000 {
+ compatible = "mediatek,mt8365-disp-ccorr", "mediatek,mt8183-disp-ccorr";
+ reg = <0 0x14010000 0 0x1000>;
+ clocks = <&mmsys CLK_MM_MM_DISP_CCORR0>;
+ interrupts = <GIC_SPI 165 IRQ_TYPE_LEVEL_LOW>;
+ power-domains = <&spm MT8365_POWER_DOMAIN_MM>;
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0>;
+ ccorr0_in: endpoint@0 {
+ reg = <0>;
+ remote-endpoint = <&color0_out>;
+ };
+ };
+
+ port@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <1>;
+ ccorr0_out: endpoint@0 {
+ reg = <0>;
+ remote-endpoint = <&aal0_in>;
+ };
+ };
+ };
+ };
+
+ aal0: aal@14011000 {
+ compatible = "mediatek,mt8365-disp-aal", "mediatek,mt8183-disp-aal";
+ reg = <0 0x14011000 0 0x1000>;
+ clocks = <&mmsys CLK_MM_MM_DISP_AAL0>;
+ interrupts = <GIC_SPI 166 IRQ_TYPE_LEVEL_LOW>;
+ power-domains = <&spm MT8365_POWER_DOMAIN_MM>;
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0>;
+ aal0_in: endpoint@0 {
+ reg = <0>;
+ remote-endpoint = <&ccorr0_out>;
+ };
+ };
+
+ port@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <1>;
+ aal0_out: endpoint@0 {
+ reg = <0>;
+ remote-endpoint = <&gamma0_in>;
+ };
+ };
+ };
+ };
+
+ gamma0: gamma@14012000 {
+ compatible = "mediatek,mt8365-disp-gamma", "mediatek,mt8183-disp-gamma";
+ reg = <0 0x14012000 0 0x1000>;
+ clocks = <&mmsys CLK_MM_MM_DISP_GAMMA0>;
+ interrupts = <GIC_SPI 167 IRQ_TYPE_LEVEL_LOW>;
+ power-domains = <&spm MT8365_POWER_DOMAIN_MM>;
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0>;
+ gamma0_in: endpoint@0 {
+ reg = <0>;
+ remote-endpoint = <&aal0_out>;
+ };
+ };
+
+ port@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <1>;
+ gamma0_out: endpoint@0 {
+ reg = <0>;
+ remote-endpoint = <&dither0_in>;
+ };
+ };
+ };
+ };
+
+ dither0: dither@14013000 {
+ compatible = "mediatek,mt8365-disp-dither", "mediatek,mt8183-disp-dither";
+ reg = <0 0x14013000 0 0x1000>;
+ clocks = <&mmsys CLK_MM_MM_DISP_DITHER0>;
+ interrupts = <GIC_SPI 168 IRQ_TYPE_LEVEL_LOW>;
+ power-domains = <&spm MT8365_POWER_DOMAIN_MM>;
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0>;
+ dither0_in: endpoint@0 {
+ reg = <0>;
+ remote-endpoint = <&gamma0_out>;
+ };
+ };
+
+ port@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <1>;
+ dither0_out: endpoint@0 {
+ reg = <0>;
+ };
+ };
+ };
+ };
+
+ dsi0: dsi@14014000 {
+ compatible = "mediatek,mt8365-dsi", "mediatek,mt8183-dsi";
+ reg = <0 0x14014000 0 0x1000>;
+ clock-names = "engine", "digital", "hs";
+ clocks = <&mmsys CLK_MM_MM_DSI0>,
+ <&mmsys CLK_MM_DSI0_DIG_DSI>,
+ <&mipi_tx0>;
+ interrupts = <GIC_SPI 169 IRQ_TYPE_LEVEL_LOW>;
+ phy-names = "dphy";
+ phys = <&mipi_tx0>;
+ power-domains = <&spm MT8365_POWER_DOMAIN_MM>;
+ };
+
+ rdma1: rdma@14016000 {
+ compatible = "mediatek,mt8365-disp-rdma", "mediatek,mt8183-disp-rdma";
+ reg = <0 0x14016000 0 0x1000>;
+ clocks = <&mmsys CLK_MM_MM_DISP_RDMA1>;
+ interrupts = <GIC_SPI 195 IRQ_TYPE_LEVEL_LOW>;
+ iommus = <&iommu M4U_PORT_DISP_RDMA1>;
+ mediatek,rdma-fifo-size = <2048>;
+ power-domains = <&spm MT8365_POWER_DOMAIN_MM>;
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0>;
+ rdma1_in: endpoint@1 {
+ reg = <1>;
+ remote-endpoint = <&mmsys_ext>;
+ };
+ };
+
+ port@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <1>;
+ rdma1_out: endpoint@1 {
+ reg = <1>;
+ };
+ };
+ };
+ };
+
+ dpi0: dpi@14018000 {
+ compatible = "mediatek,mt8365-dpi", "mediatek,mt8192-dpi";
+ reg = <0 0x14018000 0 0x1000>;
+ clocks = <&mmsys CLK_MM_DPI0_DPI0>,
+ <&mmsys CLK_MM_MM_DPI0>,
+ <&apmixedsys CLK_APMIXED_LVDSPLL>;
+ clock-names = "pixel", "engine", "pll";
+ interrupts = <GIC_SPI 197 IRQ_TYPE_LEVEL_LOW>;
+ power-domains = <&spm MT8365_POWER_DOMAIN_MM>;
+ status = "disabled";
+ };
+
camsys: syscon@15000000 {
compatible = "mediatek,mt8365-imgsys", "syscon";
reg = <0 0x15000000 0 0x1000>;
diff --git a/arch/arm64/boot/dts/mediatek/mt8370-genio-510-evk.dts b/arch/arm64/boot/dts/mediatek/mt8370-genio-510-evk.dts
new file mode 100644
index 000000000000..71a8cbed1df6
--- /dev/null
+++ b/arch/arm64/boot/dts/mediatek/mt8370-genio-510-evk.dts
@@ -0,0 +1,19 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * Copyright (c) 2025 Collabora Ltd.
+ * Author: Louis-Alexis Eyraud <louisalexis.eyraud@collabora.com>
+ */
+/dts-v1/;
+
+#include "mt8370.dtsi"
+#include "mt8390-genio-common.dtsi"
+
+/ {
+ model = "MediaTek Genio-510 EVK";
+ compatible = "mediatek,mt8370-evk", "mediatek,mt8370", "mediatek,mt8188";
+
+ memory@40000000 {
+ device_type = "memory";
+ reg = <0 0x40000000 0x1 0x00000000>;
+ };
+};
diff --git a/arch/arm64/boot/dts/mediatek/mt8370.dtsi b/arch/arm64/boot/dts/mediatek/mt8370.dtsi
new file mode 100644
index 000000000000..cf1a3759451f
--- /dev/null
+++ b/arch/arm64/boot/dts/mediatek/mt8370.dtsi
@@ -0,0 +1,64 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * Copyright (c) 2025 Collabora Ltd.
+ * Author: Louis-Alexis Eyraud <louisalexis.eyraud@collabora.com>
+ */
+
+/dts-v1/;
+#include "mt8188.dtsi"
+
+/ {
+ compatible = "mediatek,mt8370";
+
+ cpus {
+ /delete-node/ cpu@400;
+ /delete-node/ cpu@500;
+
+ cpu-map {
+ cluster0 {
+ /delete-node/ core4;
+ /delete-node/ core5;
+ };
+ };
+ };
+};
+
+&cpu6 {
+ clock-frequency = <2200000000>;
+};
+
+&cpu7 {
+ clock-frequency = <2200000000>;
+};
+
+&cpu_little0_cooling_map0 {
+ cooling-device = <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+};
+
+&cpu_little1_cooling_map0 {
+ cooling-device = <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+};
+
+&cpu_little2_cooling_map0 {
+ cooling-device = <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+};
+
+&cpu_little3_cooling_map0 {
+ cooling-device = <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+};
+
+&ppi_cluster0 {
+ affinity = <&cpu0 &cpu1 &cpu2 &cpu3>;
+};
diff --git a/arch/arm64/boot/dts/mediatek/mt8390-genio-700-evk.dts b/arch/arm64/boot/dts/mediatek/mt8390-genio-700-evk.dts
index 04e4a2f73799..612336713a64 100644
--- a/arch/arm64/boot/dts/mediatek/mt8390-genio-700-evk.dts
+++ b/arch/arm64/boot/dts/mediatek/mt8390-genio-700-evk.dts
@@ -8,1047 +8,16 @@
/dts-v1/;
#include "mt8188.dtsi"
-#include "mt6359.dtsi"
-#include <dt-bindings/gpio/gpio.h>
-#include <dt-bindings/input/input.h>
-#include <dt-bindings/interrupt-controller/irq.h>
-#include <dt-bindings/pinctrl/mediatek,mt8188-pinfunc.h>
-#include <dt-bindings/regulator/mediatek,mt6360-regulator.h>
-#include <dt-bindings/spmi/spmi.h>
-#include <dt-bindings/usb/pd.h>
+#include "mt8390-genio-common.dtsi"
/ {
model = "MediaTek Genio-700 EVK";
compatible = "mediatek,mt8390-evk", "mediatek,mt8390",
"mediatek,mt8188";
- aliases {
- ethernet0 = &eth;
- i2c0 = &i2c0;
- i2c1 = &i2c1;
- i2c2 = &i2c2;
- i2c3 = &i2c3;
- i2c4 = &i2c4;
- i2c5 = &i2c5;
- i2c6 = &i2c6;
- mmc0 = &mmc0;
- mmc1 = &mmc1;
- serial0 = &uart0;
- };
-
- chosen {
- stdout-path = "serial0:921600n8";
- };
-
- firmware {
- optee {
- compatible = "linaro,optee-tz";
- method = "smc";
- };
- };
-
memory@40000000 {
device_type = "memory";
reg = <0 0x40000000 0x2 0x00000000>;
};
-
- reserved-memory {
- #address-cells = <2>;
- #size-cells = <2>;
- ranges;
-
- /*
- * 12 MiB reserved for OP-TEE (BL32)
- * +-----------------------+ 0x43e0_0000
- * | SHMEM 2MiB |
- * +-----------------------+ 0x43c0_0000
- * | | TA_RAM 8MiB |
- * + TZDRAM +--------------+ 0x4340_0000
- * | | TEE_RAM 2MiB |
- * +-----------------------+ 0x4320_0000
- */
- optee_reserved: optee@43200000 {
- no-map;
- reg = <0 0x43200000 0 0x00c00000>;
- };
-
- scp_mem: memory@50000000 {
- compatible = "shared-dma-pool";
- reg = <0 0x50000000 0 0x2900000>;
- no-map;
- };
-
- /* 2 MiB reserved for ARM Trusted Firmware (BL31) */
- bl31_secmon_reserved: memory@54600000 {
- no-map;
- reg = <0 0x54600000 0x0 0x200000>;
- };
-
- apu_mem: memory@55000000 {
- compatible = "shared-dma-pool";
- reg = <0 0x55000000 0 0x1400000>; /* 20 MB */
- };
-
- vpu_mem: memory@57000000 {
- compatible = "shared-dma-pool";
- reg = <0 0x57000000 0 0x1400000>; /* 20 MB */
- };
-
- adsp_mem: memory@60000000 {
- compatible = "shared-dma-pool";
- reg = <0 0x60000000 0 0xf00000>;
- no-map;
- };
-
- afe_dma_mem: memory@60f00000 {
- compatible = "shared-dma-pool";
- reg = <0 0x60f00000 0 0x100000>;
- no-map;
- };
-
- adsp_dma_mem: memory@61000000 {
- compatible = "shared-dma-pool";
- reg = <0 0x61000000 0 0x100000>;
- no-map;
- };
- };
-
- common_fixed_5v: regulator-0 {
- compatible = "regulator-fixed";
- regulator-name = "vdd_5v";
- regulator-min-microvolt = <5000000>;
- regulator-max-microvolt = <5000000>;
- gpio = <&pio 10 GPIO_ACTIVE_HIGH>;
- enable-active-high;
- regulator-always-on;
- vin-supply = <&reg_vsys>;
- };
-
- edp_panel_fixed_3v3: regulator-1 {
- compatible = "regulator-fixed";
- regulator-name = "vedp_3v3";
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
- enable-active-high;
- gpio = <&pio 15 GPIO_ACTIVE_HIGH>;
- pinctrl-names = "default";
- pinctrl-0 = <&edp_panel_3v3_en_pins>;
- vin-supply = <&reg_vsys>;
- };
-
- gpio_fixed_3v3: regulator-2 {
- compatible = "regulator-fixed";
- regulator-name = "ext_3v3";
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
- gpio = <&pio 9 GPIO_ACTIVE_HIGH>;
- enable-active-high;
- regulator-always-on;
- vin-supply = <&reg_vsys>;
- };
-
- /* system wide 4.2V power rail from charger */
- reg_vsys: regulator-vsys {
- compatible = "regulator-fixed";
- regulator-name = "vsys";
- regulator-always-on;
- regulator-boot-on;
- };
-
- /* used by mmc2 */
- sdio_fixed_1v8: regulator-3 {
- compatible = "regulator-fixed";
- regulator-name = "vio18_conn";
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <1800000>;
- enable-active-high;
- regulator-always-on;
- };
-
- /* used by mmc2 */
- sdio_fixed_3v3: regulator-4 {
- compatible = "regulator-fixed";
- regulator-name = "wifi_3v3";
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
- gpio = <&pio 74 GPIO_ACTIVE_HIGH>;
- enable-active-high;
- regulator-always-on;
- vin-supply = <&reg_vsys>;
- };
-
- touch0_fixed_3v3: regulator-5 {
- compatible = "regulator-fixed";
- regulator-name = "vio33_tp1";
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
- gpio = <&pio 119 GPIO_ACTIVE_HIGH>;
- enable-active-high;
- vin-supply = <&reg_vsys>;
- };
-
- usb_hub_fixed_3v3: regulator-6 {
- compatible = "regulator-fixed";
- regulator-name = "vhub_3v3";
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
- gpio = <&pio 112 GPIO_ACTIVE_HIGH>; /* HUB_3V3_EN */
- startup-delay-us = <10000>;
- enable-active-high;
- vin-supply = <&reg_vsys>;
- };
-
- usb_p0_vbus: regulator-7 {
- compatible = "regulator-fixed";
- regulator-name = "vbus_p0";
- regulator-min-microvolt = <5000000>;
- regulator-max-microvolt = <5000000>;
- gpio = <&pio 84 GPIO_ACTIVE_HIGH>;
- enable-active-high;
- vin-supply = <&reg_vsys>;
- };
-
- usb_p1_vbus: regulator-8 {
- compatible = "regulator-fixed";
- regulator-name = "vbus_p1";
- regulator-min-microvolt = <5000000>;
- regulator-max-microvolt = <5000000>;
- gpio = <&pio 87 GPIO_ACTIVE_HIGH>;
- enable-active-high;
- vin-supply = <&reg_vsys>;
- };
-
- /* used by ssusb2 */
- usb_p2_vbus: regulator-9 {
- compatible = "regulator-fixed";
- regulator-name = "wifi_3v3";
- regulator-min-microvolt = <5000000>;
- regulator-max-microvolt = <5000000>;
- enable-active-high;
- };
-};
-
-&adsp {
- memory-region = <&adsp_dma_mem>, <&adsp_mem>;
- status = "okay";
-};
-
-&afe {
- memory-region = <&afe_dma_mem>;
- status = "okay";
-};
-
-&gpu {
- mali-supply = <&mt6359_vproc2_buck_reg>;
- status = "okay";
-};
-
-&i2c0 {
- pinctrl-names = "default";
- pinctrl-0 = <&i2c0_pins>;
- clock-frequency = <400000>;
- status = "okay";
-
- touchscreen@5d {
- compatible = "goodix,gt9271";
- reg = <0x5d>;
- interrupt-parent = <&pio>;
- interrupts-extended = <&pio 6 IRQ_TYPE_EDGE_RISING>;
- irq-gpios = <&pio 6 GPIO_ACTIVE_HIGH>;
- reset-gpios = <&pio 5 GPIO_ACTIVE_HIGH>;
- AVDD28-supply = <&touch0_fixed_3v3>;
- VDDIO-supply = <&mt6359_vio18_ldo_reg>;
- pinctrl-names = "default";
- pinctrl-0 = <&touch_pins>;
- };
-};
-
-&i2c1 {
- pinctrl-names = "default";
- pinctrl-0 = <&i2c1_pins>;
- clock-frequency = <400000>;
- status = "okay";
-};
-
-&i2c2 {
- pinctrl-names = "default";
- pinctrl-0 = <&i2c2_pins>;
- clock-frequency = <400000>;
- status = "okay";
-};
-
-&i2c3 {
- pinctrl-names = "default";
- pinctrl-0 = <&i2c3_pins>;
- clock-frequency = <400000>;
- status = "okay";
-};
-
-&i2c4 {
- pinctrl-names = "default";
- pinctrl-0 = <&i2c4_pins>;
- clock-frequency = <1000000>;
- status = "okay";
-};
-
-&i2c5 {
- pinctrl-names = "default";
- pinctrl-0 = <&i2c5_pins>;
- clock-frequency = <400000>;
- status = "okay";
-};
-
-&i2c6 {
- pinctrl-names = "default";
- pinctrl-0 = <&i2c6_pins>;
- clock-frequency = <400000>;
- status = "okay";
-};
-
-&mfg0 {
- domain-supply = <&mt6359_vproc2_buck_reg>;
-};
-
-&mfg1 {
- domain-supply = <&mt6359_vsram_others_ldo_reg>;
-};
-
-&mmc0 {
- status = "okay";
- pinctrl-names = "default", "state_uhs";
- pinctrl-0 = <&mmc0_default_pins>;
- pinctrl-1 = <&mmc0_uhs_pins>;
- bus-width = <8>;
- max-frequency = <200000000>;
- cap-mmc-highspeed;
- mmc-hs200-1_8v;
- mmc-hs400-1_8v;
- supports-cqe;
- cap-mmc-hw-reset;
- no-sdio;
- no-sd;
- hs400-ds-delay = <0x1481b>;
- vmmc-supply = <&mt6359_vemc_1_ldo_reg>;
- vqmmc-supply = <&mt6359_vufs_ldo_reg>;
- non-removable;
-};
-
-&mmc1 {
- status = "okay";
- pinctrl-names = "default", "state_uhs";
- pinctrl-0 = <&mmc1_default_pins>;
- pinctrl-1 = <&mmc1_uhs_pins>;
- bus-width = <4>;
- max-frequency = <200000000>;
- cap-sd-highspeed;
- sd-uhs-sdr50;
- sd-uhs-sdr104;
- no-mmc;
- no-sdio;
- cd-gpios = <&pio 2 GPIO_ACTIVE_LOW>;
- vmmc-supply = <&mt6359_vpa_buck_reg>;
- vqmmc-supply = <&mt6359_vsim1_ldo_reg>;
-};
-
-&mt6359_vbbck_ldo_reg {
- regulator-always-on;
-};
-
-&mt6359_vcn18_ldo_reg {
- regulator-name = "vcn18_pmu";
- regulator-always-on;
-};
-
-&mt6359_vcn33_2_bt_ldo_reg {
- regulator-name = "vcn33_2_pmu";
- regulator-always-on;
-};
-
-&mt6359_vcore_buck_reg {
- regulator-name = "dvdd_proc_l";
- regulator-always-on;
-};
-
-&mt6359_vgpu11_buck_reg {
- regulator-name = "dvdd_core";
- regulator-always-on;
-};
-
-&mt6359_vpa_buck_reg {
- regulator-name = "vpa_pmu";
- regulator-max-microvolt = <3100000>;
-};
-
-&mt6359_vproc2_buck_reg {
- /* The name "vgpu" is required by mtk-regulator-coupler */
- regulator-name = "vgpu";
- regulator-min-microvolt = <550000>;
- regulator-max-microvolt = <800000>;
- regulator-coupled-with = <&mt6359_vsram_others_ldo_reg>;
- regulator-coupled-max-spread = <6250>;
-};
-
-&mt6359_vpu_buck_reg {
- regulator-name = "dvdd_adsp";
- regulator-always-on;
-};
-
-&mt6359_vrf12_ldo_reg {
- regulator-name = "va12_abb2_pmu";
- regulator-always-on;
-};
-
-&mt6359_vsim1_ldo_reg {
- regulator-name = "vsim1_pmu";
- regulator-enable-ramp-delay = <480>;
-};
-
-&mt6359_vsram_others_ldo_reg {
- /* The name "vsram_gpu" is required by mtk-regulator-coupler */
- regulator-name = "vsram_gpu";
- regulator-min-microvolt = <750000>;
- regulator-max-microvolt = <800000>;
- regulator-coupled-with = <&mt6359_vproc2_buck_reg>;
- regulator-coupled-max-spread = <6250>;
-};
-
-&mt6359_vufs_ldo_reg {
- regulator-name = "vufs18_pmu";
- regulator-always-on;
-};
-
-&mt6359codec {
- mediatek,mic-type-0 = <1>; /* ACC */
- mediatek,mic-type-1 = <3>; /* DCC */
-};
-
-&pcie {
- pinctrl-names = "default";
- pinctrl-0 = <&pcie_pins_default>;
- status = "okay";
-};
-
-&pciephy {
- status = "okay";
-};
-
-&pio {
- audio_default_pins: audio-default-pins {
- pins-cmd-dat {
- pinmux = <PINMUX_GPIO101__FUNC_O_AUD_CLK_MOSI>,
- <PINMUX_GPIO102__FUNC_O_AUD_SYNC_MOSI>,
- <PINMUX_GPIO103__FUNC_O_AUD_DAT_MOSI0>,
- <PINMUX_GPIO104__FUNC_O_AUD_DAT_MOSI1>,
- <PINMUX_GPIO105__FUNC_I0_AUD_DAT_MISO0>,
- <PINMUX_GPIO106__FUNC_I0_AUD_DAT_MISO1>,
- <PINMUX_GPIO107__FUNC_B0_I2SIN_MCK>,
- <PINMUX_GPIO108__FUNC_B0_I2SIN_BCK>,
- <PINMUX_GPIO109__FUNC_B0_I2SIN_WS>,
- <PINMUX_GPIO110__FUNC_I0_I2SIN_D0>,
- <PINMUX_GPIO114__FUNC_O_I2SO2_MCK>,
- <PINMUX_GPIO115__FUNC_B0_I2SO2_BCK>,
- <PINMUX_GPIO116__FUNC_B0_I2SO2_WS>,
- <PINMUX_GPIO117__FUNC_O_I2SO2_D0>,
- <PINMUX_GPIO118__FUNC_O_I2SO2_D1>,
- <PINMUX_GPIO121__FUNC_B0_PCM_CLK>,
- <PINMUX_GPIO122__FUNC_B0_PCM_SYNC>,
- <PINMUX_GPIO124__FUNC_I0_PCM_DI>,
- <PINMUX_GPIO125__FUNC_O_DMIC1_CLK>,
- <PINMUX_GPIO126__FUNC_I0_DMIC1_DAT>,
- <PINMUX_GPIO128__FUNC_O_DMIC2_CLK>,
- <PINMUX_GPIO129__FUNC_I0_DMIC2_DAT>;
- };
- };
-
- dptx_pins: dptx-pins {
- pins-cmd-dat {
- pinmux = <PINMUX_GPIO46__FUNC_I0_DP_TX_HPD>;
- bias-pull-up;
- };
- };
-
- edp_panel_3v3_en_pins: edp-panel-3v3-en-pins {
- pins1 {
- pinmux = <PINMUX_GPIO15__FUNC_B_GPIO15>;
- output-high;
- };
- };
-
- eth_default_pins: eth-default-pins {
- pins-cc {
- pinmux = <PINMUX_GPIO139__FUNC_B0_GBE_TXC>,
- <PINMUX_GPIO140__FUNC_I0_GBE_RXC>,
- <PINMUX_GPIO141__FUNC_I0_GBE_RXDV>,
- <PINMUX_GPIO142__FUNC_O_GBE_TXEN>;
- drive-strength = <8>;
- };
-
- pins-mdio {
- pinmux = <PINMUX_GPIO143__FUNC_O_GBE_MDC>,
- <PINMUX_GPIO144__FUNC_B1_GBE_MDIO>;
- drive-strength = <8>;
- input-enable;
- };
-
- pins-power {
- pinmux = <PINMUX_GPIO145__FUNC_B_GPIO145>,
- <PINMUX_GPIO146__FUNC_B_GPIO146>;
- output-high;
- };
-
- pins-rxd {
- pinmux = <PINMUX_GPIO135__FUNC_I0_GBE_RXD3>,
- <PINMUX_GPIO136__FUNC_I0_GBE_RXD2>,
- <PINMUX_GPIO137__FUNC_I0_GBE_RXD1>,
- <PINMUX_GPIO138__FUNC_I0_GBE_RXD0>;
- drive-strength = <8>;
- };
-
- pins-txd {
- pinmux = <PINMUX_GPIO131__FUNC_O_GBE_TXD3>,
- <PINMUX_GPIO132__FUNC_O_GBE_TXD2>,
- <PINMUX_GPIO133__FUNC_O_GBE_TXD1>,
- <PINMUX_GPIO134__FUNC_O_GBE_TXD0>;
- drive-strength = <8>;
- };
- };
-
- eth_sleep_pins: eth-sleep-pins {
- pins-cc {
- pinmux = <PINMUX_GPIO139__FUNC_B_GPIO139>,
- <PINMUX_GPIO140__FUNC_B_GPIO140>,
- <PINMUX_GPIO141__FUNC_B_GPIO141>,
- <PINMUX_GPIO142__FUNC_B_GPIO142>;
- };
-
- pins-mdio {
- pinmux = <PINMUX_GPIO143__FUNC_B_GPIO143>,
- <PINMUX_GPIO144__FUNC_B_GPIO144>;
- input-disable;
- bias-disable;
- };
-
- pins-rxd {
- pinmux = <PINMUX_GPIO135__FUNC_B_GPIO135>,
- <PINMUX_GPIO136__FUNC_B_GPIO136>,
- <PINMUX_GPIO137__FUNC_B_GPIO137>,
- <PINMUX_GPIO138__FUNC_B_GPIO138>;
- };
-
- pins-txd {
- pinmux = <PINMUX_GPIO131__FUNC_B_GPIO131>,
- <PINMUX_GPIO132__FUNC_B_GPIO132>,
- <PINMUX_GPIO133__FUNC_B_GPIO133>,
- <PINMUX_GPIO134__FUNC_B_GPIO134>;
- };
- };
-
- i2c0_pins: i2c0-pins {
- pins {
- pinmux = <PINMUX_GPIO56__FUNC_B1_SDA0>,
- <PINMUX_GPIO55__FUNC_B1_SCL0>;
- bias-pull-up = <MTK_PULL_SET_RSEL_011>;
- drive-strength-microamp = <1000>;
- };
- };
-
- i2c1_pins: i2c1-pins {
- pins {
- pinmux = <PINMUX_GPIO58__FUNC_B1_SDA1>,
- <PINMUX_GPIO57__FUNC_B1_SCL1>;
- bias-pull-up = <MTK_PULL_SET_RSEL_011>;
- drive-strength-microamp = <1000>;
- };
- };
-
- i2c2_pins: i2c2-pins {
- pins {
- pinmux = <PINMUX_GPIO60__FUNC_B1_SDA2>,
- <PINMUX_GPIO59__FUNC_B1_SCL2>;
- bias-pull-up = <MTK_PULL_SET_RSEL_011>;
- drive-strength-microamp = <1000>;
- };
- };
-
- i2c3_pins: i2c3-pins {
- pins {
- pinmux = <PINMUX_GPIO62__FUNC_B1_SDA3>,
- <PINMUX_GPIO61__FUNC_B1_SCL3>;
- bias-pull-up = <MTK_PULL_SET_RSEL_011>;
- drive-strength-microamp = <1000>;
- };
- };
-
- i2c4_pins: i2c4-pins {
- pins {
- pinmux = <PINMUX_GPIO64__FUNC_B1_SDA4>,
- <PINMUX_GPIO63__FUNC_B1_SCL4>;
- bias-pull-up = <MTK_PULL_SET_RSEL_011>;
- drive-strength-microamp = <1000>;
- };
- };
-
- i2c5_pins: i2c5-pins {
- pins {
- pinmux = <PINMUX_GPIO66__FUNC_B1_SDA5>,
- <PINMUX_GPIO65__FUNC_B1_SCL5>;
- bias-pull-up = <MTK_PULL_SET_RSEL_011>;
- drive-strength-microamp = <1000>;
- };
- };
-
- i2c6_pins: i2c6-pins {
- pins {
- pinmux = <PINMUX_GPIO68__FUNC_B1_SDA6>,
- <PINMUX_GPIO67__FUNC_B1_SCL6>;
- bias-pull-up = <MTK_PULL_SET_RSEL_011>;
- drive-strength-microamp = <1000>;
- };
- };
-
- gpio_key_pins: gpio-key-pins {
- pins {
- pinmux = <PINMUX_GPIO42__FUNC_B1_KPCOL0>,
- <PINMUX_GPIO43__FUNC_B1_KPCOL1>,
- <PINMUX_GPIO44__FUNC_B1_KPROW0>;
- };
- };
-
- mmc0_default_pins: mmc0-default-pins {
- pins-clk {
- pinmux = <PINMUX_GPIO157__FUNC_B1_MSDC0_CLK>;
- drive-strength = <6>;
- bias-pull-down = <MTK_PUPD_SET_R1R0_10>;
- };
-
- pins-cmd-dat {
- pinmux = <PINMUX_GPIO161__FUNC_B1_MSDC0_DAT0>,
- <PINMUX_GPIO160__FUNC_B1_MSDC0_DAT1>,
- <PINMUX_GPIO159__FUNC_B1_MSDC0_DAT2>,
- <PINMUX_GPIO158__FUNC_B1_MSDC0_DAT3>,
- <PINMUX_GPIO154__FUNC_B1_MSDC0_DAT4>,
- <PINMUX_GPIO153__FUNC_B1_MSDC0_DAT5>,
- <PINMUX_GPIO152__FUNC_B1_MSDC0_DAT6>,
- <PINMUX_GPIO151__FUNC_B1_MSDC0_DAT7>,
- <PINMUX_GPIO156__FUNC_B1_MSDC0_CMD>;
- input-enable;
- drive-strength = <6>;
- bias-pull-up = <MTK_PUPD_SET_R1R0_01>;
- };
-
- pins-rst {
- pinmux = <PINMUX_GPIO155__FUNC_O_MSDC0_RSTB>;
- drive-strength = <6>;
- bias-pull-up = <MTK_PUPD_SET_R1R0_01>;
- };
- };
-
- mmc0_uhs_pins: mmc0-uhs-pins {
- pins-clk {
- pinmux = <PINMUX_GPIO157__FUNC_B1_MSDC0_CLK>;
- drive-strength = <8>;
- bias-pull-down = <MTK_PUPD_SET_R1R0_10>;
- };
-
- pins-cmd-dat {
- pinmux = <PINMUX_GPIO161__FUNC_B1_MSDC0_DAT0>,
- <PINMUX_GPIO160__FUNC_B1_MSDC0_DAT1>,
- <PINMUX_GPIO159__FUNC_B1_MSDC0_DAT2>,
- <PINMUX_GPIO158__FUNC_B1_MSDC0_DAT3>,
- <PINMUX_GPIO154__FUNC_B1_MSDC0_DAT4>,
- <PINMUX_GPIO153__FUNC_B1_MSDC0_DAT5>,
- <PINMUX_GPIO152__FUNC_B1_MSDC0_DAT6>,
- <PINMUX_GPIO151__FUNC_B1_MSDC0_DAT7>,
- <PINMUX_GPIO156__FUNC_B1_MSDC0_CMD>;
- input-enable;
- drive-strength = <8>;
- bias-pull-up = <MTK_PUPD_SET_R1R0_01>;
- };
-
- pins-ds {
- pinmux = <PINMUX_GPIO162__FUNC_B0_MSDC0_DSL>;
- drive-strength = <8>;
- bias-pull-down = <MTK_PUPD_SET_R1R0_10>;
- };
-
- pins-rst {
- pinmux = <PINMUX_GPIO155__FUNC_O_MSDC0_RSTB>;
- drive-strength = <8>;
- bias-pull-up = <MTK_PUPD_SET_R1R0_01>;
- };
- };
-
- mmc1_default_pins: mmc1-default-pins {
- pins-clk {
- pinmux = <PINMUX_GPIO164__FUNC_B1_MSDC1_CLK>;
- drive-strength = <6>;
- bias-pull-down = <MTK_PUPD_SET_R1R0_10>;
- };
-
- pins-cmd-dat {
- pinmux = <PINMUX_GPIO163__FUNC_B1_MSDC1_CMD>,
- <PINMUX_GPIO165__FUNC_B1_MSDC1_DAT0>,
- <PINMUX_GPIO166__FUNC_B1_MSDC1_DAT1>,
- <PINMUX_GPIO167__FUNC_B1_MSDC1_DAT2>,
- <PINMUX_GPIO168__FUNC_B1_MSDC1_DAT3>;
- input-enable;
- drive-strength = <6>;
- bias-pull-up = <MTK_PUPD_SET_R1R0_01>;
- };
-
- pins-insert {
- pinmux = <PINMUX_GPIO2__FUNC_B_GPIO2>;
- bias-pull-up;
- };
- };
-
- mmc1_uhs_pins: mmc1-uhs-pins {
- pins-clk {
- pinmux = <PINMUX_GPIO164__FUNC_B1_MSDC1_CLK>;
- drive-strength = <6>;
- bias-pull-down = <MTK_PUPD_SET_R1R0_10>;
- };
-
- pins-cmd-dat {
- pinmux = <PINMUX_GPIO163__FUNC_B1_MSDC1_CMD>,
- <PINMUX_GPIO165__FUNC_B1_MSDC1_DAT0>,
- <PINMUX_GPIO166__FUNC_B1_MSDC1_DAT1>,
- <PINMUX_GPIO167__FUNC_B1_MSDC1_DAT2>,
- <PINMUX_GPIO168__FUNC_B1_MSDC1_DAT3>;
- input-enable;
- drive-strength = <6>;
- bias-pull-up = <MTK_PUPD_SET_R1R0_01>;
- };
- };
-
- mmc2_default_pins: mmc2-default-pins {
- pins-clk {
- pinmux = <PINMUX_GPIO170__FUNC_B1_MSDC2_CLK>;
- drive-strength = <4>;
- bias-pull-down = <MTK_PUPD_SET_R1R0_10>;
- };
-
- pins-cmd-dat {
- pinmux = <PINMUX_GPIO169__FUNC_B1_MSDC2_CMD>,
- <PINMUX_GPIO171__FUNC_B1_MSDC2_DAT0>,
- <PINMUX_GPIO172__FUNC_B1_MSDC2_DAT1>,
- <PINMUX_GPIO173__FUNC_B1_MSDC2_DAT2>,
- <PINMUX_GPIO174__FUNC_B1_MSDC2_DAT3>;
- input-enable;
- drive-strength = <6>;
- bias-pull-up = <MTK_PUPD_SET_R1R0_01>;
- };
-
- pins-pcm {
- pinmux = <PINMUX_GPIO123__FUNC_O_PCM_DO>;
- };
- };
-
- mmc2_uhs_pins: mmc2-uhs-pins {
- pins-clk {
- pinmux = <PINMUX_GPIO170__FUNC_B1_MSDC2_CLK>;
- drive-strength = <4>;
- bias-pull-down = <MTK_PUPD_SET_R1R0_10>;
- };
-
- pins-cmd-dat {
- pinmux = <PINMUX_GPIO169__FUNC_B1_MSDC2_CMD>,
- <PINMUX_GPIO171__FUNC_B1_MSDC2_DAT0>,
- <PINMUX_GPIO172__FUNC_B1_MSDC2_DAT1>,
- <PINMUX_GPIO173__FUNC_B1_MSDC2_DAT2>,
- <PINMUX_GPIO174__FUNC_B1_MSDC2_DAT3>;
- input-enable;
- drive-strength = <6>;
- bias-pull-up = <MTK_PUPD_SET_R1R0_01>;
- };
- };
-
- mmc2_eint_pins: mmc2-eint-pins {
- pins-dat1 {
- pinmux = <PINMUX_GPIO172__FUNC_B_GPIO172>;
- input-enable;
- bias-pull-up = <MTK_PUPD_SET_R1R0_01>;
- };
- };
-
- mmc2_dat1_pins: mmc2-dat1-pins {
- pins-dat1 {
- pinmux = <PINMUX_GPIO172__FUNC_B1_MSDC2_DAT1>;
- input-enable;
- drive-strength = <6>;
- bias-pull-up = <MTK_PUPD_SET_R1R0_01>;
- };
- };
-
- panel_default_pins: panel-default-pins {
- pins-dcdc {
- pinmux = <PINMUX_GPIO45__FUNC_B_GPIO45>;
- output-low;
- };
-
- pins-en {
- pinmux = <PINMUX_GPIO111__FUNC_B_GPIO111>;
- output-low;
- };
-
- pins-rst {
- pinmux = <PINMUX_GPIO25__FUNC_B_GPIO25>;
- output-high;
- };
- };
-
- pcie_pins_default: pcie-default {
- mux {
- pinmux = <PINMUX_GPIO47__FUNC_I1_WAKEN>,
- <PINMUX_GPIO48__FUNC_O_PERSTN>,
- <PINMUX_GPIO49__FUNC_B1_CLKREQN>;
- bias-pull-up;
- };
- };
-
- rt1715_int_pins: rt1715-int-pins {
- pins_cmd0_dat {
- pinmux = <PINMUX_GPIO12__FUNC_B_GPIO12>;
- bias-pull-up;
- input-enable;
- };
- };
-
- spi0_pins: spi0-pins {
- pins-spi {
- pinmux = <PINMUX_GPIO69__FUNC_O_SPIM0_CSB>,
- <PINMUX_GPIO70__FUNC_O_SPIM0_CLK>,
- <PINMUX_GPIO71__FUNC_B0_SPIM0_MOSI>,
- <PINMUX_GPIO72__FUNC_B0_SPIM0_MISO>;
- bias-disable;
- };
- };
-
- spi1_pins: spi1-pins {
- pins-spi {
- pinmux = <PINMUX_GPIO75__FUNC_O_SPIM1_CSB>,
- <PINMUX_GPIO76__FUNC_O_SPIM1_CLK>,
- <PINMUX_GPIO77__FUNC_B0_SPIM1_MOSI>,
- <PINMUX_GPIO78__FUNC_B0_SPIM1_MISO>;
- bias-disable;
- };
- };
-
- spi2_pins: spi2-pins {
- pins-spi {
- pinmux = <PINMUX_GPIO79__FUNC_O_SPIM2_CSB>,
- <PINMUX_GPIO80__FUNC_O_SPIM2_CLK>,
- <PINMUX_GPIO81__FUNC_B0_SPIM2_MOSI>,
- <PINMUX_GPIO82__FUNC_B0_SPIM2_MISO>;
- bias-disable;
- };
- };
-
- touch_pins: touch-pins {
- pins-irq {
- pinmux = <PINMUX_GPIO6__FUNC_B_GPIO6>;
- input-enable;
- bias-disable;
- };
-
- pins-reset {
- pinmux = <PINMUX_GPIO5__FUNC_B_GPIO5>;
- output-high;
- };
- };
-
- uart0_pins: uart0-pins {
- pins {
- pinmux = <PINMUX_GPIO31__FUNC_O_UTXD0>,
- <PINMUX_GPIO32__FUNC_I1_URXD0>;
- bias-pull-up;
- };
- };
-
- uart1_pins: uart1-pins {
- pins {
- pinmux = <PINMUX_GPIO33__FUNC_O_UTXD1>,
- <PINMUX_GPIO34__FUNC_I1_URXD1>;
- bias-pull-up;
- };
- };
-
- uart2_pins: uart2-pins {
- pins {
- pinmux = <PINMUX_GPIO35__FUNC_O_UTXD2>,
- <PINMUX_GPIO36__FUNC_I1_URXD2>;
- bias-pull-up;
- };
- };
-
- usb_default_pins: usb-default-pins {
- pins-iddig {
- pinmux = <PINMUX_GPIO83__FUNC_B_GPIO83>;
- input-enable;
- bias-pull-up;
- };
-
- pins-valid {
- pinmux = <PINMUX_GPIO85__FUNC_I0_VBUSVALID>;
- input-enable;
- };
-
- pins-vbus {
- pinmux = <PINMUX_GPIO84__FUNC_O_USB_DRVVBUS>;
- output-high;
- };
-
- };
-
- usb1_default_pins: usb1-default-pins {
- pins-valid {
- pinmux = <PINMUX_GPIO88__FUNC_I0_VBUSVALID_1P>;
- input-enable;
- };
-
- pins-usb-hub-3v3-en {
- pinmux = <PINMUX_GPIO112__FUNC_B_GPIO112>;
- output-high;
- };
- };
-
- wifi_pwrseq_pins: wifi-pwrseq-pins {
- pins-wifi-enable {
- pinmux = <PINMUX_GPIO127__FUNC_B_GPIO127>;
- output-low;
- };
- };
-};
-
-&eth {
- phy-mode ="rgmii-id";
- phy-handle = <&ethernet_phy0>;
- pinctrl-names = "default", "sleep";
- pinctrl-0 = <&eth_default_pins>;
- pinctrl-1 = <&eth_sleep_pins>;
- mediatek,mac-wol;
- snps,reset-gpio = <&pio 147 GPIO_ACTIVE_HIGH>;
- snps,reset-delays-us = <0 10000 10000>;
- status = "okay";
-};
-
-&eth_mdio {
- ethernet_phy0: ethernet-phy@1 {
- compatible = "ethernet-phy-id001c.c916";
- reg = <0x1>;
- };
-};
-
-&pmic {
- interrupt-parent = <&pio>;
- interrupts = <222 IRQ_TYPE_LEVEL_HIGH>;
-
- mt6359keys: keys {
- compatible = "mediatek,mt6359-keys";
- mediatek,long-press-mode = <1>;
- power-off-time-sec = <0>;
-
- power-key {
- linux,keycodes = <KEY_POWER>;
- wakeup-source;
- };
- };
-};
-
-&scp {
- memory-region = <&scp_mem>;
- status = "okay";
-};
-
-&sound {
- compatible = "mediatek,mt8390-mt6359-evk", "mediatek,mt8188-mt6359-evb";
- model = "mt8390-evk";
- pinctrl-names = "default";
- pinctrl-0 = <&audio_default_pins>;
- audio-routing =
- "Headphone", "Headphone L",
- "Headphone", "Headphone R";
- mediatek,adsp = <&adsp>;
- status = "okay";
-
- dai-link-0 {
- link-name = "DL_SRC_BE";
-
- codec {
- sound-dai = <&pmic 0>;
- };
- };
-};
-
-&spi2 {
- pinctrl-0 = <&spi2_pins>;
- pinctrl-names = "default";
- mediatek,pad-select = <0>;
- #address-cells = <1>;
- #size-cells = <0>;
- status = "okay";
};
-&uart0 {
- pinctrl-0 = <&uart0_pins>;
- pinctrl-names = "default";
- status = "okay";
-};
-
-&uart1 {
- pinctrl-0 = <&uart1_pins>;
- pinctrl-names = "default";
- status = "okay";
-};
-
-&uart2 {
- pinctrl-0 = <&uart2_pins>;
- pinctrl-names = "default";
- status = "okay";
-};
-
-&u3phy0 {
- status = "okay";
-};
-
-&u3phy1 {
- status = "okay";
-};
-
-&u3phy2 {
- status = "okay";
-};
-
-&xhci0 {
- status = "okay";
- vusb33-supply = <&mt6359_vusb_ldo_reg>;
-};
-
-&xhci1 {
- status = "okay";
- vusb33-supply = <&mt6359_vusb_ldo_reg>;
- #address-cells = <1>;
- #size-cells = <0>;
-
- hub_2_0: hub@1 {
- compatible = "usb451,8025";
- reg = <1>;
- peer-hub = <&hub_3_0>;
- reset-gpios = <&pio 7 GPIO_ACTIVE_HIGH>;
- vdd-supply = <&usb_hub_fixed_3v3>;
- };
-
- hub_3_0: hub@2 {
- compatible = "usb451,8027";
- reg = <2>;
- peer-hub = <&hub_2_0>;
- reset-gpios = <&pio 7 GPIO_ACTIVE_HIGH>;
- vdd-supply = <&usb_hub_fixed_3v3>;
- };
-};
-
-&xhci2 {
- status = "okay";
- vusb33-supply = <&mt6359_vusb_ldo_reg>;
- vbus-supply = <&sdio_fixed_3v3>; /* wifi_3v3 */
-};
diff --git a/arch/arm64/boot/dts/mediatek/mt8390-genio-common.dtsi b/arch/arm64/boot/dts/mediatek/mt8390-genio-common.dtsi
new file mode 100644
index 000000000000..60139e6dffd8
--- /dev/null
+++ b/arch/arm64/boot/dts/mediatek/mt8390-genio-common.dtsi
@@ -0,0 +1,1223 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * Copyright (C) 2023 MediaTek Inc.
+ * Author: Chris Chen <chris-qj.chen@mediatek.com>
+ * Pablo Sun <pablo.sun@mediatek.com>
+ * Macpaul Lin <macpaul.lin@mediatek.com>
+ *
+ * Copyright (C) 2025 Collabora Ltd.
+ * Louis-Alexis Eyraud <louisalexis.eyraud@collabora.com>
+ * AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+ */
+
+#include "mt6359.dtsi"
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/pinctrl/mediatek,mt8188-pinfunc.h>
+#include <dt-bindings/regulator/mediatek,mt6360-regulator.h>
+#include <dt-bindings/spmi/spmi.h>
+#include <dt-bindings/usb/pd.h>
+
+/ {
+ aliases {
+ ethernet0 = &eth;
+ i2c0 = &i2c0;
+ i2c1 = &i2c1;
+ i2c2 = &i2c2;
+ i2c3 = &i2c3;
+ i2c4 = &i2c4;
+ i2c5 = &i2c5;
+ i2c6 = &i2c6;
+ mmc0 = &mmc0;
+ mmc1 = &mmc1;
+ serial0 = &uart0;
+ };
+
+ chosen {
+ stdout-path = "serial0:921600n8";
+ };
+
+ dmic_codec: dmic-codec {
+ #sound-dai-cells = <0>;
+ compatible = "dmic-codec";
+ num-channels = <2>;
+ wakeup-delay-ms = <30>;
+ };
+
+ firmware {
+ optee {
+ compatible = "linaro,optee-tz";
+ method = "smc";
+ };
+ };
+ reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ /*
+ * 12 MiB reserved for OP-TEE (BL32)
+ * +-----------------------+ 0x43e0_0000
+ * | SHMEM 2MiB |
+ * +-----------------------+ 0x43c0_0000
+ * | | TA_RAM 8MiB |
+ * + TZDRAM +--------------+ 0x4340_0000
+ * | | TEE_RAM 2MiB |
+ * +-----------------------+ 0x4320_0000
+ */
+ optee_reserved: optee@43200000 {
+ no-map;
+ reg = <0 0x43200000 0 0x00c00000>;
+ };
+
+ scp_mem: memory@50000000 {
+ compatible = "shared-dma-pool";
+ reg = <0 0x50000000 0 0x2900000>;
+ no-map;
+ };
+
+ /* 2 MiB reserved for ARM Trusted Firmware (BL31) */
+ bl31_secmon_reserved: memory@54600000 {
+ no-map;
+ reg = <0 0x54600000 0x0 0x200000>;
+ };
+
+ apu_mem: memory@55000000 {
+ compatible = "shared-dma-pool";
+ reg = <0 0x55000000 0 0x1400000>; /* 20 MB */
+ };
+
+ vpu_mem: memory@57000000 {
+ compatible = "shared-dma-pool";
+ reg = <0 0x57000000 0 0x1400000>; /* 20 MB */
+ };
+
+ adsp_mem: memory@60000000 {
+ compatible = "shared-dma-pool";
+ reg = <0 0x60000000 0 0xf00000>;
+ no-map;
+ };
+
+ afe_dma_mem: memory@60f00000 {
+ compatible = "shared-dma-pool";
+ reg = <0 0x60f00000 0 0x100000>;
+ no-map;
+ };
+
+ adsp_dma_mem: memory@61000000 {
+ compatible = "shared-dma-pool";
+ reg = <0 0x61000000 0 0x100000>;
+ no-map;
+ };
+ };
+
+ common_fixed_5v: regulator-0 {
+ compatible = "regulator-fixed";
+ regulator-name = "vdd_5v";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ gpio = <&pio 10 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ regulator-always-on;
+ vin-supply = <&reg_vsys>;
+ };
+
+ edp_panel_fixed_3v3: regulator-1 {
+ compatible = "regulator-fixed";
+ regulator-name = "vedp_3v3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ enable-active-high;
+ gpio = <&pio 15 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&edp_panel_3v3_en_pins>;
+ vin-supply = <&reg_vsys>;
+ };
+
+ gpio_fixed_3v3: regulator-2 {
+ compatible = "regulator-fixed";
+ regulator-name = "ext_3v3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ gpio = <&pio 9 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ regulator-always-on;
+ vin-supply = <&reg_vsys>;
+ };
+
+ /* system wide 4.2V power rail from charger */
+ reg_vsys: regulator-vsys {
+ compatible = "regulator-fixed";
+ regulator-name = "vsys";
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ /* used by mmc2 */
+ sdio_fixed_1v8: regulator-3 {
+ compatible = "regulator-fixed";
+ regulator-name = "vio18_conn";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ enable-active-high;
+ regulator-always-on;
+ };
+
+ /* used by mmc2 */
+ sdio_fixed_3v3: regulator-4 {
+ compatible = "regulator-fixed";
+ regulator-name = "wifi_3v3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ gpio = <&pio 74 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ regulator-always-on;
+ vin-supply = <&reg_vsys>;
+ };
+
+ touch0_fixed_3v3: regulator-5 {
+ compatible = "regulator-fixed";
+ regulator-name = "vio33_tp1";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ gpio = <&pio 119 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ vin-supply = <&reg_vsys>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&touch_vreg_pins>;
+ };
+
+ usb_hub_fixed_3v3: regulator-6 {
+ compatible = "regulator-fixed";
+ regulator-name = "vhub_3v3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ gpio = <&pio 112 GPIO_ACTIVE_HIGH>; /* HUB_3V3_EN */
+ startup-delay-us = <10000>;
+ enable-active-high;
+ vin-supply = <&reg_vsys>;
+ };
+
+ usb_p0_vbus: regulator-7 {
+ compatible = "regulator-fixed";
+ regulator-name = "vbus_p0";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ gpio = <&pio 84 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ vin-supply = <&reg_vsys>;
+ };
+
+ usb_p1_vbus: regulator-8 {
+ compatible = "regulator-fixed";
+ regulator-name = "vbus_p1";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ gpio = <&pio 87 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ vin-supply = <&reg_vsys>;
+ };
+
+ /* used by ssusb2 */
+ usb_p2_vbus: regulator-9 {
+ compatible = "regulator-fixed";
+ regulator-name = "vbus_p2";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ enable-active-high;
+ };
+};
+
+&adsp {
+ memory-region = <&adsp_dma_mem>, <&adsp_mem>;
+ status = "okay";
+};
+
+&afe {
+ memory-region = <&afe_dma_mem>;
+ status = "okay";
+};
+
+&gpu {
+ mali-supply = <&mt6359_vproc2_buck_reg>;
+ status = "okay";
+};
+
+&i2c0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c0_pins>;
+ clock-frequency = <400000>;
+ status = "okay";
+
+ touchscreen@5d {
+ compatible = "goodix,gt9271";
+ reg = <0x5d>;
+ interrupt-parent = <&pio>;
+ interrupts-extended = <&pio 6 IRQ_TYPE_EDGE_RISING>;
+ irq-gpios = <&pio 6 GPIO_ACTIVE_HIGH>;
+ reset-gpios = <&pio 5 GPIO_ACTIVE_HIGH>;
+ AVDD28-supply = <&touch0_fixed_3v3>;
+ VDDIO-supply = <&mt6359_vio18_ldo_reg>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&touch_pins>;
+ };
+};
+
+&i2c1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c1_pins>;
+ clock-frequency = <400000>;
+ status = "okay";
+
+ typec-mux@48 {
+ compatible = "ite,it5205";
+ reg = <0x48>;
+
+ mode-switch;
+ orientation-switch;
+
+ vcc-supply = <&mt6359_vcn33_1_bt_ldo_reg>;
+
+ port {
+ it5205_sbu_mux: endpoint {
+ remote-endpoint = <&typec_sbu_out>;
+ };
+ };
+ };
+};
+
+&i2c2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c2_pins>;
+ clock-frequency = <400000>;
+ status = "okay";
+};
+
+&i2c3 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c3_pins>;
+ clock-frequency = <400000>;
+ status = "okay";
+};
+
+&i2c4 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c4_pins>;
+ clock-frequency = <1000000>;
+ status = "okay";
+
+ rt1715@4e {
+ compatible = "richtek,rt1715";
+ reg = <0x4e>;
+ interrupts-extended = <&pio 12 IRQ_TYPE_LEVEL_LOW>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&tcpci_int_pins>;
+ vbus-supply = <&usb_p1_vbus>;
+
+ connector {
+ compatible = "usb-c-connector";
+ label = "USB-C";
+ data-role = "dual";
+ op-sink-microwatt = <10000000>;
+ power-role = "dual";
+ try-power-role = "sink";
+ pd-revision = /bits/ 8 <0x03 0x00 0x01 0x08>;
+
+ sink-pdos = <PDO_FIXED(5000, 2000,
+ PDO_FIXED_DUAL_ROLE |
+ PDO_FIXED_DATA_SWAP)>;
+ source-pdos = <PDO_FIXED(5000, 2000,
+ PDO_FIXED_DUAL_ROLE |
+ PDO_FIXED_DATA_SWAP)>;
+
+ altmodes {
+ displayport {
+ svid = /bits/ 16 <0xff01>;
+ vdo = <0x001c1c47>;
+ };
+ };
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ typec_con_hs: endpoint {
+ remote-endpoint = <&mtu3_hs1_role_sw>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ typec_con_ss: endpoint {
+ remote-endpoint = <&xhci_ss_ep>;
+ };
+ };
+
+ port@2 {
+ reg = <2>;
+ typec_sbu_out: endpoint {
+ remote-endpoint = <&it5205_sbu_mux>;
+ };
+
+ };
+ };
+ };
+ };
+};
+
+&i2c5 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c5_pins>;
+ clock-frequency = <400000>;
+ status = "okay";
+};
+
+&i2c6 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c6_pins>;
+ clock-frequency = <400000>;
+ status = "okay";
+};
+
+&mfg0 {
+ domain-supply = <&mt6359_vproc2_buck_reg>;
+};
+
+&mfg1 {
+ domain-supply = <&mt6359_vsram_others_ldo_reg>;
+};
+
+&mmc0 {
+ status = "okay";
+ pinctrl-names = "default", "state_uhs";
+ pinctrl-0 = <&mmc0_default_pins>;
+ pinctrl-1 = <&mmc0_uhs_pins>;
+ bus-width = <8>;
+ max-frequency = <200000000>;
+ cap-mmc-highspeed;
+ mmc-hs200-1_8v;
+ mmc-hs400-1_8v;
+ supports-cqe;
+ cap-mmc-hw-reset;
+ no-sdio;
+ no-sd;
+ hs400-ds-delay = <0x1481b>;
+ vmmc-supply = <&mt6359_vemc_1_ldo_reg>;
+ vqmmc-supply = <&mt6359_vufs_ldo_reg>;
+ non-removable;
+};
+
+&mmc1 {
+ status = "okay";
+ pinctrl-names = "default", "state_uhs";
+ pinctrl-0 = <&mmc1_default_pins>;
+ pinctrl-1 = <&mmc1_uhs_pins>;
+ bus-width = <4>;
+ max-frequency = <200000000>;
+ cap-sd-highspeed;
+ sd-uhs-sdr50;
+ sd-uhs-sdr104;
+ no-mmc;
+ no-sdio;
+ cd-gpios = <&pio 2 GPIO_ACTIVE_LOW>;
+ vmmc-supply = <&mt6359_vpa_buck_reg>;
+ vqmmc-supply = <&mt6359_vsim1_ldo_reg>;
+};
+
+&mt6359_vbbck_ldo_reg {
+ regulator-always-on;
+};
+
+&mt6359_vcn18_ldo_reg {
+ regulator-name = "vcn18_pmu";
+ regulator-always-on;
+};
+
+&mt6359_vcn33_2_bt_ldo_reg {
+ regulator-name = "vcn33_2_pmu";
+ regulator-always-on;
+};
+
+&mt6359_vcore_buck_reg {
+ regulator-name = "dvdd_proc_l";
+ regulator-always-on;
+};
+
+&mt6359_vgpu11_buck_reg {
+ regulator-name = "dvdd_core";
+ regulator-always-on;
+};
+
+&mt6359_vpa_buck_reg {
+ regulator-name = "vpa_pmu";
+ regulator-max-microvolt = <3100000>;
+};
+
+&mt6359_vproc2_buck_reg {
+ /* The name "vgpu" is required by mtk-regulator-coupler */
+ regulator-name = "vgpu";
+ regulator-min-microvolt = <550000>;
+ regulator-max-microvolt = <800000>;
+ regulator-coupled-with = <&mt6359_vsram_others_ldo_reg>;
+ regulator-coupled-max-spread = <6250>;
+};
+
+&mt6359_vpu_buck_reg {
+ regulator-name = "dvdd_adsp";
+ regulator-always-on;
+};
+
+&mt6359_vrf12_ldo_reg {
+ regulator-name = "va12_abb2_pmu";
+ regulator-always-on;
+};
+
+&mt6359_vsim1_ldo_reg {
+ regulator-name = "vsim1_pmu";
+ regulator-enable-ramp-delay = <480>;
+};
+
+&mt6359_vsram_others_ldo_reg {
+ /* The name "vsram_gpu" is required by mtk-regulator-coupler */
+ regulator-name = "vsram_gpu";
+ regulator-min-microvolt = <750000>;
+ regulator-max-microvolt = <800000>;
+ regulator-coupled-with = <&mt6359_vproc2_buck_reg>;
+ regulator-coupled-max-spread = <6250>;
+};
+
+&mt6359_vufs_ldo_reg {
+ regulator-name = "vufs18_pmu";
+ regulator-always-on;
+};
+
+&mt6359codec {
+ mediatek,mic-type-0 = <1>; /* ACC */
+ mediatek,mic-type-1 = <3>; /* DCC */
+};
+
+&pcie {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pcie_pins_default>;
+ status = "okay";
+};
+
+&pciephy {
+ status = "okay";
+};
+
+&pio {
+ audio_default_pins: audio-default-pins {
+ pins-cmd-dat {
+ pinmux = <PINMUX_GPIO101__FUNC_O_AUD_CLK_MOSI>,
+ <PINMUX_GPIO102__FUNC_O_AUD_SYNC_MOSI>,
+ <PINMUX_GPIO103__FUNC_O_AUD_DAT_MOSI0>,
+ <PINMUX_GPIO104__FUNC_O_AUD_DAT_MOSI1>,
+ <PINMUX_GPIO105__FUNC_I0_AUD_DAT_MISO0>,
+ <PINMUX_GPIO106__FUNC_I0_AUD_DAT_MISO1>,
+ <PINMUX_GPIO107__FUNC_B0_I2SIN_MCK>,
+ <PINMUX_GPIO108__FUNC_B0_I2SIN_BCK>,
+ <PINMUX_GPIO109__FUNC_B0_I2SIN_WS>,
+ <PINMUX_GPIO110__FUNC_I0_I2SIN_D0>,
+ <PINMUX_GPIO114__FUNC_O_I2SO2_MCK>,
+ <PINMUX_GPIO115__FUNC_B0_I2SO2_BCK>,
+ <PINMUX_GPIO116__FUNC_B0_I2SO2_WS>,
+ <PINMUX_GPIO117__FUNC_O_I2SO2_D0>,
+ <PINMUX_GPIO118__FUNC_O_I2SO2_D1>,
+ <PINMUX_GPIO121__FUNC_B0_PCM_CLK>,
+ <PINMUX_GPIO122__FUNC_B0_PCM_SYNC>,
+ <PINMUX_GPIO124__FUNC_I0_PCM_DI>,
+ <PINMUX_GPIO125__FUNC_O_DMIC1_CLK>,
+ <PINMUX_GPIO126__FUNC_I0_DMIC1_DAT>,
+ <PINMUX_GPIO128__FUNC_O_DMIC2_CLK>,
+ <PINMUX_GPIO129__FUNC_I0_DMIC2_DAT>;
+ };
+ };
+
+ dptx_pins: dptx-pins {
+ pins-cmd-dat {
+ pinmux = <PINMUX_GPIO46__FUNC_I0_DP_TX_HPD>;
+ bias-pull-up;
+ };
+ };
+
+ edp_panel_3v3_en_pins: edp-panel-3v3-en-pins {
+ pins1 {
+ pinmux = <PINMUX_GPIO15__FUNC_B_GPIO15>;
+ output-high;
+ };
+ };
+
+ eth_default_pins: eth-default-pins {
+ pins-cc {
+ pinmux = <PINMUX_GPIO139__FUNC_B0_GBE_TXC>,
+ <PINMUX_GPIO140__FUNC_I0_GBE_RXC>,
+ <PINMUX_GPIO141__FUNC_I0_GBE_RXDV>,
+ <PINMUX_GPIO142__FUNC_O_GBE_TXEN>;
+ drive-strength = <8>;
+ };
+
+ pins-mdio {
+ pinmux = <PINMUX_GPIO143__FUNC_O_GBE_MDC>,
+ <PINMUX_GPIO144__FUNC_B1_GBE_MDIO>;
+ drive-strength = <8>;
+ input-enable;
+ };
+
+ pins-power {
+ pinmux = <PINMUX_GPIO145__FUNC_B_GPIO145>,
+ <PINMUX_GPIO146__FUNC_B_GPIO146>;
+ output-high;
+ };
+
+ pins-rxd {
+ pinmux = <PINMUX_GPIO135__FUNC_I0_GBE_RXD3>,
+ <PINMUX_GPIO136__FUNC_I0_GBE_RXD2>,
+ <PINMUX_GPIO137__FUNC_I0_GBE_RXD1>,
+ <PINMUX_GPIO138__FUNC_I0_GBE_RXD0>;
+ drive-strength = <8>;
+ };
+
+ pins-txd {
+ pinmux = <PINMUX_GPIO131__FUNC_O_GBE_TXD3>,
+ <PINMUX_GPIO132__FUNC_O_GBE_TXD2>,
+ <PINMUX_GPIO133__FUNC_O_GBE_TXD1>,
+ <PINMUX_GPIO134__FUNC_O_GBE_TXD0>;
+ drive-strength = <8>;
+ };
+ };
+
+ eth_sleep_pins: eth-sleep-pins {
+ pins-cc {
+ pinmux = <PINMUX_GPIO139__FUNC_B_GPIO139>,
+ <PINMUX_GPIO140__FUNC_B_GPIO140>,
+ <PINMUX_GPIO141__FUNC_B_GPIO141>,
+ <PINMUX_GPIO142__FUNC_B_GPIO142>;
+ };
+
+ pins-mdio {
+ pinmux = <PINMUX_GPIO143__FUNC_B_GPIO143>,
+ <PINMUX_GPIO144__FUNC_B_GPIO144>;
+ input-disable;
+ bias-disable;
+ };
+
+ pins-rxd {
+ pinmux = <PINMUX_GPIO135__FUNC_B_GPIO135>,
+ <PINMUX_GPIO136__FUNC_B_GPIO136>,
+ <PINMUX_GPIO137__FUNC_B_GPIO137>,
+ <PINMUX_GPIO138__FUNC_B_GPIO138>;
+ };
+
+ pins-txd {
+ pinmux = <PINMUX_GPIO131__FUNC_B_GPIO131>,
+ <PINMUX_GPIO132__FUNC_B_GPIO132>,
+ <PINMUX_GPIO133__FUNC_B_GPIO133>,
+ <PINMUX_GPIO134__FUNC_B_GPIO134>;
+ };
+ };
+
+ i2c0_pins: i2c0-pins {
+ pins {
+ pinmux = <PINMUX_GPIO56__FUNC_B1_SDA0>,
+ <PINMUX_GPIO55__FUNC_B1_SCL0>;
+ bias-pull-up = <MTK_PULL_SET_RSEL_011>;
+ drive-strength-microamp = <1000>;
+ };
+ };
+
+ i2c1_pins: i2c1-pins {
+ pins {
+ pinmux = <PINMUX_GPIO58__FUNC_B1_SDA1>,
+ <PINMUX_GPIO57__FUNC_B1_SCL1>;
+ bias-pull-up = <MTK_PULL_SET_RSEL_011>;
+ drive-strength-microamp = <1000>;
+ };
+ };
+
+ i2c2_pins: i2c2-pins {
+ pins {
+ pinmux = <PINMUX_GPIO60__FUNC_B1_SDA2>,
+ <PINMUX_GPIO59__FUNC_B1_SCL2>;
+ bias-pull-up = <MTK_PULL_SET_RSEL_011>;
+ drive-strength-microamp = <1000>;
+ };
+ };
+
+ i2c3_pins: i2c3-pins {
+ pins {
+ pinmux = <PINMUX_GPIO62__FUNC_B1_SDA3>,
+ <PINMUX_GPIO61__FUNC_B1_SCL3>;
+ bias-pull-up = <MTK_PULL_SET_RSEL_011>;
+ drive-strength-microamp = <1000>;
+ };
+ };
+
+ i2c4_pins: i2c4-pins {
+ pins {
+ pinmux = <PINMUX_GPIO64__FUNC_B1_SDA4>,
+ <PINMUX_GPIO63__FUNC_B1_SCL4>;
+ bias-pull-up = <MTK_PULL_SET_RSEL_011>;
+ drive-strength-microamp = <1000>;
+ };
+ };
+
+ i2c5_pins: i2c5-pins {
+ pins {
+ pinmux = <PINMUX_GPIO66__FUNC_B1_SDA5>,
+ <PINMUX_GPIO65__FUNC_B1_SCL5>;
+ bias-pull-up = <MTK_PULL_SET_RSEL_011>;
+ drive-strength-microamp = <1000>;
+ };
+ };
+
+ i2c6_pins: i2c6-pins {
+ pins {
+ pinmux = <PINMUX_GPIO68__FUNC_B1_SDA6>,
+ <PINMUX_GPIO67__FUNC_B1_SCL6>;
+ bias-pull-up = <MTK_PULL_SET_RSEL_011>;
+ drive-strength-microamp = <1000>;
+ };
+ };
+
+ gpio_key_pins: gpio-key-pins {
+ pins {
+ pinmux = <PINMUX_GPIO42__FUNC_B1_KPCOL0>,
+ <PINMUX_GPIO43__FUNC_B1_KPCOL1>,
+ <PINMUX_GPIO44__FUNC_B1_KPROW0>;
+ };
+ };
+
+ mmc0_default_pins: mmc0-default-pins {
+ pins-clk {
+ pinmux = <PINMUX_GPIO157__FUNC_B1_MSDC0_CLK>;
+ drive-strength = <6>;
+ bias-pull-down = <MTK_PUPD_SET_R1R0_10>;
+ };
+
+ pins-cmd-dat {
+ pinmux = <PINMUX_GPIO161__FUNC_B1_MSDC0_DAT0>,
+ <PINMUX_GPIO160__FUNC_B1_MSDC0_DAT1>,
+ <PINMUX_GPIO159__FUNC_B1_MSDC0_DAT2>,
+ <PINMUX_GPIO158__FUNC_B1_MSDC0_DAT3>,
+ <PINMUX_GPIO154__FUNC_B1_MSDC0_DAT4>,
+ <PINMUX_GPIO153__FUNC_B1_MSDC0_DAT5>,
+ <PINMUX_GPIO152__FUNC_B1_MSDC0_DAT6>,
+ <PINMUX_GPIO151__FUNC_B1_MSDC0_DAT7>,
+ <PINMUX_GPIO156__FUNC_B1_MSDC0_CMD>;
+ input-enable;
+ drive-strength = <6>;
+ bias-pull-up = <MTK_PUPD_SET_R1R0_01>;
+ };
+
+ pins-rst {
+ pinmux = <PINMUX_GPIO155__FUNC_O_MSDC0_RSTB>;
+ drive-strength = <6>;
+ bias-pull-up = <MTK_PUPD_SET_R1R0_01>;
+ };
+ };
+
+ mmc0_uhs_pins: mmc0-uhs-pins {
+ pins-clk {
+ pinmux = <PINMUX_GPIO157__FUNC_B1_MSDC0_CLK>;
+ drive-strength = <8>;
+ bias-pull-down = <MTK_PUPD_SET_R1R0_10>;
+ };
+
+ pins-cmd-dat {
+ pinmux = <PINMUX_GPIO161__FUNC_B1_MSDC0_DAT0>,
+ <PINMUX_GPIO160__FUNC_B1_MSDC0_DAT1>,
+ <PINMUX_GPIO159__FUNC_B1_MSDC0_DAT2>,
+ <PINMUX_GPIO158__FUNC_B1_MSDC0_DAT3>,
+ <PINMUX_GPIO154__FUNC_B1_MSDC0_DAT4>,
+ <PINMUX_GPIO153__FUNC_B1_MSDC0_DAT5>,
+ <PINMUX_GPIO152__FUNC_B1_MSDC0_DAT6>,
+ <PINMUX_GPIO151__FUNC_B1_MSDC0_DAT7>,
+ <PINMUX_GPIO156__FUNC_B1_MSDC0_CMD>;
+ input-enable;
+ drive-strength = <8>;
+ bias-pull-up = <MTK_PUPD_SET_R1R0_01>;
+ };
+
+ pins-ds {
+ pinmux = <PINMUX_GPIO162__FUNC_B0_MSDC0_DSL>;
+ drive-strength = <8>;
+ bias-pull-down = <MTK_PUPD_SET_R1R0_10>;
+ };
+
+ pins-rst {
+ pinmux = <PINMUX_GPIO155__FUNC_O_MSDC0_RSTB>;
+ drive-strength = <8>;
+ bias-pull-up = <MTK_PUPD_SET_R1R0_01>;
+ };
+ };
+
+ mmc1_default_pins: mmc1-default-pins {
+ pins-clk {
+ pinmux = <PINMUX_GPIO164__FUNC_B1_MSDC1_CLK>;
+ drive-strength = <6>;
+ bias-pull-down = <MTK_PUPD_SET_R1R0_10>;
+ };
+
+ pins-cmd-dat {
+ pinmux = <PINMUX_GPIO163__FUNC_B1_MSDC1_CMD>,
+ <PINMUX_GPIO165__FUNC_B1_MSDC1_DAT0>,
+ <PINMUX_GPIO166__FUNC_B1_MSDC1_DAT1>,
+ <PINMUX_GPIO167__FUNC_B1_MSDC1_DAT2>,
+ <PINMUX_GPIO168__FUNC_B1_MSDC1_DAT3>;
+ input-enable;
+ drive-strength = <6>;
+ bias-pull-up = <MTK_PUPD_SET_R1R0_01>;
+ };
+
+ pins-insert {
+ pinmux = <PINMUX_GPIO2__FUNC_B_GPIO2>;
+ bias-pull-up;
+ };
+ };
+
+ mmc1_uhs_pins: mmc1-uhs-pins {
+ pins-clk {
+ pinmux = <PINMUX_GPIO164__FUNC_B1_MSDC1_CLK>;
+ drive-strength = <6>;
+ bias-pull-down = <MTK_PUPD_SET_R1R0_10>;
+ };
+
+ pins-cmd-dat {
+ pinmux = <PINMUX_GPIO163__FUNC_B1_MSDC1_CMD>,
+ <PINMUX_GPIO165__FUNC_B1_MSDC1_DAT0>,
+ <PINMUX_GPIO166__FUNC_B1_MSDC1_DAT1>,
+ <PINMUX_GPIO167__FUNC_B1_MSDC1_DAT2>,
+ <PINMUX_GPIO168__FUNC_B1_MSDC1_DAT3>;
+ input-enable;
+ drive-strength = <6>;
+ bias-pull-up = <MTK_PUPD_SET_R1R0_01>;
+ };
+ };
+
+ mmc2_default_pins: mmc2-default-pins {
+ pins-clk {
+ pinmux = <PINMUX_GPIO170__FUNC_B1_MSDC2_CLK>;
+ drive-strength = <4>;
+ bias-pull-down = <MTK_PUPD_SET_R1R0_10>;
+ };
+
+ pins-cmd-dat {
+ pinmux = <PINMUX_GPIO169__FUNC_B1_MSDC2_CMD>,
+ <PINMUX_GPIO171__FUNC_B1_MSDC2_DAT0>,
+ <PINMUX_GPIO172__FUNC_B1_MSDC2_DAT1>,
+ <PINMUX_GPIO173__FUNC_B1_MSDC2_DAT2>,
+ <PINMUX_GPIO174__FUNC_B1_MSDC2_DAT3>;
+ input-enable;
+ drive-strength = <6>;
+ bias-pull-up = <MTK_PUPD_SET_R1R0_01>;
+ };
+
+ pins-pcm {
+ pinmux = <PINMUX_GPIO123__FUNC_O_PCM_DO>;
+ };
+ };
+
+ mmc2_uhs_pins: mmc2-uhs-pins {
+ pins-clk {
+ pinmux = <PINMUX_GPIO170__FUNC_B1_MSDC2_CLK>;
+ drive-strength = <4>;
+ bias-pull-down = <MTK_PUPD_SET_R1R0_10>;
+ };
+
+ pins-cmd-dat {
+ pinmux = <PINMUX_GPIO169__FUNC_B1_MSDC2_CMD>,
+ <PINMUX_GPIO171__FUNC_B1_MSDC2_DAT0>,
+ <PINMUX_GPIO172__FUNC_B1_MSDC2_DAT1>,
+ <PINMUX_GPIO173__FUNC_B1_MSDC2_DAT2>,
+ <PINMUX_GPIO174__FUNC_B1_MSDC2_DAT3>;
+ input-enable;
+ drive-strength = <6>;
+ bias-pull-up = <MTK_PUPD_SET_R1R0_01>;
+ };
+ };
+
+ mmc2_eint_pins: mmc2-eint-pins {
+ pins-dat1 {
+ pinmux = <PINMUX_GPIO172__FUNC_B_GPIO172>;
+ input-enable;
+ bias-pull-up = <MTK_PUPD_SET_R1R0_01>;
+ };
+ };
+
+ mmc2_dat1_pins: mmc2-dat1-pins {
+ pins-dat1 {
+ pinmux = <PINMUX_GPIO172__FUNC_B1_MSDC2_DAT1>;
+ input-enable;
+ drive-strength = <6>;
+ bias-pull-up = <MTK_PUPD_SET_R1R0_01>;
+ };
+ };
+
+ panel_default_pins: panel-default-pins {
+ pins-dcdc {
+ pinmux = <PINMUX_GPIO45__FUNC_B_GPIO45>;
+ output-low;
+ };
+
+ pins-en {
+ pinmux = <PINMUX_GPIO111__FUNC_B_GPIO111>;
+ output-low;
+ };
+
+ pins-rst {
+ pinmux = <PINMUX_GPIO25__FUNC_B_GPIO25>;
+ output-high;
+ };
+ };
+
+ pcie_pins_default: pcie-default {
+ mux {
+ pinmux = <PINMUX_GPIO47__FUNC_I1_WAKEN>,
+ <PINMUX_GPIO48__FUNC_O_PERSTN>,
+ <PINMUX_GPIO49__FUNC_B1_CLKREQN>;
+ bias-pull-up;
+ };
+ };
+
+ rt1715_int_pins: rt1715-int-pins {
+ pins_cmd0_dat {
+ pinmux = <PINMUX_GPIO12__FUNC_B_GPIO12>;
+ bias-pull-up;
+ input-enable;
+ };
+ };
+
+ spi0_pins: spi0-pins {
+ pins-spi {
+ pinmux = <PINMUX_GPIO69__FUNC_O_SPIM0_CSB>,
+ <PINMUX_GPIO70__FUNC_O_SPIM0_CLK>,
+ <PINMUX_GPIO71__FUNC_B0_SPIM0_MOSI>,
+ <PINMUX_GPIO72__FUNC_B0_SPIM0_MISO>;
+ bias-disable;
+ };
+ };
+
+ spi1_pins: spi1-pins {
+ pins-spi {
+ pinmux = <PINMUX_GPIO75__FUNC_O_SPIM1_CSB>,
+ <PINMUX_GPIO76__FUNC_O_SPIM1_CLK>,
+ <PINMUX_GPIO77__FUNC_B0_SPIM1_MOSI>,
+ <PINMUX_GPIO78__FUNC_B0_SPIM1_MISO>;
+ bias-disable;
+ };
+ };
+
+ spi2_pins: spi2-pins {
+ pins-spi {
+ pinmux = <PINMUX_GPIO79__FUNC_O_SPIM2_CSB>,
+ <PINMUX_GPIO80__FUNC_O_SPIM2_CLK>,
+ <PINMUX_GPIO81__FUNC_B0_SPIM2_MOSI>,
+ <PINMUX_GPIO82__FUNC_B0_SPIM2_MISO>;
+ bias-disable;
+ };
+ };
+
+ touch_vreg_pins: touch-avdd-pins {
+ pins-power {
+ pinmux = <PINMUX_GPIO120__FUNC_B_GPIO120>;
+ output-high;
+ };
+ };
+
+ touch_pins: touch-pins {
+ pins-irq {
+ pinmux = <PINMUX_GPIO6__FUNC_B_GPIO6>;
+ input-enable;
+ bias-disable;
+ };
+
+ pins-reset {
+ pinmux = <PINMUX_GPIO5__FUNC_B_GPIO5>;
+ output-high;
+ };
+ };
+
+ tcpci_int_pins: tcpci-int-pins {
+ pins-int-n {
+ pinmux = <PINMUX_GPIO12__FUNC_B_GPIO12>;
+ bias-pull-up;
+ input-enable;
+ };
+ };
+
+ uart0_pins: uart0-pins {
+ pins {
+ pinmux = <PINMUX_GPIO31__FUNC_O_UTXD0>,
+ <PINMUX_GPIO32__FUNC_I1_URXD0>;
+ bias-pull-up;
+ };
+ };
+
+ uart1_pins: uart1-pins {
+ pins {
+ pinmux = <PINMUX_GPIO33__FUNC_O_UTXD1>,
+ <PINMUX_GPIO34__FUNC_I1_URXD1>;
+ bias-pull-up;
+ };
+ };
+
+ uart2_pins: uart2-pins {
+ pins {
+ pinmux = <PINMUX_GPIO35__FUNC_O_UTXD2>,
+ <PINMUX_GPIO36__FUNC_I1_URXD2>;
+ bias-pull-up;
+ };
+ };
+
+ usb_default_pins: usb-default-pins {
+ pins-iddig {
+ pinmux = <PINMUX_GPIO83__FUNC_B_GPIO83>;
+ input-enable;
+ bias-pull-up;
+ };
+
+ pins-valid {
+ pinmux = <PINMUX_GPIO85__FUNC_I0_VBUSVALID>;
+ input-enable;
+ };
+
+ pins-vbus {
+ pinmux = <PINMUX_GPIO84__FUNC_O_USB_DRVVBUS>;
+ output-high;
+ };
+
+ };
+
+ usb1_default_pins: usb1-default-pins {
+ pins-valid {
+ pinmux = <PINMUX_GPIO88__FUNC_I0_VBUSVALID_1P>;
+ input-enable;
+ };
+
+ pins-usb-hub-3v3-en {
+ pinmux = <PINMUX_GPIO112__FUNC_B_GPIO112>;
+ output-high;
+ };
+ };
+
+ usb2_default_pins: usb2-default-pins {
+ pins-iddig {
+ pinmux = <PINMUX_GPIO89__FUNC_B_GPIO89>;
+ input-enable;
+ bias-pull-up;
+ };
+ };
+
+ wifi_pwrseq_pins: wifi-pwrseq-pins {
+ pins-wifi-enable {
+ pinmux = <PINMUX_GPIO127__FUNC_B_GPIO127>;
+ output-low;
+ };
+ };
+};
+
+&eth {
+ phy-mode ="rgmii-id";
+ phy-handle = <&ethernet_phy0>;
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&eth_default_pins>;
+ pinctrl-1 = <&eth_sleep_pins>;
+ mediatek,mac-wol;
+ snps,reset-gpio = <&pio 147 GPIO_ACTIVE_HIGH>;
+ snps,reset-delays-us = <0 10000 10000>;
+ status = "okay";
+};
+
+&eth_mdio {
+ ethernet_phy0: ethernet-phy@1 {
+ compatible = "ethernet-phy-id001c.c916";
+ reg = <0x1>;
+ };
+};
+
+&pmic {
+ interrupt-parent = <&pio>;
+ interrupts = <222 IRQ_TYPE_LEVEL_HIGH>;
+
+ mt6359keys: keys {
+ compatible = "mediatek,mt6359-keys";
+ mediatek,long-press-mode = <1>;
+ power-off-time-sec = <0>;
+
+ power-key {
+ linux,keycodes = <KEY_POWER>;
+ wakeup-source;
+ };
+ };
+};
+
+&scp {
+ memory-region = <&scp_mem>;
+ status = "okay";
+};
+
+&sound {
+ compatible = "mediatek,mt8390-mt6359-evk", "mediatek,mt8188-mt6359-evb";
+ model = "mt8390-evk";
+ pinctrl-names = "default";
+ pinctrl-0 = <&audio_default_pins>;
+ audio-routing =
+ "Headphone", "Headphone L",
+ "Headphone", "Headphone R",
+ "DMIC_INPUT", "AP DMIC",
+ "AP DMIC", "AUDGLB",
+ "AP DMIC", "MIC_BIAS_0",
+ "AP DMIC", "MIC_BIAS_2";
+ mediatek,adsp = <&adsp>;
+ status = "okay";
+
+ dai-link-0 {
+ link-name = "DL_SRC_BE";
+
+ codec {
+ sound-dai = <&pmic 0>;
+ };
+ };
+
+ dai-link-1 {
+ link-name = "DMIC_BE";
+
+ codec {
+ sound-dai = <&dmic_codec>;
+ };
+ };
+};
+
+&spi2 {
+ pinctrl-0 = <&spi2_pins>;
+ pinctrl-names = "default";
+ mediatek,pad-select = <0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "okay";
+};
+
+&uart0 {
+ pinctrl-0 = <&uart0_pins>;
+ pinctrl-names = "default";
+ status = "okay";
+};
+
+&uart1 {
+ pinctrl-0 = <&uart1_pins>;
+ pinctrl-names = "default";
+ status = "okay";
+};
+
+&uart2 {
+ pinctrl-0 = <&uart2_pins>;
+ pinctrl-names = "default";
+ status = "okay";
+};
+
+&u3phy0 {
+ status = "okay";
+};
+
+&u3phy1 {
+ status = "okay";
+};
+
+&u3phy2 {
+ status = "okay";
+};
+
+&ssusb0 {
+ dr_mode = "otg";
+ maximum-speed = "high-speed";
+ usb-role-switch;
+ wakeup-source;
+ vusb33-supply = <&mt6359_vusb_ldo_reg>;
+ pinctrl-0 = <&usb_default_pins>;
+ pinctrl-names = "default";
+ status = "okay";
+
+ connector {
+ compatible = "gpio-usb-b-connector", "usb-b-connector";
+ type = "micro";
+ id-gpios = <&pio 83 GPIO_ACTIVE_HIGH>;
+ vbus-supply = <&usb_p0_vbus>;
+ };
+};
+
+&xhci0 {
+ status = "okay";
+};
+
+&ssusb1 {
+ dr_mode = "otg";
+ usb-role-switch;
+ wakeup-source;
+ vusb33-supply = <&mt6359_vusb_ldo_reg>;
+ pinctrl-0 = <&usb1_default_pins>;
+ pinctrl-names = "default";
+ status = "okay";
+
+ port {
+ mtu3_hs1_role_sw: endpoint {
+ remote-endpoint = <&typec_con_hs>;
+ };
+ };
+};
+
+&xhci1 {
+ status = "okay";
+ vusb33-supply = <&mt6359_vusb_ldo_reg>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ hub_2_0: hub@1 {
+ compatible = "usb451,8025";
+ reg = <1>;
+ peer-hub = <&hub_3_0>;
+ reset-gpios = <&pio 7 GPIO_ACTIVE_HIGH>;
+ vdd-supply = <&usb_hub_fixed_3v3>;
+ };
+
+ hub_3_0: hub@2 {
+ compatible = "usb451,8027";
+ reg = <2>;
+ peer-hub = <&hub_2_0>;
+ reset-gpios = <&pio 7 GPIO_ACTIVE_HIGH>;
+ vdd-supply = <&usb_hub_fixed_3v3>;
+ };
+
+ port {
+ xhci_ss_ep: endpoint {
+ remote-endpoint = <&typec_con_ss>;
+ };
+ };
+};
+
+&ssusb2 {
+ dr_mode = "otg";
+ maximum-speed = "high-speed";
+ usb-role-switch;
+ vusb33-supply = <&mt6359_vusb_ldo_reg>;
+ wakeup-source;
+ pinctrl-names = "default";
+ pinctrl-0 = <&usb2_default_pins>;
+ status = "okay";
+
+ connector {
+ compatible = "gpio-usb-b-connector", "usb-b-connector";
+ type = "micro";
+ id-gpios = <&pio 89 GPIO_ACTIVE_HIGH>;
+ vbus-supply = <&usb_p2_vbus>;
+ };
+};
+
+&xhci2 {
+ vusb33-supply = <&mt6359_vusb_ldo_reg>;
+ vbus-supply = <&sdio_fixed_3v3>; /* wifi_3v3 */
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/mediatek/mt8395-genio-1200-evk.dts b/arch/arm64/boot/dts/mediatek/mt8395-genio-1200-evk.dts
index 5950194c9ccb..f02c32def593 100644
--- a/arch/arm64/boot/dts/mediatek/mt8395-genio-1200-evk.dts
+++ b/arch/arm64/boot/dts/mediatek/mt8395-genio-1200-evk.dts
@@ -229,6 +229,21 @@
pinctrl-0 = <&i2c2_pins>;
pinctrl-names = "default";
status = "okay";
+
+ typec-mux@48 {
+ compatible = "ite,it5205";
+ reg = <0x48>;
+ vcc-supply = <&mt6359_vibr_ldo_reg>;
+ mode-switch;
+ orientation-switch;
+ status = "okay";
+
+ port {
+ it5205_sbu_ep: endpoint {
+ remote-endpoint = <&mt6360_ssusb_sbu_ep>;
+ };
+ };
+ };
};

&i2c6 {
@@ -335,6 +350,63 @@
regulator-always-on;
};
};
+
+ tcpc {
+ compatible = "mediatek,mt6360-tcpc";
+ interrupts-extended = <&pio 17 IRQ_TYPE_LEVEL_LOW>;
+ interrupt-names = "PD_IRQB";
+
+ connector {
+ compatible = "usb-c-connector";
+ label = "USB-C";
+ data-role = "dual";
+ op-sink-microwatt = <10000000>;
+ power-role = "dual";
+ try-power-role = "sink";
+
+ source-pdos = <PDO_FIXED(5000, 1000,
+ PDO_FIXED_DUAL_ROLE |
+ PDO_FIXED_DATA_SWAP)>;
+ sink-pdos = <PDO_FIXED(5000, 2000,
+ PDO_FIXED_DUAL_ROLE |
+ PDO_FIXED_DATA_SWAP)>;
+
+ pd-revision = /bits/ 8 <0x03 0x01 0x01 0x06>;
+
+ altmodes {
+ displayport {
+ svid = /bits/ 16 <0xff01>;
+ vdo = <0x00001c46>;
+ };
+ };
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ typec_con_hs: endpoint {
+ remote-endpoint = <&mtu3_hs0_role_sw>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ typec_con_ss: endpoint {
+ remote-endpoint = <&mtu3_ss0_role_sw>;
+ };
+ };
+
+ port@2 {
+ reg = <2>;
+ mt6360_ssusb_sbu_ep: endpoint {
+ remote-endpoint = <&it5205_sbu_ep>;
+ };
+ };
+ };
+ };
+ };
};
};
@@ -770,6 +842,13 @@
};
};
+ u3_p0_vbus: u3-p0-vbus-default-pins {
+ pins-vbus {
+ pinmux = <PINMUX_GPIO63__FUNC_VBUSVALID>;
+ input-enable;
+ };
+ };
+
uart0_pins: uart0-pins {
pins {
pinmux = <PINMUX_GPIO98__FUNC_UTXD0>,
@@ -898,8 +977,31 @@
};

&ssusb0 {
+ dr_mode = "otg";
+ pinctrl-names = "default";
+ pinctrl-0 = <&u3_p0_vbus>;
+ usb-role-switch;
vusb33-supply = <&mt6359_vusb_ldo_reg>;
status = "okay";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ mtu3_hs0_role_sw: endpoint {
+ remote-endpoint = <&typec_con_hs>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ mtu3_ss0_role_sw: endpoint {
+ remote-endpoint = <&typec_con_ss>;
+ };
+ };
+ };
};

&ssusb2 {
diff --git a/arch/arm64/boot/dts/mediatek/mt8395-radxa-nio-12l-8-hd-panel.dtso b/arch/arm64/boot/dts/mediatek/mt8395-radxa-nio-12l-8-hd-panel.dtso
new file mode 100644
index 000000000000..0389c9cb8581
--- /dev/null
+++ b/arch/arm64/boot/dts/mediatek/mt8395-radxa-nio-12l-8-hd-panel.dtso
@@ -0,0 +1,84 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * Radxa Display 8 HD touchscreen module
+ * Copyright (C) 2025 Collabora Ltd.
+ */
+
+/dts-v1/;
+/plugin/;
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/interrupt-controller/irq.h>
+
+&backlight {
+ status = "okay";
+};
+
+&disp_pwm0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pwm0_default_pins>;
+ status = "okay";
+};
+
+&dsi0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "okay";
+
+ panel@0 {
+ compatible = "radxa,display-8hd-ad002", "jadard,jd9365da-h3";
+ reg = <0>;
+ backlight = <&backlight>;
+ vdd-supply = <&mt6359_vcn33_2_bt_ldo_reg>;
+ vccio-supply = <&mt6360_ldo2>;
+ reset-gpios = <&pio 108 GPIO_ACTIVE_LOW>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&panel_default_pins>;
+
+ port {
+ dsi_panel_in: endpoint {
+ remote-endpoint = <&dsi0_out>;
+ };
+ };
+ };
+};
+
+&dsi0_out {
+ remote-endpoint = <&dsi_panel_in>;
+};
+
+&i2c4 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ touchscreen@14 {
+ compatible = "goodix,gt911";
+ reg = <0x14>;
+ interrupts-extended = <&pio 132 IRQ_TYPE_EDGE_RISING>;
+ irq-gpios = <&pio 132 GPIO_ACTIVE_HIGH>;
+ reset-gpios = <&pio 133 GPIO_ACTIVE_HIGH>;
+ VDDIO-supply = <&mt6359_vcn33_2_bt_ldo_reg>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&touch_pins>;
+ };
+};
+
+&mipi_tx0 {
+ status = "okay";
+};
+
+&ovl0_in {
+ remote-endpoint = <&vdosys0_ep_main>;
+};
+
+&vdosys0 {
+ port {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ vdosys0_ep_main: endpoint@0 {
+ reg = <0>;
+ remote-endpoint = <&ovl0_in>;
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/mediatek/mt8395-radxa-nio-12l.dts b/arch/arm64/boot/dts/mediatek/mt8395-radxa-nio-12l.dts
index 41dc34837b02..1c922e98441a 100644
--- a/arch/arm64/boot/dts/mediatek/mt8395-radxa-nio-12l.dts
+++ b/arch/arm64/boot/dts/mediatek/mt8395-radxa-nio-12l.dts
@@ -48,6 +48,18 @@
reg = <0 0x40000000 0x1 0x0>;
};
+ backlight: backlight {
+ compatible = "pwm-backlight";
+ brightness-levels = <0 1023>;
+ default-brightness-level = <576>;
+ enable-gpios = <&pio 107 GPIO_ACTIVE_HIGH>;
+ num-interpolated-steps = <1023>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&dsi0_backlight_pins>;
+ pwms = <&disp_pwm0 0 500000>;
+ status = "disabled";
+ };
+
wifi_vreg: regulator-wifi-3v3-en {
compatible = "regulator-fixed";
regulator-name = "wifi_3v3_en";
@@ -172,6 +184,32 @@
cpu-supply = <&mt6315_6_vbuck1>;
};

+&dither0_out {
+ remote-endpoint = <&dsi0_in>;
+};
+
+&dsi0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ dsi0_in: endpoint {
+ remote-endpoint = <&dither0_out>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ dsi0_out: endpoint { };
+ };
+ };
+};
+
&eth {
phy-mode = "rgmii-rxid";
phy-handle = <&rgmii_phy>;
@@ -476,6 +514,13 @@
&pio {
mediatek,rsel-resistance-in-si-unit;
+ dsi0_backlight_pins: dsi0-backlight-pins {
+ pins-backlight-en {
+ pinmux = <PINMUX_GPIO107__FUNC_GPIO107>;
+ output-high;
+ };
+ };
+
eth_default_pins: eth-default-pins {
pins-cc {
pinmux = <PINMUX_GPIO85__FUNC_GBE_TXC>,
@@ -673,6 +718,13 @@
};
};
+ panel_default_pins: panel-pins {
+ pins-rst {
+ pinmux = <PINMUX_GPIO108__FUNC_GPIO108>;
+ bias-pull-up;
+ };
+ };
+
pcie0_default_pins: pcie0-default-pins {
pins-bus {
pinmux = <PINMUX_GPIO19__FUNC_WAKEN>,
@@ -691,6 +743,12 @@
};
};
+ pwm0_default_pins: pwm0-pins {
+ pins-disp-pwm {
+ pinmux = <PINMUX_GPIO97__FUNC_DISP_PWM0>;
+ };
+ };
+
spi1_pins: spi1-default-pins {
pins-bus {
pinmux = <PINMUX_GPIO136__FUNC_SPIM1_CSB>,
@@ -711,6 +769,19 @@
};
};
+ touch_pins: touch-pins {
+ pins-touch-int {
+ pinmux = <PINMUX_GPIO132__FUNC_GPIO132>;
+ input-enable;
+ bias-disable;
+ };
+
+ pins-touch-rst {
+ pinmux = <PINMUX_GPIO133__FUNC_GPIO133>;
+ output-high;
+ };
+ };
+
uart0_pins: uart0-pins {
pins-bus {
pinmux = <PINMUX_GPIO98__FUNC_UTXD0>,
diff --git a/arch/arm64/boot/dts/nvidia/tegra210-p2180.dtsi b/arch/arm64/boot/dts/nvidia/tegra210-p2180.dtsi
index 1c53ccc5e3cb..9b9d1d15b0c7 100644
--- a/arch/arm64/boot/dts/nvidia/tegra210-p2180.dtsi
+++ b/arch/arm64/boot/dts/nvidia/tegra210-p2180.dtsi
@@ -49,6 +49,19 @@
};
};
+ i2c@7000c000 {
+ status = "okay";
+
+ tmp451: temperature-sensor@4c {
+ compatible = "ti,tmp451";
+ reg = <0x4c>;
+ interrupt-parent = <&gpio>;
+ interrupts = <TEGRA_GPIO(X, 4) IRQ_TYPE_LEVEL_LOW>;
+ vcc-supply = <&vdd_1v8>;
+ #thermal-sensor-cells = <1>;
+ };
+ };
+
i2c@7000c400 {
status = "okay";
diff --git a/arch/arm64/boot/dts/nvidia/tegra210-p2597.dtsi b/arch/arm64/boot/dts/nvidia/tegra210-p2597.dtsi
index 63b94a04308e..83ed6ac2a8d8 100644
--- a/arch/arm64/boot/dts/nvidia/tegra210-p2597.dtsi
+++ b/arch/arm64/boot/dts/nvidia/tegra210-p2597.dtsi
@@ -1375,6 +1375,15 @@
#gpio-cells = <2>;
gpio-controller;
+
+ interrupt-parent = <&gpio>;
+ interrupts = <TEGRA_GPIO(L, 1) IRQ_TYPE_EDGE_FALLING>;
+
+ #interrupt-cells = <2>;
+ interrupt-controller;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&gpio_1v8>;
};
exp2: gpio@77 {
@@ -1383,6 +1392,15 @@
#gpio-cells = <2>;
gpio-controller;
+
+ interrupt-parent = <&gpio>;
+ interrupts = <TEGRA_GPIO(Z, 2) IRQ_TYPE_EDGE_FALLING>;
+
+ #interrupt-cells = <2>;
+ interrupt-controller;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&gpio_1v8>;
};
};
@@ -1686,7 +1704,7 @@
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
regulator-always-on;
- gpio = <&exp1 14 GPIO_ACTIVE_HIGH>;
+ gpio = <&exp1 9 GPIO_ACTIVE_HIGH>;
enable-active-high;
vin-supply = <&vdd_1v8>;
};
diff --git a/arch/arm64/boot/dts/nvidia/tegra210-p3450-0000.dts b/arch/arm64/boot/dts/nvidia/tegra210-p3450-0000.dts
index c56824d7f4d8..0ecdd7243b2e 100644
--- a/arch/arm64/boot/dts/nvidia/tegra210-p3450-0000.dts
+++ b/arch/arm64/boot/dts/nvidia/tegra210-p3450-0000.dts
@@ -266,7 +266,6 @@
regulator-max-microvolt = <1170000>;
regulator-enable-ramp-delay = <146>;
regulator-ramp-delay = <27500>;
- regulator-ramp-delay-scale = <300>;
regulator-always-on;
regulator-boot-on;
@@ -281,7 +280,6 @@
regulator-max-microvolt = <1150000>;
regulator-enable-ramp-delay = <176>;
regulator-ramp-delay = <27500>;
- regulator-ramp-delay-scale = <300>;
regulator-always-on;
regulator-boot-on;
@@ -296,7 +294,6 @@
regulator-max-microvolt = <1350000>;
regulator-enable-ramp-delay = <176>;
regulator-ramp-delay = <27500>;
- regulator-ramp-delay-scale = <350>;
regulator-always-on;
regulator-boot-on;
@@ -311,7 +308,6 @@
regulator-max-microvolt = <1800000>;
regulator-enable-ramp-delay = <242>;
regulator-ramp-delay = <27500>;
- regulator-ramp-delay-scale = <360>;
regulator-always-on;
regulator-boot-on;
@@ -326,7 +322,6 @@
regulator-max-microvolt = <1200000>;
regulator-enable-ramp-delay = <26>;
regulator-ramp-delay = <100000>;
- regulator-ramp-delay-scale = <200>;
regulator-always-on;
regulator-boot-on;
@@ -341,7 +336,6 @@
regulator-max-microvolt = <1050000>;
regulator-enable-ramp-delay = <22>;
regulator-ramp-delay = <100000>;
- regulator-ramp-delay-scale = <200>;
maxim,active-fps-source = <MAX77620_FPS_SRC_NONE>;
maxim,active-fps-power-up-slot = <0>;
@@ -354,7 +348,6 @@
regulator-max-microvolt = <3300000>;
regulator-enable-ramp-delay = <62>;
regulator-ramp-delay = <100000>;
- regulator-ramp-delay-scale = <200>;
maxim,active-fps-source = <MAX77620_FPS_SRC_NONE>;
maxim,active-fps-power-up-slot = <0>;
@@ -371,7 +364,6 @@
regulator-max-microvolt = <1100000>;
regulator-enable-ramp-delay = <22>;
regulator-ramp-delay = <100000>;
- regulator-ramp-delay-scale = <200>;
regulator-disable-active-discharge;
regulator-always-on;
regulator-boot-on;
@@ -395,7 +387,6 @@
regulator-max-microvolt = <1050000>;
regulator-enable-ramp-delay = <24>;
regulator-ramp-delay = <100000>;
- regulator-ramp-delay-scale = <200>;
maxim,active-fps-source = <MAX77620_FPS_SRC_1>;
maxim,active-fps-power-up-slot = <3>;
@@ -408,7 +399,6 @@
regulator-max-microvolt = <1050000>;
regulator-enable-ramp-delay = <22>;
regulator-ramp-delay = <100000>;
- regulator-ramp-delay-scale = <200>;
maxim,active-fps-source = <MAX77620_FPS_SRC_1>;
maxim,active-fps-power-up-slot = <6>;
diff --git a/arch/arm64/boot/dts/nvidia/tegra210.dtsi b/arch/arm64/boot/dts/nvidia/tegra210.dtsi
index 942e3a0f81ed..b6c84d195c0e 100644
--- a/arch/arm64/boot/dts/nvidia/tegra210.dtsi
+++ b/arch/arm64/boot/dts/nvidia/tegra210.dtsi
@@ -874,6 +874,16 @@
pins = "sdmmc3";
power-source = <TEGRA_IO_PAD_VOLTAGE_3V3>;
};
+
+ gpio_1v8: gpio-1v8 {
+ pins = "gpio";
+ power-source = <TEGRA_IO_PAD_VOLTAGE_1V8>;
+ };
+
+ gpio_3v3: gpio-3v3 {
+ pins = "gpio";
+ power-source = <TEGRA_IO_PAD_VOLTAGE_3V3>;
+ };
};
powergates {
diff --git a/arch/arm64/boot/dts/nvidia/tegra234-p3740-0002+p3701-0008.dts b/arch/arm64/boot/dts/nvidia/tegra234-p3740-0002+p3701-0008.dts
index 36e888053746..9ce55b4d2de8 100644
--- a/arch/arm64/boot/dts/nvidia/tegra234-p3740-0002+p3701-0008.dts
+++ b/arch/arm64/boot/dts/nvidia/tegra234-p3740-0002+p3701-0008.dts
@@ -302,6 +302,16 @@
};
pcie@141a0000 {
+ reg = <0x00 0x141a0000 0x0 0x00020000 /* appl registers (128K) */
+ 0x00 0x3a000000 0x0 0x00040000 /* configuration space (256K) */
+ 0x00 0x3a040000 0x0 0x00040000 /* iATU_DMA reg space (256K) */
+ 0x00 0x3a080000 0x0 0x00040000 /* DBI reg space (256K) */
+ 0x2e 0x20000000 0x0 0x10000000>; /* ECAM (256MB) */
+
+ ranges = <0x81000000 0x00 0x3a100000 0x00 0x3a100000 0x0 0x00100000 /* downstream I/O (1MB) */
+ 0x82000000 0x00 0x40000000 0x2e 0x30000000 0x0 0x08000000 /* non-prefetchable memory (128MB) */
+ 0xc3000000 0x28 0x00000000 0x28 0x00000000 0x6 0x20000000>; /* prefetchable memory (25088MB) */
+
status = "okay";
vddio-pex-ctl-supply = <&vdd_1v8_ls>;
phys = <&p2u_nvhs_0>, <&p2u_nvhs_1>, <&p2u_nvhs_2>,
diff --git a/arch/arm64/boot/dts/nvidia/tegra234-p3768-0000+p3767.dtsi b/arch/arm64/boot/dts/nvidia/tegra234-p3768-0000+p3767.dtsi
index 19340d13f789..41821354bbda 100644
--- a/arch/arm64/boot/dts/nvidia/tegra234-p3768-0000+p3767.dtsi
+++ b/arch/arm64/boot/dts/nvidia/tegra234-p3768-0000+p3767.dtsi
@@ -227,13 +227,6 @@
wakeup-event-action = <EV_ACT_ASSERTED>;
wakeup-source;
};
-
- key-suspend {
- label = "Suspend";
- gpios = <&gpio TEGRA234_MAIN_GPIO(G, 2) GPIO_ACTIVE_LOW>;
- linux,input-type = <EV_KEY>;
- linux,code = <KEY_SLEEP>;
- };
};
fan: pwm-fan {
diff --git a/arch/arm64/boot/dts/qcom/sc8280xp-huawei-gaokun3.dts b/arch/arm64/boot/dts/qcom/sc8280xp-huawei-gaokun3.dts
index 09b95f89ee58..1667c7157057 100644
--- a/arch/arm64/boot/dts/qcom/sc8280xp-huawei-gaokun3.dts
+++ b/arch/arm64/boot/dts/qcom/sc8280xp-huawei-gaokun3.dts
@@ -28,6 +28,7 @@
aliases {
i2c4 = &i2c4;
+ i2c15 = &i2c15;
serial1 = &uart2;
};
@@ -216,6 +217,40 @@
};
};
+ usb0-sbu-mux {
+ compatible = "pericom,pi3usb102", "gpio-sbu-mux";
+
+ select-gpios = <&tlmm 164 GPIO_ACTIVE_HIGH>;
+
+ pinctrl-0 = <&usb0_sbu_default>;
+ pinctrl-names = "default";
+
+ orientation-switch;
+
+ port {
+ usb0_sbu_mux: endpoint {
+ remote-endpoint = <&ucsi0_sbu>;
+ };
+ };
+ };
+
+ usb1-sbu-mux {
+ compatible = "pericom,pi3usb102", "gpio-sbu-mux";
+
+ select-gpios = <&tlmm 47 GPIO_ACTIVE_HIGH>;
+
+ pinctrl-0 = <&usb1_sbu_default>;
+ pinctrl-names = "default";
+
+ orientation-switch;
+
+ port {
+ usb1_sbu_mux: endpoint {
+ remote-endpoint = <&ucsi1_sbu>;
+ };
+ };
+ };
+
wcn6855-pmu {
compatible = "qcom,wcn6855-pmu";
@@ -584,6 +619,97 @@
};

+&i2c15 {
+ clock-frequency = <400000>;
+
+ pinctrl-0 = <&i2c15_default>;
+ pinctrl-names = "default";
+
+ status = "okay";
+
+ embedded-controller@38 {
+ compatible = "huawei,gaokun3-ec";
+ reg = <0x38>;
+
+ interrupts-extended = <&tlmm 107 IRQ_TYPE_LEVEL_LOW>;
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ connector@0 {
+ compatible = "usb-c-connector";
+ reg = <0>;
+ power-role = "dual";
+ data-role = "dual";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ ucsi0_hs_in: endpoint {
+ remote-endpoint = <&usb_0_dwc3_hs>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ ucsi0_ss_in: endpoint {
+ remote-endpoint = <&usb_0_qmpphy_out>;
+ };
+ };
+
+ port@2 {
+ reg = <2>;
+
+ ucsi0_sbu: endpoint {
+ remote-endpoint = <&usb0_sbu_mux>;
+ };
+ };
+ };
+ };
+
+ connector@1 {
+ compatible = "usb-c-connector";
+ reg = <1>;
+ power-role = "dual";
+ data-role = "dual";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ ucsi1_hs_in: endpoint {
+ remote-endpoint = <&usb_1_dwc3_hs>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ ucsi1_ss_in: endpoint {
+ remote-endpoint = <&usb_1_qmpphy_out>;
+ };
+ };
+
+ port@2 {
+ reg = <2>;
+
+ ucsi1_sbu: endpoint {
+ remote-endpoint = <&usb1_sbu_mux>;
+ };
+ };
+ };
+ };
+ };
+};
+
&mdss0 {
status = "okay";
};
@@ -1004,6 +1130,10 @@
dr_mode = "host";
};

+&usb_0_dwc3_hs {
+ remote-endpoint = <&ucsi0_hs_in>;
+};
+
&usb_0_hsphy {
vdda-pll-supply = <&vreg_l9d>;
vdda18-supply = <&vreg_l1c>;
@@ -1025,6 +1155,10 @@
remote-endpoint = <&mdss0_dp0_out>;
};

+&usb_0_qmpphy_out {
+ remote-endpoint = <&ucsi0_ss_in>;
+};
+
&usb_1 {
status = "okay";
};
@@ -1033,6 +1167,10 @@
dr_mode = "host";
};

+&usb_1_dwc3_hs {
+ remote-endpoint = <&ucsi1_hs_in>;
+};
+
&usb_1_hsphy {
vdda-pll-supply = <&vreg_l4b>;
vdda18-supply = <&vreg_l1c>;
@@ -1054,6 +1192,10 @@
remote-endpoint = <&mdss0_dp1_out>;
};

+&usb_1_qmpphy_out {
+ remote-endpoint = <&ucsi1_ss_in>;
+};
+
&usb_2 {
status = "okay";
};
@@ -1177,6 +1319,13 @@
bias-disable;
};
+ i2c15_default: i2c15-default-state {
+ pins = "gpio36", "gpio37";
+ function = "qup15";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+
mode_pin_active: mode-pin-state {
pins = "gpio26";
function = "gpio";
@@ -1301,6 +1450,20 @@
};
};
+ usb0_sbu_default: usb0-sbu-state {
+ pins = "gpio164";
+ function = "gpio";
+ drive-strength = <16>;
+ bias-disable;
+ };
+
+ usb1_sbu_default: usb1-sbu-state {
+ pins = "gpio47";
+ function = "gpio";
+ drive-strength = <16>;
+ bias-disable;
+ };
+
wcd_default: wcd-default-state {
reset-pins {
pins = "gpio106";
diff --git a/arch/arm64/boot/dts/qcom/sdm845.dtsi b/arch/arm64/boot/dts/qcom/sdm845.dtsi
index e0ce804bb1a3..d0314cdf0b92 100644
--- a/arch/arm64/boot/dts/qcom/sdm845.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845.dtsi
@@ -5163,7 +5163,6 @@
<GIC_SPI 341 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 342 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 343 IRQ_TYPE_LEVEL_HIGH>;
- dma-coherent;
};
anoc_1_tbu: tbu@150c5000 {
diff --git a/arch/arm64/boot/dts/renesas/Makefile b/arch/arm64/boot/dts/renesas/Makefile
index 928635f2e76b..d25e665ee4bf 100644
--- a/arch/arm64/boot/dts/renesas/Makefile
+++ b/arch/arm64/boot/dts/renesas/Makefile
@@ -132,6 +132,7 @@ dtb-$(CONFIG_ARCH_R9A07G044) += r9a07g044c2-smarc-cru-csi-ov5645.dtbo
r9a07g044c2-smarc-cru-csi-ov5645-dtbs := r9a07g044c2-smarc.dtb r9a07g044c2-smarc-cru-csi-ov5645.dtbo
dtb-$(CONFIG_ARCH_R9A07G044) += r9a07g044c2-smarc-cru-csi-ov5645.dtb

+dtb-$(CONFIG_ARCH_R9A07G044) += r9a07g044l2-remi-pi.dtb
dtb-$(CONFIG_ARCH_R9A07G044) += r9a07g044l2-smarc.dtb
dtb-$(CONFIG_ARCH_R9A07G044) += r9a07g044l2-smarc-cru-csi-ov5645.dtbo
r9a07g044l2-smarc-cru-csi-ov5645-dtbs := r9a07g044l2-smarc.dtb r9a07g044l2-smarc-cru-csi-ov5645.dtbo
@@ -143,12 +144,16 @@ r9a07g054l2-smarc-cru-csi-ov5645-dtbs := r9a07g054l2-smarc.dtb r9a07g054l2-smarc
dtb-$(CONFIG_ARCH_R9A07G054) += r9a07g054l2-smarc-cru-csi-ov5645.dtb

dtb-$(CONFIG_ARCH_R9A08G045) += r9a08g045s33-smarc.dtb
+dtb-$(CONFIG_ARCH_R9A08G045) += r9a08g045s33-smarc-pmod1-type-3a.dtbo
+r9a08g045s33-smarc-pmod1-type-3a-dtbs := r9a08g045s33-smarc.dtb r9a08g045s33-smarc-pmod1-type-3a.dtbo
+dtb-$(CONFIG_ARCH_R9A08G045) += r9a08g045s33-smarc-pmod1-type-3a.dtb

dtb-$(CONFIG_ARCH_R9A09G011) += r9a09g011-v2mevk2.dtb

dtb-$(CONFIG_ARCH_R9A09G047) += r9a09g047e57-smarc.dtb

dtb-$(CONFIG_ARCH_R9A09G057) += r9a09g057h44-rzv2h-evk.dtb
+dtb-$(CONFIG_ARCH_R9A09G057) += r9a09g057h48-kakip.dtb

dtb-$(CONFIG_ARCH_RCAR_GEN3) += draak-ebisu-panel-aa104xd12.dtbo
dtb-$(CONFIG_ARCH_RCAR_GEN3) += salvator-panel-aa104xd12.dtbo
diff --git a/arch/arm64/boot/dts/renesas/beacon-renesom-som.dtsi b/arch/arm64/boot/dts/renesas/beacon-renesom-som.dtsi
index 43f88c199b78..1489bc8d2f4e 100644
--- a/arch/arm64/boot/dts/renesas/beacon-renesom-som.dtsi
+++ b/arch/arm64/boot/dts/renesas/beacon-renesom-som.dtsi
@@ -282,6 +282,7 @@
&scif2 {
pinctrl-0 = <&scif2_pins>;
pinctrl-names = "default";
+ bootph-all;
status = "okay";
};
diff --git a/arch/arm64/boot/dts/renesas/condor-common.dtsi b/arch/arm64/boot/dts/renesas/condor-common.dtsi
index 375a56b20f26..a10584150571 100644
--- a/arch/arm64/boot/dts/renesas/condor-common.dtsi
+++ b/arch/arm64/boot/dts/renesas/condor-common.dtsi
@@ -544,6 +544,7 @@
&scif0 {
pinctrl-0 = <&scif0_pins>, <&scif_clk_pins>;
pinctrl-names = "default";
+ bootph-all;
status = "okay";
};
diff --git a/arch/arm64/boot/dts/renesas/draak.dtsi b/arch/arm64/boot/dts/renesas/draak.dtsi
index 05712cd96d28..380b857fd273 100644
--- a/arch/arm64/boot/dts/renesas/draak.dtsi
+++ b/arch/arm64/boot/dts/renesas/draak.dtsi
@@ -695,6 +695,7 @@
&scif2 {
pinctrl-0 = <&scif2_pins>;
pinctrl-names = "default";
+ bootph-all;
status = "okay";
};
diff --git a/arch/arm64/boot/dts/renesas/ebisu.dtsi b/arch/arm64/boot/dts/renesas/ebisu.dtsi
index ab8283656660..4f38b01ae18d 100644
--- a/arch/arm64/boot/dts/renesas/ebisu.dtsi
+++ b/arch/arm64/boot/dts/renesas/ebisu.dtsi
@@ -786,6 +786,7 @@
&scif2 {
pinctrl-0 = <&scif2_pins>;
pinctrl-names = "default";
+ bootph-all;
status = "okay";
};
diff --git a/arch/arm64/boot/dts/renesas/hihope-common.dtsi b/arch/arm64/boot/dts/renesas/hihope-common.dtsi
index 659ae1fed2fa..4e78139d52f6 100644
--- a/arch/arm64/boot/dts/renesas/hihope-common.dtsi
+++ b/arch/arm64/boot/dts/renesas/hihope-common.dtsi
@@ -289,6 +289,7 @@
&scif2 {
pinctrl-0 = <&scif2_pins>;
pinctrl-names = "default";
+ bootph-all;
status = "okay";
};
diff --git a/arch/arm64/boot/dts/renesas/r8a774a1.dtsi b/arch/arm64/boot/dts/renesas/r8a774a1.dtsi
index f065ee90649a..c8b87aed92a3 100644
--- a/arch/arm64/boot/dts/renesas/r8a774a1.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a774a1.dtsi
@@ -215,6 +215,7 @@
#clock-cells = <0>;
/* This value must be overridden by the board */
clock-frequency = <0>;
+ bootph-all;
};
extalr_clk: extalr {
@@ -222,6 +223,7 @@
#clock-cells = <0>;
/* This value must be overridden by the board */
clock-frequency = <0>;
+ bootph-all;
};
/* External PCIe clock - can be overridden by the board */
@@ -262,6 +264,8 @@
soc {
compatible = "simple-bus";
interrupt-parent = <&gic>;
+ bootph-all;
+
#address-cells = <2>;
#size-cells = <2>;
ranges;
@@ -400,6 +404,7 @@
pfc: pinctrl@e6060000 {
compatible = "renesas,pfc-r8a774a1";
reg = <0 0xe6060000 0 0x50c>;
+ bootph-all;
};
cmt0: timer@e60f0000 {
@@ -480,11 +485,13 @@
#clock-cells = <2>;
#power-domain-cells = <0>;
#reset-cells = <1>;
+ bootph-all;
};
rst: reset-controller@e6160000 {
compatible = "renesas,r8a774a1-rst";
reg = <0 0xe6160000 0 0x018c>;
+ bootph-all;
};
sysc: system-controller@e6180000 {
@@ -2785,6 +2792,7 @@
prr: chipid@fff00044 {
compatible = "renesas,prr";
reg = <0 0xfff00044 0 4>;
+ bootph-all;
};
};
diff --git a/arch/arm64/boot/dts/renesas/r8a774b1.dtsi b/arch/arm64/boot/dts/renesas/r8a774b1.dtsi
index 117cb6950f91..f2fc2a2035a1 100644
--- a/arch/arm64/boot/dts/renesas/r8a774b1.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a774b1.dtsi
@@ -108,6 +108,7 @@
#clock-cells = <0>;
/* This value must be overridden by the board */
clock-frequency = <0>;
+ bootph-all;
};
extalr_clk: extalr {
@@ -115,6 +116,7 @@
#clock-cells = <0>;
/* This value must be overridden by the board */
clock-frequency = <0>;
+ bootph-all;
};
/* External PCIe clock - can be overridden by the board */
@@ -146,6 +148,8 @@
soc {
compatible = "simple-bus";
interrupt-parent = <&gic>;
+ bootph-all;
+
#address-cells = <2>;
#size-cells = <2>;
ranges;
@@ -284,6 +288,7 @@
pfc: pinctrl@e6060000 {
compatible = "renesas,pfc-r8a774b1";
reg = <0 0xe6060000 0 0x50c>;
+ bootph-all;
};
cmt0: timer@e60f0000 {
@@ -364,11 +369,13 @@
#clock-cells = <2>;
#power-domain-cells = <0>;
#reset-cells = <1>;
+ bootph-all;
};
rst: reset-controller@e6160000 {
compatible = "renesas,r8a774b1-rst";
reg = <0 0xe6160000 0 0x0200>;
+ bootph-all;
};
sysc: system-controller@e6180000 {
@@ -2661,6 +2668,7 @@
prr: chipid@fff00044 {
compatible = "renesas,prr";
reg = <0 0xfff00044 0 4>;
+ bootph-all;
};
};
diff --git a/arch/arm64/boot/dts/renesas/r8a774c0-cat874.dts b/arch/arm64/boot/dts/renesas/r8a774c0-cat874.dts
index b78dbd807d15..57a281fc4977 100644
--- a/arch/arm64/boot/dts/renesas/r8a774c0-cat874.dts
+++ b/arch/arm64/boot/dts/renesas/r8a774c0-cat874.dts
@@ -378,6 +378,7 @@
&scif2 {
pinctrl-0 = <&scif2_pins>;
pinctrl-names = "default";
+ bootph-all;
status = "okay";
};
diff --git a/arch/arm64/boot/dts/renesas/r8a774c0.dtsi b/arch/arm64/boot/dts/renesas/r8a774c0.dtsi
index 7655d5e3a034..530ffd29cf13 100644
--- a/arch/arm64/boot/dts/renesas/r8a774c0.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a774c0.dtsi
@@ -47,16 +47,20 @@
cluster1_opp: opp-table-1 {
compatible = "operating-points-v2";
opp-shared;
+
opp-800000000 {
opp-hz = /bits/ 64 <800000000>;
+ opp-microvolt = <1030000>;
clock-latency-ns = <300000>;
};
opp-1000000000 {
opp-hz = /bits/ 64 <1000000000>;
+ opp-microvolt = <1030000>;
clock-latency-ns = <300000>;
};
opp-1200000000 {
opp-hz = /bits/ 64 <1200000000>;
+ opp-microvolt = <1030000>;
clock-latency-ns = <300000>;
opp-suspend;
};
@@ -103,6 +107,7 @@
#clock-cells = <0>;
/* This value must be overridden by the board */
clock-frequency = <0>;
+ bootph-all;
};
/* External PCIe clock - can be overridden by the board */
@@ -134,6 +139,8 @@
soc: soc {
compatible = "simple-bus";
interrupt-parent = <&gic>;
+ bootph-all;
+
#address-cells = <2>;
#size-cells = <2>;
ranges;
@@ -257,6 +264,7 @@
pfc: pinctrl@e6060000 {
compatible = "renesas,pfc-r8a774c0";
reg = <0 0xe6060000 0 0x508>;
+ bootph-all;
};
cmt0: timer@e60f0000 {
@@ -337,11 +345,13 @@
#clock-cells = <2>;
#power-domain-cells = <0>;
#reset-cells = <1>;
+ bootph-all;
};
rst: reset-controller@e6160000 {
compatible = "renesas,r8a774c0-rst";
reg = <0 0xe6160000 0 0x0200>;
+ bootph-all;
};
sysc: system-controller@e6180000 {
@@ -1953,6 +1963,7 @@
prr: chipid@fff00044 {
compatible = "renesas,prr";
reg = <0 0xfff00044 0 4>;
+ bootph-all;
};
};
diff --git a/arch/arm64/boot/dts/renesas/r8a774e1.dtsi b/arch/arm64/boot/dts/renesas/r8a774e1.dtsi
index f845ca604de0..e4dbda8c34d9 100644
--- a/arch/arm64/boot/dts/renesas/r8a774e1.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a774e1.dtsi
@@ -277,6 +277,7 @@
#clock-cells = <0>;
/* This value must be overridden by the board */
clock-frequency = <0>;
+ bootph-all;
};
extalr_clk: extalr {
@@ -284,6 +285,7 @@
#clock-cells = <0>;
/* This value must be overridden by the board */
clock-frequency = <0>;
+ bootph-all;
};
/* External PCIe clock - can be overridden by the board */
@@ -326,6 +328,8 @@
soc {
compatible = "simple-bus";
interrupt-parent = <&gic>;
+ bootph-all;
+
#address-cells = <2>;
#size-cells = <2>;
ranges;
@@ -464,6 +468,7 @@
pfc: pinctrl@e6060000 {
compatible = "renesas,pfc-r8a774e1";
reg = <0 0xe6060000 0 0x50c>;
+ bootph-all;
};
cmt0: timer@e60f0000 {
@@ -544,11 +549,13 @@
#clock-cells = <2>;
#power-domain-cells = <0>;
#reset-cells = <1>;
+ bootph-all;
};
rst: reset-controller@e6160000 {
compatible = "renesas,r8a774e1-rst";
reg = <0 0xe6160000 0 0x0200>;
+ bootph-all;
};
sysc: system-controller@e6180000 {
@@ -2917,6 +2924,7 @@
prr: chipid@fff00044 {
compatible = "renesas,prr";
reg = <0 0xfff00044 0 4>;
+ bootph-all;
};
};
diff --git a/arch/arm64/boot/dts/renesas/r8a77951.dtsi b/arch/arm64/boot/dts/renesas/r8a77951.dtsi
index 96f3b5fe7e92..6ee9cdeb5a3a 100644
--- a/arch/arm64/boot/dts/renesas/r8a77951.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a77951.dtsi
@@ -292,6 +292,7 @@
#clock-cells = <0>;
/* This value must be overridden by the board */
clock-frequency = <0>;
+ bootph-all;
};
extalr_clk: extalr {
@@ -299,6 +300,7 @@
#clock-cells = <0>;
/* This value must be overridden by the board */
clock-frequency = <0>;
+ bootph-all;
};
/* External PCIe clock - can be overridden by the board */
@@ -347,6 +349,7 @@
soc: soc {
compatible = "simple-bus";
interrupt-parent = <&gic>;
+ bootph-all;
#address-cells = <2>;
#size-cells = <2>;
@@ -485,6 +488,7 @@
pfc: pinctrl@e6060000 {
compatible = "renesas,pfc-r8a7795";
reg = <0 0xe6060000 0 0x50c>;
+ bootph-all;
};
cmt0: timer@e60f0000 {
@@ -565,11 +569,13 @@
#clock-cells = <2>;
#power-domain-cells = <0>;
#reset-cells = <1>;
+ bootph-all;
};
rst: reset-controller@e6160000 {
compatible = "renesas,r8a7795-rst";
reg = <0 0xe6160000 0 0x0200>;
+ bootph-all;
};
sysc: system-controller@e6180000 {
@@ -3398,6 +3404,7 @@
prr: chipid@fff00044 {
compatible = "renesas,prr";
reg = <0 0xfff00044 0 4>;
+ bootph-all;
};
};
diff --git a/arch/arm64/boot/dts/renesas/r8a77960.dtsi b/arch/arm64/boot/dts/renesas/r8a77960.dtsi
index ee80f52dc7cf..a323ac47ca70 100644
--- a/arch/arm64/boot/dts/renesas/r8a77960.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a77960.dtsi
@@ -264,6 +264,7 @@
#clock-cells = <0>;
/* This value must be overridden by the board */
clock-frequency = <0>;
+ bootph-all;
};
extalr_clk: extalr {
@@ -271,6 +272,7 @@
#clock-cells = <0>;
/* This value must be overridden by the board */
clock-frequency = <0>;
+ bootph-all;
};
/* External PCIe clock - can be overridden by the board */
@@ -311,6 +313,8 @@
soc {
compatible = "simple-bus";
interrupt-parent = <&gic>;
+ bootph-all;
+
#address-cells = <2>;
#size-cells = <2>;
ranges;
@@ -449,6 +453,7 @@
pfc: pinctrl@e6060000 {
compatible = "renesas,pfc-r8a7796";
reg = <0 0xe6060000 0 0x50c>;
+ bootph-all;
};
cmt0: timer@e60f0000 {
@@ -529,11 +534,13 @@
#clock-cells = <2>;
#power-domain-cells = <0>;
#reset-cells = <1>;
+ bootph-all;
};
rst: reset-controller@e6160000 {
compatible = "renesas,r8a7796-rst";
reg = <0 0xe6160000 0 0x0200>;
+ bootph-all;
};
sysc: system-controller@e6180000 {
@@ -2996,6 +3003,7 @@
prr: chipid@fff00044 {
compatible = "renesas,prr";
reg = <0 0xfff00044 0 4>;
+ bootph-all;
};
};
diff --git a/arch/arm64/boot/dts/renesas/r8a77961.dtsi b/arch/arm64/boot/dts/renesas/r8a77961.dtsi
index 3b9066043a71..49f6d31c5903 100644
--- a/arch/arm64/boot/dts/renesas/r8a77961.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a77961.dtsi
@@ -264,6 +264,7 @@
#clock-cells = <0>;
/* This value must be overridden by the board */
clock-frequency = <0>;
+ bootph-all;
};
extalr_clk: extalr {
@@ -271,6 +272,7 @@
#clock-cells = <0>;
/* This value must be overridden by the board */
clock-frequency = <0>;
+ bootph-all;
};
/* External PCIe clock - can be overridden by the board */
@@ -311,6 +313,8 @@
soc {
compatible = "simple-bus";
interrupt-parent = <&gic>;
+ bootph-all;
+
#address-cells = <2>;
#size-cells = <2>;
ranges;
@@ -449,6 +453,7 @@
pfc: pinctrl@e6060000 {
compatible = "renesas,pfc-r8a77961";
reg = <0 0xe6060000 0 0x50c>;
+ bootph-all;
};
cmt0: timer@e60f0000 {
@@ -529,11 +534,13 @@
#clock-cells = <2>;
#power-domain-cells = <0>;
#reset-cells = <1>;
+ bootph-all;
};
rst: reset-controller@e6160000 {
compatible = "renesas,r8a77961-rst";
reg = <0 0xe6160000 0 0x0200>;
+ bootph-all;
};
sysc: system-controller@e6180000 {
@@ -2817,6 +2824,7 @@
prr: chipid@fff00044 {
compatible = "renesas,prr";
reg = <0 0xfff00044 0 4>;
+ bootph-all;
};
};
diff --git a/arch/arm64/boot/dts/renesas/r8a77965.dtsi b/arch/arm64/boot/dts/renesas/r8a77965.dtsi
index 557bdf8fab17..136a22ca50b7 100644
--- a/arch/arm64/boot/dts/renesas/r8a77965.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a77965.dtsi
@@ -143,6 +143,7 @@
#clock-cells = <0>;
/* This value must be overridden by the board */
clock-frequency = <0>;
+ bootph-all;
};
extalr_clk: extalr {
@@ -150,6 +151,7 @@
#clock-cells = <0>;
/* This value must be overridden by the board */
clock-frequency = <0>;
+ bootph-all;
};
/* External PCIe clock - can be overridden by the board */
@@ -182,6 +184,8 @@
soc {
compatible = "simple-bus";
interrupt-parent = <&gic>;
+ bootph-all;
+
#address-cells = <2>;
#size-cells = <2>;
ranges;
@@ -320,6 +324,7 @@
pfc: pinctrl@e6060000 {
compatible = "renesas,pfc-r8a77965";
reg = <0 0xe6060000 0 0x50c>;
+ bootph-all;
};
cmt0: timer@e60f0000 {
@@ -400,11 +405,13 @@
#clock-cells = <2>;
#power-domain-cells = <0>;
#reset-cells = <1>;
+ bootph-all;
};
rst: reset-controller@e6160000 {
compatible = "renesas,r8a77965-rst";
reg = <0 0xe6160000 0 0x0200>;
+ bootph-all;
};
sysc: system-controller@e6180000 {
@@ -2828,6 +2835,7 @@
prr: chipid@fff00044 {
compatible = "renesas,prr";
reg = <0 0xfff00044 0 4>;
+ bootph-all;
};
};
diff --git a/arch/arm64/boot/dts/renesas/r8a77970-eagle-function-expansion.dtso b/arch/arm64/boot/dts/renesas/r8a77970-eagle-function-expansion.dtso
index 9450d8ac94cb..0c005660d8dd 100644
--- a/arch/arm64/boot/dts/renesas/r8a77970-eagle-function-expansion.dtso
+++ b/arch/arm64/boot/dts/renesas/r8a77970-eagle-function-expansion.dtso
@@ -70,7 +70,7 @@
gpio-controller;
#gpio-cells = <2>;
- vin0_adv7612_en {
+ vin0-adv7612-en-hog {
gpio-hog;
gpios = <3 GPIO_ACTIVE_LOW>;
output-high;
diff --git a/arch/arm64/boot/dts/renesas/r8a77970-eagle.dts b/arch/arm64/boot/dts/renesas/r8a77970-eagle.dts
index 32f07aa27316..8b594e9e9dc1 100644
--- a/arch/arm64/boot/dts/renesas/r8a77970-eagle.dts
+++ b/arch/arm64/boot/dts/renesas/r8a77970-eagle.dts
@@ -409,6 +409,7 @@
&scif0 {
pinctrl-0 = <&scif0_pins>;
pinctrl-names = "default";
+ bootph-all;
status = "okay";
};
diff --git a/arch/arm64/boot/dts/renesas/r8a77970-v3msk.dts b/arch/arm64/boot/dts/renesas/r8a77970-v3msk.dts
index 118e77f4477e..445f5dd7c983 100644
--- a/arch/arm64/boot/dts/renesas/r8a77970-v3msk.dts
+++ b/arch/arm64/boot/dts/renesas/r8a77970-v3msk.dts
@@ -296,6 +296,7 @@
&scif0 {
pinctrl-0 = <&scif0_pins>;
pinctrl-names = "default";
+ bootph-all;
status = "okay";
};
diff --git a/arch/arm64/boot/dts/renesas/r8a77970.dtsi b/arch/arm64/boot/dts/renesas/r8a77970.dtsi
index 38145fd6acf0..01744496805c 100644
--- a/arch/arm64/boot/dts/renesas/r8a77970.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a77970.dtsi
@@ -60,6 +60,7 @@
#clock-cells = <0>;
/* This value must be overridden by the board */
clock-frequency = <0>;
+ bootph-all;
};
extalr_clk: extalr {
@@ -67,6 +68,7 @@
#clock-cells = <0>;
/* This value must be overridden by the board */
clock-frequency = <0>;
+ bootph-all;
};
pmu_a53 {
@@ -91,6 +93,7 @@
soc {
compatible = "simple-bus";
interrupt-parent = <&gic>;
+ bootph-all;
#address-cells = <2>;
#size-cells = <2>;
@@ -200,6 +203,7 @@
pfc: pinctrl@e6060000 {
compatible = "renesas,pfc-r8a77970";
reg = <0 0xe6060000 0 0x504>;
+ bootph-all;
};
cmt0: timer@e60f0000 {
@@ -280,11 +284,13 @@
#clock-cells = <2>;
#power-domain-cells = <0>;
#reset-cells = <1>;
+ bootph-all;
};
rst: reset-controller@e6160000 {
compatible = "renesas,r8a77970-rst";
reg = <0 0xe6160000 0 0x200>;
+ bootph-all;
};
sysc: system-controller@e6180000 {
@@ -1196,6 +1202,7 @@
prr: chipid@fff00044 {
compatible = "renesas,prr";
reg = <0 0xfff00044 0 4>;
+ bootph-all;
};
};
diff --git a/arch/arm64/boot/dts/renesas/r8a77980-v3hsk.dts b/arch/arm64/boot/dts/renesas/r8a77980-v3hsk.dts
index b409a8d1737e..c2692d6fd00d 100644
--- a/arch/arm64/boot/dts/renesas/r8a77980-v3hsk.dts
+++ b/arch/arm64/boot/dts/renesas/r8a77980-v3hsk.dts
@@ -282,6 +282,7 @@
&scif0 {
pinctrl-0 = <&scif0_pins>, <&scif_clk_pins>;
pinctrl-names = "default";
+ bootph-all;
status = "okay";
};
diff --git a/arch/arm64/boot/dts/renesas/r8a77980.dtsi b/arch/arm64/boot/dts/renesas/r8a77980.dtsi
index 55a6c622f873..f7e506ad7a21 100644
--- a/arch/arm64/boot/dts/renesas/r8a77980.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a77980.dtsi
@@ -80,6 +80,7 @@
#clock-cells = <0>;
/* This value must be overridden by the board */
clock-frequency = <0>;
+ bootph-all;
};
extalr_clk: extalr {
@@ -87,6 +88,7 @@
#clock-cells = <0>;
/* This value must be overridden by the board */
clock-frequency = <0>;
+ bootph-all;
};
/* External PCIe clock - can be overridden by the board */
@@ -120,6 +122,7 @@
soc {
compatible = "simple-bus";
interrupt-parent = <&gic>;
+ bootph-all;
#address-cells = <2>;
#size-cells = <2>;
@@ -229,6 +232,7 @@
pfc: pinctrl@e6060000 {
compatible = "renesas,pfc-r8a77980";
reg = <0 0xe6060000 0 0x50c>;
+ bootph-all;
};
cmt0: timer@e60f0000 {
@@ -309,11 +313,13 @@
#clock-cells = <2>;
#power-domain-cells = <0>;
#reset-cells = <1>;
+ bootph-all;
};
rst: reset-controller@e6160000 {
compatible = "renesas,r8a77980-rst";
reg = <0 0xe6160000 0 0x200>;
+ bootph-all;
};
sysc: system-controller@e6180000 {
@@ -1579,6 +1585,7 @@
prr: chipid@fff00044 {
compatible = "renesas,prr";
reg = <0 0xfff00044 0 4>;
+ bootph-all;
};
};
diff --git a/arch/arm64/boot/dts/renesas/r8a77990.dtsi b/arch/arm64/boot/dts/renesas/r8a77990.dtsi
index 233af3081e84..6b8742045836 100644
--- a/arch/arm64/boot/dts/renesas/r8a77990.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a77990.dtsi
@@ -47,16 +47,20 @@
cluster1_opp: opp-table-1 {
compatible = "operating-points-v2";
opp-shared;
+
opp-800000000 {
opp-hz = /bits/ 64 <800000000>;
+ opp-microvolt = <1030000>;
clock-latency-ns = <300000>;
};
opp-1000000000 {
opp-hz = /bits/ 64 <1000000000>;
+ opp-microvolt = <1030000>;
clock-latency-ns = <300000>;
};
opp-1200000000 {
opp-hz = /bits/ 64 <1200000000>;
+ opp-microvolt = <1030000>;
clock-latency-ns = <300000>;
opp-suspend;
};
@@ -118,6 +122,7 @@
#clock-cells = <0>;
/* This value must be overridden by the board */
clock-frequency = <0>;
+ bootph-all;
};
/* External PCIe clock - can be overridden by the board */
@@ -149,6 +154,8 @@
soc: soc {
compatible = "simple-bus";
interrupt-parent = <&gic>;
+ bootph-all;
+
#address-cells = <2>;
#size-cells = <2>;
ranges;
@@ -272,6 +279,7 @@
pfc: pinctrl@e6060000 {
compatible = "renesas,pfc-r8a77990";
reg = <0 0xe6060000 0 0x508>;
+ bootph-all;
};
i2c_dvfs: i2c@e60b0000 {
@@ -368,11 +376,13 @@
#clock-cells = <2>;
#power-domain-cells = <0>;
#reset-cells = <1>;
+ bootph-all;
};
rst: reset-controller@e6160000 {
compatible = "renesas,r8a77990-rst";
reg = <0 0xe6160000 0 0x0200>;
+ bootph-all;
};
sysc: system-controller@e6180000 {
@@ -2117,6 +2127,7 @@
prr: chipid@fff00044 {
compatible = "renesas,prr";
reg = <0 0xfff00044 0 4>;
+ bootph-all;
};
};
diff --git a/arch/arm64/boot/dts/renesas/r8a77995.dtsi b/arch/arm64/boot/dts/renesas/r8a77995.dtsi
index 5f0828a4675b..b66cd7c90d53 100644
--- a/arch/arm64/boot/dts/renesas/r8a77995.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a77995.dtsi
@@ -65,6 +65,7 @@
#clock-cells = <0>;
/* This value must be overridden by the board */
clock-frequency = <0>;
+ bootph-all;
};
pmu_a53 {
@@ -86,6 +87,8 @@
soc {
compatible = "simple-bus";
interrupt-parent = <&gic>;
+ bootph-all;
+
#address-cells = <2>;
#size-cells = <2>;
ranges;
@@ -209,6 +212,7 @@
pfc: pinctrl@e6060000 {
compatible = "renesas,pfc-r8a77995";
reg = <0 0xe6060000 0 0x508>;
+ bootph-all;
};
cmt0: timer@e60f0000 {
@@ -289,11 +293,13 @@
#clock-cells = <2>;
#power-domain-cells = <0>;
#reset-cells = <1>;
+ bootph-all;
};
rst: reset-controller@e6160000 {
compatible = "renesas,r8a77995-rst";
reg = <0 0xe6160000 0 0x0200>;
+ bootph-all;
};
sysc: system-controller@e6180000 {
@@ -1448,6 +1454,7 @@
prr: chipid@fff00044 {
compatible = "renesas,prr";
reg = <0 0xfff00044 0 4>;
+ bootph-all;
};
};
diff --git a/arch/arm64/boot/dts/renesas/r8a779a0-falcon-cpu.dtsi b/arch/arm64/boot/dts/renesas/r8a779a0-falcon-cpu.dtsi
index e8c8fca48b69..0916fd57d1f1 100644
--- a/arch/arm64/boot/dts/renesas/r8a779a0-falcon-cpu.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a779a0-falcon-cpu.dtsi
@@ -348,6 +348,7 @@
&scif0 {
pinctrl-0 = <&scif0_pins>;
pinctrl-names = "default";
+ bootph-all;
uart-has-rtscts;
status = "okay";
diff --git a/arch/arm64/boot/dts/renesas/r8a779a0.dtsi b/arch/arm64/boot/dts/renesas/r8a779a0.dtsi
index fe6d97859e4a..f1613bfd1632 100644
--- a/arch/arm64/boot/dts/renesas/r8a779a0.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a779a0.dtsi
@@ -47,6 +47,7 @@
#clock-cells = <0>;
/* This value must be overridden by the board */
clock-frequency = <0>;
+ bootph-all;
};
extalr_clk: extalr {
@@ -54,6 +55,7 @@
#clock-cells = <0>;
/* This value must be overridden by the board */
clock-frequency = <0>;
+ bootph-all;
};
pmu_a76 {
@@ -71,6 +73,8 @@
soc: soc {
compatible = "simple-bus";
interrupt-parent = <&gic>;
+ bootph-all;
+
#address-cells = <2>;
#size-cells = <2>;
ranges;
@@ -93,6 +97,7 @@
<0 0xe6060000 0 0x16c>, <0 0xe6060800 0 0x16c>,
<0 0xe6068000 0 0x16c>, <0 0xe6068800 0 0x16c>,
<0 0xe6069000 0 0x16c>, <0 0xe6069800 0 0x16c>;
+ bootph-all;
};
gpio0: gpio@e6058180 {
@@ -331,11 +336,13 @@
#clock-cells = <2>;
#power-domain-cells = <0>;
#reset-cells = <1>;
+ bootph-all;
};
rst: reset-controller@e6160000 {
compatible = "renesas,r8a779a0-rst";
reg = <0 0xe6160000 0 0x4000>;
+ bootph-all;
};
sysc: system-controller@e6180000 {
@@ -2338,6 +2345,42 @@
iommus = <&ipmmu_vi1 7>;
};
+ fcpvx0: fcp@fedb0000 {
+ compatible = "renesas,fcpv";
+ reg = <0 0xfedb0000 0 0x200>;
+ clocks = <&cpg CPG_MOD 1100>;
+ power-domains = <&sysc R8A779A0_PD_A3ISP01>;
+ resets = <&cpg 1100>;
+ iommus = <&ipmmu_vi1 24>;
+ };
+
+ fcpvx1: fcp@fedb8000 {
+ compatible = "renesas,fcpv";
+ reg = <0 0xfedb8000 0 0x200>;
+ clocks = <&cpg CPG_MOD 1101>;
+ power-domains = <&sysc R8A779A0_PD_A3ISP01>;
+ resets = <&cpg 1101>;
+ iommus = <&ipmmu_vi1 25>;
+ };
+
+ fcpvx2: fcp@fedc0000 {
+ compatible = "renesas,fcpv";
+ reg = <0 0xfedc0000 0 0x200>;
+ clocks = <&cpg CPG_MOD 1102>;
+ power-domains = <&sysc R8A779A0_PD_A3ISP23>;
+ resets = <&cpg 1102>;
+ iommus = <&ipmmu_vi1 26>;
+ };
+
+ fcpvx3: fcp@fedc8000 {
+ compatible = "renesas,fcpv";
+ reg = <0 0xfedc8000 0 0x200>;
+ clocks = <&cpg CPG_MOD 1103>;
+ power-domains = <&sysc R8A779A0_PD_A3ISP23>;
+ resets = <&cpg 1103>;
+ iommus = <&ipmmu_vi1 27>;
+ };
+
vspd0: vsp@fea20000 {
compatible = "renesas,vsp2";
reg = <0 0xfea20000 0 0x5000>;
@@ -2360,6 +2403,50 @@
renesas,fcp = <&fcpvd1>;
};
+ vspx0: vsp@fedd0000 {
+ compatible = "renesas,vsp2";
+ reg = <0 0xfedd0000 0 0x8000>;
+ interrupts = <GIC_SPI 600 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 1028>;
+ power-domains = <&sysc R8A779A0_PD_A3ISP01>;
+ resets = <&cpg 1028>;
+
+ renesas,fcp = <&fcpvx0>;
+ };
+
+ vspx1: vsp@fedd8000 {
+ compatible = "renesas,vsp2";
+ reg = <0 0xfedd8000 0 0x8000>;
+ interrupts = <GIC_SPI 601 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 1029>;
+ power-domains = <&sysc R8A779A0_PD_A3ISP01>;
+ resets = <&cpg 1029>;
+
+ renesas,fcp = <&fcpvx1>;
+ };
+
+ vspx2: vsp@fede0000 {
+ compatible = "renesas,vsp2";
+ reg = <0 0xfede0000 0 0x8000>;
+ interrupts = <GIC_SPI 602 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 1030>;
+ power-domains = <&sysc R8A779A0_PD_A3ISP23>;
+ resets = <&cpg 1030>;
+
+ renesas,fcp = <&fcpvx2>;
+ };
+
+ vspx3: vsp@fede8000 {
+ compatible = "renesas,vsp2";
+ reg = <0 0xfede8000 0 0x8000>;
+ interrupts = <GIC_SPI 603 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 1031>;
+ power-domains = <&sysc R8A779A0_PD_A3ISP23>;
+ resets = <&cpg 1031>;
+
+ renesas,fcp = <&fcpvx3>;
+ };
+
csi40: csi2@feaa0000 {
compatible = "renesas,r8a779a0-csi2";
reg = <0 0xfeaa0000 0 0x10000>;
@@ -2893,6 +2980,7 @@
prr: chipid@fff00044 {
compatible = "renesas,prr";
reg = <0 0xfff00044 0 4>;
+ bootph-all;
};
};
diff --git a/arch/arm64/boot/dts/renesas/r8a779f0-spider-cpu.dtsi b/arch/arm64/boot/dts/renesas/r8a779f0-spider-cpu.dtsi
index e03baefb6a98..1781bb79a619 100644
--- a/arch/arm64/boot/dts/renesas/r8a779f0-spider-cpu.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a779f0-spider-cpu.dtsi
@@ -101,6 +101,7 @@
&hscif0 {
pinctrl-0 = <&hscif0_pins>;
pinctrl-names = "default";
+ bootph-all;
uart-has-rtscts;
status = "okay";
diff --git a/arch/arm64/boot/dts/renesas/r8a779f0-spider-ethernet.dtsi b/arch/arm64/boot/dts/renesas/r8a779f0-spider-ethernet.dtsi
index 5d38669ed1ec..ad2b0398d354 100644
--- a/arch/arm64/boot/dts/renesas/r8a779f0-spider-ethernet.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a779f0-spider-ethernet.dtsi
@@ -5,6 +5,14 @@
* Copyright (C) 2021 Renesas Electronics Corp.
*/
+/ {
+ aliases {
+ ethernet0 = &rswitch_port0;
+ ethernet1 = &rswitch_port1;
+ ethernet2 = &rswitch_port2;
+ };
+};
+
&eth_serdes {
status = "okay";
};
@@ -42,61 +50,61 @@
pinctrl-0 = <&tsn0_pins>, <&tsn1_pins>, <&tsn2_pins>;
pinctrl-names = "default";
status = "okay";
+};
+
+&rswitch_port0 {
+ reg = <0>;
+ phy-handle = <&u101>;
+ phy-mode = "sgmii";
+ phys = <&eth_serdes 0>;
+ status = "okay";
- ethernet-ports {
+ mdio {
#address-cells = <1>;
#size-cells = <0>;
- port@0 {
- reg = <0>;
- phy-handle = <&u101>;
- phy-mode = "sgmii";
- phys = <&eth_serdes 0>;
-
- mdio {
- #address-cells = <1>;
- #size-cells = <0>;
-
- u101: ethernet-phy@1 {
- reg = <1>;
- compatible = "ethernet-phy-ieee802.3-c45";
- interrupts-extended = <&gpio3 10 IRQ_TYPE_LEVEL_LOW>;
- };
- };
- };
- port@1 {
+ u101: ethernet-phy@1 {
reg = <1>;
- phy-handle = <&u201>;
- phy-mode = "sgmii";
- phys = <&eth_serdes 1>;
-
- mdio {
- #address-cells = <1>;
- #size-cells = <0>;
-
- u201: ethernet-phy@2 {
- reg = <2>;
- compatible = "ethernet-phy-ieee802.3-c45";
- interrupts-extended = <&gpio3 11 IRQ_TYPE_LEVEL_LOW>;
- };
- };
+ compatible = "ethernet-phy-ieee802.3-c45";
+ interrupts-extended = <&gpio3 10 IRQ_TYPE_LEVEL_LOW>;
};
- port@2 {
+ };
+};
+
+&rswitch_port1 {
+ reg = <1>;
+ phy-handle = <&u201>;
+ phy-mode = "sgmii";
+ phys = <&eth_serdes 1>;
+ status = "okay";
+
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ u201: ethernet-phy@2 {
reg = <2>;
- phy-handle = <&u301>;
- phy-mode = "sgmii";
- phys = <&eth_serdes 2>;
-
- mdio {
- #address-cells = <1>;
- #size-cells = <0>;
-
- u301: ethernet-phy@3 {
- reg = <3>;
- compatible = "ethernet-phy-ieee802.3-c45";
- interrupts-extended = <&gpio3 9 IRQ_TYPE_LEVEL_LOW>;
- };
- };
+ compatible = "ethernet-phy-ieee802.3-c45";
+ interrupts-extended = <&gpio3 11 IRQ_TYPE_LEVEL_LOW>;
+ };
+ };
+};
+
+&rswitch_port2 {
+ reg = <2>;
+ phy-handle = <&u301>;
+ phy-mode = "sgmii";
+ phys = <&eth_serdes 2>;
+ status = "okay";
+
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ u301: ethernet-phy@3 {
+ reg = <3>;
+ compatible = "ethernet-phy-ieee802.3-c45";
+ interrupts-extended = <&gpio3 9 IRQ_TYPE_LEVEL_LOW>;
};
};
};
diff --git a/arch/arm64/boot/dts/renesas/r8a779f0.dtsi b/arch/arm64/boot/dts/renesas/r8a779f0.dtsi
index 054498e54730..b496495c59a6 100644
--- a/arch/arm64/boot/dts/renesas/r8a779f0.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a779f0.dtsi
@@ -253,6 +253,7 @@
#clock-cells = <0>;
/* This value must be overridden by the board */
clock-frequency = <0>;
+ bootph-all;
};
extalr_clk: extalr {
@@ -260,6 +261,7 @@
#clock-cells = <0>;
/* This value must be overridden by the board */
clock-frequency = <0>;
+ bootph-all;
};
pcie0_clkref: pcie0-clkref {
@@ -296,6 +298,8 @@
soc: soc {
compatible = "simple-bus";
interrupt-parent = <&gic>;
+ bootph-all;
+
#address-cells = <2>;
#size-cells = <2>;
ranges;
@@ -315,6 +319,7 @@
compatible = "renesas,pfc-r8a779f0";
reg = <0 0xe6050000 0 0x16c>, <0 0xe6050800 0 0x16c>,
<0 0xe6051000 0 0x16c>, <0 0xe6051800 0 0x16c>;
+ bootph-all;
};
gpio0: gpio@e6050180 {
@@ -463,11 +468,13 @@
#clock-cells = <2>;
#power-domain-cells = <0>;
#reset-cells = <1>;
+ bootph-all;
};
rst: reset-controller@e6160000 {
compatible = "renesas,r8a779f0-rst";
reg = <0 0xe6160000 0 0x4000>;
+ bootph-all;
};
sysc: system-controller@e6180000 {
@@ -974,17 +981,20 @@
#address-cells = <1>;
#size-cells = <0>;
- port@0 {
+ rswitch_port0: port@0 {
reg = <0>;
phys = <&eth_serdes 0>;
+ status = "disabled";
};
- port@1 {
+ rswitch_port1: port@1 {
reg = <1>;
phys = <&eth_serdes 1>;
+ status = "disabled";
};
- port@2 {
+ rswitch_port2: port@2 {
reg = <2>;
phys = <&eth_serdes 2>;
+ status = "disabled";
};
};
};
@@ -1280,6 +1290,7 @@
prr: chipid@fff00044 {
compatible = "renesas,prr";
reg = <0 0xfff00044 0 4>;
+ bootph-all;
};
};
diff --git a/arch/arm64/boot/dts/renesas/r8a779f4-s4sk.dts b/arch/arm64/boot/dts/renesas/r8a779f4-s4sk.dts
index 5d71d52f9c65..67b18f2bffbd 100644
--- a/arch/arm64/boot/dts/renesas/r8a779f4-s4sk.dts
+++ b/arch/arm64/boot/dts/renesas/r8a779f4-s4sk.dts
@@ -22,7 +22,8 @@
i2c5 = &i2c5;
serial0 = &hscif0;
serial1 = &hscif1;
- ethernet0 = &rswitch;
+ ethernet0 = &rswitch_port0;
+ ethernet1 = &rswitch_port1;
};
chosen {
@@ -67,6 +68,7 @@
&hscif0 {
pinctrl-0 = <&hscif0_pins>;
pinctrl-names = "default";
+ bootph-all;
uart-has-rtscts;
status = "okay";
@@ -179,49 +181,42 @@
pinctrl-0 = <&tsn0_pins>, <&tsn1_pins>;
pinctrl-names = "default";
status = "okay";
+};
+
+&rswitch_port0 {
+ reg = <0>;
+ phy-handle = <&ic99>;
+ phy-mode = "sgmii";
+ phys = <&eth_serdes 0>;
+ status = "okay";
- ethernet-ports {
+ mdio {
#address-cells = <1>;
#size-cells = <0>;
- port@0 {
- reg = <0>;
- phy-handle = <&ic99>;
- phy-mode = "sgmii";
- phys = <&eth_serdes 0>;
-
- mdio {
- #address-cells = <1>;
- #size-cells = <0>;
-
- ic99: ethernet-phy@1 {
- reg = <1>;
- compatible = "ethernet-phy-ieee802.3-c45";
- interrupts-extended = <&gpio3 10 IRQ_TYPE_LEVEL_LOW>;
- };
- };
- };
-
- port@1 {
+ ic99: ethernet-phy@1 {
reg = <1>;
- phy-handle = <&ic102>;
- phy-mode = "sgmii";
- phys = <&eth_serdes 1>;
-
- mdio {
- #address-cells = <1>;
- #size-cells = <0>;
-
- ic102: ethernet-phy@2 {
- reg = <2>;
- compatible = "ethernet-phy-ieee802.3-c45";
- interrupts-extended = <&gpio3 11 IRQ_TYPE_LEVEL_LOW>;
- };
- };
+ compatible = "ethernet-phy-ieee802.3-c45";
+ interrupts-extended = <&gpio3 10 IRQ_TYPE_LEVEL_LOW>;
};
+ };
+};
+
+&rswitch_port1 {
+ reg = <1>;
+ phy-handle = <&ic102>;
+ phy-mode = "sgmii";
+ phys = <&eth_serdes 1>;
+ status = "okay";
+
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
- port@2 {
- status = "disabled";
+ ic102: ethernet-phy@2 {
+ reg = <2>;
+ compatible = "ethernet-phy-ieee802.3-c45";
+ interrupts-extended = <&gpio3 11 IRQ_TYPE_LEVEL_LOW>;
};
};
};
diff --git a/arch/arm64/boot/dts/renesas/r8a779g0.dtsi b/arch/arm64/boot/dts/renesas/r8a779g0.dtsi
index 104f740d20d3..1760720b7128 100644
--- a/arch/arm64/boot/dts/renesas/r8a779g0.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a779g0.dtsi
@@ -166,6 +166,7 @@
#clock-cells = <0>;
/* This value must be overridden by the board */
clock-frequency = <0>;
+ bootph-all;
};
extalr_clk: extalr {
@@ -173,6 +174,7 @@
#clock-cells = <0>;
/* This value must be overridden by the board */
clock-frequency = <0>;
+ bootph-all;
};
pcie0_clkref: pcie0-clkref {
@@ -215,6 +217,8 @@
soc: soc {
compatible = "simple-bus";
interrupt-parent = <&gic>;
+ bootph-all;
+
#address-cells = <2>;
#size-cells = <2>;
ranges;
@@ -237,6 +241,7 @@
<0 0xe6060000 0 0x16c>, <0 0xe6060800 0 0x16c>,
<0 0xe6061000 0 0x16c>, <0 0xe6061800 0 0x16c>,
<0 0xe6068000 0 0x16c>;
+ bootph-all;
};
gpio0: gpio@e6050180 {
@@ -452,11 +457,13 @@
#clock-cells = <2>;
#power-domain-cells = <0>;
#reset-cells = <1>;
+ bootph-all;
};
rst: reset-controller@e6160000 {
compatible = "renesas,r8a779g0-rst";
reg = <0 0xe6160000 0 0x4000>;
+ bootph-all;
};
sysc: system-controller@e6180000 {
@@ -2171,6 +2178,24 @@
iommus = <&ipmmu_vi1 7>;
};
+ fcpvx0: fcp@fedb0000 {
+ compatible = "renesas,fcpv";
+ reg = <0 0xfedb0000 0 0x200>;
+ clocks = <&cpg CPG_MOD 1100>;
+ power-domains = <&sysc R8A779G0_PD_A3ISP0>;
+ resets = <&cpg 1100>;
+ iommus = <&ipmmu_vi1 24>;
+ };
+
+ fcpvx1: fcp@fedb8000 {
+ compatible = "renesas,fcpv";
+ reg = <0 0xfedb8000 0 0x200>;
+ clocks = <&cpg CPG_MOD 1101>;
+ power-domains = <&sysc R8A779G0_PD_A3ISP1>;
+ resets = <&cpg 1101>;
+ iommus = <&ipmmu_vi1 25>;
+ };
+
vspd0: vsp@fea20000 {
compatible = "renesas,vsp2";
reg = <0 0xfea20000 0 0x7000>;
@@ -2193,6 +2218,28 @@
renesas,fcp = <&fcpvd1>;
};
+ vspx0: vsp@fedd0000 {
+ compatible = "renesas,vsp2";
+ reg = <0 0xfedd0000 0 0x8000>;
+ interrupts = <GIC_SPI 556 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 1028>;
+ power-domains = <&sysc R8A779G0_PD_A3ISP0>;
+ resets = <&cpg 1028>;
+
+ renesas,fcp = <&fcpvx0>;
+ };
+
+ vspx1: vsp@fedd8000 {
+ compatible = "renesas,vsp2";
+ reg = <0 0xfedd8000 0 0x8000>;
+ interrupts = <GIC_SPI 557 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 1029>;
+ power-domains = <&sysc R8A779G0_PD_A3ISP1>;
+ resets = <&cpg 1029>;
+
+ renesas,fcp = <&fcpvx1>;
+ };
+
du: display@feb00000 {
compatible = "renesas,du-r8a779g0";
reg = <0 0xfeb00000 0 0x40000>;
@@ -2453,49 +2500,10 @@
};
};
- fcpvx0: fcp@fedb0000 {
- compatible = "renesas,fcpv";
- reg = <0 0xfedb0000 0 0x200>;
- clocks = <&cpg CPG_MOD 1100>;
- power-domains = <&sysc R8A779G0_PD_A3ISP0>;
- resets = <&cpg 1100>;
- iommus = <&ipmmu_vi1 24>;
- };
-
- fcpvx1: fcp@fedb8000 {
- compatible = "renesas,fcpv";
- reg = <0 0xfedb8000 0 0x200>;
- clocks = <&cpg CPG_MOD 1101>;
- power-domains = <&sysc R8A779G0_PD_A3ISP1>;
- resets = <&cpg 1101>;
- iommus = <&ipmmu_vi1 25>;
- };
-
- vspx0: vsp@fedd0000 {
- compatible = "renesas,vsp2";
- reg = <0 0xfedd0000 0 0x8000>;
- interrupts = <GIC_SPI 556 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&cpg CPG_MOD 1028>;
- power-domains = <&sysc R8A779G0_PD_A3ISP0>;
- resets = <&cpg 1028>;
-
- renesas,fcp = <&fcpvx0>;
- };
-
- vspx1: vsp@fedd8000 {
- compatible = "renesas,vsp2";
- reg = <0 0xfedd8000 0 0x8000>;
- interrupts = <GIC_SPI 557 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&cpg CPG_MOD 1029>;
- power-domains = <&sysc R8A779G0_PD_A3ISP1>;
- resets = <&cpg 1029>;
-
- renesas,fcp = <&fcpvx1>;
- };
-
prr: chipid@fff00044 {
compatible = "renesas,prr";
reg = <0 0xfff00044 0 4>;
+ bootph-all;
};
};
diff --git a/arch/arm64/boot/dts/renesas/r8a779h0-gray-hawk-single.dts b/arch/arm64/boot/dts/renesas/r8a779h0-gray-hawk-single.dts
index 18fd52f55de5..4d890e0617af 100644
--- a/arch/arm64/boot/dts/renesas/r8a779h0-gray-hawk-single.dts
+++ b/arch/arm64/boot/dts/renesas/r8a779h0-gray-hawk-single.dts
@@ -46,6 +46,8 @@
serial0 = &hscif0;
serial1 = &hscif2;
ethernet0 = &avb0;
+ ethernet1 = &avb1;
+ ethernet2 = &avb2;
};
can_transceiver0: can-phy0 {
@@ -200,17 +202,64 @@
&avb0 {
pinctrl-0 = <&avb0_pins>;
pinctrl-names = "default";
- phy-handle = <&phy0>;
+ phy-handle = <&avb0_phy>;
tx-internal-delay-ps = <2000>;
status = "okay";
- phy0: ethernet-phy@0 {
- compatible = "ethernet-phy-id0022.1622",
- "ethernet-phy-ieee802.3-c22";
- rxc-skew-ps = <1500>;
- reg = <0>;
- interrupts-extended = <&gpio7 5 IRQ_TYPE_LEVEL_LOW>;
- reset-gpios = <&gpio7 10 GPIO_ACTIVE_LOW>;
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ avb0_phy: ethernet-phy@0 {
+ compatible = "ethernet-phy-id0022.1622",
+ "ethernet-phy-ieee802.3-c22";
+ rxc-skew-ps = <1500>;
+ reg = <0>;
+ interrupts-extended = <&gpio7 5 IRQ_TYPE_LEVEL_LOW>;
+ reset-gpios = <&gpio7 10 GPIO_ACTIVE_LOW>;
+ };
+ };
+};
+
+&avb1 {
+ pinctrl-0 = <&avb1_pins>;
+ pinctrl-names = "default";
+ phy-handle = <&avb1_phy>;
+ status = "okay";
+
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ reset-gpios = <&gpio6 1 GPIO_ACTIVE_LOW>;
+ reset-post-delay-us = <4000>;
+
+ avb1_phy: ethernet-phy@0 {
+ compatible = "ethernet-phy-ieee802.3-c45";
+ reg = <0>;
+ interrupts-extended = <&gpio6 3 IRQ_TYPE_LEVEL_LOW>;
+ };
+ };
+};
+
+&avb2 {
+ pinctrl-0 = <&avb2_pins>;
+ pinctrl-names = "default";
+ phy-handle = <&avb2_phy>;
+ status = "okay";
+
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ reset-gpios = <&gpio5 5 GPIO_ACTIVE_LOW>;
+ reset-post-delay-us = <4000>;
+
+ avb2_phy: ethernet-phy@0 {
+ compatible = "ethernet-phy-ieee802.3-c45";
+ reg = <0>;
+ interrupts-extended = <&gpio5 4 IRQ_TYPE_LEVEL_LOW>;
+ };
};
};
@@ -233,25 +282,6 @@
};
};
-&dsi0 {
- status = "okay";
-
- ports {
- port@1 {
- reg = <1>;
-
- dsi0_out: endpoint {
- remote-endpoint = <&sn65dsi86_in0>;
- data-lanes = <1 2 3 4>;
- };
- };
- };
-};
-
-&du {
- status = "okay";
-};
-
&csi40 {
status = "okay";
@@ -292,6 +322,25 @@
};
};
+&dsi0 {
+ status = "okay";
+
+ ports {
+ port@1 {
+ reg = <1>;
+
+ dsi0_out: endpoint {
+ remote-endpoint = <&sn65dsi86_in0>;
+ data-lanes = <1 2 3 4>;
+ };
+ };
+ };
+};
+
+&du {
+ status = "okay";
+};
+
&extal_clk {
clock-frequency = <16666666>;
};
@@ -312,6 +361,7 @@
&hscif0 {
pinctrl-0 = <&hscif0_pins>;
pinctrl-names = "default";
+ bootph-all;
uart-has-rtscts;
status = "okay";
@@ -558,6 +608,56 @@
};
};
+ avb1_pins: avb1 {
+ mux {
+ groups = "avb1_link", "avb1_mdio", "avb1_rgmii",
+ "avb1_txcrefclk";
+ function = "avb1";
+ };
+
+ link {
+ groups = "avb1_link";
+ bias-disable;
+ };
+
+ mdio {
+ groups = "avb1_mdio";
+ drive-strength = <24>;
+ bias-disable;
+ };
+
+ rgmii {
+ groups = "avb1_rgmii";
+ drive-strength = <24>;
+ bias-disable;
+ };
+ };
+
+ avb2_pins: avb2 {
+ mux {
+ groups = "avb2_link", "avb2_mdio", "avb2_rgmii",
+ "avb2_txcrefclk";
+ function = "avb2";
+ };
+
+ link {
+ groups = "avb2_link";
+ bias-disable;
+ };
+
+ mdio {
+ groups = "avb2_mdio";
+ drive-strength = <24>;
+ bias-disable;
+ };
+
+ rgmii {
+ groups = "avb2_rgmii";
+ drive-strength = <24>;
+ bias-disable;
+ };
+ };
+
can_clk_pins: can-clk {
groups = "can_clk";
function = "can_clk";
diff --git a/arch/arm64/boot/dts/renesas/r8a779h0.dtsi b/arch/arm64/boot/dts/renesas/r8a779h0.dtsi
index d0c01c0fdda2..8524a1e7205e 100644
--- a/arch/arm64/boot/dts/renesas/r8a779h0.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a779h0.dtsi
@@ -138,6 +138,7 @@
#clock-cells = <0>;
/* This value must be overridden by the board */
clock-frequency = <0>;
+ bootph-all;
};
extalr_clk: extalr-clk {
@@ -145,6 +146,7 @@
#clock-cells = <0>;
/* This value must be overridden by the board */
clock-frequency = <0>;
+ bootph-all;
};
pcie0_clkref: pcie0-clkref {
@@ -180,6 +182,8 @@
soc: soc {
compatible = "simple-bus";
interrupt-parent = <&gic>;
+ bootph-all;
+
#address-cells = <2>;
#size-cells = <2>;
ranges;
@@ -201,6 +205,7 @@
<0 0xe6058000 0 0x16c>, <0 0xe6058800 0 0x16c>,
<0 0xe6060000 0 0x16c>, <0 0xe6060800 0 0x16c>,
<0 0xe6061000 0 0x16c>, <0 0xe6061800 0 0x16c>;
+ bootph-all;
};
gpio0: gpio@e6050180 {
@@ -401,11 +406,13 @@
#clock-cells = <2>;
#power-domain-cells = <0>;
#reset-cells = <1>;
+ bootph-all;
};
rst: reset-controller@e6160000 {
compatible = "renesas,r8a779h0-rst";
reg = <0 0xe6160000 0 0x4000>;
+ bootph-all;
};
sysc: system-controller@e6180000 {
@@ -793,8 +800,6 @@
rx-internal-delay-ps = <0>;
tx-internal-delay-ps = <0>;
iommus = <&ipmmu_hc 0>;
- #address-cells = <1>;
- #size-cells = <0>;
status = "disabled";
};
@@ -842,8 +847,6 @@
rx-internal-delay-ps = <0>;
tx-internal-delay-ps = <0>;
iommus = <&ipmmu_hc 1>;
- #address-cells = <1>;
- #size-cells = <0>;
status = "disabled";
};
@@ -891,8 +894,6 @@
rx-internal-delay-ps = <0>;
tx-internal-delay-ps = <0>;
iommus = <&ipmmu_hc 2>;
- #address-cells = <1>;
- #size-cells = <0>;
status = "disabled";
};
@@ -1908,6 +1909,15 @@
resets = <&cpg 508>;
};
+ fcpvx0: fcp@fedb0000 {
+ compatible = "renesas,fcpv";
+ reg = <0 0xfedb0000 0 0x200>;
+ clocks = <&cpg CPG_MOD 1100>;
+ power-domains = <&sysc R8A779H0_PD_A3ISP0>;
+ resets = <&cpg 1100>;
+ iommus = <&ipmmu_vi1 24>;
+ };
+
vspd0: vsp@fea20000 {
compatible = "renesas,vsp2";
reg = <0 0xfea20000 0 0x8000>;
@@ -1918,6 +1928,17 @@
renesas,fcp = <&fcpvd0>;
};
+ vspx0: vsp@fedd0000 {
+ compatible = "renesas,vsp2";
+ reg = <0 0xfedd0000 0 0x8000>;
+ interrupts = <GIC_SPI 556 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 1028>;
+ power-domains = <&sysc R8A779H0_PD_A3ISP0>;
+ resets = <&cpg 1028>;
+
+ renesas,fcp = <&fcpvx0>;
+ };
+
du: display@feb00000 {
compatible = "renesas,du-r8a779h0";
reg = <0 0xfeb00000 0 0x40000>;
@@ -2144,6 +2165,7 @@
prr: chipid@fff00044 {
compatible = "renesas,prr";
reg = <0 0xfff00044 0 4>;
+ bootph-all;
};
};
diff --git a/arch/arm64/boot/dts/renesas/r9a07g044l2-remi-pi.dts b/arch/arm64/boot/dts/renesas/r9a07g044l2-remi-pi.dts
new file mode 100644
index 000000000000..3267e7b75b58
--- /dev/null
+++ b/arch/arm64/boot/dts/renesas/r9a07g044l2-remi-pi.dts
@@ -0,0 +1,339 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/*
+ * Device Tree Source for the MYIR Remi Pi
+ *
+ * Copyright (C) 2022 MYIR Electronics Corp.
+ * Copyright (C) 2025 Collabora Ltd.
+ */
+
+/dts-v1/;
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/pinctrl/rzg2l-pinctrl.h>
+
+#include "r9a07g044l2.dtsi"
+
+/ {
+ model = "MYIR Tech Limited Remi Pi MYB-YG2LX-REMI";
+ compatible = "myir,remi-pi", "renesas,r9a07g044l2", "renesas,r9a07g044";
+
+ aliases {
+ ethernet0 = &eth0;
+ ethernet1 = &eth1;
+
+ i2c0 = &i2c0;
+ i2c1 = &i2c1;
+ i2c2 = &i2c2;
+ i2c3 = &i2c3;
+
+ mmc0 = &sdhi0;
+
+ serial0 = &scif0;
+ serial4 = &scif4;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+
+ hdmi-out {
+ compatible = "hdmi-connector";
+ type = "a";
+ ddc-i2c-bus = <&i2c1>;
+
+ port {
+ hdmi_con: endpoint {
+ remote-endpoint = <&lt8912_out>;
+ };
+ };
+ };
+
+ memory@48000000 {
+ device_type = "memory";
+ /* first 128MB is reserved for secure area. */
+ reg = <0x0 0x48000000 0x0 0x38000000>;
+ };
+
+ reg_1p8v: regulator-1p8v {
+ compatible = "regulator-fixed";
+ regulator-name = "fixed-1.8V";
+ vin-supply = <&reg_5p0v>;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-always-on;
+ };
+
+ reg_3p3v: regulator-3p3v {
+ compatible = "regulator-fixed";
+ regulator-name = "fixed-3.3V";
+ vin-supply = <&reg_5p0v>;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
+
+ reg_5p0v: regulator-5p0v {
+ compatible = "regulator-fixed";
+ regulator-name = "fixed-5.0V";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ };
+
+ reg_1p1v: regulator-vdd-core {
+ compatible = "regulator-fixed";
+ regulator-name = "fixed-1.1V";
+ regulator-min-microvolt = <1100000>;
+ regulator-max-microvolt = <1100000>;
+ regulator-always-on;
+ };
+};
+
+&dsi {
+ status = "okay";
+
+ ports {
+ port@1 {
+ dsi_out: endpoint {
+ remote-endpoint = <&lt8912_in>;
+ data-lanes = <1 2 3 4>;
+ };
+ };
+ };
+};
+
+&du {
+ status = "okay";
+};
+
+&ehci1 {
+ status = "okay";
+};
+
+&eth0 {
+ pinctrl-0 = <&eth0_pins>;
+ pinctrl-names = "default";
+ phy-handle = <&phy0>;
+ phy-mode = "rgmii-id";
+ status = "okay";
+
+ phy0: ethernet-phy@4 {
+ compatible = "ethernet-phy-ieee802.3-c22";
+ reg = <4>;
+ interrupts-extended = <&pinctrl RZG2L_GPIO(44, 2) IRQ_TYPE_LEVEL_LOW>;
+ reset-gpios = <&pinctrl RZG2L_GPIO(44, 3) GPIO_ACTIVE_LOW>;
+ };
+};
+
+&eth1 {
+ pinctrl-0 = <&eth1_pins>;
+ pinctrl-names = "default";
+ phy-handle = <&phy1>;
+ phy-mode = "rgmii-id";
+ status = "okay";
+
+ phy1: ethernet-phy@6 {
+ compatible = "ethernet-phy-ieee802.3-c22";
+ reg = <6>;
+ interrupts-extended = <&pinctrl RZG2L_GPIO(43, 2) IRQ_TYPE_LEVEL_LOW>;
+ reset-gpios = <&pinctrl RZG2L_GPIO(43, 3) GPIO_ACTIVE_LOW>;
+ };
+};
+
+&extal_clk {
+ clock-frequency = <24000000>;
+};
+
+&gpu {
+ mali-supply = <&reg_1p1v>;
+};
+
+&i2c0 {
+ pinctrl-0 = <&i2c0_pins>;
+ pinctrl-names = "default";
+
+ clock-frequency = <400000>;
+ status = "okay";
+
+ hdmi-bridge@48 {
+ compatible = "lontium,lt8912b";
+ reg = <0x48>;
+ reset-gpios = <&pinctrl RZG2L_GPIO(42, 2) GPIO_ACTIVE_LOW>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ lt8912_in: endpoint {
+ data-lanes = <1 2 3 4>;
+ remote-endpoint = <&dsi_out>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ lt8912_out: endpoint {
+ remote-endpoint = <&hdmi_con>;
+ };
+ };
+ };
+ };
+};
+
+&i2c1 {
+ pinctrl-0 = <&i2c1_pins>;
+ pinctrl-names = "default";
+ clock-frequency = <100000>;
+ status = "okay";
+};
+
+&i2c2 {
+ pinctrl-0 = <&i2c2_pins>;
+ pinctrl-names = "default";
+ clock-frequency = <100000>;
+ status = "okay";
+};
+
+&i2c3 {
+ pinctrl-0 = <&i2c3_pins>;
+ pinctrl-names = "default";
+ clock-frequency = <100000>;
+ status = "okay";
+};
+
+&mtu3 {
+ status = "okay";
+};
+
+&ohci1 {
+ status = "okay";
+};
+
+&ostm1 {
+ status = "okay";
+};
+
+&ostm2 {
+ status = "okay";
+};
+
+&phyrst {
+ status = "okay";
+};
+
+&pinctrl {
+ eth0_pins: eth0 {
+ pinmux = <RZG2L_PORT_PINMUX(27, 1, 1)>, /* ET0_MDC */
+ <RZG2L_PORT_PINMUX(28, 0, 1)>, /* ET0_MDIO */
+ <RZG2L_PORT_PINMUX(20, 0, 1)>, /* ET0_TXC */
+ <RZG2L_PORT_PINMUX(20, 1, 1)>, /* ET0_TX_CTL */
+ <RZG2L_PORT_PINMUX(20, 2, 1)>, /* ET0_TXD0 */
+ <RZG2L_PORT_PINMUX(21, 0, 1)>, /* ET0_TXD1 */
+ <RZG2L_PORT_PINMUX(21, 1, 1)>, /* ET0_TXD2 */
+ <RZG2L_PORT_PINMUX(22, 0, 1)>, /* ET0_TXD3 */
+ <RZG2L_PORT_PINMUX(24, 0, 1)>, /* ET0_RXC */
+ <RZG2L_PORT_PINMUX(24, 1, 1)>, /* ET0_RX_CTL */
+ <RZG2L_PORT_PINMUX(25, 0, 1)>, /* ET0_RXD0 */
+ <RZG2L_PORT_PINMUX(25, 1, 1)>, /* ET0_RXD1 */
+ <RZG2L_PORT_PINMUX(26, 0, 1)>, /* ET0_RXD2 */
+ <RZG2L_PORT_PINMUX(26, 1, 1)>; /* ET0_RXD3 */
+ };
+
+ eth1_pins: eth1 {
+ pinmux = <RZG2L_PORT_PINMUX(37, 0, 1)>, /* ET1_MDC */
+ <RZG2L_PORT_PINMUX(37, 1, 1)>, /* ET1_MDIO */
+ <RZG2L_PORT_PINMUX(29, 0, 1)>, /* ET1_TXC */
+ <RZG2L_PORT_PINMUX(29, 1, 1)>, /* ET1_TX_CTL */
+ <RZG2L_PORT_PINMUX(30, 0, 1)>, /* ET1_TXD0 */
+ <RZG2L_PORT_PINMUX(30, 1, 1)>, /* ET1_TXD1 */
+ <RZG2L_PORT_PINMUX(31, 0, 1)>, /* ET1_TXD2 */
+ <RZG2L_PORT_PINMUX(31, 1, 1)>, /* ET1_TXD3 */
+ <RZG2L_PORT_PINMUX(33, 1, 1)>, /* ET1_RXC */
+ <RZG2L_PORT_PINMUX(34, 0, 1)>, /* ET1_RX_CTL */
+ <RZG2L_PORT_PINMUX(34, 1, 1)>, /* ET1_RXD0 */
+ <RZG2L_PORT_PINMUX(35, 0, 1)>, /* ET1_RXD1 */
+ <RZG2L_PORT_PINMUX(35, 1, 1)>, /* ET1_RXD2 */
+ <RZG2L_PORT_PINMUX(36, 0, 1)>; /* ET1_RXD3 */
+ };
+
+ i2c0_pins: i2c0 {
+ pins = "RIIC0_SDA", "RIIC0_SCL";
+ input-enable;
+ };
+
+ i2c1_pins: i2c1 {
+ pins = "RIIC1_SDA", "RIIC1_SCL";
+ input-enable;
+ };
+
+ i2c2_pins: i2c2 {
+ pinmux = <RZG2L_PORT_PINMUX(3, 0, 2)>, /* SDA */
+ <RZG2L_PORT_PINMUX(3, 1, 2)>; /* SCL */
+ };
+
+ i2c3_pins: i2c3 {
+ pinmux = <RZG2L_PORT_PINMUX(18, 0, 3)>, /* SDA */
+ <RZG2L_PORT_PINMUX(18, 1, 3)>; /* SCL */
+ };
+
+ scif0_pins: scif0 {
+ pinmux = <RZG2L_PORT_PINMUX(38, 0, 1)>, /* TxD */
+ <RZG2L_PORT_PINMUX(38, 1, 1)>; /* RxD */
+ };
+
+ scif4_pins: scif4 {
+ pinmux = <RZG2L_PORT_PINMUX(2, 0, 5)>, /* TxD */
+ <RZG2L_PORT_PINMUX(2, 1, 5)>; /* RxD */
+ };
+
+ sdhi0_pins: sd0 {
+ sd0-ctrl {
+ pins = "SD0_CLK", "SD0_CMD";
+ power-source = <1800>;
+ };
+
+ sd0-data {
+ pins = "SD0_DATA0", "SD0_DATA1", "SD0_DATA2", "SD0_DATA3",
+ "SD0_DATA4", "SD0_DATA5", "SD0_DATA6", "SD0_DATA7";
+ power-source = <1800>;
+ };
+
+ sd0-rst {
+ pins = "SD0_RST#";
+ power-source = <1800>;
+ };
+ };
+};
+
+&scif0 {
+ pinctrl-0 = <&scif0_pins>;
+ pinctrl-names = "default";
+ status = "okay";
+};
+
+&scif4 {
+ pinctrl-0 = <&scif4_pins>;
+ pinctrl-names = "default";
+ status = "okay";
+};
+
+&sdhi0 {
+ pinctrl-0 = <&sdhi0_pins>;
+ pinctrl-1 = <&sdhi0_pins>;
+ pinctrl-names = "default", "state_uhs";
+
+ vmmc-supply = <&reg_3p3v>;
+ vqmmc-supply = <&reg_1p8v>;
+ bus-width = <8>;
+ mmc-hs200-1_8v;
+ non-removable;
+ fixed-emmc-driver-type = <1>;
+ status = "okay";
+};
+
+&usb2_phy1 {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/renesas/r9a08g045.dtsi b/arch/arm64/boot/dts/renesas/r9a08g045.dtsi
index a9b98db9ef95..0364f89776e6 100644
--- a/arch/arm64/boot/dts/renesas/r9a08g045.dtsi
+++ b/arch/arm64/boot/dts/renesas/r9a08g045.dtsi
@@ -28,6 +28,33 @@
clock-frequency = <0>;
};
+ cluster0_opp: opp-table-0 {
+ compatible = "operating-points-v2";
+ opp-shared;
+
+ opp-137500000 {
+ opp-hz = /bits/ 64 <137500000>;
+ opp-microvolt = <940000>;
+ clock-latency-ns = <300000>;
+ };
+ opp-275000000 {
+ opp-hz = /bits/ 64 <275000000>;
+ opp-microvolt = <940000>;
+ clock-latency-ns = <300000>;
+ };
+ opp-550000000 {
+ opp-hz = /bits/ 64 <550000000>;
+ opp-microvolt = <940000>;
+ clock-latency-ns = <300000>;
+ };
+ opp-1100000000 {
+ opp-hz = /bits/ 64 <1100000000>;
+ opp-microvolt = <940000>;
+ clock-latency-ns = <300000>;
+ opp-suspend;
+ };
+ };
+
cpus {
#address-cells = <1>;
#size-cells = <0>;
@@ -40,6 +67,7 @@
next-level-cache = <&L3_CA55>;
enable-method = "psci";
clocks = <&cpg CPG_CORE R9A08G045_CLK_I>;
+ operating-points-v2 = <&cluster0_opp>;
};
L3_CA55: cache-controller-0 {
@@ -443,7 +471,6 @@
<GIC_SPI 42 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "lpm_int", "ca55stbydone_int",
"cm33stbyr_int", "ca55_deny";
- status = "disabled";
};
pinctrl: pinctrl@11030000 {
diff --git a/arch/arm64/boot/dts/renesas/r9a08g045s33-smarc-pmod1-type-3a.dtso b/arch/arm64/boot/dts/renesas/r9a08g045s33-smarc-pmod1-type-3a.dtso
new file mode 100644
index 000000000000..4a81e3a3c8bd
--- /dev/null
+++ b/arch/arm64/boot/dts/renesas/r9a08g045s33-smarc-pmod1-type-3a.dtso
@@ -0,0 +1,48 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/*
+ * Device Tree Source for the RZ/G3S SMARC Carrier II EVK PMOD parts
+ *
+ * Copyright (C) 2024 Renesas Electronics Corp.
+ *
+ *
+ * [Connection]
+ *
+ * SMARC Carrier II EVK
+ * +--------------------------------------------+
+ * |PMOD1_3A (PMOD1 PIN HEADER) |
+ * | SCIF1_CTS# (pin1) (pin7) PMOD1_GPIO10 |
+ * | SCIF1_TXD (pin2) (pin8) PMOD1_GPIO11 |
+ * | SCIF1_RXD (pin3) (pin9) PMOD1_GPIO12 |
+ * | SCIF1_RTS# (pin4) (pin10) PMOD1_GPIO13 |
+ * | GND (pin5) (pin11) GND |
+ * | PWR_PMOD1 (pin6) (pin12) GND |
+ * +--------------------------------------------+
+ *
+ * The following switches should be set as follows for SCIF1:
+ * - SW_CONFIG3: ON
+ * - SW_OPT_MUX4: ON
+ */
+
+/dts-v1/;
+/plugin/;
+
+#include <dt-bindings/pinctrl/rzg2l-pinctrl.h>
+#include "rzg3s-smarc-switches.h"
+
+&pinctrl {
+ scif1_pins: scif1-pins {
+ pinmux = <RZG2L_PORT_PINMUX(14, 0, 1)>, /* TXD */
+ <RZG2L_PORT_PINMUX(14, 1, 1)>, /* RXD */
+ <RZG2L_PORT_PINMUX(16, 0, 1)>, /* CTS# */
+ <RZG2L_PORT_PINMUX(16, 1, 1)>; /* RTS# */
+ };
+};
+
+#if SW_CONFIG3 == SW_ON && SW_OPT_MUX4 == SW_ON
+&scif1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&scif1_pins>;
+ uart-has-rtscts;
+ status = "okay";
+};
+#endif
diff --git a/arch/arm64/boot/dts/renesas/r9a09g047.dtsi b/arch/arm64/boot/dts/renesas/r9a09g047.dtsi
index 200e9ea89193..c93aa16d0a6e 100644
--- a/arch/arm64/boot/dts/renesas/r9a09g047.dtsi
+++ b/arch/arm64/boot/dts/renesas/r9a09g047.dtsi
@@ -154,6 +154,13 @@
#power-domain-cells = <0>;
};
+ sys: system-controller@10430000 {
+ compatible = "renesas,r9a09g047-sys";
+ reg = <0 0x10430000 0 0x10000>;
+ clocks = <&cpg CPG_CORE R9A09G047_SYS_0_PCLK>;
+ resets = <&cpg 0x30>;
+ };
+
scif0: serial@11c01400 {
compatible = "renesas,scif-r9a09g047", "renesas,scif-r9a09g057";
reg = <0 0x11c01400 0 0x400>;
@@ -175,6 +182,36 @@
status = "disabled";
};
+ wdt1: watchdog@14400000 {
+ compatible = "renesas,r9a09g047-wdt", "renesas,r9a09g057-wdt";
+ reg = <0 0x14400000 0 0x400>;
+ clocks = <&cpg CPG_MOD 0x4d>, <&cpg CPG_MOD 0x4e>;
+ clock-names = "pclk", "oscclk";
+ resets = <&cpg 0x76>;
+ power-domains = <&cpg>;
+ status = "disabled";
+ };
+
+ wdt2: watchdog@13000000 {
+ compatible = "renesas,r9a09g047-wdt", "renesas,r9a09g057-wdt";
+ reg = <0 0x13000000 0 0x400>;
+ clocks = <&cpg CPG_MOD 0x4f>, <&cpg CPG_MOD 0x50>;
+ clock-names = "pclk", "oscclk";
+ resets = <&cpg 0x77>;
+ power-domains = <&cpg>;
+ status = "disabled";
+ };
+
+ wdt3: watchdog@13000400 {
+ compatible = "renesas,r9a09g047-wdt", "renesas,r9a09g057-wdt";
+ reg = <0 0x13000400 0 0x400>;
+ clocks = <&cpg CPG_MOD 0x51>, <&cpg CPG_MOD 0x52>;
+ clock-names = "pclk", "oscclk";
+ resets = <&cpg 0x78>;
+ power-domains = <&cpg>;
+ status = "disabled";
+ };
+
i2c0: i2c@14400400 {
compatible = "renesas,riic-r9a09g047", "renesas,riic-r9a09g057";
reg = <0 0x14400400 0 0x400>;
diff --git a/arch/arm64/boot/dts/renesas/r9a09g057.dtsi b/arch/arm64/boot/dts/renesas/r9a09g057.dtsi
index 1c550b22b164..0cd00bb05191 100644
--- a/arch/arm64/boot/dts/renesas/r9a09g057.dtsi
+++ b/arch/arm64/boot/dts/renesas/r9a09g057.dtsi
@@ -105,6 +105,35 @@
};
};
+ gpu_opp_table: opp-table-1 {
+ compatible = "operating-points-v2";
+
+ opp-630000000 {
+ opp-hz = /bits/ 64 <630000000>;
+ opp-microvolt = <800000>;
+ };
+
+ opp-315000000 {
+ opp-hz = /bits/ 64 <315000000>;
+ opp-microvolt = <800000>;
+ };
+
+ opp-157500000 {
+ opp-hz = /bits/ 64 <157500000>;
+ opp-microvolt = <800000>;
+ };
+
+ opp-78750000 {
+ opp-hz = /bits/ 64 <78750000>;
+ opp-microvolt = <800000>;
+ };
+
+ opp-19687500 {
+ opp-hz = /bits/ 64 <19687500>;
+ opp-microvolt = <800000>;
+ };
+ };
+
psci {
compatible = "arm,psci-1.0", "arm,psci-0.2";
method = "smc";
@@ -249,7 +278,6 @@
reg = <0 0x10430000 0 0x10000>;
clocks = <&cpg CPG_CORE R9A09G057_SYS_0_PCLK>;
resets = <&cpg 0x30>;
- status = "disabled";
};
ostm0: timer@11800000 {
@@ -582,6 +610,28 @@
status = "disabled";
};
+ gpu: gpu@14850000 {
+ compatible = "renesas,r9a09g057-mali",
+ "arm,mali-bifrost";
+ reg = <0x0 0x14850000 0x0 0x10000>;
+ interrupts = <GIC_SPI 884 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 885 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 883 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 886 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "job", "mmu", "gpu", "event";
+ clocks = <&cpg CPG_MOD 0xf0>,
+ <&cpg CPG_MOD 0xf1>,
+ <&cpg CPG_MOD 0xf2>;
+ clock-names = "gpu", "bus", "bus_ace";
+ power-domains = <&cpg>;
+ resets = <&cpg 0xdd>,
+ <&cpg 0xde>,
+ <&cpg 0xdf>;
+ reset-names = "rst", "axi_rst", "ace_rst";
+ operating-points-v2 = <&gpu_opp_table>;
+ status = "disabled";
+ };
+
gic: interrupt-controller@14900000 {
compatible = "arm,gic-v3";
reg = <0x0 0x14900000 0 0x20000>,
diff --git a/arch/arm64/boot/dts/renesas/r9a09g057h44-rzv2h-evk.dts b/arch/arm64/boot/dts/renesas/r9a09g057h44-rzv2h-evk.dts
index 0b705c987b6c..063eca0ba3e2 100644
--- a/arch/arm64/boot/dts/renesas/r9a09g057h44-rzv2h-evk.dts
+++ b/arch/arm64/boot/dts/renesas/r9a09g057h44-rzv2h-evk.dts
@@ -43,6 +43,16 @@
reg = <0x2 0x40000000 0x2 0x00000000>;
};
+ reg_0p8v: regulator0 {
+ compatible = "regulator-fixed";
+
+ regulator-name = "fixed-0.8V";
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <800000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
reg_3p3v: regulator1 {
compatible = "regulator-fixed";
@@ -68,6 +78,11 @@
clock-frequency = <22579200>;
};
+&gpu {
+ status = "okay";
+ mali-supply = <&reg_0p8v>;
+};
+
&i2c0 {
pinctrl-0 = <&i2c0_pins>;
pinctrl-names = "default";
diff --git a/arch/arm64/boot/dts/renesas/r9a09g057h48-kakip.dts b/arch/arm64/boot/dts/renesas/r9a09g057h48-kakip.dts
new file mode 100644
index 000000000000..d2586d278769
--- /dev/null
+++ b/arch/arm64/boot/dts/renesas/r9a09g057h48-kakip.dts
@@ -0,0 +1,136 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/*
+ * Device Tree Source for the Yuridenki-Shokai Kakip board
+ *
+ * Copyright (C) 2024 Nobuhiro Iwamatsu <iwamatsu@nigauri.org>
+ */
+
+/dts-v1/;
+
+#include <dt-bindings/pinctrl/renesas,r9a09g057-pinctrl.h>
+#include <dt-bindings/gpio/gpio.h>
+#include "r9a09g057.dtsi"
+
+/ {
+ model = "Yuridenki-Shokai Kakip Board based on r9a09g057h48";
+ compatible = "yuridenki,kakip", "renesas,r9a09g057h48", "renesas,r9a09g057";
+
+ aliases {
+ serial0 = &scif;
+ mmc0 = &sdhi0;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+
+ memory@48000000 {
+ device_type = "memory";
+ /* first 128MB is reserved for secure area. */
+ reg = <0x0 0x48000000 0x1 0xF8000000>;
+ };
+
+ reg_3p3v: regulator-3v3 {
+ compatible = "regulator-fixed";
+ regulator-name = "fixed-3.3V";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ vqmmc_sdhi0: regulator-vccq-sdhi0 {
+ compatible = "regulator-gpio";
+ regulator-name = "SDHI0 VccQ";
+ gpios = <&pinctrl RZV2H_GPIO(A, 0) GPIO_ACTIVE_HIGH>;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ gpios-states = <0>;
+ states = <3300000 0>, <1800000 1>;
+ };
+};
+
+&ostm0 {
+ status = "okay";
+};
+
+&ostm1 {
+ status = "okay";
+};
+
+&ostm2 {
+ status = "okay";
+};
+
+&ostm3 {
+ status = "okay";
+};
+
+&ostm4 {
+ status = "okay";
+};
+
+&ostm5 {
+ status = "okay";
+};
+
+&ostm6 {
+ status = "okay";
+};
+
+&ostm7 {
+ status = "okay";
+};
+
+&pinctrl {
+ scif_pins: scif {
+ pins = "SCIF_RXD", "SCIF_TXD";
+ };
+
+ sd0-pwr-en-hog {
+ gpio-hog;
+ gpios = <RZV2H_GPIO(A, 1) GPIO_ACTIVE_HIGH>;
+ output-high;
+ line-name = "sd0_pwr_en";
+ };
+
+ sdhi0_pins: sd0 {
+ sd0-clk {
+ pins = "SD0CLK";
+ renesas,output-impedance = <3>;
+ slew-rate = <0>;
+ };
+
+ sd0-data {
+ pins = "SD0DAT0", "SD0DAT1", "SD0DAT2", "SD0DAT3", "SD0CMD";
+ input-enable;
+ renesas,output-impedance = <3>;
+ slew-rate = <0>;
+ };
+
+ sd0-mux {
+ pinmux = <RZV2H_PORT_PINMUX(A, 5, 15)>; /* SD0_CD */
+ };
+ };
+};
+
+&qextal_clk {
+ clock-frequency = <24000000>;
+};
+
+&scif {
+ pinctrl-0 = <&scif_pins>;
+ pinctrl-names = "default";
+
+ status = "okay";
+};
+
+&sdhi0 {
+ pinctrl-0 = <&sdhi0_pins>;
+ pinctrl-names = "default";
+ vmmc-supply = <&reg_3p3v>;
+ vqmmc-supply = <&vqmmc_sdhi0>;
+ bus-width = <4>;
+
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/renesas/rzg3e-smarc-som.dtsi b/arch/arm64/boot/dts/renesas/rzg3e-smarc-som.dtsi
index 6b583ae2ac52..f4ba050beb0d 100644
--- a/arch/arm64/boot/dts/renesas/rzg3e-smarc-som.dtsi
+++ b/arch/arm64/boot/dts/renesas/rzg3e-smarc-som.dtsi
@@ -26,3 +26,7 @@
&rtxin_clk {
clock-frequency = <32768>;
};
+
+&wdt1 {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/renesas/rzg3s-smarc-som.dtsi b/arch/arm64/boot/dts/renesas/rzg3s-smarc-som.dtsi
index ef12c1c462a7..39845faec894 100644
--- a/arch/arm64/boot/dts/renesas/rzg3s-smarc-som.dtsi
+++ b/arch/arm64/boot/dts/renesas/rzg3s-smarc-som.dtsi
@@ -9,25 +9,7 @@
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/pinctrl/rzg2l-pinctrl.h>
-/*
- * On-board switches' states:
- * @SW_OFF: switch's state is OFF
- * @SW_ON: switch's state is ON
- */
-#define SW_OFF 0
-#define SW_ON 1
-
-/*
- * SW_CONFIG[x] switches' states:
- * @SW_CONFIG2:
- * SW_OFF - SD0 is connected to eMMC
- * SW_ON - SD0 is connected to uSD0 card
- * @SW_CONFIG3:
- * SW_OFF - SD2 is connected to SoC
- * SW_ON - SCIF1, SSI0, IRQ0, IRQ1 connected to SoC
- */
-#define SW_CONFIG2 SW_OFF
-#define SW_CONFIG3 SW_ON
+#include "rzg3s-smarc-switches.h"
/ {
compatible = "renesas,rzg3s-smarcm", "renesas,r9a08g045s33", "renesas,r9a08g045";
diff --git a/arch/arm64/boot/dts/renesas/rzg3s-smarc-switches.h b/arch/arm64/boot/dts/renesas/rzg3s-smarc-switches.h
new file mode 100644
index 000000000000..bbf908a5322c
--- /dev/null
+++ b/arch/arm64/boot/dts/renesas/rzg3s-smarc-switches.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * On-board switches for the Renesas RZ/G3S SMARC Module and RZ SMARC Carrier II
+ * boards.
+ *
+ * Copyright (C) 2024 Renesas Electronics Corp.
+ */
+
+#ifndef __RZG3S_SMARC_SWITCHES_H__
+#define __RZG3S_SMARC_SWITCHES_H__
+
+/*
+ * On-board switches' states:
+ * @SW_OFF: switch's state is OFF
+ * @SW_ON: switch's state is ON
+ */
+#define SW_OFF 0
+#define SW_ON 1
+
+/*
+ * SW_CONFIG[x] switches' states:
+ * @SW_CONFIG2:
+ * SW_OFF - SD0 is connected to eMMC
+ * SW_ON - SD0 is connected to uSD0 card
+ * @SW_CONFIG3:
+ * SW_OFF - SD2 is connected to SoC
+ * SW_ON - SCIF1, SSI0, IRQ0, IRQ1 connected to SoC
+ */
+#define SW_CONFIG2 SW_OFF
+#define SW_CONFIG3 SW_ON
+
+/*
+ * SW_OPT_MUX[x] switches' states:
+ * @SW_OPT_MUX4:
+ * SW_OFF - The SMARC SER0 signals are routed to M.2 Key E UART
+ * SW_ON - The SMARC SER0 signals are routed to PMOD1
+ */
+#define SW_OPT_MUX4 SW_ON
+
+#endif /* __RZG3S_SMARC_SWITCHES_H__ */
diff --git a/arch/arm64/boot/dts/renesas/rzg3s-smarc.dtsi b/arch/arm64/boot/dts/renesas/rzg3s-smarc.dtsi
index 81b4ffd1417d..5e044a4d0234 100644
--- a/arch/arm64/boot/dts/renesas/rzg3s-smarc.dtsi
+++ b/arch/arm64/boot/dts/renesas/rzg3s-smarc.dtsi
@@ -12,6 +12,8 @@
/ {
aliases {
i2c0 = &i2c0;
+ serial0 = &scif1;
+ serial1 = &scif3;
serial3 = &scif0;
mmc1 = &sdhi1;
};
@@ -162,6 +164,11 @@
<RZG2L_PORT_PINMUX(6, 4, 1)>; /* TXD */
};
+ scif3_pins: scif3 {
+ pinmux = <RZG2L_PORT_PINMUX(17, 2, 7)>, /* RXD */
+ <RZG2L_PORT_PINMUX(17, 3, 7)>; /* TXD */
+ };
+
sdhi1_pins: sd1 {
data {
pins = "SD1_DATA0", "SD1_DATA1", "SD1_DATA2", "SD1_DATA3";
@@ -208,6 +215,12 @@
status = "okay";
};
+&scif3 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&scif3_pins>;
+ status = "okay";
+};
+
&sdhi1 {
pinctrl-0 = <&sdhi1_pins>;
pinctrl-1 = <&sdhi1_pins_uhs>;
diff --git a/arch/arm64/boot/dts/renesas/salvator-common.dtsi b/arch/arm64/boot/dts/renesas/salvator-common.dtsi
index 06c7e9746304..68971c870d17 100644
--- a/arch/arm64/boot/dts/renesas/salvator-common.dtsi
+++ b/arch/arm64/boot/dts/renesas/salvator-common.dtsi
@@ -940,6 +940,7 @@
&scif2 {
pinctrl-0 = <&scif2_pins>;
pinctrl-names = "default";
+ bootph-all;
status = "okay";
};
diff --git a/arch/arm64/boot/dts/renesas/ulcb-kf-audio-graph-card-mix+split.dtsi b/arch/arm64/boot/dts/renesas/ulcb-kf-audio-graph-card-mix+split.dtsi
index 8ae6af1af094..4caa0281a687 100644
--- a/arch/arm64/boot/dts/renesas/ulcb-kf-audio-graph-card-mix+split.dtsi
+++ b/arch/arm64/boot/dts/renesas/ulcb-kf-audio-graph-card-mix+split.dtsi
@@ -15,7 +15,9 @@
* (D) CPU3 (2ch) --/ (TDM-1 : 2,3ch)
* (E) CPU4 (2ch) --/ (TDM-2 : 4,5ch)
* (F) CPU5 (2ch) --/ (TDM-3 : 6,7ch)
- * (G) CPU6 (6ch) <---- (6ch) (Z) PCM3168A-c
+ * (G) CPU6 (2ch) <---- (6ch) (Z) PCM3168A-c (TDM-a: 0,1ch)
+ * (H) CPU7 (2ch) <--/ (TDM-b: 2,3ch)
+ * (I) CPU8 (2ch) <--/ (TDM-c: 4,5ch)
*
* (A) aplay -D plughw:0,0 xxx.wav (MIX-0)
* (B) aplay -D plughw:0,1 xxx.wav (MIX-1)
@@ -25,7 +27,9 @@
* (F) aplay -D plughw:1,3 xxx.wav (TDM-3)
*
* (A) arecord -D plughw:0,0 xxx.wav
- * (G) arecord -D plughw:1,4 xxx.wav
+ * (G) arecord -D plughw:1,4 xxx.wav (TDM-a)
+ * (H) arecord -D plughw:1,5 xxx.wav (TDM-b)
+ * (I) arecord -D plughw:1,6 xxx.wav (TDM-c)
*/
/ {
sound_card_kf: expand-sound {
@@ -35,13 +39,18 @@
routing = "pcm3168a Playback", "DAI2 Playback",
"pcm3168a Playback", "DAI3 Playback",
"pcm3168a Playback", "DAI4 Playback",
- "pcm3168a Playback", "DAI5 Playback";
+ "pcm3168a Playback", "DAI5 Playback",
+ "DAI6 Capture", "pcm3168a Capture",
+ "DAI7 Capture", "pcm3168a Capture",
+ "DAI8 Capture", "pcm3168a Capture";
dais = <&snd_kf1 /* (C) CPU2 */
&snd_kf2 /* (D) CPU3 */
&snd_kf3 /* (E) CPU4 */
&snd_kf4 /* (F) CPU5 */
- &snd_kf5 /* (G) GPU6 */
+ &snd_kf5 /* (G) CPU6 */
+ &snd_kf6 /* (H) CPU7 */
+ &snd_kf7 /* (I) CPU8 */
>;
};
};
@@ -50,7 +59,9 @@
ports {
#address-cells = <1>;
#size-cells = <0>;
+
mclk-fs = <512>;
+ prefix = "pcm3168a";
/*
* (Y) PCM3168A-p
@@ -59,7 +70,6 @@
#address-cells = <1>;
#size-cells = <0>;
reg = <0>;
- prefix = "pcm3168a";
convert-channels = <8>; /* to 8ch TDM */
/* (C) CPU2 -> (Y) PCM3168A-p */
@@ -91,10 +101,28 @@
* (Z) PCM3168A-c
*/
port@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
reg = <1>;
+
+ convert-channels = <6>; /* to 6ch TDM */
+
/* (G) CPU6 <- PCM3168A-c */
- pcm3168a_endpoint_c: endpoint {
- remote-endpoint = <&rsnd_for_pcm3168a_capture>;
+ pcm3168a_endpoint_c1: endpoint@0 {
+ reg = <0>;
+ remote-endpoint = <&rsnd_for_pcm3168a_capture1>;
+ clocks = <&clksndsel>;
+ };
+ /* (H) CPU7 <- PCM3168A-c */
+ pcm3168a_endpoint_c2: endpoint@1 {
+ reg = <1>;
+ remote-endpoint = <&rsnd_for_pcm3168a_capture2>;
+ clocks = <&clksndsel>;
+ };
+ /* (I) CPU8 <- PCM3168A-c */
+ pcm3168a_endpoint_c3: endpoint@2 {
+ reg = <2>;
+ remote-endpoint = <&rsnd_for_pcm3168a_capture3>;
clocks = <&clksndsel>;
};
};
@@ -160,12 +188,35 @@
*/
snd_kf5: port@6 {
reg = <6>;
- rsnd_for_pcm3168a_capture: endpoint {
- remote-endpoint = <&pcm3168a_endpoint_c>;
+ rsnd_for_pcm3168a_capture1: endpoint {
+ remote-endpoint = <&pcm3168a_endpoint_c1>;
+ bitclock-master;
+ frame-master;
+ capture = <&ssiu40 &ssi4>;
+ };
+ };
+ /*
+ * (H) CPU7
+ */
+ snd_kf6: port@7 {
+ reg = <7>;
+ rsnd_for_pcm3168a_capture2: endpoint {
+ remote-endpoint = <&pcm3168a_endpoint_c2>;
+ bitclock-master;
+ frame-master;
+ capture = <&ssiu41 &ssi4>;
+ };
+ };
+ /*
+ * (I) CPU8
+ */
+ snd_kf7: port@8 {
+ reg = <8>;
+ rsnd_for_pcm3168a_capture3: endpoint {
+ remote-endpoint = <&pcm3168a_endpoint_c3>;
bitclock-master;
frame-master;
- dai-tdm-slot-num = <6>;
- capture = <&ssi4>;
+ capture = <&ssiu42 &ssi4>;
};
};
};
diff --git a/arch/arm64/boot/dts/renesas/ulcb-kf-audio-graph-card2-mix+split.dtsi b/arch/arm64/boot/dts/renesas/ulcb-kf-audio-graph-card2-mix+split.dtsi
index 4cf632bc4621..67a0057a3383 100644
--- a/arch/arm64/boot/dts/renesas/ulcb-kf-audio-graph-card2-mix+split.dtsi
+++ b/arch/arm64/boot/dts/renesas/ulcb-kf-audio-graph-card2-mix+split.dtsi
@@ -15,7 +15,9 @@
* (D) CPU2 (2ch) --/ (TDM-1 : 2,3ch)
* (E) CPU4 (2ch) --/ (TDM-2 : 4,5ch)
* (F) CPU5 (2ch) --/ (TDM-3 : 6,7ch)
- * (G) CPU6 (6ch) <---- (6ch) (Z) PCM3168A-c
+ * (G) CPU6 (2ch) <---- (6ch) (Z) PCM3168A-c (TDM-a: 0,1ch)
+ * (H) CPU7 (2ch) <--/ (TDM-b: 2,3ch)
+ * (I) CPU8 (2ch) <--/ (TDM-c: 4,5ch)
*
* (A) aplay -D plughw:0,0 xxx.wav (MIX-0)
* (B) aplay -D plughw:0,1 xxx.wav (MIX-1)
@@ -25,7 +27,9 @@
* (F) aplay -D plughw:1,3 xxx.wav (TDM-3)
*
* (A) arecord -D plughw:0,0 xxx.wav
- * (G) arecord -D plughw:1,4 xxx.wav
+ * (G) arecord -D plughw:1,4 xxx.wav (TDM-a)
+ * (H) arecord -D plughw:1,5 xxx.wav (TDM-b)
+ * (I) arecord -D plughw:1,6 xxx.wav (TDM-c)
*/
/ {
sound_card_kf: expand-sound {
@@ -36,19 +40,25 @@
"pcm3168a Playback", "DAI3 Playback",
"pcm3168a Playback", "DAI4 Playback",
"pcm3168a Playback", "DAI5 Playback",
- "DAI6 Capture", "pcm3168a Capture";
+ "DAI6 Capture", "pcm3168a Capture",
+ "DAI7 Capture", "pcm3168a Capture",
+ "DAI8 Capture", "pcm3168a Capture";
links = <&fe_c /* (C) CPU2 */
&fe_d /* (D) CPU3 */
&fe_e /* (E) CPU4 */
&fe_f /* (F) CPU5 */
- &rsnd_g /* (G) CPU6 */
+ &fe_g /* (G) CPU6 */
+ &fe_h /* (H) CPU7 */
+ &fe_i /* (I) CPU8 */
&be_y /* (Y) PCM3168A-p */
+ &be_z /* (Z) PCM3168A-c */
>;
- dpcm {
+ dpcm: dpcm {
#address-cells = <1>;
#size-cells = <0>;
+ non-supplier;
ports@0 {
#address-cells = <1>;
@@ -62,21 +72,32 @@
* (D) CPU3
* (E) CPU4
* (F) CPU5
+ * (G) CPU6
+ * (H) CPU7
+ * (I) CPU8
*/
fe_c: port@2 { reg = <2>; fe_c_ep: endpoint { remote-endpoint = <&rsnd_c_ep>; }; };
fe_d: port@3 { reg = <3>; fe_d_ep: endpoint { remote-endpoint = <&rsnd_d_ep>; }; };
fe_e: port@4 { reg = <4>; fe_e_ep: endpoint { remote-endpoint = <&rsnd_e_ep>; }; };
fe_f: port@5 { reg = <5>; fe_f_ep: endpoint { remote-endpoint = <&rsnd_f_ep>; }; };
+
+ fe_g: port@6 { reg = <6>; fe_g_ep: endpoint { remote-endpoint = <&rsnd_g_ep>; }; };
+ fe_h: port@7 { reg = <7>; fe_h_ep: endpoint { remote-endpoint = <&rsnd_h_ep>; }; };
+ fe_i: port@8 { reg = <8>; fe_i_ep: endpoint { remote-endpoint = <&rsnd_i_ep>; }; };
};
ports@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
reg = <1>;
/*
* BE
*
* (Y) PCM3168A-p
+ * (Z) PCM3168A-c
*/
- be_y: port { be_y_ep: endpoint { remote-endpoint = <&pcm3168a_y_ep>; }; };
+ be_y: port@0 { reg = <0>; be_y_ep: endpoint { remote-endpoint = <&pcm3168a_y_ep>; }; };
+ be_z: port@1 { reg = <1>; be_z_ep: endpoint { remote-endpoint = <&pcm3168a_z_ep>; }; };
};
};
};
@@ -106,8 +127,9 @@
*/
port@1 {
reg = <1>;
+ convert-channels = <6>; /* to 6ch TDM */
pcm3168a_z_ep: endpoint {
- remote-endpoint = <&rsnd_g_ep>;
+ remote-endpoint = <&be_z_ep>;
clocks = <&clksndsel>;
};
};
@@ -171,13 +193,37 @@
/*
* (G) CPU6
*/
- rsnd_g: port@6 {
+ port@6 {
reg = <6>;
rsnd_g_ep: endpoint {
- remote-endpoint = <&pcm3168a_z_ep>;
+ remote-endpoint = <&fe_g_ep>;
+ bitclock-master;
+ frame-master;
+ capture = <&ssiu40 &ssi4>;
+ };
+ };
+ /*
+ * (H) CPU7
+ */
+ port@7 {
+ reg = <7>;
+ rsnd_h_ep: endpoint {
+ remote-endpoint = <&fe_h_ep>;
+ bitclock-master;
+ frame-master;
+ capture = <&ssiu41 &ssi4>;
+ };
+ };
+ /*
+ * (I) CPU8
+ */
+ port@8 {
+ reg = <8>;
+ rsnd_i_ep: endpoint {
+ remote-endpoint = <&fe_i_ep>;
bitclock-master;
frame-master;
- capture = <&ssi4>;
+ capture = <&ssiu42 &ssi4>;
};
};
};
diff --git a/arch/arm64/boot/dts/renesas/ulcb-kf-simple-audio-card-mix+split.dtsi b/arch/arm64/boot/dts/renesas/ulcb-kf-simple-audio-card-mix+split.dtsi
index f01d91aaadf3..fd75801c329e 100644
--- a/arch/arm64/boot/dts/renesas/ulcb-kf-simple-audio-card-mix+split.dtsi
+++ b/arch/arm64/boot/dts/renesas/ulcb-kf-simple-audio-card-mix+split.dtsi
@@ -15,7 +15,9 @@
* (D) CPU2 (2ch) --/ (TDM-1 : 2,3ch)
* (E) CPU4 (2ch) --/ (TDM-2 : 4,5ch)
* (F) CPU5 (2ch) --/ (TDM-3 : 6,7ch)
- * (G) CPU6 (6ch) <---- (6ch) (Z) PCM3168A-c
+ * (G) CPU6 (2ch) <---- (6ch) (Z) PCM3168A-c (TDM-a: 0,1ch)
+ * (H) CPU7 (2ch) <--/ (TDM-b: 2,3ch)
+ * (I) CPU8 (2ch) <--/ (TDM-c: 4,5ch)
*
* (A) aplay -D plughw:0,0 xxx.wav (MIX-0)
* (B) aplay -D plughw:0,1 xxx.wav (MIX-1)
@@ -25,7 +27,9 @@
* (F) aplay -D plughw:1,3 xxx.wav (TDM-3)
*
* (A) arecord -D plughw:0,0 xxx.wav
- * (G) arecord -D plughw:1,4 xxx.wav
+ * (G) arecord -D plughw:1,4 xxx.wav (TDM-a)
+ * (H) arecord -D plughw:1,5 xxx.wav (TDM-b)
+ * (I) arecord -D plughw:1,6 xxx.wav (TDM-c)
*/
/ {
@@ -39,7 +43,10 @@
simple-audio-card,routing = "pcm3168a Playback", "DAI2 Playback",
"pcm3168a Playback", "DAI3 Playback",
"pcm3168a Playback", "DAI4 Playback",
- "pcm3168a Playback", "DAI5 Playback";
+ "pcm3168a Playback", "DAI5 Playback",
+ "DAI6 Capture", "pcm3168a Capture",
+ "DAI7 Capture", "pcm3168a Capture",
+ "DAI8 Capture", "pcm3168a Capture";
simple-audio-card,dai-link@0 {
#address-cells = <1>;
@@ -88,16 +95,40 @@
};
simple-audio-card,dai-link@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
reg = <1>;
+ convert-channels = <6>; /* to 6ch TDM */
+
/*
* (G) CPU6
*/
- cpu {
+ cpu@0 {
+ reg = <0>;
bitclock-master;
frame-master;
sound-dai = <&rcar_sound 6>;
};
/*
+ * (H) CPU7
+ */
+ cpu@1 {
+ reg = <1>;
+ bitclock-master;
+ frame-master;
+ sound-dai = <&rcar_sound 7>;
+ };
+ /*
+ * (I) CPU8
+ */
+ cpu@2 {
+ reg = <2>;
+ bitclock-master;
+ frame-master;
+ sound-dai = <&rcar_sound 8>;
+ };
+
+ /*
* (Z) PCM3168A-c
*/
codec {
@@ -151,7 +182,19 @@
* (G) CPU6
*/
dai6 {
- capture = <&ssi4>;
+ capture = <&ssiu40 &ssi4>;
+ };
+ /*
+ * (H) CPU7
+ */
+ dai7 {
+ capture = <&ssiu41 &ssi4>;
+ };
+ /*
+ * (I) CPU8
+ */
+ dai8 {
+ capture = <&ssiu42 &ssi4>;
};
};
};
diff --git a/arch/arm64/boot/dts/renesas/ulcb.dtsi b/arch/arm64/boot/dts/renesas/ulcb.dtsi
index 0c58d816c375..fcab957b54f7 100644
--- a/arch/arm64/boot/dts/renesas/ulcb.dtsi
+++ b/arch/arm64/boot/dts/renesas/ulcb.dtsi
@@ -448,6 +448,7 @@
&scif2 {
pinctrl-0 = <&scif2_pins>;
pinctrl-names = "default";
+ bootph-all;
status = "okay";
};
diff --git a/arch/arm64/boot/dts/renesas/white-hawk-cpu-common.dtsi b/arch/arm64/boot/dts/renesas/white-hawk-cpu-common.dtsi
index f24814d7c924..b4024e85ae5a 100644
--- a/arch/arm64/boot/dts/renesas/white-hawk-cpu-common.dtsi
+++ b/arch/arm64/boot/dts/renesas/white-hawk-cpu-common.dtsi
@@ -201,6 +201,7 @@
&hscif0 {
pinctrl-0 = <&hscif0_pins>;
pinctrl-names = "default";
+ bootph-all;
status = "okay";
};
diff --git a/arch/arm64/boot/dts/renesas/white-hawk-csi-dsi.dtsi b/arch/arm64/boot/dts/renesas/white-hawk-csi-dsi.dtsi
index 9017c4475a7c..a5d1c1008e7e 100644
--- a/arch/arm64/boot/dts/renesas/white-hawk-csi-dsi.dtsi
+++ b/arch/arm64/boot/dts/renesas/white-hawk-csi-dsi.dtsi
@@ -21,7 +21,9 @@
bus-type = <MEDIA_BUS_TYPE_CSI2_CPHY>;
clock-lanes = <0>;
data-lanes = <1 2 3>;
- line-orders = <0 3 0>;
+ line-orders = <MEDIA_BUS_CSI2_CPHY_LINE_ORDER_ABC
+ MEDIA_BUS_CSI2_CPHY_LINE_ORDER_BCA
+ MEDIA_BUS_CSI2_CPHY_LINE_ORDER_ABC>;
remote-endpoint = <&max96712_out0>;
};
};
@@ -42,7 +44,9 @@
bus-type = <MEDIA_BUS_TYPE_CSI2_CPHY>;
clock-lanes = <0>;
data-lanes = <1 2 3>;
- line-orders = <0 3 0>;
+ line-orders = <MEDIA_BUS_CSI2_CPHY_LINE_ORDER_ABC
+ MEDIA_BUS_CSI2_CPHY_LINE_ORDER_BCA
+ MEDIA_BUS_CSI2_CPHY_LINE_ORDER_ABC>;
remote-endpoint = <&max96712_out1>;
};
};
diff --git a/arch/arm64/boot/dts/rockchip/Makefile b/arch/arm64/boot/dts/rockchip/Makefile
index def1222c1907..3e8771ef69ba 100644
--- a/arch/arm64/boot/dts/rockchip/Makefile
+++ b/arch/arm64/boot/dts/rockchip/Makefile
@@ -5,6 +5,8 @@ dtb-$(CONFIG_ARCH_ROCKCHIP) += px30-engicam-px30-core-ctouch2-of10.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += px30-engicam-px30-core-edimm2.2.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += px30-firefly-jd4-core-mb.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += px30-ringneck-haikou.dtb
+dtb-$(CONFIG_ARCH_ROCKCHIP) += px30-ringneck-haikou-lvds-9904379.dtbo
+dtb-$(CONFIG_ARCH_ROCKCHIP) += px30-ringneck-haikou-video-demo.dtbo
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3308-bpi-p2-pro.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3308-evb.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3308-roc-cc.dtb
@@ -61,6 +63,7 @@ dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3399-orangepi.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3399-pinebook-pro.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3399-pinephone-pro.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3399-puma-haikou.dtb
+dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3399-puma-haikou-video-demo.dtbo
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3399-roc-pc.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3399-roc-pc-mezzanine.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3399-roc-pc-plus.dtb
@@ -122,6 +125,7 @@ dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3568-mecsbc.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3568-nanopi-r5c.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3568-nanopi-r5s.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3568-odroid-m1.dtb
+dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3568-photonicat.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3568-qnap-ts433.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3568-radxa-e25.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3568-roc-pc.dtb
@@ -132,6 +136,8 @@ dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3568-wolfvision-pf5-display-vz.dtbo
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3568-wolfvision-pf5-io-expander.dtbo
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3576-armsom-sige5.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3576-evb1-v10.dtb
+dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3576-roc-pc.dtb
+dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3576-rock-4d.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3582-radxa-e52c.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3588-armsom-sige7.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3588-armsom-w3.dtb
@@ -145,11 +151,14 @@ dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3588-firefly-itx-3588j.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3588-friendlyelec-cm3588-nas.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3588-h96-max-v58.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3588-jaguar.dtb
+dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3588-jaguar-pre-ict-tester.dtbo
+dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3588-mnt-reform2.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3588-nanopc-t6.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3588-nanopc-t6-lts.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3588-ok3588-c.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3588-orangepi-5-max.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3588-orangepi-5-plus.dtb
+dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3588-orangepi-5-ultra.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3588-quartzpro64.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3588-rock-5-itx.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3588-rock-5b.dtb
@@ -170,3 +179,57 @@ dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3588s-orangepi-5.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3588s-orangepi-5b.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3588s-rock-5a.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3588s-rock-5c.dtb
+
+# Overlay application tests
+#
+# A .dtbo must have its own
+#
+# dtb-$(CONFIG_ARCH_ROCKCHIP) += <overlay>.dtbo
+#
+# entry, and at least one overlay application test reflecting a possible
+# hardware combination in real life:
+#
+# dtb-$(CONFIG_ARCH_ROCKCHIP) += <overlay-application-test>.dtb
+# <overlay-application-test>-dtbs := <base>.dtb <overlay-1>.dtbo [<overlay-2>.dtbo ...]
+#
+# This will make the <base>.dtb have symbols (like when DTC_FLAGS has -@ passed)
+# and generate a new DTB (<overlay-application-test>.dtb) which is the
+# result of the application of <overlay-1>.dtbo and other listed overlays on top
+# of <base>.dtb.
+
+dtb-$(CONFIG_ARCH_ROCKCHIP) += px30-ringneck-haikou-haikou-lvds-9904379.dtb
+px30-ringneck-haikou-haikou-lvds-9904379-dtbs := px30-ringneck-haikou.dtb \
+ px30-ringneck-haikou-lvds-9904379.dtbo
+
+dtb-$(CONFIG_ARCH_ROCKCHIP) += px30-ringneck-haikou-haikou-video-demo.dtb
+px30-ringneck-haikou-haikou-video-demo-dtbs := px30-ringneck-haikou.dtb \
+ px30-ringneck-haikou-video-demo.dtbo
+
+dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3399-puma-haikou-haikou-video-demo.dtb
+rk3399-puma-haikou-haikou-video-demo-dtbs := rk3399-puma-haikou.dtb \
+ rk3399-puma-haikou-video-demo.dtbo
+
+dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3568-wolfvision-pf5-vz-2-uhd.dtb
+rk3568-wolfvision-pf5-vz-2-uhd-dtbs := rk3568-wolfvision-pf5.dtb \
+ rk3568-wolfvision-pf5-display-vz.dtbo \
+ rk3568-wolfvision-pf5-io-expander.dtbo
+
+dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3588-edgeble-neu6a-wifi.dtb
+rk3588-edgeble-neu6a-wifi-dtbs := rk3588-edgeble-neu6a-io.dtb \
+ rk3588-edgeble-neu6a-wifi.dtbo
+
+dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3588-edgeble-neu6b-wifi.dtb
+rk3588-edgeble-neu6b-wifi-dtbs := rk3588-edgeble-neu6b-io.dtb \
+ rk3588-edgeble-neu6a-wifi.dtbo
+
+dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3588-jaguar-pre-ict-tester.dtb
+rk3588-jaguar-pre-ict-tester-dtbs := rk3588-jaguar.dtb \
+ rk3588-jaguar-pre-ict-tester.dtbo
+
+dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3588-rock-5b-pcie-ep.dtb
+rk3588-rock-5b-pcie-ep-dtbs := rk3588-rock-5b.dtb \
+ rk3588-rock-5b-pcie-ep.dtbo
+
+dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3588-rock-5b-pcie-srns.dtb
+rk3588-rock-5b-pcie-srns-dtbs := rk3588-rock-5b.dtb \
+ rk3588-rock-5b-pcie-srns.dtbo
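Following the pattern documented in the comment added to this Makefile, every overlay gets its own .dtbo entry plus at least one <overlay-application-test>.dtb that applies it on top of its base DTB at build time. A minimal sketch of exercising those tests from the kernel tree (the aarch64-linux-gnu- toolchain prefix and the use of defconfig are illustrative assumptions, not part of this patch):

    # Configure for arm64 and build all enabled device trees, including the
    # overlay application test DTBs listed above; a build failure here means
    # an overlay no longer applies cleanly on top of its base DTB.
    make ARCH=arm64 CROSS_COMPILE=aarch64-linux-gnu- defconfig
    make ARCH=arm64 CROSS_COMPILE=aarch64-linux-gnu- dtbs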
diff --git a/arch/arm64/boot/dts/rockchip/px30-ringneck-haikou-lvds-9904379.dtso b/arch/arm64/boot/dts/rockchip/px30-ringneck-haikou-lvds-9904379.dtso
new file mode 100644
index 000000000000..3fc088a5636a
--- /dev/null
+++ b/arch/arm64/boot/dts/rockchip/px30-ringneck-haikou-lvds-9904379.dtso
@@ -0,0 +1,130 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright (c) 2025 Cherry Embedded Solutions GmbH
+ *
+ * HAIKOU-LVDS-9904379 adapter for PX30 Ringneck and Haikou carrierboard.
+ *
+ * This adapter needs to be plugged into the fake PCIe connector called Video
+ * Connector on the Haikou carrierboard.
+ */
+
+/dts-v1/;
+/plugin/;
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/pinctrl/rockchip.h>
+
+&{/} {
+ backlight_lvds: backlight-lvds {
+ compatible = "pwm-backlight";
+ brightness-levels = <0 255>;
+ default-brightness-level = <255>;
+ num-interpolated-steps = <255>;
+ power-supply = <&vcc3v3_baseboard>;
+ pwms = <&pwm0 0 25000 0>;
+ };
+
+ panel {
+ compatible = "admatec,9904379", "panel-lvds";
+ backlight = <&backlight_lvds>;
+ data-mapping = "vesa-24";
+ height-mm = <126>;
+ power-supply = <&vcc3v3_baseboard>;
+ width-mm = <224>;
+
+ panel-timing {
+ clock-frequency = <49500000>;
+ hactive = <1024>;
+ hback-porch = <90>;
+ hfront-porch = <90>;
+ hsync-len = <90>;
+ vactive = <600>;
+ vback-porch = <10>;
+ vfront-porch = <10>;
+ vsync-len = <10>;
+ };
+
+ port {
+ panel_in_lvds: endpoint {
+ remote-endpoint = <&lvds_out_panel>;
+ };
+ };
+ };
+};
+
+&display_subsystem {
+ status = "okay";
+};
+
+&dsi_dphy {
+ status = "okay";
+};
+
+&i2c1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ /* EEPROM and GT928 are limited to 400KHz */
+ clock-frequency = <400000>;
+
+ touchscreen@14 {
+ compatible = "goodix,gt928";
+ reg = <0x14>;
+ interrupt-parent = <&gpio0>;
+ interrupts = <RK_PA0 IRQ_TYPE_LEVEL_LOW>;
+ irq-gpios = <&gpio0 RK_PA0 GPIO_ACTIVE_HIGH>;
+ reset-gpios = <&gpio0 RK_PA2 GPIO_ACTIVE_HIGH>;
+ pinctrl-0 = <&touch_int &touch_rst>;
+ pinctrl-names = "default";
+ touchscreen-inverted-x;
+ touchscreen-inverted-y;
+ AVDD28-supply = <&vcc3v3_baseboard>;
+ VDDIO-supply = <&vcc3v3_baseboard>;
+ };
+
+ eeprom@54 {
+ reg = <0x54>;
+ compatible = "st,24c04", "atmel,24c04";
+ pagesize = <16>;
+ size = <512>;
+ vcc-supply = <&vcc3v3_baseboard>;
+ };
+};
+
+&lvds {
+ status = "okay";
+};
+
+&lvds_out {
+ lvds_out_panel: endpoint {
+ remote-endpoint = <&panel_in_lvds>;
+ };
+};
+
+&pinctrl {
+ touch {
+ touch_int: touch-int {
+ rockchip,pins = <0 RK_PA0 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+
+ touch_rst: touch-rst {
+ rockchip,pins = <0 RK_PA2 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+};
+
+&vopb {
+ status = "okay";
+};
+
+&vopb_mmu {
+ status = "okay";
+};
+
+&vopl {
+ status = "okay";
+};
+
+&vopl_mmu {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/rockchip/px30-ringneck-haikou-video-demo.dtso b/arch/arm64/boot/dts/rockchip/px30-ringneck-haikou-video-demo.dtso
new file mode 100644
index 000000000000..7d9ea5aa5984
--- /dev/null
+++ b/arch/arm64/boot/dts/rockchip/px30-ringneck-haikou-video-demo.dtso
@@ -0,0 +1,190 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright (c) 2025 Cherry Embedded Solutions GmbH
+ *
+ * DEVKIT ADDON CAM-TS-A01
+ * https://embedded.cherry.de/product/development-kit/
+ *
+ * DT-overlay for the camera / DSI demo appliance for Haikou boards.
+ * This is the flavour for use with a Ringneck system-on-module.
+ */
+
+/dts-v1/;
+/plugin/;
+
+#include <dt-bindings/clock/px30-cru.h>
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/leds/common.h>
+#include <dt-bindings/pinctrl/rockchip.h>
+
+&{/} {
+ backlight: backlight {
+ compatible = "pwm-backlight";
+ power-supply = <&dc_12v>;
+ pwms = <&pwm0 0 25000 0>;
+ };
+
+ cam_afvdd_2v8: regulator-cam-afvdd-2v8 {
+ compatible = "regulator-fixed";
+ gpio = <&pca9670 2 GPIO_ACTIVE_LOW>;
+ regulator-max-microvolt = <2800000>;
+ regulator-min-microvolt = <2800000>;
+ regulator-name = "cam-afvdd-2v8";
+ vin-supply = <&vcc2v8_video>;
+ };
+
+ cam_avdd_2v8: regulator-cam-avdd-2v8 {
+ compatible = "regulator-fixed";
+ gpio = <&pca9670 4 GPIO_ACTIVE_LOW>;
+ regulator-max-microvolt = <2800000>;
+ regulator-min-microvolt = <2800000>;
+ regulator-name = "cam-avdd-2v8";
+ vin-supply = <&vcc2v8_video>;
+ };
+
+ cam_dovdd_1v8: regulator-cam-dovdd-1v8 {
+ compatible = "regulator-fixed";
+ gpio = <&pca9670 3 GPIO_ACTIVE_LOW>;
+ regulator-max-microvolt = <1800000>;
+ regulator-min-microvolt = <1800000>;
+ regulator-name = "cam-dovdd-1v8";
+ vin-supply = <&vcc1v8_video>;
+ };
+
+ cam_dvdd_1v2: regulator-cam-dvdd-1v2 {
+ compatible = "regulator-fixed";
+ enable-active-high;
+ gpio = <&pca9670 5 GPIO_ACTIVE_HIGH>;
+ regulator-max-microvolt = <1200000>;
+ regulator-min-microvolt = <1200000>;
+ regulator-name = "cam-dvdd-1v2";
+ vin-supply = <&vcc3v3_baseboard>;
+ };
+
+ vcc1v8_video: regulator-vcc1v8-video {
+ compatible = "regulator-fixed";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-max-microvolt = <1800000>;
+ regulator-min-microvolt = <1800000>;
+ regulator-name = "vcc1v8-video";
+ vin-supply = <&vcc3v3_baseboard>;
+ };
+
+ vcc2v8_video: regulator-vcc2v8-video {
+ compatible = "regulator-fixed";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-max-microvolt = <2800000>;
+ regulator-min-microvolt = <2800000>;
+ regulator-name = "vcc2v8-video";
+ vin-supply = <&vcc3v3_baseboard>;
+ };
+
+ video-adapter-leds {
+ compatible = "gpio-leds";
+
+ video-adapter-led {
+ color = <LED_COLOR_ID_BLUE>;
+ gpios = <&pca9670 7 GPIO_ACTIVE_HIGH>;
+ label = "video-adapter-led";
+ linux,default-trigger = "none";
+ };
+ };
+};
+
+&display_subsystem {
+ status = "okay";
+};
+
+&dsi {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "okay";
+
+ panel@0 {
+ compatible = "leadtek,ltk050h3148w";
+ reg = <0>;
+ backlight = <&backlight>;
+ iovcc-supply = <&vcc1v8_video>;
+ reset-gpios = <&pca9670 0 GPIO_ACTIVE_LOW>;
+ vci-supply = <&vcc2v8_video>;
+
+ port {
+ mipi_in_panel: endpoint {
+ remote-endpoint = <&mipi_out_panel>;
+ };
+ };
+ };
+};
+
+&dsi_dphy {
+ status = "okay";
+};
+
+&dsi_out {
+ mipi_out_panel: endpoint {
+ remote-endpoint = <&mipi_in_panel>;
+ };
+};
+
+&i2c1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ /* OV5675, GT911, DW9714 are limited to 400KHz */
+ clock-frequency = <400000>;
+
+ touchscreen@14 {
+ compatible = "goodix,gt911";
+ reg = <0x14>;
+ interrupt-parent = <&gpio0>;
+ interrupts = <RK_PA0 IRQ_TYPE_LEVEL_LOW>;
+ irq-gpios = <&gpio0 RK_PA0 GPIO_ACTIVE_HIGH>;
+ pinctrl-0 = <&touch_int>;
+ pinctrl-names = "default";
+ reset-gpios = <&pca9670 1 GPIO_ACTIVE_HIGH>;
+ AVDD28-supply = <&vcc2v8_video>;
+ VDDIO-supply = <&vcc3v3_baseboard>;
+ };
+
+ pca9670: gpio@27 {
+ compatible = "nxp,pca9670";
+ reg = <0x27>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ pinctrl-0 = <&pca9670_resetn>;
+ pinctrl-names = "default";
+ reset-gpios = <&gpio0 RK_PA2 GPIO_ACTIVE_LOW>;
+ };
+};
+
+&pinctrl {
+ pca9670 {
+ pca9670_resetn: pca9670-resetn {
+ rockchip,pins = <0 RK_PA2 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ touch {
+ touch_int: touch-int {
+ rockchip,pins = <0 RK_PA0 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+};
+
+&vopb {
+ status = "okay";
+};
+
+&vopb_mmu {
+ status = "okay";
+};
+
+&vopl {
+ status = "okay";
+};
+
+&vopl_mmu {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/rockchip/px30-ringneck-haikou.dts b/arch/arm64/boot/dts/rockchip/px30-ringneck-haikou.dts
index eb9470a00e54..91cf4cd3fae2 100644
--- a/arch/arm64/boot/dts/rockchip/px30-ringneck-haikou.dts
+++ b/arch/arm64/boot/dts/rockchip/px30-ringneck-haikou.dts
@@ -154,6 +154,8 @@
};
&i2c3 {
+ status = "okay";
+
eeprom@50 {
reg = <0x50>;
compatible = "atmel,24c01";
@@ -194,6 +196,13 @@
<3 RK_PB3 RK_FUNC_GPIO &pcfg_pull_none>;
};
};
+
+ uart {
+ uart5_rts_pin: uart5-rts-pin {
+ rockchip,pins =
+ <0 RK_PB5 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
};
&pwm0 {
@@ -222,10 +231,15 @@
};
&uart0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart0_xfer>;
status = "okay";
};
&uart5 {
+ /* Add pinmux for rts-gpios (uart5_rts_pin) */
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart5_xfer &uart5_rts_pin>;
rts-gpios = <&gpio0 RK_PB5 GPIO_ACTIVE_HIGH>;
status = "okay";
};
diff --git a/arch/arm64/boot/dts/rockchip/px30-ringneck.dtsi b/arch/arm64/boot/dts/rockchip/px30-ringneck.dtsi
index e80412abec08..142244d52706 100644
--- a/arch/arm64/boot/dts/rockchip/px30-ringneck.dtsi
+++ b/arch/arm64/boot/dts/rockchip/px30-ringneck.dtsi
@@ -325,10 +325,6 @@
};
};
-&i2c3 {
- status = "okay";
-};
-
&i2s0_8ch {
rockchip,trcm-sync-tx-only;
diff --git a/arch/arm64/boot/dts/rockchip/rk3308-roc-cc.dts b/arch/arm64/boot/dts/rockchip/rk3308-roc-cc.dts
index 629121de5a13..5e7181948992 100644
--- a/arch/arm64/boot/dts/rockchip/rk3308-roc-cc.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3308-roc-cc.dts
@@ -147,7 +147,7 @@
&pwm5 {
status = "okay";
- pinctrl-names = "active";
+ pinctrl-names = "default";
pinctrl-0 = <&pwm5_pin_pull_down>;
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3318-a95x-z2.dts b/arch/arm64/boot/dts/rockchip/rk3318-a95x-z2.dts
index a94114fb7cc1..96c27fc5005d 100644
--- a/arch/arm64/boot/dts/rockchip/rk3318-a95x-z2.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3318-a95x-z2.dts
@@ -274,13 +274,13 @@
&pwm0 {
pinctrl-0 = <&pwm0_pin_pull_up>;
- pinctrl-names = "active";
+ pinctrl-names = "default";
status = "okay";
};
&pwm1 {
pinctrl-0 = <&pwm1_pin_pull_up>;
- pinctrl-names = "active";
+ pinctrl-names = "default";
status = "okay";
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3328-rock-pi-e.dts b/arch/arm64/boot/dts/rockchip/rk3328-rock-pi-e.dts
index 6310b58de77f..a4bdd87d0729 100644
--- a/arch/arm64/boot/dts/rockchip/rk3328-rock-pi-e.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3328-rock-pi-e.dts
@@ -428,10 +428,18 @@
status = "okay";
};
+&u2phy_otg {
+ status = "okay";
+};
+
&uart2 {
status = "okay";
};
+&usb20_otg {
+ status = "okay";
+};
+
&usbdrd3 {
dr_mode = "host";
status = "okay";
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-nanopi-r4s.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-nanopi-r4s.dtsi
index b1c9bd0e63ef..8d94d9f91a5c 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-nanopi-r4s.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399-nanopi-r4s.dtsi
@@ -115,7 +115,7 @@
};
&u2phy1_host {
- status = "disabled";
+ phy-supply = <&vdd_5v>;
};
&uart0 {
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-nanopi4.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-nanopi4.dtsi
index b169be06d4d1..c8eb5481f43d 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-nanopi4.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399-nanopi4.dtsi
@@ -603,7 +603,7 @@
};
&pwm2 {
- pinctrl-names = "active";
+ pinctrl-names = "default";
pinctrl-0 = <&pwm2_pin_pull_down>;
status = "okay";
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-puma-haikou-video-demo.dtso b/arch/arm64/boot/dts/rockchip/rk3399-puma-haikou-video-demo.dtso
new file mode 100644
index 000000000000..0377ec860d35
--- /dev/null
+++ b/arch/arm64/boot/dts/rockchip/rk3399-puma-haikou-video-demo.dtso
@@ -0,0 +1,166 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright (c) 2025 Cherry Embedded Solutions GmbH
+ *
+ * DEVKIT ADDON CAM-TS-A01
+ * https://embedded.cherry.de/product/development-kit/
+ *
+ * DT-overlay for the camera / DSI demo appliance for Haikou boards.
+ * This is the flavour for use with a Puma system-on-module.
+ */
+
+/dts-v1/;
+/plugin/;
+
+#include <dt-bindings/clock/rk3399-cru.h>
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/leds/common.h>
+#include <dt-bindings/pinctrl/rockchip.h>
+
+&{/} {
+ backlight: backlight {
+ compatible = "pwm-backlight";
+ power-supply = <&dc_12v>;
+ pwms = <&pwm0 0 25000 0>;
+ };
+
+ cam_afvdd_2v8: regulator-cam-afvdd-2v8 {
+ compatible = "regulator-fixed";
+ gpio = <&pca9670 2 GPIO_ACTIVE_LOW>;
+ regulator-max-microvolt = <2800000>;
+ regulator-min-microvolt = <2800000>;
+ regulator-name = "cam-afvdd-2v8";
+ vin-supply = <&vcc2v8_video>;
+ };
+
+ cam_avdd_2v8: regulator-cam-avdd-2v8 {
+ compatible = "regulator-fixed";
+ gpio = <&pca9670 4 GPIO_ACTIVE_LOW>;
+ regulator-max-microvolt = <2800000>;
+ regulator-min-microvolt = <2800000>;
+ regulator-name = "cam-avdd-2v8";
+ vin-supply = <&vcc2v8_video>;
+ };
+
+ cam_dovdd_1v8: regulator-cam-dovdd-1v8 {
+ compatible = "regulator-fixed";
+ gpio = <&pca9670 3 GPIO_ACTIVE_LOW>;
+ regulator-max-microvolt = <1800000>;
+ regulator-min-microvolt = <1800000>;
+ regulator-name = "cam-dovdd-1v8";
+ vin-supply = <&vcc1v8_video>;
+ };
+
+ cam_dvdd_1v2: regulator-cam-dvdd-1v2 {
+ compatible = "regulator-fixed";
+ enable-active-high;
+ gpio = <&pca9670 5 GPIO_ACTIVE_HIGH>;
+ regulator-max-microvolt = <1200000>;
+ regulator-min-microvolt = <1200000>;
+ regulator-name = "cam-dvdd-1v2";
+ vin-supply = <&vcc3v3_baseboard>;
+ };
+
+ vcc1v8_video: regulator-vcc1v8-video {
+ compatible = "regulator-fixed";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-max-microvolt = <1800000>;
+ regulator-min-microvolt = <1800000>;
+ regulator-name = "vcc1v8-video";
+ vin-supply = <&vcc3v3_baseboard>;
+ };
+
+ vcc2v8_video: regulator-vcc2v8-video {
+ compatible = "regulator-fixed";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-max-microvolt = <2800000>;
+ regulator-min-microvolt = <2800000>;
+ regulator-name = "vcc2v8-video";
+ vin-supply = <&vcc3v3_baseboard>;
+ };
+
+ video-adapter-leds {
+ compatible = "gpio-leds";
+
+ video-adapter-led {
+ color = <LED_COLOR_ID_BLUE>;
+ gpios = <&pca9670 7 GPIO_ACTIVE_HIGH>;
+ label = "video-adapter-led";
+ linux,default-trigger = "none";
+ };
+ };
+};
+
+&i2c1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ /* OV5675, GT911, DW9714 are limited to 400KHz */
+ clock-frequency = <400000>;
+
+ touchscreen@14 {
+ compatible = "goodix,gt911";
+ reg = <0x14>;
+ interrupt-parent = <&gpio1>;
+ interrupts = <RK_PC7 IRQ_TYPE_LEVEL_LOW>;
+ irq-gpios = <&gpio1 RK_PC7 GPIO_ACTIVE_HIGH>;
+ pinctrl-0 = <&touch_int>;
+ pinctrl-names = "default";
+ reset-gpios = <&pca9670 1 GPIO_ACTIVE_HIGH>;
+ AVDD28-supply = <&vcc2v8_video>;
+ VDDIO-supply = <&vcc3v3_baseboard>;
+ };
+
+ pca9670: gpio@27 {
+ compatible = "nxp,pca9670";
+ reg = <0x27>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ pinctrl-0 = <&pca9670_resetn>;
+ pinctrl-names = "default";
+ reset-gpios = <&gpio4 RK_PD6 GPIO_ACTIVE_LOW>;
+ };
+};
+
+&mipi_out {
+ mipi_out_panel: endpoint {
+ remote-endpoint = <&mipi_in_panel>;
+ };
+};
+
+&mipi_dsi {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "okay";
+
+ panel@0 {
+ compatible = "leadtek,ltk050h3148w";
+ reg = <0>;
+ backlight = <&backlight>;
+ iovcc-supply = <&vcc1v8_video>;
+ reset-gpios = <&pca9670 0 GPIO_ACTIVE_LOW>;
+ vci-supply = <&vcc2v8_video>;
+
+ port {
+ mipi_in_panel: endpoint {
+ remote-endpoint = <&mipi_out_panel>;
+ };
+ };
+ };
+};
+
+&pinctrl {
+ pca9670 {
+ pca9670_resetn: pca9670-resetn {
+ rockchip,pins = <4 RK_PD6 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ touch {
+ touch_int: touch-int {
+ rockchip,pins = <1 RK_PC7 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-puma-haikou.dts b/arch/arm64/boot/dts/rockchip/rk3399-puma-haikou.dts
index 947bbd62a6b0..f2234dabd664 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-puma-haikou.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3399-puma-haikou.dts
@@ -149,8 +149,15 @@
};
};
+&gmac {
+ status = "okay";
+};
+
&hdmi {
- ddc-i2c-bus = <&i2c3>;
+ status = "okay";
+};
+
+&hdmi_sound {
status = "okay";
};
@@ -186,9 +193,22 @@
};
};
-&i2c6 {
+&i2c7 {
+ eeprom@50 {
+ reg = <0x50>;
+ compatible = "atmel,24c01";
+ pagesize = <8>;
+ size = <128>;
+ vcc-supply = <&vcc3v3_baseboard>;
+ };
+};
+
+&i2s0 {
+ status = "okay";
+};
+
+&i2s2 {
status = "okay";
- clock-frequency = <400000>;
};
&pcie_phy {
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi
index 995b30a7aae0..e00fbaa8acc1 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi
@@ -183,7 +183,6 @@
snps,reset-delays-us = <0 10000 50000>;
tx_delay = <0x10>;
rx_delay = <0x23>;
- status = "okay";
};
&gpu {
@@ -389,6 +388,14 @@
};
};
+&hdmi {
+ ddc-i2c-bus = <&i2c3>;
+};
+
+&i2c6 {
+ clock-frequency = <400000>;
+};
+
&i2c7 {
status = "okay";
clock-frequency = <400000>;
@@ -439,7 +446,6 @@
pinctrl-1 = <&i2s0_2ch_bus_bclk_off>;
rockchip,playback-channels = <2>;
rockchip,capture-channels = <2>;
- status = "okay";
};
/*
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-roc-pc-plus.dts b/arch/arm64/boot/dts/rockchip/rk3399-roc-pc-plus.dts
index e2e9279fa267..8e3858cf988c 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-roc-pc-plus.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3399-roc-pc-plus.dts
@@ -112,7 +112,7 @@
&i2c1 {
es8388: es8388@11 {
- compatible = "everest,es8388";
+ compatible = "everest,es8388", "everest,es8328";
reg = <0x11>;
clocks = <&cru SCLK_I2S_8CH_OUT>;
#sound-dai-cells = <0>;
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-rockpro64.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-rockpro64.dtsi
index 69a9d6170649..51c6aa26d828 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-rockpro64.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399-rockpro64.dtsi
@@ -227,6 +227,16 @@
vin-supply = <&vcc12v_dcin>;
};
+ vcca_0v9: regulator-vcca-0v9 {
+ compatible = "regulator-fixed";
+ regulator-name = "vcca_0v9";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <900000>;
+ regulator-max-microvolt = <900000>;
+ vin-supply = <&vcc3v3_sys>;
+ };
+
vdd_log: regulator-vdd-log {
compatible = "pwm-regulator";
pwms = <&pwm2 0 25000 1>;
@@ -312,6 +322,8 @@
};
&hdmi {
+ avdd-0v9-supply = <&vcca_0v9>;
+ avdd-1v8-supply = <&vcc1v8_dvp>;
ddc-i2c-bus = <&i2c3>;
pinctrl-names = "default";
pinctrl-0 = <&hdmi_cec>;
@@ -661,6 +673,8 @@
num-lanes = <4>;
pinctrl-names = "default";
pinctrl-0 = <&pcie_perst>;
+ vpcie0v9-supply = <&vcca_0v9>;
+ vpcie1v8-supply = <&vcca_1v8>;
vpcie12v-supply = <&vcc12v_dcin>;
vpcie3v3-supply = <&vcc3v3_pcie>;
status = "okay";
diff --git a/arch/arm64/boot/dts/rockchip/rk3528-pinctrl.dtsi b/arch/arm64/boot/dts/rockchip/rk3528-pinctrl.dtsi
new file mode 100644
index 000000000000..ea051362fb26
--- /dev/null
+++ b/arch/arm64/boot/dts/rockchip/rk3528-pinctrl.dtsi
@@ -0,0 +1,1397 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright (c) 2022 Rockchip Electronics Co., Ltd.
+ */
+
+#include <dt-bindings/pinctrl/rockchip.h>
+#include "rockchip-pinconf.dtsi"
+
+/*
+ * This file is auto-generated by the pin2dts tool; keep the generated code
+ * intact and add any changes at the end of this file.
+ */
+&pinctrl {
+ arm {
+ /omit-if-no-ref/
+ arm_pins: arm-pins {
+ rockchip,pins =
+ /* arm_avs */
+ <4 RK_PC4 3 &pcfg_pull_none>;
+ };
+ };
+
+ clk {
+ /omit-if-no-ref/
+ clkm0_32k_out: clkm0-32k-out {
+ rockchip,pins =
+ /* clkm0_32k_out */
+ <3 RK_PC3 3 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ clkm1_32k_out: clkm1-32k-out {
+ rockchip,pins =
+ /* clkm1_32k_out */
+ <1 RK_PC3 1 &pcfg_pull_none>;
+ };
+ };
+
+ emmc {
+ /omit-if-no-ref/
+ emmc_rstnout: emmc-rstnout {
+ rockchip,pins =
+ /* emmc_rstn */
+ <1 RK_PD6 1 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ emmc_bus8: emmc-bus8 {
+ rockchip,pins =
+ /* emmc_d0 */
+ <1 RK_PC4 1 &pcfg_pull_up_drv_level_2>,
+ /* emmc_d1 */
+ <1 RK_PC5 1 &pcfg_pull_up_drv_level_2>,
+ /* emmc_d2 */
+ <1 RK_PC6 1 &pcfg_pull_up_drv_level_2>,
+ /* emmc_d3 */
+ <1 RK_PC7 1 &pcfg_pull_up_drv_level_2>,
+ /* emmc_d4 */
+ <1 RK_PD0 1 &pcfg_pull_up_drv_level_2>,
+ /* emmc_d5 */
+ <1 RK_PD1 1 &pcfg_pull_up_drv_level_2>,
+ /* emmc_d6 */
+ <1 RK_PD2 1 &pcfg_pull_up_drv_level_2>,
+ /* emmc_d7 */
+ <1 RK_PD3 1 &pcfg_pull_up_drv_level_2>;
+ };
+
+ /omit-if-no-ref/
+ emmc_clk: emmc-clk {
+ rockchip,pins =
+ /* emmc_clk */
+ <1 RK_PD5 1 &pcfg_pull_up_drv_level_2>;
+ };
+
+ /omit-if-no-ref/
+ emmc_cmd: emmc-cmd {
+ rockchip,pins =
+ /* emmc_cmd */
+ <1 RK_PD4 1 &pcfg_pull_up_drv_level_2>;
+ };
+
+ /omit-if-no-ref/
+ emmc_strb: emmc-strb {
+ rockchip,pins =
+ /* emmc_strb */
+ <1 RK_PD7 1 &pcfg_pull_none>;
+ };
+ };
+
+ eth {
+ /omit-if-no-ref/
+ eth_pins: eth-pins {
+ rockchip,pins =
+ /* eth_clk_25m_out */
+ <3 RK_PB5 2 &pcfg_pull_none_drv_level_2>;
+ };
+ };
+
+ fephy {
+ /omit-if-no-ref/
+ fephym0_led_dpx: fephym0-led_dpx {
+ rockchip,pins =
+ /* fephy_led_dpx_m0 */
+ <4 RK_PB5 2 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ fephym0_led_link: fephym0-led_link {
+ rockchip,pins =
+ /* fephy_led_link_m0 */
+ <4 RK_PC0 2 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ fephym0_led_spd: fephym0-led_spd {
+ rockchip,pins =
+ /* fephy_led_spd_m0 */
+ <4 RK_PB7 2 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ fephym1_led_dpx: fephym1-led_dpx {
+ rockchip,pins =
+ /* fephy_led_dpx_m1 */
+ <2 RK_PA4 5 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ fephym1_led_link: fephym1-led_link {
+ rockchip,pins =
+ /* fephy_led_link_m1 */
+ <2 RK_PA6 5 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ fephym1_led_spd: fephym1-led_spd {
+ rockchip,pins =
+ /* fephy_led_spd_m1 */
+ <2 RK_PA5 5 &pcfg_pull_none>;
+ };
+ };
+
+ fspi {
+ /omit-if-no-ref/
+ fspi_pins: fspi-pins {
+ rockchip,pins =
+ /* fspi_clk */
+ <1 RK_PD5 2 &pcfg_pull_none>,
+ /* fspi_d0 */
+ <1 RK_PC4 2 &pcfg_pull_none>,
+ /* fspi_d1 */
+ <1 RK_PC5 2 &pcfg_pull_none>,
+ /* fspi_d2 */
+ <1 RK_PC6 2 &pcfg_pull_none>,
+ /* fspi_d3 */
+ <1 RK_PC7 2 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ fspi_csn0: fspi-csn0 {
+ rockchip,pins =
+ /* fspi_csn0 */
+ <1 RK_PD0 2 &pcfg_pull_none>;
+ };
+ /omit-if-no-ref/
+ fspi_csn1: fspi-csn1 {
+ rockchip,pins =
+ /* fspi_csn1 */
+ <1 RK_PD1 2 &pcfg_pull_none>;
+ };
+ };
+
+ gpu {
+ /omit-if-no-ref/
+ gpu_pins: gpu-pins {
+ rockchip,pins =
+ /* gpu_avs */
+ <4 RK_PC3 3 &pcfg_pull_none>;
+ };
+ };
+
+ hdmi {
+ /omit-if-no-ref/
+ hdmi_pins: hdmi-pins {
+ rockchip,pins =
+ /* hdmi_tx_cec */
+ <0 RK_PA3 1 &pcfg_pull_none>,
+ /* hdmi_tx_hpd */
+ <0 RK_PA2 1 &pcfg_pull_none>,
+ /* hdmi_tx_scl */
+ <0 RK_PA4 1 &pcfg_pull_none>,
+ /* hdmi_tx_sda */
+ <0 RK_PA5 1 &pcfg_pull_none>;
+ };
+ };
+
+ hsm {
+ /omit-if-no-ref/
+ hsmm0_pins: hsmm0-pins {
+ rockchip,pins =
+ /* hsm_clk_out_m0 */
+ <2 RK_PA2 4 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ hsmm1_pins: hsmm1-pins {
+ rockchip,pins =
+ /* hsm_clk_out_m1 */
+ <1 RK_PA4 3 &pcfg_pull_none>;
+ };
+ };
+
+ i2c0 {
+ /omit-if-no-ref/
+ i2c0m0_xfer: i2c0m0-xfer {
+ rockchip,pins =
+ /* i2c0_scl_m0 */
+ <4 RK_PC4 2 &pcfg_pull_none_smt>,
+ /* i2c0_sda_m0 */
+ <4 RK_PC3 2 &pcfg_pull_none_smt>;
+ };
+
+ /omit-if-no-ref/
+ i2c0m1_xfer: i2c0m1-xfer {
+ rockchip,pins =
+ /* i2c0_scl_m1 */
+ <4 RK_PA1 2 &pcfg_pull_none_smt>,
+ /* i2c0_sda_m1 */
+ <4 RK_PA0 2 &pcfg_pull_none_smt>;
+ };
+ };
+
+ i2c1 {
+ /omit-if-no-ref/
+ i2c1m0_xfer: i2c1m0-xfer {
+ rockchip,pins =
+ /* i2c1_scl_m0 */
+ <4 RK_PA3 2 &pcfg_pull_none_smt>,
+ /* i2c1_sda_m0 */
+ <4 RK_PA2 2 &pcfg_pull_none_smt>;
+ };
+
+ /omit-if-no-ref/
+ i2c1m1_xfer: i2c1m1-xfer {
+ rockchip,pins =
+ /* i2c1_scl_m1 */
+ <4 RK_PC5 4 &pcfg_pull_none_smt>,
+ /* i2c1_sda_m1 */
+ <4 RK_PC6 4 &pcfg_pull_none_smt>;
+ };
+ };
+
+ i2c2 {
+ /omit-if-no-ref/
+ i2c2m0_xfer: i2c2m0-xfer {
+ rockchip,pins =
+ /* i2c2_scl_m0 */
+ <0 RK_PA4 2 &pcfg_pull_none_smt>,
+ /* i2c2_sda_m0 */
+ <0 RK_PA5 2 &pcfg_pull_none_smt>;
+ };
+
+ /omit-if-no-ref/
+ i2c2m1_xfer: i2c2m1-xfer {
+ rockchip,pins =
+ /* i2c2_scl_m1 */
+ <1 RK_PA5 3 &pcfg_pull_none_smt>,
+ /* i2c2_sda_m1 */
+ <1 RK_PA6 3 &pcfg_pull_none_smt>;
+ };
+ };
+
+ i2c3 {
+ /omit-if-no-ref/
+ i2c3m0_xfer: i2c3m0-xfer {
+ rockchip,pins =
+ /* i2c3_scl_m0 */
+ <1 RK_PA0 2 &pcfg_pull_none_smt>,
+ /* i2c3_sda_m0 */
+ <1 RK_PA1 2 &pcfg_pull_none_smt>;
+ };
+
+ /omit-if-no-ref/
+ i2c3m1_xfer: i2c3m1-xfer {
+ rockchip,pins =
+ /* i2c3_scl_m1 */
+ <3 RK_PC1 5 &pcfg_pull_none_smt>,
+ /* i2c3_sda_m1 */
+ <3 RK_PC3 5 &pcfg_pull_none_smt>;
+ };
+ };
+
+ i2c4 {
+ /omit-if-no-ref/
+ i2c4_xfer: i2c4-xfer {
+ rockchip,pins =
+ /* i2c4_scl */
+ <2 RK_PA0 4 &pcfg_pull_none_smt>,
+ /* i2c4_sda */
+ <2 RK_PA1 4 &pcfg_pull_none_smt>;
+ };
+ };
+
+ i2c5 {
+ /omit-if-no-ref/
+ i2c5m0_xfer: i2c5m0-xfer {
+ rockchip,pins =
+ /* i2c5_scl_m0 */
+ <1 RK_PB2 3 &pcfg_pull_none_smt>,
+ /* i2c5_sda_m0 */
+ <1 RK_PB3 3 &pcfg_pull_none_smt>;
+ };
+
+ /omit-if-no-ref/
+ i2c5m1_xfer: i2c5m1-xfer {
+ rockchip,pins =
+ /* i2c5_scl_m1 */
+ <1 RK_PD2 3 &pcfg_pull_none_smt>,
+ /* i2c5_sda_m1 */
+ <1 RK_PD3 3 &pcfg_pull_none_smt>;
+ };
+ };
+
+ i2c6 {
+ /omit-if-no-ref/
+ i2c6m0_xfer: i2c6m0-xfer {
+ rockchip,pins =
+ /* i2c6_scl_m0 */
+ <3 RK_PB2 5 &pcfg_pull_none_smt>,
+ /* i2c6_sda_m0 */
+ <3 RK_PB3 5 &pcfg_pull_none_smt>;
+ };
+
+ /omit-if-no-ref/
+ i2c6m1_xfer: i2c6m1-xfer {
+ rockchip,pins =
+ /* i2c6_scl_m1 */
+ <1 RK_PD4 3 &pcfg_pull_none_smt>,
+ /* i2c6_sda_m1 */
+ <1 RK_PD7 3 &pcfg_pull_none_smt>;
+ };
+ };
+
+ i2c7 {
+ /omit-if-no-ref/
+ i2c7_xfer: i2c7-xfer {
+ rockchip,pins =
+ /* i2c7_scl */
+ <2 RK_PA5 4 &pcfg_pull_none_smt>,
+ /* i2c7_sda */
+ <2 RK_PA6 4 &pcfg_pull_none_smt>;
+ };
+ };
+
+ i2s0 {
+ /omit-if-no-ref/
+ i2s0m0_lrck: i2s0m0-lrck {
+ rockchip,pins =
+ /* i2s0_lrck_m0 */
+ <3 RK_PB6 1 &pcfg_pull_none_smt>;
+ };
+
+ /omit-if-no-ref/
+ i2s0m0_mclk: i2s0m0-mclk {
+ rockchip,pins =
+ /* i2s0_mclk_m0 */
+ <3 RK_PB4 1 &pcfg_pull_none_smt>;
+ };
+
+ /omit-if-no-ref/
+ i2s0m0_sclk: i2s0m0-sclk {
+ rockchip,pins =
+ /* i2s0_sclk_m0 */
+ <3 RK_PB5 1 &pcfg_pull_none_smt>;
+ };
+
+ /omit-if-no-ref/
+ i2s0m0_sdi: i2s0m0-sdi {
+ rockchip,pins =
+ /* i2s0m0_sdi */
+ <3 RK_PB7 1 &pcfg_pull_none>;
+ };
+ /omit-if-no-ref/
+ i2s0m0_sdo: i2s0m0-sdo {
+ rockchip,pins =
+ /* i2s0m0_sdo */
+ <3 RK_PC0 1 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ i2s0m1_lrck: i2s0m1-lrck {
+ rockchip,pins =
+ /* i2s0_lrck_m1 */
+ <1 RK_PB6 1 &pcfg_pull_none_smt>;
+ };
+
+ /omit-if-no-ref/
+ i2s0m1_mclk: i2s0m1-mclk {
+ rockchip,pins =
+ /* i2s0_mclk_m1 */
+ <1 RK_PB4 1 &pcfg_pull_none_smt>;
+ };
+
+ /omit-if-no-ref/
+ i2s0m1_sclk: i2s0m1-sclk {
+ rockchip,pins =
+ /* i2s0_sclk_m1 */
+ <1 RK_PB5 1 &pcfg_pull_none_smt>;
+ };
+
+ /omit-if-no-ref/
+ i2s0m1_sdi: i2s0m1-sdi {
+ rockchip,pins =
+ /* i2s0m1_sdi */
+ <1 RK_PB7 1 &pcfg_pull_none>;
+ };
+ /omit-if-no-ref/
+ i2s0m1_sdo: i2s0m1-sdo {
+ rockchip,pins =
+ /* i2s0m1_sdo */
+ <1 RK_PC0 1 &pcfg_pull_none>;
+ };
+ };
+
+ i2s1 {
+ /omit-if-no-ref/
+ i2s1_lrck: i2s1-lrck {
+ rockchip,pins =
+ /* i2s1_lrck */
+ <4 RK_PA6 1 &pcfg_pull_none_smt>;
+ };
+
+ /omit-if-no-ref/
+ i2s1_mclk: i2s1-mclk {
+ rockchip,pins =
+ /* i2s1_mclk */
+ <4 RK_PA4 1 &pcfg_pull_none_smt>;
+ };
+
+ /omit-if-no-ref/
+ i2s1_sclk: i2s1-sclk {
+ rockchip,pins =
+ /* i2s1_sclk */
+ <4 RK_PA5 1 &pcfg_pull_none_smt>;
+ };
+
+ /omit-if-no-ref/
+ i2s1_sdi0: i2s1-sdi0 {
+ rockchip,pins =
+ /* i2s1_sdi0 */
+ <4 RK_PB4 1 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ i2s1_sdi1: i2s1-sdi1 {
+ rockchip,pins =
+ /* i2s1_sdi1 */
+ <4 RK_PB3 1 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ i2s1_sdi2: i2s1-sdi2 {
+ rockchip,pins =
+ /* i2s1_sdi2 */
+ <4 RK_PA3 1 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ i2s1_sdi3: i2s1-sdi3 {
+ rockchip,pins =
+ /* i2s1_sdi3 */
+ <4 RK_PA2 1 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ i2s1_sdo0: i2s1-sdo0 {
+ rockchip,pins =
+ /* i2s1_sdo0 */
+ <4 RK_PA7 1 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ i2s1_sdo1: i2s1-sdo1 {
+ rockchip,pins =
+ /* i2s1_sdo1 */
+ <4 RK_PB0 1 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ i2s1_sdo2: i2s1-sdo2 {
+ rockchip,pins =
+ /* i2s1_sdo2 */
+ <4 RK_PB1 1 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ i2s1_sdo3: i2s1-sdo3 {
+ rockchip,pins =
+ /* i2s1_sdo3 */
+ <4 RK_PB2 1 &pcfg_pull_none>;
+ };
+ };
+
+ jtag {
+ /omit-if-no-ref/
+ jtagm0_pins: jtagm0-pins {
+ rockchip,pins =
+ /* jtag_cpu_tck_m0 */
+ <2 RK_PA2 2 &pcfg_pull_none>,
+ /* jtag_cpu_tms_m0 */
+ <2 RK_PA3 2 &pcfg_pull_none>,
+ /* jtag_mcu_tck_m0 */
+ <2 RK_PA4 2 &pcfg_pull_none>,
+ /* jtag_mcu_tms_m0 */
+ <2 RK_PA5 2 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ jtagm1_pins: jtagm1-pins {
+ rockchip,pins =
+ /* jtag_cpu_tck_m1 */
+ <4 RK_PD0 2 &pcfg_pull_none>,
+ /* jtag_cpu_tms_m1 */
+ <4 RK_PC7 2 &pcfg_pull_none>,
+ /* jtag_mcu_tck_m1 */
+ <4 RK_PD0 3 &pcfg_pull_none>,
+ /* jtag_mcu_tms_m1 */
+ <4 RK_PC7 3 &pcfg_pull_none>;
+ };
+ };
+
+ pcie {
+ /omit-if-no-ref/
+ pciem0_pins: pciem0-pins {
+ rockchip,pins =
+ /* pcie_clkreqn_m0 */
+ <3 RK_PA6 5 &pcfg_pull_none>,
+ /* pcie_perstn_m0 */
+ <3 RK_PB0 5 &pcfg_pull_none>,
+ /* pcie_waken_m0 */
+ <3 RK_PA7 5 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ pciem1_pins: pciem1-pins {
+ rockchip,pins =
+ /* pcie_clkreqn_m1 */
+ <1 RK_PA0 4 &pcfg_pull_none>,
+ /* pcie_perstn_m1 */
+ <1 RK_PA2 4 &pcfg_pull_none>,
+ /* pcie_waken_m1 */
+ <1 RK_PA1 4 &pcfg_pull_none>;
+ };
+ };
+
+ pdm {
+ /omit-if-no-ref/
+ pdm_clk0: pdm-clk0 {
+ rockchip,pins =
+ /* pdm_clk0 */
+ <4 RK_PB5 3 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ pdm_clk1: pdm-clk1 {
+ rockchip,pins =
+ /* pdm_clk1 */
+ <4 RK_PA4 3 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ pdm_sdi0: pdm-sdi0 {
+ rockchip,pins =
+ /* pdm_sdi0 */
+ <4 RK_PB2 3 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ pdm_sdi1: pdm-sdi1 {
+ rockchip,pins =
+ /* pdm_sdi1 */
+ <4 RK_PB1 3 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ pdm_sdi2: pdm-sdi2 {
+ rockchip,pins =
+ /* pdm_sdi2 */
+ <4 RK_PB3 3 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ pdm_sdi3: pdm-sdi3 {
+ rockchip,pins =
+ /* pdm_sdi3 */
+ <4 RK_PC1 3 &pcfg_pull_none>;
+ };
+ };
+
+ pmu {
+ /omit-if-no-ref/
+ pmu_pins: pmu-pins {
+ rockchip,pins =
+ /* pmu_debug */
+ <4 RK_PA0 4 &pcfg_pull_none>;
+ };
+ };
+
+ pwm0 {
+ /omit-if-no-ref/
+ pwm0m0_pins: pwm0m0-pins {
+ rockchip,pins =
+ /* pwm0_m0 */
+ <4 RK_PC3 1 &pcfg_pull_none_drv_level_0>;
+ };
+
+ /omit-if-no-ref/
+ pwm0m1_pins: pwm0m1-pins {
+ rockchip,pins =
+ /* pwm0_m1 */
+ <1 RK_PA2 5 &pcfg_pull_none_drv_level_0>;
+ };
+ };
+
+ pwm1 {
+ /omit-if-no-ref/
+ pwm1m0_pins: pwm1m0-pins {
+ rockchip,pins =
+ /* pwm1_m0 */
+ <4 RK_PC4 1 &pcfg_pull_none_drv_level_0>;
+ };
+
+ /omit-if-no-ref/
+ pwm1m1_pins: pwm1m1-pins {
+ rockchip,pins =
+ /* pwm1_m1 */
+ <1 RK_PA3 4 &pcfg_pull_none_drv_level_0>;
+ };
+ };
+
+ pwm2 {
+ /omit-if-no-ref/
+ pwm2m0_pins: pwm2m0-pins {
+ rockchip,pins =
+ /* pwm2_m0 */
+ <4 RK_PC5 1 &pcfg_pull_none_drv_level_0>;
+ };
+
+ /omit-if-no-ref/
+ pwm2m1_pins: pwm2m1-pins {
+ rockchip,pins =
+ /* pwm2_m1 */
+ <1 RK_PA7 2 &pcfg_pull_none_drv_level_0>;
+ };
+ };
+
+ pwm3 {
+ /omit-if-no-ref/
+ pwm3m0_pins: pwm3m0-pins {
+ rockchip,pins =
+ /* pwm3_m0 */
+ <4 RK_PC6 1 &pcfg_pull_none_drv_level_0>;
+ };
+
+ /omit-if-no-ref/
+ pwm3m1_pins: pwm3m1-pins {
+ rockchip,pins =
+ /* pwm3_m1 */
+ <2 RK_PA4 3 &pcfg_pull_none_drv_level_0>;
+ };
+ };
+
+ pwm4 {
+ /omit-if-no-ref/
+ pwm4m0_pins: pwm4m0-pins {
+ rockchip,pins =
+ /* pwm4_m0 */
+ <4 RK_PB7 1 &pcfg_pull_none_drv_level_0>;
+ };
+
+ /omit-if-no-ref/
+ pwm4m1_pins: pwm4m1-pins {
+ rockchip,pins =
+ /* pwm4_m1 */
+ <1 RK_PA4 2 &pcfg_pull_none_drv_level_0>;
+ };
+ };
+
+ pwm5 {
+ /omit-if-no-ref/
+ pwm5m0_pins: pwm5m0-pins {
+ rockchip,pins =
+ /* pwm5_m0 */
+ <4 RK_PC0 1 &pcfg_pull_none_drv_level_0>;
+ };
+
+ /omit-if-no-ref/
+ pwm5m1_pins: pwm5m1-pins {
+ rockchip,pins =
+ /* pwm5_m1 */
+ <3 RK_PC3 1 &pcfg_pull_none_drv_level_0>;
+ };
+ };
+
+ pwm6 {
+ /omit-if-no-ref/
+ pwm6m0_pins: pwm6m0-pins {
+ rockchip,pins =
+ /* pwm6_m0 */
+ <4 RK_PC1 1 &pcfg_pull_none_drv_level_0>;
+ };
+
+ /omit-if-no-ref/
+ pwm6m1_pins: pwm6m1-pins {
+ rockchip,pins =
+ /* pwm6_m1 */
+ <1 RK_PC3 3 &pcfg_pull_none_drv_level_0>;
+ };
+
+ /omit-if-no-ref/
+ pwm6m2_pins: pwm6m2-pins {
+ rockchip,pins =
+ /* pwm6_m2 */
+ <3 RK_PC1 1 &pcfg_pull_none_drv_level_0>;
+ };
+ };
+
+ pwm7 {
+ /omit-if-no-ref/
+ pwm7m0_pins: pwm7m0-pins {
+ rockchip,pins =
+ /* pwm7_m0 */
+ <4 RK_PC2 1 &pcfg_pull_none_drv_level_0>;
+ };
+
+ /omit-if-no-ref/
+ pwm7m1_pins: pwm7m1-pins {
+ rockchip,pins =
+ /* pwm7_m1 */
+ <1 RK_PC2 2 &pcfg_pull_none_drv_level_0>;
+ };
+ };
+
+ pwr {
+ /omit-if-no-ref/
+ pwr_pins: pwr-pins {
+ rockchip,pins =
+ /* pwr_ctrl0 */
+ <4 RK_PC2 2 &pcfg_pull_none>,
+ /* pwr_ctrl1 */
+ <4 RK_PB6 1 &pcfg_pull_none>;
+ };
+ };
+
+ ref {
+ /omit-if-no-ref/
+ refm0_pins: refm0-pins {
+ rockchip,pins =
+ /* ref_clk_out_m0 */
+ <0 RK_PA1 1 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ refm1_pins: refm1-pins {
+ rockchip,pins =
+ /* ref_clk_out_m1 */
+ <3 RK_PC3 6 &pcfg_pull_none>;
+ };
+ };
+
+ rgmii {
+ /omit-if-no-ref/
+ rgmii_miim: rgmii-miim {
+ rockchip,pins =
+ /* rgmii_mdc */
+ <3 RK_PB6 2 &pcfg_pull_none_drv_level_2>,
+ /* rgmii_mdio */
+ <3 RK_PB7 2 &pcfg_pull_none_drv_level_2>;
+ };
+
+ /omit-if-no-ref/
+ rgmii_rx_bus2: rgmii-rx_bus2 {
+ rockchip,pins =
+ /* rgmii_rxd0 */
+ <3 RK_PA3 2 &pcfg_pull_none>,
+ /* rgmii_rxd1 */
+ <3 RK_PA2 2 &pcfg_pull_none>,
+ /* rgmii_rxdv_crs */
+ <3 RK_PC2 2 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ rgmii_tx_bus2: rgmii-tx_bus2 {
+ rockchip,pins =
+ /* rgmii_txd0 */
+ <3 RK_PA1 2 &pcfg_pull_none_drv_level_2>,
+ /* rgmii_txd1 */
+ <3 RK_PA0 2 &pcfg_pull_none_drv_level_2>,
+ /* rgmii_txen */
+ <3 RK_PC0 2 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ rgmii_rgmii_clk: rgmii-rgmii_clk {
+ rockchip,pins =
+ /* rgmii_rxclk */
+ <3 RK_PA5 2 &pcfg_pull_none>,
+ /* rgmii_txclk */
+ <3 RK_PA4 2 &pcfg_pull_none_drv_level_2>;
+ };
+
+ /omit-if-no-ref/
+ rgmii_rgmii_bus: rgmii-rgmii_bus {
+ rockchip,pins =
+ /* rgmii_rxd2 */
+ <3 RK_PA7 2 &pcfg_pull_none>,
+ /* rgmii_rxd3 */
+ <3 RK_PA6 2 &pcfg_pull_none>,
+ /* rgmii_txd2 */
+ <3 RK_PB1 2 &pcfg_pull_none_drv_level_2>,
+ /* rgmii_txd3 */
+ <3 RK_PB0 2 &pcfg_pull_none_drv_level_2>;
+ };
+
+ /omit-if-no-ref/
+ rgmii_clk: rgmii-clk {
+ rockchip,pins =
+ /* rgmii_clk */
+ <3 RK_PB4 2 &pcfg_pull_none>;
+ };
+ /omit-if-no-ref/
+ rgmii_txer: rgmii-txer {
+ rockchip,pins =
+ /* rgmii_txer */
+ <3 RK_PC1 2 &pcfg_pull_none>;
+ };
+ };
+
+ scr {
+ /omit-if-no-ref/
+ scrm0_pins: scrm0-pins {
+ rockchip,pins =
+ /* scr_clk_m0 */
+ <1 RK_PA2 3 &pcfg_pull_none>,
+ /* scr_data_m0 */
+ <1 RK_PA1 3 &pcfg_pull_none>,
+ /* scr_detn_m0 */
+ <1 RK_PA0 3 &pcfg_pull_none>,
+ /* scr_rstn_m0 */
+ <1 RK_PA3 3 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ scrm1_pins: scrm1-pins {
+ rockchip,pins =
+ /* scr_clk_m1 */
+ <2 RK_PA5 3 &pcfg_pull_none>,
+ /* scr_data_m1 */
+ <2 RK_PA3 4 &pcfg_pull_none>,
+ /* scr_detn_m1 */
+ <2 RK_PA6 3 &pcfg_pull_none>,
+ /* scr_rstn_m1 */
+ <2 RK_PA4 4 &pcfg_pull_none>;
+ };
+ };
+
+ sdio0 {
+ /omit-if-no-ref/
+ sdio0_bus4: sdio0-bus4 {
+ rockchip,pins =
+ /* sdio0_d0 */
+ <1 RK_PA0 1 &pcfg_pull_up_drv_level_2>,
+ /* sdio0_d1 */
+ <1 RK_PA1 1 &pcfg_pull_up_drv_level_2>,
+ /* sdio0_d2 */
+ <1 RK_PA2 1 &pcfg_pull_up_drv_level_2>,
+ /* sdio0_d3 */
+ <1 RK_PA3 1 &pcfg_pull_up_drv_level_2>;
+ };
+
+ /omit-if-no-ref/
+ sdio0_clk: sdio0-clk {
+ rockchip,pins =
+ /* sdio0_clk */
+ <1 RK_PA5 1 &pcfg_pull_up_drv_level_2>;
+ };
+
+ /omit-if-no-ref/
+ sdio0_cmd: sdio0-cmd {
+ rockchip,pins =
+ /* sdio0_cmd */
+ <1 RK_PA4 1 &pcfg_pull_up_drv_level_2>;
+ };
+
+ /omit-if-no-ref/
+ sdio0_det: sdio0-det {
+ rockchip,pins =
+ /* sdio0_det */
+ <1 RK_PA6 1 &pcfg_pull_up>;
+ };
+
+ /omit-if-no-ref/
+ sdio0_pwren: sdio0-pwren {
+ rockchip,pins =
+ /* sdio0_pwren */
+ <1 RK_PA7 1 &pcfg_pull_none>;
+ };
+ };
+
+ sdio1 {
+ /omit-if-no-ref/
+ sdio1_bus4: sdio1-bus4 {
+ rockchip,pins =
+ /* sdio1_d0 */
+ <3 RK_PA6 1 &pcfg_pull_up_drv_level_2>,
+ /* sdio1_d1 */
+ <3 RK_PA7 1 &pcfg_pull_up_drv_level_2>,
+ /* sdio1_d2 */
+ <3 RK_PB0 1 &pcfg_pull_up_drv_level_2>,
+ /* sdio1_d3 */
+ <3 RK_PB1 1 &pcfg_pull_up_drv_level_2>;
+ };
+
+ /omit-if-no-ref/
+ sdio1_clk: sdio1-clk {
+ rockchip,pins =
+ /* sdio1_clk */
+ <3 RK_PA4 1 &pcfg_pull_up_drv_level_2>;
+ };
+
+ /omit-if-no-ref/
+ sdio1_cmd: sdio1-cmd {
+ rockchip,pins =
+ /* sdio1_cmd */
+ <3 RK_PA5 1 &pcfg_pull_up_drv_level_2>;
+ };
+
+ /omit-if-no-ref/
+ sdio1_det: sdio1-det {
+ rockchip,pins =
+ /* sdio1_det */
+ <3 RK_PB3 1 &pcfg_pull_up>;
+ };
+
+ /omit-if-no-ref/
+ sdio1_pwren: sdio1-pwren {
+ rockchip,pins =
+ /* sdio1_pwren */
+ <3 RK_PB2 1 &pcfg_pull_none>;
+ };
+ };
+
+ sdmmc {
+ /omit-if-no-ref/
+ sdmmc_bus4: sdmmc-bus4 {
+ rockchip,pins =
+ /* sdmmc_d0 */
+ <2 RK_PA0 1 &pcfg_pull_up_drv_level_2>,
+ /* sdmmc_d1 */
+ <2 RK_PA1 1 &pcfg_pull_up_drv_level_2>,
+ /* sdmmc_d2 */
+ <2 RK_PA2 1 &pcfg_pull_up_drv_level_2>,
+ /* sdmmc_d3 */
+ <2 RK_PA3 1 &pcfg_pull_up_drv_level_2>;
+ };
+
+ /omit-if-no-ref/
+ sdmmc_clk: sdmmc-clk {
+ rockchip,pins =
+ /* sdmmc_clk */
+ <2 RK_PA5 1 &pcfg_pull_up_drv_level_2>;
+ };
+
+ /omit-if-no-ref/
+ sdmmc_cmd: sdmmc-cmd {
+ rockchip,pins =
+ /* sdmmc_cmd */
+ <2 RK_PA4 1 &pcfg_pull_up_drv_level_2>;
+ };
+
+ /omit-if-no-ref/
+ sdmmc_det: sdmmc-det {
+ rockchip,pins =
+ /* sdmmc_detn */
+ <2 RK_PA6 1 &pcfg_pull_up>;
+ };
+
+ /omit-if-no-ref/
+ sdmmc_pwren: sdmmc-pwren {
+ rockchip,pins =
+ /* sdmmc_pwren */
+ <4 RK_PA1 1 &pcfg_pull_none>;
+ };
+ };
+
+ spdif {
+ /omit-if-no-ref/
+ spdifm0_pins: spdifm0-pins {
+ rockchip,pins =
+ /* spdif_tx_m0 */
+ <4 RK_PA0 1 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ spdifm1_pins: spdifm1-pins {
+ rockchip,pins =
+ /* spdif_tx_m1 */
+ <1 RK_PC3 2 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ spdifm2_pins: spdifm2-pins {
+ rockchip,pins =
+ /* spdif_tx_m2 */
+ <3 RK_PC3 2 &pcfg_pull_none>;
+ };
+ };
+
+ spi0 {
+ /omit-if-no-ref/
+ spi0_pins: spi0-pins {
+ rockchip,pins =
+ /* spi0_clk */
+ <4 RK_PB4 2 &pcfg_pull_none_drv_level_2>,
+ /* spi0_miso */
+ <4 RK_PB3 2 &pcfg_pull_none_drv_level_2>,
+ /* spi0_mosi */
+ <4 RK_PB2 2 &pcfg_pull_none_drv_level_2>;
+ };
+
+ /omit-if-no-ref/
+ spi0_csn0: spi0-csn0 {
+ rockchip,pins =
+ /* spi0_csn0 */
+ <4 RK_PB6 2 &pcfg_pull_none_drv_level_2>;
+ };
+ /omit-if-no-ref/
+ spi0_csn1: spi0-csn1 {
+ rockchip,pins =
+ /* spi0_csn1 */
+ <4 RK_PC1 2 &pcfg_pull_none_drv_level_2>;
+ };
+ };
+
+ spi1 {
+ /omit-if-no-ref/
+ spi1_pins: spi1-pins {
+ rockchip,pins =
+ /* spi1_clk */
+ <1 RK_PB6 2 &pcfg_pull_none_drv_level_2>,
+ /* spi1_miso */
+ <1 RK_PC0 2 &pcfg_pull_none_drv_level_2>,
+ /* spi1_mosi */
+ <1 RK_PB7 2 &pcfg_pull_none_drv_level_2>;
+ };
+
+ /omit-if-no-ref/
+ spi1_csn0: spi1-csn0 {
+ rockchip,pins =
+ /* spi1_csn0 */
+ <1 RK_PC1 1 &pcfg_pull_none_drv_level_2>;
+ };
+ /omit-if-no-ref/
+ spi1_csn1: spi1-csn1 {
+ rockchip,pins =
+ /* spi1_csn1 */
+ <1 RK_PC2 1 &pcfg_pull_none_drv_level_2>;
+ };
+ };
+
+ tsi0 {
+ /omit-if-no-ref/
+ tsi0_pins: tsi0-pins {
+ rockchip,pins =
+ /* tsi0_clkin */
+ <3 RK_PB2 3 &pcfg_pull_none>,
+ /* tsi0_d0 */
+ <3 RK_PB1 3 &pcfg_pull_none>,
+ /* tsi0_d1 */
+ <3 RK_PB5 3 &pcfg_pull_none>,
+ /* tsi0_d2 */
+ <3 RK_PB6 3 &pcfg_pull_none>,
+ /* tsi0_d3 */
+ <3 RK_PB7 3 &pcfg_pull_none>,
+ /* tsi0_d4 */
+ <3 RK_PA3 3 &pcfg_pull_none>,
+ /* tsi0_d5 */
+ <3 RK_PA2 3 &pcfg_pull_none>,
+ /* tsi0_d6 */
+ <3 RK_PA1 3 &pcfg_pull_none>,
+ /* tsi0_d7 */
+ <3 RK_PA0 3 &pcfg_pull_none>,
+ /* tsi0_fail */
+ <3 RK_PC0 3 &pcfg_pull_none>,
+ /* tsi0_sync */
+ <3 RK_PB4 3 &pcfg_pull_none>,
+ /* tsi0_valid */
+ <3 RK_PB3 3 &pcfg_pull_none>;
+ };
+ };
+
+ tsi1 {
+ /omit-if-no-ref/
+ tsi1_pins: tsi1-pins {
+ rockchip,pins =
+ /* tsi1_clkin */
+ <3 RK_PA5 3 &pcfg_pull_none>,
+ /* tsi1_d0 */
+ <3 RK_PA4 3 &pcfg_pull_none>,
+ /* tsi1_sync */
+ <3 RK_PA7 3 &pcfg_pull_none>,
+ /* tsi1_valid */
+ <3 RK_PA6 3 &pcfg_pull_none>;
+ };
+ };
+
+ uart0 {
+ /omit-if-no-ref/
+ uart0m0_xfer: uart0m0-xfer {
+ rockchip,pins =
+ /* uart0_rx_m0 */
+ <4 RK_PC7 1 &pcfg_pull_up>,
+ /* uart0_tx_m0 */
+ <4 RK_PD0 1 &pcfg_pull_up>;
+ };
+
+ /omit-if-no-ref/
+ uart0m1_xfer: uart0m1-xfer {
+ rockchip,pins =
+ /* uart0_rx_m1 */
+ <2 RK_PA0 2 &pcfg_pull_up>,
+ /* uart0_tx_m1 */
+ <2 RK_PA1 2 &pcfg_pull_up>;
+ };
+ };
+
+ uart1 {
+ /omit-if-no-ref/
+ uart1m0_xfer: uart1m0-xfer {
+ rockchip,pins =
+ /* uart1_rx_m0 */
+ <4 RK_PA7 2 &pcfg_pull_up>,
+ /* uart1_tx_m0 */
+ <4 RK_PA6 2 &pcfg_pull_up>;
+ };
+
+ /omit-if-no-ref/
+ uart1m1_xfer: uart1m1-xfer {
+ rockchip,pins =
+ /* uart1_rx_m1 */
+ <4 RK_PC6 2 &pcfg_pull_up>,
+ /* uart1_tx_m1 */
+ <4 RK_PC5 2 &pcfg_pull_up>;
+ };
+
+ /omit-if-no-ref/
+ uart1_ctsn: uart1-ctsn {
+ rockchip,pins =
+ /* uart1_ctsn */
+ <4 RK_PA4 2 &pcfg_pull_none>;
+ };
+ /omit-if-no-ref/
+ uart1_rtsn: uart1-rtsn {
+ rockchip,pins =
+ /* uart1_rtsn */
+ <4 RK_PA5 2 &pcfg_pull_none>;
+ };
+ };
+
+ uart2 {
+ /omit-if-no-ref/
+ uart2m0_xfer: uart2m0-xfer {
+ rockchip,pins =
+ /* uart2_rx_m0 */
+ <3 RK_PA0 1 &pcfg_pull_up>,
+ /* uart2_tx_m0 */
+ <3 RK_PA1 1 &pcfg_pull_up>;
+ };
+
+ /omit-if-no-ref/
+ uart2m0_ctsn: uart2m0-ctsn {
+ rockchip,pins =
+ /* uart2m0_ctsn */
+ <3 RK_PA3 1 &pcfg_pull_none>;
+ };
+ /omit-if-no-ref/
+ uart2m0_rtsn: uart2m0-rtsn {
+ rockchip,pins =
+ /* uart2m0_rtsn */
+ <3 RK_PA2 1 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ uart2m1_xfer: uart2m1-xfer {
+ rockchip,pins =
+ /* uart2_rx_m1 */
+ <1 RK_PB0 1 &pcfg_pull_up>,
+ /* uart2_tx_m1 */
+ <1 RK_PB1 1 &pcfg_pull_up>;
+ };
+
+ /omit-if-no-ref/
+ uart2m1_ctsn: uart2m1-ctsn {
+ rockchip,pins =
+ /* uart2m1_ctsn */
+ <1 RK_PB3 1 &pcfg_pull_none>;
+ };
+ /omit-if-no-ref/
+ uart2m1_rtsn: uart2m1-rtsn {
+ rockchip,pins =
+ /* uart2m1_rtsn */
+ <1 RK_PB2 1 &pcfg_pull_none>;
+ };
+ };
+
+ uart3 {
+ /omit-if-no-ref/
+ uart3m0_xfer: uart3m0-xfer {
+ rockchip,pins =
+ /* uart3_rx_m0 */
+ <4 RK_PB0 2 &pcfg_pull_up>,
+ /* uart3_tx_m0 */
+ <4 RK_PB1 2 &pcfg_pull_up>;
+ };
+
+ /omit-if-no-ref/
+ uart3m1_xfer: uart3m1-xfer {
+ rockchip,pins =
+ /* uart3_rx_m1 */
+ <4 RK_PB7 3 &pcfg_pull_up>,
+ /* uart3_tx_m1 */
+ <4 RK_PC0 3 &pcfg_pull_up>;
+ };
+
+ /omit-if-no-ref/
+ uart3_ctsn: uart3-ctsn {
+ rockchip,pins =
+ /* uart3_ctsn */
+ <4 RK_PA3 3 &pcfg_pull_none>;
+ };
+ /omit-if-no-ref/
+ uart3_rtsn: uart3-rtsn {
+ rockchip,pins =
+ /* uart3_rtsn */
+ <4 RK_PA2 3 &pcfg_pull_none>;
+ };
+ };
+
+ uart4 {
+ /omit-if-no-ref/
+ uart4_xfer: uart4-xfer {
+ rockchip,pins =
+ /* uart4_rx */
+ <2 RK_PA2 3 &pcfg_pull_up>,
+ /* uart4_tx */
+ <2 RK_PA3 3 &pcfg_pull_up>;
+ };
+
+ /omit-if-no-ref/
+ uart4_ctsn: uart4-ctsn {
+ rockchip,pins =
+ /* uart4_ctsn */
+ <2 RK_PA1 3 &pcfg_pull_none>;
+ };
+ /omit-if-no-ref/
+ uart4_rtsn: uart4-rtsn {
+ rockchip,pins =
+ /* uart4_rtsn */
+ <2 RK_PA0 3 &pcfg_pull_none>;
+ };
+ };
+
+ uart5 {
+ /omit-if-no-ref/
+ uart5m0_xfer: uart5m0-xfer {
+ rockchip,pins =
+ /* uart5_rx_m0 */
+ <1 RK_PA2 2 &pcfg_pull_up>,
+ /* uart5_tx_m0 */
+ <1 RK_PA3 2 &pcfg_pull_up>;
+ };
+
+ /omit-if-no-ref/
+ uart5m0_ctsn: uart5m0-ctsn {
+ rockchip,pins =
+ /* uart5m0_ctsn */
+ <1 RK_PA6 2 &pcfg_pull_none>;
+ };
+ /omit-if-no-ref/
+ uart5m0_rtsn: uart5m0-rtsn {
+ rockchip,pins =
+ /* uart5m0_rtsn */
+ <1 RK_PA5 2 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ uart5m1_xfer: uart5m1-xfer {
+ rockchip,pins =
+ /* uart5_rx_m1 */
+ <1 RK_PD4 2 &pcfg_pull_up>,
+ /* uart5_tx_m1 */
+ <1 RK_PD7 2 &pcfg_pull_up>;
+ };
+
+ /omit-if-no-ref/
+ uart5m1_ctsn: uart5m1-ctsn {
+ rockchip,pins =
+ /* uart5m1_ctsn */
+ <1 RK_PD3 2 &pcfg_pull_none>;
+ };
+ /omit-if-no-ref/
+ uart5m1_rtsn: uart5m1-rtsn {
+ rockchip,pins =
+ /* uart5m1_rtsn */
+ <1 RK_PD2 2 &pcfg_pull_none>;
+ };
+ };
+
+ uart6 {
+ /omit-if-no-ref/
+ uart6m0_xfer: uart6m0-xfer {
+ rockchip,pins =
+ /* uart6_rx_m0 */
+ <3 RK_PA7 4 &pcfg_pull_up>,
+ /* uart6_tx_m0 */
+ <3 RK_PA6 4 &pcfg_pull_up>;
+ };
+
+ /omit-if-no-ref/
+ uart6m1_xfer: uart6m1-xfer {
+ rockchip,pins =
+ /* uart6_rx_m1 */
+ <3 RK_PC3 4 &pcfg_pull_up>,
+ /* uart6_tx_m1 */
+ <3 RK_PC1 4 &pcfg_pull_up>;
+ };
+
+ /omit-if-no-ref/
+ uart6_ctsn: uart6-ctsn {
+ rockchip,pins =
+ /* uart6_ctsn */
+ <3 RK_PA4 4 &pcfg_pull_none>;
+ };
+ /omit-if-no-ref/
+ uart6_rtsn: uart6-rtsn {
+ rockchip,pins =
+ /* uart6_rtsn */
+ <3 RK_PA5 4 &pcfg_pull_none>;
+ };
+ };
+
+ uart7 {
+ /omit-if-no-ref/
+ uart7m0_xfer: uart7m0-xfer {
+ rockchip,pins =
+ /* uart7_rx_m0 */
+ <3 RK_PB3 4 &pcfg_pull_up>,
+ /* uart7_tx_m0 */
+ <3 RK_PB2 4 &pcfg_pull_up>;
+ };
+
+ /omit-if-no-ref/
+ uart7m0_ctsn: uart7m0-ctsn {
+ rockchip,pins =
+ /* uart7m0_ctsn */
+ <3 RK_PB0 4 &pcfg_pull_none>;
+ };
+ /omit-if-no-ref/
+ uart7m0_rtsn: uart7m0-rtsn {
+ rockchip,pins =
+ /* uart7m0_rtsn */
+ <3 RK_PB1 4 &pcfg_pull_none>;
+ };
+
+ /omit-if-no-ref/
+ uart7m1_xfer: uart7m1-xfer {
+ rockchip,pins =
+ /* uart7_rx_m1 */
+ <1 RK_PB3 4 &pcfg_pull_up>,
+ /* uart7_tx_m1 */
+ <1 RK_PB2 4 &pcfg_pull_up>;
+ };
+
+ /omit-if-no-ref/
+ uart7m1_ctsn: uart7m1-ctsn {
+ rockchip,pins =
+ /* uart7m1_ctsn */
+ <1 RK_PB0 4 &pcfg_pull_none>;
+ };
+ /omit-if-no-ref/
+ uart7m1_rtsn: uart7m1-rtsn {
+ rockchip,pins =
+ /* uart7m1_rtsn */
+ <1 RK_PB1 4 &pcfg_pull_none>;
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/rockchip/rk3528-radxa-e20c.dts b/arch/arm64/boot/dts/rockchip/rk3528-radxa-e20c.dts
index d2cdb63d4a9d..57a446b5cbd6 100644
--- a/arch/arm64/boot/dts/rockchip/rk3528-radxa-e20c.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3528-radxa-e20c.dts
@@ -6,17 +6,150 @@
*/
/dts-v1/;
+
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/leds/common.h>
#include "rk3528.dtsi"
/ {
model = "Radxa E20C";
compatible = "radxa,e20c", "rockchip,rk3528";
+ aliases {
+ mmc0 = &sdhci;
+ };
+
chosen {
stdout-path = "serial0:1500000n8";
};
+
+ adc-keys {
+ compatible = "adc-keys";
+ io-channels = <&saradc 0>;
+ io-channel-names = "buttons";
+ keyup-threshold-microvolt = <1800000>;
+ poll-interval = <100>;
+
+ button-maskrom {
+ label = "MASKROM";
+ linux,code = <KEY_SETUP>;
+ press-threshold-microvolt = <0>;
+ };
+ };
+
+ gpio-keys {
+ compatible = "gpio-keys";
+ pinctrl-names = "default";
+ pinctrl-0 = <&user_key>;
+
+ button-user {
+ gpios = <&gpio0 RK_PA0 GPIO_ACTIVE_LOW>;
+ label = "USER";
+ linux,code = <BTN_1>;
+ wakeup-source;
+ };
+ };
+
+ leds {
+ compatible = "gpio-leds";
+ pinctrl-names = "default";
+ pinctrl-0 = <&lan_led_g>, <&sys_led_g>, <&wan_led_g>;
+
+ led-lan {
+ color = <LED_COLOR_ID_GREEN>;
+ default-state = "off";
+ function = LED_FUNCTION_LAN;
+ gpios = <&gpio4 RK_PB5 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "netdev";
+ };
+
+ led-sys {
+ color = <LED_COLOR_ID_GREEN>;
+ default-state = "on";
+ function = LED_FUNCTION_HEARTBEAT;
+ gpios = <&gpio4 RK_PC1 GPIO_ACTIVE_LOW>;
+ linux,default-trigger = "heartbeat";
+ };
+
+ led-wan {
+ color = <LED_COLOR_ID_GREEN>;
+ default-state = "off";
+ function = LED_FUNCTION_WAN;
+ gpios = <&gpio4 RK_PC0 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "netdev";
+ };
+ };
+
+ vcc_1v8: regulator-1v8-vcc {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc_1v8";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ vin-supply = <&vcc_3v3>;
+ };
+
+ vcc_3v3: regulator-3v3-vcc {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc_3v3";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ vin-supply = <&vcc5v0_sys>;
+ };
+
+ vcc5v0_sys: regulator-5v0-vcc-sys {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc5v0_sys";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ };
+};
+
+&pinctrl {
+ gpio-keys {
+ user_key: user-key {
+ rockchip,pins = <0 RK_PA0 RK_FUNC_GPIO &pcfg_pull_up>;
+ };
+ };
+
+ leds {
+ lan_led_g: lan-led-g {
+ rockchip,pins = <4 RK_PB5 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+
+ sys_led_g: sys-led-g {
+ rockchip,pins = <4 RK_PC1 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+
+ wan_led_g: wan-led-g {
+ rockchip,pins = <4 RK_PC0 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+};
+
+&saradc {
+ vref-supply = <&vcc_1v8>;
+ status = "okay";
+};
+
+&sdhci {
+ bus-width = <8>;
+ cap-mmc-highspeed;
+ no-sd;
+ no-sdio;
+ non-removable;
+ vmmc-supply = <&vcc_3v3>;
+ vqmmc-supply = <&vcc_1v8>;
+ status = "okay";
};
&uart0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart0m0_xfer>;
status = "okay";
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3528.dtsi b/arch/arm64/boot/dts/rockchip/rk3528.dtsi
index e58faa985aa4..26c3559d6a6d 100644
--- a/arch/arm64/boot/dts/rockchip/rk3528.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3528.dtsi
@@ -4,8 +4,12 @@
* Copyright (c) 2024 Yao Zi <ziyao@disroot.org>
*/
+#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/pinctrl/rockchip.h>
+#include <dt-bindings/clock/rockchip,rk3528-cru.h>
+#include <dt-bindings/reset/rockchip,rk3528-cru.h>
/ {
compatible = "rockchip,rk3528";
@@ -15,6 +19,11 @@
#size-cells = <2>;
aliases {
+ gpio0 = &gpio0;
+ gpio1 = &gpio1;
+ gpio2 = &gpio2;
+ gpio3 = &gpio3;
+ gpio4 = &gpio4;
serial0 = &uart0;
serial1 = &uart1;
serial2 = &uart2;
@@ -51,6 +60,7 @@
reg = <0x0>;
device_type = "cpu";
enable-method = "psci";
+ clocks = <&scmi_clk SCMI_CLK_CPU>;
};
cpu1: cpu@1 {
@@ -58,6 +68,7 @@
reg = <0x1>;
device_type = "cpu";
enable-method = "psci";
+ clocks = <&scmi_clk SCMI_CLK_CPU>;
};
cpu2: cpu@2 {
@@ -65,6 +76,7 @@
reg = <0x2>;
device_type = "cpu";
enable-method = "psci";
+ clocks = <&scmi_clk SCMI_CLK_CPU>;
};
cpu3: cpu@3 {
@@ -72,6 +84,22 @@
reg = <0x3>;
device_type = "cpu";
enable-method = "psci";
+ clocks = <&scmi_clk SCMI_CLK_CPU>;
+ };
+ };
+
+ firmware {
+ scmi: scmi {
+ compatible = "arm,scmi-smc";
+ arm,smc-id = <0x82000010>;
+ shmem = <&scmi_shmem>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ scmi_clk: protocol@14 {
+ reg = <0x14>;
+ #clock-cells = <1>;
+ };
};
};
@@ -80,6 +108,18 @@
method = "smc";
};
+ reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ scmi_shmem: shmem@10f000 {
+ compatible = "arm,scmi-shmem";
+ reg = <0x0 0x0010f000 0x0 0x100>;
+ no-map;
+ };
+ };
+
timer {
compatible = "arm,armv8-timer";
interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
@@ -95,6 +135,13 @@
#clock-cells = <0>;
};
+ gmac0_clk: clock-gmac50m {
+ compatible = "fixed-clock";
+ clock-frequency = <50000000>;
+ clock-output-names = "gmac0";
+ #clock-cells = <0>;
+ };
+
soc {
compatible = "simple-bus";
ranges = <0x0 0xfe000000 0x0 0xfe000000 0x0 0x2000000>;
@@ -114,10 +161,219 @@
#interrupt-cells = <3>;
};
+ qos_crypto_a: qos@ff200000 {
+ compatible = "rockchip,rk3528-qos", "syscon";
+ reg = <0x0 0xff200000 0x0 0x20>;
+ };
+
+ qos_crypto_p: qos@ff200080 {
+ compatible = "rockchip,rk3528-qos", "syscon";
+ reg = <0x0 0xff200080 0x0 0x20>;
+ };
+
+ qos_dcf: qos@ff200100 {
+ compatible = "rockchip,rk3528-qos", "syscon";
+ reg = <0x0 0xff200100 0x0 0x20>;
+ };
+
+ qos_dft2apb: qos@ff200200 {
+ compatible = "rockchip,rk3528-qos", "syscon";
+ reg = <0x0 0xff200200 0x0 0x20>;
+ };
+
+ qos_dma2ddr: qos@ff200280 {
+ compatible = "rockchip,rk3528-qos", "syscon";
+ reg = <0x0 0xff200280 0x0 0x20>;
+ };
+
+ qos_dmac: qos@ff200300 {
+ compatible = "rockchip,rk3528-qos", "syscon";
+ reg = <0x0 0xff200300 0x0 0x20>;
+ };
+
+ qos_keyreader: qos@ff200380 {
+ compatible = "rockchip,rk3528-qos", "syscon";
+ reg = <0x0 0xff200380 0x0 0x20>;
+ };
+
+ qos_cpu: qos@ff210000 {
+ compatible = "rockchip,rk3528-qos", "syscon";
+ reg = <0x0 0xff210000 0x0 0x20>;
+ };
+
+ qos_debug: qos@ff210080 {
+ compatible = "rockchip,rk3528-qos", "syscon";
+ reg = <0x0 0xff210080 0x0 0x20>;
+ };
+
+ qos_gpu_m0: qos@ff220000 {
+ compatible = "rockchip,rk3528-qos", "syscon";
+ reg = <0x0 0xff220000 0x0 0x20>;
+ };
+
+ qos_gpu_m1: qos@ff220080 {
+ compatible = "rockchip,rk3528-qos", "syscon";
+ reg = <0x0 0xff220080 0x0 0x20>;
+ };
+
+ qos_pmu_mcu: qos@ff240000 {
+ compatible = "rockchip,rk3528-qos", "syscon";
+ reg = <0x0 0xff240000 0x0 0x20>;
+ };
+
+ qos_rkvdec: qos@ff250000 {
+ compatible = "rockchip,rk3528-qos", "syscon";
+ reg = <0x0 0xff250000 0x0 0x20>;
+ };
+
+ qos_rkvenc: qos@ff260000 {
+ compatible = "rockchip,rk3528-qos", "syscon";
+ reg = <0x0 0xff260000 0x0 0x20>;
+ };
+
+ qos_gmac0: qos@ff270000 {
+ compatible = "rockchip,rk3528-qos", "syscon";
+ reg = <0x0 0xff270000 0x0 0x20>;
+ };
+
+ qos_hdcp: qos@ff270080 {
+ compatible = "rockchip,rk3528-qos", "syscon";
+ reg = <0x0 0xff270080 0x0 0x20>;
+ };
+
+ qos_jpegdec: qos@ff270100 {
+ compatible = "rockchip,rk3528-qos", "syscon";
+ reg = <0x0 0xff270100 0x0 0x20>;
+ };
+
+ qos_rga2_m0ro: qos@ff270200 {
+ compatible = "rockchip,rk3528-qos", "syscon";
+ reg = <0x0 0xff270200 0x0 0x20>;
+ };
+
+ qos_rga2_m0wo: qos@ff270280 {
+ compatible = "rockchip,rk3528-qos", "syscon";
+ reg = <0x0 0xff270280 0x0 0x20>;
+ };
+
+ qos_sdmmc0: qos@ff270300 {
+ compatible = "rockchip,rk3528-qos", "syscon";
+ reg = <0x0 0xff270300 0x0 0x20>;
+ };
+
+ qos_usb2host: qos@ff270380 {
+ compatible = "rockchip,rk3528-qos", "syscon";
+ reg = <0x0 0xff270380 0x0 0x20>;
+ };
+
+ qos_vdpp: qos@ff270480 {
+ compatible = "rockchip,rk3528-qos", "syscon";
+ reg = <0x0 0xff270480 0x0 0x20>;
+ };
+
+ qos_vop: qos@ff270500 {
+ compatible = "rockchip,rk3528-qos", "syscon";
+ reg = <0x0 0xff270500 0x0 0x20>;
+ };
+
+ qos_emmc: qos@ff280000 {
+ compatible = "rockchip,rk3528-qos", "syscon";
+ reg = <0x0 0xff280000 0x0 0x20>;
+ };
+
+ qos_fspi: qos@ff280080 {
+ compatible = "rockchip,rk3528-qos", "syscon";
+ reg = <0x0 0xff280080 0x0 0x20>;
+ };
+
+ qos_gmac1: qos@ff280100 {
+ compatible = "rockchip,rk3528-qos", "syscon";
+ reg = <0x0 0xff280100 0x0 0x20>;
+ };
+
+ qos_pcie: qos@ff280180 {
+ compatible = "rockchip,rk3528-qos", "syscon";
+ reg = <0x0 0xff280180 0x0 0x20>;
+ };
+
+ qos_sdio0: qos@ff280200 {
+ compatible = "rockchip,rk3528-qos", "syscon";
+ reg = <0x0 0xff280200 0x0 0x20>;
+ };
+
+ qos_sdio1: qos@ff280280 {
+ compatible = "rockchip,rk3528-qos", "syscon";
+ reg = <0x0 0xff280280 0x0 0x20>;
+ };
+
+ qos_tsp: qos@ff280300 {
+ compatible = "rockchip,rk3528-qos", "syscon";
+ reg = <0x0 0xff280300 0x0 0x20>;
+ };
+
+ qos_usb3otg: qos@ff280380 {
+ compatible = "rockchip,rk3528-qos", "syscon";
+ reg = <0x0 0xff280380 0x0 0x20>;
+ };
+
+ qos_vpu: qos@ff280400 {
+ compatible = "rockchip,rk3528-qos", "syscon";
+ reg = <0x0 0xff280400 0x0 0x20>;
+ };
+
+ cru: clock-controller@ff4a0000 {
+ compatible = "rockchip,rk3528-cru";
+ reg = <0x0 0xff4a0000 0x0 0x30000>;
+ assigned-clocks =
+ <&cru XIN_OSC0_DIV>, <&cru PLL_GPLL>,
+ <&cru PLL_PPLL>, <&cru PLL_CPLL>,
+ <&cru ARMCLK>, <&cru CLK_MATRIX_250M_SRC>,
+ <&cru CLK_MATRIX_500M_SRC>,
+ <&cru CLK_MATRIX_50M_SRC>,
+ <&cru CLK_MATRIX_100M_SRC>,
+ <&cru CLK_MATRIX_150M_SRC>,
+ <&cru CLK_MATRIX_200M_SRC>,
+ <&cru CLK_MATRIX_300M_SRC>,
+ <&cru CLK_MATRIX_339M_SRC>,
+ <&cru CLK_MATRIX_400M_SRC>,
+ <&cru CLK_MATRIX_600M_SRC>,
+ <&cru CLK_PPLL_50M_MATRIX>,
+ <&cru CLK_PPLL_100M_MATRIX>,
+ <&cru CLK_PPLL_125M_MATRIX>,
+ <&cru ACLK_BUS_VOPGL_ROOT>;
+ assigned-clock-rates =
+ <32768>, <1188000000>,
+ <1000000000>, <996000000>,
+ <408000000>, <250000000>,
+ <500000000>,
+ <50000000>,
+ <100000000>,
+ <150000000>,
+ <200000000>,
+ <300000000>,
+ <340000000>,
+ <400000000>,
+ <600000000>,
+ <50000000>,
+ <100000000>,
+ <125000000>,
+ <500000000>;
+ clocks = <&xin24m>, <&gmac0_clk>;
+ clock-names = "xin24m", "gmac0";
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ };
+
+ ioc_grf: syscon@ff540000 {
+ compatible = "rockchip,rk3528-ioc-grf", "syscon";
+ reg = <0x0 0xff540000 0x0 0x40000>;
+ };
+
uart0: serial@ff9f0000 {
compatible = "rockchip,rk3528-uart", "snps,dw-apb-uart";
reg = <0x0 0xff9f0000 0x0 0x100>;
- clock-frequency = <24000000>;
+ clocks = <&cru SCLK_UART0>, <&cru PCLK_UART0>;
+ clock-names = "baudclk", "apb_pclk";
interrupts = <GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH>;
reg-io-width = <4>;
reg-shift = <2>;
@@ -127,6 +383,8 @@
uart1: serial@ff9f8000 {
compatible = "rockchip,rk3528-uart", "snps,dw-apb-uart";
reg = <0x0 0xff9f8000 0x0 0x100>;
+ clocks = <&cru SCLK_UART1>, <&cru PCLK_UART1>;
+ clock-names = "baudclk", "apb_pclk";
interrupts = <GIC_SPI 41 IRQ_TYPE_LEVEL_HIGH>;
reg-io-width = <4>;
reg-shift = <2>;
@@ -136,6 +394,8 @@
uart2: serial@ffa00000 {
compatible = "rockchip,rk3528-uart", "snps,dw-apb-uart";
reg = <0x0 0xffa00000 0x0 0x100>;
+ clocks = <&cru SCLK_UART2>, <&cru PCLK_UART2>;
+ clock-names = "baudclk", "apb_pclk";
interrupts = <GIC_SPI 42 IRQ_TYPE_LEVEL_HIGH>;
reg-io-width = <4>;
reg-shift = <2>;
@@ -144,6 +404,8 @@
uart3: serial@ffa08000 {
compatible = "rockchip,rk3528-uart", "snps,dw-apb-uart";
+ clocks = <&cru SCLK_UART3>, <&cru PCLK_UART3>;
+ clock-names = "baudclk", "apb_pclk";
reg = <0x0 0xffa08000 0x0 0x100>;
reg-io-width = <4>;
reg-shift = <2>;
@@ -153,6 +415,8 @@
uart4: serial@ffa10000 {
compatible = "rockchip,rk3528-uart", "snps,dw-apb-uart";
reg = <0x0 0xffa10000 0x0 0x100>;
+ clocks = <&cru SCLK_UART4>, <&cru PCLK_UART4>;
+ clock-names = "baudclk", "apb_pclk";
interrupts = <GIC_SPI 44 IRQ_TYPE_LEVEL_HIGH>;
reg-io-width = <4>;
reg-shift = <2>;
@@ -162,6 +426,8 @@
uart5: serial@ffa18000 {
compatible = "rockchip,rk3528-uart", "snps,dw-apb-uart";
reg = <0x0 0xffa18000 0x0 0x100>;
+ clocks = <&cru SCLK_UART5>, <&cru PCLK_UART5>;
+ clock-names = "baudclk", "apb_pclk";
interrupts = <GIC_SPI 45 IRQ_TYPE_LEVEL_HIGH>;
reg-io-width = <4>;
reg-shift = <2>;
@@ -171,6 +437,8 @@
uart6: serial@ffa20000 {
compatible = "rockchip,rk3528-uart", "snps,dw-apb-uart";
reg = <0x0 0xffa20000 0x0 0x100>;
+ clocks = <&cru SCLK_UART6>, <&cru PCLK_UART6>;
+ clock-names = "baudclk", "apb_pclk";
interrupts = <GIC_SPI 46 IRQ_TYPE_LEVEL_HIGH>;
reg-io-width = <4>;
reg-shift = <2>;
@@ -180,10 +448,118 @@
uart7: serial@ffa28000 {
compatible = "rockchip,rk3528-uart", "snps,dw-apb-uart";
reg = <0x0 0xffa28000 0x0 0x100>;
+ clocks = <&cru SCLK_UART7>, <&cru PCLK_UART7>;
+ clock-names = "baudclk", "apb_pclk";
interrupts = <GIC_SPI 47 IRQ_TYPE_LEVEL_HIGH>;
reg-io-width = <4>;
reg-shift = <2>;
status = "disabled";
};
+
+ saradc: adc@ffae0000 {
+ compatible = "rockchip,rk3528-saradc";
+ reg = <0x0 0xffae0000 0x0 0x10000>;
+ clocks = <&cru CLK_SARADC>, <&cru PCLK_SARADC>;
+ clock-names = "saradc", "apb_pclk";
+ interrupts = <GIC_SPI 140 IRQ_TYPE_LEVEL_HIGH>;
+ resets = <&cru SRST_P_SARADC>;
+ reset-names = "saradc-apb";
+ #io-channel-cells = <1>;
+ status = "disabled";
+ };
+
+ sdhci: mmc@ffbf0000 {
+ compatible = "rockchip,rk3528-dwcmshc",
+ "rockchip,rk3588-dwcmshc";
+ reg = <0x0 0xffbf0000 0x0 0x10000>;
+ assigned-clocks = <&cru BCLK_EMMC>, <&cru TCLK_EMMC>,
+ <&cru CCLK_SRC_EMMC>;
+ assigned-clock-rates = <200000000>, <24000000>,
+ <200000000>;
+ clocks = <&cru CCLK_SRC_EMMC>, <&cru HCLK_EMMC>,
+ <&cru ACLK_EMMC>, <&cru BCLK_EMMC>,
+ <&cru TCLK_EMMC>;
+ clock-names = "core", "bus", "axi", "block", "timer";
+ interrupts = <GIC_SPI 136 IRQ_TYPE_LEVEL_HIGH>;
+ max-frequency = <200000000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&emmc_bus8>, <&emmc_clk>, <&emmc_cmd>,
+ <&emmc_strb>;
+ resets = <&cru SRST_C_EMMC>, <&cru SRST_H_EMMC>,
+ <&cru SRST_A_EMMC>, <&cru SRST_B_EMMC>,
+ <&cru SRST_T_EMMC>;
+ reset-names = "core", "bus", "axi", "block", "timer";
+ status = "disabled";
+ };
+
+ pinctrl: pinctrl {
+ compatible = "rockchip,rk3528-pinctrl";
+ rockchip,grf = <&ioc_grf>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ gpio0: gpio@ff610000 {
+ compatible = "rockchip,gpio-bank";
+ reg = <0x0 0xff610000 0x0 0x200>;
+ clocks = <&cru PCLK_GPIO0>, <&cru DBCLK_GPIO0>;
+ interrupts = <GIC_SPI 71 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-ranges = <&pinctrl 0 0 32>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpio1: gpio@ffaf0000 {
+ compatible = "rockchip,gpio-bank";
+ reg = <0x0 0xffaf0000 0x0 0x200>;
+ clocks = <&cru PCLK_GPIO1>, <&cru DBCLK_GPIO1>;
+ interrupts = <GIC_SPI 73 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-ranges = <&pinctrl 0 32 32>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpio2: gpio@ffb00000 {
+ compatible = "rockchip,gpio-bank";
+ reg = <0x0 0xffb00000 0x0 0x200>;
+ clocks = <&cru PCLK_GPIO2>, <&cru DBCLK_GPIO2>;
+ interrupts = <GIC_SPI 75 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-ranges = <&pinctrl 0 64 32>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpio3: gpio@ffb10000 {
+ compatible = "rockchip,gpio-bank";
+ reg = <0x0 0xffb10000 0x0 0x200>;
+ clocks = <&cru PCLK_GPIO3>, <&cru DBCLK_GPIO3>;
+ interrupts = <GIC_SPI 76 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-ranges = <&pinctrl 0 96 32>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpio4: gpio@ffb20000 {
+ compatible = "rockchip,gpio-bank";
+ reg = <0x0 0xffb20000 0x0 0x200>;
+ clocks = <&cru PCLK_GPIO4>, <&cru DBCLK_GPIO4>;
+ interrupts = <GIC_SPI 78 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-ranges = <&pinctrl 0 128 32>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+ };
};
};
+
+#include "rk3528-pinctrl.dtsi"
diff --git a/arch/arm64/boot/dts/rockchip/rk3566-lubancat-1.dts b/arch/arm64/boot/dts/rockchip/rk3566-lubancat-1.dts
index 61dd71c259aa..ddf84c2a19cf 100644
--- a/arch/arm64/boot/dts/rockchip/rk3566-lubancat-1.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3566-lubancat-1.dts
@@ -512,7 +512,6 @@
&sdmmc0 {
max-frequency = <150000000>;
- supports-sd;
bus-width = <4>;
cap-mmc-highspeed;
cap-sd-highspeed;
diff --git a/arch/arm64/boot/dts/rockchip/rk3566-pinenote.dtsi b/arch/arm64/boot/dts/rockchip/rk3566-pinenote.dtsi
index 2d3ae1544822..3613661417b2 100644
--- a/arch/arm64/boot/dts/rockchip/rk3566-pinenote.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3566-pinenote.dtsi
@@ -9,6 +9,8 @@
#include "rk3566.dtsi"
/ {
+ chassis-type = "tablet";
+
aliases {
mmc0 = &sdhci;
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3566-quartz64-a.dts b/arch/arm64/boot/dts/rockchip/rk3566-quartz64-a.dts
index 98e75df8b158..3c127c5c2607 100644
--- a/arch/arm64/boot/dts/rockchip/rk3566-quartz64-a.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3566-quartz64-a.dts
@@ -265,8 +265,12 @@
};
&gmac1 {
- assigned-clocks = <&cru SCLK_GMAC1_RX_TX>, <&cru SCLK_GMAC1_RGMII_SPEED>, <&cru SCLK_GMAC1>;
- assigned-clock-parents = <&cru SCLK_GMAC1_RGMII_SPEED>, <&cru SCLK_GMAC1>, <&gmac1_clkin>;
+ assigned-clocks = <&cru SCLK_GMAC1_RX_TX>,
+ <&cru SCLK_GMAC1_RGMII_SPEED>,
+ <&cru SCLK_GMAC1>;
+ assigned-clock-parents = <&cru SCLK_GMAC1_RGMII_SPEED>,
+ <&cru SCLK_GMAC1>,
+ <&gmac1_clkin>;
clock_in_out = "input";
phy-supply = <&vcc_3v3>;
phy-mode = "rgmii";
diff --git a/arch/arm64/boot/dts/rockchip/rk3566-quartz64-b.dts b/arch/arm64/boot/dts/rockchip/rk3566-quartz64-b.dts
index 24928a129446..5707321a1144 100644
--- a/arch/arm64/boot/dts/rockchip/rk3566-quartz64-b.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3566-quartz64-b.dts
@@ -173,8 +173,12 @@
};
&gmac1 {
- assigned-clocks = <&cru SCLK_GMAC1_RX_TX>, <&cru SCLK_GMAC1_RGMII_SPEED>, <&cru SCLK_GMAC1>;
- assigned-clock-parents = <&cru SCLK_GMAC1_RGMII_SPEED>, <&cru SCLK_GMAC1>, <&gmac1_clkin>;
+ assigned-clocks = <&cru SCLK_GMAC1_RX_TX>,
+ <&cru SCLK_GMAC1_RGMII_SPEED>,
+ <&cru SCLK_GMAC1>;
+ assigned-clock-parents = <&cru SCLK_GMAC1_RGMII_SPEED>,
+ <&cru SCLK_GMAC1>,
+ <&gmac1_clkin>;
clock_in_out = "input";
phy-mode = "rgmii";
phy-supply = <&vcc_3v3>;
diff --git a/arch/arm64/boot/dts/rockchip/rk3568-photonicat.dts b/arch/arm64/boot/dts/rockchip/rk3568-photonicat.dts
new file mode 100644
index 000000000000..58c1052ba8ef
--- /dev/null
+++ b/arch/arm64/boot/dts/rockchip/rk3568-photonicat.dts
@@ -0,0 +1,588 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+
+/dts-v1/;
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/pinctrl/rockchip.h>
+#include <dt-bindings/soc/rockchip,vop2.h>
+#include "rk3568.dtsi"
+
+/ {
+ model = "Ariaboard Photonicat";
+ compatible = "ariaboard,photonicat", "rockchip,rk3568";
+
+ aliases {
+ ethernet0 = &gmac0;
+ ethernet1 = &gmac1;
+ mmc0 = &sdhci;
+ mmc1 = &sdmmc0;
+ mmc2 = &sdmmc1;
+ };
+
+ battery: battery {
+ compatible = "simple-battery";
+ device-chemistry = "lithium-ion";
+ charge-full-design-microamp-hours = <6800000>;
+ energy-full-design-microwatt-hours = <25000000>;
+ voltage-max-design-microvolt = <4200000>;
+ voltage-min-design-microvolt = <3400000>;
+
+ ocv-capacity-celsius = <25>;
+ ocv-capacity-table-0 = <4100000 100>, <4040000 90>,
+ <3980000 80>, <3920000 70>,
+ <3870000 60>, <3820000 50>,
+ <3790000 40>, <3770000 30>,
+ <3740000 20>, <3680000 10>,
+ <3450000 0>;
+ };
+
+ chosen: chosen {
+ stdout-path = "serial2:1500000n8";
+ };
+
+ hdmi_con: hdmi-con {
+ compatible = "hdmi-connector";
+ type = "a";
+
+ port {
+ hdmi_con_in: endpoint {
+ remote-endpoint = <&hdmi_out_con>;
+ };
+ };
+ };
+
+ vcc_1v8: regulator-vcc-1v8 {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc_1v8";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ vin-supply = <&vcc3v3_sys>;
+ };
+
+ vcc_3v3: regulator-vcc-3v3 {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc_3v3";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ vin-supply = <&vcc3v3_sys>;
+ };
+
+	/*
+	 * Actually fed by vcc_syson, dependent on the PI6C clock generator.
+	 */
+ vcc3v3_pcie: regulator-vcc3v3-pcie {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc3v3_pcie";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ vin-supply = <&vcc3v3_pi6c>;
+ };
+
+	/* PI6C PCIe clock generator */
+ vcc3v3_pi6c: regulator-vcc3v3-pi6c {
+ compatible = "regulator-fixed";
+ enable-active-high;
+ gpio = <&gpio0 RK_PA6 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pcie_pwren_h>;
+ regulator-name = "vcc3v3_pi6c";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ vin-supply = <&vcc_syson>;
+ };
+
+ vcc3v3_sd: regulator-vcc3v3-sd {
+ compatible = "regulator-fixed";
+ gpio = <&gpio0 RK_PB6 GPIO_ACTIVE_LOW>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&sdmmc0_pwren>;
+ regulator-boot-on;
+ regulator-name = "vcc3v3_sd";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ vin-supply = <&vcc_3v3>;
+ };
+
+ vcc3v3_sys: regulator-vcc3v3-sys {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc3v3_sys";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ vin-supply = <&vcc_syson>;
+ };
+
+ vcc3v4_rf: regulator-vcc3v4-rf {
+ compatible = "regulator-fixed";
+ enable-active-high;
+ gpio = <&gpio4 RK_PC2 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&rf_pwr_en>;
+ regulator-name = "vcc3v4_rf";
+ regulator-min-microvolt = <3400000>;
+ regulator-max-microvolt = <3400000>;
+ vin-supply = <&vccin_5v>;
+ };
+
+ vcc5v0_usb30_otg0: regulator-vcc5v0-usb30-otg0 {
+ compatible = "regulator-fixed";
+ enable-active-high;
+ gpio = <&gpio0 RK_PA5 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&usb_host_pwren_h>;
+ regulator-name = "vcc5v0_usb30_otg0";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ vin-supply = <&vccin_5v>;
+ };
+
+ vccin_5v: regulator-vccin-5v {
+ compatible = "regulator-fixed";
+ regulator-name = "vccin_5v";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ };
+
+ vcc_sysin: regulator-vcc-sysin {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc_sysin";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ vin-supply = <&vccin_5v>;
+ };
+
+ vcc_syson: regulator-vcc-syson {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc_syson";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ vin-supply = <&vcc_sysin>;
+ };
+
+ vcca_1v8: regulator-vcca-1v8 {
+ compatible = "regulator-fixed";
+ regulator-name = "vcca_1v8";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ vin-supply = <&vcc3v3_sys>;
+ };
+
+ vdda_0v9: regulator-vdda-0v9 {
+ compatible = "regulator-fixed";
+ regulator-name = "vdda_0v9";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <900000>;
+ regulator-max-microvolt = <900000>;
+ vin-supply = <&vcc3v3_sys>;
+ };
+
+ vdd_gpu: regulator-vdd-gpu {
+ compatible = "pwm-regulator";
+ pwms = <&pwm2 0 5000 1>;
+ pwm-supply = <&vcc_syson>;
+ regulator-name = "vdd_gpu";
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <1350000>;
+ regulator-ramp-delay = <6001>;
+ regulator-settling-time-up-us = <250>;
+ };
+
+ vdd_logic: regulator-vdd-logic {
+ compatible = "pwm-regulator";
+ pwms = <&pwm1 0 5000 1>;
+ pwm-supply = <&vcc_syson>;
+ regulator-name = "vdd_logic";
+ regulator-min-microvolt = <500000>;
+ regulator-max-microvolt = <1350000>;
+ regulator-ramp-delay = <6001>;
+ regulator-settling-time-up-us = <250>;
+ };
+
+ rfkill-modem {
+ compatible = "rfkill-gpio";
+ label = "M.2 USB Modem";
+ radio-type = "wwan";
+ shutdown-gpios = <&gpio4 RK_PC4 GPIO_ACTIVE_HIGH>;
+ };
+
+ wifi_pwrseq: wifi-pwrseq {
+ compatible = "mmc-pwrseq-simple";
+ clocks = <&pmucru CLK_RTC_32K>;
+ clock-names = "ext_clock";
+ pinctrl-names = "default";
+ pinctrl-0 = <&wifi_reg_on_h &clk32k_out1>;
+ post-power-on-delay-ms = <200>;
+ reset-gpios = <&gpio2 RK_PB1 GPIO_ACTIVE_LOW>;
+ };
+};
+
+&combphy0 {
+ status = "okay";
+};
+
+&combphy1 {
+ status = "okay";
+};
+
+&combphy2 {
+ status = "okay";
+};
+
+&cpu0 {
+ cpu-supply = <&vdd_cpu>;
+};
+
+&cpu1 {
+ cpu-supply = <&vdd_cpu>;
+};
+
+&cpu2 {
+ cpu-supply = <&vdd_cpu>;
+};
+
+&cpu3 {
+ cpu-supply = <&vdd_cpu>;
+};
+
+/* Motorcomm YT8521SC LAN port (requires SGMII) */
+&gmac0 {
+ status = "disabled";
+};
+
+/* Motorcomm YT8521SC WAN port */
+&gmac1 {
+ assigned-clocks = <&cru SCLK_GMAC1_RX_TX>, <&cru SCLK_GMAC1>;
+ assigned-clock-parents = <&cru SCLK_GMAC1_RGMII_SPEED>;
+ assigned-clock-rates = <0>, <125000000>;
+ clock_in_out = "output";
+ phy-handle = <&rgmii_phy>;
+ phy-mode = "rgmii-id";
+ phy-supply = <&vcc_3v3>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&gmac1m1_miim
+ &gmac1m1_tx_bus2
+ &gmac1m1_rx_bus2
+ &gmac1m1_rgmii_clk
+ &gmac1m1_rgmii_bus>;
+ status = "okay";
+};
+
+&gpu {
+ mali-supply = <&vdd_gpu>;
+ status = "okay";
+};
+
+&hdmi {
+ avdd-0v9-supply = <&vdda_0v9>;
+ avdd-1v8-supply = <&vcca_1v8>;
+ status = "okay";
+};
+
+&hdmi_in {
+ hdmi_in_vp0: endpoint {
+ remote-endpoint = <&vp0_out_hdmi>;
+ };
+};
+
+&hdmi_out {
+ hdmi_out_con: endpoint {
+ remote-endpoint = <&hdmi_con_in>;
+ };
+};
+
+&hdmi_sound {
+ status = "okay";
+};
+
+&i2c0 {
+ status = "okay";
+
+ vdd_cpu: regulator@1c {
+ compatible = "tcs,tcs4525";
+ reg = <0x1c>;
+ fcs,suspend-voltage-selector = <1>;
+ regulator-name = "vdd_cpu";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <1150000>;
+ regulator-ramp-delay = <2300>;
+ vin-supply = <&vcc_syson>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+};
+
+&i2c2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c2m1_xfer>;
+ status = "okay";
+};
+
+&i2s0_8ch {
+ status = "okay";
+};
+
+&mdio1 {
+ rgmii_phy: ethernet-phy@3 {
+ compatible = "ethernet-phy-ieee802.3-c22";
+ reg = <0x3>;
+ reset-assert-us = <20000>;
+ reset-deassert-us = <100000>;
+ reset-gpios = <&gpio4 RK_PC0 GPIO_ACTIVE_LOW>;
+ rx-internal-delay-ps = <1500>;
+ tx-internal-delay-ps = <1500>;
+ };
+};
+
+&pcie30phy {
+ status = "okay";
+};
+
+/* M.2 E-Key for PCIe WLAN */
+&pcie3x2 {
+ max-link-speed = <1>;
+ num-lanes = <1>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pcie30x1m0_pins>;
+ reset-gpios = <&gpio0 RK_PC3 GPIO_ACTIVE_HIGH>;
+ vpcie3v3-supply = <&vcc3v3_pcie>;
+ status = "okay";
+};
+
+&pinctrl {
+ bt {
+ bt_reg_on_h: bt-reg-on-h {
+ rockchip,pins = <2 RK_PB7 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ pcie {
+ pcie_pwren_h: pcie-pwren-h {
+ rockchip,pins = <0 RK_PA6 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ sdmmc0 {
+ sdmmc0_pwren: sdmmc0-pwren {
+ rockchip,pins = <0 RK_PB6 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ usb {
+ rf_pwr_en: rf-pwr-en {
+ rockchip,pins = <4 RK_PC2 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+
+ usb_host_pwren_h: usb-host-pwren-h {
+ rockchip,pins = <0 RK_PA5 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ wifi {
+ wifi_reg_on_h: wifi-reg-on-h {
+ rockchip,pins = <2 RK_PB1 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+};
+
+&pmu_io_domains {
+ pmuio1-supply = <&vcc_3v3>;
+ pmuio2-supply = <&vcc_3v3>;
+ vccio1-supply = <&vcc_3v3>;
+ vccio2-supply = <&vcc_1v8>;
+ vccio3-supply = <&vcc_3v3>;
+ vccio4-supply = <&vcc_1v8>;
+ vccio5-supply = <&vcc_3v3>;
+ vccio6-supply = <&vcc_3v3>;
+ vccio7-supply = <&vcc_3v3>;
+ status = "okay";
+};
+
+&pwm1 {
+ status = "okay";
+};
+
+&pwm2 {
+ status = "okay";
+};
+
+&saradc {
+ vref-supply = <&vcca_1v8>;
+ status = "okay";
+};
+
+/* eMMC */
+&sdhci {
+ bus-width = <8>;
+ max-frequency = <200000000>;
+ mmc-hs200-1_8v;
+ non-removable;
+ pinctrl-names = "default";
+ pinctrl-0 = <&emmc_bus8 &emmc_clk &emmc_cmd>;
+ vmmc-supply = <&vcc_3v3>;
+ vqmmc-supply = <&vcc_1v8>;
+ status = "okay";
+};
+
+/* Micro SD card slot */
+&sdmmc0 {
+ bus-width = <4>;
+ cap-sd-highspeed;
+ cd-gpios = <&gpio0 RK_PB5 GPIO_ACTIVE_LOW>;
+ disable-wp;
+ no-1-8-v;
+ pinctrl-names = "default";
+ pinctrl-0 = <&sdmmc0_bus4 &sdmmc0_clk &sdmmc0_cmd>;
+ vmmc-supply = <&vcc3v3_sd>;
+ vqmmc-supply = <&vcc_3v3>;
+ status = "okay";
+};
+
+/* Qualcomm Atheros QCA9377 WiFi */
+&sdmmc1 {
+ bus-width = <4>;
+ cap-sd-highspeed;
+ cap-sdio-irq;
+ keep-power-in-suspend;
+ mmc-pwrseq = <&wifi_pwrseq>;
+ non-removable;
+ pinctrl-names = "default";
+ pinctrl-0 = <&sdmmc1_bus4 &sdmmc1_cmd &sdmmc1_clk>;
+ sd-uhs-sdr104;
+ vmmc-supply = <&vcc3v3_sys>;
+ vqmmc-supply = <&vcc_1v8>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "okay";
+
+ wifi: wifi@1 {
+ reg = <1>;
+ interrupt-parent = <&gpio2>;
+ interrupts = <RK_PB2 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "host-wake";
+ };
+};
+
+&tsadc {
+ rockchip,hw-tshut-mode = <1>;
+ rockchip,hw-tshut-polarity = <0>;
+ status = "okay";
+};
+
+/* Qualcomm Atheros QCA9377 Bluetooth */
+&uart1 {
+ dma-names = "tx", "rx";
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart1m0_xfer &uart1m0_ctsn &uart1m0_rtsn>;
+ uart-has-rtscts;
+ status = "okay";
+
+ bluetooth {
+ compatible = "qcom,qca9377-bt";
+ clocks = <&pmucru CLK_RTC_32K>;
+ enable-gpios = <&gpio2 RK_PB7 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&bt_reg_on_h>;
+ vddio-supply = <&vcc_1v8>;
+ };
+};
+
+/* Debug UART */
+&uart2 {
+ status = "okay";
+};
+
+&uart3 {
+ dma-names = "tx", "rx";
+ status = "okay";
+};
+
+/* Onboard power management MCU */
+&uart4 {
+ dma-names = "tx", "rx";
+ status = "okay";
+};
+
+/* M.2 E-Key for USB Bluetooth */
+&usb_host0_ehci {
+ status = "okay";
+};
+
+&usb_host0_ohci {
+ status = "okay";
+};
+
+/* USB Type-A Port */
+&usb_host0_xhci {
+ dr_mode = "host";
+ status = "okay";
+};
+
+/* M.2 B-Key for USB Modem WWAN */
+&usb_host1_xhci {
+ status = "okay";
+};
+
+&usb2phy0 {
+ status = "okay";
+};
+
+&usb2phy0_host {
+ phy-supply = <&vcc3v4_rf>;
+ status = "okay";
+};
+
+&usb2phy0_otg {
+ phy-supply = <&vcc5v0_usb30_otg0>;
+ status = "okay";
+};
+
+&usb2phy1 {
+ status = "okay";
+};
+
+&usb2phy1_otg {
+ phy-supply = <&vcc5v0_usb30_otg0>;
+ status = "okay";
+};
+
+&vop {
+ assigned-clocks = <&cru DCLK_VOP0>, <&cru DCLK_VOP1>;
+ assigned-clock-parents = <&pmucru PLL_HPLL>, <&cru PLL_VPLL>;
+ status = "okay";
+};
+
+&vop_mmu {
+ status = "okay";
+};
+
+&vp0 {
+ vp0_out_hdmi: endpoint@ROCKCHIP_VOP2_EP_HDMI0 {
+ reg = <ROCKCHIP_VOP2_EP_HDMI0>;
+ remote-endpoint = <&hdmi_in_vp0>;
+ };
+};
+
+&xin32k {
+ pinctrl-names = "default";
+ pinctrl-0 = <&clk32k_out1>;
+};
diff --git a/arch/arm64/boot/dts/rockchip/rk3568-rock-3a.dts b/arch/arm64/boot/dts/rockchip/rk3568-rock-3a.dts
index ac79140a9ecd..44cfdfeed668 100644
--- a/arch/arm64/boot/dts/rockchip/rk3568-rock-3a.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3568-rock-3a.dts
@@ -778,20 +778,6 @@
pinctrl-0 = <&uart1m0_xfer &uart1m0_ctsn &uart1m0_rtsn>;
uart-has-rtscts;
status = "okay";
-
- bluetooth {
- compatible = "brcm,bcm43438-bt";
- clocks = <&rk809 1>;
- clock-names = "lpo";
- device-wakeup-gpios = <&gpio4 RK_PB5 GPIO_ACTIVE_HIGH>;
- host-wakeup-gpios = <&gpio4 RK_PB4 GPIO_ACTIVE_HIGH>;
- shutdown-gpios = <&gpio4 RK_PB2 GPIO_ACTIVE_HIGH>;
- pinctrl-names = "default";
- pinctrl-0 = <&bt_host_wake &bt_wake &bt_enable>;
- vbat-supply = <&vcc3v3_sys>;
- vddio-supply = <&vcc_1v8>;
- /* vddio comes from regulator on module, use IO bank voltage instead */
- };
};
&uart2 {
diff --git a/arch/arm64/boot/dts/rockchip/rk356x-base.dtsi b/arch/arm64/boot/dts/rockchip/rk356x-base.dtsi
index e55390629114..fd2214b6fad4 100644
--- a/arch/arm64/boot/dts/rockchip/rk356x-base.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk356x-base.dtsi
@@ -174,6 +174,18 @@
method = "smc";
};
+ reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ scmi_shmem: shmem@10f000 {
+ compatible = "arm,scmi-shmem";
+ reg = <0x0 0x0010f000 0x0 0x100>;
+ no-map;
+ };
+ };
+
timer {
compatible = "arm,armv8-timer";
interrupts = <GIC_PPI 13 IRQ_TYPE_LEVEL_HIGH>,
@@ -199,19 +211,6 @@
#clock-cells = <0>;
};
- sram@10f000 {
- compatible = "mmio-sram";
- reg = <0x0 0x0010f000 0x0 0x100>;
- #address-cells = <1>;
- #size-cells = <1>;
- ranges = <0 0x0 0x0010f000 0x100>;
-
- scmi_shmem: sram@0 {
- compatible = "arm,scmi-shmem";
- reg = <0x0 0x100>;
- };
- };
-
sata1: sata@fc400000 {
compatible = "rockchip,rk3568-dwc-ahci", "snps,dwc-ahci";
reg = <0 0xfc400000 0 0x1000>;
@@ -284,6 +283,18 @@
mbi-alias = <0x0 0xfd410000>;
mbi-ranges = <296 24>;
msi-controller;
+ ranges;
+ #address-cells = <2>;
+ #size-cells = <2>;
+ dma-noncoherent;
+
+ its: msi-controller@fd440000 {
+ compatible = "arm,gic-v3-its";
+ reg = <0x0 0xfd440000 0 0x20000>;
+ dma-noncoherent;
+ msi-controller;
+ #msi-cells = <1>;
+ };
};
usb_host0_ehci: usb@fd800000 {
@@ -957,7 +968,7 @@
num-ib-windows = <6>;
num-ob-windows = <2>;
max-link-speed = <2>;
- msi-map = <0x0 &gic 0x0 0x1000>;
+ msi-map = <0x0 &its 0x0 0x1000>;
num-lanes = <1>;
phys = <&combphy2 PHY_TYPE_PCIE>;
phy-names = "pcie-phy";
@@ -1032,6 +1043,11 @@
status = "disabled";
};
+ /*
+	 * Testing showed that the HWRNG found in the RK3566 produces random
+	 * data of unacceptably low quality, so the HWRNG isn't enabled for all
+	 * RK356x SoC variants despite its presence.
+ */
rng: rng@fe388000 {
compatible = "rockchip,rk3568-rng";
reg = <0x0 0xfe388000 0x0 0x4000>;
diff --git a/arch/arm64/boot/dts/rockchip/rk3576-armsom-sige5.dts b/arch/arm64/boot/dts/rockchip/rk3576-armsom-sige5.dts
index 7c7331936a7f..828bde7fab68 100644
--- a/arch/arm64/boot/dts/rockchip/rk3576-armsom-sige5.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3576-armsom-sige5.dts
@@ -10,6 +10,7 @@
#include <dt-bindings/leds/common.h>
#include <dt-bindings/pinctrl/rockchip.h>
#include <dt-bindings/pwm/pwm.h>
+#include <dt-bindings/soc/rockchip,vop2.h>
#include <dt-bindings/usb/pd.h>
#include "rk3576.dtsi"
@@ -26,6 +27,17 @@
stdout-path = "serial0:1500000n8";
};
+ hdmi-con {
+ compatible = "hdmi-connector";
+ type = "a";
+
+ port {
+ hdmi_con_in: endpoint {
+ remote-endpoint = <&hdmi_out_con>;
+ };
+ };
+ };
+
leds: leds {
compatible = "gpio-leds";
@@ -182,8 +194,7 @@
&eth0m0_tx_bus2
&eth0m0_rx_bus2
&eth0m0_rgmii_clk
- &eth0m0_rgmii_bus
- &ethm0_clk0_25m_out>;
+ &eth0m0_rgmii_bus>;
phy-handle = <&rgmii_phy0>;
status = "okay";
@@ -214,6 +225,26 @@
status = "okay";
};
+&hdmi {
+ status = "okay";
+};
+
+&hdmi_in {
+ hdmi_in_vp0: endpoint {
+ remote-endpoint = <&vp0_out_hdmi>;
+ };
+};
+
+&hdmi_out {
+ hdmi_out_con: endpoint {
+ remote-endpoint = <&hdmi_con_in>;
+ };
+};
+
+&hdptxphy {
+ status = "okay";
+};
+
&i2c1 {
status = "okay";
@@ -656,3 +687,18 @@
pinctrl-0 = <&uart0m0_xfer>;
status = "okay";
};
+
+&vop {
+ status = "okay";
+};
+
+&vop_mmu {
+ status = "okay";
+};
+
+&vp0 {
+ vp0_out_hdmi: endpoint@ROCKCHIP_VOP2_EP_HDMI0 {
+ reg = <ROCKCHIP_VOP2_EP_HDMI0>;
+ remote-endpoint = <&hdmi_in_vp0>;
+ };
+};
diff --git a/arch/arm64/boot/dts/rockchip/rk3576-evb1-v10.dts b/arch/arm64/boot/dts/rockchip/rk3576-evb1-v10.dts
index 782ca000a644..e368691fd28e 100644
--- a/arch/arm64/boot/dts/rockchip/rk3576-evb1-v10.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3576-evb1-v10.dts
@@ -10,6 +10,7 @@
#include <dt-bindings/input/input.h>
#include <dt-bindings/leds/common.h>
#include <dt-bindings/pinctrl/rockchip.h>
+#include <dt-bindings/soc/rockchip,vop2.h>
#include "rk3576.dtsi"
/ {
@@ -57,6 +58,17 @@
};
};
+ hdmi-con {
+ compatible = "hdmi-connector";
+ type = "a";
+
+ port {
+ hdmi_con_in: endpoint {
+ remote-endpoint = <&hdmi_out_con>;
+ };
+ };
+ };
+
leds: leds {
compatible = "gpio-leds";
@@ -270,6 +282,26 @@
status = "okay";
};
+&hdmi {
+ status = "okay";
+};
+
+&hdmi_in {
+ hdmi_in_vp0: endpoint {
+ remote-endpoint = <&vp0_out_hdmi>;
+ };
+};
+
+&hdmi_out {
+ hdmi_out_con: endpoint {
+ remote-endpoint = <&hdmi_con_in>;
+ };
+};
+
+&hdptxphy {
+ status = "okay";
+};
+
&i2c1 {
status = "okay";
@@ -729,3 +761,18 @@
dr_mode = "host";
status = "okay";
};
+
+&vop {
+ status = "okay";
+};
+
+&vop_mmu {
+ status = "okay";
+};
+
+&vp0 {
+ vp0_out_hdmi: endpoint@ROCKCHIP_VOP2_EP_HDMI0 {
+ reg = <ROCKCHIP_VOP2_EP_HDMI0>;
+ remote-endpoint = <&hdmi_in_vp0>;
+ };
+};
diff --git a/arch/arm64/boot/dts/rockchip/rk3576-roc-pc.dts b/arch/arm64/boot/dts/rockchip/rk3576-roc-pc.dts
new file mode 100644
index 000000000000..612b7bb0b749
--- /dev/null
+++ b/arch/arm64/boot/dts/rockchip/rk3576-roc-pc.dts
@@ -0,0 +1,736 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright (c) 2024 Firefly Technology Co. Ltd
+ * Copyright (c) 2024 Heiko Stuebner <heiko@sntech.de>
+ */
+
+/dts-v1/;
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/pinctrl/rockchip.h>
+#include <dt-bindings/pwm/pwm.h>
+#include <dt-bindings/usb/pd.h>
+#include "rk3576.dtsi"
+
+/ {
+ model = "Firefly ROC-RK3576-PC";
+ compatible = "firefly,roc-rk3576-pc", "rockchip,rk3576";
+
+ aliases {
+ mmc0 = &sdhci;
+ mmc1 = &sdmmc;
+ };
+
+ chosen {
+ stdout-path = "serial0:1500000n8";
+ };
+
+ adc-keys-0 {
+ compatible = "adc-keys";
+ io-channels = <&saradc 0>;
+ io-channel-names = "buttons";
+ keyup-threshold-microvolt = <1800000>;
+ poll-interval = <100>;
+
+ button-maskrom {
+ label = "Maskrom";
+ linux,code = <KEY_SETUP>;
+ press-threshold-microvolt = <17000>;
+ };
+ };
+
+ adc-keys-1 {
+ compatible = "adc-keys";
+ io-channels = <&saradc 1>;
+ io-channel-names = "buttons";
+ keyup-threshold-microvolt = <1800000>;
+ poll-interval = <100>;
+
+ button-recovery {
+ label = "Recovery";
+ linux,code = <KEY_VENDOR>;
+ press-threshold-microvolt = <17000>;
+ };
+ };
+
+ vbus5v0_typec: regulator-vbus5v0-typec {
+ compatible = "regulator-fixed";
+ enable-active-high;
+ gpio = <&gpio0 RK_PD1 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&usb_otg0_pwren_h>;
+ regulator-name = "vbus5v0_typec";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ vin-supply = <&vcc5v0_device_s0>;
+ };
+
+ vcc12v_dcin: regulator-vcc12v-dcin {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc12v_dcin";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <12000000>;
+ regulator-max-microvolt = <12000000>;
+ };
+
+ vcc1v2_ufs_vccq_s0: regulator-vcc1v2-ufs-vccq-s0 {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc1v2_ufs_vccq_s0";
+ regulator-boot-on;
+ regulator-always-on;
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ vin-supply = <&vcc5v0_sys_s5>;
+ };
+
+ vcc1v8_ufs_vccq2_s0: regulator-vcc1v8-ufs-vccq2-s0 {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc1v8_ufs_vccq2_s0";
+ regulator-boot-on;
+ regulator-always-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ vin-supply = <&vcc_1v8_s3>;
+ };
+
+ vcc3v3_pcie: regulator-vcc3v3-pcie {
+ compatible = "regulator-fixed";
+ enable-active-high;
+ gpio = <&gpio2 RK_PB3 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pcie_pwren_h>;
+ regulator-name = "vcc3v3_pcie";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ startup-delay-us = <5000>;
+ vin-supply = <&vcc12v_dcin>;
+ };
+
+ vcc3v3_rtc_s5: regulator-vcc3v3-rtc-s5 {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc3v3_rtc_s5";
+ regulator-boot-on;
+ regulator-always-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ vin-supply = <&vcc5v0_sys_s5>;
+ };
+
+ vcc5v0_device_s0: regulator-vcc5v0-device-s0 {
+ compatible = "regulator-fixed";
+ enable-active-high;
+ gpio = <&gpio2 RK_PC0 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&vcc5vd_en>;
+ regulator-name = "vcc5v0_device";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ vin-supply = <&vcc12v_dcin>;
+ };
+
+ vcc5v0_sys_s5: regulator-vcc5v0-sys-s5 {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc_sys";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ vin-supply = <&vcc12v_dcin>;
+ };
+
+ vcc5v0_usb20_host1: regulator-vcc5v0-usb20-host1 {
+ compatible = "regulator-fixed";
+ enable-active-high;
+ gpio = <&gpio0 RK_PC7 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&usb3_host_pwren_h>;
+ regulator-name = "vcc5v0_host1";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ vin-supply = <&vcc5v0_device_s0>;
+ };
+
+ vcc_1v1_nldo_s3: regulator-vcc-1v1-nldo-s3 {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc_1v1_nldo_s3";
+ regulator-boot-on;
+ regulator-always-on;
+ regulator-min-microvolt = <1100000>;
+ regulator-max-microvolt = <1100000>;
+ vin-supply = <&vcc5v0_sys_s5>;
+ };
+
+ vcc_1v8_s0: regulator-vcc-1v8-s0 {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc_1v8_s0";
+ regulator-boot-on;
+ regulator-always-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ vin-supply = <&vcc_1v8_s3>;
+ };
+
+ vcc_2v0_pldo_s3: regulator-vcc-2v0-pldo-s3 {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc_2v0_pldo_s3";
+ regulator-boot-on;
+ regulator-always-on;
+ regulator-min-microvolt = <2000000>;
+ regulator-max-microvolt = <2000000>;
+ vin-supply = <&vcc5v0_sys_s5>;
+ };
+
+ vcc_3v3_s0: regulator-vcc-3v3-s0 {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc_3v3_s0";
+ regulator-boot-on;
+ regulator-always-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ vin-supply = <&vcc_3v3_s3>;
+ };
+
+ vcc_ufs_s0: regulator-vcc-ufs-s0 {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc_ufs_s0";
+ regulator-boot-on;
+ regulator-always-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ vin-supply = <&vcc5v0_sys_s5>;
+ };
+};
+
+&cpu_l0 {
+ cpu-supply = <&vdd_cpu_lit_s0>;
+};
+
+&cpu_l1 {
+ cpu-supply = <&vdd_cpu_lit_s0>;
+};
+
+&cpu_l2 {
+ cpu-supply = <&vdd_cpu_lit_s0>;
+};
+
+&cpu_l3 {
+ cpu-supply = <&vdd_cpu_lit_s0>;
+};
+
+&cpu_b0 {
+ cpu-supply = <&vdd_cpu_big_s0>;
+};
+
+&cpu_b1 {
+ cpu-supply = <&vdd_cpu_big_s0>;
+};
+
+&cpu_b2 {
+ cpu-supply = <&vdd_cpu_big_s0>;
+};
+
+&cpu_b3 {
+ cpu-supply = <&vdd_cpu_big_s0>;
+};
+
+&gpu {
+ mali-supply = <&vdd_gpu_s0>;
+ status = "okay";
+};
+
+&gmac0 {
+ clock_in_out = "output";
+ pinctrl-names = "default";
+ pinctrl-0 = <&eth0m0_miim
+ &eth0m0_tx_bus2
+ &eth0m0_rx_bus2
+ &eth0m0_rgmii_clk
+ &eth0m0_rgmii_bus
+ &ethm0_clk0_25m_out>;
+	/* Use rgmii-rxid mode to disable the RX delay inside the SoC */
+ phy-mode = "rgmii-rxid";
+ phy-handle = <&rgmii_phy0>;
+ tx_delay = <0x21>;
+ status = "okay";
+};
+
+&mdio0 {
+ status = "okay";
+
+ rgmii_phy0: phy@1 {
+ compatible = "ethernet-phy-ieee802.3-c22";
+ reg = <0x1>;
+ clocks = <&cru REFCLKO25M_GMAC0_OUT>;
+		/* Reset assert time is 20ms, post-reset delay is 100ms for the RTL8211F */
+ reset-delay-us = <20000>;
+ reset-gpios = <&gpio2 RK_PB5 GPIO_ACTIVE_LOW>;
+ reset-post-delay-us = <100000>;
+ };
+};
+
+&i2c1 {
+ status = "okay";
+
+ pmic@23 {
+ compatible = "rockchip,rk806";
+ reg = <0x23>;
+ interrupt-parent = <&gpio0>;
+ interrupts = <6 IRQ_TYPE_LEVEL_LOW>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pmic_pins>, <&rk806_dvs1_null>,
+ <&rk806_dvs2_null>, <&rk806_dvs3_null>;
+ system-power-controller;
+
+ vcc1-supply = <&vcc5v0_sys_s5>;
+ vcc2-supply = <&vcc5v0_sys_s5>;
+ vcc3-supply = <&vcc5v0_sys_s5>;
+ vcc4-supply = <&vcc5v0_sys_s5>;
+ vcc5-supply = <&vcc5v0_sys_s5>;
+ vcc6-supply = <&vcc5v0_sys_s5>;
+ vcc7-supply = <&vcc5v0_sys_s5>;
+ vcc8-supply = <&vcc5v0_sys_s5>;
+ vcc9-supply = <&vcc5v0_sys_s5>;
+ vcc10-supply = <&vcc5v0_sys_s5>;
+ vcc11-supply = <&vcc_2v0_pldo_s3>;
+ vcc12-supply = <&vcc5v0_sys_s5>;
+ vcc13-supply = <&vcc_1v1_nldo_s3>;
+ vcc14-supply = <&vcc_1v1_nldo_s3>;
+ vcca-supply = <&vcc5v0_sys_s5>;
+
+ rk806_dvs1_null: dvs1-null-pins {
+ pins = "gpio_pwrctrl1";
+ function = "pin_fun0";
+ };
+
+ rk806_dvs2_null: dvs2-null-pins {
+ pins = "gpio_pwrctrl2";
+ function = "pin_fun0";
+ };
+
+ rk806_dvs3_null: dvs3-null-pins {
+ pins = "gpio_pwrctrl3";
+ function = "pin_fun0";
+ };
+
+ rk806_dvs1_slp: dvs1-slp-pins {
+ pins = "gpio_pwrctrl1";
+ function = "pin_fun1";
+ };
+
+ rk806_dvs1_pwrdn: dvs1-pwrdn-pins {
+ pins = "gpio_pwrctrl1";
+ function = "pin_fun2";
+ };
+
+ rk806_dvs1_rst: dvs1-rst-pins {
+ pins = "gpio_pwrctrl1";
+ function = "pin_fun3";
+ };
+
+ rk806_dvs2_slp: dvs2-slp-pins {
+ pins = "gpio_pwrctrl2";
+ function = "pin_fun1";
+ };
+
+ rk806_dvs2_pwrdn: dvs2-pwrdn-pins {
+ pins = "gpio_pwrctrl2";
+ function = "pin_fun2";
+ };
+
+ rk806_dvs2_rst: dvs2-rst-pins {
+ pins = "gpio_pwrctrl2";
+ function = "pin_fun3";
+ };
+
+ rk806_dvs2_dvs: dvs2-dvs-pins {
+ pins = "gpio_pwrctrl2";
+ function = "pin_fun4";
+ };
+
+ rk806_dvs2_gpio: dvs2-gpio-pins {
+ pins = "gpio_pwrctrl2";
+ function = "pin_fun5";
+ };
+
+ rk806_dvs3_slp: dvs3-slp-pins {
+ pins = "gpio_pwrctrl3";
+ function = "pin_fun1";
+ };
+
+ rk806_dvs3_pwrdn: dvs3-pwrdn-pins {
+ pins = "gpio_pwrctrl3";
+ function = "pin_fun2";
+ };
+
+ rk806_dvs3_rst: dvs3-rst-pins {
+ pins = "gpio_pwrctrl3";
+ function = "pin_fun3";
+ };
+
+ rk806_dvs3_dvs: dvs3-dvs-pins {
+ pins = "gpio_pwrctrl3";
+ function = "pin_fun4";
+ };
+
+ rk806_dvs3_gpio: dvs3-gpio-pins {
+ pins = "gpio_pwrctrl3";
+ function = "pin_fun5";
+ };
+
+ regulators {
+ vdd_cpu_big_s0: dcdc-reg1 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <550000>;
+ regulator-max-microvolt = <950000>;
+ regulator-ramp-delay = <12500>;
+ regulator-name = "vdd_cpu_big_s0";
+ regulator-enable-ramp-delay = <400>;
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdd_npu_s0: dcdc-reg2 {
+ regulator-boot-on;
+ regulator-min-microvolt = <550000>;
+ regulator-max-microvolt = <950000>;
+ regulator-ramp-delay = <12500>;
+ regulator-name = "vdd_npu_s0";
+ regulator-enable-ramp-delay = <400>;
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdd_cpu_lit_s0: dcdc-reg3 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <550000>;
+ regulator-max-microvolt = <950000>;
+ regulator-ramp-delay = <12500>;
+ regulator-name = "vdd_cpu_lit_s0";
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ regulator-suspend-microvolt = <750000>;
+ };
+ };
+
+ vcc_3v3_s3: dcdc-reg4 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-name = "vcc_3v3_s3";
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <3300000>;
+ };
+ };
+
+ vdd_gpu_s0: dcdc-reg5 {
+ regulator-boot-on;
+ regulator-min-microvolt = <550000>;
+ regulator-max-microvolt = <900000>;
+ regulator-ramp-delay = <12500>;
+ regulator-name = "vdd_gpu_s0";
+ regulator-enable-ramp-delay = <400>;
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ regulator-suspend-microvolt = <850000>;
+ };
+ };
+
+ vddq_ddr_s0: dcdc-reg6 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-name = "vddq_ddr_s0";
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdd_logic_s0: dcdc-reg7 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <550000>;
+ regulator-max-microvolt = <800000>;
+ regulator-name = "vdd_logic_s0";
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcc_1v8_s3: dcdc-reg8 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-name = "vcc_1v8_s3";
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <1800000>;
+ };
+ };
+
+ vdd2_ddr_s3: dcdc-reg9 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-name = "vdd2_ddr_s3";
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ };
+ };
+
+ vdd_ddr_s0: dcdc-reg10 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <550000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-name = "vdd_ddr_s0";
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcca_1v8_s0: pldo-reg1 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-name = "vcca_1v8_s0";
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcca1v8_pldo2_s0: pldo-reg2 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-name = "vcca1v8_pldo2_s0";
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdda_1v2_s0: pldo-reg3 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-name = "vdda_1v2_s0";
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcca_3v3_s0: pldo-reg4 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-name = "vcca_3v3_s0";
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vccio_sd_s0: pldo-reg5 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-name = "vccio_sd_s0";
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcca1v8_pldo6_s3: pldo-reg6 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-name = "vcca1v8_pldo6_s3";
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <1800000>;
+ };
+ };
+
+ vdd_0v75_s3: nldo-reg1 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <750000>;
+ regulator-max-microvolt = <750000>;
+ regulator-name = "vdd_0v75_s3";
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <750000>;
+ };
+ };
+
+ vdda_ddr_pll_s0: nldo-reg2 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <850000>;
+ regulator-max-microvolt = <850000>;
+ regulator-name = "vdda_ddr_pll_s0";
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdda0v75_hdmi_s0: nldo-reg3 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <837500>;
+ regulator-max-microvolt = <837500>;
+ regulator-name = "vdda0v75_hdmi_s0";
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdda_0v85_s0: nldo-reg4 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <850000>;
+ regulator-max-microvolt = <850000>;
+ regulator-name = "vdda_0v85_s0";
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdda_0v75_s0: nldo-reg5 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <750000>;
+ regulator-max-microvolt = <750000>;
+ regulator-name = "vdda_0v75_s0";
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+ };
+ };
+};
+
+&i2c2 {
+ status = "okay";
+
+ /* pc9202 watchdog@3c with enable-gpio gpio0-c3 */
+
+ /* hnyetek,husb311 typec-portc@4e */
+
+ hym8563: rtc@51 {
+ compatible = "haoyu,hym8563";
+ reg = <0x51>;
+ #clock-cells = <0>;
+ clock-output-names = "hym8563";
+ pinctrl-names = "default";
+ pinctrl-0 = <&rtc_int_l>;
+ interrupt-parent = <&gpio0>;
+ interrupts = <RK_PA0 IRQ_TYPE_LEVEL_LOW>;
+ wakeup-source;
+ };
+};
+
+&saradc {
+ vref-supply = <&vcca_1v8_s0>;
+ status = "okay";
+};
+
+&sdhci {
+ bus-width = <8>;
+ no-sdio;
+ no-sd;
+ non-removable;
+ max-frequency = <200000000>;
+ mmc-hs400-1_8v;
+ mmc-hs400-enhanced-strobe;
+ full-pwr-cycle-in-suspend;
+ status = "okay";
+};
+
+&sdmmc {
+ max-frequency = <200000000>;
+ no-sdio;
+ no-mmc;
+ bus-width = <4>;
+ cap-mmc-highspeed;
+ cap-sd-highspeed;
+ disable-wp;
+ sd-uhs-sdr104;
+ vqmmc-supply = <&vccio_sd_s0>;
+ status = "okay";
+};
+
+&pinctrl {
+ hym8563 {
+ rtc_int_l: rtc-int-l {
+ rockchip,pins = <0 RK_PA0 RK_FUNC_GPIO &pcfg_pull_up>;
+ };
+ };
+
+ power {
+ vcc5vd_en: vcc5vd-en {
+ rockchip,pins = <2 RK_PC0 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+
+ pcie_pwren_h: pcie-pwren-h {
+ rockchip,pins = <2 RK_PB3 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ usb {
+ hub_reset_h: hub-reset-h {
+ rockchip,pins = <2 RK_PB1 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+
+ usb3_host_pwren_h: usb3-host-pwren-h {
+ rockchip,pins = <0 RK_PC7 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+
+ usb_otg0_pwren_h: usb-otg0-pwren-h {
+ rockchip,pins = <0 RK_PD1 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+
+ usbc0_int_l: usbc0-int-l {
+ rockchip,pins = <0 RK_PA5 RK_FUNC_GPIO &pcfg_pull_up>;
+ };
+ };
+
+ watchdog {
+ wd_en: wd-en {
+ rockchip,pins = <0 RK_PC3 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+};
+
+&uart0 {
+ pinctrl-0 = <&uart0m0_xfer>;
+ status = "okay";
+};
+
+&uart4 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart4m1_xfer &uart4m1_ctsn>;
+ status = "okay";
+};
+
+/* On the extension pin header */
+&uart6 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart6m3_xfer>;
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/rockchip/rk3576-rock-4d.dts b/arch/arm64/boot/dts/rockchip/rk3576-rock-4d.dts
new file mode 100644
index 000000000000..6756403111e7
--- /dev/null
+++ b/arch/arm64/boot/dts/rockchip/rk3576-rock-4d.dts
@@ -0,0 +1,751 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright (c) 2024 Radxa Computer (Shenzhen) Co., Ltd.
+ */
+
+/dts-v1/;
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/leds/common.h>
+#include <dt-bindings/pinctrl/rockchip.h>
+#include <dt-bindings/pwm/pwm.h>
+#include <dt-bindings/soc/rockchip,vop2.h>
+#include <dt-bindings/usb/pd.h>
+#include "rk3576.dtsi"
+
+/ {
+ model = "Radxa ROCK 4D";
+ compatible = "radxa,rock-4d", "rockchip,rk3576";
+
+ aliases {
+ ethernet0 = &gmac0;
+ mmc0 = &sdmmc;
+ };
+
+ chosen {
+ stdout-path = "serial0:1500000n8";
+ };
+
+ hdmi-con {
+ compatible = "hdmi-connector";
+ type = "a";
+
+ port {
+ hdmi_con_in: endpoint {
+ remote-endpoint = <&hdmi_out_con>;
+ };
+ };
+ };
+
+ leds: leds {
+ compatible = "gpio-leds";
+ pinctrl-names = "default";
+ pinctrl-0 = <&led_rgb_g &led_rgb_r>;
+
+ power-led {
+ color = <LED_COLOR_ID_GREEN>;
+ function = LED_FUNCTION_STATUS;
+ gpios = <&gpio0 RK_PB4 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "default-on";
+ };
+
+ user-led {
+ color = <LED_COLOR_ID_BLUE>;
+ function = LED_FUNCTION_HEARTBEAT;
+ gpios = <&gpio0 RK_PC4 GPIO_ACTIVE_LOW>;
+ linux,default-trigger = "heartbeat";
+ };
+ };
+
+ vcc_12v0_dcin: regulator-vcc-12v0-dcin {
+ compatible = "regulator-fixed";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <12000000>;
+ regulator-max-microvolt = <12000000>;
+ regulator-name = "vcc_12v0_dcin";
+ };
+
+ vcc_1v1_nldo_s3: regulator-vcc-1v1-nldo-s3 {
+ compatible = "regulator-fixed";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1100000>;
+ regulator-max-microvolt = <1100000>;
+ regulator-name = "vcc_1v1_nldo_s3";
+ vin-supply = <&vcc_5v0_sys>;
+ };
+
+ vcc_1v2_ufs_vccq_s0: regulator-vcc-1v2-ufs-vccq-s0 {
+ compatible = "regulator-fixed";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-name = "vcc_1v2_ufs_vccq_s0";
+ vin-supply = <&vcc_5v0_sys>;
+ };
+
+ vcc_1v8_s0: regulator-vcc-1v8-s0 {
+ compatible = "regulator-fixed";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-name = "vcc_1v8_s0";
+ vin-supply = <&vcc_1v8_s3>;
+ };
+
+ vcc_1v8_ufs_vccq2_s0: regulator-vcc-1v8-ufs-vccq2-s0 {
+ compatible = "regulator-fixed";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-name = "vcc_1v8_ufs_vccq2_s0";
+ vin-supply = <&vcc_1v8_s3>;
+ };
+
+ vcc_2v0_pldo_s3: regulator-vcc-2v0-pldo-s3 {
+ compatible = "regulator-fixed";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <2000000>;
+ regulator-max-microvolt = <2000000>;
+ regulator-name = "vcc_2v0_pldo_s3";
+ vin-supply = <&vcc_5v0_sys>;
+ };
+
+ vcc_3v3_pcie: regulator-vcc-3v3-pcie {
+ compatible = "regulator-fixed";
+ enable-active-high;
+ gpios = <&gpio2 RK_PD3 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pcie_pwren>;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-name = "vcc_3v3_pcie";
+ startup-delay-us = <5000>;
+ vin-supply = <&vcc_5v0_sys>;
+ };
+
+ vcc_3v3_rtc_s5: regulator-vcc-3v3-rtc-s5 {
+ compatible = "regulator-fixed";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-name = "vcc_3v3_rtc_s5";
+ vin-supply = <&vcc_5v0_sys>;
+ };
+
+ vcc_3v3_s0: regulator-vcc-3v3-s0 {
+ compatible = "regulator-fixed";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-name = "vcc_3v3_s0";
+ vin-supply = <&vcc_3v3_s3>;
+ };
+
+ vcc_3v3_ufs_s0: regulator-vcc-ufs-s0 {
+ compatible = "regulator-fixed";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-name = "vcc_3v3_ufs_s0";
+ vin-supply = <&vcc_5v0_sys>;
+ };
+
+ vcc_5v0_device: regulator-vcc-5v0-device {
+ compatible = "regulator-fixed";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ regulator-name = "vcc_5v0_device";
+ vin-supply = <&vcc_12v0_dcin>;
+ };
+
+ vcc_5v0_host: regulator-vcc-5v0-host {
+ compatible = "regulator-fixed";
+ enable-active-high;
+ gpios = <&gpio0 RK_PD3 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&usb_host_pwren>;
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ regulator-name = "vcc5v0_host";
+ vin-supply = <&vcc_5v0_device>;
+ };
+
+ vcc_5v0_sys: regulator-vcc-5v0-sys {
+ compatible = "regulator-fixed";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ regulator-name = "vcc_5v0_sys";
+ vin-supply = <&vcc_12v0_dcin>;
+ };
+};
+
+&combphy1_psu {
+ status = "okay";
+};
+
+&cpu_b0 {
+ cpu-supply = <&vdd_cpu_big_s0>;
+};
+
+&cpu_b1 {
+ cpu-supply = <&vdd_cpu_big_s0>;
+};
+
+&cpu_b2 {
+ cpu-supply = <&vdd_cpu_big_s0>;
+};
+
+&cpu_b3 {
+ cpu-supply = <&vdd_cpu_big_s0>;
+};
+
+&cpu_l0 {
+ cpu-supply = <&vdd_cpu_lit_s0>;
+};
+
+&cpu_l1 {
+ cpu-supply = <&vdd_cpu_lit_s0>;
+};
+
+&cpu_l2 {
+ cpu-supply = <&vdd_cpu_lit_s0>;
+};
+
+&cpu_l3 {
+ cpu-supply = <&vdd_cpu_lit_s0>;
+};
+
+&gmac0 {
+ clock_in_out = "output";
+ phy-handle = <&rgmii_phy0>;
+ phy-mode = "rgmii-id";
+ pinctrl-names = "default";
+ pinctrl-0 = <&eth0m0_miim
+ &eth0m0_tx_bus2
+ &eth0m0_rx_bus2
+ &eth0m0_rgmii_clk
+ &eth0m0_rgmii_bus
+ &ethm0_clk0_25m_out>;
+ status = "okay";
+};
+
+&gpu {
+ mali-supply = <&vdd_gpu_s0>;
+ status = "okay";
+};
+
+&hdmi {
+ status = "okay";
+};
+
+&hdmi_in {
+ hdmi_in_vp0: endpoint {
+ remote-endpoint = <&vp0_out_hdmi>;
+ };
+};
+
+&hdmi_out {
+ hdmi_out_con: endpoint {
+ remote-endpoint = <&hdmi_con_in>;
+ };
+};
+
+&hdptxphy {
+ status = "okay";
+};
+
+&i2c1 {
+ status = "okay";
+
+ pmic@23 {
+ compatible = "rockchip,rk806";
+ reg = <0x23>;
+ #gpio-cells = <2>;
+ gpio-controller;
+ interrupt-parent = <&gpio0>;
+ interrupts = <6 IRQ_TYPE_LEVEL_LOW>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pmic_pins
+ &rk806_dvs1_null
+ &rk806_dvs2_null
+ &rk806_dvs3_null>;
+ system-power-controller;
+ vcc1-supply = <&vcc_5v0_sys>;
+ vcc2-supply = <&vcc_5v0_sys>;
+ vcc3-supply = <&vcc_5v0_sys>;
+ vcc4-supply = <&vcc_5v0_sys>;
+ vcc5-supply = <&vcc_5v0_sys>;
+ vcc6-supply = <&vcc_5v0_sys>;
+ vcc7-supply = <&vcc_5v0_sys>;
+ vcc8-supply = <&vcc_5v0_sys>;
+ vcc9-supply = <&vcc_5v0_sys>;
+ vcc10-supply = <&vcc_5v0_sys>;
+ vcc11-supply = <&vcc_2v0_pldo_s3>;
+ vcc12-supply = <&vcc_5v0_sys>;
+ vcc13-supply = <&vcc_1v1_nldo_s3>;
+ vcc14-supply = <&vcc_1v1_nldo_s3>;
+ vcca-supply = <&vcc_5v0_sys>;
+
+ rk806_dvs1_null: dvs1-null-pins {
+ pins = "gpio_pwrctrl1";
+ function = "pin_fun0";
+ };
+
+ rk806_dvs1_pwrdn: dvs1-pwrdn-pins {
+ pins = "gpio_pwrctrl1";
+ function = "pin_fun2";
+ };
+
+ rk806_dvs1_rst: dvs1-rst-pins {
+ pins = "gpio_pwrctrl1";
+ function = "pin_fun3";
+ };
+
+ rk806_dvs1_slp: dvs1-slp-pins {
+ pins = "gpio_pwrctrl1";
+ function = "pin_fun1";
+ };
+
+ rk806_dvs2_dvs: dvs2-dvs-pins {
+ pins = "gpio_pwrctrl2";
+ function = "pin_fun4";
+ };
+
+ rk806_dvs2_gpio: dvs2-gpio-pins {
+ pins = "gpio_pwrctrl2";
+ function = "pin_fun5";
+ };
+
+ rk806_dvs2_null: dvs2-null-pins {
+ pins = "gpio_pwrctrl2";
+ function = "pin_fun0";
+ };
+
+ rk806_dvs2_pwrdn: dvs2-pwrdn-pins {
+ pins = "gpio_pwrctrl2";
+ function = "pin_fun2";
+ };
+
+ rk806_dvs2_rst: dvs2-rst-pins {
+ pins = "gpio_pwrctrl2";
+ function = "pin_fun3";
+ };
+
+ rk806_dvs2_slp: dvs2-slp-pins {
+ pins = "gpio_pwrctrl2";
+ function = "pin_fun1";
+ };
+
+ rk806_dvs3_dvs: dvs3-dvs-pins {
+ pins = "gpio_pwrctrl3";
+ function = "pin_fun4";
+ };
+
+ rk806_dvs3_gpio: dvs3-gpio-pins {
+ pins = "gpio_pwrctrl3";
+ function = "pin_fun5";
+ };
+
+ rk806_dvs3_null: dvs3-null-pins {
+ pins = "gpio_pwrctrl3";
+ function = "pin_fun0";
+ };
+
+ rk806_dvs3_pwrdn: dvs3-pwrdn-pins {
+ pins = "gpio_pwrctrl3";
+ function = "pin_fun2";
+ };
+
+ rk806_dvs3_rst: dvs3-rst-pins {
+ pins = "gpio_pwrctrl3";
+ function = "pin_fun3";
+ };
+
+ rk806_dvs3_slp: dvs3-slp-pins {
+ pins = "gpio_pwrctrl3";
+ function = "pin_fun1";
+ };
+
+ regulators {
+ vdd_cpu_big_s0: dcdc-reg1 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-enable-ramp-delay = <400>;
+ regulator-min-microvolt = <550000>;
+ regulator-max-microvolt = <950000>;
+ regulator-name = "vdd_cpu_big_s0";
+ regulator-ramp-delay = <12500>;
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdd_npu_s0: dcdc-reg2 {
+ regulator-boot-on;
+ regulator-enable-ramp-delay = <400>;
+ regulator-min-microvolt = <550000>;
+ regulator-max-microvolt = <950000>;
+ regulator-name = "vdd_npu_s0";
+ regulator-ramp-delay = <12500>;
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdd_cpu_lit_s0: dcdc-reg3 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <550000>;
+ regulator-max-microvolt = <950000>;
+ regulator-name = "vdd_cpu_lit_s0";
+ regulator-ramp-delay = <12500>;
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ regulator-suspend-microvolt = <750000>;
+ };
+ };
+
+ vcc_3v3_s3: dcdc-reg4 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-name = "vcc_3v3_s3";
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <3300000>;
+ };
+ };
+
+ vdd_gpu_s0: dcdc-reg5 {
+ regulator-boot-on;
+ regulator-enable-ramp-delay = <400>;
+ regulator-min-microvolt = <550000>;
+ regulator-max-microvolt = <900000>;
+ regulator-name = "vdd_gpu_s0";
+ regulator-ramp-delay = <12500>;
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ regulator-suspend-microvolt = <850000>;
+ };
+ };
+
+ vddq_ddr_s0: dcdc-reg6 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-name = "vddq_ddr_s0";
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdd_logic_s0: dcdc-reg7 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <550000>;
+ regulator-max-microvolt = <800000>;
+ regulator-name = "vdd_logic_s0";
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcc_1v8_s3: dcdc-reg8 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-name = "vcc_1v8_s3";
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <1800000>;
+ };
+ };
+
+ vdd2_ddr_s3: dcdc-reg9 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-name = "vdd2_ddr_s3";
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ };
+ };
+
+ vdd_ddr_s0: dcdc-reg10 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <550000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-name = "vdd_ddr_s0";
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcca_1v8_s0: pldo-reg1 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-name = "vcca_1v8_s0";
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcca1v8_pldo2_s0: pldo-reg2 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-name = "vcca1v8_pldo2_s0";
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdda_1v2_s0: pldo-reg3 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-name = "vdda_1v2_s0";
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcca_3v3_s0: pldo-reg4 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-name = "vcca_3v3_s0";
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vccio_sd_s0: pldo-reg5 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-name = "vccio_sd_s0";
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcca1v8_pldo6_s3: pldo-reg6 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-name = "vcca1v8_pldo6_s3";
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <1800000>;
+ };
+ };
+
+ vdd_0v75_s3: nldo-reg1 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <750000>;
+ regulator-max-microvolt = <750000>;
+ regulator-name = "vdd_0v75_s3";
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <750000>;
+ };
+ };
+
+ vdda_ddr_pll_s0: nldo-reg2 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <850000>;
+ regulator-max-microvolt = <850000>;
+ regulator-name = "vdda_ddr_pll_s0";
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdda0v75_hdmi_s0: nldo-reg3 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <837500>;
+ regulator-max-microvolt = <837500>;
+ regulator-name = "vdda0v75_hdmi_s0";
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdda_0v85_s0: nldo-reg4 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <850000>;
+ regulator-max-microvolt = <850000>;
+ regulator-name = "vdda_0v85_s0";
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdda_0v75_s0: nldo-reg5 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <750000>;
+ regulator-max-microvolt = <750000>;
+ regulator-name = "vdda_0v75_s0";
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+ };
+ };
+};
+
+&i2c2 {
+ status = "okay";
+
+ hym8563: rtc@51 {
+ compatible = "haoyu,hym8563";
+ reg = <0x51>;
+ #clock-cells = <0>;
+ clock-output-names = "hym8563";
+ interrupt-parent = <&gpio0>;
+ interrupts = <RK_PA0 IRQ_TYPE_LEVEL_LOW>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&hym8563_int>;
+ wakeup-source;
+ };
+};
+
+&mdio0 {
+ rgmii_phy0: ethernet-phy@1 {
+ compatible = "ethernet-phy-ieee802.3-c22";
+ reg = <0x1>;
+ clocks = <&cru REFCLKO25M_GMAC0_OUT>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&rtl8211f_rst>;
+ reset-assert-us = <20000>;
+ reset-deassert-us = <100000>;
+ reset-gpios = <&gpio2 RK_PB5 GPIO_ACTIVE_LOW>;
+ };
+};
+
+&pinctrl {
+ hym8563 {
+ hym8563_int: hym8563-int {
+ rockchip,pins = <0 RK_PA0 RK_FUNC_GPIO &pcfg_pull_up>;
+ };
+ };
+
+ leds {
+ led_rgb_g: led-green-en {
+ rockchip,pins = <0 RK_PB4 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ led_rgb_r: led-red-en {
+ rockchip,pins = <0 RK_PC4 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ rtl8211f {
+ rtl8211f_rst: rtl8211f-rst {
+ rockchip,pins = <2 RK_PB5 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ pcie {
+ pcie_pwren: pcie-pwren {
+ rockchip,pins = <2 RK_PD3 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ usb {
+ usb_host_pwren: usb-host-pwren {
+ rockchip,pins = <0 RK_PD3 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+};
+
+&sdmmc {
+ bus-width = <4>;
+ cap-mmc-highspeed;
+ cap-sd-highspeed;
+ disable-wp;
+ max-frequency = <200000000>;
+ no-sdio;
+ no-mmc;
+ sd-uhs-sdr104;
+ vmmc-supply = <&vcc_3v3_s3>;
+ vqmmc-supply = <&vccio_sd_s0>;
+ status = "okay";
+};
+
+&sfc0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&fspi0_pins &fspi0_csn0>;
+ status = "okay";
+
+ flash@0 {
+ compatible = "jedec,spi-nor";
+ reg = <0>;
+ spi-max-frequency = <50000000>;
+ spi-rx-bus-width = <4>;
+ spi-tx-bus-width = <1>;
+ vcc-supply = <&vcc_1v8_s3>;
+ };
+};
+
+&u2phy0 {
+ status = "okay";
+};
+
+&u2phy1 {
+ status = "okay";
+};
+
+&uart0 {
+ pinctrl-0 = <&uart0m0_xfer>;
+ status = "okay";
+};
+
+&usb_drd1_dwc3 {
+ dr_mode = "host";
+ status = "okay";
+};
+
+&vop {
+ status = "okay";
+};
+
+&vop_mmu {
+ status = "okay";
+};
+
+&vp0 {
+ vp0_out_hdmi: endpoint@ROCKCHIP_VOP2_EP_HDMI0 {
+ reg = <ROCKCHIP_VOP2_EP_HDMI0>;
+ remote-endpoint = <&hdmi_in_vp0>;
+ };
+};
diff --git a/arch/arm64/boot/dts/rockchip/rk3576.dtsi b/arch/arm64/boot/dts/rockchip/rk3576.dtsi
index 4dde954043ef..ebb5fc8bb8b1 100644
--- a/arch/arm64/boot/dts/rockchip/rk3576.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3576.dtsi
@@ -111,7 +111,7 @@
reg = <0x0>;
enable-method = "psci";
capacity-dmips-mhz = <485>;
- clocks = <&scmi_clk ARMCLK_L>;
+ clocks = <&scmi_clk SCMI_ARMCLK_L>;
operating-points-v2 = <&cluster0_opp_table>;
#cooling-cells = <2>;
dynamic-power-coefficient = <120>;
@@ -124,7 +124,7 @@
reg = <0x1>;
enable-method = "psci";
capacity-dmips-mhz = <485>;
- clocks = <&scmi_clk ARMCLK_L>;
+ clocks = <&scmi_clk SCMI_ARMCLK_L>;
operating-points-v2 = <&cluster0_opp_table>;
cpu-idle-states = <&CPU_SLEEP>;
};
@@ -135,7 +135,7 @@
reg = <0x2>;
enable-method = "psci";
capacity-dmips-mhz = <485>;
- clocks = <&scmi_clk ARMCLK_L>;
+ clocks = <&scmi_clk SCMI_ARMCLK_L>;
operating-points-v2 = <&cluster0_opp_table>;
cpu-idle-states = <&CPU_SLEEP>;
};
@@ -146,7 +146,7 @@
reg = <0x3>;
enable-method = "psci";
capacity-dmips-mhz = <485>;
- clocks = <&scmi_clk ARMCLK_L>;
+ clocks = <&scmi_clk SCMI_ARMCLK_L>;
operating-points-v2 = <&cluster0_opp_table>;
cpu-idle-states = <&CPU_SLEEP>;
};
@@ -157,7 +157,7 @@
reg = <0x100>;
enable-method = "psci";
capacity-dmips-mhz = <1024>;
- clocks = <&scmi_clk ARMCLK_B>;
+ clocks = <&scmi_clk SCMI_ARMCLK_B>;
operating-points-v2 = <&cluster1_opp_table>;
#cooling-cells = <2>;
dynamic-power-coefficient = <320>;
@@ -170,7 +170,7 @@
reg = <0x101>;
enable-method = "psci";
capacity-dmips-mhz = <1024>;
- clocks = <&scmi_clk ARMCLK_B>;
+ clocks = <&scmi_clk SCMI_ARMCLK_B>;
operating-points-v2 = <&cluster1_opp_table>;
cpu-idle-states = <&CPU_SLEEP>;
};
@@ -181,7 +181,7 @@
reg = <0x102>;
enable-method = "psci";
capacity-dmips-mhz = <1024>;
- clocks = <&scmi_clk ARMCLK_B>;
+ clocks = <&scmi_clk SCMI_ARMCLK_B>;
operating-points-v2 = <&cluster1_opp_table>;
cpu-idle-states = <&CPU_SLEEP>;
};
@@ -192,7 +192,7 @@
reg = <0x103>;
enable-method = "psci";
capacity-dmips-mhz = <1024>;
- clocks = <&scmi_clk ARMCLK_B>;
+ clocks = <&scmi_clk SCMI_ARMCLK_B>;
operating-points-v2 = <&cluster1_opp_table>;
cpu-idle-states = <&CPU_SLEEP>;
};
@@ -393,6 +393,11 @@
};
};
+ display_subsystem: display-subsystem {
+ compatible = "rockchip,display-subsystem";
+ ports = <&vop_out>;
+ };
+
firmware {
scmi: scmi {
compatible = "arm,scmi-smc";
@@ -620,6 +625,11 @@
};
};
+ hdptxphy_grf: syscon@26032000 {
+ compatible = "rockchip,rk3576-hdptxphy-grf", "syscon";
+ reg = <0x0 0x26032000 0x0 0x100>;
+ };
+
vo1_grf: syscon@26036000 {
compatible = "rockchip,rk3576-vo1-grf", "syscon";
reg = <0x0 0x26036000 0x0 0x100>;
@@ -922,7 +932,7 @@
gpu: gpu@27800000 {
compatible = "rockchip,rk3576-mali", "arm,mali-bifrost";
reg = <0x0 0x27800000 0x0 0x200000>;
- assigned-clocks = <&scmi_clk CLK_GPU>;
+ assigned-clocks = <&scmi_clk SCMI_CLK_GPU>;
assigned-clock-rates = <198000000>;
clocks = <&cru CLK_GPU>;
clock-names = "core";
@@ -937,6 +947,109 @@
status = "disabled";
};
+ vop: vop@27d00000 {
+ compatible = "rockchip,rk3576-vop";
+ reg = <0x0 0x27d00000 0x0 0x3000>, <0x0 0x27d05000 0x0 0x1000>;
+ reg-names = "vop", "gamma-lut";
+ interrupts = <GIC_SPI 342 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 379 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 380 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 381 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "sys",
+ "vp0",
+ "vp1",
+ "vp2";
+ clocks = <&cru ACLK_VOP>,
+ <&cru HCLK_VOP>,
+ <&cru DCLK_VP0>,
+ <&cru DCLK_VP1>,
+ <&cru DCLK_VP2>;
+ clock-names = "aclk",
+ "hclk",
+ "dclk_vp0",
+ "dclk_vp1",
+ "dclk_vp2";
+ iommus = <&vop_mmu>;
+ power-domains = <&power RK3576_PD_VOP>;
+ rockchip,grf = <&sys_grf>;
+ rockchip,pmu = <&pmu>;
+ status = "disabled";
+
+ vop_out: ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ vp0: port@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0>;
+ };
+
+ vp1: port@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <1>;
+ };
+
+ vp2: port@2 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <2>;
+ };
+ };
+ };
+
+ vop_mmu: iommu@27d07e00 {
+ compatible = "rockchip,rk3576-iommu", "rockchip,rk3568-iommu";
+ reg = <0x0 0x27d07e00 0x0 0x100>, <0x0 0x27d07f00 0x0 0x100>;
+ interrupts = <GIC_SPI 342 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cru ACLK_VOP>, <&cru HCLK_VOP>;
+ clock-names = "aclk", "iface";
+ #iommu-cells = <0>;
+ power-domains = <&power RK3576_PD_VOP>;
+ status = "disabled";
+ };
+
+ hdmi: hdmi@27da0000 {
+ compatible = "rockchip,rk3576-dw-hdmi-qp";
+ reg = <0x0 0x27da0000 0x0 0x20000>;
+ clocks = <&cru PCLK_HDMITX0>,
+ <&cru CLK_HDMITX0_EARC>,
+ <&cru CLK_HDMITX0_REF>,
+ <&cru MCLK_SAI6_8CH>,
+ <&cru CLK_HDMITXHDP>,
+ <&cru HCLK_VO0_ROOT>;
+ clock-names = "pclk", "earc", "ref", "aud", "hdp", "hclk_vo1";
+ interrupts = <GIC_SPI 338 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 339 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 340 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 341 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 367 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "avp", "cec", "earc", "main", "hpd";
+ phys = <&hdptxphy>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&hdmi_txm0_pins &hdmi_tx_scl &hdmi_tx_sda>;
+ power-domains = <&power RK3576_PD_VO0>;
+ resets = <&cru SRST_HDMITX0_REF>, <&cru SRST_HDMITXHDP>;
+ reset-names = "ref", "hdp";
+ rockchip,grf = <&ioc_grf>;
+ rockchip,vo-grf = <&vo0_grf>;
+ status = "disabled";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ hdmi_in: port@0 {
+ reg = <0>;
+ };
+
+ hdmi_out: port@1 {
+ reg = <1>;
+ };
+ };
+ };
+
qos_hdcp1: qos@27f02000 {
compatible = "rockchip,rk3576-qos", "syscon";
reg = <0x0 0x27f02000 0x0 0x20>;
@@ -1221,6 +1334,41 @@
};
};
+ ufshc: ufshc@2a2d0000 {
+ compatible = "rockchip,rk3576-ufshc";
+ reg = <0x0 0x2a2d0000 0x0 0x10000>,
+ <0x0 0x2b040000 0x0 0x10000>,
+ <0x0 0x2601f000 0x0 0x1000>,
+ <0x0 0x2603c000 0x0 0x1000>,
+ <0x0 0x2a2e0000 0x0 0x10000>;
+ reg-names = "hci", "mphy", "hci_grf", "mphy_grf", "hci_apb";
+ clocks = <&cru ACLK_UFS_SYS>, <&cru PCLK_USB_ROOT>, <&cru PCLK_MPHY>,
+ <&cru CLK_REF_UFS_CLKOUT>;
+ clock-names = "core", "pclk", "pclk_mphy", "ref_out";
+ assigned-clocks = <&cru CLK_REF_OSC_MPHY>;
+ assigned-clock-parents = <&cru CLK_REF_MPHY_26M>;
+ interrupts = <GIC_SPI 361 IRQ_TYPE_LEVEL_HIGH>;
+ power-domains = <&power RK3576_PD_USB>;
+ pinctrl-0 = <&ufs_refclk>;
+ pinctrl-names = "default";
+ resets = <&cru SRST_A_UFS_BIU>, <&cru SRST_A_UFS_SYS>,
+ <&cru SRST_A_UFS>, <&cru SRST_P_UFS_GRF>;
+ reset-names = "biu", "sys", "ufs", "grf";
+ reset-gpios = <&gpio4 RK_PD0 GPIO_ACTIVE_LOW>;
+ status = "disabled";
+ };
+
+ sfc1: spi@2a300000 {
+ compatible = "rockchip,sfc";
+ reg = <0x0 0x2a300000 0x0 0x4000>;
+ interrupts = <GIC_SPI 255 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cru SCLK_FSPI1_X2>, <&cru HCLK_FSPI1>;
+ clock-names = "clk_sfc", "hclk_sfc";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
sdmmc: mmc@2a310000 {
compatible = "rockchip,rk3576-dw-mshc";
reg = <0x0 0x2a310000 0x0 0x4000>;
@@ -1260,6 +1408,56 @@
status = "disabled";
};
+ sfc0: spi@2a340000 {
+ compatible = "rockchip,sfc";
+ reg = <0x0 0x2a340000 0x0 0x4000>;
+ interrupts = <GIC_SPI 254 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cru SCLK_FSPI_X2>, <&cru HCLK_FSPI>;
+ clock-names = "clk_sfc", "hclk_sfc";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ otp: otp@2a580000 {
+ compatible = "rockchip,rk3576-otp";
+ reg = <0x0 0x2a580000 0x0 0x400>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ clocks = <&cru CLK_OTPC_NS>, <&cru PCLK_OTPC_NS>,
+ <&cru CLK_OTP_PHY_G>;
+ clock-names = "otp", "apb_pclk", "phy";
+ resets = <&cru SRST_OTPC_NS>, <&cru SRST_P_OTPC_NS>;
+ reset-names = "otp", "apb";
+
+ /* Data cells */
+ cpu_code: cpu-code@2 {
+ reg = <0x02 0x2>;
+ };
+ otp_cpu_version: cpu-version@5 {
+ reg = <0x05 0x1>;
+ bits = <3 3>;
+ };
+ otp_id: id@a {
+ reg = <0x0a 0x10>;
+ };
+ cpub_leakage: cpub-leakage@1e {
+ reg = <0x1e 0x1>;
+ };
+ cpul_leakage: cpul-leakage@1f {
+ reg = <0x1f 0x1>;
+ };
+ npu_leakage: npu-leakage@20 {
+ reg = <0x20 0x1>;
+ };
+ gpu_leakage: gpu-leakage@21 {
+ reg = <0x21 0x1>;
+ };
+ log_leakage: log-leakage@22 {
+ reg = <0x22 0x1>;
+ };
+ };
+
gic: interrupt-controller@2a701000 {
compatible = "arm,gic-400";
reg = <0x0 0x2a701000 0 0x10000>,
@@ -1756,6 +1954,19 @@
status = "disabled";
};
+ hdptxphy: hdmiphy@2b000000 {
+ compatible = "rockchip,rk3576-hdptx-phy", "rockchip,rk3588-hdptx-phy";
+ reg = <0x0 0x2b000000 0x0 0x2000>;
+ clocks = <&cru CLK_PHY_REF_SRC>, <&cru PCLK_HDPTX_APB>;
+ clock-names = "ref", "apb";
+ resets = <&cru SRST_P_HDPTX_APB>, <&cru SRST_HDPTX_INIT>,
+ <&cru SRST_HDPTX_CMN>, <&cru SRST_HDPTX_LANE>;
+ reset-names = "apb", "init", "cmn", "lane";
+ rockchip,grf = <&hdptxphy_grf>;
+ #phy-cells = <0>;
+ status = "disabled";
+ };
+
sram: sram@3ff88000 {
compatible = "mmio-sram";
reg = <0x0 0x3ff88000 0x0 0x78000>;
diff --git a/arch/arm64/boot/dts/rockchip/rk3588-armsom-lm7.dtsi b/arch/arm64/boot/dts/rockchip/rk3588-armsom-lm7.dtsi
index a3138d2d384c..e44125e9a8fb 100644
--- a/arch/arm64/boot/dts/rockchip/rk3588-armsom-lm7.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3588-armsom-lm7.dtsi
@@ -114,6 +114,10 @@
};
};
+&pd_gpu {
+ domain-supply = <&vdd_gpu_s0>;
+};
+
&saradc {
vref-supply = <&avcc_1v8_s0>;
status = "okay";
diff --git a/arch/arm64/boot/dts/rockchip/rk3588-armsom-sige7.dts b/arch/arm64/boot/dts/rockchip/rk3588-armsom-sige7.dts
index 08f09053a066..ae9274365bed 100644
--- a/arch/arm64/boot/dts/rockchip/rk3588-armsom-sige7.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3588-armsom-sige7.dts
@@ -4,6 +4,7 @@
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/leds/common.h>
+#include <dt-bindings/soc/rockchip,vop2.h>
#include "rk3588.dtsi"
/ {
@@ -33,6 +34,17 @@
"Headphone", "Headphones";
};
+ hdmi0-con {
+ compatible = "hdmi-connector";
+ type = "a";
+
+ port {
+ hdmi0_con_in: endpoint {
+ remote-endpoint = <&hdmi0_out_con>;
+ };
+ };
+ };
+
leds {
compatible = "gpio-leds";
pinctrl-names = "default";
@@ -164,6 +176,30 @@
status = "okay";
};
+&hdmi0 {
+ status = "okay";
+};
+
+&hdmi0_in {
+ hdmi0_in_vp0: endpoint {
+ remote-endpoint = <&vp0_out_hdmi0>;
+ };
+};
+
+&hdmi0_out {
+ hdmi0_out_con: endpoint {
+ remote-endpoint = <&hdmi0_con_in>;
+ };
+};
+
+&hdmi0_sound {
+ status = "okay";
+};
+
+&hdptxphy0 {
+ status = "okay";
+};
+
&i2c0 {
pinctrl-names = "default";
pinctrl-0 = <&i2c0m2_xfer>;
@@ -258,6 +294,10 @@
};
};
+&i2s5_8ch {
+ status = "okay";
+};
+
/* phy1 - right ethernet port */
&pcie2x1l0 {
reset-gpios = <&gpio4 RK_PA5 GPIO_ACTIVE_HIGH>;
@@ -268,6 +308,22 @@
&pcie2x1l1 {
reset-gpios = <&gpio3 RK_PD4 GPIO_ACTIVE_HIGH>;
status = "okay";
+
+ pcie@0,0 {
+ reg = <0x300000 0 0 0 0>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+ ranges;
+ device_type = "pci";
+ bus-range = <0x30 0x3f>;
+
+ wifi: wifi@0,0 {
+ compatible = "pci14e4,449d";
+ reg = <0x310000 0 0 0 0>;
+ clocks = <&hym8563>;
+ clock-names = "lpo";
+ };
+ };
};
/* phy0 - left ethernet port */
@@ -286,6 +342,10 @@
status = "okay";
};
+&pd_gpu {
+ domain-supply = <&vdd_gpu_s0>;
+};
+
&pinctrl {
hym8563 {
hym8563_int: hym8563-int {
@@ -723,3 +783,18 @@
dr_mode = "host";
status = "okay";
};
+
+&vop_mmu {
+ status = "okay";
+};
+
+&vop {
+ status = "okay";
+};
+
+&vp0 {
+ vp0_out_hdmi0: endpoint@ROCKCHIP_VOP2_EP_HDMI0 {
+ reg = <ROCKCHIP_VOP2_EP_HDMI0>;
+ remote-endpoint = <&hdmi0_in_vp0>;
+ };
+};
diff --git a/arch/arm64/boot/dts/rockchip/rk3588-base.dtsi b/arch/arm64/boot/dts/rockchip/rk3588-base.dtsi
index c3abdfb04f8f..1e18ad93ba0e 100644
--- a/arch/arm64/boot/dts/rockchip/rk3588-base.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3588-base.dtsi
@@ -358,11 +358,6 @@
};
firmware {
- optee: optee {
- compatible = "linaro,optee-tz";
- method = "smc";
- };
-
scmi: scmi {
compatible = "arm,scmi-smc";
arm,smc-id = <0x82000010>;
@@ -382,6 +377,22 @@
};
};
+ hdmi0_sound: hdmi0-sound {
+ compatible = "simple-audio-card";
+ simple-audio-card,format = "i2s";
+ simple-audio-card,mclk-fs = <128>;
+ simple-audio-card,name = "hdmi0";
+ status = "disabled";
+
+ simple-audio-card,codec {
+ sound-dai = <&hdmi0>;
+ };
+
+ simple-audio-card,cpu {
+ sound-dai = <&i2s5_8ch>;
+ };
+ };
+
pmu-a55 {
compatible = "arm,cortex-a55-pmu";
interrupts = <GIC_PPI 7 IRQ_TYPE_LEVEL_HIGH &ppi_partition0>;
@@ -864,7 +875,7 @@
};
};
/* These power domains are grouped by VD_GPU */
- power-domain@RK3588_PD_GPU {
+ pd_gpu: power-domain@RK3588_PD_GPU {
reg = <RK3588_PD_GPU>;
clocks = <&cru CLK_GPU>,
<&cru CLK_GPU_COREGROUP>,
@@ -1261,14 +1272,16 @@
<&cru DCLK_VOP1>,
<&cru DCLK_VOP2>,
<&cru DCLK_VOP3>,
- <&cru PCLK_VOP_ROOT>;
+ <&cru PCLK_VOP_ROOT>,
+ <&hdptxphy0>;
clock-names = "aclk",
"hclk",
"dclk_vp0",
"dclk_vp1",
"dclk_vp2",
"dclk_vp3",
- "pclk_vop";
+ "pclk_vop",
+ "pll_hdmiphy0";
iommus = <&vop_mmu>;
power-domains = <&power RK3588_PD_VOP>;
rockchip,grf = <&sys_grf>;
@@ -1318,6 +1331,21 @@
status = "disabled";
};
+ spdif_tx2: spdif-tx@fddb0000 {
+ compatible = "rockchip,rk3588-spdif", "rockchip,rk3568-spdif";
+ reg = <0x0 0xfddb0000 0x0 0x1000>;
+ assigned-clock-parents = <&cru PLL_AUPLL>;
+ assigned-clocks = <&cru CLK_SPDIF2_DP0_SRC>;
+ clock-names = "mclk", "hclk";
+ clocks = <&cru MCLK_SPDIF2>, <&cru HCLK_SPDIF2_DP0>;
+ dma-names = "tx";
+ dmas = <&dmac1 6>;
+ interrupts = <GIC_SPI 195 IRQ_TYPE_LEVEL_HIGH 0>;
+ power-domains = <&power RK3588_PD_VO0>;
+ #sound-dai-cells = <0>;
+ status = "disabled";
+ };
+
i2s4_8ch: i2s@fddc0000 {
compatible = "rockchip,rk3588-i2s-tdm";
reg = <0x0 0xfddc0000 0x0 0x1000>;
@@ -1335,6 +1363,21 @@
status = "disabled";
};
+ spdif_tx3: spdif-tx@fdde0000 {
+ compatible = "rockchip,rk3588-spdif", "rockchip,rk3568-spdif";
+ reg = <0x0 0xfdde0000 0x0 0x1000>;
+ assigned-clock-parents = <&cru PLL_AUPLL>;
+ assigned-clocks = <&cru CLK_SPDIF3_SRC>;
+ clock-names = "mclk", "hclk";
+ clocks = <&cru MCLK_SPDIF3>, <&cru HCLK_SPDIF3>;
+ dma-names = "tx";
+ dmas = <&dmac1 7>;
+ interrupts = <GIC_SPI 196 IRQ_TYPE_LEVEL_HIGH 0>;
+ power-domains = <&power RK3588_PD_VO1>;
+ #sound-dai-cells = <0>;
+ status = "disabled";
+ };
+
i2s5_8ch: i2s@fddf0000 {
compatible = "rockchip,rk3588-i2s-tdm";
reg = <0x0 0xfddf0000 0x0 0x1000>;
@@ -1385,7 +1428,7 @@
<GIC_SPI 172 IRQ_TYPE_LEVEL_HIGH 0>,
<GIC_SPI 360 IRQ_TYPE_LEVEL_HIGH 0>;
interrupt-names = "avp", "cec", "earc", "main", "hpd";
- phys = <&hdptxphy_hdmi0>;
+ phys = <&hdptxphy0>;
pinctrl-names = "default";
pinctrl-0 = <&hdmim0_tx0_cec &hdmim0_tx0_hpd
&hdmim0_tx0_scl &hdmim0_tx0_sda>;
@@ -1394,6 +1437,7 @@
reset-names = "ref", "hdp";
rockchip,grf = <&sys_grf>;
rockchip,vo-grf = <&vo1_grf>;
+ #sound-dai-cells = <0>;
status = "disabled";
ports {
@@ -1921,6 +1965,14 @@
status = "disabled";
};
+ rng@fe378000 {
+ compatible = "rockchip,rk3588-rng";
+ reg = <0x0 0xfe378000 0x0 0x200>;
+ interrupts = <GIC_SPI 400 IRQ_TYPE_LEVEL_HIGH 0>;
+ clocks = <&scmi_clk SCMI_HCLK_SECURE_NS>;
+ resets = <&scmi_reset 48>;
+ };
+
i2s0_8ch: i2s@fe470000 {
compatible = "rockchip,rk3588-i2s-tdm";
reg = <0x0 0xfe470000 0x0 0x1000>;
@@ -2016,12 +2068,47 @@
status = "disabled";
};
+ spdif_tx0: spdif-tx@fe4e0000 {
+ compatible = "rockchip,rk3588-spdif", "rockchip,rk3568-spdif";
+ reg = <0x0 0xfe4e0000 0x0 0x1000>;
+ assigned-clock-parents = <&cru PLL_AUPLL>;
+ assigned-clocks = <&cru CLK_SPDIF0_SRC>;
+ clock-names = "mclk", "hclk";
+ clocks = <&cru MCLK_SPDIF0>, <&cru HCLK_SPDIF0>;
+ dma-names = "tx";
+ dmas = <&dmac0 5>;
+ interrupts = <GIC_SPI 193 IRQ_TYPE_LEVEL_HIGH 0>;
+ pinctrl-0 = <&spdif0m0_tx>;
+ pinctrl-names = "default";
+ power-domains = <&power RK3588_PD_AUDIO>;
+ #sound-dai-cells = <0>;
+ status = "disabled";
+ };
+
+ spdif_tx1: spdif-tx@fe4f0000 {
+ compatible = "rockchip,rk3588-spdif", "rockchip,rk3568-spdif";
+ reg = <0x0 0xfe4f0000 0x0 0x1000>;
+ assigned-clock-parents = <&cru PLL_AUPLL>;
+ assigned-clocks = <&cru CLK_SPDIF1_SRC>;
+ clock-names = "mclk", "hclk";
+ clocks = <&cru MCLK_SPDIF1>, <&cru HCLK_SPDIF1>;
+ dma-names = "tx";
+ dmas = <&dmac1 5>;
+ interrupts = <GIC_SPI 194 IRQ_TYPE_LEVEL_HIGH 0>;
+ pinctrl-0 = <&spdif1m0_tx>;
+ pinctrl-names = "default";
+ power-domains = <&power RK3588_PD_AUDIO>;
+ #sound-dai-cells = <0>;
+ status = "disabled";
+ };
+
gic: interrupt-controller@fe600000 {
compatible = "arm,gic-v3";
reg = <0x0 0xfe600000 0 0x10000>, /* GICD */
<0x0 0xfe680000 0 0x100000>; /* GICR */
interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_HIGH 0>;
interrupt-controller;
+ dma-noncoherent;
mbi-alias = <0x0 0xfe610000>;
mbi-ranges = <424 56>;
msi-controller;
@@ -2033,6 +2120,7 @@
its0: msi-controller@fe640000 {
compatible = "arm,gic-v3-its";
reg = <0x0 0xfe640000 0x0 0x20000>;
+ dma-noncoherent;
msi-controller;
#msi-cells = <1>;
};
@@ -2040,6 +2128,7 @@
its1: msi-controller@fe660000 {
compatible = "arm,gic-v3-its";
reg = <0x0 0xfe660000 0x0 0x20000>;
+ dma-noncoherent;
msi-controller;
#msi-cells = <1>;
};
@@ -2807,11 +2896,12 @@
#dma-cells = <1>;
};
- hdptxphy_hdmi0: phy@fed60000 {
+ hdptxphy0: phy@fed60000 {
compatible = "rockchip,rk3588-hdptx-phy";
reg = <0x0 0xfed60000 0x0 0x2000>;
clocks = <&cru CLK_USB2PHY_HDPTXRXPHY_REF>, <&cru PCLK_HDPTX0>;
clock-names = "ref", "apb";
+ #clock-cells = <0>;
#phy-cells = <0>;
resets = <&cru SRST_HDPTX0>, <&cru SRST_P_HDPTX0>,
<&cru SRST_HDPTX0_INIT>, <&cru SRST_HDPTX0_CMN>,
diff --git a/arch/arm64/boot/dts/rockchip/rk3588-coolpi-cm5-evb.dts b/arch/arm64/boot/dts/rockchip/rk3588-coolpi-cm5-evb.dts
index 9d525c8ff725..9eda69722665 100644
--- a/arch/arm64/boot/dts/rockchip/rk3588-coolpi-cm5-evb.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3588-coolpi-cm5-evb.dts
@@ -129,7 +129,7 @@
};
};
-&hdptxphy_hdmi0 {
+&hdptxphy0 {
status = "okay";
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3588-coolpi-cm5-genbook.dts b/arch/arm64/boot/dts/rockchip/rk3588-coolpi-cm5-genbook.dts
index bc6b43a77153..6dc10da5215f 100644
--- a/arch/arm64/boot/dts/rockchip/rk3588-coolpi-cm5-genbook.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3588-coolpi-cm5-genbook.dts
@@ -166,7 +166,7 @@
};
};
-&hdptxphy_hdmi0 {
+&hdptxphy0 {
status = "okay";
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3588-coolpi-cm5.dtsi b/arch/arm64/boot/dts/rockchip/rk3588-coolpi-cm5.dtsi
index 71ed680621b8..cc37f082adea 100644
--- a/arch/arm64/boot/dts/rockchip/rk3588-coolpi-cm5.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3588-coolpi-cm5.dtsi
@@ -277,6 +277,10 @@
status = "okay";
};
+&pd_gpu {
+ domain-supply = <&vdd_gpu_s0>;
+};
+
&pinctrl {
hym8563 {
hym8563_int: hym8563-int {
diff --git a/arch/arm64/boot/dts/rockchip/rk3588-edgeble-neu6a-common.dtsi b/arch/arm64/boot/dts/rockchip/rk3588-edgeble-neu6a-common.dtsi
index 5e72d0eff0e0..8a783dc64c0e 100644
--- a/arch/arm64/boot/dts/rockchip/rk3588-edgeble-neu6a-common.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3588-edgeble-neu6a-common.dtsi
@@ -126,6 +126,10 @@
};
};
+&pd_gpu {
+ domain-supply = <&vdd_gpu_s0>;
+};
+
&pinctrl {
leds {
led_user_en: led_user_en {
diff --git a/arch/arm64/boot/dts/rockchip/rk3588-edgeble-neu6a-io.dtsi b/arch/arm64/boot/dts/rockchip/rk3588-edgeble-neu6a-io.dtsi
index 7125790bbed2..08920344a4b8 100644
--- a/arch/arm64/boot/dts/rockchip/rk3588-edgeble-neu6a-io.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3588-edgeble-neu6a-io.dtsi
@@ -4,12 +4,24 @@
*/
#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/soc/rockchip,vop2.h>
/ {
chosen {
stdout-path = "serial2:1500000n8";
};
+ hdmi1-con {
+ compatible = "hdmi-connector";
+ type = "a";
+
+ port {
+ hdmi1_con_in: endpoint {
+ remote-endpoint = <&hdmi1_out_con>;
+ };
+ };
+ };
+
/* Unnamed gated oscillator: 100MHz,3.3V,3225 */
pcie30_port0_refclk: pcie30_port1_refclk: pcie-oscillator {
compatible = "gated-fixed-clock";
@@ -81,6 +93,26 @@
status = "okay";
};
+&hdmi1 {
+ status = "okay";
+};
+
+&hdmi1_in {
+ hdmi1_in_vp0: endpoint {
+ remote-endpoint = <&vp0_out_hdmi1>;
+ };
+};
+
+&hdmi1_out {
+ hdmi1_out_con: endpoint {
+ remote-endpoint = <&hdmi1_con_in>;
+ };
+};
+
+&hdptxphy1 {
+ status = "okay";
+};
+
&i2c6 {
status = "okay";
@@ -275,3 +307,18 @@
&usb_host2_xhci {
status = "okay";
};
+
+&vop_mmu {
+ status = "okay";
+};
+
+&vop {
+ status = "okay";
+};
+
+&vp0 {
+ vp0_out_hdmi1: endpoint@ROCKCHIP_VOP2_EP_HDMI1 {
+ reg = <ROCKCHIP_VOP2_EP_HDMI1>;
+ remote-endpoint = <&hdmi1_in_vp0>;
+ };
+};
diff --git a/arch/arm64/boot/dts/rockchip/rk3588-evb1-v10.dts b/arch/arm64/boot/dts/rockchip/rk3588-evb1-v10.dts
index ba49f0bbaac6..8e912da299a2 100644
--- a/arch/arm64/boot/dts/rockchip/rk3588-evb1-v10.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3588-evb1-v10.dts
@@ -132,6 +132,17 @@
};
};
+ hdmi1-con {
+ compatible = "hdmi-connector";
+ type = "a";
+
+ port {
+ hdmi1_con_in: endpoint {
+ remote-endpoint = <&hdmi1_out_con>;
+ };
+ };
+ };
+
pcie20_avdd0v85: regulator-pcie20-avdd0v85 {
compatible = "regulator-fixed";
regulator-name = "pcie20_avdd0v85";
@@ -364,7 +375,27 @@
};
};
-&hdptxphy_hdmi0 {
+&hdmi1 {
+ status = "okay";
+};
+
+&hdmi1_in {
+ hdmi1_in_vp1: endpoint {
+ remote-endpoint = <&vp1_out_hdmi1>;
+ };
+};
+
+&hdmi1_out {
+ hdmi1_out_con: endpoint {
+ remote-endpoint = <&hdmi1_con_in>;
+ };
+};
+
+&hdptxphy0 {
+ status = "okay";
+};
+
+&hdptxphy1 {
status = "okay";
};
@@ -441,7 +472,7 @@
status = "okay";
es8388: audio-codec@11 {
- compatible = "everest,es8388";
+ compatible = "everest,es8388", "everest,es8328";
reg = <0x11>;
clocks = <&cru I2S0_8CH_MCLKOUT>;
assigned-clocks = <&cru I2S0_8CH_MCLKOUT>;
@@ -519,6 +550,10 @@
status = "okay";
};
+&pd_gpu {
+ domain-supply = <&vdd_gpu_s0>;
+};
+
&pinctrl {
audio {
hp_detect: headphone-detect {
@@ -1371,11 +1406,11 @@
status = "okay";
};
-&vop_mmu {
+&vop {
status = "okay";
};
-&vop {
+&vop_mmu {
status = "okay";
};
@@ -1385,3 +1420,10 @@
remote-endpoint = <&hdmi0_in_vp0>;
};
};
+
+&vp1 {
+ vp1_out_hdmi1: endpoint@ROCKCHIP_VOP2_EP_HDMI1 {
+ reg = <ROCKCHIP_VOP2_EP_HDMI1>;
+ remote-endpoint = <&hdmi1_in_vp1>;
+ };
+};
diff --git a/arch/arm64/boot/dts/rockchip/rk3588-extra.dtsi b/arch/arm64/boot/dts/rockchip/rk3588-extra.dtsi
index 840b638af1c2..099edb3fd0f6 100644
--- a/arch/arm64/boot/dts/rockchip/rk3588-extra.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3588-extra.dtsi
@@ -7,6 +7,46 @@
#include "rk3588-extra-pinctrl.dtsi"
/ {
+ hdmi1_sound: hdmi1-sound {
+ compatible = "simple-audio-card";
+ simple-audio-card,format = "i2s";
+ simple-audio-card,mclk-fs = <128>;
+ simple-audio-card,name = "hdmi1";
+ status = "disabled";
+
+ simple-audio-card,codec {
+ sound-dai = <&hdmi1>;
+ };
+
+ simple-audio-card,cpu {
+ sound-dai = <&i2s6_8ch>;
+ };
+ };
+
+ reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ /*
+ * The 4k HDMI capture controller works only with 32bit
+ * phys addresses and doesn't support IOMMU. HDMI RX CMA
+ * must be reserved below 4GB.
+ * The size of 160MB was determined as follows:
+ * (3840 * 2160 pixels) * (4 bytes/pixel) * (2 frames/buffer) / 10^6 = 66MB
+ * To ensure sufficient support for practical use-cases,
+ * we doubled the 66MB value and rounded up to 160MB.
+ */
+ hdmi_receiver_cma: hdmi-receiver-cma {
+ compatible = "shared-dma-pool";
+ alloc-ranges = <0x0 0x0 0x0 0xffffffff>;
+ size = <0x0 (160 * 0x100000)>; /* 160MiB */
+ alignment = <0x0 0x40000>; /* 256KiB */
+ no-map;
+ status = "disabled";
+ };
+ };
+
usb_host1_xhci: usb@fc400000 {
compatible = "rockchip,rk3588-dwc3", "snps,dwc3";
reg = <0x0 0xfc400000 0x0 0x400000>;
@@ -67,6 +107,26 @@
};
};
+ hdptxphy1_grf: syscon@fd5e4000 {
+ compatible = "rockchip,rk3588-hdptxphy-grf", "syscon";
+ reg = <0x0 0xfd5e4000 0x0 0x100>;
+ };
+
+ spdif_tx5: spdif-tx@fddb8000 {
+ compatible = "rockchip,rk3588-spdif", "rockchip,rk3568-spdif";
+ reg = <0x0 0xfddb8000 0x0 0x1000>;
+ assigned-clock-parents = <&cru PLL_AUPLL>;
+ assigned-clocks = <&cru CLK_SPDIF5_DP1_SRC>;
+ clock-names = "mclk", "hclk";
+ clocks = <&cru MCLK_SPDIF5>, <&cru HCLK_SPDIF5_DP1>;
+ dma-names = "tx";
+ dmas = <&dmac1 22>;
+ interrupts = <GIC_SPI 198 IRQ_TYPE_LEVEL_HIGH 0>;
+ power-domains = <&power RK3588_PD_VO0>;
+ #sound-dai-cells = <0>;
+ status = "disabled";
+ };
+
i2s8_8ch: i2s@fddc8000 {
compatible = "rockchip,rk3588-i2s-tdm";
reg = <0x0 0xfddc8000 0x0 0x1000>;
@@ -84,6 +144,21 @@
status = "disabled";
};
+ spdif_tx4: spdif-tx@fdde8000 {
+ compatible = "rockchip,rk3588-spdif", "rockchip,rk3568-spdif";
+ reg = <0x0 0xfdde8000 0x0 0x1000>;
+ assigned-clock-parents = <&cru PLL_AUPLL>;
+ assigned-clocks = <&cru CLK_SPDIF4_SRC>;
+ clock-names = "mclk", "hclk";
+ clocks = <&cru MCLK_SPDIF4>, <&cru HCLK_SPDIF4>;
+ dma-names = "tx";
+ dmas = <&dmac1 8>;
+ interrupts = <GIC_SPI 197 IRQ_TYPE_LEVEL_HIGH 0>;
+ power-domains = <&power RK3588_PD_VO1>;
+ #sound-dai-cells = <0>;
+ status = "disabled";
+ };
+
i2s6_8ch: i2s@fddf4000 {
compatible = "rockchip,rk3588-i2s-tdm";
reg = <0x0 0xfddf4000 0x0 0x1000>;
@@ -135,6 +210,79 @@
status = "disabled";
};
+ hdmi1: hdmi@fdea0000 {
+ compatible = "rockchip,rk3588-dw-hdmi-qp";
+ reg = <0x0 0xfdea0000 0x0 0x20000>;
+ clocks = <&cru PCLK_HDMITX1>,
+ <&cru CLK_HDMITX1_EARC>,
+ <&cru CLK_HDMITX1_REF>,
+ <&cru MCLK_I2S6_8CH_TX>,
+ <&cru CLK_HDMIHDP1>,
+ <&cru HCLK_VO1>;
+ clock-names = "pclk", "earc", "ref", "aud", "hdp", "hclk_vo1";
+ interrupts = <GIC_SPI 173 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 174 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 175 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 176 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 361 IRQ_TYPE_LEVEL_HIGH 0>;
+ interrupt-names = "avp", "cec", "earc", "main", "hpd";
+ phys = <&hdptxphy1>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&hdmim2_tx1_cec &hdmim0_tx1_hpd
+ &hdmim1_tx1_scl &hdmim1_tx1_sda>;
+ power-domains = <&power RK3588_PD_VO1>;
+ resets = <&cru SRST_HDMITX1_REF>, <&cru SRST_HDMIHDP1>;
+ reset-names = "ref", "hdp";
+ rockchip,grf = <&sys_grf>;
+ rockchip,vo-grf = <&vo1_grf>;
+ #sound-dai-cells = <0>;
+ status = "disabled";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ hdmi1_in: port@0 {
+ reg = <0>;
+ };
+
+ hdmi1_out: port@1 {
+ reg = <1>;
+ };
+ };
+ };
+
+ hdmi_receiver: hdmi_receiver@fdee0000 {
+ compatible = "rockchip,rk3588-hdmirx-ctrler", "snps,dw-hdmi-rx";
+ reg = <0x0 0xfdee0000 0x0 0x6000>;
+ interrupts = <GIC_SPI 177 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 178 IRQ_TYPE_LEVEL_HIGH 0>,
+ <GIC_SPI 179 IRQ_TYPE_LEVEL_HIGH 0>;
+ interrupt-names = "cec", "hdmi", "dma";
+ clocks = <&cru ACLK_HDMIRX>,
+ <&cru CLK_HDMIRX_AUD>,
+ <&cru CLK_CR_PARA>,
+ <&cru PCLK_HDMIRX>,
+ <&cru CLK_HDMIRX_REF>,
+ <&cru PCLK_S_HDMIRX>,
+ <&cru HCLK_VO1>;
+ clock-names = "aclk",
+ "audio",
+ "cr_para",
+ "pclk",
+ "ref",
+ "hclk_s_hdmirx",
+ "hclk_vo1";
+ memory-region = <&hdmi_receiver_cma>;
+ power-domains = <&power RK3588_PD_VO1>;
+ resets = <&cru SRST_A_HDMIRX>, <&cru SRST_P_HDMIRX>,
+ <&cru SRST_HDMIRX_REF>, <&cru SRST_A_HDMIRX_BIU>;
+ reset-names = "axi", "apb", "ref", "biu";
+ rockchip,grf = <&sys_grf>;
+ rockchip,vo1-grf = <&vo1_grf>;
+ status = "disabled";
+ };
+
pcie3x4: pcie@fe150000 {
compatible = "rockchip,rk3588-pcie", "rockchip,rk3568-pcie";
#address-cells = <3>;
@@ -398,6 +546,23 @@
};
};
+ hdptxphy1: phy@fed70000 {
+ compatible = "rockchip,rk3588-hdptx-phy";
+ reg = <0x0 0xfed70000 0x0 0x2000>;
+ clocks = <&cru CLK_USB2PHY_HDPTXRXPHY_REF>, <&cru PCLK_HDPTX1>;
+ clock-names = "ref", "apb";
+ #clock-cells = <0>;
+ #phy-cells = <0>;
+ resets = <&cru SRST_HDPTX1>, <&cru SRST_P_HDPTX1>,
+ <&cru SRST_HDPTX1_INIT>, <&cru SRST_HDPTX1_CMN>,
+ <&cru SRST_HDPTX1_LANE>, <&cru SRST_HDPTX1_ROPLL>,
+ <&cru SRST_HDPTX1_LCPLL>;
+ reset-names = "phy", "apb", "init", "cmn", "lane", "ropll",
+ "lcpll";
+ rockchip,grf = <&hdptxphy1_grf>;
+ status = "disabled";
+ };
+
usbdp_phy1: phy@fed90000 {
compatible = "rockchip,rk3588-usbdp-phy";
reg = <0x0 0xfed90000 0x0 0x10000>;
@@ -449,3 +614,24 @@
status = "disabled";
};
};
+
+&vop {
+ clocks = <&cru ACLK_VOP>,
+ <&cru HCLK_VOP>,
+ <&cru DCLK_VOP0>,
+ <&cru DCLK_VOP1>,
+ <&cru DCLK_VOP2>,
+ <&cru DCLK_VOP3>,
+ <&cru PCLK_VOP_ROOT>,
+ <&hdptxphy0>,
+ <&hdptxphy1>;
+ clock-names = "aclk",
+ "hclk",
+ "dclk_vp0",
+ "dclk_vp1",
+ "dclk_vp2",
+ "dclk_vp3",
+ "pclk_vop",
+ "pll_hdmiphy0",
+ "pll_hdmiphy1";
+};
diff --git a/arch/arm64/boot/dts/rockchip/rk3588-fet3588-c.dtsi b/arch/arm64/boot/dts/rockchip/rk3588-fet3588-c.dtsi
index 390051317389..4331cdc70f97 100644
--- a/arch/arm64/boot/dts/rockchip/rk3588-fet3588-c.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3588-fet3588-c.dtsi
@@ -205,6 +205,10 @@
};
};
+&pd_gpu {
+ domain-supply = <&vdd_gpu_s0>;
+};
+
&pinctrl {
leds {
led_rgb_b: led-rgb-b {
diff --git a/arch/arm64/boot/dts/rockchip/rk3588-firefly-core-3588j.dtsi b/arch/arm64/boot/dts/rockchip/rk3588-firefly-core-3588j.dtsi
index 42c523b553c9..80e16ea4154c 100644
--- a/arch/arm64/boot/dts/rockchip/rk3588-firefly-core-3588j.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3588-firefly-core-3588j.dtsi
@@ -108,6 +108,10 @@
};
};
+&pd_gpu {
+ domain-supply = <&vdd_gpu_s0>;
+};
+
&sdhci {
bus-width = <8>;
no-sdio;
diff --git a/arch/arm64/boot/dts/rockchip/rk3588-firefly-icore-3588q.dtsi b/arch/arm64/boot/dts/rockchip/rk3588-firefly-icore-3588q.dtsi
new file mode 100644
index 000000000000..6726eeb49255
--- /dev/null
+++ b/arch/arm64/boot/dts/rockchip/rk3588-firefly-icore-3588q.dtsi
@@ -0,0 +1,443 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/pinctrl/rockchip.h>
+
+#include "rk3588.dtsi"
+
+/ {
+ compatible = "firefly,icore-3588q", "rockchip,rk3588";
+
+ aliases {
+ mmc0 = &sdhci;
+ };
+};
+
+&cpu_b0 {
+ cpu-supply = <&vdd_cpu_big0_s0>;
+};
+
+&cpu_b1 {
+ cpu-supply = <&vdd_cpu_big0_s0>;
+};
+
+&cpu_b2 {
+ cpu-supply = <&vdd_cpu_big1_s0>;
+};
+
+&cpu_b3 {
+ cpu-supply = <&vdd_cpu_big1_s0>;
+};
+
+&cpu_l0 {
+ cpu-supply = <&vdd_cpu_lit_s0>;
+};
+
+&cpu_l1 {
+ cpu-supply = <&vdd_cpu_lit_s0>;
+};
+
+&cpu_l2 {
+ cpu-supply = <&vdd_cpu_lit_s0>;
+};
+
+&cpu_l3 {
+ cpu-supply = <&vdd_cpu_lit_s0>;
+};
+
+&i2c0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c0m2_xfer>;
+ status = "okay";
+
+ vdd_cpu_big0_s0: regulator@42 {
+ compatible = "rockchip,rk8602";
+ reg = <0x42>;
+ fcs,suspend-voltage-selector = <1>;
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <550000>;
+ regulator-max-microvolt = <1050000>;
+ regulator-name = "vdd_cpu_big0_s0";
+ regulator-ramp-delay = <2300>;
+ vin-supply = <&vcc5v0_sys>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdd_cpu_big1_s0: regulator@43 {
+ compatible = "rockchip,rk8603", "rockchip,rk8602";
+ reg = <0x43>;
+ fcs,suspend-voltage-selector = <1>;
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <550000>;
+ regulator-max-microvolt = <1050000>;
+ regulator-name = "vdd_cpu_big1_s0";
+ regulator-ramp-delay = <2300>;
+ vin-supply = <&vcc5v0_sys>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+};
+
+&i2c1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c1m2_xfer>;
+ status = "okay";
+
+ vdd_npu_s0: vdd_npu_mem_s0: regulator@42 {
+ compatible = "rockchip,rk8602";
+ reg = <0x42>;
+ fcs,suspend-voltage-selector = <1>;
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <550000>;
+ regulator-max-microvolt = <950000>;
+ regulator-name = "vdd_npu_s0";
+ regulator-ramp-delay = <2300>;
+ vin-supply = <&vcc5v0_sys>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+};
+
+&sdhci {
+ bus-width = <8>;
+ no-sdio;
+ no-sd;
+ non-removable;
+ max-frequency = <150000000>;
+ mmc-hs400-1_8v;
+ mmc-hs400-enhanced-strobe;
+ status = "okay";
+};
+
+&spi2 {
+ assigned-clocks = <&cru CLK_SPI2>;
+ assigned-clock-rates = <200000000>;
+ num-cs = <1>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&spi2m2_cs0 &spi2m2_pins>;
+ status = "okay";
+
+ pmic@0 {
+ compatible = "rockchip,rk806";
+ reg = <0x0>;
+ interrupt-parent = <&gpio0>;
+ interrupts = <7 IRQ_TYPE_LEVEL_LOW>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pmic_pins>, <&rk806_dvs1_null>,
+ <&rk806_dvs2_null>, <&rk806_dvs3_null>;
+ spi-max-frequency = <1000000>;
+ system-power-controller;
+
+ vcc1-supply = <&vcc5v0_sys>;
+ vcc2-supply = <&vcc5v0_sys>;
+ vcc3-supply = <&vcc5v0_sys>;
+ vcc4-supply = <&vcc5v0_sys>;
+ vcc5-supply = <&vcc5v0_sys>;
+ vcc6-supply = <&vcc5v0_sys>;
+ vcc7-supply = <&vcc5v0_sys>;
+ vcc8-supply = <&vcc5v0_sys>;
+ vcc9-supply = <&vcc5v0_sys>;
+ vcc10-supply = <&vcc5v0_sys>;
+ vcc11-supply = <&vcc_2v0_pldo_s3>;
+ vcc12-supply = <&vcc5v0_sys>;
+ vcc13-supply = <&vcc_1v1_nldo_s3>;
+ vcc14-supply = <&vcc_1v1_nldo_s3>;
+ vcca-supply = <&vcc5v0_sys>;
+
+ rk806_dvs1_null: dvs1-null-pins {
+ pins = "gpio_pwrctrl1";
+ function = "pin_fun0";
+ };
+
+ rk806_dvs2_null: dvs2-null-pins {
+ pins = "gpio_pwrctrl2";
+ function = "pin_fun0";
+ };
+
+ rk806_dvs3_null: dvs3-null-pins {
+ pins = "gpio_pwrctrl3";
+ function = "pin_fun0";
+ };
+
+ regulators {
+ vdd_gpu_s0: vdd_gpu_mem_s0: dcdc-reg1 {
+ regulator-boot-on;
+ regulator-min-microvolt = <550000>;
+ regulator-max-microvolt = <950000>;
+ regulator-ramp-delay = <12500>;
+ regulator-name = "vdd_gpu_s0";
+ regulator-enable-ramp-delay = <400>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdd_cpu_lit_s0: vdd_cpu_lit_mem_s0: dcdc-reg2 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <550000>;
+ regulator-max-microvolt = <950000>;
+ regulator-ramp-delay = <12500>;
+ regulator-name = "vdd_cpu_lit_s0";
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdd_log_s0: dcdc-reg3 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <675000>;
+ regulator-max-microvolt = <750000>;
+ regulator-ramp-delay = <12500>;
+ regulator-name = "vdd_log_s0";
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ regulator-suspend-microvolt = <750000>;
+ };
+ };
+
+ vdd_vdenc_s0: vdd_vdenc_mem_s0: dcdc-reg4 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <550000>;
+ regulator-max-microvolt = <950000>;
+ regulator-ramp-delay = <12500>;
+ regulator-name = "vdd_vdenc_s0";
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdd_ddr_s0: dcdc-reg5 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <675000>;
+ regulator-max-microvolt = <950000>;
+ regulator-ramp-delay = <12500>;
+ regulator-name = "vdd_ddr_s0";
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ regulator-suspend-microvolt = <850000>;
+ };
+ };
+
+ vdd2_ddr_s3: dcdc-reg6 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-name = "vdd2_ddr_s3";
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ };
+ };
+
+ vcc_2v0_pldo_s3: dcdc-reg7 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <2000000>;
+ regulator-max-microvolt = <2000000>;
+ regulator-name = "vdd_2v0_pldo_s3";
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <2000000>;
+ };
+ };
+
+ vcc_3v3_s3: dcdc-reg8 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-name = "vcc_3v3_s3";
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <3300000>;
+ };
+ };
+
+ vddq_ddr_s0: dcdc-reg9 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-name = "vddq_ddr_s0";
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcc_1v8_s3: dcdc-reg10 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-name = "vcc_1v8_s3";
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <1800000>;
+ };
+ };
+
+ avcc_1v8_s0: pldo-reg1 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-name = "avcc_1v8_s0";
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcc_1v8_s0: pldo-reg2 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-name = "vcc_1v8_s0";
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ regulator-suspend-microvolt = <1800000>;
+ };
+ };
+
+ avdd_1v2_s0: pldo-reg3 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-name = "avdd_1v2_s0";
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcc_3v3_s0: pldo-reg4 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-name = "vcc_3v3_s0";
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ };
+ };
+
+ vccio_sd_s0: pldo-reg5 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-name = "vccio_sd_s0";
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ pldo6_s3: pldo-reg6 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-name = "pldo6_s3";
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <1800000>;
+ };
+ };
+
+ vdd_0v75_s3: nldo-reg1 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <750000>;
+ regulator-max-microvolt = <750000>;
+ regulator-name = "vdd_0v75_s3";
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <750000>;
+ };
+ };
+
+ vdd_ddr_pll_s0: nldo-reg2 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <850000>;
+ regulator-max-microvolt = <850000>;
+ regulator-name = "vdd_ddr_pll_s0";
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ regulator-suspend-microvolt = <850000>;
+ };
+ };
+
+ avdd_0v75_s0: nldo-reg3 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <750000>;
+ regulator-max-microvolt = <750000>;
+ regulator-name = "avdd_0v75_s0";
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdd_0v85_s0: nldo-reg4 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <850000>;
+ regulator-max-microvolt = <850000>;
+ regulator-name = "vdd_0v85_s0";
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdd_0v75_s0: nldo-reg5 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <750000>;
+ regulator-max-microvolt = <750000>;
+ regulator-name = "vdd_0v75_s0";
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+ };
+ };
+};
+
+&uart2 {
+ pinctrl-0 = <&uart2m0_xfer>;
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/rockchip/rk3588-firefly-itx-3588j.dts b/arch/arm64/boot/dts/rockchip/rk3588-firefly-itx-3588j.dts
index 2be5251d3e3b..e086114c7634 100644
--- a/arch/arm64/boot/dts/rockchip/rk3588-firefly-itx-3588j.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3588-firefly-itx-3588j.dts
@@ -337,7 +337,7 @@
};
};
-&hdptxphy_hdmi0 {
+&hdptxphy0 {
status = "okay";
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3588-friendlyelec-cm3588-nas.dts b/arch/arm64/boot/dts/rockchip/rk3588-friendlyelec-cm3588-nas.dts
index b3a04ca370bb..8171fbfd819a 100644
--- a/arch/arm64/boot/dts/rockchip/rk3588-friendlyelec-cm3588-nas.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3588-friendlyelec-cm3588-nas.dts
@@ -335,7 +335,7 @@
};
};
-&hdptxphy_hdmi0 {
+&hdptxphy0 {
status = "okay";
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3588-friendlyelec-cm3588.dtsi b/arch/arm64/boot/dts/rockchip/rk3588-friendlyelec-cm3588.dtsi
index e3a9598b99fc..1af0a30866f6 100644
--- a/arch/arm64/boot/dts/rockchip/rk3588-friendlyelec-cm3588.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3588-friendlyelec-cm3588.dtsi
@@ -256,6 +256,10 @@
status = "okay";
};
+&pd_gpu {
+ domain-supply = <&vdd_gpu_s0>;
+};
+
&pinctrl {
gpio-leds {
led_sys_pin: led-sys-pin {
diff --git a/arch/arm64/boot/dts/rockchip/rk3588-h96-max-v58.dts b/arch/arm64/boot/dts/rockchip/rk3588-h96-max-v58.dts
index 4791b77f3571..73d8ce4fde2b 100644
--- a/arch/arm64/boot/dts/rockchip/rk3588-h96-max-v58.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3588-h96-max-v58.dts
@@ -140,6 +140,24 @@
regulator-min-microvolt = <5000000>;
regulator-max-microvolt = <5000000>;
};
+
+ spdif_dit: spdif-dit {
+ compatible = "linux,spdif-dit";
+ #sound-dai-cells = <0>;
+ };
+
+ spdif_sound: spdif-sound {
+ compatible = "simple-audio-card";
+ simple-audio-card,name = "SPDIF";
+
+ simple-audio-card,cpu {
+ sound-dai = <&spdif_tx0>;
+ };
+
+ simple-audio-card,codec {
+ sound-dai = <&spdif_dit>;
+ };
+ };
};
&combphy0_ps {
@@ -207,7 +225,7 @@
};
};
-&hdptxphy_hdmi0 {
+&hdptxphy0 {
status = "okay";
};
@@ -316,6 +334,10 @@
};
};
+&pd_gpu {
+ domain-supply = <&vdd_gpu_s0>;
+};
+
&pinctrl {
hym8563 {
hym8563_int: hym8563-int {
@@ -400,6 +422,12 @@
status = "okay";
};
+&spdif_tx0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&spdif0m1_tx>;
+ status = "okay";
+};
+
&spi2 {
assigned-clocks = <&cru CLK_SPI2>;
assigned-clock-rates = <200000000>;
diff --git a/arch/arm64/boot/dts/rockchip/rk3588-jaguar-pre-ict-tester.dtso b/arch/arm64/boot/dts/rockchip/rk3588-jaguar-pre-ict-tester.dtso
new file mode 100644
index 000000000000..9d44dfe2f30d
--- /dev/null
+++ b/arch/arm64/boot/dts/rockchip/rk3588-jaguar-pre-ict-tester.dtso
@@ -0,0 +1,171 @@
+// SPDX-License-Identifier: (GPL-2.0-or-later OR MIT)
+/*
+ * Copyright (c) 2024 Cherry Embedded Solutions GmbH
+ *
+ * Device Tree Overlay for the Pre-ICT tester adapter for the Mezzanine
+ * connector on RK3588 Jaguar.
+ *
+ * This adapter has a PCIe Gen2 x1 M.2 M-Key connector and two proprietary
+ * camera connectors (each with its own I2C bus, clock, reset and PWM lines, as
+ * well as 2-lane CSI).
+ *
+ * This adapter routes some GPIOs to power rails and loops together some other
+ * GPIOs.
+ *
+ * This adapter is used during manufacturing for validating proper soldering of
+ * the mezzanine connector.
+ */
+
+/dts-v1/;
+/plugin/;
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/pinctrl/rockchip.h>
+
+&{/} {
+ pre_ict_tester_vcc_1v2: regulator-pre-ict-tester-vcc-1v2 {
+ compatible = "regulator-fixed";
+ regulator-name = "pre_ict_tester_vcc_1v2";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ vin-supply = <&vcc_3v3_s3>;
+ };
+
+ pre_ict_tester_vcc_2v8: regulator-pre-ict-tester-vcc-2v8 {
+ compatible = "regulator-fixed";
+ regulator-name = "pre_ict_tester_vcc_2v8";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ vin-supply = <&vcc_3v3_s3>;
+ };
+};
+
+&combphy0_ps {
+ status = "okay";
+};
+
+&gpio3 {
+ pinctrl-0 = <&pre_ict_pwr2gpio>;
+ pinctrl-names = "default";
+};
+
+&pcie2x1l2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pcie2x1l2_perstn_m0>;
+ reset-gpios = <&gpio3 RK_PD1 GPIO_ACTIVE_HIGH>; /* PCIE20X1_2_PERSTN_M0 */
+ vpcie3v3-supply = <&vcc_3v3_s3>;
+ status = "okay";
+};
+
+&pinctrl {
+ pcie2x1l2 {
+ pcie2x1l2_perstn_m0: pcie2x1l2-perstn-m0 {
+ rockchip,pins = <3 RK_PD1 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ pre-ict-tester {
+ pre_ict_pwr2gpio: pre-ict-pwr2gpio-pins {
+ rockchip,pins =
+ /*
+ * GPIO3_A3 requires two power rails to be properly
+ * routed to the mezzanine connector to report a proper
+ * value: VCC_1V8_S0_1 and VCC_IN_2. It may report an
+ * incorrect value if VCC_1V8_S0_1 isn't properly routed,
+ * but GPIO3_C6 would catch this HW soldering issue.
+ * If VCC_IN_2 is properly routed, GPIO3_A3 should be
+ * LOW. The signal shall read HIGH in the event
+ * GPIO3_A3 isn't properly routed due to a soldering
+ * issue. Therefore, let's enforce a pull-up (which is
+ * the SoC default for this pin).
+ */
+ <3 RK_PA3 RK_FUNC_GPIO &pcfg_pull_up>,
+ /*
+ * GPIO3_A4 is directly routed to VCC_1V8_S0_2 power
+ * rail. It should be HIGH if all is properly soldered.
+ * To guarantee that, a pull-down is enforced (which is
+ * the SoC default for this pin) so that LOW is read if
+ * the loop doesn't exist on HW (soldering issue on
+ * either signal).
+ */
+ <3 RK_PA4 RK_FUNC_GPIO &pcfg_pull_down>,
+ /*
+ * GPIO3_B2 requires two power rails to be properly
+ * routed to the mezzanine connector to report a proper
+ * value: VCC_1V8_S0_1 and VCC_IN_1. It may report an
+ * incorrect value if VCC_1V8_S0_1 isn't properly routed,
+ * but GPIO3_C6 would catch this HW soldering issue.
+ * If VCC_IN_1 is properly routed, GPIO3_B2 should be
+ * LOW. This is an issue if GPIO3_B2 isn't properly
+ * routed due to a soldering issue, because GPIO3_B2's
+ * default bias is pull-down and would therefore read LOW,
+ * so the worst-case scenario and the pass scenario report
+ * the same value. Make GPIO3_B2 a pull-up so that a
+ * soldering issue on GPIO3_B2 reports HIGH but proper
+ * soldering reports LOW.
+ */
+ <3 RK_PB2 RK_FUNC_GPIO &pcfg_pull_up>,
+ /*
+ * GPIO3_C6 is directly routed to VCC_1V8_S0_1 power
+ * rail. It should be HIGH if all is properly soldered.
+ * This is an issue if GPIO3_C6 or VCC_1V8_S0_1 isn't
+ * properly routed due to a soldering issue, because
+ * GPIO3_C6's default bias is pull-up and it therefore reads HIGH
+ * in all cases:
+ * - GPIO3_C6 is floating (so HIGH) if GPIO3_C6 is not
+ * routed properly,
+ * - GPIO3_C6 is floating (so HIGH) if VCC_1V8_S0_1 is
+ * not routed properly,
+ * - GPIO3_C6 is HIGH if everything is proper,
+ * Make GPIO3_C6 a pull-down so that a soldering issue
+ * on GPIO3_C6 or VCC_1V8_S0_1 reports LOW but proper
+ * soldering reports HIGH.
+ */
+ <3 RK_PC6 RK_FUNC_GPIO &pcfg_pull_down>,
+ /*
+ * GPIO3_D2 is routed to VCC_5V0_1 power rail through a
+ * voltage divider on the adapter.
+ * It should be HIGH if all is properly soldered.
+ * To guarantee that, a pull-down is enforced (which is
+ * the SoC default for this pin) so that LOW is read if
+ * the loop doesn't exist on HW (soldering issue on
+ * either signal).
+ */
+ <3 RK_PD2 RK_FUNC_GPIO &pcfg_pull_down>,
+ /*
+ * GPIO3_D3 is routed to VCC_5V0_2 power rail through a
+ * voltage divider on the adapter.
+ * It should be HIGH if all is properly soldered.
+ * To guarantee that, a pull-down is enforced (which is
+ * the SoC default for this pin) so that LOW is read if
+ * the loop doesn't exist on HW (soldering issue on
+ * either signal).
+ */
+ <3 RK_PD3 RK_FUNC_GPIO &pcfg_pull_down>,
+ /*
+ * GPIO3_D4 is routed to VCC_3V3_S3_1 power rail through
+ * a voltage divider on the adapter.
+ * It should be HIGH if all is properly soldered.
+ * To guarantee that, a pull-down is enforced (which is
+ * the SoC default for this pin) so that LOW is read if
+ * the loop doesn't exist on HW (soldering issue on
+ * either signal).
+ */
+ <3 RK_PD4 RK_FUNC_GPIO &pcfg_pull_down>,
+ /*
+ * GPIO3_D5 is routed to VCC_3V3_S3_2 power rail through
+ * a voltage divider on the adapter.
+ * It should be HIGH if all is properly soldered.
+ * To guarantee that, a pull-down is enforced (which is
+ * the SoC default for this pin) so that LOW is read if
+ * the loop doesn't exist on HW (soldering issue on
+ * either signal).
+ */
+ <3 RK_PD5 RK_FUNC_GPIO &pcfg_pull_down>;
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/rockchip/rk3588-jaguar.dts b/arch/arm64/boot/dts/rockchip/rk3588-jaguar.dts
index 90f823b2c219..9fceea6c1398 100644
--- a/arch/arm64/boot/dts/rockchip/rk3588-jaguar.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3588-jaguar.dts
@@ -303,7 +303,7 @@
};
};
-&hdptxphy_hdmi0 {
+&hdptxphy0 {
status = "okay";
};
@@ -333,6 +333,56 @@
};
};
+ typec-portc@22 {
+ compatible = "fcs,fusb302";
+ reg = <0x22>;
+ interrupt-parent = <&gpio4>;
+ interrupts = <RK_PA3 IRQ_TYPE_LEVEL_LOW>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&cc_int1>;
+ vbus-supply = <&vcc_5v0_usb_c1>;
+
+ connector {
+ compatible = "usb-c-connector";
+ data-role = "dual";
+ label = "USBC-1 P11";
+ power-role = "source";
+ self-powered;
+ source-pdos =
+ <PDO_FIXED(5000, 1500, PDO_FIXED_DATA_SWAP | PDO_FIXED_USB_COMM)>;
+ vbus-supply = <&vcc_5v0_usb_c1>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ usbc0_hs: endpoint {
+ remote-endpoint = <&usb_host0_xhci_drd_sw>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ usbc0_ss: endpoint {
+ remote-endpoint = <&usbdp_phy0_typec_ss>;
+ };
+ };
+
+ port@2 {
+ reg = <2>;
+
+ usbc0_sbu: endpoint {
+ remote-endpoint = <&usbdp_phy0_typec_sbu>;
+ };
+ };
+ };
+ };
+ };
+
vdd_npu_s0: regulator@42 {
compatible = "rockchip,rk8602";
reg = <0x42>;
@@ -394,6 +444,56 @@
pinctrl-0 = <&i2c8m2_xfer>;
status = "okay";
+ typec-portc@22 {
+ compatible = "fcs,fusb302";
+ reg = <0x22>;
+ interrupt-parent = <&gpio4>;
+ interrupts = <RK_PA4 IRQ_TYPE_LEVEL_LOW>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&cc_int2>;
+ vbus-supply = <&vcc_5v0_usb_c2>;
+
+ connector {
+ compatible = "usb-c-connector";
+ data-role = "dual";
+ label = "USBC-2 P12";
+ power-role = "source";
+ self-powered;
+ source-pdos =
+ <PDO_FIXED(5000, 1500, PDO_FIXED_DATA_SWAP | PDO_FIXED_USB_COMM)>;
+ vbus-supply = <&vcc_5v0_usb_c2>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ usbc1_hs: endpoint {
+ remote-endpoint = <&usb_host1_xhci_drd_sw>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ usbc1_ss: endpoint {
+ remote-endpoint = <&usbdp_phy1_typec_ss>;
+ };
+ };
+
+ port@2 {
+ reg = <2>;
+
+ usbc1_sbu: endpoint {
+ remote-endpoint = <&usbdp_phy1_typec_sbu>;
+ };
+ };
+ };
+ };
+ };
+
vdd_cpu_big0_s0: regulator@42 {
compatible = "rockchip,rk8602";
reg = <0x42>;
@@ -451,6 +551,10 @@
status = "okay";
};
+&pd_gpu {
+ domain-supply = <&vdd_gpu_s0>;
+};
+
&pinctrl {
emmc {
emmc_reset: emmc-reset {
@@ -483,6 +587,26 @@
rockchip,pins = <0 RK_PC7 12 &pcfg_pull_none>;
};
};
+
+ usb3 {
+ cc_int1: cc-int1 {
+ rockchip,pins = <4 RK_PA3 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+
+ cc_int2: cc-int2 {
+ rockchip,pins = <4 RK_PA4 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+
+ typec0_sbu_dc_pins: typec0-sbu-dc-pins {
+ rockchip,pins = <4 RK_PB0 RK_FUNC_GPIO &pcfg_pull_down>,
+ <1 RK_PC3 RK_FUNC_GPIO &pcfg_pull_down>;
+ };
+
+ typec1_sbu_dc_pins: typec1-sbu-dc-pins {
+ rockchip,pins = <0 RK_PD4 RK_FUNC_GPIO &pcfg_pull_down>,
+ <1 RK_PB5 RK_FUNC_GPIO &pcfg_pull_down>;
+ };
+ };
};
&saradc {
@@ -503,7 +627,6 @@
non-removable;
pinctrl-names = "default";
pinctrl-0 = <&emmc_bus8 &emmc_cmd &emmc_clk &emmc_data_strobe>;
- supports-cqe;
vmmc-supply = <&vcc_3v3_s3>;
vqmmc-supply = <&vcc_1v8_s3>;
status = "okay";
@@ -851,6 +974,24 @@
status = "okay";
};
+/* USB-C P11 connector */
+&u2phy0 {
+ status = "okay";
+};
+
+&u2phy0_otg {
+ status = "okay";
+};
+
+/* USB-C P12 connector */
+&u2phy1 {
+ status = "okay";
+};
+
+&u2phy1_otg {
+ status = "okay";
+};
+
&u2phy2 {
status = "okay";
};
@@ -893,6 +1034,56 @@
status = "okay";
};
+/* Type-C on P11 */
+&usbdp_phy0 {
+ orientation-switch;
+ pinctrl-names = "default";
+ pinctrl-0 = <&typec0_sbu_dc_pins>;
+ sbu1-dc-gpios = <&gpio4 RK_PB0 GPIO_ACTIVE_HIGH>; /* Q7_USB_C0_SBU1_DC */
+ sbu2-dc-gpios = <&gpio1 RK_PC3 GPIO_ACTIVE_HIGH>; /* Q7_USB_C0_SBU2_DC */
+ status = "okay";
+
+ port {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ usbdp_phy0_typec_ss: endpoint@0 {
+ reg = <0>;
+ remote-endpoint = <&usbc0_ss>;
+ };
+
+ usbdp_phy0_typec_sbu: endpoint@1 {
+ reg = <1>;
+ remote-endpoint = <&usbc0_sbu>;
+ };
+ };
+};
+
+/* Type-C on P12 */
+&usbdp_phy1 {
+ orientation-switch;
+ pinctrl-names = "default";
+ pinctrl-0 = <&typec1_sbu_dc_pins>;
+ sbu1-dc-gpios = <&gpio0 RK_PD4 GPIO_ACTIVE_HIGH>; /* Q7_USB_C1_SBU1_DC */
+ sbu2-dc-gpios = <&gpio1 RK_PB5 GPIO_ACTIVE_HIGH>; /* Q7_USB_C1_SBU2_DC */
+ status = "okay";
+
+ port {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ usbdp_phy1_typec_ss: endpoint@0 {
+ reg = <0>;
+ remote-endpoint = <&usbc1_ss>;
+ };
+
+ usbdp_phy1_typec_sbu: endpoint@1 {
+ reg = <1>;
+ remote-endpoint = <&usbc1_sbu>;
+ };
+ };
+};
+
/* host0 on P10 USB-A */
&usb_host0_ehci {
status = "okay";
@@ -903,6 +1094,36 @@
status = "okay";
};
+/* host0 on P11 USB-C */
+&usb_host0_xhci {
+ usb-role-switch;
+ status = "okay";
+
+ port {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ usb_host0_xhci_drd_sw: endpoint {
+ remote-endpoint = <&usbc0_hs>;
+ };
+ };
+};
+
+/* host1 on P12 USB-C */
+&usb_host1_xhci {
+ usb-role-switch;
+ status = "okay";
+
+ port {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ usb_host1_xhci_drd_sw: endpoint {
+ remote-endpoint = <&usbc1_hs>;
+ };
+ };
+};
+
/* host1 on M.2 E-key */
&usb_host1_ehci {
status = "okay";
diff --git a/arch/arm64/boot/dts/rockchip/rk3588-mnt-reform2.dts b/arch/arm64/boot/dts/rockchip/rk3588-mnt-reform2.dts
new file mode 100644
index 000000000000..78a4e896f665
--- /dev/null
+++ b/arch/arm64/boot/dts/rockchip/rk3588-mnt-reform2.dts
@@ -0,0 +1,336 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright (c) 2021 Rockchip Electronics Co., Ltd.
+ * Copyright (c) 2024 MNT Research GmbH
+ */
+
+/dts-v1/;
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/pinctrl/rockchip.h>
+#include <dt-bindings/soc/rockchip,vop2.h>
+#include <dt-bindings/usb/pd.h>
+
+#include "rk3588-firefly-icore-3588q.dtsi"
+
+/ {
+ model = "MNT Reform 2 with RCORE RK3588 Module";
+ compatible = "mntre,reform2-rcore", "firefly,icore-3588q", "rockchip,rk3588";
+ chassis-type = "laptop";
+
+ aliases {
+ ethernet0 = &gmac0;
+ mmc1 = &sdmmc;
+ };
+
+ chosen {
+ stdout-path = "serial2:1500000n8";
+ };
+
+ backlight: backlight {
+ compatible = "pwm-backlight";
+ brightness-levels = <0 8 16 32 64 128 160 200 255>;
+ default-brightness-level = <128>;
+ enable-gpios = <&gpio2 RK_PB5 GPIO_ACTIVE_HIGH>;
+ pwms = <&pwm8 0 10000 0>;
+ };
+
+ gmac0_clkin: external-gmac0-clock {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <125000000>;
+ clock-output-names = "gmac0_clkin";
+ };
+
+ pcie30_avdd1v8: regulator-pcie30-avdd1v8 {
+ compatible = "regulator-fixed";
+ regulator-boot-on;
+ regulator-always-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-name = "pcie30_avdd1v8";
+ vin-supply = <&avcc_1v8_s0>;
+ };
+
+ pcie30_avdd0v75: regulator-pcie30-avdd0v75 {
+ compatible = "regulator-fixed";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <750000>;
+ regulator-max-microvolt = <750000>;
+ regulator-name = "pcie30_avdd0v75";
+ vin-supply = <&avdd_0v75_s0>;
+ };
+
+ vcc12v_dcin: regulator-vcc12v-dcin {
+ compatible = "regulator-fixed";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <12000000>;
+ regulator-max-microvolt = <12000000>;
+ regulator-name = "vcc12v_dcin";
+ };
+
+ vcc_1v1_nldo_s3: regulator-vcc-1v1-nldo-s3 {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc_1v1_nldo_s3";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1100000>;
+ regulator-max-microvolt = <1100000>;
+ vin-supply = <&vcc5v0_sys>;
+ };
+
+ vcc3v3_pcie30: regulator-vcc3v3-pcie30 {
+ compatible = "regulator-fixed";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-name = "vcc3v3_pcie30";
+ vin-supply = <&vcc12v_dcin>;
+ };
+
+ vcc5v0_host: regulator-vcc5v0-host {
+ compatible = "regulator-fixed";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ regulator-name = "vcc5v0_host";
+ };
+
+ vcc5v0_sys: regulator-vcc5v0-sys {
+ compatible = "regulator-fixed";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ regulator-name = "vcc5v0_sys";
+ vin-supply = <&vcc12v_dcin>;
+ };
+
+ vcc5v0_usb: regulator-vcc5v0-usb {
+ compatible = "regulator-fixed";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ regulator-name = "vcc5v0_usb";
+ vin-supply = <&vcc12v_dcin>;
+ };
+};
+
+&combphy0_ps {
+ status = "okay";
+};
+
+&gmac0 {
+ clock_in_out = "output";
+ phy-handle = <&rgmii_phy>;
+ phy-mode = "rgmii-id";
+ pinctrl-names = "default";
+ pinctrl-0 = <&gmac0_miim
+ &gmac0_tx_bus2
+ &gmac0_rx_bus2
+ &gmac0_rgmii_clk
+ &gmac0_rgmii_bus
+ &gmac0_clkinout
+ &eth_phy_reset>;
+ status = "okay";
+};
+
+&gpu {
+ mali-supply = <&vdd_gpu_s0>;
+ sram-supply = <&vdd_gpu_mem_s0>;
+ status = "okay";
+};
+
+&hdmi0 {
+ status = "okay";
+};
+
+&hdmi0_in {
+ hdmi0_in_vp2: endpoint {
+ remote-endpoint = <&vp2_out_hdmi0>;
+ };
+};
+
+&hdptxphy0 {
+ status = "okay";
+};
+
+&i2c6 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c6m0_xfer>;
+ status = "okay";
+
+ rtc@68 {
+ compatible = "nxp,pcf8523";
+ reg = <0x68>;
+ };
+};
+
+&mdio0 {
+ rgmii_phy: ethernet-phy@0 {
+ compatible = "ethernet-phy-ieee802.3-c22";
+ reg = <0x0>;
+ };
+};
+
+&pcie2x1l2 {
+ pinctrl-0 = <&pcie2_0_rst>;
+ reset-gpios = <&gpio3 RK_PD1 GPIO_ACTIVE_HIGH>;
+ status = "okay";
+};
+
+&pcie30phy {
+ status = "okay";
+};
+
+&pcie3x4 {
+ num-lanes = <1>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pcie3_reset>;
+ reset-gpios = <&gpio1 RK_PB4 GPIO_ACTIVE_HIGH>;
+ vpcie3v3-supply = <&vcc3v3_pcie30>;
+ status = "okay";
+};
+
+&pinctrl {
+ dp {
+ dp1_hpd: dp1-hpd {
+ rockchip,pins = <1 RK_PA5 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ pcie2 {
+ pcie2_0_rst: pcie2-0-rst {
+ rockchip,pins = <3 RK_PD1 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ pcie3 {
+ pcie3_reset: pcie3-reset {
+ rockchip,pins = <1 RK_PB4 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ eth_phy {
+ eth_phy_reset: eth-phy-reset {
+ rockchip,pins = <3 RK_PC7 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+};
+
+&pwm8 {
+ pinctrl-0 = <&pwm8m2_pins>;
+ status = "okay";
+};
+
+&saradc {
+ vref-supply = <&avcc_1v8_s0>;
+ status = "okay";
+};
+
+&sdmmc {
+ bus-width = <4>;
+ cap-sd-highspeed;
+ cd-gpios = <&gpio0 RK_PA4 GPIO_ACTIVE_LOW>;
+ disable-wp;
+ max-frequency = <40000000>;
+ no-1-8-v;
+ no-mmc;
+ no-sdio;
+ vmmc-supply = <&vcc3v3_pcie30>;
+ vqmmc-supply = <&vcc3v3_pcie30>;
+ status = "okay";
+};
+
+&tsadc {
+ status = "okay";
+};
+
+&u2phy0 {
+ status = "okay";
+};
+
+&u2phy0_otg {
+ status = "okay";
+};
+
+&u2phy1 {
+ status = "okay";
+};
+
+&u2phy1_otg {
+ status = "okay";
+};
+
+&u2phy2 {
+ status = "okay";
+};
+
+&u2phy2_host {
+ phy-supply = <&vcc5v0_host>;
+ status = "okay";
+};
+
+&u2phy3 {
+ status = "okay";
+};
+
+&u2phy3_host {
+ phy-supply = <&vcc5v0_host>;
+ status = "okay";
+};
+
+&usbdp_phy0 {
+ status = "okay";
+};
+
+&usbdp_phy1 {
+ status = "okay";
+};
+
+&usb_host0_ehci {
+ status = "okay";
+};
+
+&usb_host0_ohci {
+ status = "okay";
+};
+
+&usb_host0_xhci {
+ dr_mode = "host";
+ status = "okay";
+};
+
+&usb_host1_ehci {
+ status = "okay";
+};
+
+&usb_host1_ohci {
+ status = "okay";
+};
+
+&usb_host1_xhci {
+ dr_mode = "host";
+ status = "okay";
+};
+
+&vop {
+ status = "okay";
+};
+
+&vop_mmu {
+ status = "okay";
+};
+
+&vp2 {
+ vp2_out_hdmi0: endpoint@ROCKCHIP_VOP2_EP_HDMI0 {
+ reg = <ROCKCHIP_VOP2_EP_HDMI0>;
+ remote-endpoint = <&hdmi0_in_vp2>;
+ };
+};
diff --git a/arch/arm64/boot/dts/rockchip/rk3588-nanopc-t6.dtsi b/arch/arm64/boot/dts/rockchip/rk3588-nanopc-t6.dtsi
index cb350727d116..bbe500cc924b 100644
--- a/arch/arm64/boot/dts/rockchip/rk3588-nanopc-t6.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3588-nanopc-t6.dtsi
@@ -360,7 +360,7 @@
};
};
-&hdptxphy_hdmi0 {
+&hdptxphy0 {
status = "okay";
};
@@ -565,6 +565,10 @@
status = "okay";
};
+&pd_gpu {
+ domain-supply = <&vdd_gpu_s0>;
+};
+
&pinctrl {
gpio-leds {
sys_led_pin: sys-led-pin {
diff --git a/arch/arm64/boot/dts/rockchip/rk3588-ok3588-c.dts b/arch/arm64/boot/dts/rockchip/rk3588-ok3588-c.dts
index 1c0851b45eb8..fbe1d5c06d90 100644
--- a/arch/arm64/boot/dts/rockchip/rk3588-ok3588-c.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3588-ok3588-c.dts
@@ -312,6 +312,10 @@
status = "okay";
};
+&pd_gpu {
+ domain-supply = <&vdd_gpu_s0>;
+};
+
&pinctrl {
pcie2 {
pcie2_0_rst: pcie2-0-rst {
diff --git a/arch/arm64/boot/dts/rockchip/rk3588-orangepi-5-compact.dtsi b/arch/arm64/boot/dts/rockchip/rk3588-orangepi-5-compact.dtsi
index 87090cb98020..f748c6f760d8 100644
--- a/arch/arm64/boot/dts/rockchip/rk3588-orangepi-5-compact.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3588-orangepi-5-compact.dtsi
@@ -7,9 +7,6 @@
#include "rk3588-orangepi-5.dtsi"
/ {
- model = "Xunlong Orange Pi 5 Max";
- compatible = "xunlong,orangepi-5-max", "rockchip,rk3588";
-
vcc5v0_usb30_otg: vcc5v0-usb30-otg-regulator {
compatible = "regulator-fixed";
enable-active-high;
@@ -62,18 +59,12 @@
&led_blue_pwm {
/* PWM_LED1 */
- pwms = <&pwm4 0 25000 0>;
status = "okay";
};
-&led_green_pwm {
- /* PWM_LED2 */
- pwms = <&pwm5 0 25000 0>;
-};
-
/* phy2 */
&pcie2x1l1 {
- reset-gpios = <&gpio4 RK_PD4 GPIO_ACTIVE_HIGH>;
+ reset-gpios = <&gpio3 RK_PD4 GPIO_ACTIVE_HIGH>;
vpcie3v3-supply = <&vcc3v3_pcie_eth>;
status = "okay";
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3588-orangepi-5-max.dts b/arch/arm64/boot/dts/rockchip/rk3588-orangepi-5-max.dts
index ce44549babf4..8b1d35760c3b 100644
--- a/arch/arm64/boot/dts/rockchip/rk3588-orangepi-5-max.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3588-orangepi-5-max.dts
@@ -21,6 +21,17 @@
};
};
};
+
+ hdmi1-con {
+ compatible = "hdmi-connector";
+ type = "a";
+
+ port {
+ hdmi1_con_in: endpoint {
+ remote-endpoint = <&hdmi1_out_con>;
+ };
+ };
+ };
};
&hdmi0 {
@@ -39,10 +50,57 @@
};
};
-&hdptxphy_hdmi0 {
+&hdmi0_sound {
+ status = "okay";
+};
+
+&hdmi1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&hdmim0_tx1_cec &hdmim0_tx1_hpd
+ &hdmim1_tx1_scl &hdmim1_tx1_sda>;
+ status = "okay";
+};
+
+&hdmi1_in {
+ hdmi1_in_vp1: endpoint {
+ remote-endpoint = <&vp1_out_hdmi1>;
+ };
+};
+
+&hdmi1_out {
+ hdmi1_out_con: endpoint {
+ remote-endpoint = <&hdmi1_con_in>;
+ };
+};
+
+&hdmi1_sound {
status = "okay";
};
+&hdptxphy0 {
+ status = "okay";
+};
+
+&hdptxphy1 {
+ status = "okay";
+};
+
+&i2s5_8ch {
+ status = "okay";
+};
+
+&i2s6_8ch {
+ status = "okay";
+};
+
+&led_blue_pwm {
+ pwms = <&pwm4 0 25000 0>;
+};
+
+&led_green_pwm {
+ pwms = <&pwm5 0 25000 0>;
+};
+
&pinctrl {
usb {
@@ -58,3 +116,10 @@
remote-endpoint = <&hdmi0_in_vp0>;
};
};
+
+&vp1 {
+ vp1_out_hdmi1: endpoint@ROCKCHIP_VOP2_EP_HDMI1 {
+ reg = <ROCKCHIP_VOP2_EP_HDMI1>;
+ remote-endpoint = <&hdmi1_in_vp1>;
+ };
+};
diff --git a/arch/arm64/boot/dts/rockchip/rk3588-orangepi-5-plus.dts b/arch/arm64/boot/dts/rockchip/rk3588-orangepi-5-plus.dts
index 255e33c5dbdc..121e4d1c3fa5 100644
--- a/arch/arm64/boot/dts/rockchip/rk3588-orangepi-5-plus.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3588-orangepi-5-plus.dts
@@ -26,6 +26,17 @@
};
};
+ hdmi1-con {
+ compatible = "hdmi-connector";
+ type = "a";
+
+ port {
+ hdmi1_con_in: endpoint {
+ remote-endpoint = <&hdmi1_out_con>;
+ };
+ };
+ };
+
ir-receiver {
compatible = "gpio-ir-receiver";
gpios = <&gpio4 RK_PB3 GPIO_ACTIVE_LOW>;
@@ -113,6 +124,10 @@
status = "okay";
};
+&hdmi0_sound {
+ status = "okay";
+};
+
&hdmi0_in {
hdmi0_in_vp0: endpoint {
remote-endpoint = <&vp0_out_hdmi0>;
@@ -125,7 +140,31 @@
};
};
-&hdptxphy_hdmi0 {
+&hdmi1 {
+ status = "okay";
+};
+
+&hdmi1_in {
+ hdmi1_in_vp1: endpoint {
+ remote-endpoint = <&vp1_out_hdmi1>;
+ };
+};
+
+&hdmi1_out {
+ hdmi1_out_con: endpoint {
+ remote-endpoint = <&hdmi1_con_in>;
+ };
+};
+
+&hdmi1_sound {
+ status = "okay";
+};
+
+&hdptxphy0 {
+ status = "okay";
+};
+
+&hdptxphy1 {
status = "okay";
};
@@ -189,6 +228,14 @@
};
};
+&i2s5_8ch {
+ status = "okay";
+};
+
+&i2s6_8ch {
+ status = "okay";
+};
+
&led_blue_gpio {
gpios = <&gpio3 RK_PA6 GPIO_ACTIVE_HIGH>;
status = "okay";
@@ -342,3 +389,10 @@
remote-endpoint = <&hdmi0_in_vp0>;
};
};
+
+&vp1 {
+ vp1_out_hdmi1: endpoint@ROCKCHIP_VOP2_EP_HDMI1 {
+ reg = <ROCKCHIP_VOP2_EP_HDMI1>;
+ remote-endpoint = <&hdmi1_in_vp1>;
+ };
+};
diff --git a/arch/arm64/boot/dts/rockchip/rk3588-orangepi-5-ultra.dts b/arch/arm64/boot/dts/rockchip/rk3588-orangepi-5-ultra.dts
new file mode 100644
index 000000000000..f8c6c080e418
--- /dev/null
+++ b/arch/arm64/boot/dts/rockchip/rk3588-orangepi-5-ultra.dts
@@ -0,0 +1,83 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+
+/dts-v1/;
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/pinctrl/rockchip.h>
+#include <dt-bindings/pwm/pwm.h>
+#include <dt-bindings/soc/rockchip,vop2.h>
+#include "rk3588-orangepi-5-compact.dtsi"
+
+/ {
+ model = "Xunlong Orange Pi 5 Ultra";
+ compatible = "xunlong,orangepi-5-ultra", "rockchip,rk3588";
+
+ hdmi1-con {
+ compatible = "hdmi-connector";
+ type = "a";
+
+ port {
+ hdmi1_con_in: endpoint {
+ remote-endpoint = <&hdmi1_out_con>;
+ };
+ };
+ };
+};
+
+&hdmi1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&hdmim0_tx1_cec &hdmim0_tx1_hpd
+ &hdmim1_tx1_scl &hdmim1_tx1_sda>;
+ status = "okay";
+};
+
+&hdmi1_in {
+ hdmi1_in_vp0: endpoint {
+ remote-endpoint = <&vp0_out_hdmi1>;
+ };
+};
+
+&hdmi1_out {
+ hdmi1_out_con: endpoint {
+ remote-endpoint = <&hdmi1_con_in>;
+ };
+};
+
+&hdmi1_sound {
+ status = "okay";
+};
+
+&hdptxphy1 {
+ status = "okay";
+};
+
+&i2s6_8ch {
+ status = "okay";
+};
+
+&led_blue_pwm {
+ pwms = <&pwm4 0 25000 PWM_POLARITY_INVERTED>;
+};
+
+&led_green_pwm {
+ pwms = <&pwm5 0 25000 PWM_POLARITY_INVERTED>;
+};
+
+&pinctrl {
+ usb {
+ usb_otg_pwren: usb-otg-pwren {
+ rockchip,pins = <4 RK_PB1 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+};
+
+&vcc5v0_usb30_otg {
+ gpios = <&gpio4 RK_PB1 GPIO_ACTIVE_HIGH>;
+};
+
+&vp0 {
+ vp0_out_hdmi1: endpoint@ROCKCHIP_VOP2_EP_HDMI1 {
+ reg = <ROCKCHIP_VOP2_EP_HDMI1>;
+ remote-endpoint = <&hdmi1_in_vp0>;
+ };
+};
diff --git a/arch/arm64/boot/dts/rockchip/rk3588-orangepi-5.dtsi b/arch/arm64/boot/dts/rockchip/rk3588-orangepi-5.dtsi
index a98e804a0949..91d56c34a1e4 100644
--- a/arch/arm64/boot/dts/rockchip/rk3588-orangepi-5.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3588-orangepi-5.dtsi
@@ -276,7 +276,7 @@
/* PLDO2 vcca 1.8V, BUCK8 gated by PLDO2 being enabled */
es8388: audio-codec@11 {
- compatible = "everest,es8388";
+ compatible = "everest,es8388", "everest,es8328";
reg = <0x11>;
clocks = <&cru I2S0_8CH_MCLKOUT>;
AVDD-supply = <&vcc_3v3_s0>;
@@ -348,6 +348,10 @@
status = "okay";
};
+&pd_gpu {
+ domain-supply = <&vdd_gpu_s0>;
+};
+
&saradc {
vref-supply = <&vcc_1v8_s0>;
status = "okay";
diff --git a/arch/arm64/boot/dts/rockchip/rk3588-quartzpro64.dts b/arch/arm64/boot/dts/rockchip/rk3588-quartzpro64.dts
index 088cfade6f6f..78aaa6635b5d 100644
--- a/arch/arm64/boot/dts/rockchip/rk3588-quartzpro64.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3588-quartzpro64.dts
@@ -311,7 +311,7 @@
status = "okay";
es8388: audio-codec@11 {
- compatible = "everest,es8388";
+ compatible = "everest,es8388", "everest,es8328";
reg = <0x11>;
assigned-clocks = <&cru I2S0_8CH_MCLKOUT>;
assigned-clock-rates = <12288000>;
@@ -347,6 +347,10 @@
};
};
+&pd_gpu {
+ domain-supply = <&vdd_gpu_s0>;
+};
+
&pinctrl {
hym8563 {
hym8563_int: hym8563-int {
diff --git a/arch/arm64/boot/dts/rockchip/rk3588-rock-5-itx.dts b/arch/arm64/boot/dts/rockchip/rk3588-rock-5-itx.dts
index 6d68f70284e4..7de17117df7a 100644
--- a/arch/arm64/boot/dts/rockchip/rk3588-rock-5-itx.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3588-rock-5-itx.dts
@@ -11,6 +11,7 @@
#include <dt-bindings/leds/common.h>
#include <dt-bindings/pinctrl/rockchip.h>
#include <dt-bindings/pwm/pwm.h>
+#include <dt-bindings/soc/rockchip,vop2.h>
#include "dt-bindings/usb/pd.h"
#include "rk3588.dtsi"
@@ -72,6 +73,17 @@
};
};
+ hdmi1-con {
+ compatible = "hdmi-connector";
+ type = "a";
+
+ port {
+ hdmi1_con_in: endpoint {
+ remote-endpoint = <&hdmi1_out_con>;
+ };
+ };
+ };
+
/* Unnamed gated oscillator: 100MHz,3.3V,3225 */
pcie30_port0_refclk: pcie30_port1_refclk: pcie-oscillator {
compatible = "gated-fixed-clock";
@@ -261,6 +273,28 @@
status = "okay";
};
+&hdmi1 {
+ pinctrl-0 = <&hdmim0_tx1_cec &hdmim0_tx1_hpd
+ &hdmim1_tx1_scl &hdmim1_tx1_sda>;
+ status = "okay";
+};
+
+&hdmi1_in {
+ hdmi1_in_vp1: endpoint {
+ remote-endpoint = <&vp1_out_hdmi1>;
+ };
+};
+
+&hdmi1_out {
+ hdmi1_out_con: endpoint {
+ remote-endpoint = <&hdmi1_con_in>;
+ };
+};
+
+&hdptxphy1 {
+ status = "okay";
+};
+
&i2c0 {
pinctrl-names = "default";
pinctrl-0 = <&i2c0m2_xfer>;
@@ -564,6 +598,10 @@
status = "okay";
};
+&pd_gpu {
+ domain-supply = <&vdd_gpu_s0>;
+};
+
&pinctrl {
hym8563 {
rtc_int: rtc-int {
@@ -690,10 +728,9 @@
&sdhci {
bus-width = <8>;
- max-frequency = <200000000>;
+ max-frequency = <150000000>;
mmc-hs400-1_8v;
mmc-hs400-enhanced-strobe;
- mmc-hs200-1_8v;
no-sdio;
no-sd;
non-removable;
@@ -1209,3 +1246,18 @@
rockchip,dp-lane-mux = <2 3>;
status = "okay";
};
+
+&vop {
+ status = "okay";
+};
+
+&vop_mmu {
+ status = "okay";
+};
+
+&vp1 {
+ vp1_out_hdmi1: endpoint@ROCKCHIP_VOP2_EP_HDMI1 {
+ reg = <ROCKCHIP_VOP2_EP_HDMI1>;
+ remote-endpoint = <&hdmi1_in_vp1>;
+ };
+};
diff --git a/arch/arm64/boot/dts/rockchip/rk3588-rock-5b.dts b/arch/arm64/boot/dts/rockchip/rk3588-rock-5b.dts
index d597112f1d5b..d22068475c5d 100644
--- a/arch/arm64/boot/dts/rockchip/rk3588-rock-5b.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3588-rock-5b.dts
@@ -49,6 +49,17 @@
};
};
+ hdmi1-con {
+ compatible = "hdmi-connector";
+ type = "a";
+
+ port {
+ hdmi1_con_in: endpoint {
+ remote-endpoint = <&hdmi1_out_con>;
+ };
+ };
+ };
+
leds {
compatible = "gpio-leds";
pinctrl-names = "default";
@@ -220,7 +231,48 @@
};
};
-&hdptxphy_hdmi0 {
+&hdmi0_sound {
+ status = "okay";
+};
+
+&hdmi1 {
+ pinctrl-0 = <&hdmim0_tx1_cec &hdmim0_tx1_hpd
+ &hdmim1_tx1_scl &hdmim1_tx1_sda>;
+ status = "okay";
+};
+
+&hdmi1_in {
+ hdmi1_in_vp1: endpoint {
+ remote-endpoint = <&vp1_out_hdmi1>;
+ };
+};
+
+&hdmi1_out {
+ hdmi1_out_con: endpoint {
+ remote-endpoint = <&hdmi1_con_in>;
+ };
+};
+
+&hdmi1_sound {
+ status = "okay";
+};
+
+&hdmi_receiver_cma {
+ status = "okay";
+};
+
+&hdmi_receiver {
+ hpd-gpios = <&gpio1 RK_PC6 GPIO_ACTIVE_LOW>;
+ pinctrl-0 = <&hdmim1_rx_cec &hdmim1_rx_hpdin &hdmim1_rx_scl &hdmim1_rx_sda &hdmirx_hpd>;
+ pinctrl-names = "default";
+ status = "okay";
+};
+
+&hdptxphy0 {
+ status = "okay";
+};
+
+&hdptxphy1 {
status = "okay";
};
@@ -318,6 +370,14 @@
};
};
+&i2s5_8ch {
+ status = "okay";
+};
+
+&i2s6_8ch {
+ status = "okay";
+};
+
&package_thermal {
polling-delay = <1000>;
@@ -376,7 +436,17 @@
status = "okay";
};
+&pd_gpu {
+ domain-supply = <&vdd_gpu_s0>;
+};
+
&pinctrl {
+ hdmirx {
+ hdmirx_hpd: hdmirx-5v-detection {
+ rockchip,pins = <1 RK_PC6 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
hym8563 {
hym8563_int: hym8563-int {
rockchip,pins = <0 RK_PB0 RK_FUNC_GPIO &pcfg_pull_none>;
@@ -891,11 +961,11 @@
status = "okay";
};
-&vop_mmu {
+&vop {
status = "okay";
};
-&vop {
+&vop_mmu {
status = "okay";
};
@@ -905,3 +975,10 @@
remote-endpoint = <&hdmi0_in_vp0>;
};
};
+
+&vp1 {
+ vp1_out_hdmi1: endpoint@ROCKCHIP_VOP2_EP_HDMI1 {
+ reg = <ROCKCHIP_VOP2_EP_HDMI1>;
+ remote-endpoint = <&hdmi1_in_vp1>;
+ };
+};
diff --git a/arch/arm64/boot/dts/rockchip/rk3588-tiger-haikou.dts b/arch/arm64/boot/dts/rockchip/rk3588-tiger-haikou.dts
index 3187b4918a30..a3d8ff647839 100644
--- a/arch/arm64/boot/dts/rockchip/rk3588-tiger-haikou.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3588-tiger-haikou.dts
@@ -189,7 +189,7 @@
};
};
-&hdptxphy_hdmi0 {
+&hdptxphy0 {
status = "okay";
};
@@ -310,8 +310,10 @@
status = "okay";
};
+/* DB9 RS232/RS485 when SW2 is in "UART1" mode */
&uart5 {
rts-gpios = <&gpio3 RK_PB3 GPIO_ACTIVE_HIGH>;
+ status = "okay";
};
&usbdp_phy0 {
diff --git a/arch/arm64/boot/dts/rockchip/rk3588-tiger.dtsi b/arch/arm64/boot/dts/rockchip/rk3588-tiger.dtsi
index 81a6a05ce13b..c4933a08dd1e 100644
--- a/arch/arm64/boot/dts/rockchip/rk3588-tiger.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3588-tiger.dtsi
@@ -173,7 +173,6 @@
&i2c2 {
pinctrl-0 = <&i2c2m3_xfer>;
- status = "okay";
};
&i2c2m3_xfer {
@@ -336,6 +335,10 @@
reset-gpios = <&gpio3 RK_PB6 GPIO_ACTIVE_HIGH>;
};
+&pd_gpu {
+ domain-supply = <&vdd_gpu_s0>;
+};
+
&pinctrl {
emmc {
emmc_reset: emmc-reset {
@@ -386,7 +389,6 @@
non-removable;
pinctrl-names = "default";
pinctrl-0 = <&emmc_bus8 &emmc_cmd &emmc_clk &emmc_data_strobe>;
- supports-cqe;
vmmc-supply = <&vcc_3v3_s3>;
vqmmc-supply = <&vcc_1v8_s3>;
status = "okay";
diff --git a/arch/arm64/boot/dts/rockchip/rk3588-toybrick-x0.dts b/arch/arm64/boot/dts/rockchip/rk3588-toybrick-x0.dts
index 3cbee5b97470..5a428e00ab93 100644
--- a/arch/arm64/boot/dts/rockchip/rk3588-toybrick-x0.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3588-toybrick-x0.dts
@@ -289,6 +289,10 @@
};
};
+&pd_gpu {
+ domain-supply = <&vdd_gpu_s0>;
+};
+
&pinctrl {
rtl8211f {
rtl8211f_rst: rtl8211f-rst {
diff --git a/arch/arm64/boot/dts/rockchip/rk3588-turing-rk1.dtsi b/arch/arm64/boot/dts/rockchip/rk3588-turing-rk1.dtsi
index 6bc46734cc14..711ac4f2c7cb 100644
--- a/arch/arm64/boot/dts/rockchip/rk3588-turing-rk1.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3588-turing-rk1.dtsi
@@ -287,6 +287,10 @@
status = "okay";
};
+&pd_gpu {
+ domain-supply = <&vdd_gpu_s0>;
+};
+
&pinctrl {
fan {
fan_int: fan-int {
diff --git a/arch/arm64/boot/dts/rockchip/rk3588s-coolpi-4b.dts b/arch/arm64/boot/dts/rockchip/rk3588s-coolpi-4b.dts
index 9c394f733bbf..8b717c4017a4 100644
--- a/arch/arm64/boot/dts/rockchip/rk3588s-coolpi-4b.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3588s-coolpi-4b.dts
@@ -236,7 +236,7 @@
};
};
-&hdptxphy_hdmi0 {
+&hdptxphy0 {
status = "okay";
};
@@ -361,6 +361,10 @@
status = "okay";
};
+&pd_gpu {
+ domain-supply = <&vdd_gpu_s0>;
+};
+
&pinctrl {
hym8563 {
hym8563_int: hym8563-int {
@@ -429,7 +433,7 @@
};
&pwm13 {
- pinctrl-names = "active";
+ pinctrl-names = "default";
pinctrl-0 = <&pwm13m2_pins>;
status = "okay";
};
@@ -803,6 +807,14 @@
status = "okay";
};
+&u2phy0 {
+ status = "okay";
+};
+
+&u2phy0_otg {
+ status = "okay";
+};
+
&u2phy2 {
status = "okay";
};
@@ -832,6 +844,16 @@
pinctrl-0 = <&uart9m2_xfer &uart9m2_ctsn>;
};
+&usbdp_phy0 {
+ /*
+ * USBDP PHY0 is wired to a USB3 Type-A OTG connector. Additionally,
+ * the differential pairs 0+1 and the aux channel are wired to a
+ * mini DP connector.
+ */
+ rockchip,dp-lane-mux = <0 1>;
+ status = "okay";
+};
+
&usb_host0_ehci {
status = "okay";
};
@@ -840,6 +862,11 @@
status = "okay";
};
+&usb_host0_xhci {
+ extcon = <&u2phy0>;
+ status = "okay";
+};
+
&usb_host1_ehci {
status = "okay";
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3588s-evb1-v10.dts b/arch/arm64/boot/dts/rockchip/rk3588s-evb1-v10.dts
index bc4077575beb..9f4aca9c2e3f 100644
--- a/arch/arm64/boot/dts/rockchip/rk3588s-evb1-v10.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3588s-evb1-v10.dts
@@ -242,7 +242,7 @@
status = "okay";
es8388: audio-codec@11 {
- compatible = "everest,es8388";
+ compatible = "everest,es8388", "everest,es8328";
reg = <0x11>;
clocks = <&cru I2S0_8CH_MCLKOUT>;
assigned-clocks = <&cru I2S0_8CH_MCLKOUT>;
@@ -340,6 +340,10 @@
status = "okay";
};
+&pd_gpu {
+ domain-supply = <&vdd_gpu_s0>;
+};
+
&pinctrl {
audio {
hp_detect: headphone-detect {
diff --git a/arch/arm64/boot/dts/rockchip/rk3588s-gameforce-ace.dts b/arch/arm64/boot/dts/rockchip/rk3588s-gameforce-ace.dts
index 812bba0aef1a..873a2bd6a6de 100644
--- a/arch/arm64/boot/dts/rockchip/rk3588s-gameforce-ace.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3588s-gameforce-ace.dts
@@ -611,7 +611,7 @@
status = "okay";
es8388: audio-codec@11 {
- compatible = "everest,es8388";
+ compatible = "everest,es8388", "everest,es8328";
reg = <0x11>;
assigned-clock-rates = <12288000>;
assigned-clocks = <&cru I2S0_8CH_MCLKOUT>;
@@ -675,6 +675,10 @@
status = "okay";
};
+&pd_gpu {
+ domain-supply = <&vdd_gpu_s0>;
+};
+
&pinctrl {
audio-amplifier {
headphone_amplifier_en: headphone-amplifier-en {
diff --git a/arch/arm64/boot/dts/rockchip/rk3588s-indiedroid-nova.dts b/arch/arm64/boot/dts/rockchip/rk3588s-indiedroid-nova.dts
index 4a3aa80f2226..4189a88ecf40 100644
--- a/arch/arm64/boot/dts/rockchip/rk3588s-indiedroid-nova.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3588s-indiedroid-nova.dts
@@ -278,7 +278,7 @@
};
};
-&hdptxphy_hdmi0 {
+&hdptxphy0 {
status = "okay";
};
@@ -412,7 +412,7 @@
status = "okay";
es8388: audio-codec@11 {
- compatible = "everest,es8388";
+ compatible = "everest,es8388", "everest,es8328";
reg = <0x11>;
assigned-clock-rates = <12288000>;
assigned-clocks = <&cru I2S0_8CH_MCLKOUT>;
@@ -455,6 +455,10 @@
status = "okay";
};
+&pd_gpu {
+ domain-supply = <&vdd_gpu_s0>;
+};
+
&pinctrl {
bluetooth-pins {
bt_reset: bt-reset {
diff --git a/arch/arm64/boot/dts/rockchip/rk3588s-khadas-edge2.dts b/arch/arm64/boot/dts/rockchip/rk3588s-khadas-edge2.dts
index ac48e7fd3923..88a5e822ed17 100644
--- a/arch/arm64/boot/dts/rockchip/rk3588s-khadas-edge2.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3588s-khadas-edge2.dts
@@ -233,6 +233,10 @@
};
};
+&pd_gpu {
+ domain-supply = <&vdd_gpu_s0>;
+};
+
&pinctrl {
vdd_sd {
vdd_sd_en: vdd-sd-en {
diff --git a/arch/arm64/boot/dts/rockchip/rk3588s-nanopi-r6.dtsi b/arch/arm64/boot/dts/rockchip/rk3588s-nanopi-r6.dtsi
index d2eddea1840f..fbf062ec3bf1 100644
--- a/arch/arm64/boot/dts/rockchip/rk3588s-nanopi-r6.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3588s-nanopi-r6.dtsi
@@ -251,7 +251,7 @@
};
};
-&hdptxphy_hdmi0 {
+&hdptxphy0 {
status = "okay";
};
@@ -359,6 +359,10 @@
status = "okay";
};
+&pd_gpu {
+ domain-supply = <&vdd_gpu_s0>;
+};
+
&pinctrl {
gpio-key {
key1_pin: key1-pin {
diff --git a/arch/arm64/boot/dts/rockchip/rk3588s-odroid-m2.dts b/arch/arm64/boot/dts/rockchip/rk3588s-odroid-m2.dts
index 8f034c6d494c..a72063c55140 100644
--- a/arch/arm64/boot/dts/rockchip/rk3588s-odroid-m2.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3588s-odroid-m2.dts
@@ -264,7 +264,7 @@
};
};
-&hdptxphy_hdmi0 {
+&hdptxphy0 {
status = "okay";
};
@@ -433,6 +433,10 @@
status = "okay";
};
+&pd_gpu {
+ domain-supply = <&vdd_gpu_s0>;
+};
+
&pinctrl {
lcd {
lcd_pwren: lcd-pwren {
diff --git a/arch/arm64/boot/dts/rockchip/rk3588s-orangepi-5.dtsi b/arch/arm64/boot/dts/rockchip/rk3588s-orangepi-5.dtsi
index d86aeacca238..4fedc50cce8c 100644
--- a/arch/arm64/boot/dts/rockchip/rk3588s-orangepi-5.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3588s-orangepi-5.dtsi
@@ -197,7 +197,11 @@
};
};
-&hdptxphy_hdmi0 {
+&hdmi0_sound {
+ status = "okay";
+};
+
+&hdptxphy0 {
status = "okay";
};
@@ -268,7 +272,7 @@
status = "okay";
es8388: audio-codec@10 {
- compatible = "everest,es8388";
+ compatible = "everest,es8388", "everest,es8328";
reg = <0x10>;
clocks = <&cru I2S1_8CH_MCLKOUT>;
AVDD-supply = <&vcc_3v3_s0>;
@@ -355,6 +359,10 @@
status = "okay";
};
+&i2s5_8ch {
+ status = "okay";
+};
+
&mdio1 {
rgmii_phy1: ethernet-phy@1 {
compatible = "ethernet-phy-ieee802.3-c22";
@@ -365,6 +373,10 @@
};
};
+&pd_gpu {
+ domain-supply = <&vdd_gpu_s0>;
+};
+
&pinctrl {
hym8563 {
hym8563_int: hym8563-int {
diff --git a/arch/arm64/boot/dts/rockchip/rk3588s-rock-5a.dts b/arch/arm64/boot/dts/rockchip/rk3588s-rock-5a.dts
index 70a43432bdc5..f894742b1ebe 100644
--- a/arch/arm64/boot/dts/rockchip/rk3588s-rock-5a.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3588s-rock-5a.dts
@@ -334,7 +334,7 @@
};
};
-&hdptxphy_hdmi0 {
+&hdptxphy0 {
status = "okay";
};
@@ -359,6 +359,10 @@
status = "okay";
};
+&pd_gpu {
+ domain-supply = <&vdd_gpu_s0>;
+};
+
&pinctrl {
leds {
io_led: io-led {
diff --git a/arch/arm64/boot/dts/rockchip/rk3588s-rock-5c.dts b/arch/arm64/boot/dts/rockchip/rk3588s-rock-5c.dts
index 9b14d5383cdc..dd7317bab613 100644
--- a/arch/arm64/boot/dts/rockchip/rk3588s-rock-5c.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3588s-rock-5c.dts
@@ -68,10 +68,10 @@
};
};
- fan {
+ fan: fan {
compatible = "pwm-fan";
#cooling-cells = <2>;
- cooling-levels = <0 64 128 192 255>;
+ cooling-levels = <0 24 44 64 128 192 255>;
fan-supply = <&vcc_5v0>;
pwms = <&pwm3 0 10000 0>;
};
@@ -278,7 +278,7 @@
};
};
-&hdptxphy_hdmi0 {
+&hdptxphy0 {
status = "okay";
};
@@ -417,6 +417,36 @@
};
};
+&package_thermal {
+ polling-delay = <1000>;
+
+ trips {
+ package_fan0: package-fan0 {
+ temperature = <55000>;
+ hysteresis = <2000>;
+ type = "active";
+ };
+
+ package_fan1: package-fan1 {
+ temperature = <65000>;
+ hysteresis = <2000>;
+ type = "active";
+ };
+ };
+
+ cooling-maps {
+ map0 {
+ trip = <&package_fan0>;
+ cooling-device = <&fan THERMAL_NO_LIMIT 1>;
+ };
+
+ map1 {
+ trip = <&package_fan1>;
+ cooling-device = <&fan 2 THERMAL_NO_LIMIT>;
+ };
+ };
+};
+
&pcie2x1l2 {
pinctrl-names = "default";
pinctrl-0 = <&pcie20x1_2_perstn_m0>;
@@ -425,6 +455,10 @@
status = "okay";
};
+&pd_gpu {
+ domain-supply = <&vdd_gpu_s0>;
+};
+
&pinctrl {
leds {
led_pins: led-pins {
@@ -843,6 +877,8 @@
};
&tsadc {
+ rockchip,hw-tshut-mode = <1>; /* tshut mode 0:CRU 1:GPIO */
+ rockchip,hw-tshut-polarity = <0>; /* tshut polarity 0:LOW 1:HIGH */
status = "okay";
};
diff --git a/arch/arm64/boot/dts/st/Makefile b/arch/arm64/boot/dts/st/Makefile
index 881fe1296c58..63908113ae36 100644
--- a/arch/arm64/boot/dts/st/Makefile
+++ b/arch/arm64/boot/dts/st/Makefile
@@ -1,2 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
-dtb-$(CONFIG_ARCH_STM32) += stm32mp257f-ev1.dtb
+dtb-$(CONFIG_ARCH_STM32) += \
+ stm32mp215f-dk.dtb \
+ stm32mp235f-dk.dtb \
+ stm32mp257f-dk.dtb \
+ stm32mp257f-ev1.dtb
diff --git a/arch/arm64/boot/dts/st/stm32mp211.dtsi b/arch/arm64/boot/dts/st/stm32mp211.dtsi
new file mode 100644
index 000000000000..6dd1377f3e1d
--- /dev/null
+++ b/arch/arm64/boot/dts/st/stm32mp211.dtsi
@@ -0,0 +1,128 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+/*
+ * Copyright (C) STMicroelectronics 2025 - All Rights Reserved
+ * Author: Alexandre Torgue <alexandre.torgue@foss.st.com> for STMicroelectronics.
+ */
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+/ {
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpu0: cpu@0 {
+ compatible = "arm,cortex-a35";
+ reg = <0>;
+ device_type = "cpu";
+ enable-method = "psci";
+ };
+ };
+
+ arm-pmu {
+ compatible = "arm,cortex-a35-pmu";
+ interrupts = <GIC_SPI 368 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-affinity = <&cpu0>;
+ interrupt-parent = <&intc>;
+ };
+
+ arm_wdt: watchdog {
+ compatible = "arm,smc-wdt";
+ arm,smc-id = <0xbc000000>;
+ status = "disabled";
+ };
+
+ ck_flexgen_08: clock-64000000 {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <64000000>;
+ };
+
+ ck_flexgen_51: clock-200000000 {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <200000000>;
+ };
+
+ firmware {
+ optee {
+ compatible = "linaro,optee-tz";
+ method = "smc";
+ };
+
+ scmi: scmi {
+ compatible = "linaro,scmi-optee";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ linaro,optee-channel-id = <0>;
+
+ scmi_clk: protocol@14 {
+ reg = <0x14>;
+ #clock-cells = <1>;
+ };
+
+ scmi_reset: protocol@16 {
+ reg = <0x16>;
+ #reset-cells = <1>;
+ };
+ };
+ };
+
+ psci {
+ compatible = "arm,psci-1.0";
+ method = "smc";
+ };
+
+ timer {
+ compatible = "arm,armv8-timer";
+ interrupt-parent = <&intc>;
+ interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(1) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(1) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(1) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(1) | IRQ_TYPE_LEVEL_LOW)>;
+ arm,no-tick-in-suspend;
+ };
+
+ soc@0 {
+ compatible = "simple-bus";
+ ranges = <0x0 0x0 0x0 0x0 0x80000000>;
+ dma-ranges = <0x0 0x0 0x80000000 0x1 0x0>;
+ interrupt-parent = <&intc>;
+ #address-cells = <1>;
+ #size-cells = <2>;
+
+ rifsc: bus@42080000 {
+ compatible = "simple-bus";
+ reg = <0x42080000 0x0 0x1000>;
+ ranges;
+ dma-ranges;
+ #address-cells = <1>;
+ #size-cells = <2>;
+
+ usart2: serial@400e0000 {
+ compatible = "st,stm32h7-uart";
+ reg = <0x400e0000 0x0 0x400>;
+ interrupts = <GIC_SPI 102 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&ck_flexgen_08>;
+ status = "disabled";
+ };
+ };
+
+ syscfg: syscon@44230000 {
+ compatible = "st,stm32mp21-syscfg", "syscon";
+ reg = <0x44230000 0x0 0x10000>;
+ };
+
+ intc: interrupt-controller@4ac10000 {
+ compatible = "arm,cortex-a7-gic";
+ reg = <0x4ac10000 0x0 0x1000>,
+ <0x4ac20000 0x0 0x2000>,
+ <0x4ac40000 0x0 0x2000>,
+ <0x4ac60000 0x0 0x2000>;
+ #interrupt-cells = <3>;
+ interrupt-controller;
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/st/stm32mp213.dtsi b/arch/arm64/boot/dts/st/stm32mp213.dtsi
new file mode 100644
index 000000000000..fdd2dc432edd
--- /dev/null
+++ b/arch/arm64/boot/dts/st/stm32mp213.dtsi
@@ -0,0 +1,9 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+/*
+ * Copyright (C) STMicroelectronics 2025 - All Rights Reserved
+ * Author: Alexandre Torgue <alexandre.torgue@foss.st.com> for STMicroelectronics.
+ */
+#include "stm32mp211.dtsi"
+
+/ {
+};
diff --git a/arch/arm64/boot/dts/st/stm32mp215.dtsi b/arch/arm64/boot/dts/st/stm32mp215.dtsi
new file mode 100644
index 000000000000..a7df77f928c5
--- /dev/null
+++ b/arch/arm64/boot/dts/st/stm32mp215.dtsi
@@ -0,0 +1,9 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+/*
+ * Copyright (C) STMicroelectronics 2025 - All Rights Reserved
+ * Author: Alexandre Torgue <alexandre.torgue@foss.st.com> for STMicroelectronics.
+ */
+#include "stm32mp213.dtsi"
+
+/ {
+};
diff --git a/arch/arm64/boot/dts/st/stm32mp215f-dk.dts b/arch/arm64/boot/dts/st/stm32mp215f-dk.dts
new file mode 100644
index 000000000000..7bdaeaa5ab0f
--- /dev/null
+++ b/arch/arm64/boot/dts/st/stm32mp215f-dk.dts
@@ -0,0 +1,49 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+/*
+ * Copyright (C) STMicroelectronics 2025 - All Rights Reserved
+ * Author: Amelie Delaunay <amelie.delaunay@foss.st.com> for STMicroelectronics.
+ */
+
+/dts-v1/;
+
+#include "stm32mp215.dtsi"
+#include "stm32mp21xf.dtsi"
+
+/ {
+ model = "STMicroelectronics STM32MP215F-DK Discovery Board";
+ compatible = "st,stm32mp215f-dk", "st,stm32mp215";
+
+ aliases {
+ serial0 = &usart2;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+
+ memory@80000000 {
+ device_type = "memory";
+ reg = <0x0 0x80000000 0x0 0x80000000>;
+ };
+
+ reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ fw@80000000 {
+ compatible = "shared-dma-pool";
+ reg = <0x0 0x80000000 0x0 0x4000000>;
+ no-map;
+ };
+ };
+};
+
+&arm_wdt {
+ timeout-sec = <32>;
+ status = "okay";
+};
+
+&usart2 {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/st/stm32mp21xc.dtsi b/arch/arm64/boot/dts/st/stm32mp21xc.dtsi
new file mode 100644
index 000000000000..e33b00b424e1
--- /dev/null
+++ b/arch/arm64/boot/dts/st/stm32mp21xc.dtsi
@@ -0,0 +1,8 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+/*
+ * Copyright (C) STMicroelectronics 2025 - All Rights Reserved
+ * Author: Alexandre Torgue <alexandre.torgue@foss.st.com> for STMicroelectronics.
+ */
+
+/ {
+};
diff --git a/arch/arm64/boot/dts/st/stm32mp21xf.dtsi b/arch/arm64/boot/dts/st/stm32mp21xf.dtsi
new file mode 100644
index 000000000000..e33b00b424e1
--- /dev/null
+++ b/arch/arm64/boot/dts/st/stm32mp21xf.dtsi
@@ -0,0 +1,8 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+/*
+ * Copyright (C) STMicroelectronics 2025 - All Rights Reserved
+ * Author: Alexandre Torgue <alexandre.torgue@foss.st.com> for STMicroelectronics.
+ */
+
+/ {
+};
diff --git a/arch/arm64/boot/dts/st/stm32mp231.dtsi b/arch/arm64/boot/dts/st/stm32mp231.dtsi
new file mode 100644
index 000000000000..8820d219a33e
--- /dev/null
+++ b/arch/arm64/boot/dts/st/stm32mp231.dtsi
@@ -0,0 +1,1214 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+/*
+ * Copyright (C) STMicroelectronics 2025 - All Rights Reserved
+ * Author: Alexandre Torgue <alexandre.torgue@foss.st.com> for STMicroelectronics.
+ */
+#include <dt-bindings/clock/st,stm32mp25-rcc.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/regulator/st,stm32mp25-regulator.h>
+#include <dt-bindings/reset/st,stm32mp25-rcc.h>
+
+/ {
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpu0: cpu@0 {
+ compatible = "arm,cortex-a35";
+ reg = <0>;
+ device_type = "cpu";
+ enable-method = "psci";
+ power-domains = <&cpu0_pd>;
+ power-domain-names = "psci";
+ };
+ };
+
+ arm-pmu {
+ compatible = "arm,cortex-a35-pmu";
+ interrupts = <GIC_SPI 368 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-affinity = <&cpu0>;
+ interrupt-parent = <&intc>;
+ };
+
+ arm_wdt: watchdog {
+ compatible = "arm,smc-wdt";
+ arm,smc-id = <0xb200005a>;
+ status = "disabled";
+ };
+
+ clk_dsi_txbyte: clock-0 {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <0>;
+ };
+
+ clk_rcbsec: clk-64000000 {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <64000000>;
+ };
+
+ firmware {
+ optee: optee {
+ compatible = "linaro,optee-tz";
+ method = "smc";
+ interrupt-parent = <&intc>;
+ interrupts = <GIC_PPI 15 (GIC_CPU_MASK_SIMPLE(1) | IRQ_TYPE_LEVEL_LOW)>;
+ };
+
+ scmi {
+ compatible = "linaro,scmi-optee";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ linaro,optee-channel-id = <0>;
+
+ scmi_clk: protocol@14 {
+ reg = <0x14>;
+ #clock-cells = <1>;
+ };
+
+ scmi_reset: protocol@16 {
+ reg = <0x16>;
+ #reset-cells = <1>;
+ };
+
+ scmi_voltd: protocol@17 {
+ reg = <0x17>;
+
+ scmi_regu: regulators {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ scmi_vddio1: regulator@0 {
+ reg = <VOLTD_SCMI_VDDIO1>;
+ regulator-name = "vddio1";
+ };
+ scmi_vddio2: regulator@1 {
+ reg = <VOLTD_SCMI_VDDIO2>;
+ regulator-name = "vddio2";
+ };
+ scmi_vddio3: regulator@2 {
+ reg = <VOLTD_SCMI_VDDIO3>;
+ regulator-name = "vddio3";
+ };
+ scmi_vddio4: regulator@3 {
+ reg = <VOLTD_SCMI_VDDIO4>;
+ regulator-name = "vddio4";
+ };
+ scmi_vdd33ucpd: regulator@5 {
+ reg = <VOLTD_SCMI_UCPD>;
+ regulator-name = "vdd33ucpd";
+ };
+ scmi_vdda18adc: regulator@7 {
+ reg = <VOLTD_SCMI_ADC>;
+ regulator-name = "vdda18adc";
+ };
+ };
+ };
+ };
+ };
+
+ psci {
+ compatible = "arm,psci-1.0";
+ method = "smc";
+
+ cpu0_pd: power-domain-cpu0 {
+ #power-domain-cells = <0>;
+ power-domains = <&cluster_pd>;
+ };
+
+ cluster_pd: power-domain-cluster {
+ #power-domain-cells = <0>;
+ power-domains = <&ret_pd>;
+ };
+
+ ret_pd: power-domain-retention {
+ #power-domain-cells = <0>;
+ };
+ };
+
+ timer {
+ compatible = "arm,armv8-timer";
+ interrupt-parent = <&intc>;
+ interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(1) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(1) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(1) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(1) | IRQ_TYPE_LEVEL_LOW)>;
+ always-on;
+ };
+
+ soc@0 {
+ compatible = "simple-bus";
+ ranges = <0x0 0x0 0x0 0x80000000>;
+ interrupt-parent = <&intc>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ hpdma: dma-controller@40400000 {
+ compatible = "st,stm32mp25-dma3";
+ reg = <0x40400000 0x1000>;
+ interrupts = <GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 36 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 39 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 41 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 42 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 43 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 44 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 45 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 46 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 47 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 48 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&scmi_clk CK_SCMI_HPDMA1>;
+ #dma-cells = <3>;
+ };
+
+ hpdma2: dma-controller@40410000 {
+ compatible = "st,stm32mp25-dma3";
+ reg = <0x40410000 0x1000>;
+ interrupts = <GIC_SPI 49 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 50 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 51 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 52 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 53 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 54 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 56 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 57 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 58 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 59 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 60 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 61 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 62 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 63 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 64 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&scmi_clk CK_SCMI_HPDMA2>;
+ #dma-cells = <3>;
+ };
+
+ hpdma3: dma-controller@40420000 {
+ compatible = "st,stm32mp25-dma3";
+ reg = <0x40420000 0x1000>;
+ interrupts = <GIC_SPI 65 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 66 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 67 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 68 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 69 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 70 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 71 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 72 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 73 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 74 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 75 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 76 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 77 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 78 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 79 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 80 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&scmi_clk CK_SCMI_HPDMA3>;
+ #dma-cells = <3>;
+ };
+
+ rifsc: bus@42080000 {
+ compatible = "st,stm32mp25-rifsc", "simple-bus";
+ reg = <0x42080000 0x1000>;
+ ranges;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ #access-controller-cells = <1>;
+
+ i2s2: audio-controller@400b0000 {
+ compatible = "st,stm32mp25-i2s";
+ reg = <0x400b0000 0x400>;
+ #sound-dai-cells = <0>;
+ interrupts = <GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&rcc CK_BUS_SPI2>, <&rcc CK_KER_SPI2>;
+ clock-names = "pclk", "i2sclk";
+ resets = <&rcc SPI2_R>;
+ dmas = <&hpdma 51 0x43 0x12>,
+ <&hpdma 52 0x43 0x21>;
+ dma-names = "rx", "tx";
+ access-controllers = <&rifsc 23>;
+ status = "disabled";
+ };
+
+ spi2: spi@400b0000 {
+ compatible = "st,stm32mp25-spi";
+ reg = <0x400b0000 0x400>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interrupts = <GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&rcc CK_KER_SPI2>;
+ resets = <&rcc SPI2_R>;
+ dmas = <&hpdma 51 0x20 0x3012>,
+ <&hpdma 52 0x20 0x3021>;
+ dma-names = "rx", "tx";
+ access-controllers = <&rifsc 23>;
+ status = "disabled";
+ };
+
+ i2s3: audio-controller@400c0000 {
+ compatible = "st,stm32mp25-i2s";
+ reg = <0x400c0000 0x400>;
+ #sound-dai-cells = <0>;
+ interrupts = <GIC_SPI 125 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&rcc CK_BUS_SPI3>, <&rcc CK_KER_SPI3>;
+ clock-names = "pclk", "i2sclk";
+ resets = <&rcc SPI3_R>;
+ dmas = <&hpdma 53 0x43 0x12>,
+ <&hpdma 54 0x43 0x21>;
+ dma-names = "rx", "tx";
+ access-controllers = <&rifsc 24>;
+ status = "disabled";
+ };
+
+ spi3: spi@400c0000 {
+ compatible = "st,stm32mp25-spi";
+ reg = <0x400c0000 0x400>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interrupts = <GIC_SPI 125 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&rcc CK_KER_SPI3>;
+ resets = <&rcc SPI3_R>;
+ dmas = <&hpdma 53 0x20 0x3012>,
+ <&hpdma 54 0x20 0x3021>;
+ dma-names = "rx", "tx";
+ access-controllers = <&rifsc 24>;
+ status = "disabled";
+ };
+
+ spdifrx: audio-controller@400d0000 {
+ compatible = "st,stm32h7-spdifrx";
+ reg = <0x400d0000 0x400>;
+ #sound-dai-cells = <0>;
+ clocks = <&rcc CK_KER_SPDIFRX>;
+ clock-names = "kclk";
+ interrupts = <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&hpdma 71 0x43 0x212>,
+ <&hpdma 72 0x43 0x212>;
+ dma-names = "rx", "rx-ctrl";
+ access-controllers = <&rifsc 30>;
+ status = "disabled";
+ };
+
+ usart2: serial@400e0000 {
+ compatible = "st,stm32h7-uart";
+ reg = <0x400e0000 0x400>;
+ interrupts = <GIC_SPI 115 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&rcc CK_KER_USART2>;
+ dmas = <&hpdma 11 0x20 0x10012>,
+ <&hpdma 12 0x20 0x3021>;
+ dma-names = "rx", "tx";
+ access-controllers = <&rifsc 32>;
+ status = "disabled";
+ };
+
+ usart3: serial@400f0000 {
+ compatible = "st,stm32h7-uart";
+ reg = <0x400f0000 0x400>;
+ interrupts = <GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&rcc CK_KER_USART3>;
+ dmas = <&hpdma 13 0x20 0x10012>,
+ <&hpdma 14 0x20 0x3021>;
+ dma-names = "rx", "tx";
+ access-controllers = <&rifsc 33>;
+ status = "disabled";
+ };
+
+ uart4: serial@40100000 {
+ compatible = "st,stm32h7-uart";
+ reg = <0x40100000 0x400>;
+ interrupts = <GIC_SPI 126 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&rcc CK_KER_UART4>;
+ dmas = <&hpdma 15 0x20 0x10012>,
+ <&hpdma 16 0x20 0x3021>;
+ dma-names = "rx", "tx";
+ access-controllers = <&rifsc 34>;
+ status = "disabled";
+ };
+
+ uart5: serial@40110000 {
+ compatible = "st,stm32h7-uart";
+ reg = <0x40110000 0x400>;
+ interrupts = <GIC_SPI 127 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&rcc CK_KER_UART5>;
+ dmas = <&hpdma 17 0x20 0x10012>,
+ <&hpdma 18 0x20 0x3021>;
+ dma-names = "rx", "tx";
+ access-controllers = <&rifsc 35>;
+ status = "disabled";
+ };
+
+ i2c1: i2c@40120000 {
+ compatible = "st,stm32mp25-i2c";
+ reg = <0x40120000 0x400>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interrupt-names = "event";
+ interrupts = <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&rcc CK_KER_I2C1>;
+ resets = <&rcc I2C1_R>;
+ dmas = <&hpdma 27 0x20 0x3012>,
+ <&hpdma 28 0x20 0x3021>;
+ dma-names = "rx", "tx";
+ access-controllers = <&rifsc 41>;
+ status = "disabled";
+ };
+
+ i2c2: i2c@40130000 {
+ compatible = "st,stm32mp25-i2c";
+ reg = <0x40130000 0x400>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interrupt-names = "event";
+ interrupts = <GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&rcc CK_KER_I2C2>;
+ resets = <&rcc I2C2_R>;
+ dmas = <&hpdma 30 0x20 0x3012>,
+ <&hpdma 31 0x20 0x3021>;
+ dma-names = "rx", "tx";
+ access-controllers = <&rifsc 42>;
+ status = "disabled";
+ };
+
+ i2c7: i2c@40180000 {
+ compatible = "st,stm32mp25-i2c";
+ reg = <0x40180000 0x400>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interrupt-names = "event";
+ interrupts = <GIC_SPI 210 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&rcc CK_KER_I2C7>;
+ resets = <&rcc I2C7_R>;
+ dmas = <&hpdma 45 0x20 0x3012>,
+ <&hpdma 46 0x20 0x3021>;
+ dma-names = "rx", "tx";
+ access-controllers = <&rifsc 47>;
+ status = "disabled";
+ };
+
+ usart6: serial@40220000 {
+ compatible = "st,stm32h7-uart";
+ reg = <0x40220000 0x400>;
+ interrupts = <GIC_SPI 136 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&rcc CK_KER_USART6>;
+ dmas = <&hpdma 19 0x20 0x10012>,
+ <&hpdma 20 0x20 0x3021>;
+ dma-names = "rx", "tx";
+ access-controllers = <&rifsc 36>;
+ status = "disabled";
+ };
+
+ i2s1: audio-controller@40230000 {
+ compatible = "st,stm32mp25-i2s";
+ reg = <0x40230000 0x400>;
+ #sound-dai-cells = <0>;
+ interrupts = <GIC_SPI 112 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&rcc CK_BUS_SPI1>, <&rcc CK_KER_SPI1>;
+ clock-names = "pclk", "i2sclk";
+ resets = <&rcc SPI1_R>;
+ dmas = <&hpdma 49 0x43 0x12>,
+ <&hpdma 50 0x43 0x21>;
+ dma-names = "rx", "tx";
+ access-controllers = <&rifsc 22>;
+ status = "disabled";
+ };
+
+ spi1: spi@40230000 {
+ compatible = "st,stm32mp25-spi";
+ reg = <0x40230000 0x400>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interrupts = <GIC_SPI 112 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&rcc CK_KER_SPI1>;
+ resets = <&rcc SPI1_R>;
+ dmas = <&hpdma 49 0x20 0x3012>,
+ <&hpdma 50 0x20 0x3021>;
+ dma-names = "rx", "tx";
+ access-controllers = <&rifsc 22>;
+ status = "disabled";
+ };
+
+ spi4: spi@40240000 {
+ compatible = "st,stm32mp25-spi";
+ reg = <0x40240000 0x400>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interrupts = <GIC_SPI 152 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&rcc CK_KER_SPI4>;
+ resets = <&rcc SPI4_R>;
+ dmas = <&hpdma 55 0x20 0x3012>,
+ <&hpdma 56 0x20 0x3021>;
+ dma-names = "rx", "tx";
+ access-controllers = <&rifsc 25>;
+ status = "disabled";
+ };
+
+ spi5: spi@40280000 {
+ compatible = "st,stm32mp25-spi";
+ reg = <0x40280000 0x400>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interrupts = <GIC_SPI 153 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&rcc CK_KER_SPI5>;
+ resets = <&rcc SPI5_R>;
+ dmas = <&hpdma 57 0x20 0x3012>,
+ <&hpdma 58 0x20 0x3021>;
+ dma-names = "rx", "tx";
+ access-controllers = <&rifsc 26>;
+ status = "disabled";
+ };
+
+ sai1: sai@40290000 {
+ compatible = "st,stm32mp25-sai";
+ reg = <0x40290000 0x4>, <0x4029a3f0 0x10>;
+ ranges = <0 0x40290000 0x400>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ clocks = <&rcc CK_BUS_SAI1>;
+ clock-names = "pclk";
+ interrupts = <GIC_SPI 157 IRQ_TYPE_LEVEL_HIGH>;
+ resets = <&rcc SAI1_R>;
+ access-controllers = <&rifsc 49>;
+ status = "disabled";
+
+ sai1a: audio-controller@40290004 {
+ compatible = "st,stm32-sai-sub-a";
+ reg = <0x4 0x20>;
+ #sound-dai-cells = <0>;
+ clocks = <&rcc CK_KER_SAI1>;
+ clock-names = "sai_ck";
+ dmas = <&hpdma 73 0x43 0x21>;
+ status = "disabled";
+ };
+
+ sai1b: audio-controller@40290024 {
+ compatible = "st,stm32-sai-sub-b";
+ reg = <0x24 0x20>;
+ #sound-dai-cells = <0>;
+ clocks = <&rcc CK_KER_SAI1>;
+ clock-names = "sai_ck";
+ dmas = <&hpdma 74 0x43 0x12>;
+ status = "disabled";
+ };
+ };
+
+ sai2: sai@402a0000 {
+ compatible = "st,stm32mp25-sai";
+ reg = <0x402a0000 0x4>, <0x402a03f0 0x10>;
+ ranges = <0 0x402a0000 0x400>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ clocks = <&rcc CK_BUS_SAI2>;
+ clock-names = "pclk";
+ interrupts = <GIC_SPI 162 IRQ_TYPE_LEVEL_HIGH>;
+ resets = <&rcc SAI2_R>;
+ access-controllers = <&rifsc 50>;
+ status = "disabled";
+
+ sai2a: audio-controller@402a0004 {
+ compatible = "st,stm32-sai-sub-a";
+ reg = <0x4 0x20>;
+ #sound-dai-cells = <0>;
+ clocks = <&rcc CK_KER_SAI2>;
+ clock-names = "sai_ck";
+ dmas = <&hpdma 75 0x43 0x21>;
+ status = "disabled";
+ };
+
+ sai2b: audio-controller@402a0024 {
+ compatible = "st,stm32-sai-sub-b";
+ reg = <0x24 0x20>;
+ #sound-dai-cells = <0>;
+ clocks = <&rcc CK_KER_SAI2>;
+ clock-names = "sai_ck";
+ dmas = <&hpdma 76 0x43 0x12>;
+ status = "disabled";
+ };
+ };
+
+ sai3: sai@402b0000 {
+ compatible = "st,stm32mp25-sai";
+ reg = <0x402b0000 0x4>, <0x402b03f0 0x10>;
+ ranges = <0 0x402b0000 0x400>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ clocks = <&rcc CK_BUS_SAI3>;
+ clock-names = "pclk";
+ interrupts = <GIC_SPI 192 IRQ_TYPE_LEVEL_HIGH>;
+ resets = <&rcc SAI3_R>;
+ access-controllers = <&rifsc 51>;
+ status = "disabled";
+
+ sai3a: audio-controller@402b0004 {
+ compatible = "st,stm32-sai-sub-a";
+ reg = <0x4 0x20>;
+ #sound-dai-cells = <0>;
+ clocks = <&rcc CK_KER_SAI3>;
+ clock-names = "sai_ck";
+ dmas = <&hpdma 77 0x43 0x21>;
+ status = "disabled";
+ };
+
+ sai3b: audio-controller@402b0024 {
+ compatible = "st,stm32-sai-sub-b";
+ reg = <0x24 0x20>;
+ #sound-dai-cells = <0>;
+ clocks = <&rcc CK_KER_SAI3>;
+ clock-names = "sai_ck";
+ dmas = <&hpdma 78 0x43 0x12>;
+ status = "disabled";
+ };
+ };
+
+ usart1: serial@40330000 {
+ compatible = "st,stm32h7-uart";
+ reg = <0x40330000 0x400>;
+ interrupts = <GIC_SPI 114 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&rcc CK_KER_USART1>;
+ dmas = <&hpdma 9 0x20 0x10012>,
+ <&hpdma 10 0x20 0x3021>;
+ dma-names = "rx", "tx";
+ access-controllers = <&rifsc 31>;
+ status = "disabled";
+ };
+
+ sai4: sai@40340000 {
+ compatible = "st,stm32mp25-sai";
+ reg = <0x40340000 0x4>, <0x403403f0 0x10>;
+ ranges = <0 0x40340000 0x400>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ clocks = <&rcc CK_BUS_SAI4>;
+ clock-names = "pclk";
+ interrupts = <GIC_SPI 223 IRQ_TYPE_LEVEL_HIGH>;
+ resets = <&rcc SAI4_R>;
+ access-controllers = <&rifsc 52>;
+ status = "disabled";
+
+ sai4a: audio-controller@40340004 {
+ compatible = "st,stm32-sai-sub-a";
+ reg = <0x4 0x20>;
+ #sound-dai-cells = <0>;
+ clocks = <&rcc CK_KER_SAI4>;
+ clock-names = "sai_ck";
+ dmas = <&hpdma 79 0x63 0x21>;
+ status = "disabled";
+ };
+
+ sai4b: audio-controller@40340024 {
+ compatible = "st,stm32-sai-sub-b";
+ reg = <0x24 0x20>;
+ #sound-dai-cells = <0>;
+ clocks = <&rcc CK_KER_SAI4>;
+ clock-names = "sai_ck";
+ dmas = <&hpdma 80 0x43 0x12>;
+ status = "disabled";
+ };
+ };
+
+ uart7: serial@40370000 {
+ compatible = "st,stm32h7-uart";
+ reg = <0x40370000 0x400>;
+ interrupts = <GIC_SPI 148 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&rcc CK_KER_UART7>;
+ dmas = <&hpdma 21 0x20 0x10012>,
+ <&hpdma 22 0x20 0x3021>;
+ dma-names = "rx", "tx";
+ access-controllers = <&rifsc 37>;
+ status = "disabled";
+ };
+
+ rng: rng@42020000 {
+ compatible = "st,stm32mp25-rng";
+ reg = <0x42020000 0x400>;
+ clocks = <&clk_rcbsec>, <&rcc CK_BUS_RNG>;
+ clock-names = "core", "bus";
+ resets = <&rcc RNG_R>;
+ access-controllers = <&rifsc 92>;
+ status = "disabled";
+ };
+
+ spi8: spi@46020000 {
+ compatible = "st,stm32mp25-spi";
+ reg = <0x46020000 0x400>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interrupts = <GIC_SPI 156 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&rcc CK_KER_SPI8>;
+ resets = <&rcc SPI8_R>;
+ dmas = <&hpdma 171 0x20 0x3012>,
+ <&hpdma 172 0x20 0x3021>;
+ dma-names = "rx", "tx";
+ access-controllers = <&rifsc 29>;
+ status = "disabled";
+ };
+
+ i2c8: i2c@46040000 {
+ compatible = "st,stm32mp25-i2c";
+ reg = <0x46040000 0x400>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interrupt-names = "event";
+ interrupts = <GIC_SPI 212 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&rcc CK_KER_I2C8>;
+ resets = <&rcc I2C8_R>;
+ dmas = <&hpdma 168 0x20 0x3012>,
+ <&hpdma 169 0x20 0x3021>;
+ dma-names = "rx", "tx";
+ access-controllers = <&rifsc 48>;
+ status = "disabled";
+ };
+
+ csi: csi@48020000 {
+ compatible = "st,stm32mp25-csi";
+ reg = <0x48020000 0x2000>;
+ interrupts = <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>;
+ resets = <&rcc CSI_R>;
+ clocks = <&rcc CK_KER_CSI>, <&rcc CK_KER_CSITXESC>,
+ <&rcc CK_KER_CSIPHY>;
+ clock-names = "pclk", "txesc", "csi2phy";
+ access-controllers = <&rifsc 86>;
+ status = "disabled";
+ };
+
+ dcmipp: dcmipp@48030000 {
+ compatible = "st,stm32mp25-dcmipp";
+ reg = <0x48030000 0x1000>;
+ interrupts = <GIC_SPI 198 IRQ_TYPE_LEVEL_HIGH>;
+ resets = <&rcc DCMIPP_R>;
+ clocks = <&rcc CK_BUS_DCMIPP>, <&rcc CK_KER_CSI>;
+ clock-names = "kclk", "mclk";
+ access-controllers = <&rifsc 87>;
+ status = "disabled";
+ };
+
+ sdmmc1: mmc@48220000 {
+ compatible = "st,stm32mp25-sdmmc2", "arm,pl18x", "arm,primecell";
+ reg = <0x48220000 0x400>, <0x44230400 0x8>;
+ arm,primecell-periphid = <0x00353180>;
+ interrupts = <GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&rcc CK_KER_SDMMC1>;
+ clock-names = "apb_pclk";
+ resets = <&rcc SDMMC1_R>;
+ cap-sd-highspeed;
+ cap-mmc-highspeed;
+ max-frequency = <120000000>;
+ access-controllers = <&rifsc 76>;
+ status = "disabled";
+ };
+
+ ethernet1: ethernet@482c0000 {
+ compatible = "st,stm32mp25-dwmac", "snps,dwmac-5.20";
+ reg = <0x482c0000 0x4000>;
+ reg-names = "stmmaceth";
+ interrupts-extended = <&intc GIC_SPI 130 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "macirq";
+ clock-names = "stmmaceth",
+ "mac-clk-tx",
+ "mac-clk-rx",
+ "ptp_ref",
+ "ethstp",
+ "eth-ck";
+ clocks = <&rcc CK_ETH1_MAC>,
+ <&rcc CK_ETH1_TX>,
+ <&rcc CK_ETH1_RX>,
+ <&rcc CK_KER_ETH1PTP>,
+ <&rcc CK_ETH1_STP>,
+ <&rcc CK_KER_ETH1>;
+ snps,axi-config = <&stmmac_axi_config_1>;
+ snps,mixed-burst;
+ snps,mtl-rx-config = <&mtl_rx_setup_1>;
+ snps,mtl-tx-config = <&mtl_tx_setup_1>;
+ snps,pbl = <2>;
+ snps,tso;
+ st,syscon = <&syscfg 0x3000>;
+ access-controllers = <&rifsc 60>;
+ status = "disabled";
+
+ mtl_rx_setup_1: rx-queues-config {
+ snps,rx-queues-to-use = <2>;
+ queue0 {};
+ queue1 {};
+ };
+
+ mtl_tx_setup_1: tx-queues-config {
+ snps,tx-queues-to-use = <4>;
+ queue0 {};
+ queue1 {};
+ queue2 {};
+ queue3 {};
+ };
+
+ stmmac_axi_config_1: stmmac-axi-config {
+ snps,blen = <0 0 0 0 16 8 4>;
+ snps,rd_osr_lmt = <0x7>;
+ snps,wr_osr_lmt = <0x7>;
+ };
+ };
+ };
+
+ bsec: efuse@44000000 {
+ compatible = "st,stm32mp25-bsec";
+ reg = <0x44000000 0x1000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ part_number_otp@24 {
+ reg = <0x24 0x4>;
+ };
+
+ package_otp@1e8 {
+ reg = <0x1e8 0x1>;
+ bits = <0 3>;
+ };
+ };
+
+ rcc: clock-controller@44200000 {
+ compatible = "st,stm32mp25-rcc";
+ reg = <0x44200000 0x10000>;
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ clocks = <&scmi_clk CK_SCMI_HSE>,
+ <&scmi_clk CK_SCMI_HSI>,
+ <&scmi_clk CK_SCMI_MSI>,
+ <&scmi_clk CK_SCMI_LSE>,
+ <&scmi_clk CK_SCMI_LSI>,
+ <&scmi_clk CK_SCMI_HSE_DIV2>,
+ <&scmi_clk CK_SCMI_ICN_HS_MCU>,
+ <&scmi_clk CK_SCMI_ICN_LS_MCU>,
+ <&scmi_clk CK_SCMI_ICN_SDMMC>,
+ <&scmi_clk CK_SCMI_ICN_DDR>,
+ <&scmi_clk CK_SCMI_ICN_DISPLAY>,
+ <&scmi_clk CK_SCMI_ICN_HSL>,
+ <&scmi_clk CK_SCMI_ICN_NIC>,
+ <&scmi_clk CK_SCMI_ICN_VID>,
+ <&scmi_clk CK_SCMI_FLEXGEN_07>,
+ <&scmi_clk CK_SCMI_FLEXGEN_08>,
+ <&scmi_clk CK_SCMI_FLEXGEN_09>,
+ <&scmi_clk CK_SCMI_FLEXGEN_10>,
+ <&scmi_clk CK_SCMI_FLEXGEN_11>,
+ <&scmi_clk CK_SCMI_FLEXGEN_12>,
+ <&scmi_clk CK_SCMI_FLEXGEN_13>,
+ <&scmi_clk CK_SCMI_FLEXGEN_14>,
+ <&scmi_clk CK_SCMI_FLEXGEN_15>,
+ <&scmi_clk CK_SCMI_FLEXGEN_16>,
+ <&scmi_clk CK_SCMI_FLEXGEN_17>,
+ <&scmi_clk CK_SCMI_FLEXGEN_18>,
+ <&scmi_clk CK_SCMI_FLEXGEN_19>,
+ <&scmi_clk CK_SCMI_FLEXGEN_20>,
+ <&scmi_clk CK_SCMI_FLEXGEN_21>,
+ <&scmi_clk CK_SCMI_FLEXGEN_22>,
+ <&scmi_clk CK_SCMI_FLEXGEN_23>,
+ <&scmi_clk CK_SCMI_FLEXGEN_24>,
+ <&scmi_clk CK_SCMI_FLEXGEN_25>,
+ <&scmi_clk CK_SCMI_FLEXGEN_26>,
+ <&scmi_clk CK_SCMI_FLEXGEN_27>,
+ <&scmi_clk CK_SCMI_FLEXGEN_28>,
+ <&scmi_clk CK_SCMI_FLEXGEN_29>,
+ <&scmi_clk CK_SCMI_FLEXGEN_30>,
+ <&scmi_clk CK_SCMI_FLEXGEN_31>,
+ <&scmi_clk CK_SCMI_FLEXGEN_32>,
+ <&scmi_clk CK_SCMI_FLEXGEN_33>,
+ <&scmi_clk CK_SCMI_FLEXGEN_34>,
+ <&scmi_clk CK_SCMI_FLEXGEN_35>,
+ <&scmi_clk CK_SCMI_FLEXGEN_36>,
+ <&scmi_clk CK_SCMI_FLEXGEN_37>,
+ <&scmi_clk CK_SCMI_FLEXGEN_38>,
+ <&scmi_clk CK_SCMI_FLEXGEN_39>,
+ <&scmi_clk CK_SCMI_FLEXGEN_40>,
+ <&scmi_clk CK_SCMI_FLEXGEN_41>,
+ <&scmi_clk CK_SCMI_FLEXGEN_42>,
+ <&scmi_clk CK_SCMI_FLEXGEN_43>,
+ <&scmi_clk CK_SCMI_FLEXGEN_44>,
+ <&scmi_clk CK_SCMI_FLEXGEN_45>,
+ <&scmi_clk CK_SCMI_FLEXGEN_46>,
+ <&scmi_clk CK_SCMI_FLEXGEN_47>,
+ <&scmi_clk CK_SCMI_FLEXGEN_48>,
+ <&scmi_clk CK_SCMI_FLEXGEN_49>,
+ <&scmi_clk CK_SCMI_FLEXGEN_50>,
+ <&scmi_clk CK_SCMI_FLEXGEN_51>,
+ <&scmi_clk CK_SCMI_FLEXGEN_52>,
+ <&scmi_clk CK_SCMI_FLEXGEN_53>,
+ <&scmi_clk CK_SCMI_FLEXGEN_54>,
+ <&scmi_clk CK_SCMI_FLEXGEN_55>,
+ <&scmi_clk CK_SCMI_FLEXGEN_56>,
+ <&scmi_clk CK_SCMI_FLEXGEN_57>,
+ <&scmi_clk CK_SCMI_FLEXGEN_58>,
+ <&scmi_clk CK_SCMI_FLEXGEN_59>,
+ <&scmi_clk CK_SCMI_FLEXGEN_60>,
+ <&scmi_clk CK_SCMI_FLEXGEN_61>,
+ <&scmi_clk CK_SCMI_FLEXGEN_62>,
+ <&scmi_clk CK_SCMI_FLEXGEN_63>,
+ <&scmi_clk CK_SCMI_ICN_APB1>,
+ <&scmi_clk CK_SCMI_ICN_APB2>,
+ <&scmi_clk CK_SCMI_ICN_APB3>,
+ <&scmi_clk CK_SCMI_ICN_APB4>,
+ <&scmi_clk CK_SCMI_ICN_APBDBG>,
+ <&scmi_clk CK_SCMI_TIMG1>,
+ <&scmi_clk CK_SCMI_TIMG2>,
+ <&scmi_clk CK_SCMI_PLL3>,
+ <&clk_dsi_txbyte>;
+ access-controllers = <&rifsc 156>;
+ };
+
+ exti1: interrupt-controller@44220000 {
+ compatible = "st,stm32mp1-exti", "syscon";
+ reg = <0x44220000 0x400>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ interrupts-extended =
+ <&intc GIC_SPI 268 IRQ_TYPE_LEVEL_HIGH>, /* EXTI_0 */
+ <&intc GIC_SPI 269 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 270 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 271 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 272 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 273 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 274 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 275 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 276 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 277 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 278 IRQ_TYPE_LEVEL_HIGH>, /* EXTI_10 */
+ <&intc GIC_SPI 279 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 280 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 281 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 282 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 283 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 260 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 259 IRQ_TYPE_LEVEL_HIGH>,
+ <0>, /* EXTI_20 */
+ <&intc GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 137 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 168 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 181 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 114 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 115 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 136 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 126 IRQ_TYPE_LEVEL_HIGH>, /* EXTI_30 */
+ <&intc GIC_SPI 127 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 148 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 150 IRQ_TYPE_LEVEL_HIGH>,
+ <0>,
+ <&intc GIC_SPI 112 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 125 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 152 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 153 IRQ_TYPE_LEVEL_HIGH>, /* EXTI_40 */
+ <&intc GIC_SPI 154 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 155 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 169 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 182 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 209 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 229 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 166 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 215 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 210 IRQ_TYPE_LEVEL_HIGH>, /* EXTI_50 */
+ <0>,
+ <0>,
+ <0>,
+ <0>,
+ <0>,
+ <0>,
+ <0>,
+ <0>,
+ <&intc GIC_SPI 171 IRQ_TYPE_LEVEL_HIGH>,
+ <0>, /* EXTI_60 */
+ <&intc GIC_SPI 173 IRQ_TYPE_LEVEL_HIGH>,
+ <0>,
+ <0>,
+ <&intc GIC_SPI 220 IRQ_TYPE_LEVEL_HIGH>,
+ <0>,
+ <0>,
+ <&intc GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>,
+ <0>,
+ <&intc GIC_SPI 134 IRQ_TYPE_LEVEL_HIGH>, /* EXTI_70 */
+ <0>,
+ <&intc GIC_SPI 224 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 202 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 111 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 138 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 253 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 254 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 255 IRQ_TYPE_LEVEL_HIGH>,
+ <0>, /* EXTI_80 */
+ <0>,
+ <0>,
+ <&intc GIC_SPI 257 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 258 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ syscfg: syscon@44230000 {
+ compatible = "st,stm32mp23-syscfg", "syscon";
+ reg = <0x44230000 0x10000>;
+ };
+
+ pinctrl: pinctrl@44240000 {
+ compatible = "st,stm32mp257-pinctrl";
+ ranges = <0 0x44240000 0xa0400>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ interrupt-parent = <&exti1>;
+ st,syscfg = <&exti1 0x60 0xff>;
+ pins-are-numbered;
+
+ gpioa: gpio@44240000 {
+ reg = <0x0 0x400>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ clocks = <&scmi_clk CK_SCMI_GPIOA>;
+ st,bank-name = "GPIOA";
+ status = "disabled";
+ };
+
+ gpiob: gpio@44250000 {
+ reg = <0x10000 0x400>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ clocks = <&scmi_clk CK_SCMI_GPIOB>;
+ st,bank-name = "GPIOB";
+ status = "disabled";
+ };
+
+ gpioc: gpio@44260000 {
+ reg = <0x20000 0x400>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ clocks = <&scmi_clk CK_SCMI_GPIOC>;
+ st,bank-name = "GPIOC";
+ status = "disabled";
+ };
+
+ gpiod: gpio@44270000 {
+ reg = <0x30000 0x400>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ clocks = <&scmi_clk CK_SCMI_GPIOD>;
+ st,bank-name = "GPIOD";
+ status = "disabled";
+ };
+
+ gpioe: gpio@44280000 {
+ reg = <0x40000 0x400>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ clocks = <&scmi_clk CK_SCMI_GPIOE>;
+ st,bank-name = "GPIOE";
+ status = "disabled";
+ };
+
+ gpiof: gpio@44290000 {
+ reg = <0x50000 0x400>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ clocks = <&scmi_clk CK_SCMI_GPIOF>;
+ st,bank-name = "GPIOF";
+ status = "disabled";
+ };
+
+ gpiog: gpio@442a0000 {
+ reg = <0x60000 0x400>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ clocks = <&scmi_clk CK_SCMI_GPIOG>;
+ st,bank-name = "GPIOG";
+ status = "disabled";
+ };
+
+ gpioh: gpio@442b0000 {
+ reg = <0x70000 0x400>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ clocks = <&scmi_clk CK_SCMI_GPIOH>;
+ st,bank-name = "GPIOH";
+ status = "disabled";
+ };
+
+ gpioi: gpio@442c0000 {
+ reg = <0x80000 0x400>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ clocks = <&scmi_clk CK_SCMI_GPIOI>;
+ st,bank-name = "GPIOI";
+ status = "disabled";
+ };
+
+ gpioj: gpio@442d0000 {
+ reg = <0x90000 0x400>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ clocks = <&scmi_clk CK_SCMI_GPIOJ>;
+ st,bank-name = "GPIOJ";
+ status = "disabled";
+ };
+
+ gpiok: gpio@442e0000 {
+ reg = <0xa0000 0x400>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ clocks = <&scmi_clk CK_SCMI_GPIOK>;
+ st,bank-name = "GPIOK";
+ status = "disabled";
+ };
+ };
+
+ rtc: rtc@46000000 {
+ compatible = "st,stm32mp25-rtc";
+ reg = <0x46000000 0x400>;
+ clocks = <&scmi_clk CK_SCMI_RTC>,
+ <&scmi_clk CK_SCMI_RTCCK>;
+ clock-names = "pclk", "rtc_ck";
+ interrupts-extended = <&exti2 17 IRQ_TYPE_LEVEL_HIGH>;
+ status = "disabled";
+ };
+
+ pinctrl_z: pinctrl@46200000 {
+ compatible = "st,stm32mp257-z-pinctrl";
+ ranges = <0 0x46200000 0x400>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ interrupt-parent = <&exti1>;
+ st,syscfg = <&exti1 0x60 0xff>;
+ pins-are-numbered;
+
+ gpioz: gpio@46200000 {
+ reg = <0 0x400>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ clocks = <&scmi_clk CK_SCMI_GPIOZ>;
+ st,bank-name = "GPIOZ";
+ st,bank-ioport = <11>;
+ status = "disabled";
+ };
+
+ };
+
+ exti2: interrupt-controller@46230000 {
+ compatible = "st,stm32mp1-exti", "syscon";
+ reg = <0x46230000 0x400>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ interrupts-extended =
+ <&intc GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>, /* EXTI_0 */
+ <&intc GIC_SPI 18 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 20 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 21 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 22 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 24 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 25 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 27 IRQ_TYPE_LEVEL_HIGH>, /* EXTI_10 */
+ <&intc GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>,
+ <0>,
+ <0>,
+ <0>, /* EXTI_20 */
+ <&intc GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>,
+ <0>,
+ <0>,
+ <&intc GIC_SPI 212 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 151 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 156 IRQ_TYPE_LEVEL_HIGH>,
+ <0>,
+ <&intc GIC_SPI 216 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 217 IRQ_TYPE_LEVEL_HIGH>, /* EXTI_30 */
+ <&intc GIC_SPI 218 IRQ_TYPE_LEVEL_HIGH>,
+ <0>,
+ <&intc GIC_SPI 207 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 175 IRQ_TYPE_LEVEL_HIGH>,
+ <0>,
+ <0>,
+ <&intc GIC_SPI 177 IRQ_TYPE_LEVEL_HIGH>,
+ <0>,
+ <0>,
+ <&intc GIC_SPI 199 IRQ_TYPE_LEVEL_HIGH>, /* EXTI_40 */
+ <0>,
+ <0>,
+ <&intc GIC_SPI 200 IRQ_TYPE_LEVEL_HIGH>,
+ <0>,
+ <0>,
+ <&intc GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>,
+ <0>,
+ <&intc GIC_SPI 5 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH>, /* EXTI_50 */
+ <&intc GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>,
+ <0>,
+ <0>,
+ <0>,
+ <0>,
+ <0>,
+ <0>,
+ <0>, /* EXTI_60 */
+ <&intc GIC_SPI 221 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 246 IRQ_TYPE_LEVEL_HIGH>,
+ <0>,
+ <&intc GIC_SPI 247 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 248 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 249 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 256 IRQ_TYPE_LEVEL_HIGH>,
+ <0>,
+ <0>,
+ <&intc GIC_SPI 213 IRQ_TYPE_LEVEL_HIGH>; /* EXTI_70 */
+ };
+
+ intc: interrupt-controller@4ac10000 {
+ compatible = "arm,cortex-a7-gic";
+ reg = <0x4ac10000 0x1000>,
+ <0x4ac20000 0x2000>,
+ <0x4ac40000 0x2000>,
+ <0x4ac60000 0x2000>;
+ #interrupt-cells = <3>;
+ #address-cells = <1>;
+ interrupt-controller;
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/st/stm32mp233.dtsi b/arch/arm64/boot/dts/st/stm32mp233.dtsi
new file mode 100644
index 000000000000..78f4059fca5d
--- /dev/null
+++ b/arch/arm64/boot/dts/st/stm32mp233.dtsi
@@ -0,0 +1,94 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+/*
+ * Copyright (C) STMicroelectronics 2025 - All Rights Reserved
+ * Author: Alexandre Torgue <alexandre.torgue@foss.st.com> for STMicroelectronics.
+ */
+#include "stm32mp231.dtsi"
+
+/ {
+ cpus {
+ cpu1: cpu@1 {
+ compatible = "arm,cortex-a35";
+ reg = <1>;
+ device_type = "cpu";
+ enable-method = "psci";
+ power-domains = <&cpu1_pd>;
+ power-domain-names = "psci";
+ };
+ };
+
+ arm-pmu {
+ interrupts = <GIC_SPI 368 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 369 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-affinity = <&cpu0>, <&cpu1>;
+ };
+
+ psci {
+ cpu1_pd: power-domain-cpu1 {
+ #power-domain-cells = <0>;
+ power-domains = <&cluster_pd>;
+ };
+ };
+
+ timer {
+ interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>;
+ };
+};
+
+&optee {
+ interrupts = <GIC_PPI 15 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>;
+};
+
+&rifsc {
+ ethernet2: ethernet@482d0000 {
+ compatible = "st,stm32mp25-dwmac", "snps,dwmac-5.20";
+ reg = <0x482d0000 0x4000>;
+ reg-names = "stmmaceth";
+ interrupts-extended = <&intc GIC_SPI 133 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "macirq";
+ clock-names = "stmmaceth",
+ "mac-clk-tx",
+ "mac-clk-rx",
+ "ptp_ref",
+ "ethstp",
+ "eth-ck";
+ clocks = <&rcc CK_ETH2_MAC>,
+ <&rcc CK_ETH2_TX>,
+ <&rcc CK_ETH2_RX>,
+ <&rcc CK_KER_ETH2PTP>,
+ <&rcc CK_ETH2_STP>,
+ <&rcc CK_KER_ETH2>;
+ snps,axi-config = <&stmmac_axi_config_2>;
+ snps,mixed-burst;
+ snps,mtl-rx-config = <&mtl_rx_setup_2>;
+ snps,mtl-tx-config = <&mtl_tx_setup_2>;
+ snps,pbl = <2>;
+ snps,tso;
+ st,syscon = <&syscfg 0x3400>;
+ access-controllers = <&rifsc 61>;
+ status = "disabled";
+
+ mtl_rx_setup_2: rx-queues-config {
+ snps,rx-queues-to-use = <2>;
+ queue0 {};
+ queue1 {};
+ };
+
+ mtl_tx_setup_2: tx-queues-config {
+ snps,tx-queues-to-use = <4>;
+ queue0 {};
+ queue1 {};
+ queue2 {};
+ queue3 {};
+ };
+
+ stmmac_axi_config_2: stmmac-axi-config {
+ snps,blen = <0 0 0 0 16 8 4>;
+ snps,rd_osr_lmt = <0x7>;
+ snps,wr_osr_lmt = <0x7>;
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/st/stm32mp235.dtsi b/arch/arm64/boot/dts/st/stm32mp235.dtsi
new file mode 100644
index 000000000000..2719c088dd59
--- /dev/null
+++ b/arch/arm64/boot/dts/st/stm32mp235.dtsi
@@ -0,0 +1,16 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+/*
+ * Copyright (C) STMicroelectronics 2025 - All Rights Reserved
+ * Author: Alexandre Torgue <alexandre.torgue@foss.st.com> for STMicroelectronics.
+ */
+#include "stm32mp233.dtsi"
+
+&rifsc {
+ vdec: vdec@480d0000 {
+ compatible = "st,stm32mp25-vdec";
+ reg = <0x480d0000 0x3c8>;
+ interrupts = <GIC_SPI 117 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&rcc CK_BUS_VDEC>;
+ access-controllers = <&rifsc 89>;
+ };
+};
diff --git a/arch/arm64/boot/dts/st/stm32mp235f-dk.dts b/arch/arm64/boot/dts/st/stm32mp235f-dk.dts
new file mode 100644
index 000000000000..04d1b434c433
--- /dev/null
+++ b/arch/arm64/boot/dts/st/stm32mp235f-dk.dts
@@ -0,0 +1,113 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+/*
+ * Copyright (C) STMicroelectronics 2025 - All Rights Reserved
+ * Author: Amelie Delaunay <amelie.delaunay@foss.st.com> for STMicroelectronics.
+ */
+
+/dts-v1/;
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/leds/common.h>
+#include "stm32mp235.dtsi"
+#include "stm32mp23xf.dtsi"
+#include "stm32mp25-pinctrl.dtsi"
+#include "stm32mp25xxak-pinctrl.dtsi"
+
+/ {
+ model = "STMicroelectronics STM32MP235F-DK Discovery Board";
+ compatible = "st,stm32mp235f-dk", "st,stm32mp235";
+
+ aliases {
+ serial0 = &usart2;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+
+ gpio-keys {
+ compatible = "gpio-keys";
+
+ button-user-1 {
+ label = "User-1";
+ linux,code = <BTN_1>;
+ gpios = <&gpioc 5 GPIO_ACTIVE_HIGH>;
+ };
+
+ button-user-2 {
+ label = "User-2";
+ linux,code = <BTN_2>;
+ gpios = <&gpioc 11 GPIO_ACTIVE_HIGH>;
+ };
+ };
+
+ gpio-leds {
+ compatible = "gpio-leds";
+
+ led-blue {
+ function = LED_FUNCTION_HEARTBEAT;
+ color = <LED_COLOR_ID_BLUE>;
+ gpios = <&gpioh 7 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "heartbeat";
+ default-state = "off";
+ };
+ };
+
+ memory@80000000 {
+ device_type = "memory";
+ reg = <0x0 0x80000000 0x1 0x0>;
+ };
+
+ reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ fw@80000000 {
+ compatible = "shared-dma-pool";
+ reg = <0x0 0x80000000 0x0 0x4000000>;
+ no-map;
+ };
+ };
+};
+
+&arm_wdt {
+ timeout-sec = <32>;
+ status = "okay";
+};
+
+&scmi_regu {
+ scmi_vddio1: regulator@0 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ };
+ scmi_vdd_sdcard: regulator@23 {
+ reg = <VOLTD_SCMI_STPMIC2_LDO7>;
+ regulator-name = "vdd_sdcard";
+ };
+};
+
+&sdmmc1 {
+ pinctrl-names = "default", "opendrain", "sleep";
+ pinctrl-0 = <&sdmmc1_b4_pins_a>;
+ pinctrl-1 = <&sdmmc1_b4_od_pins_a>;
+ pinctrl-2 = <&sdmmc1_b4_sleep_pins_a>;
+ cd-gpios = <&gpiod 3 (GPIO_ACTIVE_LOW | GPIO_PULL_UP)>;
+ disable-wp;
+ st,neg-edge;
+ bus-width = <4>;
+ vmmc-supply = <&scmi_vdd_sdcard>;
+ vqmmc-supply = <&scmi_vddio1>;
+ status = "okay";
+};
+
+&usart2 {
+ pinctrl-names = "default", "idle", "sleep";
+ pinctrl-0 = <&usart2_pins_a>;
+ pinctrl-1 = <&usart2_idle_pins_a>;
+ pinctrl-2 = <&usart2_sleep_pins_a>;
+ /delete-property/dmas;
+ /delete-property/dma-names;
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/st/stm32mp23xc.dtsi b/arch/arm64/boot/dts/st/stm32mp23xc.dtsi
new file mode 100644
index 000000000000..e33b00b424e1
--- /dev/null
+++ b/arch/arm64/boot/dts/st/stm32mp23xc.dtsi
@@ -0,0 +1,8 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+/*
+ * Copyright (C) STMicroelectronics 2025 - All Rights Reserved
+ * Author: Alexandre Torgue <alexandre.torgue@foss.st.com> for STMicroelectronics.
+ */
+
+/ {
+};
diff --git a/arch/arm64/boot/dts/st/stm32mp23xf.dtsi b/arch/arm64/boot/dts/st/stm32mp23xf.dtsi
new file mode 100644
index 000000000000..e33b00b424e1
--- /dev/null
+++ b/arch/arm64/boot/dts/st/stm32mp23xf.dtsi
@@ -0,0 +1,8 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+/*
+ * Copyright (C) STMicroelectronics 2025 - All Rights Reserved
+ * Author: Alexandre Torgue <alexandre.torgue@foss.st.com> for STMicroelectronics.
+ */
+
+/ {
+};
diff --git a/arch/arm64/boot/dts/st/stm32mp257f-dk.dts b/arch/arm64/boot/dts/st/stm32mp257f-dk.dts
new file mode 100644
index 000000000000..a278a1e3ce03
--- /dev/null
+++ b/arch/arm64/boot/dts/st/stm32mp257f-dk.dts
@@ -0,0 +1,113 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+/*
+ * Copyright (C) STMicroelectronics 2025 - All Rights Reserved
+ * Author: Alexandre Torgue <alexandre.torgue@foss.st.com> for STMicroelectronics.
+ */
+
+/dts-v1/;
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/leds/common.h>
+#include "stm32mp257.dtsi"
+#include "stm32mp25xf.dtsi"
+#include "stm32mp25-pinctrl.dtsi"
+#include "stm32mp25xxak-pinctrl.dtsi"
+
+/ {
+ model = "STMicroelectronics STM32MP257F-DK Discovery Board";
+ compatible = "st,stm32mp257f-dk", "st,stm32mp257";
+
+ aliases {
+ serial0 = &usart2;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+
+ gpio-keys {
+ compatible = "gpio-keys";
+
+ button-user-1 {
+ label = "User-1";
+ linux,code = <BTN_1>;
+ gpios = <&gpioc 5 GPIO_ACTIVE_HIGH>;
+ };
+
+ button-user-2 {
+ label = "User-2";
+ linux,code = <BTN_2>;
+ gpios = <&gpioc 11 GPIO_ACTIVE_HIGH>;
+ };
+ };
+
+ gpio-leds {
+ compatible = "gpio-leds";
+
+ led-blue {
+ function = LED_FUNCTION_HEARTBEAT;
+ color = <LED_COLOR_ID_BLUE>;
+ gpios = <&gpioh 7 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "heartbeat";
+ default-state = "off";
+ };
+ };
+
+ memory@80000000 {
+ device_type = "memory";
+ reg = <0x0 0x80000000 0x1 0x0>;
+ };
+
+ reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ fw@80000000 {
+ compatible = "shared-dma-pool";
+ reg = <0x0 0x80000000 0x0 0x4000000>;
+ no-map;
+ };
+ };
+};
+
+&arm_wdt {
+ timeout-sec = <32>;
+ status = "okay";
+};
+
+&scmi_regu {
+ scmi_vddio1: regulator@0 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ };
+ scmi_vdd_sdcard: regulator@23 {
+ reg = <VOLTD_SCMI_STPMIC2_LDO7>;
+ regulator-name = "vdd_sdcard";
+ };
+};
+
+&sdmmc1 {
+ pinctrl-names = "default", "opendrain", "sleep";
+ pinctrl-0 = <&sdmmc1_b4_pins_a>;
+ pinctrl-1 = <&sdmmc1_b4_od_pins_a>;
+ pinctrl-2 = <&sdmmc1_b4_sleep_pins_a>;
+ cd-gpios = <&gpiod 3 (GPIO_ACTIVE_LOW | GPIO_PULL_UP)>;
+ disable-wp;
+ st,neg-edge;
+ bus-width = <4>;
+ vmmc-supply = <&scmi_vdd_sdcard>;
+ vqmmc-supply = <&scmi_vddio1>;
+ status = "okay";
+};
+
+&usart2 {
+ pinctrl-names = "default", "idle", "sleep";
+ pinctrl-0 = <&usart2_pins_a>;
+ pinctrl-1 = <&usart2_idle_pins_a>;
+ pinctrl-2 = <&usart2_sleep_pins_a>;
+ /delete-property/dmas;
+ /delete-property/dma-names;
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/tesla/fsd.dtsi b/arch/arm64/boot/dts/tesla/fsd.dtsi
index 690b4ed9c29b..9951eef9507c 100644
--- a/arch/arm64/boot/dts/tesla/fsd.dtsi
+++ b/arch/arm64/boot/dts/tesla/fsd.dtsi
@@ -92,7 +92,7 @@
reg = <0x0 0x000>;
enable-method = "psci";
clock-frequency = <2400000000>;
- cpu-idle-states = <&CPU_SLEEP>;
+ cpu-idle-states = <&cpu_sleep>;
i-cache-size = <0xc000>;
i-cache-line-size = <64>;
i-cache-sets = <256>;
@@ -108,7 +108,7 @@
reg = <0x0 0x001>;
enable-method = "psci";
clock-frequency = <2400000000>;
- cpu-idle-states = <&CPU_SLEEP>;
+ cpu-idle-states = <&cpu_sleep>;
i-cache-size = <0xc000>;
i-cache-line-size = <64>;
i-cache-sets = <256>;
@@ -124,7 +124,7 @@
reg = <0x0 0x002>;
enable-method = "psci";
clock-frequency = <2400000000>;
- cpu-idle-states = <&CPU_SLEEP>;
+ cpu-idle-states = <&cpu_sleep>;
i-cache-size = <0xc000>;
i-cache-line-size = <64>;
i-cache-sets = <256>;
@@ -139,7 +139,7 @@
compatible = "arm,cortex-a72";
reg = <0x0 0x003>;
enable-method = "psci";
- cpu-idle-states = <&CPU_SLEEP>;
+ cpu-idle-states = <&cpu_sleep>;
i-cache-size = <0xc000>;
i-cache-line-size = <64>;
i-cache-sets = <256>;
@@ -156,7 +156,7 @@
reg = <0x0 0x100>;
enable-method = "psci";
clock-frequency = <2400000000>;
- cpu-idle-states = <&CPU_SLEEP>;
+ cpu-idle-states = <&cpu_sleep>;
i-cache-size = <0xc000>;
i-cache-line-size = <64>;
i-cache-sets = <256>;
@@ -172,7 +172,7 @@
reg = <0x0 0x101>;
enable-method = "psci";
clock-frequency = <2400000000>;
- cpu-idle-states = <&CPU_SLEEP>;
+ cpu-idle-states = <&cpu_sleep>;
i-cache-size = <0xc000>;
i-cache-line-size = <64>;
i-cache-sets = <256>;
@@ -188,7 +188,7 @@
reg = <0x0 0x102>;
enable-method = "psci";
clock-frequency = <2400000000>;
- cpu-idle-states = <&CPU_SLEEP>;
+ cpu-idle-states = <&cpu_sleep>;
i-cache-size = <0xc000>;
i-cache-line-size = <64>;
i-cache-sets = <256>;
@@ -204,7 +204,7 @@
reg = <0x0 0x103>;
enable-method = "psci";
clock-frequency = <2400000000>;
- cpu-idle-states = <&CPU_SLEEP>;
+ cpu-idle-states = <&cpu_sleep>;
i-cache-size = <0xc000>;
i-cache-line-size = <64>;
i-cache-sets = <256>;
@@ -221,7 +221,7 @@
reg = <0x0 0x200>;
enable-method = "psci";
clock-frequency = <2400000000>;
- cpu-idle-states = <&CPU_SLEEP>;
+ cpu-idle-states = <&cpu_sleep>;
i-cache-size = <0xc000>;
i-cache-line-size = <64>;
i-cache-sets = <256>;
@@ -237,7 +237,7 @@
reg = <0x0 0x201>;
enable-method = "psci";
clock-frequency = <2400000000>;
- cpu-idle-states = <&CPU_SLEEP>;
+ cpu-idle-states = <&cpu_sleep>;
i-cache-size = <0xc000>;
i-cache-line-size = <64>;
i-cache-sets = <256>;
@@ -253,7 +253,7 @@
reg = <0x0 0x202>;
enable-method = "psci";
clock-frequency = <2400000000>;
- cpu-idle-states = <&CPU_SLEEP>;
+ cpu-idle-states = <&cpu_sleep>;
i-cache-size = <0xc000>;
i-cache-line-size = <64>;
i-cache-sets = <256>;
@@ -269,7 +269,7 @@
reg = <0x0 0x203>;
enable-method = "psci";
clock-frequency = <2400000000>;
- cpu-idle-states = <&CPU_SLEEP>;
+ cpu-idle-states = <&cpu_sleep>;
i-cache-size = <0xc000>;
i-cache-line-size = <64>;
i-cache-sets = <256>;
@@ -291,7 +291,7 @@
idle-states {
entry-method = "psci";
- CPU_SLEEP: cpu-sleep {
+ cpu_sleep: cpu-sleep {
idle-state-name = "c2";
compatible = "arm,idle-state";
local-timer-stop;
diff --git a/arch/arm64/boot/dts/ti/Makefile b/arch/arm64/boot/dts/ti/Makefile
index 8a4bdf87e2d4..03d4cecfc001 100644
--- a/arch/arm64/boot/dts/ti/Makefile
+++ b/arch/arm64/boot/dts/ti/Makefile
@@ -54,6 +54,7 @@ dtb-$(CONFIG_ARCH_K3) += k3-am642-evm-nand.dtb
dtb-$(CONFIG_ARCH_K3) += k3-am642-phyboard-electra-rdk.dtb
dtb-$(CONFIG_ARCH_K3) += k3-am642-phyboard-electra-gpio-fan.dtbo
dtb-$(CONFIG_ARCH_K3) += k3-am642-phyboard-electra-pcie-usb2.dtbo
+dtb-$(CONFIG_ARCH_K3) += k3-am642-phyboard-electra-x27-gpio1-spi1-uart3.dtbo
dtb-$(CONFIG_ARCH_K3) += k3-am642-sk.dtb
dtb-$(CONFIG_ARCH_K3) += k3-am642-tqma64xxl-mbax4xxl.dtb
dtb-$(CONFIG_ARCH_K3) += k3-am64-tqma64xxl-mbax4xxl-sdcard.dtbo
@@ -185,6 +186,8 @@ k3-am642-phyboard-electra-gpio-fan-dtbs := \
k3-am642-phyboard-electra-rdk.dtb k3-am642-phyboard-electra-gpio-fan.dtbo
k3-am642-phyboard-electra-pcie-usb2-dtbs := \
k3-am642-phyboard-electra-rdk.dtb k3-am642-phyboard-electra-pcie-usb2.dtbo
+k3-am642-phyboard-electra-x27-gpio1-spi1-uart3-dtbs := \
+ k3-am642-phyboard-electra-rdk.dtb k3-am642-phyboard-electra-x27-gpio1-spi1-uart3.dtbo
k3-am642-tqma64xxl-mbax4xxl-sdcard-dtbs := \
k3-am642-tqma64xxl-mbax4xxl.dtb k3-am64-tqma64xxl-mbax4xxl-sdcard.dtbo
k3-am642-tqma64xxl-mbax4xxl-wlan-dtbs := \
diff --git a/arch/arm64/boot/dts/ti/k3-am62-phycore-som.dtsi b/arch/arm64/boot/dts/ti/k3-am62-phycore-som.dtsi
index 2ef4cbaec789..55ed418c023b 100644
--- a/arch/arm64/boot/dts/ti/k3-am62-phycore-som.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am62-phycore-som.dtsi
@@ -29,6 +29,7 @@
memory@80000000 {
device_type = "memory";
reg = <0x00000000 0x80000000 0x00000000 0x80000000>;
+ bootph-all;
};
reserved_memory: reserved-memory {
@@ -36,15 +37,21 @@
#size-cells = <2>;
ranges;
- ramoops@9ca00000 {
+ ramoops@9c700000 {
compatible = "ramoops";
- reg = <0x00 0x9ca00000 0x00 0x00100000>;
+ reg = <0x00 0x9c700000 0x00 0x00100000>;
record-size = <0x8000>;
console-size = <0x8000>;
ftrace-size = <0x00>;
pmsg-size = <0x8000>;
};
+ rtos_ipc_memory_region: ipc-memories@9c800000 {
+ compatible = "shared-dma-pool";
+ reg = <0x00 0x9c800000 0x00 0x00300000>;
+ no-map;
+ };
+
mcu_m4fss_dma_memory_region: m4f-dma-memory@9cb00000 {
compatible = "shared-dma-pool";
reg = <0x00 0x9cb00000 0x00 0x100000>;
@@ -131,6 +138,7 @@
AM62X_IOPAD(0x1e0, PIN_INPUT_PULLUP, 0) /* (B16) I2C0_SCL */
AM62X_IOPAD(0x1e4, PIN_INPUT_PULLUP, 0) /* (A16) I2C0_SDA */
>;
+ bootph-all;
};
main_mdio1_pins_default: main-mdio1-default-pins {
@@ -138,6 +146,7 @@
AM62X_IOPAD(0x160, PIN_OUTPUT, 0) /* (AD24) MDIO0_MDC */
AM62X_IOPAD(0x15c, PIN_INPUT, 0) /* (AB22) MDIO0_MDIO */
>;
+ bootph-all;
};
main_mmc0_pins_default: main-mmc0-default-pins {
@@ -153,6 +162,7 @@
AM62X_IOPAD(0x1fc, PIN_INPUT_PULLUP, 0) /* (AD2) MMC0_DAT6 */
AM62X_IOPAD(0x1f8, PIN_INPUT_PULLUP, 0) /* (AC2) MMC0_DAT7 */
>;
+ bootph-all;
};
main_rgmii1_pins_default: main-rgmii1-default-pins {
@@ -170,6 +180,7 @@
AM62X_IOPAD(0x130, PIN_OUTPUT, 0) /* (AE19) RGMII1_TXC */
AM62X_IOPAD(0x12c, PIN_OUTPUT, 0) /* (AD19) RGMII1_TX_CTL */
>;
+ bootph-all;
};
ospi0_pins_default: ospi0-default-pins {
@@ -186,6 +197,7 @@
AM62X_IOPAD(0x028, PIN_INPUT, 0) /* (J22) OSPI0_D7 */
AM62X_IOPAD(0x008, PIN_INPUT, 0) /* (J24) OSPI0_DQS */
>;
+ bootph-all;
};
pmic_irq_pins_default: pmic-irq-default-pins {
@@ -210,6 +222,7 @@
&cpsw_port1 {
phy-mode = "rgmii-rxid";
phy-handle = <&cpsw3g_phy1>;
+ bootph-all;
};
&cpsw3g_mdio {
@@ -220,6 +233,7 @@
cpsw3g_phy1: ethernet-phy@1 {
compatible = "ethernet-phy-id2000.a231", "ethernet-phy-ieee802.3-c22";
reg = <1>;
+ bootph-all;
ti,rx-internal-delay = <DP83867_RGMIIDCTL_2_00_NS>;
ti,fifo-depth = <DP83867_PHYCR_FIFO_DEPTH_4_B_NIB>;
};
@@ -232,10 +246,15 @@
};
};
+&main_pktdma {
+ bootph-all;
+};
+
&main_i2c0 {
pinctrl-names = "default";
pinctrl-0 = <&main_i2c0_pins_default>;
clock-frequency = <400000>;
+ bootph-all;
status = "okay";
pmic@30 {
@@ -355,6 +374,7 @@
cdns,tchsh-ns = <60>;
cdns,tslch-ns = <60>;
cdns,read-delay = <0>;
+ bootph-all;
};
};
@@ -363,5 +383,6 @@
pinctrl-0 = <&main_mmc0_pins_default>;
disable-wp;
non-removable;
+ bootph-all;
status = "okay";
};
diff --git a/arch/arm64/boot/dts/ti/k3-am62-verdin-dahlia.dtsi b/arch/arm64/boot/dts/ti/k3-am62-verdin-dahlia.dtsi
index 9202181fbd65..fcc4cb2e9389 100644
--- a/arch/arm64/boot/dts/ti/k3-am62-verdin-dahlia.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am62-verdin-dahlia.dtsi
@@ -28,10 +28,10 @@
"Headphone Jack", "HPOUTR",
"IN2L", "Line In Jack",
"IN2R", "Line In Jack",
- "Headphone Jack", "MICBIAS",
- "IN1L", "Headphone Jack";
+ "Microphone Jack", "MICBIAS",
+ "IN1L", "Microphone Jack";
simple-audio-card,widgets =
- "Microphone", "Headphone Jack",
+ "Microphone", "Microphone Jack",
"Headphone", "Headphone Jack",
"Line", "Line In Jack";
diff --git a/arch/arm64/boot/dts/ti/k3-am625-beagleplay.dts b/arch/arm64/boot/dts/ti/k3-am625-beagleplay.dts
index 75c80290b12a..a5469f2712f0 100644
--- a/arch/arm64/boot/dts/ti/k3-am625-beagleplay.dts
+++ b/arch/arm64/boot/dts/ti/k3-am625-beagleplay.dts
@@ -65,6 +65,14 @@
pmsg-size = <0x8000>;
};
+ /* global cma region */
+ linux,cma {
+ compatible = "shared-dma-pool";
+ reusable;
+ size = <0x00 0x8000000>;
+ linux,cma-default;
+ };
+
secure_tfa_ddr: tfa@9e780000 {
reg = <0x00 0x9e780000 0x00 0x80000>;
no-map;
diff --git a/arch/arm64/boot/dts/ti/k3-am62a-mcu.dtsi b/arch/arm64/boot/dts/ti/k3-am62a-mcu.dtsi
index 0469c766b769..9ed9d703ff24 100644
--- a/arch/arm64/boot/dts/ti/k3-am62a-mcu.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am62a-mcu.dtsi
@@ -12,7 +12,6 @@
#pinctrl-cells = <1>;
pinctrl-single,register-width = <32>;
pinctrl-single,function-mask = <0xffffffff>;
- status = "disabled";
};
mcu_esm: esm@4100000 {
diff --git a/arch/arm64/boot/dts/ti/k3-am62a-phycore-som.dtsi b/arch/arm64/boot/dts/ti/k3-am62a-phycore-som.dtsi
index a5aceaa39670..147d56b87984 100644
--- a/arch/arm64/boot/dts/ti/k3-am62a-phycore-som.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am62a-phycore-som.dtsi
@@ -42,6 +42,7 @@
device_type = "memory";
/* 2G RAM */
reg = <0x00000000 0x80000000 0x00000000 0x80000000>;
+ bootph-all;
};
reserved-memory {
@@ -99,6 +100,7 @@
AM62AX_IOPAD(0x1e0, PIN_INPUT_PULLUP, 0) /* (D17) I2C0_SCL */
AM62AX_IOPAD(0x1e4, PIN_INPUT_PULLUP, 0) /* (E16) I2C0_SDA */
>;
+ bootph-all;
};
main_mdio1_pins_default: main-mdio1-default-pins {
@@ -106,6 +108,7 @@
AM62AX_IOPAD(0x160, PIN_OUTPUT, 0) /* (V12) MDIO0_MDC */
AM62AX_IOPAD(0x15c, PIN_INPUT, 0) /* (V13) MDIO0_MDIO */
>;
+ bootph-all;
};
main_mmc0_pins_default: main-mmc0-default-pins {
@@ -121,6 +124,7 @@
AM62AX_IOPAD(0x1fc, PIN_INPUT_PULLUP, 0) /* (W9) MMC0_DAT6 */
AM62AX_IOPAD(0x1f8, PIN_INPUT_PULLUP, 0) /* (AB8) MMC0_DAT7 */
>;
+ bootph-all;
};
main_rgmii1_pins_default: main-rgmii1-default-pins {
@@ -138,6 +142,7 @@
AM62AX_IOPAD(0x130, PIN_OUTPUT, 0) /* (AB17) RGMII1_TXC */
AM62AX_IOPAD(0x12c, PIN_OUTPUT, 0) /* (W16) RGMII1_TX_CTL */
>;
+ bootph-all;
};
ospi0_pins_default: ospi0-default-pins {
@@ -155,6 +160,7 @@
AM62AX_IOPAD(0x028, PIN_INPUT, 0) /* (J22) OSPI0_D7 */
AM62AX_IOPAD(0x008, PIN_INPUT, 0) /* (L21) OSPI0_DQS */
>;
+ bootph-all;
};
pmic_irq_pins_default: pmic-irq-default-pins {
@@ -165,14 +171,15 @@
};
&cpsw3g {
- status = "okay";
pinctrl-names = "default";
pinctrl-0 = <&main_rgmii1_pins_default>;
+ status = "okay";
};
&cpsw_port1 {
phy-mode = "rgmii-rxid";
phy-handle = <&cpsw3g_phy1>;
+ bootph-all;
};
&cpsw3g_mdio {
@@ -182,6 +189,7 @@
cpsw3g_phy1: ethernet-phy@1 {
compatible = "ethernet-phy-id2000.a231", "ethernet-phy-ieee802.3-c22";
reg = <1>;
+ bootph-all;
ti,clk-output-sel = <DP83867_CLK_O_SEL_OFF>;
ti,rx-internal-delay = <DP83867_RGMIIDCTL_2_00_NS>;
ti,fifo-depth = <DP83867_PHYCR_FIFO_DEPTH_4_B_NIB>;
@@ -196,6 +204,7 @@
pinctrl-names = "default";
pinctrl-0 = <&main_i2c0_pins_default>;
clock-frequency = <400000>;
+ bootph-all;
status = "okay";
pmic@30 {
@@ -215,8 +224,8 @@
interrupts = <GIC_SPI 224 IRQ_TYPE_LEVEL_HIGH>;
interrupt-controller;
#interrupt-cells = <1>;
- ti,power-button;
system-power-controller;
+ ti,power-button;
regulators {
vdd_3v3: buck1 {
@@ -302,6 +311,10 @@
status = "okay";
};
+&main_pktdma {
+ bootph-all;
+};
+
&ospi0 {
pinctrl-names = "default";
pinctrl-0 = <&ospi0_pins_default>;
@@ -318,6 +331,7 @@
cdns,tchsh-ns = <60>;
cdns,tslch-ns = <60>;
cdns,read-delay = <0>;
+ bootph-all;
};
};
@@ -326,5 +340,6 @@
pinctrl-0 = <&main_mmc0_pins_default>;
disable-wp;
non-removable;
+ bootph-all;
status = "okay";
};
diff --git a/arch/arm64/boot/dts/ti/k3-am62a7-sk.dts b/arch/arm64/boot/dts/ti/k3-am62a7-sk.dts
index a6f0d87a50d8..1c9d95696c83 100644
--- a/arch/arm64/boot/dts/ti/k3-am62a7-sk.dts
+++ b/arch/arm64/boot/dts/ti/k3-am62a7-sk.dts
@@ -18,10 +18,13 @@
aliases {
serial0 = &wkup_uart0;
+ serial1 = &mcu_uart0;
serial2 = &main_uart0;
serial3 = &main_uart1;
mmc0 = &sdhci0;
mmc1 = &sdhci1;
+ rtc0 = &wkup_rtc0;
+ rtc1 = &tps659312;
};
chosen {
@@ -655,6 +658,7 @@
};
&usb0 {
+ bootph-all;
usb-role-switch;
port {
diff --git a/arch/arm64/boot/dts/ti/k3-am62p-j722s-common-mcu.dtsi b/arch/arm64/boot/dts/ti/k3-am62p-j722s-common-mcu.dtsi
index b33aff0d65c9..bd6a00d13aea 100644
--- a/arch/arm64/boot/dts/ti/k3-am62p-j722s-common-mcu.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am62p-j722s-common-mcu.dtsi
@@ -12,15 +12,7 @@
#pinctrl-cells = <1>;
pinctrl-single,register-width = <32>;
pinctrl-single,function-mask = <0xffffffff>;
- pinctrl-single,gpio-range =
- <&mcu_pmx_range 0 21 PIN_GPIO_RANGE_IOPAD>,
- <&mcu_pmx_range 23 1 PIN_GPIO_RANGE_IOPAD>,
- <&mcu_pmx_range 32 2 PIN_GPIO_RANGE_IOPAD>;
bootph-all;
-
- mcu_pmx_range: gpio-range {
- #pinctrl-single,gpio-range-cells = <3>;
- };
};
mcu_esm: esm@4100000 {
diff --git a/arch/arm64/boot/dts/ti/k3-am62p-j722s-common-wakeup.dtsi b/arch/arm64/boot/dts/ti/k3-am62p-j722s-common-wakeup.dtsi
index 6f32135f00a5..6757b37a9de3 100644
--- a/arch/arm64/boot/dts/ti/k3-am62p-j722s-common-wakeup.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am62p-j722s-common-wakeup.dtsi
@@ -2,9 +2,11 @@
/*
* Device Tree file for the WAKEUP domain peripherals shared by AM62P and J722S
*
- * Copyright (C) 2023-2024 Texas Instruments Incorporated - https://www.ti.com/
+ * Copyright (C) 2023-2025 Texas Instruments Incorporated - https://www.ti.com/
*/
+#include <dt-bindings/bus/ti-sysc.h>
+
&cbass_wakeup {
wkup_conf: bus@43000000 {
compatible = "simple-bus";
@@ -41,14 +43,34 @@
};
};
- wkup_uart0: serial@2b300000 {
- compatible = "ti,am64-uart", "ti,am654-uart";
- reg = <0x00 0x2b300000 0x00 0x100>;
- interrupts = <GIC_SPI 186 IRQ_TYPE_LEVEL_HIGH>;
+ target-module@2b300050 {
+ compatible = "ti,sysc-omap2", "ti,sysc";
+ reg = <0 0x2b300050 0 0x4>,
+ <0 0x2b300054 0 0x4>,
+ <0 0x2b300058 0 0x4>;
+ reg-names = "rev", "sysc", "syss";
+ ti,sysc-mask = <(SYSC_OMAP2_ENAWAKEUP |
+ SYSC_OMAP2_SOFTRESET |
+ SYSC_OMAP2_AUTOIDLE)>;
+ ti,sysc-sidle = <SYSC_IDLE_FORCE>,
+ <SYSC_IDLE_NO>,
+ <SYSC_IDLE_SMART>,
+ <SYSC_IDLE_SMART_WKUP>;
+ ti,syss-mask = <1>;
+ ti,no-reset-on-init;
power-domains = <&k3_pds 114 TI_SCI_PD_EXCLUSIVE>;
clocks = <&k3_clks 114 0>;
- clock-names = "fclk";
- status = "disabled";
+ clock-names = "fck";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 0 0x2b300000 0x100000>;
+
+ wkup_uart0: serial@0 {
+ compatible = "ti,am64-uart", "ti,am654-uart";
+ reg = <0 0x100>;
+ interrupts = <GIC_SPI 186 IRQ_TYPE_LEVEL_HIGH>;
+ status = "disabled";
+ };
};
wkup_i2c0: i2c@2b200000 {
diff --git a/arch/arm64/boot/dts/ti/k3-am62p-main.dtsi b/arch/arm64/boot/dts/ti/k3-am62p-main.dtsi
index 420c77c8e9e5..6aea9d3f134e 100644
--- a/arch/arm64/boot/dts/ti/k3-am62p-main.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am62p-main.dtsi
@@ -42,17 +42,23 @@
ti,interrupt-ranges = <5 69 35>;
};
-&main_pmx0 {
- pinctrl-single,gpio-range =
- <&main_pmx0_range 0 32 PIN_GPIO_RANGE_IOPAD>,
- <&main_pmx0_range 33 38 PIN_GPIO_RANGE_IOPAD>,
- <&main_pmx0_range 72 22 PIN_GPIO_RANGE_IOPAD>,
- <&main_pmx0_range 137 5 PIN_GPIO_RANGE_IOPAD>,
- <&main_pmx0_range 143 3 PIN_GPIO_RANGE_IOPAD>,
- <&main_pmx0_range 149 2 PIN_GPIO_RANGE_IOPAD>;
+&main_conf {
+ audio_refclk0: clock-controller@82e0 {
+ compatible = "ti,am62-audio-refclk";
+ reg = <0x82e0 0x4>;
+ clocks = <&k3_clks 157 0>;
+ assigned-clocks = <&k3_clks 157 0>;
+ assigned-clock-parents = <&k3_clks 157 16>;
+ #clock-cells = <0>;
+ };
- main_pmx0_range: gpio-range {
- #pinctrl-single,gpio-range-cells = <3>;
+ audio_refclk1: clock-controller@82e4 {
+ compatible = "ti,am62-audio-refclk";
+ reg = <0x82e4 0x4>;
+ clocks = <&k3_clks 157 18>;
+ assigned-clocks = <&k3_clks 157 18>;
+ assigned-clock-parents = <&k3_clks 157 34>;
+ #clock-cells = <0>;
};
};
diff --git a/arch/arm64/boot/dts/ti/k3-am62p5-sk.dts b/arch/arm64/boot/dts/ti/k3-am62p5-sk.dts
index ad71d2f27f53..d29f524600af 100644
--- a/arch/arm64/boot/dts/ti/k3-am62p5-sk.dts
+++ b/arch/arm64/boot/dts/ti/k3-am62p5-sk.dts
@@ -19,6 +19,7 @@
aliases {
serial0 = &wkup_uart0;
+ serial1 = &mcu_uart0;
serial2 = &main_uart0;
serial3 = &main_uart1;
mmc0 = &sdhci0;
@@ -310,7 +311,7 @@
main_usb1_pins_default: main-usb1-default-pins {
pinctrl-single,pins = <
- AM62PX_IOPAD(0x0258, PIN_INPUT, 0) /* (G21) USB1_DRVVBUS */
+ AM62PX_IOPAD(0x0258, PIN_INPUT | PIN_DS_PULLUD_ENABLE | PIN_DS_PULL_UP, 0) /* (G21) USB1_DRVVBUS */
>;
};
@@ -519,6 +520,7 @@
};
&usb0 {
+ bootph-all;
usb-role-switch;
port {
diff --git a/arch/arm64/boot/dts/ti/k3-am62x-phyboard-lyra.dtsi b/arch/arm64/boot/dts/ti/k3-am62x-phyboard-lyra.dtsi
index 922cad14c9f8..aab74d6019b0 100644
--- a/arch/arm64/boot/dts/ti/k3-am62x-phyboard-lyra.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am62x-phyboard-lyra.dtsi
@@ -138,6 +138,7 @@
regulator-max-microvolt = <3300000>;
regulator-always-on;
regulator-boot-on;
+ bootph-all;
};
vcc_3v3_sw: regulator-vcc-3v3-sw {
@@ -233,6 +234,7 @@
AM62X_IOPAD(0x224, PIN_INPUT_PULLUP, 0) /* (D22) MMC1_DAT3 */
AM62X_IOPAD(0x240, PIN_INPUT_PULLUP, 0) /* (D17) MMC1_SDCD */
>;
+ bootph-all;
};
main_rgmii2_pins_default: main-rgmii2-default-pins {
@@ -257,6 +259,7 @@
AM62X_IOPAD(0x1c8, PIN_INPUT, 0) /* (D14) UART0_RXD */
AM62X_IOPAD(0x1cc, PIN_OUTPUT, 0) /* (E14) UART0_TXD */
>;
+ bootph-all;
};
main_uart1_pins_default: main-uart1-default-pins {
@@ -266,6 +269,7 @@
AM62X_IOPAD(0x1ac, PIN_INPUT, 2) /* (E19) MCASP0_AFSR.UART1_RXD */
AM62X_IOPAD(0x1b0, PIN_OUTPUT, 2) /* (A20) MCASP0_ACLKR.UART1_TXD */
>;
+ bootph-pre-ram;
};
main_usb1_pins_default: main-usb1-default-pins {
@@ -430,12 +434,14 @@
&main_uart0 {
pinctrl-names = "default";
pinctrl-0 = <&main_uart0_pins_default>;
+ bootph-all;
status = "okay";
};
&main_uart1 {
pinctrl-names = "default";
pinctrl-0 = <&main_uart1_pins_default>;
+ bootph-pre-ram;
/* Main UART1 may be used by TIFS firmware */
status = "okay";
};
@@ -467,11 +473,13 @@
pinctrl-0 = <&main_mmc1_pins_default>;
disable-wp;
no-1-8-v;
+ bootph-all;
status = "okay";
};
&usbss0 {
ti,vbus-divider;
+ bootph-all;
status = "okay";
};
@@ -482,6 +490,7 @@
&usb0 {
usb-role-switch;
+ bootph-all;
port {
typec_hs: endpoint {
diff --git a/arch/arm64/boot/dts/ti/k3-am62x-sk-common.dtsi b/arch/arm64/boot/dts/ti/k3-am62x-sk-common.dtsi
index 2f129e8cd5b9..d52cb2a5a589 100644
--- a/arch/arm64/boot/dts/ti/k3-am62x-sk-common.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am62x-sk-common.dtsi
@@ -12,6 +12,8 @@
/ {
aliases {
+ serial0 = &wkup_uart0;
+ serial1 = &mcu_uart0;
serial2 = &main_uart0;
mmc0 = &sdhci0;
mmc1 = &sdhci1;
diff --git a/arch/arm64/boot/dts/ti/k3-am64-phycore-som.dtsi b/arch/arm64/boot/dts/ti/k3-am64-phycore-som.dtsi
index 99a6fdfaa7fb..d9d491b12c33 100644
--- a/arch/arm64/boot/dts/ti/k3-am64-phycore-som.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am64-phycore-som.dtsi
@@ -27,6 +27,7 @@
memory@80000000 {
device_type = "memory";
reg = <0x00000000 0x80000000 0x00000000 0x80000000>;
+ bootph-all;
};
reserved_memory: reserved-memory {
@@ -99,6 +100,12 @@
reg = <0x00 0xa4100000 0x00 0xf00000>;
no-map;
};
+
+ rtos_ipc_memory_region: ipc-memories@a5000000 {
+ reg = <0x00 0xa5000000 0x00 0x00800000>;
+ alignment = <0x1000>;
+ no-map;
+ };
};
leds {
@@ -132,6 +139,7 @@
AM64X_IOPAD(0x01fc, PIN_OUTPUT, 4) /* (R2) PRG0_PRU1_GPO19.MDIO0_MDC */
AM64X_IOPAD(0x0100, PIN_OUTPUT, 7) /* (V7) PRG1_PRU0_GPO18.GPIO0_63 */
>;
+ bootph-all;
};
cpsw_rgmii1_pins_default: cpsw-rgmii1-default-pins {
@@ -150,6 +158,7 @@
AM64X_IOPAD(0x014c, PIN_OUTPUT, 4) /* (AA14) PRG1_PRU1_GPO17.RGMII1_TD3 */
AM64X_IOPAD(0x0154, PIN_INPUT, 7) /* (V12) PRG1_PRU1_GPO19.GPIO0_84 */
>;
+ bootph-all;
};
eeprom_wp_pins_default: eeprom-wp-default-pins {
@@ -169,6 +178,7 @@
AM64X_IOPAD(0x0260, PIN_INPUT, 0) /* (A18) I2C0_SCL */
AM64X_IOPAD(0x0264, PIN_INPUT, 0) /* (B18) I2C0_SDA */
>;
+ bootph-all;
};
ospi0_pins_default: ospi0-default-pins {
@@ -185,6 +195,7 @@
AM64X_IOPAD(0x0028, PIN_INPUT, 0) /* (M17) OSPI0_D7 */
AM64X_IOPAD(0x002c, PIN_OUTPUT, 0) /* (L19) OSPI0_CSn0 */
>;
+ bootph-all;
};
rtc_pins_default: rtc-defaults-pins {
@@ -201,26 +212,29 @@
};
&cpsw3g_mdio {
- status = "okay";
pinctrl-names = "default";
pinctrl-0 = <&cpsw_mdio_pins_default>;
+ bootph-all;
+ status = "okay";
cpsw3g_phy1: ethernet-phy@1 {
compatible = "ethernet-phy-id2000.a231", "ethernet-phy-ieee802.3-c22";
reg = <1>;
interrupt-parent = <&main_gpio0>;
interrupts = <84 IRQ_TYPE_EDGE_FALLING>;
- ti,rx-internal-delay = <DP83867_RGMIIDCTL_2_00_NS>;
- ti,fifo-depth = <DP83867_PHYCR_FIFO_DEPTH_4_B_NIB>;
reset-gpios = <&main_gpio0 63 GPIO_ACTIVE_LOW>;
reset-assert-us = <1000>;
reset-deassert-us = <1000>;
+ bootph-all;
+ ti,rx-internal-delay = <DP83867_RGMIIDCTL_2_00_NS>;
+ ti,fifo-depth = <DP83867_PHYCR_FIFO_DEPTH_4_B_NIB>;
};
};
&cpsw_port1 {
phy-mode = "rgmii-rxid";
phy-handle = <&cpsw3g_phy1>;
+ bootph-all;
status = "okay";
};
@@ -262,10 +276,11 @@
};
&main_i2c0 {
- status = "okay";
pinctrl-names = "default";
pinctrl-0 = <&main_i2c0_pins_default>;
clock-frequency = <400000>;
+ bootph-all;
+ status = "okay";
eeprom@50 {
compatible = "atmel,24c32";
@@ -330,6 +345,10 @@
};
};
+&main_pktdma {
+ bootph-all;
+};
+
&main_r5fss0_core0 {
mboxes = <&mailbox0_cluster2 &mbox_main_r5fss0_core0>;
memory-region = <&main_r5fss0_core0_dma_memory_region>,
@@ -362,9 +381,9 @@
};
&ospi0 {
- status = "okay";
pinctrl-names = "default";
pinctrl-0 = <&ospi0_pins_default>;
+ status = "okay";
serial_flash: flash@0 {
compatible = "jedec,spi-nor";
@@ -377,15 +396,17 @@
cdns,tchsh-ns = <60>;
cdns,tslch-ns = <60>;
cdns,read-delay = <0>;
+ bootph-all;
};
};
&sdhci0 {
- status = "okay";
non-removable;
ti,driver-strength-ohm = <50>;
disable-wp;
keep-power-in-suspend;
+ bootph-all;
+ status = "okay";
};
&tscadc0 {
diff --git a/arch/arm64/boot/dts/ti/k3-am642-phyboard-electra-rdk.dts b/arch/arm64/boot/dts/ti/k3-am642-phyboard-electra-rdk.dts
index bc8e1ce11047..f63c101b7d61 100644
--- a/arch/arm64/boot/dts/ti/k3-am642-phyboard-electra-rdk.dts
+++ b/arch/arm64/boot/dts/ti/k3-am642-phyboard-electra-rdk.dts
@@ -171,6 +171,7 @@
regulator-max-microvolt = <3300000>;
regulator-boot-on;
regulator-always-on;
+ bootph-all;
};
};
@@ -275,6 +276,7 @@
AM64X_IOPAD(0x0294, PIN_INPUT_PULLUP, 0) /* (J19) MMC1_CMD */
AM64X_IOPAD(0x0298, PIN_INPUT_PULLUP, 0) /* (D19) MMC1_SDCD */
>;
+ bootph-all;
};
main_spi0_pins_default: main-spi0-default-pins {
@@ -291,6 +293,7 @@
AM64X_IOPAD(0x0230, PIN_INPUT, 0) /* (D15) UART0_RXD */
AM64X_IOPAD(0x0234, PIN_OUTPUT, 0) /* (C16) UART0_TXD */
>;
+ bootph-all;
};
main_uart1_pins_default: main-uart1-default-pins {
@@ -349,10 +352,10 @@
};
&main_i2c1 {
- status = "okay";
pinctrl-names = "default";
pinctrl-0 = <&main_i2c1_pins_default>;
clock-frequency = <400000>;
+ status = "okay";
eeprom@51 {
compatible = "atmel,24c02";
@@ -382,25 +385,25 @@
};
&main_mcan0 {
- status = "okay";
pinctrl-names = "default";
pinctrl-0 = <&main_mcan0_pins_default>;
phys = <&can_tc1>;
+ status = "okay";
};
&main_mcan1 {
- status = "okay";
pinctrl-names = "default";
pinctrl-0 = <&main_mcan1_pins_default>;
phys = <&can_tc2>;
+ status = "okay";
};
&main_spi0 {
- status = "okay";
pinctrl-names = "default";
pinctrl-0 = <&main_spi0_pins_default>;
cs-gpios = <0>, <&main_gpio1 43 GPIO_ACTIVE_LOW>;
ti,pindir-d0-out-d1-in;
+ status = "okay";
tpm@1 {
compatible = "infineon,slb9670", "tcg,tpm_tis-spi";
@@ -410,25 +413,27 @@
};
&main_uart0 {
- status = "okay";
pinctrl-names = "default";
pinctrl-0 = <&main_uart0_pins_default>;
+ bootph-all;
+ status = "okay";
};
&main_uart1 {
- status = "okay";
pinctrl-names = "default";
pinctrl-0 = <&main_uart1_pins_default>;
uart-has-rtscts;
+ status = "okay";
};
&sdhci1 {
- status = "okay";
vmmc-supply = <&vcc_3v3_mmc>;
pinctrl-names = "default";
pinctrl-0 = <&main_mmc1_pins_default>;
disable-wp;
no-1-8-v;
+ bootph-all;
+ status = "okay";
};
&serdes0 {
diff --git a/arch/arm64/boot/dts/ti/k3-am642-phyboard-electra-x27-gpio1-spi1-uart3.dtso b/arch/arm64/boot/dts/ti/k3-am642-phyboard-electra-x27-gpio1-spi1-uart3.dtso
new file mode 100644
index 000000000000..996c42ec4253
--- /dev/null
+++ b/arch/arm64/boot/dts/ti/k3-am642-phyboard-electra-x27-gpio1-spi1-uart3.dtso
@@ -0,0 +1,63 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/*
+ * Copyright (C) 2025 PHYTEC Messtechnik GmbH
+ * Authors:
+ * Wadim Egorov <w.egorov@phytec.de>
+ * Daniel Schultz <d.schultz@phytec.de>
+ *
+ * GPIO, SPI and UART examples for the X27 expansion connector.
+ */
+
+/dts-v1/;
+/plugin/;
+
+#include "k3-pinctrl.h"
+
+&{/} {
+ aliases {
+ serial5 = "/bus@f4000/serial@2830000";
+ };
+};
+
+&main_pmx0 {
+ main_gpio1_exp_header_gpio_pins_default: main-gpio1-exp-header-gpio-pins-default {
+ pinctrl-single,pins = <
+ AM64X_IOPAD(0x0220, PIN_INPUT, 7) /* (D14) SPI1_CS1.GPIO1_48 */
+ >;
+ };
+
+ main_spi1_pins_default: main-spi1-pins-default {
+ pinctrl-single,pins = <
+ AM64X_IOPAD(0x0224, PIN_INPUT, 0) /* (C14) SPI1_CLK */
+ AM64X_IOPAD(0x021C, PIN_OUTPUT, 0) /* (B14) SPI1_CS0 */
+ AM64X_IOPAD(0x0228, PIN_OUTPUT, 0) /* (B15) SPI1_D0 */
+ AM64X_IOPAD(0x022C, PIN_INPUT, 0) /* (A15) SPI1_D1 */
+ >;
+ };
+
+ main_uart3_pins_default: main-uart3-pins-default {
+ pinctrl-single,pins = <
+ AM64X_IOPAD(0x0048, PIN_INPUT, 2) /* (U20) GPMC0_AD3.UART3_RXD */
+ AM64X_IOPAD(0x004c, PIN_OUTPUT, 2) /* (U18) GPMC0_AD4.UART3_TXD */
+ >;
+ };
+};
+
+&main_gpio1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&main_gpio1_exp_header_gpio_pins_default>;
+ status = "okay";
+};
+
+&main_spi1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&main_spi1_pins_default>;
+ ti,pindir-d0-out-d1-in = <1>;
+ status = "okay";
+};
+
+&main_uart3 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&main_uart3_pins_default>;
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/ti/k3-j721e-common-proc-board.dts b/arch/arm64/boot/dts/ti/k3-j721e-common-proc-board.dts
index 4c1e02a4e7a2..4421852161dd 100644
--- a/arch/arm64/boot/dts/ti/k3-j721e-common-proc-board.dts
+++ b/arch/arm64/boot/dts/ti/k3-j721e-common-proc-board.dts
@@ -540,6 +540,7 @@
#phy-cells = <0>;
cdns,phy-type = <PHY_TYPE_USB3>;
resets = <&serdes_wiz3 1>, <&serdes_wiz3 2>;
+ bootph-all;
};
};
diff --git a/arch/arm64/boot/dts/ti/k3-j721e-sk.dts b/arch/arm64/boot/dts/ti/k3-j721e-sk.dts
index 69b3d1ed8a21..440ef57be294 100644
--- a/arch/arm64/boot/dts/ti/k3-j721e-sk.dts
+++ b/arch/arm64/boot/dts/ti/k3-j721e-sk.dts
@@ -1040,6 +1040,7 @@
#phy-cells = <0>;
cdns,phy-type = <PHY_TYPE_USB3>;
resets = <&serdes_wiz3 1>, <&serdes_wiz3 2>;
+ bootph-all;
};
};
diff --git a/arch/arm64/boot/dts/ti/k3-j721s2-som-p0.dtsi b/arch/arm64/boot/dts/ti/k3-j721s2-som-p0.dtsi
index b3a0385ed3d8..54fc5c4f8c3f 100644
--- a/arch/arm64/boot/dts/ti/k3-j721s2-som-p0.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-j721s2-som-p0.dtsi
@@ -448,6 +448,47 @@
cdns,tchsh-ns = <60>;
cdns,tslch-ns = <60>;
cdns,read-delay = <4>;
+
+ partitions {
+ compatible = "fixed-partitions";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ partition@0 {
+ label = "ospi.tiboot3";
+ reg = <0x0 0x80000>;
+ };
+
+ partition@80000 {
+ label = "ospi.tispl";
+ reg = <0x80000 0x200000>;
+ };
+
+ partition@280000 {
+ label = "ospi.u-boot";
+ reg = <0x280000 0x400000>;
+ };
+
+ partition@680000 {
+ label = "ospi.env";
+ reg = <0x680000 0x40000>;
+ };
+
+ partition@6c0000 {
+ label = "ospi.env.backup";
+ reg = <0x6c0000 0x40000>;
+ };
+
+ partition@800000 {
+ label = "ospi.rootfs";
+ reg = <0x800000 0x37c0000>;
+ };
+
+ partition@3fc0000 {
+ label = "ospi.phypattern";
+ reg = <0x3fc0000 0x40000>;
+ };
+ };
};
};
diff --git a/arch/arm64/boot/dts/ti/k3-j722s-evm.dts b/arch/arm64/boot/dts/ti/k3-j722s-evm.dts
index d184e9c1a0a5..2127316f36a3 100644
--- a/arch/arm64/boot/dts/ti/k3-j722s-evm.dts
+++ b/arch/arm64/boot/dts/ti/k3-j722s-evm.dts
@@ -263,6 +263,13 @@
bootph-all;
};
+ main_i2c2_pins_default: main-i2c2-default-pins {
+ pinctrl-single,pins = <
+ J722S_IOPAD(0x00b0, PIN_INPUT_PULLUP, 1) /* (P22) GPMC0_CSn2.I2C2_SCL */
+ J722S_IOPAD(0x00b4, PIN_INPUT_PULLUP, 1) /* (P23) GPMC0_CSn3.I2C2_SDA */
+ >;
+ };
+
main_uart0_pins_default: main-uart0-default-pins {
pinctrl-single,pins = <
J722S_IOPAD(0x01c8, PIN_INPUT, 0) /* (A22) UART0_RXD */
@@ -590,7 +597,7 @@
p05-hog {
/* P05 - USB2.0_MUX_SEL */
gpio-hog;
- gpios = <5 GPIO_ACTIVE_HIGH>;
+ gpios = <5 GPIO_ACTIVE_LOW>;
output-high;
};
@@ -631,6 +638,27 @@
};
};
+&main_i2c2 {
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&main_i2c2_pins_default>;
+ clock-frequency = <400000>;
+
+ pca9543_0: i2c-mux@70 {
+ compatible = "nxp,pca9543";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x70>;
+ };
+
+ pca9543_1: i2c-mux@71 {
+ compatible = "nxp,pca9543";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x71>;
+ };
+};
+
&ospi0 {
pinctrl-names = "default";
pinctrl-0 = <&ospi0_pins_default>;
diff --git a/arch/arm64/boot/dts/ti/k3-j722s-main.dtsi b/arch/arm64/boot/dts/ti/k3-j722s-main.dtsi
index 3ac2d45a0558..6850f50530f1 100644
--- a/arch/arm64/boot/dts/ti/k3-j722s-main.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-j722s-main.dtsi
@@ -154,6 +154,189 @@
};
};
+ ti_csi2rx1: ticsi2rx@30122000 {
+ compatible = "ti,j721e-csi2rx-shim";
+ reg = <0x00 0x30122000 0x00 0x1000>;
+ ranges;
+ #address-cells = <2>;
+ #size-cells = <2>;
+ dmas = <&main_bcdma_csi 0 0x5100 0>;
+ dma-names = "rx0";
+ power-domains = <&k3_pds 247 TI_SCI_PD_EXCLUSIVE>;
+ status = "disabled";
+
+ cdns_csi2rx1: csi-bridge@30121000 {
+ compatible = "ti,j721e-csi2rx", "cdns,csi2rx";
+ reg = <0x00 0x30121000 0x00 0x1000>;
+ clocks = <&k3_clks 247 0>, <&k3_clks 247 3>, <&k3_clks 247 0>,
+ <&k3_clks 247 0>, <&k3_clks 247 4>, <&k3_clks 247 4>;
+ clock-names = "sys_clk", "p_clk", "pixel_if0_clk",
+ "pixel_if1_clk", "pixel_if2_clk", "pixel_if3_clk";
+ phys = <&dphy1>;
+ phy-names = "dphy";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ csi1_port0: port@0 {
+ reg = <0>;
+ status = "disabled";
+ };
+
+ csi1_port1: port@1 {
+ reg = <1>;
+ status = "disabled";
+ };
+
+ csi1_port2: port@2 {
+ reg = <2>;
+ status = "disabled";
+ };
+
+ csi1_port3: port@3 {
+ reg = <3>;
+ status = "disabled";
+ };
+
+ csi1_port4: port@4 {
+ reg = <4>;
+ status = "disabled";
+ };
+ };
+ };
+ };
+
+ ti_csi2rx2: ticsi2rx@30142000 {
+ compatible = "ti,j721e-csi2rx-shim";
+ reg = <0x00 0x30142000 0x00 0x1000>;
+ ranges;
+ #address-cells = <2>;
+ #size-cells = <2>;
+ power-domains = <&k3_pds 248 TI_SCI_PD_EXCLUSIVE>;
+ dmas = <&main_bcdma_csi 0 0x5200 0>;
+ dma-names = "rx0";
+ status = "disabled";
+
+ cdns_csi2rx2: csi-bridge@30141000 {
+ compatible = "ti,j721e-csi2rx", "cdns,csi2rx";
+ reg = <0x00 0x30141000 0x00 0x1000>;
+ clocks = <&k3_clks 248 0>, <&k3_clks 248 3>, <&k3_clks 248 0>,
+ <&k3_clks 248 0>, <&k3_clks 248 4>, <&k3_clks 248 4>;
+ clock-names = "sys_clk", "p_clk", "pixel_if0_clk",
+ "pixel_if1_clk", "pixel_if2_clk", "pixel_if3_clk";
+ phys = <&dphy2>;
+ phy-names = "dphy";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ csi2_port0: port@0 {
+ reg = <0>;
+ status = "disabled";
+ };
+
+ csi2_port1: port@1 {
+ reg = <1>;
+ status = "disabled";
+ };
+
+ csi2_port2: port@2 {
+ reg = <2>;
+ status = "disabled";
+ };
+
+ csi2_port3: port@3 {
+ reg = <3>;
+ status = "disabled";
+ };
+
+ csi2_port4: port@4 {
+ reg = <4>;
+ status = "disabled";
+ };
+ };
+ };
+ };
+
+ ti_csi2rx3: ticsi2rx@30162000 {
+ compatible = "ti,j721e-csi2rx-shim";
+ reg = <0x00 0x30162000 0x00 0x1000>;
+ ranges;
+ #address-cells = <2>;
+ #size-cells = <2>;
+ dmas = <&main_bcdma_csi 0 0x5300 0>;
+ dma-names = "rx0";
+ power-domains = <&k3_pds 249 TI_SCI_PD_EXCLUSIVE>;
+ status = "disabled";
+
+ cdns_csi2rx3: csi-bridge@30161000 {
+ compatible = "ti,j721e-csi2rx", "cdns,csi2rx";
+ reg = <0x00 0x30161000 0x00 0x1000>;
+ clocks = <&k3_clks 249 0>, <&k3_clks 249 3>, <&k3_clks 249 0>,
+ <&k3_clks 249 0>, <&k3_clks 249 4>, <&k3_clks 249 4>;
+ clock-names = "sys_clk", "p_clk", "pixel_if0_clk",
+ "pixel_if1_clk", "pixel_if2_clk", "pixel_if3_clk";
+ phys = <&dphy3>;
+ phy-names = "dphy";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ csi3_port0: port@0 {
+ reg = <0>;
+ status = "disabled";
+ };
+
+ csi3_port1: port@1 {
+ reg = <1>;
+ status = "disabled";
+ };
+
+ csi3_port2: port@2 {
+ reg = <2>;
+ status = "disabled";
+ };
+
+ csi3_port3: port@3 {
+ reg = <3>;
+ status = "disabled";
+ };
+
+ csi3_port4: port@4 {
+ reg = <4>;
+ status = "disabled";
+ };
+ };
+ };
+ };
+
+ dphy1: phy@30130000 {
+ compatible = "cdns,dphy-rx";
+ reg = <0x00 0x30130000 0x00 0x1100>;
+ #phy-cells = <0>;
+ power-domains = <&k3_pds 251 TI_SCI_PD_EXCLUSIVE>;
+ status = "disabled";
+ };
+
+ dphy2: phy@30150000 {
+ compatible = "cdns,dphy-rx";
+ reg = <0x00 0x30150000 0x00 0x1100>;
+ #phy-cells = <0>;
+ power-domains = <&k3_pds 252 TI_SCI_PD_EXCLUSIVE>;
+ status = "disabled";
+ };
+
+ dphy3: phy@30170000 {
+ compatible = "cdns,dphy-rx";
+ reg = <0x00 0x30170000 0x00 0x1100>;
+ #phy-cells = <0>;
+ power-domains = <&k3_pds 253 TI_SCI_PD_EXCLUSIVE>;
+ status = "disabled";
+ };
+
main_r5fss0: r5fss@78400000 {
compatible = "ti,am62-r5fss";
#address-cells = <1>;
@@ -204,6 +387,16 @@
};
};
+&main_bcdma_csi {
+ compatible = "ti,j722s-dmss-bcdma-csi";
+ reg = <0x00 0x4e230000 0x00 0x100>,
+ <0x00 0x4e180000 0x00 0x20000>,
+ <0x00 0x4e300000 0x00 0x10000>,
+ <0x00 0x4e100000 0x00 0x80000>;
+ reg-names = "gcfg", "rchanrt", "tchanrt", "ringrt";
+ ti,sci-rm-range-tchan = <0x22>;
+};
+
/* MCU domain overrides */
&mcu_r5fss0_core0 {
@@ -251,21 +444,6 @@
ti,interrupt-ranges = <7 71 21>;
};
-&main_pmx0 {
- pinctrl-single,gpio-range =
- <&main_pmx0_range 0 32 PIN_GPIO_RANGE_IOPAD>,
- <&main_pmx0_range 33 38 PIN_GPIO_RANGE_IOPAD>,
- <&main_pmx0_range 72 17 PIN_GPIO_RANGE_IOPAD>,
- <&main_pmx0_range 101 25 PIN_GPIO_RANGE_IOPAD>,
- <&main_pmx0_range 137 5 PIN_GPIO_RANGE_IOPAD>,
- <&main_pmx0_range 143 3 PIN_GPIO_RANGE_IOPAD>,
- <&main_pmx0_range 149 2 PIN_GPIO_RANGE_IOPAD>;
-
- main_pmx0_range: gpio-range {
- #pinctrl-single,gpio-range-cells = <3>;
- };
-};
-
&main_gpio0 {
gpio-ranges = <&main_pmx0 0 0 32>, <&main_pmx0 32 33 38>,
<&main_pmx0 70 72 17>;
diff --git a/arch/arm64/boot/dts/ti/k3-j784s4-evm-quad-port-eth-exp1.dtso b/arch/arm64/boot/dts/ti/k3-j784s4-evm-quad-port-eth-exp1.dtso
index dcd2c7c39ec3..c1f9573557d0 100644
--- a/arch/arm64/boot/dts/ti/k3-j784s4-evm-quad-port-eth-exp1.dtso
+++ b/arch/arm64/boot/dts/ti/k3-j784s4-evm-quad-port-eth-exp1.dtso
@@ -102,13 +102,6 @@
gpios = <16 GPIO_ACTIVE_HIGH>;
output-low;
};
-
- /* Toggle MUX2 for MDIO lines */
- mux-sel-hog {
- gpio-hog;
- gpios = <13 GPIO_ACTIVE_HIGH>, <14 GPIO_ACTIVE_HIGH>, <15 GPIO_ACTIVE_HIGH>;
- output-high;
- };
};
&main_pmx0 {
diff --git a/arch/arm64/boot/dts/ti/k3-j784s4-j742s2-main-common.dtsi b/arch/arm64/boot/dts/ti/k3-j784s4-j742s2-main-common.dtsi
index 83bbf94b58d1..1944616ab357 100644
--- a/arch/arm64/boot/dts/ti/k3-j784s4-j742s2-main-common.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-j784s4-j742s2-main-common.dtsi
@@ -84,7 +84,9 @@
<0x10 0x3>, <0x14 0x3>, /* SERDES1 lane0/1 select */
<0x18 0x3>, <0x1c 0x3>, /* SERDES1 lane2/3 select */
<0x20 0x3>, <0x24 0x3>, /* SERDES2 lane0/1 select */
- <0x28 0x3>, <0x2c 0x3>; /* SERDES2 lane2/3 select */
+ <0x28 0x3>, <0x2c 0x3>, /* SERDES2 lane2/3 select */
+ <0x40 0x3>, <0x44 0x3>, /* SERDES4 lane0/1 select */
+ <0x48 0x3>, <0x4c 0x3>; /* SERDES4 lane2/3 select */
idle-states = <J784S4_SERDES0_LANE0_PCIE1_LANE0>,
<J784S4_SERDES0_LANE1_PCIE1_LANE1>,
<J784S4_SERDES0_LANE2_IP3_UNUSED>,
@@ -193,7 +195,7 @@
ranges;
#interrupt-cells = <3>;
interrupt-controller;
- reg = <0x00 0x01800000 0x00 0x200000>, /* GICD */
+ reg = <0x00 0x01800000 0x00 0x10000>, /* GICD */
<0x00 0x01900000 0x00 0x100000>, /* GICR */
<0x00 0x6f000000 0x00 0x2000>, /* GICC */
<0x00 0x6f010000 0x00 0x1000>, /* GICH */
diff --git a/arch/arm64/boot/dts/xilinx/Makefile b/arch/arm64/boot/dts/xilinx/Makefile
index 1068b0fa8e98..7f5a8801cad1 100644
--- a/arch/arm64/boot/dts/xilinx/Makefile
+++ b/arch/arm64/boot/dts/xilinx/Makefile
@@ -29,3 +29,5 @@ zynqmp-smk-k26-revA-sck-kv-g-revA-dtbs := zynqmp-smk-k26-revA.dtb zynqmp-sck-kv-
dtb-$(CONFIG_ARCH_ZYNQMP) += zynqmp-smk-k26-revA-sck-kv-g-revA.dtb
zynqmp-smk-k26-revA-sck-kv-g-revB-dtbs := zynqmp-smk-k26-revA.dtb zynqmp-sck-kv-g-revB.dtbo
dtb-$(CONFIG_ARCH_ZYNQMP) += zynqmp-smk-k26-revA-sck-kv-g-revB.dtb
+
+dtb-$(CONFIG_ARCH_ZYNQMP) += versal-net-vn-x-b2197-01-revA.dtb
diff --git a/arch/arm64/boot/dts/xilinx/versal-net-clk.dtsi b/arch/arm64/boot/dts/xilinx/versal-net-clk.dtsi
new file mode 100644
index 000000000000..b7a8a1a512cb
--- /dev/null
+++ b/arch/arm64/boot/dts/xilinx/versal-net-clk.dtsi
@@ -0,0 +1,231 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * dts file for Xilinx Versal NET fixed clock
+ *
+ * (C) Copyright 2022, Xilinx, Inc.
+ * (C) Copyright 2022 - 2025, Advanced Micro Devices, Inc.
+ *
+ * Michal Simek <michal.simek@amd.com>
+ */
+
+/ {
+ clk60: clk60 {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <60000000>;
+ };
+
+ clk100: clk100 {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <100000000>;
+ };
+
+ clk125: clk125 {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <125000000>;
+ };
+
+ clk150: clk150 {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <150000000>;
+ };
+
+ clk160: clk160 {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <160000000>;
+ };
+
+ clk200: clk200 {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <200000000>;
+ };
+
+ clk250: clk250 {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <250000000>;
+ };
+
+ clk300: clk300 {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <300000000>;
+ };
+
+ clk450: clk450 {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <450000000>;
+ };
+
+ clk1200: clk1200 {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <1200000000>;
+ };
+
+ firmware {
+ versal_net_firmware: versal-net-firmware {
+ compatible = "xlnx,versal-net-firmware", "xlnx,versal-firmware";
+ bootph-all;
+ method = "smc";
+ };
+ };
+};
+
+&adma0 {
+ clocks = <&clk450>, <&clk450>;
+};
+
+&adma1 {
+ clocks = <&clk450>, <&clk450>;
+};
+
+&adma2 {
+ clocks = <&clk450>, <&clk450>;
+};
+
+&adma3 {
+ clocks = <&clk450>, <&clk450>;
+};
+
+&adma4 {
+ clocks = <&clk450>, <&clk450>;
+};
+
+&adma5 {
+ clocks = <&clk450>, <&clk450>;
+};
+
+&adma6 {
+ clocks = <&clk450>, <&clk450>;
+};
+
+&adma7 {
+ clocks = <&clk450>, <&clk450>;
+};
+
+&can0 {
+ clocks = <&clk160>, <&clk160>;
+};
+
+&can1 {
+ clocks = <&clk160>, <&clk160>;
+};
+
+&gem0 {
+ clocks = <&clk125>, <&clk125>, <&clk125>, <&clk125>, <&clk250>;
+};
+
+&gem1 {
+ clocks = <&clk125>, <&clk125>, <&clk125>, <&clk125>, <&clk250>;
+};
+
+&gpio0 {
+ clocks = <&clk100>;
+};
+
+&gpio1 {
+ clocks = <&clk100>;
+};
+
+&i2c0 {
+ clocks = <&clk100>;
+};
+
+&i2c1 {
+ clocks = <&clk100>;
+};
+
+&i3c0 {
+ clocks = <&clk100>;
+};
+
+&i3c1 {
+ clocks = <&clk100>;
+};
+
+&ospi {
+ clocks = <&clk200>;
+};
+
+&qspi {
+ clocks = <&clk300>, <&clk300>;
+};
+
+&rtc {
+ /* Nothing */
+};
+
+&sdhci0 {
+ clocks = <&clk200>, <&clk200>, <&clk1200>;
+};
+
+&sdhci1 {
+ clocks = <&clk200>, <&clk200>, <&clk1200>;
+};
+
+&serial0 {
+ clocks = <&clk100>, <&clk100>;
+};
+
+&serial1 {
+ clocks = <&clk100>, <&clk100>;
+};
+
+&spi0 {
+ clocks = <&clk200>, <&clk200>;
+};
+
+&spi1 {
+ clocks = <&clk200>, <&clk200>;
+};
+
+&ttc0 {
+ clocks = <&clk150>;
+};
+
+&usb0 {
+ clocks = <&clk60>, <&clk60>;
+};
+
+&dwc3_0 {
+ clocks = <&clk60>;
+};
+
+&usb1 {
+ clocks = <&clk60>, <&clk60>;
+};
+
+&dwc3_1 {
+ clocks = <&clk60>;
+};
+
+&wwdt0 {
+ clocks = <&clk150>;
+};
+
+&wwdt1 {
+ clocks = <&clk150>;
+};
+
+&wwdt2 {
+ clocks = <&clk150>;
+};
+
+&wwdt3 {
+ clocks = <&clk150>;
+};
+
+&lpd_wwdt0 {
+ clocks = <&clk150>;
+};
+
+&lpd_wwdt1 {
+ clocks = <&clk150>;
+};
diff --git a/arch/arm64/boot/dts/xilinx/versal-net-vn-x-b2197-01-revA.dts b/arch/arm64/boot/dts/xilinx/versal-net-vn-x-b2197-01-revA.dts
new file mode 100644
index 000000000000..06b2301f48a0
--- /dev/null
+++ b/arch/arm64/boot/dts/xilinx/versal-net-vn-x-b2197-01-revA.dts
@@ -0,0 +1,116 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * dts file for Xilinx Versal Net VNX board revA
+ *
+ * (C) Copyright 2022, Xilinx, Inc.
+ * (C) Copyright 2022 - 2025, Advanced Micro Devices, Inc.
+ *
+ * Michal Simek <michal.simek@amd.com>
+ */
+
+/dts-v1/;
+
+#include "versal-net.dtsi"
+#include "versal-net-clk.dtsi"
+#include <dt-bindings/gpio/gpio.h>
+
+/ {
+ compatible = "xlnx,versal-net-vnx-revA", "xlnx,versal-net-vnx", "xlnx,versal-net";
+ model = "Xilinx Versal NET VNX revA";
+ dma-coherent;
+
+ memory: memory@0 {
+ reg = <0 0 0 0x80000000>;
+ device_type = "memory";
+ };
+
+ memory_hi: memory@800000000 {
+ reg = <8 0 3 0x80000000>;
+ device_type = "memory";
+ };
+
+ memory_hi2: memory@50000000000 {
+ reg = <0x500 0 4 0>;
+ device_type = "memory";
+ };
+
+ chosen {
+ bootargs = "console=ttyAMA1,115200n8";
+ stdout-path = "serial1:115200n8";
+ };
+
+ reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+ rsc_tbl_carveout: rproc@bbf14000 {
+ reg = <0 0xbbf14000 0 0x1000>;
+ no-map;
+ };
+ rpu0vdev0vring0: rpu0vdev0vring0@bbf15000 {
+ reg = <0 0xbbf15000 0 0x1000>;
+ no-map;
+ };
+ rpu0vdev0vring1: rpu0vdev0vring1@bbf16000 {
+ reg = <0 0xbbf16000 0 0x1000>;
+ no-map;
+ };
+ rpu0vdev0buffer: rpu0vdev0buffer@bbf17000 {
+ reg = <0 0xbbf17000 0 0xD000>;
+ no-map;
+ };
+ reserve_others: reserveothers@0 {
+ reg = <0 0x0 0 0x1c200000>;
+ no-map;
+ };
+ pdi_update: pdiupdate@1c200000 {
+ reg = <0 0x1c200000 0 0x6000000>;
+ no-map;
+ };
+ reserve_optee_atf: reserveopteeatf@22200000 {
+ reg = <0 0x22200000 0 0x4100000>;
+ no-map;
+ };
+ };
+};
+
+&gem1 {
+ status = "okay";
+ iommus = <&smmu 0x235>;
+ phy-handle = <&phy>;
+ phy-mode = "rmii";
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ phy: ethernet-phy@4 {
+ reg = <4>;
+ };
+ };
+};
+
+&ospi {
+ num-cs = <2>;
+ iommus = <&smmu 0x245>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+};
+
+&sdhci1 {
+ status = "okay";
+ iommus = <&smmu 0x243>;
+ non-removable;
+ disable-wp;
+ no-sd;
+ no-sdio;
+ cap-mmc-hw-reset;
+ bus-width = <8>;
+ no-1-8-v;
+};
+
+&serial1 {
+ status = "okay";
+};
+
+&smmu {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/xilinx/versal-net.dtsi b/arch/arm64/boot/dts/xilinx/versal-net.dtsi
new file mode 100644
index 000000000000..fc9f49e57385
--- /dev/null
+++ b/arch/arm64/boot/dts/xilinx/versal-net.dtsi
@@ -0,0 +1,752 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * dts file for Xilinx Versal NET
+ *
+ * (C) Copyright 2022, Xilinx, Inc.
+ * (C) Copyright 2022 - 2025, Advanced Micro Devices, Inc.
+ *
+ * Michal Simek <michal.simek@amd.com>
+ */
+
+/dts-v1/;
+
+/ {
+ compatible = "xlnx,versal-net";
+ model = "Xilinx Versal NET";
+ #address-cells = <2>;
+ #size-cells = <2>;
+ interrupt-parent = <&gic>;
+
+ options {
+ u-boot {
+ compatible = "u-boot,config";
+ bootscr-address = /bits/ 64 <0x20000000>;
+ };
+ };
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ cpu-map {
+ cluster0 {
+ core0 {
+ cpu = <&cpu0>;
+ };
+ core1 {
+ cpu = <&cpu100>;
+ };
+ core2 {
+ cpu = <&cpu200>;
+ };
+ core3 {
+ cpu = <&cpu300>;
+ };
+ };
+
+ cluster1 {
+ core0 {
+ cpu = <&cpu10000>;
+ };
+
+ core1 {
+ cpu = <&cpu10100>;
+ };
+
+ core2 {
+ cpu = <&cpu10200>;
+ };
+
+ core3 {
+ cpu = <&cpu10300>;
+ };
+ };
+ cluster2 {
+ core0 {
+ cpu = <&cpu20000>;
+ };
+
+ core1 {
+ cpu = <&cpu20100>;
+ };
+
+ core2 {
+ cpu = <&cpu20200>;
+ };
+
+ core3 {
+ cpu = <&cpu20300>;
+ };
+ };
+ cluster3 {
+ core0 {
+ cpu = <&cpu30000>;
+ };
+
+ core1 {
+ cpu = <&cpu30100>;
+ };
+
+ core2 {
+ cpu = <&cpu30200>;
+ };
+
+ core3 {
+ cpu = <&cpu30300>;
+ };
+ };
+
+ };
+
+ cpu0: cpu@0 {
+ compatible = "arm,cortex-a78";
+ device_type = "cpu";
+ enable-method = "psci";
+ reg = <0>;
+ operating-points-v2 = <&cpu_opp_table>;
+ cpu-idle-states = <&CPU_SLEEP_0>;
+ };
+ cpu100: cpu@100 {
+ compatible = "arm,cortex-a78";
+ device_type = "cpu";
+ enable-method = "psci";
+ reg = <0x100>;
+ operating-points-v2 = <&cpu_opp_table>;
+ cpu-idle-states = <&CPU_SLEEP_0>;
+ };
+ cpu200: cpu@200 {
+ compatible = "arm,cortex-a78";
+ device_type = "cpu";
+ enable-method = "psci";
+ reg = <0x200>;
+ operating-points-v2 = <&cpu_opp_table>;
+ cpu-idle-states = <&CPU_SLEEP_0>;
+ };
+ cpu300: cpu@300 {
+ compatible = "arm,cortex-a78";
+ device_type = "cpu";
+ enable-method = "psci";
+ reg = <0x300>;
+ operating-points-v2 = <&cpu_opp_table>;
+ cpu-idle-states = <&CPU_SLEEP_0>;
+ };
+ cpu10000: cpu@10000 {
+ compatible = "arm,cortex-a78";
+ device_type = "cpu";
+ enable-method = "psci";
+ reg = <0x10000>;
+ operating-points-v2 = <&cpu_opp_table>;
+ cpu-idle-states = <&CPU_SLEEP_0>;
+ };
+ cpu10100: cpu@10100 {
+ compatible = "arm,cortex-a78";
+ device_type = "cpu";
+ enable-method = "psci";
+ reg = <0x10100>;
+ operating-points-v2 = <&cpu_opp_table>;
+ cpu-idle-states = <&CPU_SLEEP_0>;
+ };
+ cpu10200: cpu@10200 {
+ compatible = "arm,cortex-a78";
+ device_type = "cpu";
+ enable-method = "psci";
+ reg = <0x10200>;
+ operating-points-v2 = <&cpu_opp_table>;
+ cpu-idle-states = <&CPU_SLEEP_0>;
+ };
+ cpu10300: cpu@10300 {
+ compatible = "arm,cortex-a78";
+ device_type = "cpu";
+ enable-method = "psci";
+ reg = <0x10300>;
+ operating-points-v2 = <&cpu_opp_table>;
+ cpu-idle-states = <&CPU_SLEEP_0>;
+ };
+ cpu20000: cpu@20000 {
+ compatible = "arm,cortex-a78";
+ device_type = "cpu";
+ enable-method = "psci";
+ reg = <0x20000>;
+ operating-points-v2 = <&cpu_opp_table>;
+ cpu-idle-states = <&CPU_SLEEP_0>;
+ };
+ cpu20100: cpu@20100 {
+ compatible = "arm,cortex-a78";
+ device_type = "cpu";
+ enable-method = "psci";
+ reg = <0x20100>;
+ operating-points-v2 = <&cpu_opp_table>;
+ cpu-idle-states = <&CPU_SLEEP_0>;
+ };
+ cpu20200: cpu@20200 {
+ compatible = "arm,cortex-a78";
+ device_type = "cpu";
+ enable-method = "psci";
+ reg = <0x20200>;
+ operating-points-v2 = <&cpu_opp_table>;
+ cpu-idle-states = <&CPU_SLEEP_0>;
+ };
+ cpu20300: cpu@20300 {
+ compatible = "arm,cortex-a78";
+ device_type = "cpu";
+ enable-method = "psci";
+ reg = <0x20300>;
+ operating-points-v2 = <&cpu_opp_table>;
+ cpu-idle-states = <&CPU_SLEEP_0>;
+ };
+ cpu30000: cpu@30000 {
+ compatible = "arm,cortex-a78";
+ device_type = "cpu";
+ enable-method = "psci";
+ reg = <0x30000>;
+ operating-points-v2 = <&cpu_opp_table>;
+ cpu-idle-states = <&CPU_SLEEP_0>;
+ };
+ cpu30100: cpu@30100 {
+ compatible = "arm,cortex-a78";
+ device_type = "cpu";
+ enable-method = "psci";
+ reg = <0x30100>;
+ operating-points-v2 = <&cpu_opp_table>;
+ cpu-idle-states = <&CPU_SLEEP_0>;
+ };
+ cpu30200: cpu@30200 {
+ compatible = "arm,cortex-a78";
+ device_type = "cpu";
+ enable-method = "psci";
+ reg = <0x30200>;
+ operating-points-v2 = <&cpu_opp_table>;
+ cpu-idle-states = <&CPU_SLEEP_0>;
+ };
+ cpu30300: cpu@30300 {
+ compatible = "arm,cortex-a78";
+ device_type = "cpu";
+ enable-method = "psci";
+ reg = <0x30300>;
+ operating-points-v2 = <&cpu_opp_table>;
+ cpu-idle-states = <&CPU_SLEEP_0>;
+ };
+ idle-states {
+ entry-method = "psci";
+
+ CPU_SLEEP_0: cpu-sleep-0 {
+ compatible = "arm,idle-state";
+ arm,psci-suspend-param = <0x40000000>;
+ local-timer-stop;
+ entry-latency-us = <300>;
+ exit-latency-us = <600>;
+ min-residency-us = <10000>;
+ };
+ };
+ };
+
+ cpu_opp_table: opp-table {
+ compatible = "operating-points-v2";
+ opp-1066000000 {
+ opp-hz = /bits/ 64 <1066000000>;
+ opp-microvolt = <1000000>;
+ clock-latency-ns = <500000>;
+ };
+ opp-1866000000 {
+ opp-hz = /bits/ 64 <1866000000>;
+ opp-microvolt = <1000000>;
+ clock-latency-ns = <500000>;
+ };
+ opp-1900000000 {
+ opp-hz = /bits/ 64 <1900000000>;
+ opp-microvolt = <1000000>;
+ clock-latency-ns = <500000>;
+ };
+ opp-1999000000 {
+ opp-hz = /bits/ 64 <1999000000>;
+ opp-microvolt = <1000000>;
+ clock-latency-ns = <500000>;
+ };
+ opp-2050000000 {
+ opp-hz = /bits/ 64 <2050000000>;
+ opp-microvolt = <1000000>;
+ clock-latency-ns = <500000>;
+ };
+ opp-2100000000 {
+ opp-hz = /bits/ 64 <2100000000>;
+ opp-microvolt = <1000000>;
+ clock-latency-ns = <500000>;
+ };
+ opp-2200000000 {
+ opp-hz = /bits/ 64 <2200000000>;
+ opp-microvolt = <1000000>;
+ clock-latency-ns = <500000>;
+ };
+ opp-2400000000 {
+ opp-hz = /bits/ 64 <2400000000>;
+ opp-microvolt = <1000000>;
+ clock-latency-ns = <500000>;
+ };
+ };
+
+ aliases {
+ serial0 = &serial0;
+ serial1 = &serial1;
+ serial2 = &dcc;
+ mmc0 = &sdhci0;
+ mmc1 = &sdhci1;
+ i2c0 = &i2c0;
+ i2c1 = &i2c1;
+ rtc = &rtc;
+ usb0 = &usb0;
+ usb1 = &usb1;
+ spi0 = &ospi;
+ spi1 = &qspi;
+ };
+
+ dcc: dcc {
+ compatible = "arm,dcc";
+ status = "disabled";
+ bootph-all;
+ };
+
+ firmware {
+ psci {
+ compatible = "arm,psci-1.0";
+ method = "smc";
+ };
+ };
+
+ fpga: fpga-region {
+ compatible = "fpga-region";
+ fpga-mgr = <&versal_fpga>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+ };
+
+ timer: timer {
+ compatible = "arm,armv8-timer";
+ interrupts = <1 13 4>, <1 14 4>, <1 11 4>, <1 10 4>;
+ };
+
+ versal_fpga: versal-fpga {
+ compatible = "xlnx,versal-fpga";
+ };
+
+ amba: axi {
+ compatible = "simple-bus";
+ bootph-all;
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ adma0: dma-controller@ebd00000 {
+ compatible = "xlnx,zynqmp-dma-1.0";
+ status = "disabled";
+ reg = <0 0xebd00000 0 0x1000>;
+ interrupts = <0 72 4>;
+ clock-names = "clk_main", "clk_apb";
+ #dma-cells = <1>;
+ xlnx,bus-width = <64>;
+ };
+
+ adma1: dma-controller@ebd10000 {
+ compatible = "xlnx,zynqmp-dma-1.0";
+ status = "disabled";
+ reg = <0 0xebd10000 0 0x1000>;
+ interrupts = <0 73 4>;
+ clock-names = "clk_main", "clk_apb";
+ #dma-cells = <1>;
+ xlnx,bus-width = <64>;
+ };
+
+ adma2: dma-controller@ebd20000 {
+ compatible = "xlnx,zynqmp-dma-1.0";
+ status = "disabled";
+ reg = <0 0xebd20000 0 0x1000>;
+ interrupts = <0 74 4>;
+ clock-names = "clk_main", "clk_apb";
+ #dma-cells = <1>;
+ xlnx,bus-width = <64>;
+ };
+
+ adma3: dma-controller@ebd30000 {
+ compatible = "xlnx,zynqmp-dma-1.0";
+ status = "disabled";
+ reg = <0 0xebd30000 0 0x1000>;
+ interrupts = <0 75 4>;
+ clock-names = "clk_main", "clk_apb";
+ #dma-cells = <1>;
+ xlnx,bus-width = <64>;
+ };
+
+ adma4: dma-controller@ebd40000 {
+ compatible = "xlnx,zynqmp-dma-1.0";
+ status = "disabled";
+ reg = <0 0xebd40000 0 0x1000>;
+ interrupts = <0 76 4>;
+ clock-names = "clk_main", "clk_apb";
+ #dma-cells = <1>;
+ xlnx,bus-width = <64>;
+ };
+
+ adma5: dma-controller@ebd50000 {
+ compatible = "xlnx,zynqmp-dma-1.0";
+ status = "disabled";
+ reg = <0 0xebd50000 0 0x1000>;
+ interrupts = <0 77 4>;
+ clock-names = "clk_main", "clk_apb";
+ #dma-cells = <1>;
+ xlnx,bus-width = <64>;
+ };
+
+ adma6: dma-controller@ebd60000 {
+ compatible = "xlnx,zynqmp-dma-1.0";
+ status = "disabled";
+ reg = <0 0xebd60000 0 0x1000>;
+ interrupts = <0 78 4>;
+ clock-names = "clk_main", "clk_apb";
+ #dma-cells = <1>;
+ xlnx,bus-width = <64>;
+ };
+
+ adma7: dma-controller@ebd70000 {
+ compatible = "xlnx,zynqmp-dma-1.0";
+ status = "disabled";
+ reg = <0 0xebd70000 0 0x1000>;
+ interrupts = <0 79 4>;
+ clock-names = "clk_main", "clk_apb";
+ #dma-cells = <1>;
+ xlnx,bus-width = <64>;
+ };
+
+ can0: can@f1980000 {
+ compatible = "xlnx,canfd-2.0";
+ status = "disabled";
+ reg = <0 0xf1980000 0 0x6000>;
+ interrupts = <0 27 4>;
+ clock-names = "can_clk", "s_axi_aclk";
+ rx-fifo-depth = <64>;
+ tx-mailbox-count = <32>;
+ };
+
+ can1: can@f1990000 {
+ compatible = "xlnx,canfd-2.0";
+ status = "disabled";
+ reg = <0 0xf1990000 0 0x6000>;
+ interrupts = <0 28 4>;
+ clock-names = "can_clk", "s_axi_aclk";
+ rx-fifo-depth = <64>;
+ tx-mailbox-count = <32>;
+ };
+
+ gem0: ethernet@f19e0000 {
+ compatible = "xlnx,versal-gem", "cdns,gem";
+ status = "disabled";
+ reg = <0 0xf19e0000 0 0x1000>;
+ interrupts = <0 39 4>, <0 39 4>;
+ clock-names = "pclk", "hclk", "tx_clk", "rx_clk",
+ "tsu_clk";
+ };
+
+ gem1: ethernet@f19f0000 {
+ compatible = "xlnx,versal-gem", "cdns,gem";
+ status = "disabled";
+ reg = <0 0xf19f0000 0 0x1000>;
+ interrupts = <0 41 4>, <0 41 4>;
+ clock-names = "pclk", "hclk", "tx_clk", "rx_clk",
+ "tsu_clk";
+ };
+
+ gic: interrupt-controller@e2000000 {
+ compatible = "arm,gic-v3";
+ #interrupt-cells = <3>;
+ reg = <0 0xe2000000 0 0x10000>,
+ <0 0xe2060000 0 0x200000>;
+ interrupt-controller;
+ interrupts = <1 9 4>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+ its: msi-controller@e2040000 {
+ compatible = "arm,gic-v3-its";
+ msi-controller;
+ #msi-cells = <1>;
+ reg = <0 0xe2040000 0 0x20000>;
+ };
+ };
+
+ gpio0: gpio@f19d0000 {
+ compatible = "xlnx,versal-gpio-1.0";
+ status = "disabled";
+ reg = <0 0xf19d0000 0 0x1000>;
+ interrupts = <0 20 4>;
+ #gpio-cells = <2>;
+ gpio-controller;
+ #interrupt-cells = <2>;
+ interrupt-controller;
+ };
+
+ gpio1: gpio@f1020000 {
+ compatible = "xlnx,pmc-gpio-1.0";
+ status = "disabled";
+ reg = <0 0xf1020000 0 0x1000>;
+ interrupts = <0 180 4>;
+ #gpio-cells = <2>;
+ gpio-controller;
+ #interrupt-cells = <2>;
+ interrupt-controller;
+ };
+
+ i2c0: i2c@f1940000 {
+ compatible = "cdns,i2c-r1p14";
+ status = "disabled";
+ reg = <0 0xf1940000 0 0x1000>;
+ interrupts = <0 21 4>;
+ clock-frequency = <400000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
+ i2c1: i2c@f1950000 {
+ compatible = "cdns,i2c-r1p14";
+ status = "disabled";
+ reg = <0 0xf1950000 0 0x1000>;
+ interrupts = <0 22 4>;
+ clock-frequency = <400000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
+ i3c0: i3c@f1948000 {
+ compatible = "snps,dw-i3c-master-1.00a";
+ status = "disabled";
+ reg = <0 0xf1948000 0 0x1000>;
+ #address-cells = <3>;
+ #size-cells = <0>;
+ interrupts = <0 21 4>;
+ };
+
+ i3c1: i3c@f1958000 {
+ compatible = "snps,dw-i3c-master-1.00a";
+ status = "disabled";
+ reg = <0 0xf1958000 0 0x1000>;
+ #address-cells = <3>;
+ #size-cells = <0>;
+ interrupts = <0 22 4>;
+ };
+
+ ospi: spi@f1010000 {
+ compatible = "xlnx,versal-ospi-1.0", "cdns,qspi-nor";
+ status = "disabled";
+ reg = <0 0xf1010000 0 0x10000>,
+ <0 0xc0000000 0 0x20000000>;
+ interrupts = <0 182 4>;
+ cdns,fifo-depth = <256>;
+ cdns,fifo-width = <4>;
+ cdns,is-dma = <1>; /* u-boot specific */
+ cdns,trigger-address = <0xc0000000>;
+ };
+
+ qspi: spi@f1030000 {
+ compatible = "xlnx,versal-qspi-1.0";
+ status = "disabled";
+ reg = <0 0xf1030000 0 0x1000>;
+ interrupts = <0 183 4>;
+ clock-names = "ref_clk", "pclk";
+ };
+
+ rtc: rtc@f12a0000 {
+ compatible = "xlnx,zynqmp-rtc";
+ status = "disabled";
+ reg = <0 0xf12a0000 0 0x100>;
+ interrupts = <0 200 4>, <0 201 4>;
+ interrupt-names = "alarm", "sec";
+ calibration = <0x8000>;
+ };
+
+ sdhci0: mmc@f1040000 {
+ compatible = "xlnx,versal-8.9a", "arasan,sdhci-8.9a";
+ status = "disabled";
+ reg = <0 0xf1040000 0 0x10000>;
+ interrupts = <0 184 4>;
+ clock-names = "clk_xin", "clk_ahb", "gate";
+ #clock-cells = <1>;
+ clock-output-names = "clk_out_sd0", "clk_in_sd0";
+ };
+
+ sdhci1: mmc@f1050000 {
+ compatible = "xlnx,versal-net-emmc";
+ status = "disabled";
+ reg = <0 0xf1050000 0 0x10000>;
+ interrupts = <0 186 4>;
+ clock-names = "clk_xin", "clk_ahb", "gate";
+ #clock-cells = <1>;
+ clock-output-names = "clk_out_sd1", "clk_in_sd1";
+ };
+
+ serial0: serial@f1920000 {
+ bootph-all;
+ compatible = "arm,pl011", "arm,primecell";
+ status = "disabled";
+ reg = <0 0xf1920000 0 0x1000>;
+ interrupts = <0 25 4>;
+ reg-io-width = <4>;
+ clock-names = "uartclk", "apb_pclk";
+ };
+
+ serial1: serial@f1930000 {
+ bootph-all;
+ compatible = "arm,pl011", "arm,primecell";
+ status = "disabled";
+ reg = <0 0xf1930000 0 0x1000>;
+ interrupts = <0 26 4>;
+ reg-io-width = <4>;
+ clock-names = "uartclk", "apb_pclk";
+ };
+
+ smmu: iommu@ec000000 {
+ compatible = "arm,smmu-v3";
+ status = "disabled";
+ reg = <0 0xec000000 0 0x40000>;
+ #iommu-cells = <1>;
+ interrupt-names = "combined";
+ interrupts = <0 169 4>;
+ dma-coherent;
+ };
+
+ spi0: spi@f1960000 {
+ compatible = "cdns,spi-r1p6";
+ status = "disabled";
+ interrupts = <0 23 4>;
+ reg = <0 0xf1960000 0 0x1000>;
+ clock-names = "ref_clk", "pclk";
+ };
+
+ spi1: spi@f1970000 {
+ compatible = "cdns,spi-r1p6";
+ status = "disabled";
+ interrupts = <0 24 4>;
+ reg = <0 0xf1970000 0 0x1000>;
+ clock-names = "ref_clk", "pclk";
+ };
+
+ ttc0: timer@f1dc0000 {
+ compatible = "cdns,ttc";
+ status = "disabled";
+ interrupts = <0 43 4>, <0 44 4>, <0 45 4>;
+ timer-width = <32>;
+ reg = <0x0 0xf1dc0000 0x0 0x1000>;
+ };
+
+ ttc1: timer@f1dd0000 {
+ compatible = "cdns,ttc";
+ status = "disabled";
+ interrupts = <0 46 4>, <0 47 4>, <0 48 4>;
+ timer-width = <32>;
+ reg = <0x0 0xf1dd0000 0x0 0x1000>;
+ };
+
+ ttc2: timer@f1de0000 {
+ compatible = "cdns,ttc";
+ status = "disabled";
+ interrupts = <0 49 4>, <0 50 4>, <0 51 4>;
+ timer-width = <32>;
+ reg = <0x0 0xf1de0000 0x0 0x1000>;
+ };
+
+ ttc3: timer@f1df0000 {
+ compatible = "cdns,ttc";
+ status = "disabled";
+ interrupts = <0 52 4>, <0 53 4>, <0 54 4>;
+ timer-width = <32>;
+ reg = <0x0 0xf1df0000 0x0 0x1000>;
+ };
+
+ usb0: usb@f1e00000 {
+ compatible = "xlnx,versal-dwc3";
+ status = "disabled";
+ reg = <0 0xf1e00000 0 0x100>;
+ clock-names = "bus_clk", "ref_clk";
+ ranges;
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ dwc3_0: usb@f1b00000 {
+ compatible = "snps,dwc3";
+ status = "disabled";
+ reg = <0 0xf1b00000 0 0x10000>;
+ interrupt-names = "host", "peripheral", "otg", "wakeup";
+ interrupts = <0 29 4>, <0 29 4>, <0 33 4>, <0 98 4>;
+ snps,dis_u2_susphy_quirk;
+ snps,dis_u3_susphy_quirk;
+ snps,quirk-frame-length-adjustment = <0x20>;
+ dr_mode = "peripheral";
+ maximum-speed = "high-speed";
+ snps,usb3_lpm_capable;
+ clock-names = "ref";
+ };
+ };
+
+ usb1: usb@f1e10000 {
+ compatible = "xlnx,versal-dwc3";
+ status = "disabled";
+ reg = <0x0 0xf1e10000 0x0 0x100>;
+ clock-names = "bus_clk", "ref_clk";
+ ranges;
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ dwc3_1: usb@f1c00000 {
+ compatible = "snps,dwc3";
+ status = "disabled";
+ reg = <0x0 0xf1c00000 0x0 0x10000>;
+ interrupt-names = "host", "peripheral", "otg", "wakeup";
+ interrupts = <0 34 4>, <0 34 4>, <0 38 4>, <0 99 4>;
+ snps,dis_u2_susphy_quirk;
+ snps,dis_u3_susphy_quirk;
+ snps,quirk-frame-length-adjustment = <0x20>;
+ dr_mode = "host";
+ maximum-speed = "high-speed";
+ snps,usb3_lpm_capable;
+ clock-names = "ref";
+ };
+ };
+
+ wwdt0: watchdog@ecc10000 {
+ compatible = "xlnx,versal-wwdt";
+ status = "disabled";
+ reg = <0 0xecc10000 0 0x10000>;
+ timeout-sec = <30>;
+ };
+
+ wwdt1: watchdog@ecd10000 {
+ compatible = "xlnx,versal-wwdt";
+ status = "disabled";
+ reg = <0 0xecd10000 0 0x10000>;
+ timeout-sec = <30>;
+ };
+
+ wwdt2: watchdog@ece10000 {
+ compatible = "xlnx,versal-wwdt";
+ status = "disabled";
+ reg = <0 0xece10000 0 0x10000>;
+ timeout-sec = <30>;
+ };
+
+ wwdt3: watchdog@ecf10000 {
+ compatible = "xlnx,versal-wwdt";
+ status = "disabled";
+ reg = <0 0xecf10000 0 0x10000>;
+ timeout-sec = <30>;
+ };
+
+ lpd_wwdt0: watchdog@ea420000 {
+ compatible = "xlnx,versal-wwdt";
+ status = "disabled";
+ reg = <0 0xea420000 0 0x10000>;
+ timeout-sec = <30>;
+ };
+
+ lpd_wwdt1: watchdog@ea430000 {
+ compatible = "xlnx,versal-wwdt";
+ status = "disabled";
+ reg = <0 0xea430000 0 0x10000>;
+ timeout-sec = <30>;
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/xilinx/xlnx-zynqmp-clk.h b/arch/arm64/boot/dts/xilinx/xlnx-zynqmp-clk.h
new file mode 100644
index 000000000000..0aa17f2a2818
--- /dev/null
+++ b/arch/arm64/boot/dts/xilinx/xlnx-zynqmp-clk.h
@@ -0,0 +1,126 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Xilinx Zynq MPSoC Firmware layer
+ *
+ * Copyright (C) 2014-2018 Xilinx, Inc.
+ *
+ */
+
+#ifndef _XLNX_ZYNQMP_CLK_H
+#define _XLNX_ZYNQMP_CLK_H
+
+#define IOPLL 0
+#define RPLL 1
+#define APLL 2
+#define DPLL 3
+#define VPLL 4
+#define IOPLL_TO_FPD 5
+#define RPLL_TO_FPD 6
+#define APLL_TO_LPD 7
+#define DPLL_TO_LPD 8
+#define VPLL_TO_LPD 9
+#define ACPU 10
+#define ACPU_HALF 11
+#define DBF_FPD 12
+#define DBF_LPD 13
+#define DBG_TRACE 14
+#define DBG_TSTMP 15
+#define DP_VIDEO_REF 16
+#define DP_AUDIO_REF 17
+#define DP_STC_REF 18
+#define GDMA_REF 19
+#define DPDMA_REF 20
+#define DDR_REF 21
+#define SATA_REF 22
+#define PCIE_REF 23
+#define GPU_REF 24
+#define GPU_PP0_REF 25
+#define GPU_PP1_REF 26
+#define TOPSW_MAIN 27
+#define TOPSW_LSBUS 28
+#define GTGREF0_REF 29
+#define LPD_SWITCH 30
+#define LPD_LSBUS 31
+#define USB0_BUS_REF 32
+#define USB1_BUS_REF 33
+#define USB3_DUAL_REF 34
+#define USB0 35
+#define USB1 36
+#define CPU_R5 37
+#define CPU_R5_CORE 38
+#define CSU_SPB 39
+#define CSU_PLL 40
+#define PCAP 41
+#define IOU_SWITCH 42
+#define GEM_TSU_REF 43
+#define GEM_TSU 44
+#define GEM0_TX 45
+#define GEM1_TX 46
+#define GEM2_TX 47
+#define GEM3_TX 48
+#define GEM0_RX 49
+#define GEM1_RX 50
+#define GEM2_RX 51
+#define GEM3_RX 52
+#define QSPI_REF 53
+#define SDIO0_REF 54
+#define SDIO1_REF 55
+#define UART0_REF 56
+#define UART1_REF 57
+#define SPI0_REF 58
+#define SPI1_REF 59
+#define NAND_REF 60
+#define I2C0_REF 61
+#define I2C1_REF 62
+#define CAN0_REF 63
+#define CAN1_REF 64
+#define CAN0 65
+#define CAN1 66
+#define DLL_REF 67
+#define ADMA_REF 68
+#define TIMESTAMP_REF 69
+#define AMS_REF 70
+#define PL0_REF 71
+#define PL1_REF 72
+#define PL2_REF 73
+#define PL3_REF 74
+#define WDT 75
+#define IOPLL_INT 76
+#define IOPLL_PRE_SRC 77
+#define IOPLL_HALF 78
+#define IOPLL_INT_MUX 79
+#define IOPLL_POST_SRC 80
+#define RPLL_INT 81
+#define RPLL_PRE_SRC 82
+#define RPLL_HALF 83
+#define RPLL_INT_MUX 84
+#define RPLL_POST_SRC 85
+#define APLL_INT 86
+#define APLL_PRE_SRC 87
+#define APLL_HALF 88
+#define APLL_INT_MUX 89
+#define APLL_POST_SRC 90
+#define DPLL_INT 91
+#define DPLL_PRE_SRC 92
+#define DPLL_HALF 93
+#define DPLL_INT_MUX 94
+#define DPLL_POST_SRC 95
+#define VPLL_INT 96
+#define VPLL_PRE_SRC 97
+#define VPLL_HALF 98
+#define VPLL_INT_MUX 99
+#define VPLL_POST_SRC 100
+#define CAN0_MIO 101
+#define CAN1_MIO 102
+#define ACPU_FULL 103
+#define GEM0_REF 104
+#define GEM1_REF 105
+#define GEM2_REF 106
+#define GEM3_REF 107
+#define GEM0_REF_UNG 108
+#define GEM1_REF_UNG 109
+#define GEM2_REF_UNG 110
+#define GEM3_REF_UNG 111
+#define LPD_WDT 112
+
+#endif /* _XLNX_ZYNQMP_CLK_H */
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp-clk-ccf.dtsi b/arch/arm64/boot/dts/xilinx/zynqmp-clk-ccf.dtsi
index 60d1b1acf9a0..52e122fc7c9e 100644
--- a/arch/arm64/boot/dts/xilinx/zynqmp-clk-ccf.dtsi
+++ b/arch/arm64/boot/dts/xilinx/zynqmp-clk-ccf.dtsi
@@ -8,41 +8,46 @@
* Michal Simek <michal.simek@amd.com>
*/
-#include <dt-bindings/clock/xlnx-zynqmp-clk.h>
+#include "xlnx-zynqmp-clk.h"
/ {
- pss_ref_clk: pss_ref_clk {
+ pss_ref_clk: pss-ref-clk {
bootph-all;
compatible = "fixed-clock";
#clock-cells = <0>;
clock-frequency = <33333333>;
+ clock-output-names = "pss_ref_clk";
};
- video_clk: video_clk {
+ video_clk: video-clk {
bootph-all;
compatible = "fixed-clock";
#clock-cells = <0>;
clock-frequency = <27000000>;
+ clock-output-names = "video_clk";
};
- pss_alt_ref_clk: pss_alt_ref_clk {
+ pss_alt_ref_clk: pss-alt-ref-clk {
bootph-all;
compatible = "fixed-clock";
#clock-cells = <0>;
clock-frequency = <0>;
+ clock-output-names = "pss_alt_ref_clk";
};
- gt_crx_ref_clk: gt_crx_ref_clk {
+ gt_crx_ref_clk: gt-crx-ref-clk {
bootph-all;
compatible = "fixed-clock";
#clock-cells = <0>;
clock-frequency = <108000000>;
+ clock-output-names = "gt_crx_ref_clk";
};
- aux_ref_clk: aux_ref_clk {
+ aux_ref_clk: aux-ref-clk {
bootph-all;
compatible = "fixed-clock";
#clock-cells = <0>;
clock-frequency = <27000000>;
+ clock-output-names = "aux_ref_clk";
};
};
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
index 1f25423de383..5bb8f09422a2 100644
--- a/arch/arm64/configs/defconfig
+++ b/arch/arm64/configs/defconfig
@@ -177,6 +177,7 @@ CONFIG_NET_CLS_FLOWER=m
CONFIG_NET_CLS_ACT=y
CONFIG_NET_ACT_GACT=m
CONFIG_NET_ACT_MIRRED=m
+CONFIG_HSR=m
CONFIG_NET_ACT_GATE=m
CONFIG_QRTR_SMD=m
CONFIG_QRTR_TUN=m
@@ -284,6 +285,7 @@ CONFIG_MTD_NAND_BRCMNAND=m
CONFIG_MTD_NAND_FSL_IFC=y
CONFIG_MTD_NAND_QCOM=y
CONFIG_MTD_SPI_NOR=y
+CONFIG_MTD_SPI_NAND=m
CONFIG_MTD_UBI=m
CONFIG_MTD_HYPERBUS=m
CONFIG_HBMC_AM654=m
@@ -845,6 +847,7 @@ CONFIG_VIDEO_IMX8_ISI=m
CONFIG_VIDEO_IMX8_ISI_M2M=y
CONFIG_VIDEO_IMX8_JPEG=m
CONFIG_VIDEO_QCOM_CAMSS=m
+CONFIG_VIDEO_QCOM_IRIS=m
CONFIG_VIDEO_QCOM_VENUS=m
CONFIG_VIDEO_RCAR_ISP=m
CONFIG_VIDEO_RCAR_CSI2=m
@@ -859,6 +862,8 @@ CONFIG_VIDEO_SAMSUNG_EXYNOS_GSC=m
CONFIG_VIDEO_SAMSUNG_S5P_JPEG=m
CONFIG_VIDEO_SAMSUNG_S5P_MFC=m
CONFIG_VIDEO_SUN6I_CSI=m
+CONFIG_VIDEO_SYNOPSYS_HDMIRX=m
+CONFIG_VIDEO_SYNOPSYS_HDMIRX_LOAD_DEFAULT_EDID=y
CONFIG_VIDEO_TI_J721E_CSI2RX=m
CONFIG_VIDEO_HANTRO=m
CONFIG_VIDEO_IMX219=m
@@ -911,6 +916,7 @@ CONFIG_DRM_PANEL_SAMSUNG_ATNA33XC20=m
CONFIG_DRM_PANEL_SITRONIX_ST7703=m
CONFIG_DRM_PANEL_TRULY_NT35597_WQXGA=m
CONFIG_DRM_PANEL_VISIONOX_VTDR6130=m
+CONFIG_DRM_DISPLAY_CONNECTOR=m
CONFIG_DRM_FSL_LDB=m
CONFIG_DRM_ITE_IT6263=m
CONFIG_DRM_LONTIUM_LT8912B=m
@@ -1147,6 +1153,7 @@ CONFIG_TYPEC_HD3SS3220=m
CONFIG_TYPEC_MUX_FSA4480=m
CONFIG_TYPEC_MUX_GPIO_SBU=m
CONFIG_TYPEC_MUX_NB7VPQ904M=m
+CONFIG_TYPEC_MUX_PS883X=m
CONFIG_TYPEC_MUX_WCD939X_USBSS=m
CONFIG_TYPEC_DP_ALTMODE=m
CONFIG_MMC=y
@@ -1188,6 +1195,7 @@ CONFIG_SCSI_UFS_HISI=y
CONFIG_SCSI_UFS_RENESAS=m
CONFIG_SCSI_UFS_TI_J721E=m
CONFIG_SCSI_UFS_EXYNOS=y
+CONFIG_SCSI_UFS_ROCKCHIP=y
CONFIG_NEW_LEDS=y
CONFIG_LEDS_CLASS=y
CONFIG_LEDS_CLASS_MULTICOLOR=m
@@ -1270,6 +1278,8 @@ CONFIG_VIRTIO_BALLOON=y
CONFIG_VIRTIO_MMIO=y
CONFIG_XEN_GNTDEV=y
CONFIG_XEN_GRANT_DEV_ALLOC=y
+CONFIG_GREYBUS=m
+CONFIG_GREYBUS_BEAGLEPLAY=m
CONFIG_STAGING=y
CONFIG_STAGING_MEDIA=y
CONFIG_VIDEO_MAX96712=m
@@ -1320,6 +1330,7 @@ CONFIG_CLK_X1E80100_DISPCC=m
CONFIG_CLK_X1E80100_GCC=y
CONFIG_CLK_X1E80100_GPUCC=m
CONFIG_CLK_X1E80100_TCSRCC=y
+CONFIG_CLK_QCM2290_GPUCC=m
CONFIG_QCOM_A53PLL=y
CONFIG_QCOM_CLK_APCS_MSM8916=y
CONFIG_QCOM_CLK_APCC_MSM8996=y
@@ -1333,6 +1344,7 @@ CONFIG_IPQ_GCC_5332=y
CONFIG_IPQ_GCC_6018=y
CONFIG_IPQ_GCC_8074=y
CONFIG_IPQ_GCC_9574=y
+CONFIG_IPQ_NSSCC_9574=m
CONFIG_MSM_GCC_8916=y
CONFIG_MSM_MMCC_8994=m
CONFIG_MSM_GCC_8994=y
@@ -1394,6 +1406,7 @@ CONFIG_SM_TCSRCC_8650=y
CONFIG_SM_TCSRCC_8750=m
CONFIG_SA_VIDEOCC_8775P=m
CONFIG_SM_VIDEOCC_8250=y
+CONFIG_SM_VIDEOCC_8550=m
CONFIG_QCOM_HFPLL=y
CONFIG_CLK_GFM_LPASS_SM8250=m
CONFIG_CLK_RCAR_USB2_CLOCK_SEL=y
@@ -1561,6 +1574,7 @@ CONFIG_RESET_RZG2L_USBPHY_CTRL=y
CONFIG_RESET_TI_SCI=y
CONFIG_PHY_XGENE=y
CONFIG_PHY_CAN_TRANSCEIVER=m
+CONFIG_PHY_NXP_PTN3222=m
CONFIG_PHY_SUN4I_USB=y
CONFIG_PHY_CADENCE_TORRENT=m
CONFIG_PHY_CADENCE_DPHY_RX=m
diff --git a/arch/arm64/hyperv/hv_core.c b/arch/arm64/hyperv/hv_core.c
index 69004f619c57..e33a9e3c366a 100644
--- a/arch/arm64/hyperv/hv_core.c
+++ b/arch/arm64/hyperv/hv_core.c
@@ -54,6 +54,23 @@ u64 hv_do_fast_hypercall8(u16 code, u64 input)
EXPORT_SYMBOL_GPL(hv_do_fast_hypercall8);
/*
+ * hv_do_fast_hypercall16 -- Invoke the specified hypercall
+ * with arguments in registers instead of physical memory.
+ * Avoids the overhead of virt_to_phys for simple hypercalls.
+ */
+u64 hv_do_fast_hypercall16(u16 code, u64 input1, u64 input2)
+{
+ struct arm_smccc_res res;
+ u64 control;
+
+ control = (u64)code | HV_HYPERCALL_FAST_BIT;
+
+ arm_smccc_1_1_hvc(HV_FUNC_ID, control, input1, input2, &res);
+ return res.a0;
+}
+EXPORT_SYMBOL_GPL(hv_do_fast_hypercall16);
+
+/*
* Set a single VP register to a 64-bit value.
*/
void hv_set_vpreg(u32 msr, u64 value)
diff --git a/arch/arm64/hyperv/mshyperv.c b/arch/arm64/hyperv/mshyperv.c
index fc49949b7df6..4e27cc29c79e 100644
--- a/arch/arm64/hyperv/mshyperv.c
+++ b/arch/arm64/hyperv/mshyperv.c
@@ -26,6 +26,7 @@ int hv_get_hypervisor_version(union hv_hypervisor_version_info *info)
return 0;
}
+EXPORT_SYMBOL_GPL(hv_get_hypervisor_version);
static int __init hyperv_init(void)
{
@@ -61,6 +62,8 @@ static int __init hyperv_init(void)
ms_hyperv.features, ms_hyperv.priv_high, ms_hyperv.hints,
ms_hyperv.misc_features);
+ hv_identify_partition_type();
+
ret = hv_common_init();
if (ret)
return ret;
@@ -72,6 +75,9 @@ static int __init hyperv_init(void)
return ret;
}
+ if (ms_hyperv.priv_high & HV_ACCESS_PARTITION_ID)
+ hv_get_partition_id();
+
ms_hyperv_late_init();
hyperv_initialized = true;
diff --git a/arch/arm64/include/asm/apple_m1_pmu.h b/arch/arm64/include/asm/apple_m1_pmu.h
index 99483b19b99f..02e05d05851f 100644
--- a/arch/arm64/include/asm/apple_m1_pmu.h
+++ b/arch/arm64/include/asm/apple_m1_pmu.h
@@ -37,6 +37,7 @@
#define PMCR0_PMI_ENABLE_8_9 GENMASK(45, 44)
#define SYS_IMP_APL_PMCR1_EL1 sys_reg(3, 1, 15, 1, 0)
+#define SYS_IMP_APL_PMCR1_EL12 sys_reg(3, 1, 15, 7, 2)
#define PMCR1_COUNT_A64_EL0_0_7 GENMASK(15, 8)
#define PMCR1_COUNT_A64_EL1_0_7 GENMASK(23, 16)
#define PMCR1_COUNT_A64_EL0_8_9 GENMASK(41, 40)
diff --git a/arch/arm64/include/asm/asm-extable.h b/arch/arm64/include/asm/asm-extable.h
index b8a5861dc7b7..292f2687a12e 100644
--- a/arch/arm64/include/asm/asm-extable.h
+++ b/arch/arm64/include/asm/asm-extable.h
@@ -9,7 +9,8 @@
#define EX_TYPE_BPF 1
#define EX_TYPE_UACCESS_ERR_ZERO 2
#define EX_TYPE_KACCESS_ERR_ZERO 3
-#define EX_TYPE_LOAD_UNALIGNED_ZEROPAD 4
+#define EX_TYPE_UACCESS_CPY 4
+#define EX_TYPE_LOAD_UNALIGNED_ZEROPAD 5
/* Data fields for EX_TYPE_UACCESS_ERR_ZERO */
#define EX_DATA_REG_ERR_SHIFT 0
@@ -23,6 +24,9 @@
#define EX_DATA_REG_ADDR_SHIFT 5
#define EX_DATA_REG_ADDR GENMASK(9, 5)
+/* Data fields for EX_TYPE_UACCESS_CPY */
+#define EX_DATA_UACCESS_WRITE BIT(0)
+
#ifdef __ASSEMBLY__
#define __ASM_EXTABLE_RAW(insn, fixup, type, data) \
@@ -69,6 +73,10 @@
.endif
.endm
+ .macro _asm_extable_uaccess_cpy, insn, fixup, uaccess_is_write
+ __ASM_EXTABLE_RAW(\insn, \fixup, EX_TYPE_UACCESS_CPY, \uaccess_is_write)
+ .endm
+
#else /* __ASSEMBLY__ */
#include <linux/stringify.h>
diff --git a/arch/arm64/include/asm/asm-uaccess.h b/arch/arm64/include/asm/asm-uaccess.h
index 5b6efe8abeeb..9148f5a31968 100644
--- a/arch/arm64/include/asm/asm-uaccess.h
+++ b/arch/arm64/include/asm/asm-uaccess.h
@@ -61,6 +61,10 @@ alternative_else_nop_endif
9999: x; \
_asm_extable_uaccess 9999b, l
+#define USER_CPY(l, uaccess_is_write, x...) \
+9999: x; \
+ _asm_extable_uaccess_cpy 9999b, l, uaccess_is_write
+
/*
* Generate the assembly for LDTR/STTR with exception table entries.
* This is complicated as there is no post-increment or pair versions of the
diff --git a/arch/arm64/include/asm/cache.h b/arch/arm64/include/asm/cache.h
index 06a4670bdb0b..99cd6546e72e 100644
--- a/arch/arm64/include/asm/cache.h
+++ b/arch/arm64/include/asm/cache.h
@@ -35,7 +35,7 @@
#define ARCH_DMA_MINALIGN (128)
#define ARCH_KMALLOC_MINALIGN (8)
-#ifndef __ASSEMBLY__
+#if !defined(__ASSEMBLY__) && !defined(BUILD_VDSO)
#include <linux/bitops.h>
#include <linux/kasan-enabled.h>
@@ -118,6 +118,6 @@ static inline u32 __attribute_const__ read_cpuid_effective_cachetype(void)
return ctr;
}
-#endif /* __ASSEMBLY__ */
+#endif /* !defined(__ASSEMBLY__) && !defined(BUILD_VDSO) */
#endif
diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h
index 0b5ca6e0eb09..9d769291a306 100644
--- a/arch/arm64/include/asm/cpucaps.h
+++ b/arch/arm64/include/asm/cpucaps.h
@@ -71,6 +71,8 @@ cpucap_is_possible(const unsigned int cap)
* KVM MPAM support doesn't rely on the host kernel supporting MPAM.
*/
return true;
+ case ARM64_HAS_PMUV3:
+ return IS_ENABLED(CONFIG_HW_PERF_EVENTS);
}
return true;
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index e0e4478f5fb5..c4326f1cb917 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -525,29 +525,6 @@ cpuid_feature_extract_unsigned_field(u64 features, int field)
return cpuid_feature_extract_unsigned_field_width(features, field, 4);
}
-/*
- * Fields that identify the version of the Performance Monitors Extension do
- * not follow the standard ID scheme. See ARM DDI 0487E.a page D13-2825,
- * "Alternative ID scheme used for the Performance Monitors Extension version".
- */
-static inline u64 __attribute_const__
-cpuid_feature_cap_perfmon_field(u64 features, int field, u64 cap)
-{
- u64 val = cpuid_feature_extract_unsigned_field(features, field);
- u64 mask = GENMASK_ULL(field + 3, field);
-
- /* Treat IMPLEMENTATION DEFINED functionality as unimplemented */
- if (val == ID_AA64DFR0_EL1_PMUVer_IMP_DEF)
- val = 0;
-
- if (val > cap) {
- features &= ~mask;
- features |= (cap << field) & mask;
- }
-
- return features;
-}
-
static inline u64 arm64_ftr_mask(const struct arm64_ftr_bits *ftrp)
{
return (u64)GENMASK(ftrp->shift + ftrp->width - 1, ftrp->shift);
@@ -866,6 +843,11 @@ static __always_inline bool system_supports_mpam_hcr(void)
return alternative_has_cap_unlikely(ARM64_MPAM_HCR);
}
+static inline bool system_supports_pmuv3(void)
+{
+ return cpus_have_final_cap(ARM64_HAS_PMUV3);
+}
+
int do_emulate_mrs(struct pt_regs *regs, u32 sys_reg, u32 rt);
bool try_emulate_mrs(struct pt_regs *regs, u32 isn);
diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h
index 6f3f4142e214..c607e0bf5e0b 100644
--- a/arch/arm64/include/asm/cputype.h
+++ b/arch/arm64/include/asm/cputype.h
@@ -75,6 +75,7 @@
#define ARM_CPU_PART_CORTEX_A76 0xD0B
#define ARM_CPU_PART_NEOVERSE_N1 0xD0C
#define ARM_CPU_PART_CORTEX_A77 0xD0D
+#define ARM_CPU_PART_CORTEX_A76AE 0xD0E
#define ARM_CPU_PART_NEOVERSE_V1 0xD40
#define ARM_CPU_PART_CORTEX_A78 0xD41
#define ARM_CPU_PART_CORTEX_A78AE 0xD42
@@ -119,6 +120,7 @@
#define QCOM_CPU_PART_KRYO 0x200
#define QCOM_CPU_PART_KRYO_2XX_GOLD 0x800
#define QCOM_CPU_PART_KRYO_2XX_SILVER 0x801
+#define QCOM_CPU_PART_KRYO_3XX_GOLD 0x802
#define QCOM_CPU_PART_KRYO_3XX_SILVER 0x803
#define QCOM_CPU_PART_KRYO_4XX_GOLD 0x804
#define QCOM_CPU_PART_KRYO_4XX_SILVER 0x805
@@ -159,6 +161,7 @@
#define MIDR_CORTEX_A76 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A76)
#define MIDR_NEOVERSE_N1 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_N1)
#define MIDR_CORTEX_A77 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A77)
+#define MIDR_CORTEX_A76AE MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A76AE)
#define MIDR_NEOVERSE_V1 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_V1)
#define MIDR_CORTEX_A78 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A78)
#define MIDR_CORTEX_A78AE MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A78AE)
@@ -196,10 +199,21 @@
#define MIDR_QCOM_KRYO MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO)
#define MIDR_QCOM_KRYO_2XX_GOLD MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO_2XX_GOLD)
#define MIDR_QCOM_KRYO_2XX_SILVER MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO_2XX_SILVER)
+#define MIDR_QCOM_KRYO_3XX_GOLD MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO_3XX_GOLD)
#define MIDR_QCOM_KRYO_3XX_SILVER MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO_3XX_SILVER)
#define MIDR_QCOM_KRYO_4XX_GOLD MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO_4XX_GOLD)
#define MIDR_QCOM_KRYO_4XX_SILVER MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO_4XX_SILVER)
#define MIDR_QCOM_ORYON_X1 MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_ORYON_X1)
+
+/*
+ * NOTES:
+ * - Qualcomm Kryo 5XX Prime / Gold ID themselves as MIDR_CORTEX_A77
+ * - Qualcomm Kryo 5XX Silver IDs itself as MIDR_QCOM_KRYO_4XX_SILVER
+ * - Qualcomm Kryo 6XX Prime IDs itself as MIDR_CORTEX_X1
+ * - Qualcomm Kryo 6XX Gold IDs itself as ARM_CPU_PART_CORTEX_A78
+ * - Qualcomm Kryo 6XX Silver IDs itself as MIDR_CORTEX_A55
+ */
+
#define MIDR_NVIDIA_DENVER MIDR_CPU_MODEL(ARM_CPU_IMP_NVIDIA, NVIDIA_CPU_PART_DENVER)
#define MIDR_NVIDIA_CARMEL MIDR_CPU_MODEL(ARM_CPU_IMP_NVIDIA, NVIDIA_CPU_PART_CARMEL)
#define MIDR_FUJITSU_A64FX MIDR_CPU_MODEL(ARM_CPU_IMP_FUJITSU, FUJITSU_CPU_PART_A64FX)
@@ -232,6 +246,16 @@
#define read_cpuid(reg) read_sysreg_s(SYS_ ## reg)
/*
+ * The CPU ID never changes at run time, so we might as well tell the
+ * compiler that it's constant. Use this function to read the CPU ID
+ * rather than directly reading processor_id or read_cpuid() directly.
+ */
+static inline u32 __attribute_const__ read_cpuid_id(void)
+{
+ return read_cpuid(MIDR_EL1);
+}
+
+/*
* Represent a range of MIDR values for a given CPU model and a
* range of variant/revision values.
*
@@ -266,30 +290,14 @@ static inline bool midr_is_cpu_model_range(u32 midr, u32 model, u32 rv_min,
return _model == model && rv >= rv_min && rv <= rv_max;
}
-static inline bool is_midr_in_range(u32 midr, struct midr_range const *range)
-{
- return midr_is_cpu_model_range(midr, range->model,
- range->rv_min, range->rv_max);
-}
-
-static inline bool
-is_midr_in_range_list(u32 midr, struct midr_range const *ranges)
-{
- while (ranges->model)
- if (is_midr_in_range(midr, ranges++))
- return true;
- return false;
-}
+struct target_impl_cpu {
+ u64 midr;
+ u64 revidr;
+ u64 aidr;
+};
-/*
- * The CPU ID never changes at run time, so we might as well tell the
- * compiler that it's constant. Use this function to read the CPU ID
- * rather than directly reading processor_id or read_cpuid() directly.
- */
-static inline u32 __attribute_const__ read_cpuid_id(void)
-{
- return read_cpuid(MIDR_EL1);
-}
+bool cpu_errata_set_target_impl(u64 num, void *impl_cpus);
+bool is_midr_in_range_list(struct midr_range const *ranges);
static inline u64 __attribute_const__ read_cpuid_mpidr(void)
{
diff --git a/arch/arm64/include/asm/el2_setup.h b/arch/arm64/include/asm/el2_setup.h
index 25e162651750..ebceaae3c749 100644
--- a/arch/arm64/include/asm/el2_setup.h
+++ b/arch/arm64/include/asm/el2_setup.h
@@ -16,6 +16,32 @@
#include <asm/sysreg.h>
#include <linux/irqchip/arm-gic-v3.h>
+.macro init_el2_hcr val
+ mov_q x0, \val
+
+ /*
+ * Compliant CPUs advertise their VHE-onlyness with
+ * ID_AA64MMFR4_EL1.E2H0 < 0. On such CPUs HCR_EL2.E2H is RES1, but it
+ * can reset into an UNKNOWN state and might not read as 1 until it has
+ * been initialized explicitly.
+ *
+ * Fruity CPUs seem to have HCR_EL2.E2H set to RAO/WI, but
+ * don't advertise it (they predate this relaxation).
+ *
+ * Initialize HCR_EL2.E2H so that later code can rely upon HCR_EL2.E2H
+ * indicating whether the CPU is running in E2H mode.
+ */
+ mrs_s x1, SYS_ID_AA64MMFR4_EL1
+ sbfx x1, x1, #ID_AA64MMFR4_EL1_E2H0_SHIFT, #ID_AA64MMFR4_EL1_E2H0_WIDTH
+ cmp x1, #0
+ b.ge .LnVHE_\@
+
+ orr x0, x0, #HCR_E2H
+.LnVHE_\@:
+ msr hcr_el2, x0
+ isb
+.endm
+
.macro __init_el2_sctlr
mov_q x0, INIT_SCTLR_EL2_MMU_OFF
msr sctlr_el2, x0
@@ -233,6 +259,30 @@
.Lskip_fgt_\@:
.endm
+.macro __init_el2_fgt2
+ mrs x1, id_aa64mmfr0_el1
+ ubfx x1, x1, #ID_AA64MMFR0_EL1_FGT_SHIFT, #4
+ cmp x1, #ID_AA64MMFR0_EL1_FGT_FGT2
+ b.lt .Lskip_fgt2_\@
+
+ mov x0, xzr
+ mrs x1, id_aa64dfr0_el1
+ ubfx x1, x1, #ID_AA64DFR0_EL1_PMUVer_SHIFT, #4
+ cmp x1, #ID_AA64DFR0_EL1_PMUVer_V3P9
+ b.lt .Lskip_pmuv3p9_\@
+
+ orr x0, x0, #HDFGRTR2_EL2_nPMICNTR_EL0
+ orr x0, x0, #HDFGRTR2_EL2_nPMICFILTR_EL0
+ orr x0, x0, #HDFGRTR2_EL2_nPMUACR_EL1
+.Lskip_pmuv3p9_\@:
+ msr_s SYS_HDFGRTR2_EL2, x0
+ msr_s SYS_HDFGWTR2_EL2, x0
+ msr_s SYS_HFGRTR2_EL2, xzr
+ msr_s SYS_HFGWTR2_EL2, xzr
+ msr_s SYS_HFGITR2_EL2, xzr
+.Lskip_fgt2_\@:
+.endm
+
.macro __init_el2_gcs
mrs_s x1, SYS_ID_AA64PFR1_EL1
ubfx x1, x1, #ID_AA64PFR1_EL1_GCS_SHIFT, #4
@@ -244,11 +294,6 @@
.Lskip_gcs_\@:
.endm
-.macro __init_el2_nvhe_prepare_eret
- mov x0, #INIT_PSTATE_EL1
- msr spsr_el2, x0
-.endm
-
.macro __init_el2_mpam
/* Memory Partitioning And Monitoring: disable EL2 traps */
mrs x1, id_aa64pfr0_el1
@@ -283,6 +328,7 @@
__init_el2_nvhe_idregs
__init_el2_cptr
__init_el2_fgt
+ __init_el2_fgt2
__init_el2_gcs
.endm
diff --git a/arch/arm64/include/asm/extable.h b/arch/arm64/include/asm/extable.h
index 72b0e71cc3de..9dc39612bdf5 100644
--- a/arch/arm64/include/asm/extable.h
+++ b/arch/arm64/include/asm/extable.h
@@ -33,6 +33,8 @@ do { \
(b)->data = (tmp).data; \
} while (0)
+bool insn_may_access_user(unsigned long addr, unsigned long esr);
+
#ifdef CONFIG_BPF_JIT
bool ex_handler_bpf(const struct exception_table_entry *ex,
struct pt_regs *regs);
@@ -45,5 +47,5 @@ bool ex_handler_bpf(const struct exception_table_entry *ex,
}
#endif /* !CONFIG_BPF_JIT */
-bool fixup_exception(struct pt_regs *regs);
+bool fixup_exception(struct pt_regs *regs, unsigned long esr);
#endif
diff --git a/arch/arm64/include/asm/fpsimd.h b/arch/arm64/include/asm/fpsimd.h
index f2a84efc3618..564bc09b3e06 100644
--- a/arch/arm64/include/asm/fpsimd.h
+++ b/arch/arm64/include/asm/fpsimd.h
@@ -80,7 +80,6 @@ extern void fpsimd_signal_preserve_current_state(void);
extern void fpsimd_preserve_current_state(void);
extern void fpsimd_restore_current_state(void);
extern void fpsimd_update_current_state(struct user_fpsimd_state const *state);
-extern void fpsimd_kvm_prepare(void);
struct cpu_fp_state {
struct user_fpsimd_state *st;
diff --git a/arch/arm64/include/asm/hypervisor.h b/arch/arm64/include/asm/hypervisor.h
index 409e239834d1..a12fd897c877 100644
--- a/arch/arm64/include/asm/hypervisor.h
+++ b/arch/arm64/include/asm/hypervisor.h
@@ -6,6 +6,7 @@
void kvm_init_hyp_services(void);
bool kvm_arm_hyp_service_available(u32 func_id);
+void kvm_arm_target_impl_cpu_init(void);
#ifdef CONFIG_ARM_PKVM_GUEST
void pkvm_init_hyp_services(void);
diff --git a/arch/arm64/include/asm/kernel-pgtable.h b/arch/arm64/include/asm/kernel-pgtable.h
index fd5a08450b12..9e93733523f6 100644
--- a/arch/arm64/include/asm/kernel-pgtable.h
+++ b/arch/arm64/include/asm/kernel-pgtable.h
@@ -45,11 +45,11 @@
#define SPAN_NR_ENTRIES(vstart, vend, shift) \
((((vend) - 1) >> (shift)) - ((vstart) >> (shift)) + 1)
-#define EARLY_ENTRIES(vstart, vend, shift, add) \
- (SPAN_NR_ENTRIES(vstart, vend, shift) + (add))
+#define EARLY_ENTRIES(lvl, vstart, vend) \
+ SPAN_NR_ENTRIES(vstart, vend, SWAPPER_BLOCK_SHIFT + lvl * PTDESC_TABLE_SHIFT)
-#define EARLY_LEVEL(lvl, lvls, vstart, vend, add) \
- (lvls > lvl ? EARLY_ENTRIES(vstart, vend, SWAPPER_BLOCK_SHIFT + lvl * (PAGE_SHIFT - 3), add) : 0)
+#define EARLY_LEVEL(lvl, lvls, vstart, vend, add) \
+ ((lvls) > (lvl) ? EARLY_ENTRIES(lvl, vstart, vend) + (add) : 0)
#define EARLY_PAGES(lvls, vstart, vend, add) (1 /* PGDIR page */ \
+ EARLY_LEVEL(3, (lvls), (vstart), (vend), add) /* each entry needs a next level page table */ \
diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
index c2417a424b98..974d72b5905b 100644
--- a/arch/arm64/include/asm/kvm_arm.h
+++ b/arch/arm64/include/asm/kvm_arm.h
@@ -92,12 +92,12 @@
* SWIO: Turn set/way invalidates into set/way clean+invalidate
* PTW: Take a stage2 fault if a stage1 walk steps in device memory
* TID3: Trap EL1 reads of group 3 ID registers
- * TID2: Trap CTR_EL0, CCSIDR2_EL1, CLIDR_EL1, and CSSELR_EL1
+ * TID1: Trap REVIDR_EL1, AIDR_EL1, and SMIDR_EL1
*/
#define HCR_GUEST_FLAGS (HCR_TSC | HCR_TSW | HCR_TWE | HCR_TWI | HCR_VM | \
HCR_BSU_IS | HCR_FB | HCR_TACR | \
HCR_AMO | HCR_SWIO | HCR_TIDCP | HCR_RW | HCR_TLOR | \
- HCR_FMO | HCR_IMO | HCR_PTW | HCR_TID3)
+ HCR_FMO | HCR_IMO | HCR_PTW | HCR_TID3 | HCR_TID1)
#define HCR_HOST_NVHE_FLAGS (HCR_RW | HCR_API | HCR_APK | HCR_ATA)
#define HCR_HOST_NVHE_PROTECTED_FLAGS (HCR_HOST_NVHE_FLAGS | HCR_TSC)
#define HCR_HOST_VHE_FLAGS (HCR_RW | HCR_TGE | HCR_E2H)
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index 78ec1ef2cfe8..d7cf66573aca 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -275,6 +275,19 @@ static __always_inline u64 kvm_vcpu_get_esr(const struct kvm_vcpu *vcpu)
return vcpu->arch.fault.esr_el2;
}
+static inline bool guest_hyp_wfx_traps_enabled(const struct kvm_vcpu *vcpu)
+{
+ u64 esr = kvm_vcpu_get_esr(vcpu);
+ bool is_wfe = !!(esr & ESR_ELx_WFx_ISS_WFE);
+ u64 hcr_el2 = __vcpu_sys_reg(vcpu, HCR_EL2);
+
+ if (!vcpu_has_nv(vcpu) || vcpu_is_el2(vcpu))
+ return false;
+
+ return ((is_wfe && (hcr_el2 & HCR_TWE)) ||
+ (!is_wfe && (hcr_el2 & HCR_TWI)));
+}
+
static __always_inline int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu)
{
u64 esr = kvm_vcpu_get_esr(vcpu);
@@ -649,4 +662,28 @@ static inline bool guest_hyp_sve_traps_enabled(const struct kvm_vcpu *vcpu)
{
return __guest_hyp_cptr_xen_trap_enabled(vcpu, ZEN);
}
+
+static inline void vcpu_set_hcrx(struct kvm_vcpu *vcpu)
+{
+ struct kvm *kvm = vcpu->kvm;
+
+ if (cpus_have_final_cap(ARM64_HAS_HCX)) {
+ /*
+ * In general, all HCRX_EL2 bits are gated by a feature.
+ * The only reason we can set SMPME without checking any
+ * feature is that its effects are not directly observable
+ * from the guest.
+ */
+ vcpu->arch.hcrx_el2 = HCRX_EL2_SMPME;
+
+ if (kvm_has_feat(kvm, ID_AA64ISAR2_EL1, MOPS, IMP))
+ vcpu->arch.hcrx_el2 |= (HCRX_EL2_MSCEn | HCRX_EL2_MCE2);
+
+ if (kvm_has_tcr2(kvm))
+ vcpu->arch.hcrx_el2 |= HCRX_EL2_TCR2En;
+
+ if (kvm_has_fpmr(kvm))
+ vcpu->arch.hcrx_el2 |= HCRX_EL2_EnFPM;
+ }
+}
#endif /* __ARM64_KVM_EMULATE_H__ */
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index d919557af5e5..e98cfe7855a6 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -44,14 +44,15 @@
#define KVM_REQ_SLEEP \
KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
-#define KVM_REQ_IRQ_PENDING KVM_ARCH_REQ(1)
-#define KVM_REQ_VCPU_RESET KVM_ARCH_REQ(2)
-#define KVM_REQ_RECORD_STEAL KVM_ARCH_REQ(3)
-#define KVM_REQ_RELOAD_GICv4 KVM_ARCH_REQ(4)
-#define KVM_REQ_RELOAD_PMU KVM_ARCH_REQ(5)
-#define KVM_REQ_SUSPEND KVM_ARCH_REQ(6)
-#define KVM_REQ_RESYNC_PMU_EL0 KVM_ARCH_REQ(7)
-#define KVM_REQ_NESTED_S2_UNMAP KVM_ARCH_REQ(8)
+#define KVM_REQ_IRQ_PENDING KVM_ARCH_REQ(1)
+#define KVM_REQ_VCPU_RESET KVM_ARCH_REQ(2)
+#define KVM_REQ_RECORD_STEAL KVM_ARCH_REQ(3)
+#define KVM_REQ_RELOAD_GICv4 KVM_ARCH_REQ(4)
+#define KVM_REQ_RELOAD_PMU KVM_ARCH_REQ(5)
+#define KVM_REQ_SUSPEND KVM_ARCH_REQ(6)
+#define KVM_REQ_RESYNC_PMU_EL0 KVM_ARCH_REQ(7)
+#define KVM_REQ_NESTED_S2_UNMAP KVM_ARCH_REQ(8)
+#define KVM_REQ_GUEST_HYP_IRQ_PENDING KVM_ARCH_REQ(9)
#define KVM_DIRTY_LOG_MANUAL_CAPS (KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE | \
KVM_DIRTY_LOG_INITIALLY_SET)
@@ -86,6 +87,9 @@ struct kvm_hyp_memcache {
phys_addr_t head;
unsigned long nr_pages;
struct pkvm_mapping *mapping; /* only used from EL1 */
+
+#define HYP_MEMCACHE_ACCOUNT_STAGE2 BIT(1)
+ unsigned long flags;
};
static inline void push_hyp_memcache(struct kvm_hyp_memcache *mc,
@@ -237,7 +241,8 @@ struct kvm_arch_memory_slot {
struct kvm_smccc_features {
unsigned long std_bmap;
unsigned long std_hyp_bmap;
- unsigned long vendor_hyp_bmap;
+ unsigned long vendor_hyp_bmap; /* Function numbers 0-63 */
+ unsigned long vendor_hyp_bmap_2; /* Function numbers 64-127 */
};
typedef unsigned int pkvm_handle_t;
@@ -245,6 +250,7 @@ typedef unsigned int pkvm_handle_t;
struct kvm_protected_vm {
pkvm_handle_t handle;
struct kvm_hyp_memcache teardown_mc;
+ struct kvm_hyp_memcache stage2_teardown_mc;
bool enabled;
};
@@ -334,6 +340,8 @@ struct kvm_arch {
#define KVM_ARCH_FLAG_FGU_INITIALIZED 8
/* SVE exposed to guest */
#define KVM_ARCH_FLAG_GUEST_HAS_SVE 9
+ /* MIDR_EL1, REVIDR_EL1, and AIDR_EL1 are writable from userspace */
+#define KVM_ARCH_FLAG_WRITABLE_IMP_ID_REGS 10
unsigned long flags;
/* VM-wide vCPU feature set */
@@ -373,6 +381,9 @@ struct kvm_arch {
#define KVM_ARM_ID_REG_NUM (IDREG_IDX(sys_reg(3, 0, 0, 7, 7)) + 1)
u64 id_regs[KVM_ARM_ID_REG_NUM];
+ u64 midr_el1;
+ u64 revidr_el1;
+ u64 aidr_el1;
u64 ctr_el0;
/* Masks for VNCR-backed and general EL2 sysregs */
@@ -557,7 +568,33 @@ enum vcpu_sysreg {
VNCR(CNTP_CVAL_EL0),
VNCR(CNTP_CTL_EL0),
+ VNCR(ICH_LR0_EL2),
+ VNCR(ICH_LR1_EL2),
+ VNCR(ICH_LR2_EL2),
+ VNCR(ICH_LR3_EL2),
+ VNCR(ICH_LR4_EL2),
+ VNCR(ICH_LR5_EL2),
+ VNCR(ICH_LR6_EL2),
+ VNCR(ICH_LR7_EL2),
+ VNCR(ICH_LR8_EL2),
+ VNCR(ICH_LR9_EL2),
+ VNCR(ICH_LR10_EL2),
+ VNCR(ICH_LR11_EL2),
+ VNCR(ICH_LR12_EL2),
+ VNCR(ICH_LR13_EL2),
+ VNCR(ICH_LR14_EL2),
+ VNCR(ICH_LR15_EL2),
+
+ VNCR(ICH_AP0R0_EL2),
+ VNCR(ICH_AP0R1_EL2),
+ VNCR(ICH_AP0R2_EL2),
+ VNCR(ICH_AP0R3_EL2),
+ VNCR(ICH_AP1R0_EL2),
+ VNCR(ICH_AP1R1_EL2),
+ VNCR(ICH_AP1R2_EL2),
+ VNCR(ICH_AP1R3_EL2),
VNCR(ICH_HCR_EL2),
+ VNCR(ICH_VMCR_EL2),
NR_SYS_REGS /* Nothing after this line! */
};
@@ -869,6 +906,8 @@ struct kvm_vcpu_arch {
#define VCPU_INITIALIZED __vcpu_single_flag(cflags, BIT(0))
/* SVE config completed */
#define VCPU_SVE_FINALIZED __vcpu_single_flag(cflags, BIT(1))
+/* pKVM VCPU setup completed */
+#define VCPU_PKVM_FINALIZED __vcpu_single_flag(cflags, BIT(2))
/* Exception pending */
#define PENDING_EXCEPTION __vcpu_single_flag(iflags, BIT(0))
@@ -919,6 +958,8 @@ struct kvm_vcpu_arch {
#define PMUSERENR_ON_CPU __vcpu_single_flag(sflags, BIT(5))
/* WFI instruction trapped */
#define IN_WFI __vcpu_single_flag(sflags, BIT(6))
+/* KVM is currently emulating a nested ERET */
+#define IN_NESTED_ERET __vcpu_single_flag(sflags, BIT(7))
/* Pointer to the vcpu's SVE FFR for sve_{save,load}_state() */
@@ -1334,8 +1375,6 @@ static inline bool kvm_system_needs_idmapped_vectors(void)
return cpus_have_final_cap(ARM64_SPECTRE_V3A);
}
-static inline void kvm_arch_sync_events(struct kvm *kvm) {}
-
void kvm_init_host_debug_data(void);
void kvm_vcpu_load_debug(struct kvm_vcpu *vcpu);
void kvm_vcpu_put_debug(struct kvm_vcpu *vcpu);
@@ -1459,6 +1498,12 @@ static inline u64 *__vm_id_reg(struct kvm_arch *ka, u32 reg)
return &ka->id_regs[IDREG_IDX(reg)];
case SYS_CTR_EL0:
return &ka->ctr_el0;
+ case SYS_MIDR_EL1:
+ return &ka->midr_el1;
+ case SYS_REVIDR_EL1:
+ return &ka->revidr_el1;
+ case SYS_AIDR_EL1:
+ return &ka->aidr_el1;
default:
WARN_ON_ONCE(1);
return NULL;
diff --git a/arch/arm64/include/asm/kvm_hyp.h b/arch/arm64/include/asm/kvm_hyp.h
index c838309e4ec4..e6be1f5d0967 100644
--- a/arch/arm64/include/asm/kvm_hyp.h
+++ b/arch/arm64/include/asm/kvm_hyp.h
@@ -76,6 +76,8 @@ DECLARE_PER_CPU(struct kvm_nvhe_init_params, kvm_init_params);
int __vgic_v2_perform_cpuif_access(struct kvm_vcpu *vcpu);
+u64 __gic_v3_get_lr(unsigned int lr);
+
void __vgic_v3_save_state(struct vgic_v3_cpu_if *cpu_if);
void __vgic_v3_restore_state(struct vgic_v3_cpu_if *cpu_if);
void __vgic_v3_activate_traps(struct vgic_v3_cpu_if *cpu_if);
diff --git a/arch/arm64/include/asm/kvm_nested.h b/arch/arm64/include/asm/kvm_nested.h
index 56c4bcd35e2e..692f403c1896 100644
--- a/arch/arm64/include/asm/kvm_nested.h
+++ b/arch/arm64/include/asm/kvm_nested.h
@@ -188,6 +188,7 @@ static inline bool kvm_supported_tlbi_s1e2_op(struct kvm_vcpu *vpcu, u32 instr)
}
int kvm_init_nv_sysregs(struct kvm_vcpu *vcpu);
+u64 limit_nv_id_reg(struct kvm *kvm, u32 reg, u64 val);
#ifdef CONFIG_ARM64_PTR_AUTH
bool kvm_auth_eretax(struct kvm_vcpu *vcpu, u64 *elr);
diff --git a/arch/arm64/include/asm/kvm_pkvm.h b/arch/arm64/include/asm/kvm_pkvm.h
index eb65f12e81d9..abd693ce5b93 100644
--- a/arch/arm64/include/asm/kvm_pkvm.h
+++ b/arch/arm64/include/asm/kvm_pkvm.h
@@ -19,6 +19,7 @@
int pkvm_init_host_vm(struct kvm *kvm);
int pkvm_create_hyp_vm(struct kvm *kvm);
void pkvm_destroy_hyp_vm(struct kvm *kvm);
+int pkvm_create_hyp_vcpu(struct kvm_vcpu *vcpu);
/*
* This functions as an allow-list of protected VM capabilities.
diff --git a/arch/arm64/include/asm/mem_encrypt.h b/arch/arm64/include/asm/mem_encrypt.h
index f8f78f622dd2..a2a1eeb36d4b 100644
--- a/arch/arm64/include/asm/mem_encrypt.h
+++ b/arch/arm64/include/asm/mem_encrypt.h
@@ -21,4 +21,15 @@ static inline bool force_dma_unencrypted(struct device *dev)
return is_realm_world();
}
+/*
+ * For Arm CCA guests, canonical addresses are "encrypted", so no changes
+ * required for dma_addr_encrypted().
+ * The unencrypted DMA buffers must be accessed via the unprotected IPA,
+ * i.e. with the "top IPA bit" set.
+ */
+#define dma_addr_unencrypted(x) ((x) | PROT_NS_SHARED)
+
+/* Clear the "top" IPA bit while converting back */
+#define dma_addr_canonical(x) ((x) & ~PROT_NS_SHARED)
+
#endif /* __ASM_MEM_ENCRYPT_H */
diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h
index 662471cfc536..30a29e88994b 100644
--- a/arch/arm64/include/asm/mmu.h
+++ b/arch/arm64/include/asm/mmu.h
@@ -101,8 +101,7 @@ static inline bool kaslr_requires_kpti(void)
if (IS_ENABLED(CONFIG_CAVIUM_ERRATUM_27456)) {
extern const struct midr_range cavium_erratum_27456_cpus[];
- if (is_midr_in_range_list(read_cpuid_id(),
- cavium_erratum_27456_cpus))
+ if (is_midr_in_range_list(cavium_erratum_27456_cpus))
return false;
}
diff --git a/arch/arm64/include/asm/mshyperv.h b/arch/arm64/include/asm/mshyperv.h
index 2e2f83bafcfb..b721d3134ab6 100644
--- a/arch/arm64/include/asm/mshyperv.h
+++ b/arch/arm64/include/asm/mshyperv.h
@@ -40,6 +40,19 @@ static inline u64 hv_get_msr(unsigned int reg)
return hv_get_vpreg(reg);
}
+/*
+ * Nested is not supported on arm64
+ */
+static inline void hv_set_non_nested_msr(unsigned int reg, u64 value)
+{
+ hv_set_msr(reg, value);
+}
+
+static inline u64 hv_get_non_nested_msr(unsigned int reg)
+{
+ return hv_get_msr(reg);
+}
+
/* SMCCC hypercall parameters */
#define HV_SMCCC_FUNC_NUMBER 1
#define HV_FUNC_ID ARM_SMCCC_CALL_VAL( \
diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h
index a9136cc551cc..f3b77deedfa2 100644
--- a/arch/arm64/include/asm/pgtable-hwdef.h
+++ b/arch/arm64/include/asm/pgtable-hwdef.h
@@ -7,40 +7,46 @@
#include <asm/memory.h>
+#define PTDESC_ORDER 3
+
+/* Number of VA bits resolved by a single translation table level */
+#define PTDESC_TABLE_SHIFT (PAGE_SHIFT - PTDESC_ORDER)
+
/*
* Number of page-table levels required to address 'va_bits' wide
* address, without section mapping. We resolve the top (va_bits - PAGE_SHIFT)
- * bits with (PAGE_SHIFT - 3) bits at each page table level. Hence:
+ * bits with PTDESC_TABLE_SHIFT bits at each page table level. Hence:
*
- * levels = DIV_ROUND_UP((va_bits - PAGE_SHIFT), (PAGE_SHIFT - 3))
+ * levels = DIV_ROUND_UP((va_bits - PAGE_SHIFT), PTDESC_TABLE_SHIFT)
*
* where DIV_ROUND_UP(n, d) => (((n) + (d) - 1) / (d))
*
* We cannot include linux/kernel.h which defines DIV_ROUND_UP here
* due to build issues. So we open code DIV_ROUND_UP here:
*
- * ((((va_bits) - PAGE_SHIFT) + (PAGE_SHIFT - 3) - 1) / (PAGE_SHIFT - 3))
+ * ((((va_bits) - PAGE_SHIFT) + PTDESC_TABLE_SHIFT - 1) / PTDESC_TABLE_SHIFT)
*
* which gets simplified as :
*/
-#define ARM64_HW_PGTABLE_LEVELS(va_bits) (((va_bits) - 4) / (PAGE_SHIFT - 3))
+#define ARM64_HW_PGTABLE_LEVELS(va_bits) \
+ (((va_bits) - PTDESC_ORDER - 1) / PTDESC_TABLE_SHIFT)
/*
* Size mapped by an entry at level n ( -1 <= n <= 3)
- * We map (PAGE_SHIFT - 3) at all translation levels and PAGE_SHIFT bits
+ * We map PTDESC_TABLE_SHIFT at all translation levels and PAGE_SHIFT bits
* in the final page. The maximum number of translation levels supported by
* the architecture is 5. Hence, starting at level n, we have further
* ((4 - n) - 1) levels of translation excluding the offset within the page.
* So, the total number of bits mapped by an entry at level n is :
*
- * ((4 - n) - 1) * (PAGE_SHIFT - 3) + PAGE_SHIFT
+ * ((4 - n) - 1) * PTDESC_TABLE_SHIFT + PAGE_SHIFT
*
* Rearranging it a bit we get :
- * (4 - n) * (PAGE_SHIFT - 3) + 3
+ * (4 - n) * PTDESC_TABLE_SHIFT + PTDESC_ORDER
*/
-#define ARM64_HW_PGTABLE_LEVEL_SHIFT(n) ((PAGE_SHIFT - 3) * (4 - (n)) + 3)
+#define ARM64_HW_PGTABLE_LEVEL_SHIFT(n) (PTDESC_TABLE_SHIFT * (4 - (n)) + PTDESC_ORDER)
-#define PTRS_PER_PTE (1 << (PAGE_SHIFT - 3))
+#define PTRS_PER_PTE (1 << PTDESC_TABLE_SHIFT)
/*
* PMD_SHIFT determines the size a level 2 page table entry can map.
@@ -49,7 +55,7 @@
#define PMD_SHIFT ARM64_HW_PGTABLE_LEVEL_SHIFT(2)
#define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
#define PMD_MASK (~(PMD_SIZE-1))
-#define PTRS_PER_PMD (1 << (PAGE_SHIFT - 3))
+#define PTRS_PER_PMD (1 << PTDESC_TABLE_SHIFT)
#endif
/*
@@ -59,14 +65,14 @@
#define PUD_SHIFT ARM64_HW_PGTABLE_LEVEL_SHIFT(1)
#define PUD_SIZE (_AC(1, UL) << PUD_SHIFT)
#define PUD_MASK (~(PUD_SIZE-1))
-#define PTRS_PER_PUD (1 << (PAGE_SHIFT - 3))
+#define PTRS_PER_PUD (1 << PTDESC_TABLE_SHIFT)
#endif
#if CONFIG_PGTABLE_LEVELS > 4
#define P4D_SHIFT ARM64_HW_PGTABLE_LEVEL_SHIFT(0)
#define P4D_SIZE (_AC(1, UL) << P4D_SHIFT)
#define P4D_MASK (~(P4D_SIZE-1))
-#define PTRS_PER_P4D (1 << (PAGE_SHIFT - 3))
+#define PTRS_PER_P4D (1 << PTDESC_TABLE_SHIFT)
#endif
/*
@@ -97,7 +103,6 @@
* Level -1 descriptor (PGD).
*/
#define PGD_TYPE_TABLE (_AT(pgdval_t, 3) << 0)
-#define PGD_TABLE_BIT (_AT(pgdval_t, 1) << 1)
#define PGD_TYPE_MASK (_AT(pgdval_t, 3) << 0)
#define PGD_TABLE_AF (_AT(pgdval_t, 1) << 10) /* Ignored if no FEAT_HAFT */
#define PGD_TABLE_PXN (_AT(pgdval_t, 1) << 59)
@@ -107,7 +112,6 @@
* Level 0 descriptor (P4D).
*/
#define P4D_TYPE_TABLE (_AT(p4dval_t, 3) << 0)
-#define P4D_TABLE_BIT (_AT(p4dval_t, 1) << 1)
#define P4D_TYPE_MASK (_AT(p4dval_t, 3) << 0)
#define P4D_TYPE_SECT (_AT(p4dval_t, 1) << 0)
#define P4D_SECT_RDONLY (_AT(p4dval_t, 1) << 7) /* AP[2] */
@@ -119,7 +123,6 @@
* Level 1 descriptor (PUD).
*/
#define PUD_TYPE_TABLE (_AT(pudval_t, 3) << 0)
-#define PUD_TABLE_BIT (_AT(pudval_t, 1) << 1)
#define PUD_TYPE_MASK (_AT(pudval_t, 3) << 0)
#define PUD_TYPE_SECT (_AT(pudval_t, 1) << 0)
#define PUD_SECT_RDONLY (_AT(pudval_t, 1) << 7) /* AP[2] */
@@ -133,7 +136,6 @@
#define PMD_TYPE_MASK (_AT(pmdval_t, 3) << 0)
#define PMD_TYPE_TABLE (_AT(pmdval_t, 3) << 0)
#define PMD_TYPE_SECT (_AT(pmdval_t, 1) << 0)
-#define PMD_TABLE_BIT (_AT(pmdval_t, 1) << 1)
#define PMD_TABLE_AF (_AT(pmdval_t, 1) << 10) /* Ignored if no FEAT_HAFT */
/*
@@ -162,7 +164,6 @@
#define PTE_VALID (_AT(pteval_t, 1) << 0)
#define PTE_TYPE_MASK (_AT(pteval_t, 3) << 0)
#define PTE_TYPE_PAGE (_AT(pteval_t, 3) << 0)
-#define PTE_TABLE_BIT (_AT(pteval_t, 1) << 1)
#define PTE_USER (_AT(pteval_t, 1) << 6) /* AP[1] */
#define PTE_RDONLY (_AT(pteval_t, 1) << 7) /* AP[2] */
#define PTE_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */
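The rewritten ARM64_HW_PGTABLE_LEVELS() keeps the value unchanged: with PTDESC_ORDER = 3 the new expression is exactly the open-coded DIV_ROUND_UP described in the comment. A quick user-space check of that equivalence across the page sizes and VA widths arm64 supports (local macro copies, not the kernel headers):

#include <assert.h>
#include <stdio.h>

#define PTDESC_ORDER		3
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

static int levels_old(int va_bits, int page_shift)
{
	return DIV_ROUND_UP(va_bits - page_shift, page_shift - 3);
}

static int levels_new(int va_bits, int page_shift)
{
	int ptdesc_table_shift = page_shift - PTDESC_ORDER;

	return (va_bits - PTDESC_ORDER - 1) / ptdesc_table_shift;
}

int main(void)
{
	int page_shifts[] = { 12, 14, 16 };		/* 4K, 16K, 64K pages */
	int va_bits[] = { 36, 39, 42, 47, 48, 52 };

	for (unsigned int i = 0; i < sizeof(page_shifts) / sizeof(*page_shifts); i++)
		for (unsigned int j = 0; j < sizeof(va_bits) / sizeof(*va_bits); j++)
			assert(levels_old(va_bits[j], page_shifts[i]) ==
			       levels_new(va_bits[j], page_shifts[i]));

	printf("48-bit VA, 4K pages -> %d levels\n", levels_new(48, 12));
	return 0;
}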
diff --git a/arch/arm64/include/asm/pgtable-prot.h b/arch/arm64/include/asm/pgtable-prot.h
index a95f1f77bb39..7830d031742e 100644
--- a/arch/arm64/include/asm/pgtable-prot.h
+++ b/arch/arm64/include/asm/pgtable-prot.h
@@ -169,25 +169,25 @@ static inline bool __pure lpa2_is_enabled(void)
#define PAGE_GCS_RO __pgprot(_PAGE_GCS_RO)
#define PIE_E0 ( \
- PIRx_ELx_PERM(pte_pi_index(_PAGE_GCS), PIE_GCS) | \
- PIRx_ELx_PERM(pte_pi_index(_PAGE_GCS_RO), PIE_R) | \
- PIRx_ELx_PERM(pte_pi_index(_PAGE_EXECONLY), PIE_X_O) | \
- PIRx_ELx_PERM(pte_pi_index(_PAGE_READONLY_EXEC), PIE_RX_O) | \
- PIRx_ELx_PERM(pte_pi_index(_PAGE_SHARED_EXEC), PIE_RWX_O) | \
- PIRx_ELx_PERM(pte_pi_index(_PAGE_READONLY), PIE_R_O) | \
- PIRx_ELx_PERM(pte_pi_index(_PAGE_SHARED), PIE_RW_O))
+ PIRx_ELx_PERM_PREP(pte_pi_index(_PAGE_GCS), PIE_GCS) | \
+ PIRx_ELx_PERM_PREP(pte_pi_index(_PAGE_GCS_RO), PIE_R) | \
+ PIRx_ELx_PERM_PREP(pte_pi_index(_PAGE_EXECONLY), PIE_X_O) | \
+ PIRx_ELx_PERM_PREP(pte_pi_index(_PAGE_READONLY_EXEC), PIE_RX_O) | \
+ PIRx_ELx_PERM_PREP(pte_pi_index(_PAGE_SHARED_EXEC), PIE_RWX_O) | \
+ PIRx_ELx_PERM_PREP(pte_pi_index(_PAGE_READONLY), PIE_R_O) | \
+ PIRx_ELx_PERM_PREP(pte_pi_index(_PAGE_SHARED), PIE_RW_O))
#define PIE_E1 ( \
- PIRx_ELx_PERM(pte_pi_index(_PAGE_GCS), PIE_NONE_O) | \
- PIRx_ELx_PERM(pte_pi_index(_PAGE_GCS_RO), PIE_NONE_O) | \
- PIRx_ELx_PERM(pte_pi_index(_PAGE_EXECONLY), PIE_NONE_O) | \
- PIRx_ELx_PERM(pte_pi_index(_PAGE_READONLY_EXEC), PIE_R) | \
- PIRx_ELx_PERM(pte_pi_index(_PAGE_SHARED_EXEC), PIE_RW) | \
- PIRx_ELx_PERM(pte_pi_index(_PAGE_READONLY), PIE_R) | \
- PIRx_ELx_PERM(pte_pi_index(_PAGE_SHARED), PIE_RW) | \
- PIRx_ELx_PERM(pte_pi_index(_PAGE_KERNEL_ROX), PIE_RX) | \
- PIRx_ELx_PERM(pte_pi_index(_PAGE_KERNEL_EXEC), PIE_RWX) | \
- PIRx_ELx_PERM(pte_pi_index(_PAGE_KERNEL_RO), PIE_R) | \
- PIRx_ELx_PERM(pte_pi_index(_PAGE_KERNEL), PIE_RW))
+ PIRx_ELx_PERM_PREP(pte_pi_index(_PAGE_GCS), PIE_NONE_O) | \
+ PIRx_ELx_PERM_PREP(pte_pi_index(_PAGE_GCS_RO), PIE_NONE_O) | \
+ PIRx_ELx_PERM_PREP(pte_pi_index(_PAGE_EXECONLY), PIE_NONE_O) | \
+ PIRx_ELx_PERM_PREP(pte_pi_index(_PAGE_READONLY_EXEC), PIE_R) | \
+ PIRx_ELx_PERM_PREP(pte_pi_index(_PAGE_SHARED_EXEC), PIE_RW) | \
+ PIRx_ELx_PERM_PREP(pte_pi_index(_PAGE_READONLY), PIE_R) | \
+ PIRx_ELx_PERM_PREP(pte_pi_index(_PAGE_SHARED), PIE_RW) | \
+ PIRx_ELx_PERM_PREP(pte_pi_index(_PAGE_KERNEL_ROX), PIE_RX) | \
+ PIRx_ELx_PERM_PREP(pte_pi_index(_PAGE_KERNEL_EXEC), PIE_RWX) | \
+ PIRx_ELx_PERM_PREP(pte_pi_index(_PAGE_KERNEL_RO), PIE_R) | \
+ PIRx_ELx_PERM_PREP(pte_pi_index(_PAGE_KERNEL), PIE_RW))
#endif /* __ASM_PGTABLE_PROT_H */
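PIE_E0/PIE_E1 above are register images assembled by PIRx_ELx_PERM_PREP(), which simply places a 4-bit permission encoding in the nibble selected by the permission index. A hedged standalone sketch of that packing, using local copies of the macros and made-up index assignments rather than pte_pi_index():

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define PIE_NONE_O	0x0ULL
#define PIE_R		0x8ULL
#define PIE_RW		0xcULL
#define PIE_MASK	0xfULL

#define PIRX_ELX_BITS_PER_IDX		4
#define PIRX_ELX_PERM_SHIFT(idx)	((idx) * PIRX_ELX_BITS_PER_IDX)
#define PIRX_ELX_PERM_PREP(idx, perm) \
	(((perm) & PIE_MASK) << PIRX_ELX_PERM_SHIFT(idx))

int main(void)
{
	/* Invented indices: slot 0 -> no access, slot 5 -> RO, slot 7 -> RW. */
	uint64_t pir = PIRX_ELX_PERM_PREP(0, PIE_NONE_O) |
		       PIRX_ELX_PERM_PREP(5, PIE_R) |
		       PIRX_ELX_PERM_PREP(7, PIE_RW);

	/* Each index owns one nibble of the register image. */
	assert(((pir >> (5 * 4)) & 0xf) == PIE_R);
	assert(((pir >> (7 * 4)) & 0xf) == PIE_RW);

	printf("PIR image: 0x%016llx\n", (unsigned long long)pir);
	return 0;
}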
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 0b2a2ad1b9e8..84f05f781a70 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -68,10 +68,6 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define pte_ERROR(e) \
pr_err("%s:%d: bad pte %016llx.\n", __FILE__, __LINE__, pte_val(e))
-/*
- * Macros to convert between a physical address and its placement in a
- * page table entry, taking care of 52-bit addresses.
- */
#ifdef CONFIG_ARM64_PA_BITS_52
static inline phys_addr_t __pte_to_phys(pte_t pte)
{
@@ -84,8 +80,15 @@ static inline pteval_t __phys_to_pte_val(phys_addr_t phys)
return (phys | (phys >> PTE_ADDR_HIGH_SHIFT)) & PHYS_TO_PTE_ADDR_MASK;
}
#else
-#define __pte_to_phys(pte) (pte_val(pte) & PTE_ADDR_LOW)
-#define __phys_to_pte_val(phys) (phys)
+static inline phys_addr_t __pte_to_phys(pte_t pte)
+{
+ return pte_val(pte) & PTE_ADDR_LOW;
+}
+
+static inline pteval_t __phys_to_pte_val(phys_addr_t phys)
+{
+ return phys;
+}
#endif
#define pte_pfn(pte) (__pte_to_phys(pte) >> PAGE_SHIFT)
@@ -483,12 +486,12 @@ static inline pmd_t pte_pmd(pte_t pte)
static inline pgprot_t mk_pud_sect_prot(pgprot_t prot)
{
- return __pgprot((pgprot_val(prot) & ~PUD_TABLE_BIT) | PUD_TYPE_SECT);
+ return __pgprot((pgprot_val(prot) & ~PUD_TYPE_MASK) | PUD_TYPE_SECT);
}
static inline pgprot_t mk_pmd_sect_prot(pgprot_t prot)
{
- return __pgprot((pgprot_val(prot) & ~PMD_TABLE_BIT) | PMD_TYPE_SECT);
+ return __pgprot((pgprot_val(prot) & ~PMD_TYPE_MASK) | PMD_TYPE_SECT);
}
static inline pte_t pte_swp_mkexclusive(pte_t pte)
@@ -548,18 +551,6 @@ static inline int pmd_protnone(pmd_t pmd)
#endif
#define pmd_present(pmd) pte_present(pmd_pte(pmd))
-
-/*
- * THP definitions.
- */
-
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-static inline int pmd_trans_huge(pmd_t pmd)
-{
- return pmd_val(pmd) && pmd_present(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT);
-}
-#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
-
#define pmd_dirty(pmd) pte_dirty(pmd_pte(pmd))
#define pmd_young(pmd) pte_young(pmd_pte(pmd))
#define pmd_valid(pmd) pte_valid(pmd_pte(pmd))
@@ -585,7 +576,18 @@ static inline int pmd_trans_huge(pmd_t pmd)
#define pmd_write(pmd) pte_write(pmd_pte(pmd))
-#define pmd_mkhuge(pmd) (__pmd(pmd_val(pmd) & ~PMD_TABLE_BIT))
+static inline pmd_t pmd_mkhuge(pmd_t pmd)
+{
+ /*
+ * It's possible that the pmd is present-invalid on entry
+ * and in that case it needs to remain present-invalid on
+ * exit. So ensure the VALID bit does not get modified.
+ */
+ pmdval_t mask = PMD_TYPE_MASK & ~PTE_VALID;
+ pmdval_t val = PMD_TYPE_SECT & ~PTE_VALID;
+
+ return __pmd((pmd_val(pmd) & ~mask) | val);
+}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define pmd_devmap(pmd) pte_devmap(pmd_pte(pmd))
@@ -613,7 +615,18 @@ static inline pmd_t pmd_mkspecial(pmd_t pmd)
#define pud_mkyoung(pud) pte_pud(pte_mkyoung(pud_pte(pud)))
#define pud_write(pud) pte_write(pud_pte(pud))
-#define pud_mkhuge(pud) (__pud(pud_val(pud) & ~PUD_TABLE_BIT))
+static inline pud_t pud_mkhuge(pud_t pud)
+{
+ /*
+ * It's possible that the pud is present-invalid on entry
+ * and in that case it needs to remain present-invalid on
+ * exit. So ensure the VALID bit does not get modified.
+ */
+ pudval_t mask = PUD_TYPE_MASK & ~PTE_VALID;
+ pudval_t val = PUD_TYPE_SECT & ~PTE_VALID;
+
+ return __pud((pud_val(pud) & ~mask) | val);
+}
#define __pud_to_phys(pud) __pte_to_phys(pud_pte(pud))
#define __phys_to_pud_val(phys) __phys_to_pte_val(phys)
@@ -724,6 +737,18 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
#define pmd_leaf_size(pmd) (pmd_cont(pmd) ? CONT_PMD_SIZE : PMD_SIZE)
#define pte_leaf_size(pte) (pte_cont(pte) ? CONT_PTE_SIZE : PAGE_SIZE)
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+static inline int pmd_trans_huge(pmd_t pmd)
+{
+ /*
+ * If pmd is present-invalid, pmd_table() won't detect it
+ * as a table, so force the valid bit for the comparison.
+ */
+ return pmd_val(pmd) && pmd_present(pmd) &&
+ !pmd_table(__pmd(pmd_val(pmd) | PTE_VALID));
+}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+
#if defined(CONFIG_ARM64_64K_PAGES) || CONFIG_PGTABLE_LEVELS < 3
static inline bool pud_sect(pud_t pud) { return false; }
static inline bool pud_table(pud_t pud) { return true; }
@@ -805,7 +830,8 @@ static inline unsigned long pmd_page_vaddr(pmd_t pmd)
pr_err("%s:%d: bad pmd %016llx.\n", __FILE__, __LINE__, pmd_val(e))
#define pud_none(pud) (!pud_val(pud))
-#define pud_bad(pud) (!pud_table(pud))
+#define pud_bad(pud) ((pud_val(pud) & PUD_TYPE_MASK) != \
+ PUD_TYPE_TABLE)
#define pud_present(pud) pte_present(pud_pte(pud))
#ifndef __PAGETABLE_PMD_FOLDED
#define pud_leaf(pud) (pud_present(pud) && !pud_table(pud))
@@ -896,7 +922,9 @@ static inline bool mm_pud_folded(const struct mm_struct *mm)
pr_err("%s:%d: bad pud %016llx.\n", __FILE__, __LINE__, pud_val(e))
#define p4d_none(p4d) (pgtable_l4_enabled() && !p4d_val(p4d))
-#define p4d_bad(p4d) (pgtable_l4_enabled() && !(p4d_val(p4d) & P4D_TABLE_BIT))
+#define p4d_bad(p4d) (pgtable_l4_enabled() && \
+ ((p4d_val(p4d) & P4D_TYPE_MASK) != \
+ P4D_TYPE_TABLE))
#define p4d_present(p4d) (!p4d_none(p4d))
static inline void set_p4d(p4d_t *p4dp, p4d_t p4d)
@@ -1023,7 +1051,9 @@ static inline bool mm_p4d_folded(const struct mm_struct *mm)
pr_err("%s:%d: bad p4d %016llx.\n", __FILE__, __LINE__, p4d_val(e))
#define pgd_none(pgd) (pgtable_l5_enabled() && !pgd_val(pgd))
-#define pgd_bad(pgd) (pgtable_l5_enabled() && !(pgd_val(pgd) & PGD_TABLE_BIT))
+#define pgd_bad(pgd) (pgtable_l5_enabled() && \
+ ((pgd_val(pgd) & PGD_TYPE_MASK) != \
+ PGD_TYPE_TABLE))
#define pgd_present(pgd) (!pgd_none(pgd))
static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
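The new pmd_mkhuge()/pud_mkhuge() keep a present-invalid entry invalid by masking only the non-valid type bit. A small standalone illustration of that bit manipulation, using local constants that mirror the descriptor encoding rather than the kernel headers:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define PTE_VALID	(1ULL << 0)
#define PMD_TYPE_MASK	(3ULL << 0)
#define PMD_TYPE_SECT	(1ULL << 0)
#define PMD_TYPE_TABLE	(3ULL << 0)

/* Mirrors the new pmd_mkhuge(): clear bit 1 only, leave the valid bit alone. */
static uint64_t mkhuge(uint64_t pmd)
{
	uint64_t mask = PMD_TYPE_MASK & ~PTE_VALID;	/* == bit 1 */
	uint64_t val  = PMD_TYPE_SECT & ~PTE_VALID;	/* == 0 */

	return (pmd & ~mask) | val;
}

int main(void)
{
	uint64_t valid_entry   = 0x40200000ULL | PMD_TYPE_TABLE; /* bits[1:0] = 0b11 */
	uint64_t invalid_entry = 0x40200000ULL;			  /* valid bit clear */

	/* A valid entry becomes a section descriptor (bits[1:0] = 0b01). */
	assert((mkhuge(valid_entry) & PMD_TYPE_MASK) == PMD_TYPE_SECT);
	/* A present-invalid entry keeps its valid bit clear. */
	assert(!(mkhuge(invalid_entry) & PTE_VALID));

	printf("ok\n");
	return 0;
}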
diff --git a/arch/arm64/include/asm/por.h b/arch/arm64/include/asm/por.h
index e06e9f473675..d913d5b529e4 100644
--- a/arch/arm64/include/asm/por.h
+++ b/arch/arm64/include/asm/por.h
@@ -6,26 +6,27 @@
#ifndef _ASM_ARM64_POR_H
#define _ASM_ARM64_POR_H
-#define POR_BITS_PER_PKEY 4
-#define POR_ELx_IDX(por_elx, idx) (((por_elx) >> ((idx) * POR_BITS_PER_PKEY)) & 0xf)
+#include <asm/sysreg.h>
+
+#define POR_EL0_INIT POR_ELx_PERM_PREP(0, POE_RWX)
static inline bool por_elx_allows_read(u64 por, u8 pkey)
{
- u8 perm = POR_ELx_IDX(por, pkey);
+ u8 perm = POR_ELx_PERM_GET(pkey, por);
return perm & POE_R;
}
static inline bool por_elx_allows_write(u64 por, u8 pkey)
{
- u8 perm = POR_ELx_IDX(por, pkey);
+ u8 perm = POR_ELx_PERM_GET(pkey, por);
return perm & POE_W;
}
static inline bool por_elx_allows_exec(u64 por, u8 pkey)
{
- u8 perm = POR_ELx_IDX(por, pkey);
+ u8 perm = POR_ELx_PERM_GET(pkey, por);
return perm & POE_X;
}
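por_elx_allows_read/write/exec() now decode the per-pkey nibble with POR_ELx_PERM_GET(), the counterpart of POR_ELx_PERM_PREP() used for POR_EL0_INIT and for signal.c's por_enable_all. A minimal standalone sketch of the encode/decode round trip, with local macro copies and an invented pkey layout:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define POE_NONE	0x0ULL
#define POE_R		0x1ULL
#define POE_X		0x2ULL
#define POE_W		0x4ULL
#define POE_RWX		0x7ULL
#define POE_MASK	0xfULL

#define POR_ELX_PERM_SHIFT(idx)		((idx) * 4)
#define POR_ELX_PERM_GET(idx, reg)	(((reg) >> POR_ELX_PERM_SHIFT(idx)) & POE_MASK)
#define POR_ELX_PERM_PREP(idx, perm)	(((perm) & POE_MASK) << POR_ELX_PERM_SHIFT(idx))

static bool allows_write(uint64_t por, unsigned int pkey)
{
	return POR_ELX_PERM_GET(pkey, por) & POE_W;
}

int main(void)
{
	/* Hypothetical layout: pkey 0 gets RWX, pkey 3 is read-only. */
	uint64_t por = POR_ELX_PERM_PREP(0, POE_RWX) | POR_ELX_PERM_PREP(3, POE_R);

	assert(allows_write(por, 0));
	assert(!allows_write(por, 3));
	assert(POR_ELX_PERM_GET(1, por) == POE_NONE);	/* untouched pkeys: no access */

	printf("POR_EL0 image: 0x%016llx\n", (unsigned long long)por);
	return 0;
}

Building the all-RWX value by OR-ing POR_ELx_PERM_PREP(pkey, POE_RWX) over every pkey, as the signal.c hunk does, follows the same pattern.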
diff --git a/arch/arm64/include/asm/spectre.h b/arch/arm64/include/asm/spectre.h
index 0c4d9045c31f..f1524cdeacf1 100644
--- a/arch/arm64/include/asm/spectre.h
+++ b/arch/arm64/include/asm/spectre.h
@@ -97,7 +97,6 @@ enum mitigation_state arm64_get_meltdown_state(void);
enum mitigation_state arm64_get_spectre_bhb_state(void);
bool is_spectre_bhb_affected(const struct arm64_cpu_capabilities *entry, int scope);
-u8 spectre_bhb_loop_affected(int scope);
void spectre_bhb_enable_mitigation(const struct arm64_cpu_capabilities *__unused);
bool try_emulate_el1_ssbs(struct pt_regs *regs, u32 instr);
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index 05ea5223d2d5..2639d3633073 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -562,9 +562,6 @@
#define SYS_ICH_VSEIR_EL2 sys_reg(3, 4, 12, 9, 4)
#define SYS_ICC_SRE_EL2 sys_reg(3, 4, 12, 9, 5)
-#define SYS_ICH_HCR_EL2 sys_reg(3, 4, 12, 11, 0)
-#define SYS_ICH_VTR_EL2 sys_reg(3, 4, 12, 11, 1)
-#define SYS_ICH_MISR_EL2 sys_reg(3, 4, 12, 11, 2)
#define SYS_ICH_EISR_EL2 sys_reg(3, 4, 12, 11, 3)
#define SYS_ICH_ELRSR_EL2 sys_reg(3, 4, 12, 11, 5)
#define SYS_ICH_VMCR_EL2 sys_reg(3, 4, 12, 11, 7)
@@ -985,10 +982,6 @@
#define SYS_MPIDR_SAFE_VAL (BIT(31))
/* GIC Hypervisor interface registers */
-/* ICH_MISR_EL2 bit definitions */
-#define ICH_MISR_EOI (1 << 0)
-#define ICH_MISR_U (1 << 1)
-
/* ICH_LR*_EL2 bit definitions */
#define ICH_LR_VIRTUAL_ID_MASK ((1ULL << 32) - 1)
@@ -1003,17 +996,6 @@
#define ICH_LR_PRIORITY_SHIFT 48
#define ICH_LR_PRIORITY_MASK (0xffULL << ICH_LR_PRIORITY_SHIFT)
-/* ICH_HCR_EL2 bit definitions */
-#define ICH_HCR_EN (1 << 0)
-#define ICH_HCR_UIE (1 << 1)
-#define ICH_HCR_NPIE (1 << 3)
-#define ICH_HCR_TC (1 << 10)
-#define ICH_HCR_TALL0 (1 << 11)
-#define ICH_HCR_TALL1 (1 << 12)
-#define ICH_HCR_TDIR (1 << 14)
-#define ICH_HCR_EOIcount_SHIFT 27
-#define ICH_HCR_EOIcount_MASK (0x1f << ICH_HCR_EOIcount_SHIFT)
-
/* ICH_VMCR_EL2 bit definitions */
#define ICH_VMCR_ACK_CTL_SHIFT 2
#define ICH_VMCR_ACK_CTL_MASK (1 << ICH_VMCR_ACK_CTL_SHIFT)
@@ -1034,18 +1016,6 @@
#define ICH_VMCR_ENG1_SHIFT 1
#define ICH_VMCR_ENG1_MASK (1 << ICH_VMCR_ENG1_SHIFT)
-/* ICH_VTR_EL2 bit definitions */
-#define ICH_VTR_PRI_BITS_SHIFT 29
-#define ICH_VTR_PRI_BITS_MASK (7 << ICH_VTR_PRI_BITS_SHIFT)
-#define ICH_VTR_ID_BITS_SHIFT 23
-#define ICH_VTR_ID_BITS_MASK (7 << ICH_VTR_ID_BITS_SHIFT)
-#define ICH_VTR_SEIS_SHIFT 22
-#define ICH_VTR_SEIS_MASK (1 << ICH_VTR_SEIS_SHIFT)
-#define ICH_VTR_A3V_SHIFT 21
-#define ICH_VTR_A3V_MASK (1 << ICH_VTR_A3V_SHIFT)
-#define ICH_VTR_TDS_SHIFT 19
-#define ICH_VTR_TDS_MASK (1 << ICH_VTR_TDS_SHIFT)
-
/*
* Permission Indirection Extension (PIE) permission encodings.
* Encodings with the _O suffix, have overlays applied (Permission Overlay Extension).
@@ -1062,8 +1032,11 @@
#define PIE_RX UL(0xa)
#define PIE_RW UL(0xc)
#define PIE_RWX UL(0xe)
+#define PIE_MASK UL(0xf)
-#define PIRx_ELx_PERM(idx, perm) ((perm) << ((idx) * 4))
+#define PIRx_ELx_BITS_PER_IDX 4
+#define PIRx_ELx_PERM_SHIFT(idx) ((idx) * PIRx_ELx_BITS_PER_IDX)
+#define PIRx_ELx_PERM_PREP(idx, perm) (((perm) & PIE_MASK) << PIRx_ELx_PERM_SHIFT(idx))
/*
* Permission Overlay Extension (POE) permission encodings.
@@ -1074,12 +1047,14 @@
#define POE_RX UL(0x3)
#define POE_W UL(0x4)
#define POE_RW UL(0x5)
-#define POE_XW UL(0x6)
-#define POE_RXW UL(0x7)
+#define POE_WX UL(0x6)
+#define POE_RWX UL(0x7)
#define POE_MASK UL(0xf)
-/* Initial value for Permission Overlay Extension for EL0 */
-#define POR_EL0_INIT POE_RXW
+#define POR_ELx_BITS_PER_IDX 4
+#define POR_ELx_PERM_SHIFT(idx) ((idx) * POR_ELx_BITS_PER_IDX)
+#define POR_ELx_PERM_GET(idx, reg) (((reg) >> POR_ELx_PERM_SHIFT(idx)) & POE_MASK)
+#define POR_ELx_PERM_PREP(idx, perm) (((perm) & POE_MASK) << POR_ELx_PERM_SHIFT(idx))
/*
* Definitions for Guarded Control Stack
diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h
index bc94e036a26b..8104aee4f9a0 100644
--- a/arch/arm64/include/asm/tlbflush.h
+++ b/arch/arm64/include/asm/tlbflush.h
@@ -396,33 +396,35 @@ static inline void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
#define __flush_tlb_range_op(op, start, pages, stride, \
asid, tlb_level, tlbi_user, lpa2) \
do { \
+ typeof(start) __flush_start = start; \
+ typeof(pages) __flush_pages = pages; \
int num = 0; \
int scale = 3; \
int shift = lpa2 ? 16 : PAGE_SHIFT; \
unsigned long addr; \
\
- while (pages > 0) { \
+ while (__flush_pages > 0) { \
if (!system_supports_tlb_range() || \
- pages == 1 || \
- (lpa2 && start != ALIGN(start, SZ_64K))) { \
- addr = __TLBI_VADDR(start, asid); \
+ __flush_pages == 1 || \
+ (lpa2 && __flush_start != ALIGN(__flush_start, SZ_64K))) { \
+ addr = __TLBI_VADDR(__flush_start, asid); \
__tlbi_level(op, addr, tlb_level); \
if (tlbi_user) \
__tlbi_user_level(op, addr, tlb_level); \
- start += stride; \
- pages -= stride >> PAGE_SHIFT; \
+ __flush_start += stride; \
+ __flush_pages -= stride >> PAGE_SHIFT; \
continue; \
} \
\
- num = __TLBI_RANGE_NUM(pages, scale); \
+ num = __TLBI_RANGE_NUM(__flush_pages, scale); \
if (num >= 0) { \
- addr = __TLBI_VADDR_RANGE(start >> shift, asid, \
+ addr = __TLBI_VADDR_RANGE(__flush_start >> shift, asid, \
scale, num, tlb_level); \
__tlbi(r##op, addr); \
if (tlbi_user) \
__tlbi_user(r##op, addr); \
- start += __TLBI_RANGE_PAGES(num, scale) << PAGE_SHIFT; \
- pages -= __TLBI_RANGE_PAGES(num, scale); \
+ __flush_start += __TLBI_RANGE_PAGES(num, scale) << PAGE_SHIFT; \
+ __flush_pages -= __TLBI_RANGE_PAGES(num, scale);\
} \
scale--; \
} \
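The __flush_tlb_range_op() change shadows 'start' and 'pages' in local typeof() copies so the macro no longer mutates its arguments. A toy example (GNU C, gcc/clang) of the hazard being avoided; the macro names here are invented for illustration only:

#include <assert.h>
#include <stdio.h>

/* Argument-modifying version: the caller's variables change under it. */
#define CONSUME_BAD(start, pages) do {		\
	while ((pages) > 0) {			\
		(start) += 4096;		\
		(pages)--;			\
	}					\
} while (0)

/* Fixed version: shadow the arguments with local copies first. */
#define CONSUME_GOOD(start, pages) do {		\
	typeof(start) __s = (start);		\
	typeof(pages) __p = (pages);		\
	while (__p > 0) {			\
		__s += 4096;			\
		__p--;				\
	}					\
	(void)__s;				\
} while (0)

int main(void)
{
	unsigned long start = 0x1000, pages = 4;

	CONSUME_GOOD(start, pages);
	assert(start == 0x1000 && pages == 4);	/* caller's values preserved */

	CONSUME_BAD(start, pages);
	assert(pages == 0);			/* caller's values clobbered */

	printf("ok\n");
	return 0;
}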
diff --git a/arch/arm64/include/asm/vdso.h b/arch/arm64/include/asm/vdso.h
index 3e3c3fdb1842..61679070f595 100644
--- a/arch/arm64/include/asm/vdso.h
+++ b/arch/arm64/include/asm/vdso.h
@@ -5,7 +5,7 @@
#ifndef __ASM_VDSO_H
#define __ASM_VDSO_H
-#define __VVAR_PAGES 2
+#define __VDSO_PAGES 4
#ifndef __ASSEMBLY__
diff --git a/arch/arm64/include/asm/vdso/compat_gettimeofday.h b/arch/arm64/include/asm/vdso/compat_gettimeofday.h
index 778c1202bbbf..d60ea7a72a9c 100644
--- a/arch/arm64/include/asm/vdso/compat_gettimeofday.h
+++ b/arch/arm64/include/asm/vdso/compat_gettimeofday.h
@@ -104,7 +104,7 @@ int clock_getres32_fallback(clockid_t _clkid, struct old_timespec32 *_ts)
}
static __always_inline u64 __arch_get_hw_counter(s32 clock_mode,
- const struct vdso_data *vd)
+ const struct vdso_time_data *vd)
{
u64 res;
@@ -131,45 +131,33 @@ static __always_inline u64 __arch_get_hw_counter(s32 clock_mode,
return res;
}
-static __always_inline const struct vdso_data *__arch_get_vdso_data(void)
+static __always_inline const struct vdso_time_data *__arch_get_vdso_u_time_data(void)
{
- const struct vdso_data *ret;
+ const struct vdso_time_data *ret;
/*
- * This simply puts &_vdso_data into ret. The reason why we don't use
- * `ret = _vdso_data` is that the compiler tends to optimise this in a
- * very suboptimal way: instead of keeping &_vdso_data in a register,
- * it goes through a relocation almost every time _vdso_data must be
+ * This simply puts &_vdso_time_data into ret. The reason why we don't use
+ * `ret = _vdso_time_data` is that the compiler tends to optimise this in a
+ * very suboptimal way: instead of keeping &_vdso_time_data in a register,
+ * it goes through a relocation almost every time _vdso_time_data must be
* accessed (even in subfunctions). This is both time and space
* consuming: each relocation uses a word in the code section, and it
* has to be loaded at runtime.
*
* This trick hides the assignment from the compiler. Since it cannot
* track where the pointer comes from, it will only use one relocation
- * where __arch_get_vdso_data() is called, and then keep the result in
- * a register.
+	 * where __arch_get_vdso_u_time_data() is called, and then keep the
+ * result in a register.
*/
- asm volatile("mov %0, %1" : "=r"(ret) : "r"(_vdso_data));
+ asm volatile("mov %0, %1" : "=r"(ret) : "r"(&vdso_u_time_data));
return ret;
}
+#define __arch_get_vdso_u_time_data __arch_get_vdso_u_time_data
-#ifdef CONFIG_TIME_NS
-static __always_inline
-const struct vdso_data *__arch_get_timens_vdso_data(const struct vdso_data *vd)
-{
- const struct vdso_data *ret;
-
- /* See __arch_get_vdso_data(). */
- asm volatile("mov %0, %1" : "=r"(ret) : "r"(_timens_data));
-
- return ret;
-}
-#endif
-
-static inline bool vdso_clocksource_ok(const struct vdso_data *vd)
+static inline bool vdso_clocksource_ok(const struct vdso_clock *vc)
{
- return vd->clock_mode == VDSO_CLOCKMODE_ARCHTIMER;
+ return vc->clock_mode == VDSO_CLOCKMODE_ARCHTIMER;
}
#define vdso_clocksource_ok vdso_clocksource_ok
diff --git a/arch/arm64/include/asm/vdso/getrandom.h b/arch/arm64/include/asm/vdso/getrandom.h
index 342f807e2044..a2197da1951b 100644
--- a/arch/arm64/include/asm/vdso/getrandom.h
+++ b/arch/arm64/include/asm/vdso/getrandom.h
@@ -33,18 +33,6 @@ static __always_inline ssize_t getrandom_syscall(void *_buffer, size_t _len, uns
return ret;
}
-static __always_inline const struct vdso_rng_data *__arch_get_vdso_rng_data(void)
-{
- /*
- * The RNG data is in the real VVAR data page, but if a task belongs to a time namespace
- * then VVAR_DATA_PAGE_OFFSET points to the namespace-specific VVAR page and VVAR_TIMENS_
- * PAGE_OFFSET points to the real VVAR page.
- */
- if (IS_ENABLED(CONFIG_TIME_NS) && _vdso_data->clock_mode == VDSO_CLOCKMODE_TIMENS)
- return (void *)&_vdso_rng_data + VVAR_TIMENS_PAGE_OFFSET * (1UL << CONFIG_PAGE_SHIFT);
- return &_vdso_rng_data;
-}
-
#endif /* !__ASSEMBLY__ */
#endif /* __ASM_VDSO_GETRANDOM_H */
diff --git a/arch/arm64/include/asm/vdso/gettimeofday.h b/arch/arm64/include/asm/vdso/gettimeofday.h
index 764d13e2916c..92a2b59a9f3d 100644
--- a/arch/arm64/include/asm/vdso/gettimeofday.h
+++ b/arch/arm64/include/asm/vdso/gettimeofday.h
@@ -67,7 +67,7 @@ int clock_getres_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
}
static __always_inline u64 __arch_get_hw_counter(s32 clock_mode,
- const struct vdso_data *vd)
+ const struct vdso_time_data *vd)
{
u64 res;
@@ -99,20 +99,6 @@ static __always_inline u64 __arch_get_hw_counter(s32 clock_mode,
return res;
}
-static __always_inline
-const struct vdso_data *__arch_get_vdso_data(void)
-{
- return _vdso_data;
-}
-
-#ifdef CONFIG_TIME_NS
-static __always_inline
-const struct vdso_data *__arch_get_timens_vdso_data(const struct vdso_data *vd)
-{
- return _timens_data;
-}
-#endif
-
#endif /* !__ASSEMBLY__ */
#endif /* __ASM_VDSO_GETTIMEOFDAY_H */
diff --git a/arch/arm64/include/asm/vdso/vsyscall.h b/arch/arm64/include/asm/vdso/vsyscall.h
index eea51946d45a..de58951b8df6 100644
--- a/arch/arm64/include/asm/vdso/vsyscall.h
+++ b/arch/arm64/include/asm/vdso/vsyscall.h
@@ -2,44 +2,21 @@
#ifndef __ASM_VDSO_VSYSCALL_H
#define __ASM_VDSO_VSYSCALL_H
-#define __VDSO_RND_DATA_OFFSET 480
-
#ifndef __ASSEMBLY__
#include <vdso/datapage.h>
-enum vvar_pages {
- VVAR_DATA_PAGE_OFFSET,
- VVAR_TIMENS_PAGE_OFFSET,
- VVAR_NR_PAGES,
-};
-
#define VDSO_PRECISION_MASK ~(0xFF00ULL<<48)
-extern struct vdso_data *vdso_data;
/*
* Update the vDSO data page to keep in sync with kernel timekeeping.
*/
static __always_inline
-struct vdso_data *__arm64_get_k_vdso_data(void)
-{
- return vdso_data;
-}
-#define __arch_get_k_vdso_data __arm64_get_k_vdso_data
-
-static __always_inline
-struct vdso_rng_data *__arm64_get_k_vdso_rnd_data(void)
-{
- return (void *)vdso_data + __VDSO_RND_DATA_OFFSET;
-}
-#define __arch_get_k_vdso_rng_data __arm64_get_k_vdso_rnd_data
-
-static __always_inline
-void __arm64_update_vsyscall(struct vdso_data *vdata)
+void __arm64_update_vsyscall(struct vdso_time_data *vdata)
{
- vdata[CS_HRES_COARSE].mask = VDSO_PRECISION_MASK;
- vdata[CS_RAW].mask = VDSO_PRECISION_MASK;
+ vdata->clock_data[CS_HRES_COARSE].mask = VDSO_PRECISION_MASK;
+ vdata->clock_data[CS_RAW].mask = VDSO_PRECISION_MASK;
}
#define __arch_update_vsyscall __arm64_update_vsyscall
diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h
index 568bf858f319..af9d9acaf997 100644
--- a/arch/arm64/include/uapi/asm/kvm.h
+++ b/arch/arm64/include/uapi/asm/kvm.h
@@ -105,6 +105,7 @@ struct kvm_regs {
#define KVM_ARM_VCPU_PTRAUTH_ADDRESS 5 /* VCPU uses address authentication */
#define KVM_ARM_VCPU_PTRAUTH_GENERIC 6 /* VCPU uses generic authentication */
#define KVM_ARM_VCPU_HAS_EL2 7 /* Support nested virtualization */
+#define KVM_ARM_VCPU_HAS_EL2_E2H0 8 /* Limit NV support to E2H RES0 */
struct kvm_vcpu_init {
__u32 target;
@@ -371,6 +372,7 @@ enum {
#endif
};
+/* Vendor hyper call function numbers 0-63 */
#define KVM_REG_ARM_VENDOR_HYP_BMAP KVM_REG_ARM_FW_FEAT_BMAP_REG(2)
enum {
@@ -381,6 +383,17 @@ enum {
#endif
};
+/* Vendor hyper call function numbers 64-127 */
+#define KVM_REG_ARM_VENDOR_HYP_BMAP_2 KVM_REG_ARM_FW_FEAT_BMAP_REG(3)
+
+enum {
+ KVM_REG_ARM_VENDOR_HYP_BIT_DISCOVER_IMPL_VER = 0,
+ KVM_REG_ARM_VENDOR_HYP_BIT_DISCOVER_IMPL_CPUS = 1,
+#ifdef __KERNEL__
+ KVM_REG_ARM_VENDOR_HYP_BMAP_2_BIT_COUNT,
+#endif
+};
+
/* Device Control API on vm fd */
#define KVM_ARM_VM_SMCCC_CTRL 0
#define KVM_ARM_VM_SMCCC_FILTER 0
@@ -403,6 +416,7 @@ enum {
#define KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS 6
#define KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO 7
#define KVM_DEV_ARM_VGIC_GRP_ITS_REGS 8
+#define KVM_DEV_ARM_VGIC_GRP_MAINT_IRQ 9
#define KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_SHIFT 10
#define KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_MASK \
(0x3fffffULL << KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_SHIFT)
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index 7ce555862895..b55f5f705750 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -14,31 +14,85 @@
#include <asm/kvm_asm.h>
#include <asm/smp_plat.h>
+static u64 target_impl_cpu_num;
+static struct target_impl_cpu *target_impl_cpus;
+
+bool cpu_errata_set_target_impl(u64 num, void *impl_cpus)
+{
+ if (target_impl_cpu_num || !num || !impl_cpus)
+ return false;
+
+ target_impl_cpu_num = num;
+ target_impl_cpus = impl_cpus;
+ return true;
+}
+
+static inline bool is_midr_in_range(struct midr_range const *range)
+{
+ int i;
+
+ if (!target_impl_cpu_num)
+ return midr_is_cpu_model_range(read_cpuid_id(), range->model,
+ range->rv_min, range->rv_max);
+
+ for (i = 0; i < target_impl_cpu_num; i++) {
+ if (midr_is_cpu_model_range(target_impl_cpus[i].midr,
+ range->model,
+ range->rv_min, range->rv_max))
+ return true;
+ }
+ return false;
+}
+
+bool is_midr_in_range_list(struct midr_range const *ranges)
+{
+ while (ranges->model)
+ if (is_midr_in_range(ranges++))
+ return true;
+ return false;
+}
+EXPORT_SYMBOL_GPL(is_midr_in_range_list);
+
static bool __maybe_unused
-is_affected_midr_range(const struct arm64_cpu_capabilities *entry, int scope)
+__is_affected_midr_range(const struct arm64_cpu_capabilities *entry,
+ u32 midr, u32 revidr)
{
const struct arm64_midr_revidr *fix;
- u32 midr = read_cpuid_id(), revidr;
-
- WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
- if (!is_midr_in_range(midr, &entry->midr_range))
+ if (!is_midr_in_range(&entry->midr_range))
return false;
midr &= MIDR_REVISION_MASK | MIDR_VARIANT_MASK;
- revidr = read_cpuid(REVIDR_EL1);
for (fix = entry->fixed_revs; fix && fix->revidr_mask; fix++)
if (midr == fix->midr_rv && (revidr & fix->revidr_mask))
return false;
-
return true;
}
static bool __maybe_unused
+is_affected_midr_range(const struct arm64_cpu_capabilities *entry, int scope)
+{
+ int i;
+
+ if (!target_impl_cpu_num) {
+ WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
+ return __is_affected_midr_range(entry, read_cpuid_id(),
+ read_cpuid(REVIDR_EL1));
+ }
+
+ for (i = 0; i < target_impl_cpu_num; i++) {
+ if (__is_affected_midr_range(entry, target_impl_cpus[i].midr,
+					     target_impl_cpus[i].revidr))
+ return true;
+ }
+ return false;
+}
+
+static bool __maybe_unused
is_affected_midr_range_list(const struct arm64_cpu_capabilities *entry,
int scope)
{
WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
- return is_midr_in_range_list(read_cpuid_id(), entry->midr_range_list);
+ return is_midr_in_range_list(entry->midr_range_list);
}
static bool __maybe_unused
@@ -186,12 +240,48 @@ static bool __maybe_unused
has_neoverse_n1_erratum_1542419(const struct arm64_cpu_capabilities *entry,
int scope)
{
- u32 midr = read_cpuid_id();
bool has_dic = read_cpuid_cachetype() & BIT(CTR_EL0_DIC_SHIFT);
const struct midr_range range = MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1);
WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
- return is_midr_in_range(midr, &range) && has_dic;
+ return is_midr_in_range(&range) && has_dic;
+}
+
+static const struct midr_range impdef_pmuv3_cpus[] = {
+ MIDR_ALL_VERSIONS(MIDR_APPLE_M1_ICESTORM),
+ MIDR_ALL_VERSIONS(MIDR_APPLE_M1_FIRESTORM),
+ MIDR_ALL_VERSIONS(MIDR_APPLE_M1_ICESTORM_PRO),
+ MIDR_ALL_VERSIONS(MIDR_APPLE_M1_FIRESTORM_PRO),
+ MIDR_ALL_VERSIONS(MIDR_APPLE_M1_ICESTORM_MAX),
+ MIDR_ALL_VERSIONS(MIDR_APPLE_M1_FIRESTORM_MAX),
+ MIDR_ALL_VERSIONS(MIDR_APPLE_M2_BLIZZARD),
+ MIDR_ALL_VERSIONS(MIDR_APPLE_M2_AVALANCHE),
+ MIDR_ALL_VERSIONS(MIDR_APPLE_M2_BLIZZARD_PRO),
+ MIDR_ALL_VERSIONS(MIDR_APPLE_M2_AVALANCHE_PRO),
+ MIDR_ALL_VERSIONS(MIDR_APPLE_M2_BLIZZARD_MAX),
+ MIDR_ALL_VERSIONS(MIDR_APPLE_M2_AVALANCHE_MAX),
+ {},
+};
+
+static bool has_impdef_pmuv3(const struct arm64_cpu_capabilities *entry, int scope)
+{
+ u64 dfr0 = read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1);
+ unsigned int pmuver;
+
+ if (!is_kernel_in_hyp_mode())
+ return false;
+
+ pmuver = cpuid_feature_extract_unsigned_field(dfr0,
+ ID_AA64DFR0_EL1_PMUVer_SHIFT);
+ if (pmuver != ID_AA64DFR0_EL1_PMUVer_IMP_DEF)
+ return false;
+
+ return is_midr_in_range_list(impdef_pmuv3_cpus);
+}
+
+static void cpu_enable_impdef_pmuv3_traps(const struct arm64_cpu_capabilities *__unused)
+{
+ sysreg_clear_set_s(SYS_HACR_EL2, 0, BIT(56));
}
#ifdef CONFIG_ARM64_WORKAROUND_REPEAT_TLBI
@@ -795,5 +885,12 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
})),
},
{
+ .desc = "Apple IMPDEF PMUv3 Traps",
+ .capability = ARM64_WORKAROUND_PMUV3_IMPDEF_TRAPS,
+ .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
+ .matches = has_impdef_pmuv3,
+ .cpu_enable = cpu_enable_impdef_pmuv3_traps,
+ },
+ {
}
};
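is_midr_in_range_list() above stops at the first entry whose 'model' field is zero, so every MIDR range list it is handed must end with an empty "{}," sentinel (as the Spectre-BHB lists later in this series do). A standalone sketch of that convention with simplified types:

#include <stdbool.h>
#include <stdio.h>

struct midr_range {
	unsigned int model;
	unsigned int rv_min, rv_max;
};

static bool in_list(unsigned int model, const struct midr_range *ranges)
{
	/* Walks until the zero-initialised sentinel entry. */
	for (; ranges->model; ranges++)
		if (ranges->model == model)	/* revision check elided */
			return true;
	return false;
}

int main(void)
{
	static const struct midr_range list[] = {
		{ .model = 0xd0b },	/* an example part number */
		{ .model = 0xd0c },
		{},			/* sentinel: without it the walk runs off the end */
	};

	printf("%d %d\n", in_list(0xd0b, list), in_list(0xd49, list));
	return 0;
}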
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index d561cf3b8ac7..9c4d6d552b25 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -86,6 +86,7 @@
#include <asm/kvm_host.h>
#include <asm/mmu_context.h>
#include <asm/mte.h>
+#include <asm/hypervisor.h>
#include <asm/processor.h>
#include <asm/smp.h>
#include <asm/sysreg.h>
@@ -497,6 +498,7 @@ static const struct arm64_ftr_bits ftr_id_aa64mmfr3[] = {
static const struct arm64_ftr_bits ftr_id_aa64mmfr4[] = {
S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR4_EL1_E2H0_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR4_EL1_NV_frac_SHIFT, 4, 0),
ARM64_FTR_END,
};
@@ -1792,7 +1794,7 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
char const *str = "kpti command line option";
bool meltdown_safe;
- meltdown_safe = is_midr_in_range_list(read_cpuid_id(), kpti_safe_list);
+ meltdown_safe = is_midr_in_range_list(kpti_safe_list);
/* Defer to CPU feature registers */
if (has_cpuid_feature(entry, scope))
@@ -1862,7 +1864,7 @@ static bool has_nv1(const struct arm64_cpu_capabilities *entry, int scope)
return (__system_matches_cap(ARM64_HAS_NESTED_VIRT) &&
!(has_cpuid_feature(entry, scope) ||
- is_midr_in_range_list(read_cpuid_id(), nv1_ni_list)));
+ is_midr_in_range_list(nv1_ni_list)));
}
#if defined(ID_AA64MMFR0_EL1_TGRAN_LPA2) && defined(ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_LPA2)
@@ -1898,6 +1900,28 @@ static bool has_lpa2(const struct arm64_cpu_capabilities *entry, int scope)
}
#endif
+#ifdef CONFIG_HW_PERF_EVENTS
+static bool has_pmuv3(const struct arm64_cpu_capabilities *entry, int scope)
+{
+ u64 dfr0 = read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1);
+ unsigned int pmuver;
+
+ /*
+ * PMUVer follows the standard ID scheme for an unsigned field with the
+ * exception of 0xF (IMP_DEF) which is treated specially and implies
+ * FEAT_PMUv3 is not implemented.
+ *
+ * See DDI0487L.a D24.1.3.2 for more details.
+ */
+ pmuver = cpuid_feature_extract_unsigned_field(dfr0,
+ ID_AA64DFR0_EL1_PMUVer_SHIFT);
+ if (pmuver == ID_AA64DFR0_EL1_PMUVer_IMP_DEF)
+ return false;
+
+ return pmuver >= ID_AA64DFR0_EL1_PMUVer_IMP;
+}
+#endif
+
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
#define KPTI_NG_TEMP_VA (-(1UL << PMD_SHIFT))
@@ -2045,7 +2069,7 @@ static bool cpu_has_broken_dbm(void)
{},
};
- return is_midr_in_range_list(read_cpuid_id(), cpus);
+ return is_midr_in_range_list(cpus);
}
static bool cpu_can_use_dbm(const struct arm64_cpu_capabilities *cap)
@@ -2162,7 +2186,7 @@ static bool has_nested_virt_support(const struct arm64_cpu_capabilities *cap,
if (kvm_get_mode() != KVM_MODE_NV)
return false;
- if (!has_cpuid_feature(cap, scope)) {
+ if (!cpucap_multi_entry_cap_matches(cap, scope)) {
pr_warn("unavailable: %s\n", cap->desc);
return false;
}
@@ -2519,7 +2543,17 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.capability = ARM64_HAS_NESTED_VIRT,
.type = ARM64_CPUCAP_SYSTEM_FEATURE,
.matches = has_nested_virt_support,
- ARM64_CPUID_FIELDS(ID_AA64MMFR2_EL1, NV, NV2)
+ .match_list = (const struct arm64_cpu_capabilities []){
+ {
+ .matches = has_cpuid_feature,
+ ARM64_CPUID_FIELDS(ID_AA64MMFR2_EL1, NV, NV2)
+ },
+ {
+ .matches = has_cpuid_feature,
+ ARM64_CPUID_FIELDS(ID_AA64MMFR4_EL1, NV_frac, NV2_ONLY)
+ },
+ { /* Sentinel */ }
+ },
},
{
.capability = ARM64_HAS_32BIT_EL0_DO_NOT_USE,
@@ -2999,6 +3033,14 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
ARM64_CPUID_FIELDS(ID_AA64PFR1_EL1, GCS, IMP)
},
#endif
+#ifdef CONFIG_HW_PERF_EVENTS
+ {
+ .desc = "PMUv3",
+ .capability = ARM64_HAS_PMUV3,
+ .type = ARM64_CPUCAP_SYSTEM_FEATURE,
+ .matches = has_pmuv3,
+ },
+#endif
{},
};
@@ -3680,6 +3722,7 @@ unsigned long cpu_get_elf_hwcap3(void)
static void __init setup_boot_cpu_capabilities(void)
{
+ kvm_arm_target_impl_cpu_init();
/*
* The boot CPU's feature register values have been recorded. Detect
* boot cpucaps and local cpucaps for the boot CPU, then enable and
diff --git a/arch/arm64/kernel/elfcore.c b/arch/arm64/kernel/elfcore.c
index 2e94d20c4ac7..b735f4c2fe5e 100644
--- a/arch/arm64/kernel/elfcore.c
+++ b/arch/arm64/kernel/elfcore.c
@@ -27,9 +27,10 @@ static int mte_dump_tag_range(struct coredump_params *cprm,
int ret = 1;
unsigned long addr;
void *tags = NULL;
+ int locked = 0;
for (addr = start; addr < start + len; addr += PAGE_SIZE) {
- struct page *page = get_dump_page(addr);
+ struct page *page = get_dump_page(addr, &locked);
/*
* get_dump_page() returns NULL when encountering an empty
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 5ab1970ee543..2ce73525de2c 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -298,25 +298,8 @@ SYM_INNER_LABEL(init_el2, SYM_L_LOCAL)
msr sctlr_el2, x0
isb
0:
- mov_q x0, HCR_HOST_NVHE_FLAGS
-
- /*
- * Compliant CPUs advertise their VHE-onlyness with
- * ID_AA64MMFR4_EL1.E2H0 < 0. HCR_EL2.E2H can be
- * RES1 in that case. Publish the E2H bit early so that
- * it can be picked up by the init_el2_state macro.
- *
- * Fruity CPUs seem to have HCR_EL2.E2H set to RAO/WI, but
- * don't advertise it (they predate this relaxation).
- */
- mrs_s x1, SYS_ID_AA64MMFR4_EL1
- tbz x1, #(ID_AA64MMFR4_EL1_E2H0_SHIFT + ID_AA64MMFR4_EL1_E2H0_WIDTH - 1), 1f
-
- orr x0, x0, #HCR_E2H
-1:
- msr hcr_el2, x0
- isb
+ init_el2_hcr HCR_HOST_NVHE_FLAGS
init_el2_state
/* Hypervisor stub */
@@ -339,7 +322,8 @@ SYM_INNER_LABEL(init_el2, SYM_L_LOCAL)
msr sctlr_el1, x1
mov x2, xzr
3:
- __init_el2_nvhe_prepare_eret
+ mov x0, #INIT_PSTATE_EL1
+ msr spsr_el2, x0
mov w0, #BOOT_CPU_MODE_EL2
orr x0, x0, x2
diff --git a/arch/arm64/kernel/image-vars.h b/arch/arm64/kernel/image-vars.h
index ef3a69cc398e..5e3c4b58f279 100644
--- a/arch/arm64/kernel/image-vars.h
+++ b/arch/arm64/kernel/image-vars.h
@@ -49,6 +49,7 @@ PROVIDE(__pi_arm64_sw_feature_override = arm64_sw_feature_override);
PROVIDE(__pi_arm64_use_ng_mappings = arm64_use_ng_mappings);
#ifdef CONFIG_CAVIUM_ERRATUM_27456
PROVIDE(__pi_cavium_erratum_27456_cpus = cavium_erratum_27456_cpus);
+PROVIDE(__pi_is_midr_in_range_list = is_midr_in_range_list);
#endif
PROVIDE(__pi__ctype = _ctype);
PROVIDE(__pi_memstart_offset_seed = memstart_offset_seed);
@@ -112,11 +113,6 @@ KVM_NVHE_ALIAS(broken_cntvoff_key);
KVM_NVHE_ALIAS(__start___kvm_ex_table);
KVM_NVHE_ALIAS(__stop___kvm_ex_table);
-/* PMU available static key */
-#ifdef CONFIG_HW_PERF_EVENTS
-KVM_NVHE_ALIAS(kvm_arm_pmu_available);
-#endif
-
/* Position-independent library routines */
KVM_NVHE_ALIAS_HYP(clear_page, __pi_clear_page);
KVM_NVHE_ALIAS_HYP(copy_page, __pi_copy_page);
diff --git a/arch/arm64/kernel/pi/map_range.c b/arch/arm64/kernel/pi/map_range.c
index 2b69e3beeef8..81345f68f9fc 100644
--- a/arch/arm64/kernel/pi/map_range.c
+++ b/arch/arm64/kernel/pi/map_range.c
@@ -31,7 +31,7 @@ void __init map_range(u64 *pte, u64 start, u64 end, u64 pa, pgprot_t prot,
{
u64 cmask = (level == 3) ? CONT_PTE_SIZE - 1 : U64_MAX;
pteval_t protval = pgprot_val(prot) & ~PTE_TYPE_MASK;
- int lshift = (3 - level) * (PAGE_SHIFT - 3);
+ int lshift = (3 - level) * PTDESC_TABLE_SHIFT;
u64 lmask = (PAGE_SIZE << lshift) - 1;
start &= PAGE_MASK;
@@ -45,12 +45,12 @@ void __init map_range(u64 *pte, u64 start, u64 end, u64 pa, pgprot_t prot,
* clearing the mapping
*/
if (protval)
- protval |= (level < 3) ? PMD_TYPE_SECT : PTE_TYPE_PAGE;
+ protval |= (level == 2) ? PMD_TYPE_SECT : PTE_TYPE_PAGE;
while (start < end) {
u64 next = min((start | lmask) + 1, PAGE_ALIGN(end));
- if (level < 3 && (start | next | pa) & lmask) {
+ if (level < 2 || (level == 2 && (start | next | pa) & lmask)) {
/*
* This chunk needs a finer grained mapping. Create a
* table mapping if necessary and recurse.
diff --git a/arch/arm64/kernel/proton-pack.c b/arch/arm64/kernel/proton-pack.c
index da53722f95d4..d5d11fd11549 100644
--- a/arch/arm64/kernel/proton-pack.c
+++ b/arch/arm64/kernel/proton-pack.c
@@ -172,7 +172,7 @@ static enum mitigation_state spectre_v2_get_cpu_hw_mitigation_state(void)
return SPECTRE_UNAFFECTED;
/* Alternatively, we have a list of unaffected CPUs */
- if (is_midr_in_range_list(read_cpuid_id(), spectre_v2_safe_list))
+ if (is_midr_in_range_list(spectre_v2_safe_list))
return SPECTRE_UNAFFECTED;
return SPECTRE_VULNERABLE;
@@ -331,7 +331,7 @@ bool has_spectre_v3a(const struct arm64_cpu_capabilities *entry, int scope)
};
WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
- return is_midr_in_range_list(read_cpuid_id(), spectre_v3a_unsafe_list);
+ return is_midr_in_range_list(spectre_v3a_unsafe_list);
}
void spectre_v3a_enable_mitigation(const struct arm64_cpu_capabilities *__unused)
@@ -475,7 +475,7 @@ static enum mitigation_state spectre_v4_get_cpu_hw_mitigation_state(void)
{ /* sentinel */ },
};
- if (is_midr_in_range_list(read_cpuid_id(), spectre_v4_safe_list))
+ if (is_midr_in_range_list(spectre_v4_safe_list))
return SPECTRE_UNAFFECTED;
/* CPU features are detected first */
@@ -845,52 +845,86 @@ static unsigned long system_bhb_mitigations;
* This must be called with SCOPE_LOCAL_CPU for each type of CPU, before any
* SCOPE_SYSTEM call will give the right answer.
*/
-u8 spectre_bhb_loop_affected(int scope)
+static bool is_spectre_bhb_safe(int scope)
+{
+ static const struct midr_range spectre_bhb_safe_list[] = {
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A510),
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A520),
+ MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53),
+ MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_2XX_SILVER),
+ MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_3XX_SILVER),
+ MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_SILVER),
+ {},
+ };
+ static bool all_safe = true;
+
+ if (scope != SCOPE_LOCAL_CPU)
+ return all_safe;
+
+ if (is_midr_in_range_list(spectre_bhb_safe_list))
+ return true;
+
+ all_safe = false;
+
+ return false;
+}
+
+static u8 spectre_bhb_loop_affected(void)
{
u8 k = 0;
- static u8 max_bhb_k;
-
- if (scope == SCOPE_LOCAL_CPU) {
- static const struct midr_range spectre_bhb_k32_list[] = {
- MIDR_ALL_VERSIONS(MIDR_CORTEX_A78),
- MIDR_ALL_VERSIONS(MIDR_CORTEX_A78AE),
- MIDR_ALL_VERSIONS(MIDR_CORTEX_A78C),
- MIDR_ALL_VERSIONS(MIDR_CORTEX_X1),
- MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
- MIDR_ALL_VERSIONS(MIDR_CORTEX_X2),
- MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
- MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V1),
- {},
- };
- static const struct midr_range spectre_bhb_k24_list[] = {
- MIDR_ALL_VERSIONS(MIDR_CORTEX_A76),
- MIDR_ALL_VERSIONS(MIDR_CORTEX_A77),
- MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1),
- {},
- };
- static const struct midr_range spectre_bhb_k11_list[] = {
- MIDR_ALL_VERSIONS(MIDR_AMPERE1),
- {},
- };
- static const struct midr_range spectre_bhb_k8_list[] = {
- MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
- MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
- {},
- };
-
- if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k32_list))
- k = 32;
- else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k24_list))
- k = 24;
- else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k11_list))
- k = 11;
- else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k8_list))
- k = 8;
-
- max_bhb_k = max(max_bhb_k, k);
- } else {
- k = max_bhb_k;
- }
+
+ static const struct midr_range spectre_bhb_k132_list[] = {
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_X3),
+ MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V2),
+		{},
+	};
+ static const struct midr_range spectre_bhb_k38_list[] = {
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A715),
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A720),
+		{},
+	};
+ static const struct midr_range spectre_bhb_k32_list[] = {
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A78),
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A78AE),
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A78C),
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_X1),
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_X2),
+ MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
+ MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V1),
+ {},
+ };
+ static const struct midr_range spectre_bhb_k24_list[] = {
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A76),
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A76AE),
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A77),
+ MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1),
+ MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_GOLD),
+ {},
+ };
+ static const struct midr_range spectre_bhb_k11_list[] = {
+ MIDR_ALL_VERSIONS(MIDR_AMPERE1),
+ {},
+ };
+ static const struct midr_range spectre_bhb_k8_list[] = {
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
+ {},
+ };
+
+ if (is_midr_in_range_list(spectre_bhb_k132_list))
+ k = 132;
+ else if (is_midr_in_range_list(spectre_bhb_k38_list))
+ k = 38;
+ else if (is_midr_in_range_list(spectre_bhb_k32_list))
+ k = 32;
+ else if (is_midr_in_range_list(spectre_bhb_k24_list))
+ k = 24;
+ else if (is_midr_in_range_list(spectre_bhb_k11_list))
+ k = 11;
+ else if (is_midr_in_range_list(spectre_bhb_k8_list))
+ k = 8;
return k;
}
@@ -916,29 +950,13 @@ static enum mitigation_state spectre_bhb_get_cpu_fw_mitigation_state(void)
}
}
-static bool is_spectre_bhb_fw_affected(int scope)
+static bool has_spectre_bhb_fw_mitigation(void)
{
- static bool system_affected;
enum mitigation_state fw_state;
bool has_smccc = arm_smccc_1_1_get_conduit() != SMCCC_CONDUIT_NONE;
- static const struct midr_range spectre_bhb_firmware_mitigated_list[] = {
- MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
- MIDR_ALL_VERSIONS(MIDR_CORTEX_A75),
- {},
- };
- bool cpu_in_list = is_midr_in_range_list(read_cpuid_id(),
- spectre_bhb_firmware_mitigated_list);
-
- if (scope != SCOPE_LOCAL_CPU)
- return system_affected;
fw_state = spectre_bhb_get_cpu_fw_mitigation_state();
- if (cpu_in_list || (has_smccc && fw_state == SPECTRE_MITIGATED)) {
- system_affected = true;
- return true;
- }
-
- return false;
+ return has_smccc && fw_state == SPECTRE_MITIGATED;
}
static bool supports_ecbhb(int scope)
@@ -954,6 +972,8 @@ static bool supports_ecbhb(int scope)
ID_AA64MMFR1_EL1_ECBHB_SHIFT);
}
+static u8 max_bhb_k;
+
bool is_spectre_bhb_affected(const struct arm64_cpu_capabilities *entry,
int scope)
{
@@ -962,16 +982,18 @@ bool is_spectre_bhb_affected(const struct arm64_cpu_capabilities *entry,
if (supports_csv2p3(scope))
return false;
- if (supports_clearbhb(scope))
- return true;
-
- if (spectre_bhb_loop_affected(scope))
- return true;
+ if (is_spectre_bhb_safe(scope))
+ return false;
- if (is_spectre_bhb_fw_affected(scope))
- return true;
+ /*
+ * At this point the core isn't known to be "safe" so we're going to
+ * assume it's vulnerable. We still need to update `max_bhb_k` though,
+	 * but only if we aren't mitigating with clearbhb.
+ */
+ if (scope == SCOPE_LOCAL_CPU && !supports_clearbhb(SCOPE_LOCAL_CPU))
+ max_bhb_k = max(max_bhb_k, spectre_bhb_loop_affected());
- return false;
+ return true;
}
static void this_cpu_set_vectors(enum arm64_bp_harden_el1_vectors slot)
@@ -1002,7 +1024,7 @@ early_param("nospectre_bhb", parse_spectre_bhb_param);
void spectre_bhb_enable_mitigation(const struct arm64_cpu_capabilities *entry)
{
bp_hardening_cb_t cpu_cb;
- enum mitigation_state fw_state, state = SPECTRE_VULNERABLE;
+ enum mitigation_state state = SPECTRE_VULNERABLE;
struct bp_hardening_data *data = this_cpu_ptr(&bp_hardening_data);
if (!is_spectre_bhb_affected(entry, SCOPE_LOCAL_CPU))
@@ -1028,7 +1050,7 @@ void spectre_bhb_enable_mitigation(const struct arm64_cpu_capabilities *entry)
this_cpu_set_vectors(EL1_VECTOR_BHB_CLEAR_INSN);
state = SPECTRE_MITIGATED;
set_bit(BHB_INSN, &system_bhb_mitigations);
- } else if (spectre_bhb_loop_affected(SCOPE_LOCAL_CPU)) {
+ } else if (spectre_bhb_loop_affected()) {
/*
* Ensure KVM uses the indirect vector which will have the
* branchy-loop added. A57/A72-r0 will already have selected
@@ -1041,32 +1063,29 @@ void spectre_bhb_enable_mitigation(const struct arm64_cpu_capabilities *entry)
this_cpu_set_vectors(EL1_VECTOR_BHB_LOOP);
state = SPECTRE_MITIGATED;
set_bit(BHB_LOOP, &system_bhb_mitigations);
- } else if (is_spectre_bhb_fw_affected(SCOPE_LOCAL_CPU)) {
- fw_state = spectre_bhb_get_cpu_fw_mitigation_state();
- if (fw_state == SPECTRE_MITIGATED) {
- /*
- * Ensure KVM uses one of the spectre bp_hardening
- * vectors. The indirect vector doesn't include the EL3
- * call, so needs upgrading to
- * HYP_VECTOR_SPECTRE_INDIRECT.
- */
- if (!data->slot || data->slot == HYP_VECTOR_INDIRECT)
- data->slot += 1;
-
- this_cpu_set_vectors(EL1_VECTOR_BHB_FW);
-
- /*
- * The WA3 call in the vectors supersedes the WA1 call
- * made during context-switch. Uninstall any firmware
- * bp_hardening callback.
- */
- cpu_cb = spectre_v2_get_sw_mitigation_cb();
- if (__this_cpu_read(bp_hardening_data.fn) != cpu_cb)
- __this_cpu_write(bp_hardening_data.fn, NULL);
-
- state = SPECTRE_MITIGATED;
- set_bit(BHB_FW, &system_bhb_mitigations);
- }
+ } else if (has_spectre_bhb_fw_mitigation()) {
+ /*
+ * Ensure KVM uses one of the spectre bp_hardening
+ * vectors. The indirect vector doesn't include the EL3
+ * call, so needs upgrading to
+ * HYP_VECTOR_SPECTRE_INDIRECT.
+ */
+ if (!data->slot || data->slot == HYP_VECTOR_INDIRECT)
+ data->slot += 1;
+
+ this_cpu_set_vectors(EL1_VECTOR_BHB_FW);
+
+ /*
+ * The WA3 call in the vectors supersedes the WA1 call
+ * made during context-switch. Uninstall any firmware
+ * bp_hardening callback.
+ */
+ cpu_cb = spectre_v2_get_sw_mitigation_cb();
+ if (__this_cpu_read(bp_hardening_data.fn) != cpu_cb)
+ __this_cpu_write(bp_hardening_data.fn, NULL);
+
+ state = SPECTRE_MITIGATED;
+ set_bit(BHB_FW, &system_bhb_mitigations);
}
update_mitigation_state(&spectre_bhb_state, state);
@@ -1100,7 +1119,6 @@ void noinstr spectre_bhb_patch_loop_iter(struct alt_instr *alt,
{
u8 rd;
u32 insn;
- u16 loop_count = spectre_bhb_loop_affected(SCOPE_SYSTEM);
BUG_ON(nr_inst != 1); /* MOV -> MOV */
@@ -1109,7 +1127,7 @@ void noinstr spectre_bhb_patch_loop_iter(struct alt_instr *alt,
insn = le32_to_cpu(*origptr);
rd = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RD, insn);
- insn = aarch64_insn_gen_movewide(rd, loop_count, 0,
+ insn = aarch64_insn_gen_movewide(rd, max_bhb_k, 0,
AARCH64_INSN_VARIANT_64BIT,
AARCH64_INSN_MOVEWIDE_ZERO);
*updptr++ = cpu_to_le32(insn);
diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c
index 99ea26d400ff..a7c37afb4ebe 100644
--- a/arch/arm64/kernel/signal.c
+++ b/arch/arm64/kernel/signal.c
@@ -91,7 +91,7 @@ static void save_reset_user_access_state(struct user_access_state *ua_state)
u64 por_enable_all = 0;
for (int pkey = 0; pkey < arch_max_pkey(); pkey++)
- por_enable_all |= POE_RXW << (pkey * POR_BITS_PER_PKEY);
+ por_enable_all |= POR_ELx_PERM_PREP(pkey, POE_RWX);
ua_state->por_el0 = read_sysreg_s(SYS_POR_EL0);
write_sysreg_s(por_enable_all, SYS_POR_EL0);
diff --git a/arch/arm64/kernel/topology.c b/arch/arm64/kernel/topology.c
index cb180684d10d..5d07ee85bdae 100644
--- a/arch/arm64/kernel/topology.c
+++ b/arch/arm64/kernel/topology.c
@@ -15,8 +15,11 @@
#include <linux/arch_topology.h>
#include <linux/cacheinfo.h>
#include <linux/cpufreq.h>
+#include <linux/cpu_smt.h>
#include <linux/init.h>
#include <linux/percpu.h>
+#include <linux/sched/isolation.h>
+#include <linux/xarray.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
@@ -37,17 +40,28 @@ static bool __init acpi_cpu_is_threaded(int cpu)
return !!is_threaded;
}
+struct cpu_smt_info {
+ unsigned int thread_num;
+ int core_id;
+};
+
/*
* Propagate the topology information of the processor_topology_node tree to the
* cpu_topology array.
*/
int __init parse_acpi_topology(void)
{
+ unsigned int max_smt_thread_num = 1;
+ struct cpu_smt_info *entry;
+ struct xarray hetero_cpu;
+ unsigned long hetero_id;
int cpu, topology_id;
if (acpi_disabled)
return 0;
+ xa_init(&hetero_cpu);
+
for_each_possible_cpu(cpu) {
topology_id = find_acpi_cpu_topology(cpu, 0);
if (topology_id < 0)
@@ -57,6 +71,34 @@ int __init parse_acpi_topology(void)
cpu_topology[cpu].thread_id = topology_id;
topology_id = find_acpi_cpu_topology(cpu, 1);
cpu_topology[cpu].core_id = topology_id;
+
+ /*
+ * In the PPTT, CPUs below a node with the 'identical
+ * implementation' flag have the same number of threads.
+ * Count the number of threads for only one CPU (i.e.
+ * one core_id) among those with the same hetero_id.
+ * See the comment of find_acpi_cpu_topology_hetero_id()
+ * for more details.
+ *
+ * One entry is created for each node having:
+ * - the 'identical implementation' flag
+ * - its parent not having the flag
+ */
+ hetero_id = find_acpi_cpu_topology_hetero_id(cpu);
+ entry = xa_load(&hetero_cpu, hetero_id);
+ if (!entry) {
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ WARN_ON_ONCE(!entry);
+
+ if (entry) {
+ entry->core_id = topology_id;
+ entry->thread_num = 1;
+ xa_store(&hetero_cpu, hetero_id,
+ entry, GFP_KERNEL);
+ }
+ } else if (entry->core_id == topology_id) {
+ entry->thread_num++;
+ }
} else {
cpu_topology[cpu].thread_id = -1;
cpu_topology[cpu].core_id = topology_id;
@@ -67,6 +109,19 @@ int __init parse_acpi_topology(void)
cpu_topology[cpu].package_id = topology_id;
}
+ /*
+ * This is a short loop since the number of XArray elements is the
+ * number of heterogeneous CPU clusters. On a homogeneous system
+ * there's only one entry in the XArray.
+ */
+ xa_for_each(&hetero_cpu, hetero_id, entry) {
+ max_smt_thread_num = max(max_smt_thread_num, entry->thread_num);
+ xa_erase(&hetero_cpu, hetero_id);
+ kfree(entry);
+ }
+
+ cpu_smt_set_num_threads(max_smt_thread_num, max_smt_thread_num);
+ xa_destroy(&hetero_cpu);
return 0;
}
#endif
@@ -88,18 +143,28 @@ int __init parse_acpi_topology(void)
* initialized.
*/
static DEFINE_PER_CPU_READ_MOSTLY(unsigned long, arch_max_freq_scale) = 1UL << (2 * SCHED_CAPACITY_SHIFT);
-static DEFINE_PER_CPU(u64, arch_const_cycles_prev);
-static DEFINE_PER_CPU(u64, arch_core_cycles_prev);
static cpumask_var_t amu_fie_cpus;
+struct amu_cntr_sample {
+ u64 arch_const_cycles_prev;
+ u64 arch_core_cycles_prev;
+ unsigned long last_scale_update;
+};
+
+static DEFINE_PER_CPU_SHARED_ALIGNED(struct amu_cntr_sample, cpu_amu_samples);
+
void update_freq_counters_refs(void)
{
- this_cpu_write(arch_core_cycles_prev, read_corecnt());
- this_cpu_write(arch_const_cycles_prev, read_constcnt());
+ struct amu_cntr_sample *amu_sample = this_cpu_ptr(&cpu_amu_samples);
+
+ amu_sample->arch_core_cycles_prev = read_corecnt();
+ amu_sample->arch_const_cycles_prev = read_constcnt();
}
static inline bool freq_counters_valid(int cpu)
{
+ struct amu_cntr_sample *amu_sample = per_cpu_ptr(&cpu_amu_samples, cpu);
+
if ((cpu >= nr_cpu_ids) || !cpumask_test_cpu(cpu, cpu_present_mask))
return false;
@@ -108,8 +173,8 @@ static inline bool freq_counters_valid(int cpu)
return false;
}
- if (unlikely(!per_cpu(arch_const_cycles_prev, cpu) ||
- !per_cpu(arch_core_cycles_prev, cpu))) {
+ if (unlikely(!amu_sample->arch_const_cycles_prev ||
+ !amu_sample->arch_core_cycles_prev)) {
pr_debug("CPU%d: cycle counters are not enabled.\n", cpu);
return false;
}
@@ -152,17 +217,22 @@ void freq_inv_set_max_ratio(int cpu, u64 max_rate)
static void amu_scale_freq_tick(void)
{
+ struct amu_cntr_sample *amu_sample = this_cpu_ptr(&cpu_amu_samples);
u64 prev_core_cnt, prev_const_cnt;
u64 core_cnt, const_cnt, scale;
- prev_const_cnt = this_cpu_read(arch_const_cycles_prev);
- prev_core_cnt = this_cpu_read(arch_core_cycles_prev);
+ prev_const_cnt = amu_sample->arch_const_cycles_prev;
+ prev_core_cnt = amu_sample->arch_core_cycles_prev;
update_freq_counters_refs();
- const_cnt = this_cpu_read(arch_const_cycles_prev);
- core_cnt = this_cpu_read(arch_core_cycles_prev);
+ const_cnt = amu_sample->arch_const_cycles_prev;
+ core_cnt = amu_sample->arch_core_cycles_prev;
+ /*
+ * This should not happen unless the AMUs have been reset and the
+ * counter values have not been restored - unlikely
+ */
if (unlikely(core_cnt <= prev_core_cnt ||
const_cnt <= prev_const_cnt))
return;
@@ -182,6 +252,8 @@ static void amu_scale_freq_tick(void)
scale = min_t(unsigned long, scale, SCHED_CAPACITY_SCALE);
this_cpu_write(arch_freq_scale, (unsigned long)scale);
+
+ amu_sample->last_scale_update = jiffies;
}
static struct scale_freq_data amu_sfd = {
@@ -189,6 +261,96 @@ static struct scale_freq_data amu_sfd = {
.set_freq_scale = amu_scale_freq_tick,
};
+static __always_inline bool amu_fie_cpu_supported(unsigned int cpu)
+{
+ return cpumask_available(amu_fie_cpus) &&
+ cpumask_test_cpu(cpu, amu_fie_cpus);
+}
+
+void arch_cpu_idle_enter(void)
+{
+ unsigned int cpu = smp_processor_id();
+
+ if (!amu_fie_cpu_supported(cpu))
+ return;
+
+ /* Kick in AMU update but only if one has not happened already */
+ if (housekeeping_cpu(cpu, HK_TYPE_TICK) &&
+ time_is_before_jiffies(per_cpu(cpu_amu_samples.last_scale_update, cpu)))
+ amu_scale_freq_tick();
+}
+
+#define AMU_SAMPLE_EXP_MS 20
+
+int arch_freq_get_on_cpu(int cpu)
+{
+ struct amu_cntr_sample *amu_sample;
+ unsigned int start_cpu = cpu;
+ unsigned long last_update;
+ unsigned int freq = 0;
+ u64 scale;
+
+ if (!amu_fie_cpu_supported(cpu) || !arch_scale_freq_ref(cpu))
+ return -EOPNOTSUPP;
+
+ while (1) {
+
+ amu_sample = per_cpu_ptr(&cpu_amu_samples, cpu);
+
+ last_update = amu_sample->last_scale_update;
+
+ /*
+ * For those CPUs that are in full dynticks mode, or those that have
+	 * not seen a tick for a while, try an alternative source for the counters
+	 * (and thus freq scale), if available, for the given policy: this boils
+ * down to identifying an active cpu within the same freq domain, if any.
+ */
+ if (!housekeeping_cpu(cpu, HK_TYPE_TICK) ||
+ time_is_before_jiffies(last_update + msecs_to_jiffies(AMU_SAMPLE_EXP_MS))) {
+ struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
+ int ref_cpu;
+
+ if (!policy)
+ return -EINVAL;
+
+ if (!cpumask_intersects(policy->related_cpus,
+ housekeeping_cpumask(HK_TYPE_TICK))) {
+ cpufreq_cpu_put(policy);
+ return -EOPNOTSUPP;
+ }
+
+ for_each_cpu_wrap(ref_cpu, policy->cpus, cpu + 1) {
+ if (ref_cpu == start_cpu) {
+ /* Prevent verifying same CPU twice */
+ ref_cpu = nr_cpu_ids;
+ break;
+ }
+ if (!idle_cpu(ref_cpu))
+ break;
+ }
+
+ cpufreq_cpu_put(policy);
+
+ if (ref_cpu >= nr_cpu_ids)
+ /* No alternative to pull info from */
+ return -EAGAIN;
+
+ cpu = ref_cpu;
+ } else {
+ break;
+ }
+ }
+ /*
+ * Reversed computation to the one used to determine
+ * the arch_freq_scale value
+ * (see amu_scale_freq_tick for details)
+ */
+ scale = arch_scale_freq_capacity(cpu);
+ freq = scale * arch_scale_freq_ref(cpu);
+ freq >>= SCHED_CAPACITY_SHIFT;
+ return freq;
+}
+
static void amu_fie_setup(const struct cpumask *cpus)
{
int cpu;
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index 4e26bd356a48..529cff825531 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -172,14 +172,6 @@ static void dump_kernel_instr(const char *lvl, struct pt_regs *regs)
printk("%sCode: %s\n", lvl, str);
}
-#ifdef CONFIG_PREEMPT
-#define S_PREEMPT " PREEMPT"
-#elif defined(CONFIG_PREEMPT_RT)
-#define S_PREEMPT " PREEMPT_RT"
-#else
-#define S_PREEMPT ""
-#endif
-
#define S_SMP " SMP"
static int __die(const char *str, long err, struct pt_regs *regs)
@@ -187,7 +179,7 @@ static int __die(const char *str, long err, struct pt_regs *regs)
static int die_counter;
int ret;
- pr_emerg("Internal error: %s: %016lx [#%d]" S_PREEMPT S_SMP "\n",
+ pr_emerg("Internal error: %s: %016lx [#%d] " S_SMP "\n",
str, err, ++die_counter);
/* trap and error numbers are mostly meaningless on ARM */
diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c
index e8ed8e5b713b..887ac0b05961 100644
--- a/arch/arm64/kernel/vdso.c
+++ b/arch/arm64/kernel/vdso.c
@@ -18,7 +18,7 @@
#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/slab.h>
-#include <linux/time_namespace.h>
+#include <linux/vdso_datastore.h>
#include <linux/vmalloc.h>
#include <vdso/datapage.h>
#include <vdso/helpers.h>
@@ -57,12 +57,6 @@ static struct vdso_abi_info vdso_info[] __ro_after_init = {
#endif /* CONFIG_COMPAT_VDSO */
};
-/*
- * The vDSO data page.
- */
-static union vdso_data_store vdso_data_store __page_aligned_data;
-struct vdso_data *vdso_data = vdso_data_store.data;
-
static int vdso_mremap(const struct vm_special_mapping *sm,
struct vm_area_struct *new_vma)
{
@@ -104,78 +98,6 @@ static int __init __vdso_init(enum vdso_abi abi)
return 0;
}
-#ifdef CONFIG_TIME_NS
-struct vdso_data *arch_get_vdso_data(void *vvar_page)
-{
- return (struct vdso_data *)(vvar_page);
-}
-
-static const struct vm_special_mapping vvar_map;
-
-/*
- * The vvar mapping contains data for a specific time namespace, so when a task
- * changes namespace we must unmap its vvar data for the old namespace.
- * Subsequent faults will map in data for the new namespace.
- *
- * For more details see timens_setup_vdso_data().
- */
-int vdso_join_timens(struct task_struct *task, struct time_namespace *ns)
-{
- struct mm_struct *mm = task->mm;
- struct vm_area_struct *vma;
- VMA_ITERATOR(vmi, mm, 0);
-
- mmap_read_lock(mm);
-
- for_each_vma(vmi, vma) {
- if (vma_is_special_mapping(vma, &vvar_map))
- zap_vma_pages(vma);
- }
-
- mmap_read_unlock(mm);
- return 0;
-}
-#endif
-
-static vm_fault_t vvar_fault(const struct vm_special_mapping *sm,
- struct vm_area_struct *vma, struct vm_fault *vmf)
-{
- struct page *timens_page = find_timens_vvar_page(vma);
- unsigned long pfn;
-
- switch (vmf->pgoff) {
- case VVAR_DATA_PAGE_OFFSET:
- if (timens_page)
- pfn = page_to_pfn(timens_page);
- else
- pfn = sym_to_pfn(vdso_data);
- break;
-#ifdef CONFIG_TIME_NS
- case VVAR_TIMENS_PAGE_OFFSET:
- /*
- * If a task belongs to a time namespace then a namespace
- * specific VVAR is mapped with the VVAR_DATA_PAGE_OFFSET and
- * the real VVAR page is mapped with the VVAR_TIMENS_PAGE_OFFSET
- * offset.
- * See also the comment near timens_setup_vdso_data().
- */
- if (!timens_page)
- return VM_FAULT_SIGBUS;
- pfn = sym_to_pfn(vdso_data);
- break;
-#endif /* CONFIG_TIME_NS */
- default:
- return VM_FAULT_SIGBUS;
- }
-
- return vmf_insert_pfn(vma, vmf->address, pfn);
-}
-
-static const struct vm_special_mapping vvar_map = {
- .name = "[vvar]",
- .fault = vvar_fault,
-};
-
static int __setup_additional_pages(enum vdso_abi abi,
struct mm_struct *mm,
struct linux_binprm *bprm,
@@ -185,11 +107,11 @@ static int __setup_additional_pages(enum vdso_abi abi,
unsigned long gp_flags = 0;
void *ret;
- BUILD_BUG_ON(VVAR_NR_PAGES != __VVAR_PAGES);
+ BUILD_BUG_ON(VDSO_NR_PAGES != __VDSO_PAGES);
vdso_text_len = vdso_info[abi].vdso_pages << PAGE_SHIFT;
/* Be sure to map the data page */
- vdso_mapping_len = vdso_text_len + VVAR_NR_PAGES * PAGE_SIZE;
+ vdso_mapping_len = vdso_text_len + VDSO_NR_PAGES * PAGE_SIZE;
vdso_base = get_unmapped_area(NULL, 0, vdso_mapping_len, 0, 0);
if (IS_ERR_VALUE(vdso_base)) {
@@ -197,16 +119,14 @@ static int __setup_additional_pages(enum vdso_abi abi,
goto up_fail;
}
- ret = _install_special_mapping(mm, vdso_base, VVAR_NR_PAGES * PAGE_SIZE,
- VM_READ|VM_MAYREAD|VM_PFNMAP,
- &vvar_map);
+ ret = vdso_install_vvar_mapping(mm, vdso_base);
if (IS_ERR(ret))
goto up_fail;
if (system_supports_bti_kernel())
gp_flags = VM_ARM64_BTI;
- vdso_base += VVAR_NR_PAGES * PAGE_SIZE;
+ vdso_base += VDSO_NR_PAGES * PAGE_SIZE;
mm->context.vdso = (void *)vdso_base;
ret = _install_special_mapping(mm, vdso_base, vdso_text_len,
VM_READ|VM_EXEC|gp_flags|
diff --git a/arch/arm64/kernel/vdso/Makefile b/arch/arm64/kernel/vdso/Makefile
index 35685c036044..5e27e46aa496 100644
--- a/arch/arm64/kernel/vdso/Makefile
+++ b/arch/arm64/kernel/vdso/Makefile
@@ -7,7 +7,7 @@
#
# Include the generic Makefile to check the built vdso.
-include $(srctree)/lib/vdso/Makefile
+include $(srctree)/lib/vdso/Makefile.include
obj-vdso := vgettimeofday.o note.o sigreturn.o vgetrandom.o vgetrandom-chacha.o
diff --git a/arch/arm64/kernel/vdso/vdso.lds.S b/arch/arm64/kernel/vdso/vdso.lds.S
index 47ad6944f9f0..52314be29191 100644
--- a/arch/arm64/kernel/vdso/vdso.lds.S
+++ b/arch/arm64/kernel/vdso/vdso.lds.S
@@ -20,11 +20,8 @@ OUTPUT_ARCH(aarch64)
SECTIONS
{
- PROVIDE(_vdso_data = . - __VVAR_PAGES * PAGE_SIZE);
- PROVIDE(_vdso_rng_data = _vdso_data + __VDSO_RND_DATA_OFFSET);
-#ifdef CONFIG_TIME_NS
- PROVIDE(_timens_data = _vdso_data + PAGE_SIZE);
-#endif
+ VDSO_VVAR_SYMS
+
. = SIZEOF_HEADERS;
.hash : { *(.hash) } :text
diff --git a/arch/arm64/kernel/vdso32/Makefile b/arch/arm64/kernel/vdso32/Makefile
index 25a2cb6317f3..f2dfdc7dc818 100644
--- a/arch/arm64/kernel/vdso32/Makefile
+++ b/arch/arm64/kernel/vdso32/Makefile
@@ -3,7 +3,7 @@
# Makefile for vdso32
#
-include $(srctree)/lib/vdso/Makefile
+include $(srctree)/lib/vdso/Makefile.include
# Same as cc-*option, but using CC_COMPAT instead of CC
ifeq ($(CONFIG_CC_IS_CLANG), y)
diff --git a/arch/arm64/kernel/vdso32/vdso.lds.S b/arch/arm64/kernel/vdso32/vdso.lds.S
index 732702a187e9..e02b27487ce8 100644
--- a/arch/arm64/kernel/vdso32/vdso.lds.S
+++ b/arch/arm64/kernel/vdso32/vdso.lds.S
@@ -12,16 +12,15 @@
#include <asm/page.h>
#include <asm/vdso.h>
#include <asm-generic/vmlinux.lds.h>
+#include <vdso/datapage.h>
OUTPUT_FORMAT("elf32-littlearm", "elf32-bigarm", "elf32-littlearm")
OUTPUT_ARCH(arm)
SECTIONS
{
- PROVIDE_HIDDEN(_vdso_data = . - __VVAR_PAGES * PAGE_SIZE);
-#ifdef CONFIG_TIME_NS
- PROVIDE_HIDDEN(_timens_data = _vdso_data + PAGE_SIZE);
-#endif
+ VDSO_VVAR_SYMS
+
. = SIZEOF_HEADERS;
.hash : { *(.hash) } :text
diff --git a/arch/arm64/kvm/Makefile b/arch/arm64/kvm/Makefile
index 3cf7adb2b503..209bc76263f1 100644
--- a/arch/arm64/kvm/Makefile
+++ b/arch/arm64/kvm/Makefile
@@ -23,7 +23,7 @@ kvm-y += arm.o mmu.o mmio.o psci.o hypercalls.o pvtime.o \
vgic/vgic-v3.o vgic/vgic-v4.o \
vgic/vgic-mmio.o vgic/vgic-mmio-v2.o \
vgic/vgic-mmio-v3.o vgic/vgic-kvm-device.o \
- vgic/vgic-its.o vgic/vgic-debug.o
+ vgic/vgic-its.o vgic/vgic-debug.o vgic/vgic-v3-nested.o
kvm-$(CONFIG_HW_PERF_EVENTS) += pmu-emul.o pmu.o
kvm-$(CONFIG_ARM64_PTR_AUTH) += pauth.o
diff --git a/arch/arm64/kvm/arch_timer.c b/arch/arm64/kvm/arch_timer.c
index 70802e4c91cf..5133dcbfe9f7 100644
--- a/arch/arm64/kvm/arch_timer.c
+++ b/arch/arm64/kvm/arch_timer.c
@@ -1070,8 +1070,7 @@ static void timer_context_init(struct kvm_vcpu *vcpu, int timerid)
else
ctxt->offset.vm_offset = &kvm->arch.timer_data.poffset;
- hrtimer_init(&ctxt->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD);
- ctxt->hrtimer.function = kvm_hrtimer_expire;
+ hrtimer_setup(&ctxt->hrtimer, kvm_hrtimer_expire, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD);
switch (timerid) {
case TIMER_PTIMER:
@@ -1098,8 +1097,8 @@ void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu)
timer_set_offset(vcpu_ptimer(vcpu), 0);
}
- hrtimer_init(&timer->bg_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD);
- timer->bg_timer.function = kvm_bg_timer_expire;
+ hrtimer_setup(&timer->bg_timer, kvm_bg_timer_expire, CLOCK_MONOTONIC,
+ HRTIMER_MODE_ABS_HARD);
}
void kvm_timer_init_vm(struct kvm *kvm)
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index 0160b4924351..68fec8c95fee 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -125,6 +125,14 @@ int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
}
mutex_unlock(&kvm->slots_lock);
break;
+ case KVM_CAP_ARM_WRITABLE_IMP_ID_REGS:
+ mutex_lock(&kvm->lock);
+ if (!kvm->created_vcpus) {
+ r = 0;
+ set_bit(KVM_ARCH_FLAG_WRITABLE_IMP_ID_REGS, &kvm->arch.flags);
+ }
+ mutex_unlock(&kvm->lock);
+ break;
default:
break;
}
@@ -313,6 +321,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
case KVM_CAP_ARM_SYSTEM_SUSPEND:
case KVM_CAP_IRQFD_RESAMPLE:
case KVM_CAP_COUNTER_OFFSET:
+ case KVM_CAP_ARM_WRITABLE_IMP_ID_REGS:
r = 1;
break;
case KVM_CAP_SET_GUEST_DEBUG2:
@@ -366,7 +375,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
r = get_num_wrps();
break;
case KVM_CAP_ARM_PMU_V3:
- r = kvm_arm_support_pmu_v3();
+ r = kvm_supports_guest_pmuv3();
break;
case KVM_CAP_ARM_INJECT_SERROR_ESR:
r = cpus_have_final_cap(ARM64_HAS_RAS_EXTN);
@@ -466,7 +475,11 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
if (err)
return err;
- return kvm_share_hyp(vcpu, vcpu + 1);
+ err = kvm_share_hyp(vcpu, vcpu + 1);
+ if (err)
+ kvm_vgic_vcpu_destroy(vcpu);
+
+ return err;
}
void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
@@ -586,8 +599,12 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
nommu:
vcpu->cpu = cpu;
- kvm_vgic_load(vcpu);
+ /*
+ * The timer must be loaded before the vgic to correctly set up physical
+ * interrupt deactivation in nested state (e.g. timer interrupt).
+ */
kvm_timer_vcpu_load(vcpu);
+ kvm_vgic_load(vcpu);
kvm_vcpu_load_debug(vcpu);
if (has_vhe())
kvm_vcpu_load_vhe(vcpu);
@@ -825,6 +842,12 @@ int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
if (ret)
return ret;
+ if (vcpu_has_nv(vcpu)) {
+ ret = kvm_vgic_vcpu_nv_init(vcpu);
+ if (ret)
+ return ret;
+ }
+
/*
* This needs to happen after any restriction has been applied
* to the feature set.
@@ -835,14 +858,20 @@ int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
if (ret)
return ret;
- ret = kvm_arm_pmu_v3_enable(vcpu);
- if (ret)
- return ret;
+ if (kvm_vcpu_has_pmu(vcpu)) {
+ ret = kvm_arm_pmu_v3_enable(vcpu);
+ if (ret)
+ return ret;
+ }
if (is_protected_kvm_enabled()) {
ret = pkvm_create_hyp_vm(kvm);
if (ret)
return ret;
+
+ ret = pkvm_create_hyp_vcpu(vcpu);
+ if (ret)
+ return ret;
}
mutex_lock(&kvm->arch.config_lock);
@@ -1148,7 +1177,8 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
*/
preempt_disable();
- kvm_pmu_flush_hwstate(vcpu);
+ if (kvm_vcpu_has_pmu(vcpu))
+ kvm_pmu_flush_hwstate(vcpu);
local_irq_disable();
@@ -1167,7 +1197,8 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
if (ret <= 0 || kvm_vcpu_exit_request(vcpu, &ret)) {
vcpu->mode = OUTSIDE_GUEST_MODE;
isb(); /* Ensure work in x_flush_hwstate is committed */
- kvm_pmu_sync_hwstate(vcpu);
+ if (kvm_vcpu_has_pmu(vcpu))
+ kvm_pmu_sync_hwstate(vcpu);
if (unlikely(!irqchip_in_kernel(vcpu->kvm)))
kvm_timer_sync_user(vcpu);
kvm_vgic_sync_hwstate(vcpu);
@@ -1197,7 +1228,8 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
* that the vgic can properly sample the updated state of the
* interrupt line.
*/
- kvm_pmu_sync_hwstate(vcpu);
+ if (kvm_vcpu_has_pmu(vcpu))
+ kvm_pmu_sync_hwstate(vcpu);
/*
* Sync the vgic state before syncing the timer state because
@@ -1386,7 +1418,7 @@ static unsigned long system_supported_vcpu_features(void)
if (!cpus_have_final_cap(ARM64_HAS_32BIT_EL1))
clear_bit(KVM_ARM_VCPU_EL1_32BIT, &features);
- if (!kvm_arm_support_pmu_v3())
+ if (!kvm_supports_guest_pmuv3())
clear_bit(KVM_ARM_VCPU_PMU_V3, &features);
if (!system_supports_sve())
@@ -2307,6 +2339,13 @@ static int __init init_subsystems(void)
goto out;
}
+ if (kvm_mode == KVM_MODE_NV &&
+ !(vgic_present && kvm_vgic_global_state.type == VGIC_V3)) {
+ kvm_err("NV support requires GICv3, giving up\n");
+ err = -EINVAL;
+ goto out;
+ }
+
/*
* Init HYP architected timer support
*/
@@ -2714,6 +2753,14 @@ int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *cons,
{
struct kvm_kernel_irqfd *irqfd =
container_of(cons, struct kvm_kernel_irqfd, consumer);
+ struct kvm_kernel_irq_routing_entry *irq_entry = &irqfd->irq_entry;
+
+ /*
+ * The only thing we have a chance of directly-injecting is LPIs. Maybe
+ * one day...
+ */
+ if (irq_entry->type != KVM_IRQ_ROUTING_MSI)
+ return 0;
return kvm_vgic_v4_set_forwarding(irqfd->kvm, prod->irq,
&irqfd->irq_entry);
@@ -2723,6 +2770,10 @@ void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons,
{
struct kvm_kernel_irqfd *irqfd =
container_of(cons, struct kvm_kernel_irqfd, consumer);
+ struct kvm_kernel_irq_routing_entry *irq_entry = &irqfd->irq_entry;
+
+ if (irq_entry->type != KVM_IRQ_ROUTING_MSI)
+ return;
kvm_vgic_v4_unset_forwarding(irqfd->kvm, prod->irq,
&irqfd->irq_entry);
@@ -2803,11 +2854,12 @@ static __init int kvm_arm_init(void)
if (err)
goto out_hyp;
- kvm_info("%s%sVHE mode initialized successfully\n",
+ kvm_info("%s%sVHE%s mode initialized successfully\n",
in_hyp_mode ? "" : (is_protected_kvm_enabled() ?
"Protected " : "Hyp "),
in_hyp_mode ? "" : (cpus_have_final_cap(ARM64_KVM_HVHE) ?
- "h" : "n"));
+ "h" : "n"),
+ cpus_have_final_cap(ARM64_HAS_NESTED_VIRT) ? "+NV2": "");
/*
* FIXME: Do something reasonable if kvm_init() fails after pKVM
diff --git a/arch/arm64/kvm/at.c b/arch/arm64/kvm/at.c
index 3a96c96816e9..f74a66ce3064 100644
--- a/arch/arm64/kvm/at.c
+++ b/arch/arm64/kvm/at.c
@@ -1090,22 +1090,22 @@ static void compute_s1_overlay_permissions(struct kvm_vcpu *vcpu,
break;
}
- if (pov_perms & ~POE_RXW)
+ if (pov_perms & ~POE_RWX)
pov_perms = POE_NONE;
if (wi->poe && wr->pov) {
wr->pr &= pov_perms & POE_R;
- wr->px &= pov_perms & POE_X;
wr->pw &= pov_perms & POE_W;
+ wr->px &= pov_perms & POE_X;
}
- if (uov_perms & ~POE_RXW)
+ if (uov_perms & ~POE_RWX)
uov_perms = POE_NONE;
if (wi->e0poe && wr->uov) {
wr->ur &= uov_perms & POE_R;
- wr->ux &= uov_perms & POE_X;
wr->uw &= uov_perms & POE_W;
+ wr->ux &= uov_perms & POE_X;
}
}
diff --git a/arch/arm64/kvm/emulate-nested.c b/arch/arm64/kvm/emulate-nested.c
index 607d37bab70b..0fcfcc0478f9 100644
--- a/arch/arm64/kvm/emulate-nested.c
+++ b/arch/arm64/kvm/emulate-nested.c
@@ -412,26 +412,26 @@ static const struct trap_bits coarse_trap_bits[] = {
},
[CGT_ICH_HCR_TC] = {
.index = ICH_HCR_EL2,
- .value = ICH_HCR_TC,
- .mask = ICH_HCR_TC,
+ .value = ICH_HCR_EL2_TC,
+ .mask = ICH_HCR_EL2_TC,
.behaviour = BEHAVE_FORWARD_RW,
},
[CGT_ICH_HCR_TALL0] = {
.index = ICH_HCR_EL2,
- .value = ICH_HCR_TALL0,
- .mask = ICH_HCR_TALL0,
+ .value = ICH_HCR_EL2_TALL0,
+ .mask = ICH_HCR_EL2_TALL0,
.behaviour = BEHAVE_FORWARD_RW,
},
[CGT_ICH_HCR_TALL1] = {
.index = ICH_HCR_EL2,
- .value = ICH_HCR_TALL1,
- .mask = ICH_HCR_TALL1,
+ .value = ICH_HCR_EL2_TALL1,
+ .mask = ICH_HCR_EL2_TALL1,
.behaviour = BEHAVE_FORWARD_RW,
},
[CGT_ICH_HCR_TDIR] = {
.index = ICH_HCR_EL2,
- .value = ICH_HCR_TDIR,
- .mask = ICH_HCR_TDIR,
+ .value = ICH_HCR_EL2_TDIR,
+ .mask = ICH_HCR_EL2_TDIR,
.behaviour = BEHAVE_FORWARD_RW,
},
};
@@ -2503,6 +2503,7 @@ void kvm_emulate_nested_eret(struct kvm_vcpu *vcpu)
}
preempt_disable();
+ vcpu_set_flag(vcpu, IN_NESTED_ERET);
kvm_arch_vcpu_put(vcpu);
if (!esr_iss_is_eretax(esr))
@@ -2514,9 +2515,11 @@ void kvm_emulate_nested_eret(struct kvm_vcpu *vcpu)
*vcpu_cpsr(vcpu) = spsr;
kvm_arch_vcpu_load(vcpu, smp_processor_id());
+ vcpu_clear_flag(vcpu, IN_NESTED_ERET);
preempt_enable();
- kvm_pmu_nested_transition(vcpu);
+ if (kvm_vcpu_has_pmu(vcpu))
+ kvm_pmu_nested_transition(vcpu);
}
static void kvm_inject_el2_exception(struct kvm_vcpu *vcpu, u64 esr_el2,
@@ -2599,7 +2602,8 @@ static int kvm_inject_nested(struct kvm_vcpu *vcpu, u64 esr_el2,
kvm_arch_vcpu_load(vcpu, smp_processor_id());
preempt_enable();
- kvm_pmu_nested_transition(vcpu);
+ if (kvm_vcpu_has_pmu(vcpu))
+ kvm_pmu_nested_transition(vcpu);
return 1;
}
diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
index 512d152233ff..b73dc26bc44b 100644
--- a/arch/arm64/kvm/handle_exit.c
+++ b/arch/arm64/kvm/handle_exit.c
@@ -129,8 +129,12 @@ static int kvm_handle_fpasimd(struct kvm_vcpu *vcpu)
static int kvm_handle_wfx(struct kvm_vcpu *vcpu)
{
u64 esr = kvm_vcpu_get_esr(vcpu);
+ bool is_wfe = !!(esr & ESR_ELx_WFx_ISS_WFE);
- if (esr & ESR_ELx_WFx_ISS_WFE) {
+ if (guest_hyp_wfx_traps_enabled(vcpu))
+ return kvm_inject_nested_sync(vcpu, kvm_vcpu_get_esr(vcpu));
+
+ if (is_wfe) {
trace_kvm_wfx_arm64(*vcpu_pc(vcpu), true);
vcpu->stat.wfe_exit_stat++;
} else {
diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h
index 23bbe28eaaf9..b741ea6aefa5 100644
--- a/arch/arm64/kvm/hyp/include/hyp/switch.h
+++ b/arch/arm64/kvm/hyp/include/hyp/switch.h
@@ -244,7 +244,7 @@ static inline void __activate_traps_common(struct kvm_vcpu *vcpu)
* counter, which could make a PMXEVCNTR_EL0 access UNDEF at
* EL1 instead of being trapped to EL2.
*/
- if (kvm_arm_support_pmu_v3()) {
+ if (system_supports_pmuv3()) {
struct kvm_cpu_context *hctxt;
write_sysreg(0, pmselr_el0);
@@ -281,7 +281,7 @@ static inline void __deactivate_traps_common(struct kvm_vcpu *vcpu)
write_sysreg(*host_data_ptr(host_debug_state.mdcr_el2), mdcr_el2);
write_sysreg(0, hstr_el2);
- if (kvm_arm_support_pmu_v3()) {
+ if (system_supports_pmuv3()) {
struct kvm_cpu_context *hctxt;
hctxt = host_data_ptr(host_ctxt);
diff --git a/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h b/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h
index 76ff095c6b6e..b9cff893bbe0 100644
--- a/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h
+++ b/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h
@@ -43,6 +43,17 @@ static inline u64 *ctxt_mdscr_el1(struct kvm_cpu_context *ctxt)
return &ctxt_sys_reg(ctxt, MDSCR_EL1);
}
+static inline u64 ctxt_midr_el1(struct kvm_cpu_context *ctxt)
+{
+ struct kvm *kvm = kern_hyp_va(ctxt_to_vcpu(ctxt)->kvm);
+
+ if (!(ctxt_is_guest(ctxt) &&
+ test_bit(KVM_ARCH_FLAG_WRITABLE_IMP_ID_REGS, &kvm->arch.flags)))
+ return read_cpuid_id();
+
+ return kvm_read_vm_id_reg(kvm, SYS_MIDR_EL1);
+}
+
static inline void __sysreg_save_common_state(struct kvm_cpu_context *ctxt)
{
*ctxt_mdscr_el1(ctxt) = read_sysreg(mdscr_el1);
@@ -168,8 +179,9 @@ static inline void __sysreg_restore_user_state(struct kvm_cpu_context *ctxt)
}
static inline void __sysreg_restore_el1_state(struct kvm_cpu_context *ctxt,
- u64 mpidr)
+ u64 midr, u64 mpidr)
{
+ write_sysreg(midr, vpidr_el2);
write_sysreg(mpidr, vmpidr_el2);
if (has_vhe() ||
diff --git a/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h b/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h
index 978f38c386ee..ea0a704da9b8 100644
--- a/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h
+++ b/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h
@@ -56,7 +56,7 @@ void handle_host_mem_abort(struct kvm_cpu_context *host_ctxt);
int hyp_pin_shared_mem(void *from, void *to);
void hyp_unpin_shared_mem(void *from, void *to);
-void reclaim_guest_pages(struct pkvm_hyp_vm *vm, struct kvm_hyp_memcache *mc);
+void reclaim_pgtable_pages(struct pkvm_hyp_vm *vm, struct kvm_hyp_memcache *mc);
int refill_memcache(struct kvm_hyp_memcache *mc, unsigned long min_pages,
struct kvm_hyp_memcache *host_mc);
diff --git a/arch/arm64/kvm/hyp/include/nvhe/pkvm.h b/arch/arm64/kvm/hyp/include/nvhe/pkvm.h
index e42bf68c8848..ce31d3b73603 100644
--- a/arch/arm64/kvm/hyp/include/nvhe/pkvm.h
+++ b/arch/arm64/kvm/hyp/include/nvhe/pkvm.h
@@ -43,12 +43,6 @@ struct pkvm_hyp_vm {
struct hyp_pool pool;
hyp_spinlock_t lock;
- /*
- * The number of vcpus initialized and ready to run.
- * Modifying this is protected by 'vm_table_lock'.
- */
- unsigned int nr_vcpus;
-
/* Array of the hyp vCPU structures for this VM. */
struct pkvm_hyp_vcpu *vcpus[];
};
diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-init.S b/arch/arm64/kvm/hyp/nvhe/hyp-init.S
index fc1866226067..f8af11189572 100644
--- a/arch/arm64/kvm/hyp/nvhe/hyp-init.S
+++ b/arch/arm64/kvm/hyp/nvhe/hyp-init.S
@@ -73,8 +73,12 @@ __do_hyp_init:
eret
SYM_CODE_END(__kvm_hyp_init)
+/*
+ * Initialize EL2 CPU state to sane values.
+ *
+ * HCR_EL2.E2H must have been initialized already.
+ */
SYM_CODE_START_LOCAL(__kvm_init_el2_state)
- /* Initialize EL2 CPU state to sane values. */
init_el2_state // Clobbers x0..x2
finalise_el2_state
ret
@@ -206,9 +210,9 @@ SYM_CODE_START_LOCAL(__kvm_hyp_init_cpu)
2: msr SPsel, #1 // We want to use SP_EL{1,2}
- bl __kvm_init_el2_state
+ init_el2_hcr 0
- __init_el2_nvhe_prepare_eret
+ bl __kvm_init_el2_state
/* Enable MMU, set vectors and stack. */
mov x0, x28
diff --git a/arch/arm64/kvm/hyp/nvhe/mem_protect.c b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
index 19c3c631708c..f34f11c720d7 100644
--- a/arch/arm64/kvm/hyp/nvhe/mem_protect.c
+++ b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
@@ -266,7 +266,7 @@ int kvm_guest_prepare_stage2(struct pkvm_hyp_vm *vm, void *pgd)
return 0;
}
-void reclaim_guest_pages(struct pkvm_hyp_vm *vm, struct kvm_hyp_memcache *mc)
+void reclaim_pgtable_pages(struct pkvm_hyp_vm *vm, struct kvm_hyp_memcache *mc)
{
struct hyp_page *page;
void *addr;
diff --git a/arch/arm64/kvm/hyp/nvhe/pkvm.c b/arch/arm64/kvm/hyp/nvhe/pkvm.c
index 3927fe52a3dd..5a335a51deca 100644
--- a/arch/arm64/kvm/hyp/nvhe/pkvm.c
+++ b/arch/arm64/kvm/hyp/nvhe/pkvm.c
@@ -46,7 +46,8 @@ static void pkvm_vcpu_reset_hcr(struct kvm_vcpu *vcpu)
vcpu->arch.hcr_el2 |= HCR_FWB;
if (cpus_have_final_cap(ARM64_HAS_EVT) &&
- !cpus_have_final_cap(ARM64_MISMATCHED_CACHE_TYPE))
+ !cpus_have_final_cap(ARM64_MISMATCHED_CACHE_TYPE) &&
+ kvm_read_vm_id_reg(vcpu->kvm, SYS_CTR_EL0) == read_cpuid(CTR_EL0))
vcpu->arch.hcr_el2 |= HCR_TID4;
else
vcpu->arch.hcr_el2 |= HCR_TID2;
@@ -166,8 +167,13 @@ static int pkvm_vcpu_init_traps(struct pkvm_hyp_vcpu *hyp_vcpu)
pkvm_vcpu_reset_hcr(vcpu);
- if ((!pkvm_hyp_vcpu_is_protected(hyp_vcpu)))
+ if ((!pkvm_hyp_vcpu_is_protected(hyp_vcpu))) {
+ struct kvm_vcpu *host_vcpu = hyp_vcpu->host_vcpu;
+
+ /* Trust the host for non-protected vcpu features. */
+ vcpu->arch.hcrx_el2 = host_vcpu->arch.hcrx_el2;
return 0;
+ }
ret = pkvm_check_pvm_cpu_features(vcpu);
if (ret)
@@ -175,6 +181,7 @@ static int pkvm_vcpu_init_traps(struct pkvm_hyp_vcpu *hyp_vcpu)
pvm_init_traps_hcr(vcpu);
pvm_init_traps_mdcr(vcpu);
+ vcpu_set_hcrx(vcpu);
return 0;
}
@@ -239,10 +246,12 @@ struct pkvm_hyp_vcpu *pkvm_load_hyp_vcpu(pkvm_handle_t handle,
hyp_spin_lock(&vm_table_lock);
hyp_vm = get_vm_by_handle(handle);
- if (!hyp_vm || hyp_vm->nr_vcpus <= vcpu_idx)
+ if (!hyp_vm || hyp_vm->kvm.created_vcpus <= vcpu_idx)
goto unlock;
hyp_vcpu = hyp_vm->vcpus[vcpu_idx];
+ if (!hyp_vcpu)
+ goto unlock;
/* Ensure vcpu isn't loaded on more than one cpu simultaneously. */
if (unlikely(hyp_vcpu->loaded_hyp_vcpu)) {
@@ -315,6 +324,9 @@ static void pkvm_init_features_from_host(struct pkvm_hyp_vm *hyp_vm, const struc
unsigned long host_arch_flags = READ_ONCE(host_kvm->arch.flags);
DECLARE_BITMAP(allowed_features, KVM_VCPU_MAX_FEATURES);
+ /* CTR_EL0 is always under host control, even for protected VMs. */
+ hyp_vm->kvm.arch.ctr_el0 = host_kvm->arch.ctr_el0;
+
if (test_bit(KVM_ARCH_FLAG_MTE_ENABLED, &host_kvm->arch.flags))
set_bit(KVM_ARCH_FLAG_MTE_ENABLED, &kvm->arch.flags);
@@ -325,6 +337,10 @@ static void pkvm_init_features_from_host(struct pkvm_hyp_vm *hyp_vm, const struc
bitmap_copy(kvm->arch.vcpu_features,
host_kvm->arch.vcpu_features,
KVM_VCPU_MAX_FEATURES);
+
+ if (test_bit(KVM_ARCH_FLAG_WRITABLE_IMP_ID_REGS, &host_arch_flags))
+ hyp_vm->kvm.arch.midr_el1 = host_kvm->arch.midr_el1;
+
return;
}
@@ -361,8 +377,14 @@ static void unpin_host_vcpus(struct pkvm_hyp_vcpu *hyp_vcpus[],
{
int i;
- for (i = 0; i < nr_vcpus; i++)
- unpin_host_vcpu(hyp_vcpus[i]->host_vcpu);
+ for (i = 0; i < nr_vcpus; i++) {
+ struct pkvm_hyp_vcpu *hyp_vcpu = hyp_vcpus[i];
+
+ if (!hyp_vcpu)
+ continue;
+
+ unpin_host_vcpu(hyp_vcpu->host_vcpu);
+ }
}
static void init_pkvm_hyp_vm(struct kvm *host_kvm, struct pkvm_hyp_vm *hyp_vm,
@@ -386,24 +408,18 @@ static void pkvm_vcpu_init_sve(struct pkvm_hyp_vcpu *hyp_vcpu, struct kvm_vcpu *
static int init_pkvm_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu,
struct pkvm_hyp_vm *hyp_vm,
- struct kvm_vcpu *host_vcpu,
- unsigned int vcpu_idx)
+ struct kvm_vcpu *host_vcpu)
{
int ret = 0;
if (hyp_pin_shared_mem(host_vcpu, host_vcpu + 1))
return -EBUSY;
- if (host_vcpu->vcpu_idx != vcpu_idx) {
- ret = -EINVAL;
- goto done;
- }
-
hyp_vcpu->host_vcpu = host_vcpu;
hyp_vcpu->vcpu.kvm = &hyp_vm->kvm;
hyp_vcpu->vcpu.vcpu_id = READ_ONCE(host_vcpu->vcpu_id);
- hyp_vcpu->vcpu.vcpu_idx = vcpu_idx;
+ hyp_vcpu->vcpu.vcpu_idx = READ_ONCE(host_vcpu->vcpu_idx);
hyp_vcpu->vcpu.arch.hw_mmu = &hyp_vm->kvm.arch.mmu;
hyp_vcpu->vcpu.arch.cflags = READ_ONCE(host_vcpu->arch.cflags);
@@ -641,27 +657,28 @@ int __pkvm_init_vcpu(pkvm_handle_t handle, struct kvm_vcpu *host_vcpu,
goto unlock;
}
- idx = hyp_vm->nr_vcpus;
+ ret = init_pkvm_hyp_vcpu(hyp_vcpu, hyp_vm, host_vcpu);
+ if (ret)
+ goto unlock;
+
+ idx = hyp_vcpu->vcpu.vcpu_idx;
if (idx >= hyp_vm->kvm.created_vcpus) {
ret = -EINVAL;
goto unlock;
}
- ret = init_pkvm_hyp_vcpu(hyp_vcpu, hyp_vm, host_vcpu, idx);
- if (ret)
+ if (hyp_vm->vcpus[idx]) {
+ ret = -EINVAL;
goto unlock;
+ }
hyp_vm->vcpus[idx] = hyp_vcpu;
- hyp_vm->nr_vcpus++;
unlock:
hyp_spin_unlock(&vm_table_lock);
- if (ret) {
+ if (ret)
unmap_donated_memory(hyp_vcpu, sizeof(*hyp_vcpu));
- return ret;
- }
-
- return 0;
+ return ret;
}
static void
@@ -678,7 +695,7 @@ teardown_donated_memory(struct kvm_hyp_memcache *mc, void *addr, size_t size)
int __pkvm_teardown_vm(pkvm_handle_t handle)
{
- struct kvm_hyp_memcache *mc;
+ struct kvm_hyp_memcache *mc, *stage2_mc;
struct pkvm_hyp_vm *hyp_vm;
struct kvm *host_kvm;
unsigned int idx;
@@ -706,18 +723,24 @@ int __pkvm_teardown_vm(pkvm_handle_t handle)
/* Reclaim guest pages (including page-table pages) */
mc = &host_kvm->arch.pkvm.teardown_mc;
- reclaim_guest_pages(hyp_vm, mc);
- unpin_host_vcpus(hyp_vm->vcpus, hyp_vm->nr_vcpus);
+ stage2_mc = &host_kvm->arch.pkvm.stage2_teardown_mc;
+ reclaim_pgtable_pages(hyp_vm, stage2_mc);
+ unpin_host_vcpus(hyp_vm->vcpus, hyp_vm->kvm.created_vcpus);
/* Push the metadata pages to the teardown memcache */
- for (idx = 0; idx < hyp_vm->nr_vcpus; ++idx) {
+ for (idx = 0; idx < hyp_vm->kvm.created_vcpus; ++idx) {
struct pkvm_hyp_vcpu *hyp_vcpu = hyp_vm->vcpus[idx];
- struct kvm_hyp_memcache *vcpu_mc = &hyp_vcpu->vcpu.arch.pkvm_memcache;
+ struct kvm_hyp_memcache *vcpu_mc;
+
+ if (!hyp_vcpu)
+ continue;
+
+ vcpu_mc = &hyp_vcpu->vcpu.arch.pkvm_memcache;
while (vcpu_mc->nr_pages) {
void *addr = pop_hyp_memcache(vcpu_mc, hyp_phys_to_virt);
- push_hyp_memcache(mc, addr, hyp_virt_to_phys);
+ push_hyp_memcache(stage2_mc, addr, hyp_virt_to_phys);
unmap_donated_memory_noclear(addr, PAGE_SIZE);
}
diff --git a/arch/arm64/kvm/hyp/nvhe/psci-relay.c b/arch/arm64/kvm/hyp/nvhe/psci-relay.c
index 9c2ce1e0e99a..c3e196fb8b18 100644
--- a/arch/arm64/kvm/hyp/nvhe/psci-relay.c
+++ b/arch/arm64/kvm/hyp/nvhe/psci-relay.c
@@ -218,6 +218,9 @@ asmlinkage void __noreturn __kvm_host_psci_cpu_entry(bool is_cpu_on)
if (is_cpu_on)
release_boot_args(boot_args);
+ write_sysreg_el1(INIT_SCTLR_EL1_MMU_OFF, SYS_SCTLR);
+ write_sysreg(INIT_PSTATE_EL1, SPSR_EL2);
+
__host_enter(host_ctxt);
}
diff --git a/arch/arm64/kvm/hyp/nvhe/sysreg-sr.c b/arch/arm64/kvm/hyp/nvhe/sysreg-sr.c
index dba101565de3..3cc613cce5f5 100644
--- a/arch/arm64/kvm/hyp/nvhe/sysreg-sr.c
+++ b/arch/arm64/kvm/hyp/nvhe/sysreg-sr.c
@@ -28,7 +28,9 @@ void __sysreg_save_state_nvhe(struct kvm_cpu_context *ctxt)
void __sysreg_restore_state_nvhe(struct kvm_cpu_context *ctxt)
{
- __sysreg_restore_el1_state(ctxt, ctxt_sys_reg(ctxt, MPIDR_EL1));
+ u64 midr = ctxt_midr_el1(ctxt);
+
+ __sysreg_restore_el1_state(ctxt, midr, ctxt_sys_reg(ctxt, MPIDR_EL1));
__sysreg_restore_common_state(ctxt);
__sysreg_restore_user_state(ctxt);
__sysreg_restore_el2_return_state(ctxt);
diff --git a/arch/arm64/kvm/hyp/vgic-v3-sr.c b/arch/arm64/kvm/hyp/vgic-v3-sr.c
index 3f9741e51d41..ed363aa3027e 100644
--- a/arch/arm64/kvm/hyp/vgic-v3-sr.c
+++ b/arch/arm64/kvm/hyp/vgic-v3-sr.c
@@ -18,7 +18,7 @@
#define vtr_to_nr_pre_bits(v) ((((u32)(v) >> 26) & 7) + 1)
#define vtr_to_nr_apr_regs(v) (1 << (vtr_to_nr_pre_bits(v) - 5))
-static u64 __gic_v3_get_lr(unsigned int lr)
+u64 __gic_v3_get_lr(unsigned int lr)
{
switch (lr & 0xf) {
case 0:
@@ -218,7 +218,7 @@ void __vgic_v3_save_state(struct vgic_v3_cpu_if *cpu_if)
elrsr = read_gicreg(ICH_ELRSR_EL2);
- write_gicreg(cpu_if->vgic_hcr & ~ICH_HCR_EN, ICH_HCR_EL2);
+ write_gicreg(cpu_if->vgic_hcr & ~ICH_HCR_EL2_En, ICH_HCR_EL2);
for (i = 0; i < used_lrs; i++) {
if (elrsr & (1 << i))
@@ -274,7 +274,7 @@ void __vgic_v3_activate_traps(struct vgic_v3_cpu_if *cpu_if)
* system registers to trap to EL1 (duh), force ICC_SRE_EL1.SRE to 1
* so that the trap bits can take effect. Yes, we *loves* the GIC.
*/
- if (!(cpu_if->vgic_hcr & ICH_HCR_EN)) {
+ if (!(cpu_if->vgic_hcr & ICH_HCR_EL2_En)) {
write_gicreg(ICC_SRE_EL1_SRE, ICC_SRE_EL1);
isb();
} else if (!cpu_if->vgic_sre) {
@@ -752,7 +752,7 @@ static void __vgic_v3_bump_eoicount(void)
u32 hcr;
hcr = read_gicreg(ICH_HCR_EL2);
- hcr += 1 << ICH_HCR_EOIcount_SHIFT;
+ hcr += 1 << ICH_HCR_EL2_EOIcount_SHIFT;
write_gicreg(hcr, ICH_HCR_EL2);
}
@@ -1069,7 +1069,7 @@ static bool __vgic_v3_check_trap_forwarding(struct kvm_vcpu *vcpu,
case SYS_ICC_EOIR0_EL1:
case SYS_ICC_HPPIR0_EL1:
case SYS_ICC_IAR0_EL1:
- return ich_hcr & ICH_HCR_TALL0;
+ return ich_hcr & ICH_HCR_EL2_TALL0;
case SYS_ICC_IGRPEN1_EL1:
if (is_read &&
@@ -1090,10 +1090,10 @@ static bool __vgic_v3_check_trap_forwarding(struct kvm_vcpu *vcpu,
case SYS_ICC_EOIR1_EL1:
case SYS_ICC_HPPIR1_EL1:
case SYS_ICC_IAR1_EL1:
- return ich_hcr & ICH_HCR_TALL1;
+ return ich_hcr & ICH_HCR_EL2_TALL1;
case SYS_ICC_DIR_EL1:
- if (ich_hcr & ICH_HCR_TDIR)
+ if (ich_hcr & ICH_HCR_EL2_TDIR)
return true;
fallthrough;
@@ -1101,7 +1101,7 @@ static bool __vgic_v3_check_trap_forwarding(struct kvm_vcpu *vcpu,
case SYS_ICC_RPR_EL1:
case SYS_ICC_CTLR_EL1:
case SYS_ICC_PMR_EL1:
- return ich_hcr & ICH_HCR_TC;
+ return ich_hcr & ICH_HCR_EL2_TC;
default:
return false;
diff --git a/arch/arm64/kvm/hyp/vhe/switch.c b/arch/arm64/kvm/hyp/vhe/switch.c
index 647737d6e8d0..731a0378ed13 100644
--- a/arch/arm64/kvm/hyp/vhe/switch.c
+++ b/arch/arm64/kvm/hyp/vhe/switch.c
@@ -527,6 +527,25 @@ static bool kvm_hyp_handle_sysreg_vhe(struct kvm_vcpu *vcpu, u64 *exit_code)
return kvm_hyp_handle_sysreg(vcpu, exit_code);
}
+static bool kvm_hyp_handle_impdef(struct kvm_vcpu *vcpu, u64 *exit_code)
+{
+ u64 iss;
+
+ if (!cpus_have_final_cap(ARM64_WORKAROUND_PMUV3_IMPDEF_TRAPS))
+ return false;
+
+ /*
+ * Compute a synthetic ESR for a sysreg trap. Conveniently, AFSR1_EL2
+ * is populated with a correct ISS for a sysreg trap. These fruity
+ * parts are 64bit only, so unconditionally set IL.
+ */
+ iss = ESR_ELx_ISS(read_sysreg_s(SYS_AFSR1_EL2));
+ vcpu->arch.fault.esr_el2 = FIELD_PREP(ESR_ELx_EC_MASK, ESR_ELx_EC_SYS64) |
+ FIELD_PREP(ESR_ELx_ISS_MASK, iss) |
+ ESR_ELx_IL;
+ return false;
+}
+
static const exit_handler_fn hyp_exit_handlers[] = {
[0 ... ESR_ELx_EC_MAX] = NULL,
[ESR_ELx_EC_CP15_32] = kvm_hyp_handle_cp15_32,
@@ -538,6 +557,9 @@ static const exit_handler_fn hyp_exit_handlers[] = {
[ESR_ELx_EC_WATCHPT_LOW] = kvm_hyp_handle_watchpt_low,
[ESR_ELx_EC_ERET] = kvm_hyp_handle_eret,
[ESR_ELx_EC_MOPS] = kvm_hyp_handle_mops,
+
+ /* Apple shenanigans */
+ [0x3F] = kvm_hyp_handle_impdef,
};
static inline bool fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
diff --git a/arch/arm64/kvm/hyp/vhe/sysreg-sr.c b/arch/arm64/kvm/hyp/vhe/sysreg-sr.c
index 90b018e06f2c..3814b0b2c937 100644
--- a/arch/arm64/kvm/hyp/vhe/sysreg-sr.c
+++ b/arch/arm64/kvm/hyp/vhe/sysreg-sr.c
@@ -87,11 +87,12 @@ static void __sysreg_restore_vel2_state(struct kvm_vcpu *vcpu)
write_sysreg(__vcpu_sys_reg(vcpu, PAR_EL1), par_el1);
write_sysreg(__vcpu_sys_reg(vcpu, TPIDR_EL1), tpidr_el1);
- write_sysreg(__vcpu_sys_reg(vcpu, MPIDR_EL1), vmpidr_el2);
- write_sysreg_el1(__vcpu_sys_reg(vcpu, MAIR_EL2), SYS_MAIR);
- write_sysreg_el1(__vcpu_sys_reg(vcpu, VBAR_EL2), SYS_VBAR);
- write_sysreg_el1(__vcpu_sys_reg(vcpu, CONTEXTIDR_EL2), SYS_CONTEXTIDR);
- write_sysreg_el1(__vcpu_sys_reg(vcpu, AMAIR_EL2), SYS_AMAIR);
+ write_sysreg(ctxt_midr_el1(&vcpu->arch.ctxt), vpidr_el2);
+ write_sysreg(__vcpu_sys_reg(vcpu, MPIDR_EL1), vmpidr_el2);
+ write_sysreg_el1(__vcpu_sys_reg(vcpu, MAIR_EL2), SYS_MAIR);
+ write_sysreg_el1(__vcpu_sys_reg(vcpu, VBAR_EL2), SYS_VBAR);
+ write_sysreg_el1(__vcpu_sys_reg(vcpu, CONTEXTIDR_EL2), SYS_CONTEXTIDR);
+ write_sysreg_el1(__vcpu_sys_reg(vcpu, AMAIR_EL2), SYS_AMAIR);
if (vcpu_el2_e2h_is_set(vcpu)) {
/*
@@ -191,7 +192,7 @@ void __vcpu_load_switch_sysregs(struct kvm_vcpu *vcpu)
{
struct kvm_cpu_context *guest_ctxt = &vcpu->arch.ctxt;
struct kvm_cpu_context *host_ctxt;
- u64 mpidr;
+ u64 midr, mpidr;
host_ctxt = host_data_ptr(host_ctxt);
__sysreg_save_user_state(host_ctxt);
@@ -221,22 +222,17 @@ void __vcpu_load_switch_sysregs(struct kvm_vcpu *vcpu)
} else {
if (vcpu_has_nv(vcpu)) {
/*
- * Use the guest hypervisor's VPIDR_EL2 when in a
- * nested state. The hardware value of MIDR_EL1 gets
- * restored on put.
- */
- write_sysreg(ctxt_sys_reg(guest_ctxt, VPIDR_EL2), vpidr_el2);
-
- /*
* As we're restoring a nested guest, set the value
* provided by the guest hypervisor.
*/
+ midr = ctxt_sys_reg(guest_ctxt, VPIDR_EL2);
mpidr = ctxt_sys_reg(guest_ctxt, VMPIDR_EL2);
} else {
+ midr = ctxt_midr_el1(guest_ctxt);
mpidr = ctxt_sys_reg(guest_ctxt, MPIDR_EL1);
}
- __sysreg_restore_el1_state(guest_ctxt, mpidr);
+ __sysreg_restore_el1_state(guest_ctxt, midr, mpidr);
}
vcpu_set_flag(vcpu, SYSREGS_ON_CPU);
@@ -271,9 +267,5 @@ void __vcpu_put_switch_sysregs(struct kvm_vcpu *vcpu)
/* Restore host user state */
__sysreg_restore_user_state(host_ctxt);
- /* If leaving a nesting guest, restore MIDR_EL1 default view */
- if (vcpu_has_nv(vcpu))
- write_sysreg(read_cpuid_id(), vpidr_el2);
-
vcpu_clear_flag(vcpu, SYSREGS_ON_CPU);
}
diff --git a/arch/arm64/kvm/hypercalls.c b/arch/arm64/kvm/hypercalls.c
index 27ce4cb44904..569941eeb3fe 100644
--- a/arch/arm64/kvm/hypercalls.c
+++ b/arch/arm64/kvm/hypercalls.c
@@ -15,6 +15,8 @@
GENMASK(KVM_REG_ARM_STD_HYP_BMAP_BIT_COUNT - 1, 0)
#define KVM_ARM_SMCCC_VENDOR_HYP_FEATURES \
GENMASK(KVM_REG_ARM_VENDOR_HYP_BMAP_BIT_COUNT - 1, 0)
+#define KVM_ARM_SMCCC_VENDOR_HYP_FEATURES_2 \
+ GENMASK(KVM_REG_ARM_VENDOR_HYP_BMAP_2_BIT_COUNT - 1, 0)
static void kvm_ptp_get_time(struct kvm_vcpu *vcpu, u64 *val)
{
@@ -360,6 +362,8 @@ int kvm_smccc_call_handler(struct kvm_vcpu *vcpu)
break;
case ARM_SMCCC_VENDOR_HYP_KVM_FEATURES_FUNC_ID:
val[0] = smccc_feat->vendor_hyp_bmap;
+ /* Function numbers 2-63 are reserved for pKVM for now */
+ val[2] = smccc_feat->vendor_hyp_bmap_2;
break;
case ARM_SMCCC_VENDOR_HYP_KVM_PTP_FUNC_ID:
kvm_ptp_get_time(vcpu, val);
@@ -387,6 +391,7 @@ static const u64 kvm_arm_fw_reg_ids[] = {
KVM_REG_ARM_STD_BMAP,
KVM_REG_ARM_STD_HYP_BMAP,
KVM_REG_ARM_VENDOR_HYP_BMAP,
+ KVM_REG_ARM_VENDOR_HYP_BMAP_2,
};
void kvm_arm_init_hypercalls(struct kvm *kvm)
@@ -497,6 +502,9 @@ int kvm_arm_get_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
case KVM_REG_ARM_VENDOR_HYP_BMAP:
val = READ_ONCE(smccc_feat->vendor_hyp_bmap);
break;
+ case KVM_REG_ARM_VENDOR_HYP_BMAP_2:
+ val = READ_ONCE(smccc_feat->vendor_hyp_bmap_2);
+ break;
default:
return -ENOENT;
}
@@ -527,6 +535,10 @@ static int kvm_arm_set_fw_reg_bmap(struct kvm_vcpu *vcpu, u64 reg_id, u64 val)
fw_reg_bmap = &smccc_feat->vendor_hyp_bmap;
fw_reg_features = KVM_ARM_SMCCC_VENDOR_HYP_FEATURES;
break;
+ case KVM_REG_ARM_VENDOR_HYP_BMAP_2:
+ fw_reg_bmap = &smccc_feat->vendor_hyp_bmap_2;
+ fw_reg_features = KVM_ARM_SMCCC_VENDOR_HYP_FEATURES_2;
+ break;
default:
return -ENOENT;
}
@@ -633,6 +645,7 @@ int kvm_arm_set_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
case KVM_REG_ARM_STD_BMAP:
case KVM_REG_ARM_STD_HYP_BMAP:
case KVM_REG_ARM_VENDOR_HYP_BMAP:
+ case KVM_REG_ARM_VENDOR_HYP_BMAP_2:
return kvm_arm_set_fw_reg_bmap(vcpu, reg->id, val);
default:
return -ENOENT;
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index 1f55b0c7b11d..2feb6c6b63af 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -1086,14 +1086,26 @@ void kvm_free_stage2_pgd(struct kvm_s2_mmu *mmu)
}
}
-static void hyp_mc_free_fn(void *addr, void *unused)
+static void hyp_mc_free_fn(void *addr, void *mc)
{
+ struct kvm_hyp_memcache *memcache = mc;
+
+ if (memcache->flags & HYP_MEMCACHE_ACCOUNT_STAGE2)
+ kvm_account_pgtable_pages(addr, -1);
+
free_page((unsigned long)addr);
}
-static void *hyp_mc_alloc_fn(void *unused)
+static void *hyp_mc_alloc_fn(void *mc)
{
- return (void *)__get_free_page(GFP_KERNEL_ACCOUNT);
+ struct kvm_hyp_memcache *memcache = mc;
+ void *addr;
+
+ addr = (void *)__get_free_page(GFP_KERNEL_ACCOUNT);
+ if (addr && memcache->flags & HYP_MEMCACHE_ACCOUNT_STAGE2)
+ kvm_account_pgtable_pages(addr, 1);
+
+ return addr;
}
void free_hyp_memcache(struct kvm_hyp_memcache *mc)
@@ -1102,7 +1114,7 @@ void free_hyp_memcache(struct kvm_hyp_memcache *mc)
return;
kfree(mc->mapping);
- __free_hyp_memcache(mc, hyp_mc_free_fn, kvm_host_va, NULL);
+ __free_hyp_memcache(mc, hyp_mc_free_fn, kvm_host_va, mc);
}
int topup_hyp_memcache(struct kvm_hyp_memcache *mc, unsigned long min_pages)
@@ -1117,7 +1129,7 @@ int topup_hyp_memcache(struct kvm_hyp_memcache *mc, unsigned long min_pages)
}
return __topup_hyp_memcache(mc, min_pages, hyp_mc_alloc_fn,
- kvm_host_pa, NULL);
+ kvm_host_pa, mc);
}
/**
diff --git a/arch/arm64/kvm/nested.c b/arch/arm64/kvm/nested.c
index 0c9387d2f507..4a3fc11f7ecf 100644
--- a/arch/arm64/kvm/nested.c
+++ b/arch/arm64/kvm/nested.c
@@ -16,9 +16,6 @@
#include "sys_regs.h"
-/* Protection against the sysreg repainting madness... */
-#define NV_FTR(r, f) ID_AA64##r##_EL1_##f
-
/*
* Ratio of live shadow S2 MMU per vcpu. This is a trade-off between
* memory usage and potential number of different sets of S2 PTs in
@@ -54,6 +51,10 @@ int kvm_vcpu_init_nested(struct kvm_vcpu *vcpu)
struct kvm_s2_mmu *tmp;
int num_mmus, ret = 0;
+ if (test_bit(KVM_ARM_VCPU_HAS_EL2_E2H0, kvm->arch.vcpu_features) &&
+ !cpus_have_final_cap(ARM64_HAS_HCR_NV1))
+ return -EINVAL;
+
/*
* Let's treat memory allocation failures as benign: If we fail to
* allocate anything, return an error and keep the allocated array
@@ -807,134 +808,151 @@ void kvm_arch_flush_shadow_all(struct kvm *kvm)
* This list should get updated as new features get added to the NV
* support, and new extension to the architecture.
*/
-static void limit_nv_id_regs(struct kvm *kvm)
+u64 limit_nv_id_reg(struct kvm *kvm, u32 reg, u64 val)
{
- u64 val, tmp;
-
- /* Support everything but TME */
- val = kvm_read_vm_id_reg(kvm, SYS_ID_AA64ISAR0_EL1);
- val &= ~NV_FTR(ISAR0, TME);
- kvm_set_vm_id_reg(kvm, SYS_ID_AA64ISAR0_EL1, val);
-
- /* Support everything but Spec Invalidation and LS64 */
- val = kvm_read_vm_id_reg(kvm, SYS_ID_AA64ISAR1_EL1);
- val &= ~(NV_FTR(ISAR1, LS64) |
- NV_FTR(ISAR1, SPECRES));
- kvm_set_vm_id_reg(kvm, SYS_ID_AA64ISAR1_EL1, val);
-
- /* No AMU, MPAM, S-EL2, or RAS */
- val = kvm_read_vm_id_reg(kvm, SYS_ID_AA64PFR0_EL1);
- val &= ~(GENMASK_ULL(55, 52) |
- NV_FTR(PFR0, AMU) |
- NV_FTR(PFR0, MPAM) |
- NV_FTR(PFR0, SEL2) |
- NV_FTR(PFR0, RAS) |
- NV_FTR(PFR0, EL3) |
- NV_FTR(PFR0, EL2) |
- NV_FTR(PFR0, EL1) |
- NV_FTR(PFR0, EL0));
- /* 64bit only at any EL */
- val |= FIELD_PREP(NV_FTR(PFR0, EL0), 0b0001);
- val |= FIELD_PREP(NV_FTR(PFR0, EL1), 0b0001);
- val |= FIELD_PREP(NV_FTR(PFR0, EL2), 0b0001);
- val |= FIELD_PREP(NV_FTR(PFR0, EL3), 0b0001);
- kvm_set_vm_id_reg(kvm, SYS_ID_AA64PFR0_EL1, val);
-
- /* Only support BTI, SSBS, CSV2_frac */
- val = kvm_read_vm_id_reg(kvm, SYS_ID_AA64PFR1_EL1);
- val &= (NV_FTR(PFR1, BT) |
- NV_FTR(PFR1, SSBS) |
- NV_FTR(PFR1, CSV2_frac));
- kvm_set_vm_id_reg(kvm, SYS_ID_AA64PFR1_EL1, val);
-
- /* Hide ECV, ExS, Secure Memory */
- val = kvm_read_vm_id_reg(kvm, SYS_ID_AA64MMFR0_EL1);
- val &= ~(NV_FTR(MMFR0, ECV) |
- NV_FTR(MMFR0, EXS) |
- NV_FTR(MMFR0, TGRAN4_2) |
- NV_FTR(MMFR0, TGRAN16_2) |
- NV_FTR(MMFR0, TGRAN64_2) |
- NV_FTR(MMFR0, SNSMEM));
-
- /* Disallow unsupported S2 page sizes */
- switch (PAGE_SIZE) {
- case SZ_64K:
- val |= FIELD_PREP(NV_FTR(MMFR0, TGRAN16_2), 0b0001);
- fallthrough;
- case SZ_16K:
- val |= FIELD_PREP(NV_FTR(MMFR0, TGRAN4_2), 0b0001);
- fallthrough;
- case SZ_4K:
- /* Support everything */
+ switch (reg) {
+ case SYS_ID_AA64ISAR0_EL1:
+ /* Support everything but TME */
+ val &= ~ID_AA64ISAR0_EL1_TME;
break;
- }
- /*
- * Since we can't support a guest S2 page size smaller than
- * the host's own page size (due to KVM only populating its
- * own S2 using the kernel's page size), advertise the
- * limitation using FEAT_GTG.
- */
- switch (PAGE_SIZE) {
- case SZ_4K:
- val |= FIELD_PREP(NV_FTR(MMFR0, TGRAN4_2), 0b0010);
- fallthrough;
- case SZ_16K:
- val |= FIELD_PREP(NV_FTR(MMFR0, TGRAN16_2), 0b0010);
- fallthrough;
- case SZ_64K:
- val |= FIELD_PREP(NV_FTR(MMFR0, TGRAN64_2), 0b0010);
+
+ case SYS_ID_AA64ISAR1_EL1:
+ /* Support everything but LS64 and Spec Invalidation */
+ val &= ~(ID_AA64ISAR1_EL1_LS64 |
+ ID_AA64ISAR1_EL1_SPECRES);
+ break;
+
+ case SYS_ID_AA64PFR0_EL1:
+ /* No RME, AMU, MPAM, S-EL2, or RAS */
+ val &= ~(ID_AA64PFR0_EL1_RME |
+ ID_AA64PFR0_EL1_AMU |
+ ID_AA64PFR0_EL1_MPAM |
+ ID_AA64PFR0_EL1_SEL2 |
+ ID_AA64PFR0_EL1_RAS |
+ ID_AA64PFR0_EL1_EL3 |
+ ID_AA64PFR0_EL1_EL2 |
+ ID_AA64PFR0_EL1_EL1 |
+ ID_AA64PFR0_EL1_EL0);
+ /* 64bit only at any EL */
+ val |= SYS_FIELD_PREP_ENUM(ID_AA64PFR0_EL1, EL0, IMP);
+ val |= SYS_FIELD_PREP_ENUM(ID_AA64PFR0_EL1, EL1, IMP);
+ val |= SYS_FIELD_PREP_ENUM(ID_AA64PFR0_EL1, EL2, IMP);
+ val |= SYS_FIELD_PREP_ENUM(ID_AA64PFR0_EL1, EL3, IMP);
+ break;
+
+ case SYS_ID_AA64PFR1_EL1:
+ /* Only support BTI, SSBS, CSV2_frac */
+ val &= (ID_AA64PFR1_EL1_BT |
+ ID_AA64PFR1_EL1_SSBS |
+ ID_AA64PFR1_EL1_CSV2_frac);
+ break;
+
+ case SYS_ID_AA64MMFR0_EL1:
+ /* Hide ExS, Secure Memory */
+ val &= ~(ID_AA64MMFR0_EL1_EXS |
+ ID_AA64MMFR0_EL1_TGRAN4_2 |
+ ID_AA64MMFR0_EL1_TGRAN16_2 |
+ ID_AA64MMFR0_EL1_TGRAN64_2 |
+ ID_AA64MMFR0_EL1_SNSMEM);
+
+ /* Hide CNTPOFF if present */
+ val = ID_REG_LIMIT_FIELD_ENUM(val, ID_AA64MMFR0_EL1, ECV, IMP);
+
+ /* Disallow unsupported S2 page sizes */
+ switch (PAGE_SIZE) {
+ case SZ_64K:
+ val |= SYS_FIELD_PREP_ENUM(ID_AA64MMFR0_EL1, TGRAN16_2, NI);
+ fallthrough;
+ case SZ_16K:
+ val |= SYS_FIELD_PREP_ENUM(ID_AA64MMFR0_EL1, TGRAN4_2, NI);
+ fallthrough;
+ case SZ_4K:
+ /* Support everything */
+ break;
+ }
+
+ /*
+ * Since we can't support a guest S2 page size smaller
+ * than the host's own page size (due to KVM only
+ * populating its own S2 using the kernel's page
+ * size), advertise the limitation using FEAT_GTG.
+ */
+ switch (PAGE_SIZE) {
+ case SZ_4K:
+ val |= SYS_FIELD_PREP_ENUM(ID_AA64MMFR0_EL1, TGRAN4_2, IMP);
+ fallthrough;
+ case SZ_16K:
+ val |= SYS_FIELD_PREP_ENUM(ID_AA64MMFR0_EL1, TGRAN16_2, IMP);
+ fallthrough;
+ case SZ_64K:
+ val |= SYS_FIELD_PREP_ENUM(ID_AA64MMFR0_EL1, TGRAN64_2, IMP);
+ break;
+ }
+
+ /* Cap PARange to 48bits */
+ val = ID_REG_LIMIT_FIELD_ENUM(val, ID_AA64MMFR0_EL1, PARANGE, 48);
+ break;
+
+ case SYS_ID_AA64MMFR1_EL1:
+ val &= (ID_AA64MMFR1_EL1_HCX |
+ ID_AA64MMFR1_EL1_PAN |
+ ID_AA64MMFR1_EL1_LO |
+ ID_AA64MMFR1_EL1_HPDS |
+ ID_AA64MMFR1_EL1_VH |
+ ID_AA64MMFR1_EL1_VMIDBits);
+ /* FEAT_E2H0 implies no VHE */
+ if (test_bit(KVM_ARM_VCPU_HAS_EL2_E2H0, kvm->arch.vcpu_features))
+ val &= ~ID_AA64MMFR1_EL1_VH;
+ break;
+
+ case SYS_ID_AA64MMFR2_EL1:
+ val &= ~(ID_AA64MMFR2_EL1_BBM |
+ ID_AA64MMFR2_EL1_TTL |
+ GENMASK_ULL(47, 44) |
+ ID_AA64MMFR2_EL1_ST |
+ ID_AA64MMFR2_EL1_CCIDX |
+ ID_AA64MMFR2_EL1_VARange);
+
+ /* Force TTL support */
+ val |= SYS_FIELD_PREP_ENUM(ID_AA64MMFR2_EL1, TTL, IMP);
+ break;
+
+ case SYS_ID_AA64MMFR4_EL1:
+ /*
+ * You get EITHER
+ *
+ * - FEAT_VHE without FEAT_E2H0
+ * - FEAT_NV limited to FEAT_NV2
+ * - HCR_EL2.NV1 being RES0
+ *
+ * OR
+ *
+ * - FEAT_E2H0 without FEAT_VHE nor FEAT_NV
+ *
+ * Life is too short for anything else.
+ */
+ if (test_bit(KVM_ARM_VCPU_HAS_EL2_E2H0, kvm->arch.vcpu_features)) {
+ val = 0;
+ } else {
+ val = SYS_FIELD_PREP_ENUM(ID_AA64MMFR4_EL1, NV_frac, NV2_ONLY);
+ val |= SYS_FIELD_PREP_ENUM(ID_AA64MMFR4_EL1, E2H0, NI_NV1);
+ }
+ break;
+
+ case SYS_ID_AA64DFR0_EL1:
+ /* Only limited support for PMU, Debug, BPs, WPs, and HPMN0 */
+ val &= (ID_AA64DFR0_EL1_PMUVer |
+ ID_AA64DFR0_EL1_WRPs |
+ ID_AA64DFR0_EL1_BRPs |
+ ID_AA64DFR0_EL1_DebugVer|
+ ID_AA64DFR0_EL1_HPMN0);
+
+ /* Cap Debug to ARMv8.1 */
+ val = ID_REG_LIMIT_FIELD_ENUM(val, ID_AA64DFR0_EL1, DebugVer, VHE);
break;
}
- /* Cap PARange to 48bits */
- tmp = FIELD_GET(NV_FTR(MMFR0, PARANGE), val);
- if (tmp > 0b0101) {
- val &= ~NV_FTR(MMFR0, PARANGE);
- val |= FIELD_PREP(NV_FTR(MMFR0, PARANGE), 0b0101);
- }
- kvm_set_vm_id_reg(kvm, SYS_ID_AA64MMFR0_EL1, val);
-
- val = kvm_read_vm_id_reg(kvm, SYS_ID_AA64MMFR1_EL1);
- val &= (NV_FTR(MMFR1, HCX) |
- NV_FTR(MMFR1, PAN) |
- NV_FTR(MMFR1, LO) |
- NV_FTR(MMFR1, HPDS) |
- NV_FTR(MMFR1, VH) |
- NV_FTR(MMFR1, VMIDBits));
- kvm_set_vm_id_reg(kvm, SYS_ID_AA64MMFR1_EL1, val);
-
- val = kvm_read_vm_id_reg(kvm, SYS_ID_AA64MMFR2_EL1);
- val &= ~(NV_FTR(MMFR2, BBM) |
- NV_FTR(MMFR2, TTL) |
- GENMASK_ULL(47, 44) |
- NV_FTR(MMFR2, ST) |
- NV_FTR(MMFR2, CCIDX) |
- NV_FTR(MMFR2, VARange));
-
- /* Force TTL support */
- val |= FIELD_PREP(NV_FTR(MMFR2, TTL), 0b0001);
- kvm_set_vm_id_reg(kvm, SYS_ID_AA64MMFR2_EL1, val);
-
- val = 0;
- if (!cpus_have_final_cap(ARM64_HAS_HCR_NV1))
- val |= FIELD_PREP(NV_FTR(MMFR4, E2H0),
- ID_AA64MMFR4_EL1_E2H0_NI_NV1);
- kvm_set_vm_id_reg(kvm, SYS_ID_AA64MMFR4_EL1, val);
-
- /* Only limited support for PMU, Debug, BPs, WPs, and HPMN0 */
- val = kvm_read_vm_id_reg(kvm, SYS_ID_AA64DFR0_EL1);
- val &= (NV_FTR(DFR0, PMUVer) |
- NV_FTR(DFR0, WRPs) |
- NV_FTR(DFR0, BRPs) |
- NV_FTR(DFR0, DebugVer) |
- NV_FTR(DFR0, HPMN0));
-
- /* Cap Debug to ARMv8.1 */
- tmp = FIELD_GET(NV_FTR(DFR0, DebugVer), val);
- if (tmp > 0b0111) {
- val &= ~NV_FTR(DFR0, DebugVer);
- val |= FIELD_PREP(NV_FTR(DFR0, DebugVer), 0b0111);
- }
- kvm_set_vm_id_reg(kvm, SYS_ID_AA64DFR0_EL1, val);
+
+ return val;
}
u64 kvm_vcpu_apply_reg_masks(const struct kvm_vcpu *vcpu,
@@ -981,8 +999,6 @@ int kvm_init_nv_sysregs(struct kvm_vcpu *vcpu)
if (!kvm->arch.sysreg_masks)
return -ENOMEM;
- limit_nv_id_regs(kvm);
-
/* VTTBR_EL2 */
res0 = res1 = 0;
if (!kvm_has_feat_enum(kvm, ID_AA64MMFR1_EL1, VMIDBits, 16))
@@ -1021,10 +1037,11 @@ int kvm_init_nv_sysregs(struct kvm_vcpu *vcpu)
res0 |= HCR_FIEN;
if (!kvm_has_feat(kvm, ID_AA64MMFR2_EL1, FWB, IMP))
res0 |= HCR_FWB;
- if (!kvm_has_feat(kvm, ID_AA64MMFR2_EL1, NV, NV2))
- res0 |= HCR_NV2;
- if (!kvm_has_feat(kvm, ID_AA64MMFR2_EL1, NV, IMP))
- res0 |= (HCR_AT | HCR_NV1 | HCR_NV);
+ /* Implementation choice: NV2 is the only supported config */
+ if (!kvm_has_feat(kvm, ID_AA64MMFR4_EL1, NV_frac, NV2_ONLY))
+ res0 |= (HCR_NV2 | HCR_NV | HCR_AT);
+ if (!kvm_has_feat(kvm, ID_AA64MMFR4_EL1, E2H0, NI))
+ res0 |= HCR_NV1;
if (!(kvm_vcpu_has_feature(kvm, KVM_ARM_VCPU_PTRAUTH_ADDRESS) &&
kvm_vcpu_has_feature(kvm, KVM_ARM_VCPU_PTRAUTH_GENERIC)))
res0 |= (HCR_API | HCR_APK);
@@ -1034,6 +1051,8 @@ int kvm_init_nv_sysregs(struct kvm_vcpu *vcpu)
res0 |= (HCR_TEA | HCR_TERR);
if (!kvm_has_feat(kvm, ID_AA64MMFR1_EL1, LO, IMP))
res0 |= HCR_TLOR;
+ if (!kvm_has_feat(kvm, ID_AA64MMFR1_EL1, VH, IMP))
+ res0 |= HCR_E2H;
if (!kvm_has_feat(kvm, ID_AA64MMFR4_EL1, E2H0, IMP))
res1 |= HCR_E2H;
set_sysreg_masks(kvm, HCR_EL2, res0, res1);
@@ -1290,6 +1309,15 @@ int kvm_init_nv_sysregs(struct kvm_vcpu *vcpu)
res0 |= GENMASK(11, 8);
set_sysreg_masks(kvm, CNTHCTL_EL2, res0, res1);
+ /* ICH_HCR_EL2 */
+ res0 = ICH_HCR_EL2_RES0;
+ res1 = ICH_HCR_EL2_RES1;
+ if (!(kvm_vgic_global_state.ich_vtr_el2 & ICH_VTR_EL2_TDS))
+ res0 |= ICH_HCR_EL2_TDIR;
+ /* No GICv4 is presented to the guest */
+ res0 |= ICH_HCR_EL2_DVIM | ICH_HCR_EL2_vSGIEOICount;
+ set_sysreg_masks(kvm, ICH_HCR_EL2, res0, res1);
+
out:
for (enum vcpu_sysreg sr = __SANITISED_REG_START__; sr < NR_SYS_REGS; sr++)
(void)__vcpu_sys_reg(vcpu, sr);
@@ -1309,4 +1337,8 @@ void check_nested_vcpu_requests(struct kvm_vcpu *vcpu)
}
write_unlock(&vcpu->kvm->mmu_lock);
}
+
+ /* Must be last, as may switch context! */
+ if (kvm_check_request(KVM_REQ_GUEST_HYP_IRQ_PENDING, vcpu))
+ kvm_inject_nested_irq(vcpu);
}
diff --git a/arch/arm64/kvm/pkvm.c b/arch/arm64/kvm/pkvm.c
index 930b677eb9b0..0f89157d31fd 100644
--- a/arch/arm64/kvm/pkvm.c
+++ b/arch/arm64/kvm/pkvm.c
@@ -111,6 +111,29 @@ static void __pkvm_destroy_hyp_vm(struct kvm *host_kvm)
host_kvm->arch.pkvm.handle = 0;
free_hyp_memcache(&host_kvm->arch.pkvm.teardown_mc);
+ free_hyp_memcache(&host_kvm->arch.pkvm.stage2_teardown_mc);
+}
+
+static int __pkvm_create_hyp_vcpu(struct kvm_vcpu *vcpu)
+{
+ size_t hyp_vcpu_sz = PAGE_ALIGN(PKVM_HYP_VCPU_SIZE);
+ pkvm_handle_t handle = vcpu->kvm->arch.pkvm.handle;
+ void *hyp_vcpu;
+ int ret;
+
+ vcpu->arch.pkvm_memcache.flags |= HYP_MEMCACHE_ACCOUNT_STAGE2;
+
+ hyp_vcpu = alloc_pages_exact(hyp_vcpu_sz, GFP_KERNEL_ACCOUNT);
+ if (!hyp_vcpu)
+ return -ENOMEM;
+
+ ret = kvm_call_hyp_nvhe(__pkvm_init_vcpu, handle, vcpu, hyp_vcpu);
+ if (!ret)
+ vcpu_set_flag(vcpu, VCPU_PKVM_FINALIZED);
+ else
+ free_pages_exact(hyp_vcpu, hyp_vcpu_sz);
+
+ return ret;
}
/*
@@ -125,11 +148,8 @@ static void __pkvm_destroy_hyp_vm(struct kvm *host_kvm)
*/
static int __pkvm_create_hyp_vm(struct kvm *host_kvm)
{
- size_t pgd_sz, hyp_vm_sz, hyp_vcpu_sz;
- struct kvm_vcpu *host_vcpu;
- pkvm_handle_t handle;
+ size_t pgd_sz, hyp_vm_sz;
void *pgd, *hyp_vm;
- unsigned long idx;
int ret;
if (host_kvm->created_vcpus < 1)
@@ -161,40 +181,11 @@ static int __pkvm_create_hyp_vm(struct kvm *host_kvm)
if (ret < 0)
goto free_vm;
- handle = ret;
-
- host_kvm->arch.pkvm.handle = handle;
-
- /* Donate memory for the vcpus at hyp and initialize it. */
- hyp_vcpu_sz = PAGE_ALIGN(PKVM_HYP_VCPU_SIZE);
- kvm_for_each_vcpu(idx, host_vcpu, host_kvm) {
- void *hyp_vcpu;
-
- /* Indexing of the vcpus to be sequential starting at 0. */
- if (WARN_ON(host_vcpu->vcpu_idx != idx)) {
- ret = -EINVAL;
- goto destroy_vm;
- }
-
- hyp_vcpu = alloc_pages_exact(hyp_vcpu_sz, GFP_KERNEL_ACCOUNT);
- if (!hyp_vcpu) {
- ret = -ENOMEM;
- goto destroy_vm;
- }
-
- ret = kvm_call_hyp_nvhe(__pkvm_init_vcpu, handle, host_vcpu,
- hyp_vcpu);
- if (ret) {
- free_pages_exact(hyp_vcpu, hyp_vcpu_sz);
- goto destroy_vm;
- }
- }
+ host_kvm->arch.pkvm.handle = ret;
+ host_kvm->arch.pkvm.stage2_teardown_mc.flags |= HYP_MEMCACHE_ACCOUNT_STAGE2;
+ kvm_account_pgtable_pages(pgd, pgd_sz / PAGE_SIZE);
return 0;
-
-destroy_vm:
- __pkvm_destroy_hyp_vm(host_kvm);
- return ret;
free_vm:
free_pages_exact(hyp_vm, hyp_vm_sz);
free_pgd:
@@ -214,6 +205,18 @@ int pkvm_create_hyp_vm(struct kvm *host_kvm)
return ret;
}
+int pkvm_create_hyp_vcpu(struct kvm_vcpu *vcpu)
+{
+ int ret = 0;
+
+ mutex_lock(&vcpu->kvm->arch.config_lock);
+ if (!vcpu_get_flag(vcpu, VCPU_PKVM_FINALIZED))
+ ret = __pkvm_create_hyp_vcpu(vcpu);
+ mutex_unlock(&vcpu->kvm->arch.config_lock);
+
+ return ret;
+}
+
void pkvm_destroy_hyp_vm(struct kvm *host_kvm)
{
mutex_lock(&host_kvm->arch.config_lock);
diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c
index 6c5950b9ceac..a1bc10d7116a 100644
--- a/arch/arm64/kvm/pmu-emul.c
+++ b/arch/arm64/kvm/pmu-emul.c
@@ -17,8 +17,6 @@
#define PERF_ATTR_CFG1_COUNTER_64BIT BIT(0)
-DEFINE_STATIC_KEY_FALSE(kvm_arm_pmu_available);
-
static LIST_HEAD(arm_pmus);
static DEFINE_MUTEX(arm_pmus_lock);
@@ -26,6 +24,12 @@ static void kvm_pmu_create_perf_event(struct kvm_pmc *pmc);
static void kvm_pmu_release_perf_event(struct kvm_pmc *pmc);
static bool kvm_pmu_counter_is_enabled(struct kvm_pmc *pmc);
+bool kvm_supports_guest_pmuv3(void)
+{
+ guard(mutex)(&arm_pmus_lock);
+ return !list_empty(&arm_pmus);
+}
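The guard(mutex) above comes from <linux/cleanup.h>: it takes the mutex and releases it automatically when the enclosing scope ends, so early returns need no unlock label. A self-contained sketch of the pattern (the lock and function below are illustrative only):

	#include <linux/cleanup.h>
	#include <linux/mutex.h>

	static DEFINE_MUTEX(example_lock);

	static int example_locked_check(bool fail)
	{
		guard(mutex)(&example_lock);	/* dropped on every return path */

		if (fail)
			return -ENODEV;

		return 0;
	}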
+
static struct kvm_vcpu *kvm_pmc_to_vcpu(const struct kvm_pmc *pmc)
{
return container_of(pmc, struct kvm_vcpu, arch.pmu.pmc[pmc->idx]);
@@ -150,9 +154,6 @@ static u64 kvm_pmu_get_pmc_value(struct kvm_pmc *pmc)
*/
u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx)
{
- if (!kvm_vcpu_has_pmu(vcpu))
- return 0;
-
return kvm_pmu_get_pmc_value(kvm_vcpu_idx_to_pmc(vcpu, select_idx));
}
@@ -191,13 +192,23 @@ static void kvm_pmu_set_pmc_value(struct kvm_pmc *pmc, u64 val, bool force)
*/
void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val)
{
- if (!kvm_vcpu_has_pmu(vcpu))
- return;
-
kvm_pmu_set_pmc_value(kvm_vcpu_idx_to_pmc(vcpu, select_idx), val, false);
}
/**
+ * kvm_pmu_set_counter_value_user - set PMU counter value from user
+ * @vcpu: The vcpu pointer
+ * @select_idx: The counter index
+ * @val: The counter value
+ */
+void kvm_pmu_set_counter_value_user(struct kvm_vcpu *vcpu, u64 select_idx, u64 val)
+{
+ kvm_pmu_release_perf_event(kvm_vcpu_idx_to_pmc(vcpu, select_idx));
+ __vcpu_sys_reg(vcpu, counter_index_to_reg(select_idx)) = val;
+ kvm_make_request(KVM_REQ_RELOAD_PMU, vcpu);
+}
+
+/**
* kvm_pmu_release_perf_event - remove the perf event
* @pmc: The PMU counter pointer
*/
@@ -248,20 +259,6 @@ void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu)
}
/**
- * kvm_pmu_vcpu_reset - reset pmu state for cpu
- * @vcpu: The vcpu pointer
- *
- */
-void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu)
-{
- unsigned long mask = kvm_pmu_implemented_counter_mask(vcpu);
- int i;
-
- for_each_set_bit(i, &mask, 32)
- kvm_pmu_stop_counter(kvm_vcpu_idx_to_pmc(vcpu, i));
-}
-
-/**
* kvm_pmu_vcpu_destroy - free perf event of PMU for cpu
* @vcpu: The vcpu pointer
*
@@ -350,7 +347,7 @@ void kvm_pmu_reprogram_counter_mask(struct kvm_vcpu *vcpu, u64 val)
{
int i;
- if (!kvm_vcpu_has_pmu(vcpu) || !val)
+ if (!val)
return;
for (i = 0; i < KVM_ARMV8_PMU_MAX_COUNTERS; i++) {
@@ -401,9 +398,6 @@ static void kvm_pmu_update_state(struct kvm_vcpu *vcpu)
struct kvm_pmu *pmu = &vcpu->arch.pmu;
bool overflow;
- if (!kvm_vcpu_has_pmu(vcpu))
- return;
-
overflow = kvm_pmu_overflow_status(vcpu);
if (pmu->irq_level == overflow)
return;
@@ -599,9 +593,6 @@ void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val)
{
int i;
- if (!kvm_vcpu_has_pmu(vcpu))
- return;
-
/* Fixup PMCR_EL0 to reconcile the PMU version and the LP bit */
if (!kvm_has_feat(vcpu->kvm, ID_AA64DFR0_EL1, PMUVer, V3P5))
val &= ~ARMV8_PMU_PMCR_LP;
@@ -673,6 +664,20 @@ static bool kvm_pmc_counts_at_el2(struct kvm_pmc *pmc)
return kvm_pmc_read_evtreg(pmc) & ARMV8_PMU_INCLUDE_EL2;
}
+static int kvm_map_pmu_event(struct kvm *kvm, unsigned int eventsel)
+{
+ struct arm_pmu *pmu = kvm->arch.arm_pmu;
+
+ /*
+ * The CPU PMU likely isn't PMUv3; let the driver provide a mapping
+ * for the guest's PMUv3 event ID.
+ */
+ if (unlikely(pmu->map_pmuv3_event))
+ return pmu->map_pmuv3_event(eventsel);
+
+ return eventsel;
+}
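The map_pmuv3_event() callback is the hook a non-PMUv3 CPU PMU driver fills in for this translation; the contract implied by the caller is to return the hardware event number, or a negative errno when no mapping exists (the counter is then simply left without a perf event). A hypothetical driver-side mapping, with made-up hardware encodings, might look like:

	static int example_map_pmuv3_event(unsigned int pmuv3_eventsel)
	{
		switch (pmuv3_eventsel) {
		case ARMV8_PMUV3_PERFCTR_CPU_CYCLES:
			return 0x02;		/* fictitious HW encoding */
		case ARMV8_PMUV3_PERFCTR_INST_RETIRED:
			return 0x8c;		/* fictitious HW encoding */
		default:
			return -EOPNOTSUPP;	/* no mapping for this event */
		}
	}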
+
/**
* kvm_pmu_create_perf_event - create a perf event for a counter
* @pmc: Counter context
@@ -683,7 +688,8 @@ static void kvm_pmu_create_perf_event(struct kvm_pmc *pmc)
struct arm_pmu *arm_pmu = vcpu->kvm->arch.arm_pmu;
struct perf_event *event;
struct perf_event_attr attr;
- u64 eventsel, evtreg;
+ int eventsel;
+ u64 evtreg;
evtreg = kvm_pmc_read_evtreg(pmc);
@@ -709,6 +715,14 @@ static void kvm_pmu_create_perf_event(struct kvm_pmc *pmc)
!test_bit(eventsel, vcpu->kvm->arch.pmu_filter))
return;
+ /*
+ * Don't create an event if we're running on hardware that requires
+ * PMUv3 event translation and we couldn't find a valid mapping.
+ */
+ eventsel = kvm_map_pmu_event(vcpu->kvm, eventsel);
+ if (eventsel < 0)
+ return;
+
memset(&attr, 0, sizeof(struct perf_event_attr));
attr.type = arm_pmu->pmu.type;
attr.size = sizeof(attr);
@@ -766,9 +780,6 @@ void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
struct kvm_pmc *pmc = kvm_vcpu_idx_to_pmc(vcpu, select_idx);
u64 reg;
- if (!kvm_vcpu_has_pmu(vcpu))
- return;
-
reg = counter_index_to_evtreg(pmc->idx);
__vcpu_sys_reg(vcpu, reg) = data & kvm_pmu_evtyper_mask(vcpu->kvm);
@@ -786,29 +797,23 @@ void kvm_host_pmu_init(struct arm_pmu *pmu)
if (!pmuv3_implemented(kvm_arm_pmu_get_pmuver_limit()))
return;
- mutex_lock(&arm_pmus_lock);
+ guard(mutex)(&arm_pmus_lock);
entry = kmalloc(sizeof(*entry), GFP_KERNEL);
if (!entry)
- goto out_unlock;
+ return;
entry->arm_pmu = pmu;
list_add_tail(&entry->entry, &arm_pmus);
-
- if (list_is_singular(&arm_pmus))
- static_branch_enable(&kvm_arm_pmu_available);
-
-out_unlock:
- mutex_unlock(&arm_pmus_lock);
}
static struct arm_pmu *kvm_pmu_probe_armpmu(void)
{
- struct arm_pmu *tmp, *pmu = NULL;
struct arm_pmu_entry *entry;
+ struct arm_pmu *pmu;
int cpu;
- mutex_lock(&arm_pmus_lock);
+ guard(mutex)(&arm_pmus_lock);
/*
* It is safe to use a stale cpu to iterate the list of PMUs so long as
@@ -829,42 +834,62 @@ static struct arm_pmu *kvm_pmu_probe_armpmu(void)
*/
cpu = raw_smp_processor_id();
list_for_each_entry(entry, &arm_pmus, entry) {
- tmp = entry->arm_pmu;
+ pmu = entry->arm_pmu;
- if (cpumask_test_cpu(cpu, &tmp->supported_cpus)) {
- pmu = tmp;
- break;
- }
+ if (cpumask_test_cpu(cpu, &pmu->supported_cpus))
+ return pmu;
}
- mutex_unlock(&arm_pmus_lock);
+ return NULL;
+}
+
+static u64 __compute_pmceid(struct arm_pmu *pmu, bool pmceid1)
+{
+ u32 hi[2], lo[2];
+
+ bitmap_to_arr32(lo, pmu->pmceid_bitmap, ARMV8_PMUV3_MAX_COMMON_EVENTS);
+ bitmap_to_arr32(hi, pmu->pmceid_ext_bitmap, ARMV8_PMUV3_MAX_COMMON_EVENTS);
- return pmu;
+ return ((u64)hi[pmceid1] << 32) | lo[pmceid1];
+}
+
+static u64 compute_pmceid0(struct arm_pmu *pmu)
+{
+ u64 val = __compute_pmceid(pmu, 0);
+
+ /* always support SW_INCR */
+ val |= BIT(ARMV8_PMUV3_PERFCTR_SW_INCR);
+ /* always support CHAIN */
+ val |= BIT(ARMV8_PMUV3_PERFCTR_CHAIN);
+ return val;
+}
+
+static u64 compute_pmceid1(struct arm_pmu *pmu)
+{
+ u64 val = __compute_pmceid(pmu, 1);
+
+ /*
+ * Don't advertise STALL_SLOT*, as PMMIR_EL0 is handled
+ * as RAZ
+ */
+ val &= ~(BIT_ULL(ARMV8_PMUV3_PERFCTR_STALL_SLOT - 32) |
+ BIT_ULL(ARMV8_PMUV3_PERFCTR_STALL_SLOT_FRONTEND - 32) |
+ BIT_ULL(ARMV8_PMUV3_PERFCTR_STALL_SLOT_BACKEND - 32));
+ return val;
}
u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1)
{
+ struct arm_pmu *cpu_pmu = vcpu->kvm->arch.arm_pmu;
unsigned long *bmap = vcpu->kvm->arch.pmu_filter;
u64 val, mask = 0;
int base, i, nr_events;
- if (!kvm_vcpu_has_pmu(vcpu))
- return 0;
-
if (!pmceid1) {
- val = read_sysreg(pmceid0_el0);
- /* always support CHAIN */
- val |= BIT(ARMV8_PMUV3_PERFCTR_CHAIN);
+ val = compute_pmceid0(cpu_pmu);
base = 0;
} else {
- val = read_sysreg(pmceid1_el0);
- /*
- * Don't advertise STALL_SLOT*, as PMMIR_EL0 is handled
- * as RAZ
- */
- val &= ~(BIT_ULL(ARMV8_PMUV3_PERFCTR_STALL_SLOT - 32) |
- BIT_ULL(ARMV8_PMUV3_PERFCTR_STALL_SLOT_FRONTEND - 32) |
- BIT_ULL(ARMV8_PMUV3_PERFCTR_STALL_SLOT_BACKEND - 32));
+ val = compute_pmceid1(cpu_pmu);
base = 32;
}
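To make the PMCEID derivation concrete: bitmap_to_arr32() splits the driver's common-event bitmap into two 32-bit words (event IDs 0x00-0x1F and 0x20-0x3F), while the extended bitmap (IDs 0x4000-0x403F) provides the matching upper halves. A worked sketch, assuming a host PMU that only advertises CPU_CYCLES (0x11) and INST_RETIRED (0x08):

	static u64 example_pmceid0(void)
	{
		u32 lo0 = BIT(0x11) | BIT(0x08);	/* common events 0x00-0x1F */
		u32 hi0 = 0;				/* extended events 0x4000-0x401F */

		/* compute_pmceid0() also forces SW_INCR (0x00) and CHAIN (0x1E) */
		return ((u64)hi0 << 32) | lo0 | BIT(0x00) | BIT(0x1E);
	}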
@@ -900,9 +925,6 @@ void kvm_vcpu_reload_pmu(struct kvm_vcpu *vcpu)
int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu)
{
- if (!kvm_vcpu_has_pmu(vcpu))
- return 0;
-
if (!vcpu->arch.pmu.created)
return -EINVAL;
@@ -925,9 +947,6 @@ int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu)
return -EINVAL;
}
- /* One-off reload of the PMU on first run */
- kvm_make_request(KVM_REQ_RELOAD_PMU, vcpu);
-
return 0;
}
@@ -995,6 +1014,13 @@ u8 kvm_arm_pmu_get_max_counters(struct kvm *kvm)
struct arm_pmu *arm_pmu = kvm->arch.arm_pmu;
/*
+ * PMUv3 requires that all event counters are capable of counting any
+ * event, though the same may not be true of non-PMUv3 hardware.
+ */
+ if (cpus_have_final_cap(ARM64_WORKAROUND_PMUV3_IMPDEF_TRAPS))
+ return 1;
+
+ /*
* The arm_pmu->cntr_mask considers the fixed counter(s) as well.
* Ignore those and return only the general-purpose counters.
*/
@@ -1205,13 +1231,26 @@ int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
u8 kvm_arm_pmu_get_pmuver_limit(void)
{
- u64 tmp;
+ unsigned int pmuver;
+
+ pmuver = SYS_FIELD_GET(ID_AA64DFR0_EL1, PMUVer,
+ read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1));
+
+ /*
+ * Spoof a barebones PMUv3 implementation if the system supports IMPDEF
+ * traps of the PMUv3 sysregs
+ */
+ if (cpus_have_final_cap(ARM64_WORKAROUND_PMUV3_IMPDEF_TRAPS))
+ return ID_AA64DFR0_EL1_PMUVer_IMP;
+
+ /*
+ * Otherwise, treat IMPLEMENTATION DEFINED functionality as
+ * unimplemented
+ */
+ if (pmuver == ID_AA64DFR0_EL1_PMUVer_IMP_DEF)
+ return 0;
- tmp = read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1);
- tmp = cpuid_feature_cap_perfmon_field(tmp,
- ID_AA64DFR0_EL1_PMUVer_SHIFT,
- ID_AA64DFR0_EL1_PMUVer_V3P5);
- return FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer), tmp);
+ return min(pmuver, ID_AA64DFR0_EL1_PMUVer_V3P5);
}
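A quick worked example of the clamp: a host reporting PMUVer == 0b0111 (V3P7) ends up with min(0b0111, 0b0110) == ID_AA64DFR0_EL1_PMUVer_V3P5, so the guest is limited to PMUv3p5. IMPLEMENTATION DEFINED (0b1111) has to be filtered out beforehand because it is numerically larger than every architected version while advertising no PMUv3 at all, which is why the IMP_DEF check sits before the min(). (Field values quoted from the usual ID register enumeration; treat them as illustrative.)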
/**
@@ -1231,9 +1270,6 @@ void kvm_pmu_nested_transition(struct kvm_vcpu *vcpu)
unsigned long mask;
int i;
- if (!kvm_vcpu_has_pmu(vcpu))
- return;
-
mask = __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
for_each_set_bit(i, &mask, 32) {
struct kvm_pmc *pmc = kvm_vcpu_idx_to_pmc(vcpu, i);
diff --git a/arch/arm64/kvm/pmu.c b/arch/arm64/kvm/pmu.c
index 0b3adf3e17b4..6b48a3d16d0d 100644
--- a/arch/arm64/kvm/pmu.c
+++ b/arch/arm64/kvm/pmu.c
@@ -41,7 +41,7 @@ void kvm_set_pmu_events(u64 set, struct perf_event_attr *attr)
{
struct kvm_pmu_events *pmu = kvm_get_pmu_events();
- if (!kvm_arm_support_pmu_v3() || !kvm_pmu_switch_needed(attr))
+ if (!system_supports_pmuv3() || !kvm_pmu_switch_needed(attr))
return;
if (!attr->exclude_host)
@@ -57,7 +57,7 @@ void kvm_clr_pmu_events(u64 clr)
{
struct kvm_pmu_events *pmu = kvm_get_pmu_events();
- if (!kvm_arm_support_pmu_v3())
+ if (!system_supports_pmuv3())
return;
pmu->events_host &= ~clr;
@@ -133,7 +133,7 @@ void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu)
struct kvm_pmu_events *pmu;
u64 events_guest, events_host;
- if (!kvm_arm_support_pmu_v3() || !has_vhe())
+ if (!system_supports_pmuv3() || !has_vhe())
return;
preempt_disable();
@@ -154,7 +154,7 @@ void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu)
struct kvm_pmu_events *pmu;
u64 events_guest, events_host;
- if (!kvm_arm_support_pmu_v3() || !has_vhe())
+ if (!system_supports_pmuv3() || !has_vhe())
return;
pmu = kvm_get_pmu_events();
@@ -180,7 +180,7 @@ bool kvm_set_pmuserenr(u64 val)
struct kvm_cpu_context *hctxt;
struct kvm_vcpu *vcpu;
- if (!kvm_arm_support_pmu_v3() || !has_vhe())
+ if (!system_supports_pmuv3() || !has_vhe())
return false;
vcpu = kvm_get_running_vcpu();
diff --git a/arch/arm64/kvm/ptdump.c b/arch/arm64/kvm/ptdump.c
index e4a342e903e2..098416d7e5c2 100644
--- a/arch/arm64/kvm/ptdump.c
+++ b/arch/arm64/kvm/ptdump.c
@@ -52,8 +52,8 @@ static const struct ptdump_prot_bits stage2_pte_bits[] = {
.set = "AF",
.clear = " ",
}, {
- .mask = PTE_TABLE_BIT | PTE_VALID,
- .val = PTE_VALID,
+ .mask = PMD_TYPE_MASK,
+ .val = PMD_TYPE_SECT,
.set = "BLK",
.clear = " ",
},
diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c
index 803e11b0dc8f..f82fcc614e13 100644
--- a/arch/arm64/kvm/reset.c
+++ b/arch/arm64/kvm/reset.c
@@ -196,9 +196,6 @@ void kvm_reset_vcpu(struct kvm_vcpu *vcpu)
vcpu->arch.reset_state.reset = false;
spin_unlock(&vcpu->arch.mp_state_lock);
- /* Reset PMU outside of the non-preemptible section */
- kvm_pmu_vcpu_reset(vcpu);
-
preempt_disable();
loaded = (vcpu->cpu != -1);
if (loaded)
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 82430c1e1dd0..005ad28f7306 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -17,6 +17,7 @@
#include <linux/mm.h>
#include <linux/printk.h>
#include <linux/uaccess.h>
+#include <linux/irqchip/arm-gic-v3.h>
#include <asm/arm_pmuv3.h>
#include <asm/cacheflush.h>
@@ -531,7 +532,13 @@ static bool access_gic_sre(struct kvm_vcpu *vcpu,
if (p->is_write)
return ignore_write(vcpu, p);
- p->regval = vcpu->arch.vgic_cpu.vgic_v3.vgic_sre;
+ if (p->Op1 == 4) { /* ICC_SRE_EL2 */
+ p->regval = (ICC_SRE_EL2_ENABLE | ICC_SRE_EL2_SRE |
+ ICC_SRE_EL1_DIB | ICC_SRE_EL1_DFB);
+ } else { /* ICC_SRE_EL1 */
+ p->regval = vcpu->arch.vgic_cpu.vgic_v3.vgic_sre;
+ }
+
return true;
}
@@ -960,6 +967,22 @@ static int get_pmu_evcntr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
return 0;
}
+static int set_pmu_evcntr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
+ u64 val)
+{
+ u64 idx;
+
+ if (r->CRn == 9 && r->CRm == 13 && r->Op2 == 0)
+ /* PMCCNTR_EL0 */
+ idx = ARMV8_PMU_CYCLE_IDX;
+ else
+ /* PMEVCNTRn_EL0 */
+ idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
+
+ kvm_pmu_set_counter_value_user(vcpu, idx, val);
+ return 0;
+}
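The index decode mirrors the architectural encoding of PMEVCNTR<n>_EL0, where CRm carries n[5:3] (on top of 0b10xx) and Op2 carries n[2:0]. Worked example: PMEVCNTR10_EL0 has CRm = 0b1001 and Op2 = 2, so ((CRm & 3) << 3) | (Op2 & 7) = (1 << 3) | 2 = 10. PMCCNTR_EL0 (CRn=9, CRm=13, Op2=0) does not follow that pattern, hence the special case mapping it to the cycle counter index.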
+
static bool access_pmu_evcntr(struct kvm_vcpu *vcpu,
struct sys_reg_params *p,
const struct sys_reg_desc *r)
@@ -1051,25 +1074,10 @@ static bool access_pmu_evtyper(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
static int set_pmreg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r, u64 val)
{
- bool set;
-
- val &= kvm_pmu_accessible_counter_mask(vcpu);
-
- switch (r->reg) {
- case PMOVSSET_EL0:
- /* CRm[1] being set indicates a SET register, and CLR otherwise */
- set = r->CRm & 2;
- break;
- default:
- /* Op2[0] being set indicates a SET register, and CLR otherwise */
- set = r->Op2 & 1;
- break;
- }
+ u64 mask = kvm_pmu_accessible_counter_mask(vcpu);
- if (set)
- __vcpu_sys_reg(vcpu, r->reg) |= val;
- else
- __vcpu_sys_reg(vcpu, r->reg) &= ~val;
+ __vcpu_sys_reg(vcpu, r->reg) = val & mask;
+ kvm_make_request(KVM_REQ_RELOAD_PMU, vcpu);
return 0;
}
@@ -1229,6 +1237,8 @@ static int set_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
val |= ARMV8_PMU_PMCR_LC;
__vcpu_sys_reg(vcpu, r->reg) = val;
+ kvm_make_request(KVM_REQ_RELOAD_PMU, vcpu);
+
return 0;
}
@@ -1255,6 +1265,7 @@ static int set_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
#define PMU_PMEVCNTR_EL0(n) \
{ PMU_SYS_REG(PMEVCNTRn_EL0(n)), \
.reset = reset_pmevcntr, .get_user = get_pmu_evcntr, \
+ .set_user = set_pmu_evcntr, \
.access = access_pmu_evcntr, .reg = (PMEVCNTR0_EL0 + n), }
/* Macro to expand the PMEVTYPERn_EL0 register */
@@ -1627,6 +1638,7 @@ static u64 __kvm_read_sanitised_id_reg(const struct kvm_vcpu *vcpu,
break;
case SYS_ID_AA64MMFR2_EL1:
val &= ~ID_AA64MMFR2_EL1_CCIDX_MASK;
+ val &= ~ID_AA64MMFR2_EL1_NV;
break;
case SYS_ID_AA64MMFR3_EL1:
val &= ID_AA64MMFR3_EL1_TCRX | ID_AA64MMFR3_EL1_S1POE |
@@ -1637,6 +1649,9 @@ static u64 __kvm_read_sanitised_id_reg(const struct kvm_vcpu *vcpu,
break;
}
+ if (vcpu_has_nv(vcpu))
+ val = limit_nv_id_reg(vcpu->kvm, id, val);
+
return val;
}
@@ -1663,15 +1678,24 @@ static bool is_feature_id_reg(u32 encoding)
* Return true if the register's (Op0, Op1, CRn, CRm, Op2) is
* (3, 0, 0, crm, op2), where 1<=crm<8, 0<=op2<8, which is the range of ID
* registers KVM maintains on a per-VM basis.
+ *
+ * Additionally, the implementation ID registers and CTR_EL0 are handled as
+ * per-VM registers.
*/
static inline bool is_vm_ftr_id_reg(u32 id)
{
- if (id == SYS_CTR_EL0)
+ switch (id) {
+ case SYS_CTR_EL0:
+ case SYS_MIDR_EL1:
+ case SYS_REVIDR_EL1:
+ case SYS_AIDR_EL1:
return true;
+ default:
+ return (sys_reg_Op0(id) == 3 && sys_reg_Op1(id) == 0 &&
+ sys_reg_CRn(id) == 0 && sys_reg_CRm(id) >= 1 &&
+ sys_reg_CRm(id) < 8);
- return (sys_reg_Op0(id) == 3 && sys_reg_Op1(id) == 0 &&
- sys_reg_CRn(id) == 0 && sys_reg_CRm(id) >= 1 &&
- sys_reg_CRm(id) < 8);
+ }
}
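As a concrete instance of the (3, 0, 0, crm, op2) rule: ID_AA64PFR0_EL1 is encoded as Op0=3, Op1=0, CRn=0, CRm=4, Op2=0, so it lands in the per-VM range, whereas MIDR_EL1 (CRm=0) only qualifies through the new explicit case added above.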
static inline bool is_vcpu_ftr_id_reg(u32 id)
@@ -1802,16 +1826,6 @@ static u64 sanitise_id_aa64pfr0_el1(const struct kvm_vcpu *vcpu, u64 val)
return val;
}
-#define ID_REG_LIMIT_FIELD_ENUM(val, reg, field, limit) \
-({ \
- u64 __f_val = FIELD_GET(reg##_##field##_MASK, val); \
- (val) &= ~reg##_##field##_MASK; \
- (val) |= FIELD_PREP(reg##_##field##_MASK, \
- min(__f_val, \
- (u64)SYS_FIELD_VALUE(reg, field, limit))); \
- (val); \
-})
-
static u64 sanitise_id_aa64dfr0_el1(const struct kvm_vcpu *vcpu, u64 val)
{
val = ID_REG_LIMIT_FIELD_ENUM(val, ID_AA64DFR0_EL1, DebugVer, V8P8);
@@ -1870,12 +1884,14 @@ static int set_id_aa64dfr0_el1(struct kvm_vcpu *vcpu,
static u64 read_sanitised_id_dfr0_el1(struct kvm_vcpu *vcpu,
const struct sys_reg_desc *rd)
{
- u8 perfmon = pmuver_to_perfmon(kvm_arm_pmu_get_pmuver_limit());
+ u8 perfmon;
u64 val = read_sanitised_ftr_reg(SYS_ID_DFR0_EL1);
val &= ~ID_DFR0_EL1_PerfMon_MASK;
- if (kvm_vcpu_has_pmu(vcpu))
+ if (kvm_vcpu_has_pmu(vcpu)) {
+ perfmon = pmuver_to_perfmon(kvm_arm_pmu_get_pmuver_limit());
val |= SYS_FIELD_PREP(ID_DFR0_EL1, PerfMon, perfmon);
+ }
val = ID_REG_LIMIT_FIELD_ENUM(val, ID_DFR0_EL1, CopDbg, Debugv8p8);
@@ -1945,6 +1961,37 @@ static int set_id_aa64pfr1_el1(struct kvm_vcpu *vcpu,
return set_id_reg(vcpu, rd, user_val);
}
+static int set_id_aa64mmfr0_el1(struct kvm_vcpu *vcpu,
+ const struct sys_reg_desc *rd, u64 user_val)
+{
+ u64 sanitized_val = kvm_read_sanitised_id_reg(vcpu, rd);
+ u64 tgran2_mask = ID_AA64MMFR0_EL1_TGRAN4_2_MASK |
+ ID_AA64MMFR0_EL1_TGRAN16_2_MASK |
+ ID_AA64MMFR0_EL1_TGRAN64_2_MASK;
+
+ if (vcpu_has_nv(vcpu) &&
+ ((sanitized_val & tgran2_mask) != (user_val & tgran2_mask)))
+ return -EINVAL;
+
+ return set_id_reg(vcpu, rd, user_val);
+}
+
+static int set_id_aa64mmfr2_el1(struct kvm_vcpu *vcpu,
+ const struct sys_reg_desc *rd, u64 user_val)
+{
+ u64 hw_val = read_sanitised_ftr_reg(SYS_ID_AA64MMFR2_EL1);
+ u64 nv_mask = ID_AA64MMFR2_EL1_NV_MASK;
+
+ /*
+ * We made the mistake of exposing the now deprecated NV field,
+ * so allow userspace to write it, but silently ignore it.
+ */
+ if ((hw_val & nv_mask) == (user_val & nv_mask))
+ user_val &= ~nv_mask;
+
+ return set_id_reg(vcpu, rd, user_val);
+}
+
static int set_ctr_el0(struct kvm_vcpu *vcpu,
const struct sys_reg_desc *rd, u64 user_val)
{
@@ -2266,35 +2313,33 @@ static bool bad_redir_trap(struct kvm_vcpu *vcpu,
* from userspace.
*/
+#define ID_DESC_DEFAULT_CALLBACKS \
+ .access = access_id_reg, \
+ .get_user = get_id_reg, \
+ .set_user = set_id_reg, \
+ .visibility = id_visibility, \
+ .reset = kvm_read_sanitised_id_reg
+
#define ID_DESC(name) \
SYS_DESC(SYS_##name), \
- .access = access_id_reg, \
- .get_user = get_id_reg \
+ ID_DESC_DEFAULT_CALLBACKS
/* sys_reg_desc initialiser for known cpufeature ID registers */
#define ID_SANITISED(name) { \
ID_DESC(name), \
- .set_user = set_id_reg, \
- .visibility = id_visibility, \
- .reset = kvm_read_sanitised_id_reg, \
.val = 0, \
}
/* sys_reg_desc initialiser for known cpufeature ID registers */
#define AA32_ID_SANITISED(name) { \
ID_DESC(name), \
- .set_user = set_id_reg, \
.visibility = aa32_id_visibility, \
- .reset = kvm_read_sanitised_id_reg, \
.val = 0, \
}
/* sys_reg_desc initialiser for writable ID registers */
#define ID_WRITABLE(name, mask) { \
ID_DESC(name), \
- .set_user = set_id_reg, \
- .visibility = id_visibility, \
- .reset = kvm_read_sanitised_id_reg, \
.val = mask, \
}
@@ -2302,8 +2347,6 @@ static bool bad_redir_trap(struct kvm_vcpu *vcpu,
#define ID_FILTERED(sysreg, name, mask) { \
ID_DESC(sysreg), \
.set_user = set_##name, \
- .visibility = id_visibility, \
- .reset = kvm_read_sanitised_id_reg, \
.val = (mask), \
}
@@ -2313,12 +2356,10 @@ static bool bad_redir_trap(struct kvm_vcpu *vcpu,
* (1 <= crm < 8, 0 <= Op2 < 8).
*/
#define ID_UNALLOCATED(crm, op2) { \
+ .name = "S3_0_0_" #crm "_" #op2, \
Op0(3), Op1(0), CRn(0), CRm(crm), Op2(op2), \
- .access = access_id_reg, \
- .get_user = get_id_reg, \
- .set_user = set_id_reg, \
+ ID_DESC_DEFAULT_CALLBACKS, \
.visibility = raz_visibility, \
- .reset = kvm_read_sanitised_id_reg, \
.val = 0, \
}
@@ -2329,9 +2370,7 @@ static bool bad_redir_trap(struct kvm_vcpu *vcpu,
*/
#define ID_HIDDEN(name) { \
ID_DESC(name), \
- .set_user = set_id_reg, \
.visibility = raz_visibility, \
- .reset = kvm_read_sanitised_id_reg, \
.val = 0, \
}
@@ -2426,6 +2465,59 @@ static bool access_zcr_el2(struct kvm_vcpu *vcpu,
vq = SYS_FIELD_GET(ZCR_ELx, LEN, p->regval) + 1;
vq = min(vq, vcpu_sve_max_vq(vcpu));
vcpu_write_sys_reg(vcpu, vq - 1, ZCR_EL2);
+
+ return true;
+}
+
+static bool access_gic_vtr(struct kvm_vcpu *vcpu,
+ struct sys_reg_params *p,
+ const struct sys_reg_desc *r)
+{
+ if (p->is_write)
+ return write_to_read_only(vcpu, p, r);
+
+ p->regval = kvm_vgic_global_state.ich_vtr_el2;
+ p->regval &= ~(ICH_VTR_EL2_DVIM |
+ ICH_VTR_EL2_A3V |
+ ICH_VTR_EL2_IDbits);
+ p->regval |= ICH_VTR_EL2_nV4;
+
+ return true;
+}
+
+static bool access_gic_misr(struct kvm_vcpu *vcpu,
+ struct sys_reg_params *p,
+ const struct sys_reg_desc *r)
+{
+ if (p->is_write)
+ return write_to_read_only(vcpu, p, r);
+
+ p->regval = vgic_v3_get_misr(vcpu);
+
+ return true;
+}
+
+static bool access_gic_eisr(struct kvm_vcpu *vcpu,
+ struct sys_reg_params *p,
+ const struct sys_reg_desc *r)
+{
+ if (p->is_write)
+ return write_to_read_only(vcpu, p, r);
+
+ p->regval = vgic_v3_get_eisr(vcpu);
+
+ return true;
+}
+
+static bool access_gic_elrsr(struct kvm_vcpu *vcpu,
+ struct sys_reg_params *p,
+ const struct sys_reg_desc *r)
+{
+ if (p->is_write)
+ return write_to_read_only(vcpu, p, r);
+
+ p->regval = vgic_v3_get_elrsr(vcpu);
+
return true;
}
@@ -2493,6 +2585,120 @@ static bool access_mdcr(struct kvm_vcpu *vcpu,
return true;
}
+/*
+ * For historical (ahem ABI) reasons, KVM treated MIDR_EL1, REVIDR_EL1, and
+ * AIDR_EL1 as "invariant" registers, meaning userspace cannot change them.
+ * The values made visible to userspace were the register values of the boot
+ * CPU.
+ *
+ * At the same time, reads from these registers at EL1 previously were not
+ * trapped, allowing the guest to read the actual hardware value. On big-little
+ * machines, this means the VM can see different values depending on where a
+ * given vCPU got scheduled.
+ *
+ * These registers are now trapped as collateral damage from SME, and what
+ * follows attempts to give a user / guest view consistent with the existing
+ * ABI.
+ */
+static bool access_imp_id_reg(struct kvm_vcpu *vcpu,
+ struct sys_reg_params *p,
+ const struct sys_reg_desc *r)
+{
+ if (p->is_write)
+ return write_to_read_only(vcpu, p, r);
+
+ /*
+ * Return the VM-scoped implementation ID register values if userspace
+ * has made them writable.
+ */
+ if (test_bit(KVM_ARCH_FLAG_WRITABLE_IMP_ID_REGS, &vcpu->kvm->arch.flags))
+ return access_id_reg(vcpu, p, r);
+
+ /*
+ * Otherwise, fall back to the old behavior of returning the value of
+ * the current CPU.
+ */
+ switch (reg_to_encoding(r)) {
+ case SYS_REVIDR_EL1:
+ p->regval = read_sysreg(revidr_el1);
+ break;
+ case SYS_AIDR_EL1:
+ p->regval = read_sysreg(aidr_el1);
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ }
+
+ return true;
+}
+
+static u64 __ro_after_init boot_cpu_midr_val;
+static u64 __ro_after_init boot_cpu_revidr_val;
+static u64 __ro_after_init boot_cpu_aidr_val;
+
+static void init_imp_id_regs(void)
+{
+ boot_cpu_midr_val = read_sysreg(midr_el1);
+ boot_cpu_revidr_val = read_sysreg(revidr_el1);
+ boot_cpu_aidr_val = read_sysreg(aidr_el1);
+}
+
+static u64 reset_imp_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
+{
+ switch (reg_to_encoding(r)) {
+ case SYS_MIDR_EL1:
+ return boot_cpu_midr_val;
+ case SYS_REVIDR_EL1:
+ return boot_cpu_revidr_val;
+ case SYS_AIDR_EL1:
+ return boot_cpu_aidr_val;
+ default:
+ KVM_BUG_ON(1, vcpu->kvm);
+ return 0;
+ }
+}
+
+static int set_imp_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
+ u64 val)
+{
+ struct kvm *kvm = vcpu->kvm;
+ u64 expected;
+
+ guard(mutex)(&kvm->arch.config_lock);
+
+ expected = read_id_reg(vcpu, r);
+ if (expected == val)
+ return 0;
+
+ if (!test_bit(KVM_ARCH_FLAG_WRITABLE_IMP_ID_REGS, &kvm->arch.flags))
+ return -EINVAL;
+
+ /*
+ * Once the VM has started, the ID registers are immutable. Reject the
+ * write if userspace tries to change it.
+ */
+ if (kvm_vm_has_ran_once(kvm))
+ return -EBUSY;
+
+ /*
+ * Any value is allowed for the implementation ID registers so long as
+ * it is within the writable mask.
+ */
+ if ((val & r->val) != val)
+ return -EINVAL;
+
+ kvm_set_vm_id_reg(kvm, reg_to_encoding(r), val);
+ return 0;
+}
+
+#define IMPLEMENTATION_ID(reg, mask) { \
+ SYS_DESC(SYS_##reg), \
+ .access = access_imp_id_reg, \
+ .get_user = get_id_reg, \
+ .set_user = set_imp_id_reg, \
+ .reset = reset_imp_id_reg, \
+ .val = mask, \
+}
/*
* Architected system registers.
@@ -2542,7 +2748,9 @@ static const struct sys_reg_desc sys_reg_descs[] = {
{ SYS_DESC(SYS_DBGVCR32_EL2), undef_access, reset_val, DBGVCR32_EL2, 0 },
+ IMPLEMENTATION_ID(MIDR_EL1, GENMASK_ULL(31, 0)),
{ SYS_DESC(SYS_MPIDR_EL1), NULL, reset_mpidr, MPIDR_EL1 },
+ IMPLEMENTATION_ID(REVIDR_EL1, GENMASK_ULL(63, 0)),
/*
* ID regs: all ID_SANITISED() entries here must have corresponding
@@ -2660,10 +2868,8 @@ static const struct sys_reg_desc sys_reg_descs[] = {
ID_UNALLOCATED(6,7),
/* CRm=7 */
- ID_WRITABLE(ID_AA64MMFR0_EL1, ~(ID_AA64MMFR0_EL1_RES0 |
- ID_AA64MMFR0_EL1_TGRAN4_2 |
- ID_AA64MMFR0_EL1_TGRAN64_2 |
- ID_AA64MMFR0_EL1_TGRAN16_2 |
+ ID_FILTERED(ID_AA64MMFR0_EL1, id_aa64mmfr0_el1,
+ ~(ID_AA64MMFR0_EL1_RES0 |
ID_AA64MMFR0_EL1_ASIDBITS)),
ID_WRITABLE(ID_AA64MMFR1_EL1, ~(ID_AA64MMFR1_EL1_RES0 |
ID_AA64MMFR1_EL1_HCX |
@@ -2671,7 +2877,8 @@ static const struct sys_reg_desc sys_reg_descs[] = {
ID_AA64MMFR1_EL1_XNX |
ID_AA64MMFR1_EL1_VH |
ID_AA64MMFR1_EL1_VMIDBits)),
- ID_WRITABLE(ID_AA64MMFR2_EL1, ~(ID_AA64MMFR2_EL1_RES0 |
+ ID_FILTERED(ID_AA64MMFR2_EL1,
+ id_aa64mmfr2_el1, ~(ID_AA64MMFR2_EL1_RES0 |
ID_AA64MMFR2_EL1_EVT |
ID_AA64MMFR2_EL1_FWB |
ID_AA64MMFR2_EL1_IDS |
@@ -2680,7 +2887,7 @@ static const struct sys_reg_desc sys_reg_descs[] = {
ID_WRITABLE(ID_AA64MMFR3_EL1, (ID_AA64MMFR3_EL1_TCRX |
ID_AA64MMFR3_EL1_S1PIE |
ID_AA64MMFR3_EL1_S1POE)),
- ID_SANITISED(ID_AA64MMFR4_EL1),
+ ID_WRITABLE(ID_AA64MMFR4_EL1, ID_AA64MMFR4_EL1_NV_frac),
ID_UNALLOCATED(7,5),
ID_UNALLOCATED(7,6),
ID_UNALLOCATED(7,7),
@@ -2814,6 +3021,7 @@ static const struct sys_reg_desc sys_reg_descs[] = {
.set_user = set_clidr, .val = ~CLIDR_EL1_RES0 },
{ SYS_DESC(SYS_CCSIDR2_EL1), undef_access },
{ SYS_DESC(SYS_SMIDR_EL1), undef_access },
+ IMPLEMENTATION_ID(AIDR_EL1, GENMASK_ULL(63, 0)),
{ SYS_DESC(SYS_CSSELR_EL1), access_csselr, reset_unknown, CSSELR_EL1 },
ID_FILTERED(CTR_EL0, ctr_el0,
CTR_EL0_DIC_MASK |
@@ -2850,7 +3058,8 @@ static const struct sys_reg_desc sys_reg_descs[] = {
.access = access_pmceid, .reset = NULL },
{ PMU_SYS_REG(PMCCNTR_EL0),
.access = access_pmu_evcntr, .reset = reset_unknown,
- .reg = PMCCNTR_EL0, .get_user = get_pmu_evcntr},
+ .reg = PMCCNTR_EL0, .get_user = get_pmu_evcntr,
+ .set_user = set_pmu_evcntr },
{ PMU_SYS_REG(PMXEVTYPER_EL0),
.access = access_pmu_evtyper, .reset = NULL },
{ PMU_SYS_REG(PMXEVCNTR_EL0),
@@ -3102,7 +3311,40 @@ static const struct sys_reg_desc sys_reg_descs[] = {
EL2_REG(RVBAR_EL2, access_rw, reset_val, 0),
{ SYS_DESC(SYS_RMR_EL2), undef_access },
+ EL2_REG_VNCR(ICH_AP0R0_EL2, reset_val, 0),
+ EL2_REG_VNCR(ICH_AP0R1_EL2, reset_val, 0),
+ EL2_REG_VNCR(ICH_AP0R2_EL2, reset_val, 0),
+ EL2_REG_VNCR(ICH_AP0R3_EL2, reset_val, 0),
+ EL2_REG_VNCR(ICH_AP1R0_EL2, reset_val, 0),
+ EL2_REG_VNCR(ICH_AP1R1_EL2, reset_val, 0),
+ EL2_REG_VNCR(ICH_AP1R2_EL2, reset_val, 0),
+ EL2_REG_VNCR(ICH_AP1R3_EL2, reset_val, 0),
+
+ { SYS_DESC(SYS_ICC_SRE_EL2), access_gic_sre },
+
EL2_REG_VNCR(ICH_HCR_EL2, reset_val, 0),
+ { SYS_DESC(SYS_ICH_VTR_EL2), access_gic_vtr },
+ { SYS_DESC(SYS_ICH_MISR_EL2), access_gic_misr },
+ { SYS_DESC(SYS_ICH_EISR_EL2), access_gic_eisr },
+ { SYS_DESC(SYS_ICH_ELRSR_EL2), access_gic_elrsr },
+ EL2_REG_VNCR(ICH_VMCR_EL2, reset_val, 0),
+
+ EL2_REG_VNCR(ICH_LR0_EL2, reset_val, 0),
+ EL2_REG_VNCR(ICH_LR1_EL2, reset_val, 0),
+ EL2_REG_VNCR(ICH_LR2_EL2, reset_val, 0),
+ EL2_REG_VNCR(ICH_LR3_EL2, reset_val, 0),
+ EL2_REG_VNCR(ICH_LR4_EL2, reset_val, 0),
+ EL2_REG_VNCR(ICH_LR5_EL2, reset_val, 0),
+ EL2_REG_VNCR(ICH_LR6_EL2, reset_val, 0),
+ EL2_REG_VNCR(ICH_LR7_EL2, reset_val, 0),
+ EL2_REG_VNCR(ICH_LR8_EL2, reset_val, 0),
+ EL2_REG_VNCR(ICH_LR9_EL2, reset_val, 0),
+ EL2_REG_VNCR(ICH_LR10_EL2, reset_val, 0),
+ EL2_REG_VNCR(ICH_LR11_EL2, reset_val, 0),
+ EL2_REG_VNCR(ICH_LR12_EL2, reset_val, 0),
+ EL2_REG_VNCR(ICH_LR13_EL2, reset_val, 0),
+ EL2_REG_VNCR(ICH_LR14_EL2, reset_val, 0),
+ EL2_REG_VNCR(ICH_LR15_EL2, reset_val, 0),
EL2_REG(CONTEXTIDR_EL2, access_rw, reset_val, 0),
EL2_REG(TPIDR_EL2, access_rw, reset_val, 0),
@@ -4272,9 +4514,13 @@ int kvm_handle_cp15_32(struct kvm_vcpu *vcpu)
* Certain AArch32 ID registers are handled by rerouting to the AArch64
* system register table. Registers in the ID range where CRm=0 are
* excluded from this scheme as they do not trivially map into AArch64
- * system register encodings.
+ * system register encodings, except for AIDR/REVIDR.
*/
- if (params.Op1 == 0 && params.CRn == 0 && params.CRm)
+ if (params.Op1 == 0 && params.CRn == 0 &&
+ (params.CRm || params.Op2 == 6 /* REVIDR */))
+ return kvm_emulate_cp15_id_reg(vcpu, &params);
+ if (params.Op1 == 1 && params.CRn == 0 &&
+ params.CRm == 0 && params.Op2 == 7 /* AIDR */)
return kvm_emulate_cp15_id_reg(vcpu, &params);
return kvm_handle_cp_32(vcpu, &params, cp15_regs, ARRAY_SIZE(cp15_regs));
@@ -4473,6 +4719,9 @@ void kvm_reset_sys_regs(struct kvm_vcpu *vcpu)
}
set_bit(KVM_ARCH_FLAG_ID_REGS_INITIALIZED, &kvm->arch.flags);
+
+ if (kvm_vcpu_has_pmu(vcpu))
+ kvm_make_request(KVM_REQ_RELOAD_PMU, vcpu);
}
/**
@@ -4578,65 +4827,6 @@ id_to_sys_reg_desc(struct kvm_vcpu *vcpu, u64 id,
return r;
}
-/*
- * These are the invariant sys_reg registers: we let the guest see the
- * host versions of these, so they're part of the guest state.
- *
- * A future CPU may provide a mechanism to present different values to
- * the guest, or a future kvm may trap them.
- */
-
-#define FUNCTION_INVARIANT(reg) \
- static u64 reset_##reg(struct kvm_vcpu *v, \
- const struct sys_reg_desc *r) \
- { \
- ((struct sys_reg_desc *)r)->val = read_sysreg(reg); \
- return ((struct sys_reg_desc *)r)->val; \
- }
-
-FUNCTION_INVARIANT(midr_el1)
-FUNCTION_INVARIANT(revidr_el1)
-FUNCTION_INVARIANT(aidr_el1)
-
-/* ->val is filled in by kvm_sys_reg_table_init() */
-static struct sys_reg_desc invariant_sys_regs[] __ro_after_init = {
- { SYS_DESC(SYS_MIDR_EL1), NULL, reset_midr_el1 },
- { SYS_DESC(SYS_REVIDR_EL1), NULL, reset_revidr_el1 },
- { SYS_DESC(SYS_AIDR_EL1), NULL, reset_aidr_el1 },
-};
-
-static int get_invariant_sys_reg(u64 id, u64 __user *uaddr)
-{
- const struct sys_reg_desc *r;
-
- r = get_reg_by_id(id, invariant_sys_regs,
- ARRAY_SIZE(invariant_sys_regs));
- if (!r)
- return -ENOENT;
-
- return put_user(r->val, uaddr);
-}
-
-static int set_invariant_sys_reg(u64 id, u64 __user *uaddr)
-{
- const struct sys_reg_desc *r;
- u64 val;
-
- r = get_reg_by_id(id, invariant_sys_regs,
- ARRAY_SIZE(invariant_sys_regs));
- if (!r)
- return -ENOENT;
-
- if (get_user(val, uaddr))
- return -EFAULT;
-
- /* This is what we mean by invariant: you can't change it. */
- if (r->val != val)
- return -EINVAL;
-
- return 0;
-}
-
static int demux_c15_get(struct kvm_vcpu *vcpu, u64 id, void __user *uaddr)
{
u32 val;
@@ -4718,15 +4908,10 @@ int kvm_sys_reg_get_user(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg,
int kvm_arm_sys_reg_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
void __user *uaddr = (void __user *)(unsigned long)reg->addr;
- int err;
if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
return demux_c15_get(vcpu, reg->id, uaddr);
- err = get_invariant_sys_reg(reg->id, uaddr);
- if (err != -ENOENT)
- return err;
-
return kvm_sys_reg_get_user(vcpu, reg,
sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
}
@@ -4762,15 +4947,10 @@ int kvm_sys_reg_set_user(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg,
int kvm_arm_sys_reg_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
void __user *uaddr = (void __user *)(unsigned long)reg->addr;
- int err;
if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
return demux_c15_set(vcpu, reg->id, uaddr);
- err = set_invariant_sys_reg(reg->id, uaddr);
- if (err != -ENOENT)
- return err;
-
return kvm_sys_reg_set_user(vcpu, reg,
sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
}
@@ -4859,23 +5039,14 @@ static int walk_sys_regs(struct kvm_vcpu *vcpu, u64 __user *uind)
unsigned long kvm_arm_num_sys_reg_descs(struct kvm_vcpu *vcpu)
{
- return ARRAY_SIZE(invariant_sys_regs)
- + num_demux_regs()
+ return num_demux_regs()
+ walk_sys_regs(vcpu, (u64 __user *)NULL);
}
int kvm_arm_copy_sys_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
- unsigned int i;
int err;
- /* Then give them all the invariant registers' indices. */
- for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++) {
- if (put_user(sys_reg_to_index(&invariant_sys_regs[i]), uindices))
- return -EFAULT;
- uindices++;
- }
-
err = walk_sys_regs(vcpu, uindices);
if (err < 0)
return err;
@@ -4971,25 +5142,7 @@ void kvm_calculate_traps(struct kvm_vcpu *vcpu)
mutex_lock(&kvm->arch.config_lock);
vcpu_set_hcr(vcpu);
vcpu_set_ich_hcr(vcpu);
-
- if (cpus_have_final_cap(ARM64_HAS_HCX)) {
- /*
- * In general, all HCRX_EL2 bits are gated by a feature.
- * The only reason we can set SMPME without checking any
- * feature is that its effects are not directly observable
- * from the guest.
- */
- vcpu->arch.hcrx_el2 = HCRX_EL2_SMPME;
-
- if (kvm_has_feat(kvm, ID_AA64ISAR2_EL1, MOPS, IMP))
- vcpu->arch.hcrx_el2 |= (HCRX_EL2_MSCEn | HCRX_EL2_MCE2);
-
- if (kvm_has_tcr2(kvm))
- vcpu->arch.hcrx_el2 |= HCRX_EL2_TCR2En;
-
- if (kvm_has_fpmr(kvm))
- vcpu->arch.hcrx_el2 |= HCRX_EL2_EnFPM;
- }
+ vcpu_set_hcrx(vcpu);
if (test_bit(KVM_ARCH_FLAG_FGU_INITIALIZED, &kvm->arch.flags))
goto out;
@@ -5101,15 +5254,12 @@ int __init kvm_sys_reg_table_init(void)
valid &= check_sysreg_table(cp14_64_regs, ARRAY_SIZE(cp14_64_regs), true);
valid &= check_sysreg_table(cp15_regs, ARRAY_SIZE(cp15_regs), true);
valid &= check_sysreg_table(cp15_64_regs, ARRAY_SIZE(cp15_64_regs), true);
- valid &= check_sysreg_table(invariant_sys_regs, ARRAY_SIZE(invariant_sys_regs), false);
valid &= check_sysreg_table(sys_insn_descs, ARRAY_SIZE(sys_insn_descs), false);
if (!valid)
return -EINVAL;
- /* We abuse the reset function to overwrite the table itself. */
- for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++)
- invariant_sys_regs[i].reset(NULL, &invariant_sys_regs[i]);
+ init_imp_id_regs();
ret = populate_nv_trap_config();
diff --git a/arch/arm64/kvm/sys_regs.h b/arch/arm64/kvm/sys_regs.h
index 1d94ed6efad2..cc6338d38766 100644
--- a/arch/arm64/kvm/sys_regs.h
+++ b/arch/arm64/kvm/sys_regs.h
@@ -247,4 +247,14 @@ int kvm_finalize_sys_regs(struct kvm_vcpu *vcpu);
CRn(sys_reg_CRn(reg)), CRm(sys_reg_CRm(reg)), \
Op2(sys_reg_Op2(reg))
+#define ID_REG_LIMIT_FIELD_ENUM(val, reg, field, limit) \
+({ \
+ u64 __f_val = FIELD_GET(reg##_##field##_MASK, val); \
+ (val) &= ~reg##_##field##_MASK; \
+ (val) |= FIELD_PREP(reg##_##field##_MASK, \
+ min(__f_val, \
+ (u64)SYS_FIELD_VALUE(reg, field, limit))); \
+ (val); \
+})
+
#endif /* __ARM64_KVM_SYS_REGS_LOCAL_H__ */
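With the macro now living in the shared header, nested.c can reuse it for the NV-specific caps (see the DebugVer clamp earlier in this series). A minimal usage sketch, with an illustrative wrapper that is not part of the patch:

	/* Clamp the DebugVer field of an ID_AA64DFR0_EL1 value to the VHE level. */
	static u64 example_cap_debugver(u64 dfr0)
	{
		return ID_REG_LIMIT_FIELD_ENUM(dfr0, ID_AA64DFR0_EL1, DebugVer, VHE);
	}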
diff --git a/arch/arm64/kvm/vgic-sys-reg-v3.c b/arch/arm64/kvm/vgic-sys-reg-v3.c
index 9e7c486b48c2..5eacb4b3250a 100644
--- a/arch/arm64/kvm/vgic-sys-reg-v3.c
+++ b/arch/arm64/kvm/vgic-sys-reg-v3.c
@@ -35,12 +35,12 @@ static int set_gic_ctlr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
vgic_v3_cpu->num_id_bits = host_id_bits;
- host_seis = FIELD_GET(ICH_VTR_SEIS_MASK, kvm_vgic_global_state.ich_vtr_el2);
+ host_seis = FIELD_GET(ICH_VTR_EL2_SEIS, kvm_vgic_global_state.ich_vtr_el2);
seis = FIELD_GET(ICC_CTLR_EL1_SEIS_MASK, val);
if (host_seis != seis)
return -EINVAL;
- host_a3v = FIELD_GET(ICH_VTR_A3V_MASK, kvm_vgic_global_state.ich_vtr_el2);
+ host_a3v = FIELD_GET(ICH_VTR_EL2_A3V, kvm_vgic_global_state.ich_vtr_el2);
a3v = FIELD_GET(ICC_CTLR_EL1_A3V_MASK, val);
if (host_a3v != a3v)
return -EINVAL;
@@ -68,10 +68,10 @@ static int get_gic_ctlr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
val |= FIELD_PREP(ICC_CTLR_EL1_PRI_BITS_MASK, vgic_v3_cpu->num_pri_bits - 1);
val |= FIELD_PREP(ICC_CTLR_EL1_ID_BITS_MASK, vgic_v3_cpu->num_id_bits);
val |= FIELD_PREP(ICC_CTLR_EL1_SEIS_MASK,
- FIELD_GET(ICH_VTR_SEIS_MASK,
+ FIELD_GET(ICH_VTR_EL2_SEIS,
kvm_vgic_global_state.ich_vtr_el2));
val |= FIELD_PREP(ICC_CTLR_EL1_A3V_MASK,
- FIELD_GET(ICH_VTR_A3V_MASK, kvm_vgic_global_state.ich_vtr_el2));
+ FIELD_GET(ICH_VTR_EL2_A3V, kvm_vgic_global_state.ich_vtr_el2));
/*
* The VMCR.CTLR value is in ICC_CTLR_EL1 layout.
* Extract it directly using ICC_CTLR_EL1 reg definitions.
diff --git a/arch/arm64/kvm/vgic/vgic-init.c b/arch/arm64/kvm/vgic/vgic-init.c
index 775461cf2d2d..1f33e71c2a73 100644
--- a/arch/arm64/kvm/vgic/vgic-init.c
+++ b/arch/arm64/kvm/vgic/vgic-init.c
@@ -198,6 +198,27 @@ static int kvm_vgic_dist_init(struct kvm *kvm, unsigned int nr_spis)
return 0;
}
+/* Default GICv3 Maintenance Interrupt INTID, as per SBSA */
+#define DEFAULT_MI_INTID 25
+
+int kvm_vgic_vcpu_nv_init(struct kvm_vcpu *vcpu)
+{
+ int ret;
+
+ guard(mutex)(&vcpu->kvm->arch.config_lock);
+
+ /*
+ * Matching the tradition established with the timers, provide
+ * a default PPI for the maintenance interrupt. It makes
+ * things easier to reason about.
+ */
+ if (vcpu->kvm->arch.vgic.mi_intid == 0)
+ vcpu->kvm->arch.vgic.mi_intid = DEFAULT_MI_INTID;
+ ret = kvm_vgic_set_owner(vcpu, vcpu->kvm->arch.vgic.mi_intid, vcpu);
+
+ return ret;
+}
+
static int vgic_allocate_private_irqs_locked(struct kvm_vcpu *vcpu, u32 type)
{
struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
@@ -588,12 +609,20 @@ void kvm_vgic_cpu_down(void)
static irqreturn_t vgic_maintenance_handler(int irq, void *data)
{
+ struct kvm_vcpu *vcpu = *(struct kvm_vcpu **)data;
+
/*
* We cannot rely on the vgic maintenance interrupt to be
* delivered synchronously. This means we can only use it to
* exit the VM, and we perform the handling of EOIed
* interrupts on the exit path (see vgic_fold_lr_state).
+ *
+ * Of course, NV throws a wrench in this plan, and needs
+ * something special.
*/
+ if (vcpu && vgic_state_is_nested(vcpu))
+ vgic_v3_handle_nested_maint_irq(vcpu);
+
return IRQ_HANDLED;
}
diff --git a/arch/arm64/kvm/vgic/vgic-kvm-device.c b/arch/arm64/kvm/vgic/vgic-kvm-device.c
index 5f4f57aaa23e..359094f68c23 100644
--- a/arch/arm64/kvm/vgic/vgic-kvm-device.c
+++ b/arch/arm64/kvm/vgic/vgic-kvm-device.c
@@ -303,6 +303,12 @@ static int vgic_get_common_attr(struct kvm_device *dev,
VGIC_NR_PRIVATE_IRQS, uaddr);
break;
}
+ case KVM_DEV_ARM_VGIC_GRP_MAINT_IRQ: {
+ u32 __user *uaddr = (u32 __user *)(long)attr->addr;
+
+ r = put_user(dev->kvm->arch.vgic.mi_intid, uaddr);
+ break;
+ }
}
return r;
@@ -517,7 +523,7 @@ static int vgic_v3_attr_regs_access(struct kvm_device *dev,
struct vgic_reg_attr reg_attr;
gpa_t addr;
struct kvm_vcpu *vcpu;
- bool uaccess;
+ bool uaccess, post_init = true;
u32 val;
int ret;
@@ -533,6 +539,9 @@ static int vgic_v3_attr_regs_access(struct kvm_device *dev,
/* Sysregs uaccess is performed by the sysreg handling code */
uaccess = false;
break;
+ case KVM_DEV_ARM_VGIC_GRP_MAINT_IRQ:
+ post_init = false;
+ fallthrough;
default:
uaccess = true;
}
@@ -552,7 +561,7 @@ static int vgic_v3_attr_regs_access(struct kvm_device *dev,
mutex_lock(&dev->kvm->arch.config_lock);
- if (unlikely(!vgic_initialized(dev->kvm))) {
+ if (post_init != vgic_initialized(dev->kvm)) {
ret = -EBUSY;
goto out;
}
@@ -582,6 +591,19 @@ static int vgic_v3_attr_regs_access(struct kvm_device *dev,
}
break;
}
+ case KVM_DEV_ARM_VGIC_GRP_MAINT_IRQ:
+ if (!is_write) {
+ val = dev->kvm->arch.vgic.mi_intid;
+ ret = 0;
+ break;
+ }
+
+ ret = -EINVAL;
+ if ((val < VGIC_NR_PRIVATE_IRQS) && (val >= VGIC_NR_SGIS)) {
+ dev->kvm->arch.vgic.mi_intid = val;
+ ret = 0;
+ }
+ break;
default:
ret = -EINVAL;
break;
@@ -608,6 +630,7 @@ static int vgic_v3_set_attr(struct kvm_device *dev,
case KVM_DEV_ARM_VGIC_GRP_REDIST_REGS:
case KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS:
case KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO:
+ case KVM_DEV_ARM_VGIC_GRP_MAINT_IRQ:
return vgic_v3_attr_regs_access(dev, attr, true);
default:
return vgic_set_common_attr(dev, attr);
@@ -622,6 +645,7 @@ static int vgic_v3_get_attr(struct kvm_device *dev,
case KVM_DEV_ARM_VGIC_GRP_REDIST_REGS:
case KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS:
case KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO:
+ case KVM_DEV_ARM_VGIC_GRP_MAINT_IRQ:
return vgic_v3_attr_regs_access(dev, attr, false);
default:
return vgic_get_common_attr(dev, attr);
@@ -645,6 +669,7 @@ static int vgic_v3_has_attr(struct kvm_device *dev,
case KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS:
return vgic_v3_has_attr_regs(dev, attr);
case KVM_DEV_ARM_VGIC_GRP_NR_IRQS:
+ case KVM_DEV_ARM_VGIC_GRP_MAINT_IRQ:
return 0;
case KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO: {
if (((attr->attr & KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_MASK) >>
diff --git a/arch/arm64/kvm/vgic/vgic-v3-nested.c b/arch/arm64/kvm/vgic/vgic-v3-nested.c
new file mode 100644
index 000000000000..bfa5bde1f106
--- /dev/null
+++ b/arch/arm64/kvm/vgic/vgic-v3-nested.c
@@ -0,0 +1,409 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/cpu.h>
+#include <linux/kvm.h>
+#include <linux/kvm_host.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/uaccess.h>
+
+#include <kvm/arm_vgic.h>
+
+#include <asm/kvm_arm.h>
+#include <asm/kvm_emulate.h>
+#include <asm/kvm_nested.h>
+
+#include "vgic.h"
+
+#define ICH_LRN(n) (ICH_LR0_EL2 + (n))
+#define ICH_AP0RN(n) (ICH_AP0R0_EL2 + (n))
+#define ICH_AP1RN(n) (ICH_AP1R0_EL2 + (n))
+
+struct mi_state {
+ u16 eisr;
+ u16 elrsr;
+ bool pend;
+};
+
+/*
+ * The shadow registers loaded to the hardware when running a L2 guest
+ * with the virtual IMO/FMO bits set.
+ */
+struct shadow_if {
+ struct vgic_v3_cpu_if cpuif;
+ unsigned long lr_map;
+};
+
+static DEFINE_PER_CPU(struct shadow_if, shadow_if);
+
+/*
+ * Nesting GICv3 support
+ *
+ * On a non-nesting VM (only running at EL0/EL1), the host hypervisor
+ * completely controls the interrupts injected via the list registers.
+ * Consequently, most of the state that is modified by the guest (by ACK-ing
+ * and EOI-ing interrupts) is synced by KVM on each entry/exit, so that we
+ * keep a semi-consistent view of the interrupts.
+ *
+ * This still applies for a NV guest, but only while "InHost" (either
+ * running at EL2, or at EL0 with HCR_EL2.{E2H,TGE}=={1,1}).
+ *
+ * When running a L2 guest ("not InHost"), things are radically different,
+ * as the L1 guest is in charge of provisioning the interrupts via its own
+ * view of the ICH_LR*_EL2 registers, which conveniently live in the VNCR
+ * page. This means that the flow described above does work (there is no
+ * state to rebuild in the L0 hypervisor), and that most things happen on L2
+ * load/put:
+ *
+ * - on L2 load: move the in-memory L1 vGIC configuration into a shadow,
+ * per-CPU data structure that is used to populate the actual LRs. This is
+ * an extra copy that we could avoid, but life is short. In the process,
+ * we remap any interrupt that has the HW bit set to the mapped interrupt
+ * on the host, should the host consider it a HW one. This allows the HW
+ * deactivation to take its course, such as for the timer.
+ *
+ * - on L2 put: perform the inverse transformation, so that the result of L2
+ * running becomes visible to L1 in the VNCR-accessible registers.
+ *
+ * - there is nothing to do on L2 entry, as everything will have happened
+ *   on load. However, this is the point where we detect an interrupt
+ * targeting L1 and prepare the grand switcheroo.
+ *
+ * - on L2 exit: emulate the HW bit, and deactivate the corresponding L1
+ * interrupt. The L0 active state will be cleared by the HW if the L1
+ * interrupt was itself backed by a HW interrupt.
+ *
+ * Maintenance Interrupt (MI) management:
+ *
+ * Since the L2 guest runs the vgic in its full glory, MIs get delivered and
+ * used as a handover point between L2 and L1.
+ *
+ * - on delivery of a MI to L0 while L2 is running: make the L1 MI pending,
+ * and let it rip. This will initiate a vcpu_put() on L2, and allow L1 to
+ * run and process the MI.
+ *
+ * - L1 MI is a fully virtual interrupt, not linked to the host's MI. Its
+ * state must be computed at each entry/exit of the guest, much like we do
+ * it for the PMU interrupt.
+ *
+ * - because most of the ICH_*_EL2 registers live in the VNCR page, the
+ *   quality of emulation is poor: L1 can set up the vgic so that an MI would
+ * immediately fire, and not observe anything until the next exit. Trying
+ * to read ICH_MISR_EL2 would do the trick, for example.
+ *
+ * System register emulation:
+ *
+ * We get two classes of registers:
+ *
+ * - those backed by memory (LRs, APRs, HCR, VMCR): L1 can freely access
+ * them, and L0 doesn't see a thing.
+ *
+ * - those that always trap (ELRSR, EISR, MISR): these are status registers
+ * that are built on the fly based on the in-memory state.
+ *
+ * Only L1 can access the ICH_*_EL2 registers. A non-NV L2 obviously cannot,
+ * and a NV L2 would either access the VNCR page provided by L1 (memory
+ * based registers), or see the access redirected to L1 (registers that
+ * trap) thanks to NV being set by L1.
+ */
+
+bool vgic_state_is_nested(struct kvm_vcpu *vcpu)
+{
+ u64 xmo;
+
+ if (vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu)) {
+ xmo = __vcpu_sys_reg(vcpu, HCR_EL2) & (HCR_IMO | HCR_FMO);
+ WARN_ONCE(xmo && xmo != (HCR_IMO | HCR_FMO),
+ "Separate virtual IRQ/FIQ settings not supported\n");
+
+ return !!xmo;
+ }
+
+ return false;
+}
+
+static struct shadow_if *get_shadow_if(void)
+{
+ return this_cpu_ptr(&shadow_if);
+}
+
+static bool lr_triggers_eoi(u64 lr)
+{
+ return !(lr & (ICH_LR_STATE | ICH_LR_HW)) && (lr & ICH_LR_EOI);
+}
+
+static void vgic_compute_mi_state(struct kvm_vcpu *vcpu, struct mi_state *mi_state)
+{
+ u16 eisr = 0, elrsr = 0;
+ bool pend = false;
+
+ for (int i = 0; i < kvm_vgic_global_state.nr_lr; i++) {
+ u64 lr = __vcpu_sys_reg(vcpu, ICH_LRN(i));
+
+ if (lr_triggers_eoi(lr))
+ eisr |= BIT(i);
+ if (!(lr & ICH_LR_STATE))
+ elrsr |= BIT(i);
+ pend |= (lr & ICH_LR_PENDING_BIT);
+ }
+
+ mi_state->eisr = eisr;
+ mi_state->elrsr = elrsr;
+ mi_state->pend = pend;
+}
+
+u16 vgic_v3_get_eisr(struct kvm_vcpu *vcpu)
+{
+ struct mi_state mi_state;
+
+ vgic_compute_mi_state(vcpu, &mi_state);
+ return mi_state.eisr;
+}
+
+u16 vgic_v3_get_elrsr(struct kvm_vcpu *vcpu)
+{
+ struct mi_state mi_state;
+
+ vgic_compute_mi_state(vcpu, &mi_state);
+ return mi_state.elrsr;
+}
+
+u64 vgic_v3_get_misr(struct kvm_vcpu *vcpu)
+{
+ struct mi_state mi_state;
+ u64 reg = 0, hcr, vmcr;
+
+ hcr = __vcpu_sys_reg(vcpu, ICH_HCR_EL2);
+ vmcr = __vcpu_sys_reg(vcpu, ICH_VMCR_EL2);
+
+ vgic_compute_mi_state(vcpu, &mi_state);
+
+ if (mi_state.eisr)
+ reg |= ICH_MISR_EL2_EOI;
+
+ if (__vcpu_sys_reg(vcpu, ICH_HCR_EL2) & ICH_HCR_EL2_UIE) {
+ int used_lrs = kvm_vgic_global_state.nr_lr;
+
+ used_lrs -= hweight16(mi_state.elrsr);
+ reg |= (used_lrs <= 1) ? ICH_MISR_EL2_U : 0;
+ }
+
+ if ((hcr & ICH_HCR_EL2_LRENPIE) && FIELD_GET(ICH_HCR_EL2_EOIcount_MASK, hcr))
+ reg |= ICH_MISR_EL2_LRENP;
+
+ if ((hcr & ICH_HCR_EL2_NPIE) && !mi_state.pend)
+ reg |= ICH_MISR_EL2_NP;
+
+ if ((hcr & ICH_HCR_EL2_VGrp0EIE) && (vmcr & ICH_VMCR_ENG0_MASK))
+ reg |= ICH_MISR_EL2_VGrp0E;
+
+ if ((hcr & ICH_HCR_EL2_VGrp0DIE) && !(vmcr & ICH_VMCR_ENG0_MASK))
+ reg |= ICH_MISR_EL2_VGrp0D;
+
+ if ((hcr & ICH_HCR_EL2_VGrp1EIE) && (vmcr & ICH_VMCR_ENG1_MASK))
+ reg |= ICH_MISR_EL2_VGrp1E;
+
+ if ((hcr & ICH_HCR_EL2_VGrp1DIE) && !(vmcr & ICH_VMCR_ENG1_MASK))
+ reg |= ICH_MISR_EL2_VGrp1D;
+
+ return reg;
+}
+
+/*
+ * For LRs which have HW bit set such as timer interrupts, we modify them to
+ * have the host hardware interrupt number instead of the virtual one programmed
+ * by the guest hypervisor.
+ */
+static void vgic_v3_create_shadow_lr(struct kvm_vcpu *vcpu,
+ struct vgic_v3_cpu_if *s_cpu_if)
+{
+ unsigned long lr_map = 0;
+ int index = 0;
+
+ for (int i = 0; i < kvm_vgic_global_state.nr_lr; i++) {
+ u64 lr = __vcpu_sys_reg(vcpu, ICH_LRN(i));
+ struct vgic_irq *irq;
+
+ if (!(lr & ICH_LR_STATE))
+ lr = 0;
+
+ if (!(lr & ICH_LR_HW))
+ goto next;
+
+ /* We have the HW bit set, check for validity of pINTID */
+ irq = vgic_get_vcpu_irq(vcpu, FIELD_GET(ICH_LR_PHYS_ID_MASK, lr));
+ if (!irq || !irq->hw || irq->intid > VGIC_MAX_SPI) {
+ /* There was no real mapping, so nuke the HW bit */
+ lr &= ~ICH_LR_HW;
+ if (irq)
+ vgic_put_irq(vcpu->kvm, irq);
+ goto next;
+ }
+
+ /* It is illegal to have the EOI bit set with HW */
+ lr &= ~ICH_LR_EOI;
+
+ /* Translate the virtual mapping to the real one */
+ lr &= ~ICH_LR_PHYS_ID_MASK;
+ lr |= FIELD_PREP(ICH_LR_PHYS_ID_MASK, (u64)irq->hwintid);
+
+ vgic_put_irq(vcpu->kvm, irq);
+
+next:
+ s_cpu_if->vgic_lr[index] = lr;
+ if (lr) {
+ lr_map |= BIT(i);
+ index++;
+ }
+ }
+
+ container_of(s_cpu_if, struct shadow_if, cpuif)->lr_map = lr_map;
+ s_cpu_if->used_lrs = index;
+}
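To illustrate the compaction: if L1 has live state in guest LRs 0, 3 and 5, vgic_v3_create_shadow_lr() ends up with used_lrs == 3, shadow LRs 0-2 holding those three entries in order, and lr_map == 0b101001 recording which guest LR each shadow slot was sourced from.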
+
+void vgic_v3_sync_nested(struct kvm_vcpu *vcpu)
+{
+ struct shadow_if *shadow_if = get_shadow_if();
+ int i, index = 0;
+
+ for_each_set_bit(i, &shadow_if->lr_map, kvm_vgic_global_state.nr_lr) {
+ u64 lr = __vcpu_sys_reg(vcpu, ICH_LRN(i));
+ struct vgic_irq *irq;
+
+ if (!(lr & ICH_LR_HW) || !(lr & ICH_LR_STATE))
+ goto next;
+
+ /*
+ * If we had a HW lr programmed by the guest hypervisor, we
+ * need to emulate the HW effect between the guest hypervisor
+ * and the nested guest.
+ */
+ irq = vgic_get_vcpu_irq(vcpu, FIELD_GET(ICH_LR_PHYS_ID_MASK, lr));
+ if (WARN_ON(!irq)) /* Shouldn't happen as we check on load */
+ goto next;
+
+ lr = __gic_v3_get_lr(index);
+ if (!(lr & ICH_LR_STATE))
+ irq->active = false;
+
+ vgic_put_irq(vcpu->kvm, irq);
+ next:
+ index++;
+ }
+}
+
+static void vgic_v3_create_shadow_state(struct kvm_vcpu *vcpu,
+ struct vgic_v3_cpu_if *s_cpu_if)
+{
+ struct vgic_v3_cpu_if *host_if = &vcpu->arch.vgic_cpu.vgic_v3;
+ u64 val = 0;
+ int i;
+
+ /*
+ * If we're on a system with a broken vgic that requires
+ * trapping, propagate the trapping requirements.
+ *
+ * Ah, the smell of rotten fruits...
+ */
+ if (static_branch_unlikely(&vgic_v3_cpuif_trap))
+ val = host_if->vgic_hcr & (ICH_HCR_EL2_TALL0 | ICH_HCR_EL2_TALL1 |
+ ICH_HCR_EL2_TC | ICH_HCR_EL2_TDIR);
+ s_cpu_if->vgic_hcr = __vcpu_sys_reg(vcpu, ICH_HCR_EL2) | val;
+ s_cpu_if->vgic_vmcr = __vcpu_sys_reg(vcpu, ICH_VMCR_EL2);
+ s_cpu_if->vgic_sre = host_if->vgic_sre;
+
+ for (i = 0; i < 4; i++) {
+ s_cpu_if->vgic_ap0r[i] = __vcpu_sys_reg(vcpu, ICH_AP0RN(i));
+ s_cpu_if->vgic_ap1r[i] = __vcpu_sys_reg(vcpu, ICH_AP1RN(i));
+ }
+
+ vgic_v3_create_shadow_lr(vcpu, s_cpu_if);
+}
+
+void vgic_v3_load_nested(struct kvm_vcpu *vcpu)
+{
+ struct shadow_if *shadow_if = get_shadow_if();
+ struct vgic_v3_cpu_if *cpu_if = &shadow_if->cpuif;
+
+ BUG_ON(!vgic_state_is_nested(vcpu));
+
+ vgic_v3_create_shadow_state(vcpu, cpu_if);
+
+ __vgic_v3_restore_vmcr_aprs(cpu_if);
+ __vgic_v3_activate_traps(cpu_if);
+
+ __vgic_v3_restore_state(cpu_if);
+
+ /*
+ * Propagate the number of used LRs for the benefit of the HYP
+ * GICv3 emulation code. Yes, this is a pretty sorry hack.
+ */
+ vcpu->arch.vgic_cpu.vgic_v3.used_lrs = cpu_if->used_lrs;
+}
+
+void vgic_v3_put_nested(struct kvm_vcpu *vcpu)
+{
+ struct shadow_if *shadow_if = get_shadow_if();
+ struct vgic_v3_cpu_if *s_cpu_if = &shadow_if->cpuif;
+ u64 val;
+ int i;
+
+ __vgic_v3_save_vmcr_aprs(s_cpu_if);
+ __vgic_v3_deactivate_traps(s_cpu_if);
+ __vgic_v3_save_state(s_cpu_if);
+
+ /*
+ * Translate the shadow state HW fields back to the virtual ones
+ * before copying the shadow struct back to the nested one.
+ */
+ val = __vcpu_sys_reg(vcpu, ICH_HCR_EL2);
+ val &= ~ICH_HCR_EL2_EOIcount_MASK;
+ val |= (s_cpu_if->vgic_hcr & ICH_HCR_EL2_EOIcount_MASK);
+ __vcpu_sys_reg(vcpu, ICH_HCR_EL2) = val;
+ __vcpu_sys_reg(vcpu, ICH_VMCR_EL2) = s_cpu_if->vgic_vmcr;
+
+ for (i = 0; i < 4; i++) {
+ __vcpu_sys_reg(vcpu, ICH_AP0RN(i)) = s_cpu_if->vgic_ap0r[i];
+ __vcpu_sys_reg(vcpu, ICH_AP1RN(i)) = s_cpu_if->vgic_ap1r[i];
+ }
+
+ for_each_set_bit(i, &shadow_if->lr_map, kvm_vgic_global_state.nr_lr) {
+ val = __vcpu_sys_reg(vcpu, ICH_LRN(i));
+
+ val &= ~ICH_LR_STATE;
+ val |= s_cpu_if->vgic_lr[i] & ICH_LR_STATE;
+
+ __vcpu_sys_reg(vcpu, ICH_LRN(i)) = val;
+ s_cpu_if->vgic_lr[i] = 0;
+ }
+
+ shadow_if->lr_map = 0;
+ vcpu->arch.vgic_cpu.vgic_v3.used_lrs = 0;
+}
+
+/*
+ * If we exit a L2 VM with a pending maintenance interrupt from the GIC,
+ * then we need to forward this to L1 so that it can re-sync the appropriate
+ * LRs and sample level triggered interrupts again.
+ */
+void vgic_v3_handle_nested_maint_irq(struct kvm_vcpu *vcpu)
+{
+ bool state = read_sysreg_s(SYS_ICH_MISR_EL2);
+
+ /* This will force a switch back to L1 if the level is high */
+ kvm_vgic_inject_irq(vcpu->kvm, vcpu,
+ vcpu->kvm->arch.vgic.mi_intid, state, vcpu);
+
+ sysreg_clear_set_s(SYS_ICH_HCR_EL2, ICH_HCR_EL2_En, 0);
+}
+
+void vgic_v3_nested_update_mi(struct kvm_vcpu *vcpu)
+{
+ bool level;
+
+ level = __vcpu_sys_reg(vcpu, ICH_HCR_EL2) & ICH_HCR_EL2_En;
+ if (level)
+ level &= vgic_v3_get_misr(vcpu);
+ kvm_vgic_inject_irq(vcpu->kvm, vcpu,
+ vcpu->kvm->arch.vgic.mi_intid, level, vcpu);
+}
diff --git a/arch/arm64/kvm/vgic/vgic-v3.c b/arch/arm64/kvm/vgic/vgic-v3.c
index d7233ab982d0..b9ad7c42c5b0 100644
--- a/arch/arm64/kvm/vgic/vgic-v3.c
+++ b/arch/arm64/kvm/vgic/vgic-v3.c
@@ -24,7 +24,7 @@ void vgic_v3_set_underflow(struct kvm_vcpu *vcpu)
{
struct vgic_v3_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v3;
- cpuif->vgic_hcr |= ICH_HCR_UIE;
+ cpuif->vgic_hcr |= ICH_HCR_EL2_UIE;
}
static bool lr_signals_eoi_mi(u64 lr_val)
@@ -42,7 +42,7 @@ void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu)
DEBUG_SPINLOCK_BUG_ON(!irqs_disabled());
- cpuif->vgic_hcr &= ~ICH_HCR_UIE;
+ cpuif->vgic_hcr &= ~ICH_HCR_EL2_UIE;
for (lr = 0; lr < cpuif->used_lrs; lr++) {
u64 val = cpuif->vgic_lr[lr];
@@ -284,15 +284,13 @@ void vgic_v3_enable(struct kvm_vcpu *vcpu)
vgic_v3->vgic_sre = 0;
}
- vcpu->arch.vgic_cpu.num_id_bits = (kvm_vgic_global_state.ich_vtr_el2 &
- ICH_VTR_ID_BITS_MASK) >>
- ICH_VTR_ID_BITS_SHIFT;
- vcpu->arch.vgic_cpu.num_pri_bits = ((kvm_vgic_global_state.ich_vtr_el2 &
- ICH_VTR_PRI_BITS_MASK) >>
- ICH_VTR_PRI_BITS_SHIFT) + 1;
+ vcpu->arch.vgic_cpu.num_id_bits = FIELD_GET(ICH_VTR_EL2_IDbits,
+ kvm_vgic_global_state.ich_vtr_el2);
+ vcpu->arch.vgic_cpu.num_pri_bits = FIELD_GET(ICH_VTR_EL2_PRIbits,
+ kvm_vgic_global_state.ich_vtr_el2) + 1;
/* Get the show on the road... */
- vgic_v3->vgic_hcr = ICH_HCR_EN;
+ vgic_v3->vgic_hcr = ICH_HCR_EL2_En;
}
void vcpu_set_ich_hcr(struct kvm_vcpu *vcpu)
@@ -301,18 +299,19 @@ void vcpu_set_ich_hcr(struct kvm_vcpu *vcpu)
/* Hide GICv3 sysreg if necessary */
if (!kvm_has_gicv3(vcpu->kvm)) {
- vgic_v3->vgic_hcr |= ICH_HCR_TALL0 | ICH_HCR_TALL1 | ICH_HCR_TC;
+ vgic_v3->vgic_hcr |= (ICH_HCR_EL2_TALL0 | ICH_HCR_EL2_TALL1 |
+ ICH_HCR_EL2_TC);
return;
}
if (group0_trap)
- vgic_v3->vgic_hcr |= ICH_HCR_TALL0;
+ vgic_v3->vgic_hcr |= ICH_HCR_EL2_TALL0;
if (group1_trap)
- vgic_v3->vgic_hcr |= ICH_HCR_TALL1;
+ vgic_v3->vgic_hcr |= ICH_HCR_EL2_TALL1;
if (common_trap)
- vgic_v3->vgic_hcr |= ICH_HCR_TC;
+ vgic_v3->vgic_hcr |= ICH_HCR_EL2_TC;
if (dir_trap)
- vgic_v3->vgic_hcr |= ICH_HCR_TDIR;
+ vgic_v3->vgic_hcr |= ICH_HCR_EL2_TDIR;
}
int vgic_v3_lpi_sync_pending_status(struct kvm *kvm, struct vgic_irq *irq)
@@ -632,8 +631,8 @@ static const struct midr_range broken_seis[] = {
static bool vgic_v3_broken_seis(void)
{
- return ((kvm_vgic_global_state.ich_vtr_el2 & ICH_VTR_SEIS_MASK) &&
- is_midr_in_range_list(read_cpuid_id(), broken_seis));
+ return ((kvm_vgic_global_state.ich_vtr_el2 & ICH_VTR_EL2_SEIS) &&
+ is_midr_in_range_list(broken_seis));
}
/**
@@ -706,10 +705,10 @@ int vgic_v3_probe(const struct gic_kvm_info *info)
if (vgic_v3_broken_seis()) {
kvm_info("GICv3 with broken locally generated SEI\n");
- kvm_vgic_global_state.ich_vtr_el2 &= ~ICH_VTR_SEIS_MASK;
+ kvm_vgic_global_state.ich_vtr_el2 &= ~ICH_VTR_EL2_SEIS;
group0_trap = true;
group1_trap = true;
- if (ich_vtr_el2 & ICH_VTR_TDS_MASK)
+ if (ich_vtr_el2 & ICH_VTR_EL2_TDS)
dir_trap = true;
else
common_trap = true;
@@ -735,6 +734,12 @@ void vgic_v3_load(struct kvm_vcpu *vcpu)
{
struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
+ /* If the vgic is nested, perform the full state loading */
+ if (vgic_state_is_nested(vcpu)) {
+ vgic_v3_load_nested(vcpu);
+ return;
+ }
+
if (likely(!is_protected_kvm_enabled()))
kvm_call_hyp(__vgic_v3_restore_vmcr_aprs, cpu_if);
@@ -748,6 +753,11 @@ void vgic_v3_put(struct kvm_vcpu *vcpu)
{
struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
+ if (vgic_state_is_nested(vcpu)) {
+ vgic_v3_put_nested(vcpu);
+ return;
+ }
+
if (likely(!is_protected_kvm_enabled()))
kvm_call_hyp(__vgic_v3_save_vmcr_aprs, cpu_if);
WARN_ON(vgic_v4_put(vcpu));
diff --git a/arch/arm64/kvm/vgic/vgic-v4.c b/arch/arm64/kvm/vgic/vgic-v4.c
index eedecbbbcf31..c7de6154627c 100644
--- a/arch/arm64/kvm/vgic/vgic-v4.c
+++ b/arch/arm64/kvm/vgic/vgic-v4.c
@@ -336,6 +336,22 @@ void vgic_v4_teardown(struct kvm *kvm)
its_vm->vpes = NULL;
}
+static inline bool vgic_v4_want_doorbell(struct kvm_vcpu *vcpu)
+{
+ if (vcpu_get_flag(vcpu, IN_WFI))
+ return true;
+
+ if (likely(!vcpu_has_nv(vcpu)))
+ return false;
+
+ /*
+ * GICv4 hardware is only ever used for the L1. Mark the vPE (i.e. the
+ * L1 context) nonresident and request a doorbell to kick us out of the
+ * L2 when an IRQ becomes pending.
+ */
+ return vcpu_get_flag(vcpu, IN_NESTED_ERET);
+}
+
int vgic_v4_put(struct kvm_vcpu *vcpu)
{
struct its_vpe *vpe = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe;
@@ -343,7 +359,7 @@ int vgic_v4_put(struct kvm_vcpu *vcpu)
if (!vgic_supports_direct_msis(vcpu->kvm) || !vpe->resident)
return 0;
- return its_make_vpe_non_resident(vpe, !!vcpu_get_flag(vcpu, IN_WFI));
+ return its_make_vpe_non_resident(vpe, vgic_v4_want_doorbell(vcpu));
}
int vgic_v4_load(struct kvm_vcpu *vcpu)
@@ -415,7 +431,7 @@ int kvm_vgic_v4_set_forwarding(struct kvm *kvm, int virq,
struct vgic_irq *irq;
struct its_vlpi_map map;
unsigned long flags;
- int ret;
+ int ret = 0;
if (!vgic_supports_direct_msis(kvm))
return 0;
@@ -430,10 +446,15 @@ int kvm_vgic_v4_set_forwarding(struct kvm *kvm, int virq,
mutex_lock(&its->its_lock);
- /* Perform the actual DevID/EventID -> LPI translation. */
- ret = vgic_its_resolve_lpi(kvm, its, irq_entry->msi.devid,
- irq_entry->msi.data, &irq);
- if (ret)
+ /*
+ * Perform the actual DevID/EventID -> LPI translation.
+ *
+ * Silently exit if translation fails as the guest (or userspace!) has
+ * managed to do something stupid. Emulated LPI injection will still
+ * work if the guest figures itself out at a later time.
+ */
+ if (vgic_its_resolve_lpi(kvm, its, irq_entry->msi.devid,
+ irq_entry->msi.data, &irq))
goto out;
/* Silently exit if the vLPI is already mapped */
@@ -512,7 +533,7 @@ int kvm_vgic_v4_unset_forwarding(struct kvm *kvm, int virq,
if (ret)
goto out;
- WARN_ON(!(irq->hw && irq->host_irq == virq));
+ WARN_ON(irq->hw && irq->host_irq != virq);
if (irq->hw) {
atomic_dec(&irq->target_vcpu->arch.vgic_cpu.vgic_v3.its_vpe.vlpi_count);
irq->hw = false;
diff --git a/arch/arm64/kvm/vgic/vgic.c b/arch/arm64/kvm/vgic/vgic.c
index cc8c6b9b5dd8..8f8096d48925 100644
--- a/arch/arm64/kvm/vgic/vgic.c
+++ b/arch/arm64/kvm/vgic/vgic.c
@@ -872,6 +872,15 @@ void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
{
int used_lrs;
+ /* If nesting, emulate the HW effect from L0 to L1 */
+ if (vgic_state_is_nested(vcpu)) {
+ vgic_v3_sync_nested(vcpu);
+ return;
+ }
+
+ if (vcpu_has_nv(vcpu))
+ vgic_v3_nested_update_mi(vcpu);
+
/* An empty ap_list_head implies used_lrs == 0 */
if (list_empty(&vcpu->arch.vgic_cpu.ap_list_head))
return;
@@ -901,6 +910,35 @@ static inline void vgic_restore_state(struct kvm_vcpu *vcpu)
void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
{
/*
+ * If in a nested state, we must return early. Two possibilities:
+ *
+ * - If we have any pending IRQ for the guest and the guest
+ * expects IRQs to be handled in its virtual EL2 mode (the
+ * virtual IMO bit is set) and it is not already running in
+ * virtual EL2 mode, then we have to emulate an IRQ
+ * exception to virtual EL2.
+ *
+ * We do that by placing a request to ourselves which will
+ * abort the entry procedure and inject the exception at the
+ * beginning of the run loop.
+ *
+ * - Otherwise, do exactly *NOTHING*. The guest state is
+ * already loaded, and we can carry on with running it.
+ *
+ * If we have NV, but are not in a nested state, compute the
+ * maintenance interrupt state, as it may fire.
+ */
+ if (vgic_state_is_nested(vcpu)) {
+ if (kvm_vgic_vcpu_pending_irq(vcpu))
+ kvm_make_request(KVM_REQ_GUEST_HYP_IRQ_PENDING, vcpu);
+
+ return;
+ }
+
+ if (vcpu_has_nv(vcpu))
+ vgic_v3_nested_update_mi(vcpu);
+
+ /*
* If there are no virtual interrupts active or pending for this
* VCPU, then there is no work to do and we can bail out without
* taking any lock. There is a potential race with someone injecting
diff --git a/arch/arm64/kvm/vgic/vgic.h b/arch/arm64/kvm/vgic/vgic.h
index 122d95b4e284..0c5a63712702 100644
--- a/arch/arm64/kvm/vgic/vgic.h
+++ b/arch/arm64/kvm/vgic/vgic.h
@@ -353,4 +353,10 @@ static inline bool kvm_has_gicv3(struct kvm *kvm)
return kvm_has_feat(kvm, ID_AA64PFR0_EL1, GIC, IMP);
}
+void vgic_v3_sync_nested(struct kvm_vcpu *vcpu);
+void vgic_v3_load_nested(struct kvm_vcpu *vcpu);
+void vgic_v3_put_nested(struct kvm_vcpu *vcpu);
+void vgic_v3_handle_nested_maint_irq(struct kvm_vcpu *vcpu);
+void vgic_v3_nested_update_mi(struct kvm_vcpu *vcpu);
+
#endif
diff --git a/arch/arm64/lib/clear_user.S b/arch/arm64/lib/clear_user.S
index a5a5f5b97b17..de9a303b6ad0 100644
--- a/arch/arm64/lib/clear_user.S
+++ b/arch/arm64/lib/clear_user.S
@@ -17,14 +17,27 @@
* Alignment fixed up by hardware.
*/
- .p2align 4
- // Alignment is for the loop, but since the prologue (including BTI)
- // is also 16 bytes we can keep any padding outside the function
SYM_FUNC_START(__arch_clear_user)
add x2, x0, x1
+
+#ifdef CONFIG_AS_HAS_MOPS
+ .arch_extension mops
+alternative_if_not ARM64_HAS_MOPS
+ b .Lno_mops
+alternative_else_nop_endif
+
+USER(9f, setpt [x0]!, x1!, xzr)
+USER(6f, setmt [x0]!, x1!, xzr)
+USER(6f, setet [x0]!, x1!, xzr)
+ mov x0, #0
+ ret
+.Lno_mops:
+#endif
+
subs x1, x1, #8
b.mi 2f
-1:
+
+1: .p2align 4
USER(9f, sttr xzr, [x0])
add x0, x0, #8
subs x1, x1, #8
@@ -47,6 +60,10 @@ USER(7f, sttrb wzr, [x2, #-1])
ret
// Exception fixups
+6: b.cs 9f
+ // Registers are in Option A format
+ add x0, x0, x1
+ b 9f
7: sub x0, x2, #5 // Adjust for faulting on the final byte...
8: add x0, x0, #4 // ...or the second word of the 4-7 byte case
9: sub x0, x2, x0
diff --git a/arch/arm64/lib/copy_from_user.S b/arch/arm64/lib/copy_from_user.S
index 34e317907524..400057d607ec 100644
--- a/arch/arm64/lib/copy_from_user.S
+++ b/arch/arm64/lib/copy_from_user.S
@@ -52,6 +52,13 @@
stp \reg1, \reg2, [\ptr], \val
.endm
+ .macro cpy1 dst, src, count
+ .arch_extension mops
+ USER_CPY(9997f, 0, cpyfprt [\dst]!, [\src]!, \count!)
+ USER_CPY(9996f, 0, cpyfmrt [\dst]!, [\src]!, \count!)
+ USER_CPY(9996f, 0, cpyfert [\dst]!, [\src]!, \count!)
+ .endm
+
end .req x5
srcin .req x15
SYM_FUNC_START(__arch_copy_from_user)
@@ -62,6 +69,9 @@ SYM_FUNC_START(__arch_copy_from_user)
ret
// Exception fixups
+9996: b.cs 9997f
+ // Registers are in Option A format
+ add dst, dst, count
9997: cmp dst, dstin
b.ne 9998f
// Before being absolutely sure we couldn't copy anything, try harder
diff --git a/arch/arm64/lib/copy_template.S b/arch/arm64/lib/copy_template.S
index 488df234c49a..7f2f5a0e2fb9 100644
--- a/arch/arm64/lib/copy_template.S
+++ b/arch/arm64/lib/copy_template.S
@@ -40,6 +40,16 @@ D_l .req x13
D_h .req x14
mov dst, dstin
+
+#ifdef CONFIG_AS_HAS_MOPS
+alternative_if_not ARM64_HAS_MOPS
+ b .Lno_mops
+alternative_else_nop_endif
+ cpy1 dst, src, count
+ b .Lexitfunc
+.Lno_mops:
+#endif
+
cmp count, #16
/*When memory length is less than 16, the accessed are not aligned.*/
b.lo .Ltiny15
diff --git a/arch/arm64/lib/copy_to_user.S b/arch/arm64/lib/copy_to_user.S
index 802231772608..819f2e3fc7a9 100644
--- a/arch/arm64/lib/copy_to_user.S
+++ b/arch/arm64/lib/copy_to_user.S
@@ -51,6 +51,13 @@
user_stp 9997f, \reg1, \reg2, \ptr, \val
.endm
+ .macro cpy1 dst, src, count
+ .arch_extension mops
+ USER_CPY(9997f, 1, cpyfpwt [\dst]!, [\src]!, \count!)
+ USER_CPY(9996f, 1, cpyfmwt [\dst]!, [\src]!, \count!)
+ USER_CPY(9996f, 1, cpyfewt [\dst]!, [\src]!, \count!)
+ .endm
+
end .req x5
srcin .req x15
SYM_FUNC_START(__arch_copy_to_user)
@@ -61,6 +68,9 @@ SYM_FUNC_START(__arch_copy_to_user)
ret
// Exception fixups
+9996: b.cs 9997f
+ // Registers are in Option A format
+ add dst, dst, count
9997: cmp dst, dstin
b.ne 9998f
// Before being absolutely sure we couldn't copy anything, try harder
diff --git a/arch/arm64/lib/crc-t10dif-glue.c b/arch/arm64/lib/crc-t10dif-glue.c
index dab7e3796232..bacd18f23168 100644
--- a/arch/arm64/lib/crc-t10dif-glue.c
+++ b/arch/arm64/lib/crc-t10dif-glue.c
@@ -45,9 +45,7 @@ u16 crc_t10dif_arch(u16 crc, const u8 *data, size_t length)
crc_t10dif_pmull_p8(crc, data, length, buf);
kernel_neon_end();
- crc = 0;
- data = buf;
- length = sizeof(buf);
+ return crc_t10dif_generic(0, buf, sizeof(buf));
}
}
return crc_t10dif_generic(crc, data, length);
@@ -70,12 +68,6 @@ static void __exit crc_t10dif_arm64_exit(void)
}
module_exit(crc_t10dif_arm64_exit);
-bool crc_t10dif_is_optimized(void)
-{
- return static_key_enabled(&have_asimd);
-}
-EXPORT_SYMBOL(crc_t10dif_is_optimized);
-
MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
MODULE_DESCRIPTION("CRC-T10DIF using arm64 NEON and Crypto Extensions");
MODULE_LICENSE("GPL v2");
diff --git a/arch/arm64/lib/crc32-glue.c b/arch/arm64/lib/crc32-glue.c
index 15c4c9db573e..ed3acd71178f 100644
--- a/arch/arm64/lib/crc32-glue.c
+++ b/arch/arm64/lib/crc32-glue.c
@@ -22,7 +22,7 @@ asmlinkage u32 crc32_le_arm64_4way(u32 crc, unsigned char const *p, size_t len);
asmlinkage u32 crc32c_le_arm64_4way(u32 crc, unsigned char const *p, size_t len);
asmlinkage u32 crc32_be_arm64_4way(u32 crc, unsigned char const *p, size_t len);
-u32 __pure crc32_le_arch(u32 crc, const u8 *p, size_t len)
+u32 crc32_le_arch(u32 crc, const u8 *p, size_t len)
{
if (!alternative_has_cap_likely(ARM64_HAS_CRC32))
return crc32_le_base(crc, p, len);
@@ -43,10 +43,10 @@ u32 __pure crc32_le_arch(u32 crc, const u8 *p, size_t len)
}
EXPORT_SYMBOL(crc32_le_arch);
-u32 __pure crc32c_le_arch(u32 crc, const u8 *p, size_t len)
+u32 crc32c_arch(u32 crc, const u8 *p, size_t len)
{
if (!alternative_has_cap_likely(ARM64_HAS_CRC32))
- return crc32c_le_base(crc, p, len);
+ return crc32c_base(crc, p, len);
if (len >= min_len && cpu_have_named_feature(PMULL) && crypto_simd_usable()) {
kernel_neon_begin();
@@ -62,9 +62,9 @@ u32 __pure crc32c_le_arch(u32 crc, const u8 *p, size_t len)
return crc32c_le_arm64(crc, p, len);
}
-EXPORT_SYMBOL(crc32c_le_arch);
+EXPORT_SYMBOL(crc32c_arch);
-u32 __pure crc32_be_arch(u32 crc, const u8 *p, size_t len)
+u32 crc32_be_arch(u32 crc, const u8 *p, size_t len)
{
if (!alternative_has_cap_likely(ARM64_HAS_CRC32))
return crc32_be_base(crc, p, len);
diff --git a/arch/arm64/mm/extable.c b/arch/arm64/mm/extable.c
index 228d681a8715..6e0528831cd3 100644
--- a/arch/arm64/mm/extable.c
+++ b/arch/arm64/mm/extable.c
@@ -8,8 +8,33 @@
#include <linux/uaccess.h>
#include <asm/asm-extable.h>
+#include <asm/esr.h>
#include <asm/ptrace.h>
+static bool cpy_faulted_on_uaccess(const struct exception_table_entry *ex,
+ unsigned long esr)
+{
+ bool uaccess_is_write = FIELD_GET(EX_DATA_UACCESS_WRITE, ex->data);
+ bool fault_on_write = esr & ESR_ELx_WNR;
+
+ return uaccess_is_write == fault_on_write;
+}
+
+bool insn_may_access_user(unsigned long addr, unsigned long esr)
+{
+ const struct exception_table_entry *ex = search_exception_tables(addr);
+
+ if (!ex)
+ return false;
+
+ switch (ex->type) {
+ case EX_TYPE_UACCESS_CPY:
+ return cpy_faulted_on_uaccess(ex, esr);
+ default:
+ return true;
+ }
+}
+
static inline unsigned long
get_ex_fixup(const struct exception_table_entry *ex)
{
@@ -29,6 +54,17 @@ static bool ex_handler_uaccess_err_zero(const struct exception_table_entry *ex,
return true;
}
+static bool ex_handler_uaccess_cpy(const struct exception_table_entry *ex,
+ struct pt_regs *regs, unsigned long esr)
+{
+ /* Do not fix up faults on kernel memory accesses */
+ if (!cpy_faulted_on_uaccess(ex, esr))
+ return false;
+
+ regs->pc = get_ex_fixup(ex);
+ return true;
+}
+
static bool
ex_handler_load_unaligned_zeropad(const struct exception_table_entry *ex,
struct pt_regs *regs)
@@ -56,7 +92,7 @@ ex_handler_load_unaligned_zeropad(const struct exception_table_entry *ex,
return true;
}
-bool fixup_exception(struct pt_regs *regs)
+bool fixup_exception(struct pt_regs *regs, unsigned long esr)
{
const struct exception_table_entry *ex;
@@ -70,6 +106,8 @@ bool fixup_exception(struct pt_regs *regs)
case EX_TYPE_UACCESS_ERR_ZERO:
case EX_TYPE_KACCESS_ERR_ZERO:
return ex_handler_uaccess_err_zero(ex, regs);
+ case EX_TYPE_UACCESS_CPY:
+ return ex_handler_uaccess_cpy(ex, regs, esr);
case EX_TYPE_LOAD_UNALIGNED_ZEROPAD:
return ex_handler_load_unaligned_zeropad(ex, regs);
}
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index ef63651099a9..ec0a337891dd 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -375,7 +375,7 @@ static void __do_kernel_fault(unsigned long addr, unsigned long esr,
* Are we prepared to handle this kernel fault?
* We are almost certainly not prepared to handle instruction faults.
*/
- if (!is_el1_instruction_abort(esr) && fixup_exception(regs))
+ if (!is_el1_instruction_abort(esr) && fixup_exception(regs, esr))
return;
if (WARN_RATELIMIT(is_spurious_el1_translation_fault(addr, esr, regs),
@@ -606,7 +606,7 @@ static int __kprobes do_page_fault(unsigned long far, unsigned long esr,
die_kernel_fault("execution of user memory",
addr, esr, regs);
- if (!search_exception_tables(regs->pc))
+ if (!insn_may_access_user(regs->pc, esr))
die_kernel_fault("access to user memory outside uaccess routines",
addr, esr, regs);
}
diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
index b3a7fafe8892..cfe8cb8ba1cc 100644
--- a/arch/arm64/mm/hugetlbpage.c
+++ b/arch/arm64/mm/hugetlbpage.c
@@ -334,7 +334,9 @@ unsigned long hugetlb_mask_last_page(struct hstate *h)
switch (hp_size) {
#ifndef __PAGETABLE_PMD_FOLDED
case PUD_SIZE:
- return PGDIR_SIZE - PUD_SIZE;
+ if (pud_sect_supported())
+ return PGDIR_SIZE - PUD_SIZE;
+ break;
#endif
case CONT_PMD_SIZE:
return PUD_SIZE - CONT_PMD_SIZE;
@@ -356,23 +358,21 @@ pte_t arch_make_huge_pte(pte_t entry, unsigned int shift, vm_flags_t flags)
switch (pagesize) {
#ifndef __PAGETABLE_PMD_FOLDED
case PUD_SIZE:
- entry = pud_pte(pud_mkhuge(pte_pud(entry)));
+ if (pud_sect_supported())
+ return pud_pte(pud_mkhuge(pte_pud(entry)));
break;
#endif
case CONT_PMD_SIZE:
- entry = pmd_pte(pmd_mkcont(pte_pmd(entry)));
- fallthrough;
+ return pmd_pte(pmd_mkhuge(pmd_mkcont(pte_pmd(entry))));
case PMD_SIZE:
- entry = pmd_pte(pmd_mkhuge(pte_pmd(entry)));
- break;
+ return pmd_pte(pmd_mkhuge(pte_pmd(entry)));
case CONT_PTE_SIZE:
- entry = pte_mkcont(entry);
- break;
+ return pte_mkcont(entry);
default:
- pr_warn("%s: unrecognized huge page size 0x%lx\n",
- __func__, pagesize);
break;
}
+ pr_warn("%s: unrecognized huge page size 0x%lx\n",
+ __func__, pagesize);
return entry;
}
diff --git a/arch/arm64/mm/kasan_init.c b/arch/arm64/mm/kasan_init.c
index b65a29440a0c..d541ce45daeb 100644
--- a/arch/arm64/mm/kasan_init.c
+++ b/arch/arm64/mm/kasan_init.c
@@ -190,7 +190,7 @@ static void __init kasan_pgd_populate(unsigned long addr, unsigned long end,
*/
static bool __init root_level_aligned(u64 addr)
{
- int shift = (ARM64_HW_PGTABLE_LEVELS(vabits_actual) - 1) * (PAGE_SHIFT - 3);
+ int shift = (ARM64_HW_PGTABLE_LEVELS(vabits_actual) - 1) * PTDESC_TABLE_SHIFT;
return (addr % (PAGE_SIZE << shift)) == 0;
}
@@ -245,7 +245,7 @@ static int __init root_level_idx(u64 addr)
*/
u64 vabits = IS_ENABLED(CONFIG_ARM64_64K_PAGES) ? VA_BITS
: vabits_actual;
- int shift = (ARM64_HW_PGTABLE_LEVELS(vabits) - 1) * (PAGE_SHIFT - 3);
+ int shift = (ARM64_HW_PGTABLE_LEVELS(vabits) - 1) * PTDESC_TABLE_SHIFT;
return (addr & ~_PAGE_OFFSET(vabits)) >> (shift + PAGE_SHIFT);
}
@@ -269,7 +269,7 @@ static void __init clone_next_level(u64 addr, pgd_t *tmp_pg_dir, pud_t *pud)
*/
static int __init next_level_idx(u64 addr)
{
- int shift = (ARM64_HW_PGTABLE_LEVELS(vabits_actual) - 2) * (PAGE_SHIFT - 3);
+ int shift = (ARM64_HW_PGTABLE_LEVELS(vabits_actual) - 2) * PTDESC_TABLE_SHIFT;
return (addr >> (shift + PAGE_SHIFT)) % PTRS_PER_PTE;
}
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index b4df5bc5b1b8..b98f89420713 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -1177,8 +1177,11 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
struct vmem_altmap *altmap)
{
WARN_ON((start < VMEMMAP_START) || (end > VMEMMAP_END));
+ /* [start, end] should be within one section */
+ WARN_ON_ONCE(end - start > PAGES_PER_SECTION * sizeof(struct page));
- if (!IS_ENABLED(CONFIG_ARM64_4K_PAGES))
+ if (!IS_ENABLED(CONFIG_ARM64_4K_PAGES) ||
+ (end - start < PAGES_PER_SECTION * sizeof(struct page)))
return vmemmap_populate_basepages(start, end, node, altmap);
else
return vmemmap_populate_hugepages(start, end, node, altmap);
@@ -1555,9 +1558,8 @@ void __cpu_replace_ttbr1(pgd_t *pgdp, bool cnp)
#ifdef CONFIG_ARCH_HAS_PKEYS
int arch_set_user_pkey_access(struct task_struct *tsk, int pkey, unsigned long init_val)
{
- u64 new_por = POE_RXW;
+ u64 new_por;
u64 old_por;
- u64 pkey_shift;
if (!system_supports_poe())
return -ENOSPC;
@@ -1571,7 +1573,7 @@ int arch_set_user_pkey_access(struct task_struct *tsk, int pkey, unsigned long i
return -EINVAL;
/* Set the bits we need in POR: */
- new_por = POE_RXW;
+ new_por = POE_RWX;
if (init_val & PKEY_DISABLE_WRITE)
new_por &= ~POE_W;
if (init_val & PKEY_DISABLE_ACCESS)
@@ -1582,12 +1584,11 @@ int arch_set_user_pkey_access(struct task_struct *tsk, int pkey, unsigned long i
new_por &= ~POE_X;
/* Shift the bits in to the correct place in POR for pkey: */
- pkey_shift = pkey * POR_BITS_PER_PKEY;
- new_por <<= pkey_shift;
+ new_por = POR_ELx_PERM_PREP(pkey, new_por);
/* Get old POR and mask off any old bits in place: */
old_por = read_sysreg_s(SYS_POR_EL0);
- old_por &= ~(POE_MASK << pkey_shift);
+ old_por &= ~(POE_MASK << POR_ELx_PERM_SHIFT(pkey));
/* Write old part along with new part: */
write_sysreg_s(old_por | new_por, SYS_POR_EL0);
diff --git a/arch/arm64/mm/physaddr.c b/arch/arm64/mm/physaddr.c
index cde44c13dda1..7d94e09b01b3 100644
--- a/arch/arm64/mm/physaddr.c
+++ b/arch/arm64/mm/physaddr.c
@@ -10,7 +10,7 @@
phys_addr_t __virt_to_phys(unsigned long x)
{
WARN(!__is_lm_address(__tag_reset(x)),
- "virt_to_phys used for non-linear address: %pK (%pS)\n",
+ "virt_to_phys used for non-linear address: %p (%pS)\n",
(void *)x,
(void *)x);
diff --git a/arch/arm64/mm/ptdump.c b/arch/arm64/mm/ptdump.c
index 688fbe0271ca..8cec0da4cff2 100644
--- a/arch/arm64/mm/ptdump.c
+++ b/arch/arm64/mm/ptdump.c
@@ -80,8 +80,8 @@ static const struct ptdump_prot_bits pte_bits[] = {
.set = "CON",
.clear = " ",
}, {
- .mask = PTE_TABLE_BIT | PTE_VALID,
- .val = PTE_VALID,
+ .mask = PMD_TYPE_MASK,
+ .val = PMD_TYPE_SECT,
.set = "BLK",
.clear = " ",
}, {
diff --git a/arch/arm64/tools/cpucaps b/arch/arm64/tools/cpucaps
index 1e65f2fb45bd..772c1b008e43 100644
--- a/arch/arm64/tools/cpucaps
+++ b/arch/arm64/tools/cpucaps
@@ -45,6 +45,7 @@ HAS_LSE_ATOMICS
HAS_MOPS
HAS_NESTED_VIRT
HAS_PAN
+HAS_PMUV3
HAS_S1PIE
HAS_S1POE
HAS_RAS_EXTN
@@ -104,6 +105,7 @@ WORKAROUND_CAVIUM_TX2_219_TVM
WORKAROUND_CLEAN_CACHE
WORKAROUND_DEVICE_LOAD_ACQUIRE
WORKAROUND_NVIDIA_CARMEL_CNP
+WORKAROUND_PMUV3_IMPDEF_TRAPS
WORKAROUND_QCOM_FALKOR_E1003
WORKAROUND_QCOM_ORYON_CNTVOFF
WORKAROUND_REPEAT_TLBI
diff --git a/arch/arm64/tools/gen-sysreg.awk b/arch/arm64/tools/gen-sysreg.awk
index 1a2afc9fdd42..f2a1732cb1f6 100755
--- a/arch/arm64/tools/gen-sysreg.awk
+++ b/arch/arm64/tools/gen-sysreg.awk
@@ -111,7 +111,7 @@ END {
/^$/ { next }
/^[\t ]*#/ { next }
-/^SysregFields/ && block_current() == "Root" {
+$1 == "SysregFields" && block_current() == "Root" {
block_push("SysregFields")
expect_fields(2)
@@ -127,7 +127,8 @@ END {
next
}
-/^EndSysregFields/ && block_current() == "SysregFields" {
+$1 == "EndSysregFields" && block_current() == "SysregFields" {
+ expect_fields(1)
if (next_bit > 0)
fatal("Unspecified bits in " reg)
@@ -145,7 +146,7 @@ END {
next
}
-/^Sysreg/ && block_current() == "Root" {
+$1 == "Sysreg" && block_current() == "Root" {
block_push("Sysreg")
expect_fields(7)
@@ -177,7 +178,8 @@ END {
next
}
-/^EndSysreg/ && block_current() == "Sysreg" {
+$1 == "EndSysreg" && block_current() == "Sysreg" {
+ expect_fields(1)
if (next_bit > 0)
fatal("Unspecified bits in " reg)
@@ -206,7 +208,7 @@ END {
# Currently this is effectively a comment, in future we may want to emit
# defines for the fields.
-(/^Fields/ || /^Mapping/) && block_current() == "Sysreg" {
+($1 == "Fields" || $1 == "Mapping") && block_current() == "Sysreg" {
expect_fields(2)
if (next_bit != 63)
@@ -224,7 +226,7 @@ END {
}
-/^Res0/ && (block_current() == "Sysreg" || block_current() == "SysregFields") {
+$1 == "Res0" && (block_current() == "Sysreg" || block_current() == "SysregFields") {
expect_fields(2)
parse_bitdef(reg, "RES0", $2)
field = "RES0_" msb "_" lsb
@@ -234,7 +236,7 @@ END {
next
}
-/^Res1/ && (block_current() == "Sysreg" || block_current() == "SysregFields") {
+$1 == "Res1" && (block_current() == "Sysreg" || block_current() == "SysregFields") {
expect_fields(2)
parse_bitdef(reg, "RES1", $2)
field = "RES1_" msb "_" lsb
@@ -244,7 +246,7 @@ END {
next
}
-/^Unkn/ && (block_current() == "Sysreg" || block_current() == "SysregFields") {
+$1 == "Unkn" && (block_current() == "Sysreg" || block_current() == "SysregFields") {
expect_fields(2)
parse_bitdef(reg, "UNKN", $2)
field = "UNKN_" msb "_" lsb
@@ -254,7 +256,7 @@ END {
next
}
-/^Field/ && (block_current() == "Sysreg" || block_current() == "SysregFields") {
+$1 == "Field" && (block_current() == "Sysreg" || block_current() == "SysregFields") {
expect_fields(3)
field = $3
parse_bitdef(reg, field, $2)
@@ -265,14 +267,14 @@ END {
next
}
-/^Raz/ && (block_current() == "Sysreg" || block_current() == "SysregFields") {
+$1 == "Raz" && (block_current() == "Sysreg" || block_current() == "SysregFields") {
expect_fields(2)
parse_bitdef(reg, field, $2)
next
}
-/^SignedEnum/ && (block_current() == "Sysreg" || block_current() == "SysregFields") {
+$1 == "SignedEnum" && (block_current() == "Sysreg" || block_current() == "SysregFields") {
block_push("Enum")
expect_fields(3)
@@ -285,7 +287,7 @@ END {
next
}
-/^UnsignedEnum/ && (block_current() == "Sysreg" || block_current() == "SysregFields") {
+$1 == "UnsignedEnum" && (block_current() == "Sysreg" || block_current() == "SysregFields") {
block_push("Enum")
expect_fields(3)
@@ -298,7 +300,7 @@ END {
next
}
-/^Enum/ && (block_current() == "Sysreg" || block_current() == "SysregFields") {
+$1 == "Enum" && (block_current() == "Sysreg" || block_current() == "SysregFields") {
block_push("Enum")
expect_fields(3)
@@ -310,7 +312,8 @@ END {
next
}
-/^EndEnum/ && block_current() == "Enum" {
+$1 == "EndEnum" && block_current() == "Enum" {
+ expect_fields(1)
field = null
msb = null
diff --git a/arch/arm64/tools/syscall_32.tbl b/arch/arm64/tools/syscall_32.tbl
index 69a829912a05..0765b3a8d6d6 100644
--- a/arch/arm64/tools/syscall_32.tbl
+++ b/arch/arm64/tools/syscall_32.tbl
@@ -478,3 +478,4 @@
464 common getxattrat sys_getxattrat
465 common listxattrat sys_listxattrat
466 common removexattrat sys_removexattrat
+467 common open_tree_attr sys_open_tree_attr
diff --git a/arch/arm64/tools/sysreg b/arch/arm64/tools/sysreg
index 762ee084b37c..f9476848a2ed 100644
--- a/arch/arm64/tools/sysreg
+++ b/arch/arm64/tools/sysreg
@@ -1664,6 +1664,7 @@ EndEnum
UnsignedEnum 59:56 FGT
0b0000 NI
0b0001 IMP
+ 0b0010 FGT2
EndEnum
Res0 55:48
UnsignedEnum 47:44 EXS
@@ -1725,6 +1726,7 @@ Enum 3:0 PARANGE
0b0100 44
0b0101 48
0b0110 52
+ 0b0111 56
EndEnum
EndSysreg
@@ -2074,7 +2076,7 @@ EndEnum
Res0 4:2
Field 1 ExTRE
Field 0 E0TRE
-EndSysregFields
+EndSysreg
Sysreg SMPRI_EL1 3 0 1 2 4
Res0 63:4
@@ -2641,6 +2643,101 @@ Field 0 E0HTRE
EndSysreg
+Sysreg HDFGRTR2_EL2 3 4 3 1 0
+Res0 63:25
+Field 24 nPMBMAR_EL1
+Field 23 nMDSTEPOP_EL1
+Field 22 nTRBMPAM_EL1
+Res0 21
+Field 20 nTRCITECR_EL1
+Field 19 nPMSDSFR_EL1
+Field 18 nSPMDEVAFF_EL1
+Field 17 nSPMID
+Field 16 nSPMSCR_EL1
+Field 15 nSPMACCESSR_EL1
+Field 14 nSPMCR_EL0
+Field 13 nSPMOVS
+Field 12 nSPMINTEN
+Field 11 nSPMCNTEN
+Field 10 nSPMSELR_EL0
+Field 9 nSPMEVTYPERn_EL0
+Field 8 nSPMEVCNTRn_EL0
+Field 7 nPMSSCR_EL1
+Field 6 nPMSSDATA
+Field 5 nMDSELR_EL1
+Field 4 nPMUACR_EL1
+Field 3 nPMICFILTR_EL0
+Field 2 nPMICNTR_EL0
+Field 1 nPMIAR_EL1
+Field 0 nPMECR_EL1
+EndSysreg
+
+Sysreg HDFGWTR2_EL2 3 4 3 1 1
+Res0 63:25
+Field 24 nPMBMAR_EL1
+Field 23 nMDSTEPOP_EL1
+Field 22 nTRBMPAM_EL1
+Field 21 nPMZR_EL0
+Field 20 nTRCITECR_EL1
+Field 19 nPMSDSFR_EL1
+Res0 18:17
+Field 16 nSPMSCR_EL1
+Field 15 nSPMACCESSR_EL1
+Field 14 nSPMCR_EL0
+Field 13 nSPMOVS
+Field 12 nSPMINTEN
+Field 11 nSPMCNTEN
+Field 10 nSPMSELR_EL0
+Field 9 nSPMEVTYPERn_EL0
+Field 8 nSPMEVCNTRn_EL0
+Field 7 nPMSSCR_EL1
+Res0 6
+Field 5 nMDSELR_EL1
+Field 4 nPMUACR_EL1
+Field 3 nPMICFILTR_EL0
+Field 2 nPMICNTR_EL0
+Field 1 nPMIAR_EL1
+Field 0 nPMECR_EL1
+EndSysreg
+
+Sysreg HFGRTR2_EL2 3 4 3 1 2
+Res0 63:15
+Field 14 nACTLRALIAS_EL1
+Field 13 nACTLRMASK_EL1
+Field 12 nTCR2ALIAS_EL1
+Field 11 nTCRALIAS_EL1
+Field 10 nSCTLRALIAS2_EL1
+Field 9 nSCTLRALIAS_EL1
+Field 8 nCPACRALIAS_EL1
+Field 7 nTCR2MASK_EL1
+Field 6 nTCRMASK_EL1
+Field 5 nSCTLR2MASK_EL1
+Field 4 nSCTLRMASK_EL1
+Field 3 nCPACRMASK_EL1
+Field 2 nRCWSMASK_EL1
+Field 1 nERXGSR_EL1
+Field 0 nPFAR_EL1
+EndSysreg
+
+Sysreg HFGWTR2_EL2 3 4 3 1 3
+Res0 63:15
+Field 14 nACTLRALIAS_EL1
+Field 13 nACTLRMASK_EL1
+Field 12 nTCR2ALIAS_EL1
+Field 11 nTCRALIAS_EL1
+Field 10 nSCTLRALIAS2_EL1
+Field 9 nSCTLRALIAS_EL1
+Field 8 nCPACRALIAS_EL1
+Field 7 nTCR2MASK_EL1
+Field 6 nTCRMASK_EL1
+Field 5 nSCTLR2MASK_EL1
+Field 4 nSCTLRMASK_EL1
+Field 3 nCPACRMASK_EL1
+Field 2 nRCWSMASK_EL1
+Res0 1
+Field 0 nPFAR_EL1
+EndSysreg
+
Sysreg HDFGRTR_EL2 3 4 3 1 4
Field 63 PMBIDR_EL1
Field 62 nPMSNEVFR_EL1
@@ -2813,6 +2910,12 @@ Field 1 AMEVCNTR00_EL0
Field 0 AMCNTEN0
EndSysreg
+Sysreg HFGITR2_EL2 3 4 3 1 7
+Res0 63:2
+Field 1 nDCCIVAPS
+Field 0 TSBCSYNC
+EndSysreg
+
Sysreg ZCR_EL2 3 4 1 2 0
Fields ZCR_ELx
EndSysreg
@@ -3035,6 +3138,54 @@ Field 31:16 PhyPARTID29
Field 15:0 PhyPARTID28
EndSysreg
+Sysreg ICH_HCR_EL2 3 4 12 11 0
+Res0 63:32
+Field 31:27 EOIcount
+Res0 26:16
+Field 15 DVIM
+Field 14 TDIR
+Field 13 TSEI
+Field 12 TALL1
+Field 11 TALL0
+Field 10 TC
+Res0 9
+Field 8 vSGIEOICount
+Field 7 VGrp1DIE
+Field 6 VGrp1EIE
+Field 5 VGrp0DIE
+Field 4 VGrp0EIE
+Field 3 NPIE
+Field 2 LRENPIE
+Field 1 UIE
+Field 0 En
+EndSysreg
+
+Sysreg ICH_VTR_EL2 3 4 12 11 1
+Res0 63:32
+Field 31:29 PRIbits
+Field 28:26 PREbits
+Field 25:23 IDbits
+Field 22 SEIS
+Field 21 A3V
+Field 20 nV4
+Field 19 TDS
+Field 18 DVIM
+Res0 17:5
+Field 4:0 ListRegs
+EndSysreg
+
+Sysreg ICH_MISR_EL2 3 4 12 11 2
+Res0 63:8
+Field 7 VGrp1D
+Field 6 VGrp1E
+Field 5 VGrp0D
+Field 4 VGrp0E
+Field 3 NP
+Field 2 LRENP
+Field 1 U
+Field 0 EOI
+EndSysreg
+
Sysreg CONTEXTIDR_EL2 3 4 13 0 1
Fields CONTEXTIDR_ELx
EndSysreg
diff --git a/arch/csky/abiv1/alignment.c b/arch/csky/abiv1/alignment.c
index e5b8b4b2109a..aee904833dec 100644
--- a/arch/csky/abiv1/alignment.c
+++ b/arch/csky/abiv1/alignment.c
@@ -300,7 +300,7 @@ bad_area:
force_sig_fault(SIGBUS, BUS_ADRALN, (void __user *)addr);
}
-static struct ctl_table alignment_tbl[5] = {
+static const struct ctl_table alignment_tbl[] = {
{
.procname = "kernel_enable",
.data = &align_kern_enable,
diff --git a/arch/csky/kernel/vdso/Makefile b/arch/csky/kernel/vdso/Makefile
index 069ef0b17fe5..a3042287a070 100644
--- a/arch/csky/kernel/vdso/Makefile
+++ b/arch/csky/kernel/vdso/Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
# Include the generic Makefile to check the built vdso.
-include $(srctree)/lib/vdso/Makefile
+include $(srctree)/lib/vdso/Makefile.include
# Symbols present in the vdso
vdso-syms += rt_sigreturn
diff --git a/arch/hexagon/configs/comet_defconfig b/arch/hexagon/configs/comet_defconfig
index 6cb764947596..469c025297c6 100644
--- a/arch/hexagon/configs/comet_defconfig
+++ b/arch/hexagon/configs/comet_defconfig
@@ -75,7 +75,6 @@ CONFIG_CRYPTO_MD5=y
CONFIG_CRC_CCITT=y
CONFIG_CRC16=y
CONFIG_CRC_T10DIF=y
-CONFIG_LIBCRC32C=y
CONFIG_FRAME_WARN=0
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_FS=y
diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig
index 2b8bd27a852f..687502917ae2 100644
--- a/arch/loongarch/Kconfig
+++ b/arch/loongarch/Kconfig
@@ -30,6 +30,7 @@ config LOONGARCH
select ARCH_HAS_SET_MEMORY
select ARCH_HAS_SET_DIRECT_MAP
select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
+ select ARCH_HAS_VDSO_ARCH_DATA
select ARCH_INLINE_READ_LOCK if !PREEMPTION
select ARCH_INLINE_READ_LOCK_BH if !PREEMPTION
select ARCH_INLINE_READ_LOCK_IRQ if !PREEMPTION
@@ -106,6 +107,7 @@ config LOONGARCH
select GENERIC_SCHED_CLOCK
select GENERIC_SMP_IDLE_THREAD
select GENERIC_TIME_VSYSCALL
+ select GENERIC_VDSO_DATA_STORE
select GENERIC_VDSO_TIME_NS
select GPIOLIB
select HAS_IOPORT
@@ -291,6 +293,9 @@ config AS_HAS_LBT_EXTENSION
config AS_HAS_LVZ_EXTENSION
def_bool $(as-instr,hvcl 0)
+config CC_HAS_ANNOTATE_TABLEJUMP
+ def_bool $(cc-option,-mannotate-tablejump)
+
menu "Kernel type and options"
source "kernel/Kconfig.hz"
diff --git a/arch/loongarch/Makefile b/arch/loongarch/Makefile
index 567bd122a9ee..0304eabbe606 100644
--- a/arch/loongarch/Makefile
+++ b/arch/loongarch/Makefile
@@ -101,7 +101,11 @@ KBUILD_AFLAGS += $(call cc-option,-mthin-add-sub) $(call cc-option,-Wa$(comma)
KBUILD_CFLAGS += $(call cc-option,-mthin-add-sub) $(call cc-option,-Wa$(comma)-mthin-add-sub)
ifdef CONFIG_OBJTOOL
-KBUILD_CFLAGS += -fno-jump-tables
+ifdef CONFIG_CC_HAS_ANNOTATE_TABLEJUMP
+KBUILD_CFLAGS += -mannotate-tablejump
+else
+KBUILD_CFLAGS += -fno-jump-tables # keep compatibility with older compilers
+endif
endif
KBUILD_RUSTFLAGS += --target=loongarch64-unknown-none-softfloat -Ccode-model=small
diff --git a/arch/loongarch/boot/dts/loongson-2k1000-ref.dts b/arch/loongarch/boot/dts/loongson-2k1000-ref.dts
index 23cf26cc3e5f..3514ea78f525 100644
--- a/arch/loongarch/boot/dts/loongson-2k1000-ref.dts
+++ b/arch/loongarch/boot/dts/loongson-2k1000-ref.dts
@@ -90,11 +90,6 @@
#address-cells = <1>;
#size-cells = <0>;
- spidev@0 {
- compatible = "rohm,dh2228fv";
- spi-max-frequency = <100000000>;
- reg = <0>;
- };
};
&ehci0 {
diff --git a/arch/loongarch/configs/loongson3_defconfig b/arch/loongarch/configs/loongson3_defconfig
index 73c77500ac46..3c240afe5aed 100644
--- a/arch/loongarch/configs/loongson3_defconfig
+++ b/arch/loongarch/configs/loongson3_defconfig
@@ -981,7 +981,6 @@ CONFIG_MINIX_FS=m
CONFIG_ROMFS_FS=m
CONFIG_PSTORE=m
CONFIG_PSTORE_COMPRESS=y
-CONFIG_SYSV_FS=m
CONFIG_UFS_FS=m
CONFIG_EROFS_FS=m
CONFIG_EROFS_FS_ZIP_LZMA=y
diff --git a/arch/loongarch/include/asm/kvm_host.h b/arch/loongarch/include/asm/kvm_host.h
index 590982cd986e..f457c2662e2f 100644
--- a/arch/loongarch/include/asm/kvm_host.h
+++ b/arch/loongarch/include/asm/kvm_host.h
@@ -12,6 +12,7 @@
#include <linux/kvm.h>
#include <linux/kvm_types.h>
#include <linux/mutex.h>
+#include <linux/perf_event.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
#include <linux/types.h>
@@ -176,6 +177,9 @@ struct kvm_vcpu_arch {
/* Pointers stored here for easy accessing from assembly code */
int (*handle_exit)(struct kvm_run *run, struct kvm_vcpu *vcpu);
+ /* GPA (=HVA) of PGD for secondary mmu */
+ unsigned long kvm_pgd;
+
/* Host registers preserved across guest mode execution */
unsigned long host_sp;
unsigned long host_tp;
@@ -289,6 +293,8 @@ static inline int kvm_get_pmu_num(struct kvm_vcpu_arch *arch)
return (arch->cpucfg[6] & CPUCFG6_PMNUM) >> CPUCFG6_PMNUM_SHIFT;
}
+bool kvm_arch_pmi_in_guest(struct kvm_vcpu *vcpu);
+
/* Debug: dump vcpu state */
int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu);
@@ -320,7 +326,6 @@ static inline bool kvm_is_ifetch_fault(struct kvm_vcpu_arch *arch)
/* Misc */
static inline void kvm_arch_hardware_unsetup(void) {}
-static inline void kvm_arch_sync_events(struct kvm *kvm) {}
static inline void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) {}
static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {}
static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {}
diff --git a/arch/loongarch/include/asm/vdso.h b/arch/loongarch/include/asm/vdso.h
index d3ba35eb23e7..f72ec79e2dde 100644
--- a/arch/loongarch/include/asm/vdso.h
+++ b/arch/loongarch/include/asm/vdso.h
@@ -31,7 +31,6 @@ struct loongarch_vdso_info {
unsigned long size;
unsigned long offset_sigreturn;
struct vm_special_mapping code_mapping;
- struct vm_special_mapping data_mapping;
};
extern struct loongarch_vdso_info vdso_info;
diff --git a/arch/loongarch/include/asm/vdso/arch_data.h b/arch/loongarch/include/asm/vdso/arch_data.h
new file mode 100644
index 000000000000..322d0a5f1c84
--- /dev/null
+++ b/arch/loongarch/include/asm/vdso/arch_data.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Author: Huacai Chen <chenhuacai@loongson.cn>
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+
+#ifndef _VDSO_ARCH_DATA_H
+#define _VDSO_ARCH_DATA_H
+
+#ifndef __ASSEMBLY__
+
+#include <asm/asm.h>
+#include <asm/vdso.h>
+
+struct vdso_pcpu_data {
+ u32 node;
+} ____cacheline_aligned_in_smp;
+
+struct vdso_arch_data {
+ struct vdso_pcpu_data pdata[NR_CPUS];
+};
+
+#endif /* __ASSEMBLY__ */
+
+#endif
diff --git a/arch/loongarch/include/asm/vdso/getrandom.h b/arch/loongarch/include/asm/vdso/getrandom.h
index e80f3c4ac748..48c43f55b039 100644
--- a/arch/loongarch/include/asm/vdso/getrandom.h
+++ b/arch/loongarch/include/asm/vdso/getrandom.h
@@ -28,11 +28,6 @@ static __always_inline ssize_t getrandom_syscall(void *_buffer, size_t _len, uns
return ret;
}
-static __always_inline const struct vdso_rng_data *__arch_get_vdso_rng_data(void)
-{
- return &_loongarch_data.rng_data;
-}
-
#endif /* !__ASSEMBLY__ */
#endif /* __ASM_VDSO_GETRANDOM_H */
diff --git a/arch/loongarch/include/asm/vdso/gettimeofday.h b/arch/loongarch/include/asm/vdso/gettimeofday.h
index 7eb3f041af76..88cfcf133116 100644
--- a/arch/loongarch/include/asm/vdso/gettimeofday.h
+++ b/arch/loongarch/include/asm/vdso/gettimeofday.h
@@ -72,7 +72,7 @@ static __always_inline int clock_getres_fallback(
}
static __always_inline u64 __arch_get_hw_counter(s32 clock_mode,
- const struct vdso_data *vd)
+ const struct vdso_time_data *vd)
{
uint64_t count;
@@ -89,18 +89,6 @@ static inline bool loongarch_vdso_hres_capable(void)
}
#define __arch_vdso_hres_capable loongarch_vdso_hres_capable
-static __always_inline const struct vdso_data *__arch_get_vdso_data(void)
-{
- return _vdso_data;
-}
-
-#ifdef CONFIG_TIME_NS
-static __always_inline
-const struct vdso_data *__arch_get_timens_vdso_data(const struct vdso_data *vd)
-{
- return _timens_data;
-}
-#endif
#endif /* !__ASSEMBLY__ */
#endif /* __ASM_VDSO_GETTIMEOFDAY_H */
diff --git a/arch/loongarch/include/asm/vdso/vdso.h b/arch/loongarch/include/asm/vdso/vdso.h
index 1c183a9b2115..50c65fb29daf 100644
--- a/arch/loongarch/include/asm/vdso/vdso.h
+++ b/arch/loongarch/include/asm/vdso/vdso.h
@@ -12,43 +12,9 @@
#include <asm/asm.h>
#include <asm/page.h>
#include <asm/vdso.h>
+#include <vdso/datapage.h>
-struct vdso_pcpu_data {
- u32 node;
-} ____cacheline_aligned_in_smp;
-
-struct loongarch_vdso_data {
- struct vdso_pcpu_data pdata[NR_CPUS];
- struct vdso_rng_data rng_data;
-};
-
-/*
- * The layout of vvar:
- *
- * high
- * +---------------------+--------------------------+
- * | loongarch vdso data | LOONGARCH_VDSO_DATA_SIZE |
- * +---------------------+--------------------------+
- * | time-ns vdso data | PAGE_SIZE |
- * +---------------------+--------------------------+
- * | generic vdso data | PAGE_SIZE |
- * +---------------------+--------------------------+
- * low
- */
-#define LOONGARCH_VDSO_DATA_SIZE PAGE_ALIGN(sizeof(struct loongarch_vdso_data))
-#define LOONGARCH_VDSO_DATA_PAGES (LOONGARCH_VDSO_DATA_SIZE >> PAGE_SHIFT)
-
-enum vvar_pages {
- VVAR_GENERIC_PAGE_OFFSET,
- VVAR_TIMENS_PAGE_OFFSET,
- VVAR_LOONGARCH_PAGES_START,
- VVAR_LOONGARCH_PAGES_END = VVAR_LOONGARCH_PAGES_START + LOONGARCH_VDSO_DATA_PAGES - 1,
- VVAR_NR_PAGES,
-};
-
-#define VVAR_SIZE (VVAR_NR_PAGES << PAGE_SHIFT)
-
-extern struct loongarch_vdso_data _loongarch_data __attribute__((visibility("hidden")));
+#define VVAR_SIZE (VDSO_NR_PAGES << PAGE_SHIFT)
#endif /* __ASSEMBLY__ */
diff --git a/arch/loongarch/include/asm/vdso/vsyscall.h b/arch/loongarch/include/asm/vdso/vsyscall.h
index 8987e951d0a9..1140b54b4bc8 100644
--- a/arch/loongarch/include/asm/vdso/vsyscall.h
+++ b/arch/loongarch/include/asm/vdso/vsyscall.h
@@ -6,23 +6,6 @@
#include <vdso/datapage.h>
-extern struct vdso_data *vdso_data;
-extern struct vdso_rng_data *vdso_rng_data;
-
-static __always_inline
-struct vdso_data *__loongarch_get_k_vdso_data(void)
-{
- return vdso_data;
-}
-#define __arch_get_k_vdso_data __loongarch_get_k_vdso_data
-
-static __always_inline
-struct vdso_rng_data *__loongarch_get_k_vdso_rng_data(void)
-{
- return vdso_rng_data;
-}
-#define __arch_get_k_vdso_rng_data __loongarch_get_k_vdso_rng_data
-
/* The asm-generic header needs to be included after the definitions above */
#include <asm-generic/vdso/vsyscall.h>
diff --git a/arch/loongarch/kernel/acpi.c b/arch/loongarch/kernel/acpi.c
index 382a09a7152c..1120ac2824f6 100644
--- a/arch/loongarch/kernel/acpi.c
+++ b/arch/loongarch/kernel/acpi.c
@@ -249,18 +249,6 @@ static __init int setup_node(int pxm)
return acpi_map_pxm_to_node(pxm);
}
-/*
- * Callback for SLIT parsing. pxm_to_node() returns NUMA_NO_NODE for
- * I/O localities since SRAT does not list them. I/O localities are
- * not supported at this point.
- */
-unsigned int numa_distance_cnt;
-
-static inline unsigned int get_numa_distances_cnt(struct acpi_table_slit *slit)
-{
- return slit->locality_count;
-}
-
void __init numa_set_distance(int from, int to, int distance)
{
if ((u8)distance != distance || (from == to && distance != LOCAL_DISTANCE)) {
diff --git a/arch/loongarch/kernel/asm-offsets.c b/arch/loongarch/kernel/asm-offsets.c
index 8be1c38ad8eb..db1e4bb26b6a 100644
--- a/arch/loongarch/kernel/asm-offsets.c
+++ b/arch/loongarch/kernel/asm-offsets.c
@@ -296,6 +296,7 @@ static void __used output_kvm_defines(void)
OFFSET(KVM_ARCH_HSP, kvm_vcpu_arch, host_sp);
OFFSET(KVM_ARCH_HTP, kvm_vcpu_arch, host_tp);
OFFSET(KVM_ARCH_HPGD, kvm_vcpu_arch, host_pgd);
+ OFFSET(KVM_ARCH_KVMPGD, kvm_vcpu_arch, kvm_pgd);
OFFSET(KVM_ARCH_HANDLE_EXIT, kvm_vcpu_arch, handle_exit);
OFFSET(KVM_ARCH_HEENTRY, kvm_vcpu_arch, host_eentry);
OFFSET(KVM_ARCH_GEENTRY, kvm_vcpu_arch, guest_eentry);
@@ -315,6 +316,6 @@ static void __used output_vdso_defines(void)
{
COMMENT("LoongArch vDSO offsets.");
- DEFINE(__VVAR_PAGES, VVAR_NR_PAGES);
+ DEFINE(__VDSO_PAGES, VDSO_NR_PAGES);
BLANK();
}
diff --git a/arch/loongarch/kernel/machine_kexec.c b/arch/loongarch/kernel/machine_kexec.c
index 8ae641dc53bb..f9381800e291 100644
--- a/arch/loongarch/kernel/machine_kexec.c
+++ b/arch/loongarch/kernel/machine_kexec.c
@@ -126,14 +126,14 @@ void kexec_reboot(void)
/* All secondary cpus go to kexec_smp_wait */
if (smp_processor_id() > 0) {
relocated_kexec_smp_wait(NULL);
- unreachable();
+ BUG();
}
#endif
do_kexec = (void *)reboot_code_buffer;
do_kexec(efi_boot, cmdline_ptr, systable_ptr, start_addr, first_ind_entry);
- unreachable();
+ BUG();
}
diff --git a/arch/loongarch/kernel/setup.c b/arch/loongarch/kernel/setup.c
index edcfdfcad7d2..90cb3ca96f08 100644
--- a/arch/loongarch/kernel/setup.c
+++ b/arch/loongarch/kernel/setup.c
@@ -387,6 +387,9 @@ static void __init check_kernel_sections_mem(void)
*/
static void __init arch_mem_init(char **cmdline_p)
{
+ /* Recalculate max_low_pfn for "mem=xxx" */
+ max_pfn = max_low_pfn = PHYS_PFN(memblock_end_of_DRAM());
+
if (usermem)
pr_info("User-defined physical RAM map overwrite\n");
diff --git a/arch/loongarch/kernel/smp.c b/arch/loongarch/kernel/smp.c
index fbf747447f13..4b24589c0b56 100644
--- a/arch/loongarch/kernel/smp.c
+++ b/arch/loongarch/kernel/smp.c
@@ -19,6 +19,7 @@
#include <linux/smp.h>
#include <linux/threads.h>
#include <linux/export.h>
+#include <linux/suspend.h>
#include <linux/syscore_ops.h>
#include <linux/time.h>
#include <linux/tracepoint.h>
@@ -423,7 +424,7 @@ void loongson_cpu_die(unsigned int cpu)
mb();
}
-void __noreturn arch_cpu_idle_dead(void)
+static void __noreturn idle_play_dead(void)
{
register uint64_t addr;
register void (*init_fn)(void);
@@ -447,6 +448,50 @@ void __noreturn arch_cpu_idle_dead(void)
BUG();
}
+#ifdef CONFIG_HIBERNATION
+static void __noreturn poll_play_dead(void)
+{
+ register uint64_t addr;
+ register void (*init_fn)(void);
+
+ idle_task_exit();
+ __this_cpu_write(cpu_state, CPU_DEAD);
+
+ __smp_mb();
+ do {
+ __asm__ __volatile__("nop\n\t");
+ addr = iocsr_read64(LOONGARCH_IOCSR_MBUF0);
+ } while (addr == 0);
+
+ init_fn = (void *)TO_CACHE(addr);
+ iocsr_write32(0xffffffff, LOONGARCH_IOCSR_IPI_CLEAR);
+
+ init_fn();
+ BUG();
+}
+#endif
+
+static void (*play_dead)(void) = idle_play_dead;
+
+void __noreturn arch_cpu_idle_dead(void)
+{
+ play_dead();
+ BUG(); /* play_dead() doesn't return */
+}
+
+#ifdef CONFIG_HIBERNATION
+int hibernate_resume_nonboot_cpu_disable(void)
+{
+ int ret;
+
+ play_dead = poll_play_dead;
+ ret = suspend_disable_secondary_cpus();
+ play_dead = idle_play_dead;
+
+ return ret;
+}
+#endif
+
#endif
/*
diff --git a/arch/loongarch/kernel/vdso.c b/arch/loongarch/kernel/vdso.c
index 05e5fbac102a..10cf1608c7b3 100644
--- a/arch/loongarch/kernel/vdso.c
+++ b/arch/loongarch/kernel/vdso.c
@@ -14,7 +14,7 @@
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/slab.h>
-#include <linux/time_namespace.h>
+#include <linux/vdso_datastore.h>
#include <asm/page.h>
#include <asm/vdso.h>
@@ -25,18 +25,6 @@
extern char vdso_start[], vdso_end[];
-/* Kernel-provided data used by the VDSO. */
-static union vdso_data_store generic_vdso_data __page_aligned_data;
-
-static union {
- u8 page[LOONGARCH_VDSO_DATA_SIZE];
- struct loongarch_vdso_data vdata;
-} loongarch_vdso_data __page_aligned_data;
-
-struct vdso_data *vdso_data = generic_vdso_data.data;
-struct vdso_pcpu_data *vdso_pdata = loongarch_vdso_data.vdata.pdata;
-struct vdso_rng_data *vdso_rng_data = &loongarch_vdso_data.vdata.rng_data;
-
static int vdso_mremap(const struct vm_special_mapping *sm, struct vm_area_struct *new_vma)
{
current->mm->context.vdso = (void *)(new_vma->vm_start);
@@ -44,53 +32,12 @@ static int vdso_mremap(const struct vm_special_mapping *sm, struct vm_area_struc
return 0;
}
-static vm_fault_t vvar_fault(const struct vm_special_mapping *sm,
- struct vm_area_struct *vma, struct vm_fault *vmf)
-{
- unsigned long pfn;
- struct page *timens_page = find_timens_vvar_page(vma);
-
- switch (vmf->pgoff) {
- case VVAR_GENERIC_PAGE_OFFSET:
- if (!timens_page)
- pfn = sym_to_pfn(vdso_data);
- else
- pfn = page_to_pfn(timens_page);
- break;
-#ifdef CONFIG_TIME_NS
- case VVAR_TIMENS_PAGE_OFFSET:
- /*
- * If a task belongs to a time namespace then a namespace specific
- * VVAR is mapped with the VVAR_GENERIC_PAGE_OFFSET and the real
- * VVAR page is mapped with the VVAR_TIMENS_PAGE_OFFSET offset.
- * See also the comment near timens_setup_vdso_data().
- */
- if (!timens_page)
- return VM_FAULT_SIGBUS;
- else
- pfn = sym_to_pfn(vdso_data);
- break;
-#endif /* CONFIG_TIME_NS */
- case VVAR_LOONGARCH_PAGES_START ... VVAR_LOONGARCH_PAGES_END:
- pfn = sym_to_pfn(&loongarch_vdso_data) + vmf->pgoff - VVAR_LOONGARCH_PAGES_START;
- break;
- default:
- return VM_FAULT_SIGBUS;
- }
-
- return vmf_insert_pfn(vma, vmf->address, pfn);
-}
-
struct loongarch_vdso_info vdso_info = {
.vdso = vdso_start,
.code_mapping = {
.name = "[vdso]",
.mremap = vdso_mremap,
},
- .data_mapping = {
- .name = "[vvar]",
- .fault = vvar_fault,
- },
.offset_sigreturn = vdso_offset_sigreturn,
};
@@ -101,7 +48,7 @@ static int __init init_vdso(void)
BUG_ON(!PAGE_ALIGNED(vdso_info.vdso));
for_each_possible_cpu(cpu)
- vdso_pdata[cpu].node = cpu_to_node(cpu);
+ vdso_k_arch_data->pdata[cpu].node = cpu_to_node(cpu);
vdso_info.size = PAGE_ALIGN(vdso_end - vdso_start);
vdso_info.code_mapping.pages =
@@ -115,37 +62,6 @@ static int __init init_vdso(void)
}
subsys_initcall(init_vdso);
-#ifdef CONFIG_TIME_NS
-struct vdso_data *arch_get_vdso_data(void *vvar_page)
-{
- return (struct vdso_data *)(vvar_page);
-}
-
-/*
- * The vvar mapping contains data for a specific time namespace, so when a
- * task changes namespace we must unmap its vvar data for the old namespace.
- * Subsequent faults will map in data for the new namespace.
- *
- * For more details see timens_setup_vdso_data().
- */
-int vdso_join_timens(struct task_struct *task, struct time_namespace *ns)
-{
- struct mm_struct *mm = task->mm;
- struct vm_area_struct *vma;
-
- VMA_ITERATOR(vmi, mm, 0);
-
- mmap_read_lock(mm);
- for_each_vma(vmi, vma) {
- if (vma_is_special_mapping(vma, &vdso_info.data_mapping))
- zap_vma_pages(vma);
- }
- mmap_read_unlock(mm);
-
- return 0;
-}
-#endif
-
static unsigned long vdso_base(void)
{
unsigned long base = STACK_TOP;
@@ -181,9 +97,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
goto out;
}
- vma = _install_special_mapping(mm, data_addr, VVAR_SIZE,
- VM_READ | VM_MAYREAD | VM_PFNMAP,
- &info->data_mapping);
+ vma = vdso_install_vvar_mapping(mm, data_addr);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
goto out;
diff --git a/arch/loongarch/kvm/Kconfig b/arch/loongarch/kvm/Kconfig
index 97a811077ac3..40eea6da7c25 100644
--- a/arch/loongarch/kvm/Kconfig
+++ b/arch/loongarch/kvm/Kconfig
@@ -33,6 +33,7 @@ config KVM
select KVM_MMIO
select KVM_XFER_TO_GUEST_WORK
select SCHED_INFO
+ select GUEST_PERF_EVENTS if PERF_EVENTS
help
Support hosting virtualized guest machines using
hardware virtualization extensions. You will need
diff --git a/arch/loongarch/kvm/Makefile b/arch/loongarch/kvm/Makefile
index 3a01292f71cc..f4c8e35c216a 100644
--- a/arch/loongarch/kvm/Makefile
+++ b/arch/loongarch/kvm/Makefile
@@ -3,8 +3,6 @@
# Makefile for LoongArch KVM support
#
-ccflags-y += -I $(src)
-
include $(srctree)/virt/kvm/Makefile.kvm
obj-$(CONFIG_KVM) += kvm.o
diff --git a/arch/loongarch/kvm/exit.c b/arch/loongarch/kvm/exit.c
index c1e8ec5b941b..ea321403644a 100644
--- a/arch/loongarch/kvm/exit.c
+++ b/arch/loongarch/kvm/exit.c
@@ -669,6 +669,12 @@ static int kvm_handle_rdwr_fault(struct kvm_vcpu *vcpu, bool write)
struct kvm_run *run = vcpu->run;
unsigned long badv = vcpu->arch.badv;
+ /* Inject an ADE exception if the address exceeds the max GPA size */
+ if (unlikely(badv >= vcpu->kvm->arch.gpa_size)) {
+ kvm_queue_exception(vcpu, EXCCODE_ADE, EXSUBCODE_ADEM);
+ return RESUME_GUEST;
+ }
+
ret = kvm_handle_mm_fault(vcpu, badv, write);
if (ret) {
/* Treat as MMIO */
diff --git a/arch/loongarch/kvm/main.c b/arch/loongarch/kvm/main.c
index f6d3242b9234..d165cd38c6bb 100644
--- a/arch/loongarch/kvm/main.c
+++ b/arch/loongarch/kvm/main.c
@@ -317,6 +317,13 @@ int kvm_arch_enable_virtualization_cpu(void)
kvm_debug("GCFG:%lx GSTAT:%lx GINTC:%lx GTLBC:%lx",
read_csr_gcfg(), read_csr_gstat(), read_csr_gintc(), read_csr_gtlbc());
+ /*
+ * HW Guest CSR registers are lost after CPU suspend and resume.
+ * Clear last_vcpu so that Guest CSR registers are forced to reload
+ * from the vCPU SW state.
+ */
+ this_cpu_ptr(vmcs)->last_vcpu = NULL;
+
return 0;
}
@@ -387,6 +394,7 @@ static int kvm_loongarch_env_init(void)
}
kvm_init_gcsr_flag();
+ kvm_register_perf_callbacks(NULL);
/* Register LoongArch IPI interrupt controller interface. */
ret = kvm_loongarch_register_ipi_device();
@@ -418,6 +426,8 @@ static void kvm_loongarch_env_exit(void)
}
kfree(kvm_loongarch_ops);
}
+
+ kvm_unregister_perf_callbacks();
}
static int kvm_loongarch_init(void)
diff --git a/arch/loongarch/kvm/switch.S b/arch/loongarch/kvm/switch.S
index 1be185e94807..f1768b7a6194 100644
--- a/arch/loongarch/kvm/switch.S
+++ b/arch/loongarch/kvm/switch.S
@@ -60,16 +60,8 @@
ld.d t0, a2, KVM_ARCH_GPC
csrwr t0, LOONGARCH_CSR_ERA
- /* Save host PGDL */
- csrrd t0, LOONGARCH_CSR_PGDL
- st.d t0, a2, KVM_ARCH_HPGD
-
- /* Switch to kvm */
- ld.d t1, a2, KVM_VCPU_KVM - KVM_VCPU_ARCH
-
- /* Load guest PGDL */
- li.w t0, KVM_GPGD
- ldx.d t0, t1, t0
+ /* Load PGD for KVM hypervisor */
+ ld.d t0, a2, KVM_ARCH_KVMPGD
csrwr t0, LOONGARCH_CSR_PGDL
/* Mix GID and RID */
diff --git a/arch/loongarch/kvm/vcpu.c b/arch/loongarch/kvm/vcpu.c
index 20f941af3e9e..8e427b379661 100644
--- a/arch/loongarch/kvm/vcpu.c
+++ b/arch/loongarch/kvm/vcpu.c
@@ -311,7 +311,7 @@ static int kvm_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
int ret = RESUME_GUEST;
unsigned long estat = vcpu->arch.host_estat;
- u32 intr = estat & 0x1fff; /* Ignore NMI */
+ u32 intr = estat & CSR_ESTAT_IS;
u32 ecode = (estat & CSR_ESTAT_EXC) >> CSR_ESTAT_EXC_SHIFT;
vcpu->mode = OUTSIDE_GUEST_MODE;
@@ -361,6 +361,34 @@ int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
{
+ unsigned long val;
+
+ preempt_disable();
+ val = gcsr_read(LOONGARCH_CSR_CRMD);
+ preempt_enable();
+
+ return (val & CSR_PRMD_PPLV) == PLV_KERN;
+}
+
+#ifdef CONFIG_GUEST_PERF_EVENTS
+unsigned long kvm_arch_vcpu_get_ip(struct kvm_vcpu *vcpu)
+{
+ return vcpu->arch.pc;
+}
+
+/*
+ * Returns true if a Performance Monitoring Interrupt (PMI), a.k.a. perf event,
+ * arrived in guest context. For LoongArch64, if the PMU is not passed through to
+ * the VM, any event that arrives while a vCPU is loaded is considered "in guest".
+ */
+bool kvm_arch_pmi_in_guest(struct kvm_vcpu *vcpu)
+{
+ return (vcpu && !(vcpu->arch.aux_inuse & KVM_LARCH_PMU));
+}
+#endif
+
+bool kvm_arch_vcpu_preempted_in_kernel(struct kvm_vcpu *vcpu)
+{
return false;
}
@@ -1459,8 +1487,17 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
vcpu->arch.vpid = 0;
vcpu->arch.flush_gpa = INVALID_GPA;
- hrtimer_init(&vcpu->arch.swtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED_HARD);
- vcpu->arch.swtimer.function = kvm_swtimer_wakeup;
+ hrtimer_setup(&vcpu->arch.swtimer, kvm_swtimer_wakeup, CLOCK_MONOTONIC,
+ HRTIMER_MODE_ABS_PINNED_HARD);
+
+ /* Get GPA (=HVA) of PGD for kvm hypervisor */
+ vcpu->arch.kvm_pgd = __pa(vcpu->kvm->arch.pgd);
+
+ /*
+ * Get the PGD for the primary MMU. The virtual address is used since there
+ * is a memory access after loading from CSR_PGD in the TLB exception fast path.
+ */
+ vcpu->arch.host_pgd = (unsigned long)vcpu->kvm->mm->pgd;
vcpu->arch.handle_exit = kvm_handle_exit;
vcpu->arch.guest_eentry = (unsigned long)kvm_loongarch_ops->exc_entry;
diff --git a/arch/loongarch/kvm/vm.c b/arch/loongarch/kvm/vm.c
index b8b3e1972d6e..edccfc8c9cd8 100644
--- a/arch/loongarch/kvm/vm.c
+++ b/arch/loongarch/kvm/vm.c
@@ -48,7 +48,11 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
if (kvm_pvtime_supported())
kvm->arch.pv_features |= BIT(KVM_FEATURE_STEAL_TIME);
- kvm->arch.gpa_size = BIT(cpu_vabits - 1);
+ /*
+ * cpu_vabits covers the user address space only (half of the total).
+ * The GPA size of a VM is the same as the size of the user address space.
+ */
+ kvm->arch.gpa_size = BIT(cpu_vabits);
kvm->arch.root_level = CONFIG_PGTABLE_LEVELS - 1;
kvm->arch.invalid_ptes[0] = 0;
kvm->arch.invalid_ptes[1] = (unsigned long)invalid_pte_table;
diff --git a/arch/loongarch/lib/crc32-loongarch.c b/arch/loongarch/lib/crc32-loongarch.c
index 8af8113ecd9d..c44ee4f32557 100644
--- a/arch/loongarch/lib/crc32-loongarch.c
+++ b/arch/loongarch/lib/crc32-loongarch.c
@@ -65,10 +65,10 @@ u32 crc32_le_arch(u32 crc, const u8 *p, size_t len)
}
EXPORT_SYMBOL(crc32_le_arch);
-u32 crc32c_le_arch(u32 crc, const u8 *p, size_t len)
+u32 crc32c_arch(u32 crc, const u8 *p, size_t len)
{
if (!static_branch_likely(&have_crc32))
- return crc32c_le_base(crc, p, len);
+ return crc32c_base(crc, p, len);
while (len >= sizeof(u64)) {
u64 value = get_unaligned_le64(p);
@@ -100,7 +100,7 @@ u32 crc32c_le_arch(u32 crc, const u8 *p, size_t len)
return crc;
}
-EXPORT_SYMBOL(crc32c_le_arch);
+EXPORT_SYMBOL(crc32c_arch);
u32 crc32_be_arch(u32 crc, const u8 *p, size_t len)
{
diff --git a/arch/loongarch/mm/mmap.c b/arch/loongarch/mm/mmap.c
index 914e82ff3f65..1df9e99582cc 100644
--- a/arch/loongarch/mm/mmap.c
+++ b/arch/loongarch/mm/mmap.c
@@ -3,6 +3,7 @@
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*/
#include <linux/export.h>
+#include <linux/hugetlb.h>
#include <linux/io.h>
#include <linux/kfence.h>
#include <linux/memblock.h>
@@ -63,8 +64,11 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
}
info.length = len;
- info.align_mask = do_color_align ? (PAGE_MASK & SHM_ALIGN_MASK) : 0;
info.align_offset = pgoff << PAGE_SHIFT;
+ if (filp && is_file_hugepages(filp))
+ info.align_mask = huge_page_mask_align(filp);
+ else
+ info.align_mask = do_color_align ? (PAGE_MASK & SHM_ALIGN_MASK) : 0;
if (dir == DOWN) {
info.flags = VM_UNMAPPED_AREA_TOPDOWN;
diff --git a/arch/loongarch/vdso/Makefile b/arch/loongarch/vdso/Makefile
index fdde1bcd4e26..1c26147aff70 100644
--- a/arch/loongarch/vdso/Makefile
+++ b/arch/loongarch/vdso/Makefile
@@ -2,7 +2,7 @@
# Objects to go into the VDSO.
# Include the generic Makefile to check the built vdso.
-include $(srctree)/lib/vdso/Makefile
+include $(srctree)/lib/vdso/Makefile.include
obj-vdso-y := elf.o vgetcpu.o vgettimeofday.o vgetrandom.o \
vgetrandom-chacha.o sigreturn.o
diff --git a/arch/loongarch/vdso/vdso.lds.S b/arch/loongarch/vdso/vdso.lds.S
index 160cfaef2de4..8ff986499947 100644
--- a/arch/loongarch/vdso/vdso.lds.S
+++ b/arch/loongarch/vdso/vdso.lds.S
@@ -5,6 +5,7 @@
*/
#include <asm/page.h>
#include <generated/asm-offsets.h>
+#include <vdso/datapage.h>
OUTPUT_FORMAT("elf64-loongarch", "elf64-loongarch", "elf64-loongarch")
@@ -12,11 +13,8 @@ OUTPUT_ARCH(loongarch)
SECTIONS
{
- PROVIDE(_vdso_data = . - __VVAR_PAGES * PAGE_SIZE);
-#ifdef CONFIG_TIME_NS
- PROVIDE(_timens_data = _vdso_data + PAGE_SIZE);
-#endif
- PROVIDE(_loongarch_data = _vdso_data + 2 * PAGE_SIZE);
+ VDSO_VVAR_SYMS
+
. = SIZEOF_HEADERS;
.hash : { *(.hash) } :text
diff --git a/arch/loongarch/vdso/vgetcpu.c b/arch/loongarch/vdso/vgetcpu.c
index 0db51258b2a7..5301cd9d0f83 100644
--- a/arch/loongarch/vdso/vgetcpu.c
+++ b/arch/loongarch/vdso/vgetcpu.c
@@ -19,27 +19,19 @@ static __always_inline int read_cpu_id(void)
return cpu_id;
}
-static __always_inline const struct vdso_pcpu_data *get_pcpu_data(void)
-{
- return _loongarch_data.pdata;
-}
-
extern
int __vdso_getcpu(unsigned int *cpu, unsigned int *node, struct getcpu_cache *unused);
int __vdso_getcpu(unsigned int *cpu, unsigned int *node, struct getcpu_cache *unused)
{
int cpu_id;
- const struct vdso_pcpu_data *data;
cpu_id = read_cpu_id();
if (cpu)
*cpu = cpu_id;
- if (node) {
- data = get_pcpu_data();
- *node = data[cpu_id].node;
- }
+ if (node)
+ *node = vdso_u_arch_data.pdata[cpu_id].node;
return 0;
}
diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig
index b2ed0308c0ea..eb5bb6d36899 100644
--- a/arch/m68k/Kconfig
+++ b/arch/m68k/Kconfig
@@ -18,12 +18,13 @@ config M68K
select DMA_DIRECT_REMAP if M68K_NONCOHERENT_DMA && !COLDFIRE
select GENERIC_ATOMIC64
select GENERIC_CPU_DEVICES
- select GENERIC_IOMAP if HAS_IOPORT
+ select GENERIC_IOMAP if HAS_IOPORT && MMU && !COLDFIRE
select GENERIC_IRQ_SHOW
select GENERIC_LIB_ASHLDI3
select GENERIC_LIB_ASHRDI3
select GENERIC_LIB_LSHRDI3
select GENERIC_LIB_MULDI3
+ select GENERIC_PCI_IOMAP if PCI
select HAS_IOPORT if PCI || ISA || ATARI_ROM_ISA
select HAVE_ARCH_LIBGCC_H
select HAVE_ARCH_SECCOMP
diff --git a/arch/m68k/configs/amiga_defconfig b/arch/m68k/configs/amiga_defconfig
index dbf2ea561c85..31ecb8b7b9f1 100644
--- a/arch/m68k/configs/amiga_defconfig
+++ b/arch/m68k/configs/amiga_defconfig
@@ -299,7 +299,6 @@ CONFIG_NET_IFE=m
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
CONFIG_TEST_ASYNC_DRIVER_PROBE=m
-CONFIG_DM_KUNIT_TEST=m
CONFIG_CONNECTOR=m
CONFIG_PARPORT=m
CONFIG_PARPORT_AMIGA=m
@@ -336,6 +335,7 @@ CONFIG_ATA=y
CONFIG_PATA_GAYLE=y
CONFIG_PATA_BUDDHA=y
CONFIG_MD=y
+CONFIG_MD_LINEAR=m
CONFIG_BLK_DEV_DM=m
CONFIG_DM_UNSTRIPED=m
CONFIG_DM_CRYPT=m
@@ -486,7 +486,6 @@ CONFIG_OMFS_FS=m
CONFIG_HPFS_FS=m
CONFIG_QNX4FS_FS=m
CONFIG_QNX6FS_FS=m
-CONFIG_SYSV_FS=m
CONFIG_UFS_FS=m
CONFIG_EROFS_FS=m
CONFIG_NFS_FS=y
diff --git a/arch/m68k/configs/apollo_defconfig b/arch/m68k/configs/apollo_defconfig
index b0fd199cc0a4..1f57514624d5 100644
--- a/arch/m68k/configs/apollo_defconfig
+++ b/arch/m68k/configs/apollo_defconfig
@@ -295,7 +295,6 @@ CONFIG_NET_IFE=m
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
CONFIG_TEST_ASYNC_DRIVER_PROBE=m
-CONFIG_DM_KUNIT_TEST=m
CONFIG_CONNECTOR=m
CONFIG_ZRAM=m
CONFIG_BLK_DEV_LOOP=y
@@ -316,6 +315,7 @@ CONFIG_SCSI_SAS_ATTRS=m
CONFIG_ISCSI_TCP=m
CONFIG_ISCSI_BOOT_SYSFS=m
CONFIG_MD=y
+CONFIG_MD_LINEAR=m
CONFIG_BLK_DEV_DM=m
CONFIG_DM_UNSTRIPED=m
CONFIG_DM_CRYPT=m
@@ -443,7 +443,6 @@ CONFIG_OMFS_FS=m
CONFIG_HPFS_FS=m
CONFIG_QNX4FS_FS=m
CONFIG_QNX6FS_FS=m
-CONFIG_SYSV_FS=m
CONFIG_UFS_FS=m
CONFIG_EROFS_FS=m
CONFIG_NFS_FS=y
diff --git a/arch/m68k/configs/atari_defconfig b/arch/m68k/configs/atari_defconfig
index bb5b2d3b6c10..02db7a48e57e 100644
--- a/arch/m68k/configs/atari_defconfig
+++ b/arch/m68k/configs/atari_defconfig
@@ -302,7 +302,6 @@ CONFIG_NET_IFE=m
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
CONFIG_TEST_ASYNC_DRIVER_PROBE=m
-CONFIG_DM_KUNIT_TEST=m
CONFIG_CONNECTOR=m
CONFIG_PARPORT=m
CONFIG_PARPORT_ATARI=m
@@ -331,6 +330,7 @@ CONFIG_ATA=y
# CONFIG_ATA_BMDMA is not set
CONFIG_PATA_FALCON=y
CONFIG_MD=y
+CONFIG_MD_LINEAR=m
CONFIG_BLK_DEV_DM=m
CONFIG_DM_UNSTRIPED=m
CONFIG_DM_CRYPT=m
@@ -463,7 +463,6 @@ CONFIG_OMFS_FS=m
CONFIG_HPFS_FS=m
CONFIG_QNX4FS_FS=m
CONFIG_QNX6FS_FS=m
-CONFIG_SYSV_FS=m
CONFIG_UFS_FS=m
CONFIG_EROFS_FS=m
CONFIG_NFS_FS=y
diff --git a/arch/m68k/configs/bvme6000_defconfig b/arch/m68k/configs/bvme6000_defconfig
index 8315a13bab73..f0e673cb17eb 100644
--- a/arch/m68k/configs/bvme6000_defconfig
+++ b/arch/m68k/configs/bvme6000_defconfig
@@ -292,7 +292,6 @@ CONFIG_NET_IFE=m
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
CONFIG_TEST_ASYNC_DRIVER_PROBE=m
-CONFIG_DM_KUNIT_TEST=m
CONFIG_CONNECTOR=m
CONFIG_ZRAM=m
CONFIG_BLK_DEV_LOOP=y
@@ -314,6 +313,7 @@ CONFIG_ISCSI_TCP=m
CONFIG_ISCSI_BOOT_SYSFS=m
CONFIG_BVME6000_SCSI=y
CONFIG_MD=y
+CONFIG_MD_LINEAR=m
CONFIG_BLK_DEV_DM=m
CONFIG_DM_UNSTRIPED=m
CONFIG_DM_CRYPT=m
@@ -435,7 +435,6 @@ CONFIG_OMFS_FS=m
CONFIG_HPFS_FS=m
CONFIG_QNX4FS_FS=m
CONFIG_QNX6FS_FS=m
-CONFIG_SYSV_FS=m
CONFIG_UFS_FS=m
CONFIG_EROFS_FS=m
CONFIG_NFS_FS=y
diff --git a/arch/m68k/configs/hp300_defconfig b/arch/m68k/configs/hp300_defconfig
index 350370657e5f..e8ca5a50b86d 100644
--- a/arch/m68k/configs/hp300_defconfig
+++ b/arch/m68k/configs/hp300_defconfig
@@ -294,7 +294,6 @@ CONFIG_NET_IFE=m
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
CONFIG_TEST_ASYNC_DRIVER_PROBE=m
-CONFIG_DM_KUNIT_TEST=m
CONFIG_CONNECTOR=m
CONFIG_ZRAM=m
CONFIG_BLK_DEV_LOOP=y
@@ -315,6 +314,7 @@ CONFIG_SCSI_SAS_ATTRS=m
CONFIG_ISCSI_TCP=m
CONFIG_ISCSI_BOOT_SYSFS=m
CONFIG_MD=y
+CONFIG_MD_LINEAR=m
CONFIG_BLK_DEV_DM=m
CONFIG_DM_UNSTRIPED=m
CONFIG_DM_CRYPT=m
@@ -445,7 +445,6 @@ CONFIG_OMFS_FS=m
CONFIG_HPFS_FS=m
CONFIG_QNX4FS_FS=m
CONFIG_QNX6FS_FS=m
-CONFIG_SYSV_FS=m
CONFIG_UFS_FS=m
CONFIG_EROFS_FS=m
CONFIG_NFS_FS=y
diff --git a/arch/m68k/configs/mac_defconfig b/arch/m68k/configs/mac_defconfig
index f942b4755702..b3a270441bb1 100644
--- a/arch/m68k/configs/mac_defconfig
+++ b/arch/m68k/configs/mac_defconfig
@@ -293,7 +293,6 @@ CONFIG_NET_IFE=m
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
CONFIG_TEST_ASYNC_DRIVER_PROBE=m
-CONFIG_DM_KUNIT_TEST=m
CONFIG_CONNECTOR=m
CONFIG_BLK_DEV_SWIM=m
CONFIG_ZRAM=m
@@ -320,6 +319,7 @@ CONFIG_ATA=y
# CONFIG_ATA_BMDMA is not set
CONFIG_PATA_PLATFORM=y
CONFIG_MD=y
+CONFIG_MD_LINEAR=m
CONFIG_BLK_DEV_DM=m
CONFIG_DM_UNSTRIPED=m
CONFIG_DM_CRYPT=m
@@ -462,7 +462,6 @@ CONFIG_OMFS_FS=m
CONFIG_HPFS_FS=m
CONFIG_QNX4FS_FS=m
CONFIG_QNX6FS_FS=m
-CONFIG_SYSV_FS=m
CONFIG_UFS_FS=m
CONFIG_EROFS_FS=m
CONFIG_NFS_FS=y
diff --git a/arch/m68k/configs/multi_defconfig b/arch/m68k/configs/multi_defconfig
index b1eaad02efab..d215dba006ce 100644
--- a/arch/m68k/configs/multi_defconfig
+++ b/arch/m68k/configs/multi_defconfig
@@ -313,7 +313,6 @@ CONFIG_NET_IFE=m
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
CONFIG_TEST_ASYNC_DRIVER_PROBE=m
-CONFIG_DM_KUNIT_TEST=m
CONFIG_CONNECTOR=m
CONFIG_PARPORT=m
CONFIG_PARPORT_PC=m
@@ -363,6 +362,7 @@ CONFIG_PATA_GAYLE=y
CONFIG_PATA_BUDDHA=y
CONFIG_PATA_PLATFORM=y
CONFIG_MD=y
+CONFIG_MD_LINEAR=m
CONFIG_BLK_DEV_DM=m
CONFIG_DM_UNSTRIPED=m
CONFIG_DM_CRYPT=m
@@ -549,7 +549,6 @@ CONFIG_OMFS_FS=m
CONFIG_HPFS_FS=m
CONFIG_QNX4FS_FS=m
CONFIG_QNX6FS_FS=m
-CONFIG_SYSV_FS=m
CONFIG_UFS_FS=m
CONFIG_EROFS_FS=m
CONFIG_NFS_FS=y
diff --git a/arch/m68k/configs/mvme147_defconfig b/arch/m68k/configs/mvme147_defconfig
index 6309a4442bb3..a888ed93ff82 100644
--- a/arch/m68k/configs/mvme147_defconfig
+++ b/arch/m68k/configs/mvme147_defconfig
@@ -291,7 +291,6 @@ CONFIG_NET_IFE=m
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
CONFIG_TEST_ASYNC_DRIVER_PROBE=m
-CONFIG_DM_KUNIT_TEST=m
CONFIG_CONNECTOR=m
CONFIG_ZRAM=m
CONFIG_BLK_DEV_LOOP=y
@@ -313,6 +312,7 @@ CONFIG_ISCSI_TCP=m
CONFIG_ISCSI_BOOT_SYSFS=m
CONFIG_MVME147_SCSI=y
CONFIG_MD=y
+CONFIG_MD_LINEAR=m
CONFIG_BLK_DEV_DM=m
CONFIG_DM_UNSTRIPED=m
CONFIG_DM_CRYPT=m
@@ -435,7 +435,6 @@ CONFIG_OMFS_FS=m
CONFIG_HPFS_FS=m
CONFIG_QNX4FS_FS=m
CONFIG_QNX6FS_FS=m
-CONFIG_SYSV_FS=m
CONFIG_UFS_FS=m
CONFIG_EROFS_FS=m
CONFIG_NFS_FS=y
diff --git a/arch/m68k/configs/mvme16x_defconfig b/arch/m68k/configs/mvme16x_defconfig
index 3feb0731f814..b481782375f6 100644
--- a/arch/m68k/configs/mvme16x_defconfig
+++ b/arch/m68k/configs/mvme16x_defconfig
@@ -292,7 +292,6 @@ CONFIG_NET_IFE=m
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
CONFIG_TEST_ASYNC_DRIVER_PROBE=m
-CONFIG_DM_KUNIT_TEST=m
CONFIG_CONNECTOR=m
CONFIG_ZRAM=m
CONFIG_BLK_DEV_LOOP=y
@@ -314,6 +313,7 @@ CONFIG_ISCSI_TCP=m
CONFIG_ISCSI_BOOT_SYSFS=m
CONFIG_MVME16x_SCSI=y
CONFIG_MD=y
+CONFIG_MD_LINEAR=m
CONFIG_BLK_DEV_DM=m
CONFIG_DM_UNSTRIPED=m
CONFIG_DM_CRYPT=m
@@ -436,7 +436,6 @@ CONFIG_OMFS_FS=m
CONFIG_HPFS_FS=m
CONFIG_QNX4FS_FS=m
CONFIG_QNX6FS_FS=m
-CONFIG_SYSV_FS=m
CONFIG_UFS_FS=m
CONFIG_EROFS_FS=m
CONFIG_NFS_FS=y
diff --git a/arch/m68k/configs/q40_defconfig b/arch/m68k/configs/q40_defconfig
index ea04b1b0da7d..6eba743d8eb5 100644
--- a/arch/m68k/configs/q40_defconfig
+++ b/arch/m68k/configs/q40_defconfig
@@ -293,7 +293,6 @@ CONFIG_NET_IFE=m
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
CONFIG_TEST_ASYNC_DRIVER_PROBE=m
-CONFIG_DM_KUNIT_TEST=m
CONFIG_CONNECTOR=m
CONFIG_PARPORT=m
CONFIG_PARPORT_PC=m
@@ -320,6 +319,7 @@ CONFIG_ATA=y
# CONFIG_ATA_BMDMA is not set
CONFIG_PATA_FALCON=y
CONFIG_MD=y
+CONFIG_MD_LINEAR=m
CONFIG_BLK_DEV_DM=m
CONFIG_DM_UNSTRIPED=m
CONFIG_DM_CRYPT=m
@@ -452,7 +452,6 @@ CONFIG_OMFS_FS=m
CONFIG_HPFS_FS=m
CONFIG_QNX4FS_FS=m
CONFIG_QNX6FS_FS=m
-CONFIG_SYSV_FS=m
CONFIG_UFS_FS=m
CONFIG_EROFS_FS=m
CONFIG_NFS_FS=y
diff --git a/arch/m68k/configs/sun3_defconfig b/arch/m68k/configs/sun3_defconfig
index f52d9af92153..9bdbb418ffa8 100644
--- a/arch/m68k/configs/sun3_defconfig
+++ b/arch/m68k/configs/sun3_defconfig
@@ -288,7 +288,6 @@ CONFIG_NET_IFE=m
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
CONFIG_TEST_ASYNC_DRIVER_PROBE=m
-CONFIG_DM_KUNIT_TEST=m
CONFIG_CONNECTOR=m
CONFIG_ZRAM=m
CONFIG_BLK_DEV_LOOP=y
@@ -310,6 +309,7 @@ CONFIG_ISCSI_TCP=m
CONFIG_ISCSI_BOOT_SYSFS=m
CONFIG_SUN3_SCSI=y
CONFIG_MD=y
+CONFIG_MD_LINEAR=m
CONFIG_BLK_DEV_DM=m
CONFIG_DM_UNSTRIPED=m
CONFIG_DM_CRYPT=m
@@ -433,7 +433,6 @@ CONFIG_OMFS_FS=m
CONFIG_HPFS_FS=m
CONFIG_QNX4FS_FS=m
CONFIG_QNX6FS_FS=m
-CONFIG_SYSV_FS=m
CONFIG_UFS_FS=m
CONFIG_EROFS_FS=m
CONFIG_NFS_FS=y
diff --git a/arch/m68k/configs/sun3x_defconfig b/arch/m68k/configs/sun3x_defconfig
index f348447824da..e1cf20fa5343 100644
--- a/arch/m68k/configs/sun3x_defconfig
+++ b/arch/m68k/configs/sun3x_defconfig
@@ -289,7 +289,6 @@ CONFIG_NET_IFE=m
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
CONFIG_TEST_ASYNC_DRIVER_PROBE=m
-CONFIG_DM_KUNIT_TEST=m
CONFIG_CONNECTOR=m
CONFIG_ZRAM=m
CONFIG_BLK_DEV_LOOP=y
@@ -311,6 +310,7 @@ CONFIG_ISCSI_TCP=m
CONFIG_ISCSI_BOOT_SYSFS=m
CONFIG_SUN3X_ESP=y
CONFIG_MD=y
+CONFIG_MD_LINEAR=m
CONFIG_BLK_DEV_DM=m
CONFIG_DM_UNSTRIPED=m
CONFIG_DM_CRYPT=m
@@ -433,7 +433,6 @@ CONFIG_OMFS_FS=m
CONFIG_HPFS_FS=m
CONFIG_QNX4FS_FS=m
CONFIG_QNX6FS_FS=m
-CONFIG_SYSV_FS=m
CONFIG_UFS_FS=m
CONFIG_EROFS_FS=m
CONFIG_NFS_FS=y
diff --git a/arch/m68k/include/asm/io_no.h b/arch/m68k/include/asm/io_no.h
index 2c96e8480173..516371d5587a 100644
--- a/arch/m68k/include/asm/io_no.h
+++ b/arch/m68k/include/asm/io_no.h
@@ -123,10 +123,6 @@ static inline void writel(u32 value, volatile void __iomem *addr)
#define PCI_IO_SIZE 0x00010000 /* 64k */
#define PCI_IO_MASK (PCI_IO_SIZE - 1)
-#define HAVE_ARCH_PIO_SIZE
-#define PIO_OFFSET 0
-#define PIO_MASK 0xffff
-#define PIO_RESERVED 0x10000
#define PCI_IOBASE ((void __iomem *) PCI_IO_PA)
#define PCI_SPACE_LIMIT PCI_IO_MASK
#endif /* CONFIG_PCI */
diff --git a/arch/m68k/include/asm/processor.h b/arch/m68k/include/asm/processor.h
index 8f2676c3a988..3c43c09d4489 100644
--- a/arch/m68k/include/asm/processor.h
+++ b/arch/m68k/include/asm/processor.h
@@ -95,10 +95,24 @@ static inline void set_fc(unsigned long val)
"movec %0,%/dfc\n\t"
: /* no outputs */ : "r" (val) : "memory");
}
+
+static inline unsigned long get_fc(void)
+{
+ unsigned long val;
+
+ __asm__ ("movec %/dfc,%0" : "=r" (val) : );
+
+ return val;
+}
#else
static inline void set_fc(unsigned long val)
{
}
+
+static inline unsigned long get_fc(void)
+{
+ return USER_DATA;
+}
#endif /* CONFIG_CPU_HAS_ADDRESS_SPACES */
struct thread_struct {
diff --git a/arch/m68k/include/asm/sun3_pgalloc.h b/arch/m68k/include/asm/sun3_pgalloc.h
index f1ae4ed890db..80afc3a18724 100644
--- a/arch/m68k/include/asm/sun3_pgalloc.h
+++ b/arch/m68k/include/asm/sun3_pgalloc.h
@@ -44,8 +44,10 @@ static inline pgd_t * pgd_alloc(struct mm_struct *mm)
pgd_t *new_pgd;
new_pgd = __pgd_alloc(mm, 0);
- memcpy(new_pgd, swapper_pg_dir, PAGE_SIZE);
- memset(new_pgd, 0, (PAGE_OFFSET >> PGDIR_SHIFT));
+ if (likely(new_pgd != NULL)) {
+ memcpy(new_pgd, swapper_pg_dir, PAGE_SIZE);
+ memset(new_pgd, 0, (PAGE_OFFSET >> PGDIR_SHIFT));
+ }
return new_pgd;
}
diff --git a/arch/m68k/kernel/setup_mm.c b/arch/m68k/kernel/setup_mm.c
index 15c1a595a1de..0fba32552836 100644
--- a/arch/m68k/kernel/setup_mm.c
+++ b/arch/m68k/kernel/setup_mm.c
@@ -147,8 +147,7 @@ static void __init m68k_parse_bootinfo(const struct bi_record *record)
break;
case BI_COMMAND_LINE:
- strscpy(m68k_command_line, data,
- sizeof(m68k_command_line));
+ strscpy(m68k_command_line, data);
break;
case BI_RNG_SEED: {
@@ -243,8 +242,7 @@ void __init setup_arch(char **cmdline_p)
setup_initial_init_mm((void *)PAGE_OFFSET, _etext, _edata, _end);
#if defined(CONFIG_BOOTPARAM)
- strncpy(m68k_command_line, CONFIG_BOOTPARAM_STRING, CL_SIZE);
- m68k_command_line[CL_SIZE - 1] = 0;
+ strscpy(m68k_command_line, CONFIG_BOOTPARAM_STRING, CL_SIZE);
#endif /* CONFIG_BOOTPARAM */
process_uboot_commandline(&m68k_command_line[0], CL_SIZE);
*cmdline_p = m68k_command_line;
diff --git a/arch/m68k/kernel/setup_no.c b/arch/m68k/kernel/setup_no.c
index c926da9d5ec2..f9872098f5ca 100644
--- a/arch/m68k/kernel/setup_no.c
+++ b/arch/m68k/kernel/setup_no.c
@@ -21,7 +21,6 @@
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
-#include <linux/fb.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/console.h>
diff --git a/arch/m68k/kernel/syscalls/syscall.tbl b/arch/m68k/kernel/syscalls/syscall.tbl
index f5ed71f1910d..9fe47112c586 100644
--- a/arch/m68k/kernel/syscalls/syscall.tbl
+++ b/arch/m68k/kernel/syscalls/syscall.tbl
@@ -466,3 +466,4 @@
464 common getxattrat sys_getxattrat
465 common listxattrat sys_listxattrat
466 common removexattrat sys_removexattrat
+467 common open_tree_attr sys_open_tree_attr
diff --git a/arch/m68k/kernel/uboot.c b/arch/m68k/kernel/uboot.c
index 8bb1cb3a7490..5e52ea150d5c 100644
--- a/arch/m68k/kernel/uboot.c
+++ b/arch/m68k/kernel/uboot.c
@@ -10,7 +10,6 @@
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
-#include <linux/fb.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/console.h>
diff --git a/arch/m68k/sun3/mmu_emu.c b/arch/m68k/sun3/mmu_emu.c
index 119bd32efcfb..b39fc3717d8e 100644
--- a/arch/m68k/sun3/mmu_emu.c
+++ b/arch/m68k/sun3/mmu_emu.c
@@ -17,6 +17,7 @@
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/sched/mm.h>
+#include <linux/string_choices.h>
#include <asm/setup.h>
#include <asm/traps.h>
@@ -370,8 +371,8 @@ int mmu_emu_handle_fault (unsigned long vaddr, int read_flag, int kernel_fault)
}
#ifdef DEBUG_MMU_EMU
- pr_info("%s: vaddr=%lx type=%s crp=%p\n", __func__, vaddr,
- read_flag ? "read" : "write", crp);
+ pr_info("%s: vaddr=%lx type=%s crp=%px\n", __func__, vaddr,
+ str_read_write(read_flag), crp);
#endif
segment = (vaddr >> SUN3_PMEG_SIZE_BITS) & 0x7FF;
@@ -417,7 +418,7 @@ int mmu_emu_handle_fault (unsigned long vaddr, int read_flag, int kernel_fault)
pte_val (*pte) |= SUN3_PAGE_ACCESSED;
#ifdef DEBUG_MMU_EMU
- pr_info("seg:%ld crp:%p ->", get_fs().seg, crp);
+ pr_info("seg:%ld crp:%px ->", get_fc(), crp);
print_pte_vaddr (vaddr);
pr_cont("\n");
#endif
diff --git a/arch/microblaze/kernel/syscalls/syscall.tbl b/arch/microblaze/kernel/syscalls/syscall.tbl
index 680f568b77f2..7b6e97828e55 100644
--- a/arch/microblaze/kernel/syscalls/syscall.tbl
+++ b/arch/microblaze/kernel/syscalls/syscall.tbl
@@ -472,3 +472,4 @@
464 common getxattrat sys_getxattrat
465 common listxattrat sys_listxattrat
466 common removexattrat sys_removexattrat
+467 common open_tree_attr sys_open_tree_attr
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 1924f2d83932..85928260863b 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -38,7 +38,6 @@ config MIPS
select GENERIC_CMOS_UPDATE
select GENERIC_CPU_AUTOPROBE
select GENERIC_GETTIMEOFDAY
- select GENERIC_IOMAP
select GENERIC_IRQ_PROBE
select GENERIC_IRQ_SHOW
select GENERIC_ISA_DMA if EISA
@@ -47,10 +46,12 @@ config MIPS
select GENERIC_LIB_CMPDI2
select GENERIC_LIB_LSHRDI3
select GENERIC_LIB_UCMPDI2
+ select GENERIC_PCI_IOMAP
select GENERIC_SCHED_CLOCK if !CAVIUM_OCTEON_SOC
select GENERIC_SMP_IDLE_THREAD
select GENERIC_IDLE_POLL_SETUP
select GENERIC_TIME_VSYSCALL
+ select GENERIC_VDSO_DATA_STORE
select GUP_GET_PXX_LOW_HIGH if CPU_MIPS32 && PHYS_ADDR_T_64BIT
select HAS_IOPORT if !NO_IOPORT_MAP || ISA
select HAVE_ARCH_COMPILER_H
diff --git a/arch/mips/configs/bcm47xx_defconfig b/arch/mips/configs/bcm47xx_defconfig
index 6a68a96d13f8..f56e8db5da95 100644
--- a/arch/mips/configs/bcm47xx_defconfig
+++ b/arch/mips/configs/bcm47xx_defconfig
@@ -69,7 +69,6 @@ CONFIG_USB_HCD_BCMA=y
CONFIG_USB_HCD_SSB=y
CONFIG_LEDS_TRIGGER_TIMER=y
CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
-CONFIG_CRC32_SARWATE=y
CONFIG_PRINTK_TIME=y
CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
CONFIG_DEBUG_INFO_REDUCED=y
diff --git a/arch/mips/configs/bigsur_defconfig b/arch/mips/configs/bigsur_defconfig
index f7c4b3529a2c..fe282630b51c 100644
--- a/arch/mips/configs/bigsur_defconfig
+++ b/arch/mips/configs/bigsur_defconfig
@@ -239,7 +239,6 @@ CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
CONFIG_CRYPTO_LZO=m
CONFIG_CRC_T10DIF=m
-CONFIG_CRC7=m
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_MEMORY_INIT=y
CONFIG_DETECT_HUNG_TASK=y
diff --git a/arch/mips/configs/cobalt_defconfig b/arch/mips/configs/cobalt_defconfig
index e835730ea7fa..b0b551efac7c 100644
--- a/arch/mips/configs/cobalt_defconfig
+++ b/arch/mips/configs/cobalt_defconfig
@@ -70,4 +70,3 @@ CONFIG_NFS_FS=y
CONFIG_NFS_V3_ACL=y
CONFIG_NFSD=y
CONFIG_NFSD_V3_ACL=y
-CONFIG_LIBCRC32C=y
diff --git a/arch/mips/configs/db1xxx_defconfig b/arch/mips/configs/db1xxx_defconfig
index 6eff21ff15d5..281dd7d0f805 100644
--- a/arch/mips/configs/db1xxx_defconfig
+++ b/arch/mips/configs/db1xxx_defconfig
@@ -216,7 +216,6 @@ CONFIG_CRYPTO_USER=y
CONFIG_CRYPTO_CRYPTD=y
CONFIG_CRYPTO_USER_API_HASH=y
CONFIG_CRYPTO_USER_API_SKCIPHER=y
-CONFIG_CRC32_SLICEBY4=y
CONFIG_FONTS=y
CONFIG_FONT_8x8=y
CONFIG_MAGIC_SYSRQ=y
diff --git a/arch/mips/configs/decstation_64_defconfig b/arch/mips/configs/decstation_64_defconfig
index da51b9731db0..9655567614aa 100644
--- a/arch/mips/configs/decstation_64_defconfig
+++ b/arch/mips/configs/decstation_64_defconfig
@@ -180,7 +180,6 @@ CONFIG_CRYPTO_XTS=m
CONFIG_CRYPTO_CMAC=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_CRC32=m
-CONFIG_CRYPTO_CRCT10DIF=m
CONFIG_CRYPTO_MD4=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_RMD160=m
diff --git a/arch/mips/configs/decstation_defconfig b/arch/mips/configs/decstation_defconfig
index 424e3f011fc2..1539fe8eb34d 100644
--- a/arch/mips/configs/decstation_defconfig
+++ b/arch/mips/configs/decstation_defconfig
@@ -175,7 +175,6 @@ CONFIG_CRYPTO_XTS=m
CONFIG_CRYPTO_CMAC=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_CRC32=m
-CONFIG_CRYPTO_CRCT10DIF=m
CONFIG_CRYPTO_MD4=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_RMD160=m
diff --git a/arch/mips/configs/decstation_r4k_defconfig b/arch/mips/configs/decstation_r4k_defconfig
index cfc8bf791792..58c36720c94a 100644
--- a/arch/mips/configs/decstation_r4k_defconfig
+++ b/arch/mips/configs/decstation_r4k_defconfig
@@ -175,7 +175,6 @@ CONFIG_CRYPTO_XTS=m
CONFIG_CRYPTO_CMAC=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_CRC32=m
-CONFIG_CRYPTO_CRCT10DIF=m
CONFIG_CRYPTO_MD4=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_RMD160=m
diff --git a/arch/mips/configs/fuloong2e_defconfig b/arch/mips/configs/fuloong2e_defconfig
index 00329bb5de5a..5ab149cd3178 100644
--- a/arch/mips/configs/fuloong2e_defconfig
+++ b/arch/mips/configs/fuloong2e_defconfig
@@ -219,4 +219,3 @@ CONFIG_CRYPTO_DEFLATE=m
CONFIG_CRYPTO_LZO=m
# CONFIG_CRYPTO_HW is not set
CONFIG_CRC_CCITT=y
-CONFIG_CRC7=m
diff --git a/arch/mips/configs/ip32_defconfig b/arch/mips/configs/ip32_defconfig
index 930c5f6ed182..121e7e48fa77 100644
--- a/arch/mips/configs/ip32_defconfig
+++ b/arch/mips/configs/ip32_defconfig
@@ -178,7 +178,6 @@ CONFIG_CRYPTO_TEA=y
CONFIG_CRYPTO_TWOFISH=y
CONFIG_CRYPTO_DEFLATE=y
CONFIG_CRC_T10DIF=y
-CONFIG_LIBCRC32C=y
CONFIG_FONTS=y
CONFIG_FONT_8x8=y
CONFIG_FONT_8x16=y
diff --git a/arch/mips/configs/malta_defconfig b/arch/mips/configs/malta_defconfig
index 4390d30206d9..e308b82c094a 100644
--- a/arch/mips/configs/malta_defconfig
+++ b/arch/mips/configs/malta_defconfig
@@ -347,7 +347,6 @@ CONFIG_CRAMFS=m
CONFIG_VXFS_FS=m
CONFIG_MINIX_FS=m
CONFIG_ROMFS_FS=m
-CONFIG_SYSV_FS=m
CONFIG_UFS_FS=m
CONFIG_NFS_FS=y
CONFIG_ROOT_NFS=y
diff --git a/arch/mips/configs/malta_kvm_defconfig b/arch/mips/configs/malta_kvm_defconfig
index d63d8be8cb50..fa5b04063ddb 100644
--- a/arch/mips/configs/malta_kvm_defconfig
+++ b/arch/mips/configs/malta_kvm_defconfig
@@ -354,7 +354,6 @@ CONFIG_CRAMFS=m
CONFIG_VXFS_FS=m
CONFIG_MINIX_FS=m
CONFIG_ROMFS_FS=m
-CONFIG_SYSV_FS=m
CONFIG_UFS_FS=m
CONFIG_NFS_FS=y
CONFIG_ROOT_NFS=y
diff --git a/arch/mips/configs/maltaup_xpa_defconfig b/arch/mips/configs/maltaup_xpa_defconfig
index 338bb6544a93..40283171af68 100644
--- a/arch/mips/configs/maltaup_xpa_defconfig
+++ b/arch/mips/configs/maltaup_xpa_defconfig
@@ -353,7 +353,6 @@ CONFIG_CRAMFS=m
CONFIG_VXFS_FS=m
CONFIG_MINIX_FS=m
CONFIG_ROMFS_FS=m
-CONFIG_SYSV_FS=m
CONFIG_UFS_FS=m
CONFIG_NFS_FS=y
CONFIG_ROOT_NFS=y
diff --git a/arch/mips/configs/rm200_defconfig b/arch/mips/configs/rm200_defconfig
index 08e1c1f2f4de..3a6d0384b774 100644
--- a/arch/mips/configs/rm200_defconfig
+++ b/arch/mips/configs/rm200_defconfig
@@ -336,7 +336,6 @@ CONFIG_MINIX_FS=m
CONFIG_HPFS_FS=m
CONFIG_QNX4FS_FS=m
CONFIG_ROMFS_FS=m
-CONFIG_SYSV_FS=m
CONFIG_UFS_FS=m
CONFIG_NFS_FS=m
CONFIG_NFSD=m
diff --git a/arch/mips/configs/rt305x_defconfig b/arch/mips/configs/rt305x_defconfig
index 332f9094e847..8404e0a9d8b2 100644
--- a/arch/mips/configs/rt305x_defconfig
+++ b/arch/mips/configs/rt305x_defconfig
@@ -129,7 +129,6 @@ CONFIG_SQUASHFS=y
CONFIG_SQUASHFS_XZ=y
CONFIG_CRYPTO_ARC4=m
CONFIG_CRC_ITU_T=m
-CONFIG_CRC32_SARWATE=y
# CONFIG_XZ_DEC_X86 is not set
# CONFIG_XZ_DEC_POWERPC is not set
# CONFIG_XZ_DEC_IA64 is not set
diff --git a/arch/mips/configs/xway_defconfig b/arch/mips/configs/xway_defconfig
index 08c0aa03fd56..7b91edfe3e07 100644
--- a/arch/mips/configs/xway_defconfig
+++ b/arch/mips/configs/xway_defconfig
@@ -141,7 +141,6 @@ CONFIG_SQUASHFS=y
CONFIG_SQUASHFS_XZ=y
CONFIG_CRYPTO_ARC4=m
CONFIG_CRC_ITU_T=m
-CONFIG_CRC32_SARWATE=y
CONFIG_PRINTK_TIME=y
CONFIG_STRIP_ASM_SYMS=y
CONFIG_DEBUG_FS=y
diff --git a/arch/mips/include/asm/io.h b/arch/mips/include/asm/io.h
index 0bddb568af7c..78c6573f91f2 100644
--- a/arch/mips/include/asm/io.h
+++ b/arch/mips/include/asm/io.h
@@ -67,17 +67,6 @@ static inline void set_io_port_base(unsigned long base)
}
/*
- * Provide the necessary definitions for generic iomap. We make use of
- * mips_io_port_base for iomap(), but we don't reserve any low addresses for
- * use with I/O ports.
- */
-
-#define HAVE_ARCH_PIO_SIZE
-#define PIO_OFFSET mips_io_port_base
-#define PIO_MASK IO_SPACE_LIMIT
-#define PIO_RESERVED 0x0UL
-
-/*
* Enforce in-order execution of data I/O. In the MIPS architecture
* these are equivalent to corresponding platform-specific memory
* barriers defined in <asm/barrier.h>. API pinched from PowerPC,
@@ -397,8 +386,8 @@ static inline void writes##bwlq(volatile void __iomem *mem, \
} \
} \
\
-static inline void reads##bwlq(volatile void __iomem *mem, void *addr, \
- unsigned int count) \
+static inline void reads##bwlq(const volatile void __iomem *mem, \
+ void *addr, unsigned int count) \
{ \
volatile type *__addr = addr; \
\
@@ -555,6 +544,16 @@ extern void (*_dma_cache_inv)(unsigned long start, unsigned long size);
void __ioread64_copy(void *to, const void __iomem *from, size_t count);
+#if defined(CONFIG_PCI) && defined(CONFIG_PCI_DRIVERS_LEGACY)
+struct pci_dev;
+void pci_iounmap(struct pci_dev *dev, void __iomem *addr);
+#define pci_iounmap pci_iounmap
+#endif
+
+#ifndef PCI_IOBASE
+#define PCI_IOBASE ((void __iomem *)mips_io_port_base)
+#endif
+
#include <asm-generic/io.h>
static inline void *isa_bus_to_virt(unsigned long address)
diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
index f7222eb594ea..c14b10821817 100644
--- a/arch/mips/include/asm/kvm_host.h
+++ b/arch/mips/include/asm/kvm_host.h
@@ -886,7 +886,6 @@ extern unsigned long kvm_mips_get_ramsize(struct kvm *kvm);
extern int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
struct kvm_mips_interrupt *irq);
-static inline void kvm_arch_sync_events(struct kvm *kvm) {}
static inline void kvm_arch_free_memslot(struct kvm *kvm,
struct kvm_memory_slot *slot) {}
static inline void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) {}
diff --git a/arch/mips/include/asm/mach-loongson64/spaces.h b/arch/mips/include/asm/mach-loongson64/spaces.h
index ce04e998a37b..dbd26db5f2c5 100644
--- a/arch/mips/include/asm/mach-loongson64/spaces.h
+++ b/arch/mips/include/asm/mach-loongson64/spaces.h
@@ -7,9 +7,10 @@
#endif /* CONFIG_64BIT */
/* Skip 128k to trap NULL pointer dereferences */
-#define PCI_IOBASE _AC(0xc000000000000000 + SZ_128K, UL)
+#define PCI_PORT_BASE _AC(0xc000000000000000 + SZ_128K, UL)
+#define PCI_IOBASE (void __iomem *)PCI_PORT_BASE
#define PCI_IOSIZE SZ_16M
-#define MAP_BASE (PCI_IOBASE + PCI_IOSIZE)
+#define MAP_BASE (PCI_PORT_BASE + PCI_IOSIZE)
#define IO_SPACE_LIMIT (PCI_IOSIZE - 1)
diff --git a/arch/mips/include/asm/mach-ralink/spaces.h b/arch/mips/include/asm/mach-ralink/spaces.h
index a9f0570d0f04..a63d106c89c6 100644
--- a/arch/mips/include/asm/mach-ralink/spaces.h
+++ b/arch/mips/include/asm/mach-ralink/spaces.h
@@ -2,7 +2,7 @@
#ifndef __ASM_MACH_RALINK_SPACES_H_
#define __ASM_MACH_RALINK_SPACES_H_
-#define PCI_IOBASE mips_io_port_base
+#define PCI_IOBASE (void __iomem *)mips_io_port_base
#define PCI_IOSIZE SZ_64K
#define IO_SPACE_LIMIT (PCI_IOSIZE - 1)
diff --git a/arch/mips/include/asm/vdso/gettimeofday.h b/arch/mips/include/asm/vdso/gettimeofday.h
index 44a45f3fa4b0..fd32baa30e17 100644
--- a/arch/mips/include/asm/vdso/gettimeofday.h
+++ b/arch/mips/include/asm/vdso/gettimeofday.h
@@ -167,7 +167,7 @@ static __always_inline u64 read_r4k_count(void)
#ifdef CONFIG_CLKSRC_MIPS_GIC
-static __always_inline u64 read_gic_count(const struct vdso_data *data)
+static __always_inline u64 read_gic_count(const struct vdso_time_data *data)
{
void __iomem *gic = get_gic(data);
u32 hi, hi2, lo;
@@ -184,7 +184,7 @@ static __always_inline u64 read_gic_count(const struct vdso_data *data)
#endif
static __always_inline u64 __arch_get_hw_counter(s32 clock_mode,
- const struct vdso_data *vd)
+ const struct vdso_time_data *vd)
{
#ifdef CONFIG_CSRC_R4K
if (clock_mode == VDSO_CLOCKMODE_R4K)
@@ -209,10 +209,11 @@ static inline bool mips_vdso_hres_capable(void)
}
#define __arch_vdso_hres_capable mips_vdso_hres_capable
-static __always_inline const struct vdso_data *__arch_get_vdso_data(void)
+static __always_inline const struct vdso_time_data *__arch_get_vdso_u_time_data(void)
{
- return get_vdso_data();
+ return get_vdso_time_data();
}
+#define __arch_get_vdso_u_time_data __arch_get_vdso_u_time_data
#endif /* !__ASSEMBLY__ */
diff --git a/arch/mips/include/asm/vdso/vdso.h b/arch/mips/include/asm/vdso/vdso.h
index 6cd88191fefa..acd0efcd3d93 100644
--- a/arch/mips/include/asm/vdso/vdso.h
+++ b/arch/mips/include/asm/vdso/vdso.h
@@ -5,16 +5,18 @@
*/
#include <asm/sgidefs.h>
+#include <vdso/page.h>
+
+#define __VDSO_PAGES 4
#ifndef __ASSEMBLY__
#include <asm/asm.h>
-#include <asm/page.h>
#include <asm/vdso.h>
-static inline unsigned long get_vdso_base(void)
+static inline const struct vdso_time_data *get_vdso_time_data(void)
{
- unsigned long addr;
+ const struct vdso_time_data *addr;
/*
* We can't use cpu_has_mips_r6 since it needs the cpu_data[]
@@ -27,7 +29,7 @@ static inline unsigned long get_vdso_base(void)
* We can't use addiupc because there is no label-label
* support for the addiupc reloc
*/
- __asm__("lapc %0, _start \n"
+ __asm__("lapc %0, vdso_u_time_data \n"
: "=r" (addr) : :);
#else
/*
@@ -46,7 +48,7 @@ static inline unsigned long get_vdso_base(void)
" .set noreorder \n"
" bal 1f \n"
" nop \n"
- " .word _start - . \n"
+ " .word vdso_u_time_data - . \n"
"1: lw %0, 0($31) \n"
" " STR(PTR_ADDU) " %0, $31, %0 \n"
" .set pop \n"
@@ -58,14 +60,9 @@ static inline unsigned long get_vdso_base(void)
return addr;
}
-static inline const struct vdso_data *get_vdso_data(void)
-{
- return (const struct vdso_data *)(get_vdso_base() - PAGE_SIZE);
-}
-
#ifdef CONFIG_CLKSRC_MIPS_GIC
-static inline void __iomem *get_gic(const struct vdso_data *data)
+static inline void __iomem *get_gic(const struct vdso_time_data *data)
{
return (void __iomem *)((unsigned long)data & PAGE_MASK) - PAGE_SIZE;
}
diff --git a/arch/mips/include/asm/vdso/vsyscall.h b/arch/mips/include/asm/vdso/vsyscall.h
index a4582870aaea..2b1debb62dee 100644
--- a/arch/mips/include/asm/vdso/vsyscall.h
+++ b/arch/mips/include/asm/vdso/vsyscall.h
@@ -2,22 +2,12 @@
#ifndef __ASM_VDSO_VSYSCALL_H
#define __ASM_VDSO_VSYSCALL_H
+#include <asm/page.h>
+
#ifndef __ASSEMBLY__
#include <vdso/datapage.h>
-extern struct vdso_data *vdso_data;
-
-/*
- * Update the vDSO data page to keep in sync with kernel timekeeping.
- */
-static __always_inline
-struct vdso_data *__mips_get_k_vdso_data(void)
-{
- return vdso_data;
-}
-#define __arch_get_k_vdso_data __mips_get_k_vdso_data
-
/* The asm-generic header needs to be included after the definitions above */
#include <asm-generic/vdso/vsyscall.h>
diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
index 61503a36067e..f7107479c7fa 100644
--- a/arch/mips/kernel/ptrace.c
+++ b/arch/mips/kernel/ptrace.c
@@ -1326,24 +1326,8 @@ asmlinkage long syscall_trace_enter(struct pt_regs *regs)
return -1;
}
-#ifdef CONFIG_SECCOMP
- if (unlikely(test_thread_flag(TIF_SECCOMP))) {
- int ret, i;
- struct seccomp_data sd;
- unsigned long args[6];
-
- sd.nr = current_thread_info()->syscall;
- sd.arch = syscall_get_arch(current);
- syscall_get_arguments(current, regs, args);
- for (i = 0; i < 6; i++)
- sd.args[i] = args[i];
- sd.instruction_pointer = KSTK_EIP(current);
-
- ret = __secure_computing(&sd);
- if (ret == -1)
- return ret;
- }
-#endif
+ if (secure_computing())
+ return -1;
if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
trace_sys_enter(regs, regs->regs[2]);
diff --git a/arch/mips/kernel/syscalls/syscall_n32.tbl b/arch/mips/kernel/syscalls/syscall_n32.tbl
index 0b9b7e25b69a..aa70e371bb54 100644
--- a/arch/mips/kernel/syscalls/syscall_n32.tbl
+++ b/arch/mips/kernel/syscalls/syscall_n32.tbl
@@ -405,3 +405,4 @@
464 n32 getxattrat sys_getxattrat
465 n32 listxattrat sys_listxattrat
466 n32 removexattrat sys_removexattrat
+467 n32 open_tree_attr sys_open_tree_attr
diff --git a/arch/mips/kernel/syscalls/syscall_n64.tbl b/arch/mips/kernel/syscalls/syscall_n64.tbl
index c844cd5cda62..1e8c44c7b614 100644
--- a/arch/mips/kernel/syscalls/syscall_n64.tbl
+++ b/arch/mips/kernel/syscalls/syscall_n64.tbl
@@ -381,3 +381,4 @@
464 n64 getxattrat sys_getxattrat
465 n64 listxattrat sys_listxattrat
466 n64 removexattrat sys_removexattrat
+467 n64 open_tree_attr sys_open_tree_attr
diff --git a/arch/mips/kernel/syscalls/syscall_o32.tbl b/arch/mips/kernel/syscalls/syscall_o32.tbl
index 349b8aad1159..114a5a1a6230 100644
--- a/arch/mips/kernel/syscalls/syscall_o32.tbl
+++ b/arch/mips/kernel/syscalls/syscall_o32.tbl
@@ -454,3 +454,4 @@
464 o32 getxattrat sys_getxattrat
465 o32 listxattrat sys_listxattrat
466 o32 removexattrat sys_removexattrat
+467 o32 open_tree_attr sys_open_tree_attr
diff --git a/arch/mips/kernel/vdso.c b/arch/mips/kernel/vdso.c
index 75c9d3618f58..de096777172f 100644
--- a/arch/mips/kernel/vdso.c
+++ b/arch/mips/kernel/vdso.c
@@ -15,6 +15,7 @@
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/slab.h>
+#include <linux/vdso_datastore.h>
#include <asm/abi.h>
#include <asm/mips-cps.h>
@@ -23,20 +24,7 @@
#include <vdso/helpers.h>
#include <vdso/vsyscall.h>
-/* Kernel-provided data used by the VDSO. */
-static union vdso_data_store mips_vdso_data __page_aligned_data;
-struct vdso_data *vdso_data = mips_vdso_data.data;
-
-/*
- * Mapping for the VDSO data/GIC pages. The real pages are mapped manually, as
- * what we map and where within the area they are mapped is determined at
- * runtime.
- */
-static struct page *no_pages[] = { NULL };
-static struct vm_special_mapping vdso_vvar_mapping = {
- .name = "[vvar]",
- .pages = no_pages,
-};
+static_assert(VDSO_NR_PAGES == __VDSO_PAGES);
static void __init init_vdso_image(struct mips_vdso_image *image)
{
@@ -90,7 +78,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
struct mips_vdso_image *image = current->thread.abi->vdso;
struct mm_struct *mm = current->mm;
- unsigned long gic_size, vvar_size, size, base, data_addr, vdso_addr, gic_pfn, gic_base;
+ unsigned long gic_size, size, base, data_addr, vdso_addr, gic_pfn, gic_base;
struct vm_area_struct *vma;
int ret;
@@ -119,8 +107,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
* the counter registers at the start.
*/
gic_size = mips_gic_present() ? PAGE_SIZE : 0;
- vvar_size = gic_size + PAGE_SIZE;
- size = vvar_size + image->size;
+ size = gic_size + VDSO_NR_PAGES * PAGE_SIZE + image->size;
/*
* Find a region that's large enough for us to perform the
@@ -143,15 +130,13 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
*/
if (cpu_has_dc_aliases) {
base = __ALIGN_MASK(base, shm_align_mask);
- base += ((unsigned long)vdso_data - gic_size) & shm_align_mask;
+ base += ((unsigned long)vdso_k_time_data - gic_size) & shm_align_mask;
}
data_addr = base + gic_size;
- vdso_addr = data_addr + PAGE_SIZE;
+ vdso_addr = data_addr + VDSO_NR_PAGES * PAGE_SIZE;
- vma = _install_special_mapping(mm, base, vvar_size,
- VM_READ | VM_MAYREAD,
- &vdso_vvar_mapping);
+ vma = vdso_install_vvar_mapping(mm, data_addr);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
goto out;
@@ -161,6 +146,17 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
if (gic_size) {
gic_base = (unsigned long)mips_gic_base + MIPS_GIC_USER_OFS;
gic_pfn = PFN_DOWN(__pa(gic_base));
+ static const struct vm_special_mapping gic_mapping = {
+ .name = "[gic]",
+ .pages = (struct page **) { NULL },
+ };
+
+ vma = _install_special_mapping(mm, base, gic_size, VM_READ | VM_MAYREAD,
+ &gic_mapping);
+ if (IS_ERR(vma)) {
+ ret = PTR_ERR(vma);
+ goto out;
+ }
ret = io_remap_pfn_range(vma, base, gic_pfn, gic_size,
pgprot_noncached(vma->vm_page_prot));
@@ -168,13 +164,6 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
goto out;
}
- /* Map data page. */
- ret = remap_pfn_range(vma, data_addr,
- virt_to_phys(vdso_data) >> PAGE_SHIFT,
- PAGE_SIZE, vma->vm_page_prot);
- if (ret)
- goto out;
-
/* Map VDSO image. */
vma = _install_special_mapping(mm, vdso_addr, image->size,
VM_READ | VM_EXEC |
diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
index 60b43ea85c12..cef3c423a41a 100644
--- a/arch/mips/kvm/mips.c
+++ b/arch/mips/kvm/mips.c
@@ -288,9 +288,8 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
if (err)
return err;
- hrtimer_init(&vcpu->arch.comparecount_timer, CLOCK_MONOTONIC,
- HRTIMER_MODE_REL);
- vcpu->arch.comparecount_timer.function = kvm_mips_comparecount_wakeup;
+ hrtimer_setup(&vcpu->arch.comparecount_timer, kvm_mips_comparecount_wakeup, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
/*
* Allocate space for host mode exception handlers that handle
diff --git a/arch/mips/lib/crc32-mips.c b/arch/mips/lib/crc32-mips.c
index 083e5d693a16..676a4b3e290b 100644
--- a/arch/mips/lib/crc32-mips.c
+++ b/arch/mips/lib/crc32-mips.c
@@ -16,15 +16,6 @@
#include <asm/mipsregs.h>
#include <linux/unaligned.h>
-enum crc_op_size {
- b, h, w, d,
-};
-
-enum crc_type {
- crc32,
- crc32c,
-};
-
#ifndef TOOLCHAIN_SUPPORTS_CRC
#define _ASM_SET_CRC(OP, SZ, TYPE) \
_ASM_MACRO_3R(OP, rt, rs, rt2, \
@@ -117,10 +108,10 @@ u32 crc32_le_arch(u32 crc, const u8 *p, size_t len)
}
EXPORT_SYMBOL(crc32_le_arch);
-u32 crc32c_le_arch(u32 crc, const u8 *p, size_t len)
+u32 crc32c_arch(u32 crc, const u8 *p, size_t len)
{
if (!static_branch_likely(&have_crc32))
- return crc32c_le_base(crc, p, len);
+ return crc32c_base(crc, p, len);
if (IS_ENABLED(CONFIG_64BIT)) {
for (; len >= sizeof(u64); p += sizeof(u64), len -= sizeof(u64)) {
@@ -158,7 +149,7 @@ u32 crc32c_le_arch(u32 crc, const u8 *p, size_t len)
}
return crc;
}
-EXPORT_SYMBOL(crc32c_le_arch);
+EXPORT_SYMBOL(crc32c_arch);
u32 crc32_be_arch(u32 crc, const u8 *p, size_t len)
{
diff --git a/arch/mips/lib/iomap-pci.c b/arch/mips/lib/iomap-pci.c
index a9cb28813f0b..1b7ce19fb3bb 100644
--- a/arch/mips/lib/iomap-pci.c
+++ b/arch/mips/lib/iomap-pci.c
@@ -43,4 +43,14 @@ void __iomem *__pci_ioport_map(struct pci_dev *dev,
return (void __iomem *) (ctrl->io_map_base + port);
}
+void pci_iounmap(struct pci_dev *dev, void __iomem *addr)
+{
+ struct pci_controller *ctrl = dev->bus->sysdata;
+ void __iomem *base = (void __iomem *)ctrl->io_map_base;
+
+ if (addr < base || addr > (base + resource_size(ctrl->io_resource)))
+ iounmap(addr);
+}
+EXPORT_SYMBOL(pci_iounmap);
+
#endif /* CONFIG_PCI_DRIVERS_LEGACY */
diff --git a/arch/mips/loongson64/init.c b/arch/mips/loongson64/init.c
index a35dd7311795..b9f90f33fc9a 100644
--- a/arch/mips/loongson64/init.c
+++ b/arch/mips/loongson64/init.c
@@ -128,7 +128,7 @@ void __init prom_init(void)
}
/* init base address of io space */
- set_io_port_base(PCI_IOBASE);
+ set_io_port_base((unsigned long)PCI_IOBASE);
if (loongson_sysconf.early_config)
loongson_sysconf.early_config();
@@ -178,7 +178,7 @@ static int __init add_legacy_isa_io(struct fwnode_handle *fwnode, resource_size_
return -EINVAL;
}
- vaddr = PCI_IOBASE + range->io_start;
+ vaddr = (unsigned long)PCI_IOBASE + range->io_start;
vmap_page_range(vaddr, vaddr + size, hw_start, pgprot_device(PAGE_KERNEL));
diff --git a/arch/mips/vdso/Makefile b/arch/mips/vdso/Makefile
index b289b2c1b294..fb4c493aaffa 100644
--- a/arch/mips/vdso/Makefile
+++ b/arch/mips/vdso/Makefile
@@ -2,7 +2,7 @@
# Objects to go into the VDSO.
# Include the generic Makefile to check the built vdso.
-include $(srctree)/lib/vdso/Makefile
+include $(srctree)/lib/vdso/Makefile.include
obj-vdso-y := elf.o vgettimeofday.o sigreturn.o
diff --git a/arch/mips/vdso/vdso.lds.S b/arch/mips/vdso/vdso.lds.S
index 836465e3bcb8..c8bbe56d89cb 100644
--- a/arch/mips/vdso/vdso.lds.S
+++ b/arch/mips/vdso/vdso.lds.S
@@ -5,6 +5,8 @@
*/
#include <asm/sgidefs.h>
+#include <asm/vdso/vdso.h>
+#include <vdso/datapage.h>
#if _MIPS_SIM == _MIPS_SIM_ABI64
OUTPUT_FORMAT("elf64-tradlittlemips", "elf64-tradbigmips", "elf64-tradlittlemips")
@@ -18,7 +20,8 @@ OUTPUT_ARCH(mips)
SECTIONS
{
- PROVIDE(_start = .);
+ VDSO_VVAR_SYMS
+
. = SIZEOF_HEADERS;
/*
diff --git a/arch/parisc/configs/generic-64bit_defconfig b/arch/parisc/configs/generic-64bit_defconfig
index 19a804860ed5..44197d30b8af 100644
--- a/arch/parisc/configs/generic-64bit_defconfig
+++ b/arch/parisc/configs/generic-64bit_defconfig
@@ -268,7 +268,6 @@ CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_TMPFS_XATTR=y
CONFIG_CONFIGFS_FS=y
-CONFIG_SYSV_FS=y
CONFIG_NFS_FS=m
CONFIG_NFS_V4=m
CONFIG_NFS_V4_1=y
@@ -294,7 +293,6 @@ CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_DEFLATE=m
# CONFIG_CRYPTO_HW is not set
CONFIG_CRC_CCITT=m
-CONFIG_LIBCRC32C=y
CONFIG_PRINTK_TIME=y
CONFIG_DEBUG_KERNEL=y
CONFIG_STRIP_ASM_SYMS=y
diff --git a/arch/parisc/include/asm/io.h b/arch/parisc/include/asm/io.h
index 3143cf29ce27..59d85d2386bb 100644
--- a/arch/parisc/include/asm/io.h
+++ b/arch/parisc/include/asm/io.h
@@ -227,36 +227,54 @@ extern void outsl (unsigned long port, const void *src, unsigned long count);
#define F_EXTEND(x) ((unsigned long)((x) | (0xffffffff00000000ULL)))
#ifdef CONFIG_64BIT
-#define ioread64 ioread64
-#define ioread64be ioread64be
-#define iowrite64 iowrite64
-#define iowrite64be iowrite64be
extern u64 ioread64(const void __iomem *addr);
extern u64 ioread64be(const void __iomem *addr);
+#define ioread64 ioread64
+#define ioread64be ioread64be
+
extern void iowrite64(u64 val, void __iomem *addr);
extern void iowrite64be(u64 val, void __iomem *addr);
+#define iowrite64 iowrite64
+#define iowrite64be iowrite64be
#endif
-#include <asm-generic/iomap.h>
-/*
- * These get provided from <asm-generic/iomap.h> since parisc does not
- * select GENERIC_IOMAP.
- */
+extern void __iomem *ioport_map(unsigned long port, unsigned int nr);
+extern void ioport_unmap(void __iomem *);
#define ioport_map ioport_map
#define ioport_unmap ioport_unmap
+
+extern unsigned int ioread8(const void __iomem *);
+extern unsigned int ioread16(const void __iomem *);
+extern unsigned int ioread16be(const void __iomem *);
+extern unsigned int ioread32(const void __iomem *);
+extern unsigned int ioread32be(const void __iomem *);
#define ioread8 ioread8
#define ioread16 ioread16
#define ioread32 ioread32
#define ioread16be ioread16be
#define ioread32be ioread32be
+
+extern void iowrite8(u8, void __iomem *);
+extern void iowrite16(u16, void __iomem *);
+extern void iowrite16be(u16, void __iomem *);
+extern void iowrite32(u32, void __iomem *);
+extern void iowrite32be(u32, void __iomem *);
#define iowrite8 iowrite8
#define iowrite16 iowrite16
#define iowrite32 iowrite32
#define iowrite16be iowrite16be
#define iowrite32be iowrite32be
+
+extern void ioread8_rep(const void __iomem *port, void *buf, unsigned long count);
+extern void ioread16_rep(const void __iomem *port, void *buf, unsigned long count);
+extern void ioread32_rep(const void __iomem *port, void *buf, unsigned long count);
#define ioread8_rep ioread8_rep
#define ioread16_rep ioread16_rep
#define ioread32_rep ioread32_rep
+
+extern void iowrite8_rep(void __iomem *port, const void *buf, unsigned long count);
+extern void iowrite16_rep(void __iomem *port, const void *buf, unsigned long count);
+extern void iowrite32_rep(void __iomem *port, const void *buf, unsigned long count);
#define iowrite8_rep iowrite8_rep
#define iowrite16_rep iowrite16_rep
#define iowrite32_rep iowrite32_rep
diff --git a/arch/parisc/include/asm/vdso.h b/arch/parisc/include/asm/vdso.h
index 2a2dc11b5545..5f581c1d6460 100644
--- a/arch/parisc/include/asm/vdso.h
+++ b/arch/parisc/include/asm/vdso.h
@@ -12,8 +12,6 @@
#define VDSO64_SYMBOL(tsk, name) ((tsk)->mm->context.vdso_base + (vdso64_offset_##name))
#define VDSO32_SYMBOL(tsk, name) ((tsk)->mm->context.vdso_base + (vdso32_offset_##name))
-extern struct vdso_data *vdso_data;
-
#endif /* __ASSEMBLY __ */
/* Default link addresses for the vDSOs */
diff --git a/arch/parisc/include/uapi/asm/socket.h b/arch/parisc/include/uapi/asm/socket.h
index aa9cd4b951fe..96831c988606 100644
--- a/arch/parisc/include/uapi/asm/socket.h
+++ b/arch/parisc/include/uapi/asm/socket.h
@@ -132,16 +132,16 @@
#define SO_PASSPIDFD 0x404A
#define SO_PEERPIDFD 0x404B
-#define SO_DEVMEM_LINEAR 78
-#define SCM_DEVMEM_LINEAR SO_DEVMEM_LINEAR
-#define SO_DEVMEM_DMABUF 79
-#define SCM_DEVMEM_DMABUF SO_DEVMEM_DMABUF
-#define SO_DEVMEM_DONTNEED 80
-
#define SCM_TS_OPT_ID 0x404C
#define SO_RCVPRIORITY 0x404D
+#define SO_DEVMEM_LINEAR 0x404E
+#define SCM_DEVMEM_LINEAR SO_DEVMEM_LINEAR
+#define SO_DEVMEM_DMABUF 0x404F
+#define SCM_DEVMEM_DMABUF SO_DEVMEM_DMABUF
+#define SO_DEVMEM_DONTNEED 0x4050
+
#if !defined(__KERNEL__)
#if __BITS_PER_LONG == 64
diff --git a/arch/parisc/kernel/syscalls/syscall.tbl b/arch/parisc/kernel/syscalls/syscall.tbl
index d9fc94c86965..94df3cb957e9 100644
--- a/arch/parisc/kernel/syscalls/syscall.tbl
+++ b/arch/parisc/kernel/syscalls/syscall.tbl
@@ -465,3 +465,4 @@
464 common getxattrat sys_getxattrat
465 common listxattrat sys_listxattrat
466 common removexattrat sys_removexattrat
+467 common open_tree_attr sys_open_tree_attr
diff --git a/arch/parisc/kernel/vdso32/Makefile b/arch/parisc/kernel/vdso32/Makefile
index 288f8b85978f..4ee8d17da229 100644
--- a/arch/parisc/kernel/vdso32/Makefile
+++ b/arch/parisc/kernel/vdso32/Makefile
@@ -1,5 +1,5 @@
# Include the generic Makefile to check the built vdso.
-include $(srctree)/lib/vdso/Makefile
+include $(srctree)/lib/vdso/Makefile.include
KCOV_INSTRUMENT := n
diff --git a/arch/parisc/kernel/vdso64/Makefile b/arch/parisc/kernel/vdso64/Makefile
index bc5d9553f311..c63f4069170f 100644
--- a/arch/parisc/kernel/vdso64/Makefile
+++ b/arch/parisc/kernel/vdso64/Makefile
@@ -1,5 +1,5 @@
# Include the generic Makefile to check the built vdso.
-include $(srctree)/lib/vdso/Makefile
+include $(srctree)/lib/vdso/Makefile.include
KCOV_INSTRUMENT := n
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 424f188e62d9..b5630f8ad436 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -159,6 +159,7 @@ config PPC
select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
select ARCH_HAS_UACCESS_FLUSHCACHE
select ARCH_HAS_UBSAN
+ select ARCH_HAS_VDSO_ARCH_DATA
select ARCH_HAVE_NMI_SAFE_CMPXCHG
select ARCH_HAVE_EXTRA_ELF_NOTES if SPU_BASE
select ARCH_KEEP_MEMBLOCK
@@ -209,6 +210,7 @@ config PPC
select GENERIC_PTDUMP
select GENERIC_SMP_IDLE_THREAD
select GENERIC_TIME_VSYSCALL
+ select GENERIC_VDSO_DATA_STORE
select GENERIC_VDSO_TIME_NS
select HAS_IOPORT if PCI
select HAVE_ARCH_AUDITSYSCALL
@@ -286,6 +288,7 @@ config PPC
select HAVE_STACKPROTECTOR if PPC32 && $(cc-option,$(m32-flag) -mstack-protector-guard=tls -mstack-protector-guard-reg=r2 -mstack-protector-guard-offset=0)
select HAVE_STACKPROTECTOR if PPC64 && $(cc-option,$(m64-flag) -mstack-protector-guard=tls -mstack-protector-guard-reg=r13 -mstack-protector-guard-offset=0)
select HAVE_STATIC_CALL if PPC32
+ select HAVE_STATIC_CALL_INLINE if PPC32
select HAVE_SYSCALL_TRACEPOINTS
select HAVE_VIRT_CPU_ACCOUNTING
select HAVE_VIRT_CPU_ACCOUNTING_GEN
@@ -411,12 +414,9 @@ config ARCH_HAS_ADD_PAGES
config PPC_DCR_NATIVE
bool
-config PPC_DCR_MMIO
- bool
-
config PPC_DCR
bool
- depends on PPC_DCR_NATIVE || PPC_DCR_MMIO
+ depends on PPC_DCR_NATIVE
default y
config PPC_PCI_OF_BUS_MAP
@@ -442,11 +442,6 @@ config PPC_PCI_BUS_NUM_DOMAIN_DEPENDENT
PCI domain dependent and each PCI controller on own domain can have
256 PCI buses, like it is on other Linux architectures.
-config PPC_OF_PLATFORM_PCI
- bool
- depends on PCI
- depends on PPC64 # not supported on 32 bits yet
-
config ARCH_SUPPORTS_UPROBES
def_bool y
diff --git a/arch/powerpc/Kconfig.debug b/arch/powerpc/Kconfig.debug
index 20d05605fa83..f15e5920080b 100644
--- a/arch/powerpc/Kconfig.debug
+++ b/arch/powerpc/Kconfig.debug
@@ -216,13 +216,6 @@ config PPC_EARLY_DEBUG_RTAS_PANEL
help
Select this to enable early debugging via the RTAS panel.
-config PPC_EARLY_DEBUG_RTAS_CONSOLE
- bool "RTAS Console"
- depends on PPC_RTAS
- select UDBG_RTAS_CONSOLE
- help
- Select this to enable early debugging via the RTAS console.
-
config PPC_EARLY_DEBUG_PAS_REALMODE
bool "PA Semi real mode"
depends on PPC_PASEMI
diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile
index 1ff6ad4f6cd2..184d0680e661 100644
--- a/arch/powerpc/boot/Makefile
+++ b/arch/powerpc/boot/Makefile
@@ -173,7 +173,6 @@ src-plat-$(CONFIG_PPC_PS3) += ps3-head.S ps3-hvcall.S ps3.c
src-plat-$(CONFIG_EPAPR_BOOT) += epapr.c epapr-wrapper.c
src-plat-$(CONFIG_PPC_PSERIES) += pseries-head.S
src-plat-$(CONFIG_PPC_POWERNV) += pseries-head.S
-src-plat-$(CONFIG_PPC_IBM_CELL_BLADE) += pseries-head.S
src-plat-$(CONFIG_MVME7100) += motload-head.S mvme7100.c
src-plat-$(CONFIG_PPC_MICROWATT) += fixed-head.S microwatt.c
@@ -276,7 +275,6 @@ quiet_cmd_wrap = WRAP $@
image-$(CONFIG_PPC_PSERIES) += zImage.pseries
image-$(CONFIG_PPC_POWERNV) += zImage.pseries
-image-$(CONFIG_PPC_IBM_CELL_BLADE) += zImage.pseries
image-$(CONFIG_PPC_PS3) += dtbImage.ps3
image-$(CONFIG_PPC_CHRP) += zImage.chrp
image-$(CONFIG_PPC_EFIKA) += zImage.chrp
diff --git a/arch/powerpc/boot/dts/microwatt.dts b/arch/powerpc/boot/dts/microwatt.dts
index 269e930b3b0b..c4e4d2a9b460 100644
--- a/arch/powerpc/boot/dts/microwatt.dts
+++ b/arch/powerpc/boot/dts/microwatt.dts
@@ -1,4 +1,5 @@
/dts-v1/;
+#include <dt-bindings/gpio/gpio.h>
/ {
#size-cells = <0x02>;
@@ -8,6 +9,7 @@
aliases {
serial0 = &UART0;
+ ethernet = &enet0;
};
reserved-memory {
@@ -35,40 +37,79 @@
ibm,powerpc-cpu-features {
display-name = "Microwatt";
- isa = <3000>;
+ isa = <3010>;
device_type = "cpu-features";
compatible = "ibm,powerpc-cpu-features";
mmu-radix {
isa = <3000>;
- usable-privilege = <2>;
+ usable-privilege = <6>;
+ os-support = <0>;
};
little-endian {
- isa = <2050>;
- usable-privilege = <3>;
+ isa = <0>;
+ usable-privilege = <7>;
+ os-support = <0>;
hwcap-bit-nr = <1>;
};
cache-inhibited-large-page {
- isa = <2040>;
- usable-privilege = <2>;
+ isa = <0>;
+ usable-privilege = <6>;
+ os-support = <0>;
};
fixed-point-v3 {
isa = <3000>;
- usable-privilege = <3>;
+ usable-privilege = <7>;
};
no-execute {
- isa = <2010>;
+ isa = <0x00>;
usable-privilege = <2>;
+ os-support = <0>;
};
floating-point {
+ hfscr-bit-nr = <0>;
hwcap-bit-nr = <27>;
isa = <0>;
- usable-privilege = <3>;
+ usable-privilege = <7>;
+ hv-support = <1>;
+ os-support = <0>;
+ };
+
+ prefixed-instructions {
+ hfscr-bit-nr = <13>;
+ fscr-bit-nr = <13>;
+ isa = <3010>;
+ usable-privilege = <7>;
+ os-support = <1>;
+ hv-support = <1>;
+ };
+
+ tar {
+ hfscr-bit-nr = <8>;
+ fscr-bit-nr = <8>;
+ isa = <2070>;
+ usable-privilege = <7>;
+ os-support = <1>;
+ hv-support = <1>;
+ hwcap-bit-nr = <58>;
+ };
+
+ control-register {
+ isa = <0>;
+ usable-privilege = <7>;
+ };
+
+ system-call-vectored {
+ isa = <3000>;
+ usable-privilege = <7>;
+ os-support = <1>;
+ fscr-bit-nr = <12>;
+ hwcap-bit-nr = <52>;
};
};
@@ -101,6 +142,36 @@
ibm,mmu-lpid-bits = <12>;
ibm,mmu-pid-bits = <20>;
};
+
+ PowerPC,Microwatt@1 {
+ i-cache-sets = <2>;
+ ibm,dec-bits = <64>;
+ reservation-granule-size = <64>;
+ clock-frequency = <100000000>;
+ timebase-frequency = <100000000>;
+ i-tlb-sets = <1>;
+ ibm,ppc-interrupt-server#s = <1>;
+ i-cache-block-size = <64>;
+ d-cache-block-size = <64>;
+ d-cache-sets = <2>;
+ i-tlb-size = <64>;
+ cpu-version = <0x990000>;
+ status = "okay";
+ i-cache-size = <0x1000>;
+ ibm,processor-radix-AP-encodings = <0x0c 0xa0000010 0x20000015 0x4000001e>;
+ tlb-size = <0>;
+ tlb-sets = <0>;
+ device_type = "cpu";
+ d-tlb-size = <128>;
+ d-tlb-sets = <2>;
+ reg = <1>;
+ general-purpose;
+ 64-bit;
+ d-cache-size = <0x1000>;
+ ibm,chip-id = <0>;
+ ibm,mmu-lpid-bits = <12>;
+ ibm,mmu-pid-bits = <20>;
+ };
};
soc@c0000000 {
@@ -113,8 +184,8 @@
interrupt-controller@4000 {
compatible = "openpower,xics-presentation", "ibm,ppc-xicp";
- ibm,interrupt-server-ranges = <0x0 0x1>;
- reg = <0x4000 0x100>;
+ ibm,interrupt-server-ranges = <0x0 0x2>;
+ reg = <0x4000 0x10 0x4010 0x10>;
};
ICS: interrupt-controller@5000 {
@@ -138,7 +209,18 @@
interrupts = <0x10 0x1>;
};
- ethernet@8020000 {
+ gpio: gpio@7000 {
+ device_type = "gpio";
+ compatible = "faraday,ftgpio010";
+ gpio-controller;
+ #gpio-cells = <2>;
+ reg = <0x7000 0x80>;
+ interrupts = <0x14 1>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ enet0: ethernet@8020000 {
compatible = "litex,liteeth";
reg = <0x8021000 0x100
0x8020800 0x100
@@ -160,7 +242,6 @@
reg-names = "phy", "core", "reader", "writer", "irq";
bus-width = <4>;
interrupts = <0x13 1>;
- cap-sd-highspeed;
clocks = <&sys_clk>;
};
};
diff --git a/arch/powerpc/configs/85xx/ge_imp3a_defconfig b/arch/powerpc/configs/85xx/ge_imp3a_defconfig
index da6fc203e2dc..6f58ee1edf1f 100644
--- a/arch/powerpc/configs/85xx/ge_imp3a_defconfig
+++ b/arch/powerpc/configs/85xx/ge_imp3a_defconfig
@@ -223,7 +223,6 @@ CONFIG_NLS_KOI8_U=m
CONFIG_NLS_UTF8=y
CONFIG_CRC_CCITT=y
CONFIG_CRC_T10DIF=y
-CONFIG_LIBCRC32C=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_CRYPTO_CBC=y
CONFIG_CRYPTO_MD5=y
diff --git a/arch/powerpc/configs/adder875_defconfig b/arch/powerpc/configs/adder875_defconfig
index 97f4d4851735..3c6445c98a85 100644
--- a/arch/powerpc/configs/adder875_defconfig
+++ b/arch/powerpc/configs/adder875_defconfig
@@ -44,7 +44,6 @@ CONFIG_TMPFS=y
CONFIG_CRAMFS=y
CONFIG_NFS_FS=y
CONFIG_ROOT_NFS=y
-CONFIG_CRC32_SLICEBY4=y
CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
CONFIG_DEBUG_FS=y
CONFIG_MAGIC_SYSRQ=y
diff --git a/arch/powerpc/configs/cell_defconfig b/arch/powerpc/configs/cell_defconfig
index b33f0034990c..3347192b77b8 100644
--- a/arch/powerpc/configs/cell_defconfig
+++ b/arch/powerpc/configs/cell_defconfig
@@ -25,7 +25,6 @@ CONFIG_PS3_DISK=y
CONFIG_PS3_ROM=m
CONFIG_PS3_FLASH=m
CONFIG_PS3_LPM=m
-CONFIG_PPC_IBM_CELL_BLADE=y
CONFIG_RTAS_FLASH=y
CONFIG_CPU_FREQ=y
CONFIG_CPU_FREQ_GOV_POWERSAVE=y
@@ -133,7 +132,6 @@ CONFIG_SKGE=m
CONFIG_SKY2=m
CONFIG_GELIC_NET=m
CONFIG_GELIC_WIRELESS=y
-CONFIG_SPIDER_NET=y
# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set
# CONFIG_SERIO_I8042 is not set
diff --git a/arch/powerpc/configs/ep88xc_defconfig b/arch/powerpc/configs/ep88xc_defconfig
index 50cc59eb36cf..354180ab94bc 100644
--- a/arch/powerpc/configs/ep88xc_defconfig
+++ b/arch/powerpc/configs/ep88xc_defconfig
@@ -47,7 +47,6 @@ CONFIG_TMPFS=y
CONFIG_CRAMFS=y
CONFIG_NFS_FS=y
CONFIG_ROOT_NFS=y
-CONFIG_CRC32_SLICEBY4=y
CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DETECT_HUNG_TASK=y
diff --git a/arch/powerpc/configs/fsl-emb-nonhw.config b/arch/powerpc/configs/fsl-emb-nonhw.config
index 3009b0efaf34..d6d2a458847b 100644
--- a/arch/powerpc/configs/fsl-emb-nonhw.config
+++ b/arch/powerpc/configs/fsl-emb-nonhw.config
@@ -112,7 +112,6 @@ CONFIG_QNX4FS_FS=m
CONFIG_RCU_TRACE=y
CONFIG_RESET_CONTROLLER=y
CONFIG_ROOT_NFS=y
-CONFIG_SYSV_FS=m
CONFIG_SYSVIPC=y
CONFIG_TMPFS=y
CONFIG_UBIFS_FS=y
diff --git a/arch/powerpc/configs/mpc866_ads_defconfig b/arch/powerpc/configs/mpc866_ads_defconfig
index 6f449411abf7..a0d27c59ea78 100644
--- a/arch/powerpc/configs/mpc866_ads_defconfig
+++ b/arch/powerpc/configs/mpc866_ads_defconfig
@@ -39,4 +39,3 @@ CONFIG_CRAMFS=y
CONFIG_NFS_FS=y
CONFIG_ROOT_NFS=y
CONFIG_CRC_CCITT=y
-CONFIG_CRC32_SLICEBY4=y
diff --git a/arch/powerpc/configs/mpc885_ads_defconfig b/arch/powerpc/configs/mpc885_ads_defconfig
index 77306be62e9e..89da51d724fb 100644
--- a/arch/powerpc/configs/mpc885_ads_defconfig
+++ b/arch/powerpc/configs/mpc885_ads_defconfig
@@ -70,7 +70,6 @@ CONFIG_NFS_FS=y
CONFIG_ROOT_NFS=y
CONFIG_CRYPTO=y
CONFIG_CRYPTO_DEV_TALITOS=y
-CONFIG_CRC32_SLICEBY4=y
CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_FS=y
diff --git a/arch/powerpc/configs/ppc64_defconfig b/arch/powerpc/configs/ppc64_defconfig
index 465eb96c755e..5fa154185efa 100644
--- a/arch/powerpc/configs/ppc64_defconfig
+++ b/arch/powerpc/configs/ppc64_defconfig
@@ -51,7 +51,6 @@ CONFIG_PS3_DISK=m
CONFIG_PS3_ROM=m
CONFIG_PS3_FLASH=m
CONFIG_PS3_LPM=m
-CONFIG_PPC_IBM_CELL_BLADE=y
CONFIG_RTAS_FLASH=m
CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
CONFIG_CPU_FREQ_GOV_POWERSAVE=y
@@ -228,7 +227,6 @@ CONFIG_NETXEN_NIC=m
CONFIG_SUNGEM=y
CONFIG_GELIC_NET=m
CONFIG_GELIC_WIRELESS=y
-CONFIG_SPIDER_NET=m
CONFIG_BROADCOM_PHY=m
CONFIG_MARVELL_PHY=y
CONFIG_PPP=m
diff --git a/arch/powerpc/configs/ppc6xx_defconfig b/arch/powerpc/configs/ppc6xx_defconfig
index ca0c90e95837..a91a766b71a4 100644
--- a/arch/powerpc/configs/ppc6xx_defconfig
+++ b/arch/powerpc/configs/ppc6xx_defconfig
@@ -485,7 +485,6 @@ CONFIG_VIA_VELOCITY=m
CONFIG_PCMCIA_XIRC2PS=m
CONFIG_FDDI=y
CONFIG_SKFP=m
-CONFIG_NET_SB1000=m
CONFIG_BROADCOM_PHY=m
CONFIG_CICADA_PHY=m
CONFIG_DAVICOM_PHY=m
@@ -986,7 +985,6 @@ CONFIG_MINIX_FS=m
CONFIG_OMFS_FS=m
CONFIG_QNX4FS_FS=m
CONFIG_ROMFS_FS=m
-CONFIG_SYSV_FS=m
CONFIG_UFS_FS=m
CONFIG_NFS_FS=m
CONFIG_NFS_V3_ACL=y
diff --git a/arch/powerpc/configs/skiroot_defconfig b/arch/powerpc/configs/skiroot_defconfig
index 9d44e6630908..1eb446452fc0 100644
--- a/arch/powerpc/configs/skiroot_defconfig
+++ b/arch/powerpc/configs/skiroot_defconfig
@@ -281,7 +281,6 @@ CONFIG_LSM="yama,loadpin,safesetid,integrity"
# CONFIG_CRYPTO_HW is not set
CONFIG_CRC16=y
CONFIG_CRC_ITU_T=y
-CONFIG_LIBCRC32C=y
# CONFIG_XZ_DEC_X86 is not set
# CONFIG_XZ_DEC_IA64 is not set
# CONFIG_XZ_DEC_ARM is not set
diff --git a/arch/powerpc/configs/tqm8xx_defconfig b/arch/powerpc/configs/tqm8xx_defconfig
index 383c0966e92f..425f10837a18 100644
--- a/arch/powerpc/configs/tqm8xx_defconfig
+++ b/arch/powerpc/configs/tqm8xx_defconfig
@@ -54,7 +54,6 @@ CONFIG_TMPFS=y
CONFIG_CRAMFS=y
CONFIG_NFS_FS=y
CONFIG_ROOT_NFS=y
-CONFIG_CRC32_SLICEBY4=y
CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DETECT_HUNG_TASK=y
diff --git a/arch/powerpc/crypto/Makefile b/arch/powerpc/crypto/Makefile
index 9b38f4a7bc15..2f00b22b0823 100644
--- a/arch/powerpc/crypto/Makefile
+++ b/arch/powerpc/crypto/Makefile
@@ -51,3 +51,4 @@ $(obj)/aesp8-ppc.S $(obj)/ghashp8-ppc.S: $(obj)/%.S: $(src)/%.pl FORCE
OBJECT_FILES_NON_STANDARD_aesp10-ppc.o := y
OBJECT_FILES_NON_STANDARD_ghashp10-ppc.o := y
OBJECT_FILES_NON_STANDARD_aesp8-ppc.o := y
+OBJECT_FILES_NON_STANDARD_ghashp8-ppc.o := y
diff --git a/arch/powerpc/include/asm/cell-pmu.h b/arch/powerpc/include/asm/cell-pmu.h
index 6a79b5d1c44f..7fbefd64b4fb 100644
--- a/arch/powerpc/include/asm/cell-pmu.h
+++ b/arch/powerpc/include/asm/cell-pmu.h
@@ -20,36 +20,9 @@
/* Macros for the pm_control register. */
#define CBE_PM_16BIT_CTR(ctr) (1 << (24 - ((ctr) & (NR_PHYS_CTRS - 1))))
-#define CBE_PM_ENABLE_PERF_MON 0x80000000
-#define CBE_PM_STOP_AT_MAX 0x40000000
-#define CBE_PM_TRACE_MODE_GET(pm_control) (((pm_control) >> 28) & 0x3)
-#define CBE_PM_TRACE_MODE_SET(mode) (((mode) & 0x3) << 28)
-#define CBE_PM_TRACE_BUF_OVFLW(bit) (((bit) & 0x1) << 17)
-#define CBE_PM_COUNT_MODE_SET(count) (((count) & 0x3) << 18)
-#define CBE_PM_FREEZE_ALL_CTRS 0x00100000
-#define CBE_PM_ENABLE_EXT_TRACE 0x00008000
-#define CBE_PM_SPU_ADDR_TRACE_SET(msk) (((msk) & 0x3) << 9)
/* Macros for the trace_address register. */
-#define CBE_PM_TRACE_BUF_FULL 0x00000800
#define CBE_PM_TRACE_BUF_EMPTY 0x00000400
-#define CBE_PM_TRACE_BUF_DATA_COUNT(ta) ((ta) & 0x3ff)
-#define CBE_PM_TRACE_BUF_MAX_COUNT 0x400
-
-/* Macros for the pm07_control registers. */
-#define CBE_PM_CTR_INPUT_MUX(pm07_control) (((pm07_control) >> 26) & 0x3f)
-#define CBE_PM_CTR_INPUT_CONTROL 0x02000000
-#define CBE_PM_CTR_POLARITY 0x01000000
-#define CBE_PM_CTR_COUNT_CYCLES 0x00800000
-#define CBE_PM_CTR_ENABLE 0x00400000
-#define PM07_CTR_INPUT_MUX(x) (((x) & 0x3F) << 26)
-#define PM07_CTR_INPUT_CONTROL(x) (((x) & 1) << 25)
-#define PM07_CTR_POLARITY(x) (((x) & 1) << 24)
-#define PM07_CTR_COUNT_CYCLES(x) (((x) & 1) << 23)
-#define PM07_CTR_ENABLE(x) (((x) & 1) << 22)
-
-/* Macros for the pm_status register. */
-#define CBE_PM_CTR_OVERFLOW_INTR(ctr) (1 << (31 - ((ctr) & 7)))
enum pm_reg_name {
group_control,
@@ -62,33 +35,4 @@ enum pm_reg_name {
pm_start_stop,
};
-/* Routines for reading/writing the PMU registers. */
-extern u32 cbe_read_phys_ctr(u32 cpu, u32 phys_ctr);
-extern void cbe_write_phys_ctr(u32 cpu, u32 phys_ctr, u32 val);
-extern u32 cbe_read_ctr(u32 cpu, u32 ctr);
-extern void cbe_write_ctr(u32 cpu, u32 ctr, u32 val);
-
-extern u32 cbe_read_pm07_control(u32 cpu, u32 ctr);
-extern void cbe_write_pm07_control(u32 cpu, u32 ctr, u32 val);
-extern u32 cbe_read_pm(u32 cpu, enum pm_reg_name reg);
-extern void cbe_write_pm(u32 cpu, enum pm_reg_name reg, u32 val);
-
-extern u32 cbe_get_ctr_size(u32 cpu, u32 phys_ctr);
-extern void cbe_set_ctr_size(u32 cpu, u32 phys_ctr, u32 ctr_size);
-
-extern void cbe_enable_pm(u32 cpu);
-extern void cbe_disable_pm(u32 cpu);
-
-extern void cbe_read_trace_buffer(u32 cpu, u64 *buf);
-
-extern void cbe_enable_pm_interrupts(u32 cpu, u32 thread, u32 mask);
-extern void cbe_disable_pm_interrupts(u32 cpu);
-extern u32 cbe_get_and_clear_pm_interrupts(u32 cpu);
-extern void cbe_sync_irq(int node);
-
-#define CBE_COUNT_SUPERVISOR_MODE 0
-#define CBE_COUNT_HYPERVISOR_MODE 1
-#define CBE_COUNT_PROBLEM_MODE 2
-#define CBE_COUNT_ALL_MODES 3
-
#endif /* __ASM_CELL_PMU_H__ */
diff --git a/arch/powerpc/include/asm/cell-regs.h b/arch/powerpc/include/asm/cell-regs.h
index e1c431ef30e0..20f7339a3d4a 100644
--- a/arch/powerpc/include/asm/cell-regs.h
+++ b/arch/powerpc/include/asm/cell-regs.h
@@ -18,293 +18,6 @@
#include <asm/cell-pmu.h>
-/*
- *
- * Some HID register definitions
- *
- */
-
-/* CBE specific HID0 bits */
-#define HID0_CBE_THERM_WAKEUP 0x0000020000000000ul
-#define HID0_CBE_SYSERR_WAKEUP 0x0000008000000000ul
-#define HID0_CBE_THERM_INT_EN 0x0000000400000000ul
-#define HID0_CBE_SYSERR_INT_EN 0x0000000200000000ul
-
-#define MAX_CBE 2
-
-/*
- *
- * Pervasive unit register definitions
- *
- */
-
-union spe_reg {
- u64 val;
- u8 spe[8];
-};
-
-union ppe_spe_reg {
- u64 val;
- struct {
- u32 ppe;
- u32 spe;
- };
-};
-
-
-struct cbe_pmd_regs {
- /* Debug Bus Control */
- u64 pad_0x0000; /* 0x0000 */
-
- u64 group_control; /* 0x0008 */
-
- u8 pad_0x0010_0x00a8 [0x00a8 - 0x0010]; /* 0x0010 */
-
- u64 debug_bus_control; /* 0x00a8 */
-
- u8 pad_0x00b0_0x0100 [0x0100 - 0x00b0]; /* 0x00b0 */
-
- u64 trace_aux_data; /* 0x0100 */
- u64 trace_buffer_0_63; /* 0x0108 */
- u64 trace_buffer_64_127; /* 0x0110 */
- u64 trace_address; /* 0x0118 */
- u64 ext_tr_timer; /* 0x0120 */
-
- u8 pad_0x0128_0x0400 [0x0400 - 0x0128]; /* 0x0128 */
-
- /* Performance Monitor */
- u64 pm_status; /* 0x0400 */
- u64 pm_control; /* 0x0408 */
- u64 pm_interval; /* 0x0410 */
- u64 pm_ctr[4]; /* 0x0418 */
- u64 pm_start_stop; /* 0x0438 */
- u64 pm07_control[8]; /* 0x0440 */
-
- u8 pad_0x0480_0x0800 [0x0800 - 0x0480]; /* 0x0480 */
-
- /* Thermal Sensor Registers */
- union spe_reg ts_ctsr1; /* 0x0800 */
- u64 ts_ctsr2; /* 0x0808 */
- union spe_reg ts_mtsr1; /* 0x0810 */
- u64 ts_mtsr2; /* 0x0818 */
- union spe_reg ts_itr1; /* 0x0820 */
- u64 ts_itr2; /* 0x0828 */
- u64 ts_gitr; /* 0x0830 */
- u64 ts_isr; /* 0x0838 */
- u64 ts_imr; /* 0x0840 */
- union spe_reg tm_cr1; /* 0x0848 */
- u64 tm_cr2; /* 0x0850 */
- u64 tm_simr; /* 0x0858 */
- union ppe_spe_reg tm_tpr; /* 0x0860 */
- union spe_reg tm_str1; /* 0x0868 */
- u64 tm_str2; /* 0x0870 */
- union ppe_spe_reg tm_tsr; /* 0x0878 */
-
- /* Power Management */
- u64 pmcr; /* 0x0880 */
-#define CBE_PMD_PAUSE_ZERO_CONTROL 0x10000
- u64 pmsr; /* 0x0888 */
-
- /* Time Base Register */
- u64 tbr; /* 0x0890 */
-
- u8 pad_0x0898_0x0c00 [0x0c00 - 0x0898]; /* 0x0898 */
-
- /* Fault Isolation Registers */
- u64 checkstop_fir; /* 0x0c00 */
- u64 recoverable_fir; /* 0x0c08 */
- u64 spec_att_mchk_fir; /* 0x0c10 */
- u32 fir_mode_reg; /* 0x0c18 */
- u8 pad_0x0c1c_0x0c20 [4]; /* 0x0c1c */
-#define CBE_PMD_FIR_MODE_M8 0x00800
- u64 fir_enable_mask; /* 0x0c20 */
-
- u8 pad_0x0c28_0x0ca8 [0x0ca8 - 0x0c28]; /* 0x0c28 */
- u64 ras_esc_0; /* 0x0ca8 */
- u8 pad_0x0cb0_0x1000 [0x1000 - 0x0cb0]; /* 0x0cb0 */
-};
-
-extern struct cbe_pmd_regs __iomem *cbe_get_pmd_regs(struct device_node *np);
-extern struct cbe_pmd_regs __iomem *cbe_get_cpu_pmd_regs(int cpu);
-
-/*
- * PMU shadow registers
- *
- * Many of the registers in the performance monitoring unit are write-only,
- * so we need to save a copy of what we write to those registers.
- *
- * The actual data counters are read/write. However, writing to the counters
- * only takes effect if the PMU is enabled. Otherwise the value is stored in
- * a hardware latch until the next time the PMU is enabled. So we save a copy
- * of the counter values if we need to read them back while the PMU is
- * disabled. The counter_value_in_latch field is a bitmap indicating which
- * counters currently have a value waiting to be written.
- */
-
-struct cbe_pmd_shadow_regs {
- u32 group_control;
- u32 debug_bus_control;
- u32 trace_address;
- u32 ext_tr_timer;
- u32 pm_status;
- u32 pm_control;
- u32 pm_interval;
- u32 pm_start_stop;
- u32 pm07_control[NR_CTRS];
-
- u32 pm_ctr[NR_PHYS_CTRS];
- u32 counter_value_in_latch;
-};
-
-extern struct cbe_pmd_shadow_regs *cbe_get_pmd_shadow_regs(struct device_node *np);
-extern struct cbe_pmd_shadow_regs *cbe_get_cpu_pmd_shadow_regs(int cpu);
-
-/*
- *
- * IIC unit register definitions
- *
- */
-
-struct cbe_iic_pending_bits {
- u32 data;
- u8 flags;
- u8 class;
- u8 source;
- u8 prio;
-};
-
-#define CBE_IIC_IRQ_VALID 0x80
-#define CBE_IIC_IRQ_IPI 0x40
-
-struct cbe_iic_thread_regs {
- struct cbe_iic_pending_bits pending;
- struct cbe_iic_pending_bits pending_destr;
- u64 generate;
- u64 prio;
-};
-
-struct cbe_iic_regs {
- u8 pad_0x0000_0x0400[0x0400 - 0x0000]; /* 0x0000 */
-
- /* IIC interrupt registers */
- struct cbe_iic_thread_regs thread[2]; /* 0x0400 */
-
- u64 iic_ir; /* 0x0440 */
-#define CBE_IIC_IR_PRIO(x) (((x) & 0xf) << 12)
-#define CBE_IIC_IR_DEST_NODE(x) (((x) & 0xf) << 4)
-#define CBE_IIC_IR_DEST_UNIT(x) ((x) & 0xf)
-#define CBE_IIC_IR_IOC_0 0x0
-#define CBE_IIC_IR_IOC_1S 0xb
-#define CBE_IIC_IR_PT_0 0xe
-#define CBE_IIC_IR_PT_1 0xf
-
- u64 iic_is; /* 0x0448 */
-#define CBE_IIC_IS_PMI 0x2
-
- u8 pad_0x0450_0x0500[0x0500 - 0x0450]; /* 0x0450 */
-
- /* IOC FIR */
- u64 ioc_fir_reset; /* 0x0500 */
- u64 ioc_fir_set; /* 0x0508 */
- u64 ioc_checkstop_enable; /* 0x0510 */
- u64 ioc_fir_error_mask; /* 0x0518 */
- u64 ioc_syserr_enable; /* 0x0520 */
- u64 ioc_fir; /* 0x0528 */
-
- u8 pad_0x0530_0x1000[0x1000 - 0x0530]; /* 0x0530 */
-};
-
-extern struct cbe_iic_regs __iomem *cbe_get_iic_regs(struct device_node *np);
-extern struct cbe_iic_regs __iomem *cbe_get_cpu_iic_regs(int cpu);
-
-
-struct cbe_mic_tm_regs {
- u8 pad_0x0000_0x0040[0x0040 - 0x0000]; /* 0x0000 */
-
- u64 mic_ctl_cnfg2; /* 0x0040 */
-#define CBE_MIC_ENABLE_AUX_TRC 0x8000000000000000LL
-#define CBE_MIC_DISABLE_PWR_SAV_2 0x0200000000000000LL
-#define CBE_MIC_DISABLE_AUX_TRC_WRAP 0x0100000000000000LL
-#define CBE_MIC_ENABLE_AUX_TRC_INT 0x0080000000000000LL
-
- u64 pad_0x0048; /* 0x0048 */
-
- u64 mic_aux_trc_base; /* 0x0050 */
- u64 mic_aux_trc_max_addr; /* 0x0058 */
- u64 mic_aux_trc_cur_addr; /* 0x0060 */
- u64 mic_aux_trc_grf_addr; /* 0x0068 */
- u64 mic_aux_trc_grf_data; /* 0x0070 */
-
- u64 pad_0x0078; /* 0x0078 */
-
- u64 mic_ctl_cnfg_0; /* 0x0080 */
-#define CBE_MIC_DISABLE_PWR_SAV_0 0x8000000000000000LL
-
- u64 pad_0x0088; /* 0x0088 */
-
- u64 slow_fast_timer_0; /* 0x0090 */
- u64 slow_next_timer_0; /* 0x0098 */
-
- u8 pad_0x00a0_0x00f8[0x00f8 - 0x00a0]; /* 0x00a0 */
- u64 mic_df_ecc_address_0; /* 0x00f8 */
-
- u8 pad_0x0100_0x01b8[0x01b8 - 0x0100]; /* 0x0100 */
- u64 mic_df_ecc_address_1; /* 0x01b8 */
-
- u64 mic_ctl_cnfg_1; /* 0x01c0 */
-#define CBE_MIC_DISABLE_PWR_SAV_1 0x8000000000000000LL
-
- u64 pad_0x01c8; /* 0x01c8 */
-
- u64 slow_fast_timer_1; /* 0x01d0 */
- u64 slow_next_timer_1; /* 0x01d8 */
-
- u8 pad_0x01e0_0x0208[0x0208 - 0x01e0]; /* 0x01e0 */
- u64 mic_exc; /* 0x0208 */
-#define CBE_MIC_EXC_BLOCK_SCRUB 0x0800000000000000ULL
-#define CBE_MIC_EXC_FAST_SCRUB 0x0100000000000000ULL
-
- u64 mic_mnt_cfg; /* 0x0210 */
-#define CBE_MIC_MNT_CFG_CHAN_0_POP 0x0002000000000000ULL
-#define CBE_MIC_MNT_CFG_CHAN_1_POP 0x0004000000000000ULL
-
- u64 mic_df_config; /* 0x0218 */
-#define CBE_MIC_ECC_DISABLE_0 0x4000000000000000ULL
-#define CBE_MIC_ECC_REP_SINGLE_0 0x2000000000000000ULL
-#define CBE_MIC_ECC_DISABLE_1 0x0080000000000000ULL
-#define CBE_MIC_ECC_REP_SINGLE_1 0x0040000000000000ULL
-
- u8 pad_0x0220_0x0230[0x0230 - 0x0220]; /* 0x0220 */
- u64 mic_fir; /* 0x0230 */
-#define CBE_MIC_FIR_ECC_SINGLE_0_ERR 0x0200000000000000ULL
-#define CBE_MIC_FIR_ECC_MULTI_0_ERR 0x0100000000000000ULL
-#define CBE_MIC_FIR_ECC_SINGLE_1_ERR 0x0080000000000000ULL
-#define CBE_MIC_FIR_ECC_MULTI_1_ERR 0x0040000000000000ULL
-#define CBE_MIC_FIR_ECC_ERR_MASK 0xffff000000000000ULL
-#define CBE_MIC_FIR_ECC_SINGLE_0_CTE 0x0000020000000000ULL
-#define CBE_MIC_FIR_ECC_MULTI_0_CTE 0x0000010000000000ULL
-#define CBE_MIC_FIR_ECC_SINGLE_1_CTE 0x0000008000000000ULL
-#define CBE_MIC_FIR_ECC_MULTI_1_CTE 0x0000004000000000ULL
-#define CBE_MIC_FIR_ECC_CTE_MASK 0x0000ffff00000000ULL
-#define CBE_MIC_FIR_ECC_SINGLE_0_RESET 0x0000000002000000ULL
-#define CBE_MIC_FIR_ECC_MULTI_0_RESET 0x0000000001000000ULL
-#define CBE_MIC_FIR_ECC_SINGLE_1_RESET 0x0000000000800000ULL
-#define CBE_MIC_FIR_ECC_MULTI_1_RESET 0x0000000000400000ULL
-#define CBE_MIC_FIR_ECC_RESET_MASK 0x00000000ffff0000ULL
-#define CBE_MIC_FIR_ECC_SINGLE_0_SET 0x0000000000000200ULL
-#define CBE_MIC_FIR_ECC_MULTI_0_SET 0x0000000000000100ULL
-#define CBE_MIC_FIR_ECC_SINGLE_1_SET 0x0000000000000080ULL
-#define CBE_MIC_FIR_ECC_MULTI_1_SET 0x0000000000000040ULL
-#define CBE_MIC_FIR_ECC_SET_MASK 0x000000000000ffffULL
- u64 mic_fir_debug; /* 0x0238 */
-
- u8 pad_0x0240_0x1000[0x1000 - 0x0240]; /* 0x0240 */
-};
-
-extern struct cbe_mic_tm_regs __iomem *cbe_get_mic_tm_regs(struct device_node *np);
-extern struct cbe_mic_tm_regs __iomem *cbe_get_cpu_mic_tm_regs(int cpu);
-
-
/* Cell page table entries */
#define CBE_IOPTE_PP_W 0x8000000000000000ul /* protection: write */
#define CBE_IOPTE_PP_R 0x4000000000000000ul /* protection: read */
@@ -315,13 +28,4 @@ extern struct cbe_mic_tm_regs __iomem *cbe_get_cpu_mic_tm_regs(int cpu);
#define CBE_IOPTE_H 0x0000000000000800ul /* cache hint */
#define CBE_IOPTE_IOID_Mask 0x00000000000007fful /* ioid */
-/* some utility functions to deal with SMT */
-extern u32 cbe_get_hw_thread_id(int cpu);
-extern u32 cbe_cpu_to_node(int cpu);
-extern u32 cbe_node_to_cpu(int node);
-
-/* Init this module early */
-extern void cbe_regs_init(void);
-
-
#endif /* CBE_REGS_H */
diff --git a/arch/powerpc/include/asm/dcr-generic.h b/arch/powerpc/include/asm/dcr-generic.h
deleted file mode 100644
index 099c28dd40b9..000000000000
--- a/arch/powerpc/include/asm/dcr-generic.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * (c) Copyright 2006 Benjamin Herrenschmidt, IBM Corp.
- * <benh@kernel.crashing.org>
- */
-
-#ifndef _ASM_POWERPC_DCR_GENERIC_H
-#define _ASM_POWERPC_DCR_GENERIC_H
-#ifdef __KERNEL__
-#ifndef __ASSEMBLY__
-
-enum host_type_t {DCR_HOST_MMIO, DCR_HOST_NATIVE, DCR_HOST_INVALID};
-
-typedef struct {
- enum host_type_t type;
- union {
- dcr_host_mmio_t mmio;
- dcr_host_native_t native;
- } host;
-} dcr_host_t;
-
-extern bool dcr_map_ok_generic(dcr_host_t host);
-
-extern dcr_host_t dcr_map_generic(struct device_node *dev, unsigned int dcr_n,
- unsigned int dcr_c);
-extern void dcr_unmap_generic(dcr_host_t host, unsigned int dcr_c);
-
-extern u32 dcr_read_generic(dcr_host_t host, unsigned int dcr_n);
-
-extern void dcr_write_generic(dcr_host_t host, unsigned int dcr_n, u32 value);
-
-#endif /* __ASSEMBLY__ */
-#endif /* __KERNEL__ */
-#endif /* _ASM_POWERPC_DCR_GENERIC_H */
-
-
diff --git a/arch/powerpc/include/asm/dcr-mmio.h b/arch/powerpc/include/asm/dcr-mmio.h
deleted file mode 100644
index fc6d93ef4a13..000000000000
--- a/arch/powerpc/include/asm/dcr-mmio.h
+++ /dev/null
@@ -1,44 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * (c) Copyright 2006 Benjamin Herrenschmidt, IBM Corp.
- * <benh@kernel.crashing.org>
- */
-
-#ifndef _ASM_POWERPC_DCR_MMIO_H
-#define _ASM_POWERPC_DCR_MMIO_H
-#ifdef __KERNEL__
-
-#include <asm/io.h>
-
-typedef struct {
- void __iomem *token;
- unsigned int stride;
- unsigned int base;
-} dcr_host_mmio_t;
-
-static inline bool dcr_map_ok_mmio(dcr_host_mmio_t host)
-{
- return host.token != NULL;
-}
-
-extern dcr_host_mmio_t dcr_map_mmio(struct device_node *dev,
- unsigned int dcr_n,
- unsigned int dcr_c);
-extern void dcr_unmap_mmio(dcr_host_mmio_t host, unsigned int dcr_c);
-
-static inline u32 dcr_read_mmio(dcr_host_mmio_t host, unsigned int dcr_n)
-{
- return in_be32(host.token + ((host.base + dcr_n) * host.stride));
-}
-
-static inline void dcr_write_mmio(dcr_host_mmio_t host,
- unsigned int dcr_n,
- u32 value)
-{
- out_be32(host.token + ((host.base + dcr_n) * host.stride), value);
-}
-
-#endif /* __KERNEL__ */
-#endif /* _ASM_POWERPC_DCR_MMIO_H */
-
-
diff --git a/arch/powerpc/include/asm/dcr.h b/arch/powerpc/include/asm/dcr.h
index 64030e3a1f30..180021cd0b30 100644
--- a/arch/powerpc/include/asm/dcr.h
+++ b/arch/powerpc/include/asm/dcr.h
@@ -10,46 +10,14 @@
#ifndef __ASSEMBLY__
#ifdef CONFIG_PPC_DCR
-#ifdef CONFIG_PPC_DCR_NATIVE
#include <asm/dcr-native.h>
-#endif
-#ifdef CONFIG_PPC_DCR_MMIO
-#include <asm/dcr-mmio.h>
-#endif
-
-
-/* Indirection layer for providing both NATIVE and MMIO support. */
-
-#if defined(CONFIG_PPC_DCR_NATIVE) && defined(CONFIG_PPC_DCR_MMIO)
-
-#include <asm/dcr-generic.h>
-
-#define DCR_MAP_OK(host) dcr_map_ok_generic(host)
-#define dcr_map(dev, dcr_n, dcr_c) dcr_map_generic(dev, dcr_n, dcr_c)
-#define dcr_unmap(host, dcr_c) dcr_unmap_generic(host, dcr_c)
-#define dcr_read(host, dcr_n) dcr_read_generic(host, dcr_n)
-#define dcr_write(host, dcr_n, value) dcr_write_generic(host, dcr_n, value)
-
-#else
-
-#ifdef CONFIG_PPC_DCR_NATIVE
typedef dcr_host_native_t dcr_host_t;
#define DCR_MAP_OK(host) dcr_map_ok_native(host)
#define dcr_map(dev, dcr_n, dcr_c) dcr_map_native(dev, dcr_n, dcr_c)
#define dcr_unmap(host, dcr_c) dcr_unmap_native(host, dcr_c)
#define dcr_read(host, dcr_n) dcr_read_native(host, dcr_n)
#define dcr_write(host, dcr_n, value) dcr_write_native(host, dcr_n, value)
-#else
-typedef dcr_host_mmio_t dcr_host_t;
-#define DCR_MAP_OK(host) dcr_map_ok_mmio(host)
-#define dcr_map(dev, dcr_n, dcr_c) dcr_map_mmio(dev, dcr_n, dcr_c)
-#define dcr_unmap(host, dcr_c) dcr_unmap_mmio(host, dcr_c)
-#define dcr_read(host, dcr_n) dcr_read_mmio(host, dcr_n)
-#define dcr_write(host, dcr_n, value) dcr_write_mmio(host, dcr_n, value)
-#endif
-
-#endif /* defined(CONFIG_PPC_DCR_NATIVE) && defined(CONFIG_PPC_DCR_MMIO) */
/*
* additional helpers to read the DCR * base from the device-tree
diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h
index 65d1f291393d..eeef13db2770 100644
--- a/arch/powerpc/include/asm/hvcall.h
+++ b/arch/powerpc/include/asm/hvcall.h
@@ -348,6 +348,7 @@
#define H_SCM_FLUSH 0x44C
#define H_GET_ENERGY_SCALE_INFO 0x450
#define H_PKS_SIGNED_UPDATE 0x454
+#define H_HTM 0x458
#define H_WATCHDOG 0x45C
#define H_GUEST_GET_CAPABILITIES 0x460
#define H_GUEST_SET_CAPABILITIES 0x464
@@ -498,6 +499,39 @@
#define H_GUEST_CAP_POWER11 (1UL<<(63-3))
#define H_GUEST_CAP_BITMAP2 (1UL<<(63-63))
+/*
+ * Defines for H_HTM - Macros for hardware trace macro (HTM) function.
+ */
+#define H_HTM_FLAGS_HARDWARE_TARGET (1ul << 63)
+#define H_HTM_FLAGS_LOGICAL_TARGET (1ul << 62)
+#define H_HTM_FLAGS_PROCID_TARGET (1ul << 61)
+#define H_HTM_FLAGS_NOWRAP (1ul << 60)
+
+#define H_HTM_OP_SHIFT (63-15)
+#define H_HTM_OP(x) ((unsigned long)(x)<<H_HTM_OP_SHIFT)
+#define H_HTM_OP_CAPABILITIES 0x01
+#define H_HTM_OP_STATUS 0x02
+#define H_HTM_OP_SETUP 0x03
+#define H_HTM_OP_CONFIGURE 0x04
+#define H_HTM_OP_START 0x05
+#define H_HTM_OP_STOP 0x06
+#define H_HTM_OP_DECONFIGURE 0x07
+#define H_HTM_OP_DUMP_DETAILS 0x08
+#define H_HTM_OP_DUMP_DATA 0x09
+#define H_HTM_OP_DUMP_SYSMEM_CONF 0x0a
+#define H_HTM_OP_DUMP_SYSPROC_CONF 0x0b
+
+#define H_HTM_TYPE_SHIFT (63-31)
+#define H_HTM_TYPE(x) ((unsigned long)(x)<<H_HTM_TYPE_SHIFT)
+#define H_HTM_TYPE_NEST 0x01
+#define H_HTM_TYPE_CORE 0x02
+#define H_HTM_TYPE_LLAT 0x03
+#define H_HTM_TYPE_GLOBAL 0xff
+
+#define H_HTM_TARGET_NODE_INDEX(x) ((unsigned long)(x)<<(63-15))
+#define H_HTM_TARGET_NODAL_CHIP_INDEX(x) ((unsigned long)(x)<<(63-31))
+#define H_HTM_TARGET_CORE_INDEX_ON_CHIP(x) ((unsigned long)(x)<<(63-47))
+
#ifndef __ASSEMBLY__
#include <linux/types.h>
diff --git a/arch/powerpc/include/asm/io-defs.h b/arch/powerpc/include/asm/io-defs.h
index faf8617cc574..5c2be9b54a9d 100644
--- a/arch/powerpc/include/asm/io-defs.h
+++ b/arch/powerpc/include/asm/io-defs.h
@@ -1,61 +1,15 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* This file is meant to be include multiple times by other headers */
-/* last 2 argments are used by platforms/cell/io-workarounds.[ch] */
-DEF_PCI_AC_RET(readb, u8, (const PCI_IO_ADDR addr), (addr), mem, addr)
-DEF_PCI_AC_RET(readw, u16, (const PCI_IO_ADDR addr), (addr), mem, addr)
-DEF_PCI_AC_RET(readl, u32, (const PCI_IO_ADDR addr), (addr), mem, addr)
-DEF_PCI_AC_RET(readw_be, u16, (const PCI_IO_ADDR addr), (addr), mem, addr)
-DEF_PCI_AC_RET(readl_be, u32, (const PCI_IO_ADDR addr), (addr), mem, addr)
-DEF_PCI_AC_NORET(writeb, (u8 val, PCI_IO_ADDR addr), (val, addr), mem, addr)
-DEF_PCI_AC_NORET(writew, (u16 val, PCI_IO_ADDR addr), (val, addr), mem, addr)
-DEF_PCI_AC_NORET(writel, (u32 val, PCI_IO_ADDR addr), (val, addr), mem, addr)
-DEF_PCI_AC_NORET(writew_be, (u16 val, PCI_IO_ADDR addr), (val, addr), mem, addr)
-DEF_PCI_AC_NORET(writel_be, (u32 val, PCI_IO_ADDR addr), (val, addr), mem, addr)
-
-#ifdef __powerpc64__
-DEF_PCI_AC_RET(readq, u64, (const PCI_IO_ADDR addr), (addr), mem, addr)
-DEF_PCI_AC_RET(readq_be, u64, (const PCI_IO_ADDR addr), (addr), mem, addr)
-DEF_PCI_AC_NORET(writeq, (u64 val, PCI_IO_ADDR addr), (val, addr), mem, addr)
-DEF_PCI_AC_NORET(writeq_be, (u64 val, PCI_IO_ADDR addr), (val, addr), mem, addr)
-#endif /* __powerpc64__ */
-
-DEF_PCI_AC_RET(inb, u8, (unsigned long port), (port), pio, port)
-DEF_PCI_AC_RET(inw, u16, (unsigned long port), (port), pio, port)
-DEF_PCI_AC_RET(inl, u32, (unsigned long port), (port), pio, port)
-DEF_PCI_AC_NORET(outb, (u8 val, unsigned long port), (val, port), pio, port)
-DEF_PCI_AC_NORET(outw, (u16 val, unsigned long port), (val, port), pio, port)
-DEF_PCI_AC_NORET(outl, (u32 val, unsigned long port), (val, port), pio, port)
-
-DEF_PCI_AC_NORET(readsb, (const PCI_IO_ADDR a, void *b, unsigned long c),
- (a, b, c), mem, a)
-DEF_PCI_AC_NORET(readsw, (const PCI_IO_ADDR a, void *b, unsigned long c),
- (a, b, c), mem, a)
-DEF_PCI_AC_NORET(readsl, (const PCI_IO_ADDR a, void *b, unsigned long c),
- (a, b, c), mem, a)
-DEF_PCI_AC_NORET(writesb, (PCI_IO_ADDR a, const void *b, unsigned long c),
- (a, b, c), mem, a)
-DEF_PCI_AC_NORET(writesw, (PCI_IO_ADDR a, const void *b, unsigned long c),
- (a, b, c), mem, a)
-DEF_PCI_AC_NORET(writesl, (PCI_IO_ADDR a, const void *b, unsigned long c),
- (a, b, c), mem, a)
-
-DEF_PCI_AC_NORET(insb, (unsigned long p, void *b, unsigned long c),
- (p, b, c), pio, p)
-DEF_PCI_AC_NORET(insw, (unsigned long p, void *b, unsigned long c),
- (p, b, c), pio, p)
-DEF_PCI_AC_NORET(insl, (unsigned long p, void *b, unsigned long c),
- (p, b, c), pio, p)
-DEF_PCI_AC_NORET(outsb, (unsigned long p, const void *b, unsigned long c),
- (p, b, c), pio, p)
-DEF_PCI_AC_NORET(outsw, (unsigned long p, const void *b, unsigned long c),
- (p, b, c), pio, p)
-DEF_PCI_AC_NORET(outsl, (unsigned long p, const void *b, unsigned long c),
- (p, b, c), pio, p)
-
-DEF_PCI_AC_NORET(memset_io, (PCI_IO_ADDR a, int c, unsigned long n),
- (a, c, n), mem, a)
-DEF_PCI_AC_NORET(memcpy_fromio, (void *d, const PCI_IO_ADDR s, unsigned long n),
- (d, s, n), mem, s)
-DEF_PCI_AC_NORET(memcpy_toio, (PCI_IO_ADDR d, const void *s, unsigned long n),
- (d, s, n), mem, d)
+DEF_PCI_AC_RET(inb, u8, (unsigned long port), (port))
+DEF_PCI_AC_RET(inw, u16, (unsigned long port), (port))
+DEF_PCI_AC_RET(inl, u32, (unsigned long port), (port))
+DEF_PCI_AC_NORET(outb, (u8 val, unsigned long port), (val, port))
+DEF_PCI_AC_NORET(outw, (u16 val, unsigned long port), (val, port))
+DEF_PCI_AC_NORET(outl, (u32 val, unsigned long port), (val, port))
+DEF_PCI_AC_NORET(insb, (unsigned long p, void *b, unsigned long c), (p, b, c))
+DEF_PCI_AC_NORET(insw, (unsigned long p, void *b, unsigned long c), (p, b, c))
+DEF_PCI_AC_NORET(insl, (unsigned long p, void *b, unsigned long c), (p, b, c))
+DEF_PCI_AC_NORET(outsb, (unsigned long p, const void *b, unsigned long c), (p, b, c))
+DEF_PCI_AC_NORET(outsw, (unsigned long p, const void *b, unsigned long c), (p, b, c))
+DEF_PCI_AC_NORET(outsl, (unsigned long p, const void *b, unsigned long c), (p, b, c))
diff --git a/arch/powerpc/include/asm/io-workarounds.h b/arch/powerpc/include/asm/io-workarounds.h
deleted file mode 100644
index 3cce499fbe27..000000000000
--- a/arch/powerpc/include/asm/io-workarounds.h
+++ /dev/null
@@ -1,55 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Support PCI IO workaround
- *
- * (C) Copyright 2007-2008 TOSHIBA CORPORATION
- */
-
-#ifndef _IO_WORKAROUNDS_H
-#define _IO_WORKAROUNDS_H
-
-#ifdef CONFIG_PPC_IO_WORKAROUNDS
-#include <linux/io.h>
-#include <asm/pci-bridge.h>
-
-/* Bus info */
-struct iowa_bus {
- struct pci_controller *phb;
- struct ppc_pci_io *ops;
- void *private;
-};
-
-void iowa_register_bus(struct pci_controller *, struct ppc_pci_io *,
- int (*)(struct iowa_bus *, void *), void *);
-struct iowa_bus *iowa_mem_find_bus(const PCI_IO_ADDR);
-struct iowa_bus *iowa_pio_find_bus(unsigned long);
-
-extern struct ppc_pci_io spiderpci_ops;
-extern int spiderpci_iowa_init(struct iowa_bus *, void *);
-
-#define SPIDER_PCI_REG_BASE 0xd000
-#define SPIDER_PCI_REG_SIZE 0x1000
-#define SPIDER_PCI_VCI_CNTL_STAT 0x0110
-#define SPIDER_PCI_DUMMY_READ 0x0810
-#define SPIDER_PCI_DUMMY_READ_BASE 0x0814
-
-#endif
-
-#if defined(CONFIG_PPC_IO_WORKAROUNDS) && defined(CONFIG_PPC_INDIRECT_MMIO)
-extern bool io_workaround_inited;
-
-static inline bool iowa_is_active(void)
-{
- return unlikely(io_workaround_inited);
-}
-#else
-static inline bool iowa_is_active(void)
-{
- return false;
-}
-#endif
-
-void __iomem *iowa_ioremap(phys_addr_t addr, unsigned long size,
- pgprot_t prot, void *caller);
-
-#endif /* _IO_WORKAROUNDS_H */
diff --git a/arch/powerpc/include/asm/io.h b/arch/powerpc/include/asm/io.h
index fd92ac450169..492e8855e00f 100644
--- a/arch/powerpc/include/asm/io.h
+++ b/arch/powerpc/include/asm/io.h
@@ -65,8 +65,8 @@ extern resource_size_t isa_mem_base;
extern bool isa_io_special;
#ifdef CONFIG_PPC32
-#if defined(CONFIG_PPC_INDIRECT_PIO) || defined(CONFIG_PPC_INDIRECT_MMIO)
-#error CONFIG_PPC_INDIRECT_{PIO,MMIO} are not yet supported on 32 bits
+#ifdef CONFIG_PPC_INDIRECT_PIO
+#error CONFIG_PPC_INDIRECT_PIO is not yet supported on 32 bits
#endif
#endif
@@ -80,16 +80,12 @@ extern bool isa_io_special;
*
* in_8, in_le16, in_be16, in_le32, in_be32, in_le64, in_be64
* out_8, out_le16, out_be16, out_le32, out_be32, out_le64, out_be64
- * _insb, _insw_ns, _insl_ns, _outsb, _outsw_ns, _outsl_ns
+ * _insb, _insw, _insl, _outsb, _outsw, _outsl
*
* Those operate directly on a kernel virtual address. Note that the prototype
* for the out_* accessors has the arguments in opposite order from the usual
* linux PCI accessors. Unlike those, they take the address first and the value
* next.
- *
- * Note: I might drop the _ns suffix on the stream operations soon as it is
- * simply normal for stream operations to not swap in the first place.
- *
*/
/* -mprefixed can generate offsets beyond range, fall back hack */
@@ -228,19 +224,10 @@ static inline void out_be64(volatile u64 __iomem *addr, u64 val)
*/
extern void _insb(const volatile u8 __iomem *addr, void *buf, long count);
extern void _outsb(volatile u8 __iomem *addr,const void *buf,long count);
-extern void _insw_ns(const volatile u16 __iomem *addr, void *buf, long count);
-extern void _outsw_ns(volatile u16 __iomem *addr, const void *buf, long count);
-extern void _insl_ns(const volatile u32 __iomem *addr, void *buf, long count);
-extern void _outsl_ns(volatile u32 __iomem *addr, const void *buf, long count);
-
-/* The _ns naming is historical and will be removed. For now, just #define
- * the non _ns equivalent names
- */
-#define _insw _insw_ns
-#define _insl _insl_ns
-#define _outsw _outsw_ns
-#define _outsl _outsl_ns
-
+extern void _insw(const volatile u16 __iomem *addr, void *buf, long count);
+extern void _outsw(volatile u16 __iomem *addr, const void *buf, long count);
+extern void _insl(const volatile u32 __iomem *addr, void *buf, long count);
+extern void _outsl(volatile u32 __iomem *addr, const void *buf, long count);
/*
* memset_io, memcpy_toio, memcpy_fromio base implementations are out of line
@@ -261,9 +248,9 @@ extern void _memcpy_toio(volatile void __iomem *dest, const void *src,
* for PowerPC is as close as possible to the x86 version of these, and thus
* provides fairly heavy weight barriers for the non-raw versions
*
- * In addition, they support a hook mechanism when CONFIG_PPC_INDIRECT_MMIO
- * or CONFIG_PPC_INDIRECT_PIO are set allowing the platform to provide its
- * own implementation of some or all of the accessors.
+ * In addition, they support a hook mechanism when CONFIG_PPC_INDIRECT_PIO
+ * is set allowing the platform to provide its own implementation of some
+ * of the accessors.
*/
/*
@@ -274,116 +261,11 @@ extern void _memcpy_toio(volatile void __iomem *dest, const void *src,
#include <asm/eeh.h>
#endif
-/* Shortcut to the MMIO argument pointer */
-#define PCI_IO_ADDR volatile void __iomem *
-
-/* Indirect IO address tokens:
- *
- * When CONFIG_PPC_INDIRECT_MMIO is set, the platform can provide hooks
- * on all MMIOs. (Note that this is all 64 bits only for now)
- *
- * To help platforms who may need to differentiate MMIO addresses in
- * their hooks, a bitfield is reserved for use by the platform near the
- * top of MMIO addresses (not PIO, those have to cope the hard way).
- *
- * The highest address in the kernel virtual space are:
- *
- * d0003fffffffffff # with Hash MMU
- * c00fffffffffffff # with Radix MMU
- *
- * The top 4 bits are reserved as the region ID on hash, leaving us 8 bits
- * that can be used for the field.
- *
- * The direct IO mapping operations will then mask off those bits
- * before doing the actual access, though that only happen when
- * CONFIG_PPC_INDIRECT_MMIO is set, thus be careful when you use that
- * mechanism
- *
- * For PIO, there is a separate CONFIG_PPC_INDIRECT_PIO which makes
- * all PIO functions call through a hook.
- */
-
-#ifdef CONFIG_PPC_INDIRECT_MMIO
-#define PCI_IO_IND_TOKEN_SHIFT 52
-#define PCI_IO_IND_TOKEN_MASK (0xfful << PCI_IO_IND_TOKEN_SHIFT)
-#define PCI_FIX_ADDR(addr) \
- ((PCI_IO_ADDR)(((unsigned long)(addr)) & ~PCI_IO_IND_TOKEN_MASK))
-#define PCI_GET_ADDR_TOKEN(addr) \
- (((unsigned long)(addr) & PCI_IO_IND_TOKEN_MASK) >> \
- PCI_IO_IND_TOKEN_SHIFT)
-#define PCI_SET_ADDR_TOKEN(addr, token) \
-do { \
- unsigned long __a = (unsigned long)(addr); \
- __a &= ~PCI_IO_IND_TOKEN_MASK; \
- __a |= ((unsigned long)(token)) << PCI_IO_IND_TOKEN_SHIFT; \
- (addr) = (void __iomem *)__a; \
-} while(0)
-#else
-#define PCI_FIX_ADDR(addr) (addr)
-#endif
-
-
-/*
- * Non ordered and non-swapping "raw" accessors
- */
-
-static inline unsigned char __raw_readb(const volatile void __iomem *addr)
-{
- return *(volatile unsigned char __force *)PCI_FIX_ADDR(addr);
-}
-#define __raw_readb __raw_readb
-
-static inline unsigned short __raw_readw(const volatile void __iomem *addr)
-{
- return *(volatile unsigned short __force *)PCI_FIX_ADDR(addr);
-}
-#define __raw_readw __raw_readw
-
-static inline unsigned int __raw_readl(const volatile void __iomem *addr)
-{
- return *(volatile unsigned int __force *)PCI_FIX_ADDR(addr);
-}
-#define __raw_readl __raw_readl
-
-static inline void __raw_writeb(unsigned char v, volatile void __iomem *addr)
-{
- *(volatile unsigned char __force *)PCI_FIX_ADDR(addr) = v;
-}
-#define __raw_writeb __raw_writeb
-
-static inline void __raw_writew(unsigned short v, volatile void __iomem *addr)
-{
- *(volatile unsigned short __force *)PCI_FIX_ADDR(addr) = v;
-}
-#define __raw_writew __raw_writew
-
-static inline void __raw_writel(unsigned int v, volatile void __iomem *addr)
-{
- *(volatile unsigned int __force *)PCI_FIX_ADDR(addr) = v;
-}
-#define __raw_writel __raw_writel
+#define _IO_PORT(port) ((volatile void __iomem *)(_IO_BASE + (port)))
#ifdef __powerpc64__
-static inline unsigned long __raw_readq(const volatile void __iomem *addr)
-{
- return *(volatile unsigned long __force *)PCI_FIX_ADDR(addr);
-}
-#define __raw_readq __raw_readq
-
-static inline void __raw_writeq(unsigned long v, volatile void __iomem *addr)
-{
- *(volatile unsigned long __force *)PCI_FIX_ADDR(addr) = v;
-}
-#define __raw_writeq __raw_writeq
-
-static inline void __raw_writeq_be(unsigned long v, volatile void __iomem *addr)
-{
- __raw_writeq((__force unsigned long)cpu_to_be64(v), addr);
-}
-#define __raw_writeq_be __raw_writeq_be
-
/*
- * Real mode versions of the above. Those instructions are only supposed
+ * Real mode versions of raw accessors. Those instructions are only supposed
* to be used in hypervisor real mode as per the architecture spec.
*/
static inline void __raw_rm_writeb(u8 val, volatile void __iomem *paddr)
@@ -551,30 +433,23 @@ __do_out_asm(_rec_outl, "stwbrx")
* possible to hook directly at the toplevel PIO operation if they have to
* be handled differently
*/
-#define __do_writeb(val, addr) out_8(PCI_FIX_ADDR(addr), val)
-#define __do_writew(val, addr) out_le16(PCI_FIX_ADDR(addr), val)
-#define __do_writel(val, addr) out_le32(PCI_FIX_ADDR(addr), val)
-#define __do_writeq(val, addr) out_le64(PCI_FIX_ADDR(addr), val)
-#define __do_writew_be(val, addr) out_be16(PCI_FIX_ADDR(addr), val)
-#define __do_writel_be(val, addr) out_be32(PCI_FIX_ADDR(addr), val)
-#define __do_writeq_be(val, addr) out_be64(PCI_FIX_ADDR(addr), val)
#ifdef CONFIG_EEH
-#define __do_readb(addr) eeh_readb(PCI_FIX_ADDR(addr))
-#define __do_readw(addr) eeh_readw(PCI_FIX_ADDR(addr))
-#define __do_readl(addr) eeh_readl(PCI_FIX_ADDR(addr))
-#define __do_readq(addr) eeh_readq(PCI_FIX_ADDR(addr))
-#define __do_readw_be(addr) eeh_readw_be(PCI_FIX_ADDR(addr))
-#define __do_readl_be(addr) eeh_readl_be(PCI_FIX_ADDR(addr))
-#define __do_readq_be(addr) eeh_readq_be(PCI_FIX_ADDR(addr))
+#define __do_readb(addr) eeh_readb(addr)
+#define __do_readw(addr) eeh_readw(addr)
+#define __do_readl(addr) eeh_readl(addr)
+#define __do_readq(addr) eeh_readq(addr)
+#define __do_readw_be(addr) eeh_readw_be(addr)
+#define __do_readl_be(addr) eeh_readl_be(addr)
+#define __do_readq_be(addr) eeh_readq_be(addr)
#else /* CONFIG_EEH */
-#define __do_readb(addr) in_8(PCI_FIX_ADDR(addr))
-#define __do_readw(addr) in_le16(PCI_FIX_ADDR(addr))
-#define __do_readl(addr) in_le32(PCI_FIX_ADDR(addr))
-#define __do_readq(addr) in_le64(PCI_FIX_ADDR(addr))
-#define __do_readw_be(addr) in_be16(PCI_FIX_ADDR(addr))
-#define __do_readl_be(addr) in_be32(PCI_FIX_ADDR(addr))
-#define __do_readq_be(addr) in_be64(PCI_FIX_ADDR(addr))
+#define __do_readb(addr) in_8(addr)
+#define __do_readw(addr) in_le16(addr)
+#define __do_readl(addr) in_le32(addr)
+#define __do_readq(addr) in_le64(addr)
+#define __do_readw_be(addr) in_be16(addr)
+#define __do_readl_be(addr) in_be32(addr)
+#define __do_readq_be(addr) in_be64(addr)
#endif /* !defined(CONFIG_EEH) */
#ifdef CONFIG_PPC32
@@ -585,64 +460,185 @@ __do_out_asm(_rec_outl, "stwbrx")
#define __do_inw(port) _rec_inw(port)
#define __do_inl(port) _rec_inl(port)
#else /* CONFIG_PPC32 */
-#define __do_outb(val, port) writeb(val,(PCI_IO_ADDR)(_IO_BASE+port));
-#define __do_outw(val, port) writew(val,(PCI_IO_ADDR)(_IO_BASE+port));
-#define __do_outl(val, port) writel(val,(PCI_IO_ADDR)(_IO_BASE+port));
-#define __do_inb(port) readb((PCI_IO_ADDR)(_IO_BASE + port));
-#define __do_inw(port) readw((PCI_IO_ADDR)(_IO_BASE + port));
-#define __do_inl(port) readl((PCI_IO_ADDR)(_IO_BASE + port));
+#define __do_outb(val, port) writeb(val,_IO_PORT(port));
+#define __do_outw(val, port) writew(val,_IO_PORT(port));
+#define __do_outl(val, port) writel(val,_IO_PORT(port));
+#define __do_inb(port) readb(_IO_PORT(port));
+#define __do_inw(port) readw(_IO_PORT(port));
+#define __do_inl(port) readl(_IO_PORT(port));
#endif /* !CONFIG_PPC32 */
#ifdef CONFIG_EEH
-#define __do_readsb(a, b, n) eeh_readsb(PCI_FIX_ADDR(a), (b), (n))
-#define __do_readsw(a, b, n) eeh_readsw(PCI_FIX_ADDR(a), (b), (n))
-#define __do_readsl(a, b, n) eeh_readsl(PCI_FIX_ADDR(a), (b), (n))
+#define __do_readsb(a, b, n) eeh_readsb(a, (b), (n))
+#define __do_readsw(a, b, n) eeh_readsw(a, (b), (n))
+#define __do_readsl(a, b, n) eeh_readsl(a, (b), (n))
#else /* CONFIG_EEH */
-#define __do_readsb(a, b, n) _insb(PCI_FIX_ADDR(a), (b), (n))
-#define __do_readsw(a, b, n) _insw(PCI_FIX_ADDR(a), (b), (n))
-#define __do_readsl(a, b, n) _insl(PCI_FIX_ADDR(a), (b), (n))
+#define __do_readsb(a, b, n) _insb(a, (b), (n))
+#define __do_readsw(a, b, n) _insw(a, (b), (n))
+#define __do_readsl(a, b, n) _insl(a, (b), (n))
#endif /* !CONFIG_EEH */
-#define __do_writesb(a, b, n) _outsb(PCI_FIX_ADDR(a),(b),(n))
-#define __do_writesw(a, b, n) _outsw(PCI_FIX_ADDR(a),(b),(n))
-#define __do_writesl(a, b, n) _outsl(PCI_FIX_ADDR(a),(b),(n))
-
-#define __do_insb(p, b, n) readsb((PCI_IO_ADDR)(_IO_BASE+(p)), (b), (n))
-#define __do_insw(p, b, n) readsw((PCI_IO_ADDR)(_IO_BASE+(p)), (b), (n))
-#define __do_insl(p, b, n) readsl((PCI_IO_ADDR)(_IO_BASE+(p)), (b), (n))
-#define __do_outsb(p, b, n) writesb((PCI_IO_ADDR)(_IO_BASE+(p)),(b),(n))
-#define __do_outsw(p, b, n) writesw((PCI_IO_ADDR)(_IO_BASE+(p)),(b),(n))
-#define __do_outsl(p, b, n) writesl((PCI_IO_ADDR)(_IO_BASE+(p)),(b),(n))
-
-#define __do_memset_io(addr, c, n) \
- _memset_io(PCI_FIX_ADDR(addr), c, n)
-#define __do_memcpy_toio(dst, src, n) \
- _memcpy_toio(PCI_FIX_ADDR(dst), src, n)
+#define __do_writesb(a, b, n) _outsb(a, (b), (n))
+#define __do_writesw(a, b, n) _outsw(a, (b), (n))
+#define __do_writesl(a, b, n) _outsl(a, (b), (n))
+
+#define __do_insb(p, b, n) readsb(_IO_PORT(p), (b), (n))
+#define __do_insw(p, b, n) readsw(_IO_PORT(p), (b), (n))
+#define __do_insl(p, b, n) readsl(_IO_PORT(p), (b), (n))
+#define __do_outsb(p, b, n) writesb(_IO_PORT(p),(b),(n))
+#define __do_outsw(p, b, n) writesw(_IO_PORT(p),(b),(n))
+#define __do_outsl(p, b, n) writesl(_IO_PORT(p),(b),(n))
#ifdef CONFIG_EEH
#define __do_memcpy_fromio(dst, src, n) \
- eeh_memcpy_fromio(dst, PCI_FIX_ADDR(src), n)
+ eeh_memcpy_fromio(dst, src, n)
#else /* CONFIG_EEH */
#define __do_memcpy_fromio(dst, src, n) \
- _memcpy_fromio(dst,PCI_FIX_ADDR(src),n)
+ _memcpy_fromio(dst, src, n)
#endif /* !CONFIG_EEH */
-#ifdef CONFIG_PPC_INDIRECT_PIO
-#define DEF_PCI_HOOK_pio(x) x
-#else
-#define DEF_PCI_HOOK_pio(x) NULL
-#endif
+static inline u8 readb(const volatile void __iomem *addr)
+{
+ return __do_readb(addr);
+}
+#define readb readb
+
+static inline u16 readw(const volatile void __iomem *addr)
+{
+ return __do_readw(addr);
+}
+#define readw readw
+
+static inline u32 readl(const volatile void __iomem *addr)
+{
+ return __do_readl(addr);
+}
+#define readl readl
+
+static inline u16 readw_be(const volatile void __iomem *addr)
+{
+ return __do_readw_be(addr);
+}
+
+static inline u32 readl_be(const volatile void __iomem *addr)
+{
+ return __do_readl_be(addr);
+}
-#ifdef CONFIG_PPC_INDIRECT_MMIO
-#define DEF_PCI_HOOK_mem(x) x
+static inline void writeb(u8 val, volatile void __iomem *addr)
+{
+ out_8(addr, val);
+}
+#define writeb writeb
+
+static inline void writew(u16 val, volatile void __iomem *addr)
+{
+ out_le16(addr, val);
+}
+#define writew writew
+
+static inline void writel(u32 val, volatile void __iomem *addr)
+{
+ out_le32(addr, val);
+}
+#define writel writel
+
+static inline void writew_be(u16 val, volatile void __iomem *addr)
+{
+ out_be16(addr, val);
+}
+
+static inline void writel_be(u32 val, volatile void __iomem *addr)
+{
+ out_be32(addr, val);
+}
+
+static inline void readsb(const volatile void __iomem *a, void *b, unsigned long c)
+{
+ __do_readsb(a, b, c);
+}
+#define readsb readsb
+
+static inline void readsw(const volatile void __iomem *a, void *b, unsigned long c)
+{
+ __do_readsw(a, b, c);
+}
+#define readsw readsw
+
+static inline void readsl(const volatile void __iomem *a, void *b, unsigned long c)
+{
+ __do_readsl(a, b, c);
+}
+#define readsl readsl
+
+static inline void writesb(volatile void __iomem *a, const void *b, unsigned long c)
+{
+ __do_writesb(a, b, c);
+}
+#define writesb writesb
+
+static inline void writesw(volatile void __iomem *a, const void *b, unsigned long c)
+{
+ __do_writesw(a, b, c);
+}
+#define writesw writesw
+
+static inline void writesl(volatile void __iomem *a, const void *b, unsigned long c)
+{
+ __do_writesl(a, b, c);
+}
+#define writesl writesl
+
+static inline void memset_io(volatile void __iomem *a, int c, unsigned long n)
+{
+ _memset_io(a, c, n);
+}
+#define memset_io memset_io
+
+static inline void memcpy_fromio(void *d, const volatile void __iomem *s, unsigned long n)
+{
+ __do_memcpy_fromio(d, s, n);
+}
+#define memcpy_fromio memcpy_fromio
+
+static inline void memcpy_toio(volatile void __iomem *d, const void *s, unsigned long n)
+{
+ _memcpy_toio(d, s, n);
+}
+#define memcpy_toio memcpy_toio
+
+#ifdef __powerpc64__
+static inline u64 readq(const volatile void __iomem *addr)
+{
+ return __do_readq(addr);
+}
+
+static inline u64 readq_be(const volatile void __iomem *addr)
+{
+ return __do_readq_be(addr);
+}
+
+static inline void writeq(u64 val, volatile void __iomem *addr)
+{
+ out_le64(addr, val);
+}
+
+static inline void writeq_be(u64 val, volatile void __iomem *addr)
+{
+ out_be64(addr, val);
+}
+#endif /* __powerpc64__ */
+
+#ifdef CONFIG_PPC_INDIRECT_PIO
+#define DEF_PCI_HOOK(x) x
#else
-#define DEF_PCI_HOOK_mem(x) NULL
+#define DEF_PCI_HOOK(x) NULL
#endif
/* Structure containing all the hooks */
extern struct ppc_pci_io {
-#define DEF_PCI_AC_RET(name, ret, at, al, space, aa) ret (*name) at;
-#define DEF_PCI_AC_NORET(name, at, al, space, aa) void (*name) at;
+#define DEF_PCI_AC_RET(name, ret, at, al) ret (*name) at;
+#define DEF_PCI_AC_NORET(name, at, al) void (*name) at;
#include <asm/io-defs.h>
@@ -652,18 +648,18 @@ extern struct ppc_pci_io {
} ppc_pci_io;
/* The inline wrappers */
-#define DEF_PCI_AC_RET(name, ret, at, al, space, aa) \
+#define DEF_PCI_AC_RET(name, ret, at, al) \
static inline ret name at \
{ \
- if (DEF_PCI_HOOK_##space(ppc_pci_io.name) != NULL) \
+ if (DEF_PCI_HOOK(ppc_pci_io.name) != NULL) \
return ppc_pci_io.name al; \
return __do_##name al; \
}
-#define DEF_PCI_AC_NORET(name, at, al, space, aa) \
+#define DEF_PCI_AC_NORET(name, at, al) \
static inline void name at \
{ \
- if (DEF_PCI_HOOK_##space(ppc_pci_io.name) != NULL) \
+ if (DEF_PCI_HOOK(ppc_pci_io.name) != NULL) \
ppc_pci_io.name al; \
else \
__do_##name al; \
@@ -674,21 +670,7 @@ static inline void name at \
#undef DEF_PCI_AC_RET
#undef DEF_PCI_AC_NORET
-/* Some drivers check for the presence of readq & writeq with
- * a #ifdef, so we make them happy here.
- */
-#define readb readb
-#define readw readw
-#define readl readl
-#define writeb writeb
-#define writew writew
-#define writel writel
-#define readsb readsb
-#define readsw readsw
-#define readsl readsl
-#define writesb writesb
-#define writesw writesw
-#define writesl writesl
+// Signal to asm-generic/io.h that we have implemented these.
#define inb inb
#define inw inw
#define inl inl
@@ -705,9 +687,6 @@ static inline void name at \
#define readq readq
#define writeq writeq
#endif
-#define memset_io memset_io
-#define memcpy_fromio memcpy_fromio
-#define memcpy_toio memcpy_toio
/*
* We don't do relaxed operations yet, at least not with this semantic
@@ -738,35 +717,11 @@ static inline unsigned int ioread32be(const void __iomem *addr)
#define ioread32be ioread32be
#ifdef __powerpc64__
-static inline u64 ioread64_lo_hi(const void __iomem *addr)
-{
- return readq(addr);
-}
-#define ioread64_lo_hi ioread64_lo_hi
-
-static inline u64 ioread64_hi_lo(const void __iomem *addr)
-{
- return readq(addr);
-}
-#define ioread64_hi_lo ioread64_hi_lo
-
static inline u64 ioread64be(const void __iomem *addr)
{
return readq_be(addr);
}
#define ioread64be ioread64be
-
-static inline u64 ioread64be_lo_hi(const void __iomem *addr)
-{
- return readq_be(addr);
-}
-#define ioread64be_lo_hi ioread64be_lo_hi
-
-static inline u64 ioread64be_hi_lo(const void __iomem *addr)
-{
- return readq_be(addr);
-}
-#define ioread64be_hi_lo ioread64be_hi_lo
#endif /* __powerpc64__ */
static inline void iowrite16be(u16 val, void __iomem *addr)
@@ -782,35 +737,11 @@ static inline void iowrite32be(u32 val, void __iomem *addr)
#define iowrite32be iowrite32be
#ifdef __powerpc64__
-static inline void iowrite64_lo_hi(u64 val, void __iomem *addr)
-{
- writeq(val, addr);
-}
-#define iowrite64_lo_hi iowrite64_lo_hi
-
-static inline void iowrite64_hi_lo(u64 val, void __iomem *addr)
-{
- writeq(val, addr);
-}
-#define iowrite64_hi_lo iowrite64_hi_lo
-
static inline void iowrite64be(u64 val, void __iomem *addr)
{
writeq_be(val, addr);
}
#define iowrite64be iowrite64be
-
-static inline void iowrite64be_lo_hi(u64 val, void __iomem *addr)
-{
- writeq_be(val, addr);
-}
-#define iowrite64be_lo_hi iowrite64be_lo_hi
-
-static inline void iowrite64be_hi_lo(u64 val, void __iomem *addr)
-{
- writeq_be(val, addr);
-}
-#define iowrite64be_hi_lo iowrite64be_hi_lo
#endif /* __powerpc64__ */
struct pci_dev;
@@ -1030,6 +961,14 @@ static inline void * bus_to_virt(unsigned long address)
#include <asm-generic/io.h>
+#ifdef __powerpc64__
+static inline void __raw_writeq_be(unsigned long v, volatile void __iomem *addr)
+{
+ __raw_writeq((__force unsigned long)cpu_to_be64(v), addr);
+}
+#define __raw_writeq_be __raw_writeq_be
+#endif // __powerpc64__
+
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_IO_H */
diff --git a/arch/powerpc/include/asm/iommu.h b/arch/powerpc/include/asm/iommu.h
index 04072b5f8962..b410021ad4c6 100644
--- a/arch/powerpc/include/asm/iommu.h
+++ b/arch/powerpc/include/asm/iommu.h
@@ -317,12 +317,6 @@ extern void iommu_flush_tce(struct iommu_table *tbl);
extern enum dma_data_direction iommu_tce_direction(unsigned long tce);
extern unsigned long iommu_direction_to_tce_perm(enum dma_data_direction dir);
-#ifdef CONFIG_PPC_CELL_NATIVE
-extern bool iommu_fixed_is_weak;
-#else
-#define iommu_fixed_is_weak false
-#endif
-
extern const struct dma_map_ops dma_iommu_ops;
#endif /* __KERNEL__ */
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 6e1108f8fce6..2d139c807577 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -902,7 +902,6 @@ struct kvm_vcpu_arch {
#define __KVM_HAVE_ARCH_WQP
#define __KVM_HAVE_CREATE_DEVICE
-static inline void kvm_arch_sync_events(struct kvm *kvm) {}
static inline void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) {}
static inline void kvm_arch_flush_shadow_all(struct kvm *kvm) {}
static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {}
diff --git a/arch/powerpc/include/asm/mmzone.h b/arch/powerpc/include/asm/mmzone.h
index d99863cd6cde..049152f8d597 100644
--- a/arch/powerpc/include/asm/mmzone.h
+++ b/arch/powerpc/include/asm/mmzone.h
@@ -29,6 +29,7 @@ extern cpumask_var_t node_to_cpumask_map[];
#ifdef CONFIG_MEMORY_HOTPLUG
extern unsigned long max_pfn;
u64 memory_hotplug_max(void);
+u64 hot_add_drconf_memory_max(void);
#else
#define memory_hotplug_max() memblock_end_of_DRAM()
#endif
diff --git a/arch/powerpc/include/asm/plpar_wrappers.h b/arch/powerpc/include/asm/plpar_wrappers.h
index 71648c126970..91be7b885944 100644
--- a/arch/powerpc/include/asm/plpar_wrappers.h
+++ b/arch/powerpc/include/asm/plpar_wrappers.h
@@ -65,6 +65,27 @@ static inline long register_dtl(unsigned long cpu, unsigned long vpa)
return vpa_call(H_VPA_REG_DTL, cpu, vpa);
}
+static inline long htm_call(unsigned long flags, unsigned long target,
+ unsigned long operation, unsigned long param1,
+ unsigned long param2, unsigned long param3)
+{
+ return plpar_hcall_norets(H_HTM, flags, target, operation,
+ param1, param2, param3);
+}
+
+static inline long htm_get_dump_hardware(unsigned long nodeindex,
+ unsigned long nodalchipindex, unsigned long coreindexonchip,
+ unsigned long type, unsigned long addr, unsigned long size,
+ unsigned long offset)
+{
+ return htm_call(H_HTM_FLAGS_HARDWARE_TARGET,
+ H_HTM_TARGET_NODE_INDEX(nodeindex) |
+ H_HTM_TARGET_NODAL_CHIP_INDEX(nodalchipindex) |
+ H_HTM_TARGET_CORE_INDEX_ON_CHIP(coreindexonchip),
+ H_HTM_OP(H_HTM_OP_DUMP_DATA) | H_HTM_TYPE(type),
+ addr, size, offset);
+}
+
extern void vpa_init(int cpu);
static inline long plpar_pte_enter(unsigned long flags,
diff --git a/arch/powerpc/include/asm/pmi.h b/arch/powerpc/include/asm/pmi.h
deleted file mode 100644
index 478f0a2fe7f4..000000000000
--- a/arch/powerpc/include/asm/pmi.h
+++ /dev/null
@@ -1,53 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-#ifndef _POWERPC_PMI_H
-#define _POWERPC_PMI_H
-
-/*
- * Definitions for talking with PMI device on PowerPC
- *
- * PMI (Platform Management Interrupt) is a way to communicate
- * with the BMC (Baseboard Management Controller) via interrupts.
- * Unlike IPMI it is bidirectional and has a low latency.
- *
- * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
- *
- * Author: Christian Krafft <krafft@de.ibm.com>
- */
-
-#ifdef __KERNEL__
-
-#define PMI_TYPE_FREQ_CHANGE 0x01
-#define PMI_TYPE_POWER_BUTTON 0x02
-#define PMI_READ_TYPE 0
-#define PMI_READ_DATA0 1
-#define PMI_READ_DATA1 2
-#define PMI_READ_DATA2 3
-#define PMI_WRITE_TYPE 4
-#define PMI_WRITE_DATA0 5
-#define PMI_WRITE_DATA1 6
-#define PMI_WRITE_DATA2 7
-
-#define PMI_ACK 0x80
-
-#define PMI_TIMEOUT 100
-
-typedef struct {
- u8 type;
- u8 data0;
- u8 data1;
- u8 data2;
-} pmi_message_t;
-
-struct pmi_handler {
- struct list_head node;
- u8 type;
- void (*handle_pmi_message) (pmi_message_t);
-};
-
-int pmi_register_handler(struct pmi_handler *);
-void pmi_unregister_handler(struct pmi_handler *);
-
-int pmi_send_message(pmi_message_t);
-
-#endif /* __KERNEL__ */
-#endif /* _POWERPC_PMI_H */
diff --git a/arch/powerpc/include/asm/prom.h b/arch/powerpc/include/asm/prom.h
index c0107d8ddd8c..f679a11a7e7f 100644
--- a/arch/powerpc/include/asm/prom.h
+++ b/arch/powerpc/include/asm/prom.h
@@ -17,6 +17,8 @@
struct device_node;
struct property;
+#define MIN_RMA 768 /* Minimum RMA (in MB) for CAS negotiation */
+
#define OF_DT_BEGIN_NODE 0x1 /* Start of node, full name */
#define OF_DT_END_NODE 0x2 /* End node */
#define OF_DT_PROP 0x3 /* Property: name off, size,
diff --git a/arch/powerpc/include/asm/spu_priv1.h b/arch/powerpc/include/asm/spu_priv1.h
index 6fee411d973d..66b111fa1cd1 100644
--- a/arch/powerpc/include/asm/spu_priv1.h
+++ b/arch/powerpc/include/asm/spu_priv1.h
@@ -215,8 +215,6 @@ spu_disable_spu (struct spu_context *ctx)
* and only intended to be used by the platform setup code.
*/
-extern const struct spu_priv1_ops spu_priv1_mmio_ops;
-
extern const struct spu_management_ops spu_management_of_ops;
#endif /* __KERNEL__ */
diff --git a/arch/powerpc/include/asm/static_call.h b/arch/powerpc/include/asm/static_call.h
index de1018cc522b..e3d5d3823dac 100644
--- a/arch/powerpc/include/asm/static_call.h
+++ b/arch/powerpc/include/asm/static_call.h
@@ -26,4 +26,6 @@
#define ARCH_DEFINE_STATIC_CALL_NULL_TRAMP(name) __PPC_SCT(name, "blr")
#define ARCH_DEFINE_STATIC_CALL_RET0_TRAMP(name) __PPC_SCT(name, "b .+20")
+#define CALL_INSN_SIZE 4
+
#endif /* _ASM_POWERPC_STATIC_CALL_H */
diff --git a/arch/powerpc/include/asm/time.h b/arch/powerpc/include/asm/time.h
index 9bdd8080299b..f8885586efaf 100644
--- a/arch/powerpc/include/asm/time.h
+++ b/arch/powerpc/include/asm/time.h
@@ -89,9 +89,6 @@ static inline unsigned long tb_ticks_since(unsigned long tstamp)
#define mulhdu(x, y) mul_u64_u64_shr(x, y, 64)
#endif
-extern void div128_by_32(u64 dividend_high, u64 dividend_low,
- unsigned divisor, struct div_result *dr);
-
extern void secondary_cpu_time_init(void);
extern void __init time_init(void);
diff --git a/arch/powerpc/include/asm/topology.h b/arch/powerpc/include/asm/topology.h
index 16bacfe8c7a2..da15b5efe807 100644
--- a/arch/powerpc/include/asm/topology.h
+++ b/arch/powerpc/include/asm/topology.h
@@ -152,6 +152,7 @@ static inline bool topology_is_primary_thread(unsigned int cpu)
{
return cpu == cpu_first_thread_sibling(cpu);
}
+#define topology_is_primary_thread topology_is_primary_thread
static inline bool topology_smt_thread_allowed(unsigned int cpu)
{
diff --git a/arch/powerpc/include/asm/vdso.h b/arch/powerpc/include/asm/vdso.h
index 8d972bc98b55..1ca23fbfe087 100644
--- a/arch/powerpc/include/asm/vdso.h
+++ b/arch/powerpc/include/asm/vdso.h
@@ -3,6 +3,7 @@
#define _ASM_POWERPC_VDSO_H
#define VDSO_VERSION_STRING LINUX_2.6.15
+#define __VDSO_PAGES 4
#ifndef __ASSEMBLY__
diff --git a/arch/powerpc/include/asm/vdso/arch_data.h b/arch/powerpc/include/asm/vdso/arch_data.h
new file mode 100644
index 000000000000..c240a6b87518
--- /dev/null
+++ b/arch/powerpc/include/asm/vdso/arch_data.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2002 Peter Bergner <bergner@vnet.ibm.com>, IBM
+ * Copyright (C) 2005 Benjamin Herrenschmidy <benh@kernel.crashing.org>,
+ * IBM Corp.
+ */
+#ifndef _ASM_POWERPC_VDSO_ARCH_DATA_H
+#define _ASM_POWERPC_VDSO_ARCH_DATA_H
+
+#include <linux/unistd.h>
+#include <linux/types.h>
+
+#define SYSCALL_MAP_SIZE ((NR_syscalls + 31) / 32)
+
+#ifdef CONFIG_PPC64
+
+struct vdso_arch_data {
+ __u64 tb_ticks_per_sec; /* Timebase tics / sec */
+ __u32 dcache_block_size; /* L1 d-cache block size */
+ __u32 icache_block_size; /* L1 i-cache block size */
+ __u32 dcache_log_block_size; /* L1 d-cache log block size */
+ __u32 icache_log_block_size; /* L1 i-cache log block size */
+ __u32 syscall_map[SYSCALL_MAP_SIZE]; /* Map of syscalls */
+ __u32 compat_syscall_map[SYSCALL_MAP_SIZE]; /* Map of compat syscalls */
+};
+
+#else /* CONFIG_PPC64 */
+
+struct vdso_arch_data {
+ __u64 tb_ticks_per_sec; /* Timebase tics / sec */
+ __u32 syscall_map[SYSCALL_MAP_SIZE]; /* Map of syscalls */
+ __u32 compat_syscall_map[0]; /* No compat syscalls on PPC32 */
+};
+
+#endif /* CONFIG_PPC64 */
+
+#endif /* _ASM_POWERPC_VDSO_ARCH_DATA_H */
diff --git a/arch/powerpc/include/asm/vdso/getrandom.h b/arch/powerpc/include/asm/vdso/getrandom.h
index 80ce0709725e..067a5396aac6 100644
--- a/arch/powerpc/include/asm/vdso/getrandom.h
+++ b/arch/powerpc/include/asm/vdso/getrandom.h
@@ -43,20 +43,21 @@ static __always_inline ssize_t getrandom_syscall(void *buffer, size_t len, unsig
(unsigned long)len, (unsigned long)flags);
}
-static __always_inline struct vdso_rng_data *__arch_get_vdso_rng_data(void)
+static __always_inline const struct vdso_rng_data *__arch_get_vdso_u_rng_data(void)
{
- struct vdso_arch_data *data;
+ struct vdso_rng_data *data;
asm (
" bcl 20, 31, .+4 ;"
"0: mflr %0 ;"
- " addis %0, %0, (_vdso_datapage - 0b)@ha ;"
- " addi %0, %0, (_vdso_datapage - 0b)@l ;"
+ " addis %0, %0, (vdso_u_rng_data - 0b)@ha ;"
+ " addi %0, %0, (vdso_u_rng_data - 0b)@l ;"
: "=r" (data) : : "lr"
);
- return &data->rng_data;
+ return data;
}
+#define __arch_get_vdso_u_rng_data __arch_get_vdso_u_rng_data
ssize_t __c_kernel_getrandom(void *buffer, size_t len, unsigned int flags, void *opaque_state,
size_t opaque_len);
diff --git a/arch/powerpc/include/asm/vdso/gettimeofday.h b/arch/powerpc/include/asm/vdso/gettimeofday.h
index c6390890a60c..99c9d6f43fde 100644
--- a/arch/powerpc/include/asm/vdso/gettimeofday.h
+++ b/arch/powerpc/include/asm/vdso/gettimeofday.h
@@ -94,22 +94,12 @@ int clock_getres32_fallback(clockid_t _clkid, struct old_timespec32 *_ts)
#endif
static __always_inline u64 __arch_get_hw_counter(s32 clock_mode,
- const struct vdso_data *vd)
+ const struct vdso_time_data *vd)
{
return get_tb();
}
-const struct vdso_data *__arch_get_vdso_data(void);
-
-#ifdef CONFIG_TIME_NS
-static __always_inline
-const struct vdso_data *__arch_get_timens_vdso_data(const struct vdso_data *vd)
-{
- return (void *)vd + (1U << CONFIG_PAGE_SHIFT);
-}
-#endif
-
-static inline bool vdso_clocksource_ok(const struct vdso_data *vd)
+static inline bool vdso_clocksource_ok(const struct vdso_clock *vc)
{
return true;
}
@@ -135,21 +125,22 @@ static __always_inline u64 vdso_shift_ns(u64 ns, unsigned long shift)
#ifdef __powerpc64__
int __c_kernel_clock_gettime(clockid_t clock, struct __kernel_timespec *ts,
- const struct vdso_data *vd);
+ const struct vdso_time_data *vd);
int __c_kernel_clock_getres(clockid_t clock_id, struct __kernel_timespec *res,
- const struct vdso_data *vd);
+ const struct vdso_time_data *vd);
#else
int __c_kernel_clock_gettime(clockid_t clock, struct old_timespec32 *ts,
- const struct vdso_data *vd);
+ const struct vdso_time_data *vd);
int __c_kernel_clock_gettime64(clockid_t clock, struct __kernel_timespec *ts,
- const struct vdso_data *vd);
+ const struct vdso_time_data *vd);
int __c_kernel_clock_getres(clockid_t clock_id, struct old_timespec32 *res,
- const struct vdso_data *vd);
+ const struct vdso_time_data *vd);
#endif
int __c_kernel_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz,
- const struct vdso_data *vd);
+ const struct vdso_time_data *vd);
__kernel_old_time_t __c_kernel_time(__kernel_old_time_t *time,
- const struct vdso_data *vd);
+ const struct vdso_time_data *vd);
+
#endif /* __ASSEMBLY__ */
#endif /* _ASM_POWERPC_VDSO_GETTIMEOFDAY_H */
diff --git a/arch/powerpc/include/asm/vdso/vsyscall.h b/arch/powerpc/include/asm/vdso/vsyscall.h
index 48560a119559..c2c9ae1b22e7 100644
--- a/arch/powerpc/include/asm/vdso/vsyscall.h
+++ b/arch/powerpc/include/asm/vdso/vsyscall.h
@@ -6,19 +6,6 @@
#include <asm/vdso_datapage.h>
-static __always_inline
-struct vdso_data *__arch_get_k_vdso_data(void)
-{
- return vdso_data->data;
-}
-#define __arch_get_k_vdso_data __arch_get_k_vdso_data
-
-static __always_inline
-struct vdso_rng_data *__arch_get_k_vdso_rng_data(void)
-{
- return &vdso_data->rng_data;
-}
-
/* The asm-generic header needs to be included after the definitions above */
#include <asm-generic/vdso/vsyscall.h>
diff --git a/arch/powerpc/include/asm/vdso_datapage.h b/arch/powerpc/include/asm/vdso_datapage.h
index a202f5b63479..95d45a50355d 100644
--- a/arch/powerpc/include/asm/vdso_datapage.h
+++ b/arch/powerpc/include/asm/vdso_datapage.h
@@ -11,56 +11,18 @@
#ifndef __ASSEMBLY__
-#include <linux/unistd.h>
-#include <linux/time.h>
#include <vdso/datapage.h>
-#define SYSCALL_MAP_SIZE ((NR_syscalls + 31) / 32)
-
-#ifdef CONFIG_PPC64
-
-struct vdso_arch_data {
- __u64 tb_ticks_per_sec; /* Timebase tics / sec */
- __u32 dcache_block_size; /* L1 d-cache block size */
- __u32 icache_block_size; /* L1 i-cache block size */
- __u32 dcache_log_block_size; /* L1 d-cache log block size */
- __u32 icache_log_block_size; /* L1 i-cache log block size */
- __u32 syscall_map[SYSCALL_MAP_SIZE]; /* Map of syscalls */
- __u32 compat_syscall_map[SYSCALL_MAP_SIZE]; /* Map of compat syscalls */
-
- struct vdso_rng_data rng_data;
-
- struct vdso_data data[CS_BASES] __aligned(1 << CONFIG_PAGE_SHIFT);
-};
-
-#else /* CONFIG_PPC64 */
-
-struct vdso_arch_data {
- __u64 tb_ticks_per_sec; /* Timebase tics / sec */
- __u32 syscall_map[SYSCALL_MAP_SIZE]; /* Map of syscalls */
- __u32 compat_syscall_map[0]; /* No compat syscalls on PPC32 */
- struct vdso_rng_data rng_data;
-
- struct vdso_data data[CS_BASES] __aligned(1 << CONFIG_PAGE_SHIFT);
-};
-
-#endif /* CONFIG_PPC64 */
-
-extern struct vdso_arch_data *vdso_data;
-
#else /* __ASSEMBLY__ */
-.macro get_datapage ptr offset=0
+.macro get_datapage ptr symbol
bcl 20, 31, .+4
999:
mflr \ptr
- addis \ptr, \ptr, (_vdso_datapage - 999b + \offset)@ha
- addi \ptr, \ptr, (_vdso_datapage - 999b + \offset)@l
+ addis \ptr, \ptr, (\symbol - 999b)@ha
+ addi \ptr, \ptr, (\symbol - 999b)@l
.endm
-#include <asm/asm-offsets.h>
-#include <asm/page.h>
-
#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */
diff --git a/arch/powerpc/include/asm/xics.h b/arch/powerpc/include/asm/xics.h
index 89090485bec1..60ef312dab05 100644
--- a/arch/powerpc/include/asm/xics.h
+++ b/arch/powerpc/include/asm/xics.h
@@ -31,7 +31,6 @@
#ifdef CONFIG_PPC_ICP_NATIVE
extern int icp_native_init(void);
extern void icp_native_flush_interrupt(void);
-extern void icp_native_cause_ipi_rm(int cpu);
#else
static inline int icp_native_init(void) { return -ENODEV; }
#endif
diff --git a/arch/powerpc/include/asm/xmon.h b/arch/powerpc/include/asm/xmon.h
index f2d44b44f46c..535cdb1e411a 100644
--- a/arch/powerpc/include/asm/xmon.h
+++ b/arch/powerpc/include/asm/xmon.h
@@ -12,13 +12,11 @@
#ifdef CONFIG_XMON
extern void xmon_setup(void);
-void __init xmon_register_spus(struct list_head *list);
struct pt_regs;
extern int xmon(struct pt_regs *excp);
extern irqreturn_t xmon_irq(int, void *);
#else
static inline void xmon_setup(void) { }
-static inline void xmon_register_spus(struct list_head *list) { }
#endif
#if defined(CONFIG_XMON) && defined(CONFIG_SMP)
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index f43c1198768c..6ac621155ec3 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -70,7 +70,7 @@ obj-y := cputable.o syscalls.o switch.o \
signal.o sysfs.o cacheinfo.o time.o \
prom.o traps.o setup-common.o \
udbg.o misc.o io.o misc_$(BITS).o \
- of_platform.o prom_parse.o firmware.o \
+ prom_parse.o firmware.o \
hw_breakpoint_constraints.o interrupt.o \
kdebugfs.o stacktrace.o syscall.o
obj-y += ptrace/
@@ -152,8 +152,6 @@ obj-$(CONFIG_PCI_MSI) += msi.o
obj-$(CONFIG_AUDIT) += audit.o
obj64-$(CONFIG_AUDIT) += compat_audit.o
-obj-$(CONFIG_PPC_IO_WORKAROUNDS) += io-workarounds.o
-
obj-y += trace/
ifneq ($(CONFIG_PPC_INDIRECT_PIO),y)
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 7a390bd4f4af..b3048f6d3822 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -334,7 +334,6 @@ int main(void)
#endif /* ! CONFIG_PPC64 */
/* datapage offsets for use by vdso */
- OFFSET(VDSO_DATA_OFFSET, vdso_arch_data, data);
OFFSET(CFG_TB_TICKS_PER_SEC, vdso_arch_data, tb_ticks_per_sec);
#ifdef CONFIG_PPC64
OFFSET(CFG_ICACHE_BLOCKSZ, vdso_arch_data, icache_block_size);
diff --git a/arch/powerpc/kernel/dma-iommu.c b/arch/powerpc/kernel/dma-iommu.c
index f0ae39e77e37..4d64a5db50f3 100644
--- a/arch/powerpc/kernel/dma-iommu.c
+++ b/arch/powerpc/kernel/dma-iommu.c
@@ -136,7 +136,7 @@ static bool dma_iommu_bypass_supported(struct device *dev, u64 mask)
struct pci_dev *pdev = to_pci_dev(dev);
struct pci_controller *phb = pci_bus_to_host(pdev->bus);
- if (iommu_fixed_is_weak || !phb->controller_ops.iommu_bypass_supported)
+ if (!phb->controller_ops.iommu_bypass_supported)
return false;
return phb->controller_ops.iommu_bypass_supported(pdev, mask);
}
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index 195b075d116c..b7229430ca94 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -2537,27 +2537,8 @@ EXC_REAL_NONE(0x1000, 0x100)
EXC_VIRT_NONE(0x5000, 0x100)
EXC_REAL_NONE(0x1100, 0x100)
EXC_VIRT_NONE(0x5100, 0x100)
-
-#ifdef CONFIG_CBE_RAS
-INT_DEFINE_BEGIN(cbe_system_error)
- IVEC=0x1200
- IHSRR=1
-INT_DEFINE_END(cbe_system_error)
-
-EXC_REAL_BEGIN(cbe_system_error, 0x1200, 0x100)
- GEN_INT_ENTRY cbe_system_error, virt=0
-EXC_REAL_END(cbe_system_error, 0x1200, 0x100)
-EXC_VIRT_NONE(0x5200, 0x100)
-EXC_COMMON_BEGIN(cbe_system_error_common)
- GEN_COMMON cbe_system_error
- addi r3,r1,STACK_INT_FRAME_REGS
- bl CFUNC(cbe_system_error_exception)
- b interrupt_return_hsrr
-
-#else /* CONFIG_CBE_RAS */
EXC_REAL_NONE(0x1200, 0x100)
EXC_VIRT_NONE(0x5200, 0x100)
-#endif
/**
* Interrupt 0x1300 - Instruction Address Breakpoint Interrupt.
@@ -2708,26 +2689,8 @@ EXC_COMMON_BEGIN(denorm_exception_common)
b interrupt_return_hsrr
-#ifdef CONFIG_CBE_RAS
-INT_DEFINE_BEGIN(cbe_maintenance)
- IVEC=0x1600
- IHSRR=1
-INT_DEFINE_END(cbe_maintenance)
-
-EXC_REAL_BEGIN(cbe_maintenance, 0x1600, 0x100)
- GEN_INT_ENTRY cbe_maintenance, virt=0
-EXC_REAL_END(cbe_maintenance, 0x1600, 0x100)
-EXC_VIRT_NONE(0x5600, 0x100)
-EXC_COMMON_BEGIN(cbe_maintenance_common)
- GEN_COMMON cbe_maintenance
- addi r3,r1,STACK_INT_FRAME_REGS
- bl CFUNC(cbe_maintenance_exception)
- b interrupt_return_hsrr
-
-#else /* CONFIG_CBE_RAS */
EXC_REAL_NONE(0x1600, 0x100)
EXC_VIRT_NONE(0x5600, 0x100)
-#endif
INT_DEFINE_BEGIN(altivec_assist)
@@ -2755,26 +2718,8 @@ EXC_COMMON_BEGIN(altivec_assist_common)
b interrupt_return_srr
-#ifdef CONFIG_CBE_RAS
-INT_DEFINE_BEGIN(cbe_thermal)
- IVEC=0x1800
- IHSRR=1
-INT_DEFINE_END(cbe_thermal)
-
-EXC_REAL_BEGIN(cbe_thermal, 0x1800, 0x100)
- GEN_INT_ENTRY cbe_thermal, virt=0
-EXC_REAL_END(cbe_thermal, 0x1800, 0x100)
-EXC_VIRT_NONE(0x5800, 0x100)
-EXC_COMMON_BEGIN(cbe_thermal_common)
- GEN_COMMON cbe_thermal
- addi r3,r1,STACK_INT_FRAME_REGS
- bl CFUNC(cbe_thermal_exception)
- b interrupt_return_hsrr
-
-#else /* CONFIG_CBE_RAS */
EXC_REAL_NONE(0x1800, 0x100)
EXC_VIRT_NONE(0x5800, 0x100)
-#endif
#ifdef CONFIG_PPC_WATCHDOG
diff --git a/arch/powerpc/kernel/fadump.c b/arch/powerpc/kernel/fadump.c
index 4b371c738213..df16c7f547ab 100644
--- a/arch/powerpc/kernel/fadump.c
+++ b/arch/powerpc/kernel/fadump.c
@@ -33,6 +33,7 @@
#include <asm/fadump-internal.h>
#include <asm/setup.h>
#include <asm/interrupt.h>
+#include <asm/prom.h>
/*
* The CPU who acquired the lock to trigger the fadump crash should
@@ -751,7 +752,7 @@ u32 *__init fadump_regs_to_elf_notes(u32 *buf, struct pt_regs *regs)
* prstatus.pr_pid = ????
*/
elf_core_copy_regs(&prstatus.pr_reg, regs);
- buf = append_elf_note(buf, CRASH_CORE_NOTE_NAME, NT_PRSTATUS,
+ buf = append_elf_note(buf, NN_PRSTATUS, NT_PRSTATUS,
&prstatus, sizeof(prstatus));
return buf;
}
@@ -1764,19 +1765,19 @@ void __init fadump_setup_param_area(void)
range_end = memblock_end_of_DRAM();
} else {
/*
- * Passing additional parameters is supported for hash MMU only
- * if the first memory block size is 768MB or higher.
+ * Memory range for passing additional parameters for HASH MMU
+ * must meet the following conditions:
+	 * 1. The first memory block size must be higher than the
+	 *    minimum RMA (MIN_RMA) size. The bootloader can use memory
+	 *    up to the RMA size, so that region should be avoided.
+	 * 2. The range should lie between MIN_RMA and the RMA size (ppc64_rma_size).
+	 * 3. It must not overlap with the fadump reserved area.
*/
- if (ppc64_rma_size < 0x30000000)
+ if (ppc64_rma_size < MIN_RMA*1024*1024)
return;
- /*
- * 640 MB to 768 MB is not used by PFW/bootloader. So, try reserving
- * memory for passing additional parameters in this range to avoid
- * being stomped on by PFW/bootloader.
- */
- range_start = 0x2A000000;
- range_end = range_start + 0x4000000;
+ range_start = MIN_RMA * 1024 * 1024;
+ range_end = min(ppc64_rma_size, fw_dump.boot_mem_top);
}
fw_dump.param_area = memblock_phys_alloc_range(COMMAND_LINE_SIZE,
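
As a rough, standalone illustration of the window selection above (not part of the patch): in the hash-MMU case the parameter area is placed between MIN_RMA and the smaller of the RMA size and the top of boot memory. The sketch below is plain userspace C; the MIN_RMA value and the pick_param_range()/main() scaffolding are assumptions for the demo, not kernel code.

/* Illustrative only: mirrors the hash-MMU range selection shown above. */
#include <stdint.h>
#include <stdio.h>

#define MIN_RMA 768ULL			/* assumed value, in MB */

static int pick_param_range(uint64_t rma_size, uint64_t boot_mem_top,
			    uint64_t *start, uint64_t *end)
{
	if (rma_size < MIN_RMA * 1024 * 1024)
		return -1;			/* RMA too small, give up */
	*start = MIN_RMA * 1024 * 1024;		/* stay above bootloader-used RMA */
	*end = rma_size < boot_mem_top ? rma_size : boot_mem_top;
	return 0;
}

int main(void)
{
	uint64_t s, e;

	if (!pick_param_range(1ULL << 30, 2ULL << 30, &s, &e))
		printf("param area window: [%#llx, %#llx)\n",
		       (unsigned long long)s, (unsigned long long)e);
	return 0;
}
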
diff --git a/arch/powerpc/kernel/io-workarounds.c b/arch/powerpc/kernel/io-workarounds.c
deleted file mode 100644
index c877f074d174..000000000000
--- a/arch/powerpc/kernel/io-workarounds.c
+++ /dev/null
@@ -1,197 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Support PCI IO workaround
- *
- * Copyright (C) 2006 Benjamin Herrenschmidt <benh@kernel.crashing.org>
- * IBM, Corp.
- * (C) Copyright 2007-2008 TOSHIBA CORPORATION
- */
-#undef DEBUG
-
-#include <linux/kernel.h>
-#include <linux/sched/mm.h> /* for init_mm */
-#include <linux/pgtable.h>
-
-#include <asm/io.h>
-#include <asm/machdep.h>
-#include <asm/ppc-pci.h>
-#include <asm/io-workarounds.h>
-#include <asm/pte-walk.h>
-
-
-#define IOWA_MAX_BUS 8
-
-static struct iowa_bus iowa_busses[IOWA_MAX_BUS];
-static unsigned int iowa_bus_count;
-
-static struct iowa_bus *iowa_pci_find(unsigned long vaddr, unsigned long paddr)
-{
- int i, j;
- struct resource *res;
- unsigned long vstart, vend;
-
- for (i = 0; i < iowa_bus_count; i++) {
- struct iowa_bus *bus = &iowa_busses[i];
- struct pci_controller *phb = bus->phb;
-
- if (vaddr) {
- vstart = (unsigned long)phb->io_base_virt;
- vend = vstart + phb->pci_io_size - 1;
- if ((vaddr >= vstart) && (vaddr <= vend))
- return bus;
- }
-
- if (paddr)
- for (j = 0; j < 3; j++) {
- res = &phb->mem_resources[j];
- if (paddr >= res->start && paddr <= res->end)
- return bus;
- }
- }
-
- return NULL;
-}
-
-#ifdef CONFIG_PPC_INDIRECT_MMIO
-struct iowa_bus *iowa_mem_find_bus(const PCI_IO_ADDR addr)
-{
- struct iowa_bus *bus;
- int token;
-
- token = PCI_GET_ADDR_TOKEN(addr);
-
- if (token && token <= iowa_bus_count)
- bus = &iowa_busses[token - 1];
- else {
- unsigned long vaddr, paddr;
-
- vaddr = (unsigned long)PCI_FIX_ADDR(addr);
- if (vaddr < PHB_IO_BASE || vaddr >= PHB_IO_END)
- return NULL;
-
- paddr = ppc_find_vmap_phys(vaddr);
-
- bus = iowa_pci_find(vaddr, paddr);
-
- if (bus == NULL)
- return NULL;
- }
-
- return bus;
-}
-#else /* CONFIG_PPC_INDIRECT_MMIO */
-struct iowa_bus *iowa_mem_find_bus(const PCI_IO_ADDR addr)
-{
- return NULL;
-}
-#endif /* !CONFIG_PPC_INDIRECT_MMIO */
-
-#ifdef CONFIG_PPC_INDIRECT_PIO
-struct iowa_bus *iowa_pio_find_bus(unsigned long port)
-{
- unsigned long vaddr = (unsigned long)pci_io_base + port;
- return iowa_pci_find(vaddr, 0);
-}
-#else
-struct iowa_bus *iowa_pio_find_bus(unsigned long port)
-{
- return NULL;
-}
-#endif
-
-#define DEF_PCI_AC_RET(name, ret, at, al, space, aa) \
-static ret iowa_##name at \
-{ \
- struct iowa_bus *bus; \
- bus = iowa_##space##_find_bus(aa); \
- if (bus && bus->ops && bus->ops->name) \
- return bus->ops->name al; \
- return __do_##name al; \
-}
-
-#define DEF_PCI_AC_NORET(name, at, al, space, aa) \
-static void iowa_##name at \
-{ \
- struct iowa_bus *bus; \
- bus = iowa_##space##_find_bus(aa); \
- if (bus && bus->ops && bus->ops->name) { \
- bus->ops->name al; \
- return; \
- } \
- __do_##name al; \
-}
-
-#include <asm/io-defs.h>
-
-#undef DEF_PCI_AC_RET
-#undef DEF_PCI_AC_NORET
-
-static const struct ppc_pci_io iowa_pci_io = {
-
-#define DEF_PCI_AC_RET(name, ret, at, al, space, aa) .name = iowa_##name,
-#define DEF_PCI_AC_NORET(name, at, al, space, aa) .name = iowa_##name,
-
-#include <asm/io-defs.h>
-
-#undef DEF_PCI_AC_RET
-#undef DEF_PCI_AC_NORET
-
-};
-
-#ifdef CONFIG_PPC_INDIRECT_MMIO
-void __iomem *iowa_ioremap(phys_addr_t addr, unsigned long size,
- pgprot_t prot, void *caller)
-{
- struct iowa_bus *bus;
- void __iomem *res = __ioremap_caller(addr, size, prot, caller);
- int busno;
-
- bus = iowa_pci_find(0, (unsigned long)addr);
- if (bus != NULL) {
- busno = bus - iowa_busses;
- PCI_SET_ADDR_TOKEN(res, busno + 1);
- }
- return res;
-}
-#endif /* !CONFIG_PPC_INDIRECT_MMIO */
-
-bool io_workaround_inited;
-
-/* Enable IO workaround */
-static void io_workaround_init(void)
-{
- if (io_workaround_inited)
- return;
- ppc_pci_io = iowa_pci_io;
- io_workaround_inited = true;
-}
-
-/* Register new bus to support workaround */
-void iowa_register_bus(struct pci_controller *phb, struct ppc_pci_io *ops,
- int (*initfunc)(struct iowa_bus *, void *), void *data)
-{
- struct iowa_bus *bus;
- struct device_node *np = phb->dn;
-
- io_workaround_init();
-
- if (iowa_bus_count >= IOWA_MAX_BUS) {
- pr_err("IOWA:Too many pci bridges, "
- "workarounds disabled for %pOF\n", np);
- return;
- }
-
- bus = &iowa_busses[iowa_bus_count];
- bus->phb = phb;
- bus->ops = ops;
- bus->private = data;
-
- if (initfunc)
- if ((*initfunc)(bus, data))
- return;
-
- iowa_bus_count++;
-
- pr_debug("IOWA:[%d]Add bus, %pOF.\n", iowa_bus_count-1, np);
-}
-
diff --git a/arch/powerpc/kernel/io.c b/arch/powerpc/kernel/io.c
index 6af535905984..bcc201c01514 100644
--- a/arch/powerpc/kernel/io.c
+++ b/arch/powerpc/kernel/io.c
@@ -31,13 +31,14 @@ void _insb(const volatile u8 __iomem *port, void *buf, long count)
if (unlikely(count <= 0))
return;
- asm volatile("sync");
+
+ mb();
do {
tmp = *(const volatile u8 __force *)port;
eieio();
*tbuf++ = tmp;
} while (--count != 0);
- asm volatile("twi 0,%0,0; isync" : : "r" (tmp));
+ data_barrier(tmp);
}
EXPORT_SYMBOL(_insb);
@@ -47,75 +48,80 @@ void _outsb(volatile u8 __iomem *port, const void *buf, long count)
if (unlikely(count <= 0))
return;
- asm volatile("sync");
+
+ mb();
do {
*(volatile u8 __force *)port = *tbuf++;
} while (--count != 0);
- asm volatile("sync");
+ mb();
}
EXPORT_SYMBOL(_outsb);
-void _insw_ns(const volatile u16 __iomem *port, void *buf, long count)
+void _insw(const volatile u16 __iomem *port, void *buf, long count)
{
u16 *tbuf = buf;
u16 tmp;
if (unlikely(count <= 0))
return;
- asm volatile("sync");
+
+ mb();
do {
tmp = *(const volatile u16 __force *)port;
eieio();
*tbuf++ = tmp;
} while (--count != 0);
- asm volatile("twi 0,%0,0; isync" : : "r" (tmp));
+ data_barrier(tmp);
}
-EXPORT_SYMBOL(_insw_ns);
+EXPORT_SYMBOL(_insw);
-void _outsw_ns(volatile u16 __iomem *port, const void *buf, long count)
+void _outsw(volatile u16 __iomem *port, const void *buf, long count)
{
const u16 *tbuf = buf;
if (unlikely(count <= 0))
return;
- asm volatile("sync");
+
+ mb();
do {
*(volatile u16 __force *)port = *tbuf++;
} while (--count != 0);
- asm volatile("sync");
+ mb();
}
-EXPORT_SYMBOL(_outsw_ns);
+EXPORT_SYMBOL(_outsw);
-void _insl_ns(const volatile u32 __iomem *port, void *buf, long count)
+void _insl(const volatile u32 __iomem *port, void *buf, long count)
{
u32 *tbuf = buf;
u32 tmp;
if (unlikely(count <= 0))
return;
- asm volatile("sync");
+
+ mb();
do {
tmp = *(const volatile u32 __force *)port;
eieio();
*tbuf++ = tmp;
} while (--count != 0);
- asm volatile("twi 0,%0,0; isync" : : "r" (tmp));
+ data_barrier(tmp);
}
-EXPORT_SYMBOL(_insl_ns);
+EXPORT_SYMBOL(_insl);
-void _outsl_ns(volatile u32 __iomem *port, const void *buf, long count)
+void _outsl(volatile u32 __iomem *port, const void *buf, long count)
{
const u32 *tbuf = buf;
if (unlikely(count <= 0))
return;
- asm volatile("sync");
+
+ mb();
do {
*(volatile u32 __force *)port = *tbuf++;
} while (--count != 0);
- asm volatile("sync");
+ mb();
}
-EXPORT_SYMBOL(_outsl_ns);
+EXPORT_SYMBOL(_outsl);
#define IO_CHECK_ALIGN(v,a) ((((unsigned long)(v)) & ((a) - 1)) == 0)
@@ -127,7 +133,7 @@ _memset_io(volatile void __iomem *addr, int c, unsigned long n)
lc |= lc << 8;
lc |= lc << 16;
- __asm__ __volatile__ ("sync" : : : "memory");
+ mb();
while(n && !IO_CHECK_ALIGN(p, 4)) {
*((volatile u8 *)p) = c;
p++;
@@ -143,7 +149,7 @@ _memset_io(volatile void __iomem *addr, int c, unsigned long n)
p++;
n--;
}
- __asm__ __volatile__ ("sync" : : : "memory");
+ mb();
}
EXPORT_SYMBOL(_memset_io);
@@ -152,7 +158,7 @@ void _memcpy_fromio(void *dest, const volatile void __iomem *src,
{
void *vsrc = (void __force *) src;
- __asm__ __volatile__ ("sync" : : : "memory");
+ mb();
while(n && (!IO_CHECK_ALIGN(vsrc, 4) || !IO_CHECK_ALIGN(dest, 4))) {
*((u8 *)dest) = *((volatile u8 *)vsrc);
eieio();
@@ -174,7 +180,7 @@ void _memcpy_fromio(void *dest, const volatile void __iomem *src,
dest++;
n--;
}
- __asm__ __volatile__ ("sync" : : : "memory");
+ mb();
}
EXPORT_SYMBOL(_memcpy_fromio);
@@ -182,7 +188,7 @@ void _memcpy_toio(volatile void __iomem *dest, const void *src, unsigned long n)
{
void *vdest = (void __force *) dest;
- __asm__ __volatile__ ("sync" : : : "memory");
+ mb();
while(n && (!IO_CHECK_ALIGN(vdest, 4) || !IO_CHECK_ALIGN(src, 4))) {
*((volatile u8 *)vdest) = *((u8 *)src);
src++;
@@ -201,6 +207,6 @@ void _memcpy_toio(volatile void __iomem *dest, const void *src, unsigned long n)
vdest++;
n--;
}
- __asm__ __volatile__ ("sync" : : : "memory");
+ mb();
}
EXPORT_SYMBOL(_memcpy_toio);
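
For readers who don't have asm/barrier.h at hand, the substitution above is visible in the removed lines themselves: each explicit "sync" becomes mb(), and the trailing "twi 0,%0,0; isync" trick becomes data_barrier(). A hedged sketch of that correspondence -- illustrative only, the real kernel definitions may differ in detail -- is:

/* Illustrative rendering of the helpers used in the hunks above. */
#define mb()		__asm__ __volatile__("sync" : : : "memory")
#define data_barrier(x)	__asm__ __volatile__("twi 0,%0,0; isync" \
					     : : "r" (x) : "memory")
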
diff --git a/arch/powerpc/kernel/of_platform.c b/arch/powerpc/kernel/of_platform.c
deleted file mode 100644
index adc76fa58d1e..000000000000
--- a/arch/powerpc/kernel/of_platform.c
+++ /dev/null
@@ -1,102 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corp.
- * <benh@kernel.crashing.org>
- * and Arnd Bergmann, IBM Corp.
- */
-
-#undef DEBUG
-
-#include <linux/string.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/export.h>
-#include <linux/mod_devicetable.h>
-#include <linux/pci.h>
-#include <linux/platform_device.h>
-#include <linux/atomic.h>
-
-#include <asm/errno.h>
-#include <asm/topology.h>
-#include <asm/pci-bridge.h>
-#include <asm/ppc-pci.h>
-#include <asm/eeh.h>
-
-#ifdef CONFIG_PPC_OF_PLATFORM_PCI
-
-/* The probing of PCI controllers from of_platform is currently
- * 64 bits only, mostly due to gratuitous differences between
- * the 32 and 64 bits PCI code on PowerPC and the 32 bits one
- * lacking some bits needed here.
- */
-
-static int of_pci_phb_probe(struct platform_device *dev)
-{
- struct pci_controller *phb;
-
- /* Check if we can do that ... */
- if (ppc_md.pci_setup_phb == NULL)
- return -ENODEV;
-
- pr_info("Setting up PCI bus %pOF\n", dev->dev.of_node);
-
- /* Alloc and setup PHB data structure */
- phb = pcibios_alloc_controller(dev->dev.of_node);
- if (!phb)
- return -ENODEV;
-
- /* Setup parent in sysfs */
- phb->parent = &dev->dev;
-
- /* Setup the PHB using arch provided callback */
- if (ppc_md.pci_setup_phb(phb)) {
- pcibios_free_controller(phb);
- return -ENODEV;
- }
-
- /* Process "ranges" property */
- pci_process_bridge_OF_ranges(phb, dev->dev.of_node, 0);
-
- /* Init pci_dn data structures */
- pci_devs_phb_init_dynamic(phb);
-
- /* Create EEH PE for the PHB */
- eeh_phb_pe_create(phb);
-
- /* Scan the bus */
- pcibios_scan_phb(phb);
- if (phb->bus == NULL)
- return -ENXIO;
-
- /* Claim resources. This might need some rework as well depending
- * whether we are doing probe-only or not, like assigning unassigned
- * resources etc...
- */
- pcibios_claim_one_bus(phb->bus);
-
- /* Add probed PCI devices to the device model */
- pci_bus_add_devices(phb->bus);
-
- return 0;
-}
-
-static const struct of_device_id of_pci_phb_ids[] = {
- { .type = "pci", },
- { .type = "pcix", },
- { .type = "pcie", },
- { .type = "pciex", },
- { .type = "ht", },
- {}
-};
-
-static struct platform_driver of_pci_phb_driver = {
- .probe = of_pci_phb_probe,
- .driver = {
- .name = "of-pci",
- .of_match_table = of_pci_phb_ids,
- },
-};
-
-builtin_platform_driver(of_pci_phb_driver);
-
-#endif /* CONFIG_PPC_OF_PLATFORM_PCI */
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index 57082fac4668..827c958677f8 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -1061,7 +1061,7 @@ static const struct ibm_arch_vec ibm_architecture_vec_template __initconst = {
.virt_base = cpu_to_be32(0xffffffff),
.virt_size = cpu_to_be32(0xffffffff),
.load_base = cpu_to_be32(0xffffffff),
- .min_rma = cpu_to_be32(512), /* 512MB min RMA */
+ .min_rma = cpu_to_be32(MIN_RMA),
.min_load = cpu_to_be32(0xffffffff), /* full client load */
.min_rma_percent = 0, /* min RMA percentage of total RAM */
.max_pft_size = 48, /* max log_2(hash table size) */
@@ -2889,11 +2889,11 @@ static void __init fixup_device_tree_pmac(void)
char type[8];
phandle node;
- // Some pmacs are missing #size-cells on escc nodes
+ // Some pmacs are missing #size-cells on escc or i2s nodes
for (node = 0; prom_next_node(&node); ) {
type[0] = '\0';
prom_getprop(node, "device_type", type, sizeof(type));
- if (prom_strcmp(type, "escc"))
+ if (prom_strcmp(type, "escc") && prom_strcmp(type, "i2s"))
continue;
if (prom_getproplen(node, "#size-cells") != PROM_ERROR)
diff --git a/arch/powerpc/kernel/ptrace/ptrace.c b/arch/powerpc/kernel/ptrace/ptrace.c
index 727ed4a14545..c6997df63287 100644
--- a/arch/powerpc/kernel/ptrace/ptrace.c
+++ b/arch/powerpc/kernel/ptrace/ptrace.c
@@ -215,7 +215,7 @@ static int do_seccomp(struct pt_regs *regs)
* have already loaded -ENOSYS into r3, or seccomp has put
* something else in r3 (via SECCOMP_RET_ERRNO/TRACE).
*/
- if (__secure_computing(NULL))
+ if (__secure_computing())
return -1;
/*
diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
index d31c9799cab2..d7a738f1858d 100644
--- a/arch/powerpc/kernel/rtas.c
+++ b/arch/powerpc/kernel/rtas.c
@@ -798,66 +798,6 @@ void __init udbg_init_rtas_panel(void)
udbg_putc = call_rtas_display_status_delay;
}
-#ifdef CONFIG_UDBG_RTAS_CONSOLE
-
-/* If you think you're dying before early_init_dt_scan_rtas() does its
- * work, you can hard code the token values for your firmware here and
- * hardcode rtas.base/entry etc.
- */
-static unsigned int rtas_putchar_token = RTAS_UNKNOWN_SERVICE;
-static unsigned int rtas_getchar_token = RTAS_UNKNOWN_SERVICE;
-
-static void udbg_rtascon_putc(char c)
-{
- int tries;
-
- if (!rtas.base)
- return;
-
- /* Add CRs before LFs */
- if (c == '\n')
- udbg_rtascon_putc('\r');
-
- /* if there is more than one character to be displayed, wait a bit */
- for (tries = 0; tries < 16; tries++) {
- if (rtas_call(rtas_putchar_token, 1, 1, NULL, c) == 0)
- break;
- udelay(1000);
- }
-}
-
-static int udbg_rtascon_getc_poll(void)
-{
- int c;
-
- if (!rtas.base)
- return -1;
-
- if (rtas_call(rtas_getchar_token, 0, 2, &c))
- return -1;
-
- return c;
-}
-
-static int udbg_rtascon_getc(void)
-{
- int c;
-
- while ((c = udbg_rtascon_getc_poll()) == -1)
- ;
-
- return c;
-}
-
-
-void __init udbg_init_rtas_console(void)
-{
- udbg_putc = udbg_rtascon_putc;
- udbg_getc = udbg_rtascon_getc;
- udbg_getc_poll = udbg_rtascon_getc_poll;
-}
-#endif /* CONFIG_UDBG_RTAS_CONSOLE */
-
void rtas_progress(char *s, unsigned short hex)
{
struct device_node *root;
@@ -2135,21 +2075,6 @@ int __init early_init_dt_scan_rtas(unsigned long node,
rtas.size = *sizep;
}
-#ifdef CONFIG_UDBG_RTAS_CONSOLE
- basep = of_get_flat_dt_prop(node, "put-term-char", NULL);
- if (basep)
- rtas_putchar_token = *basep;
-
- basep = of_get_flat_dt_prop(node, "get-term-char", NULL);
- if (basep)
- rtas_getchar_token = *basep;
-
- if (rtas_putchar_token != RTAS_UNKNOWN_SERVICE &&
- rtas_getchar_token != RTAS_UNKNOWN_SERVICE)
- udbg_init_rtas_console();
-
-#endif
-
/* break now */
return 1;
}
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index e67f3048611f..7284c8021eeb 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -892,7 +892,7 @@ unsigned long memory_block_size_bytes(void)
}
#endif
-#if defined(CONFIG_PPC_INDIRECT_PIO) || defined(CONFIG_PPC_INDIRECT_MMIO)
+#ifdef CONFIG_PPC_INDIRECT_PIO
struct ppc_pci_io ppc_pci_io;
EXPORT_SYMBOL(ppc_pci_io);
#endif
diff --git a/arch/powerpc/kernel/static_call.c b/arch/powerpc/kernel/static_call.c
index 7cfd0710e757..ec3101f95e53 100644
--- a/arch/powerpc/kernel/static_call.c
+++ b/arch/powerpc/kernel/static_call.c
@@ -8,26 +8,54 @@ void arch_static_call_transform(void *site, void *tramp, void *func, bool tail)
{
int err;
bool is_ret0 = (func == __static_call_return0);
- unsigned long target = (unsigned long)(is_ret0 ? tramp + PPC_SCT_RET0 : func);
- bool is_short = is_offset_in_branch_range((long)target - (long)tramp);
-
- if (!tramp)
- return;
+ unsigned long _tramp = (unsigned long)tramp;
+ unsigned long _func = (unsigned long)func;
+ unsigned long _ret0 = _tramp + PPC_SCT_RET0;
+ bool is_short = is_offset_in_branch_range((long)func - (long)(site ? : tramp));
mutex_lock(&text_mutex);
- if (func && !is_short) {
- err = patch_ulong(tramp + PPC_SCT_DATA, target);
- if (err)
- goto out;
+ if (site && tail) {
+ if (!func)
+ err = patch_instruction(site, ppc_inst(PPC_RAW_BLR()));
+ else if (is_ret0)
+ err = patch_branch(site, _ret0, 0);
+ else if (is_short)
+ err = patch_branch(site, _func, 0);
+ else if (tramp)
+ err = patch_branch(site, _tramp, 0);
+ else
+ err = 0;
+ } else if (site) {
+ if (!func)
+ err = patch_instruction(site, ppc_inst(PPC_RAW_NOP()));
+ else if (is_ret0)
+ err = patch_instruction(site, ppc_inst(PPC_RAW_LI(_R3, 0)));
+ else if (is_short)
+ err = patch_branch(site, _func, BRANCH_SET_LINK);
+ else if (tramp)
+ err = patch_branch(site, _tramp, BRANCH_SET_LINK);
+ else
+ err = 0;
+ } else if (tramp) {
+ if (func && !is_short) {
+ err = patch_ulong(tramp + PPC_SCT_DATA, _func);
+ if (err)
+ goto out;
+ }
+
+ if (!func)
+ err = patch_instruction(tramp, ppc_inst(PPC_RAW_BLR()));
+ else if (is_ret0)
+ err = patch_branch(tramp, _ret0, 0);
+ else if (is_short)
+ err = patch_branch(tramp, _func, 0);
+ else
+ err = patch_instruction(tramp, ppc_inst(PPC_RAW_NOP()));
+ } else {
+ err = 0;
}
- if (!func)
- err = patch_instruction(tramp, ppc_inst(PPC_RAW_BLR()));
- else if (is_short)
- err = patch_branch(tramp, target, 0);
- else
- err = patch_instruction(tramp, ppc_inst(PPC_RAW_NOP()));
out:
mutex_unlock(&text_mutex);
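
The rework above patches either the call site itself or the out-of-line trampoline. For context, a typical user of this API (independent of this patch) looks roughly like the sketch below; my_hook and the handler names are placeholders, while DEFINE_STATIC_CALL(), static_call() and static_call_update() are the generic interfaces from <linux/static_call.h>.

#include <linux/static_call.h>

static int my_default_handler(int x)
{
	return x + 1;
}
DEFINE_STATIC_CALL(my_hook, my_default_handler);

static int my_fast_handler(int x)
{
	return x;
}

int consume(int x)
{
	/* Emits a direct call/branch that arch_static_call_transform() can patch. */
	return static_call(my_hook)(x);
}

void switch_handler(void)
{
	/* Retargets the call sites (and the trampoline) to my_fast_handler. */
	static_call_update(my_hook, &my_fast_handler);
}
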
diff --git a/arch/powerpc/kernel/switch.S b/arch/powerpc/kernel/switch.S
index 608c0ce7cec6..59e3ee99db0e 100644
--- a/arch/powerpc/kernel/switch.S
+++ b/arch/powerpc/kernel/switch.S
@@ -39,7 +39,6 @@ flush_branch_caches:
// Flush the link stack
.rept 64
- ANNOTATE_INTRA_FUNCTION_CALL
bl .+4
.endr
b 1f
diff --git a/arch/powerpc/kernel/syscalls/syscall.tbl b/arch/powerpc/kernel/syscalls/syscall.tbl
index d8b4ab78bef0..9a084bdb8926 100644
--- a/arch/powerpc/kernel/syscalls/syscall.tbl
+++ b/arch/powerpc/kernel/syscalls/syscall.tbl
@@ -557,3 +557,4 @@
464 common getxattrat sys_getxattrat
465 common listxattrat sys_listxattrat
466 common removexattrat sys_removexattrat
+467 common open_tree_attr sys_open_tree_attr
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 0727332ad86f..8224381c1dba 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -901,6 +901,38 @@ void secondary_cpu_time_init(void)
register_decrementer_clockevent(smp_processor_id());
}
+/*
+ * Divide a 128-bit dividend by a 32-bit divisor, leaving a 128-bit
+ * result.
+ */
+static __init void div128_by_32(u64 dividend_high, u64 dividend_low,
+ unsigned int divisor, struct div_result *dr)
+{
+ unsigned long a, b, c, d;
+ unsigned long w, x, y, z;
+ u64 ra, rb, rc;
+
+ a = dividend_high >> 32;
+ b = dividend_high & 0xffffffff;
+ c = dividend_low >> 32;
+ d = dividend_low & 0xffffffff;
+
+ w = a / divisor;
+ ra = ((u64)(a - (w * divisor)) << 32) + b;
+
+ rb = ((u64)do_div(ra, divisor) << 32) + c;
+ x = ra;
+
+ rc = ((u64)do_div(rb, divisor) << 32) + d;
+ y = rb;
+
+ do_div(rc, divisor);
+ z = rc;
+
+ dr->result_high = ((u64)w << 32) + x;
+ dr->result_low = ((u64)y << 32) + z;
+}
+
/* This function is only called on the boot processor */
void __init time_init(void)
{
@@ -950,7 +982,7 @@ void __init time_init(void)
sys_tz.tz_dsttime = 0;
}
- vdso_data->tb_ticks_per_sec = tb_ticks_per_sec;
+ vdso_k_arch_data->tb_ticks_per_sec = tb_ticks_per_sec;
#ifdef CONFIG_PPC64_PROC_SYSTEMCFG
systemcfg->tb_ticks_per_sec = tb_ticks_per_sec;
#endif
@@ -974,39 +1006,6 @@ void __init time_init(void)
enable_sched_clock_irqtime();
}
-/*
- * Divide a 128-bit dividend by a 32-bit divisor, leaving a 128 bit
- * result.
- */
-void div128_by_32(u64 dividend_high, u64 dividend_low,
- unsigned divisor, struct div_result *dr)
-{
- unsigned long a, b, c, d;
- unsigned long w, x, y, z;
- u64 ra, rb, rc;
-
- a = dividend_high >> 32;
- b = dividend_high & 0xffffffff;
- c = dividend_low >> 32;
- d = dividend_low & 0xffffffff;
-
- w = a / divisor;
- ra = ((u64)(a - (w * divisor)) << 32) + b;
-
- rb = ((u64) do_div(ra, divisor) << 32) + c;
- x = ra;
-
- rc = ((u64) do_div(rb, divisor) << 32) + d;
- y = rb;
-
- do_div(rc, divisor);
- z = rc;
-
- dr->result_high = ((u64)w << 32) + x;
- dr->result_low = ((u64)y << 32) + z;
-
-}
-
/* We don't need to calibrate delay, we use the CPU timebase for that */
void calibrate_delay(void)
{
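
The relocated div128_by_32() helper is plain schoolbook long division over four 32-bit limbs. The following standalone userspace sketch performs the same steps and checks them against native 128-bit arithmetic; it assumes a compiler with unsigned __int128 and is illustrative only.

#include <assert.h>
#include <stdint.h>

/* Same limb-by-limb long division as div128_by_32() above, userspace edition. */
static void div128_by_32_demo(uint64_t hi, uint64_t lo, uint32_t divisor,
			      uint64_t *res_hi, uint64_t *res_lo)
{
	uint64_t a = hi >> 32, b = hi & 0xffffffff;
	uint64_t c = lo >> 32, d = lo & 0xffffffff;
	uint64_t w, x, y, z, ra, rb, rc;

	w = a / divisor;
	ra = ((a - w * divisor) << 32) + b;	/* remainder joins the next limb */

	x = ra / divisor;
	rb = ((ra % divisor) << 32) + c;

	y = rb / divisor;
	rc = ((rb % divisor) << 32) + d;

	z = rc / divisor;

	*res_hi = (w << 32) + x;
	*res_lo = (y << 32) + z;
}

int main(void)
{
	unsigned __int128 n = ((unsigned __int128)0x12345678abcdef00ULL << 64)
			      | 0xfedcba9876543210ULL;
	uint64_t rh, rl;

	div128_by_32_demo((uint64_t)(n >> 64), (uint64_t)n, 1000000007u, &rh, &rl);
	assert((((unsigned __int128)rh << 64) | rl) == n / 1000000007u);
	return 0;
}
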
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index edf5cabe5dfd..cb8e9357383e 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -263,10 +263,9 @@ static int __die(const char *str, struct pt_regs *regs, long err)
{
printk("Oops: %s, sig: %ld [#%d]\n", str, err, ++die_counter);
- printk("%s PAGE_SIZE=%luK%s%s%s%s%s%s %s\n",
+ printk("%s PAGE_SIZE=%luK%s %s%s%s%s %s\n",
IS_ENABLED(CONFIG_CPU_LITTLE_ENDIAN) ? "LE" : "BE",
PAGE_SIZE / 1024, get_mmu_str(),
- IS_ENABLED(CONFIG_PREEMPT) ? " PREEMPT" : "",
IS_ENABLED(CONFIG_SMP) ? " SMP" : "",
IS_ENABLED(CONFIG_SMP) ? (" NR_CPUS=" __stringify(NR_CPUS)) : "",
debug_pagealloc_enabled() ? " DEBUG_PAGEALLOC" : "",
diff --git a/arch/powerpc/kernel/udbg.c b/arch/powerpc/kernel/udbg.c
index 0a72a537f879..862b22b2b616 100644
--- a/arch/powerpc/kernel/udbg.c
+++ b/arch/powerpc/kernel/udbg.c
@@ -36,9 +36,6 @@ void __init udbg_early_init(void)
#elif defined(CONFIG_PPC_EARLY_DEBUG_RTAS_PANEL)
/* RTAS panel debug */
udbg_init_rtas_panel();
-#elif defined(CONFIG_PPC_EARLY_DEBUG_RTAS_CONSOLE)
- /* RTAS console debug */
- udbg_init_rtas_console();
#elif defined(CONFIG_PPC_EARLY_DEBUG_PAS_REALMODE)
udbg_init_pas_realmode();
#elif defined(CONFIG_PPC_EARLY_DEBUG_BOOTX)
diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
index 43379365ce1b..219d67bcf747 100644
--- a/arch/powerpc/kernel/vdso.c
+++ b/arch/powerpc/kernel/vdso.c
@@ -17,7 +17,7 @@
#include <linux/elf.h>
#include <linux/security.h>
#include <linux/syscalls.h>
-#include <linux/time_namespace.h>
+#include <linux/vdso_datastore.h>
#include <vdso/datapage.h>
#include <asm/syscall.h>
@@ -32,6 +32,8 @@
#include <asm/vdso_datapage.h>
#include <asm/setup.h>
+static_assert(__VDSO_PAGES == VDSO_NR_PAGES);
+
/* The alignment of the vDSO */
#define VDSO_ALIGNMENT (1 << 16)
@@ -40,24 +42,6 @@ extern char vdso64_start, vdso64_end;
long sys_ni_syscall(void);
-/*
- * The vdso data page (aka. systemcfg for old ppc64 fans) is here.
- * Once the early boot kernel code no longer needs to muck around
- * with it, it will become dynamically allocated
- */
-static union {
- struct vdso_arch_data data;
- u8 page[2 * PAGE_SIZE];
-} vdso_data_store __page_aligned_data;
-struct vdso_arch_data *vdso_data = &vdso_data_store.data;
-
-enum vvar_pages {
- VVAR_BASE_PAGE_OFFSET,
- VVAR_TIME_PAGE_OFFSET,
- VVAR_TIMENS_PAGE_OFFSET,
- VVAR_NR_PAGES,
-};
-
static int vdso_mremap(const struct vm_special_mapping *sm, struct vm_area_struct *new_vma,
unsigned long text_size)
{
@@ -96,14 +80,6 @@ static void vdso_close(const struct vm_special_mapping *sm, struct vm_area_struc
mm->context.vdso = NULL;
}
-static vm_fault_t vvar_fault(const struct vm_special_mapping *sm,
- struct vm_area_struct *vma, struct vm_fault *vmf);
-
-static struct vm_special_mapping vvar_spec __ro_after_init = {
- .name = "[vvar]",
- .fault = vvar_fault,
-};
-
static struct vm_special_mapping vdso32_spec __ro_after_init = {
.name = "[vdso]",
.mremap = vdso32_mremap,
@@ -116,73 +92,6 @@ static struct vm_special_mapping vdso64_spec __ro_after_init = {
.close = vdso_close,
};
-#ifdef CONFIG_TIME_NS
-struct vdso_data *arch_get_vdso_data(void *vvar_page)
-{
- return vvar_page;
-}
-
-/*
- * The vvar mapping contains data for a specific time namespace, so when a task
- * changes namespace we must unmap its vvar data for the old namespace.
- * Subsequent faults will map in data for the new namespace.
- *
- * For more details see timens_setup_vdso_data().
- */
-int vdso_join_timens(struct task_struct *task, struct time_namespace *ns)
-{
- struct mm_struct *mm = task->mm;
- VMA_ITERATOR(vmi, mm, 0);
- struct vm_area_struct *vma;
-
- mmap_read_lock(mm);
- for_each_vma(vmi, vma) {
- if (vma_is_special_mapping(vma, &vvar_spec))
- zap_vma_pages(vma);
- }
- mmap_read_unlock(mm);
-
- return 0;
-}
-#endif
-
-static vm_fault_t vvar_fault(const struct vm_special_mapping *sm,
- struct vm_area_struct *vma, struct vm_fault *vmf)
-{
- struct page *timens_page = find_timens_vvar_page(vma);
- unsigned long pfn;
-
- switch (vmf->pgoff) {
- case VVAR_BASE_PAGE_OFFSET:
- pfn = virt_to_pfn(vdso_data);
- break;
- case VVAR_TIME_PAGE_OFFSET:
- if (timens_page)
- pfn = page_to_pfn(timens_page);
- else
- pfn = virt_to_pfn(vdso_data->data);
- break;
-#ifdef CONFIG_TIME_NS
- case VVAR_TIMENS_PAGE_OFFSET:
- /*
- * If a task belongs to a time namespace then a namespace
- * specific VVAR is mapped with the VVAR_DATA_PAGE_OFFSET and
- * the real VVAR page is mapped with the VVAR_TIMENS_PAGE_OFFSET
- * offset.
- * See also the comment near timens_setup_vdso_data().
- */
- if (!timens_page)
- return VM_FAULT_SIGBUS;
- pfn = virt_to_pfn(vdso_data->data);
- break;
-#endif /* CONFIG_TIME_NS */
- default:
- return VM_FAULT_SIGBUS;
- }
-
- return vmf_insert_pfn(vma, vmf->address, pfn);
-}
-
/*
* This is called from binfmt_elf, we create the special vma for the
* vDSO and insert it into the mm struct tree
@@ -191,7 +100,7 @@ static int __arch_setup_additional_pages(struct linux_binprm *bprm, int uses_int
{
unsigned long vdso_size, vdso_base, mappings_size;
struct vm_special_mapping *vdso_spec;
- unsigned long vvar_size = VVAR_NR_PAGES * PAGE_SIZE;
+ unsigned long vvar_size = VDSO_NR_PAGES * PAGE_SIZE;
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
@@ -217,9 +126,7 @@ static int __arch_setup_additional_pages(struct linux_binprm *bprm, int uses_int
/* Add required alignment. */
vdso_base = ALIGN(vdso_base, VDSO_ALIGNMENT);
- vma = _install_special_mapping(mm, vdso_base, vvar_size,
- VM_READ | VM_MAYREAD | VM_IO |
- VM_DONTDUMP | VM_PFNMAP, &vvar_spec);
+ vma = vdso_install_vvar_mapping(mm, vdso_base);
if (IS_ERR(vma))
return PTR_ERR(vma);
@@ -299,10 +206,10 @@ static void __init vdso_setup_syscall_map(void)
for (i = 0; i < NR_syscalls; i++) {
if (sys_call_table[i] != (void *)&sys_ni_syscall)
- vdso_data->syscall_map[i >> 5] |= 0x80000000UL >> (i & 0x1f);
+ vdso_k_arch_data->syscall_map[i >> 5] |= 0x80000000UL >> (i & 0x1f);
if (IS_ENABLED(CONFIG_COMPAT) &&
compat_sys_call_table[i] != (void *)&sys_ni_syscall)
- vdso_data->compat_syscall_map[i >> 5] |= 0x80000000UL >> (i & 0x1f);
+ vdso_k_arch_data->compat_syscall_map[i >> 5] |= 0x80000000UL >> (i & 0x1f);
}
}
@@ -352,10 +259,10 @@ static struct page ** __init vdso_setup_pages(void *start, void *end)
static int __init vdso_init(void)
{
#ifdef CONFIG_PPC64
- vdso_data->dcache_block_size = ppc64_caches.l1d.block_size;
- vdso_data->icache_block_size = ppc64_caches.l1i.block_size;
- vdso_data->dcache_log_block_size = ppc64_caches.l1d.log_block_size;
- vdso_data->icache_log_block_size = ppc64_caches.l1i.log_block_size;
+ vdso_k_arch_data->dcache_block_size = ppc64_caches.l1d.block_size;
+ vdso_k_arch_data->icache_block_size = ppc64_caches.l1i.block_size;
+ vdso_k_arch_data->dcache_log_block_size = ppc64_caches.l1d.log_block_size;
+ vdso_k_arch_data->icache_log_block_size = ppc64_caches.l1i.log_block_size;
#endif /* CONFIG_PPC64 */
vdso_setup_syscall_map();
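
The syscall map written above is an MSB-first bitmap: syscall number i lands in 32-bit word i >> 5, under mask 0x80000000 >> (i & 0x1f). A tiny standalone sketch of the matching set/test pair (purely illustrative, not kernel code):

#include <assert.h>
#include <stdint.h>

#define NSYSCALLS 512

static uint32_t syscall_map[(NSYSCALLS + 31) / 32];

static void map_set(unsigned int i)
{
	syscall_map[i >> 5] |= 0x80000000UL >> (i & 0x1f);
}

static int map_test(unsigned int i)
{
	return (syscall_map[i >> 5] >> (31 - (i & 0x1f))) & 1;
}

int main(void)
{
	map_set(0);
	map_set(467);	/* e.g. the open_tree_attr entry added in this series */
	assert(map_test(0) && map_test(467) && !map_test(1));
	return 0;
}
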
diff --git a/arch/powerpc/kernel/vdso/Makefile b/arch/powerpc/kernel/vdso/Makefile
index 0e3ed6fb199f..e8824f933326 100644
--- a/arch/powerpc/kernel/vdso/Makefile
+++ b/arch/powerpc/kernel/vdso/Makefile
@@ -3,7 +3,7 @@
# List of files in the vdso, has to be asm only for now
# Include the generic Makefile to check the built vdso.
-include $(srctree)/lib/vdso/Makefile
+include $(srctree)/lib/vdso/Makefile.include
obj-vdso32 = sigtramp32-32.o gettimeofday-32.o datapage-32.o cacheflush-32.o note-32.o getcpu-32.o
obj-vdso64 = sigtramp64-64.o gettimeofday-64.o datapage-64.o cacheflush-64.o note-64.o getcpu-64.o
diff --git a/arch/powerpc/kernel/vdso/cacheflush.S b/arch/powerpc/kernel/vdso/cacheflush.S
index 0085ae464dac..488d3ade11e6 100644
--- a/arch/powerpc/kernel/vdso/cacheflush.S
+++ b/arch/powerpc/kernel/vdso/cacheflush.S
@@ -30,7 +30,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
#ifdef CONFIG_PPC64
mflr r12
.cfi_register lr,r12
- get_datapage r10
+ get_datapage r10 vdso_u_arch_data
mtlr r12
.cfi_restore lr
#endif
diff --git a/arch/powerpc/kernel/vdso/datapage.S b/arch/powerpc/kernel/vdso/datapage.S
index db8e167f0166..d23b2e8e2a34 100644
--- a/arch/powerpc/kernel/vdso/datapage.S
+++ b/arch/powerpc/kernel/vdso/datapage.S
@@ -28,7 +28,7 @@ V_FUNCTION_BEGIN(__kernel_get_syscall_map)
mflr r12
.cfi_register lr,r12
mr. r4,r3
- get_datapage r3
+ get_datapage r3 vdso_u_arch_data
mtlr r12
#ifdef __powerpc64__
addi r3,r3,CFG_SYSCALL_MAP64
@@ -52,7 +52,7 @@ V_FUNCTION_BEGIN(__kernel_get_tbfreq)
.cfi_startproc
mflr r12
.cfi_register lr,r12
- get_datapage r3
+ get_datapage r3 vdso_u_arch_data
#ifndef __powerpc64__
lwz r4,(CFG_TB_TICKS_PER_SEC + 4)(r3)
#endif
diff --git a/arch/powerpc/kernel/vdso/gettimeofday.S b/arch/powerpc/kernel/vdso/gettimeofday.S
index 5333848322ca..79c967212444 100644
--- a/arch/powerpc/kernel/vdso/gettimeofday.S
+++ b/arch/powerpc/kernel/vdso/gettimeofday.S
@@ -33,9 +33,9 @@
.cfi_rel_offset r2, PPC_MIN_STKFRM + STK_GOT
#endif
.ifeq \call_time
- get_datapage r5 VDSO_DATA_OFFSET
+ get_datapage r5 vdso_u_time_data
.else
- get_datapage r4 VDSO_DATA_OFFSET
+ get_datapage r4 vdso_u_time_data
.endif
bl CFUNC(DOTSYM(\funct))
PPC_LL r0, PPC_MIN_STKFRM + PPC_LR_STKOFF(r1)
diff --git a/arch/powerpc/kernel/vdso/vdso32.lds.S b/arch/powerpc/kernel/vdso/vdso32.lds.S
index 1a1b0b6d681a..72a1012b8a20 100644
--- a/arch/powerpc/kernel/vdso/vdso32.lds.S
+++ b/arch/powerpc/kernel/vdso/vdso32.lds.S
@@ -6,6 +6,7 @@
#include <asm/vdso.h>
#include <asm/page.h>
#include <asm-generic/vmlinux.lds.h>
+#include <vdso/datapage.h>
#ifdef __LITTLE_ENDIAN__
OUTPUT_FORMAT("elf32-powerpcle", "elf32-powerpcle", "elf32-powerpcle")
@@ -16,7 +17,8 @@ OUTPUT_ARCH(powerpc:common)
SECTIONS
{
- PROVIDE(_vdso_datapage = . - 3 * PAGE_SIZE);
+ VDSO_VVAR_SYMS
+
. = SIZEOF_HEADERS;
.hash : { *(.hash) } :text
diff --git a/arch/powerpc/kernel/vdso/vdso64.lds.S b/arch/powerpc/kernel/vdso/vdso64.lds.S
index e21b5506cad6..32102a05eaa7 100644
--- a/arch/powerpc/kernel/vdso/vdso64.lds.S
+++ b/arch/powerpc/kernel/vdso/vdso64.lds.S
@@ -6,6 +6,7 @@
#include <asm/vdso.h>
#include <asm/page.h>
#include <asm-generic/vmlinux.lds.h>
+#include <vdso/datapage.h>
#ifdef __LITTLE_ENDIAN__
OUTPUT_FORMAT("elf64-powerpcle", "elf64-powerpcle", "elf64-powerpcle")
@@ -16,7 +17,8 @@ OUTPUT_ARCH(powerpc:common64)
SECTIONS
{
- PROVIDE(_vdso_datapage = . - 3 * PAGE_SIZE);
+ VDSO_VVAR_SYMS
+
. = SIZEOF_HEADERS;
.hash : { *(.hash) } :text
diff --git a/arch/powerpc/kernel/vdso/vgettimeofday.c b/arch/powerpc/kernel/vdso/vgettimeofday.c
index 55a287c9a736..6f5167d81af5 100644
--- a/arch/powerpc/kernel/vdso/vgettimeofday.c
+++ b/arch/powerpc/kernel/vdso/vgettimeofday.c
@@ -7,43 +7,43 @@
#ifdef __powerpc64__
int __c_kernel_clock_gettime(clockid_t clock, struct __kernel_timespec *ts,
- const struct vdso_data *vd)
+ const struct vdso_time_data *vd)
{
return __cvdso_clock_gettime_data(vd, clock, ts);
}
int __c_kernel_clock_getres(clockid_t clock_id, struct __kernel_timespec *res,
- const struct vdso_data *vd)
+ const struct vdso_time_data *vd)
{
return __cvdso_clock_getres_data(vd, clock_id, res);
}
#else
int __c_kernel_clock_gettime(clockid_t clock, struct old_timespec32 *ts,
- const struct vdso_data *vd)
+ const struct vdso_time_data *vd)
{
return __cvdso_clock_gettime32_data(vd, clock, ts);
}
int __c_kernel_clock_gettime64(clockid_t clock, struct __kernel_timespec *ts,
- const struct vdso_data *vd)
+ const struct vdso_time_data *vd)
{
return __cvdso_clock_gettime_data(vd, clock, ts);
}
int __c_kernel_clock_getres(clockid_t clock_id, struct old_timespec32 *res,
- const struct vdso_data *vd)
+ const struct vdso_time_data *vd)
{
return __cvdso_clock_getres_time32_data(vd, clock_id, res);
}
#endif
int __c_kernel_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz,
- const struct vdso_data *vd)
+ const struct vdso_time_data *vd)
{
return __cvdso_gettimeofday_data(vd, tv, tz);
}
-__kernel_old_time_t __c_kernel_time(__kernel_old_time_t *time, const struct vdso_data *vd)
+__kernel_old_time_t __c_kernel_time(__kernel_old_time_t *time, const struct vdso_time_data *vd)
{
return __cvdso_time_data(vd, time);
}
diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
index b4c9decc7a75..de6ee7d35cff 100644
--- a/arch/powerpc/kernel/vmlinux.lds.S
+++ b/arch/powerpc/kernel/vmlinux.lds.S
@@ -1,10 +1,4 @@
/* SPDX-License-Identifier: GPL-2.0 */
-#ifdef CONFIG_PPC64
-#define PROVIDE32(x) PROVIDE(__unused__##x)
-#else
-#define PROVIDE32(x) PROVIDE(x)
-#endif
-
#define BSS_FIRST_SECTIONS *(.bss.prominit)
#define EMITS_PT_NOTE
#define RO_EXCEPTION_TABLE_ALIGN 0
@@ -127,7 +121,6 @@ SECTIONS
. = ALIGN(PAGE_SIZE);
_etext = .;
- PROVIDE32 (etext = .);
/* Read-only data */
RO_DATA(PAGE_SIZE)
@@ -394,7 +387,6 @@ SECTIONS
. = ALIGN(PAGE_SIZE);
_edata = .;
- PROVIDE32 (edata = .);
/*
* And finally the bss
@@ -404,7 +396,6 @@ SECTIONS
. = ALIGN(PAGE_SIZE);
_end = . ;
- PROVIDE32 (end = .);
DWARF_DEBUG
ELF_DETAILS
diff --git a/arch/powerpc/kernel/watchdog.c b/arch/powerpc/kernel/watchdog.c
index 8c464a5d8246..2429cb1c7baa 100644
--- a/arch/powerpc/kernel/watchdog.c
+++ b/arch/powerpc/kernel/watchdog.c
@@ -495,8 +495,7 @@ static void start_watchdog(void *arg)
*this_cpu_ptr(&wd_timer_tb) = get_tb();
- hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- hrtimer->function = watchdog_timer_fn;
+ hrtimer_setup(hrtimer, watchdog_timer_fn, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
hrtimer_start(hrtimer, ms_to_ktime(wd_timer_period_ms),
HRTIMER_MODE_REL_PINNED);
}
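
The watchdog change above is part of the tree-wide conversion from open-coded hrtimer_init() plus a manual .function assignment to hrtimer_setup(), which takes the callback directly. A hedged before/after sketch -- the timer and callback names are placeholders:

#include <linux/hrtimer.h>

static enum hrtimer_restart my_timer_fn(struct hrtimer *t)
{
	return HRTIMER_NORESTART;
}

static struct hrtimer my_timer;

void my_init(void)
{
	/* Old style: init, then assign the callback by hand.
	 *   hrtimer_init(&my_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	 *   my_timer.function = my_timer_fn;
	 */

	/* New style, as used in the hunk above: one call does both. */
	hrtimer_setup(&my_timer, my_timer_fn, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
}
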
diff --git a/arch/powerpc/kexec/relocate_32.S b/arch/powerpc/kexec/relocate_32.S
index 104c9911f406..dd86e338307d 100644
--- a/arch/powerpc/kexec/relocate_32.S
+++ b/arch/powerpc/kexec/relocate_32.S
@@ -348,16 +348,13 @@ write_utlb:
rlwinm r10, r24, 0, 22, 27
cmpwi r10, PPC47x_TLB0_4K
- bne 0f
li r10, 0x1000 /* r10 = 4k */
- ANNOTATE_INTRA_FUNCTION_CALL
- bl 1f
+ beq 0f
-0:
/* Defaults to 256M */
lis r10, 0x1000
- bcl 20,31,$+4
+0: bcl 20,31,$+4
1: mflr r4
addi r4, r4, (2f-1b) /* virtual address of 2f */
diff --git a/arch/powerpc/kvm/book3s_32_mmu_host.c b/arch/powerpc/kvm/book3s_32_mmu_host.c
index 5b7212edbb13..c7e4b62642ea 100644
--- a/arch/powerpc/kvm/book3s_32_mmu_host.c
+++ b/arch/powerpc/kvm/book3s_32_mmu_host.c
@@ -125,8 +125,6 @@ static u32 *kvmppc_mmu_get_pteg(struct kvm_vcpu *vcpu, u32 vsid, u32 eaddr,
return (u32*)pteg;
}
-extern char etext[];
-
int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte,
bool iswrite)
{
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index ea7ad200b330..83f7504349d2 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -1524,14 +1524,12 @@ kvm_flush_link_stack:
/* Flush the link stack. On Power8 it's up to 32 entries in size. */
.rept 32
- ANNOTATE_INTRA_FUNCTION_CALL
bl .+4
.endr
/* And on Power9 it's up to 64. */
BEGIN_FTR_SECTION
.rept 32
- ANNOTATE_INTRA_FUNCTION_CALL
bl .+4
.endr
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index ce1d91eed231..153587741864 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -550,12 +550,9 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
#ifdef CONFIG_PPC_BOOK3S_64
case KVM_CAP_SPAPR_TCE:
+ fallthrough;
case KVM_CAP_SPAPR_TCE_64:
- r = 1;
- break;
case KVM_CAP_SPAPR_TCE_VFIO:
- r = !!cpu_has_feature(CPU_FTR_HVMODE);
- break;
case KVM_CAP_PPC_RTAS:
case KVM_CAP_PPC_FIXUP_HCALL:
case KVM_CAP_PPC_ENABLE_HCALL:
@@ -766,8 +763,8 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
{
int err;
- hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
- vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup;
+ hrtimer_setup(&vcpu->arch.dec_timer, kvmppc_decrementer_wakeup, CLOCK_REALTIME,
+ HRTIMER_MODE_ABS);
#ifdef CONFIG_KVM_EXIT_TIMING
mutex_init(&vcpu->arch.exit_timing_lock);
diff --git a/arch/powerpc/lib/crc-t10dif-glue.c b/arch/powerpc/lib/crc-t10dif-glue.c
index 730850dbc51d..f411b0120cc5 100644
--- a/arch/powerpc/lib/crc-t10dif-glue.c
+++ b/arch/powerpc/lib/crc-t10dif-glue.c
@@ -78,12 +78,6 @@ static void __exit crc_t10dif_powerpc_exit(void)
}
module_exit(crc_t10dif_powerpc_exit);
-bool crc_t10dif_is_optimized(void)
-{
- return static_key_enabled(&have_vec_crypto);
-}
-EXPORT_SYMBOL(crc_t10dif_is_optimized);
-
MODULE_AUTHOR("Daniel Axtens <dja@axtens.net>");
MODULE_DESCRIPTION("CRCT10DIF using vector polynomial multiply-sum instructions");
MODULE_LICENSE("GPL");
diff --git a/arch/powerpc/lib/crc32-glue.c b/arch/powerpc/lib/crc32-glue.c
index 79cc954f499f..dbd10f339183 100644
--- a/arch/powerpc/lib/crc32-glue.c
+++ b/arch/powerpc/lib/crc32-glue.c
@@ -23,18 +23,18 @@ u32 crc32_le_arch(u32 crc, const u8 *p, size_t len)
}
EXPORT_SYMBOL(crc32_le_arch);
-u32 crc32c_le_arch(u32 crc, const u8 *p, size_t len)
+u32 crc32c_arch(u32 crc, const u8 *p, size_t len)
{
unsigned int prealign;
unsigned int tail;
if (len < (VECTOR_BREAKPOINT + VMX_ALIGN) ||
!static_branch_likely(&have_vec_crypto) || !crypto_simd_usable())
- return crc32c_le_base(crc, p, len);
+ return crc32c_base(crc, p, len);
if ((unsigned long)p & VMX_ALIGN_MASK) {
prealign = VMX_ALIGN - ((unsigned long)p & VMX_ALIGN_MASK);
- crc = crc32c_le_base(crc, p, prealign);
+ crc = crc32c_base(crc, p, prealign);
len -= prealign;
p += prealign;
}
@@ -52,12 +52,12 @@ u32 crc32c_le_arch(u32 crc, const u8 *p, size_t len)
tail = len & VMX_ALIGN_MASK;
if (tail) {
p += len & ~VMX_ALIGN_MASK;
- crc = crc32c_le_base(crc, p, tail);
+ crc = crc32c_base(crc, p, tail);
}
return crc;
}
-EXPORT_SYMBOL(crc32c_le_arch);
+EXPORT_SYMBOL(crc32c_arch);
u32 crc32_be_arch(u32 crc, const u8 *p, size_t len)
{
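
The prealign/tail handling above is the usual pattern for vector CRC routines: feed only whole aligned blocks to the vector kernel and fall back to the scalar routine for the unaligned head and the short tail. A generic illustrative sketch follows; crc_scalar()/crc_vector() are stand-ins for crc32c_base() and the VMX kernel, not the functions in this file.

#include <stddef.h>
#include <stdint.h>

#define ALIGN_BYTES	16u
#define ALIGN_MASK	(ALIGN_BYTES - 1)

/* Stand-in for the scalar fallback (crc32c_base() in the file above). */
static uint32_t crc_scalar(uint32_t crc, const uint8_t *p, size_t len)
{
	while (len--) {
		crc ^= *p++;
		for (int i = 0; i < 8; i++)	/* bitwise CRC-32C, reflected poly */
			crc = (crc >> 1) ^ (0x82F63B78u & -(crc & 1));
	}
	return crc;
}

/* Stand-in for the vector kernel that only accepts whole aligned blocks. */
static uint32_t crc_vector(uint32_t crc, const uint8_t *p, size_t len)
{
	return crc_scalar(crc, p, len);
}

uint32_t crc_split(uint32_t crc, const uint8_t *p, size_t len)
{
	size_t aligned, prealign, tail;

	if ((uintptr_t)p & ALIGN_MASK) {
		prealign = ALIGN_BYTES - ((uintptr_t)p & ALIGN_MASK);
		if (prealign > len)
			prealign = len;
		crc = crc_scalar(crc, p, prealign);	/* unaligned head */
		p += prealign;
		len -= prealign;
	}

	aligned = len & ~(size_t)ALIGN_MASK;
	crc = crc_vector(crc, p, aligned);		/* aligned middle */

	tail = len & ALIGN_MASK;
	if (tail)
		crc = crc_scalar(crc, p + aligned, tail); /* short tail */

	return crc;
}
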
diff --git a/arch/powerpc/mm/book3s64/hash_utils.c b/arch/powerpc/mm/book3s64/hash_utils.c
index c8b4fa71d4a7..734610052cf4 100644
--- a/arch/powerpc/mm/book3s64/hash_utils.c
+++ b/arch/powerpc/mm/book3s64/hash_utils.c
@@ -1358,18 +1358,6 @@ static void __init htab_initialize(void)
} else {
unsigned long limit = MEMBLOCK_ALLOC_ANYWHERE;
-#ifdef CONFIG_PPC_CELL
- /*
- * Cell may require the hash table down low when using the
- * Axon IOMMU in order to fit the dynamic region over it, see
- * comments in cell/iommu.c
- */
- if (fdt_subnode_offset(initial_boot_params, 0, "axon") > 0) {
- limit = 0x80000000;
- pr_info("Hash table forced below 2G for Axon IOMMU\n");
- }
-#endif /* CONFIG_PPC_CELL */
-
table = memblock_phys_alloc_range(htab_size_bytes,
htab_size_bytes,
0, limit);
diff --git a/arch/powerpc/mm/book3s64/pgtable.c b/arch/powerpc/mm/book3s64/pgtable.c
index ce64abea9e3e..c0c45d033cba 100644
--- a/arch/powerpc/mm/book3s64/pgtable.c
+++ b/arch/powerpc/mm/book3s64/pgtable.c
@@ -587,7 +587,7 @@ int pmd_move_must_withdraw(struct spinlock *new_pmd_ptl,
/*
* Does the CPU support tlbie?
*/
-bool tlbie_capable __read_mostly = true;
+bool tlbie_capable __read_mostly = IS_ENABLED(CONFIG_PPC_RADIX_BROADCAST_TLBIE);
EXPORT_SYMBOL(tlbie_capable);
/*
@@ -595,7 +595,7 @@ EXPORT_SYMBOL(tlbie_capable);
* address spaces? tlbie may still be used for nMMU accelerators, and for KVM
* guest address spaces.
*/
-bool tlbie_enabled __read_mostly = true;
+bool tlbie_enabled __read_mostly = IS_ENABLED(CONFIG_PPC_RADIX_BROADCAST_TLBIE);
static int __init setup_disable_tlbie(char *str)
{
diff --git a/arch/powerpc/mm/ioremap.c b/arch/powerpc/mm/ioremap.c
index 7b0afcabd89f..70b08bf3dd1f 100644
--- a/arch/powerpc/mm/ioremap.c
+++ b/arch/powerpc/mm/ioremap.c
@@ -4,7 +4,6 @@
#include <linux/slab.h>
#include <linux/mmzone.h>
#include <linux/vmalloc.h>
-#include <asm/io-workarounds.h>
unsigned long ioremap_bot;
EXPORT_SYMBOL(ioremap_bot);
@@ -14,8 +13,6 @@ void __iomem *ioremap(phys_addr_t addr, unsigned long size)
pgprot_t prot = pgprot_noncached(PAGE_KERNEL);
void *caller = __builtin_return_address(0);
- if (iowa_is_active())
- return iowa_ioremap(addr, size, prot, caller);
return __ioremap_caller(addr, size, prot, caller);
}
EXPORT_SYMBOL(ioremap);
@@ -25,8 +22,6 @@ void __iomem *ioremap_wc(phys_addr_t addr, unsigned long size)
pgprot_t prot = pgprot_noncached_wc(PAGE_KERNEL);
void *caller = __builtin_return_address(0);
- if (iowa_is_active())
- return iowa_ioremap(addr, size, prot, caller);
return __ioremap_caller(addr, size, prot, caller);
}
EXPORT_SYMBOL(ioremap_wc);
@@ -36,8 +31,6 @@ void __iomem *ioremap_coherent(phys_addr_t addr, unsigned long size)
pgprot_t prot = pgprot_cached(PAGE_KERNEL);
void *caller = __builtin_return_address(0);
- if (iowa_is_active())
- return iowa_ioremap(addr, size, prot, caller);
return __ioremap_caller(addr, size, prot, caller);
}
@@ -50,8 +43,6 @@ void __iomem *ioremap_prot(phys_addr_t addr, size_t size, unsigned long flags)
if (pte_write(pte))
pte = pte_mkdirty(pte);
- if (iowa_is_active())
- return iowa_ioremap(addr, size, pte_pgprot(pte), caller);
return __ioremap_caller(addr, size, pte_pgprot(pte), caller);
}
EXPORT_SYMBOL(ioremap_prot);
diff --git a/arch/powerpc/mm/ioremap_64.c b/arch/powerpc/mm/ioremap_64.c
index d24e5f166723..fb8b55bd2cd5 100644
--- a/arch/powerpc/mm/ioremap_64.c
+++ b/arch/powerpc/mm/ioremap_64.c
@@ -52,6 +52,6 @@ void iounmap(volatile void __iomem *token)
if (!slab_is_available())
return;
- generic_iounmap(PCI_FIX_ADDR(token));
+ generic_iounmap(token);
}
EXPORT_SYMBOL(iounmap);
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index c7708c8fad29..34806c858e54 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -319,28 +319,6 @@ void __init mem_init(void)
per_cpu(next_tlbcam_idx, smp_processor_id()) =
(mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) - 1;
#endif
-
-#ifdef CONFIG_PPC32
- pr_info("Kernel virtual memory layout:\n");
-#ifdef CONFIG_KASAN
- pr_info(" * 0x%08lx..0x%08lx : kasan shadow mem\n",
- KASAN_SHADOW_START, KASAN_SHADOW_END);
-#endif
- pr_info(" * 0x%08lx..0x%08lx : fixmap\n", FIXADDR_START, FIXADDR_TOP);
-#ifdef CONFIG_HIGHMEM
- pr_info(" * 0x%08lx..0x%08lx : highmem PTEs\n",
- PKMAP_BASE, PKMAP_ADDR(LAST_PKMAP));
-#endif /* CONFIG_HIGHMEM */
- if (ioremap_bot != IOREMAP_TOP)
- pr_info(" * 0x%08lx..0x%08lx : early ioremap\n",
- ioremap_bot, IOREMAP_TOP);
- pr_info(" * 0x%08lx..0x%08lx : vmalloc & ioremap\n",
- VMALLOC_START, VMALLOC_END);
-#ifdef MODULES_VADDR
- pr_info(" * 0x%08lx..0x%08lx : modules\n",
- MODULES_VADDR, MODULES_END);
-#endif
-#endif /* CONFIG_PPC32 */
}
void free_initmem(void)
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index 3c1da08304d0..603a0f652ba6 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -1336,7 +1336,7 @@ int hot_add_scn_to_nid(unsigned long scn_addr)
return nid;
}
-static u64 hot_add_drconf_memory_max(void)
+u64 hot_add_drconf_memory_max(void)
{
struct device_node *memory = NULL;
struct device_node *dn = NULL;
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index 2b79171ee185..b906d28f74fd 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -132,7 +132,10 @@ static unsigned long ebb_switch_in(bool ebb, struct cpu_hw_events *cpuhw)
static inline void power_pmu_bhrb_enable(struct perf_event *event) {}
static inline void power_pmu_bhrb_disable(struct perf_event *event) {}
-static void power_pmu_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched_in) {}
+static void power_pmu_sched_task(struct perf_event_pmu_context *pmu_ctx,
+ struct task_struct *task, bool sched_in)
+{
+}
static inline void power_pmu_bhrb_read(struct perf_event *event, struct cpu_hw_events *cpuhw) {}
static void pmao_restore_workaround(bool ebb) { }
#endif /* CONFIG_PPC32 */
@@ -444,7 +447,8 @@ static void power_pmu_bhrb_disable(struct perf_event *event)
/* Called from ctxsw to prevent one process's branch entries to
* mingle with the other process's entries during context switch.
*/
-static void power_pmu_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched_in)
+static void power_pmu_sched_task(struct perf_event_pmu_context *pmu_ctx,
+ struct task_struct *task, bool sched_in)
{
if (!ppmu->bhrb_nr)
return;
@@ -2222,6 +2226,10 @@ static struct pmu power_pmu = {
#define PERF_SAMPLE_ADDR_TYPE (PERF_SAMPLE_ADDR | \
PERF_SAMPLE_PHYS_ADDR | \
PERF_SAMPLE_DATA_PAGE_SIZE)
+
+#define SIER_TYPE_SHIFT 15
+#define SIER_TYPE_MASK (0x7ull << SIER_TYPE_SHIFT)
+
/*
* A counter has overflowed; update its count and record
* things if requested. Note that interrupts are hard-disabled
@@ -2291,6 +2299,22 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
record = 0;
/*
+	 * SIER[46-48] presents the instruction type of the sampled instruction.
+	 * In ISA v3.0 and earlier, values "0" and "7" are considered reserved.
+	 * In ISA v3.1, value "7" is used to indicate "larx/stcx".
+	 * Drop the sample, with an ISA version check, if "type" holds a
+	 * reserved value for this field.
+ */
+ if (event->attr.sample_type & PERF_SAMPLE_DATA_SRC &&
+ ppmu->get_mem_data_src) {
+ val = (regs->dar & SIER_TYPE_MASK) >> SIER_TYPE_SHIFT;
+ if (val == 0 || (val == 7 && !cpu_has_feature(CPU_FTR_ARCH_31))) {
+ record = 0;
+ atomic64_inc(&event->lost_samples);
+ }
+ }
+
+ /*
* Finally record data if requested.
*/
if (record) {
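
The new filter keys off the sampled-instruction type field in SIER. In IBM big-endian bit numbering, SIER[46:48] of a 64-bit register corresponds to a right shift of 15 and a 3-bit mask, which is exactly what SIER_TYPE_SHIFT/SIER_TYPE_MASK above encode. A small illustrative extraction helper (the ISA v3.1 knowledge is hard-coded here purely for the demo):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define SIER_TYPE_SHIFT	15
#define SIER_TYPE_MASK	(0x7ull << SIER_TYPE_SHIFT)

/* Returns true when the sample should be dropped, mirroring the hunk above. */
static bool sier_type_is_reserved(uint64_t sier, bool has_isa_v3_1)
{
	uint64_t type = (sier & SIER_TYPE_MASK) >> SIER_TYPE_SHIFT;

	return type == 0 || (type == 7 && !has_isa_v3_1);
}

int main(void)
{
	assert(sier_type_is_reserved(0, true));				/* type 0: always reserved */
	assert(sier_type_is_reserved(7ull << SIER_TYPE_SHIFT, false));	/* type 7 reserved before ISA v3.1 */
	assert(!sier_type_is_reserved(7ull << SIER_TYPE_SHIFT, true));	/* type 7 = larx/stcx on ISA v3.1 */
	assert(!sier_type_is_reserved(1ull << SIER_TYPE_SHIFT, false));	/* load/store types pass */
	return 0;
}
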
diff --git a/arch/powerpc/perf/isa207-common.c b/arch/powerpc/perf/isa207-common.c
index 56301b2bc8ae..2b3547fdba4a 100644
--- a/arch/powerpc/perf/isa207-common.c
+++ b/arch/powerpc/perf/isa207-common.c
@@ -319,10 +319,18 @@ void isa207_get_mem_data_src(union perf_mem_data_src *dsrc, u32 flags,
return;
}
- sier = mfspr(SPRN_SIER);
+ /*
+	 * Use regs->dar for SPRN_SIER, which is saved
+	 * during perf_read_regs at the beginning
+	 * of the PMU interrupt handler, to avoid multiple
+	 * reads of SPRN_SIER.
+ */
+ sier = regs->dar;
val = (sier & ISA207_SIER_TYPE_MASK) >> ISA207_SIER_TYPE_SHIFT;
- if (val != 1 && val != 2 && !(val == 7 && cpu_has_feature(CPU_FTR_ARCH_31)))
+ if (val != 1 && val != 2 && !(val == 7 && cpu_has_feature(CPU_FTR_ARCH_31))) {
+ dsrc->val = 0;
return;
+ }
idx = (sier & ISA207_SIER_LDST_MASK) >> ISA207_SIER_LDST_SHIFT;
sub_idx = (sier & ISA207_SIER_DATA_SRC_MASK) >> ISA207_SIER_DATA_SRC_SHIFT;
@@ -338,8 +346,12 @@ void isa207_get_mem_data_src(union perf_mem_data_src *dsrc, u32 flags,
* to determine the exact instruction type. If the sampling
* criteria is neither load or store, set the type as default
* to NA.
+ *
+	 * Use regs->dsisr for MMCRA, which is saved during perf_read_regs
+	 * at the beginning of the PMU interrupt handler, to avoid
+	 * multiple reads of SPRN_MMCRA.
*/
- mmcra = mfspr(SPRN_MMCRA);
+ mmcra = regs->dsisr;
op_type = (mmcra >> MMCRA_SAMP_ELIG_SHIFT) & MMCRA_SAMP_ELIG_MASK;
switch (op_type) {
diff --git a/arch/powerpc/perf/vpa-pmu.c b/arch/powerpc/perf/vpa-pmu.c
index 6a5bfd2a13b5..840733468959 100644
--- a/arch/powerpc/perf/vpa-pmu.c
+++ b/arch/powerpc/perf/vpa-pmu.c
@@ -156,6 +156,7 @@ static void vpa_pmu_del(struct perf_event *event, int flags)
}
static struct pmu vpa_pmu = {
+ .module = THIS_MODULE,
.task_ctx_nr = perf_sw_context,
.name = "vpa_pmu",
.event_init = vpa_pmu_event_init,
diff --git a/arch/powerpc/platforms/44x/uic.c b/arch/powerpc/platforms/44x/uic.c
index e3e148b9dd18..8b03ae4cb3f6 100644
--- a/arch/powerpc/platforms/44x/uic.c
+++ b/arch/powerpc/platforms/44x/uic.c
@@ -37,7 +37,7 @@
#define UIC_VR 0x7
#define UIC_VCR 0x8
-struct uic *primary_uic;
+static struct uic *primary_uic;
struct uic {
int index;
diff --git a/arch/powerpc/platforms/Kconfig b/arch/powerpc/platforms/Kconfig
index a454149ae02f..fea3766eac0f 100644
--- a/arch/powerpc/platforms/Kconfig
+++ b/arch/powerpc/platforms/Kconfig
@@ -70,10 +70,6 @@ config PPC_DT_CPU_FTRS
firmware provides this binding.
If you're not sure say Y.
-config UDBG_RTAS_CONSOLE
- bool "RTAS based debug console"
- depends on PPC_RTAS
-
config PPC_SMP_MUXED_IPI
bool
help
@@ -186,12 +182,6 @@ config PPC_INDIRECT_PIO
bool
select GENERIC_IOMAP
-config PPC_INDIRECT_MMIO
- bool
-
-config PPC_IO_WORKAROUNDS
- bool
-
source "drivers/cpufreq/Kconfig"
menu "CPUIdle driver"
diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype
index 1453ccc900c4..613b383ed8b3 100644
--- a/arch/powerpc/platforms/Kconfig.cputype
+++ b/arch/powerpc/platforms/Kconfig.cputype
@@ -449,6 +449,19 @@ config PPC_RADIX_MMU_DEFAULT
If you're unsure, say Y.
+config PPC_RADIX_BROADCAST_TLBIE
+ bool
+ depends on PPC_RADIX_MMU
+ help
+ Power ISA v3.0 and later implementations in the Linux Compliancy Subset
+ and lower are not required to implement broadcast TLBIE instructions.
+ Platforms with CPUs that do implement TLBIE broadcast, that is, where
+ a TLB invalidation instruction performed on one CPU operates on the
+ TLBs of all CPUs in the system, should select this option. If this
+ option is selected, the disable_tlbie kernel command line option can
+ be used to cause global TLB invalidations to be done via IPIs; without
+ it, IPIs will be used unconditionally.
+
config PPC_KERNEL_PREFIXED
depends on PPC_HAVE_PREFIXED_SUPPORT
depends on CC_HAS_PREFIXED
diff --git a/arch/powerpc/platforms/cell/Kconfig b/arch/powerpc/platforms/cell/Kconfig
index 34669b060f36..db65bfcd1e74 100644
--- a/arch/powerpc/platforms/cell/Kconfig
+++ b/arch/powerpc/platforms/cell/Kconfig
@@ -3,42 +3,6 @@ config PPC_CELL
select PPC_64S_HASH_MMU if PPC64
bool
-config PPC_CELL_COMMON
- bool
- select PPC_CELL
- select PPC_DCR_MMIO
- select PPC_INDIRECT_PIO
- select PPC_INDIRECT_MMIO
- select PPC_HASH_MMU_NATIVE
- select PPC_RTAS
- select IRQ_EDGE_EOI_HANDLER
-
-config PPC_CELL_NATIVE
- bool
- select PPC_CELL_COMMON
- select MPIC
- select PPC_IO_WORKAROUNDS
- select IBM_EMAC_EMAC4 if IBM_EMAC
- select IBM_EMAC_RGMII if IBM_EMAC
- select IBM_EMAC_ZMII if IBM_EMAC #test only
- select IBM_EMAC_TAH if IBM_EMAC #test only
-
-config PPC_IBM_CELL_BLADE
- bool "IBM Cell Blade"
- depends on PPC64 && PPC_BOOK3S && CPU_BIG_ENDIAN
- select PPC_CELL_NATIVE
- select PPC_OF_PLATFORM_PCI
- select FORCE_PCI
- select MMIO_NVRAM
- select PPC_UDBG_16550
- select UDBG_RTAS_CONSOLE
-
-config AXON_MSI
- bool
- depends on PPC_IBM_CELL_BLADE && PCI_MSI
- select IRQ_DOMAIN_NOMAP
- default y
-
menu "Cell Broadband Engine options"
depends on PPC_CELL
@@ -57,48 +21,4 @@ config SPU_BASE
bool
select PPC_COPRO_BASE
-config CBE_RAS
- bool "RAS features for bare metal Cell BE"
- depends on PPC_CELL_NATIVE
- default y
-
-config PPC_IBM_CELL_RESETBUTTON
- bool "IBM Cell Blade Pinhole reset button"
- depends on CBE_RAS && PPC_IBM_CELL_BLADE
- default y
- help
- Support Pinhole Resetbutton on IBM Cell blades.
- This adds a method to trigger system reset via front panel pinhole button.
-
-config PPC_IBM_CELL_POWERBUTTON
- tristate "IBM Cell Blade power button"
- depends on PPC_IBM_CELL_BLADE && INPUT_EVDEV
- default y
- help
- Support Powerbutton on IBM Cell blades.
- This will enable the powerbutton as an input device.
-
-config CBE_THERM
- tristate "CBE thermal support"
- default m
- depends on CBE_RAS && SPU_BASE
-
-config PPC_PMI
- tristate
- default y
- depends on CPU_FREQ_CBE_PMI || PPC_IBM_CELL_POWERBUTTON
- help
- PMI (Platform Management Interrupt) is a way to
- communicate with the BMC (Baseboard Management Controller).
- It is used in some IBM Cell blades.
-
-config CBE_CPUFREQ_SPU_GOVERNOR
- tristate "CBE frequency scaling based on SPU usage"
- depends on SPU_FS && CPU_FREQ
- default m
- help
- This governor checks for spu usage to adjust the cpu frequency.
- If no spu is running on a given cpu, that cpu will be throttled to
- the minimal possible frequency.
-
endmenu
diff --git a/arch/powerpc/platforms/cell/Makefile b/arch/powerpc/platforms/cell/Makefile
index 7ea6692f67e2..7e5ff239c376 100644
--- a/arch/powerpc/platforms/cell/Makefile
+++ b/arch/powerpc/platforms/cell/Makefile
@@ -1,27 +1,4 @@
# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_PPC_CELL_COMMON) += cbe_regs.o interrupt.o pervasive.o
-
-obj-$(CONFIG_PPC_CELL_NATIVE) += iommu.o setup.o spider-pic.o \
- pmu.o spider-pci.o
-obj-$(CONFIG_CBE_RAS) += ras.o
-
-obj-$(CONFIG_CBE_THERM) += cbe_thermal.o
-obj-$(CONFIG_CBE_CPUFREQ_SPU_GOVERNOR) += cpufreq_spudemand.o
-
-obj-$(CONFIG_PPC_IBM_CELL_POWERBUTTON) += cbe_powerbutton.o
-
-ifdef CONFIG_SMP
-obj-$(CONFIG_PPC_CELL_NATIVE) += smp.o
-endif
-
-# needed only when building loadable spufs.ko
-spu-priv1-$(CONFIG_PPC_CELL_COMMON) += spu_priv1_mmio.o
-spu-manage-$(CONFIG_PPC_CELL_COMMON) += spu_manage.o
-
obj-$(CONFIG_SPU_BASE) += spu_callbacks.o spu_base.o \
spu_syscalls.o \
- $(spu-priv1-y) \
- $(spu-manage-y) \
spufs/
-
-obj-$(CONFIG_AXON_MSI) += axon_msi.o
diff --git a/arch/powerpc/platforms/cell/axon_msi.c b/arch/powerpc/platforms/cell/axon_msi.c
deleted file mode 100644
index d243f7fd8982..000000000000
--- a/arch/powerpc/platforms/cell/axon_msi.c
+++ /dev/null
@@ -1,481 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright 2007, Michael Ellerman, IBM Corporation.
- */
-
-
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/kernel.h>
-#include <linux/pci.h>
-#include <linux/msi.h>
-#include <linux/export.h>
-#include <linux/slab.h>
-#include <linux/debugfs.h>
-#include <linux/of.h>
-#include <linux/of_irq.h>
-#include <linux/platform_device.h>
-
-#include <asm/dcr.h>
-#include <asm/machdep.h>
-
-#include "cell.h"
-
-/*
- * MSIC registers, specified as offsets from dcr_base
- */
-#define MSIC_CTRL_REG 0x0
-
-/* Base Address registers specify FIFO location in BE memory */
-#define MSIC_BASE_ADDR_HI_REG 0x3
-#define MSIC_BASE_ADDR_LO_REG 0x4
-
-/* Hold the read/write offsets into the FIFO */
-#define MSIC_READ_OFFSET_REG 0x5
-#define MSIC_WRITE_OFFSET_REG 0x6
-
-
-/* MSIC control register flags */
-#define MSIC_CTRL_ENABLE 0x0001
-#define MSIC_CTRL_FIFO_FULL_ENABLE 0x0002
-#define MSIC_CTRL_IRQ_ENABLE 0x0008
-#define MSIC_CTRL_FULL_STOP_ENABLE 0x0010
-
-/*
- * The MSIC can be configured to use a FIFO of 32KB, 64KB, 128KB or 256KB.
- * Currently we're using a 64KB FIFO size.
- */
-#define MSIC_FIFO_SIZE_SHIFT 16
-#define MSIC_FIFO_SIZE_BYTES (1 << MSIC_FIFO_SIZE_SHIFT)
-
-/*
- * To configure the FIFO size as (1 << n) bytes, we write (n - 15) into bits
- * 8-9 of the MSIC control reg.
- */
-#define MSIC_CTRL_FIFO_SIZE (((MSIC_FIFO_SIZE_SHIFT - 15) << 8) & 0x300)
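The FIFO-size encoding in the comment above can be sanity-checked in isolation: for the 64KB FIFO used here, n = 16, so (n - 15) = 1 lands in bits 8-9 and the control-register field is 0x100. A standalone check, with the defines copied from the removed lines above purely for illustration:

#include <assert.h>

/* Copies of the removed defines, reproduced only for this arithmetic check. */
#define MSIC_FIFO_SIZE_SHIFT	16
#define MSIC_FIFO_SIZE_BYTES	(1 << MSIC_FIFO_SIZE_SHIFT)
#define MSIC_CTRL_FIFO_SIZE	(((MSIC_FIFO_SIZE_SHIFT - 15) << 8) & 0x300)

int main(void)
{
	assert(MSIC_FIFO_SIZE_BYTES == 64 * 1024);	/* 64KB FIFO */
	assert(MSIC_CTRL_FIFO_SIZE == 0x100);		/* (16 - 15) written to bits 8-9 */
	return 0;
}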
-
-/*
- * We need to mask the read/write offsets to make sure they stay within
- * the bounds of the FIFO. Also they should always be 16-byte aligned.
- */
-#define MSIC_FIFO_SIZE_MASK ((MSIC_FIFO_SIZE_BYTES - 1) & ~0xFu)
-
-/* Each entry in the FIFO is 16 bytes, the first 4 bytes hold the irq # */
-#define MSIC_FIFO_ENTRY_SIZE 0x10
-
-
-struct axon_msic {
- struct irq_domain *irq_domain;
- __le32 *fifo_virt;
- dma_addr_t fifo_phys;
- dcr_host_t dcr_host;
- u32 read_offset;
-#ifdef DEBUG
- u32 __iomem *trigger;
-#endif
-};
-
-#ifdef DEBUG
-void axon_msi_debug_setup(struct device_node *dn, struct axon_msic *msic);
-#else
-static inline void axon_msi_debug_setup(struct device_node *dn,
- struct axon_msic *msic) { }
-#endif
-
-
-static void msic_dcr_write(struct axon_msic *msic, unsigned int dcr_n, u32 val)
-{
- pr_devel("axon_msi: dcr_write(0x%x, 0x%x)\n", val, dcr_n);
-
- dcr_write(msic->dcr_host, dcr_n, val);
-}
-
-static void axon_msi_cascade(struct irq_desc *desc)
-{
- struct irq_chip *chip = irq_desc_get_chip(desc);
- struct axon_msic *msic = irq_desc_get_handler_data(desc);
- u32 write_offset, msi;
- int idx;
- int retry = 0;
-
- write_offset = dcr_read(msic->dcr_host, MSIC_WRITE_OFFSET_REG);
- pr_devel("axon_msi: original write_offset 0x%x\n", write_offset);
-
- /* write_offset doesn't wrap properly, so we have to mask it */
- write_offset &= MSIC_FIFO_SIZE_MASK;
-
- while (msic->read_offset != write_offset && retry < 100) {
- idx = msic->read_offset / sizeof(__le32);
- msi = le32_to_cpu(msic->fifo_virt[idx]);
- msi &= 0xFFFF;
-
- pr_devel("axon_msi: woff %x roff %x msi %x\n",
- write_offset, msic->read_offset, msi);
-
- if (msi < irq_get_nr_irqs() && irq_get_chip_data(msi) == msic) {
- generic_handle_irq(msi);
- msic->fifo_virt[idx] = cpu_to_le32(0xffffffff);
- } else {
- /*
- * Reading the MSIC_WRITE_OFFSET_REG does not
- * reliably flush the outstanding DMA to the
- * FIFO buffer. Here we were reading stale
- * data, so we need to retry.
- */
- udelay(1);
- retry++;
- pr_devel("axon_msi: invalid irq 0x%x!\n", msi);
- continue;
- }
-
- if (retry) {
- pr_devel("axon_msi: late irq 0x%x, retry %d\n",
- msi, retry);
- retry = 0;
- }
-
- msic->read_offset += MSIC_FIFO_ENTRY_SIZE;
- msic->read_offset &= MSIC_FIFO_SIZE_MASK;
- }
-
- if (retry) {
- printk(KERN_WARNING "axon_msi: irq timed out\n");
-
- msic->read_offset += MSIC_FIFO_ENTRY_SIZE;
- msic->read_offset &= MSIC_FIFO_SIZE_MASK;
- }
-
- chip->irq_eoi(&desc->irq_data);
-}
-
-static struct axon_msic *find_msi_translator(struct pci_dev *dev)
-{
- struct irq_domain *irq_domain;
- struct device_node *dn, *tmp;
- const phandle *ph;
- struct axon_msic *msic = NULL;
-
- dn = of_node_get(pci_device_to_OF_node(dev));
- if (!dn) {
- dev_dbg(&dev->dev, "axon_msi: no pci_dn found\n");
- return NULL;
- }
-
- for (; dn; dn = of_get_next_parent(dn)) {
- ph = of_get_property(dn, "msi-translator", NULL);
- if (ph)
- break;
- }
-
- if (!ph) {
- dev_dbg(&dev->dev,
- "axon_msi: no msi-translator property found\n");
- goto out_error;
- }
-
- tmp = dn;
- dn = of_find_node_by_phandle(*ph);
- of_node_put(tmp);
- if (!dn) {
- dev_dbg(&dev->dev,
- "axon_msi: msi-translator doesn't point to a node\n");
- goto out_error;
- }
-
- irq_domain = irq_find_host(dn);
- if (!irq_domain) {
- dev_dbg(&dev->dev, "axon_msi: no irq_domain found for node %pOF\n",
- dn);
- goto out_error;
- }
-
- msic = irq_domain->host_data;
-
-out_error:
- of_node_put(dn);
-
- return msic;
-}
-
-static int setup_msi_msg_address(struct pci_dev *dev, struct msi_msg *msg)
-{
- struct device_node *dn;
- int len;
- const u32 *prop;
-
- dn = of_node_get(pci_device_to_OF_node(dev));
- if (!dn) {
- dev_dbg(&dev->dev, "axon_msi: no pci_dn found\n");
- return -ENODEV;
- }
-
- for (; dn; dn = of_get_next_parent(dn)) {
- if (!dev->no_64bit_msi) {
- prop = of_get_property(dn, "msi-address-64", &len);
- if (prop)
- break;
- }
-
- prop = of_get_property(dn, "msi-address-32", &len);
- if (prop)
- break;
- }
-
- if (!prop) {
- dev_dbg(&dev->dev,
- "axon_msi: no msi-address-(32|64) properties found\n");
- of_node_put(dn);
- return -ENOENT;
- }
-
- switch (len) {
- case 8:
- msg->address_hi = prop[0];
- msg->address_lo = prop[1];
- break;
- case 4:
- msg->address_hi = 0;
- msg->address_lo = prop[0];
- break;
- default:
- dev_dbg(&dev->dev,
- "axon_msi: malformed msi-address-(32|64) property\n");
- of_node_put(dn);
- return -EINVAL;
- }
-
- of_node_put(dn);
-
- return 0;
-}
-
-static int axon_msi_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
-{
- unsigned int virq, rc;
- struct msi_desc *entry;
- struct msi_msg msg;
- struct axon_msic *msic;
-
- msic = find_msi_translator(dev);
- if (!msic)
- return -ENODEV;
-
- rc = setup_msi_msg_address(dev, &msg);
- if (rc)
- return rc;
-
- msi_for_each_desc(entry, &dev->dev, MSI_DESC_NOTASSOCIATED) {
- virq = irq_create_direct_mapping(msic->irq_domain);
- if (!virq) {
- dev_warn(&dev->dev,
- "axon_msi: virq allocation failed!\n");
- return -1;
- }
- dev_dbg(&dev->dev, "axon_msi: allocated virq 0x%x\n", virq);
-
- irq_set_msi_desc(virq, entry);
- msg.data = virq;
- pci_write_msi_msg(virq, &msg);
- }
-
- return 0;
-}
-
-static void axon_msi_teardown_msi_irqs(struct pci_dev *dev)
-{
- struct msi_desc *entry;
-
- dev_dbg(&dev->dev, "axon_msi: tearing down msi irqs\n");
-
- msi_for_each_desc(entry, &dev->dev, MSI_DESC_ASSOCIATED) {
- irq_set_msi_desc(entry->irq, NULL);
- irq_dispose_mapping(entry->irq);
- entry->irq = 0;
- }
-}
-
-static struct irq_chip msic_irq_chip = {
- .irq_mask = pci_msi_mask_irq,
- .irq_unmask = pci_msi_unmask_irq,
- .irq_shutdown = pci_msi_mask_irq,
- .name = "AXON-MSI",
-};
-
-static int msic_host_map(struct irq_domain *h, unsigned int virq,
- irq_hw_number_t hw)
-{
- irq_set_chip_data(virq, h->host_data);
- irq_set_chip_and_handler(virq, &msic_irq_chip, handle_simple_irq);
-
- return 0;
-}
-
-static const struct irq_domain_ops msic_host_ops = {
- .map = msic_host_map,
-};
-
-static void axon_msi_shutdown(struct platform_device *device)
-{
- struct axon_msic *msic = dev_get_drvdata(&device->dev);
- u32 tmp;
-
- pr_devel("axon_msi: disabling %pOF\n",
- irq_domain_get_of_node(msic->irq_domain));
- tmp = dcr_read(msic->dcr_host, MSIC_CTRL_REG);
- tmp &= ~MSIC_CTRL_ENABLE & ~MSIC_CTRL_IRQ_ENABLE;
- msic_dcr_write(msic, MSIC_CTRL_REG, tmp);
-}
-
-static int axon_msi_probe(struct platform_device *device)
-{
- struct device_node *dn = device->dev.of_node;
- struct axon_msic *msic;
- unsigned int virq;
- int dcr_base, dcr_len;
-
- pr_devel("axon_msi: setting up dn %pOF\n", dn);
-
- msic = kzalloc(sizeof(*msic), GFP_KERNEL);
- if (!msic) {
- printk(KERN_ERR "axon_msi: couldn't allocate msic for %pOF\n",
- dn);
- goto out;
- }
-
- dcr_base = dcr_resource_start(dn, 0);
- dcr_len = dcr_resource_len(dn, 0);
-
- if (dcr_base == 0 || dcr_len == 0) {
- printk(KERN_ERR
- "axon_msi: couldn't parse dcr properties on %pOF\n",
- dn);
- goto out_free_msic;
- }
-
- msic->dcr_host = dcr_map(dn, dcr_base, dcr_len);
- if (!DCR_MAP_OK(msic->dcr_host)) {
- printk(KERN_ERR "axon_msi: dcr_map failed for %pOF\n",
- dn);
- goto out_free_msic;
- }
-
- msic->fifo_virt = dma_alloc_coherent(&device->dev, MSIC_FIFO_SIZE_BYTES,
- &msic->fifo_phys, GFP_KERNEL);
- if (!msic->fifo_virt) {
- printk(KERN_ERR "axon_msi: couldn't allocate fifo for %pOF\n",
- dn);
- goto out_free_msic;
- }
-
- virq = irq_of_parse_and_map(dn, 0);
- if (!virq) {
- printk(KERN_ERR "axon_msi: irq parse and map failed for %pOF\n",
- dn);
- goto out_free_fifo;
- }
- memset(msic->fifo_virt, 0xff, MSIC_FIFO_SIZE_BYTES);
-
- /* We rely on being able to stash a virq in a u16, so limit irqs to < 65536 */
- msic->irq_domain = irq_domain_add_nomap(dn, 65536, &msic_host_ops, msic);
- if (!msic->irq_domain) {
- printk(KERN_ERR "axon_msi: couldn't allocate irq_domain for %pOF\n",
- dn);
- goto out_free_fifo;
- }
-
- irq_set_handler_data(virq, msic);
- irq_set_chained_handler(virq, axon_msi_cascade);
- pr_devel("axon_msi: irq 0x%x setup for axon_msi\n", virq);
-
- /* Enable the MSIC hardware */
- msic_dcr_write(msic, MSIC_BASE_ADDR_HI_REG, msic->fifo_phys >> 32);
- msic_dcr_write(msic, MSIC_BASE_ADDR_LO_REG,
- msic->fifo_phys & 0xFFFFFFFF);
- msic_dcr_write(msic, MSIC_CTRL_REG,
- MSIC_CTRL_IRQ_ENABLE | MSIC_CTRL_ENABLE |
- MSIC_CTRL_FIFO_SIZE);
-
- msic->read_offset = dcr_read(msic->dcr_host, MSIC_WRITE_OFFSET_REG)
- & MSIC_FIFO_SIZE_MASK;
-
- dev_set_drvdata(&device->dev, msic);
-
- cell_pci_controller_ops.setup_msi_irqs = axon_msi_setup_msi_irqs;
- cell_pci_controller_ops.teardown_msi_irqs = axon_msi_teardown_msi_irqs;
-
- axon_msi_debug_setup(dn, msic);
-
- printk(KERN_DEBUG "axon_msi: setup MSIC on %pOF\n", dn);
-
- return 0;
-
-out_free_fifo:
- dma_free_coherent(&device->dev, MSIC_FIFO_SIZE_BYTES, msic->fifo_virt,
- msic->fifo_phys);
-out_free_msic:
- kfree(msic);
-out:
-
- return -1;
-}
-
-static const struct of_device_id axon_msi_device_id[] = {
- {
- .compatible = "ibm,axon-msic"
- },
- {}
-};
-
-static struct platform_driver axon_msi_driver = {
- .probe = axon_msi_probe,
- .shutdown = axon_msi_shutdown,
- .driver = {
- .name = "axon-msi",
- .of_match_table = axon_msi_device_id,
- },
-};
-
-static int __init axon_msi_init(void)
-{
- return platform_driver_register(&axon_msi_driver);
-}
-subsys_initcall(axon_msi_init);
-
-
-#ifdef DEBUG
-static int msic_set(void *data, u64 val)
-{
- struct axon_msic *msic = data;
- out_le32(msic->trigger, val);
- return 0;
-}
-
-static int msic_get(void *data, u64 *val)
-{
- *val = 0;
- return 0;
-}
-
-DEFINE_SIMPLE_ATTRIBUTE(fops_msic, msic_get, msic_set, "%llu\n");
-
-void axon_msi_debug_setup(struct device_node *dn, struct axon_msic *msic)
-{
- char name[8];
- struct resource res;
-
- if (of_address_to_resource(dn, 0, &res)) {
- pr_devel("axon_msi: couldn't get reg property\n");
- return;
- }
-
- msic->trigger = ioremap(res.start, 0x4);
- if (!msic->trigger) {
- pr_devel("axon_msi: ioremap failed\n");
- return;
- }
-
- snprintf(name, sizeof(name), "msic_%d", of_node_to_nid(dn));
-
- debugfs_create_file(name, 0600, arch_debugfs_dir, msic, &fops_msic);
-}
-#endif /* DEBUG */
diff --git a/arch/powerpc/platforms/cell/cbe_powerbutton.c b/arch/powerpc/platforms/cell/cbe_powerbutton.c
deleted file mode 100644
index 3d121acdf69b..000000000000
--- a/arch/powerpc/platforms/cell/cbe_powerbutton.c
+++ /dev/null
@@ -1,106 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * driver for powerbutton on IBM cell blades
- *
- * (C) Copyright IBM Corp. 2005-2008
- *
- * Author: Christian Krafft <krafft@de.ibm.com>
- */
-
-#include <linux/input.h>
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/platform_device.h>
-#include <asm/pmi.h>
-
-static struct input_dev *button_dev;
-static struct platform_device *button_pdev;
-
-static void cbe_powerbutton_handle_pmi(pmi_message_t pmi_msg)
-{
- BUG_ON(pmi_msg.type != PMI_TYPE_POWER_BUTTON);
-
- input_report_key(button_dev, KEY_POWER, 1);
- input_sync(button_dev);
- input_report_key(button_dev, KEY_POWER, 0);
- input_sync(button_dev);
-}
-
-static struct pmi_handler cbe_pmi_handler = {
- .type = PMI_TYPE_POWER_BUTTON,
- .handle_pmi_message = cbe_powerbutton_handle_pmi,
-};
-
-static int __init cbe_powerbutton_init(void)
-{
- int ret = 0;
- struct input_dev *dev;
-
- if (!of_machine_is_compatible("IBM,CBPLUS-1.0")) {
- printk(KERN_ERR "%s: Not a cell blade.\n", __func__);
- ret = -ENODEV;
- goto out;
- }
-
- dev = input_allocate_device();
- if (!dev) {
- ret = -ENOMEM;
- printk(KERN_ERR "%s: Not enough memory.\n", __func__);
- goto out;
- }
-
- set_bit(EV_KEY, dev->evbit);
- set_bit(KEY_POWER, dev->keybit);
-
- dev->name = "Power Button";
- dev->id.bustype = BUS_HOST;
-
- /* this makes the button look like an acpi power button
- * no clue whether anyone relies on that though */
- dev->id.product = 0x02;
- dev->phys = "LNXPWRBN/button/input0";
-
- button_pdev = platform_device_register_simple("power_button", 0, NULL, 0);
- if (IS_ERR(button_pdev)) {
- ret = PTR_ERR(button_pdev);
- goto out_free_input;
- }
-
- dev->dev.parent = &button_pdev->dev;
- ret = input_register_device(dev);
- if (ret) {
- printk(KERN_ERR "%s: Failed to register device\n", __func__);
- goto out_free_pdev;
- }
-
- button_dev = dev;
-
- ret = pmi_register_handler(&cbe_pmi_handler);
- if (ret) {
- printk(KERN_ERR "%s: Failed to register with pmi.\n", __func__);
- goto out_free_pdev;
- }
-
- goto out;
-
-out_free_pdev:
- platform_device_unregister(button_pdev);
-out_free_input:
- input_free_device(dev);
-out:
- return ret;
-}
-
-static void __exit cbe_powerbutton_exit(void)
-{
- pmi_unregister_handler(&cbe_pmi_handler);
- platform_device_unregister(button_pdev);
- input_free_device(button_dev);
-}
-
-module_init(cbe_powerbutton_init);
-module_exit(cbe_powerbutton_exit);
-
-MODULE_DESCRIPTION("Driver for powerbutton on IBM cell blades");
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Christian Krafft <krafft@de.ibm.com>");
diff --git a/arch/powerpc/platforms/cell/cbe_regs.c b/arch/powerpc/platforms/cell/cbe_regs.c
deleted file mode 100644
index 99b3558753e9..000000000000
--- a/arch/powerpc/platforms/cell/cbe_regs.c
+++ /dev/null
@@ -1,298 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * cbe_regs.c
- *
- * Accessor routines for the various MMIO register blocks of the CBE
- *
- * (c) 2006 Benjamin Herrenschmidt <benh@kernel.crashing.org>, IBM Corp.
- */
-
-#include <linux/percpu.h>
-#include <linux/types.h>
-#include <linux/export.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/pgtable.h>
-
-#include <asm/io.h>
-#include <asm/ptrace.h>
-#include <asm/cell-regs.h>
-
-/*
- * Current implementation uses "cpu" nodes. We build our own mapping
- * array of cpu numbers to cpu nodes locally for now to allow interrupt
- * time code to have a fast path rather than call of_get_cpu_node(). If
- * we implement cpu hotplug, we'll have to install an appropriate notifier
- * in order to release references to the cpu going away
- */
-static struct cbe_regs_map
-{
- struct device_node *cpu_node;
- struct device_node *be_node;
- struct cbe_pmd_regs __iomem *pmd_regs;
- struct cbe_iic_regs __iomem *iic_regs;
- struct cbe_mic_tm_regs __iomem *mic_tm_regs;
- struct cbe_pmd_shadow_regs pmd_shadow_regs;
-} cbe_regs_maps[MAX_CBE];
-static int cbe_regs_map_count;
-
-static struct cbe_thread_map
-{
- struct device_node *cpu_node;
- struct device_node *be_node;
- struct cbe_regs_map *regs;
- unsigned int thread_id;
- unsigned int cbe_id;
-} cbe_thread_map[NR_CPUS];
-
-static cpumask_t cbe_local_mask[MAX_CBE] = { [0 ... MAX_CBE-1] = {CPU_BITS_NONE} };
-static cpumask_t cbe_first_online_cpu = { CPU_BITS_NONE };
-
-static struct cbe_regs_map *cbe_find_map(struct device_node *np)
-{
- int i;
- struct device_node *tmp_np;
-
- if (!of_node_is_type(np, "spe")) {
- for (i = 0; i < cbe_regs_map_count; i++)
- if (cbe_regs_maps[i].cpu_node == np ||
- cbe_regs_maps[i].be_node == np)
- return &cbe_regs_maps[i];
- return NULL;
- }
-
- if (np->data)
- return np->data;
-
- /* walk up path until cpu or be node was found */
- tmp_np = np;
- do {
- tmp_np = tmp_np->parent;
- /* on a correct devicetree we wont get up to root */
- BUG_ON(!tmp_np);
- } while (!of_node_is_type(tmp_np, "cpu") ||
- !of_node_is_type(tmp_np, "be"));
-
- np->data = cbe_find_map(tmp_np);
-
- return np->data;
-}
-
-struct cbe_pmd_regs __iomem *cbe_get_pmd_regs(struct device_node *np)
-{
- struct cbe_regs_map *map = cbe_find_map(np);
- if (map == NULL)
- return NULL;
- return map->pmd_regs;
-}
-EXPORT_SYMBOL_GPL(cbe_get_pmd_regs);
-
-struct cbe_pmd_regs __iomem *cbe_get_cpu_pmd_regs(int cpu)
-{
- struct cbe_regs_map *map = cbe_thread_map[cpu].regs;
- if (map == NULL)
- return NULL;
- return map->pmd_regs;
-}
-EXPORT_SYMBOL_GPL(cbe_get_cpu_pmd_regs);
-
-struct cbe_pmd_shadow_regs *cbe_get_pmd_shadow_regs(struct device_node *np)
-{
- struct cbe_regs_map *map = cbe_find_map(np);
- if (map == NULL)
- return NULL;
- return &map->pmd_shadow_regs;
-}
-
-struct cbe_pmd_shadow_regs *cbe_get_cpu_pmd_shadow_regs(int cpu)
-{
- struct cbe_regs_map *map = cbe_thread_map[cpu].regs;
- if (map == NULL)
- return NULL;
- return &map->pmd_shadow_regs;
-}
-
-struct cbe_iic_regs __iomem *cbe_get_iic_regs(struct device_node *np)
-{
- struct cbe_regs_map *map = cbe_find_map(np);
- if (map == NULL)
- return NULL;
- return map->iic_regs;
-}
-
-struct cbe_iic_regs __iomem *cbe_get_cpu_iic_regs(int cpu)
-{
- struct cbe_regs_map *map = cbe_thread_map[cpu].regs;
- if (map == NULL)
- return NULL;
- return map->iic_regs;
-}
-
-struct cbe_mic_tm_regs __iomem *cbe_get_mic_tm_regs(struct device_node *np)
-{
- struct cbe_regs_map *map = cbe_find_map(np);
- if (map == NULL)
- return NULL;
- return map->mic_tm_regs;
-}
-
-struct cbe_mic_tm_regs __iomem *cbe_get_cpu_mic_tm_regs(int cpu)
-{
- struct cbe_regs_map *map = cbe_thread_map[cpu].regs;
- if (map == NULL)
- return NULL;
- return map->mic_tm_regs;
-}
-EXPORT_SYMBOL_GPL(cbe_get_cpu_mic_tm_regs);
-
-u32 cbe_get_hw_thread_id(int cpu)
-{
- return cbe_thread_map[cpu].thread_id;
-}
-EXPORT_SYMBOL_GPL(cbe_get_hw_thread_id);
-
-u32 cbe_cpu_to_node(int cpu)
-{
- return cbe_thread_map[cpu].cbe_id;
-}
-EXPORT_SYMBOL_GPL(cbe_cpu_to_node);
-
-u32 cbe_node_to_cpu(int node)
-{
- return cpumask_first(&cbe_local_mask[node]);
-
-}
-EXPORT_SYMBOL_GPL(cbe_node_to_cpu);
-
-static struct device_node *__init cbe_get_be_node(int cpu_id)
-{
- struct device_node *np;
-
- for_each_node_by_type (np, "be") {
- int len,i;
- const phandle *cpu_handle;
-
- cpu_handle = of_get_property(np, "cpus", &len);
-
- /*
- * the CAB SLOF tree is non compliant, so we just assume
- * there is only one node
- */
- if (WARN_ON_ONCE(!cpu_handle))
- return np;
-
- for (i = 0; i < len; i++) {
- struct device_node *ch_np = of_find_node_by_phandle(cpu_handle[i]);
- struct device_node *ci_np = of_get_cpu_node(cpu_id, NULL);
-
- of_node_put(ch_np);
- of_node_put(ci_np);
-
- if (ch_np == ci_np)
- return np;
- }
- }
-
- return NULL;
-}
-
-static void __init cbe_fill_regs_map(struct cbe_regs_map *map)
-{
- if(map->be_node) {
- struct device_node *be, *np, *parent_np;
-
- be = map->be_node;
-
- for_each_node_by_type(np, "pervasive") {
- parent_np = of_get_parent(np);
- if (parent_np == be)
- map->pmd_regs = of_iomap(np, 0);
- of_node_put(parent_np);
- }
-
- for_each_node_by_type(np, "CBEA-Internal-Interrupt-Controller") {
- parent_np = of_get_parent(np);
- if (parent_np == be)
- map->iic_regs = of_iomap(np, 2);
- of_node_put(parent_np);
- }
-
- for_each_node_by_type(np, "mic-tm") {
- parent_np = of_get_parent(np);
- if (parent_np == be)
- map->mic_tm_regs = of_iomap(np, 0);
- of_node_put(parent_np);
- }
- } else {
- struct device_node *cpu;
- /* That hack must die die die ! */
- const struct address_prop {
- unsigned long address;
- unsigned int len;
- } __attribute__((packed)) *prop;
-
- cpu = map->cpu_node;
-
- prop = of_get_property(cpu, "pervasive", NULL);
- if (prop != NULL)
- map->pmd_regs = ioremap(prop->address, prop->len);
-
- prop = of_get_property(cpu, "iic", NULL);
- if (prop != NULL)
- map->iic_regs = ioremap(prop->address, prop->len);
-
- prop = of_get_property(cpu, "mic-tm", NULL);
- if (prop != NULL)
- map->mic_tm_regs = ioremap(prop->address, prop->len);
- }
-}
-
-
-void __init cbe_regs_init(void)
-{
- int i;
- unsigned int thread_id;
- struct device_node *cpu;
-
- /* Build local fast map of CPUs */
- for_each_possible_cpu(i) {
- cbe_thread_map[i].cpu_node = of_get_cpu_node(i, &thread_id);
- cbe_thread_map[i].be_node = cbe_get_be_node(i);
- cbe_thread_map[i].thread_id = thread_id;
- }
-
- /* Find maps for each device tree CPU */
- for_each_node_by_type(cpu, "cpu") {
- struct cbe_regs_map *map;
- unsigned int cbe_id;
-
- cbe_id = cbe_regs_map_count++;
- map = &cbe_regs_maps[cbe_id];
-
- if (cbe_regs_map_count > MAX_CBE) {
- printk(KERN_ERR "cbe_regs: More BE chips than supported"
- "!\n");
- cbe_regs_map_count--;
- of_node_put(cpu);
- return;
- }
- of_node_put(map->cpu_node);
- map->cpu_node = of_node_get(cpu);
-
- for_each_possible_cpu(i) {
- struct cbe_thread_map *thread = &cbe_thread_map[i];
-
- if (thread->cpu_node == cpu) {
- thread->regs = map;
- thread->cbe_id = cbe_id;
- map->be_node = thread->be_node;
- cpumask_set_cpu(i, &cbe_local_mask[cbe_id]);
- if(thread->thread_id == 0)
- cpumask_set_cpu(i, &cbe_first_online_cpu);
- }
- }
-
- cbe_fill_regs_map(map);
- }
-}
-
diff --git a/arch/powerpc/platforms/cell/cbe_thermal.c b/arch/powerpc/platforms/cell/cbe_thermal.c
deleted file mode 100644
index c295c6714f9b..000000000000
--- a/arch/powerpc/platforms/cell/cbe_thermal.c
+++ /dev/null
@@ -1,387 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * thermal support for the cell processor
- *
- * This module adds some sysfs attributes to cpu and spu nodes.
- * Base for measurements are the digital thermal sensors (DTS)
- * located on the chip.
- * The accuracy is 2 degrees, starting from 65 up to 125 degrees celsius
- * The attributes can be found under
- * /sys/devices/system/cpu/cpuX/thermal
- * /sys/devices/system/spu/spuX/thermal
- *
- * The following attributes are added for each node:
- * temperature:
- * contains the current temperature measured by the DTS
- * throttle_begin:
- * throttling begins when temperature is greater or equal to
- * throttle_begin. Setting this value to 125 prevents throttling.
- * throttle_end:
- * throttling is being ceased, if the temperature is lower than
- * throttle_end. Due to a delay between applying throttling and
- * a reduced temperature this value should be less than throttle_begin.
- * A value equal to throttle_begin provides only a very little hysteresis.
- * throttle_full_stop:
- * If the temperature is greater or equal to throttle_full_stop,
- * full throttling is applied to the cpu or spu. This value should be
- * greater than throttle_begin and throttle_end. Setting this value to
- * 65 prevents the unit from running code at all.
- *
- * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
- *
- * Author: Christian Krafft <krafft@de.ibm.com>
- */
-
-#include <linux/module.h>
-#include <linux/device.h>
-#include <linux/kernel.h>
-#include <linux/cpu.h>
-#include <linux/stringify.h>
-#include <asm/spu.h>
-#include <asm/io.h>
-#include <asm/cell-regs.h>
-
-#include "spu_priv1_mmio.h"
-
-#define TEMP_MIN 65
-#define TEMP_MAX 125
-
-#define DEVICE_PREFIX_ATTR(_prefix,_name,_mode) \
-struct device_attribute attr_ ## _prefix ## _ ## _name = { \
- .attr = { .name = __stringify(_name), .mode = _mode }, \
- .show = _prefix ## _show_ ## _name, \
- .store = _prefix ## _store_ ## _name, \
-};
-
-static inline u8 reg_to_temp(u8 reg_value)
-{
- return ((reg_value & 0x3f) << 1) + TEMP_MIN;
-}
-
-static inline u8 temp_to_reg(u8 temp)
-{
- return ((temp - TEMP_MIN) >> 1) & 0x3f;
-}
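A quick round-trip of the two helpers above, matching the 2-degree granularity and the 65 to 125 degree range described in the header comment (standalone copies, for illustration only):

#include <assert.h>
#include <stdint.h>

#define TEMP_MIN 65

/* Copies of the removed helpers, usable outside the kernel for a sanity check. */
static uint8_t reg_to_temp(uint8_t reg_value)
{
	return ((reg_value & 0x3f) << 1) + TEMP_MIN;
}

static uint8_t temp_to_reg(uint8_t temp)
{
	return ((temp - TEMP_MIN) >> 1) & 0x3f;
}

int main(void)
{
	assert(reg_to_temp(8) == 81);	/* raw 8 -> 65 + 16 = 81 degrees Celsius */
	assert(temp_to_reg(81) == 8);	/* and back again */
	return 0;
}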
-
-static struct cbe_pmd_regs __iomem *get_pmd_regs(struct device *dev)
-{
- struct spu *spu;
-
- spu = container_of(dev, struct spu, dev);
-
- return cbe_get_pmd_regs(spu_devnode(spu));
-}
-
-/* returns the value for a given spu in a given register */
-static u8 spu_read_register_value(struct device *dev, union spe_reg __iomem *reg)
-{
- union spe_reg value;
- struct spu *spu;
-
- spu = container_of(dev, struct spu, dev);
- value.val = in_be64(&reg->val);
-
- return value.spe[spu->spe_id];
-}
-
-static ssize_t spu_show_temp(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- u8 value;
- struct cbe_pmd_regs __iomem *pmd_regs;
-
- pmd_regs = get_pmd_regs(dev);
-
- value = spu_read_register_value(dev, &pmd_regs->ts_ctsr1);
-
- return sprintf(buf, "%d\n", reg_to_temp(value));
-}
-
-static ssize_t show_throttle(struct cbe_pmd_regs __iomem *pmd_regs, char *buf, int pos)
-{
- u64 value;
-
- value = in_be64(&pmd_regs->tm_tpr.val);
- /* access the corresponding byte */
- value >>= pos;
- value &= 0x3F;
-
- return sprintf(buf, "%d\n", reg_to_temp(value));
-}
-
-static ssize_t store_throttle(struct cbe_pmd_regs __iomem *pmd_regs, const char *buf, size_t size, int pos)
-{
- u64 reg_value;
- unsigned int temp;
- u64 new_value;
- int ret;
-
- ret = sscanf(buf, "%u", &temp);
-
- if (ret != 1 || temp < TEMP_MIN || temp > TEMP_MAX)
- return -EINVAL;
-
- new_value = temp_to_reg(temp);
-
- reg_value = in_be64(&pmd_regs->tm_tpr.val);
-
- /* zero out bits for new value */
- reg_value &= ~(0xffull << pos);
- /* set bits to new value */
- reg_value |= new_value << pos;
-
- out_be64(&pmd_regs->tm_tpr.val, reg_value);
- return size;
-}
-
-static ssize_t spu_show_throttle_end(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- return show_throttle(get_pmd_regs(dev), buf, 0);
-}
-
-static ssize_t spu_show_throttle_begin(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- return show_throttle(get_pmd_regs(dev), buf, 8);
-}
-
-static ssize_t spu_show_throttle_full_stop(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- return show_throttle(get_pmd_regs(dev), buf, 16);
-}
-
-static ssize_t spu_store_throttle_end(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t size)
-{
- return store_throttle(get_pmd_regs(dev), buf, size, 0);
-}
-
-static ssize_t spu_store_throttle_begin(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t size)
-{
- return store_throttle(get_pmd_regs(dev), buf, size, 8);
-}
-
-static ssize_t spu_store_throttle_full_stop(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t size)
-{
- return store_throttle(get_pmd_regs(dev), buf, size, 16);
-}
-
-static ssize_t ppe_show_temp(struct device *dev, char *buf, int pos)
-{
- struct cbe_pmd_regs __iomem *pmd_regs;
- u64 value;
-
- pmd_regs = cbe_get_cpu_pmd_regs(dev->id);
- value = in_be64(&pmd_regs->ts_ctsr2);
-
- value = (value >> pos) & 0x3f;
-
- return sprintf(buf, "%d\n", reg_to_temp(value));
-}
-
-
-/* shows the temperature of the DTS on the PPE,
- * located near the linear thermal sensor */
-static ssize_t ppe_show_temp0(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- return ppe_show_temp(dev, buf, 32);
-}
-
-/* shows the temperature of the second DTS on the PPE */
-static ssize_t ppe_show_temp1(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- return ppe_show_temp(dev, buf, 0);
-}
-
-static ssize_t ppe_show_throttle_end(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- return show_throttle(cbe_get_cpu_pmd_regs(dev->id), buf, 32);
-}
-
-static ssize_t ppe_show_throttle_begin(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- return show_throttle(cbe_get_cpu_pmd_regs(dev->id), buf, 40);
-}
-
-static ssize_t ppe_show_throttle_full_stop(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- return show_throttle(cbe_get_cpu_pmd_regs(dev->id), buf, 48);
-}
-
-static ssize_t ppe_store_throttle_end(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t size)
-{
- return store_throttle(cbe_get_cpu_pmd_regs(dev->id), buf, size, 32);
-}
-
-static ssize_t ppe_store_throttle_begin(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t size)
-{
- return store_throttle(cbe_get_cpu_pmd_regs(dev->id), buf, size, 40);
-}
-
-static ssize_t ppe_store_throttle_full_stop(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t size)
-{
- return store_throttle(cbe_get_cpu_pmd_regs(dev->id), buf, size, 48);
-}
-
-
-static struct device_attribute attr_spu_temperature = {
- .attr = {.name = "temperature", .mode = 0400 },
- .show = spu_show_temp,
-};
-
-static DEVICE_PREFIX_ATTR(spu, throttle_end, 0600);
-static DEVICE_PREFIX_ATTR(spu, throttle_begin, 0600);
-static DEVICE_PREFIX_ATTR(spu, throttle_full_stop, 0600);
-
-
-static struct attribute *spu_attributes[] = {
- &attr_spu_temperature.attr,
- &attr_spu_throttle_end.attr,
- &attr_spu_throttle_begin.attr,
- &attr_spu_throttle_full_stop.attr,
- NULL,
-};
-
-static const struct attribute_group spu_attribute_group = {
- .name = "thermal",
- .attrs = spu_attributes,
-};
-
-static struct device_attribute attr_ppe_temperature0 = {
- .attr = {.name = "temperature0", .mode = 0400 },
- .show = ppe_show_temp0,
-};
-
-static struct device_attribute attr_ppe_temperature1 = {
- .attr = {.name = "temperature1", .mode = 0400 },
- .show = ppe_show_temp1,
-};
-
-static DEVICE_PREFIX_ATTR(ppe, throttle_end, 0600);
-static DEVICE_PREFIX_ATTR(ppe, throttle_begin, 0600);
-static DEVICE_PREFIX_ATTR(ppe, throttle_full_stop, 0600);
-
-static struct attribute *ppe_attributes[] = {
- &attr_ppe_temperature0.attr,
- &attr_ppe_temperature1.attr,
- &attr_ppe_throttle_end.attr,
- &attr_ppe_throttle_begin.attr,
- &attr_ppe_throttle_full_stop.attr,
- NULL,
-};
-
-static struct attribute_group ppe_attribute_group = {
- .name = "thermal",
- .attrs = ppe_attributes,
-};
-
-/*
- * initialize throttling with default values
- */
-static int __init init_default_values(void)
-{
- int cpu;
- struct cbe_pmd_regs __iomem *pmd_regs;
- struct device *dev;
- union ppe_spe_reg tpr;
- union spe_reg str1;
- u64 str2;
- union spe_reg cr1;
- u64 cr2;
-
- /* TPR defaults */
- /* ppe
- * 1F - no full stop
- * 08 - dynamic throttling starts if over 80 degrees
- * 03 - dynamic throttling ceases if below 70 degrees */
- tpr.ppe = 0x1F0803;
- /* spe
- * 10 - full stopped when over 96 degrees
- * 08 - dynamic throttling starts if over 80 degrees
- * 03 - dynamic throttling ceases if below 70 degrees
- */
- tpr.spe = 0x100803;
-
- /* STR defaults */
- /* str1
- * 10 - stop 16 of 32 cycles
- */
- str1.val = 0x1010101010101010ull;
- /* str2
- * 10 - stop 16 of 32 cycles
- */
- str2 = 0x10;
-
- /* CR defaults */
- /* cr1
- * 4 - normal operation
- */
- cr1.val = 0x0404040404040404ull;
- /* cr2
- * 4 - normal operation
- */
- cr2 = 0x04;
-
- for_each_possible_cpu (cpu) {
- pr_debug("processing cpu %d\n", cpu);
- dev = get_cpu_device(cpu);
-
- if (!dev) {
- pr_info("invalid dev pointer for cbe_thermal\n");
- return -EINVAL;
- }
-
- pmd_regs = cbe_get_cpu_pmd_regs(dev->id);
-
- if (!pmd_regs) {
- pr_info("invalid CBE regs pointer for cbe_thermal\n");
- return -EINVAL;
- }
-
- out_be64(&pmd_regs->tm_str2, str2);
- out_be64(&pmd_regs->tm_str1.val, str1.val);
- out_be64(&pmd_regs->tm_tpr.val, tpr.val);
- out_be64(&pmd_regs->tm_cr1.val, cr1.val);
- out_be64(&pmd_regs->tm_cr2, cr2);
- }
-
- return 0;
-}
-
-
-static int __init thermal_init(void)
-{
- int rc = init_default_values();
-
- if (rc == 0) {
- spu_add_dev_attr_group(&spu_attribute_group);
- cpu_add_dev_attr_group(&ppe_attribute_group);
- }
-
- return rc;
-}
-module_init(thermal_init);
-
-static void __exit thermal_exit(void)
-{
- spu_remove_dev_attr_group(&spu_attribute_group);
- cpu_remove_dev_attr_group(&ppe_attribute_group);
-}
-module_exit(thermal_exit);
-
-MODULE_DESCRIPTION("Cell processor thermal driver");
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Christian Krafft <krafft@de.ibm.com>");
-
diff --git a/arch/powerpc/platforms/cell/cell.h b/arch/powerpc/platforms/cell/cell.h
deleted file mode 100644
index d5142e905ab3..000000000000
--- a/arch/powerpc/platforms/cell/cell.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Cell Platform common data structures
- *
- * Copyright 2015, Daniel Axtens, IBM Corporation
- */
-
-#ifndef CELL_H
-#define CELL_H
-
-#include <asm/pci-bridge.h>
-
-extern struct pci_controller_ops cell_pci_controller_ops;
-
-#endif
diff --git a/arch/powerpc/platforms/cell/cpufreq_spudemand.c b/arch/powerpc/platforms/cell/cpufreq_spudemand.c
deleted file mode 100644
index 79172ba36eca..000000000000
--- a/arch/powerpc/platforms/cell/cpufreq_spudemand.c
+++ /dev/null
@@ -1,134 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * spu aware cpufreq governor for the cell processor
- *
- * © Copyright IBM Corporation 2006-2008
- *
- * Author: Christian Krafft <krafft@de.ibm.com>
- */
-
-#include <linux/cpufreq.h>
-#include <linux/sched.h>
-#include <linux/sched/loadavg.h>
-#include <linux/module.h>
-#include <linux/timer.h>
-#include <linux/workqueue.h>
-#include <linux/atomic.h>
-#include <asm/machdep.h>
-#include <asm/spu.h>
-
-#define POLL_TIME 100000 /* in µs */
-#define EXP 753 /* exp(-1) in fixed-point */
-
-struct spu_gov_info_struct {
- unsigned long busy_spus; /* fixed-point */
- struct cpufreq_policy *policy;
- struct delayed_work work;
- unsigned int poll_int; /* µs */
-};
-static DEFINE_PER_CPU(struct spu_gov_info_struct, spu_gov_info);
-
-static int calc_freq(struct spu_gov_info_struct *info)
-{
- int cpu;
- int busy_spus;
-
- cpu = info->policy->cpu;
- busy_spus = atomic_read(&cbe_spu_info[cpu_to_node(cpu)].busy_spus);
-
- info->busy_spus = calc_load(info->busy_spus, EXP, busy_spus * FIXED_1);
- pr_debug("cpu %d: busy_spus=%d, info->busy_spus=%ld\n",
- cpu, busy_spus, info->busy_spus);
-
- return info->policy->max * info->busy_spus / FIXED_1;
-}
-
-static void spu_gov_work(struct work_struct *work)
-{
- struct spu_gov_info_struct *info;
- int delay;
- unsigned long target_freq;
-
- info = container_of(work, struct spu_gov_info_struct, work.work);
-
- /* after cancel_delayed_work_sync we unset info->policy */
- BUG_ON(info->policy == NULL);
-
- target_freq = calc_freq(info);
- __cpufreq_driver_target(info->policy, target_freq, CPUFREQ_RELATION_H);
-
- delay = usecs_to_jiffies(info->poll_int);
- schedule_delayed_work_on(info->policy->cpu, &info->work, delay);
-}
-
-static void spu_gov_init_work(struct spu_gov_info_struct *info)
-{
- int delay = usecs_to_jiffies(info->poll_int);
- INIT_DEFERRABLE_WORK(&info->work, spu_gov_work);
- schedule_delayed_work_on(info->policy->cpu, &info->work, delay);
-}
-
-static void spu_gov_cancel_work(struct spu_gov_info_struct *info)
-{
- cancel_delayed_work_sync(&info->work);
-}
-
-static int spu_gov_start(struct cpufreq_policy *policy)
-{
- unsigned int cpu = policy->cpu;
- struct spu_gov_info_struct *info = &per_cpu(spu_gov_info, cpu);
- struct spu_gov_info_struct *affected_info;
- int i;
-
- if (!cpu_online(cpu)) {
- printk(KERN_ERR "cpu %d is not online\n", cpu);
- return -EINVAL;
- }
-
- if (!policy->cur) {
- printk(KERN_ERR "no cpu specified in policy\n");
- return -EINVAL;
- }
-
- /* initialize spu_gov_info for all affected cpus */
- for_each_cpu(i, policy->cpus) {
- affected_info = &per_cpu(spu_gov_info, i);
- affected_info->policy = policy;
- }
-
- info->poll_int = POLL_TIME;
-
- /* setup timer */
- spu_gov_init_work(info);
-
- return 0;
-}
-
-static void spu_gov_stop(struct cpufreq_policy *policy)
-{
- unsigned int cpu = policy->cpu;
- struct spu_gov_info_struct *info = &per_cpu(spu_gov_info, cpu);
- int i;
-
- /* cancel timer */
- spu_gov_cancel_work(info);
-
- /* clean spu_gov_info for all affected cpus */
- for_each_cpu (i, policy->cpus) {
- info = &per_cpu(spu_gov_info, i);
- info->policy = NULL;
- }
-}
-
-static struct cpufreq_governor spu_governor = {
- .name = "spudemand",
- .start = spu_gov_start,
- .stop = spu_gov_stop,
- .owner = THIS_MODULE,
-};
-cpufreq_governor_init(spu_governor);
-cpufreq_governor_exit(spu_governor);
-
-MODULE_DESCRIPTION("SPU-aware cpufreq governor for the cell processor");
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Christian Krafft <krafft@de.ibm.com>");
diff --git a/arch/powerpc/platforms/cell/interrupt.c b/arch/powerpc/platforms/cell/interrupt.c
deleted file mode 100644
index 03ee8152ee97..000000000000
--- a/arch/powerpc/platforms/cell/interrupt.c
+++ /dev/null
@@ -1,390 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Cell Internal Interrupt Controller
- *
- * Copyright (C) 2006 Benjamin Herrenschmidt (benh@kernel.crashing.org)
- * IBM, Corp.
- *
- * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
- *
- * Author: Arnd Bergmann <arndb@de.ibm.com>
- *
- * TODO:
- * - Fix various assumptions related to HW CPU numbers vs. linux CPU numbers
- * vs node numbers in the setup code
- * - Implement proper handling of maxcpus=1/2 (that is, routing of irqs from
- * a non-active node to the active node)
- */
-
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/irqdomain.h>
-#include <linux/export.h>
-#include <linux/percpu.h>
-#include <linux/types.h>
-#include <linux/ioport.h>
-#include <linux/kernel_stat.h>
-#include <linux/pgtable.h>
-#include <linux/of_address.h>
-
-#include <asm/io.h>
-#include <asm/ptrace.h>
-#include <asm/machdep.h>
-#include <asm/cell-regs.h>
-
-#include "interrupt.h"
-
-struct iic {
- struct cbe_iic_thread_regs __iomem *regs;
- u8 target_id;
- u8 eoi_stack[16];
- int eoi_ptr;
- struct device_node *node;
-};
-
-static DEFINE_PER_CPU(struct iic, cpu_iic);
-#define IIC_NODE_COUNT 2
-static struct irq_domain *iic_host;
-
-/* Convert between "pending" bits and hw irq number */
-static irq_hw_number_t iic_pending_to_hwnum(struct cbe_iic_pending_bits bits)
-{
- unsigned char unit = bits.source & 0xf;
- unsigned char node = bits.source >> 4;
- unsigned char class = bits.class & 3;
-
- /* Decode IPIs */
- if (bits.flags & CBE_IIC_IRQ_IPI)
- return IIC_IRQ_TYPE_IPI | (bits.prio >> 4);
- else
- return (node << IIC_IRQ_NODE_SHIFT) | (class << 4) | unit;
-}
-
-static void iic_mask(struct irq_data *d)
-{
-}
-
-static void iic_unmask(struct irq_data *d)
-{
-}
-
-static void iic_eoi(struct irq_data *d)
-{
- struct iic *iic = this_cpu_ptr(&cpu_iic);
- out_be64(&iic->regs->prio, iic->eoi_stack[--iic->eoi_ptr]);
- BUG_ON(iic->eoi_ptr < 0);
-}
-
-static struct irq_chip iic_chip = {
- .name = "CELL-IIC",
- .irq_mask = iic_mask,
- .irq_unmask = iic_unmask,
- .irq_eoi = iic_eoi,
-};
-
-
-static void iic_ioexc_eoi(struct irq_data *d)
-{
-}
-
-static void iic_ioexc_cascade(struct irq_desc *desc)
-{
- struct irq_chip *chip = irq_desc_get_chip(desc);
- struct cbe_iic_regs __iomem *node_iic =
- (void __iomem *)irq_desc_get_handler_data(desc);
- unsigned int irq = irq_desc_get_irq(desc);
- unsigned int base = (irq & 0xffffff00) | IIC_IRQ_TYPE_IOEXC;
- unsigned long bits, ack;
- int cascade;
-
- for (;;) {
- bits = in_be64(&node_iic->iic_is);
- if (bits == 0)
- break;
- /* pre-ack edge interrupts */
- ack = bits & IIC_ISR_EDGE_MASK;
- if (ack)
- out_be64(&node_iic->iic_is, ack);
- /* handle them */
- for (cascade = 63; cascade >= 0; cascade--)
- if (bits & (0x8000000000000000UL >> cascade))
- generic_handle_domain_irq(iic_host,
- base | cascade);
- /* post-ack level interrupts */
- ack = bits & ~IIC_ISR_EDGE_MASK;
- if (ack)
- out_be64(&node_iic->iic_is, ack);
- }
- chip->irq_eoi(&desc->irq_data);
-}
-
-
-static struct irq_chip iic_ioexc_chip = {
- .name = "CELL-IOEX",
- .irq_mask = iic_mask,
- .irq_unmask = iic_unmask,
- .irq_eoi = iic_ioexc_eoi,
-};
-
-/* Get an IRQ number from the pending state register of the IIC */
-static unsigned int iic_get_irq(void)
-{
- struct cbe_iic_pending_bits pending;
- struct iic *iic;
- unsigned int virq;
-
- iic = this_cpu_ptr(&cpu_iic);
- *(unsigned long *) &pending =
- in_be64((u64 __iomem *) &iic->regs->pending_destr);
- if (!(pending.flags & CBE_IIC_IRQ_VALID))
- return 0;
- virq = irq_linear_revmap(iic_host, iic_pending_to_hwnum(pending));
- if (!virq)
- return 0;
- iic->eoi_stack[++iic->eoi_ptr] = pending.prio;
- BUG_ON(iic->eoi_ptr > 15);
- return virq;
-}
-
-void iic_setup_cpu(void)
-{
- out_be64(&this_cpu_ptr(&cpu_iic)->regs->prio, 0xff);
-}
-
-u8 iic_get_target_id(int cpu)
-{
- return per_cpu(cpu_iic, cpu).target_id;
-}
-
-EXPORT_SYMBOL_GPL(iic_get_target_id);
-
-#ifdef CONFIG_SMP
-
-/* Use the highest interrupt priorities for IPI */
-static inline int iic_msg_to_irq(int msg)
-{
- return IIC_IRQ_TYPE_IPI + 0xf - msg;
-}
-
-void iic_message_pass(int cpu, int msg)
-{
- out_be64(&per_cpu(cpu_iic, cpu).regs->generate, (0xf - msg) << 4);
-}
-
-static void iic_request_ipi(int msg)
-{
- int virq;
-
- virq = irq_create_mapping(iic_host, iic_msg_to_irq(msg));
- if (!virq) {
- printk(KERN_ERR
- "iic: failed to map IPI %s\n", smp_ipi_name[msg]);
- return;
- }
-
- /*
- * If smp_request_message_ipi encounters an error it will notify
- * the error. If a message is not needed it will return non-zero.
- */
- if (smp_request_message_ipi(virq, msg))
- irq_dispose_mapping(virq);
-}
-
-void iic_request_IPIs(void)
-{
- iic_request_ipi(PPC_MSG_CALL_FUNCTION);
- iic_request_ipi(PPC_MSG_RESCHEDULE);
- iic_request_ipi(PPC_MSG_TICK_BROADCAST);
- iic_request_ipi(PPC_MSG_NMI_IPI);
-}
-
-#endif /* CONFIG_SMP */
-
-
-static int iic_host_match(struct irq_domain *h, struct device_node *node,
- enum irq_domain_bus_token bus_token)
-{
- return of_device_is_compatible(node,
- "IBM,CBEA-Internal-Interrupt-Controller");
-}
-
-static int iic_host_map(struct irq_domain *h, unsigned int virq,
- irq_hw_number_t hw)
-{
- switch (hw & IIC_IRQ_TYPE_MASK) {
- case IIC_IRQ_TYPE_IPI:
- irq_set_chip_and_handler(virq, &iic_chip, handle_percpu_irq);
- break;
- case IIC_IRQ_TYPE_IOEXC:
- irq_set_chip_and_handler(virq, &iic_ioexc_chip,
- handle_edge_eoi_irq);
- break;
- default:
- irq_set_chip_and_handler(virq, &iic_chip, handle_edge_eoi_irq);
- }
- return 0;
-}
-
-static int iic_host_xlate(struct irq_domain *h, struct device_node *ct,
- const u32 *intspec, unsigned int intsize,
- irq_hw_number_t *out_hwirq, unsigned int *out_flags)
-
-{
- unsigned int node, ext, unit, class;
- const u32 *val;
-
- if (!of_device_is_compatible(ct,
- "IBM,CBEA-Internal-Interrupt-Controller"))
- return -ENODEV;
- if (intsize != 1)
- return -ENODEV;
- val = of_get_property(ct, "#interrupt-cells", NULL);
- if (val == NULL || *val != 1)
- return -ENODEV;
-
- node = intspec[0] >> 24;
- ext = (intspec[0] >> 16) & 0xff;
- class = (intspec[0] >> 8) & 0xff;
- unit = intspec[0] & 0xff;
-
- /* Check if node is in supported range */
- if (node > 1)
- return -EINVAL;
-
- /* Build up interrupt number, special case for IO exceptions */
- *out_hwirq = (node << IIC_IRQ_NODE_SHIFT);
- if (unit == IIC_UNIT_IIC && class == 1)
- *out_hwirq |= IIC_IRQ_TYPE_IOEXC | ext;
- else
- *out_hwirq |= IIC_IRQ_TYPE_NORMAL |
- (class << IIC_IRQ_CLASS_SHIFT) | unit;
-
- /* Dummy flags, ignored by iic code */
- *out_flags = IRQ_TYPE_EDGE_RISING;
-
- return 0;
-}
-
-static const struct irq_domain_ops iic_host_ops = {
- .match = iic_host_match,
- .map = iic_host_map,
- .xlate = iic_host_xlate,
-};
-
-static void __init init_one_iic(unsigned int hw_cpu, unsigned long addr,
- struct device_node *node)
-{
- /* XXX FIXME: should locate the linux CPU number from the HW cpu
- * number properly. We are lucky for now
- */
- struct iic *iic = &per_cpu(cpu_iic, hw_cpu);
-
- iic->regs = ioremap(addr, sizeof(struct cbe_iic_thread_regs));
- BUG_ON(iic->regs == NULL);
-
- iic->target_id = ((hw_cpu & 2) << 3) | ((hw_cpu & 1) ? 0xf : 0xe);
- iic->eoi_stack[0] = 0xff;
- iic->node = of_node_get(node);
- out_be64(&iic->regs->prio, 0);
-
- printk(KERN_INFO "IIC for CPU %d target id 0x%x : %pOF\n",
- hw_cpu, iic->target_id, node);
-}
-
-static int __init setup_iic(void)
-{
- struct device_node *dn;
- struct resource r0, r1;
- unsigned int node, cascade, found = 0;
- struct cbe_iic_regs __iomem *node_iic;
- const u32 *np;
-
- for_each_node_by_name(dn, "interrupt-controller") {
- if (!of_device_is_compatible(dn,
- "IBM,CBEA-Internal-Interrupt-Controller"))
- continue;
- np = of_get_property(dn, "ibm,interrupt-server-ranges", NULL);
- if (np == NULL) {
- printk(KERN_WARNING "IIC: CPU association not found\n");
- of_node_put(dn);
- return -ENODEV;
- }
- if (of_address_to_resource(dn, 0, &r0) ||
- of_address_to_resource(dn, 1, &r1)) {
- printk(KERN_WARNING "IIC: Can't resolve addresses\n");
- of_node_put(dn);
- return -ENODEV;
- }
- found++;
- init_one_iic(np[0], r0.start, dn);
- init_one_iic(np[1], r1.start, dn);
-
- /* Setup cascade for IO exceptions. XXX cleanup tricks to get
- * node vs CPU etc...
- * Note that we configure the IIC_IRR here with a hard coded
- * priority of 1. We might want to improve that later.
- */
- node = np[0] >> 1;
- node_iic = cbe_get_cpu_iic_regs(np[0]);
- cascade = node << IIC_IRQ_NODE_SHIFT;
- cascade |= 1 << IIC_IRQ_CLASS_SHIFT;
- cascade |= IIC_UNIT_IIC;
- cascade = irq_create_mapping(iic_host, cascade);
- if (!cascade)
- continue;
- /*
- * irq_data is a generic pointer that gets passed back
- * to us later, so the forced cast is fine.
- */
- irq_set_handler_data(cascade, (void __force *)node_iic);
- irq_set_chained_handler(cascade, iic_ioexc_cascade);
- out_be64(&node_iic->iic_ir,
- (1 << 12) /* priority */ |
- (node << 4) /* dest node */ |
- IIC_UNIT_THREAD_0 /* route them to thread 0 */);
- /* Flush pending (make sure it triggers if there is
- * anything pending
- */
- out_be64(&node_iic->iic_is, 0xfffffffffffffffful);
- }
-
- if (found)
- return 0;
- else
- return -ENODEV;
-}
-
-void __init iic_init_IRQ(void)
-{
- /* Setup an irq host data structure */
- iic_host = irq_domain_add_linear(NULL, IIC_SOURCE_COUNT, &iic_host_ops,
- NULL);
- BUG_ON(iic_host == NULL);
- irq_set_default_host(iic_host);
-
- /* Discover and initialize iics */
- if (setup_iic() < 0)
- panic("IIC: Failed to initialize !\n");
-
- /* Set master interrupt handling function */
- ppc_md.get_irq = iic_get_irq;
-
- /* Enable on current CPU */
- iic_setup_cpu();
-}
-
-void iic_set_interrupt_routing(int cpu, int thread, int priority)
-{
- struct cbe_iic_regs __iomem *iic_regs = cbe_get_cpu_iic_regs(cpu);
- u64 iic_ir = 0;
- int node = cpu >> 1;
-
- /* Set which node and thread will handle the next interrupt */
- iic_ir |= CBE_IIC_IR_PRIO(priority) |
- CBE_IIC_IR_DEST_NODE(node);
- if (thread == 0)
- iic_ir |= CBE_IIC_IR_DEST_UNIT(CBE_IIC_IR_PT_0);
- else
- iic_ir |= CBE_IIC_IR_DEST_UNIT(CBE_IIC_IR_PT_1);
- out_be64(&iic_regs->iic_ir, iic_ir);
-}
diff --git a/arch/powerpc/platforms/cell/interrupt.h b/arch/powerpc/platforms/cell/interrupt.h
deleted file mode 100644
index a47902248541..000000000000
--- a/arch/powerpc/platforms/cell/interrupt.h
+++ /dev/null
@@ -1,90 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef ASM_CELL_PIC_H
-#define ASM_CELL_PIC_H
-#ifdef __KERNEL__
-/*
- * Mapping of IIC pending bits into per-node interrupt numbers.
- *
- * Interrupt numbers are in the range 0...0x1ff where the top bit
- * (0x100) represent the source node. Only 2 nodes are supported with
- * the current code though it's trivial to extend that if necessary using
- * higher level bits
- *
- * The bottom 8 bits are split into 2 type bits and 6 data bits that
- * depend on the type:
- *
- * 00 (0x00 | data) : normal interrupt. data is (class << 4) | source
- * 01 (0x40 | data) : IO exception. data is the exception number as
- * defined by bit numbers in IIC_SR
- * 10 (0x80 | data) : IPI. data is the IPI number (obtained from the priority)
- * and node is always 0 (IPIs are per-cpu, their source is
- * not relevant)
- * 11 (0xc0 | data) : reserved
- *
- * In addition, interrupt number 0x80000000 is defined as always invalid
- * (that is the node field is expected to never extend to more than 23 bits)
- *
- */
-
-enum {
- IIC_IRQ_INVALID = 0x80000000u,
- IIC_IRQ_NODE_MASK = 0x100,
- IIC_IRQ_NODE_SHIFT = 8,
- IIC_IRQ_MAX = 0x1ff,
- IIC_IRQ_TYPE_MASK = 0xc0,
- IIC_IRQ_TYPE_NORMAL = 0x00,
- IIC_IRQ_TYPE_IOEXC = 0x40,
- IIC_IRQ_TYPE_IPI = 0x80,
- IIC_IRQ_CLASS_SHIFT = 4,
- IIC_IRQ_CLASS_0 = 0x00,
- IIC_IRQ_CLASS_1 = 0x10,
- IIC_IRQ_CLASS_2 = 0x20,
- IIC_SOURCE_COUNT = 0x200,
-
- /* Here are defined the various source/dest units. Avoid using those
- * definitions if you can, they are mostly here for reference
- */
- IIC_UNIT_SPU_0 = 0x4,
- IIC_UNIT_SPU_1 = 0x7,
- IIC_UNIT_SPU_2 = 0x3,
- IIC_UNIT_SPU_3 = 0x8,
- IIC_UNIT_SPU_4 = 0x2,
- IIC_UNIT_SPU_5 = 0x9,
- IIC_UNIT_SPU_6 = 0x1,
- IIC_UNIT_SPU_7 = 0xa,
- IIC_UNIT_IOC_0 = 0x0,
- IIC_UNIT_IOC_1 = 0xb,
- IIC_UNIT_THREAD_0 = 0xe, /* target only */
- IIC_UNIT_THREAD_1 = 0xf, /* target only */
- IIC_UNIT_IIC = 0xe, /* source only (IO exceptions) */
-
- /* Base numbers for the external interrupts */
- IIC_IRQ_EXT_IOIF0 =
- IIC_IRQ_TYPE_NORMAL | IIC_IRQ_CLASS_2 | IIC_UNIT_IOC_0,
- IIC_IRQ_EXT_IOIF1 =
- IIC_IRQ_TYPE_NORMAL | IIC_IRQ_CLASS_2 | IIC_UNIT_IOC_1,
-
- /* Base numbers for the IIC_ISR interrupts */
- IIC_IRQ_IOEX_TMI = IIC_IRQ_TYPE_IOEXC | IIC_IRQ_CLASS_1 | 63,
- IIC_IRQ_IOEX_PMI = IIC_IRQ_TYPE_IOEXC | IIC_IRQ_CLASS_1 | 62,
- IIC_IRQ_IOEX_ATI = IIC_IRQ_TYPE_IOEXC | IIC_IRQ_CLASS_1 | 61,
- IIC_IRQ_IOEX_MATBFI = IIC_IRQ_TYPE_IOEXC | IIC_IRQ_CLASS_1 | 60,
- IIC_IRQ_IOEX_ELDI = IIC_IRQ_TYPE_IOEXC | IIC_IRQ_CLASS_1 | 59,
-
- /* Which bits in IIC_ISR are edge sensitive */
- IIC_ISR_EDGE_MASK = 0x4ul,
-};
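The layout spelled out in the header comment can be exercised with a tiny decoder. The input value below is made up, and the shifts and masks simply mirror the enum above for the normal-interrupt case:

#include <stdio.h>

int main(void)
{
	unsigned int hwirq = 0x125;			/* example value only */
	unsigned int node  = (hwirq >> 8) & 1;		/* IIC_IRQ_NODE_SHIFT */
	unsigned int type  = hwirq & 0xc0;		/* IIC_IRQ_TYPE_MASK */
	unsigned int class = (hwirq >> 4) & 0x3;	/* valid for normal interrupts */
	unsigned int unit  = hwirq & 0xf;

	/* 0x125 decodes to node 1, type normal (0x00), class 2, unit 5 */
	printf("node %u type 0x%02x class %u unit %u\n", node, type, class, unit);
	return 0;
}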
-
-extern void iic_init_IRQ(void);
-extern void iic_message_pass(int cpu, int msg);
-extern void iic_request_IPIs(void);
-extern void iic_setup_cpu(void);
-
-extern u8 iic_get_target_id(int cpu);
-
-extern void spider_init_IRQ(void);
-
-extern void iic_set_interrupt_routing(int cpu, int thread, int priority);
-
-#endif
-#endif /* ASM_CELL_PIC_H */
diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c
deleted file mode 100644
index 62c9679b8ca3..000000000000
--- a/arch/powerpc/platforms/cell/iommu.c
+++ /dev/null
@@ -1,1060 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * IOMMU implementation for Cell Broadband Processor Architecture
- *
- * (C) Copyright IBM Corporation 2006-2008
- *
- * Author: Jeremy Kerr <jk@ozlabs.org>
- */
-
-#undef DEBUG
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/irqdomain.h>
-#include <linux/notifier.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
-#include <linux/memblock.h>
-
-#include <asm/prom.h>
-#include <asm/iommu.h>
-#include <asm/machdep.h>
-#include <asm/pci-bridge.h>
-#include <asm/udbg.h>
-#include <asm/firmware.h>
-#include <asm/cell-regs.h>
-
-#include "cell.h"
-#include "interrupt.h"
-
-/* Define CELL_IOMMU_REAL_UNMAP to actually unmap non-used pages
- * instead of leaving them mapped to some dummy page. This can be
- * enabled once the appropriate workarounds for spider bugs have
- * been enabled
- */
-#define CELL_IOMMU_REAL_UNMAP
-
-/* Define CELL_IOMMU_STRICT_PROTECTION to enforce protection of
- * IO PTEs based on the transfer direction. That can be enabled
- * once spider-net has been fixed to pass the correct direction
- * to the DMA mapping functions
- */
-#define CELL_IOMMU_STRICT_PROTECTION
-
-
-#define NR_IOMMUS 2
-
-/* IOC mmap registers */
-#define IOC_Reg_Size 0x2000
-
-#define IOC_IOPT_CacheInvd 0x908
-#define IOC_IOPT_CacheInvd_NE_Mask 0xffe0000000000000ul
-#define IOC_IOPT_CacheInvd_IOPTE_Mask 0x000003fffffffff8ul
-#define IOC_IOPT_CacheInvd_Busy 0x0000000000000001ul
-
-#define IOC_IOST_Origin 0x918
-#define IOC_IOST_Origin_E 0x8000000000000000ul
-#define IOC_IOST_Origin_HW 0x0000000000000800ul
-#define IOC_IOST_Origin_HL 0x0000000000000400ul
-
-#define IOC_IO_ExcpStat 0x920
-#define IOC_IO_ExcpStat_V 0x8000000000000000ul
-#define IOC_IO_ExcpStat_SPF_Mask 0x6000000000000000ul
-#define IOC_IO_ExcpStat_SPF_S 0x6000000000000000ul
-#define IOC_IO_ExcpStat_SPF_P 0x2000000000000000ul
-#define IOC_IO_ExcpStat_ADDR_Mask 0x00000007fffff000ul
-#define IOC_IO_ExcpStat_RW_Mask 0x0000000000000800ul
-#define IOC_IO_ExcpStat_IOID_Mask 0x00000000000007fful
-
-#define IOC_IO_ExcpMask 0x928
-#define IOC_IO_ExcpMask_SFE 0x4000000000000000ul
-#define IOC_IO_ExcpMask_PFE 0x2000000000000000ul
-
-#define IOC_IOCmd_Offset 0x1000
-
-#define IOC_IOCmd_Cfg 0xc00
-#define IOC_IOCmd_Cfg_TE 0x0000800000000000ul
-
-
-/* Segment table entries */
-#define IOSTE_V 0x8000000000000000ul /* valid */
-#define IOSTE_H 0x4000000000000000ul /* cache hint */
-#define IOSTE_PT_Base_RPN_Mask 0x3ffffffffffff000ul /* base RPN of IOPT */
-#define IOSTE_NPPT_Mask 0x0000000000000fe0ul /* no. pages in IOPT */
-#define IOSTE_PS_Mask 0x0000000000000007ul /* page size */
-#define IOSTE_PS_4K 0x0000000000000001ul /* - 4kB */
-#define IOSTE_PS_64K 0x0000000000000003ul /* - 64kB */
-#define IOSTE_PS_1M 0x0000000000000005ul /* - 1MB */
-#define IOSTE_PS_16M 0x0000000000000007ul /* - 16MB */
-
-
-/* IOMMU sizing */
-#define IO_SEGMENT_SHIFT 28
-#define IO_PAGENO_BITS(shift) (IO_SEGMENT_SHIFT - (shift))
-
-/* The high bit needs to be set on every DMA address */
-#define SPIDER_DMA_OFFSET 0x80000000ul
-
-struct iommu_window {
- struct list_head list;
- struct cbe_iommu *iommu;
- unsigned long offset;
- unsigned long size;
- unsigned int ioid;
- struct iommu_table table;
-};
-
-#define NAMESIZE 8
-struct cbe_iommu {
- int nid;
- char name[NAMESIZE];
- void __iomem *xlate_regs;
- void __iomem *cmd_regs;
- unsigned long *stab;
- unsigned long *ptab;
- void *pad_page;
- struct list_head windows;
-};
-
-/* Static array of iommus, one per node
- * each contains a list of windows, keyed from dma_window property
- * - on bus setup, look for a matching window, or create one
- * - on dev setup, assign iommu_table ptr
- */
-static struct cbe_iommu iommus[NR_IOMMUS];
-static int cbe_nr_iommus;
-
-static void invalidate_tce_cache(struct cbe_iommu *iommu, unsigned long *pte,
- long n_ptes)
-{
- u64 __iomem *reg;
- u64 val;
- long n;
-
- reg = iommu->xlate_regs + IOC_IOPT_CacheInvd;
-
- while (n_ptes > 0) {
- /* we can invalidate up to 1 << 11 PTEs at once */
- n = min(n_ptes, 1l << 11);
- val = (((n /*- 1*/) << 53) & IOC_IOPT_CacheInvd_NE_Mask)
- | (__pa(pte) & IOC_IOPT_CacheInvd_IOPTE_Mask)
- | IOC_IOPT_CacheInvd_Busy;
-
- out_be64(reg, val);
- while (in_be64(reg) & IOC_IOPT_CacheInvd_Busy)
- ;
-
- n_ptes -= n;
- pte += n;
- }
-}
-
-static int tce_build_cell(struct iommu_table *tbl, long index, long npages,
- unsigned long uaddr, enum dma_data_direction direction,
- unsigned long attrs)
-{
- int i;
- unsigned long *io_pte, base_pte;
- struct iommu_window *window =
- container_of(tbl, struct iommu_window, table);
-
- /* implementing proper protection causes problems with the spidernet
- * driver - check mapping directions later, but allow read & write by
- * default for now.*/
-#ifdef CELL_IOMMU_STRICT_PROTECTION
- /* to avoid referencing a global, we use a trick here to setup the
- * protection bit. "prot" is setup to be 3 fields of 4 bits appended
- * together for each of the 3 supported direction values. It is then
- * shifted left so that the fields matching the desired direction
-	 * land on the appropriate bits, and other bits are masked out.
- */
- const unsigned long prot = 0xc48;
- base_pte =
- ((prot << (52 + 4 * direction)) &
- (CBE_IOPTE_PP_W | CBE_IOPTE_PP_R)) |
- CBE_IOPTE_M | CBE_IOPTE_SO_RW |
- (window->ioid & CBE_IOPTE_IOID_Mask);
-#else
- base_pte = CBE_IOPTE_PP_W | CBE_IOPTE_PP_R | CBE_IOPTE_M |
- CBE_IOPTE_SO_RW | (window->ioid & CBE_IOPTE_IOID_Mask);
-#endif
- if (unlikely(attrs & DMA_ATTR_WEAK_ORDERING))
- base_pte &= ~CBE_IOPTE_SO_RW;
-
- io_pte = (unsigned long *)tbl->it_base + (index - tbl->it_offset);
-
- for (i = 0; i < npages; i++, uaddr += (1 << tbl->it_page_shift))
- io_pte[i] = base_pte | (__pa(uaddr) & CBE_IOPTE_RPN_Mask);
-
- mb();
-
- invalidate_tce_cache(window->iommu, io_pte, npages);
-
- pr_debug("tce_build_cell(index=%lx,n=%lx,dir=%d,base_pte=%lx)\n",
- index, npages, direction, base_pte);
- return 0;
-}
-
-static void tce_free_cell(struct iommu_table *tbl, long index, long npages)
-{
-
- int i;
- unsigned long *io_pte, pte;
- struct iommu_window *window =
- container_of(tbl, struct iommu_window, table);
-
- pr_debug("tce_free_cell(index=%lx,n=%lx)\n", index, npages);
-
-#ifdef CELL_IOMMU_REAL_UNMAP
- pte = 0;
-#else
- /* spider bridge does PCI reads after freeing - insert a mapping
- * to a scratch page instead of an invalid entry */
- pte = CBE_IOPTE_PP_R | CBE_IOPTE_M | CBE_IOPTE_SO_RW |
- __pa(window->iommu->pad_page) |
- (window->ioid & CBE_IOPTE_IOID_Mask);
-#endif
-
- io_pte = (unsigned long *)tbl->it_base + (index - tbl->it_offset);
-
- for (i = 0; i < npages; i++)
- io_pte[i] = pte;
-
- mb();
-
- invalidate_tce_cache(window->iommu, io_pte, npages);
-}
-
-static irqreturn_t ioc_interrupt(int irq, void *data)
-{
- unsigned long stat, spf;
- struct cbe_iommu *iommu = data;
-
- stat = in_be64(iommu->xlate_regs + IOC_IO_ExcpStat);
- spf = stat & IOC_IO_ExcpStat_SPF_Mask;
-
- /* Might want to rate limit it */
- printk(KERN_ERR "iommu: DMA exception 0x%016lx\n", stat);
- printk(KERN_ERR " V=%d, SPF=[%c%c], RW=%s, IOID=0x%04x\n",
- !!(stat & IOC_IO_ExcpStat_V),
- (spf == IOC_IO_ExcpStat_SPF_S) ? 'S' : ' ',
- (spf == IOC_IO_ExcpStat_SPF_P) ? 'P' : ' ',
- (stat & IOC_IO_ExcpStat_RW_Mask) ? "Read" : "Write",
- (unsigned int)(stat & IOC_IO_ExcpStat_IOID_Mask));
- printk(KERN_ERR " page=0x%016lx\n",
- stat & IOC_IO_ExcpStat_ADDR_Mask);
-
- /* clear interrupt */
- stat &= ~IOC_IO_ExcpStat_V;
- out_be64(iommu->xlate_regs + IOC_IO_ExcpStat, stat);
-
- return IRQ_HANDLED;
-}
-
-static int __init cell_iommu_find_ioc(int nid, unsigned long *base)
-{
- struct device_node *np;
- struct resource r;
-
- *base = 0;
-
- /* First look for new style /be nodes */
- for_each_node_by_name(np, "ioc") {
- if (of_node_to_nid(np) != nid)
- continue;
- if (of_address_to_resource(np, 0, &r)) {
- printk(KERN_ERR "iommu: can't get address for %pOF\n",
- np);
- continue;
- }
- *base = r.start;
- of_node_put(np);
- return 0;
- }
-
- /* Ok, let's try the old way */
- for_each_node_by_type(np, "cpu") {
- const unsigned int *nidp;
- const unsigned long *tmp;
-
- nidp = of_get_property(np, "node-id", NULL);
- if (nidp && *nidp == nid) {
- tmp = of_get_property(np, "ioc-translation", NULL);
- if (tmp) {
- *base = *tmp;
- of_node_put(np);
- return 0;
- }
- }
- }
-
- return -ENODEV;
-}
-
-static void __init cell_iommu_setup_stab(struct cbe_iommu *iommu,
- unsigned long dbase, unsigned long dsize,
- unsigned long fbase, unsigned long fsize)
-{
- struct page *page;
- unsigned long segments, stab_size;
-
- segments = max(dbase + dsize, fbase + fsize) >> IO_SEGMENT_SHIFT;
-
- pr_debug("%s: iommu[%d]: segments: %lu\n",
- __func__, iommu->nid, segments);
-
- /* set up the segment table */
- stab_size = segments * sizeof(unsigned long);
- page = alloc_pages_node(iommu->nid, GFP_KERNEL, get_order(stab_size));
- BUG_ON(!page);
- iommu->stab = page_address(page);
- memset(iommu->stab, 0, stab_size);
-}
-
-static unsigned long *__init cell_iommu_alloc_ptab(struct cbe_iommu *iommu,
- unsigned long base, unsigned long size, unsigned long gap_base,
- unsigned long gap_size, unsigned long page_shift)
-{
- struct page *page;
- int i;
- unsigned long reg, segments, pages_per_segment, ptab_size,
- n_pte_pages, start_seg, *ptab;
-
- start_seg = base >> IO_SEGMENT_SHIFT;
- segments = size >> IO_SEGMENT_SHIFT;
- pages_per_segment = 1ull << IO_PAGENO_BITS(page_shift);
- /* PTEs for each segment must start on a 4K boundary */
- pages_per_segment = max(pages_per_segment,
- (1 << 12) / sizeof(unsigned long));
-
- ptab_size = segments * pages_per_segment * sizeof(unsigned long);
- pr_debug("%s: iommu[%d]: ptab_size: %lu, order: %d\n", __func__,
- iommu->nid, ptab_size, get_order(ptab_size));
- page = alloc_pages_node(iommu->nid, GFP_KERNEL, get_order(ptab_size));
- BUG_ON(!page);
-
- ptab = page_address(page);
- memset(ptab, 0, ptab_size);
-
- /* number of 4K pages needed for a page table */
- n_pte_pages = (pages_per_segment * sizeof(unsigned long)) >> 12;
-
- pr_debug("%s: iommu[%d]: stab at %p, ptab at %p, n_pte_pages: %lu\n",
- __func__, iommu->nid, iommu->stab, ptab,
- n_pte_pages);
-
- /* initialise the STEs */
- reg = IOSTE_V | ((n_pte_pages - 1) << 5);
-
- switch (page_shift) {
- case 12: reg |= IOSTE_PS_4K; break;
- case 16: reg |= IOSTE_PS_64K; break;
- case 20: reg |= IOSTE_PS_1M; break;
- case 24: reg |= IOSTE_PS_16M; break;
- default: BUG();
- }
-
- gap_base = gap_base >> IO_SEGMENT_SHIFT;
- gap_size = gap_size >> IO_SEGMENT_SHIFT;
-
- pr_debug("Setting up IOMMU stab:\n");
- for (i = start_seg; i < (start_seg + segments); i++) {
- if (i >= gap_base && i < (gap_base + gap_size)) {
- pr_debug("\toverlap at %d, skipping\n", i);
- continue;
- }
- iommu->stab[i] = reg | (__pa(ptab) + (n_pte_pages << 12) *
- (i - start_seg));
- pr_debug("\t[%d] 0x%016lx\n", i, iommu->stab[i]);
- }
-
- return ptab;
-}
-
-static void __init cell_iommu_enable_hardware(struct cbe_iommu *iommu)
-{
- int ret;
- unsigned long reg, xlate_base;
- unsigned int virq;
-
- if (cell_iommu_find_ioc(iommu->nid, &xlate_base))
- panic("%s: missing IOC register mappings for node %d\n",
- __func__, iommu->nid);
-
- iommu->xlate_regs = ioremap(xlate_base, IOC_Reg_Size);
- iommu->cmd_regs = iommu->xlate_regs + IOC_IOCmd_Offset;
-
- /* ensure that the STEs have updated */
- mb();
-
- /* setup interrupts for the iommu. */
- reg = in_be64(iommu->xlate_regs + IOC_IO_ExcpStat);
- out_be64(iommu->xlate_regs + IOC_IO_ExcpStat,
- reg & ~IOC_IO_ExcpStat_V);
- out_be64(iommu->xlate_regs + IOC_IO_ExcpMask,
- IOC_IO_ExcpMask_PFE | IOC_IO_ExcpMask_SFE);
-
- virq = irq_create_mapping(NULL,
- IIC_IRQ_IOEX_ATI | (iommu->nid << IIC_IRQ_NODE_SHIFT));
- BUG_ON(!virq);
-
- ret = request_irq(virq, ioc_interrupt, 0, iommu->name, iommu);
- BUG_ON(ret);
-
- /* set the IOC segment table origin register (and turn on the iommu) */
- reg = IOC_IOST_Origin_E | __pa(iommu->stab) | IOC_IOST_Origin_HW;
- out_be64(iommu->xlate_regs + IOC_IOST_Origin, reg);
- in_be64(iommu->xlate_regs + IOC_IOST_Origin);
-
- /* turn on IO translation */
- reg = in_be64(iommu->cmd_regs + IOC_IOCmd_Cfg) | IOC_IOCmd_Cfg_TE;
- out_be64(iommu->cmd_regs + IOC_IOCmd_Cfg, reg);
-}
-
-static void __init cell_iommu_setup_hardware(struct cbe_iommu *iommu,
- unsigned long base, unsigned long size)
-{
- cell_iommu_setup_stab(iommu, base, size, 0, 0);
- iommu->ptab = cell_iommu_alloc_ptab(iommu, base, size, 0, 0,
- IOMMU_PAGE_SHIFT_4K);
- cell_iommu_enable_hardware(iommu);
-}
-
-static inline u32 cell_iommu_get_ioid(struct device_node *np)
-{
- const u32 *ioid;
-
- ioid = of_get_property(np, "ioid", NULL);
- if (ioid == NULL) {
- printk(KERN_WARNING "iommu: missing ioid for %pOF using 0\n",
- np);
- return 0;
- }
-
- return *ioid;
-}
-
-static struct iommu_table_ops cell_iommu_ops = {
- .set = tce_build_cell,
- .clear = tce_free_cell
-};
-
-static struct iommu_window * __init
-cell_iommu_setup_window(struct cbe_iommu *iommu, struct device_node *np,
- unsigned long offset, unsigned long size,
- unsigned long pte_offset)
-{
- struct iommu_window *window;
- struct page *page;
- u32 ioid;
-
- ioid = cell_iommu_get_ioid(np);
-
- window = kzalloc_node(sizeof(*window), GFP_KERNEL, iommu->nid);
- BUG_ON(window == NULL);
-
- window->offset = offset;
- window->size = size;
- window->ioid = ioid;
- window->iommu = iommu;
-
- window->table.it_blocksize = 16;
- window->table.it_base = (unsigned long)iommu->ptab;
- window->table.it_index = iommu->nid;
- window->table.it_page_shift = IOMMU_PAGE_SHIFT_4K;
- window->table.it_offset =
- (offset >> window->table.it_page_shift) + pte_offset;
- window->table.it_size = size >> window->table.it_page_shift;
- window->table.it_ops = &cell_iommu_ops;
-
- if (!iommu_init_table(&window->table, iommu->nid, 0, 0))
- panic("Failed to initialize iommu table");
-
- pr_debug("\tioid %d\n", window->ioid);
- pr_debug("\tblocksize %ld\n", window->table.it_blocksize);
- pr_debug("\tbase 0x%016lx\n", window->table.it_base);
- pr_debug("\toffset 0x%lx\n", window->table.it_offset);
- pr_debug("\tsize %ld\n", window->table.it_size);
-
- list_add(&window->list, &iommu->windows);
-
- if (offset != 0)
- return window;
-
- /* We need to map and reserve the first IOMMU page since it's used
- * by the spider workaround. In theory, we only need to do that when
- * running on spider but it doesn't really matter.
- *
- * This code also assumes that we have a window that starts at 0,
- * which is the case on all spider based blades.
- */
- page = alloc_pages_node(iommu->nid, GFP_KERNEL, 0);
- BUG_ON(!page);
- iommu->pad_page = page_address(page);
- clear_page(iommu->pad_page);
-
- __set_bit(0, window->table.it_map);
- tce_build_cell(&window->table, window->table.it_offset, 1,
- (unsigned long)iommu->pad_page, DMA_TO_DEVICE, 0);
-
- return window;
-}
-
-static struct cbe_iommu *cell_iommu_for_node(int nid)
-{
- int i;
-
- for (i = 0; i < cbe_nr_iommus; i++)
- if (iommus[i].nid == nid)
- return &iommus[i];
- return NULL;
-}
-
-static unsigned long cell_dma_nommu_offset;
-
-static unsigned long dma_iommu_fixed_base;
-static bool cell_iommu_enabled;
-
-/* iommu_fixed_is_weak is set if booted with iommu_fixed=weak */
-bool iommu_fixed_is_weak;
-
-static struct iommu_table *cell_get_iommu_table(struct device *dev)
-{
- struct iommu_window *window;
- struct cbe_iommu *iommu;
-
- /* Current implementation uses the first window available in that
- * node's iommu. We -might- do something smarter later though it may
- * never be necessary
- */
- iommu = cell_iommu_for_node(dev_to_node(dev));
- if (iommu == NULL || list_empty(&iommu->windows)) {
- dev_err(dev, "iommu: missing iommu for %pOF (node %d)\n",
- dev->of_node, dev_to_node(dev));
- return NULL;
- }
- window = list_entry(iommu->windows.next, struct iommu_window, list);
-
- return &window->table;
-}
-
-static u64 cell_iommu_get_fixed_address(struct device *dev);
-
-static void cell_dma_dev_setup(struct device *dev)
-{
- if (cell_iommu_enabled) {
- u64 addr = cell_iommu_get_fixed_address(dev);
-
- if (addr != OF_BAD_ADDR)
- dev->archdata.dma_offset = addr + dma_iommu_fixed_base;
- set_iommu_table_base(dev, cell_get_iommu_table(dev));
- } else {
- dev->archdata.dma_offset = cell_dma_nommu_offset;
- }
-}
-
-static void cell_pci_dma_dev_setup(struct pci_dev *dev)
-{
- cell_dma_dev_setup(&dev->dev);
-}
-
-static int cell_of_bus_notify(struct notifier_block *nb, unsigned long action,
- void *data)
-{
- struct device *dev = data;
-
- /* We are only interested in device addition */
- if (action != BUS_NOTIFY_ADD_DEVICE)
- return 0;
-
- if (cell_iommu_enabled)
- dev->dma_ops = &dma_iommu_ops;
- cell_dma_dev_setup(dev);
- return 0;
-}
-
-static struct notifier_block cell_of_bus_notifier = {
- .notifier_call = cell_of_bus_notify
-};
-
-static int __init cell_iommu_get_window(struct device_node *np,
- unsigned long *base,
- unsigned long *size)
-{
- const __be32 *dma_window;
- unsigned long index;
-
- /* Use ibm,dma-window if available, else, hard code ! */
- dma_window = of_get_property(np, "ibm,dma-window", NULL);
- if (dma_window == NULL) {
- *base = 0;
- *size = 0x80000000u;
- return -ENODEV;
- }
-
- of_parse_dma_window(np, dma_window, &index, base, size);
- return 0;
-}
-
-static struct cbe_iommu * __init cell_iommu_alloc(struct device_node *np)
-{
- struct cbe_iommu *iommu;
- int nid, i;
-
- /* Get node ID */
- nid = of_node_to_nid(np);
- if (nid < 0) {
- printk(KERN_ERR "iommu: failed to get node for %pOF\n",
- np);
- return NULL;
- }
- pr_debug("iommu: setting up iommu for node %d (%pOF)\n",
- nid, np);
-
- /* XXX todo: If we can have multiple windows on the same IOMMU, which
- * isn't the case today, we probably want here to check whether the
- * iommu for that node is already setup.
-	 * However, there might be an issue with getting the size right so let's
- * ignore that for now. We might want to completely get rid of the
- * multiple window support since the cell iommu supports per-page ioids
- */
-
- if (cbe_nr_iommus >= NR_IOMMUS) {
- printk(KERN_ERR "iommu: too many IOMMUs detected ! (%pOF)\n",
- np);
- return NULL;
- }
-
- /* Init base fields */
- i = cbe_nr_iommus++;
- iommu = &iommus[i];
- iommu->stab = NULL;
- iommu->nid = nid;
- snprintf(iommu->name, sizeof(iommu->name), "iommu%d", i);
- INIT_LIST_HEAD(&iommu->windows);
-
- return iommu;
-}
-
-static void __init cell_iommu_init_one(struct device_node *np,
- unsigned long offset)
-{
- struct cbe_iommu *iommu;
- unsigned long base, size;
-
- iommu = cell_iommu_alloc(np);
- if (!iommu)
- return;
-
- /* Obtain a window for it */
- cell_iommu_get_window(np, &base, &size);
-
- pr_debug("\ttranslating window 0x%lx...0x%lx\n",
- base, base + size - 1);
-
- /* Initialize the hardware */
- cell_iommu_setup_hardware(iommu, base, size);
-
- /* Setup the iommu_table */
- cell_iommu_setup_window(iommu, np, base, size,
- offset >> IOMMU_PAGE_SHIFT_4K);
-}
-
-static void __init cell_disable_iommus(void)
-{
- int node;
- unsigned long base, val;
- void __iomem *xregs, *cregs;
-
- /* Make sure IOC translation is disabled on all nodes */
- for_each_online_node(node) {
- if (cell_iommu_find_ioc(node, &base))
- continue;
- xregs = ioremap(base, IOC_Reg_Size);
- if (xregs == NULL)
- continue;
- cregs = xregs + IOC_IOCmd_Offset;
-
- pr_debug("iommu: cleaning up iommu on node %d\n", node);
-
- out_be64(xregs + IOC_IOST_Origin, 0);
- (void)in_be64(xregs + IOC_IOST_Origin);
- val = in_be64(cregs + IOC_IOCmd_Cfg);
- val &= ~IOC_IOCmd_Cfg_TE;
- out_be64(cregs + IOC_IOCmd_Cfg, val);
- (void)in_be64(cregs + IOC_IOCmd_Cfg);
-
- iounmap(xregs);
- }
-}
-
-static int __init cell_iommu_init_disabled(void)
-{
- struct device_node *np = NULL;
- unsigned long base = 0, size;
-
- /* When no iommu is present, we use direct DMA ops */
-
- /* First make sure all IOC translation is turned off */
- cell_disable_iommus();
-
- /* If we have no Axon, we set up the spider DMA magic offset */
- np = of_find_node_by_name(NULL, "axon");
- if (!np)
- cell_dma_nommu_offset = SPIDER_DMA_OFFSET;
- of_node_put(np);
-
- /* Now we need to check to see where the memory is mapped
- * in PCI space. We assume that all busses use the same dma
- * window which is always the case so far on Cell, thus we
- * pick up the first pci-internal node we can find and check
- * the DMA window from there.
- */
- for_each_node_by_name(np, "axon") {
- if (np->parent == NULL || np->parent->parent != NULL)
- continue;
- if (cell_iommu_get_window(np, &base, &size) == 0)
- break;
- }
- if (np == NULL) {
- for_each_node_by_name(np, "pci-internal") {
- if (np->parent == NULL || np->parent->parent != NULL)
- continue;
- if (cell_iommu_get_window(np, &base, &size) == 0)
- break;
- }
- }
- of_node_put(np);
-
- /* If we found a DMA window, we check if it's big enough to enclose
- * all of physical memory. If not, we force enable IOMMU
- */
- if (np && size < memblock_end_of_DRAM()) {
- printk(KERN_WARNING "iommu: force-enabled, dma window"
- " (%ldMB) smaller than total memory (%lldMB)\n",
- size >> 20, memblock_end_of_DRAM() >> 20);
- return -ENODEV;
- }
-
- cell_dma_nommu_offset += base;
-
- if (cell_dma_nommu_offset != 0)
- cell_pci_controller_ops.dma_dev_setup = cell_pci_dma_dev_setup;
-
- printk("iommu: disabled, direct DMA offset is 0x%lx\n",
- cell_dma_nommu_offset);
-
- return 0;
-}
-
-/*
- * Fixed IOMMU mapping support
- *
- * This code adds support for setting up a fixed IOMMU mapping on certain
- * cell machines. For 64-bit devices this avoids the performance overhead of
- * mapping and unmapping pages at runtime. 32-bit devices are unable to use
- * the fixed mapping.
- *
- * The fixed mapping is established at boot, and maps all of physical memory
- * 1:1 into device space at some offset. On machines with < 30 GB of memory
- * we setup the fixed mapping immediately above the normal IOMMU window.
- *
- * For example a machine with 4GB of memory would end up with the normal
- * IOMMU window from 0-2GB and the fixed mapping window from 2GB to 6GB. In
- * this case a 64-bit device wishing to DMA to 1GB would be told to DMA to
- * 3GB, plus any offset required by firmware. The firmware offset is encoded
- * in the "dma-ranges" property.
- *
- * On machines with 30GB or more of memory, we are unable to place the fixed
- * mapping above the normal IOMMU window as we would run out of address space.
- * Instead we move the normal IOMMU window to coincide with the hash page
- * table, this region does not need to be part of the fixed mapping as no
- * device should ever be DMA'ing to it. We then setup the fixed mapping
- * from 0 to 32GB.
- */
-
-static u64 cell_iommu_get_fixed_address(struct device *dev)
-{
- u64 best_size, dev_addr = OF_BAD_ADDR;
- struct device_node *np;
- struct of_range_parser parser;
- struct of_range range;
-
- /* We can be called for platform devices that have no of_node */
- np = of_node_get(dev->of_node);
- if (!np)
- goto out;
-
- while ((np = of_get_next_parent(np))) {
- if (of_pci_dma_range_parser_init(&parser, np))
- continue;
-
- if (of_range_count(&parser))
- break;
- }
-
- if (!np) {
- dev_dbg(dev, "iommu: no dma-ranges found\n");
- goto out;
- }
-
- best_size = 0;
- for_each_of_range(&parser, &range) {
- if (!range.cpu_addr)
- continue;
-
- if (range.size > best_size) {
- best_size = range.size;
- dev_addr = range.bus_addr;
- }
- }
-
- if (!best_size)
- dev_dbg(dev, "iommu: no suitable range found!\n");
-
-out:
- of_node_put(np);
-
- return dev_addr;
-}
-
-static bool cell_pci_iommu_bypass_supported(struct pci_dev *pdev, u64 mask)
-{
- return mask == DMA_BIT_MASK(64) &&
- cell_iommu_get_fixed_address(&pdev->dev) != OF_BAD_ADDR;
-}
-
-static void __init insert_16M_pte(unsigned long addr, unsigned long *ptab,
- unsigned long base_pte)
-{
- unsigned long segment, offset;
-
- segment = addr >> IO_SEGMENT_SHIFT;
- offset = (addr >> 24) - (segment << IO_PAGENO_BITS(24));
- ptab = ptab + (segment * (1 << 12) / sizeof(unsigned long));
-
- pr_debug("iommu: addr %lx ptab %p segment %lx offset %lx\n",
- addr, ptab, segment, offset);
-
- ptab[offset] = base_pte | (__pa(addr) & CBE_IOPTE_RPN_Mask);
-}
-
-static void __init cell_iommu_setup_fixed_ptab(struct cbe_iommu *iommu,
- struct device_node *np, unsigned long dbase, unsigned long dsize,
- unsigned long fbase, unsigned long fsize)
-{
- unsigned long base_pte, uaddr, ioaddr, *ptab;
-
- ptab = cell_iommu_alloc_ptab(iommu, fbase, fsize, dbase, dsize, 24);
-
- dma_iommu_fixed_base = fbase;
-
- pr_debug("iommu: mapping 0x%lx pages from 0x%lx\n", fsize, fbase);
-
- base_pte = CBE_IOPTE_PP_W | CBE_IOPTE_PP_R | CBE_IOPTE_M |
- (cell_iommu_get_ioid(np) & CBE_IOPTE_IOID_Mask);
-
- if (iommu_fixed_is_weak)
- pr_info("IOMMU: Using weak ordering for fixed mapping\n");
- else {
- pr_info("IOMMU: Using strong ordering for fixed mapping\n");
- base_pte |= CBE_IOPTE_SO_RW;
- }
-
- for (uaddr = 0; uaddr < fsize; uaddr += (1 << 24)) {
- /* Don't touch the dynamic region */
- ioaddr = uaddr + fbase;
- if (ioaddr >= dbase && ioaddr < (dbase + dsize)) {
- pr_debug("iommu: fixed/dynamic overlap, skipping\n");
- continue;
- }
-
- insert_16M_pte(uaddr, ptab, base_pte);
- }
-
- mb();
-}
-
-static int __init cell_iommu_fixed_mapping_init(void)
-{
- unsigned long dbase, dsize, fbase, fsize, hbase, hend;
- struct cbe_iommu *iommu;
- struct device_node *np;
-
- /* The fixed mapping is only supported on axon machines */
- np = of_find_node_by_name(NULL, "axon");
- of_node_put(np);
-
- if (!np) {
- pr_debug("iommu: fixed mapping disabled, no axons found\n");
- return -1;
- }
-
- /* We must have dma-ranges properties for fixed mapping to work */
- np = of_find_node_with_property(NULL, "dma-ranges");
- of_node_put(np);
-
- if (!np) {
- pr_debug("iommu: no dma-ranges found, no fixed mapping\n");
- return -1;
- }
-
- /* The default setup is to have the fixed mapping sit after the
- * dynamic region, so find the top of the largest IOMMU window
- * on any axon, then add the size of RAM and that's our max value.
-	 * If that is > 32GB we have to do other shenanigans.
- */
- fbase = 0;
- for_each_node_by_name(np, "axon") {
- cell_iommu_get_window(np, &dbase, &dsize);
- fbase = max(fbase, dbase + dsize);
- }
-
- fbase = ALIGN(fbase, 1 << IO_SEGMENT_SHIFT);
- fsize = memblock_phys_mem_size();
-
- if ((fbase + fsize) <= 0x800000000ul)
- hbase = 0; /* use the device tree window */
- else {
- /* If we're over 32 GB we need to cheat. We can't map all of
- * RAM with the fixed mapping, and also fit the dynamic
- * region. So try to place the dynamic region where the hash
- * table sits, drivers never need to DMA to it, we don't
- * need a fixed mapping for that area.
- */
- if (!htab_address) {
- pr_debug("iommu: htab is NULL, on LPAR? Huh?\n");
- return -1;
- }
- hbase = __pa(htab_address);
- hend = hbase + htab_size_bytes;
-
- /* The window must start and end on a segment boundary */
- if ((hbase != ALIGN(hbase, 1 << IO_SEGMENT_SHIFT)) ||
- (hend != ALIGN(hend, 1 << IO_SEGMENT_SHIFT))) {
- pr_debug("iommu: hash window not segment aligned\n");
- return -1;
- }
-
- /* Check the hash window fits inside the real DMA window */
- for_each_node_by_name(np, "axon") {
- cell_iommu_get_window(np, &dbase, &dsize);
-
- if (hbase < dbase || (hend > (dbase + dsize))) {
-				pr_debug("iommu: hash window doesn't fit in "
-					 "real DMA window\n");
- of_node_put(np);
- return -1;
- }
- }
-
- fbase = 0;
- }
-
- /* Setup the dynamic regions */
- for_each_node_by_name(np, "axon") {
- iommu = cell_iommu_alloc(np);
- BUG_ON(!iommu);
-
- if (hbase == 0)
- cell_iommu_get_window(np, &dbase, &dsize);
- else {
- dbase = hbase;
- dsize = htab_size_bytes;
- }
-
- printk(KERN_DEBUG "iommu: node %d, dynamic window 0x%lx-0x%lx "
- "fixed window 0x%lx-0x%lx\n", iommu->nid, dbase,
- dbase + dsize, fbase, fbase + fsize);
-
- cell_iommu_setup_stab(iommu, dbase, dsize, fbase, fsize);
- iommu->ptab = cell_iommu_alloc_ptab(iommu, dbase, dsize, 0, 0,
- IOMMU_PAGE_SHIFT_4K);
- cell_iommu_setup_fixed_ptab(iommu, np, dbase, dsize,
- fbase, fsize);
- cell_iommu_enable_hardware(iommu);
- cell_iommu_setup_window(iommu, np, dbase, dsize, 0);
- }
-
- cell_pci_controller_ops.iommu_bypass_supported =
- cell_pci_iommu_bypass_supported;
- return 0;
-}
-
-static int iommu_fixed_disabled;
-
-static int __init setup_iommu_fixed(char *str)
-{
- struct device_node *pciep;
-
- if (strcmp(str, "off") == 0)
- iommu_fixed_disabled = 1;
-
- /* If we can find a pcie-endpoint in the device tree assume that
- * we're on a triblade or a CAB so by default the fixed mapping
- * should be set to be weakly ordered; but only if the boot
- * option WASN'T set for strong ordering
- */
- pciep = of_find_node_by_type(NULL, "pcie-endpoint");
-
- if (strcmp(str, "weak") == 0 || (pciep && strcmp(str, "strong") != 0))
- iommu_fixed_is_weak = true;
-
- of_node_put(pciep);
-
- return 1;
-}
-__setup("iommu_fixed=", setup_iommu_fixed);
-
-static int __init cell_iommu_init(void)
-{
- struct device_node *np;
-
- /* If IOMMU is disabled or we have little enough RAM to not need
- * to enable it, we setup a direct mapping.
- *
- * Note: should we make sure we have the IOMMU actually disabled ?
- */
- if (iommu_is_off ||
- (!iommu_force_on && memblock_end_of_DRAM() <= 0x80000000ull))
- if (cell_iommu_init_disabled() == 0)
- goto bail;
-
- /* Setup various callbacks */
- cell_pci_controller_ops.dma_dev_setup = cell_pci_dma_dev_setup;
-
- if (!iommu_fixed_disabled && cell_iommu_fixed_mapping_init() == 0)
- goto done;
-
- /* Create an iommu for each /axon node. */
- for_each_node_by_name(np, "axon") {
- if (np->parent == NULL || np->parent->parent != NULL)
- continue;
- cell_iommu_init_one(np, 0);
- }
-
- /* Create an iommu for each toplevel /pci-internal node for
- * old hardware/firmware
- */
- for_each_node_by_name(np, "pci-internal") {
- if (np->parent == NULL || np->parent->parent != NULL)
- continue;
- cell_iommu_init_one(np, SPIDER_DMA_OFFSET);
- }
- done:
- /* Setup default PCI iommu ops */
- set_pci_dma_ops(&dma_iommu_ops);
- cell_iommu_enabled = true;
- bail:
- /* Register callbacks on OF platform device addition/removal
- * to handle linking them to the right DMA operations
- */
- bus_register_notifier(&platform_bus_type, &cell_of_bus_notifier);
-
- return 0;
-}
-machine_arch_initcall(cell, cell_iommu_init);
diff --git a/arch/powerpc/platforms/cell/pervasive.c b/arch/powerpc/platforms/cell/pervasive.c
deleted file mode 100644
index 58d967ee38b3..000000000000
--- a/arch/powerpc/platforms/cell/pervasive.c
+++ /dev/null
@@ -1,125 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * CBE Pervasive Monitor and Debug
- *
- * (C) Copyright IBM Corporation 2005
- *
- * Authors: Maximino Aguilar (maguilar@us.ibm.com)
- * Michael N. Day (mnday@us.ibm.com)
- */
-
-#undef DEBUG
-
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/percpu.h>
-#include <linux/types.h>
-#include <linux/kallsyms.h>
-#include <linux/pgtable.h>
-
-#include <asm/io.h>
-#include <asm/machdep.h>
-#include <asm/reg.h>
-#include <asm/cell-regs.h>
-#include <asm/cpu_has_feature.h>
-
-#include "pervasive.h"
-#include "ras.h"
-
-static void cbe_power_save(void)
-{
- unsigned long ctrl, thread_switch_control;
-
- /* Ensure our interrupt state is properly tracked */
- if (!prep_irq_for_idle())
- return;
-
- ctrl = mfspr(SPRN_CTRLF);
-
- /* Enable DEC and EE interrupt request */
- thread_switch_control = mfspr(SPRN_TSC_CELL);
- thread_switch_control |= TSC_CELL_EE_ENABLE | TSC_CELL_EE_BOOST;
-
- switch (ctrl & CTRL_CT) {
- case CTRL_CT0:
- thread_switch_control |= TSC_CELL_DEC_ENABLE_0;
- break;
- case CTRL_CT1:
- thread_switch_control |= TSC_CELL_DEC_ENABLE_1;
- break;
- default:
- printk(KERN_WARNING "%s: unknown configuration\n",
- __func__);
- break;
- }
- mtspr(SPRN_TSC_CELL, thread_switch_control);
-
- /*
- * go into low thread priority, medium priority will be
- * restored for us after wake-up.
- */
- HMT_low();
-
- /*
- * atomically disable thread execution and runlatch.
- * External and Decrementer exceptions are still handled when the
- * thread is disabled but now enter in cbe_system_reset_exception()
- */
- ctrl &= ~(CTRL_RUNLATCH | CTRL_TE);
- mtspr(SPRN_CTRLT, ctrl);
-
- /* Re-enable interrupts in MSR */
- __hard_irq_enable();
-}
-
-static int cbe_system_reset_exception(struct pt_regs *regs)
-{
- switch (regs->msr & SRR1_WAKEMASK) {
- case SRR1_WAKEDEC:
- set_dec(1);
- break;
- case SRR1_WAKEEE:
- /*
- * Handle these when interrupts get re-enabled and we take
- * them as regular exceptions. We are in an NMI context
- * and can't handle these here.
- */
- break;
- case SRR1_WAKEMT:
- return cbe_sysreset_hack();
-#ifdef CONFIG_CBE_RAS
- case SRR1_WAKESYSERR:
- cbe_system_error_exception(regs);
- break;
- case SRR1_WAKETHERM:
- cbe_thermal_exception(regs);
- break;
-#endif /* CONFIG_CBE_RAS */
- default:
- /* do system reset */
- return 0;
- }
- /* everything handled */
- return 1;
-}
-
-void __init cbe_pervasive_init(void)
-{
- int cpu;
-
- if (!cpu_has_feature(CPU_FTR_PAUSE_ZERO))
- return;
-
- for_each_possible_cpu(cpu) {
- struct cbe_pmd_regs __iomem *regs = cbe_get_cpu_pmd_regs(cpu);
- if (!regs)
- continue;
-
- /* Enable Pause(0) control bit */
- out_be64(&regs->pmcr, in_be64(&regs->pmcr) |
- CBE_PMD_PAUSE_ZERO_CONTROL);
- }
-
- ppc_md.power_save = cbe_power_save;
- ppc_md.system_reset_exception = cbe_system_reset_exception;
-}
diff --git a/arch/powerpc/platforms/cell/pervasive.h b/arch/powerpc/platforms/cell/pervasive.h
deleted file mode 100644
index 0da74ab10716..000000000000
--- a/arch/powerpc/platforms/cell/pervasive.h
+++ /dev/null
@@ -1,26 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Cell Pervasive Monitor and Debug interface and HW structures
- *
- * (C) Copyright IBM Corporation 2005
- *
- * Authors: Maximino Aguilar (maguilar@us.ibm.com)
- * David J. Erb (djerb@us.ibm.com)
- */
-
-
-#ifndef PERVASIVE_H
-#define PERVASIVE_H
-
-extern void cbe_pervasive_init(void);
-
-#ifdef CONFIG_PPC_IBM_CELL_RESETBUTTON
-extern int cbe_sysreset_hack(void);
-#else
-static inline int cbe_sysreset_hack(void)
-{
- return 1;
-}
-#endif /* CONFIG_PPC_IBM_CELL_RESETBUTTON */
-
-#endif
diff --git a/arch/powerpc/platforms/cell/pmu.c b/arch/powerpc/platforms/cell/pmu.c
deleted file mode 100644
index b207a7f99be5..000000000000
--- a/arch/powerpc/platforms/cell/pmu.c
+++ /dev/null
@@ -1,412 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Cell Broadband Engine Performance Monitor
- *
- * (C) Copyright IBM Corporation 2001,2006
- *
- * Author:
- * David Erb (djerb@us.ibm.com)
- * Kevin Corry (kevcorry@us.ibm.com)
- */
-
-#include <linux/interrupt.h>
-#include <linux/irqdomain.h>
-#include <linux/types.h>
-#include <linux/export.h>
-#include <asm/io.h>
-#include <asm/irq_regs.h>
-#include <asm/machdep.h>
-#include <asm/pmc.h>
-#include <asm/reg.h>
-#include <asm/spu.h>
-#include <asm/cell-regs.h>
-
-#include "interrupt.h"
-
-/*
- * When writing to write-only mmio addresses, save a shadow copy. All of the
- * registers are 32-bit, but stored in the upper-half of a 64-bit field in
- * pmd_regs.
- */
-
-#define WRITE_WO_MMIO(reg, x) \
- do { \
- u32 _x = (x); \
- struct cbe_pmd_regs __iomem *pmd_regs; \
- struct cbe_pmd_shadow_regs *shadow_regs; \
- pmd_regs = cbe_get_cpu_pmd_regs(cpu); \
- shadow_regs = cbe_get_cpu_pmd_shadow_regs(cpu); \
- out_be64(&(pmd_regs->reg), (((u64)_x) << 32)); \
- shadow_regs->reg = _x; \
- } while (0)
-
-#define READ_SHADOW_REG(val, reg) \
- do { \
- struct cbe_pmd_shadow_regs *shadow_regs; \
- shadow_regs = cbe_get_cpu_pmd_shadow_regs(cpu); \
- (val) = shadow_regs->reg; \
- } while (0)
-
-#define READ_MMIO_UPPER32(val, reg) \
- do { \
- struct cbe_pmd_regs __iomem *pmd_regs; \
- pmd_regs = cbe_get_cpu_pmd_regs(cpu); \
- (val) = (u32)(in_be64(&pmd_regs->reg) >> 32); \
- } while (0)
-
-/*
- * Physical counter registers.
- * Each physical counter can act as one 32-bit counter or two 16-bit counters.
- */
-
-u32 cbe_read_phys_ctr(u32 cpu, u32 phys_ctr)
-{
- u32 val_in_latch, val = 0;
-
- if (phys_ctr < NR_PHYS_CTRS) {
- READ_SHADOW_REG(val_in_latch, counter_value_in_latch);
-
- /* Read the latch or the actual counter, whichever is newer. */
- if (val_in_latch & (1 << phys_ctr)) {
- READ_SHADOW_REG(val, pm_ctr[phys_ctr]);
- } else {
- READ_MMIO_UPPER32(val, pm_ctr[phys_ctr]);
- }
- }
-
- return val;
-}
-EXPORT_SYMBOL_GPL(cbe_read_phys_ctr);
-
-void cbe_write_phys_ctr(u32 cpu, u32 phys_ctr, u32 val)
-{
- struct cbe_pmd_shadow_regs *shadow_regs;
- u32 pm_ctrl;
-
- if (phys_ctr < NR_PHYS_CTRS) {
- /* Writing to a counter only writes to a hardware latch.
- * The new value is not propagated to the actual counter
- * until the performance monitor is enabled.
- */
- WRITE_WO_MMIO(pm_ctr[phys_ctr], val);
-
- pm_ctrl = cbe_read_pm(cpu, pm_control);
- if (pm_ctrl & CBE_PM_ENABLE_PERF_MON) {
- /* The counters are already active, so we need to
- * rewrite the pm_control register to "re-enable"
- * the PMU.
- */
- cbe_write_pm(cpu, pm_control, pm_ctrl);
- } else {
- shadow_regs = cbe_get_cpu_pmd_shadow_regs(cpu);
- shadow_regs->counter_value_in_latch |= (1 << phys_ctr);
- }
- }
-}
-EXPORT_SYMBOL_GPL(cbe_write_phys_ctr);
-
-/*
- * "Logical" counter registers.
- * These will read/write 16-bits or 32-bits depending on the
- * current size of the counter. Counters 4 - 7 are always 16-bit.
- */
-
-u32 cbe_read_ctr(u32 cpu, u32 ctr)
-{
- u32 val;
- u32 phys_ctr = ctr & (NR_PHYS_CTRS - 1);
-
- val = cbe_read_phys_ctr(cpu, phys_ctr);
-
- if (cbe_get_ctr_size(cpu, phys_ctr) == 16)
- val = (ctr < NR_PHYS_CTRS) ? (val >> 16) : (val & 0xffff);
-
- return val;
-}
-EXPORT_SYMBOL_GPL(cbe_read_ctr);
-
-void cbe_write_ctr(u32 cpu, u32 ctr, u32 val)
-{
- u32 phys_ctr;
- u32 phys_val;
-
- phys_ctr = ctr & (NR_PHYS_CTRS - 1);
-
- if (cbe_get_ctr_size(cpu, phys_ctr) == 16) {
- phys_val = cbe_read_phys_ctr(cpu, phys_ctr);
-
- if (ctr < NR_PHYS_CTRS)
- val = (val << 16) | (phys_val & 0xffff);
- else
- val = (val & 0xffff) | (phys_val & 0xffff0000);
- }
-
- cbe_write_phys_ctr(cpu, phys_ctr, val);
-}
-EXPORT_SYMBOL_GPL(cbe_write_ctr);
-
-/*
- * Counter-control registers.
- * Each "logical" counter has a corresponding control register.
- */
-
-u32 cbe_read_pm07_control(u32 cpu, u32 ctr)
-{
- u32 pm07_control = 0;
-
- if (ctr < NR_CTRS)
- READ_SHADOW_REG(pm07_control, pm07_control[ctr]);
-
- return pm07_control;
-}
-EXPORT_SYMBOL_GPL(cbe_read_pm07_control);
-
-void cbe_write_pm07_control(u32 cpu, u32 ctr, u32 val)
-{
- if (ctr < NR_CTRS)
- WRITE_WO_MMIO(pm07_control[ctr], val);
-}
-EXPORT_SYMBOL_GPL(cbe_write_pm07_control);
-
-/*
- * Other PMU control registers. Most of these are write-only.
- */
-
-u32 cbe_read_pm(u32 cpu, enum pm_reg_name reg)
-{
- u32 val = 0;
-
- switch (reg) {
- case group_control:
- READ_SHADOW_REG(val, group_control);
- break;
-
- case debug_bus_control:
- READ_SHADOW_REG(val, debug_bus_control);
- break;
-
- case trace_address:
- READ_MMIO_UPPER32(val, trace_address);
- break;
-
- case ext_tr_timer:
- READ_SHADOW_REG(val, ext_tr_timer);
- break;
-
- case pm_status:
- READ_MMIO_UPPER32(val, pm_status);
- break;
-
- case pm_control:
- READ_SHADOW_REG(val, pm_control);
- break;
-
- case pm_interval:
- READ_MMIO_UPPER32(val, pm_interval);
- break;
-
- case pm_start_stop:
- READ_SHADOW_REG(val, pm_start_stop);
- break;
- }
-
- return val;
-}
-EXPORT_SYMBOL_GPL(cbe_read_pm);
-
-void cbe_write_pm(u32 cpu, enum pm_reg_name reg, u32 val)
-{
- switch (reg) {
- case group_control:
- WRITE_WO_MMIO(group_control, val);
- break;
-
- case debug_bus_control:
- WRITE_WO_MMIO(debug_bus_control, val);
- break;
-
- case trace_address:
- WRITE_WO_MMIO(trace_address, val);
- break;
-
- case ext_tr_timer:
- WRITE_WO_MMIO(ext_tr_timer, val);
- break;
-
- case pm_status:
- WRITE_WO_MMIO(pm_status, val);
- break;
-
- case pm_control:
- WRITE_WO_MMIO(pm_control, val);
- break;
-
- case pm_interval:
- WRITE_WO_MMIO(pm_interval, val);
- break;
-
- case pm_start_stop:
- WRITE_WO_MMIO(pm_start_stop, val);
- break;
- }
-}
-EXPORT_SYMBOL_GPL(cbe_write_pm);
-
-/*
- * Get/set the size of a physical counter to either 16 or 32 bits.
- */
-
-u32 cbe_get_ctr_size(u32 cpu, u32 phys_ctr)
-{
- u32 pm_ctrl, size = 0;
-
- if (phys_ctr < NR_PHYS_CTRS) {
- pm_ctrl = cbe_read_pm(cpu, pm_control);
- size = (pm_ctrl & CBE_PM_16BIT_CTR(phys_ctr)) ? 16 : 32;
- }
-
- return size;
-}
-EXPORT_SYMBOL_GPL(cbe_get_ctr_size);
-
-void cbe_set_ctr_size(u32 cpu, u32 phys_ctr, u32 ctr_size)
-{
- u32 pm_ctrl;
-
- if (phys_ctr < NR_PHYS_CTRS) {
- pm_ctrl = cbe_read_pm(cpu, pm_control);
- switch (ctr_size) {
- case 16:
- pm_ctrl |= CBE_PM_16BIT_CTR(phys_ctr);
- break;
-
- case 32:
- pm_ctrl &= ~CBE_PM_16BIT_CTR(phys_ctr);
- break;
- }
- cbe_write_pm(cpu, pm_control, pm_ctrl);
- }
-}
-EXPORT_SYMBOL_GPL(cbe_set_ctr_size);
-
-/*
- * Enable/disable the entire performance monitoring unit.
- * When we enable the PMU, all pending writes to counters get committed.
- */
-
-void cbe_enable_pm(u32 cpu)
-{
- struct cbe_pmd_shadow_regs *shadow_regs;
- u32 pm_ctrl;
-
- shadow_regs = cbe_get_cpu_pmd_shadow_regs(cpu);
- shadow_regs->counter_value_in_latch = 0;
-
- pm_ctrl = cbe_read_pm(cpu, pm_control) | CBE_PM_ENABLE_PERF_MON;
- cbe_write_pm(cpu, pm_control, pm_ctrl);
-}
-EXPORT_SYMBOL_GPL(cbe_enable_pm);
-
-void cbe_disable_pm(u32 cpu)
-{
- u32 pm_ctrl;
- pm_ctrl = cbe_read_pm(cpu, pm_control) & ~CBE_PM_ENABLE_PERF_MON;
- cbe_write_pm(cpu, pm_control, pm_ctrl);
-}
-EXPORT_SYMBOL_GPL(cbe_disable_pm);
-
-/*
- * Reading from the trace_buffer.
- * The trace buffer is two 64-bit registers. Reading from
- * the second half automatically increments the trace_address.
- */
-
-void cbe_read_trace_buffer(u32 cpu, u64 *buf)
-{
- struct cbe_pmd_regs __iomem *pmd_regs = cbe_get_cpu_pmd_regs(cpu);
-
- *buf++ = in_be64(&pmd_regs->trace_buffer_0_63);
- *buf++ = in_be64(&pmd_regs->trace_buffer_64_127);
-}
-EXPORT_SYMBOL_GPL(cbe_read_trace_buffer);
-
-/*
- * Enabling/disabling interrupts for the entire performance monitoring unit.
- */
-
-u32 cbe_get_and_clear_pm_interrupts(u32 cpu)
-{
- /* Reading pm_status clears the interrupt bits. */
- return cbe_read_pm(cpu, pm_status);
-}
-EXPORT_SYMBOL_GPL(cbe_get_and_clear_pm_interrupts);
-
-void cbe_enable_pm_interrupts(u32 cpu, u32 thread, u32 mask)
-{
- /* Set which node and thread will handle the next interrupt. */
- iic_set_interrupt_routing(cpu, thread, 0);
-
- /* Enable the interrupt bits in the pm_status register. */
- if (mask)
- cbe_write_pm(cpu, pm_status, mask);
-}
-EXPORT_SYMBOL_GPL(cbe_enable_pm_interrupts);
-
-void cbe_disable_pm_interrupts(u32 cpu)
-{
- cbe_get_and_clear_pm_interrupts(cpu);
- cbe_write_pm(cpu, pm_status, 0);
-}
-EXPORT_SYMBOL_GPL(cbe_disable_pm_interrupts);
-
-static irqreturn_t cbe_pm_irq(int irq, void *dev_id)
-{
- perf_irq(get_irq_regs());
- return IRQ_HANDLED;
-}
-
-static int __init cbe_init_pm_irq(void)
-{
- unsigned int irq;
- int rc, node;
-
- for_each_online_node(node) {
- irq = irq_create_mapping(NULL, IIC_IRQ_IOEX_PMI |
- (node << IIC_IRQ_NODE_SHIFT));
- if (!irq) {
- printk("ERROR: Unable to allocate irq for node %d\n",
- node);
- return -EINVAL;
- }
-
- rc = request_irq(irq, cbe_pm_irq,
- 0, "cbe-pmu-0", NULL);
- if (rc) {
- printk("ERROR: Request for irq on node %d failed\n",
- node);
- return rc;
- }
- }
-
- return 0;
-}
-machine_arch_initcall(cell, cbe_init_pm_irq);
-
-void cbe_sync_irq(int node)
-{
- unsigned int irq;
-
- irq = irq_find_mapping(NULL,
- IIC_IRQ_IOEX_PMI
- | (node << IIC_IRQ_NODE_SHIFT));
-
- if (!irq) {
- printk(KERN_WARNING "ERROR, unable to get existing irq %d " \
- "for node %d\n", irq, node);
- return;
- }
-
- synchronize_irq(irq);
-}
-EXPORT_SYMBOL_GPL(cbe_sync_irq);
-
diff --git a/arch/powerpc/platforms/cell/ras.c b/arch/powerpc/platforms/cell/ras.c
deleted file mode 100644
index f6b87926530c..000000000000
--- a/arch/powerpc/platforms/cell/ras.c
+++ /dev/null
@@ -1,352 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright 2006-2008, IBM Corporation.
- */
-
-#undef DEBUG
-
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/smp.h>
-#include <linux/reboot.h>
-#include <linux/kexec.h>
-#include <linux/crash_dump.h>
-#include <linux/of.h>
-
-#include <asm/kexec.h>
-#include <asm/reg.h>
-#include <asm/io.h>
-#include <asm/machdep.h>
-#include <asm/rtas.h>
-#include <asm/cell-regs.h>
-
-#include "ras.h"
-#include "pervasive.h"
-
-static void dump_fir(int cpu)
-{
- struct cbe_pmd_regs __iomem *pregs = cbe_get_cpu_pmd_regs(cpu);
- struct cbe_iic_regs __iomem *iregs = cbe_get_cpu_iic_regs(cpu);
-
- if (pregs == NULL)
- return;
-
- /* Todo: do some nicer parsing of bits and based on them go down
- * to other sub-units FIRs and not only IIC
- */
- printk(KERN_ERR "Global Checkstop FIR : 0x%016llx\n",
- in_be64(&pregs->checkstop_fir));
- printk(KERN_ERR "Global Recoverable FIR : 0x%016llx\n",
- in_be64(&pregs->checkstop_fir));
- printk(KERN_ERR "Global MachineCheck FIR : 0x%016llx\n",
- in_be64(&pregs->spec_att_mchk_fir));
-
- if (iregs == NULL)
- return;
- printk(KERN_ERR "IOC FIR : 0x%016llx\n",
- in_be64(&iregs->ioc_fir));
-
-}
-
-DEFINE_INTERRUPT_HANDLER(cbe_system_error_exception)
-{
- int cpu = smp_processor_id();
-
- printk(KERN_ERR "System Error Interrupt on CPU %d !\n", cpu);
- dump_fir(cpu);
- dump_stack();
-}
-
-DEFINE_INTERRUPT_HANDLER(cbe_maintenance_exception)
-{
- int cpu = smp_processor_id();
-
- /*
- * Nothing implemented for the maintenance interrupt at this point
- */
-
- printk(KERN_ERR "Unhandled Maintenance interrupt on CPU %d !\n", cpu);
- dump_stack();
-}
-
-DEFINE_INTERRUPT_HANDLER(cbe_thermal_exception)
-{
- int cpu = smp_processor_id();
-
- /*
- * Nothing implemented for the thermal interrupt at this point
- */
-
- printk(KERN_ERR "Unhandled Thermal interrupt on CPU %d !\n", cpu);
- dump_stack();
-}
-
-static int cbe_machine_check_handler(struct pt_regs *regs)
-{
- int cpu = smp_processor_id();
-
- printk(KERN_ERR "Machine Check Interrupt on CPU %d !\n", cpu);
- dump_fir(cpu);
-
-	/* No recovery from this code now, let's continue */
- return 0;
-}
-
-struct ptcal_area {
- struct list_head list;
- int nid;
- int order;
- struct page *pages;
-};
-
-static LIST_HEAD(ptcal_list);
-
-static int ptcal_start_tok, ptcal_stop_tok;
-
-static int __init cbe_ptcal_enable_on_node(int nid, int order)
-{
- struct ptcal_area *area;
- int ret = -ENOMEM;
- unsigned long addr;
-
- if (is_kdump_kernel())
- rtas_call(ptcal_stop_tok, 1, 1, NULL, nid);
-
- area = kmalloc(sizeof(*area), GFP_KERNEL);
- if (!area)
- goto out_err;
-
- area->nid = nid;
- area->order = order;
- area->pages = __alloc_pages_node(area->nid,
- GFP_KERNEL|__GFP_THISNODE,
- area->order);
-
- if (!area->pages) {
- printk(KERN_WARNING "%s: no page on node %d\n",
- __func__, area->nid);
- goto out_free_area;
- }
-
- /*
- * We move the ptcal area to the middle of the allocated
- * page, in order to avoid prefetches in memcpy and similar
- * functions stepping on it.
- */
- addr = __pa(page_address(area->pages)) + (PAGE_SIZE >> 1);
- printk(KERN_DEBUG "%s: enabling PTCAL on node %d address=0x%016lx\n",
- __func__, area->nid, addr);
-
- ret = -EIO;
- if (rtas_call(ptcal_start_tok, 3, 1, NULL, area->nid,
- (unsigned int)(addr >> 32),
- (unsigned int)(addr & 0xffffffff))) {
- printk(KERN_ERR "%s: error enabling PTCAL on node %d!\n",
- __func__, nid);
- goto out_free_pages;
- }
-
- list_add(&area->list, &ptcal_list);
-
- return 0;
-
-out_free_pages:
- __free_pages(area->pages, area->order);
-out_free_area:
- kfree(area);
-out_err:
- return ret;
-}
-
-static int __init cbe_ptcal_enable(void)
-{
- const u32 *size;
- struct device_node *np;
- int order, found_mic = 0;
-
- np = of_find_node_by_path("/rtas");
- if (!np)
- return -ENODEV;
-
- size = of_get_property(np, "ibm,cbe-ptcal-size", NULL);
- if (!size) {
- of_node_put(np);
- return -ENODEV;
- }
-
- pr_debug("%s: enabling PTCAL, size = 0x%x\n", __func__, *size);
- order = get_order(*size);
- of_node_put(np);
-
- /* support for malta device trees, with be@/mic@ nodes */
- for_each_node_by_type(np, "mic-tm") {
- cbe_ptcal_enable_on_node(of_node_to_nid(np), order);
- found_mic = 1;
- }
-
- if (found_mic)
- return 0;
-
- /* support for older device tree - use cpu nodes */
- for_each_node_by_type(np, "cpu") {
- const u32 *nid = of_get_property(np, "node-id", NULL);
- if (!nid) {
- printk(KERN_ERR "%s: node %pOF is missing node-id?\n",
- __func__, np);
- continue;
- }
- cbe_ptcal_enable_on_node(*nid, order);
- found_mic = 1;
- }
-
- return found_mic ? 0 : -ENODEV;
-}
-
-static int cbe_ptcal_disable(void)
-{
- struct ptcal_area *area, *tmp;
- int ret = 0;
-
- pr_debug("%s: disabling PTCAL\n", __func__);
-
- list_for_each_entry_safe(area, tmp, &ptcal_list, list) {
- /* disable ptcal on this node */
- if (rtas_call(ptcal_stop_tok, 1, 1, NULL, area->nid)) {
- printk(KERN_ERR "%s: error disabling PTCAL "
- "on node %d!\n", __func__,
- area->nid);
- ret = -EIO;
- continue;
- }
-
- /* ensure we can access the PTCAL area */
- memset(page_address(area->pages), 0,
- 1 << (area->order + PAGE_SHIFT));
-
- /* clean up */
- list_del(&area->list);
- __free_pages(area->pages, area->order);
- kfree(area);
- }
-
- return ret;
-}
-
-static int cbe_ptcal_notify_reboot(struct notifier_block *nb,
- unsigned long code, void *data)
-{
- return cbe_ptcal_disable();
-}
-
-static void cbe_ptcal_crash_shutdown(void)
-{
- cbe_ptcal_disable();
-}
-
-static struct notifier_block cbe_ptcal_reboot_notifier = {
- .notifier_call = cbe_ptcal_notify_reboot
-};
-
-#ifdef CONFIG_PPC_IBM_CELL_RESETBUTTON
-static int sysreset_hack;
-
-static int __init cbe_sysreset_init(void)
-{
- struct cbe_pmd_regs __iomem *regs;
-
- sysreset_hack = of_machine_is_compatible("IBM,CBPLUS-1.0");
- if (!sysreset_hack)
- return 0;
-
- regs = cbe_get_cpu_pmd_regs(0);
- if (!regs)
- return 0;
-
- /* Enable JTAG system-reset hack */
- out_be32(&regs->fir_mode_reg,
- in_be32(&regs->fir_mode_reg) |
- CBE_PMD_FIR_MODE_M8);
-
- return 0;
-}
-device_initcall(cbe_sysreset_init);
-
-int cbe_sysreset_hack(void)
-{
- struct cbe_pmd_regs __iomem *regs;
-
- /*
- * The BMC can inject user triggered system reset exceptions,
- * but cannot set the system reset reason in srr1,
- * so check an extra register here.
- */
- if (sysreset_hack && (smp_processor_id() == 0)) {
- regs = cbe_get_cpu_pmd_regs(0);
- if (!regs)
- return 0;
- if (in_be64(&regs->ras_esc_0) & 0x0000ffff) {
- out_be64(&regs->ras_esc_0, 0);
- return 0;
- }
- }
- return 1;
-}
-#endif /* CONFIG_PPC_IBM_CELL_RESETBUTTON */
-
-static int __init cbe_ptcal_init(void)
-{
- int ret;
- ptcal_start_tok = rtas_function_token(RTAS_FN_IBM_CBE_START_PTCAL);
- ptcal_stop_tok = rtas_function_token(RTAS_FN_IBM_CBE_STOP_PTCAL);
-
- if (ptcal_start_tok == RTAS_UNKNOWN_SERVICE
- || ptcal_stop_tok == RTAS_UNKNOWN_SERVICE)
- return -ENODEV;
-
- ret = register_reboot_notifier(&cbe_ptcal_reboot_notifier);
- if (ret)
- goto out1;
-
- ret = crash_shutdown_register(&cbe_ptcal_crash_shutdown);
- if (ret)
- goto out2;
-
- return cbe_ptcal_enable();
-
-out2:
- unregister_reboot_notifier(&cbe_ptcal_reboot_notifier);
-out1:
- printk(KERN_ERR "Can't disable PTCAL, so not enabling\n");
- return ret;
-}
-
-arch_initcall(cbe_ptcal_init);
-
-void __init cbe_ras_init(void)
-{
- unsigned long hid0;
-
- /*
- * Enable System Error & thermal interrupts and wakeup conditions
- */
-
- hid0 = mfspr(SPRN_HID0);
- hid0 |= HID0_CBE_THERM_INT_EN | HID0_CBE_THERM_WAKEUP |
- HID0_CBE_SYSERR_INT_EN | HID0_CBE_SYSERR_WAKEUP;
- mtspr(SPRN_HID0, hid0);
- mb();
-
- /*
- * Install machine check handler. Leave setting of precise mode to
- * what the firmware did for now
- */
- ppc_md.machine_check_exception = cbe_machine_check_handler;
- mb();
-
- /*
- * For now, we assume that IOC_FIR is already set to forward some
- * error conditions to the System Error handler. If that is not true
- * then it will have to be fixed up here.
- */
-}
diff --git a/arch/powerpc/platforms/cell/ras.h b/arch/powerpc/platforms/cell/ras.h
deleted file mode 100644
index 226dbd48efad..000000000000
--- a/arch/powerpc/platforms/cell/ras.h
+++ /dev/null
@@ -1,13 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef RAS_H
-#define RAS_H
-
-#include <asm/interrupt.h>
-
-DECLARE_INTERRUPT_HANDLER(cbe_system_error_exception);
-DECLARE_INTERRUPT_HANDLER(cbe_maintenance_exception);
-DECLARE_INTERRUPT_HANDLER(cbe_thermal_exception);
-
-extern void cbe_ras_init(void);
-
-#endif /* RAS_H */
diff --git a/arch/powerpc/platforms/cell/setup.c b/arch/powerpc/platforms/cell/setup.c
deleted file mode 100644
index f64a1ef98aa8..000000000000
--- a/arch/powerpc/platforms/cell/setup.c
+++ /dev/null
@@ -1,274 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * linux/arch/powerpc/platforms/cell/cell_setup.c
- *
- * Copyright (C) 1995 Linus Torvalds
- * Adapted from 'alpha' version by Gary Thomas
- * Modified by Cort Dougan (cort@cs.nmt.edu)
- * Modified by PPC64 Team, IBM Corp
- * Modified by Cell Team, IBM Deutschland Entwicklung GmbH
- */
-#undef DEBUG
-
-#include <linux/sched.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/stddef.h>
-#include <linux/export.h>
-#include <linux/unistd.h>
-#include <linux/user.h>
-#include <linux/reboot.h>
-#include <linux/init.h>
-#include <linux/delay.h>
-#include <linux/irq.h>
-#include <linux/seq_file.h>
-#include <linux/root_dev.h>
-#include <linux/console.h>
-#include <linux/mutex.h>
-#include <linux/memory_hotplug.h>
-#include <linux/of_platform.h>
-#include <linux/platform_device.h>
-
-#include <asm/mmu.h>
-#include <asm/processor.h>
-#include <asm/io.h>
-#include <asm/rtas.h>
-#include <asm/pci-bridge.h>
-#include <asm/iommu.h>
-#include <asm/dma.h>
-#include <asm/machdep.h>
-#include <asm/time.h>
-#include <asm/nvram.h>
-#include <asm/cputable.h>
-#include <asm/ppc-pci.h>
-#include <asm/irq.h>
-#include <asm/spu.h>
-#include <asm/spu_priv1.h>
-#include <asm/udbg.h>
-#include <asm/mpic.h>
-#include <asm/cell-regs.h>
-#include <asm/io-workarounds.h>
-
-#include "cell.h"
-#include "interrupt.h"
-#include "pervasive.h"
-#include "ras.h"
-
-#ifdef DEBUG
-#define DBG(fmt...) udbg_printf(fmt)
-#else
-#define DBG(fmt...)
-#endif
-
-static void cell_show_cpuinfo(struct seq_file *m)
-{
- struct device_node *root;
- const char *model = "";
-
- root = of_find_node_by_path("/");
- if (root)
- model = of_get_property(root, "model", NULL);
- seq_printf(m, "machine\t\t: CHRP %s\n", model);
- of_node_put(root);
-}
-
-static void cell_progress(char *s, unsigned short hex)
-{
- printk("*** %04x : %s\n", hex, s ? s : "");
-}
-
-static void cell_fixup_pcie_rootcomplex(struct pci_dev *dev)
-{
- struct pci_controller *hose;
- const char *s;
- int i;
-
- if (!machine_is(cell))
- return;
-
- /* We're searching for a direct child of the PHB */
- if (dev->bus->self != NULL || dev->devfn != 0)
- return;
-
- hose = pci_bus_to_host(dev->bus);
- if (hose == NULL)
- return;
-
- /* Only on PCIE */
- if (!of_device_is_compatible(hose->dn, "pciex"))
- return;
-
- /* And only on axon */
- s = of_get_property(hose->dn, "model", NULL);
- if (!s || strcmp(s, "Axon") != 0)
- return;
-
- for (i = 0; i < PCI_BRIDGE_RESOURCES; i++) {
- dev->resource[i].start = dev->resource[i].end = 0;
- dev->resource[i].flags = 0;
- }
-
- printk(KERN_DEBUG "PCI: Hiding resources on Axon PCIE RC %s\n",
- pci_name(dev));
-}
-DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, cell_fixup_pcie_rootcomplex);
-
-static int cell_setup_phb(struct pci_controller *phb)
-{
- const char *model;
- struct device_node *np;
-
- int rc = rtas_setup_phb(phb);
- if (rc)
- return rc;
-
- phb->controller_ops = cell_pci_controller_ops;
-
- np = phb->dn;
- model = of_get_property(np, "model", NULL);
- if (model == NULL || !of_node_name_eq(np, "pci"))
- return 0;
-
- /* Setup workarounds for spider */
- if (strcmp(model, "Spider"))
- return 0;
-
- iowa_register_bus(phb, &spiderpci_ops, &spiderpci_iowa_init,
- (void *)SPIDER_PCI_REG_BASE);
- return 0;
-}
-
-static const struct of_device_id cell_bus_ids[] __initconst = {
- { .type = "soc", },
- { .compatible = "soc", },
- { .type = "spider", },
- { .type = "axon", },
- { .type = "plb5", },
- { .type = "plb4", },
- { .type = "opb", },
- { .type = "ebc", },
- {},
-};
-
-static int __init cell_publish_devices(void)
-{
- struct device_node *root = of_find_node_by_path("/");
- struct device_node *np;
- int node;
-
- /* Publish OF platform devices for southbridge IOs */
- of_platform_bus_probe(NULL, cell_bus_ids, NULL);
-
- /* On spider based blades, we need to manually create the OF
- * platform devices for the PCI host bridges
- */
- for_each_child_of_node(root, np) {
- if (!of_node_is_type(np, "pci") && !of_node_is_type(np, "pciex"))
- continue;
- of_platform_device_create(np, NULL, NULL);
- }
-
- of_node_put(root);
-
- /* There is no device for the MIC memory controller, thus we create
- * a platform device for it to attach the EDAC driver to.
- */
- for_each_online_node(node) {
- if (cbe_get_cpu_mic_tm_regs(cbe_node_to_cpu(node)) == NULL)
- continue;
- platform_device_register_simple("cbe-mic", node, NULL, 0);
- }
-
- return 0;
-}
-machine_subsys_initcall(cell, cell_publish_devices);
-
-static void __init mpic_init_IRQ(void)
-{
- struct device_node *dn;
- struct mpic *mpic;
-
- for_each_node_by_name(dn, "interrupt-controller") {
- if (!of_device_is_compatible(dn, "CBEA,platform-open-pic"))
- continue;
-
- /* The MPIC driver will get everything it needs from the
- * device-tree, just pass 0 to all arguments
- */
- mpic = mpic_alloc(dn, 0, MPIC_SECONDARY | MPIC_NO_RESET,
- 0, 0, " MPIC ");
- if (mpic == NULL)
- continue;
- mpic_init(mpic);
- }
-}
-
-
-static void __init cell_init_irq(void)
-{
- iic_init_IRQ();
- spider_init_IRQ();
- mpic_init_IRQ();
-}
-
-static void __init cell_set_dabrx(void)
-{
- mtspr(SPRN_DABRX, DABRX_KERNEL | DABRX_USER);
-}
-
-static void __init cell_setup_arch(void)
-{
-#ifdef CONFIG_SPU_BASE
- spu_priv1_ops = &spu_priv1_mmio_ops;
- spu_management_ops = &spu_management_of_ops;
-#endif
-
- cbe_regs_init();
-
- cell_set_dabrx();
-
-#ifdef CONFIG_CBE_RAS
- cbe_ras_init();
-#endif
-
-#ifdef CONFIG_SMP
- smp_init_cell();
-#endif
- /* init to some ~sane value until calibrate_delay() runs */
- loops_per_jiffy = 50000000;
-
- /* Find and initialize PCI host bridges */
- init_pci_config_tokens();
-
- cbe_pervasive_init();
-
- mmio_nvram_init();
-}
-
-static int __init cell_probe(void)
-{
- if (!of_machine_is_compatible("IBM,CBEA") &&
- !of_machine_is_compatible("IBM,CPBW-1.0"))
- return 0;
-
- pm_power_off = rtas_power_off;
-
- return 1;
-}
-
-define_machine(cell) {
- .name = "Cell",
- .probe = cell_probe,
- .setup_arch = cell_setup_arch,
- .show_cpuinfo = cell_show_cpuinfo,
- .restart = rtas_restart,
- .halt = rtas_halt,
- .get_boot_time = rtas_get_boot_time,
- .get_rtc_time = rtas_get_rtc_time,
- .set_rtc_time = rtas_set_rtc_time,
- .progress = cell_progress,
- .init_IRQ = cell_init_irq,
- .pci_setup_phb = cell_setup_phb,
-};
-
-struct pci_controller_ops cell_pci_controller_ops;
diff --git a/arch/powerpc/platforms/cell/smp.c b/arch/powerpc/platforms/cell/smp.c
deleted file mode 100644
index 0e8f20ecca08..000000000000
--- a/arch/powerpc/platforms/cell/smp.c
+++ /dev/null
@@ -1,162 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * SMP support for BPA machines.
- *
- * Dave Engebretsen, Peter Bergner, and
- * Mike Corrigan {engebret|bergner|mikec}@us.ibm.com
- *
- * Plus various changes from other IBM teams...
- */
-
-#undef DEBUG
-
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/smp.h>
-#include <linux/interrupt.h>
-#include <linux/delay.h>
-#include <linux/init.h>
-#include <linux/spinlock.h>
-#include <linux/cache.h>
-#include <linux/err.h>
-#include <linux/device.h>
-#include <linux/cpu.h>
-#include <linux/pgtable.h>
-
-#include <asm/ptrace.h>
-#include <linux/atomic.h>
-#include <asm/irq.h>
-#include <asm/page.h>
-#include <asm/io.h>
-#include <asm/smp.h>
-#include <asm/paca.h>
-#include <asm/machdep.h>
-#include <asm/cputable.h>
-#include <asm/firmware.h>
-#include <asm/rtas.h>
-#include <asm/cputhreads.h>
-#include <asm/text-patching.h>
-
-#include "interrupt.h"
-#include <asm/udbg.h>
-
-#ifdef DEBUG
-#define DBG(fmt...) udbg_printf(fmt)
-#else
-#define DBG(fmt...)
-#endif
-
-/*
- * The Primary thread of each non-boot processor was started from the OF client
- * interface by prom_hold_cpus and is spinning on secondary_hold_spinloop.
- */
-static cpumask_t of_spin_map;
-
-/**
- * smp_startup_cpu() - start the given cpu
- * @lcpu: Logical CPU ID of the CPU to be started.
- *
- * At boot time, there is nothing to do for primary threads which were
- * started from Open Firmware. For anything else, call RTAS with the
- * appropriate start location.
- *
- * Returns:
- * 0 - failure
- * 1 - success
- */
-static inline int smp_startup_cpu(unsigned int lcpu)
-{
- int status;
- unsigned long start_here =
- __pa(ppc_function_entry(generic_secondary_smp_init));
- unsigned int pcpu;
- int start_cpu;
-
- if (cpumask_test_cpu(lcpu, &of_spin_map))
- /* Already started by OF and sitting in spin loop */
- return 1;
-
- pcpu = get_hard_smp_processor_id(lcpu);
-
- /*
- * If the RTAS start-cpu token does not exist then presume the
- * cpu is already spinning.
- */
- start_cpu = rtas_function_token(RTAS_FN_START_CPU);
- if (start_cpu == RTAS_UNKNOWN_SERVICE)
- return 1;
-
- status = rtas_call(start_cpu, 3, 1, NULL, pcpu, start_here, lcpu);
- if (status != 0) {
- printk(KERN_ERR "start-cpu failed: %i\n", status);
- return 0;
- }
-
- return 1;
-}
-
-static void smp_cell_setup_cpu(int cpu)
-{
- if (cpu != boot_cpuid)
- iic_setup_cpu();
-
- /*
- * change default DABRX to allow user watchpoints
- */
- mtspr(SPRN_DABRX, DABRX_KERNEL | DABRX_USER);
-}
-
-static int smp_cell_kick_cpu(int nr)
-{
- if (nr < 0 || nr >= nr_cpu_ids)
- return -EINVAL;
-
- if (!smp_startup_cpu(nr))
- return -ENOENT;
-
- /*
- * The processor is currently spinning, waiting for the
- * cpu_start field to become non-zero After we set cpu_start,
- * the processor will continue on to secondary_start
- */
- paca_ptrs[nr]->cpu_start = 1;
-
- return 0;
-}
-
-static struct smp_ops_t bpa_iic_smp_ops = {
- .message_pass = iic_message_pass,
- .probe = iic_request_IPIs,
- .kick_cpu = smp_cell_kick_cpu,
- .setup_cpu = smp_cell_setup_cpu,
- .cpu_bootable = smp_generic_cpu_bootable,
-};
-
-/* This is called very early */
-void __init smp_init_cell(void)
-{
- int i;
-
- DBG(" -> smp_init_cell()\n");
-
- smp_ops = &bpa_iic_smp_ops;
-
- /* Mark threads which are still spinning in hold loops. */
- if (cpu_has_feature(CPU_FTR_SMT)) {
- for_each_present_cpu(i) {
- if (cpu_thread_in_core(i) == 0)
- cpumask_set_cpu(i, &of_spin_map);
- }
- } else
- cpumask_copy(&of_spin_map, cpu_present_mask);
-
- cpumask_clear_cpu(boot_cpuid, &of_spin_map);
-
- /* Non-lpar has additional take/give timebase */
- if (rtas_function_token(RTAS_FN_FREEZE_TIME_BASE) != RTAS_UNKNOWN_SERVICE) {
- smp_ops->give_timebase = rtas_give_timebase;
- smp_ops->take_timebase = rtas_take_timebase;
- }
-
- DBG(" <- smp_init_cell()\n");
-}
diff --git a/arch/powerpc/platforms/cell/spider-pci.c b/arch/powerpc/platforms/cell/spider-pci.c
deleted file mode 100644
index 68439445b1c3..000000000000
--- a/arch/powerpc/platforms/cell/spider-pci.c
+++ /dev/null
@@ -1,170 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * IO workarounds for PCI on Celleb/Cell platform
- *
- * (C) Copyright 2006-2007 TOSHIBA CORPORATION
- */
-
-#undef DEBUG
-
-#include <linux/kernel.h>
-#include <linux/of_address.h>
-#include <linux/slab.h>
-#include <linux/io.h>
-
-#include <asm/ppc-pci.h>
-#include <asm/pci-bridge.h>
-#include <asm/io-workarounds.h>
-
-#define SPIDER_PCI_DISABLE_PREFETCH
-
-struct spiderpci_iowa_private {
- void __iomem *regs;
-};
-
-static void spiderpci_io_flush(struct iowa_bus *bus)
-{
- struct spiderpci_iowa_private *priv;
-
- priv = bus->private;
- in_be32(priv->regs + SPIDER_PCI_DUMMY_READ);
- iosync();
-}
-
-#define SPIDER_PCI_MMIO_READ(name, ret) \
-static ret spiderpci_##name(const PCI_IO_ADDR addr) \
-{ \
- ret val = __do_##name(addr); \
- spiderpci_io_flush(iowa_mem_find_bus(addr)); \
- return val; \
-}
-
-#define SPIDER_PCI_MMIO_READ_STR(name) \
-static void spiderpci_##name(const PCI_IO_ADDR addr, void *buf, \
- unsigned long count) \
-{ \
- __do_##name(addr, buf, count); \
- spiderpci_io_flush(iowa_mem_find_bus(addr)); \
-}
-
-SPIDER_PCI_MMIO_READ(readb, u8)
-SPIDER_PCI_MMIO_READ(readw, u16)
-SPIDER_PCI_MMIO_READ(readl, u32)
-SPIDER_PCI_MMIO_READ(readq, u64)
-SPIDER_PCI_MMIO_READ(readw_be, u16)
-SPIDER_PCI_MMIO_READ(readl_be, u32)
-SPIDER_PCI_MMIO_READ(readq_be, u64)
-SPIDER_PCI_MMIO_READ_STR(readsb)
-SPIDER_PCI_MMIO_READ_STR(readsw)
-SPIDER_PCI_MMIO_READ_STR(readsl)
-
-static void spiderpci_memcpy_fromio(void *dest, const PCI_IO_ADDR src,
- unsigned long n)
-{
- __do_memcpy_fromio(dest, src, n);
- spiderpci_io_flush(iowa_mem_find_bus(src));
-}
-
-static int __init spiderpci_pci_setup_chip(struct pci_controller *phb,
- void __iomem *regs)
-{
- void *dummy_page_va;
- dma_addr_t dummy_page_da;
-
-#ifdef SPIDER_PCI_DISABLE_PREFETCH
- u32 val = in_be32(regs + SPIDER_PCI_VCI_CNTL_STAT);
- pr_debug("SPIDER_IOWA:PVCI_Control_Status was 0x%08x\n", val);
- out_be32(regs + SPIDER_PCI_VCI_CNTL_STAT, val | 0x8);
-#endif /* SPIDER_PCI_DISABLE_PREFETCH */
-
- /* setup dummy read */
- /*
- * On CellBlade, we can't know that which XDR memory is used by
- * kmalloc() to allocate dummy_page_va.
- * In order to improve the performance, the XDR which is used to
- * allocate dummy_page_va is the nearest the spider-pci.
- * We have to select the CBE which is the nearest the spider-pci
- * to allocate memory from the best XDR, but I don't know that
- * how to do.
- *
- * Celleb does not have this problem, because it has only one XDR.
- */
- dummy_page_va = kmalloc(PAGE_SIZE, GFP_KERNEL);
- if (!dummy_page_va) {
- pr_err("SPIDERPCI-IOWA:Alloc dummy_page_va failed.\n");
- return -1;
- }
-
- dummy_page_da = dma_map_single(phb->parent, dummy_page_va,
- PAGE_SIZE, DMA_FROM_DEVICE);
- if (dma_mapping_error(phb->parent, dummy_page_da)) {
- pr_err("SPIDER-IOWA:Map dummy page filed.\n");
- kfree(dummy_page_va);
- return -1;
- }
-
- out_be32(regs + SPIDER_PCI_DUMMY_READ_BASE, dummy_page_da);
-
- return 0;
-}
-
-int __init spiderpci_iowa_init(struct iowa_bus *bus, void *data)
-{
- void __iomem *regs = NULL;
- struct spiderpci_iowa_private *priv;
- struct device_node *np = bus->phb->dn;
- struct resource r;
- unsigned long offset = (unsigned long)data;
-
- pr_debug("SPIDERPCI-IOWA:Bus initialize for spider(%pOF)\n",
- np);
-
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
- if (!priv) {
- pr_err("SPIDERPCI-IOWA:"
- "Can't allocate struct spiderpci_iowa_private");
- return -1;
- }
-
- if (of_address_to_resource(np, 0, &r)) {
- pr_err("SPIDERPCI-IOWA:Can't get resource.\n");
- goto error;
- }
-
- regs = ioremap(r.start + offset, SPIDER_PCI_REG_SIZE);
- if (!regs) {
- pr_err("SPIDERPCI-IOWA:ioremap failed.\n");
- goto error;
- }
- priv->regs = regs;
- bus->private = priv;
-
- if (spiderpci_pci_setup_chip(bus->phb, regs))
- goto error;
-
- return 0;
-
-error:
- kfree(priv);
- bus->private = NULL;
-
- if (regs)
- iounmap(regs);
-
- return -1;
-}
-
-struct ppc_pci_io spiderpci_ops = {
- .readb = spiderpci_readb,
- .readw = spiderpci_readw,
- .readl = spiderpci_readl,
- .readq = spiderpci_readq,
- .readw_be = spiderpci_readw_be,
- .readl_be = spiderpci_readl_be,
- .readq_be = spiderpci_readq_be,
- .readsb = spiderpci_readsb,
- .readsw = spiderpci_readsw,
- .readsl = spiderpci_readsl,
- .memcpy_fromio = spiderpci_memcpy_fromio,
-};
-
diff --git a/arch/powerpc/platforms/cell/spider-pic.c b/arch/powerpc/platforms/cell/spider-pic.c
deleted file mode 100644
index 11df737c8c6a..000000000000
--- a/arch/powerpc/platforms/cell/spider-pic.c
+++ /dev/null
@@ -1,344 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * External Interrupt Controller on Spider South Bridge
- *
- * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
- *
- * Author: Arnd Bergmann <arndb@de.ibm.com>
- */
-
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/ioport.h>
-#include <linux/of_address.h>
-#include <linux/of_irq.h>
-#include <linux/pgtable.h>
-
-#include <asm/io.h>
-
-#include "interrupt.h"
-
-/* register layout taken from Spider spec, table 7.4-4 */
-enum {
- TIR_DEN = 0x004, /* Detection Enable Register */
- TIR_MSK = 0x084, /* Mask Level Register */
- TIR_EDC = 0x0c0, /* Edge Detection Clear Register */
- TIR_PNDA = 0x100, /* Pending Register A */
- TIR_PNDB = 0x104, /* Pending Register B */
- TIR_CS = 0x144, /* Current Status Register */
- TIR_LCSA = 0x150, /* Level Current Status Register A */
- TIR_LCSB = 0x154, /* Level Current Status Register B */
- TIR_LCSC = 0x158, /* Level Current Status Register C */
- TIR_LCSD = 0x15c, /* Level Current Status Register D */
- TIR_CFGA = 0x200, /* Setting Register A0 */
- TIR_CFGB = 0x204, /* Setting Register B0 */
- /* 0x208 ... 0x3ff Setting Register An/Bn */
- TIR_PPNDA = 0x400, /* Packet Pending Register A */
- TIR_PPNDB = 0x404, /* Packet Pending Register B */
- TIR_PIERA = 0x408, /* Packet Output Error Register A */
- TIR_PIERB = 0x40c, /* Packet Output Error Register B */
- TIR_PIEN = 0x444, /* Packet Output Enable Register */
- TIR_PIPND = 0x454, /* Packet Output Pending Register */
- TIRDID = 0x484, /* Spider Device ID Register */
- REISTIM = 0x500, /* Reissue Command Timeout Time Setting */
- REISTIMEN = 0x504, /* Reissue Command Timeout Setting */
- REISWAITEN = 0x508, /* Reissue Wait Control*/
-};
-
-#define SPIDER_CHIP_COUNT 4
-#define SPIDER_SRC_COUNT 64
-#define SPIDER_IRQ_INVALID 63
-
-struct spider_pic {
- struct irq_domain *host;
- void __iomem *regs;
- unsigned int node_id;
-};
-static struct spider_pic spider_pics[SPIDER_CHIP_COUNT];
-
-static struct spider_pic *spider_irq_data_to_pic(struct irq_data *d)
-{
- return irq_data_get_irq_chip_data(d);
-}
-
-static void __iomem *spider_get_irq_config(struct spider_pic *pic,
- unsigned int src)
-{
- return pic->regs + TIR_CFGA + 8 * src;
-}
-
-static void spider_unmask_irq(struct irq_data *d)
-{
- struct spider_pic *pic = spider_irq_data_to_pic(d);
- void __iomem *cfg = spider_get_irq_config(pic, irqd_to_hwirq(d));
-
- out_be32(cfg, in_be32(cfg) | 0x30000000u);
-}
-
-static void spider_mask_irq(struct irq_data *d)
-{
- struct spider_pic *pic = spider_irq_data_to_pic(d);
- void __iomem *cfg = spider_get_irq_config(pic, irqd_to_hwirq(d));
-
- out_be32(cfg, in_be32(cfg) & ~0x30000000u);
-}
-
-static void spider_ack_irq(struct irq_data *d)
-{
- struct spider_pic *pic = spider_irq_data_to_pic(d);
- unsigned int src = irqd_to_hwirq(d);
-
- /* Reset edge detection logic if necessary
- */
- if (irqd_is_level_type(d))
- return;
-
- /* Only interrupts 47 to 50 can be set to edge */
- if (src < 47 || src > 50)
- return;
-
- /* Perform the clear of the edge logic */
- out_be32(pic->regs + TIR_EDC, 0x100 | (src & 0xf));
-}
-
-static int spider_set_irq_type(struct irq_data *d, unsigned int type)
-{
- unsigned int sense = type & IRQ_TYPE_SENSE_MASK;
- struct spider_pic *pic = spider_irq_data_to_pic(d);
- unsigned int hw = irqd_to_hwirq(d);
- void __iomem *cfg = spider_get_irq_config(pic, hw);
- u32 old_mask;
- u32 ic;
-
- /* Note that only level high is supported for most interrupts */
- if (sense != IRQ_TYPE_NONE && sense != IRQ_TYPE_LEVEL_HIGH &&
- (hw < 47 || hw > 50))
- return -EINVAL;
-
- /* Decode sense type */
- switch(sense) {
- case IRQ_TYPE_EDGE_RISING:
- ic = 0x3;
- break;
- case IRQ_TYPE_EDGE_FALLING:
- ic = 0x2;
- break;
- case IRQ_TYPE_LEVEL_LOW:
- ic = 0x0;
- break;
- case IRQ_TYPE_LEVEL_HIGH:
- case IRQ_TYPE_NONE:
- ic = 0x1;
- break;
- default:
- return -EINVAL;
- }
-
- /* Configure the source. One gross hack that was there before and
- * that I've kept around is the priority to the BE which I set to
- * be the same as the interrupt source number. I don't know whether
- * that's supposed to make any kind of sense however, we'll have to
- * decide that, but for now, I'm not changing the behaviour.
- */
- old_mask = in_be32(cfg) & 0x30000000u;
- out_be32(cfg, old_mask | (ic << 24) | (0x7 << 16) |
- (pic->node_id << 4) | 0xe);
- out_be32(cfg + 4, (0x2 << 16) | (hw & 0xff));
-
- return 0;
-}
-
-static struct irq_chip spider_pic = {
- .name = "SPIDER",
- .irq_unmask = spider_unmask_irq,
- .irq_mask = spider_mask_irq,
- .irq_ack = spider_ack_irq,
- .irq_set_type = spider_set_irq_type,
-};
-
-static int spider_host_map(struct irq_domain *h, unsigned int virq,
- irq_hw_number_t hw)
-{
- irq_set_chip_data(virq, h->host_data);
- irq_set_chip_and_handler(virq, &spider_pic, handle_level_irq);
-
- /* Set default irq type */
- irq_set_irq_type(virq, IRQ_TYPE_NONE);
-
- return 0;
-}
-
-static int spider_host_xlate(struct irq_domain *h, struct device_node *ct,
- const u32 *intspec, unsigned int intsize,
- irq_hw_number_t *out_hwirq, unsigned int *out_flags)
-
-{
- /* Spider interrupts have 2 cells, first is the interrupt source,
- * second, well, I don't know for sure yet ... We mask the top bits
- * because old device-trees encode a node number in there
- */
- *out_hwirq = intspec[0] & 0x3f;
- *out_flags = IRQ_TYPE_LEVEL_HIGH;
- return 0;
-}
-
-static const struct irq_domain_ops spider_host_ops = {
- .map = spider_host_map,
- .xlate = spider_host_xlate,
-};
-
-static void spider_irq_cascade(struct irq_desc *desc)
-{
- struct irq_chip *chip = irq_desc_get_chip(desc);
- struct spider_pic *pic = irq_desc_get_handler_data(desc);
- unsigned int cs;
-
- cs = in_be32(pic->regs + TIR_CS) >> 24;
- if (cs != SPIDER_IRQ_INVALID)
- generic_handle_domain_irq(pic->host, cs);
-
- chip->irq_eoi(&desc->irq_data);
-}
-
-/* For hooking up the cascade we have a problem. Our device-tree is
- * crap and we don't know on which BE iic interrupt we are hooked on at
- * least not the "standard" way. We can reconstitute it based on two
- * informations though: which BE node we are connected to and whether
- * we are connected to IOIF0 or IOIF1. Right now, we really only care
- * about the IBM cell blade and we know that its firmware gives us an
- * interrupt-map property which is pretty strange.
- */
-static unsigned int __init spider_find_cascade_and_node(struct spider_pic *pic)
-{
- unsigned int virq;
- const u32 *imap, *tmp;
- int imaplen, intsize, unit;
- struct device_node *iic;
- struct device_node *of_node;
-
- of_node = irq_domain_get_of_node(pic->host);
-
- /* First, we check whether we have a real "interrupts" in the device
- * tree in case the device-tree is ever fixed
- */
- virq = irq_of_parse_and_map(of_node, 0);
- if (virq)
- return virq;
-
- /* Now do the horrible hacks */
- tmp = of_get_property(of_node, "#interrupt-cells", NULL);
- if (tmp == NULL)
- return 0;
- intsize = *tmp;
- imap = of_get_property(of_node, "interrupt-map", &imaplen);
- if (imap == NULL || imaplen < (intsize + 1))
- return 0;
- iic = of_find_node_by_phandle(imap[intsize]);
- if (iic == NULL)
- return 0;
- imap += intsize + 1;
- tmp = of_get_property(iic, "#interrupt-cells", NULL);
- if (tmp == NULL) {
- of_node_put(iic);
- return 0;
- }
- intsize = *tmp;
- /* Assume unit is last entry of interrupt specifier */
- unit = imap[intsize - 1];
- /* Ok, we have a unit, now let's try to get the node */
- tmp = of_get_property(iic, "ibm,interrupt-server-ranges", NULL);
- if (tmp == NULL) {
- of_node_put(iic);
- return 0;
- }
- /* ugly as hell but works for now */
- pic->node_id = (*tmp) >> 1;
- of_node_put(iic);
-
- /* Ok, now let's get cracking. You may ask me why I just didn't match
- * the iic host from the iic OF node, but that way I'm still compatible
- * with really really old old firmwares for which we don't have a node
- */
- /* Manufacture an IIC interrupt number of class 2 */
- virq = irq_create_mapping(NULL,
- (pic->node_id << IIC_IRQ_NODE_SHIFT) |
- (2 << IIC_IRQ_CLASS_SHIFT) |
- unit);
- if (!virq)
- printk(KERN_ERR "spider_pic: failed to map cascade !");
- return virq;
-}
-
-
-static void __init spider_init_one(struct device_node *of_node, int chip,
- unsigned long addr)
-{
- struct spider_pic *pic = &spider_pics[chip];
- int i, virq;
-
- /* Map registers */
- pic->regs = ioremap(addr, 0x1000);
- if (pic->regs == NULL)
- panic("spider_pic: can't map registers !");
-
- /* Allocate a host */
- pic->host = irq_domain_add_linear(of_node, SPIDER_SRC_COUNT,
- &spider_host_ops, pic);
- if (pic->host == NULL)
- panic("spider_pic: can't allocate irq host !");
-
- /* Go through all sources and disable them */
- for (i = 0; i < SPIDER_SRC_COUNT; i++) {
- void __iomem *cfg = pic->regs + TIR_CFGA + 8 * i;
- out_be32(cfg, in_be32(cfg) & ~0x30000000u);
- }
-
- /* do not mask any interrupts because of level */
- out_be32(pic->regs + TIR_MSK, 0x0);
-
- /* enable interrupt packets to be output */
- out_be32(pic->regs + TIR_PIEN, in_be32(pic->regs + TIR_PIEN) | 0x1);
-
- /* Hook up the cascade interrupt to the iic and nodeid */
- virq = spider_find_cascade_and_node(pic);
- if (!virq)
- return;
- irq_set_handler_data(virq, pic);
- irq_set_chained_handler(virq, spider_irq_cascade);
-
- printk(KERN_INFO "spider_pic: node %d, addr: 0x%lx %pOF\n",
- pic->node_id, addr, of_node);
-
- /* Enable the interrupt detection enable bit. Do this last! */
- out_be32(pic->regs + TIR_DEN, in_be32(pic->regs + TIR_DEN) | 0x1);
-}
-
-void __init spider_init_IRQ(void)
-{
- struct resource r;
- struct device_node *dn;
- int chip = 0;
-
- /* XXX node numbers are totally bogus. We _hope_ we get the device
- * nodes in the right order here but that's definitely not guaranteed,
- * we need to get the node from the device tree instead.
- * There is currently no proper property for it (but our whole
- * device-tree is bogus anyway) so all we can do is pray or maybe test
- * the address and deduce the node-id
- */
- for_each_node_by_name(dn, "interrupt-controller") {
- if (of_device_is_compatible(dn, "CBEA,platform-spider-pic")) {
- if (of_address_to_resource(dn, 0, &r)) {
- printk(KERN_WARNING "spider-pic: Failed\n");
- continue;
- }
- } else if (of_device_is_compatible(dn, "sti,platform-spider-pic")
- && (chip < 2)) {
- static long hard_coded_pics[] =
- { 0x24000008000ul, 0x34000008000ul};
- r.start = hard_coded_pics[chip];
- } else
- continue;
- spider_init_one(dn, chip++, r.start);
- }
-}
diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c
index dea6f0f25897..2c07387201d0 100644
--- a/arch/powerpc/platforms/cell/spu_base.c
+++ b/arch/powerpc/platforms/cell/spu_base.c
@@ -23,7 +23,6 @@
#include <asm/spu.h>
#include <asm/spu_priv1.h>
#include <asm/spu_csa.h>
-#include <asm/xmon.h>
#include <asm/kexec.h>
const struct spu_management_ops *spu_management_ops;
@@ -772,7 +771,6 @@ static int __init init_spu_base(void)
fb_append_extra_logo(&logo_spe_clut224, ret);
mutex_lock(&spu_full_list_mutex);
- xmon_register_spus(&spu_full_list);
crash_register_spus(&spu_full_list);
mutex_unlock(&spu_full_list_mutex);
spu_add_dev_attr(&dev_attr_stat);
diff --git a/arch/powerpc/platforms/cell/spu_manage.c b/arch/powerpc/platforms/cell/spu_manage.c
deleted file mode 100644
index f464a1f2e568..000000000000
--- a/arch/powerpc/platforms/cell/spu_manage.c
+++ /dev/null
@@ -1,530 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * spu management operations for of based platforms
- *
- * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
- * Copyright 2006 Sony Corp.
- * (C) Copyright 2007 TOSHIBA CORPORATION
- */
-
-#include <linux/interrupt.h>
-#include <linux/list.h>
-#include <linux/export.h>
-#include <linux/ptrace.h>
-#include <linux/wait.h>
-#include <linux/mm.h>
-#include <linux/io.h>
-#include <linux/mutex.h>
-#include <linux/device.h>
-#include <linux/of_address.h>
-#include <linux/of_irq.h>
-
-#include <asm/spu.h>
-#include <asm/spu_priv1.h>
-#include <asm/firmware.h>
-
-#include "spufs/spufs.h"
-#include "interrupt.h"
-#include "spu_priv1_mmio.h"
-
-struct device_node *spu_devnode(struct spu *spu)
-{
- return spu->devnode;
-}
-
-EXPORT_SYMBOL_GPL(spu_devnode);
-
-static u64 __init find_spu_unit_number(struct device_node *spe)
-{
- const unsigned int *prop;
- int proplen;
-
- /* new device trees should provide the physical-id attribute */
- prop = of_get_property(spe, "physical-id", &proplen);
- if (proplen == 4)
- return (u64)*prop;
-
- /* celleb device tree provides the unit-id */
- prop = of_get_property(spe, "unit-id", &proplen);
- if (proplen == 4)
- return (u64)*prop;
-
- /* legacy device trees provide the id in the reg attribute */
- prop = of_get_property(spe, "reg", &proplen);
- if (proplen == 4)
- return (u64)*prop;
-
- return 0;
-}
-
-static void spu_unmap(struct spu *spu)
-{
- if (!firmware_has_feature(FW_FEATURE_LPAR))
- iounmap(spu->priv1);
- iounmap(spu->priv2);
- iounmap(spu->problem);
- iounmap((__force u8 __iomem *)spu->local_store);
-}
-
-static int __init spu_map_interrupts_old(struct spu *spu,
- struct device_node *np)
-{
- unsigned int isrc;
- const u32 *tmp;
- int nid;
-
- /* Get the interrupt source unit from the device-tree */
- tmp = of_get_property(np, "isrc", NULL);
- if (!tmp)
- return -ENODEV;
- isrc = tmp[0];
-
- tmp = of_get_property(np->parent->parent, "node-id", NULL);
- if (!tmp) {
- printk(KERN_WARNING "%s: can't find node-id\n", __func__);
- nid = spu->node;
- } else
- nid = tmp[0];
-
- /* Add the node number */
- isrc |= nid << IIC_IRQ_NODE_SHIFT;
-
- /* Now map interrupts of all 3 classes */
- spu->irqs[0] = irq_create_mapping(NULL, IIC_IRQ_CLASS_0 | isrc);
- spu->irqs[1] = irq_create_mapping(NULL, IIC_IRQ_CLASS_1 | isrc);
- spu->irqs[2] = irq_create_mapping(NULL, IIC_IRQ_CLASS_2 | isrc);
-
- /* Right now, we only fail if class 2 failed */
- if (!spu->irqs[2])
- return -EINVAL;
-
- return 0;
-}
-
-static void __iomem * __init spu_map_prop_old(struct spu *spu,
- struct device_node *n,
- const char *name)
-{
- const struct address_prop {
- unsigned long address;
- unsigned int len;
- } __attribute__((packed)) *prop;
- int proplen;
-
- prop = of_get_property(n, name, &proplen);
- if (prop == NULL || proplen != sizeof (struct address_prop))
- return NULL;
-
- return ioremap(prop->address, prop->len);
-}
-
-static int __init spu_map_device_old(struct spu *spu)
-{
- struct device_node *node = spu->devnode;
- const char *prop;
- int ret;
-
- ret = -ENODEV;
- spu->name = of_get_property(node, "name", NULL);
- if (!spu->name)
- goto out;
-
- prop = of_get_property(node, "local-store", NULL);
- if (!prop)
- goto out;
- spu->local_store_phys = *(unsigned long *)prop;
-
- /* we use local store as ram, not io memory */
- spu->local_store = (void __force *)
- spu_map_prop_old(spu, node, "local-store");
- if (!spu->local_store)
- goto out;
-
- prop = of_get_property(node, "problem", NULL);
- if (!prop)
- goto out_unmap;
- spu->problem_phys = *(unsigned long *)prop;
-
- spu->problem = spu_map_prop_old(spu, node, "problem");
- if (!spu->problem)
- goto out_unmap;
-
- spu->priv2 = spu_map_prop_old(spu, node, "priv2");
- if (!spu->priv2)
- goto out_unmap;
-
- if (!firmware_has_feature(FW_FEATURE_LPAR)) {
- spu->priv1 = spu_map_prop_old(spu, node, "priv1");
- if (!spu->priv1)
- goto out_unmap;
- }
-
- ret = 0;
- goto out;
-
-out_unmap:
- spu_unmap(spu);
-out:
- return ret;
-}
-
-static int __init spu_map_interrupts(struct spu *spu, struct device_node *np)
-{
- int i;
-
- for (i=0; i < 3; i++) {
- spu->irqs[i] = irq_of_parse_and_map(np, i);
- if (!spu->irqs[i])
- goto err;
- }
- return 0;
-
-err:
- pr_debug("failed to map irq %x for spu %s\n", i, spu->name);
- for (; i >= 0; i--) {
- if (spu->irqs[i])
- irq_dispose_mapping(spu->irqs[i]);
- }
- return -EINVAL;
-}
-
-static int __init spu_map_resource(struct spu *spu, int nr,
- void __iomem** virt, unsigned long *phys)
-{
- struct device_node *np = spu->devnode;
- struct resource resource = { };
- unsigned long len;
- int ret;
-
- ret = of_address_to_resource(np, nr, &resource);
- if (ret)
- return ret;
- if (phys)
- *phys = resource.start;
- len = resource_size(&resource);
- *virt = ioremap(resource.start, len);
- if (!*virt)
- return -EINVAL;
- return 0;
-}
-
-static int __init spu_map_device(struct spu *spu)
-{
- struct device_node *np = spu->devnode;
- int ret = -ENODEV;
-
- spu->name = of_get_property(np, "name", NULL);
- if (!spu->name)
- goto out;
-
- ret = spu_map_resource(spu, 0, (void __iomem**)&spu->local_store,
- &spu->local_store_phys);
- if (ret) {
- pr_debug("spu_new: failed to map %pOF resource 0\n",
- np);
- goto out;
- }
- ret = spu_map_resource(spu, 1, (void __iomem**)&spu->problem,
- &spu->problem_phys);
- if (ret) {
- pr_debug("spu_new: failed to map %pOF resource 1\n",
- np);
- goto out_unmap;
- }
- ret = spu_map_resource(spu, 2, (void __iomem**)&spu->priv2, NULL);
- if (ret) {
- pr_debug("spu_new: failed to map %pOF resource 2\n",
- np);
- goto out_unmap;
- }
- if (!firmware_has_feature(FW_FEATURE_LPAR))
- ret = spu_map_resource(spu, 3,
- (void __iomem**)&spu->priv1, NULL);
- if (ret) {
- pr_debug("spu_new: failed to map %pOF resource 3\n",
- np);
- goto out_unmap;
- }
- pr_debug("spu_new: %pOF maps:\n", np);
- pr_debug(" local store : 0x%016lx -> 0x%p\n",
- spu->local_store_phys, spu->local_store);
- pr_debug(" problem state : 0x%016lx -> 0x%p\n",
- spu->problem_phys, spu->problem);
- pr_debug(" priv2 : 0x%p\n", spu->priv2);
- pr_debug(" priv1 : 0x%p\n", spu->priv1);
-
- return 0;
-
-out_unmap:
- spu_unmap(spu);
-out:
- pr_debug("failed to map spe %s: %d\n", spu->name, ret);
- return ret;
-}
-
-static int __init of_enumerate_spus(int (*fn)(void *data))
-{
- int ret;
- struct device_node *node;
- unsigned int n = 0;
-
- ret = -ENODEV;
- for_each_node_by_type(node, "spe") {
- ret = fn(node);
- if (ret) {
- printk(KERN_WARNING "%s: Error initializing %pOFn\n",
- __func__, node);
- of_node_put(node);
- break;
- }
- n++;
- }
- return ret ? ret : n;
-}
-
-static int __init of_create_spu(struct spu *spu, void *data)
-{
- int ret;
- struct device_node *spe = (struct device_node *)data;
- static int legacy_map = 0, legacy_irq = 0;
-
- spu->devnode = of_node_get(spe);
- spu->spe_id = find_spu_unit_number(spe);
-
- spu->node = of_node_to_nid(spe);
- if (spu->node >= MAX_NUMNODES) {
- printk(KERN_WARNING "SPE %pOF on node %d ignored,"
- " node number too big\n", spe, spu->node);
- printk(KERN_WARNING "Check if CONFIG_NUMA is enabled.\n");
- ret = -ENODEV;
- goto out;
- }
-
- ret = spu_map_device(spu);
- if (ret) {
- if (!legacy_map) {
- legacy_map = 1;
- printk(KERN_WARNING "%s: Legacy device tree found, "
- "trying to map old style\n", __func__);
- }
- ret = spu_map_device_old(spu);
- if (ret) {
- printk(KERN_ERR "Unable to map %s\n",
- spu->name);
- goto out;
- }
- }
-
- ret = spu_map_interrupts(spu, spe);
- if (ret) {
- if (!legacy_irq) {
- legacy_irq = 1;
- printk(KERN_WARNING "%s: Legacy device tree found, "
- "trying old style irq\n", __func__);
- }
- ret = spu_map_interrupts_old(spu, spe);
- if (ret) {
- printk(KERN_ERR "%s: could not map interrupts\n",
- spu->name);
- goto out_unmap;
- }
- }
-
- pr_debug("Using SPE %s %p %p %p %p %d\n", spu->name,
- spu->local_store, spu->problem, spu->priv1,
- spu->priv2, spu->number);
- goto out;
-
-out_unmap:
- spu_unmap(spu);
-out:
- return ret;
-}
-
-static int of_destroy_spu(struct spu *spu)
-{
- spu_unmap(spu);
- of_node_put(spu->devnode);
- return 0;
-}
-
-static void enable_spu_by_master_run(struct spu_context *ctx)
-{
- ctx->ops->master_start(ctx);
-}
-
-static void disable_spu_by_master_run(struct spu_context *ctx)
-{
- ctx->ops->master_stop(ctx);
-}
-
-/* Hardcoded affinity idxs for qs20 */
-#define QS20_SPES_PER_BE 8
-static int qs20_reg_idxs[QS20_SPES_PER_BE] = { 0, 2, 4, 6, 7, 5, 3, 1 };
-static int qs20_reg_memory[QS20_SPES_PER_BE] = { 1, 1, 0, 0, 0, 0, 0, 0 };
-
-static struct spu *__init spu_lookup_reg(int node, u32 reg)
-{
- struct spu *spu;
- const u32 *spu_reg;
-
- list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
- spu_reg = of_get_property(spu_devnode(spu), "reg", NULL);
- if (*spu_reg == reg)
- return spu;
- }
- return NULL;
-}
-
-static void __init init_affinity_qs20_harcoded(void)
-{
- int node, i;
- struct spu *last_spu, *spu;
- u32 reg;
-
- for (node = 0; node < MAX_NUMNODES; node++) {
- last_spu = NULL;
- for (i = 0; i < QS20_SPES_PER_BE; i++) {
- reg = qs20_reg_idxs[i];
- spu = spu_lookup_reg(node, reg);
- if (!spu)
- continue;
- spu->has_mem_affinity = qs20_reg_memory[reg];
- if (last_spu)
- list_add_tail(&spu->aff_list,
- &last_spu->aff_list);
- last_spu = spu;
- }
- }
-}
-
-static int __init of_has_vicinity(void)
-{
- struct device_node *dn;
-
- for_each_node_by_type(dn, "spe") {
- if (of_property_present(dn, "vicinity")) {
- of_node_put(dn);
- return 1;
- }
- }
- return 0;
-}
-
-static struct spu *__init devnode_spu(int cbe, struct device_node *dn)
-{
- struct spu *spu;
-
- list_for_each_entry(spu, &cbe_spu_info[cbe].spus, cbe_list)
- if (spu_devnode(spu) == dn)
- return spu;
- return NULL;
-}
-
-static struct spu * __init
-neighbour_spu(int cbe, struct device_node *target, struct device_node *avoid)
-{
- struct spu *spu;
- struct device_node *spu_dn;
- const phandle *vic_handles;
- int lenp, i;
-
- list_for_each_entry(spu, &cbe_spu_info[cbe].spus, cbe_list) {
- spu_dn = spu_devnode(spu);
- if (spu_dn == avoid)
- continue;
- vic_handles = of_get_property(spu_dn, "vicinity", &lenp);
- for (i=0; i < (lenp / sizeof(phandle)); i++) {
- if (vic_handles[i] == target->phandle)
- return spu;
- }
- }
- return NULL;
-}
-
-static void __init init_affinity_node(int cbe)
-{
- struct spu *spu, *last_spu;
- struct device_node *vic_dn, *last_spu_dn;
- phandle avoid_ph;
- const phandle *vic_handles;
- int lenp, i, added;
-
- last_spu = list_first_entry(&cbe_spu_info[cbe].spus, struct spu,
- cbe_list);
- avoid_ph = 0;
- for (added = 1; added < cbe_spu_info[cbe].n_spus; added++) {
- last_spu_dn = spu_devnode(last_spu);
- vic_handles = of_get_property(last_spu_dn, "vicinity", &lenp);
-
- /*
- * Walk through each phandle in vicinity property of the spu
- * (typically two vicinity phandles per spe node)
- */
- for (i = 0; i < (lenp / sizeof(phandle)); i++) {
- if (vic_handles[i] == avoid_ph)
- continue;
-
- vic_dn = of_find_node_by_phandle(vic_handles[i]);
- if (!vic_dn)
- continue;
-
- if (of_node_name_eq(vic_dn, "spe") ) {
- spu = devnode_spu(cbe, vic_dn);
- avoid_ph = last_spu_dn->phandle;
- } else {
- /*
- * "mic-tm" and "bif0" nodes do not have
- * vicinity property. So we need to find the
- * spe which has vic_dn as neighbour, but
- * skipping the one we came from (last_spu_dn)
- */
- spu = neighbour_spu(cbe, vic_dn, last_spu_dn);
- if (!spu)
- continue;
- if (of_node_name_eq(vic_dn, "mic-tm")) {
- last_spu->has_mem_affinity = 1;
- spu->has_mem_affinity = 1;
- }
- avoid_ph = vic_dn->phandle;
- }
-
- of_node_put(vic_dn);
-
- list_add_tail(&spu->aff_list, &last_spu->aff_list);
- last_spu = spu;
- break;
- }
- }
-}
-
-static void __init init_affinity_fw(void)
-{
- int cbe;
-
- for (cbe = 0; cbe < MAX_NUMNODES; cbe++)
- init_affinity_node(cbe);
-}
-
-static int __init init_affinity(void)
-{
- if (of_has_vicinity()) {
- init_affinity_fw();
- } else {
- if (of_machine_is_compatible("IBM,CPBW-1.0"))
- init_affinity_qs20_harcoded();
- else
- printk("No affinity configuration found\n");
- }
-
- return 0;
-}
-
-const struct spu_management_ops spu_management_of_ops = {
- .enumerate_spus = of_enumerate_spus,
- .create_spu = of_create_spu,
- .destroy_spu = of_destroy_spu,
- .enable_spu = enable_spu_by_master_run,
- .disable_spu = disable_spu_by_master_run,
- .init_affinity = init_affinity,
-};
diff --git a/arch/powerpc/platforms/cell/spu_priv1_mmio.c b/arch/powerpc/platforms/cell/spu_priv1_mmio.c
deleted file mode 100644
index d150e3987304..000000000000
--- a/arch/powerpc/platforms/cell/spu_priv1_mmio.c
+++ /dev/null
@@ -1,167 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * spu hypervisor abstraction for direct hardware access.
- *
- * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
- * Copyright 2006 Sony Corp.
- */
-
-#include <linux/interrupt.h>
-#include <linux/list.h>
-#include <linux/ptrace.h>
-#include <linux/wait.h>
-#include <linux/mm.h>
-#include <linux/io.h>
-#include <linux/mutex.h>
-#include <linux/device.h>
-#include <linux/sched.h>
-
-#include <asm/spu.h>
-#include <asm/spu_priv1.h>
-#include <asm/firmware.h>
-
-#include "interrupt.h"
-#include "spu_priv1_mmio.h"
-
-static void int_mask_and(struct spu *spu, int class, u64 mask)
-{
- u64 old_mask;
-
- old_mask = in_be64(&spu->priv1->int_mask_RW[class]);
- out_be64(&spu->priv1->int_mask_RW[class], old_mask & mask);
-}
-
-static void int_mask_or(struct spu *spu, int class, u64 mask)
-{
- u64 old_mask;
-
- old_mask = in_be64(&spu->priv1->int_mask_RW[class]);
- out_be64(&spu->priv1->int_mask_RW[class], old_mask | mask);
-}
-
-static void int_mask_set(struct spu *spu, int class, u64 mask)
-{
- out_be64(&spu->priv1->int_mask_RW[class], mask);
-}
-
-static u64 int_mask_get(struct spu *spu, int class)
-{
- return in_be64(&spu->priv1->int_mask_RW[class]);
-}
-
-static void int_stat_clear(struct spu *spu, int class, u64 stat)
-{
- out_be64(&spu->priv1->int_stat_RW[class], stat);
-}
-
-static u64 int_stat_get(struct spu *spu, int class)
-{
- return in_be64(&spu->priv1->int_stat_RW[class]);
-}
-
-static void cpu_affinity_set(struct spu *spu, int cpu)
-{
- u64 target;
- u64 route;
-
- if (nr_cpus_node(spu->node)) {
- const struct cpumask *spumask = cpumask_of_node(spu->node),
- *cpumask = cpumask_of_node(cpu_to_node(cpu));
-
- if (!cpumask_intersects(spumask, cpumask))
- return;
- }
-
- target = iic_get_target_id(cpu);
- route = target << 48 | target << 32 | target << 16;
- out_be64(&spu->priv1->int_route_RW, route);
-}
-
-static u64 mfc_dar_get(struct spu *spu)
-{
- return in_be64(&spu->priv1->mfc_dar_RW);
-}
-
-static u64 mfc_dsisr_get(struct spu *spu)
-{
- return in_be64(&spu->priv1->mfc_dsisr_RW);
-}
-
-static void mfc_dsisr_set(struct spu *spu, u64 dsisr)
-{
- out_be64(&spu->priv1->mfc_dsisr_RW, dsisr);
-}
-
-static void mfc_sdr_setup(struct spu *spu)
-{
- out_be64(&spu->priv1->mfc_sdr_RW, mfspr(SPRN_SDR1));
-}
-
-static void mfc_sr1_set(struct spu *spu, u64 sr1)
-{
- out_be64(&spu->priv1->mfc_sr1_RW, sr1);
-}
-
-static u64 mfc_sr1_get(struct spu *spu)
-{
- return in_be64(&spu->priv1->mfc_sr1_RW);
-}
-
-static void mfc_tclass_id_set(struct spu *spu, u64 tclass_id)
-{
- out_be64(&spu->priv1->mfc_tclass_id_RW, tclass_id);
-}
-
-static u64 mfc_tclass_id_get(struct spu *spu)
-{
- return in_be64(&spu->priv1->mfc_tclass_id_RW);
-}
-
-static void tlb_invalidate(struct spu *spu)
-{
- out_be64(&spu->priv1->tlb_invalidate_entry_W, 0ul);
-}
-
-static void resource_allocation_groupID_set(struct spu *spu, u64 id)
-{
- out_be64(&spu->priv1->resource_allocation_groupID_RW, id);
-}
-
-static u64 resource_allocation_groupID_get(struct spu *spu)
-{
- return in_be64(&spu->priv1->resource_allocation_groupID_RW);
-}
-
-static void resource_allocation_enable_set(struct spu *spu, u64 enable)
-{
- out_be64(&spu->priv1->resource_allocation_enable_RW, enable);
-}
-
-static u64 resource_allocation_enable_get(struct spu *spu)
-{
- return in_be64(&spu->priv1->resource_allocation_enable_RW);
-}
-
-const struct spu_priv1_ops spu_priv1_mmio_ops =
-{
- .int_mask_and = int_mask_and,
- .int_mask_or = int_mask_or,
- .int_mask_set = int_mask_set,
- .int_mask_get = int_mask_get,
- .int_stat_clear = int_stat_clear,
- .int_stat_get = int_stat_get,
- .cpu_affinity_set = cpu_affinity_set,
- .mfc_dar_get = mfc_dar_get,
- .mfc_dsisr_get = mfc_dsisr_get,
- .mfc_dsisr_set = mfc_dsisr_set,
- .mfc_sdr_setup = mfc_sdr_setup,
- .mfc_sr1_set = mfc_sr1_set,
- .mfc_sr1_get = mfc_sr1_get,
- .mfc_tclass_id_set = mfc_tclass_id_set,
- .mfc_tclass_id_get = mfc_tclass_id_get,
- .tlb_invalidate = tlb_invalidate,
- .resource_allocation_groupID_set = resource_allocation_groupID_set,
- .resource_allocation_groupID_get = resource_allocation_groupID_get,
- .resource_allocation_enable_set = resource_allocation_enable_set,
- .resource_allocation_enable_get = resource_allocation_enable_get,
-};
diff --git a/arch/powerpc/platforms/cell/spu_priv1_mmio.h b/arch/powerpc/platforms/cell/spu_priv1_mmio.h
deleted file mode 100644
index 04f0db339dc1..000000000000
--- a/arch/powerpc/platforms/cell/spu_priv1_mmio.h
+++ /dev/null
@@ -1,14 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * spu hypervisor abstraction for direct hardware access.
- *
- * Copyright (C) 2006 Sony Computer Entertainment Inc.
- * Copyright 2006 Sony Corp.
- */
-
-#ifndef SPU_PRIV1_MMIO_H
-#define SPU_PRIV1_MMIO_H
-
-struct device_node *spu_devnode(struct spu *spu);
-
-#endif /* SPU_PRIV1_MMIO_H */
diff --git a/arch/powerpc/platforms/microwatt/Kconfig b/arch/powerpc/platforms/microwatt/Kconfig
index 6af443a1db99..cb2aff635bb0 100644
--- a/arch/powerpc/platforms/microwatt/Kconfig
+++ b/arch/powerpc/platforms/microwatt/Kconfig
@@ -1,11 +1,12 @@
# SPDX-License-Identifier: GPL-2.0
config PPC_MICROWATT
- depends on PPC_BOOK3S_64 && !SMP
+ depends on PPC_BOOK3S_64
bool "Microwatt SoC platform"
select PPC_XICS
select PPC_ICS_NATIVE
select PPC_ICP_NATIVE
select PPC_UDBG_16550
+ select COMMON_CLK
help
This option enables support for FPGA-based Microwatt implementations.
diff --git a/arch/powerpc/platforms/microwatt/Makefile b/arch/powerpc/platforms/microwatt/Makefile
index 116d6d3ad3f0..d973b2ab4042 100644
--- a/arch/powerpc/platforms/microwatt/Makefile
+++ b/arch/powerpc/platforms/microwatt/Makefile
@@ -1 +1,2 @@
obj-y += setup.o rng.o
+obj-$(CONFIG_SMP) += smp.o
diff --git a/arch/powerpc/platforms/microwatt/microwatt.h b/arch/powerpc/platforms/microwatt/microwatt.h
index 335417e95e66..891aa2800768 100644
--- a/arch/powerpc/platforms/microwatt/microwatt.h
+++ b/arch/powerpc/platforms/microwatt/microwatt.h
@@ -3,5 +3,6 @@
#define _MICROWATT_H
void microwatt_rng_init(void);
+void microwatt_init_smp(void);
#endif /* _MICROWATT_H */
diff --git a/arch/powerpc/platforms/microwatt/setup.c b/arch/powerpc/platforms/microwatt/setup.c
index 5e1c0997170d..6af2ccef736c 100644
--- a/arch/powerpc/platforms/microwatt/setup.c
+++ b/arch/powerpc/platforms/microwatt/setup.c
@@ -29,15 +29,33 @@ static int __init microwatt_populate(void)
}
machine_arch_initcall(microwatt, microwatt_populate);
+static int __init microwatt_probe(void)
+{
+ /* Main reason for having this is to start the other CPU(s) */
+ if (IS_ENABLED(CONFIG_SMP))
+ microwatt_init_smp();
+ return 1;
+}
+
static void __init microwatt_setup_arch(void)
{
microwatt_rng_init();
}
+static void microwatt_idle(void)
+{
+ if (!prep_irq_for_idle_irqsoff())
+ return;
+
+ __asm__ __volatile__ ("wait");
+}
+
define_machine(microwatt) {
.name = "microwatt",
.compatible = "microwatt-soc",
+ .probe = microwatt_probe,
.init_IRQ = microwatt_init_IRQ,
.setup_arch = microwatt_setup_arch,
.progress = udbg_progress,
+ .power_save = microwatt_idle,
};
diff --git a/arch/powerpc/platforms/microwatt/smp.c b/arch/powerpc/platforms/microwatt/smp.c
new file mode 100644
index 000000000000..7dbf2ca73d47
--- /dev/null
+++ b/arch/powerpc/platforms/microwatt/smp.c
@@ -0,0 +1,80 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+/*
+ * SMP support functions for Microwatt
+ * Copyright 2025 Paul Mackerras <paulus@ozlabs.org>
+ */
+
+#include <linux/kernel.h>
+#include <linux/smp.h>
+#include <linux/io.h>
+#include <asm/early_ioremap.h>
+#include <asm/ppc-opcode.h>
+#include <asm/reg.h>
+#include <asm/smp.h>
+#include <asm/xics.h>
+
+#include "microwatt.h"
+
+static void __init microwatt_smp_probe(void)
+{
+ xics_smp_probe();
+}
+
+static void microwatt_smp_setup_cpu(int cpu)
+{
+ if (cpu != 0)
+ xics_setup_cpu();
+}
+
+static struct smp_ops_t microwatt_smp_ops = {
+ .probe = microwatt_smp_probe,
+ .message_pass = NULL, /* Use smp_muxed_ipi_message_pass */
+ .kick_cpu = smp_generic_kick_cpu,
+ .setup_cpu = microwatt_smp_setup_cpu,
+};
+
+/* XXX get from device tree */
+#define SYSCON_BASE 0xc0000000
+#define SYSCON_LENGTH 0x100
+
+#define SYSCON_CPU_CTRL 0x58
+
+void __init microwatt_init_smp(void)
+{
+ volatile unsigned char __iomem *syscon;
+ int ncpus;
+ int timeout;
+
+ syscon = early_ioremap(SYSCON_BASE, SYSCON_LENGTH);
+ if (syscon == NULL) {
+ pr_err("Failed to map SYSCON\n");
+ return;
+ }
+ ncpus = (readl(syscon + SYSCON_CPU_CTRL) >> 8) & 0xff;
+ if (ncpus < 2)
+ goto out;
+
+ smp_ops = &microwatt_smp_ops;
+
+ /*
+ * Write two instructions at location 0:
+ * mfspr r3, PIR
+ * b __secondary_hold
+ */
+ *(unsigned int *)KERNELBASE = PPC_RAW_MFSPR(3, SPRN_PIR);
+ *(unsigned int *)(KERNELBASE+4) = PPC_RAW_BRANCH(&__secondary_hold - (char *)(KERNELBASE+4));
+
+ /* enable the other CPUs, they start at location 0 */
+ writel((1ul << ncpus) - 1, syscon + SYSCON_CPU_CTRL);
+
+ timeout = 10000;
+ while (!__secondary_hold_acknowledge) {
+ if (--timeout == 0)
+ break;
+ barrier();
+ }
+
+ out:
+ early_iounmap((void *)syscon, SYSCON_LENGTH);
+}
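
A rough sketch of the SYSCON_CPU_CTRL usage implied by the code above. The
register layout here is an assumption read off the accesses in the patch
(bits 15:8 reporting the core count, bits 7:0 acting as a per-core enable
bitmap), not taken from Microwatt documentation:

	/* sketch only -- cpu_enable_mask() is a hypothetical helper */
	static unsigned long cpu_enable_mask(unsigned int ctrl_reg)
	{
		unsigned int ncpus = (ctrl_reg >> 8) & 0xff;	/* core count field */

		return (1ul << ncpus) - 1;	/* e.g. 2 cores -> mask 0x3 */
	}

With two cores the value written back to SYSCON_CPU_CTRL is 0x3, releasing
both; the secondaries then execute the two instructions patched in at address
0 and park in __secondary_hold, which is what the bounded wait on
__secondary_hold_acknowledge checks for.
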
diff --git a/arch/powerpc/platforms/powernv/Kconfig b/arch/powerpc/platforms/powernv/Kconfig
index 70a46acc70d6..3fbe0295ce14 100644
--- a/arch/powerpc/platforms/powernv/Kconfig
+++ b/arch/powerpc/platforms/powernv/Kconfig
@@ -17,6 +17,7 @@ config PPC_POWERNV
select MMU_NOTIFIER
select FORCE_SMP
select ARCH_SUPPORTS_PER_VMA_LOCK
+ select PPC_RADIX_BROADCAST_TLBIE
default y
config OPAL_PRD
diff --git a/arch/powerpc/platforms/powernv/opal-core.c b/arch/powerpc/platforms/powernv/opal-core.c
index c9a9b759cc92..a379ff86c120 100644
--- a/arch/powerpc/platforms/powernv/opal-core.c
+++ b/arch/powerpc/platforms/powernv/opal-core.c
@@ -149,7 +149,7 @@ static Elf64_Word *__init auxv_to_elf64_notes(Elf64_Word *buf,
/* end of vector */
bufp[idx++] = cpu_to_be64(AT_NULL);
- buf = append_elf64_note(buf, CRASH_CORE_NOTE_NAME, NT_AUXV,
+ buf = append_elf64_note(buf, NN_AUXV, NT_AUXV,
oc_conf->auxv_buf, AUXV_DESC_SZ);
return buf;
}
@@ -252,7 +252,7 @@ static Elf64_Word * __init opalcore_append_cpu_notes(Elf64_Word *buf)
* crashing CPU's prstatus.
*/
first_cpu_note = buf;
- buf = append_elf64_note(buf, CRASH_CORE_NOTE_NAME, NT_PRSTATUS,
+ buf = append_elf64_note(buf, NN_PRSTATUS, NT_PRSTATUS,
&prstatus, sizeof(prstatus));
for (i = 0; i < oc_conf->num_cpus; i++, bufp += size_per_thread) {
@@ -279,7 +279,7 @@ static Elf64_Word * __init opalcore_append_cpu_notes(Elf64_Word *buf)
fill_prstatus(&prstatus, thread_pir, &regs);
if (thread_pir != oc_conf->crashing_cpu) {
- buf = append_elf64_note(buf, CRASH_CORE_NOTE_NAME,
+ buf = append_elf64_note(buf, NN_PRSTATUS,
NT_PRSTATUS, &prstatus,
sizeof(prstatus));
} else {
@@ -287,7 +287,7 @@ static Elf64_Word * __init opalcore_append_cpu_notes(Elf64_Word *buf)
* Add crashing CPU as the first NT_PRSTATUS note for
* GDB to process the core file appropriately.
*/
- append_elf64_note(first_cpu_note, CRASH_CORE_NOTE_NAME,
+ append_elf64_note(first_cpu_note, NN_PRSTATUS,
NT_PRSTATUS, &prstatus,
sizeof(prstatus));
}
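
For context on the CRASH_CORE_NOTE_NAME -> NN_* change above: append_elf64_note()
emits a standard ELF note record, and the NN_* macros supply the note's name
string (conventionally "CORE" for these core-dump note classes) while the NT_*
constants remain the numeric type, so the generated notes should be unchanged.
A layout sketch, assuming the usual 4-byte ELF note padding (this is not the
kernel's own struct definition):

	struct elf64_note_sketch {
		unsigned int n_namesz;	/* strlen(name) + 1, e.g. sizeof("CORE") */
		unsigned int n_descsz;	/* size of the payload (prstatus, auxv)  */
		unsigned int n_type;	/* NT_PRSTATUS, NT_AUXV, ...             */
		/* name bytes and payload follow, each padded to 4 bytes */
	};
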
diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig
index 42fc66e97539..a934c2a262f6 100644
--- a/arch/powerpc/platforms/pseries/Kconfig
+++ b/arch/powerpc/platforms/pseries/Kconfig
@@ -23,6 +23,7 @@ config PPC_PSERIES
select FORCE_SMP
select SWIOTLB
select ARCH_SUPPORTS_PER_VMA_LOCK
+ select PPC_RADIX_BROADCAST_TLBIE
default y
config PARAVIRT
@@ -128,6 +129,15 @@ config CMM
will be reused for other LPARs. The interface allows firmware to
balance memory across many LPARs.
+config HTMDUMP
+ tristate "PowerVM data dumper"
+ depends on PPC_PSERIES && DEBUG_FS
+ default m
+ help
+	  Select this option if you want to enable the kernel debugfs
+ interface to dump the Hardware Trace Macro (HTM) function data
+ in the LPAR.
+
config HV_PERF_CTRS
bool "Hypervisor supplied PMU events (24x7 & GPCI)"
default y
diff --git a/arch/powerpc/platforms/pseries/Makefile b/arch/powerpc/platforms/pseries/Makefile
index 7bf506f6b8c8..3f3e3492e436 100644
--- a/arch/powerpc/platforms/pseries/Makefile
+++ b/arch/powerpc/platforms/pseries/Makefile
@@ -19,6 +19,7 @@ obj-$(CONFIG_HVC_CONSOLE) += hvconsole.o
obj-$(CONFIG_HVCS) += hvcserver.o
obj-$(CONFIG_HCALL_STATS) += hvCall_inst.o
obj-$(CONFIG_CMM) += cmm.o
+obj-$(CONFIG_HTMDUMP) += htmdump.o
obj-$(CONFIG_IO_EVENT_IRQ) += io_event_irq.o
obj-$(CONFIG_LPARCFG) += lparcfg.o
obj-$(CONFIG_IBMVIO) += vio.o
diff --git a/arch/powerpc/platforms/pseries/htmdump.c b/arch/powerpc/platforms/pseries/htmdump.c
new file mode 100644
index 000000000000..57fc1700f604
--- /dev/null
+++ b/arch/powerpc/platforms/pseries/htmdump.c
@@ -0,0 +1,121 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) IBM Corporation, 2024
+ */
+
+#define pr_fmt(fmt) "htmdump: " fmt
+
+#include <linux/debugfs.h>
+#include <linux/module.h>
+#include <asm/io.h>
+#include <asm/machdep.h>
+#include <asm/plpar_wrappers.h>
+
+static void *htm_buf;
+static u32 nodeindex;
+static u32 nodalchipindex;
+static u32 coreindexonchip;
+static u32 htmtype;
+static struct dentry *htmdump_debugfs_dir;
+
+static ssize_t htmdump_read(struct file *filp, char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ void *htm_buf = filp->private_data;
+ unsigned long page, read_size, available;
+ loff_t offset;
+ long rc;
+
+ page = ALIGN_DOWN(*ppos, PAGE_SIZE);
+ offset = (*ppos) % PAGE_SIZE;
+
+ rc = htm_get_dump_hardware(nodeindex, nodalchipindex, coreindexonchip,
+ htmtype, virt_to_phys(htm_buf), PAGE_SIZE, page);
+
+ switch (rc) {
+ case H_SUCCESS:
+ /* H_PARTIAL for the case where all available data can't be
+ * returned due to buffer size constraint.
+ */
+ case H_PARTIAL:
+ break;
+ /* H_NOT_AVAILABLE indicates reading from an offset outside the range,
+ * i.e. past end of file.
+ */
+ case H_NOT_AVAILABLE:
+ return 0;
+ case H_BUSY:
+ case H_LONG_BUSY_ORDER_1_MSEC:
+ case H_LONG_BUSY_ORDER_10_MSEC:
+ case H_LONG_BUSY_ORDER_100_MSEC:
+ case H_LONG_BUSY_ORDER_1_SEC:
+ case H_LONG_BUSY_ORDER_10_SEC:
+ case H_LONG_BUSY_ORDER_100_SEC:
+ return -EBUSY;
+ case H_PARAMETER:
+ case H_P2:
+ case H_P3:
+ case H_P4:
+ case H_P5:
+ case H_P6:
+ return -EINVAL;
+ case H_STATE:
+ return -EIO;
+ case H_AUTHORITY:
+ return -EPERM;
+ }
+
+ available = PAGE_SIZE;
+ read_size = min(count, available);
+ *ppos += read_size;
+ return simple_read_from_buffer(ubuf, count, &offset, htm_buf, available);
+}
+
+static const struct file_operations htmdump_fops = {
+ .llseek = NULL,
+ .read = htmdump_read,
+ .open = simple_open,
+};
+
+static int htmdump_init_debugfs(void)
+{
+ htm_buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!htm_buf) {
+ pr_err("Failed to allocate htmdump buf\n");
+ return -ENOMEM;
+ }
+
+ htmdump_debugfs_dir = debugfs_create_dir("htmdump",
+ arch_debugfs_dir);
+
+ debugfs_create_u32("nodeindex", 0600,
+ htmdump_debugfs_dir, &nodeindex);
+ debugfs_create_u32("nodalchipindex", 0600,
+ htmdump_debugfs_dir, &nodalchipindex);
+ debugfs_create_u32("coreindexonchip", 0600,
+ htmdump_debugfs_dir, &coreindexonchip);
+ debugfs_create_u32("htmtype", 0600,
+ htmdump_debugfs_dir, &htmtype);
+ debugfs_create_file("trace", 0400, htmdump_debugfs_dir, htm_buf, &htmdump_fops);
+
+ return 0;
+}
+
+static int __init htmdump_init(void)
+{
+ if (htmdump_init_debugfs())
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void __exit htmdump_exit(void)
+{
+ debugfs_remove_recursive(htmdump_debugfs_dir);
+ kfree(htm_buf);
+}
+
+module_init(htmdump_init);
+module_exit(htmdump_exit);
+MODULE_DESCRIPTION("PHYP Hardware Trace Macro (HTM) data dumper");
+MODULE_LICENSE("GPL");
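
A minimal userspace sketch of how the new interface could be exercised. The
path assumes debugfs is mounted at /sys/kernel/debug and that arch_debugfs_dir
shows up there as powerpc/; the selector files (nodeindex, nodalchipindex,
coreindexonchip, htmtype) would be written first, then the trace file read out:

	/* sketch only -- paths are assumptions, not part of the patch */
	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		char buf[4096];
		ssize_t n;
		int fd = open("/sys/kernel/debug/powerpc/htmdump/trace", O_RDONLY);

		if (fd < 0) {
			perror("open trace");
			return 1;
		}
		/* htmdump_read() hands back at most a page per call and returns 0
		 * once the hypervisor reports H_NOT_AVAILABLE (end of the dump) */
		while ((n = read(fd, buf, sizeof(buf))) > 0)
			fwrite(buf, 1, (size_t)n, stdout);
		close(fd);
		return 0;
	}
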
diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
index ae6f7a235d8b..d6ebc19fb99c 100644
--- a/arch/powerpc/platforms/pseries/iommu.c
+++ b/arch/powerpc/platforms/pseries/iommu.c
@@ -52,7 +52,8 @@ enum {
enum {
DDW_EXT_SIZE = 0,
DDW_EXT_RESET_DMA_WIN = 1,
- DDW_EXT_QUERY_OUT_SIZE = 2
+ DDW_EXT_QUERY_OUT_SIZE = 2,
+ DDW_EXT_LIMITED_ADDR_MODE = 3
};
static struct iommu_table *iommu_pseries_alloc_table(int node)
@@ -1284,17 +1285,13 @@ static LIST_HEAD(failed_ddw_pdn_list);
static phys_addr_t ddw_memory_hotplug_max(void)
{
- resource_size_t max_addr = memory_hotplug_max();
- struct device_node *memory;
+ resource_size_t max_addr;
- for_each_node_by_type(memory, "memory") {
- struct resource res;
-
- if (of_address_to_resource(memory, 0, &res))
- continue;
-
- max_addr = max_t(resource_size_t, max_addr, res.end + 1);
- }
+#if defined(CONFIG_NUMA) && defined(CONFIG_MEMORY_HOTPLUG)
+ max_addr = hot_add_drconf_memory_max();
+#else
+ max_addr = memblock_end_of_DRAM();
+#endif
return max_addr;
}
@@ -1331,6 +1328,54 @@ static void reset_dma_window(struct pci_dev *dev, struct device_node *par_dn)
ret);
}
+/*
+ * Platforms support placing the PHB in limited address mode starting with
+ * LoPAR level 2.13. In this mode, the DMA address returned by DDW is above
+ * 4GB but less than 64 bits. This benefits IO adapters that don't support
+ * 64-bit DMA addresses.
+ */
+static int limited_dma_window(struct pci_dev *dev, struct device_node *par_dn)
+{
+ int ret;
+ u32 cfg_addr, reset_dma_win, las_supported;
+ u64 buid;
+ struct device_node *dn;
+ struct pci_dn *pdn;
+
+ ret = ddw_read_ext(par_dn, DDW_EXT_RESET_DMA_WIN, &reset_dma_win);
+ if (ret)
+ goto out;
+
+ ret = ddw_read_ext(par_dn, DDW_EXT_LIMITED_ADDR_MODE, &las_supported);
+
+ /* Limited Address Space extension available on the platform but DDW in
+ * limited addressing mode not supported
+ */
+ if (!ret && !las_supported)
+ ret = -EPROTO;
+
+ if (ret) {
+ dev_info(&dev->dev, "Limited Address Space for DDW not Supported, err: %d", ret);
+ goto out;
+ }
+
+ dn = pci_device_to_OF_node(dev);
+ pdn = PCI_DN(dn);
+ buid = pdn->phb->buid;
+ cfg_addr = (pdn->busno << 16) | (pdn->devfn << 8);
+
+ ret = rtas_call(reset_dma_win, 4, 1, NULL, cfg_addr, BUID_HI(buid),
+ BUID_LO(buid), 1);
+ if (ret)
+ dev_info(&dev->dev,
+ "ibm,reset-pe-dma-windows(%x) for Limited Addr Support: %x %x %x returned %d ",
+ reset_dma_win, cfg_addr, BUID_HI(buid), BUID_LO(buid),
+ ret);
+
+out:
+ return ret;
+}
+
/* Return largest page shift based on "IO Page Sizes" output of ibm,query-pe-dma-window. */
static int iommu_get_page_shift(u32 query_page_size)
{
@@ -1398,7 +1443,7 @@ static struct property *ddw_property_create(const char *propname, u32 liobn, u64
*
* returns true if can map all pages (direct mapping), false otherwise..
*/
-static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn)
+static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn, u64 dma_mask)
{
int len = 0, ret;
int max_ram_len = order_base_2(ddw_memory_hotplug_max());
@@ -1417,6 +1462,9 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn)
bool pmem_present;
struct pci_dn *pci = PCI_DN(pdn);
struct property *default_win = NULL;
+ bool limited_addr_req = false, limited_addr_enabled = false;
+ int dev_max_ddw;
+ int ddw_sz;
dn = of_find_node_by_type(NULL, "ibm,pmemory");
pmem_present = dn != NULL;
@@ -1443,7 +1491,6 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn)
* the ibm,ddw-applicable property holds the tokens for:
* ibm,query-pe-dma-window
* ibm,create-pe-dma-window
- * ibm,remove-pe-dma-window
* for the given node in that order.
* the property is actually in the parent, not the PE
*/
@@ -1463,6 +1510,20 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn)
if (ret != 0)
goto out_failed;
+	/* DMA Limited Addressing required? This is the case when the driver has
+	 * requested to create DDW but supports a DMA mask of less than 64 bits
+ */
+ limited_addr_req = (dma_mask != DMA_BIT_MASK(64));
+
+ /* place the PHB in Limited Addressing mode */
+ if (limited_addr_req) {
+ if (limited_dma_window(dev, pdn))
+ goto out_failed;
+
+ /* PHB is in Limited address mode */
+ limited_addr_enabled = true;
+ }
+
/*
* If there is no window available, remove the default DMA window,
* if it's present. This will make all the resources available to the
@@ -1509,6 +1570,15 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn)
goto out_failed;
}
+ /* Maximum DMA window size that the device can address (in log2) */
+ dev_max_ddw = fls64(dma_mask);
+
+ /* If the device DMA mask is less than 64-bits, make sure the DMA window
+ * size is not bigger than what the device can access
+ */
+ ddw_sz = min(order_base_2(query.largest_available_block << page_shift),
+ dev_max_ddw);
+
/*
* The "ibm,pmemory" can appear anywhere in the address space.
* Assuming it is still backed by page structs, try MAX_PHYSMEM_BITS
@@ -1517,23 +1587,21 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn)
*/
len = max_ram_len;
if (pmem_present) {
- if (query.largest_available_block >=
- (1ULL << (MAX_PHYSMEM_BITS - page_shift)))
+ if (ddw_sz >= MAX_PHYSMEM_BITS)
len = MAX_PHYSMEM_BITS;
else
dev_info(&dev->dev, "Skipping ibm,pmemory");
}
/* check if the available block * number of ptes will map everything */
- if (query.largest_available_block < (1ULL << (len - page_shift))) {
+ if (ddw_sz < len) {
dev_dbg(&dev->dev,
"can't map partition max 0x%llx with %llu %llu-sized pages\n",
1ULL << len,
query.largest_available_block,
1ULL << page_shift);
- len = order_base_2(query.largest_available_block << page_shift);
-
+ len = ddw_sz;
dynamic_mapping = true;
} else {
direct_mapping = !default_win_removed ||
@@ -1547,8 +1615,9 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn)
*/
if (default_win_removed && pmem_present && !direct_mapping) {
/* DDW is big enough to be split */
- if ((query.largest_available_block << page_shift) >=
- MIN_DDW_VPMEM_DMA_WINDOW + (1ULL << max_ram_len)) {
+ if ((1ULL << ddw_sz) >=
+ MIN_DDW_VPMEM_DMA_WINDOW + (1ULL << max_ram_len)) {
+
direct_mapping = true;
/* offset of the Dynamic part of DDW */
@@ -1559,8 +1628,7 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn)
dynamic_mapping = true;
/* create max size DDW possible */
- len = order_base_2(query.largest_available_block
- << page_shift);
+ len = ddw_sz;
}
}
@@ -1600,7 +1668,7 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn)
if (direct_mapping) {
/* DDW maps the whole partition, so enable direct DMA mapping */
- ret = walk_system_ram_range(0, memblock_end_of_DRAM() >> PAGE_SHIFT,
+ ret = walk_system_ram_range(0, ddw_memory_hotplug_max() >> PAGE_SHIFT,
win64->value, tce_setrange_multi_pSeriesLP_walk);
if (ret) {
dev_info(&dev->dev, "failed to map DMA window for %pOF: %d\n",
@@ -1689,7 +1757,7 @@ out_remove_win:
__remove_dma_window(pdn, ddw_avail, create.liobn);
out_failed:
- if (default_win_removed)
+ if (default_win_removed || limited_addr_enabled)
reset_dma_window(dev, pdn);
fpdn = kzalloc(sizeof(*fpdn), GFP_KERNEL);
@@ -1708,6 +1776,9 @@ out_unlock:
dev->dev.bus_dma_limit = dev->dev.archdata.dma_offset +
(1ULL << max_ram_len);
+ dev_info(&dev->dev, "lsa_required: %x, lsa_enabled: %x, direct mapping: %x\n",
+ limited_addr_req, limited_addr_enabled, direct_mapping);
+
return direct_mapping;
}
@@ -1833,8 +1904,11 @@ static bool iommu_bypass_supported_pSeriesLP(struct pci_dev *pdev, u64 dma_mask)
{
struct device_node *dn = pci_device_to_OF_node(pdev), *pdn;
- /* only attempt to use a new window if 64-bit DMA is requested */
- if (dma_mask < DMA_BIT_MASK(64))
+	/* For DDW, the DMA mask should be more than 32 bits. For a mask of more
+	 * than 32 bits but less than 64 bits, DMA addressing is supported in
+ * Limited Addressing mode.
+ */
+ if (dma_mask <= DMA_BIT_MASK(32))
return false;
dev_dbg(&pdev->dev, "node is %pOF\n", dn);
@@ -1847,7 +1921,7 @@ static bool iommu_bypass_supported_pSeriesLP(struct pci_dev *pdev, u64 dma_mask)
*/
pdn = pci_dma_find(dn, NULL);
if (pdn && PCI_DN(pdn))
- return enable_ddw(pdev, pdn);
+ return enable_ddw(pdev, pdn, dma_mask);
return false;
}
@@ -2349,11 +2423,17 @@ static int iommu_mem_notifier(struct notifier_block *nb, unsigned long action,
struct memory_notify *arg = data;
int ret = 0;
+ /* This notifier can get called when onlining persistent memory as well.
+ * TCEs are not pre-mapped for persistent memory. Persistent memory will
+ * always be above ddw_memory_hotplug_max()
+ */
+
switch (action) {
case MEM_GOING_ONLINE:
spin_lock(&dma_win_list_lock);
list_for_each_entry(window, &dma_win_list, list) {
- if (window->direct) {
+ if (window->direct && (arg->start_pfn << PAGE_SHIFT) <
+ ddw_memory_hotplug_max()) {
ret |= tce_setrange_multi_pSeriesLP(arg->start_pfn,
arg->nr_pages, window->prop);
}
@@ -2365,7 +2445,8 @@ static int iommu_mem_notifier(struct notifier_block *nb, unsigned long action,
case MEM_OFFLINE:
spin_lock(&dma_win_list_lock);
list_for_each_entry(window, &dma_win_list, list) {
- if (window->direct) {
+ if (window->direct && (arg->start_pfn << PAGE_SHIFT) <
+ ddw_memory_hotplug_max()) {
ret |= tce_clearrange_multi_pSeriesLP(arg->start_pfn,
arg->nr_pages, window->prop);
}
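
A worked example of the window-size clamp introduced above, with illustrative
numbers: a device that sets a 43-bit DMA mask gives dev_max_ddw = fls64(mask) =
43, while a PE whose largest available block is 2^30 pages of 64KiB gives
order_base_2(2^30 << 16) = 46, so ddw_sz = min(46, 43) = 43 and the created
window never exceeds what the adapter can address. With a full 64-bit mask the
min() is taken by the largest-available-block term and the behaviour matches
the previous code.
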
diff --git a/arch/powerpc/platforms/pseries/papr-vpd.c b/arch/powerpc/platforms/pseries/papr-vpd.c
index 1574176e3ffc..c86950d7105a 100644
--- a/arch/powerpc/platforms/pseries/papr-vpd.c
+++ b/arch/powerpc/platforms/pseries/papr-vpd.c
@@ -482,14 +482,13 @@ static long papr_vpd_create_handle(struct papr_location_code __user *ulc)
goto free_blob;
}
- file = anon_inode_getfile("[papr-vpd]", &papr_vpd_handle_ops,
- (void *)blob, O_RDONLY);
+ file = anon_inode_getfile_fmode("[papr-vpd]", &papr_vpd_handle_ops,
+ (void *)blob, O_RDONLY,
+ FMODE_LSEEK | FMODE_PREAD);
if (IS_ERR(file)) {
err = PTR_ERR(file);
goto put_fd;
}
-
- file->f_mode |= FMODE_LSEEK | FMODE_PREAD;
fd_install(fd, file);
return fd;
put_fd:
diff --git a/arch/powerpc/sysdev/Makefile b/arch/powerpc/sysdev/Makefile
index 24a177d164f1..0834a9a12600 100644
--- a/arch/powerpc/sysdev/Makefile
+++ b/arch/powerpc/sysdev/Makefile
@@ -12,7 +12,6 @@ obj-$(CONFIG_PPC_MSI_BITMAP) += msi_bitmap.o
obj-$(CONFIG_PPC_MPC106) += grackle.o
obj-$(CONFIG_PPC_DCR_NATIVE) += dcr-low.o
-obj-$(CONFIG_PPC_PMI) += pmi.o
obj-$(CONFIG_U3_DART) += dart_iommu.o
obj-$(CONFIG_MMIO_NVRAM) += mmio_nvram.o
obj-$(CONFIG_FSL_SOC) += fsl_soc.o fsl_mpic_err.o
diff --git a/arch/powerpc/sysdev/dcr.c b/arch/powerpc/sysdev/dcr.c
index 70ce66eadff1..cb44a69958e7 100644
--- a/arch/powerpc/sysdev/dcr.c
+++ b/arch/powerpc/sysdev/dcr.c
@@ -11,107 +11,6 @@
#include <linux/of_address.h>
#include <asm/dcr.h>
-#ifdef CONFIG_PPC_DCR_MMIO
-static struct device_node *find_dcr_parent(struct device_node *node)
-{
- struct device_node *par, *tmp;
- const u32 *p;
-
- for (par = of_node_get(node); par;) {
- if (of_property_read_bool(par, "dcr-controller"))
- break;
- p = of_get_property(par, "dcr-parent", NULL);
- tmp = par;
- if (p == NULL)
- par = of_get_parent(par);
- else
- par = of_find_node_by_phandle(*p);
- of_node_put(tmp);
- }
- return par;
-}
-#endif
-
-#if defined(CONFIG_PPC_DCR_NATIVE) && defined(CONFIG_PPC_DCR_MMIO)
-
-bool dcr_map_ok_generic(dcr_host_t host)
-{
- if (host.type == DCR_HOST_NATIVE)
- return dcr_map_ok_native(host.host.native);
- else if (host.type == DCR_HOST_MMIO)
- return dcr_map_ok_mmio(host.host.mmio);
- else
- return false;
-}
-EXPORT_SYMBOL_GPL(dcr_map_ok_generic);
-
-dcr_host_t dcr_map_generic(struct device_node *dev,
- unsigned int dcr_n,
- unsigned int dcr_c)
-{
- dcr_host_t host;
- struct device_node *dp;
- const char *prop;
-
- host.type = DCR_HOST_INVALID;
-
- dp = find_dcr_parent(dev);
- if (dp == NULL)
- return host;
-
- prop = of_get_property(dp, "dcr-access-method", NULL);
-
- pr_debug("dcr_map_generic(dcr-access-method = %s)\n", prop);
-
- if (!strcmp(prop, "native")) {
- host.type = DCR_HOST_NATIVE;
- host.host.native = dcr_map_native(dev, dcr_n, dcr_c);
- } else if (!strcmp(prop, "mmio")) {
- host.type = DCR_HOST_MMIO;
- host.host.mmio = dcr_map_mmio(dev, dcr_n, dcr_c);
- }
-
- of_node_put(dp);
- return host;
-}
-EXPORT_SYMBOL_GPL(dcr_map_generic);
-
-void dcr_unmap_generic(dcr_host_t host, unsigned int dcr_c)
-{
- if (host.type == DCR_HOST_NATIVE)
- dcr_unmap_native(host.host.native, dcr_c);
- else if (host.type == DCR_HOST_MMIO)
- dcr_unmap_mmio(host.host.mmio, dcr_c);
- else /* host.type == DCR_HOST_INVALID */
- WARN_ON(true);
-}
-EXPORT_SYMBOL_GPL(dcr_unmap_generic);
-
-u32 dcr_read_generic(dcr_host_t host, unsigned int dcr_n)
-{
- if (host.type == DCR_HOST_NATIVE)
- return dcr_read_native(host.host.native, dcr_n);
- else if (host.type == DCR_HOST_MMIO)
- return dcr_read_mmio(host.host.mmio, dcr_n);
- else /* host.type == DCR_HOST_INVALID */
- WARN_ON(true);
- return 0;
-}
-EXPORT_SYMBOL_GPL(dcr_read_generic);
-
-void dcr_write_generic(dcr_host_t host, unsigned int dcr_n, u32 value)
-{
- if (host.type == DCR_HOST_NATIVE)
- dcr_write_native(host.host.native, dcr_n, value);
- else if (host.type == DCR_HOST_MMIO)
- dcr_write_mmio(host.host.mmio, dcr_n, value);
- else /* host.type == DCR_HOST_INVALID */
- WARN_ON(true);
-}
-EXPORT_SYMBOL_GPL(dcr_write_generic);
-
-#endif /* defined(CONFIG_PPC_DCR_NATIVE) && defined(CONFIG_PPC_DCR_MMIO) */
-
unsigned int dcr_resource_start(const struct device_node *np,
unsigned int index)
{
@@ -137,86 +36,5 @@ unsigned int dcr_resource_len(const struct device_node *np, unsigned int index)
}
EXPORT_SYMBOL_GPL(dcr_resource_len);
-#ifdef CONFIG_PPC_DCR_MMIO
-
-static u64 of_translate_dcr_address(struct device_node *dev,
- unsigned int dcr_n,
- unsigned int *out_stride)
-{
- struct device_node *dp;
- const u32 *p;
- unsigned int stride;
- u64 ret = OF_BAD_ADDR;
-
- dp = find_dcr_parent(dev);
- if (dp == NULL)
- return OF_BAD_ADDR;
-
- /* Stride is not properly defined yet, default to 0x10 for Axon */
- p = of_get_property(dp, "dcr-mmio-stride", NULL);
- stride = (p == NULL) ? 0x10 : *p;
-
- /* XXX FIXME: Which property name is to use of the 2 following ? */
- p = of_get_property(dp, "dcr-mmio-range", NULL);
- if (p == NULL)
- p = of_get_property(dp, "dcr-mmio-space", NULL);
- if (p == NULL)
- goto done;
-
- /* Maybe could do some better range checking here */
- ret = of_translate_address(dp, p);
- if (ret != OF_BAD_ADDR)
- ret += (u64)(stride) * (u64)dcr_n;
- if (out_stride)
- *out_stride = stride;
-
- done:
- of_node_put(dp);
- return ret;
-}
-
-dcr_host_mmio_t dcr_map_mmio(struct device_node *dev,
- unsigned int dcr_n,
- unsigned int dcr_c)
-{
- dcr_host_mmio_t ret = { .token = NULL, .stride = 0, .base = dcr_n };
- u64 addr;
-
- pr_debug("dcr_map(%pOF, 0x%x, 0x%x)\n",
- dev, dcr_n, dcr_c);
-
- addr = of_translate_dcr_address(dev, dcr_n, &ret.stride);
- pr_debug("translates to addr: 0x%llx, stride: 0x%x\n",
- (unsigned long long) addr, ret.stride);
- if (addr == OF_BAD_ADDR)
- return ret;
- pr_debug("mapping 0x%x bytes\n", dcr_c * ret.stride);
- ret.token = ioremap(addr, dcr_c * ret.stride);
- if (ret.token == NULL)
- return ret;
- pr_debug("mapped at 0x%p -> base is 0x%p\n",
- ret.token, ret.token - dcr_n * ret.stride);
- ret.token -= dcr_n * ret.stride;
- return ret;
-}
-EXPORT_SYMBOL_GPL(dcr_map_mmio);
-
-void dcr_unmap_mmio(dcr_host_mmio_t host, unsigned int dcr_c)
-{
- dcr_host_mmio_t h = host;
-
- if (h.token == NULL)
- return;
- h.token += host.base * h.stride;
- iounmap(h.token);
- h.token = NULL;
-}
-EXPORT_SYMBOL_GPL(dcr_unmap_mmio);
-
-#endif /* defined(CONFIG_PPC_DCR_MMIO) */
-
-#ifdef CONFIG_PPC_DCR_NATIVE
DEFINE_SPINLOCK(dcr_ind_lock);
EXPORT_SYMBOL_GPL(dcr_ind_lock);
-#endif /* defined(CONFIG_PPC_DCR_NATIVE) */
-
diff --git a/arch/powerpc/sysdev/ipic.c b/arch/powerpc/sysdev/ipic.c
index 5f69e2d50f26..037b04bf9a9f 100644
--- a/arch/powerpc/sysdev/ipic.c
+++ b/arch/powerpc/sysdev/ipic.c
@@ -762,8 +762,7 @@ struct ipic * __init ipic_init(struct device_node *node, unsigned int flags)
ipic_write(ipic->regs, IPIC_SIMSR_H, 0);
ipic_write(ipic->regs, IPIC_SIMSR_L, 0);
- printk ("IPIC (%d IRQ sources) at %p\n", NR_IPIC_INTS,
- primary_ipic->regs);
+ pr_info("IPIC (%d IRQ sources) at MMIO %pa\n", NR_IPIC_INTS, &res.start);
return ipic;
}
diff --git a/arch/powerpc/sysdev/pmi.c b/arch/powerpc/sysdev/pmi.c
deleted file mode 100644
index 2511e586fe31..000000000000
--- a/arch/powerpc/sysdev/pmi.c
+++ /dev/null
@@ -1,267 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * pmi driver
- *
- * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
- *
- * PMI (Platform Management Interrupt) is a way to communicate
- * with the BMC (Baseboard Management Controller) via interrupts.
- * Unlike IPMI it is bidirectional and has a low latency.
- *
- * Author: Christian Krafft <krafft@de.ibm.com>
- */
-
-#include <linux/interrupt.h>
-#include <linux/slab.h>
-#include <linux/completion.h>
-#include <linux/spinlock.h>
-#include <linux/module.h>
-#include <linux/mod_devicetable.h>
-#include <linux/workqueue.h>
-#include <linux/of_address.h>
-#include <linux/of_irq.h>
-#include <linux/platform_device.h>
-
-#include <asm/io.h>
-#include <asm/pmi.h>
-
-struct pmi_data {
- struct list_head handler;
- spinlock_t handler_spinlock;
- spinlock_t pmi_spinlock;
- struct mutex msg_mutex;
- pmi_message_t msg;
- struct completion *completion;
- struct platform_device *dev;
- int irq;
- u8 __iomem *pmi_reg;
- struct work_struct work;
-};
-
-static struct pmi_data *data;
-
-static irqreturn_t pmi_irq_handler(int irq, void *dev_id)
-{
- u8 type;
- int rc;
-
- spin_lock(&data->pmi_spinlock);
-
- type = ioread8(data->pmi_reg + PMI_READ_TYPE);
- pr_debug("pmi: got message of type %d\n", type);
-
- if (type & PMI_ACK && !data->completion) {
- printk(KERN_WARNING "pmi: got unexpected ACK message.\n");
- rc = -EIO;
- goto unlock;
- }
-
- if (data->completion && !(type & PMI_ACK)) {
- printk(KERN_WARNING "pmi: expected ACK, but got %d\n", type);
- rc = -EIO;
- goto unlock;
- }
-
- data->msg.type = type;
- data->msg.data0 = ioread8(data->pmi_reg + PMI_READ_DATA0);
- data->msg.data1 = ioread8(data->pmi_reg + PMI_READ_DATA1);
- data->msg.data2 = ioread8(data->pmi_reg + PMI_READ_DATA2);
- rc = 0;
-unlock:
- spin_unlock(&data->pmi_spinlock);
-
- if (rc == -EIO) {
- rc = IRQ_HANDLED;
- goto out;
- }
-
- if (data->msg.type & PMI_ACK) {
- complete(data->completion);
- rc = IRQ_HANDLED;
- goto out;
- }
-
- schedule_work(&data->work);
-
- rc = IRQ_HANDLED;
-out:
- return rc;
-}
-
-
-static const struct of_device_id pmi_match[] = {
- { .type = "ibm,pmi", .name = "ibm,pmi" },
- { .type = "ibm,pmi" },
- {},
-};
-
-MODULE_DEVICE_TABLE(of, pmi_match);
-
-static void pmi_notify_handlers(struct work_struct *work)
-{
- struct pmi_handler *handler;
-
- spin_lock(&data->handler_spinlock);
- list_for_each_entry(handler, &data->handler, node) {
- pr_debug("pmi: notifying handler %p\n", handler);
- if (handler->type == data->msg.type)
- handler->handle_pmi_message(data->msg);
- }
- spin_unlock(&data->handler_spinlock);
-}
-
-static int pmi_of_probe(struct platform_device *dev)
-{
- struct device_node *np = dev->dev.of_node;
- int rc;
-
- if (data) {
- printk(KERN_ERR "pmi: driver has already been initialized.\n");
- rc = -EBUSY;
- goto out;
- }
-
- data = kzalloc(sizeof(struct pmi_data), GFP_KERNEL);
- if (!data) {
- printk(KERN_ERR "pmi: could not allocate memory.\n");
- rc = -ENOMEM;
- goto out;
- }
-
- data->pmi_reg = of_iomap(np, 0);
- if (!data->pmi_reg) {
- printk(KERN_ERR "pmi: invalid register address.\n");
- rc = -EFAULT;
- goto error_cleanup_data;
- }
-
- INIT_LIST_HEAD(&data->handler);
-
- mutex_init(&data->msg_mutex);
- spin_lock_init(&data->pmi_spinlock);
- spin_lock_init(&data->handler_spinlock);
-
- INIT_WORK(&data->work, pmi_notify_handlers);
-
- data->dev = dev;
-
- data->irq = irq_of_parse_and_map(np, 0);
- if (!data->irq) {
- printk(KERN_ERR "pmi: invalid interrupt.\n");
- rc = -EFAULT;
- goto error_cleanup_iomap;
- }
-
- rc = request_irq(data->irq, pmi_irq_handler, 0, "pmi", NULL);
- if (rc) {
- printk(KERN_ERR "pmi: can't request IRQ %d: returned %d\n",
- data->irq, rc);
- goto error_cleanup_iomap;
- }
-
- printk(KERN_INFO "pmi: found pmi device at addr %p.\n", data->pmi_reg);
-
- goto out;
-
-error_cleanup_iomap:
- iounmap(data->pmi_reg);
-
-error_cleanup_data:
- kfree(data);
-
-out:
- return rc;
-}
-
-static void pmi_of_remove(struct platform_device *dev)
-{
- struct pmi_handler *handler, *tmp;
-
- free_irq(data->irq, NULL);
- iounmap(data->pmi_reg);
-
- spin_lock(&data->handler_spinlock);
-
- list_for_each_entry_safe(handler, tmp, &data->handler, node)
- list_del(&handler->node);
-
- spin_unlock(&data->handler_spinlock);
-
- kfree(data);
- data = NULL;
-}
-
-static struct platform_driver pmi_of_platform_driver = {
- .probe = pmi_of_probe,
- .remove = pmi_of_remove,
- .driver = {
- .name = "pmi",
- .of_match_table = pmi_match,
- },
-};
-module_platform_driver(pmi_of_platform_driver);
-
-int pmi_send_message(pmi_message_t msg)
-{
- unsigned long flags;
- DECLARE_COMPLETION_ONSTACK(completion);
-
- if (!data)
- return -ENODEV;
-
- mutex_lock(&data->msg_mutex);
-
- data->msg = msg;
- pr_debug("pmi_send_message: msg is %08x\n", *(u32*)&msg);
-
- data->completion = &completion;
-
- spin_lock_irqsave(&data->pmi_spinlock, flags);
- iowrite8(msg.data0, data->pmi_reg + PMI_WRITE_DATA0);
- iowrite8(msg.data1, data->pmi_reg + PMI_WRITE_DATA1);
- iowrite8(msg.data2, data->pmi_reg + PMI_WRITE_DATA2);
- iowrite8(msg.type, data->pmi_reg + PMI_WRITE_TYPE);
- spin_unlock_irqrestore(&data->pmi_spinlock, flags);
-
- pr_debug("pmi_send_message: wait for completion\n");
-
- wait_for_completion_interruptible_timeout(data->completion,
- PMI_TIMEOUT);
-
- data->completion = NULL;
-
- mutex_unlock(&data->msg_mutex);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(pmi_send_message);
-
-int pmi_register_handler(struct pmi_handler *handler)
-{
- if (!data)
- return -ENODEV;
-
- spin_lock(&data->handler_spinlock);
- list_add_tail(&handler->node, &data->handler);
- spin_unlock(&data->handler_spinlock);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(pmi_register_handler);
-
-void pmi_unregister_handler(struct pmi_handler *handler)
-{
- if (!data)
- return;
-
- pr_debug("pmi: unregistering handler %p\n", handler);
-
- spin_lock(&data->handler_spinlock);
- list_del(&handler->node);
- spin_unlock(&data->handler_spinlock);
-}
-EXPORT_SYMBOL_GPL(pmi_unregister_handler);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Christian Krafft <krafft@de.ibm.com>");
-MODULE_DESCRIPTION("IBM Platform Management Interrupt driver");
diff --git a/arch/powerpc/sysdev/xics/icp-native.c b/arch/powerpc/sysdev/xics/icp-native.c
index 700b67476a7d..4e89158a577c 100644
--- a/arch/powerpc/sysdev/xics/icp-native.c
+++ b/arch/powerpc/sysdev/xics/icp-native.c
@@ -145,27 +145,6 @@ static void icp_native_cause_ipi(int cpu)
icp_native_set_qirr(cpu, IPI_PRIORITY);
}
-#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
-void icp_native_cause_ipi_rm(int cpu)
-{
- /*
- * Currently not used to send IPIs to another CPU
- * on the same core. Only caller is KVM real mode.
- * Need the physical address of the XICS to be
- * previously saved in kvm_hstate in the paca.
- */
- void __iomem *xics_phys;
-
- /*
- * Just like the cause_ipi functions, it is required to
- * include a full barrier before causing the IPI.
- */
- xics_phys = paca_ptrs[cpu]->kvm_hstate.xics_phys;
- mb();
- __raw_rm_writeb(IPI_PRIORITY, xics_phys + XICS_MFRR);
-}
-#endif
-
/*
* Called when an interrupt is received on an off-line CPU to
* clear the interrupt, so that the CPU can go back to nap mode.
diff --git a/arch/powerpc/xmon/Makefile b/arch/powerpc/xmon/Makefile
index d778011060a8..d74b147126b7 100644
--- a/arch/powerpc/xmon/Makefile
+++ b/arch/powerpc/xmon/Makefile
@@ -16,7 +16,4 @@ ccflags-$(CONFIG_CC_IS_CLANG) += -Wframe-larger-than=4096
obj-y += xmon.o nonstdio.o spr_access.o xmon_bpts.o
-ifdef CONFIG_XMON_DISASSEMBLY
-obj-y += ppc-dis.o ppc-opc.o
-obj-$(CONFIG_SPU_BASE) += spu-dis.o spu-opc.o
-endif
+obj-$(CONFIG_XMON_DISASSEMBLY) += ppc-dis.o ppc-opc.o
diff --git a/arch/powerpc/xmon/spu-dis.c b/arch/powerpc/xmon/spu-dis.c
deleted file mode 100644
index 4b0a4e640f08..000000000000
--- a/arch/powerpc/xmon/spu-dis.c
+++ /dev/null
@@ -1,237 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/* Disassemble SPU instructions
-
- Copyright 2006 Free Software Foundation, Inc.
-
- This file is part of GDB, GAS, and the GNU binutils.
-
- */
-
-#include <linux/string.h>
-#include "nonstdio.h"
-#include "ansidecl.h"
-#include "spu.h"
-#include "dis-asm.h"
-
-/* This file provides a disassembler function which uses
- the disassembler interface defined in dis-asm.h. */
-
-extern const struct spu_opcode spu_opcodes[];
-extern const int spu_num_opcodes;
-
-#define SPU_DISASM_TBL_SIZE (1 << 11)
-static const struct spu_opcode *spu_disassemble_table[SPU_DISASM_TBL_SIZE];
-
-static void
-init_spu_disassemble (void)
-{
- int i;
-
- /* If two instructions have the same opcode then we prefer the first
- * one. In most cases it is just an alternate mnemonic. */
- for (i = 0; i < spu_num_opcodes; i++)
- {
- int o = spu_opcodes[i].opcode;
- if (o >= SPU_DISASM_TBL_SIZE)
- continue; /* abort (); */
- if (spu_disassemble_table[o] == 0)
- spu_disassemble_table[o] = &spu_opcodes[i];
- }
-}
-
-/* Determine the instruction from the 10 least significant bits. */
-static const struct spu_opcode *
-get_index_for_opcode (unsigned int insn)
-{
- const struct spu_opcode *index;
- unsigned int opcode = insn >> (32-11);
-
- /* Init the table. This assumes that element 0/opcode 0 (currently
- * NOP) is always used */
- if (spu_disassemble_table[0] == 0)
- init_spu_disassemble ();
-
- if ((index = spu_disassemble_table[opcode & 0x780]) != 0
- && index->insn_type == RRR)
- return index;
-
- if ((index = spu_disassemble_table[opcode & 0x7f0]) != 0
- && (index->insn_type == RI18 || index->insn_type == LBT))
- return index;
-
- if ((index = spu_disassemble_table[opcode & 0x7f8]) != 0
- && index->insn_type == RI10)
- return index;
-
- if ((index = spu_disassemble_table[opcode & 0x7fc]) != 0
- && (index->insn_type == RI16))
- return index;
-
- if ((index = spu_disassemble_table[opcode & 0x7fe]) != 0
- && (index->insn_type == RI8))
- return index;
-
- if ((index = spu_disassemble_table[opcode & 0x7ff]) != 0)
- return index;
-
- return NULL;
-}
-
-/* Print a Spu instruction. */
-
-int
-print_insn_spu (unsigned long insn, unsigned long memaddr)
-{
- int value;
- int hex_value;
- const struct spu_opcode *index;
- enum spu_insns tag;
-
- index = get_index_for_opcode (insn);
-
- if (index == 0)
- {
- printf(".long 0x%lx", insn);
- }
- else
- {
- int i;
- int paren = 0;
- tag = (enum spu_insns)(index - spu_opcodes);
- printf("%s", index->mnemonic);
- if (tag == M_BI || tag == M_BISL || tag == M_IRET || tag == M_BISLED
- || tag == M_BIHNZ || tag == M_BIHZ || tag == M_BINZ || tag == M_BIZ
- || tag == M_SYNC || tag == M_HBR)
- {
- int fb = (insn >> (32-18)) & 0x7f;
- if (fb & 0x40)
- printf(tag == M_SYNC ? "c" : "p");
- if (fb & 0x20)
- printf("d");
- if (fb & 0x10)
- printf("e");
- }
- if (index->arg[0] != 0)
- printf("\t");
- hex_value = 0;
- for (i = 1; i <= index->arg[0]; i++)
- {
- int arg = index->arg[i];
- if (arg != A_P && !paren && i > 1)
- printf(",");
-
- switch (arg)
- {
- case A_T:
- printf("$%lu",
- DECODE_INSN_RT (insn));
- break;
- case A_A:
- printf("$%lu",
- DECODE_INSN_RA (insn));
- break;
- case A_B:
- printf("$%lu",
- DECODE_INSN_RB (insn));
- break;
- case A_C:
- printf("$%lu",
- DECODE_INSN_RC (insn));
- break;
- case A_S:
- printf("$sp%lu",
- DECODE_INSN_RA (insn));
- break;
- case A_H:
- printf("$ch%lu",
- DECODE_INSN_RA (insn));
- break;
- case A_P:
- paren++;
- printf("(");
- break;
- case A_U7A:
- printf("%lu",
- 173 - DECODE_INSN_U8 (insn));
- break;
- case A_U7B:
- printf("%lu",
- 155 - DECODE_INSN_U8 (insn));
- break;
- case A_S3:
- case A_S6:
- case A_S7:
- case A_S7N:
- case A_U3:
- case A_U5:
- case A_U6:
- case A_U7:
- hex_value = DECODE_INSN_I7 (insn);
- printf("%d", hex_value);
- break;
- case A_S11:
- print_address(memaddr + DECODE_INSN_I9a (insn) * 4);
- break;
- case A_S11I:
- print_address(memaddr + DECODE_INSN_I9b (insn) * 4);
- break;
- case A_S10:
- case A_S10B:
- hex_value = DECODE_INSN_I10 (insn);
- printf("%d", hex_value);
- break;
- case A_S14:
- hex_value = DECODE_INSN_I10 (insn) * 16;
- printf("%d", hex_value);
- break;
- case A_S16:
- hex_value = DECODE_INSN_I16 (insn);
- printf("%d", hex_value);
- break;
- case A_X16:
- hex_value = DECODE_INSN_U16 (insn);
- printf("%u", hex_value);
- break;
- case A_R18:
- value = DECODE_INSN_I16 (insn) * 4;
- if (value == 0)
- printf("%d", value);
- else
- {
- hex_value = memaddr + value;
- print_address(hex_value & 0x3ffff);
- }
- break;
- case A_S18:
- value = DECODE_INSN_U16 (insn) * 4;
- if (value == 0)
- printf("%d", value);
- else
- print_address(value);
- break;
- case A_U18:
- value = DECODE_INSN_U18 (insn);
- if (value == 0 || 1)
- {
- hex_value = value;
- printf("%u", value);
- }
- else
- print_address(value);
- break;
- case A_U14:
- hex_value = DECODE_INSN_U14 (insn);
- printf("%u", hex_value);
- break;
- }
- if (arg != A_P && paren)
- {
- printf(")");
- paren--;
- }
- }
- if (hex_value > 16)
- printf("\t# %x", hex_value);
- }
- return 4;
-}
diff --git a/arch/powerpc/xmon/spu-insns.h b/arch/powerpc/xmon/spu-insns.h
deleted file mode 100644
index 7e1126a19909..000000000000
--- a/arch/powerpc/xmon/spu-insns.h
+++ /dev/null
@@ -1,399 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/* SPU ELF support for BFD.
-
- Copyright 2006 Free Software Foundation, Inc.
-
- This file is part of BFD, the Binary File Descriptor library.
-
- */
-
-/* SPU Opcode Table
-
--=-=-= FORMAT =-=-=-
-
- +----+-------+-------+-------+-------+ +------------+-------+-------+-------+
-RRR | op | RC | RB | RA | RT | RI7 | op | I7 | RA | RT |
- +----+-------+-------+-------+-------+ +------------+-------+-------+-------+
- 0 3 1 1 2 3 0 1 1 2 3
- 0 7 4 1 0 7 4 1
-
- +-----------+--------+-------+-------+ +---------+----------+-------+-------+
-RI8 | op | I8 | RA | RT | RI10 | op | I10 | RA | RT |
- +-----------+--------+-------+-------+ +---------+----------+-------+-------+
- 0 9 1 2 3 0 7 1 2 3
- 7 4 1 7 4 1
-
- +----------+-----------------+-------+ +--------+-------------------+-------+
-RI16 | op | I16 | RT | RI18 | op | I18 | RT |
- +----------+-----------------+-------+ +--------+-------------------+-------+
- 0 8 2 3 0 6 2 3
- 4 1 4 1
-
- +------------+-------+-------+-------+ +-------+--+-----------------+-------+
-RR | op | RB | RA | RT | LBT | op |RO| I16 | RO |
- +------------+-------+-------+-------+ +-------+--+-----------------+-------+
- 0 1 1 2 3 0 6 8 2 3
- 0 7 4 1 4 1
-
- +------------+----+--+-------+-------+
- LBTI | op | // |RO| RA | RO |
- +------------+----+--+-------+-------+
- 0 1 1 1 2 3
- 0 5 7 4 1
-
--=-=-= OPCODE =-=-=-
-
-OPCODE field specifies the most significant 11bit of the instruction. Some formats don't have 11bits for opcode field, and in this
-case, bit field other than op are defined as 0s. For example, opcode of fma instruction which is RRR format is defined as 0x700,
-since 0x700 -> 11'b11100000000, this means opcode is 4'b1110, and other 7bits are defined as 7'b0000000.
-
--=-=-= ASM_FORMAT =-=-=-
-
-RRR category RI7 category
- ASM_RRR mnemonic RC, RA, RB, RT ASM_RI4 mnemonic RT, RA, I4
- ASM_RI7 mnemonic RT, RA, I7
-
-RI8 category RI10 category
- ASM_RUI8 mnemonic RT, RA, UI8 ASM_AI10 mnemonic RA, I10
- ASM_RI10 mnemonic RT, RA, R10
- ASM_RI10IDX mnemonic RT, I10(RA)
-
-RI16 category RI18 category
- ASM_I16W mnemonic I16W ASM_RI18 mnemonic RT, I18
- ASM_RI16 mnemonic RT, I16
- ASM_RI16W mnemonic RT, I16W
-
-RR category LBT category
- ASM_MFSPR mnemonic RT, SA ASM_LBT mnemonic brinst, brtarg
- ASM_MTSPR mnemonic SA, RT
- ASM_NOOP mnemonic LBTI category
- ASM_RA mnemonic RA ASM_LBTI mnemonic brinst, RA
- ASM_RAB mnemonic RA, RB
- ASM_RDCH mnemonic RT, CA
- ASM_RR mnemonic RT, RA, RB
- ASM_RT mnemonic RT
- ASM_RTA mnemonic RT, RA
- ASM_WRCH mnemonic CA, RT
-
-Note that RRR instructions have the names for RC and RT reversed from
-what's in the ISA, in order to put RT in the same position it appears
-for other formats.
-
--=-=-= DEPENDENCY =-=-=-
-
-DEPENDENCY filed consists of 5 digits. This represents which register is used as source and which register is used as target.
-The first(most significant) digit is always 0. Then it is followd by RC, RB, RA and RT digits.
-If the digit is 0, this means the corresponding register is not used in the instruction.
-If the digit is 1, this means the corresponding register is used as a source in the instruction.
-If the digit is 2, this means the corresponding register is used as a target in the instruction.
-If the digit is 3, this means the corresponding register is used as both source and target in the instruction.
-For example, fms instruction has 00113 as the DEPENDENCY field. This means RC is not used in this operation, RB and RA are
-used as sources and RT is the target.
-
--=-=-= PIPE =-=-=-
-
-This field shows which execution pipe is used for the instruction
-
-pipe0 execution pipelines:
- FP6 SP floating pipeline
- FP7 integer operations executed in SP floating pipeline
- FPD DP floating pipeline
- FX2 FXU pipeline
- FX3 Rotate/Shift pipeline
- FXB Byte pipeline
- NOP No pipeline
-
-pipe1 execution pipelines:
- BR Branch pipeline
- LNOP No pipeline
- LS Load/Store pipeline
- SHUF Shuffle pipeline
- SPR SPR/CH pipeline
-
-*/
-
-#define _A0() {0}
-#define _A1(a) {1,a}
-#define _A2(a,b) {2,a,b}
-#define _A3(a,b,c) {3,a,b,c}
-#define _A4(a,b,c,d) {4,a,b,c,d}
-
-/* TAG FORMAT OPCODE MNEMONIC ASM_FORMAT DEPENDENCY PIPE COMMENT */
-/* 0[RC][RB][RA][RT] */
-/* 1:src, 2:target */
-
-APUOP(M_BR, RI16, 0x190, "br", _A1(A_R18), 00000, BR) /* BRel IP<-IP+I16 */
-APUOP(M_BRSL, RI16, 0x198, "brsl", _A2(A_T,A_R18), 00002, BR) /* BRelSetLink RT,IP<-IP,IP+I16 */
-APUOP(M_BRA, RI16, 0x180, "bra", _A1(A_S18), 00000, BR) /* BRAbs IP<-I16 */
-APUOP(M_BRASL, RI16, 0x188, "brasl", _A2(A_T,A_S18), 00002, BR) /* BRAbsSetLink RT,IP<-IP,I16 */
-APUOP(M_FSMBI, RI16, 0x194, "fsmbi", _A2(A_T,A_X16), 00002, SHUF) /* FormSelMask%I RT<-fsm(I16) */
-APUOP(M_LQA, RI16, 0x184, "lqa", _A2(A_T,A_S18), 00002, LS) /* LoadQAbs RT<-M[I16] */
-APUOP(M_LQR, RI16, 0x19C, "lqr", _A2(A_T,A_R18), 00002, LS) /* LoadQRel RT<-M[IP+I16] */
-APUOP(M_STOP, RR, 0x000, "stop", _A0(), 00000, BR) /* STOP stop */
-APUOP(M_STOP2, RR, 0x000, "stop", _A1(A_U14), 00000, BR) /* STOP stop */
-APUOP(M_STOPD, RR, 0x140, "stopd", _A3(A_T,A_A,A_B), 00111, BR) /* STOPD stop (with register dependencies) */
-APUOP(M_LNOP, RR, 0x001, "lnop", _A0(), 00000, LNOP) /* LNOP no_operation */
-APUOP(M_SYNC, RR, 0x002, "sync", _A0(), 00000, BR) /* SYNC flush_pipe */
-APUOP(M_DSYNC, RR, 0x003, "dsync", _A0(), 00000, BR) /* DSYNC flush_store_queue */
-APUOP(M_MFSPR, RR, 0x00c, "mfspr", _A2(A_T,A_S), 00002, SPR) /* MFSPR RT<-SA */
-APUOP(M_RDCH, RR, 0x00d, "rdch", _A2(A_T,A_H), 00002, SPR) /* ReaDCHannel RT<-CA:data */
-APUOP(M_RCHCNT, RR, 0x00f, "rchcnt", _A2(A_T,A_H), 00002, SPR) /* ReaDCHanCouNT RT<-CA:count */
-APUOP(M_HBRA, LBT, 0x080, "hbra", _A2(A_S11,A_S18), 00000, LS) /* HBRA BTB[B9]<-M[I16] */
-APUOP(M_HBRR, LBT, 0x090, "hbrr", _A2(A_S11,A_R18), 00000, LS) /* HBRR BTB[B9]<-M[IP+I16] */
-APUOP(M_BRZ, RI16, 0x100, "brz", _A2(A_T,A_R18), 00001, BR) /* BRZ IP<-IP+I16_if(RT) */
-APUOP(M_BRNZ, RI16, 0x108, "brnz", _A2(A_T,A_R18), 00001, BR) /* BRNZ IP<-IP+I16_if(RT) */
-APUOP(M_BRHZ, RI16, 0x110, "brhz", _A2(A_T,A_R18), 00001, BR) /* BRHZ IP<-IP+I16_if(RT) */
-APUOP(M_BRHNZ, RI16, 0x118, "brhnz", _A2(A_T,A_R18), 00001, BR) /* BRHNZ IP<-IP+I16_if(RT) */
-APUOP(M_STQA, RI16, 0x104, "stqa", _A2(A_T,A_S18), 00001, LS) /* SToreQAbs M[I16]<-RT */
-APUOP(M_STQR, RI16, 0x11C, "stqr", _A2(A_T,A_R18), 00001, LS) /* SToreQRel M[IP+I16]<-RT */
-APUOP(M_MTSPR, RR, 0x10c, "mtspr", _A2(A_S,A_T), 00001, SPR) /* MTSPR SA<-RT */
-APUOP(M_WRCH, RR, 0x10d, "wrch", _A2(A_H,A_T), 00001, SPR) /* ChanWRite CA<-RT */
-APUOP(M_LQD, RI10, 0x1a0, "lqd", _A4(A_T,A_S14,A_P,A_A), 00012, LS) /* LoadQDisp RT<-M[Ra+I10] */
-APUOP(M_BI, RR, 0x1a8, "bi", _A1(A_A), 00010, BR) /* BI IP<-RA */
-APUOP(M_BISL, RR, 0x1a9, "bisl", _A2(A_T,A_A), 00012, BR) /* BISL RT,IP<-IP,RA */
-APUOP(M_IRET, RR, 0x1aa, "iret", _A1(A_A), 00010, BR) /* IRET IP<-SRR0 */
-APUOP(M_IRET2, RR, 0x1aa, "iret", _A0(), 00010, BR) /* IRET IP<-SRR0 */
-APUOP(M_BISLED, RR, 0x1ab, "bisled", _A2(A_T,A_A), 00012, BR) /* BISLED RT,IP<-IP,RA_if(ext) */
-APUOP(M_HBR, LBTI, 0x1ac, "hbr", _A2(A_S11I,A_A), 00010, LS) /* HBR BTB[B9]<-M[Ra] */
-APUOP(M_FREST, RR, 0x1b8, "frest", _A2(A_T,A_A), 00012, SHUF) /* FREST RT<-recip(RA) */
-APUOP(M_FRSQEST, RR, 0x1b9, "frsqest", _A2(A_T,A_A), 00012, SHUF) /* FRSQEST RT<-rsqrt(RA) */
-APUOP(M_FSM, RR, 0x1b4, "fsm", _A2(A_T,A_A), 00012, SHUF) /* FormSelMask% RT<-expand(Ra) */
-APUOP(M_FSMH, RR, 0x1b5, "fsmh", _A2(A_T,A_A), 00012, SHUF) /* FormSelMask% RT<-expand(Ra) */
-APUOP(M_FSMB, RR, 0x1b6, "fsmb", _A2(A_T,A_A), 00012, SHUF) /* FormSelMask% RT<-expand(Ra) */
-APUOP(M_GB, RR, 0x1b0, "gb", _A2(A_T,A_A), 00012, SHUF) /* GatherBits% RT<-gather(RA) */
-APUOP(M_GBH, RR, 0x1b1, "gbh", _A2(A_T,A_A), 00012, SHUF) /* GatherBits% RT<-gather(RA) */
-APUOP(M_GBB, RR, 0x1b2, "gbb", _A2(A_T,A_A), 00012, SHUF) /* GatherBits% RT<-gather(RA) */
-APUOP(M_CBD, RI7, 0x1f4, "cbd", _A4(A_T,A_U7,A_P,A_A), 00012, SHUF) /* genCtl%%insD RT<-sta(Ra+I4,siz) */
-APUOP(M_CHD, RI7, 0x1f5, "chd", _A4(A_T,A_U7,A_P,A_A), 00012, SHUF) /* genCtl%%insD RT<-sta(Ra+I4,siz) */
-APUOP(M_CWD, RI7, 0x1f6, "cwd", _A4(A_T,A_U7,A_P,A_A), 00012, SHUF) /* genCtl%%insD RT<-sta(Ra+I4,siz) */
-APUOP(M_CDD, RI7, 0x1f7, "cdd", _A4(A_T,A_U7,A_P,A_A), 00012, SHUF) /* genCtl%%insD RT<-sta(Ra+I4,siz) */
-APUOP(M_ROTQBII, RI7, 0x1f8, "rotqbii", _A3(A_T,A_A,A_U3), 00012, SHUF) /* ROTQBII RT<-RA<<<I7 */
-APUOP(M_ROTQBYI, RI7, 0x1fc, "rotqbyi", _A3(A_T,A_A,A_S7N), 00012, SHUF) /* ROTQBYI RT<-RA<<<(I7*8) */
-APUOP(M_ROTQMBII, RI7, 0x1f9, "rotqmbii", _A3(A_T,A_A,A_S3), 00012, SHUF) /* ROTQMBII RT<-RA<<I7 */
-APUOP(M_ROTQMBYI, RI7, 0x1fd, "rotqmbyi", _A3(A_T,A_A,A_S6), 00012, SHUF) /* ROTQMBYI RT<-RA<<I7 */
-APUOP(M_SHLQBII, RI7, 0x1fb, "shlqbii", _A3(A_T,A_A,A_U3), 00012, SHUF) /* SHLQBII RT<-RA<<I7 */
-APUOP(M_SHLQBYI, RI7, 0x1ff, "shlqbyi", _A3(A_T,A_A,A_U5), 00012, SHUF) /* SHLQBYI RT<-RA<<I7 */
-APUOP(M_STQD, RI10, 0x120, "stqd", _A4(A_T,A_S14,A_P,A_A), 00011, LS) /* SToreQDisp M[Ra+I10]<-RT */
-APUOP(M_BIHNZ, RR, 0x12b, "bihnz", _A2(A_T,A_A), 00011, BR) /* BIHNZ IP<-RA_if(RT) */
-APUOP(M_BIHZ, RR, 0x12a, "bihz", _A2(A_T,A_A), 00011, BR) /* BIHZ IP<-RA_if(RT) */
-APUOP(M_BINZ, RR, 0x129, "binz", _A2(A_T,A_A), 00011, BR) /* BINZ IP<-RA_if(RT) */
-APUOP(M_BIZ, RR, 0x128, "biz", _A2(A_T,A_A), 00011, BR) /* BIZ IP<-RA_if(RT) */
-APUOP(M_CBX, RR, 0x1d4, "cbx", _A3(A_T,A_A,A_B), 00112, SHUF) /* genCtl%%insX RT<-sta(Ra+Rb,siz) */
-APUOP(M_CHX, RR, 0x1d5, "chx", _A3(A_T,A_A,A_B), 00112, SHUF) /* genCtl%%insX RT<-sta(Ra+Rb,siz) */
-APUOP(M_CWX, RR, 0x1d6, "cwx", _A3(A_T,A_A,A_B), 00112, SHUF) /* genCtl%%insX RT<-sta(Ra+Rb,siz) */
-APUOP(M_CDX, RR, 0x1d7, "cdx", _A3(A_T,A_A,A_B), 00112, SHUF) /* genCtl%%insX RT<-sta(Ra+Rb,siz) */
-APUOP(M_LQX, RR, 0x1c4, "lqx", _A3(A_T,A_A,A_B), 00112, LS) /* LoadQindeX RT<-M[Ra+Rb] */
-APUOP(M_ROTQBI, RR, 0x1d8, "rotqbi", _A3(A_T,A_A,A_B), 00112, SHUF) /* ROTQBI RT<-RA<<<Rb */
-APUOP(M_ROTQMBI, RR, 0x1d9, "rotqmbi", _A3(A_T,A_A,A_B), 00112, SHUF) /* ROTQMBI RT<-RA<<Rb */
-APUOP(M_SHLQBI, RR, 0x1db, "shlqbi", _A3(A_T,A_A,A_B), 00112, SHUF) /* SHLQBI RT<-RA<<Rb */
-APUOP(M_ROTQBY, RR, 0x1dc, "rotqby", _A3(A_T,A_A,A_B), 00112, SHUF) /* ROTQBY RT<-RA<<<(Rb*8) */
-APUOP(M_ROTQMBY, RR, 0x1dd, "rotqmby", _A3(A_T,A_A,A_B), 00112, SHUF) /* ROTQMBY RT<-RA<<Rb */
-APUOP(M_SHLQBY, RR, 0x1df, "shlqby", _A3(A_T,A_A,A_B), 00112, SHUF) /* SHLQBY RT<-RA<<Rb */
-APUOP(M_ROTQBYBI, RR, 0x1cc, "rotqbybi", _A3(A_T,A_A,A_B), 00112, SHUF) /* ROTQBYBI RT<-RA<<Rb */
-APUOP(M_ROTQMBYBI, RR, 0x1cd, "rotqmbybi", _A3(A_T,A_A,A_B), 00112, SHUF) /* ROTQMBYBI RT<-RA<<Rb */
-APUOP(M_SHLQBYBI, RR, 0x1cf, "shlqbybi", _A3(A_T,A_A,A_B), 00112, SHUF) /* SHLQBYBI RT<-RA<<Rb */
-APUOP(M_STQX, RR, 0x144, "stqx", _A3(A_T,A_A,A_B), 00111, LS) /* SToreQindeX M[Ra+Rb]<-RT */
-APUOP(M_SHUFB, RRR, 0x580, "shufb", _A4(A_C,A_A,A_B,A_T), 02111, SHUF) /* SHUFfleBytes RC<-f(RA,RB,RT) */
-APUOP(M_IL, RI16, 0x204, "il", _A2(A_T,A_S16), 00002, FX2) /* ImmLoad RT<-sxt(I16) */
-APUOP(M_ILH, RI16, 0x20c, "ilh", _A2(A_T,A_X16), 00002, FX2) /* ImmLoadH RT<-I16 */
-APUOP(M_ILHU, RI16, 0x208, "ilhu", _A2(A_T,A_X16), 00002, FX2) /* ImmLoadHUpper RT<-I16<<16 */
-APUOP(M_ILA, RI18, 0x210, "ila", _A2(A_T,A_U18), 00002, FX2) /* ImmLoadAddr RT<-zxt(I18) */
-APUOP(M_NOP, RR, 0x201, "nop", _A1(A_T), 00000, NOP) /* XNOP no_operation */
-APUOP(M_NOP2, RR, 0x201, "nop", _A0(), 00000, NOP) /* XNOP no_operation */
-APUOP(M_IOHL, RI16, 0x304, "iohl", _A2(A_T,A_X16), 00003, FX2) /* AddImmeXt RT<-RT+sxt(I16) */
-APUOP(M_ANDBI, RI10, 0x0b0, "andbi", _A3(A_T,A_A,A_S10B), 00012, FX2) /* AND%I RT<-RA&I10 */
-APUOP(M_ANDHI, RI10, 0x0a8, "andhi", _A3(A_T,A_A,A_S10), 00012, FX2) /* AND%I RT<-RA&I10 */
-APUOP(M_ANDI, RI10, 0x0a0, "andi", _A3(A_T,A_A,A_S10), 00012, FX2) /* AND%I RT<-RA&I10 */
-APUOP(M_ORBI, RI10, 0x030, "orbi", _A3(A_T,A_A,A_S10B), 00012, FX2) /* OR%I RT<-RA|I10 */
-APUOP(M_ORHI, RI10, 0x028, "orhi", _A3(A_T,A_A,A_S10), 00012, FX2) /* OR%I RT<-RA|I10 */
-APUOP(M_ORI, RI10, 0x020, "ori", _A3(A_T,A_A,A_S10), 00012, FX2) /* OR%I RT<-RA|I10 */
-APUOP(M_ORX, RR, 0x1f0, "orx", _A2(A_T,A_A), 00012, BR) /* ORX RT<-RA.w0|RA.w1|RA.w2|RA.w3 */
-APUOP(M_XORBI, RI10, 0x230, "xorbi", _A3(A_T,A_A,A_S10B), 00012, FX2) /* XOR%I RT<-RA^I10 */
-APUOP(M_XORHI, RI10, 0x228, "xorhi", _A3(A_T,A_A,A_S10), 00012, FX2) /* XOR%I RT<-RA^I10 */
-APUOP(M_XORI, RI10, 0x220, "xori", _A3(A_T,A_A,A_S10), 00012, FX2) /* XOR%I RT<-RA^I10 */
-APUOP(M_AHI, RI10, 0x0e8, "ahi", _A3(A_T,A_A,A_S10), 00012, FX2) /* Add%Immed RT<-RA+I10 */
-APUOP(M_AI, RI10, 0x0e0, "ai", _A3(A_T,A_A,A_S10), 00012, FX2) /* Add%Immed RT<-RA+I10 */
-APUOP(M_SFHI, RI10, 0x068, "sfhi", _A3(A_T,A_A,A_S10), 00012, FX2) /* SubFrom%Imm RT<-I10-RA */
-APUOP(M_SFI, RI10, 0x060, "sfi", _A3(A_T,A_A,A_S10), 00012, FX2) /* SubFrom%Imm RT<-I10-RA */
-APUOP(M_CGTBI, RI10, 0x270, "cgtbi", _A3(A_T,A_A,A_S10B), 00012, FX2) /* CGT%I RT<-(RA>I10) */
-APUOP(M_CGTHI, RI10, 0x268, "cgthi", _A3(A_T,A_A,A_S10), 00012, FX2) /* CGT%I RT<-(RA>I10) */
-APUOP(M_CGTI, RI10, 0x260, "cgti", _A3(A_T,A_A,A_S10), 00012, FX2) /* CGT%I RT<-(RA>I10) */
-APUOP(M_CLGTBI, RI10, 0x2f0, "clgtbi", _A3(A_T,A_A,A_S10B), 00012, FX2) /* CLGT%I RT<-(RA>I10) */
-APUOP(M_CLGTHI, RI10, 0x2e8, "clgthi", _A3(A_T,A_A,A_S10), 00012, FX2) /* CLGT%I RT<-(RA>I10) */
-APUOP(M_CLGTI, RI10, 0x2e0, "clgti", _A3(A_T,A_A,A_S10), 00012, FX2) /* CLGT%I RT<-(RA>I10) */
-APUOP(M_CEQBI, RI10, 0x3f0, "ceqbi", _A3(A_T,A_A,A_S10B), 00012, FX2) /* CEQ%I RT<-(RA=I10) */
-APUOP(M_CEQHI, RI10, 0x3e8, "ceqhi", _A3(A_T,A_A,A_S10), 00012, FX2) /* CEQ%I RT<-(RA=I10) */
-APUOP(M_CEQI, RI10, 0x3e0, "ceqi", _A3(A_T,A_A,A_S10), 00012, FX2) /* CEQ%I RT<-(RA=I10) */
-APUOP(M_HGTI, RI10, 0x278, "hgti", _A3(A_T,A_A,A_S10), 00010, FX2) /* HaltGTI halt_if(RA>I10) */
-APUOP(M_HGTI2, RI10, 0x278, "hgti", _A2(A_A,A_S10), 00010, FX2) /* HaltGTI halt_if(RA>I10) */
-APUOP(M_HLGTI, RI10, 0x2f8, "hlgti", _A3(A_T,A_A,A_S10), 00010, FX2) /* HaltLGTI halt_if(RA>I10) */
-APUOP(M_HLGTI2, RI10, 0x2f8, "hlgti", _A2(A_A,A_S10), 00010, FX2) /* HaltLGTI halt_if(RA>I10) */
-APUOP(M_HEQI, RI10, 0x3f8, "heqi", _A3(A_T,A_A,A_S10), 00010, FX2) /* HaltEQImm halt_if(RA=I10) */
-APUOP(M_HEQI2, RI10, 0x3f8, "heqi", _A2(A_A,A_S10), 00010, FX2) /* HaltEQImm halt_if(RA=I10) */
-APUOP(M_MPYI, RI10, 0x3a0, "mpyi", _A3(A_T,A_A,A_S10), 00012, FP7) /* MPYI RT<-RA*I10 */
-APUOP(M_MPYUI, RI10, 0x3a8, "mpyui", _A3(A_T,A_A,A_S10), 00012, FP7) /* MPYUI RT<-RA*I10 */
-APUOP(M_CFLTS, RI8, 0x3b0, "cflts", _A3(A_T,A_A,A_U7A), 00012, FP7) /* CFLTS RT<-int(RA,I8) */
-APUOP(M_CFLTU, RI8, 0x3b2, "cfltu", _A3(A_T,A_A,A_U7A), 00012, FP7) /* CFLTU RT<-int(RA,I8) */
-APUOP(M_CSFLT, RI8, 0x3b4, "csflt", _A3(A_T,A_A,A_U7B), 00012, FP7) /* CSFLT RT<-flt(RA,I8) */
-APUOP(M_CUFLT, RI8, 0x3b6, "cuflt", _A3(A_T,A_A,A_U7B), 00012, FP7) /* CUFLT RT<-flt(RA,I8) */
-APUOP(M_FESD, RR, 0x3b8, "fesd", _A2(A_T,A_A), 00012, FPD) /* FESD RT<-double(RA) */
-APUOP(M_FRDS, RR, 0x3b9, "frds", _A2(A_T,A_A), 00012, FPD) /* FRDS RT<-single(RA) */
-APUOP(M_FSCRRD, RR, 0x398, "fscrrd", _A1(A_T), 00002, FPD) /* FSCRRD RT<-FP_status */
-APUOP(M_FSCRWR, RR, 0x3ba, "fscrwr", _A2(A_T,A_A), 00010, FP7) /* FSCRWR FP_status<-RA */
-APUOP(M_FSCRWR2, RR, 0x3ba, "fscrwr", _A1(A_A), 00010, FP7) /* FSCRWR FP_status<-RA */
-APUOP(M_CLZ, RR, 0x2a5, "clz", _A2(A_T,A_A), 00012, FX2) /* CLZ RT<-clz(RA) */
-APUOP(M_CNTB, RR, 0x2b4, "cntb", _A2(A_T,A_A), 00012, FXB) /* CNT RT<-pop(RA) */
-APUOP(M_XSBH, RR, 0x2b6, "xsbh", _A2(A_T,A_A), 00012, FX2) /* eXtSignBtoH RT<-sign_ext(RA) */
-APUOP(M_XSHW, RR, 0x2ae, "xshw", _A2(A_T,A_A), 00012, FX2) /* eXtSignHtoW RT<-sign_ext(RA) */
-APUOP(M_XSWD, RR, 0x2a6, "xswd", _A2(A_T,A_A), 00012, FX2) /* eXtSignWtoD RT<-sign_ext(RA) */
-APUOP(M_ROTI, RI7, 0x078, "roti", _A3(A_T,A_A,A_S7N), 00012, FX3) /* ROT%I RT<-RA<<<I7 */
-APUOP(M_ROTMI, RI7, 0x079, "rotmi", _A3(A_T,A_A,A_S7), 00012, FX3) /* ROT%MI RT<-RA<<I7 */
-APUOP(M_ROTMAI, RI7, 0x07a, "rotmai", _A3(A_T,A_A,A_S7), 00012, FX3) /* ROTMA%I RT<-RA<<I7 */
-APUOP(M_SHLI, RI7, 0x07b, "shli", _A3(A_T,A_A,A_U6), 00012, FX3) /* SHL%I RT<-RA<<I7 */
-APUOP(M_ROTHI, RI7, 0x07c, "rothi", _A3(A_T,A_A,A_S7N), 00012, FX3) /* ROT%I RT<-RA<<<I7 */
-APUOP(M_ROTHMI, RI7, 0x07d, "rothmi", _A3(A_T,A_A,A_S6), 00012, FX3) /* ROT%MI RT<-RA<<I7 */
-APUOP(M_ROTMAHI, RI7, 0x07e, "rotmahi", _A3(A_T,A_A,A_S6), 00012, FX3) /* ROTMA%I RT<-RA<<I7 */
-APUOP(M_SHLHI, RI7, 0x07f, "shlhi", _A3(A_T,A_A,A_U5), 00012, FX3) /* SHL%I RT<-RA<<I7 */
-APUOP(M_A, RR, 0x0c0, "a", _A3(A_T,A_A,A_B), 00112, FX2) /* Add% RT<-RA+RB */
-APUOP(M_AH, RR, 0x0c8, "ah", _A3(A_T,A_A,A_B), 00112, FX2) /* Add% RT<-RA+RB */
-APUOP(M_SF, RR, 0x040, "sf", _A3(A_T,A_A,A_B), 00112, FX2) /* SubFrom% RT<-RB-RA */
-APUOP(M_SFH, RR, 0x048, "sfh", _A3(A_T,A_A,A_B), 00112, FX2) /* SubFrom% RT<-RB-RA */
-APUOP(M_CGT, RR, 0x240, "cgt", _A3(A_T,A_A,A_B), 00112, FX2) /* CGT% RT<-(RA>RB) */
-APUOP(M_CGTB, RR, 0x250, "cgtb", _A3(A_T,A_A,A_B), 00112, FX2) /* CGT% RT<-(RA>RB) */
-APUOP(M_CGTH, RR, 0x248, "cgth", _A3(A_T,A_A,A_B), 00112, FX2) /* CGT% RT<-(RA>RB) */
-APUOP(M_CLGT, RR, 0x2c0, "clgt", _A3(A_T,A_A,A_B), 00112, FX2) /* CLGT% RT<-(RA>RB) */
-APUOP(M_CLGTB, RR, 0x2d0, "clgtb", _A3(A_T,A_A,A_B), 00112, FX2) /* CLGT% RT<-(RA>RB) */
-APUOP(M_CLGTH, RR, 0x2c8, "clgth", _A3(A_T,A_A,A_B), 00112, FX2) /* CLGT% RT<-(RA>RB) */
-APUOP(M_CEQ, RR, 0x3c0, "ceq", _A3(A_T,A_A,A_B), 00112, FX2) /* CEQ% RT<-(RA=RB) */
-APUOP(M_CEQB, RR, 0x3d0, "ceqb", _A3(A_T,A_A,A_B), 00112, FX2) /* CEQ% RT<-(RA=RB) */
-APUOP(M_CEQH, RR, 0x3c8, "ceqh", _A3(A_T,A_A,A_B), 00112, FX2) /* CEQ% RT<-(RA=RB) */
-APUOP(M_HGT, RR, 0x258, "hgt", _A3(A_T,A_A,A_B), 00110, FX2) /* HaltGT halt_if(RA>RB) */
-APUOP(M_HGT2, RR, 0x258, "hgt", _A2(A_A,A_B), 00110, FX2) /* HaltGT halt_if(RA>RB) */
-APUOP(M_HLGT, RR, 0x2d8, "hlgt", _A3(A_T,A_A,A_B), 00110, FX2) /* HaltLGT halt_if(RA>RB) */
-APUOP(M_HLGT2, RR, 0x2d8, "hlgt", _A2(A_A,A_B), 00110, FX2) /* HaltLGT halt_if(RA>RB) */
-APUOP(M_HEQ, RR, 0x3d8, "heq", _A3(A_T,A_A,A_B), 00110, FX2) /* HaltEQ halt_if(RA=RB) */
-APUOP(M_HEQ2, RR, 0x3d8, "heq", _A2(A_A,A_B), 00110, FX2) /* HaltEQ halt_if(RA=RB) */
-APUOP(M_FCEQ, RR, 0x3c2, "fceq", _A3(A_T,A_A,A_B), 00112, FX2) /* FCEQ RT<-(RA=RB) */
-APUOP(M_FCMEQ, RR, 0x3ca, "fcmeq", _A3(A_T,A_A,A_B), 00112, FX2) /* FCMEQ RT<-(|RA|=|RB|) */
-APUOP(M_FCGT, RR, 0x2c2, "fcgt", _A3(A_T,A_A,A_B), 00112, FX2) /* FCGT RT<-(RA<RB) */
-APUOP(M_FCMGT, RR, 0x2ca, "fcmgt", _A3(A_T,A_A,A_B), 00112, FX2) /* FCMGT RT<-(|RA|<|RB|) */
-APUOP(M_AND, RR, 0x0c1, "and", _A3(A_T,A_A,A_B), 00112, FX2) /* AND RT<-RA&RB */
-APUOP(M_NAND, RR, 0x0c9, "nand", _A3(A_T,A_A,A_B), 00112, FX2) /* NAND RT<-!(RA&RB) */
-APUOP(M_OR, RR, 0x041, "or", _A3(A_T,A_A,A_B), 00112, FX2) /* OR RT<-RA|RB */
-APUOP(M_NOR, RR, 0x049, "nor", _A3(A_T,A_A,A_B), 00112, FX2) /* NOR RT<-!(RA&RB) */
-APUOP(M_XOR, RR, 0x241, "xor", _A3(A_T,A_A,A_B), 00112, FX2) /* XOR RT<-RA^RB */
-APUOP(M_EQV, RR, 0x249, "eqv", _A3(A_T,A_A,A_B), 00112, FX2) /* EQuiValent RT<-!(RA^RB) */
-APUOP(M_ANDC, RR, 0x2c1, "andc", _A3(A_T,A_A,A_B), 00112, FX2) /* ANDComplement RT<-RA&!RB */
-APUOP(M_ORC, RR, 0x2c9, "orc", _A3(A_T,A_A,A_B), 00112, FX2) /* ORComplement RT<-RA|!RB */
-APUOP(M_ABSDB, RR, 0x053, "absdb", _A3(A_T,A_A,A_B), 00112, FXB) /* ABSoluteDiff RT<-|RA-RB| */
-APUOP(M_AVGB, RR, 0x0d3, "avgb", _A3(A_T,A_A,A_B), 00112, FXB) /* AVG% RT<-(RA+RB+1)/2 */
-APUOP(M_SUMB, RR, 0x253, "sumb", _A3(A_T,A_A,A_B), 00112, FXB) /* SUM% RT<-f(RA,RB) */
-APUOP(M_DFA, RR, 0x2cc, "dfa", _A3(A_T,A_A,A_B), 00112, FPD) /* DFAdd RT<-RA+RB */
-APUOP(M_DFM, RR, 0x2ce, "dfm", _A3(A_T,A_A,A_B), 00112, FPD) /* DFMul RT<-RA*RB */
-APUOP(M_DFS, RR, 0x2cd, "dfs", _A3(A_T,A_A,A_B), 00112, FPD) /* DFSub RT<-RA-RB */
-APUOP(M_FA, RR, 0x2c4, "fa", _A3(A_T,A_A,A_B), 00112, FP6) /* FAdd RT<-RA+RB */
-APUOP(M_FM, RR, 0x2c6, "fm", _A3(A_T,A_A,A_B), 00112, FP6) /* FMul RT<-RA*RB */
-APUOP(M_FS, RR, 0x2c5, "fs", _A3(A_T,A_A,A_B), 00112, FP6) /* FSub RT<-RA-RB */
-APUOP(M_MPY, RR, 0x3c4, "mpy", _A3(A_T,A_A,A_B), 00112, FP7) /* MPY RT<-RA*RB */
-APUOP(M_MPYH, RR, 0x3c5, "mpyh", _A3(A_T,A_A,A_B), 00112, FP7) /* MPYH RT<-(RAh*RB)<<16 */
-APUOP(M_MPYHH, RR, 0x3c6, "mpyhh", _A3(A_T,A_A,A_B), 00112, FP7) /* MPYHH RT<-RAh*RBh */
-APUOP(M_MPYHHU, RR, 0x3ce, "mpyhhu", _A3(A_T,A_A,A_B), 00112, FP7) /* MPYHHU RT<-RAh*RBh */
-APUOP(M_MPYS, RR, 0x3c7, "mpys", _A3(A_T,A_A,A_B), 00112, FP7) /* MPYS RT<-(RA*RB)>>16 */
-APUOP(M_MPYU, RR, 0x3cc, "mpyu", _A3(A_T,A_A,A_B), 00112, FP7) /* MPYU RT<-RA*RB */
-APUOP(M_FI, RR, 0x3d4, "fi", _A3(A_T,A_A,A_B), 00112, FP7) /* FInterpolate RT<-f(RA,RB) */
-APUOP(M_ROT, RR, 0x058, "rot", _A3(A_T,A_A,A_B), 00112, FX3) /* ROT% RT<-RA<<<RB */
-APUOP(M_ROTM, RR, 0x059, "rotm", _A3(A_T,A_A,A_B), 00112, FX3) /* ROT%M RT<-RA<<Rb */
-APUOP(M_ROTMA, RR, 0x05a, "rotma", _A3(A_T,A_A,A_B), 00112, FX3) /* ROTMA% RT<-RA<<Rb */
-APUOP(M_SHL, RR, 0x05b, "shl", _A3(A_T,A_A,A_B), 00112, FX3) /* SHL% RT<-RA<<Rb */
-APUOP(M_ROTH, RR, 0x05c, "roth", _A3(A_T,A_A,A_B), 00112, FX3) /* ROT% RT<-RA<<<RB */
-APUOP(M_ROTHM, RR, 0x05d, "rothm", _A3(A_T,A_A,A_B), 00112, FX3) /* ROT%M RT<-RA<<Rb */
-APUOP(M_ROTMAH, RR, 0x05e, "rotmah", _A3(A_T,A_A,A_B), 00112, FX3) /* ROTMA% RT<-RA<<Rb */
-APUOP(M_SHLH, RR, 0x05f, "shlh", _A3(A_T,A_A,A_B), 00112, FX3) /* SHL% RT<-RA<<Rb */
-APUOP(M_MPYHHA, RR, 0x346, "mpyhha", _A3(A_T,A_A,A_B), 00113, FP7) /* MPYHHA RT<-RAh*RBh+RT */
-APUOP(M_MPYHHAU, RR, 0x34e, "mpyhhau", _A3(A_T,A_A,A_B), 00113, FP7) /* MPYHHAU RT<-RAh*RBh+RT */
-APUOP(M_DFMA, RR, 0x35c, "dfma", _A3(A_T,A_A,A_B), 00113, FPD) /* DFMAdd RT<-RT+RA*RB */
-APUOP(M_DFMS, RR, 0x35d, "dfms", _A3(A_T,A_A,A_B), 00113, FPD) /* DFMSub RT<-RA*RB-RT */
-APUOP(M_DFNMS, RR, 0x35e, "dfnms", _A3(A_T,A_A,A_B), 00113, FPD) /* DFNMSub RT<-RT-RA*RB */
-APUOP(M_DFNMA, RR, 0x35f, "dfnma", _A3(A_T,A_A,A_B), 00113, FPD) /* DFNMAdd RT<-(-RT)-RA*RB */
-APUOP(M_FMA, RRR, 0x700, "fma", _A4(A_C,A_A,A_B,A_T), 02111, FP6) /* FMAdd RC<-RT+RA*RB */
-APUOP(M_FMS, RRR, 0x780, "fms", _A4(A_C,A_A,A_B,A_T), 02111, FP6) /* FMSub RC<-RA*RB-RT */
-APUOP(M_FNMS, RRR, 0x680, "fnms", _A4(A_C,A_A,A_B,A_T), 02111, FP6) /* FNMSub RC<-RT-RA*RB */
-APUOP(M_MPYA, RRR, 0x600, "mpya", _A4(A_C,A_A,A_B,A_T), 02111, FP7) /* MPYA RC<-RA*RB+RT */
-APUOP(M_SELB, RRR, 0x400, "selb", _A4(A_C,A_A,A_B,A_T), 02111, FX2) /* SELectBits RC<-RA&RT|RB&!RT */
-/* for system function call, this uses op-code of mtspr */
-APUOP(M_SYSCALL, RI7, 0x10c, "syscall", _A3(A_T,A_A,A_S7N), 00002, SPR) /* System Call */
-/*
-pseudo instruction:
-system call
-value of I9 operation
-0 halt
-1 rt[0] = open(MEM[ra[0]], ra[1])
-2 rt[0] = close(ra[0])
-3 rt[0] = read(ra[0], MEM[ra[1]], ra[2])
-4 rt[0] = write(ra[0], MEM[ra[1]], ra[2])
-5 printf(MEM[ra[0]], ra[1], ra[2], ra[3])
-42 rt[0] = clock()
-52 rt[0] = lseek(ra0, ra1, ra2)
-
-*/
-
-
-/* new multiprecision add/sub */
-APUOP(M_ADDX, RR, 0x340, "addx", _A3(A_T,A_A,A_B), 00113, FX2) /* Add_eXtended RT<-RA+RB+RT */
-APUOP(M_CG, RR, 0x0c2, "cg", _A3(A_T,A_A,A_B), 00112, FX2) /* CarryGenerate RT<-cout(RA+RB) */
-APUOP(M_CGX, RR, 0x342, "cgx", _A3(A_T,A_A,A_B), 00113, FX2) /* CarryGen_eXtd RT<-cout(RA+RB+RT) */
-APUOP(M_SFX, RR, 0x341, "sfx", _A3(A_T,A_A,A_B), 00113, FX2) /* Add_eXtended RT<-RA+RB+RT */
-APUOP(M_BG, RR, 0x042, "bg", _A3(A_T,A_A,A_B), 00112, FX2) /* CarryGenerate RT<-cout(RA+RB) */
-APUOP(M_BGX, RR, 0x343, "bgx", _A3(A_T,A_A,A_B), 00113, FX2) /* CarryGen_eXtd RT<-cout(RA+RB+RT) */
-
-/*
-
-The following ops are a subset of above except with feature bits set.
-Feature bits are bits 11-17 of the instruction:
-
- 11 - C & P feature bit
- 12 - disable interrupts
- 13 - enable interrupts
-
-*/
-APUOPFB(M_BID, RR, 0x1a8, 0x20, "bid", _A1(A_A), 00010, BR) /* BI IP<-RA */
-APUOPFB(M_BIE, RR, 0x1a8, 0x10, "bie", _A1(A_A), 00010, BR) /* BI IP<-RA */
-APUOPFB(M_BISLD, RR, 0x1a9, 0x20, "bisld", _A2(A_T,A_A), 00012, BR) /* BISL RT,IP<-IP,RA */
-APUOPFB(M_BISLE, RR, 0x1a9, 0x10, "bisle", _A2(A_T,A_A), 00012, BR) /* BISL RT,IP<-IP,RA */
-APUOPFB(M_IRETD, RR, 0x1aa, 0x20, "iretd", _A1(A_A), 00010, BR) /* IRET IP<-SRR0 */
-APUOPFB(M_IRETD2, RR, 0x1aa, 0x20, "iretd", _A0(), 00010, BR) /* IRET IP<-SRR0 */
-APUOPFB(M_IRETE, RR, 0x1aa, 0x10, "irete", _A1(A_A), 00010, BR) /* IRET IP<-SRR0 */
-APUOPFB(M_IRETE2, RR, 0x1aa, 0x10, "irete", _A0(), 00010, BR) /* IRET IP<-SRR0 */
-APUOPFB(M_BISLEDD, RR, 0x1ab, 0x20, "bisledd", _A2(A_T,A_A), 00012, BR) /* BISLED RT,IP<-IP,RA_if(ext) */
-APUOPFB(M_BISLEDE, RR, 0x1ab, 0x10, "bislede", _A2(A_T,A_A), 00012, BR) /* BISLED RT,IP<-IP,RA_if(ext) */
-APUOPFB(M_BIHNZD, RR, 0x12b, 0x20, "bihnzd", _A2(A_T,A_A), 00011, BR) /* BIHNZ IP<-RA_if(RT) */
-APUOPFB(M_BIHNZE, RR, 0x12b, 0x10, "bihnze", _A2(A_T,A_A), 00011, BR) /* BIHNZ IP<-RA_if(RT) */
-APUOPFB(M_BIHZD, RR, 0x12a, 0x20, "bihzd", _A2(A_T,A_A), 00011, BR) /* BIHZ IP<-RA_if(RT) */
-APUOPFB(M_BIHZE, RR, 0x12a, 0x10, "bihze", _A2(A_T,A_A), 00011, BR) /* BIHZ IP<-RA_if(RT) */
-APUOPFB(M_BINZD, RR, 0x129, 0x20, "binzd", _A2(A_T,A_A), 00011, BR) /* BINZ IP<-RA_if(RT) */
-APUOPFB(M_BINZE, RR, 0x129, 0x10, "binze", _A2(A_T,A_A), 00011, BR) /* BINZ IP<-RA_if(RT) */
-APUOPFB(M_BIZD, RR, 0x128, 0x20, "bizd", _A2(A_T,A_A), 00011, BR) /* BIZ IP<-RA_if(RT) */
-APUOPFB(M_BIZE, RR, 0x128, 0x10, "bize", _A2(A_T,A_A), 00011, BR) /* BIZ IP<-RA_if(RT) */
-APUOPFB(M_SYNCC, RR, 0x002, 0x40, "syncc", _A0(), 00000, BR) /* SYNCC flush_pipe */
-APUOPFB(M_HBRP, LBTI, 0x1ac, 0x40, "hbrp", _A0(), 00010, LS) /* HBR BTB[B9]<-M[Ra] */
-
-/* Synonyms required by the AS manual. */
-APUOP(M_LR, RI10, 0x020, "lr", _A2(A_T,A_A), 00012, FX2) /* OR%I RT<-RA|I10 */
-APUOP(M_BIHT, RR, 0x12b, "biht", _A2(A_T,A_A), 00011, BR) /* BIHNZ IP<-RA_if(RT) */
-APUOP(M_BIHF, RR, 0x12a, "bihf", _A2(A_T,A_A), 00011, BR) /* BIHZ IP<-RA_if(RT) */
-APUOP(M_BIT, RR, 0x129, "bit", _A2(A_T,A_A), 00011, BR) /* BINZ IP<-RA_if(RT) */
-APUOP(M_BIF, RR, 0x128, "bif", _A2(A_T,A_A), 00011, BR) /* BIZ IP<-RA_if(RT) */
-APUOPFB(M_BIHTD, RR, 0x12b, 0x20, "bihtd", _A2(A_T,A_A), 00011, BR) /* BIHNF IP<-RA_if(RT) */
-APUOPFB(M_BIHTE, RR, 0x12b, 0x10, "bihte", _A2(A_T,A_A), 00011, BR) /* BIHNF IP<-RA_if(RT) */
-APUOPFB(M_BIHFD, RR, 0x12a, 0x20, "bihfd", _A2(A_T,A_A), 00011, BR) /* BIHZ IP<-RA_if(RT) */
-APUOPFB(M_BIHFE, RR, 0x12a, 0x10, "bihfe", _A2(A_T,A_A), 00011, BR) /* BIHZ IP<-RA_if(RT) */
-APUOPFB(M_BITD, RR, 0x129, 0x20, "bitd", _A2(A_T,A_A), 00011, BR) /* BINF IP<-RA_if(RT) */
-APUOPFB(M_BITE, RR, 0x129, 0x10, "bite", _A2(A_T,A_A), 00011, BR) /* BINF IP<-RA_if(RT) */
-APUOPFB(M_BIFD, RR, 0x128, 0x20, "bifd", _A2(A_T,A_A), 00011, BR) /* BIZ IP<-RA_if(RT) */
-APUOPFB(M_BIFE, RR, 0x128, 0x10, "bife", _A2(A_T,A_A), 00011, BR) /* BIZ IP<-RA_if(RT) */
-
-#undef _A0
-#undef _A1
-#undef _A2
-#undef _A3
-#undef _A4
diff --git a/arch/powerpc/xmon/spu-opc.c b/arch/powerpc/xmon/spu-opc.c
deleted file mode 100644
index 6d8197cc540b..000000000000
--- a/arch/powerpc/xmon/spu-opc.c
+++ /dev/null
@@ -1,34 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/* SPU opcode list
-
- Copyright 2006 Free Software Foundation, Inc.
-
- This file is part of GDB, GAS, and the GNU binutils.
-
- */
-
-#include <linux/kernel.h>
-#include <linux/bug.h>
-#include "spu.h"
-
-/* This file holds the Spu opcode table */
-
-
-/*
- Example contents of spu-insn.h
- id_tag mode mode type opcode mnemonic asmtype dependency FPU L/S? branch? instruction
- QUAD WORD (0,RC,RB,RA,RT) latency
- APUOP(M_LQD, 1, 0, RI9, 0x1f8, "lqd", ASM_RI9IDX, 00012, FXU, 1, 0) Load Quadword d-form
- */
-
-const struct spu_opcode spu_opcodes[] = {
-#define APUOP(TAG,MACFORMAT,OPCODE,MNEMONIC,ASMFORMAT,DEP,PIPE) \
- { MACFORMAT, OPCODE, MNEMONIC, ASMFORMAT },
-#define APUOPFB(TAG,MACFORMAT,OPCODE,FB,MNEMONIC,ASMFORMAT,DEP,PIPE) \
- { MACFORMAT, OPCODE, MNEMONIC, ASMFORMAT },
-#include "spu-insns.h"
-#undef APUOP
-#undef APUOPFB
-};
-
-const int spu_num_opcodes = ARRAY_SIZE(spu_opcodes);
diff --git a/arch/powerpc/xmon/spu.h b/arch/powerpc/xmon/spu.h
deleted file mode 100644
index 2d13b1a5fa87..000000000000
--- a/arch/powerpc/xmon/spu.h
+++ /dev/null
@@ -1,115 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/* SPU ELF support for BFD.
-
- Copyright 2006 Free Software Foundation, Inc.
-
- This file is part of GDB, GAS, and the GNU binutils.
-
- */
-
-
-/* These two enums are from rel_apu/common/spu_asm_format.h */
-/* definition of instruction format */
-typedef enum {
- RRR,
- RI18,
- RI16,
- RI10,
- RI8,
- RI7,
- RR,
- LBT,
- LBTI,
- IDATA,
- UNKNOWN_IFORMAT
-} spu_iformat;
-
-/* These values describe assembly instruction arguments. They indicate
- * how to encode, range checking and which relocation to use. */
-typedef enum {
- A_T, /* register at pos 0 */
- A_A, /* register at pos 7 */
- A_B, /* register at pos 14 */
- A_C, /* register at pos 21 */
- A_S, /* special purpose register at pos 7 */
- A_H, /* channel register at pos 7 */
- A_P, /* parenthesis, this has to separate regs from immediates */
- A_S3,
- A_S6,
- A_S7N,
- A_S7,
- A_U7A,
- A_U7B,
- A_S10B,
- A_S10,
- A_S11,
- A_S11I,
- A_S14,
- A_S16,
- A_S18,
- A_R18,
- A_U3,
- A_U5,
- A_U6,
- A_U7,
- A_U14,
- A_X16,
- A_U18,
- A_MAX
-} spu_aformat;
-
-enum spu_insns {
-#define APUOP(TAG,MACFORMAT,OPCODE,MNEMONIC,ASMFORMAT,DEP,PIPE) \
- TAG,
-#define APUOPFB(TAG,MACFORMAT,OPCODE,FB,MNEMONIC,ASMFORMAT,DEP,PIPE) \
- TAG,
-#include "spu-insns.h"
-#undef APUOP
-#undef APUOPFB
- M_SPU_MAX
-};
-
-struct spu_opcode
-{
- spu_iformat insn_type;
- unsigned int opcode;
- char *mnemonic;
- int arg[5];
-};
-
-#define SIGNED_EXTRACT(insn,size,pos) (((int)((insn) << (32-size-pos))) >> (32-size))
-#define UNSIGNED_EXTRACT(insn,size,pos) (((insn) >> pos) & ((1 << size)-1))
-
-#define DECODE_INSN_RT(insn) (insn & 0x7f)
-#define DECODE_INSN_RA(insn) ((insn >> 7) & 0x7f)
-#define DECODE_INSN_RB(insn) ((insn >> 14) & 0x7f)
-#define DECODE_INSN_RC(insn) ((insn >> 21) & 0x7f)
-
-#define DECODE_INSN_I10(insn) SIGNED_EXTRACT(insn,10,14)
-#define DECODE_INSN_U10(insn) UNSIGNED_EXTRACT(insn,10,14)
-
-/* For branching, immediate loads, hbr and lqa/stqa. */
-#define DECODE_INSN_I16(insn) SIGNED_EXTRACT(insn,16,7)
-#define DECODE_INSN_U16(insn) UNSIGNED_EXTRACT(insn,16,7)
-
-/* for stop */
-#define DECODE_INSN_U14(insn) UNSIGNED_EXTRACT(insn,14,0)
-
-/* For ila */
-#define DECODE_INSN_I18(insn) SIGNED_EXTRACT(insn,18,7)
-#define DECODE_INSN_U18(insn) UNSIGNED_EXTRACT(insn,18,7)
-
-/* For rotate and shift and generate control mask */
-#define DECODE_INSN_I7(insn) SIGNED_EXTRACT(insn,7,14)
-#define DECODE_INSN_U7(insn) UNSIGNED_EXTRACT(insn,7,14)
-
-/* For float <-> int conversion */
-#define DECODE_INSN_I8(insn) SIGNED_EXTRACT(insn,8,14)
-#define DECODE_INSN_U8(insn) UNSIGNED_EXTRACT(insn,8,14)
-
-/* For hbr */
-#define DECODE_INSN_I9a(insn) ((SIGNED_EXTRACT(insn,2,23) << 7) | UNSIGNED_EXTRACT(insn,7,0))
-#define DECODE_INSN_I9b(insn) ((SIGNED_EXTRACT(insn,2,14) << 7) | UNSIGNED_EXTRACT(insn,7,0))
-#define DECODE_INSN_U9a(insn) ((UNSIGNED_EXTRACT(insn,2,23) << 7) | UNSIGNED_EXTRACT(insn,7,0))
-#define DECODE_INSN_U9b(insn) ((UNSIGNED_EXTRACT(insn,2,14) << 7) | UNSIGNED_EXTRACT(insn,7,0))
-
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
index 268859e4df87..88abffa8b54c 100644
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -41,8 +41,6 @@
#include <asm/rtas.h>
#include <asm/sstep.h>
#include <asm/irq_regs.h>
-#include <asm/spu.h>
-#include <asm/spu_priv1.h>
#include <asm/setjmp.h>
#include <asm/reg.h>
#include <asm/debug.h>
@@ -188,8 +186,6 @@ static void xmon_print_symbol(unsigned long address, const char *mid,
const char *after);
static const char *getvecname(unsigned long vec);
-static int do_spu_cmd(void);
-
#ifdef CONFIG_44x
static void dump_tlb_44x(void);
#endif
@@ -272,13 +268,6 @@ Commands:\n\
P list processes/tasks\n\
r print registers\n\
s single step\n"
-#ifdef CONFIG_SPU_BASE
-" ss stop execution on all spus\n\
- sr restore execution on stopped spus\n\
- sf # dump spu fields for spu # (in hex)\n\
- sd # dump spu local store for spu # (in hex)\n\
- sdi # disassemble spu local store for spu # (in hex)\n"
-#endif
" S print special registers\n\
Sa print all SPRs\n\
Sr # read SPR #\n\
@@ -1112,8 +1101,6 @@ cmds(struct pt_regs *excp)
cacheflush();
break;
case 's':
- if (do_spu_cmd() == 0)
- break;
if (do_step(excp))
return cmd;
break;
@@ -1271,11 +1258,7 @@ static int xmon_batch_next_cpu(void)
{
unsigned long cpu;
- while (!cpumask_empty(&xmon_batch_cpus)) {
- cpu = cpumask_next_wrap(smp_processor_id(), &xmon_batch_cpus,
- xmon_batch_start_cpu, true);
- if (cpu >= nr_cpu_ids)
- break;
+ for_each_cpu_wrap(cpu, &xmon_batch_cpus, xmon_batch_start_cpu) {
if (xmon_batch_start_cpu == -1)
xmon_batch_start_cpu = cpu;
if (xmon_switch_cpu(cpu))
@@ -4107,263 +4090,3 @@ void __init xmon_setup(void)
if (xmon_early)
debugger(NULL);
}
-
-#ifdef CONFIG_SPU_BASE
-
-struct spu_info {
- struct spu *spu;
- u64 saved_mfc_sr1_RW;
- u32 saved_spu_runcntl_RW;
- unsigned long dump_addr;
- u8 stopped_ok;
-};
-
-#define XMON_NUM_SPUS 16 /* Enough for current hardware */
-
-static struct spu_info spu_info[XMON_NUM_SPUS];
-
-void __init xmon_register_spus(struct list_head *list)
-{
- struct spu *spu;
-
- list_for_each_entry(spu, list, full_list) {
- if (spu->number >= XMON_NUM_SPUS) {
- WARN_ON(1);
- continue;
- }
-
- spu_info[spu->number].spu = spu;
- spu_info[spu->number].stopped_ok = 0;
- spu_info[spu->number].dump_addr = (unsigned long)
- spu_info[spu->number].spu->local_store;
- }
-}
-
-static void stop_spus(void)
-{
- struct spu *spu;
- volatile int i;
- u64 tmp;
-
- for (i = 0; i < XMON_NUM_SPUS; i++) {
- if (!spu_info[i].spu)
- continue;
-
- if (setjmp(bus_error_jmp) == 0) {
- catch_memory_errors = 1;
- sync();
-
- spu = spu_info[i].spu;
-
- spu_info[i].saved_spu_runcntl_RW =
- in_be32(&spu->problem->spu_runcntl_RW);
-
- tmp = spu_mfc_sr1_get(spu);
- spu_info[i].saved_mfc_sr1_RW = tmp;
-
- tmp &= ~MFC_STATE1_MASTER_RUN_CONTROL_MASK;
- spu_mfc_sr1_set(spu, tmp);
-
- sync();
- __delay(200);
-
- spu_info[i].stopped_ok = 1;
-
- printf("Stopped spu %.2d (was %s)\n", i,
- spu_info[i].saved_spu_runcntl_RW ?
- "running" : "stopped");
- } else {
- catch_memory_errors = 0;
- printf("*** Error stopping spu %.2d\n", i);
- }
- catch_memory_errors = 0;
- }
-}
-
-static void restart_spus(void)
-{
- struct spu *spu;
- volatile int i;
-
- for (i = 0; i < XMON_NUM_SPUS; i++) {
- if (!spu_info[i].spu)
- continue;
-
- if (!spu_info[i].stopped_ok) {
- printf("*** Error, spu %d was not successfully stopped"
- ", not restarting\n", i);
- continue;
- }
-
- if (setjmp(bus_error_jmp) == 0) {
- catch_memory_errors = 1;
- sync();
-
- spu = spu_info[i].spu;
- spu_mfc_sr1_set(spu, spu_info[i].saved_mfc_sr1_RW);
- out_be32(&spu->problem->spu_runcntl_RW,
- spu_info[i].saved_spu_runcntl_RW);
-
- sync();
- __delay(200);
-
- printf("Restarted spu %.2d\n", i);
- } else {
- catch_memory_errors = 0;
- printf("*** Error restarting spu %.2d\n", i);
- }
- catch_memory_errors = 0;
- }
-}
-
-#define DUMP_WIDTH 23
-#define DUMP_VALUE(format, field, value) \
-do { \
- if (setjmp(bus_error_jmp) == 0) { \
- catch_memory_errors = 1; \
- sync(); \
- printf(" %-*s = "format"\n", DUMP_WIDTH, \
- #field, value); \
- sync(); \
- __delay(200); \
- } else { \
- catch_memory_errors = 0; \
- printf(" %-*s = *** Error reading field.\n", \
- DUMP_WIDTH, #field); \
- } \
- catch_memory_errors = 0; \
-} while (0)
-
-#define DUMP_FIELD(obj, format, field) \
- DUMP_VALUE(format, field, obj->field)
-
-static void dump_spu_fields(struct spu *spu)
-{
- printf("Dumping spu fields at address %p:\n", spu);
-
- DUMP_FIELD(spu, "0x%x", number);
- DUMP_FIELD(spu, "%s", name);
- DUMP_FIELD(spu, "0x%lx", local_store_phys);
- DUMP_FIELD(spu, "0x%p", local_store);
- DUMP_FIELD(spu, "0x%lx", ls_size);
- DUMP_FIELD(spu, "0x%x", node);
- DUMP_FIELD(spu, "0x%lx", flags);
- DUMP_FIELD(spu, "%llu", class_0_pending);
- DUMP_FIELD(spu, "0x%llx", class_0_dar);
- DUMP_FIELD(spu, "0x%llx", class_1_dar);
- DUMP_FIELD(spu, "0x%llx", class_1_dsisr);
- DUMP_FIELD(spu, "0x%x", irqs[0]);
- DUMP_FIELD(spu, "0x%x", irqs[1]);
- DUMP_FIELD(spu, "0x%x", irqs[2]);
- DUMP_FIELD(spu, "0x%x", slb_replace);
- DUMP_FIELD(spu, "%d", pid);
- DUMP_FIELD(spu, "0x%p", mm);
- DUMP_FIELD(spu, "0x%p", ctx);
- DUMP_FIELD(spu, "0x%p", rq);
- DUMP_FIELD(spu, "0x%llx", timestamp);
- DUMP_FIELD(spu, "0x%lx", problem_phys);
- DUMP_FIELD(spu, "0x%p", problem);
- DUMP_VALUE("0x%x", problem->spu_runcntl_RW,
- in_be32(&spu->problem->spu_runcntl_RW));
- DUMP_VALUE("0x%x", problem->spu_status_R,
- in_be32(&spu->problem->spu_status_R));
- DUMP_VALUE("0x%x", problem->spu_npc_RW,
- in_be32(&spu->problem->spu_npc_RW));
- DUMP_FIELD(spu, "0x%p", priv2);
- DUMP_FIELD(spu, "0x%p", pdata);
-}
-
-static int spu_inst_dump(unsigned long adr, long count, int praddr)
-{
- return generic_inst_dump(adr, count, praddr, print_insn_spu);
-}
-
-static void dump_spu_ls(unsigned long num, int subcmd)
-{
- unsigned long offset, addr, ls_addr;
-
- if (setjmp(bus_error_jmp) == 0) {
- catch_memory_errors = 1;
- sync();
- ls_addr = (unsigned long)spu_info[num].spu->local_store;
- sync();
- __delay(200);
- } else {
- catch_memory_errors = 0;
- printf("*** Error: accessing spu info for spu %ld\n", num);
- return;
- }
- catch_memory_errors = 0;
-
- if (scanhex(&offset))
- addr = ls_addr + offset;
- else
- addr = spu_info[num].dump_addr;
-
- if (addr >= ls_addr + LS_SIZE) {
- printf("*** Error: address outside of local store\n");
- return;
- }
-
- switch (subcmd) {
- case 'i':
- addr += spu_inst_dump(addr, 16, 1);
- last_cmd = "sdi\n";
- break;
- default:
- prdump(addr, 64);
- addr += 64;
- last_cmd = "sd\n";
- break;
- }
-
- spu_info[num].dump_addr = addr;
-}
-
-static int do_spu_cmd(void)
-{
- static unsigned long num = 0;
- int cmd, subcmd = 0;
-
- cmd = inchar();
- switch (cmd) {
- case 's':
- stop_spus();
- break;
- case 'r':
- restart_spus();
- break;
- case 'd':
- subcmd = inchar();
- if (isxdigit(subcmd) || subcmd == '\n')
- termch = subcmd;
- fallthrough;
- case 'f':
- scanhex(&num);
- if (num >= XMON_NUM_SPUS || !spu_info[num].spu) {
- printf("*** Error: invalid spu number\n");
- return 0;
- }
-
- switch (cmd) {
- case 'f':
- dump_spu_fields(spu_info[num].spu);
- break;
- default:
- dump_spu_ls(num, subcmd);
- break;
- }
-
- break;
- default:
- return -1;
- }
-
- return 0;
-}
-#else /* ! CONFIG_SPU_BASE */
-static int do_spu_cmd(void)
-{
- return -1;
-}
-#endif
diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
index 7612c52e9b1e..10116f68569d 100644
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -25,6 +25,8 @@ config RISCV
select ARCH_ENABLE_THP_MIGRATION if TRANSPARENT_HUGEPAGE
select ARCH_HAS_BINFMT_FLAT
select ARCH_HAS_CRC32 if RISCV_ISA_ZBC
+ select ARCH_HAS_CRC64 if 64BIT && RISCV_ISA_ZBC
+ select ARCH_HAS_CRC_T10DIF if RISCV_ISA_ZBC
select ARCH_HAS_CURRENT_STACK_POINTER
select ARCH_HAS_DEBUG_VIRTUAL if MMU
select ARCH_HAS_DEBUG_VM_PGTABLE
@@ -53,7 +55,7 @@ config RISCV
select ARCH_HAS_SYSCALL_WRAPPER
select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
select ARCH_HAS_UBSAN
- select ARCH_HAS_VDSO_TIME_DATA
+ select ARCH_HAS_VDSO_ARCH_DATA if GENERIC_VDSO_DATA_STORE
select ARCH_KEEP_MEMBLOCK if ACPI
select ARCH_MHP_MEMMAP_ON_MEMORY_ENABLE if 64BIT && MMU
select ARCH_OPTIONAL_KERNEL_RWX if ARCH_HAS_STRICT_KERNEL_RWX
@@ -111,11 +113,13 @@ config RISCV
select GENERIC_IRQ_SHOW
select GENERIC_IRQ_SHOW_LEVEL
select GENERIC_LIB_DEVMEM_IS_ALLOWED
+ select GENERIC_PENDING_IRQ if SMP
select GENERIC_PCI_IOMAP
select GENERIC_PTDUMP if MMU
select GENERIC_SCHED_CLOCK
select GENERIC_SMP_IDLE_THREAD
select GENERIC_TIME_VSYSCALL if MMU && 64BIT
+ select GENERIC_VDSO_DATA_STORE if MMU
select GENERIC_VDSO_TIME_NS if HAVE_GENERIC_VDSO
select HARDIRQS_SW_RESEND
select HAS_IOPORT if MMU
diff --git a/arch/riscv/Kconfig.socs b/arch/riscv/Kconfig.socs
index 1916cf7ba450..17606940bb52 100644
--- a/arch/riscv/Kconfig.socs
+++ b/arch/riscv/Kconfig.socs
@@ -26,6 +26,7 @@ config ARCH_SOPHGO
config ARCH_SPACEMIT
bool "SpacemiT SoCs"
+ select PINCTRL
help
This enables support for SpacemiT SoC platform hardware.
diff --git a/arch/riscv/boot/dts/microchip/mpfs-icicle-kit-fabric.dtsi b/arch/riscv/boot/dts/microchip/mpfs-icicle-kit-fabric.dtsi
index 1069134f2e12..a6dda55a2d1d 100644
--- a/arch/riscv/boot/dts/microchip/mpfs-icicle-kit-fabric.dtsi
+++ b/arch/riscv/boot/dts/microchip/mpfs-icicle-kit-fabric.dtsi
@@ -32,8 +32,9 @@
#interrupt-cells = <0x1>;
#size-cells = <0x2>;
device_type = "pci";
- reg = <0x30 0x0 0x0 0x8000000>, <0x0 0x43000000 0x0 0x10000>;
- reg-names = "cfg", "apb";
+ reg = <0x30 0x0 0x0 0x8000000>, <0x0 0x43008000 0x0 0x2000>,
+ <0x0 0x4300a000 0x0 0x2000>;
+ reg-names = "cfg", "bridge", "ctrl";
bus-range = <0x0 0x7f>;
interrupt-parent = <&plic>;
interrupts = <119>;
diff --git a/arch/riscv/boot/dts/microchip/mpfs-m100pfs-fabric.dtsi b/arch/riscv/boot/dts/microchip/mpfs-m100pfs-fabric.dtsi
index 8230f06ddf48..36a9860f31da 100644
--- a/arch/riscv/boot/dts/microchip/mpfs-m100pfs-fabric.dtsi
+++ b/arch/riscv/boot/dts/microchip/mpfs-m100pfs-fabric.dtsi
@@ -20,8 +20,9 @@
#interrupt-cells = <0x1>;
#size-cells = <0x2>;
device_type = "pci";
- reg = <0x20 0x0 0x0 0x8000000>, <0x0 0x43000000 0x0 0x10000>;
- reg-names = "cfg", "apb";
+ reg = <0x20 0x0 0x0 0x8000000>, <0x0 0x43008000 0x0 0x2000>,
+ <0x0 0x4300a000 0x0 0x2000>;
+ reg-names = "cfg", "bridge", "ctrl";
bus-range = <0x0 0x7f>;
interrupt-parent = <&plic>;
interrupts = <119>;
diff --git a/arch/riscv/boot/dts/microchip/mpfs-polarberry-fabric.dtsi b/arch/riscv/boot/dts/microchip/mpfs-polarberry-fabric.dtsi
index 9a56de7b91d6..a57dca891965 100644
--- a/arch/riscv/boot/dts/microchip/mpfs-polarberry-fabric.dtsi
+++ b/arch/riscv/boot/dts/microchip/mpfs-polarberry-fabric.dtsi
@@ -20,8 +20,9 @@
#interrupt-cells = <0x1>;
#size-cells = <0x2>;
device_type = "pci";
- reg = <0x20 0x0 0x0 0x8000000>, <0x0 0x43000000 0x0 0x10000>;
- reg-names = "cfg", "apb";
+ reg = <0x20 0x0 0x0 0x8000000>, <0x0 0x43008000 0x0 0x2000>,
+ <0x0 0x4300a000 0x0 0x2000>;
+ reg-names = "cfg", "bridge", "ctrl";
bus-range = <0x0 0x7f>;
interrupt-parent = <&plic>;
interrupts = <119>;
diff --git a/arch/riscv/boot/dts/sophgo/sg2042-milkv-pioneer.dts b/arch/riscv/boot/dts/sophgo/sg2042-milkv-pioneer.dts
index be596d01ff8d..34645a5f6038 100644
--- a/arch/riscv/boot/dts/sophgo/sg2042-milkv-pioneer.dts
+++ b/arch/riscv/boot/dts/sophgo/sg2042-milkv-pioneer.dts
@@ -73,6 +73,13 @@
};
/ {
+ pwmfan: pwm-fan {
+ compatible = "pwm-fan";
+ cooling-levels = <103 128 179 230 255>;
+ pwms = <&pwm 0 40000 0>;
+ #cooling-cells = <2>;
+ };
+
thermal-zones {
soc-thermal {
polling-delay-passive = <1000>;
@@ -104,6 +111,28 @@
type = "hot";
};
};
+
+ cooling-maps {
+ map0 {
+ trip = <&soc_active1>;
+ cooling-device = <&pwmfan 0 1>;
+ };
+
+ map1 {
+ trip = <&soc_active2>;
+ cooling-device = <&pwmfan 1 2>;
+ };
+
+ map2 {
+ trip = <&soc_active3>;
+ cooling-device = <&pwmfan 2 3>;
+ };
+
+ map3 {
+ trip = <&soc_hot>;
+ cooling-device = <&pwmfan 3 4>;
+ };
+ };
};
board-thermal {
@@ -118,6 +147,13 @@
type = "active";
};
};
+
+ cooling-maps {
+ map4 {
+ trip = <&board_active>;
+ cooling-device = <&pwmfan 3 4>;
+ };
+ };
};
};
};
diff --git a/arch/riscv/boot/dts/sophgo/sg2042.dtsi b/arch/riscv/boot/dts/sophgo/sg2042.dtsi
index e62ac51ac55a..aa8b7fcc125d 100644
--- a/arch/riscv/boot/dts/sophgo/sg2042.dtsi
+++ b/arch/riscv/boot/dts/sophgo/sg2042.dtsi
@@ -165,6 +165,15 @@
};
};
+ pwm: pwm@703000c000 {
+ compatible = "sophgo,sg2042-pwm";
+ reg = <0x70 0x3000c000 0x0 0x20>;
+ #pwm-cells = <3>;
+ clocks = <&clkgen GATE_CLK_APB_PWM>;
+ clock-names = "apb";
+ resets = <&rstgen RST_PWM>;
+ };
+
pllclk: clock-controller@70300100c0 {
compatible = "sophgo,sg2042-pll";
reg = <0x70 0x300100c0 0x0 0x40>;
@@ -173,6 +182,16 @@
#clock-cells = <1>;
};
+ msi: msi-controller@7030010304 {
+ compatible = "sophgo,sg2042-msi";
+ reg = <0x70 0x30010304 0x0 0x4>,
+ <0x70 0x30010300 0x0 0x4>;
+ reg-names = "clr", "doorbell";
+ msi-controller;
+ #msi-cells = <0>;
+ msi-ranges = <&intc 64 IRQ_TYPE_LEVEL_HIGH 32>;
+ };
+
rpgate: clock-controller@7030010368 {
compatible = "sophgo,sg2042-rpgate";
reg = <0x70 0x30010368 0x0 0x98>;
diff --git a/arch/riscv/boot/dts/spacemit/Makefile b/arch/riscv/boot/dts/spacemit/Makefile
index ac617319a574..92e13ce1c16d 100644
--- a/arch/riscv/boot/dts/spacemit/Makefile
+++ b/arch/riscv/boot/dts/spacemit/Makefile
@@ -1,2 +1,3 @@
# SPDX-License-Identifier: GPL-2.0
dtb-$(CONFIG_ARCH_SPACEMIT) += k1-bananapi-f3.dtb
+dtb-$(CONFIG_ARCH_SPACEMIT) += k1-milkv-jupiter.dtb
diff --git a/arch/riscv/boot/dts/spacemit/k1-milkv-jupiter.dts b/arch/riscv/boot/dts/spacemit/k1-milkv-jupiter.dts
new file mode 100644
index 000000000000..448319214104
--- /dev/null
+++ b/arch/riscv/boot/dts/spacemit/k1-milkv-jupiter.dts
@@ -0,0 +1,27 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/*
+ * Copyright (C) 2024 Yangyu Chen <cyy@cyyself.name>
+ * Copyright (C) 2025 Javier Martinez Canillas <javierm@redhat.com>
+ */
+
+#include "k1.dtsi"
+#include "k1-pinctrl.dtsi"
+
+/ {
+ model = "Milk-V Jupiter (K1)";
+ compatible = "milkv,jupiter", "spacemit,k1";
+
+ aliases {
+ serial0 = &uart0;
+ };
+
+ chosen {
+ stdout-path = "serial0";
+ };
+};
+
+&uart0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart0_2_cfg>;
+ status = "okay";
+};
diff --git a/arch/riscv/boot/dts/starfive/jh7110-common.dtsi b/arch/riscv/boot/dts/starfive/jh7110-common.dtsi
index 48fb5091b817..c2f70f5e2918 100644
--- a/arch/riscv/boot/dts/starfive/jh7110-common.dtsi
+++ b/arch/riscv/boot/dts/starfive/jh7110-common.dtsi
@@ -233,7 +233,7 @@
regulator-always-on;
regulator-min-microvolt = <500000>;
regulator-max-microvolt = <1540000>;
- regulator-name = "vdd-cpu";
+ regulator-name = "vdd_cpu";
};
emmc_vdd: aldo4 {
@@ -350,12 +350,6 @@
&spi0 {
pinctrl-names = "default";
pinctrl-0 = <&spi0_pins>;
-
- spi_dev0: spi@0 {
- compatible = "rohm,dh2228fv";
- reg = <0>;
- spi-max-frequency = <10000000>;
- };
};
&syscrg {
diff --git a/arch/riscv/boot/dts/starfive/jh7110-deepcomputing-fml13v01.dts b/arch/riscv/boot/dts/starfive/jh7110-deepcomputing-fml13v01.dts
index 30b0715196b6..8d9ce8b69a71 100644
--- a/arch/riscv/boot/dts/starfive/jh7110-deepcomputing-fml13v01.dts
+++ b/arch/riscv/boot/dts/starfive/jh7110-deepcomputing-fml13v01.dts
@@ -11,6 +11,40 @@
compatible = "deepcomputing,fml13v01", "starfive,jh7110";
};
+&pcie1 {
+ perst-gpios = <&sysgpio 21 GPIO_ACTIVE_LOW>;
+ phys = <&pciephy1>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pcie1_pins>;
+ status = "okay";
+};
+
+&sysgpio {
+ pcie1_pins: pcie1-0 {
+ clkreq-pins {
+ pinmux = <GPIOMUX(29, GPOUT_LOW,
+ GPOEN_DISABLE,
+ GPI_NONE)>;
+ bias-pull-down;
+ drive-strength = <2>;
+ input-enable;
+ input-schmitt-disable;
+ slew-rate = <0>;
+ };
+
+ wake-pins {
+ pinmux = <GPIOMUX(28, GPOUT_HIGH,
+ GPOEN_DISABLE,
+ GPI_NONE)>;
+ bias-pull-up;
+ drive-strength = <2>;
+ input-enable;
+ input-schmitt-disable;
+ slew-rate = <0>;
+ };
+ };
+};
+
&usb0 {
dr_mode = "host";
status = "okay";
diff --git a/arch/riscv/boot/dts/starfive/jh7110-pine64-star64.dts b/arch/riscv/boot/dts/starfive/jh7110-pine64-star64.dts
index b764d4d92fd9..31e825be2065 100644
--- a/arch/riscv/boot/dts/starfive/jh7110-pine64-star64.dts
+++ b/arch/riscv/boot/dts/starfive/jh7110-pine64-star64.dts
@@ -100,3 +100,8 @@
pinctrl-0 = <&usb0_pins>;
status = "okay";
};
+
+&usb_cdns3 {
+ phys = <&usbphy0>, <&pciephy0>;
+ phy-names = "cdns3,usb2-phy", "cdns3,usb3-phy";
+};
diff --git a/arch/riscv/boot/dts/starfive/jh7110-pinfunc.h b/arch/riscv/boot/dts/starfive/jh7110-pinfunc.h
index 256de17f5261..ae49c908e7fb 100644
--- a/arch/riscv/boot/dts/starfive/jh7110-pinfunc.h
+++ b/arch/riscv/boot/dts/starfive/jh7110-pinfunc.h
@@ -89,7 +89,7 @@
#define GPOUT_SYS_SDIO1_DATA1 59
#define GPOUT_SYS_SDIO1_DATA2 60
#define GPOUT_SYS_SDIO1_DATA3 61
-#define GPOUT_SYS_SDIO1_DATA4 63
+#define GPOUT_SYS_SDIO1_DATA4 62
#define GPOUT_SYS_SDIO1_DATA5 63
#define GPOUT_SYS_SDIO1_DATA6 64
#define GPOUT_SYS_SDIO1_DATA7 65
diff --git a/arch/riscv/boot/dts/starfive/jh7110.dtsi b/arch/riscv/boot/dts/starfive/jh7110.dtsi
index 0d8339357bad..0ba74ef04679 100644
--- a/arch/riscv/boot/dts/starfive/jh7110.dtsi
+++ b/arch/riscv/boot/dts/starfive/jh7110.dtsi
@@ -611,6 +611,8 @@
pciephy0: phy@10210000 {
compatible = "starfive,jh7110-pcie-phy";
reg = <0x0 0x10210000 0x0 0x10000>;
+ starfive,sys-syscon = <&sys_syscon 0x18>;
+ starfive,stg-syscon = <&stg_syscon 0x148 0x1f4>;
#phy-cells = <0>;
};
@@ -1022,7 +1024,6 @@
snps,force_thresh_dma_mode;
snps,axi-config = <&stmmac_axi_setup>;
snps,tso;
- snps,en-tx-lpi-clockgating;
snps,txpbl = <16>;
snps,rxpbl = <16>;
starfive,syscon = <&aon_syscon 0xc 0x12>;
@@ -1053,7 +1054,6 @@
snps,force_thresh_dma_mode;
snps,axi-config = <&stmmac_axi_setup>;
snps,tso;
- snps,en-tx-lpi-clockgating;
snps,txpbl = <16>;
snps,rxpbl = <16>;
starfive,syscon = <&sys_syscon 0x90 0x2>;
diff --git a/arch/riscv/include/asm/bitops.h b/arch/riscv/include/asm/bitops.h
index c6bd3d8354a9..49a0f48d93df 100644
--- a/arch/riscv/include/asm/bitops.h
+++ b/arch/riscv/include/asm/bitops.h
@@ -226,7 +226,7 @@ legacy:
* @nr: Bit to set
* @addr: Address to count from
*
- * This operation may be reordered on other architectures than x86.
+ * This is an atomic fully-ordered operation (implied full memory barrier).
*/
static __always_inline int arch_test_and_set_bit(int nr, volatile unsigned long *addr)
{
@@ -238,7 +238,7 @@ static __always_inline int arch_test_and_set_bit(int nr, volatile unsigned long
* @nr: Bit to clear
* @addr: Address to count from
*
- * This operation can be reordered on other architectures other than x86.
+ * This is an atomic fully-ordered operation (implied full memory barrier).
*/
static __always_inline int arch_test_and_clear_bit(int nr, volatile unsigned long *addr)
{
diff --git a/arch/riscv/include/asm/io.h b/arch/riscv/include/asm/io.h
index 1c5c641075d2..0257f4aa7ff4 100644
--- a/arch/riscv/include/asm/io.h
+++ b/arch/riscv/include/asm/io.h
@@ -136,7 +136,7 @@ __io_writes_outs(outs, u64, q, __io_pbr(), __io_paw())
#include <asm-generic/io.h>
#ifdef CONFIG_MMU
-#define arch_memremap_wb(addr, size) \
+#define arch_memremap_wb(addr, size, flags) \
((__force void *)ioremap_prot((addr), (size), _PAGE_KERNEL))
#endif
diff --git a/arch/riscv/include/asm/kvm_host.h b/arch/riscv/include/asm/kvm_host.h
index cc33e35cd628..0e9c2fab6378 100644
--- a/arch/riscv/include/asm/kvm_host.h
+++ b/arch/riscv/include/asm/kvm_host.h
@@ -301,8 +301,6 @@ static inline bool kvm_arch_pmi_in_guest(struct kvm_vcpu *vcpu)
return IS_ENABLED(CONFIG_GUEST_PERF_EVENTS) && !!vcpu;
}
-static inline void kvm_arch_sync_events(struct kvm *kvm) {}
-
#define KVM_RISCV_GSTAGE_TLB_MIN_ORDER 12
void kvm_riscv_local_hfence_gvma_vmid_gpa(unsigned long vmid,
diff --git a/arch/riscv/include/asm/vdso.h b/arch/riscv/include/asm/vdso.h
index f891478829a5..c130d8100232 100644
--- a/arch/riscv/include/asm/vdso.h
+++ b/arch/riscv/include/asm/vdso.h
@@ -14,7 +14,7 @@
*/
#ifdef CONFIG_MMU
-#define __VVAR_PAGES 2
+#define __VDSO_PAGES 4
#ifndef __ASSEMBLY__
#include <generated/vdso-offsets.h>
diff --git a/arch/riscv/include/asm/vdso/time_data.h b/arch/riscv/include/asm/vdso/arch_data.h
index dfa65228999b..da57a3786f7a 100644
--- a/arch/riscv/include/asm/vdso/time_data.h
+++ b/arch/riscv/include/asm/vdso/arch_data.h
@@ -1,12 +1,12 @@
/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __RISCV_ASM_VDSO_TIME_DATA_H
-#define __RISCV_ASM_VDSO_TIME_DATA_H
+#ifndef __RISCV_ASM_VDSO_ARCH_DATA_H
+#define __RISCV_ASM_VDSO_ARCH_DATA_H
#include <linux/types.h>
#include <vdso/datapage.h>
#include <asm/hwprobe.h>
-struct arch_vdso_time_data {
+struct vdso_arch_data {
/* Stash static answers to the hwprobe queries when all CPUs are selected. */
__u64 all_cpu_hwprobe_values[RISCV_HWPROBE_MAX_KEY + 1];
@@ -14,4 +14,4 @@ struct arch_vdso_time_data {
__u8 homogeneous_cpus;
};
-#endif /* __RISCV_ASM_VDSO_TIME_DATA_H */
+#endif /* __RISCV_ASM_VDSO_ARCH_DATA_H */
diff --git a/arch/riscv/include/asm/vdso/gettimeofday.h b/arch/riscv/include/asm/vdso/gettimeofday.h
index ba3283cf7acc..29164f84f93c 100644
--- a/arch/riscv/include/asm/vdso/gettimeofday.h
+++ b/arch/riscv/include/asm/vdso/gettimeofday.h
@@ -69,7 +69,7 @@ int clock_getres_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
#endif /* CONFIG_GENERIC_TIME_VSYSCALL */
static __always_inline u64 __arch_get_hw_counter(s32 clock_mode,
- const struct vdso_data *vd)
+ const struct vdso_time_data *vd)
{
/*
* The purpose of csr_read(CSR_TIME) is to trap the system into
@@ -79,18 +79,6 @@ static __always_inline u64 __arch_get_hw_counter(s32 clock_mode,
return csr_read(CSR_TIME);
}
-static __always_inline const struct vdso_data *__arch_get_vdso_data(void)
-{
- return _vdso_data;
-}
-
-#ifdef CONFIG_TIME_NS
-static __always_inline
-const struct vdso_data *__arch_get_timens_vdso_data(const struct vdso_data *vd)
-{
- return _timens_data;
-}
-#endif
#endif /* !__ASSEMBLY__ */
#endif /* __ASM_VDSO_GETTIMEOFDAY_H */
diff --git a/arch/riscv/include/asm/vdso/vsyscall.h b/arch/riscv/include/asm/vdso/vsyscall.h
index e8a9c4b53c0c..1140b54b4bc8 100644
--- a/arch/riscv/include/asm/vdso/vsyscall.h
+++ b/arch/riscv/include/asm/vdso/vsyscall.h
@@ -6,15 +6,6 @@
#include <vdso/datapage.h>
-extern struct vdso_data *vdso_data;
-
-static __always_inline struct vdso_data *__riscv_get_k_vdso_data(void)
-{
- return vdso_data;
-}
-
-#define __arch_get_k_vdso_data __riscv_get_k_vdso_data
-
/* The asm-generic header needs to be included after the definitions above */
#include <asm-generic/vdso/vsyscall.h>
diff --git a/arch/riscv/kernel/sys_hwprobe.c b/arch/riscv/kernel/sys_hwprobe.c
index bcd3b816306c..04a4e5495512 100644
--- a/arch/riscv/kernel/sys_hwprobe.c
+++ b/arch/riscv/kernel/sys_hwprobe.c
@@ -450,8 +450,7 @@ static int do_riscv_hwprobe(struct riscv_hwprobe __user *pairs,
static int __init init_hwprobe_vdso_data(void)
{
- struct vdso_data *vd = __arch_get_k_vdso_data();
- struct arch_vdso_time_data *avd = &vd->arch_data;
+ struct vdso_arch_data *avd = vdso_k_arch_data;
u64 id_bitsmash = 0;
struct riscv_hwprobe pair;
int key;
diff --git a/arch/riscv/kernel/vdso.c b/arch/riscv/kernel/vdso.c
index 3ca3ae4277e1..cc2895d1fbc2 100644
--- a/arch/riscv/kernel/vdso.c
+++ b/arch/riscv/kernel/vdso.c
@@ -13,20 +13,11 @@
#include <linux/err.h>
#include <asm/page.h>
#include <asm/vdso.h>
-#include <linux/time_namespace.h>
+#include <linux/vdso_datastore.h>
#include <vdso/datapage.h>
#include <vdso/vsyscall.h>
-enum vvar_pages {
- VVAR_DATA_PAGE_OFFSET,
- VVAR_TIMENS_PAGE_OFFSET,
- VVAR_NR_PAGES,
-};
-
-#define VVAR_SIZE (VVAR_NR_PAGES << PAGE_SHIFT)
-
-static union vdso_data_store vdso_data_store __page_aligned_data;
-struct vdso_data *vdso_data = vdso_data_store.data;
+#define VVAR_SIZE (VDSO_NR_PAGES << PAGE_SHIFT)
struct __vdso_info {
const char *name;
@@ -79,78 +70,6 @@ static void __init __vdso_init(struct __vdso_info *vdso_info)
vdso_info->cm->pages = vdso_pagelist;
}
-#ifdef CONFIG_TIME_NS
-struct vdso_data *arch_get_vdso_data(void *vvar_page)
-{
- return (struct vdso_data *)(vvar_page);
-}
-
-static const struct vm_special_mapping rv_vvar_map;
-
-/*
- * The vvar mapping contains data for a specific time namespace, so when a task
- * changes namespace we must unmap its vvar data for the old namespace.
- * Subsequent faults will map in data for the new namespace.
- *
- * For more details see timens_setup_vdso_data().
- */
-int vdso_join_timens(struct task_struct *task, struct time_namespace *ns)
-{
- struct mm_struct *mm = task->mm;
- struct vm_area_struct *vma;
- VMA_ITERATOR(vmi, mm, 0);
-
- mmap_read_lock(mm);
-
- for_each_vma(vmi, vma) {
- if (vma_is_special_mapping(vma, &rv_vvar_map))
- zap_vma_pages(vma);
- }
-
- mmap_read_unlock(mm);
- return 0;
-}
-#endif
-
-static vm_fault_t vvar_fault(const struct vm_special_mapping *sm,
- struct vm_area_struct *vma, struct vm_fault *vmf)
-{
- struct page *timens_page = find_timens_vvar_page(vma);
- unsigned long pfn;
-
- switch (vmf->pgoff) {
- case VVAR_DATA_PAGE_OFFSET:
- if (timens_page)
- pfn = page_to_pfn(timens_page);
- else
- pfn = sym_to_pfn(vdso_data);
- break;
-#ifdef CONFIG_TIME_NS
- case VVAR_TIMENS_PAGE_OFFSET:
- /*
- * If a task belongs to a time namespace then a namespace
- * specific VVAR is mapped with the VVAR_DATA_PAGE_OFFSET and
- * the real VVAR page is mapped with the VVAR_TIMENS_PAGE_OFFSET
- * offset.
- * See also the comment near timens_setup_vdso_data().
- */
- if (!timens_page)
- return VM_FAULT_SIGBUS;
- pfn = sym_to_pfn(vdso_data);
- break;
-#endif /* CONFIG_TIME_NS */
- default:
- return VM_FAULT_SIGBUS;
- }
-
- return vmf_insert_pfn(vma, vmf->address, pfn);
-}
-
-static const struct vm_special_mapping rv_vvar_map = {
- .name = "[vvar]",
- .fault = vvar_fault,
-};
-
static struct vm_special_mapping rv_vdso_map __ro_after_init = {
.name = "[vdso]",
.mremap = vdso_mremap,
@@ -196,7 +115,7 @@ static int __setup_additional_pages(struct mm_struct *mm,
unsigned long vdso_base, vdso_text_len, vdso_mapping_len;
void *ret;
- BUILD_BUG_ON(VVAR_NR_PAGES != __VVAR_PAGES);
+ BUILD_BUG_ON(VDSO_NR_PAGES != __VDSO_PAGES);
vdso_text_len = vdso_info->vdso_pages << PAGE_SHIFT;
/* Be sure to map the data page */
@@ -208,8 +127,7 @@ static int __setup_additional_pages(struct mm_struct *mm,
goto up_fail;
}
- ret = _install_special_mapping(mm, vdso_base, VVAR_SIZE,
- (VM_READ | VM_MAYREAD | VM_PFNMAP), &rv_vvar_map);
+ ret = vdso_install_vvar_mapping(mm, vdso_base);
if (IS_ERR(ret))
goto up_fail;
diff --git a/arch/riscv/kernel/vdso/Makefile b/arch/riscv/kernel/vdso/Makefile
index 9a1b555e8733..ad73607abc28 100644
--- a/arch/riscv/kernel/vdso/Makefile
+++ b/arch/riscv/kernel/vdso/Makefile
@@ -2,7 +2,7 @@
# Copied from arch/tile/kernel/vdso/Makefile
# Include the generic Makefile to check the built vdso.
-include $(srctree)/lib/vdso/Makefile
+include $(srctree)/lib/vdso/Makefile.include
# Symbols present in the vdso
vdso-syms = rt_sigreturn
ifdef CONFIG_64BIT
diff --git a/arch/riscv/kernel/vdso/hwprobe.c b/arch/riscv/kernel/vdso/hwprobe.c
index a158c029344f..2ddeba6c68dd 100644
--- a/arch/riscv/kernel/vdso/hwprobe.c
+++ b/arch/riscv/kernel/vdso/hwprobe.c
@@ -16,8 +16,7 @@ static int riscv_vdso_get_values(struct riscv_hwprobe *pairs, size_t pair_count,
size_t cpusetsize, unsigned long *cpus,
unsigned int flags)
{
- const struct vdso_data *vd = __arch_get_vdso_data();
- const struct arch_vdso_time_data *avd = &vd->arch_data;
+ const struct vdso_arch_data *avd = &vdso_u_arch_data;
bool all_cpus = !cpusetsize && !cpus;
struct riscv_hwprobe *p = pairs;
struct riscv_hwprobe *end = pairs + pair_count;
@@ -51,8 +50,7 @@ static int riscv_vdso_get_cpus(struct riscv_hwprobe *pairs, size_t pair_count,
size_t cpusetsize, unsigned long *cpus,
unsigned int flags)
{
- const struct vdso_data *vd = __arch_get_vdso_data();
- const struct arch_vdso_time_data *avd = &vd->arch_data;
+ const struct vdso_arch_data *avd = &vdso_u_arch_data;
struct riscv_hwprobe *p = pairs;
struct riscv_hwprobe *end = pairs + pair_count;
unsigned char *c = (unsigned char *)cpus;
diff --git a/arch/riscv/kernel/vdso/vdso.lds.S b/arch/riscv/kernel/vdso/vdso.lds.S
index cbe2a179331d..8e86965a8aae 100644
--- a/arch/riscv/kernel/vdso/vdso.lds.S
+++ b/arch/riscv/kernel/vdso/vdso.lds.S
@@ -4,15 +4,14 @@
*/
#include <asm/page.h>
#include <asm/vdso.h>
+#include <vdso/datapage.h>
OUTPUT_ARCH(riscv)
SECTIONS
{
- PROVIDE(_vdso_data = . - __VVAR_PAGES * PAGE_SIZE);
-#ifdef CONFIG_TIME_NS
- PROVIDE(_timens_data = _vdso_data + PAGE_SIZE);
-#endif
+ VDSO_VVAR_SYMS
+
. = SIZEOF_HEADERS;
.hash : { *(.hash) } :text
diff --git a/arch/riscv/kvm/main.c b/arch/riscv/kvm/main.c
index 1fa8be5ee509..4b24705dc63a 100644
--- a/arch/riscv/kvm/main.c
+++ b/arch/riscv/kvm/main.c
@@ -172,8 +172,8 @@ module_init(riscv_kvm_init);
static void __exit riscv_kvm_exit(void)
{
- kvm_riscv_teardown();
-
kvm_exit();
+
+ kvm_riscv_teardown();
}
module_exit(riscv_kvm_exit);
diff --git a/arch/riscv/kvm/vcpu_onereg.c b/arch/riscv/kvm/vcpu_onereg.c
index f6d27b59c641..43ee8e33ba23 100644
--- a/arch/riscv/kvm/vcpu_onereg.c
+++ b/arch/riscv/kvm/vcpu_onereg.c
@@ -203,7 +203,7 @@ static bool kvm_riscv_vcpu_isa_disable_allowed(unsigned long ext)
case KVM_RISCV_ISA_EXT_SVADE:
/*
* The henvcfg.ADUE is read-only zero if menvcfg.ADUE is zero.
- * Svade is not allowed to disable when the platform use Svade.
+ * Svade can't be disabled unless we support Svadu.
*/
return arch_has_hw_pte_young();
default:
diff --git a/arch/riscv/kvm/vcpu_pmu.c b/arch/riscv/kvm/vcpu_pmu.c
index 2707a51b082c..78ac3216a54d 100644
--- a/arch/riscv/kvm/vcpu_pmu.c
+++ b/arch/riscv/kvm/vcpu_pmu.c
@@ -666,6 +666,7 @@ int kvm_riscv_vcpu_pmu_ctr_cfg_match(struct kvm_vcpu *vcpu, unsigned long ctr_ba
.type = etype,
.size = sizeof(struct perf_event_attr),
.pinned = true,
+ .disabled = true,
/*
* It should never reach here if the platform doesn't support the sscofpmf
* extension as mode filtering won't work without it.
diff --git a/arch/riscv/kvm/vcpu_timer.c b/arch/riscv/kvm/vcpu_timer.c
index 96e7a4e463f7..ff672fa71fcc 100644
--- a/arch/riscv/kvm/vcpu_timer.c
+++ b/arch/riscv/kvm/vcpu_timer.c
@@ -248,18 +248,19 @@ int kvm_riscv_vcpu_timer_init(struct kvm_vcpu *vcpu)
if (t->init_done)
return -EINVAL;
- hrtimer_init(&t->hrt, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
t->init_done = true;
t->next_set = false;
/* Enable sstc for every vcpu if available in hardware */
if (riscv_isa_extension_available(NULL, SSTC)) {
t->sstc_enabled = true;
- t->hrt.function = kvm_riscv_vcpu_vstimer_expired;
+ hrtimer_setup(&t->hrt, kvm_riscv_vcpu_vstimer_expired, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
t->timer_next_event = kvm_riscv_vcpu_update_vstimecmp;
} else {
t->sstc_enabled = false;
- t->hrt.function = kvm_riscv_vcpu_hrtimer_expired;
+ hrtimer_setup(&t->hrt, kvm_riscv_vcpu_hrtimer_expired, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
t->timer_next_event = kvm_riscv_vcpu_update_hrtimer;
}
diff --git a/arch/riscv/lib/Makefile b/arch/riscv/lib/Makefile
index 79368a895fee..b1c46153606a 100644
--- a/arch/riscv/lib/Makefile
+++ b/arch/riscv/lib/Makefile
@@ -16,6 +16,11 @@ lib-$(CONFIG_MMU) += uaccess.o
lib-$(CONFIG_64BIT) += tishift.o
lib-$(CONFIG_RISCV_ISA_ZICBOZ) += clear_page.o
obj-$(CONFIG_CRC32_ARCH) += crc32-riscv.o
+crc32-riscv-y := crc32.o crc32_msb.o crc32_lsb.o
+obj-$(CONFIG_CRC64_ARCH) += crc64-riscv.o
+crc64-riscv-y := crc64.o crc64_msb.o crc64_lsb.o
+obj-$(CONFIG_CRC_T10DIF_ARCH) += crc-t10dif-riscv.o
+crc-t10dif-riscv-y := crc-t10dif.o crc16_msb.o
obj-$(CONFIG_FUNCTION_ERROR_INJECTION) += error-inject.o
lib-$(CONFIG_RISCV_ISA_V) += xor.o
lib-$(CONFIG_RISCV_ISA_V) += riscv_v_helpers.o
diff --git a/arch/riscv/lib/crc-clmul-consts.h b/arch/riscv/lib/crc-clmul-consts.h
new file mode 100644
index 000000000000..8d73449235ef
--- /dev/null
+++ b/arch/riscv/lib/crc-clmul-consts.h
@@ -0,0 +1,122 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * CRC constants generated by:
+ *
+ * ./scripts/gen-crc-consts.py riscv_clmul crc16_msb_0x8bb7,crc32_msb_0x04c11db7,crc32_lsb_0xedb88320,crc32_lsb_0x82f63b78,crc64_msb_0x42f0e1eba9ea3693,crc64_lsb_0x9a6c9329ac4bc9b5
+ *
+ * Do not edit manually.
+ */
+
+struct crc_clmul_consts {
+ unsigned long fold_across_2_longs_const_hi;
+ unsigned long fold_across_2_longs_const_lo;
+ unsigned long barrett_reduction_const_1;
+ unsigned long barrett_reduction_const_2;
+};
+
+/*
+ * Constants generated for most-significant-bit-first CRC-16 using
+ * G(x) = x^16 + x^15 + x^11 + x^9 + x^8 + x^7 + x^5 + x^4 + x^2 + x^1 + x^0
+ */
+static const struct crc_clmul_consts crc16_msb_0x8bb7_consts __maybe_unused = {
+#ifdef CONFIG_64BIT
+ .fold_across_2_longs_const_hi = 0x0000000000001faa, /* x^192 mod G */
+ .fold_across_2_longs_const_lo = 0x000000000000a010, /* x^128 mod G */
+ .barrett_reduction_const_1 = 0xfb2d2bfc0e99d245, /* floor(x^79 / G) */
+ .barrett_reduction_const_2 = 0x0000000000008bb7, /* G - x^16 */
+#else
+ .fold_across_2_longs_const_hi = 0x00005890, /* x^96 mod G */
+ .fold_across_2_longs_const_lo = 0x0000f249, /* x^64 mod G */
+ .barrett_reduction_const_1 = 0xfb2d2bfc, /* floor(x^47 / G) */
+ .barrett_reduction_const_2 = 0x00008bb7, /* G - x^16 */
+#endif
+};
+
+/*
+ * Constants generated for most-significant-bit-first CRC-32 using
+ * G(x) = x^32 + x^26 + x^23 + x^22 + x^16 + x^12 + x^11 + x^10 + x^8 + x^7 +
+ * x^5 + x^4 + x^2 + x^1 + x^0
+ */
+static const struct crc_clmul_consts crc32_msb_0x04c11db7_consts __maybe_unused = {
+#ifdef CONFIG_64BIT
+ .fold_across_2_longs_const_hi = 0x00000000c5b9cd4c, /* x^192 mod G */
+ .fold_across_2_longs_const_lo = 0x00000000e8a45605, /* x^128 mod G */
+ .barrett_reduction_const_1 = 0x826880efa40da72d, /* floor(x^95 / G) */
+ .barrett_reduction_const_2 = 0x0000000004c11db7, /* G - x^32 */
+#else
+ .fold_across_2_longs_const_hi = 0xf200aa66, /* x^96 mod G */
+ .fold_across_2_longs_const_lo = 0x490d678d, /* x^64 mod G */
+ .barrett_reduction_const_1 = 0x826880ef, /* floor(x^63 / G) */
+ .barrett_reduction_const_2 = 0x04c11db7, /* G - x^32 */
+#endif
+};
+
+/*
+ * Constants generated for least-significant-bit-first CRC-32 using
+ * G(x) = x^32 + x^26 + x^23 + x^22 + x^16 + x^12 + x^11 + x^10 + x^8 + x^7 +
+ * x^5 + x^4 + x^2 + x^1 + x^0
+ */
+static const struct crc_clmul_consts crc32_lsb_0xedb88320_consts __maybe_unused = {
+#ifdef CONFIG_64BIT
+ .fold_across_2_longs_const_hi = 0x65673b4600000000, /* x^191 mod G */
+ .fold_across_2_longs_const_lo = 0x9ba54c6f00000000, /* x^127 mod G */
+ .barrett_reduction_const_1 = 0xb4e5b025f7011641, /* floor(x^95 / G) */
+ .barrett_reduction_const_2 = 0x00000000edb88320, /* (G - x^32) * x^32 */
+#else
+ .fold_across_2_longs_const_hi = 0xccaa009e, /* x^95 mod G */
+ .fold_across_2_longs_const_lo = 0xb8bc6765, /* x^63 mod G */
+ .barrett_reduction_const_1 = 0xf7011641, /* floor(x^63 / G) */
+ .barrett_reduction_const_2 = 0xedb88320, /* (G - x^32) * x^0 */
+#endif
+};
+
+/*
+ * Constants generated for least-significant-bit-first CRC-32 using
+ * G(x) = x^32 + x^28 + x^27 + x^26 + x^25 + x^23 + x^22 + x^20 + x^19 + x^18 +
+ * x^14 + x^13 + x^11 + x^10 + x^9 + x^8 + x^6 + x^0
+ */
+static const struct crc_clmul_consts crc32_lsb_0x82f63b78_consts __maybe_unused = {
+#ifdef CONFIG_64BIT
+ .fold_across_2_longs_const_hi = 0x3743f7bd00000000, /* x^191 mod G */
+ .fold_across_2_longs_const_lo = 0x3171d43000000000, /* x^127 mod G */
+ .barrett_reduction_const_1 = 0x4869ec38dea713f1, /* floor(x^95 / G) */
+ .barrett_reduction_const_2 = 0x0000000082f63b78, /* (G - x^32) * x^32 */
+#else
+ .fold_across_2_longs_const_hi = 0x493c7d27, /* x^95 mod G */
+ .fold_across_2_longs_const_lo = 0xdd45aab8, /* x^63 mod G */
+ .barrett_reduction_const_1 = 0xdea713f1, /* floor(x^63 / G) */
+ .barrett_reduction_const_2 = 0x82f63b78, /* (G - x^32) * x^0 */
+#endif
+};
+
+/*
+ * Constants generated for most-significant-bit-first CRC-64 using
+ * G(x) = x^64 + x^62 + x^57 + x^55 + x^54 + x^53 + x^52 + x^47 + x^46 + x^45 +
+ * x^40 + x^39 + x^38 + x^37 + x^35 + x^33 + x^32 + x^31 + x^29 + x^27 +
+ * x^24 + x^23 + x^22 + x^21 + x^19 + x^17 + x^13 + x^12 + x^10 + x^9 +
+ * x^7 + x^4 + x^1 + x^0
+ */
+#ifdef CONFIG_64BIT
+static const struct crc_clmul_consts crc64_msb_0x42f0e1eba9ea3693_consts __maybe_unused = {
+ .fold_across_2_longs_const_hi = 0x4eb938a7d257740e, /* x^192 mod G */
+ .fold_across_2_longs_const_lo = 0x05f5c3c7eb52fab6, /* x^128 mod G */
+ .barrett_reduction_const_1 = 0xabc694e836627c39, /* floor(x^127 / G) */
+ .barrett_reduction_const_2 = 0x42f0e1eba9ea3693, /* G - x^64 */
+};
+#endif
+
+/*
+ * Constants generated for least-significant-bit-first CRC-64 using
+ * G(x) = x^64 + x^63 + x^61 + x^59 + x^58 + x^56 + x^55 + x^52 + x^49 + x^48 +
+ * x^47 + x^46 + x^44 + x^41 + x^37 + x^36 + x^34 + x^32 + x^31 + x^28 +
+ * x^26 + x^23 + x^22 + x^19 + x^16 + x^13 + x^12 + x^10 + x^9 + x^6 +
+ * x^4 + x^3 + x^0
+ */
+#ifdef CONFIG_64BIT
+static const struct crc_clmul_consts crc64_lsb_0x9a6c9329ac4bc9b5_consts __maybe_unused = {
+ .fold_across_2_longs_const_hi = 0xeadc41fd2ba3d420, /* x^191 mod G */
+ .fold_across_2_longs_const_lo = 0x21e9761e252621ac, /* x^127 mod G */
+ .barrett_reduction_const_1 = 0x27ecfa329aef9f77, /* floor(x^127 / G) */
+ .barrett_reduction_const_2 = 0x9a6c9329ac4bc9b5, /* (G - x^64) * x^0 */
+};
+#endif
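
The constants above are all either "x^N mod G" folding multipliers or Barrett quotients for a fixed generator, so any individual table entry can be recomputed with a few lines of plain C. A standalone sketch (not part of the patch; the helper name is made up) that reproduces the CRC-16 folding constant documented as "x^128 mod G" above:

	#include <stdint.h>
	#include <stdio.h>

	/*
	 * Compute x^n mod G for the msb-first CRC-16 generator
	 * G(x) = x^16 + x^15 + ... + x^0, stored as 0x8bb7 with the x^16 term implicit.
	 */
	static uint16_t xpow_mod_g16(unsigned int n)
	{
		uint32_t r = 1;			/* start from x^0 */

		while (n--) {
			r <<= 1;		/* multiply by x */
			if (r & 0x10000)	/* x^16 term appeared: reduce mod G */
				r ^= 0x18bb7;
		}
		return r;
	}

	int main(void)
	{
		/* Should print 0xa010, matching fold_across_2_longs_const_lo above. */
		printf("x^128 mod G = 0x%04x\n", xpow_mod_g16(128));
		return 0;
	}

The 32-bit entries (x^64 mod G and x^96 mod G) fall out of the same loop with different exponents; the Barrett quotients need a polynomial long division instead.
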
diff --git a/arch/riscv/lib/crc-clmul-template.h b/arch/riscv/lib/crc-clmul-template.h
new file mode 100644
index 000000000000..77187e7f1762
--- /dev/null
+++ b/arch/riscv/lib/crc-clmul-template.h
@@ -0,0 +1,265 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* Copyright 2025 Google LLC */
+
+/*
+ * This file is a "template" that generates a CRC function optimized using the
+ * RISC-V Zbc (scalar carryless multiplication) extension. The includer of this
+ * file must define the following parameters to specify the type of CRC:
+ *
+ * crc_t: the data type of the CRC, e.g. u32 for a 32-bit CRC
+ * LSB_CRC: 0 for a msb (most-significant-bit) first CRC, i.e. natural
+ * mapping between bits and polynomial coefficients
+ * 1 for a lsb (least-significant-bit) first CRC, i.e. reflected
+ * mapping between bits and polynomial coefficients
+ */
+
+#include <asm/byteorder.h>
+#include <linux/minmax.h>
+
+#define CRC_BITS (8 * sizeof(crc_t)) /* a.k.a. 'n' */
+
+static inline unsigned long clmul(unsigned long a, unsigned long b)
+{
+ unsigned long res;
+
+ asm(".option push\n"
+ ".option arch,+zbc\n"
+ "clmul %0, %1, %2\n"
+ ".option pop\n"
+ : "=r" (res) : "r" (a), "r" (b));
+ return res;
+}
+
+static inline unsigned long clmulh(unsigned long a, unsigned long b)
+{
+ unsigned long res;
+
+ asm(".option push\n"
+ ".option arch,+zbc\n"
+ "clmulh %0, %1, %2\n"
+ ".option pop\n"
+ : "=r" (res) : "r" (a), "r" (b));
+ return res;
+}
+
+static inline unsigned long clmulr(unsigned long a, unsigned long b)
+{
+ unsigned long res;
+
+ asm(".option push\n"
+ ".option arch,+zbc\n"
+ "clmulr %0, %1, %2\n"
+ ".option pop\n"
+ : "=r" (res) : "r" (a), "r" (b));
+ return res;
+}
+
+/*
+ * crc_load_long() loads one "unsigned long" of aligned data bytes, producing a
+ * polynomial whose bit order matches the CRC's bit order.
+ */
+#ifdef CONFIG_64BIT
+# if LSB_CRC
+# define crc_load_long(x) le64_to_cpup(x)
+# else
+# define crc_load_long(x) be64_to_cpup(x)
+# endif
+#else
+# if LSB_CRC
+# define crc_load_long(x) le32_to_cpup(x)
+# else
+# define crc_load_long(x) be32_to_cpup(x)
+# endif
+#endif
+
+/* XOR @crc into the end of @msgpoly that represents the high-order terms. */
+static inline unsigned long
+crc_clmul_prep(crc_t crc, unsigned long msgpoly)
+{
+#if LSB_CRC
+ return msgpoly ^ crc;
+#else
+ return msgpoly ^ ((unsigned long)crc << (BITS_PER_LONG - CRC_BITS));
+#endif
+}
+
+/*
+ * Multiply the long-sized @msgpoly by x^n (a.k.a. x^CRC_BITS) and reduce it
+ * modulo the generator polynomial G. This gives the CRC of @msgpoly.
+ */
+static inline crc_t
+crc_clmul_long(unsigned long msgpoly, const struct crc_clmul_consts *consts)
+{
+ unsigned long tmp;
+
+ /*
+ * First step of Barrett reduction with integrated multiplication by
+ * x^n: calculate floor((msgpoly * x^n) / G). This is the value by
+ * which G needs to be multiplied to cancel out the x^n and higher terms
+ * of msgpoly * x^n. Do it using the following formula:
+ *
+ * msb-first:
+ * floor((msgpoly * floor(x^(BITS_PER_LONG-1+n) / G)) / x^(BITS_PER_LONG-1))
+ * lsb-first:
+ * floor((msgpoly * floor(x^(BITS_PER_LONG-1+n) / G) * x) / x^BITS_PER_LONG)
+ *
+ * barrett_reduction_const_1 contains floor(x^(BITS_PER_LONG-1+n) / G),
+ * which fits a long exactly. Using any lower power of x there would
+ * not carry enough precision through the calculation, while using any
+ * higher power of x would require extra instructions to handle a wider
+ * multiplication. In the msb-first case, using this power of x results
+ * in needing a floored division by x^(BITS_PER_LONG-1), which matches
+ * what clmulr produces. In the lsb-first case, a factor of x gets
+ * implicitly introduced by each carryless multiplication (shown as
+ * '* x' above), and the floored division instead needs to be by
+ * x^BITS_PER_LONG which matches what clmul produces.
+ */
+#if LSB_CRC
+ tmp = clmul(msgpoly, consts->barrett_reduction_const_1);
+#else
+ tmp = clmulr(msgpoly, consts->barrett_reduction_const_1);
+#endif
+
+ /*
+ * Second step of Barrett reduction:
+ *
+ * crc := (msgpoly * x^n) + (G * floor((msgpoly * x^n) / G))
+ *
+ * This reduces (msgpoly * x^n) modulo G by adding the appropriate
+ * multiple of G to it. The result uses only the x^0..x^(n-1) terms.
+ * HOWEVER, since the unreduced value (msgpoly * x^n) is zero in those
+ * terms in the first place, it is more efficient to do the equivalent:
+ *
+ * crc := ((G - x^n) * floor((msgpoly * x^n) / G)) mod x^n
+ *
+ * In the lsb-first case further modify it to the following which avoids
+ * a shift, as the crc ends up in the physically low n bits from clmulr:
+ *
+ * product := ((G - x^n) * x^(BITS_PER_LONG - n)) * floor((msgpoly * x^n) / G) * x
+ * crc := floor(product / x^(BITS_PER_LONG + 1 - n)) mod x^n
+ *
+ * barrett_reduction_const_2 contains the constant multiplier (G - x^n)
+ * or (G - x^n) * x^(BITS_PER_LONG - n) from the formulas above. The
+ * cast of the result to crc_t is essential, as it applies the mod x^n!
+ */
+#if LSB_CRC
+ return clmulr(tmp, consts->barrett_reduction_const_2);
+#else
+ return clmul(tmp, consts->barrett_reduction_const_2);
+#endif
+}
+
+/* Update @crc with the data from @msgpoly. */
+static inline crc_t
+crc_clmul_update_long(crc_t crc, unsigned long msgpoly,
+ const struct crc_clmul_consts *consts)
+{
+ return crc_clmul_long(crc_clmul_prep(crc, msgpoly), consts);
+}
+
+/* Update @crc with 1 <= @len < sizeof(unsigned long) bytes of data. */
+static inline crc_t
+crc_clmul_update_partial(crc_t crc, const u8 *p, size_t len,
+ const struct crc_clmul_consts *consts)
+{
+ unsigned long msgpoly;
+ size_t i;
+
+#if LSB_CRC
+ msgpoly = (unsigned long)p[0] << (BITS_PER_LONG - 8);
+ for (i = 1; i < len; i++)
+ msgpoly = (msgpoly >> 8) ^ ((unsigned long)p[i] << (BITS_PER_LONG - 8));
+#else
+ msgpoly = p[0];
+ for (i = 1; i < len; i++)
+ msgpoly = (msgpoly << 8) ^ p[i];
+#endif
+
+ if (len >= sizeof(crc_t)) {
+ #if LSB_CRC
+ msgpoly ^= (unsigned long)crc << (BITS_PER_LONG - 8*len);
+ #else
+ msgpoly ^= (unsigned long)crc << (8*len - CRC_BITS);
+ #endif
+ return crc_clmul_long(msgpoly, consts);
+ }
+#if LSB_CRC
+ msgpoly ^= (unsigned long)crc << (BITS_PER_LONG - 8*len);
+ return crc_clmul_long(msgpoly, consts) ^ (crc >> (8*len));
+#else
+ msgpoly ^= crc >> (CRC_BITS - 8*len);
+ return crc_clmul_long(msgpoly, consts) ^ (crc << (8*len));
+#endif
+}
+
+static inline crc_t
+crc_clmul(crc_t crc, const void *p, size_t len,
+ const struct crc_clmul_consts *consts)
+{
+ size_t align;
+
+ /* This implementation assumes that the CRC fits in an unsigned long. */
+ BUILD_BUG_ON(sizeof(crc_t) > sizeof(unsigned long));
+
+ /* If the buffer is not long-aligned, align it. */
+ align = (unsigned long)p % sizeof(unsigned long);
+ if (align && len) {
+ align = min(sizeof(unsigned long) - align, len);
+ crc = crc_clmul_update_partial(crc, p, align, consts);
+ p += align;
+ len -= align;
+ }
+
+ if (len >= 4 * sizeof(unsigned long)) {
+ unsigned long m0, m1;
+
+ m0 = crc_clmul_prep(crc, crc_load_long(p));
+ m1 = crc_load_long(p + sizeof(unsigned long));
+ p += 2 * sizeof(unsigned long);
+ len -= 2 * sizeof(unsigned long);
+ /*
+ * Main loop. Each iteration starts with a message polynomial
+ * (x^BITS_PER_LONG)*m0 + m1, then logically extends it by two
+ * more longs of data to form x^(3*BITS_PER_LONG)*m0 +
+ * x^(2*BITS_PER_LONG)*m1 + x^BITS_PER_LONG*m2 + m3, then
+ * "folds" that back into a congruent (modulo G) value that uses
+ * just m0 and m1 again. This is done by multiplying m0 by the
+ * precomputed constant (x^(3*BITS_PER_LONG) mod G) and m1 by
+ * the precomputed constant (x^(2*BITS_PER_LONG) mod G), then
+ * adding the results to m2 and m3 as appropriate. Each such
+ * multiplication produces a result twice the length of a long,
+ * which in RISC-V is two instructions clmul and clmulh.
+ *
+ * This could be changed to fold across more than 2 longs at a
+ * time if there is a CPU that can take advantage of it.
+ */
+ do {
+ unsigned long p0, p1, p2, p3;
+
+ p0 = clmulh(m0, consts->fold_across_2_longs_const_hi);
+ p1 = clmul(m0, consts->fold_across_2_longs_const_hi);
+ p2 = clmulh(m1, consts->fold_across_2_longs_const_lo);
+ p3 = clmul(m1, consts->fold_across_2_longs_const_lo);
+ m0 = (LSB_CRC ? p1 ^ p3 : p0 ^ p2) ^ crc_load_long(p);
+ m1 = (LSB_CRC ? p0 ^ p2 : p1 ^ p3) ^
+ crc_load_long(p + sizeof(unsigned long));
+
+ p += 2 * sizeof(unsigned long);
+ len -= 2 * sizeof(unsigned long);
+ } while (len >= 2 * sizeof(unsigned long));
+
+ crc = crc_clmul_long(m0, consts);
+ crc = crc_clmul_update_long(crc, m1, consts);
+ }
+
+ while (len >= sizeof(unsigned long)) {
+ crc = crc_clmul_update_long(crc, crc_load_long(p), consts);
+ p += sizeof(unsigned long);
+ len -= sizeof(unsigned long);
+ }
+
+ if (len)
+ crc = crc_clmul_update_partial(crc, p, len, consts);
+
+ return crc;
+}
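
Because the interesting corner cases in crc_clmul() are all at the boundaries (unaligned head, sub-long tail, buffers shorter than sizeof(crc_t)), a bit-at-a-time reference CRC is useful for cross-checking the clmul path on small buffers at every alignment. A minimal sketch for the msb-first CRC-16 case (illustrative only; the reference function and the self-test idea are not part of this series):

	#include <stddef.h>
	#include <stdint.h>

	/* Plain bitwise msb-first CRC-16 over G = 0x8bb7 (x^16 term implicit). */
	static uint16_t crc16_msb_ref(uint16_t crc, const uint8_t *p, size_t len)
	{
		while (len--) {
			crc ^= (uint16_t)(*p++ << 8);
			for (int i = 0; i < 8; i++)
				crc = (crc & 0x8000) ? (uint16_t)((crc << 1) ^ 0x8bb7)
						     : (uint16_t)(crc << 1);
		}
		return crc;
	}

Comparing crc16_msb_clmul() against such a reference for lengths from zero up to a few multiples of sizeof(unsigned long), at every starting alignment, exercises crc_clmul_update_partial(), the head/tail handling, and the fold-across-2-longs loop.
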
diff --git a/arch/riscv/lib/crc-clmul.h b/arch/riscv/lib/crc-clmul.h
new file mode 100644
index 000000000000..dd1736245815
--- /dev/null
+++ b/arch/riscv/lib/crc-clmul.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* Copyright 2025 Google LLC */
+
+#ifndef _RISCV_CRC_CLMUL_H
+#define _RISCV_CRC_CLMUL_H
+
+#include <linux/types.h>
+#include "crc-clmul-consts.h"
+
+u16 crc16_msb_clmul(u16 crc, const void *p, size_t len,
+ const struct crc_clmul_consts *consts);
+u32 crc32_msb_clmul(u32 crc, const void *p, size_t len,
+ const struct crc_clmul_consts *consts);
+u32 crc32_lsb_clmul(u32 crc, const void *p, size_t len,
+ const struct crc_clmul_consts *consts);
+#ifdef CONFIG_64BIT
+u64 crc64_msb_clmul(u64 crc, const void *p, size_t len,
+ const struct crc_clmul_consts *consts);
+u64 crc64_lsb_clmul(u64 crc, const void *p, size_t len,
+ const struct crc_clmul_consts *consts);
+#endif
+
+#endif /* _RISCV_CRC_CLMUL_H */
diff --git a/arch/riscv/lib/crc-t10dif.c b/arch/riscv/lib/crc-t10dif.c
new file mode 100644
index 000000000000..e6b0051ccd86
--- /dev/null
+++ b/arch/riscv/lib/crc-t10dif.c
@@ -0,0 +1,24 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * RISC-V optimized CRC-T10DIF function
+ *
+ * Copyright 2025 Google LLC
+ */
+
+#include <asm/hwcap.h>
+#include <asm/alternative-macros.h>
+#include <linux/crc-t10dif.h>
+#include <linux/module.h>
+
+#include "crc-clmul.h"
+
+u16 crc_t10dif_arch(u16 crc, const u8 *p, size_t len)
+{
+ if (riscv_has_extension_likely(RISCV_ISA_EXT_ZBC))
+ return crc16_msb_clmul(crc, p, len, &crc16_msb_0x8bb7_consts);
+ return crc_t10dif_generic(crc, p, len);
+}
+EXPORT_SYMBOL(crc_t10dif_arch);
+
+MODULE_DESCRIPTION("RISC-V optimized CRC-T10DIF function");
+MODULE_LICENSE("GPL");
diff --git a/arch/riscv/lib/crc16_msb.c b/arch/riscv/lib/crc16_msb.c
new file mode 100644
index 000000000000..554d295e95f5
--- /dev/null
+++ b/arch/riscv/lib/crc16_msb.c
@@ -0,0 +1,18 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * RISC-V optimized most-significant-bit-first CRC16
+ *
+ * Copyright 2025 Google LLC
+ */
+
+#include "crc-clmul.h"
+
+typedef u16 crc_t;
+#define LSB_CRC 0
+#include "crc-clmul-template.h"
+
+u16 crc16_msb_clmul(u16 crc, const void *p, size_t len,
+ const struct crc_clmul_consts *consts)
+{
+ return crc_clmul(crc, p, len, consts);
+}
diff --git a/arch/riscv/lib/crc32-riscv.c b/arch/riscv/lib/crc32-riscv.c
deleted file mode 100644
index 53d56ab422c7..000000000000
--- a/arch/riscv/lib/crc32-riscv.c
+++ /dev/null
@@ -1,311 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Accelerated CRC32 implementation with Zbc extension.
- *
- * Copyright (C) 2024 Intel Corporation
- */
-
-#include <asm/hwcap.h>
-#include <asm/alternative-macros.h>
-#include <asm/byteorder.h>
-
-#include <linux/types.h>
-#include <linux/minmax.h>
-#include <linux/crc32poly.h>
-#include <linux/crc32.h>
-#include <linux/byteorder/generic.h>
-#include <linux/module.h>
-
-/*
- * Refer to https://www.corsix.org/content/barrett-reduction-polynomials for
- * better understanding of how this math works.
- *
- * let "+" denotes polynomial add (XOR)
- * let "-" denotes polynomial sub (XOR)
- * let "*" denotes polynomial multiplication
- * let "/" denotes polynomial floor division
- * let "S" denotes source data, XLEN bit wide
- * let "P" denotes CRC32 polynomial
- * let "T" denotes 2^(XLEN+32)
- * let "QT" denotes quotient of T/P, with the bit for 2^XLEN being implicit
- *
- * crc32(S, P)
- * => S * (2^32) - S * (2^32) / P * P
- * => lowest 32 bits of: S * (2^32) / P * P
- * => lowest 32 bits of: S * (2^32) * (T / P) / T * P
- * => lowest 32 bits of: S * (2^32) * quotient / T * P
- * => lowest 32 bits of: S * quotient / 2^XLEN * P
- * => lowest 32 bits of: (clmul_high_part(S, QT) + S) * P
- * => clmul_low_part(clmul_high_part(S, QT) + S, P)
- *
- * In terms of below implementations, the BE case is more intuitive, since the
- * higher order bit sits at more significant position.
- */
-
-#if __riscv_xlen == 64
-/* Slide by XLEN bits per iteration */
-# define STEP_ORDER 3
-
-/* Each below polynomial quotient has an implicit bit for 2^XLEN */
-
-/* Polynomial quotient of (2^(XLEN+32))/CRC32_POLY, in LE format */
-# define CRC32_POLY_QT_LE 0x5a72d812fb808b20
-
-/* Polynomial quotient of (2^(XLEN+32))/CRC32C_POLY, in LE format */
-# define CRC32C_POLY_QT_LE 0xa434f61c6f5389f8
-
-/* Polynomial quotient of (2^(XLEN+32))/CRC32_POLY, in BE format, it should be
- * the same as the bit-reversed version of CRC32_POLY_QT_LE
- */
-# define CRC32_POLY_QT_BE 0x04d101df481b4e5a
-
-static inline u64 crc32_le_prep(u32 crc, unsigned long const *ptr)
-{
- return (u64)crc ^ (__force u64)__cpu_to_le64(*ptr);
-}
-
-static inline u32 crc32_le_zbc(unsigned long s, u32 poly, unsigned long poly_qt)
-{
- u32 crc;
-
- /* We don't have a "clmulrh" insn, so use clmul + slli instead. */
- asm volatile (".option push\n"
- ".option arch,+zbc\n"
- "clmul %0, %1, %2\n"
- "slli %0, %0, 1\n"
- "xor %0, %0, %1\n"
- "clmulr %0, %0, %3\n"
- "srli %0, %0, 32\n"
- ".option pop\n"
- : "=&r" (crc)
- : "r" (s),
- "r" (poly_qt),
- "r" ((u64)poly << 32)
- :);
- return crc;
-}
-
-static inline u64 crc32_be_prep(u32 crc, unsigned long const *ptr)
-{
- return ((u64)crc << 32) ^ (__force u64)__cpu_to_be64(*ptr);
-}
-
-#elif __riscv_xlen == 32
-# define STEP_ORDER 2
-/* Each quotient should match the upper half of its analog in RV64 */
-# define CRC32_POLY_QT_LE 0xfb808b20
-# define CRC32C_POLY_QT_LE 0x6f5389f8
-# define CRC32_POLY_QT_BE 0x04d101df
-
-static inline u32 crc32_le_prep(u32 crc, unsigned long const *ptr)
-{
- return crc ^ (__force u32)__cpu_to_le32(*ptr);
-}
-
-static inline u32 crc32_le_zbc(unsigned long s, u32 poly, unsigned long poly_qt)
-{
- u32 crc;
-
- /* We don't have a "clmulrh" insn, so use clmul + slli instead. */
- asm volatile (".option push\n"
- ".option arch,+zbc\n"
- "clmul %0, %1, %2\n"
- "slli %0, %0, 1\n"
- "xor %0, %0, %1\n"
- "clmulr %0, %0, %3\n"
- ".option pop\n"
- : "=&r" (crc)
- : "r" (s),
- "r" (poly_qt),
- "r" (poly)
- :);
- return crc;
-}
-
-static inline u32 crc32_be_prep(u32 crc, unsigned long const *ptr)
-{
- return crc ^ (__force u32)__cpu_to_be32(*ptr);
-}
-
-#else
-# error "Unexpected __riscv_xlen"
-#endif
-
-static inline u32 crc32_be_zbc(unsigned long s)
-{
- u32 crc;
-
- asm volatile (".option push\n"
- ".option arch,+zbc\n"
- "clmulh %0, %1, %2\n"
- "xor %0, %0, %1\n"
- "clmul %0, %0, %3\n"
- ".option pop\n"
- : "=&r" (crc)
- : "r" (s),
- "r" (CRC32_POLY_QT_BE),
- "r" (CRC32_POLY_BE)
- :);
- return crc;
-}
-
-#define STEP (1 << STEP_ORDER)
-#define OFFSET_MASK (STEP - 1)
-
-typedef u32 (*fallback)(u32 crc, unsigned char const *p, size_t len);
-
-static inline u32 crc32_le_unaligned(u32 crc, unsigned char const *p,
- size_t len, u32 poly,
- unsigned long poly_qt)
-{
- size_t bits = len * 8;
- unsigned long s = 0;
- u32 crc_low = 0;
-
- for (int i = 0; i < len; i++)
- s = ((unsigned long)*p++ << (__riscv_xlen - 8)) | (s >> 8);
-
- s ^= (unsigned long)crc << (__riscv_xlen - bits);
- if (__riscv_xlen == 32 || len < sizeof(u32))
- crc_low = crc >> bits;
-
- crc = crc32_le_zbc(s, poly, poly_qt);
- crc ^= crc_low;
-
- return crc;
-}
-
-static inline u32 __pure crc32_le_generic(u32 crc, unsigned char const *p,
- size_t len, u32 poly,
- unsigned long poly_qt,
- fallback crc_fb)
-{
- size_t offset, head_len, tail_len;
- unsigned long const *p_ul;
- unsigned long s;
-
- asm goto(ALTERNATIVE("j %l[legacy]", "nop", 0,
- RISCV_ISA_EXT_ZBC, 1)
- : : : : legacy);
-
- /* Handle the unaligned head. */
- offset = (unsigned long)p & OFFSET_MASK;
- if (offset && len) {
- head_len = min(STEP - offset, len);
- crc = crc32_le_unaligned(crc, p, head_len, poly, poly_qt);
- p += head_len;
- len -= head_len;
- }
-
- tail_len = len & OFFSET_MASK;
- len = len >> STEP_ORDER;
- p_ul = (unsigned long const *)p;
-
- for (int i = 0; i < len; i++) {
- s = crc32_le_prep(crc, p_ul);
- crc = crc32_le_zbc(s, poly, poly_qt);
- p_ul++;
- }
-
- /* Handle the tail bytes. */
- p = (unsigned char const *)p_ul;
- if (tail_len)
- crc = crc32_le_unaligned(crc, p, tail_len, poly, poly_qt);
-
- return crc;
-
-legacy:
- return crc_fb(crc, p, len);
-}
-
-u32 __pure crc32_le_arch(u32 crc, const u8 *p, size_t len)
-{
- return crc32_le_generic(crc, p, len, CRC32_POLY_LE, CRC32_POLY_QT_LE,
- crc32_le_base);
-}
-EXPORT_SYMBOL(crc32_le_arch);
-
-u32 __pure crc32c_le_arch(u32 crc, const u8 *p, size_t len)
-{
- return crc32_le_generic(crc, p, len, CRC32C_POLY_LE,
- CRC32C_POLY_QT_LE, crc32c_le_base);
-}
-EXPORT_SYMBOL(crc32c_le_arch);
-
-static inline u32 crc32_be_unaligned(u32 crc, unsigned char const *p,
- size_t len)
-{
- size_t bits = len * 8;
- unsigned long s = 0;
- u32 crc_low = 0;
-
- s = 0;
- for (int i = 0; i < len; i++)
- s = *p++ | (s << 8);
-
- if (__riscv_xlen == 32 || len < sizeof(u32)) {
- s ^= crc >> (32 - bits);
- crc_low = crc << bits;
- } else {
- s ^= (unsigned long)crc << (bits - 32);
- }
-
- crc = crc32_be_zbc(s);
- crc ^= crc_low;
-
- return crc;
-}
-
-u32 __pure crc32_be_arch(u32 crc, const u8 *p, size_t len)
-{
- size_t offset, head_len, tail_len;
- unsigned long const *p_ul;
- unsigned long s;
-
- asm goto(ALTERNATIVE("j %l[legacy]", "nop", 0,
- RISCV_ISA_EXT_ZBC, 1)
- : : : : legacy);
-
- /* Handle the unaligned head. */
- offset = (unsigned long)p & OFFSET_MASK;
- if (offset && len) {
- head_len = min(STEP - offset, len);
- crc = crc32_be_unaligned(crc, p, head_len);
- p += head_len;
- len -= head_len;
- }
-
- tail_len = len & OFFSET_MASK;
- len = len >> STEP_ORDER;
- p_ul = (unsigned long const *)p;
-
- for (int i = 0; i < len; i++) {
- s = crc32_be_prep(crc, p_ul);
- crc = crc32_be_zbc(s);
- p_ul++;
- }
-
- /* Handle the tail bytes. */
- p = (unsigned char const *)p_ul;
- if (tail_len)
- crc = crc32_be_unaligned(crc, p, tail_len);
-
- return crc;
-
-legacy:
- return crc32_be_base(crc, p, len);
-}
-EXPORT_SYMBOL(crc32_be_arch);
-
-u32 crc32_optimizations(void)
-{
- if (riscv_has_extension_likely(RISCV_ISA_EXT_ZBC))
- return CRC32_LE_OPTIMIZATION |
- CRC32_BE_OPTIMIZATION |
- CRC32C_OPTIMIZATION;
- return 0;
-}
-EXPORT_SYMBOL(crc32_optimizations);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("Accelerated CRC32 implementation with Zbc extension");
diff --git a/arch/riscv/lib/crc32.c b/arch/riscv/lib/crc32.c
new file mode 100644
index 000000000000..a3188b7d9c40
--- /dev/null
+++ b/arch/riscv/lib/crc32.c
@@ -0,0 +1,53 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * RISC-V optimized CRC32 functions
+ *
+ * Copyright 2025 Google LLC
+ */
+
+#include <asm/hwcap.h>
+#include <asm/alternative-macros.h>
+#include <linux/crc32.h>
+#include <linux/module.h>
+
+#include "crc-clmul.h"
+
+u32 crc32_le_arch(u32 crc, const u8 *p, size_t len)
+{
+ if (riscv_has_extension_likely(RISCV_ISA_EXT_ZBC))
+ return crc32_lsb_clmul(crc, p, len,
+ &crc32_lsb_0xedb88320_consts);
+ return crc32_le_base(crc, p, len);
+}
+EXPORT_SYMBOL(crc32_le_arch);
+
+u32 crc32_be_arch(u32 crc, const u8 *p, size_t len)
+{
+ if (riscv_has_extension_likely(RISCV_ISA_EXT_ZBC))
+ return crc32_msb_clmul(crc, p, len,
+ &crc32_msb_0x04c11db7_consts);
+ return crc32_be_base(crc, p, len);
+}
+EXPORT_SYMBOL(crc32_be_arch);
+
+u32 crc32c_arch(u32 crc, const u8 *p, size_t len)
+{
+ if (riscv_has_extension_likely(RISCV_ISA_EXT_ZBC))
+ return crc32_lsb_clmul(crc, p, len,
+ &crc32_lsb_0x82f63b78_consts);
+ return crc32c_base(crc, p, len);
+}
+EXPORT_SYMBOL(crc32c_arch);
+
+u32 crc32_optimizations(void)
+{
+ if (riscv_has_extension_likely(RISCV_ISA_EXT_ZBC))
+ return CRC32_LE_OPTIMIZATION |
+ CRC32_BE_OPTIMIZATION |
+ CRC32C_OPTIMIZATION;
+ return 0;
+}
+EXPORT_SYMBOL(crc32_optimizations);
+
+MODULE_DESCRIPTION("RISC-V optimized CRC32 functions");
+MODULE_LICENSE("GPL");
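
Nothing in this file is meant to be called directly by drivers; the expectation (an assumption about the generic lib/crc32 plumbing, which is not shown in this diff) is that the usual helpers dispatch to these *_arch() entry points when CONFIG_CRC32_ARCH is enabled, so callers keep using the standard interface:

	#include <linux/crc32.h>

	/* Hypothetical caller using the usual seed/invert convention. */
	static u32 checksum_buf(const void *buf, size_t len)
	{
		return ~crc32_le(~0, buf, len);
	}

On Zbc-capable hardware the riscv_has_extension_likely() checks above then pick the carryless-multiply implementation at runtime; otherwise the generic base routines are used.
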
diff --git a/arch/riscv/lib/crc32_lsb.c b/arch/riscv/lib/crc32_lsb.c
new file mode 100644
index 000000000000..72fd67e7470c
--- /dev/null
+++ b/arch/riscv/lib/crc32_lsb.c
@@ -0,0 +1,18 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * RISC-V optimized least-significant-bit-first CRC32
+ *
+ * Copyright 2025 Google LLC
+ */
+
+#include "crc-clmul.h"
+
+typedef u32 crc_t;
+#define LSB_CRC 1
+#include "crc-clmul-template.h"
+
+u32 crc32_lsb_clmul(u32 crc, const void *p, size_t len,
+ const struct crc_clmul_consts *consts)
+{
+ return crc_clmul(crc, p, len, consts);
+}
diff --git a/arch/riscv/lib/crc32_msb.c b/arch/riscv/lib/crc32_msb.c
new file mode 100644
index 000000000000..fdbeaccc369f
--- /dev/null
+++ b/arch/riscv/lib/crc32_msb.c
@@ -0,0 +1,18 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * RISC-V optimized most-significant-bit-first CRC32
+ *
+ * Copyright 2025 Google LLC
+ */
+
+#include "crc-clmul.h"
+
+typedef u32 crc_t;
+#define LSB_CRC 0
+#include "crc-clmul-template.h"
+
+u32 crc32_msb_clmul(u32 crc, const void *p, size_t len,
+ const struct crc_clmul_consts *consts)
+{
+ return crc_clmul(crc, p, len, consts);
+}
diff --git a/arch/riscv/lib/crc64.c b/arch/riscv/lib/crc64.c
new file mode 100644
index 000000000000..f0015a27836a
--- /dev/null
+++ b/arch/riscv/lib/crc64.c
@@ -0,0 +1,34 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * RISC-V optimized CRC64 functions
+ *
+ * Copyright 2025 Google LLC
+ */
+
+#include <asm/hwcap.h>
+#include <asm/alternative-macros.h>
+#include <linux/crc64.h>
+#include <linux/module.h>
+
+#include "crc-clmul.h"
+
+u64 crc64_be_arch(u64 crc, const u8 *p, size_t len)
+{
+ if (riscv_has_extension_likely(RISCV_ISA_EXT_ZBC))
+ return crc64_msb_clmul(crc, p, len,
+ &crc64_msb_0x42f0e1eba9ea3693_consts);
+ return crc64_be_generic(crc, p, len);
+}
+EXPORT_SYMBOL(crc64_be_arch);
+
+u64 crc64_nvme_arch(u64 crc, const u8 *p, size_t len)
+{
+ if (riscv_has_extension_likely(RISCV_ISA_EXT_ZBC))
+ return crc64_lsb_clmul(crc, p, len,
+ &crc64_lsb_0x9a6c9329ac4bc9b5_consts);
+ return crc64_nvme_generic(crc, p, len);
+}
+EXPORT_SYMBOL(crc64_nvme_arch);
+
+MODULE_DESCRIPTION("RISC-V optimized CRC64 functions");
+MODULE_LICENSE("GPL");
diff --git a/arch/riscv/lib/crc64_lsb.c b/arch/riscv/lib/crc64_lsb.c
new file mode 100644
index 000000000000..c5371bb85d90
--- /dev/null
+++ b/arch/riscv/lib/crc64_lsb.c
@@ -0,0 +1,18 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * RISC-V optimized least-significant-bit-first CRC64
+ *
+ * Copyright 2025 Google LLC
+ */
+
+#include "crc-clmul.h"
+
+typedef u64 crc_t;
+#define LSB_CRC 1
+#include "crc-clmul-template.h"
+
+u64 crc64_lsb_clmul(u64 crc, const void *p, size_t len,
+ const struct crc_clmul_consts *consts)
+{
+ return crc_clmul(crc, p, len, consts);
+}
diff --git a/arch/riscv/lib/crc64_msb.c b/arch/riscv/lib/crc64_msb.c
new file mode 100644
index 000000000000..1925d1dbe225
--- /dev/null
+++ b/arch/riscv/lib/crc64_msb.c
@@ -0,0 +1,18 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * RISC-V optimized most-significant-bit-first CRC64
+ *
+ * Copyright 2025 Google LLC
+ */
+
+#include "crc-clmul.h"
+
+typedef u64 crc_t;
+#define LSB_CRC 0
+#include "crc-clmul-template.h"
+
+u64 crc64_msb_clmul(u64 crc, const void *p, size_t len,
+ const struct crc_clmul_consts *consts)
+{
+ return crc_clmul(crc, p, len, consts);
+}
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 9c9ec08d78c7..7ed6f229250c 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -166,6 +166,7 @@ config S390
select GENERIC_PTDUMP
select GENERIC_SMP_IDLE_THREAD
select GENERIC_TIME_VSYSCALL
+ select GENERIC_VDSO_DATA_STORE
select GENERIC_VDSO_TIME_NS
select GENERIC_IOREMAP if PCI
select HAVE_ALIGNED_STRUCT_PAGE
diff --git a/arch/s390/configs/debug_defconfig b/arch/s390/configs/debug_defconfig
index 80bdfbae6e5b..26aa455d4068 100644
--- a/arch/s390/configs/debug_defconfig
+++ b/arch/s390/configs/debug_defconfig
@@ -815,9 +815,6 @@ CONFIG_SYSTEM_BLACKLIST_KEYRING=y
CONFIG_CORDIC=m
CONFIG_CRYPTO_LIB_CURVE25519=m
CONFIG_CRYPTO_LIB_CHACHA20POLY1305=m
-CONFIG_CRC4=m
-CONFIG_CRC7=m
-CONFIG_CRC8=m
CONFIG_RANDOM32_SELFTEST=y
CONFIG_XZ_DEC_MICROLZMA=y
CONFIG_DMA_CMA=y
diff --git a/arch/s390/configs/defconfig b/arch/s390/configs/defconfig
index 449a0e996b96..8392f8a5ad6d 100644
--- a/arch/s390/configs/defconfig
+++ b/arch/s390/configs/defconfig
@@ -803,9 +803,6 @@ CONFIG_CORDIC=m
CONFIG_PRIME_NUMBERS=m
CONFIG_CRYPTO_LIB_CURVE25519=m
CONFIG_CRYPTO_LIB_CHACHA20POLY1305=m
-CONFIG_CRC4=m
-CONFIG_CRC7=m
-CONFIG_CRC8=m
CONFIG_XZ_DEC_MICROLZMA=y
CONFIG_DMA_CMA=y
CONFIG_CMA_SIZE_MBYTES=0
diff --git a/arch/s390/include/asm/gmap.h b/arch/s390/include/asm/gmap.h
index 4e73ef46d4b2..9f2814d0e1e9 100644
--- a/arch/s390/include/asm/gmap.h
+++ b/arch/s390/include/asm/gmap.h
@@ -139,7 +139,6 @@ int s390_replace_asce(struct gmap *gmap);
void s390_uv_destroy_pfns(unsigned long count, unsigned long *pfns);
int __s390_uv_destroy_range(struct mm_struct *mm, unsigned long start,
unsigned long end, bool interruptible);
-int kvm_s390_wiggle_split_folio(struct mm_struct *mm, struct folio *folio, bool split);
unsigned long *gmap_table_walk(struct gmap *gmap, unsigned long gaddr, int level);
/**
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index 9a367866cab0..424f899d8163 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -1056,7 +1056,6 @@ bool kvm_s390_pv_cpu_is_protected(struct kvm_vcpu *vcpu);
extern int kvm_s390_gisc_register(struct kvm *kvm, u32 gisc);
extern int kvm_s390_gisc_unregister(struct kvm *kvm, u32 gisc);
-static inline void kvm_arch_sync_events(struct kvm *kvm) {}
static inline void kvm_arch_free_memslot(struct kvm *kvm,
struct kvm_memory_slot *slot) {}
static inline void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) {}
diff --git a/arch/s390/include/asm/pci.h b/arch/s390/include/asm/pci.h
index 474e1f8d1d3c..7cc6a7cc66e7 100644
--- a/arch/s390/include/asm/pci.h
+++ b/arch/s390/include/asm/pci.h
@@ -144,7 +144,7 @@ struct zpci_dev {
u8 util_str_avail : 1;
u8 irqs_registered : 1;
u8 tid_avail : 1;
- u8 reserved : 1;
+ u8 rtr_avail : 1; /* Relaxed translation allowed */
unsigned int devfn; /* DEVFN part of the RID*/
u8 pfip[CLP_PFIP_NR_SEGMENTS]; /* pci function internal path */
@@ -217,6 +217,7 @@ extern struct airq_iv *zpci_aif_sbv;
struct zpci_dev *zpci_create_device(u32 fid, u32 fh, enum zpci_state state);
int zpci_add_device(struct zpci_dev *zdev);
int zpci_enable_device(struct zpci_dev *);
+int zpci_reenable_device(struct zpci_dev *zdev);
int zpci_disable_device(struct zpci_dev *);
int zpci_scan_configured_device(struct zpci_dev *zdev, u32 fh);
int zpci_deconfigure_device(struct zpci_dev *zdev);
@@ -245,6 +246,7 @@ void update_uid_checking(bool new);
/* IOMMU Interface */
int zpci_init_iommu(struct zpci_dev *zdev);
void zpci_destroy_iommu(struct zpci_dev *zdev);
+int zpci_iommu_register_ioat(struct zpci_dev *zdev, u8 *status);
#ifdef CONFIG_PCI
static inline bool zpci_use_mio(struct zpci_dev *zdev)
diff --git a/arch/s390/include/asm/pci_clp.h b/arch/s390/include/asm/pci_clp.h
index 3fff2f7095c8..7ebff39c84b3 100644
--- a/arch/s390/include/asm/pci_clp.h
+++ b/arch/s390/include/asm/pci_clp.h
@@ -156,7 +156,9 @@ struct clp_rsp_query_pci_grp {
u16 : 4;
u16 noi : 12; /* number of interrupts */
u8 version;
- u8 : 6;
+ u8 : 2;
+ u8 rtr : 1; /* Relaxed translation requirement */
+ u8 : 3;
u8 frame : 1;
u8 refresh : 1; /* TLB refresh mode */
u16 : 3;
diff --git a/arch/s390/include/asm/uv.h b/arch/s390/include/asm/uv.h
index b11f5b6d0bd1..46fb0ef6f984 100644
--- a/arch/s390/include/asm/uv.h
+++ b/arch/s390/include/asm/uv.h
@@ -631,7 +631,7 @@ int uv_pin_shared(unsigned long paddr);
int uv_destroy_folio(struct folio *folio);
int uv_destroy_pte(pte_t pte);
int uv_convert_from_secure_pte(pte_t pte);
-int make_folio_secure(struct folio *folio, struct uv_cb_header *uvcb);
+int make_hva_secure(struct mm_struct *mm, unsigned long hva, struct uv_cb_header *uvcb);
int uv_convert_from_secure(unsigned long paddr);
int uv_convert_from_secure_folio(struct folio *folio);
diff --git a/arch/s390/include/asm/vdso.h b/arch/s390/include/asm/vdso.h
index 92c73e4d97a9..420a073fdde5 100644
--- a/arch/s390/include/asm/vdso.h
+++ b/arch/s390/include/asm/vdso.h
@@ -6,13 +6,11 @@
#ifndef __ASSEMBLY__
-extern struct vdso_data *vdso_data;
-
int vdso_getcpu_init(void);
#endif /* __ASSEMBLY__ */
-#define __VVAR_PAGES 2
+#define __VDSO_PAGES 4
#define VDSO_VERSION_STRING LINUX_2.6.29
diff --git a/arch/s390/include/asm/vdso/getrandom.h b/arch/s390/include/asm/vdso/getrandom.h
index 36355af7160b..f8713ce39bb2 100644
--- a/arch/s390/include/asm/vdso/getrandom.h
+++ b/arch/s390/include/asm/vdso/getrandom.h
@@ -23,18 +23,6 @@ static __always_inline ssize_t getrandom_syscall(void *buffer, size_t len, unsig
return syscall3(__NR_getrandom, (long)buffer, (long)len, (long)flags);
}
-static __always_inline const struct vdso_rng_data *__arch_get_vdso_rng_data(void)
-{
- /*
- * The RNG data is in the real VVAR data page, but if a task belongs to a time namespace
- * then VVAR_DATA_PAGE_OFFSET points to the namespace-specific VVAR page and VVAR_TIMENS_
- * PAGE_OFFSET points to the real VVAR page.
- */
- if (IS_ENABLED(CONFIG_TIME_NS) && _vdso_data->clock_mode == VDSO_CLOCKMODE_TIMENS)
- return (void *)&_vdso_rng_data + VVAR_TIMENS_PAGE_OFFSET * PAGE_SIZE;
- return &_vdso_rng_data;
-}
-
#endif /* !__ASSEMBLY__ */
#endif /* __ASM_VDSO_GETRANDOM_H */
diff --git a/arch/s390/include/asm/vdso/gettimeofday.h b/arch/s390/include/asm/vdso/gettimeofday.h
index 7937765ccfa5..fb4564308e9d 100644
--- a/arch/s390/include/asm/vdso/gettimeofday.h
+++ b/arch/s390/include/asm/vdso/gettimeofday.h
@@ -14,12 +14,7 @@
#include <linux/compiler.h>
-static __always_inline const struct vdso_data *__arch_get_vdso_data(void)
-{
- return _vdso_data;
-}
-
-static inline u64 __arch_get_hw_counter(s32 clock_mode, const struct vdso_data *vd)
+static inline u64 __arch_get_hw_counter(s32 clock_mode, const struct vdso_time_data *vd)
{
u64 adj, now;
@@ -49,12 +44,4 @@ long clock_getres_fallback(clockid_t clkid, struct __kernel_timespec *ts)
return syscall2(__NR_clock_getres, (long)clkid, (long)ts);
}
-#ifdef CONFIG_TIME_NS
-static __always_inline
-const struct vdso_data *__arch_get_timens_vdso_data(const struct vdso_data *vd)
-{
- return _timens_data;
-}
-#endif
-
#endif
diff --git a/arch/s390/include/asm/vdso/vsyscall.h b/arch/s390/include/asm/vdso/vsyscall.h
index 3eb576ecd3bd..d346ebe51301 100644
--- a/arch/s390/include/asm/vdso/vsyscall.h
+++ b/arch/s390/include/asm/vdso/vsyscall.h
@@ -2,32 +2,12 @@
#ifndef __ASM_VDSO_VSYSCALL_H
#define __ASM_VDSO_VSYSCALL_H
-#define __VDSO_RND_DATA_OFFSET 768
-
#ifndef __ASSEMBLY__
#include <linux/hrtimer.h>
#include <vdso/datapage.h>
#include <asm/vdso.h>
-enum vvar_pages {
- VVAR_DATA_PAGE_OFFSET,
- VVAR_TIMENS_PAGE_OFFSET,
- VVAR_NR_PAGES
-};
-
-static __always_inline struct vdso_data *__s390_get_k_vdso_data(void)
-{
- return vdso_data;
-}
-#define __arch_get_k_vdso_data __s390_get_k_vdso_data
-
-static __always_inline struct vdso_rng_data *__s390_get_k_vdso_rnd_data(void)
-{
- return (void *)vdso_data + __VDSO_RND_DATA_OFFSET;
-}
-#define __arch_get_k_vdso_rng_data __s390_get_k_vdso_rnd_data
-
/* The asm-generic header needs to be included after the definitions above */
#include <asm-generic/vdso/vsyscall.h>
diff --git a/arch/s390/kernel/crash_dump.c b/arch/s390/kernel/crash_dump.c
index 276cb4c1e11b..4a981266b483 100644
--- a/arch/s390/kernel/crash_dump.c
+++ b/arch/s390/kernel/crash_dump.c
@@ -246,15 +246,6 @@ bool is_kdump_kernel(void)
}
EXPORT_SYMBOL_GPL(is_kdump_kernel);
-static const char *nt_name(Elf64_Word type)
-{
- const char *name = "LINUX";
-
- if (type == NT_PRPSINFO || type == NT_PRSTATUS || type == NT_PRFPREG)
- name = KEXEC_CORE_NOTE_NAME;
- return name;
-}
-
/*
* Initialize ELF note
*/
@@ -279,10 +270,8 @@ static void *nt_init_name(void *buf, Elf64_Word type, void *desc, int d_len,
return PTR_ADD(buf, len);
}
-static inline void *nt_init(void *buf, Elf64_Word type, void *desc, int d_len)
-{
- return nt_init_name(buf, type, desc, d_len, nt_name(type));
-}
+#define nt_init(buf, type, desc) \
+ nt_init_name(buf, NT_ ## type, &(desc), sizeof(desc), NN_ ## type)
/*
* Calculate the size of ELF note
@@ -298,10 +287,7 @@ static size_t nt_size_name(int d_len, const char *name)
return size;
}
-static inline size_t nt_size(Elf64_Word type, int d_len)
-{
- return nt_size_name(d_len, nt_name(type));
-}
+#define nt_size(type, desc) nt_size_name(sizeof(desc), NN_ ## type)
/*
* Fill ELF notes for one CPU with save area registers
@@ -322,18 +308,16 @@ static void *fill_cpu_elf_notes(void *ptr, int cpu, struct save_area *sa)
memcpy(&nt_fpregset.fpc, &sa->fpc, sizeof(sa->fpc));
memcpy(&nt_fpregset.fprs, &sa->fprs, sizeof(sa->fprs));
/* Create ELF notes for the CPU */
- ptr = nt_init(ptr, NT_PRSTATUS, &nt_prstatus, sizeof(nt_prstatus));
- ptr = nt_init(ptr, NT_PRFPREG, &nt_fpregset, sizeof(nt_fpregset));
- ptr = nt_init(ptr, NT_S390_TIMER, &sa->timer, sizeof(sa->timer));
- ptr = nt_init(ptr, NT_S390_TODCMP, &sa->todcmp, sizeof(sa->todcmp));
- ptr = nt_init(ptr, NT_S390_TODPREG, &sa->todpreg, sizeof(sa->todpreg));
- ptr = nt_init(ptr, NT_S390_CTRS, &sa->ctrs, sizeof(sa->ctrs));
- ptr = nt_init(ptr, NT_S390_PREFIX, &sa->prefix, sizeof(sa->prefix));
+ ptr = nt_init(ptr, PRSTATUS, nt_prstatus);
+ ptr = nt_init(ptr, PRFPREG, nt_fpregset);
+ ptr = nt_init(ptr, S390_TIMER, sa->timer);
+ ptr = nt_init(ptr, S390_TODCMP, sa->todcmp);
+ ptr = nt_init(ptr, S390_TODPREG, sa->todpreg);
+ ptr = nt_init(ptr, S390_CTRS, sa->ctrs);
+ ptr = nt_init(ptr, S390_PREFIX, sa->prefix);
if (cpu_has_vx()) {
- ptr = nt_init(ptr, NT_S390_VXRS_HIGH,
- &sa->vxrs_high, sizeof(sa->vxrs_high));
- ptr = nt_init(ptr, NT_S390_VXRS_LOW,
- &sa->vxrs_low, sizeof(sa->vxrs_low));
+ ptr = nt_init(ptr, S390_VXRS_HIGH, sa->vxrs_high);
+ ptr = nt_init(ptr, S390_VXRS_LOW, sa->vxrs_low);
}
return ptr;
}
@@ -346,16 +330,16 @@ static size_t get_cpu_elf_notes_size(void)
struct save_area *sa = NULL;
size_t size;
- size = nt_size(NT_PRSTATUS, sizeof(struct elf_prstatus));
- size += nt_size(NT_PRFPREG, sizeof(elf_fpregset_t));
- size += nt_size(NT_S390_TIMER, sizeof(sa->timer));
- size += nt_size(NT_S390_TODCMP, sizeof(sa->todcmp));
- size += nt_size(NT_S390_TODPREG, sizeof(sa->todpreg));
- size += nt_size(NT_S390_CTRS, sizeof(sa->ctrs));
- size += nt_size(NT_S390_PREFIX, sizeof(sa->prefix));
+ size = nt_size(PRSTATUS, struct elf_prstatus);
+ size += nt_size(PRFPREG, elf_fpregset_t);
+ size += nt_size(S390_TIMER, sa->timer);
+ size += nt_size(S390_TODCMP, sa->todcmp);
+ size += nt_size(S390_TODPREG, sa->todpreg);
+ size += nt_size(S390_CTRS, sa->ctrs);
+ size += nt_size(S390_PREFIX, sa->prefix);
if (cpu_has_vx()) {
- size += nt_size(NT_S390_VXRS_HIGH, sizeof(sa->vxrs_high));
- size += nt_size(NT_S390_VXRS_LOW, sizeof(sa->vxrs_low));
+ size += nt_size(S390_VXRS_HIGH, sa->vxrs_high);
+ size += nt_size(S390_VXRS_LOW, sa->vxrs_low);
}
return size;
@@ -371,7 +355,7 @@ static void *nt_prpsinfo(void *ptr)
memset(&prpsinfo, 0, sizeof(prpsinfo));
prpsinfo.pr_sname = 'R';
strcpy(prpsinfo.pr_fname, "vmlinux");
- return nt_init(ptr, NT_PRPSINFO, &prpsinfo, sizeof(prpsinfo));
+ return nt_init(ptr, PRPSINFO, prpsinfo);
}
/*
@@ -610,7 +594,7 @@ static size_t get_elfcorehdr_size(int phdr_count)
/* PT_NOTES */
size += sizeof(Elf64_Phdr);
/* nt_prpsinfo */
- size += nt_size(NT_PRPSINFO, sizeof(struct elf_prpsinfo));
+ size += nt_size(PRPSINFO, struct elf_prpsinfo);
/* regsets */
size += get_cpu_cnt() * get_cpu_elf_notes_size();
/* nt_vmcoreinfo */
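The reworked nt_init()/nt_size() macros in the crash_dump.c hunk above derive both the ELF note type and the note name from a single token, so a caller can no longer pair a type constant with the wrong name string. A minimal sketch of the expansion, assuming only that NT_PRSTATUS and NN_PRSTATUS are the type constant and name string from <linux/elf.h> that the macro pastes together:

	/* Illustration of the macro expansion above; not part of the patch. */
	ptr = nt_init(ptr, PRSTATUS, nt_prstatus);
	/* expands to */
	ptr = nt_init_name(ptr, NT_PRSTATUS, &(nt_prstatus),
			   sizeof(nt_prstatus), NN_PRSTATUS);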
diff --git a/arch/s390/kernel/dumpstack.c b/arch/s390/kernel/dumpstack.c
index 1ecd0580561f..911b95cd57e5 100644
--- a/arch/s390/kernel/dumpstack.c
+++ b/arch/s390/kernel/dumpstack.c
@@ -198,13 +198,8 @@ void __noreturn die(struct pt_regs *regs, const char *str)
console_verbose();
spin_lock_irq(&die_lock);
bust_spinlocks(1);
- printk("%s: %04x ilc:%d [#%d] ", str, regs->int_code & 0xffff,
+ printk("%s: %04x ilc:%d [#%d]", str, regs->int_code & 0xffff,
regs->int_code >> 17, ++die_counter);
-#ifdef CONFIG_PREEMPT
- pr_cont("PREEMPT ");
-#elif defined(CONFIG_PREEMPT_RT)
- pr_cont("PREEMPT_RT ");
-#endif
pr_cont("SMP ");
if (debug_pagealloc_enabled())
pr_cont("DEBUG_PAGEALLOC");
diff --git a/arch/s390/kernel/ftrace.c b/arch/s390/kernel/ftrace.c
index 63ba6306632e..e540b022ceb2 100644
--- a/arch/s390/kernel/ftrace.c
+++ b/arch/s390/kernel/ftrace.c
@@ -266,12 +266,13 @@ void ftrace_graph_func(unsigned long ip, unsigned long parent_ip,
struct ftrace_ops *op, struct ftrace_regs *fregs)
{
unsigned long *parent = &arch_ftrace_regs(fregs)->regs.gprs[14];
+ unsigned long sp = arch_ftrace_regs(fregs)->regs.gprs[15];
if (unlikely(ftrace_graph_is_dead()))
return;
if (unlikely(atomic_read(&current->tracing_graph_pause)))
return;
- if (!function_graph_enter_regs(*parent, ip, 0, parent, fregs))
+ if (!function_graph_enter_regs(*parent, ip, 0, (unsigned long *)sp, fregs))
*parent = (unsigned long)&return_to_handler;
}
diff --git a/arch/s390/kernel/perf_pai_crypto.c b/arch/s390/kernel/perf_pai_crypto.c
index 10725f5a6f0f..63875270941b 100644
--- a/arch/s390/kernel/perf_pai_crypto.c
+++ b/arch/s390/kernel/perf_pai_crypto.c
@@ -518,7 +518,8 @@ static void paicrypt_have_samples(void)
/* Called on schedule-in and schedule-out. No access to event structure,
* but for sampling only event CRYPTO_ALL is allowed.
*/
-static void paicrypt_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched_in)
+static void paicrypt_sched_task(struct perf_event_pmu_context *pmu_ctx,
+ struct task_struct *task, bool sched_in)
{
/* We started with a clean page on event installation. So read out
* results on schedule_out and if page was dirty, save old values.
diff --git a/arch/s390/kernel/perf_pai_ext.c b/arch/s390/kernel/perf_pai_ext.c
index a8f0bad99cf0..fd14d5ebccbc 100644
--- a/arch/s390/kernel/perf_pai_ext.c
+++ b/arch/s390/kernel/perf_pai_ext.c
@@ -542,7 +542,8 @@ static void paiext_have_samples(void)
/* Called on schedule-in and schedule-out. No access to event structure,
* but for sampling only event NNPA_ALL is allowed.
*/
-static void paiext_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched_in)
+static void paiext_sched_task(struct perf_event_pmu_context *pmu_ctx,
+ struct task_struct *task, bool sched_in)
{
/* We started with a clean page on event installation. So read out
* results on schedule_out and if page was dirty, save old values.
diff --git a/arch/s390/kernel/processor.c b/arch/s390/kernel/processor.c
index 5ce9a795a0fe..745649ad9779 100644
--- a/arch/s390/kernel/processor.c
+++ b/arch/s390/kernel/processor.c
@@ -72,7 +72,7 @@ void notrace stop_machine_yield(const struct cpumask *cpumask)
this_cpu = smp_processor_id();
if (__this_cpu_inc_return(cpu_relax_retry) >= spin_retry) {
__this_cpu_write(cpu_relax_retry, 0);
- cpu = cpumask_next_wrap(this_cpu, cpumask, this_cpu, false);
+ cpu = cpumask_next_wrap(this_cpu, cpumask);
if (cpu >= nr_cpu_ids)
return;
if (arch_vcpu_is_preempted(cpu))
diff --git a/arch/s390/kernel/syscalls/syscall.tbl b/arch/s390/kernel/syscalls/syscall.tbl
index e9115b4d8b63..a4569b96ef06 100644
--- a/arch/s390/kernel/syscalls/syscall.tbl
+++ b/arch/s390/kernel/syscalls/syscall.tbl
@@ -469,3 +469,4 @@
464 common getxattrat sys_getxattrat sys_getxattrat
465 common listxattrat sys_listxattrat sys_listxattrat
466 common removexattrat sys_removexattrat sys_removexattrat
+467 common open_tree_attr sys_open_tree_attr sys_open_tree_attr
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index e9f47c3a6197..699a18f1c54e 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -79,12 +79,10 @@ void __init time_early_init(void)
{
struct ptff_qto qto;
struct ptff_qui qui;
- int cs;
/* Initialize TOD steering parameters */
tod_steering_end = tod_clock_base.tod;
- for (cs = 0; cs < CS_BASES; cs++)
- vdso_data[cs].arch_data.tod_steering_end = tod_steering_end;
+ vdso_k_time_data->arch_data.tod_steering_end = tod_steering_end;
if (!test_facility(28))
return;
@@ -373,7 +371,6 @@ static void clock_sync_global(long delta)
{
unsigned long now, adj;
struct ptff_qto qto;
- int cs;
/* Fixup the monotonic sched clock. */
tod_clock_base.eitod += delta;
@@ -389,10 +386,8 @@ static void clock_sync_global(long delta)
panic("TOD clock sync offset %li is too large to drift\n",
tod_steering_delta);
tod_steering_end = now + (abs(tod_steering_delta) << 15);
- for (cs = 0; cs < CS_BASES; cs++) {
- vdso_data[cs].arch_data.tod_steering_end = tod_steering_end;
- vdso_data[cs].arch_data.tod_steering_delta = tod_steering_delta;
- }
+ vdso_k_time_data->arch_data.tod_steering_end = tod_steering_end;
+ vdso_k_time_data->arch_data.tod_steering_delta = tod_steering_delta;
/* Update LPAR offset. */
if (ptff_query(PTFF_QTO) && ptff(&qto, sizeof(qto), PTFF_QTO) == 0)
diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c
index 24fee11b030d..b746213d3110 100644
--- a/arch/s390/kernel/traps.c
+++ b/arch/s390/kernel/traps.c
@@ -285,10 +285,10 @@ static void __init test_monitor_call(void)
return;
asm volatile(
" mc 0,0\n"
- "0: xgr %0,%0\n"
+ "0: lhi %[val],0\n"
"1:\n"
- EX_TABLE(0b,1b)
- : "+d" (val));
+ EX_TABLE(0b, 1b)
+ : [val] "+d" (val));
if (!val)
panic("Monitor call doesn't work!\n");
}
diff --git a/arch/s390/kernel/uv.c b/arch/s390/kernel/uv.c
index 9f05df2da2f7..9a5d5be8acf4 100644
--- a/arch/s390/kernel/uv.c
+++ b/arch/s390/kernel/uv.c
@@ -206,6 +206,39 @@ int uv_convert_from_secure_pte(pte_t pte)
return uv_convert_from_secure_folio(pfn_folio(pte_pfn(pte)));
}
+/**
+ * should_export_before_import - Determine whether an export is needed
+ * before an import-like operation
+ * @uvcb: the Ultravisor control block of the UVC to be performed
+ * @mm: the mm of the process
+ *
+ * Returns whether an export is needed before every import-like operation.
+ * This is needed for shared pages, which don't trigger a secure storage
+ * exception when accessed from a different guest.
+ *
+ * Although considered as one, the Unpin Page UVC is not an actual import,
+ * so it is not affected.
+ *
+ * An export is also not needed when there is only one protected VM, because
+ * the page cannot belong to the wrong VM in that case (there is no "other VM"
+ * it could belong to).
+ *
+ * Return: true if an export is needed before every import, otherwise false.
+ */
+static bool should_export_before_import(struct uv_cb_header *uvcb, struct mm_struct *mm)
+{
+ /*
+ * The misc feature indicates, among other things, that importing a
+ * shared page from a different protected VM will automatically also
+ * transfer its ownership.
+ */
+ if (uv_has_feature(BIT_UV_FEAT_MISC))
+ return false;
+ if (uvcb->cmd == UVC_CMD_UNPIN_PAGE_SHARED)
+ return false;
+ return atomic_read(&mm->context.protected_count) > 1;
+}
+
/*
* Calculate the expected ref_count for a folio that would otherwise have no
* further pins. This was cribbed from similar functions in other places in
@@ -228,7 +261,7 @@ static int expected_folio_refs(struct folio *folio)
}
/**
- * make_folio_secure() - make a folio secure
+ * __make_folio_secure() - make a folio secure
* @folio: the folio to make secure
* @uvcb: the uvcb that describes the UVC to be used
*
@@ -237,20 +270,18 @@ static int expected_folio_refs(struct folio *folio)
*
* Return: 0 on success;
* -EBUSY if the folio is in writeback or has too many references;
- * -E2BIG if the folio is large;
* -EAGAIN if the UVC needs to be attempted again;
* -ENXIO if the address is not mapped;
* -EINVAL if the UVC failed for other reasons.
*
* Context: The caller must hold exactly one extra reference on the folio
- * (it's the same logic as split_folio())
+ * (it's the same logic as split_folio()), and the folio must be
+ * locked.
*/
-int make_folio_secure(struct folio *folio, struct uv_cb_header *uvcb)
+static int __make_folio_secure(struct folio *folio, struct uv_cb_header *uvcb)
{
int expected, cc = 0;
- if (folio_test_large(folio))
- return -E2BIG;
if (folio_test_writeback(folio))
return -EBUSY;
expected = expected_folio_refs(folio) + 1;
@@ -277,7 +308,98 @@ int make_folio_secure(struct folio *folio, struct uv_cb_header *uvcb)
return -EAGAIN;
return uvcb->rc == 0x10a ? -ENXIO : -EINVAL;
}
-EXPORT_SYMBOL_GPL(make_folio_secure);
+
+static int make_folio_secure(struct mm_struct *mm, struct folio *folio, struct uv_cb_header *uvcb)
+{
+ int rc;
+
+ if (!folio_trylock(folio))
+ return -EAGAIN;
+ if (should_export_before_import(uvcb, mm))
+ uv_convert_from_secure(folio_to_phys(folio));
+ rc = __make_folio_secure(folio, uvcb);
+ folio_unlock(folio);
+
+ return rc;
+}
+
+/**
+ * s390_wiggle_split_folio() - try to drain extra references to a folio and optionally split.
+ * @mm: the mm containing the folio to work on
+ * @folio: the folio
+ * @split: whether to split a large folio
+ *
+ * Context: Must be called while holding an extra reference to the folio;
+ * the mm lock should not be held.
+ * Return: 0 if the folio was split successfully;
+ * -EAGAIN if the folio was not split successfully but another attempt
+ * can be made, or if @split was set to false;
+ * -EINVAL in case of other errors. See split_folio().
+ */
+static int s390_wiggle_split_folio(struct mm_struct *mm, struct folio *folio, bool split)
+{
+ int rc;
+
+ lockdep_assert_not_held(&mm->mmap_lock);
+ folio_wait_writeback(folio);
+ lru_add_drain_all();
+ if (split) {
+ folio_lock(folio);
+ rc = split_folio(folio);
+ folio_unlock(folio);
+
+ if (rc != -EBUSY)
+ return rc;
+ }
+ return -EAGAIN;
+}
+
+int make_hva_secure(struct mm_struct *mm, unsigned long hva, struct uv_cb_header *uvcb)
+{
+ struct vm_area_struct *vma;
+ struct folio_walk fw;
+ struct folio *folio;
+ int rc;
+
+ mmap_read_lock(mm);
+ vma = vma_lookup(mm, hva);
+ if (!vma) {
+ mmap_read_unlock(mm);
+ return -EFAULT;
+ }
+ folio = folio_walk_start(&fw, vma, hva, 0);
+ if (!folio) {
+ mmap_read_unlock(mm);
+ return -ENXIO;
+ }
+
+ folio_get(folio);
+ /*
+	 * Secure pages cannot be huge, and userspace should not combine the two.
+	 * If userspace does so anyway, the unpack will fail with -EFAULT and the
+	 * guest will therefore never reach secure mode.
+ * If userspace plays dirty tricks and decides to map huge pages at a
+ * later point in time, it will receive a segmentation fault or
+ * KVM_RUN will return -EFAULT.
+ */
+ if (folio_test_hugetlb(folio))
+ rc = -EFAULT;
+ else if (folio_test_large(folio))
+ rc = -E2BIG;
+ else if (!pte_write(fw.pte) || (pte_val(fw.pte) & _PAGE_INVALID))
+ rc = -ENXIO;
+ else
+ rc = make_folio_secure(mm, folio, uvcb);
+ folio_walk_end(&fw, vma);
+ mmap_read_unlock(mm);
+
+ if (rc == -E2BIG || rc == -EBUSY)
+ rc = s390_wiggle_split_folio(mm, folio, rc == -E2BIG);
+ folio_put(folio);
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(make_hva_secure);
/*
* To be called with the folio locked or with an extra reference! This will
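make_hva_secure() now owns the whole lookup-walk-convert sequence and reports transient conditions (folio lock contention, a folio that first had to be split or drained) as -EAGAIN. A hypothetical caller-side loop, shown only to illustrate that contract; the real callers in arch/s390/kvm retry by re-entering the guest rather than spinning like this:

	/* Hypothetical retry loop around the -EAGAIN contract; not from the patch. */
	static int import_page_retrying(struct mm_struct *mm, unsigned long hva,
					struct uv_cb_header *uvcb)
	{
		int rc;

		do {
			rc = make_hva_secure(mm, hva, uvcb);
			if (rc == -EAGAIN)
				cond_resched();	/* give writeback/LRU draining a chance */
		} while (rc == -EAGAIN);
		return rc;
	}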
diff --git a/arch/s390/kernel/vdso.c b/arch/s390/kernel/vdso.c
index 598b512cde01..70c8f9ad13cd 100644
--- a/arch/s390/kernel/vdso.c
+++ b/arch/s390/kernel/vdso.c
@@ -16,8 +16,8 @@
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/smp.h>
-#include <linux/time_namespace.h>
#include <linux/random.h>
+#include <linux/vdso_datastore.h>
#include <vdso/datapage.h>
#include <asm/vdso/vsyscall.h>
#include <asm/alternative.h>
@@ -26,85 +26,6 @@
extern char vdso64_start[], vdso64_end[];
extern char vdso32_start[], vdso32_end[];
-static struct vm_special_mapping vvar_mapping;
-
-static union vdso_data_store vdso_data_store __page_aligned_data;
-
-struct vdso_data *vdso_data = vdso_data_store.data;
-
-#ifdef CONFIG_TIME_NS
-struct vdso_data *arch_get_vdso_data(void *vvar_page)
-{
- return (struct vdso_data *)(vvar_page);
-}
-
-/*
- * The VVAR page layout depends on whether a task belongs to the root or
- * non-root time namespace. Whenever a task changes its namespace, the VVAR
- * page tables are cleared and then they will be re-faulted with a
- * corresponding layout.
- * See also the comment near timens_setup_vdso_data() for details.
- */
-int vdso_join_timens(struct task_struct *task, struct time_namespace *ns)
-{
- struct mm_struct *mm = task->mm;
- VMA_ITERATOR(vmi, mm, 0);
- struct vm_area_struct *vma;
-
- mmap_read_lock(mm);
- for_each_vma(vmi, vma) {
- if (!vma_is_special_mapping(vma, &vvar_mapping))
- continue;
- zap_vma_pages(vma);
- break;
- }
- mmap_read_unlock(mm);
- return 0;
-}
-#endif
-
-static vm_fault_t vvar_fault(const struct vm_special_mapping *sm,
- struct vm_area_struct *vma, struct vm_fault *vmf)
-{
- struct page *timens_page = find_timens_vvar_page(vma);
- unsigned long addr, pfn;
- vm_fault_t err;
-
- switch (vmf->pgoff) {
- case VVAR_DATA_PAGE_OFFSET:
- pfn = virt_to_pfn(vdso_data);
- if (timens_page) {
- /*
- * Fault in VVAR page too, since it will be accessed
- * to get clock data anyway.
- */
- addr = vmf->address + VVAR_TIMENS_PAGE_OFFSET * PAGE_SIZE;
- err = vmf_insert_pfn(vma, addr, pfn);
- if (unlikely(err & VM_FAULT_ERROR))
- return err;
- pfn = page_to_pfn(timens_page);
- }
- break;
-#ifdef CONFIG_TIME_NS
- case VVAR_TIMENS_PAGE_OFFSET:
- /*
- * If a task belongs to a time namespace then a namespace
- * specific VVAR is mapped with the VVAR_DATA_PAGE_OFFSET and
- * the real VVAR page is mapped with the VVAR_TIMENS_PAGE_OFFSET
- * offset.
- * See also the comment near timens_setup_vdso_data().
- */
- if (!timens_page)
- return VM_FAULT_SIGBUS;
- pfn = virt_to_pfn(vdso_data);
- break;
-#endif /* CONFIG_TIME_NS */
- default:
- return VM_FAULT_SIGBUS;
- }
- return vmf_insert_pfn(vma, vmf->address, pfn);
-}
-
static int vdso_mremap(const struct vm_special_mapping *sm,
struct vm_area_struct *vma)
{
@@ -112,11 +33,6 @@ static int vdso_mremap(const struct vm_special_mapping *sm,
return 0;
}
-static struct vm_special_mapping vvar_mapping = {
- .name = "[vvar]",
- .fault = vvar_fault,
-};
-
static struct vm_special_mapping vdso64_mapping = {
.name = "[vdso]",
.mremap = vdso_mremap,
@@ -142,7 +58,7 @@ static int map_vdso(unsigned long addr, unsigned long vdso_mapping_len)
struct vm_area_struct *vma;
int rc;
- BUILD_BUG_ON(VVAR_NR_PAGES != __VVAR_PAGES);
+ BUILD_BUG_ON(VDSO_NR_PAGES != __VDSO_PAGES);
if (mmap_write_lock_killable(mm))
return -EINTR;
@@ -157,14 +73,11 @@ static int map_vdso(unsigned long addr, unsigned long vdso_mapping_len)
rc = vvar_start;
if (IS_ERR_VALUE(vvar_start))
goto out;
- vma = _install_special_mapping(mm, vvar_start, VVAR_NR_PAGES*PAGE_SIZE,
- VM_READ|VM_MAYREAD|VM_IO|VM_DONTDUMP|
- VM_PFNMAP,
- &vvar_mapping);
+ vma = vdso_install_vvar_mapping(mm, vvar_start);
rc = PTR_ERR(vma);
if (IS_ERR(vma))
goto out;
- vdso_text_start = vvar_start + VVAR_NR_PAGES * PAGE_SIZE;
+ vdso_text_start = vvar_start + VDSO_NR_PAGES * PAGE_SIZE;
/* VM_MAYWRITE for COW so gdb can set breakpoints */
vma = _install_special_mapping(mm, vdso_text_start, vdso_text_len,
VM_READ|VM_EXEC|
@@ -220,7 +133,7 @@ unsigned long vdso_text_size(void)
unsigned long vdso_size(void)
{
- return vdso_text_size() + VVAR_NR_PAGES * PAGE_SIZE;
+ return vdso_text_size() + VDSO_NR_PAGES * PAGE_SIZE;
}
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
diff --git a/arch/s390/kernel/vdso32/Makefile b/arch/s390/kernel/vdso32/Makefile
index 2c5afb88d298..1e4ddd1a683f 100644
--- a/arch/s390/kernel/vdso32/Makefile
+++ b/arch/s390/kernel/vdso32/Makefile
@@ -2,7 +2,7 @@
# List of files in the vdso
# Include the generic Makefile to check the built vdso.
-include $(srctree)/lib/vdso/Makefile
+include $(srctree)/lib/vdso/Makefile.include
obj-vdso32 = vdso_user_wrapper-32.o note-32.o
# Build rules
diff --git a/arch/s390/kernel/vdso32/vdso32.lds.S b/arch/s390/kernel/vdso32/vdso32.lds.S
index c916c4f73f76..9630d58c2080 100644
--- a/arch/s390/kernel/vdso32/vdso32.lds.S
+++ b/arch/s390/kernel/vdso32/vdso32.lds.S
@@ -6,16 +6,15 @@
#include <asm/page.h>
#include <asm/vdso.h>
+#include <vdso/datapage.h>
OUTPUT_FORMAT("elf32-s390", "elf32-s390", "elf32-s390")
OUTPUT_ARCH(s390:31-bit)
SECTIONS
{
- PROVIDE(_vdso_data = . - __VVAR_PAGES * PAGE_SIZE);
-#ifdef CONFIG_TIME_NS
- PROVIDE(_timens_data = _vdso_data + PAGE_SIZE);
-#endif
+ VDSO_VVAR_SYMS
+
. = SIZEOF_HEADERS;
.hash : { *(.hash) } :text
diff --git a/arch/s390/kernel/vdso64/Makefile b/arch/s390/kernel/vdso64/Makefile
index ad206f2068d8..d8f0df742809 100644
--- a/arch/s390/kernel/vdso64/Makefile
+++ b/arch/s390/kernel/vdso64/Makefile
@@ -2,7 +2,7 @@
# List of files in the vdso
# Include the generic Makefile to check the built vdso.
-include $(srctree)/lib/vdso/Makefile
+include $(srctree)/lib/vdso/Makefile.include
obj-vdso64 = vdso_user_wrapper.o note.o vgetrandom-chacha.o
obj-cvdso64 = vdso64_generic.o getcpu.o vgetrandom.o
VDSO_CFLAGS_REMOVE := -pg $(CC_FLAGS_FTRACE) $(CC_FLAGS_EXPOLINE)
diff --git a/arch/s390/kernel/vdso64/vdso64.lds.S b/arch/s390/kernel/vdso64/vdso64.lds.S
index ec42b7d9cb53..e4f6551ae898 100644
--- a/arch/s390/kernel/vdso64/vdso64.lds.S
+++ b/arch/s390/kernel/vdso64/vdso64.lds.S
@@ -7,17 +7,15 @@
#include <asm/vdso/vsyscall.h>
#include <asm/page.h>
#include <asm/vdso.h>
+#include <vdso/datapage.h>
OUTPUT_FORMAT("elf64-s390", "elf64-s390", "elf64-s390")
OUTPUT_ARCH(s390:64-bit)
SECTIONS
{
- PROVIDE(_vdso_data = . - __VVAR_PAGES * PAGE_SIZE);
- PROVIDE(_vdso_rng_data = _vdso_data + __VDSO_RND_DATA_OFFSET);
-#ifdef CONFIG_TIME_NS
- PROVIDE(_timens_data = _vdso_data + PAGE_SIZE);
-#endif
+ VDSO_VVAR_SYMS
+
. = SIZEOF_HEADERS;
.hash : { *(.hash) } :text
diff --git a/arch/s390/kvm/gmap.c b/arch/s390/kvm/gmap.c
index 02adf151d4de..6d8944d1b4a0 100644
--- a/arch/s390/kvm/gmap.c
+++ b/arch/s390/kvm/gmap.c
@@ -23,116 +23,25 @@
#include "gmap.h"
/**
- * should_export_before_import - Determine whether an export is needed
- * before an import-like operation
- * @uvcb: the Ultravisor control block of the UVC to be performed
- * @mm: the mm of the process
- *
- * Returns whether an export is needed before every import-like operation.
- * This is needed for shared pages, which don't trigger a secure storage
- * exception when accessed from a different guest.
- *
- * Although considered as one, the Unpin Page UVC is not an actual import,
- * so it is not affected.
- *
- * No export is needed also when there is only one protected VM, because the
- * page cannot belong to the wrong VM in that case (there is no "other VM"
- * it can belong to).
- *
- * Return: true if an export is needed before every import, otherwise false.
- */
-static bool should_export_before_import(struct uv_cb_header *uvcb, struct mm_struct *mm)
-{
- /*
- * The misc feature indicates, among other things, that importing a
- * shared page from a different protected VM will automatically also
- * transfer its ownership.
- */
- if (uv_has_feature(BIT_UV_FEAT_MISC))
- return false;
- if (uvcb->cmd == UVC_CMD_UNPIN_PAGE_SHARED)
- return false;
- return atomic_read(&mm->context.protected_count) > 1;
-}
-
-static int __gmap_make_secure(struct gmap *gmap, struct page *page, void *uvcb)
-{
- struct folio *folio = page_folio(page);
- int rc;
-
- /*
- * Secure pages cannot be huge and userspace should not combine both.
- * In case userspace does it anyway this will result in an -EFAULT for
- * the unpack. The guest is thus never reaching secure mode.
- * If userspace plays dirty tricks and decides to map huge pages at a
- * later point in time, it will receive a segmentation fault or
- * KVM_RUN will return -EFAULT.
- */
- if (folio_test_hugetlb(folio))
- return -EFAULT;
- if (folio_test_large(folio)) {
- mmap_read_unlock(gmap->mm);
- rc = kvm_s390_wiggle_split_folio(gmap->mm, folio, true);
- mmap_read_lock(gmap->mm);
- if (rc)
- return rc;
- folio = page_folio(page);
- }
-
- if (!folio_trylock(folio))
- return -EAGAIN;
- if (should_export_before_import(uvcb, gmap->mm))
- uv_convert_from_secure(folio_to_phys(folio));
- rc = make_folio_secure(folio, uvcb);
- folio_unlock(folio);
-
- /*
- * In theory a race is possible and the folio might have become
- * large again before the folio_trylock() above. In that case, no
- * action is performed and -EAGAIN is returned; the callers will
- * have to try again later.
- * In most cases this implies running the VM again, getting the same
- * exception again, and make another attempt in this function.
- * This is expected to happen extremely rarely.
- */
- if (rc == -E2BIG)
- return -EAGAIN;
- /* The folio has too many references, try to shake some off */
- if (rc == -EBUSY) {
- mmap_read_unlock(gmap->mm);
- kvm_s390_wiggle_split_folio(gmap->mm, folio, false);
- mmap_read_lock(gmap->mm);
- return -EAGAIN;
- }
-
- return rc;
-}
-
-/**
* gmap_make_secure() - make one guest page secure
* @gmap: the guest gmap
* @gaddr: the guest address that needs to be made secure
* @uvcb: the UVCB specifying which operation needs to be performed
*
* Context: needs to be called with kvm->srcu held.
- * Return: 0 on success, < 0 in case of error (see __gmap_make_secure()).
+ * Return: 0 on success, < 0 in case of error.
*/
int gmap_make_secure(struct gmap *gmap, unsigned long gaddr, void *uvcb)
{
struct kvm *kvm = gmap->private;
- struct page *page;
- int rc = 0;
+ unsigned long vmaddr;
lockdep_assert_held(&kvm->srcu);
- page = gfn_to_page(kvm, gpa_to_gfn(gaddr));
- mmap_read_lock(gmap->mm);
- if (page)
- rc = __gmap_make_secure(gmap, page, uvcb);
- kvm_release_page_clean(page);
- mmap_read_unlock(gmap->mm);
-
- return rc;
+ vmaddr = gfn_to_hva(kvm, gpa_to_gfn(gaddr));
+ if (kvm_is_error_hva(vmaddr))
+ return -EFAULT;
+ return make_hva_secure(gmap->mm, vmaddr, uvcb);
}
int gmap_convert_to_secure(struct gmap *gmap, unsigned long gaddr)
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index 07ff0e10cb7f..0f00f8e85fee 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -3174,8 +3174,7 @@ void kvm_s390_gisa_init(struct kvm *kvm)
gi->alert.mask = 0;
spin_lock_init(&gi->alert.ref_lock);
gi->expires = 50 * 1000; /* 50 usec */
- hrtimer_init(&gi->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- gi->timer.function = gisa_vcpu_kicker;
+ hrtimer_setup(&gi->timer, gisa_vcpu_kicker, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
memset(gi->origin, 0, sizeof(struct kvm_s390_gisa));
gi->origin->next_alert = (u32)virt_to_phys(gi->origin);
VM_EVENT(kvm, 3, "gisa 0x%pK initialized", gi->origin);
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index ebecb96bacce..48104e59648b 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -3943,8 +3943,8 @@ static int kvm_s390_vcpu_setup(struct kvm_vcpu *vcpu)
if (rc)
return rc;
}
- hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
+ hrtimer_setup(&vcpu->arch.ckc_timer, kvm_s390_idle_wakeup, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
vcpu->arch.sie_block->hpid = HPID_KVM;
@@ -4952,6 +4952,7 @@ static int vcpu_post_run_handle_fault(struct kvm_vcpu *vcpu)
{
unsigned int flags = 0;
unsigned long gaddr;
+ int rc;
gaddr = current->thread.gmap_teid.addr * PAGE_SIZE;
if (kvm_s390_cur_gmap_fault_is_write())
@@ -4961,16 +4962,6 @@ static int vcpu_post_run_handle_fault(struct kvm_vcpu *vcpu)
case 0:
vcpu->stat.exit_null++;
break;
- case PGM_NON_SECURE_STORAGE_ACCESS:
- kvm_s390_assert_primary_as(vcpu);
- /*
- * This is normal operation; a page belonging to a protected
- * guest has not been imported yet. Try to import the page into
- * the protected guest.
- */
- if (gmap_convert_to_secure(vcpu->arch.gmap, gaddr) == -EINVAL)
- send_sig(SIGSEGV, current, 0);
- break;
case PGM_SECURE_STORAGE_ACCESS:
case PGM_SECURE_STORAGE_VIOLATION:
kvm_s390_assert_primary_as(vcpu);
@@ -4995,6 +4986,20 @@ static int vcpu_post_run_handle_fault(struct kvm_vcpu *vcpu)
send_sig(SIGSEGV, current, 0);
}
break;
+ case PGM_NON_SECURE_STORAGE_ACCESS:
+ kvm_s390_assert_primary_as(vcpu);
+ /*
+ * This is normal operation; a page belonging to a protected
+ * guest has not been imported yet. Try to import the page into
+ * the protected guest.
+ */
+ rc = gmap_convert_to_secure(vcpu->arch.gmap, gaddr);
+ if (rc == -EINVAL)
+ send_sig(SIGSEGV, current, 0);
+ if (rc != -ENXIO)
+ break;
+ flags = FAULT_FLAG_WRITE;
+ fallthrough;
case PGM_PROTECTION:
case PGM_SEGMENT_TRANSLATION:
case PGM_PAGE_TRANSLATION:
diff --git a/arch/s390/kvm/pci.c b/arch/s390/kvm/pci.c
index 9b9e7fdd5380..8c40154ff50f 100644
--- a/arch/s390/kvm/pci.c
+++ b/arch/s390/kvm/pci.c
@@ -433,7 +433,6 @@ static void kvm_s390_pci_dev_release(struct zpci_dev *zdev)
static int kvm_s390_pci_register_kvm(void *opaque, struct kvm *kvm)
{
struct zpci_dev *zdev = opaque;
- u8 status;
int rc;
if (!zdev)
@@ -480,13 +479,7 @@ static int kvm_s390_pci_register_kvm(void *opaque, struct kvm *kvm)
*/
zdev->gisa = (u32)virt_to_phys(&kvm->arch.sie_page2->gisa);
- rc = zpci_enable_device(zdev);
- if (rc)
- goto clear_gisa;
-
- /* Re-register the IOMMU that was already created */
- rc = zpci_register_ioat(zdev, 0, zdev->start_dma, zdev->end_dma,
- virt_to_phys(zdev->dma_table), &status);
+ rc = zpci_reenable_device(zdev);
if (rc)
goto clear_gisa;
@@ -516,7 +509,6 @@ static void kvm_s390_pci_unregister_kvm(void *opaque)
{
struct zpci_dev *zdev = opaque;
struct kvm *kvm;
- u8 status;
if (!zdev)
return;
@@ -550,12 +542,7 @@ static void kvm_s390_pci_unregister_kvm(void *opaque)
goto out;
}
- if (zpci_enable_device(zdev))
- goto out;
-
- /* Re-register the IOMMU that was already created */
- zpci_register_ioat(zdev, 0, zdev->start_dma, zdev->end_dma,
- virt_to_phys(zdev->dma_table), &status);
+ zpci_reenable_device(zdev);
out:
spin_lock(&kvm->arch.kzdev_list_lock);
diff --git a/arch/s390/lib/crc32-glue.c b/arch/s390/lib/crc32-glue.c
index 137080e61f90..124214a27340 100644
--- a/arch/s390/lib/crc32-glue.c
+++ b/arch/s390/lib/crc32-glue.c
@@ -62,7 +62,7 @@ static DEFINE_STATIC_KEY_FALSE(have_vxrs);
DEFINE_CRC32_VX(crc32_le_arch, crc32_le_vgfm_16, crc32_le_base)
DEFINE_CRC32_VX(crc32_be_arch, crc32_be_vgfm_16, crc32_be_base)
-DEFINE_CRC32_VX(crc32c_le_arch, crc32c_le_vgfm_16, crc32c_le_base)
+DEFINE_CRC32_VX(crc32c_arch, crc32c_le_vgfm_16, crc32c_base)
static int __init crc32_s390_init(void)
{
diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c
index 94d927785800..d14b488e7a1f 100644
--- a/arch/s390/mm/gmap.c
+++ b/arch/s390/mm/gmap.c
@@ -2626,31 +2626,3 @@ int s390_replace_asce(struct gmap *gmap)
return 0;
}
EXPORT_SYMBOL_GPL(s390_replace_asce);
-
-/**
- * kvm_s390_wiggle_split_folio() - try to drain extra references to a folio and optionally split
- * @mm: the mm containing the folio to work on
- * @folio: the folio
- * @split: whether to split a large folio
- *
- * Context: Must be called while holding an extra reference to the folio;
- * the mm lock should not be held.
- */
-int kvm_s390_wiggle_split_folio(struct mm_struct *mm, struct folio *folio, bool split)
-{
- int rc;
-
- lockdep_assert_not_held(&mm->mmap_lock);
- folio_wait_writeback(folio);
- lru_add_drain_all();
- if (split) {
- folio_lock(folio);
- rc = split_folio(folio);
- folio_unlock(folio);
-
- if (rc != -EBUSY)
- return rc;
- }
- return -EAGAIN;
-}
-EXPORT_SYMBOL_GPL(kvm_s390_wiggle_split_folio);
diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c
index 88f72745fa59..fa879351efb5 100644
--- a/arch/s390/pci/pci.c
+++ b/arch/s390/pci/pci.c
@@ -124,14 +124,13 @@ int zpci_register_ioat(struct zpci_dev *zdev, u8 dmaas,
struct zpci_fib fib = {0};
u8 cc;
- WARN_ON_ONCE(iota & 0x3fff);
fib.pba = base;
/* Work around off by one in ISM virt device */
if (zdev->pft == PCI_FUNC_TYPE_ISM && limit > base)
fib.pal = limit + (1 << 12);
else
fib.pal = limit;
- fib.iota = iota | ZPCI_IOTA_RTTO_FLAG;
+ fib.iota = iota;
fib.gd = zdev->gisa;
cc = zpci_mod_fc(req, &fib, status);
if (cc)
@@ -690,6 +689,23 @@ int zpci_enable_device(struct zpci_dev *zdev)
}
EXPORT_SYMBOL_GPL(zpci_enable_device);
+int zpci_reenable_device(struct zpci_dev *zdev)
+{
+ u8 status;
+ int rc;
+
+ rc = zpci_enable_device(zdev);
+ if (rc)
+ return rc;
+
+ rc = zpci_iommu_register_ioat(zdev, &status);
+ if (rc)
+ zpci_disable_device(zdev);
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(zpci_reenable_device);
+
int zpci_disable_device(struct zpci_dev *zdev)
{
u32 fh = zdev->fh;
@@ -739,7 +755,6 @@ EXPORT_SYMBOL_GPL(zpci_disable_device);
*/
int zpci_hot_reset_device(struct zpci_dev *zdev)
{
- u8 status;
int rc;
lockdep_assert_held(&zdev->state_lock);
@@ -758,19 +773,9 @@ int zpci_hot_reset_device(struct zpci_dev *zdev)
return rc;
}
- rc = zpci_enable_device(zdev);
- if (rc)
- return rc;
-
- if (zdev->dma_table)
- rc = zpci_register_ioat(zdev, 0, zdev->start_dma, zdev->end_dma,
- virt_to_phys(zdev->dma_table), &status);
- if (rc) {
- zpci_disable_device(zdev);
- return rc;
- }
+ rc = zpci_reenable_device(zdev);
- return 0;
+ return rc;
}
/**
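zpci_reenable_device() folds the enable-then-re-register-IOAT sequence that was previously open-coded in zpci_hot_reset_device(), the sysfs recover path and KVM's zPCI code into one helper; roughly:

	/* Rough before/after of the callers converted by this change: */
	/* before:
	 *	rc = zpci_enable_device(zdev);
	 *	if (!rc)
	 *		rc = zpci_register_ioat(zdev, 0, zdev->start_dma, zdev->end_dma,
	 *					virt_to_phys(zdev->dma_table), &status);
	 * after:
	 *	rc = zpci_reenable_device(zdev);
	 */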
diff --git a/arch/s390/pci/pci_bus.c b/arch/s390/pci/pci_bus.c
index 39a481ec4a40..0e725039861f 100644
--- a/arch/s390/pci/pci_bus.c
+++ b/arch/s390/pci/pci_bus.c
@@ -19,6 +19,7 @@
#include <linux/jump_label.h>
#include <linux/pci.h>
#include <linux/printk.h>
+#include <linux/dma-direct.h>
#include <asm/pci_clp.h>
#include <asm/pci_dma.h>
@@ -283,10 +284,34 @@ static struct zpci_bus *zpci_bus_alloc(int topo, bool topo_is_tid)
return zbus;
}
+static void pci_dma_range_setup(struct pci_dev *pdev)
+{
+ struct zpci_dev *zdev = to_zpci(pdev);
+ struct bus_dma_region *map;
+ u64 aligned_end;
+
+ map = kzalloc(sizeof(*map), GFP_KERNEL);
+ if (!map)
+ return;
+
+ map->cpu_start = 0;
+ map->dma_start = PAGE_ALIGN(zdev->start_dma);
+ aligned_end = PAGE_ALIGN_DOWN(zdev->end_dma + 1);
+ if (aligned_end >= map->dma_start)
+ map->size = aligned_end - map->dma_start;
+ else
+ map->size = 0;
+ WARN_ON_ONCE(map->size == 0);
+
+ pdev->dev.dma_range_map = map;
+}
+
void pcibios_bus_add_device(struct pci_dev *pdev)
{
struct zpci_dev *zdev = to_zpci(pdev);
+ pci_dma_range_setup(pdev);
+
/*
* With pdev->no_vf_scan the common PCI probing code does not
* perform PF/VF linking.
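pci_dma_range_setup() clamps the device's reported DMA window to whole pages before publishing it as the bus_dma_region. A worked example of that arithmetic, assuming 4 KiB pages and made-up aperture values:

	/* Worked example (hypothetical values, 4 KiB pages):
	 *	zdev->start_dma = 0x00100000, zdev->end_dma = 0x3fffffff
	 *	map->dma_start  = PAGE_ALIGN(0x00100000)          = 0x00100000
	 *	aligned_end     = PAGE_ALIGN_DOWN(0x3fffffff + 1) = 0x40000000
	 *	map->size       = 0x40000000 - 0x00100000         = 0x3ff00000
	 * i.e. DMA addresses offered to the device lie in [0x00100000, 0x40000000).
	 */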
diff --git a/arch/s390/pci/pci_clp.c b/arch/s390/pci/pci_clp.c
index 14bf7e8d06b7..27248686e588 100644
--- a/arch/s390/pci/pci_clp.c
+++ b/arch/s390/pci/pci_clp.c
@@ -112,6 +112,7 @@ static void clp_store_query_pci_fngrp(struct zpci_dev *zdev,
zdev->version = response->version;
zdev->maxstbl = response->maxstbl;
zdev->dtsm = response->dtsm;
+ zdev->rtr_avail = response->rtr;
switch (response->version) {
case 1:
diff --git a/arch/s390/pci/pci_sysfs.c b/arch/s390/pci/pci_sysfs.c
index 2de1ea6c3a8c..0ecad08e1b1e 100644
--- a/arch/s390/pci/pci_sysfs.c
+++ b/arch/s390/pci/pci_sysfs.c
@@ -52,7 +52,6 @@ static DEVICE_ATTR_RO(mio_enabled);
static int _do_recover(struct pci_dev *pdev, struct zpci_dev *zdev)
{
- u8 status;
int ret;
pci_stop_and_remove_bus_device(pdev);
@@ -70,16 +69,8 @@ static int _do_recover(struct pci_dev *pdev, struct zpci_dev *zdev)
return ret;
}
- ret = zpci_enable_device(zdev);
- if (ret)
- return ret;
+ ret = zpci_reenable_device(zdev);
- if (zdev->dma_table) {
- ret = zpci_register_ioat(zdev, 0, zdev->start_dma, zdev->end_dma,
- virt_to_phys(zdev->dma_table), &status);
- if (ret)
- zpci_disable_device(zdev);
- }
return ret;
}
diff --git a/arch/sh/configs/se7206_defconfig b/arch/sh/configs/se7206_defconfig
index 78e0e7be57ee..472fdf365cad 100644
--- a/arch/sh/configs/se7206_defconfig
+++ b/arch/sh/configs/se7206_defconfig
@@ -104,5 +104,3 @@ CONFIG_CRYPTO_LZO=y
CONFIG_CRC_CCITT=y
CONFIG_CRC16=y
CONFIG_CRC_ITU_T=y
-CONFIG_CRC7=y
-CONFIG_LIBCRC32C=y
diff --git a/arch/sh/configs/sh2007_defconfig b/arch/sh/configs/sh2007_defconfig
index 977ae405a75e..1b1174a07e36 100644
--- a/arch/sh/configs/sh2007_defconfig
+++ b/arch/sh/configs/sh2007_defconfig
@@ -195,4 +195,3 @@ CONFIG_CRYPTO_LZO=y
# CONFIG_CRYPTO_HW is not set
CONFIG_CRC_CCITT=y
CONFIG_CRC16=y
-CONFIG_LIBCRC32C=y
diff --git a/arch/sh/configs/titan_defconfig b/arch/sh/configs/titan_defconfig
index 99bc0e889287..11ff5fd510de 100644
--- a/arch/sh/configs/titan_defconfig
+++ b/arch/sh/configs/titan_defconfig
@@ -266,4 +266,3 @@ CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
# CONFIG_CRYPTO_ANSI_CPRNG is not set
CONFIG_CRC16=m
-CONFIG_LIBCRC32C=m
diff --git a/arch/sh/include/asm/io.h b/arch/sh/include/asm/io.h
index cf5eab840d57..0f663ebec700 100644
--- a/arch/sh/include/asm/io.h
+++ b/arch/sh/include/asm/io.h
@@ -19,7 +19,6 @@
#include <asm/machvec.h>
#include <asm/page.h>
#include <linux/pgtable.h>
-#include <asm-generic/iomap.h>
#define __IO_PREFIX generic
#include <asm/io_generic.h>
@@ -100,7 +99,7 @@ pfx##writes##bwlq(volatile void __iomem *mem, const void *addr, \
} \
} \
\
-static inline void pfx##reads##bwlq(volatile void __iomem *mem, \
+static inline void pfx##reads##bwlq(const volatile void __iomem *mem, \
void *addr, unsigned int count) \
{ \
volatile type *__addr = addr; \
@@ -114,37 +113,18 @@ static inline void pfx##reads##bwlq(volatile void __iomem *mem, \
__BUILD_MEMORY_STRING(__raw_, b, u8)
__BUILD_MEMORY_STRING(__raw_, w, u16)
-void __raw_writesl(void __iomem *addr, const void *data, int longlen);
-void __raw_readsl(const void __iomem *addr, void *data, int longlen);
+void __raw_writesl(void volatile __iomem *addr, const void *data, int longlen);
+void __raw_readsl(const volatile void __iomem *addr, void *data, int longlen);
__BUILD_MEMORY_STRING(__raw_, q, u64)
#define ioport_map ioport_map
-#define ioport_unmap ioport_unmap
#define pci_iounmap pci_iounmap
-#define ioread8 ioread8
-#define ioread16 ioread16
-#define ioread16be ioread16be
-#define ioread32 ioread32
-#define ioread32be ioread32be
-
-#define iowrite8 iowrite8
-#define iowrite16 iowrite16
-#define iowrite16be iowrite16be
-#define iowrite32 iowrite32
-#define iowrite32be iowrite32be
-
-#define ioread8_rep ioread8_rep
-#define ioread16_rep ioread16_rep
-#define ioread32_rep ioread32_rep
-
-#define iowrite8_rep iowrite8_rep
-#define iowrite16_rep iowrite16_rep
-#define iowrite32_rep iowrite32_rep
-
#ifdef CONFIG_HAS_IOPORT_MAP
+extern void __iomem *ioport_map(unsigned long port, unsigned int nr);
+
/*
* Slowdown I/O port space accesses for antique hardware.
*/
diff --git a/arch/sh/kernel/Makefile b/arch/sh/kernel/Makefile
index ba917008d63e..7b453592adaf 100644
--- a/arch/sh/kernel/Makefile
+++ b/arch/sh/kernel/Makefile
@@ -21,10 +21,7 @@ obj-y := head_32.o debugtraps.o dumpstack.o \
syscalls_32.o time.o topology.o traps.o \
traps_32.o unwinder.o
-ifndef CONFIG_GENERIC_IOMAP
-obj-y += iomap.o
obj-$(CONFIG_HAS_IOPORT_MAP) += ioport.o
-endif
obj-y += sys_sh32.o
obj-y += cpu/
diff --git a/arch/sh/kernel/iomap.c b/arch/sh/kernel/iomap.c
deleted file mode 100644
index 0a0dff4e66de..000000000000
--- a/arch/sh/kernel/iomap.c
+++ /dev/null
@@ -1,162 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * arch/sh/kernel/iomap.c
- *
- * Copyright (C) 2000 Niibe Yutaka
- * Copyright (C) 2005 - 2007 Paul Mundt
- */
-#include <linux/module.h>
-#include <linux/io.h>
-
-unsigned int ioread8(const void __iomem *addr)
-{
- return readb(addr);
-}
-EXPORT_SYMBOL(ioread8);
-
-unsigned int ioread16(const void __iomem *addr)
-{
- return readw(addr);
-}
-EXPORT_SYMBOL(ioread16);
-
-unsigned int ioread16be(const void __iomem *addr)
-{
- return be16_to_cpu(__raw_readw(addr));
-}
-EXPORT_SYMBOL(ioread16be);
-
-unsigned int ioread32(const void __iomem *addr)
-{
- return readl(addr);
-}
-EXPORT_SYMBOL(ioread32);
-
-unsigned int ioread32be(const void __iomem *addr)
-{
- return be32_to_cpu(__raw_readl(addr));
-}
-EXPORT_SYMBOL(ioread32be);
-
-void iowrite8(u8 val, void __iomem *addr)
-{
- writeb(val, addr);
-}
-EXPORT_SYMBOL(iowrite8);
-
-void iowrite16(u16 val, void __iomem *addr)
-{
- writew(val, addr);
-}
-EXPORT_SYMBOL(iowrite16);
-
-void iowrite16be(u16 val, void __iomem *addr)
-{
- __raw_writew(cpu_to_be16(val), addr);
-}
-EXPORT_SYMBOL(iowrite16be);
-
-void iowrite32(u32 val, void __iomem *addr)
-{
- writel(val, addr);
-}
-EXPORT_SYMBOL(iowrite32);
-
-void iowrite32be(u32 val, void __iomem *addr)
-{
- __raw_writel(cpu_to_be32(val), addr);
-}
-EXPORT_SYMBOL(iowrite32be);
-
-/*
- * These are the "repeat MMIO read/write" functions.
- * Note the "__raw" accesses, since we don't want to
- * convert to CPU byte order. We write in "IO byte
- * order" (we also don't have IO barriers).
- */
-static inline void mmio_insb(const void __iomem *addr, u8 *dst, int count)
-{
- while (--count >= 0) {
- u8 data = __raw_readb(addr);
- *dst = data;
- dst++;
- }
-}
-
-static inline void mmio_insw(const void __iomem *addr, u16 *dst, int count)
-{
- while (--count >= 0) {
- u16 data = __raw_readw(addr);
- *dst = data;
- dst++;
- }
-}
-
-static inline void mmio_insl(const void __iomem *addr, u32 *dst, int count)
-{
- while (--count >= 0) {
- u32 data = __raw_readl(addr);
- *dst = data;
- dst++;
- }
-}
-
-static inline void mmio_outsb(void __iomem *addr, const u8 *src, int count)
-{
- while (--count >= 0) {
- __raw_writeb(*src, addr);
- src++;
- }
-}
-
-static inline void mmio_outsw(void __iomem *addr, const u16 *src, int count)
-{
- while (--count >= 0) {
- __raw_writew(*src, addr);
- src++;
- }
-}
-
-static inline void mmio_outsl(void __iomem *addr, const u32 *src, int count)
-{
- while (--count >= 0) {
- __raw_writel(*src, addr);
- src++;
- }
-}
-
-void ioread8_rep(const void __iomem *addr, void *dst, unsigned long count)
-{
- mmio_insb(addr, dst, count);
-}
-EXPORT_SYMBOL(ioread8_rep);
-
-void ioread16_rep(const void __iomem *addr, void *dst, unsigned long count)
-{
- mmio_insw(addr, dst, count);
-}
-EXPORT_SYMBOL(ioread16_rep);
-
-void ioread32_rep(const void __iomem *addr, void *dst, unsigned long count)
-{
- mmio_insl(addr, dst, count);
-}
-EXPORT_SYMBOL(ioread32_rep);
-
-void iowrite8_rep(void __iomem *addr, const void *src, unsigned long count)
-{
- mmio_outsb(addr, src, count);
-}
-EXPORT_SYMBOL(iowrite8_rep);
-
-void iowrite16_rep(void __iomem *addr, const void *src, unsigned long count)
-{
- mmio_outsw(addr, src, count);
-}
-EXPORT_SYMBOL(iowrite16_rep);
-
-void iowrite32_rep(void __iomem *addr, const void *src, unsigned long count)
-{
- mmio_outsl(addr, src, count);
-}
-EXPORT_SYMBOL(iowrite32_rep);
diff --git a/arch/sh/kernel/ioport.c b/arch/sh/kernel/ioport.c
index c8aff8a20164..915a3dfd9f02 100644
--- a/arch/sh/kernel/ioport.c
+++ b/arch/sh/kernel/ioport.c
@@ -23,8 +23,3 @@ void __iomem *ioport_map(unsigned long port, unsigned int nr)
return (void __iomem *)(port + sh_io_port_base);
}
EXPORT_SYMBOL(ioport_map);
-
-void ioport_unmap(void __iomem *addr)
-{
-}
-EXPORT_SYMBOL(ioport_unmap);
diff --git a/arch/sh/kernel/syscalls/syscall.tbl b/arch/sh/kernel/syscalls/syscall.tbl
index c8cad33bf250..52a7652fcff6 100644
--- a/arch/sh/kernel/syscalls/syscall.tbl
+++ b/arch/sh/kernel/syscalls/syscall.tbl
@@ -470,3 +470,4 @@
464 common getxattrat sys_getxattrat
465 common listxattrat sys_listxattrat
466 common removexattrat sys_removexattrat
+467 common open_tree_attr sys_open_tree_attr
diff --git a/arch/sh/kernel/vsyscall/vsyscall.c b/arch/sh/kernel/vsyscall/vsyscall.c
index add35c51e017..1563dcc55fd3 100644
--- a/arch/sh/kernel/vsyscall/vsyscall.c
+++ b/arch/sh/kernel/vsyscall/vsyscall.c
@@ -14,6 +14,7 @@
#include <linux/module.h>
#include <linux/elf.h>
#include <linux/sched.h>
+#include <linux/sysctl.h>
#include <linux/err.h>
/*
@@ -30,6 +31,18 @@ static int __init vdso_setup(char *s)
}
__setup("vdso=", vdso_setup);
+static const struct ctl_table vdso_table[] = {
+ {
+ .procname = "vdso_enabled",
+ .data = &vdso_enabled,
+ .maxlen = sizeof(vdso_enabled),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = SYSCTL_ONE,
+ },
+};
+
/*
* These symbols are defined by vsyscall.o to mark the bounds
* of the ELF DSO images included therein.
@@ -58,6 +71,14 @@ int __init vsyscall_init(void)
return 0;
}
+static int __init vm_sysctl_init(void)
+{
+ register_sysctl_init("vm", vdso_table);
+ return 0;
+}
+
+fs_initcall(vm_sysctl_init);
+
/* Setup a VMA at program startup for the vsyscall page */
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
diff --git a/arch/sh/lib/io.c b/arch/sh/lib/io.c
index ebcf7c0a7335..dc6345e4c53b 100644
--- a/arch/sh/lib/io.c
+++ b/arch/sh/lib/io.c
@@ -11,7 +11,7 @@
#include <linux/module.h>
#include <linux/io.h>
-void __raw_readsl(const void __iomem *addr, void *datap, int len)
+void __raw_readsl(const volatile void __iomem *addr, void *datap, int len)
{
u32 *data;
@@ -60,7 +60,7 @@ void __raw_readsl(const void __iomem *addr, void *datap, int len)
}
EXPORT_SYMBOL(__raw_readsl);
-void __raw_writesl(void __iomem *addr, const void *data, int len)
+void __raw_writesl(volatile void __iomem *addr, const void *data, int len)
{
if (likely(len != 0)) {
int tmp1;
diff --git a/arch/sparc/configs/sparc32_defconfig b/arch/sparc/configs/sparc32_defconfig
index 5010164de3e4..f6341b063b01 100644
--- a/arch/sparc/configs/sparc32_defconfig
+++ b/arch/sparc/configs/sparc32_defconfig
@@ -94,4 +94,3 @@ CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_TWOFISH=m
# CONFIG_CRYPTO_ANSI_CPRNG is not set
# CONFIG_CRYPTO_HW is not set
-CONFIG_LIBCRC32C=m
diff --git a/arch/sparc/configs/sparc64_defconfig b/arch/sparc/configs/sparc64_defconfig
index 0bb0d4da5227..01b2bdfbf9a8 100644
--- a/arch/sparc/configs/sparc64_defconfig
+++ b/arch/sparc/configs/sparc64_defconfig
@@ -230,7 +230,6 @@ CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
# CONFIG_CRYPTO_ANSI_CPRNG is not set
CONFIG_CRC16=m
-CONFIG_LIBCRC32C=m
CONFIG_VCC=m
CONFIG_PATA_CMD64X=y
CONFIG_IP_PNP=y
diff --git a/arch/sparc/kernel/syscalls/syscall.tbl b/arch/sparc/kernel/syscalls/syscall.tbl
index 727f99d333b3..83e45eb6c095 100644
--- a/arch/sparc/kernel/syscalls/syscall.tbl
+++ b/arch/sparc/kernel/syscalls/syscall.tbl
@@ -512,3 +512,4 @@
464 common getxattrat sys_getxattrat
465 common listxattrat sys_listxattrat
466 common removexattrat sys_removexattrat
+467 common open_tree_attr sys_open_tree_attr
diff --git a/arch/sparc/lib/crc32_glue.c b/arch/sparc/lib/crc32_glue.c
index 41076d2b1fd2..a70752c729cf 100644
--- a/arch/sparc/lib/crc32_glue.c
+++ b/arch/sparc/lib/crc32_glue.c
@@ -27,17 +27,17 @@ EXPORT_SYMBOL(crc32_le_arch);
void crc32c_sparc64(u32 *crcp, const u64 *data, size_t len);
-u32 crc32c_le_arch(u32 crc, const u8 *data, size_t len)
+u32 crc32c_arch(u32 crc, const u8 *data, size_t len)
{
size_t n = -(uintptr_t)data & 7;
if (!static_branch_likely(&have_crc32c_opcode))
- return crc32c_le_base(crc, data, len);
+ return crc32c_base(crc, data, len);
if (n) {
/* Data isn't 8-byte aligned. Align it. */
n = min(n, len);
- crc = crc32c_le_base(crc, data, n);
+ crc = crc32c_base(crc, data, n);
data += n;
len -= n;
}
@@ -48,10 +48,10 @@ u32 crc32c_le_arch(u32 crc, const u8 *data, size_t len)
len -= n;
}
if (len)
- crc = crc32c_le_base(crc, data, len);
+ crc = crc32c_base(crc, data, len);
return crc;
}
-EXPORT_SYMBOL(crc32c_le_arch);
+EXPORT_SYMBOL(crc32c_arch);
u32 crc32_be_arch(u32 crc, const u8 *data, size_t len)
{
diff --git a/arch/sparc/vdso/Makefile b/arch/sparc/vdso/Makefile
index 50ec2978cda5..fdc4a8f5a49c 100644
--- a/arch/sparc/vdso/Makefile
+++ b/arch/sparc/vdso/Makefile
@@ -22,7 +22,7 @@ targets += $(foreach x, 32 64, vdso-image-$(x).c vdso$(x).so vdso$(x).so.dbg)
CPPFLAGS_vdso.lds += -P -C
-VDSO_LDFLAGS_vdso.lds = -m elf64_sparc -soname linux-vdso.so.1 --no-undefined \
+VDSO_LDFLAGS_vdso.lds = -m elf64_sparc -soname linux-vdso.so.1 \
-z max-page-size=8192
$(obj)/vdso64.so.dbg: $(obj)/vdso.lds $(vobjs) FORCE
@@ -101,7 +101,6 @@ $(obj)/vdso32.so.dbg: FORCE \
quiet_cmd_vdso = VDSO $@
cmd_vdso = $(LD) -nostdlib -o $@ \
$(VDSO_LDFLAGS) $(VDSO_LDFLAGS_$(filter %.lds,$(^F))) \
- -T $(filter %.lds,$^) $(filter %.o,$^) && \
- sh $(src)/checkundef.sh '$(OBJDUMP)' '$@'
+ -T $(filter %.lds,$^) $(filter %.o,$^)
-VDSO_LDFLAGS = -shared --hash-style=both --build-id=sha1 -Bsymbolic
+VDSO_LDFLAGS = -shared --hash-style=both --build-id=sha1 -Bsymbolic --no-undefined
diff --git a/arch/sparc/vdso/checkundef.sh b/arch/sparc/vdso/checkundef.sh
deleted file mode 100644
index 2d85876ffc32..000000000000
--- a/arch/sparc/vdso/checkundef.sh
+++ /dev/null
@@ -1,10 +0,0 @@
-#!/bin/sh
-objdump="$1"
-file="$2"
-$objdump -t "$file" | grep '*UUND*' | grep -v '#scratch' > /dev/null 2>&1
-if [ $? -eq 1 ]; then
- exit 0
-else
- echo "$file: undefined symbols found" >&2
- exit 1
-fi
diff --git a/arch/um/kernel/um_arch.c b/arch/um/kernel/um_arch.c
index 79ea97d4797e..8be91974e786 100644
--- a/arch/um/kernel/um_arch.c
+++ b/arch/um/kernel/um_arch.c
@@ -440,25 +440,24 @@ void __init arch_cpu_finalize_init(void)
os_check_bugs();
}
-void apply_seal_endbr(s32 *start, s32 *end, struct module *mod)
+void apply_seal_endbr(s32 *start, s32 *end)
{
}
-void apply_retpolines(s32 *start, s32 *end, struct module *mod)
+void apply_retpolines(s32 *start, s32 *end)
{
}
-void apply_returns(s32 *start, s32 *end, struct module *mod)
+void apply_returns(s32 *start, s32 *end)
{
}
void apply_fineibt(s32 *start_retpoline, s32 *end_retpoline,
- s32 *start_cfi, s32 *end_cfi, struct module *mod)
+ s32 *start_cfi, s32 *end_cfi)
{
}
-void apply_alternatives(struct alt_instr *start, struct alt_instr *end,
- struct module *mod)
+void apply_alternatives(struct alt_instr *start, struct alt_instr *end)
{
}
diff --git a/arch/x86/Kbuild b/arch/x86/Kbuild
index cf0ad89f5639..f7fb3d88c57b 100644
--- a/arch/x86/Kbuild
+++ b/arch/x86/Kbuild
@@ -1,4 +1,8 @@
# SPDX-License-Identifier: GPL-2.0
+
+# Branch profiling isn't noinstr-safe. Disable it for arch/x86/*
+subdir-ccflags-$(CONFIG_TRACE_BRANCH_PROFILING) += -DDISABLE_BRANCH_PROFILING
+
obj-$(CONFIG_ARCH_HAS_CC_PLATFORM) += coco/
obj-y += entry/
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index be2c311f5118..9427b5292ca2 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -77,7 +77,8 @@ config X86
select ARCH_HAS_CPU_FINALIZE_INIT
select ARCH_HAS_CPU_PASID if IOMMU_SVA
select ARCH_HAS_CRC32
- select ARCH_HAS_CRC_T10DIF if X86_64
+ select ARCH_HAS_CRC64 if X86_64
+ select ARCH_HAS_CRC_T10DIF
select ARCH_HAS_CURRENT_STACK_POINTER
select ARCH_HAS_DEBUG_VIRTUAL
select ARCH_HAS_DEBUG_VM_PGTABLE if !X86_PAE
@@ -85,6 +86,7 @@ config X86
select ARCH_HAS_DMA_OPS if GART_IOMMU || XEN
select ARCH_HAS_EARLY_DEBUG if KGDB
select ARCH_HAS_ELF_RANDOMIZE
+ select ARCH_HAS_EXECMEM_ROX if X86_64
select ARCH_HAS_FAST_MULTIPLIER
select ARCH_HAS_FORTIFY_SOURCE
select ARCH_HAS_GCOV_PROFILE_ALL
@@ -132,7 +134,7 @@ config X86
select ARCH_SUPPORTS_AUTOFDO_CLANG
select ARCH_SUPPORTS_PROPELLER_CLANG if X86_64
select ARCH_USE_BUILTIN_BSWAP
- select ARCH_USE_CMPXCHG_LOCKREF if X86_CMPXCHG64
+ select ARCH_USE_CMPXCHG_LOCKREF if X86_CX8
select ARCH_USE_MEMTEST
select ARCH_USE_QUEUED_RWLOCKS
select ARCH_USE_QUEUED_SPINLOCKS
@@ -178,6 +180,7 @@ config X86
select GENERIC_SMP_IDLE_THREAD
select GENERIC_TIME_VSYSCALL
select GENERIC_GETTIMEOFDAY
+ select GENERIC_VDSO_DATA_STORE
select GENERIC_VDSO_TIME_NS
select GENERIC_VDSO_OVERFLOW_PROTECT
select GUP_GET_PXX_LOW_HIGH if X86_PAE
@@ -232,7 +235,7 @@ config X86
select HAVE_SAMPLE_FTRACE_DIRECT_MULTI if X86_64
select HAVE_EBPF_JIT
select HAVE_EFFICIENT_UNALIGNED_ACCESS
- select HAVE_EISA
+ select HAVE_EISA if X86_32
select HAVE_EXIT_THREAD
select HAVE_GUP_FAST
select HAVE_FENTRY if X86_64 || DYNAMIC_FTRACE
@@ -277,7 +280,7 @@ config X86
select HAVE_PCI
select HAVE_PERF_REGS
select HAVE_PERF_USER_STACK_DUMP
- select MMU_GATHER_RCU_TABLE_FREE if PARAVIRT
+ select MMU_GATHER_RCU_TABLE_FREE
select MMU_GATHER_MERGE_VMAS
select HAVE_POSIX_CPU_TIMERS_TASK_WORK
select HAVE_REGS_AND_STACK_ACCESS_API
@@ -285,7 +288,7 @@ config X86
select HAVE_FUNCTION_ARG_ACCESS_API
select HAVE_SETUP_PER_CPU_AREA
select HAVE_SOFTIRQ_ON_OWN_STACK
- select HAVE_STACKPROTECTOR if CC_HAS_SANE_STACKPROTECTOR
+ select HAVE_STACKPROTECTOR
select HAVE_STACK_VALIDATION if HAVE_OBJTOOL
select HAVE_STATIC_CALL
select HAVE_STATIC_CALL_INLINE if HAVE_OBJTOOL
@@ -426,15 +429,6 @@ config PGTABLE_LEVELS
default 3 if X86_PAE
default 2
-config CC_HAS_SANE_STACKPROTECTOR
- bool
- default $(success,$(srctree)/scripts/gcc-x86_64-has-stack-protector.sh $(CC) $(CLANG_FLAGS)) if 64BIT
- default $(success,$(srctree)/scripts/gcc-x86_32-has-stack-protector.sh $(CC) $(CLANG_FLAGS))
- help
- We have to make sure stack protector is unconditionally disabled if
- the compiler produces broken code or if it does not let us control
- the segment on 32-bit kernels.
-
menu "Processor type and features"
config SMP
@@ -505,6 +499,7 @@ config X86_CPU_RESCTRL
depends on X86 && (CPU_SUP_INTEL || CPU_SUP_AMD)
select KERNFS
select PROC_CPU_RESCTRL if PROC_FS
+ select RESCTRL_FS_PSEUDO_LOCK
help
Enable x86 CPU resource control support.
@@ -521,6 +516,12 @@ config X86_CPU_RESCTRL
Say N if unsure.
+config RESCTRL_FS_PSEUDO_LOCK
+ bool
+ help
+ Software mechanism to pin data in a cache portion using
+ micro-architecture specific knowledge.
+
config X86_FRED
bool "Flexible Return and Event Delivery"
depends on X86_64
@@ -530,12 +531,6 @@ config X86_FRED
ring transitions and exception/interrupt handling if the
system supports it.
-config X86_BIGSMP
- bool "Support for big SMP systems with more than 8 CPUs"
- depends on SMP && X86_32
- help
- This option is needed for the systems that have more than 8 CPUs.
-
config X86_EXTENDED_PLATFORM
bool "Support for extended (non-PC) x86 platforms"
default y
@@ -553,13 +548,12 @@ config X86_EXTENDED_PLATFORM
AMD Elan
RDC R-321x SoC
SGI 320/540 (Visual Workstation)
- STA2X11-based (e.g. Northville)
- Moorestown MID devices
64-bit platforms (CONFIG_64BIT=y):
Numascale NumaChip
ScaleMP vSMP
SGI Ultraviolet
+ Merrifield/Moorefield MID devices
If you have one of these systems, or if you want to build a
generic distribution kernel, say Y here - otherwise say N.
@@ -604,8 +598,31 @@ config X86_UV
This option is needed in order to support SGI Ultraviolet systems.
If you don't have one of these, you should say N here.
-# Following is an alphabetically sorted list of 32 bit extended platforms
-# Please maintain the alphabetic order if and when there are additions
+config X86_INTEL_MID
+ bool "Intel Z34xx/Z35xx MID platform support"
+ depends on X86_EXTENDED_PLATFORM
+ depends on X86_PLATFORM_DEVICES
+ depends on PCI
+ depends on X86_64 || (EXPERT && PCI_GOANY)
+ depends on X86_IO_APIC
+ select I2C
+ select DW_APB_TIMER
+ select INTEL_SCU_PCI
+ help
+ Select to build a kernel capable of supporting 64-bit Intel MID
+ (Mobile Internet Device) platform systems which do not have
+ the PCI legacy interfaces.
+
+ The only supported devices are the 22nm Merrifield (Z34xx)
+ and Moorefield (Z35xx) SoC used in the Intel Edison board and
+ a small number of Android devices such as the Asus Zenfone 2,
+ Asus FonePad 8 and Dell Venue 7.
+
+ If you are building for a PC class system or non-MID tablet
+ SoCs like Bay Trail (Z36xx/Z37xx), say N here.
+
+ Intel MID platforms are based on an Intel processor and chipset which
+ consume less power than most of the x86 derivatives.
config X86_GOLDFISH
bool "Goldfish (Virtual Platform)"
@@ -615,6 +632,9 @@ config X86_GOLDFISH
for Android development. Unless you are building for the Android
Goldfish emulator say N here.
+# Following is an alphabetically sorted list of 32 bit extended platforms
+# Please maintain the alphabetic order if and when there are additions
+
config X86_INTEL_CE
bool "CE4100 TV platform"
depends on PCI
@@ -630,24 +650,6 @@ config X86_INTEL_CE
This option compiles in support for the CE4100 SOC for settop
boxes and media devices.
-config X86_INTEL_MID
- bool "Intel MID platform support"
- depends on X86_EXTENDED_PLATFORM
- depends on X86_PLATFORM_DEVICES
- depends on PCI
- depends on X86_64 || (PCI_GOANY && X86_32)
- depends on X86_IO_APIC
- select I2C
- select DW_APB_TIMER
- select INTEL_SCU_PCI
- help
- Select to build a kernel capable of supporting Intel MID (Mobile
- Internet Device) platform systems which do not have the PCI legacy
- interfaces. If you are building for a PC class system say N here.
-
- Intel MID platforms are based on an Intel processor and chipset which
- consume less power than most of the x86 derivatives.
-
config X86_INTEL_QUARK
bool "Intel Quark platform support"
depends on X86_32
@@ -729,18 +731,6 @@ config X86_RDC321X
as R-8610-(G).
If you don't have one of these chips, you should say N here.
-config X86_32_NON_STANDARD
- bool "Support non-standard 32-bit SMP architectures"
- depends on X86_32 && SMP
- depends on X86_EXTENDED_PLATFORM
- help
- This option compiles in the bigsmp and STA2X11 default
- subarchitectures. It is intended for a generic binary
- kernel. If you select them all, kernel will probe it one by
- one and will fallback to default.
-
-# Alphabetically sorted list of Non standard 32 bit platforms
-
config X86_SUPPORTS_MEMORY_FAILURE
def_bool y
# MCE code calls memory_failure():
@@ -750,19 +740,6 @@ config X86_SUPPORTS_MEMORY_FAILURE
depends on X86_64 || !SPARSEMEM
select ARCH_SUPPORTS_MEMORY_FAILURE
-config STA2X11
- bool "STA2X11 Companion Chip Support"
- depends on X86_32_NON_STANDARD && PCI
- select SWIOTLB
- select MFD_STA2X11
- select GPIOLIB
- help
- This adds support for boards based on the STA2X11 IO-Hub,
- a.k.a. "ConneXt". The chip is used in place of the standard
- PC chipset, so all "standard" peripherals are missing. If this
- option is selected the kernel will still be able to boot on
- standard PC machines.
-
config X86_32_IRIS
tristate "Eurobraille/Iris poweroff module"
depends on X86_32
@@ -1012,8 +989,7 @@ config NR_CPUS_RANGE_BEGIN
config NR_CPUS_RANGE_END
int
depends on X86_32
- default 64 if SMP && X86_BIGSMP
- default 8 if SMP && !X86_BIGSMP
+ default 8 if SMP
default 1 if !SMP
config NR_CPUS_RANGE_END
@@ -1026,7 +1002,6 @@ config NR_CPUS_RANGE_END
config NR_CPUS_DEFAULT
int
depends on X86_32
- default 32 if X86_BIGSMP
default 8 if SMP
default 1 if !SMP
@@ -1102,7 +1077,7 @@ config UP_LATE_INIT
config X86_UP_APIC
bool "Local APIC support on uniprocessors" if !PCI_MSI
default PCI_MSI
- depends on X86_32 && !SMP && !X86_32_NON_STANDARD
+ depends on X86_32 && !SMP
help
A local APIC (Advanced Programmable Interrupt Controller) is an
integrated interrupt controller in the CPU. If you have a single-CPU
@@ -1127,7 +1102,7 @@ config X86_UP_IOAPIC
config X86_LOCAL_APIC
def_bool y
- depends on X86_64 || SMP || X86_32_NON_STANDARD || X86_UP_APIC || PCI_MSI
+ depends on X86_64 || SMP || X86_UP_APIC || PCI_MSI
select IRQ_DOMAIN_HIERARCHY
config ACPI_MADT_WAKEUP
@@ -1341,6 +1316,7 @@ config X86_REBOOTFIXUPS
config MICROCODE
def_bool y
depends on CPU_SUP_AMD || CPU_SUP_INTEL
+ select CRYPTO_LIB_SHA256 if CPU_SUP_AMD
config MICROCODE_INITRD32
def_bool y
@@ -1395,15 +1371,11 @@ config X86_CPUID
with major 203 and minors 0 to 31 for /dev/cpu/0/cpuid to
/dev/cpu/31/cpuid.
-choice
- prompt "High Memory Support"
- default HIGHMEM4G
+config HIGHMEM4G
+ bool "High Memory Support"
depends on X86_32
-
-config NOHIGHMEM
- bool "off"
help
- Linux can use up to 64 Gigabytes of physical memory on x86 systems.
+ Linux can use up to 4 Gigabytes of physical memory on x86 systems.
However, the address space of 32-bit x86 processors is only 4
Gigabytes large. That means that, if you have a large amount of
physical memory, not all of it can be "permanently mapped" by the
@@ -1419,38 +1391,9 @@ config NOHIGHMEM
possible.
If the machine has between 1 and 4 Gigabytes physical RAM, then
- answer "4GB" here.
-
- If more than 4 Gigabytes is used then answer "64GB" here. This
- selection turns Intel PAE (Physical Address Extension) mode on.
- PAE implements 3-level paging on IA32 processors. PAE is fully
- supported by Linux, PAE mode is implemented on all recent Intel
- processors (Pentium Pro and better). NOTE: If you say "64GB" here,
- then the kernel will not boot on CPUs that don't support PAE!
-
- The actual amount of total physical memory will either be
- auto detected or can be forced by using a kernel command line option
- such as "mem=256M". (Try "man bootparam" or see the documentation of
- your boot loader (lilo or loadlin) about how to pass options to the
- kernel at boot time.)
-
- If unsure, say "off".
-
-config HIGHMEM4G
- bool "4GB"
- help
- Select this if you have a 32-bit processor and between 1 and 4
- gigabytes of physical RAM.
-
-config HIGHMEM64G
- bool "64GB"
- depends on X86_HAVE_PAE
- select X86_PAE
- help
- Select this if you have a 32-bit processor and more than 4
- gigabytes of physical RAM.
+ answer "Y" here.
-endchoice
+ If unsure, say N.
choice
prompt "Memory split" if EXPERT
@@ -1496,14 +1439,12 @@ config PAGE_OFFSET
depends on X86_32
config HIGHMEM
- def_bool y
- depends on X86_32 && (HIGHMEM64G || HIGHMEM4G)
+ def_bool HIGHMEM4G
config X86_PAE
bool "PAE (Physical Address Extension) Support"
depends on X86_32 && X86_HAVE_PAE
select PHYS_ADDR_T_64BIT
- select SWIOTLB
help
PAE is required for NX support, and furthermore enables
larger swapspace support for non-overcommit purposes. It
@@ -1573,8 +1514,7 @@ config AMD_MEM_ENCRYPT
config NUMA
bool "NUMA Memory Allocation and Scheduler Support"
depends on SMP
- depends on X86_64 || (X86_32 && HIGHMEM64G && X86_BIGSMP)
- default y if X86_BIGSMP
+ depends on X86_64
select USE_PERCPU_NUMA_NODE_ID
select OF_NUMA if OF
help
@@ -1587,9 +1527,6 @@ config NUMA
For 64-bit this is recommended if the system is Intel Core i7
(or later), AMD Opteron, or EM64T NUMA.
- For 32-bit this is only needed if you boot a 32-bit
- kernel on a 64-bit NUMA platform.
-
Otherwise, you should say N.
config AMD_NUMA
@@ -1628,7 +1565,7 @@ config ARCH_FLATMEM_ENABLE
config ARCH_SPARSEMEM_ENABLE
def_bool y
- depends on X86_64 || NUMA || X86_32 || X86_32_NON_STANDARD
+ depends on X86_64 || NUMA || X86_32
select SPARSEMEM_STATIC if X86_32
select SPARSEMEM_VMEMMAP_ENABLE if X86_64
@@ -1674,15 +1611,6 @@ config X86_PMEM_LEGACY
Say Y if unsure.
-config HIGHPTE
- bool "Allocate 3rd-level pagetables from highmem"
- depends on HIGHMEM
- help
- The VM uses one page table entry for each page of physical memory.
- For systems with a lot of RAM, this can be wasteful of precious
- low memory. Setting this option will put user-space page table
- entries in high memory.
-
config X86_CHECK_BIOS_CORRUPTION
bool "Check for low memory corruption"
help
@@ -2450,18 +2378,20 @@ config CC_HAS_NAMED_AS
def_bool $(success,echo 'int __seg_fs fs; int __seg_gs gs;' | $(CC) -x c - -S -o /dev/null)
depends on CC_IS_GCC
+#
+# -fsanitize=kernel-address (KASAN) and -fsanitize=thread (KCSAN)
+# are incompatible with named address spaces with GCC < 13.3
+# (see GCC PR sanitizer/111736 and also PR sanitizer/115172).
+#
+
config CC_HAS_NAMED_AS_FIXED_SANITIZERS
- def_bool CC_IS_GCC && GCC_VERSION >= 130300
+ def_bool y
+ depends on !(KASAN || KCSAN) || GCC_VERSION >= 130300
+ depends on !(UBSAN_BOOL && KASAN) || GCC_VERSION >= 140200
config USE_X86_SEG_SUPPORT
- def_bool y
- depends on CC_HAS_NAMED_AS
- #
- # -fsanitize=kernel-address (KASAN) and -fsanitize=thread
- # (KCSAN) are incompatible with named address spaces with
- # GCC < 13.3 - see GCC PR sanitizer/111736.
- #
- depends on !(KASAN || KCSAN) || CC_HAS_NAMED_AS_FIXED_SANITIZERS
+ def_bool CC_HAS_NAMED_AS
+ depends on CC_HAS_NAMED_AS_FIXED_SANITIZERS
config CC_HAS_SLS
def_bool $(cc-option,-mharden-sls=all)
@@ -2472,6 +2402,10 @@ config CC_HAS_RETURN_THUNK
config CC_HAS_ENTRY_PADDING
def_bool $(cc-option,-fpatchable-function-entry=16,16)
+config CC_HAS_KCFI_ARITY
+ def_bool $(cc-option,-fsanitize=kcfi -fsanitize-kcfi-arity)
+ depends on CC_IS_CLANG && !RUST
+
config FUNCTION_PADDING_CFI
int
default 59 if FUNCTION_ALIGNMENT_64B
@@ -2497,6 +2431,10 @@ config FINEIBT
depends on X86_KERNEL_IBT && CFI_CLANG && MITIGATION_RETPOLINE
select CALL_PADDING
+config FINEIBT_BHI
+ def_bool y
+ depends on FINEIBT && CC_HAS_KCFI_ARITY
+
config HAVE_CALL_THUNKS
def_bool y
depends on CC_HAS_ENTRY_PADDING && MITIGATION_RETHUNK && OBJTOOL
@@ -3201,4 +3139,6 @@ config HAVE_ATOMIC_IOMAP
source "arch/x86/kvm/Kconfig"
+source "arch/x86/Kconfig.cpufeatures"
+
source "arch/x86/Kconfig.assembler"
diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
index 2a7279d80460..753b8763abae 100644
--- a/arch/x86/Kconfig.cpu
+++ b/arch/x86/Kconfig.cpu
@@ -1,9 +1,9 @@
# SPDX-License-Identifier: GPL-2.0
# Put here option for CPU selection and depending optimization
choice
- prompt "Processor family"
- default M686 if X86_32
- default GENERIC_CPU if X86_64
+ prompt "x86-32 Processor family"
+ depends on X86_32
+ default M686
help
This is the processor type of your CPU. This information is
used for optimizing purposes. In order to compile a kernel
@@ -31,7 +31,6 @@ choice
- "Pentium-4" for the Intel Pentium 4 or P4-based Celeron.
- "K6" for the AMD K6, K6-II and K6-III (aka K6-3D).
- "Athlon" for the AMD K7 family (Athlon/Duron/Thunderbird).
- - "Opteron/Athlon64/Hammer/K8" for all K8 and newer AMD CPUs.
- "Crusoe" for the Transmeta Crusoe series.
- "Efficeon" for the Transmeta Efficeon series.
- "Winchip-C6" for original IDT Winchip.
@@ -42,13 +41,10 @@ choice
- "CyrixIII/VIA C3" for VIA Cyrix III or VIA C3.
- "VIA C3-2" for VIA C3-2 "Nehemiah" (model 9 and above).
- "VIA C7" for VIA C7.
- - "Intel P4" for the Pentium 4/Netburst microarchitecture.
- - "Core 2/newer Xeon" for all core2 and newer Intel CPUs.
- "Intel Atom" for the Atom-microarchitecture CPUs.
- - "Generic-x86-64" for a kernel which runs on any x86-64 CPU.
See each option's help text for additional details. If you don't know
- what to do, choose "486".
+ what to do, choose "Pentium-Pro".
config M486SX
bool "486SX"
@@ -114,11 +110,11 @@ config MPENTIUMIII
extensions.
config MPENTIUMM
- bool "Pentium M"
+ bool "Pentium M/Pentium Dual Core/Core Solo/Core Duo"
depends on X86_32
help
Select this for Intel Pentium M (not Pentium-4 M)
- notebook chips.
+ "Merom" Core Solo/Duo notebook chips
config MPENTIUM4
bool "Pentium-4/Celeron(P4-based)/Pentium-4 M/older Xeon"
@@ -139,22 +135,10 @@ config MPENTIUM4
-Mobile Pentium 4
-Mobile Pentium 4 M
-Extreme Edition (Gallatin)
- -Prescott
- -Prescott 2M
- -Cedar Mill
- -Presler
- -Smithfiled
Xeons (Intel Xeon, Xeon MP, Xeon LV, Xeon MV) corename:
-Foster
-Prestonia
-Gallatin
- -Nocona
- -Irwindale
- -Cranford
- -Potomac
- -Paxville
- -Dempsey
-
config MK6
bool "K6/K6-II/K6-III"
@@ -172,13 +156,6 @@ config MK7
some extended instructions, and passes appropriate optimization
flags to GCC.
-config MK8
- bool "Opteron/Athlon64/Hammer/K8"
- help
- Select this for an AMD Opteron or Athlon64 Hammer-family processor.
- Enables use of some extended instructions, and passes appropriate
- optimization flags to GCC.
-
config MCRUSOE
bool "Crusoe"
depends on X86_32
@@ -258,42 +235,14 @@ config MVIAC7
Select this for a VIA C7. Selecting this uses the correct cache
shift and tells gcc to treat the CPU as a 686.
-config MPSC
- bool "Intel P4 / older Netburst based Xeon"
- depends on X86_64
- help
- Optimize for Intel Pentium 4, Pentium D and older Nocona/Dempsey
- Xeon CPUs with Intel 64bit which is compatible with x86-64.
- Note that the latest Xeons (Xeon 51xx and 53xx) are not based on the
- Netburst core and shouldn't use this option. You can distinguish them
- using the cpu family field
- in /proc/cpuinfo. Family 15 is an older Xeon, Family 6 a newer one.
-
-config MCORE2
- bool "Core 2/newer Xeon"
- help
-
- Select this for Intel Core 2 and newer Core 2 Xeons (Xeon 51xx and
- 53xx) CPUs. You can distinguish newer from older Xeons by the CPU
- family in /proc/cpuinfo. Newer ones have 6 and older ones 15
- (not a typo)
-
config MATOM
bool "Intel Atom"
help
-
Select this for the Intel Atom platform. Intel Atom CPUs have an
in-order pipelining architecture and thus can benefit from
accordingly optimized code. Use a recent GCC with specific Atom
support in order to fully benefit from selecting this option.
-config GENERIC_CPU
- bool "Generic-x86-64"
- depends on X86_64
- help
- Generic x86-64 CPU.
- Run equally well on all x86-64 CPUs.
-
endchoice
config X86_GENERIC
@@ -317,8 +266,8 @@ config X86_INTERNODE_CACHE_SHIFT
config X86_L1_CACHE_SHIFT
int
- default "7" if MPENTIUM4 || MPSC
- default "6" if MK7 || MK8 || MPENTIUMM || MCORE2 || MATOM || MVIAC7 || X86_GENERIC || GENERIC_CPU
+ default "7" if MPENTIUM4
+ default "6" if MK7 || MPENTIUMM || MATOM || MVIAC7 || X86_GENERIC || X86_64
default "4" if MELAN || M486SX || M486 || MGEODEGX1
default "5" if MWINCHIP3D || MWINCHIPC6 || MCRUSOE || MEFFICEON || MCYRIXIII || MK6 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || MVIAC3_2 || MGEODE_LX
@@ -336,51 +285,35 @@ config X86_ALIGNMENT_16
config X86_INTEL_USERCOPY
def_bool y
- depends on MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M586MMX || X86_GENERIC || MK8 || MK7 || MEFFICEON || MCORE2
+ depends on MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M586MMX || X86_GENERIC || MK7 || MEFFICEON
config X86_USE_PPRO_CHECKSUM
def_bool y
- depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MK8 || MVIAC3_2 || MVIAC7 || MEFFICEON || MGEODE_LX || MCORE2 || MATOM
-
-#
-# P6_NOPs are a relatively minor optimization that require a family >=
-# 6 processor, except that it is broken on certain VIA chips.
-# Furthermore, AMD chips prefer a totally different sequence of NOPs
-# (which work on all CPUs). In addition, it looks like Virtual PC
-# does not understand them.
-#
-# As a result, disallow these if we're not compiling for X86_64 (these
-# NOPs do work on all x86-64 capable chips); the list of processors in
-# the right-hand clause are the cores that benefit from this optimization.
-#
-config X86_P6_NOP
- def_bool y
- depends on X86_64
- depends on (MCORE2 || MPENTIUM4 || MPSC)
+ depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MEFFICEON || MGEODE_LX || MATOM
config X86_TSC
def_bool y
- depends on (MWINCHIP3D || MCRUSOE || MEFFICEON || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || MK8 || MVIAC3_2 || MVIAC7 || MGEODEGX1 || MGEODE_LX || MCORE2 || MATOM) || X86_64
+ depends on (MWINCHIP3D || MCRUSOE || MEFFICEON || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || MVIAC3_2 || MVIAC7 || MGEODEGX1 || MGEODE_LX || MATOM) || X86_64
config X86_HAVE_PAE
def_bool y
- depends on MCRUSOE || MEFFICEON || MCYRIXIII || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MK8 || MVIAC7 || MCORE2 || MATOM || X86_64
+ depends on MCRUSOE || MEFFICEON || MCYRIXIII || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC7 || MATOM || X86_64
-config X86_CMPXCHG64
+config X86_CX8
def_bool y
- depends on X86_HAVE_PAE || M586TSC || M586MMX || MK6 || MK7
+ depends on X86_HAVE_PAE || M586TSC || M586MMX || MK6 || MK7 || MGEODEGX1 || MGEODE_LX
# this should be set for all -march=.. options where the compiler
# generates cmov.
config X86_CMOV
def_bool y
- depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
+ depends on (MK7 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || MATOM || MGEODE_LX || X86_64)
config X86_MINIMUM_CPU_FAMILY
int
default "64" if X86_64
- default "6" if X86_32 && (MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MEFFICEON || MATOM || MCORE2 || MK7 || MK8)
- default "5" if X86_32 && X86_CMPXCHG64
+ default "6" if X86_32 && (MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MEFFICEON || MATOM || MK7)
+ default "5" if X86_32 && X86_CX8
default "4"
config X86_DEBUGCTLMSR
@@ -401,6 +334,10 @@ menuconfig PROCESSOR_SELECT
This lets you choose what x86 vendor support code your kernel
will include.
+config BROADCAST_TLB_FLUSH
+ def_bool y
+ depends on CPU_SUP_AMD && 64BIT
+
config CPU_SUP_INTEL
default y
bool "Support Intel processors" if PROCESSOR_SELECT
diff --git a/arch/x86/Kconfig.cpufeatures b/arch/x86/Kconfig.cpufeatures
new file mode 100644
index 000000000000..e12d5b7e39a2
--- /dev/null
+++ b/arch/x86/Kconfig.cpufeatures
@@ -0,0 +1,201 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# x86 feature bits (see arch/x86/include/asm/cpufeatures.h) that are
+# either REQUIRED to be enabled, or DISABLED (always ignored) for this
+# particular compile-time configuration. The tests for these features
+# are turned into compile-time constants via the generated
+# <asm/cpufeaturemasks.h>.
+#
+# The naming of these variables *must* match asm/cpufeatures.h, e.g.,
+# X86_FEATURE_ALWAYS <==> X86_REQUIRED_FEATURE_ALWAYS
+# X86_FEATURE_FRED <==> X86_DISABLED_FEATURE_FRED
+#
+# And these REQUIRED and DISABLED config options are manipulated in an
+# AWK script as the following example:
+#
+# +----------------------+
+# | X86_FRED = y ? |
+# +----------------------+
+# / \
+# Y / \ N
+# +-------------------------------------+ +-------------------------------+
+# | X86_DISABLED_FEATURE_FRED undefined | | X86_DISABLED_FEATURE_FRED = y |
+# +-------------------------------------+ +-------------------------------+
+# |
+# |
+# +-------------------------------------------+ |
+# | X86_FEATURE_FRED: feature word 12, bit 17 | ---->|
+# +-------------------------------------------+ |
+# |
+# |
+# +-------------------------------+
+# | set bit 17 of DISABLED_MASK12 |
+# +-------------------------------+
+#
+
+config X86_REQUIRED_FEATURE_ALWAYS
+ def_bool y
+
+config X86_REQUIRED_FEATURE_NOPL
+ def_bool y
+ depends on X86_64 || X86_P6_NOP
+
+config X86_REQUIRED_FEATURE_CX8
+ def_bool y
+ depends on X86_CX8
+
+# this should be set for all -march=.. options where the compiler
+# generates cmov.
+config X86_REQUIRED_FEATURE_CMOV
+ def_bool y
+ depends on X86_CMOV
+
+# this should be set for all -march= options where the compiler
+# generates movbe.
+config X86_REQUIRED_FEATURE_MOVBE
+ def_bool y
+ depends on MATOM
+
+config X86_REQUIRED_FEATURE_CPUID
+ def_bool y
+ depends on X86_64
+
+config X86_REQUIRED_FEATURE_UP
+ def_bool y
+ depends on !SMP
+
+config X86_REQUIRED_FEATURE_FPU
+ def_bool y
+ depends on !MATH_EMULATION
+
+config X86_REQUIRED_FEATURE_PAE
+ def_bool y
+ depends on X86_64 || X86_PAE
+
+config X86_REQUIRED_FEATURE_PSE
+ def_bool y
+ depends on X86_64 && !PARAVIRT_XXL
+
+config X86_REQUIRED_FEATURE_PGE
+ def_bool y
+ depends on X86_64 && !PARAVIRT_XXL
+
+config X86_REQUIRED_FEATURE_MSR
+ def_bool y
+ depends on X86_64
+
+config X86_REQUIRED_FEATURE_FXSR
+ def_bool y
+ depends on X86_64
+
+config X86_REQUIRED_FEATURE_XMM
+ def_bool y
+ depends on X86_64
+
+config X86_REQUIRED_FEATURE_XMM2
+ def_bool y
+ depends on X86_64
+
+config X86_REQUIRED_FEATURE_LM
+ def_bool y
+ depends on X86_64
+
+config X86_DISABLED_FEATURE_UMIP
+ def_bool y
+ depends on !X86_UMIP
+
+config X86_DISABLED_FEATURE_VME
+ def_bool y
+ depends on X86_64
+
+config X86_DISABLED_FEATURE_K6_MTRR
+ def_bool y
+ depends on X86_64
+
+config X86_DISABLED_FEATURE_CYRIX_ARR
+ def_bool y
+ depends on X86_64
+
+config X86_DISABLED_FEATURE_CENTAUR_MCR
+ def_bool y
+ depends on X86_64
+
+config X86_DISABLED_FEATURE_PCID
+ def_bool y
+ depends on !X86_64
+
+config X86_DISABLED_FEATURE_PKU
+ def_bool y
+ depends on !X86_INTEL_MEMORY_PROTECTION_KEYS
+
+config X86_DISABLED_FEATURE_OSPKE
+ def_bool y
+ depends on !X86_INTEL_MEMORY_PROTECTION_KEYS
+
+config X86_DISABLED_FEATURE_LA57
+ def_bool y
+ depends on !X86_5LEVEL
+
+config X86_DISABLED_FEATURE_PTI
+ def_bool y
+ depends on !MITIGATION_PAGE_TABLE_ISOLATION
+
+config X86_DISABLED_FEATURE_RETPOLINE
+ def_bool y
+ depends on !MITIGATION_RETPOLINE
+
+config X86_DISABLED_FEATURE_RETPOLINE_LFENCE
+ def_bool y
+ depends on !MITIGATION_RETPOLINE
+
+config X86_DISABLED_FEATURE_RETHUNK
+ def_bool y
+ depends on !MITIGATION_RETHUNK
+
+config X86_DISABLED_FEATURE_UNRET
+ def_bool y
+ depends on !MITIGATION_UNRET_ENTRY
+
+config X86_DISABLED_FEATURE_CALL_DEPTH
+ def_bool y
+ depends on !MITIGATION_CALL_DEPTH_TRACKING
+
+config X86_DISABLED_FEATURE_LAM
+ def_bool y
+ depends on !ADDRESS_MASKING
+
+config X86_DISABLED_FEATURE_ENQCMD
+ def_bool y
+ depends on !INTEL_IOMMU_SVM
+
+config X86_DISABLED_FEATURE_SGX
+ def_bool y
+ depends on !X86_SGX
+
+config X86_DISABLED_FEATURE_XENPV
+ def_bool y
+ depends on !XEN_PV
+
+config X86_DISABLED_FEATURE_TDX_GUEST
+ def_bool y
+ depends on !INTEL_TDX_GUEST
+
+config X86_DISABLED_FEATURE_USER_SHSTK
+ def_bool y
+ depends on !X86_USER_SHADOW_STACK
+
+config X86_DISABLED_FEATURE_IBT
+ def_bool y
+ depends on !X86_KERNEL_IBT
+
+config X86_DISABLED_FEATURE_FRED
+ def_bool y
+ depends on !X86_FRED
+
+config X86_DISABLED_FEATURE_SEV_SNP
+ def_bool y
+ depends on !KVM_AMD_SEV
+
+config X86_DISABLED_FEATURE_INVLPGB
+ def_bool y
+ depends on !BROADCAST_TLB_FLUSH
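
Because every entry above is an ordinary Kconfig bool, the set of feature bits a given configuration pins can be read straight out of the generated .config. A minimal sketch, assuming a configured x86 tree:

    # List the feature bits this configuration requires or compiles out.
    grep -E '^CONFIG_X86_(REQUIRED|DISABLED)_FEATURE_.*=y' .config
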
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index 5b773b34768d..0fc7e8fd1a2e 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -137,17 +137,12 @@ ifeq ($(CONFIG_X86_32),y)
include $(srctree)/arch/x86/Makefile_32.cpu
KBUILD_CFLAGS += $(cflags-y)
- # temporary until string.h is fixed
+ ifneq ($(call clang-min-version, 160000),y)
+ # https://github.com/llvm/llvm-project/issues/53645
KBUILD_CFLAGS += -ffreestanding
-
- ifeq ($(CONFIG_STACKPROTECTOR),y)
- ifeq ($(CONFIG_SMP),y)
- KBUILD_CFLAGS += -mstack-protector-guard-reg=fs \
- -mstack-protector-guard-symbol=__ref_stack_chk_guard
- else
- KBUILD_CFLAGS += -mstack-protector-guard=global
- endif
endif
+
+ percpu_seg := fs
else
BITS := 64
UTS_MACHINE := x86_64
@@ -178,25 +173,24 @@ else
# Use -mskip-rax-setup if supported.
KBUILD_CFLAGS += $(call cc-option,-mskip-rax-setup)
- # FIXME - should be integrated in Makefile.cpu (Makefile_32.cpu)
- cflags-$(CONFIG_MK8) += -march=k8
- cflags-$(CONFIG_MPSC) += -march=nocona
- cflags-$(CONFIG_MCORE2) += -march=core2
- cflags-$(CONFIG_MATOM) += -march=atom
- cflags-$(CONFIG_GENERIC_CPU) += -mtune=generic
- KBUILD_CFLAGS += $(cflags-y)
-
- rustflags-$(CONFIG_MK8) += -Ctarget-cpu=k8
- rustflags-$(CONFIG_MPSC) += -Ctarget-cpu=nocona
- rustflags-$(CONFIG_MCORE2) += -Ctarget-cpu=core2
- rustflags-$(CONFIG_MATOM) += -Ctarget-cpu=atom
- rustflags-$(CONFIG_GENERIC_CPU) += -Ztune-cpu=generic
- KBUILD_RUSTFLAGS += $(rustflags-y)
+ KBUILD_CFLAGS += -march=x86-64 -mtune=generic
+ KBUILD_RUSTFLAGS += -Ctarget-cpu=x86-64 -Ztune-cpu=generic
KBUILD_CFLAGS += -mno-red-zone
KBUILD_CFLAGS += -mcmodel=kernel
KBUILD_RUSTFLAGS += -Cno-redzone=y
KBUILD_RUSTFLAGS += -Ccode-model=kernel
+
+ percpu_seg := gs
+endif
+
+ifeq ($(CONFIG_STACKPROTECTOR),y)
+ ifeq ($(CONFIG_SMP),y)
+ KBUILD_CFLAGS += -mstack-protector-guard-reg=$(percpu_seg)
+ KBUILD_CFLAGS += -mstack-protector-guard-symbol=__ref_stack_chk_guard
+ else
+ KBUILD_CFLAGS += -mstack-protector-guard=global
+ endif
endif
#
@@ -277,6 +271,21 @@ archheaders:
$(Q)$(MAKE) $(build)=arch/x86/entry/syscalls all
###
+# <asm/cpufeaturemasks.h> header generation
+
+cpufeaturemasks.hdr := arch/x86/include/generated/asm/cpufeaturemasks.h
+cpufeaturemasks.awk := $(srctree)/arch/x86/tools/cpufeaturemasks.awk
+cpufeatures_hdr := $(srctree)/arch/x86/include/asm/cpufeatures.h
+targets += $(cpufeaturemasks.hdr)
+quiet_cmd_gen_featuremasks = GEN $@
+ cmd_gen_featuremasks = $(AWK) -f $(cpufeaturemasks.awk) $(cpufeatures_hdr) $(KCONFIG_CONFIG) > $@
+
+$(cpufeaturemasks.hdr): $(cpufeaturemasks.awk) $(cpufeatures_hdr) $(KCONFIG_CONFIG) FORCE
+ $(shell mkdir -p $(dir $@))
+ $(call if_changed,gen_featuremasks)
+archprepare: $(cpufeaturemasks.hdr)
+
+###
# Kernel objects
libs-y += arch/x86/lib/
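
The archprepare rule above generates <asm/cpufeaturemasks.h> by running an AWK script over cpufeatures.h and the Kconfig output. The header can also be regenerated by hand for inspection; a sketch assuming the default KCONFIG_CONFIG of .config and an in-tree build:

    mkdir -p arch/x86/include/generated/asm
    awk -f arch/x86/tools/cpufeaturemasks.awk \
        arch/x86/include/asm/cpufeatures.h .config \
        > arch/x86/include/generated/asm/cpufeaturemasks.h
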
diff --git a/arch/x86/Makefile_32.cpu b/arch/x86/Makefile_32.cpu
index 94834c4b5e5e..af7de9a42752 100644
--- a/arch/x86/Makefile_32.cpu
+++ b/arch/x86/Makefile_32.cpu
@@ -24,7 +24,6 @@ cflags-$(CONFIG_MK6) += -march=k6
# Please note, that patches that add -march=athlon-xp and friends are pointless.
# They make zero difference whatsosever to performance at this time.
cflags-$(CONFIG_MK7) += -march=athlon
-cflags-$(CONFIG_MK8) += $(call cc-option,-march=k8,-march=athlon)
cflags-$(CONFIG_MCRUSOE) += -march=i686 $(align)
cflags-$(CONFIG_MEFFICEON) += -march=i686 $(call tune,pentium3) $(align)
cflags-$(CONFIG_MWINCHIPC6) += $(call cc-option,-march=winchip-c6,-march=i586)
@@ -32,9 +31,7 @@ cflags-$(CONFIG_MWINCHIP3D) += $(call cc-option,-march=winchip2,-march=i586)
cflags-$(CONFIG_MCYRIXIII) += $(call cc-option,-march=c3,-march=i486) $(align)
cflags-$(CONFIG_MVIAC3_2) += $(call cc-option,-march=c3-2,-march=i686)
cflags-$(CONFIG_MVIAC7) += -march=i686
-cflags-$(CONFIG_MCORE2) += -march=i686 $(call tune,core2)
-cflags-$(CONFIG_MATOM) += $(call cc-option,-march=atom,$(call cc-option,-march=core2,-march=i686)) \
- $(call cc-option,-mtune=atom,$(call cc-option,-mtune=generic))
+cflags-$(CONFIG_MATOM) += -march=atom
# AMD Elan support
cflags-$(CONFIG_MELAN) += -march=i486
diff --git a/arch/x86/boot/.gitignore b/arch/x86/boot/.gitignore
index 1189be057ebd..070ef534c915 100644
--- a/arch/x86/boot/.gitignore
+++ b/arch/x86/boot/.gitignore
@@ -12,3 +12,4 @@ fdimage
mtools.conf
image.iso
hdimage
+tools/
diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
index 9cc0ff6e9067..8589471b65a1 100644
--- a/arch/x86/boot/Makefile
+++ b/arch/x86/boot/Makefile
@@ -35,7 +35,6 @@ setup-y += video-vesa.o
setup-y += video-bios.o
targets += $(setup-y)
-hostprogs := tools/build
hostprogs += mkcpustr
HOST_EXTRACFLAGS += -I$(srctree)/tools/include \
@@ -61,11 +60,9 @@ KBUILD_CFLAGS += $(CONFIG_CC_IMPLICIT_FALLTHROUGH)
$(obj)/bzImage: asflags-y := $(SVGA_MODE)
quiet_cmd_image = BUILD $@
-silent_redirect_image = >/dev/null
-cmd_image = $(obj)/tools/build $(obj)/setup.bin $(obj)/vmlinux.bin \
- $(obj)/zoffset.h $@ $($(quiet)redirect_image)
+ cmd_image = cp $< $@; truncate -s %4K $@; cat $(obj)/vmlinux.bin >>$@
-$(obj)/bzImage: $(obj)/setup.bin $(obj)/vmlinux.bin $(obj)/tools/build FORCE
+$(obj)/bzImage: $(obj)/setup.bin $(obj)/vmlinux.bin FORCE
$(call if_changed,image)
@$(kecho) 'Kernel: $@ is ready' ' (#'$(or $(KBUILD_BUILD_VERSION),`cat .version`)')'
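
The new cmd_image rule builds bzImage without a host tool: setup.bin is copied, padded to a 4 KiB multiple, and vmlinux.bin is appended. A rough manual equivalent, with illustrative paths relative to the object tree:

    cp arch/x86/boot/setup.bin bzImage.test
    truncate -s %4K bzImage.test          # pad setup to a 4 KiB boundary
    cat arch/x86/boot/vmlinux.bin >> bzImage.test
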
diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
index 0f24f7ebec9b..38f17a1e1e36 100644
--- a/arch/x86/boot/boot.h
+++ b/arch/x86/boot/boot.h
@@ -16,7 +16,7 @@
#define STACK_SIZE 1024 /* Minimum number of bytes for stack */
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <linux/stdarg.h>
#include <linux/types.h>
@@ -327,6 +327,6 @@ void probe_cards(int unsafe);
/* video-vesa.c */
void vesa_store_edid(void);
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif /* BOOT_BOOT_H */
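
Unlike the hand-maintained __ASSEMBLY__, __ASSEMBLER__ needs no -D flag because the preprocessor predefines it whenever it handles assembly input; GCC and Clang behave the same. A quick way to confirm this, as a sketch:

    # __ASSEMBLER__ is predefined when preprocessing assembly sources:
    echo | gcc -x assembler-with-cpp -dM -E - | grep __ASSEMBLER__
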
diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
index 606c74f27459..0e0b238e8363 100644
--- a/arch/x86/boot/compressed/Makefile
+++ b/arch/x86/boot/compressed/Makefile
@@ -98,6 +98,7 @@ ifdef CONFIG_X86_64
vmlinux-objs-$(CONFIG_AMD_MEM_ENCRYPT) += $(obj)/mem_encrypt.o
vmlinux-objs-y += $(obj)/pgtable_64.o
vmlinux-objs-$(CONFIG_AMD_MEM_ENCRYPT) += $(obj)/sev.o
+ vmlinux-objs-y += $(obj)/la57toggle.o
endif
vmlinux-objs-$(CONFIG_ACPI) += $(obj)/acpi.o
diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
index 1dcb794c5479..3dc86352cdbe 100644
--- a/arch/x86/boot/compressed/head_64.S
+++ b/arch/x86/boot/compressed/head_64.S
@@ -483,110 +483,7 @@ SYM_FUNC_START_LOCAL_NOALIGN(.Lrelocated)
jmp *%rax
SYM_FUNC_END(.Lrelocated)
-/*
- * This is the 32-bit trampoline that will be copied over to low memory. It
- * will be called using the ordinary 64-bit calling convention from code
- * running in 64-bit mode.
- *
- * Return address is at the top of the stack (might be above 4G).
- * The first argument (EDI) contains the address of the temporary PGD level
- * page table in 32-bit addressable memory which will be programmed into
- * register CR3.
- */
- .section ".rodata", "a", @progbits
-SYM_CODE_START(trampoline_32bit_src)
- /*
- * Preserve callee save 64-bit registers on the stack: this is
- * necessary because the architecture does not guarantee that GPRs will
- * retain their full 64-bit values across a 32-bit mode switch.
- */
- pushq %r15
- pushq %r14
- pushq %r13
- pushq %r12
- pushq %rbp
- pushq %rbx
-
- /* Preserve top half of RSP in a legacy mode GPR to avoid truncation */
- movq %rsp, %rbx
- shrq $32, %rbx
-
- /* Switch to compatibility mode (CS.L = 0 CS.D = 1) via far return */
- pushq $__KERNEL32_CS
- leaq 0f(%rip), %rax
- pushq %rax
- lretq
-
- /*
- * The 32-bit code below will do a far jump back to long mode and end
- * up here after reconfiguring the number of paging levels. First, the
- * stack pointer needs to be restored to its full 64-bit value before
- * the callee save register contents can be popped from the stack.
- */
-.Lret:
- shlq $32, %rbx
- orq %rbx, %rsp
-
- /* Restore the preserved 64-bit registers */
- popq %rbx
- popq %rbp
- popq %r12
- popq %r13
- popq %r14
- popq %r15
- retq
-
.code32
-0:
- /* Disable paging */
- movl %cr0, %eax
- btrl $X86_CR0_PG_BIT, %eax
- movl %eax, %cr0
-
- /* Point CR3 to the trampoline's new top level page table */
- movl %edi, %cr3
-
- /* Set EFER.LME=1 as a precaution in case hypervsior pulls the rug */
- movl $MSR_EFER, %ecx
- rdmsr
- btsl $_EFER_LME, %eax
- /* Avoid writing EFER if no change was made (for TDX guest) */
- jc 1f
- wrmsr
-1:
- /* Toggle CR4.LA57 */
- movl %cr4, %eax
- btcl $X86_CR4_LA57_BIT, %eax
- movl %eax, %cr4
-
- /* Enable paging again. */
- movl %cr0, %eax
- btsl $X86_CR0_PG_BIT, %eax
- movl %eax, %cr0
-
- /*
- * Return to the 64-bit calling code using LJMP rather than LRET, to
- * avoid the need for a 32-bit addressable stack. The destination
- * address will be adjusted after the template code is copied into a
- * 32-bit addressable buffer.
- */
-.Ljmp: ljmpl $__KERNEL_CS, $(.Lret - trampoline_32bit_src)
-SYM_CODE_END(trampoline_32bit_src)
-
-/*
- * This symbol is placed right after trampoline_32bit_src() so its address can
- * be used to infer the size of the trampoline code.
- */
-SYM_DATA(trampoline_ljmp_imm_offset, .word .Ljmp + 1 - trampoline_32bit_src)
-
- /*
- * The trampoline code has a size limit.
- * Make sure we fail to compile if the trampoline code grows
- * beyond TRAMPOLINE_32BIT_CODE_SIZE bytes.
- */
- .org trampoline_32bit_src + TRAMPOLINE_32BIT_CODE_SIZE
-
- .text
SYM_FUNC_START_LOCAL_NOALIGN(.Lno_longmode)
/* This isn't an x86-64 CPU, so hang intentionally, we cannot continue */
1:
diff --git a/arch/x86/boot/compressed/la57toggle.S b/arch/x86/boot/compressed/la57toggle.S
new file mode 100644
index 000000000000..9ee002387eb1
--- /dev/null
+++ b/arch/x86/boot/compressed/la57toggle.S
@@ -0,0 +1,112 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#include <linux/linkage.h>
+#include <asm/segment.h>
+#include <asm/boot.h>
+#include <asm/msr.h>
+#include <asm/processor-flags.h>
+#include "pgtable.h"
+
+/*
+ * This is the 32-bit trampoline that will be copied over to low memory. It
+ * will be called using the ordinary 64-bit calling convention from code
+ * running in 64-bit mode.
+ *
+ * Return address is at the top of the stack (might be above 4G).
+ * The first argument (EDI) contains the address of the temporary PGD level
+ * page table in 32-bit addressable memory which will be programmed into
+ * register CR3.
+ */
+
+ .section ".rodata", "a", @progbits
+SYM_CODE_START(trampoline_32bit_src)
+ /*
+ * Preserve callee save 64-bit registers on the stack: this is
+ * necessary because the architecture does not guarantee that GPRs will
+ * retain their full 64-bit values across a 32-bit mode switch.
+ */
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbp
+ pushq %rbx
+
+ /* Preserve top half of RSP in a legacy mode GPR to avoid truncation */
+ movq %rsp, %rbx
+ shrq $32, %rbx
+
+ /* Switch to compatibility mode (CS.L = 0 CS.D = 1) via far return */
+ pushq $__KERNEL32_CS
+ leaq 0f(%rip), %rax
+ pushq %rax
+ lretq
+
+ /*
+ * The 32-bit code below will do a far jump back to long mode and end
+ * up here after reconfiguring the number of paging levels. First, the
+ * stack pointer needs to be restored to its full 64-bit value before
+ * the callee save register contents can be popped from the stack.
+ */
+.Lret:
+ shlq $32, %rbx
+ orq %rbx, %rsp
+
+ /* Restore the preserved 64-bit registers */
+ popq %rbx
+ popq %rbp
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ retq
+
+ .code32
+0:
+ /* Disable paging */
+ movl %cr0, %eax
+ btrl $X86_CR0_PG_BIT, %eax
+ movl %eax, %cr0
+
+ /* Point CR3 to the trampoline's new top level page table */
+ movl %edi, %cr3
+
+ /* Set EFER.LME=1 as a precaution in case the hypervisor pulls the rug */
+ movl $MSR_EFER, %ecx
+ rdmsr
+ btsl $_EFER_LME, %eax
+ /* Avoid writing EFER if no change was made (for TDX guest) */
+ jc 1f
+ wrmsr
+1:
+ /* Toggle CR4.LA57 */
+ movl %cr4, %eax
+ btcl $X86_CR4_LA57_BIT, %eax
+ movl %eax, %cr4
+
+ /* Enable paging again. */
+ movl %cr0, %eax
+ btsl $X86_CR0_PG_BIT, %eax
+ movl %eax, %cr0
+
+ /*
+ * Return to the 64-bit calling code using LJMP rather than LRET, to
+ * avoid the need for a 32-bit addressable stack. The destination
+ * address will be adjusted after the template code is copied into a
+ * 32-bit addressable buffer.
+ */
+.Ljmp: ljmpl $__KERNEL_CS, $(.Lret - trampoline_32bit_src)
+SYM_CODE_END(trampoline_32bit_src)
+
+/*
+ * This symbol is placed right after trampoline_32bit_src() so its address can
+ * be used to infer the size of the trampoline code.
+ */
+SYM_DATA(trampoline_ljmp_imm_offset, .word .Ljmp + 1 - trampoline_32bit_src)
+
+ /*
+ * The trampoline code has a size limit.
+ * Make sure we fail to compile if the trampoline code grows
+ * beyond TRAMPOLINE_32BIT_CODE_SIZE bytes.
+ */
+ .org trampoline_32bit_src + TRAMPOLINE_32BIT_CODE_SIZE
diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
index 0d37420cad02..1cdcd4aaf395 100644
--- a/arch/x86/boot/compressed/misc.c
+++ b/arch/x86/boot/compressed/misc.c
@@ -235,7 +235,7 @@ static void handle_relocations(void *output, unsigned long output_len,
/*
* Process relocations: 32 bit relocations first then 64 bit after.
- * Three sets of binary relocations are added to the end of the kernel
+ * Two sets of binary relocations are added to the end of the kernel
* before compression. Each relocation table entry is the kernel
* address of the location which needs to be updated stored as a
* 32-bit value which is sign extended to 64 bits.
@@ -245,8 +245,6 @@ static void handle_relocations(void *output, unsigned long output_len,
* kernel bits...
* 0 - zero terminator for 64 bit relocations
* 64 bit relocation repeated
- * 0 - zero terminator for inverse 32 bit relocations
- * 32 bit inverse relocation repeated
* 0 - zero terminator for 32 bit relocations
* 32 bit relocation repeated
*
@@ -263,16 +261,6 @@ static void handle_relocations(void *output, unsigned long output_len,
*(uint32_t *)ptr += delta;
}
#ifdef CONFIG_X86_64
- while (*--reloc) {
- long extended = *reloc;
- extended += map;
-
- ptr = (unsigned long)extended;
- if (ptr < min_addr || ptr > max_addr)
- error("inverse 32-bit relocation outside of kernel!\n");
-
- *(int32_t *)ptr -= delta;
- }
for (reloc--; *reloc; reloc--) {
long extended = *reloc;
extended += map;
diff --git a/arch/x86/boot/compressed/pgtable_64.c b/arch/x86/boot/compressed/pgtable_64.c
index c882e1f67af0..d8c5de40669d 100644
--- a/arch/x86/boot/compressed/pgtable_64.c
+++ b/arch/x86/boot/compressed/pgtable_64.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
#include "misc.h"
#include <asm/bootparam.h>
+#include <asm/bootparam_utils.h>
#include <asm/e820/types.h>
#include <asm/processor.h>
#include "pgtable.h"
@@ -107,6 +108,7 @@ asmlinkage void configure_5level_paging(struct boot_params *bp, void *pgtable)
bool l5_required = false;
/* Initialize boot_params. Required for cmdline_find_option_bool(). */
+ sanitize_boot_params(bp);
boot_params_ptr = bp;
/*
diff --git a/arch/x86/boot/compressed/vmlinux.lds.S b/arch/x86/boot/compressed/vmlinux.lds.S
index 083ec6d7722a..3b2bc61c9408 100644
--- a/arch/x86/boot/compressed/vmlinux.lds.S
+++ b/arch/x86/boot/compressed/vmlinux.lds.S
@@ -48,7 +48,7 @@ SECTIONS
*(.data)
*(.data.*)
- /* Add 4 bytes of extra space for a CRC-32 checksum */
+ /* Add 4 bytes of extra space for the obsolete CRC-32 checksum */
. = ALIGN(. + 4, 0x200);
_edata = . ;
}
diff --git a/arch/x86/boot/cpucheck.c b/arch/x86/boot/cpucheck.c
index 0aae4d4ed615..f82de8de5dc6 100644
--- a/arch/x86/boot/cpucheck.c
+++ b/arch/x86/boot/cpucheck.c
@@ -22,10 +22,11 @@
# include "boot.h"
#endif
#include <linux/types.h>
+#include <asm/cpufeaturemasks.h>
#include <asm/intel-family.h>
#include <asm/processor-flags.h>
-#include <asm/required-features.h>
#include <asm/msr-index.h>
+
#include "string.h"
#include "msr.h"
diff --git a/arch/x86/boot/cpuflags.c b/arch/x86/boot/cpuflags.c
index d75237ba7ce9..916bac09b464 100644
--- a/arch/x86/boot/cpuflags.c
+++ b/arch/x86/boot/cpuflags.c
@@ -3,7 +3,6 @@
#include "bitops.h"
#include <asm/processor-flags.h>
-#include <asm/required-features.h>
#include <asm/msr-index.h>
#include "cpuflags.h"
@@ -29,40 +28,32 @@ static int has_fpu(void)
return fsw == 0 && (fcw & 0x103f) == 0x003f;
}
+#ifdef CONFIG_X86_32
/*
* For building the 16-bit code we want to explicitly specify 32-bit
* push/pop operations, rather than just saying 'pushf' or 'popf' and
- * letting the compiler choose. But this is also included from the
- * compressed/ directory where it may be 64-bit code, and thus needs
- * to be 'pushfq' or 'popfq' in that case.
+ * letting the compiler choose.
*/
-#ifdef __x86_64__
-#define PUSHF "pushfq"
-#define POPF "popfq"
-#else
-#define PUSHF "pushfl"
-#define POPF "popfl"
-#endif
-
-int has_eflag(unsigned long mask)
+bool has_eflag(unsigned long mask)
{
unsigned long f0, f1;
- asm volatile(PUSHF " \n\t"
- PUSHF " \n\t"
+ asm volatile("pushfl \n\t"
+ "pushfl \n\t"
"pop %0 \n\t"
"mov %0,%1 \n\t"
"xor %2,%1 \n\t"
"push %1 \n\t"
- POPF " \n\t"
- PUSHF " \n\t"
+ "popfl \n\t"
+ "pushfl \n\t"
"pop %1 \n\t"
- POPF
+ "popfl"
: "=&r" (f0), "=&r" (f1)
: "ri" (mask));
return !!((f0^f1) & mask);
}
+#endif
void cpuid_count(u32 id, u32 count, u32 *a, u32 *b, u32 *c, u32 *d)
{
diff --git a/arch/x86/boot/cpuflags.h b/arch/x86/boot/cpuflags.h
index 475b8fde90f7..a398d9204ad0 100644
--- a/arch/x86/boot/cpuflags.h
+++ b/arch/x86/boot/cpuflags.h
@@ -15,8 +15,13 @@ struct cpu_features {
extern struct cpu_features cpu;
extern u32 cpu_vendor[3];
-int has_eflag(unsigned long mask);
+#ifdef CONFIG_X86_32
+bool has_eflag(unsigned long mask);
+#else
+static inline bool has_eflag(unsigned long mask) { return true; }
+#endif
void get_cpuflags(void);
void cpuid_count(u32 id, u32 count, u32 *a, u32 *b, u32 *c, u32 *d);
+bool has_cpuflag(int flag);
#endif
diff --git a/arch/x86/boot/genimage.sh b/arch/x86/boot/genimage.sh
index c9299aeb7333..3882ead513f7 100644
--- a/arch/x86/boot/genimage.sh
+++ b/arch/x86/boot/genimage.sh
@@ -22,6 +22,7 @@
# This script requires:
# bash
# syslinux
+# genisoimage
# mtools (for fdimage* and hdimage)
# edk2/OVMF (for hdimage)
#
@@ -251,7 +252,9 @@ geniso() {
cp "$isolinux" "$ldlinux" "$tmp_dir"
cp "$FBZIMAGE" "$tmp_dir"/linux
echo default linux "$KCMDLINE" > "$tmp_dir"/isolinux.cfg
- cp "${FDINITRDS[@]}" "$tmp_dir"/
+ if [ ${#FDINITRDS[@]} -gt 0 ]; then
+ cp "${FDINITRDS[@]}" "$tmp_dir"/
+ fi
genisoimage -J -r -appid 'LINUX_BOOT' -input-charset=utf-8 \
-quiet -o "$FIMAGE" -b isolinux.bin \
-c boot.cat -no-emul-boot -boot-load-size 4 \
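
The guard matters because, with no initrds given, the quoted empty array expands to nothing and cp is left with only the destination operand, aborting the ISO build. A minimal bash reproduction of the failure mode:

    FDINITRDS=()
    cp "${FDINITRDS[@]}" /tmp/    # fails: cp sees only the destination operand
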
diff --git a/arch/x86/boot/mkcpustr.c b/arch/x86/boot/mkcpustr.c
index da0ccc5de538..22d730b227e3 100644
--- a/arch/x86/boot/mkcpustr.c
+++ b/arch/x86/boot/mkcpustr.c
@@ -12,8 +12,6 @@
#include <stdio.h>
-#include "../include/asm/required-features.h"
-#include "../include/asm/disabled-features.h"
#include "../include/asm/cpufeatures.h"
#include "../include/asm/vmxfeatures.h"
#include "../kernel/cpu/capflags.c"
@@ -23,6 +21,7 @@ int main(void)
int i, j;
const char *str;
+ printf("#include <asm/cpufeaturemasks.h>\n\n");
printf("static const char x86_cap_strs[] =\n");
for (i = 0; i < NCAPINTS; i++) {
diff --git a/arch/x86/boot/setup.ld b/arch/x86/boot/setup.ld
index 3a2d1360abb0..e1d594a60204 100644
--- a/arch/x86/boot/setup.ld
+++ b/arch/x86/boot/setup.ld
@@ -45,6 +45,8 @@ SECTIONS
setup_size = ALIGN(ABSOLUTE(.), 4096);
setup_sects = ABSOLUTE(setup_size / 512);
+ ASSERT(setup_sects >= 5, "The setup must be at least 5 sectors in size");
+ ASSERT(setup_sects <= 64, "The setup must be at most 64 sectors in size");
}
. = ALIGN(16);
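
These two ASSERTs move the sector-count sanity checks from the deleted tools/build host program into the link itself, so an oversized setup now fails at link time. The bound can also be checked by hand on a built setup.bin; a sketch with an illustrative path and GNU stat:

    size=$(stat -c %s arch/x86/boot/setup.bin)
    echo "setup sectors: $(( (size + 511) / 512 ))"    # must stay within 5..64
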
diff --git a/arch/x86/boot/tools/build.c b/arch/x86/boot/tools/build.c
deleted file mode 100644
index 10311d77c67f..000000000000
--- a/arch/x86/boot/tools/build.c
+++ /dev/null
@@ -1,247 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (C) 1991, 1992 Linus Torvalds
- * Copyright (C) 1997 Martin Mares
- * Copyright (C) 2007 H. Peter Anvin
- */
-
-/*
- * This file builds a disk-image from three different files:
- *
- * - setup: 8086 machine code, sets up system parm
- * - system: 80386 code for actual system
- * - zoffset.h: header with ZO_* defines
- *
- * It does some checking that all files are of the correct type, and writes
- * the result to the specified destination, removing headers and padding to
- * the right amount. It also writes some system data to stdout.
- */
-
-/*
- * Changes by tytso to allow root device specification
- * High loaded stuff by Hans Lermen & Werner Almesberger, Feb. 1996
- * Cross compiling fixes by Gertjan van Wingerde, July 1996
- * Rewritten by Martin Mares, April 1997
- * Substantially overhauled by H. Peter Anvin, April 2007
- */
-
-#include <stdio.h>
-#include <string.h>
-#include <stdlib.h>
-#include <stdarg.h>
-#include <sys/types.h>
-#include <sys/stat.h>
-#include <unistd.h>
-#include <fcntl.h>
-#include <sys/mman.h>
-#include <tools/le_byteshift.h>
-
-typedef unsigned char u8;
-typedef unsigned short u16;
-typedef unsigned int u32;
-
-/* Minimal number of setup sectors */
-#define SETUP_SECT_MIN 5
-#define SETUP_SECT_MAX 64
-
-/* This must be large enough to hold the entire setup */
-u8 buf[SETUP_SECT_MAX*512];
-
-static unsigned long _edata;
-
-/*----------------------------------------------------------------------*/
-
-static const u32 crctab32[] = {
- 0x00000000, 0x77073096, 0xee0e612c, 0x990951ba, 0x076dc419,
- 0x706af48f, 0xe963a535, 0x9e6495a3, 0x0edb8832, 0x79dcb8a4,
- 0xe0d5e91e, 0x97d2d988, 0x09b64c2b, 0x7eb17cbd, 0xe7b82d07,
- 0x90bf1d91, 0x1db71064, 0x6ab020f2, 0xf3b97148, 0x84be41de,
- 0x1adad47d, 0x6ddde4eb, 0xf4d4b551, 0x83d385c7, 0x136c9856,
- 0x646ba8c0, 0xfd62f97a, 0x8a65c9ec, 0x14015c4f, 0x63066cd9,
- 0xfa0f3d63, 0x8d080df5, 0x3b6e20c8, 0x4c69105e, 0xd56041e4,
- 0xa2677172, 0x3c03e4d1, 0x4b04d447, 0xd20d85fd, 0xa50ab56b,
- 0x35b5a8fa, 0x42b2986c, 0xdbbbc9d6, 0xacbcf940, 0x32d86ce3,
- 0x45df5c75, 0xdcd60dcf, 0xabd13d59, 0x26d930ac, 0x51de003a,
- 0xc8d75180, 0xbfd06116, 0x21b4f4b5, 0x56b3c423, 0xcfba9599,
- 0xb8bda50f, 0x2802b89e, 0x5f058808, 0xc60cd9b2, 0xb10be924,
- 0x2f6f7c87, 0x58684c11, 0xc1611dab, 0xb6662d3d, 0x76dc4190,
- 0x01db7106, 0x98d220bc, 0xefd5102a, 0x71b18589, 0x06b6b51f,
- 0x9fbfe4a5, 0xe8b8d433, 0x7807c9a2, 0x0f00f934, 0x9609a88e,
- 0xe10e9818, 0x7f6a0dbb, 0x086d3d2d, 0x91646c97, 0xe6635c01,
- 0x6b6b51f4, 0x1c6c6162, 0x856530d8, 0xf262004e, 0x6c0695ed,
- 0x1b01a57b, 0x8208f4c1, 0xf50fc457, 0x65b0d9c6, 0x12b7e950,
- 0x8bbeb8ea, 0xfcb9887c, 0x62dd1ddf, 0x15da2d49, 0x8cd37cf3,
- 0xfbd44c65, 0x4db26158, 0x3ab551ce, 0xa3bc0074, 0xd4bb30e2,
- 0x4adfa541, 0x3dd895d7, 0xa4d1c46d, 0xd3d6f4fb, 0x4369e96a,
- 0x346ed9fc, 0xad678846, 0xda60b8d0, 0x44042d73, 0x33031de5,
- 0xaa0a4c5f, 0xdd0d7cc9, 0x5005713c, 0x270241aa, 0xbe0b1010,
- 0xc90c2086, 0x5768b525, 0x206f85b3, 0xb966d409, 0xce61e49f,
- 0x5edef90e, 0x29d9c998, 0xb0d09822, 0xc7d7a8b4, 0x59b33d17,
- 0x2eb40d81, 0xb7bd5c3b, 0xc0ba6cad, 0xedb88320, 0x9abfb3b6,
- 0x03b6e20c, 0x74b1d29a, 0xead54739, 0x9dd277af, 0x04db2615,
- 0x73dc1683, 0xe3630b12, 0x94643b84, 0x0d6d6a3e, 0x7a6a5aa8,
- 0xe40ecf0b, 0x9309ff9d, 0x0a00ae27, 0x7d079eb1, 0xf00f9344,
- 0x8708a3d2, 0x1e01f268, 0x6906c2fe, 0xf762575d, 0x806567cb,
- 0x196c3671, 0x6e6b06e7, 0xfed41b76, 0x89d32be0, 0x10da7a5a,
- 0x67dd4acc, 0xf9b9df6f, 0x8ebeeff9, 0x17b7be43, 0x60b08ed5,
- 0xd6d6a3e8, 0xa1d1937e, 0x38d8c2c4, 0x4fdff252, 0xd1bb67f1,
- 0xa6bc5767, 0x3fb506dd, 0x48b2364b, 0xd80d2bda, 0xaf0a1b4c,
- 0x36034af6, 0x41047a60, 0xdf60efc3, 0xa867df55, 0x316e8eef,
- 0x4669be79, 0xcb61b38c, 0xbc66831a, 0x256fd2a0, 0x5268e236,
- 0xcc0c7795, 0xbb0b4703, 0x220216b9, 0x5505262f, 0xc5ba3bbe,
- 0xb2bd0b28, 0x2bb45a92, 0x5cb36a04, 0xc2d7ffa7, 0xb5d0cf31,
- 0x2cd99e8b, 0x5bdeae1d, 0x9b64c2b0, 0xec63f226, 0x756aa39c,
- 0x026d930a, 0x9c0906a9, 0xeb0e363f, 0x72076785, 0x05005713,
- 0x95bf4a82, 0xe2b87a14, 0x7bb12bae, 0x0cb61b38, 0x92d28e9b,
- 0xe5d5be0d, 0x7cdcefb7, 0x0bdbdf21, 0x86d3d2d4, 0xf1d4e242,
- 0x68ddb3f8, 0x1fda836e, 0x81be16cd, 0xf6b9265b, 0x6fb077e1,
- 0x18b74777, 0x88085ae6, 0xff0f6a70, 0x66063bca, 0x11010b5c,
- 0x8f659eff, 0xf862ae69, 0x616bffd3, 0x166ccf45, 0xa00ae278,
- 0xd70dd2ee, 0x4e048354, 0x3903b3c2, 0xa7672661, 0xd06016f7,
- 0x4969474d, 0x3e6e77db, 0xaed16a4a, 0xd9d65adc, 0x40df0b66,
- 0x37d83bf0, 0xa9bcae53, 0xdebb9ec5, 0x47b2cf7f, 0x30b5ffe9,
- 0xbdbdf21c, 0xcabac28a, 0x53b39330, 0x24b4a3a6, 0xbad03605,
- 0xcdd70693, 0x54de5729, 0x23d967bf, 0xb3667a2e, 0xc4614ab8,
- 0x5d681b02, 0x2a6f2b94, 0xb40bbe37, 0xc30c8ea1, 0x5a05df1b,
- 0x2d02ef8d
-};
-
-static u32 partial_crc32_one(u8 c, u32 crc)
-{
- return crctab32[(crc ^ c) & 0xff] ^ (crc >> 8);
-}
-
-static u32 partial_crc32(const u8 *s, int len, u32 crc)
-{
- while (len--)
- crc = partial_crc32_one(*s++, crc);
- return crc;
-}
-
-static void die(const char * str, ...)
-{
- va_list args;
- va_start(args, str);
- vfprintf(stderr, str, args);
- va_end(args);
- fputc('\n', stderr);
- exit(1);
-}
-
-static void usage(void)
-{
- die("Usage: build setup system zoffset.h image");
-}
-
-/*
- * Parse zoffset.h and find the entry points. We could just #include zoffset.h
- * but that would mean tools/build would have to be rebuilt every time. It's
- * not as if parsing it is hard...
- */
-#define PARSE_ZOFS(p, sym) do { \
- if (!strncmp(p, "#define ZO_" #sym " ", 11+sizeof(#sym))) \
- sym = strtoul(p + 11 + sizeof(#sym), NULL, 16); \
-} while (0)
-
-static void parse_zoffset(char *fname)
-{
- FILE *file;
- char *p;
- int c;
-
- file = fopen(fname, "r");
- if (!file)
- die("Unable to open `%s': %m", fname);
- c = fread(buf, 1, sizeof(buf) - 1, file);
- if (ferror(file))
- die("read-error on `zoffset.h'");
- fclose(file);
- buf[c] = 0;
-
- p = (char *)buf;
-
- while (p && *p) {
- PARSE_ZOFS(p, _edata);
-
- p = strchr(p, '\n');
- while (p && (*p == '\r' || *p == '\n'))
- p++;
- }
-}
-
-int main(int argc, char ** argv)
-{
- unsigned int i, sz, setup_sectors;
- int c;
- struct stat sb;
- FILE *file, *dest;
- int fd;
- void *kernel;
- u32 crc = 0xffffffffUL;
-
- if (argc != 5)
- usage();
- parse_zoffset(argv[3]);
-
- dest = fopen(argv[4], "w");
- if (!dest)
- die("Unable to write `%s': %m", argv[4]);
-
- /* Copy the setup code */
- file = fopen(argv[1], "r");
- if (!file)
- die("Unable to open `%s': %m", argv[1]);
- c = fread(buf, 1, sizeof(buf), file);
- if (ferror(file))
- die("read-error on `setup'");
- if (c < 1024)
- die("The setup must be at least 1024 bytes");
- if (get_unaligned_le16(&buf[510]) != 0xAA55)
- die("Boot block hasn't got boot flag (0xAA55)");
- fclose(file);
-
- /* Pad unused space with zeros */
- setup_sectors = (c + 4095) / 4096;
- setup_sectors *= 8;
- if (setup_sectors < SETUP_SECT_MIN)
- setup_sectors = SETUP_SECT_MIN;
- i = setup_sectors*512;
- memset(buf+c, 0, i-c);
-
- /* Open and stat the kernel file */
- fd = open(argv[2], O_RDONLY);
- if (fd < 0)
- die("Unable to open `%s': %m", argv[2]);
- if (fstat(fd, &sb))
- die("Unable to stat `%s': %m", argv[2]);
- if (_edata != sb.st_size)
- die("Unexpected file size `%s': %u != %u", argv[2], _edata,
- sb.st_size);
- sz = _edata - 4;
- kernel = mmap(NULL, sz, PROT_READ, MAP_SHARED, fd, 0);
- if (kernel == MAP_FAILED)
- die("Unable to mmap '%s': %m", argv[2]);
-
- crc = partial_crc32(buf, i, crc);
- if (fwrite(buf, 1, i, dest) != i)
- die("Writing setup failed");
-
- /* Copy the kernel code */
- crc = partial_crc32(kernel, sz, crc);
- if (fwrite(kernel, 1, sz, dest) != sz)
- die("Writing kernel failed");
-
- /* Write the CRC */
- put_unaligned_le32(crc, buf);
- if (fwrite(buf, 1, 4, dest) != 4)
- die("Writing CRC failed");
-
- /* Catch any delayed write failures */
- if (fclose(dest))
- die("Writing image failed");
-
- close(fd);
-
- /* Everything is OK */
- return 0;
-}
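
With tools/build gone, its remaining sanity checks either moved into setup.ld or were dropped along with the obsolete CRC-32 trailer. The boot signature it used to verify can still be inspected directly on the finished image; a sketch with an illustrative path:

    # Bytes at offset 510 must be the 0xAA55 boot flag (little endian):
    od -A d -t x1 -j 510 -N 2 arch/x86/boot/bzImage    # expect "55 aa"
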
diff --git a/arch/x86/coco/sev/core.c b/arch/x86/coco/sev/core.c
index 82492efc5d94..b0c1a7a57497 100644
--- a/arch/x86/coco/sev/core.c
+++ b/arch/x86/coco/sev/core.c
@@ -9,8 +9,6 @@
#define pr_fmt(fmt) "SEV: " fmt
-#define DISABLE_BRANCH_PROFILING
-
#include <linux/sched/debug.h> /* For show_regs() */
#include <linux/percpu-defs.h>
#include <linux/cc_platform.h>
@@ -1482,8 +1480,7 @@ static enum es_result vc_handle_msr(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
case MSR_AMD64_GUEST_TSC_FREQ:
if (sev_status & MSR_AMD64_SNP_SECURE_TSC)
return __vc_handle_secure_tsc_msrs(regs, write);
- else
- break;
+ break;
default:
break;
}
@@ -2853,19 +2850,8 @@ struct snp_msg_desc *snp_msg_alloc(void)
if (!mdesc->response)
goto e_free_request;
- mdesc->certs_data = alloc_shared_pages(SEV_FW_BLOB_MAX_SIZE);
- if (!mdesc->certs_data)
- goto e_free_response;
-
- /* initial the input address for guest request */
- mdesc->input.req_gpa = __pa(mdesc->request);
- mdesc->input.resp_gpa = __pa(mdesc->response);
- mdesc->input.data_gpa = __pa(mdesc->certs_data);
-
return mdesc;
-e_free_response:
- free_shared_pages(mdesc->response, sizeof(struct snp_guest_msg));
e_free_request:
free_shared_pages(mdesc->request, sizeof(struct snp_guest_msg));
e_unmap:
@@ -2885,7 +2871,6 @@ void snp_msg_free(struct snp_msg_desc *mdesc)
kfree(mdesc->ctx);
free_shared_pages(mdesc->response, sizeof(struct snp_guest_msg));
free_shared_pages(mdesc->request, sizeof(struct snp_guest_msg));
- free_shared_pages(mdesc->certs_data, SEV_FW_BLOB_MAX_SIZE);
iounmap((__force void __iomem *)mdesc->secrets);
memset(mdesc, 0, sizeof(*mdesc));
@@ -3054,7 +3039,7 @@ retry_request:
* sequence number must be incremented or the VMPCK must be deleted to
* prevent reuse of the IV.
*/
- rc = snp_issue_guest_request(req, &mdesc->input, rio);
+ rc = snp_issue_guest_request(req, &req->input, rio);
switch (rc) {
case -ENOSPC:
/*
@@ -3064,7 +3049,7 @@ retry_request:
* order to increment the sequence number and thus avoid
* IV reuse.
*/
- override_npages = mdesc->input.data_npages;
+ override_npages = req->input.data_npages;
req->exit_code = SVM_VMGEXIT_GUEST_REQUEST;
/*
@@ -3120,7 +3105,7 @@ retry_request:
}
if (override_npages)
- mdesc->input.data_npages = override_npages;
+ req->input.data_npages = override_npages;
return rc;
}
@@ -3158,6 +3143,11 @@ int snp_send_guest_request(struct snp_msg_desc *mdesc, struct snp_guest_req *req
*/
memcpy(mdesc->request, &mdesc->secret_request, sizeof(mdesc->secret_request));
+ /* Initialize the input address for guest request */
+ req->input.req_gpa = __pa(mdesc->request);
+ req->input.resp_gpa = __pa(mdesc->response);
+ req->input.data_gpa = req->certs_data ? __pa(req->certs_data) : 0;
+
rc = __handle_guest_request(mdesc, req, rio);
if (rc) {
if (rc == -EIO &&
diff --git a/arch/x86/coco/tdx/tdx.c b/arch/x86/coco/tdx/tdx.c
index 32809a06dab4..7772b01ab738 100644
--- a/arch/x86/coco/tdx/tdx.c
+++ b/arch/x86/coco/tdx/tdx.c
@@ -167,11 +167,11 @@ static void __noreturn tdx_panic(const char *msg)
/* Define register order according to the GHCI */
struct { u64 r14, r15, rbx, rdi, rsi, r8, r9, rdx; };
- char str[64];
+ char bytes[64] __nonstring;
} message;
/* VMM assumes '\0' in byte 65, if the message took all 64 bytes */
- strtomem_pad(message.str, msg, '\0');
+ strtomem_pad(message.bytes, msg, '\0');
args.r8 = message.r8;
args.r9 = message.r9;
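
The tdx_panic() hunk above switches the message buffer to a __nonstring byte array filled by strtomem_pad(), which copies a C string into a fixed-width field without relying on NUL termination and pads the remainder. As a rough user-space illustration of that copy-and-pad behaviour (copy_padded() below is a hand-rolled stand-in for the kernel macro, not the real strtomem_pad()):

/*
 * User-space sketch of the fixed-width, not-necessarily-NUL-terminated copy
 * performed by strtomem_pad() in the hunk above. The kernel macro operates on
 * a fixed-size array member; this stub mimics it with memcpy()/memset() purely
 * for illustration.
 */
#include <stdio.h>
#include <string.h>

struct message {
	char bytes[64];		/* not NUL-terminated, like the __nonstring member */
};

static void copy_padded(char *dst, size_t dst_len, const char *src, char pad)
{
	size_t n = strnlen(src, dst_len);

	memcpy(dst, src, n);			/* copy without the trailing NUL  */
	memset(dst + n, pad, dst_len - n);	/* pad the remainder of the field */
}

int main(void)
{
	struct message m;

	copy_padded(m.bytes, sizeof(m.bytes), "kernel panic: TDX", '\0');
	printf("%.64s\n", m.bytes);	/* print with an explicit field width */
	return 0;
}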
diff --git a/arch/x86/configs/xen.config b/arch/x86/configs/xen.config
index 581296255b39..d5d091e03bd3 100644
--- a/arch/x86/configs/xen.config
+++ b/arch/x86/configs/xen.config
@@ -1,6 +1,4 @@
# global x86 required specific stuff
-# On 32-bit HIGHMEM4G is not allowed
-CONFIG_HIGHMEM64G=y
CONFIG_64BIT=y
# These enable us to allow some of the
diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
index eb153eff9331..b37881bb9f15 100644
--- a/arch/x86/crypto/aesni-intel_asm.S
+++ b/arch/x86/crypto/aesni-intel_asm.S
@@ -17,6 +17,7 @@
*/
#include <linux/linkage.h>
+#include <linux/objtool.h>
#include <asm/frame.h>
#define STATE1 %xmm0
@@ -1071,6 +1072,7 @@ SYM_FUNC_END(_aesni_inc)
* size_t len, u8 *iv)
*/
SYM_FUNC_START(aesni_ctr_enc)
+ ANNOTATE_NOENDBR
FRAME_BEGIN
cmp $16, LEN
jb .Lctr_enc_just_ret
diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
index 11e95fc62636..3e9ab5cdade4 100644
--- a/arch/x86/crypto/aesni-intel_glue.c
+++ b/arch/x86/crypto/aesni-intel_glue.c
@@ -1536,26 +1536,6 @@ DEFINE_GCM_ALGS(vaes_avx10_512, FLAG_AVX10_512,
AES_GCM_KEY_AVX10_SIZE, 800);
#endif /* CONFIG_AS_VAES && CONFIG_AS_VPCLMULQDQ */
-/*
- * This is a list of CPU models that are known to suffer from downclocking when
- * zmm registers (512-bit vectors) are used. On these CPUs, the AES mode
- * implementations with zmm registers won't be used by default. Implementations
- * with ymm registers (256-bit vectors) will be used by default instead.
- */
-static const struct x86_cpu_id zmm_exclusion_list[] = {
- X86_MATCH_VFM(INTEL_SKYLAKE_X, 0),
- X86_MATCH_VFM(INTEL_ICELAKE_X, 0),
- X86_MATCH_VFM(INTEL_ICELAKE_D, 0),
- X86_MATCH_VFM(INTEL_ICELAKE, 0),
- X86_MATCH_VFM(INTEL_ICELAKE_L, 0),
- X86_MATCH_VFM(INTEL_ICELAKE_NNPI, 0),
- X86_MATCH_VFM(INTEL_TIGERLAKE_L, 0),
- X86_MATCH_VFM(INTEL_TIGERLAKE, 0),
- /* Allow Rocket Lake and later, and Sapphire Rapids and later. */
- /* Also allow AMD CPUs (starting with Zen 4, the first with AVX-512). */
- {},
-};
-
static int __init register_avx_algs(void)
{
int err;
@@ -1600,7 +1580,7 @@ static int __init register_avx_algs(void)
if (err)
return err;
- if (x86_match_cpu(zmm_exclusion_list)) {
+ if (boot_cpu_has(X86_FEATURE_PREFER_YMM)) {
int i;
aes_xts_alg_vaes_avx10_512.base.cra_priority = 1;
diff --git a/arch/x86/crypto/camellia-aesni-avx-asm_64.S b/arch/x86/crypto/camellia-aesni-avx-asm_64.S
index 646477a13e11..1dfef28c1266 100644
--- a/arch/x86/crypto/camellia-aesni-avx-asm_64.S
+++ b/arch/x86/crypto/camellia-aesni-avx-asm_64.S
@@ -16,6 +16,7 @@
*/
#include <linux/linkage.h>
+#include <linux/cfi_types.h>
#include <asm/frame.h>
#define CAMELLIA_TABLE_BYTE_LEN 272
@@ -882,7 +883,7 @@ SYM_FUNC_START_LOCAL(__camellia_dec_blk16)
jmp .Ldec_max24;
SYM_FUNC_END(__camellia_dec_blk16)
-SYM_FUNC_START(camellia_ecb_enc_16way)
+SYM_TYPED_FUNC_START(camellia_ecb_enc_16way)
/* input:
* %rdi: ctx, CTX
* %rsi: dst (16 blocks)
@@ -907,7 +908,7 @@ SYM_FUNC_START(camellia_ecb_enc_16way)
RET;
SYM_FUNC_END(camellia_ecb_enc_16way)
-SYM_FUNC_START(camellia_ecb_dec_16way)
+SYM_TYPED_FUNC_START(camellia_ecb_dec_16way)
/* input:
* %rdi: ctx, CTX
* %rsi: dst (16 blocks)
@@ -937,7 +938,7 @@ SYM_FUNC_START(camellia_ecb_dec_16way)
RET;
SYM_FUNC_END(camellia_ecb_dec_16way)
-SYM_FUNC_START(camellia_cbc_dec_16way)
+SYM_TYPED_FUNC_START(camellia_cbc_dec_16way)
/* input:
* %rdi: ctx, CTX
* %rsi: dst (16 blocks)
diff --git a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
index a0eb94e53b1b..b1c9b9450555 100644
--- a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
+++ b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
@@ -6,6 +6,7 @@
*/
#include <linux/linkage.h>
+#include <linux/cfi_types.h>
#include <asm/frame.h>
#define CAMELLIA_TABLE_BYTE_LEN 272
diff --git a/arch/x86/crypto/camellia-x86_64-asm_64.S b/arch/x86/crypto/camellia-x86_64-asm_64.S
index 816b6bb8bded..824cb94de6c2 100644
--- a/arch/x86/crypto/camellia-x86_64-asm_64.S
+++ b/arch/x86/crypto/camellia-x86_64-asm_64.S
@@ -6,6 +6,7 @@
*/
#include <linux/linkage.h>
+#include <linux/cfi_types.h>
.file "camellia-x86_64-asm_64.S"
.text
@@ -177,7 +178,7 @@
bswapq RAB0; \
movq RAB0, 4*2(RIO);
-SYM_FUNC_START(__camellia_enc_blk)
+SYM_TYPED_FUNC_START(__camellia_enc_blk)
/* input:
* %rdi: ctx, CTX
* %rsi: dst
@@ -224,7 +225,7 @@ SYM_FUNC_START(__camellia_enc_blk)
RET;
SYM_FUNC_END(__camellia_enc_blk)
-SYM_FUNC_START(camellia_dec_blk)
+SYM_TYPED_FUNC_START(camellia_dec_blk)
/* input:
* %rdi: ctx, CTX
* %rsi: dst
@@ -411,7 +412,7 @@ SYM_FUNC_END(camellia_dec_blk)
bswapq RAB1; \
movq RAB1, 12*2(RIO);
-SYM_FUNC_START(__camellia_enc_blk_2way)
+SYM_TYPED_FUNC_START(__camellia_enc_blk_2way)
/* input:
* %rdi: ctx, CTX
* %rsi: dst
@@ -460,7 +461,7 @@ SYM_FUNC_START(__camellia_enc_blk_2way)
RET;
SYM_FUNC_END(__camellia_enc_blk_2way)
-SYM_FUNC_START(camellia_dec_blk_2way)
+SYM_TYPED_FUNC_START(camellia_dec_blk_2way)
/* input:
* %rdi: ctx, CTX
* %rsi: dst
diff --git a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
index 97e283621851..84e47f7f6188 100644
--- a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
+++ b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
@@ -9,6 +9,7 @@
*/
#include <linux/linkage.h>
+#include <linux/cfi_types.h>
#include <asm/frame.h>
#include "glue_helper-asm-avx.S"
@@ -656,7 +657,7 @@ SYM_FUNC_START_LOCAL(__serpent_dec_blk8_avx)
RET;
SYM_FUNC_END(__serpent_dec_blk8_avx)
-SYM_FUNC_START(serpent_ecb_enc_8way_avx)
+SYM_TYPED_FUNC_START(serpent_ecb_enc_8way_avx)
/* input:
* %rdi: ctx, CTX
* %rsi: dst
@@ -674,7 +675,7 @@ SYM_FUNC_START(serpent_ecb_enc_8way_avx)
RET;
SYM_FUNC_END(serpent_ecb_enc_8way_avx)
-SYM_FUNC_START(serpent_ecb_dec_8way_avx)
+SYM_TYPED_FUNC_START(serpent_ecb_dec_8way_avx)
/* input:
* %rdi: ctx, CTX
* %rsi: dst
@@ -692,7 +693,7 @@ SYM_FUNC_START(serpent_ecb_dec_8way_avx)
RET;
SYM_FUNC_END(serpent_ecb_dec_8way_avx)
-SYM_FUNC_START(serpent_cbc_dec_8way_avx)
+SYM_TYPED_FUNC_START(serpent_cbc_dec_8way_avx)
/* input:
* %rdi: ctx, CTX
* %rsi: dst
diff --git a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
index d2288bf38a8a..071e90e7f0d8 100644
--- a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
+++ b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
@@ -6,6 +6,7 @@
*/
#include <linux/linkage.h>
+#include <linux/cfi_types.h>
.file "twofish-x86_64-asm-3way.S"
.text
@@ -220,7 +221,7 @@
rorq $32, RAB2; \
outunpack3(mov, RIO, 2, RAB, 2);
-SYM_FUNC_START(__twofish_enc_blk_3way)
+SYM_TYPED_FUNC_START(__twofish_enc_blk_3way)
/* input:
* %rdi: ctx, CTX
* %rsi: dst
@@ -269,7 +270,7 @@ SYM_FUNC_START(__twofish_enc_blk_3way)
RET;
SYM_FUNC_END(__twofish_enc_blk_3way)
-SYM_FUNC_START(twofish_dec_blk_3way)
+SYM_TYPED_FUNC_START(twofish_dec_blk_3way)
/* input:
* %rdi: ctx, CTX
* %rsi: dst
diff --git a/arch/x86/crypto/twofish-x86_64-asm_64.S b/arch/x86/crypto/twofish-x86_64-asm_64.S
index 775af290cd19..e08b4ba07b93 100644
--- a/arch/x86/crypto/twofish-x86_64-asm_64.S
+++ b/arch/x86/crypto/twofish-x86_64-asm_64.S
@@ -8,6 +8,7 @@
.text
#include <linux/linkage.h>
+#include <linux/cfi_types.h>
#include <asm/asm-offsets.h>
#define a_offset 0
@@ -202,7 +203,7 @@
xor %r8d, d ## D;\
ror $1, d ## D;
-SYM_FUNC_START(twofish_enc_blk)
+SYM_TYPED_FUNC_START(twofish_enc_blk)
pushq R1
/* %rdi contains the ctx address */
@@ -255,7 +256,7 @@ SYM_FUNC_START(twofish_enc_blk)
RET
SYM_FUNC_END(twofish_enc_blk)
-SYM_FUNC_START(twofish_dec_blk)
+SYM_TYPED_FUNC_START(twofish_dec_blk)
pushq R1
/* %rdi contains the ctx address */
diff --git a/arch/x86/entry/Makefile b/arch/x86/entry/Makefile
index ce1cc1622385..72cae8e0ce85 100644
--- a/arch/x86/entry/Makefile
+++ b/arch/x86/entry/Makefile
@@ -7,12 +7,13 @@ KASAN_SANITIZE := n
UBSAN_SANITIZE := n
KCOV_INSTRUMENT := n
-CFLAGS_REMOVE_common.o = $(CC_FLAGS_FTRACE)
+CFLAGS_REMOVE_syscall_32.o = $(CC_FLAGS_FTRACE)
+CFLAGS_REMOVE_syscall_64.o = $(CC_FLAGS_FTRACE)
-CFLAGS_common.o += -fno-stack-protector
+CFLAGS_syscall_32.o += -fno-stack-protector
+CFLAGS_syscall_64.o += -fno-stack-protector
obj-y := entry.o entry_$(BITS).o syscall_$(BITS).o
-obj-y += common.o
obj-y += vdso/
obj-y += vsyscall/
@@ -23,4 +24,3 @@ CFLAGS_REMOVE_entry_fred.o += -pg $(CC_FLAGS_FTRACE)
obj-$(CONFIG_X86_FRED) += entry_64_fred.o entry_fred.o
obj-$(CONFIG_IA32_EMULATION) += entry_64_compat.o syscall_32.o
-obj-$(CONFIG_X86_X32_ABI) += syscall_x32.o
diff --git a/arch/x86/entry/calling.h b/arch/x86/entry/calling.h
index ea81770629ee..cb0911c5dc5d 100644
--- a/arch/x86/entry/calling.h
+++ b/arch/x86/entry/calling.h
@@ -431,6 +431,7 @@ For 32-bit we have the following conventions - kernel is built with
/* rdi: arg1 ... normal C conventions. rax is saved/restored. */
.macro THUNK name, func
SYM_FUNC_START(\name)
+ ANNOTATE_NOENDBR
pushq %rbp
movq %rsp, %rbp
diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c
deleted file mode 100644
index 14db5b85114c..000000000000
--- a/arch/x86/entry/common.c
+++ /dev/null
@@ -1,524 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * common.c - C code for kernel entry and exit
- * Copyright (c) 2015 Andrew Lutomirski
- *
- * Based on asm and ptrace code by many authors. The code here originated
- * in ptrace.c and signal.c.
- */
-
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/sched/task_stack.h>
-#include <linux/entry-common.h>
-#include <linux/mm.h>
-#include <linux/smp.h>
-#include <linux/errno.h>
-#include <linux/ptrace.h>
-#include <linux/export.h>
-#include <linux/nospec.h>
-#include <linux/syscalls.h>
-#include <linux/uaccess.h>
-#include <linux/init.h>
-
-#ifdef CONFIG_XEN_PV
-#include <xen/xen-ops.h>
-#include <xen/events.h>
-#endif
-
-#include <asm/apic.h>
-#include <asm/desc.h>
-#include <asm/traps.h>
-#include <asm/vdso.h>
-#include <asm/cpufeature.h>
-#include <asm/fpu/api.h>
-#include <asm/nospec-branch.h>
-#include <asm/io_bitmap.h>
-#include <asm/syscall.h>
-#include <asm/irq_stack.h>
-
-#ifdef CONFIG_X86_64
-
-static __always_inline bool do_syscall_x64(struct pt_regs *regs, int nr)
-{
- /*
- * Convert negative numbers to very high and thus out of range
- * numbers for comparisons.
- */
- unsigned int unr = nr;
-
- if (likely(unr < NR_syscalls)) {
- unr = array_index_nospec(unr, NR_syscalls);
- regs->ax = x64_sys_call(regs, unr);
- return true;
- }
- return false;
-}
-
-static __always_inline bool do_syscall_x32(struct pt_regs *regs, int nr)
-{
- /*
- * Adjust the starting offset of the table, and convert numbers
- * < __X32_SYSCALL_BIT to very high and thus out of range
- * numbers for comparisons.
- */
- unsigned int xnr = nr - __X32_SYSCALL_BIT;
-
- if (IS_ENABLED(CONFIG_X86_X32_ABI) && likely(xnr < X32_NR_syscalls)) {
- xnr = array_index_nospec(xnr, X32_NR_syscalls);
- regs->ax = x32_sys_call(regs, xnr);
- return true;
- }
- return false;
-}
-
-/* Returns true to return using SYSRET, or false to use IRET */
-__visible noinstr bool do_syscall_64(struct pt_regs *regs, int nr)
-{
- add_random_kstack_offset();
- nr = syscall_enter_from_user_mode(regs, nr);
-
- instrumentation_begin();
-
- if (!do_syscall_x64(regs, nr) && !do_syscall_x32(regs, nr) && nr != -1) {
- /* Invalid system call, but still a system call. */
- regs->ax = __x64_sys_ni_syscall(regs);
- }
-
- instrumentation_end();
- syscall_exit_to_user_mode(regs);
-
- /*
- * Check that the register state is valid for using SYSRET to exit
- * to userspace. Otherwise use the slower but fully capable IRET
- * exit path.
- */
-
- /* XEN PV guests always use the IRET path */
- if (cpu_feature_enabled(X86_FEATURE_XENPV))
- return false;
-
- /* SYSRET requires RCX == RIP and R11 == EFLAGS */
- if (unlikely(regs->cx != regs->ip || regs->r11 != regs->flags))
- return false;
-
- /* CS and SS must match the values set in MSR_STAR */
- if (unlikely(regs->cs != __USER_CS || regs->ss != __USER_DS))
- return false;
-
- /*
- * On Intel CPUs, SYSRET with non-canonical RCX/RIP will #GP
- * in kernel space. This essentially lets the user take over
- * the kernel, since userspace controls RSP.
- *
- * TASK_SIZE_MAX covers all user-accessible addresses other than
- * the deprecated vsyscall page.
- */
- if (unlikely(regs->ip >= TASK_SIZE_MAX))
- return false;
-
- /*
- * SYSRET cannot restore RF. It can restore TF, but unlike IRET,
- * restoring TF results in a trap from userspace immediately after
- * SYSRET.
- */
- if (unlikely(regs->flags & (X86_EFLAGS_RF | X86_EFLAGS_TF)))
- return false;
-
- /* Use SYSRET to exit to userspace */
- return true;
-}
-#endif
-
-#if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
-static __always_inline int syscall_32_enter(struct pt_regs *regs)
-{
- if (IS_ENABLED(CONFIG_IA32_EMULATION))
- current_thread_info()->status |= TS_COMPAT;
-
- return (int)regs->orig_ax;
-}
-
-#ifdef CONFIG_IA32_EMULATION
-bool __ia32_enabled __ro_after_init = !IS_ENABLED(CONFIG_IA32_EMULATION_DEFAULT_DISABLED);
-
-static int ia32_emulation_override_cmdline(char *arg)
-{
- return kstrtobool(arg, &__ia32_enabled);
-}
-early_param("ia32_emulation", ia32_emulation_override_cmdline);
-#endif
-
-/*
- * Invoke a 32-bit syscall. Called with IRQs on in CT_STATE_KERNEL.
- */
-static __always_inline void do_syscall_32_irqs_on(struct pt_regs *regs, int nr)
-{
- /*
- * Convert negative numbers to very high and thus out of range
- * numbers for comparisons.
- */
- unsigned int unr = nr;
-
- if (likely(unr < IA32_NR_syscalls)) {
- unr = array_index_nospec(unr, IA32_NR_syscalls);
- regs->ax = ia32_sys_call(regs, unr);
- } else if (nr != -1) {
- regs->ax = __ia32_sys_ni_syscall(regs);
- }
-}
-
-#ifdef CONFIG_IA32_EMULATION
-static __always_inline bool int80_is_external(void)
-{
- const unsigned int offs = (0x80 / 32) * 0x10;
- const u32 bit = BIT(0x80 % 32);
-
- /* The local APIC on XENPV guests is fake */
- if (cpu_feature_enabled(X86_FEATURE_XENPV))
- return false;
-
- /*
- * If vector 0x80 is set in the APIC ISR then this is an external
- * interrupt. Either from broken hardware or injected by a VMM.
- *
- * Note: In guest mode this is only valid for secure guests where
- * the secure module fully controls the vAPIC exposed to the guest.
- */
- return apic_read(APIC_ISR + offs) & bit;
-}
-
-/**
- * do_int80_emulation - 32-bit legacy syscall C entry from asm
- * @regs: syscall arguments in struct pt_args on the stack.
- *
- * This entry point can be used by 32-bit and 64-bit programs to perform
- * 32-bit system calls. Instances of INT $0x80 can be found inline in
- * various programs and libraries. It is also used by the vDSO's
- * __kernel_vsyscall fallback for hardware that doesn't support a faster
- * entry method. Restarted 32-bit system calls also fall back to INT
- * $0x80 regardless of what instruction was originally used to do the
- * system call.
- *
- * This is considered a slow path. It is not used by most libc
- * implementations on modern hardware except during process startup.
- *
- * The arguments for the INT $0x80 based syscall are on stack in the
- * pt_regs structure:
- * eax: system call number
- * ebx, ecx, edx, esi, edi, ebp: arg1 - arg 6
- */
-__visible noinstr void do_int80_emulation(struct pt_regs *regs)
-{
- int nr;
-
- /* Kernel does not use INT $0x80! */
- if (unlikely(!user_mode(regs))) {
- irqentry_enter(regs);
- instrumentation_begin();
- panic("Unexpected external interrupt 0x80\n");
- }
-
- /*
- * Establish kernel context for instrumentation, including for
- * int80_is_external() below which calls into the APIC driver.
- * Identical for soft and external interrupts.
- */
- enter_from_user_mode(regs);
-
- instrumentation_begin();
- add_random_kstack_offset();
-
- /* Validate that this is a soft interrupt to the extent possible */
- if (unlikely(int80_is_external()))
- panic("Unexpected external interrupt 0x80\n");
-
- /*
- * The low level idtentry code pushed -1 into regs::orig_ax
- * and regs::ax contains the syscall number.
- *
- * User tracing code (ptrace or signal handlers) might assume
- * that the regs::orig_ax contains a 32-bit number on invoking
- * a 32-bit syscall.
- *
- * Establish the syscall convention by saving the 32bit truncated
- * syscall number in regs::orig_ax and by invalidating regs::ax.
- */
- regs->orig_ax = regs->ax & GENMASK(31, 0);
- regs->ax = -ENOSYS;
-
- nr = syscall_32_enter(regs);
-
- local_irq_enable();
- nr = syscall_enter_from_user_mode_work(regs, nr);
- do_syscall_32_irqs_on(regs, nr);
-
- instrumentation_end();
- syscall_exit_to_user_mode(regs);
-}
-
-#ifdef CONFIG_X86_FRED
-/*
- * A FRED-specific INT80 handler is warranted for the follwing reasons:
- *
- * 1) As INT instructions and hardware interrupts are separate event
- * types, FRED does not preclude the use of vector 0x80 for external
- * interrupts. As a result, the FRED setup code does not reserve
- * vector 0x80 and calling int80_is_external() is not merely
- * suboptimal but actively incorrect: it could cause a system call
- * to be incorrectly ignored.
- *
- * 2) It is called only for handling vector 0x80 of event type
- * EVENT_TYPE_SWINT and will never be called to handle any external
- * interrupt (event type EVENT_TYPE_EXTINT).
- *
- * 3) FRED has separate entry flows depending on if the event came from
- * user space or kernel space, and because the kernel does not use
- * INT insns, the FRED kernel entry handler fred_entry_from_kernel()
- * falls through to fred_bad_type() if the event type is
- * EVENT_TYPE_SWINT, i.e., INT insns. So if the kernel is handling
- * an INT insn, it can only be from a user level.
- *
- * 4) int80_emulation() does a CLEAR_BRANCH_HISTORY. While FRED will
- * likely take a different approach if it is ever needed: it
- * probably belongs in either fred_intx()/ fred_other() or
- * asm_fred_entrypoint_user(), depending on if this ought to be done
- * for all entries from userspace or only system
- * calls.
- *
- * 5) INT $0x80 is the fast path for 32-bit system calls under FRED.
- */
-DEFINE_FREDENTRY_RAW(int80_emulation)
-{
- int nr;
-
- enter_from_user_mode(regs);
-
- instrumentation_begin();
- add_random_kstack_offset();
-
- /*
- * FRED pushed 0 into regs::orig_ax and regs::ax contains the
- * syscall number.
- *
- * User tracing code (ptrace or signal handlers) might assume
- * that the regs::orig_ax contains a 32-bit number on invoking
- * a 32-bit syscall.
- *
- * Establish the syscall convention by saving the 32bit truncated
- * syscall number in regs::orig_ax and by invalidating regs::ax.
- */
- regs->orig_ax = regs->ax & GENMASK(31, 0);
- regs->ax = -ENOSYS;
-
- nr = syscall_32_enter(regs);
-
- local_irq_enable();
- nr = syscall_enter_from_user_mode_work(regs, nr);
- do_syscall_32_irqs_on(regs, nr);
-
- instrumentation_end();
- syscall_exit_to_user_mode(regs);
-}
-#endif
-#else /* CONFIG_IA32_EMULATION */
-
-/* Handles int $0x80 on a 32bit kernel */
-__visible noinstr void do_int80_syscall_32(struct pt_regs *regs)
-{
- int nr = syscall_32_enter(regs);
-
- add_random_kstack_offset();
- /*
- * Subtlety here: if ptrace pokes something larger than 2^31-1 into
- * orig_ax, the int return value truncates it. This matches
- * the semantics of syscall_get_nr().
- */
- nr = syscall_enter_from_user_mode(regs, nr);
- instrumentation_begin();
-
- do_syscall_32_irqs_on(regs, nr);
-
- instrumentation_end();
- syscall_exit_to_user_mode(regs);
-}
-#endif /* !CONFIG_IA32_EMULATION */
-
-static noinstr bool __do_fast_syscall_32(struct pt_regs *regs)
-{
- int nr = syscall_32_enter(regs);
- int res;
-
- add_random_kstack_offset();
- /*
- * This cannot use syscall_enter_from_user_mode() as it has to
- * fetch EBP before invoking any of the syscall entry work
- * functions.
- */
- syscall_enter_from_user_mode_prepare(regs);
-
- instrumentation_begin();
- /* Fetch EBP from where the vDSO stashed it. */
- if (IS_ENABLED(CONFIG_X86_64)) {
- /*
- * Micro-optimization: the pointer we're following is
- * explicitly 32 bits, so it can't be out of range.
- */
- res = __get_user(*(u32 *)&regs->bp,
- (u32 __user __force *)(unsigned long)(u32)regs->sp);
- } else {
- res = get_user(*(u32 *)&regs->bp,
- (u32 __user __force *)(unsigned long)(u32)regs->sp);
- }
-
- if (res) {
- /* User code screwed up. */
- regs->ax = -EFAULT;
-
- local_irq_disable();
- instrumentation_end();
- irqentry_exit_to_user_mode(regs);
- return false;
- }
-
- nr = syscall_enter_from_user_mode_work(regs, nr);
-
- /* Now this is just like a normal syscall. */
- do_syscall_32_irqs_on(regs, nr);
-
- instrumentation_end();
- syscall_exit_to_user_mode(regs);
- return true;
-}
-
-/* Returns true to return using SYSEXIT/SYSRETL, or false to use IRET */
-__visible noinstr bool do_fast_syscall_32(struct pt_regs *regs)
-{
- /*
- * Called using the internal vDSO SYSENTER/SYSCALL32 calling
- * convention. Adjust regs so it looks like we entered using int80.
- */
- unsigned long landing_pad = (unsigned long)current->mm->context.vdso +
- vdso_image_32.sym_int80_landing_pad;
-
- /*
- * SYSENTER loses EIP, and even SYSCALL32 needs us to skip forward
- * so that 'regs->ip -= 2' lands back on an int $0x80 instruction.
- * Fix it up.
- */
- regs->ip = landing_pad;
-
- /* Invoke the syscall. If it failed, keep it simple: use IRET. */
- if (!__do_fast_syscall_32(regs))
- return false;
-
- /*
- * Check that the register state is valid for using SYSRETL/SYSEXIT
- * to exit to userspace. Otherwise use the slower but fully capable
- * IRET exit path.
- */
-
- /* XEN PV guests always use the IRET path */
- if (cpu_feature_enabled(X86_FEATURE_XENPV))
- return false;
-
- /* EIP must point to the VDSO landing pad */
- if (unlikely(regs->ip != landing_pad))
- return false;
-
- /* CS and SS must match the values set in MSR_STAR */
- if (unlikely(regs->cs != __USER32_CS || regs->ss != __USER_DS))
- return false;
-
- /* If the TF, RF, or VM flags are set, use IRET */
- if (unlikely(regs->flags & (X86_EFLAGS_RF | X86_EFLAGS_TF | X86_EFLAGS_VM)))
- return false;
-
- /* Use SYSRETL/SYSEXIT to exit to userspace */
- return true;
-}
-
-/* Returns true to return using SYSEXIT/SYSRETL, or false to use IRET */
-__visible noinstr bool do_SYSENTER_32(struct pt_regs *regs)
-{
- /* SYSENTER loses RSP, but the vDSO saved it in RBP. */
- regs->sp = regs->bp;
-
- /* SYSENTER clobbers EFLAGS.IF. Assume it was set in usermode. */
- regs->flags |= X86_EFLAGS_IF;
-
- return do_fast_syscall_32(regs);
-}
-#endif
-
-SYSCALL_DEFINE0(ni_syscall)
-{
- return -ENOSYS;
-}
-
-#ifdef CONFIG_XEN_PV
-#ifndef CONFIG_PREEMPTION
-/*
- * Some hypercalls issued by the toolstack can take many 10s of
- * seconds. Allow tasks running hypercalls via the privcmd driver to
- * be voluntarily preempted even if full kernel preemption is
- * disabled.
- *
- * Such preemptible hypercalls are bracketed by
- * xen_preemptible_hcall_begin() and xen_preemptible_hcall_end()
- * calls.
- */
-DEFINE_PER_CPU(bool, xen_in_preemptible_hcall);
-EXPORT_SYMBOL_GPL(xen_in_preemptible_hcall);
-
-/*
- * In case of scheduling the flag must be cleared and restored after
- * returning from schedule as the task might move to a different CPU.
- */
-static __always_inline bool get_and_clear_inhcall(void)
-{
- bool inhcall = __this_cpu_read(xen_in_preemptible_hcall);
-
- __this_cpu_write(xen_in_preemptible_hcall, false);
- return inhcall;
-}
-
-static __always_inline void restore_inhcall(bool inhcall)
-{
- __this_cpu_write(xen_in_preemptible_hcall, inhcall);
-}
-#else
-static __always_inline bool get_and_clear_inhcall(void) { return false; }
-static __always_inline void restore_inhcall(bool inhcall) { }
-#endif
-
-static void __xen_pv_evtchn_do_upcall(struct pt_regs *regs)
-{
- struct pt_regs *old_regs = set_irq_regs(regs);
-
- inc_irq_stat(irq_hv_callback_count);
-
- xen_evtchn_do_upcall();
-
- set_irq_regs(old_regs);
-}
-
-__visible noinstr void xen_pv_evtchn_do_upcall(struct pt_regs *regs)
-{
- irqentry_state_t state = irqentry_enter(regs);
- bool inhcall;
-
- instrumentation_begin();
- run_sysvec_on_irqstack_cond(__xen_pv_evtchn_do_upcall, regs);
-
- inhcall = get_and_clear_inhcall();
- if (inhcall && !WARN_ON_ONCE(state.exit_rcu)) {
- irqentry_exit_cond_resched();
- instrumentation_end();
- restore_inhcall(inhcall);
- } else {
- instrumentation_end();
- irqentry_exit(regs, state);
- }
-}
-#endif /* CONFIG_XEN_PV */
diff --git a/arch/x86/entry/entry.S b/arch/x86/entry/entry.S
index b7ea3e8e9ecc..d3caa31240ed 100644
--- a/arch/x86/entry/entry.S
+++ b/arch/x86/entry/entry.S
@@ -5,6 +5,7 @@
#include <linux/export.h>
#include <linux/linkage.h>
+#include <linux/objtool.h>
#include <asm/msr-index.h>
#include <asm/unwind_hints.h>
#include <asm/segment.h>
@@ -17,6 +18,7 @@
.pushsection .noinstr.text, "ax"
SYM_FUNC_START(entry_ibpb)
+ ANNOTATE_NOENDBR
movl $MSR_IA32_PRED_CMD, %ecx
movl $PRED_CMD_IBPB, %eax
xorl %edx, %edx
@@ -52,7 +54,6 @@ EXPORT_SYMBOL_GPL(mds_verw_sel);
THUNK warn_thunk_thunk, __warn_thunk
-#ifndef CONFIG_X86_64
/*
* Clang's implementation of TLS stack cookies requires the variable in
* question to be a TLS variable. If the variable happens to be defined as an
@@ -63,7 +64,6 @@ THUNK warn_thunk_thunk, __warn_thunk
* entirely in the C code, and use an alias emitted by the linker script
* instead.
*/
-#ifdef CONFIG_STACKPROTECTOR
+#if defined(CONFIG_STACKPROTECTOR) && defined(CONFIG_SMP)
EXPORT_SYMBOL(__ref_stack_chk_guard);
#endif
-#endif
diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
index 20be5758c2d2..92c0b4a94e0a 100644
--- a/arch/x86/entry/entry_32.S
+++ b/arch/x86/entry/entry_32.S
@@ -1153,7 +1153,7 @@ SYM_CODE_START(asm_exc_nmi)
* is using the thread stack right now, so it's safe for us to use it.
*/
movl %esp, %ebx
- movl PER_CPU_VAR(pcpu_hot + X86_top_of_stack), %esp
+ movl PER_CPU_VAR(cpu_current_top_of_stack), %esp
call exc_nmi
movl %ebx, %esp
@@ -1217,7 +1217,7 @@ SYM_CODE_START(rewind_stack_and_make_dead)
/* Prevent any naive code from trying to unwind to our caller. */
xorl %ebp, %ebp
- movl PER_CPU_VAR(pcpu_hot + X86_top_of_stack), %esi
+ movl PER_CPU_VAR(cpu_current_top_of_stack), %esi
leal -TOP_OF_KERNEL_STACK_PADDING-PTREGS_SIZE(%esi), %esp
call make_task_dead
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index f52dbe0ad93c..f40bdf97d390 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -92,7 +92,7 @@ SYM_CODE_START(entry_SYSCALL_64)
/* tss.sp2 is scratch space. */
movq %rsp, PER_CPU_VAR(cpu_tss_rw + TSS_sp2)
SWITCH_TO_KERNEL_CR3 scratch_reg=%rsp
- movq PER_CPU_VAR(pcpu_hot + X86_top_of_stack), %rsp
+ movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp
SYM_INNER_LABEL(entry_SYSCALL_64_safe_stack, SYM_L_GLOBAL)
ANNOTATE_NOENDBR
@@ -175,6 +175,7 @@ SYM_CODE_END(entry_SYSCALL_64)
*/
.pushsection .text, "ax"
SYM_FUNC_START(__switch_to_asm)
+ ANNOTATE_NOENDBR
/*
* Save callee-saved registers
* This must match the order in inactive_task_frame
@@ -192,7 +193,7 @@ SYM_FUNC_START(__switch_to_asm)
#ifdef CONFIG_STACKPROTECTOR
movq TASK_stack_canary(%rsi), %rbx
- movq %rbx, PER_CPU_VAR(fixed_percpu_data + FIXED_stack_canary)
+ movq %rbx, PER_CPU_VAR(__stack_chk_guard)
#endif
/*
@@ -742,6 +743,7 @@ _ASM_NOKPROBE(common_interrupt_return)
* Is in entry.text as it shouldn't be instrumented.
*/
SYM_FUNC_START(asm_load_gs_index)
+ ANNOTATE_NOENDBR
FRAME_BEGIN
swapgs
.Lgs_change:
@@ -1166,7 +1168,7 @@ SYM_CODE_START(asm_exc_nmi)
FENCE_SWAPGS_USER_ENTRY
SWITCH_TO_KERNEL_CR3 scratch_reg=%rdx
movq %rsp, %rdx
- movq PER_CPU_VAR(pcpu_hot + X86_top_of_stack), %rsp
+ movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp
UNWIND_HINT_IRET_REGS base=%rdx offset=8
pushq 5*8(%rdx) /* pt_regs->ss */
pushq 4*8(%rdx) /* pt_regs->rsp */
@@ -1484,7 +1486,7 @@ SYM_CODE_START_NOALIGN(rewind_stack_and_make_dead)
/* Prevent any naive code from trying to unwind to our caller. */
xorl %ebp, %ebp
- movq PER_CPU_VAR(pcpu_hot + X86_top_of_stack), %rax
+ movq PER_CPU_VAR(cpu_current_top_of_stack), %rax
leaq -PTREGS_SIZE(%rax), %rsp
UNWIND_HINT_REGS
@@ -1526,6 +1528,7 @@ SYM_CODE_END(rewind_stack_and_make_dead)
* refactored in the future if needed.
*/
SYM_FUNC_START(clear_bhb_loop)
+ ANNOTATE_NOENDBR
push %rbp
mov %rsp, %rbp
movl $5, %ecx
diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S
index ed0a5f2dc129..a45e1125fc6c 100644
--- a/arch/x86/entry/entry_64_compat.S
+++ b/arch/x86/entry/entry_64_compat.S
@@ -57,7 +57,7 @@ SYM_CODE_START(entry_SYSENTER_compat)
SWITCH_TO_KERNEL_CR3 scratch_reg=%rax
popq %rax
- movq PER_CPU_VAR(pcpu_hot + X86_top_of_stack), %rsp
+ movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp
/* Construct struct pt_regs on stack */
pushq $__USER_DS /* pt_regs->ss */
@@ -193,7 +193,7 @@ SYM_CODE_START(entry_SYSCALL_compat)
SWITCH_TO_KERNEL_CR3 scratch_reg=%rsp
/* Switch to the kernel stack */
- movq PER_CPU_VAR(pcpu_hot + X86_top_of_stack), %rsp
+ movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp
SYM_INNER_LABEL(entry_SYSCALL_compat_safe_stack, SYM_L_GLOBAL)
ANNOTATE_NOENDBR
diff --git a/arch/x86/entry/entry_64_fred.S b/arch/x86/entry/entry_64_fred.S
index a02bc6f3d2e6..29c5c32c16c3 100644
--- a/arch/x86/entry/entry_64_fred.S
+++ b/arch/x86/entry/entry_64_fred.S
@@ -58,6 +58,7 @@ SYM_CODE_END(asm_fred_entrypoint_kernel)
#if IS_ENABLED(CONFIG_KVM_INTEL)
SYM_FUNC_START(asm_fred_entry_from_kvm)
+ ANNOTATE_NOENDBR
push %rbp
mov %rsp, %rbp
diff --git a/arch/x86/entry/syscall_32.c b/arch/x86/entry/syscall_32.c
index 8cc9950d7104..2b15ea17bb7c 100644
--- a/arch/x86/entry/syscall_32.c
+++ b/arch/x86/entry/syscall_32.c
@@ -1,10 +1,16 @@
-// SPDX-License-Identifier: GPL-2.0
-/* System call table for i386. */
+// SPDX-License-Identifier: GPL-2.0-only
+/* 32-bit system call dispatch */
#include <linux/linkage.h>
#include <linux/sys.h>
#include <linux/cache.h>
#include <linux/syscalls.h>
+#include <linux/entry-common.h>
+#include <linux/nospec.h>
+#include <linux/uaccess.h>
+#include <asm/apic.h>
+#include <asm/traps.h>
+#include <asm/cpufeature.h>
#include <asm/syscall.h>
#ifdef CONFIG_IA32_EMULATION
@@ -41,4 +47,324 @@ long ia32_sys_call(const struct pt_regs *regs, unsigned int nr)
#include <asm/syscalls_32.h>
default: return __ia32_sys_ni_syscall(regs);
}
-};
+}
+
+static __always_inline int syscall_32_enter(struct pt_regs *regs)
+{
+ if (IS_ENABLED(CONFIG_IA32_EMULATION))
+ current_thread_info()->status |= TS_COMPAT;
+
+ return (int)regs->orig_ax;
+}
+
+#ifdef CONFIG_IA32_EMULATION
+bool __ia32_enabled __ro_after_init = !IS_ENABLED(CONFIG_IA32_EMULATION_DEFAULT_DISABLED);
+
+static int __init ia32_emulation_override_cmdline(char *arg)
+{
+ return kstrtobool(arg, &__ia32_enabled);
+}
+early_param("ia32_emulation", ia32_emulation_override_cmdline);
+#endif
+
+/*
+ * Invoke a 32-bit syscall. Called with IRQs on in CT_STATE_KERNEL.
+ */
+static __always_inline void do_syscall_32_irqs_on(struct pt_regs *regs, int nr)
+{
+ /*
+ * Convert negative numbers to very high and thus out of range
+ * numbers for comparisons.
+ */
+ unsigned int unr = nr;
+
+ if (likely(unr < IA32_NR_syscalls)) {
+ unr = array_index_nospec(unr, IA32_NR_syscalls);
+ regs->ax = ia32_sys_call(regs, unr);
+ } else if (nr != -1) {
+ regs->ax = __ia32_sys_ni_syscall(regs);
+ }
+}
+
+#ifdef CONFIG_IA32_EMULATION
+static __always_inline bool int80_is_external(void)
+{
+ const unsigned int offs = (0x80 / 32) * 0x10;
+ const u32 bit = BIT(0x80 % 32);
+
+ /* The local APIC on XENPV guests is fake */
+ if (cpu_feature_enabled(X86_FEATURE_XENPV))
+ return false;
+
+ /*
+ * If vector 0x80 is set in the APIC ISR then this is an external
+ * interrupt. Either from broken hardware or injected by a VMM.
+ *
+ * Note: In guest mode this is only valid for secure guests where
+ * the secure module fully controls the vAPIC exposed to the guest.
+ */
+ return apic_read(APIC_ISR + offs) & bit;
+}
+
+/**
+ * do_int80_emulation - 32-bit legacy syscall C entry from asm
+ * @regs: syscall arguments in struct pt_regs on the stack.
+ *
+ * This entry point can be used by 32-bit and 64-bit programs to perform
+ * 32-bit system calls. Instances of INT $0x80 can be found inline in
+ * various programs and libraries. It is also used by the vDSO's
+ * __kernel_vsyscall fallback for hardware that doesn't support a faster
+ * entry method. Restarted 32-bit system calls also fall back to INT
+ * $0x80 regardless of what instruction was originally used to do the
+ * system call.
+ *
+ * This is considered a slow path. It is not used by most libc
+ * implementations on modern hardware except during process startup.
+ *
+ * The arguments for the INT $0x80 based syscall are on stack in the
+ * pt_regs structure:
+ * eax: system call number
+ * ebx, ecx, edx, esi, edi, ebp: arg1 - arg 6
+ */
+__visible noinstr void do_int80_emulation(struct pt_regs *regs)
+{
+ int nr;
+
+ /* Kernel does not use INT $0x80! */
+ if (unlikely(!user_mode(regs))) {
+ irqentry_enter(regs);
+ instrumentation_begin();
+ panic("Unexpected external interrupt 0x80\n");
+ }
+
+ /*
+ * Establish kernel context for instrumentation, including for
+ * int80_is_external() below which calls into the APIC driver.
+ * Identical for soft and external interrupts.
+ */
+ enter_from_user_mode(regs);
+
+ instrumentation_begin();
+ add_random_kstack_offset();
+
+ /* Validate that this is a soft interrupt to the extent possible */
+ if (unlikely(int80_is_external()))
+ panic("Unexpected external interrupt 0x80\n");
+
+ /*
+ * The low level idtentry code pushed -1 into regs::orig_ax
+ * and regs::ax contains the syscall number.
+ *
+ * User tracing code (ptrace or signal handlers) might assume
+ * that the regs::orig_ax contains a 32-bit number on invoking
+ * a 32-bit syscall.
+ *
+ * Establish the syscall convention by saving the 32bit truncated
+ * syscall number in regs::orig_ax and by invalidating regs::ax.
+ */
+ regs->orig_ax = regs->ax & GENMASK(31, 0);
+ regs->ax = -ENOSYS;
+
+ nr = syscall_32_enter(regs);
+
+ local_irq_enable();
+ nr = syscall_enter_from_user_mode_work(regs, nr);
+ do_syscall_32_irqs_on(regs, nr);
+
+ instrumentation_end();
+ syscall_exit_to_user_mode(regs);
+}
+
+#ifdef CONFIG_X86_FRED
+/*
+ * A FRED-specific INT80 handler is warranted for the following reasons:
+ *
+ * 1) As INT instructions and hardware interrupts are separate event
+ * types, FRED does not preclude the use of vector 0x80 for external
+ * interrupts. As a result, the FRED setup code does not reserve
+ * vector 0x80 and calling int80_is_external() is not merely
+ * suboptimal but actively incorrect: it could cause a system call
+ * to be incorrectly ignored.
+ *
+ * 2) It is called only for handling vector 0x80 of event type
+ * EVENT_TYPE_SWINT and will never be called to handle any external
+ * interrupt (event type EVENT_TYPE_EXTINT).
+ *
+ * 3) FRED has separate entry flows depending on if the event came from
+ * user space or kernel space, and because the kernel does not use
+ * INT insns, the FRED kernel entry handler fred_entry_from_kernel()
+ * falls through to fred_bad_type() if the event type is
+ * EVENT_TYPE_SWINT, i.e., INT insns. So if the kernel is handling
+ * an INT insn, it can only be from a user level.
+ *
+ * 4) int80_emulation() does a CLEAR_BRANCH_HISTORY. While FRED will
+ * likely take a different approach if it is ever needed: it
+ * probably belongs in either fred_intx()/fred_other() or
+ * asm_fred_entrypoint_user(), depending on if this ought to be done
+ * for all entries from userspace or only system
+ * calls.
+ *
+ * 5) INT $0x80 is the fast path for 32-bit system calls under FRED.
+ */
+DEFINE_FREDENTRY_RAW(int80_emulation)
+{
+ int nr;
+
+ enter_from_user_mode(regs);
+
+ instrumentation_begin();
+ add_random_kstack_offset();
+
+ /*
+ * FRED pushed 0 into regs::orig_ax and regs::ax contains the
+ * syscall number.
+ *
+ * User tracing code (ptrace or signal handlers) might assume
+ * that the regs::orig_ax contains a 32-bit number on invoking
+ * a 32-bit syscall.
+ *
+ * Establish the syscall convention by saving the 32bit truncated
+ * syscall number in regs::orig_ax and by invalidating regs::ax.
+ */
+ regs->orig_ax = regs->ax & GENMASK(31, 0);
+ regs->ax = -ENOSYS;
+
+ nr = syscall_32_enter(regs);
+
+ local_irq_enable();
+ nr = syscall_enter_from_user_mode_work(regs, nr);
+ do_syscall_32_irqs_on(regs, nr);
+
+ instrumentation_end();
+ syscall_exit_to_user_mode(regs);
+}
+#endif /* CONFIG_X86_FRED */
+
+#else /* CONFIG_IA32_EMULATION */
+
+/* Handles int $0x80 on a 32bit kernel */
+__visible noinstr void do_int80_syscall_32(struct pt_regs *regs)
+{
+ int nr = syscall_32_enter(regs);
+
+ add_random_kstack_offset();
+ /*
+ * Subtlety here: if ptrace pokes something larger than 2^31-1 into
+ * orig_ax, the int return value truncates it. This matches
+ * the semantics of syscall_get_nr().
+ */
+ nr = syscall_enter_from_user_mode(regs, nr);
+ instrumentation_begin();
+
+ do_syscall_32_irqs_on(regs, nr);
+
+ instrumentation_end();
+ syscall_exit_to_user_mode(regs);
+}
+#endif /* !CONFIG_IA32_EMULATION */
+
+static noinstr bool __do_fast_syscall_32(struct pt_regs *regs)
+{
+ int nr = syscall_32_enter(regs);
+ int res;
+
+ add_random_kstack_offset();
+ /*
+ * This cannot use syscall_enter_from_user_mode() as it has to
+ * fetch EBP before invoking any of the syscall entry work
+ * functions.
+ */
+ syscall_enter_from_user_mode_prepare(regs);
+
+ instrumentation_begin();
+ /* Fetch EBP from where the vDSO stashed it. */
+ if (IS_ENABLED(CONFIG_X86_64)) {
+ /*
+ * Micro-optimization: the pointer we're following is
+ * explicitly 32 bits, so it can't be out of range.
+ */
+ res = __get_user(*(u32 *)&regs->bp,
+ (u32 __user __force *)(unsigned long)(u32)regs->sp);
+ } else {
+ res = get_user(*(u32 *)&regs->bp,
+ (u32 __user __force *)(unsigned long)(u32)regs->sp);
+ }
+
+ if (res) {
+ /* User code screwed up. */
+ regs->ax = -EFAULT;
+
+ local_irq_disable();
+ instrumentation_end();
+ irqentry_exit_to_user_mode(regs);
+ return false;
+ }
+
+ nr = syscall_enter_from_user_mode_work(regs, nr);
+
+ /* Now this is just like a normal syscall. */
+ do_syscall_32_irqs_on(regs, nr);
+
+ instrumentation_end();
+ syscall_exit_to_user_mode(regs);
+ return true;
+}
+
+/* Returns true to return using SYSEXIT/SYSRETL, or false to use IRET */
+__visible noinstr bool do_fast_syscall_32(struct pt_regs *regs)
+{
+ /*
+ * Called using the internal vDSO SYSENTER/SYSCALL32 calling
+ * convention. Adjust regs so it looks like we entered using int80.
+ */
+ unsigned long landing_pad = (unsigned long)current->mm->context.vdso +
+ vdso_image_32.sym_int80_landing_pad;
+
+ /*
+ * SYSENTER loses EIP, and even SYSCALL32 needs us to skip forward
+ * so that 'regs->ip -= 2' lands back on an int $0x80 instruction.
+ * Fix it up.
+ */
+ regs->ip = landing_pad;
+
+ /* Invoke the syscall. If it failed, keep it simple: use IRET. */
+ if (!__do_fast_syscall_32(regs))
+ return false;
+
+ /*
+ * Check that the register state is valid for using SYSRETL/SYSEXIT
+ * to exit to userspace. Otherwise use the slower but fully capable
+ * IRET exit path.
+ */
+
+ /* XEN PV guests always use the IRET path */
+ if (cpu_feature_enabled(X86_FEATURE_XENPV))
+ return false;
+
+ /* EIP must point to the VDSO landing pad */
+ if (unlikely(regs->ip != landing_pad))
+ return false;
+
+ /* CS and SS must match the values set in MSR_STAR */
+ if (unlikely(regs->cs != __USER32_CS || regs->ss != __USER_DS))
+ return false;
+
+ /* If the TF, RF, or VM flags are set, use IRET */
+ if (unlikely(regs->flags & (X86_EFLAGS_RF | X86_EFLAGS_TF | X86_EFLAGS_VM)))
+ return false;
+
+ /* Use SYSRETL/SYSEXIT to exit to userspace */
+ return true;
+}
+
+/* Returns true to return using SYSEXIT/SYSRETL, or false to use IRET */
+__visible noinstr bool do_SYSENTER_32(struct pt_regs *regs)
+{
+ /* SYSENTER loses RSP, but the vDSO saved it in RBP. */
+ regs->sp = regs->bp;
+
+ /* SYSENTER clobbers EFLAGS.IF. Assume it was set in usermode. */
+ regs->flags |= X86_EFLAGS_IF;
+
+ return do_fast_syscall_32(regs);
+}
diff --git a/arch/x86/entry/syscall_64.c b/arch/x86/entry/syscall_64.c
index ba8354424860..b6e68ea98b83 100644
--- a/arch/x86/entry/syscall_64.c
+++ b/arch/x86/entry/syscall_64.c
@@ -1,15 +1,20 @@
-// SPDX-License-Identifier: GPL-2.0
-/* System call table for x86-64. */
+// SPDX-License-Identifier: GPL-2.0-only
+/* 64-bit system call dispatch */
#include <linux/linkage.h>
#include <linux/sys.h>
#include <linux/cache.h>
#include <linux/syscalls.h>
+#include <linux/entry-common.h>
+#include <linux/nospec.h>
#include <asm/syscall.h>
#define __SYSCALL(nr, sym) extern long __x64_##sym(const struct pt_regs *);
#define __SYSCALL_NORETURN(nr, sym) extern long __noreturn __x64_##sym(const struct pt_regs *);
#include <asm/syscalls_64.h>
+#ifdef CONFIG_X86_X32_ABI
+#include <asm/syscalls_x32.h>
+#endif
#undef __SYSCALL
#undef __SYSCALL_NORETURN
@@ -33,4 +38,104 @@ long x64_sys_call(const struct pt_regs *regs, unsigned int nr)
#include <asm/syscalls_64.h>
default: return __x64_sys_ni_syscall(regs);
}
-};
+}
+
+#ifdef CONFIG_X86_X32_ABI
+long x32_sys_call(const struct pt_regs *regs, unsigned int nr)
+{
+ switch (nr) {
+ #include <asm/syscalls_x32.h>
+ default: return __x64_sys_ni_syscall(regs);
+ }
+}
+#endif
+
+static __always_inline bool do_syscall_x64(struct pt_regs *regs, int nr)
+{
+ /*
+ * Convert negative numbers to very high and thus out of range
+ * numbers for comparisons.
+ */
+ unsigned int unr = nr;
+
+ if (likely(unr < NR_syscalls)) {
+ unr = array_index_nospec(unr, NR_syscalls);
+ regs->ax = x64_sys_call(regs, unr);
+ return true;
+ }
+ return false;
+}
+
+static __always_inline bool do_syscall_x32(struct pt_regs *regs, int nr)
+{
+ /*
+ * Adjust the starting offset of the table, and convert numbers
+ * < __X32_SYSCALL_BIT to very high and thus out of range
+ * numbers for comparisons.
+ */
+ unsigned int xnr = nr - __X32_SYSCALL_BIT;
+
+ if (IS_ENABLED(CONFIG_X86_X32_ABI) && likely(xnr < X32_NR_syscalls)) {
+ xnr = array_index_nospec(xnr, X32_NR_syscalls);
+ regs->ax = x32_sys_call(regs, xnr);
+ return true;
+ }
+ return false;
+}
+
+/* Returns true to return using SYSRET, or false to use IRET */
+__visible noinstr bool do_syscall_64(struct pt_regs *regs, int nr)
+{
+ add_random_kstack_offset();
+ nr = syscall_enter_from_user_mode(regs, nr);
+
+ instrumentation_begin();
+
+ if (!do_syscall_x64(regs, nr) && !do_syscall_x32(regs, nr) && nr != -1) {
+ /* Invalid system call, but still a system call. */
+ regs->ax = __x64_sys_ni_syscall(regs);
+ }
+
+ instrumentation_end();
+ syscall_exit_to_user_mode(regs);
+
+ /*
+ * Check that the register state is valid for using SYSRET to exit
+ * to userspace. Otherwise use the slower but fully capable IRET
+ * exit path.
+ */
+
+ /* XEN PV guests always use the IRET path */
+ if (cpu_feature_enabled(X86_FEATURE_XENPV))
+ return false;
+
+ /* SYSRET requires RCX == RIP and R11 == EFLAGS */
+ if (unlikely(regs->cx != regs->ip || regs->r11 != regs->flags))
+ return false;
+
+ /* CS and SS must match the values set in MSR_STAR */
+ if (unlikely(regs->cs != __USER_CS || regs->ss != __USER_DS))
+ return false;
+
+ /*
+ * On Intel CPUs, SYSRET with non-canonical RCX/RIP will #GP
+ * in kernel space. This essentially lets the user take over
+ * the kernel, since userspace controls RSP.
+ *
+ * TASK_SIZE_MAX covers all user-accessible addresses other than
+ * the deprecated vsyscall page.
+ */
+ if (unlikely(regs->ip >= TASK_SIZE_MAX))
+ return false;
+
+ /*
+ * SYSRET cannot restore RF. It can restore TF, but unlike IRET,
+ * restoring TF results in a trap from userspace immediately after
+ * SYSRET.
+ */
+ if (unlikely(regs->flags & (X86_EFLAGS_RF | X86_EFLAGS_TF)))
+ return false;
+
+ /* Use SYSRET to exit to userspace */
+ return true;
+}
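
The do_syscall_x64()/do_syscall_x32() helpers added above share a common shape: treat the (possibly negative) syscall number as unsigned so all out-of-range values fail a single bounds check, clamp the index with array_index_nospec() so speculation stays inside the table, then dispatch through the table. A minimal user-space sketch of that shape follows; index_clamp() is a plain stand-in for array_index_nospec() and does not reproduce the speculation barrier.

/*
 * Bounds-clamped table dispatch, modelled on do_syscall_x64() above.
 * index_clamp() is only an illustrative stub for array_index_nospec().
 */
#include <stdio.h>

#define NR_CALLS 3

typedef long (*call_fn)(long arg);

static long call_a(long arg) { return arg + 1; }
static long call_b(long arg) { return arg * 2; }
static long call_c(long arg) { return -arg; }

static call_fn call_table[NR_CALLS] = { call_a, call_b, call_c };

/* Stand-in for array_index_nospec(): keep the index inside the valid range. */
static unsigned int index_clamp(unsigned int idx, unsigned int size)
{
	return idx < size ? idx : 0;
}

static int dispatch(int nr, long arg, long *ret)
{
	/* Negative numbers become very large when viewed as unsigned. */
	unsigned int unr = nr;

	if (unr < NR_CALLS) {
		unr = index_clamp(unr, NR_CALLS);
		*ret = call_table[unr](arg);
		return 1;
	}
	return 0;	/* invalid call number */
}

int main(void)
{
	long ret;

	if (dispatch(1, 21, &ret))
		printf("result: %ld\n", ret);	/* prints 42 */
	if (!dispatch(-1, 0, &ret))
		printf("-1 rejected as out of range\n");
	return 0;
}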
diff --git a/arch/x86/entry/syscall_x32.c b/arch/x86/entry/syscall_x32.c
deleted file mode 100644
index fb77908f44f3..000000000000
--- a/arch/x86/entry/syscall_x32.c
+++ /dev/null
@@ -1,25 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* System call table for x32 ABI. */
-
-#include <linux/linkage.h>
-#include <linux/sys.h>
-#include <linux/cache.h>
-#include <linux/syscalls.h>
-#include <asm/syscall.h>
-
-#define __SYSCALL(nr, sym) extern long __x64_##sym(const struct pt_regs *);
-#define __SYSCALL_NORETURN(nr, sym) extern long __noreturn __x64_##sym(const struct pt_regs *);
-#include <asm/syscalls_x32.h>
-#undef __SYSCALL
-
-#undef __SYSCALL_NORETURN
-#define __SYSCALL_NORETURN __SYSCALL
-
-#define __SYSCALL(nr, sym) case nr: return __x64_##sym(regs);
-long x32_sys_call(const struct pt_regs *regs, unsigned int nr)
-{
- switch (nr) {
- #include <asm/syscalls_x32.h>
- default: return __x64_sys_ni_syscall(regs);
- }
-};
diff --git a/arch/x86/entry/syscalls/syscall_32.tbl b/arch/x86/entry/syscalls/syscall_32.tbl
index 4d0fb2fba7e2..ac007ea00979 100644
--- a/arch/x86/entry/syscalls/syscall_32.tbl
+++ b/arch/x86/entry/syscalls/syscall_32.tbl
@@ -396,7 +396,7 @@
381 i386 pkey_alloc sys_pkey_alloc
382 i386 pkey_free sys_pkey_free
383 i386 statx sys_statx
-384 i386 arch_prctl sys_arch_prctl compat_sys_arch_prctl
+384 i386 arch_prctl sys_arch_prctl
385 i386 io_pgetevents sys_io_pgetevents_time32 compat_sys_io_pgetevents
386 i386 rseq sys_rseq
393 i386 semget sys_semget
@@ -472,3 +472,4 @@
464 i386 getxattrat sys_getxattrat
465 i386 listxattrat sys_listxattrat
466 i386 removexattrat sys_removexattrat
+467 i386 open_tree_attr sys_open_tree_attr
diff --git a/arch/x86/entry/syscalls/syscall_64.tbl b/arch/x86/entry/syscalls/syscall_64.tbl
index 5eb708bff1c7..cfb5ca41e30d 100644
--- a/arch/x86/entry/syscalls/syscall_64.tbl
+++ b/arch/x86/entry/syscalls/syscall_64.tbl
@@ -390,6 +390,7 @@
464 common getxattrat sys_getxattrat
465 common listxattrat sys_listxattrat
466 common removexattrat sys_removexattrat
+467 common open_tree_attr sys_open_tree_attr
#
# Due to a historical design error, certain syscalls are numbered differently
diff --git a/arch/x86/entry/vdso/Makefile b/arch/x86/entry/vdso/Makefile
index c9216ac4fb1e..54d3e9774d62 100644
--- a/arch/x86/entry/vdso/Makefile
+++ b/arch/x86/entry/vdso/Makefile
@@ -4,7 +4,7 @@
#
# Include the generic Makefile to check the built vDSO:
-include $(srctree)/lib/vdso/Makefile
+include $(srctree)/lib/vdso/Makefile.include
# Files to link into the vDSO:
vobjs-y := vdso-note.o vclock_gettime.o vgetcpu.o vgetrandom.o vgetrandom-chacha.o
@@ -32,7 +32,7 @@ targets += $(foreach x, 64 x32 32, vdso-image-$(x).c vdso$(x).so vdso$(x).so.dbg
CPPFLAGS_vdso.lds += -P -C
-VDSO_LDFLAGS_vdso.lds = -m elf_x86_64 -soname linux-vdso.so.1 --no-undefined \
+VDSO_LDFLAGS_vdso.lds = -m elf_x86_64 -soname linux-vdso.so.1 \
-z max-page-size=4096
$(obj)/vdso64.so.dbg: $(obj)/vdso.lds $(vobjs) FORCE
@@ -133,6 +133,7 @@ KBUILD_CFLAGS_32 += -fno-stack-protector
KBUILD_CFLAGS_32 += $(call cc-option, -foptimize-sibling-calls)
KBUILD_CFLAGS_32 += -fno-omit-frame-pointer
KBUILD_CFLAGS_32 += -DDISABLE_BRANCH_PROFILING
+KBUILD_CFLAGS_32 += -DBUILD_VDSO
ifdef CONFIG_MITIGATION_RETPOLINE
ifneq ($(RETPOLINE_VDSO_CFLAGS),)
@@ -151,10 +152,9 @@ $(obj)/vdso32.so.dbg: $(obj)/vdso32/vdso32.lds $(vobjs32) FORCE
quiet_cmd_vdso = VDSO $@
cmd_vdso = $(LD) -o $@ \
$(VDSO_LDFLAGS) $(VDSO_LDFLAGS_$(filter %.lds,$(^F))) \
- -T $(filter %.lds,$^) $(filter %.o,$^) && \
- sh $(src)/checkundef.sh '$(NM)' '$@'
+ -T $(filter %.lds,$^) $(filter %.o,$^)
-VDSO_LDFLAGS = -shared --hash-style=both --build-id=sha1 \
+VDSO_LDFLAGS = -shared --hash-style=both --build-id=sha1 --no-undefined \
$(call ld-option, --eh-frame-hdr) -Bsymbolic -z noexecstack
quiet_cmd_vdso_and_check = VDSO $@
diff --git a/arch/x86/entry/vdso/checkundef.sh b/arch/x86/entry/vdso/checkundef.sh
deleted file mode 100755
index 7ee90a9b549d..000000000000
--- a/arch/x86/entry/vdso/checkundef.sh
+++ /dev/null
@@ -1,10 +0,0 @@
-#!/bin/sh
-nm="$1"
-file="$2"
-$nm "$file" | grep '^ *U' > /dev/null 2>&1
-if [ $? -eq 1 ]; then
- exit 0
-else
- echo "$file: undefined symbols found" >&2
- exit 1
-fi
diff --git a/arch/x86/entry/vdso/extable.h b/arch/x86/entry/vdso/extable.h
index b56f6b012941..baba612b832c 100644
--- a/arch/x86/entry/vdso/extable.h
+++ b/arch/x86/entry/vdso/extable.h
@@ -7,7 +7,7 @@
* vDSO uses a dedicated handler the addresses are relative to the overall
* exception table, not each individual entry.
*/
-#ifdef __ASSEMBLY__
+#ifdef __ASSEMBLER__
#define _ASM_VDSO_EXTABLE_HANDLE(from, to) \
ASM_VDSO_EXTABLE_HANDLE from to
diff --git a/arch/x86/entry/vdso/vdso-layout.lds.S b/arch/x86/entry/vdso/vdso-layout.lds.S
index 872947c1004c..ec1ac191a057 100644
--- a/arch/x86/entry/vdso/vdso-layout.lds.S
+++ b/arch/x86/entry/vdso/vdso-layout.lds.S
@@ -1,6 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
#include <asm/vdso.h>
#include <asm/vdso/vsyscall.h>
+#include <vdso/datapage.h>
/*
* Linker script for vDSO. This is an ELF shared object prelinked to
@@ -17,14 +18,9 @@ SECTIONS
* segment.
*/
- vvar_start = . - __VVAR_PAGES * PAGE_SIZE;
- vvar_page = vvar_start;
+ VDSO_VVAR_SYMS
- vdso_rng_data = vvar_page + __VDSO_RND_DATA_OFFSET;
-
- timens_page = vvar_start + PAGE_SIZE;
-
- vclock_pages = vvar_start + VDSO_NR_VCLOCK_PAGES * PAGE_SIZE;
+ vclock_pages = VDSO_VCLOCK_PAGES_START(vdso_u_data);
pvclock_page = vclock_pages + VDSO_PAGE_PVCLOCK_OFFSET * PAGE_SIZE;
hvclock_page = vclock_pages + VDSO_PAGE_HVCLOCK_OFFSET * PAGE_SIZE;
diff --git a/arch/x86/entry/vdso/vdso2c.c b/arch/x86/entry/vdso/vdso2c.c
index 90d15f2a7205..f84e8f8fa5fe 100644
--- a/arch/x86/entry/vdso/vdso2c.c
+++ b/arch/x86/entry/vdso/vdso2c.c
@@ -69,33 +69,12 @@
const char *outfilename;
-/* Symbols that we need in vdso2c. */
-enum {
- sym_vvar_start,
- sym_vvar_page,
- sym_pvclock_page,
- sym_hvclock_page,
- sym_timens_page,
-};
-
-const int special_pages[] = {
- sym_vvar_page,
- sym_pvclock_page,
- sym_hvclock_page,
- sym_timens_page,
-};
-
struct vdso_sym {
const char *name;
bool export;
};
struct vdso_sym required_syms[] = {
- [sym_vvar_start] = {"vvar_start", true},
- [sym_vvar_page] = {"vvar_page", true},
- [sym_pvclock_page] = {"pvclock_page", true},
- [sym_hvclock_page] = {"hvclock_page", true},
- [sym_timens_page] = {"timens_page", true},
{"VDSO32_NOTE_MASK", true},
{"__kernel_vsyscall", true},
{"__kernel_sigreturn", true},
diff --git a/arch/x86/entry/vdso/vdso2c.h b/arch/x86/entry/vdso/vdso2c.h
index 67b3e37576a6..78ed1c1f28b9 100644
--- a/arch/x86/entry/vdso/vdso2c.h
+++ b/arch/x86/entry/vdso/vdso2c.h
@@ -150,26 +150,6 @@ static void BITSFUNC(go)(void *raw_addr, size_t raw_len,
}
}
- /* Validate mapping addresses. */
- for (i = 0; i < sizeof(special_pages) / sizeof(special_pages[0]); i++) {
- INT_BITS symval = syms[special_pages[i]];
-
- if (!symval)
- continue; /* The mapping isn't used; ignore it. */
-
- if (symval % 4096)
- fail("%s must be a multiple of 4096\n",
- required_syms[i].name);
- if (symval + 4096 < syms[sym_vvar_start])
- fail("%s underruns vvar_start\n",
- required_syms[i].name);
- if (symval + 4096 > 0)
- fail("%s is on the wrong side of the vdso text\n",
- required_syms[i].name);
- }
- if (syms[sym_vvar_start] % 4096)
- fail("vvar_begin must be a multiple of 4096\n");
-
if (!image_name) {
fwrite(stripped_addr, stripped_len, 1, outfile);
return;
diff --git a/arch/x86/entry/vdso/vdso32-setup.c b/arch/x86/entry/vdso/vdso32-setup.c
index f6d2d8aba643..8894013eea1d 100644
--- a/arch/x86/entry/vdso/vdso32-setup.c
+++ b/arch/x86/entry/vdso/vdso32-setup.c
@@ -51,15 +51,17 @@ __setup("vdso32=", vdso32_setup);
__setup_param("vdso=", vdso_setup, vdso32_setup, 0);
#endif
-#ifdef CONFIG_X86_64
#ifdef CONFIG_SYSCTL
-/* Register vsyscall32 into the ABI table */
#include <linux/sysctl.h>
-static const struct ctl_table abi_table2[] = {
+static const struct ctl_table vdso_table[] = {
{
+#ifdef CONFIG_X86_64
.procname = "vsyscall32",
+#else
+ .procname = "vdso_enabled",
+#endif
.data = &vdso32_enabled,
.maxlen = sizeof(int),
.mode = 0644,
@@ -71,10 +73,14 @@ static const struct ctl_table abi_table2[] = {
static __init int ia32_binfmt_init(void)
{
- register_sysctl("abi", abi_table2);
+#ifdef CONFIG_X86_64
+ /* Register vsyscall32 into the ABI table */
+ register_sysctl("abi", vdso_table);
+#else
+ register_sysctl_init("vm", vdso_table);
+#endif
return 0;
}
__initcall(ia32_binfmt_init);
#endif /* CONFIG_SYSCTL */
-#endif /* CONFIG_X86_64 */
diff --git a/arch/x86/entry/vdso/vma.c b/arch/x86/entry/vdso/vma.c
index 39e6efc1a9ca..9518bf1ddf35 100644
--- a/arch/x86/entry/vdso/vma.c
+++ b/arch/x86/entry/vdso/vma.c
@@ -14,7 +14,7 @@
#include <linux/elf.h>
#include <linux/cpu.h>
#include <linux/ptrace.h>
-#include <linux/time_namespace.h>
+#include <linux/vdso_datastore.h>
#include <asm/pvclock.h>
#include <asm/vgtod.h>
@@ -27,13 +27,7 @@
#include <asm/vdso/vsyscall.h>
#include <clocksource/hyperv_timer.h>
-struct vdso_data *arch_get_vdso_data(void *vvar_page)
-{
- return (struct vdso_data *)vvar_page;
-}
-
-static union vdso_data_store vdso_data_store __page_aligned_data;
-struct vdso_data *vdso_data = vdso_data_store.data;
+static_assert(VDSO_NR_PAGES + VDSO_NR_VCLOCK_PAGES == __VDSO_PAGES);
unsigned int vclocks_used __read_mostly;
@@ -48,13 +42,11 @@ int __init init_vdso_image(const struct vdso_image *image)
apply_alternatives((struct alt_instr *)(image->data + image->alt),
(struct alt_instr *)(image->data + image->alt +
- image->alt_len),
- NULL);
+ image->alt_len));
return 0;
}
-static const struct vm_special_mapping vvar_mapping;
struct linux_binprm;
static vm_fault_t vdso_fault(const struct vm_special_mapping *sm,
@@ -98,99 +90,6 @@ static int vdso_mremap(const struct vm_special_mapping *sm,
return 0;
}
-#ifdef CONFIG_TIME_NS
-/*
- * The vvar page layout depends on whether a task belongs to the root or
- * non-root time namespace. Whenever a task changes its namespace, the VVAR
- * page tables are cleared and then they will re-faulted with a
- * corresponding layout.
- * See also the comment near timens_setup_vdso_data() for details.
- */
-int vdso_join_timens(struct task_struct *task, struct time_namespace *ns)
-{
- struct mm_struct *mm = task->mm;
- struct vm_area_struct *vma;
- VMA_ITERATOR(vmi, mm, 0);
-
- mmap_read_lock(mm);
- for_each_vma(vmi, vma) {
- if (vma_is_special_mapping(vma, &vvar_mapping))
- zap_vma_pages(vma);
- }
- mmap_read_unlock(mm);
-
- return 0;
-}
-#endif
-
-static vm_fault_t vvar_fault(const struct vm_special_mapping *sm,
- struct vm_area_struct *vma, struct vm_fault *vmf)
-{
- const struct vdso_image *image = vma->vm_mm->context.vdso_image;
- unsigned long pfn;
- long sym_offset;
-
- if (!image)
- return VM_FAULT_SIGBUS;
-
- sym_offset = (long)(vmf->pgoff << PAGE_SHIFT) +
- image->sym_vvar_start;
-
- /*
- * Sanity check: a symbol offset of zero means that the page
- * does not exist for this vdso image, not that the page is at
- * offset zero relative to the text mapping. This should be
- * impossible here, because sym_offset should only be zero for
- * the page past the end of the vvar mapping.
- */
- if (sym_offset == 0)
- return VM_FAULT_SIGBUS;
-
- if (sym_offset == image->sym_vvar_page) {
- struct page *timens_page = find_timens_vvar_page(vma);
-
- pfn = __pa_symbol(vdso_data) >> PAGE_SHIFT;
-
- /*
- * If a task belongs to a time namespace then a namespace
- * specific VVAR is mapped with the sym_vvar_page offset and
- * the real VVAR page is mapped with the sym_timens_page
- * offset.
- * See also the comment near timens_setup_vdso_data().
- */
- if (timens_page) {
- unsigned long addr;
- vm_fault_t err;
-
- /*
- * Optimization: inside time namespace pre-fault
- * VVAR page too. As on timens page there are only
- * offsets for clocks on VVAR, it'll be faulted
- * shortly by VDSO code.
- */
- addr = vmf->address + (image->sym_timens_page - sym_offset);
- err = vmf_insert_pfn(vma, addr, pfn);
- if (unlikely(err & VM_FAULT_ERROR))
- return err;
-
- pfn = page_to_pfn(timens_page);
- }
-
- return vmf_insert_pfn(vma, vmf->address, pfn);
-
- } else if (sym_offset == image->sym_timens_page) {
- struct page *timens_page = find_timens_vvar_page(vma);
-
- if (!timens_page)
- return VM_FAULT_SIGBUS;
-
- pfn = __pa_symbol(vdso_data) >> PAGE_SHIFT;
- return vmf_insert_pfn(vma, vmf->address, pfn);
- }
-
- return VM_FAULT_SIGBUS;
-}
-
static vm_fault_t vvar_vclock_fault(const struct vm_special_mapping *sm,
struct vm_area_struct *vma, struct vm_fault *vmf)
{
@@ -212,7 +111,6 @@ static vm_fault_t vvar_vclock_fault(const struct vm_special_mapping *sm,
case VDSO_PAGE_HVCLOCK_OFFSET:
{
unsigned long pfn = hv_get_tsc_pfn();
-
if (pfn && vclock_was_used(VDSO_CLOCKMODE_HVCLOCK))
return vmf_insert_pfn(vma, vmf->address, pfn);
break;
@@ -228,10 +126,6 @@ static const struct vm_special_mapping vdso_mapping = {
.fault = vdso_fault,
.mremap = vdso_mremap,
};
-static const struct vm_special_mapping vvar_mapping = {
- .name = "[vvar]",
- .fault = vvar_fault,
-};
static const struct vm_special_mapping vvar_vclock_mapping = {
.name = "[vvar_vclock]",
.fault = vvar_vclock_fault,
@@ -253,13 +147,13 @@ static int map_vdso(const struct vdso_image *image, unsigned long addr)
return -EINTR;
addr = get_unmapped_area(NULL, addr,
- image->size - image->sym_vvar_start, 0, 0);
+ image->size + __VDSO_PAGES * PAGE_SIZE, 0, 0);
if (IS_ERR_VALUE(addr)) {
ret = addr;
goto up_fail;
}
- text_start = addr - image->sym_vvar_start;
+ text_start = addr + __VDSO_PAGES * PAGE_SIZE;
/*
* MAYWRITE to allow gdb to COW and set breakpoints
@@ -276,13 +170,7 @@ static int map_vdso(const struct vdso_image *image, unsigned long addr)
goto up_fail;
}
- vma = _install_special_mapping(mm,
- addr,
- (__VVAR_PAGES - VDSO_NR_VCLOCK_PAGES) * PAGE_SIZE,
- VM_READ|VM_MAYREAD|VM_IO|VM_DONTDUMP|
- VM_PFNMAP,
- &vvar_mapping);
-
+ vma = vdso_install_vvar_mapping(mm, addr);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
do_munmap(mm, text_start, image->size, NULL);
@@ -290,7 +178,7 @@ static int map_vdso(const struct vdso_image *image, unsigned long addr)
}
vma = _install_special_mapping(mm,
- addr + (__VVAR_PAGES - VDSO_NR_VCLOCK_PAGES) * PAGE_SIZE,
+ VDSO_VCLOCK_PAGES_START(addr),
VDSO_NR_VCLOCK_PAGES * PAGE_SIZE,
VM_READ|VM_MAYREAD|VM_IO|VM_DONTDUMP|
VM_PFNMAP,
@@ -327,7 +215,7 @@ int map_vdso_once(const struct vdso_image *image, unsigned long addr)
*/
for_each_vma(vmi, vma) {
if (vma_is_special_mapping(vma, &vdso_mapping) ||
- vma_is_special_mapping(vma, &vvar_mapping) ||
+ vma_is_special_mapping(vma, &vdso_vvar_mapping) ||
vma_is_special_mapping(vma, &vvar_vclock_mapping)) {
mmap_write_unlock(mm);
return -EEXIST;
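
The map_vdso() hunk above drops the image-relative sym_vvar_start arithmetic: the generic vvar pages now occupy a fixed block of __VDSO_PAGES pages in front of the vdso text, with the vclock pages at the tail of that block. A minimal userspace sketch of the resulting layout follows; the page counts and the hint address are illustrative assumptions, not the kernel's definitions.

/*
 * Sketch of the new mapping layout: vvar pages first, vclock pages at the
 * end of the vvar block, vdso text after __VDSO_PAGES pages.
 */
#include <stdio.h>

#define PAGE_SIZE		4096UL
#define __VDSO_PAGES		6	/* assumed total vvar + vclock pages */
#define VDSO_NR_VCLOCK_PAGES	2	/* assumed number of vclock pages */

/* Mirrors the expression replaced in map_vdso() above. */
#define VDSO_VCLOCK_PAGES_START(addr) \
	((addr) + (__VDSO_PAGES - VDSO_NR_VCLOCK_PAGES) * PAGE_SIZE)

int main(void)
{
	unsigned long addr = 0x7ffff7fc0000UL;	/* hypothetical result of get_unmapped_area() */
	unsigned long text_start = addr + __VDSO_PAGES * PAGE_SIZE;

	printf("vvar pages   : %#lx\n", addr);
	printf("vclock pages : %#lx\n", VDSO_VCLOCK_PAGES_START(addr));
	printf("vdso text    : %#lx\n", text_start);
	return 0;
}
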
diff --git a/arch/x86/events/amd/brs.c b/arch/x86/events/amd/brs.c
index 780acd3dff22..ec3427463382 100644
--- a/arch/x86/events/amd/brs.c
+++ b/arch/x86/events/amd/brs.c
@@ -381,7 +381,8 @@ static void amd_brs_poison_buffer(void)
* On ctxswin, sched_in = true, called after the PMU has started
* On ctxswout, sched_in = false, called before the PMU is stopped
*/
-void amd_pmu_brs_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched_in)
+void amd_pmu_brs_sched_task(struct perf_event_pmu_context *pmu_ctx,
+ struct task_struct *task, bool sched_in)
{
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
diff --git a/arch/x86/events/amd/ibs.c b/arch/x86/events/amd/ibs.c
index e7a8b8758e08..0252b7ea8bca 100644
--- a/arch/x86/events/amd/ibs.c
+++ b/arch/x86/events/amd/ibs.c
@@ -28,9 +28,6 @@ static u32 ibs_caps;
#include <asm/nmi.h>
#include <asm/amd-ibs.h>
-#define IBS_FETCH_CONFIG_MASK (IBS_FETCH_RAND_EN | IBS_FETCH_MAX_CNT)
-#define IBS_OP_CONFIG_MASK IBS_OP_MAX_CNT
-
/* attr.config2 */
#define IBS_SW_FILTER_MASK 1
@@ -89,6 +86,7 @@ struct perf_ibs {
u64 cnt_mask;
u64 enable_mask;
u64 valid_mask;
+ u16 min_period;
u64 max_period;
unsigned long offset_mask[1];
int offset_max;
@@ -270,11 +268,19 @@ static int validate_group(struct perf_event *event)
return 0;
}
+static bool perf_ibs_ldlat_event(struct perf_ibs *perf_ibs,
+ struct perf_event *event)
+{
+ return perf_ibs == &perf_ibs_op &&
+ (ibs_caps & IBS_CAPS_OPLDLAT) &&
+ (event->attr.config1 & 0xFFF);
+}
+
static int perf_ibs_init(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
struct perf_ibs *perf_ibs;
- u64 max_cnt, config;
+ u64 config;
int ret;
perf_ibs = get_ibs_pmu(event->attr.type);
@@ -310,25 +316,47 @@ static int perf_ibs_init(struct perf_event *event)
if (config & perf_ibs->cnt_mask)
/* raw max_cnt may not be set */
return -EINVAL;
- if (!event->attr.sample_freq && hwc->sample_period & 0x0f)
- /*
- * lower 4 bits can not be set in ibs max cnt,
- * but allowing it in case we adjust the
- * sample period to set a frequency.
- */
- return -EINVAL;
- hwc->sample_period &= ~0x0FULL;
- if (!hwc->sample_period)
- hwc->sample_period = 0x10;
+
+ if (event->attr.freq) {
+ hwc->sample_period = perf_ibs->min_period;
+ } else {
+ /* Silently mask off lower nibble. IBS hw mandates it. */
+ hwc->sample_period &= ~0x0FULL;
+ if (hwc->sample_period < perf_ibs->min_period)
+ return -EINVAL;
+ }
} else {
- max_cnt = config & perf_ibs->cnt_mask;
+ u64 period = 0;
+
+ if (event->attr.freq)
+ return -EINVAL;
+
+ if (perf_ibs == &perf_ibs_op) {
+ period = (config & IBS_OP_MAX_CNT) << 4;
+ if (ibs_caps & IBS_CAPS_OPCNTEXT)
+ period |= config & IBS_OP_MAX_CNT_EXT_MASK;
+ } else {
+ period = (config & IBS_FETCH_MAX_CNT) << 4;
+ }
+
config &= ~perf_ibs->cnt_mask;
- event->attr.sample_period = max_cnt << 4;
- hwc->sample_period = event->attr.sample_period;
+ event->attr.sample_period = period;
+ hwc->sample_period = period;
+
+ if (hwc->sample_period < perf_ibs->min_period)
+ return -EINVAL;
}
- if (!hwc->sample_period)
- return -EINVAL;
+ if (perf_ibs_ldlat_event(perf_ibs, event)) {
+ u64 ldlat = event->attr.config1 & 0xFFF;
+
+ if (ldlat < 128 || ldlat > 2048)
+ return -EINVAL;
+ ldlat >>= 7;
+
+ config |= (ldlat - 1) << 59;
+ config |= IBS_OP_L3MISSONLY | IBS_OP_LDLAT_EN;
+ }
/*
* If we modify hwc->sample_period, we also need to update
@@ -349,7 +377,8 @@ static int perf_ibs_set_period(struct perf_ibs *perf_ibs,
int overflow;
/* ignore lower 4 bits in min count: */
- overflow = perf_event_set_period(hwc, 1<<4, perf_ibs->max_period, period);
+ overflow = perf_event_set_period(hwc, perf_ibs->min_period,
+ perf_ibs->max_period, period);
local64_set(&hwc->prev_count, 0);
return overflow;
@@ -447,6 +476,9 @@ static void perf_ibs_start(struct perf_event *event, int flags)
WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
hwc->state = 0;
+ if (event->attr.freq && hwc->sample_period < perf_ibs->min_period)
+ hwc->sample_period = perf_ibs->min_period;
+
perf_ibs_set_period(perf_ibs, hwc, &period);
if (perf_ibs == &perf_ibs_op && (ibs_caps & IBS_CAPS_OPCNTEXT)) {
config |= period & IBS_OP_MAX_CNT_EXT_MASK;
@@ -554,6 +586,28 @@ static void perf_ibs_del(struct perf_event *event, int flags)
static void perf_ibs_read(struct perf_event *event) { }
+static int perf_ibs_check_period(struct perf_event *event, u64 value)
+{
+ struct perf_ibs *perf_ibs;
+ u64 low_nibble;
+
+ if (event->attr.freq)
+ return 0;
+
+ perf_ibs = container_of(event->pmu, struct perf_ibs, pmu);
+ low_nibble = value & 0xFULL;
+
+ /*
+ * This contradicts perf_ibs_init(), which allows a sample period
+ * with the lower nibble bits set but silently masks them off,
+ * whereas this returns an error.
+ */
+ if (low_nibble || value < perf_ibs->min_period)
+ return -EINVAL;
+
+ return 0;
+}
+
/*
* We need to initialize with empty group if all attributes in the
* group are dynamic.
@@ -572,7 +626,10 @@ PMU_FORMAT_ATTR(cnt_ctl, "config:19");
PMU_FORMAT_ATTR(swfilt, "config2:0");
PMU_EVENT_ATTR_STRING(l3missonly, fetch_l3missonly, "config:59");
PMU_EVENT_ATTR_STRING(l3missonly, op_l3missonly, "config:16");
+PMU_EVENT_ATTR_STRING(ldlat, ibs_op_ldlat_format, "config1:0-11");
PMU_EVENT_ATTR_STRING(zen4_ibs_extensions, zen4_ibs_extensions, "1");
+PMU_EVENT_ATTR_STRING(ldlat, ibs_op_ldlat_cap, "1");
+PMU_EVENT_ATTR_STRING(dtlb_pgsize, ibs_op_dtlb_pgsize_cap, "1");
static umode_t
zen4_ibs_extensions_is_visible(struct kobject *kobj, struct attribute *attr, int i)
@@ -580,6 +637,18 @@ zen4_ibs_extensions_is_visible(struct kobject *kobj, struct attribute *attr, int
return ibs_caps & IBS_CAPS_ZEN4 ? attr->mode : 0;
}
+static umode_t
+ibs_op_ldlat_is_visible(struct kobject *kobj, struct attribute *attr, int i)
+{
+ return ibs_caps & IBS_CAPS_OPLDLAT ? attr->mode : 0;
+}
+
+static umode_t
+ibs_op_dtlb_pgsize_is_visible(struct kobject *kobj, struct attribute *attr, int i)
+{
+ return ibs_caps & IBS_CAPS_OPDTLBPGSIZE ? attr->mode : 0;
+}
+
static struct attribute *fetch_attrs[] = {
&format_attr_rand_en.attr,
&format_attr_swfilt.attr,
@@ -596,6 +665,16 @@ static struct attribute *zen4_ibs_extensions_attrs[] = {
NULL,
};
+static struct attribute *ibs_op_ldlat_cap_attrs[] = {
+ &ibs_op_ldlat_cap.attr.attr,
+ NULL,
+};
+
+static struct attribute *ibs_op_dtlb_pgsize_cap_attrs[] = {
+ &ibs_op_dtlb_pgsize_cap.attr.attr,
+ NULL,
+};
+
static struct attribute_group group_fetch_formats = {
.name = "format",
.attrs = fetch_attrs,
@@ -613,6 +692,18 @@ static struct attribute_group group_zen4_ibs_extensions = {
.is_visible = zen4_ibs_extensions_is_visible,
};
+static struct attribute_group group_ibs_op_ldlat_cap = {
+ .name = "caps",
+ .attrs = ibs_op_ldlat_cap_attrs,
+ .is_visible = ibs_op_ldlat_is_visible,
+};
+
+static struct attribute_group group_ibs_op_dtlb_pgsize_cap = {
+ .name = "caps",
+ .attrs = ibs_op_dtlb_pgsize_cap_attrs,
+ .is_visible = ibs_op_dtlb_pgsize_is_visible,
+};
+
static const struct attribute_group *fetch_attr_groups[] = {
&group_fetch_formats,
&empty_caps_group,
@@ -651,6 +742,11 @@ static struct attribute_group group_op_formats = {
.attrs = op_attrs,
};
+static struct attribute *ibs_op_ldlat_format_attrs[] = {
+ &ibs_op_ldlat_format.attr.attr,
+ NULL,
+};
+
static struct attribute_group group_cnt_ctl = {
.name = "format",
.attrs = cnt_ctl_attrs,
@@ -669,10 +765,19 @@ static const struct attribute_group *op_attr_groups[] = {
NULL,
};
+static struct attribute_group group_ibs_op_ldlat_format = {
+ .name = "format",
+ .attrs = ibs_op_ldlat_format_attrs,
+ .is_visible = ibs_op_ldlat_is_visible,
+};
+
static const struct attribute_group *op_attr_update[] = {
&group_cnt_ctl,
&group_op_l3missonly,
&group_zen4_ibs_extensions,
+ &group_ibs_op_ldlat_cap,
+ &group_ibs_op_ldlat_format,
+ &group_ibs_op_dtlb_pgsize_cap,
NULL,
};
@@ -686,12 +791,14 @@ static struct perf_ibs perf_ibs_fetch = {
.start = perf_ibs_start,
.stop = perf_ibs_stop,
.read = perf_ibs_read,
+ .check_period = perf_ibs_check_period,
},
.msr = MSR_AMD64_IBSFETCHCTL,
- .config_mask = IBS_FETCH_CONFIG_MASK,
+ .config_mask = IBS_FETCH_MAX_CNT | IBS_FETCH_RAND_EN,
.cnt_mask = IBS_FETCH_MAX_CNT,
.enable_mask = IBS_FETCH_ENABLE,
.valid_mask = IBS_FETCH_VAL,
+ .min_period = 0x10,
.max_period = IBS_FETCH_MAX_CNT << 4,
.offset_mask = { MSR_AMD64_IBSFETCH_REG_MASK },
.offset_max = MSR_AMD64_IBSFETCH_REG_COUNT,
@@ -709,13 +816,15 @@ static struct perf_ibs perf_ibs_op = {
.start = perf_ibs_start,
.stop = perf_ibs_stop,
.read = perf_ibs_read,
+ .check_period = perf_ibs_check_period,
},
.msr = MSR_AMD64_IBSOPCTL,
- .config_mask = IBS_OP_CONFIG_MASK,
+ .config_mask = IBS_OP_MAX_CNT,
.cnt_mask = IBS_OP_MAX_CNT | IBS_OP_CUR_CNT |
IBS_OP_CUR_CNT_RAND,
.enable_mask = IBS_OP_ENABLE,
.valid_mask = IBS_OP_VAL,
+ .min_period = 0x90,
.max_period = IBS_OP_MAX_CNT << 4,
.offset_mask = { MSR_AMD64_IBSOP_REG_MASK },
.offset_max = MSR_AMD64_IBSOP_REG_COUNT,
@@ -917,6 +1026,10 @@ static void perf_ibs_get_tlb_lvl(union ibs_op_data3 *op_data3,
if (!op_data3->dc_lin_addr_valid)
return;
+ if ((ibs_caps & IBS_CAPS_OPDTLBPGSIZE) &&
+ !op_data3->dc_phy_addr_valid)
+ return;
+
if (!op_data3->dc_l1tlb_miss) {
data_src->mem_dtlb = PERF_MEM_TLB_L1 | PERF_MEM_TLB_HIT;
return;
@@ -941,6 +1054,8 @@ static void perf_ibs_get_mem_lock(union ibs_op_data3 *op_data3,
data_src->mem_lock = PERF_MEM_LOCK_LOCKED;
}
+/* Be careful. Works only for contiguous MSRs. */
+#define ibs_fetch_msr_idx(msr) (msr - MSR_AMD64_IBSFETCHCTL)
#define ibs_op_msr_idx(msr) (msr - MSR_AMD64_IBSOPCTL)
static void perf_ibs_get_data_src(struct perf_ibs_data *ibs_data,
@@ -1021,21 +1136,92 @@ static void perf_ibs_parse_ld_st_data(__u64 sample_type,
}
}
-static int perf_ibs_get_offset_max(struct perf_ibs *perf_ibs, u64 sample_type,
+static bool perf_ibs_is_mem_sample_type(struct perf_ibs *perf_ibs,
+ struct perf_event *event)
+{
+ u64 sample_type = event->attr.sample_type;
+
+ return perf_ibs == &perf_ibs_op &&
+ sample_type & (PERF_SAMPLE_DATA_SRC |
+ PERF_SAMPLE_WEIGHT_TYPE |
+ PERF_SAMPLE_ADDR |
+ PERF_SAMPLE_PHYS_ADDR);
+}
+
+static int perf_ibs_get_offset_max(struct perf_ibs *perf_ibs,
+ struct perf_event *event,
int check_rip)
{
- if (sample_type & PERF_SAMPLE_RAW ||
- (perf_ibs == &perf_ibs_op &&
- (sample_type & PERF_SAMPLE_DATA_SRC ||
- sample_type & PERF_SAMPLE_WEIGHT_TYPE ||
- sample_type & PERF_SAMPLE_ADDR ||
- sample_type & PERF_SAMPLE_PHYS_ADDR)))
+ if (event->attr.sample_type & PERF_SAMPLE_RAW ||
+ perf_ibs_is_mem_sample_type(perf_ibs, event) ||
+ perf_ibs_ldlat_event(perf_ibs, event))
return perf_ibs->offset_max;
else if (check_rip)
return 3;
return 1;
}
+static bool perf_ibs_is_kernel_data_addr(struct perf_event *event,
+ struct perf_ibs_data *ibs_data)
+{
+ u64 sample_type_mask = PERF_SAMPLE_ADDR | PERF_SAMPLE_RAW;
+ union ibs_op_data3 op_data3;
+ u64 dc_lin_addr;
+
+ op_data3.val = ibs_data->regs[ibs_op_msr_idx(MSR_AMD64_IBSOPDATA3)];
+ dc_lin_addr = ibs_data->regs[ibs_op_msr_idx(MSR_AMD64_IBSDCLINAD)];
+
+ return unlikely((event->attr.sample_type & sample_type_mask) &&
+ op_data3.dc_lin_addr_valid && kernel_ip(dc_lin_addr));
+}
+
+static bool perf_ibs_is_kernel_br_target(struct perf_event *event,
+ struct perf_ibs_data *ibs_data,
+ int br_target_idx)
+{
+ union ibs_op_data op_data;
+ u64 br_target;
+
+ op_data.val = ibs_data->regs[ibs_op_msr_idx(MSR_AMD64_IBSOPDATA)];
+ br_target = ibs_data->regs[br_target_idx];
+
+ return unlikely((event->attr.sample_type & PERF_SAMPLE_RAW) &&
+ op_data.op_brn_ret && kernel_ip(br_target));
+}
+
+static bool perf_ibs_swfilt_discard(struct perf_ibs *perf_ibs, struct perf_event *event,
+ struct pt_regs *regs, struct perf_ibs_data *ibs_data,
+ int br_target_idx)
+{
+ if (perf_exclude_event(event, regs))
+ return true;
+
+ if (perf_ibs != &perf_ibs_op || !event->attr.exclude_kernel)
+ return false;
+
+ if (perf_ibs_is_kernel_data_addr(event, ibs_data))
+ return true;
+
+ if (br_target_idx != -1 &&
+ perf_ibs_is_kernel_br_target(event, ibs_data, br_target_idx))
+ return true;
+
+ return false;
+}
+
+static void perf_ibs_phyaddr_clear(struct perf_ibs *perf_ibs,
+ struct perf_ibs_data *ibs_data)
+{
+ if (perf_ibs == &perf_ibs_op) {
+ ibs_data->regs[ibs_op_msr_idx(MSR_AMD64_IBSOPDATA3)] &= ~(1ULL << 18);
+ ibs_data->regs[ibs_op_msr_idx(MSR_AMD64_IBSDCPHYSAD)] = 0;
+ return;
+ }
+
+ ibs_data->regs[ibs_fetch_msr_idx(MSR_AMD64_IBSFETCHCTL)] &= ~(1ULL << 52);
+ ibs_data->regs[ibs_fetch_msr_idx(MSR_AMD64_IBSFETCHPHYSAD)] = 0;
+}
+
static int perf_ibs_handle_irq(struct perf_ibs *perf_ibs, struct pt_regs *iregs)
{
struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);
@@ -1048,6 +1234,7 @@ static int perf_ibs_handle_irq(struct perf_ibs *perf_ibs, struct pt_regs *iregs)
int offset, size, check_rip, offset_max, throttle = 0;
unsigned int msr;
u64 *buf, *config, period, new_config = 0;
+ int br_target_idx = -1;
if (!test_bit(IBS_STARTED, pcpu->state)) {
fail:
@@ -1084,7 +1271,7 @@ fail:
offset = 1;
check_rip = (perf_ibs == &perf_ibs_op && (ibs_caps & IBS_CAPS_RIPINVALIDCHK));
- offset_max = perf_ibs_get_offset_max(perf_ibs, event->attr.sample_type, check_rip);
+ offset_max = perf_ibs_get_offset_max(perf_ibs, event, check_rip);
do {
rdmsrl(msr + offset, *buf++);
@@ -1093,6 +1280,22 @@ fail:
perf_ibs->offset_max,
offset + 1);
} while (offset < offset_max);
+
+ if (perf_ibs_ldlat_event(perf_ibs, event)) {
+ union ibs_op_data3 op_data3;
+
+ op_data3.val = ibs_data.regs[ibs_op_msr_idx(MSR_AMD64_IBSOPDATA3)];
+ /*
+ * Opening the event fails if the load latency threshold is
+ * outside the [128, 2048] range. Since the event has reached the
+ * interrupt handler, we can safely assume the threshold is
+ * within the [128, 2048] range.
+ */
+ if (!op_data3.ld_op || !op_data3.dc_miss ||
+ op_data3.dc_miss_lat <= (event->attr.config1 & 0xFFF))
+ goto out;
+ }
+
/*
* Read IbsBrTarget, IbsOpData4, and IbsExtdCtl separately
* depending on their availability.
@@ -1102,6 +1305,7 @@ fail:
if (perf_ibs == &perf_ibs_op) {
if (ibs_caps & IBS_CAPS_BRNTRGT) {
rdmsrl(MSR_AMD64_IBSBRTARGET, *buf++);
+ br_target_idx = size;
size++;
}
if (ibs_caps & IBS_CAPS_OPDATA4) {
@@ -1129,10 +1333,19 @@ fail:
}
if ((event->attr.config2 & IBS_SW_FILTER_MASK) &&
- perf_exclude_event(event, &regs)) {
+ perf_ibs_swfilt_discard(perf_ibs, event, &regs, &ibs_data, br_target_idx)) {
throttle = perf_event_account_interrupt(event);
goto out;
}
+ /*
+ * Prevent leaking physical addresses to unprivileged users. Skip
+ * PERF_SAMPLE_PHYS_ADDR check since generic code prevents it for
+ * unprivileged users.
+ */
+ if ((event->attr.sample_type & PERF_SAMPLE_RAW) &&
+ perf_allow_kernel()) {
+ perf_ibs_phyaddr_clear(perf_ibs, &ibs_data);
+ }
if (event->attr.sample_type & PERF_SAMPLE_RAW) {
raw = (struct perf_raw_record){
@@ -1155,6 +1368,10 @@ fail:
perf_sample_save_callchain(&data, event, iregs);
throttle = perf_event_overflow(event, &data, &regs);
+
+ if (event->attr.freq && hwc->sample_period < perf_ibs->min_period)
+ hwc->sample_period = perf_ibs->min_period;
+
out:
if (throttle) {
perf_ibs_stop(event, 0);
@@ -1244,7 +1461,8 @@ static __init int perf_ibs_op_init(void)
if (ibs_caps & IBS_CAPS_OPCNTEXT) {
perf_ibs_op.max_period |= IBS_OP_MAX_CNT_EXT_MASK;
perf_ibs_op.config_mask |= IBS_OP_MAX_CNT_EXT_MASK;
- perf_ibs_op.cnt_mask |= IBS_OP_MAX_CNT_EXT_MASK;
+ perf_ibs_op.cnt_mask |= (IBS_OP_MAX_CNT_EXT_MASK |
+ IBS_OP_CUR_CNT_EXT_MASK);
}
if (ibs_caps & IBS_CAPS_ZEN4)
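
The perf_ibs_init() and attribute hunks above expose the IBS op PMU's load-latency filter through config1 (format "ldlat", bits 0-11, valid range [128, 2048]) alongside the existing swfilt bit in config2. Below is a hedged userspace sketch of opening such an event; the 256-cycle threshold, the sample period and the minimal error handling are illustrative, and the sysfs "type" lookup is the usual way to resolve the dynamic PMU type.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

static int read_sysfs_int(const char *path)
{
	FILE *f = fopen(path, "r");
	int val = -1;

	if (f) {
		if (fscanf(f, "%d", &val) != 1)
			val = -1;
		fclose(f);
	}
	return val;
}

int main(void)
{
	int type = read_sysfs_int("/sys/bus/event_source/devices/ibs_op/type");
	struct perf_event_attr attr;
	int fd;

	if (type < 0) {
		fprintf(stderr, "ibs_op PMU not found\n");
		return 1;
	}

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = type;
	attr.sample_period = 0x10000;	/* lower nibble must stay clear */
	attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_WEIGHT | PERF_SAMPLE_DATA_SRC;
	attr.config1 = 256;		/* ldlat threshold, format "config1:0-11", [128, 2048] */
	attr.config2 = 1;		/* swfilt: software filtering of excluded samples */
	attr.exclude_kernel = 1;

	fd = syscall(__NR_perf_event_open, &attr, 0 /* this task */, -1 /* any cpu */,
		     -1 /* no group */, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}
	close(fd);
	return 0;
}
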
diff --git a/arch/x86/events/amd/iommu.c b/arch/x86/events/amd/iommu.c
index b15f7b950d2e..f8228d8243f7 100644
--- a/arch/x86/events/amd/iommu.c
+++ b/arch/x86/events/amd/iommu.c
@@ -30,7 +30,7 @@
#define GET_DOMID_MASK(x) (((x)->conf1 >> 16) & 0xFFFFULL)
#define GET_PASID_MASK(x) (((x)->conf1 >> 32) & 0xFFFFFULL)
-#define IOMMU_NAME_SIZE 16
+#define IOMMU_NAME_SIZE 24
struct perf_amd_iommu {
struct list_head list;
diff --git a/arch/x86/events/amd/lbr.c b/arch/x86/events/amd/lbr.c
index 19c7b76e21bc..c06ccca96851 100644
--- a/arch/x86/events/amd/lbr.c
+++ b/arch/x86/events/amd/lbr.c
@@ -371,7 +371,8 @@ void amd_pmu_lbr_del(struct perf_event *event)
perf_sched_cb_dec(event->pmu);
}
-void amd_pmu_lbr_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched_in)
+void amd_pmu_lbr_sched_task(struct perf_event_pmu_context *pmu_ctx,
+ struct task_struct *task, bool sched_in)
{
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index 2092d615333d..6866cc5acb0b 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -87,13 +87,14 @@ DEFINE_STATIC_CALL_NULL(x86_pmu_commit_scheduling, *x86_pmu.commit_scheduling);
DEFINE_STATIC_CALL_NULL(x86_pmu_stop_scheduling, *x86_pmu.stop_scheduling);
DEFINE_STATIC_CALL_NULL(x86_pmu_sched_task, *x86_pmu.sched_task);
-DEFINE_STATIC_CALL_NULL(x86_pmu_swap_task_ctx, *x86_pmu.swap_task_ctx);
DEFINE_STATIC_CALL_NULL(x86_pmu_drain_pebs, *x86_pmu.drain_pebs);
DEFINE_STATIC_CALL_NULL(x86_pmu_pebs_aliases, *x86_pmu.pebs_aliases);
DEFINE_STATIC_CALL_NULL(x86_pmu_filter, *x86_pmu.filter);
+DEFINE_STATIC_CALL_NULL(x86_pmu_late_setup, *x86_pmu.late_setup);
+
/*
* This one is magic, it will get called even when PMU init fails (because
* there is no PMU), in which case it should simply return NULL.
@@ -1298,6 +1299,15 @@ static void x86_pmu_enable(struct pmu *pmu)
if (cpuc->n_added) {
int n_running = cpuc->n_events - cpuc->n_added;
+
+ /*
+ * The late setup (after counters are scheduled) is required
+ * for some cases, e.g., PEBS counter snapshotting, because an
+ * accurate counter index is needed.
+ */
+ static_call_cond(x86_pmu_late_setup)();
+
/*
* apply assignment obtained either from
* hw_perf_group_sched_in() or x86_pmu_enable()
@@ -2028,13 +2038,14 @@ static void x86_pmu_static_call_update(void)
static_call_update(x86_pmu_stop_scheduling, x86_pmu.stop_scheduling);
static_call_update(x86_pmu_sched_task, x86_pmu.sched_task);
- static_call_update(x86_pmu_swap_task_ctx, x86_pmu.swap_task_ctx);
static_call_update(x86_pmu_drain_pebs, x86_pmu.drain_pebs);
static_call_update(x86_pmu_pebs_aliases, x86_pmu.pebs_aliases);
static_call_update(x86_pmu_guest_get_msrs, x86_pmu.guest_get_msrs);
static_call_update(x86_pmu_filter, x86_pmu.filter);
+
+ static_call_update(x86_pmu_late_setup, x86_pmu.late_setup);
}
static void _x86_pmu_read(struct perf_event *event)
@@ -2625,15 +2636,10 @@ static const struct attribute_group *x86_pmu_attr_groups[] = {
NULL,
};
-static void x86_pmu_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched_in)
-{
- static_call_cond(x86_pmu_sched_task)(pmu_ctx, sched_in);
-}
-
-static void x86_pmu_swap_task_ctx(struct perf_event_pmu_context *prev_epc,
- struct perf_event_pmu_context *next_epc)
+static void x86_pmu_sched_task(struct perf_event_pmu_context *pmu_ctx,
+ struct task_struct *task, bool sched_in)
{
- static_call_cond(x86_pmu_swap_task_ctx)(prev_epc, next_epc);
+ static_call_cond(x86_pmu_sched_task)(pmu_ctx, task, sched_in);
}
void perf_check_microcode(void)
@@ -2700,7 +2706,6 @@ static struct pmu pmu = {
.event_idx = x86_pmu_event_idx,
.sched_task = x86_pmu_sched_task,
- .swap_task_ctx = x86_pmu_swap_task_ctx,
.check_period = x86_pmu_check_period,
.aux_output_match = x86_pmu_aux_output_match,
@@ -2844,7 +2849,7 @@ static bool is_uprobe_at_func_entry(struct pt_regs *regs)
return true;
/* endbr64 (64-bit only) */
- if (user_64bit_mode(regs) && is_endbr(*(u32 *)auprobe->insn))
+ if (user_64bit_mode(regs) && is_endbr((u32 *)auprobe->insn))
return true;
return false;
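
The x86_pmu_late_setup static call added above is an optional hook that runs after the counters have been scheduled but before the new assignment is applied, so a backend can derive per-record configuration from the final counter indices. A plain-C sketch of that optional-hook pattern, with illustrative names only:

#include <stdio.h>

struct pmu_ops {
	void (*late_setup)(void);	/* may be NULL, like an unset static call */
};

static void intel_late_setup(void)
{
	printf("derive PEBS_DATA_CFG from the final counter assignment\n");
}

static void pmu_enable(const struct pmu_ops *ops)
{
	/* counters have been scheduled at this point ... */
	if (ops->late_setup)		/* static_call_cond() equivalent */
		ops->late_setup();
	/* ... then the assignment is applied and the counters are enabled */
}

int main(void)
{
	struct pmu_ops ops = { .late_setup = intel_late_setup };

	pmu_enable(&ops);
	return 0;
}
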
diff --git a/arch/x86/events/intel/bts.c b/arch/x86/events/intel/bts.c
index 8f78b0c900ef..a95e6c91c4d7 100644
--- a/arch/x86/events/intel/bts.c
+++ b/arch/x86/events/intel/bts.c
@@ -36,7 +36,7 @@ enum {
BTS_STATE_ACTIVE,
};
-static DEFINE_PER_CPU(struct bts_ctx, bts_ctx);
+static struct bts_ctx __percpu *bts_ctx;
#define BTS_RECORD_SIZE 24
#define BTS_SAFETY_MARGIN 4080
@@ -58,7 +58,7 @@ struct bts_buffer {
local_t head;
unsigned long end;
void **data_pages;
- struct bts_phys buf[];
+ struct bts_phys buf[] __counted_by(nr_bufs);
};
static struct pmu bts_pmu;
@@ -231,7 +231,7 @@ bts_buffer_reset(struct bts_buffer *buf, struct perf_output_handle *handle);
static void __bts_event_start(struct perf_event *event)
{
- struct bts_ctx *bts = this_cpu_ptr(&bts_ctx);
+ struct bts_ctx *bts = this_cpu_ptr(bts_ctx);
struct bts_buffer *buf = perf_get_aux(&bts->handle);
u64 config = 0;
@@ -260,7 +260,7 @@ static void __bts_event_start(struct perf_event *event)
static void bts_event_start(struct perf_event *event, int flags)
{
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
- struct bts_ctx *bts = this_cpu_ptr(&bts_ctx);
+ struct bts_ctx *bts = this_cpu_ptr(bts_ctx);
struct bts_buffer *buf;
buf = perf_aux_output_begin(&bts->handle, event);
@@ -290,7 +290,7 @@ fail_stop:
static void __bts_event_stop(struct perf_event *event, int state)
{
- struct bts_ctx *bts = this_cpu_ptr(&bts_ctx);
+ struct bts_ctx *bts = this_cpu_ptr(bts_ctx);
/* ACTIVE -> INACTIVE(PMI)/STOPPED(->stop()) */
WRITE_ONCE(bts->state, state);
@@ -305,7 +305,7 @@ static void __bts_event_stop(struct perf_event *event, int state)
static void bts_event_stop(struct perf_event *event, int flags)
{
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
- struct bts_ctx *bts = this_cpu_ptr(&bts_ctx);
+ struct bts_ctx *bts = this_cpu_ptr(bts_ctx);
struct bts_buffer *buf = NULL;
int state = READ_ONCE(bts->state);
@@ -338,9 +338,14 @@ static void bts_event_stop(struct perf_event *event, int flags)
void intel_bts_enable_local(void)
{
- struct bts_ctx *bts = this_cpu_ptr(&bts_ctx);
- int state = READ_ONCE(bts->state);
+ struct bts_ctx *bts;
+ int state;
+
+ if (!bts_ctx)
+ return;
+ bts = this_cpu_ptr(bts_ctx);
+ state = READ_ONCE(bts->state);
/*
* Here we transition from INACTIVE to ACTIVE;
* if we instead are STOPPED from the interrupt handler,
@@ -358,7 +363,12 @@ void intel_bts_enable_local(void)
void intel_bts_disable_local(void)
{
- struct bts_ctx *bts = this_cpu_ptr(&bts_ctx);
+ struct bts_ctx *bts;
+
+ if (!bts_ctx)
+ return;
+
+ bts = this_cpu_ptr(bts_ctx);
/*
* Here we transition from ACTIVE to INACTIVE;
@@ -450,12 +460,17 @@ bts_buffer_reset(struct bts_buffer *buf, struct perf_output_handle *handle)
int intel_bts_interrupt(void)
{
struct debug_store *ds = this_cpu_ptr(&cpu_hw_events)->ds;
- struct bts_ctx *bts = this_cpu_ptr(&bts_ctx);
- struct perf_event *event = bts->handle.event;
+ struct bts_ctx *bts;
+ struct perf_event *event;
struct bts_buffer *buf;
s64 old_head;
int err = -ENOSPC, handled = 0;
+ if (!bts_ctx)
+ return 0;
+
+ bts = this_cpu_ptr(bts_ctx);
+ event = bts->handle.event;
/*
* The only surefire way of knowing if this NMI is ours is by checking
* the write ptr against the PMI threshold.
@@ -518,7 +533,7 @@ static void bts_event_del(struct perf_event *event, int mode)
static int bts_event_add(struct perf_event *event, int mode)
{
- struct bts_ctx *bts = this_cpu_ptr(&bts_ctx);
+ struct bts_ctx *bts = this_cpu_ptr(bts_ctx);
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
struct hw_perf_event *hwc = &event->hw;
@@ -559,7 +574,7 @@ static int bts_event_init(struct perf_event *event)
* to the user in a zero-copy fashion.
*/
if (event->attr.exclude_kernel) {
- ret = perf_allow_kernel(&event->attr);
+ ret = perf_allow_kernel();
if (ret)
return ret;
}
@@ -605,6 +620,10 @@ static __init int bts_init(void)
return -ENODEV;
}
+ bts_ctx = alloc_percpu(struct bts_ctx);
+ if (!bts_ctx)
+ return -ENOMEM;
+
bts_pmu.capabilities = PERF_PMU_CAP_AUX_NO_SG | PERF_PMU_CAP_ITRACE |
PERF_PMU_CAP_EXCLUSIVE;
bts_pmu.task_ctx_nr = perf_sw_context;
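
Besides the switch from a static per-CPU variable to alloc_percpu() with NULL guards, the bts_buffer hunk annotates its flexible array with __counted_by(nr_bufs) so the compiler knows the array bound. A small standalone sketch of that annotation with a fallback for compilers that lack it; the structure and names are illustrative:

#include <stdlib.h>

#if defined(__has_attribute)
# if __has_attribute(__counted_by__)
#  define __counted_by(member) __attribute__((__counted_by__(member)))
# endif
#endif
#ifndef __counted_by
# define __counted_by(member)
#endif

struct buf {
	unsigned int nr_bufs;
	int entries[] __counted_by(nr_bufs);	/* bound known to the compiler */
};

static struct buf *buf_alloc(unsigned int nr)
{
	struct buf *b = malloc(sizeof(*b) + nr * sizeof(b->entries[0]));

	if (b)
		b->nr_bufs = nr;	/* counter must be valid before the array is indexed */
	return b;
}

int main(void)
{
	struct buf *b = buf_alloc(4);

	free(b);
	return 0;
}
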
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index cdb19e3ba3aa..09d2d66c9f21 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -2714,7 +2714,7 @@ static void update_saved_topdown_regs(struct perf_event *event, u64 slots,
* modify by a NMI. PMU has to be disabled before calling this function.
*/
-static u64 intel_update_topdown_event(struct perf_event *event, int metric_end)
+static u64 intel_update_topdown_event(struct perf_event *event, int metric_end, u64 *val)
{
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
struct perf_event *other;
@@ -2722,13 +2722,24 @@ static u64 intel_update_topdown_event(struct perf_event *event, int metric_end)
bool reset = true;
int idx;
- /* read Fixed counter 3 */
- rdpmcl((3 | INTEL_PMC_FIXED_RDPMC_BASE), slots);
- if (!slots)
- return 0;
+ if (!val) {
+ /* read Fixed counter 3 */
+ rdpmcl((3 | INTEL_PMC_FIXED_RDPMC_BASE), slots);
+ if (!slots)
+ return 0;
- /* read PERF_METRICS */
- rdpmcl(INTEL_PMC_FIXED_RDPMC_METRICS, metrics);
+ /* read PERF_METRICS */
+ rdpmcl(INTEL_PMC_FIXED_RDPMC_METRICS, metrics);
+ } else {
+ slots = val[0];
+ metrics = val[1];
+ /*
+ * Don't reset the PERF_METRICS and Fixed counter 3
+ * for each PEBS record read. Utilize the RDPMC metrics
+ * clear mode.
+ */
+ reset = false;
+ }
for_each_set_bit(idx, cpuc->active_mask, metric_end + 1) {
if (!is_topdown_idx(idx))
@@ -2771,36 +2782,47 @@ static u64 intel_update_topdown_event(struct perf_event *event, int metric_end)
return slots;
}
-static u64 icl_update_topdown_event(struct perf_event *event)
+static u64 icl_update_topdown_event(struct perf_event *event, u64 *val)
{
return intel_update_topdown_event(event, INTEL_PMC_IDX_METRIC_BASE +
- x86_pmu.num_topdown_events - 1);
+ x86_pmu.num_topdown_events - 1,
+ val);
}
-DEFINE_STATIC_CALL(intel_pmu_update_topdown_event, x86_perf_event_update);
+DEFINE_STATIC_CALL(intel_pmu_update_topdown_event, intel_pmu_topdown_event_update);
-static void intel_pmu_read_topdown_event(struct perf_event *event)
+static void intel_pmu_read_event(struct perf_event *event)
{
- struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+ if (event->hw.flags & (PERF_X86_EVENT_AUTO_RELOAD | PERF_X86_EVENT_TOPDOWN) ||
+ is_pebs_counter_event_group(event)) {
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+ bool pmu_enabled = cpuc->enabled;
- /* Only need to call update_topdown_event() once for group read. */
- if ((cpuc->txn_flags & PERF_PMU_TXN_READ) &&
- !is_slots_event(event))
- return;
+ /* Only need to call update_topdown_event() once for group read. */
+ if (is_metric_event(event) && (cpuc->txn_flags & PERF_PMU_TXN_READ))
+ return;
- perf_pmu_disable(event->pmu);
- static_call(intel_pmu_update_topdown_event)(event);
- perf_pmu_enable(event->pmu);
-}
+ cpuc->enabled = 0;
+ if (pmu_enabled)
+ intel_pmu_disable_all();
-static void intel_pmu_read_event(struct perf_event *event)
-{
- if (event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD)
- intel_pmu_auto_reload_read(event);
- else if (is_topdown_count(event))
- intel_pmu_read_topdown_event(event);
- else
- x86_perf_event_update(event);
+ /*
+ * If PEBS counter snapshotting is enabled,
+ * the topdown event is available in PEBS records.
+ */
+ if (is_topdown_event(event) && !is_pebs_counter_event_group(event))
+ static_call(intel_pmu_update_topdown_event)(event, NULL);
+ else
+ intel_pmu_drain_pebs_buffer();
+
+ cpuc->enabled = pmu_enabled;
+ if (pmu_enabled)
+ intel_pmu_enable_all(0);
+
+ return;
+ }
+
+ x86_perf_event_update(event);
}
static void intel_pmu_enable_fixed(struct perf_event *event)
@@ -2932,7 +2954,7 @@ static int intel_pmu_set_period(struct perf_event *event)
static u64 intel_pmu_update(struct perf_event *event)
{
if (unlikely(is_topdown_count(event)))
- return static_call(intel_pmu_update_topdown_event)(event);
+ return static_call(intel_pmu_update_topdown_event)(event, NULL);
return x86_perf_event_update(event);
}
@@ -3070,7 +3092,7 @@ static int handle_pmi_common(struct pt_regs *regs, u64 status)
handled++;
x86_pmu_handle_guest_pebs(regs, &data);
- x86_pmu.drain_pebs(regs, &data);
+ static_call(x86_pmu_drain_pebs)(regs, &data);
status &= intel_ctrl | GLOBAL_STATUS_TRACE_TOPAPMI;
/*
@@ -3098,7 +3120,7 @@ static int handle_pmi_common(struct pt_regs *regs, u64 status)
*/
if (__test_and_clear_bit(GLOBAL_STATUS_PERF_METRICS_OVF_BIT, (unsigned long *)&status)) {
handled++;
- static_call(intel_pmu_update_topdown_event)(NULL);
+ static_call(intel_pmu_update_topdown_event)(NULL, NULL);
}
/*
@@ -3116,6 +3138,27 @@ static int handle_pmi_common(struct pt_regs *regs, u64 status)
if (!test_bit(bit, cpuc->active_mask))
continue;
+ /*
+ * There may be unprocessed PEBS records in the PEBS buffer,
+ * which still stores the previous values.
+ * Process those records first before handling the latest value.
+ * For example,
+ * A is a regular counter
+ * B is a PEBS event which reads A
+ * C is a PEBS event
+ *
+ * The following can happen:
+ * B-assist A=1
+ * C A=2
+ * B-assist A=3
+ * A-overflow-PMI A=4
+ * C-assist-PMI (PEBS buffer) A=5
+ *
+ * The PEBS buffer has to be drained before handling the A-PMI
+ */
+ if (is_pebs_counter_event_group(event))
+ x86_pmu.drain_pebs(regs, &data);
+
if (!intel_pmu_save_and_restart(event))
continue;
@@ -4148,6 +4191,13 @@ static int intel_pmu_hw_config(struct perf_event *event)
event->hw.flags |= PERF_X86_EVENT_PEBS_VIA_PT;
}
+ if ((event->attr.sample_type & PERF_SAMPLE_READ) &&
+ (x86_pmu.intel_cap.pebs_format >= 6) &&
+ x86_pmu.intel_cap.pebs_baseline &&
+ is_sampling_event(event) &&
+ event->attr.precise_ip)
+ event->group_leader->hw.flags |= PERF_X86_EVENT_PEBS_CNTR;
+
if ((event->attr.type == PERF_TYPE_HARDWARE) ||
(event->attr.type == PERF_TYPE_HW_CACHE))
return 0;
@@ -4247,7 +4297,7 @@ static int intel_pmu_hw_config(struct perf_event *event)
if (x86_pmu.version < 3)
return -EINVAL;
- ret = perf_allow_cpu(&event->attr);
+ ret = perf_allow_cpu();
if (ret)
return ret;
@@ -4685,9 +4735,9 @@ static int adl_hw_config(struct perf_event *event)
return -EOPNOTSUPP;
}
-static enum hybrid_cpu_type adl_get_hybrid_cpu_type(void)
+static enum intel_cpu_type adl_get_hybrid_cpu_type(void)
{
- return HYBRID_INTEL_CORE;
+ return INTEL_CPU_TYPE_CORE;
}
static inline bool erratum_hsw11(struct perf_event *event)
@@ -5032,7 +5082,8 @@ static void intel_pmu_check_hybrid_pmus(struct x86_hybrid_pmu *pmu)
static struct x86_hybrid_pmu *find_hybrid_pmu_for_cpu(void)
{
- u8 cpu_type = get_this_hybrid_cpu_type();
+ struct cpuinfo_x86 *c = &cpu_data(smp_processor_id());
+ enum intel_cpu_type cpu_type = c->topo.intel_type;
int i;
/*
@@ -5041,7 +5092,7 @@ static struct x86_hybrid_pmu *find_hybrid_pmu_for_cpu(void)
* on it. There should be a fixup function provided for these
* troublesome CPUs (->get_hybrid_cpu_type).
*/
- if (cpu_type == HYBRID_INTEL_NONE) {
+ if (cpu_type == INTEL_CPU_TYPE_UNKNOWN) {
if (x86_pmu.get_hybrid_cpu_type)
cpu_type = x86_pmu.get_hybrid_cpu_type();
else
@@ -5058,16 +5109,16 @@ static struct x86_hybrid_pmu *find_hybrid_pmu_for_cpu(void)
enum hybrid_pmu_type pmu_type = x86_pmu.hybrid_pmu[i].pmu_type;
u32 native_id;
- if (cpu_type == HYBRID_INTEL_CORE && pmu_type == hybrid_big)
+ if (cpu_type == INTEL_CPU_TYPE_CORE && pmu_type == hybrid_big)
return &x86_pmu.hybrid_pmu[i];
- if (cpu_type == HYBRID_INTEL_ATOM) {
+ if (cpu_type == INTEL_CPU_TYPE_ATOM) {
if (x86_pmu.num_hybrid_pmus == 2 && pmu_type == hybrid_small)
return &x86_pmu.hybrid_pmu[i];
- native_id = get_this_hybrid_cpu_native_id();
- if (native_id == skt_native_id && pmu_type == hybrid_small)
+ native_id = c->topo.intel_native_model_id;
+ if (native_id == INTEL_ATOM_SKT_NATIVE_ID && pmu_type == hybrid_small)
return &x86_pmu.hybrid_pmu[i];
- if (native_id == cmt_native_id && pmu_type == hybrid_tiny)
+ if (native_id == INTEL_ATOM_CMT_NATIVE_ID && pmu_type == hybrid_tiny)
return &x86_pmu.hybrid_pmu[i];
}
}
@@ -5244,16 +5295,10 @@ static void intel_pmu_cpu_dead(int cpu)
}
static void intel_pmu_sched_task(struct perf_event_pmu_context *pmu_ctx,
- bool sched_in)
+ struct task_struct *task, bool sched_in)
{
intel_pmu_pebs_sched_task(pmu_ctx, sched_in);
- intel_pmu_lbr_sched_task(pmu_ctx, sched_in);
-}
-
-static void intel_pmu_swap_task_ctx(struct perf_event_pmu_context *prev_epc,
- struct perf_event_pmu_context *next_epc)
-{
- intel_pmu_lbr_swap_task_ctx(prev_epc, next_epc);
+ intel_pmu_lbr_sched_task(pmu_ctx, task, sched_in);
}
static int intel_pmu_check_period(struct perf_event *event, u64 value)
@@ -5424,7 +5469,6 @@ static __initconst const struct x86_pmu intel_pmu = {
.guest_get_msrs = intel_guest_get_msrs,
.sched_task = intel_pmu_sched_task,
- .swap_task_ctx = intel_pmu_swap_task_ctx,
.check_period = intel_pmu_check_period,
@@ -6540,15 +6584,21 @@ __init int intel_pmu_init(void)
char *name;
struct x86_hybrid_pmu *pmu;
+ /* Architectural Perfmon was introduced starting with Core "Yonah" */
if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
switch (boot_cpu_data.x86) {
- case 0x6:
- return p6_pmu_init();
- case 0xb:
+ case 6:
+ if (boot_cpu_data.x86_vfm < INTEL_CORE_YONAH)
+ return p6_pmu_init();
+ break;
+ case 11:
return knc_pmu_init();
- case 0xf:
+ case 15:
return p4_pmu_init();
}
+
+ pr_cont("unsupported CPU family %d model %d ",
+ boot_cpu_data.x86, boot_cpu_data.x86_model);
return -ENODEV;
}
@@ -6696,7 +6746,7 @@ __init int intel_pmu_init(void)
case INTEL_ATOM_SILVERMONT_D:
case INTEL_ATOM_SILVERMONT_MID:
case INTEL_ATOM_AIRMONT:
- case INTEL_ATOM_AIRMONT_MID:
+ case INTEL_ATOM_SILVERMONT_MID2:
memcpy(hw_cache_event_ids, slm_hw_cache_event_ids,
sizeof(hw_cache_event_ids));
memcpy(hw_cache_extra_regs, slm_hw_cache_extra_regs,
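
The intel_pmu_hw_config() hunk above sets PERF_X86_EVENT_PEBS_CNTR on the group leader when a precise sampling event requests PERF_SAMPLE_READ on a PEBS-baseline PMU, which is what allows the group's counters to be read back from the PEBS record. A hedged userspace sketch of opening such a group; the event encodings, period and error handling are illustrative:

#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

static int open_event(struct perf_event_attr *attr, int group_fd)
{
	return syscall(__NR_perf_event_open, attr, 0 /* this task */, -1 /* any cpu */,
		       group_fd, 0);
}

int main(void)
{
	struct perf_event_attr leader, member;
	int lfd, mfd;

	memset(&leader, 0, sizeof(leader));
	leader.size = sizeof(leader);
	leader.type = PERF_TYPE_HARDWARE;
	leader.config = PERF_COUNT_HW_CPU_CYCLES;
	leader.sample_period = 100003;
	leader.precise_ip = 1;				/* PEBS */
	leader.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_READ;
	leader.read_format = PERF_FORMAT_GROUP;

	memset(&member, 0, sizeof(member));
	member.size = sizeof(member);
	member.type = PERF_TYPE_HARDWARE;
	member.config = PERF_COUNT_HW_INSTRUCTIONS;

	lfd = open_event(&leader, -1);
	if (lfd < 0)
		return 1;
	mfd = open_event(&member, lfd);
	if (mfd < 0) {
		close(lfd);
		return 1;
	}
	/* ... mmap the ring buffer and parse PERF_RECORD_SAMPLEs here ... */
	close(mfd);
	close(lfd);
	return 0;
}
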
diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
index f122882ef278..1f7e1a692a7a 100644
--- a/arch/x86/events/intel/ds.c
+++ b/arch/x86/events/intel/ds.c
@@ -953,11 +953,11 @@ unlock:
return 1;
}
-static inline void intel_pmu_drain_pebs_buffer(void)
+void intel_pmu_drain_pebs_buffer(void)
{
struct perf_sample_data data;
- x86_pmu.drain_pebs(NULL, &data);
+ static_call(x86_pmu_drain_pebs)(NULL, &data);
}
/*
@@ -1294,6 +1294,19 @@ static inline void pebs_update_threshold(struct cpu_hw_events *cpuc)
ds->pebs_interrupt_threshold = threshold;
}
+#define PEBS_DATACFG_CNTRS(x) \
+ ((x >> PEBS_DATACFG_CNTR_SHIFT) & PEBS_DATACFG_CNTR_MASK)
+
+#define PEBS_DATACFG_CNTR_BIT(x) \
+ (((1ULL << x) & PEBS_DATACFG_CNTR_MASK) << PEBS_DATACFG_CNTR_SHIFT)
+
+#define PEBS_DATACFG_FIX(x) \
+ ((x >> PEBS_DATACFG_FIX_SHIFT) & PEBS_DATACFG_FIX_MASK)
+
+#define PEBS_DATACFG_FIX_BIT(x) \
+ (((1ULL << (x)) & PEBS_DATACFG_FIX_MASK) \
+ << PEBS_DATACFG_FIX_SHIFT)
+
static void adaptive_pebs_record_size_update(void)
{
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
@@ -1308,10 +1321,58 @@ static void adaptive_pebs_record_size_update(void)
sz += sizeof(struct pebs_xmm);
if (pebs_data_cfg & PEBS_DATACFG_LBRS)
sz += x86_pmu.lbr_nr * sizeof(struct lbr_entry);
+ if (pebs_data_cfg & (PEBS_DATACFG_METRICS | PEBS_DATACFG_CNTR)) {
+ sz += sizeof(struct pebs_cntr_header);
+
+ /* Metrics base and Metrics Data */
+ if (pebs_data_cfg & PEBS_DATACFG_METRICS)
+ sz += 2 * sizeof(u64);
+
+ if (pebs_data_cfg & PEBS_DATACFG_CNTR) {
+ sz += (hweight64(PEBS_DATACFG_CNTRS(pebs_data_cfg)) +
+ hweight64(PEBS_DATACFG_FIX(pebs_data_cfg))) *
+ sizeof(u64);
+ }
+ }
cpuc->pebs_record_size = sz;
}
+static void __intel_pmu_pebs_update_cfg(struct perf_event *event,
+ int idx, u64 *pebs_data_cfg)
+{
+ if (is_metric_event(event)) {
+ *pebs_data_cfg |= PEBS_DATACFG_METRICS;
+ return;
+ }
+
+ *pebs_data_cfg |= PEBS_DATACFG_CNTR;
+
+ if (idx >= INTEL_PMC_IDX_FIXED)
+ *pebs_data_cfg |= PEBS_DATACFG_FIX_BIT(idx - INTEL_PMC_IDX_FIXED);
+ else
+ *pebs_data_cfg |= PEBS_DATACFG_CNTR_BIT(idx);
+}
+
+
+static void intel_pmu_late_setup(void)
+{
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+ struct perf_event *event;
+ u64 pebs_data_cfg = 0;
+ int i;
+
+ for (i = 0; i < cpuc->n_events; i++) {
+ event = cpuc->event_list[i];
+ if (!is_pebs_counter_event_group(event))
+ continue;
+ __intel_pmu_pebs_update_cfg(event, cpuc->assign[i], &pebs_data_cfg);
+ }
+
+ if (pebs_data_cfg & ~cpuc->pebs_data_cfg)
+ cpuc->pebs_data_cfg |= pebs_data_cfg | PEBS_UPDATE_DS_SW;
+}
+
#define PERF_PEBS_MEMINFO_TYPE (PERF_SAMPLE_ADDR | PERF_SAMPLE_DATA_SRC | \
PERF_SAMPLE_PHYS_ADDR | \
PERF_SAMPLE_WEIGHT_TYPE | \
@@ -1914,12 +1975,89 @@ static void adaptive_pebs_save_regs(struct pt_regs *regs,
#endif
}
+static void intel_perf_event_update_pmc(struct perf_event *event, u64 pmc)
+{
+ int shift = 64 - x86_pmu.cntval_bits;
+ struct hw_perf_event *hwc;
+ u64 delta, prev_pmc;
+
+ /*
+ * A recorded counter may not have an assigned event in the
+ * following cases. The value should be dropped.
+ * - An event is deleted. There is still an active PEBS event.
+ * The PEBS record doesn't shrink on pmu::del().
+ * If the counter of the deleted event once occurred in a PEBS
+ * record, PEBS still records the counter until the counter is
+ * reassigned.
+ * - An event is stopped for some reason, e.g., throttled.
+ * During this period, another event is added and takes the
+ * counter of the stopped event. The stopped event is assigned
+ * to another new and uninitialized counter, since the
+ * x86_pmu_start(RELOAD) is not invoked for a stopped event.
+ * The PEBS_DATA_CFG is updated regardless of the event state.
+ * The uninitialized counter can be recorded in a PEBS record.
+ * But the cpuc->events[uninitialized_counter] is always NULL,
+ * because the event is stopped. The uninitialized value is
+ * safely dropped.
+ */
+ if (!event)
+ return;
+
+ hwc = &event->hw;
+ prev_pmc = local64_read(&hwc->prev_count);
+
+ /* Only update the count when the PMU is disabled */
+ WARN_ON(this_cpu_read(cpu_hw_events.enabled));
+ local64_set(&hwc->prev_count, pmc);
+
+ delta = (pmc << shift) - (prev_pmc << shift);
+ delta >>= shift;
+
+ local64_add(delta, &event->count);
+ local64_sub(delta, &hwc->period_left);
+}
+
+static inline void __setup_pebs_counter_group(struct cpu_hw_events *cpuc,
+ struct perf_event *event,
+ struct pebs_cntr_header *cntr,
+ void *next_record)
+{
+ int bit;
+
+ for_each_set_bit(bit, (unsigned long *)&cntr->cntr, INTEL_PMC_MAX_GENERIC) {
+ intel_perf_event_update_pmc(cpuc->events[bit], *(u64 *)next_record);
+ next_record += sizeof(u64);
+ }
+
+ for_each_set_bit(bit, (unsigned long *)&cntr->fixed, INTEL_PMC_MAX_FIXED) {
+ /* The slots event will be handled with perf_metric later */
+ if ((cntr->metrics == INTEL_CNTR_METRICS) &&
+ (bit + INTEL_PMC_IDX_FIXED == INTEL_PMC_IDX_FIXED_SLOTS)) {
+ next_record += sizeof(u64);
+ continue;
+ }
+ intel_perf_event_update_pmc(cpuc->events[bit + INTEL_PMC_IDX_FIXED],
+ *(u64 *)next_record);
+ next_record += sizeof(u64);
+ }
+
+ /* HW will reload the value right after the overflow. */
+ if (event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD)
+ local64_set(&event->hw.prev_count, (u64)-event->hw.sample_period);
+
+ if (cntr->metrics == INTEL_CNTR_METRICS) {
+ static_call(intel_pmu_update_topdown_event)
+ (cpuc->events[INTEL_PMC_IDX_FIXED_SLOTS],
+ (u64 *)next_record);
+ next_record += 2 * sizeof(u64);
+ }
+}
+
#define PEBS_LATENCY_MASK 0xffff
/*
* With adaptive PEBS the layout depends on what fields are configured.
*/
-
static void setup_pebs_adaptive_sample_data(struct perf_event *event,
struct pt_regs *iregs, void *__pebs,
struct perf_sample_data *data,
@@ -2049,6 +2187,28 @@ static void setup_pebs_adaptive_sample_data(struct perf_event *event,
}
}
+ if (format_group & (PEBS_DATACFG_CNTR | PEBS_DATACFG_METRICS)) {
+ struct pebs_cntr_header *cntr = next_record;
+ unsigned int nr;
+
+ next_record += sizeof(struct pebs_cntr_header);
+ /*
+ * The PEBS_DATA_CFG is a global register, which is the
+ * superset configuration for all PEBS events.
+ * For the PEBS record of a non-sample-read group, ignore
+ * the counter snapshot fields.
+ */
+ if (is_pebs_counter_event_group(event)) {
+ __setup_pebs_counter_group(cpuc, event, cntr, next_record);
+ data->sample_flags |= PERF_SAMPLE_READ;
+ }
+
+ nr = hweight32(cntr->cntr) + hweight32(cntr->fixed);
+ if (cntr->metrics == INTEL_CNTR_METRICS)
+ nr += 2;
+ next_record += nr * sizeof(u64);
+ }
+
WARN_ONCE(next_record != __pebs + basic->format_size,
"PEBS record size %u, expected %llu, config %llx\n",
basic->format_size,
@@ -2094,15 +2254,6 @@ get_next_pebs_record_by_bit(void *base, void *top, int bit)
return NULL;
}
-void intel_pmu_auto_reload_read(struct perf_event *event)
-{
- WARN_ON(!(event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD));
-
- perf_pmu_disable(event->pmu);
- intel_pmu_drain_pebs_buffer();
- perf_pmu_enable(event->pmu);
-}
-
/*
* Special variant of intel_pmu_save_and_restart() for auto-reload.
*/
@@ -2211,13 +2362,21 @@ __intel_pmu_pebs_last_event(struct perf_event *event,
}
if (hwc->flags & PERF_X86_EVENT_AUTO_RELOAD) {
- /*
- * Now, auto-reload is only enabled in fixed period mode.
- * The reload value is always hwc->sample_period.
- * May need to change it, if auto-reload is enabled in
- * freq mode later.
- */
- intel_pmu_save_and_restart_reload(event, count);
+ if ((is_pebs_counter_event_group(event))) {
+ /*
+ * The value of each sample has already been updated when
+ * setting up the corresponding sample data.
+ */
+ perf_event_update_userpage(event);
+ } else {
+ /*
+ * Now, auto-reload is only enabled in fixed period mode.
+ * The reload value is always hwc->sample_period.
+ * May need to change it, if auto-reload is enabled in
+ * freq mode later.
+ */
+ intel_pmu_save_and_restart_reload(event, count);
+ }
} else
intel_pmu_save_and_restart(event);
}
@@ -2552,6 +2711,11 @@ void __init intel_ds_init(void)
break;
case 6:
+ if (x86_pmu.intel_cap.pebs_baseline) {
+ x86_pmu.large_pebs_flags |= PERF_SAMPLE_READ;
+ x86_pmu.late_setup = intel_pmu_late_setup;
+ }
+ fallthrough;
case 5:
x86_pmu.pebs_ept = 1;
fallthrough;
@@ -2576,7 +2740,7 @@ void __init intel_ds_init(void)
PERF_SAMPLE_REGS_USER |
PERF_SAMPLE_REGS_INTR);
}
- pr_cont("PEBS fmt4%c%s, ", pebs_type, pebs_qual);
+ pr_cont("PEBS fmt%d%c%s, ", format, pebs_type, pebs_qual);
/*
* The PEBS-via-PT is not supported on hybrid platforms,
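
intel_perf_event_update_pmc() above folds a raw counter snapshot from the PEBS record into the event count by shifting both values up to the top of a 64-bit word and back down, so the subtraction wraps correctly for counters narrower than 64 bits. A standalone sketch of that delta computation; the 48-bit width and the sample values are assumed for illustration:

#include <stdio.h>
#include <stdint.h>

static uint64_t counter_delta(uint64_t prev, uint64_t now, int cntval_bits)
{
	int shift = 64 - cntval_bits;
	uint64_t delta = (now << shift) - (prev << shift);

	return delta >> shift;
}

int main(void)
{
	/* The counter wrapped from near the top of its 48-bit range to a small value. */
	uint64_t prev = 0xFFFFFFFFFF00ULL;
	uint64_t now  = 0x100ULL;

	/* Prints 512: 256 ticks up to the wrap point plus 256 after it. */
	printf("delta = %llu\n", (unsigned long long)counter_delta(prev, now, 48));
	return 0;
}
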
diff --git a/arch/x86/events/intel/lbr.c b/arch/x86/events/intel/lbr.c
index dc641b50814e..f44c3d866f24 100644
--- a/arch/x86/events/intel/lbr.c
+++ b/arch/x86/events/intel/lbr.c
@@ -422,11 +422,17 @@ static __always_inline bool lbr_is_reset_in_cstate(void *ctx)
return !rdlbr_from(((struct x86_perf_task_context *)ctx)->tos, NULL);
}
+static inline bool has_lbr_callstack_users(void *ctx)
+{
+ return task_context_opt(ctx)->lbr_callstack_users ||
+ x86_pmu.lbr_callstack_users;
+}
+
static void __intel_pmu_lbr_restore(void *ctx)
{
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
- if (task_context_opt(ctx)->lbr_callstack_users == 0 ||
+ if (!has_lbr_callstack_users(ctx) ||
task_context_opt(ctx)->lbr_stack_state == LBR_NONE) {
intel_pmu_lbr_reset();
return;
@@ -503,7 +509,7 @@ static void __intel_pmu_lbr_save(void *ctx)
{
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
- if (task_context_opt(ctx)->lbr_callstack_users == 0) {
+ if (!has_lbr_callstack_users(ctx)) {
task_context_opt(ctx)->lbr_stack_state = LBR_NONE;
return;
}
@@ -516,32 +522,11 @@ static void __intel_pmu_lbr_save(void *ctx)
cpuc->last_log_id = ++task_context_opt(ctx)->log_id;
}
-void intel_pmu_lbr_swap_task_ctx(struct perf_event_pmu_context *prev_epc,
- struct perf_event_pmu_context *next_epc)
-{
- void *prev_ctx_data, *next_ctx_data;
-
- swap(prev_epc->task_ctx_data, next_epc->task_ctx_data);
-
- /*
- * Architecture specific synchronization makes sense in case
- * both prev_epc->task_ctx_data and next_epc->task_ctx_data
- * pointers are allocated.
- */
-
- prev_ctx_data = next_epc->task_ctx_data;
- next_ctx_data = prev_epc->task_ctx_data;
-
- if (!prev_ctx_data || !next_ctx_data)
- return;
-
- swap(task_context_opt(prev_ctx_data)->lbr_callstack_users,
- task_context_opt(next_ctx_data)->lbr_callstack_users);
-}
-
-void intel_pmu_lbr_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched_in)
+void intel_pmu_lbr_sched_task(struct perf_event_pmu_context *pmu_ctx,
+ struct task_struct *task, bool sched_in)
{
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+ struct perf_ctx_data *ctx_data;
void *task_ctx;
if (!cpuc->lbr_users)
@@ -552,14 +537,18 @@ void intel_pmu_lbr_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched
* the task was scheduled out, restore the stack. Otherwise flush
* the LBR stack.
*/
- task_ctx = pmu_ctx ? pmu_ctx->task_ctx_data : NULL;
+ rcu_read_lock();
+ ctx_data = rcu_dereference(task->perf_ctx_data);
+ task_ctx = ctx_data ? ctx_data->data : NULL;
if (task_ctx) {
if (sched_in)
__intel_pmu_lbr_restore(task_ctx);
else
__intel_pmu_lbr_save(task_ctx);
+ rcu_read_unlock();
return;
}
+ rcu_read_unlock();
/*
* Since a context switch can flip the address space and LBR entries
@@ -588,9 +577,19 @@ void intel_pmu_lbr_add(struct perf_event *event)
cpuc->br_sel = event->hw.branch_reg.reg;
- if (branch_user_callstack(cpuc->br_sel) && event->pmu_ctx->task_ctx_data)
- task_context_opt(event->pmu_ctx->task_ctx_data)->lbr_callstack_users++;
+ if (branch_user_callstack(cpuc->br_sel)) {
+ if (event->attach_state & PERF_ATTACH_TASK) {
+ struct task_struct *task = event->hw.target;
+ struct perf_ctx_data *ctx_data;
+ rcu_read_lock();
+ ctx_data = rcu_dereference(task->perf_ctx_data);
+ if (ctx_data)
+ task_context_opt(ctx_data->data)->lbr_callstack_users++;
+ rcu_read_unlock();
+ } else
+ x86_pmu.lbr_callstack_users++;
+ }
/*
* Request pmu::sched_task() callback, which will fire inside the
* regular perf event scheduling, so that call will:
@@ -664,9 +663,19 @@ void intel_pmu_lbr_del(struct perf_event *event)
if (!x86_pmu.lbr_nr)
return;
- if (branch_user_callstack(cpuc->br_sel) &&
- event->pmu_ctx->task_ctx_data)
- task_context_opt(event->pmu_ctx->task_ctx_data)->lbr_callstack_users--;
+ if (branch_user_callstack(cpuc->br_sel)) {
+ if (event->attach_state & PERF_ATTACH_TASK) {
+ struct task_struct *task = event->hw.target;
+ struct perf_ctx_data *ctx_data;
+
+ rcu_read_lock();
+ ctx_data = rcu_dereference(task->perf_ctx_data);
+ if (ctx_data)
+ task_context_opt(ctx_data->data)->lbr_callstack_users--;
+ rcu_read_unlock();
+ } else
+ x86_pmu.lbr_callstack_users--;
+ }
if (event->hw.flags & PERF_X86_EVENT_LBR_SELECT)
cpuc->lbr_select = 0;
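
The lbr.c hunks above replace the swap_task_ctx() bookkeeping with per-task counters kept in the task's perf_ctx_data plus a new system-wide x86_pmu.lbr_callstack_users fallback, and has_lbr_callstack_users() checks both before saving or restoring the LBR stack. A plain-C sketch of that accounting with illustrative names (the kernel guards the per-task lookup with RCU, which is omitted here):

#include <stdbool.h>
#include <stdio.h>

struct task_ctx_opt {
	int lbr_callstack_users;	/* per-task callstack event users */
};

static int system_wide_lbr_callstack_users;	/* cpu-wide callstack event users */

static bool has_lbr_callstack_users(const struct task_ctx_opt *opt)
{
	return opt->lbr_callstack_users || system_wide_lbr_callstack_users;
}

int main(void)
{
	struct task_ctx_opt opt = { .lbr_callstack_users = 0 };

	system_wide_lbr_callstack_users++;	/* a cpu-wide callstack event was added */
	printf("restore saved LBR state: %s\n",
	       has_lbr_callstack_users(&opt) ? "yes" : "no");
	return 0;
}
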
diff --git a/arch/x86/events/intel/p4.c b/arch/x86/events/intel/p4.c
index 844bc4fc4724..c85a9fc44355 100644
--- a/arch/x86/events/intel/p4.c
+++ b/arch/x86/events/intel/p4.c
@@ -10,6 +10,7 @@
#include <linux/perf_event.h>
#include <asm/perf_event_p4.h>
+#include <asm/cpu_device_id.h>
#include <asm/hardirq.h>
#include <asm/apic.h>
@@ -732,9 +733,9 @@ static bool p4_event_match_cpu_model(unsigned int event_idx)
{
/* INSTR_COMPLETED event only exist for model 3, 4, 6 (Prescott) */
if (event_idx == P4_EVENT_INSTR_COMPLETED) {
- if (boot_cpu_data.x86_model != 3 &&
- boot_cpu_data.x86_model != 4 &&
- boot_cpu_data.x86_model != 6)
+ if (boot_cpu_data.x86_vfm != INTEL_P4_PRESCOTT &&
+ boot_cpu_data.x86_vfm != INTEL_P4_PRESCOTT_2M &&
+ boot_cpu_data.x86_vfm != INTEL_P4_CEDARMILL)
return false;
}
@@ -776,7 +777,7 @@ static int p4_validate_raw_event(struct perf_event *event)
* the user needs special permissions to be able to use it
*/
if (p4_ht_active() && p4_event_bind_map[v].shared) {
- v = perf_allow_cpu(&event->attr);
+ v = perf_allow_cpu();
if (v)
return v;
}
diff --git a/arch/x86/events/intel/p6.c b/arch/x86/events/intel/p6.c
index a6cffb4f4ef5..65b45e9d7016 100644
--- a/arch/x86/events/intel/p6.c
+++ b/arch/x86/events/intel/p6.c
@@ -2,6 +2,8 @@
#include <linux/perf_event.h>
#include <linux/types.h>
+#include <asm/cpu_device_id.h>
+
#include "../perf_event.h"
/*
@@ -248,30 +250,8 @@ __init int p6_pmu_init(void)
{
x86_pmu = p6_pmu;
- switch (boot_cpu_data.x86_model) {
- case 1: /* Pentium Pro */
+ if (boot_cpu_data.x86_vfm == INTEL_PENTIUM_PRO)
x86_add_quirk(p6_pmu_rdpmc_quirk);
- break;
-
- case 3: /* Pentium II - Klamath */
- case 5: /* Pentium II - Deschutes */
- case 6: /* Pentium II - Mendocino */
- break;
-
- case 7: /* Pentium III - Katmai */
- case 8: /* Pentium III - Coppermine */
- case 10: /* Pentium III Xeon */
- case 11: /* Pentium III - Tualatin */
- break;
-
- case 9: /* Pentium M - Banias */
- case 13: /* Pentium M - Dothan */
- break;
-
- default:
- pr_cont("unsupported p6 CPU model %d ", boot_cpu_data.x86_model);
- return -ENODEV;
- }
memcpy(hw_cache_event_ids, p6_hw_cache_event_ids,
sizeof(hw_cache_event_ids));
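
Both the p4.c and p6.c hunks above match CPUs by the packed x86_vfm value instead of switching on raw model numbers, so one comparison covers vendor, family and model at once. An illustrative sketch of that packing; the bit layout here is an assumption for demonstration, and the kernel's real definitions live in <asm/cpu_device_id.h>:

#include <stdio.h>

#define VFM_MAKE(vendor, family, model) \
	(((vendor) << 16) | ((family) << 8) | (model))

#define X86_VENDOR_INTEL	0
#define INTEL_PENTIUM_PRO	VFM_MAKE(X86_VENDOR_INTEL, 6, 1)

int main(void)
{
	unsigned int x86_vfm = VFM_MAKE(X86_VENDOR_INTEL, 6, 1);	/* as read from CPUID */

	if (x86_vfm == INTEL_PENTIUM_PRO)
		printf("apply the p6 rdpmc quirk\n");
	return 0;
}
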
diff --git a/arch/x86/events/intel/uncore.c b/arch/x86/events/intel/uncore.c
index 60b3078b7502..a34e50fc4a8f 100644
--- a/arch/x86/events/intel/uncore.c
+++ b/arch/x86/events/intel/uncore.c
@@ -347,8 +347,7 @@ void uncore_pmu_cancel_hrtimer(struct intel_uncore_box *box)
static void uncore_pmu_init_hrtimer(struct intel_uncore_box *box)
{
- hrtimer_init(&box->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- box->hrtimer.function = uncore_pmu_hrtimer;
+ hrtimer_setup(&box->hrtimer, uncore_pmu_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
}
static struct intel_uncore_box *uncore_alloc_box(struct intel_uncore_type *type,
diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
index 31c2771545a6..2c0ce0e9545e 100644
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -115,6 +115,11 @@ static inline bool is_branch_counters_group(struct perf_event *event)
return event->group_leader->hw.flags & PERF_X86_EVENT_BRANCH_COUNTERS;
}
+static inline bool is_pebs_counter_event_group(struct perf_event *event)
+{
+ return event->group_leader->hw.flags & PERF_X86_EVENT_PEBS_CNTR;
+}
+
struct amd_nb {
int nb_id; /* NorthBridge id */
int refcnt; /* reference count */
@@ -669,18 +674,6 @@ enum {
#define PERF_PEBS_DATA_SOURCE_GRT_MAX 0x10
#define PERF_PEBS_DATA_SOURCE_GRT_MASK (PERF_PEBS_DATA_SOURCE_GRT_MAX - 1)
-/*
- * CPUID.1AH.EAX[31:0] uniquely identifies the microarchitecture
- * of the core. Bits 31-24 indicates its core type (Core or Atom)
- * and Bits [23:0] indicates the native model ID of the core.
- * Core type and native model ID are defined in below enumerations.
- */
-enum hybrid_cpu_type {
- HYBRID_INTEL_NONE,
- HYBRID_INTEL_ATOM = 0x20,
- HYBRID_INTEL_CORE = 0x40,
-};
-
#define X86_HYBRID_PMU_ATOM_IDX 0
#define X86_HYBRID_PMU_CORE_IDX 1
#define X86_HYBRID_PMU_TINY_IDX 2
@@ -697,11 +690,6 @@ enum hybrid_pmu_type {
hybrid_big_small_tiny = hybrid_big | hybrid_small_tiny,
};
-enum atom_native_id {
- cmt_native_id = 0x2, /* Crestmont */
- skt_native_id = 0x3, /* Skymont */
-};
-
struct x86_hybrid_pmu {
struct pmu pmu;
const char *name;
@@ -800,6 +788,7 @@ struct x86_pmu {
u64 (*update)(struct perf_event *event);
int (*hw_config)(struct perf_event *event);
int (*schedule_events)(struct cpu_hw_events *cpuc, int n, int *assign);
+ void (*late_setup)(void);
unsigned eventsel;
unsigned perfctr;
unsigned fixedctr;
@@ -869,7 +858,7 @@ struct x86_pmu {
void (*check_microcode)(void);
void (*sched_task)(struct perf_event_pmu_context *pmu_ctx,
- bool sched_in);
+ struct task_struct *task, bool sched_in);
/*
* Intel Arch Perfmon v2+
@@ -914,6 +903,7 @@ struct x86_pmu {
const int *lbr_sel_map; /* lbr_select mappings */
int *lbr_ctl_map; /* LBR_CTL mappings */
};
+ u64 lbr_callstack_users; /* lbr callstack system wide users */
bool lbr_double_abort; /* duplicated lbr aborts */
bool lbr_pt_coexist; /* (LBR|BTS) may coexist with PT */
@@ -952,14 +942,6 @@ struct x86_pmu {
int num_topdown_events;
/*
- * perf task context (i.e. struct perf_event_pmu_context::task_ctx_data)
- * switch helper to bridge calls from perf/core to perf/x86.
- * See struct pmu::swap_task_ctx() usage for examples;
- */
- void (*swap_task_ctx)(struct perf_event_pmu_context *prev_epc,
- struct perf_event_pmu_context *next_epc);
-
- /*
* AMD bits
*/
unsigned int amd_nb_constraints : 1;
@@ -994,7 +976,7 @@ struct x86_pmu {
*/
int num_hybrid_pmus;
struct x86_hybrid_pmu *hybrid_pmu;
- enum hybrid_cpu_type (*get_hybrid_cpu_type) (void);
+ enum intel_cpu_type (*get_hybrid_cpu_type) (void);
};
struct x86_perf_task_context_opt {
@@ -1107,6 +1089,8 @@ extern struct x86_pmu x86_pmu __read_mostly;
DECLARE_STATIC_CALL(x86_pmu_set_period, *x86_pmu.set_period);
DECLARE_STATIC_CALL(x86_pmu_update, *x86_pmu.update);
+DECLARE_STATIC_CALL(x86_pmu_drain_pebs, *x86_pmu.drain_pebs);
+DECLARE_STATIC_CALL(x86_pmu_late_setup, *x86_pmu.late_setup);
static __always_inline struct x86_perf_task_context_opt *task_context_opt(void *ctx)
{
@@ -1148,6 +1132,12 @@ extern u64 __read_mostly hw_cache_extra_regs
u64 x86_perf_event_update(struct perf_event *event);
+static inline u64 intel_pmu_topdown_event_update(struct perf_event *event, u64 *val)
+{
+ return x86_perf_event_update(event);
+}
+DECLARE_STATIC_CALL(intel_pmu_update_topdown_event, intel_pmu_topdown_event_update);
+
static inline unsigned int x86_pmu_config_addr(int index)
{
return x86_pmu.eventsel + (x86_pmu.addr_offset ?
@@ -1394,7 +1384,8 @@ void amd_pmu_lbr_reset(void);
void amd_pmu_lbr_read(void);
void amd_pmu_lbr_add(struct perf_event *event);
void amd_pmu_lbr_del(struct perf_event *event);
-void amd_pmu_lbr_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched_in);
+void amd_pmu_lbr_sched_task(struct perf_event_pmu_context *pmu_ctx,
+ struct task_struct *task, bool sched_in);
void amd_pmu_lbr_enable_all(void);
void amd_pmu_lbr_disable_all(void);
int amd_pmu_lbr_hw_config(struct perf_event *event);
@@ -1448,7 +1439,8 @@ static inline void amd_pmu_brs_del(struct perf_event *event)
perf_sched_cb_dec(event->pmu);
}
-void amd_pmu_brs_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched_in);
+void amd_pmu_brs_sched_task(struct perf_event_pmu_context *pmu_ctx,
+ struct task_struct *task, bool sched_in);
#else
static inline int amd_brs_init(void)
{
@@ -1473,7 +1465,8 @@ static inline void amd_pmu_brs_del(struct perf_event *event)
{
}
-static inline void amd_pmu_brs_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched_in)
+static inline void amd_pmu_brs_sched_task(struct perf_event_pmu_context *pmu_ctx,
+ struct task_struct *task, bool sched_in)
{
}
@@ -1643,7 +1636,7 @@ void intel_pmu_pebs_disable_all(void);
void intel_pmu_pebs_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched_in);
-void intel_pmu_auto_reload_read(struct perf_event *event);
+void intel_pmu_drain_pebs_buffer(void);
void intel_pmu_store_pebs_lbrs(struct lbr_entry *lbr);
@@ -1653,10 +1646,8 @@ void intel_pmu_lbr_save_brstack(struct perf_sample_data *data,
struct cpu_hw_events *cpuc,
struct perf_event *event);
-void intel_pmu_lbr_swap_task_ctx(struct perf_event_pmu_context *prev_epc,
- struct perf_event_pmu_context *next_epc);
-
-void intel_pmu_lbr_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched_in);
+void intel_pmu_lbr_sched_task(struct perf_event_pmu_context *pmu_ctx,
+ struct task_struct *task, bool sched_in);
u64 lbr_from_signext_quirk_wr(u64 val);
diff --git a/arch/x86/events/perf_event_flags.h b/arch/x86/events/perf_event_flags.h
index 6c977c19f2cd..1d9e385649b5 100644
--- a/arch/x86/events/perf_event_flags.h
+++ b/arch/x86/events/perf_event_flags.h
@@ -9,7 +9,7 @@ PERF_ARCH(PEBS_LD_HSW, 0x00008) /* haswell style datala, load */
PERF_ARCH(PEBS_NA_HSW, 0x00010) /* haswell style datala, unknown */
PERF_ARCH(EXCL, 0x00020) /* HT exclusivity on counter */
PERF_ARCH(DYNAMIC, 0x00040) /* dynamic alloc'd constraint */
- /* 0x00080 */
+PERF_ARCH(PEBS_CNTR, 0x00080) /* PEBS counters snapshot */
PERF_ARCH(EXCL_ACCT, 0x00100) /* accounted EXCL event */
PERF_ARCH(AUTO_RELOAD, 0x00200) /* use PEBS auto-reload */
PERF_ARCH(LARGE_PEBS, 0x00400) /* use large PEBS */
diff --git a/arch/x86/events/rapl.c b/arch/x86/events/rapl.c
index 6941f4811bec..8ddace8cea96 100644
--- a/arch/x86/events/rapl.c
+++ b/arch/x86/events/rapl.c
@@ -274,8 +274,7 @@ static void rapl_hrtimer_init(struct rapl_pmu *rapl_pmu)
{
struct hrtimer *hr = &rapl_pmu->hrtimer;
- hrtimer_init(hr, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- hr->function = rapl_hrtimer_handle;
+ hrtimer_setup(hr, rapl_hrtimer_handle, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
}
static void __rapl_pmu_event_start(struct rapl_pmu *rapl_pmu,
@@ -730,6 +729,7 @@ static int __init init_rapl_pmus(struct rapl_pmus **rapl_pmus_ptr, int rapl_pmu_
{
int nr_rapl_pmu = topology_max_packages();
struct rapl_pmus *rapl_pmus;
+ int ret;
/*
* rapl_pmu_scope must be either PKG, DIE or CORE
@@ -761,7 +761,11 @@ static int __init init_rapl_pmus(struct rapl_pmus **rapl_pmus_ptr, int rapl_pmu_
rapl_pmus->pmu.module = THIS_MODULE;
rapl_pmus->pmu.capabilities = PERF_PMU_CAP_NO_EXCLUDE;
- return init_rapl_pmu(rapl_pmus);
+ ret = init_rapl_pmu(rapl_pmus);
+ if (ret)
+ kfree(rapl_pmus);
+
+ return ret;
}
static struct rapl_model model_snb = {
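For orientation, the hrtimer conversion above follows the general pattern sketched below (the handler name is a placeholder, not code from this file): hrtimer_setup() folds the callback assignment into the initialization call.

#include <linux/hrtimer.h>

static enum hrtimer_restart my_handler(struct hrtimer *t)
{
	return HRTIMER_NORESTART;
}

static void my_timer_init(struct hrtimer *timer)
{
	/* Old two-step form:
	 *	hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	 *	timer->function = my_handler;
	 * New single call, so the callback can no longer be left unset: */
	hrtimer_setup(timer, my_handler, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
}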
diff --git a/arch/x86/hyperv/Makefile b/arch/x86/hyperv/Makefile
index 3a1548054b48..d55f494f471d 100644
--- a/arch/x86/hyperv/Makefile
+++ b/arch/x86/hyperv/Makefile
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-y := hv_init.o mmu.o nested.o irqdomain.o ivm.o
-obj-$(CONFIG_X86_64) += hv_apic.o hv_proc.o
+obj-$(CONFIG_X86_64) += hv_apic.o
obj-$(CONFIG_HYPERV_VTL_MODE) += hv_vtl.o
ifdef CONFIG_X86_64
diff --git a/arch/x86/hyperv/hv_apic.c b/arch/x86/hyperv/hv_apic.c
index f022d5f64fb6..6d91ac5f9836 100644
--- a/arch/x86/hyperv/hv_apic.c
+++ b/arch/x86/hyperv/hv_apic.c
@@ -145,6 +145,11 @@ static bool __send_ipi_mask_ex(const struct cpumask *mask, int vector,
ipi_arg->vp_set.format = HV_GENERIC_SET_ALL;
}
+ /*
+ * For this hypercall, Hyper-V treats the valid_bank_mask field
+ * of ipi_arg->vp_set as part of the fixed size input header.
+ * So the variable input header size is equal to nr_bank.
+ */
status = hv_do_rep_hypercall(HVCALL_SEND_IPI_EX, 0, nr_bank,
ipi_arg, NULL);
diff --git a/arch/x86/hyperv/hv_init.c b/arch/x86/hyperv/hv_init.c
index 173005e6a95d..ddeb40930bc8 100644
--- a/arch/x86/hyperv/hv_init.c
+++ b/arch/x86/hyperv/hv_init.c
@@ -34,9 +34,6 @@
#include <clocksource/hyperv_timer.h>
#include <linux/highmem.h>
-u64 hv_current_partition_id = ~0ull;
-EXPORT_SYMBOL_GPL(hv_current_partition_id);
-
void *hv_hypercall_pg;
EXPORT_SYMBOL_GPL(hv_hypercall_pg);
@@ -93,7 +90,7 @@ static int hv_cpu_init(unsigned int cpu)
return 0;
hvp = &hv_vp_assist_page[cpu];
- if (hv_root_partition) {
+ if (hv_root_partition()) {
/*
* For root partition we get the hypervisor provided VP assist
* page, instead of allocating a new page.
@@ -245,7 +242,7 @@ static int hv_cpu_die(unsigned int cpu)
if (hv_vp_assist_page && hv_vp_assist_page[cpu]) {
union hv_vp_assist_msr_contents msr = { 0 };
- if (hv_root_partition) {
+ if (hv_root_partition()) {
/*
* For root partition the VP assist page is mapped to
* hypervisor provided page, and thus we unmap the
@@ -320,7 +317,7 @@ static int hv_suspend(void)
union hv_x64_msr_hypercall_contents hypercall_msr;
int ret;
- if (hv_root_partition)
+ if (hv_root_partition())
return -EPERM;
/*
@@ -393,24 +390,6 @@ static void __init hv_stimer_setup_percpu_clockev(void)
old_setup_percpu_clockev();
}
-static void __init hv_get_partition_id(void)
-{
- struct hv_get_partition_id *output_page;
- u64 status;
- unsigned long flags;
-
- local_irq_save(flags);
- output_page = *this_cpu_ptr(hyperv_pcpu_output_arg);
- status = hv_do_hypercall(HVCALL_GET_PARTITION_ID, NULL, output_page);
- if (!hv_result_success(status)) {
- /* No point in proceeding if this failed */
- pr_err("Failed to get partition ID: %lld\n", status);
- BUG();
- }
- hv_current_partition_id = output_page->partition_id;
- local_irq_restore(flags);
-}
-
#if IS_ENABLED(CONFIG_HYPERV_VTL_MODE)
static u8 __init get_vtl(void)
{
@@ -539,7 +518,7 @@ void __init hyperv_init(void)
rdmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
hypercall_msr.enable = 1;
- if (hv_root_partition) {
+ if (hv_root_partition()) {
struct page *pg;
void *src;
@@ -605,17 +584,15 @@ skip_hypercall_pg_init:
register_syscore_ops(&hv_syscore_ops);
- if (cpuid_ebx(HYPERV_CPUID_FEATURES) & HV_ACCESS_PARTITION_ID)
+ if (ms_hyperv.priv_high & HV_ACCESS_PARTITION_ID)
hv_get_partition_id();
- BUG_ON(hv_root_partition && hv_current_partition_id == ~0ull);
-
#ifdef CONFIG_PCI_MSI
/*
* If we're running as root, we want to create our own PCI MSI domain.
* We can't set this in hv_pci_init because that would be too late.
*/
- if (hv_root_partition)
+ if (hv_root_partition())
x86_init.irqs.create_pci_msi_domain = hv_create_pci_msi_domain;
#endif
diff --git a/arch/x86/hyperv/hv_vtl.c b/arch/x86/hyperv/hv_vtl.c
index 4e1b1e3b5658..13242ed8ff16 100644
--- a/arch/x86/hyperv/hv_vtl.c
+++ b/arch/x86/hyperv/hv_vtl.c
@@ -12,6 +12,7 @@
#include <asm/i8259.h>
#include <asm/mshyperv.h>
#include <asm/realmode.h>
+#include <asm/reboot.h>
#include <../kernel/smpboot.h>
extern struct boot_params boot_params;
@@ -22,6 +23,36 @@ static bool __init hv_vtl_msi_ext_dest_id(void)
return true;
}
+/*
+ * The `native_machine_emergency_restart` function from `reboot.c` writes
+ * to the physical address 0x472 to indicate the type of reboot for the
+ * firmware. We cannot have that in VSM as the memory composition might
+ * be more generic, and such a write effectively corrupts the memory,
+ * making diagnostics harder at the very least.
+ */
+static void __noreturn hv_vtl_emergency_restart(void)
+{
+ /*
+	 * Cause a triple fault and an immediate reset. The code does not run
+	 * on top of any firmware here, so it cannot reach out to firmware
+	 * services. The infinite loop covers the improbable case that the
+	 * triple fault does not work and preserves the state intact for debugging.
+ */
+ for (;;) {
+ idt_invalidate();
+ __asm__ __volatile__("int3");
+ }
+}
+
+/*
+ * The only way to restart in the VTL mode is to triple fault as the kernel runs
+ * as firmware.
+ */
+static void __noreturn hv_vtl_restart(char __maybe_unused *cmd)
+{
+ hv_vtl_emergency_restart();
+}
+
void __init hv_vtl_init_platform(void)
{
pr_info("Linux runs in Hyper-V Virtual Trust Level\n");
@@ -30,6 +61,7 @@ void __init hv_vtl_init_platform(void)
x86_platform.realmode_init = x86_init_noop;
x86_init.irqs.pre_vector_init = x86_init_noop;
x86_init.timers.timer_init = x86_init_noop;
+ x86_init.resources.probe_roms = x86_init_noop;
/* Avoid searching for BIOS MP tables */
x86_init.mpparse.find_mptable = x86_init_noop;
@@ -235,6 +267,9 @@ static int hv_vtl_wakeup_secondary_cpu(u32 apicid, unsigned long start_eip)
int __init hv_vtl_early_init(void)
{
+ machine_ops.emergency_restart = hv_vtl_emergency_restart;
+ machine_ops.restart = hv_vtl_restart;
+
/*
* `boot_cpu_has` returns the runtime feature support,
* and here is the earliest it can be used.
diff --git a/arch/x86/hyperv/irqdomain.c b/arch/x86/hyperv/irqdomain.c
index 64b921360b0f..31f0d29cbc5e 100644
--- a/arch/x86/hyperv/irqdomain.c
+++ b/arch/x86/hyperv/irqdomain.c
@@ -64,7 +64,7 @@ static int hv_map_interrupt(union hv_device_id device_id, bool level,
local_irq_restore(flags);
if (!hv_result_success(status))
- pr_err("%s: hypercall failed, status %lld\n", __func__, status);
+ hv_status_err(status, "\n");
return hv_result(status);
}
@@ -224,7 +224,7 @@ static void hv_irq_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
kfree(stored_entry);
if (status != HV_STATUS_SUCCESS) {
- pr_debug("%s: failed to unmap, status %lld", __func__, status);
+ hv_status_debug(status, "failed to unmap\n");
return;
}
}
@@ -273,7 +273,7 @@ static void hv_teardown_msi_irq(struct pci_dev *dev, struct irq_data *irqd)
status = hv_unmap_msi_interrupt(dev, &old_entry);
if (status != HV_STATUS_SUCCESS)
- pr_err("%s: hypercall failed, status %lld\n", __func__, status);
+ hv_status_err(status, "\n");
}
static void hv_msi_free_irq(struct irq_domain *domain,
diff --git a/arch/x86/hyperv/ivm.c b/arch/x86/hyperv/ivm.c
index dd68d9ad9b22..77bf05f06b9e 100644
--- a/arch/x86/hyperv/ivm.c
+++ b/arch/x86/hyperv/ivm.c
@@ -338,7 +338,7 @@ int hv_snp_boot_ap(u32 cpu, unsigned long start_ip)
vmsa->sev_features = sev_status >> 2;
ret = snp_set_vmsa(vmsa, true);
- if (!ret) {
+ if (ret) {
pr_err("RMPADJUST(%llx) failed: %llx\n", (u64)vmsa, ret);
free_page((u64)vmsa);
return ret;
@@ -464,7 +464,6 @@ static int hv_mark_gpa_visibility(u16 count, const u64 pfn[],
enum hv_mem_host_visibility visibility)
{
struct hv_gpa_range_for_visibility *input;
- u16 pages_processed;
u64 hv_status;
unsigned long flags;
@@ -493,7 +492,7 @@ static int hv_mark_gpa_visibility(u16 count, const u64 pfn[],
memcpy((void *)input->gpa_page_list, pfn, count * sizeof(*pfn));
hv_status = hv_do_rep_hypercall(
HVCALL_MODIFY_SPARSE_GPA_PAGE_HOST_VISIBILITY, count,
- 0, input, &pages_processed);
+ 0, input, NULL);
local_irq_restore(flags);
if (hv_result_success(hv_status))
diff --git a/arch/x86/hyperv/mmu.c b/arch/x86/hyperv/mmu.c
index cc8c3bd0e7c2..cfcb60468b01 100644
--- a/arch/x86/hyperv/mmu.c
+++ b/arch/x86/hyperv/mmu.c
@@ -205,6 +205,10 @@ static u64 hyperv_flush_tlb_others_ex(const struct cpumask *cpus,
/*
* We can flush not more than max_gvas with one hypercall. Flush the
* whole address space if we were asked to do more.
+ *
+ * For these hypercalls, Hyper-V treats the valid_bank_mask field
+ * of flush->hv_vp_set as part of the fixed size input header.
+ * So the variable input header size is equal to nr_bank.
*/
max_gvas =
(PAGE_SIZE - sizeof(*flush) - nr_bank *
@@ -239,5 +243,4 @@ void hyperv_setup_mmu_ops(void)
pr_info("Using hypercall for remote TLB flush\n");
pv_ops.mmu.flush_tlb_multi = hyperv_flush_tlb_multi;
- pv_ops.mmu.tlb_remove_table = tlb_remove_table;
}
diff --git a/arch/x86/include/asm/Kbuild b/arch/x86/include/asm/Kbuild
index 58f4ddecc5fa..4566000e15c4 100644
--- a/arch/x86/include/asm/Kbuild
+++ b/arch/x86/include/asm/Kbuild
@@ -8,6 +8,7 @@ generated-y += syscalls_x32.h
generated-y += unistd_32_ia32.h
generated-y += unistd_64_x32.h
generated-y += xen-hypercalls.h
+generated-y += cpufeaturemasks.h
generic-y += early_ioremap.h
generic-y += fprobe.h
diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
index e3903b731305..4a37a8bd87fd 100644
--- a/arch/x86/include/asm/alternative.h
+++ b/arch/x86/include/asm/alternative.h
@@ -15,7 +15,7 @@
#define ALT_DIRECT_CALL(feature) ((ALT_FLAG_DIRECT_CALL << ALT_FLAGS_SHIFT) | (feature))
#define ALT_CALL_ALWAYS ALT_DIRECT_CALL(X86_FEATURE_ALWAYS)
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <linux/stddef.h>
@@ -48,7 +48,7 @@
".popsection\n" \
"671:"
-#define LOCK_PREFIX LOCK_PREFIX_HERE "\n\tlock; "
+#define LOCK_PREFIX LOCK_PREFIX_HERE "\n\tlock "
#else /* ! CONFIG_SMP */
#define LOCK_PREFIX_HERE ""
@@ -87,20 +87,19 @@ extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
* instructions were patched in already:
*/
extern int alternatives_patched;
-struct module;
extern void alternative_instructions(void);
-extern void apply_alternatives(struct alt_instr *start, struct alt_instr *end,
- struct module *mod);
-extern void apply_retpolines(s32 *start, s32 *end, struct module *mod);
-extern void apply_returns(s32 *start, s32 *end, struct module *mod);
-extern void apply_seal_endbr(s32 *start, s32 *end, struct module *mod);
+extern void apply_alternatives(struct alt_instr *start, struct alt_instr *end);
+extern void apply_retpolines(s32 *start, s32 *end);
+extern void apply_returns(s32 *start, s32 *end);
+extern void apply_seal_endbr(s32 *start, s32 *end);
extern void apply_fineibt(s32 *start_retpoline, s32 *end_retpoine,
- s32 *start_cfi, s32 *end_cfi, struct module *mod);
+ s32 *start_cfi, s32 *end_cfi);
+
+struct module;
struct callthunk_sites {
s32 *call_start, *call_end;
- struct alt_instr *alt_start, *alt_end;
};
#ifdef CONFIG_CALL_THUNKS
@@ -237,10 +236,12 @@ static inline int alternatives_text_reserved(void *start, void *end)
* references: i.e., if used for a function, it would add the PLT
* suffix.
*/
-#define alternative_call(oldfunc, newfunc, ft_flags, output, input...) \
+#define alternative_call(oldfunc, newfunc, ft_flags, output, input, clobbers...) \
asm_inline volatile(ALTERNATIVE("call %c[old]", "call %c[new]", ft_flags) \
: ALT_OUTPUT_SP(output) \
- : [old] "i" (oldfunc), [new] "i" (newfunc), ## input)
+ : [old] "i" (oldfunc), [new] "i" (newfunc) \
+ COMMA(input) \
+ : clobbers)
/*
* Like alternative_call, but there are two features and respective functions.
@@ -249,24 +250,14 @@ static inline int alternatives_text_reserved(void *start, void *end)
* Otherwise, old function is used.
*/
#define alternative_call_2(oldfunc, newfunc1, ft_flags1, newfunc2, ft_flags2, \
- output, input...) \
+ output, input, clobbers...) \
asm_inline volatile(ALTERNATIVE_2("call %c[old]", "call %c[new1]", ft_flags1, \
"call %c[new2]", ft_flags2) \
: ALT_OUTPUT_SP(output) \
: [old] "i" (oldfunc), [new1] "i" (newfunc1), \
- [new2] "i" (newfunc2), ## input)
-
-/*
- * use this macro(s) if you need more than one output parameter
- * in alternative_io
- */
-#define ASM_OUTPUT2(a...) a
-
-/*
- * use this macro if you need clobbers but no inputs in
- * alternative_{input,io,call}()
- */
-#define ASM_NO_INPUT_CLOBBER(clbr...) "i" (0) : clbr
+ [new2] "i" (newfunc2) \
+ COMMA(input) \
+ : clobbers)
#define ALT_OUTPUT_SP(...) ASM_CALL_CONSTRAINT, ## __VA_ARGS__
@@ -286,7 +277,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
void BUG_func(void);
void nop_func(void);
-#else /* __ASSEMBLY__ */
+#else /* __ASSEMBLER__ */
#ifdef CONFIG_SMP
.macro LOCK_PREFIX
@@ -369,6 +360,6 @@ void nop_func(void);
ALTERNATIVE_2 oldinstr, newinstr_no, X86_FEATURE_ALWAYS, \
newinstr_yes, ft_flags
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif /* _ASM_X86_ALTERNATIVE_H */
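A hypothetical call site migrated to the reworked alternative_call() above (old_fn, new_fn and the feature bit are placeholders, not taken from this patch):

extern void old_fn(void);
extern void new_fn(void);

static int example(void)
{
	int ret;

	/* Old style: input constraints and clobbers rode in the variadic tail. */
	alternative_call(old_fn, new_fn, X86_FEATURE_FOO,
			 ASM_OUTPUT2("=a" (ret)),
			 ASM_NO_INPUT_CLOBBER("memory"));

	/* New style: output, input and clobbers are separate macro arguments;
	 * an empty input slot is simply left blank. */
	alternative_call(old_fn, new_fn, X86_FEATURE_FOO,
			 ASM_OUTPUT("=a" (ret)),
			 /* no input */,
			 "memory");

	return ret;
}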
diff --git a/arch/x86/include/asm/amd-ibs.h b/arch/x86/include/asm/amd-ibs.h
index cb2a5e113daa..77f3a589a99a 100644
--- a/arch/x86/include/asm/amd-ibs.h
+++ b/arch/x86/include/asm/amd-ibs.h
@@ -64,7 +64,8 @@ union ibs_op_ctl {
opmaxcnt_ext:7, /* 20-26: upper 7 bits of periodic op maximum count */
reserved0:5, /* 27-31: reserved */
opcurcnt:27, /* 32-58: periodic op counter current count */
- reserved1:5; /* 59-63: reserved */
+ ldlat_thrsh:4, /* 59-62: Load Latency threshold */
+ ldlat_en:1; /* 63: Load Latency enabled */
};
};
diff --git a/arch/x86/include/asm/amd_nb.h b/arch/x86/include/asm/amd_nb.h
index 4c4efb93045e..adfa0854cf2d 100644
--- a/arch/x86/include/asm/amd_nb.h
+++ b/arch/x86/include/asm/amd_nb.h
@@ -27,7 +27,6 @@ struct amd_l3_cache {
};
struct amd_northbridge {
- struct pci_dev *root;
struct pci_dev *misc;
struct pci_dev *link;
struct amd_l3_cache l3_cache;
diff --git a/arch/x86/include/asm/amd_node.h b/arch/x86/include/asm/amd_node.h
index 113ad3e8ee40..23fe617898a8 100644
--- a/arch/x86/include/asm/amd_node.h
+++ b/arch/x86/include/asm/amd_node.h
@@ -30,7 +30,31 @@ static inline u16 amd_num_nodes(void)
return topology_amd_nodes_per_pkg() * topology_max_packages();
}
+#ifdef CONFIG_AMD_NODE
int __must_check amd_smn_read(u16 node, u32 address, u32 *value);
int __must_check amd_smn_write(u16 node, u32 address, u32 value);
+/* Should only be used by the HSMP driver. */
+int __must_check amd_smn_hsmp_rdwr(u16 node, u32 address, u32 *value, bool write);
+#else
+static inline int __must_check amd_smn_read(u16 node, u32 address, u32 *value) { return -ENODEV; }
+static inline int __must_check amd_smn_write(u16 node, u32 address, u32 value) { return -ENODEV; }
+
+static inline int __must_check amd_smn_hsmp_rdwr(u16 node, u32 address, u32 *value, bool write)
+{
+ return -ENODEV;
+}
+#endif /* CONFIG_AMD_NODE */
+
+/* helper for use with read_poll_timeout */
+static inline int smn_read_register(u32 reg)
+{
+ int data, rc;
+
+ rc = amd_smn_read(0, reg, &data);
+ if (rc)
+ return rc;
+
+ return data;
+}
#endif /*_ASM_X86_AMD_NODE_H_*/
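A minimal sketch of the read_poll_timeout() pattern the helper above enables (the register address and ready bit are made up, not part of this patch):

#include <linux/iopoll.h>

static int wait_for_smn_ready(u32 status_reg)
{
	int val, ret;

	/* Poll every 100us, time out after 100ms; a negative value from
	 * smn_read_register() (a propagated errno) also ends the loop. */
	ret = read_poll_timeout(smn_read_register, val,
				val < 0 || (val & BIT(0)),
				100, 100000, false, status_reg);
	if (ret)
		return ret;		/* -ETIMEDOUT */

	return val < 0 ? val : 0;	/* propagate amd_smn_read() errors */
}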
diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
index f21ff1932699..c903d358405d 100644
--- a/arch/x86/include/asm/apic.h
+++ b/arch/x86/include/asm/apic.h
@@ -99,8 +99,8 @@ static inline void native_apic_mem_write(u32 reg, u32 v)
volatile u32 *addr = (volatile u32 *)(APIC_BASE + reg);
alternative_io("movl %0, %1", "xchgl %0, %1", X86_BUG_11AP,
- ASM_OUTPUT2("=r" (v), "=m" (*addr)),
- ASM_OUTPUT2("0" (v), "m" (*addr)));
+ ASM_OUTPUT("=r" (v), "=m" (*addr)),
+ ASM_INPUT("0" (v), "m" (*addr)));
}
static inline u32 native_apic_mem_read(u32 reg)
diff --git a/arch/x86/include/asm/arch_hweight.h b/arch/x86/include/asm/arch_hweight.h
index ba88edd0d58b..b5982b94bdba 100644
--- a/arch/x86/include/asm/arch_hweight.h
+++ b/arch/x86/include/asm/arch_hweight.h
@@ -16,9 +16,10 @@ static __always_inline unsigned int __arch_hweight32(unsigned int w)
{
unsigned int res;
- asm (ALTERNATIVE("call __sw_hweight32", "popcntl %1, %0", X86_FEATURE_POPCNT)
- : "="REG_OUT (res)
- : REG_IN (w));
+ asm_inline (ALTERNATIVE("call __sw_hweight32",
+ "popcntl %[val], %[cnt]", X86_FEATURE_POPCNT)
+ : [cnt] "=" REG_OUT (res), ASM_CALL_CONSTRAINT
+ : [val] REG_IN (w));
return res;
}
@@ -44,9 +45,10 @@ static __always_inline unsigned long __arch_hweight64(__u64 w)
{
unsigned long res;
- asm (ALTERNATIVE("call __sw_hweight64", "popcntq %1, %0", X86_FEATURE_POPCNT)
- : "="REG_OUT (res)
- : REG_IN (w));
+ asm_inline (ALTERNATIVE("call __sw_hweight64",
+ "popcntq %[val], %[cnt]", X86_FEATURE_POPCNT)
+ : [cnt] "=" REG_OUT (res), ASM_CALL_CONSTRAINT
+ : [val] REG_IN (w));
return res;
}
diff --git a/arch/x86/include/asm/asm-prototypes.h b/arch/x86/include/asm/asm-prototypes.h
index 3674006e3974..11c6fecc3ad7 100644
--- a/arch/x86/include/asm/asm-prototypes.h
+++ b/arch/x86/include/asm/asm-prototypes.h
@@ -16,10 +16,10 @@
#include <asm/gsseg.h>
#include <asm/nospec-branch.h>
-#ifndef CONFIG_X86_CMPXCHG64
+#ifndef CONFIG_X86_CX8
extern void cmpxchg8b_emu(void);
#endif
-#if defined(__GENKSYMS__) && defined(CONFIG_STACKPROTECTOR)
+#ifdef CONFIG_STACKPROTECTOR
extern unsigned long __ref_stack_chk_guard;
#endif
diff --git a/arch/x86/include/asm/asm.h b/arch/x86/include/asm/asm.h
index 2bec0c89a95c..cc2881576c2c 100644
--- a/arch/x86/include/asm/asm.h
+++ b/arch/x86/include/asm/asm.h
@@ -2,7 +2,7 @@
#ifndef _ASM_X86_ASM_H
#define _ASM_X86_ASM_H
-#ifdef __ASSEMBLY__
+#ifdef __ASSEMBLER__
# define __ASM_FORM(x, ...) x,## __VA_ARGS__
# define __ASM_FORM_RAW(x, ...) x,## __VA_ARGS__
# define __ASM_FORM_COMMA(x, ...) x,## __VA_ARGS__,
@@ -113,7 +113,7 @@
#endif
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#ifndef __pic__
static __always_inline __pure void *rip_rel_ptr(void *p)
{
@@ -144,7 +144,7 @@ static __always_inline __pure void *rip_rel_ptr(void *p)
# include <asm/extable_fixup_types.h>
/* Exception table entry */
-#ifdef __ASSEMBLY__
+#ifdef __ASSEMBLER__
# define _ASM_EXTABLE_TYPE(from, to, type) \
.pushsection "__ex_table","a" ; \
@@ -164,7 +164,7 @@ static __always_inline __pure void *rip_rel_ptr(void *p)
# define _ASM_NOKPROBE(entry)
# endif
-#else /* ! __ASSEMBLY__ */
+#else /* ! __ASSEMBLER__ */
# define DEFINE_EXTABLE_TYPE_REG \
".macro extable_type_reg type:req reg:req\n" \
@@ -213,6 +213,17 @@ static __always_inline __pure void *rip_rel_ptr(void *p)
/* For C file, we already have NOKPROBE_SYMBOL macro */
+/* Insert a comma if args are non-empty */
+#define COMMA(x...) __COMMA(x)
+#define __COMMA(...) , ##__VA_ARGS__
+
+/*
+ * Combine multiple asm inline constraint args into a single arg for passing to
+ * another macro.
+ */
+#define ASM_OUTPUT(x...) x
+#define ASM_INPUT(x...) x
+
/*
* This output constraint should be used for any inline asm which has a "call"
* instruction. Otherwise the asm may be inserted before the frame pointer
@@ -221,7 +232,7 @@ static __always_inline __pure void *rip_rel_ptr(void *p)
*/
register unsigned long current_stack_pointer asm(_ASM_SP);
#define ASM_CALL_CONSTRAINT "+r" (current_stack_pointer)
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#define _ASM_EXTABLE(from, to) \
_ASM_EXTABLE_TYPE(from, to, EX_TYPE_DEFAULT)
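To illustrate why COMMA() exists (a hypothetical expansion, not from this patch): the ',##' paste swallows the leading comma when the argument list is empty, which is what lets wrappers such as alternative_call() accept an empty input slot.

/*
 *	COMMA("r" (x), "c" (y))   expands to   , "r" (x), "c" (y)
 *	COMMA()                   expands to   nothing at all
 *
 * ASM_OUTPUT()/ASM_INPUT() merely group several constraints into a single
 * macro argument so they survive being passed through another macro.
 */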
diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
index 55b4d24356ea..75743f1dfd4e 100644
--- a/arch/x86/include/asm/atomic.h
+++ b/arch/x86/include/asm/atomic.h
@@ -30,14 +30,14 @@ static __always_inline void arch_atomic_set(atomic_t *v, int i)
static __always_inline void arch_atomic_add(int i, atomic_t *v)
{
- asm volatile(LOCK_PREFIX "addl %1,%0"
+ asm_inline volatile(LOCK_PREFIX "addl %1, %0"
: "+m" (v->counter)
: "ir" (i) : "memory");
}
static __always_inline void arch_atomic_sub(int i, atomic_t *v)
{
- asm volatile(LOCK_PREFIX "subl %1,%0"
+ asm_inline volatile(LOCK_PREFIX "subl %1, %0"
: "+m" (v->counter)
: "ir" (i) : "memory");
}
@@ -50,14 +50,14 @@ static __always_inline bool arch_atomic_sub_and_test(int i, atomic_t *v)
static __always_inline void arch_atomic_inc(atomic_t *v)
{
- asm volatile(LOCK_PREFIX "incl %0"
+ asm_inline volatile(LOCK_PREFIX "incl %0"
: "+m" (v->counter) :: "memory");
}
#define arch_atomic_inc arch_atomic_inc
static __always_inline void arch_atomic_dec(atomic_t *v)
{
- asm volatile(LOCK_PREFIX "decl %0"
+ asm_inline volatile(LOCK_PREFIX "decl %0"
: "+m" (v->counter) :: "memory");
}
#define arch_atomic_dec arch_atomic_dec
@@ -116,7 +116,7 @@ static __always_inline int arch_atomic_xchg(atomic_t *v, int new)
static __always_inline void arch_atomic_and(int i, atomic_t *v)
{
- asm volatile(LOCK_PREFIX "andl %1,%0"
+ asm_inline volatile(LOCK_PREFIX "andl %1, %0"
: "+m" (v->counter)
: "ir" (i)
: "memory");
@@ -134,7 +134,7 @@ static __always_inline int arch_atomic_fetch_and(int i, atomic_t *v)
static __always_inline void arch_atomic_or(int i, atomic_t *v)
{
- asm volatile(LOCK_PREFIX "orl %1,%0"
+ asm_inline volatile(LOCK_PREFIX "orl %1, %0"
: "+m" (v->counter)
: "ir" (i)
: "memory");
@@ -152,7 +152,7 @@ static __always_inline int arch_atomic_fetch_or(int i, atomic_t *v)
static __always_inline void arch_atomic_xor(int i, atomic_t *v)
{
- asm volatile(LOCK_PREFIX "xorl %1,%0"
+ asm_inline volatile(LOCK_PREFIX "xorl %1, %0"
: "+m" (v->counter)
: "ir" (i)
: "memory");
diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
index 6c6e9b9f98a4..ab838205c1c6 100644
--- a/arch/x86/include/asm/atomic64_32.h
+++ b/arch/x86/include/asm/atomic64_32.h
@@ -48,17 +48,20 @@ static __always_inline s64 arch_atomic64_read_nonatomic(const atomic64_t *v)
ATOMIC64_EXPORT(atomic64_##sym)
#endif
-#ifdef CONFIG_X86_CMPXCHG64
-#define __alternative_atomic64(f, g, out, in...) \
- asm volatile("call %c[func]" \
+#ifdef CONFIG_X86_CX8
+#define __alternative_atomic64(f, g, out, in, clobbers...) \
+ asm volatile("call %c[func]" \
: ALT_OUTPUT_SP(out) \
- : [func] "i" (atomic64_##g##_cx8), ## in)
+ : [func] "i" (atomic64_##g##_cx8) \
+ COMMA(in) \
+ : clobbers)
#define ATOMIC64_DECL(sym) ATOMIC64_DECL_ONE(sym##_cx8)
#else
-#define __alternative_atomic64(f, g, out, in...) \
- alternative_call(atomic64_##f##_386, atomic64_##g##_cx8, \
- X86_FEATURE_CX8, ASM_OUTPUT2(out), ## in)
+#define __alternative_atomic64(f, g, out, in, clobbers...) \
+ alternative_call(atomic64_##f##_386, atomic64_##g##_cx8, \
+ X86_FEATURE_CX8, ASM_OUTPUT(out), \
+ ASM_INPUT(in), clobbers)
#define ATOMIC64_DECL(sym) ATOMIC64_DECL_ONE(sym##_cx8); \
ATOMIC64_DECL_ONE(sym##_386)
@@ -69,8 +72,8 @@ ATOMIC64_DECL_ONE(inc_386);
ATOMIC64_DECL_ONE(dec_386);
#endif
-#define alternative_atomic64(f, out, in...) \
- __alternative_atomic64(f, f, ASM_OUTPUT2(out), ## in)
+#define alternative_atomic64(f, out, in, clobbers...) \
+ __alternative_atomic64(f, f, ASM_OUTPUT(out), ASM_INPUT(in), clobbers)
ATOMIC64_DECL(read);
ATOMIC64_DECL(set);
@@ -105,9 +108,10 @@ static __always_inline s64 arch_atomic64_xchg(atomic64_t *v, s64 n)
s64 o;
unsigned high = (unsigned)(n >> 32);
unsigned low = (unsigned)n;
- alternative_atomic64(xchg, "=&A" (o),
- "S" (v), "b" (low), "c" (high)
- : "memory");
+ alternative_atomic64(xchg,
+ "=&A" (o),
+ ASM_INPUT("S" (v), "b" (low), "c" (high)),
+ "memory");
return o;
}
#define arch_atomic64_xchg arch_atomic64_xchg
@@ -116,23 +120,25 @@ static __always_inline void arch_atomic64_set(atomic64_t *v, s64 i)
{
unsigned high = (unsigned)(i >> 32);
unsigned low = (unsigned)i;
- alternative_atomic64(set, /* no output */,
- "S" (v), "b" (low), "c" (high)
- : "eax", "edx", "memory");
+ alternative_atomic64(set,
+ /* no output */,
+ ASM_INPUT("S" (v), "b" (low), "c" (high)),
+ "eax", "edx", "memory");
}
static __always_inline s64 arch_atomic64_read(const atomic64_t *v)
{
s64 r;
- alternative_atomic64(read, "=&A" (r), "c" (v) : "memory");
+ alternative_atomic64(read, "=&A" (r), "c" (v), "memory");
return r;
}
static __always_inline s64 arch_atomic64_add_return(s64 i, atomic64_t *v)
{
alternative_atomic64(add_return,
- ASM_OUTPUT2("+A" (i), "+c" (v)),
- ASM_NO_INPUT_CLOBBER("memory"));
+ ASM_OUTPUT("+A" (i), "+c" (v)),
+ /* no input */,
+ "memory");
return i;
}
#define arch_atomic64_add_return arch_atomic64_add_return
@@ -140,8 +146,9 @@ static __always_inline s64 arch_atomic64_add_return(s64 i, atomic64_t *v)
static __always_inline s64 arch_atomic64_sub_return(s64 i, atomic64_t *v)
{
alternative_atomic64(sub_return,
- ASM_OUTPUT2("+A" (i), "+c" (v)),
- ASM_NO_INPUT_CLOBBER("memory"));
+ ASM_OUTPUT("+A" (i), "+c" (v)),
+ /* no input */,
+ "memory");
return i;
}
#define arch_atomic64_sub_return arch_atomic64_sub_return
@@ -149,8 +156,10 @@ static __always_inline s64 arch_atomic64_sub_return(s64 i, atomic64_t *v)
static __always_inline s64 arch_atomic64_inc_return(atomic64_t *v)
{
s64 a;
- alternative_atomic64(inc_return, "=&A" (a),
- "S" (v) : "memory", "ecx");
+ alternative_atomic64(inc_return,
+ "=&A" (a),
+ "S" (v),
+ "memory", "ecx");
return a;
}
#define arch_atomic64_inc_return arch_atomic64_inc_return
@@ -158,8 +167,10 @@ static __always_inline s64 arch_atomic64_inc_return(atomic64_t *v)
static __always_inline s64 arch_atomic64_dec_return(atomic64_t *v)
{
s64 a;
- alternative_atomic64(dec_return, "=&A" (a),
- "S" (v) : "memory", "ecx");
+ alternative_atomic64(dec_return,
+ "=&A" (a),
+ "S" (v),
+ "memory", "ecx");
return a;
}
#define arch_atomic64_dec_return arch_atomic64_dec_return
@@ -167,28 +178,34 @@ static __always_inline s64 arch_atomic64_dec_return(atomic64_t *v)
static __always_inline void arch_atomic64_add(s64 i, atomic64_t *v)
{
__alternative_atomic64(add, add_return,
- ASM_OUTPUT2("+A" (i), "+c" (v)),
- ASM_NO_INPUT_CLOBBER("memory"));
+ ASM_OUTPUT("+A" (i), "+c" (v)),
+ /* no input */,
+ "memory");
}
static __always_inline void arch_atomic64_sub(s64 i, atomic64_t *v)
{
__alternative_atomic64(sub, sub_return,
- ASM_OUTPUT2("+A" (i), "+c" (v)),
- ASM_NO_INPUT_CLOBBER("memory"));
+ ASM_OUTPUT("+A" (i), "+c" (v)),
+ /* no input */,
+ "memory");
}
static __always_inline void arch_atomic64_inc(atomic64_t *v)
{
- __alternative_atomic64(inc, inc_return, /* no output */,
- "S" (v) : "memory", "eax", "ecx", "edx");
+ __alternative_atomic64(inc, inc_return,
+ /* no output */,
+ "S" (v),
+ "memory", "eax", "ecx", "edx");
}
#define arch_atomic64_inc arch_atomic64_inc
static __always_inline void arch_atomic64_dec(atomic64_t *v)
{
- __alternative_atomic64(dec, dec_return, /* no output */,
- "S" (v) : "memory", "eax", "ecx", "edx");
+ __alternative_atomic64(dec, dec_return,
+ /* no output */,
+ "S" (v),
+ "memory", "eax", "ecx", "edx");
}
#define arch_atomic64_dec arch_atomic64_dec
@@ -197,8 +214,9 @@ static __always_inline int arch_atomic64_add_unless(atomic64_t *v, s64 a, s64 u)
unsigned low = (unsigned)u;
unsigned high = (unsigned)(u >> 32);
alternative_atomic64(add_unless,
- ASM_OUTPUT2("+A" (a), "+c" (low), "+D" (high)),
- "S" (v) : "memory");
+ ASM_OUTPUT("+A" (a), "+c" (low), "+D" (high)),
+ "S" (v),
+ "memory");
return (int)a;
}
#define arch_atomic64_add_unless arch_atomic64_add_unless
@@ -206,8 +224,10 @@ static __always_inline int arch_atomic64_add_unless(atomic64_t *v, s64 a, s64 u)
static __always_inline int arch_atomic64_inc_not_zero(atomic64_t *v)
{
int r;
- alternative_atomic64(inc_not_zero, "=&a" (r),
- "S" (v) : "ecx", "edx", "memory");
+ alternative_atomic64(inc_not_zero,
+ "=&a" (r),
+ "S" (v),
+ "ecx", "edx", "memory");
return r;
}
#define arch_atomic64_inc_not_zero arch_atomic64_inc_not_zero
@@ -215,8 +235,10 @@ static __always_inline int arch_atomic64_inc_not_zero(atomic64_t *v)
static __always_inline s64 arch_atomic64_dec_if_positive(atomic64_t *v)
{
s64 r;
- alternative_atomic64(dec_if_positive, "=&A" (r),
- "S" (v) : "ecx", "memory");
+ alternative_atomic64(dec_if_positive,
+ "=&A" (r),
+ "S" (v),
+ "ecx", "memory");
return r;
}
#define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive
diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
index ae12acae5b06..87b496325b5b 100644
--- a/arch/x86/include/asm/atomic64_64.h
+++ b/arch/x86/include/asm/atomic64_64.h
@@ -22,14 +22,14 @@ static __always_inline void arch_atomic64_set(atomic64_t *v, s64 i)
static __always_inline void arch_atomic64_add(s64 i, atomic64_t *v)
{
- asm volatile(LOCK_PREFIX "addq %1,%0"
+ asm_inline volatile(LOCK_PREFIX "addq %1, %0"
: "=m" (v->counter)
: "er" (i), "m" (v->counter) : "memory");
}
static __always_inline void arch_atomic64_sub(s64 i, atomic64_t *v)
{
- asm volatile(LOCK_PREFIX "subq %1,%0"
+ asm_inline volatile(LOCK_PREFIX "subq %1, %0"
: "=m" (v->counter)
: "er" (i), "m" (v->counter) : "memory");
}
@@ -42,7 +42,7 @@ static __always_inline bool arch_atomic64_sub_and_test(s64 i, atomic64_t *v)
static __always_inline void arch_atomic64_inc(atomic64_t *v)
{
- asm volatile(LOCK_PREFIX "incq %0"
+ asm_inline volatile(LOCK_PREFIX "incq %0"
: "=m" (v->counter)
: "m" (v->counter) : "memory");
}
@@ -50,7 +50,7 @@ static __always_inline void arch_atomic64_inc(atomic64_t *v)
static __always_inline void arch_atomic64_dec(atomic64_t *v)
{
- asm volatile(LOCK_PREFIX "decq %0"
+ asm_inline volatile(LOCK_PREFIX "decq %0"
: "=m" (v->counter)
: "m" (v->counter) : "memory");
}
@@ -110,7 +110,7 @@ static __always_inline s64 arch_atomic64_xchg(atomic64_t *v, s64 new)
static __always_inline void arch_atomic64_and(s64 i, atomic64_t *v)
{
- asm volatile(LOCK_PREFIX "andq %1,%0"
+ asm_inline volatile(LOCK_PREFIX "andq %1, %0"
: "+m" (v->counter)
: "er" (i)
: "memory");
@@ -128,7 +128,7 @@ static __always_inline s64 arch_atomic64_fetch_and(s64 i, atomic64_t *v)
static __always_inline void arch_atomic64_or(s64 i, atomic64_t *v)
{
- asm volatile(LOCK_PREFIX "orq %1,%0"
+ asm_inline volatile(LOCK_PREFIX "orq %1, %0"
: "+m" (v->counter)
: "er" (i)
: "memory");
@@ -146,7 +146,7 @@ static __always_inline s64 arch_atomic64_fetch_or(s64 i, atomic64_t *v)
static __always_inline void arch_atomic64_xor(s64 i, atomic64_t *v)
{
- asm volatile(LOCK_PREFIX "xorq %1,%0"
+ asm_inline volatile(LOCK_PREFIX "xorq %1, %0"
: "+m" (v->counter)
: "er" (i)
: "memory");
diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h
index 7b44b3c4cce1..db70832232d4 100644
--- a/arch/x86/include/asm/barrier.h
+++ b/arch/x86/include/asm/barrier.h
@@ -12,11 +12,11 @@
*/
#ifdef CONFIG_X86_32
-#define mb() asm volatile(ALTERNATIVE("lock; addl $0,-4(%%esp)", "mfence", \
+#define mb() asm volatile(ALTERNATIVE("lock addl $0,-4(%%esp)", "mfence", \
X86_FEATURE_XMM2) ::: "memory", "cc")
-#define rmb() asm volatile(ALTERNATIVE("lock; addl $0,-4(%%esp)", "lfence", \
+#define rmb() asm volatile(ALTERNATIVE("lock addl $0,-4(%%esp)", "lfence", \
X86_FEATURE_XMM2) ::: "memory", "cc")
-#define wmb() asm volatile(ALTERNATIVE("lock; addl $0,-4(%%esp)", "sfence", \
+#define wmb() asm volatile(ALTERNATIVE("lock addl $0,-4(%%esp)", "sfence", \
X86_FEATURE_XMM2) ::: "memory", "cc")
#else
#define __mb() asm volatile("mfence":::"memory")
@@ -50,7 +50,7 @@
#define __dma_rmb() barrier()
#define __dma_wmb() barrier()
-#define __smp_mb() asm volatile("lock; addl $0,-4(%%" _ASM_SP ")" ::: "memory", "cc")
+#define __smp_mb() asm volatile("lock addl $0,-4(%%" _ASM_SP ")" ::: "memory", "cc")
#define __smp_rmb() dma_rmb()
#define __smp_wmb() barrier()
diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
index b96d45944c59..100413aff640 100644
--- a/arch/x86/include/asm/bitops.h
+++ b/arch/x86/include/asm/bitops.h
@@ -52,12 +52,12 @@ static __always_inline void
arch_set_bit(long nr, volatile unsigned long *addr)
{
if (__builtin_constant_p(nr)) {
- asm volatile(LOCK_PREFIX "orb %b1,%0"
+ asm_inline volatile(LOCK_PREFIX "orb %b1,%0"
: CONST_MASK_ADDR(nr, addr)
: "iq" (CONST_MASK(nr))
: "memory");
} else {
- asm volatile(LOCK_PREFIX __ASM_SIZE(bts) " %1,%0"
+ asm_inline volatile(LOCK_PREFIX __ASM_SIZE(bts) " %1,%0"
: : RLONG_ADDR(addr), "Ir" (nr) : "memory");
}
}
@@ -72,11 +72,11 @@ static __always_inline void
arch_clear_bit(long nr, volatile unsigned long *addr)
{
if (__builtin_constant_p(nr)) {
- asm volatile(LOCK_PREFIX "andb %b1,%0"
+ asm_inline volatile(LOCK_PREFIX "andb %b1,%0"
: CONST_MASK_ADDR(nr, addr)
: "iq" (~CONST_MASK(nr)));
} else {
- asm volatile(LOCK_PREFIX __ASM_SIZE(btr) " %1,%0"
+ asm_inline volatile(LOCK_PREFIX __ASM_SIZE(btr) " %1,%0"
: : RLONG_ADDR(addr), "Ir" (nr) : "memory");
}
}
@@ -98,7 +98,7 @@ static __always_inline bool arch_xor_unlock_is_negative_byte(unsigned long mask,
volatile unsigned long *addr)
{
bool negative;
- asm volatile(LOCK_PREFIX "xorb %2,%1"
+ asm_inline volatile(LOCK_PREFIX "xorb %2,%1"
CC_SET(s)
: CC_OUT(s) (negative), WBYTE_ADDR(addr)
: "iq" ((char)mask) : "memory");
@@ -122,11 +122,11 @@ static __always_inline void
arch_change_bit(long nr, volatile unsigned long *addr)
{
if (__builtin_constant_p(nr)) {
- asm volatile(LOCK_PREFIX "xorb %b1,%0"
+ asm_inline volatile(LOCK_PREFIX "xorb %b1,%0"
: CONST_MASK_ADDR(nr, addr)
: "iq" (CONST_MASK(nr)));
} else {
- asm volatile(LOCK_PREFIX __ASM_SIZE(btc) " %1,%0"
+ asm_inline volatile(LOCK_PREFIX __ASM_SIZE(btc) " %1,%0"
: : RLONG_ADDR(addr), "Ir" (nr) : "memory");
}
}
diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
index 3e5b111e619d..3f02ff6d333d 100644
--- a/arch/x86/include/asm/boot.h
+++ b/arch/x86/include/asm/boot.h
@@ -74,7 +74,7 @@
# define BOOT_STACK_SIZE 0x1000
#endif
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
extern unsigned int output_len;
extern const unsigned long kernel_text_size;
extern const unsigned long kernel_total_size;
diff --git a/arch/x86/include/asm/bug.h b/arch/x86/include/asm/bug.h
index e85ac0c7c039..f0e9acf72547 100644
--- a/arch/x86/include/asm/bug.h
+++ b/arch/x86/include/asm/bug.h
@@ -17,13 +17,17 @@
* In clang we have UD1s reporting UBSAN failures on X86, 64 and 32bit.
*/
#define INSN_ASOP 0x67
+#define INSN_LOCK 0xf0
#define OPCODE_ESCAPE 0x0f
#define SECOND_BYTE_OPCODE_UD1 0xb9
#define SECOND_BYTE_OPCODE_UD2 0x0b
#define BUG_NONE 0xffff
-#define BUG_UD1 0xfffe
-#define BUG_UD2 0xfffd
+#define BUG_UD2 0xfffe
+#define BUG_UD1 0xfffd
+#define BUG_UD1_UBSAN 0xfffc
+#define BUG_EA 0xffea
+#define BUG_LOCK 0xfff0
#ifdef CONFIG_GENERIC_BUG
diff --git a/arch/x86/include/asm/cfi.h b/arch/x86/include/asm/cfi.h
index 31d19c815f99..3e51ba459154 100644
--- a/arch/x86/include/asm/cfi.h
+++ b/arch/x86/include/asm/cfi.h
@@ -101,6 +101,16 @@ enum cfi_mode {
extern enum cfi_mode cfi_mode;
+#ifdef CONFIG_FINEIBT_BHI
+extern bool cfi_bhi;
+#else
+#define cfi_bhi (0)
+#endif
+
+typedef u8 bhi_thunk[32];
+extern bhi_thunk __bhi_args[];
+extern bhi_thunk __bhi_args_end[];
+
struct pt_regs;
#ifdef CONFIG_CFI_CLANG
@@ -125,6 +135,18 @@ static inline int cfi_get_offset(void)
#define cfi_get_offset cfi_get_offset
extern u32 cfi_get_func_hash(void *func);
+extern int cfi_get_func_arity(void *func);
+
+#ifdef CONFIG_FINEIBT
+extern bool decode_fineibt_insn(struct pt_regs *regs, unsigned long *target, u32 *type);
+#else
+static inline bool
+decode_fineibt_insn(struct pt_regs *regs, unsigned long *target, u32 *type)
+{
+ return false;
+}
+
+#endif
#else
static inline enum bug_trap_type handle_cfi_failure(struct pt_regs *regs)
@@ -137,6 +159,10 @@ static inline u32 cfi_get_func_hash(void *func)
{
return 0;
}
+static inline int cfi_get_func_arity(void *func)
+{
+ return 0;
+}
#endif /* CONFIG_CFI_CLANG */
#if HAS_KERNEL_IBT == 1
diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h
index 5612648b0202..b61f32c3459f 100644
--- a/arch/x86/include/asm/cmpxchg.h
+++ b/arch/x86/include/asm/cmpxchg.h
@@ -44,22 +44,22 @@ extern void __add_wrong_size(void)
__typeof__ (*(ptr)) __ret = (arg); \
switch (sizeof(*(ptr))) { \
case __X86_CASE_B: \
- asm volatile (lock #op "b %b0, %1\n" \
+ asm_inline volatile (lock #op "b %b0, %1" \
: "+q" (__ret), "+m" (*(ptr)) \
: : "memory", "cc"); \
break; \
case __X86_CASE_W: \
- asm volatile (lock #op "w %w0, %1\n" \
+ asm_inline volatile (lock #op "w %w0, %1" \
: "+r" (__ret), "+m" (*(ptr)) \
: : "memory", "cc"); \
break; \
case __X86_CASE_L: \
- asm volatile (lock #op "l %0, %1\n" \
+ asm_inline volatile (lock #op "l %0, %1" \
: "+r" (__ret), "+m" (*(ptr)) \
: : "memory", "cc"); \
break; \
case __X86_CASE_Q: \
- asm volatile (lock #op "q %q0, %1\n" \
+ asm_inline volatile (lock #op "q %q0, %1" \
: "+r" (__ret), "+m" (*(ptr)) \
: : "memory", "cc"); \
break; \
@@ -91,7 +91,7 @@ extern void __add_wrong_size(void)
case __X86_CASE_B: \
{ \
volatile u8 *__ptr = (volatile u8 *)(ptr); \
- asm volatile(lock "cmpxchgb %2,%1" \
+ asm_inline volatile(lock "cmpxchgb %2, %1" \
: "=a" (__ret), "+m" (*__ptr) \
: "q" (__new), "0" (__old) \
: "memory"); \
@@ -100,7 +100,7 @@ extern void __add_wrong_size(void)
case __X86_CASE_W: \
{ \
volatile u16 *__ptr = (volatile u16 *)(ptr); \
- asm volatile(lock "cmpxchgw %2,%1" \
+ asm_inline volatile(lock "cmpxchgw %2, %1" \
: "=a" (__ret), "+m" (*__ptr) \
: "r" (__new), "0" (__old) \
: "memory"); \
@@ -109,7 +109,7 @@ extern void __add_wrong_size(void)
case __X86_CASE_L: \
{ \
volatile u32 *__ptr = (volatile u32 *)(ptr); \
- asm volatile(lock "cmpxchgl %2,%1" \
+ asm_inline volatile(lock "cmpxchgl %2, %1" \
: "=a" (__ret), "+m" (*__ptr) \
: "r" (__new), "0" (__old) \
: "memory"); \
@@ -118,7 +118,7 @@ extern void __add_wrong_size(void)
case __X86_CASE_Q: \
{ \
volatile u64 *__ptr = (volatile u64 *)(ptr); \
- asm volatile(lock "cmpxchgq %2,%1" \
+ asm_inline volatile(lock "cmpxchgq %2, %1" \
: "=a" (__ret), "+m" (*__ptr) \
: "r" (__new), "0" (__old) \
: "memory"); \
@@ -134,7 +134,7 @@ extern void __add_wrong_size(void)
__raw_cmpxchg((ptr), (old), (new), (size), LOCK_PREFIX)
#define __sync_cmpxchg(ptr, old, new, size) \
- __raw_cmpxchg((ptr), (old), (new), (size), "lock; ")
+ __raw_cmpxchg((ptr), (old), (new), (size), "lock ")
#define __cmpxchg_local(ptr, old, new, size) \
__raw_cmpxchg((ptr), (old), (new), (size), "")
@@ -165,7 +165,7 @@ extern void __add_wrong_size(void)
case __X86_CASE_B: \
{ \
volatile u8 *__ptr = (volatile u8 *)(_ptr); \
- asm volatile(lock "cmpxchgb %[new], %[ptr]" \
+ asm_inline volatile(lock "cmpxchgb %[new], %[ptr]" \
CC_SET(z) \
: CC_OUT(z) (success), \
[ptr] "+m" (*__ptr), \
@@ -177,7 +177,7 @@ extern void __add_wrong_size(void)
case __X86_CASE_W: \
{ \
volatile u16 *__ptr = (volatile u16 *)(_ptr); \
- asm volatile(lock "cmpxchgw %[new], %[ptr]" \
+ asm_inline volatile(lock "cmpxchgw %[new], %[ptr]" \
CC_SET(z) \
: CC_OUT(z) (success), \
[ptr] "+m" (*__ptr), \
@@ -189,7 +189,7 @@ extern void __add_wrong_size(void)
case __X86_CASE_L: \
{ \
volatile u32 *__ptr = (volatile u32 *)(_ptr); \
- asm volatile(lock "cmpxchgl %[new], %[ptr]" \
+ asm_inline volatile(lock "cmpxchgl %[new], %[ptr]" \
CC_SET(z) \
: CC_OUT(z) (success), \
[ptr] "+m" (*__ptr), \
@@ -201,7 +201,7 @@ extern void __add_wrong_size(void)
case __X86_CASE_Q: \
{ \
volatile u64 *__ptr = (volatile u64 *)(_ptr); \
- asm volatile(lock "cmpxchgq %[new], %[ptr]" \
+ asm_inline volatile(lock "cmpxchgq %[new], %[ptr]" \
CC_SET(z) \
: CC_OUT(z) (success), \
[ptr] "+m" (*__ptr), \
@@ -222,7 +222,7 @@ extern void __add_wrong_size(void)
__raw_try_cmpxchg((ptr), (pold), (new), (size), LOCK_PREFIX)
#define __sync_try_cmpxchg(ptr, pold, new, size) \
- __raw_try_cmpxchg((ptr), (pold), (new), (size), "lock; ")
+ __raw_try_cmpxchg((ptr), (pold), (new), (size), "lock ")
#define __try_cmpxchg_local(ptr, pold, new, size) \
__raw_try_cmpxchg((ptr), (pold), (new), (size), "")
diff --git a/arch/x86/include/asm/cmpxchg_32.h b/arch/x86/include/asm/cmpxchg_32.h
index fd1282a783dd..371f7906019e 100644
--- a/arch/x86/include/asm/cmpxchg_32.h
+++ b/arch/x86/include/asm/cmpxchg_32.h
@@ -19,7 +19,7 @@ union __u64_halves {
union __u64_halves o = { .full = (_old), }, \
n = { .full = (_new), }; \
\
- asm volatile(_lock "cmpxchg8b %[ptr]" \
+ asm_inline volatile(_lock "cmpxchg8b %[ptr]" \
: [ptr] "+m" (*(_ptr)), \
"+a" (o.low), "+d" (o.high) \
: "b" (n.low), "c" (n.high) \
@@ -45,7 +45,7 @@ static __always_inline u64 __cmpxchg64_local(volatile u64 *ptr, u64 old, u64 new
n = { .full = (_new), }; \
bool ret; \
\
- asm volatile(_lock "cmpxchg8b %[ptr]" \
+ asm_inline volatile(_lock "cmpxchg8b %[ptr]" \
CC_SET(e) \
: CC_OUT(e) (ret), \
[ptr] "+m" (*(_ptr)), \
@@ -69,7 +69,7 @@ static __always_inline bool __try_cmpxchg64_local(volatile u64 *ptr, u64 *oldp,
return __arch_try_cmpxchg64(ptr, oldp, new,);
}
-#ifdef CONFIG_X86_CMPXCHG64
+#ifdef CONFIG_X86_CX8
#define arch_cmpxchg64 __cmpxchg64
@@ -91,19 +91,21 @@ static __always_inline bool __try_cmpxchg64_local(volatile u64 *ptr, u64 *oldp,
union __u64_halves o = { .full = (_old), }, \
n = { .full = (_new), }; \
\
- asm volatile(ALTERNATIVE(_lock_loc \
- "call cmpxchg8b_emu", \
- _lock "cmpxchg8b %a[ptr]", X86_FEATURE_CX8) \
- : ALT_OUTPUT_SP("+a" (o.low), "+d" (o.high)) \
- : "b" (n.low), "c" (n.high), [ptr] "S" (_ptr) \
- : "memory"); \
+ asm_inline volatile( \
+ ALTERNATIVE(_lock_loc \
+ "call cmpxchg8b_emu", \
+ _lock "cmpxchg8b %a[ptr]", X86_FEATURE_CX8) \
+ : ALT_OUTPUT_SP("+a" (o.low), "+d" (o.high)) \
+ : "b" (n.low), "c" (n.high), \
+ [ptr] "S" (_ptr) \
+ : "memory"); \
\
o.full; \
})
static __always_inline u64 arch_cmpxchg64(volatile u64 *ptr, u64 old, u64 new)
{
- return __arch_cmpxchg64_emu(ptr, old, new, LOCK_PREFIX_HERE, "lock; ");
+ return __arch_cmpxchg64_emu(ptr, old, new, LOCK_PREFIX_HERE, "lock ");
}
#define arch_cmpxchg64 arch_cmpxchg64
@@ -119,14 +121,16 @@ static __always_inline u64 arch_cmpxchg64_local(volatile u64 *ptr, u64 old, u64
n = { .full = (_new), }; \
bool ret; \
\
- asm volatile(ALTERNATIVE(_lock_loc \
- "call cmpxchg8b_emu", \
- _lock "cmpxchg8b %a[ptr]", X86_FEATURE_CX8) \
- CC_SET(e) \
- : ALT_OUTPUT_SP(CC_OUT(e) (ret), \
- "+a" (o.low), "+d" (o.high)) \
- : "b" (n.low), "c" (n.high), [ptr] "S" (_ptr) \
- : "memory"); \
+ asm_inline volatile( \
+ ALTERNATIVE(_lock_loc \
+ "call cmpxchg8b_emu", \
+ _lock "cmpxchg8b %a[ptr]", X86_FEATURE_CX8) \
+ CC_SET(e) \
+ : ALT_OUTPUT_SP(CC_OUT(e) (ret), \
+ "+a" (o.low), "+d" (o.high)) \
+ : "b" (n.low), "c" (n.high), \
+ [ptr] "S" (_ptr) \
+ : "memory"); \
\
if (unlikely(!ret)) \
*(_oldp) = o.full; \
@@ -136,7 +140,7 @@ static __always_inline u64 arch_cmpxchg64_local(volatile u64 *ptr, u64 old, u64
static __always_inline bool arch_try_cmpxchg64(volatile u64 *ptr, u64 *oldp, u64 new)
{
- return __arch_try_cmpxchg64_emu(ptr, oldp, new, LOCK_PREFIX_HERE, "lock; ");
+ return __arch_try_cmpxchg64_emu(ptr, oldp, new, LOCK_PREFIX_HERE, "lock ");
}
#define arch_try_cmpxchg64 arch_try_cmpxchg64
diff --git a/arch/x86/include/asm/cmpxchg_64.h b/arch/x86/include/asm/cmpxchg_64.h
index 5e241306db26..71d1e72ed879 100644
--- a/arch/x86/include/asm/cmpxchg_64.h
+++ b/arch/x86/include/asm/cmpxchg_64.h
@@ -38,7 +38,7 @@ union __u128_halves {
union __u128_halves o = { .full = (_old), }, \
n = { .full = (_new), }; \
\
- asm volatile(_lock "cmpxchg16b %[ptr]" \
+ asm_inline volatile(_lock "cmpxchg16b %[ptr]" \
: [ptr] "+m" (*(_ptr)), \
"+a" (o.low), "+d" (o.high) \
: "b" (n.low), "c" (n.high) \
@@ -65,7 +65,7 @@ static __always_inline u128 arch_cmpxchg128_local(volatile u128 *ptr, u128 old,
n = { .full = (_new), }; \
bool ret; \
\
- asm volatile(_lock "cmpxchg16b %[ptr]" \
+ asm_inline volatile(_lock "cmpxchg16b %[ptr]" \
CC_SET(e) \
: CC_OUT(e) (ret), \
[ptr] "+m" (*(_ptr)), \
diff --git a/arch/x86/include/asm/coco.h b/arch/x86/include/asm/coco.h
index aa6c8f8ca958..e7225452963f 100644
--- a/arch/x86/include/asm/coco.h
+++ b/arch/x86/include/asm/coco.h
@@ -15,6 +15,11 @@ enum cc_vendor {
extern enum cc_vendor cc_vendor;
extern u64 cc_mask;
+static inline u64 cc_get_mask(void)
+{
+ return cc_mask;
+}
+
static inline void cc_set_mask(u64 mask)
{
RIP_REL_REF(cc_mask) = mask;
@@ -25,7 +30,10 @@ u64 cc_mkdec(u64 val);
void cc_random_init(void);
#else
#define cc_vendor (CC_VENDOR_NONE)
-static const u64 cc_mask = 0;
+static inline u64 cc_get_mask(void)
+{
+ return 0;
+}
static inline u64 cc_mkenc(u64 val)
{
diff --git a/arch/x86/include/asm/cpu.h b/arch/x86/include/asm/cpu.h
index 98eced5084ca..ad235dda1ded 100644
--- a/arch/x86/include/asm/cpu.h
+++ b/arch/x86/include/asm/cpu.h
@@ -12,7 +12,6 @@
#ifndef CONFIG_SMP
#define cpu_physical_id(cpu) boot_cpu_physical_apicid
#define cpu_acpi_id(cpu) 0
-#define safe_smp_processor_id() 0
#endif /* CONFIG_SMP */
#ifdef CONFIG_HOTPLUG_CPU
@@ -50,20 +49,6 @@ static inline void split_lock_init(void) {}
static inline void bus_lock_init(void) {}
#endif
-#ifdef CONFIG_CPU_SUP_INTEL
-u8 get_this_hybrid_cpu_type(void);
-u32 get_this_hybrid_cpu_native_id(void);
-#else
-static inline u8 get_this_hybrid_cpu_type(void)
-{
- return 0;
-}
-
-static inline u32 get_this_hybrid_cpu_native_id(void)
-{
- return 0;
-}
-#endif
#ifdef CONFIG_IA32_FEAT_CTL
void init_ia32_feat_ctl(struct cpuinfo_x86 *c);
#else
diff --git a/arch/x86/include/asm/cpu_device_id.h b/arch/x86/include/asm/cpu_device_id.h
index ba32e0f44cba..6be777a06944 100644
--- a/arch/x86/include/asm/cpu_device_id.h
+++ b/arch/x86/include/asm/cpu_device_id.h
@@ -57,7 +57,7 @@
#define X86_CPU_ID_FLAG_ENTRY_VALID BIT(0)
/**
- * X86_MATCH_VENDOR_FAM_MODEL_STEPPINGS_FEATURE - Base macro for CPU matching
+ * X86_MATCH_CPU - Base macro for CPU matching
* @_vendor: The vendor name, e.g. INTEL, AMD, HYGON, ..., ANY
* The name is expanded to X86_VENDOR_@_vendor
* @_family: The family number or X86_FAMILY_ANY
@@ -74,47 +74,18 @@
* into another macro at the usage site for good reasons, then please
* start this local macro with X86_MATCH to allow easy grepping.
*/
-#define X86_MATCH_VENDOR_FAM_MODEL_STEPPINGS_FEATURE(_vendor, _family, _model, \
- _steppings, _feature, _data) { \
- .vendor = X86_VENDOR_##_vendor, \
- .family = _family, \
- .model = _model, \
- .steppings = _steppings, \
- .feature = _feature, \
- .flags = X86_CPU_ID_FLAG_ENTRY_VALID, \
- .driver_data = (unsigned long) _data \
-}
-
-#define X86_MATCH_VENDORID_FAM_MODEL_STEPPINGS_FEATURE(_vendor, _family, _model, \
- _steppings, _feature, _data) { \
+#define X86_MATCH_CPU(_vendor, _family, _model, _steppings, _feature, _type, _data) { \
.vendor = _vendor, \
.family = _family, \
.model = _model, \
.steppings = _steppings, \
.feature = _feature, \
.flags = X86_CPU_ID_FLAG_ENTRY_VALID, \
+ .type = _type, \
.driver_data = (unsigned long) _data \
}
/**
- * X86_MATCH_VENDOR_FAM_MODEL_FEATURE - Macro for CPU matching
- * @_vendor: The vendor name, e.g. INTEL, AMD, HYGON, ..., ANY
- * The name is expanded to X86_VENDOR_@_vendor
- * @_family: The family number or X86_FAMILY_ANY
- * @_model: The model number, model constant or X86_MODEL_ANY
- * @_feature: A X86_FEATURE bit or X86_FEATURE_ANY
- * @_data: Driver specific data or NULL. The internal storage
- * format is unsigned long. The supplied value, pointer
- * etc. is casted to unsigned long internally.
- *
- * The steppings arguments of X86_MATCH_VENDOR_FAM_MODEL_STEPPINGS_FEATURE() is
- * set to wildcards.
- */
-#define X86_MATCH_VENDOR_FAM_MODEL_FEATURE(vendor, family, model, feature, data) \
- X86_MATCH_VENDOR_FAM_MODEL_STEPPINGS_FEATURE(vendor, family, model, \
- X86_STEPPING_ANY, feature, data)
-
-/**
* X86_MATCH_VENDOR_FAM_FEATURE - Macro for matching vendor, family and CPU feature
* @vendor: The vendor name, e.g. INTEL, AMD, HYGON, ..., ANY
* The name is expanded to X86_VENDOR_@vendor
@@ -123,13 +94,10 @@
* @data: Driver specific data or NULL. The internal storage
* format is unsigned long. The supplied value, pointer
* etc. is casted to unsigned long internally.
- *
- * All other missing arguments of X86_MATCH_VENDOR_FAM_MODEL_FEATURE() are
- * set to wildcards.
*/
-#define X86_MATCH_VENDOR_FAM_FEATURE(vendor, family, feature, data) \
- X86_MATCH_VENDOR_FAM_MODEL_FEATURE(vendor, family, \
- X86_MODEL_ANY, feature, data)
+#define X86_MATCH_VENDOR_FAM_FEATURE(vendor, family, feature, data) \
+ X86_MATCH_CPU(X86_VENDOR_##vendor, family, X86_MODEL_ANY, \
+ X86_STEPPING_ANY, feature, X86_CPU_TYPE_ANY, data)
/**
* X86_MATCH_VENDOR_FEATURE - Macro for matching vendor and CPU feature
@@ -139,12 +107,10 @@
* @data: Driver specific data or NULL. The internal storage
* format is unsigned long. The supplied value, pointer
* etc. is casted to unsigned long internally.
- *
- * All other missing arguments of X86_MATCH_VENDOR_FAM_MODEL_FEATURE() are
- * set to wildcards.
*/
-#define X86_MATCH_VENDOR_FEATURE(vendor, feature, data) \
- X86_MATCH_VENDOR_FAM_FEATURE(vendor, X86_FAMILY_ANY, feature, data)
+#define X86_MATCH_VENDOR_FEATURE(vendor, feature, data) \
+ X86_MATCH_CPU(X86_VENDOR_##vendor, X86_FAMILY_ANY, X86_MODEL_ANY, \
+ X86_STEPPING_ANY, feature, X86_CPU_TYPE_ANY, data)
/**
* X86_MATCH_FEATURE - Macro for matching a CPU feature
@@ -152,12 +118,10 @@
* @data: Driver specific data or NULL. The internal storage
* format is unsigned long. The supplied value, pointer
* etc. is casted to unsigned long internally.
- *
- * All other missing arguments of X86_MATCH_VENDOR_FAM_MODEL_FEATURE() are
- * set to wildcards.
*/
-#define X86_MATCH_FEATURE(feature, data) \
- X86_MATCH_VENDOR_FEATURE(ANY, feature, data)
+#define X86_MATCH_FEATURE(feature, data) \
+ X86_MATCH_CPU(X86_VENDOR_ANY, X86_FAMILY_ANY, X86_MODEL_ANY, \
+ X86_STEPPING_ANY, feature, X86_CPU_TYPE_ANY, data)
/**
* X86_MATCH_VENDOR_FAM_MODEL - Match vendor, family and model
@@ -168,13 +132,10 @@
* @data: Driver specific data or NULL. The internal storage
* format is unsigned long. The supplied value, pointer
* etc. is casted to unsigned long internally.
- *
- * All other missing arguments of X86_MATCH_VENDOR_FAM_MODEL_FEATURE() are
- * set to wildcards.
*/
-#define X86_MATCH_VENDOR_FAM_MODEL(vendor, family, model, data) \
- X86_MATCH_VENDOR_FAM_MODEL_FEATURE(vendor, family, model, \
- X86_FEATURE_ANY, data)
+#define X86_MATCH_VENDOR_FAM_MODEL(vendor, family, model, data) \
+ X86_MATCH_CPU(X86_VENDOR_##vendor, family, model, X86_STEPPING_ANY, \
+ X86_FEATURE_ANY, X86_CPU_TYPE_ANY, data)
/**
* X86_MATCH_VENDOR_FAM - Match vendor and family
@@ -184,12 +145,10 @@
* @data: Driver specific data or NULL. The internal storage
* format is unsigned long. The supplied value, pointer
* etc. is casted to unsigned long internally.
- *
- * All other missing arguments to X86_MATCH_VENDOR_FAM_MODEL_FEATURE() are
- * set of wildcards.
*/
-#define X86_MATCH_VENDOR_FAM(vendor, family, data) \
- X86_MATCH_VENDOR_FAM_MODEL(vendor, family, X86_MODEL_ANY, data)
+#define X86_MATCH_VENDOR_FAM(vendor, family, data) \
+ X86_MATCH_CPU(X86_VENDOR_##vendor, family, X86_MODEL_ANY, \
+ X86_STEPPING_ANY, X86_FEATURE_ANY, X86_CPU_TYPE_ANY, data)
/**
* X86_MATCH_VFM - Match encoded vendor/family/model
@@ -197,34 +156,26 @@
* @data: Driver specific data or NULL. The internal storage
* format is unsigned long. The supplied value, pointer
* etc. is cast to unsigned long internally.
- *
- * Stepping and feature are set to wildcards
*/
-#define X86_MATCH_VFM(vfm, data) \
- X86_MATCH_VENDORID_FAM_MODEL_STEPPINGS_FEATURE( \
- VFM_VENDOR(vfm), \
- VFM_FAMILY(vfm), \
- VFM_MODEL(vfm), \
- X86_STEPPING_ANY, X86_FEATURE_ANY, data)
+#define X86_MATCH_VFM(vfm, data) \
+ X86_MATCH_CPU(VFM_VENDOR(vfm), VFM_FAMILY(vfm), VFM_MODEL(vfm), \
+ X86_STEPPING_ANY, X86_FEATURE_ANY, X86_CPU_TYPE_ANY, data)
#define __X86_STEPPINGS(mins, maxs) GENMASK(maxs, mins)
/**
- * X86_MATCH_VFM_STEPPINGS - Match encoded vendor/family/model/stepping
+ * X86_MATCH_VFM_STEPS - Match encoded vendor/family/model and steppings
+ * range.
* @vfm: Encoded 8-bits each for vendor, family, model
- * @steppings: Bitmask of steppings to match
+ * @min_step: Lowest stepping number to match
+ * @max_step: Highest stepping number to match
* @data: Driver specific data or NULL. The internal storage
* format is unsigned long. The supplied value, pointer
* etc. is cast to unsigned long internally.
- *
- * feature is set to wildcard
*/
-#define X86_MATCH_VFM_STEPS(vfm, min_step, max_step, data) \
- X86_MATCH_VENDORID_FAM_MODEL_STEPPINGS_FEATURE( \
- VFM_VENDOR(vfm), \
- VFM_FAMILY(vfm), \
- VFM_MODEL(vfm), \
- __X86_STEPPINGS(min_step, max_step), \
- X86_FEATURE_ANY, data)
+#define X86_MATCH_VFM_STEPS(vfm, min_step, max_step, data) \
+ X86_MATCH_CPU(VFM_VENDOR(vfm), VFM_FAMILY(vfm), VFM_MODEL(vfm), \
+ __X86_STEPPINGS(min_step, max_step), X86_FEATURE_ANY, \
+ X86_CPU_TYPE_ANY, data)
/**
* X86_MATCH_VFM_FEATURE - Match encoded vendor/family/model/feature
@@ -233,15 +184,22 @@
* @data: Driver specific data or NULL. The internal storage
* format is unsigned long. The supplied value, pointer
* etc. is cast to unsigned long internally.
- *
- * Steppings is set to wildcard
*/
-#define X86_MATCH_VFM_FEATURE(vfm, feature, data) \
- X86_MATCH_VENDORID_FAM_MODEL_STEPPINGS_FEATURE( \
- VFM_VENDOR(vfm), \
- VFM_FAMILY(vfm), \
- VFM_MODEL(vfm), \
- X86_STEPPING_ANY, feature, data)
+#define X86_MATCH_VFM_FEATURE(vfm, feature, data) \
+ X86_MATCH_CPU(VFM_VENDOR(vfm), VFM_FAMILY(vfm), VFM_MODEL(vfm), \
+ X86_STEPPING_ANY, feature, X86_CPU_TYPE_ANY, data)
+
+/**
+ * X86_MATCH_VFM_CPU_TYPE - Match encoded vendor/family/model/type
+ * @vfm: Encoded 8-bits each for vendor, family, model
+ * @type: CPU type e.g. P-core, E-core
+ * @data: Driver specific data or NULL. The internal storage
+ * format is unsigned long. The supplied value, pointer
+ * etc. is cast to unsigned long internally.
+ */
+#define X86_MATCH_VFM_CPU_TYPE(vfm, type, data) \
+ X86_MATCH_CPU(VFM_VENDOR(vfm), VFM_FAMILY(vfm), VFM_MODEL(vfm), \
+ X86_STEPPING_ANY, X86_FEATURE_ANY, type, data)
extern const struct x86_cpu_id *x86_match_cpu(const struct x86_cpu_id *match);
extern bool x86_match_min_microcode_rev(const struct x86_cpu_id *table);
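A usage sketch of the consolidated wrappers (the models, stepping range and the CPU-type constant below are illustrative assumptions, not defined by this patch):

#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>

static const struct x86_cpu_id example_cpu_ids[] = {
	X86_MATCH_VFM(INTEL_SAPPHIRERAPIDS_X, NULL),
	X86_MATCH_VFM_STEPS(INTEL_SKYLAKE_X, 0x4, 0xf, NULL),
	/* INTEL_CPU_TYPE_ATOM is assumed to be provided elsewhere. */
	X86_MATCH_VFM_CPU_TYPE(INTEL_ALDERLAKE, INTEL_CPU_TYPE_ATOM, NULL),
	{ }
};
MODULE_DEVICE_TABLE(x86cpu, example_cpu_ids);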
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index de1ad09fe8d7..893cbca37fe9 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -4,11 +4,12 @@
#include <asm/processor.h>
-#if defined(__KERNEL__) && !defined(__ASSEMBLY__)
+#if defined(__KERNEL__) && !defined(__ASSEMBLER__)
#include <asm/asm.h>
#include <linux/bitops.h>
#include <asm/alternative.h>
+#include <asm/cpufeaturemasks.h>
enum cpuid_leafs
{
@@ -37,92 +38,19 @@ enum cpuid_leafs
NR_CPUID_WORDS,
};
-#define X86_CAP_FMT_NUM "%d:%d"
-#define x86_cap_flag_num(flag) ((flag) >> 5), ((flag) & 31)
-
extern const char * const x86_cap_flags[NCAPINTS*32];
extern const char * const x86_power_flags[32];
-#define X86_CAP_FMT "%s"
-#define x86_cap_flag(flag) x86_cap_flags[flag]
/*
* In order to save room, we index into this array by doing
* X86_BUG_<name> - NCAPINTS*32.
*/
extern const char * const x86_bug_flags[NBUGINTS*32];
+#define x86_bug_flag(flag) x86_bug_flags[flag]
#define test_cpu_cap(c, bit) \
arch_test_bit(bit, (unsigned long *)((c)->x86_capability))
-/*
- * There are 32 bits/features in each mask word. The high bits
- * (selected with (bit>>5) give us the word number and the low 5
- * bits give us the bit/feature number inside the word.
- * (1UL<<((bit)&31) gives us a mask for the feature_bit so we can
- * see if it is set in the mask word.
- */
-#define CHECK_BIT_IN_MASK_WORD(maskname, word, bit) \
- (((bit)>>5)==(word) && (1UL<<((bit)&31) & maskname##word ))
-
-/*
- * {REQUIRED,DISABLED}_MASK_CHECK below may seem duplicated with the
- * following BUILD_BUG_ON_ZERO() check but when NCAPINTS gets changed, all
- * header macros which use NCAPINTS need to be changed. The duplicated macro
- * use causes the compiler to issue errors for all headers so that all usage
- * sites can be corrected.
- */
-#define REQUIRED_MASK_BIT_SET(feature_bit) \
- ( CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 0, feature_bit) || \
- CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 1, feature_bit) || \
- CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 2, feature_bit) || \
- CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 3, feature_bit) || \
- CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 4, feature_bit) || \
- CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 5, feature_bit) || \
- CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 6, feature_bit) || \
- CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 7, feature_bit) || \
- CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 8, feature_bit) || \
- CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 9, feature_bit) || \
- CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 10, feature_bit) || \
- CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 11, feature_bit) || \
- CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 12, feature_bit) || \
- CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 13, feature_bit) || \
- CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 14, feature_bit) || \
- CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 15, feature_bit) || \
- CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 16, feature_bit) || \
- CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 17, feature_bit) || \
- CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 18, feature_bit) || \
- CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 19, feature_bit) || \
- CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 20, feature_bit) || \
- CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 21, feature_bit) || \
- REQUIRED_MASK_CHECK || \
- BUILD_BUG_ON_ZERO(NCAPINTS != 22))
-
-#define DISABLED_MASK_BIT_SET(feature_bit) \
- ( CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 0, feature_bit) || \
- CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 1, feature_bit) || \
- CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 2, feature_bit) || \
- CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 3, feature_bit) || \
- CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 4, feature_bit) || \
- CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 5, feature_bit) || \
- CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 6, feature_bit) || \
- CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 7, feature_bit) || \
- CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 8, feature_bit) || \
- CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 9, feature_bit) || \
- CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 10, feature_bit) || \
- CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 11, feature_bit) || \
- CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 12, feature_bit) || \
- CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 13, feature_bit) || \
- CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 14, feature_bit) || \
- CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 15, feature_bit) || \
- CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 16, feature_bit) || \
- CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 17, feature_bit) || \
- CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 18, feature_bit) || \
- CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 19, feature_bit) || \
- CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 20, feature_bit) || \
- CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 21, feature_bit) || \
- DISABLED_MASK_CHECK || \
- BUILD_BUG_ON_ZERO(NCAPINTS != 22))
-
#define cpu_has(c, bit) \
(__builtin_constant_p(bit) && REQUIRED_MASK_BIT_SET(bit) ? 1 : \
test_cpu_cap(c, bit))
@@ -149,6 +77,7 @@ extern const char * const x86_bug_flags[NBUGINTS*32];
extern void setup_clear_cpu_cap(unsigned int bit);
extern void clear_cpu_cap(struct cpuinfo_x86 *c, unsigned int bit);
+void check_cpufeature_deps(struct cpuinfo_x86 *c);
#define setup_force_cpu_cap(bit) do { \
\
@@ -208,5 +137,5 @@ t_no:
#define CPU_FEATURE_TYPEVAL boot_cpu_data.x86_vendor, boot_cpu_data.x86, \
boot_cpu_data.x86_model
-#endif /* defined(__KERNEL__) && !defined(__ASSEMBLY__) */
+#endif /* defined(__KERNEL__) && !defined(__ASSEMBLER__) */
#endif /* _ASM_X86_CPUFEATURE_H */
diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index 508c0dad116b..6c2c152d8a67 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -2,14 +2,6 @@
#ifndef _ASM_X86_CPUFEATURES_H
#define _ASM_X86_CPUFEATURES_H
-#ifndef _ASM_X86_REQUIRED_FEATURES_H
-#include <asm/required-features.h>
-#endif
-
-#ifndef _ASM_X86_DISABLED_FEATURES_H
-#include <asm/disabled-features.h>
-#endif
-
/*
* Defines x86 CPU feature bits
*/
@@ -210,7 +202,6 @@
#define X86_FEATURE_MBA ( 7*32+18) /* "mba" Memory Bandwidth Allocation */
#define X86_FEATURE_RSB_CTXSW ( 7*32+19) /* Fill RSB on context switches */
#define X86_FEATURE_PERFMON_V2 ( 7*32+20) /* "perfmon_v2" AMD Performance Monitoring Version 2 */
-#define X86_FEATURE_USE_IBPB ( 7*32+21) /* Indirect Branch Prediction Barrier enabled */
#define X86_FEATURE_USE_IBRS_FW ( 7*32+22) /* Use IBRS during runtime firmware calls */
#define X86_FEATURE_SPEC_STORE_BYPASS_DISABLE ( 7*32+23) /* Disable Speculative Store Bypass. */
#define X86_FEATURE_LS_CFG_SSBD ( 7*32+24) /* AMD SSBD implementation via LS_CFG MSR */
@@ -338,6 +329,7 @@
#define X86_FEATURE_CLZERO (13*32+ 0) /* "clzero" CLZERO instruction */
#define X86_FEATURE_IRPERF (13*32+ 1) /* "irperf" Instructions Retired Count */
#define X86_FEATURE_XSAVEERPTR (13*32+ 2) /* "xsaveerptr" Always save/restore FP error pointers */
+#define X86_FEATURE_INVLPGB (13*32+ 3) /* INVLPGB and TLBSYNC instructions supported */
#define X86_FEATURE_RDPRU (13*32+ 4) /* "rdpru" Read processor register at user level */
#define X86_FEATURE_WBNOINVD (13*32+ 9) /* "wbnoinvd" WBNOINVD instruction */
#define X86_FEATURE_AMD_IBPB (13*32+12) /* Indirect Branch Prediction Barrier */
@@ -386,6 +378,7 @@
#define X86_FEATURE_V_SPEC_CTRL (15*32+20) /* "v_spec_ctrl" Virtual SPEC_CTRL */
#define X86_FEATURE_VNMI (15*32+25) /* "vnmi" Virtual NMI */
#define X86_FEATURE_SVME_ADDR_CHK (15*32+28) /* SVME addr check */
+#define X86_FEATURE_IDLE_HLT (15*32+30) /* IDLE HLT intercept */
/* Intel-defined CPU features, CPUID level 0x00000007:0 (ECX), word 16 */
#define X86_FEATURE_AVX512VBMI (16*32+ 1) /* "avx512vbmi" AVX512 Vector Bit Manipulation instructions*/
@@ -468,6 +461,10 @@
#define X86_FEATURE_IBPB_BRTYPE (20*32+28) /* MSR_PRED_CMD[IBPB] flushes all branch type predictions */
#define X86_FEATURE_SRSO_NO (20*32+29) /* CPU is not affected by SRSO */
#define X86_FEATURE_SRSO_USER_KERNEL_NO (20*32+30) /* CPU is not affected by SRSO across user/kernel boundaries */
+#define X86_FEATURE_SRSO_BP_SPEC_REDUCE (20*32+31) /*
+ * BP_CFG[BpSpecReduce] can be used to mitigate SRSO for VMs.
+ * (SRSO_MSR_FIX in the official doc).
+ */
/*
* Extended auxiliary flags: Linux defined - for features scattered in various
@@ -483,6 +480,7 @@
#define X86_FEATURE_AMD_FAST_CPPC (21*32 + 5) /* Fast CPPC */
#define X86_FEATURE_AMD_HETEROGENEOUS_CORES (21*32 + 6) /* Heterogeneous Core Topology */
#define X86_FEATURE_AMD_WORKLOAD_CLASS (21*32 + 7) /* Workload Classification */
+#define X86_FEATURE_PREFER_YMM (21*32 + 8) /* Avoid ZMM registers due to downclocking */
/*
* BUG word(s)
@@ -534,4 +532,5 @@
#define X86_BUG_RFDS X86_BUG(1*32 + 2) /* "rfds" CPU is vulnerable to Register File Data Sampling */
#define X86_BUG_BHI X86_BUG(1*32 + 3) /* "bhi" CPU is affected by Branch History Injection */
#define X86_BUG_IBPB_NO_RET X86_BUG(1*32 + 4) /* "ibpb_no_ret" IBPB omits return target predictions */
+#define X86_BUG_SPECTRE_V2_USER X86_BUG(1*32 + 5) /* "spectre_v2_user" CPU is affected by Spectre variant 2 attack between user processes */
#endif /* _ASM_X86_CPUFEATURES_H */
diff --git a/arch/x86/include/asm/cpuid.h b/arch/x86/include/asm/cpuid.h
index b2b9b4ef3dae..d5749b25fa10 100644
--- a/arch/x86/include/asm/cpuid.h
+++ b/arch/x86/include/asm/cpuid.h
@@ -1,222 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * CPUID-related helpers/definitions
- */
#ifndef _ASM_X86_CPUID_H
#define _ASM_X86_CPUID_H
-#include <linux/types.h>
-
-#include <asm/string.h>
-
-struct cpuid_regs {
- u32 eax, ebx, ecx, edx;
-};
-
-enum cpuid_regs_idx {
- CPUID_EAX = 0,
- CPUID_EBX,
- CPUID_ECX,
- CPUID_EDX,
-};
-
-#define CPUID_LEAF_MWAIT 0x5
-#define CPUID_LEAF_DCA 0x9
-#define CPUID_LEAF_XSTATE 0x0d
-#define CPUID_LEAF_TSC 0x15
-#define CPUID_LEAF_FREQ 0x16
-#define CPUID_LEAF_TILE 0x1d
-
-#ifdef CONFIG_X86_32
-bool have_cpuid_p(void);
-#else
-static inline bool have_cpuid_p(void)
-{
- return true;
-}
-#endif
-static inline void native_cpuid(unsigned int *eax, unsigned int *ebx,
- unsigned int *ecx, unsigned int *edx)
-{
- /* ecx is often an input as well as an output. */
- asm volatile("cpuid"
- : "=a" (*eax),
- "=b" (*ebx),
- "=c" (*ecx),
- "=d" (*edx)
- : "0" (*eax), "2" (*ecx)
- : "memory");
-}
-
-#define native_cpuid_reg(reg) \
-static inline unsigned int native_cpuid_##reg(unsigned int op) \
-{ \
- unsigned int eax = op, ebx, ecx = 0, edx; \
- \
- native_cpuid(&eax, &ebx, &ecx, &edx); \
- \
- return reg; \
-}
-
-/*
- * Native CPUID functions returning a single datum.
- */
-native_cpuid_reg(eax)
-native_cpuid_reg(ebx)
-native_cpuid_reg(ecx)
-native_cpuid_reg(edx)
-
-#ifdef CONFIG_PARAVIRT_XXL
-#include <asm/paravirt.h>
-#else
-#define __cpuid native_cpuid
-#endif
-
-/*
- * Generic CPUID function
- * clear %ecx since some cpus (Cyrix MII) do not set or clear %ecx
- * resulting in stale register contents being returned.
- */
-static inline void cpuid(unsigned int op,
- unsigned int *eax, unsigned int *ebx,
- unsigned int *ecx, unsigned int *edx)
-{
- *eax = op;
- *ecx = 0;
- __cpuid(eax, ebx, ecx, edx);
-}
-
-/* Some CPUID calls want 'count' to be placed in ecx */
-static inline void cpuid_count(unsigned int op, int count,
- unsigned int *eax, unsigned int *ebx,
- unsigned int *ecx, unsigned int *edx)
-{
- *eax = op;
- *ecx = count;
- __cpuid(eax, ebx, ecx, edx);
-}
-
-/*
- * CPUID functions returning a single datum
- */
-static inline unsigned int cpuid_eax(unsigned int op)
-{
- unsigned int eax, ebx, ecx, edx;
-
- cpuid(op, &eax, &ebx, &ecx, &edx);
-
- return eax;
-}
-
-static inline unsigned int cpuid_ebx(unsigned int op)
-{
- unsigned int eax, ebx, ecx, edx;
-
- cpuid(op, &eax, &ebx, &ecx, &edx);
-
- return ebx;
-}
-
-static inline unsigned int cpuid_ecx(unsigned int op)
-{
- unsigned int eax, ebx, ecx, edx;
-
- cpuid(op, &eax, &ebx, &ecx, &edx);
-
- return ecx;
-}
-
-static inline unsigned int cpuid_edx(unsigned int op)
-{
- unsigned int eax, ebx, ecx, edx;
-
- cpuid(op, &eax, &ebx, &ecx, &edx);
-
- return edx;
-}
-
-static inline void __cpuid_read(unsigned int leaf, unsigned int subleaf, u32 *regs)
-{
- regs[CPUID_EAX] = leaf;
- regs[CPUID_ECX] = subleaf;
- __cpuid(regs + CPUID_EAX, regs + CPUID_EBX, regs + CPUID_ECX, regs + CPUID_EDX);
-}
-
-#define cpuid_subleaf(leaf, subleaf, regs) { \
- static_assert(sizeof(*(regs)) == 16); \
- __cpuid_read(leaf, subleaf, (u32 *)(regs)); \
-}
-
-#define cpuid_leaf(leaf, regs) { \
- static_assert(sizeof(*(regs)) == 16); \
- __cpuid_read(leaf, 0, (u32 *)(regs)); \
-}
-
-static inline void __cpuid_read_reg(unsigned int leaf, unsigned int subleaf,
- enum cpuid_regs_idx regidx, u32 *reg)
-{
- u32 regs[4];
-
- __cpuid_read(leaf, subleaf, regs);
- *reg = regs[regidx];
-}
-
-#define cpuid_subleaf_reg(leaf, subleaf, regidx, reg) { \
- static_assert(sizeof(*(reg)) == 4); \
- __cpuid_read_reg(leaf, subleaf, regidx, (u32 *)(reg)); \
-}
-
-#define cpuid_leaf_reg(leaf, regidx, reg) { \
- static_assert(sizeof(*(reg)) == 4); \
- __cpuid_read_reg(leaf, 0, regidx, (u32 *)(reg)); \
-}
-
-static __always_inline bool cpuid_function_is_indexed(u32 function)
-{
- switch (function) {
- case 4:
- case 7:
- case 0xb:
- case 0xd:
- case 0xf:
- case 0x10:
- case 0x12:
- case 0x14:
- case 0x17:
- case 0x18:
- case 0x1d:
- case 0x1e:
- case 0x1f:
- case 0x24:
- case 0x8000001d:
- return true;
- }
-
- return false;
-}
-
-#define for_each_possible_hypervisor_cpuid_base(function) \
- for (function = 0x40000000; function < 0x40010000; function += 0x100)
-
-static inline uint32_t hypervisor_cpuid_base(const char *sig, uint32_t leaves)
-{
- uint32_t base, eax, signature[3];
-
- for_each_possible_hypervisor_cpuid_base(base) {
- cpuid(base, &eax, &signature[0], &signature[1], &signature[2]);
-
- /*
- * This must not compile to "call memcmp" because it's called
- * from PVH early boot code before instrumentation is set up
- * and memcmp() itself may be instrumented.
- */
- if (!__builtin_memcmp(sig, signature, 12) &&
- (leaves == 0 || ((eax - base) >= leaves)))
- return base;
- }
-
- return 0;
-}
+#include <asm/cpuid/api.h>
#endif /* _ASM_X86_CPUID_H */
diff --git a/arch/x86/include/asm/cpuid/api.h b/arch/x86/include/asm/cpuid/api.h
new file mode 100644
index 000000000000..9c180c9cc58e
--- /dev/null
+++ b/arch/x86/include/asm/cpuid/api.h
@@ -0,0 +1,210 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_X86_CPUID_API_H
+#define _ASM_X86_CPUID_API_H
+
+#include <asm/cpuid/types.h>
+
+#include <linux/build_bug.h>
+#include <linux/types.h>
+
+#include <asm/string.h>
+
+/*
+ * Raw CPUID accessors:
+ */
+
+#ifdef CONFIG_X86_32
+bool have_cpuid_p(void);
+#else
+static inline bool have_cpuid_p(void)
+{
+ return true;
+}
+#endif
+
+static inline void native_cpuid(u32 *eax, u32 *ebx,
+ u32 *ecx, u32 *edx)
+{
+ /* ecx is often an input as well as an output. */
+ asm volatile("cpuid"
+ : "=a" (*eax),
+ "=b" (*ebx),
+ "=c" (*ecx),
+ "=d" (*edx)
+ : "0" (*eax), "2" (*ecx)
+ : "memory");
+}
+
+#define NATIVE_CPUID_REG(reg) \
+static inline u32 native_cpuid_##reg(u32 op) \
+{ \
+ u32 eax = op, ebx, ecx = 0, edx; \
+ \
+ native_cpuid(&eax, &ebx, &ecx, &edx); \
+ \
+ return reg; \
+}
+
+/*
+ * Native CPUID functions returning a single datum:
+ */
+NATIVE_CPUID_REG(eax)
+NATIVE_CPUID_REG(ebx)
+NATIVE_CPUID_REG(ecx)
+NATIVE_CPUID_REG(edx)
+
+#ifdef CONFIG_PARAVIRT_XXL
+# include <asm/paravirt.h>
+#else
+# define __cpuid native_cpuid
+#endif
+
+/*
+ * Generic CPUID function
+ *
+ * Clear ECX since some CPUs (Cyrix MII) do not set or clear ECX
+ * resulting in stale register contents being returned.
+ */
+static inline void cpuid(u32 op,
+ u32 *eax, u32 *ebx,
+ u32 *ecx, u32 *edx)
+{
+ *eax = op;
+ *ecx = 0;
+ __cpuid(eax, ebx, ecx, edx);
+}
+
+/* Some CPUID calls want 'count' to be placed in ECX */
+static inline void cpuid_count(u32 op, int count,
+ u32 *eax, u32 *ebx,
+ u32 *ecx, u32 *edx)
+{
+ *eax = op;
+ *ecx = count;
+ __cpuid(eax, ebx, ecx, edx);
+}
+
+/*
+ * CPUID functions returning a single datum:
+ */
+
+static inline u32 cpuid_eax(u32 op)
+{
+ u32 eax, ebx, ecx, edx;
+
+ cpuid(op, &eax, &ebx, &ecx, &edx);
+
+ return eax;
+}
+
+static inline u32 cpuid_ebx(u32 op)
+{
+ u32 eax, ebx, ecx, edx;
+
+ cpuid(op, &eax, &ebx, &ecx, &edx);
+
+ return ebx;
+}
+
+static inline u32 cpuid_ecx(u32 op)
+{
+ u32 eax, ebx, ecx, edx;
+
+ cpuid(op, &eax, &ebx, &ecx, &edx);
+
+ return ecx;
+}
+
+static inline u32 cpuid_edx(u32 op)
+{
+ u32 eax, ebx, ecx, edx;
+
+ cpuid(op, &eax, &ebx, &ecx, &edx);
+
+ return edx;
+}
+
+static inline void __cpuid_read(u32 leaf, u32 subleaf, u32 *regs)
+{
+ regs[CPUID_EAX] = leaf;
+ regs[CPUID_ECX] = subleaf;
+ __cpuid(regs + CPUID_EAX, regs + CPUID_EBX, regs + CPUID_ECX, regs + CPUID_EDX);
+}
+
+#define cpuid_subleaf(leaf, subleaf, regs) { \
+ static_assert(sizeof(*(regs)) == 16); \
+ __cpuid_read(leaf, subleaf, (u32 *)(regs)); \
+}
+
+#define cpuid_leaf(leaf, regs) { \
+ static_assert(sizeof(*(regs)) == 16); \
+ __cpuid_read(leaf, 0, (u32 *)(regs)); \
+}
+
+static inline void __cpuid_read_reg(u32 leaf, u32 subleaf,
+ enum cpuid_regs_idx regidx, u32 *reg)
+{
+ u32 regs[4];
+
+ __cpuid_read(leaf, subleaf, regs);
+ *reg = regs[regidx];
+}
+
+#define cpuid_subleaf_reg(leaf, subleaf, regidx, reg) { \
+ static_assert(sizeof(*(reg)) == 4); \
+ __cpuid_read_reg(leaf, subleaf, regidx, (u32 *)(reg)); \
+}
+
+#define cpuid_leaf_reg(leaf, regidx, reg) { \
+ static_assert(sizeof(*(reg)) == 4); \
+ __cpuid_read_reg(leaf, 0, regidx, (u32 *)(reg)); \
+}
+
+static __always_inline bool cpuid_function_is_indexed(u32 function)
+{
+ switch (function) {
+ case 4:
+ case 7:
+ case 0xb:
+ case 0xd:
+ case 0xf:
+ case 0x10:
+ case 0x12:
+ case 0x14:
+ case 0x17:
+ case 0x18:
+ case 0x1d:
+ case 0x1e:
+ case 0x1f:
+ case 0x24:
+ case 0x8000001d:
+ return true;
+ }
+
+ return false;
+}
+
+#define for_each_possible_hypervisor_cpuid_base(function) \
+ for (function = 0x40000000; function < 0x40010000; function += 0x100)
+
+static inline u32 hypervisor_cpuid_base(const char *sig, u32 leaves)
+{
+ u32 base, eax, signature[3];
+
+ for_each_possible_hypervisor_cpuid_base(base) {
+ cpuid(base, &eax, &signature[0], &signature[1], &signature[2]);
+
+ /*
+ * This must not compile to "call memcmp" because it's called
+ * from PVH early boot code before instrumentation is set up
+ * and memcmp() itself may be instrumented.
+ */
+ if (!__builtin_memcmp(sig, signature, 12) &&
+ (leaves == 0 || ((eax - base) >= leaves)))
+ return base;
+ }
+
+ return 0;
+}
+
+#endif /* _ASM_X86_CPUID_API_H */
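A short sketch (illustrative only, not part of the patch; function and variable names are arbitrary) of how callers typically use the helpers collected in the new header:

#include <linux/printk.h>
#include <asm/cpuid/api.h>

static void example_dump_cpuid(void)
{
	struct cpuid_regs regs;
	u32 eax, ebx, ecx, edx;

	/* Leaf 0: maximum standard leaf in EAX, vendor string in EBX/EDX/ECX */
	cpuid(0, &eax, &ebx, &ecx, &edx);

	/* Same leaf via the struct-based wrapper from <asm/cpuid/types.h> */
	cpuid_leaf(0, &regs);

	/* Single-register convenience helper: maximum extended leaf */
	pr_info("max std leaf %u, max ext leaf %#x\n", eax, cpuid_eax(0x80000000));
}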
diff --git a/arch/x86/include/asm/cpuid/types.h b/arch/x86/include/asm/cpuid/types.h
new file mode 100644
index 000000000000..8582e27e836d
--- /dev/null
+++ b/arch/x86/include/asm/cpuid/types.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_X86_CPUID_TYPES_H
+#define _ASM_X86_CPUID_TYPES_H
+
+#include <linux/types.h>
+
+/*
+ * Types for raw CPUID access:
+ */
+
+struct cpuid_regs {
+ u32 eax;
+ u32 ebx;
+ u32 ecx;
+ u32 edx;
+};
+
+enum cpuid_regs_idx {
+ CPUID_EAX = 0,
+ CPUID_EBX,
+ CPUID_ECX,
+ CPUID_EDX,
+};
+
+#define CPUID_LEAF_MWAIT 0x05
+#define CPUID_LEAF_DCA 0x09
+#define CPUID_LEAF_XSTATE 0x0d
+#define CPUID_LEAF_TSC 0x15
+#define CPUID_LEAF_FREQ 0x16
+#define CPUID_LEAF_TILE 0x1d
+
+#endif /* _ASM_X86_CPUID_TYPES_H */
diff --git a/arch/x86/include/asm/cpumask.h b/arch/x86/include/asm/cpumask.h
index 4acfd57de8f1..70f6b60ad67b 100644
--- a/arch/x86/include/asm/cpumask.h
+++ b/arch/x86/include/asm/cpumask.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_CPUMASK_H
#define _ASM_X86_CPUMASK_H
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <linux/cpumask.h>
extern void setup_cpu_local_masks(void);
@@ -34,5 +34,5 @@ static __always_inline void arch_cpumask_clear_cpu(int cpu, struct cpumask *dstp
#define arch_cpu_is_offline(cpu) unlikely(!arch_cpu_online(cpu))
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif /* _ASM_X86_CPUMASK_H */
diff --git a/arch/x86/include/asm/current.h b/arch/x86/include/asm/current.h
index bf5953883ec3..cc4a3f725b37 100644
--- a/arch/x86/include/asm/current.h
+++ b/arch/x86/include/asm/current.h
@@ -5,52 +5,28 @@
#include <linux/build_bug.h>
#include <linux/compiler.h>
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <linux/cache.h>
#include <asm/percpu.h>
struct task_struct;
-struct pcpu_hot {
- union {
- struct {
- struct task_struct *current_task;
- int preempt_count;
- int cpu_number;
-#ifdef CONFIG_MITIGATION_CALL_DEPTH_TRACKING
- u64 call_depth;
-#endif
- unsigned long top_of_stack;
- void *hardirq_stack_ptr;
- u16 softirq_pending;
-#ifdef CONFIG_X86_64
- bool hardirq_stack_inuse;
-#else
- void *softirq_stack_ptr;
-#endif
- };
- u8 pad[64];
- };
-};
-static_assert(sizeof(struct pcpu_hot) == 64);
-
-DECLARE_PER_CPU_ALIGNED(struct pcpu_hot, pcpu_hot);
-
-/* const-qualified alias to pcpu_hot, aliased by linker. */
-DECLARE_PER_CPU_ALIGNED(const struct pcpu_hot __percpu_seg_override,
- const_pcpu_hot);
+DECLARE_PER_CPU_CACHE_HOT(struct task_struct *, current_task);
+/* const-qualified alias provided by the linker. */
+DECLARE_PER_CPU_CACHE_HOT(struct task_struct * const __percpu_seg_override,
+ const_current_task);
static __always_inline struct task_struct *get_current(void)
{
if (IS_ENABLED(CONFIG_USE_X86_SEG_SUPPORT))
- return this_cpu_read_const(const_pcpu_hot.current_task);
+ return this_cpu_read_const(const_current_task);
- return this_cpu_read_stable(pcpu_hot.current_task);
+ return this_cpu_read_stable(current_task);
}
#define current get_current()
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif /* _ASM_X86_CURRENT_H */
diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
index 62dc9f59ea76..ec95fe44fa3a 100644
--- a/arch/x86/include/asm/desc.h
+++ b/arch/x86/include/asm/desc.h
@@ -46,7 +46,6 @@ struct gdt_page {
} __attribute__((aligned(PAGE_SIZE)));
DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
-DECLARE_INIT_PER_CPU(gdt_page);
/* Provide the original GDT */
static inline struct desc_struct *get_cpu_gdt_rw(unsigned int cpu)
diff --git a/arch/x86/include/asm/desc_defs.h b/arch/x86/include/asm/desc_defs.h
index d440a65af8f3..7e6b9314758a 100644
--- a/arch/x86/include/asm/desc_defs.h
+++ b/arch/x86/include/asm/desc_defs.h
@@ -58,7 +58,7 @@
#define DESC_USER (_DESC_DPL(3))
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <linux/types.h>
@@ -166,7 +166,7 @@ struct desc_ptr {
unsigned long address;
} __attribute__((packed)) ;
-#endif /* !__ASSEMBLY__ */
+#endif /* !__ASSEMBLER__ */
/* Boot IDT definitions */
#define BOOT_IDT_ENTRIES 32
diff --git a/arch/x86/include/asm/disabled-features.h b/arch/x86/include/asm/disabled-features.h
deleted file mode 100644
index c492bdc97b05..000000000000
--- a/arch/x86/include/asm/disabled-features.h
+++ /dev/null
@@ -1,161 +0,0 @@
-#ifndef _ASM_X86_DISABLED_FEATURES_H
-#define _ASM_X86_DISABLED_FEATURES_H
-
-/* These features, although they might be available in a CPU
- * will not be used because the compile options to support
- * them are not present.
- *
- * This code allows them to be checked and disabled at
- * compile time without an explicit #ifdef. Use
- * cpu_feature_enabled().
- */
-
-#ifdef CONFIG_X86_UMIP
-# define DISABLE_UMIP 0
-#else
-# define DISABLE_UMIP (1<<(X86_FEATURE_UMIP & 31))
-#endif
-
-#ifdef CONFIG_X86_64
-# define DISABLE_VME (1<<(X86_FEATURE_VME & 31))
-# define DISABLE_K6_MTRR (1<<(X86_FEATURE_K6_MTRR & 31))
-# define DISABLE_CYRIX_ARR (1<<(X86_FEATURE_CYRIX_ARR & 31))
-# define DISABLE_CENTAUR_MCR (1<<(X86_FEATURE_CENTAUR_MCR & 31))
-# define DISABLE_PCID 0
-#else
-# define DISABLE_VME 0
-# define DISABLE_K6_MTRR 0
-# define DISABLE_CYRIX_ARR 0
-# define DISABLE_CENTAUR_MCR 0
-# define DISABLE_PCID (1<<(X86_FEATURE_PCID & 31))
-#endif /* CONFIG_X86_64 */
-
-#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
-# define DISABLE_PKU 0
-# define DISABLE_OSPKE 0
-#else
-# define DISABLE_PKU (1<<(X86_FEATURE_PKU & 31))
-# define DISABLE_OSPKE (1<<(X86_FEATURE_OSPKE & 31))
-#endif /* CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS */
-
-#ifdef CONFIG_X86_5LEVEL
-# define DISABLE_LA57 0
-#else
-# define DISABLE_LA57 (1<<(X86_FEATURE_LA57 & 31))
-#endif
-
-#ifdef CONFIG_MITIGATION_PAGE_TABLE_ISOLATION
-# define DISABLE_PTI 0
-#else
-# define DISABLE_PTI (1 << (X86_FEATURE_PTI & 31))
-#endif
-
-#ifdef CONFIG_MITIGATION_RETPOLINE
-# define DISABLE_RETPOLINE 0
-#else
-# define DISABLE_RETPOLINE ((1 << (X86_FEATURE_RETPOLINE & 31)) | \
- (1 << (X86_FEATURE_RETPOLINE_LFENCE & 31)))
-#endif
-
-#ifdef CONFIG_MITIGATION_RETHUNK
-# define DISABLE_RETHUNK 0
-#else
-# define DISABLE_RETHUNK (1 << (X86_FEATURE_RETHUNK & 31))
-#endif
-
-#ifdef CONFIG_MITIGATION_UNRET_ENTRY
-# define DISABLE_UNRET 0
-#else
-# define DISABLE_UNRET (1 << (X86_FEATURE_UNRET & 31))
-#endif
-
-#ifdef CONFIG_MITIGATION_CALL_DEPTH_TRACKING
-# define DISABLE_CALL_DEPTH_TRACKING 0
-#else
-# define DISABLE_CALL_DEPTH_TRACKING (1 << (X86_FEATURE_CALL_DEPTH & 31))
-#endif
-
-#ifdef CONFIG_ADDRESS_MASKING
-# define DISABLE_LAM 0
-#else
-# define DISABLE_LAM (1 << (X86_FEATURE_LAM & 31))
-#endif
-
-#ifdef CONFIG_INTEL_IOMMU_SVM
-# define DISABLE_ENQCMD 0
-#else
-# define DISABLE_ENQCMD (1 << (X86_FEATURE_ENQCMD & 31))
-#endif
-
-#ifdef CONFIG_X86_SGX
-# define DISABLE_SGX 0
-#else
-# define DISABLE_SGX (1 << (X86_FEATURE_SGX & 31))
-#endif
-
-#ifdef CONFIG_XEN_PV
-# define DISABLE_XENPV 0
-#else
-# define DISABLE_XENPV (1 << (X86_FEATURE_XENPV & 31))
-#endif
-
-#ifdef CONFIG_INTEL_TDX_GUEST
-# define DISABLE_TDX_GUEST 0
-#else
-# define DISABLE_TDX_GUEST (1 << (X86_FEATURE_TDX_GUEST & 31))
-#endif
-
-#ifdef CONFIG_X86_USER_SHADOW_STACK
-#define DISABLE_USER_SHSTK 0
-#else
-#define DISABLE_USER_SHSTK (1 << (X86_FEATURE_USER_SHSTK & 31))
-#endif
-
-#ifdef CONFIG_X86_KERNEL_IBT
-#define DISABLE_IBT 0
-#else
-#define DISABLE_IBT (1 << (X86_FEATURE_IBT & 31))
-#endif
-
-#ifdef CONFIG_X86_FRED
-# define DISABLE_FRED 0
-#else
-# define DISABLE_FRED (1 << (X86_FEATURE_FRED & 31))
-#endif
-
-#ifdef CONFIG_KVM_AMD_SEV
-#define DISABLE_SEV_SNP 0
-#else
-#define DISABLE_SEV_SNP (1 << (X86_FEATURE_SEV_SNP & 31))
-#endif
-
-/*
- * Make sure to add features to the correct mask
- */
-#define DISABLED_MASK0 (DISABLE_VME)
-#define DISABLED_MASK1 0
-#define DISABLED_MASK2 0
-#define DISABLED_MASK3 (DISABLE_CYRIX_ARR|DISABLE_CENTAUR_MCR|DISABLE_K6_MTRR)
-#define DISABLED_MASK4 (DISABLE_PCID)
-#define DISABLED_MASK5 0
-#define DISABLED_MASK6 0
-#define DISABLED_MASK7 (DISABLE_PTI)
-#define DISABLED_MASK8 (DISABLE_XENPV|DISABLE_TDX_GUEST)
-#define DISABLED_MASK9 (DISABLE_SGX)
-#define DISABLED_MASK10 0
-#define DISABLED_MASK11 (DISABLE_RETPOLINE|DISABLE_RETHUNK|DISABLE_UNRET| \
- DISABLE_CALL_DEPTH_TRACKING|DISABLE_USER_SHSTK)
-#define DISABLED_MASK12 (DISABLE_FRED|DISABLE_LAM)
-#define DISABLED_MASK13 0
-#define DISABLED_MASK14 0
-#define DISABLED_MASK15 0
-#define DISABLED_MASK16 (DISABLE_PKU|DISABLE_OSPKE|DISABLE_LA57|DISABLE_UMIP| \
- DISABLE_ENQCMD)
-#define DISABLED_MASK17 0
-#define DISABLED_MASK18 (DISABLE_IBT)
-#define DISABLED_MASK19 (DISABLE_SEV_SNP)
-#define DISABLED_MASK20 0
-#define DISABLED_MASK21 0
-#define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 22)
-
-#endif /* _ASM_X86_DISABLED_FEATURES_H */
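The compile-time disable machinery this header provided now appears to come from the generated <asm/cpufeaturemasks.h> included above; callers keep the same pattern. For illustration only (feature choice arbitrary, not part of the patch):

/* Illustrative only: compile-time-aware feature test, unchanged by this series */
if (cpu_feature_enabled(X86_FEATURE_LA57)) {
	/* 5-level paging path; compiled out when CONFIG_X86_5LEVEL=n */
} else {
	/* 4-level paging path */
}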
diff --git a/arch/x86/include/asm/dwarf2.h b/arch/x86/include/asm/dwarf2.h
index 430fca13bb56..302e11b15da8 100644
--- a/arch/x86/include/asm/dwarf2.h
+++ b/arch/x86/include/asm/dwarf2.h
@@ -2,7 +2,7 @@
#ifndef _ASM_X86_DWARF2_H
#define _ASM_X86_DWARF2_H
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#warning "asm/dwarf2.h should be only included in pure assembly files"
#endif
diff --git a/arch/x86/include/asm/e820/api.h b/arch/x86/include/asm/e820/api.h
index 2e74a7f0e935..c83645d5b2a8 100644
--- a/arch/x86/include/asm/e820/api.h
+++ b/arch/x86/include/asm/e820/api.h
@@ -29,7 +29,6 @@ extern unsigned long e820__end_of_low_ram_pfn(void);
extern u64 e820__memblock_alloc_reserved(u64 size, u64 align);
extern void e820__memblock_setup(void);
-extern void e820__reserve_setup_data(void);
extern void e820__finish_early_params(void);
extern void e820__reserve_resources(void);
extern void e820__reserve_resources_late(void);
diff --git a/arch/x86/include/asm/e820/types.h b/arch/x86/include/asm/e820/types.h
index 314f75d886d0..80c4a7266629 100644
--- a/arch/x86/include/asm/e820/types.h
+++ b/arch/x86/include/asm/e820/types.h
@@ -35,15 +35,6 @@ enum e820_type {
* marking it with the IORES_DESC_SOFT_RESERVED designation.
*/
E820_TYPE_SOFT_RESERVED = 0xefffffff,
-
- /*
- * Reserved RAM used by the kernel itself if
- * CONFIG_INTEL_TXT=y is enabled, memory of this type
- * will be included in the S3 integrity calculation
- * and so should not include any memory that the BIOS
- * might alter over the S3 transition:
- */
- E820_TYPE_RESERVED_KERN = 128,
};
/*
diff --git a/arch/x86/include/asm/edac.h b/arch/x86/include/asm/edac.h
index 426fc53ff803..dfbd1ebb9f10 100644
--- a/arch/x86/include/asm/edac.h
+++ b/arch/x86/include/asm/edac.h
@@ -13,7 +13,7 @@ static inline void edac_atomic_scrub(void *va, u32 size)
* are interrupt, DMA and SMP safe.
*/
for (i = 0; i < size / 4; i++, virt_addr++)
- asm volatile("lock; addl $0, %0"::"m" (*virt_addr));
+ asm volatile("lock addl $0, %0"::"m" (*virt_addr));
}
#endif /* _ASM_X86_EDAC_H */
diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
index 1fb83d47711f..128602612eca 100644
--- a/arch/x86/include/asm/elf.h
+++ b/arch/x86/include/asm/elf.h
@@ -54,8 +54,9 @@ typedef struct user_i387_struct elf_fpregset_t;
#define R_X86_64_GLOB_DAT 6 /* Create GOT entry */
#define R_X86_64_JUMP_SLOT 7 /* Create PLT entry */
#define R_X86_64_RELATIVE 8 /* Adjust by program base */
-#define R_X86_64_GOTPCREL 9 /* 32 bit signed pc relative
- offset to GOT */
+#define R_X86_64_GOTPCREL 9 /* 32 bit signed pc relative offset to GOT */
+#define R_X86_64_GOTPCRELX 41
+#define R_X86_64_REX_GOTPCRELX 42
#define R_X86_64_32 10 /* Direct 32 bit zero extended */
#define R_X86_64_32S 11 /* Direct 32 bit sign extended */
#define R_X86_64_16 12 /* Direct 16 bit zero extended */
diff --git a/arch/x86/include/asm/fixmap.h b/arch/x86/include/asm/fixmap.h
index d0dcefb5cc59..4519c9f35ba0 100644
--- a/arch/x86/include/asm/fixmap.h
+++ b/arch/x86/include/asm/fixmap.h
@@ -31,7 +31,7 @@
/* fixmap starts downwards from the 507th entry in level2_fixmap_pgt */
#define FIXMAP_PMD_TOP 507
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <linux/kernel.h>
#include <asm/apicdef.h>
#include <asm/page.h>
@@ -196,5 +196,5 @@ void __init *early_memremap_decrypted_wp(resource_size_t phys_addr,
void __early_set_fixmap(enum fixed_addresses idx,
phys_addr_t phys, pgprot_t flags);
-#endif /* !__ASSEMBLY__ */
+#endif /* !__ASSEMBLER__ */
#endif /* _ASM_X86_FIXMAP_H */
diff --git a/arch/x86/include/asm/fpu/api.h b/arch/x86/include/asm/fpu/api.h
index f86ad3335529..f42de5f05e7e 100644
--- a/arch/x86/include/asm/fpu/api.h
+++ b/arch/x86/include/asm/fpu/api.h
@@ -16,10 +16,9 @@
/*
* Use kernel_fpu_begin/end() if you intend to use FPU in kernel context. It
- * disables preemption so be careful if you intend to use it for long periods
- * of time.
- * If you intend to use the FPU in irq/softirq you need to check first with
- * irq_fpu_usable() if it is possible.
+ * disables preemption and softirq processing, so be careful if you intend to
+ * use it for long periods of time. Kernel-mode FPU cannot be used in all
+ * contexts -- see irq_fpu_usable() for details.
*/
/* Kernel FPU states to initialize in kernel_fpu_begin_mask() */
@@ -50,10 +49,10 @@ static inline void kernel_fpu_begin(void)
}
/*
- * Use fpregs_lock() while editing CPU's FPU registers or fpu->fpstate.
- * A context switch will (and softirq might) save CPU's FPU registers to
- * fpu->fpstate.regs and set TIF_NEED_FPU_LOAD leaving CPU's FPU registers in
- * a random state.
+ * Use fpregs_lock() while editing CPU's FPU registers or fpu->fpstate, or while
+ * using the FPU in kernel mode. A context switch will (and softirq might) save
+ * CPU's FPU registers to fpu->fpstate.regs and set TIF_NEED_FPU_LOAD leaving
+ * CPU's FPU registers in a random state.
*
* local_bh_disable() protects against both preemption and soft interrupts
* on !RT kernels.
@@ -63,8 +62,6 @@ static inline void kernel_fpu_begin(void)
* preemptible. Disabling preemption is the right choice here as bottom
* half processing is always in thread context on RT kernels so it
* implicitly prevents bottom half processing as well.
- *
- * Disabling preemption also serializes against kernel_fpu_begin().
*/
static inline void fpregs_lock(void)
{
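A minimal sketch (illustrative only, not part of the patch) of the guarded kernel-mode FPU pattern the updated comments describe:

/* Illustrative only: keep the FPU section short, preemption and softirqs are off */
static void example_simd_work(void)
{
	if (!irq_fpu_usable()) {
		/* fall back to a scalar implementation */
		return;
	}

	kernel_fpu_begin();
	/* ... SIMD/FPU work ... */
	kernel_fpu_end();
}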
diff --git a/arch/x86/include/asm/frame.h b/arch/x86/include/asm/frame.h
index fb42659f6e98..0ab65073c1cc 100644
--- a/arch/x86/include/asm/frame.h
+++ b/arch/x86/include/asm/frame.h
@@ -11,7 +11,7 @@
#ifdef CONFIG_FRAME_POINTER
-#ifdef __ASSEMBLY__
+#ifdef __ASSEMBLER__
.macro FRAME_BEGIN
push %_ASM_BP
@@ -51,7 +51,7 @@
.endm
#endif /* CONFIG_X86_64 */
-#else /* !__ASSEMBLY__ */
+#else /* !__ASSEMBLER__ */
#define FRAME_BEGIN \
"push %" _ASM_BP "\n" \
@@ -82,18 +82,18 @@ static inline unsigned long encode_frame_pointer(struct pt_regs *regs)
#endif /* CONFIG_X86_64 */
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#define FRAME_OFFSET __ASM_SEL(4, 8)
#else /* !CONFIG_FRAME_POINTER */
-#ifdef __ASSEMBLY__
+#ifdef __ASSEMBLER__
.macro ENCODE_FRAME_POINTER ptregs_offset=0
.endm
-#else /* !__ASSEMBLY */
+#else /* !__ASSEMBLER__ */
#define ENCODE_FRAME_POINTER
diff --git a/arch/x86/include/asm/fred.h b/arch/x86/include/asm/fred.h
index 25ca00bd70e8..2a29e5216881 100644
--- a/arch/x86/include/asm/fred.h
+++ b/arch/x86/include/asm/fred.h
@@ -32,7 +32,7 @@
#define FRED_CONFIG_INT_STKLVL(l) (_AT(unsigned long, l) << 9)
#define FRED_CONFIG_ENTRYPOINT(p) _AT(unsigned long, (p))
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#ifdef CONFIG_X86_FRED
#include <linux/kernel.h>
@@ -113,6 +113,6 @@ static inline void fred_entry_from_kvm(unsigned int type, unsigned int vector) {
static inline void fred_sync_rsp0(unsigned long rsp0) { }
static inline void fred_update_rsp0(void) { }
#endif /* CONFIG_X86_FRED */
-#endif /* !__ASSEMBLY__ */
+#endif /* !__ASSEMBLER__ */
#endif /* ASM_X86_FRED_H */
diff --git a/arch/x86/include/asm/fsgsbase.h b/arch/x86/include/asm/fsgsbase.h
index 9e7e8ca8e299..02f239569b93 100644
--- a/arch/x86/include/asm/fsgsbase.h
+++ b/arch/x86/include/asm/fsgsbase.h
@@ -2,7 +2,7 @@
#ifndef _ASM_FSGSBASE_H
#define _ASM_FSGSBASE_H
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#ifdef CONFIG_X86_64
@@ -80,6 +80,6 @@ extern unsigned long x86_fsgsbase_read_task(struct task_struct *task,
#endif /* CONFIG_X86_64 */
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif /* _ASM_FSGSBASE_H */
diff --git a/arch/x86/include/asm/ftrace.h b/arch/x86/include/asm/ftrace.h
index f9cb4d07df58..93156ac4ffe0 100644
--- a/arch/x86/include/asm/ftrace.h
+++ b/arch/x86/include/asm/ftrace.h
@@ -22,7 +22,7 @@
#define ARCH_SUPPORTS_FTRACE_OPS 1
#endif
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
extern void __fentry__(void);
static inline unsigned long ftrace_call_adjust(unsigned long addr)
@@ -36,21 +36,9 @@ static inline unsigned long ftrace_call_adjust(unsigned long addr)
static inline unsigned long arch_ftrace_get_symaddr(unsigned long fentry_ip)
{
-#ifdef CONFIG_X86_KERNEL_IBT
- u32 instr;
-
- /* We want to be extra safe in case entry ip is on the page edge,
- * but otherwise we need to avoid get_kernel_nofault()'s overhead.
- */
- if ((fentry_ip & ~PAGE_MASK) < ENDBR_INSN_SIZE) {
- if (get_kernel_nofault(instr, (u32 *)(fentry_ip - ENDBR_INSN_SIZE)))
- return fentry_ip;
- } else {
- instr = *(u32 *)(fentry_ip - ENDBR_INSN_SIZE);
- }
- if (is_endbr(instr))
+ if (is_endbr((void *)(fentry_ip - ENDBR_INSN_SIZE)))

fentry_ip -= ENDBR_INSN_SIZE;
-#endif
+
return fentry_ip;
}
#define ftrace_get_symaddr(fentry_ip) arch_ftrace_get_symaddr(fentry_ip)
@@ -118,11 +106,11 @@ struct dyn_arch_ftrace {
};
#endif /* CONFIG_DYNAMIC_FTRACE */
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif /* CONFIG_FUNCTION_TRACER */
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
void prepare_ftrace_return(unsigned long ip, unsigned long *parent,
unsigned long frame_pointer);
@@ -166,6 +154,6 @@ static inline bool arch_trace_is_compat_syscall(struct pt_regs *regs)
}
#endif /* CONFIG_FTRACE_SYSCALLS && CONFIG_IA32_EMULATION */
#endif /* !COMPILE_OFFSETS */
-#endif /* !__ASSEMBLY__ */
+#endif /* !__ASSEMBLER__ */
#endif /* _ASM_X86_FTRACE_H */
diff --git a/arch/x86/include/asm/hardirq.h b/arch/x86/include/asm/hardirq.h
index 6ffa8b75f4cd..f00c09ffe6a9 100644
--- a/arch/x86/include/asm/hardirq.h
+++ b/arch/x86/include/asm/hardirq.h
@@ -3,7 +3,6 @@
#define _ASM_X86_HARDIRQ_H
#include <linux/threads.h>
-#include <asm/current.h>
typedef struct {
#if IS_ENABLED(CONFIG_KVM_INTEL)
@@ -66,7 +65,8 @@ extern u64 arch_irq_stat_cpu(unsigned int cpu);
extern u64 arch_irq_stat(void);
#define arch_irq_stat arch_irq_stat
-#define local_softirq_pending_ref pcpu_hot.softirq_pending
+DECLARE_PER_CPU_CACHE_HOT(u16, __softirq_pending);
+#define local_softirq_pending_ref __softirq_pending
#if IS_ENABLED(CONFIG_KVM_INTEL)
/*
diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
index edebf1020e04..162ebd73a698 100644
--- a/arch/x86/include/asm/hw_irq.h
+++ b/arch/x86/include/asm/hw_irq.h
@@ -16,7 +16,7 @@
#include <asm/irq_vectors.h>
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <linux/percpu.h>
#include <linux/profile.h>
@@ -128,6 +128,6 @@ extern char spurious_entries_start[];
typedef struct irq_desc* vector_irq_t[NR_VECTORS];
DECLARE_PER_CPU(vector_irq_t, vector_irq);
-#endif /* !ASSEMBLY_ */
+#endif /* !__ASSEMBLER__ */
#endif /* _ASM_X86_HW_IRQ_H */
diff --git a/arch/x86/include/asm/ibt.h b/arch/x86/include/asm/ibt.h
index 1e59581d500c..28d845257303 100644
--- a/arch/x86/include/asm/ibt.h
+++ b/arch/x86/include/asm/ibt.h
@@ -21,7 +21,7 @@
#define HAS_KERNEL_IBT 1
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#ifdef CONFIG_X86_64
#define ASM_ENDBR "endbr64\n\t"
@@ -41,7 +41,7 @@
_ASM_PTR fname "\n\t" \
".popsection\n\t"
-static inline __attribute_const__ u32 gen_endbr(void)
+static __always_inline __attribute_const__ u32 gen_endbr(void)
{
u32 endbr;
@@ -56,7 +56,7 @@ static inline __attribute_const__ u32 gen_endbr(void)
return endbr;
}
-static inline __attribute_const__ u32 gen_endbr_poison(void)
+static __always_inline __attribute_const__ u32 gen_endbr_poison(void)
{
/*
* 4 byte NOP that isn't NOP4 (in fact it is OSP NOP3), such that it
@@ -65,19 +65,24 @@ static inline __attribute_const__ u32 gen_endbr_poison(void)
return 0x001f0f66; /* osp nopl (%rax) */
}
-static inline bool is_endbr(u32 val)
+static inline bool __is_endbr(u32 val)
{
if (val == gen_endbr_poison())
return true;
+ /* See cfi_fineibt_bhi_preamble() */
+ if (IS_ENABLED(CONFIG_FINEIBT_BHI) && val == 0x001f0ff5)
+ return true;
+
val &= ~0x01000000U; /* ENDBR32 -> ENDBR64 */
return val == gen_endbr();
}
+extern __noendbr bool is_endbr(u32 *val);
extern __noendbr u64 ibt_save(bool disable);
extern __noendbr void ibt_restore(u64 save);
-#else /* __ASSEMBLY__ */
+#else /* __ASSEMBLER__ */
#ifdef CONFIG_X86_64
#define ENDBR endbr64
@@ -85,29 +90,29 @@ extern __noendbr void ibt_restore(u64 save);
#define ENDBR endbr32
#endif
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#else /* !IBT */
#define HAS_KERNEL_IBT 0
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#define ASM_ENDBR
#define IBT_NOSEAL(name)
#define __noendbr
-static inline bool is_endbr(u32 val) { return false; }
+static inline bool is_endbr(u32 *val) { return false; }
static inline u64 ibt_save(bool disable) { return 0; }
static inline void ibt_restore(u64 save) { }
-#else /* __ASSEMBLY__ */
+#else /* __ASSEMBLER__ */
#define ENDBR
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif /* CONFIG_X86_KERNEL_IBT */
diff --git a/arch/x86/include/asm/idtentry.h b/arch/x86/include/asm/idtentry.h
index ad5c68f0509d..a4ec27c67988 100644
--- a/arch/x86/include/asm/idtentry.h
+++ b/arch/x86/include/asm/idtentry.h
@@ -7,7 +7,7 @@
#define IDT_ALIGN (8 * (1 + HAS_KERNEL_IBT))
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <linux/entry-common.h>
#include <linux/hardirq.h>
@@ -474,7 +474,7 @@ static inline void fred_install_sysvec(unsigned int vector, const idtentry_t fun
idt_install_sysvec(vector, asm_##function); \
}
-#else /* !__ASSEMBLY__ */
+#else /* !__ASSEMBLER__ */
/*
* The ASM variants for DECLARE_IDTENTRY*() which emit the ASM entry stubs.
@@ -579,7 +579,7 @@ SYM_CODE_START(spurious_entries_start)
SYM_CODE_END(spurious_entries_start)
#endif
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
/*
* The actual entry points. Note that DECLARE_IDTENTRY*() serves two
diff --git a/arch/x86/include/asm/init.h b/arch/x86/include/asm/init.h
index 0e82ebc5d1e1..8b1b1abcef15 100644
--- a/arch/x86/include/asm/init.h
+++ b/arch/x86/include/asm/init.h
@@ -2,7 +2,11 @@
#ifndef _ASM_X86_INIT_H
#define _ASM_X86_INIT_H
+#if defined(CONFIG_CC_IS_CLANG) && CONFIG_CLANG_VERSION < 170000
+#define __head __section(".head.text") __no_sanitize_undefined __no_stack_protector
+#else
#define __head __section(".head.text") __no_sanitize_undefined
+#endif
struct x86_mapping_info {
void *(*alloc_pgt_page)(void *); /* allocate buf for page table */
diff --git a/arch/x86/include/asm/inst.h b/arch/x86/include/asm/inst.h
index 438ccd4f3cc4..e48a00b3311d 100644
--- a/arch/x86/include/asm/inst.h
+++ b/arch/x86/include/asm/inst.h
@@ -6,7 +6,7 @@
#ifndef X86_ASM_INST_H
#define X86_ASM_INST_H
-#ifdef __ASSEMBLY__
+#ifdef __ASSEMBLER__
#define REG_NUM_INVALID 100
diff --git a/arch/x86/include/asm/intel-family.h b/arch/x86/include/asm/intel-family.h
index 6d7b04ffc5fd..3a97a7eefb51 100644
--- a/arch/x86/include/asm/intel-family.h
+++ b/arch/x86/include/asm/intel-family.h
@@ -45,7 +45,18 @@
/* Wildcard match so X86_MATCH_VFM(ANY) works */
#define INTEL_ANY IFM(X86_FAMILY_ANY, X86_MODEL_ANY)
+/* Family 5 */
+#define INTEL_FAM5_START IFM(5, 0x00) /* Notational marker, also P5 A-step */
+#define INTEL_PENTIUM_75 IFM(5, 0x02) /* P54C */
+#define INTEL_PENTIUM_MMX IFM(5, 0x04) /* P55C */
+#define INTEL_QUARK_X1000 IFM(5, 0x09) /* Quark X1000 SoC */
+
+/* Family 6 */
#define INTEL_PENTIUM_PRO IFM(6, 0x01)
+#define INTEL_PENTIUM_II_KLAMATH IFM(6, 0x03)
+#define INTEL_PENTIUM_III_DESCHUTES IFM(6, 0x05)
+#define INTEL_PENTIUM_III_TUALATIN IFM(6, 0x0B)
+#define INTEL_PENTIUM_M_DOTHAN IFM(6, 0x0D)
#define INTEL_CORE_YONAH IFM(6, 0x0E)
@@ -110,9 +121,9 @@
#define INTEL_SAPPHIRERAPIDS_X IFM(6, 0x8F) /* Golden Cove */
-#define INTEL_EMERALDRAPIDS_X IFM(6, 0xCF)
+#define INTEL_EMERALDRAPIDS_X IFM(6, 0xCF) /* Raptor Cove */
-#define INTEL_GRANITERAPIDS_X IFM(6, 0xAD)
+#define INTEL_GRANITERAPIDS_X IFM(6, 0xAD) /* Redwood Cove */
#define INTEL_GRANITERAPIDS_D IFM(6, 0xAE)
/* "Hybrid" Processors (P-Core/E-Core) */
@@ -126,16 +137,16 @@
#define INTEL_RAPTORLAKE_P IFM(6, 0xBA)
#define INTEL_RAPTORLAKE_S IFM(6, 0xBF)
-#define INTEL_METEORLAKE IFM(6, 0xAC)
+#define INTEL_METEORLAKE IFM(6, 0xAC) /* Redwood Cove / Crestmont */
#define INTEL_METEORLAKE_L IFM(6, 0xAA)
-#define INTEL_ARROWLAKE_H IFM(6, 0xC5)
+#define INTEL_ARROWLAKE_H IFM(6, 0xC5) /* Lion Cove / Skymont */
#define INTEL_ARROWLAKE IFM(6, 0xC6)
#define INTEL_ARROWLAKE_U IFM(6, 0xB5)
-#define INTEL_LUNARLAKE_M IFM(6, 0xBD)
+#define INTEL_LUNARLAKE_M IFM(6, 0xBD) /* Lion Cove / Skymont */
-#define INTEL_PANTHERLAKE_L IFM(6, 0xCC)
+#define INTEL_PANTHERLAKE_L IFM(6, 0xCC) /* Cougar Cove / Crestmont */
/* "Small Core" Processors (Atom/E-Core) */
@@ -149,9 +160,9 @@
#define INTEL_ATOM_SILVERMONT IFM(6, 0x37) /* Bay Trail, Valleyview */
#define INTEL_ATOM_SILVERMONT_D IFM(6, 0x4D) /* Avaton, Rangely */
#define INTEL_ATOM_SILVERMONT_MID IFM(6, 0x4A) /* Merriefield */
+#define INTEL_ATOM_SILVERMONT_MID2 IFM(6, 0x5A) /* Anniedale */
#define INTEL_ATOM_AIRMONT IFM(6, 0x4C) /* Cherry Trail, Braswell */
-#define INTEL_ATOM_AIRMONT_MID IFM(6, 0x5A) /* Moorefield */
#define INTEL_ATOM_AIRMONT_NP IFM(6, 0x75) /* Lightning Mountain */
#define INTEL_ATOM_GOLDMONT IFM(6, 0x5C) /* Apollo Lake */
@@ -176,16 +187,35 @@
#define INTEL_XEON_PHI_KNL IFM(6, 0x57) /* Knights Landing */
#define INTEL_XEON_PHI_KNM IFM(6, 0x85) /* Knights Mill */
-/* Family 5 */
-#define INTEL_QUARK_X1000 IFM(5, 0x09) /* Quark X1000 SoC */
+/* Notational marker denoting the last Family 6 model */
+#define INTEL_FAM6_LAST IFM(6, 0xFF)
+
+/* Family 15 - NetBurst */
+#define INTEL_P4_WILLAMETTE IFM(15, 0x01) /* Also Xeon Foster */
+#define INTEL_P4_PRESCOTT IFM(15, 0x03)
+#define INTEL_P4_PRESCOTT_2M IFM(15, 0x04)
+#define INTEL_P4_CEDARMILL IFM(15, 0x06) /* Also Xeon Dempsey */
/* Family 19 */
#define INTEL_PANTHERCOVE_X IFM(19, 0x01) /* Diamond Rapids */
-/* CPU core types */
+/*
+ * Intel CPU core types
+ *
+ * CPUID.1AH.EAX[31:0] uniquely identifies the microarchitecture
+ * of the core. Bits [31:24] indicate its core type (Core or Atom)
+ * and bits [23:0] indicate the native model ID of the core.
+ * Core type and native model ID are defined in the enumerations below.
+ */
enum intel_cpu_type {
+ INTEL_CPU_TYPE_UNKNOWN,
INTEL_CPU_TYPE_ATOM = 0x20,
INTEL_CPU_TYPE_CORE = 0x40,
};
+enum intel_native_id {
+ INTEL_ATOM_CMT_NATIVE_ID = 0x2, /* Crestmont */
+ INTEL_ATOM_SKT_NATIVE_ID = 0x3, /* Skymont */
+};
+
#endif /* _ASM_X86_INTEL_FAMILY_H */
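An illustrative decode of CPUID.1AH.EAX following the layout described above (sketch only; the helper, mask and function names are assumptions, not part of the patch, and real callers first check that leaf 0x1A and hybrid enumeration are available):

#include <linux/bits.h>
#include <asm/cpuid/api.h>
#include <asm/intel-family.h>

static void example_decode_core_type(void)
{
	u32 eax = cpuid_eax(0x1a);
	enum intel_cpu_type type = eax >> 24;		/* bits [31:24] */
	u32 native_id = eax & GENMASK(23, 0);		/* bits [23:0]  */

	if (type == INTEL_CPU_TYPE_ATOM && native_id == INTEL_ATOM_SKT_NATIVE_ID)
		; /* Skymont E-core */
}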
diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
index ed580c7f9d0a..1a0dc2b2bf5b 100644
--- a/arch/x86/include/asm/io.h
+++ b/arch/x86/include/asm/io.h
@@ -175,6 +175,9 @@ extern void __iomem *ioremap_prot(resource_size_t offset, unsigned long size, un
extern void __iomem *ioremap_encrypted(resource_size_t phys_addr, unsigned long size);
#define ioremap_encrypted ioremap_encrypted
+void *arch_memremap_wb(phys_addr_t phys_addr, size_t size, unsigned long flags);
+#define arch_memremap_wb arch_memremap_wb
+
/**
* ioremap - map bus memory into CPU space
* @offset: bus address of the memory
diff --git a/arch/x86/include/asm/irq_stack.h b/arch/x86/include/asm/irq_stack.h
index 562a547c29a5..735c3a491f60 100644
--- a/arch/x86/include/asm/irq_stack.h
+++ b/arch/x86/include/asm/irq_stack.h
@@ -116,7 +116,7 @@
ASM_CALL_ARG2
#define call_on_irqstack(func, asm_call, argconstr...) \
- call_on_stack(__this_cpu_read(pcpu_hot.hardirq_stack_ptr), \
+ call_on_stack(__this_cpu_read(hardirq_stack_ptr), \
func, asm_call, argconstr)
/* Macros to assert type correctness for run_*_on_irqstack macros */
@@ -135,7 +135,7 @@
* User mode entry and interrupt on the irq stack do not \
* switch stacks. If from user mode the task stack is empty. \
*/ \
- if (user_mode(regs) || __this_cpu_read(pcpu_hot.hardirq_stack_inuse)) { \
+ if (user_mode(regs) || __this_cpu_read(hardirq_stack_inuse)) { \
irq_enter_rcu(); \
func(c_args); \
irq_exit_rcu(); \
@@ -146,9 +146,9 @@
* places. Invoke the stack switch macro with the call \
* sequence which matches the above direct invocation. \
*/ \
- __this_cpu_write(pcpu_hot.hardirq_stack_inuse, true); \
+ __this_cpu_write(hardirq_stack_inuse, true); \
call_on_irqstack(func, asm_call, constr); \
- __this_cpu_write(pcpu_hot.hardirq_stack_inuse, false); \
+ __this_cpu_write(hardirq_stack_inuse, false); \
} \
}
@@ -212,9 +212,9 @@
*/
#define do_softirq_own_stack() \
{ \
- __this_cpu_write(pcpu_hot.hardirq_stack_inuse, true); \
+ __this_cpu_write(hardirq_stack_inuse, true); \
call_on_irqstack(__do_softirq, ASM_CALL_ARG0); \
- __this_cpu_write(pcpu_hot.hardirq_stack_inuse, false); \
+ __this_cpu_write(hardirq_stack_inuse, false); \
}
#endif
diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
index cf7fc2b8e3ce..abb8374c9ff7 100644
--- a/arch/x86/include/asm/irqflags.h
+++ b/arch/x86/include/asm/irqflags.h
@@ -4,7 +4,7 @@
#include <asm/processor-flags.h>
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <asm/nospec-branch.h>
@@ -79,7 +79,7 @@ static __always_inline void native_local_irq_restore(unsigned long flags)
#ifdef CONFIG_PARAVIRT_XXL
#include <asm/paravirt.h>
#else
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <linux/types.h>
static __always_inline unsigned long arch_local_save_flags(void)
@@ -133,10 +133,10 @@ static __always_inline unsigned long arch_local_irq_save(void)
#endif
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif /* CONFIG_PARAVIRT_XXL */
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
static __always_inline int arch_irqs_disabled_flags(unsigned long flags)
{
return !(flags & X86_EFLAGS_IF);
@@ -154,6 +154,6 @@ static __always_inline void arch_local_irq_restore(unsigned long flags)
if (!arch_irqs_disabled_flags(flags))
arch_local_irq_enable();
}
-#endif /* !__ASSEMBLY__ */
+#endif /* !__ASSEMBLER__ */
#endif
diff --git a/arch/x86/include/asm/jump_label.h b/arch/x86/include/asm/jump_label.h
index 3f1c1d6c0da1..61dd1dee7812 100644
--- a/arch/x86/include/asm/jump_label.h
+++ b/arch/x86/include/asm/jump_label.h
@@ -7,7 +7,7 @@
#include <asm/asm.h>
#include <asm/nops.h>
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <linux/stringify.h>
#include <linux/types.h>
@@ -55,6 +55,6 @@ l_yes:
extern int arch_jump_entry_size(struct jump_entry *entry);
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif
diff --git a/arch/x86/include/asm/kasan.h b/arch/x86/include/asm/kasan.h
index de75306b932e..d7e33c7f096b 100644
--- a/arch/x86/include/asm/kasan.h
+++ b/arch/x86/include/asm/kasan.h
@@ -23,7 +23,7 @@
(1ULL << (__VIRTUAL_MASK_SHIFT - \
KASAN_SHADOW_SCALE_SHIFT)))
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#ifdef CONFIG_KASAN
void __init kasan_early_init(void);
diff --git a/arch/x86/include/asm/kexec.h b/arch/x86/include/asm/kexec.h
index 8ad187462b68..5432457d2338 100644
--- a/arch/x86/include/asm/kexec.h
+++ b/arch/x86/include/asm/kexec.h
@@ -13,11 +13,12 @@
# define KEXEC_CONTROL_PAGE_SIZE 4096
# define KEXEC_CONTROL_CODE_MAX_SIZE 2048
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <linux/string.h>
#include <linux/kernel.h>
+#include <asm/asm.h>
#include <asm/page.h>
#include <asm/ptrace.h>
@@ -71,41 +72,32 @@ static inline void crash_setup_regs(struct pt_regs *newregs,
if (oldregs) {
memcpy(newregs, oldregs, sizeof(*newregs));
} else {
+ asm volatile("mov %%" _ASM_BX ",%0" : "=m"(newregs->bx));
+ asm volatile("mov %%" _ASM_CX ",%0" : "=m"(newregs->cx));
+ asm volatile("mov %%" _ASM_DX ",%0" : "=m"(newregs->dx));
+ asm volatile("mov %%" _ASM_SI ",%0" : "=m"(newregs->si));
+ asm volatile("mov %%" _ASM_DI ",%0" : "=m"(newregs->di));
+ asm volatile("mov %%" _ASM_BP ",%0" : "=m"(newregs->bp));
+ asm volatile("mov %%" _ASM_AX ",%0" : "=m"(newregs->ax));
+ asm volatile("mov %%" _ASM_SP ",%0" : "=m"(newregs->sp));
+#ifdef CONFIG_X86_64
+ asm volatile("mov %%r8,%0" : "=m"(newregs->r8));
+ asm volatile("mov %%r9,%0" : "=m"(newregs->r9));
+ asm volatile("mov %%r10,%0" : "=m"(newregs->r10));
+ asm volatile("mov %%r11,%0" : "=m"(newregs->r11));
+ asm volatile("mov %%r12,%0" : "=m"(newregs->r12));
+ asm volatile("mov %%r13,%0" : "=m"(newregs->r13));
+ asm volatile("mov %%r14,%0" : "=m"(newregs->r14));
+ asm volatile("mov %%r15,%0" : "=m"(newregs->r15));
+#endif
+ asm volatile("mov %%ss,%k0" : "=a"(newregs->ss));
+ asm volatile("mov %%cs,%k0" : "=a"(newregs->cs));
#ifdef CONFIG_X86_32
- asm volatile("movl %%ebx,%0" : "=m"(newregs->bx));
- asm volatile("movl %%ecx,%0" : "=m"(newregs->cx));
- asm volatile("movl %%edx,%0" : "=m"(newregs->dx));
- asm volatile("movl %%esi,%0" : "=m"(newregs->si));
- asm volatile("movl %%edi,%0" : "=m"(newregs->di));
- asm volatile("movl %%ebp,%0" : "=m"(newregs->bp));
- asm volatile("movl %%eax,%0" : "=m"(newregs->ax));
- asm volatile("movl %%esp,%0" : "=m"(newregs->sp));
- asm volatile("movl %%ss, %%eax;" :"=a"(newregs->ss));
- asm volatile("movl %%cs, %%eax;" :"=a"(newregs->cs));
- asm volatile("movl %%ds, %%eax;" :"=a"(newregs->ds));
- asm volatile("movl %%es, %%eax;" :"=a"(newregs->es));
- asm volatile("pushfl; popl %0" :"=m"(newregs->flags));
-#else
- asm volatile("movq %%rbx,%0" : "=m"(newregs->bx));
- asm volatile("movq %%rcx,%0" : "=m"(newregs->cx));
- asm volatile("movq %%rdx,%0" : "=m"(newregs->dx));
- asm volatile("movq %%rsi,%0" : "=m"(newregs->si));
- asm volatile("movq %%rdi,%0" : "=m"(newregs->di));
- asm volatile("movq %%rbp,%0" : "=m"(newregs->bp));
- asm volatile("movq %%rax,%0" : "=m"(newregs->ax));
- asm volatile("movq %%rsp,%0" : "=m"(newregs->sp));
- asm volatile("movq %%r8,%0" : "=m"(newregs->r8));
- asm volatile("movq %%r9,%0" : "=m"(newregs->r9));
- asm volatile("movq %%r10,%0" : "=m"(newregs->r10));
- asm volatile("movq %%r11,%0" : "=m"(newregs->r11));
- asm volatile("movq %%r12,%0" : "=m"(newregs->r12));
- asm volatile("movq %%r13,%0" : "=m"(newregs->r13));
- asm volatile("movq %%r14,%0" : "=m"(newregs->r14));
- asm volatile("movq %%r15,%0" : "=m"(newregs->r15));
- asm volatile("movl %%ss, %%eax;" :"=a"(newregs->ss));
- asm volatile("movl %%cs, %%eax;" :"=a"(newregs->cs));
- asm volatile("pushfq; popq %0" :"=m"(newregs->flags));
+ asm volatile("mov %%ds,%k0" : "=a"(newregs->ds));
+ asm volatile("mov %%es,%k0" : "=a"(newregs->es));
#endif
+ asm volatile("pushf\n\t"
+ "pop %0" : "=m"(newregs->flags));
newregs->ip = _THIS_IP_;
}
}
@@ -225,6 +217,6 @@ unsigned int arch_crash_get_elfcorehdr_size(void);
#define crash_get_elfcorehdr_size arch_crash_get_elfcorehdr_size
#endif
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif /* _ASM_X86_KEXEC_H */
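For reference (not part of the patch), the unified mov sequence above works on both word sizes because the _ASM_* names from <asm/asm.h> expand to the matching register string; a simplified sketch of that expansion, under the assumption that only the 32-/64-bit selection matters here:

/* Simplified sketch of how _ASM_BX resolves; see <asm/asm.h> for the real macros */
#ifdef CONFIG_X86_64
# define _ASM_BX "rbx"		/* "mov %%" _ASM_BX ",%0" -> "mov %%rbx,%0" */
#else
# define _ASM_BX "ebx"		/* "mov %%" _ASM_BX ",%0" -> "mov %%ebx,%0" */
#endif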
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 0b7af5902ff7..a884ab544335 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -27,6 +27,7 @@
#include <linux/kfifo.h>
#include <linux/sched/vhost_task.h>
#include <linux/call_once.h>
+#include <linux/atomic.h>
#include <asm/apic.h>
#include <asm/pvclock-abi.h>
@@ -405,7 +406,7 @@ union kvm_cpu_role {
};
struct kvm_rmap_head {
- unsigned long val;
+ atomic_long_t val;
};
struct kvm_pio_request {
@@ -780,6 +781,7 @@ struct kvm_vcpu_arch {
u32 pkru;
u32 hflags;
u64 efer;
+ u64 host_debugctl;
u64 apic_base;
struct kvm_lapic *apic; /* kernel irqchip context */
bool load_eoi_exitmap_pending;
@@ -879,6 +881,7 @@ struct kvm_vcpu_arch {
int cpuid_nent;
struct kvm_cpuid_entry2 *cpuid_entries;
+ bool cpuid_dynamic_bits_dirty;
bool is_amd_compatible;
/*
@@ -908,7 +911,8 @@ struct kvm_vcpu_arch {
int (*complete_userspace_io)(struct kvm_vcpu *vcpu);
gpa_t time;
- struct pvclock_vcpu_time_info hv_clock;
+ s8 pvclock_tsc_shift;
+ u32 pvclock_tsc_mul;
unsigned int hw_tsc_khz;
struct gfn_to_pfn_cache pv_time;
/* set guest stopped flag in pvclock flags field */
@@ -996,8 +1000,8 @@ struct kvm_vcpu_arch {
u64 msr_int_val; /* MSR_KVM_ASYNC_PF_INT */
u16 vec;
u32 id;
- bool send_user_only;
u32 host_apf_flags;
+ bool send_always;
bool delivery_as_pf_vmexit;
bool pageready_pending;
} apf;
@@ -1052,6 +1056,7 @@ struct kvm_vcpu_arch {
/* Protected Guests */
bool guest_state_protected;
+ bool guest_tsc_protected;
/*
* Set when PDPTS were loaded directly by the userspace without
@@ -1188,6 +1193,8 @@ struct kvm_xen {
struct gfn_to_pfn_cache shinfo_cache;
struct idr evtchn_ports;
unsigned long poll_mask[BITS_TO_LONGS(KVM_MAX_VCPUS)];
+
+ struct kvm_xen_hvm_config hvm_config;
};
#endif
@@ -1353,8 +1360,6 @@ struct kvm_arch {
u64 shadow_mmio_value;
- struct iommu_domain *iommu_domain;
- bool iommu_noncoherent;
#define __KVM_HAVE_ARCH_NONCOHERENT_DMA
atomic_t noncoherent_dma_count;
#define __KVM_HAVE_ARCH_ASSIGNED_DEVICE
@@ -1410,8 +1415,6 @@ struct kvm_arch {
struct delayed_work kvmclock_update_work;
struct delayed_work kvmclock_sync_work;
- struct kvm_xen_hvm_config xen_hvm_config;
-
/* reads protected by irq_srcu, writes by irq_lock */
struct hlist_head mask_notifier_list;
@@ -1478,6 +1481,7 @@ struct kvm_arch {
* tdp_mmu_page set.
*
* For reads, this list is protected by:
+ * RCU alone or
* the MMU lock in read mode + RCU or
* the MMU lock in write mode
*
@@ -2163,8 +2167,8 @@ int kvm_emulate_rdpmc(struct kvm_vcpu *vcpu);
void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr);
void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
void kvm_queue_exception_p(struct kvm_vcpu *vcpu, unsigned nr, unsigned long payload);
-void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr);
-void kvm_requeue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
+void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned int nr,
+ bool has_error_code, u32 error_code);
void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault);
void kvm_inject_emulated_page_fault(struct kvm_vcpu *vcpu,
struct x86_exception *fault);
diff --git a/arch/x86/include/asm/linkage.h b/arch/x86/include/asm/linkage.h
index dc31b13b87a0..b51d8a4673f5 100644
--- a/arch/x86/include/asm/linkage.h
+++ b/arch/x86/include/asm/linkage.h
@@ -38,7 +38,7 @@
#define ASM_FUNC_ALIGN __stringify(__FUNC_ALIGN)
#define SYM_F_ALIGN __FUNC_ALIGN
-#ifdef __ASSEMBLY__
+#ifdef __ASSEMBLER__
#if defined(CONFIG_MITIGATION_RETHUNK) && !defined(__DISABLE_EXPORTS) && !defined(BUILD_VDSO)
#define RET jmp __x86_return_thunk
@@ -50,7 +50,7 @@
#endif
#endif /* CONFIG_MITIGATION_RETPOLINE */
-#else /* __ASSEMBLY__ */
+#else /* __ASSEMBLER__ */
#if defined(CONFIG_MITIGATION_RETHUNK) && !defined(__DISABLE_EXPORTS) && !defined(BUILD_VDSO)
#define ASM_RET "jmp __x86_return_thunk\n\t"
@@ -62,7 +62,7 @@
#endif
#endif /* CONFIG_MITIGATION_RETPOLINE */
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
/*
* Depending on -fpatchable-function-entry=N,N usage (CONFIG_CALL_PADDING) the
@@ -119,33 +119,27 @@
/* SYM_FUNC_START -- use for global functions */
#define SYM_FUNC_START(name) \
- SYM_START(name, SYM_L_GLOBAL, SYM_F_ALIGN) \
- ENDBR
+ SYM_START(name, SYM_L_GLOBAL, SYM_F_ALIGN)
/* SYM_FUNC_START_NOALIGN -- use for global functions, w/o alignment */
#define SYM_FUNC_START_NOALIGN(name) \
- SYM_START(name, SYM_L_GLOBAL, SYM_A_NONE) \
- ENDBR
+ SYM_START(name, SYM_L_GLOBAL, SYM_A_NONE)
/* SYM_FUNC_START_LOCAL -- use for local functions */
#define SYM_FUNC_START_LOCAL(name) \
- SYM_START(name, SYM_L_LOCAL, SYM_F_ALIGN) \
- ENDBR
+ SYM_START(name, SYM_L_LOCAL, SYM_F_ALIGN)
/* SYM_FUNC_START_LOCAL_NOALIGN -- use for local functions, w/o alignment */
#define SYM_FUNC_START_LOCAL_NOALIGN(name) \
- SYM_START(name, SYM_L_LOCAL, SYM_A_NONE) \
- ENDBR
+ SYM_START(name, SYM_L_LOCAL, SYM_A_NONE)
/* SYM_FUNC_START_WEAK -- use for weak functions */
#define SYM_FUNC_START_WEAK(name) \
- SYM_START(name, SYM_L_WEAK, SYM_F_ALIGN) \
- ENDBR
+ SYM_START(name, SYM_L_WEAK, SYM_F_ALIGN)
/* SYM_FUNC_START_WEAK_NOALIGN -- use for weak functions, w/o alignment */
#define SYM_FUNC_START_WEAK_NOALIGN(name) \
- SYM_START(name, SYM_L_WEAK, SYM_A_NONE) \
- ENDBR
+ SYM_START(name, SYM_L_WEAK, SYM_A_NONE)
#endif /* _ASM_X86_LINKAGE_H */
diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h
index eb2db07ef39c..6c77c03139f7 100644
--- a/arch/x86/include/asm/mce.h
+++ b/arch/x86/include/asm/mce.h
@@ -296,8 +296,6 @@ enum mcp_flags {
void machine_check_poll(enum mcp_flags flags, mce_banks_t *b);
-bool mce_notify_irq(void);
-
DECLARE_PER_CPU(struct mce, injectm);
/* Disable CMCI/polling for MCA bank claimed by firmware */
diff --git a/arch/x86/include/asm/mem_encrypt.h b/arch/x86/include/asm/mem_encrypt.h
index f922b682b9b4..1530ee301dfe 100644
--- a/arch/x86/include/asm/mem_encrypt.h
+++ b/arch/x86/include/asm/mem_encrypt.h
@@ -10,7 +10,7 @@
#ifndef __X86_MEM_ENCRYPT_H__
#define __X86_MEM_ENCRYPT_H__
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <linux/init.h>
#include <linux/cc_platform.h>
@@ -114,6 +114,6 @@ void add_encrypt_protection_map(void);
extern char __start_bss_decrypted[], __end_bss_decrypted[], __start_bss_decrypted_unused[];
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif /* __X86_MEM_ENCRYPT_H__ */
diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
index 3b496cdcb74b..8b8055a8eb9e 100644
--- a/arch/x86/include/asm/mmu.h
+++ b/arch/x86/include/asm/mmu.h
@@ -69,6 +69,18 @@ typedef struct {
u16 pkey_allocation_map;
s16 execute_only_pkey;
#endif
+
+#ifdef CONFIG_BROADCAST_TLB_FLUSH
+ /*
+ * The global ASID will be a non-zero value when the process has
+ * the same ASID across all CPUs, allowing it to make use of
+ * hardware-assisted remote TLB invalidation like AMD INVLPGB.
+ */
+ u16 global_asid;
+
+ /* The process is transitioning to a new global ASID number. */
+ bool asid_transition;
+#endif
} mm_context_t;
#define INIT_MM_CONTEXT(mm) \
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
index 795fdd53bd0a..2398058b6e83 100644
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -2,7 +2,6 @@
#ifndef _ASM_X86_MMU_CONTEXT_H
#define _ASM_X86_MMU_CONTEXT_H
-#include <asm/desc.h>
#include <linux/atomic.h>
#include <linux/mm_types.h>
#include <linux/pkeys.h>
@@ -13,6 +12,7 @@
#include <asm/paravirt.h>
#include <asm/debugreg.h>
#include <asm/gsseg.h>
+#include <asm/desc.h>
extern atomic64_t last_mm_ctx_id;
@@ -139,6 +139,11 @@ static inline void mm_reset_untag_mask(struct mm_struct *mm)
#define enter_lazy_tlb enter_lazy_tlb
extern void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk);
+#define mm_init_global_asid mm_init_global_asid
+extern void mm_init_global_asid(struct mm_struct *mm);
+
+extern void mm_free_global_asid(struct mm_struct *mm);
+
/*
* Init a new mm. Used on mm copies, like at fork()
* and on mm's that are brand-new, like at execve().
@@ -161,6 +166,8 @@ static inline int init_new_context(struct task_struct *tsk,
mm->context.execute_only_pkey = -1;
}
#endif
+
+ mm_init_global_asid(mm);
mm_reset_untag_mask(mm);
init_new_context_ldt(mm);
return 0;
@@ -170,6 +177,7 @@ static inline int init_new_context(struct task_struct *tsk,
static inline void destroy_context(struct mm_struct *mm)
{
destroy_context_ldt(mm);
+ mm_free_global_asid(mm);
}
extern void switch_mm(struct mm_struct *prev, struct mm_struct *next,
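
mm_init_global_asid() and mm_free_global_asid() bracket the lifetime of the per-mm global ASID added to mm_context_t above, used for INVLPGB-style broadcast TLB invalidation. A hedged sketch of a consumer-side check; the helper below is hypothetical and only reads the new fields:

	/* Hypothetical helper: true once this mm owns a process-wide ASID. */
	static inline bool mm_has_global_asid(struct mm_struct *mm)
	{
	#ifdef CONFIG_BROADCAST_TLB_FLUSH
		return READ_ONCE(mm->context.global_asid) != 0;
	#else
		return false;
	#endif
	}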
diff --git a/arch/x86/include/asm/mshyperv.h b/arch/x86/include/asm/mshyperv.h
index f91ab1e75f9f..bab5ccfc60a7 100644
--- a/arch/x86/include/asm/mshyperv.h
+++ b/arch/x86/include/asm/mshyperv.h
@@ -43,8 +43,6 @@ extern bool hyperv_paravisor_present;
extern void *hv_hypercall_pg;
-extern u64 hv_current_partition_id;
-
extern union hv_ghcb * __percpu *hv_ghcb_pg;
bool hv_isolation_type_snp(void);
@@ -58,10 +56,6 @@ u64 hv_tdx_hypercall(u64 control, u64 param1, u64 param2);
#define HV_AP_INIT_GPAT_DEFAULT 0x0007040600070406ULL
#define HV_AP_SEGMENT_LIMIT 0xffffffff
-int hv_call_deposit_pages(int node, u64 partition_id, u32 num_pages);
-int hv_call_add_logical_proc(int node, u32 lp_index, u32 acpi_id);
-int hv_call_create_vp(int node, u64 partition_id, u32 vp_index, u32 flags);
-
/*
* If the hypercall involves no input or output parameters, the hypervisor
* ignores the corresponding GPA pointer.
@@ -77,11 +71,11 @@ static inline u64 hv_do_hypercall(u64 control, void *input, void *output)
return hv_tdx_hypercall(control, input_address, output_address);
if (hv_isolation_type_snp() && !hyperv_paravisor_present) {
- __asm__ __volatile__("mov %4, %%r8\n"
+ __asm__ __volatile__("mov %[output_address], %%r8\n"
"vmmcall"
: "=a" (hv_status), ASM_CALL_CONSTRAINT,
"+c" (control), "+d" (input_address)
- : "r" (output_address)
+ : [output_address] "r" (output_address)
: "cc", "memory", "r8", "r9", "r10", "r11");
return hv_status;
}
@@ -89,12 +83,12 @@ static inline u64 hv_do_hypercall(u64 control, void *input, void *output)
if (!hv_hypercall_pg)
return U64_MAX;
- __asm__ __volatile__("mov %4, %%r8\n"
+ __asm__ __volatile__("mov %[output_address], %%r8\n"
CALL_NOSPEC
: "=a" (hv_status), ASM_CALL_CONSTRAINT,
"+c" (control), "+d" (input_address)
- : "r" (output_address),
- THUNK_TARGET(hv_hypercall_pg)
+ : [output_address] "r" (output_address),
+ THUNK_TARGET(hv_hypercall_pg)
: "cc", "memory", "r8", "r9", "r10", "r11");
#else
u32 input_address_hi = upper_32_bits(input_address);
@@ -160,7 +154,7 @@ static inline u64 _hv_do_fast_hypercall8(u64 control, u64 input1)
: "cc", "edi", "esi");
}
#endif
- return hv_status;
+ return hv_status;
}
static inline u64 hv_do_fast_hypercall8(u16 code, u64 input1)
@@ -187,18 +181,18 @@ static inline u64 _hv_do_fast_hypercall16(u64 control, u64 input1, u64 input2)
return hv_tdx_hypercall(control, input1, input2);
if (hv_isolation_type_snp() && !hyperv_paravisor_present) {
- __asm__ __volatile__("mov %4, %%r8\n"
+ __asm__ __volatile__("mov %[input2], %%r8\n"
"vmmcall"
: "=a" (hv_status), ASM_CALL_CONSTRAINT,
"+c" (control), "+d" (input1)
- : "r" (input2)
+ : [input2] "r" (input2)
: "cc", "r8", "r9", "r10", "r11");
} else {
- __asm__ __volatile__("mov %4, %%r8\n"
+ __asm__ __volatile__("mov %[input2], %%r8\n"
CALL_NOSPEC
: "=a" (hv_status), ASM_CALL_CONSTRAINT,
"+c" (control), "+d" (input1)
- : "r" (input2),
+ : [input2] "r" (input2),
THUNK_TARGET(hv_hypercall_pg)
: "cc", "r8", "r9", "r10", "r11");
}
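
The hypercall wrappers switch from positional operands such as %4 to named operands, so the asm templates stay correct if the operand lists are ever reordered. A self-contained sketch of the same idiom outside of Hyper-V code:

	static inline unsigned long masked_or(unsigned long val, unsigned long mask)
	{
		unsigned long out;

		/* %[val]/%[mask] name the operands instead of relying on position. */
		asm("mov %[val], %[out]\n\t"
		    "or %[mask], %[out]"
		    : [out] "=&r" (out)
		    : [val] "r" (val), [mask] "r" (mask));
		return out;
	}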
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 72765b2fe0d8..e6134ef2263d 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -25,6 +25,7 @@
#define _EFER_SVME 12 /* Enable virtualization */
#define _EFER_LMSLE 13 /* Long Mode Segment Limit Enable */
#define _EFER_FFXSR 14 /* Enable Fast FXSAVE/FXRSTOR */
+#define _EFER_TCE 15 /* Enable Translation Cache Extensions */
#define _EFER_AUTOIBRS 21 /* Enable Automatic IBRS */
#define EFER_SCE (1<<_EFER_SCE)
@@ -34,6 +35,7 @@
#define EFER_SVME (1<<_EFER_SVME)
#define EFER_LMSLE (1<<_EFER_LMSLE)
#define EFER_FFXSR (1<<_EFER_FFXSR)
+#define EFER_TCE (1<<_EFER_TCE)
#define EFER_AUTOIBRS (1<<_EFER_AUTOIBRS)
/*
@@ -701,15 +703,17 @@
#define MSR_AMD_CPPC_REQ 0xc00102b3
#define MSR_AMD_CPPC_STATUS 0xc00102b4
-#define AMD_CPPC_LOWEST_PERF(x) (((x) >> 0) & 0xff)
-#define AMD_CPPC_LOWNONLIN_PERF(x) (((x) >> 8) & 0xff)
-#define AMD_CPPC_NOMINAL_PERF(x) (((x) >> 16) & 0xff)
-#define AMD_CPPC_HIGHEST_PERF(x) (((x) >> 24) & 0xff)
+/* Masks for use with MSR_AMD_CPPC_CAP1 */
+#define AMD_CPPC_LOWEST_PERF_MASK GENMASK(7, 0)
+#define AMD_CPPC_LOWNONLIN_PERF_MASK GENMASK(15, 8)
+#define AMD_CPPC_NOMINAL_PERF_MASK GENMASK(23, 16)
+#define AMD_CPPC_HIGHEST_PERF_MASK GENMASK(31, 24)
-#define AMD_CPPC_MAX_PERF(x) (((x) & 0xff) << 0)
-#define AMD_CPPC_MIN_PERF(x) (((x) & 0xff) << 8)
-#define AMD_CPPC_DES_PERF(x) (((x) & 0xff) << 16)
-#define AMD_CPPC_ENERGY_PERF_PREF(x) (((x) & 0xff) << 24)
+/* Masks for use with MSR_AMD_CPPC_REQ */
+#define AMD_CPPC_MAX_PERF_MASK GENMASK(7, 0)
+#define AMD_CPPC_MIN_PERF_MASK GENMASK(15, 8)
+#define AMD_CPPC_DES_PERF_MASK GENMASK(23, 16)
+#define AMD_CPPC_EPP_PERF_MASK GENMASK(31, 24)
/* AMD Performance Counter Global Status and Control MSRs */
#define MSR_AMD64_PERF_CNTR_GLOBAL_STATUS 0xc0000300
@@ -721,6 +725,7 @@
/* Zen4 */
#define MSR_ZEN4_BP_CFG 0xc001102e
+#define MSR_ZEN4_BP_CFG_BP_SPEC_REDUCE_BIT 4
#define MSR_ZEN4_BP_CFG_SHARED_BTB_FIX_BIT 5
/* Fam 19h MSRs */
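
With the CPPC fields expressed as GENMASK() masks, users are expected to go through the bitfield helpers instead of the removed shift macros. A hedged sketch assuming <linux/bitfield.h>; the function names are illustrative:

	#include <linux/bitfield.h>

	static u8 cppc_highest_perf(u64 cap1)
	{
		/* Replaces the old AMD_CPPC_HIGHEST_PERF(cap1). */
		return FIELD_GET(AMD_CPPC_HIGHEST_PERF_MASK, cap1);
	}

	static u64 cppc_build_req(u8 min, u8 max, u8 epp)
	{
		/* Replaces the AMD_CPPC_MIN_PERF()/MAX_PERF()/ENERGY_PERF_PREF() shifts. */
		return FIELD_PREP(AMD_CPPC_MIN_PERF_MASK, min) |
		       FIELD_PREP(AMD_CPPC_MAX_PERF_MASK, max) |
		       FIELD_PREP(AMD_CPPC_EPP_PERF_MASK, epp);
	}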
diff --git a/arch/x86/include/asm/msr.h b/arch/x86/include/asm/msr.h
index 001853541f1e..9397a319d165 100644
--- a/arch/x86/include/asm/msr.h
+++ b/arch/x86/include/asm/msr.h
@@ -4,7 +4,7 @@
#include "msr-index.h"
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <asm/asm.h>
#include <asm/errno.h>
@@ -397,5 +397,5 @@ static inline int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8])
return wrmsr_safe_regs(regs);
}
#endif /* CONFIG_SMP */
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif /* _ASM_X86_MSR_H */
diff --git a/arch/x86/include/asm/nmi.h b/arch/x86/include/asm/nmi.h
index 41a0ebb699ec..f677382093f3 100644
--- a/arch/x86/include/asm/nmi.h
+++ b/arch/x86/include/asm/nmi.h
@@ -56,6 +56,8 @@ int __register_nmi_handler(unsigned int, struct nmiaction *);
void unregister_nmi_handler(unsigned int, const char *);
+void set_emergency_nmi_handler(unsigned int type, nmi_handler_t handler);
+
void stop_nmi(void);
void restart_nmi(void);
void local_touch_nmi(void);
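
set_emergency_nmi_handler() gives crash and shutdown paths a way to install a last-resort handler without the normal registration bookkeeping. A hedged sketch of a caller; the function names are hypothetical:

	static int quiesce_nmi(unsigned int type, struct pt_regs *regs)
	{
		/* Swallow NMIs while the machine is going down. */
		return NMI_HANDLED;
	}

	static void prepare_for_crash_kernel(void)
	{
		set_emergency_nmi_handler(NMI_LOCAL, quiesce_nmi);
	}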
diff --git a/arch/x86/include/asm/nops.h b/arch/x86/include/asm/nops.h
index 1c1b7550fa55..cd94221d8335 100644
--- a/arch/x86/include/asm/nops.h
+++ b/arch/x86/include/asm/nops.h
@@ -82,7 +82,7 @@
#define ASM_NOP7 _ASM_BYTES(BYTES_NOP7)
#define ASM_NOP8 _ASM_BYTES(BYTES_NOP8)
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
extern const unsigned char * const x86_nops[];
#endif
diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
index 7e8bf78c03d5..e4d11e3318f0 100644
--- a/arch/x86/include/asm/nospec-branch.h
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -12,7 +12,6 @@
#include <asm/msr-index.h>
#include <asm/unwind_hints.h>
#include <asm/percpu.h>
-#include <asm/current.h>
/*
* Call depth tracking for Intel SKL CPUs to address the RSB underflow
@@ -78,21 +77,21 @@
#include <asm/asm-offsets.h>
#define CREDIT_CALL_DEPTH \
- movq $-1, PER_CPU_VAR(pcpu_hot + X86_call_depth);
+ movq $-1, PER_CPU_VAR(__x86_call_depth);
#define RESET_CALL_DEPTH \
xor %eax, %eax; \
bts $63, %rax; \
- movq %rax, PER_CPU_VAR(pcpu_hot + X86_call_depth);
+ movq %rax, PER_CPU_VAR(__x86_call_depth);
#define RESET_CALL_DEPTH_FROM_CALL \
movb $0xfc, %al; \
shl $56, %rax; \
- movq %rax, PER_CPU_VAR(pcpu_hot + X86_call_depth); \
+ movq %rax, PER_CPU_VAR(__x86_call_depth); \
CALL_THUNKS_DEBUG_INC_CALLS
#define INCREMENT_CALL_DEPTH \
- sarq $5, PER_CPU_VAR(pcpu_hot + X86_call_depth); \
+ sarq $5, PER_CPU_VAR(__x86_call_depth); \
CALL_THUNKS_DEBUG_INC_CALLS
#else
@@ -177,7 +176,7 @@
add $(BITS_PER_LONG/8), %_ASM_SP; \
lfence;
-#ifdef __ASSEMBLY__
+#ifdef __ASSEMBLER__
/*
* (ab)use RETPOLINE_SAFE on RET to annotate away 'bare' RET instructions
@@ -198,9 +197,8 @@
.endm
/*
- * Equivalent to -mindirect-branch-cs-prefix; emit the 5 byte jmp/call
- * to the retpoline thunk with a CS prefix when the register requires
- * a RAX prefix byte to encode. Also see apply_retpolines().
+ * Emits a conditional CS prefix that is compatible with
+ * -mindirect-branch-cs-prefix.
*/
.macro __CS_PREFIX reg:req
.irp rs,r8,r9,r10,r11,r12,r13,r14,r15
@@ -336,7 +334,7 @@
#define CLEAR_BRANCH_HISTORY_VMEXIT
#endif
-#else /* __ASSEMBLY__ */
+#else /* __ASSEMBLER__ */
typedef u8 retpoline_thunk_t[RETPOLINE_THUNK_SIZE];
extern retpoline_thunk_t __x86_indirect_thunk_array[];
@@ -388,6 +386,8 @@ extern void call_depth_return_thunk(void);
__stringify(INCREMENT_CALL_DEPTH), \
X86_FEATURE_CALL_DEPTH)
+DECLARE_PER_CPU_CACHE_HOT(u64, __x86_call_depth);
+
#ifdef CONFIG_CALL_THUNKS_DEBUG
DECLARE_PER_CPU(u64, __x86_call_count);
DECLARE_PER_CPU(u64, __x86_ret_count);
@@ -421,19 +421,26 @@ static inline void call_depth_return_thunk(void) {}
#ifdef CONFIG_X86_64
/*
+ * Emits a conditional CS prefix that is compatible with
+ * -mindirect-branch-cs-prefix.
+ */
+#define __CS_PREFIX(reg) \
+ ".irp rs,r8,r9,r10,r11,r12,r13,r14,r15\n" \
+ ".ifc \\rs," reg "\n" \
+ ".byte 0x2e\n" \
+ ".endif\n" \
+ ".endr\n"
+
+/*
* Inline asm uses the %V modifier which is only in newer GCC
* which is ensured when CONFIG_MITIGATION_RETPOLINE is defined.
*/
-# define CALL_NOSPEC \
- ALTERNATIVE_2( \
- ANNOTATE_RETPOLINE_SAFE \
- "call *%[thunk_target]\n", \
- "call __x86_indirect_thunk_%V[thunk_target]\n", \
- X86_FEATURE_RETPOLINE, \
- "lfence;\n" \
- ANNOTATE_RETPOLINE_SAFE \
- "call *%[thunk_target]\n", \
- X86_FEATURE_RETPOLINE_LFENCE)
+#ifdef CONFIG_MITIGATION_RETPOLINE
+#define CALL_NOSPEC __CS_PREFIX("%V[thunk_target]") \
+ "call __x86_indirect_thunk_%V[thunk_target]\n"
+#else
+#define CALL_NOSPEC "call *%[thunk_target]\n"
+#endif
# define THUNK_TARGET(addr) [thunk_target] "r" (addr)
@@ -515,7 +522,7 @@ extern u64 x86_pred_cmd;
static inline void indirect_branch_prediction_barrier(void)
{
- alternative_msr_write(MSR_IA32_PRED_CMD, x86_pred_cmd, X86_FEATURE_USE_IBPB);
+ alternative_msr_write(MSR_IA32_PRED_CMD, x86_pred_cmd, X86_FEATURE_IBPB);
}
/* The Intel SPEC CTRL MSR base value cache */
@@ -552,6 +559,8 @@ DECLARE_STATIC_KEY_FALSE(switch_to_cond_stibp);
DECLARE_STATIC_KEY_FALSE(switch_mm_cond_ibpb);
DECLARE_STATIC_KEY_FALSE(switch_mm_always_ibpb);
+DECLARE_STATIC_KEY_FALSE(switch_vcpu_ibpb);
+
DECLARE_STATIC_KEY_FALSE(mds_idle_clear);
DECLARE_STATIC_KEY_FALSE(switch_mm_cond_l1d_flush);
@@ -596,6 +605,6 @@ static __always_inline void mds_idle_clear_cpu_buffers(void)
mds_clear_cpu_buffers();
}
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif /* _ASM_X86_NOSPEC_BRANCH_H_ */
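
Under CONFIG_MITIGATION_RETPOLINE, CALL_NOSPEC now always emits a direct call to the register-specific __x86_indirect_thunk (plus a conditional CS prefix) instead of choosing between variants with runtime alternatives. A hedged sketch of the call-site pattern it pairs with; the wrapper and clobber list below are illustrative, not from this diff:

	static u64 demo_indirect_call(void *target)
	{
		u64 ret;

		asm volatile(CALL_NOSPEC
			     : "=a" (ret), ASM_CALL_CONSTRAINT
			     : THUNK_TARGET(target)
			     : "cc", "memory", "rcx", "rdx", "rsi", "rdi",
			       "r8", "r9", "r10", "r11");
		return ret;
	}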
diff --git a/arch/x86/include/asm/orc_types.h b/arch/x86/include/asm/orc_types.h
index 46d7e06763c9..e0125afa53fb 100644
--- a/arch/x86/include/asm/orc_types.h
+++ b/arch/x86/include/asm/orc_types.h
@@ -45,7 +45,7 @@
#define ORC_TYPE_REGS 3
#define ORC_TYPE_REGS_PARTIAL 4
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <asm/byteorder.h>
/*
@@ -73,6 +73,6 @@ struct orc_entry {
#endif
} __packed;
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif /* _ORC_TYPES_H */
diff --git a/arch/x86/include/asm/page.h b/arch/x86/include/asm/page.h
index c9fe207916f4..9265f2fca99a 100644
--- a/arch/x86/include/asm/page.h
+++ b/arch/x86/include/asm/page.h
@@ -14,7 +14,7 @@
#include <asm/page_32.h>
#endif /* CONFIG_X86_64 */
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
struct page;
@@ -84,7 +84,7 @@ static __always_inline u64 __is_canonical_address(u64 vaddr, u8 vaddr_bits)
return __canonical_address(vaddr, vaddr_bits) == vaddr;
}
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#include <asm-generic/memory_model.h>
#include <asm-generic/getorder.h>
diff --git a/arch/x86/include/asm/page_32.h b/arch/x86/include/asm/page_32.h
index 580d71aca65a..0c623706cb7e 100644
--- a/arch/x86/include/asm/page_32.h
+++ b/arch/x86/include/asm/page_32.h
@@ -4,7 +4,7 @@
#include <asm/page_32_types.h>
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#define __phys_addr_nodebug(x) ((x) - PAGE_OFFSET)
#ifdef CONFIG_DEBUG_VIRTUAL
@@ -26,6 +26,6 @@ static inline void copy_page(void *to, void *from)
{
memcpy(to, from, PAGE_SIZE);
}
-#endif /* !__ASSEMBLY__ */
+#endif /* !__ASSEMBLER__ */
#endif /* _ASM_X86_PAGE_32_H */
diff --git a/arch/x86/include/asm/page_32_types.h b/arch/x86/include/asm/page_32_types.h
index faf9cc1c14bb..a9b62e0e6f79 100644
--- a/arch/x86/include/asm/page_32_types.h
+++ b/arch/x86/include/asm/page_32_types.h
@@ -11,8 +11,8 @@
* a virtual address space of one gigabyte, which limits the
* amount of physical memory you can use to about 950MB.
*
- * If you want more physical memory than this then see the CONFIG_HIGHMEM4G
- * and CONFIG_HIGHMEM64G options in the kernel configuration.
+ * If you want more physical memory than this then see the CONFIG_VMSPLIT_2G
+ * and CONFIG_HIGHMEM4G options in the kernel configuration.
*/
#define __PAGE_OFFSET_BASE _AC(CONFIG_PAGE_OFFSET, UL)
#define __PAGE_OFFSET __PAGE_OFFSET_BASE
@@ -63,7 +63,7 @@
*/
#define KERNEL_IMAGE_SIZE (512 * 1024 * 1024)
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
/*
* This much address space is reserved for vmalloc() and iomap()
@@ -75,6 +75,6 @@ extern int sysctl_legacy_va_layout;
extern void find_low_pfn_range(void);
extern void setup_bootmem_allocator(void);
-#endif /* !__ASSEMBLY__ */
+#endif /* !__ASSEMBLER__ */
#endif /* _ASM_X86_PAGE_32_DEFS_H */
diff --git a/arch/x86/include/asm/page_64.h b/arch/x86/include/asm/page_64.h
index d63576608ce7..d3aab6f4e59a 100644
--- a/arch/x86/include/asm/page_64.h
+++ b/arch/x86/include/asm/page_64.h
@@ -4,7 +4,7 @@
#include <asm/page_64_types.h>
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <asm/cpufeatures.h>
#include <asm/alternative.h>
@@ -55,11 +55,12 @@ static inline void clear_page(void *page)
clear_page_rep, X86_FEATURE_REP_GOOD,
clear_page_erms, X86_FEATURE_ERMS,
"=D" (page),
- "D" (page)
- : "cc", "memory", "rax", "rcx");
+ "D" (page),
+ "cc", "memory", "rax", "rcx");
}
void copy_page(void *to, void *from);
+KCFI_REFERENCE(copy_page);
#ifdef CONFIG_X86_5LEVEL
/*
@@ -94,7 +95,7 @@ static __always_inline unsigned long task_size_max(void)
}
#endif /* CONFIG_X86_5LEVEL */
-#endif /* !__ASSEMBLY__ */
+#endif /* !__ASSEMBLER__ */
#ifdef CONFIG_X86_VSYSCALL_EMULATION
# define __HAVE_ARCH_GATE_AREA 1
diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h
index 06ef25411d62..1faa8f88850a 100644
--- a/arch/x86/include/asm/page_64_types.h
+++ b/arch/x86/include/asm/page_64_types.h
@@ -2,7 +2,7 @@
#ifndef _ASM_X86_PAGE_64_DEFS_H
#define _ASM_X86_PAGE_64_DEFS_H
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <asm/kaslr.h>
#endif
diff --git a/arch/x86/include/asm/page_types.h b/arch/x86/include/asm/page_types.h
index 974688973cf6..9f77bf03d747 100644
--- a/arch/x86/include/asm/page_types.h
+++ b/arch/x86/include/asm/page_types.h
@@ -43,7 +43,7 @@
#define IOREMAP_MAX_ORDER (PMD_SHIFT)
#endif /* CONFIG_X86_64 */
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#ifdef CONFIG_DYNAMIC_PHYSICAL_MASK
extern phys_addr_t physical_mask;
@@ -66,6 +66,6 @@ bool pfn_range_is_mapped(unsigned long start_pfn, unsigned long end_pfn);
extern void initmem_init(void);
-#endif /* !__ASSEMBLY__ */
+#endif /* !__ASSEMBLER__ */
#endif /* _ASM_X86_PAGE_DEFS_H */
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index 041aff51eb50..bed346bfac89 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -6,7 +6,7 @@
#include <asm/paravirt_types.h>
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
struct mm_struct;
#endif
@@ -15,7 +15,7 @@ struct mm_struct;
#include <asm/asm.h>
#include <asm/nospec-branch.h>
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <linux/bug.h>
#include <linux/types.h>
#include <linux/cpumask.h>
@@ -91,11 +91,6 @@ static inline void __flush_tlb_multi(const struct cpumask *cpumask,
PVOP_VCALL2(mmu.flush_tlb_multi, cpumask, info);
}
-static inline void paravirt_tlb_remove_table(struct mmu_gather *tlb, void *table)
-{
- PVOP_VCALL2(mmu.tlb_remove_table, tlb, table);
-}
-
static inline void paravirt_arch_exit_mmap(struct mm_struct *mm)
{
PVOP_VCALL1(mmu.exit_mmap, mm);
@@ -720,7 +715,7 @@ static __always_inline unsigned long arch_local_irq_save(void)
extern void default_banner(void);
void native_pv_lock_init(void) __init;
-#else /* __ASSEMBLY__ */
+#else /* __ASSEMBLER__ */
#ifdef CONFIG_X86_64
#ifdef CONFIG_PARAVIRT_XXL
@@ -740,18 +735,18 @@ void native_pv_lock_init(void) __init;
#endif /* CONFIG_PARAVIRT_XXL */
#endif /* CONFIG_X86_64 */
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#else /* CONFIG_PARAVIRT */
# define default_banner x86_init_noop
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
static inline void native_pv_lock_init(void)
{
}
#endif
#endif /* !CONFIG_PARAVIRT */
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#ifndef CONFIG_PARAVIRT_XXL
static inline void paravirt_enter_mmap(struct mm_struct *mm)
{
@@ -769,5 +764,5 @@ static inline void paravirt_set_cap(void)
{
}
#endif
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif /* _ASM_X86_PARAVIRT_H */
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
index fea56b04f436..62912023b46f 100644
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -4,7 +4,7 @@
#ifdef CONFIG_PARAVIRT
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <linux/types.h>
#include <asm/desc_defs.h>
@@ -134,8 +134,6 @@ struct pv_mmu_ops {
void (*flush_tlb_multi)(const struct cpumask *cpus,
const struct flush_tlb_info *info);
- void (*tlb_remove_table)(struct mmu_gather *tlb, void *table);
-
/* Hook for intercepting the destruction of an mm_struct. */
void (*exit_mmap)(struct mm_struct *mm);
void (*notify_page_enc_status_changed)(unsigned long pfn, int npages, bool enc);
@@ -242,9 +240,17 @@ extern struct paravirt_patch_template pv_ops;
#define paravirt_ptr(op) [paravirt_opptr] "m" (pv_ops.op)
-int paravirt_disable_iospace(void);
-
-/* This generates an indirect call based on the operation type number. */
+/*
+ * This generates an indirect call based on the operation type number.
+ *
+ * Since alternatives run after enabling CET/IBT -- the latter setting/clearing
+ * capabilities and the former requiring all capabilities being finalized --
+ * these indirect calls are subject to IBT and the paravirt stubs should have
+ * ENDBR on.
+ *
+ * OTOH since this is effectively a __nocfi indirect call, the paravirt stubs
+ * don't need to bother with CFI prefixes.
+ */
#define PARAVIRT_CALL \
ANNOTATE_RETPOLINE_SAFE \
"call *%[paravirt_opptr];"
@@ -519,7 +525,7 @@ unsigned long pv_native_read_cr2(void);
#define paravirt_nop ((void *)nop_func)
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#define ALT_NOT_XEN ALT_NOT(X86_FEATURE_XENPV)
diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
index e525cd85f999..105db2d33c7b 100644
--- a/arch/x86/include/asm/percpu.h
+++ b/arch/x86/include/asm/percpu.h
@@ -10,7 +10,7 @@
# define __percpu_rel
#endif
-#ifdef __ASSEMBLY__
+#ifdef __ASSEMBLER__
#ifdef CONFIG_SMP
# define __percpu %__percpu_seg:
@@ -20,14 +20,9 @@
#define PER_CPU_VAR(var) __percpu(var)__percpu_rel
-#ifdef CONFIG_X86_64_SMP
-# define INIT_PER_CPU_VAR(var) init_per_cpu__##var
-#else
-# define INIT_PER_CPU_VAR(var) var
-#endif
-
#else /* !__ASSEMBLY__: */
+#include <linux/args.h>
#include <linux/build_bug.h>
#include <linux/stringify.h>
#include <asm/asm.h>
@@ -41,12 +36,7 @@
# define __seg_fs __attribute__((address_space(__seg_fs)))
#endif
-#ifdef CONFIG_X86_64
-# define __percpu_seg_override __seg_gs
-#else
-# define __percpu_seg_override __seg_fs
-#endif
-
+#define __percpu_seg_override CONCATENATE(__seg_, __percpu_seg)
#define __percpu_prefix ""
#else /* !CONFIG_CC_HAS_NAMED_AS: */
@@ -98,22 +88,6 @@
#define __force_percpu_arg(x) __force_percpu_prefix "%" #x
/*
- * Initialized pointers to per-CPU variables needed for the boot
- * processor need to use these macros to get the proper address
- * offset from __per_cpu_load on SMP.
- *
- * There also must be an entry in vmlinux_64.lds.S
- */
-#define DECLARE_INIT_PER_CPU(var) \
- extern typeof(var) init_per_cpu_var(var)
-
-#ifdef CONFIG_X86_64_SMP
-# define init_per_cpu_var(var) init_per_cpu__##var
-#else
-# define init_per_cpu_var(var) var
-#endif
-
-/*
* For arch-specific code, we can use direct single-insn ops (they
* don't give an lvalue though).
*/
@@ -128,15 +102,10 @@
#define __pcpu_cast_4(val) ((u32)(((unsigned long) val) & 0xffffffff))
#define __pcpu_cast_8(val) ((u64)(val))
-#define __pcpu_op1_1(op, dst) op "b " dst
-#define __pcpu_op1_2(op, dst) op "w " dst
-#define __pcpu_op1_4(op, dst) op "l " dst
-#define __pcpu_op1_8(op, dst) op "q " dst
-
-#define __pcpu_op2_1(op, src, dst) op "b " src ", " dst
-#define __pcpu_op2_2(op, src, dst) op "w " src ", " dst
-#define __pcpu_op2_4(op, src, dst) op "l " src ", " dst
-#define __pcpu_op2_8(op, src, dst) op "q " src ", " dst
+#define __pcpu_op_1(op) op "b "
+#define __pcpu_op_2(op) op "w "
+#define __pcpu_op_4(op) op "l "
+#define __pcpu_op_8(op) op "q "
#define __pcpu_reg_1(mod, x) mod "q" (x)
#define __pcpu_reg_2(mod, x) mod "r" (x)
@@ -168,7 +137,8 @@ do { \
({ \
__pcpu_type_##size pfo_val__; \
\
- asm qual (__pcpu_op2_##size("mov", __percpu_arg([var]), "%[val]") \
+ asm qual (__pcpu_op_##size("mov") \
+ __percpu_arg([var]) ", %[val]" \
: [val] __pcpu_reg_##size("=", pfo_val__) \
: [var] "m" (__my_cpu_var(_var))); \
\
@@ -184,7 +154,8 @@ do { \
pto_tmp__ = (_val); \
(void)pto_tmp__; \
} \
- asm qual(__pcpu_op2_##size("mov", "%[val]", __percpu_arg([var])) \
+ asm qual (__pcpu_op_##size("mov") "%[val], " \
+ __percpu_arg([var]) \
: [var] "=m" (__my_cpu_var(_var)) \
: [val] __pcpu_reg_imm_##size(pto_val__)); \
} while (0)
@@ -201,7 +172,8 @@ do { \
({ \
__pcpu_type_##size pfo_val__; \
\
- asm(__pcpu_op2_##size("mov", __force_percpu_arg(a[var]), "%[val]") \
+ asm(__pcpu_op_##size("mov") \
+ __force_percpu_arg(a[var]) ", %[val]" \
: [val] __pcpu_reg_##size("=", pfo_val__) \
: [var] "i" (&(_var))); \
\
@@ -210,7 +182,7 @@ do { \
#define percpu_unary_op(size, qual, op, _var) \
({ \
- asm qual (__pcpu_op1_##size(op, __percpu_arg([var])) \
+ asm qual (__pcpu_op_##size(op) __percpu_arg([var]) \
: [var] "+m" (__my_cpu_var(_var))); \
})
@@ -223,7 +195,7 @@ do { \
pto_tmp__ = (_val); \
(void)pto_tmp__; \
} \
- asm qual(__pcpu_op2_##size(op, "%[val]", __percpu_arg([var])) \
+ asm qual (__pcpu_op_##size(op) "%[val], " __percpu_arg([var]) \
: [var] "+m" (__my_cpu_var(_var)) \
: [val] __pcpu_reg_imm_##size(pto_val__)); \
} while (0)
@@ -259,8 +231,8 @@ do { \
({ \
__pcpu_type_##size paro_tmp__ = __pcpu_cast_##size(_val); \
\
- asm qual (__pcpu_op2_##size("xadd", "%[tmp]", \
- __percpu_arg([var])) \
+ asm qual (__pcpu_op_##size("xadd") "%[tmp], " \
+ __percpu_arg([var]) \
: [tmp] __pcpu_reg_##size("+", paro_tmp__), \
[var] "+m" (__my_cpu_var(_var)) \
: : "memory"); \
@@ -303,8 +275,8 @@ do { \
__pcpu_type_##size pco_old__ = __pcpu_cast_##size(_oval); \
__pcpu_type_##size pco_new__ = __pcpu_cast_##size(_nval); \
\
- asm qual (__pcpu_op2_##size("cmpxchg", "%[nval]", \
- __percpu_arg([var])) \
+ asm qual (__pcpu_op_##size("cmpxchg") "%[nval], " \
+ __percpu_arg([var]) \
: [oval] "+a" (pco_old__), \
[var] "+m" (__my_cpu_var(_var)) \
: [nval] __pcpu_reg_##size(, pco_new__) \
@@ -320,8 +292,8 @@ do { \
__pcpu_type_##size pco_old__ = *pco_oval__; \
__pcpu_type_##size pco_new__ = __pcpu_cast_##size(_nval); \
\
- asm qual (__pcpu_op2_##size("cmpxchg", "%[nval]", \
- __percpu_arg([var])) \
+ asm qual (__pcpu_op_##size("cmpxchg") "%[nval], " \
+ __percpu_arg([var]) \
CC_SET(z) \
: CC_OUT(z) (success), \
[oval] "+a" (pco_old__), \
@@ -348,15 +320,14 @@ do { \
old__.var = _oval; \
new__.var = _nval; \
\
- asm qual (ALTERNATIVE("call this_cpu_cmpxchg8b_emu", \
- "cmpxchg8b " __percpu_arg([var]), X86_FEATURE_CX8) \
- : [var] "+m" (__my_cpu_var(_var)), \
- "+a" (old__.low), \
- "+d" (old__.high) \
- : "b" (new__.low), \
- "c" (new__.high), \
- "S" (&(_var)) \
- : "memory"); \
+ asm_inline qual ( \
+ ALTERNATIVE("call this_cpu_cmpxchg8b_emu", \
+ "cmpxchg8b " __percpu_arg([var]), X86_FEATURE_CX8) \
+ : ALT_OUTPUT_SP([var] "+m" (__my_cpu_var(_var)), \
+ "+a" (old__.low), "+d" (old__.high)) \
+ : "b" (new__.low), "c" (new__.high), \
+ "S" (&(_var)) \
+ : "memory"); \
\
old__.var; \
})
@@ -378,17 +349,16 @@ do { \
old__.var = *_oval; \
new__.var = _nval; \
\
- asm qual (ALTERNATIVE("call this_cpu_cmpxchg8b_emu", \
- "cmpxchg8b " __percpu_arg([var]), X86_FEATURE_CX8) \
- CC_SET(z) \
- : CC_OUT(z) (success), \
- [var] "+m" (__my_cpu_var(_var)), \
- "+a" (old__.low), \
- "+d" (old__.high) \
- : "b" (new__.low), \
- "c" (new__.high), \
- "S" (&(_var)) \
- : "memory"); \
+ asm_inline qual ( \
+ ALTERNATIVE("call this_cpu_cmpxchg8b_emu", \
+ "cmpxchg8b " __percpu_arg([var]), X86_FEATURE_CX8) \
+ CC_SET(z) \
+ : ALT_OUTPUT_SP(CC_OUT(z) (success), \
+ [var] "+m" (__my_cpu_var(_var)), \
+ "+a" (old__.low), "+d" (old__.high)) \
+ : "b" (new__.low), "c" (new__.high), \
+ "S" (&(_var)) \
+ : "memory"); \
if (unlikely(!success)) \
*_oval = old__.var; \
\
@@ -419,15 +389,14 @@ do { \
old__.var = _oval; \
new__.var = _nval; \
\
- asm qual (ALTERNATIVE("call this_cpu_cmpxchg16b_emu", \
- "cmpxchg16b " __percpu_arg([var]), X86_FEATURE_CX16) \
- : [var] "+m" (__my_cpu_var(_var)), \
- "+a" (old__.low), \
- "+d" (old__.high) \
- : "b" (new__.low), \
- "c" (new__.high), \
- "S" (&(_var)) \
- : "memory"); \
+ asm_inline qual ( \
+ ALTERNATIVE("call this_cpu_cmpxchg16b_emu", \
+ "cmpxchg16b " __percpu_arg([var]), X86_FEATURE_CX16) \
+ : ALT_OUTPUT_SP([var] "+m" (__my_cpu_var(_var)), \
+ "+a" (old__.low), "+d" (old__.high)) \
+ : "b" (new__.low), "c" (new__.high), \
+ "S" (&(_var)) \
+ : "memory"); \
\
old__.var; \
})
@@ -449,19 +418,19 @@ do { \
old__.var = *_oval; \
new__.var = _nval; \
\
- asm qual (ALTERNATIVE("call this_cpu_cmpxchg16b_emu", \
- "cmpxchg16b " __percpu_arg([var]), X86_FEATURE_CX16) \
- CC_SET(z) \
- : CC_OUT(z) (success), \
- [var] "+m" (__my_cpu_var(_var)), \
- "+a" (old__.low), \
- "+d" (old__.high) \
- : "b" (new__.low), \
- "c" (new__.high), \
- "S" (&(_var)) \
- : "memory"); \
+ asm_inline qual ( \
+ ALTERNATIVE("call this_cpu_cmpxchg16b_emu", \
+ "cmpxchg16b " __percpu_arg([var]), X86_FEATURE_CX16) \
+ CC_SET(z) \
+ : ALT_OUTPUT_SP(CC_OUT(z) (success), \
+ [var] "+m" (__my_cpu_var(_var)), \
+ "+a" (old__.low), "+d" (old__.high)) \
+ : "b" (new__.low), "c" (new__.high), \
+ "S" (&(_var)) \
+ : "memory"); \
if (unlikely(!success)) \
*_oval = old__.var; \
+ \
likely(success); \
})
@@ -582,7 +551,7 @@ do { \
* it is accessed while this_cpu_read_stable() allows the value to be cached.
* this_cpu_read_stable() is more efficient and can be used if its value
* is guaranteed to be valid across CPUs. The current users include
- * pcpu_hot.current_task and pcpu_hot.top_of_stack, both of which are
+ * current_task and cpu_current_top_of_stack, both of which are
* actually per-thread variables implemented as per-CPU variables and
* thus stable for the duration of the respective task.
*/
@@ -617,9 +586,9 @@ do { \
#include <asm-generic/percpu.h>
/* We can use this directly for local CPU (faster). */
-DECLARE_PER_CPU_READ_MOSTLY(unsigned long, this_cpu_off);
+DECLARE_PER_CPU_CACHE_HOT(unsigned long, this_cpu_off);
-#endif /* !__ASSEMBLY__ */
+#endif /* !__ASSEMBLER__ */
#ifdef CONFIG_SMP
diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
index 0ba8d20f2d1d..812dac3f79f0 100644
--- a/arch/x86/include/asm/perf_event.h
+++ b/arch/x86/include/asm/perf_event.h
@@ -141,6 +141,12 @@
#define PEBS_DATACFG_XMMS BIT_ULL(2)
#define PEBS_DATACFG_LBRS BIT_ULL(3)
#define PEBS_DATACFG_LBR_SHIFT 24
+#define PEBS_DATACFG_CNTR BIT_ULL(4)
+#define PEBS_DATACFG_CNTR_SHIFT 32
+#define PEBS_DATACFG_CNTR_MASK GENMASK_ULL(15, 0)
+#define PEBS_DATACFG_FIX_SHIFT 48
+#define PEBS_DATACFG_FIX_MASK GENMASK_ULL(7, 0)
+#define PEBS_DATACFG_METRICS BIT_ULL(5)
/* Steal the highest bit of pebs_data_cfg for SW usage */
#define PEBS_UPDATE_DS_SW BIT_ULL(63)
@@ -482,6 +488,15 @@ struct pebs_xmm {
u64 xmm[16*2]; /* two entries for each register */
};
+struct pebs_cntr_header {
+ u32 cntr;
+ u32 fixed;
+ u32 metrics;
+ u32 reserved;
+};
+
+#define INTEL_CNTR_METRICS 0x3
+
/*
* AMD Extended Performance Monitoring and Debug cpuid feature detection
*/
@@ -509,6 +524,8 @@ struct pebs_xmm {
#define IBS_CAPS_FETCHCTLEXTD (1U<<9)
#define IBS_CAPS_OPDATA4 (1U<<10)
#define IBS_CAPS_ZEN4 (1U<<11)
+#define IBS_CAPS_OPLDLAT (1U<<12)
+#define IBS_CAPS_OPDTLBPGSIZE (1U<<19)
#define IBS_CAPS_DEFAULT (IBS_CAPS_AVAIL \
| IBS_CAPS_FETCHSAM \
@@ -534,8 +551,11 @@ struct pebs_xmm {
* The lower 7 bits of the current count are random bits
* preloaded by hardware and ignored in software
*/
+#define IBS_OP_LDLAT_EN (1ULL<<63)
+#define IBS_OP_LDLAT_THRSH (0xFULL<<59)
#define IBS_OP_CUR_CNT (0xFFF80ULL<<32)
#define IBS_OP_CUR_CNT_RAND (0x0007FULL<<32)
+#define IBS_OP_CUR_CNT_EXT_MASK (0x7FULL<<52)
#define IBS_OP_CNT_CTL (1ULL<<19)
#define IBS_OP_VAL (1ULL<<18)
#define IBS_OP_ENABLE (1ULL<<17)
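
IBS_CAPS_OPLDLAT plus the IBS_OP_LDLAT_* bits expose load-latency filtering for IBS op sampling. A hedged sketch of building the op control word, assuming <linux/bitfield.h>; 'ibs_caps' stands in for the driver's cached capability word:

	#include <linux/bitfield.h>

	static u64 ibs_op_ctl_with_ldlat(u32 ibs_caps, unsigned int ldlat)
	{
		u64 op_ctl = IBS_OP_ENABLE | IBS_OP_CNT_CTL;

		if (ibs_caps & IBS_CAPS_OPLDLAT)
			op_ctl |= IBS_OP_LDLAT_EN |
				  FIELD_PREP(IBS_OP_LDLAT_THRSH, ldlat);

		return op_ctl;
	}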
diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
index dd4841231bb9..a33147520044 100644
--- a/arch/x86/include/asm/pgalloc.h
+++ b/arch/x86/include/asm/pgalloc.h
@@ -29,11 +29,6 @@ static inline void paravirt_release_pud(unsigned long pfn) {}
static inline void paravirt_release_p4d(unsigned long pfn) {}
#endif
-/*
- * Flags to use when allocating a user page table page.
- */
-extern gfp_t __userpte_alloc_gfp;
-
#ifdef CONFIG_MITIGATION_PAGE_TABLE_ISOLATION
/*
* Instead of one PGD, we acquire two PGDs. Being order-1, it is
diff --git a/arch/x86/include/asm/pgtable-2level_types.h b/arch/x86/include/asm/pgtable-2level_types.h
index 7f6ccff0ba72..66425424ce91 100644
--- a/arch/x86/include/asm/pgtable-2level_types.h
+++ b/arch/x86/include/asm/pgtable-2level_types.h
@@ -2,7 +2,7 @@
#ifndef _ASM_X86_PGTABLE_2LEVEL_DEFS_H
#define _ASM_X86_PGTABLE_2LEVEL_DEFS_H
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <linux/types.h>
typedef unsigned long pteval_t;
@@ -16,24 +16,24 @@ typedef union {
pteval_t pte;
pteval_t pte_low;
} pte_t;
-#endif /* !__ASSEMBLY__ */
+#endif /* !__ASSEMBLER__ */
#define SHARED_KERNEL_PMD 0
#define ARCH_PAGE_TABLE_SYNC_MASK PGTBL_PMD_MODIFIED
/*
- * traditional i386 two-level paging structure:
+ * Traditional i386 two-level paging structure:
*/
#define PGDIR_SHIFT 22
#define PTRS_PER_PGD 1024
-
/*
- * the i386 is two-level, so we don't really have any
- * PMD directory physically.
+ * The i386 is two-level, so we don't really have any
+ * PMD directory physically:
*/
+#define PTRS_PER_PMD 1
#define PTRS_PER_PTE 1024
diff --git a/arch/x86/include/asm/pgtable-3level_types.h b/arch/x86/include/asm/pgtable-3level_types.h
index 80911349519e..9d5b257d44e3 100644
--- a/arch/x86/include/asm/pgtable-3level_types.h
+++ b/arch/x86/include/asm/pgtable-3level_types.h
@@ -2,7 +2,7 @@
#ifndef _ASM_X86_PGTABLE_3LEVEL_DEFS_H
#define _ASM_X86_PGTABLE_3LEVEL_DEFS_H
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <linux/types.h>
typedef u64 pteval_t;
@@ -25,7 +25,7 @@ typedef union {
};
pmdval_t pmd;
} pmd_t;
-#endif /* !__ASSEMBLY__ */
+#endif /* !__ASSEMBLER__ */
#define SHARED_KERNEL_PMD (!static_cpu_has(X86_FEATURE_PTI))
diff --git a/arch/x86/include/asm/pgtable-invert.h b/arch/x86/include/asm/pgtable-invert.h
index a0c1525f1b6f..e12e52ae8083 100644
--- a/arch/x86/include/asm/pgtable-invert.h
+++ b/arch/x86/include/asm/pgtable-invert.h
@@ -2,7 +2,7 @@
#ifndef _ASM_PGTABLE_INVERT_H
#define _ASM_PGTABLE_INVERT_H 1
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
/*
* A clear pte value is special, and doesn't get inverted.
@@ -36,6 +36,6 @@ static inline u64 flip_protnone_guard(u64 oldval, u64 val, u64 mask)
return val;
}
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index 593f10aabd45..7bd6bd6df4a1 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -15,7 +15,7 @@
cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS))) \
: (prot))
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <linux/spinlock.h>
#include <asm/x86_init.h>
#include <asm/pkru.h>
@@ -973,7 +973,7 @@ static inline pgd_t pti_set_user_pgtbl(pgd_t *pgdp, pgd_t pgd)
}
#endif /* CONFIG_MITIGATION_PAGE_TABLE_ISOLATION */
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#ifdef CONFIG_X86_32
@@ -982,7 +982,7 @@ static inline pgd_t pti_set_user_pgtbl(pgd_t *pgdp, pgd_t pgd)
# include <asm/pgtable_64.h>
#endif
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <linux/mm_types.h>
#include <linux/mmdebug.h>
#include <linux/log2.h>
@@ -1233,12 +1233,12 @@ static inline int pgd_none(pgd_t pgd)
}
#endif /* CONFIG_PGTABLE_LEVELS > 4 */
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
#define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
extern int direct_gbpages;
void init_mem_mapping(void);
@@ -1812,6 +1812,6 @@ bool arch_is_platform_page(u64 paddr);
WARN_ON_ONCE(pgd_present(*pgdp) && !pgd_same(*pgdp, pgd)); \
set_pgd(pgdp, pgd); \
})
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif /* _ASM_X86_PGTABLE_H */
diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
index 7d4ad8907297..b612cc57a4d3 100644
--- a/arch/x86/include/asm/pgtable_32.h
+++ b/arch/x86/include/asm/pgtable_32.h
@@ -13,7 +13,7 @@
* This file contains the functions and defines necessary to modify and use
* the i386 page table tree.
*/
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <asm/processor.h>
#include <linux/threads.h>
#include <asm/paravirt.h>
@@ -45,7 +45,7 @@ do { \
flush_tlb_one_kernel((vaddr)); \
} while (0)
-#endif /* !__ASSEMBLY__ */
+#endif /* !__ASSEMBLER__ */
/*
* This is used to calculate the .brk reservation for initial pagetables.
diff --git a/arch/x86/include/asm/pgtable_32_areas.h b/arch/x86/include/asm/pgtable_32_areas.h
index b6355416a15a..921148b42967 100644
--- a/arch/x86/include/asm/pgtable_32_areas.h
+++ b/arch/x86/include/asm/pgtable_32_areas.h
@@ -13,7 +13,7 @@
*/
#define VMALLOC_OFFSET (8 * 1024 * 1024)
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
extern bool __vmalloc_start_set; /* set once high_memory is set */
#endif
diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
index d1426b64c1b9..b89f8f1194a9 100644
--- a/arch/x86/include/asm/pgtable_64.h
+++ b/arch/x86/include/asm/pgtable_64.h
@@ -5,7 +5,7 @@
#include <linux/const.h>
#include <asm/pgtable_64_types.h>
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
/*
* This file contains the functions and defines necessary to modify and use
@@ -270,7 +270,7 @@ static inline bool gup_fast_permitted(unsigned long start, unsigned long end)
#include <asm/pgtable-invert.h>
-#else /* __ASSEMBLY__ */
+#else /* __ASSEMBLER__ */
#define l4_index(x) (((x) >> 39) & 511)
#define pud_index(x) (((x) >> PUD_SHIFT) & (PTRS_PER_PUD - 1))
@@ -291,5 +291,5 @@ L3_START_KERNEL = pud_index(__START_KERNEL_map)
i = i + 1 ; \
.endr
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif /* _ASM_X86_PGTABLE_64_H */
diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
index ec68f8369bdc..5bb782d856f2 100644
--- a/arch/x86/include/asm/pgtable_64_types.h
+++ b/arch/x86/include/asm/pgtable_64_types.h
@@ -4,7 +4,7 @@
#include <asm/sparsemem.h>
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <linux/types.h>
#include <asm/kaslr.h>
@@ -44,7 +44,7 @@ static inline bool pgtable_l5_enabled(void)
extern unsigned int pgdir_shift;
extern unsigned int ptrs_per_p4d;
-#endif /* !__ASSEMBLY__ */
+#endif /* !__ASSEMBLER__ */
#define SHARED_KERNEL_PMD 0
diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
index 4b804531b03c..b74ec5c3643b 100644
--- a/arch/x86/include/asm/pgtable_types.h
+++ b/arch/x86/include/asm/pgtable_types.h
@@ -33,6 +33,7 @@
#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SOFTW1
#define _PAGE_BIT_UFFD_WP _PAGE_BIT_SOFTW2 /* userfaultfd wrprotected */
#define _PAGE_BIT_SOFT_DIRTY _PAGE_BIT_SOFTW3 /* software dirty tracking */
+#define _PAGE_BIT_KERNEL_4K _PAGE_BIT_SOFTW3 /* page must not be converted to large */
#define _PAGE_BIT_DEVMAP _PAGE_BIT_SOFTW4
#ifdef CONFIG_X86_64
@@ -64,6 +65,7 @@
#define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
#define _PAGE_SPECIAL (_AT(pteval_t, 1) << _PAGE_BIT_SPECIAL)
#define _PAGE_CPA_TEST (_AT(pteval_t, 1) << _PAGE_BIT_CPA_TEST)
+#define _PAGE_KERNEL_4K (_AT(pteval_t, 1) << _PAGE_BIT_KERNEL_4K)
#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
#define _PAGE_PKEY_BIT0 (_AT(pteval_t, 1) << _PAGE_BIT_PKEY_BIT0)
#define _PAGE_PKEY_BIT1 (_AT(pteval_t, 1) << _PAGE_BIT_PKEY_BIT1)
@@ -164,7 +166,7 @@
* to have the WB mode at index 0 (all bits clear). This is the default
* right now and likely would break too much if changed.
*/
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
enum page_cache_mode {
_PAGE_CACHE_MODE_WB = 0,
_PAGE_CACHE_MODE_WC = 1,
@@ -177,7 +179,7 @@ enum page_cache_mode {
};
#endif
-#define _PAGE_CC (_AT(pteval_t, cc_mask))
+#define _PAGE_CC (_AT(pteval_t, cc_get_mask()))
#define _PAGE_ENC (_AT(pteval_t, sme_me_mask))
#define _PAGE_CACHE_MASK (_PAGE_PWT | _PAGE_PCD | _PAGE_PAT)
@@ -239,7 +241,7 @@ enum page_cache_mode {
#define __PAGE_KERNEL_IO_NOCACHE __PAGE_KERNEL_NOCACHE
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#define __PAGE_KERNEL_ENC (__PAGE_KERNEL | _ENC)
#define __PAGE_KERNEL_ENC_WP (__PAGE_KERNEL_WP | _ENC)
@@ -262,7 +264,7 @@ enum page_cache_mode {
#define PAGE_KERNEL_IO __pgprot_mask(__PAGE_KERNEL_IO)
#define PAGE_KERNEL_IO_NOCACHE __pgprot_mask(__PAGE_KERNEL_IO_NOCACHE)
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
/*
* early identity mapping pte attrib macros.
@@ -281,7 +283,7 @@ enum page_cache_mode {
# include <asm/pgtable_64_types.h>
#endif
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <linux/types.h>
@@ -580,6 +582,6 @@ extern int __init kernel_map_pages_in_pgd(pgd_t *pgd, u64 pfn,
unsigned long page_flags);
extern int __init kernel_unmap_pages_in_pgd(pgd_t *pgd, unsigned long address,
unsigned long numpages);
-#endif /* !__ASSEMBLY__ */
+#endif /* !__ASSEMBLER__ */
#endif /* _ASM_X86_PGTABLE_DEFS_H */
diff --git a/arch/x86/include/asm/preempt.h b/arch/x86/include/asm/preempt.h
index 919909d8cb77..578441db09f0 100644
--- a/arch/x86/include/asm/preempt.h
+++ b/arch/x86/include/asm/preempt.h
@@ -4,10 +4,11 @@
#include <asm/rmwcc.h>
#include <asm/percpu.h>
-#include <asm/current.h>
#include <linux/static_call_types.h>
+DECLARE_PER_CPU_CACHE_HOT(int, __preempt_count);
+
/* We use the MSB mostly because its available */
#define PREEMPT_NEED_RESCHED 0x80000000
@@ -23,18 +24,18 @@
*/
static __always_inline int preempt_count(void)
{
- return raw_cpu_read_4(pcpu_hot.preempt_count) & ~PREEMPT_NEED_RESCHED;
+ return raw_cpu_read_4(__preempt_count) & ~PREEMPT_NEED_RESCHED;
}
static __always_inline void preempt_count_set(int pc)
{
int old, new;
- old = raw_cpu_read_4(pcpu_hot.preempt_count);
+ old = raw_cpu_read_4(__preempt_count);
do {
new = (old & PREEMPT_NEED_RESCHED) |
(pc & ~PREEMPT_NEED_RESCHED);
- } while (!raw_cpu_try_cmpxchg_4(pcpu_hot.preempt_count, &old, new));
+ } while (!raw_cpu_try_cmpxchg_4(__preempt_count, &old, new));
}
/*
@@ -43,7 +44,7 @@ static __always_inline void preempt_count_set(int pc)
#define init_task_preempt_count(p) do { } while (0)
#define init_idle_preempt_count(p, cpu) do { \
- per_cpu(pcpu_hot.preempt_count, (cpu)) = PREEMPT_DISABLED; \
+ per_cpu(__preempt_count, (cpu)) = PREEMPT_DISABLED; \
} while (0)
/*
@@ -57,17 +58,17 @@ static __always_inline void preempt_count_set(int pc)
static __always_inline void set_preempt_need_resched(void)
{
- raw_cpu_and_4(pcpu_hot.preempt_count, ~PREEMPT_NEED_RESCHED);
+ raw_cpu_and_4(__preempt_count, ~PREEMPT_NEED_RESCHED);
}
static __always_inline void clear_preempt_need_resched(void)
{
- raw_cpu_or_4(pcpu_hot.preempt_count, PREEMPT_NEED_RESCHED);
+ raw_cpu_or_4(__preempt_count, PREEMPT_NEED_RESCHED);
}
static __always_inline bool test_preempt_need_resched(void)
{
- return !(raw_cpu_read_4(pcpu_hot.preempt_count) & PREEMPT_NEED_RESCHED);
+ return !(raw_cpu_read_4(__preempt_count) & PREEMPT_NEED_RESCHED);
}
/*
@@ -76,12 +77,12 @@ static __always_inline bool test_preempt_need_resched(void)
static __always_inline void __preempt_count_add(int val)
{
- raw_cpu_add_4(pcpu_hot.preempt_count, val);
+ raw_cpu_add_4(__preempt_count, val);
}
static __always_inline void __preempt_count_sub(int val)
{
- raw_cpu_add_4(pcpu_hot.preempt_count, -val);
+ raw_cpu_add_4(__preempt_count, -val);
}
/*
@@ -91,7 +92,7 @@ static __always_inline void __preempt_count_sub(int val)
*/
static __always_inline bool __preempt_count_dec_and_test(void)
{
- return GEN_UNARY_RMWcc("decl", __my_cpu_var(pcpu_hot.preempt_count), e,
+ return GEN_UNARY_RMWcc("decl", __my_cpu_var(__preempt_count), e,
__percpu_arg([var]));
}
@@ -100,7 +101,7 @@ static __always_inline bool __preempt_count_dec_and_test(void)
*/
static __always_inline bool should_resched(int preempt_offset)
{
- return unlikely(raw_cpu_read_4(pcpu_hot.preempt_count) == preempt_offset);
+ return unlikely(raw_cpu_read_4(__preempt_count) == preempt_offset);
}
#ifdef CONFIG_PREEMPTION
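
The preempt count leaves struct pcpu_hot and becomes a standalone cache-hot per-CPU variable. A hedged sketch of the declare/define pairing this implies; the definition site and initializer are assumed, not shown in this diff:

	/* Header side (as added above). */
	DECLARE_PER_CPU_CACHE_HOT(int, __preempt_count);

	/* Definition side, e.g. in a CPU setup file (assumed). */
	DEFINE_PER_CPU_CACHE_HOT(int, __preempt_count) = INIT_PREEMPT_COUNT;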
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index c0cd10182e90..5d2f7e5aff26 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -60,18 +60,13 @@ struct vm86;
# define ARCH_MIN_MMSTRUCT_ALIGN 0
#endif
-enum tlb_infos {
- ENTRIES,
- NR_INFO
-};
-
-extern u16 __read_mostly tlb_lli_4k[NR_INFO];
-extern u16 __read_mostly tlb_lli_2m[NR_INFO];
-extern u16 __read_mostly tlb_lli_4m[NR_INFO];
-extern u16 __read_mostly tlb_lld_4k[NR_INFO];
-extern u16 __read_mostly tlb_lld_2m[NR_INFO];
-extern u16 __read_mostly tlb_lld_4m[NR_INFO];
-extern u16 __read_mostly tlb_lld_1g[NR_INFO];
+extern u16 __read_mostly tlb_lli_4k;
+extern u16 __read_mostly tlb_lli_2m;
+extern u16 __read_mostly tlb_lli_4m;
+extern u16 __read_mostly tlb_lld_4k;
+extern u16 __read_mostly tlb_lld_2m;
+extern u16 __read_mostly tlb_lld_4m;
+extern u16 __read_mostly tlb_lld_1g;
/*
* CPU type and hardware bug flags. Kept separately for each CPU.
@@ -234,7 +229,7 @@ static inline unsigned long long l1tf_pfn_limit(void)
void init_cpu_devs(void);
void get_cpu_vendor(struct cpuinfo_x86 *c);
extern void early_cpu_init(void);
-extern void identify_secondary_cpu(struct cpuinfo_x86 *);
+extern void identify_secondary_cpu(unsigned int cpu);
extern void print_cpu_info(struct cpuinfo_x86 *);
void print_cpu_msr(struct cpuinfo_x86 *);
@@ -420,37 +415,33 @@ struct irq_stack {
char stack[IRQ_STACK_SIZE];
} __aligned(IRQ_STACK_SIZE);
+DECLARE_PER_CPU_CACHE_HOT(struct irq_stack *, hardirq_stack_ptr);
#ifdef CONFIG_X86_64
-struct fixed_percpu_data {
- /*
- * GCC hardcodes the stack canary as %gs:40. Since the
- * irq_stack is the object at %gs:0, we reserve the bottom
- * 48 bytes of the irq stack for the canary.
- *
- * Once we are willing to require -mstack-protector-guard-symbol=
- * support for x86_64 stackprotector, we can get rid of this.
- */
- char gs_base[40];
- unsigned long stack_canary;
-};
+DECLARE_PER_CPU_CACHE_HOT(bool, hardirq_stack_inuse);
+#else
+DECLARE_PER_CPU_CACHE_HOT(struct irq_stack *, softirq_stack_ptr);
+#endif
-DECLARE_PER_CPU_FIRST(struct fixed_percpu_data, fixed_percpu_data) __visible;
-DECLARE_INIT_PER_CPU(fixed_percpu_data);
+DECLARE_PER_CPU_CACHE_HOT(unsigned long, cpu_current_top_of_stack);
+/* const-qualified alias provided by the linker. */
+DECLARE_PER_CPU_CACHE_HOT(const unsigned long __percpu_seg_override,
+ const_cpu_current_top_of_stack);
+#ifdef CONFIG_X86_64
static inline unsigned long cpu_kernelmode_gs_base(int cpu)
{
- return (unsigned long)per_cpu(fixed_percpu_data.gs_base, cpu);
+#ifdef CONFIG_SMP
+ return per_cpu_offset(cpu);
+#else
+ return 0;
+#endif
}
extern asmlinkage void entry_SYSCALL32_ignore(void);
/* Save actual FS/GS selectors and bases to current->thread */
void current_save_fsgs(void);
-#else /* X86_64 */
-#ifdef CONFIG_STACKPROTECTOR
-DECLARE_PER_CPU(unsigned long, __stack_chk_guard);
-#endif
-#endif /* !X86_64 */
+#endif /* X86_64 */
struct perf_event;
@@ -561,9 +552,9 @@ static __always_inline unsigned long current_top_of_stack(void)
* entry trampoline.
*/
if (IS_ENABLED(CONFIG_USE_X86_SEG_SUPPORT))
- return this_cpu_read_const(const_pcpu_hot.top_of_stack);
+ return this_cpu_read_const(const_cpu_current_top_of_stack);
- return this_cpu_read_stable(pcpu_hot.top_of_stack);
+ return this_cpu_read_stable(cpu_current_top_of_stack);
}
static __always_inline bool on_thread_stack(void)
@@ -668,8 +659,6 @@ static __always_inline void prefetchw(const void *x)
.sysenter_cs = __KERNEL_CS, \
}
-#define KSTK_ESP(task) (task_pt_regs(task)->sp)
-
#else
extern unsigned long __top_init_kernel_stack[];
@@ -677,8 +666,6 @@ extern unsigned long __top_init_kernel_stack[];
.sp = (unsigned long)&__top_init_kernel_stack, \
}
-extern unsigned long KSTK_ESP(struct task_struct *task);
-
#endif /* CONFIG_X86_64 */
extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
@@ -692,6 +679,7 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
#define TASK_UNMAPPED_BASE __TASK_UNMAPPED_BASE(TASK_SIZE_LOW)
#define KSTK_EIP(task) (task_pt_regs(task)->ip)
+#define KSTK_ESP(task) (task_pt_regs(task)->sp)
/* Get/set a process' ability to use the timestamp counter instruction */
#define GET_TSC_CTL(adr) get_tsc_mode((adr))
@@ -757,6 +745,7 @@ extern enum l1tf_mitigations l1tf_mitigation;
enum mds_mitigations {
MDS_MITIGATION_OFF,
+ MDS_MITIGATION_AUTO,
MDS_MITIGATION_FULL,
MDS_MITIGATION_VMWERV,
};
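
The per-size TLB entry counts become plain scalars, so readers simply drop the [ENTRIES] index. An illustrative before/after:

	/* was: pr_info("dTLB 4K entries: %d\n", tlb_lld_4k[ENTRIES]); */
	pr_info("dTLB 4K entries: %d\n", tlb_lld_4k);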
diff --git a/arch/x86/include/asm/prom.h b/arch/x86/include/asm/prom.h
index 365798cb4408..5d0dbab85264 100644
--- a/arch/x86/include/asm/prom.h
+++ b/arch/x86/include/asm/prom.h
@@ -8,7 +8,7 @@
#ifndef _ASM_X86_PROM_H
#define _ASM_X86_PROM_H
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <linux/of.h>
#include <linux/types.h>
@@ -33,5 +33,5 @@ static inline void x86_flattree_get_config(void) { }
extern char cmd_line[COMMAND_LINE_SIZE];
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif
diff --git a/arch/x86/include/asm/proto.h b/arch/x86/include/asm/proto.h
index 484f4f0131a5..05224a695872 100644
--- a/arch/x86/include/asm/proto.h
+++ b/arch/x86/include/asm/proto.h
@@ -15,7 +15,6 @@ void entry_SYSCALL_64(void);
void entry_SYSCALL_64_safe_stack(void);
void entry_SYSRETQ_unsafe_stack(void);
void entry_SYSRETQ_end(void);
-long do_arch_prctl_64(struct task_struct *task, int option, unsigned long arg2);
#endif
#ifdef CONFIG_X86_32
@@ -41,6 +40,6 @@ void x86_configure_nx(void);
extern int reboot_force;
-long do_arch_prctl_common(int option, unsigned long arg2);
+long do_arch_prctl_64(struct task_struct *task, int option, unsigned long arg2);
#endif /* _ASM_X86_PROTO_H */
diff --git a/arch/x86/include/asm/pti.h b/arch/x86/include/asm/pti.h
index ab167c96b9ab..88d0a1ab1f77 100644
--- a/arch/x86/include/asm/pti.h
+++ b/arch/x86/include/asm/pti.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PTI_H
#define _ASM_X86_PTI_H
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#ifdef CONFIG_MITIGATION_PAGE_TABLE_ISOLATION
extern void pti_init(void);
@@ -11,5 +11,5 @@ extern void pti_finalize(void);
static inline void pti_check_boottime_disable(void) { }
#endif
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif /* _ASM_X86_PTI_H */
diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
index 5a83fbd9bc0b..50f75467f73d 100644
--- a/arch/x86/include/asm/ptrace.h
+++ b/arch/x86/include/asm/ptrace.h
@@ -6,7 +6,7 @@
#include <asm/page_types.h>
#include <uapi/asm/ptrace.h>
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#ifdef __i386__
struct pt_regs {
@@ -469,5 +469,5 @@ extern int do_set_thread_area(struct task_struct *p, int idx,
# define do_set_thread_area_64(p, s, t) (0)
#endif
-#endif /* !__ASSEMBLY__ */
+#endif /* !__ASSEMBLER__ */
#endif /* _ASM_X86_PTRACE_H */
diff --git a/arch/x86/include/asm/purgatory.h b/arch/x86/include/asm/purgatory.h
index 5528e9325049..2fee5e9f1ccc 100644
--- a/arch/x86/include/asm/purgatory.h
+++ b/arch/x86/include/asm/purgatory.h
@@ -2,10 +2,10 @@
#ifndef _ASM_X86_PURGATORY_H
#define _ASM_X86_PURGATORY_H
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <linux/purgatory.h>
extern void purgatory(void);
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif /* _ASM_PURGATORY_H */
diff --git a/arch/x86/include/asm/pvclock-abi.h b/arch/x86/include/asm/pvclock-abi.h
index 1436226efe3e..b9fece5fc96d 100644
--- a/arch/x86/include/asm/pvclock-abi.h
+++ b/arch/x86/include/asm/pvclock-abi.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PVCLOCK_ABI_H
#define _ASM_X86_PVCLOCK_ABI_H
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
/*
* These structs MUST NOT be changed.
@@ -44,5 +44,5 @@ struct pvclock_wall_clock {
#define PVCLOCK_GUEST_STOPPED (1 << 1)
/* PVCLOCK_COUNTS_FROM_ZERO broke ABI and can't be used anymore. */
#define PVCLOCK_COUNTS_FROM_ZERO (1 << 2)
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif /* _ASM_X86_PVCLOCK_ABI_H */
diff --git a/arch/x86/include/asm/realmode.h b/arch/x86/include/asm/realmode.h
index 87e5482acd0d..f607081a022a 100644
--- a/arch/x86/include/asm/realmode.h
+++ b/arch/x86/include/asm/realmode.h
@@ -9,7 +9,7 @@
#define TH_FLAGS_SME_ACTIVE_BIT 0
#define TH_FLAGS_SME_ACTIVE BIT(TH_FLAGS_SME_ACTIVE_BIT)
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <linux/types.h>
#include <asm/io.h>
@@ -95,6 +95,6 @@ void reserve_real_mode(void);
void load_trampoline_pgtable(void);
void init_real_mode(void);
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif /* _ARCH_X86_REALMODE_H */
diff --git a/arch/x86/include/asm/required-features.h b/arch/x86/include/asm/required-features.h
deleted file mode 100644
index e9187ddd3d1f..000000000000
--- a/arch/x86/include/asm/required-features.h
+++ /dev/null
@@ -1,105 +0,0 @@
-#ifndef _ASM_X86_REQUIRED_FEATURES_H
-#define _ASM_X86_REQUIRED_FEATURES_H
-
-/* Define minimum CPUID feature set for kernel These bits are checked
- really early to actually display a visible error message before the
- kernel dies. Make sure to assign features to the proper mask!
-
- Some requirements that are not in CPUID yet are also in the
- CONFIG_X86_MINIMUM_CPU_FAMILY which is checked too.
-
- The real information is in arch/x86/Kconfig.cpu, this just converts
- the CONFIGs into a bitmask */
-
-#ifndef CONFIG_MATH_EMULATION
-# define NEED_FPU (1<<(X86_FEATURE_FPU & 31))
-#else
-# define NEED_FPU 0
-#endif
-
-#if defined(CONFIG_X86_PAE) || defined(CONFIG_X86_64)
-# define NEED_PAE (1<<(X86_FEATURE_PAE & 31))
-#else
-# define NEED_PAE 0
-#endif
-
-#ifdef CONFIG_X86_CMPXCHG64
-# define NEED_CX8 (1<<(X86_FEATURE_CX8 & 31))
-#else
-# define NEED_CX8 0
-#endif
-
-#if defined(CONFIG_X86_CMOV) || defined(CONFIG_X86_64)
-# define NEED_CMOV (1<<(X86_FEATURE_CMOV & 31))
-#else
-# define NEED_CMOV 0
-#endif
-
-# define NEED_3DNOW 0
-
-#if defined(CONFIG_X86_P6_NOP) || defined(CONFIG_X86_64)
-# define NEED_NOPL (1<<(X86_FEATURE_NOPL & 31))
-#else
-# define NEED_NOPL 0
-#endif
-
-#ifdef CONFIG_MATOM
-# define NEED_MOVBE (1<<(X86_FEATURE_MOVBE & 31))
-#else
-# define NEED_MOVBE 0
-#endif
-
-#ifdef CONFIG_X86_64
-#ifdef CONFIG_PARAVIRT_XXL
-/* Paravirtualized systems may not have PSE or PGE available */
-#define NEED_PSE 0
-#define NEED_PGE 0
-#else
-#define NEED_PSE (1<<(X86_FEATURE_PSE) & 31)
-#define NEED_PGE (1<<(X86_FEATURE_PGE) & 31)
-#endif
-#define NEED_MSR (1<<(X86_FEATURE_MSR & 31))
-#define NEED_FXSR (1<<(X86_FEATURE_FXSR & 31))
-#define NEED_XMM (1<<(X86_FEATURE_XMM & 31))
-#define NEED_XMM2 (1<<(X86_FEATURE_XMM2 & 31))
-#define NEED_LM (1<<(X86_FEATURE_LM & 31))
-#else
-#define NEED_PSE 0
-#define NEED_MSR 0
-#define NEED_PGE 0
-#define NEED_FXSR 0
-#define NEED_XMM 0
-#define NEED_XMM2 0
-#define NEED_LM 0
-#endif
-
-#define REQUIRED_MASK0 (NEED_FPU|NEED_PSE|NEED_MSR|NEED_PAE|\
- NEED_CX8|NEED_PGE|NEED_FXSR|NEED_CMOV|\
- NEED_XMM|NEED_XMM2)
-#define SSE_MASK (NEED_XMM|NEED_XMM2)
-
-#define REQUIRED_MASK1 (NEED_LM|NEED_3DNOW)
-
-#define REQUIRED_MASK2 0
-#define REQUIRED_MASK3 (NEED_NOPL)
-#define REQUIRED_MASK4 (NEED_MOVBE)
-#define REQUIRED_MASK5 0
-#define REQUIRED_MASK6 0
-#define REQUIRED_MASK7 0
-#define REQUIRED_MASK8 0
-#define REQUIRED_MASK9 0
-#define REQUIRED_MASK10 0
-#define REQUIRED_MASK11 0
-#define REQUIRED_MASK12 0
-#define REQUIRED_MASK13 0
-#define REQUIRED_MASK14 0
-#define REQUIRED_MASK15 0
-#define REQUIRED_MASK16 0
-#define REQUIRED_MASK17 0
-#define REQUIRED_MASK18 0
-#define REQUIRED_MASK19 0
-#define REQUIRED_MASK20 0
-#define REQUIRED_MASK21 0
-#define REQUIRED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 22)
-
-#endif /* _ASM_X86_REQUIRED_FEATURES_H */
diff --git a/arch/x86/include/asm/resctrl.h b/arch/x86/include/asm/resctrl.h
index 8b1b6ce1e51b..011bf67a1866 100644
--- a/arch/x86/include/asm/resctrl.h
+++ b/arch/x86/include/asm/resctrl.h
@@ -4,8 +4,10 @@
#ifdef CONFIG_X86_CPU_RESCTRL
-#include <linux/sched.h>
#include <linux/jump_label.h>
+#include <linux/percpu.h>
+#include <linux/resctrl_types.h>
+#include <linux/sched.h>
/*
* This value can never be a valid CLOSID, and is used when mapping a
@@ -40,6 +42,7 @@ DECLARE_PER_CPU(struct resctrl_pqr_state, pqr_state);
extern bool rdt_alloc_capable;
extern bool rdt_mon_capable;
+extern unsigned int rdt_mon_features;
DECLARE_STATIC_KEY_FALSE(rdt_enable_key);
DECLARE_STATIC_KEY_FALSE(rdt_alloc_enable_key);
@@ -79,6 +82,21 @@ static inline void resctrl_arch_disable_mon(void)
static_branch_dec_cpuslocked(&rdt_enable_key);
}
+static inline bool resctrl_arch_is_llc_occupancy_enabled(void)
+{
+ return (rdt_mon_features & (1 << QOS_L3_OCCUP_EVENT_ID));
+}
+
+static inline bool resctrl_arch_is_mbm_total_enabled(void)
+{
+ return (rdt_mon_features & (1 << QOS_L3_MBM_TOTAL_EVENT_ID));
+}
+
+static inline bool resctrl_arch_is_mbm_local_enabled(void)
+{
+ return (rdt_mon_features & (1 << QOS_L3_MBM_LOCAL_EVENT_ID));
+}
+
/*
* __resctrl_sched_in() - Writes the task's CLOSid/RMID to IA32_PQR_MSR
*
@@ -96,8 +114,8 @@ static inline void resctrl_arch_disable_mon(void)
static inline void __resctrl_sched_in(struct task_struct *tsk)
{
struct resctrl_pqr_state *state = this_cpu_ptr(&pqr_state);
- u32 closid = state->default_closid;
- u32 rmid = state->default_rmid;
+ u32 closid = READ_ONCE(state->default_closid);
+ u32 rmid = READ_ONCE(state->default_rmid);
u32 tmp;
/*
@@ -132,6 +150,13 @@ static inline unsigned int resctrl_arch_round_mon_val(unsigned int val)
return val * scale;
}
+static inline void resctrl_arch_set_cpu_default_closid_rmid(int cpu, u32 closid,
+ u32 rmid)
+{
+ WRITE_ONCE(per_cpu(pqr_state.default_closid, cpu), closid);
+ WRITE_ONCE(per_cpu(pqr_state.default_rmid, cpu), rmid);
+}
+
static inline void resctrl_arch_set_closid_rmid(struct task_struct *tsk,
u32 closid, u32 rmid)
{
@@ -178,6 +203,11 @@ static inline void *resctrl_arch_mon_ctx_alloc(struct rdt_resource *r, int evtid
static inline void resctrl_arch_mon_ctx_free(struct rdt_resource *r, int evtid,
void *ctx) { };
+u64 resctrl_arch_get_prefetch_disable_bits(void);
+int resctrl_arch_pseudo_lock_fn(void *_plr);
+int resctrl_arch_measure_cycles_lat_fn(void *_plr);
+int resctrl_arch_measure_l2_residency(void *_plr);
+int resctrl_arch_measure_l3_residency(void *_plr);
void resctrl_cpu_detect(struct cpuinfo_x86 *c);
#else
diff --git a/arch/x86/include/asm/rmwcc.h b/arch/x86/include/asm/rmwcc.h
index 363266cbcada..3821ee3fae35 100644
--- a/arch/x86/include/asm/rmwcc.h
+++ b/arch/x86/include/asm/rmwcc.h
@@ -29,7 +29,7 @@ cc_label: c = true; \
#define __GEN_RMWcc(fullop, _var, cc, clobbers, ...) \
({ \
bool c; \
- asm volatile (fullop CC_SET(cc) \
+ asm_inline volatile (fullop CC_SET(cc) \
: [var] "+m" (_var), CC_OUT(cc) (c) \
: __VA_ARGS__ : clobbers); \
c; \
diff --git a/arch/x86/include/asm/runtime-const.h b/arch/x86/include/asm/runtime-const.h
index 6652ebddfd02..8d983cfd06ea 100644
--- a/arch/x86/include/asm/runtime-const.h
+++ b/arch/x86/include/asm/runtime-const.h
@@ -2,6 +2,18 @@
#ifndef _ASM_RUNTIME_CONST_H
#define _ASM_RUNTIME_CONST_H
+#ifdef __ASSEMBLY__
+
+.macro RUNTIME_CONST_PTR sym reg
+ movq $0x0123456789abcdef, %\reg
+ 1:
+ .pushsection runtime_ptr_\sym, "a"
+ .long 1b - 8 - .
+ .popsection
+.endm
+
+#else /* __ASSEMBLY__ */
+
#define runtime_const_ptr(sym) ({ \
typeof(sym) __ret; \
asm_inline("mov %1,%0\n1:\n" \
@@ -58,4 +70,5 @@ static inline void runtime_const_fixup(void (*fn)(void *, unsigned long),
}
}
+#endif /* __ASSEMBLY__ */
#endif
diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
index 9d6411c65920..77d8f49b92bd 100644
--- a/arch/x86/include/asm/segment.h
+++ b/arch/x86/include/asm/segment.h
@@ -233,7 +233,7 @@
#define VDSO_CPUNODE_BITS 12
#define VDSO_CPUNODE_MASK 0xfff
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
/* Helper functions to store/load CPU and node numbers */
@@ -265,7 +265,7 @@ static inline void vdso_read_cpunode(unsigned *cpu, unsigned *node)
*node = (p >> VDSO_CPUNODE_BITS);
}
-#endif /* !__ASSEMBLY__ */
+#endif /* !__ASSEMBLER__ */
#ifdef __KERNEL__
@@ -286,7 +286,7 @@ static inline void vdso_read_cpunode(unsigned *cpu, unsigned *node)
*/
#define XEN_EARLY_IDT_HANDLER_SIZE (8 + ENDBR_INSN_SIZE)
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
extern const char early_idt_handler_array[NUM_EXCEPTION_VECTORS][EARLY_IDT_HANDLER_SIZE];
extern void early_ignore_irq(void);
@@ -350,7 +350,7 @@ static inline void __loadsegment_fs(unsigned short value)
#define savesegment(seg, value) \
asm("mov %%" #seg ",%0":"=r" (value) : : "memory")
-#endif /* !__ASSEMBLY__ */
+#endif /* !__ASSEMBLER__ */
#endif /* __KERNEL__ */
#endif /* _ASM_X86_SEGMENT_H */
diff --git a/arch/x86/include/asm/set_memory.h b/arch/x86/include/asm/set_memory.h
index cc62ef70ccc0..8d9f1c9aaa4c 100644
--- a/arch/x86/include/asm/set_memory.h
+++ b/arch/x86/include/asm/set_memory.h
@@ -2,7 +2,6 @@
#ifndef _ASM_X86_SET_MEMORY_H
#define _ASM_X86_SET_MEMORY_H
-#include <linux/mm.h>
#include <asm/page.h>
#include <asm-generic/set_memory.h>
@@ -38,7 +37,6 @@ int set_memory_rox(unsigned long addr, int numpages);
* The caller is required to take care of these.
*/
-int __set_memory_prot(unsigned long addr, int numpages, pgprot_t prot);
int _set_memory_uc(unsigned long addr, int numpages);
int _set_memory_wc(unsigned long addr, int numpages);
int _set_memory_wt(unsigned long addr, int numpages);
diff --git a/arch/x86/include/asm/setup.h b/arch/x86/include/asm/setup.h
index 85f4fde3515c..ad9212df0ec0 100644
--- a/arch/x86/include/asm/setup.h
+++ b/arch/x86/include/asm/setup.h
@@ -27,7 +27,7 @@
#define OLD_CL_ADDRESS 0x020 /* Relative to real mode data */
#define NEW_CL_POINTER 0x228 /* Relative to real mode data */
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <linux/cache.h>
#include <asm/bootparam.h>
@@ -46,6 +46,7 @@ void setup_bios_corruption_check(void);
void early_platform_quirks(void);
extern unsigned long saved_video_mode;
+extern unsigned long acpi_realmode_flags;
extern void reserve_standard_io_resources(void);
extern void i386_reserve_resources(void);
@@ -141,7 +142,7 @@ extern bool builtin_cmdline_added __ro_after_init;
#define builtin_cmdline_added 0
#endif
-#else /* __ASSEMBLY */
+#else /* __ASSEMBLER__ */
.macro __RESERVE_BRK name, size
.pushsection .bss..brk, "aw"
@@ -153,6 +154,6 @@ SYM_DATA_END(__brk_\name)
#define RESERVE_BRK(name, size) __RESERVE_BRK name, size
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif /* _ASM_X86_SETUP_H */
diff --git a/arch/x86/include/asm/setup_data.h b/arch/x86/include/asm/setup_data.h
index 77c51111a893..7bb16f843c93 100644
--- a/arch/x86/include/asm/setup_data.h
+++ b/arch/x86/include/asm/setup_data.h
@@ -4,7 +4,7 @@
#include <uapi/asm/setup_data.h>
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
struct pci_setup_rom {
struct setup_data data;
@@ -27,6 +27,6 @@ struct efi_setup_data {
u64 reserved[8];
};
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif /* _ASM_X86_SETUP_DATA_H */
diff --git a/arch/x86/include/asm/sev-common.h b/arch/x86/include/asm/sev-common.h
index dcbccdb280f9..acb85b9346d8 100644
--- a/arch/x86/include/asm/sev-common.h
+++ b/arch/x86/include/asm/sev-common.h
@@ -212,8 +212,16 @@ struct snp_psc_desc {
#define GHCB_RESP_CODE(v) ((v) & GHCB_MSR_INFO_MASK)
/*
- * Error codes related to GHCB input that can be communicated back to the guest
- * by setting the lower 32-bits of the GHCB SW_EXITINFO1 field to 2.
+ * GHCB-defined return codes that are communicated back to the guest via
+ * SW_EXITINFO1.
+ */
+#define GHCB_HV_RESP_NO_ACTION 0
+#define GHCB_HV_RESP_ISSUE_EXCEPTION 1
+#define GHCB_HV_RESP_MALFORMED_INPUT 2
+
+/*
+ * GHCB-defined sub-error codes for malformed input (see above) that are
+ * communicated back to the guest via SW_EXITINFO2[31:0].
*/
#define GHCB_ERR_NOT_REGISTERED 1
#define GHCB_ERR_INVALID_USAGE 2
diff --git a/arch/x86/include/asm/sev.h b/arch/x86/include/asm/sev.h
index 1581246491b5..ba7999f66abe 100644
--- a/arch/x86/include/asm/sev.h
+++ b/arch/x86/include/asm/sev.h
@@ -203,6 +203,9 @@ struct snp_guest_req {
unsigned int vmpck_id;
u8 msg_version;
u8 msg_type;
+
+ struct snp_req_data input;
+ void *certs_data;
};
/*
@@ -263,9 +266,6 @@ struct snp_msg_desc {
struct snp_guest_msg secret_request, secret_response;
struct snp_secrets_page *secrets;
- struct snp_req_data input;
-
- void *certs_data;
struct aesgcm_ctx *ctx;
diff --git a/arch/x86/include/asm/shared/tdx.h b/arch/x86/include/asm/shared/tdx.h
index fcbbef484a78..a28ff6b14145 100644
--- a/arch/x86/include/asm/shared/tdx.h
+++ b/arch/x86/include/asm/shared/tdx.h
@@ -106,7 +106,7 @@
#define TDX_PS_1G 2
#define TDX_PS_NR (TDX_PS_1G + 1)
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <linux/compiler_attributes.h>
@@ -177,5 +177,5 @@ static __always_inline u64 hcall_func(u64 exit_reason)
return exit_reason;
}
-#endif /* !__ASSEMBLY__ */
+#endif /* !__ASSEMBLER__ */
#endif /* _ASM_X86_SHARED_TDX_H */
diff --git a/arch/x86/include/asm/shstk.h b/arch/x86/include/asm/shstk.h
index 4cb77e004615..ba6f2fe43848 100644
--- a/arch/x86/include/asm/shstk.h
+++ b/arch/x86/include/asm/shstk.h
@@ -2,7 +2,7 @@
#ifndef _ASM_X86_SHSTK_H
#define _ASM_X86_SHSTK_H
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <linux/types.h>
struct task_struct;
@@ -37,6 +37,6 @@ static inline int shstk_update_last_frame(unsigned long val) { return 0; }
static inline bool shstk_is_enabled(void) { return false; }
#endif /* CONFIG_X86_USER_SHADOW_STACK */
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif /* _ASM_X86_SHSTK_H */
diff --git a/arch/x86/include/asm/signal.h b/arch/x86/include/asm/signal.h
index 4a4043ca6493..c72d46175374 100644
--- a/arch/x86/include/asm/signal.h
+++ b/arch/x86/include/asm/signal.h
@@ -2,7 +2,7 @@
#ifndef _ASM_X86_SIGNAL_H
#define _ASM_X86_SIGNAL_H
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <linux/linkage.h>
/* Most things should be clean enough to redefine this at will, if care
@@ -28,9 +28,9 @@ typedef struct {
#define SA_IA32_ABI 0x02000000u
#define SA_X32_ABI 0x01000000u
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#include <uapi/asm/signal.h>
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#define __ARCH_HAS_SA_RESTORER
@@ -101,5 +101,5 @@ struct pt_regs;
#endif /* !__i386__ */
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif /* _ASM_X86_SIGNAL_H */
diff --git a/arch/x86/include/asm/smap.h b/arch/x86/include/asm/smap.h
index 2de1e5a75c57..daea94c2993c 100644
--- a/arch/x86/include/asm/smap.h
+++ b/arch/x86/include/asm/smap.h
@@ -13,7 +13,7 @@
#include <asm/cpufeatures.h>
#include <asm/alternative.h>
-#ifdef __ASSEMBLY__
+#ifdef __ASSEMBLER__
#define ASM_CLAC \
ALTERNATIVE "", "clac", X86_FEATURE_SMAP
@@ -21,7 +21,7 @@
#define ASM_STAC \
ALTERNATIVE "", "stac", X86_FEATURE_SMAP
-#else /* __ASSEMBLY__ */
+#else /* __ASSEMBLER__ */
static __always_inline void clac(void)
{
@@ -61,6 +61,6 @@ static __always_inline void smap_restore(unsigned long flags)
#define ASM_STAC \
ALTERNATIVE("", "stac", X86_FEATURE_SMAP)
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif /* _ASM_X86_SMAP_H */
diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
index ca073f40698f..0c1c68039d6f 100644
--- a/arch/x86/include/asm/smp.h
+++ b/arch/x86/include/asm/smp.h
@@ -1,12 +1,13 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_SMP_H
#define _ASM_X86_SMP_H
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <linux/cpumask.h>
+#include <linux/thread_info.h>
#include <asm/cpumask.h>
-#include <asm/current.h>
-#include <asm/thread_info.h>
+
+DECLARE_PER_CPU_CACHE_HOT(int, cpu_number);
DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_sibling_map);
DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_core_map);
@@ -114,13 +115,12 @@ void wbinvd_on_cpu(int cpu);
int wbinvd_on_all_cpus(void);
void smp_kick_mwait_play_dead(void);
+void __noreturn mwait_play_dead(unsigned int eax_hint);
void native_smp_send_reschedule(int cpu);
void native_send_call_func_ipi(const struct cpumask *mask);
void native_send_call_func_single_ipi(int cpu);
-void smp_store_cpu_info(int id);
-
asmlinkage __visible void smp_reboot_interrupt(void);
__visible void smp_reschedule_interrupt(struct pt_regs *regs);
__visible void smp_call_function_interrupt(struct pt_regs *regs);
@@ -133,14 +133,8 @@ __visible void smp_call_function_single_interrupt(struct pt_regs *r);
* This function is needed by all SMP systems. It must _always_ be valid
* from the initial startup.
*/
-#define raw_smp_processor_id() this_cpu_read(pcpu_hot.cpu_number)
-#define __smp_processor_id() __this_cpu_read(pcpu_hot.cpu_number)
-
-#ifdef CONFIG_X86_32
-extern int safe_smp_processor_id(void);
-#else
-# define safe_smp_processor_id() smp_processor_id()
-#endif
+#define raw_smp_processor_id() this_cpu_read(cpu_number)
+#define __smp_processor_id() __this_cpu_read(cpu_number)
static inline struct cpumask *cpu_llc_shared_mask(int cpu)
{
@@ -164,6 +158,8 @@ static inline struct cpumask *cpu_llc_shared_mask(int cpu)
{
return (struct cpumask *)cpumask_of(0);
}
+
+static inline void __noreturn mwait_play_dead(unsigned int eax_hint) { BUG(); }
#endif /* CONFIG_SMP */
#ifdef CONFIG_DEBUG_NMI_SELFTEST
@@ -175,7 +171,7 @@ extern void nmi_selftest(void);
extern unsigned int smpboot_control;
extern unsigned long apic_mmio_base;
-#endif /* !__ASSEMBLY__ */
+#endif /* !__ASSEMBLER__ */
/* Control bits for startup_64 */
#define STARTUP_READ_APICID 0x80000000
diff --git a/arch/x86/include/asm/special_insns.h b/arch/x86/include/asm/special_insns.h
index 03e7c2d49559..6266d6b9e0b8 100644
--- a/arch/x86/include/asm/special_insns.h
+++ b/arch/x86/include/asm/special_insns.h
@@ -42,14 +42,14 @@ static __always_inline void native_write_cr2(unsigned long val)
asm volatile("mov %0,%%cr2": : "r" (val) : "memory");
}
-static inline unsigned long __native_read_cr3(void)
+static __always_inline unsigned long __native_read_cr3(void)
{
unsigned long val;
asm volatile("mov %%cr3,%0\n\t" : "=r" (val) : __FORCE_ORDER);
return val;
}
-static inline void native_write_cr3(unsigned long val)
+static __always_inline void native_write_cr3(unsigned long val)
{
asm volatile("mov %0,%%cr3": : "r" (val) : "memory");
}
@@ -176,9 +176,8 @@ static __always_inline void clflush(volatile void *__p)
static inline void clflushopt(volatile void *__p)
{
- alternative_io(".byte 0x3e; clflush %0",
- ".byte 0x66; clflush %0",
- X86_FEATURE_CLFLUSHOPT,
+ alternative_io("ds clflush %0",
+ "clflushopt %0", X86_FEATURE_CLFLUSHOPT,
"+m" (*(volatile char __force *)__p));
}
@@ -186,14 +185,11 @@ static inline void clwb(volatile void *__p)
{
volatile struct { char x[64]; } *p = __p;
- asm volatile(ALTERNATIVE_2(
- ".byte 0x3e; clflush (%[pax])",
- ".byte 0x66; clflush (%[pax])", /* clflushopt (%%rax) */
- X86_FEATURE_CLFLUSHOPT,
- ".byte 0x66, 0x0f, 0xae, 0x30", /* clwb (%%rax) */
- X86_FEATURE_CLWB)
- : [p] "+m" (*p)
- : [pax] "a" (p));
+ asm_inline volatile(ALTERNATIVE_2(
+ "ds clflush %0",
+ "clflushopt %0", X86_FEATURE_CLFLUSHOPT,
+ "clwb %0", X86_FEATURE_CLWB)
+ : "+m" (*p));
}
#ifdef CONFIG_X86_USER_SHADOW_STACK
diff --git a/arch/x86/include/asm/sta2x11.h b/arch/x86/include/asm/sta2x11.h
deleted file mode 100644
index e0975e9c4f47..000000000000
--- a/arch/x86/include/asm/sta2x11.h
+++ /dev/null
@@ -1,13 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Header file for STMicroelectronics ConneXt (STA2X11) IOHub
- */
-#ifndef __ASM_STA2X11_H
-#define __ASM_STA2X11_H
-
-#include <linux/pci.h>
-
-/* This needs to be called from the MFD to configure its sub-devices */
-struct sta2x11_instance *sta2x11_get_instance(struct pci_dev *pdev);
-
-#endif /* __ASM_STA2X11_H */
diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
index 00473a650f51..cd761b14eb02 100644
--- a/arch/x86/include/asm/stackprotector.h
+++ b/arch/x86/include/asm/stackprotector.h
@@ -2,26 +2,10 @@
/*
* GCC stack protector support.
*
- * Stack protector works by putting predefined pattern at the start of
+ * Stack protector works by putting a predefined pattern at the start of
* the stack frame and verifying that it hasn't been overwritten when
- * returning from the function. The pattern is called stack canary
- * and unfortunately gcc historically required it to be at a fixed offset
- * from the percpu segment base. On x86_64, the offset is 40 bytes.
- *
- * The same segment is shared by percpu area and stack canary. On
- * x86_64, percpu symbols are zero based and %gs (64-bit) points to the
- * base of percpu area. The first occupant of the percpu area is always
- * fixed_percpu_data which contains stack_canary at the appropriate
- * offset. On x86_32, the stack canary is just a regular percpu
- * variable.
- *
- * Putting percpu data in %fs on 32-bit is a minor optimization compared to
- * using %gs. Since 32-bit userspace normally has %fs == 0, we are likely
- * to load 0 into %fs on exit to usermode, whereas with percpu data in
- * %gs, we are likely to load a non-null %gs on return to user mode.
- *
- * Once we are willing to require GCC 8.1 or better for 64-bit stackprotector
- * support, we can remove some of this complexity.
+ * returning from the function. The pattern is called the stack canary
+ * and is a unique value for each task.
*/
#ifndef _ASM_STACKPROTECTOR_H
@@ -36,6 +20,8 @@
#include <linux/sched.h>
+DECLARE_PER_CPU_CACHE_HOT(unsigned long, __stack_chk_guard);
+
/*
* Initialize the stackprotector canary value.
*
@@ -51,25 +37,13 @@ static __always_inline void boot_init_stack_canary(void)
{
unsigned long canary = get_random_canary();
-#ifdef CONFIG_X86_64
- BUILD_BUG_ON(offsetof(struct fixed_percpu_data, stack_canary) != 40);
-#endif
-
current->stack_canary = canary;
-#ifdef CONFIG_X86_64
- this_cpu_write(fixed_percpu_data.stack_canary, canary);
-#else
this_cpu_write(__stack_chk_guard, canary);
-#endif
}
static inline void cpu_init_stack_canary(int cpu, struct task_struct *idle)
{
-#ifdef CONFIG_X86_64
- per_cpu(fixed_percpu_data.stack_canary, cpu) = idle->stack_canary;
-#else
per_cpu(__stack_chk_guard, cpu) = idle->stack_canary;
-#endif
}
#else /* STACKPROTECTOR */
diff --git a/arch/x86/include/asm/string_64.h b/arch/x86/include/asm/string_64.h
index 9d0b324eab21..79e9695dc13e 100644
--- a/arch/x86/include/asm/string_64.h
+++ b/arch/x86/include/asm/string_64.h
@@ -21,6 +21,7 @@ extern void *__memcpy(void *to, const void *from, size_t len);
#define __HAVE_ARCH_MEMSET
void *memset(void *s, int c, size_t n);
void *__memset(void *s, int c, size_t n);
+KCFI_REFERENCE(__memset);
/*
* KMSAN needs to instrument as much code as possible. Use C versions of
@@ -70,6 +71,7 @@ static inline void *memset64(uint64_t *s, uint64_t v, size_t n)
#define __HAVE_ARCH_MEMMOVE
void *memmove(void *dest, const void *src, size_t count);
void *__memmove(void *dest, const void *src, size_t count);
+KCFI_REFERENCE(__memmove);
int memcmp(const void *cs, const void *ct, size_t count);
size_t strlen(const char *s);
diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h
index e2fac21471f5..9b7fa99ae951 100644
--- a/arch/x86/include/asm/svm.h
+++ b/arch/x86/include/asm/svm.h
@@ -116,6 +116,7 @@ enum {
INTERCEPT_INVPCID,
INTERCEPT_MCOMMIT,
INTERCEPT_TLBSYNC,
+ INTERCEPT_IDLE_HLT = 166,
};
@@ -290,10 +291,6 @@ static_assert((X2AVIC_MAX_PHYSICAL_ID & AVIC_PHYSICAL_MAX_INDEX_MASK) == X2AVIC_
#define SVM_SEV_FEAT_ALTERNATE_INJECTION BIT(4)
#define SVM_SEV_FEAT_DEBUG_SWAP BIT(5)
-#define SVM_SEV_FEAT_INT_INJ_MODES \
- (SVM_SEV_FEAT_RESTRICTED_INJECTION | \
- SVM_SEV_FEAT_ALTERNATE_INJECTION)
-
struct vmcb_seg {
u16 selector;
u16 attrib;
diff --git a/arch/x86/include/asm/sync_bitops.h b/arch/x86/include/asm/sync_bitops.h
index 6d8d6bc183b7..cd21a0405ac5 100644
--- a/arch/x86/include/asm/sync_bitops.h
+++ b/arch/x86/include/asm/sync_bitops.h
@@ -31,7 +31,7 @@
*/
static inline void sync_set_bit(long nr, volatile unsigned long *addr)
{
- asm volatile("lock; " __ASM_SIZE(bts) " %1,%0"
+ asm volatile("lock " __ASM_SIZE(bts) " %1,%0"
: "+m" (ADDR)
: "Ir" (nr)
: "memory");
@@ -49,7 +49,7 @@ static inline void sync_set_bit(long nr, volatile unsigned long *addr)
*/
static inline void sync_clear_bit(long nr, volatile unsigned long *addr)
{
- asm volatile("lock; " __ASM_SIZE(btr) " %1,%0"
+ asm volatile("lock " __ASM_SIZE(btr) " %1,%0"
: "+m" (ADDR)
: "Ir" (nr)
: "memory");
@@ -66,7 +66,7 @@ static inline void sync_clear_bit(long nr, volatile unsigned long *addr)
*/
static inline void sync_change_bit(long nr, volatile unsigned long *addr)
{
- asm volatile("lock; " __ASM_SIZE(btc) " %1,%0"
+ asm volatile("lock " __ASM_SIZE(btc) " %1,%0"
: "+m" (ADDR)
: "Ir" (nr)
: "memory");
@@ -82,7 +82,7 @@ static inline void sync_change_bit(long nr, volatile unsigned long *addr)
*/
static inline bool sync_test_and_set_bit(long nr, volatile unsigned long *addr)
{
- return GEN_BINARY_RMWcc("lock; " __ASM_SIZE(bts), *addr, c, "Ir", nr);
+ return GEN_BINARY_RMWcc("lock " __ASM_SIZE(bts), *addr, c, "Ir", nr);
}
/**
@@ -95,7 +95,7 @@ static inline bool sync_test_and_set_bit(long nr, volatile unsigned long *addr)
*/
static inline int sync_test_and_clear_bit(long nr, volatile unsigned long *addr)
{
- return GEN_BINARY_RMWcc("lock; " __ASM_SIZE(btr), *addr, c, "Ir", nr);
+ return GEN_BINARY_RMWcc("lock " __ASM_SIZE(btr), *addr, c, "Ir", nr);
}
/**
@@ -108,7 +108,7 @@ static inline int sync_test_and_clear_bit(long nr, volatile unsigned long *addr)
*/
static inline int sync_test_and_change_bit(long nr, volatile unsigned long *addr)
{
- return GEN_BINARY_RMWcc("lock; " __ASM_SIZE(btc), *addr, c, "Ir", nr);
+ return GEN_BINARY_RMWcc("lock " __ASM_SIZE(btc), *addr, c, "Ir", nr);
}
#define sync_test_bit(nr, addr) test_bit(nr, addr)
diff --git a/arch/x86/include/asm/tdx.h b/arch/x86/include/asm/tdx.h
index b4b16dafd55e..65394aa9b49f 100644
--- a/arch/x86/include/asm/tdx.h
+++ b/arch/x86/include/asm/tdx.h
@@ -30,7 +30,7 @@
#define TDX_SUCCESS 0ULL
#define TDX_RND_NO_ENTROPY 0x8000020300000000ULL
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <uapi/asm/mce.h>
@@ -126,5 +126,5 @@ static inline int tdx_enable(void) { return -ENODEV; }
static inline const char *tdx_dump_mce_info(struct mce *m) { return NULL; }
#endif /* CONFIG_INTEL_TDX_HOST */
-#endif /* !__ASSEMBLY__ */
+#endif /* !__ASSEMBLER__ */
#endif /* _ASM_X86_TDX_H */
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index a55c214f3ba6..9282465eea21 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -54,7 +54,7 @@
* - this struct should fit entirely inside of one cache line
* - this struct shares the supervisor stack pages
*/
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
struct task_struct;
#include <asm/cpufeature.h>
#include <linux/atomic.h>
@@ -73,7 +73,7 @@ struct thread_info {
.flags = 0, \
}
-#else /* !__ASSEMBLY__ */
+#else /* !__ASSEMBLER__ */
#include <asm/asm-offsets.h>
@@ -161,7 +161,7 @@ struct thread_info {
*
* preempt_count needs to be 1 initially, until the scheduler is functional.
*/
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
/*
* Walks up the stack frames to make sure that the specified object is
@@ -213,7 +213,7 @@ static inline int arch_within_stack_frames(const void * const stack,
#endif
}
-#endif /* !__ASSEMBLY__ */
+#endif /* !__ASSEMBLER__ */
/*
* Thread-synchronous status.
@@ -224,7 +224,7 @@ static inline int arch_within_stack_frames(const void * const stack,
*/
#define TS_COMPAT 0x0002 /* 32bit syscall active (64BIT)*/
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#ifdef CONFIG_COMPAT
#define TS_I386_REGS_POKED 0x0004 /* regs poked by 32-bit ptracer */
@@ -242,6 +242,6 @@ static inline int arch_within_stack_frames(const void * const stack,
extern void arch_setup_new_exec(void);
#define arch_setup_new_exec arch_setup_new_exec
-#endif /* !__ASSEMBLY__ */
+#endif /* !__ASSEMBLER__ */
#endif /* _ASM_X86_THREAD_INFO_H */
diff --git a/arch/x86/include/asm/tlb.h b/arch/x86/include/asm/tlb.h
index 77f52bc1578a..866ea78ba156 100644
--- a/arch/x86/include/asm/tlb.h
+++ b/arch/x86/include/asm/tlb.h
@@ -6,6 +6,9 @@
static inline void tlb_flush(struct mmu_gather *tlb);
#include <asm-generic/tlb.h>
+#include <linux/kernel.h>
+#include <vdso/bits.h>
+#include <vdso/page.h>
static inline void tlb_flush(struct mmu_gather *tlb)
{
@@ -25,4 +28,139 @@ static inline void invlpg(unsigned long addr)
asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
}
+enum addr_stride {
+ PTE_STRIDE = 0,
+ PMD_STRIDE = 1
+};
+
+/*
+ * INVLPGB can be targeted by virtual address, PCID, ASID, or any combination
+ * of the three. For example:
+ * - FLAG_VA | FLAG_INCLUDE_GLOBAL: invalidate all TLB entries at the address
+ * - FLAG_PCID: invalidate all TLB entries matching the PCID
+ *
+ * The first is used to invalidate (kernel) mappings at a particular
+ * address across all processes.
+ *
+ * The second invalidates all TLB entries matching a PCID.
+ */
+#define INVLPGB_FLAG_VA BIT(0)
+#define INVLPGB_FLAG_PCID BIT(1)
+#define INVLPGB_FLAG_ASID BIT(2)
+#define INVLPGB_FLAG_INCLUDE_GLOBAL BIT(3)
+#define INVLPGB_FLAG_FINAL_ONLY BIT(4)
+#define INVLPGB_FLAG_INCLUDE_NESTED BIT(5)
+
+/* The implied mode when all bits are clear: */
+#define INVLPGB_MODE_ALL_NONGLOBALS 0UL
+
+#ifdef CONFIG_BROADCAST_TLB_FLUSH
+/*
+ * INVLPGB does broadcast TLB invalidation across all the CPUs in the system.
+ *
+ * The INVLPGB instruction is weakly ordered, and a batch of invalidations can
+ * be done in a parallel fashion.
+ *
+ * The instruction takes the number of extra pages to invalidate, beyond the
+ * first page, while __invlpgb gets the more human-readable number of pages to
+ * invalidate.
+ *
+ * The bits in rax[0:2] determine respectively which components of the address
+ * (VA, PCID, ASID) get compared when flushing. If none of these bits are set, *any*
+ * address in the specified range matches.
+ *
+ * Since it is desired to only flush TLB entries for the ASID that is executing
+ * the instruction (a host/hypervisor or a guest), the ASID valid bit should
+ * always be set. On a host/hypervisor, the hardware will use the ASID value
+ * specified in EDX[15:0] (which should be 0). On a guest, the hardware will
+ * use the actual ASID value of the guest.
+ *
+ * TLBSYNC is used to ensure that pending INVLPGB invalidations initiated from
+ * this CPU have completed.
+ */
+static inline void __invlpgb(unsigned long asid, unsigned long pcid,
+ unsigned long addr, u16 nr_pages,
+ enum addr_stride stride, u8 flags)
+{
+ u64 rax = addr | flags | INVLPGB_FLAG_ASID;
+ u32 ecx = (stride << 31) | (nr_pages - 1);
+ u32 edx = (pcid << 16) | asid;
+
+ /* The low bits in rax are for flags. Verify addr is clean. */
+ VM_WARN_ON_ONCE(addr & ~PAGE_MASK);
+
+ /* INVLPGB; supported in binutils >= 2.36. */
+ asm volatile(".byte 0x0f, 0x01, 0xfe" :: "a" (rax), "c" (ecx), "d" (edx));
+}
+
+static inline void __invlpgb_all(unsigned long asid, unsigned long pcid, u8 flags)
+{
+ __invlpgb(asid, pcid, 0, 1, 0, flags);
+}
+
+static inline void __tlbsync(void)
+{
+ /*
+ * TLBSYNC waits for INVLPGB instructions originating on the same CPU
+ * to have completed. Print a warning if the task has been migrated,
+ * and might not be waiting on all the INVLPGBs issued during this TLB
+ * invalidation sequence.
+ */
+ cant_migrate();
+
+ /* TLBSYNC: supported in binutils >= 2.36. */
+ asm volatile(".byte 0x0f, 0x01, 0xff" ::: "memory");
+}
+#else
+/* Some compilers (I'm looking at you clang!) simply can't do DCE */
+static inline void __invlpgb(unsigned long asid, unsigned long pcid,
+ unsigned long addr, u16 nr_pages,
+ enum addr_stride s, u8 flags) { }
+static inline void __invlpgb_all(unsigned long asid, unsigned long pcid, u8 flags) { }
+static inline void __tlbsync(void) { }
+#endif
+
+static inline void invlpgb_flush_user_nr_nosync(unsigned long pcid,
+ unsigned long addr,
+ u16 nr, bool stride)
+{
+ enum addr_stride str = stride ? PMD_STRIDE : PTE_STRIDE;
+ u8 flags = INVLPGB_FLAG_PCID | INVLPGB_FLAG_VA;
+
+ __invlpgb(0, pcid, addr, nr, str, flags);
+}
+
+/* Flush all mappings for a given PCID, not including globals. */
+static inline void invlpgb_flush_single_pcid_nosync(unsigned long pcid)
+{
+ __invlpgb_all(0, pcid, INVLPGB_FLAG_PCID);
+}
+
+/* Flush all mappings, including globals, for all PCIDs. */
+static inline void invlpgb_flush_all(void)
+{
+ /*
+ * TLBSYNC at the end needs to make sure all flushes done on the
+ * current CPU have been executed system-wide. Therefore, make
+ * sure nothing gets migrated in between; disabling preemption is
+ * sufficient and cheaper than disabling migration.
+ */
+ guard(preempt)();
+ __invlpgb_all(0, 0, INVLPGB_FLAG_INCLUDE_GLOBAL);
+ __tlbsync();
+}
+
+/* Flush addr, including globals, for all PCIDs. */
+static inline void invlpgb_flush_addr_nosync(unsigned long addr, u16 nr)
+{
+ __invlpgb(0, 0, addr, nr, PTE_STRIDE, INVLPGB_FLAG_INCLUDE_GLOBAL);
+}
+
+/* Flush all mappings for all PCIDs except globals. */
+static inline void invlpgb_flush_all_nonglobals(void)
+{
+ guard(preempt)();
+ __invlpgb_all(0, 0, INVLPGB_MODE_ALL_NONGLOBALS);
+ __tlbsync();
+}
#endif /* _ASM_X86_TLB_H */
diff --git a/arch/x86/include/asm/tlbbatch.h b/arch/x86/include/asm/tlbbatch.h
index 1ad56eb3e8a8..80aaf64ff25f 100644
--- a/arch/x86/include/asm/tlbbatch.h
+++ b/arch/x86/include/asm/tlbbatch.h
@@ -10,6 +10,11 @@ struct arch_tlbflush_unmap_batch {
* the PFNs being flushed..
*/
struct cpumask cpumask;
+ /*
+ * Set if pages were unmapped from any MM, even one that does not
+ * have active CPUs in its cpumask.
+ */
+ bool unmapped_pages;
};
#endif /* _ARCH_X86_TLBBATCH_H */
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index 02fc2aa06e9e..a9af8759de34 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -6,6 +6,7 @@
#include <linux/mmu_notifier.h>
#include <linux/sched.h>
+#include <asm/barrier.h>
#include <asm/processor.h>
#include <asm/cpufeature.h>
#include <asm/special_insns.h>
@@ -183,6 +184,9 @@ static inline void cr4_init_shadow(void)
extern unsigned long mmu_cr4_features;
extern u32 *trampoline_cr4_features;
+/* How many pages can be invalidated with one INVLPGB. */
+extern u16 invlpgb_count_max;
+
extern void initialize_tlbstate_and_flush(void);
/*
@@ -231,6 +235,71 @@ void flush_tlb_one_kernel(unsigned long addr);
void flush_tlb_multi(const struct cpumask *cpumask,
const struct flush_tlb_info *info);
+static inline bool is_dyn_asid(u16 asid)
+{
+ return asid < TLB_NR_DYN_ASIDS;
+}
+
+static inline bool is_global_asid(u16 asid)
+{
+ return !is_dyn_asid(asid);
+}
+
+#ifdef CONFIG_BROADCAST_TLB_FLUSH
+static inline u16 mm_global_asid(struct mm_struct *mm)
+{
+ u16 asid;
+
+ if (!cpu_feature_enabled(X86_FEATURE_INVLPGB))
+ return 0;
+
+ asid = smp_load_acquire(&mm->context.global_asid);
+
+ /* mm->context.global_asid is either 0, or a global ASID */
+ VM_WARN_ON_ONCE(asid && is_dyn_asid(asid));
+
+ return asid;
+}
+
+static inline void mm_init_global_asid(struct mm_struct *mm)
+{
+ if (cpu_feature_enabled(X86_FEATURE_INVLPGB)) {
+ mm->context.global_asid = 0;
+ mm->context.asid_transition = false;
+ }
+}
+
+static inline void mm_assign_global_asid(struct mm_struct *mm, u16 asid)
+{
+ /*
+ * Notably flush_tlb_mm_range() -> broadcast_tlb_flush() ->
+ * finish_asid_transition() needs to observe asid_transition = true
+ * once it observes global_asid.
+ */
+ mm->context.asid_transition = true;
+ smp_store_release(&mm->context.global_asid, asid);
+}
+
+static inline void mm_clear_asid_transition(struct mm_struct *mm)
+{
+ WRITE_ONCE(mm->context.asid_transition, false);
+}
+
+static inline bool mm_in_asid_transition(struct mm_struct *mm)
+{
+ if (!cpu_feature_enabled(X86_FEATURE_INVLPGB))
+ return false;
+
+ return mm && READ_ONCE(mm->context.asid_transition);
+}
+#else
+static inline u16 mm_global_asid(struct mm_struct *mm) { return 0; }
+static inline void mm_init_global_asid(struct mm_struct *mm) { }
+static inline void mm_assign_global_asid(struct mm_struct *mm, u16 asid) { }
+static inline void mm_clear_asid_transition(struct mm_struct *mm) { }
+static inline bool mm_in_asid_transition(struct mm_struct *mm) { return false; }
+#endif /* CONFIG_BROADCAST_TLB_FLUSH */
+
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#endif
@@ -242,7 +311,7 @@ void flush_tlb_multi(const struct cpumask *cpumask,
flush_tlb_mm_range((vma)->vm_mm, start, end, \
((vma)->vm_flags & VM_HUGETLB) \
? huge_page_shift(hstate_vma(vma)) \
- : PAGE_SHIFT, false)
+ : PAGE_SHIFT, true)
extern void flush_tlb_all(void);
extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
@@ -284,6 +353,7 @@ static inline void arch_tlbbatch_add_pending(struct arch_tlbflush_unmap_batch *b
{
inc_mm_tlb_gen(mm);
cpumask_or(&batch->cpumask, &batch->cpumask, mm_cpumask(mm));
+ batch->unmapped_pages = true;
mmu_notifier_arch_invalidate_secondary_tlbs(mm, 0, -1UL);
}
diff --git a/arch/x86/include/asm/topology.h b/arch/x86/include/asm/topology.h
index ec134b719144..6c79ee7c0957 100644
--- a/arch/x86/include/asm/topology.h
+++ b/arch/x86/include/asm/topology.h
@@ -229,11 +229,11 @@ static inline bool topology_is_primary_thread(unsigned int cpu)
{
return cpumask_test_cpu(cpu, cpu_primary_thread_mask);
}
+#define topology_is_primary_thread topology_is_primary_thread
#else /* CONFIG_SMP */
static inline int topology_phys_to_logical_pkg(unsigned int pkg) { return 0; }
static inline int topology_max_smt_threads(void) { return 1; }
-static inline bool topology_is_primary_thread(unsigned int cpu) { return true; }
static inline unsigned int topology_amd_nodes_per_pkg(void) { return 1; }
#endif /* !CONFIG_SMP */
diff --git a/arch/x86/include/asm/traps.h b/arch/x86/include/asm/traps.h
index 1f1deaecd364..869b88061801 100644
--- a/arch/x86/include/asm/traps.h
+++ b/arch/x86/include/asm/traps.h
@@ -35,8 +35,6 @@ static inline int get_si_code(unsigned long condition)
return TRAP_BRKPT;
}
-extern int panic_on_unrecovered_nmi;
-
void math_emulate(struct math_emu_info *);
bool fault_in_kernel_space(unsigned long address);
diff --git a/arch/x86/include/asm/unwind_hints.h b/arch/x86/include/asm/unwind_hints.h
index 85cc57cb6539..8f4579c5a6f8 100644
--- a/arch/x86/include/asm/unwind_hints.h
+++ b/arch/x86/include/asm/unwind_hints.h
@@ -5,7 +5,7 @@
#include "orc_types.h"
-#ifdef __ASSEMBLY__
+#ifdef __ASSEMBLER__
.macro UNWIND_HINT_END_OF_STACK
UNWIND_HINT type=UNWIND_HINT_TYPE_END_OF_STACK
@@ -88,6 +88,6 @@
#define UNWIND_HINT_RESTORE \
UNWIND_HINT(UNWIND_HINT_TYPE_RESTORE, 0, 0, 0)
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif /* _ASM_X86_UNWIND_HINTS_H */
diff --git a/arch/x86/include/asm/vdso.h b/arch/x86/include/asm/vdso.h
index d7f6592b74a9..80be0da733df 100644
--- a/arch/x86/include/asm/vdso.h
+++ b/arch/x86/include/asm/vdso.h
@@ -18,12 +18,6 @@ struct vdso_image {
unsigned long extable_base, extable_len;
const void *extable;
- long sym_vvar_start; /* Negative offset to the vvar area */
-
- long sym_vvar_page;
- long sym_pvclock_page;
- long sym_hvclock_page;
- long sym_timens_page;
long sym_VDSO32_NOTE_MASK;
long sym___kernel_sigreturn;
long sym___kernel_rt_sigreturn;
diff --git a/arch/x86/include/asm/vdso/getrandom.h b/arch/x86/include/asm/vdso/getrandom.h
index 2bf9c0e970c3..ff1c11b9fa27 100644
--- a/arch/x86/include/asm/vdso/getrandom.h
+++ b/arch/x86/include/asm/vdso/getrandom.h
@@ -5,7 +5,7 @@
#ifndef __ASM_VDSO_GETRANDOM_H
#define __ASM_VDSO_GETRANDOM_H
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <asm/unistd.h>
@@ -27,16 +27,6 @@ static __always_inline ssize_t getrandom_syscall(void *buffer, size_t len, unsig
return ret;
}
-extern struct vdso_rng_data vdso_rng_data
- __attribute__((visibility("hidden")));
-
-static __always_inline const struct vdso_rng_data *__arch_get_vdso_rng_data(void)
-{
- if (IS_ENABLED(CONFIG_TIME_NS) && __arch_get_vdso_data()->clock_mode == VDSO_CLOCKMODE_TIMENS)
- return (void *)&vdso_rng_data + ((void *)&timens_page - (void *)__arch_get_vdso_data());
- return &vdso_rng_data;
-}
-
-#endif /* !__ASSEMBLY__ */
+#endif /* !__ASSEMBLER__ */
#endif /* __ASM_VDSO_GETRANDOM_H */
diff --git a/arch/x86/include/asm/vdso/gettimeofday.h b/arch/x86/include/asm/vdso/gettimeofday.h
index 375a34b0f365..73b2e7ee8f0f 100644
--- a/arch/x86/include/asm/vdso/gettimeofday.h
+++ b/arch/x86/include/asm/vdso/gettimeofday.h
@@ -10,7 +10,7 @@
#ifndef __ASM_VDSO_GETTIMEOFDAY_H
#define __ASM_VDSO_GETTIMEOFDAY_H
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <uapi/linux/time.h>
#include <asm/vgtod.h>
@@ -19,12 +19,6 @@
#include <asm/pvclock.h>
#include <clocksource/hyperv_timer.h>
-extern struct vdso_data vvar_page
- __attribute__((visibility("hidden")));
-
-extern struct vdso_data timens_page
- __attribute__((visibility("hidden")));
-
#define VDSO_HAS_TIME 1
#define VDSO_HAS_CLOCK_GETRES 1
@@ -59,14 +53,6 @@ extern struct ms_hyperv_tsc_page hvclock_page
__attribute__((visibility("hidden")));
#endif
-#ifdef CONFIG_TIME_NS
-static __always_inline
-const struct vdso_data *__arch_get_timens_vdso_data(const struct vdso_data *vd)
-{
- return &timens_page;
-}
-#endif
-
#ifndef BUILD_VDSO32
static __always_inline
@@ -250,7 +236,7 @@ static u64 vread_hvclock(void)
#endif
static inline u64 __arch_get_hw_counter(s32 clock_mode,
- const struct vdso_data *vd)
+ const struct vdso_time_data *vd)
{
if (likely(clock_mode == VDSO_CLOCKMODE_TSC))
return (u64)rdtsc_ordered() & S64_MAX;
@@ -275,12 +261,7 @@ static inline u64 __arch_get_hw_counter(s32 clock_mode,
return U64_MAX;
}
-static __always_inline const struct vdso_data *__arch_get_vdso_data(void)
-{
- return &vvar_page;
-}
-
-static inline bool arch_vdso_clocksource_ok(const struct vdso_data *vd)
+static inline bool arch_vdso_clocksource_ok(const struct vdso_clock *vc)
{
return true;
}
@@ -319,37 +300,37 @@ static inline bool arch_vdso_cycles_ok(u64 cycles)
* declares everything with the MSB/Sign-bit set as invalid. Therefore the
* effective mask is S64_MAX.
*/
-static __always_inline u64 vdso_calc_ns(const struct vdso_data *vd, u64 cycles, u64 base)
+static __always_inline u64 vdso_calc_ns(const struct vdso_clock *vc, u64 cycles, u64 base)
{
- u64 delta = cycles - vd->cycle_last;
+ u64 delta = cycles - vc->cycle_last;
/*
* Negative motion and deltas which can cause multiplication
* overflow require special treatment. This check covers both as
- * negative motion is guaranteed to be greater than @vd::max_cycles
+ * negative motion is guaranteed to be greater than @vc::max_cycles
* due to unsigned comparison.
*
* Due to the MSB/Sign-bit being used as invalid marker (see
* arch_vdso_cycles_ok() above), the effective mask is S64_MAX, but that
* case is also unlikely and will also take the unlikely path here.
*/
- if (unlikely(delta > vd->max_cycles)) {
+ if (unlikely(delta > vc->max_cycles)) {
/*
* Due to the above mentioned TSC wobbles, filter out
* negative motion. Per the above masking, the effective
* sign bit is now bit 62.
*/
if (delta & (1ULL << 62))
- return base >> vd->shift;
+ return base >> vc->shift;
/* Handle multiplication overflow gracefully */
- return mul_u64_u32_add_u64_shr(delta & S64_MAX, vd->mult, base, vd->shift);
+ return mul_u64_u32_add_u64_shr(delta & S64_MAX, vc->mult, base, vc->shift);
}
- return ((delta * vd->mult) + base) >> vd->shift;
+ return ((delta * vc->mult) + base) >> vc->shift;
}
#define vdso_calc_ns vdso_calc_ns
-#endif /* !__ASSEMBLY__ */
+#endif /* !__ASSEMBLER__ */
#endif /* __ASM_VDSO_GETTIMEOFDAY_H */
diff --git a/arch/x86/include/asm/vdso/processor.h b/arch/x86/include/asm/vdso/processor.h
index 2cbce97d29ea..c9b2ba7a9ec4 100644
--- a/arch/x86/include/asm/vdso/processor.h
+++ b/arch/x86/include/asm/vdso/processor.h
@@ -5,7 +5,7 @@
#ifndef __ASM_VDSO_PROCESSOR_H
#define __ASM_VDSO_PROCESSOR_H
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
static __always_inline void rep_nop(void)
@@ -22,6 +22,6 @@ struct getcpu_cache;
notrace long __vdso_getcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *unused);
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif /* __ASM_VDSO_PROCESSOR_H */
diff --git a/arch/x86/include/asm/vdso/vsyscall.h b/arch/x86/include/asm/vdso/vsyscall.h
index 37b4a70559a8..4aa311a923f2 100644
--- a/arch/x86/include/asm/vdso/vsyscall.h
+++ b/arch/x86/include/asm/vdso/vsyscall.h
@@ -2,40 +2,21 @@
#ifndef __ASM_VDSO_VSYSCALL_H
#define __ASM_VDSO_VSYSCALL_H
-#define __VDSO_RND_DATA_OFFSET 640
-#define __VVAR_PAGES 4
+#define __VDSO_PAGES 6
#define VDSO_NR_VCLOCK_PAGES 2
+#define VDSO_VCLOCK_PAGES_START(_b) ((_b) + (__VDSO_PAGES - VDSO_NR_VCLOCK_PAGES) * PAGE_SIZE)
#define VDSO_PAGE_PVCLOCK_OFFSET 0
#define VDSO_PAGE_HVCLOCK_OFFSET 1
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <vdso/datapage.h>
#include <asm/vgtod.h>
-extern struct vdso_data *vdso_data;
-
-/*
- * Update the vDSO data page to keep in sync with kernel timekeeping.
- */
-static __always_inline
-struct vdso_data *__x86_get_k_vdso_data(void)
-{
- return vdso_data;
-}
-#define __arch_get_k_vdso_data __x86_get_k_vdso_data
-
-static __always_inline
-struct vdso_rng_data *__x86_get_k_vdso_rng_data(void)
-{
- return (void *)vdso_data + __VDSO_RND_DATA_OFFSET;
-}
-#define __arch_get_k_vdso_rng_data __x86_get_k_vdso_rng_data
-
/* The asm-generic header needs to be included after the definitions above */
#include <asm-generic/vdso/vsyscall.h>
-#endif /* !__ASSEMBLY__ */
+#endif /* !__ASSEMBLER__ */
#endif /* __ASM_VDSO_VSYSCALL_H */
diff --git a/arch/x86/include/asm/vermagic.h b/arch/x86/include/asm/vermagic.h
index 75884d2cdec3..5d471253c755 100644
--- a/arch/x86/include/asm/vermagic.h
+++ b/arch/x86/include/asm/vermagic.h
@@ -15,8 +15,6 @@
#define MODULE_PROC_FAMILY "586TSC "
#elif defined CONFIG_M586MMX
#define MODULE_PROC_FAMILY "586MMX "
-#elif defined CONFIG_MCORE2
-#define MODULE_PROC_FAMILY "CORE2 "
#elif defined CONFIG_MATOM
#define MODULE_PROC_FAMILY "ATOM "
#elif defined CONFIG_M686
@@ -33,8 +31,6 @@
#define MODULE_PROC_FAMILY "K6 "
#elif defined CONFIG_MK7
#define MODULE_PROC_FAMILY "K7 "
-#elif defined CONFIG_MK8
-#define MODULE_PROC_FAMILY "K8 "
#elif defined CONFIG_MELAN
#define MODULE_PROC_FAMILY "ELAN "
#elif defined CONFIG_MCRUSOE
diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h
index f7fd4369b821..8707361b24da 100644
--- a/arch/x86/include/asm/vmx.h
+++ b/arch/x86/include/asm/vmx.h
@@ -580,18 +580,22 @@ enum vm_entry_failure_code {
/*
* Exit Qualifications for EPT Violations
*/
-#define EPT_VIOLATION_ACC_READ_BIT 0
-#define EPT_VIOLATION_ACC_WRITE_BIT 1
-#define EPT_VIOLATION_ACC_INSTR_BIT 2
-#define EPT_VIOLATION_RWX_SHIFT 3
-#define EPT_VIOLATION_GVA_IS_VALID_BIT 7
-#define EPT_VIOLATION_GVA_TRANSLATED_BIT 8
-#define EPT_VIOLATION_ACC_READ (1 << EPT_VIOLATION_ACC_READ_BIT)
-#define EPT_VIOLATION_ACC_WRITE (1 << EPT_VIOLATION_ACC_WRITE_BIT)
-#define EPT_VIOLATION_ACC_INSTR (1 << EPT_VIOLATION_ACC_INSTR_BIT)
-#define EPT_VIOLATION_RWX_MASK (VMX_EPT_RWX_MASK << EPT_VIOLATION_RWX_SHIFT)
-#define EPT_VIOLATION_GVA_IS_VALID (1 << EPT_VIOLATION_GVA_IS_VALID_BIT)
-#define EPT_VIOLATION_GVA_TRANSLATED (1 << EPT_VIOLATION_GVA_TRANSLATED_BIT)
+#define EPT_VIOLATION_ACC_READ BIT(0)
+#define EPT_VIOLATION_ACC_WRITE BIT(1)
+#define EPT_VIOLATION_ACC_INSTR BIT(2)
+#define EPT_VIOLATION_PROT_READ BIT(3)
+#define EPT_VIOLATION_PROT_WRITE BIT(4)
+#define EPT_VIOLATION_PROT_EXEC BIT(5)
+#define EPT_VIOLATION_PROT_MASK (EPT_VIOLATION_PROT_READ | \
+ EPT_VIOLATION_PROT_WRITE | \
+ EPT_VIOLATION_PROT_EXEC)
+#define EPT_VIOLATION_GVA_IS_VALID BIT(7)
+#define EPT_VIOLATION_GVA_TRANSLATED BIT(8)
+
+#define EPT_VIOLATION_RWX_TO_PROT(__epte) (((__epte) & VMX_EPT_RWX_MASK) << 3)
+
+static_assert(EPT_VIOLATION_RWX_TO_PROT(VMX_EPT_RWX_MASK) ==
+ (EPT_VIOLATION_PROT_READ | EPT_VIOLATION_PROT_WRITE | EPT_VIOLATION_PROT_EXEC));
/*
* Exit Qualifications for NOTIFY VM EXIT
diff --git a/arch/x86/include/asm/xen/interface.h b/arch/x86/include/asm/xen/interface.h
index baca0b00ef76..a078a2b0f032 100644
--- a/arch/x86/include/asm/xen/interface.h
+++ b/arch/x86/include/asm/xen/interface.h
@@ -72,7 +72,7 @@
#endif
#endif
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
/* Explicitly size integers that represent pfns in the public interface
* with Xen so that on ARM we can have one ABI that works for 32 and 64
* bit guests. */
@@ -137,7 +137,7 @@ DEFINE_GUEST_HANDLE(xen_ulong_t);
#define TI_SET_DPL(_ti, _dpl) ((_ti)->flags |= (_dpl))
#define TI_SET_IF(_ti, _if) ((_ti)->flags |= ((!!(_if))<<2))
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
struct trap_info {
uint8_t vector; /* exception vector */
uint8_t flags; /* 0-3: privilege level; 4: clear event enable? */
@@ -186,7 +186,7 @@ struct arch_shared_info {
uint32_t wc_sec_hi;
#endif
};
-#endif /* !__ASSEMBLY__ */
+#endif /* !__ASSEMBLER__ */
#ifdef CONFIG_X86_32
#include <asm/xen/interface_32.h>
@@ -196,7 +196,7 @@ struct arch_shared_info {
#include <asm/pvclock-abi.h>
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
/*
* The following is all CPU context. Note that the fpu_ctxt block is filled
* in by FXSAVE if the CPU has feature FXSR; otherwise FSAVE is used.
@@ -376,7 +376,7 @@ struct xen_pmu_arch {
} c;
};
-#endif /* !__ASSEMBLY__ */
+#endif /* !__ASSEMBLER__ */
/*
* Prefix forces emulation of some non-trapping instructions.
diff --git a/arch/x86/include/asm/xen/interface_32.h b/arch/x86/include/asm/xen/interface_32.h
index dc40578abded..74d9768a9cf7 100644
--- a/arch/x86/include/asm/xen/interface_32.h
+++ b/arch/x86/include/asm/xen/interface_32.h
@@ -44,7 +44,7 @@
*/
#define __HYPERVISOR_VIRT_START 0xF5800000
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
struct cpu_user_regs {
uint32_t ebx;
@@ -85,7 +85,7 @@ typedef struct xen_callback xen_callback_t;
#define XEN_CALLBACK(__cs, __eip) \
((struct xen_callback){ .cs = (__cs), .eip = (unsigned long)(__eip) })
-#endif /* !__ASSEMBLY__ */
+#endif /* !__ASSEMBLER__ */
/*
diff --git a/arch/x86/include/asm/xen/interface_64.h b/arch/x86/include/asm/xen/interface_64.h
index c10f279aae93..38a19edb81a3 100644
--- a/arch/x86/include/asm/xen/interface_64.h
+++ b/arch/x86/include/asm/xen/interface_64.h
@@ -77,7 +77,7 @@
#define VGCF_in_syscall (1<<_VGCF_in_syscall)
#define VGCF_IN_SYSCALL VGCF_in_syscall
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
struct iret_context {
/* Top of stack (%rsp at point of hypercall). */
@@ -143,7 +143,7 @@ typedef unsigned long xen_callback_t;
#define XEN_CALLBACK(__cs, __rip) \
((unsigned long)(__rip))
-#endif /* !__ASSEMBLY__ */
+#endif /* !__ASSEMBLER__ */
#endif /* _ASM_X86_XEN_INTERFACE_64_H */
diff --git a/arch/x86/include/uapi/asm/bootparam.h b/arch/x86/include/uapi/asm/bootparam.h
index 9b82eebd7add..dafbf581c515 100644
--- a/arch/x86/include/uapi/asm/bootparam.h
+++ b/arch/x86/include/uapi/asm/bootparam.h
@@ -26,7 +26,7 @@
#define XLF_5LEVEL_ENABLED (1<<6)
#define XLF_MEM_ENCRYPTION (1<<7)
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <linux/types.h>
#include <linux/screen_info.h>
@@ -210,6 +210,6 @@ enum x86_hardware_subarch {
X86_NR_SUBARCHS,
};
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif /* _ASM_X86_BOOTPARAM_H */
diff --git a/arch/x86/include/uapi/asm/e820.h b/arch/x86/include/uapi/asm/e820.h
index 2f491efe3a12..55bc66867156 100644
--- a/arch/x86/include/uapi/asm/e820.h
+++ b/arch/x86/include/uapi/asm/e820.h
@@ -54,7 +54,7 @@
*/
#define E820_RESERVED_KERN 128
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <linux/types.h>
struct e820entry {
__u64 addr; /* start of memory segment */
@@ -76,7 +76,7 @@ struct e820map {
#define BIOS_ROM_BASE 0xffe00000
#define BIOS_ROM_END 0xffffffff
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif /* _UAPI_ASM_X86_E820_H */
diff --git a/arch/x86/include/uapi/asm/kvm.h b/arch/x86/include/uapi/asm/kvm.h
index 9e75da97bce0..460306b35a4b 100644
--- a/arch/x86/include/uapi/asm/kvm.h
+++ b/arch/x86/include/uapi/asm/kvm.h
@@ -559,6 +559,9 @@ struct kvm_x86_mce {
#define KVM_XEN_HVM_CONFIG_PVCLOCK_TSC_UNSTABLE (1 << 7)
#define KVM_XEN_HVM_CONFIG_SHARED_INFO_HVA (1 << 8)
+#define KVM_XEN_MSR_MIN_INDEX 0x40000000u
+#define KVM_XEN_MSR_MAX_INDEX 0x4fffffffu
+
struct kvm_xen_hvm_config {
__u32 flags;
__u32 msr;
diff --git a/arch/x86/include/uapi/asm/ldt.h b/arch/x86/include/uapi/asm/ldt.h
index d62ac5db093b..a82c039d8e6a 100644
--- a/arch/x86/include/uapi/asm/ldt.h
+++ b/arch/x86/include/uapi/asm/ldt.h
@@ -12,7 +12,7 @@
/* The size of each LDT entry. */
#define LDT_ENTRY_SIZE 8
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
/*
* Note on 64bit base and limit is ignored and you cannot set DS/ES/CS
* not to the default values if you still want to do syscalls. This
@@ -44,5 +44,5 @@ struct user_desc {
#define MODIFY_LDT_CONTENTS_STACK 1
#define MODIFY_LDT_CONTENTS_CODE 2
-#endif /* !__ASSEMBLY__ */
+#endif /* !__ASSEMBLER__ */
#endif /* _ASM_X86_LDT_H */
diff --git a/arch/x86/include/uapi/asm/msr.h b/arch/x86/include/uapi/asm/msr.h
index e7516b402a00..4b8917ca28fe 100644
--- a/arch/x86/include/uapi/asm/msr.h
+++ b/arch/x86/include/uapi/asm/msr.h
@@ -2,7 +2,7 @@
#ifndef _UAPI_ASM_X86_MSR_H
#define _UAPI_ASM_X86_MSR_H
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <linux/types.h>
#include <linux/ioctl.h>
@@ -10,5 +10,5 @@
#define X86_IOC_RDMSR_REGS _IOWR('c', 0xA0, __u32[8])
#define X86_IOC_WRMSR_REGS _IOWR('c', 0xA1, __u32[8])
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif /* _UAPI_ASM_X86_MSR_H */
diff --git a/arch/x86/include/uapi/asm/ptrace-abi.h b/arch/x86/include/uapi/asm/ptrace-abi.h
index 16074b9c93bb..5823584dea13 100644
--- a/arch/x86/include/uapi/asm/ptrace-abi.h
+++ b/arch/x86/include/uapi/asm/ptrace-abi.h
@@ -25,7 +25,7 @@
#else /* __i386__ */
-#if defined(__ASSEMBLY__) || defined(__FRAME_OFFSETS)
+#if defined(__ASSEMBLER__) || defined(__FRAME_OFFSETS)
/*
* C ABI says these regs are callee-preserved. They aren't saved on kernel entry
* unless syscall needs a complete, fully filled "struct pt_regs".
@@ -57,7 +57,7 @@
#define EFLAGS 144
#define RSP 152
#define SS 160
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
/* top of stack page */
#define FRAME_SIZE 168
@@ -87,7 +87,7 @@
#define PTRACE_SINGLEBLOCK 33 /* resume execution until next branch */
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <linux/types.h>
#endif
diff --git a/arch/x86/include/uapi/asm/ptrace.h b/arch/x86/include/uapi/asm/ptrace.h
index 85165c0edafc..e0b5b4f6226b 100644
--- a/arch/x86/include/uapi/asm/ptrace.h
+++ b/arch/x86/include/uapi/asm/ptrace.h
@@ -7,7 +7,7 @@
#include <asm/processor-flags.h>
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#ifdef __i386__
/* this struct defines the way the registers are stored on the
@@ -81,6 +81,6 @@ struct pt_regs {
-#endif /* !__ASSEMBLY__ */
+#endif /* !__ASSEMBLER__ */
#endif /* _UAPI_ASM_X86_PTRACE_H */
diff --git a/arch/x86/include/uapi/asm/setup_data.h b/arch/x86/include/uapi/asm/setup_data.h
index b111b0c18544..50c45ead4e7c 100644
--- a/arch/x86/include/uapi/asm/setup_data.h
+++ b/arch/x86/include/uapi/asm/setup_data.h
@@ -18,7 +18,7 @@
#define SETUP_INDIRECT (1<<31)
#define SETUP_TYPE_MAX (SETUP_ENUM_MAX | SETUP_INDIRECT)
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <linux/types.h>
@@ -78,6 +78,6 @@ struct ima_setup_data {
__u64 size;
} __attribute__((packed));
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif /* _UAPI_ASM_X86_SETUP_DATA_H */
diff --git a/arch/x86/include/uapi/asm/signal.h b/arch/x86/include/uapi/asm/signal.h
index f777346450ec..1067efabf18b 100644
--- a/arch/x86/include/uapi/asm/signal.h
+++ b/arch/x86/include/uapi/asm/signal.h
@@ -2,7 +2,7 @@
#ifndef _UAPI_ASM_X86_SIGNAL_H
#define _UAPI_ASM_X86_SIGNAL_H
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <linux/types.h>
#include <linux/compiler.h>
@@ -16,7 +16,7 @@ struct siginfo;
typedef unsigned long sigset_t;
#endif /* __KERNEL__ */
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#define SIGHUP 1
@@ -68,7 +68,7 @@ typedef unsigned long sigset_t;
#include <asm-generic/signal-defs.h>
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
# ifndef __KERNEL__
@@ -106,6 +106,6 @@ typedef struct sigaltstack {
__kernel_size_t ss_size;
} stack_t;
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif /* _UAPI_ASM_X86_SIGNAL_H */
diff --git a/arch/x86/include/uapi/asm/svm.h b/arch/x86/include/uapi/asm/svm.h
index 1814b413fd57..ec1321248dac 100644
--- a/arch/x86/include/uapi/asm/svm.h
+++ b/arch/x86/include/uapi/asm/svm.h
@@ -95,6 +95,7 @@
#define SVM_EXIT_CR14_WRITE_TRAP 0x09e
#define SVM_EXIT_CR15_WRITE_TRAP 0x09f
#define SVM_EXIT_INVPCID 0x0a2
+#define SVM_EXIT_IDLE_HLT 0x0a6
#define SVM_EXIT_NPF 0x400
#define SVM_EXIT_AVIC_INCOMPLETE_IPI 0x401
#define SVM_EXIT_AVIC_UNACCELERATED_ACCESS 0x402
@@ -224,6 +225,7 @@
{ SVM_EXIT_CR4_WRITE_TRAP, "write_cr4_trap" }, \
{ SVM_EXIT_CR8_WRITE_TRAP, "write_cr8_trap" }, \
{ SVM_EXIT_INVPCID, "invpcid" }, \
+ { SVM_EXIT_IDLE_HLT, "idle-halt" }, \
{ SVM_EXIT_NPF, "npf" }, \
{ SVM_EXIT_AVIC_INCOMPLETE_IPI, "avic_incomplete_ipi" }, \
{ SVM_EXIT_AVIC_UNACCELERATED_ACCESS, "avic_unaccelerated_access" }, \
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index b43eb7e384eb..84cfa179802c 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -44,6 +44,8 @@ KCOV_INSTRUMENT_unwind_orc.o := n
KCOV_INSTRUMENT_unwind_frame.o := n
KCOV_INSTRUMENT_unwind_guess.o := n
+CFLAGS_head32.o := -fno-stack-protector
+CFLAGS_head64.o := -fno-stack-protector
CFLAGS_irq.o := -I $(src)/../include/asm/trace
obj-y += head_$(BITS).o
diff --git a/arch/x86/kernel/acpi/cppc.c b/arch/x86/kernel/acpi/cppc.c
index d745dd586303..77bfb846490c 100644
--- a/arch/x86/kernel/acpi/cppc.c
+++ b/arch/x86/kernel/acpi/cppc.c
@@ -4,6 +4,8 @@
* Copyright (c) 2016, Intel Corporation.
*/
+#include <linux/bitfield.h>
+
#include <acpi/cppc_acpi.h>
#include <asm/msr.h>
#include <asm/processor.h>
@@ -149,7 +151,7 @@ int amd_get_highest_perf(unsigned int cpu, u32 *highest_perf)
if (ret)
goto out;
- val = AMD_CPPC_HIGHEST_PERF(val);
+ val = FIELD_GET(AMD_CPPC_HIGHEST_PERF_MASK, val);
} else {
ret = cppc_get_highest_perf(cpu, &val);
if (ret)
diff --git a/arch/x86/kernel/acpi/cstate.c b/arch/x86/kernel/acpi/cstate.c
index 5854f0b8f0f1..d5ac34186555 100644
--- a/arch/x86/kernel/acpi/cstate.c
+++ b/arch/x86/kernel/acpi/cstate.c
@@ -13,9 +13,11 @@
#include <linux/sched.h>
#include <acpi/processor.h>
+#include <asm/cpu_device_id.h>
#include <asm/cpuid.h>
#include <asm/mwait.h>
#include <asm/special_insns.h>
+#include <asm/smp.h>
/*
* Initialize bm_flags based on the CPU cache properties
@@ -47,12 +49,11 @@ void acpi_processor_power_init_bm_check(struct acpi_processor_flags *flags,
/*
* On all recent Intel platforms, ARB_DISABLE is a nop.
* So, set bm_control to zero to indicate that ARB_DISABLE
- * is not required while entering C3 type state on
- * P4, Core and beyond CPUs
+ * is not required while entering C3 type state.
*/
if (c->x86_vendor == X86_VENDOR_INTEL &&
- (c->x86 > 0xf || (c->x86 == 6 && c->x86_model >= 0x0f)))
- flags->bm_control = 0;
+ (c->x86 > 15 || (c->x86_vfm >= INTEL_CORE2_MEROM && c->x86_vfm <= INTEL_FAM6_LAST)))
+ flags->bm_control = 0;
if (c->x86_vendor == X86_VENDOR_CENTAUR) {
if (c->x86 > 6 || (c->x86 == 6 && c->x86_model == 0x0f &&
@@ -205,6 +206,16 @@ int acpi_processor_ffh_cstate_probe(unsigned int cpu,
}
EXPORT_SYMBOL_GPL(acpi_processor_ffh_cstate_probe);
+void __noreturn acpi_processor_ffh_play_dead(struct acpi_processor_cx *cx)
+{
+ unsigned int cpu = smp_processor_id();
+ struct cstate_entry *percpu_entry;
+
+ percpu_entry = per_cpu_ptr(cpu_cstate_entry, cpu);
+ mwait_play_dead(percpu_entry->states[cx->index].eax);
+}
+EXPORT_SYMBOL_GPL(acpi_processor_ffh_play_dead);
+
void __cpuidle acpi_processor_ffh_cstate_enter(struct acpi_processor_cx *cx)
{
unsigned int cpu = smp_processor_id();
diff --git a/arch/x86/kernel/acpi/madt_playdead.S b/arch/x86/kernel/acpi/madt_playdead.S
index 4e498d28cdc8..aefb9cb583ad 100644
--- a/arch/x86/kernel/acpi/madt_playdead.S
+++ b/arch/x86/kernel/acpi/madt_playdead.S
@@ -14,6 +14,7 @@
* rsi: PGD of the identity mapping
*/
SYM_FUNC_START(asm_acpi_mp_play_dead)
+ ANNOTATE_NOENDBR
/* Turn off global entries. Following CR3 write will flush them. */
movq %cr4, %rdx
andq $~(X86_CR4_PGE), %rdx
diff --git a/arch/x86/kernel/acpi/madt_wakeup.c b/arch/x86/kernel/acpi/madt_wakeup.c
index d5ef6215583b..f36f28405dcc 100644
--- a/arch/x86/kernel/acpi/madt_wakeup.c
+++ b/arch/x86/kernel/acpi/madt_wakeup.c
@@ -70,58 +70,6 @@ static void __init free_pgt_page(void *pgt, void *dummy)
return memblock_free(pgt, PAGE_SIZE);
}
-/*
- * Make sure asm_acpi_mp_play_dead() is present in the identity mapping at
- * the same place as in the kernel page tables. asm_acpi_mp_play_dead() switches
- * to the identity mapping and the function has be present at the same spot in
- * the virtual address space before and after switching page tables.
- */
-static int __init init_transition_pgtable(pgd_t *pgd)
-{
- pgprot_t prot = PAGE_KERNEL_EXEC_NOENC;
- unsigned long vaddr, paddr;
- p4d_t *p4d;
- pud_t *pud;
- pmd_t *pmd;
- pte_t *pte;
-
- vaddr = (unsigned long)asm_acpi_mp_play_dead;
- pgd += pgd_index(vaddr);
- if (!pgd_present(*pgd)) {
- p4d = (p4d_t *)alloc_pgt_page(NULL);
- if (!p4d)
- return -ENOMEM;
- set_pgd(pgd, __pgd(__pa(p4d) | _KERNPG_TABLE));
- }
- p4d = p4d_offset(pgd, vaddr);
- if (!p4d_present(*p4d)) {
- pud = (pud_t *)alloc_pgt_page(NULL);
- if (!pud)
- return -ENOMEM;
- set_p4d(p4d, __p4d(__pa(pud) | _KERNPG_TABLE));
- }
- pud = pud_offset(p4d, vaddr);
- if (!pud_present(*pud)) {
- pmd = (pmd_t *)alloc_pgt_page(NULL);
- if (!pmd)
- return -ENOMEM;
- set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE));
- }
- pmd = pmd_offset(pud, vaddr);
- if (!pmd_present(*pmd)) {
- pte = (pte_t *)alloc_pgt_page(NULL);
- if (!pte)
- return -ENOMEM;
- set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
- }
- pte = pte_offset_kernel(pmd, vaddr);
-
- paddr = __pa(vaddr);
- set_pte(pte, pfn_pte(paddr >> PAGE_SHIFT, prot));
-
- return 0;
-}
-
static int __init acpi_mp_setup_reset(u64 reset_vector)
{
struct x86_mapping_info info = {
@@ -130,6 +78,7 @@ static int __init acpi_mp_setup_reset(u64 reset_vector)
.page_flag = __PAGE_KERNEL_LARGE_EXEC,
.kernpg_flag = _KERNPG_TABLE_NOENC,
};
+ unsigned long mstart, mend;
pgd_t *pgd;
pgd = alloc_pgt_page(NULL);
@@ -137,8 +86,6 @@ static int __init acpi_mp_setup_reset(u64 reset_vector)
return -ENOMEM;
for (int i = 0; i < nr_pfn_mapped; i++) {
- unsigned long mstart, mend;
-
mstart = pfn_mapped[i].start << PAGE_SHIFT;
mend = pfn_mapped[i].end << PAGE_SHIFT;
if (kernel_ident_mapping_init(&info, pgd, mstart, mend)) {
@@ -147,14 +94,24 @@ static int __init acpi_mp_setup_reset(u64 reset_vector)
}
}
- if (kernel_ident_mapping_init(&info, pgd,
- PAGE_ALIGN_DOWN(reset_vector),
- PAGE_ALIGN(reset_vector + 1))) {
+ mstart = PAGE_ALIGN_DOWN(reset_vector);
+ mend = mstart + PAGE_SIZE;
+ if (kernel_ident_mapping_init(&info, pgd, mstart, mend)) {
kernel_ident_mapping_free(&info, pgd);
return -ENOMEM;
}
- if (init_transition_pgtable(pgd)) {
+ /*
+ * Make sure asm_acpi_mp_play_dead() is present in the identity mapping
+ * at the same place as in the kernel page tables.
+ * asm_acpi_mp_play_dead() switches to the identity mapping and the
+ * function must be present at the same spot in the virtual address space
+ * before and after switching page tables.
+ */
+ info.offset = __START_KERNEL_map - phys_base;
+ mstart = PAGE_ALIGN_DOWN(__pa(asm_acpi_mp_play_dead));
+ mend = mstart + PAGE_SIZE;
+ if (kernel_ident_mapping_init(&info, pgd, mstart, mend)) {
kernel_ident_mapping_free(&info, pgd);
return -ENOMEM;
}
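
A minimal sketch of the address arithmetic the new info.offset relies on, assuming kernel_ident_mapping_init() maps a physical range at vaddr = paddr + info.offset; the phys_base and page values below are illustrative, only __START_KERNEL_map matches the x86-64 default:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t start_kernel_map = 0xffffffff80000000ULL;	/* x86-64 __START_KERNEL_map */
	uint64_t phys_base        = 0x01000000ULL;		/* example kernel relocation */
	uint64_t func_pa          = 0x01234000ULL;		/* example page of asm_acpi_mp_play_dead */

	uint64_t offset = start_kernel_map - phys_base;

	/* The "identity" PGD maps func_pa at its kernel virtual address, so
	 * asm_acpi_mp_play_dead() stays valid across the CR3 switch. */
	printf("mapped at %#llx\n", (unsigned long long)(func_pa + offset));
	return 0;
}
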
diff --git a/arch/x86/kernel/acpi/wakeup_64.S b/arch/x86/kernel/acpi/wakeup_64.S
index b200a193beeb..04f561f75e99 100644
--- a/arch/x86/kernel/acpi/wakeup_64.S
+++ b/arch/x86/kernel/acpi/wakeup_64.S
@@ -17,6 +17,7 @@
* Hooray, we are in Long 64-bit mode (but still running in low memory)
*/
SYM_FUNC_START(wakeup_long64)
+ ANNOTATE_NOENDBR
movq saved_magic(%rip), %rax
movq $0x123456789abcdef0, %rdx
cmpq %rdx, %rax
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index c71b575bf229..bf82c6f7d690 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -392,10 +392,8 @@ EXPORT_SYMBOL(BUG_func);
* Rewrite the "call BUG_func" replacement to point to the target of the
* indirect pv_ops call "call *disp(%ip)".
*/
-static int alt_replace_call(u8 *instr, u8 *insn_buff, struct alt_instr *a,
- struct module *mod)
+static int alt_replace_call(u8 *instr, u8 *insn_buff, struct alt_instr *a)
{
- u8 *wr_instr = module_writable_address(mod, instr);
void *target, *bug = &BUG_func;
s32 disp;
@@ -405,14 +403,14 @@ static int alt_replace_call(u8 *instr, u8 *insn_buff, struct alt_instr *a,
}
if (a->instrlen != 6 ||
- wr_instr[0] != CALL_RIP_REL_OPCODE ||
- wr_instr[1] != CALL_RIP_REL_MODRM) {
+ instr[0] != CALL_RIP_REL_OPCODE ||
+ instr[1] != CALL_RIP_REL_MODRM) {
pr_err("ALT_FLAG_DIRECT_CALL set for unrecognized indirect call\n");
BUG();
}
/* Skip CALL_RIP_REL_OPCODE and CALL_RIP_REL_MODRM */
- disp = *(s32 *)(wr_instr + 2);
+ disp = *(s32 *)(instr + 2);
#ifdef CONFIG_X86_64
/* ff 15 00 00 00 00 call *0x0(%rip) */
/* target address is stored at "next instruction + disp". */
@@ -450,8 +448,7 @@ static inline u8 * instr_va(struct alt_instr *i)
* to refetch changed I$ lines.
*/
void __init_or_module noinline apply_alternatives(struct alt_instr *start,
- struct alt_instr *end,
- struct module *mod)
+ struct alt_instr *end)
{
u8 insn_buff[MAX_PATCH_LEN];
u8 *instr, *replacement;
@@ -480,7 +477,6 @@ void __init_or_module noinline apply_alternatives(struct alt_instr *start,
*/
for (a = start; a < end; a++) {
int insn_buff_sz = 0;
- u8 *wr_instr, *wr_replacement;
/*
* In case of nested ALTERNATIVE()s the outer alternative might
@@ -494,11 +490,7 @@ void __init_or_module noinline apply_alternatives(struct alt_instr *start,
}
instr = instr_va(a);
- wr_instr = module_writable_address(mod, instr);
-
replacement = (u8 *)&a->repl_offset + a->repl_offset;
- wr_replacement = module_writable_address(mod, replacement);
-
BUG_ON(a->instrlen > sizeof(insn_buff));
BUG_ON(a->cpuid >= (NCAPINTS + NBUGINTS) * 32);
@@ -509,9 +501,9 @@ void __init_or_module noinline apply_alternatives(struct alt_instr *start,
* patch if feature is *NOT* present.
*/
if (!boot_cpu_has(a->cpuid) == !(a->flags & ALT_FLAG_NOT)) {
- memcpy(insn_buff, wr_instr, a->instrlen);
+ memcpy(insn_buff, instr, a->instrlen);
optimize_nops(instr, insn_buff, a->instrlen);
- text_poke_early(wr_instr, insn_buff, a->instrlen);
+ text_poke_early(instr, insn_buff, a->instrlen);
continue;
}
@@ -521,12 +513,11 @@ void __init_or_module noinline apply_alternatives(struct alt_instr *start,
instr, instr, a->instrlen,
replacement, a->replacementlen, a->flags);
- memcpy(insn_buff, wr_replacement, a->replacementlen);
+ memcpy(insn_buff, replacement, a->replacementlen);
insn_buff_sz = a->replacementlen;
if (a->flags & ALT_FLAG_DIRECT_CALL) {
- insn_buff_sz = alt_replace_call(instr, insn_buff, a,
- mod);
+ insn_buff_sz = alt_replace_call(instr, insn_buff, a);
if (insn_buff_sz < 0)
continue;
}
@@ -536,11 +527,11 @@ void __init_or_module noinline apply_alternatives(struct alt_instr *start,
apply_relocation(insn_buff, instr, a->instrlen, replacement, a->replacementlen);
- DUMP_BYTES(ALT, wr_instr, a->instrlen, "%px: old_insn: ", instr);
+ DUMP_BYTES(ALT, instr, a->instrlen, "%px: old_insn: ", instr);
DUMP_BYTES(ALT, replacement, a->replacementlen, "%px: rpl_insn: ", replacement);
DUMP_BYTES(ALT, insn_buff, insn_buff_sz, "%px: final_insn: ", instr);
- text_poke_early(wr_instr, insn_buff, insn_buff_sz);
+ text_poke_early(instr, insn_buff, insn_buff_sz);
}
kasan_enable_current();
@@ -731,20 +722,18 @@ static int patch_retpoline(void *addr, struct insn *insn, u8 *bytes)
/*
* Generated by 'objtool --retpoline'.
*/
-void __init_or_module noinline apply_retpolines(s32 *start, s32 *end,
- struct module *mod)
+void __init_or_module noinline apply_retpolines(s32 *start, s32 *end)
{
s32 *s;
for (s = start; s < end; s++) {
void *addr = (void *)s + *s;
- void *wr_addr = module_writable_address(mod, addr);
struct insn insn;
int len, ret;
u8 bytes[16];
u8 op1, op2;
- ret = insn_decode_kernel(&insn, wr_addr);
+ ret = insn_decode_kernel(&insn, addr);
if (WARN_ON_ONCE(ret < 0))
continue;
@@ -752,6 +741,11 @@ void __init_or_module noinline apply_retpolines(s32 *start, s32 *end,
op2 = insn.opcode.bytes[1];
switch (op1) {
+ case 0x70 ... 0x7f: /* Jcc.d8 */
+ /* See cfi_paranoid. */
+ WARN_ON_ONCE(cfi_mode != CFI_FINEIBT);
+ continue;
+
case CALL_INSN_OPCODE:
case JMP32_INSN_OPCODE:
break;
@@ -772,9 +766,9 @@ void __init_or_module noinline apply_retpolines(s32 *start, s32 *end,
len = patch_retpoline(addr, &insn, bytes);
if (len == insn.length) {
optimize_nops(addr, bytes, len);
- DUMP_BYTES(RETPOLINE, ((u8*)wr_addr), len, "%px: orig: ", addr);
+ DUMP_BYTES(RETPOLINE, ((u8*)addr), len, "%px: orig: ", addr);
DUMP_BYTES(RETPOLINE, ((u8*)bytes), len, "%px: repl: ", addr);
- text_poke_early(wr_addr, bytes, len);
+ text_poke_early(addr, bytes, len);
}
}
}
@@ -810,8 +804,7 @@ static int patch_return(void *addr, struct insn *insn, u8 *bytes)
return i;
}
-void __init_or_module noinline apply_returns(s32 *start, s32 *end,
- struct module *mod)
+void __init_or_module noinline apply_returns(s32 *start, s32 *end)
{
s32 *s;
@@ -820,13 +813,12 @@ void __init_or_module noinline apply_returns(s32 *start, s32 *end,
for (s = start; s < end; s++) {
void *dest = NULL, *addr = (void *)s + *s;
- void *wr_addr = module_writable_address(mod, addr);
struct insn insn;
int len, ret;
u8 bytes[16];
u8 op;
- ret = insn_decode_kernel(&insn, wr_addr);
+ ret = insn_decode_kernel(&insn, addr);
if (WARN_ON_ONCE(ret < 0))
continue;
@@ -846,41 +838,59 @@ void __init_or_module noinline apply_returns(s32 *start, s32 *end,
len = patch_return(addr, &insn, bytes);
if (len == insn.length) {
- DUMP_BYTES(RET, ((u8*)wr_addr), len, "%px: orig: ", addr);
+ DUMP_BYTES(RET, ((u8*)addr), len, "%px: orig: ", addr);
DUMP_BYTES(RET, ((u8*)bytes), len, "%px: repl: ", addr);
- text_poke_early(wr_addr, bytes, len);
+ text_poke_early(addr, bytes, len);
}
}
}
-#else
-void __init_or_module noinline apply_returns(s32 *start, s32 *end,
- struct module *mod) { }
-#endif /* CONFIG_MITIGATION_RETHUNK */
+#else /* !CONFIG_MITIGATION_RETHUNK: */
+void __init_or_module noinline apply_returns(s32 *start, s32 *end) { }
+#endif /* !CONFIG_MITIGATION_RETHUNK */
#else /* !CONFIG_MITIGATION_RETPOLINE || !CONFIG_OBJTOOL */
-void __init_or_module noinline apply_retpolines(s32 *start, s32 *end,
- struct module *mod) { }
-void __init_or_module noinline apply_returns(s32 *start, s32 *end,
- struct module *mod) { }
+void __init_or_module noinline apply_retpolines(s32 *start, s32 *end) { }
+void __init_or_module noinline apply_returns(s32 *start, s32 *end) { }
-#endif /* CONFIG_MITIGATION_RETPOLINE && CONFIG_OBJTOOL */
+#endif /* !CONFIG_MITIGATION_RETPOLINE || !CONFIG_OBJTOOL */
#ifdef CONFIG_X86_KERNEL_IBT
-static void poison_cfi(void *addr, void *wr_addr);
+__noendbr bool is_endbr(u32 *val)
+{
+ u32 endbr;
+
+ __get_kernel_nofault(&endbr, val, u32, Efault);
+ return __is_endbr(endbr);
+
+Efault:
+ return false;
+}
-static void __init_or_module poison_endbr(void *addr, void *wr_addr, bool warn)
+#ifdef CONFIG_FINEIBT
+
+static __noendbr bool exact_endbr(u32 *val)
{
- u32 endbr, poison = gen_endbr_poison();
+ u32 endbr;
- if (WARN_ON_ONCE(get_kernel_nofault(endbr, wr_addr)))
- return;
+ __get_kernel_nofault(&endbr, val, u32, Efault);
+ return endbr == gen_endbr();
+
+Efault:
+ return false;
+}
- if (!is_endbr(endbr)) {
- WARN_ON_ONCE(warn);
+#endif
+
+static void poison_cfi(void *addr);
+
+static void __init_or_module poison_endbr(void *addr)
+{
+ u32 poison = gen_endbr_poison();
+
+ if (WARN_ON_ONCE(!is_endbr(addr)))
return;
- }
DPRINTK(ENDBR, "ENDBR at: %pS (%px)", addr, addr);
@@ -889,7 +899,7 @@ static void __init_or_module poison_endbr(void *addr, void *wr_addr, bool warn)
*/
DUMP_BYTES(ENDBR, ((u8*)addr), 4, "%px: orig: ", addr);
DUMP_BYTES(ENDBR, ((u8*)&poison), 4, "%px: repl: ", addr);
- text_poke_early(wr_addr, &poison, 4);
+ text_poke_early(addr, &poison, 4);
}
/*
@@ -898,36 +908,39 @@ static void __init_or_module poison_endbr(void *addr, void *wr_addr, bool warn)
* Seal the functions for indirect calls by clobbering the ENDBR instructions
* and the kCFI hash value.
*/
-void __init_or_module noinline apply_seal_endbr(s32 *start, s32 *end, struct module *mod)
+void __init_or_module noinline apply_seal_endbr(s32 *start, s32 *end)
{
s32 *s;
for (s = start; s < end; s++) {
void *addr = (void *)s + *s;
- void *wr_addr = module_writable_address(mod, addr);
- poison_endbr(addr, wr_addr, true);
+ poison_endbr(addr);
if (IS_ENABLED(CONFIG_FINEIBT))
- poison_cfi(addr - 16, wr_addr - 16);
+ poison_cfi(addr - 16);
}
}
-#else
+#else /* !CONFIG_X86_KERNEL_IBT: */
-void __init_or_module apply_seal_endbr(s32 *start, s32 *end, struct module *mod) { }
+void __init_or_module apply_seal_endbr(s32 *start, s32 *end) { }
-#endif /* CONFIG_X86_KERNEL_IBT */
+#endif /* !CONFIG_X86_KERNEL_IBT */
#ifdef CONFIG_CFI_AUTO_DEFAULT
-#define __CFI_DEFAULT CFI_AUTO
+# define __CFI_DEFAULT CFI_AUTO
#elif defined(CONFIG_CFI_CLANG)
-#define __CFI_DEFAULT CFI_KCFI
+# define __CFI_DEFAULT CFI_KCFI
#else
-#define __CFI_DEFAULT CFI_OFF
+# define __CFI_DEFAULT CFI_OFF
#endif
enum cfi_mode cfi_mode __ro_after_init = __CFI_DEFAULT;
+#ifdef CONFIG_FINEIBT_BHI
+bool cfi_bhi __ro_after_init = false;
+#endif
+
#ifdef CONFIG_CFI_CLANG
struct bpf_insn;
@@ -935,11 +948,7 @@ struct bpf_insn;
extern unsigned int __bpf_prog_runX(const void *ctx,
const struct bpf_insn *insn);
-/*
- * Force a reference to the external symbol so the compiler generates
- * __kcfi_typid.
- */
-__ADDRESSABLE(__bpf_prog_runX);
+KCFI_REFERENCE(__bpf_prog_runX);
/* u32 __ro_after_init cfi_bpf_hash = __kcfi_typeid___bpf_prog_runX; */
asm (
@@ -956,7 +965,7 @@ asm (
/* Must match bpf_callback_t */
extern u64 __bpf_callback_fn(u64, u64, u64, u64, u64);
-__ADDRESSABLE(__bpf_callback_fn);
+KCFI_REFERENCE(__bpf_callback_fn);
/* u32 __ro_after_init cfi_bpf_subprog_hash = __kcfi_typeid___bpf_callback_fn; */
asm (
@@ -991,6 +1000,21 @@ u32 cfi_get_func_hash(void *func)
return hash;
}
+
+int cfi_get_func_arity(void *func)
+{
+ bhi_thunk *target;
+ s32 disp;
+
+ if (cfi_mode != CFI_FINEIBT && !cfi_bhi)
+ return 0;
+
+ if (get_kernel_nofault(disp, func - 4))
+ return 0;
+
+ target = func + disp;
+ return target - __bhi_args;
+}
#endif
#ifdef CONFIG_FINEIBT
@@ -998,6 +1022,8 @@ u32 cfi_get_func_hash(void *func)
static bool cfi_rand __ro_after_init = true;
static u32 cfi_seed __ro_after_init;
+static bool cfi_paranoid __ro_after_init = false;
+
/*
* Re-hash the CFI hash with a boot-time seed while making sure the result is
* not a valid ENDBR instruction.
@@ -1005,7 +1031,7 @@ static u32 cfi_seed __ro_after_init;
static u32 cfi_rehash(u32 hash)
{
hash ^= cfi_seed;
- while (unlikely(is_endbr(hash) || is_endbr(-hash))) {
+ while (unlikely(__is_endbr(hash) || __is_endbr(-hash))) {
bool lsb = hash & 1;
hash >>= 1;
if (lsb)
@@ -1037,6 +1063,25 @@ static __init int cfi_parse_cmdline(char *str)
cfi_mode = CFI_FINEIBT;
} else if (!strcmp(str, "norand")) {
cfi_rand = false;
+ } else if (!strcmp(str, "warn")) {
+ pr_alert("CFI mismatch non-fatal!\n");
+ cfi_warn = true;
+ } else if (!strcmp(str, "paranoid")) {
+ if (cfi_mode == CFI_FINEIBT) {
+ cfi_paranoid = true;
+ } else {
+ pr_err("Ignoring paranoid; depends on fineibt.\n");
+ }
+ } else if (!strcmp(str, "bhi")) {
+#ifdef CONFIG_FINEIBT_BHI
+ if (cfi_mode == CFI_FINEIBT) {
+ cfi_bhi = true;
+ } else {
+ pr_err("Ignoring bhi; depends on fineibt.\n");
+ }
+#else
+ pr_err("Ignoring bhi; depends on FINEIBT_BHI=y.\n");
+#endif
} else {
pr_err("Ignoring unknown cfi option (%s).", str);
}
@@ -1054,9 +1099,9 @@ early_param("cfi", cfi_parse_cmdline);
* __cfi_\func: __cfi_\func:
* movl $0x12345678,%eax // 5 endbr64 // 4
* nop subl $0x12345678,%r10d // 7
- * nop jz 1f // 2
- * nop ud2 // 2
- * nop 1: nop // 1
+ * nop jne __cfi_\func+6 // 2
+ * nop nop3 // 3
+ * nop
* nop
* nop
* nop
@@ -1068,34 +1113,53 @@ early_param("cfi", cfi_parse_cmdline);
*
* caller: caller:
* movl $(-0x12345678),%r10d // 6 movl $0x12345678,%r10d // 6
- * addl $-15(%r11),%r10d // 4 sub $16,%r11 // 4
+ * addl $-15(%r11),%r10d // 4 lea -0x10(%r11),%r11 // 4
* je 1f // 2 nop4 // 4
* ud2 // 2
- * 1: call __x86_indirect_thunk_r11 // 5 call *%r11; nop2; // 5
+ * 1: cs call __x86_indirect_thunk_r11 // 6 call *%r11; nop3; // 6
*
*/
-asm( ".pushsection .rodata \n"
- "fineibt_preamble_start: \n"
- " endbr64 \n"
- " subl $0x12345678, %r10d \n"
- " je fineibt_preamble_end \n"
- " ud2 \n"
- " nop \n"
- "fineibt_preamble_end: \n"
+/*
+ * <fineibt_preamble_start>:
+ * 0: f3 0f 1e fa endbr64
+ * 4: 41 81 <ea> 78 56 34 12 sub $0x12345678, %r10d
+ * b: 75 f9 jne 6 <fineibt_preamble_start+0x6>
+ * d: 0f 1f 00 nopl (%rax)
+ *
+ * Note that the JNE target is the 0xEA byte inside the SUB, this decodes as
+ * (bad) on x86_64 and raises #UD.
+ */
+asm( ".pushsection .rodata \n"
+ "fineibt_preamble_start: \n"
+ " endbr64 \n"
+ " subl $0x12345678, %r10d \n"
+ "fineibt_preamble_bhi: \n"
+ " jne fineibt_preamble_start+6 \n"
+ ASM_NOP3
+ "fineibt_preamble_end: \n"
".popsection\n"
);
extern u8 fineibt_preamble_start[];
+extern u8 fineibt_preamble_bhi[];
extern u8 fineibt_preamble_end[];
#define fineibt_preamble_size (fineibt_preamble_end - fineibt_preamble_start)
+#define fineibt_preamble_bhi (fineibt_preamble_bhi - fineibt_preamble_start)
+#define fineibt_preamble_ud 6
#define fineibt_preamble_hash 7
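
The dump above points out that the JNE lands on the 0xEA byte inside the SUB. A minimal sketch of the rel8 arithmetic, assuming nothing beyond the bytes quoted in that comment:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint8_t rel8 = 0xf9;			/* "75 f9" = jne with rel8 = -7 */
	unsigned insn_off = 0xb, insn_len = 2;	/* offsets from the dump above */

	/* rel8 is signed and relative to the end of the jump instruction */
	printf("target = %#x\n", insn_off + insn_len + (int8_t)rel8);	/* 0x6: the 0xEA byte */
	return 0;
}
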
+/*
+ * <fineibt_caller_start>:
+ * 0: 41 ba 78 56 34 12 mov $0x12345678, %r10d
+ * 6: 4d 8d 5b f0 lea -0x10(%r11), %r11
+ * a: 0f 1f 40 00 nopl 0x0(%rax)
+ */
asm( ".pushsection .rodata \n"
"fineibt_caller_start: \n"
" movl $0x12345678, %r10d \n"
- " sub $16, %r11 \n"
+ " lea -0x10(%r11), %r11 \n"
ASM_NOP4
"fineibt_caller_end: \n"
".popsection \n"
@@ -1109,13 +1173,62 @@ extern u8 fineibt_caller_end[];
#define fineibt_caller_jmp (fineibt_caller_size - 2)
-static u32 decode_preamble_hash(void *addr)
+/*
+ * Since FineIBT does hash validation on the callee side it is prone to
+ * circumvention attacks where a 'naked' ENDBR instruction exists that
+ * is not part of the fineibt_preamble sequence.
+ *
+ * Notably the x86 entry points must be ENDBR and equally cannot be
+ * fineibt_preamble.
+ *
+ * The fineibt_paranoid caller sequence adds additional caller side
+ * hash validation. This stops such circumvention attacks dead, but at the cost
+ * of adding a load.
+ *
+ * <fineibt_paranoid_start>:
+ * 0: 41 ba 78 56 34 12 mov $0x12345678, %r10d
+ * 6: 45 3b 53 f7 cmp -0x9(%r11), %r10d
+ * a: 4d 8d 5b <f0> lea -0x10(%r11), %r11
+ * e: 75 fd jne d <fineibt_paranoid_start+0xd>
+ * 10: 41 ff d3 call *%r11
+ * 13: 90 nop
+ *
+ * Notably LEA does not modify flags and can be reordered with the CMP,
+ * avoiding a dependency. Again, using a non-taken (backwards) branch
+ * for the failure case, abusing LEA's immediate 0xf0 as LOCK prefix for the
+ * Jcc.d8, causing #UD.
+ */
+asm( ".pushsection .rodata \n"
+ "fineibt_paranoid_start: \n"
+ " movl $0x12345678, %r10d \n"
+ " cmpl -9(%r11), %r10d \n"
+ " lea -0x10(%r11), %r11 \n"
+ " jne fineibt_paranoid_start+0xd \n"
+ "fineibt_paranoid_ind: \n"
+ " call *%r11 \n"
+ " nop \n"
+ "fineibt_paranoid_end: \n"
+ ".popsection \n"
+);
+
+extern u8 fineibt_paranoid_start[];
+extern u8 fineibt_paranoid_ind[];
+extern u8 fineibt_paranoid_end[];
+
+#define fineibt_paranoid_size (fineibt_paranoid_end - fineibt_paranoid_start)
+#define fineibt_paranoid_ind (fineibt_paranoid_ind - fineibt_paranoid_start)
+#define fineibt_paranoid_ud 0xd
+
+static u32 decode_preamble_hash(void *addr, int *reg)
{
u8 *p = addr;
- /* b8 78 56 34 12 mov $0x12345678,%eax */
- if (p[0] == 0xb8)
+ /* b8+reg 78 56 34 12 movl $0x12345678,\reg */
+ if (p[0] >= 0xb8 && p[0] < 0xc0) {
+ if (reg)
+ *reg = p[0] - 0xb8;
return *(u32 *)(addr + 1);
+ }
return 0; /* invalid hash value */
}
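
decode_preamble_hash() now accepts any of the 0xb8..0xbf "movl $imm32,\reg" encodings. A minimal standalone sketch of that decode; the helper name and sample bytes are illustrative only:

#include <stdint.h>
#include <string.h>
#include <stdio.h>

static int decode_mov_imm32(const uint8_t *p, int *reg, uint32_t *imm)
{
	if (p[0] < 0xb8 || p[0] > 0xbf)		/* b8+reg: movl $imm32, %reg */
		return -1;
	*reg = p[0] - 0xb8;			/* 0 = %eax ... 7 = %edi */
	memcpy(imm, p + 1, sizeof(*imm));	/* x86 immediates are little endian */
	return 0;
}

int main(void)
{
	const uint8_t insn[] = { 0xb8, 0x78, 0x56, 0x34, 0x12 };	/* movl $0x12345678,%eax */
	uint32_t imm;
	int reg;

	if (!decode_mov_imm32(insn, &reg, &imm))
		printf("reg=%d imm=%#x\n", reg, (unsigned)imm);
	return 0;
}
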
@@ -1124,11 +1237,11 @@ static u32 decode_caller_hash(void *addr)
{
u8 *p = addr;
- /* 41 ba 78 56 34 12 mov $0x12345678,%r10d */
+ /* 41 ba 88 a9 cb ed mov $(-0x12345678),%r10d */
if (p[0] == 0x41 && p[1] == 0xba)
return -*(u32 *)(addr + 2);
- /* e8 0c 78 56 34 12 jmp.d8 +12 */
+ /* e8 0c 88 a9 cb ed jmp.d8 +12 */
if (p[0] == JMP8_INSN_OPCODE && p[1] == fineibt_caller_jmp)
return -*(u32 *)(addr + 2);
@@ -1136,7 +1249,7 @@ static u32 decode_caller_hash(void *addr)
}
/* .retpoline_sites */
-static int cfi_disable_callers(s32 *start, s32 *end, struct module *mod)
+static int cfi_disable_callers(s32 *start, s32 *end)
{
/*
* Disable kCFI by patching in a JMP.d8, this leaves the hash immediate
@@ -1148,23 +1261,20 @@ static int cfi_disable_callers(s32 *start, s32 *end, struct module *mod)
for (s = start; s < end; s++) {
void *addr = (void *)s + *s;
- void *wr_addr;
u32 hash;
addr -= fineibt_caller_size;
- wr_addr = module_writable_address(mod, addr);
- hash = decode_caller_hash(wr_addr);
-
+ hash = decode_caller_hash(addr);
if (!hash) /* nocfi callers */
continue;
- text_poke_early(wr_addr, jmp, 2);
+ text_poke_early(addr, jmp, 2);
}
return 0;
}
-static int cfi_enable_callers(s32 *start, s32 *end, struct module *mod)
+static int cfi_enable_callers(s32 *start, s32 *end)
{
/*
* Re-enable kCFI, undo what cfi_disable_callers() did.
@@ -1174,126 +1284,212 @@ static int cfi_enable_callers(s32 *start, s32 *end, struct module *mod)
for (s = start; s < end; s++) {
void *addr = (void *)s + *s;
- void *wr_addr;
u32 hash;
addr -= fineibt_caller_size;
- wr_addr = module_writable_address(mod, addr);
- hash = decode_caller_hash(wr_addr);
+ hash = decode_caller_hash(addr);
if (!hash) /* nocfi callers */
continue;
- text_poke_early(wr_addr, mov, 2);
+ text_poke_early(addr, mov, 2);
}
return 0;
}
/* .cfi_sites */
-static int cfi_rand_preamble(s32 *start, s32 *end, struct module *mod)
+static int cfi_rand_preamble(s32 *start, s32 *end)
{
s32 *s;
for (s = start; s < end; s++) {
void *addr = (void *)s + *s;
- void *wr_addr = module_writable_address(mod, addr);
u32 hash;
- hash = decode_preamble_hash(wr_addr);
+ hash = decode_preamble_hash(addr, NULL);
if (WARN(!hash, "no CFI hash found at: %pS %px %*ph\n",
addr, addr, 5, addr))
return -EINVAL;
hash = cfi_rehash(hash);
- text_poke_early(wr_addr + 1, &hash, 4);
+ text_poke_early(addr + 1, &hash, 4);
}
return 0;
}
-static int cfi_rewrite_preamble(s32 *start, s32 *end, struct module *mod)
+static void cfi_fineibt_bhi_preamble(void *addr, int arity)
+{
+ if (!arity)
+ return;
+
+ if (!cfi_warn && arity == 1) {
+ /*
+ * Crazy scheme to allow arity-1 inline:
+ *
+ * __cfi_foo:
+ * 0: f3 0f 1e fa endbr64
+ * 4: 41 81 <ea> 78 56 34 12 sub 0x12345678, %r10d
+ * b: 49 0f 45 fa cmovne %r10, %rdi
+ * f: 75 f5 jne __cfi_foo+6
+ * 11: 0f 1f 00 nopl (%rax)
+ *
+ * Code that direct calls to foo()+0, decodes the tail end as:
+ *
+ * foo:
+ * 0: f5 cmc
+ * 1: 0f 1f 00 nopl (%rax)
+ *
+ * which clobbers CF, but does not affect anything ABI
+ * wise.
+ *
+ * Notably, this scheme is incompatible with permissive CFI
+ * because the CMOVcc is unconditional and RDI will have been
+ * clobbered.
+ */
+ const u8 magic[9] = {
+ 0x49, 0x0f, 0x45, 0xfa,
+ 0x75, 0xf5,
+ BYTES_NOP3,
+ };
+
+ text_poke_early(addr + fineibt_preamble_bhi, magic, 9);
+
+ return;
+ }
+
+ text_poke_early(addr + fineibt_preamble_bhi,
+ text_gen_insn(CALL_INSN_OPCODE,
+ addr + fineibt_preamble_bhi,
+ __bhi_args[arity]),
+ CALL_INSN_SIZE);
+}
+
+static int cfi_rewrite_preamble(s32 *start, s32 *end)
{
s32 *s;
for (s = start; s < end; s++) {
void *addr = (void *)s + *s;
- void *wr_addr = module_writable_address(mod, addr);
+ int arity;
u32 hash;
- hash = decode_preamble_hash(wr_addr);
+ /*
+ * When the function doesn't start with ENDBR the compiler will
+ * have determined there are no indirect calls to it and we
+ * don't need no CFI either.
+ */
+ if (!is_endbr(addr + 16))
+ continue;
+
+ hash = decode_preamble_hash(addr, &arity);
if (WARN(!hash, "no CFI hash found at: %pS %px %*ph\n",
addr, addr, 5, addr))
return -EINVAL;
- text_poke_early(wr_addr, fineibt_preamble_start, fineibt_preamble_size);
- WARN_ON(*(u32 *)(wr_addr + fineibt_preamble_hash) != 0x12345678);
- text_poke_early(wr_addr + fineibt_preamble_hash, &hash, 4);
+ text_poke_early(addr, fineibt_preamble_start, fineibt_preamble_size);
+ WARN_ON(*(u32 *)(addr + fineibt_preamble_hash) != 0x12345678);
+ text_poke_early(addr + fineibt_preamble_hash, &hash, 4);
+
+ WARN_ONCE(!IS_ENABLED(CONFIG_FINEIBT_BHI) && arity,
+ "kCFI preamble has wrong register at: %pS %*ph\n",
+ addr, 5, addr);
+
+ if (cfi_bhi)
+ cfi_fineibt_bhi_preamble(addr, arity);
}
return 0;
}
-static void cfi_rewrite_endbr(s32 *start, s32 *end, struct module *mod)
+static void cfi_rewrite_endbr(s32 *start, s32 *end)
{
s32 *s;
for (s = start; s < end; s++) {
void *addr = (void *)s + *s;
- void *wr_addr = module_writable_address(mod, addr);
- poison_endbr(addr + 16, wr_addr + 16, false);
+ if (!exact_endbr(addr + 16))
+ continue;
+
+ poison_endbr(addr + 16);
}
}
/* .retpoline_sites */
-static int cfi_rand_callers(s32 *start, s32 *end, struct module *mod)
+static int cfi_rand_callers(s32 *start, s32 *end)
{
s32 *s;
for (s = start; s < end; s++) {
void *addr = (void *)s + *s;
- void *wr_addr;
u32 hash;
addr -= fineibt_caller_size;
- wr_addr = module_writable_address(mod, addr);
- hash = decode_caller_hash(wr_addr);
+ hash = decode_caller_hash(addr);
if (hash) {
hash = -cfi_rehash(hash);
- text_poke_early(wr_addr + 2, &hash, 4);
+ text_poke_early(addr + 2, &hash, 4);
}
}
return 0;
}
-static int cfi_rewrite_callers(s32 *start, s32 *end, struct module *mod)
+static int cfi_rewrite_callers(s32 *start, s32 *end)
{
s32 *s;
+ BUG_ON(fineibt_paranoid_size != 20);
+
for (s = start; s < end; s++) {
void *addr = (void *)s + *s;
- void *wr_addr;
+ struct insn insn;
+ u8 bytes[20];
u32 hash;
+ int ret;
+ u8 op;
addr -= fineibt_caller_size;
- wr_addr = module_writable_address(mod, addr);
- hash = decode_caller_hash(wr_addr);
- if (hash) {
- text_poke_early(wr_addr, fineibt_caller_start, fineibt_caller_size);
- WARN_ON(*(u32 *)(wr_addr + fineibt_caller_hash) != 0x12345678);
- text_poke_early(wr_addr + fineibt_caller_hash, &hash, 4);
+ hash = decode_caller_hash(addr);
+ if (!hash)
+ continue;
+
+ if (!cfi_paranoid) {
+ text_poke_early(addr, fineibt_caller_start, fineibt_caller_size);
+ WARN_ON(*(u32 *)(addr + fineibt_caller_hash) != 0x12345678);
+ text_poke_early(addr + fineibt_caller_hash, &hash, 4);
+ /* rely on apply_retpolines() */
+ continue;
+ }
+
+ /* cfi_paranoid */
+ ret = insn_decode_kernel(&insn, addr + fineibt_caller_size);
+ if (WARN_ON_ONCE(ret < 0))
+ continue;
+
+ op = insn.opcode.bytes[0];
+ if (op != CALL_INSN_OPCODE && op != JMP32_INSN_OPCODE) {
+ WARN_ON_ONCE(1);
+ continue;
}
- /* rely on apply_retpolines() */
+
+ memcpy(bytes, fineibt_paranoid_start, fineibt_paranoid_size);
+ memcpy(bytes + fineibt_caller_hash, &hash, 4);
+
+ ret = emit_indirect(op, 11, bytes + fineibt_paranoid_ind);
+ if (WARN_ON_ONCE(ret != 3))
+ continue;
+
+ text_poke_early(addr, bytes, fineibt_paranoid_size);
}
return 0;
}
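
cfi_rewrite_callers() expects emit_indirect() to drop exactly three bytes into the "call *%r11" slot. A minimal sketch of that encoding (REX.B, opcode 0xFF, ModRM /2), assuming the standard ModRM construction rather than the kernel's actual emit_indirect() internals:

#include <stdio.h>

int main(void)
{
	unsigned reg = 11;					/* %r11 */
	unsigned char rex   = 0x40 | (reg >> 3);		/* REX.B -> 0x41 */
	unsigned char modrm = 0xc0 | (2 << 3) | (reg & 7);	/* reg field 2 = CALL -> 0xd3 */

	/* matches the "41 ff d3  call *%r11" line in the paranoid dump above */
	printf("%02x ff %02x\n", rex, modrm);
	return 0;
}
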
static void __apply_fineibt(s32 *start_retpoline, s32 *end_retpoline,
- s32 *start_cfi, s32 *end_cfi, struct module *mod)
+ s32 *start_cfi, s32 *end_cfi, bool builtin)
{
- bool builtin = mod ? false : true;
int ret;
if (WARN_ONCE(fineibt_preamble_size != 16,
@@ -1302,8 +1498,15 @@ static void __apply_fineibt(s32 *start_retpoline, s32 *end_retpoline,
if (cfi_mode == CFI_AUTO) {
cfi_mode = CFI_KCFI;
- if (HAS_KERNEL_IBT && cpu_feature_enabled(X86_FEATURE_IBT))
+ if (HAS_KERNEL_IBT && cpu_feature_enabled(X86_FEATURE_IBT)) {
+ /*
+ * FRED has much saner context on exception entry and
+ * is less easy to take advantage of.
+ */
+ if (!cpu_feature_enabled(X86_FEATURE_FRED))
+ cfi_paranoid = true;
cfi_mode = CFI_FINEIBT;
+ }
}
/*
@@ -1311,7 +1514,7 @@ static void __apply_fineibt(s32 *start_retpoline, s32 *end_retpoline,
* rewrite them. This disables all CFI. If this succeeds but any of the
* later stages fails, we're without CFI.
*/
- ret = cfi_disable_callers(start_retpoline, end_retpoline, mod);
+ ret = cfi_disable_callers(start_retpoline, end_retpoline);
if (ret)
goto err;
@@ -1322,11 +1525,11 @@ static void __apply_fineibt(s32 *start_retpoline, s32 *end_retpoline,
cfi_bpf_subprog_hash = cfi_rehash(cfi_bpf_subprog_hash);
}
- ret = cfi_rand_preamble(start_cfi, end_cfi, mod);
+ ret = cfi_rand_preamble(start_cfi, end_cfi);
if (ret)
goto err;
- ret = cfi_rand_callers(start_retpoline, end_retpoline, mod);
+ ret = cfi_rand_callers(start_retpoline, end_retpoline);
if (ret)
goto err;
}
@@ -1338,7 +1541,7 @@ static void __apply_fineibt(s32 *start_retpoline, s32 *end_retpoline,
return;
case CFI_KCFI:
- ret = cfi_enable_callers(start_retpoline, end_retpoline, mod);
+ ret = cfi_enable_callers(start_retpoline, end_retpoline);
if (ret)
goto err;
@@ -1348,20 +1551,23 @@ static void __apply_fineibt(s32 *start_retpoline, s32 *end_retpoline,
case CFI_FINEIBT:
/* place the FineIBT preamble at func()-16 */
- ret = cfi_rewrite_preamble(start_cfi, end_cfi, mod);
+ ret = cfi_rewrite_preamble(start_cfi, end_cfi);
if (ret)
goto err;
/* rewrite the callers to target func()-16 */
- ret = cfi_rewrite_callers(start_retpoline, end_retpoline, mod);
+ ret = cfi_rewrite_callers(start_retpoline, end_retpoline);
if (ret)
goto err;
/* now that nobody targets func()+0, remove ENDBR there */
- cfi_rewrite_endbr(start_cfi, end_cfi, mod);
+ cfi_rewrite_endbr(start_cfi, end_cfi);
- if (builtin)
- pr_info("Using FineIBT CFI\n");
+ if (builtin) {
+ pr_info("Using %sFineIBT%s CFI\n",
+ cfi_paranoid ? "paranoid " : "",
+ cfi_bhi ? "+BHI" : "");
+ }
return;
default:
@@ -1377,11 +1583,25 @@ static inline void poison_hash(void *addr)
*(u32 *)addr = 0;
}
-static void poison_cfi(void *addr, void *wr_addr)
+static void poison_cfi(void *addr)
{
+ /*
+ * Compilers manage to be inconsistent with ENDBR vs __cfi prefixes,
+ * some (static) functions for which they can determine the address
+ * is never taken do not get a __cfi prefix, but *DO* get an ENDBR.
+ *
+ * As such, these functions will get sealed, but we need to be careful
+ * to not unconditionally scribble the previous function.
+ */
switch (cfi_mode) {
case CFI_FINEIBT:
/*
+ * FineIBT prefix should start with an ENDBR.
+ */
+ if (!is_endbr(addr))
+ break;
+
+ /*
* __cfi_\func:
* osp nopl (%rax)
* subl $0, %r10d
@@ -1389,17 +1609,23 @@ static void poison_cfi(void *addr, void *wr_addr)
* ud2
* 1: nop
*/
- poison_endbr(addr, wr_addr, false);
- poison_hash(wr_addr + fineibt_preamble_hash);
+ poison_endbr(addr);
+ poison_hash(addr + fineibt_preamble_hash);
break;
case CFI_KCFI:
/*
+ * kCFI prefix should start with a valid hash.
+ */
+ if (!decode_preamble_hash(addr, NULL))
+ break;
+
+ /*
* __cfi_\func:
* movl $0, %eax
* .skip 11, 0x90
*/
- poison_hash(wr_addr + 1);
+ poison_hash(addr + 1);
break;
default:
@@ -1407,24 +1633,135 @@ static void poison_cfi(void *addr, void *wr_addr)
}
}
-#else
+/*
+ * When regs->ip points to a 0xEA byte in the FineIBT preamble,
+ * return true and fill out target and type.
+ *
+ * We check the preamble by checking for the ENDBR instruction relative to the
+ * 0xEA instruction.
+ */
+static bool decode_fineibt_preamble(struct pt_regs *regs, unsigned long *target, u32 *type)
+{
+ unsigned long addr = regs->ip - fineibt_preamble_ud;
+ u32 hash;
+
+ if (!exact_endbr((void *)addr))
+ return false;
+
+ *target = addr + fineibt_preamble_size;
+
+ __get_kernel_nofault(&hash, addr + fineibt_preamble_hash, u32, Efault);
+ *type = (u32)regs->r10 + hash;
+
+ /*
+ * Since regs->ip points to the middle of an instruction; it cannot
+ * continue with the normal fixup.
+ */
+ regs->ip = *target;
+
+ return true;
+
+Efault:
+ return false;
+}
+
+/*
+ * regs->ip points to one of the UD2 in __bhi_args[].
+ */
+static bool decode_fineibt_bhi(struct pt_regs *regs, unsigned long *target, u32 *type)
+{
+ unsigned long addr;
+ u32 hash;
+
+ if (!cfi_bhi)
+ return false;
+
+ if (regs->ip < (unsigned long)__bhi_args ||
+ regs->ip >= (unsigned long)__bhi_args_end)
+ return false;
+
+ /*
+ * Fetch the return address from the stack, this points to the
+ * FineIBT preamble. Since the CALL instruction is in the 5 last
+ * bytes of the preamble, the return address is in fact the target
+ * address.
+ */
+ __get_kernel_nofault(&addr, regs->sp, unsigned long, Efault);
+ *target = addr;
+
+ addr -= fineibt_preamble_size;
+ if (!exact_endbr((void *)addr))
+ return false;
+
+ __get_kernel_nofault(&hash, addr + fineibt_preamble_hash, u32, Efault);
+ *type = (u32)regs->r10 + hash;
+
+ /*
+ * The UD2 sites are constructed with a RET immediately following,
+ * as such the non-fatal case can use the regular fixup.
+ */
+ return true;
+
+Efault:
+ return false;
+}
+
+/*
+ * regs->ip points to a LOCK Jcc.d8 instruction from the fineibt_paranoid_start[]
+ * sequence.
+ */
+static bool decode_fineibt_paranoid(struct pt_regs *regs, unsigned long *target, u32 *type)
+{
+ unsigned long addr = regs->ip - fineibt_paranoid_ud;
+ u32 hash;
+
+ if (!cfi_paranoid || !is_cfi_trap(addr + fineibt_caller_size - LEN_UD2))
+ return false;
+
+ __get_kernel_nofault(&hash, addr + fineibt_caller_hash, u32, Efault);
+ *target = regs->r11 + fineibt_preamble_size;
+ *type = regs->r10;
+
+ /*
+ * Since the trapping instruction is the exact, but LOCK prefixed,
+ * Jcc.d8 that got us here, the normal fixup will work.
+ */
+ return true;
+
+Efault:
+ return false;
+}
+
+bool decode_fineibt_insn(struct pt_regs *regs, unsigned long *target, u32 *type)
+{
+ if (decode_fineibt_paranoid(regs, target, type))
+ return true;
+
+ if (decode_fineibt_bhi(regs, target, type))
+ return true;
+
+ return decode_fineibt_preamble(regs, target, type);
+}
+
+#else /* !CONFIG_FINEIBT: */
static void __apply_fineibt(s32 *start_retpoline, s32 *end_retpoline,
- s32 *start_cfi, s32 *end_cfi, struct module *mod)
+ s32 *start_cfi, s32 *end_cfi, bool builtin)
{
}
#ifdef CONFIG_X86_KERNEL_IBT
-static void poison_cfi(void *addr, void *wr_addr) { }
+static void poison_cfi(void *addr) { }
#endif
-#endif
+#endif /* !CONFIG_FINEIBT */
void apply_fineibt(s32 *start_retpoline, s32 *end_retpoline,
- s32 *start_cfi, s32 *end_cfi, struct module *mod)
+ s32 *start_cfi, s32 *end_cfi)
{
return __apply_fineibt(start_retpoline, end_retpoline,
- start_cfi, end_cfi, mod);
+ start_cfi, end_cfi,
+ /* .builtin = */ false);
}
#ifdef CONFIG_SMP
@@ -1721,27 +2058,27 @@ void __init alternative_instructions(void)
paravirt_set_cap();
__apply_fineibt(__retpoline_sites, __retpoline_sites_end,
- __cfi_sites, __cfi_sites_end, NULL);
+ __cfi_sites, __cfi_sites_end, true);
/*
* Rewrite the retpolines, must be done before alternatives since
* those can rewrite the retpoline thunks.
*/
- apply_retpolines(__retpoline_sites, __retpoline_sites_end, NULL);
- apply_returns(__return_sites, __return_sites_end, NULL);
-
- apply_alternatives(__alt_instructions, __alt_instructions_end, NULL);
+ apply_retpolines(__retpoline_sites, __retpoline_sites_end);
+ apply_returns(__return_sites, __return_sites_end);
/*
- * Now all calls are established. Apply the call thunks if
- * required.
+ * Adjust all CALL instructions to point to func()-10, including
+ * those in .altinstr_replacement.
*/
callthunks_patch_builtin_calls();
+ apply_alternatives(__alt_instructions, __alt_instructions_end);
+
/*
* Seal all functions that do not have their address taken.
*/
- apply_seal_endbr(__ibt_endbr_seal, __ibt_endbr_seal_end, NULL);
+ apply_seal_endbr(__ibt_endbr_seal, __ibt_endbr_seal_end);
#ifdef CONFIG_SMP
/* Patch to UP if other cpus not imminent. */
diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c
index 11fac09e3a8c..6d12a9b69432 100644
--- a/arch/x86/kernel/amd_nb.c
+++ b/arch/x86/kernel/amd_nb.c
@@ -73,7 +73,6 @@ static int amd_cache_northbridges(void)
amd_northbridges.nb = nb;
for (i = 0; i < amd_northbridges.num; i++) {
- node_to_amd_nb(i)->root = amd_node_get_root(i);
node_to_amd_nb(i)->misc = amd_node_get_func(i, 3);
/*
@@ -143,7 +142,6 @@ bool __init early_is_amd_nb(u32 device)
struct resource *amd_get_mmconfig_range(struct resource *res)
{
- u32 address;
u64 base, msr;
unsigned int segn_busn_bits;
@@ -151,13 +149,11 @@ struct resource *amd_get_mmconfig_range(struct resource *res)
boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
return NULL;
- /* assume all cpus from fam10h have mmconfig */
- if (boot_cpu_data.x86 < 0x10)
+ /* Assume CPUs from Fam10h have mmconfig, although not all VMs do */
+ if (boot_cpu_data.x86 < 0x10 ||
+ rdmsrl_safe(MSR_FAM10H_MMIO_CONF_BASE, &msr))
return NULL;
- address = MSR_FAM10H_MMIO_CONF_BASE;
- rdmsrl(address, msr);
-
/* mmconfig is not enabled */
if (!(msr & FAM10H_MMIO_CONF_ENABLE))
return NULL;
diff --git a/arch/x86/kernel/amd_node.c b/arch/x86/kernel/amd_node.c
index d2ec7fd555c5..b670fa85c61b 100644
--- a/arch/x86/kernel/amd_node.c
+++ b/arch/x86/kernel/amd_node.c
@@ -8,6 +8,7 @@
* Author: Yazen Ghannam <Yazen.Ghannam@amd.com>
*/
+#include <linux/debugfs.h>
#include <asm/amd_node.h>
/*
@@ -93,10 +94,14 @@ static struct pci_dev **amd_roots;
/* Protect the PCI config register pairs used for SMN. */
static DEFINE_MUTEX(smn_mutex);
+static bool smn_exclusive;
#define SMN_INDEX_OFFSET 0x60
#define SMN_DATA_OFFSET 0x64
+#define HSMP_INDEX_OFFSET 0xc4
+#define HSMP_DATA_OFFSET 0xc8
+
/*
* SMN accesses may fail in ways that are difficult to detect here in the called
* functions amd_smn_read() and amd_smn_write(). Therefore, callers must do
@@ -146,6 +151,9 @@ static int __amd_smn_rw(u8 i_off, u8 d_off, u16 node, u32 address, u32 *value, b
if (!root)
return err;
+ if (!smn_exclusive)
+ return err;
+
guard(mutex)(&smn_mutex);
err = pci_write_config_dword(root, i_off, address);
@@ -179,6 +187,93 @@ int __must_check amd_smn_write(u16 node, u32 address, u32 value)
}
EXPORT_SYMBOL_GPL(amd_smn_write);
+int __must_check amd_smn_hsmp_rdwr(u16 node, u32 address, u32 *value, bool write)
+{
+ return __amd_smn_rw(HSMP_INDEX_OFFSET, HSMP_DATA_OFFSET, node, address, value, write);
+}
+EXPORT_SYMBOL_GPL(amd_smn_hsmp_rdwr);
+
+static struct dentry *debugfs_dir;
+static u16 debug_node;
+static u32 debug_address;
+
+static ssize_t smn_node_write(struct file *file, const char __user *userbuf,
+ size_t count, loff_t *ppos)
+{
+ u16 node;
+ int ret;
+
+ ret = kstrtou16_from_user(userbuf, count, 0, &node);
+ if (ret)
+ return ret;
+
+ if (node >= amd_num_nodes())
+ return -ENODEV;
+
+ debug_node = node;
+ return count;
+}
+
+static int smn_node_show(struct seq_file *m, void *v)
+{
+ seq_printf(m, "0x%08x\n", debug_node);
+ return 0;
+}
+
+static ssize_t smn_address_write(struct file *file, const char __user *userbuf,
+ size_t count, loff_t *ppos)
+{
+ int ret;
+
+ ret = kstrtouint_from_user(userbuf, count, 0, &debug_address);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static int smn_address_show(struct seq_file *m, void *v)
+{
+ seq_printf(m, "0x%08x\n", debug_address);
+ return 0;
+}
+
+static int smn_value_show(struct seq_file *m, void *v)
+{
+ u32 val;
+ int ret;
+
+ ret = amd_smn_read(debug_node, debug_address, &val);
+ if (ret)
+ return ret;
+
+ seq_printf(m, "0x%08x\n", val);
+ return 0;
+}
+
+static ssize_t smn_value_write(struct file *file, const char __user *userbuf,
+ size_t count, loff_t *ppos)
+{
+ u32 val;
+ int ret;
+
+ ret = kstrtouint_from_user(userbuf, count, 0, &val);
+ if (ret)
+ return ret;
+
+ add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK);
+
+ ret = amd_smn_write(debug_node, debug_address, val);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+DEFINE_SHOW_STORE_ATTRIBUTE(smn_node);
+DEFINE_SHOW_STORE_ATTRIBUTE(smn_address);
+DEFINE_SHOW_STORE_ATTRIBUTE(smn_value);
+
static int amd_cache_roots(void)
{
u16 node, num_nodes = amd_num_nodes();
@@ -193,6 +288,48 @@ static int amd_cache_roots(void)
return 0;
}
+static int reserve_root_config_spaces(void)
+{
+ struct pci_dev *root = NULL;
+ struct pci_bus *bus = NULL;
+
+ while ((bus = pci_find_next_bus(bus))) {
+ /* Root device is Device 0 Function 0 on each Primary Bus. */
+ root = pci_get_slot(bus, 0);
+ if (!root)
+ continue;
+
+ if (root->vendor != PCI_VENDOR_ID_AMD &&
+ root->vendor != PCI_VENDOR_ID_HYGON)
+ continue;
+
+ pci_dbg(root, "Reserving PCI config space\n");
+
+ /*
+ * There are a few SMN index/data pairs and other registers
+ * that shouldn't be accessed by user space.
+ * So reserve the entire PCI config space for simplicity rather
+ * than covering specific registers piecemeal.
+ */
+ if (!pci_request_config_region_exclusive(root, 0, PCI_CFG_SPACE_SIZE, NULL)) {
+ pci_err(root, "Failed to reserve config space\n");
+ return -EEXIST;
+ }
+ }
+
+ smn_exclusive = true;
+ return 0;
+}
+
+static bool enable_dfs;
+
+static int __init amd_smn_enable_dfs(char *str)
+{
+ enable_dfs = true;
+ return 1;
+}
+__setup("amd_smn_debugfs_enable", amd_smn_enable_dfs);
+
static int __init amd_smn_init(void)
{
int err;
@@ -209,6 +346,18 @@ static int __init amd_smn_init(void)
if (err)
return err;
+ err = reserve_root_config_spaces();
+ if (err)
+ return err;
+
+ if (enable_dfs) {
+ debugfs_dir = debugfs_create_dir("amd_smn", arch_debugfs_dir);
+
+ debugfs_create_file("node", 0600, debugfs_dir, NULL, &smn_node_fops);
+ debugfs_create_file("address", 0600, debugfs_dir, NULL, &smn_address_fops);
+ debugfs_create_file("value", 0600, debugfs_dir, NULL, &smn_value_fops);
+ }
+
return 0;
}
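
A minimal sketch of how the new debugfs files might be driven from user space after booting with amd_smn_debugfs_enable. The paths assume arch_debugfs_dir resolves to /sys/kernel/debug/x86, and the SMN address is purely an example:

#include <stdio.h>

static int write_str(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f)
		return -1;
	fprintf(f, "%s\n", val);
	return fclose(f);
}

int main(void)
{
	char buf[32];
	FILE *f;

	write_str("/sys/kernel/debug/x86/amd_smn/node", "0");
	write_str("/sys/kernel/debug/x86/amd_smn/address", "0x50200");	/* example address */

	f = fopen("/sys/kernel/debug/x86/amd_smn/value", "r");		/* issues amd_smn_read() */
	if (f && fgets(buf, sizeof(buf), f))
		printf("SMN value: %s", buf);
	if (f)
		fclose(f);
	return 0;
}
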
diff --git a/arch/x86/kernel/apic/Makefile b/arch/x86/kernel/apic/Makefile
index 3bf0487cf3b7..52d1808ee360 100644
--- a/arch/x86/kernel/apic/Makefile
+++ b/arch/x86/kernel/apic/Makefile
@@ -23,8 +23,5 @@ obj-$(CONFIG_X86_X2APIC) += x2apic_cluster.o
obj-y += apic_flat_64.o
endif
-# APIC probe will depend on the listing order here
-obj-$(CONFIG_X86_BIGSMP) += bigsmp_32.o
-
# For 32bit, probe_32 need to be listed last
obj-$(CONFIG_X86_LOCAL_APIC) += probe_$(BITS).o
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index e893dc6f11c1..62584a347931 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -1371,8 +1371,6 @@ void __init apic_intr_mode_init(void)
x86_64_probe_apic();
- x86_32_install_bigsmp();
-
if (x86_platform.apic_post_init)
x86_platform.apic_post_init();
@@ -1674,7 +1672,6 @@ static __init void apic_read_boot_cpu_id(bool x2apic)
boot_cpu_apic_version = GET_APIC_VERSION(apic_read(APIC_LVR));
}
topology_register_boot_apic(boot_cpu_physical_apicid);
- x86_32_probe_bigsmp_early();
}
#ifdef CONFIG_X86_X2APIC
@@ -2014,8 +2011,8 @@ static bool __init detect_init_APIC(void)
case X86_VENDOR_HYGON:
break;
case X86_VENDOR_INTEL:
- if (boot_cpu_data.x86 == 6 || boot_cpu_data.x86 == 15 ||
- (boot_cpu_data.x86 == 5 && boot_cpu_has(X86_FEATURE_APIC)))
+ if ((boot_cpu_data.x86 == 5 && boot_cpu_has(X86_FEATURE_APIC)) ||
+ boot_cpu_data.x86_vfm >= INTEL_PENTIUM_PRO)
break;
goto no_apic;
default:
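
The new test compares boot_cpu_data.x86_vfm against INTEL_PENTIUM_PRO. A minimal sketch of why a single range compare can replace the "family 6 or 15" check, assuming the VFM value packs vendor, family and model from most to least significant (the macro below is an illustration, not the kernel's definition):

#include <stdio.h>

#define VFM(vendor, family, model)	(((vendor) << 16) | ((family) << 8) | (model))

int main(void)
{
	unsigned intel = 0;				/* X86_VENDOR_INTEL */
	unsigned pentium_pro = VFM(intel, 6, 1);	/* stand-in for INTEL_PENTIUM_PRO */

	printf("%d %d %d\n",
	       VFM(intel, 6, 0x0f) >= pentium_pro,	/* 1: later family 6 models */
	       VFM(intel, 15, 2)   >= pentium_pro,	/* 1: Pentium 4 family */
	       VFM(intel, 5, 4)    >= pentium_pro);	/* 0: plain Pentium */
	return 0;
}
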
diff --git a/arch/x86/kernel/apic/bigsmp_32.c b/arch/x86/kernel/apic/bigsmp_32.c
deleted file mode 100644
index 9285d500d5b4..000000000000
--- a/arch/x86/kernel/apic/bigsmp_32.c
+++ /dev/null
@@ -1,105 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * APIC driver for "bigsmp" xAPIC machines with more than 8 virtual CPUs.
- *
- * Drives the local APIC in "clustered mode".
- */
-#include <linux/cpumask.h>
-#include <linux/dmi.h>
-#include <linux/smp.h>
-
-#include <asm/apic.h>
-#include <asm/io_apic.h>
-
-#include "local.h"
-
-static u32 bigsmp_get_apic_id(u32 x)
-{
- return (x >> 24) & 0xFF;
-}
-
-static void bigsmp_send_IPI_allbutself(int vector)
-{
- default_send_IPI_mask_allbutself_phys(cpu_online_mask, vector);
-}
-
-static void bigsmp_send_IPI_all(int vector)
-{
- default_send_IPI_mask_sequence_phys(cpu_online_mask, vector);
-}
-
-static int dmi_bigsmp; /* can be set by dmi scanners */
-
-static int hp_ht_bigsmp(const struct dmi_system_id *d)
-{
- printk(KERN_NOTICE "%s detected: force use of apic=bigsmp\n", d->ident);
- dmi_bigsmp = 1;
-
- return 0;
-}
-
-
-static const struct dmi_system_id bigsmp_dmi_table[] = {
- { hp_ht_bigsmp, "HP ProLiant DL760 G2",
- { DMI_MATCH(DMI_BIOS_VENDOR, "HP"),
- DMI_MATCH(DMI_BIOS_VERSION, "P44-"),
- }
- },
-
- { hp_ht_bigsmp, "HP ProLiant DL740",
- { DMI_MATCH(DMI_BIOS_VENDOR, "HP"),
- DMI_MATCH(DMI_BIOS_VERSION, "P47-"),
- }
- },
- { } /* NULL entry stops DMI scanning */
-};
-
-static int probe_bigsmp(void)
-{
- return dmi_check_system(bigsmp_dmi_table);
-}
-
-static struct apic apic_bigsmp __ro_after_init = {
-
- .name = "bigsmp",
- .probe = probe_bigsmp,
-
- .dest_mode_logical = false,
-
- .disable_esr = 1,
-
- .cpu_present_to_apicid = default_cpu_present_to_apicid,
-
- .max_apic_id = 0xFE,
- .get_apic_id = bigsmp_get_apic_id,
-
- .calc_dest_apicid = apic_default_calc_apicid,
-
- .send_IPI = default_send_IPI_single_phys,
- .send_IPI_mask = default_send_IPI_mask_sequence_phys,
- .send_IPI_mask_allbutself = NULL,
- .send_IPI_allbutself = bigsmp_send_IPI_allbutself,
- .send_IPI_all = bigsmp_send_IPI_all,
- .send_IPI_self = default_send_IPI_self,
-
- .read = native_apic_mem_read,
- .write = native_apic_mem_write,
- .eoi = native_apic_mem_eoi,
- .icr_read = native_apic_icr_read,
- .icr_write = native_apic_icr_write,
- .wait_icr_idle = apic_mem_wait_icr_idle,
- .safe_wait_icr_idle = apic_mem_wait_icr_idle_timeout,
-};
-
-bool __init apic_bigsmp_possible(bool cmdline_override)
-{
- return apic == &apic_bigsmp || !cmdline_override;
-}
-
-void __init apic_bigsmp_force(void)
-{
- if (apic != &apic_bigsmp)
- apic_install_driver(&apic_bigsmp);
-}
-
-apic_driver(apic_bigsmp);
diff --git a/arch/x86/kernel/apic/ipi.c b/arch/x86/kernel/apic/ipi.c
index 5da693d633b7..98a57cb4aa86 100644
--- a/arch/x86/kernel/apic/ipi.c
+++ b/arch/x86/kernel/apic/ipi.c
@@ -3,6 +3,7 @@
#include <linux/cpumask.h>
#include <linux/delay.h>
#include <linux/smp.h>
+#include <linux/string_choices.h>
#include <asm/io_apic.h>
@@ -23,7 +24,7 @@ __setup("no_ipi_broadcast=", apic_ipi_shorthand);
static int __init print_ipi_mode(void)
{
pr_info("IPI shorthand broadcast: %s\n",
- apic_ipi_shorthand_off ? "disabled" : "enabled");
+ str_disabled_enabled(apic_ipi_shorthand_off));
return 0;
}
late_initcall(print_ipi_mode);
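
print_ipi_mode() now uses str_disabled_enabled() from <linux/string_choices.h>. A minimal sketch of the semantics it is assumed to have, matching the ternary it replaces:

#include <stdio.h>

/* assumed behaviour: "disabled" when the flag is set, "enabled" otherwise */
static const char *str_disabled_enabled(int v)
{
	return v ? "disabled" : "enabled";
}

int main(void)
{
	printf("IPI shorthand broadcast: %s\n", str_disabled_enabled(0));
	return 0;
}
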
@@ -287,34 +288,4 @@ void default_send_IPI_mask_logical(const struct cpumask *cpumask, int vector)
__default_send_IPI_dest_field(mask, vector, APIC_DEST_LOGICAL);
local_irq_restore(flags);
}
-
-#ifdef CONFIG_SMP
-static int convert_apicid_to_cpu(u32 apic_id)
-{
- int i;
-
- for_each_possible_cpu(i) {
- if (per_cpu(x86_cpu_to_apicid, i) == apic_id)
- return i;
- }
- return -1;
-}
-
-int safe_smp_processor_id(void)
-{
- u32 apicid;
- int cpuid;
-
- if (!boot_cpu_has(X86_FEATURE_APIC))
- return 0;
-
- apicid = read_apic_id();
- if (apicid == BAD_APICID)
- return 0;
-
- cpuid = convert_apicid_to_cpu(apicid);
-
- return cpuid >= 0 ? cpuid : 0;
-}
-#endif
#endif
diff --git a/arch/x86/kernel/apic/local.h b/arch/x86/kernel/apic/local.h
index 842fe28496be..bdcf609eb283 100644
--- a/arch/x86/kernel/apic/local.h
+++ b/arch/x86/kernel/apic/local.h
@@ -65,17 +65,4 @@ void default_send_IPI_self(int vector);
void default_send_IPI_mask_sequence_logical(const struct cpumask *mask, int vector);
void default_send_IPI_mask_allbutself_logical(const struct cpumask *mask, int vector);
void default_send_IPI_mask_logical(const struct cpumask *mask, int vector);
-void x86_32_probe_bigsmp_early(void);
-void x86_32_install_bigsmp(void);
-#else
-static inline void x86_32_probe_bigsmp_early(void) { }
-static inline void x86_32_install_bigsmp(void) { }
-#endif
-
-#ifdef CONFIG_X86_BIGSMP
-bool apic_bigsmp_possible(bool cmdline_selected);
-void apic_bigsmp_force(void);
-#else
-static inline bool apic_bigsmp_possible(bool cmdline_selected) { return false; };
-static inline void apic_bigsmp_force(void) { }
#endif
diff --git a/arch/x86/kernel/apic/probe_32.c b/arch/x86/kernel/apic/probe_32.c
index f75ee345c02d..87bc9e7ca5d6 100644
--- a/arch/x86/kernel/apic/probe_32.c
+++ b/arch/x86/kernel/apic/probe_32.c
@@ -93,35 +93,6 @@ static int __init parse_apic(char *arg)
}
early_param("apic", parse_apic);
-void __init x86_32_probe_bigsmp_early(void)
-{
- if (nr_cpu_ids <= 8 || xen_pv_domain())
- return;
-
- if (IS_ENABLED(CONFIG_X86_BIGSMP)) {
- switch (boot_cpu_data.x86_vendor) {
- case X86_VENDOR_INTEL:
- if (!APIC_XAPIC(boot_cpu_apic_version))
- break;
- /* P4 and above */
- fallthrough;
- case X86_VENDOR_HYGON:
- case X86_VENDOR_AMD:
- if (apic_bigsmp_possible(cmdline_apic))
- return;
- break;
- }
- }
- pr_info("Limiting to 8 possible CPUs\n");
- set_nr_cpu_ids(8);
-}
-
-void __init x86_32_install_bigsmp(void)
-{
- if (nr_cpu_ids > 8 && !xen_pv_domain())
- apic_bigsmp_force();
-}
-
void __init x86_32_probe_apic(void)
{
if (!cmdline_apic) {
diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
index 736f62812f5c..72fa4bb78f0a 100644
--- a/arch/x86/kernel/apic/vector.c
+++ b/arch/x86/kernel/apic/vector.c
@@ -888,8 +888,109 @@ static int apic_set_affinity(struct irq_data *irqd,
return err ? err : IRQ_SET_MASK_OK;
}
+static void free_moved_vector(struct apic_chip_data *apicd)
+{
+ unsigned int vector = apicd->prev_vector;
+ unsigned int cpu = apicd->prev_cpu;
+ bool managed = apicd->is_managed;
+
+ /*
+ * Managed interrupts are usually not migrated away
+ * from an online CPU, but CPU isolation 'managed_irq'
+ * can make that happen.
+ * 1) Activation does not take the isolation into account
+ * to keep the code simple
+ * 2) Migration away from an isolated CPU can happen when
+ * a non-isolated CPU which is in the calculated
+ * affinity mask comes online.
+ */
+ trace_vector_free_moved(apicd->irq, cpu, vector, managed);
+ irq_matrix_free(vector_matrix, cpu, vector, managed);
+ per_cpu(vector_irq, cpu)[vector] = VECTOR_UNUSED;
+ hlist_del_init(&apicd->clist);
+ apicd->prev_vector = 0;
+ apicd->move_in_progress = 0;
+}
+
+/*
+ * Called from fixup_irqs() with @desc->lock held and interrupts disabled.
+ */
+static void apic_force_complete_move(struct irq_data *irqd)
+{
+ unsigned int cpu = smp_processor_id();
+ struct apic_chip_data *apicd;
+ unsigned int vector;
+
+ guard(raw_spinlock)(&vector_lock);
+ apicd = apic_chip_data(irqd);
+ if (!apicd)
+ return;
+
+ /*
+ * If prev_vector is empty or the descriptor is neither currently
+ * nor previously on the outgoing CPU no action required.
+ */
+ vector = apicd->prev_vector;
+ if (!vector || (apicd->cpu != cpu && apicd->prev_cpu != cpu))
+ return;
+
+ /*
+ * This is tricky. If the cleanup of the old vector has not been
+ * done yet, then the following setaffinity call will fail with
+ * -EBUSY. This can leave the interrupt in a stale state.
+ *
+ * All CPUs are stuck in stop machine with interrupts disabled so
+ * calling __irq_complete_move() would be completely pointless.
+ *
+ * 1) The interrupt is in move_in_progress state. That means that we
+ * have not seen an interrupt since the io_apic was reprogrammed to
+ * the new vector.
+ *
+ * 2) The interrupt has fired on the new vector, but the cleanup IPIs
+ * have not been processed yet.
+ */
+ if (apicd->move_in_progress) {
+ /*
+ * In theory there is a race:
+ *
+ * set_ioapic(new_vector) <-- Interrupt is raised before update
+ * is effective, i.e. it's raised on
+ * the old vector.
+ *
+ * So if the target cpu cannot handle that interrupt before
+ * the old vector is cleaned up, we get a spurious interrupt
+ * and in the worst case the ioapic irq line becomes stale.
+ *
+ * But in case of cpu hotplug this should be a non issue
+ * because if the affinity update happens right before all
+ * cpus rendezvous in stop machine, there is no way that the
+ * interrupt can be blocked on the target cpu because all cpus
+ * loops first with interrupts enabled in stop machine, so the
+ * old vector is not yet cleaned up when the interrupt fires.
+ *
+ * So the only way to run into this issue is if the delivery
+ * of the interrupt on the apic/system bus would be delayed
+ * beyond the point where the target cpu disables interrupts
+ * in stop machine. I doubt that it can happen, but at least
+ * there is a theoretical chance. Virtualization might be
+ * able to expose this, but AFAICT the IOAPIC emulation is not
+ * as stupid as the real hardware.
+ *
+ * Anyway, there is nothing we can do about that at this point
+ * w/o refactoring the whole fixup_irq() business completely.
+ * We print at least the irq number and the old vector number,
+ * so we have the necessary information when a problem in that
+ * area arises.
+ */
+ pr_warn("IRQ fixup: irq %d move in progress, old vector %d\n",
+ irqd->irq, vector);
+ }
+ free_moved_vector(apicd);
+}
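
apic_force_complete_move() leans on guard(raw_spinlock)(&vector_lock), so the lock is released on every return path and the old "goto unlock" structure can go away. A minimal user-space analogue of that scope-based cleanup, using the compiler cleanup attribute with a pthread mutex standing in for the raw spinlock:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int answer;

static void unlock_cleanup(pthread_mutex_t **m)
{
	pthread_mutex_unlock(*m);
}

/* lock now, unlock automatically when the enclosing scope ends */
#define guard_mutex(m) \
	pthread_mutex_t *scope_lock __attribute__((cleanup(unlock_cleanup), unused)) = \
		(pthread_mutex_lock(m), (m))

int main(void)
{
	{
		guard_mutex(&lock);
		answer = 42;		/* early returns would also unlock */
	}
	printf("%d\n", answer);
	return 0;
}
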
+
#else
-# define apic_set_affinity NULL
+# define apic_set_affinity NULL
+# define apic_force_complete_move NULL
#endif
static int apic_retrigger_irq(struct irq_data *irqd)
@@ -923,39 +1024,16 @@ static void x86_vector_msi_compose_msg(struct irq_data *data,
}
static struct irq_chip lapic_controller = {
- .name = "APIC",
- .irq_ack = apic_ack_edge,
- .irq_set_affinity = apic_set_affinity,
- .irq_compose_msi_msg = x86_vector_msi_compose_msg,
- .irq_retrigger = apic_retrigger_irq,
+ .name = "APIC",
+ .irq_ack = apic_ack_edge,
+ .irq_set_affinity = apic_set_affinity,
+ .irq_compose_msi_msg = x86_vector_msi_compose_msg,
+ .irq_force_complete_move = apic_force_complete_move,
+ .irq_retrigger = apic_retrigger_irq,
};
#ifdef CONFIG_SMP
-static void free_moved_vector(struct apic_chip_data *apicd)
-{
- unsigned int vector = apicd->prev_vector;
- unsigned int cpu = apicd->prev_cpu;
- bool managed = apicd->is_managed;
-
- /*
- * Managed interrupts are usually not migrated away
- * from an online CPU, but CPU isolation 'managed_irq'
- * can make that happen.
- * 1) Activation does not take the isolation into account
- * to keep the code simple
- * 2) Migration away from an isolated CPU can happen when
- * a non-isolated CPU which is in the calculated
- * affinity mask comes online.
- */
- trace_vector_free_moved(apicd->irq, cpu, vector, managed);
- irq_matrix_free(vector_matrix, cpu, vector, managed);
- per_cpu(vector_irq, cpu)[vector] = VECTOR_UNUSED;
- hlist_del_init(&apicd->clist);
- apicd->prev_vector = 0;
- apicd->move_in_progress = 0;
-}
-
static void __vector_cleanup(struct vector_cleanup *cl, bool check_irr)
{
struct apic_chip_data *apicd;
@@ -1068,99 +1146,6 @@ void irq_complete_move(struct irq_cfg *cfg)
__vector_schedule_cleanup(apicd);
}
-/*
- * Called from fixup_irqs() with @desc->lock held and interrupts disabled.
- */
-void irq_force_complete_move(struct irq_desc *desc)
-{
- unsigned int cpu = smp_processor_id();
- struct apic_chip_data *apicd;
- struct irq_data *irqd;
- unsigned int vector;
-
- /*
- * The function is called for all descriptors regardless of which
- * irqdomain they belong to. For example if an IRQ is provided by
- * an irq_chip as part of a GPIO driver, the chip data for that
- * descriptor is specific to the irq_chip in question.
- *
- * Check first that the chip_data is what we expect
- * (apic_chip_data) before touching it any further.
- */
- irqd = irq_domain_get_irq_data(x86_vector_domain,
- irq_desc_get_irq(desc));
- if (!irqd)
- return;
-
- raw_spin_lock(&vector_lock);
- apicd = apic_chip_data(irqd);
- if (!apicd)
- goto unlock;
-
- /*
- * If prev_vector is empty or the descriptor is neither currently
- * nor previously on the outgoing CPU no action required.
- */
- vector = apicd->prev_vector;
- if (!vector || (apicd->cpu != cpu && apicd->prev_cpu != cpu))
- goto unlock;
-
- /*
- * This is tricky. If the cleanup of the old vector has not been
- * done yet, then the following setaffinity call will fail with
- * -EBUSY. This can leave the interrupt in a stale state.
- *
- * All CPUs are stuck in stop machine with interrupts disabled so
- * calling __irq_complete_move() would be completely pointless.
- *
- * 1) The interrupt is in move_in_progress state. That means that we
- * have not seen an interrupt since the io_apic was reprogrammed to
- * the new vector.
- *
- * 2) The interrupt has fired on the new vector, but the cleanup IPIs
- * have not been processed yet.
- */
- if (apicd->move_in_progress) {
- /*
- * In theory there is a race:
- *
- * set_ioapic(new_vector) <-- Interrupt is raised before update
- * is effective, i.e. it's raised on
- * the old vector.
- *
- * So if the target cpu cannot handle that interrupt before
- * the old vector is cleaned up, we get a spurious interrupt
- * and in the worst case the ioapic irq line becomes stale.
- *
- * But in case of cpu hotplug this should be a non issue
- * because if the affinity update happens right before all
- * cpus rendezvous in stop machine, there is no way that the
- * interrupt can be blocked on the target cpu because all cpus
- * loops first with interrupts enabled in stop machine, so the
- * old vector is not yet cleaned up when the interrupt fires.
- *
- * So the only way to run into this issue is if the delivery
- * of the interrupt on the apic/system bus would be delayed
- * beyond the point where the target cpu disables interrupts
- * in stop machine. I doubt that it can happen, but at least
- * there is a theoretical chance. Virtualization might be
- * able to expose this, but AFAICT the IOAPIC emulation is not
- * as stupid as the real hardware.
- *
- * Anyway, there is nothing we can do about that at this point
- * w/o refactoring the whole fixup_irq() business completely.
- * We print at least the irq number and the old vector number,
- * so we have the necessary information when a problem in that
- * area arises.
- */
- pr_warn("IRQ fixup: irq %d move in progress, old vector %d\n",
- irqd->irq, vector);
- }
- free_moved_vector(apicd);
-unlock:
- raw_spin_unlock(&vector_lock);
-}
-
#ifdef CONFIG_HOTPLUG_CPU
/*
* Note, this is not accurate accounting, but at least good enough to
diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c
index a98020bf31bb..ad4ea6fb3b6c 100644
--- a/arch/x86/kernel/asm-offsets.c
+++ b/arch/x86/kernel/asm-offsets.c
@@ -107,11 +107,6 @@ static void __used common(void)
OFFSET(TSS_sp0, tss_struct, x86_tss.sp0);
OFFSET(TSS_sp1, tss_struct, x86_tss.sp1);
OFFSET(TSS_sp2, tss_struct, x86_tss.sp2);
- OFFSET(X86_top_of_stack, pcpu_hot, top_of_stack);
- OFFSET(X86_current_task, pcpu_hot, current_task);
-#ifdef CONFIG_MITIGATION_CALL_DEPTH_TRACKING
- OFFSET(X86_call_depth, pcpu_hot, call_depth);
-#endif
#if IS_ENABLED(CONFIG_CRYPTO_ARIA_AESNI_AVX_X86_64)
/* Offset for fields in aria_ctx */
BLANK();
diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
index bb65371ea9df..590b6cd0eac0 100644
--- a/arch/x86/kernel/asm-offsets_64.c
+++ b/arch/x86/kernel/asm-offsets_64.c
@@ -54,11 +54,5 @@ int main(void)
BLANK();
#undef ENTRY
- BLANK();
-
-#ifdef CONFIG_STACKPROTECTOR
- OFFSET(FIXED_stack_canary, fixed_percpu_data, stack_canary);
- BLANK();
-#endif
return 0;
}
diff --git a/arch/x86/kernel/bootflag.c b/arch/x86/kernel/bootflag.c
index 3fed7ae58b60..73274d76ce16 100644
--- a/arch/x86/kernel/bootflag.c
+++ b/arch/x86/kernel/bootflag.c
@@ -8,6 +8,7 @@
#include <linux/string.h>
#include <linux/spinlock.h>
#include <linux/acpi.h>
+#include <linux/bitops.h>
#include <asm/io.h>
#include <linux/mc146818rtc.h>
@@ -20,27 +21,13 @@
int sbf_port __initdata = -1; /* set via acpi_boot_init() */
-static int __init parity(u8 v)
-{
- int x = 0;
- int i;
-
- for (i = 0; i < 8; i++) {
- x ^= (v & 1);
- v >>= 1;
- }
-
- return x;
-}
-
static void __init sbf_write(u8 v)
{
unsigned long flags;
if (sbf_port != -1) {
- v &= ~SBF_PARITY;
- if (!parity(v))
- v |= SBF_PARITY;
+ if (!parity8(v))
+ v ^= SBF_PARITY;
printk(KERN_INFO "Simple Boot Flag at 0x%x set to 0x%x\n",
sbf_port, v);
@@ -66,14 +53,14 @@ static u8 __init sbf_read(void)
return v;
}
-static int __init sbf_value_valid(u8 v)
+static bool __init sbf_value_valid(u8 v)
{
if (v & SBF_RESERVED) /* Reserved bits */
- return 0;
- if (!parity(v))
- return 0;
+ return false;
+ if (!parity8(v))
+ return false;
- return 1;
+ return true;
}
static int __init sbf_init(void)
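The rewrite above replaces the open-coded parity() loop with the generic parity8() helper and flips the parity bit with XOR, so the byte always ends up with odd parity. A minimal user-space sketch of that behaviour follows; the fold-XOR helper and the SBF_PARITY value here are illustrative stand-ins, not the kernel's definitions.

#include <stdint.h>
#include <stdio.h>

#define SBF_PARITY (1 << 7)	/* assumed bit position, for illustration only */

/* Stand-in for parity8(): returns 1 when an odd number of bits are set. */
static int parity8_sketch(uint8_t v)
{
	v ^= v >> 4;
	v ^= v >> 2;
	v ^= v >> 1;
	return v & 1;
}

int main(void)
{
	uint8_t v = 0x05;		/* two bits set -> even parity */

	if (!parity8_sketch(v))
		v ^= SBF_PARITY;	/* toggle so the overall parity becomes odd */

	printf("0x%02x, parity=%d\n", v, parity8_sketch(v));
	return 0;
}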
diff --git a/arch/x86/kernel/callthunks.c b/arch/x86/kernel/callthunks.c
index 8418a892d195..25ae54250112 100644
--- a/arch/x86/kernel/callthunks.c
+++ b/arch/x86/kernel/callthunks.c
@@ -240,21 +240,10 @@ patch_call_sites(s32 *start, s32 *end, const struct core_text *ct)
}
static __init_or_module void
-patch_alt_call_sites(struct alt_instr *start, struct alt_instr *end,
- const struct core_text *ct)
-{
- struct alt_instr *a;
-
- for (a = start; a < end; a++)
- patch_call((void *)&a->instr_offset + a->instr_offset, ct);
-}
-
-static __init_or_module void
callthunks_setup(struct callthunk_sites *cs, const struct core_text *ct)
{
prdbg("Patching call sites %s\n", ct->name);
patch_call_sites(cs->call_start, cs->call_end, ct);
- patch_alt_call_sites(cs->alt_start, cs->alt_end, ct);
prdbg("Patching call sites done%s\n", ct->name);
}
@@ -263,8 +252,6 @@ void __init callthunks_patch_builtin_calls(void)
struct callthunk_sites cs = {
.call_start = __call_sites,
.call_end = __call_sites_end,
- .alt_start = __alt_instructions,
- .alt_end = __alt_instructions_end
};
if (!cpu_feature_enabled(X86_FEATURE_CALL_DEPTH))
diff --git a/arch/x86/kernel/cfi.c b/arch/x86/kernel/cfi.c
index e6bf78fac146..77086cf565ec 100644
--- a/arch/x86/kernel/cfi.c
+++ b/arch/x86/kernel/cfi.c
@@ -67,16 +67,30 @@ static bool decode_cfi_insn(struct pt_regs *regs, unsigned long *target,
*/
enum bug_trap_type handle_cfi_failure(struct pt_regs *regs)
{
- unsigned long target;
+ unsigned long target, addr = regs->ip;
u32 type;
- if (!is_cfi_trap(regs->ip))
- return BUG_TRAP_TYPE_NONE;
+ switch (cfi_mode) {
+ case CFI_KCFI:
+ if (!is_cfi_trap(addr))
+ return BUG_TRAP_TYPE_NONE;
+
+ if (!decode_cfi_insn(regs, &target, &type))
+ return report_cfi_failure_noaddr(regs, addr);
+
+ break;
- if (!decode_cfi_insn(regs, &target, &type))
- return report_cfi_failure_noaddr(regs, regs->ip);
+ case CFI_FINEIBT:
+ if (!decode_fineibt_insn(regs, &target, &type))
+ return BUG_TRAP_TYPE_NONE;
+
+ break;
+
+ default:
+ return BUG_TRAP_TYPE_NONE;
+ }
- return report_cfi_failure(regs, regs->ip, &target, type);
+ return report_cfi_failure(regs, addr, &target, type);
}
/*
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 54194f5995de..79569f72b8ee 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -29,6 +29,8 @@
#include "cpu.h"
+u16 invlpgb_count_max __ro_after_init;
+
static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
{
u32 gprs[8] = { 0 };
@@ -632,7 +634,7 @@ static void init_amd_k8(struct cpuinfo_x86 *c)
* (model = 0x14) and later actually support it.
* (AMD Erratum #110, docId: 25759).
*/
- if (c->x86_model < 0x14 && cpu_has(c, X86_FEATURE_LAHF_LM)) {
+ if (c->x86_model < 0x14 && cpu_has(c, X86_FEATURE_LAHF_LM) && !cpu_has(c, X86_FEATURE_HYPERVISOR)) {
clear_cpu_cap(c, X86_FEATURE_LAHF_LM);
if (!rdmsrl_amd_safe(0xc001100d, &value)) {
value &= ~BIT_64(32);
@@ -1073,6 +1075,10 @@ static void init_amd(struct cpuinfo_x86 *c)
/* AMD CPUs don't need fencing after x2APIC/TSC_DEADLINE MSR writes. */
clear_cpu_cap(c, X86_FEATURE_APIC_MSRS_FENCE);
+
+ /* Enable Translation Cache Extension */
+ if (cpu_has(c, X86_FEATURE_TCE))
+ msr_set_bit(MSR_EFER, _EFER_TCE);
}
#ifdef CONFIG_X86_32
@@ -1105,8 +1111,8 @@ static void cpu_detect_tlb_amd(struct cpuinfo_x86 *c)
cpuid(0x80000006, &eax, &ebx, &ecx, &edx);
- tlb_lld_4k[ENTRIES] = (ebx >> 16) & mask;
- tlb_lli_4k[ENTRIES] = ebx & mask;
+ tlb_lld_4k = (ebx >> 16) & mask;
+ tlb_lli_4k = ebx & mask;
/*
* K8 doesn't have 2M/4M entries in the L2 TLB so read out the L1 TLB
@@ -1119,26 +1125,30 @@ static void cpu_detect_tlb_amd(struct cpuinfo_x86 *c)
/* Handle DTLB 2M and 4M sizes, fall back to L1 if L2 is disabled */
if (!((eax >> 16) & mask))
- tlb_lld_2m[ENTRIES] = (cpuid_eax(0x80000005) >> 16) & 0xff;
+ tlb_lld_2m = (cpuid_eax(0x80000005) >> 16) & 0xff;
else
- tlb_lld_2m[ENTRIES] = (eax >> 16) & mask;
+ tlb_lld_2m = (eax >> 16) & mask;
/* a 4M entry uses two 2M entries */
- tlb_lld_4m[ENTRIES] = tlb_lld_2m[ENTRIES] >> 1;
+ tlb_lld_4m = tlb_lld_2m >> 1;
/* Handle ITLB 2M and 4M sizes, fall back to L1 if L2 is disabled */
if (!(eax & mask)) {
/* Erratum 658 */
if (c->x86 == 0x15 && c->x86_model <= 0x1f) {
- tlb_lli_2m[ENTRIES] = 1024;
+ tlb_lli_2m = 1024;
} else {
cpuid(0x80000005, &eax, &ebx, &ecx, &edx);
- tlb_lli_2m[ENTRIES] = eax & 0xff;
+ tlb_lli_2m = eax & 0xff;
}
} else
- tlb_lli_2m[ENTRIES] = eax & mask;
+ tlb_lli_2m = eax & mask;
+
+ tlb_lli_4m = tlb_lli_2m >> 1;
- tlb_lli_4m[ENTRIES] = tlb_lli_2m[ENTRIES] >> 1;
+ /* Max number of pages INVLPGB can invalidate in one shot */
+ if (cpu_has(c, X86_FEATURE_INVLPGB))
+ invlpgb_count_max = (cpuid_edx(0x80000008) & 0xffff) + 1;
}
static const struct cpu_dev amd_cpu_dev = {
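The new invlpgb_count_max value is derived from CPUID leaf 0x80000008. A small user-space sketch of the same computation, with the field layout taken from the hunk above; __get_cpuid() is the compiler's helper, not a kernel API.

#include <cpuid.h>
#include <stdio.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (!__get_cpuid(0x80000008, &eax, &ebx, &ecx, &edx))
		return 1;

	/*
	 * EDX[15:0] + 1 is the maximum number of pages INVLPGB can
	 * invalidate in one shot, mirroring the kernel computation above.
	 */
	printf("invlpgb_count_max = %u\n", (edx & 0xffff) + 1);
	return 0;
}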
diff --git a/arch/x86/kernel/cpu/aperfmperf.c b/arch/x86/kernel/cpu/aperfmperf.c
index f642de2ebdac..6cf31a1649c4 100644
--- a/arch/x86/kernel/cpu/aperfmperf.c
+++ b/arch/x86/kernel/cpu/aperfmperf.c
@@ -498,7 +498,7 @@ void arch_scale_freq_tick(void)
*/
#define MAX_SAMPLE_AGE ((unsigned long)HZ / 50)
-unsigned int arch_freq_get_on_cpu(int cpu)
+int arch_freq_get_on_cpu(int cpu)
{
struct aperfmperf *s = per_cpu_ptr(&cpu_samples, cpu);
unsigned int seq, freq;
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index a5d0998d7604..4386aa6c69e1 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -113,6 +113,10 @@ DEFINE_STATIC_KEY_FALSE(switch_mm_cond_ibpb);
/* Control unconditional IBPB in switch_mm() */
DEFINE_STATIC_KEY_FALSE(switch_mm_always_ibpb);
+/* Control IBPB on vCPU load */
+DEFINE_STATIC_KEY_FALSE(switch_vcpu_ibpb);
+EXPORT_SYMBOL_GPL(switch_vcpu_ibpb);
+
/* Control MDS CPU buffer clear before idling (halt, mwait) */
DEFINE_STATIC_KEY_FALSE(mds_idle_clear);
EXPORT_SYMBOL_GPL(mds_idle_clear);
@@ -234,7 +238,7 @@ static void x86_amd_ssb_disable(void)
/* Default mitigation for MDS-affected CPUs */
static enum mds_mitigations mds_mitigation __ro_after_init =
- IS_ENABLED(CONFIG_MITIGATION_MDS) ? MDS_MITIGATION_FULL : MDS_MITIGATION_OFF;
+ IS_ENABLED(CONFIG_MITIGATION_MDS) ? MDS_MITIGATION_AUTO : MDS_MITIGATION_OFF;
static bool mds_nosmt __ro_after_init = false;
static const char * const mds_strings[] = {
@@ -243,6 +247,40 @@ static const char * const mds_strings[] = {
[MDS_MITIGATION_VMWERV] = "Vulnerable: Clear CPU buffers attempted, no microcode",
};
+enum taa_mitigations {
+ TAA_MITIGATION_OFF,
+ TAA_MITIGATION_AUTO,
+ TAA_MITIGATION_UCODE_NEEDED,
+ TAA_MITIGATION_VERW,
+ TAA_MITIGATION_TSX_DISABLED,
+};
+
+/* Default mitigation for TAA-affected CPUs */
+static enum taa_mitigations taa_mitigation __ro_after_init =
+ IS_ENABLED(CONFIG_MITIGATION_TAA) ? TAA_MITIGATION_AUTO : TAA_MITIGATION_OFF;
+
+enum mmio_mitigations {
+ MMIO_MITIGATION_OFF,
+ MMIO_MITIGATION_AUTO,
+ MMIO_MITIGATION_UCODE_NEEDED,
+ MMIO_MITIGATION_VERW,
+};
+
+/* Default mitigation for Processor MMIO Stale Data vulnerabilities */
+static enum mmio_mitigations mmio_mitigation __ro_after_init =
+ IS_ENABLED(CONFIG_MITIGATION_MMIO_STALE_DATA) ? MMIO_MITIGATION_AUTO : MMIO_MITIGATION_OFF;
+
+enum rfds_mitigations {
+ RFDS_MITIGATION_OFF,
+ RFDS_MITIGATION_AUTO,
+ RFDS_MITIGATION_VERW,
+ RFDS_MITIGATION_UCODE_NEEDED,
+};
+
+/* Default mitigation for Register File Data Sampling */
+static enum rfds_mitigations rfds_mitigation __ro_after_init =
+ IS_ENABLED(CONFIG_MITIGATION_RFDS) ? RFDS_MITIGATION_AUTO : RFDS_MITIGATION_OFF;
+
static void __init mds_select_mitigation(void)
{
if (!boot_cpu_has_bug(X86_BUG_MDS) || cpu_mitigations_off()) {
@@ -250,6 +288,9 @@ static void __init mds_select_mitigation(void)
return;
}
+ if (mds_mitigation == MDS_MITIGATION_AUTO)
+ mds_mitigation = MDS_MITIGATION_FULL;
+
if (mds_mitigation == MDS_MITIGATION_FULL) {
if (!boot_cpu_has(X86_FEATURE_MD_CLEAR))
mds_mitigation = MDS_MITIGATION_VMWERV;
@@ -286,16 +327,6 @@ early_param("mds", mds_cmdline);
#undef pr_fmt
#define pr_fmt(fmt) "TAA: " fmt
-enum taa_mitigations {
- TAA_MITIGATION_OFF,
- TAA_MITIGATION_UCODE_NEEDED,
- TAA_MITIGATION_VERW,
- TAA_MITIGATION_TSX_DISABLED,
-};
-
-/* Default mitigation for TAA-affected CPUs */
-static enum taa_mitigations taa_mitigation __ro_after_init =
- IS_ENABLED(CONFIG_MITIGATION_TAA) ? TAA_MITIGATION_VERW : TAA_MITIGATION_OFF;
static bool taa_nosmt __ro_after_init;
static const char * const taa_strings[] = {
@@ -386,15 +417,6 @@ early_param("tsx_async_abort", tsx_async_abort_parse_cmdline);
#undef pr_fmt
#define pr_fmt(fmt) "MMIO Stale Data: " fmt
-enum mmio_mitigations {
- MMIO_MITIGATION_OFF,
- MMIO_MITIGATION_UCODE_NEEDED,
- MMIO_MITIGATION_VERW,
-};
-
-/* Default mitigation for Processor MMIO Stale Data vulnerabilities */
-static enum mmio_mitigations mmio_mitigation __ro_after_init =
- IS_ENABLED(CONFIG_MITIGATION_MMIO_STALE_DATA) ? MMIO_MITIGATION_VERW : MMIO_MITIGATION_OFF;
static bool mmio_nosmt __ro_after_init = false;
static const char * const mmio_strings[] = {
@@ -483,16 +505,6 @@ early_param("mmio_stale_data", mmio_stale_data_parse_cmdline);
#undef pr_fmt
#define pr_fmt(fmt) "Register File Data Sampling: " fmt
-enum rfds_mitigations {
- RFDS_MITIGATION_OFF,
- RFDS_MITIGATION_VERW,
- RFDS_MITIGATION_UCODE_NEEDED,
-};
-
-/* Default mitigation for Register File Data Sampling */
-static enum rfds_mitigations rfds_mitigation __ro_after_init =
- IS_ENABLED(CONFIG_MITIGATION_RFDS) ? RFDS_MITIGATION_VERW : RFDS_MITIGATION_OFF;
-
static const char * const rfds_strings[] = {
[RFDS_MITIGATION_OFF] = "Vulnerable",
[RFDS_MITIGATION_VERW] = "Mitigation: Clear Register File",
@@ -508,6 +520,9 @@ static void __init rfds_select_mitigation(void)
if (rfds_mitigation == RFDS_MITIGATION_OFF)
return;
+ if (rfds_mitigation == RFDS_MITIGATION_AUTO)
+ rfds_mitigation = RFDS_MITIGATION_VERW;
+
if (x86_arch_cap_msr & ARCH_CAP_RFDS_CLEAR)
setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
else
@@ -1293,9 +1308,13 @@ static __ro_after_init enum spectre_v2_mitigation_cmd spectre_v2_cmd;
static enum spectre_v2_user_cmd __init
spectre_v2_parse_user_cmdline(void)
{
+ enum spectre_v2_user_cmd mode;
char arg[20];
int ret, i;
+ mode = IS_ENABLED(CONFIG_MITIGATION_SPECTRE_V2) ?
+ SPECTRE_V2_USER_CMD_AUTO : SPECTRE_V2_USER_CMD_NONE;
+
switch (spectre_v2_cmd) {
case SPECTRE_V2_CMD_NONE:
return SPECTRE_V2_USER_CMD_NONE;
@@ -1308,7 +1327,7 @@ spectre_v2_parse_user_cmdline(void)
ret = cmdline_find_option(boot_command_line, "spectre_v2_user",
arg, sizeof(arg));
if (ret < 0)
- return SPECTRE_V2_USER_CMD_AUTO;
+ return mode;
for (i = 0; i < ARRAY_SIZE(v2_user_options); i++) {
if (match_option(arg, ret, v2_user_options[i].option)) {
@@ -1318,8 +1337,8 @@ spectre_v2_parse_user_cmdline(void)
}
}
- pr_err("Unknown user space protection option (%s). Switching to AUTO select\n", arg);
- return SPECTRE_V2_USER_CMD_AUTO;
+ pr_err("Unknown user space protection option (%s). Switching to default\n", arg);
+ return mode;
}
static inline bool spectre_v2_in_ibrs_mode(enum spectre_v2_mitigation mode)
@@ -1331,16 +1350,11 @@ static void __init
spectre_v2_user_select_mitigation(void)
{
enum spectre_v2_user_mitigation mode = SPECTRE_V2_USER_NONE;
- bool smt_possible = IS_ENABLED(CONFIG_SMP);
enum spectre_v2_user_cmd cmd;
if (!boot_cpu_has(X86_FEATURE_IBPB) && !boot_cpu_has(X86_FEATURE_STIBP))
return;
- if (cpu_smt_control == CPU_SMT_FORCE_DISABLED ||
- cpu_smt_control == CPU_SMT_NOT_SUPPORTED)
- smt_possible = false;
-
cmd = spectre_v2_parse_user_cmdline();
switch (cmd) {
case SPECTRE_V2_USER_CMD_NONE:
@@ -1364,7 +1378,7 @@ spectre_v2_user_select_mitigation(void)
/* Initialize Indirect Branch Prediction Barrier */
if (boot_cpu_has(X86_FEATURE_IBPB)) {
- setup_force_cpu_cap(X86_FEATURE_USE_IBPB);
+ static_branch_enable(&switch_vcpu_ibpb);
spectre_v2_user_ibpb = mode;
switch (cmd) {
@@ -1401,7 +1415,7 @@ spectre_v2_user_select_mitigation(void)
* so allow for STIBP to be selected in those cases.
*/
if (!boot_cpu_has(X86_FEATURE_STIBP) ||
- !smt_possible ||
+ !cpu_smt_possible() ||
(spectre_v2_in_eibrs_mode(spectre_v2_enabled) &&
!boot_cpu_has(X86_FEATURE_AUTOIBRS)))
return;
@@ -1973,6 +1987,7 @@ void cpu_bugs_smt_update(void)
switch (mds_mitigation) {
case MDS_MITIGATION_FULL:
+ case MDS_MITIGATION_AUTO:
case MDS_MITIGATION_VMWERV:
if (sched_smt_active() && !boot_cpu_has(X86_BUG_MSBDS_ONLY))
pr_warn_once(MDS_MSG_SMT);
@@ -1984,6 +1999,7 @@ void cpu_bugs_smt_update(void)
switch (taa_mitigation) {
case TAA_MITIGATION_VERW:
+ case TAA_MITIGATION_AUTO:
case TAA_MITIGATION_UCODE_NEEDED:
if (sched_smt_active())
pr_warn_once(TAA_MSG_SMT);
@@ -1995,6 +2011,7 @@ void cpu_bugs_smt_update(void)
switch (mmio_mitigation) {
case MMIO_MITIGATION_VERW:
+ case MMIO_MITIGATION_AUTO:
case MMIO_MITIGATION_UCODE_NEEDED:
if (sched_smt_active())
pr_warn_once(MMIO_MSG_SMT);
@@ -2522,6 +2539,7 @@ enum srso_mitigation {
SRSO_MITIGATION_SAFE_RET,
SRSO_MITIGATION_IBPB,
SRSO_MITIGATION_IBPB_ON_VMEXIT,
+ SRSO_MITIGATION_BP_SPEC_REDUCE,
};
enum srso_mitigation_cmd {
@@ -2539,7 +2557,8 @@ static const char * const srso_strings[] = {
[SRSO_MITIGATION_MICROCODE] = "Vulnerable: Microcode, no safe RET",
[SRSO_MITIGATION_SAFE_RET] = "Mitigation: Safe RET",
[SRSO_MITIGATION_IBPB] = "Mitigation: IBPB",
- [SRSO_MITIGATION_IBPB_ON_VMEXIT] = "Mitigation: IBPB on VMEXIT only"
+ [SRSO_MITIGATION_IBPB_ON_VMEXIT] = "Mitigation: IBPB on VMEXIT only",
+ [SRSO_MITIGATION_BP_SPEC_REDUCE] = "Mitigation: Reduced Speculation"
};
static enum srso_mitigation srso_mitigation __ro_after_init = SRSO_MITIGATION_NONE;
@@ -2578,7 +2597,7 @@ static void __init srso_select_mitigation(void)
srso_cmd == SRSO_CMD_OFF) {
if (boot_cpu_has(X86_FEATURE_SBPB))
x86_pred_cmd = PRED_CMD_SBPB;
- return;
+ goto out;
}
if (has_microcode) {
@@ -2590,7 +2609,7 @@ static void __init srso_select_mitigation(void)
*/
if (boot_cpu_data.x86 < 0x19 && !cpu_smt_possible()) {
setup_force_cpu_cap(X86_FEATURE_SRSO_NO);
- return;
+ goto out;
}
if (retbleed_mitigation == RETBLEED_MITIGATION_IBPB) {
@@ -2670,6 +2689,12 @@ static void __init srso_select_mitigation(void)
ibpb_on_vmexit:
case SRSO_CMD_IBPB_ON_VMEXIT:
+ if (boot_cpu_has(X86_FEATURE_SRSO_BP_SPEC_REDUCE)) {
+ pr_notice("Reducing speculation to address VM/HV SRSO attack vector.\n");
+ srso_mitigation = SRSO_MITIGATION_BP_SPEC_REDUCE;
+ break;
+ }
+
if (IS_ENABLED(CONFIG_MITIGATION_IBPB_ENTRY)) {
if (has_microcode) {
setup_force_cpu_cap(X86_FEATURE_IBPB_ON_VMEXIT);
@@ -2691,7 +2716,15 @@ ibpb_on_vmexit:
}
out:
- pr_info("%s\n", srso_strings[srso_mitigation]);
+ /*
+ * Clear the feature flag if this mitigation is not selected as that
+ * feature flag controls the BpSpecReduce MSR bit toggling in KVM.
+ */
+ if (srso_mitigation != SRSO_MITIGATION_BP_SPEC_REDUCE)
+ setup_clear_cpu_cap(X86_FEATURE_SRSO_BP_SPEC_REDUCE);
+
+ if (srso_mitigation != SRSO_MITIGATION_NONE)
+ pr_info("%s\n", srso_strings[srso_mitigation]);
}
#undef pr_fmt
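The recurring change in this file is that each mitigation gains an AUTO state: the Kconfig default no longer hardcodes a concrete mode, and AUTO is resolved to one inside the *_select_mitigation() function. A hypothetical, minimal sketch of the pattern; the FOO_* names and CONFIG_MITIGATION_FOO are invented for illustration.

enum foo_mitigations {
	FOO_MITIGATION_OFF,
	FOO_MITIGATION_AUTO,
	FOO_MITIGATION_FULL,
};

/* Kconfig only decides between "off" and "pick a mode at boot". */
static enum foo_mitigations foo_mitigation __ro_after_init =
	IS_ENABLED(CONFIG_MITIGATION_FOO) ? FOO_MITIGATION_AUTO : FOO_MITIGATION_OFF;

static void __init foo_select_mitigation(void)
{
	if (foo_mitigation == FOO_MITIGATION_OFF)
		return;

	/* Resolve AUTO to a concrete mode once CPU/microcode state is known. */
	if (foo_mitigation == FOO_MITIGATION_AUTO)
		foo_mitigation = FOO_MITIGATION_FULL;
}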
diff --git a/arch/x86/kernel/cpu/bus_lock.c b/arch/x86/kernel/cpu/bus_lock.c
index 6cba85c79d42..97222efb4d2a 100644
--- a/arch/x86/kernel/cpu/bus_lock.c
+++ b/arch/x86/kernel/cpu/bus_lock.c
@@ -192,7 +192,13 @@ static void __split_lock_reenable(struct work_struct *work)
{
sld_update_msr(true);
}
-static DECLARE_DELAYED_WORK(sl_reenable, __split_lock_reenable);
+/*
+ * In order for each CPU to schedule its delayed work independently of the
+ * others, the delayed work struct must be per-CPU. This is not required when
+ * sysctl_sld_mitigate is enabled because of the semaphore that limits
+ * the number of simultaneously scheduled delayed works to 1.
+ */
+static DEFINE_PER_CPU(struct delayed_work, sl_reenable);
/*
* If a CPU goes offline with pending delayed work to re-enable split lock
@@ -213,7 +219,7 @@ static int splitlock_cpu_offline(unsigned int cpu)
static void split_lock_warn(unsigned long ip)
{
- struct delayed_work *work;
+ struct delayed_work *work = NULL;
int cpu;
if (!current->reported_split_lock)
@@ -235,11 +241,17 @@ static void split_lock_warn(unsigned long ip)
if (down_interruptible(&buslock_sem) == -EINTR)
return;
work = &sl_reenable_unlock;
- } else {
- work = &sl_reenable;
}
cpu = get_cpu();
+
+ if (!work) {
+ work = this_cpu_ptr(&sl_reenable);
+ /* Deferred initialization of per-CPU struct */
+ if (!work->work.func)
+ INIT_DELAYED_WORK(work, __split_lock_reenable);
+ }
+
schedule_delayed_work_on(cpu, work, 2);
/* Disable split lock detection on this CPU to make progress */
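A condensed sketch of the per-CPU, lazily initialized delayed-work pattern introduced above; my_work, my_handler and my_trigger are illustrative names.

static DEFINE_PER_CPU(struct delayed_work, my_work);

static void my_handler(struct work_struct *work)
{
	/* undo whatever my_trigger() temporarily disabled on this CPU */
}

static void my_trigger(void)
{
	struct delayed_work *work;
	int cpu = get_cpu();

	work = this_cpu_ptr(&my_work);
	/* work_struct::func doubles as an "already initialized" marker */
	if (!work->work.func)
		INIT_DELAYED_WORK(work, my_handler);

	schedule_delayed_work_on(cpu, work, 2);
	put_cpu();
}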
diff --git a/arch/x86/kernel/cpu/cacheinfo.c b/arch/x86/kernel/cpu/cacheinfo.c
index e6fa03ed9172..b3a520959b51 100644
--- a/arch/x86/kernel/cpu/cacheinfo.c
+++ b/arch/x86/kernel/cpu/cacheinfo.c
@@ -8,21 +8,19 @@
* Andi Kleen / Andreas Herrmann : CPUID4 emulation on AMD.
*/
-#include <linux/slab.h>
#include <linux/cacheinfo.h>
+#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/cpuhotplug.h>
-#include <linux/sched.h>
-#include <linux/capability.h>
-#include <linux/sysfs.h>
#include <linux/pci.h>
#include <linux/stop_machine.h>
+#include <linux/sysfs.h>
-#include <asm/cpufeature.h>
-#include <asm/cacheinfo.h>
#include <asm/amd_nb.h>
-#include <asm/smp.h>
+#include <asm/cacheinfo.h>
+#include <asm/cpufeature.h>
#include <asm/mtrr.h>
+#include <asm/smp.h>
#include <asm/tlbflush.h>
#include "cpu.h"
@@ -31,7 +29,6 @@
#define LVL_1_DATA 2
#define LVL_2 3
#define LVL_3 4
-#define LVL_TRACE 5
/* Shared last level cache maps */
DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_llc_shared_map);
@@ -96,10 +93,6 @@ static const struct _cache_table cache_table[] =
{ 0x66, LVL_1_DATA, 8 }, /* 4-way set assoc, sectored cache, 64 byte line size */
{ 0x67, LVL_1_DATA, 16 }, /* 4-way set assoc, sectored cache, 64 byte line size */
{ 0x68, LVL_1_DATA, 32 }, /* 4-way set assoc, sectored cache, 64 byte line size */
- { 0x70, LVL_TRACE, 12 }, /* 8-way set assoc */
- { 0x71, LVL_TRACE, 16 }, /* 8-way set assoc */
- { 0x72, LVL_TRACE, 32 }, /* 8-way set assoc */
- { 0x73, LVL_TRACE, 64 }, /* 8-way set assoc */
{ 0x78, LVL_2, MB(1) }, /* 4-way set assoc, 64 byte line size */
{ 0x79, LVL_2, 128 }, /* 8-way set assoc, sectored cache, 64 byte line size */
{ 0x7a, LVL_2, 256 }, /* 8-way set assoc, sectored cache, 64 byte line size */
@@ -787,19 +780,13 @@ void init_intel_cacheinfo(struct cpuinfo_x86 *c)
}
}
}
- /*
- * Don't use cpuid2 if cpuid4 is supported. For P4, we use cpuid2 for
- * trace cache
- */
- if ((!ci->num_leaves || c->x86 == 15) && c->cpuid_level > 1) {
+
+ /* Don't use CPUID(2) if CPUID(4) is supported. */
+ if (!ci->num_leaves && c->cpuid_level > 1) {
/* supports eax=2 call */
int j, n;
unsigned int regs[4];
unsigned char *dp = (unsigned char *)regs;
- int only_trace = 0;
-
- if (ci->num_leaves && c->x86 == 15)
- only_trace = 1;
/* Number of times to iterate */
n = cpuid_eax(2) & 0xFF;
@@ -808,7 +795,7 @@ void init_intel_cacheinfo(struct cpuinfo_x86 *c)
cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]);
/* If bit 31 is set, this is an unknown format */
- for (j = 0 ; j < 3 ; j++)
+ for (j = 0 ; j < 4 ; j++)
if (regs[j] & (1 << 31))
regs[j] = 0;
@@ -820,8 +807,6 @@ void init_intel_cacheinfo(struct cpuinfo_x86 *c)
/* look up this descriptor in the table */
while (cache_table[k].descriptor != 0) {
if (cache_table[k].descriptor == des) {
- if (only_trace && cache_table[k].cache_type != LVL_TRACE)
- break;
switch (cache_table[k].cache_type) {
case LVL_1_INST:
l1i += cache_table[k].size;
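The loop-bound fix (j < 4 instead of j < 3) matters because all four registers returned by CPUID(2) carry descriptor bytes. A user-space sketch of the same walk; it uses the compiler's __cpuid() macro and the output format is illustrative.

#include <cpuid.h>
#include <stdio.h>

int main(void)
{
	unsigned int regs[4];
	unsigned char *dp = (unsigned char *)regs;
	int i, j, n;

	__cpuid(2, regs[0], regs[1], regs[2], regs[3]);
	n = regs[0] & 0xff;	/* AL: number of times to repeat CPUID(2) */

	for (i = 0; i < n; i++) {
		__cpuid(2, regs[0], regs[1], regs[2], regs[3]);

		/* Bit 31 set means the register holds no valid descriptors. */
		for (j = 0; j < 4; j++)
			if (regs[j] & (1u << 31))
				regs[j] = 0;

		/* Byte 0 is the repeat count, bytes 1..15 are descriptors. */
		for (j = 1; j < 16; j++)
			if (dp[j])
				printf("descriptor 0x%02x\n", dp[j]);
	}
	return 0;
}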
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 7cce91b19fb2..12126adbc3a9 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -667,8 +667,8 @@ static void filter_cpuid_features(struct cpuinfo_x86 *c, bool warn)
if (!warn)
continue;
- pr_warn("CPU: CPU feature " X86_CAP_FMT " disabled, no CPUID level 0x%x\n",
- x86_cap_flag(df->feature), df->level);
+ pr_warn("CPU: CPU feature %s disabled, no CPUID level 0x%x\n",
+ x86_cap_flags[df->feature], df->level);
}
}
@@ -846,13 +846,13 @@ void cpu_detect_cache_sizes(struct cpuinfo_x86 *c)
c->x86_cache_size = l2size;
}
-u16 __read_mostly tlb_lli_4k[NR_INFO];
-u16 __read_mostly tlb_lli_2m[NR_INFO];
-u16 __read_mostly tlb_lli_4m[NR_INFO];
-u16 __read_mostly tlb_lld_4k[NR_INFO];
-u16 __read_mostly tlb_lld_2m[NR_INFO];
-u16 __read_mostly tlb_lld_4m[NR_INFO];
-u16 __read_mostly tlb_lld_1g[NR_INFO];
+u16 __read_mostly tlb_lli_4k;
+u16 __read_mostly tlb_lli_2m;
+u16 __read_mostly tlb_lli_4m;
+u16 __read_mostly tlb_lld_4k;
+u16 __read_mostly tlb_lld_2m;
+u16 __read_mostly tlb_lld_4m;
+u16 __read_mostly tlb_lld_1g;
static void cpu_detect_tlb(struct cpuinfo_x86 *c)
{
@@ -860,12 +860,10 @@ static void cpu_detect_tlb(struct cpuinfo_x86 *c)
this_cpu->c_detect_tlb(c);
pr_info("Last level iTLB entries: 4KB %d, 2MB %d, 4MB %d\n",
- tlb_lli_4k[ENTRIES], tlb_lli_2m[ENTRIES],
- tlb_lli_4m[ENTRIES]);
+ tlb_lli_4k, tlb_lli_2m, tlb_lli_4m);
pr_info("Last level dTLB entries: 4KB %d, 2MB %d, 4MB %d, 1GB %d\n",
- tlb_lld_4k[ENTRIES], tlb_lld_2m[ENTRIES],
- tlb_lld_4m[ENTRIES], tlb_lld_1g[ENTRIES]);
+ tlb_lld_4k, tlb_lld_2m, tlb_lld_4m, tlb_lld_1g);
}
void get_cpu_vendor(struct cpuinfo_x86 *c)
@@ -1164,7 +1162,7 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
VULNWL_INTEL(INTEL_CORE_YONAH, NO_SSB),
- VULNWL_INTEL(INTEL_ATOM_AIRMONT_MID, NO_SSB | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT | MSBDS_ONLY),
+ VULNWL_INTEL(INTEL_ATOM_SILVERMONT_MID2,NO_SSB | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT | MSBDS_ONLY),
VULNWL_INTEL(INTEL_ATOM_AIRMONT_NP, NO_SSB | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT),
VULNWL_INTEL(INTEL_ATOM_GOLDMONT, NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
@@ -1205,6 +1203,9 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
#define VULNBL_INTEL_STEPS(vfm, max_stepping, issues) \
X86_MATCH_VFM_STEPS(vfm, X86_STEP_MIN, max_stepping, issues)
+#define VULNBL_INTEL_TYPE(vfm, cpu_type, issues) \
+ X86_MATCH_VFM_CPU_TYPE(vfm, INTEL_CPU_TYPE_##cpu_type, issues)
+
#define VULNBL_AMD(family, blacklist) \
VULNBL(AMD, family, X86_MODEL_ANY, blacklist)
@@ -1253,9 +1254,9 @@ static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = {
VULNBL_INTEL_STEPS(INTEL_TIGERLAKE, X86_STEP_MAX, GDS),
VULNBL_INTEL_STEPS(INTEL_LAKEFIELD, X86_STEP_MAX, MMIO | MMIO_SBDS | RETBLEED),
VULNBL_INTEL_STEPS(INTEL_ROCKETLAKE, X86_STEP_MAX, MMIO | RETBLEED | GDS),
- VULNBL_INTEL_STEPS(INTEL_ALDERLAKE, X86_STEP_MAX, RFDS),
+ VULNBL_INTEL_TYPE(INTEL_ALDERLAKE, ATOM, RFDS),
VULNBL_INTEL_STEPS(INTEL_ALDERLAKE_L, X86_STEP_MAX, RFDS),
- VULNBL_INTEL_STEPS(INTEL_RAPTORLAKE, X86_STEP_MAX, RFDS),
+ VULNBL_INTEL_TYPE(INTEL_RAPTORLAKE, ATOM, RFDS),
VULNBL_INTEL_STEPS(INTEL_RAPTORLAKE_P, X86_STEP_MAX, RFDS),
VULNBL_INTEL_STEPS(INTEL_RAPTORLAKE_S, X86_STEP_MAX, RFDS),
VULNBL_INTEL_STEPS(INTEL_ATOM_GRACEMONT, X86_STEP_MAX, RFDS),
@@ -1331,8 +1332,10 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
setup_force_cpu_bug(X86_BUG_SPECTRE_V1);
- if (!cpu_matches(cpu_vuln_whitelist, NO_SPECTRE_V2))
+ if (!cpu_matches(cpu_vuln_whitelist, NO_SPECTRE_V2)) {
setup_force_cpu_bug(X86_BUG_SPECTRE_V2);
+ setup_force_cpu_bug(X86_BUG_SPECTRE_V2_USER);
+ }
if (!cpu_matches(cpu_vuln_whitelist, NO_SSB) &&
!(x86_arch_cap_msr & ARCH_CAP_SSB_NO) &&
@@ -1479,15 +1482,96 @@ static void detect_nopl(void)
#endif
}
+static inline bool parse_set_clear_cpuid(char *arg, bool set)
+{
+ char *opt;
+ int taint = 0;
+
+ while (arg) {
+ bool found __maybe_unused = false;
+ unsigned int bit;
+
+ opt = strsep(&arg, ",");
+
+ /*
+ * Handle naked numbers first for feature flags which don't
+ * have names. It doesn't make sense for a bug not to have a
+ * name so don't handle bug flags here.
+ */
+ if (!kstrtouint(opt, 10, &bit)) {
+ if (bit < NCAPINTS * 32) {
+
+ if (set) {
+ pr_warn("setcpuid: force-enabling CPU feature flag:");
+ setup_force_cpu_cap(bit);
+ } else {
+ pr_warn("clearcpuid: force-disabling CPU feature flag:");
+ setup_clear_cpu_cap(bit);
+ }
+ /* empty-string, i.e., ""-defined feature flags */
+ if (!x86_cap_flags[bit])
+ pr_cont(" %d:%d\n", bit >> 5, bit & 31);
+ else
+ pr_cont(" %s\n", x86_cap_flags[bit]);
+
+ taint++;
+ }
+ /*
+ * The assumption is that there are no feature names with only
+ * numbers in the name, thus go to the next argument.
+ */
+ continue;
+ }
+
+ for (bit = 0; bit < 32 * (NCAPINTS + NBUGINTS); bit++) {
+ const char *flag;
+ const char *kind;
+
+ if (bit < 32 * NCAPINTS) {
+ flag = x86_cap_flags[bit];
+ kind = "feature";
+ } else {
+ kind = "bug";
+ flag = x86_bug_flags[bit - (32 * NCAPINTS)];
+ }
+
+ if (!flag)
+ continue;
+
+ if (strcmp(flag, opt))
+ continue;
+
+ if (set) {
+ pr_warn("setcpuid: force-enabling CPU %s flag: %s\n",
+ kind, flag);
+ setup_force_cpu_cap(bit);
+ } else {
+ pr_warn("clearcpuid: force-disabling CPU %s flag: %s\n",
+ kind, flag);
+ setup_clear_cpu_cap(bit);
+ }
+ taint++;
+ found = true;
+ break;
+ }
+
+ if (!found)
+ pr_warn("%s: unknown CPU flag: %s", set ? "setcpuid" : "clearcpuid", opt);
+ }
+
+ return taint;
+}
+
+
/*
* We parse cpu parameters early because fpu__init_system() is executed
* before parse_early_param().
*/
static void __init cpu_parse_early_param(void)
{
+ bool cpuid_taint = false;
char arg[128];
- char *argptr = arg, *opt;
- int arglen, taint = 0;
+ int arglen;
#ifdef CONFIG_X86_32
if (cmdline_find_option_bool(boot_command_line, "no387"))
@@ -1519,61 +1603,17 @@ static void __init cpu_parse_early_param(void)
setup_clear_cpu_cap(X86_FEATURE_FRED);
arglen = cmdline_find_option(boot_command_line, "clearcpuid", arg, sizeof(arg));
- if (arglen <= 0)
- return;
+ if (arglen > 0)
+ cpuid_taint |= parse_set_clear_cpuid(arg, false);
- pr_info("Clearing CPUID bits:");
-
- while (argptr) {
- bool found __maybe_unused = false;
- unsigned int bit;
+ arglen = cmdline_find_option(boot_command_line, "setcpuid", arg, sizeof(arg));
+ if (arglen > 0)
+ cpuid_taint |= parse_set_clear_cpuid(arg, true);
- opt = strsep(&argptr, ",");
-
- /*
- * Handle naked numbers first for feature flags which don't
- * have names.
- */
- if (!kstrtouint(opt, 10, &bit)) {
- if (bit < NCAPINTS * 32) {
-
- /* empty-string, i.e., ""-defined feature flags */
- if (!x86_cap_flags[bit])
- pr_cont(" " X86_CAP_FMT_NUM, x86_cap_flag_num(bit));
- else
- pr_cont(" " X86_CAP_FMT, x86_cap_flag(bit));
-
- setup_clear_cpu_cap(bit);
- taint++;
- }
- /*
- * The assumption is that there are no feature names with only
- * numbers in the name thus go to the next argument.
- */
- continue;
- }
-
- for (bit = 0; bit < 32 * NCAPINTS; bit++) {
- if (!x86_cap_flag(bit))
- continue;
-
- if (strcmp(x86_cap_flag(bit), opt))
- continue;
-
- pr_cont(" %s", opt);
- setup_clear_cpu_cap(bit);
- taint++;
- found = true;
- break;
- }
-
- if (!found)
- pr_cont(" (unknown: %s)", opt);
- }
- pr_cont("\n");
-
- if (taint)
+ if (cpuid_taint) {
+ pr_warn("!!! setcpuid=/clearcpuid= in use, this is for TESTING ONLY, may break things horribly. Tainting kernel.\n");
add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK);
+ }
}
/*
@@ -1610,6 +1650,7 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
c->cpu_index = 0;
filter_cpuid_features(c, false);
+ check_cpufeature_deps(c);
if (this_cpu->c_bsp_init)
this_cpu->c_bsp_init(c);
@@ -1870,6 +1911,9 @@ static void identify_cpu(struct cpuinfo_x86 *c)
/* Filter out anything that depends on CPUID levels we don't have */
filter_cpuid_features(c, true);
+ /* Check for unmet dependencies based on the CPUID dependency table */
+ check_cpufeature_deps(c);
+
/* If the model name is still unset, do table lookup. */
if (!c->x86_model_id[0]) {
const char *p;
@@ -1962,9 +2006,15 @@ static __init void identify_boot_cpu(void)
lkgs_init();
}
-void identify_secondary_cpu(struct cpuinfo_x86 *c)
+void identify_secondary_cpu(unsigned int cpu)
{
- BUG_ON(c == &boot_cpu_data);
+ struct cpuinfo_x86 *c = &cpu_data(cpu);
+
+ /* Copy boot_cpu_data only on the first bringup */
+ if (!c->initialized)
+ *c = boot_cpu_data;
+ c->cpu_index = cpu;
+
identify_cpu(c);
#ifdef CONFIG_X86_32
enable_sep_cpu();
@@ -1975,6 +2025,7 @@ void identify_secondary_cpu(struct cpuinfo_x86 *c)
update_gds_msr();
tsx_ap_init();
+ c->initialized = true;
}
void print_cpu_info(struct cpuinfo_x86 *c)
@@ -2005,27 +2056,40 @@ void print_cpu_info(struct cpuinfo_x86 *c)
}
/*
- * clearcpuid= was already parsed in cpu_parse_early_param(). This dummy
- * function prevents it from becoming an environment variable for init.
+ * clearcpuid= and setcpuid= were already parsed in cpu_parse_early_param().
+ * These dummy functions prevent them from becoming an environment variable for
+ * init.
*/
+
static __init int setup_clearcpuid(char *arg)
{
return 1;
}
__setup("clearcpuid=", setup_clearcpuid);
-DEFINE_PER_CPU_ALIGNED(struct pcpu_hot, pcpu_hot) = {
- .current_task = &init_task,
- .preempt_count = INIT_PREEMPT_COUNT,
- .top_of_stack = TOP_OF_INIT_STACK,
-};
-EXPORT_PER_CPU_SYMBOL(pcpu_hot);
-EXPORT_PER_CPU_SYMBOL(const_pcpu_hot);
+static __init int setup_setcpuid(char *arg)
+{
+ return 1;
+}
+__setup("setcpuid=", setup_setcpuid);
+
+DEFINE_PER_CPU_CACHE_HOT(struct task_struct *, current_task) = &init_task;
+EXPORT_PER_CPU_SYMBOL(current_task);
+EXPORT_PER_CPU_SYMBOL(const_current_task);
+
+DEFINE_PER_CPU_CACHE_HOT(int, __preempt_count) = INIT_PREEMPT_COUNT;
+EXPORT_PER_CPU_SYMBOL(__preempt_count);
+
+DEFINE_PER_CPU_CACHE_HOT(unsigned long, cpu_current_top_of_stack) = TOP_OF_INIT_STACK;
#ifdef CONFIG_X86_64
-DEFINE_PER_CPU_FIRST(struct fixed_percpu_data,
- fixed_percpu_data) __aligned(PAGE_SIZE) __visible;
-EXPORT_PER_CPU_SYMBOL_GPL(fixed_percpu_data);
+/*
+ * Note: Do not make this dependent on CONFIG_MITIGATION_CALL_DEPTH_TRACKING
+ * so that this space is reserved in the hot cache section even when the
+ * mitigation is disabled.
+ */
+DEFINE_PER_CPU_CACHE_HOT(u64, __x86_call_depth);
+EXPORT_PER_CPU_SYMBOL(__x86_call_depth);
static void wrmsrl_cstar(unsigned long val)
{
@@ -2089,18 +2153,15 @@ void syscall_init(void)
if (!cpu_feature_enabled(X86_FEATURE_FRED))
idt_syscall_init();
}
-
-#else /* CONFIG_X86_64 */
+#endif /* CONFIG_X86_64 */
#ifdef CONFIG_STACKPROTECTOR
-DEFINE_PER_CPU(unsigned long, __stack_chk_guard);
+DEFINE_PER_CPU_CACHE_HOT(unsigned long, __stack_chk_guard);
#ifndef CONFIG_SMP
EXPORT_PER_CPU_SYMBOL(__stack_chk_guard);
#endif
#endif
-#endif /* CONFIG_X86_64 */
-
/*
* Clear all 6 debug registers:
*/
diff --git a/arch/x86/kernel/cpu/cpu.h b/arch/x86/kernel/cpu/cpu.h
index 1beccefbaff9..51deb60a9d26 100644
--- a/arch/x86/kernel/cpu/cpu.h
+++ b/arch/x86/kernel/cpu/cpu.h
@@ -33,14 +33,6 @@ struct cpu_dev {
#endif
};
-struct _tlb_table {
- unsigned char descriptor;
- char tlb_type;
- unsigned int entries;
- /* unsigned int ways; */
- char info[128];
-};
-
#define cpu_dev_register(cpu_devX) \
static const struct cpu_dev *const __cpu_dev_##cpu_devX __used \
__section(".x86_cpu_dev.init") = \
diff --git a/arch/x86/kernel/cpu/cpuid-deps.c b/arch/x86/kernel/cpu/cpuid-deps.c
index df838e3bdbe0..a2fbea0be535 100644
--- a/arch/x86/kernel/cpu/cpuid-deps.c
+++ b/arch/x86/kernel/cpu/cpuid-deps.c
@@ -147,3 +147,38 @@ void setup_clear_cpu_cap(unsigned int feature)
{
do_clear_cpu_cap(NULL, feature);
}
+
+/*
+ * Return the feature "name" if available, otherwise return
+ * the X86_FEATURE_* numerals to make it easier to identify
+ * the feature.
+ */
+static const char *x86_feature_name(unsigned int feature, char *buf)
+{
+ if (x86_cap_flags[feature])
+ return x86_cap_flags[feature];
+
+ snprintf(buf, 16, "%d*32+%2d", feature / 32, feature % 32);
+
+ return buf;
+}
+
+void check_cpufeature_deps(struct cpuinfo_x86 *c)
+{
+ char feature_buf[16], depends_buf[16];
+ const struct cpuid_dep *d;
+
+ for (d = cpuid_deps; d->feature; d++) {
+ if (cpu_has(c, d->feature) && !cpu_has(c, d->depends)) {
+ /*
+ * Only warn about the first unmet dependency on the
+ * first CPU where it is encountered to avoid spamming
+ * the kernel log.
+ */
+ pr_warn_once("x86 CPU feature dependency check failure: CPU%d has '%s' enabled but '%s' disabled. Kernel might be fine, but no guarantees.\n",
+ smp_processor_id(),
+ x86_feature_name(d->feature, feature_buf),
+ x86_feature_name(d->depends, depends_buf));
+ }
+ }
+}
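check_cpufeature_deps() walks the cpuid_deps[] table defined earlier in this file; each entry is simply a (feature, prerequisite) pair, roughly as sketched below. The entries shown are illustrative only.

/*
 * Illustrative entries; the real table is defined near the top of
 * cpuid-deps.c and is terminated by an all-zero entry.
 */
static const struct cpuid_dep cpuid_deps_example[] = {
	{ X86_FEATURE_AVX2,	X86_FEATURE_AVX   },
	{ X86_FEATURE_XSAVES,	X86_FEATURE_XSAVE },
	{}
};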
diff --git a/arch/x86/kernel/cpu/debugfs.c b/arch/x86/kernel/cpu/debugfs.c
index cacfd3f6abef..1976fef2dfe5 100644
--- a/arch/x86/kernel/cpu/debugfs.c
+++ b/arch/x86/kernel/cpu/debugfs.c
@@ -16,8 +16,8 @@ static int cpu_debug_show(struct seq_file *m, void *p)
if (!c->initialized)
return 0;
- seq_printf(m, "initial_apicid: %x\n", c->topo.initial_apicid);
- seq_printf(m, "apicid: %x\n", c->topo.apicid);
+ seq_printf(m, "initial_apicid: 0x%x\n", c->topo.initial_apicid);
+ seq_printf(m, "apicid: 0x%x\n", c->topo.apicid);
seq_printf(m, "pkg_id: %u\n", c->topo.pkg_id);
seq_printf(m, "die_id: %u\n", c->topo.die_id);
seq_printf(m, "cu_id: %u\n", c->topo.cu_id);
diff --git a/arch/x86/kernel/cpu/hygon.c b/arch/x86/kernel/cpu/hygon.c
index c5191b06f9f2..6af4a4a90a52 100644
--- a/arch/x86/kernel/cpu/hygon.c
+++ b/arch/x86/kernel/cpu/hygon.c
@@ -240,26 +240,26 @@ static void cpu_detect_tlb_hygon(struct cpuinfo_x86 *c)
cpuid(0x80000006, &eax, &ebx, &ecx, &edx);
- tlb_lld_4k[ENTRIES] = (ebx >> 16) & mask;
- tlb_lli_4k[ENTRIES] = ebx & mask;
+ tlb_lld_4k = (ebx >> 16) & mask;
+ tlb_lli_4k = ebx & mask;
/* Handle DTLB 2M and 4M sizes, fall back to L1 if L2 is disabled */
if (!((eax >> 16) & mask))
- tlb_lld_2m[ENTRIES] = (cpuid_eax(0x80000005) >> 16) & 0xff;
+ tlb_lld_2m = (cpuid_eax(0x80000005) >> 16) & 0xff;
else
- tlb_lld_2m[ENTRIES] = (eax >> 16) & mask;
+ tlb_lld_2m = (eax >> 16) & mask;
/* a 4M entry uses two 2M entries */
- tlb_lld_4m[ENTRIES] = tlb_lld_2m[ENTRIES] >> 1;
+ tlb_lld_4m = tlb_lld_2m >> 1;
/* Handle ITLB 2M and 4M sizes, fall back to L1 if L2 is disabled */
if (!(eax & mask)) {
cpuid(0x80000005, &eax, &ebx, &ecx, &edx);
- tlb_lli_2m[ENTRIES] = eax & 0xff;
+ tlb_lli_2m = eax & 0xff;
} else
- tlb_lli_2m[ENTRIES] = eax & mask;
+ tlb_lli_2m = eax & mask;
- tlb_lli_4m[ENTRIES] = tlb_lli_2m[ENTRIES] >> 1;
+ tlb_lli_4m = tlb_lli_2m >> 1;
}
static const struct cpu_dev hygon_cpu_dev = {
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index 3dce22f00dc3..cdc9813871ef 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -1,40 +1,31 @@
// SPDX-License-Identifier: GPL-2.0
-#include <linux/kernel.h>
-#include <linux/pgtable.h>
-#include <linux/string.h>
#include <linux/bitops.h>
-#include <linux/smp.h>
-#include <linux/sched.h>
-#include <linux/sched/clock.h>
-#include <linux/thread_info.h>
#include <linux/init.h>
-#include <linux/uaccess.h>
+#include <linux/kernel.h>
+#include <linux/minmax.h>
+#include <linux/smp.h>
+#include <linux/string.h>
+
+#ifdef CONFIG_X86_64
+#include <linux/topology.h>
+#endif
-#include <asm/cpufeature.h>
-#include <asm/msr.h>
#include <asm/bugs.h>
+#include <asm/cpu_device_id.h>
+#include <asm/cpufeature.h>
#include <asm/cpu.h>
+#include <asm/hwcap2.h>
#include <asm/intel-family.h>
#include <asm/microcode.h>
-#include <asm/hwcap2.h>
-#include <asm/elf.h>
-#include <asm/cpu_device_id.h>
-#include <asm/resctrl.h>
+#include <asm/msr.h>
#include <asm/numa.h>
+#include <asm/resctrl.h>
#include <asm/thermal.h>
-
-#ifdef CONFIG_X86_64
-#include <linux/topology.h>
-#endif
+#include <asm/uaccess.h>
#include "cpu.h"
-#ifdef CONFIG_X86_LOCAL_APIC
-#include <asm/mpspec.h>
-#include <asm/apic.h>
-#endif
-
/*
* Processors which have self-snooping capability can handle conflicting
* memory type across CPUs by snooping its own cache. However, there exists
@@ -195,7 +186,7 @@ void intel_unlock_cpuid_leafs(struct cpuinfo_x86 *c)
if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
return;
- if (c->x86 < 6 || (c->x86 == 6 && c->x86_model < 0xd))
+ if (c->x86_vfm < INTEL_PENTIUM_M_DOTHAN)
return;
/*
@@ -210,10 +201,6 @@ static void early_init_intel(struct cpuinfo_x86 *c)
{
u64 misc_enable;
- if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
- (c->x86 == 0x6 && c->x86_model >= 0x0e))
- set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
-
if (c->x86 >= 6 && !cpu_has(c, X86_FEATURE_IA64))
c->microcode = intel_get_microcode_revision();
@@ -256,8 +243,8 @@ static void early_init_intel(struct cpuinfo_x86 *c)
#endif
/* CPUID workaround for 0F33/0F34 CPU */
- if (c->x86 == 0xF && c->x86_model == 0x3
- && (c->x86_stepping == 0x3 || c->x86_stepping == 0x4))
+ if (c->x86_vfm == INTEL_P4_PRESCOTT &&
+ (c->x86_stepping == 0x3 || c->x86_stepping == 0x4))
c->x86_phys_bits = 36;
/*
@@ -266,10 +253,16 @@ static void early_init_intel(struct cpuinfo_x86 *c)
*
* It is also reliable across cores and sockets. (but not across
* cabinets - we turn it off in that case explicitly.)
+ *
+ * Use a model-specific check for some older CPUs that have invariant
+ * TSC but may not report it architecturally via 8000_0007.
*/
if (c->x86_power & (1 << 8)) {
set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
+ } else if ((c->x86_vfm >= INTEL_P4_PRESCOTT && c->x86_vfm <= INTEL_P4_WILLAMETTE) ||
+ (c->x86_vfm >= INTEL_CORE_YONAH && c->x86_vfm <= INTEL_IVYBRIDGE)) {
+ set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
}
/* Penwell and Cloverview have the TSC which doesn't sleep on S3 */
@@ -298,12 +291,19 @@ static void early_init_intel(struct cpuinfo_x86 *c)
clear_cpu_cap(c, X86_FEATURE_PAT);
/*
- * If fast string is not enabled in IA32_MISC_ENABLE for any reason,
- * clear the fast string and enhanced fast string CPU capabilities.
+ * Modern CPUs are generally expected to have a sane fast string
+ * implementation. However, BIOSes typically have a knob to tweak
+ * the architectural MISC_ENABLE.FAST_STRING enable bit.
+ *
+ * Adhere to the preference and program the Linux-defined fast
+ * string flag and enhanced fast string capabilities accordingly.
*/
- if (c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xd)) {
+ if (c->x86_vfm >= INTEL_PENTIUM_M_DOTHAN) {
rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
- if (!(misc_enable & MSR_IA32_MISC_ENABLE_FAST_STRING)) {
+ if (misc_enable & MSR_IA32_MISC_ENABLE_FAST_STRING) {
+ /* X86_FEATURE_ERMS is set based on CPUID */
+ set_cpu_cap(c, X86_FEATURE_REP_GOOD);
+ } else {
pr_info("Disabled fast string operations\n");
setup_clear_cpu_cap(X86_FEATURE_REP_GOOD);
setup_clear_cpu_cap(X86_FEATURE_ERMS);
@@ -350,9 +350,7 @@ static void bsp_init_intel(struct cpuinfo_x86 *c)
int ppro_with_ram_bug(void)
{
/* Uses data from early_cpu_detect now */
- if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
- boot_cpu_data.x86 == 6 &&
- boot_cpu_data.x86_model == 1 &&
+ if (boot_cpu_data.x86_vfm == INTEL_PENTIUM_PRO &&
boot_cpu_data.x86_stepping < 8) {
pr_info("Pentium Pro with Errata#50 detected. Taking evasive action.\n");
return 1;
@@ -369,9 +367,8 @@ static void intel_smp_check(struct cpuinfo_x86 *c)
/*
* Mask B, Pentium, but not Pentium MMX
*/
- if (c->x86 == 5 &&
- c->x86_stepping >= 1 && c->x86_stepping <= 4 &&
- c->x86_model <= 3) {
+ if (c->x86_vfm >= INTEL_FAM5_START && c->x86_vfm < INTEL_PENTIUM_MMX &&
+ c->x86_stepping >= 1 && c->x86_stepping <= 4) {
/*
* Remember we have B step Pentia with bugs
*/
@@ -398,7 +395,7 @@ static void intel_workarounds(struct cpuinfo_x86 *c)
* The Quark is also family 5, but does not have the same bug.
*/
clear_cpu_bug(c, X86_BUG_F00F);
- if (c->x86 == 5 && c->x86_model < 9) {
+ if (c->x86_vfm >= INTEL_FAM5_START && c->x86_vfm < INTEL_QUARK_X1000) {
static int f00f_workaround_enabled;
set_cpu_bug(c, X86_BUG_F00F);
@@ -413,7 +410,8 @@ static void intel_workarounds(struct cpuinfo_x86 *c)
* SEP CPUID bug: Pentium Pro reports SEP but doesn't have it until
* model 3 mask 3
*/
- if ((c->x86<<8 | c->x86_model<<4 | c->x86_stepping) < 0x633)
+ if ((c->x86_vfm == INTEL_PENTIUM_II_KLAMATH && c->x86_stepping < 3) ||
+ c->x86_vfm < INTEL_PENTIUM_II_KLAMATH)
clear_cpu_cap(c, X86_FEATURE_SEP);
/*
@@ -431,7 +429,7 @@ static void intel_workarounds(struct cpuinfo_x86 *c)
* P4 Xeon erratum 037 workaround.
* Hardware prefetcher may cause stale data to be loaded into the cache.
*/
- if ((c->x86 == 15) && (c->x86_model == 1) && (c->x86_stepping == 1)) {
+ if (c->x86_vfm == INTEL_P4_WILLAMETTE && c->x86_stepping == 1) {
if (msr_set_bit(MSR_IA32_MISC_ENABLE,
MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE_BIT) > 0) {
pr_info("CPU: C0 stepping P4 Xeon detected.\n");
@@ -445,27 +443,20 @@ static void intel_workarounds(struct cpuinfo_x86 *c)
* integrated APIC (see 11AP erratum in "Pentium Processor
* Specification Update").
*/
- if (boot_cpu_has(X86_FEATURE_APIC) && (c->x86<<8 | c->x86_model<<4) == 0x520 &&
+ if (boot_cpu_has(X86_FEATURE_APIC) && c->x86_vfm == INTEL_PENTIUM_75 &&
(c->x86_stepping < 0x6 || c->x86_stepping == 0xb))
set_cpu_bug(c, X86_BUG_11AP);
-
#ifdef CONFIG_X86_INTEL_USERCOPY
/*
- * Set up the preferred alignment for movsl bulk memory moves
+ * MOVSL bulk memory moves can be slow when source and dest are not
+ * both 8-byte aligned. PII/PIII only like MOVSL with 8-byte alignment.
+ *
+ * Set the preferred alignment for Pentium Pro and newer processors, as
+ * it has only been tested on these.
*/
- switch (c->x86) {
- case 4: /* 486: untested */
- break;
- case 5: /* Old Pentia: untested */
- break;
- case 6: /* PII/PIII only like movsl with 8-byte alignment */
- movsl_mask.mask = 7;
- break;
- case 15: /* P4 is OK down to 8-byte alignment */
+ if (c->x86_vfm >= INTEL_PENTIUM_PRO)
movsl_mask.mask = 7;
- break;
- }
#endif
intel_smp_check(c);
@@ -521,6 +512,25 @@ static void init_intel_misc_features(struct cpuinfo_x86 *c)
wrmsrl(MSR_MISC_FEATURES_ENABLES, msr);
}
+/*
+ * This is a list of Intel CPUs that are known to suffer from downclocking when
+ * ZMM registers (512-bit vectors) are used. On these CPUs, when the kernel
+ * executes SIMD-optimized code such as cryptography functions or CRCs, it
+ * should prefer 256-bit (YMM) code to 512-bit (ZMM) code.
+ */
+static const struct x86_cpu_id zmm_exclusion_list[] = {
+ X86_MATCH_VFM(INTEL_SKYLAKE_X, 0),
+ X86_MATCH_VFM(INTEL_ICELAKE_X, 0),
+ X86_MATCH_VFM(INTEL_ICELAKE_D, 0),
+ X86_MATCH_VFM(INTEL_ICELAKE, 0),
+ X86_MATCH_VFM(INTEL_ICELAKE_L, 0),
+ X86_MATCH_VFM(INTEL_ICELAKE_NNPI, 0),
+ X86_MATCH_VFM(INTEL_TIGERLAKE_L, 0),
+ X86_MATCH_VFM(INTEL_TIGERLAKE, 0),
+ /* Allow Rocket Lake and later, and Sapphire Rapids and later. */
+ {},
+};
+
static void init_intel(struct cpuinfo_x86 *c)
{
early_init_intel(c);
@@ -563,8 +573,6 @@ static void init_intel(struct cpuinfo_x86 *c)
#ifdef CONFIG_X86_64
if (c->x86 == 15)
c->x86_cache_alignment = c->x86_clflush_size * 2;
- if (c->x86 == 6)
- set_cpu_cap(c, X86_FEATURE_REP_GOOD);
#else
/*
* Names for the Pentium II/Celeron processors
@@ -601,6 +609,9 @@ static void init_intel(struct cpuinfo_x86 *c)
}
#endif
+ if (x86_match_cpu(zmm_exclusion_list))
+ set_cpu_cap(c, X86_FEATURE_PREFER_YMM);
+
/* Work around errata */
srat_detect_node(c);
@@ -622,83 +633,103 @@ static unsigned int intel_size_cache(struct cpuinfo_x86 *c, unsigned int size)
* to determine which, so we use a boottime override
* for the 512kb model, and assume 256 otherwise.
*/
- if ((c->x86 == 6) && (c->x86_model == 11) && (size == 0))
+ if (c->x86_vfm == INTEL_PENTIUM_III_TUALATIN && size == 0)
size = 256;
/*
* Intel Quark SoC X1000 contains a 4-way set associative
* 16K cache with a 16 byte cache line and 256 lines per tag
*/
- if ((c->x86 == 5) && (c->x86_model == 9))
+ if (c->x86_vfm == INTEL_QUARK_X1000)
size = 16;
return size;
}
#endif
-#define TLB_INST_4K 0x01
-#define TLB_INST_4M 0x02
-#define TLB_INST_2M_4M 0x03
+#define TLB_INST_4K 0x01
+#define TLB_INST_4M 0x02
+#define TLB_INST_2M_4M 0x03
-#define TLB_INST_ALL 0x05
-#define TLB_INST_1G 0x06
+#define TLB_INST_ALL 0x05
+#define TLB_INST_1G 0x06
-#define TLB_DATA_4K 0x11
-#define TLB_DATA_4M 0x12
-#define TLB_DATA_2M_4M 0x13
-#define TLB_DATA_4K_4M 0x14
+#define TLB_DATA_4K 0x11
+#define TLB_DATA_4M 0x12
+#define TLB_DATA_2M_4M 0x13
+#define TLB_DATA_4K_4M 0x14
-#define TLB_DATA_1G 0x16
+#define TLB_DATA_1G 0x16
+#define TLB_DATA_1G_2M_4M 0x17
-#define TLB_DATA0_4K 0x21
-#define TLB_DATA0_4M 0x22
-#define TLB_DATA0_2M_4M 0x23
+#define TLB_DATA0_4K 0x21
+#define TLB_DATA0_4M 0x22
+#define TLB_DATA0_2M_4M 0x23
-#define STLB_4K 0x41
-#define STLB_4K_2M 0x42
+#define STLB_4K 0x41
+#define STLB_4K_2M 0x42
+
+/*
+ * Each of leaf 0x2's one-byte TLB descriptors implies the same number of
+ * entries for their respective TLB types. The 0x63 descriptor is an
+ * exception: it implies 4 dTLB entries for 1GB pages and 32 dTLB entries
+ * for 2MB or 4MB pages. Encode the descriptor 0x63 dTLB entry count for
+ * 2MB/4MB pages here, as its count for dTLB 1GB pages is already encoded
+ * in the intel_tlb_table[] mapping.
+ */
+#define TLB_0x63_2M_4M_ENTRIES 32
+
+struct _tlb_table {
+ unsigned char descriptor;
+ char tlb_type;
+ unsigned int entries;
+};
static const struct _tlb_table intel_tlb_table[] = {
- { 0x01, TLB_INST_4K, 32, " TLB_INST 4 KByte pages, 4-way set associative" },
- { 0x02, TLB_INST_4M, 2, " TLB_INST 4 MByte pages, full associative" },
- { 0x03, TLB_DATA_4K, 64, " TLB_DATA 4 KByte pages, 4-way set associative" },
- { 0x04, TLB_DATA_4M, 8, " TLB_DATA 4 MByte pages, 4-way set associative" },
- { 0x05, TLB_DATA_4M, 32, " TLB_DATA 4 MByte pages, 4-way set associative" },
- { 0x0b, TLB_INST_4M, 4, " TLB_INST 4 MByte pages, 4-way set associative" },
- { 0x4f, TLB_INST_4K, 32, " TLB_INST 4 KByte pages" },
- { 0x50, TLB_INST_ALL, 64, " TLB_INST 4 KByte and 2-MByte or 4-MByte pages" },
- { 0x51, TLB_INST_ALL, 128, " TLB_INST 4 KByte and 2-MByte or 4-MByte pages" },
- { 0x52, TLB_INST_ALL, 256, " TLB_INST 4 KByte and 2-MByte or 4-MByte pages" },
- { 0x55, TLB_INST_2M_4M, 7, " TLB_INST 2-MByte or 4-MByte pages, fully associative" },
- { 0x56, TLB_DATA0_4M, 16, " TLB_DATA0 4 MByte pages, 4-way set associative" },
- { 0x57, TLB_DATA0_4K, 16, " TLB_DATA0 4 KByte pages, 4-way associative" },
- { 0x59, TLB_DATA0_4K, 16, " TLB_DATA0 4 KByte pages, fully associative" },
- { 0x5a, TLB_DATA0_2M_4M, 32, " TLB_DATA0 2-MByte or 4 MByte pages, 4-way set associative" },
- { 0x5b, TLB_DATA_4K_4M, 64, " TLB_DATA 4 KByte and 4 MByte pages" },
- { 0x5c, TLB_DATA_4K_4M, 128, " TLB_DATA 4 KByte and 4 MByte pages" },
- { 0x5d, TLB_DATA_4K_4M, 256, " TLB_DATA 4 KByte and 4 MByte pages" },
- { 0x61, TLB_INST_4K, 48, " TLB_INST 4 KByte pages, full associative" },
- { 0x63, TLB_DATA_1G, 4, " TLB_DATA 1 GByte pages, 4-way set associative" },
- { 0x6b, TLB_DATA_4K, 256, " TLB_DATA 4 KByte pages, 8-way associative" },
- { 0x6c, TLB_DATA_2M_4M, 128, " TLB_DATA 2 MByte or 4 MByte pages, 8-way associative" },
- { 0x6d, TLB_DATA_1G, 16, " TLB_DATA 1 GByte pages, fully associative" },
- { 0x76, TLB_INST_2M_4M, 8, " TLB_INST 2-MByte or 4-MByte pages, fully associative" },
- { 0xb0, TLB_INST_4K, 128, " TLB_INST 4 KByte pages, 4-way set associative" },
- { 0xb1, TLB_INST_2M_4M, 4, " TLB_INST 2M pages, 4-way, 8 entries or 4M pages, 4-way entries" },
- { 0xb2, TLB_INST_4K, 64, " TLB_INST 4KByte pages, 4-way set associative" },
- { 0xb3, TLB_DATA_4K, 128, " TLB_DATA 4 KByte pages, 4-way set associative" },
- { 0xb4, TLB_DATA_4K, 256, " TLB_DATA 4 KByte pages, 4-way associative" },
- { 0xb5, TLB_INST_4K, 64, " TLB_INST 4 KByte pages, 8-way set associative" },
- { 0xb6, TLB_INST_4K, 128, " TLB_INST 4 KByte pages, 8-way set associative" },
- { 0xba, TLB_DATA_4K, 64, " TLB_DATA 4 KByte pages, 4-way associative" },
- { 0xc0, TLB_DATA_4K_4M, 8, " TLB_DATA 4 KByte and 4 MByte pages, 4-way associative" },
- { 0xc1, STLB_4K_2M, 1024, " STLB 4 KByte and 2 MByte pages, 8-way associative" },
- { 0xc2, TLB_DATA_2M_4M, 16, " TLB_DATA 2 MByte/4MByte pages, 4-way associative" },
- { 0xca, STLB_4K, 512, " STLB 4 KByte pages, 4-way associative" },
+ { 0x01, TLB_INST_4K, 32}, /* TLB_INST 4 KByte pages, 4-way set associative */
+ { 0x02, TLB_INST_4M, 2}, /* TLB_INST 4 MByte pages, full associative */
+ { 0x03, TLB_DATA_4K, 64}, /* TLB_DATA 4 KByte pages, 4-way set associative */
+ { 0x04, TLB_DATA_4M, 8}, /* TLB_DATA 4 MByte pages, 4-way set associative */
+ { 0x05, TLB_DATA_4M, 32}, /* TLB_DATA 4 MByte pages, 4-way set associative */
+ { 0x0b, TLB_INST_4M, 4}, /* TLB_INST 4 MByte pages, 4-way set associative */
+ { 0x4f, TLB_INST_4K, 32}, /* TLB_INST 4 KByte pages */
+ { 0x50, TLB_INST_ALL, 64}, /* TLB_INST 4 KByte and 2-MByte or 4-MByte pages */
+ { 0x51, TLB_INST_ALL, 128}, /* TLB_INST 4 KByte and 2-MByte or 4-MByte pages */
+ { 0x52, TLB_INST_ALL, 256}, /* TLB_INST 4 KByte and 2-MByte or 4-MByte pages */
+ { 0x55, TLB_INST_2M_4M, 7}, /* TLB_INST 2-MByte or 4-MByte pages, fully associative */
+ { 0x56, TLB_DATA0_4M, 16}, /* TLB_DATA0 4 MByte pages, 4-way set associative */
+ { 0x57, TLB_DATA0_4K, 16}, /* TLB_DATA0 4 KByte pages, 4-way associative */
+ { 0x59, TLB_DATA0_4K, 16}, /* TLB_DATA0 4 KByte pages, fully associative */
+ { 0x5a, TLB_DATA0_2M_4M, 32}, /* TLB_DATA0 2-MByte or 4 MByte pages, 4-way set associative */
+ { 0x5b, TLB_DATA_4K_4M, 64}, /* TLB_DATA 4 KByte and 4 MByte pages */
+ { 0x5c, TLB_DATA_4K_4M, 128}, /* TLB_DATA 4 KByte and 4 MByte pages */
+ { 0x5d, TLB_DATA_4K_4M, 256}, /* TLB_DATA 4 KByte and 4 MByte pages */
+ { 0x61, TLB_INST_4K, 48}, /* TLB_INST 4 KByte pages, full associative */
+ { 0x63, TLB_DATA_1G_2M_4M, 4}, /* TLB_DATA 1 GByte pages, 4-way set associative
+ * (plus 32 entries TLB_DATA 2 MByte or 4 MByte pages, not encoded here) */
+ { 0x6b, TLB_DATA_4K, 256}, /* TLB_DATA 4 KByte pages, 8-way associative */
+ { 0x6c, TLB_DATA_2M_4M, 128}, /* TLB_DATA 2 MByte or 4 MByte pages, 8-way associative */
+ { 0x6d, TLB_DATA_1G, 16}, /* TLB_DATA 1 GByte pages, fully associative */
+ { 0x76, TLB_INST_2M_4M, 8}, /* TLB_INST 2-MByte or 4-MByte pages, fully associative */
+ { 0xb0, TLB_INST_4K, 128}, /* TLB_INST 4 KByte pages, 4-way set associative */
+ { 0xb1, TLB_INST_2M_4M, 4}, /* TLB_INST 2M pages, 4-way, 8 entries or 4M pages, 4-way entries */
+ { 0xb2, TLB_INST_4K, 64}, /* TLB_INST 4KByte pages, 4-way set associative */
+ { 0xb3, TLB_DATA_4K, 128}, /* TLB_DATA 4 KByte pages, 4-way set associative */
+ { 0xb4, TLB_DATA_4K, 256}, /* TLB_DATA 4 KByte pages, 4-way associative */
+ { 0xb5, TLB_INST_4K, 64}, /* TLB_INST 4 KByte pages, 8-way set associative */
+ { 0xb6, TLB_INST_4K, 128}, /* TLB_INST 4 KByte pages, 8-way set associative */
+ { 0xba, TLB_DATA_4K, 64}, /* TLB_DATA 4 KByte pages, 4-way associative */
+ { 0xc0, TLB_DATA_4K_4M, 8}, /* TLB_DATA 4 KByte and 4 MByte pages, 4-way associative */
+ { 0xc1, STLB_4K_2M, 1024}, /* STLB 4 KByte and 2 MByte pages, 8-way associative */
+ { 0xc2, TLB_DATA_2M_4M, 16}, /* TLB_DATA 2 MByte/4MByte pages, 4-way associative */
+ { 0xca, STLB_4K, 512}, /* STLB 4 KByte pages, 4-way associative */
{ 0x00, 0, 0 }
};
static void intel_tlb_lookup(const unsigned char desc)
{
+ unsigned int entries;
unsigned char k;
+
if (desc == 0)
return;
@@ -710,75 +741,58 @@ static void intel_tlb_lookup(const unsigned char desc)
if (intel_tlb_table[k].tlb_type == 0)
return;
+ entries = intel_tlb_table[k].entries;
switch (intel_tlb_table[k].tlb_type) {
case STLB_4K:
- if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries)
- tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries;
- if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries)
- tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries;
+ tlb_lli_4k = max(tlb_lli_4k, entries);
+ tlb_lld_4k = max(tlb_lld_4k, entries);
break;
case STLB_4K_2M:
- if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries)
- tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries;
- if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries)
- tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries;
- if (tlb_lli_2m[ENTRIES] < intel_tlb_table[k].entries)
- tlb_lli_2m[ENTRIES] = intel_tlb_table[k].entries;
- if (tlb_lld_2m[ENTRIES] < intel_tlb_table[k].entries)
- tlb_lld_2m[ENTRIES] = intel_tlb_table[k].entries;
- if (tlb_lli_4m[ENTRIES] < intel_tlb_table[k].entries)
- tlb_lli_4m[ENTRIES] = intel_tlb_table[k].entries;
- if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries)
- tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries;
+ tlb_lli_4k = max(tlb_lli_4k, entries);
+ tlb_lld_4k = max(tlb_lld_4k, entries);
+ tlb_lli_2m = max(tlb_lli_2m, entries);
+ tlb_lld_2m = max(tlb_lld_2m, entries);
+ tlb_lli_4m = max(tlb_lli_4m, entries);
+ tlb_lld_4m = max(tlb_lld_4m, entries);
break;
case TLB_INST_ALL:
- if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries)
- tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries;
- if (tlb_lli_2m[ENTRIES] < intel_tlb_table[k].entries)
- tlb_lli_2m[ENTRIES] = intel_tlb_table[k].entries;
- if (tlb_lli_4m[ENTRIES] < intel_tlb_table[k].entries)
- tlb_lli_4m[ENTRIES] = intel_tlb_table[k].entries;
+ tlb_lli_4k = max(tlb_lli_4k, entries);
+ tlb_lli_2m = max(tlb_lli_2m, entries);
+ tlb_lli_4m = max(tlb_lli_4m, entries);
break;
case TLB_INST_4K:
- if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries)
- tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries;
+ tlb_lli_4k = max(tlb_lli_4k, entries);
break;
case TLB_INST_4M:
- if (tlb_lli_4m[ENTRIES] < intel_tlb_table[k].entries)
- tlb_lli_4m[ENTRIES] = intel_tlb_table[k].entries;
+ tlb_lli_4m = max(tlb_lli_4m, entries);
break;
case TLB_INST_2M_4M:
- if (tlb_lli_2m[ENTRIES] < intel_tlb_table[k].entries)
- tlb_lli_2m[ENTRIES] = intel_tlb_table[k].entries;
- if (tlb_lli_4m[ENTRIES] < intel_tlb_table[k].entries)
- tlb_lli_4m[ENTRIES] = intel_tlb_table[k].entries;
+ tlb_lli_2m = max(tlb_lli_2m, entries);
+ tlb_lli_4m = max(tlb_lli_4m, entries);
break;
case TLB_DATA_4K:
case TLB_DATA0_4K:
- if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries)
- tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries;
+ tlb_lld_4k = max(tlb_lld_4k, entries);
break;
case TLB_DATA_4M:
case TLB_DATA0_4M:
- if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries)
- tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries;
+ tlb_lld_4m = max(tlb_lld_4m, entries);
break;
case TLB_DATA_2M_4M:
case TLB_DATA0_2M_4M:
- if (tlb_lld_2m[ENTRIES] < intel_tlb_table[k].entries)
- tlb_lld_2m[ENTRIES] = intel_tlb_table[k].entries;
- if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries)
- tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries;
+ tlb_lld_2m = max(tlb_lld_2m, entries);
+ tlb_lld_4m = max(tlb_lld_4m, entries);
break;
case TLB_DATA_4K_4M:
- if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries)
- tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries;
- if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries)
- tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries;
+ tlb_lld_4k = max(tlb_lld_4k, entries);
+ tlb_lld_4m = max(tlb_lld_4m, entries);
break;
+ case TLB_DATA_1G_2M_4M:
+ tlb_lld_2m = max(tlb_lld_2m, TLB_0x63_2M_4M_ENTRIES);
+ tlb_lld_4m = max(tlb_lld_4m, TLB_0x63_2M_4M_ENTRIES);
+ fallthrough;
case TLB_DATA_1G:
- if (tlb_lld_1g[ENTRIES] < intel_tlb_table[k].entries)
- tlb_lld_1g[ENTRIES] = intel_tlb_table[k].entries;
+ tlb_lld_1g = max(tlb_lld_1g, entries);
break;
}
}
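As a worked example of the new fallthrough, descriptor 0x63 now updates three counters; the entry counts below come from the table row and the TLB_0x63_2M_4M_ENTRIES define above.

/* Effect of intel_tlb_lookup(0x63), shown inline for clarity: */
tlb_lld_2m = max(tlb_lld_2m, 32);	/* TLB_0x63_2M_4M_ENTRIES */
tlb_lld_4m = max(tlb_lld_4m, 32);
/* ... then falls through to the TLB_DATA_1G case ... */
tlb_lld_1g = max(tlb_lld_1g, 4);	/* entries from the 0x63 table row */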
@@ -799,7 +813,7 @@ static void intel_detect_tlb(struct cpuinfo_x86 *c)
cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]);
/* If bit 31 is set, this is an unknown format */
- for (j = 0 ; j < 3 ; j++)
+ for (j = 0 ; j < 4 ; j++)
if (regs[j] & (1 << 31))
regs[j] = 0;
@@ -873,34 +887,3 @@ static const struct cpu_dev intel_cpu_dev = {
};
cpu_dev_register(intel_cpu_dev);
-
-#define X86_HYBRID_CPU_TYPE_ID_SHIFT 24
-
-/**
- * get_this_hybrid_cpu_type() - Get the type of this hybrid CPU
- *
- * Returns the CPU type [31:24] (i.e., Atom or Core) of a CPU in
- * a hybrid processor. If the processor is not hybrid, returns 0.
- */
-u8 get_this_hybrid_cpu_type(void)
-{
- if (!cpu_feature_enabled(X86_FEATURE_HYBRID_CPU))
- return 0;
-
- return cpuid_eax(0x0000001a) >> X86_HYBRID_CPU_TYPE_ID_SHIFT;
-}
-
-/**
- * get_this_hybrid_cpu_native_id() - Get the native id of this hybrid CPU
- *
- * Returns the uarch native ID [23:0] of a CPU in a hybrid processor.
- * If the processor is not hybrid, returns 0.
- */
-u32 get_this_hybrid_cpu_native_id(void)
-{
- if (!cpu_feature_enabled(X86_FEATURE_HYBRID_CPU))
- return 0;
-
- return cpuid_eax(0x0000001a) &
- (BIT_ULL(X86_HYBRID_CPU_TYPE_ID_SHIFT) - 1);
-}
diff --git a/arch/x86/kernel/cpu/match.c b/arch/x86/kernel/cpu/match.c
index 4f3c65429f82..6af1e8baeb0f 100644
--- a/arch/x86/kernel/cpu/match.c
+++ b/arch/x86/kernel/cpu/match.c
@@ -6,6 +6,34 @@
#include <linux/slab.h>
/**
+ * x86_match_vendor_cpu_type - helper function to match the hardware defined
+ * cpu-type for a single entry in the x86_cpu_id
+ * table. Note, this function does not match the
+ * generic cpu-types TOPO_CPU_TYPE_EFFICIENCY and
+ * TOPO_CPU_TYPE_PERFORMANCE.
+ * @c: Pointer to the cpuinfo_x86 structure of the CPU to match.
+ * @m: Pointer to the x86_cpu_id entry to match against.
+ *
+ * Return: true if the cpu-type matches, false otherwise.
+ */
+static bool x86_match_vendor_cpu_type(struct cpuinfo_x86 *c, const struct x86_cpu_id *m)
+{
+ if (m->type == X86_CPU_TYPE_ANY)
+ return true;
+
+ /* Hybrid CPUs are special, they are assumed to match all cpu-types */
+ if (cpu_feature_enabled(X86_FEATURE_HYBRID_CPU))
+ return true;
+
+ if (c->x86_vendor == X86_VENDOR_INTEL)
+ return m->type == c->topo.intel_type;
+ if (c->x86_vendor == X86_VENDOR_AMD)
+ return m->type == c->topo.amd_type;
+
+ return false;
+}
+
+/**
* x86_match_cpu - match current CPU against an array of x86_cpu_ids
* @match: Pointer to array of x86_cpu_ids. Last entry terminated with
* {}.
@@ -50,6 +78,8 @@ const struct x86_cpu_id *x86_match_cpu(const struct x86_cpu_id *match)
continue;
if (m->feature != X86_FEATURE_ANY && !cpu_has(c, m->feature))
continue;
+ if (!x86_match_vendor_cpu_type(c, m))
+ continue;
return m;
}
return NULL;
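The new x86_match_vendor_cpu_type() above boils down to: wildcard entries and hybrid parts always match, otherwise compare against the vendor-specific topology type. A small standalone sketch of that decision over a toy struct; the enum values and field names here are illustrative, not the kernel's:

#include <stdbool.h>
#include <stdio.h>

enum { VENDOR_INTEL, VENDOR_AMD };
enum { CPU_TYPE_ANY = 0, CPU_TYPE_ATOM = 0x20, CPU_TYPE_CORE = 0x40 };

struct cpu { int vendor; int intel_type; int amd_type; bool hybrid; };
struct match_id { int type; };

static bool match_cpu_type(const struct cpu *c, const struct match_id *m)
{
	if (m->type == CPU_TYPE_ANY)
		return true;
	if (c->hybrid)		/* hybrid parts are assumed to match every cpu-type */
		return true;
	if (c->vendor == VENDOR_INTEL)
		return m->type == c->intel_type;
	if (c->vendor == VENDOR_AMD)
		return m->type == c->amd_type;
	return false;
}

int main(void)
{
	struct cpu c = { VENDOR_INTEL, CPU_TYPE_CORE, 0, false };
	struct match_id m = { CPU_TYPE_ATOM };

	printf("%d\n", match_cpu_type(&c, &m));	/* 0: Core part, Atom table entry */
	return 0;
}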
diff --git a/arch/x86/kernel/cpu/mce/core.c b/arch/x86/kernel/cpu/mce/core.c
index 0dc00c9894c7..1f14c3308b6b 100644
--- a/arch/x86/kernel/cpu/mce/core.c
+++ b/arch/x86/kernel/cpu/mce/core.c
@@ -584,6 +584,28 @@ bool mce_is_correctable(struct mce *m)
}
EXPORT_SYMBOL_GPL(mce_is_correctable);
+/*
+ * Notify the user(s) about new machine check events.
+ * Can be called from interrupt context, but not from machine check/NMI
+ * context.
+ */
+static bool mce_notify_irq(void)
+{
+ /* Not more than two messages every minute */
+ static DEFINE_RATELIMIT_STATE(ratelimit, 60*HZ, 2);
+
+ if (test_and_clear_bit(0, &mce_need_notify)) {
+ mce_work_trigger();
+
+ if (__ratelimit(&ratelimit))
+ pr_info(HW_ERR "Machine check events logged\n");
+
+ return true;
+ }
+
+ return false;
+}
+
static int mce_early_notifier(struct notifier_block *nb, unsigned long val,
void *data)
{
@@ -1773,28 +1795,6 @@ static void mce_timer_delete_all(void)
del_timer_sync(&per_cpu(mce_timer, cpu));
}
-/*
- * Notify the user(s) about new machine check events.
- * Can be called from interrupt context, but not from machine check/NMI
- * context.
- */
-bool mce_notify_irq(void)
-{
- /* Not more than two messages every minute */
- static DEFINE_RATELIMIT_STATE(ratelimit, 60*HZ, 2);
-
- if (test_and_clear_bit(0, &mce_need_notify)) {
- mce_work_trigger();
-
- if (__ratelimit(&ratelimit))
- pr_info(HW_ERR "Machine check events logged\n");
-
- return true;
- }
- return false;
-}
-EXPORT_SYMBOL_GPL(mce_notify_irq);
-
static void __mcheck_cpu_mce_banks_init(void)
{
struct mce_bank *mce_banks = this_cpu_ptr(mce_banks_array);
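mce_notify_irq(), now static above, throttles its log message with DEFINE_RATELIMIT_STATE(ratelimit, 60*HZ, 2), i.e. at most two prints per minute. A rough userspace approximation of that windowed budget; the kernel helper differs in detail (it also accounts suppressed callers), so this is only a sketch of the shape:

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

struct ratelimit { time_t window_start; int interval; int burst; int emitted; };

static bool ratelimit_ok(struct ratelimit *rl)
{
	time_t now = time(NULL);

	if (now - rl->window_start >= rl->interval) {
		rl->window_start = now;	/* new window, reset the budget */
		rl->emitted = 0;
	}
	if (rl->emitted >= rl->burst)
		return false;
	rl->emitted++;
	return true;
}

int main(void)
{
	struct ratelimit rl = { .window_start = time(NULL), .interval = 60, .burst = 2 };

	for (int i = 0; i < 5; i++)
		printf("event %d: %s\n", i, ratelimit_ok(&rl) ? "logged" : "suppressed");
	return 0;
}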
diff --git a/arch/x86/kernel/cpu/mce/inject.c b/arch/x86/kernel/cpu/mce/inject.c
index 313fe682db33..06e3cf7229ce 100644
--- a/arch/x86/kernel/cpu/mce/inject.c
+++ b/arch/x86/kernel/cpu/mce/inject.c
@@ -229,7 +229,6 @@ static int raise_local(void)
} else if (m->status) {
pr_info("Starting machine check poll CPU %d\n", cpu);
raise_poll(m);
- mce_notify_irq();
pr_info("Machine check poll done on CPU %d\n", cpu);
} else
m->finished = 0;
diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c
index a5dac7f3c0a0..138689b8e1d8 100644
--- a/arch/x86/kernel/cpu/microcode/amd.c
+++ b/arch/x86/kernel/cpu/microcode/amd.c
@@ -23,14 +23,18 @@
#include <linux/earlycpio.h>
#include <linux/firmware.h>
+#include <linux/bsearch.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/initrd.h>
#include <linux/kernel.h>
#include <linux/pci.h>
+#include <crypto/sha2.h>
+
#include <asm/microcode.h>
#include <asm/processor.h>
+#include <asm/cmdline.h>
#include <asm/setup.h>
#include <asm/cpu.h>
#include <asm/msr.h>
@@ -145,6 +149,113 @@ ucode_path[] __maybe_unused = "kernel/x86/microcode/AuthenticAMD.bin";
*/
static u32 bsp_cpuid_1_eax __ro_after_init;
+static bool sha_check = true;
+
+struct patch_digest {
+ u32 patch_id;
+ u8 sha256[SHA256_DIGEST_SIZE];
+};
+
+#include "amd_shas.c"
+
+static int cmp_id(const void *key, const void *elem)
+{
+ struct patch_digest *pd = (struct patch_digest *)elem;
+ u32 patch_id = *(u32 *)key;
+
+ if (patch_id == pd->patch_id)
+ return 0;
+ else if (patch_id < pd->patch_id)
+ return -1;
+ else
+ return 1;
+}
+
+static bool need_sha_check(u32 cur_rev)
+{
+ switch (cur_rev >> 8) {
+ case 0x80012: return cur_rev <= 0x800126f; break;
+ case 0x80082: return cur_rev <= 0x800820f; break;
+ case 0x83010: return cur_rev <= 0x830107c; break;
+ case 0x86001: return cur_rev <= 0x860010e; break;
+ case 0x86081: return cur_rev <= 0x8608108; break;
+ case 0x87010: return cur_rev <= 0x8701034; break;
+ case 0x8a000: return cur_rev <= 0x8a0000a; break;
+ case 0xa0010: return cur_rev <= 0xa00107a; break;
+ case 0xa0011: return cur_rev <= 0xa0011da; break;
+ case 0xa0012: return cur_rev <= 0xa001243; break;
+ case 0xa0082: return cur_rev <= 0xa00820e; break;
+ case 0xa1011: return cur_rev <= 0xa101153; break;
+ case 0xa1012: return cur_rev <= 0xa10124e; break;
+ case 0xa1081: return cur_rev <= 0xa108109; break;
+ case 0xa2010: return cur_rev <= 0xa20102f; break;
+ case 0xa2012: return cur_rev <= 0xa201212; break;
+ case 0xa4041: return cur_rev <= 0xa404109; break;
+ case 0xa5000: return cur_rev <= 0xa500013; break;
+ case 0xa6012: return cur_rev <= 0xa60120a; break;
+ case 0xa7041: return cur_rev <= 0xa704109; break;
+ case 0xa7052: return cur_rev <= 0xa705208; break;
+ case 0xa7080: return cur_rev <= 0xa708009; break;
+	case 0xa70c0: return cur_rev <= 0xa70c009; break;
+ case 0xaa001: return cur_rev <= 0xaa00116; break;
+ case 0xaa002: return cur_rev <= 0xaa00218; break;
+ default: break;
+ }
+
+ pr_info("You should not be seeing this. Please send the following couple of lines to x86-<at>-kernel.org\n");
+ pr_info("CPUID(1).EAX: 0x%x, current revision: 0x%x\n", bsp_cpuid_1_eax, cur_rev);
+ return true;
+}
+
+static bool verify_sha256_digest(u32 patch_id, u32 cur_rev, const u8 *data, unsigned int len)
+{
+ struct patch_digest *pd = NULL;
+ u8 digest[SHA256_DIGEST_SIZE];
+ struct sha256_state s;
+ int i;
+
+ if (x86_family(bsp_cpuid_1_eax) < 0x17 ||
+ x86_family(bsp_cpuid_1_eax) > 0x19)
+ return true;
+
+ if (!need_sha_check(cur_rev))
+ return true;
+
+ if (!sha_check)
+ return true;
+
+ pd = bsearch(&patch_id, phashes, ARRAY_SIZE(phashes), sizeof(struct patch_digest), cmp_id);
+ if (!pd) {
+ pr_err("No sha256 digest for patch ID: 0x%x found\n", patch_id);
+ return false;
+ }
+
+ sha256_init(&s);
+ sha256_update(&s, data, len);
+ sha256_final(&s, digest);
+
+ if (memcmp(digest, pd->sha256, sizeof(digest))) {
+ pr_err("Patch 0x%x SHA256 digest mismatch!\n", patch_id);
+
+ for (i = 0; i < SHA256_DIGEST_SIZE; i++)
+ pr_cont("0x%x ", digest[i]);
+ pr_info("\n");
+
+ return false;
+ }
+
+ return true;
+}
+
+static u32 get_patch_level(void)
+{
+ u32 rev, dummy __always_unused;
+
+ native_rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);
+
+ return rev;
+}
+
static union cpuid_1_eax ucode_rev_to_cpuid(unsigned int val)
{
union zen_patch_rev p;
@@ -246,8 +357,7 @@ static bool verify_equivalence_table(const u8 *buf, size_t buf_size)
* On success, @sh_psize returns the patch size according to the section header,
* to the caller.
*/
-static bool
-__verify_patch_section(const u8 *buf, size_t buf_size, u32 *sh_psize)
+static bool __verify_patch_section(const u8 *buf, size_t buf_size, u32 *sh_psize)
{
u32 p_type, p_size;
const u32 *hdr;
@@ -484,10 +594,13 @@ static void scan_containers(u8 *ucode, size_t size, struct cont_desc *desc)
}
}
-static bool __apply_microcode_amd(struct microcode_amd *mc, unsigned int psize)
+static bool __apply_microcode_amd(struct microcode_amd *mc, u32 *cur_rev,
+ unsigned int psize)
{
unsigned long p_addr = (unsigned long)&mc->hdr.data_code;
- u32 rev, dummy;
+
+ if (!verify_sha256_digest(mc->hdr.patch_id, *cur_rev, (const u8 *)p_addr, psize))
+		return false;
native_wrmsrl(MSR_AMD64_PATCH_LOADER, p_addr);
@@ -505,47 +618,13 @@ static bool __apply_microcode_amd(struct microcode_amd *mc, unsigned int psize)
}
/* verify patch application was successful */
- native_rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);
-
- if (rev != mc->hdr.patch_id)
+ *cur_rev = get_patch_level();
+ if (*cur_rev != mc->hdr.patch_id)
return false;
return true;
}
-/*
- * Early load occurs before we can vmalloc(). So we look for the microcode
- * patch container file in initrd, traverse equivalent cpu table, look for a
- * matching microcode patch, and update, all in initrd memory in place.
- * When vmalloc() is available for use later -- on 64-bit during first AP load,
- * and on 32-bit during save_microcode_in_initrd_amd() -- we can call
- * load_microcode_amd() to save equivalent cpu table and microcode patches in
- * kernel heap memory.
- *
- * Returns true if container found (sets @desc), false otherwise.
- */
-static bool early_apply_microcode(u32 old_rev, void *ucode, size_t size)
-{
- struct cont_desc desc = { 0 };
- struct microcode_amd *mc;
-
- scan_containers(ucode, size, &desc);
-
- mc = desc.mc;
- if (!mc)
- return false;
-
- /*
- * Allow application of the same revision to pick up SMT-specific
- * changes even if the revision of the other SMT thread is already
- * up-to-date.
- */
- if (old_rev > mc->hdr.patch_id)
- return false;
-
- return __apply_microcode_amd(mc, desc.psize);
-}
-
static bool get_builtin_microcode(struct cpio_data *cp)
{
char fw_name[36] = "amd-ucode/microcode_amd.bin";
@@ -583,14 +662,35 @@ static bool __init find_blobs_in_containers(struct cpio_data *ret)
return found;
}
+/*
+ * Early load occurs before we can vmalloc(). So we look for the microcode
+ * patch container file in initrd, traverse equivalent cpu table, look for a
+ * matching microcode patch, and update, all in initrd memory in place.
+ * When vmalloc() is available for use later -- on 64-bit during first AP load,
+ * and on 32-bit during save_microcode_in_initrd() -- we can call
+ * load_microcode_amd() to save equivalent cpu table and microcode patches in
+ * kernel heap memory.
+ */
void __init load_ucode_amd_bsp(struct early_load_data *ed, unsigned int cpuid_1_eax)
{
+ struct cont_desc desc = { };
+ struct microcode_amd *mc;
struct cpio_data cp = { };
- u32 dummy;
+ char buf[4];
+ u32 rev;
+
+ if (cmdline_find_option(boot_command_line, "microcode.amd_sha_check", buf, 4)) {
+ if (!strncmp(buf, "off", 3)) {
+ sha_check = false;
+ pr_warn_once("It is a very very bad idea to disable the blobs SHA check!\n");
+ add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK);
+ }
+ }
bsp_cpuid_1_eax = cpuid_1_eax;
- native_rdmsr(MSR_AMD64_PATCH_LEVEL, ed->old_rev, dummy);
+ rev = get_patch_level();
+ ed->old_rev = rev;
/* Needed in load_microcode_amd() */
ucode_cpu_info[0].cpu_sig.sig = cpuid_1_eax;
@@ -598,37 +698,23 @@ void __init load_ucode_amd_bsp(struct early_load_data *ed, unsigned int cpuid_1_
if (!find_blobs_in_containers(&cp))
return;
- if (early_apply_microcode(ed->old_rev, cp.data, cp.size))
- native_rdmsr(MSR_AMD64_PATCH_LEVEL, ed->new_rev, dummy);
-}
-
-static enum ucode_state _load_microcode_amd(u8 family, const u8 *data, size_t size);
-
-static int __init save_microcode_in_initrd(void)
-{
- unsigned int cpuid_1_eax = native_cpuid_eax(1);
- struct cpuinfo_x86 *c = &boot_cpu_data;
- struct cont_desc desc = { 0 };
- enum ucode_state ret;
- struct cpio_data cp;
-
- if (dis_ucode_ldr || c->x86_vendor != X86_VENDOR_AMD || c->x86 < 0x10)
- return 0;
-
- if (!find_blobs_in_containers(&cp))
- return -EINVAL;
-
scan_containers(cp.data, cp.size, &desc);
- if (!desc.mc)
- return -EINVAL;
- ret = _load_microcode_amd(x86_family(cpuid_1_eax), desc.data, desc.size);
- if (ret > UCODE_UPDATED)
- return -EINVAL;
+ mc = desc.mc;
+ if (!mc)
+ return;
- return 0;
+ /*
+ * Allow application of the same revision to pick up SMT-specific
+ * changes even if the revision of the other SMT thread is already
+ * up-to-date.
+ */
+ if (ed->old_rev > mc->hdr.patch_id)
+ return;
+
+ if (__apply_microcode_amd(mc, &rev, desc.psize))
+ ed->new_rev = rev;
}
-early_initcall(save_microcode_in_initrd);
static inline bool patch_cpus_equivalent(struct ucode_patch *p,
struct ucode_patch *n,
@@ -729,14 +815,9 @@ static void free_cache(void)
static struct ucode_patch *find_patch(unsigned int cpu)
{
struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
- u32 rev, dummy __always_unused;
u16 equiv_id = 0;
- /* fetch rev if not populated yet: */
- if (!uci->cpu_sig.rev) {
- rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);
- uci->cpu_sig.rev = rev;
- }
+ uci->cpu_sig.rev = get_patch_level();
if (x86_family(bsp_cpuid_1_eax) < 0x17) {
equiv_id = find_equiv_id(&equiv_table, uci->cpu_sig.sig);
@@ -759,22 +840,20 @@ void reload_ucode_amd(unsigned int cpu)
mc = p->data;
- rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);
-
+ rev = get_patch_level();
if (rev < mc->hdr.patch_id) {
- if (__apply_microcode_amd(mc, p->size))
- pr_info_once("reload revision: 0x%08x\n", mc->hdr.patch_id);
+ if (__apply_microcode_amd(mc, &rev, p->size))
+ pr_info_once("reload revision: 0x%08x\n", rev);
}
}
static int collect_cpu_info_amd(int cpu, struct cpu_signature *csig)
{
- struct cpuinfo_x86 *c = &cpu_data(cpu);
struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
struct ucode_patch *p;
csig->sig = cpuid_eax(0x00000001);
- csig->rev = c->microcode;
+ csig->rev = get_patch_level();
/*
* a patch could have been loaded early, set uci->mc so that
@@ -815,7 +894,7 @@ static enum ucode_state apply_microcode_amd(int cpu)
goto out;
}
- if (!__apply_microcode_amd(mc_amd, p->size)) {
+ if (!__apply_microcode_amd(mc_amd, &rev, p->size)) {
pr_err("CPU%d: update failed for patch_level=0x%08x\n",
cpu, mc_amd->hdr.patch_id);
return UCODE_ERROR;
@@ -937,8 +1016,7 @@ static int verify_and_add_patch(u8 family, u8 *fw, unsigned int leftover,
}
/* Scan the blob in @data and add microcode patches to the cache. */
-static enum ucode_state __load_microcode_amd(u8 family, const u8 *data,
- size_t size)
+static enum ucode_state __load_microcode_amd(u8 family, const u8 *data, size_t size)
{
u8 *fw = (u8 *)data;
size_t offset;
@@ -996,7 +1074,7 @@ static enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t siz
if (ret != UCODE_OK)
return ret;
- for_each_node(nid) {
+ for_each_node_with_cpus(nid) {
cpu = cpumask_first(cpumask_of_node(nid));
c = &cpu_data(cpu);
@@ -1013,6 +1091,32 @@ static enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t siz
return ret;
}
+static int __init save_microcode_in_initrd(void)
+{
+ unsigned int cpuid_1_eax = native_cpuid_eax(1);
+ struct cpuinfo_x86 *c = &boot_cpu_data;
+ struct cont_desc desc = { 0 };
+ enum ucode_state ret;
+ struct cpio_data cp;
+
+ if (dis_ucode_ldr || c->x86_vendor != X86_VENDOR_AMD || c->x86 < 0x10)
+ return 0;
+
+ if (!find_blobs_in_containers(&cp))
+ return -EINVAL;
+
+ scan_containers(cp.data, cp.size, &desc);
+ if (!desc.mc)
+ return -EINVAL;
+
+ ret = _load_microcode_amd(x86_family(cpuid_1_eax), desc.data, desc.size);
+ if (ret > UCODE_UPDATED)
+ return -EINVAL;
+
+ return 0;
+}
+early_initcall(save_microcode_in_initrd);
+
/*
* AMD microcode firmware naming convention, up to family 15h they are in
* the legacy file:
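The digest check added above keeps the amd_shas.c table sorted by patch_id so cmp_id() can drive a binary search. A self-contained sketch of that lookup with bsearch(3), mirroring the comparator from the hunk; the table entries below are dummies, not real patch IDs or hashes:

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

struct patch_digest { uint32_t patch_id; uint8_t sha256[32]; };

static const struct patch_digest phashes[] = {	/* must stay sorted by patch_id */
	{ 0x8001227, { 0 } },
	{ 0x800126e, { 0 } },
	{ 0xa00820c, { 0 } },
};

static int cmp_id(const void *key, const void *elem)
{
	const struct patch_digest *pd = elem;
	uint32_t patch_id = *(const uint32_t *)key;

	if (patch_id == pd->patch_id)
		return 0;
	return patch_id < pd->patch_id ? -1 : 1;
}

int main(void)
{
	uint32_t id = 0x800126e;
	const struct patch_digest *pd;

	pd = bsearch(&id, phashes, sizeof(phashes) / sizeof(phashes[0]),
		     sizeof(phashes[0]), cmp_id);
	printf("%s\n", pd ? "digest found" : "no digest");
	return 0;
}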
diff --git a/arch/x86/kernel/cpu/microcode/amd_shas.c b/arch/x86/kernel/cpu/microcode/amd_shas.c
new file mode 100644
index 000000000000..2a1655b1fdd8
--- /dev/null
+++ b/arch/x86/kernel/cpu/microcode/amd_shas.c
@@ -0,0 +1,444 @@
+/* Keep 'em sorted. */
+static const struct patch_digest phashes[] = {
+ { 0x8001227, {
+ 0x99,0xc0,0x9b,0x2b,0xcc,0x9f,0x52,0x1b,
+ 0x1a,0x5f,0x1d,0x83,0xa1,0x6c,0xc4,0x46,
+ 0xe2,0x6c,0xda,0x73,0xfb,0x2d,0x23,0xa8,
+ 0x77,0xdc,0x15,0x31,0x33,0x4a,0x46,0x18,
+ }
+ },
+ { 0x8001250, {
+ 0xc0,0x0b,0x6b,0x19,0xfd,0x5c,0x39,0x60,
+ 0xd5,0xc3,0x57,0x46,0x54,0xe4,0xd1,0xaa,
+ 0xa8,0xf7,0x1f,0xa8,0x6a,0x60,0x3e,0xe3,
+ 0x27,0x39,0x8e,0x53,0x30,0xf8,0x49,0x19,
+ }
+ },
+ { 0x800126e, {
+ 0xf3,0x8b,0x2b,0xb6,0x34,0xe3,0xc8,0x2c,
+ 0xef,0xec,0x63,0x6d,0xc8,0x76,0x77,0xb3,
+ 0x25,0x5a,0xb7,0x52,0x8c,0x83,0x26,0xe6,
+ 0x4c,0xbe,0xbf,0xe9,0x7d,0x22,0x6a,0x43,
+ }
+ },
+ { 0x800126f, {
+ 0x2b,0x5a,0xf2,0x9c,0xdd,0xd2,0x7f,0xec,
+ 0xec,0x96,0x09,0x57,0xb0,0x96,0x29,0x8b,
+ 0x2e,0x26,0x91,0xf0,0x49,0x33,0x42,0x18,
+ 0xdd,0x4b,0x65,0x5a,0xd4,0x15,0x3d,0x33,
+ }
+ },
+ { 0x800820d, {
+ 0x68,0x98,0x83,0xcd,0x22,0x0d,0xdd,0x59,
+ 0x73,0x2c,0x5b,0x37,0x1f,0x84,0x0e,0x67,
+ 0x96,0x43,0x83,0x0c,0x46,0x44,0xab,0x7c,
+ 0x7b,0x65,0x9e,0x57,0xb5,0x90,0x4b,0x0e,
+ }
+ },
+ { 0x8301025, {
+ 0xe4,0x7d,0xdb,0x1e,0x14,0xb4,0x5e,0x36,
+ 0x8f,0x3e,0x48,0x88,0x3c,0x6d,0x76,0xa1,
+ 0x59,0xc6,0xc0,0x72,0x42,0xdf,0x6c,0x30,
+ 0x6f,0x0b,0x28,0x16,0x61,0xfc,0x79,0x77,
+ }
+ },
+ { 0x8301055, {
+ 0x81,0x7b,0x99,0x1b,0xae,0x2d,0x4f,0x9a,
+ 0xef,0x13,0xce,0xb5,0x10,0xaf,0x6a,0xea,
+ 0xe5,0xb0,0x64,0x98,0x10,0x68,0x34,0x3b,
+ 0x9d,0x7a,0xd6,0x22,0x77,0x5f,0xb3,0x5b,
+ }
+ },
+ { 0x8301072, {
+ 0xcf,0x76,0xa7,0x1a,0x49,0xdf,0x2a,0x5e,
+ 0x9e,0x40,0x70,0xe5,0xdd,0x8a,0xa8,0x28,
+ 0x20,0xdc,0x91,0xd8,0x2c,0xa6,0xa0,0xb1,
+ 0x2d,0x22,0x26,0x94,0x4b,0x40,0x85,0x30,
+ }
+ },
+ { 0x830107a, {
+ 0x2a,0x65,0x8c,0x1a,0x5e,0x07,0x21,0x72,
+ 0xdf,0x90,0xa6,0x51,0x37,0xd3,0x4b,0x34,
+ 0xc4,0xda,0x03,0xe1,0x8a,0x6c,0xfb,0x20,
+ 0x04,0xb2,0x81,0x05,0xd4,0x87,0xf4,0x0a,
+ }
+ },
+ { 0x830107b, {
+ 0xb3,0x43,0x13,0x63,0x56,0xc1,0x39,0xad,
+ 0x10,0xa6,0x2b,0xcc,0x02,0xe6,0x76,0x2a,
+ 0x1e,0x39,0x58,0x3e,0x23,0x6e,0xa4,0x04,
+ 0x95,0xea,0xf9,0x6d,0xc2,0x8a,0x13,0x19,
+ }
+ },
+ { 0x830107c, {
+ 0x21,0x64,0xde,0xfb,0x9f,0x68,0x96,0x47,
+ 0x70,0x5c,0xe2,0x8f,0x18,0x52,0x6a,0xac,
+ 0xa4,0xd2,0x2e,0xe0,0xde,0x68,0x66,0xc3,
+ 0xeb,0x1e,0xd3,0x3f,0xbc,0x51,0x1d,0x38,
+ }
+ },
+ { 0x860010d, {
+ 0x86,0xb6,0x15,0x83,0xbc,0x3b,0x9c,0xe0,
+ 0xb3,0xef,0x1d,0x99,0x84,0x35,0x15,0xf7,
+ 0x7c,0x2a,0xc6,0x42,0xdb,0x73,0x07,0x5c,
+ 0x7d,0xc3,0x02,0xb5,0x43,0x06,0x5e,0xf8,
+ }
+ },
+ { 0x8608108, {
+ 0x14,0xfe,0x57,0x86,0x49,0xc8,0x68,0xe2,
+ 0x11,0xa3,0xcb,0x6e,0xff,0x6e,0xd5,0x38,
+ 0xfe,0x89,0x1a,0xe0,0x67,0xbf,0xc4,0xcc,
+ 0x1b,0x9f,0x84,0x77,0x2b,0x9f,0xaa,0xbd,
+ }
+ },
+ { 0x8701034, {
+ 0xc3,0x14,0x09,0xa8,0x9c,0x3f,0x8d,0x83,
+ 0x9b,0x4c,0xa5,0xb7,0x64,0x8b,0x91,0x5d,
+ 0x85,0x6a,0x39,0x26,0x1e,0x14,0x41,0xa8,
+ 0x75,0xea,0xa6,0xf9,0xc9,0xd1,0xea,0x2b,
+ }
+ },
+ { 0x8a00008, {
+ 0xd7,0x2a,0x93,0xdc,0x05,0x2f,0xa5,0x6e,
+ 0x0c,0x61,0x2c,0x07,0x9f,0x38,0xe9,0x8e,
+ 0xef,0x7d,0x2a,0x05,0x4d,0x56,0xaf,0x72,
+ 0xe7,0x56,0x47,0x6e,0x60,0x27,0xd5,0x8c,
+ }
+ },
+ { 0x8a0000a, {
+ 0x73,0x31,0x26,0x22,0xd4,0xf9,0xee,0x3c,
+ 0x07,0x06,0xe7,0xb9,0xad,0xd8,0x72,0x44,
+ 0x33,0x31,0xaa,0x7d,0xc3,0x67,0x0e,0xdb,
+ 0x47,0xb5,0xaa,0xbc,0xf5,0xbb,0xd9,0x20,
+ }
+ },
+ { 0xa00104c, {
+ 0x3c,0x8a,0xfe,0x04,0x62,0xd8,0x6d,0xbe,
+ 0xa7,0x14,0x28,0x64,0x75,0xc0,0xa3,0x76,
+ 0xb7,0x92,0x0b,0x97,0x0a,0x8e,0x9c,0x5b,
+ 0x1b,0xc8,0x9d,0x3a,0x1e,0x81,0x3d,0x3b,
+ }
+ },
+ { 0xa00104e, {
+ 0xc4,0x35,0x82,0x67,0xd2,0x86,0xe5,0xb2,
+ 0xfd,0x69,0x12,0x38,0xc8,0x77,0xba,0xe0,
+ 0x70,0xf9,0x77,0x89,0x10,0xa6,0x74,0x4e,
+ 0x56,0x58,0x13,0xf5,0x84,0x70,0x28,0x0b,
+ }
+ },
+ { 0xa001053, {
+ 0x92,0x0e,0xf4,0x69,0x10,0x3b,0xf9,0x9d,
+ 0x31,0x1b,0xa6,0x99,0x08,0x7d,0xd7,0x25,
+ 0x7e,0x1e,0x89,0xba,0x35,0x8d,0xac,0xcb,
+ 0x3a,0xb4,0xdf,0x58,0x12,0xcf,0xc0,0xc3,
+ }
+ },
+ { 0xa001058, {
+ 0x33,0x7d,0xa9,0xb5,0x4e,0x62,0x13,0x36,
+ 0xef,0x66,0xc9,0xbd,0x0a,0xa6,0x3b,0x19,
+ 0xcb,0xf5,0xc2,0xc3,0x55,0x47,0x20,0xec,
+ 0x1f,0x7b,0xa1,0x44,0x0e,0x8e,0xa4,0xb2,
+ }
+ },
+ { 0xa001075, {
+ 0x39,0x02,0x82,0xd0,0x7c,0x26,0x43,0xe9,
+ 0x26,0xa3,0xd9,0x96,0xf7,0x30,0x13,0x0a,
+ 0x8a,0x0e,0xac,0xe7,0x1d,0xdc,0xe2,0x0f,
+ 0xcb,0x9e,0x8d,0xbc,0xd2,0xa2,0x44,0xe0,
+ }
+ },
+ { 0xa001078, {
+ 0x2d,0x67,0xc7,0x35,0xca,0xef,0x2f,0x25,
+ 0x4c,0x45,0x93,0x3f,0x36,0x01,0x8c,0xce,
+ 0xa8,0x5b,0x07,0xd3,0xc1,0x35,0x3c,0x04,
+ 0x20,0xa2,0xfc,0xdc,0xe6,0xce,0x26,0x3e,
+ }
+ },
+ { 0xa001079, {
+ 0x43,0xe2,0x05,0x9c,0xfd,0xb7,0x5b,0xeb,
+ 0x5b,0xe9,0xeb,0x3b,0x96,0xf4,0xe4,0x93,
+ 0x73,0x45,0x3e,0xac,0x8d,0x3b,0xe4,0xdb,
+ 0x10,0x31,0xc1,0xe4,0xa2,0xd0,0x5a,0x8a,
+ }
+ },
+ { 0xa00107a, {
+ 0x5f,0x92,0xca,0xff,0xc3,0x59,0x22,0x5f,
+ 0x02,0xa0,0x91,0x3b,0x4a,0x45,0x10,0xfd,
+ 0x19,0xe1,0x8a,0x6d,0x9a,0x92,0xc1,0x3f,
+ 0x75,0x78,0xac,0x78,0x03,0x1d,0xdb,0x18,
+ }
+ },
+ { 0xa001143, {
+ 0x56,0xca,0xf7,0x43,0x8a,0x4c,0x46,0x80,
+ 0xec,0xde,0xe5,0x9c,0x50,0x84,0x9a,0x42,
+ 0x27,0xe5,0x51,0x84,0x8f,0x19,0xc0,0x8d,
+ 0x0c,0x25,0xb4,0xb0,0x8f,0x10,0xf3,0xf8,
+ }
+ },
+ { 0xa001144, {
+ 0x42,0xd5,0x9b,0xa7,0xd6,0x15,0x29,0x41,
+ 0x61,0xc4,0x72,0x3f,0xf3,0x06,0x78,0x4b,
+ 0x65,0xf3,0x0e,0xfa,0x9c,0x87,0xde,0x25,
+ 0xbd,0xb3,0x9a,0xf4,0x75,0x13,0x53,0xdc,
+ }
+ },
+ { 0xa00115d, {
+ 0xd4,0xc4,0x49,0x36,0x89,0x0b,0x47,0xdd,
+ 0xfb,0x2f,0x88,0x3b,0x5f,0xf2,0x8e,0x75,
+ 0xc6,0x6c,0x37,0x5a,0x90,0x25,0x94,0x3e,
+ 0x36,0x9c,0xae,0x02,0x38,0x6c,0xf5,0x05,
+ }
+ },
+ { 0xa001173, {
+ 0x28,0xbb,0x9b,0xd1,0xa0,0xa0,0x7e,0x3a,
+ 0x59,0x20,0xc0,0xa9,0xb2,0x5c,0xc3,0x35,
+ 0x53,0x89,0xe1,0x4c,0x93,0x2f,0x1d,0xc3,
+ 0xe5,0xf7,0xf3,0xc8,0x9b,0x61,0xaa,0x9e,
+ }
+ },
+ { 0xa0011a8, {
+ 0x97,0xc6,0x16,0x65,0x99,0xa4,0x85,0x3b,
+ 0xf6,0xce,0xaa,0x49,0x4a,0x3a,0xc5,0xb6,
+ 0x78,0x25,0xbc,0x53,0xaf,0x5d,0xcf,0xf4,
+ 0x23,0x12,0xbb,0xb1,0xbc,0x8a,0x02,0x2e,
+ }
+ },
+ { 0xa0011ce, {
+ 0xcf,0x1c,0x90,0xa3,0x85,0x0a,0xbf,0x71,
+ 0x94,0x0e,0x80,0x86,0x85,0x4f,0xd7,0x86,
+ 0xae,0x38,0x23,0x28,0x2b,0x35,0x9b,0x4e,
+ 0xfe,0xb8,0xcd,0x3d,0x3d,0x39,0xc9,0x6a,
+ }
+ },
+ { 0xa0011d1, {
+ 0xdf,0x0e,0xca,0xde,0xf6,0xce,0x5c,0x1e,
+ 0x4c,0xec,0xd7,0x71,0x83,0xcc,0xa8,0x09,
+ 0xc7,0xc5,0xfe,0xb2,0xf7,0x05,0xd2,0xc5,
+ 0x12,0xdd,0xe4,0xf3,0x92,0x1c,0x3d,0xb8,
+ }
+ },
+ { 0xa0011d3, {
+ 0x91,0xe6,0x10,0xd7,0x57,0xb0,0x95,0x0b,
+ 0x9a,0x24,0xee,0xf7,0xcf,0x56,0xc1,0xa6,
+ 0x4a,0x52,0x7d,0x5f,0x9f,0xdf,0xf6,0x00,
+ 0x65,0xf7,0xea,0xe8,0x2a,0x88,0xe2,0x26,
+ }
+ },
+ { 0xa0011d5, {
+ 0xed,0x69,0x89,0xf4,0xeb,0x64,0xc2,0x13,
+ 0xe0,0x51,0x1f,0x03,0x26,0x52,0x7d,0xb7,
+ 0x93,0x5d,0x65,0xca,0xb8,0x12,0x1d,0x62,
+ 0x0d,0x5b,0x65,0x34,0x69,0xb2,0x62,0x21,
+ }
+ },
+ { 0xa001223, {
+ 0xfb,0x32,0x5f,0xc6,0x83,0x4f,0x8c,0xb8,
+ 0xa4,0x05,0xf9,0x71,0x53,0x01,0x16,0xc4,
+ 0x83,0x75,0x94,0xdd,0xeb,0x7e,0xb7,0x15,
+ 0x8e,0x3b,0x50,0x29,0x8a,0x9c,0xcc,0x45,
+ }
+ },
+ { 0xa001224, {
+ 0x0e,0x0c,0xdf,0xb4,0x89,0xee,0x35,0x25,
+ 0xdd,0x9e,0xdb,0xc0,0x69,0x83,0x0a,0xad,
+ 0x26,0xa9,0xaa,0x9d,0xfc,0x3c,0xea,0xf9,
+ 0x6c,0xdc,0xd5,0x6d,0x8b,0x6e,0x85,0x4a,
+ }
+ },
+ { 0xa001227, {
+ 0xab,0xc6,0x00,0x69,0x4b,0x50,0x87,0xad,
+ 0x5f,0x0e,0x8b,0xea,0x57,0x38,0xce,0x1d,
+ 0x0f,0x75,0x26,0x02,0xf6,0xd6,0x96,0xe9,
+ 0x87,0xb9,0xd6,0x20,0x27,0x7c,0xd2,0xe0,
+ }
+ },
+ { 0xa001229, {
+ 0x7f,0x49,0x49,0x48,0x46,0xa5,0x50,0xa6,
+ 0x28,0x89,0x98,0xe2,0x9e,0xb4,0x7f,0x75,
+ 0x33,0xa7,0x04,0x02,0xe4,0x82,0xbf,0xb4,
+ 0xa5,0x3a,0xba,0x24,0x8d,0x31,0x10,0x1d,
+ }
+ },
+ { 0xa00122e, {
+ 0x56,0x94,0xa9,0x5d,0x06,0x68,0xfe,0xaf,
+ 0xdf,0x7a,0xff,0x2d,0xdf,0x74,0x0f,0x15,
+ 0x66,0xfb,0x00,0xb5,0x51,0x97,0x9b,0xfa,
+ 0xcb,0x79,0x85,0x46,0x25,0xb4,0xd2,0x10,
+ }
+ },
+ { 0xa001231, {
+ 0x0b,0x46,0xa5,0xfc,0x18,0x15,0xa0,0x9e,
+ 0xa6,0xdc,0xb7,0xff,0x17,0xf7,0x30,0x64,
+ 0xd4,0xda,0x9e,0x1b,0xc3,0xfc,0x02,0x3b,
+ 0xe2,0xc6,0x0e,0x41,0x54,0xb5,0x18,0xdd,
+ }
+ },
+ { 0xa001234, {
+ 0x88,0x8d,0xed,0xab,0xb5,0xbd,0x4e,0xf7,
+ 0x7f,0xd4,0x0e,0x95,0x34,0x91,0xff,0xcc,
+ 0xfb,0x2a,0xcd,0xf7,0xd5,0xdb,0x4c,0x9b,
+ 0xd6,0x2e,0x73,0x50,0x8f,0x83,0x79,0x1a,
+ }
+ },
+ { 0xa001236, {
+ 0x3d,0x30,0x00,0xb9,0x71,0xba,0x87,0x78,
+ 0xa8,0x43,0x55,0xc4,0x26,0x59,0xcf,0x9d,
+ 0x93,0xce,0x64,0x0e,0x8b,0x72,0x11,0x8b,
+ 0xa3,0x8f,0x51,0xe9,0xca,0x98,0xaa,0x25,
+ }
+ },
+ { 0xa001238, {
+ 0x72,0xf7,0x4b,0x0c,0x7d,0x58,0x65,0xcc,
+ 0x00,0xcc,0x57,0x16,0x68,0x16,0xf8,0x2a,
+ 0x1b,0xb3,0x8b,0xe1,0xb6,0x83,0x8c,0x7e,
+ 0xc0,0xcd,0x33,0xf2,0x8d,0xf9,0xef,0x59,
+ }
+ },
+ { 0xa00820c, {
+ 0xa8,0x0c,0x81,0xc0,0xa6,0x00,0xe7,0xf3,
+ 0x5f,0x65,0xd3,0xb9,0x6f,0xea,0x93,0x63,
+ 0xf1,0x8c,0x88,0x45,0xd7,0x82,0x80,0xd1,
+ 0xe1,0x3b,0x8d,0xb2,0xf8,0x22,0x03,0xe2,
+ }
+ },
+ { 0xa10113e, {
+ 0x05,0x3c,0x66,0xd7,0xa9,0x5a,0x33,0x10,
+ 0x1b,0xf8,0x9c,0x8f,0xed,0xfc,0xa7,0xa0,
+ 0x15,0xe3,0x3f,0x4b,0x1d,0x0d,0x0a,0xd5,
+ 0xfa,0x90,0xc4,0xed,0x9d,0x90,0xaf,0x53,
+ }
+ },
+ { 0xa101144, {
+ 0xb3,0x0b,0x26,0x9a,0xf8,0x7c,0x02,0x26,
+ 0x35,0x84,0x53,0xa4,0xd3,0x2c,0x7c,0x09,
+ 0x68,0x7b,0x96,0xb6,0x93,0xef,0xde,0xbc,
+ 0xfd,0x4b,0x15,0xd2,0x81,0xd3,0x51,0x47,
+ }
+ },
+ { 0xa101148, {
+ 0x20,0xd5,0x6f,0x40,0x4a,0xf6,0x48,0x90,
+ 0xc2,0x93,0x9a,0xc2,0xfd,0xac,0xef,0x4f,
+ 0xfa,0xc0,0x3d,0x92,0x3c,0x6d,0x01,0x08,
+ 0xf1,0x5e,0xb0,0xde,0xb4,0x98,0xae,0xc4,
+ }
+ },
+ { 0xa10123e, {
+ 0x03,0xb9,0x2c,0x76,0x48,0x93,0xc9,0x18,
+ 0xfb,0x56,0xfd,0xf7,0xe2,0x1d,0xca,0x4d,
+ 0x1d,0x13,0x53,0x63,0xfe,0x42,0x6f,0xfc,
+ 0x19,0x0f,0xf1,0xfc,0xa7,0xdd,0x89,0x1b,
+ }
+ },
+ { 0xa101244, {
+ 0x71,0x56,0xb5,0x9f,0x21,0xbf,0xb3,0x3c,
+ 0x8c,0xd7,0x36,0xd0,0x34,0x52,0x1b,0xb1,
+ 0x46,0x2f,0x04,0xf0,0x37,0xd8,0x1e,0x72,
+ 0x24,0xa2,0x80,0x84,0x83,0x65,0x84,0xc0,
+ }
+ },
+ { 0xa101248, {
+ 0xed,0x3b,0x95,0xa6,0x68,0xa7,0x77,0x3e,
+ 0xfc,0x17,0x26,0xe2,0x7b,0xd5,0x56,0x22,
+ 0x2c,0x1d,0xef,0xeb,0x56,0xdd,0xba,0x6e,
+ 0x1b,0x7d,0x64,0x9d,0x4b,0x53,0x13,0x75,
+ }
+ },
+ { 0xa108108, {
+ 0xed,0xc2,0xec,0xa1,0x15,0xc6,0x65,0xe9,
+ 0xd0,0xef,0x39,0xaa,0x7f,0x55,0x06,0xc6,
+ 0xf5,0xd4,0x3f,0x7b,0x14,0xd5,0x60,0x2c,
+ 0x28,0x1e,0x9c,0x59,0x69,0x99,0x4d,0x16,
+ }
+ },
+ { 0xa20102d, {
+ 0xf9,0x6e,0xf2,0x32,0xd3,0x0f,0x5f,0x11,
+ 0x59,0xa1,0xfe,0xcc,0xcd,0x9b,0x42,0x89,
+ 0x8b,0x89,0x2f,0xb5,0xbb,0x82,0xef,0x23,
+ 0x8c,0xe9,0x19,0x3e,0xcc,0x3f,0x7b,0xb4,
+ }
+ },
+ { 0xa201210, {
+ 0xe8,0x6d,0x51,0x6a,0x8e,0x72,0xf3,0xfe,
+ 0x6e,0x16,0xbc,0x62,0x59,0x40,0x17,0xe9,
+ 0x6d,0x3d,0x0e,0x6b,0xa7,0xac,0xe3,0x68,
+ 0xf7,0x55,0xf0,0x13,0xbb,0x22,0xf6,0x41,
+ }
+ },
+ { 0xa404107, {
+ 0xbb,0x04,0x4e,0x47,0xdd,0x5e,0x26,0x45,
+ 0x1a,0xc9,0x56,0x24,0xa4,0x4c,0x82,0xb0,
+ 0x8b,0x0d,0x9f,0xf9,0x3a,0xdf,0xc6,0x81,
+ 0x13,0xbc,0xc5,0x25,0xe4,0xc5,0xc3,0x99,
+ }
+ },
+ { 0xa500011, {
+ 0x23,0x3d,0x70,0x7d,0x03,0xc3,0xc4,0xf4,
+ 0x2b,0x82,0xc6,0x05,0xda,0x80,0x0a,0xf1,
+ 0xd7,0x5b,0x65,0x3a,0x7d,0xab,0xdf,0xa2,
+ 0x11,0x5e,0x96,0x7e,0x71,0xe9,0xfc,0x74,
+ }
+ },
+ { 0xa601209, {
+ 0x66,0x48,0xd4,0x09,0x05,0xcb,0x29,0x32,
+ 0x66,0xb7,0x9a,0x76,0xcd,0x11,0xf3,0x30,
+ 0x15,0x86,0xcc,0x5d,0x97,0x0f,0xc0,0x46,
+ 0xe8,0x73,0xe2,0xd6,0xdb,0xd2,0x77,0x1d,
+ }
+ },
+ { 0xa704107, {
+ 0xf3,0xc6,0x58,0x26,0xee,0xac,0x3f,0xd6,
+ 0xce,0xa1,0x72,0x47,0x3b,0xba,0x2b,0x93,
+ 0x2a,0xad,0x8e,0x6b,0xea,0x9b,0xb7,0xc2,
+ 0x64,0x39,0x71,0x8c,0xce,0xe7,0x41,0x39,
+ }
+ },
+ { 0xa705206, {
+ 0x8d,0xc0,0x76,0xbd,0x58,0x9f,0x8f,0xa4,
+ 0x12,0x9d,0x21,0xfb,0x48,0x21,0xbc,0xe7,
+ 0x67,0x6f,0x04,0x18,0xae,0x20,0x87,0x4b,
+ 0x03,0x35,0xe9,0xbe,0xfb,0x06,0xdf,0xfc,
+ }
+ },
+ { 0xa708007, {
+ 0x6b,0x76,0xcc,0x78,0xc5,0x8a,0xa3,0xe3,
+ 0x32,0x2d,0x79,0xe4,0xc3,0x80,0xdb,0xb2,
+ 0x07,0xaa,0x3a,0xe0,0x57,0x13,0x72,0x80,
+ 0xdf,0x92,0x73,0x84,0x87,0x3c,0x73,0x93,
+ }
+ },
+ { 0xa70c005, {
+ 0x88,0x5d,0xfb,0x79,0x64,0xd8,0x46,0x3b,
+ 0x4a,0x83,0x8e,0x77,0x7e,0xcf,0xb3,0x0f,
+ 0x1f,0x1f,0xf1,0x97,0xeb,0xfe,0x56,0x55,
+ 0xee,0x49,0xac,0xe1,0x8b,0x13,0xc5,0x13,
+ }
+ },
+ { 0xaa00116, {
+ 0xe8,0x4c,0x2c,0x88,0xa1,0xac,0x24,0x63,
+ 0x65,0xe5,0xaa,0x2d,0x16,0xa9,0xc3,0xf5,
+ 0xfe,0x1d,0x5e,0x65,0xc7,0xaa,0x92,0x4d,
+ 0x91,0xee,0x76,0xbb,0x4c,0x66,0x78,0xc9,
+ }
+ },
+ { 0xaa00212, {
+ 0xbd,0x57,0x5d,0x0a,0x0a,0x30,0xc1,0x75,
+ 0x95,0x58,0x5e,0x93,0x02,0x28,0x43,0x71,
+ 0xed,0x42,0x29,0xc8,0xec,0x34,0x2b,0xb2,
+ 0x1a,0x65,0x4b,0xfe,0x07,0x0f,0x34,0xa1,
+ }
+ },
+ { 0xaa00213, {
+ 0xed,0x58,0xb7,0x76,0x81,0x7f,0xd9,0x3a,
+ 0x1a,0xff,0x8b,0x34,0xb8,0x4a,0x99,0x0f,
+ 0x28,0x49,0x6c,0x56,0x2b,0xdc,0xb7,0xed,
+ 0x96,0xd5,0x9d,0xc1,0x7a,0xd4,0x51,0x9b,
+ }
+ },
+ { 0xaa00215, {
+ 0x55,0xd3,0x28,0xcb,0x87,0xa9,0x32,0xe9,
+ 0x4e,0x85,0x4b,0x7c,0x6b,0xd5,0x7c,0xd4,
+ 0x1b,0x51,0x71,0x3a,0x0e,0x0b,0xdc,0x9b,
+ 0x68,0x2f,0x46,0xee,0xfe,0xc6,0x6d,0xef,
+ }
+ },
+};
diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
index f3d534807d91..819199bc0119 100644
--- a/arch/x86/kernel/cpu/microcode/intel.c
+++ b/arch/x86/kernel/cpu/microcode/intel.c
@@ -74,7 +74,7 @@ void intel_collect_cpu_info(struct cpu_signature *sig)
sig->pf = 0;
sig->rev = intel_get_microcode_revision();
- if (x86_model(sig->sig) >= 5 || x86_family(sig->sig) > 6) {
+ if (IFM(x86_family(sig->sig), x86_model(sig->sig)) >= INTEL_PENTIUM_III_DESCHUTES) {
unsigned int val[2];
/* get processor flags from MSR 0x17 */
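The intel.c change folds the old two-part test (model >= 5 on family 6, or any later family) into one ordered compare by packing family and model into a single integer, which is what IFM() and the INTEL_PENTIUM_III_DESCHUTES constant provide. A sketch of the idea; the bit layout used here (family << 8 | model) is a simplification for illustration, the kernel's encoding also carries the vendor:

#include <stdio.h>

#define FM(fam, model)	(((fam) << 8) | (model))

int main(void)
{
	unsigned int cutoff = FM(6, 5);		/* family 6, model 5 */

	printf("%d\n", FM(6, 3)  >= cutoff);	/* 0: family 6, model 3 is below */
	printf("%d\n", FM(15, 0) >= cutoff);	/* 1: any later family is above  */
	return 0;
}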
diff --git a/arch/x86/kernel/cpu/microcode/internal.h b/arch/x86/kernel/cpu/microcode/internal.h
index 21776c529fa9..5df621752fef 100644
--- a/arch/x86/kernel/cpu/microcode/internal.h
+++ b/arch/x86/kernel/cpu/microcode/internal.h
@@ -100,14 +100,12 @@ extern bool force_minrev;
#ifdef CONFIG_CPU_SUP_AMD
void load_ucode_amd_bsp(struct early_load_data *ed, unsigned int family);
void load_ucode_amd_ap(unsigned int family);
-int save_microcode_in_initrd_amd(unsigned int family);
void reload_ucode_amd(unsigned int cpu);
struct microcode_ops *init_amd_microcode(void);
void exit_amd_microcode(void);
#else /* CONFIG_CPU_SUP_AMD */
static inline void load_ucode_amd_bsp(struct early_load_data *ed, unsigned int family) { }
static inline void load_ucode_amd_ap(unsigned int family) { }
-static inline int save_microcode_in_initrd_amd(unsigned int family) { return -EINVAL; }
static inline void reload_ucode_amd(unsigned int cpu) { }
static inline struct microcode_ops *init_amd_microcode(void) { return NULL; }
static inline void exit_amd_microcode(void) { }
diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c
index f285757618fc..3e2533954675 100644
--- a/arch/x86/kernel/cpu/mshyperv.c
+++ b/arch/x86/kernel/cpu/mshyperv.c
@@ -33,8 +33,6 @@
#include <asm/numa.h>
#include <asm/svm.h>
-/* Is Linux running as the root partition? */
-bool hv_root_partition;
/* Is Linux running on nested Microsoft Hypervisor */
bool hv_nested;
struct ms_hyperv_info ms_hyperv;
@@ -109,6 +107,7 @@ void hv_set_msr(unsigned int reg, u64 value)
}
EXPORT_SYMBOL_GPL(hv_set_msr);
+static void (*mshv_handler)(void);
static void (*vmbus_handler)(void);
static void (*hv_stimer0_handler)(void);
static void (*hv_kexec_handler)(void);
@@ -119,6 +118,9 @@ DEFINE_IDTENTRY_SYSVEC(sysvec_hyperv_callback)
struct pt_regs *old_regs = set_irq_regs(regs);
inc_irq_stat(irq_hv_callback_count);
+ if (mshv_handler)
+ mshv_handler();
+
if (vmbus_handler)
vmbus_handler();
@@ -128,6 +130,11 @@ DEFINE_IDTENTRY_SYSVEC(sysvec_hyperv_callback)
set_irq_regs(old_regs);
}
+void hv_setup_mshv_handler(void (*handler)(void))
+{
+ mshv_handler = handler;
+}
+
void hv_setup_vmbus_handler(void (*handler)(void))
{
vmbus_handler = handler;
@@ -422,6 +429,7 @@ int hv_get_hypervisor_version(union hv_hypervisor_version_info *info)
return 0;
}
+EXPORT_SYMBOL_GPL(hv_get_hypervisor_version);
static void __init ms_hyperv_init_platform(void)
{
@@ -436,13 +444,15 @@ static void __init ms_hyperv_init_platform(void)
*/
ms_hyperv.features = cpuid_eax(HYPERV_CPUID_FEATURES);
ms_hyperv.priv_high = cpuid_ebx(HYPERV_CPUID_FEATURES);
+ ms_hyperv.ext_features = cpuid_ecx(HYPERV_CPUID_FEATURES);
ms_hyperv.misc_features = cpuid_edx(HYPERV_CPUID_FEATURES);
ms_hyperv.hints = cpuid_eax(HYPERV_CPUID_ENLIGHTMENT_INFO);
hv_max_functions_eax = cpuid_eax(HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS);
- pr_info("Hyper-V: privilege flags low 0x%x, high 0x%x, hints 0x%x, misc 0x%x\n",
- ms_hyperv.features, ms_hyperv.priv_high, ms_hyperv.hints,
+ pr_info("Hyper-V: privilege flags low %#x, high %#x, ext %#x, hints %#x, misc %#x\n",
+ ms_hyperv.features, ms_hyperv.priv_high,
+ ms_hyperv.ext_features, ms_hyperv.hints,
ms_hyperv.misc_features);
ms_hyperv.max_vp_index = cpuid_eax(HYPERV_CPUID_IMPLEMENT_LIMITS);
@@ -451,25 +461,7 @@ static void __init ms_hyperv_init_platform(void)
pr_debug("Hyper-V: max %u virtual processors, %u logical processors\n",
ms_hyperv.max_vp_index, ms_hyperv.max_lp_index);
- /*
- * Check CPU management privilege.
- *
- * To mirror what Windows does we should extract CPU management
- * features and use the ReservedIdentityBit to detect if Linux is the
- * root partition. But that requires negotiating CPU management
- * interface (a process to be finalized). For now, use the privilege
- * flag as the indicator for running as root.
- *
- * Hyper-V should never specify running as root and as a Confidential
- * VM. But to protect against a compromised/malicious Hyper-V trying
- * to exploit root behavior to expose Confidential VM memory, ignore
- * the root partition setting if also a Confidential VM.
- */
- if ((ms_hyperv.priv_high & HV_CPU_MANAGEMENT) &&
- !(ms_hyperv.priv_high & HV_ISOLATION)) {
- hv_root_partition = true;
- pr_info("Hyper-V: running as root partition\n");
- }
+ hv_identify_partition_type();
if (ms_hyperv.hints & HV_X64_HYPERV_NESTED) {
hv_nested = true;
@@ -618,7 +610,7 @@ static void __init ms_hyperv_init_platform(void)
# ifdef CONFIG_SMP
smp_ops.smp_prepare_boot_cpu = hv_smp_prepare_boot_cpu;
- if (hv_root_partition ||
+ if (hv_root_partition() ||
(!ms_hyperv.paravisor_present && hv_isolation_type_snp()))
smp_ops.smp_prepare_cpus = hv_smp_prepare_cpus;
# endif
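The new mshv_handler follows the same pattern as the existing vmbus/stimer0 hooks: one function pointer, registered once, NULL-checked on every callback interrupt. A minimal sketch of that register-then-dispatch shape; the names are illustrative:

#include <stdio.h>

static void (*mshv_handler)(void);

static void setup_mshv_handler(void (*handler)(void))
{
	mshv_handler = handler;
}

static void callback_isr(void)
{
	if (mshv_handler)
		mshv_handler();
	/* ... existing vmbus / stimer0 dispatch would follow ... */
}

static void my_handler(void) { puts("mshv event"); }

int main(void)
{
	callback_isr();			/* nothing registered yet: no output */
	setup_mshv_handler(my_handler);
	callback_isr();			/* prints "mshv event" */
	return 0;
}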
diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c
index 2fdfda2b60e4..e2c6b471d230 100644
--- a/arch/x86/kernel/cpu/mtrr/generic.c
+++ b/arch/x86/kernel/cpu/mtrr/generic.c
@@ -9,9 +9,11 @@
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/cc_platform.h>
+#include <linux/string_choices.h>
#include <asm/processor-flags.h>
#include <asm/cacheinfo.h>
#include <asm/cpufeature.h>
+#include <asm/cpu_device_id.h>
#include <asm/hypervisor.h>
#include <asm/mshyperv.h>
#include <asm/tlbflush.h>
@@ -646,10 +648,10 @@ static void __init print_mtrr_state(void)
pr_info("MTRR default type: %s\n",
mtrr_attrib_to_str(mtrr_state.def_type));
if (mtrr_state.have_fixed) {
- pr_info("MTRR fixed ranges %sabled:\n",
- ((mtrr_state.enabled & MTRR_STATE_MTRR_ENABLED) &&
- (mtrr_state.enabled & MTRR_STATE_MTRR_FIXED_ENABLED)) ?
- "en" : "dis");
+ pr_info("MTRR fixed ranges %s:\n",
+ str_enabled_disabled(
+ (mtrr_state.enabled & MTRR_STATE_MTRR_ENABLED) &&
+ (mtrr_state.enabled & MTRR_STATE_MTRR_FIXED_ENABLED)));
print_fixed(0x00000, 0x10000, mtrr_state.fixed_ranges + 0);
for (i = 0; i < 2; ++i)
print_fixed(0x80000 + i * 0x20000, 0x04000,
@@ -661,8 +663,8 @@ static void __init print_mtrr_state(void)
/* tail */
print_fixed_last();
}
- pr_info("MTRR variable ranges %sabled:\n",
- mtrr_state.enabled & MTRR_STATE_MTRR_ENABLED ? "en" : "dis");
+ pr_info("MTRR variable ranges %s:\n",
+ str_enabled_disabled(mtrr_state.enabled & MTRR_STATE_MTRR_ENABLED));
high_width = (boot_cpu_data.x86_phys_bits - (32 - PAGE_SHIFT) + 3) / 4;
for (i = 0; i < num_var_ranges; ++i) {
@@ -1025,8 +1027,7 @@ int generic_validate_add_page(unsigned long base, unsigned long size,
* For Intel PPro stepping <= 7
* must be 4 MiB aligned and not touch 0x70000000 -> 0x7003FFFF
*/
- if (mtrr_if == &generic_mtrr_ops && boot_cpu_data.x86 == 6 &&
- boot_cpu_data.x86_model == 1 &&
+ if (mtrr_if == &generic_mtrr_ops && boot_cpu_data.x86_vfm == INTEL_PENTIUM_PRO &&
boot_cpu_data.x86_stepping <= 7) {
if (base & ((1 << (22 - PAGE_SHIFT)) - 1)) {
pr_warn("mtrr: base(0x%lx000) is not 4 MiB aligned\n", base);
diff --git a/arch/x86/kernel/cpu/mtrr/if.c b/arch/x86/kernel/cpu/mtrr/if.c
index a5c506f6da7f..4049235b1bfe 100644
--- a/arch/x86/kernel/cpu/mtrr/if.c
+++ b/arch/x86/kernel/cpu/mtrr/if.c
@@ -99,7 +99,6 @@ mtrr_write(struct file *file, const char __user *buf, size_t len, loff_t * ppos)
char *ptr;
char line[LINE_SIZE];
int length;
- size_t linelen;
memset(line, 0, LINE_SIZE);
@@ -108,9 +107,8 @@ mtrr_write(struct file *file, const char __user *buf, size_t len, loff_t * ppos)
if (length < 0)
return length;
- linelen = strlen(line);
- ptr = line + linelen - 1;
- if (linelen && *ptr == '\n')
+ ptr = line + length - 1;
+ if (length && *ptr == '\n')
*ptr = '\0';
if (!strncmp(line, "disable=", 8)) {
diff --git a/arch/x86/kernel/cpu/proc.c b/arch/x86/kernel/cpu/proc.c
index 41ed01f46bd9..6571d432cbe3 100644
--- a/arch/x86/kernel/cpu/proc.c
+++ b/arch/x86/kernel/cpu/proc.c
@@ -86,9 +86,12 @@ static int show_cpuinfo(struct seq_file *m, void *v)
seq_printf(m, "microcode\t: 0x%x\n", c->microcode);
if (cpu_has(c, X86_FEATURE_TSC)) {
- unsigned int freq = arch_freq_get_on_cpu(cpu);
+ int freq = arch_freq_get_on_cpu(cpu);
- seq_printf(m, "cpu MHz\t\t: %u.%03u\n", freq / 1000, (freq % 1000));
+ if (freq < 0)
+ seq_puts(m, "cpu MHz\t\t: Unknown\n");
+ else
+ seq_printf(m, "cpu MHz\t\t: %u.%03u\n", freq / 1000, (freq % 1000));
}
/* Cache size */
diff --git a/arch/x86/kernel/cpu/resctrl/Makefile b/arch/x86/kernel/cpu/resctrl/Makefile
index 4a06c37b9cf1..0c13b0befd8a 100644
--- a/arch/x86/kernel/cpu/resctrl/Makefile
+++ b/arch/x86/kernel/cpu/resctrl/Makefile
@@ -1,4 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_X86_CPU_RESCTRL) += core.o rdtgroup.o monitor.o
-obj-$(CONFIG_X86_CPU_RESCTRL) += ctrlmondata.o pseudo_lock.o
+obj-$(CONFIG_X86_CPU_RESCTRL) += core.o rdtgroup.o monitor.o
+obj-$(CONFIG_X86_CPU_RESCTRL) += ctrlmondata.o
+obj-$(CONFIG_RESCTRL_FS_PSEUDO_LOCK) += pseudo_lock.o
CFLAGS_pseudo_lock.o = -I$(src)
diff --git a/arch/x86/kernel/cpu/resctrl/core.c b/arch/x86/kernel/cpu/resctrl/core.c
index 3d1735ed8d1f..cf29681d01e0 100644
--- a/arch/x86/kernel/cpu/resctrl/core.c
+++ b/arch/x86/kernel/cpu/resctrl/core.c
@@ -44,12 +44,6 @@ static DEFINE_MUTEX(domain_list_lock);
DEFINE_PER_CPU(struct resctrl_pqr_state, pqr_state);
/*
- * Used to store the max resource name width and max resource data width
- * to display the schemata in a tabular format
- */
-int max_name_width, max_data_width;
-
-/*
* Global boolean for rdt_alloc which is true if any
* resource allocation is enabled.
*/
@@ -62,7 +56,7 @@ static void mba_wrmsr_amd(struct msr_param *m);
#define ctrl_domain_init(id) LIST_HEAD_INIT(rdt_resources_all[id].r_resctrl.ctrl_domains)
#define mon_domain_init(id) LIST_HEAD_INIT(rdt_resources_all[id].r_resctrl.mon_domains)
-struct rdt_hw_resource rdt_resources_all[] = {
+struct rdt_hw_resource rdt_resources_all[RDT_NUM_RESOURCES] = {
[RDT_RESOURCE_L3] =
{
.r_resctrl = {
@@ -72,9 +66,7 @@ struct rdt_hw_resource rdt_resources_all[] = {
.mon_scope = RESCTRL_L3_CACHE,
.ctrl_domains = ctrl_domain_init(RDT_RESOURCE_L3),
.mon_domains = mon_domain_init(RDT_RESOURCE_L3),
- .parse_ctrlval = parse_cbm,
- .format_str = "%d=%0*x",
- .fflags = RFTYPE_RES_CACHE,
+ .schema_fmt = RESCTRL_SCHEMA_BITMAP,
},
.msr_base = MSR_IA32_L3_CBM_BASE,
.msr_update = cat_wrmsr,
@@ -86,9 +78,7 @@ struct rdt_hw_resource rdt_resources_all[] = {
.name = "L2",
.ctrl_scope = RESCTRL_L2_CACHE,
.ctrl_domains = ctrl_domain_init(RDT_RESOURCE_L2),
- .parse_ctrlval = parse_cbm,
- .format_str = "%d=%0*x",
- .fflags = RFTYPE_RES_CACHE,
+ .schema_fmt = RESCTRL_SCHEMA_BITMAP,
},
.msr_base = MSR_IA32_L2_CBM_BASE,
.msr_update = cat_wrmsr,
@@ -100,9 +90,7 @@ struct rdt_hw_resource rdt_resources_all[] = {
.name = "MB",
.ctrl_scope = RESCTRL_L3_CACHE,
.ctrl_domains = ctrl_domain_init(RDT_RESOURCE_MBA),
- .parse_ctrlval = parse_bw,
- .format_str = "%d=%*u",
- .fflags = RFTYPE_RES_MB,
+ .schema_fmt = RESCTRL_SCHEMA_RANGE,
},
},
[RDT_RESOURCE_SMBA] =
@@ -112,9 +100,7 @@ struct rdt_hw_resource rdt_resources_all[] = {
.name = "SMBA",
.ctrl_scope = RESCTRL_L3_CACHE,
.ctrl_domains = ctrl_domain_init(RDT_RESOURCE_SMBA),
- .parse_ctrlval = parse_bw,
- .format_str = "%d=%*u",
- .fflags = RFTYPE_RES_MB,
+ .schema_fmt = RESCTRL_SCHEMA_RANGE,
},
},
};
@@ -127,6 +113,14 @@ u32 resctrl_arch_system_num_rmid_idx(void)
return r->num_rmid;
}
+struct rdt_resource *resctrl_arch_get_resource(enum resctrl_res_level l)
+{
+ if (l >= RDT_NUM_RESOURCES)
+ return NULL;
+
+ return &rdt_resources_all[l].r_resctrl;
+}
+
/*
* cache_alloc_hsw_probe() - Have to probe for Intel haswell server CPUs
* as they do not have CPUID enumeration support for Cache allocation.
@@ -161,7 +155,6 @@ static inline void cache_alloc_hsw_probe(void)
return;
hw_res->num_closid = 4;
- r->default_ctrl = max_cbm;
r->cache.cbm_len = 20;
r->cache.shareable_bits = 0xc0000;
r->cache.min_cbm_bits = 2;
@@ -174,7 +167,7 @@ static inline void cache_alloc_hsw_probe(void)
bool is_mba_sc(struct rdt_resource *r)
{
if (!r)
- return rdt_resources_all[RDT_RESOURCE_MBA].r_resctrl.membw.mba_sc;
+ r = resctrl_arch_get_resource(RDT_RESOURCE_MBA);
/*
* The software controller support is only applicable to MBA resource.
@@ -217,7 +210,7 @@ static __init bool __get_mem_config_intel(struct rdt_resource *r)
cpuid_count(0x00000010, 3, &eax.full, &ebx, &ecx, &edx.full);
hw_res->num_closid = edx.split.cos_max + 1;
max_delay = eax.split.max_delay + 1;
- r->default_ctrl = MAX_MBA_BW;
+ r->membw.max_bw = MAX_MBA_BW;
r->membw.arch_needs_linear = true;
if (ecx & MBA_IS_LINEAR) {
r->membw.delay_linear = true;
@@ -228,16 +221,12 @@ static __init bool __get_mem_config_intel(struct rdt_resource *r)
return false;
r->membw.arch_needs_linear = false;
}
- r->data_width = 3;
if (boot_cpu_has(X86_FEATURE_PER_THREAD_MBA))
r->membw.throttle_mode = THREAD_THROTTLE_PER_THREAD;
else
r->membw.throttle_mode = THREAD_THROTTLE_MAX;
- resctrl_file_fflags_init("thread_throttle_mode",
- RFTYPE_CTRL_INFO | RFTYPE_RES_MB);
-
r->alloc_capable = true;
return true;
@@ -256,7 +245,7 @@ static __init bool __rdt_get_mem_config_amd(struct rdt_resource *r)
cpuid_count(0x80000020, subleaf, &eax, &ebx, &ecx, &edx);
hw_res->num_closid = edx + 1;
- r->default_ctrl = 1 << eax;
+ r->membw.max_bw = 1 << eax;
/* AMD does not use delay */
r->membw.delay_linear = false;
@@ -269,8 +258,6 @@ static __init bool __rdt_get_mem_config_amd(struct rdt_resource *r)
r->membw.throttle_mode = THREAD_THROTTLE_UNDEFINED;
r->membw.min_bw = 0;
r->membw.bw_gran = 1;
- /* Max value is 2048, Data width should be 4 in decimal */
- r->data_width = 4;
r->alloc_capable = true;
@@ -283,14 +270,13 @@ static void rdt_get_cache_alloc_cfg(int idx, struct rdt_resource *r)
union cpuid_0x10_1_eax eax;
union cpuid_0x10_x_ecx ecx;
union cpuid_0x10_x_edx edx;
- u32 ebx;
+ u32 ebx, default_ctrl;
cpuid_count(0x00000010, idx, &eax.full, &ebx, &ecx.full, &edx.full);
hw_res->num_closid = edx.split.cos_max + 1;
r->cache.cbm_len = eax.split.cbm_len + 1;
- r->default_ctrl = BIT_MASK(eax.split.cbm_len + 1) - 1;
- r->cache.shareable_bits = ebx & r->default_ctrl;
- r->data_width = (r->cache.cbm_len + 3) / 4;
+ default_ctrl = BIT_MASK(eax.split.cbm_len + 1) - 1;
+ r->cache.shareable_bits = ebx & default_ctrl;
if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
r->cache.arch_has_sparse_bitmasks = ecx.split.noncont;
r->alloc_capable = true;
@@ -337,7 +323,7 @@ static u32 delay_bw_map(unsigned long bw, struct rdt_resource *r)
return MAX_MBA_BW - bw;
pr_warn_once("Non Linear delay-bw map not supported but queried\n");
- return r->default_ctrl;
+ return MAX_MBA_BW;
}
static void mba_wrmsr_intel(struct msr_param *m)
@@ -361,36 +347,6 @@ static void cat_wrmsr(struct msr_param *m)
wrmsrl(hw_res->msr_base + i, hw_dom->ctrl_val[i]);
}
-struct rdt_ctrl_domain *get_ctrl_domain_from_cpu(int cpu, struct rdt_resource *r)
-{
- struct rdt_ctrl_domain *d;
-
- lockdep_assert_cpus_held();
-
- list_for_each_entry(d, &r->ctrl_domains, hdr.list) {
- /* Find the domain that contains this CPU */
- if (cpumask_test_cpu(cpu, &d->hdr.cpu_mask))
- return d;
- }
-
- return NULL;
-}
-
-struct rdt_mon_domain *get_mon_domain_from_cpu(int cpu, struct rdt_resource *r)
-{
- struct rdt_mon_domain *d;
-
- lockdep_assert_cpus_held();
-
- list_for_each_entry(d, &r->mon_domains, hdr.list) {
- /* Find the domain that contains this CPU */
- if (cpumask_test_cpu(cpu, &d->hdr.cpu_mask))
- return d;
- }
-
- return NULL;
-}
-
u32 resctrl_arch_get_num_closid(struct rdt_resource *r)
{
return resctrl_to_arch_res(r)->num_closid;
@@ -405,36 +361,6 @@ void rdt_ctrl_update(void *arg)
hw_res->msr_update(m);
}
-/*
- * rdt_find_domain - Search for a domain id in a resource domain list.
- *
- * Search the domain list to find the domain id. If the domain id is
- * found, return the domain. NULL otherwise. If the domain id is not
- * found (and NULL returned) then the first domain with id bigger than
- * the input id can be returned to the caller via @pos.
- */
-struct rdt_domain_hdr *rdt_find_domain(struct list_head *h, int id,
- struct list_head **pos)
-{
- struct rdt_domain_hdr *d;
- struct list_head *l;
-
- list_for_each(l, h) {
- d = list_entry(l, struct rdt_domain_hdr, list);
- /* When id is found, return its domain. */
- if (id == d->id)
- return d;
- /* Stop searching when finding id's position in sorted list. */
- if (id < d->id)
- break;
- }
-
- if (pos)
- *pos = l;
-
- return NULL;
-}
-
static void setup_default_ctrlval(struct rdt_resource *r, u32 *dc)
{
struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
@@ -446,7 +372,7 @@ static void setup_default_ctrlval(struct rdt_resource *r, u32 *dc)
* For Memory Allocation: Set b/w requested to 100%
*/
for (i = 0; i < hw_res->num_closid; i++, dc++)
- *dc = r->default_ctrl;
+ *dc = resctrl_get_default_ctrl(r);
}
static void ctrl_domain_free(struct rdt_hw_ctrl_domain *hw_dom)
@@ -494,13 +420,13 @@ static int arch_domain_mbm_alloc(u32 num_rmid, struct rdt_hw_mon_domain *hw_dom)
{
size_t tsize;
- if (is_mbm_total_enabled()) {
+ if (resctrl_arch_is_mbm_total_enabled()) {
tsize = sizeof(*hw_dom->arch_mbm_total);
hw_dom->arch_mbm_total = kcalloc(num_rmid, tsize, GFP_KERNEL);
if (!hw_dom->arch_mbm_total)
return -ENOMEM;
}
- if (is_mbm_local_enabled()) {
+ if (resctrl_arch_is_mbm_local_enabled()) {
tsize = sizeof(*hw_dom->arch_mbm_local);
hw_dom->arch_mbm_local = kcalloc(num_rmid, tsize, GFP_KERNEL);
if (!hw_dom->arch_mbm_local) {
@@ -545,7 +471,7 @@ static void domain_add_cpu_ctrl(int cpu, struct rdt_resource *r)
return;
}
- hdr = rdt_find_domain(&r->ctrl_domains, id, &add_pos);
+ hdr = resctrl_find_domain(&r->ctrl_domains, id, &add_pos);
if (hdr) {
if (WARN_ON_ONCE(hdr->type != RESCTRL_CTRL_DOMAIN))
return;
@@ -600,7 +526,7 @@ static void domain_add_cpu_mon(int cpu, struct rdt_resource *r)
return;
}
- hdr = rdt_find_domain(&r->mon_domains, id, &add_pos);
+ hdr = resctrl_find_domain(&r->mon_domains, id, &add_pos);
if (hdr) {
if (WARN_ON_ONCE(hdr->type != RESCTRL_MON_DOMAIN))
return;
@@ -665,7 +591,7 @@ static void domain_remove_cpu_ctrl(int cpu, struct rdt_resource *r)
return;
}
- hdr = rdt_find_domain(&r->ctrl_domains, id, NULL);
+ hdr = resctrl_find_domain(&r->ctrl_domains, id, NULL);
if (!hdr) {
pr_warn("Can't find control domain for id=%d for CPU %d for resource %s\n",
id, cpu, r->name);
@@ -711,7 +637,7 @@ static void domain_remove_cpu_mon(int cpu, struct rdt_resource *r)
return;
}
- hdr = rdt_find_domain(&r->mon_domains, id, NULL);
+ hdr = resctrl_find_domain(&r->mon_domains, id, NULL);
if (!hdr) {
pr_warn("Can't find monitor domain for id=%d for CPU %d for resource %s\n",
id, cpu, r->name);
@@ -786,20 +712,6 @@ static int resctrl_arch_offline_cpu(unsigned int cpu)
return 0;
}
-/*
- * Choose a width for the resource name and resource data based on the
- * resource that has widest name and cbm.
- */
-static __init void rdt_init_padding(void)
-{
- struct rdt_resource *r;
-
- for_each_alloc_capable_rdt_resource(r) {
- if (r->data_width > max_data_width)
- max_data_width = r->data_width;
- }
-}
-
enum {
RDT_FLAG_CMT,
RDT_FLAG_MBM_TOTAL,
@@ -885,6 +797,21 @@ bool __init rdt_cpu_has(int flag)
return ret;
}
+__init bool resctrl_arch_is_evt_configurable(enum resctrl_event_id evt)
+{
+ if (!rdt_cpu_has(X86_FEATURE_BMEC))
+ return false;
+
+ switch (evt) {
+ case QOS_L3_MBM_TOTAL_EVENT_ID:
+ return rdt_cpu_has(X86_FEATURE_CQM_MBM_TOTAL);
+ case QOS_L3_MBM_LOCAL_EVENT_ID:
+ return rdt_cpu_has(X86_FEATURE_CQM_MBM_LOCAL);
+ default:
+ return false;
+ }
+}
+
static __init bool get_mem_config(void)
{
struct rdt_hw_resource *hw_res = &rdt_resources_all[RDT_RESOURCE_MBA];
@@ -963,11 +890,6 @@ static __init bool get_rdt_mon_resources(void)
if (!rdt_mon_features)
return false;
- if (is_mbm_local_enabled())
- mba_mbps_default_event = QOS_L3_MBM_LOCAL_EVENT_ID;
- else if (is_mbm_total_enabled())
- mba_mbps_default_event = QOS_L3_MBM_TOTAL_EVENT_ID;
-
return !rdt_get_mon_l3_config(r);
}
@@ -1086,7 +1008,7 @@ void resctrl_cpu_detect(struct cpuinfo_x86 *c)
}
}
-static int __init resctrl_late_init(void)
+static int __init resctrl_arch_late_init(void)
{
struct rdt_resource *r;
int state, ret;
@@ -1102,8 +1024,6 @@ static int __init resctrl_late_init(void)
if (!get_rdt_resources())
return -ENODEV;
- rdt_init_padding();
-
state = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
"x86/resctrl/cat:online:",
resctrl_arch_online_cpu,
@@ -1111,7 +1031,7 @@ static int __init resctrl_late_init(void)
if (state < 0)
return state;
- ret = rdtgroup_init();
+ ret = resctrl_init();
if (ret) {
cpuhp_remove_state(state);
return ret;
@@ -1127,18 +1047,13 @@ static int __init resctrl_late_init(void)
return 0;
}
-late_initcall(resctrl_late_init);
+late_initcall(resctrl_arch_late_init);
-static void __exit resctrl_exit(void)
+static void __exit resctrl_arch_exit(void)
{
- struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl;
-
cpuhp_remove_state(rdt_online);
- rdtgroup_exit();
-
- if (r->mon_capable)
- rdt_put_mon_l3_config();
+ resctrl_exit();
}
-__exitcall(resctrl_exit);
+__exitcall(resctrl_arch_exit);
diff --git a/arch/x86/kernel/cpu/resctrl/ctrlmondata.c b/arch/x86/kernel/cpu/resctrl/ctrlmondata.c
index 536351159cc2..0a0ac5f6112e 100644
--- a/arch/x86/kernel/cpu/resctrl/ctrlmondata.c
+++ b/arch/x86/kernel/cpu/resctrl/ctrlmondata.c
@@ -23,6 +23,15 @@
#include "internal.h"
+struct rdt_parse_data {
+ struct rdtgroup *rdtgrp;
+ char *buf;
+};
+
+typedef int (ctrlval_parser_t)(struct rdt_parse_data *data,
+ struct resctrl_schema *s,
+ struct rdt_ctrl_domain *d);
+
/*
* Check whether MBA bandwidth percentage value is correct. The value is
* checked against the minimum and max bandwidth values specified by the
@@ -54,9 +63,9 @@ static bool bw_validate(char *buf, u32 *data, struct rdt_resource *r)
return true;
}
- if (bw < r->membw.min_bw || bw > r->default_ctrl) {
+ if (bw < r->membw.min_bw || bw > r->membw.max_bw) {
rdt_last_cmd_printf("MB value %u out of range [%d,%d]\n",
- bw, r->membw.min_bw, r->default_ctrl);
+ bw, r->membw.min_bw, r->membw.max_bw);
return false;
}
@@ -64,8 +73,8 @@ static bool bw_validate(char *buf, u32 *data, struct rdt_resource *r)
return true;
}
-int parse_bw(struct rdt_parse_data *data, struct resctrl_schema *s,
- struct rdt_ctrl_domain *d)
+static int parse_bw(struct rdt_parse_data *data, struct resctrl_schema *s,
+ struct rdt_ctrl_domain *d)
{
struct resctrl_staged_config *cfg;
u32 closid = data->rdtgrp->closid;
@@ -104,8 +113,9 @@ int parse_bw(struct rdt_parse_data *data, struct resctrl_schema *s,
*/
static bool cbm_validate(char *buf, u32 *data, struct rdt_resource *r)
{
- unsigned long first_bit, zero_bit, val;
+ u32 supported_bits = BIT_MASK(r->cache.cbm_len) - 1;
unsigned int cbm_len = r->cache.cbm_len;
+ unsigned long first_bit, zero_bit, val;
int ret;
ret = kstrtoul(buf, 16, &val);
@@ -114,7 +124,7 @@ static bool cbm_validate(char *buf, u32 *data, struct rdt_resource *r)
return false;
}
- if ((r->cache.min_cbm_bits > 0 && val == 0) || val > r->default_ctrl) {
+ if ((r->cache.min_cbm_bits > 0 && val == 0) || val > supported_bits) {
rdt_last_cmd_puts("Mask out of range\n");
return false;
}
@@ -143,8 +153,8 @@ static bool cbm_validate(char *buf, u32 *data, struct rdt_resource *r)
* Read one cache bit mask (hex). Check that it is valid for the current
* resource type.
*/
-int parse_cbm(struct rdt_parse_data *data, struct resctrl_schema *s,
- struct rdt_ctrl_domain *d)
+static int parse_cbm(struct rdt_parse_data *data, struct resctrl_schema *s,
+ struct rdt_ctrl_domain *d)
{
struct rdtgroup *rdtgrp = data->rdtgrp;
struct resctrl_staged_config *cfg;
@@ -210,6 +220,7 @@ static int parse_line(char *line, struct resctrl_schema *s,
struct rdtgroup *rdtgrp)
{
enum resctrl_conf_type t = s->conf_type;
+ ctrlval_parser_t *parse_ctrlval = NULL;
struct resctrl_staged_config *cfg;
struct rdt_resource *r = s->res;
struct rdt_parse_data data;
@@ -220,6 +231,18 @@ static int parse_line(char *line, struct resctrl_schema *s,
/* Walking r->domains, ensure it can't race with cpuhp */
lockdep_assert_cpus_held();
+ switch (r->schema_fmt) {
+ case RESCTRL_SCHEMA_BITMAP:
+ parse_ctrlval = &parse_cbm;
+ break;
+ case RESCTRL_SCHEMA_RANGE:
+ parse_ctrlval = &parse_bw;
+ break;
+ }
+
+ if (WARN_ON_ONCE(!parse_ctrlval))
+ return -EINVAL;
+
if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP &&
(r->rid == RDT_RESOURCE_MBA || r->rid == RDT_RESOURCE_SMBA)) {
rdt_last_cmd_puts("Cannot pseudo-lock MBA resource\n");
@@ -240,7 +263,7 @@ next:
if (d->hdr.id == dom_id) {
data.buf = dom;
data.rdtgrp = rdtgrp;
- if (r->parse_ctrlval(&data, s, d))
+ if (parse_ctrlval(&data, s, d))
return -EINVAL;
if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
cfg = &d->staged_config[t];
@@ -264,25 +287,12 @@ next:
return -EINVAL;
}
-static u32 get_config_index(u32 closid, enum resctrl_conf_type type)
-{
- switch (type) {
- default:
- case CDP_NONE:
- return closid;
- case CDP_CODE:
- return closid * 2 + 1;
- case CDP_DATA:
- return closid * 2;
- }
-}
-
int resctrl_arch_update_one(struct rdt_resource *r, struct rdt_ctrl_domain *d,
u32 closid, enum resctrl_conf_type t, u32 cfg_val)
{
struct rdt_hw_ctrl_domain *hw_dom = resctrl_to_arch_ctrl_dom(d);
struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
- u32 idx = get_config_index(closid, t);
+ u32 idx = resctrl_get_config_index(closid, t);
struct msr_param msr_param;
if (!cpumask_test_cpu(smp_processor_id(), &d->hdr.cpu_mask))
@@ -319,7 +329,7 @@ int resctrl_arch_update_domains(struct rdt_resource *r, u32 closid)
if (!cfg->have_new_ctrl)
continue;
- idx = get_config_index(closid, t);
+ idx = resctrl_get_config_index(closid, t);
if (cfg->new_ctrl == hw_dom->ctrl_val[idx])
continue;
hw_dom->ctrl_val[idx] = cfg->new_ctrl;
@@ -439,7 +449,7 @@ u32 resctrl_arch_get_config(struct rdt_resource *r, struct rdt_ctrl_domain *d,
u32 closid, enum resctrl_conf_type type)
{
struct rdt_hw_ctrl_domain *hw_dom = resctrl_to_arch_ctrl_dom(d);
- u32 idx = get_config_index(closid, type);
+ u32 idx = resctrl_get_config_index(closid, type);
return hw_dom->ctrl_val[idx];
}
@@ -465,8 +475,7 @@ static void show_doms(struct seq_file *s, struct resctrl_schema *schema, int clo
ctrl_val = resctrl_arch_get_config(r, dom, closid,
schema->conf_type);
- seq_printf(s, r->format_str, dom->hdr.id, max_data_width,
- ctrl_val);
+ seq_printf(s, schema->fmt_str, dom->hdr.id, ctrl_val);
sep = true;
}
seq_puts(s, "\n");
@@ -537,12 +546,12 @@ ssize_t rdtgroup_mba_mbps_event_write(struct kernfs_open_file *of,
rdt_last_cmd_clear();
if (!strcmp(buf, "mbm_local_bytes")) {
- if (is_mbm_local_enabled())
+ if (resctrl_arch_is_mbm_local_enabled())
rdtgrp->mba_mbps_event = QOS_L3_MBM_LOCAL_EVENT_ID;
else
ret = -EINVAL;
} else if (!strcmp(buf, "mbm_total_bytes")) {
- if (is_mbm_total_enabled())
+ if (resctrl_arch_is_mbm_total_enabled())
rdtgrp->mba_mbps_event = QOS_L3_MBM_TOTAL_EVENT_ID;
else
ret = -EINVAL;
@@ -588,6 +597,28 @@ int rdtgroup_mba_mbps_event_show(struct kernfs_open_file *of,
return ret;
}
+struct rdt_domain_hdr *resctrl_find_domain(struct list_head *h, int id,
+ struct list_head **pos)
+{
+ struct rdt_domain_hdr *d;
+ struct list_head *l;
+
+ list_for_each(l, h) {
+ d = list_entry(l, struct rdt_domain_hdr, list);
+ /* When id is found, return its domain. */
+ if (id == d->id)
+ return d;
+ /* Stop searching when finding id's position in sorted list. */
+ if (id < d->id)
+ break;
+ }
+
+ if (pos)
+ *pos = l;
+
+ return NULL;
+}
+
void mon_event_read(struct rmid_read *rr, struct rdt_resource *r,
struct rdt_mon_domain *d, struct rdtgroup *rdtgrp,
cpumask_t *cpumask, int evtid, int first)
@@ -649,7 +680,7 @@ int rdtgroup_mondata_show(struct seq_file *m, void *arg)
resid = md.u.rid;
domid = md.u.domid;
evtid = md.u.evtid;
- r = &rdt_resources_all[resid].r_resctrl;
+ r = resctrl_arch_get_resource(resid);
if (md.u.sum) {
/*
@@ -673,7 +704,7 @@ int rdtgroup_mondata_show(struct seq_file *m, void *arg)
* This file provides data from a single domain. Search
* the resource to find the domain with "domid".
*/
- hdr = rdt_find_domain(&r->mon_domains, domid, NULL);
+ hdr = resctrl_find_domain(&r->mon_domains, domid, NULL);
if (!hdr || WARN_ON_ONCE(hdr->type != RESCTRL_MON_DOMAIN)) {
ret = -ENOENT;
goto out;
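resctrl_get_config_index(), which takes over from the removed static get_config_index(), maps a closid to its control-MSR slot: with CDP enabled each closid owns a data/code pair (closid*2 and closid*2+1), otherwise the closid indexes directly. A small sketch of that mapping; the enum names mirror the removed code:

#include <stdio.h>

enum conf_type { CDP_NONE, CDP_CODE, CDP_DATA };

static unsigned int config_index(unsigned int closid, enum conf_type t)
{
	switch (t) {
	case CDP_CODE:
		return closid * 2 + 1;
	case CDP_DATA:
		return closid * 2;
	case CDP_NONE:
	default:
		return closid;
	}
}

int main(void)
{
	printf("closid 3, code slot: %u\n", config_index(3, CDP_CODE));	/* 7 */
	printf("closid 3, data slot: %u\n", config_index(3, CDP_DATA));	/* 6 */
	return 0;
}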
diff --git a/arch/x86/kernel/cpu/resctrl/internal.h b/arch/x86/kernel/cpu/resctrl/internal.h
index 20c898f09b7e..c44c5b496355 100644
--- a/arch/x86/kernel/cpu/resctrl/internal.h
+++ b/arch/x86/kernel/cpu/resctrl/internal.h
@@ -32,30 +32,6 @@
*/
#define MBM_CNTR_WIDTH_OFFSET_MAX (62 - MBM_CNTR_WIDTH_BASE)
-/* Reads to Local DRAM Memory */
-#define READS_TO_LOCAL_MEM BIT(0)
-
-/* Reads to Remote DRAM Memory */
-#define READS_TO_REMOTE_MEM BIT(1)
-
-/* Non-Temporal Writes to Local Memory */
-#define NON_TEMP_WRITE_TO_LOCAL_MEM BIT(2)
-
-/* Non-Temporal Writes to Remote Memory */
-#define NON_TEMP_WRITE_TO_REMOTE_MEM BIT(3)
-
-/* Reads to Local Memory the system identifies as "Slow Memory" */
-#define READS_TO_LOCAL_S_MEM BIT(4)
-
-/* Reads to Remote Memory the system identifies as "Slow Memory" */
-#define READS_TO_REMOTE_S_MEM BIT(5)
-
-/* Dirty Victims to All Types of Memory */
-#define DIRTY_VICTIMS_TO_ALL_MEM BIT(6)
-
-/* Max event bits supported */
-#define MAX_EVT_CONFIG_BITS GENMASK(6, 0)
-
/**
* cpumask_any_housekeeping() - Choose any CPU in @mask, preferring those that
* aren't marked nohz_full
@@ -180,7 +156,6 @@ struct rmid_read {
void *arch_mon_ctx;
};
-extern unsigned int rdt_mon_features;
extern struct list_head resctrl_schema_all;
extern bool resctrl_mounted;
@@ -234,43 +209,6 @@ struct mongroup {
};
/**
- * struct pseudo_lock_region - pseudo-lock region information
- * @s: Resctrl schema for the resource to which this
- * pseudo-locked region belongs
- * @d: RDT domain to which this pseudo-locked region
- * belongs
- * @cbm: bitmask of the pseudo-locked region
- * @lock_thread_wq: waitqueue used to wait on the pseudo-locking thread
- * completion
- * @thread_done: variable used by waitqueue to test if pseudo-locking
- * thread completed
- * @cpu: core associated with the cache on which the setup code
- * will be run
- * @line_size: size of the cache lines
- * @size: size of pseudo-locked region in bytes
- * @kmem: the kernel memory associated with pseudo-locked region
- * @minor: minor number of character device associated with this
- * region
- * @debugfs_dir: pointer to this region's directory in the debugfs
- * filesystem
- * @pm_reqs: Power management QoS requests related to this region
- */
-struct pseudo_lock_region {
- struct resctrl_schema *s;
- struct rdt_ctrl_domain *d;
- u32 cbm;
- wait_queue_head_t lock_thread_wq;
- int thread_done;
- int cpu;
- unsigned int line_size;
- unsigned int size;
- void *kmem;
- unsigned int minor;
- struct dentry *debugfs_dir;
- struct list_head pm_reqs;
-};
-
-/**
* struct rdtgroup - store rdtgroup's data in resctrl file system.
* @kn: kernfs node
* @rdtgroup_list: linked list for all rdtgroups
@@ -326,10 +264,7 @@ struct rdtgroup {
/* List of all resource groups */
extern struct list_head rdt_all_groups;
-extern int max_name_width, max_data_width;
-
-int __init rdtgroup_init(void);
-void __exit rdtgroup_exit(void);
+extern int max_name_width;
/**
* struct rftype - describe each file in the resctrl file system
@@ -433,37 +368,6 @@ struct msr_param {
u32 high;
};
-static inline bool is_llc_occupancy_enabled(void)
-{
- return (rdt_mon_features & (1 << QOS_L3_OCCUP_EVENT_ID));
-}
-
-static inline bool is_mbm_total_enabled(void)
-{
- return (rdt_mon_features & (1 << QOS_L3_MBM_TOTAL_EVENT_ID));
-}
-
-static inline bool is_mbm_local_enabled(void)
-{
- return (rdt_mon_features & (1 << QOS_L3_MBM_LOCAL_EVENT_ID));
-}
-
-static inline bool is_mbm_enabled(void)
-{
- return (is_mbm_total_enabled() || is_mbm_local_enabled());
-}
-
-static inline bool is_mbm_event(int e)
-{
- return (e >= QOS_L3_MBM_TOTAL_EVENT_ID &&
- e <= QOS_L3_MBM_LOCAL_EVENT_ID);
-}
-
-struct rdt_parse_data {
- struct rdtgroup *rdtgrp;
- char *buf;
-};
-
/**
* struct rdt_hw_resource - arch private attributes of a resctrl resource
* @r_resctrl: Attributes of the resource used directly by resctrl.
@@ -476,8 +380,6 @@ struct rdt_parse_data {
* @msr_update: Function pointer to update QOS MSRs
* @mon_scale: cqm counter * mon_scale = occupancy in bytes
* @mbm_width: Monitor width, to detect and correct for overflow.
- * @mbm_cfg_mask: Bandwidth sources that can be tracked when Bandwidth
- * Monitoring Event Configuration (BMEC) is supported.
* @cdp_enabled: CDP state of this resource
*
* Members of this structure are either private to the architecture
@@ -491,7 +393,6 @@ struct rdt_hw_resource {
void (*msr_update)(struct msr_param *m);
unsigned int mon_scale;
unsigned int mbm_width;
- unsigned int mbm_cfg_mask;
bool cdp_enabled;
};
@@ -500,11 +401,6 @@ static inline struct rdt_hw_resource *resctrl_to_arch_res(struct rdt_resource *r
return container_of(r, struct rdt_hw_resource, r_resctrl);
}
-int parse_cbm(struct rdt_parse_data *data, struct resctrl_schema *s,
- struct rdt_ctrl_domain *d);
-int parse_bw(struct rdt_parse_data *data, struct resctrl_schema *s,
- struct rdt_ctrl_domain *d);
-
extern struct mutex rdtgroup_mutex;
extern struct rdt_hw_resource rdt_resources_all[];
@@ -512,24 +408,6 @@ extern struct rdtgroup rdtgroup_default;
extern struct dentry *debugfs_resctrl;
extern enum resctrl_event_id mba_mbps_default_event;
-enum resctrl_res_level {
- RDT_RESOURCE_L3,
- RDT_RESOURCE_L2,
- RDT_RESOURCE_MBA,
- RDT_RESOURCE_SMBA,
-
- /* Must be the last */
- RDT_NUM_RESOURCES,
-};
-
-static inline struct rdt_resource *resctrl_inc(struct rdt_resource *res)
-{
- struct rdt_hw_resource *hw_res = resctrl_to_arch_res(res);
-
- hw_res++;
- return &hw_res->r_resctrl;
-}
-
static inline bool resctrl_arch_get_cdp_enabled(enum resctrl_res_level l)
{
return rdt_resources_all[l].cdp_enabled;
@@ -539,27 +417,6 @@ int resctrl_arch_set_cdp_enabled(enum resctrl_res_level l, bool enable);
void arch_mon_domain_online(struct rdt_resource *r, struct rdt_mon_domain *d);
-/*
- * To return the common struct rdt_resource, which is contained in struct
- * rdt_hw_resource, walk the resctrl member of struct rdt_hw_resource.
- */
-#define for_each_rdt_resource(r) \
- for (r = &rdt_resources_all[0].r_resctrl; \
- r <= &rdt_resources_all[RDT_NUM_RESOURCES - 1].r_resctrl; \
- r = resctrl_inc(r))
-
-#define for_each_capable_rdt_resource(r) \
- for_each_rdt_resource(r) \
- if (r->alloc_capable || r->mon_capable)
-
-#define for_each_alloc_capable_rdt_resource(r) \
- for_each_rdt_resource(r) \
- if (r->alloc_capable)
-
-#define for_each_mon_capable_rdt_resource(r) \
- for_each_rdt_resource(r) \
- if (r->mon_capable)
-
/* CPUID.(EAX=10H, ECX=ResID=1).EAX */
union cpuid_0x10_1_eax {
struct {
@@ -604,8 +461,6 @@ void rdtgroup_kn_unlock(struct kernfs_node *kn);
int rdtgroup_kn_mode_restrict(struct rdtgroup *r, const char *name);
int rdtgroup_kn_mode_restore(struct rdtgroup *r, const char *name,
umode_t mask);
-struct rdt_domain_hdr *rdt_find_domain(struct list_head *h, int id,
- struct list_head **pos);
ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
char *buf, size_t nbytes, loff_t off);
int rdtgroup_schemata_show(struct kernfs_open_file *of,
@@ -620,28 +475,19 @@ unsigned int rdtgroup_cbm_to_size(struct rdt_resource *r, struct rdt_ctrl_domain
unsigned long cbm);
enum rdtgrp_mode rdtgroup_mode_by_closid(int closid);
int rdtgroup_tasks_assigned(struct rdtgroup *r);
-int rdtgroup_locksetup_enter(struct rdtgroup *rdtgrp);
-int rdtgroup_locksetup_exit(struct rdtgroup *rdtgrp);
-bool rdtgroup_cbm_overlaps_pseudo_locked(struct rdt_ctrl_domain *d, unsigned long cbm);
-bool rdtgroup_pseudo_locked_in_hierarchy(struct rdt_ctrl_domain *d);
-int rdt_pseudo_lock_init(void);
-void rdt_pseudo_lock_release(void);
-int rdtgroup_pseudo_lock_create(struct rdtgroup *rdtgrp);
-void rdtgroup_pseudo_lock_remove(struct rdtgroup *rdtgrp);
-struct rdt_ctrl_domain *get_ctrl_domain_from_cpu(int cpu, struct rdt_resource *r);
-struct rdt_mon_domain *get_mon_domain_from_cpu(int cpu, struct rdt_resource *r);
int closids_supported(void);
void closid_free(int closid);
int alloc_rmid(u32 closid);
void free_rmid(u32 closid, u32 rmid);
int rdt_get_mon_l3_config(struct rdt_resource *r);
-void __exit rdt_put_mon_l3_config(void);
+void resctrl_mon_resource_exit(void);
bool __init rdt_cpu_has(int flag);
void mon_event_count(void *info);
int rdtgroup_mondata_show(struct seq_file *m, void *arg);
void mon_event_read(struct rmid_read *rr, struct rdt_resource *r,
struct rdt_mon_domain *d, struct rdtgroup *rdtgrp,
cpumask_t *cpumask, int evtid, int first);
+int __init resctrl_mon_resource_init(void);
void mbm_setup_overflow_handler(struct rdt_mon_domain *dom,
unsigned long delay_ms,
int exclude_cpu);
@@ -658,4 +504,45 @@ void resctrl_file_fflags_init(const char *config, unsigned long fflags);
void rdt_staged_configs_clear(void);
bool closid_allocated(unsigned int closid);
int resctrl_find_cleanest_closid(void);
+
+#ifdef CONFIG_RESCTRL_FS_PSEUDO_LOCK
+int rdtgroup_locksetup_enter(struct rdtgroup *rdtgrp);
+int rdtgroup_locksetup_exit(struct rdtgroup *rdtgrp);
+bool rdtgroup_cbm_overlaps_pseudo_locked(struct rdt_ctrl_domain *d, unsigned long cbm);
+bool rdtgroup_pseudo_locked_in_hierarchy(struct rdt_ctrl_domain *d);
+int rdt_pseudo_lock_init(void);
+void rdt_pseudo_lock_release(void);
+int rdtgroup_pseudo_lock_create(struct rdtgroup *rdtgrp);
+void rdtgroup_pseudo_lock_remove(struct rdtgroup *rdtgrp);
+#else
+static inline int rdtgroup_locksetup_enter(struct rdtgroup *rdtgrp)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int rdtgroup_locksetup_exit(struct rdtgroup *rdtgrp)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline bool rdtgroup_cbm_overlaps_pseudo_locked(struct rdt_ctrl_domain *d, unsigned long cbm)
+{
+ return false;
+}
+
+static inline bool rdtgroup_pseudo_locked_in_hierarchy(struct rdt_ctrl_domain *d)
+{
+ return false;
+}
+
+static inline int rdt_pseudo_lock_init(void) { return 0; }
+static inline void rdt_pseudo_lock_release(void) { }
+static inline int rdtgroup_pseudo_lock_create(struct rdtgroup *rdtgrp)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void rdtgroup_pseudo_lock_remove(struct rdtgroup *rdtgrp) { }
+#endif /* CONFIG_RESCTRL_FS_PSEUDO_LOCK */
+
#endif /* _ASM_X86_RESCTRL_INTERNAL_H */
diff --git a/arch/x86/kernel/cpu/resctrl/monitor.c b/arch/x86/kernel/cpu/resctrl/monitor.c
index 94a1d9780461..a93ed7d2a160 100644
--- a/arch/x86/kernel/cpu/resctrl/monitor.c
+++ b/arch/x86/kernel/cpu/resctrl/monitor.c
@@ -295,11 +295,11 @@ void resctrl_arch_reset_rmid_all(struct rdt_resource *r, struct rdt_mon_domain *
{
struct rdt_hw_mon_domain *hw_dom = resctrl_to_arch_mon_dom(d);
- if (is_mbm_total_enabled())
+ if (resctrl_arch_is_mbm_total_enabled())
memset(hw_dom->arch_mbm_total, 0,
sizeof(*hw_dom->arch_mbm_total) * r->num_rmid);
- if (is_mbm_local_enabled())
+ if (resctrl_arch_is_mbm_local_enabled())
memset(hw_dom->arch_mbm_local, 0,
sizeof(*hw_dom->arch_mbm_local) * r->num_rmid);
}
@@ -365,7 +365,7 @@ static void limbo_release_entry(struct rmid_entry *entry)
*/
void __check_limbo(struct rdt_mon_domain *d, bool force_free)
{
- struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl;
+ struct rdt_resource *r = resctrl_arch_get_resource(RDT_RESOURCE_L3);
u32 idx_limit = resctrl_arch_system_num_rmid_idx();
struct rmid_entry *entry;
u32 idx, cur_idx = 1;
@@ -521,7 +521,7 @@ int alloc_rmid(u32 closid)
static void add_rmid_to_limbo(struct rmid_entry *entry)
{
- struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl;
+ struct rdt_resource *r = resctrl_arch_get_resource(RDT_RESOURCE_L3);
struct rdt_mon_domain *d;
u32 idx;
@@ -569,7 +569,7 @@ void free_rmid(u32 closid, u32 rmid)
entry = __rmid_entry(idx);
- if (is_llc_occupancy_enabled())
+ if (resctrl_arch_is_llc_occupancy_enabled())
add_rmid_to_limbo(entry);
else
list_add_tail(&entry->list, &rmid_free_lru);
@@ -718,6 +718,22 @@ void mon_event_count(void *info)
rr->err = 0;
}
+static struct rdt_ctrl_domain *get_ctrl_domain_from_cpu(int cpu,
+ struct rdt_resource *r)
+{
+ struct rdt_ctrl_domain *d;
+
+ lockdep_assert_cpus_held();
+
+ list_for_each_entry(d, &r->ctrl_domains, hdr.list) {
+ /* Find the domain that contains this CPU */
+ if (cpumask_test_cpu(cpu, &d->hdr.cpu_mask))
+ return d;
+ }
+
+ return NULL;
+}
+
/*
* Feedback loop for MBA software controller (mba_sc)
*
@@ -761,7 +777,7 @@ static void update_mba_bw(struct rdtgroup *rgrp, struct rdt_mon_domain *dom_mbm)
struct rdtgroup *entry;
u32 cur_bw, user_bw;
- r_mba = &rdt_resources_all[RDT_RESOURCE_MBA].r_resctrl;
+ r_mba = resctrl_arch_get_resource(RDT_RESOURCE_MBA);
evt_id = rgrp->mba_mbps_event;
closid = rgrp->closid;
@@ -852,10 +868,10 @@ static void mbm_update(struct rdt_resource *r, struct rdt_mon_domain *d,
* This is protected from concurrent reads from user as both
* the user and overflow handler hold the global mutex.
*/
- if (is_mbm_total_enabled())
+ if (resctrl_arch_is_mbm_total_enabled())
mbm_update_one_event(r, d, closid, rmid, QOS_L3_MBM_TOTAL_EVENT_ID);
- if (is_mbm_local_enabled())
+ if (resctrl_arch_is_mbm_local_enabled())
mbm_update_one_event(r, d, closid, rmid, QOS_L3_MBM_LOCAL_EVENT_ID);
}
@@ -925,7 +941,7 @@ void mbm_handle_overflow(struct work_struct *work)
if (!resctrl_mounted || !resctrl_arch_mon_capable())
goto out_unlock;
- r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl;
+ r = resctrl_arch_get_resource(RDT_RESOURCE_L3);
d = container_of(work, struct rdt_mon_domain, mbm_over.work);
list_for_each_entry(prgrp, &rdt_all_groups, rdtgroup_list) {
@@ -1027,7 +1043,7 @@ static int dom_data_init(struct rdt_resource *r)
/*
* RESCTRL_RESERVED_CLOSID and RESCTRL_RESERVED_RMID are special and
* are always allocated. These are used for the rdtgroup_default
- * control group, which will be setup later in rdtgroup_init().
+ * control group, which will be set up later in resctrl_init().
*/
idx = resctrl_arch_rmid_idx_encode(RESCTRL_RESERVED_CLOSID,
RESCTRL_RESERVED_RMID);
@@ -1040,10 +1056,13 @@ out_unlock:
return err;
}
-static void __exit dom_data_exit(void)
+static void dom_data_exit(struct rdt_resource *r)
{
mutex_lock(&rdtgroup_mutex);
+ if (!r->mon_capable)
+ goto out_unlock;
+
if (IS_ENABLED(CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID)) {
kfree(closid_num_dirty_rmid);
closid_num_dirty_rmid = NULL;
@@ -1052,6 +1071,7 @@ static void __exit dom_data_exit(void)
kfree(rmid_ptrs);
rmid_ptrs = NULL;
+out_unlock:
mutex_unlock(&rdtgroup_mutex);
}
@@ -1081,11 +1101,11 @@ static void l3_mon_evt_init(struct rdt_resource *r)
{
INIT_LIST_HEAD(&r->evt_list);
- if (is_llc_occupancy_enabled())
+ if (resctrl_arch_is_llc_occupancy_enabled())
list_add_tail(&llc_occupancy_event.list, &r->evt_list);
- if (is_mbm_total_enabled())
+ if (resctrl_arch_is_mbm_total_enabled())
list_add_tail(&mbm_total_event.list, &r->evt_list);
- if (is_mbm_local_enabled())
+ if (resctrl_arch_is_mbm_local_enabled())
list_add_tail(&mbm_local_event.list, &r->evt_list);
}
@@ -1172,12 +1192,56 @@ static __init int snc_get_config(void)
return ret;
}
+/**
+ * resctrl_mon_resource_init() - Initialise global monitoring structures.
+ *
+ * Allocate and initialise global monitor resources that do not belong to a
+ * specific domain, i.e. the rmid_ptrs[] used for the limbo and free lists.
+ * Called once during boot after the struct rdt_resource's have been configured
+ * but before the filesystem is mounted.
+ * Resctrl's cpuhp callbacks may be called before this point to bring a domain
+ * online.
+ *
+ * Returns 0 for success, or -ENOMEM.
+ */
+int __init resctrl_mon_resource_init(void)
+{
+ struct rdt_resource *r = resctrl_arch_get_resource(RDT_RESOURCE_L3);
+ int ret;
+
+ if (!r->mon_capable)
+ return 0;
+
+ ret = dom_data_init(r);
+ if (ret)
+ return ret;
+
+ l3_mon_evt_init(r);
+
+ if (resctrl_arch_is_evt_configurable(QOS_L3_MBM_TOTAL_EVENT_ID)) {
+ mbm_total_event.configurable = true;
+ resctrl_file_fflags_init("mbm_total_bytes_config",
+ RFTYPE_MON_INFO | RFTYPE_RES_CACHE);
+ }
+ if (resctrl_arch_is_evt_configurable(QOS_L3_MBM_LOCAL_EVENT_ID)) {
+ mbm_local_event.configurable = true;
+ resctrl_file_fflags_init("mbm_local_bytes_config",
+ RFTYPE_MON_INFO | RFTYPE_RES_CACHE);
+ }
+
+ if (resctrl_arch_is_mbm_local_enabled())
+ mba_mbps_default_event = QOS_L3_MBM_LOCAL_EVENT_ID;
+ else if (resctrl_arch_is_mbm_total_enabled())
+ mba_mbps_default_event = QOS_L3_MBM_TOTAL_EVENT_ID;
+
+ return 0;
+}
+
int __init rdt_get_mon_l3_config(struct rdt_resource *r)
{
unsigned int mbm_offset = boot_cpu_data.x86_cache_mbm_width_offset;
struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
unsigned int threshold;
- int ret;
snc_nodes_per_l3_cache = snc_get_config();
@@ -1207,39 +1271,24 @@ int __init rdt_get_mon_l3_config(struct rdt_resource *r)
*/
resctrl_rmid_realloc_threshold = resctrl_arch_round_mon_val(threshold);
- ret = dom_data_init(r);
- if (ret)
- return ret;
-
if (rdt_cpu_has(X86_FEATURE_BMEC)) {
u32 eax, ebx, ecx, edx;
/* Detect list of bandwidth sources that can be tracked */
cpuid_count(0x80000020, 3, &eax, &ebx, &ecx, &edx);
- hw_res->mbm_cfg_mask = ecx & MAX_EVT_CONFIG_BITS;
-
- if (rdt_cpu_has(X86_FEATURE_CQM_MBM_TOTAL)) {
- mbm_total_event.configurable = true;
- resctrl_file_fflags_init("mbm_total_bytes_config",
- RFTYPE_MON_INFO | RFTYPE_RES_CACHE);
- }
- if (rdt_cpu_has(X86_FEATURE_CQM_MBM_LOCAL)) {
- mbm_local_event.configurable = true;
- resctrl_file_fflags_init("mbm_local_bytes_config",
- RFTYPE_MON_INFO | RFTYPE_RES_CACHE);
- }
+ r->mbm_cfg_mask = ecx & MAX_EVT_CONFIG_BITS;
}
- l3_mon_evt_init(r);
-
r->mon_capable = true;
return 0;
}
-void __exit rdt_put_mon_l3_config(void)
+void resctrl_mon_resource_exit(void)
{
- dom_data_exit();
+ struct rdt_resource *r = resctrl_arch_get_resource(RDT_RESOURCE_L3);
+
+ dom_data_exit(r);
}
void __init intel_rdt_mbm_apply_quirk(void)
diff --git a/arch/x86/kernel/cpu/resctrl/pseudo_lock.c b/arch/x86/kernel/cpu/resctrl/pseudo_lock.c
index 42cc162f7fc9..01fa7890b43f 100644
--- a/arch/x86/kernel/cpu/resctrl/pseudo_lock.c
+++ b/arch/x86/kernel/cpu/resctrl/pseudo_lock.c
@@ -61,7 +61,8 @@ static const struct class pseudo_lock_class = {
};
/**
- * get_prefetch_disable_bits - prefetch disable bits of supported platforms
+ * resctrl_arch_get_prefetch_disable_bits - prefetch disable bits of supported
+ * platforms
* @void: It takes no parameters.
*
* Capture the list of platforms that have been validated to support
@@ -75,14 +76,16 @@ static const struct class pseudo_lock_class = {
* in the SDM.
*
* When adding a platform here also add support for its cache events to
- * measure_cycles_perf_fn()
+ * resctrl_arch_measure_l*_residency()
*
* Return:
* If platform is supported, the bits to disable hardware prefetchers, 0
* if platform is not supported.
*/
-static u64 get_prefetch_disable_bits(void)
+u64 resctrl_arch_get_prefetch_disable_bits(void)
{
+ prefetch_disable_bits = 0;
+
if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL ||
boot_cpu_data.x86 != 6)
return 0;
@@ -98,7 +101,8 @@ static u64 get_prefetch_disable_bits(void)
* 3 DCU IP Prefetcher Disable (R/W)
* 63:4 Reserved
*/
- return 0xF;
+ prefetch_disable_bits = 0xF;
+ break;
case INTEL_ATOM_GOLDMONT:
case INTEL_ATOM_GOLDMONT_PLUS:
/*
@@ -109,10 +113,11 @@ static u64 get_prefetch_disable_bits(void)
* 2 DCU Hardware Prefetcher Disable (R/W)
* 63:3 Reserved
*/
- return 0x5;
+ prefetch_disable_bits = 0x5;
+ break;
}
- return 0;
+ return prefetch_disable_bits;
}
/**
@@ -408,8 +413,8 @@ static void pseudo_lock_free(struct rdtgroup *rdtgrp)
}
/**
- * pseudo_lock_fn - Load kernel memory into cache
- * @_rdtgrp: resource group to which pseudo-lock region belongs
+ * resctrl_arch_pseudo_lock_fn - Load kernel memory into cache
+ * @_plr: the pseudo-lock region descriptor
*
* This is the core pseudo-locking flow.
*
@@ -426,10 +431,9 @@ static void pseudo_lock_free(struct rdtgroup *rdtgrp)
*
* Return: 0. Waiter on waitqueue will be woken on completion.
*/
-static int pseudo_lock_fn(void *_rdtgrp)
+int resctrl_arch_pseudo_lock_fn(void *_plr)
{
- struct rdtgroup *rdtgrp = _rdtgrp;
- struct pseudo_lock_region *plr = rdtgrp->plr;
+ struct pseudo_lock_region *plr = _plr;
u32 rmid_p, closid_p;
unsigned long i;
u64 saved_msr;
@@ -489,7 +493,8 @@ static int pseudo_lock_fn(void *_rdtgrp)
* pseudo-locked followed by reading of kernel memory to load it
* into the cache.
*/
- __wrmsr(MSR_IA32_PQR_ASSOC, rmid_p, rdtgrp->closid);
+ __wrmsr(MSR_IA32_PQR_ASSOC, rmid_p, plr->closid);
+
/*
* Cache was flushed earlier. Now access kernel memory to read it
* into cache region associated with just activated plr->closid.
@@ -712,8 +717,7 @@ int rdtgroup_locksetup_enter(struct rdtgroup *rdtgrp)
* Not knowing the bits to disable prefetching implies that this
* platform does not support Cache Pseudo-Locking.
*/
- prefetch_disable_bits = get_prefetch_disable_bits();
- if (prefetch_disable_bits == 0) {
+ if (resctrl_arch_get_prefetch_disable_bits() == 0) {
rdt_last_cmd_puts("Pseudo-locking not supported\n");
return -EINVAL;
}
@@ -872,7 +876,8 @@ bool rdtgroup_pseudo_locked_in_hierarchy(struct rdt_ctrl_domain *d)
}
/**
- * measure_cycles_lat_fn - Measure cycle latency to read pseudo-locked memory
+ * resctrl_arch_measure_cycles_lat_fn - Measure cycle latency to read
+ * pseudo-locked memory
* @_plr: pseudo-lock region to measure
*
* There is no deterministic way to test if a memory region is cached. One
@@ -885,7 +890,7 @@ bool rdtgroup_pseudo_locked_in_hierarchy(struct rdt_ctrl_domain *d)
*
* Return: 0. Waiter on waitqueue will be woken on completion.
*/
-static int measure_cycles_lat_fn(void *_plr)
+int resctrl_arch_measure_cycles_lat_fn(void *_plr)
{
struct pseudo_lock_region *plr = _plr;
u32 saved_low, saved_high;
@@ -1069,7 +1074,7 @@ out:
return 0;
}
-static int measure_l2_residency(void *_plr)
+int resctrl_arch_measure_l2_residency(void *_plr)
{
struct pseudo_lock_region *plr = _plr;
struct residency_counts counts = {0};
@@ -1107,7 +1112,7 @@ out:
return 0;
}
-static int measure_l3_residency(void *_plr)
+int resctrl_arch_measure_l3_residency(void *_plr)
{
struct pseudo_lock_region *plr = _plr;
struct residency_counts counts = {0};
@@ -1205,14 +1210,14 @@ static int pseudo_lock_measure_cycles(struct rdtgroup *rdtgrp, int sel)
plr->cpu = cpu;
if (sel == 1)
- thread = kthread_run_on_cpu(measure_cycles_lat_fn, plr,
- cpu, "pseudo_lock_measure/%u");
+ thread = kthread_run_on_cpu(resctrl_arch_measure_cycles_lat_fn,
+ plr, cpu, "pseudo_lock_measure/%u");
else if (sel == 2)
- thread = kthread_run_on_cpu(measure_l2_residency, plr,
- cpu, "pseudo_lock_measure/%u");
+ thread = kthread_run_on_cpu(resctrl_arch_measure_l2_residency,
+ plr, cpu, "pseudo_lock_measure/%u");
else if (sel == 3)
- thread = kthread_run_on_cpu(measure_l3_residency, plr,
- cpu, "pseudo_lock_measure/%u");
+ thread = kthread_run_on_cpu(resctrl_arch_measure_l3_residency,
+ plr, cpu, "pseudo_lock_measure/%u");
else
goto out;
@@ -1307,7 +1312,7 @@ int rdtgroup_pseudo_lock_create(struct rdtgroup *rdtgrp)
plr->thread_done = 0;
- thread = kthread_run_on_cpu(pseudo_lock_fn, rdtgrp,
+ thread = kthread_run_on_cpu(resctrl_arch_pseudo_lock_fn, plr,
plr->cpu, "pseudo_lock/%u");
if (IS_ERR(thread)) {
ret = PTR_ERR(thread);
diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
index 6419e04d8a7b..c6274d40b217 100644
--- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c
+++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
@@ -57,6 +57,12 @@ static struct kernfs_node *kn_mongrp;
/* Kernel fs node for "mon_data" directory under root */
static struct kernfs_node *kn_mondata;
+/*
+ * Used to store the max resource name width to display the schemata names in
+ * a tabular format.
+ */
+int max_name_width;
+
static struct seq_buf last_cmd_status;
static char last_cmd_status_buf[512];
@@ -111,6 +117,18 @@ void rdt_staged_configs_clear(void)
}
}
+static bool resctrl_is_mbm_enabled(void)
+{
+ return (resctrl_arch_is_mbm_total_enabled() ||
+ resctrl_arch_is_mbm_local_enabled());
+}
+
+static bool resctrl_is_mbm_event(int e)
+{
+ return (e >= QOS_L3_MBM_TOTAL_EVENT_ID &&
+ e <= QOS_L3_MBM_LOCAL_EVENT_ID);
+}
+
/*
* Trivial allocator for CLOSIDs. Since h/w only supports a small number,
* we can keep a bitmap of free CLOSIDs in a single integer.
@@ -157,7 +175,8 @@ static int closid_alloc(void)
lockdep_assert_held(&rdtgroup_mutex);
- if (IS_ENABLED(CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID)) {
+ if (IS_ENABLED(CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID) &&
+ resctrl_arch_is_llc_occupancy_enabled()) {
cleanest_closid = resctrl_find_cleanest_closid();
if (cleanest_closid < 0)
return cleanest_closid;
@@ -348,13 +367,13 @@ static int rdtgroup_cpus_show(struct kernfs_open_file *of,
* from update_closid_rmid() is protected against __switch_to() because
* preemption is disabled.
*/
-static void update_cpu_closid_rmid(void *info)
+void resctrl_arch_sync_cpu_closid_rmid(void *info)
{
- struct rdtgroup *r = info;
+ struct resctrl_cpu_defaults *r = info;
if (r) {
this_cpu_write(pqr_state.default_closid, r->closid);
- this_cpu_write(pqr_state.default_rmid, r->mon.rmid);
+ this_cpu_write(pqr_state.default_rmid, r->rmid);
}
/*
@@ -369,11 +388,20 @@ static void update_cpu_closid_rmid(void *info)
* Update the PGR_ASSOC MSR on all cpus in @cpu_mask,
*
* Per task closids/rmids must have been set up before calling this function.
+ * @r may be NULL.
*/
static void
update_closid_rmid(const struct cpumask *cpu_mask, struct rdtgroup *r)
{
- on_each_cpu_mask(cpu_mask, update_cpu_closid_rmid, r, 1);
+ struct resctrl_cpu_defaults defaults, *p = NULL;
+
+ if (r) {
+ defaults.closid = r->closid;
+ defaults.rmid = r->mon.rmid;
+ p = &defaults;
+ }
+
+ on_each_cpu_mask(cpu_mask, resctrl_arch_sync_cpu_closid_rmid, p, 1);
}
static int cpus_mon_write(struct rdtgroup *rdtgrp, cpumask_var_t newmask,
@@ -971,7 +999,7 @@ static int rdt_default_ctrl_show(struct kernfs_open_file *of,
struct resctrl_schema *s = of->kn->parent->priv;
struct rdt_resource *r = s->res;
- seq_printf(seq, "%x\n", r->default_ctrl);
+ seq_printf(seq, "%x\n", resctrl_get_default_ctrl(r));
return 0;
}
@@ -1160,10 +1188,19 @@ static int rdt_thread_throttle_mode_show(struct kernfs_open_file *of,
struct resctrl_schema *s = of->kn->parent->priv;
struct rdt_resource *r = s->res;
- if (r->membw.throttle_mode == THREAD_THROTTLE_PER_THREAD)
+ switch (r->membw.throttle_mode) {
+ case THREAD_THROTTLE_PER_THREAD:
seq_puts(seq, "per-thread\n");
- else
+ return 0;
+ case THREAD_THROTTLE_MAX:
seq_puts(seq, "max\n");
+ return 0;
+ case THREAD_THROTTLE_UNDEFINED:
+ seq_puts(seq, "undefined\n");
+ return 0;
+ }
+
+ WARN_ON_ONCE(1);
return 0;
}
@@ -1425,7 +1462,8 @@ static ssize_t rdtgroup_mode_write(struct kernfs_open_file *of,
goto out;
}
rdtgrp->mode = RDT_MODE_EXCLUSIVE;
- } else if (!strcmp(buf, "pseudo-locksetup")) {
+ } else if (IS_ENABLED(CONFIG_RESCTRL_FS_PSEUDO_LOCK) &&
+ !strcmp(buf, "pseudo-locksetup")) {
ret = rdtgroup_locksetup_enter(rdtgrp);
if (ret)
goto out;
@@ -1552,11 +1590,6 @@ out:
return ret;
}
-struct mon_config_info {
- u32 evtid;
- u32 mon_config;
-};
-
#define INVALID_CONFIG_INDEX UINT_MAX
/**
@@ -1581,31 +1614,32 @@ static inline unsigned int mon_event_config_index_get(u32 evtid)
}
}
-static void mon_event_config_read(void *info)
+void resctrl_arch_mon_event_config_read(void *_config_info)
{
- struct mon_config_info *mon_info = info;
+ struct resctrl_mon_config_info *config_info = _config_info;
unsigned int index;
u64 msrval;
- index = mon_event_config_index_get(mon_info->evtid);
+ index = mon_event_config_index_get(config_info->evtid);
if (index == INVALID_CONFIG_INDEX) {
- pr_warn_once("Invalid event id %d\n", mon_info->evtid);
+ pr_warn_once("Invalid event id %d\n", config_info->evtid);
return;
}
rdmsrl(MSR_IA32_EVT_CFG_BASE + index, msrval);
/* Report only the valid event configuration bits */
- mon_info->mon_config = msrval & MAX_EVT_CONFIG_BITS;
+ config_info->mon_config = msrval & MAX_EVT_CONFIG_BITS;
}
-static void mondata_config_read(struct rdt_mon_domain *d, struct mon_config_info *mon_info)
+static void mondata_config_read(struct resctrl_mon_config_info *mon_info)
{
- smp_call_function_any(&d->hdr.cpu_mask, mon_event_config_read, mon_info, 1);
+ smp_call_function_any(&mon_info->d->hdr.cpu_mask,
+ resctrl_arch_mon_event_config_read, mon_info, 1);
}
static int mbm_config_show(struct seq_file *s, struct rdt_resource *r, u32 evtid)
{
- struct mon_config_info mon_info;
+ struct resctrl_mon_config_info mon_info;
struct rdt_mon_domain *dom;
bool sep = false;
@@ -1616,9 +1650,11 @@ static int mbm_config_show(struct seq_file *s, struct rdt_resource *r, u32 evtid
if (sep)
seq_puts(s, ";");
- memset(&mon_info, 0, sizeof(struct mon_config_info));
+ memset(&mon_info, 0, sizeof(struct resctrl_mon_config_info));
+ mon_info.r = r;
+ mon_info.d = dom;
mon_info.evtid = evtid;
- mondata_config_read(dom, &mon_info);
+ mondata_config_read(&mon_info);
seq_printf(s, "%d=0x%02x", dom->hdr.id, mon_info.mon_config);
sep = true;
@@ -1651,30 +1687,32 @@ static int mbm_local_bytes_config_show(struct kernfs_open_file *of,
return 0;
}
-static void mon_event_config_write(void *info)
+void resctrl_arch_mon_event_config_write(void *_config_info)
{
- struct mon_config_info *mon_info = info;
+ struct resctrl_mon_config_info *config_info = _config_info;
unsigned int index;
- index = mon_event_config_index_get(mon_info->evtid);
+ index = mon_event_config_index_get(config_info->evtid);
if (index == INVALID_CONFIG_INDEX) {
- pr_warn_once("Invalid event id %d\n", mon_info->evtid);
+ pr_warn_once("Invalid event id %d\n", config_info->evtid);
return;
}
- wrmsr(MSR_IA32_EVT_CFG_BASE + index, mon_info->mon_config, 0);
+ wrmsr(MSR_IA32_EVT_CFG_BASE + index, config_info->mon_config, 0);
}
static void mbm_config_write_domain(struct rdt_resource *r,
struct rdt_mon_domain *d, u32 evtid, u32 val)
{
- struct mon_config_info mon_info = {0};
+ struct resctrl_mon_config_info mon_info = {0};
/*
* Read the current config value first. If both are the same then
* no need to write it again.
*/
+ mon_info.r = r;
+ mon_info.d = d;
mon_info.evtid = evtid;
- mondata_config_read(d, &mon_info);
+ mondata_config_read(&mon_info);
if (mon_info.mon_config == val)
return;
@@ -1686,7 +1724,7 @@ static void mbm_config_write_domain(struct rdt_resource *r,
* are scoped at the domain level. Writing any of these MSRs
* on one CPU is observed by all the CPUs in the domain.
*/
- smp_call_function_any(&d->hdr.cpu_mask, mon_event_config_write,
+ smp_call_function_any(&d->hdr.cpu_mask, resctrl_arch_mon_event_config_write,
&mon_info, 1);
/*
@@ -1703,7 +1741,6 @@ static void mbm_config_write_domain(struct rdt_resource *r,
static int mon_config_write(struct rdt_resource *r, char *tok, u32 evtid)
{
- struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
char *dom_str = NULL, *id_str;
unsigned long dom_id, val;
struct rdt_mon_domain *d;
@@ -1730,9 +1767,9 @@ next:
}
/* Value from user cannot be more than the supported set of events */
- if ((val & hw_res->mbm_cfg_mask) != val) {
+ if ((val & r->mbm_cfg_mask) != val) {
rdt_last_cmd_printf("Invalid event configuration: max valid mask is 0x%02x\n",
- hw_res->mbm_cfg_mask);
+ r->mbm_cfg_mask);
return -EINVAL;
}
@@ -2036,6 +2073,28 @@ static struct rftype *rdtgroup_get_rftype_by_name(const char *name)
return NULL;
}
+static void thread_throttle_mode_init(void)
+{
+ enum membw_throttle_mode throttle_mode = THREAD_THROTTLE_UNDEFINED;
+ struct rdt_resource *r_mba, *r_smba;
+
+ r_mba = resctrl_arch_get_resource(RDT_RESOURCE_MBA);
+ if (r_mba->alloc_capable &&
+ r_mba->membw.throttle_mode != THREAD_THROTTLE_UNDEFINED)
+ throttle_mode = r_mba->membw.throttle_mode;
+
+ r_smba = resctrl_arch_get_resource(RDT_RESOURCE_SMBA);
+ if (r_smba->alloc_capable &&
+ r_smba->membw.throttle_mode != THREAD_THROTTLE_UNDEFINED)
+ throttle_mode = r_smba->membw.throttle_mode;
+
+ if (throttle_mode == THREAD_THROTTLE_UNDEFINED)
+ return;
+
+ resctrl_file_fflags_init("thread_throttle_mode",
+ RFTYPE_CTRL_INFO | RFTYPE_RES_MB);
+}
+
void resctrl_file_fflags_init(const char *config, unsigned long fflags)
{
struct rftype *rft;
@@ -2164,6 +2223,20 @@ static int rdtgroup_mkdir_info_resdir(void *priv, char *name,
return ret;
}
+static unsigned long fflags_from_resource(struct rdt_resource *r)
+{
+ switch (r->rid) {
+ case RDT_RESOURCE_L3:
+ case RDT_RESOURCE_L2:
+ return RFTYPE_RES_CACHE;
+ case RDT_RESOURCE_MBA:
+ case RDT_RESOURCE_SMBA:
+ return RFTYPE_RES_MB;
+ }
+
+ return WARN_ON_ONCE(1);
+}
+
static int rdtgroup_create_info_dir(struct kernfs_node *parent_kn)
{
struct resctrl_schema *s;
@@ -2184,14 +2257,14 @@ static int rdtgroup_create_info_dir(struct kernfs_node *parent_kn)
/* loop over enabled controls, these are all alloc_capable */
list_for_each_entry(s, &resctrl_schema_all, list) {
r = s->res;
- fflags = r->fflags | RFTYPE_CTRL_INFO;
+ fflags = fflags_from_resource(r) | RFTYPE_CTRL_INFO;
ret = rdtgroup_mkdir_info_resdir(s, s->name, fflags);
if (ret)
goto out_destroy;
}
for_each_mon_capable_rdt_resource(r) {
- fflags = r->fflags | RFTYPE_MON_INFO;
+ fflags = fflags_from_resource(r) | RFTYPE_MON_INFO;
sprintf(name, "%s_MON", r->name);
ret = rdtgroup_mkdir_info_resdir(r, name, fflags);
if (ret)
@@ -2255,7 +2328,7 @@ static void l2_qos_cfg_update(void *arg)
static inline bool is_mba_linear(void)
{
- return rdt_resources_all[RDT_RESOURCE_MBA].r_resctrl.membw.delay_linear;
+ return resctrl_arch_get_resource(RDT_RESOURCE_MBA)->membw.delay_linear;
}
static int set_cache_qos_cfg(int level, bool enable)
@@ -2345,10 +2418,10 @@ static void mba_sc_domain_destroy(struct rdt_resource *r,
*/
static bool supports_mba_mbps(void)
{
- struct rdt_resource *rmbm = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl;
- struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_MBA].r_resctrl;
+ struct rdt_resource *rmbm = resctrl_arch_get_resource(RDT_RESOURCE_L3);
+ struct rdt_resource *r = resctrl_arch_get_resource(RDT_RESOURCE_MBA);
- return (is_mbm_enabled() &&
+ return (resctrl_is_mbm_enabled() &&
r->alloc_capable && is_mba_linear() &&
r->ctrl_scope == rmbm->mon_scope);
}
@@ -2359,7 +2432,7 @@ static bool supports_mba_mbps(void)
*/
static int set_mba_sc(bool mba_sc)
{
- struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_MBA].r_resctrl;
+ struct rdt_resource *r = resctrl_arch_get_resource(RDT_RESOURCE_MBA);
u32 num_closid = resctrl_arch_get_num_closid(r);
struct rdt_ctrl_domain *d;
unsigned long fflags;
@@ -2596,6 +2669,20 @@ static int schemata_list_add(struct rdt_resource *r, enum resctrl_conf_type type
if (cl > max_name_width)
max_name_width = cl;
+ switch (r->schema_fmt) {
+ case RESCTRL_SCHEMA_BITMAP:
+ s->fmt_str = "%d=%x";
+ break;
+ case RESCTRL_SCHEMA_RANGE:
+ s->fmt_str = "%d=%u";
+ break;
+ }
+
+ if (WARN_ON_ONCE(!s->fmt_str)) {
+ kfree(s);
+ return -EINVAL;
+ }
+
INIT_LIST_HEAD(&s->list);
list_add(&s->list, &resctrl_schema_all);
@@ -2712,8 +2799,8 @@ static int rdt_get_tree(struct fs_context *fc)
if (resctrl_arch_alloc_capable() || resctrl_arch_mon_capable())
resctrl_mounted = true;
- if (is_mbm_enabled()) {
- r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl;
+ if (resctrl_is_mbm_enabled()) {
+ r = resctrl_arch_get_resource(RDT_RESOURCE_L3);
list_for_each_entry(dom, &r->mon_domains, hdr.list)
mbm_setup_overflow_handler(dom, MBM_OVERFLOW_INTERVAL,
RESCTRL_PICK_ANY_CPU);
@@ -2823,7 +2910,7 @@ static int rdt_init_fs_context(struct fs_context *fc)
return 0;
}
-static int reset_all_ctrls(struct rdt_resource *r)
+void resctrl_arch_reset_all_ctrls(struct rdt_resource *r)
{
struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
struct rdt_hw_ctrl_domain *hw_dom;
@@ -2847,12 +2934,12 @@ static int reset_all_ctrls(struct rdt_resource *r)
hw_dom = resctrl_to_arch_ctrl_dom(d);
for (i = 0; i < hw_res->num_closid; i++)
- hw_dom->ctrl_val[i] = r->default_ctrl;
+ hw_dom->ctrl_val[i] = resctrl_get_default_ctrl(r);
msr_param.dom = d;
smp_call_function_any(&d->hdr.cpu_mask, rdt_ctrl_update, &msr_param, 1);
}
- return 0;
+ return;
}
/*
@@ -2971,9 +3058,10 @@ static void rdt_kill_sb(struct super_block *sb)
rdt_disable_ctx();
- /*Put everything back to default values. */
+ /* Put everything back to default values. */
for_each_alloc_capable_rdt_resource(r)
- reset_all_ctrls(r);
+ resctrl_arch_reset_all_ctrls(r);
+
rmdir_all_sub();
rdt_pseudo_lock_release();
rdtgroup_default.mode = RDT_MODE_SHAREABLE;
@@ -3080,7 +3168,7 @@ static int mon_add_all_files(struct kernfs_node *kn, struct rdt_mon_domain *d,
if (ret)
return ret;
- if (!do_sum && is_mbm_event(mevt->evtid))
+ if (!do_sum && resctrl_is_mbm_event(mevt->evtid))
mon_event_read(&rr, r, d, prgrp, &d->hdr.cpu_mask, mevt->evtid, true);
}
@@ -3382,7 +3470,7 @@ static void rdtgroup_init_mba(struct rdt_resource *r, u32 closid)
}
cfg = &d->staged_config[CDP_NONE];
- cfg->new_ctrl = r->default_ctrl;
+ cfg->new_ctrl = resctrl_get_default_ctrl(r);
cfg->have_new_ctrl = true;
}
}
@@ -3696,14 +3784,21 @@ static int rdtgroup_mkdir(struct kernfs_node *parent_kn, const char *name,
static int rdtgroup_rmdir_mon(struct rdtgroup *rdtgrp, cpumask_var_t tmpmask)
{
struct rdtgroup *prdtgrp = rdtgrp->mon.parent;
+ u32 closid, rmid;
int cpu;
/* Give any tasks back to the parent group */
rdt_move_group_tasks(rdtgrp, prdtgrp, tmpmask);
- /* Update per cpu rmid of the moved CPUs first */
+ /*
+ * Update per cpu closid/rmid of the moved CPUs first.
+ * Note: the closid will not change, but the arch code still needs it.
+ */
+ closid = prdtgrp->closid;
+ rmid = prdtgrp->mon.rmid;
for_each_cpu(cpu, &rdtgrp->cpu_mask)
- per_cpu(pqr_state.default_rmid, cpu) = prdtgrp->mon.rmid;
+ resctrl_arch_set_cpu_default_closid_rmid(cpu, closid, rmid);
+
/*
* Update the MSR on moved CPUs and CPUs which have moved
* task running on them.
@@ -3736,6 +3831,7 @@ static int rdtgroup_ctrl_remove(struct rdtgroup *rdtgrp)
static int rdtgroup_rmdir_ctrl(struct rdtgroup *rdtgrp, cpumask_var_t tmpmask)
{
+ u32 closid, rmid;
int cpu;
/* Give any tasks back to the default group */
@@ -3746,10 +3842,10 @@ static int rdtgroup_rmdir_ctrl(struct rdtgroup *rdtgrp, cpumask_var_t tmpmask)
&rdtgroup_default.cpu_mask, &rdtgrp->cpu_mask);
/* Update per cpu closid and rmid of the moved CPUs first */
- for_each_cpu(cpu, &rdtgrp->cpu_mask) {
- per_cpu(pqr_state.default_closid, cpu) = rdtgroup_default.closid;
- per_cpu(pqr_state.default_rmid, cpu) = rdtgroup_default.mon.rmid;
- }
+ closid = rdtgroup_default.closid;
+ rmid = rdtgroup_default.mon.rmid;
+ for_each_cpu(cpu, &rdtgrp->cpu_mask)
+ resctrl_arch_set_cpu_default_closid_rmid(cpu, closid, rmid);
/*
* Update the MSR on moved CPUs and CPUs which have moved
@@ -3950,7 +4046,7 @@ static int rdtgroup_show_options(struct seq_file *seq, struct kernfs_root *kf)
if (resctrl_arch_get_cdp_enabled(RDT_RESOURCE_L2))
seq_puts(seq, ",cdpl2");
- if (is_mba_sc(&rdt_resources_all[RDT_RESOURCE_MBA].r_resctrl))
+ if (is_mba_sc(resctrl_arch_get_resource(RDT_RESOURCE_MBA)))
seq_puts(seq, ",mba_MBps");
if (resctrl_debug)
@@ -4029,9 +4125,9 @@ void resctrl_offline_mon_domain(struct rdt_resource *r, struct rdt_mon_domain *d
if (resctrl_mounted && resctrl_arch_mon_capable())
rmdir_mondata_subdir_allrdtgrp(r, d);
- if (is_mbm_enabled())
+ if (resctrl_is_mbm_enabled())
cancel_delayed_work(&d->mbm_over);
- if (is_llc_occupancy_enabled() && has_busy_rmid(d)) {
+ if (resctrl_arch_is_llc_occupancy_enabled() && has_busy_rmid(d)) {
/*
* When a package is going down, forcefully
* decrement rmid->ebusy. There is no way to know
@@ -4049,17 +4145,30 @@ void resctrl_offline_mon_domain(struct rdt_resource *r, struct rdt_mon_domain *d
mutex_unlock(&rdtgroup_mutex);
}
+/**
+ * domain_setup_mon_state() - Initialise domain monitoring structures.
+ * @r: The resource for the newly online domain.
+ * @d: The newly online domain.
+ *
+ * Allocate monitor resources that belong to this domain.
+ * Called when the first CPU of a domain comes online, regardless of whether
+ * the filesystem is mounted.
+ * During boot this may be called before global allocations have been made by
+ * resctrl_mon_resource_init().
+ *
+ * Returns 0 for success, or -ENOMEM.
+ */
static int domain_setup_mon_state(struct rdt_resource *r, struct rdt_mon_domain *d)
{
u32 idx_limit = resctrl_arch_system_num_rmid_idx();
size_t tsize;
- if (is_llc_occupancy_enabled()) {
+ if (resctrl_arch_is_llc_occupancy_enabled()) {
d->rmid_busy_llc = bitmap_zalloc(idx_limit, GFP_KERNEL);
if (!d->rmid_busy_llc)
return -ENOMEM;
}
- if (is_mbm_total_enabled()) {
+ if (resctrl_arch_is_mbm_total_enabled()) {
tsize = sizeof(*d->mbm_total);
d->mbm_total = kcalloc(idx_limit, tsize, GFP_KERNEL);
if (!d->mbm_total) {
@@ -4067,7 +4176,7 @@ static int domain_setup_mon_state(struct rdt_resource *r, struct rdt_mon_domain
return -ENOMEM;
}
}
- if (is_mbm_local_enabled()) {
+ if (resctrl_arch_is_mbm_local_enabled()) {
tsize = sizeof(*d->mbm_local);
d->mbm_local = kcalloc(idx_limit, tsize, GFP_KERNEL);
if (!d->mbm_local) {
@@ -4106,13 +4215,13 @@ int resctrl_online_mon_domain(struct rdt_resource *r, struct rdt_mon_domain *d)
if (err)
goto out_unlock;
- if (is_mbm_enabled()) {
+ if (resctrl_is_mbm_enabled()) {
INIT_DELAYED_WORK(&d->mbm_over, mbm_handle_overflow);
mbm_setup_overflow_handler(d, MBM_OVERFLOW_INTERVAL,
RESCTRL_PICK_ANY_CPU);
}
- if (is_llc_occupancy_enabled())
+ if (resctrl_arch_is_llc_occupancy_enabled())
INIT_DELAYED_WORK(&d->cqm_limbo, cqm_handle_limbo);
/*
@@ -4148,9 +4257,25 @@ static void clear_childcpus(struct rdtgroup *r, unsigned int cpu)
}
}
+static struct rdt_mon_domain *get_mon_domain_from_cpu(int cpu,
+ struct rdt_resource *r)
+{
+ struct rdt_mon_domain *d;
+
+ lockdep_assert_cpus_held();
+
+ list_for_each_entry(d, &r->mon_domains, hdr.list) {
+ /* Find the domain that contains this CPU */
+ if (cpumask_test_cpu(cpu, &d->hdr.cpu_mask))
+ return d;
+ }
+
+ return NULL;
+}
+
void resctrl_offline_cpu(unsigned int cpu)
{
- struct rdt_resource *l3 = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl;
+ struct rdt_resource *l3 = resctrl_arch_get_resource(RDT_RESOURCE_L3);
struct rdt_mon_domain *d;
struct rdtgroup *rdtgrp;
@@ -4167,12 +4292,12 @@ void resctrl_offline_cpu(unsigned int cpu)
d = get_mon_domain_from_cpu(cpu, l3);
if (d) {
- if (is_mbm_enabled() && cpu == d->mbm_work_cpu) {
+ if (resctrl_is_mbm_enabled() && cpu == d->mbm_work_cpu) {
cancel_delayed_work(&d->mbm_over);
mbm_setup_overflow_handler(d, 0, cpu);
}
- if (is_llc_occupancy_enabled() && cpu == d->cqm_work_cpu &&
- has_busy_rmid(d)) {
+ if (resctrl_arch_is_llc_occupancy_enabled() &&
+ cpu == d->cqm_work_cpu && has_busy_rmid(d)) {
cancel_delayed_work(&d->cqm_limbo);
cqm_setup_limbo_handler(d, 0, cpu);
}
@@ -4183,14 +4308,14 @@ out_unlock:
}
/*
- * rdtgroup_init - rdtgroup initialization
+ * resctrl_init - resctrl filesystem initialization
*
* Setup resctrl file system including set up root, create mount point,
- * register rdtgroup filesystem, and initialize files under root directory.
+ * register resctrl filesystem, and initialize files under root directory.
*
* Return: 0 on success or -errno
*/
-int __init rdtgroup_init(void)
+int __init resctrl_init(void)
{
int ret = 0;
@@ -4199,10 +4324,18 @@ int __init rdtgroup_init(void)
rdtgroup_setup_default();
- ret = sysfs_create_mount_point(fs_kobj, "resctrl");
+ thread_throttle_mode_init();
+
+ ret = resctrl_mon_resource_init();
if (ret)
return ret;
+ ret = sysfs_create_mount_point(fs_kobj, "resctrl");
+ if (ret) {
+ resctrl_mon_resource_exit();
+ return ret;
+ }
+
ret = register_filesystem(&rdt_fs_type);
if (ret)
goto cleanup_mountpoint;
@@ -4234,13 +4367,16 @@ int __init rdtgroup_init(void)
cleanup_mountpoint:
sysfs_remove_mount_point(fs_kobj, "resctrl");
+ resctrl_mon_resource_exit();
return ret;
}
-void __exit rdtgroup_exit(void)
+void __exit resctrl_exit(void)
{
debugfs_remove_recursive(debugfs_resctrl);
unregister_filesystem(&rdt_fs_type);
sysfs_remove_mount_point(fs_kobj, "resctrl");
+
+ resctrl_mon_resource_exit();
}
diff --git a/arch/x86/kernel/cpu/sgx/driver.c b/arch/x86/kernel/cpu/sgx/driver.c
index 22b65a5f5ec6..7f8d1e11dbee 100644
--- a/arch/x86/kernel/cpu/sgx/driver.c
+++ b/arch/x86/kernel/cpu/sgx/driver.c
@@ -150,13 +150,15 @@ int __init sgx_drv_init(void)
u64 xfrm_mask;
int ret;
- if (!cpu_feature_enabled(X86_FEATURE_SGX_LC))
+ if (!cpu_feature_enabled(X86_FEATURE_SGX_LC)) {
+ pr_info("SGX disabled: SGX launch control CPU feature is not available, /dev/sgx_enclave disabled.\n");
return -ENODEV;
+ }
cpuid_count(SGX_CPUID, 0, &eax, &ebx, &ecx, &edx);
if (!(eax & 1)) {
- pr_err("SGX disabled: SGX1 instruction support not available.\n");
+ pr_info("SGX disabled: SGX1 instruction support not available, /dev/sgx_enclave disabled.\n");
return -ENODEV;
}
@@ -173,8 +175,10 @@ int __init sgx_drv_init(void)
}
ret = misc_register(&sgx_dev_enclave);
- if (ret)
+ if (ret) {
+ pr_info("SGX disabled: Unable to register the /dev/sgx_enclave driver (%d).\n", ret);
return ret;
+ }
return 0;
}
diff --git a/arch/x86/kernel/cpu/sgx/ioctl.c b/arch/x86/kernel/cpu/sgx/ioctl.c
index b65ab214bdf5..776a20172867 100644
--- a/arch/x86/kernel/cpu/sgx/ioctl.c
+++ b/arch/x86/kernel/cpu/sgx/ioctl.c
@@ -64,6 +64,13 @@ static int sgx_encl_create(struct sgx_encl *encl, struct sgx_secs *secs)
struct file *backing;
long ret;
+ /*
+ * ECREATE would detect this too, but checking here also ensures
+ * that the 'encl_size' calculations below can never overflow.
+ */
+ if (!is_power_of_2(secs->size))
+ return -EINVAL;
+
va_page = sgx_encl_grow(encl, true);
if (IS_ERR(va_page))
return PTR_ERR(va_page);
diff --git a/arch/x86/kernel/cpu/vmware.c b/arch/x86/kernel/cpu/vmware.c
index 00189cdeb775..cb3f900c46fc 100644
--- a/arch/x86/kernel/cpu/vmware.c
+++ b/arch/x86/kernel/cpu/vmware.c
@@ -26,6 +26,7 @@
#include <linux/export.h>
#include <linux/clocksource.h>
#include <linux/cpu.h>
+#include <linux/efi.h>
#include <linux/reboot.h>
#include <linux/static_call.h>
#include <asm/div64.h>
@@ -429,6 +430,9 @@ static void __init vmware_platform_setup(void)
pr_warn("Failed to get TSC freq from the hypervisor\n");
}
+ if (cc_platform_has(CC_ATTR_GUEST_SEV_SNP) && !efi_enabled(EFI_BOOT))
+ x86_init.mpparse.find_mptable = mpparse_find_mptable;
+
vmware_paravirt_ops_setup();
#ifdef CONFIG_X86_IO_APIC
diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
index 340af8155658..0be61c45400c 100644
--- a/arch/x86/kernel/crash.c
+++ b/arch/x86/kernel/crash.c
@@ -140,7 +140,7 @@ void native_machine_crash_shutdown(struct pt_regs *regs)
x86_platform.guest.enc_kexec_begin();
x86_platform.guest.enc_kexec_finish();
- crash_save_cpu(regs, safe_smp_processor_id());
+ crash_save_cpu(regs, smp_processor_id());
}
#if defined(CONFIG_KEXEC_FILE) || defined(CONFIG_CRASH_HOTPLUG)
diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
index a7d562697e50..91639d1e4ec2 100644
--- a/arch/x86/kernel/dumpstack.c
+++ b/arch/x86/kernel/dumpstack.c
@@ -395,18 +395,13 @@ NOKPROBE_SYMBOL(oops_end);
static void __die_header(const char *str, struct pt_regs *regs, long err)
{
- const char *pr = "";
-
/* Save the regs of the first oops for the executive summary later. */
if (!die_counter)
exec_summary_regs = *regs;
- if (IS_ENABLED(CONFIG_PREEMPTION))
- pr = IS_ENABLED(CONFIG_PREEMPT_RT) ? " PREEMPT_RT" : " PREEMPT";
-
printk(KERN_DEFAULT
- "Oops: %s: %04lx [#%d]%s%s%s%s%s\n", str, err & 0xffff,
- ++die_counter, pr,
+ "Oops: %s: %04lx [#%d]%s%s%s%s\n", str, err & 0xffff,
+ ++die_counter,
IS_ENABLED(CONFIG_SMP) ? " SMP" : "",
debug_pagealloc_enabled() ? " DEBUG_PAGEALLOC" : "",
IS_ENABLED(CONFIG_KASAN) ? " KASAN" : "",
diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
index b4905d5173fd..722fd712e1cf 100644
--- a/arch/x86/kernel/dumpstack_32.c
+++ b/arch/x86/kernel/dumpstack_32.c
@@ -37,7 +37,7 @@ const char *stack_type_name(enum stack_type type)
static bool in_hardirq_stack(unsigned long *stack, struct stack_info *info)
{
- unsigned long *begin = (unsigned long *)this_cpu_read(pcpu_hot.hardirq_stack_ptr);
+ unsigned long *begin = (unsigned long *)this_cpu_read(hardirq_stack_ptr);
unsigned long *end = begin + (THREAD_SIZE / sizeof(long));
/*
@@ -62,7 +62,7 @@ static bool in_hardirq_stack(unsigned long *stack, struct stack_info *info)
static bool in_softirq_stack(unsigned long *stack, struct stack_info *info)
{
- unsigned long *begin = (unsigned long *)this_cpu_read(pcpu_hot.softirq_stack_ptr);
+ unsigned long *begin = (unsigned long *)this_cpu_read(softirq_stack_ptr);
unsigned long *end = begin + (THREAD_SIZE / sizeof(long));
/*
diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
index f05339fee778..6c5defd6569a 100644
--- a/arch/x86/kernel/dumpstack_64.c
+++ b/arch/x86/kernel/dumpstack_64.c
@@ -134,7 +134,7 @@ static __always_inline bool in_exception_stack(unsigned long *stack, struct stac
static __always_inline bool in_irq_stack(unsigned long *stack, struct stack_info *info)
{
- unsigned long *end = (unsigned long *)this_cpu_read(pcpu_hot.hardirq_stack_ptr);
+ unsigned long *end = (unsigned long *)this_cpu_read(hardirq_stack_ptr);
unsigned long *begin;
/*
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
index 82b96ed9890a..57120f0749cc 100644
--- a/arch/x86/kernel/e820.c
+++ b/arch/x86/kernel/e820.c
@@ -28,18 +28,13 @@
* the first 128 E820 memory entries in boot_params.e820_table and the remaining
* (if any) entries of the SETUP_E820_EXT nodes. We use this to:
*
- * - inform the user about the firmware's notion of memory layout
- * via /sys/firmware/memmap
- *
* - the hibernation code uses it to generate a kernel-independent CRC32
* checksum of the physical memory layout of a system.
*
* - 'e820_table_kexec': a slightly modified (by the kernel) firmware version
* passed to us by the bootloader - the major difference between
- * e820_table_firmware[] and this one is that, the latter marks the setup_data
- * list created by the EFI boot stub as reserved, so that kexec can reuse the
- * setup_data information in the second kernel. Besides, e820_table_kexec[]
- * might also be modified by the kexec itself to fake a mptable.
+ * e820_table_firmware[] and this one is that e820_table_kexec[]
+ * might be modified by the kexec itself to fake an mptable.
* We use this to:
*
* - kexec, which is a bootloader in disguise, uses the original E820
@@ -47,6 +42,11 @@
* can have a restricted E820 map while the kexec()-ed kexec-kernel
* can have access to full memory - etc.
*
+ * Export the memory layout via /sys/firmware/memmap. kexec-tools uses
+ * the entries to create an E820 table for the kexec kernel.
+ *
+ * kexec_file_load in-kernel code uses the table for the kexec kernel.
+ *
* - 'e820_table': this is the main E820 table that is massaged by the
* low level x86 platform code, or modified by boot parameters, before
* passed on to higher level MM layers.
@@ -187,8 +187,7 @@ void __init e820__range_add(u64 start, u64 size, enum e820_type type)
static void __init e820_print_type(enum e820_type type)
{
switch (type) {
- case E820_TYPE_RAM: /* Fall through: */
- case E820_TYPE_RESERVED_KERN: pr_cont("usable"); break;
+ case E820_TYPE_RAM: pr_cont("usable"); break;
case E820_TYPE_RESERVED: pr_cont("reserved"); break;
case E820_TYPE_SOFT_RESERVED: pr_cont("soft reserved"); break;
case E820_TYPE_ACPI: pr_cont("ACPI data"); break;
@@ -764,7 +763,7 @@ void __init e820__register_nosave_regions(unsigned long limit_pfn)
pfn = PFN_DOWN(entry->addr + entry->size);
- if (entry->type != E820_TYPE_RAM && entry->type != E820_TYPE_RESERVED_KERN)
+ if (entry->type != E820_TYPE_RAM)
register_nosave_region(PFN_UP(entry->addr), pfn);
if (pfn >= limit_pfn)
@@ -991,60 +990,6 @@ static int __init parse_memmap_opt(char *str)
early_param("memmap", parse_memmap_opt);
/*
- * Reserve all entries from the bootloader's extensible data nodes list,
- * because if present we are going to use it later on to fetch e820
- * entries from it:
- */
-void __init e820__reserve_setup_data(void)
-{
- struct setup_indirect *indirect;
- struct setup_data *data;
- u64 pa_data, pa_next;
- u32 len;
-
- pa_data = boot_params.hdr.setup_data;
- if (!pa_data)
- return;
-
- while (pa_data) {
- data = early_memremap(pa_data, sizeof(*data));
- if (!data) {
- pr_warn("e820: failed to memremap setup_data entry\n");
- return;
- }
-
- len = sizeof(*data);
- pa_next = data->next;
-
- e820__range_update(pa_data, sizeof(*data)+data->len, E820_TYPE_RAM, E820_TYPE_RESERVED_KERN);
-
- if (data->type == SETUP_INDIRECT) {
- len += data->len;
- early_memunmap(data, sizeof(*data));
- data = early_memremap(pa_data, len);
- if (!data) {
- pr_warn("e820: failed to memremap indirect setup_data\n");
- return;
- }
-
- indirect = (struct setup_indirect *)data->data;
-
- if (indirect->type != SETUP_INDIRECT)
- e820__range_update(indirect->addr, indirect->len,
- E820_TYPE_RAM, E820_TYPE_RESERVED_KERN);
- }
-
- pa_data = pa_next;
- early_memunmap(data, len);
- }
-
- e820__update_table(e820_table);
-
- pr_info("extended physical RAM map:\n");
- e820__print_table("reserve setup_data");
-}
-
-/*
* Called after parse_early_param(), after early parameters (such as mem=)
* have been processed, in which case we already have an E820 table filled in
* via the parameter callback function(s), but it's not sorted and printed yet:
@@ -1063,7 +1008,6 @@ void __init e820__finish_early_params(void)
static const char *__init e820_type_to_string(struct e820_entry *entry)
{
switch (entry->type) {
- case E820_TYPE_RESERVED_KERN: /* Fall-through: */
case E820_TYPE_RAM: return "System RAM";
case E820_TYPE_ACPI: return "ACPI Tables";
case E820_TYPE_NVS: return "ACPI Non-volatile Storage";
@@ -1079,7 +1023,6 @@ static const char *__init e820_type_to_string(struct e820_entry *entry)
static unsigned long __init e820_type_to_iomem_type(struct e820_entry *entry)
{
switch (entry->type) {
- case E820_TYPE_RESERVED_KERN: /* Fall-through: */
case E820_TYPE_RAM: return IORESOURCE_SYSTEM_RAM;
case E820_TYPE_ACPI: /* Fall-through: */
case E820_TYPE_NVS: /* Fall-through: */
@@ -1101,7 +1044,6 @@ static unsigned long __init e820_type_to_iores_desc(struct e820_entry *entry)
case E820_TYPE_PRAM: return IORES_DESC_PERSISTENT_MEMORY_LEGACY;
case E820_TYPE_RESERVED: return IORES_DESC_RESERVED;
case E820_TYPE_SOFT_RESERVED: return IORES_DESC_SOFT_RESERVED;
- case E820_TYPE_RESERVED_KERN: /* Fall-through: */
case E820_TYPE_RAM: /* Fall-through: */
case E820_TYPE_UNUSABLE: /* Fall-through: */
default: return IORES_DESC_NONE;
@@ -1124,7 +1066,6 @@ static bool __init do_mark_busy(enum e820_type type, struct resource *res)
case E820_TYPE_PRAM:
case E820_TYPE_PMEM:
return false;
- case E820_TYPE_RESERVED_KERN:
case E820_TYPE_RAM:
case E820_TYPE_ACPI:
case E820_TYPE_NVS:
@@ -1176,9 +1117,9 @@ void __init e820__reserve_resources(void)
res++;
}
- /* Expose the bootloader-provided memory layout to the sysfs. */
- for (i = 0; i < e820_table_firmware->nr_entries; i++) {
- struct e820_entry *entry = e820_table_firmware->entries + i;
+ /* Expose the kexec e820 table to the sysfs. */
+ for (i = 0; i < e820_table_kexec->nr_entries; i++) {
+ struct e820_entry *entry = e820_table_kexec->entries + i;
firmware_map_add_early(entry->addr, entry->addr + entry->size, e820_type_to_string(entry));
}
@@ -1302,6 +1243,36 @@ void __init e820__memblock_setup(void)
int i;
u64 end;
+#ifdef CONFIG_MEMORY_HOTPLUG
+ /*
+ * Memory used by the kernel cannot be hot-removed because Linux
+ * cannot migrate the kernel pages. When memory hotplug is
+ * enabled, we should prevent memblock from allocating memory
+ * for the kernel.
+ *
+ * ACPI SRAT records all hotpluggable memory ranges. But before
+ * SRAT is parsed, we don't know about it.
+ *
+ * The kernel image is loaded into memory at very early time. We
+ * cannot prevent this anyway. So on NUMA system, we set any
+ * node the kernel resides in as un-hotpluggable.
+ *
+ * Since on modern servers, one node could have double-digit
+ * gigabytes memory, we can assume the memory around the kernel
+ * image is also un-hotpluggable. So before SRAT is parsed, just
+ * allocate memory near the kernel image to try the best to keep
+ * the kernel away from hotpluggable memory.
+ */
+ if (movable_node_is_enabled())
+ memblock_set_bottom_up(true);
+#endif
+
+ /*
+ * At this point only the first megabyte is mapped for sure, the
+ * rest of the memory cannot be used for memblock resizing
+ */
+ memblock_set_current_limit(ISA_END_ADDRESS);
+
/*
* The bootstrap memblock region count maximum is 128 entries
* (INIT_MEMBLOCK_REGIONS), but EFI might pass us more E820 entries
@@ -1323,7 +1294,7 @@ void __init e820__memblock_setup(void)
if (entry->type == E820_TYPE_SOFT_RESERVED)
memblock_reserve(entry->addr, entry->size);
- if (entry->type != E820_TYPE_RAM && entry->type != E820_TYPE_RESERVED_KERN)
+ if (entry->type != E820_TYPE_RAM)
continue;
memblock_add(entry->addr, entry->size);
diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
index 44f937015e1e..fc1714bad045 100644
--- a/arch/x86/kernel/early_printk.c
+++ b/arch/x86/kernel/early_printk.c
@@ -19,6 +19,7 @@
#include <linux/usb/ehci_def.h>
#include <linux/usb/xhci-dbgp.h>
#include <asm/pci_x86.h>
+#include <linux/static_call.h>
/* Simple VGA output */
#define VGABASE (__ISA_IO_base + 0xb8000)
@@ -94,26 +95,28 @@ static unsigned long early_serial_base = 0x3f8; /* ttyS0 */
#define DLL 0 /* Divisor Latch Low */
#define DLH 1 /* Divisor latch High */
-static unsigned int io_serial_in(unsigned long addr, int offset)
+static __noendbr unsigned int io_serial_in(unsigned long addr, int offset)
{
return inb(addr + offset);
}
+ANNOTATE_NOENDBR_SYM(io_serial_in);
-static void io_serial_out(unsigned long addr, int offset, int value)
+static __noendbr void io_serial_out(unsigned long addr, int offset, int value)
{
outb(value, addr + offset);
}
+ANNOTATE_NOENDBR_SYM(io_serial_out);
-static unsigned int (*serial_in)(unsigned long addr, int offset) = io_serial_in;
-static void (*serial_out)(unsigned long addr, int offset, int value) = io_serial_out;
+DEFINE_STATIC_CALL(serial_in, io_serial_in);
+DEFINE_STATIC_CALL(serial_out, io_serial_out);
static int early_serial_putc(unsigned char ch)
{
unsigned timeout = 0xffff;
- while ((serial_in(early_serial_base, LSR) & XMTRDY) == 0 && --timeout)
+ while ((static_call(serial_in)(early_serial_base, LSR) & XMTRDY) == 0 && --timeout)
cpu_relax();
- serial_out(early_serial_base, TXR, ch);
+ static_call(serial_out)(early_serial_base, TXR, ch);
return timeout ? 0 : -1;
}
@@ -131,16 +134,16 @@ static __init void early_serial_hw_init(unsigned divisor)
{
unsigned char c;
- serial_out(early_serial_base, LCR, 0x3); /* 8n1 */
- serial_out(early_serial_base, IER, 0); /* no interrupt */
- serial_out(early_serial_base, FCR, 0); /* no fifo */
- serial_out(early_serial_base, MCR, 0x3); /* DTR + RTS */
+ static_call(serial_out)(early_serial_base, LCR, 0x3); /* 8n1 */
+ static_call(serial_out)(early_serial_base, IER, 0); /* no interrupt */
+ static_call(serial_out)(early_serial_base, FCR, 0); /* no fifo */
+ static_call(serial_out)(early_serial_base, MCR, 0x3); /* DTR + RTS */
- c = serial_in(early_serial_base, LCR);
- serial_out(early_serial_base, LCR, c | DLAB);
- serial_out(early_serial_base, DLL, divisor & 0xff);
- serial_out(early_serial_base, DLH, (divisor >> 8) & 0xff);
- serial_out(early_serial_base, LCR, c & ~DLAB);
+ c = static_call(serial_in)(early_serial_base, LCR);
+ static_call(serial_out)(early_serial_base, LCR, c | DLAB);
+ static_call(serial_out)(early_serial_base, DLL, divisor & 0xff);
+ static_call(serial_out)(early_serial_base, DLH, (divisor >> 8) & 0xff);
+ static_call(serial_out)(early_serial_base, LCR, c & ~DLAB);
}
#define DEFAULT_BAUD 9600
@@ -183,28 +186,26 @@ static __init void early_serial_init(char *s)
/* Convert from baud to divisor value */
divisor = 115200 / baud;
- /* These will always be IO based ports */
- serial_in = io_serial_in;
- serial_out = io_serial_out;
-
/* Set up the HW */
early_serial_hw_init(divisor);
}
#ifdef CONFIG_PCI
-static void mem32_serial_out(unsigned long addr, int offset, int value)
+static __noendbr void mem32_serial_out(unsigned long addr, int offset, int value)
{
u32 __iomem *vaddr = (u32 __iomem *)addr;
/* shift implied by pointer type */
writel(value, vaddr + offset);
}
+ANNOTATE_NOENDBR_SYM(mem32_serial_out);
-static unsigned int mem32_serial_in(unsigned long addr, int offset)
+static __noendbr unsigned int mem32_serial_in(unsigned long addr, int offset)
{
u32 __iomem *vaddr = (u32 __iomem *)addr;
/* shift implied by pointer type */
return readl(vaddr + offset);
}
+ANNOTATE_NOENDBR_SYM(mem32_serial_in);
/*
* early_pci_serial_init()
@@ -278,15 +279,13 @@ static __init void early_pci_serial_init(char *s)
*/
if ((bar0 & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_IO) {
/* it is IO mapped */
- serial_in = io_serial_in;
- serial_out = io_serial_out;
early_serial_base = bar0 & PCI_BASE_ADDRESS_IO_MASK;
write_pci_config(bus, slot, func, PCI_COMMAND,
cmdreg|PCI_COMMAND_IO);
} else {
/* It is memory mapped - assume 32-bit alignment */
- serial_in = mem32_serial_in;
- serial_out = mem32_serial_out;
+ static_call_update(serial_in, mem32_serial_in);
+ static_call_update(serial_out, mem32_serial_out);
/* WARNING! assuming the address is always in the first 4G */
early_serial_base =
(unsigned long)early_ioremap(bar0 & PCI_BASE_ADDRESS_MEM_MASK, 0x10);
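The early_printk conversion above follows the usual static-call pattern: DEFINE_STATIC_CALL() binds a default target at build time, call sites go through static_call(), and static_call_update() retargets every call site by patching the call instruction in place. A minimal sketch of that pattern with made-up names (not part of the patch):

        #include <linux/static_call.h>
        #include <linux/io.h>

        static unsigned int pio_read(unsigned long addr)        /* default target */
        {
                return inb(addr);
        }
        DEFINE_STATIC_CALL(bus_read, pio_read);

        static unsigned int mmio_read(unsigned long addr)       /* alternative target */
        {
                return readl((void __iomem *)addr);
        }

        static unsigned int read_reg(unsigned long addr)
        {
                /* Direct, patchable call -- no indirect branch through a pointer */
                return static_call(bus_read)(addr);
        }

        static void switch_to_mmio(void)
        {
                /* Patches every static_call(bus_read) site to call mmio_read() */
                static_call_update(bus_read, mmio_read);
        }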
diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c
index 1209c7aebb21..1b734a9ff088 100644
--- a/arch/x86/kernel/fpu/core.c
+++ b/arch/x86/kernel/fpu/core.c
@@ -60,9 +60,16 @@ bool irq_fpu_usable(void)
if (WARN_ON_ONCE(in_nmi()))
return false;
- /* In kernel FPU usage already active? */
- if (this_cpu_read(in_kernel_fpu))
+ /*
+ * In kernel FPU usage already active? This detects any explicitly
+ * nested usage in task or softirq context, which is unsupported. It
+ * also detects attempted usage in a hardirq that has interrupted a
+ * kernel-mode FPU section.
+ */
+ if (this_cpu_read(in_kernel_fpu)) {
+ WARN_ON_FPU(!in_hardirq());
return false;
+ }
/*
* When not in NMI or hard interrupt context, FPU can be used in:
@@ -220,7 +227,7 @@ bool fpu_alloc_guest_fpstate(struct fpu_guest *gfpu)
struct fpstate *fpstate;
unsigned int size;
- size = fpu_user_cfg.default_size + ALIGN(offsetof(struct fpstate, regs), 64);
+ size = fpu_kernel_cfg.default_size + ALIGN(offsetof(struct fpstate, regs), 64);
fpstate = vzalloc(size);
if (!fpstate)
return false;
@@ -232,8 +239,8 @@ bool fpu_alloc_guest_fpstate(struct fpu_guest *gfpu)
fpstate->is_guest = true;
gfpu->fpstate = fpstate;
- gfpu->xfeatures = fpu_user_cfg.default_features;
- gfpu->perm = fpu_user_cfg.default_features;
+ gfpu->xfeatures = fpu_kernel_cfg.default_features;
+ gfpu->perm = fpu_kernel_cfg.default_features;
/*
* KVM sets the FP+SSE bits in the XSAVE header when copying FPU state
@@ -420,7 +427,8 @@ EXPORT_SYMBOL_GPL(fpu_copy_uabi_to_guest_fpstate);
void kernel_fpu_begin_mask(unsigned int kfpu_mask)
{
- preempt_disable();
+ if (!irqs_disabled())
+ fpregs_lock();
WARN_ON_FPU(!irq_fpu_usable());
WARN_ON_FPU(this_cpu_read(in_kernel_fpu));
@@ -448,7 +456,8 @@ void kernel_fpu_end(void)
WARN_ON_FPU(!this_cpu_read(in_kernel_fpu));
this_cpu_write(in_kernel_fpu, false);
- preempt_enable();
+ if (!irqs_disabled())
+ fpregs_unlock();
}
EXPORT_SYMBOL_GPL(kernel_fpu_end);
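For context, the usual shape of a kernel-mode FPU section is sketched below; with the hunk above it relies on fpregs_lock()/fpregs_unlock() instead of plain preempt_disable() when interrupts are enabled. A hedged sketch only; checksum_scalar() and checksum_simd() are hypothetical helpers:

        #include <linux/types.h>
        #include <asm/fpu/api.h>

        static void checksum_scalar(const void *buf, size_t len);      /* hypothetical fallback */
        static void checksum_simd(const void *buf, size_t len);        /* hypothetical SIMD version */

        static void checksum_buffer(const void *buf, size_t len)
        {
                if (!irq_fpu_usable()) {
                        checksum_scalar(buf, len);
                        return;
                }

                kernel_fpu_begin();             /* FPU/SIMD registers may be used ... */
                checksum_simd(buf, len);
                kernel_fpu_end();               /* ... only between begin and end */
        }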
diff --git a/arch/x86/kernel/fpu/internal.h b/arch/x86/kernel/fpu/internal.h
index dbdb31f55fc7..975de070c9c9 100644
--- a/arch/x86/kernel/fpu/internal.h
+++ b/arch/x86/kernel/fpu/internal.h
@@ -18,7 +18,7 @@ static __always_inline __pure bool use_fxsr(void)
#ifdef CONFIG_X86_DEBUG_FPU
# define WARN_ON_FPU(x) WARN_ON_ONCE(x)
#else
-# define WARN_ON_FPU(x) ({ (void)(x); 0; })
+# define WARN_ON_FPU(x) ({ BUILD_BUG_ON_INVALID(x); 0; })
#endif
/* Used in init.c */
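The WARN_ON_FPU() change above matters when CONFIG_X86_DEBUG_FPU is off: BUILD_BUG_ON_INVALID() (from <linux/build_bug.h>) essentially applies sizeof() to the expression, so the condition is still type-checked but, unlike the old (void)(x) form, is never evaluated at run time. A small sketch of the difference, with a made-up helper:

        #include <linux/build_bug.h>

        static bool expensive_check(void);      /* hypothetical: imagine it has real cost */

        static void warn_on_fpu_sketch(void)
        {
                ({ (void)(expensive_check()); 0; });                    /* old form: still calls the helper */
                ({ BUILD_BUG_ON_INVALID(expensive_check()); 0; });      /* new form: type-check only, no call */
        }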
diff --git a/arch/x86/kernel/fpu/signal.c b/arch/x86/kernel/fpu/signal.c
index 8f62e0666dea..6c69cb28b298 100644
--- a/arch/x86/kernel/fpu/signal.c
+++ b/arch/x86/kernel/fpu/signal.c
@@ -27,19 +27,14 @@
static inline bool check_xstate_in_sigframe(struct fxregs_state __user *fxbuf,
struct _fpx_sw_bytes *fx_sw)
{
- int min_xstate_size = sizeof(struct fxregs_state) +
- sizeof(struct xstate_header);
void __user *fpstate = fxbuf;
unsigned int magic2;
if (__copy_from_user(fx_sw, &fxbuf->sw_reserved[0], sizeof(*fx_sw)))
return false;
- /* Check for the first magic field and other error scenarios. */
- if (fx_sw->magic1 != FP_XSTATE_MAGIC1 ||
- fx_sw->xstate_size < min_xstate_size ||
- fx_sw->xstate_size > current->thread.fpu.fpstate->user_size ||
- fx_sw->xstate_size > fx_sw->extended_size)
+ /* Check for the first magic field */
+ if (fx_sw->magic1 != FP_XSTATE_MAGIC1)
goto setfx;
/*
@@ -48,7 +43,7 @@ static inline bool check_xstate_in_sigframe(struct fxregs_state __user *fxbuf,
* fpstate layout without copying the extended state information
* in the memory layout.
*/
- if (__get_user(magic2, (__u32 __user *)(fpstate + fx_sw->xstate_size)))
+ if (__get_user(magic2, (__u32 __user *)(fpstate + current->thread.fpu.fpstate->user_size)))
return false;
if (likely(magic2 == FP_XSTATE_MAGIC2))
diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c
index 27417b685c1d..6a41d1610d8b 100644
--- a/arch/x86/kernel/fpu/xstate.c
+++ b/arch/x86/kernel/fpu/xstate.c
@@ -259,32 +259,20 @@ static void __init setup_xstate_cache(void)
}
}
-static void __init print_xstate_feature(u64 xstate_mask)
-{
- const char *feature_name;
-
- if (cpu_has_xfeatures(xstate_mask, &feature_name))
- pr_info("x86/fpu: Supporting XSAVE feature 0x%03Lx: '%s'\n", xstate_mask, feature_name);
-}
-
/*
* Print out all the supported xstate features:
*/
static void __init print_xstate_features(void)
{
- print_xstate_feature(XFEATURE_MASK_FP);
- print_xstate_feature(XFEATURE_MASK_SSE);
- print_xstate_feature(XFEATURE_MASK_YMM);
- print_xstate_feature(XFEATURE_MASK_BNDREGS);
- print_xstate_feature(XFEATURE_MASK_BNDCSR);
- print_xstate_feature(XFEATURE_MASK_OPMASK);
- print_xstate_feature(XFEATURE_MASK_ZMM_Hi256);
- print_xstate_feature(XFEATURE_MASK_Hi16_ZMM);
- print_xstate_feature(XFEATURE_MASK_PKRU);
- print_xstate_feature(XFEATURE_MASK_PASID);
- print_xstate_feature(XFEATURE_MASK_CET_USER);
- print_xstate_feature(XFEATURE_MASK_XTILE_CFG);
- print_xstate_feature(XFEATURE_MASK_XTILE_DATA);
+ int i;
+
+ for (i = 0; i < XFEATURE_MAX; i++) {
+ u64 mask = BIT_ULL(i);
+ const char *name;
+
+ if (cpu_has_xfeatures(mask, &name))
+ pr_info("x86/fpu: Supporting XSAVE feature 0x%03Lx: '%s'\n", mask, name);
+ }
}
/*
diff --git a/arch/x86/kernel/fpu/xstate.h b/arch/x86/kernel/fpu/xstate.h
index aa16f1a1bbcf..0fd34f53f025 100644
--- a/arch/x86/kernel/fpu/xstate.h
+++ b/arch/x86/kernel/fpu/xstate.h
@@ -94,30 +94,33 @@ static inline int update_pkru_in_sigframe(struct xregs_state __user *buf, u64 ma
/* XSAVE/XRSTOR wrapper functions */
#ifdef CONFIG_X86_64
-#define REX_PREFIX "0x48, "
+#define REX_SUFFIX "64"
#else
-#define REX_PREFIX
+#define REX_SUFFIX
#endif
-/* These macros all use (%edi)/(%rdi) as the single memory argument. */
-#define XSAVE ".byte " REX_PREFIX "0x0f,0xae,0x27"
-#define XSAVEOPT ".byte " REX_PREFIX "0x0f,0xae,0x37"
-#define XSAVEC ".byte " REX_PREFIX "0x0f,0xc7,0x27"
-#define XSAVES ".byte " REX_PREFIX "0x0f,0xc7,0x2f"
-#define XRSTOR ".byte " REX_PREFIX "0x0f,0xae,0x2f"
-#define XRSTORS ".byte " REX_PREFIX "0x0f,0xc7,0x1f"
+#define XSAVE "xsave" REX_SUFFIX " %[xa]"
+#define XSAVEOPT "xsaveopt" REX_SUFFIX " %[xa]"
+#define XSAVEC "xsavec" REX_SUFFIX " %[xa]"
+#define XSAVES "xsaves" REX_SUFFIX " %[xa]"
+#define XRSTOR "xrstor" REX_SUFFIX " %[xa]"
+#define XRSTORS "xrstors" REX_SUFFIX " %[xa]"
/*
* After this @err contains 0 on success or the trap number when the
* operation raises an exception.
+ *
+ * The [xa] input parameter below represents the struct xregs_state pointer
+ * and the asm symbolic name for the argument used in the XSAVE/XRSTOR insns
+ * above.
*/
#define XSTATE_OP(op, st, lmask, hmask, err) \
asm volatile("1:" op "\n\t" \
"xor %[err], %[err]\n" \
- "2:\n\t" \
+ "2:\n" \
_ASM_EXTABLE_TYPE(1b, 2b, EX_TYPE_FAULT_MCE_SAFE) \
: [err] "=a" (err) \
- : "D" (st), "m" (*st), "a" (lmask), "d" (hmask) \
+ : [xa] "m" (*(st)), "a" (lmask), "d" (hmask) \
: "memory")
/*
@@ -137,12 +140,12 @@ static inline int update_pkru_in_sigframe(struct xregs_state __user *buf, u64 ma
XSAVEOPT, X86_FEATURE_XSAVEOPT, \
XSAVEC, X86_FEATURE_XSAVEC, \
XSAVES, X86_FEATURE_XSAVES) \
- "\n" \
+ "\n\t" \
"xor %[err], %[err]\n" \
"3:\n" \
_ASM_EXTABLE_TYPE_REG(1b, 3b, EX_TYPE_EFAULT_REG, %[err]) \
: [err] "=r" (err) \
- : "D" (st), "m" (*st), "a" (lmask), "d" (hmask) \
+ : [xa] "m" (*(st)), "a" (lmask), "d" (hmask) \
: "memory")
/*
@@ -156,7 +159,7 @@ static inline int update_pkru_in_sigframe(struct xregs_state __user *buf, u64 ma
"3:\n" \
_ASM_EXTABLE_TYPE(1b, 3b, EX_TYPE_FPU_RESTORE) \
: \
- : "D" (st), "m" (*st), "a" (lmask), "d" (hmask) \
+ : [xa] "m" (*(st)), "a" (lmask), "d" (hmask) \
: "memory")
#if defined(CONFIG_X86_64) && defined(CONFIG_X86_DEBUG_FPU)
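With the mnemonic-based macros above, XSTATE_OP(XSAVE, st, lmask, hmask, err) on a 64-bit build expands to roughly the following inline asm (a sketch of the expansion for illustration, not additional patch content):

        asm volatile("1: xsave64 %[xa]\n\t"
                     "xor %[err], %[err]\n"
                     "2:\n"
                     _ASM_EXTABLE_TYPE(1b, 2b, EX_TYPE_FAULT_MCE_SAFE)
                     : [err] "=a" (err)
                     : [xa] "m" (*(st)), "a" (lmask), "d" (hmask)
                     : "memory");

The named memory operand lets the compiler see the buffer access directly, instead of the old hand-encoded .byte sequence that implicitly addressed the buffer through (%rdi).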
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index 166bc0ea3bdf..cace6e8d7cc7 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -118,13 +118,10 @@ ftrace_modify_code_direct(unsigned long ip, const char *old_code,
return ret;
/* replace the text with the new text */
- if (ftrace_poke_late) {
+ if (ftrace_poke_late)
text_poke_queue((void *)ip, new_code, MCOUNT_INSN_SIZE, NULL);
- } else {
- mutex_lock(&text_mutex);
- text_poke((void *)ip, new_code, MCOUNT_INSN_SIZE);
- mutex_unlock(&text_mutex);
- }
+ else
+ text_poke_early((void *)ip, new_code, MCOUNT_INSN_SIZE);
return 0;
}
@@ -321,7 +318,7 @@ create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)
unsigned const char op_ref[] = { 0x48, 0x8b, 0x15 };
unsigned const char retq[] = { RET_INSN_OPCODE, INT3_INSN_OPCODE };
union ftrace_op_code_union op_ptr;
- void *ret;
+ int ret;
if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
start_offset = (unsigned long)ftrace_regs_caller;
@@ -352,15 +349,15 @@ create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)
npages = DIV_ROUND_UP(*tramp_size, PAGE_SIZE);
/* Copy ftrace_caller onto the trampoline memory */
- ret = text_poke_copy(trampoline, (void *)start_offset, size);
- if (WARN_ON(!ret))
+ ret = copy_from_kernel_nofault(trampoline, (void *)start_offset, size);
+ if (WARN_ON(ret < 0))
goto fail;
ip = trampoline + size;
if (cpu_feature_enabled(X86_FEATURE_RETHUNK))
__text_gen_insn(ip, JMP32_INSN_OPCODE, ip, x86_return_thunk, JMP32_INSN_SIZE);
else
- text_poke_copy(ip, retq, sizeof(retq));
+ memcpy(ip, retq, sizeof(retq));
/* No need to test direct calls on created trampolines */
if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
@@ -368,7 +365,8 @@ create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)
ip = trampoline + (jmp_offset - start_offset);
if (WARN_ON(*(char *)ip != 0x75))
goto fail;
- if (!text_poke_copy(ip, x86_nops[2], 2))
+ ret = copy_from_kernel_nofault(ip, x86_nops[2], 2);
+ if (ret < 0)
goto fail;
}
@@ -381,7 +379,7 @@ create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)
*/
ptr = (unsigned long *)(trampoline + size + RET_SIZE);
- text_poke_copy(ptr, &ops, sizeof(unsigned long));
+ *ptr = (unsigned long)ops;
op_offset -= start_offset;
memcpy(&op_ptr, trampoline + op_offset, OP_REF_SIZE);
@@ -397,7 +395,7 @@ create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)
op_ptr.offset = offset;
/* put in the new offset to the ftrace_ops */
- text_poke_copy(trampoline + op_offset, &op_ptr, OP_REF_SIZE);
+ memcpy(trampoline + op_offset, &op_ptr, OP_REF_SIZE);
/* put in the call to the function */
mutex_lock(&text_mutex);
@@ -407,9 +405,9 @@ create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)
* the depth accounting before the call already.
*/
dest = ftrace_ops_get_func(ops);
- text_poke_copy_locked(trampoline + call_offset,
- text_gen_insn(CALL_INSN_OPCODE, trampoline + call_offset, dest),
- CALL_INSN_SIZE, false);
+ memcpy(trampoline + call_offset,
+ text_gen_insn(CALL_INSN_OPCODE, trampoline + call_offset, dest),
+ CALL_INSN_SIZE);
mutex_unlock(&text_mutex);
/* ALLOC_TRAMP flags lets us know we created it */
diff --git a/arch/x86/kernel/ftrace_64.S b/arch/x86/kernel/ftrace_64.S
index d51647228596..367da3638167 100644
--- a/arch/x86/kernel/ftrace_64.S
+++ b/arch/x86/kernel/ftrace_64.S
@@ -146,12 +146,14 @@ SYM_FUNC_END(ftrace_stub_graph)
#ifdef CONFIG_DYNAMIC_FTRACE
SYM_FUNC_START(__fentry__)
+ ANNOTATE_NOENDBR
CALL_DEPTH_ACCOUNT
RET
SYM_FUNC_END(__fentry__)
EXPORT_SYMBOL(__fentry__)
SYM_FUNC_START(ftrace_caller)
+ ANNOTATE_NOENDBR
/* save_mcount_regs fills in first two parameters */
save_mcount_regs
@@ -197,6 +199,7 @@ SYM_FUNC_END(ftrace_caller);
STACK_FRAME_NON_STANDARD_FP(ftrace_caller)
SYM_FUNC_START(ftrace_regs_caller)
+ ANNOTATE_NOENDBR
/* Save the current flags before any operations that can change them */
pushfq
@@ -310,6 +313,7 @@ SYM_FUNC_END(ftrace_regs_caller)
STACK_FRAME_NON_STANDARD_FP(ftrace_regs_caller)
SYM_FUNC_START(ftrace_stub_direct_tramp)
+ ANNOTATE_NOENDBR
CALL_DEPTH_ACCOUNT
RET
SYM_FUNC_END(ftrace_stub_direct_tramp)
@@ -317,6 +321,7 @@ SYM_FUNC_END(ftrace_stub_direct_tramp)
#else /* ! CONFIG_DYNAMIC_FTRACE */
SYM_FUNC_START(__fentry__)
+ ANNOTATE_NOENDBR
CALL_DEPTH_ACCOUNT
cmpq $ftrace_stub, ftrace_trace_function
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
index 22c9ba305ac1..fa9b6339975f 100644
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -5,8 +5,6 @@
* Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
*/
-#define DISABLE_BRANCH_PROFILING
-
/* cpu_feature_enabled() cannot be used this early */
#define USE_EARLY_PGTABLE_L5
@@ -567,7 +565,7 @@ void early_setup_idt(void)
*/
void __head startup_64_setup_gdt_idt(void)
{
- struct desc_struct *gdt = (void *)(__force unsigned long)init_per_cpu_var(gdt_page.gdt);
+ struct desc_struct *gdt = (void *)(__force unsigned long)gdt_page.gdt;
void *handler = NULL;
struct desc_ptr startup_gdt_descr = {
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index 31345e0ba006..fefe2a25cf02 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -61,11 +61,14 @@ SYM_CODE_START_NOALIGN(startup_64)
/* Set up the stack for verify_cpu() */
leaq __top_init_kernel_stack(%rip), %rsp
- /* Setup GSBASE to allow stack canary access for C code */
+ /*
+ * Set up GSBASE.
+ * Note that on SMP the boot CPU uses the init data section until
+ * the per-CPU areas are set up.
+ */
movl $MSR_GS_BASE, %ecx
- leaq INIT_PER_CPU_VAR(fixed_percpu_data)(%rip), %rdx
- movl %edx, %eax
- shrq $32, %rdx
+ xorl %eax, %eax
+ xorl %edx, %edx
wrmsr
call startup_64_setup_gdt_idt
@@ -319,7 +322,7 @@ SYM_INNER_LABEL(common_startup_64, SYM_L_LOCAL)
*
* RDX contains the per-cpu offset
*/
- movq pcpu_hot + X86_current_task(%rdx), %rax
+ movq current_task(%rdx), %rax
movq TASK_threadsp(%rax), %rsp
/*
@@ -359,17 +362,12 @@ SYM_INNER_LABEL(common_startup_64, SYM_L_LOCAL)
movl %eax,%fs
movl %eax,%gs
- /* Set up %gs.
- *
- * The base of %gs always points to fixed_percpu_data. If the
- * stack protector canary is enabled, it is located at %gs:40.
+ /*
+ * Set up GSBASE.
* Note that, on SMP, the boot cpu uses init data section until
* the per cpu areas are set up.
*/
movl $MSR_GS_BASE,%ecx
-#ifndef CONFIG_SMP
- leaq INIT_PER_CPU_VAR(fixed_percpu_data)(%rip), %rdx
-#endif
movl %edx, %eax
shrq $32, %rdx
wrmsr
@@ -435,7 +433,7 @@ SYM_CODE_START(soft_restart_cpu)
UNWIND_HINT_END_OF_STACK
/* Find the idle task stack */
- movq PER_CPU_VAR(pcpu_hot + X86_current_task), %rcx
+ movq PER_CPU_VAR(current_task), %rcx
movq TASK_threadsp(%rcx), %rsp
jmp .Ljump_to_C_code
diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
index c20d1832c481..2bade73f49e3 100644
--- a/arch/x86/kernel/i8259.c
+++ b/arch/x86/kernel/i8259.c
@@ -23,6 +23,7 @@
#include <asm/desc.h>
#include <asm/apic.h>
#include <asm/i8259.h>
+#include <asm/io_apic.h>
/*
* This is the 'legacy' 8259A Programmable Interrupt Controller,
diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
index e2fab3ceb09f..6290dd120f5e 100644
--- a/arch/x86/kernel/ioport.c
+++ b/arch/x86/kernel/ioport.c
@@ -144,7 +144,7 @@ long ksys_ioperm(unsigned long from, unsigned long num, int turn_on)
* Update the sequence number to force a TSS update on return to
* user mode.
*/
- iobm->sequence = atomic64_add_return(1, &io_bitmap_sequence);
+ iobm->sequence = atomic64_inc_return(&io_bitmap_sequence);
return 0;
}
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
index feca4f20b06a..81f9b78e0f7b 100644
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
@@ -33,6 +33,11 @@
DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
EXPORT_PER_CPU_SYMBOL(irq_stat);
+DEFINE_PER_CPU_CACHE_HOT(u16, __softirq_pending);
+EXPORT_PER_CPU_SYMBOL(__softirq_pending);
+
+DEFINE_PER_CPU_CACHE_HOT(struct irq_stack *, hardirq_stack_ptr);
+
atomic_t irq_err_count;
/*
diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
index dc1049c01f9b..c7a5d2960d57 100644
--- a/arch/x86/kernel/irq_32.c
+++ b/arch/x86/kernel/irq_32.c
@@ -29,12 +29,9 @@
int sysctl_panic_on_stackoverflow __read_mostly;
/* Debugging check for stack overflow: is there less than 1KB free? */
-static int check_stack_overflow(void)
+static bool check_stack_overflow(void)
{
- long sp;
-
- __asm__ __volatile__("andl %%esp,%0" :
- "=r" (sp) : "0" (THREAD_SIZE - 1));
+ unsigned long sp = current_stack_pointer & (THREAD_SIZE - 1);
return sp < (sizeof(struct thread_info) + STACK_WARN);
}
@@ -48,18 +45,19 @@ static void print_stack_overflow(void)
}
#else
-static inline int check_stack_overflow(void) { return 0; }
+static inline bool check_stack_overflow(void) { return false; }
static inline void print_stack_overflow(void) { }
#endif
+DEFINE_PER_CPU_CACHE_HOT(struct irq_stack *, softirq_stack_ptr);
+
static void call_on_stack(void *func, void *stack)
{
- asm volatile("xchgl %%ebx,%%esp \n"
+ asm volatile("xchgl %[sp], %%esp\n"
CALL_NOSPEC
- "movl %%ebx,%%esp \n"
- : "=b" (stack)
- : "0" (stack),
- [thunk_target] "D"(func)
+ "movl %[sp], %%esp"
+ : [sp] "+b" (stack)
+ : [thunk_target] "D" (func)
: "memory", "cc", "edx", "ecx", "eax");
}
@@ -68,13 +66,13 @@ static inline void *current_stack(void)
return (void *)(current_stack_pointer & ~(THREAD_SIZE - 1));
}
-static inline int execute_on_irq_stack(int overflow, struct irq_desc *desc)
+static inline bool execute_on_irq_stack(bool overflow, struct irq_desc *desc)
{
struct irq_stack *curstk, *irqstk;
- u32 *isp, *prev_esp, arg1;
+ u32 *isp, *prev_esp;
curstk = (struct irq_stack *) current_stack();
- irqstk = __this_cpu_read(pcpu_hot.hardirq_stack_ptr);
+ irqstk = __this_cpu_read(hardirq_stack_ptr);
/*
* this is where we switch to the IRQ stack. However, if we are
@@ -83,7 +81,7 @@ static inline int execute_on_irq_stack(int overflow, struct irq_desc *desc)
* current stack (which is the irq stack already after all)
*/
if (unlikely(curstk == irqstk))
- return 0;
+ return false;
isp = (u32 *) ((char *)irqstk + sizeof(*irqstk));
@@ -94,14 +92,13 @@ static inline int execute_on_irq_stack(int overflow, struct irq_desc *desc)
if (unlikely(overflow))
call_on_stack(print_stack_overflow, isp);
- asm volatile("xchgl %%ebx,%%esp \n"
+ asm volatile("xchgl %[sp], %%esp\n"
CALL_NOSPEC
- "movl %%ebx,%%esp \n"
- : "=a" (arg1), "=b" (isp)
- : "0" (desc), "1" (isp),
- [thunk_target] "D" (desc->handle_irq)
- : "memory", "cc", "ecx");
- return 1;
+ "movl %[sp], %%esp"
+ : "+a" (desc), [sp] "+b" (isp)
+ : [thunk_target] "D" (desc->handle_irq)
+ : "memory", "cc", "edx", "ecx");
+ return true;
}
/*
@@ -112,7 +109,7 @@ int irq_init_percpu_irqstack(unsigned int cpu)
int node = cpu_to_node(cpu);
struct page *ph, *ps;
- if (per_cpu(pcpu_hot.hardirq_stack_ptr, cpu))
+ if (per_cpu(hardirq_stack_ptr, cpu))
return 0;
ph = alloc_pages_node(node, THREADINFO_GFP, THREAD_SIZE_ORDER);
@@ -124,8 +121,8 @@ int irq_init_percpu_irqstack(unsigned int cpu)
return -ENOMEM;
}
- per_cpu(pcpu_hot.hardirq_stack_ptr, cpu) = page_address(ph);
- per_cpu(pcpu_hot.softirq_stack_ptr, cpu) = page_address(ps);
+ per_cpu(hardirq_stack_ptr, cpu) = page_address(ph);
+ per_cpu(softirq_stack_ptr, cpu) = page_address(ps);
return 0;
}
@@ -135,7 +132,7 @@ void do_softirq_own_stack(void)
struct irq_stack *irqstk;
u32 *isp, *prev_esp;
- irqstk = __this_cpu_read(pcpu_hot.softirq_stack_ptr);
+ irqstk = __this_cpu_read(softirq_stack_ptr);
/* build the stack frame on the softirq stack */
isp = (u32 *) ((char *)irqstk + sizeof(*irqstk));
@@ -150,7 +147,7 @@ void do_softirq_own_stack(void)
void __handle_irq(struct irq_desc *desc, struct pt_regs *regs)
{
- int overflow = check_stack_overflow();
+ bool overflow = check_stack_overflow();
if (user_mode(regs) || !execute_on_irq_stack(overflow, desc)) {
if (unlikely(overflow))
diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
index ade0043ce56e..ca78dce39361 100644
--- a/arch/x86/kernel/irq_64.c
+++ b/arch/x86/kernel/irq_64.c
@@ -26,8 +26,8 @@
#include <asm/io_apic.h>
#include <asm/apic.h>
+DEFINE_PER_CPU_CACHE_HOT(bool, hardirq_stack_inuse);
DEFINE_PER_CPU_PAGE_ALIGNED(struct irq_stack, irq_stack_backing_store) __visible;
-DECLARE_INIT_PER_CPU(irq_stack_backing_store);
#ifdef CONFIG_VMAP_STACK
/*
@@ -51,7 +51,7 @@ static int map_irq_stack(unsigned int cpu)
return -ENOMEM;
/* Store actual TOS to avoid adjustment in the hotpath */
- per_cpu(pcpu_hot.hardirq_stack_ptr, cpu) = va + IRQ_STACK_SIZE - 8;
+ per_cpu(hardirq_stack_ptr, cpu) = va + IRQ_STACK_SIZE - 8;
return 0;
}
#else
@@ -64,14 +64,14 @@ static int map_irq_stack(unsigned int cpu)
void *va = per_cpu_ptr(&irq_stack_backing_store, cpu);
/* Store actual TOS to avoid adjustment in the hotpath */
- per_cpu(pcpu_hot.hardirq_stack_ptr, cpu) = va + IRQ_STACK_SIZE - 8;
+ per_cpu(hardirq_stack_ptr, cpu) = va + IRQ_STACK_SIZE - 8;
return 0;
}
#endif
int irq_init_percpu_irqstack(unsigned int cpu)
{
- if (per_cpu(pcpu_hot.hardirq_stack_ptr, cpu))
+ if (per_cpu(hardirq_stack_ptr, cpu))
return 0;
return map_irq_stack(cpu);
}
diff --git a/arch/x86/kernel/irqflags.S b/arch/x86/kernel/irqflags.S
index 7f542a7799cb..fdabd5dda154 100644
--- a/arch/x86/kernel/irqflags.S
+++ b/arch/x86/kernel/irqflags.S
@@ -9,6 +9,7 @@
*/
.pushsection .noinstr.text, "ax"
SYM_FUNC_START(native_save_fl)
+ ENDBR
pushf
pop %_ASM_AX
RET
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index 72e6a45e7ec2..09608fd93687 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -373,16 +373,7 @@ out:
kprobe_opcode_t *arch_adjust_kprobe_addr(unsigned long addr, unsigned long offset,
bool *on_func_entry)
{
- u32 insn;
-
- /*
- * Since 'addr' is not guaranteed to be safe to access, use
- * copy_from_kernel_nofault() to read the instruction:
- */
- if (copy_from_kernel_nofault(&insn, (void *)addr, sizeof(u32)))
- return NULL;
-
- if (is_endbr(insn)) {
+ if (is_endbr((u32 *)addr)) {
*on_func_entry = !offset || offset == 4;
if (*on_func_entry)
offset = 4;
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index 7a422a6c5983..3be9b3342c67 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -838,7 +838,6 @@ static void __init kvm_guest_init(void)
#ifdef CONFIG_SMP
if (pv_tlb_flush_supported()) {
pv_ops.mmu.flush_tlb_multi = kvm_flush_tlb_multi;
- pv_ops.mmu.tlb_remove_table = tlb_remove_table;
pr_info("KVM setup pv remote TLB flush\n");
}
diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
index 8984abd91c00..a7998f351701 100644
--- a/arch/x86/kernel/module.c
+++ b/arch/x86/kernel/module.c
@@ -19,6 +19,7 @@
#include <linux/jump_label.h>
#include <linux/random.h>
#include <linux/memory.h>
+#include <linux/stackprotector.h>
#include <asm/text-patching.h>
#include <asm/page.h>
@@ -130,6 +131,20 @@ static int __write_relocate_add(Elf64_Shdr *sechdrs,
goto overflow;
size = 4;
break;
+#if defined(CONFIG_STACKPROTECTOR) && \
+ defined(CONFIG_CC_IS_CLANG) && CONFIG_CLANG_VERSION < 170000
+ case R_X86_64_REX_GOTPCRELX: {
+ static unsigned long __percpu *const addr = &__stack_chk_guard;
+
+ if (sym->st_value != (u64)addr) {
+ pr_err("%s: Unsupported GOTPCREL relocation\n", me->name);
+ return -ENOEXEC;
+ }
+
+ val = (u64)&addr + rel[i].r_addend;
+ fallthrough;
+ }
+#endif
case R_X86_64_PC32:
case R_X86_64_PLT32:
val -= (u64)loc;
@@ -146,21 +161,18 @@ static int __write_relocate_add(Elf64_Shdr *sechdrs,
}
if (apply) {
- void *wr_loc = module_writable_address(me, loc);
-
- if (memcmp(wr_loc, &zero, size)) {
+ if (memcmp(loc, &zero, size)) {
pr_err("x86/modules: Invalid relocation target, existing value is nonzero for type %d, loc %p, val %Lx\n",
(int)ELF64_R_TYPE(rel[i].r_info), loc, val);
return -ENOEXEC;
}
- write(wr_loc, &val, size);
+ write(loc, &val, size);
} else {
if (memcmp(loc, &val, size)) {
pr_warn("x86/modules: Invalid relocation target, existing value does not match expected value for type %d, loc %p, val %Lx\n",
(int)ELF64_R_TYPE(rel[i].r_info), loc, val);
return -ENOEXEC;
}
- /* FIXME: needs care for ROX module allocations */
write(loc, &zero, size);
}
}
@@ -227,7 +239,7 @@ int module_finalize(const Elf_Ehdr *hdr,
const Elf_Shdr *sechdrs,
struct module *me)
{
- const Elf_Shdr *s, *alt = NULL,
+ const Elf_Shdr *s, *alt = NULL, *locks = NULL,
*orc = NULL, *orc_ip = NULL,
*retpolines = NULL, *returns = NULL, *ibt_endbr = NULL,
*calls = NULL, *cfi = NULL;
@@ -236,6 +248,8 @@ int module_finalize(const Elf_Ehdr *hdr,
for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++) {
if (!strcmp(".altinstructions", secstrings + s->sh_name))
alt = s;
+ if (!strcmp(".smp_locks", secstrings + s->sh_name))
+ locks = s;
if (!strcmp(".orc_unwind", secstrings + s->sh_name))
orc = s;
if (!strcmp(".orc_unwind_ip", secstrings + s->sh_name))
@@ -266,60 +280,33 @@ int module_finalize(const Elf_Ehdr *hdr,
csize = cfi->sh_size;
}
- apply_fineibt(rseg, rseg + rsize, cseg, cseg + csize, me);
+ apply_fineibt(rseg, rseg + rsize, cseg, cseg + csize);
}
if (retpolines) {
void *rseg = (void *)retpolines->sh_addr;
- apply_retpolines(rseg, rseg + retpolines->sh_size, me);
+ apply_retpolines(rseg, rseg + retpolines->sh_size);
}
if (returns) {
void *rseg = (void *)returns->sh_addr;
- apply_returns(rseg, rseg + returns->sh_size, me);
- }
- if (alt) {
- /* patch .altinstructions */
- void *aseg = (void *)alt->sh_addr;
- apply_alternatives(aseg, aseg + alt->sh_size, me);
+ apply_returns(rseg, rseg + returns->sh_size);
}
- if (calls || alt) {
+ if (calls) {
struct callthunk_sites cs = {};
- if (calls) {
- cs.call_start = (void *)calls->sh_addr;
- cs.call_end = (void *)calls->sh_addr + calls->sh_size;
- }
-
- if (alt) {
- cs.alt_start = (void *)alt->sh_addr;
- cs.alt_end = (void *)alt->sh_addr + alt->sh_size;
- }
+ cs.call_start = (void *)calls->sh_addr;
+ cs.call_end = (void *)calls->sh_addr + calls->sh_size;
callthunks_patch_module_calls(&cs, me);
}
+ if (alt) {
+ /* patch .altinstructions */
+ void *aseg = (void *)alt->sh_addr;
+ apply_alternatives(aseg, aseg + alt->sh_size);
+ }
if (ibt_endbr) {
void *iseg = (void *)ibt_endbr->sh_addr;
- apply_seal_endbr(iseg, iseg + ibt_endbr->sh_size, me);
+ apply_seal_endbr(iseg, iseg + ibt_endbr->sh_size);
}
-
- if (orc && orc_ip)
- unwind_module_init(me, (void *)orc_ip->sh_addr, orc_ip->sh_size,
- (void *)orc->sh_addr, orc->sh_size);
-
- return 0;
-}
-
-int module_post_finalize(const Elf_Ehdr *hdr,
- const Elf_Shdr *sechdrs,
- struct module *me)
-{
- const Elf_Shdr *s, *locks = NULL;
- char *secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
-
- for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++) {
- if (!strcmp(".smp_locks", secstrings + s->sh_name))
- locks = s;
- }
-
if (locks) {
void *lseg = (void *)locks->sh_addr;
void *text = me->mem[MOD_TEXT].base;
@@ -329,6 +316,10 @@ int module_post_finalize(const Elf_Ehdr *hdr,
text, text_end);
}
+ if (orc && orc_ip)
+ unwind_module_init(me, (void *)orc_ip->sh_addr, orc_ip->sh_size,
+ (void *)orc->sh_addr, orc->sh_size);
+
return 0;
}
diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
index ed163c8c8604..9a95d00f1423 100644
--- a/arch/x86/kernel/nmi.c
+++ b/arch/x86/kernel/nmi.c
@@ -40,8 +40,12 @@
#define CREATE_TRACE_POINTS
#include <trace/events/nmi.h>
+/*
+ * An emergency handler can be set in any context including NMI
+ */
struct nmi_desc {
raw_spinlock_t lock;
+ nmi_handler_t emerg_handler;
struct list_head head;
};
@@ -132,9 +136,22 @@ static void nmi_check_duration(struct nmiaction *action, u64 duration)
static int nmi_handle(unsigned int type, struct pt_regs *regs)
{
struct nmi_desc *desc = nmi_to_desc(type);
+ nmi_handler_t ehandler;
struct nmiaction *a;
int handled=0;
+ /*
+ * Call the emergency handler, if set
+ *
+ * In the case of the crash_nmi_callback() emergency handler, it
+ * returns on the crashing CPU so that the CPU can complete the
+ * remaining crash actions ASAP. Other handlers in the linked list
+ * won't need to be run.
+ */
+ ehandler = desc->emerg_handler;
+ if (ehandler)
+ return ehandler(type, regs);
+
rcu_read_lock();
/*
@@ -224,6 +241,31 @@ void unregister_nmi_handler(unsigned int type, const char *name)
}
EXPORT_SYMBOL_GPL(unregister_nmi_handler);
+/**
+ * set_emergency_nmi_handler - Set emergency handler
+ * @type: NMI type
+ * @handler: the emergency handler to be stored
+ *
+ * Set an emergency NMI handler which, if set, preempts all the other
+ * handlers in the linked list. Passing a NULL handler clears it.
+ * Concurrent calls to this function are not expected to happen unless
+ * the system is already screwed beyond repair.
+ */
+void set_emergency_nmi_handler(unsigned int type, nmi_handler_t handler)
+{
+ struct nmi_desc *desc = nmi_to_desc(type);
+
+ if (WARN_ON_ONCE(desc->emerg_handler == handler))
+ return;
+ desc->emerg_handler = handler;
+
+ /*
+ * Ensure the emergency handler is visible to other CPUs before
+ * function return
+ */
+ smp_wmb();
+}
+
static void
pci_serr_error(unsigned char reason, struct pt_regs *regs)
{
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index 1ccaa3397a67..97925632c28e 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -59,21 +59,6 @@ void __init native_pv_lock_init(void)
static_branch_enable(&virt_spin_lock_key);
}
-#ifndef CONFIG_PT_RECLAIM
-static void native_tlb_remove_table(struct mmu_gather *tlb, void *table)
-{
- struct ptdesc *ptdesc = (struct ptdesc *)table;
-
- pagetable_dtor(ptdesc);
- tlb_remove_page(tlb, ptdesc_page(ptdesc));
-}
-#else
-static void native_tlb_remove_table(struct mmu_gather *tlb, void *table)
-{
- tlb_remove_table(tlb, table);
-}
-#endif
-
struct static_key paravirt_steal_enabled;
struct static_key paravirt_steal_rq_enabled;
@@ -90,30 +75,20 @@ void paravirt_set_sched_clock(u64 (*func)(void))
static_call_update(pv_sched_clock, func);
}
-/* These are in entry.S */
-static struct resource reserve_ioports = {
- .start = 0,
- .end = IO_SPACE_LIMIT,
- .name = "paravirt-ioport",
- .flags = IORESOURCE_IO | IORESOURCE_BUSY,
-};
+#ifdef CONFIG_PARAVIRT_XXL
+static noinstr void pv_native_write_cr2(unsigned long val)
+{
+ native_write_cr2(val);
+}
-/*
- * Reserve the whole legacy IO space to prevent any legacy drivers
- * from wasting time probing for their hardware. This is a fairly
- * brute-force approach to disabling all non-virtual drivers.
- *
- * Note that this must be called very early to have any effect.
- */
-int paravirt_disable_iospace(void)
+static noinstr unsigned long pv_native_read_cr3(void)
{
- return request_resource(&ioport_resource, &reserve_ioports);
+ return __native_read_cr3();
}
-#ifdef CONFIG_PARAVIRT_XXL
-static noinstr void pv_native_write_cr2(unsigned long val)
+static noinstr void pv_native_write_cr3(unsigned long cr3)
{
- native_write_cr2(val);
+ native_write_cr3(cr3);
}
static noinstr unsigned long pv_native_get_debugreg(int regno)
@@ -195,7 +170,6 @@ struct paravirt_patch_template pv_ops = {
.mmu.flush_tlb_kernel = native_flush_tlb_global,
.mmu.flush_tlb_one_user = native_flush_tlb_one_user,
.mmu.flush_tlb_multi = native_flush_tlb_multi,
- .mmu.tlb_remove_table = native_tlb_remove_table,
.mmu.exit_mmap = paravirt_nop,
.mmu.notify_page_enc_status_changed = paravirt_nop,
@@ -203,8 +177,8 @@ struct paravirt_patch_template pv_ops = {
#ifdef CONFIG_PARAVIRT_XXL
.mmu.read_cr2 = __PV_IS_CALLEE_SAVE(pv_native_read_cr2),
.mmu.write_cr2 = pv_native_write_cr2,
- .mmu.read_cr3 = __native_read_cr3,
- .mmu.write_cr3 = native_write_cr3,
+ .mmu.read_cr3 = pv_native_read_cr3,
+ .mmu.write_cr3 = pv_native_write_cr3,
.mmu.pgd_alloc = __paravirt_pgd_alloc,
.mmu.pgd_free = paravirt_nop,
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 6da6769d7254..91f6ff618852 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -93,7 +93,12 @@ EXPORT_PER_CPU_SYMBOL_GPL(__tss_limit_invalid);
*/
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
- memcpy(dst, src, arch_task_struct_size);
+ /* init_task is not dynamically sized (incomplete FPU state) */
+ if (unlikely(src == &init_task))
+ memcpy_and_pad(dst, arch_task_struct_size, src, sizeof(init_task), 0);
+ else
+ memcpy(dst, src, arch_task_struct_size);
+
#ifdef CONFIG_VM86
dst->thread.vm86 = NULL;
#endif
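memcpy_and_pad(dst, dst_len, src, src_len, pad) copies the source and fills the remaining destination bytes with the pad value, which is what allows the smaller, statically sized init_task to be copied into a full-size, dynamically sized task_struct above. A toy sketch of the semantics (not part of the patch):

        #include <linux/string.h>

        static void pad_copy_sketch(void)
        {
                char dst[8];

                memcpy_and_pad(dst, sizeof(dst), "abc", 3, 0);
                /* dst now holds 'a', 'b', 'c' followed by five 0x00 bytes */
        }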
@@ -1043,7 +1048,7 @@ unsigned long __get_wchan(struct task_struct *p)
return addr;
}
-long do_arch_prctl_common(int option, unsigned long arg2)
+SYSCALL_DEFINE2(arch_prctl, int, option, unsigned long, arg2)
{
switch (option) {
case ARCH_GET_CPUID:
@@ -1058,5 +1063,13 @@ long do_arch_prctl_common(int option, unsigned long arg2)
return fpu_xstate_prctl(option, arg2);
}
+ if (!in_ia32_syscall())
+ return do_arch_prctl_64(current, option, arg2);
+
return -EINVAL;
}
+
+SYSCALL_DEFINE0(ni_syscall)
+{
+ return -ENOSYS;
+}
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index 0917c7f25720..4636ef359973 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -190,13 +190,13 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
arch_end_context_switch(next_p);
/*
- * Reload esp0 and pcpu_hot.top_of_stack. This changes
+ * Reload esp0 and cpu_current_top_of_stack. This changes
* current_thread_info(). Refresh the SYSENTER configuration in
* case prev or next is vm86.
*/
update_task_stack(next_p);
refresh_sysenter_cs(next);
- this_cpu_write(pcpu_hot.top_of_stack,
+ this_cpu_write(cpu_current_top_of_stack,
(unsigned long)task_stack_page(next_p) +
THREAD_SIZE);
@@ -206,7 +206,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
if (prev->gs | next->gs)
loadsegment(gs, next->gs);
- raw_cpu_write(pcpu_hot.current_task, next_p);
+ raw_cpu_write(current_task, next_p);
switch_fpu_finish(next_p);
@@ -215,8 +215,3 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
return prev_p;
}
-
-SYSCALL_DEFINE2(arch_prctl, int, option, unsigned long, arg2)
-{
- return do_arch_prctl_common(option, arg2);
-}
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 226472332a70..7196ca7048be 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -614,7 +614,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
int cpu = smp_processor_id();
WARN_ON_ONCE(IS_ENABLED(CONFIG_DEBUG_ENTRY) &&
- this_cpu_read(pcpu_hot.hardirq_stack_inuse));
+ this_cpu_read(hardirq_stack_inuse));
if (!test_tsk_thread_flag(prev_p, TIF_NEED_FPU_LOAD))
switch_fpu_prepare(prev_p, cpu);
@@ -668,8 +668,8 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
/*
* Switch the PDA and FPU contexts.
*/
- raw_cpu_write(pcpu_hot.current_task, next_p);
- raw_cpu_write(pcpu_hot.top_of_stack, task_top_of_stack(next_p));
+ raw_cpu_write(current_task, next_p);
+ raw_cpu_write(cpu_current_top_of_stack, task_top_of_stack(next_p));
switch_fpu_finish(next_p);
@@ -942,7 +942,7 @@ long do_arch_prctl_64(struct task_struct *task, int option, unsigned long arg2)
case ARCH_MAP_VDSO_X32:
return prctl_map_vdso(&vdso_image_x32, arg2);
# endif
-# if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
+# ifdef CONFIG_IA32_EMULATION
case ARCH_MAP_VDSO_32:
return prctl_map_vdso(&vdso_image_32, arg2);
# endif
@@ -979,26 +979,3 @@ long do_arch_prctl_64(struct task_struct *task, int option, unsigned long arg2)
return ret;
}
-
-SYSCALL_DEFINE2(arch_prctl, int, option, unsigned long, arg2)
-{
- long ret;
-
- ret = do_arch_prctl_64(current, option, arg2);
- if (ret == -EINVAL)
- ret = do_arch_prctl_common(option, arg2);
-
- return ret;
-}
-
-#ifdef CONFIG_IA32_EMULATION
-COMPAT_SYSCALL_DEFINE2(arch_prctl, int, option, unsigned long, arg2)
-{
- return do_arch_prctl_common(option, arg2);
-}
-#endif
-
-unsigned long KSTK_ESP(struct task_struct *task)
-{
- return task_pt_regs(task)->sp;
-}
diff --git a/arch/x86/kernel/quirks.c b/arch/x86/kernel/quirks.c
index 6d0df6a58873..a92f18db9610 100644
--- a/arch/x86/kernel/quirks.c
+++ b/arch/x86/kernel/quirks.c
@@ -10,6 +10,8 @@
#include <asm/setup.h>
#include <asm/mce.h>
+#include <linux/platform_data/x86/apple.h>
+
#if defined(CONFIG_X86_IO_APIC) && defined(CONFIG_SMP) && defined(CONFIG_PCI)
static void quirk_intel_irqbalance(struct pci_dev *dev)
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index dc1dd3f3e67f..964f6b0a3d68 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -921,20 +921,16 @@ void nmi_shootdown_cpus(nmi_shootdown_cb callback)
return;
/* Make a note of crashing cpu. Will be used in NMI callback. */
- crashing_cpu = safe_smp_processor_id();
+ crashing_cpu = smp_processor_id();
shootdown_callback = callback;
atomic_set(&waiting_for_crash_ipi, num_online_cpus() - 1);
- /* Would it be better to replace the trap vector here? */
- if (register_nmi_handler(NMI_LOCAL, crash_nmi_callback,
- NMI_FLAG_FIRST, "crash"))
- return; /* Return what? */
+
/*
- * Ensure the new callback function is set before sending
- * out the NMI
+ * Set emergency handler to preempt other handlers.
*/
- wmb();
+ set_emergency_nmi_handler(NMI_LOCAL, crash_nmi_callback);
apic_send_IPI_allbutself(NMI_VECTOR);
diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
index b44d8863e57f..ac058971a382 100644
--- a/arch/x86/kernel/relocate_kernel_64.S
+++ b/arch/x86/kernel/relocate_kernel_64.S
@@ -40,6 +40,16 @@ SYM_DATA(kexec_pa_table_page, .quad 0)
SYM_DATA(kexec_pa_swap_page, .quad 0)
SYM_DATA_LOCAL(pa_backup_pages_map, .quad 0)
+ .balign 16
+SYM_DATA_START_LOCAL(kexec_debug_gdt)
+ .word kexec_debug_gdt_end - kexec_debug_gdt - 1
+ .long 0
+ .word 0
+ .quad 0x00cf9a000000ffff /* __KERNEL32_CS */
+ .quad 0x00af9a000000ffff /* __KERNEL_CS */
+ .quad 0x00cf92000000ffff /* __KERNEL_DS */
+SYM_DATA_END_LABEL(kexec_debug_gdt, SYM_L_LOCAL, kexec_debug_gdt_end)
+
.section .text..relocate_kernel,"ax";
.code64
SYM_CODE_START_NOALIGN(relocate_kernel)
@@ -116,6 +126,19 @@ SYM_CODE_START_LOCAL_NOALIGN(identity_mapped)
/* store the start address on the stack */
pushq %rdx
+ /* Create a GDTR (16 bits limit, 64 bits addr) on stack */
+ leaq kexec_debug_gdt(%rip), %rax
+ pushq %rax
+ pushw (%rax)
+
+ /* Load the GDT, put the stack back */
+ lgdt (%rsp)
+ addq $10, %rsp
+
+ /* Test that we can load segments */
+ movq %ds, %rax
+ movq %rax, %ds
+
/*
* Clear X86_CR4_CET (if it was set) such that we can clear CR0_WP
* below.
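The pushq/pushw pair above builds the 10-byte operand that LGDT expects directly on the stack, which is why it is popped with "addq $10, %rsp" afterwards. For reference, its layout is equivalent to the following (a descriptive sketch, not part of the patch):

        #include <linux/types.h>

        /* Operand consumed by LGDT: 2-byte limit followed by an 8-byte linear base */
        struct gdt_ptr64 {
                u16 limit;      /* size of the GDT in bytes, minus one */
                u64 base;       /* linear address of the first descriptor */
        } __packed;             /* 10 bytes total, hence the addq $10 */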
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index cebee310e200..c7164a8de983 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -56,6 +56,9 @@
#include <asm/unwind.h>
#include <asm/vsyscall.h>
#include <linux/vmalloc.h>
+#if defined(CONFIG_X86_LOCAL_APIC)
+#include <asm/nmi.h>
+#endif
/*
* max_low_pfn_mapped: highest directly mapped pfn < 4 GB
@@ -146,6 +149,69 @@ static size_t ima_kexec_buffer_size;
/* Boot loader ID and version as integers, for the benefit of proc_dointvec */
int bootloader_type, bootloader_version;
+static const struct ctl_table x86_sysctl_table[] = {
+ {
+ .procname = "panic_on_unrecovered_nmi",
+ .data = &panic_on_unrecovered_nmi,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+ {
+ .procname = "panic_on_io_nmi",
+ .data = &panic_on_io_nmi,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+ {
+ .procname = "bootloader_type",
+ .data = &bootloader_type,
+ .maxlen = sizeof(int),
+ .mode = 0444,
+ .proc_handler = proc_dointvec,
+ },
+ {
+ .procname = "bootloader_version",
+ .data = &bootloader_version,
+ .maxlen = sizeof(int),
+ .mode = 0444,
+ .proc_handler = proc_dointvec,
+ },
+ {
+ .procname = "io_delay_type",
+ .data = &io_delay_type,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+#if defined(CONFIG_X86_LOCAL_APIC)
+ {
+ .procname = "unknown_nmi_panic",
+ .data = &unknown_nmi_panic,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+#endif
+#if defined(CONFIG_ACPI_SLEEP)
+ {
+ .procname = "acpi_video_flags",
+ .data = &acpi_realmode_flags,
+ .maxlen = sizeof(unsigned long),
+ .mode = 0644,
+ .proc_handler = proc_doulongvec_minmax,
+ },
+#endif
+};
+
+static int __init init_x86_sysctl(void)
+{
+ register_sysctl_init("kernel", x86_sysctl_table);
+ return 0;
+}
+arch_initcall(init_x86_sysctl);
+
/*
* Setup options
*/
@@ -429,6 +495,46 @@ static void __init parse_setup_data(void)
}
}
+/*
+ * Translate the fields of 'struct boot_param' into global variables
+ * representing these parameters.
+ */
+static void __init parse_boot_params(void)
+{
+ ROOT_DEV = old_decode_dev(boot_params.hdr.root_dev);
+ screen_info = boot_params.screen_info;
+ edid_info = boot_params.edid_info;
+#ifdef CONFIG_X86_32
+ apm_info.bios = boot_params.apm_bios_info;
+ ist_info = boot_params.ist_info;
+#endif
+ saved_video_mode = boot_params.hdr.vid_mode;
+ bootloader_type = boot_params.hdr.type_of_loader;
+ if ((bootloader_type >> 4) == 0xe) {
+ bootloader_type &= 0xf;
+ bootloader_type |= (boot_params.hdr.ext_loader_type+0x10) << 4;
+ }
+ bootloader_version = bootloader_type & 0xf;
+ bootloader_version |= boot_params.hdr.ext_loader_ver << 4;
+
+#ifdef CONFIG_BLK_DEV_RAM
+ rd_image_start = boot_params.hdr.ram_size & RAMDISK_IMAGE_START_MASK;
+#endif
+#ifdef CONFIG_EFI
+ if (!strncmp((char *)&boot_params.efi_info.efi_loader_signature,
+ EFI32_LOADER_SIGNATURE, 4)) {
+ set_bit(EFI_BOOT, &efi.flags);
+ } else if (!strncmp((char *)&boot_params.efi_info.efi_loader_signature,
+ EFI64_LOADER_SIGNATURE, 4)) {
+ set_bit(EFI_BOOT, &efi.flags);
+ set_bit(EFI_64BIT, &efi.flags);
+ }
+#endif
+
+ if (!boot_params.hdr.root_flags)
+ root_mountflags &= ~MS_RDONLY;
+}
+
static void __init memblock_x86_reserve_range_setup_data(void)
{
struct setup_indirect *indirect;
@@ -527,6 +633,23 @@ void __init reserve_standard_io_resources(void)
}
+static void __init setup_kernel_resources(void)
+{
+ code_resource.start = __pa_symbol(_text);
+ code_resource.end = __pa_symbol(_etext)-1;
+ rodata_resource.start = __pa_symbol(__start_rodata);
+ rodata_resource.end = __pa_symbol(__end_rodata)-1;
+ data_resource.start = __pa_symbol(_sdata);
+ data_resource.end = __pa_symbol(_edata)-1;
+ bss_resource.start = __pa_symbol(__bss_start);
+ bss_resource.end = __pa_symbol(__bss_stop)-1;
+
+ insert_resource(&iomem_resource, &code_resource);
+ insert_resource(&iomem_resource, &rodata_resource);
+ insert_resource(&iomem_resource, &data_resource);
+ insert_resource(&iomem_resource, &bss_resource);
+}
+
static bool __init snb_gfx_workaround_needed(void)
{
#ifdef CONFIG_PCI
@@ -789,35 +912,7 @@ void __init setup_arch(char **cmdline_p)
setup_olpc_ofw_pgd();
- ROOT_DEV = old_decode_dev(boot_params.hdr.root_dev);
- screen_info = boot_params.screen_info;
- edid_info = boot_params.edid_info;
-#ifdef CONFIG_X86_32
- apm_info.bios = boot_params.apm_bios_info;
- ist_info = boot_params.ist_info;
-#endif
- saved_video_mode = boot_params.hdr.vid_mode;
- bootloader_type = boot_params.hdr.type_of_loader;
- if ((bootloader_type >> 4) == 0xe) {
- bootloader_type &= 0xf;
- bootloader_type |= (boot_params.hdr.ext_loader_type+0x10) << 4;
- }
- bootloader_version = bootloader_type & 0xf;
- bootloader_version |= boot_params.hdr.ext_loader_ver << 4;
-
-#ifdef CONFIG_BLK_DEV_RAM
- rd_image_start = boot_params.hdr.ram_size & RAMDISK_IMAGE_START_MASK;
-#endif
-#ifdef CONFIG_EFI
- if (!strncmp((char *)&boot_params.efi_info.efi_loader_signature,
- EFI32_LOADER_SIGNATURE, 4)) {
- set_bit(EFI_BOOT, &efi.flags);
- } else if (!strncmp((char *)&boot_params.efi_info.efi_loader_signature,
- EFI64_LOADER_SIGNATURE, 4)) {
- set_bit(EFI_BOOT, &efi.flags);
- set_bit(EFI_64BIT, &efi.flags);
- }
-#endif
+ parse_boot_params();
x86_init.oem.arch_setup();
@@ -841,19 +936,8 @@ void __init setup_arch(char **cmdline_p)
copy_edd();
- if (!boot_params.hdr.root_flags)
- root_mountflags &= ~MS_RDONLY;
setup_initial_init_mm(_text, _etext, _edata, (void *)_brk_end);
- code_resource.start = __pa_symbol(_text);
- code_resource.end = __pa_symbol(_etext)-1;
- rodata_resource.start = __pa_symbol(__start_rodata);
- rodata_resource.end = __pa_symbol(__end_rodata)-1;
- data_resource.start = __pa_symbol(_sdata);
- data_resource.end = __pa_symbol(_edata)-1;
- bss_resource.start = __pa_symbol(__bss_start);
- bss_resource.end = __pa_symbol(__bss_stop)-1;
-
/*
* x86_configure_nx() is called before parse_early_param() to detect
* whether hardware doesn't support NX (so that the early EHCI debug
@@ -866,30 +950,6 @@ void __init setup_arch(char **cmdline_p)
if (efi_enabled(EFI_BOOT))
efi_memblock_x86_reserve_range();
-#ifdef CONFIG_MEMORY_HOTPLUG
- /*
- * Memory used by the kernel cannot be hot-removed because Linux
- * cannot migrate the kernel pages. When memory hotplug is
- * enabled, we should prevent memblock from allocating memory
- * for the kernel.
- *
- * ACPI SRAT records all hotpluggable memory ranges. But before
- * SRAT is parsed, we don't know about it.
- *
- * The kernel image is loaded into memory at very early time. We
- * cannot prevent this anyway. So on NUMA system, we set any
- * node the kernel resides in as un-hotpluggable.
- *
- * Since on modern servers, one node could have double-digit
- * gigabytes memory, we can assume the memory around the kernel
- * image is also un-hotpluggable. So before SRAT is parsed, just
- * allocate memory near the kernel image to try the best to keep
- * the kernel away from hotpluggable memory.
- */
- if (movable_node_is_enabled())
- memblock_set_bottom_up(true);
-#endif
-
x86_report_nx();
apic_setup_apic_calls();
@@ -901,7 +961,6 @@ void __init setup_arch(char **cmdline_p)
setup_clear_cpu_cap(X86_FEATURE_APIC);
}
- e820__reserve_setup_data();
e820__finish_early_params();
if (efi_enabled(EFI_BOOT))
@@ -921,11 +980,11 @@ void __init setup_arch(char **cmdline_p)
tsc_early_init();
x86_init.resources.probe_roms();
- /* after parse_early_param, so could debug it */
- insert_resource(&iomem_resource, &code_resource);
- insert_resource(&iomem_resource, &rodata_resource);
- insert_resource(&iomem_resource, &data_resource);
- insert_resource(&iomem_resource, &bss_resource);
+ /*
+ * Add resources for kernel text and data to the iomem_resource.
+ * Do it after parse_early_param, so it can be debugged.
+ */
+ setup_kernel_resources();
e820_add_kernel_range();
trim_bios_range();
@@ -990,7 +1049,6 @@ void __init setup_arch(char **cmdline_p)
cleanup_highmap();
- memblock_set_current_limit(ISA_END_ADDRESS);
e820__memblock_setup();
/*
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
index b30d6e180df7..bfa48e7a32a2 100644
--- a/arch/x86/kernel/setup_percpu.c
+++ b/arch/x86/kernel/setup_percpu.c
@@ -23,18 +23,13 @@
#include <asm/cpumask.h>
#include <asm/cpu.h>
-#ifdef CONFIG_X86_64
-#define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
-#else
-#define BOOT_PERCPU_OFFSET 0
-#endif
+DEFINE_PER_CPU_CACHE_HOT(int, cpu_number);
+EXPORT_PER_CPU_SYMBOL(cpu_number);
-DEFINE_PER_CPU_READ_MOSTLY(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
+DEFINE_PER_CPU_CACHE_HOT(unsigned long, this_cpu_off);
EXPORT_PER_CPU_SYMBOL(this_cpu_off);
-unsigned long __per_cpu_offset[NR_CPUS] __ro_after_init = {
- [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
-};
+unsigned long __per_cpu_offset[NR_CPUS] __ro_after_init;
EXPORT_SYMBOL(__per_cpu_offset);
/*
@@ -169,7 +164,7 @@ void __init setup_per_cpu_areas(void)
for_each_possible_cpu(cpu) {
per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
- per_cpu(pcpu_hot.cpu_number, cpu) = cpu;
+ per_cpu(cpu_number, cpu) = cpu;
setup_percpu_segment(cpu);
/*
* Copy data used in early init routines from the
diff --git a/arch/x86/kernel/signal_32.c b/arch/x86/kernel/signal_32.c
index ef654530bf5a..98123ff10506 100644
--- a/arch/x86/kernel/signal_32.c
+++ b/arch/x86/kernel/signal_32.c
@@ -33,25 +33,55 @@
#include <asm/smap.h>
#include <asm/gsseg.h>
+/*
+ * The first GDT descriptor is reserved as 'NULL descriptor'. As bits 0
+ * and 1 of a segment selector, i.e., the RPL bits, are NOT used to index
+ * GDT, selector values 0~3 all point to the NULL descriptor, thus values
+ * 0, 1, 2 and 3 are all valid NULL selector values.
+ *
+ * However IRET zeros ES, FS, GS, and DS segment registers if any of them
+ * is found to have any nonzero NULL selector value, which can be used by
+ * userspace in pre-FRED systems to spot any interrupt/exception by loading
+ * a nonzero NULL selector and waiting for it to become zero. Before FRED
+ * there was nothing software could do to prevent such an information leak.
+ *
+ * ERETU, the only legit instruction to return to userspace from kernel
+ * under FRED, by design does NOT zero any segment register to avoid this
+ * problem behavior.
+ *
+ * As such, leave NULL selector values 0~3 unchanged.
+ */
+static inline u16 fixup_rpl(u16 sel)
+{
+ return sel <= 3 ? sel : sel | 3;
+}
+
#ifdef CONFIG_IA32_EMULATION
#include <asm/unistd_32_ia32.h>
static inline void reload_segments(struct sigcontext_32 *sc)
{
- unsigned int cur;
+ u16 cur;
+ /*
+ * Reload fs and gs if they have changed in the signal
+ * handler. This does not handle long fs/gs base changes in
+ * the handler, but does not clobber them at least in the
+ * normal case.
+ */
savesegment(gs, cur);
- if ((sc->gs | 0x03) != cur)
- load_gs_index(sc->gs | 0x03);
+ if (fixup_rpl(sc->gs) != cur)
+ load_gs_index(fixup_rpl(sc->gs));
savesegment(fs, cur);
- if ((sc->fs | 0x03) != cur)
- loadsegment(fs, sc->fs | 0x03);
+ if (fixup_rpl(sc->fs) != cur)
+ loadsegment(fs, fixup_rpl(sc->fs));
+
savesegment(ds, cur);
- if ((sc->ds | 0x03) != cur)
- loadsegment(ds, sc->ds | 0x03);
+ if (fixup_rpl(sc->ds) != cur)
+ loadsegment(ds, fixup_rpl(sc->ds));
savesegment(es, cur);
- if ((sc->es | 0x03) != cur)
- loadsegment(es, sc->es | 0x03);
+ if (fixup_rpl(sc->es) != cur)
+ loadsegment(es, fixup_rpl(sc->es));
}
#define sigset32_t compat_sigset_t
@@ -105,18 +135,12 @@ static bool ia32_restore_sigcontext(struct pt_regs *regs,
regs->orig_ax = -1;
#ifdef CONFIG_IA32_EMULATION
- /*
- * Reload fs and gs if they have changed in the signal
- * handler. This does not handle long fs/gs base changes in
- * the handler, but does not clobber them at least in the
- * normal case.
- */
reload_segments(&sc);
#else
- loadsegment(gs, sc.gs);
- regs->fs = sc.fs;
- regs->es = sc.es;
- regs->ds = sc.ds;
+ loadsegment(gs, fixup_rpl(sc.gs));
+ regs->fs = fixup_rpl(sc.fs);
+ regs->es = fixup_rpl(sc.es);
+ regs->ds = fixup_rpl(sc.ds);
#endif
return fpu__restore_sig(compat_ptr(sc.fpstate), 1);
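A few worked values for the fixup_rpl() helper added above, assuming the usual selector encoding where bits 0-1 hold the RPL (illustration only):

        /*
         *   fixup_rpl(0x00) == 0x00   NULL selector, left alone
         *   fixup_rpl(0x03) == 0x03   still indexes the NULL descriptor, left alone
         *   fixup_rpl(0x28) == 0x2b   kernel-RPL selector gets RPL forced to 3
         *   fixup_rpl(0x2b) == 0x2b   already RPL 3, unchanged
         */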
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index c10850ae6f09..d6cf1e23c2a3 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -190,7 +190,7 @@ static void ap_starting(void)
apic_ap_setup();
/* Save the processor parameters. */
- smp_store_cpu_info(cpuid);
+ identify_secondary_cpu(cpuid);
/*
* The topology information must be up to date before
@@ -215,7 +215,7 @@ static void ap_calibrate_delay(void)
{
/*
* Calibrate the delay loop and update loops_per_jiffy in cpu_data.
- * smp_store_cpu_info() stored a value that is close but not as
+ * identify_secondary_cpu() stored a value that is close but not as
* accurate as the value just calculated.
*
* As this is invoked after the TSC synchronization check,
@@ -229,7 +229,7 @@ static void ap_calibrate_delay(void)
/*
* Activate a secondary processor.
*/
-static void notrace start_secondary(void *unused)
+static void notrace __noendbr start_secondary(void *unused)
{
/*
* Don't put *anything* except direct CPU state initialization
@@ -314,26 +314,7 @@ static void notrace start_secondary(void *unused)
wmb();
cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}
-
-/*
- * The bootstrap kernel entry code has set these up. Save them for
- * a given CPU
- */
-void smp_store_cpu_info(int id)
-{
- struct cpuinfo_x86 *c = &cpu_data(id);
-
- /* Copy boot_cpu_data only on the first bringup */
- if (!c->initialized)
- *c = boot_cpu_data;
- c->cpu_index = id;
- /*
- * During boot time, CPU0 has this setup already. Save the info when
- * bringing up an AP.
- */
- identify_secondary_cpu(c);
- c->initialized = true;
-}
+ANNOTATE_NOENDBR_SYM(start_secondary);
static bool
topology_same_node(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
@@ -654,10 +635,9 @@ static void impress_friends(void)
* But that slows boot and resume on modern processors, which include
* many cores and don't require that delay.
*
- * Cmdline "init_cpu_udelay=" is available to over-ride this delay.
- * Modern processor families are quirked to remove the delay entirely.
+ * Cmdline "cpu_init_udelay=" is available to override this delay.
*/
-#define UDELAY_10MS_DEFAULT 10000
+#define UDELAY_10MS_LEGACY 10000
static unsigned int init_udelay = UINT_MAX;
@@ -669,21 +649,21 @@ static int __init cpu_init_udelay(char *str)
}
early_param("cpu_init_udelay", cpu_init_udelay);
-static void __init smp_quirk_init_udelay(void)
+static void __init smp_set_init_udelay(void)
{
/* if cmdline changed it from default, leave it alone */
if (init_udelay != UINT_MAX)
return;
/* if modern processor, use no delay */
- if (((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) && (boot_cpu_data.x86 == 6)) ||
- ((boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) && (boot_cpu_data.x86 >= 0x18)) ||
- ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) && (boot_cpu_data.x86 >= 0xF))) {
+ if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL && boot_cpu_data.x86_vfm >= INTEL_PENTIUM_PRO) ||
+ (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON && boot_cpu_data.x86 >= 0x18) ||
+ (boot_cpu_data.x86_vendor == X86_VENDOR_AMD && boot_cpu_data.x86 >= 0xF)) {
init_udelay = 0;
return;
}
/* else, use legacy delay */
- init_udelay = UDELAY_10MS_DEFAULT;
+ init_udelay = UDELAY_10MS_LEGACY;
}
/*
@@ -841,7 +821,7 @@ int common_cpu_up(unsigned int cpu, struct task_struct *idle)
/* Just in case we booted with a single CPU. */
alternatives_enable_smp();
- per_cpu(pcpu_hot.current_task, cpu) = idle;
+ per_cpu(current_task, cpu) = idle;
cpu_init_stack_canary(cpu, idle);
/* Initialize the interrupt stack(s) */
@@ -851,7 +831,7 @@ int common_cpu_up(unsigned int cpu, struct task_struct *idle)
#ifdef CONFIG_X86_32
/* Stack for startup_32 can be just as for start_secondary onwards */
- per_cpu(pcpu_hot.top_of_stack, cpu) = task_top_of_stack(idle);
+ per_cpu(cpu_current_top_of_stack, cpu) = task_top_of_stack(idle);
#endif
return 0;
}
@@ -1094,7 +1074,7 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
uv_system_init();
- smp_quirk_init_udelay();
+ smp_set_init_udelay();
speculative_store_bypass_ht_init();
@@ -1262,43 +1242,9 @@ void play_dead_common(void)
* We need to flush the caches before going to sleep, lest we have
* dirty data in our caches when we come back up.
*/
-static inline void mwait_play_dead(void)
+void __noreturn mwait_play_dead(unsigned int eax_hint)
{
struct mwait_cpu_dead *md = this_cpu_ptr(&mwait_cpu_dead);
- unsigned int eax, ebx, ecx, edx;
- unsigned int highest_cstate = 0;
- unsigned int highest_subcstate = 0;
- int i;
-
- if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
- boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
- return;
- if (!this_cpu_has(X86_FEATURE_MWAIT))
- return;
- if (!this_cpu_has(X86_FEATURE_CLFLUSH))
- return;
-
- eax = CPUID_LEAF_MWAIT;
- ecx = 0;
- native_cpuid(&eax, &ebx, &ecx, &edx);
-
- /*
- * eax will be 0 if EDX enumeration is not valid.
- * Initialized below to cstate, sub_cstate value when EDX is valid.
- */
- if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED)) {
- eax = 0;
- } else {
- edx >>= MWAIT_SUBSTATE_SIZE;
- for (i = 0; i < 7 && edx; i++, edx >>= MWAIT_SUBSTATE_SIZE) {
- if (edx & MWAIT_SUBSTATE_MASK) {
- highest_cstate = i;
- highest_subcstate = edx & MWAIT_SUBSTATE_MASK;
- }
- }
- eax = (highest_cstate << MWAIT_SUBSTATE_SIZE) |
- (highest_subcstate - 1);
- }
/* Set up state for the kexec() hack below */
md->status = CPUDEAD_MWAIT_WAIT;
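For reference, the hint that callers now pass in as eax_hint was previously derived here from CPUID leaf 5 by picking the deepest advertised C-state and sub-state. A standalone user-space sketch of that packing, assuming the kernel's 4-bit-per-state MWAIT_SUBSTATE_* layout and that at least one sub-state is advertised:

#include <stdio.h>
#include <stdint.h>

#define MWAIT_SUBSTATE_SIZE	4
#define MWAIT_SUBSTATE_MASK	0xf

/* edx is CPUID.5:EDX, a 4-bit sub-state count per C-state, C0 first */
static unsigned int mwait_hint_from_edx(uint32_t edx)
{
	unsigned int highest_cstate = 0, highest_subcstate = 0;
	int i;

	edx >>= MWAIT_SUBSTATE_SIZE;			/* skip C0 */
	for (i = 0; i < 7 && edx; i++, edx >>= MWAIT_SUBSTATE_SIZE) {
		if (edx & MWAIT_SUBSTATE_MASK) {
			highest_cstate = i;
			highest_subcstate = edx & MWAIT_SUBSTATE_MASK;
		}
	}

	/* hint: C-state index in bits 7:4, (sub-state count - 1) in bits 3:0 */
	return (highest_cstate << MWAIT_SUBSTATE_SIZE) | (highest_subcstate - 1);
}

int main(void)
{
	printf("%#x\n", mwait_hint_from_edx(0x120));	/* prints 0x10 */
	return 0;
}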
@@ -1319,7 +1265,7 @@ static inline void mwait_play_dead(void)
mb();
__monitor(md, 0, 0);
mb();
- __mwait(eax, 0);
+ __mwait(eax_hint, 0);
if (READ_ONCE(md->control) == CPUDEAD_MWAIT_KEXEC_HLT) {
/*
@@ -1391,9 +1337,9 @@ void native_play_dead(void)
play_dead_common();
tboot_shutdown(TB_SHUTDOWN_WFS);
- mwait_play_dead();
- if (cpuidle_play_dead())
- hlt_play_dead();
+ /* Below returns only on error. */
+ cpuidle_play_dead();
+ hlt_play_dead();
}
#else /* ... !CONFIG_HOTPLUG_CPU */
diff --git a/arch/x86/kernel/static_call.c b/arch/x86/kernel/static_call.c
index 9e51242ed125..a59c72e77645 100644
--- a/arch/x86/kernel/static_call.c
+++ b/arch/x86/kernel/static_call.c
@@ -158,7 +158,7 @@ void arch_static_call_transform(void *site, void *tramp, void *func, bool tail)
{
mutex_lock(&text_mutex);
- if (tramp) {
+ if (tramp && !site) {
__static_call_validate(tramp, true, true);
__static_call_transform(tramp, __sc_insn(!func, true), func, false);
}
diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
index 4c1bcb6053fc..46b8f1f16676 100644
--- a/arch/x86/kernel/tboot.c
+++ b/arch/x86/kernel/tboot.c
@@ -200,8 +200,7 @@ static int tboot_setup_sleep(void)
tboot->num_mac_regions = 0;
for (i = 0; i < e820_table->nr_entries; i++) {
- if ((e820_table->entries[i].type != E820_TYPE_RAM)
- && (e820_table->entries[i].type != E820_TYPE_RESERVED_KERN))
+ if (e820_table->entries[i].type != E820_TYPE_RAM)
continue;
add_mac_region(e820_table->entries[i].addr, e820_table->entries[i].size);
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index 2dbadf347b5f..9f88b8a78e50 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -94,10 +94,20 @@ __always_inline int is_valid_bugaddr(unsigned long addr)
/*
* Check for UD1 or UD2, accounting for Address Size Override Prefixes.
- * If it's a UD1, get the ModRM byte to pass along to UBSan.
+ * If it's a UD1, further decode to determine its use:
+ *
+ * FineIBT: ea (bad)
+ * FineIBT: f0 75 f9 lock jne . - 6
+ * UBSan{0}: 67 0f b9 00 ud1 (%eax),%eax
+ * UBSan{10}: 67 0f b9 40 10 ud1 0x10(%eax),%eax
+ * static_call: 0f b9 cc ud1 %esp,%ecx
+ *
+ * Notably UBSAN uses EAX, static_call uses ECX.
*/
-__always_inline int decode_bug(unsigned long addr, u32 *imm)
+__always_inline int decode_bug(unsigned long addr, s32 *imm, int *len)
{
+ unsigned long start = addr;
+ bool lock = false;
u8 v;
if (addr < TASK_SIZE_MAX)
@@ -106,28 +116,67 @@ __always_inline int decode_bug(unsigned long addr, u32 *imm)
v = *(u8 *)(addr++);
if (v == INSN_ASOP)
v = *(u8 *)(addr++);
- if (v != OPCODE_ESCAPE)
+
+ if (v == INSN_LOCK) {
+ lock = true;
+ v = *(u8 *)(addr++);
+ }
+
+ switch (v) {
+ case 0x70 ... 0x7f: /* Jcc.d8 */
+ addr += 1; /* d8 */
+ *len = addr - start;
+ WARN_ON_ONCE(!lock);
+ return BUG_LOCK;
+
+ case 0xea:
+ *len = addr - start;
+ return BUG_EA;
+
+ case OPCODE_ESCAPE:
+ break;
+
+ default:
return BUG_NONE;
+ }
v = *(u8 *)(addr++);
- if (v == SECOND_BYTE_OPCODE_UD2)
+ if (v == SECOND_BYTE_OPCODE_UD2) {
+ *len = addr - start;
return BUG_UD2;
+ }
- if (!IS_ENABLED(CONFIG_UBSAN_TRAP) || v != SECOND_BYTE_OPCODE_UD1)
+ if (v != SECOND_BYTE_OPCODE_UD1)
return BUG_NONE;
- /* Retrieve the immediate (type value) for the UBSAN UD1 */
- v = *(u8 *)(addr++);
- if (X86_MODRM_RM(v) == 4)
- addr++;
-
*imm = 0;
- if (X86_MODRM_MOD(v) == 1)
- *imm = *(u8 *)addr;
- else if (X86_MODRM_MOD(v) == 2)
- *imm = *(u32 *)addr;
- else
- WARN_ONCE(1, "Unexpected MODRM_MOD: %u\n", X86_MODRM_MOD(v));
+ v = *(u8 *)(addr++); /* ModRM */
+
+ if (X86_MODRM_MOD(v) != 3 && X86_MODRM_RM(v) == 4)
+ addr++; /* SIB */
+
+ /* Decode immediate, if present */
+ switch (X86_MODRM_MOD(v)) {
+ case 0: if (X86_MODRM_RM(v) == 5)
+ addr += 4; /* RIP + disp32 */
+ break;
+
+ case 1: *imm = *(s8 *)addr;
+ addr += 1;
+ break;
+
+ case 2: *imm = *(s32 *)addr;
+ addr += 4;
+ break;
+
+ case 3: break;
+ }
+
+ /* record instruction length */
+ *len = addr - start;
+
+ if (X86_MODRM_REG(v) == 0) /* EAX */
+ return BUG_UD1_UBSAN;
return BUG_UD1;
}
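To make the ModRM handling above concrete, here is a minimal user-space sketch that pulls the UBSAN type immediate out of a ud1 byte sequence such as the "67 0f b9 40 10" example in the comment. It only mirrors the mod=1/mod=2 displacement cases and is illustrative, not the kernel's decoder:

#include <stdio.h>
#include <stdint.h>

#define MODRM_MOD(m) (((m) >> 6) & 0x3)
#define MODRM_RM(m)  ((m) & 0x7)

static int ud1_imm(const uint8_t *p, int32_t *imm)
{
	if (*p == 0x67)				/* address-size override prefix */
		p++;
	if (p[0] != 0x0f || p[1] != 0xb9)	/* not an escape + ud1 */
		return -1;

	uint8_t modrm = p[2];
	const uint8_t *disp = p + 3;

	if (MODRM_MOD(modrm) != 3 && MODRM_RM(modrm) == 4)
		disp++;				/* skip the SIB byte */

	switch (MODRM_MOD(modrm)) {
	case 1:
		*imm = (int8_t)disp[0];		/* disp8 */
		break;
	case 2:
		*imm = (int32_t)(disp[0] | disp[1] << 8 | disp[2] << 16 |
				 (uint32_t)disp[3] << 24);	/* disp32 */
		break;
	default:
		*imm = 0;
		break;
	}
	return 0;
}

int main(void)
{
	/* "ud1 0x10(%eax),%eax" as emitted for UBSAN type 0x10 */
	const uint8_t insn[] = { 0x67, 0x0f, 0xb9, 0x40, 0x10 };
	int32_t imm;

	if (!ud1_imm(insn, &imm))
		printf("UBSAN type immediate: %#x\n", imm);
	return 0;
}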
@@ -257,11 +306,12 @@ static inline void handle_invalid_op(struct pt_regs *regs)
static noinstr bool handle_bug(struct pt_regs *regs)
{
+ unsigned long addr = regs->ip;
bool handled = false;
- int ud_type;
- u32 imm;
+ int ud_type, ud_len;
+ s32 ud_imm;
- ud_type = decode_bug(regs->ip, &imm);
+ ud_type = decode_bug(addr, &ud_imm, &ud_len);
if (ud_type == BUG_NONE)
return handled;
@@ -281,15 +331,47 @@ static noinstr bool handle_bug(struct pt_regs *regs)
*/
if (regs->flags & X86_EFLAGS_IF)
raw_local_irq_enable();
- if (ud_type == BUG_UD2) {
- if (report_bug(regs->ip, regs) == BUG_TRAP_TYPE_WARN ||
- handle_cfi_failure(regs) == BUG_TRAP_TYPE_WARN) {
- regs->ip += LEN_UD2;
+
+ switch (ud_type) {
+ case BUG_UD2:
+ if (report_bug(regs->ip, regs) == BUG_TRAP_TYPE_WARN) {
+ handled = true;
+ break;
+ }
+ fallthrough;
+
+ case BUG_EA:
+ case BUG_LOCK:
+ if (handle_cfi_failure(regs) == BUG_TRAP_TYPE_WARN) {
handled = true;
+ break;
+ }
+ break;
+
+ case BUG_UD1_UBSAN:
+ if (IS_ENABLED(CONFIG_UBSAN_TRAP)) {
+ pr_crit("%s at %pS\n",
+ report_ubsan_failure(regs, ud_imm),
+ (void *)regs->ip);
}
- } else if (IS_ENABLED(CONFIG_UBSAN_TRAP)) {
- pr_crit("%s at %pS\n", report_ubsan_failure(regs, imm), (void *)regs->ip);
+ break;
+
+ default:
+ break;
+ }
+
+ /*
+ * When continuing, and regs->ip hasn't changed, move it to the next
+ * instruction. When not continuing execution, restore the instruction
+ * pointer.
+ */
+ if (handled) {
+ if (regs->ip == addr)
+ regs->ip += ud_len;
+ } else {
+ regs->ip = addr;
}
+
if (regs->flags & X86_EFLAGS_IF)
raw_local_irq_disable();
instrumentation_end();
@@ -380,6 +462,21 @@ __visible void __noreturn handle_stack_overflow(struct pt_regs *regs,
#endif
/*
+ * Prevent the compiler and/or objtool from marking the !CONFIG_X86_ESPFIX64
+ * version of exc_double_fault() as noreturn. Otherwise the noreturn mismatch
+ * between configs triggers objtool warnings.
+ *
+ * This is a temporary hack until we have compiler or plugin support for
+ * annotating noreturns.
+ */
+#ifdef CONFIG_X86_ESPFIX64
+#define always_true() true
+#else
+bool always_true(void);
+bool __weak always_true(void) { return true; }
+#endif
+
+/*
* Runs on an IST stack for x86_64 and on a special task stack for x86_32.
*
* On x86_64, this is more or less a normal kernel entry. Notwithstanding the
@@ -514,7 +611,8 @@ DEFINE_IDTENTRY_DF(exc_double_fault)
pr_emerg("PANIC: double fault, error_code: 0x%lx\n", error_code);
die("double fault", regs, error_code);
- panic("Machine halted.");
+ if (always_true())
+ panic("Machine halted.");
instrumentation_end();
}
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 34dec0b72ea8..88e5a4ed9db3 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -959,7 +959,7 @@ static unsigned long long cyc2ns_suspend;
void tsc_save_sched_clock_state(void)
{
- if (!sched_clock_stable())
+ if (!static_branch_likely(&__use_tsc) && !sched_clock_stable())
return;
cyc2ns_suspend = sched_clock();
@@ -979,7 +979,7 @@ void tsc_restore_sched_clock_state(void)
unsigned long flags;
int cpu;
- if (!sched_clock_stable())
+ if (!static_branch_likely(&__use_tsc) && !sched_clock_stable())
return;
local_irq_save(flags);
diff --git a/arch/x86/kernel/tsc_msr.c b/arch/x86/kernel/tsc_msr.c
index deeb02825670..48e6cc1cb017 100644
--- a/arch/x86/kernel/tsc_msr.c
+++ b/arch/x86/kernel/tsc_msr.c
@@ -152,7 +152,7 @@ static const struct x86_cpu_id tsc_msr_cpu_ids[] = {
X86_MATCH_VFM(INTEL_ATOM_SILVERMONT, &freq_desc_byt),
X86_MATCH_VFM(INTEL_ATOM_SILVERMONT_MID, &freq_desc_tng),
X86_MATCH_VFM(INTEL_ATOM_AIRMONT, &freq_desc_cht),
- X86_MATCH_VFM(INTEL_ATOM_AIRMONT_MID, &freq_desc_ann),
+ X86_MATCH_VFM(INTEL_ATOM_SILVERMONT_MID2, &freq_desc_ann),
X86_MATCH_VFM(INTEL_ATOM_AIRMONT_NP, &freq_desc_lgm),
{}
};
diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
index 5a952c5ea66b..9194695662b2 100644
--- a/arch/x86/kernel/uprobes.c
+++ b/arch/x86/kernel/uprobes.c
@@ -357,19 +357,23 @@ void *arch_uprobe_trampoline(unsigned long *psize)
return &insn;
}
-static unsigned long trampoline_check_ip(void)
+static unsigned long trampoline_check_ip(unsigned long tramp)
{
- unsigned long tramp = uprobe_get_trampoline_vaddr();
-
return tramp + (uretprobe_syscall_check - uretprobe_trampoline_entry);
}
SYSCALL_DEFINE0(uretprobe)
{
struct pt_regs *regs = task_pt_regs(current);
- unsigned long err, ip, sp, r11_cx_ax[3];
+ unsigned long err, ip, sp, r11_cx_ax[3], tramp;
+
+ /* If there's no trampoline, we are called from the wrong place. */

+ tramp = uprobe_get_trampoline_vaddr();
+ if (unlikely(tramp == UPROBE_NO_TRAMPOLINE_VADDR))
+ goto sigill;
- if (regs->ip != trampoline_check_ip())
+ /* Make sure the ip matches the only allowed sys_uretprobe caller. */
+ if (unlikely(regs->ip != trampoline_check_ip(tramp)))
goto sigill;
err = copy_from_user(r11_cx_ax, (void __user *)regs->sp, sizeof(r11_cx_ax));
diff --git a/arch/x86/kernel/verify_cpu.S b/arch/x86/kernel/verify_cpu.S
index 1258a5872d12..37ad43792452 100644
--- a/arch/x86/kernel/verify_cpu.S
+++ b/arch/x86/kernel/verify_cpu.S
@@ -29,8 +29,12 @@
*/
#include <asm/cpufeatures.h>
+#include <asm/cpufeaturemasks.h>
#include <asm/msr-index.h>
+#define SSE_MASK \
+ (REQUIRED_MASK0 & ((1<<(X86_FEATURE_XMM & 31)) | (1<<(X86_FEATURE_XMM2 & 31))))
+
SYM_FUNC_START_LOCAL(verify_cpu)
pushf # Save caller passed flags
push $0 # Kill any dangerous flags
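The SSE_MASK macro above relies on the usual cpufeatures convention that a feature number is word*32 + bit, so masking with 31 yields the bit position inside REQUIRED_MASK0. A small illustration; the bit numbers 25/26 for XMM/XMM2 are assumed from cpufeatures.h rather than taken from this patch:

#include <stdio.h>

#define X86_FEATURE_XMM		(0*32 + 25)	/* assumed, word 0 bit 25 */
#define X86_FEATURE_XMM2	(0*32 + 26)	/* assumed, word 0 bit 26 */

int main(void)
{
	unsigned int sse_mask = (1u << (X86_FEATURE_XMM & 31)) |
				(1u << (X86_FEATURE_XMM2 & 31));

	printf("SSE bits within CPUID word 0: %#x\n", sse_mask);	/* 0x6000000 */
	return 0;
}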
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
index 0deb4887d6e9..ccdc45e5b759 100644
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -43,7 +43,8 @@ ENTRY(phys_startup_64)
#endif
jiffies = jiffies_64;
-const_pcpu_hot = pcpu_hot;
+const_current_task = current_task;
+const_cpu_current_top_of_stack = cpu_current_top_of_stack;
#if defined(CONFIG_X86_64)
/*
@@ -112,12 +113,6 @@ ASSERT(__relocate_kernel_end - __relocate_kernel_start <= KEXEC_CONTROL_CODE_MAX
PHDRS {
text PT_LOAD FLAGS(5); /* R_E */
data PT_LOAD FLAGS(6); /* RW_ */
-#ifdef CONFIG_X86_64
-#ifdef CONFIG_SMP
- percpu PT_LOAD FLAGS(6); /* RW_ */
-#endif
- init PT_LOAD FLAGS(7); /* RWE */
-#endif
note PT_NOTE FLAGS(0); /* ___ */
}
@@ -193,6 +188,8 @@ SECTIONS
PAGE_ALIGNED_DATA(PAGE_SIZE)
+ CACHE_HOT_DATA(L1_CACHE_BYTES)
+
CACHELINE_ALIGNED_DATA(L1_CACHE_BYTES)
DATA_DATA
@@ -216,21 +213,7 @@ SECTIONS
__init_begin = .; /* paired with __init_end */
}
-#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
- /*
- * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
- * output PHDR, so the next output section - .init.text - should
- * start another segment - init.
- */
- PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
- ASSERT(SIZEOF(.data..percpu) < CONFIG_PHYSICAL_START,
- "per-CPU data too large - increase CONFIG_PHYSICAL_START")
-#endif
-
INIT_TEXT_SECTION(PAGE_SIZE)
-#ifdef CONFIG_X86_64
- :init
-#endif
/*
* Section for code used exclusively before alternatives are run. All
@@ -347,9 +330,8 @@ SECTIONS
EXIT_DATA
}
-#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
- PERCPU_SECTION(INTERNODE_CACHE_BYTES)
-#endif
+ PERCPU_SECTION(L1_CACHE_BYTES)
+ ASSERT(__per_cpu_hot_end - __per_cpu_hot_start <= 64, "percpu cache hot data too large")
RUNTIME_CONST_VARIABLES
RUNTIME_CONST(ptr, USER_PTR_MAX)
@@ -493,19 +475,6 @@ SECTIONS
PROVIDE(__ref_stack_chk_guard = __stack_chk_guard);
#ifdef CONFIG_X86_64
-/*
- * Per-cpu symbols which need to be offset from __per_cpu_load
- * for the boot processor.
- */
-#define INIT_PER_CPU(x) init_per_cpu__##x = ABSOLUTE(x) + __per_cpu_load
-INIT_PER_CPU(gdt_page);
-INIT_PER_CPU(fixed_percpu_data);
-INIT_PER_CPU(irq_stack_backing_store);
-
-#ifdef CONFIG_SMP
-. = ASSERT((fixed_percpu_data == 0),
- "fixed_percpu_data is not at start of per-cpu area");
-#endif
#ifdef CONFIG_MITIGATION_UNRET_ENTRY
. = ASSERT((retbleed_return_thunk & 0x3f) == 0, "retbleed_return_thunk not cacheline-aligned");
diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig
index ea2c4f21c1ca..fe8ea8c097de 100644
--- a/arch/x86/kvm/Kconfig
+++ b/arch/x86/kvm/Kconfig
@@ -22,6 +22,7 @@ config KVM_X86
select KVM_COMMON
select KVM_GENERIC_MMU_NOTIFIER
select KVM_ELIDE_TLB_FLUSH_IF_YOUNG
+ select KVM_MMU_LOCKLESS_AGING
select HAVE_KVM_IRQCHIP
select HAVE_KVM_PFNCACHE
select HAVE_KVM_DIRTY_RING_TSO
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index 8eb3a88707f2..5e4d4934c0d3 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -58,25 +58,24 @@ void __init kvm_init_xstate_sizes(void)
u32 xstate_required_size(u64 xstate_bv, bool compacted)
{
- int feature_bit = 0;
u32 ret = XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET;
+ int i;
xstate_bv &= XFEATURE_MASK_EXTEND;
- while (xstate_bv) {
- if (xstate_bv & 0x1) {
- struct cpuid_xstate_sizes *xs = &xstate_sizes[feature_bit];
- u32 offset;
-
- /* ECX[1]: 64B alignment in compacted form */
- if (compacted)
- offset = (xs->ecx & 0x2) ? ALIGN(ret, 64) : ret;
- else
- offset = xs->ebx;
- ret = max(ret, offset + xs->eax);
- }
+ for (i = XFEATURE_YMM; i < ARRAY_SIZE(xstate_sizes) && xstate_bv; i++) {
+ struct cpuid_xstate_sizes *xs = &xstate_sizes[i];
+ u32 offset;
- xstate_bv >>= 1;
- feature_bit++;
+ if (!(xstate_bv & BIT_ULL(i)))
+ continue;
+
+ /* ECX[1]: 64B alignment in compacted form */
+ if (compacted)
+ offset = (xs->ecx & 0x2) ? ALIGN(ret, 64) : ret;
+ else
+ offset = xs->ebx;
+ ret = max(ret, offset + xs->eax);
+ xstate_bv &= ~BIT_ULL(i);
}
return ret;
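The rewritten loop above still implements the same sizing rule: each enabled component lands either at its architectural offset (EBX, standard format) or at the current running size, 64-byte aligned when ECX[1] is set (compacted format). A self-contained sketch of the compacted case, with made-up component sizes rather than real CPUID data:

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define XSAVE_HDR_SIZE		64
#define XSAVE_HDR_OFFSET	512
#define ALIGN_UP(x, a)		(((x) + (a) - 1) & ~((a) - 1))

struct xstate_comp { uint32_t size; bool align64; };

static uint32_t compacted_size(uint64_t xstate_bv, const struct xstate_comp *xc, int n)
{
	uint32_t ret = XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET;
	int i;

	for (i = 2; i < n && xstate_bv; i++) {	/* components 0/1 live in the legacy area */
		uint32_t offset;

		if (!(xstate_bv & (1ull << i)))
			continue;

		offset = xc[i].align64 ? ALIGN_UP(ret, 64) : ret;
		if (offset + xc[i].size > ret)
			ret = offset + xc[i].size;
		xstate_bv &= ~(1ull << i);
	}
	return ret;
}

int main(void)
{
	struct xstate_comp xc[4] = {
		[2] = { .size = 256, .align64 = false },
		[3] = { .size = 64,  .align64 = true },
	};

	printf("%u\n", compacted_size((1ull << 2) | (1ull << 3), xc, 4));
	return 0;
}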
@@ -196,6 +195,7 @@ static int kvm_check_cpuid(struct kvm_vcpu *vcpu)
}
static u32 kvm_apply_cpuid_pv_features_quirk(struct kvm_vcpu *vcpu);
+static void kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu);
/* Check whether the supplied CPUID data is equal to what is already set for the vCPU. */
static int kvm_cpuid_check_equal(struct kvm_vcpu *vcpu, struct kvm_cpuid_entry2 *e2,
@@ -300,10 +300,12 @@ static __always_inline void kvm_update_feature_runtime(struct kvm_vcpu *vcpu,
guest_cpu_cap_change(vcpu, x86_feature, has_feature);
}
-void kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu)
+static void kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu)
{
struct kvm_cpuid_entry2 *best;
+ vcpu->arch.cpuid_dynamic_bits_dirty = false;
+
best = kvm_find_cpuid_entry(vcpu, 1);
if (best) {
kvm_update_feature_runtime(vcpu, best, X86_FEATURE_OSXSAVE,
@@ -333,7 +335,6 @@ void kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu)
cpuid_entry_has(best, X86_FEATURE_XSAVEC)))
best->ebx = xstate_required_size(vcpu->arch.xcr0, true);
}
-EXPORT_SYMBOL_GPL(kvm_update_cpuid_runtime);
static bool kvm_cpuid_has_hyperv(struct kvm_vcpu *vcpu)
{
@@ -646,6 +647,9 @@ int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
if (cpuid->nent < vcpu->arch.cpuid_nent)
return -E2BIG;
+ if (vcpu->arch.cpuid_dynamic_bits_dirty)
+ kvm_update_cpuid_runtime(vcpu);
+
if (copy_to_user(entries, vcpu->arch.cpuid_entries,
vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
return -EFAULT;
@@ -1704,7 +1708,7 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
phys_as = entry->eax & 0xff;
g_phys_as = phys_as;
if (kvm_mmu_get_max_tdp_level() < 5)
- g_phys_as = min(g_phys_as, 48);
+ g_phys_as = min(g_phys_as, 48U);
}
entry->eax = phys_as | (virt_as << 8) | (g_phys_as << 16);
@@ -1763,19 +1767,13 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
entry->ecx = entry->edx = 0;
if (!enable_pmu || !kvm_cpu_cap_has(X86_FEATURE_PERFMON_V2)) {
- entry->eax = entry->ebx;
+ entry->eax = entry->ebx = 0;
break;
}
cpuid_entry_override(entry, CPUID_8000_0022_EAX);
- if (kvm_cpu_cap_has(X86_FEATURE_PERFMON_V2))
- ebx.split.num_core_pmc = kvm_pmu_cap.num_counters_gp;
- else if (kvm_cpu_cap_has(X86_FEATURE_PERFCTR_CORE))
- ebx.split.num_core_pmc = AMD64_NUM_COUNTERS_CORE;
- else
- ebx.split.num_core_pmc = AMD64_NUM_COUNTERS;
-
+ ebx.split.num_core_pmc = kvm_pmu_cap.num_counters_gp;
entry->ebx = ebx.full;
break;
}
@@ -1985,6 +1983,9 @@ bool kvm_cpuid(struct kvm_vcpu *vcpu, u32 *eax, u32 *ebx,
struct kvm_cpuid_entry2 *entry;
bool exact, used_max_basic = false;
+ if (vcpu->arch.cpuid_dynamic_bits_dirty)
+ kvm_update_cpuid_runtime(vcpu);
+
entry = kvm_find_cpuid_entry_index(vcpu, function, index);
exact = !!entry;
@@ -2000,12 +2001,29 @@ bool kvm_cpuid(struct kvm_vcpu *vcpu, u32 *eax, u32 *ebx,
*edx = entry->edx;
if (function == 7 && index == 0) {
u64 data;
- if (!__kvm_get_msr(vcpu, MSR_IA32_TSX_CTRL, &data, true) &&
+ if ((*ebx & (feature_bit(RTM) | feature_bit(HLE))) &&
+ !__kvm_get_msr(vcpu, MSR_IA32_TSX_CTRL, &data, true) &&
(data & TSX_CTRL_CPUID_CLEAR))
*ebx &= ~(feature_bit(RTM) | feature_bit(HLE));
} else if (function == 0x80000007) {
if (kvm_hv_invtsc_suppressed(vcpu))
*edx &= ~feature_bit(CONSTANT_TSC);
+ } else if (IS_ENABLED(CONFIG_KVM_XEN) &&
+ kvm_xen_is_tsc_leaf(vcpu, function)) {
+ /*
+ * Update guest TSC frequency information if necessary.
+ * Ignore failures, there is no sane value that can be
+ * provided if KVM can't get the TSC frequency.
+ */
+ if (kvm_check_request(KVM_REQ_CLOCK_UPDATE, vcpu))
+ kvm_guest_time_update(vcpu);
+
+ if (index == 1) {
+ *ecx = vcpu->arch.pvclock_tsc_mul;
+ *edx = vcpu->arch.pvclock_tsc_shift;
+ } else if (index == 2) {
+ *eax = vcpu->arch.hw_tsc_khz;
+ }
}
} else {
*eax = *ebx = *ecx = *edx = 0;
diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h
index 67d80aa72d50..d2884162a46a 100644
--- a/arch/x86/kvm/cpuid.h
+++ b/arch/x86/kvm/cpuid.h
@@ -11,7 +11,6 @@ extern u32 kvm_cpu_caps[NR_KVM_CPU_CAPS] __read_mostly;
void kvm_set_cpu_caps(void);
void kvm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu);
-void kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu);
struct kvm_cpuid_entry2 *kvm_find_cpuid_entry_index(struct kvm_vcpu *vcpu,
u32 function, u32 index);
struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
@@ -232,6 +231,14 @@ static __always_inline bool guest_cpu_cap_has(struct kvm_vcpu *vcpu,
{
unsigned int x86_leaf = __feature_leaf(x86_feature);
+ /*
+ * Except for MWAIT, querying dynamic feature bits is disallowed, so
+ * that KVM can defer runtime updates until the next CPUID emulation.
+ */
+ BUILD_BUG_ON(x86_feature == X86_FEATURE_APIC ||
+ x86_feature == X86_FEATURE_OSXSAVE ||
+ x86_feature == X86_FEATURE_OSPKE);
+
return vcpu->arch.cpu_caps[x86_leaf] & __feature_bit(x86_feature);
}
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 60986f67c35a..1349e278cd2a 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -477,8 +477,11 @@ static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt,
.dst_val = ctxt->dst.val64,
.src_bytes = ctxt->src.bytes,
.dst_bytes = ctxt->dst.bytes,
+ .src_type = ctxt->src.type,
+ .dst_type = ctxt->dst.type,
.ad_bytes = ctxt->ad_bytes,
- .next_rip = ctxt->eip,
+ .rip = ctxt->eip,
+ .next_rip = ctxt->_eip,
};
return ctxt->ops->intercept(ctxt, &info, stage);
diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
index 6ebeb6cea6c0..24f0318c50d7 100644
--- a/arch/x86/kvm/hyperv.c
+++ b/arch/x86/kvm/hyperv.c
@@ -952,8 +952,7 @@ static void stimer_init(struct kvm_vcpu_hv_stimer *stimer, int timer_index)
{
memset(stimer, 0, sizeof(*stimer));
stimer->index = timer_index;
- hrtimer_init(&stimer->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
- stimer->timer.function = stimer_timer_callback;
+ hrtimer_setup(&stimer->timer, stimer_timer_callback, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
stimer_prepare_msg(stimer);
}
diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c
index d7ab8780ab9e..739aa6c0d0c3 100644
--- a/arch/x86/kvm/i8254.c
+++ b/arch/x86/kvm/i8254.c
@@ -690,8 +690,7 @@ struct kvm_pit *kvm_create_pit(struct kvm *kvm, u32 flags)
pit->kvm = kvm;
pit_state = &pit->pit_state;
- hrtimer_init(&pit_state->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
- pit_state->timer.function = pit_timer_fn;
+ hrtimer_setup(&pit_state->timer, pit_timer_fn, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
pit_state->irq_ack_notifier.gsi = 0;
pit_state->irq_ack_notifier.irq_acked = kvm_pit_ack_irq;
diff --git a/arch/x86/kvm/i8259.c b/arch/x86/kvm/i8259.c
index 8dec646e764b..a8fb19940975 100644
--- a/arch/x86/kvm/i8259.c
+++ b/arch/x86/kvm/i8259.c
@@ -567,7 +567,7 @@ static void pic_irq_request(struct kvm *kvm, int level)
{
struct kvm_pic *s = kvm->arch.vpic;
- if (!s->output)
+ if (!s->output && level)
s->wakeup_needed = true;
s->output = level;
}
diff --git a/arch/x86/kvm/kvm_emulate.h b/arch/x86/kvm/kvm_emulate.h
index 73072585e164..c1df5acfacaf 100644
--- a/arch/x86/kvm/kvm_emulate.h
+++ b/arch/x86/kvm/kvm_emulate.h
@@ -44,7 +44,10 @@ struct x86_instruction_info {
u64 dst_val; /* value of destination operand */
u8 src_bytes; /* size of source operand */
u8 dst_bytes; /* size of destination operand */
+ u8 src_type; /* type of source operand */
+ u8 dst_type; /* type of destination operand */
u8 ad_bytes; /* size of src/dst address */
+ u64 rip; /* rip of the instruction */
u64 next_rip; /* rip following the instruction */
};
@@ -272,8 +275,10 @@ struct operand {
};
};
+#define X86_MAX_INSTRUCTION_LENGTH 15
+
struct fetch_cache {
- u8 data[15];
+ u8 data[X86_MAX_INSTRUCTION_LENGTH];
u8 *ptr;
u8 *end;
};
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index a009c94c26c2..28e3317124fd 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -221,13 +221,6 @@ static inline bool kvm_apic_map_get_logical_dest(struct kvm_apic_map *map,
}
}
-static void kvm_apic_map_free(struct rcu_head *rcu)
-{
- struct kvm_apic_map *map = container_of(rcu, struct kvm_apic_map, rcu);
-
- kvfree(map);
-}
-
static int kvm_recalculate_phys_map(struct kvm_apic_map *new,
struct kvm_vcpu *vcpu,
bool *xapic_id_mismatch)
@@ -489,7 +482,7 @@ out:
mutex_unlock(&kvm->arch.apic_map_lock);
if (old)
- call_rcu(&old->rcu, kvm_apic_map_free);
+ kvfree_rcu(old, rcu);
kvm_make_scan_ioapic_request(kvm);
}
@@ -2593,7 +2586,7 @@ static void __kvm_apic_set_base(struct kvm_vcpu *vcpu, u64 value)
vcpu->arch.apic_base = value;
if ((old_value ^ value) & MSR_IA32_APICBASE_ENABLE)
- kvm_update_cpuid_runtime(vcpu);
+ vcpu->arch.cpuid_dynamic_bits_dirty = true;
if (!apic)
return;
@@ -2921,9 +2914,8 @@ int kvm_create_lapic(struct kvm_vcpu *vcpu)
apic->nr_lvt_entries = kvm_apic_calc_nr_lvt_entries(vcpu);
- hrtimer_init(&apic->lapic_timer.timer, CLOCK_MONOTONIC,
- HRTIMER_MODE_ABS_HARD);
- apic->lapic_timer.timer.function = apic_timer_fn;
+ hrtimer_setup(&apic->lapic_timer.timer, apic_timer_fn, CLOCK_MONOTONIC,
+ HRTIMER_MODE_ABS_HARD);
if (lapic_timer_advance)
apic->lapic_timer.timer_advance_ns = LAPIC_TIMER_ADVANCE_NS_INIT;
@@ -3397,9 +3389,9 @@ int kvm_apic_accept_events(struct kvm_vcpu *vcpu)
if (test_and_clear_bit(KVM_APIC_INIT, &apic->pending_events)) {
kvm_vcpu_reset(vcpu, true);
if (kvm_vcpu_is_bsp(apic->vcpu))
- vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
+ kvm_set_mp_state(vcpu, KVM_MP_STATE_RUNNABLE);
else
- vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED;
+ kvm_set_mp_state(vcpu, KVM_MP_STATE_INIT_RECEIVED);
}
if (test_and_clear_bit(KVM_APIC_SIPI, &apic->pending_events)) {
if (vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) {
@@ -3408,7 +3400,7 @@ int kvm_apic_accept_events(struct kvm_vcpu *vcpu)
sipi_vector = apic->sipi_vector;
kvm_x86_call(vcpu_deliver_sipi_vector)(vcpu,
sipi_vector);
- vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
+ kvm_set_mp_state(vcpu, KVM_MP_STATE_RUNNABLE);
}
}
return 0;
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 8160870398b9..63bb77ee1bb1 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -501,7 +501,7 @@ static bool mmu_spte_update(u64 *sptep, u64 new_spte)
return false;
}
- if (!spte_has_volatile_bits(old_spte))
+ if (!spte_needs_atomic_update(old_spte))
__update_clear_spte_fast(sptep, new_spte);
else
old_spte = __update_clear_spte_slow(sptep, new_spte);
@@ -524,7 +524,7 @@ static u64 mmu_spte_clear_track_bits(struct kvm *kvm, u64 *sptep)
int level = sptep_to_sp(sptep)->role.level;
if (!is_shadow_present_pte(old_spte) ||
- !spte_has_volatile_bits(old_spte))
+ !spte_needs_atomic_update(old_spte))
__update_clear_spte_fast(sptep, SHADOW_NONPRESENT_VALUE);
else
old_spte = __update_clear_spte_slow(sptep, SHADOW_NONPRESENT_VALUE);
@@ -853,32 +853,173 @@ static struct kvm_memory_slot *gfn_to_memslot_dirty_bitmap(struct kvm_vcpu *vcpu
* About rmap_head encoding:
*
* If the bit zero of rmap_head->val is clear, then it points to the only spte
- * in this rmap chain. Otherwise, (rmap_head->val & ~1) points to a struct
+ * in this rmap chain. Otherwise, (rmap_head->val & ~3) points to a struct
* pte_list_desc containing more mappings.
*/
#define KVM_RMAP_MANY BIT(0)
/*
+ * rmaps and PTE lists are mostly protected by mmu_lock (the shadow MMU always
+ * operates with mmu_lock held for write), but rmaps can be walked without
+ * holding mmu_lock so long as the caller can tolerate SPTEs in the rmap chain
+ * being zapped/dropped _while the rmap is locked_.
+ *
+ * Other than the KVM_RMAP_LOCKED flag, modifications to rmap entries must be
+ * done while holding mmu_lock for write. This allows a task walking rmaps
+ * without holding mmu_lock to concurrently walk the same entries as a task
+ * that is holding mmu_lock but _not_ the rmap lock. Neither task will modify
+ * the rmaps, thus the walks are stable.
+ *
+ * As alluded to above, SPTEs in rmaps are _not_ protected by KVM_RMAP_LOCKED,
+ * only the rmap chains themselves are protected. E.g. holding an rmap's lock
+ * ensures all "struct pte_list_desc" fields are stable.
+ */
+#define KVM_RMAP_LOCKED BIT(1)
+
+static unsigned long __kvm_rmap_lock(struct kvm_rmap_head *rmap_head)
+{
+ unsigned long old_val, new_val;
+
+ lockdep_assert_preemption_disabled();
+
+ /*
+ * Elide the lock if the rmap is empty, as lockless walkers (read-only
+ * mode) don't need to (and can't) walk an empty rmap, nor can they add
+ * entries to the rmap. I.e. the only paths that process empty rmaps
+ * do so while holding mmu_lock for write, and are mutually exclusive.
+ */
+ old_val = atomic_long_read(&rmap_head->val);
+ if (!old_val)
+ return 0;
+
+ do {
+ /*
+ * If the rmap is locked, wait for it to be unlocked before
+ * trying to acquire the lock, e.g. to avoid bouncing the cache
+ * line.
+ */
+ while (old_val & KVM_RMAP_LOCKED) {
+ cpu_relax();
+ old_val = atomic_long_read(&rmap_head->val);
+ }
+
+ /*
+ * Recheck for an empty rmap, it may have been purged by the
+ * task that held the lock.
+ */
+ if (!old_val)
+ return 0;
+
+ new_val = old_val | KVM_RMAP_LOCKED;
+ /*
+ * Use try_cmpxchg_acquire() to prevent reads and writes to the rmap
+ * from being reordered outside of the critical section created by
+ * __kvm_rmap_lock().
+ *
+ * Pairs with the atomic_long_set_release() in kvm_rmap_unlock().
+ *
+ * For the !old_val case, no ordering is needed, as there is no rmap
+ * to walk.
+ */
+ } while (!atomic_long_try_cmpxchg_acquire(&rmap_head->val, &old_val, new_val));
+
+ /*
+ * Return the old value, i.e. _without_ the LOCKED bit set. It's
+ * impossible for the return value to be 0 (see above), i.e. the read-
+ * only unlock flow can't get a false positive and fail to unlock.
+ */
+ return old_val;
+}
+
+static unsigned long kvm_rmap_lock(struct kvm *kvm,
+ struct kvm_rmap_head *rmap_head)
+{
+ lockdep_assert_held_write(&kvm->mmu_lock);
+
+ return __kvm_rmap_lock(rmap_head);
+}
+
+static void __kvm_rmap_unlock(struct kvm_rmap_head *rmap_head,
+ unsigned long val)
+{
+ KVM_MMU_WARN_ON(val & KVM_RMAP_LOCKED);
+ /*
+ * Ensure that all accesses to the rmap have completed before unlocking
+ * the rmap.
+ *
+ * Pairs with the atomic_long_try_cmpxchg_acquire() in __kvm_rmap_lock().
+ */
+ atomic_long_set_release(&rmap_head->val, val);
+}
+
+static void kvm_rmap_unlock(struct kvm *kvm,
+ struct kvm_rmap_head *rmap_head,
+ unsigned long new_val)
+{
+ lockdep_assert_held_write(&kvm->mmu_lock);
+
+ __kvm_rmap_unlock(rmap_head, new_val);
+}
+
+static unsigned long kvm_rmap_get(struct kvm_rmap_head *rmap_head)
+{
+ return atomic_long_read(&rmap_head->val) & ~KVM_RMAP_LOCKED;
+}
+
+/*
+ * If mmu_lock isn't held, rmaps can only be locked in read-only mode. The
+ * actual locking is the same, but the caller is disallowed from modifying the
+ * rmap, and so the unlock flow is a nop if the rmap is/was empty.
+ */
+static unsigned long kvm_rmap_lock_readonly(struct kvm_rmap_head *rmap_head)
+{
+ unsigned long rmap_val;
+
+ preempt_disable();
+ rmap_val = __kvm_rmap_lock(rmap_head);
+
+ if (!rmap_val)
+ preempt_enable();
+
+ return rmap_val;
+}
+
+static void kvm_rmap_unlock_readonly(struct kvm_rmap_head *rmap_head,
+ unsigned long old_val)
+{
+ if (!old_val)
+ return;
+
+ KVM_MMU_WARN_ON(old_val != kvm_rmap_get(rmap_head));
+
+ __kvm_rmap_unlock(rmap_head, old_val);
+ preempt_enable();
+}
+
+/*
* Returns the number of pointers in the rmap chain, not counting the new one.
*/
-static int pte_list_add(struct kvm_mmu_memory_cache *cache, u64 *spte,
- struct kvm_rmap_head *rmap_head)
+static int pte_list_add(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
+ u64 *spte, struct kvm_rmap_head *rmap_head)
{
+ unsigned long old_val, new_val;
struct pte_list_desc *desc;
int count = 0;
- if (!rmap_head->val) {
- rmap_head->val = (unsigned long)spte;
- } else if (!(rmap_head->val & KVM_RMAP_MANY)) {
+ old_val = kvm_rmap_lock(kvm, rmap_head);
+
+ if (!old_val) {
+ new_val = (unsigned long)spte;
+ } else if (!(old_val & KVM_RMAP_MANY)) {
desc = kvm_mmu_memory_cache_alloc(cache);
- desc->sptes[0] = (u64 *)rmap_head->val;
+ desc->sptes[0] = (u64 *)old_val;
desc->sptes[1] = spte;
desc->spte_count = 2;
desc->tail_count = 0;
- rmap_head->val = (unsigned long)desc | KVM_RMAP_MANY;
+ new_val = (unsigned long)desc | KVM_RMAP_MANY;
++count;
} else {
- desc = (struct pte_list_desc *)(rmap_head->val & ~KVM_RMAP_MANY);
+ desc = (struct pte_list_desc *)(old_val & ~KVM_RMAP_MANY);
count = desc->tail_count + desc->spte_count;
/*
@@ -887,21 +1028,25 @@ static int pte_list_add(struct kvm_mmu_memory_cache *cache, u64 *spte,
*/
if (desc->spte_count == PTE_LIST_EXT) {
desc = kvm_mmu_memory_cache_alloc(cache);
- desc->more = (struct pte_list_desc *)(rmap_head->val & ~KVM_RMAP_MANY);
+ desc->more = (struct pte_list_desc *)(old_val & ~KVM_RMAP_MANY);
desc->spte_count = 0;
desc->tail_count = count;
- rmap_head->val = (unsigned long)desc | KVM_RMAP_MANY;
+ new_val = (unsigned long)desc | KVM_RMAP_MANY;
+ } else {
+ new_val = old_val;
}
desc->sptes[desc->spte_count++] = spte;
}
+
+ kvm_rmap_unlock(kvm, rmap_head, new_val);
+
return count;
}
-static void pte_list_desc_remove_entry(struct kvm *kvm,
- struct kvm_rmap_head *rmap_head,
+static void pte_list_desc_remove_entry(struct kvm *kvm, unsigned long *rmap_val,
struct pte_list_desc *desc, int i)
{
- struct pte_list_desc *head_desc = (struct pte_list_desc *)(rmap_head->val & ~KVM_RMAP_MANY);
+ struct pte_list_desc *head_desc = (struct pte_list_desc *)(*rmap_val & ~KVM_RMAP_MANY);
int j = head_desc->spte_count - 1;
/*
@@ -928,9 +1073,9 @@ static void pte_list_desc_remove_entry(struct kvm *kvm,
* head at the next descriptor, i.e. the new head.
*/
if (!head_desc->more)
- rmap_head->val = 0;
+ *rmap_val = 0;
else
- rmap_head->val = (unsigned long)head_desc->more | KVM_RMAP_MANY;
+ *rmap_val = (unsigned long)head_desc->more | KVM_RMAP_MANY;
mmu_free_pte_list_desc(head_desc);
}
@@ -938,24 +1083,26 @@ static void pte_list_remove(struct kvm *kvm, u64 *spte,
struct kvm_rmap_head *rmap_head)
{
struct pte_list_desc *desc;
+ unsigned long rmap_val;
int i;
- if (KVM_BUG_ON_DATA_CORRUPTION(!rmap_head->val, kvm))
- return;
+ rmap_val = kvm_rmap_lock(kvm, rmap_head);
+ if (KVM_BUG_ON_DATA_CORRUPTION(!rmap_val, kvm))
+ goto out;
- if (!(rmap_head->val & KVM_RMAP_MANY)) {
- if (KVM_BUG_ON_DATA_CORRUPTION((u64 *)rmap_head->val != spte, kvm))
- return;
+ if (!(rmap_val & KVM_RMAP_MANY)) {
+ if (KVM_BUG_ON_DATA_CORRUPTION((u64 *)rmap_val != spte, kvm))
+ goto out;
- rmap_head->val = 0;
+ rmap_val = 0;
} else {
- desc = (struct pte_list_desc *)(rmap_head->val & ~KVM_RMAP_MANY);
+ desc = (struct pte_list_desc *)(rmap_val & ~KVM_RMAP_MANY);
while (desc) {
for (i = 0; i < desc->spte_count; ++i) {
if (desc->sptes[i] == spte) {
- pte_list_desc_remove_entry(kvm, rmap_head,
+ pte_list_desc_remove_entry(kvm, &rmap_val,
desc, i);
- return;
+ goto out;
}
}
desc = desc->more;
@@ -963,6 +1110,9 @@ static void pte_list_remove(struct kvm *kvm, u64 *spte,
KVM_BUG_ON_DATA_CORRUPTION(true, kvm);
}
+
+out:
+ kvm_rmap_unlock(kvm, rmap_head, rmap_val);
}
static void kvm_zap_one_rmap_spte(struct kvm *kvm,
@@ -977,17 +1127,19 @@ static bool kvm_zap_all_rmap_sptes(struct kvm *kvm,
struct kvm_rmap_head *rmap_head)
{
struct pte_list_desc *desc, *next;
+ unsigned long rmap_val;
int i;
- if (!rmap_head->val)
+ rmap_val = kvm_rmap_lock(kvm, rmap_head);
+ if (!rmap_val)
return false;
- if (!(rmap_head->val & KVM_RMAP_MANY)) {
- mmu_spte_clear_track_bits(kvm, (u64 *)rmap_head->val);
+ if (!(rmap_val & KVM_RMAP_MANY)) {
+ mmu_spte_clear_track_bits(kvm, (u64 *)rmap_val);
goto out;
}
- desc = (struct pte_list_desc *)(rmap_head->val & ~KVM_RMAP_MANY);
+ desc = (struct pte_list_desc *)(rmap_val & ~KVM_RMAP_MANY);
for (; desc; desc = next) {
for (i = 0; i < desc->spte_count; i++)
@@ -997,20 +1149,21 @@ static bool kvm_zap_all_rmap_sptes(struct kvm *kvm,
}
out:
/* rmap_head is meaningless now, remember to reset it */
- rmap_head->val = 0;
+ kvm_rmap_unlock(kvm, rmap_head, 0);
return true;
}
unsigned int pte_list_count(struct kvm_rmap_head *rmap_head)
{
+ unsigned long rmap_val = kvm_rmap_get(rmap_head);
struct pte_list_desc *desc;
- if (!rmap_head->val)
+ if (!rmap_val)
return 0;
- else if (!(rmap_head->val & KVM_RMAP_MANY))
+ else if (!(rmap_val & KVM_RMAP_MANY))
return 1;
- desc = (struct pte_list_desc *)(rmap_head->val & ~KVM_RMAP_MANY);
+ desc = (struct pte_list_desc *)(rmap_val & ~KVM_RMAP_MANY);
return desc->tail_count + desc->spte_count;
}
@@ -1053,6 +1206,7 @@ static void rmap_remove(struct kvm *kvm, u64 *spte)
*/
struct rmap_iterator {
/* private fields */
+ struct rmap_head *head;
struct pte_list_desc *desc; /* holds the sptep if not NULL */
int pos; /* index of the sptep */
};
@@ -1067,23 +1221,19 @@ struct rmap_iterator {
static u64 *rmap_get_first(struct kvm_rmap_head *rmap_head,
struct rmap_iterator *iter)
{
- u64 *sptep;
+ unsigned long rmap_val = kvm_rmap_get(rmap_head);
- if (!rmap_head->val)
+ if (!rmap_val)
return NULL;
- if (!(rmap_head->val & KVM_RMAP_MANY)) {
+ if (!(rmap_val & KVM_RMAP_MANY)) {
iter->desc = NULL;
- sptep = (u64 *)rmap_head->val;
- goto out;
+ return (u64 *)rmap_val;
}
- iter->desc = (struct pte_list_desc *)(rmap_head->val & ~KVM_RMAP_MANY);
+ iter->desc = (struct pte_list_desc *)(rmap_val & ~KVM_RMAP_MANY);
iter->pos = 0;
- sptep = iter->desc->sptes[iter->pos];
-out:
- BUG_ON(!is_shadow_present_pte(*sptep));
- return sptep;
+ return iter->desc->sptes[iter->pos];
}
/*
@@ -1093,14 +1243,11 @@ out:
*/
static u64 *rmap_get_next(struct rmap_iterator *iter)
{
- u64 *sptep;
-
if (iter->desc) {
if (iter->pos < PTE_LIST_EXT - 1) {
++iter->pos;
- sptep = iter->desc->sptes[iter->pos];
- if (sptep)
- goto out;
+ if (iter->desc->sptes[iter->pos])
+ return iter->desc->sptes[iter->pos];
}
iter->desc = iter->desc->more;
@@ -1108,20 +1255,24 @@ static u64 *rmap_get_next(struct rmap_iterator *iter)
if (iter->desc) {
iter->pos = 0;
/* desc->sptes[0] cannot be NULL */
- sptep = iter->desc->sptes[iter->pos];
- goto out;
+ return iter->desc->sptes[iter->pos];
}
}
return NULL;
-out:
- BUG_ON(!is_shadow_present_pte(*sptep));
- return sptep;
}
-#define for_each_rmap_spte(_rmap_head_, _iter_, _spte_) \
- for (_spte_ = rmap_get_first(_rmap_head_, _iter_); \
- _spte_; _spte_ = rmap_get_next(_iter_))
+#define __for_each_rmap_spte(_rmap_head_, _iter_, _sptep_) \
+ for (_sptep_ = rmap_get_first(_rmap_head_, _iter_); \
+ _sptep_; _sptep_ = rmap_get_next(_iter_))
+
+#define for_each_rmap_spte(_rmap_head_, _iter_, _sptep_) \
+ __for_each_rmap_spte(_rmap_head_, _iter_, _sptep_) \
+ if (!WARN_ON_ONCE(!is_shadow_present_pte(*(_sptep_)))) \
+
+#define for_each_rmap_spte_lockless(_rmap_head_, _iter_, _sptep_, _spte_) \
+ __for_each_rmap_spte(_rmap_head_, _iter_, _sptep_) \
+ if (is_shadow_present_pte(_spte_ = mmu_spte_get_lockless(_sptep_)))
static void drop_spte(struct kvm *kvm, u64 *sptep)
{
@@ -1207,12 +1358,13 @@ static bool __rmap_clear_dirty(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
struct rmap_iterator iter;
bool flush = false;
- for_each_rmap_spte(rmap_head, &iter, sptep)
+ for_each_rmap_spte(rmap_head, &iter, sptep) {
if (spte_ad_need_write_protect(*sptep))
flush |= test_and_clear_bit(PT_WRITABLE_SHIFT,
(unsigned long *)sptep);
else
flush |= spte_clear_dirty(sptep);
+ }
return flush;
}
@@ -1401,7 +1553,7 @@ static void slot_rmap_walk_next(struct slot_rmap_walk_iterator *iterator)
while (++iterator->rmap <= iterator->end_rmap) {
iterator->gfn += KVM_PAGES_PER_HPAGE(iterator->level);
- if (iterator->rmap->val)
+ if (atomic_long_read(&iterator->rmap->val))
return;
}
@@ -1533,7 +1685,7 @@ static void __rmap_add(struct kvm *kvm,
kvm_update_page_stats(kvm, sp->role.level, 1);
rmap_head = gfn_to_rmap(gfn, sp->role.level, slot);
- rmap_count = pte_list_add(cache, spte, rmap_head);
+ rmap_count = pte_list_add(kvm, cache, spte, rmap_head);
if (rmap_count > kvm->stat.max_mmu_rmap_size)
kvm->stat.max_mmu_rmap_size = rmap_count;
@@ -1552,51 +1704,67 @@ static void rmap_add(struct kvm_vcpu *vcpu, const struct kvm_memory_slot *slot,
}
static bool kvm_rmap_age_gfn_range(struct kvm *kvm,
- struct kvm_gfn_range *range, bool test_only)
+ struct kvm_gfn_range *range,
+ bool test_only)
{
- struct slot_rmap_walk_iterator iterator;
+ struct kvm_rmap_head *rmap_head;
struct rmap_iterator iter;
+ unsigned long rmap_val;
bool young = false;
u64 *sptep;
+ gfn_t gfn;
+ int level;
+ u64 spte;
- for_each_slot_rmap_range(range->slot, PG_LEVEL_4K, KVM_MAX_HUGEPAGE_LEVEL,
- range->start, range->end - 1, &iterator) {
- for_each_rmap_spte(iterator.rmap, &iter, sptep) {
- u64 spte = *sptep;
+ for (level = PG_LEVEL_4K; level <= KVM_MAX_HUGEPAGE_LEVEL; level++) {
+ for (gfn = range->start; gfn < range->end;
+ gfn += KVM_PAGES_PER_HPAGE(level)) {
+ rmap_head = gfn_to_rmap(gfn, level, range->slot);
+ rmap_val = kvm_rmap_lock_readonly(rmap_head);
- if (!is_accessed_spte(spte))
- continue;
+ for_each_rmap_spte_lockless(rmap_head, &iter, sptep, spte) {
+ if (!is_accessed_spte(spte))
+ continue;
- if (test_only)
- return true;
-
- if (spte_ad_enabled(spte)) {
- clear_bit((ffs(shadow_accessed_mask) - 1),
- (unsigned long *)sptep);
- } else {
- /*
- * WARN if mmu_spte_update() signals the need
- * for a TLB flush, as Access tracking a SPTE
- * should never trigger an _immediate_ flush.
- */
- spte = mark_spte_for_access_track(spte);
- WARN_ON_ONCE(mmu_spte_update(sptep, spte));
+ if (test_only) {
+ kvm_rmap_unlock_readonly(rmap_head, rmap_val);
+ return true;
+ }
+
+ if (spte_ad_enabled(spte))
+ clear_bit((ffs(shadow_accessed_mask) - 1),
+ (unsigned long *)sptep);
+ else
+ /*
+ * If the following cmpxchg fails, the
+ * spte is being concurrently modified
+ * and should most likely stay young.
+ */
+ cmpxchg64(sptep, spte,
+ mark_spte_for_access_track(spte));
+ young = true;
}
- young = true;
+
+ kvm_rmap_unlock_readonly(rmap_head, rmap_val);
}
}
return young;
}
+static bool kvm_may_have_shadow_mmu_sptes(struct kvm *kvm)
+{
+ return !tdp_mmu_enabled || READ_ONCE(kvm->arch.indirect_shadow_pages);
+}
+
bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
bool young = false;
- if (kvm_memslots_have_rmaps(kvm))
- young = kvm_rmap_age_gfn_range(kvm, range, false);
-
if (tdp_mmu_enabled)
- young |= kvm_tdp_mmu_age_gfn_range(kvm, range);
+ young = kvm_tdp_mmu_age_gfn_range(kvm, range);
+
+ if (kvm_may_have_shadow_mmu_sptes(kvm))
+ young |= kvm_rmap_age_gfn_range(kvm, range, false);
return young;
}
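The access-tracking path above deliberately tolerates a lost race: if the cmpxchg that installs the access-tracked SPTE fails, the page was being touched concurrently and is simply left marked young. A tiny sketch of that safe-to-fail pattern; the Accessed bit position is invented for illustration:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define SPTE_ACCESSED	(1ull << 5)	/* illustrative bit, not the real layout */

static bool age_spte(_Atomic uint64_t *sptep)
{
	uint64_t old = atomic_load(sptep);

	if (!(old & SPTE_ACCESSED))
		return false;		/* not young */

	/* Failure is fine: a racing writer means the page is likely still young. */
	atomic_compare_exchange_strong(sptep, &old, old & ~SPTE_ACCESSED);
	return true;
}

int main(void)
{
	_Atomic uint64_t spte = SPTE_ACCESSED | 0x1;

	return age_spte(&spte) ? 0 : 1;
}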
@@ -1605,11 +1773,14 @@ bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
bool young = false;
- if (kvm_memslots_have_rmaps(kvm))
- young = kvm_rmap_age_gfn_range(kvm, range, true);
-
if (tdp_mmu_enabled)
- young |= kvm_tdp_mmu_test_age_gfn(kvm, range);
+ young = kvm_tdp_mmu_test_age_gfn(kvm, range);
+
+ if (young)
+ return young;
+
+ if (kvm_may_have_shadow_mmu_sptes(kvm))
+ young |= kvm_rmap_age_gfn_range(kvm, range, true);
return young;
}
@@ -1656,13 +1827,14 @@ static unsigned kvm_page_table_hashfn(gfn_t gfn)
return hash_64(gfn, KVM_MMU_HASH_SHIFT);
}
-static void mmu_page_add_parent_pte(struct kvm_mmu_memory_cache *cache,
+static void mmu_page_add_parent_pte(struct kvm *kvm,
+ struct kvm_mmu_memory_cache *cache,
struct kvm_mmu_page *sp, u64 *parent_pte)
{
if (!parent_pte)
return;
- pte_list_add(cache, parent_pte, &sp->parent_ptes);
+ pte_list_add(kvm, cache, parent_pte, &sp->parent_ptes);
}
static void mmu_page_remove_parent_pte(struct kvm *kvm, struct kvm_mmu_page *sp,
@@ -2352,7 +2524,7 @@ static void __link_shadow_page(struct kvm *kvm,
mmu_spte_set(sptep, spte);
- mmu_page_add_parent_pte(cache, sp, sptep);
+ mmu_page_add_parent_pte(kvm, cache, sp, sptep);
/*
* The non-direct sub-pagetable must be updated before linking. For
@@ -2416,7 +2588,8 @@ static int mmu_page_zap_pte(struct kvm *kvm, struct kvm_mmu_page *sp,
* avoids retaining a large number of stale nested SPs.
*/
if (tdp_enabled && invalid_list &&
- child->role.guest_mode && !child->parent_ptes.val)
+ child->role.guest_mode &&
+ !atomic_long_read(&child->parent_ptes.val))
return kvm_mmu_prepare_zap_page(kvm, child,
invalid_list);
}
diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h
index f4711674c47b..68e323568e95 100644
--- a/arch/x86/kvm/mmu/paging_tmpl.h
+++ b/arch/x86/kvm/mmu/paging_tmpl.h
@@ -510,8 +510,7 @@ error:
* Note, pte_access holds the raw RWX bits from the EPTE, not
* ACC_*_MASK flags!
*/
- walker->fault.exit_qualification |= (pte_access & VMX_EPT_RWX_MASK) <<
- EPT_VIOLATION_RWX_SHIFT;
+ walker->fault.exit_qualification |= EPT_VIOLATION_RWX_TO_PROT(pte_access);
}
#endif
walker->fault.address = addr;
diff --git a/arch/x86/kvm/mmu/spte.c b/arch/x86/kvm/mmu/spte.c
index 22551e2f1d00..0f9f47b4ab0e 100644
--- a/arch/x86/kvm/mmu/spte.c
+++ b/arch/x86/kvm/mmu/spte.c
@@ -129,25 +129,32 @@ static bool kvm_is_mmio_pfn(kvm_pfn_t pfn)
}
/*
- * Returns true if the SPTE has bits that may be set without holding mmu_lock.
- * The caller is responsible for checking if the SPTE is shadow-present, and
- * for determining whether or not the caller cares about non-leaf SPTEs.
+ * Returns true if the SPTE needs to be updated atomically due to having bits
+ * that may be changed without holding mmu_lock, and for which KVM must not
+ * lose information. E.g. KVM must not drop Dirty bit information. The caller
+ * is responsible for checking if the SPTE is shadow-present, and for
+ * determining whether or not the caller cares about non-leaf SPTEs.
*/
-bool spte_has_volatile_bits(u64 spte)
+bool spte_needs_atomic_update(u64 spte)
{
+ /* SPTEs can be made Writable bit by KVM's fast page fault handler. */
if (!is_writable_pte(spte) && is_mmu_writable_spte(spte))
return true;
- if (is_access_track_spte(spte))
+ /*
+ * A/D-disabled SPTEs can be access-tracked by aging, and access-tracked
+ * SPTEs can be restored by KVM's fast page fault handler.
+ */
+ if (!spte_ad_enabled(spte))
return true;
- if (spte_ad_enabled(spte)) {
- if (!(spte & shadow_accessed_mask) ||
- (is_writable_pte(spte) && !(spte & shadow_dirty_mask)))
- return true;
- }
-
- return false;
+ /*
+ * Dirty and Accessed bits can be set by the CPU. Ignore the Accessed
+ * bit, as KVM tolerates false negatives/positives, e.g. KVM doesn't
+ * invalidate TLBs when aging SPTEs, and so it's safe to clobber the
+ * Accessed bit (and rare in practice).
+ */
+ return is_writable_pte(spte) && !(spte & shadow_dirty_mask);
}
bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
diff --git a/arch/x86/kvm/mmu/spte.h b/arch/x86/kvm/mmu/spte.h
index 59746854c0af..79cdceba9857 100644
--- a/arch/x86/kvm/mmu/spte.h
+++ b/arch/x86/kvm/mmu/spte.h
@@ -519,7 +519,7 @@ static inline u64 get_mmio_spte_generation(u64 spte)
return gen;
}
-bool spte_has_volatile_bits(u64 spte);
+bool spte_needs_atomic_update(u64 spte);
bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
const struct kvm_memory_slot *slot,
diff --git a/arch/x86/kvm/mmu/tdp_iter.h b/arch/x86/kvm/mmu/tdp_iter.h
index 047b78333653..364c5da6c499 100644
--- a/arch/x86/kvm/mmu/tdp_iter.h
+++ b/arch/x86/kvm/mmu/tdp_iter.h
@@ -25,6 +25,13 @@ static inline u64 kvm_tdp_mmu_write_spte_atomic(tdp_ptep_t sptep, u64 new_spte)
return xchg(rcu_dereference(sptep), new_spte);
}
+static inline u64 tdp_mmu_clear_spte_bits_atomic(tdp_ptep_t sptep, u64 mask)
+{
+ atomic64_t *sptep_atomic = (atomic64_t *)rcu_dereference(sptep);
+
+ return (u64)atomic64_fetch_and(~mask, sptep_atomic);
+}
+
static inline void __kvm_tdp_mmu_write_spte(tdp_ptep_t sptep, u64 new_spte)
{
KVM_MMU_WARN_ON(is_ept_ve_possible(new_spte));
@@ -32,28 +39,21 @@ static inline void __kvm_tdp_mmu_write_spte(tdp_ptep_t sptep, u64 new_spte)
}
/*
- * SPTEs must be modified atomically if they are shadow-present, leaf
- * SPTEs, and have volatile bits, i.e. has bits that can be set outside
- * of mmu_lock. The Writable bit can be set by KVM's fast page fault
- * handler, and Accessed and Dirty bits can be set by the CPU.
- *
- * Note, non-leaf SPTEs do have Accessed bits and those bits are
- * technically volatile, but KVM doesn't consume the Accessed bit of
- * non-leaf SPTEs, i.e. KVM doesn't care if it clobbers the bit. This
- * logic needs to be reassessed if KVM were to use non-leaf Accessed
- * bits, e.g. to skip stepping down into child SPTEs when aging SPTEs.
+ * SPTEs must be modified atomically if they are shadow-present, leaf SPTEs,
+ * and have volatile bits (bits that can be set outside of mmu_lock) that
+ * must not be clobbered.
*/
-static inline bool kvm_tdp_mmu_spte_need_atomic_write(u64 old_spte, int level)
+static inline bool kvm_tdp_mmu_spte_need_atomic_update(u64 old_spte, int level)
{
return is_shadow_present_pte(old_spte) &&
is_last_spte(old_spte, level) &&
- spte_has_volatile_bits(old_spte);
+ spte_needs_atomic_update(old_spte);
}
static inline u64 kvm_tdp_mmu_write_spte(tdp_ptep_t sptep, u64 old_spte,
u64 new_spte, int level)
{
- if (kvm_tdp_mmu_spte_need_atomic_write(old_spte, level))
+ if (kvm_tdp_mmu_spte_need_atomic_update(old_spte, level))
return kvm_tdp_mmu_write_spte_atomic(sptep, new_spte);
__kvm_tdp_mmu_write_spte(sptep, new_spte);
@@ -63,12 +63,8 @@ static inline u64 kvm_tdp_mmu_write_spte(tdp_ptep_t sptep, u64 old_spte,
static inline u64 tdp_mmu_clear_spte_bits(tdp_ptep_t sptep, u64 old_spte,
u64 mask, int level)
{
- atomic64_t *sptep_atomic;
-
- if (kvm_tdp_mmu_spte_need_atomic_write(old_spte, level)) {
- sptep_atomic = (atomic64_t *)rcu_dereference(sptep);
- return (u64)atomic64_fetch_and(~mask, sptep_atomic);
- }
+ if (kvm_tdp_mmu_spte_need_atomic_update(old_spte, level))
+ return tdp_mmu_clear_spte_bits_atomic(sptep, mask);
__kvm_tdp_mmu_write_spte(sptep, old_spte & ~mask);
return old_spte;
diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
index 046b6ba31197..7cc0564f5f97 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.c
+++ b/arch/x86/kvm/mmu/tdp_mmu.c
@@ -193,6 +193,19 @@ static struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
!tdp_mmu_root_match((_root), (_types)))) { \
} else
+/*
+ * Iterate over all TDP MMU roots in an RCU read-side critical section.
+ * It is safe to iterate over the SPTEs under the root, but their values will
+ * be unstable, so all writes must be atomic. As this routine is meant to be
+ * used without holding the mmu_lock at all, any bits that are flipped must
+ * be reflected in kvm_tdp_mmu_spte_need_atomic_update().
+ */
+#define for_each_tdp_mmu_root_rcu(_kvm, _root, _as_id, _types) \
+ list_for_each_entry_rcu(_root, &_kvm->arch.tdp_mmu_roots, link) \
+ if ((_as_id >= 0 && kvm_mmu_page_as_id(_root) != _as_id) || \
+ !tdp_mmu_root_match((_root), (_types))) { \
+ } else
+
#define for_each_valid_tdp_mmu_root(_kvm, _root, _as_id) \
__for_each_tdp_mmu_root(_kvm, _root, _as_id, KVM_VALID_ROOTS)
@@ -774,9 +787,6 @@ static inline void tdp_mmu_iter_set_spte(struct kvm *kvm, struct tdp_iter *iter,
continue; \
else
-#define tdp_mmu_for_each_pte(_iter, _kvm, _root, _start, _end) \
- for_each_tdp_pte(_iter, _kvm, _root, _start, _end)
-
static inline bool __must_check tdp_mmu_iter_need_resched(struct kvm *kvm,
struct tdp_iter *iter)
{
@@ -1235,7 +1245,7 @@ int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
rcu_read_lock();
- tdp_mmu_for_each_pte(iter, kvm, root, fault->gfn, fault->gfn + 1) {
+ for_each_tdp_pte(iter, kvm, root, fault->gfn, fault->gfn + 1) {
int r;
if (fault->nx_huge_page_workaround_enabled)
@@ -1332,21 +1342,22 @@ bool kvm_tdp_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range,
* from the clear_young() or clear_flush_young() notifier, which uses the
* return value to determine if the page has been accessed.
*/
-static void kvm_tdp_mmu_age_spte(struct tdp_iter *iter)
+static void kvm_tdp_mmu_age_spte(struct kvm *kvm, struct tdp_iter *iter)
{
u64 new_spte;
if (spte_ad_enabled(iter->old_spte)) {
- iter->old_spte = tdp_mmu_clear_spte_bits(iter->sptep,
- iter->old_spte,
- shadow_accessed_mask,
- iter->level);
+ iter->old_spte = tdp_mmu_clear_spte_bits_atomic(iter->sptep,
+ shadow_accessed_mask);
new_spte = iter->old_spte & ~shadow_accessed_mask;
} else {
new_spte = mark_spte_for_access_track(iter->old_spte);
- iter->old_spte = kvm_tdp_mmu_write_spte(iter->sptep,
- iter->old_spte, new_spte,
- iter->level);
+ /*
+ * It is safe for the following cmpxchg to fail. Leave the
+ * Accessed bit set, as the spte is most likely young anyway.
+ */
+ if (__tdp_mmu_set_spte_atomic(kvm, iter, new_spte))
+ return;
}
trace_kvm_tdp_mmu_spte_changed(iter->as_id, iter->gfn, iter->level,
@@ -1371,9 +1382,9 @@ static bool __kvm_tdp_mmu_age_gfn_range(struct kvm *kvm,
* valid roots!
*/
WARN_ON(types & ~KVM_VALID_ROOTS);
- __for_each_tdp_mmu_root(kvm, root, range->slot->as_id, types) {
- guard(rcu)();
+ guard(rcu)();
+ for_each_tdp_mmu_root_rcu(kvm, root, range->slot->as_id, types) {
tdp_root_for_each_leaf_pte(iter, kvm, root, range->start, range->end) {
if (!is_accessed_spte(iter.old_spte))
continue;
@@ -1382,7 +1393,7 @@ static bool __kvm_tdp_mmu_age_gfn_range(struct kvm *kvm,
return true;
ret = true;
- kvm_tdp_mmu_age_spte(&iter);
+ kvm_tdp_mmu_age_spte(kvm, &iter);
}
}
@@ -1904,7 +1915,7 @@ int kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes,
*root_level = vcpu->arch.mmu->root_role.level;
- tdp_mmu_for_each_pte(iter, vcpu->kvm, root, gfn, gfn + 1) {
+ for_each_tdp_pte(iter, vcpu->kvm, root, gfn, gfn + 1) {
leaf = iter.level;
sptes[leaf] = iter.old_spte;
}
@@ -1931,7 +1942,7 @@ u64 *kvm_tdp_mmu_fast_pf_get_last_sptep(struct kvm_vcpu *vcpu, gfn_t gfn,
struct tdp_iter iter;
tdp_ptep_t sptep = NULL;
- tdp_mmu_for_each_pte(iter, vcpu->kvm, root, gfn, gfn + 1) {
+ for_each_tdp_pte(iter, vcpu->kvm, root, gfn, gfn + 1) {
*spte = iter.old_spte;
sptep = iter.sptep;
}
diff --git a/arch/x86/kvm/smm.c b/arch/x86/kvm/smm.c
index e0ab7df27b66..699e551ec93b 100644
--- a/arch/x86/kvm/smm.c
+++ b/arch/x86/kvm/smm.c
@@ -358,7 +358,7 @@ void enter_smm(struct kvm_vcpu *vcpu)
goto error;
#endif
- kvm_update_cpuid_runtime(vcpu);
+ vcpu->arch.cpuid_dynamic_bits_dirty = true;
kvm_mmu_reset_context(vcpu);
return;
error:
diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index 04c375bf1ac2..834b67672d50 100644
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -994,7 +994,7 @@ int nested_svm_vmexit(struct vcpu_svm *svm)
kvm_clear_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
/* in case we halted in L2 */
- svm->vcpu.arch.mp_state = KVM_MP_STATE_RUNNABLE;
+ kvm_set_mp_state(vcpu, KVM_MP_STATE_RUNNABLE);
/* Give the current vmcb to the guest */
diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index 0dbb25442ec1..0bc708ee2788 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -140,7 +140,7 @@ static inline bool is_mirroring_enc_context(struct kvm *kvm)
static bool sev_vcpu_has_debug_swap(struct vcpu_svm *svm)
{
struct kvm_vcpu *vcpu = &svm->vcpu;
- struct kvm_sev_info *sev = &to_kvm_svm(vcpu->kvm)->sev_info;
+ struct kvm_sev_info *sev = to_kvm_sev_info(vcpu->kvm);
return sev->vmsa_features & SVM_SEV_FEAT_DEBUG_SWAP;
}
@@ -226,9 +226,7 @@ e_uncharge:
static unsigned int sev_get_asid(struct kvm *kvm)
{
- struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
-
- return sev->asid;
+ return to_kvm_sev_info(kvm)->asid;
}
static void sev_asid_free(struct kvm_sev_info *sev)
@@ -403,7 +401,7 @@ static int __sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp,
struct kvm_sev_init *data,
unsigned long vm_type)
{
- struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
+ struct kvm_sev_info *sev = to_kvm_sev_info(kvm);
struct sev_platform_init_args init_args = {0};
bool es_active = vm_type != KVM_X86_SEV_VM;
u64 valid_vmsa_features = es_active ? sev_supported_vmsa_features : 0;
@@ -500,10 +498,9 @@ static int sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp)
static int sev_guest_init2(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
- struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
struct kvm_sev_init data;
- if (!sev->need_init)
+ if (!to_kvm_sev_info(kvm)->need_init)
return -EINVAL;
if (kvm->arch.vm_type != KVM_X86_SEV_VM &&
@@ -543,14 +540,14 @@ static int __sev_issue_cmd(int fd, int id, void *data, int *error)
static int sev_issue_cmd(struct kvm *kvm, int id, void *data, int *error)
{
- struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
+ struct kvm_sev_info *sev = to_kvm_sev_info(kvm);
return __sev_issue_cmd(sev->fd, id, data, error);
}
static int sev_launch_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
- struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
+ struct kvm_sev_info *sev = to_kvm_sev_info(kvm);
struct sev_data_launch_start start;
struct kvm_sev_launch_start params;
void *dh_blob, *session_blob;
@@ -622,9 +619,9 @@ e_free_dh:
static struct page **sev_pin_memory(struct kvm *kvm, unsigned long uaddr,
unsigned long ulen, unsigned long *n,
- int write)
+ unsigned int flags)
{
- struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
+ struct kvm_sev_info *sev = to_kvm_sev_info(kvm);
unsigned long npages, size;
int npinned;
unsigned long locked, lock_limit;
@@ -663,7 +660,7 @@ static struct page **sev_pin_memory(struct kvm *kvm, unsigned long uaddr,
return ERR_PTR(-ENOMEM);
/* Pin the user virtual address. */
- npinned = pin_user_pages_fast(uaddr, npages, write ? FOLL_WRITE : 0, pages);
+ npinned = pin_user_pages_fast(uaddr, npages, flags, pages);
if (npinned != npages) {
pr_err("SEV: Failure locking %lu pages.\n", npages);
ret = -ENOMEM;
@@ -686,11 +683,9 @@ err:
static void sev_unpin_memory(struct kvm *kvm, struct page **pages,
unsigned long npages)
{
- struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
-
unpin_user_pages(pages, npages);
kvfree(pages);
- sev->pages_locked -= npages;
+ to_kvm_sev_info(kvm)->pages_locked -= npages;
}
static void sev_clflush_pages(struct page *pages[], unsigned long npages)
@@ -734,7 +729,6 @@ static unsigned long get_num_contig_pages(unsigned long idx,
static int sev_launch_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
unsigned long vaddr, vaddr_end, next_vaddr, npages, pages, size, i;
- struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
struct kvm_sev_launch_update_data params;
struct sev_data_launch_update_data data;
struct page **inpages;
@@ -751,7 +745,7 @@ static int sev_launch_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
vaddr_end = vaddr + size;
/* Lock the user memory. */
- inpages = sev_pin_memory(kvm, vaddr, size, &npages, 1);
+ inpages = sev_pin_memory(kvm, vaddr, size, &npages, FOLL_WRITE);
if (IS_ERR(inpages))
return PTR_ERR(inpages);
@@ -762,7 +756,7 @@ static int sev_launch_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
sev_clflush_pages(inpages, npages);
data.reserved = 0;
- data.handle = sev->handle;
+ data.handle = to_kvm_sev_info(kvm)->handle;
for (i = 0; vaddr < vaddr_end; vaddr = next_vaddr, i += pages) {
int offset, len;
@@ -802,7 +796,7 @@ e_unpin:
static int sev_es_sync_vmsa(struct vcpu_svm *svm)
{
struct kvm_vcpu *vcpu = &svm->vcpu;
- struct kvm_sev_info *sev = &to_kvm_svm(vcpu->kvm)->sev_info;
+ struct kvm_sev_info *sev = to_kvm_sev_info(vcpu->kvm);
struct sev_es_save_area *save = svm->sev_es.vmsa;
struct xregs_state *xsave;
const u8 *s;
@@ -972,7 +966,6 @@ static int sev_launch_update_vmsa(struct kvm *kvm, struct kvm_sev_cmd *argp)
static int sev_launch_measure(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
void __user *measure = u64_to_user_ptr(argp->data);
- struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
struct sev_data_launch_measure data;
struct kvm_sev_launch_measure params;
void __user *p = NULL;
@@ -1005,7 +998,7 @@ static int sev_launch_measure(struct kvm *kvm, struct kvm_sev_cmd *argp)
}
cmd:
- data.handle = sev->handle;
+ data.handle = to_kvm_sev_info(kvm)->handle;
ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_MEASURE, &data, &argp->error);
/*
@@ -1033,19 +1026,17 @@ e_free_blob:
static int sev_launch_finish(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
- struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
struct sev_data_launch_finish data;
if (!sev_guest(kvm))
return -ENOTTY;
- data.handle = sev->handle;
+ data.handle = to_kvm_sev_info(kvm)->handle;
return sev_issue_cmd(kvm, SEV_CMD_LAUNCH_FINISH, &data, &argp->error);
}
static int sev_guest_status(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
- struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
struct kvm_sev_guest_status params;
struct sev_data_guest_status data;
int ret;
@@ -1055,7 +1046,7 @@ static int sev_guest_status(struct kvm *kvm, struct kvm_sev_cmd *argp)
memset(&data, 0, sizeof(data));
- data.handle = sev->handle;
+ data.handle = to_kvm_sev_info(kvm)->handle;
ret = sev_issue_cmd(kvm, SEV_CMD_GUEST_STATUS, &data, &argp->error);
if (ret)
return ret;
@@ -1074,11 +1065,10 @@ static int __sev_issue_dbg_cmd(struct kvm *kvm, unsigned long src,
unsigned long dst, int size,
int *error, bool enc)
{
- struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
struct sev_data_dbg data;
data.reserved = 0;
- data.handle = sev->handle;
+ data.handle = to_kvm_sev_info(kvm)->handle;
data.dst_addr = dst;
data.src_addr = src;
data.len = size;
@@ -1250,7 +1240,7 @@ static int sev_dbg_crypt(struct kvm *kvm, struct kvm_sev_cmd *argp, bool dec)
if (IS_ERR(src_p))
return PTR_ERR(src_p);
- dst_p = sev_pin_memory(kvm, dst_vaddr & PAGE_MASK, PAGE_SIZE, &n, 1);
+ dst_p = sev_pin_memory(kvm, dst_vaddr & PAGE_MASK, PAGE_SIZE, &n, FOLL_WRITE);
if (IS_ERR(dst_p)) {
sev_unpin_memory(kvm, src_p, n);
return PTR_ERR(dst_p);
@@ -1302,7 +1292,6 @@ err:
static int sev_launch_secret(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
- struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
struct sev_data_launch_secret data;
struct kvm_sev_launch_secret params;
struct page **pages;
@@ -1316,7 +1305,7 @@ static int sev_launch_secret(struct kvm *kvm, struct kvm_sev_cmd *argp)
if (copy_from_user(&params, u64_to_user_ptr(argp->data), sizeof(params)))
return -EFAULT;
- pages = sev_pin_memory(kvm, params.guest_uaddr, params.guest_len, &n, 1);
+ pages = sev_pin_memory(kvm, params.guest_uaddr, params.guest_len, &n, FOLL_WRITE);
if (IS_ERR(pages))
return PTR_ERR(pages);
@@ -1358,7 +1347,7 @@ static int sev_launch_secret(struct kvm *kvm, struct kvm_sev_cmd *argp)
data.hdr_address = __psp_pa(hdr);
data.hdr_len = params.hdr_len;
- data.handle = sev->handle;
+ data.handle = to_kvm_sev_info(kvm)->handle;
ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_SECRET, &data, &argp->error);
kfree(hdr);
@@ -1378,7 +1367,6 @@ e_unpin_memory:
static int sev_get_attestation_report(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
void __user *report = u64_to_user_ptr(argp->data);
- struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
struct sev_data_attestation_report data;
struct kvm_sev_attestation_report params;
void __user *p;
@@ -1411,7 +1399,7 @@ static int sev_get_attestation_report(struct kvm *kvm, struct kvm_sev_cmd *argp)
memcpy(data.mnonce, params.mnonce, sizeof(params.mnonce));
}
cmd:
- data.handle = sev->handle;
+ data.handle = to_kvm_sev_info(kvm)->handle;
ret = sev_issue_cmd(kvm, SEV_CMD_ATTESTATION_REPORT, &data, &argp->error);
/*
* If we query the session length, FW responded with expected data.
@@ -1441,12 +1429,11 @@ static int
__sev_send_start_query_session_length(struct kvm *kvm, struct kvm_sev_cmd *argp,
struct kvm_sev_send_start *params)
{
- struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
struct sev_data_send_start data;
int ret;
memset(&data, 0, sizeof(data));
- data.handle = sev->handle;
+ data.handle = to_kvm_sev_info(kvm)->handle;
ret = sev_issue_cmd(kvm, SEV_CMD_SEND_START, &data, &argp->error);
params->session_len = data.session_len;
@@ -1459,7 +1446,6 @@ __sev_send_start_query_session_length(struct kvm *kvm, struct kvm_sev_cmd *argp,
static int sev_send_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
- struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
struct sev_data_send_start data;
struct kvm_sev_send_start params;
void *amd_certs, *session_data;
@@ -1520,7 +1506,7 @@ static int sev_send_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
data.amd_certs_len = params.amd_certs_len;
data.session_address = __psp_pa(session_data);
data.session_len = params.session_len;
- data.handle = sev->handle;
+ data.handle = to_kvm_sev_info(kvm)->handle;
ret = sev_issue_cmd(kvm, SEV_CMD_SEND_START, &data, &argp->error);
@@ -1552,12 +1538,11 @@ static int
__sev_send_update_data_query_lengths(struct kvm *kvm, struct kvm_sev_cmd *argp,
struct kvm_sev_send_update_data *params)
{
- struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
struct sev_data_send_update_data data;
int ret;
memset(&data, 0, sizeof(data));
- data.handle = sev->handle;
+ data.handle = to_kvm_sev_info(kvm)->handle;
ret = sev_issue_cmd(kvm, SEV_CMD_SEND_UPDATE_DATA, &data, &argp->error);
params->hdr_len = data.hdr_len;
@@ -1572,7 +1557,6 @@ __sev_send_update_data_query_lengths(struct kvm *kvm, struct kvm_sev_cmd *argp,
static int sev_send_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
- struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
struct sev_data_send_update_data data;
struct kvm_sev_send_update_data params;
void *hdr, *trans_data;
@@ -1626,7 +1610,7 @@ static int sev_send_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
data.guest_address = (page_to_pfn(guest_page[0]) << PAGE_SHIFT) + offset;
data.guest_address |= sev_me_mask;
data.guest_len = params.guest_len;
- data.handle = sev->handle;
+ data.handle = to_kvm_sev_info(kvm)->handle;
ret = sev_issue_cmd(kvm, SEV_CMD_SEND_UPDATE_DATA, &data, &argp->error);
@@ -1657,31 +1641,29 @@ e_unpin:
static int sev_send_finish(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
- struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
struct sev_data_send_finish data;
if (!sev_guest(kvm))
return -ENOTTY;
- data.handle = sev->handle;
+ data.handle = to_kvm_sev_info(kvm)->handle;
return sev_issue_cmd(kvm, SEV_CMD_SEND_FINISH, &data, &argp->error);
}
static int sev_send_cancel(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
- struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
struct sev_data_send_cancel data;
if (!sev_guest(kvm))
return -ENOTTY;
- data.handle = sev->handle;
+ data.handle = to_kvm_sev_info(kvm)->handle;
return sev_issue_cmd(kvm, SEV_CMD_SEND_CANCEL, &data, &argp->error);
}
static int sev_receive_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
- struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
+ struct kvm_sev_info *sev = to_kvm_sev_info(kvm);
struct sev_data_receive_start start;
struct kvm_sev_receive_start params;
int *error = &argp->error;
@@ -1755,7 +1737,6 @@ e_free_pdh:
static int sev_receive_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
- struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
struct kvm_sev_receive_update_data params;
struct sev_data_receive_update_data data;
void *hdr = NULL, *trans = NULL;
@@ -1798,7 +1779,7 @@ static int sev_receive_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
/* Pin guest memory */
guest_page = sev_pin_memory(kvm, params.guest_uaddr & PAGE_MASK,
- PAGE_SIZE, &n, 1);
+ PAGE_SIZE, &n, FOLL_WRITE);
if (IS_ERR(guest_page)) {
ret = PTR_ERR(guest_page);
goto e_free_trans;
@@ -1815,7 +1796,7 @@ static int sev_receive_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
data.guest_address = (page_to_pfn(guest_page[0]) << PAGE_SHIFT) + offset;
data.guest_address |= sev_me_mask;
data.guest_len = params.guest_len;
- data.handle = sev->handle;
+ data.handle = to_kvm_sev_info(kvm)->handle;
ret = sev_issue_cmd(kvm, SEV_CMD_RECEIVE_UPDATE_DATA, &data,
&argp->error);
@@ -1832,13 +1813,12 @@ e_free_hdr:
static int sev_receive_finish(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
- struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
struct sev_data_receive_finish data;
if (!sev_guest(kvm))
return -ENOTTY;
- data.handle = sev->handle;
+ data.handle = to_kvm_sev_info(kvm)->handle;
return sev_issue_cmd(kvm, SEV_CMD_RECEIVE_FINISH, &data, &argp->error);
}
@@ -1858,8 +1838,8 @@ static bool is_cmd_allowed_from_mirror(u32 cmd_id)
static int sev_lock_two_vms(struct kvm *dst_kvm, struct kvm *src_kvm)
{
- struct kvm_sev_info *dst_sev = &to_kvm_svm(dst_kvm)->sev_info;
- struct kvm_sev_info *src_sev = &to_kvm_svm(src_kvm)->sev_info;
+ struct kvm_sev_info *dst_sev = to_kvm_sev_info(dst_kvm);
+ struct kvm_sev_info *src_sev = to_kvm_sev_info(src_kvm);
int r = -EBUSY;
if (dst_kvm == src_kvm)
@@ -1893,8 +1873,8 @@ release_dst:
static void sev_unlock_two_vms(struct kvm *dst_kvm, struct kvm *src_kvm)
{
- struct kvm_sev_info *dst_sev = &to_kvm_svm(dst_kvm)->sev_info;
- struct kvm_sev_info *src_sev = &to_kvm_svm(src_kvm)->sev_info;
+ struct kvm_sev_info *dst_sev = to_kvm_sev_info(dst_kvm);
+ struct kvm_sev_info *src_sev = to_kvm_sev_info(src_kvm);
mutex_unlock(&dst_kvm->lock);
mutex_unlock(&src_kvm->lock);
@@ -1968,8 +1948,8 @@ static void sev_unlock_vcpus_for_migration(struct kvm *kvm)
static void sev_migrate_from(struct kvm *dst_kvm, struct kvm *src_kvm)
{
- struct kvm_sev_info *dst = &to_kvm_svm(dst_kvm)->sev_info;
- struct kvm_sev_info *src = &to_kvm_svm(src_kvm)->sev_info;
+ struct kvm_sev_info *dst = to_kvm_sev_info(dst_kvm);
+ struct kvm_sev_info *src = to_kvm_sev_info(src_kvm);
struct kvm_vcpu *dst_vcpu, *src_vcpu;
struct vcpu_svm *dst_svm, *src_svm;
struct kvm_sev_info *mirror;
@@ -2009,8 +1989,7 @@ static void sev_migrate_from(struct kvm *dst_kvm, struct kvm *src_kvm)
* and add the new mirror to the list.
*/
if (is_mirroring_enc_context(dst_kvm)) {
- struct kvm_sev_info *owner_sev_info =
- &to_kvm_svm(dst->enc_context_owner)->sev_info;
+ struct kvm_sev_info *owner_sev_info = to_kvm_sev_info(dst->enc_context_owner);
list_del(&src->mirror_entry);
list_add_tail(&dst->mirror_entry, &owner_sev_info->mirror_vms);
@@ -2069,7 +2048,7 @@ static int sev_check_source_vcpus(struct kvm *dst, struct kvm *src)
int sev_vm_move_enc_context_from(struct kvm *kvm, unsigned int source_fd)
{
- struct kvm_sev_info *dst_sev = &to_kvm_svm(kvm)->sev_info;
+ struct kvm_sev_info *dst_sev = to_kvm_sev_info(kvm);
struct kvm_sev_info *src_sev, *cg_cleanup_sev;
CLASS(fd, f)(source_fd);
struct kvm *source_kvm;
@@ -2093,7 +2072,7 @@ int sev_vm_move_enc_context_from(struct kvm *kvm, unsigned int source_fd)
goto out_unlock;
}
- src_sev = &to_kvm_svm(source_kvm)->sev_info;
+ src_sev = to_kvm_sev_info(source_kvm);
dst_sev->misc_cg = get_current_misc_cg();
cg_cleanup_sev = dst_sev;
@@ -2181,7 +2160,7 @@ static void *snp_context_create(struct kvm *kvm, struct kvm_sev_cmd *argp)
static int snp_bind_asid(struct kvm *kvm, int *error)
{
- struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
+ struct kvm_sev_info *sev = to_kvm_sev_info(kvm);
struct sev_data_snp_activate data = {0};
data.gctx_paddr = __psp_pa(sev->snp_context);
@@ -2191,7 +2170,7 @@ static int snp_bind_asid(struct kvm *kvm, int *error)
static int snp_launch_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
- struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
+ struct kvm_sev_info *sev = to_kvm_sev_info(kvm);
struct sev_data_snp_launch_start start = {0};
struct kvm_sev_snp_launch_start params;
int rc;
@@ -2260,7 +2239,7 @@ static int sev_gmem_post_populate(struct kvm *kvm, gfn_t gfn_start, kvm_pfn_t pf
void __user *src, int order, void *opaque)
{
struct sev_gmem_populate_args *sev_populate_args = opaque;
- struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
+ struct kvm_sev_info *sev = to_kvm_sev_info(kvm);
int n_private = 0, ret, i;
int npages = (1 << order);
gfn_t gfn;
@@ -2350,7 +2329,7 @@ err:
static int snp_launch_update(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
- struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
+ struct kvm_sev_info *sev = to_kvm_sev_info(kvm);
struct sev_gmem_populate_args sev_populate_args = {0};
struct kvm_sev_snp_launch_update params;
struct kvm_memory_slot *memslot;
@@ -2434,7 +2413,7 @@ out:
static int snp_launch_update_vmsa(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
- struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
+ struct kvm_sev_info *sev = to_kvm_sev_info(kvm);
struct sev_data_snp_launch_update data = {};
struct kvm_vcpu *vcpu;
unsigned long i;
@@ -2482,7 +2461,7 @@ static int snp_launch_update_vmsa(struct kvm *kvm, struct kvm_sev_cmd *argp)
static int snp_launch_finish(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
- struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
+ struct kvm_sev_info *sev = to_kvm_sev_info(kvm);
struct kvm_sev_snp_launch_finish params;
struct sev_data_snp_launch_finish *data;
void *id_block = NULL, *id_auth = NULL;
@@ -2677,7 +2656,7 @@ out:
int sev_mem_enc_register_region(struct kvm *kvm,
struct kvm_enc_region *range)
{
- struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
+ struct kvm_sev_info *sev = to_kvm_sev_info(kvm);
struct enc_region *region;
int ret = 0;
@@ -2696,7 +2675,8 @@ int sev_mem_enc_register_region(struct kvm *kvm,
return -ENOMEM;
mutex_lock(&kvm->lock);
- region->pages = sev_pin_memory(kvm, range->addr, range->size, &region->npages, 1);
+ region->pages = sev_pin_memory(kvm, range->addr, range->size, &region->npages,
+ FOLL_WRITE | FOLL_LONGTERM);
if (IS_ERR(region->pages)) {
ret = PTR_ERR(region->pages);
mutex_unlock(&kvm->lock);
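Passing raw FOLL_* flags through sev_pin_memory() is what makes this FOLL_LONGTERM annotation possible: a registered region stays pinned for the life of the guest, so the pin should be a long-term one, which lets the core MM migrate pages out of ZONE_MOVABLE/CMA before pinning. A minimal sketch of the underlying calling convention (hypothetical helper, not code from this patch):

	static int pin_longterm_region(unsigned long uaddr, int npages, struct page **pages)
	{
		/* Writable, long-term pin; pages may stay pinned for the VM's lifetime. */
		int pinned = pin_user_pages_fast(uaddr, npages,
						 FOLL_WRITE | FOLL_LONGTERM, pages);

		if (pinned < 0)
			return pinned;			/* hard failure */
		if (pinned != npages) {
			unpin_user_pages(pages, pinned);	/* partial pin: back out */
			return -ENOMEM;
		}
		return 0;
	}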
@@ -2729,7 +2709,7 @@ e_free:
static struct enc_region *
find_enc_region(struct kvm *kvm, struct kvm_enc_region *range)
{
- struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
+ struct kvm_sev_info *sev = to_kvm_sev_info(kvm);
struct list_head *head = &sev->regions_list;
struct enc_region *i;
@@ -2824,9 +2804,9 @@ int sev_vm_copy_enc_context_from(struct kvm *kvm, unsigned int source_fd)
* The mirror kvm holds an enc_context_owner ref so its asid can't
* disappear until we're done with it
*/
- source_sev = &to_kvm_svm(source_kvm)->sev_info;
+ source_sev = to_kvm_sev_info(source_kvm);
kvm_get_kvm(source_kvm);
- mirror_sev = &to_kvm_svm(kvm)->sev_info;
+ mirror_sev = to_kvm_sev_info(kvm);
list_add_tail(&mirror_sev->mirror_entry, &source_sev->mirror_vms);
/* Set enc_context_owner and copy its encryption context over */
@@ -2854,7 +2834,7 @@ e_unlock:
static int snp_decommission_context(struct kvm *kvm)
{
- struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
+ struct kvm_sev_info *sev = to_kvm_sev_info(kvm);
struct sev_data_snp_addr data = {};
int ret;
@@ -2879,7 +2859,7 @@ static int snp_decommission_context(struct kvm *kvm)
void sev_vm_destroy(struct kvm *kvm)
{
- struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
+ struct kvm_sev_info *sev = to_kvm_sev_info(kvm);
struct list_head *head = &sev->regions_list;
struct list_head *pos, *q;
@@ -3271,7 +3251,7 @@ static void sev_es_sync_from_ghcb(struct vcpu_svm *svm)
if (kvm_ghcb_xcr0_is_valid(svm)) {
vcpu->arch.xcr0 = ghcb_get_xcr0(ghcb);
- kvm_update_cpuid_runtime(vcpu);
+ vcpu->arch.cpuid_dynamic_bits_dirty = true;
}
/* Copy the GHCB exit information into the VMCB fields */
@@ -3430,8 +3410,7 @@ vmgexit_err:
dump_ghcb(svm);
}
- ghcb_set_sw_exit_info_1(svm->sev_es.ghcb, 2);
- ghcb_set_sw_exit_info_2(svm->sev_es.ghcb, reason);
+ svm_vmgexit_bad_input(svm, reason);
/* Resume the guest to "return" the error code. */
return 1;
@@ -3472,10 +3451,19 @@ void sev_es_unmap_ghcb(struct vcpu_svm *svm)
svm->sev_es.ghcb = NULL;
}
-void pre_sev_run(struct vcpu_svm *svm, int cpu)
+int pre_sev_run(struct vcpu_svm *svm, int cpu)
{
struct svm_cpu_data *sd = per_cpu_ptr(&svm_data, cpu);
- unsigned int asid = sev_get_asid(svm->vcpu.kvm);
+ struct kvm *kvm = svm->vcpu.kvm;
+ unsigned int asid = sev_get_asid(kvm);
+
+ /*
+ * Reject KVM_RUN if userspace attempts to run the vCPU with an invalid
+ * VMSA, e.g. if userspace forces the vCPU to be RUNNABLE after an SNP
+ * AP Destroy event.
+ */
+ if (sev_es_guest(kvm) && !VALID_PAGE(svm->vmcb->control.vmsa_pa))
+ return -EINVAL;
/* Assign the asid allocated with this SEV guest */
svm->asid = asid;
@@ -3488,11 +3476,12 @@ void pre_sev_run(struct vcpu_svm *svm, int cpu)
*/
if (sd->sev_vmcbs[asid] == svm->vmcb &&
svm->vcpu.arch.last_vmentry_cpu == cpu)
- return;
+ return 0;
sd->sev_vmcbs[asid] = svm->vmcb;
svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID;
vmcb_mark_dirty(svm->vmcb, VMCB_ASID);
+ return 0;
}
#define GHCB_SCRATCH_AREA_LIMIT (16ULL * PAGE_SIZE)
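The new non-zero return from pre_sev_run() is what ultimately lets svm_vcpu_run() (later in this patch) fail the entry with KVM_EXIT_FAIL_ENTRY instead of running a vCPU whose VMSA was destroyed. The forwarding through pre_svm_run() is outside this diff's context, but presumably reduces to something like:

	/* Hypothetical forwarding site in pre_svm_run(); the call site is not shown in this diff. */
	if (sev_guest(vcpu->kvm))
		return pre_sev_run(to_svm(vcpu), vcpu->cpu);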
@@ -3574,8 +3563,7 @@ static int setup_vmgexit_scratch(struct vcpu_svm *svm, bool sync, u64 len)
return 0;
e_scratch:
- ghcb_set_sw_exit_info_1(svm->sev_es.ghcb, 2);
- ghcb_set_sw_exit_info_2(svm->sev_es.ghcb, GHCB_ERR_INVALID_SCRATCH_AREA);
+ svm_vmgexit_bad_input(svm, GHCB_ERR_INVALID_SCRATCH_AREA);
return 1;
}
@@ -3675,7 +3663,14 @@ static void snp_complete_psc(struct vcpu_svm *svm, u64 psc_ret)
svm->sev_es.psc_inflight = 0;
svm->sev_es.psc_idx = 0;
svm->sev_es.psc_2m = false;
- ghcb_set_sw_exit_info_2(svm->sev_es.ghcb, psc_ret);
+
+ /*
+ * PSC requests always get a "no action" response in SW_EXITINFO1, with
+ * a PSC-specific return code in SW_EXITINFO2 that provides the "real"
+ * return code. E.g. if the PSC request was interrupted, the need to
+ * retry is communicated via SW_EXITINFO2, not SW_EXITINFO1.
+ */
+ svm_vmgexit_no_action(svm, psc_ret);
}
static void __snp_complete_one_psc(struct vcpu_svm *svm)
@@ -3847,110 +3842,90 @@ next_range:
BUG();
}
-static int __sev_snp_update_protected_guest_state(struct kvm_vcpu *vcpu)
+/*
+ * Invoked as part of svm_vcpu_reset() processing of an init event.
+ */
+void sev_snp_init_protected_guest_state(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
+ struct kvm_memory_slot *slot;
+ struct page *page;
+ kvm_pfn_t pfn;
+ gfn_t gfn;
- WARN_ON(!mutex_is_locked(&svm->sev_es.snp_vmsa_mutex));
+ if (!sev_snp_guest(vcpu->kvm))
+ return;
+
+ guard(mutex)(&svm->sev_es.snp_vmsa_mutex);
+
+ if (!svm->sev_es.snp_ap_waiting_for_reset)
+ return;
+
+ svm->sev_es.snp_ap_waiting_for_reset = false;
/* Mark the vCPU as offline and not runnable */
vcpu->arch.pv.pv_unhalted = false;
- vcpu->arch.mp_state = KVM_MP_STATE_HALTED;
+ kvm_set_mp_state(vcpu, KVM_MP_STATE_HALTED);
/* Clear use of the VMSA */
svm->vmcb->control.vmsa_pa = INVALID_PAGE;
- if (VALID_PAGE(svm->sev_es.snp_vmsa_gpa)) {
- gfn_t gfn = gpa_to_gfn(svm->sev_es.snp_vmsa_gpa);
- struct kvm_memory_slot *slot;
- struct page *page;
- kvm_pfn_t pfn;
-
- slot = gfn_to_memslot(vcpu->kvm, gfn);
- if (!slot)
- return -EINVAL;
-
- /*
- * The new VMSA will be private memory guest memory, so
- * retrieve the PFN from the gmem backend.
- */
- if (kvm_gmem_get_pfn(vcpu->kvm, slot, gfn, &pfn, &page, NULL))
- return -EINVAL;
-
- /*
- * From this point forward, the VMSA will always be a
- * guest-mapped page rather than the initial one allocated
- * by KVM in svm->sev_es.vmsa. In theory, svm->sev_es.vmsa
- * could be free'd and cleaned up here, but that involves
- * cleanups like wbinvd_on_all_cpus() which would ideally
- * be handled during teardown rather than guest boot.
- * Deferring that also allows the existing logic for SEV-ES
- * VMSAs to be re-used with minimal SNP-specific changes.
- */
- svm->sev_es.snp_has_guest_vmsa = true;
-
- /* Use the new VMSA */
- svm->vmcb->control.vmsa_pa = pfn_to_hpa(pfn);
-
- /* Mark the vCPU as runnable */
- vcpu->arch.pv.pv_unhalted = false;
- vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
-
- svm->sev_es.snp_vmsa_gpa = INVALID_PAGE;
-
- /*
- * gmem pages aren't currently migratable, but if this ever
- * changes then care should be taken to ensure
- * svm->sev_es.vmsa is pinned through some other means.
- */
- kvm_release_page_clean(page);
- }
-
/*
* When replacing the VMSA during SEV-SNP AP creation,
* mark the VMCB dirty so that full state is always reloaded.
*/
vmcb_mark_all_dirty(svm->vmcb);
- return 0;
-}
+ if (!VALID_PAGE(svm->sev_es.snp_vmsa_gpa))
+ return;
-/*
- * Invoked as part of svm_vcpu_reset() processing of an init event.
- */
-void sev_snp_init_protected_guest_state(struct kvm_vcpu *vcpu)
-{
- struct vcpu_svm *svm = to_svm(vcpu);
- int ret;
+ gfn = gpa_to_gfn(svm->sev_es.snp_vmsa_gpa);
+ svm->sev_es.snp_vmsa_gpa = INVALID_PAGE;
- if (!sev_snp_guest(vcpu->kvm))
+ slot = gfn_to_memslot(vcpu->kvm, gfn);
+ if (!slot)
return;
- mutex_lock(&svm->sev_es.snp_vmsa_mutex);
+ /*
+ * The new VMSA will be private memory guest memory, so retrieve the
+ * PFN from the gmem backend.
+ */
+ if (kvm_gmem_get_pfn(vcpu->kvm, slot, gfn, &pfn, &page, NULL))
+ return;
- if (!svm->sev_es.snp_ap_waiting_for_reset)
- goto unlock;
+ /*
+ * From this point forward, the VMSA will always be a guest-mapped page
+ * rather than the initial one allocated by KVM in svm->sev_es.vmsa. In
+ * theory, svm->sev_es.vmsa could be free'd and cleaned up here, but
+ * that involves cleanups like wbinvd_on_all_cpus() which would ideally
+ * be handled during teardown rather than guest boot. Deferring that
+ * also allows the existing logic for SEV-ES VMSAs to be re-used with
+ * minimal SNP-specific changes.
+ */
+ svm->sev_es.snp_has_guest_vmsa = true;
- svm->sev_es.snp_ap_waiting_for_reset = false;
+ /* Use the new VMSA */
+ svm->vmcb->control.vmsa_pa = pfn_to_hpa(pfn);
- ret = __sev_snp_update_protected_guest_state(vcpu);
- if (ret)
- vcpu_unimpl(vcpu, "snp: AP state update on init failed\n");
+ /* Mark the vCPU as runnable */
+ kvm_set_mp_state(vcpu, KVM_MP_STATE_RUNNABLE);
-unlock:
- mutex_unlock(&svm->sev_es.snp_vmsa_mutex);
+ /*
+ * gmem pages aren't currently migratable, but if this ever changes
+ * then care should be taken to ensure svm->sev_es.vmsa is pinned
+ * through some other means.
+ */
+ kvm_release_page_clean(page);
}
static int sev_snp_ap_creation(struct vcpu_svm *svm)
{
- struct kvm_sev_info *sev = &to_kvm_svm(svm->vcpu.kvm)->sev_info;
+ struct kvm_sev_info *sev = to_kvm_sev_info(svm->vcpu.kvm);
struct kvm_vcpu *vcpu = &svm->vcpu;
struct kvm_vcpu *target_vcpu;
struct vcpu_svm *target_svm;
unsigned int request;
unsigned int apic_id;
- bool kick;
- int ret;
request = lower_32_bits(svm->vmcb->control.exit_info_1);
apic_id = upper_32_bits(svm->vmcb->control.exit_info_1);
@@ -3963,47 +3938,23 @@ static int sev_snp_ap_creation(struct vcpu_svm *svm)
return -EINVAL;
}
- ret = 0;
-
target_svm = to_svm(target_vcpu);
- /*
- * The target vCPU is valid, so the vCPU will be kicked unless the
- * request is for CREATE_ON_INIT. For any errors at this stage, the
- * kick will place the vCPU in an non-runnable state.
- */
- kick = true;
-
- mutex_lock(&target_svm->sev_es.snp_vmsa_mutex);
-
- target_svm->sev_es.snp_vmsa_gpa = INVALID_PAGE;
- target_svm->sev_es.snp_ap_waiting_for_reset = true;
-
- /* Interrupt injection mode shouldn't change for AP creation */
- if (request < SVM_VMGEXIT_AP_DESTROY) {
- u64 sev_features;
-
- sev_features = vcpu->arch.regs[VCPU_REGS_RAX];
- sev_features ^= sev->vmsa_features;
-
- if (sev_features & SVM_SEV_FEAT_INT_INJ_MODES) {
- vcpu_unimpl(vcpu, "vmgexit: invalid AP injection mode [%#lx] from guest\n",
- vcpu->arch.regs[VCPU_REGS_RAX]);
- ret = -EINVAL;
- goto out;
- }
- }
+ guard(mutex)(&target_svm->sev_es.snp_vmsa_mutex);
switch (request) {
case SVM_VMGEXIT_AP_CREATE_ON_INIT:
- kick = false;
- fallthrough;
case SVM_VMGEXIT_AP_CREATE:
+ if (vcpu->arch.regs[VCPU_REGS_RAX] != sev->vmsa_features) {
+ vcpu_unimpl(vcpu, "vmgexit: mismatched AP sev_features [%#lx] != [%#llx] from guest\n",
+ vcpu->arch.regs[VCPU_REGS_RAX], sev->vmsa_features);
+ return -EINVAL;
+ }
+
if (!page_address_valid(vcpu, svm->vmcb->control.exit_info_2)) {
vcpu_unimpl(vcpu, "vmgexit: invalid AP VMSA address [%#llx] from guest\n",
svm->vmcb->control.exit_info_2);
- ret = -EINVAL;
- goto out;
+ return -EINVAL;
}
/*
@@ -4017,30 +3968,32 @@ static int sev_snp_ap_creation(struct vcpu_svm *svm)
vcpu_unimpl(vcpu,
"vmgexit: AP VMSA address [%llx] from guest is unsafe as it is 2M aligned\n",
svm->vmcb->control.exit_info_2);
- ret = -EINVAL;
- goto out;
+ return -EINVAL;
}
target_svm->sev_es.snp_vmsa_gpa = svm->vmcb->control.exit_info_2;
break;
case SVM_VMGEXIT_AP_DESTROY:
+ target_svm->sev_es.snp_vmsa_gpa = INVALID_PAGE;
break;
default:
vcpu_unimpl(vcpu, "vmgexit: invalid AP creation request [%#x] from guest\n",
request);
- ret = -EINVAL;
- break;
+ return -EINVAL;
}
-out:
- if (kick) {
+ target_svm->sev_es.snp_ap_waiting_for_reset = true;
+
+ /*
+ * Unless Creation is deferred until INIT, signal the vCPU to update
+ * its state.
+ */
+ if (request != SVM_VMGEXIT_AP_CREATE_ON_INIT) {
kvm_make_request(KVM_REQ_UPDATE_PROTECTED_GUEST_STATE, target_vcpu);
kvm_vcpu_kick(target_vcpu);
}
- mutex_unlock(&target_svm->sev_es.snp_vmsa_mutex);
-
- return ret;
+ return 0;
}
static int snp_handle_guest_req(struct vcpu_svm *svm, gpa_t req_gpa, gpa_t resp_gpa)
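Both rewritten functions above lean on the scope-based lock guard from <linux/cleanup.h>: guard(mutex)(&lock) acquires the mutex and releases it automatically when the enclosing scope exits, which is why the early returns no longer need unlock/goto bookkeeping. A self-contained illustration of the pattern (hypothetical structure, not KVM code):

	#include <linux/cleanup.h>
	#include <linux/mutex.h>

	struct widget {
		struct mutex lock;
		int state;
	};

	static int widget_set_state(struct widget *w, int state)
	{
		guard(mutex)(&w->lock);		/* dropped automatically on every return path */

		if (state < 0)
			return -EINVAL;		/* no explicit mutex_unlock() required */

		w->state = state;
		return 0;
	}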
@@ -4079,7 +4032,8 @@ static int snp_handle_guest_req(struct vcpu_svm *svm, gpa_t req_gpa, gpa_t resp_
goto out_unlock;
}
- ghcb_set_sw_exit_info_2(svm->sev_es.ghcb, SNP_GUEST_ERR(0, fw_err));
+ /* No action is requested *from KVM* if there was a firmware error. */
+ svm_vmgexit_no_action(svm, SNP_GUEST_ERR(0, fw_err));
ret = 1; /* resume guest */
@@ -4135,8 +4089,7 @@ static int snp_handle_ext_guest_req(struct vcpu_svm *svm, gpa_t req_gpa, gpa_t r
return snp_handle_guest_req(svm, req_gpa, resp_gpa);
request_invalid:
- ghcb_set_sw_exit_info_1(svm->sev_es.ghcb, 2);
- ghcb_set_sw_exit_info_2(svm->sev_es.ghcb, GHCB_ERR_INVALID_INPUT);
+ svm_vmgexit_bad_input(svm, GHCB_ERR_INVALID_INPUT);
return 1; /* resume guest */
}
@@ -4144,7 +4097,7 @@ static int sev_handle_vmgexit_msr_protocol(struct vcpu_svm *svm)
{
struct vmcb_control_area *control = &svm->vmcb->control;
struct kvm_vcpu *vcpu = &svm->vcpu;
- struct kvm_sev_info *sev = &to_kvm_svm(vcpu->kvm)->sev_info;
+ struct kvm_sev_info *sev = to_kvm_sev_info(vcpu->kvm);
u64 ghcb_info;
int ret = 1;
@@ -4328,8 +4281,7 @@ int sev_handle_vmgexit(struct kvm_vcpu *vcpu)
if (ret)
return ret;
- ghcb_set_sw_exit_info_1(svm->sev_es.ghcb, 0);
- ghcb_set_sw_exit_info_2(svm->sev_es.ghcb, 0);
+ svm_vmgexit_success(svm, 0);
exit_code = kvm_ghcb_get_sw_exit_code(control);
switch (exit_code) {
@@ -4364,7 +4316,7 @@ int sev_handle_vmgexit(struct kvm_vcpu *vcpu)
ret = kvm_emulate_ap_reset_hold(vcpu);
break;
case SVM_VMGEXIT_AP_JUMP_TABLE: {
- struct kvm_sev_info *sev = &to_kvm_svm(vcpu->kvm)->sev_info;
+ struct kvm_sev_info *sev = to_kvm_sev_info(vcpu->kvm);
switch (control->exit_info_1) {
case 0:
@@ -4373,21 +4325,19 @@ int sev_handle_vmgexit(struct kvm_vcpu *vcpu)
break;
case 1:
/* Get AP jump table address */
- ghcb_set_sw_exit_info_2(svm->sev_es.ghcb, sev->ap_jump_table);
+ svm_vmgexit_success(svm, sev->ap_jump_table);
break;
default:
pr_err("svm: vmgexit: unsupported AP jump table request - exit_info_1=%#llx\n",
control->exit_info_1);
- ghcb_set_sw_exit_info_1(svm->sev_es.ghcb, 2);
- ghcb_set_sw_exit_info_2(svm->sev_es.ghcb, GHCB_ERR_INVALID_INPUT);
+ svm_vmgexit_bad_input(svm, GHCB_ERR_INVALID_INPUT);
}
ret = 1;
break;
}
case SVM_VMGEXIT_HV_FEATURES:
- ghcb_set_sw_exit_info_2(svm->sev_es.ghcb, GHCB_HV_FT_SUPPORTED);
-
+ svm_vmgexit_success(svm, GHCB_HV_FT_SUPPORTED);
ret = 1;
break;
case SVM_VMGEXIT_TERM_REQUEST:
@@ -4408,8 +4358,7 @@ int sev_handle_vmgexit(struct kvm_vcpu *vcpu)
case SVM_VMGEXIT_AP_CREATION:
ret = sev_snp_ap_creation(svm);
if (ret) {
- ghcb_set_sw_exit_info_1(svm->sev_es.ghcb, 2);
- ghcb_set_sw_exit_info_2(svm->sev_es.ghcb, GHCB_ERR_INVALID_INPUT);
+ svm_vmgexit_bad_input(svm, GHCB_ERR_INVALID_INPUT);
}
ret = 1;
@@ -4575,7 +4524,7 @@ void sev_init_vmcb(struct vcpu_svm *svm)
void sev_es_vcpu_reset(struct vcpu_svm *svm)
{
struct kvm_vcpu *vcpu = &svm->vcpu;
- struct kvm_sev_info *sev = &to_kvm_svm(vcpu->kvm)->sev_info;
+ struct kvm_sev_info *sev = to_kvm_sev_info(vcpu->kvm);
/*
* Set the GHCB MSR value as per the GHCB specification when emulating
@@ -4590,6 +4539,8 @@ void sev_es_vcpu_reset(struct vcpu_svm *svm)
void sev_es_prepare_switch_to_guest(struct vcpu_svm *svm, struct sev_es_save_area *hostsa)
{
+ struct kvm *kvm = svm->vcpu.kvm;
+
/*
* All host state for SEV-ES guests is categorized into three swap types
* based on how it is handled by hardware during a world switch:
@@ -4613,14 +4564,22 @@ void sev_es_prepare_switch_to_guest(struct vcpu_svm *svm, struct sev_es_save_are
/*
* If DebugSwap is enabled, debug registers are loaded but NOT saved by
- * the CPU (Type-B). If DebugSwap is disabled/unsupported, the CPU both
- * saves and loads debug registers (Type-A).
+ * the CPU (Type-B). If DebugSwap is disabled/unsupported, the CPU does
+ * not save or load debug registers. Sadly, KVM can't prevent SNP
+ * guests from lying about DebugSwap on secondary vCPUs, i.e. the
+ * SEV_FEATURES provided at "AP Create" isn't guaranteed to match what
+ * the guest has actually enabled (or not!) in the VMSA.
+ *
+ * If DebugSwap is *possible*, save the masks so that they're restored
+ * if the guest enables DebugSwap. But for the DRs themselves, do NOT
+ * rely on the CPU to restore the host values; KVM will restore them as
+ * needed in common code, via hw_breakpoint_restore(). Note, KVM does
+ * NOT support virtualizing Breakpoint Extensions, i.e. the mask MSRs
+ * don't need to be restored per se, KVM just needs to ensure they are
+ * loaded with the correct values *if* the CPU writes the MSRs.
*/
- if (sev_vcpu_has_debug_swap(svm)) {
- hostsa->dr0 = native_get_debugreg(0);
- hostsa->dr1 = native_get_debugreg(1);
- hostsa->dr2 = native_get_debugreg(2);
- hostsa->dr3 = native_get_debugreg(3);
+ if (sev_vcpu_has_debug_swap(svm) ||
+ (sev_snp_guest(kvm) && cpu_feature_enabled(X86_FEATURE_DEBUG_SWAP))) {
hostsa->dr0_addr_mask = amd_get_dr_addr_mask(0);
hostsa->dr1_addr_mask = amd_get_dr_addr_mask(1);
hostsa->dr2_addr_mask = amd_get_dr_addr_mask(2);
@@ -4645,7 +4604,7 @@ void sev_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector)
* Return from an AP Reset Hold VMGEXIT, where the guest will
* set the CS and RIP. Set SW_EXIT_INFO_2 to a non-zero value.
*/
- ghcb_set_sw_exit_info_2(svm->sev_es.ghcb, 1);
+ svm_vmgexit_success(svm, 1);
break;
case AP_RESET_HOLD_MSR_PROTO:
/*
@@ -4843,7 +4802,7 @@ static bool is_large_rmp_possible(struct kvm *kvm, kvm_pfn_t pfn, int order)
int sev_gmem_prepare(struct kvm *kvm, kvm_pfn_t pfn, gfn_t gfn, int max_order)
{
- struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
+ struct kvm_sev_info *sev = to_kvm_sev_info(kvm);
kvm_pfn_t pfn_aligned;
gfn_t gfn_aligned;
int level, rc;
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index a713c803a3a3..d5d0c5c3300b 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -607,6 +607,9 @@ static void svm_disable_virtualization_cpu(void)
kvm_cpu_svm_disable();
amd_pmu_disable_virt();
+
+ if (cpu_feature_enabled(X86_FEATURE_SRSO_BP_SPEC_REDUCE))
+ msr_clear_bit(MSR_ZEN4_BP_CFG, MSR_ZEN4_BP_CFG_BP_SPEC_REDUCE_BIT);
}
static int svm_enable_virtualization_cpu(void)
@@ -684,6 +687,9 @@ static int svm_enable_virtualization_cpu(void)
rdmsr(MSR_TSC_AUX, sev_es_host_save_area(sd)->tsc_aux, msr_hi);
}
+ if (cpu_feature_enabled(X86_FEATURE_SRSO_BP_SPEC_REDUCE))
+ msr_set_bit(MSR_ZEN4_BP_CFG, MSR_ZEN4_BP_CFG_BP_SPEC_REDUCE_BIT);
+
return 0;
}
@@ -1297,8 +1303,12 @@ static void init_vmcb(struct kvm_vcpu *vcpu)
svm_set_intercept(svm, INTERCEPT_MWAIT);
}
- if (!kvm_hlt_in_guest(vcpu->kvm))
- svm_set_intercept(svm, INTERCEPT_HLT);
+ if (!kvm_hlt_in_guest(vcpu->kvm)) {
+ if (cpu_feature_enabled(X86_FEATURE_IDLE_HLT))
+ svm_set_intercept(svm, INTERCEPT_IDLE_HLT);
+ else
+ svm_set_intercept(svm, INTERCEPT_HLT);
+ }
control->iopm_base_pa = iopm_base;
control->msrpm_base_pa = __sme_set(__pa(svm->msrpm));
@@ -1559,7 +1569,8 @@ static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
if (sd->current_vmcb != svm->vmcb) {
sd->current_vmcb = svm->vmcb;
- if (!cpu_feature_enabled(X86_FEATURE_IBPB_ON_VMEXIT))
+ if (!cpu_feature_enabled(X86_FEATURE_IBPB_ON_VMEXIT) &&
+ static_branch_likely(&switch_vcpu_ibpb))
indirect_branch_prediction_barrier();
}
if (kvm_vcpu_apicv_active(vcpu))
@@ -1932,7 +1943,7 @@ void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
vmcb_mark_dirty(to_svm(vcpu)->vmcb, VMCB_CR);
if ((cr4 ^ old_cr4) & (X86_CR4_OSXSAVE | X86_CR4_PKE))
- kvm_update_cpuid_runtime(vcpu);
+ vcpu->arch.cpuid_dynamic_bits_dirty = true;
}
static void svm_set_segment(struct kvm_vcpu *vcpu,
@@ -2973,11 +2984,7 @@ static int svm_complete_emulated_msr(struct kvm_vcpu *vcpu, int err)
if (!err || !sev_es_guest(vcpu->kvm) || WARN_ON_ONCE(!svm->sev_es.ghcb))
return kvm_complete_insn_gp(vcpu, err);
- ghcb_set_sw_exit_info_1(svm->sev_es.ghcb, 1);
- ghcb_set_sw_exit_info_2(svm->sev_es.ghcb,
- X86_TRAP_GP |
- SVM_EVTINJ_TYPE_EXEPT |
- SVM_EVTINJ_VALID);
+ svm_vmgexit_inject_exception(svm, X86_TRAP_GP);
return 1;
}
@@ -3165,6 +3172,27 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
kvm_pr_unimpl_wrmsr(vcpu, ecx, data);
break;
}
+
+ /*
+ * AMD changed the architectural behavior of bits 5:2. On CPUs
+ * without BusLockTrap, bits 5:2 control "external pins", but
+ * on CPUs that support BusLockDetect, bit 2 enables BusLockTrap
+ * and bits 5:3 are reserved-to-zero. Sadly, old KVM allowed
+ * the guest to set bits 5:2 despite not actually virtualizing
+ * Performance-Monitoring/Breakpoint external pins. Drop bits
+ * 5:2 for backwards compatibility.
+ */
+ data &= ~GENMASK(5, 2);
+
+ /*
+ * Suppress BTF as KVM doesn't virtualize BTF, but there's no
+ * way to communicate lack of support to the guest.
+ */
+ if (data & DEBUGCTLMSR_BTF) {
+ kvm_pr_unimpl_wrmsr(vcpu, MSR_IA32_DEBUGCTLMSR, data);
+ data &= ~DEBUGCTLMSR_BTF;
+ }
+
if (data & DEBUGCTL_RESERVED_BITS)
return 1;
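Together with the svm.h hunk later in this patch, which tightens DEBUGCTL_RESERVED_BITS from ~(0x3fULL) to ~DEBUGCTLMSR_LBR, the effective write filter becomes: silently drop bits 5:2, warn once about and drop BTF, and fault on anything other than LBR. In condensed form:

	/* Condensed view of the DEBUGCTL filtering after this patch. */
	data &= ~GENMASK(5, 2);			/* legacy "external pins" bits: silently dropped */
	if (data & DEBUGCTLMSR_BTF)
		data &= ~DEBUGCTLMSR_BTF;	/* BTF is not virtualized: warned about and dropped */
	if (data & ~DEBUGCTLMSR_LBR)
		return 1;			/* everything else is reserved -> inject #GP */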
@@ -3272,6 +3300,17 @@ static int invpcid_interception(struct kvm_vcpu *vcpu)
type = svm->vmcb->control.exit_info_2;
gva = svm->vmcb->control.exit_info_1;
+ /*
+ * FIXME: Perform segment checks for 32-bit mode, and inject #SS if the
+ * stack segment is used. The intercept takes priority over all
+ * #GP checks except CPL>0, but somehow still generates a linear
+ * address? The APM is sorely lacking.
+ */
+ if (is_noncanonical_address(gva, vcpu, 0)) {
+ kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
+ return 1;
+ }
+
return kvm_handle_invpcid(vcpu, type, gva);
}
@@ -3342,6 +3381,7 @@ static int (*const svm_exit_handlers[])(struct kvm_vcpu *vcpu) = {
[SVM_EXIT_CR4_WRITE_TRAP] = cr_trap,
[SVM_EXIT_CR8_WRITE_TRAP] = cr_trap,
[SVM_EXIT_INVPCID] = invpcid_interception,
+ [SVM_EXIT_IDLE_HLT] = kvm_emulate_halt,
[SVM_EXIT_NPF] = npf_interception,
[SVM_EXIT_RSM] = rsm_interception,
[SVM_EXIT_AVIC_INCOMPLETE_IPI] = avic_incomplete_ipi_interception,
@@ -3504,7 +3544,7 @@ int svm_invoke_exit_handler(struct kvm_vcpu *vcpu, u64 exit_code)
return interrupt_window_interception(vcpu);
else if (exit_code == SVM_EXIT_INTR)
return intr_interception(vcpu);
- else if (exit_code == SVM_EXIT_HLT)
+ else if (exit_code == SVM_EXIT_HLT || exit_code == SVM_EXIT_IDLE_HLT)
return kvm_emulate_halt(vcpu);
else if (exit_code == SVM_EXIT_NPF)
return npf_interception(vcpu);
@@ -3587,7 +3627,7 @@ static int svm_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
return svm_invoke_exit_handler(vcpu, exit_code);
}
-static void pre_svm_run(struct kvm_vcpu *vcpu)
+static int pre_svm_run(struct kvm_vcpu *vcpu)
{
struct svm_cpu_data *sd = per_cpu_ptr(&svm_data, vcpu->cpu);
struct vcpu_svm *svm = to_svm(vcpu);
@@ -3609,6 +3649,8 @@ static void pre_svm_run(struct kvm_vcpu *vcpu)
/* FIXME: handle wraparound of asid_generation */
if (svm->current_vmcb->asid_generation != sd->asid_generation)
new_asid(svm, sd);
+
+ return 0;
}
static void svm_inject_nmi(struct kvm_vcpu *vcpu)
@@ -4116,20 +4158,23 @@ static void svm_complete_interrupts(struct kvm_vcpu *vcpu)
vcpu->arch.nmi_injected = true;
svm->nmi_l1_to_l2 = nmi_l1_to_l2;
break;
- case SVM_EXITINTINFO_TYPE_EXEPT:
+ case SVM_EXITINTINFO_TYPE_EXEPT: {
+ u32 error_code = 0;
+
/*
* Never re-inject a #VC exception.
*/
if (vector == X86_TRAP_VC)
break;
- if (exitintinfo & SVM_EXITINTINFO_VALID_ERR) {
- u32 err = svm->vmcb->control.exit_int_info_err;
- kvm_requeue_exception_e(vcpu, vector, err);
+ if (exitintinfo & SVM_EXITINTINFO_VALID_ERR)
+ error_code = svm->vmcb->control.exit_int_info_err;
- } else
- kvm_requeue_exception(vcpu, vector);
+ kvm_requeue_exception(vcpu, vector,
+ exitintinfo & SVM_EXITINTINFO_VALID_ERR,
+ error_code);
break;
+ }
case SVM_EXITINTINFO_TYPE_INTR:
kvm_queue_interrupt(vcpu, vector, false);
break;
@@ -4189,6 +4234,18 @@ static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu, bool spec_ctrl_in
guest_state_enter_irqoff();
+ /*
+ * Set RFLAGS.IF prior to VMRUN, as the host's RFLAGS.IF at the time of
+ * VMRUN controls whether or not physical IRQs are masked (KVM always
+ * runs with V_INTR_MASKING_MASK). Toggle RFLAGS.IF here to avoid the
+ * temptation to do STI+VMRUN+CLI, as AMD CPUs bleed the STI shadow
+ * into guest state if delivery of an event during VMRUN triggers a
+ * #VMEXIT, and the guest_state transitions already tell lockdep that
+ * IRQs are being enabled/disabled. Note! GIF=0 for the entirety of
+ * this path, so IRQs aren't actually unmasked while running host code.
+ */
+ raw_local_irq_enable();
+
amd_clear_divider();
if (sev_es_guest(vcpu->kvm))
@@ -4197,6 +4254,8 @@ static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu, bool spec_ctrl_in
else
__svm_vcpu_run(svm, spec_ctrl_intercepted);
+ raw_local_irq_disable();
+
guest_state_exit_irqoff();
}
@@ -4231,7 +4290,12 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu,
if (force_immediate_exit)
smp_send_reschedule(vcpu->cpu);
- pre_svm_run(vcpu);
+ if (pre_svm_run(vcpu)) {
+ vcpu->run->exit_reason = KVM_EXIT_FAIL_ENTRY;
+ vcpu->run->fail_entry.hardware_entry_failure_reason = SVM_EXIT_ERR;
+ vcpu->run->fail_entry.cpu = vcpu->cpu;
+ return EXIT_FASTPATH_EXIT_USERSPACE;
+ }
sync_lapic_to_cr8(vcpu);
@@ -4253,6 +4317,16 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu,
clgi();
kvm_load_guest_xsave_state(vcpu);
+ /*
+ * Hardware only context switches DEBUGCTL if LBR virtualization is
+ * enabled. Manually load DEBUGCTL if necessary (and restore it after
+ * VM-Exit), as running with the host's DEBUGCTL can negatively affect
+ * guest state and can even be fatal, e.g. due to Bus Lock Detect.
+ */
+ if (!(svm->vmcb->control.virt_ext & LBR_CTL_ENABLE_MASK) &&
+ vcpu->arch.host_debugctl != svm->vmcb->save.dbgctl)
+ update_debugctlmsr(svm->vmcb->save.dbgctl);
+
kvm_wait_lapic_expire(vcpu);
/*
@@ -4280,6 +4354,10 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu,
if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
kvm_before_interrupt(vcpu, KVM_HANDLING_NMI);
+ if (!(svm->vmcb->control.virt_ext & LBR_CTL_ENABLE_MASK) &&
+ vcpu->arch.host_debugctl != svm->vmcb->save.dbgctl)
+ update_debugctlmsr(vcpu->arch.host_debugctl);
+
kvm_load_host_xsave_state(vcpu);
stgi();
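The manual DEBUGCTL swap above assumes vcpu->arch.host_debugctl already holds the host's MSR_IA32_DEBUGCTLMSR value when svm_vcpu_run() is reached. The snapshot site is not part of this diff (the vmx.c hunk below only removes the old per-VMX copy), but it presumably amounts to a read in common vCPU-load code along the lines of:

	/* Presumed snapshot in common x86 code; the actual site is outside this diff. */
	vcpu->arch.host_debugctl = get_debugctlmsr();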
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index 9d7cdb8fbf87..d4490eaed55d 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -361,20 +361,18 @@ static __always_inline struct kvm_sev_info *to_kvm_sev_info(struct kvm *kvm)
#ifdef CONFIG_KVM_AMD_SEV
static __always_inline bool sev_guest(struct kvm *kvm)
{
- struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
-
- return sev->active;
+ return to_kvm_sev_info(kvm)->active;
}
static __always_inline bool sev_es_guest(struct kvm *kvm)
{
- struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
+ struct kvm_sev_info *sev = to_kvm_sev_info(kvm);
return sev->es_active && !WARN_ON_ONCE(!sev->active);
}
static __always_inline bool sev_snp_guest(struct kvm *kvm)
{
- struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
+ struct kvm_sev_info *sev = to_kvm_sev_info(kvm);
return (sev->vmsa_features & SVM_SEV_FEAT_SNP_ACTIVE) &&
!WARN_ON_ONCE(!sev_es_guest(kvm));
@@ -581,10 +579,39 @@ static inline bool is_vnmi_enabled(struct vcpu_svm *svm)
return false;
}
+static inline void svm_vmgexit_set_return_code(struct vcpu_svm *svm,
+ u64 response, u64 data)
+{
+ ghcb_set_sw_exit_info_1(svm->sev_es.ghcb, response);
+ ghcb_set_sw_exit_info_2(svm->sev_es.ghcb, data);
+}
+
+static inline void svm_vmgexit_inject_exception(struct vcpu_svm *svm, u8 vector)
+{
+ u64 data = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_EXEPT | vector;
+
+ svm_vmgexit_set_return_code(svm, GHCB_HV_RESP_ISSUE_EXCEPTION, data);
+}
+
+static inline void svm_vmgexit_bad_input(struct vcpu_svm *svm, u64 suberror)
+{
+ svm_vmgexit_set_return_code(svm, GHCB_HV_RESP_MALFORMED_INPUT, suberror);
+}
+
+static inline void svm_vmgexit_success(struct vcpu_svm *svm, u64 data)
+{
+ svm_vmgexit_set_return_code(svm, GHCB_HV_RESP_NO_ACTION, data);
+}
+
+static inline void svm_vmgexit_no_action(struct vcpu_svm *svm, u64 data)
+{
+ svm_vmgexit_set_return_code(svm, GHCB_HV_RESP_NO_ACTION, data);
+}
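These wrappers encode the GHCB response protocol: SW_EXITINFO1 carries the response class and SW_EXITINFO2 the associated data. The open-coded magic number 2 in the call sites replaced earlier in this patch presumably corresponds to GHCB_HV_RESP_MALFORMED_INPUT, so a typical error path shrinks from two calls to one:

	/* before */
	ghcb_set_sw_exit_info_1(svm->sev_es.ghcb, 2);
	ghcb_set_sw_exit_info_2(svm->sev_es.ghcb, GHCB_ERR_INVALID_INPUT);

	/* after */
	svm_vmgexit_bad_input(svm, GHCB_ERR_INVALID_INPUT);

Note that svm_vmgexit_success() and svm_vmgexit_no_action() expand to the same thing; the two names exist to document intent at the call site (a successful request versus a response whose status lives entirely in SW_EXITINFO2).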
+
/* svm.c */
#define MSR_INVALID 0xffffffffU
-#define DEBUGCTL_RESERVED_BITS (~(0x3fULL))
+#define DEBUGCTL_RESERVED_BITS (~DEBUGCTLMSR_LBR)
extern bool dump_invalid_vmcb;
@@ -715,7 +742,7 @@ void avic_refresh_virtual_apic_mode(struct kvm_vcpu *vcpu);
/* sev.c */
-void pre_sev_run(struct vcpu_svm *svm, int cpu);
+int pre_sev_run(struct vcpu_svm *svm, int cpu);
void sev_init_vmcb(struct vcpu_svm *svm);
void sev_vcpu_after_set_cpuid(struct vcpu_svm *svm);
int sev_es_string_io(struct vcpu_svm *svm, int size, unsigned int port, int in);
diff --git a/arch/x86/kvm/svm/vmenter.S b/arch/x86/kvm/svm/vmenter.S
index 2ed80aea3bb1..0c61153b275f 100644
--- a/arch/x86/kvm/svm/vmenter.S
+++ b/arch/x86/kvm/svm/vmenter.S
@@ -170,12 +170,8 @@ SYM_FUNC_START(__svm_vcpu_run)
mov VCPU_RDI(%_ASM_DI), %_ASM_DI
/* Enter guest mode */
- sti
-
3: vmrun %_ASM_AX
4:
- cli
-
/* Pop @svm to RAX while it's the only available register. */
pop %_ASM_AX
@@ -340,12 +336,8 @@ SYM_FUNC_START(__svm_sev_es_vcpu_run)
mov KVM_VMCB_pa(%rax), %rax
/* Enter guest mode */
- sti
-
1: vmrun %rax
-
-2: cli
-
+2:
/* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */
FILL_RETURN_BUFFER %rax, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_VMEXIT
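Read together with the svm.c hunk that adds raw_local_irq_enable()/raw_local_irq_disable() around the run helpers, the interrupt-flag toggling simply moves from the assembly stubs into their C caller; GIF stays clear for the whole window, so host code never actually runs with interrupts unmasked. The resulting ordering is roughly:

	guest_state_enter_irqoff();
	raw_local_irq_enable();		/* host RFLAGS.IF=1 at VMRUN => physical IRQs are not masked in the guest */
	__svm_vcpu_run(svm, spec_ctrl_intercepted);	/* bare VMRUN, no STI/CLI in the stub */
	raw_local_irq_disable();
	guest_state_exit_irqoff();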
diff --git a/arch/x86/kvm/trace.h b/arch/x86/kvm/trace.h
index 0b844cb97978..ccda95e53f62 100644
--- a/arch/x86/kvm/trace.h
+++ b/arch/x86/kvm/trace.h
@@ -830,12 +830,12 @@ TRACE_EVENT(kvm_emulate_insn,
TP_ARGS(vcpu, failed),
TP_STRUCT__entry(
- __field( __u64, rip )
- __field( __u32, csbase )
- __field( __u8, len )
- __array( __u8, insn, 15 )
- __field( __u8, flags )
- __field( __u8, failed )
+ __field( __u64, rip )
+ __field( __u32, csbase )
+ __field( __u8, len )
+ __array( __u8, insn, X86_MAX_INSTRUCTION_LENGTH )
+ __field( __u8, flags )
+ __field( __u8, failed )
),
TP_fast_assign(
@@ -846,7 +846,7 @@ TRACE_EVENT(kvm_emulate_insn,
__entry->rip = vcpu->arch.emulate_ctxt->_eip - __entry->len;
memcpy(__entry->insn,
vcpu->arch.emulate_ctxt->fetch.data,
- 15);
+ X86_MAX_INSTRUCTION_LENGTH);
__entry->flags = kei_decode_mode(vcpu->arch.emulate_ctxt->mode);
__entry->failed = failed;
),
diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index ed8a3cb53961..5504d9e9fd32 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -2970,7 +2970,7 @@ static int nested_check_vm_entry_controls(struct kvm_vcpu *vcpu,
case INTR_TYPE_SOFT_EXCEPTION:
case INTR_TYPE_SOFT_INTR:
case INTR_TYPE_PRIV_SW_EXCEPTION:
- if (CC(vmcs12->vm_entry_instruction_len > 15) ||
+ if (CC(vmcs12->vm_entry_instruction_len > X86_MAX_INSTRUCTION_LENGTH) ||
CC(vmcs12->vm_entry_instruction_len == 0 &&
CC(!nested_cpu_has_zero_length_injection(vcpu))))
return -EINVAL;
@@ -3771,7 +3771,7 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
break;
case GUEST_ACTIVITY_WAIT_SIPI:
vmx->nested.nested_run_pending = 0;
- vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED;
+ kvm_set_mp_state(vcpu, KVM_MP_STATE_INIT_RECEIVED);
break;
default:
break;
@@ -4618,7 +4618,7 @@ static void sync_vmcs02_to_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
*/
static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
u32 vm_exit_reason, u32 exit_intr_info,
- unsigned long exit_qualification)
+ unsigned long exit_qualification, u32 exit_insn_len)
{
/* update exit information fields: */
vmcs12->vm_exit_reason = vm_exit_reason;
@@ -4646,7 +4646,7 @@ static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
vm_exit_reason, exit_intr_info);
vmcs12->vm_exit_intr_info = exit_intr_info;
- vmcs12->vm_exit_instruction_len = vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
+ vmcs12->vm_exit_instruction_len = exit_insn_len;
vmcs12->vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
/*
@@ -4930,8 +4930,9 @@ vmabort:
* and modify vmcs12 to make it see what it would expect to see there if
* L2 was its real guest. Must only be called when in L2 (is_guest_mode())
*/
-void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 vm_exit_reason,
- u32 exit_intr_info, unsigned long exit_qualification)
+void __nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 vm_exit_reason,
+ u32 exit_intr_info, unsigned long exit_qualification,
+ u32 exit_insn_len)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
@@ -4981,7 +4982,8 @@ void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 vm_exit_reason,
if (vm_exit_reason != -1)
prepare_vmcs12(vcpu, vmcs12, vm_exit_reason,
- exit_intr_info, exit_qualification);
+ exit_intr_info, exit_qualification,
+ exit_insn_len);
/*
* Must happen outside of sync_vmcs02_to_vmcs12() as it will
@@ -5071,7 +5073,7 @@ void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 vm_exit_reason,
vmx->nested.need_vmcs12_to_shadow_sync = true;
/* in case we halted in L2 */
- vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
+ kvm_set_mp_state(vcpu, KVM_MP_STATE_RUNNABLE);
if (likely(!vmx->fail)) {
if (vm_exit_reason != -1)
@@ -5327,9 +5329,8 @@ static int enter_vmx_operation(struct kvm_vcpu *vcpu)
if (enable_shadow_vmcs && !alloc_shadow_vmcs(vcpu))
goto out_shadow_vmcs;
- hrtimer_init(&vmx->nested.preemption_timer, CLOCK_MONOTONIC,
- HRTIMER_MODE_ABS_PINNED);
- vmx->nested.preemption_timer.function = vmx_preemption_timer_fn;
+ hrtimer_setup(&vmx->nested.preemption_timer, vmx_preemption_timer_fn, CLOCK_MONOTONIC,
+ HRTIMER_MODE_ABS_PINNED);
vmx->nested.vpid02 = allocate_vpid();
diff --git a/arch/x86/kvm/vmx/nested.h b/arch/x86/kvm/vmx/nested.h
index 2c296b6abb8c..6eedcfc91070 100644
--- a/arch/x86/kvm/vmx/nested.h
+++ b/arch/x86/kvm/vmx/nested.h
@@ -26,8 +26,26 @@ void nested_vmx_free_vcpu(struct kvm_vcpu *vcpu);
enum nvmx_vmentry_status nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu,
bool from_vmentry);
bool nested_vmx_reflect_vmexit(struct kvm_vcpu *vcpu);
-void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 vm_exit_reason,
- u32 exit_intr_info, unsigned long exit_qualification);
+void __nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 vm_exit_reason,
+ u32 exit_intr_info, unsigned long exit_qualification,
+ u32 exit_insn_len);
+
+static inline void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 vm_exit_reason,
+ u32 exit_intr_info,
+ unsigned long exit_qualification)
+{
+ u32 exit_insn_len;
+
+ if (to_vmx(vcpu)->fail || vm_exit_reason == -1 ||
+ (vm_exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY))
+ exit_insn_len = 0;
+ else
+ exit_insn_len = vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
+
+ __nested_vmx_vmexit(vcpu, vm_exit_reason, exit_intr_info,
+ exit_qualification, exit_insn_len);
+}
+
void nested_sync_vmcs12_to_shadow(struct kvm_vcpu *vcpu);
int vmx_set_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data);
int vmx_get_vmx_msr(struct nested_vmx_msrs *msrs, u32 msr_index, u64 *pdata);
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 6c56d5235f0f..5c5766467a61 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -1477,7 +1477,8 @@ void vmx_vcpu_load_vmcs(struct kvm_vcpu *vcpu, int cpu,
* performs IBPB on nested VM-Exit (a single nested transition
* may switch the active VMCS multiple times).
*/
- if (!buddy || WARN_ON_ONCE(buddy->vmcs != prev))
+ if (static_branch_likely(&switch_vcpu_ibpb) &&
+ (!buddy || WARN_ON_ONCE(buddy->vmcs != prev)))
indirect_branch_prediction_barrier();
}
@@ -1514,16 +1515,12 @@ void vmx_vcpu_load_vmcs(struct kvm_vcpu *vcpu, int cpu,
*/
void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
- struct vcpu_vmx *vmx = to_vmx(vcpu);
-
if (vcpu->scheduled_out && !kvm_pause_in_guest(vcpu->kvm))
shrink_ple_window(vcpu);
vmx_vcpu_load_vmcs(vcpu, cpu, NULL);
vmx_vcpu_pi_load(vcpu, cpu);
-
- vmx->host_debugctlmsr = get_debugctlmsr();
}
void vmx_vcpu_put(struct kvm_vcpu *vcpu)
@@ -2582,6 +2579,34 @@ static u64 adjust_vmx_controls64(u64 ctl_opt, u32 msr)
return ctl_opt & allowed;
}
+#define vmx_check_entry_exit_pairs(pairs, entry_controls, exit_controls) \
+({ \
+ int i, r = 0; \
+ \
+ BUILD_BUG_ON(sizeof(pairs[0].entry_control) != sizeof(entry_controls)); \
+ BUILD_BUG_ON(sizeof(pairs[0].exit_control) != sizeof(exit_controls)); \
+ \
+ for (i = 0; i < ARRAY_SIZE(pairs); i++) { \
+ typeof(entry_controls) n_ctrl = pairs[i].entry_control; \
+ typeof(exit_controls) x_ctrl = pairs[i].exit_control; \
+ \
+ if (!(entry_controls & n_ctrl) == !(exit_controls & x_ctrl)) \
+ continue; \
+ \
+ pr_warn_once("Inconsistent VM-Entry/VM-Exit pair, " \
+ "entry = %llx (%llx), exit = %llx (%llx)\n", \
+ (u64)(entry_controls & n_ctrl), (u64)n_ctrl, \
+ (u64)(exit_controls & x_ctrl), (u64)x_ctrl); \
+ \
+ if (error_on_inconsistent_vmcs_config) \
+ r = -EIO; \
+ \
+ entry_controls &= ~n_ctrl; \
+ exit_controls &= ~x_ctrl; \
+ } \
+ r; \
+})
+
static int setup_vmcs_config(struct vmcs_config *vmcs_conf,
struct vmx_capability *vmx_cap)
{
@@ -2593,7 +2618,6 @@ static int setup_vmcs_config(struct vmcs_config *vmcs_conf,
u32 _vmentry_control = 0;
u64 basic_msr;
u64 misc_msr;
- int i;
/*
* LOAD/SAVE_DEBUG_CONTROLS are absent because both are mandatory.
@@ -2697,22 +2721,9 @@ static int setup_vmcs_config(struct vmcs_config *vmcs_conf,
&_vmentry_control))
return -EIO;
- for (i = 0; i < ARRAY_SIZE(vmcs_entry_exit_pairs); i++) {
- u32 n_ctrl = vmcs_entry_exit_pairs[i].entry_control;
- u32 x_ctrl = vmcs_entry_exit_pairs[i].exit_control;
-
- if (!(_vmentry_control & n_ctrl) == !(_vmexit_control & x_ctrl))
- continue;
-
- pr_warn_once("Inconsistent VM-Entry/VM-Exit pair, entry = %x, exit = %x\n",
- _vmentry_control & n_ctrl, _vmexit_control & x_ctrl);
-
- if (error_on_inconsistent_vmcs_config)
- return -EIO;
-
- _vmentry_control &= ~n_ctrl;
- _vmexit_control &= ~x_ctrl;
- }
+ if (vmx_check_entry_exit_pairs(vmcs_entry_exit_pairs,
+ _vmentry_control, _vmexit_control))
+ return -EIO;
/*
* Some cpus support VM_{ENTRY,EXIT}_IA32_PERF_GLOBAL_CTRL but they
@@ -3523,7 +3534,7 @@ void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
vmcs_writel(GUEST_CR4, hw_cr4);
if ((cr4 ^ old_cr4) & (X86_CR4_OSXSAVE | X86_CR4_PKE))
- kvm_update_cpuid_runtime(vcpu);
+ vcpu->arch.cpuid_dynamic_bits_dirty = true;
}
void vmx_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg)
@@ -5215,6 +5226,12 @@ bool vmx_guest_inject_ac(struct kvm_vcpu *vcpu)
(kvm_get_rflags(vcpu) & X86_EFLAGS_AC);
}
+static bool is_xfd_nm_fault(struct kvm_vcpu *vcpu)
+{
+ return vcpu->arch.guest_fpu.fpstate->xfd &&
+ !kvm_is_cr0_bit_set(vcpu, X86_CR0_TS);
+}
+
static int handle_exception_nmi(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
@@ -5241,7 +5258,8 @@ static int handle_exception_nmi(struct kvm_vcpu *vcpu)
* point.
*/
if (is_nm_fault(intr_info)) {
- kvm_queue_exception(vcpu, NM_VECTOR);
+ kvm_queue_exception_p(vcpu, NM_VECTOR,
+ is_xfd_nm_fault(vcpu) ? vcpu->arch.guest_fpu.xfd_err : 0);
return 1;
}
@@ -5821,7 +5839,7 @@ static int handle_ept_violation(struct kvm_vcpu *vcpu)
error_code |= (exit_qualification & EPT_VIOLATION_ACC_INSTR)
? PFERR_FETCH_MASK : 0;
/* ept page table entry is present? */
- error_code |= (exit_qualification & EPT_VIOLATION_RWX_MASK)
+ error_code |= (exit_qualification & EPT_VIOLATION_PROT_MASK)
? PFERR_PRESENT_MASK : 0;
if (error_code & EPT_VIOLATION_GVA_IS_VALID)
@@ -5875,11 +5893,35 @@ static int handle_nmi_window(struct kvm_vcpu *vcpu)
return 1;
}
-static bool vmx_emulation_required_with_pending_exception(struct kvm_vcpu *vcpu)
+/*
+ * Returns true if emulation is required (due to the vCPU having invalid state
+ * with unrestricted guest mode disabled) and KVM can't faithfully emulate the
+ * current vCPU state.
+ */
+static bool vmx_unhandleable_emulation_required(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
- return vmx->emulation_required && !vmx->rmode.vm86_active &&
+ if (!vmx->emulation_required)
+ return false;
+
+ /*
+ * It is architecturally impossible for emulation to be required when a
+ * nested VM-Enter is pending completion, as VM-Enter will VM-Fail if
+ * guest state is invalid and unrestricted guest is disabled, i.e. KVM
+ * should synthesize VM-Fail instead of emulating L2 code. This path is
+ * only reachable if userspace modifies L2 guest state after KVM has
+ * performed the nested VM-Enter consistency checks.
+ */
+ if (vmx->nested.nested_run_pending)
+ return true;
+
+ /*
+ * KVM only supports emulating exceptions if the vCPU is in Real Mode.
+ * If emulation is required, KVM can't perform a successful VM-Enter to
+ * inject the exception.
+ */
+ return !vmx->rmode.vm86_active &&
(kvm_is_exception_pending(vcpu) || vcpu->arch.exception.injected);
}
@@ -5902,7 +5944,7 @@ static int handle_invalid_guest_state(struct kvm_vcpu *vcpu)
if (!kvm_emulate_instruction(vcpu, 0))
return 0;
- if (vmx_emulation_required_with_pending_exception(vcpu)) {
+ if (vmx_unhandleable_emulation_required(vcpu)) {
kvm_prepare_emulation_failure_exit(vcpu);
return 0;
}
@@ -5926,7 +5968,7 @@ static int handle_invalid_guest_state(struct kvm_vcpu *vcpu)
int vmx_vcpu_pre_run(struct kvm_vcpu *vcpu)
{
- if (vmx_emulation_required_with_pending_exception(vcpu)) {
+ if (vmx_unhandleable_emulation_required(vcpu)) {
kvm_prepare_emulation_failure_exit(vcpu);
return 0;
}
@@ -7001,16 +7043,15 @@ static void handle_nm_fault_irqoff(struct kvm_vcpu *vcpu)
* MSR value is not clobbered by the host activity before the guest
* has chance to consume it.
*
- * Do not blindly read xfd_err here, since this exception might
- * be caused by L1 interception on a platform which doesn't
- * support xfd at all.
+ * Update the guest's XFD_ERR if and only if XFD is enabled, as the #NM
+ * interception may have been caused by L1 interception. Per the SDM,
+ * XFD_ERR is not modified for non-XFD #NM, i.e. if CR0.TS=1.
*
- * Do it conditionally upon guest_fpu::xfd. xfd_err matters
- * only when xfd contains a non-zero value.
- *
- * Queuing exception is done in vmx_handle_exit. See comment there.
+ * Note, XFD_ERR is updated _before_ the #NM interception check, i.e.
+ * unlike CR2 and DR6, the value is not a payload that is attached to
+ * the #NM exception.
*/
- if (vcpu->arch.guest_fpu.fpstate->xfd)
+ if (is_xfd_nm_fault(vcpu))
rdmsrl(MSR_IA32_XFD_ERR, vcpu->arch.guest_fpu.xfd_err);
}
@@ -7161,13 +7202,17 @@ static void __vmx_complete_interrupts(struct kvm_vcpu *vcpu,
case INTR_TYPE_SOFT_EXCEPTION:
vcpu->arch.event_exit_inst_len = vmcs_read32(instr_len_field);
fallthrough;
- case INTR_TYPE_HARD_EXCEPTION:
- if (idt_vectoring_info & VECTORING_INFO_DELIVER_CODE_MASK) {
- u32 err = vmcs_read32(error_code_field);
- kvm_requeue_exception_e(vcpu, vector, err);
- } else
- kvm_requeue_exception(vcpu, vector);
+ case INTR_TYPE_HARD_EXCEPTION: {
+ u32 error_code = 0;
+
+ if (idt_vectoring_info & VECTORING_INFO_DELIVER_CODE_MASK)
+ error_code = vmcs_read32(error_code_field);
+
+ kvm_requeue_exception(vcpu, vector,
+ idt_vectoring_info & VECTORING_INFO_DELIVER_CODE_MASK,
+ error_code);
break;
+ }
case INTR_TYPE_SOFT_INTR:
vcpu->arch.event_exit_inst_len = vmcs_read32(instr_len_field);
fallthrough;
@@ -7458,8 +7503,8 @@ fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu, bool force_immediate_exit)
}
/* MSR_IA32_DEBUGCTLMSR is zeroed on vmexit. Restore it if needed */
- if (vmx->host_debugctlmsr)
- update_debugctlmsr(vmx->host_debugctlmsr);
+ if (vcpu->arch.host_debugctl)
+ update_debugctlmsr(vcpu->arch.host_debugctl);
#ifndef CONFIG_X86_64
/*
@@ -8009,38 +8054,50 @@ static __init void vmx_set_cpu_caps(void)
kvm_cpu_cap_check_and_set(X86_FEATURE_WAITPKG);
}
-static int vmx_check_intercept_io(struct kvm_vcpu *vcpu,
- struct x86_instruction_info *info)
+static bool vmx_is_io_intercepted(struct kvm_vcpu *vcpu,
+ struct x86_instruction_info *info,
+ unsigned long *exit_qualification)
{
struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
unsigned short port;
- bool intercept;
int size;
+ bool imm;
+
+ /*
+ * If the 'use IO bitmaps' VM-execution control is 0, IO instruction
+ * VM-exits depend on the 'unconditional IO exiting' VM-execution
+ * control.
+ *
+ * Otherwise, IO instruction VM-exits are controlled by the IO bitmaps.
+ */
+ if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS))
+ return nested_cpu_has(vmcs12, CPU_BASED_UNCOND_IO_EXITING);
if (info->intercept == x86_intercept_in ||
info->intercept == x86_intercept_ins) {
port = info->src_val;
size = info->dst_bytes;
+ imm = info->src_type == OP_IMM;
} else {
port = info->dst_val;
size = info->src_bytes;
+ imm = info->dst_type == OP_IMM;
}
- /*
- * If the 'use IO bitmaps' VM-execution control is 0, IO instruction
- * VM-exits depend on the 'unconditional IO exiting' VM-execution
- * control.
- *
- * Otherwise, IO instruction VM-exits are controlled by the IO bitmaps.
- */
- if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS))
- intercept = nested_cpu_has(vmcs12,
- CPU_BASED_UNCOND_IO_EXITING);
- else
- intercept = nested_vmx_check_io_bitmaps(vcpu, port, size);
- /* FIXME: produce nested vmexit and return X86EMUL_INTERCEPTED. */
- return intercept ? X86EMUL_UNHANDLEABLE : X86EMUL_CONTINUE;
+ *exit_qualification = ((unsigned long)port << 16) | (size - 1);
+
+ if (info->intercept == x86_intercept_ins ||
+ info->intercept == x86_intercept_outs)
+ *exit_qualification |= BIT(4);
+
+ if (info->rep_prefix)
+ *exit_qualification |= BIT(5);
+
+ if (imm)
+ *exit_qualification |= BIT(6);
+
+ return nested_vmx_check_io_bitmaps(vcpu, port, size);
}
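
As a standalone sanity check of the encoding built above (illustration only, not part of the patch): the helper packs size-1 into bits 2:0, flags INS/OUTS in bit 4, a REP prefix in bit 5, an immediate port operand in bit 6, and the port number into bits 31:16. For a hypothetical "rep outsb" to port 0x3f8 that gives 0x3f80030:

#include <stdbool.h>
#include <stdio.h>

/* Mirrors the exit-qualification layout assembled by vmx_is_io_intercepted(). */
static unsigned long io_exit_qualification(unsigned short port, int size,
					   bool string, bool rep, bool imm)
{
	unsigned long q = ((unsigned long)port << 16) | (size - 1);

	if (string)
		q |= 1UL << 4;
	if (rep)
		q |= 1UL << 5;
	if (imm)
		q |= 1UL << 6;
	return q;
}

int main(void)
{
	/* "rep outsb" to port 0x3f8: 1-byte string access with a REP prefix. */
	printf("%#lx\n", io_exit_qualification(0x3f8, 1, true, true, false));
	return 0;
}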
int vmx_check_intercept(struct kvm_vcpu *vcpu,
@@ -8049,26 +8106,34 @@ int vmx_check_intercept(struct kvm_vcpu *vcpu,
struct x86_exception *exception)
{
struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
+ unsigned long exit_qualification = 0;
+ u32 vm_exit_reason;
+ u64 exit_insn_len;
switch (info->intercept) {
- /*
- * RDPID causes #UD if disabled through secondary execution controls.
- * Because it is marked as EmulateOnUD, we need to intercept it here.
- * Note, RDPID is hidden behind ENABLE_RDTSCP.
- */
case x86_intercept_rdpid:
+ /*
+ * RDPID causes #UD if not enabled through secondary execution
+ * controls (ENABLE_RDTSCP). Note, the implicit MSR access to
+ * TSC_AUX is NOT subject to interception, i.e. checking only
+ * the dedicated execution control is architecturally correct.
+ */
if (!nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_RDTSCP)) {
exception->vector = UD_VECTOR;
exception->error_code_valid = false;
return X86EMUL_PROPAGATE_FAULT;
}
- break;
+ return X86EMUL_CONTINUE;
case x86_intercept_in:
case x86_intercept_ins:
case x86_intercept_out:
case x86_intercept_outs:
- return vmx_check_intercept_io(vcpu, info);
+ if (!vmx_is_io_intercepted(vcpu, info, &exit_qualification))
+ return X86EMUL_CONTINUE;
+
+ vm_exit_reason = EXIT_REASON_IO_INSTRUCTION;
+ break;
case x86_intercept_lgdt:
case x86_intercept_lidt:
@@ -8081,7 +8146,24 @@ int vmx_check_intercept(struct kvm_vcpu *vcpu,
if (!nested_cpu_has2(vmcs12, SECONDARY_EXEC_DESC))
return X86EMUL_CONTINUE;
- /* FIXME: produce nested vmexit and return X86EMUL_INTERCEPTED. */
+ if (info->intercept == x86_intercept_lldt ||
+ info->intercept == x86_intercept_ltr ||
+ info->intercept == x86_intercept_sldt ||
+ info->intercept == x86_intercept_str)
+ vm_exit_reason = EXIT_REASON_LDTR_TR;
+ else
+ vm_exit_reason = EXIT_REASON_GDTR_IDTR;
+ /*
+ * FIXME: Decode the ModR/M to generate the correct exit
+ * qualification for memory operands.
+ */
+ break;
+
+ case x86_intercept_hlt:
+ if (!nested_cpu_has(vmcs12, CPU_BASED_HLT_EXITING))
+ return X86EMUL_CONTINUE;
+
+ vm_exit_reason = EXIT_REASON_HLT;
break;
case x86_intercept_pause:
@@ -8094,17 +8176,24 @@ int vmx_check_intercept(struct kvm_vcpu *vcpu,
* the PAUSE.
*/
if ((info->rep_prefix != REPE_PREFIX) ||
- !nested_cpu_has2(vmcs12, CPU_BASED_PAUSE_EXITING))
+ !nested_cpu_has(vmcs12, CPU_BASED_PAUSE_EXITING))
return X86EMUL_CONTINUE;
+ vm_exit_reason = EXIT_REASON_PAUSE_INSTRUCTION;
break;
/* TODO: check more intercepts... */
default:
- break;
+ return X86EMUL_UNHANDLEABLE;
}
- return X86EMUL_UNHANDLEABLE;
+ exit_insn_len = abs_diff((s64)info->next_rip, (s64)info->rip);
+ if (!exit_insn_len || exit_insn_len > X86_MAX_INSTRUCTION_LENGTH)
+ return X86EMUL_UNHANDLEABLE;
+
+ __nested_vmx_vmexit(vcpu, vm_exit_reason, 0, exit_qualification,
+ exit_insn_len);
+ return X86EMUL_INTERCEPTED;
}
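
A toy, user-space illustration of the rip/next_rip sanity check used above before synthesizing the nested VM-exit (abs_diff() is reimplemented locally; 15 is the architectural x86 instruction-length limit that X86_MAX_INSTRUCTION_LENGTH is assumed to encode):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define X86_MAX_INSTRUCTION_LENGTH 15

static uint64_t abs_diff(int64_t a, int64_t b)
{
	return a > b ? (uint64_t)(a - b) : (uint64_t)(b - a);
}

int main(void)
{
	/* e.g. a 2-byte I/O instruction at 0x401000 with next_rip 0x401002 */
	uint64_t len = abs_diff(0x401002, 0x401000);

	if (!len || len > X86_MAX_INSTRUCTION_LENGTH)
		puts("unhandleable: implausible instruction length");
	else
		printf("VM-exit instruction length = %" PRIu64 "\n", len);
	return 0;
}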
#ifdef CONFIG_X86_64
diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
index 8b111ce1087c..951e44dc9d0e 100644
--- a/arch/x86/kvm/vmx/vmx.h
+++ b/arch/x86/kvm/vmx/vmx.h
@@ -340,8 +340,6 @@ struct vcpu_vmx {
/* apic deadline value in host tsc */
u64 hv_deadline_tsc;
- unsigned long host_debugctlmsr;
-
/*
* Only bits masked by msr_ia32_feature_control_valid_bits can be set in
* msr_ia32_feature_control. FEAT_CTL_LOCKED is always included
diff --git a/arch/x86/kvm/vmx/vmx_ops.h b/arch/x86/kvm/vmx/vmx_ops.h
index 633c87e2fd92..96677576c836 100644
--- a/arch/x86/kvm/vmx/vmx_ops.h
+++ b/arch/x86/kvm/vmx/vmx_ops.h
@@ -118,7 +118,7 @@ do_exception:
#else /* !CONFIG_CC_HAS_ASM_GOTO_OUTPUT */
- asm volatile("1: vmread %2, %1\n\t"
+ asm volatile("1: vmread %[field], %[output]\n\t"
".byte 0x3e\n\t" /* branch taken hint */
"ja 3f\n\t"
@@ -127,24 +127,26 @@ do_exception:
* @field, and bounce through the trampoline to preserve
* volatile registers.
*/
- "xorl %k1, %k1\n\t"
+ "xorl %k[output], %k[output]\n\t"
"2:\n\t"
- "push %1\n\t"
- "push %2\n\t"
+ "push %[output]\n\t"
+ "push %[field]\n\t"
"call vmread_error_trampoline\n\t"
/*
* Unwind the stack. Note, the trampoline zeros out the
* memory for @fault so that the result is '0' on error.
*/
- "pop %2\n\t"
- "pop %1\n\t"
+ "pop %[field]\n\t"
+ "pop %[output]\n\t"
"3:\n\t"
/* VMREAD faulted. As above, except push '1' for @fault. */
- _ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_ONE_REG, %1)
+ _ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_ONE_REG, %[output])
- : ASM_CALL_CONSTRAINT, "=&r"(value) : "r"(field) : "cc");
+ : ASM_CALL_CONSTRAINT, [output] "=&r" (value)
+ : [field] "r" (field)
+ : "cc");
return value;
#endif /* CONFIG_CC_HAS_ASM_GOTO_OUTPUT */
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 6fc4ddc606bd..c841817a914a 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -800,9 +800,9 @@ static void kvm_queue_exception_vmexit(struct kvm_vcpu *vcpu, unsigned int vecto
ex->payload = payload;
}
-static void kvm_multiple_exception(struct kvm_vcpu *vcpu,
- unsigned nr, bool has_error, u32 error_code,
- bool has_payload, unsigned long payload, bool reinject)
+static void kvm_multiple_exception(struct kvm_vcpu *vcpu, unsigned int nr,
+ bool has_error, u32 error_code,
+ bool has_payload, unsigned long payload)
{
u32 prev_nr;
int class1, class2;
@@ -810,13 +810,10 @@ static void kvm_multiple_exception(struct kvm_vcpu *vcpu,
kvm_make_request(KVM_REQ_EVENT, vcpu);
/*
- * If the exception is destined for L2 and isn't being reinjected,
- * morph it to a VM-Exit if L1 wants to intercept the exception. A
- * previously injected exception is not checked because it was checked
- * when it was original queued, and re-checking is incorrect if _L1_
- * injected the exception, in which case it's exempt from interception.
+ * If the exception is destined for L2, morph it to a VM-Exit if L1
+ * wants to intercept the exception.
*/
- if (!reinject && is_guest_mode(vcpu) &&
+ if (is_guest_mode(vcpu) &&
kvm_x86_ops.nested_ops->is_exception_vmexit(vcpu, nr, error_code)) {
kvm_queue_exception_vmexit(vcpu, nr, has_error, error_code,
has_payload, payload);
@@ -825,28 +822,9 @@ static void kvm_multiple_exception(struct kvm_vcpu *vcpu,
if (!vcpu->arch.exception.pending && !vcpu->arch.exception.injected) {
queue:
- if (reinject) {
- /*
- * On VM-Entry, an exception can be pending if and only
- * if event injection was blocked by nested_run_pending.
- * In that case, however, vcpu_enter_guest() requests an
- * immediate exit, and the guest shouldn't proceed far
- * enough to need reinjection.
- */
- WARN_ON_ONCE(kvm_is_exception_pending(vcpu));
- vcpu->arch.exception.injected = true;
- if (WARN_ON_ONCE(has_payload)) {
- /*
- * A reinjected event has already
- * delivered its payload.
- */
- has_payload = false;
- payload = 0;
- }
- } else {
- vcpu->arch.exception.pending = true;
- vcpu->arch.exception.injected = false;
- }
+ vcpu->arch.exception.pending = true;
+ vcpu->arch.exception.injected = false;
+
vcpu->arch.exception.has_error_code = has_error;
vcpu->arch.exception.vector = nr;
vcpu->arch.exception.error_code = error_code;
@@ -887,29 +865,52 @@ static void kvm_multiple_exception(struct kvm_vcpu *vcpu,
void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr)
{
- kvm_multiple_exception(vcpu, nr, false, 0, false, 0, false);
+ kvm_multiple_exception(vcpu, nr, false, 0, false, 0);
}
EXPORT_SYMBOL_GPL(kvm_queue_exception);
-void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr)
-{
- kvm_multiple_exception(vcpu, nr, false, 0, false, 0, true);
-}
-EXPORT_SYMBOL_GPL(kvm_requeue_exception);
void kvm_queue_exception_p(struct kvm_vcpu *vcpu, unsigned nr,
unsigned long payload)
{
- kvm_multiple_exception(vcpu, nr, false, 0, true, payload, false);
+ kvm_multiple_exception(vcpu, nr, false, 0, true, payload);
}
EXPORT_SYMBOL_GPL(kvm_queue_exception_p);
static void kvm_queue_exception_e_p(struct kvm_vcpu *vcpu, unsigned nr,
u32 error_code, unsigned long payload)
{
- kvm_multiple_exception(vcpu, nr, true, error_code,
- true, payload, false);
+ kvm_multiple_exception(vcpu, nr, true, error_code, true, payload);
+}
+
+void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned int nr,
+ bool has_error_code, u32 error_code)
+{
+ /*
+ * On VM-Entry, an exception can be pending if and only if event
+ * injection was blocked by nested_run_pending. In that case, however,
+ * vcpu_enter_guest() requests an immediate exit, and the guest
+ * shouldn't proceed far enough to need reinjection.
+ */
+ WARN_ON_ONCE(kvm_is_exception_pending(vcpu));
+
+ /*
+ * Do not check for interception when injecting an event for L2, as the
+ * exception was checked for intercept when it was originally queued, and
+ * re-checking is incorrect if _L1_ injected the exception, in which
+ * case it's exempt from interception.
+ */
+ kvm_make_request(KVM_REQ_EVENT, vcpu);
+
+ vcpu->arch.exception.injected = true;
+ vcpu->arch.exception.has_error_code = has_error_code;
+ vcpu->arch.exception.vector = nr;
+ vcpu->arch.exception.error_code = error_code;
+ vcpu->arch.exception.has_payload = false;
+ vcpu->arch.exception.payload = 0;
}
+EXPORT_SYMBOL_GPL(kvm_requeue_exception);
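
With the rework above, re-injection callers pass an optional error code in a single call instead of choosing between kvm_requeue_exception() and kvm_requeue_exception_e(). A hedged sketch of such a caller (the exit-info bit layout below is illustrative, not taken from the patch):

/* Illustrative decoder; the field layout is hypothetical. */
static void reinject_hw_exception(struct kvm_vcpu *vcpu, u32 exit_int_info,
				  u32 exit_int_err)
{
	u8 vector = exit_int_info & 0xff;	/* hypothetical: vector in bits 7:0 */
	bool has_err = exit_int_info & BIT(11);	/* hypothetical: error-code-valid bit */

	kvm_requeue_exception(vcpu, vector, has_err, has_err ? exit_int_err : 0);
}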
int kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err)
{
@@ -980,16 +981,10 @@ void kvm_inject_nmi(struct kvm_vcpu *vcpu)
void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
{
- kvm_multiple_exception(vcpu, nr, true, error_code, false, 0, false);
+ kvm_multiple_exception(vcpu, nr, true, error_code, false, 0);
}
EXPORT_SYMBOL_GPL(kvm_queue_exception_e);
-void kvm_requeue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
-{
- kvm_multiple_exception(vcpu, nr, true, error_code, false, 0, true);
-}
-EXPORT_SYMBOL_GPL(kvm_requeue_exception_e);
-
/*
* Checks if cpl <= required_cpl; if true, return true. Otherwise queue
* a #GP and return false.
@@ -1264,7 +1259,7 @@ static int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
vcpu->arch.xcr0 = xcr0;
if ((xcr0 ^ old_xcr0) & XFEATURE_MASK_EXTEND)
- kvm_update_cpuid_runtime(vcpu);
+ vcpu->arch.cpuid_dynamic_bits_dirty = true;
return 0;
}
@@ -2080,10 +2075,20 @@ EXPORT_SYMBOL_GPL(kvm_handle_invalid_op);
static int kvm_emulate_monitor_mwait(struct kvm_vcpu *vcpu, const char *insn)
{
- if (!kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_MWAIT_NEVER_UD_FAULTS) &&
- !guest_cpu_cap_has(vcpu, X86_FEATURE_MWAIT))
+ bool enabled;
+
+ if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_MWAIT_NEVER_UD_FAULTS))
+ goto emulate_as_nop;
+
+ if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT))
+ enabled = guest_cpu_cap_has(vcpu, X86_FEATURE_MWAIT);
+ else
+ enabled = vcpu->arch.ia32_misc_enable_msr & MSR_IA32_MISC_ENABLE_MWAIT;
+
+ if (!enabled)
return kvm_handle_invalid_op(vcpu);
+emulate_as_nop:
pr_warn_once("%s instruction emulated as NOP!\n", insn);
return kvm_emulate_as_nop(vcpu);
}
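
A standalone restatement of the quirk interaction implemented above (sketch only; the parameters are stand-ins for the KVM quirks, guest CPUID, and the MISC_ENABLE MWAIT bit): with MWAIT_NEVER_UD_FAULTS the instruction is always emulated as a NOP, otherwise MISC_ENABLE_NO_MWAIT selects whether guest CPUID or the MSR bit decides between NOP and #UD.

#include <stdbool.h>
#include <stdio.h>

static const char *monitor_mwait_action(bool quirk_never_ud_faults,
					bool quirk_misc_enable_no_mwait,
					bool cpuid_has_mwait,
					bool misc_enable_mwait_bit)
{
	bool enabled;

	if (quirk_never_ud_faults)
		return "emulate as NOP";

	/* With the MISC_ENABLE quirk, guest CPUID decides; otherwise the MSR bit does. */
	enabled = quirk_misc_enable_no_mwait ? cpuid_has_mwait
					     : misc_enable_mwait_bit;

	return enabled ? "emulate as NOP" : "inject #UD";
}

int main(void)
{
	printf("%s\n", monitor_mwait_action(false, true, false, true));
	return 0;
}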
@@ -2569,6 +2574,9 @@ EXPORT_SYMBOL_GPL(kvm_calc_nested_tsc_multiplier);
static void kvm_vcpu_write_tsc_offset(struct kvm_vcpu *vcpu, u64 l1_offset)
{
+ if (vcpu->arch.guest_tsc_protected)
+ return;
+
trace_kvm_write_tsc_offset(vcpu->vcpu_id,
vcpu->arch.l1_tsc_offset,
l1_offset);
@@ -2626,12 +2634,18 @@ static inline bool kvm_check_tsc_unstable(void)
* participates in.
*/
static void __kvm_synchronize_tsc(struct kvm_vcpu *vcpu, u64 offset, u64 tsc,
- u64 ns, bool matched)
+ u64 ns, bool matched, bool user_set_tsc)
{
struct kvm *kvm = vcpu->kvm;
lockdep_assert_held(&kvm->arch.tsc_write_lock);
+ if (vcpu->arch.guest_tsc_protected)
+ return;
+
+ if (user_set_tsc)
+ vcpu->kvm->arch.user_set_tsc = true;
+
/*
* We also track the most recent recorded KHZ, write and time to
* allow the matching interval to be extended at each write.
@@ -2717,8 +2731,6 @@ static void kvm_synchronize_tsc(struct kvm_vcpu *vcpu, u64 *user_value)
}
}
- if (user_value)
- kvm->arch.user_set_tsc = true;
/*
* For a reliable TSC, we can match TSC offsets, and for an unstable
@@ -2738,7 +2750,7 @@ static void kvm_synchronize_tsc(struct kvm_vcpu *vcpu, u64 *user_value)
matched = true;
}
- __kvm_synchronize_tsc(vcpu, offset, data, ns, matched);
+ __kvm_synchronize_tsc(vcpu, offset, data, ns, matched, !!user_value);
raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags);
}
@@ -3116,15 +3128,17 @@ u64 get_kvmclock_ns(struct kvm *kvm)
return data.clock;
}
-static void kvm_setup_guest_pvclock(struct kvm_vcpu *v,
+static void kvm_setup_guest_pvclock(struct pvclock_vcpu_time_info *ref_hv_clock,
+ struct kvm_vcpu *vcpu,
struct gfn_to_pfn_cache *gpc,
- unsigned int offset,
- bool force_tsc_unstable)
+ unsigned int offset)
{
- struct kvm_vcpu_arch *vcpu = &v->arch;
struct pvclock_vcpu_time_info *guest_hv_clock;
+ struct pvclock_vcpu_time_info hv_clock;
unsigned long flags;
+ memcpy(&hv_clock, ref_hv_clock, sizeof(hv_clock));
+
read_lock_irqsave(&gpc->lock, flags);
while (!kvm_gpc_check(gpc, offset + sizeof(*guest_hv_clock))) {
read_unlock_irqrestore(&gpc->lock, flags);
@@ -3144,52 +3158,34 @@ static void kvm_setup_guest_pvclock(struct kvm_vcpu *v,
* it is consistent.
*/
- guest_hv_clock->version = vcpu->hv_clock.version = (guest_hv_clock->version + 1) | 1;
+ guest_hv_clock->version = hv_clock.version = (guest_hv_clock->version + 1) | 1;
smp_wmb();
/* retain PVCLOCK_GUEST_STOPPED if set in guest copy */
- vcpu->hv_clock.flags |= (guest_hv_clock->flags & PVCLOCK_GUEST_STOPPED);
-
- if (vcpu->pvclock_set_guest_stopped_request) {
- vcpu->hv_clock.flags |= PVCLOCK_GUEST_STOPPED;
- vcpu->pvclock_set_guest_stopped_request = false;
- }
+ hv_clock.flags |= (guest_hv_clock->flags & PVCLOCK_GUEST_STOPPED);
- memcpy(guest_hv_clock, &vcpu->hv_clock, sizeof(*guest_hv_clock));
-
- if (force_tsc_unstable)
- guest_hv_clock->flags &= ~PVCLOCK_TSC_STABLE_BIT;
+ memcpy(guest_hv_clock, &hv_clock, sizeof(*guest_hv_clock));
smp_wmb();
- guest_hv_clock->version = ++vcpu->hv_clock.version;
+ guest_hv_clock->version = ++hv_clock.version;
kvm_gpc_mark_dirty_in_slot(gpc);
read_unlock_irqrestore(&gpc->lock, flags);
- trace_kvm_pvclock_update(v->vcpu_id, &vcpu->hv_clock);
+ trace_kvm_pvclock_update(vcpu->vcpu_id, &hv_clock);
}
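
For context, the odd/even version dance above follows the usual pvclock publish protocol: the writer makes the version odd before copying and even afterwards, so a guest reader retries if it observes an in-flight or changed version. A reader-side sketch (kernel-style pseudocode, assuming the standard pvclock ABI; not part of this patch):

static void pvclock_read_snapshot(const struct pvclock_vcpu_time_info *src,
				  struct pvclock_vcpu_time_info *dst)
{
	u32 version;

	do {
		version = READ_ONCE(src->version);
		smp_rmb();			/* version before data */
		*dst = *src;
		smp_rmb();			/* data before the re-check */
	} while ((version & 1) || version != READ_ONCE(src->version));
}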
-static int kvm_guest_time_update(struct kvm_vcpu *v)
+int kvm_guest_time_update(struct kvm_vcpu *v)
{
+ struct pvclock_vcpu_time_info hv_clock = {};
unsigned long flags, tgt_tsc_khz;
unsigned seq;
struct kvm_vcpu_arch *vcpu = &v->arch;
struct kvm_arch *ka = &v->kvm->arch;
s64 kernel_ns;
u64 tsc_timestamp, host_tsc;
- u8 pvclock_flags;
bool use_master_clock;
-#ifdef CONFIG_KVM_XEN
- /*
- * For Xen guests we may need to override PVCLOCK_TSC_STABLE_BIT as unless
- * explicitly told to use TSC as its clocksource Xen will not set this bit.
- * This default behaviour led to bugs in some guest kernels which cause
- * problems if they observe PVCLOCK_TSC_STABLE_BIT in the pvclock flags.
- */
- bool xen_pvclock_tsc_unstable =
- ka->xen_hvm_config.flags & KVM_XEN_HVM_CONFIG_PVCLOCK_TSC_UNSTABLE;
-#endif
kernel_ns = 0;
host_tsc = 0;
@@ -3250,35 +3246,57 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
if (unlikely(vcpu->hw_tsc_khz != tgt_tsc_khz)) {
kvm_get_time_scale(NSEC_PER_SEC, tgt_tsc_khz * 1000LL,
- &vcpu->hv_clock.tsc_shift,
- &vcpu->hv_clock.tsc_to_system_mul);
+ &vcpu->pvclock_tsc_shift,
+ &vcpu->pvclock_tsc_mul);
vcpu->hw_tsc_khz = tgt_tsc_khz;
- kvm_xen_update_tsc_info(v);
}
- vcpu->hv_clock.tsc_timestamp = tsc_timestamp;
- vcpu->hv_clock.system_time = kernel_ns + v->kvm->arch.kvmclock_offset;
+ hv_clock.tsc_shift = vcpu->pvclock_tsc_shift;
+ hv_clock.tsc_to_system_mul = vcpu->pvclock_tsc_mul;
+ hv_clock.tsc_timestamp = tsc_timestamp;
+ hv_clock.system_time = kernel_ns + v->kvm->arch.kvmclock_offset;
vcpu->last_guest_tsc = tsc_timestamp;
/* If the host uses TSC clocksource, then it is stable */
- pvclock_flags = 0;
+ hv_clock.flags = 0;
if (use_master_clock)
- pvclock_flags |= PVCLOCK_TSC_STABLE_BIT;
+ hv_clock.flags |= PVCLOCK_TSC_STABLE_BIT;
- vcpu->hv_clock.flags = pvclock_flags;
+ if (vcpu->pv_time.active) {
+ /*
+ * GUEST_STOPPED is only supported by kvmclock, and KVM's
+ * historic behavior is to only process the request if kvmclock
+ * is active/enabled.
+ */
+ if (vcpu->pvclock_set_guest_stopped_request) {
+ hv_clock.flags |= PVCLOCK_GUEST_STOPPED;
+ vcpu->pvclock_set_guest_stopped_request = false;
+ }
+ kvm_setup_guest_pvclock(&hv_clock, v, &vcpu->pv_time, 0);
+
+ hv_clock.flags &= ~PVCLOCK_GUEST_STOPPED;
+ }
+
+ kvm_hv_setup_tsc_page(v->kvm, &hv_clock);
- if (vcpu->pv_time.active)
- kvm_setup_guest_pvclock(v, &vcpu->pv_time, 0, false);
#ifdef CONFIG_KVM_XEN
+ /*
+ * For Xen guests we may need to override PVCLOCK_TSC_STABLE_BIT as unless
+ * explicitly told to use TSC as its clocksource Xen will not set this bit.
+ * This default behaviour led to bugs in some guest kernels which cause
+ * problems if they observe PVCLOCK_TSC_STABLE_BIT in the pvclock flags.
+ *
+ * Note! Clear TSC_STABLE only for Xen clocks, i.e. the order matters!
+ */
+ if (ka->xen.hvm_config.flags & KVM_XEN_HVM_CONFIG_PVCLOCK_TSC_UNSTABLE)
+ hv_clock.flags &= ~PVCLOCK_TSC_STABLE_BIT;
+
if (vcpu->xen.vcpu_info_cache.active)
- kvm_setup_guest_pvclock(v, &vcpu->xen.vcpu_info_cache,
- offsetof(struct compat_vcpu_info, time),
- xen_pvclock_tsc_unstable);
+ kvm_setup_guest_pvclock(&hv_clock, v, &vcpu->xen.vcpu_info_cache,
+ offsetof(struct compat_vcpu_info, time));
if (vcpu->xen.vcpu_time_info_cache.active)
- kvm_setup_guest_pvclock(v, &vcpu->xen.vcpu_time_info_cache, 0,
- xen_pvclock_tsc_unstable);
+ kvm_setup_guest_pvclock(&hv_clock, v, &vcpu->xen.vcpu_time_info_cache, 0);
#endif
- kvm_hv_setup_tsc_page(v->kvm, &vcpu->hv_clock);
return 0;
}
@@ -3544,7 +3562,7 @@ static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data)
sizeof(u64)))
return 1;
- vcpu->arch.apf.send_user_only = !(data & KVM_ASYNC_PF_SEND_ALWAYS);
+ vcpu->arch.apf.send_always = (data & KVM_ASYNC_PF_SEND_ALWAYS);
vcpu->arch.apf.delivery_as_pf_vmexit = data & KVM_ASYNC_PF_DELIVERY_AS_PF_VMEXIT;
kvm_async_pf_wakeup_all(vcpu);
@@ -3733,7 +3751,13 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
u32 msr = msr_info->index;
u64 data = msr_info->data;
- if (msr && msr == vcpu->kvm->arch.xen_hvm_config.msr)
+ /*
+ * Do not allow host-initiated writes to trigger the Xen hypercall
+ * page setup; it could incur locking paths which are not expected
+ * if userspace sets the MSR in an unusual location.
+ */
+ if (kvm_xen_is_hypercall_page_msr(vcpu->kvm, msr) &&
+ !msr_info->host_initiated)
return kvm_xen_write_hypercall_page(vcpu, data);
switch (msr) {
@@ -3889,7 +3913,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
if (!guest_cpu_cap_has(vcpu, X86_FEATURE_XMM3))
return 1;
vcpu->arch.ia32_misc_enable_msr = data;
- kvm_update_cpuid_runtime(vcpu);
+ vcpu->arch.cpuid_dynamic_bits_dirty = true;
} else {
vcpu->arch.ia32_misc_enable_msr = data;
}
@@ -3906,7 +3930,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
case MSR_IA32_TSC:
if (msr_info->host_initiated) {
kvm_synchronize_tsc(vcpu, &data);
- } else {
+ } else if (!vcpu->arch.guest_tsc_protected) {
u64 adj = kvm_compute_l1_tsc_offset(vcpu, data) - vcpu->arch.l1_tsc_offset;
adjust_tsc_offset_guest(vcpu, adj);
vcpu->arch.ia32_tsc_adjust_msr += adj;
@@ -3924,7 +3948,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
if (data & ~kvm_caps.supported_xss)
return 1;
vcpu->arch.ia32_xss = data;
- kvm_update_cpuid_runtime(vcpu);
+ vcpu->arch.cpuid_dynamic_bits_dirty = true;
break;
case MSR_SMI_COUNT:
if (!msr_info->host_initiated)
@@ -4573,6 +4597,11 @@ static bool kvm_is_vm_type_supported(unsigned long type)
return type < 32 && (kvm_caps.supported_vm_types & BIT(type));
}
+static inline u32 kvm_sync_valid_fields(struct kvm *kvm)
+{
+ return kvm && kvm->arch.has_protected_state ? 0 : KVM_SYNC_X86_VALID_FIELDS;
+}
+
int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
int r = 0;
@@ -4681,7 +4710,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
break;
#endif
case KVM_CAP_SYNC_REGS:
- r = KVM_SYNC_X86_VALID_FIELDS;
+ r = kvm_sync_valid_fields(kvm);
break;
case KVM_CAP_ADJUST_CLOCK:
r = KVM_CLOCK_VALID_FLAGS;
@@ -4986,7 +5015,8 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
u64 offset = kvm_compute_l1_tsc_offset(vcpu,
vcpu->arch.last_guest_tsc);
kvm_vcpu_write_tsc_offset(vcpu, offset);
- vcpu->arch.tsc_catchup = 1;
+ if (!vcpu->arch.guest_tsc_protected)
+ vcpu->arch.tsc_catchup = 1;
}
if (kvm_lapic_hv_timer_in_use(vcpu))
@@ -5725,8 +5755,7 @@ static int kvm_arch_tsc_set_attr(struct kvm_vcpu *vcpu,
tsc = kvm_scale_tsc(rdtsc(), vcpu->arch.l1_tsc_scaling_ratio) + offset;
ns = get_kvmclock_base_ns();
- kvm->arch.user_set_tsc = true;
- __kvm_synchronize_tsc(vcpu, offset, tsc, ns, matched);
+ __kvm_synchronize_tsc(vcpu, offset, tsc, ns, matched, true);
raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags);
r = 0;
@@ -6905,23 +6934,15 @@ static int kvm_arch_suspend_notifier(struct kvm *kvm)
{
struct kvm_vcpu *vcpu;
unsigned long i;
- int ret = 0;
-
- mutex_lock(&kvm->lock);
- kvm_for_each_vcpu(i, vcpu, kvm) {
- if (!vcpu->arch.pv_time.active)
- continue;
- ret = kvm_set_guest_paused(vcpu);
- if (ret) {
- kvm_err("Failed to pause guest VCPU%d: %d\n",
- vcpu->vcpu_id, ret);
- break;
- }
- }
- mutex_unlock(&kvm->lock);
+ /*
+ * Ignore the return, marking the guest paused only "fails" if the vCPU
+ * isn't using kvmclock; continuing on is correct and desirable.
+ */
+ kvm_for_each_vcpu(i, vcpu, kvm)
+ (void)kvm_set_guest_paused(vcpu);
- return ret ? NOTIFY_BAD : NOTIFY_DONE;
+ return NOTIFY_DONE;
}
int kvm_arch_pm_notifier(struct kvm *kvm, unsigned long state)
@@ -10968,6 +10989,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
set_debugreg(0, 7);
}
+ vcpu->arch.host_debugctl = get_debugctlmsr();
+
guest_timing_enter_irqoff();
for (;;) {
@@ -11218,9 +11241,7 @@ static inline int vcpu_block(struct kvm_vcpu *vcpu)
switch(vcpu->arch.mp_state) {
case KVM_MP_STATE_HALTED:
case KVM_MP_STATE_AP_RESET_HOLD:
- vcpu->arch.pv.pv_unhalted = false;
- vcpu->arch.mp_state =
- KVM_MP_STATE_RUNNABLE;
+ kvm_set_mp_state(vcpu, KVM_MP_STATE_RUNNABLE);
fallthrough;
case KVM_MP_STATE_RUNNABLE:
vcpu->arch.apf.halted = false;
@@ -11297,9 +11318,8 @@ static int __kvm_emulate_halt(struct kvm_vcpu *vcpu, int state, int reason)
++vcpu->stat.halt_exits;
if (lapic_in_kernel(vcpu)) {
if (kvm_vcpu_has_events(vcpu))
- vcpu->arch.pv.pv_unhalted = false;
- else
- vcpu->arch.mp_state = state;
+ state = KVM_MP_STATE_RUNNABLE;
+ kvm_set_mp_state(vcpu, state);
return 1;
} else {
vcpu->run->exit_reason = reason;
@@ -11472,6 +11492,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
{
struct kvm_queued_exception *ex = &vcpu->arch.exception;
struct kvm_run *kvm_run = vcpu->run;
+ u32 sync_valid_fields;
int r;
r = kvm_mmu_post_init_vm(vcpu->kvm);
@@ -11517,8 +11538,9 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
goto out;
}
- if ((kvm_run->kvm_valid_regs & ~KVM_SYNC_X86_VALID_FIELDS) ||
- (kvm_run->kvm_dirty_regs & ~KVM_SYNC_X86_VALID_FIELDS)) {
+ sync_valid_fields = kvm_sync_valid_fields(vcpu->kvm);
+ if ((kvm_run->kvm_valid_regs & ~sync_valid_fields) ||
+ (kvm_run->kvm_dirty_regs & ~sync_valid_fields)) {
r = -EINVAL;
goto out;
}
@@ -11576,7 +11598,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
out:
kvm_put_guest_fpu(vcpu);
- if (kvm_run->kvm_valid_regs)
+ if (kvm_run->kvm_valid_regs && likely(!vcpu->arch.guest_state_protected))
store_regs(vcpu);
post_kvm_run_save(vcpu);
kvm_vcpu_srcu_read_unlock(vcpu);
@@ -11819,10 +11841,10 @@ int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
goto out;
if (mp_state->mp_state == KVM_MP_STATE_SIPI_RECEIVED) {
- vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED;
+ kvm_set_mp_state(vcpu, KVM_MP_STATE_INIT_RECEIVED);
set_bit(KVM_APIC_SIPI, &vcpu->arch.apic->pending_events);
} else
- vcpu->arch.mp_state = mp_state->mp_state;
+ kvm_set_mp_state(vcpu, mp_state->mp_state);
kvm_make_request(KVM_REQ_EVENT, vcpu);
ret = 0;
@@ -11949,7 +11971,7 @@ static int __set_sregs_common(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs,
if (kvm_vcpu_is_bsp(vcpu) && kvm_rip_read(vcpu) == 0xfff0 &&
sregs->cs.selector == 0xf000 && sregs->cs.base == 0xffff0000 &&
!is_protmode(vcpu))
- vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
+ kvm_set_mp_state(vcpu, KVM_MP_STATE_RUNNABLE);
return 0;
}
@@ -12252,9 +12274,9 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
kvm_gpc_init(&vcpu->arch.pv_time, vcpu->kvm);
if (!irqchip_in_kernel(vcpu->kvm) || kvm_vcpu_is_reset_bsp(vcpu))
- vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
+ kvm_set_mp_state(vcpu, KVM_MP_STATE_RUNNABLE);
else
- vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED;
+ kvm_set_mp_state(vcpu, KVM_MP_STATE_UNINITIALIZED);
r = kvm_mmu_create(vcpu);
if (r < 0)
@@ -12361,6 +12383,9 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
int idx;
+ kvm_clear_async_pf_completion_queue(vcpu);
+ kvm_mmu_unload(vcpu);
+
kvmclock_reset(vcpu);
kvm_x86_call(vcpu_free)(vcpu);
@@ -12754,31 +12779,6 @@ out:
return ret;
}
-static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu)
-{
- vcpu_load(vcpu);
- kvm_mmu_unload(vcpu);
- vcpu_put(vcpu);
-}
-
-static void kvm_unload_vcpu_mmus(struct kvm *kvm)
-{
- unsigned long i;
- struct kvm_vcpu *vcpu;
-
- kvm_for_each_vcpu(i, vcpu, kvm) {
- kvm_clear_async_pf_completion_queue(vcpu);
- kvm_unload_vcpu_mmu(vcpu);
- }
-}
-
-void kvm_arch_sync_events(struct kvm *kvm)
-{
- cancel_delayed_work_sync(&kvm->arch.kvmclock_sync_work);
- cancel_delayed_work_sync(&kvm->arch.kvmclock_update_work);
- kvm_free_pit(kvm);
-}
-
/**
* __x86_set_memory_region: Setup KVM internal memory slot
*
@@ -12857,6 +12857,17 @@ EXPORT_SYMBOL_GPL(__x86_set_memory_region);
void kvm_arch_pre_destroy_vm(struct kvm *kvm)
{
+ /*
+ * Stop all background workers and kthreads before destroying vCPUs, as
+ * iterating over vCPUs in a different task while vCPUs are being freed
+ * is unsafe, i.e. will lead to use-after-free. The PIT also needs to
+ * be stopped before IRQ routing is freed.
+ */
+ cancel_delayed_work_sync(&kvm->arch.kvmclock_sync_work);
+ cancel_delayed_work_sync(&kvm->arch.kvmclock_update_work);
+
+ kvm_free_pit(kvm);
+
kvm_mmu_pre_destroy_vm(kvm);
}
@@ -12876,9 +12887,7 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
__x86_set_memory_region(kvm, TSS_PRIVATE_MEMSLOT, 0, 0);
mutex_unlock(&kvm->slots_lock);
}
- kvm_unload_vcpu_mmus(kvm);
kvm_destroy_vcpus(kvm);
- kvm_x86_call(vm_destroy)(kvm);
kvm_free_msr_filter(srcu_dereference_check(kvm->arch.msr_filter, &kvm->srcu, 1));
kvm_pic_destroy(kvm);
kvm_ioapic_destroy(kvm);
@@ -12888,6 +12897,7 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
kvm_page_track_cleanup(kvm);
kvm_xen_destroy_vm(kvm);
kvm_hv_destroy_vm(kvm);
+ kvm_x86_call(vm_destroy)(kvm);
}
static void memslot_rmap_free(struct kvm_memory_slot *slot)
@@ -13381,8 +13391,8 @@ static bool kvm_can_deliver_async_pf(struct kvm_vcpu *vcpu)
if (!kvm_pv_async_pf_enabled(vcpu))
return false;
- if (vcpu->arch.apf.send_user_only &&
- kvm_x86_call(get_cpl)(vcpu) == 0)
+ if (!vcpu->arch.apf.send_always &&
+ (vcpu->arch.guest_state_protected || !kvm_x86_call(get_cpl)(vcpu)))
return false;
if (is_guest_mode(vcpu)) {
@@ -13472,7 +13482,7 @@ void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
}
vcpu->arch.apf.halted = false;
- vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
+ kvm_set_mp_state(vcpu, KVM_MP_STATE_RUNNABLE);
}
void kvm_arch_async_page_present_queued(struct kvm_vcpu *vcpu)
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index 91e50a513100..9dc32a409076 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -121,6 +121,13 @@ static inline bool kvm_vcpu_has_run(struct kvm_vcpu *vcpu)
return vcpu->arch.last_vmentry_cpu != -1;
}
+static inline void kvm_set_mp_state(struct kvm_vcpu *vcpu, int mp_state)
+{
+ vcpu->arch.mp_state = mp_state;
+ if (mp_state == KVM_MP_STATE_RUNNABLE)
+ vcpu->arch.pv.pv_unhalted = false;
+}
+
static inline bool kvm_is_exception_pending(struct kvm_vcpu *vcpu)
{
return vcpu->arch.exception.pending ||
@@ -362,6 +369,7 @@ void kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip);
u64 get_kvmclock_ns(struct kvm *kvm);
uint64_t kvm_get_wall_clock_epoch(struct kvm *kvm);
bool kvm_get_monotonic_and_clockread(s64 *kernel_ns, u64 *tsc_timestamp);
+int kvm_guest_time_update(struct kvm_vcpu *v);
int kvm_read_guest_virt(struct kvm_vcpu *vcpu,
gva_t addr, void *val, unsigned int bytes,
diff --git a/arch/x86/kvm/xen.c b/arch/x86/kvm/xen.c
index a909b817b9c0..bd21e9c335ad 100644
--- a/arch/x86/kvm/xen.c
+++ b/arch/x86/kvm/xen.c
@@ -150,11 +150,46 @@ static enum hrtimer_restart xen_timer_callback(struct hrtimer *timer)
return HRTIMER_NORESTART;
}
+static int xen_get_guest_pvclock(struct kvm_vcpu *vcpu,
+ struct pvclock_vcpu_time_info *hv_clock,
+ struct gfn_to_pfn_cache *gpc,
+ unsigned int offset)
+{
+ unsigned long flags;
+ int r;
+
+ read_lock_irqsave(&gpc->lock, flags);
+ while (!kvm_gpc_check(gpc, offset + sizeof(*hv_clock))) {
+ read_unlock_irqrestore(&gpc->lock, flags);
+
+ r = kvm_gpc_refresh(gpc, offset + sizeof(*hv_clock));
+ if (r)
+ return r;
+
+ read_lock_irqsave(&gpc->lock, flags);
+ }
+
+ memcpy(hv_clock, gpc->khva + offset, sizeof(*hv_clock));
+ read_unlock_irqrestore(&gpc->lock, flags);
+
+ /*
+ * Sanity check TSC shift+multiplier to verify the guest's view of time
+ * is more or less consistent.
+ */
+ if (hv_clock->tsc_shift != vcpu->arch.pvclock_tsc_shift ||
+ hv_clock->tsc_to_system_mul != vcpu->arch.pvclock_tsc_mul)
+ return -EINVAL;
+
+ return 0;
+}
+
static void kvm_xen_start_timer(struct kvm_vcpu *vcpu, u64 guest_abs,
bool linux_wa)
{
+ struct kvm_vcpu_xen *xen = &vcpu->arch.xen;
int64_t kernel_now, delta;
uint64_t guest_now;
+ int r = -EOPNOTSUPP;
/*
* The guest provides the requested timeout in absolute nanoseconds
@@ -173,10 +208,29 @@ static void kvm_xen_start_timer(struct kvm_vcpu *vcpu, u64 guest_abs,
* the absolute CLOCK_MONOTONIC time at which the timer should
* fire.
*/
- if (vcpu->arch.hv_clock.version && vcpu->kvm->arch.use_master_clock &&
- static_cpu_has(X86_FEATURE_CONSTANT_TSC)) {
+ do {
+ struct pvclock_vcpu_time_info hv_clock;
uint64_t host_tsc, guest_tsc;
+ if (!static_cpu_has(X86_FEATURE_CONSTANT_TSC) ||
+ !vcpu->kvm->arch.use_master_clock)
+ break;
+
+ /*
+ * If both Xen PV clocks are active, arbitrarily try to use the
+ * compat clock first, but also try to use the non-compat clock
+ * if the compat clock is unusable. The two PV clocks hold the
+ * same information, but it's possible one (or both) is stale
+ * and/or currently unreachable.
+ */
+ if (xen->vcpu_info_cache.active)
+ r = xen_get_guest_pvclock(vcpu, &hv_clock, &xen->vcpu_info_cache,
+ offsetof(struct compat_vcpu_info, time));
+ if (r && xen->vcpu_time_info_cache.active)
+ r = xen_get_guest_pvclock(vcpu, &hv_clock, &xen->vcpu_time_info_cache, 0);
+ if (r)
+ break;
+
if (!IS_ENABLED(CONFIG_64BIT) ||
!kvm_get_monotonic_and_clockread(&kernel_now, &host_tsc)) {
/*
@@ -197,9 +251,10 @@ static void kvm_xen_start_timer(struct kvm_vcpu *vcpu, u64 guest_abs,
/* Calculate the guest kvmclock as the guest would do it. */
guest_tsc = kvm_read_l1_tsc(vcpu, host_tsc);
- guest_now = __pvclock_read_cycles(&vcpu->arch.hv_clock,
- guest_tsc);
- } else {
+ guest_now = __pvclock_read_cycles(&hv_clock, guest_tsc);
+ } while (0);
+
+ if (r) {
/*
* Without CONSTANT_TSC, get_kvmclock_ns() is the only option.
*
@@ -1280,10 +1335,10 @@ int kvm_xen_write_hypercall_page(struct kvm_vcpu *vcpu, u64 data)
* Note, truncation is a non-issue as 'lm' is guaranteed to be
* false for a 32-bit kernel, i.e. when hva_t is only 4 bytes.
*/
- hva_t blob_addr = lm ? kvm->arch.xen_hvm_config.blob_addr_64
- : kvm->arch.xen_hvm_config.blob_addr_32;
- u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
- : kvm->arch.xen_hvm_config.blob_size_32;
+ hva_t blob_addr = lm ? kvm->arch.xen.hvm_config.blob_addr_64
+ : kvm->arch.xen.hvm_config.blob_addr_32;
+ u8 blob_size = lm ? kvm->arch.xen.hvm_config.blob_size_64
+ : kvm->arch.xen.hvm_config.blob_size_32;
u8 *page;
int ret;
@@ -1324,15 +1379,24 @@ int kvm_xen_hvm_config(struct kvm *kvm, struct kvm_xen_hvm_config *xhc)
xhc->blob_size_32 || xhc->blob_size_64))
return -EINVAL;
+ /*
+ * Restrict the MSR to the range that is unofficially reserved for
+ * synthetic, virtualization-defined MSRs, e.g. to prevent confusing
+ * KVM by colliding with a real MSR that requires special handling.
+ */
+ if (xhc->msr &&
+ (xhc->msr < KVM_XEN_MSR_MIN_INDEX || xhc->msr > KVM_XEN_MSR_MAX_INDEX))
+ return -EINVAL;
+
mutex_lock(&kvm->arch.xen.xen_lock);
- if (xhc->msr && !kvm->arch.xen_hvm_config.msr)
+ if (xhc->msr && !kvm->arch.xen.hvm_config.msr)
static_branch_inc(&kvm_xen_enabled.key);
- else if (!xhc->msr && kvm->arch.xen_hvm_config.msr)
+ else if (!xhc->msr && kvm->arch.xen.hvm_config.msr)
static_branch_slow_dec_deferred(&kvm_xen_enabled);
- old_flags = kvm->arch.xen_hvm_config.flags;
- memcpy(&kvm->arch.xen_hvm_config, xhc, sizeof(*xhc));
+ old_flags = kvm->arch.xen.hvm_config.flags;
+ memcpy(&kvm->arch.xen.hvm_config, xhc, sizeof(*xhc));
mutex_unlock(&kvm->arch.xen.xen_lock);
@@ -1413,7 +1477,7 @@ static bool kvm_xen_schedop_poll(struct kvm_vcpu *vcpu, bool longmode,
int i;
if (!lapic_in_kernel(vcpu) ||
- !(vcpu->kvm->arch.xen_hvm_config.flags & KVM_XEN_HVM_CONFIG_EVTCHN_SEND))
+ !(vcpu->kvm->arch.xen.hvm_config.flags & KVM_XEN_HVM_CONFIG_EVTCHN_SEND))
return false;
if (IS_ENABLED(CONFIG_64BIT) && !longmode) {
@@ -1480,7 +1544,7 @@ static bool kvm_xen_schedop_poll(struct kvm_vcpu *vcpu, bool longmode,
set_bit(vcpu->vcpu_idx, vcpu->kvm->arch.xen.poll_mask);
if (!wait_pending_event(vcpu, sched_poll.nr_ports, ports)) {
- vcpu->arch.mp_state = KVM_MP_STATE_HALTED;
+ kvm_set_mp_state(vcpu, KVM_MP_STATE_HALTED);
if (sched_poll.timeout)
mod_timer(&vcpu->arch.xen.poll_timer,
@@ -1491,7 +1555,7 @@ static bool kvm_xen_schedop_poll(struct kvm_vcpu *vcpu, bool longmode,
if (sched_poll.timeout)
del_timer(&vcpu->arch.xen.poll_timer);
- vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
+ kvm_set_mp_state(vcpu, KVM_MP_STATE_RUNNABLE);
}
vcpu->arch.xen.poll_evtchn = 0;
@@ -2225,8 +2289,8 @@ void kvm_xen_init_vcpu(struct kvm_vcpu *vcpu)
vcpu->arch.xen.poll_evtchn = 0;
timer_setup(&vcpu->arch.xen.poll_timer, cancel_evtchn_poll, 0);
- hrtimer_init(&vcpu->arch.xen.timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD);
- vcpu->arch.xen.timer.function = xen_timer_callback;
+ hrtimer_setup(&vcpu->arch.xen.timer, xen_timer_callback, CLOCK_MONOTONIC,
+ HRTIMER_MODE_ABS_HARD);
kvm_gpc_init(&vcpu->arch.xen.runstate_cache, vcpu->kvm);
kvm_gpc_init(&vcpu->arch.xen.runstate2_cache, vcpu->kvm);
@@ -2247,29 +2311,6 @@ void kvm_xen_destroy_vcpu(struct kvm_vcpu *vcpu)
del_timer_sync(&vcpu->arch.xen.poll_timer);
}
-void kvm_xen_update_tsc_info(struct kvm_vcpu *vcpu)
-{
- struct kvm_cpuid_entry2 *entry;
- u32 function;
-
- if (!vcpu->arch.xen.cpuid.base)
- return;
-
- function = vcpu->arch.xen.cpuid.base | XEN_CPUID_LEAF(3);
- if (function > vcpu->arch.xen.cpuid.limit)
- return;
-
- entry = kvm_find_cpuid_entry_index(vcpu, function, 1);
- if (entry) {
- entry->ecx = vcpu->arch.hv_clock.tsc_to_system_mul;
- entry->edx = vcpu->arch.hv_clock.tsc_shift;
- }
-
- entry = kvm_find_cpuid_entry_index(vcpu, function, 2);
- if (entry)
- entry->eax = vcpu->arch.hw_tsc_khz;
-}
-
void kvm_xen_init_vm(struct kvm *kvm)
{
mutex_init(&kvm->arch.xen.xen_lock);
@@ -2291,6 +2332,6 @@ void kvm_xen_destroy_vm(struct kvm *kvm)
}
idr_destroy(&kvm->arch.xen.evtchn_ports);
- if (kvm->arch.xen_hvm_config.msr)
+ if (kvm->arch.xen.hvm_config.msr)
static_branch_slow_dec_deferred(&kvm_xen_enabled);
}
diff --git a/arch/x86/kvm/xen.h b/arch/x86/kvm/xen.h
index f5841d9000ae..59e6128a7bd3 100644
--- a/arch/x86/kvm/xen.h
+++ b/arch/x86/kvm/xen.h
@@ -9,6 +9,7 @@
#ifndef __ARCH_X86_KVM_XEN_H__
#define __ARCH_X86_KVM_XEN_H__
+#include <asm/xen/cpuid.h>
#include <asm/xen/hypervisor.h>
#ifdef CONFIG_KVM_XEN
@@ -35,7 +36,6 @@ int kvm_xen_set_evtchn_fast(struct kvm_xen_evtchn *xe,
int kvm_xen_setup_evtchn(struct kvm *kvm,
struct kvm_kernel_irq_routing_entry *e,
const struct kvm_irq_routing_entry *ue);
-void kvm_xen_update_tsc_info(struct kvm_vcpu *vcpu);
static inline void kvm_xen_sw_enable_lapic(struct kvm_vcpu *vcpu)
{
@@ -50,16 +50,32 @@ static inline void kvm_xen_sw_enable_lapic(struct kvm_vcpu *vcpu)
kvm_xen_inject_vcpu_vector(vcpu);
}
+static inline bool kvm_xen_is_tsc_leaf(struct kvm_vcpu *vcpu, u32 function)
+{
+ return static_branch_unlikely(&kvm_xen_enabled.key) &&
+ vcpu->arch.xen.cpuid.base &&
+ function <= vcpu->arch.xen.cpuid.limit &&
+ function == (vcpu->arch.xen.cpuid.base | XEN_CPUID_LEAF(3));
+}
+
static inline bool kvm_xen_msr_enabled(struct kvm *kvm)
{
return static_branch_unlikely(&kvm_xen_enabled.key) &&
- kvm->arch.xen_hvm_config.msr;
+ kvm->arch.xen.hvm_config.msr;
+}
+
+static inline bool kvm_xen_is_hypercall_page_msr(struct kvm *kvm, u32 msr)
+{
+ if (!static_branch_unlikely(&kvm_xen_enabled.key))
+ return false;
+
+ return msr && msr == kvm->arch.xen.hvm_config.msr;
}
static inline bool kvm_xen_hypercall_enabled(struct kvm *kvm)
{
return static_branch_unlikely(&kvm_xen_enabled.key) &&
- (kvm->arch.xen_hvm_config.flags &
+ (kvm->arch.xen.hvm_config.flags &
KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL);
}
@@ -124,6 +140,11 @@ static inline bool kvm_xen_msr_enabled(struct kvm *kvm)
return false;
}
+static inline bool kvm_xen_is_hypercall_page_msr(struct kvm *kvm, u32 msr)
+{
+ return false;
+}
+
static inline bool kvm_xen_hypercall_enabled(struct kvm *kvm)
{
return false;
@@ -157,8 +178,9 @@ static inline bool kvm_xen_timer_enabled(struct kvm_vcpu *vcpu)
return false;
}
-static inline void kvm_xen_update_tsc_info(struct kvm_vcpu *vcpu)
+static inline bool kvm_xen_is_tsc_leaf(struct kvm_vcpu *vcpu, u32 function)
{
+ return false;
}
#endif
diff --git a/arch/x86/lib/Makefile b/arch/x86/lib/Makefile
index 8a59c61624c2..1c50352eb49f 100644
--- a/arch/x86/lib/Makefile
+++ b/arch/x86/lib/Makefile
@@ -42,8 +42,11 @@ obj-$(CONFIG_CRC32_ARCH) += crc32-x86.o
crc32-x86-y := crc32-glue.o crc32-pclmul.o
crc32-x86-$(CONFIG_64BIT) += crc32c-3way.o
+obj-$(CONFIG_CRC64_ARCH) += crc64-x86.o
+crc64-x86-y := crc64-glue.o crc64-pclmul.o
+
obj-$(CONFIG_CRC_T10DIF_ARCH) += crc-t10dif-x86.o
-crc-t10dif-x86-y := crc-t10dif-glue.o crct10dif-pcl-asm_64.o
+crc-t10dif-x86-y := crc-t10dif-glue.o crc16-msb-pclmul.o
obj-y += msr.o msr-reg.o msr-reg-export.o hweight.o
obj-y += iomem.o
@@ -56,7 +59,7 @@ ifeq ($(CONFIG_X86_32),y)
lib-y += string_32.o
lib-y += memmove_32.o
lib-y += cmpxchg8b_emu.o
-ifneq ($(CONFIG_X86_CMPXCHG64),y)
+ifneq ($(CONFIG_X86_CX8),y)
lib-y += atomic64_386_32.o
endif
else
@@ -66,5 +69,6 @@ endif
lib-y += clear_page_64.o copy_page_64.o
lib-y += memmove_64.o memset_64.o
lib-y += copy_user_64.o copy_user_uncached_64.o
- lib-y += cmpxchg16b_emu.o
+ lib-y += cmpxchg16b_emu.o
+ lib-y += bhi.o
endif
diff --git a/arch/x86/lib/bhi.S b/arch/x86/lib/bhi.S
new file mode 100644
index 000000000000..58891681261b
--- /dev/null
+++ b/arch/x86/lib/bhi.S
@@ -0,0 +1,147 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#include <linux/linkage.h>
+#include <asm/unwind_hints.h>
+#include <asm/nospec-branch.h>
+
+/*
+ * Notably, the FineIBT preamble calling these will have ZF set and r10 zero.
+ *
+ * The very last element is in fact larger than 32 bytes, but since its the
+ * last element, this does not matter,
+ *
+ * There are 2 #UD sites, located between 0,1-2,3 and 4,5-6,7 such that they
+ * can be reached using Jcc.d8; these elements (1 and 5) have sufficiently
+ * big alignment holes for this to not stagger the array.
+ */
+
+.pushsection .noinstr.text, "ax"
+
+ .align 32
+SYM_CODE_START(__bhi_args)
+
+#ifdef CONFIG_FINEIBT_BHI
+
+ .align 32
+SYM_INNER_LABEL(__bhi_args_0, SYM_L_LOCAL)
+ ANNOTATE_NOENDBR
+ UNWIND_HINT_FUNC
+ jne .Lud_1
+ ANNOTATE_UNRET_SAFE
+ ret
+ int3
+
+ .align 32
+SYM_INNER_LABEL(__bhi_args_1, SYM_L_LOCAL)
+ ANNOTATE_NOENDBR
+ UNWIND_HINT_FUNC
+ jne .Lud_1
+ cmovne %r10, %rdi
+ ANNOTATE_UNRET_SAFE
+ ret
+ int3
+
+ .align 8
+ ANNOTATE_REACHABLE
+.Lud_1: ud2
+ ANNOTATE_UNRET_SAFE
+ ret
+ int3
+
+ .align 32
+SYM_INNER_LABEL(__bhi_args_2, SYM_L_LOCAL)
+ ANNOTATE_NOENDBR
+ UNWIND_HINT_FUNC
+ jne .Lud_1
+ cmovne %r10, %rdi
+ cmovne %r10, %rsi
+ ANNOTATE_UNRET_SAFE
+ ret
+ int3
+
+ .align 32
+SYM_INNER_LABEL(__bhi_args_3, SYM_L_LOCAL)
+ ANNOTATE_NOENDBR
+ UNWIND_HINT_FUNC
+ jne .Lud_1
+ cmovne %r10, %rdi
+ cmovne %r10, %rsi
+ cmovne %r10, %rdx
+ ANNOTATE_UNRET_SAFE
+ ret
+ int3
+
+ .align 32
+SYM_INNER_LABEL(__bhi_args_4, SYM_L_LOCAL)
+ ANNOTATE_NOENDBR
+ UNWIND_HINT_FUNC
+ jne .Lud_2
+ cmovne %r10, %rdi
+ cmovne %r10, %rsi
+ cmovne %r10, %rdx
+ cmovne %r10, %rcx
+ ANNOTATE_UNRET_SAFE
+ ret
+ int3
+
+ .align 32
+SYM_INNER_LABEL(__bhi_args_5, SYM_L_LOCAL)
+ ANNOTATE_NOENDBR
+ UNWIND_HINT_FUNC
+ jne .Lud_2
+ cmovne %r10, %rdi
+ cmovne %r10, %rsi
+ cmovne %r10, %rdx
+ cmovne %r10, %rcx
+ cmovne %r10, %r8
+ ANNOTATE_UNRET_SAFE
+ ret
+ int3
+
+ .align 8
+ ANNOTATE_REACHABLE
+.Lud_2: ud2
+ ANNOTATE_UNRET_SAFE
+ ret
+ int3
+
+ .align 32
+SYM_INNER_LABEL(__bhi_args_6, SYM_L_LOCAL)
+ ANNOTATE_NOENDBR
+ UNWIND_HINT_FUNC
+ jne .Lud_2
+ cmovne %r10, %rdi
+ cmovne %r10, %rsi
+ cmovne %r10, %rdx
+ cmovne %r10, %rcx
+ cmovne %r10, %r8
+ cmovne %r10, %r9
+ ANNOTATE_UNRET_SAFE
+ ret
+ int3
+
+ .align 32
+SYM_INNER_LABEL(__bhi_args_7, SYM_L_LOCAL)
+ ANNOTATE_NOENDBR
+ UNWIND_HINT_FUNC
+ jne .Lud_2
+ cmovne %r10, %rdi
+ cmovne %r10, %rsi
+ cmovne %r10, %rdx
+ cmovne %r10, %rcx
+ cmovne %r10, %r8
+ cmovne %r10, %r9
+ cmovne %r10, %rsp
+ ANNOTATE_UNRET_SAFE
+ ret
+ int3
+
+#endif /* CONFIG_FINEIBT_BHI */
+
+ .align 32
+SYM_INNER_LABEL(__bhi_args_end, SYM_L_GLOBAL)
+ ANNOTATE_NOENDBR
+ nop /* Work around toolchain+objtool quirk */
+SYM_CODE_END(__bhi_args)
+
+.popsection
diff --git a/arch/x86/lib/clear_page_64.S b/arch/x86/lib/clear_page_64.S
index 2760a15fbc00..a508e4a8c66a 100644
--- a/arch/x86/lib/clear_page_64.S
+++ b/arch/x86/lib/clear_page_64.S
@@ -1,6 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0-only */
#include <linux/export.h>
#include <linux/linkage.h>
+#include <linux/cfi_types.h>
+#include <linux/objtool.h>
#include <asm/asm.h>
/*
@@ -14,7 +16,7 @@
* Zero a page.
* %rdi - page
*/
-SYM_FUNC_START(clear_page_rep)
+SYM_TYPED_FUNC_START(clear_page_rep)
movl $4096/8,%ecx
xorl %eax,%eax
rep stosq
@@ -22,7 +24,7 @@ SYM_FUNC_START(clear_page_rep)
SYM_FUNC_END(clear_page_rep)
EXPORT_SYMBOL_GPL(clear_page_rep)
-SYM_FUNC_START(clear_page_orig)
+SYM_TYPED_FUNC_START(clear_page_orig)
xorl %eax,%eax
movl $4096/64,%ecx
.p2align 4
@@ -44,7 +46,7 @@ SYM_FUNC_START(clear_page_orig)
SYM_FUNC_END(clear_page_orig)
EXPORT_SYMBOL_GPL(clear_page_orig)
-SYM_FUNC_START(clear_page_erms)
+SYM_TYPED_FUNC_START(clear_page_erms)
movl $4096,%ecx
xorl %eax,%eax
rep stosb
@@ -63,6 +65,7 @@ EXPORT_SYMBOL_GPL(clear_page_erms)
* rcx: uncleared bytes or 0 if successful.
*/
SYM_FUNC_START(rep_stos_alternative)
+ ANNOTATE_NOENDBR
cmpq $64,%rcx
jae .Lunrolled
diff --git a/arch/x86/lib/cmpxchg8b_emu.S b/arch/x86/lib/cmpxchg8b_emu.S
index 1c96be769adc..d4bb24347ff8 100644
--- a/arch/x86/lib/cmpxchg8b_emu.S
+++ b/arch/x86/lib/cmpxchg8b_emu.S
@@ -7,7 +7,7 @@
.text
-#ifndef CONFIG_X86_CMPXCHG64
+#ifndef CONFIG_X86_CX8
/*
* Emulate 'cmpxchg8b (%esi)' on UP
diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page_64.S
index d6ae793d08fa..d8e87fedc20d 100644
--- a/arch/x86/lib/copy_page_64.S
+++ b/arch/x86/lib/copy_page_64.S
@@ -3,6 +3,7 @@
#include <linux/export.h>
#include <linux/linkage.h>
+#include <linux/cfi_types.h>
#include <asm/cpufeatures.h>
#include <asm/alternative.h>
@@ -13,7 +14,7 @@
* prefetch distance based on SMP/UP.
*/
ALIGN
-SYM_FUNC_START(copy_page)
+SYM_TYPED_FUNC_START(copy_page)
ALTERNATIVE "jmp copy_page_regs", "", X86_FEATURE_REP_GOOD
movl $4096/8, %ecx
rep movsq
diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
index fc9fb5d06174..aa8c341b2441 100644
--- a/arch/x86/lib/copy_user_64.S
+++ b/arch/x86/lib/copy_user_64.S
@@ -8,6 +8,8 @@
#include <linux/export.h>
#include <linux/linkage.h>
+#include <linux/cfi_types.h>
+#include <linux/objtool.h>
#include <asm/cpufeatures.h>
#include <asm/alternative.h>
#include <asm/asm.h>
@@ -30,6 +32,7 @@
* it simpler for us, we can clobber rsi/rdi and rax freely.
*/
SYM_FUNC_START(rep_movs_alternative)
+ ANNOTATE_NOENDBR
cmpq $64,%rcx
jae .Llarge
diff --git a/arch/x86/lib/copy_user_uncached_64.S b/arch/x86/lib/copy_user_uncached_64.S
index 2918e36eece2..18350b343c2a 100644
--- a/arch/x86/lib/copy_user_uncached_64.S
+++ b/arch/x86/lib/copy_user_uncached_64.S
@@ -5,6 +5,7 @@
#include <linux/export.h>
#include <linux/linkage.h>
+#include <linux/objtool.h>
#include <asm/asm.h>
/*
@@ -27,6 +28,7 @@
* rax uncopied bytes or 0 if successful.
*/
SYM_FUNC_START(__copy_user_nocache)
+ ANNOTATE_NOENDBR
/* If destination is not 7-byte aligned, we'll have to align it */
testb $7,%dil
jne .Lalign
diff --git a/arch/x86/lib/crc-pclmul-consts.h b/arch/x86/lib/crc-pclmul-consts.h
new file mode 100644
index 000000000000..fcc63c064333
--- /dev/null
+++ b/arch/x86/lib/crc-pclmul-consts.h
@@ -0,0 +1,195 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * CRC constants generated by:
+ *
+ * ./scripts/gen-crc-consts.py x86_pclmul crc16_msb_0x8bb7,crc32_lsb_0xedb88320,crc64_msb_0x42f0e1eba9ea3693,crc64_lsb_0x9a6c9329ac4bc9b5
+ *
+ * Do not edit manually.
+ */
+
+/*
+ * CRC folding constants generated for most-significant-bit-first CRC-16 using
+ * G(x) = x^16 + x^15 + x^11 + x^9 + x^8 + x^7 + x^5 + x^4 + x^2 + x^1 + x^0
+ */
+static const struct {
+ u8 bswap_mask[16];
+ u64 fold_across_2048_bits_consts[2];
+ u64 fold_across_1024_bits_consts[2];
+ u64 fold_across_512_bits_consts[2];
+ u64 fold_across_256_bits_consts[2];
+ u64 fold_across_128_bits_consts[2];
+ u8 shuf_table[48];
+ u64 barrett_reduction_consts[2];
+} crc16_msb_0x8bb7_consts ____cacheline_aligned __maybe_unused = {
+ .bswap_mask = {15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0},
+ .fold_across_2048_bits_consts = {
+ 0xdccf000000000000, /* LO64_TERMS: (x^2000 mod G) * x^48 */
+ 0x4b0b000000000000, /* HI64_TERMS: (x^2064 mod G) * x^48 */
+ },
+ .fold_across_1024_bits_consts = {
+ 0x9d9d000000000000, /* LO64_TERMS: (x^976 mod G) * x^48 */
+ 0x7cf5000000000000, /* HI64_TERMS: (x^1040 mod G) * x^48 */
+ },
+ .fold_across_512_bits_consts = {
+ 0x044c000000000000, /* LO64_TERMS: (x^464 mod G) * x^48 */
+ 0xe658000000000000, /* HI64_TERMS: (x^528 mod G) * x^48 */
+ },
+ .fold_across_256_bits_consts = {
+ 0x6ee3000000000000, /* LO64_TERMS: (x^208 mod G) * x^48 */
+ 0xe7b5000000000000, /* HI64_TERMS: (x^272 mod G) * x^48 */
+ },
+ .fold_across_128_bits_consts = {
+ 0x2d56000000000000, /* LO64_TERMS: (x^80 mod G) * x^48 */
+ 0x06df000000000000, /* HI64_TERMS: (x^144 mod G) * x^48 */
+ },
+ .shuf_table = {
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ },
+ .barrett_reduction_consts = {
+ 0x8bb7000000000000, /* LO64_TERMS: (G - x^16) * x^48 */
+ 0xf65a57f81d33a48a, /* HI64_TERMS: (floor(x^79 / G) * x) - x^64 */
+ },
+};
+
+/*
+ * CRC folding constants generated for least-significant-bit-first CRC-32 using
+ * G(x) = x^32 + x^26 + x^23 + x^22 + x^16 + x^12 + x^11 + x^10 + x^8 + x^7 +
+ * x^5 + x^4 + x^2 + x^1 + x^0
+ */
+static const struct {
+ u64 fold_across_2048_bits_consts[2];
+ u64 fold_across_1024_bits_consts[2];
+ u64 fold_across_512_bits_consts[2];
+ u64 fold_across_256_bits_consts[2];
+ u64 fold_across_128_bits_consts[2];
+ u8 shuf_table[48];
+ u64 barrett_reduction_consts[2];
+} crc32_lsb_0xedb88320_consts ____cacheline_aligned __maybe_unused = {
+ .fold_across_2048_bits_consts = {
+ 0x00000000ce3371cb, /* HI64_TERMS: (x^2079 mod G) * x^32 */
+ 0x00000000e95c1271, /* LO64_TERMS: (x^2015 mod G) * x^32 */
+ },
+ .fold_across_1024_bits_consts = {
+ 0x0000000033fff533, /* HI64_TERMS: (x^1055 mod G) * x^32 */
+ 0x00000000910eeec1, /* LO64_TERMS: (x^991 mod G) * x^32 */
+ },
+ .fold_across_512_bits_consts = {
+ 0x000000008f352d95, /* HI64_TERMS: (x^543 mod G) * x^32 */
+ 0x000000001d9513d7, /* LO64_TERMS: (x^479 mod G) * x^32 */
+ },
+ .fold_across_256_bits_consts = {
+ 0x00000000f1da05aa, /* HI64_TERMS: (x^287 mod G) * x^32 */
+ 0x0000000081256527, /* LO64_TERMS: (x^223 mod G) * x^32 */
+ },
+ .fold_across_128_bits_consts = {
+ 0x00000000ae689191, /* HI64_TERMS: (x^159 mod G) * x^32 */
+ 0x00000000ccaa009e, /* LO64_TERMS: (x^95 mod G) * x^32 */
+ },
+ .shuf_table = {
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ },
+ .barrett_reduction_consts = {
+ 0xb4e5b025f7011641, /* HI64_TERMS: floor(x^95 / G) */
+ 0x00000001db710640, /* LO64_TERMS: (G - x^32) * x^31 */
+ },
+};
+
+/*
+ * CRC folding constants generated for most-significant-bit-first CRC-64 using
+ * G(x) = x^64 + x^62 + x^57 + x^55 + x^54 + x^53 + x^52 + x^47 + x^46 + x^45 +
+ * x^40 + x^39 + x^38 + x^37 + x^35 + x^33 + x^32 + x^31 + x^29 + x^27 +
+ * x^24 + x^23 + x^22 + x^21 + x^19 + x^17 + x^13 + x^12 + x^10 + x^9 +
+ * x^7 + x^4 + x^1 + x^0
+ */
+static const struct {
+ u8 bswap_mask[16];
+ u64 fold_across_2048_bits_consts[2];
+ u64 fold_across_1024_bits_consts[2];
+ u64 fold_across_512_bits_consts[2];
+ u64 fold_across_256_bits_consts[2];
+ u64 fold_across_128_bits_consts[2];
+ u8 shuf_table[48];
+ u64 barrett_reduction_consts[2];
+} crc64_msb_0x42f0e1eba9ea3693_consts ____cacheline_aligned __maybe_unused = {
+ .bswap_mask = {15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0},
+ .fold_across_2048_bits_consts = {
+ 0x7f52691a60ddc70d, /* LO64_TERMS: (x^2048 mod G) * x^0 */
+ 0x7036b0389f6a0c82, /* HI64_TERMS: (x^2112 mod G) * x^0 */
+ },
+ .fold_across_1024_bits_consts = {
+ 0x05cf79dea9ac37d6, /* LO64_TERMS: (x^1024 mod G) * x^0 */
+ 0x001067e571d7d5c2, /* HI64_TERMS: (x^1088 mod G) * x^0 */
+ },
+ .fold_across_512_bits_consts = {
+ 0x5f6843ca540df020, /* LO64_TERMS: (x^512 mod G) * x^0 */
+ 0xddf4b6981205b83f, /* HI64_TERMS: (x^576 mod G) * x^0 */
+ },
+ .fold_across_256_bits_consts = {
+ 0x571bee0a227ef92b, /* LO64_TERMS: (x^256 mod G) * x^0 */
+ 0x44bef2a201b5200c, /* HI64_TERMS: (x^320 mod G) * x^0 */
+ },
+ .fold_across_128_bits_consts = {
+ 0x05f5c3c7eb52fab6, /* LO64_TERMS: (x^128 mod G) * x^0 */
+ 0x4eb938a7d257740e, /* HI64_TERMS: (x^192 mod G) * x^0 */
+ },
+ .shuf_table = {
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ },
+ .barrett_reduction_consts = {
+ 0x42f0e1eba9ea3693, /* LO64_TERMS: (G - x^64) * x^0 */
+ 0x578d29d06cc4f872, /* HI64_TERMS: (floor(x^127 / G) * x) - x^64 */
+ },
+};
+
+/*
+ * CRC folding constants generated for least-significant-bit-first CRC-64 using
+ * G(x) = x^64 + x^63 + x^61 + x^59 + x^58 + x^56 + x^55 + x^52 + x^49 + x^48 +
+ * x^47 + x^46 + x^44 + x^41 + x^37 + x^36 + x^34 + x^32 + x^31 + x^28 +
+ * x^26 + x^23 + x^22 + x^19 + x^16 + x^13 + x^12 + x^10 + x^9 + x^6 +
+ * x^4 + x^3 + x^0
+ */
+static const struct {
+ u64 fold_across_2048_bits_consts[2];
+ u64 fold_across_1024_bits_consts[2];
+ u64 fold_across_512_bits_consts[2];
+ u64 fold_across_256_bits_consts[2];
+ u64 fold_across_128_bits_consts[2];
+ u8 shuf_table[48];
+ u64 barrett_reduction_consts[2];
+} crc64_lsb_0x9a6c9329ac4bc9b5_consts ____cacheline_aligned __maybe_unused = {
+ .fold_across_2048_bits_consts = {
+ 0x37ccd3e14069cabc, /* HI64_TERMS: (x^2111 mod G) * x^0 */
+ 0xa043808c0f782663, /* LO64_TERMS: (x^2047 mod G) * x^0 */
+ },
+ .fold_across_1024_bits_consts = {
+ 0xa1ca681e733f9c40, /* HI64_TERMS: (x^1087 mod G) * x^0 */
+ 0x5f852fb61e8d92dc, /* LO64_TERMS: (x^1023 mod G) * x^0 */
+ },
+ .fold_across_512_bits_consts = {
+ 0x0c32cdb31e18a84a, /* HI64_TERMS: (x^575 mod G) * x^0 */
+ 0x62242240ace5045a, /* LO64_TERMS: (x^511 mod G) * x^0 */
+ },
+ .fold_across_256_bits_consts = {
+ 0xb0bc2e589204f500, /* HI64_TERMS: (x^319 mod G) * x^0 */
+ 0xe1e0bb9d45d7a44c, /* LO64_TERMS: (x^255 mod G) * x^0 */
+ },
+ .fold_across_128_bits_consts = {
+ 0xeadc41fd2ba3d420, /* HI64_TERMS: (x^191 mod G) * x^0 */
+ 0x21e9761e252621ac, /* LO64_TERMS: (x^127 mod G) * x^0 */
+ },
+ .shuf_table = {
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ },
+ .barrett_reduction_consts = {
+ 0x27ecfa329aef9f77, /* HI64_TERMS: floor(x^127 / G) */
+ 0x34d926535897936a, /* LO64_TERMS: (G - x^64 - x^0) / x */
+ },
+};
diff --git a/arch/x86/lib/crc-pclmul-template.S b/arch/x86/lib/crc-pclmul-template.S
new file mode 100644
index 000000000000..ae0b6144c503
--- /dev/null
+++ b/arch/x86/lib/crc-pclmul-template.S
@@ -0,0 +1,582 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+//
+// Template to generate [V]PCLMULQDQ-based CRC functions for x86
+//
+// Copyright 2025 Google LLC
+//
+// Author: Eric Biggers <ebiggers@google.com>
+
+#include <linux/linkage.h>
+#include <linux/objtool.h>
+
+// Offsets within the generated constants table
+.set OFFSETOF_BSWAP_MASK, -5*16 // msb-first CRCs only
+.set OFFSETOF_FOLD_ACROSS_2048_BITS_CONSTS, -4*16 // must precede next
+.set OFFSETOF_FOLD_ACROSS_1024_BITS_CONSTS, -3*16 // must precede next
+.set OFFSETOF_FOLD_ACROSS_512_BITS_CONSTS, -2*16 // must precede next
+.set OFFSETOF_FOLD_ACROSS_256_BITS_CONSTS, -1*16 // must precede next
+.set OFFSETOF_FOLD_ACROSS_128_BITS_CONSTS, 0*16 // must be 0
+.set OFFSETOF_SHUF_TABLE, 1*16
+.set OFFSETOF_BARRETT_REDUCTION_CONSTS, 4*16
+
+// Emit a VEX (or EVEX) coded instruction if allowed, or emulate it using the
+// corresponding non-VEX instruction plus any needed moves. The supported
+// instruction formats are:
+//
+// - Two-arg [src, dst], where the non-VEX format is the same.
+// - Three-arg [src1, src2, dst] where the non-VEX format is
+// [src1, src2_and_dst]. If src2 != dst, then src1 must != dst too.
+//
+// \insn gives the instruction without a "v" prefix and including any immediate
+// argument if needed to make the instruction follow one of the above formats.
+// If \unaligned_mem_tmp is given, then the emitted non-VEX code moves \arg1 to
+// it first; this is needed when \arg1 is an unaligned mem operand.
+.macro _cond_vex insn:req, arg1:req, arg2:req, arg3, unaligned_mem_tmp
+.if AVX_LEVEL == 0
+ // VEX not allowed. Emulate it.
+ .ifnb \arg3 // Three-arg [src1, src2, dst]
+ .ifc "\arg2", "\arg3" // src2 == dst?
+ .ifnb \unaligned_mem_tmp
+ movdqu \arg1, \unaligned_mem_tmp
+ \insn \unaligned_mem_tmp, \arg3
+ .else
+ \insn \arg1, \arg3
+ .endif
+ .else // src2 != dst
+ .ifc "\arg1", "\arg3"
+ .error "Can't have src1 == dst when src2 != dst"
+ .endif
+ .ifnb \unaligned_mem_tmp
+ movdqu \arg1, \unaligned_mem_tmp
+ movdqa \arg2, \arg3
+ \insn \unaligned_mem_tmp, \arg3
+ .else
+ movdqa \arg2, \arg3
+ \insn \arg1, \arg3
+ .endif
+ .endif
+ .else // Two-arg [src, dst]
+ .ifnb \unaligned_mem_tmp
+ movdqu \arg1, \unaligned_mem_tmp
+ \insn \unaligned_mem_tmp, \arg2
+ .else
+ \insn \arg1, \arg2
+ .endif
+ .endif
+.else
+ // VEX is allowed. Emit the desired instruction directly.
+ .ifnb \arg3
+ v\insn \arg1, \arg2, \arg3
+ .else
+ v\insn \arg1, \arg2
+ .endif
+.endif
+.endm
+
+// Broadcast an aligned 128-bit mem operand to all 128-bit lanes of a vector
+// register of length VL.
+.macro _vbroadcast src, dst
+.if VL == 16
+ _cond_vex movdqa, \src, \dst
+.elseif VL == 32
+ vbroadcasti128 \src, \dst
+.else
+ vbroadcasti32x4 \src, \dst
+.endif
+.endm
+
+// Load \vl bytes from the unaligned mem operand \src into \dst, and if the CRC
+// is msb-first use \bswap_mask to reflect the bytes within each 128-bit lane.
+.macro _load_data vl, src, bswap_mask, dst
+.if \vl < 64
+ _cond_vex movdqu, "\src", \dst
+.else
+ vmovdqu8 \src, \dst
+.endif
+.if !LSB_CRC
+ _cond_vex pshufb, \bswap_mask, \dst, \dst
+.endif
+.endm
+
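+// XOR the first \vl data bytes at BUF into \v0, which holds the initial CRC in
+// its first 128-bit lane.  If the CRC is msb-first, byte-reflect the data
+// first.  \v1 is a temporary vector register; all arguments have length \vl.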
+.macro _prepare_v0 vl, v0, v1, bswap_mask
+.if LSB_CRC
+ .if \vl < 64
+ _cond_vex pxor, (BUF), \v0, \v0, unaligned_mem_tmp=\v1
+ .else
+ vpxorq (BUF), \v0, \v0
+ .endif
+.else
+ _load_data \vl, (BUF), \bswap_mask, \v1
+ .if \vl < 64
+ _cond_vex pxor, \v1, \v0, \v0
+ .else
+ vpxorq \v1, \v0, \v0
+ .endif
+.endif
+.endm
+
+// The x^0..x^63 terms, i.e. poly128 mod x^64, i.e. the physically low qword for
+// msb-first order or the physically high qword for lsb-first order
+#define LO64_TERMS 0
+
+// The x^64..x^127 terms, i.e. floor(poly128 / x^64), i.e. the physically high
+// qword for msb-first order or the physically low qword for lsb-first order
+#define HI64_TERMS 1
+
+// Multiply the given \src1_terms of each 128-bit lane of \src1 by the given
+// \src2_terms of each 128-bit lane of \src2, and write the result(s) to \dst.
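+//
+// For example, with LSB_CRC=1, selecting HI64_TERMS of \src1 and LO64_TERMS of
+// \src2 yields the immediate ((1 ^ 1) << 4) ^ (0 ^ 1) = 0x01, i.e. the
+// physically low qword of \src1 times the physically high qword of \src2,
+// which is where those terms live in lsb-first order.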
+.macro _pclmulqdq src1, src1_terms, src2, src2_terms, dst
+ _cond_vex "pclmulqdq $((\src1_terms ^ LSB_CRC) << 4) ^ (\src2_terms ^ LSB_CRC),", \
+ \src1, \src2, \dst
+.endm
+
+// Fold \acc into \data and store the result back into \acc. \data can be an
+// unaligned mem operand if using VEX is allowed and the CRC is lsb-first so no
+// byte-reflection is needed; otherwise it must be a vector register. \consts
+// is a vector register containing the needed fold constants, and \tmp is a
+// temporary vector register. All arguments must be the same length.
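+//
+// In polynomial (GF(2)) terms, each 128-bit lane computes:
+//   \acc := \data + (HI64_TERMS of \consts) * (HI64_TERMS of \acc)
+//                 + (LO64_TERMS of \consts) * (LO64_TERMS of \acc)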
+.macro _fold_vec acc, data, consts, tmp
+ _pclmulqdq \consts, HI64_TERMS, \acc, HI64_TERMS, \tmp
+ _pclmulqdq \consts, LO64_TERMS, \acc, LO64_TERMS, \acc
+.if AVX_LEVEL <= 2
+ _cond_vex pxor, \data, \tmp, \tmp
+ _cond_vex pxor, \tmp, \acc, \acc
+.else
+ vpternlogq $0x96, \data, \tmp, \acc
+.endif
+.endm
+
+// Fold \acc into \data and store the result back into \acc. \data is an
+// unaligned mem operand, \consts is a vector register containing the needed
+// fold constants, \bswap_mask is a vector register containing the
+// byte-reflection table if the CRC is msb-first, and \tmp1 and \tmp2 are
+// temporary vector registers. All arguments must have length \vl.
+.macro _fold_vec_mem vl, acc, data, consts, bswap_mask, tmp1, tmp2
+.if AVX_LEVEL == 0 || !LSB_CRC
+ _load_data \vl, \data, \bswap_mask, \tmp1
+ _fold_vec \acc, \tmp1, \consts, \tmp2
+.else
+ _fold_vec \acc, \data, \consts, \tmp1
+.endif
+.endm
+
+// Load the constants for folding across 2**i vectors of length VL at a time
+// into all 128-bit lanes of the vector register CONSTS.
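+// For example, with VL=32 (LOG2_VL=5) and \i=2 (folding across 4 ymm vectors,
+// i.e. 1024 bits at a time), the offset evaluates to (4-5-2)*16 = -3*16, which
+// is OFFSETOF_FOLD_ACROSS_1024_BITS_CONSTS.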
+.macro _load_vec_folding_consts i
+ _vbroadcast OFFSETOF_FOLD_ACROSS_128_BITS_CONSTS+(4-LOG2_VL-\i)*16(CONSTS_PTR), \
+ CONSTS
+.endm
+
+// Given vector registers \v0 and \v1 of length \vl, fold \v0 into \v1 and store
+// the result back into \v0.  Then fold \vl more data bytes from BUF, if at
+// least that much data remains (equivalently, if the \vl bit of LEN is set).
+// For both operations the fold distance is \vl.
+// \consts must be a register of length \vl containing the fold constants.
+.macro _fold_vec_final vl, v0, v1, consts, bswap_mask, tmp1, tmp2
+ _fold_vec \v0, \v1, \consts, \tmp1
+ test $\vl, LEN8
+ jz .Lfold_vec_final_done\@
+ _fold_vec_mem \vl, \v0, (BUF), \consts, \bswap_mask, \tmp1, \tmp2
+ add $\vl, BUF
+.Lfold_vec_final_done\@:
+.endm
+
+// This macro generates the body of a CRC function with the following prototype:
+//
+// crc_t crc_func(crc_t crc, const u8 *buf, size_t len, const void *consts);
+//
+// |crc| is the initial CRC, and crc_t is a data type wide enough to hold it.
+// |buf| is the data to checksum. |len| is the data length in bytes, which must
+// be at least 16. |consts| is a pointer to the fold_across_128_bits_consts
+// field of the constants struct that was generated for the chosen CRC variant.
+//
+// Moving onto the macro parameters, \n is the number of bits in the CRC, e.g.
+// 32 for a CRC-32. Currently the supported values are 8, 16, 32, and 64. If
+// the file is compiled in i386 mode, then the maximum supported value is 32.
+//
+// \lsb_crc is 1 if the CRC processes the least significant bit of each byte
+// first, i.e. maps bit0 to x^7, bit1 to x^6, ..., bit7 to x^0. \lsb_crc is 0
+// if the CRC processes the most significant bit of each byte first, i.e. maps
+// bit0 to x^0, bit1 to x^1, ..., bit7 to x^7.
+//
+// \vl is the maximum length of vector register to use in bytes: 16, 32, or 64.
+//
+// \avx_level is the level of AVX support to use: 0 for SSE only, 2 for AVX2, or
+// 512 for AVX512.
+//
+// If \vl == 16 && \avx_level == 0, the generated code requires:
+// PCLMULQDQ && SSE4.1. (Note: all known CPUs with PCLMULQDQ also have SSE4.1.)
+//
+// If \vl == 32 && \avx_level == 2, the generated code requires:
+// VPCLMULQDQ && AVX2.
+//
+// If \vl == 64 && \avx_level == 512, the generated code requires:
+// VPCLMULQDQ && AVX512BW && AVX512VL.
+//
+// Other \vl and \avx_level combinations are either not supported or not useful.
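+//
+// For illustration, the C glue code (via the CRC_PCLMUL macro in
+// crc-pclmul-template.h) ends up invoking a generated function roughly as:
+//
+//	kernel_fpu_begin();
+//	crc = crc32_lsb_pclmul_sse(crc, p, len,
+//			crc32_lsb_0xedb88320_consts.fold_across_128_bits_consts);
+//	kernel_fpu_end();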
+.macro _crc_pclmul n, lsb_crc, vl, avx_level
+ .set LSB_CRC, \lsb_crc
+ .set VL, \vl
+ .set AVX_LEVEL, \avx_level
+
+ // Define aliases for the xmm, ymm, or zmm registers according to VL.
+.irp i, 0,1,2,3,4,5,6,7
+ .if VL == 16
+ .set V\i, %xmm\i
+ .set LOG2_VL, 4
+ .elseif VL == 32
+ .set V\i, %ymm\i
+ .set LOG2_VL, 5
+ .elseif VL == 64
+ .set V\i, %zmm\i
+ .set LOG2_VL, 6
+ .else
+ .error "Unsupported vector length"
+ .endif
+.endr
+ // Define aliases for the function parameters.
+ // Note: when crc_t is shorter than u32, zero-extension to 32 bits is
+ // guaranteed by the ABI. Zero-extension to 64 bits is *not* guaranteed
+ // when crc_t is shorter than u64.
+#ifdef __x86_64__
+.if \n <= 32
+ .set CRC, %edi
+.else
+ .set CRC, %rdi
+.endif
+ .set BUF, %rsi
+ .set LEN, %rdx
+ .set LEN32, %edx
+ .set LEN8, %dl
+ .set CONSTS_PTR, %rcx
+#else
+ // 32-bit support, assuming -mregparm=3 and not including support for
+ // CRC-64 (which would use both eax and edx to pass the crc parameter).
+ .set CRC, %eax
+ .set BUF, %edx
+ .set LEN, %ecx
+ .set LEN32, %ecx
+ .set LEN8, %cl
+ .set CONSTS_PTR, %ebx // Passed on stack
+#endif
+
+ // Define aliases for some local variables. V0-V5 are used without
+ // aliases (for accumulators, data, temporary values, etc). Staying
+ // within the first 8 vector registers keeps the code 32-bit SSE
+ // compatible and reduces the size of 64-bit SSE code slightly.
+ .set BSWAP_MASK, V6
+ .set BSWAP_MASK_YMM, %ymm6
+ .set BSWAP_MASK_XMM, %xmm6
+ .set CONSTS, V7
+ .set CONSTS_YMM, %ymm7
+ .set CONSTS_XMM, %xmm7
+
+ // Use ANNOTATE_NOENDBR to suppress an objtool warning, since the
+ // functions generated by this macro are called only by static_call.
+ ANNOTATE_NOENDBR
+
+#ifdef __i386__
+ push CONSTS_PTR
+ mov 8(%esp), CONSTS_PTR
+#endif
+
+	// Create a 128-bit vector that contains the initial CRC at the end that
+	// represents the high-order polynomial coefficients, with the rest 0.
+ // If the CRC is msb-first, also load the byte-reflection table.
+.if \n <= 32
+ _cond_vex movd, CRC, %xmm0
+.else
+ _cond_vex movq, CRC, %xmm0
+.endif
+.if !LSB_CRC
+ _cond_vex pslldq, $(128-\n)/8, %xmm0, %xmm0
+ _vbroadcast OFFSETOF_BSWAP_MASK(CONSTS_PTR), BSWAP_MASK
+.endif
+
+ // Load the first vector of data and XOR the initial CRC into the
+ // appropriate end of the first 128-bit lane of data. If LEN < VL, then
+ // use a short vector and jump ahead to the final reduction. (LEN >= 16
+ // is guaranteed here but not necessarily LEN >= VL.)
+.if VL >= 32
+ cmp $VL, LEN
+ jae .Lat_least_1vec\@
+ .if VL == 64
+ cmp $32, LEN32
+ jb .Lless_than_32bytes\@
+ _prepare_v0 32, %ymm0, %ymm1, BSWAP_MASK_YMM
+ add $32, BUF
+ jmp .Lreduce_256bits_to_128bits\@
+.Lless_than_32bytes\@:
+ .endif
+ _prepare_v0 16, %xmm0, %xmm1, BSWAP_MASK_XMM
+ add $16, BUF
+ vmovdqa OFFSETOF_FOLD_ACROSS_128_BITS_CONSTS(CONSTS_PTR), CONSTS_XMM
+ jmp .Lcheck_for_partial_block\@
+.Lat_least_1vec\@:
+.endif
+ _prepare_v0 VL, V0, V1, BSWAP_MASK
+
+ // Handle VL <= LEN < 4*VL.
+ cmp $4*VL-1, LEN
+ ja .Lat_least_4vecs\@
+ add $VL, BUF
+ // If VL <= LEN < 2*VL, then jump ahead to the reduction from 1 vector.
+ // If VL==16 then load fold_across_128_bits_consts first, as the final
+ // reduction depends on it and it won't be loaded anywhere else.
+ cmp $2*VL-1, LEN32
+.if VL == 16
+ _cond_vex movdqa, OFFSETOF_FOLD_ACROSS_128_BITS_CONSTS(CONSTS_PTR), CONSTS_XMM
+.endif
+ jbe .Lreduce_1vec_to_128bits\@
+ // Otherwise 2*VL <= LEN < 4*VL. Load one more vector and jump ahead to
+ // the reduction from 2 vectors.
+ _load_data VL, (BUF), BSWAP_MASK, V1
+ add $VL, BUF
+ jmp .Lreduce_2vecs_to_1\@
+
+.Lat_least_4vecs\@:
+ // Load 3 more vectors of data.
+ _load_data VL, 1*VL(BUF), BSWAP_MASK, V1
+ _load_data VL, 2*VL(BUF), BSWAP_MASK, V2
+ _load_data VL, 3*VL(BUF), BSWAP_MASK, V3
+ sub $-4*VL, BUF // Shorter than 'add 4*VL' when VL=32
+ add $-4*VL, LEN // Shorter than 'sub 4*VL' when VL=32
+
+ // Main loop: while LEN >= 4*VL, fold the 4 vectors V0-V3 into the next
+ // 4 vectors of data and write the result back to V0-V3.
+ cmp $4*VL-1, LEN // Shorter than 'cmp 4*VL' when VL=32
+ jbe .Lreduce_4vecs_to_2\@
+ _load_vec_folding_consts 2
+.Lfold_4vecs_loop\@:
+ _fold_vec_mem VL, V0, 0*VL(BUF), CONSTS, BSWAP_MASK, V4, V5
+ _fold_vec_mem VL, V1, 1*VL(BUF), CONSTS, BSWAP_MASK, V4, V5
+ _fold_vec_mem VL, V2, 2*VL(BUF), CONSTS, BSWAP_MASK, V4, V5
+ _fold_vec_mem VL, V3, 3*VL(BUF), CONSTS, BSWAP_MASK, V4, V5
+ sub $-4*VL, BUF
+ add $-4*VL, LEN
+ cmp $4*VL-1, LEN
+ ja .Lfold_4vecs_loop\@
+
+ // Fold V0,V1 into V2,V3 and write the result back to V0,V1. Then fold
+ // two more vectors of data from BUF, if at least that much remains.
+.Lreduce_4vecs_to_2\@:
+ _load_vec_folding_consts 1
+ _fold_vec V0, V2, CONSTS, V4
+ _fold_vec V1, V3, CONSTS, V4
+ test $2*VL, LEN8
+ jz .Lreduce_2vecs_to_1\@
+ _fold_vec_mem VL, V0, 0*VL(BUF), CONSTS, BSWAP_MASK, V4, V5
+ _fold_vec_mem VL, V1, 1*VL(BUF), CONSTS, BSWAP_MASK, V4, V5
+ sub $-2*VL, BUF
+
+ // Fold V0 into V1 and write the result back to V0. Then fold one more
+ // vector of data from BUF, if at least that much remains.
+.Lreduce_2vecs_to_1\@:
+ _load_vec_folding_consts 0
+ _fold_vec_final VL, V0, V1, CONSTS, BSWAP_MASK, V4, V5
+
+.Lreduce_1vec_to_128bits\@:
+.if VL == 64
+ // Reduce 512-bit %zmm0 to 256-bit %ymm0. Then fold 256 more bits of
+ // data from BUF, if at least that much remains.
+ vbroadcasti128 OFFSETOF_FOLD_ACROSS_256_BITS_CONSTS(CONSTS_PTR), CONSTS_YMM
+ vextracti64x4 $1, %zmm0, %ymm1
+ _fold_vec_final 32, %ymm0, %ymm1, CONSTS_YMM, BSWAP_MASK_YMM, %ymm4, %ymm5
+.Lreduce_256bits_to_128bits\@:
+.endif
+.if VL >= 32
+ // Reduce 256-bit %ymm0 to 128-bit %xmm0. Then fold 128 more bits of
+ // data from BUF, if at least that much remains.
+ vmovdqa OFFSETOF_FOLD_ACROSS_128_BITS_CONSTS(CONSTS_PTR), CONSTS_XMM
+ vextracti128 $1, %ymm0, %xmm1
+ _fold_vec_final 16, %xmm0, %xmm1, CONSTS_XMM, BSWAP_MASK_XMM, %xmm4, %xmm5
+.Lcheck_for_partial_block\@:
+.endif
+ and $15, LEN32
+ jz .Lreduce_128bits_to_crc\@
+
+ // 1 <= LEN <= 15 data bytes remain in BUF. The polynomial is now
+ // A*(x^(8*LEN)) + B, where A is the 128-bit polynomial stored in %xmm0
+ // and B is the polynomial of the remaining LEN data bytes. To reduce
+ // this to 128 bits without needing fold constants for each possible
+ // LEN, rearrange this expression into C1*(x^128) + C2, where
+ // C1 = floor(A / x^(128 - 8*LEN)) and C2 = A*x^(8*LEN) + B mod x^128.
+ // Then fold C1 into C2, which is just another fold across 128 bits.
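+	// For example, with LEN = 4: C1 = floor(A / x^96), i.e. the x^96..x^127
+	// terms of A, and C2 = (A*x^32 mod x^128) + B.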
+
+.if !LSB_CRC || AVX_LEVEL == 0
+ // Load the last 16 data bytes. Note that originally LEN was >= 16.
+ _load_data 16, "-16(BUF,LEN)", BSWAP_MASK_XMM, %xmm2
+.endif // Else will use vpblendvb mem operand later.
+.if !LSB_CRC
+ neg LEN // Needed for indexing shuf_table
+.endif
+
+ // tmp = A*x^(8*LEN) mod x^128
+ // lsb: pshufb by [LEN, LEN+1, ..., 15, -1, -1, ..., -1]
+ // i.e. right-shift by LEN bytes.
+ // msb: pshufb by [-1, -1, ..., -1, 0, 1, ..., 15-LEN]
+ // i.e. left-shift by LEN bytes.
+ _cond_vex movdqu, "OFFSETOF_SHUF_TABLE+16(CONSTS_PTR,LEN)", %xmm3
+ _cond_vex pshufb, %xmm3, %xmm0, %xmm1
+
+ // C1 = floor(A / x^(128 - 8*LEN))
+ // lsb: pshufb by [-1, -1, ..., -1, 0, 1, ..., LEN-1]
+ // i.e. left-shift by 16-LEN bytes.
+ // msb: pshufb by [16-LEN, 16-LEN+1, ..., 15, -1, -1, ..., -1]
+ // i.e. right-shift by 16-LEN bytes.
+ _cond_vex pshufb, "OFFSETOF_SHUF_TABLE+32*!LSB_CRC(CONSTS_PTR,LEN)", \
+ %xmm0, %xmm0, unaligned_mem_tmp=%xmm4
+
+ // C2 = tmp + B. This is just a blend of tmp with the last 16 data
+ // bytes (reflected if msb-first). The blend mask is the shuffle table
+	// that was used to create tmp.  0 selects tmp, and 1 selects the last
+	// 16 data bytes.
+.if AVX_LEVEL == 0
+ movdqa %xmm0, %xmm4
+ movdqa %xmm3, %xmm0
+ pblendvb %xmm2, %xmm1 // uses %xmm0 as implicit operand
+ movdqa %xmm4, %xmm0
+.elseif LSB_CRC
+ vpblendvb %xmm3, -16(BUF,LEN), %xmm1, %xmm1
+.else
+ vpblendvb %xmm3, %xmm2, %xmm1, %xmm1
+.endif
+
+ // Fold C1 into C2 and store the 128-bit result in %xmm0.
+ _fold_vec %xmm0, %xmm1, CONSTS_XMM, %xmm4
+
+.Lreduce_128bits_to_crc\@:
+ // Compute the CRC as %xmm0 * x^n mod G. Here %xmm0 means the 128-bit
+ // polynomial stored in %xmm0 (using either lsb-first or msb-first bit
+ // order according to LSB_CRC), and G is the CRC's generator polynomial.
+
+ // First, multiply %xmm0 by x^n and reduce the result to 64+n bits:
+ //
+ // t0 := (x^(64+n) mod G) * floor(%xmm0 / x^64) +
+ // x^n * (%xmm0 mod x^64)
+ //
+ // Store t0 * x^(64-n) in %xmm0. I.e., actually do:
+ //
+ // %xmm0 := ((x^(64+n) mod G) * x^(64-n)) * floor(%xmm0 / x^64) +
+ // x^64 * (%xmm0 mod x^64)
+ //
+ // The extra unreduced factor of x^(64-n) makes floor(t0 / x^n) aligned
+ // to the HI64_TERMS of %xmm0 so that the next pclmulqdq can easily
+ // select it. The 64-bit constant (x^(64+n) mod G) * x^(64-n) in the
+ // msb-first case, or (x^(63+n) mod G) * x^(64-n) in the lsb-first case
+ // (considering the extra factor of x that gets implicitly introduced by
+ // each pclmulqdq when using lsb-first order), is identical to the
+ // constant that was used earlier for folding the LO64_TERMS across 128
+ // bits. Thus it's already available in LO64_TERMS of CONSTS_XMM.
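+	// (For example, for an lsb-first CRC-64 this constant is
+	// (x^127 mod G) * x^0, which is exactly the LO64_TERMS entry of its
+	// fold_across_128_bits_consts.)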
+ _pclmulqdq CONSTS_XMM, LO64_TERMS, %xmm0, HI64_TERMS, %xmm1
+.if LSB_CRC
+ _cond_vex psrldq, $8, %xmm0, %xmm0 // x^64 * (%xmm0 mod x^64)
+.else
+ _cond_vex pslldq, $8, %xmm0, %xmm0 // x^64 * (%xmm0 mod x^64)
+.endif
+ _cond_vex pxor, %xmm1, %xmm0, %xmm0
+ // The HI64_TERMS of %xmm0 now contain floor(t0 / x^n).
+ // The LO64_TERMS of %xmm0 now contain (t0 mod x^n) * x^(64-n).
+
+ // First step of Barrett reduction: Compute floor(t0 / G). This is the
+ // polynomial by which G needs to be multiplied to cancel out the x^n
+ // and higher terms of t0, i.e. to reduce t0 mod G. First do:
+ //
+ // t1 := floor(x^(63+n) / G) * x * floor(t0 / x^n)
+ //
+ // Then the desired value floor(t0 / G) is floor(t1 / x^64). The 63 in
+ // x^(63+n) is the maximum degree of floor(t0 / x^n) and thus the lowest
+ // value that makes enough precision be carried through the calculation.
+ //
+ // The '* x' makes it so the result is floor(t1 / x^64) rather than
+ // floor(t1 / x^63), making it qword-aligned in HI64_TERMS so that it
+ // can be extracted much more easily in the next step. In the lsb-first
+ // case the '* x' happens implicitly. In the msb-first case it must be
+ // done explicitly; floor(x^(63+n) / G) * x is a 65-bit constant, so the
+ // constant passed to pclmulqdq is (floor(x^(63+n) / G) * x) - x^64, and
+ // the multiplication by the x^64 term is handled using a pxor. The
+ // pxor causes the low 64 terms of t1 to be wrong, but they are unused.
+ _cond_vex movdqa, OFFSETOF_BARRETT_REDUCTION_CONSTS(CONSTS_PTR), CONSTS_XMM
+ _pclmulqdq CONSTS_XMM, HI64_TERMS, %xmm0, HI64_TERMS, %xmm1
+.if !LSB_CRC
+ _cond_vex pxor, %xmm0, %xmm1, %xmm1 // += x^64 * floor(t0 / x^n)
+.endif
+ // The HI64_TERMS of %xmm1 now contain floor(t1 / x^64) = floor(t0 / G).
+
+ // Second step of Barrett reduction: Cancel out the x^n and higher terms
+ // of t0 by subtracting the needed multiple of G. This gives the CRC:
+ //
+ // crc := t0 - (G * floor(t0 / G))
+ //
+ // But %xmm0 contains t0 * x^(64-n), so it's more convenient to do:
+ //
+ // crc := ((t0 * x^(64-n)) - ((G * x^(64-n)) * floor(t0 / G))) / x^(64-n)
+ //
+ // Furthermore, since the resulting CRC is n-bit, if mod x^n is
+ // explicitly applied to it then the x^n term of G makes no difference
+ // in the result and can be omitted. This helps keep the constant
+ // multiplier in 64 bits in most cases. This gives the following:
+ //
+ // %xmm0 := %xmm0 - (((G - x^n) * x^(64-n)) * floor(t0 / G))
+ // crc := (%xmm0 / x^(64-n)) mod x^n
+ //
+ // In the lsb-first case, each pclmulqdq implicitly introduces
+ // an extra factor of x, so in that case the constant that needs to be
+ // passed to pclmulqdq is actually '(G - x^n) * x^(63-n)' when n <= 63.
+ // For lsb-first CRCs where n=64, the extra factor of x cannot be as
+ // easily avoided. In that case, instead pass '(G - x^n - x^0) / x' to
+ // pclmulqdq and handle the x^0 term (i.e. 1) separately. (All CRC
+ // polynomials have nonzero x^n and x^0 terms.) It works out as: the
+	// CRC has to be XORed with the physically low qword of %xmm1, representing
+ // floor(t0 / G). The most efficient way to do that is to move it to
+ // the physically high qword and use a ternlog to combine the two XORs.
+.if LSB_CRC && \n == 64
+ _cond_vex punpcklqdq, %xmm1, %xmm2, %xmm2
+ _pclmulqdq CONSTS_XMM, LO64_TERMS, %xmm1, HI64_TERMS, %xmm1
+ .if AVX_LEVEL <= 2
+ _cond_vex pxor, %xmm2, %xmm0, %xmm0
+ _cond_vex pxor, %xmm1, %xmm0, %xmm0
+ .else
+ vpternlogq $0x96, %xmm2, %xmm1, %xmm0
+ .endif
+ _cond_vex "pextrq $1,", %xmm0, %rax // (%xmm0 / x^0) mod x^64
+.else
+ _pclmulqdq CONSTS_XMM, LO64_TERMS, %xmm1, HI64_TERMS, %xmm1
+ _cond_vex pxor, %xmm1, %xmm0, %xmm0
+ .if \n == 8
+ _cond_vex "pextrb $7 + LSB_CRC,", %xmm0, %eax // (%xmm0 / x^56) mod x^8
+ .elseif \n == 16
+ _cond_vex "pextrw $3 + LSB_CRC,", %xmm0, %eax // (%xmm0 / x^48) mod x^16
+ .elseif \n == 32
+ _cond_vex "pextrd $1 + LSB_CRC,", %xmm0, %eax // (%xmm0 / x^32) mod x^32
+ .else // \n == 64 && !LSB_CRC
+ _cond_vex movq, %xmm0, %rax // (%xmm0 / x^0) mod x^64
+ .endif
+.endif
+
+.if VL > 16
+ vzeroupper // Needed when ymm or zmm registers may have been used.
+.endif
+#ifdef __i386__
+ pop CONSTS_PTR
+#endif
+ RET
+.endm
+
+#ifdef CONFIG_AS_VPCLMULQDQ
+#define DEFINE_CRC_PCLMUL_FUNCS(prefix, bits, lsb) \
+SYM_FUNC_START(prefix##_pclmul_sse); \
+ _crc_pclmul n=bits, lsb_crc=lsb, vl=16, avx_level=0; \
+SYM_FUNC_END(prefix##_pclmul_sse); \
+ \
+SYM_FUNC_START(prefix##_vpclmul_avx2); \
+ _crc_pclmul n=bits, lsb_crc=lsb, vl=32, avx_level=2; \
+SYM_FUNC_END(prefix##_vpclmul_avx2); \
+ \
+SYM_FUNC_START(prefix##_vpclmul_avx512); \
+ _crc_pclmul n=bits, lsb_crc=lsb, vl=64, avx_level=512; \
+SYM_FUNC_END(prefix##_vpclmul_avx512);
+#else
+#define DEFINE_CRC_PCLMUL_FUNCS(prefix, bits, lsb) \
+SYM_FUNC_START(prefix##_pclmul_sse); \
+ _crc_pclmul n=bits, lsb_crc=lsb, vl=16, avx_level=0; \
+SYM_FUNC_END(prefix##_pclmul_sse);
+#endif // !CONFIG_AS_VPCLMULQDQ
diff --git a/arch/x86/lib/crc-pclmul-template.h b/arch/x86/lib/crc-pclmul-template.h
new file mode 100644
index 000000000000..c5b3bfe11d8d
--- /dev/null
+++ b/arch/x86/lib/crc-pclmul-template.h
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Macros for accessing the [V]PCLMULQDQ-based CRC functions that are
+ * instantiated by crc-pclmul-template.S
+ *
+ * Copyright 2025 Google LLC
+ *
+ * Author: Eric Biggers <ebiggers@google.com>
+ */
+#ifndef _CRC_PCLMUL_TEMPLATE_H
+#define _CRC_PCLMUL_TEMPLATE_H
+
+#include <asm/cpufeatures.h>
+#include <asm/simd.h>
+#include <crypto/internal/simd.h>
+#include <linux/static_call.h>
+#include "crc-pclmul-consts.h"
+
+#define DECLARE_CRC_PCLMUL_FUNCS(prefix, crc_t) \
+crc_t prefix##_pclmul_sse(crc_t crc, const u8 *p, size_t len, \
+ const void *consts_ptr); \
+crc_t prefix##_vpclmul_avx2(crc_t crc, const u8 *p, size_t len, \
+ const void *consts_ptr); \
+crc_t prefix##_vpclmul_avx512(crc_t crc, const u8 *p, size_t len, \
+ const void *consts_ptr); \
+DEFINE_STATIC_CALL(prefix##_pclmul, prefix##_pclmul_sse)
+
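+/*
+ * Initialize the static call declared by DECLARE_CRC_PCLMUL_FUNCS: upgrade it
+ * from the SSE implementation to the AVX2 or AVX512 implementation when the
+ * CPU and kernel support VPCLMULQDQ and the needed AVX features.
+ */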
+#define INIT_CRC_PCLMUL(prefix) \
+do { \
+ if (IS_ENABLED(CONFIG_AS_VPCLMULQDQ) && \
+ boot_cpu_has(X86_FEATURE_VPCLMULQDQ) && \
+ boot_cpu_has(X86_FEATURE_AVX2) && \
+ cpu_has_xfeatures(XFEATURE_MASK_YMM, NULL)) { \
+ if (boot_cpu_has(X86_FEATURE_AVX512BW) && \
+ boot_cpu_has(X86_FEATURE_AVX512VL) && \
+ !boot_cpu_has(X86_FEATURE_PREFER_YMM) && \
+ cpu_has_xfeatures(XFEATURE_MASK_AVX512, NULL)) { \
+ static_call_update(prefix##_pclmul, \
+ prefix##_vpclmul_avx512); \
+ } else { \
+ static_call_update(prefix##_pclmul, \
+ prefix##_vpclmul_avx2); \
+ } \
+ } \
+} while (0)
+
+/*
+ * Call a [V]PCLMULQDQ optimized CRC function if the data length is at least 16
+ * bytes, the CPU has PCLMULQDQ support, and the current context may use SIMD.
+ *
+ * 16 bytes is the minimum length supported by the [V]PCLMULQDQ functions.
+ * There is overhead associated with kernel_fpu_begin() and kernel_fpu_end(),
+ * varying by CPU and factors such as which parts of the "FPU" state userspace
+ * has touched, which could result in a larger cutoff being better. Indeed, a
+ * larger cutoff is usually better for a *single* message. However, the
+ * overhead of the FPU section gets amortized if multiple FPU sections get
+ * executed before returning to userspace, since the XSAVE and XRSTOR occur only
+ * once. Considering that and the fact that the [V]PCLMULQDQ code is lighter on
+ * the dcache than the table-based code is, a 16-byte cutoff seems to work well.
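+ *
+ * Example usage (a sketch mirroring the crc32 glue code):
+ *
+ *	static DEFINE_STATIC_KEY_FALSE(have_pclmulqdq);
+ *	DECLARE_CRC_PCLMUL_FUNCS(crc32_lsb, u32);
+ *
+ *	u32 crc32_le_arch(u32 crc, const u8 *p, size_t len)
+ *	{
+ *		CRC_PCLMUL(crc, p, len, crc32_lsb, crc32_lsb_0xedb88320_consts,
+ *			   have_pclmulqdq);
+ *		return crc32_le_base(crc, p, len);
+ *	}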
+ */
+#define CRC_PCLMUL(crc, p, len, prefix, consts, have_pclmulqdq) \
+do { \
+ if ((len) >= 16 && static_branch_likely(&(have_pclmulqdq)) && \
+ crypto_simd_usable()) { \
+ const void *consts_ptr; \
+ \
+ consts_ptr = (consts).fold_across_128_bits_consts; \
+ kernel_fpu_begin(); \
+ crc = static_call(prefix##_pclmul)((crc), (p), (len), \
+ consts_ptr); \
+ kernel_fpu_end(); \
+ return crc; \
+ } \
+} while (0)
+
+#endif /* _CRC_PCLMUL_TEMPLATE_H */
diff --git a/arch/x86/lib/crc-t10dif-glue.c b/arch/x86/lib/crc-t10dif-glue.c
index 13f07ddc9122..f89c335cde3c 100644
--- a/arch/x86/lib/crc-t10dif-glue.c
+++ b/arch/x86/lib/crc-t10dif-glue.c
@@ -1,37 +1,32 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
- * CRC-T10DIF using PCLMULQDQ instructions
+ * CRC-T10DIF using [V]PCLMULQDQ instructions
*
* Copyright 2024 Google LLC
*/
-#include <asm/cpufeatures.h>
-#include <asm/simd.h>
-#include <crypto/internal/simd.h>
#include <linux/crc-t10dif.h>
#include <linux/module.h>
+#include "crc-pclmul-template.h"
static DEFINE_STATIC_KEY_FALSE(have_pclmulqdq);
-asmlinkage u16 crc_t10dif_pcl(u16 init_crc, const u8 *buf, size_t len);
+DECLARE_CRC_PCLMUL_FUNCS(crc16_msb, u16);
u16 crc_t10dif_arch(u16 crc, const u8 *p, size_t len)
{
- if (len >= 16 &&
- static_key_enabled(&have_pclmulqdq) && crypto_simd_usable()) {
- kernel_fpu_begin();
- crc = crc_t10dif_pcl(crc, p, len);
- kernel_fpu_end();
- return crc;
- }
+ CRC_PCLMUL(crc, p, len, crc16_msb, crc16_msb_0x8bb7_consts,
+ have_pclmulqdq);
return crc_t10dif_generic(crc, p, len);
}
EXPORT_SYMBOL(crc_t10dif_arch);
static int __init crc_t10dif_x86_init(void)
{
- if (boot_cpu_has(X86_FEATURE_PCLMULQDQ))
+ if (boot_cpu_has(X86_FEATURE_PCLMULQDQ)) {
static_branch_enable(&have_pclmulqdq);
+ INIT_CRC_PCLMUL(crc16_msb);
+ }
return 0;
}
arch_initcall(crc_t10dif_x86_init);
@@ -41,11 +36,5 @@ static void __exit crc_t10dif_x86_exit(void)
}
module_exit(crc_t10dif_x86_exit);
-bool crc_t10dif_is_optimized(void)
-{
- return static_key_enabled(&have_pclmulqdq);
-}
-EXPORT_SYMBOL(crc_t10dif_is_optimized);
-
-MODULE_DESCRIPTION("CRC-T10DIF using PCLMULQDQ instructions");
+MODULE_DESCRIPTION("CRC-T10DIF using [V]PCLMULQDQ instructions");
MODULE_LICENSE("GPL");
diff --git a/arch/x86/lib/crc16-msb-pclmul.S b/arch/x86/lib/crc16-msb-pclmul.S
new file mode 100644
index 000000000000..e9fe248093a8
--- /dev/null
+++ b/arch/x86/lib/crc16-msb-pclmul.S
@@ -0,0 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+// Copyright 2025 Google LLC
+
+#include "crc-pclmul-template.S"
+
+DEFINE_CRC_PCLMUL_FUNCS(crc16_msb, /* bits= */ 16, /* lsb= */ 0)
diff --git a/arch/x86/lib/crc32-glue.c b/arch/x86/lib/crc32-glue.c
index 2dd18a886ded..e3f93b17ac3f 100644
--- a/arch/x86/lib/crc32-glue.c
+++ b/arch/x86/lib/crc32-glue.c
@@ -7,43 +7,20 @@
* Copyright 2024 Google LLC
*/
-#include <asm/cpufeatures.h>
-#include <asm/simd.h>
-#include <crypto/internal/simd.h>
#include <linux/crc32.h>
-#include <linux/linkage.h>
#include <linux/module.h>
-
-/* minimum size of buffer for crc32_pclmul_le_16 */
-#define CRC32_PCLMUL_MIN_LEN 64
+#include "crc-pclmul-template.h"
static DEFINE_STATIC_KEY_FALSE(have_crc32);
static DEFINE_STATIC_KEY_FALSE(have_pclmulqdq);
-u32 crc32_pclmul_le_16(u32 crc, const u8 *buffer, size_t len);
+DECLARE_CRC_PCLMUL_FUNCS(crc32_lsb, u32);
u32 crc32_le_arch(u32 crc, const u8 *p, size_t len)
{
- if (len >= CRC32_PCLMUL_MIN_LEN + 15 &&
- static_branch_likely(&have_pclmulqdq) && crypto_simd_usable()) {
- size_t n = -(uintptr_t)p & 15;
-
- /* align p to 16-byte boundary */
- if (n) {
- crc = crc32_le_base(crc, p, n);
- p += n;
- len -= n;
- }
- n = round_down(len, 16);
- kernel_fpu_begin();
- crc = crc32_pclmul_le_16(crc, p, n);
- kernel_fpu_end();
- p += n;
- len -= n;
- }
- if (len)
- crc = crc32_le_base(crc, p, len);
- return crc;
+ CRC_PCLMUL(crc, p, len, crc32_lsb, crc32_lsb_0xedb88320_consts,
+ have_pclmulqdq);
+ return crc32_le_base(crc, p, len);
}
EXPORT_SYMBOL(crc32_le_arch);
@@ -61,12 +38,12 @@ EXPORT_SYMBOL(crc32_le_arch);
asmlinkage u32 crc32c_x86_3way(u32 crc, const u8 *buffer, size_t len);
-u32 crc32c_le_arch(u32 crc, const u8 *p, size_t len)
+u32 crc32c_arch(u32 crc, const u8 *p, size_t len)
{
size_t num_longs;
if (!static_branch_likely(&have_crc32))
- return crc32c_le_base(crc, p, len);
+ return crc32c_base(crc, p, len);
if (IS_ENABLED(CONFIG_X86_64) && len >= CRC32C_PCLMUL_BREAKEVEN &&
static_branch_likely(&have_pclmulqdq) && crypto_simd_usable()) {
@@ -78,14 +55,22 @@ u32 crc32c_le_arch(u32 crc, const u8 *p, size_t len)
for (num_longs = len / sizeof(unsigned long);
num_longs != 0; num_longs--, p += sizeof(unsigned long))
- asm(CRC32_INST : "+r" (crc) : "rm" (*(unsigned long *)p));
+ asm(CRC32_INST : "+r" (crc) : ASM_INPUT_RM (*(unsigned long *)p));
- for (len %= sizeof(unsigned long); len; len--, p++)
- asm("crc32b %1, %0" : "+r" (crc) : "rm" (*p));
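+	/* Handle the remaining len % sizeof(unsigned long) bytes. */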
+ if (sizeof(unsigned long) > 4 && (len & 4)) {
+ asm("crc32l %1, %0" : "+r" (crc) : ASM_INPUT_RM (*(u32 *)p));
+ p += 4;
+ }
+ if (len & 2) {
+ asm("crc32w %1, %0" : "+r" (crc) : ASM_INPUT_RM (*(u16 *)p));
+ p += 2;
+ }
+ if (len & 1)
+ asm("crc32b %1, %0" : "+r" (crc) : ASM_INPUT_RM (*p));
return crc;
}
-EXPORT_SYMBOL(crc32c_le_arch);
+EXPORT_SYMBOL(crc32c_arch);
u32 crc32_be_arch(u32 crc, const u8 *p, size_t len)
{
@@ -97,8 +82,10 @@ static int __init crc32_x86_init(void)
{
if (boot_cpu_has(X86_FEATURE_XMM4_2))
static_branch_enable(&have_crc32);
- if (boot_cpu_has(X86_FEATURE_PCLMULQDQ))
+ if (boot_cpu_has(X86_FEATURE_PCLMULQDQ)) {
static_branch_enable(&have_pclmulqdq);
+ INIT_CRC_PCLMUL(crc32_lsb);
+ }
return 0;
}
arch_initcall(crc32_x86_init);
diff --git a/arch/x86/lib/crc32-pclmul.S b/arch/x86/lib/crc32-pclmul.S
index f9637789cac1..f20f40fb0172 100644
--- a/arch/x86/lib/crc32-pclmul.S
+++ b/arch/x86/lib/crc32-pclmul.S
@@ -1,217 +1,6 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright 2012 Xyratex Technology Limited
- *
- * Using hardware provided PCLMULQDQ instruction to accelerate the CRC32
- * calculation.
- * CRC32 polynomial:0x04c11db7(BE)/0xEDB88320(LE)
- * PCLMULQDQ is a new instruction in Intel SSE4.2, the reference can be found
- * at:
- * http://www.intel.com/products/processor/manuals/
- * Intel(R) 64 and IA-32 Architectures Software Developer's Manual
- * Volume 2B: Instruction Set Reference, N-Z
- *
- * Authors: Gregory Prestas <Gregory_Prestas@us.xyratex.com>
- * Alexander Boyko <Alexander_Boyko@xyratex.com>
- */
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+// Copyright 2025 Google LLC
-#include <linux/linkage.h>
+#include "crc-pclmul-template.S"
-
-.section .rodata
-.align 16
-/*
- * [x4*128+32 mod P(x) << 32)]' << 1 = 0x154442bd4
- * #define CONSTANT_R1 0x154442bd4LL
- *
- * [(x4*128-32 mod P(x) << 32)]' << 1 = 0x1c6e41596
- * #define CONSTANT_R2 0x1c6e41596LL
- */
-.Lconstant_R2R1:
- .octa 0x00000001c6e415960000000154442bd4
-/*
- * [(x128+32 mod P(x) << 32)]' << 1 = 0x1751997d0
- * #define CONSTANT_R3 0x1751997d0LL
- *
- * [(x128-32 mod P(x) << 32)]' << 1 = 0x0ccaa009e
- * #define CONSTANT_R4 0x0ccaa009eLL
- */
-.Lconstant_R4R3:
- .octa 0x00000000ccaa009e00000001751997d0
-/*
- * [(x64 mod P(x) << 32)]' << 1 = 0x163cd6124
- * #define CONSTANT_R5 0x163cd6124LL
- */
-.Lconstant_R5:
- .octa 0x00000000000000000000000163cd6124
-.Lconstant_mask32:
- .octa 0x000000000000000000000000FFFFFFFF
-/*
- * #define CRCPOLY_TRUE_LE_FULL 0x1DB710641LL
- *
- * Barrett Reduction constant (u64`) = u` = (x**64 / P(x))` = 0x1F7011641LL
- * #define CONSTANT_RU 0x1F7011641LL
- */
-.Lconstant_RUpoly:
- .octa 0x00000001F701164100000001DB710641
-
-#define CONSTANT %xmm0
-
-#ifdef __x86_64__
-#define CRC %edi
-#define BUF %rsi
-#define LEN %rdx
-#else
-#define CRC %eax
-#define BUF %edx
-#define LEN %ecx
-#endif
-
-
-
-.text
-/**
- * Calculate crc32
- * CRC - initial crc32
- * BUF - buffer (16 bytes aligned)
- * LEN - sizeof buffer (16 bytes aligned), LEN should be greater than 63
- * return %eax crc32
- * u32 crc32_pclmul_le_16(u32 crc, const u8 *buffer, size_t len);
- */
-
-SYM_FUNC_START(crc32_pclmul_le_16) /* buffer and buffer size are 16 bytes aligned */
- movdqa (BUF), %xmm1
- movdqa 0x10(BUF), %xmm2
- movdqa 0x20(BUF), %xmm3
- movdqa 0x30(BUF), %xmm4
- movd CRC, CONSTANT
- pxor CONSTANT, %xmm1
- sub $0x40, LEN
- add $0x40, BUF
- cmp $0x40, LEN
- jb .Lless_64
-
-#ifdef __x86_64__
- movdqa .Lconstant_R2R1(%rip), CONSTANT
-#else
- movdqa .Lconstant_R2R1, CONSTANT
-#endif
-
-.Lloop_64:/* 64 bytes Full cache line folding */
- prefetchnta 0x40(BUF)
- movdqa %xmm1, %xmm5
- movdqa %xmm2, %xmm6
- movdqa %xmm3, %xmm7
-#ifdef __x86_64__
- movdqa %xmm4, %xmm8
-#endif
- pclmulqdq $0x00, CONSTANT, %xmm1
- pclmulqdq $0x00, CONSTANT, %xmm2
- pclmulqdq $0x00, CONSTANT, %xmm3
-#ifdef __x86_64__
- pclmulqdq $0x00, CONSTANT, %xmm4
-#endif
- pclmulqdq $0x11, CONSTANT, %xmm5
- pclmulqdq $0x11, CONSTANT, %xmm6
- pclmulqdq $0x11, CONSTANT, %xmm7
-#ifdef __x86_64__
- pclmulqdq $0x11, CONSTANT, %xmm8
-#endif
- pxor %xmm5, %xmm1
- pxor %xmm6, %xmm2
- pxor %xmm7, %xmm3
-#ifdef __x86_64__
- pxor %xmm8, %xmm4
-#else
- /* xmm8 unsupported for x32 */
- movdqa %xmm4, %xmm5
- pclmulqdq $0x00, CONSTANT, %xmm4
- pclmulqdq $0x11, CONSTANT, %xmm5
- pxor %xmm5, %xmm4
-#endif
-
- pxor (BUF), %xmm1
- pxor 0x10(BUF), %xmm2
- pxor 0x20(BUF), %xmm3
- pxor 0x30(BUF), %xmm4
-
- sub $0x40, LEN
- add $0x40, BUF
- cmp $0x40, LEN
- jge .Lloop_64
-.Lless_64:/* Folding cache line into 128bit */
-#ifdef __x86_64__
- movdqa .Lconstant_R4R3(%rip), CONSTANT
-#else
- movdqa .Lconstant_R4R3, CONSTANT
-#endif
- prefetchnta (BUF)
-
- movdqa %xmm1, %xmm5
- pclmulqdq $0x00, CONSTANT, %xmm1
- pclmulqdq $0x11, CONSTANT, %xmm5
- pxor %xmm5, %xmm1
- pxor %xmm2, %xmm1
-
- movdqa %xmm1, %xmm5
- pclmulqdq $0x00, CONSTANT, %xmm1
- pclmulqdq $0x11, CONSTANT, %xmm5
- pxor %xmm5, %xmm1
- pxor %xmm3, %xmm1
-
- movdqa %xmm1, %xmm5
- pclmulqdq $0x00, CONSTANT, %xmm1
- pclmulqdq $0x11, CONSTANT, %xmm5
- pxor %xmm5, %xmm1
- pxor %xmm4, %xmm1
-
- cmp $0x10, LEN
- jb .Lfold_64
-.Lloop_16:/* Folding rest buffer into 128bit */
- movdqa %xmm1, %xmm5
- pclmulqdq $0x00, CONSTANT, %xmm1
- pclmulqdq $0x11, CONSTANT, %xmm5
- pxor %xmm5, %xmm1
- pxor (BUF), %xmm1
- sub $0x10, LEN
- add $0x10, BUF
- cmp $0x10, LEN
- jge .Lloop_16
-
-.Lfold_64:
- /* perform the last 64 bit fold, also adds 32 zeroes
- * to the input stream */
- pclmulqdq $0x01, %xmm1, CONSTANT /* R4 * xmm1.low */
- psrldq $0x08, %xmm1
- pxor CONSTANT, %xmm1
-
- /* final 32-bit fold */
- movdqa %xmm1, %xmm2
-#ifdef __x86_64__
- movdqa .Lconstant_R5(%rip), CONSTANT
- movdqa .Lconstant_mask32(%rip), %xmm3
-#else
- movdqa .Lconstant_R5, CONSTANT
- movdqa .Lconstant_mask32, %xmm3
-#endif
- psrldq $0x04, %xmm2
- pand %xmm3, %xmm1
- pclmulqdq $0x00, CONSTANT, %xmm1
- pxor %xmm2, %xmm1
-
- /* Finish up with the bit-reversed barrett reduction 64 ==> 32 bits */
-#ifdef __x86_64__
- movdqa .Lconstant_RUpoly(%rip), CONSTANT
-#else
- movdqa .Lconstant_RUpoly, CONSTANT
-#endif
- movdqa %xmm1, %xmm2
- pand %xmm3, %xmm1
- pclmulqdq $0x10, CONSTANT, %xmm1
- pand %xmm3, %xmm1
- pclmulqdq $0x00, CONSTANT, %xmm1
- pxor %xmm2, %xmm1
- pextrd $0x01, %xmm1, %eax
-
- RET
-SYM_FUNC_END(crc32_pclmul_le_16)
+DEFINE_CRC_PCLMUL_FUNCS(crc32_lsb, /* bits= */ 32, /* lsb= */ 1)
diff --git a/arch/x86/lib/crc64-glue.c b/arch/x86/lib/crc64-glue.c
new file mode 100644
index 000000000000..b0e1b719ecbf
--- /dev/null
+++ b/arch/x86/lib/crc64-glue.c
@@ -0,0 +1,50 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * CRC64 using [V]PCLMULQDQ instructions
+ *
+ * Copyright 2025 Google LLC
+ */
+
+#include <linux/crc64.h>
+#include <linux/module.h>
+#include "crc-pclmul-template.h"
+
+static DEFINE_STATIC_KEY_FALSE(have_pclmulqdq);
+
+DECLARE_CRC_PCLMUL_FUNCS(crc64_msb, u64);
+DECLARE_CRC_PCLMUL_FUNCS(crc64_lsb, u64);
+
+u64 crc64_be_arch(u64 crc, const u8 *p, size_t len)
+{
+ CRC_PCLMUL(crc, p, len, crc64_msb, crc64_msb_0x42f0e1eba9ea3693_consts,
+ have_pclmulqdq);
+ return crc64_be_generic(crc, p, len);
+}
+EXPORT_SYMBOL_GPL(crc64_be_arch);
+
+u64 crc64_nvme_arch(u64 crc, const u8 *p, size_t len)
+{
+ CRC_PCLMUL(crc, p, len, crc64_lsb, crc64_lsb_0x9a6c9329ac4bc9b5_consts,
+ have_pclmulqdq);
+ return crc64_nvme_generic(crc, p, len);
+}
+EXPORT_SYMBOL_GPL(crc64_nvme_arch);
+
+static int __init crc64_x86_init(void)
+{
+ if (boot_cpu_has(X86_FEATURE_PCLMULQDQ)) {
+ static_branch_enable(&have_pclmulqdq);
+ INIT_CRC_PCLMUL(crc64_msb);
+ INIT_CRC_PCLMUL(crc64_lsb);
+ }
+ return 0;
+}
+arch_initcall(crc64_x86_init);
+
+static void __exit crc64_x86_exit(void)
+{
+}
+module_exit(crc64_x86_exit);
+
+MODULE_DESCRIPTION("CRC64 using [V]PCLMULQDQ instructions");
+MODULE_LICENSE("GPL");
diff --git a/arch/x86/lib/crc64-pclmul.S b/arch/x86/lib/crc64-pclmul.S
new file mode 100644
index 000000000000..4173051b5197
--- /dev/null
+++ b/arch/x86/lib/crc64-pclmul.S
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+// Copyright 2025 Google LLC
+
+#include "crc-pclmul-template.S"
+
+DEFINE_CRC_PCLMUL_FUNCS(crc64_msb, /* bits= */ 64, /* lsb= */ 0)
+DEFINE_CRC_PCLMUL_FUNCS(crc64_lsb, /* bits= */ 64, /* lsb= */ 1)
diff --git a/arch/x86/lib/crct10dif-pcl-asm_64.S b/arch/x86/lib/crct10dif-pcl-asm_64.S
deleted file mode 100644
index 5286db5b8165..000000000000
--- a/arch/x86/lib/crct10dif-pcl-asm_64.S
+++ /dev/null
@@ -1,332 +0,0 @@
-########################################################################
-# Implement fast CRC-T10DIF computation with SSE and PCLMULQDQ instructions
-#
-# Copyright (c) 2013, Intel Corporation
-#
-# Authors:
-# Erdinc Ozturk <erdinc.ozturk@intel.com>
-# Vinodh Gopal <vinodh.gopal@intel.com>
-# James Guilford <james.guilford@intel.com>
-# Tim Chen <tim.c.chen@linux.intel.com>
-#
-# This software is available to you under a choice of one of two
-# licenses. You may choose to be licensed under the terms of the GNU
-# General Public License (GPL) Version 2, available from the file
-# COPYING in the main directory of this source tree, or the
-# OpenIB.org BSD license below:
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-#
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in the
-# documentation and/or other materials provided with the
-# distribution.
-#
-# * Neither the name of the Intel Corporation nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-#
-# THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION ""AS IS"" AND ANY
-# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION OR
-# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#
-# Reference paper titled "Fast CRC Computation for Generic
-# Polynomials Using PCLMULQDQ Instruction"
-# URL: http://www.intel.com/content/dam/www/public/us/en/documents
-# /white-papers/fast-crc-computation-generic-polynomials-pclmulqdq-paper.pdf
-#
-
-#include <linux/linkage.h>
-
-.text
-
-#define init_crc %edi
-#define buf %rsi
-#define len %rdx
-
-#define FOLD_CONSTS %xmm10
-#define BSWAP_MASK %xmm11
-
-# Fold reg1, reg2 into the next 32 data bytes, storing the result back into
-# reg1, reg2.
-.macro fold_32_bytes offset, reg1, reg2
- movdqu \offset(buf), %xmm9
- movdqu \offset+16(buf), %xmm12
- pshufb BSWAP_MASK, %xmm9
- pshufb BSWAP_MASK, %xmm12
- movdqa \reg1, %xmm8
- movdqa \reg2, %xmm13
- pclmulqdq $0x00, FOLD_CONSTS, \reg1
- pclmulqdq $0x11, FOLD_CONSTS, %xmm8
- pclmulqdq $0x00, FOLD_CONSTS, \reg2
- pclmulqdq $0x11, FOLD_CONSTS, %xmm13
- pxor %xmm9 , \reg1
- xorps %xmm8 , \reg1
- pxor %xmm12, \reg2
- xorps %xmm13, \reg2
-.endm
-
-# Fold src_reg into dst_reg.
-.macro fold_16_bytes src_reg, dst_reg
- movdqa \src_reg, %xmm8
- pclmulqdq $0x11, FOLD_CONSTS, \src_reg
- pclmulqdq $0x00, FOLD_CONSTS, %xmm8
- pxor %xmm8, \dst_reg
- xorps \src_reg, \dst_reg
-.endm
-
-#
-# u16 crc_t10dif_pcl(u16 init_crc, const *u8 buf, size_t len);
-#
-# Assumes len >= 16.
-#
-SYM_FUNC_START(crc_t10dif_pcl)
-
- movdqa .Lbswap_mask(%rip), BSWAP_MASK
-
- # For sizes less than 256 bytes, we can't fold 128 bytes at a time.
- cmp $256, len
- jl .Lless_than_256_bytes
-
- # Load the first 128 data bytes. Byte swapping is necessary to make the
- # bit order match the polynomial coefficient order.
- movdqu 16*0(buf), %xmm0
- movdqu 16*1(buf), %xmm1
- movdqu 16*2(buf), %xmm2
- movdqu 16*3(buf), %xmm3
- movdqu 16*4(buf), %xmm4
- movdqu 16*5(buf), %xmm5
- movdqu 16*6(buf), %xmm6
- movdqu 16*7(buf), %xmm7
- add $128, buf
- pshufb BSWAP_MASK, %xmm0
- pshufb BSWAP_MASK, %xmm1
- pshufb BSWAP_MASK, %xmm2
- pshufb BSWAP_MASK, %xmm3
- pshufb BSWAP_MASK, %xmm4
- pshufb BSWAP_MASK, %xmm5
- pshufb BSWAP_MASK, %xmm6
- pshufb BSWAP_MASK, %xmm7
-
- # XOR the first 16 data *bits* with the initial CRC value.
- pxor %xmm8, %xmm8
- pinsrw $7, init_crc, %xmm8
- pxor %xmm8, %xmm0
-
- movdqa .Lfold_across_128_bytes_consts(%rip), FOLD_CONSTS
-
- # Subtract 128 for the 128 data bytes just consumed. Subtract another
- # 128 to simplify the termination condition of the following loop.
- sub $256, len
-
- # While >= 128 data bytes remain (not counting xmm0-7), fold the 128
- # bytes xmm0-7 into them, storing the result back into xmm0-7.
-.Lfold_128_bytes_loop:
- fold_32_bytes 0, %xmm0, %xmm1
- fold_32_bytes 32, %xmm2, %xmm3
- fold_32_bytes 64, %xmm4, %xmm5
- fold_32_bytes 96, %xmm6, %xmm7
- add $128, buf
- sub $128, len
- jge .Lfold_128_bytes_loop
-
- # Now fold the 112 bytes in xmm0-xmm6 into the 16 bytes in xmm7.
-
- # Fold across 64 bytes.
- movdqa .Lfold_across_64_bytes_consts(%rip), FOLD_CONSTS
- fold_16_bytes %xmm0, %xmm4
- fold_16_bytes %xmm1, %xmm5
- fold_16_bytes %xmm2, %xmm6
- fold_16_bytes %xmm3, %xmm7
- # Fold across 32 bytes.
- movdqa .Lfold_across_32_bytes_consts(%rip), FOLD_CONSTS
- fold_16_bytes %xmm4, %xmm6
- fold_16_bytes %xmm5, %xmm7
- # Fold across 16 bytes.
- movdqa .Lfold_across_16_bytes_consts(%rip), FOLD_CONSTS
- fold_16_bytes %xmm6, %xmm7
-
- # Add 128 to get the correct number of data bytes remaining in 0...127
- # (not counting xmm7), following the previous extra subtraction by 128.
- # Then subtract 16 to simplify the termination condition of the
- # following loop.
- add $128-16, len
-
- # While >= 16 data bytes remain (not counting xmm7), fold the 16 bytes
- # xmm7 into them, storing the result back into xmm7.
- jl .Lfold_16_bytes_loop_done
-.Lfold_16_bytes_loop:
- movdqa %xmm7, %xmm8
- pclmulqdq $0x11, FOLD_CONSTS, %xmm7
- pclmulqdq $0x00, FOLD_CONSTS, %xmm8
- pxor %xmm8, %xmm7
- movdqu (buf), %xmm0
- pshufb BSWAP_MASK, %xmm0
- pxor %xmm0 , %xmm7
- add $16, buf
- sub $16, len
- jge .Lfold_16_bytes_loop
-
-.Lfold_16_bytes_loop_done:
- # Add 16 to get the correct number of data bytes remaining in 0...15
- # (not counting xmm7), following the previous extra subtraction by 16.
- add $16, len
- je .Lreduce_final_16_bytes
-
-.Lhandle_partial_segment:
- # Reduce the last '16 + len' bytes where 1 <= len <= 15 and the first 16
- # bytes are in xmm7 and the rest are the remaining data in 'buf'. To do
- # this without needing a fold constant for each possible 'len', redivide
- # the bytes into a first chunk of 'len' bytes and a second chunk of 16
- # bytes, then fold the first chunk into the second.
-
- movdqa %xmm7, %xmm2
-
- # xmm1 = last 16 original data bytes
- movdqu -16(buf, len), %xmm1
- pshufb BSWAP_MASK, %xmm1
-
- # xmm2 = high order part of second chunk: xmm7 left-shifted by 'len' bytes.
- lea .Lbyteshift_table+16(%rip), %rax
- sub len, %rax
- movdqu (%rax), %xmm0
- pshufb %xmm0, %xmm2
-
- # xmm7 = first chunk: xmm7 right-shifted by '16-len' bytes.
- pxor .Lmask1(%rip), %xmm0
- pshufb %xmm0, %xmm7
-
- # xmm1 = second chunk: 'len' bytes from xmm1 (low-order bytes),
- # then '16-len' bytes from xmm2 (high-order bytes).
- pblendvb %xmm2, %xmm1 #xmm0 is implicit
-
- # Fold the first chunk into the second chunk, storing the result in xmm7.
- movdqa %xmm7, %xmm8
- pclmulqdq $0x11, FOLD_CONSTS, %xmm7
- pclmulqdq $0x00, FOLD_CONSTS, %xmm8
- pxor %xmm8, %xmm7
- pxor %xmm1, %xmm7
-
-.Lreduce_final_16_bytes:
- # Reduce the 128-bit value M(x), stored in xmm7, to the final 16-bit CRC
-
- # Load 'x^48 * (x^48 mod G(x))' and 'x^48 * (x^80 mod G(x))'.
- movdqa .Lfinal_fold_consts(%rip), FOLD_CONSTS
-
- # Fold the high 64 bits into the low 64 bits, while also multiplying by
- # x^64. This produces a 128-bit value congruent to x^64 * M(x) and
- # whose low 48 bits are 0.
- movdqa %xmm7, %xmm0
- pclmulqdq $0x11, FOLD_CONSTS, %xmm7 # high bits * x^48 * (x^80 mod G(x))
- pslldq $8, %xmm0
- pxor %xmm0, %xmm7 # + low bits * x^64
-
- # Fold the high 32 bits into the low 96 bits. This produces a 96-bit
- # value congruent to x^64 * M(x) and whose low 48 bits are 0.
- movdqa %xmm7, %xmm0
- pand .Lmask2(%rip), %xmm0 # zero high 32 bits
- psrldq $12, %xmm7 # extract high 32 bits
- pclmulqdq $0x00, FOLD_CONSTS, %xmm7 # high 32 bits * x^48 * (x^48 mod G(x))
- pxor %xmm0, %xmm7 # + low bits
-
- # Load G(x) and floor(x^48 / G(x)).
- movdqa .Lbarrett_reduction_consts(%rip), FOLD_CONSTS
-
- # Use Barrett reduction to compute the final CRC value.
- movdqa %xmm7, %xmm0
- pclmulqdq $0x11, FOLD_CONSTS, %xmm7 # high 32 bits * floor(x^48 / G(x))
- psrlq $32, %xmm7 # /= x^32
- pclmulqdq $0x00, FOLD_CONSTS, %xmm7 # *= G(x)
- psrlq $48, %xmm0
- pxor %xmm7, %xmm0 # + low 16 nonzero bits
- # Final CRC value (x^16 * M(x)) mod G(x) is in low 16 bits of xmm0.
-
- pextrw $0, %xmm0, %eax
- RET
-
-.align 16
-.Lless_than_256_bytes:
- # Checksumming a buffer of length 16...255 bytes
-
- # Load the first 16 data bytes.
- movdqu (buf), %xmm7
- pshufb BSWAP_MASK, %xmm7
- add $16, buf
-
- # XOR the first 16 data *bits* with the initial CRC value.
- pxor %xmm0, %xmm0
- pinsrw $7, init_crc, %xmm0
- pxor %xmm0, %xmm7
-
- movdqa .Lfold_across_16_bytes_consts(%rip), FOLD_CONSTS
- cmp $16, len
- je .Lreduce_final_16_bytes # len == 16
- sub $32, len
- jge .Lfold_16_bytes_loop # 32 <= len <= 255
- add $16, len
- jmp .Lhandle_partial_segment # 17 <= len <= 31
-SYM_FUNC_END(crc_t10dif_pcl)
-
-.section .rodata, "a", @progbits
-.align 16
-
-# Fold constants precomputed from the polynomial 0x18bb7
-# G(x) = x^16 + x^15 + x^11 + x^9 + x^8 + x^7 + x^5 + x^4 + x^2 + x^1 + x^0
-.Lfold_across_128_bytes_consts:
- .quad 0x0000000000006123 # x^(8*128) mod G(x)
- .quad 0x0000000000002295 # x^(8*128+64) mod G(x)
-.Lfold_across_64_bytes_consts:
- .quad 0x0000000000001069 # x^(4*128) mod G(x)
- .quad 0x000000000000dd31 # x^(4*128+64) mod G(x)
-.Lfold_across_32_bytes_consts:
- .quad 0x000000000000857d # x^(2*128) mod G(x)
- .quad 0x0000000000007acc # x^(2*128+64) mod G(x)
-.Lfold_across_16_bytes_consts:
- .quad 0x000000000000a010 # x^(1*128) mod G(x)
- .quad 0x0000000000001faa # x^(1*128+64) mod G(x)
-.Lfinal_fold_consts:
- .quad 0x1368000000000000 # x^48 * (x^48 mod G(x))
- .quad 0x2d56000000000000 # x^48 * (x^80 mod G(x))
-.Lbarrett_reduction_consts:
- .quad 0x0000000000018bb7 # G(x)
- .quad 0x00000001f65a57f8 # floor(x^48 / G(x))
-
-.section .rodata.cst16.mask1, "aM", @progbits, 16
-.align 16
-.Lmask1:
- .octa 0x80808080808080808080808080808080
-
-.section .rodata.cst16.mask2, "aM", @progbits, 16
-.align 16
-.Lmask2:
- .octa 0x00000000FFFFFFFFFFFFFFFFFFFFFFFF
-
-.section .rodata.cst16.bswap_mask, "aM", @progbits, 16
-.align 16
-.Lbswap_mask:
- .octa 0x000102030405060708090A0B0C0D0E0F
-
-.section .rodata.cst32.byteshift_table, "aM", @progbits, 32
-.align 16
-# For 1 <= len <= 15, the 16-byte vector beginning at &byteshift_table[16 - len]
-# is the index vector to shift left by 'len' bytes, and is also {0x80, ...,
-# 0x80} XOR the index vector to shift right by '16 - len' bytes.
-.Lbyteshift_table:
- .byte 0x0, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87
- .byte 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f
- .byte 0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7
- .byte 0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe , 0x0
diff --git a/arch/x86/lib/delay.c b/arch/x86/lib/delay.c
index 23f81ca3f06b..e86eda2c0b04 100644
--- a/arch/x86/lib/delay.c
+++ b/arch/x86/lib/delay.c
@@ -131,7 +131,7 @@ static void delay_halt_mwaitx(u64 unused, u64 cycles)
* Use cpu_tss_rw as a cacheline-aligned, seldom accessed per-cpu
* variable as the monitor target.
*/
- __monitorx(raw_cpu_ptr(&cpu_tss_rw), 0, 0);
+ __monitorx(raw_cpu_ptr(&cpu_tss_rw), 0, 0);
/*
* AMD, like Intel, supports the EAX hint and EAX=0xf means, do not
diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
index 89ecd57c9d42..9d5654b8a72a 100644
--- a/arch/x86/lib/getuser.S
+++ b/arch/x86/lib/getuser.S
@@ -28,22 +28,20 @@
#include <linux/export.h>
#include <linux/linkage.h>
+#include <linux/objtool.h>
#include <asm/page_types.h>
#include <asm/errno.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#include <asm/asm.h>
#include <asm/smap.h>
+#include <asm/runtime-const.h>
#define ASM_BARRIER_NOSPEC ALTERNATIVE "", "lfence", X86_FEATURE_LFENCE_RDTSC
.macro check_range size:req
.if IS_ENABLED(CONFIG_X86_64)
- movq $0x0123456789abcdef,%rdx
- 1:
- .pushsection runtime_ptr_USER_PTR_MAX,"a"
- .long 1b - 8 - .
- .popsection
+ RUNTIME_CONST_PTR USER_PTR_MAX, rdx
cmp %rdx, %rax
cmova %rdx, %rax
.else
@@ -62,6 +60,7 @@
.text
SYM_FUNC_START(__get_user_1)
+ ANNOTATE_NOENDBR
check_range size=1
ASM_STAC
UACCESS movzbl (%_ASM_AX),%edx
@@ -72,6 +71,7 @@ SYM_FUNC_END(__get_user_1)
EXPORT_SYMBOL(__get_user_1)
SYM_FUNC_START(__get_user_2)
+ ANNOTATE_NOENDBR
check_range size=2
ASM_STAC
UACCESS movzwl (%_ASM_AX),%edx
@@ -82,6 +82,7 @@ SYM_FUNC_END(__get_user_2)
EXPORT_SYMBOL(__get_user_2)
SYM_FUNC_START(__get_user_4)
+ ANNOTATE_NOENDBR
check_range size=4
ASM_STAC
UACCESS movl (%_ASM_AX),%edx
@@ -92,6 +93,7 @@ SYM_FUNC_END(__get_user_4)
EXPORT_SYMBOL(__get_user_4)
SYM_FUNC_START(__get_user_8)
+ ANNOTATE_NOENDBR
#ifndef CONFIG_X86_64
xor %ecx,%ecx
#endif
@@ -111,6 +113,7 @@ EXPORT_SYMBOL(__get_user_8)
/* .. and the same for __get_user, just without the range checks */
SYM_FUNC_START(__get_user_nocheck_1)
+ ANNOTATE_NOENDBR
ASM_STAC
ASM_BARRIER_NOSPEC
UACCESS movzbl (%_ASM_AX),%edx
@@ -121,6 +124,7 @@ SYM_FUNC_END(__get_user_nocheck_1)
EXPORT_SYMBOL(__get_user_nocheck_1)
SYM_FUNC_START(__get_user_nocheck_2)
+ ANNOTATE_NOENDBR
ASM_STAC
ASM_BARRIER_NOSPEC
UACCESS movzwl (%_ASM_AX),%edx
@@ -131,6 +135,7 @@ SYM_FUNC_END(__get_user_nocheck_2)
EXPORT_SYMBOL(__get_user_nocheck_2)
SYM_FUNC_START(__get_user_nocheck_4)
+ ANNOTATE_NOENDBR
ASM_STAC
ASM_BARRIER_NOSPEC
UACCESS movl (%_ASM_AX),%edx
@@ -141,6 +146,7 @@ SYM_FUNC_END(__get_user_nocheck_4)
EXPORT_SYMBOL(__get_user_nocheck_4)
SYM_FUNC_START(__get_user_nocheck_8)
+ ANNOTATE_NOENDBR
ASM_STAC
ASM_BARRIER_NOSPEC
#ifdef CONFIG_X86_64
diff --git a/arch/x86/lib/hweight.S b/arch/x86/lib/hweight.S
index 774bdf3e6f0a..edbeb3ecad38 100644
--- a/arch/x86/lib/hweight.S
+++ b/arch/x86/lib/hweight.S
@@ -1,6 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/export.h>
#include <linux/linkage.h>
+#include <linux/objtool.h>
#include <asm/asm.h>
@@ -9,6 +10,7 @@
* %rdi: w
*/
SYM_FUNC_START(__sw_hweight32)
+ ANNOTATE_NOENDBR
#ifdef CONFIG_X86_64
movl %edi, %eax # w
@@ -42,6 +44,7 @@ EXPORT_SYMBOL(__sw_hweight32)
*/
#ifdef CONFIG_X86_64
SYM_FUNC_START(__sw_hweight64)
+ ANNOTATE_NOENDBR
pushq %rdi
pushq %rdx
diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
index 1b60ae81ecd8..aa1f92ee6b2e 100644
--- a/arch/x86/lib/memmove_64.S
+++ b/arch/x86/lib/memmove_64.S
@@ -8,6 +8,7 @@
*/
#include <linux/export.h>
#include <linux/linkage.h>
+#include <linux/cfi_types.h>
#include <asm/cpufeatures.h>
#include <asm/alternative.h>
@@ -26,7 +27,7 @@
* Output:
* rax: dest
*/
-SYM_FUNC_START(__memmove)
+SYM_TYPED_FUNC_START(__memmove)
mov %rdi, %rax
diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
index 0199d56cb479..d66b710d628f 100644
--- a/arch/x86/lib/memset_64.S
+++ b/arch/x86/lib/memset_64.S
@@ -3,6 +3,7 @@
#include <linux/export.h>
#include <linux/linkage.h>
+#include <linux/cfi_types.h>
#include <asm/cpufeatures.h>
#include <asm/alternative.h>
@@ -28,7 +29,7 @@
* only for the return value that is the same as the source input,
* which the compiler could/should do much better anyway.
*/
-SYM_FUNC_START(__memset)
+SYM_TYPED_FUNC_START(__memset)
ALTERNATIVE "jmp memset_orig", "", X86_FEATURE_FSRS
movq %rdi,%r9
diff --git a/arch/x86/lib/msr-reg.S b/arch/x86/lib/msr-reg.S
index ebd259f31496..5ef8494896e8 100644
--- a/arch/x86/lib/msr-reg.S
+++ b/arch/x86/lib/msr-reg.S
@@ -1,6 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/linkage.h>
#include <linux/errno.h>
+#include <linux/cfi_types.h>
#include <asm/asm.h>
#include <asm/msr.h>
@@ -12,7 +13,7 @@
*
*/
.macro op_safe_regs op
-SYM_FUNC_START(\op\()_safe_regs)
+SYM_TYPED_FUNC_START(\op\()_safe_regs)
pushq %rbx
pushq %r12
movq %rdi, %r10 /* Save pointer */
diff --git a/arch/x86/lib/msr.c b/arch/x86/lib/msr.c
index 4bf4fad5b148..5a18ecc04a6c 100644
--- a/arch/x86/lib/msr.c
+++ b/arch/x86/lib/msr.c
@@ -103,6 +103,7 @@ int msr_set_bit(u32 msr, u8 bit)
{
return __flip_bit(msr, bit, true);
}
+EXPORT_SYMBOL_GPL(msr_set_bit);
/**
* msr_clear_bit - Clear @bit in a MSR @msr.
@@ -118,6 +119,7 @@ int msr_clear_bit(u32 msr, u8 bit)
{
return __flip_bit(msr, bit, false);
}
+EXPORT_SYMBOL_GPL(msr_clear_bit);
#ifdef CONFIG_TRACEPOINTS
void do_trace_write_msr(unsigned int msr, u64 val, int failed)
diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S
index 975c9c18263d..46d9e9b98a61 100644
--- a/arch/x86/lib/putuser.S
+++ b/arch/x86/lib/putuser.S
@@ -13,6 +13,7 @@
*/
#include <linux/export.h>
#include <linux/linkage.h>
+#include <linux/objtool.h>
#include <asm/thread_info.h>
#include <asm/errno.h>
#include <asm/asm.h>
@@ -45,6 +46,7 @@
.text
SYM_FUNC_START(__put_user_1)
+ ANNOTATE_NOENDBR
check_range size=1
ASM_STAC
1: movb %al,(%_ASM_CX)
@@ -55,6 +57,7 @@ SYM_FUNC_END(__put_user_1)
EXPORT_SYMBOL(__put_user_1)
SYM_FUNC_START(__put_user_nocheck_1)
+ ANNOTATE_NOENDBR
ASM_STAC
2: movb %al,(%_ASM_CX)
xor %ecx,%ecx
@@ -64,6 +67,7 @@ SYM_FUNC_END(__put_user_nocheck_1)
EXPORT_SYMBOL(__put_user_nocheck_1)
SYM_FUNC_START(__put_user_2)
+ ANNOTATE_NOENDBR
check_range size=2
ASM_STAC
3: movw %ax,(%_ASM_CX)
@@ -74,6 +78,7 @@ SYM_FUNC_END(__put_user_2)
EXPORT_SYMBOL(__put_user_2)
SYM_FUNC_START(__put_user_nocheck_2)
+ ANNOTATE_NOENDBR
ASM_STAC
4: movw %ax,(%_ASM_CX)
xor %ecx,%ecx
@@ -83,6 +88,7 @@ SYM_FUNC_END(__put_user_nocheck_2)
EXPORT_SYMBOL(__put_user_nocheck_2)
SYM_FUNC_START(__put_user_4)
+ ANNOTATE_NOENDBR
check_range size=4
ASM_STAC
5: movl %eax,(%_ASM_CX)
@@ -93,6 +99,7 @@ SYM_FUNC_END(__put_user_4)
EXPORT_SYMBOL(__put_user_4)
SYM_FUNC_START(__put_user_nocheck_4)
+ ANNOTATE_NOENDBR
ASM_STAC
6: movl %eax,(%_ASM_CX)
xor %ecx,%ecx
@@ -102,6 +109,7 @@ SYM_FUNC_END(__put_user_nocheck_4)
EXPORT_SYMBOL(__put_user_nocheck_4)
SYM_FUNC_START(__put_user_8)
+ ANNOTATE_NOENDBR
check_range size=8
ASM_STAC
7: mov %_ASM_AX,(%_ASM_CX)
@@ -115,6 +123,7 @@ SYM_FUNC_END(__put_user_8)
EXPORT_SYMBOL(__put_user_8)
SYM_FUNC_START(__put_user_nocheck_8)
+ ANNOTATE_NOENDBR
ASM_STAC
9: mov %_ASM_AX,(%_ASM_CX)
#ifdef CONFIG_X86_32
diff --git a/arch/x86/lib/retpoline.S b/arch/x86/lib/retpoline.S
index 391059b2c6fb..a26c43abd47d 100644
--- a/arch/x86/lib/retpoline.S
+++ b/arch/x86/lib/retpoline.S
@@ -326,6 +326,7 @@ SYM_FUNC_END(retbleed_untrain_ret)
#if defined(CONFIG_MITIGATION_UNRET_ENTRY) || defined(CONFIG_MITIGATION_SRSO)
SYM_FUNC_START(entry_untrain_ret)
+ ANNOTATE_NOENDBR
ALTERNATIVE JMP_RETBLEED_UNTRAIN_RET, JMP_SRSO_UNTRAIN_RET, X86_FEATURE_SRSO
SYM_FUNC_END(entry_untrain_ret)
__EXPORT_THUNK(entry_untrain_ret)
@@ -342,7 +343,7 @@ SYM_FUNC_START(call_depth_return_thunk)
* case.
*/
CALL_THUNKS_DEBUG_INC_RETS
- shlq $5, PER_CPU_VAR(pcpu_hot + X86_call_depth)
+ shlq $5, PER_CPU_VAR(__x86_call_depth)
jz 1f
ANNOTATE_UNRET_SAFE
ret
diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
index e9251b89a9e9..654280aaa3e9 100644
--- a/arch/x86/lib/usercopy_64.c
+++ b/arch/x86/lib/usercopy_64.c
@@ -18,7 +18,7 @@
#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
/**
* clean_cache_range - write back a cache range with CLWB
- * @vaddr: virtual start address
+ * @addr: virtual start address
* @size: number of bytes to write back
*
* Write back a cache range using the CLWB (cache line write back)
diff --git a/arch/x86/math-emu/control_w.h b/arch/x86/math-emu/control_w.h
index 60f4dcc5edc3..93cbc89b34e2 100644
--- a/arch/x86/math-emu/control_w.h
+++ b/arch/x86/math-emu/control_w.h
@@ -11,7 +11,7 @@
#ifndef _CONTROLW_H_
#define _CONTROLW_H_
-#ifdef __ASSEMBLY__
+#ifdef __ASSEMBLER__
#define _Const_(x) $##x
#else
#define _Const_(x) x
diff --git a/arch/x86/math-emu/exception.h b/arch/x86/math-emu/exception.h
index 75230b977577..59961d350bc4 100644
--- a/arch/x86/math-emu/exception.h
+++ b/arch/x86/math-emu/exception.h
@@ -10,7 +10,7 @@
#ifndef _EXCEPTION_H_
#define _EXCEPTION_H_
-#ifdef __ASSEMBLY__
+#ifdef __ASSEMBLER__
#define Const_(x) $##x
#else
#define Const_(x) x
@@ -37,7 +37,7 @@
#define PRECISION_LOST_UP Const_((EX_Precision | SW_C1))
#define PRECISION_LOST_DOWN Const_(EX_Precision)
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#ifdef DEBUG
#define EXCEPTION(x) { printk("exception in %s at line %d\n", \
@@ -46,6 +46,6 @@
#define EXCEPTION(x) FPU_exception(x)
#endif
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif /* _EXCEPTION_H_ */
diff --git a/arch/x86/math-emu/fpu_emu.h b/arch/x86/math-emu/fpu_emu.h
index 0c122226ca56..def569c50b76 100644
--- a/arch/x86/math-emu/fpu_emu.h
+++ b/arch/x86/math-emu/fpu_emu.h
@@ -20,7 +20,7 @@
*/
#define PECULIAR_486
-#ifdef __ASSEMBLY__
+#ifdef __ASSEMBLER__
#include "fpu_asm.h"
#define Const(x) $##x
#else
@@ -68,7 +68,7 @@
#define FPU_Exception Const(0x80000000) /* Added to tag returns. */
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include "fpu_system.h"
@@ -213,6 +213,6 @@ asmlinkage int FPU_round(FPU_REG *arg, unsigned int extent, int dummy,
#include "fpu_proto.h"
#endif
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif /* _FPU_EMU_H_ */
diff --git a/arch/x86/math-emu/status_w.h b/arch/x86/math-emu/status_w.h
index b77bafec9526..f642957330ef 100644
--- a/arch/x86/math-emu/status_w.h
+++ b/arch/x86/math-emu/status_w.h
@@ -13,7 +13,7 @@
#include "fpu_emu.h" /* for definition of PECULIAR_486 */
-#ifdef __ASSEMBLY__
+#ifdef __ASSEMBLER__
#define Const__(x) $##x
#else
#define Const__(x) x
@@ -37,7 +37,7 @@
#define SW_Exc_Mask Const__(0x27f) /* Status word exception bit mask */
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#define COMP_A_gt_B 1
#define COMP_A_eq_B 2
@@ -63,6 +63,6 @@ static inline void setcc(int cc)
# define clear_C1()
#endif /* PECULIAR_486 */
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif /* _STATUS_H_ */
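
The __ASSEMBLY__ to __ASSEMBLER__ conversions in the math-emu headers above (and in the
realmode headers further down) rely on the preprocessor predefining __ASSEMBLER__ whenever
it runs over assembly sources, so the guard no longer depends on the build system passing
-D__ASSEMBLY__ explicitly. The guard idiom itself is unchanged; a minimal sketch:

    #ifdef __ASSEMBLER__          /* predefined by the compiler for .S files */
    #define Const(x) $##x         /* the assembler wants a '$' immediate prefix */
    #else
    #define Const(x) x            /* C code uses the plain constant */
    #endif
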
diff --git a/arch/x86/mm/ident_map.c b/arch/x86/mm/ident_map.c
index 5ab7bd2f1983..bd5d101c5c37 100644
--- a/arch/x86/mm/ident_map.c
+++ b/arch/x86/mm/ident_map.c
@@ -101,9 +101,7 @@ static int ident_pud_init(struct x86_mapping_info *info, pud_t *pud_page,
pmd_t *pmd;
bool use_gbpage;
- next = (addr & PUD_MASK) + PUD_SIZE;
- if (next > end)
- next = end;
+ next = pud_addr_end(addr, end);
/* if this is already a gbpage, this portion is already mapped */
if (pud_leaf(*pud))
@@ -154,10 +152,7 @@ static int ident_p4d_init(struct x86_mapping_info *info, p4d_t *p4d_page,
p4d_t *p4d = p4d_page + p4d_index(addr);
pud_t *pud;
- next = (addr & P4D_MASK) + P4D_SIZE;
- if (next > end)
- next = end;
-
+ next = p4d_addr_end(addr, end);
if (p4d_present(*p4d)) {
pud = pud_offset(p4d, 0);
result = ident_pud_init(info, pud, addr, next);
@@ -199,10 +194,7 @@ int kernel_ident_mapping_init(struct x86_mapping_info *info, pgd_t *pgd_page,
pgd_t *pgd = pgd_page + pgd_index(addr);
p4d_t *p4d;
- next = (addr & PGDIR_MASK) + PGDIR_SIZE;
- if (next > end)
- next = end;
-
+ next = pgd_addr_end(addr, end);
if (pgd_present(*pgd)) {
p4d = p4d_offset(pgd, 0);
result = ident_p4d_init(info, p4d, addr, next);
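
The three hunks above replace the open-coded "mask, add, clamp to end" sequence with the
generic pud_addr_end()/p4d_addr_end()/pgd_addr_end() helpers. Paraphrasing the generic
definition from include/linux/pgtable.h (a sketch, not the verbatim header), the helper
computes the same boundary while also handling an end address that wraps to zero:

    #define pud_addr_end(addr, end)                                      \
    ({  unsigned long __boundary = ((addr) + PUD_SIZE) & PUD_MASK;       \
        (__boundary - 1 < (end) - 1) ? __boundary : (end);               \
    })
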
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index 62aa4d66a032..bfa444a7dbb0 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -645,8 +645,13 @@ static void __init memory_map_top_down(unsigned long map_start,
*/
addr = memblock_phys_alloc_range(PMD_SIZE, PMD_SIZE, map_start,
map_end);
- memblock_phys_free(addr, PMD_SIZE);
- real_end = addr + PMD_SIZE;
+ if (!addr) {
+ pr_warn("Failed to release memory for alloc_low_pages()");
+ real_end = max(map_start, ALIGN_DOWN(map_end, PMD_SIZE));
+ } else {
+ memblock_phys_free(addr, PMD_SIZE);
+ real_end = addr + PMD_SIZE;
+ }
/* step_size need to be small so pgt_buf from BRK could cover it */
step_size = PMD_SIZE;
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index ac41b1e0940d..f288aad8dc74 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -582,7 +582,7 @@ static void __init lowmem_pfn_init(void)
"only %luMB highmem pages available, ignoring highmem size of %luMB!\n"
#define MSG_HIGHMEM_TRIMMED \
- "Warning: only 4GB will be used. Use a HIGHMEM64G enabled kernel!\n"
+ "Warning: only 4GB will be used. Support for for CONFIG_HIGHMEM64G was removed!\n"
/*
* We have more RAM than fits into lowmem - we try to put it into
* highmem, also taking the highmem=x boot parameter into account:
@@ -606,18 +606,13 @@ static void __init highmem_pfn_init(void)
#ifndef CONFIG_HIGHMEM
/* Maximum memory usable is what is directly addressable */
printk(KERN_WARNING "Warning only %ldMB will be used.\n", MAXMEM>>20);
- if (max_pfn > MAX_NONPAE_PFN)
- printk(KERN_WARNING "Use a HIGHMEM64G enabled kernel.\n");
- else
- printk(KERN_WARNING "Use a HIGHMEM enabled kernel.\n");
+ printk(KERN_WARNING "Use a HIGHMEM enabled kernel.\n");
max_pfn = MAXMEM_PFN;
#else /* !CONFIG_HIGHMEM */
-#ifndef CONFIG_HIGHMEM64G
if (max_pfn > MAX_NONPAE_PFN) {
max_pfn = MAX_NONPAE_PFN;
printk(KERN_WARNING MSG_HIGHMEM_TRIMMED);
}
-#endif /* !CONFIG_HIGHMEM64G */
#endif /* !CONFIG_HIGHMEM */
}
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 01ea7c6df303..519aa53114fa 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -469,8 +469,6 @@ phys_pte_init(pte_t *pte_page, unsigned long paddr, unsigned long paddr_end,
!e820__mapped_any(paddr & PAGE_MASK, paddr_next,
E820_TYPE_RAM) &&
!e820__mapped_any(paddr & PAGE_MASK, paddr_next,
- E820_TYPE_RESERVED_KERN) &&
- !e820__mapped_any(paddr & PAGE_MASK, paddr_next,
E820_TYPE_ACPI))
set_pte_init(pte, __pte(0), init);
continue;
@@ -526,8 +524,6 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long paddr, unsigned long paddr_end,
!e820__mapped_any(paddr & PMD_MASK, paddr_next,
E820_TYPE_RAM) &&
!e820__mapped_any(paddr & PMD_MASK, paddr_next,
- E820_TYPE_RESERVED_KERN) &&
- !e820__mapped_any(paddr & PMD_MASK, paddr_next,
E820_TYPE_ACPI))
set_pmd_init(pmd, __pmd(0), init);
continue;
@@ -615,8 +611,6 @@ phys_pud_init(pud_t *pud_page, unsigned long paddr, unsigned long paddr_end,
!e820__mapped_any(paddr & PUD_MASK, paddr_next,
E820_TYPE_RAM) &&
!e820__mapped_any(paddr & PUD_MASK, paddr_next,
- E820_TYPE_RESERVED_KERN) &&
- !e820__mapped_any(paddr & PUD_MASK, paddr_next,
E820_TYPE_ACPI))
set_pud_init(pud, __pud(0), init);
continue;
@@ -704,8 +698,6 @@ phys_p4d_init(p4d_t *p4d_page, unsigned long paddr, unsigned long paddr_end,
!e820__mapped_any(paddr & P4D_MASK, paddr_next,
E820_TYPE_RAM) &&
!e820__mapped_any(paddr & P4D_MASK, paddr_next,
- E820_TYPE_RESERVED_KERN) &&
- !e820__mapped_any(paddr & P4D_MASK, paddr_next,
E820_TYPE_ACPI))
set_p4d_init(p4d, __p4d(0), init);
continue;
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index 38ff7791a9c7..42c90b420773 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -503,6 +503,14 @@ void iounmap(volatile void __iomem *addr)
}
EXPORT_SYMBOL(iounmap);
+void *arch_memremap_wb(phys_addr_t phys_addr, size_t size, unsigned long flags)
+{
+ if ((flags & MEMREMAP_DEC) || cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT))
+ return (void __force *)ioremap_cache(phys_addr, size);
+
+ return (void __force *)ioremap_encrypted(phys_addr, size);
+}
+
/*
* Convert a physical pointer to a virtual kernel pointer for /dev/mem
* access
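
The new arch_memremap_wb() hook lets the x86 side pick between an encrypted and a
decrypted cacheable mapping based on the MEMREMAP_DEC flag and the platform's memory
encryption state; callers keep using the generic memremap() API unchanged. A hypothetical
caller, where phys and len are placeholders:

    /* Map a firmware-provided table write-back through the generic API. */
    void *virt = memremap(phys, len, MEMREMAP_WB);
    if (virt) {
        /* ... read the table ... */
        memunmap(virt);
    }
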
diff --git a/arch/x86/mm/kasan_init_64.c b/arch/x86/mm/kasan_init_64.c
index 9dddf19a5571..0539efd0d216 100644
--- a/arch/x86/mm/kasan_init_64.c
+++ b/arch/x86/mm/kasan_init_64.c
@@ -1,5 +1,4 @@
// SPDX-License-Identifier: GPL-2.0
-#define DISABLE_BRANCH_PROFILING
#define pr_fmt(fmt) "kasan: " fmt
/* cpu_feature_enabled() cannot be used this early */
diff --git a/arch/x86/mm/kaslr.c b/arch/x86/mm/kaslr.c
index 11a93542d198..3c306de52fd4 100644
--- a/arch/x86/mm/kaslr.c
+++ b/arch/x86/mm/kaslr.c
@@ -113,8 +113,14 @@ void __init kernel_randomize_memory(void)
memory_tb = DIV_ROUND_UP(max_pfn << PAGE_SHIFT, 1UL << TB_SHIFT) +
CONFIG_RANDOMIZE_MEMORY_PHYSICAL_PADDING;
- /* Adapt physical memory region size based on available memory */
- if (memory_tb < kaslr_regions[0].size_tb)
+ /*
+ * Adapt physical memory region size based on available memory,
+ * except when CONFIG_PCI_P2PDMA is enabled. P2PDMA exposes the
+ * device BAR space assuming the direct map space is large enough
+ * for creating a ZONE_DEVICE mapping in the direct map corresponding
+ * to the physical BAR address.
+ */
+ if (!IS_ENABLED(CONFIG_PCI_P2PDMA) && (memory_tb < kaslr_regions[0].size_tb))
kaslr_regions[0].size_tb = memory_tb;
/*
diff --git a/arch/x86/mm/mem_encrypt_amd.c b/arch/x86/mm/mem_encrypt_amd.c
index b56c5c073003..7490ff6d83b1 100644
--- a/arch/x86/mm/mem_encrypt_amd.c
+++ b/arch/x86/mm/mem_encrypt_amd.c
@@ -7,8 +7,6 @@
* Author: Tom Lendacky <thomas.lendacky@amd.com>
*/
-#define DISABLE_BRANCH_PROFILING
-
#include <linux/linkage.h>
#include <linux/init.h>
#include <linux/mm.h>
diff --git a/arch/x86/mm/mem_encrypt_boot.S b/arch/x86/mm/mem_encrypt_boot.S
index e25288ee33c2..f8a33b25ae86 100644
--- a/arch/x86/mm/mem_encrypt_boot.S
+++ b/arch/x86/mm/mem_encrypt_boot.S
@@ -72,6 +72,7 @@ SYM_FUNC_START(sme_encrypt_execute)
SYM_FUNC_END(sme_encrypt_execute)
SYM_FUNC_START(__enc_copy)
+ ANNOTATE_NOENDBR
/*
* Routine used to encrypt memory in place.
* This routine must be run outside of the kernel proper since
diff --git a/arch/x86/mm/mem_encrypt_identity.c b/arch/x86/mm/mem_encrypt_identity.c
index e6c7686f443a..5eecdd92da10 100644
--- a/arch/x86/mm/mem_encrypt_identity.c
+++ b/arch/x86/mm/mem_encrypt_identity.c
@@ -7,8 +7,6 @@
* Author: Tom Lendacky <thomas.lendacky@amd.com>
*/
-#define DISABLE_BRANCH_PROFILING
-
/*
* Since we're dealing with identity mappings, physical and virtual
* addresses are the same, so override these defines which are ultimately
@@ -565,7 +563,7 @@ void __head sme_enable(struct boot_params *bp)
}
RIP_REL_REF(sme_me_mask) = me_mask;
- physical_mask &= ~me_mask;
- cc_vendor = CC_VENDOR_AMD;
+ RIP_REL_REF(physical_mask) &= ~me_mask;
+ RIP_REL_REF(cc_vendor) = CC_VENDOR_AMD;
cc_set_mask(me_mask);
}
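
RIP_REL_REF() forces these early stores to go through a RIP-relative address, so they hit
the right location even though sme_enable() runs before the kernel is mapped at its final
virtual address. Roughly paraphrasing asm/asm.h (a sketch, not the verbatim macro):

    static __always_inline void *rip_rel_ptr(void *p)
    {
        /* Emit a %rip-relative LEA so no absolute relocation is needed. */
        asm("leaq %c1(%%rip), %0" : "=r"(p) : "i"(p));
        return p;
    }
    #define RIP_REL_REF(var) (*(typeof(&(var)))rip_rel_ptr(&(var)))
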
diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
index b8a6ffffb451..5ed2109211da 100644
--- a/arch/x86/mm/mmap.c
+++ b/arch/x86/mm/mmap.c
@@ -84,7 +84,6 @@ static unsigned long mmap_base(unsigned long rnd, unsigned long task_size,
{
unsigned long gap = rlim_stack->rlim_cur;
unsigned long pad = stack_maxrandom_size(task_size) + stack_guard_gap;
- unsigned long gap_min, gap_max;
/* Values close to RLIM_INFINITY can overflow. */
if (gap + pad > gap)
@@ -94,13 +93,7 @@ static unsigned long mmap_base(unsigned long rnd, unsigned long task_size,
* Top of mmap area (just below the process stack).
* Leave an at least ~128 MB hole with possible stack randomization.
*/
- gap_min = SIZE_128M;
- gap_max = (task_size / 6) * 5;
-
- if (gap < gap_min)
- gap = gap_min;
- else if (gap > gap_max)
- gap = gap_max;
+ gap = clamp(gap, SIZE_128M, (task_size / 6) * 5);
return PAGE_ALIGN(task_size - gap - rnd);
}
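
clamp() here is the ordinary kernel helper, so the removed if/else ladder is equivalent to
a min()/max() pair; with the default 8 MiB RLIMIT_STACK the gap is raised to the 128 MiB
floor, while an unlimited stack rlimit is capped at 5/6 of the task size:

    /* Equivalent expansion of the clamp() used above. */
    gap = min(max(gap, SIZE_128M), (task_size / 6) * 5);
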
diff --git a/arch/x86/mm/pat/cpa-test.c b/arch/x86/mm/pat/cpa-test.c
index 3d2f7f0a6ed1..ad3c1feec990 100644
--- a/arch/x86/mm/pat/cpa-test.c
+++ b/arch/x86/mm/pat/cpa-test.c
@@ -183,7 +183,7 @@ static int pageattr_test(void)
break;
case 1:
- err = change_page_attr_set(addrs, len[1], PAGE_CPA_TEST, 1);
+ err = change_page_attr_set(addrs, len[i], PAGE_CPA_TEST, 1);
break;
case 2:
diff --git a/arch/x86/mm/pat/memtype.c b/arch/x86/mm/pat/memtype.c
index feb8cc6a12bf..e40861c9cb90 100644
--- a/arch/x86/mm/pat/memtype.c
+++ b/arch/x86/mm/pat/memtype.c
@@ -43,6 +43,7 @@
#include <linux/fs.h>
#include <linux/rbtree.h>
+#include <asm/cpu_device_id.h>
#include <asm/cacheflush.h>
#include <asm/cacheinfo.h>
#include <asm/processor.h>
@@ -290,9 +291,8 @@ void __init pat_bp_init(void)
return;
}
- if ((c->x86_vendor == X86_VENDOR_INTEL) &&
- (((c->x86 == 0x6) && (c->x86_model <= 0xd)) ||
- ((c->x86 == 0xf) && (c->x86_model <= 0x6)))) {
+ if ((c->x86_vfm >= INTEL_PENTIUM_PRO && c->x86_vfm <= INTEL_PENTIUM_M_DOTHAN) ||
+ (c->x86_vfm >= INTEL_P4_WILLAMETTE && c->x86_vfm <= INTEL_P4_CEDARMILL)) {
/*
* PAT support with the lower four entries. Intel Pentium 2,
* 3, M, and 4 are affected by PAT errata, which makes the
diff --git a/arch/x86/mm/pat/set_memory.c b/arch/x86/mm/pat/set_memory.c
index ef4514d64c05..72405d315b41 100644
--- a/arch/x86/mm/pat/set_memory.c
+++ b/arch/x86/mm/pat/set_memory.c
@@ -73,6 +73,7 @@ static DEFINE_SPINLOCK(cpa_lock);
#define CPA_ARRAY 2
#define CPA_PAGES_ARRAY 4
#define CPA_NO_CHECK_ALIAS 8 /* Do not search for aliases */
+#define CPA_COLLAPSE 16 /* try to collapse large pages */
static inline pgprot_t cachemode2pgprot(enum page_cache_mode pcm)
{
@@ -105,6 +106,18 @@ static void split_page_count(int level)
direct_pages_count[level - 1] += PTRS_PER_PTE;
}
+static void collapse_page_count(int level)
+{
+ direct_pages_count[level]++;
+ if (system_state == SYSTEM_RUNNING) {
+ if (level == PG_LEVEL_2M)
+ count_vm_event(DIRECT_MAP_LEVEL2_COLLAPSE);
+ else if (level == PG_LEVEL_1G)
+ count_vm_event(DIRECT_MAP_LEVEL3_COLLAPSE);
+ }
+ direct_pages_count[level - 1] -= PTRS_PER_PTE;
+}
+
void arch_report_meminfo(struct seq_file *m)
{
seq_printf(m, "DirectMap4k: %8lu kB\n",
@@ -122,6 +135,7 @@ void arch_report_meminfo(struct seq_file *m)
}
#else
static inline void split_page_count(int level) { }
+static inline void collapse_page_count(int level) { }
#endif
#ifdef CONFIG_X86_CPA_STATISTICS
@@ -211,14 +225,14 @@ within(unsigned long addr, unsigned long start, unsigned long end)
return addr >= start && addr < end;
}
+#ifdef CONFIG_X86_64
+
static inline int
within_inclusive(unsigned long addr, unsigned long start, unsigned long end)
{
return addr >= start && addr <= end;
}
-#ifdef CONFIG_X86_64
-
/*
* The kernel image is mapped into two places in the virtual address space
* (addresses without KASLR, of course):
@@ -394,16 +408,49 @@ static void __cpa_flush_tlb(void *data)
flush_tlb_one_kernel(fix_addr(__cpa_addr(cpa, i)));
}
-static void cpa_flush(struct cpa_data *data, int cache)
+static int collapse_large_pages(unsigned long addr, struct list_head *pgtables);
+
+static void cpa_collapse_large_pages(struct cpa_data *cpa)
+{
+ unsigned long start, addr, end;
+ struct ptdesc *ptdesc, *tmp;
+ LIST_HEAD(pgtables);
+ int collapsed = 0;
+ int i;
+
+ if (cpa->flags & (CPA_PAGES_ARRAY | CPA_ARRAY)) {
+ for (i = 0; i < cpa->numpages; i++)
+ collapsed += collapse_large_pages(__cpa_addr(cpa, i),
+ &pgtables);
+ } else {
+ addr = __cpa_addr(cpa, 0);
+ start = addr & PMD_MASK;
+ end = addr + PAGE_SIZE * cpa->numpages;
+
+ for (addr = start; within(addr, start, end); addr += PMD_SIZE)
+ collapsed += collapse_large_pages(addr, &pgtables);
+ }
+
+ if (!collapsed)
+ return;
+
+ flush_tlb_all();
+
+ list_for_each_entry_safe(ptdesc, tmp, &pgtables, pt_list) {
+ list_del(&ptdesc->pt_list);
+ __free_page(ptdesc_page(ptdesc));
+ }
+}
+
+static void cpa_flush(struct cpa_data *cpa, int cache)
{
- struct cpa_data *cpa = data;
unsigned int i;
BUG_ON(irqs_disabled() && !early_boot_irqs_disabled);
if (cache && !static_cpu_has(X86_FEATURE_CLFLUSH)) {
cpa_flush_all(cache);
- return;
+ goto collapse_large_pages;
}
if (cpa->force_flush_all || cpa->numpages > tlb_single_page_flush_ceiling)
@@ -412,7 +459,7 @@ static void cpa_flush(struct cpa_data *data, int cache)
on_each_cpu(__cpa_flush_tlb, cpa, 1);
if (!cache)
- return;
+ goto collapse_large_pages;
mb();
for (i = 0; i < cpa->numpages; i++) {
@@ -428,6 +475,10 @@ static void cpa_flush(struct cpa_data *data, int cache)
clflush_cache_range_opt((void *)fix_addr(addr), PAGE_SIZE);
}
mb();
+
+collapse_large_pages:
+ if (cpa->flags & CPA_COLLAPSE)
+ cpa_collapse_large_pages(cpa);
}
static bool overlaps(unsigned long r1_start, unsigned long r1_end,
@@ -1197,6 +1248,161 @@ static int split_large_page(struct cpa_data *cpa, pte_t *kpte,
return 0;
}
+static int collapse_pmd_page(pmd_t *pmd, unsigned long addr,
+ struct list_head *pgtables)
+{
+ pmd_t _pmd, old_pmd;
+ pte_t *pte, first;
+ unsigned long pfn;
+ pgprot_t pgprot;
+ int i = 0;
+
+ addr &= PMD_MASK;
+ pte = pte_offset_kernel(pmd, addr);
+ first = *pte;
+ pfn = pte_pfn(first);
+
+ /* Make sure alignment is suitable */
+ if (PFN_PHYS(pfn) & ~PMD_MASK)
+ return 0;
+
+ /* The page is 4k intentionally */
+ if (pte_flags(first) & _PAGE_KERNEL_4K)
+ return 0;
+
+ /* Check that the rest of PTEs are compatible with the first one */
+ for (i = 1, pte++; i < PTRS_PER_PTE; i++, pte++) {
+ pte_t entry = *pte;
+
+ if (!pte_present(entry))
+ return 0;
+ if (pte_flags(entry) != pte_flags(first))
+ return 0;
+ if (pte_pfn(entry) != pte_pfn(first) + i)
+ return 0;
+ }
+
+ old_pmd = *pmd;
+
+ /* Success: set up a large page */
+ pgprot = pgprot_4k_2_large(pte_pgprot(first));
+ pgprot_val(pgprot) |= _PAGE_PSE;
+ _pmd = pfn_pmd(pfn, pgprot);
+ set_pmd(pmd, _pmd);
+
+ /* Queue the page table to be freed after TLB flush */
+ list_add(&page_ptdesc(pmd_page(old_pmd))->pt_list, pgtables);
+
+ if (IS_ENABLED(CONFIG_X86_32) && !SHARED_KERNEL_PMD) {
+ struct page *page;
+
+ /* Update all PGD tables to use the same large page */
+ list_for_each_entry(page, &pgd_list, lru) {
+ pgd_t *pgd = (pgd_t *)page_address(page) + pgd_index(addr);
+ p4d_t *p4d = p4d_offset(pgd, addr);
+ pud_t *pud = pud_offset(p4d, addr);
+ pmd_t *pmd = pmd_offset(pud, addr);
+ /* Something is wrong if the entries don't match */
+ if (WARN_ON(pmd_val(old_pmd) != pmd_val(*pmd)))
+ continue;
+ set_pmd(pmd, _pmd);
+ }
+ }
+
+ if (virt_addr_valid(addr) && pfn_range_is_mapped(pfn, pfn + 1))
+ collapse_page_count(PG_LEVEL_2M);
+
+ return 1;
+}
+
+static int collapse_pud_page(pud_t *pud, unsigned long addr,
+ struct list_head *pgtables)
+{
+ unsigned long pfn;
+ pmd_t *pmd, first;
+ int i;
+
+ if (!direct_gbpages)
+ return 0;
+
+ addr &= PUD_MASK;
+ pmd = pmd_offset(pud, addr);
+ first = *pmd;
+
+ /*
+ * To restore PUD page all PMD entries must be large and
+ * have suitable alignment
+ */
+ pfn = pmd_pfn(first);
+ if (!pmd_leaf(first) || (PFN_PHYS(pfn) & ~PUD_MASK))
+ return 0;
+
+ /*
+ * To restore PUD page, all following PMDs must be compatible with the
+ * first one.
+ */
+ for (i = 1, pmd++; i < PTRS_PER_PMD; i++, pmd++) {
+ pmd_t entry = *pmd;
+
+ if (!pmd_present(entry) || !pmd_leaf(entry))
+ return 0;
+ if (pmd_flags(entry) != pmd_flags(first))
+ return 0;
+ if (pmd_pfn(entry) != pmd_pfn(first) + i * PTRS_PER_PTE)
+ return 0;
+ }
+
+ /* Restore PUD page and queue page table to be freed after TLB flush */
+ list_add(&page_ptdesc(pud_page(*pud))->pt_list, pgtables);
+ set_pud(pud, pfn_pud(pfn, pmd_pgprot(first)));
+
+ if (virt_addr_valid(addr) && pfn_range_is_mapped(pfn, pfn + 1))
+ collapse_page_count(PG_LEVEL_1G);
+
+ return 1;
+}
+
+/*
+ * Collapse PMD and PUD pages in the kernel mapping around the address where
+ * possible.
+ *
+ * Caller must flush TLB and free page tables queued on the list before
+ * touching the new entries. CPU must not see TLB entries of different size
+ * with different attributes.
+ */
+static int collapse_large_pages(unsigned long addr, struct list_head *pgtables)
+{
+ int collapsed = 0;
+ pgd_t *pgd;
+ p4d_t *p4d;
+ pud_t *pud;
+ pmd_t *pmd;
+
+ addr &= PMD_MASK;
+
+ spin_lock(&pgd_lock);
+ pgd = pgd_offset_k(addr);
+ if (pgd_none(*pgd))
+ goto out;
+ p4d = p4d_offset(pgd, addr);
+ if (p4d_none(*p4d))
+ goto out;
+ pud = pud_offset(p4d, addr);
+ if (!pud_present(*pud) || pud_leaf(*pud))
+ goto out;
+ pmd = pmd_offset(pud, addr);
+ if (!pmd_present(*pmd) || pmd_leaf(*pmd))
+ goto out;
+
+ collapsed = collapse_pmd_page(pmd, addr, pgtables);
+ if (collapsed)
+ collapsed += collapse_pud_page(pud, addr, pgtables);
+
+out:
+ spin_unlock(&pgd_lock);
+ return collapsed;
+}
+
static bool try_to_free_pte_page(pte_t *pte)
{
int i;
@@ -1942,19 +2148,6 @@ static inline int cpa_clear_pages_array(struct page **pages, int numpages,
CPA_PAGES_ARRAY, pages);
}
-/*
- * __set_memory_prot is an internal helper for callers that have been passed
- * a pgprot_t value from upper layers and a reservation has already been taken.
- * If you want to set the pgprot to a specific page protocol, use the
- * set_memory_xx() functions.
- */
-int __set_memory_prot(unsigned long addr, int numpages, pgprot_t prot)
-{
- return change_page_attr_set_clr(&addr, numpages, prot,
- __pgprot(~pgprot_val(prot)), 0, 0,
- NULL);
-}
-
int _set_memory_uc(unsigned long addr, int numpages)
{
/*
@@ -2120,7 +2313,8 @@ int set_memory_rox(unsigned long addr, int numpages)
if (__supported_pte_mask & _PAGE_NX)
clr.pgprot |= _PAGE_NX;
- return change_page_attr_clear(&addr, numpages, clr, 0);
+ return change_page_attr_set_clr(&addr, numpages, __pgprot(0), clr, 0,
+ CPA_COLLAPSE, NULL);
}
int set_memory_rw(unsigned long addr, int numpages)
@@ -2147,7 +2341,8 @@ int set_memory_p(unsigned long addr, int numpages)
int set_memory_4k(unsigned long addr, int numpages)
{
- return change_page_attr_set_clr(&addr, numpages, __pgprot(0),
+ return change_page_attr_set_clr(&addr, numpages,
+ __pgprot(_PAGE_KERNEL_4K),
__pgprot(0), 1, 0, NULL);
}
@@ -2420,7 +2615,7 @@ static int __set_pages_np(struct page *page, int numpages)
.pgd = NULL,
.numpages = numpages,
.mask_set = __pgprot(0),
- .mask_clr = __pgprot(_PAGE_PRESENT | _PAGE_RW),
+ .mask_clr = __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY),
.flags = CPA_NO_CHECK_ALIAS };
/*
@@ -2507,7 +2702,7 @@ int __init kernel_map_pages_in_pgd(pgd_t *pgd, u64 pfn, unsigned long address,
.pgd = pgd,
.numpages = numpages,
.mask_set = __pgprot(0),
- .mask_clr = __pgprot(~page_flags & (_PAGE_NX|_PAGE_RW)),
+ .mask_clr = __pgprot(~page_flags & (_PAGE_NX|_PAGE_RW|_PAGE_DIRTY)),
.flags = CPA_NO_CHECK_ALIAS,
};
@@ -2550,7 +2745,7 @@ int __init kernel_unmap_pages_in_pgd(pgd_t *pgd, unsigned long address,
.pgd = pgd,
.numpages = numpages,
.mask_set = __pgprot(0),
- .mask_clr = __pgprot(_PAGE_PRESENT | _PAGE_RW),
+ .mask_clr = __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY),
.flags = CPA_NO_CHECK_ALIAS,
};
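
Because set_memory_rox() now passes CPA_COLLAPSE, existing users such as module or BPF
text need no call-site changes to benefit from the large-page restore. A sketch of a
typical caller, with text_addr and text_pages as placeholder names:

    /* Protect freshly written executable text; collapse happens as a side effect. */
    err = set_memory_rox((unsigned long)text_addr, text_pages);
    if (err)
        pr_warn("failed to make text read-only+exec: %d\n", err);
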
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index 1fef5ad32d5a..cec321fb74f2 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -12,59 +12,15 @@ phys_addr_t physical_mask __ro_after_init = (1ULL << __PHYSICAL_MASK_SHIFT) - 1;
EXPORT_SYMBOL(physical_mask);
#endif
-#ifdef CONFIG_HIGHPTE
-#define PGTABLE_HIGHMEM __GFP_HIGHMEM
-#else
-#define PGTABLE_HIGHMEM 0
-#endif
-
-#ifndef CONFIG_PARAVIRT
-#ifndef CONFIG_PT_RECLAIM
-static inline
-void paravirt_tlb_remove_table(struct mmu_gather *tlb, void *table)
-{
- struct ptdesc *ptdesc = (struct ptdesc *)table;
-
- pagetable_dtor(ptdesc);
- tlb_remove_page(tlb, ptdesc_page(ptdesc));
-}
-#else
-static inline
-void paravirt_tlb_remove_table(struct mmu_gather *tlb, void *table)
-{
- tlb_remove_table(tlb, table);
-}
-#endif /* !CONFIG_PT_RECLAIM */
-#endif /* !CONFIG_PARAVIRT */
-
-gfp_t __userpte_alloc_gfp = GFP_PGTABLE_USER | PGTABLE_HIGHMEM;
-
pgtable_t pte_alloc_one(struct mm_struct *mm)
{
- return __pte_alloc_one(mm, __userpte_alloc_gfp);
-}
-
-static int __init setup_userpte(char *arg)
-{
- if (!arg)
- return -EINVAL;
-
- /*
- * "userpte=nohigh" disables allocation of user pagetables in
- * high memory.
- */
- if (strcmp(arg, "nohigh") == 0)
- __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
- else
- return -EINVAL;
- return 0;
+ return __pte_alloc_one(mm, GFP_PGTABLE_USER);
}
-early_param("userpte", setup_userpte);
void ___pte_free_tlb(struct mmu_gather *tlb, struct page *pte)
{
paravirt_release_pte(page_to_pfn(pte));
- paravirt_tlb_remove_table(tlb, page_ptdesc(pte));
+ tlb_remove_table(tlb, page_ptdesc(pte));
}
#if CONFIG_PGTABLE_LEVELS > 2
@@ -78,21 +34,21 @@ void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
#ifdef CONFIG_X86_PAE
tlb->need_flush_all = 1;
#endif
- paravirt_tlb_remove_table(tlb, virt_to_ptdesc(pmd));
+ tlb_remove_table(tlb, virt_to_ptdesc(pmd));
}
#if CONFIG_PGTABLE_LEVELS > 3
void ___pud_free_tlb(struct mmu_gather *tlb, pud_t *pud)
{
paravirt_release_pud(__pa(pud) >> PAGE_SHIFT);
- paravirt_tlb_remove_table(tlb, virt_to_ptdesc(pud));
+ tlb_remove_table(tlb, virt_to_ptdesc(pud));
}
#if CONFIG_PGTABLE_LEVELS > 4
void ___p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d)
{
paravirt_release_p4d(__pa(p4d) >> PAGE_SHIFT);
- paravirt_tlb_remove_table(tlb, virt_to_ptdesc(p4d));
+ tlb_remove_table(tlb, virt_to_ptdesc(p4d));
}
#endif /* CONFIG_PGTABLE_LEVELS > 4 */
#endif /* CONFIG_PGTABLE_LEVELS > 3 */
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 6cf881a942bb..e459d97ef397 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -74,13 +74,15 @@
* use different names for each of them:
*
* ASID - [0, TLB_NR_DYN_ASIDS-1]
- * the canonical identifier for an mm
+ * the canonical identifier for an mm, dynamically allocated on each CPU
+ * [TLB_NR_DYN_ASIDS, MAX_ASID_AVAILABLE-1]
+ * the canonical, global identifier for an mm, identical across all CPUs
*
- * kPCID - [1, TLB_NR_DYN_ASIDS]
+ * kPCID - [1, MAX_ASID_AVAILABLE]
* the value we write into the PCID part of CR3; corresponds to the
* ASID+1, because PCID 0 is special.
*
- * uPCID - [2048 + 1, 2048 + TLB_NR_DYN_ASIDS]
+ * uPCID - [2048 + 1, 2048 + MAX_ASID_AVAILABLE]
* for KPTI each mm has two address spaces and thus needs two
* PCID values, but we can still do with a single ASID denomination
* for each mm. Corresponds to kPCID + 2048.
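
For reference, the ASID-to-PCID mapping described in the comment above is implemented by
small helpers along these lines (a sketch of the tlbflush.h idiom, not the verbatim code):

    static inline u16 kern_pcid(u16 asid)
    {
        /* PCID 0 is special, so kernel PCIDs start at ASID + 1. */
        return asid + 1;
    }

    static inline u16 user_pcid(u16 asid)
    {
        /* KPTI user mappings set bit 11, i.e. kPCID + 2048. */
        return kern_pcid(asid) | (1 << X86_CR3_PTI_PCID_USER_BIT);
    }
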
@@ -225,6 +227,20 @@ static void choose_new_asid(struct mm_struct *next, u64 next_tlb_gen,
return;
}
+ /*
+ * TLB consistency for global ASIDs is maintained with hardware assisted
+ * remote TLB flushing. Global ASIDs are always up to date.
+ */
+ if (cpu_feature_enabled(X86_FEATURE_INVLPGB)) {
+ u16 global_asid = mm_global_asid(next);
+
+ if (global_asid) {
+ *new_asid = global_asid;
+ *need_flush = false;
+ return;
+ }
+ }
+
if (this_cpu_read(cpu_tlbstate.invalidate_other))
clear_asid_other();
@@ -252,6 +268,268 @@ static void choose_new_asid(struct mm_struct *next, u64 next_tlb_gen,
}
/*
+ * Global ASIDs are allocated for multi-threaded processes that are
+ * active on multiple CPUs simultaneously, giving each of those
+ * processes the same PCID on every CPU, for use with hardware-assisted
+ * TLB shootdown on remote CPUs, like AMD INVLPGB or Intel RAR.
+ *
+ * These global ASIDs are held for the lifetime of the process.
+ */
+static DEFINE_RAW_SPINLOCK(global_asid_lock);
+static u16 last_global_asid = MAX_ASID_AVAILABLE;
+static DECLARE_BITMAP(global_asid_used, MAX_ASID_AVAILABLE);
+static DECLARE_BITMAP(global_asid_freed, MAX_ASID_AVAILABLE);
+static int global_asid_available = MAX_ASID_AVAILABLE - TLB_NR_DYN_ASIDS - 1;
+
+/*
+ * When the search for a free ASID in the global ASID space reaches
+ * MAX_ASID_AVAILABLE, a global TLB flush guarantees that previously
+ * freed global ASIDs are safe to re-use.
+ *
+ * This way the global flush only needs to happen at ASID rollover
+ * time, and not at ASID allocation time.
+ */
+static void reset_global_asid_space(void)
+{
+ lockdep_assert_held(&global_asid_lock);
+
+ invlpgb_flush_all_nonglobals();
+
+ /*
+ * The TLB flush above makes it safe to re-use the previously
+ * freed global ASIDs.
+ */
+ bitmap_andnot(global_asid_used, global_asid_used,
+ global_asid_freed, MAX_ASID_AVAILABLE);
+ bitmap_clear(global_asid_freed, 0, MAX_ASID_AVAILABLE);
+
+ /* Restart the search from the start of global ASID space. */
+ last_global_asid = TLB_NR_DYN_ASIDS;
+}
+
+static u16 allocate_global_asid(void)
+{
+ u16 asid;
+
+ lockdep_assert_held(&global_asid_lock);
+
+ /* The previous allocation hit the edge of available address space */
+ if (last_global_asid >= MAX_ASID_AVAILABLE - 1)
+ reset_global_asid_space();
+
+ asid = find_next_zero_bit(global_asid_used, MAX_ASID_AVAILABLE, last_global_asid);
+
+ if (asid >= MAX_ASID_AVAILABLE && !global_asid_available) {
+ /* This should never happen. */
+ VM_WARN_ONCE(1, "Unable to allocate global ASID despite %d available\n",
+ global_asid_available);
+ return 0;
+ }
+
+ /* Claim this global ASID. */
+ __set_bit(asid, global_asid_used);
+ last_global_asid = asid;
+ global_asid_available--;
+ return asid;
+}
+
+/*
+ * Check whether a process is currently active on more than @threshold CPUs.
+ * This is a cheap estimate of whether it makes sense to assign a global
+ * ASID to this process and use broadcast TLB invalidation.
+ */
+static bool mm_active_cpus_exceeds(struct mm_struct *mm, int threshold)
+{
+ int count = 0;
+ int cpu;
+
+ /* This quick check should eliminate most single threaded programs. */
+ if (cpumask_weight(mm_cpumask(mm)) <= threshold)
+ return false;
+
+ /* Slower check to make sure. */
+ for_each_cpu(cpu, mm_cpumask(mm)) {
+ /* Skip the CPUs that aren't really running this process. */
+ if (per_cpu(cpu_tlbstate.loaded_mm, cpu) != mm)
+ continue;
+
+ if (per_cpu(cpu_tlbstate_shared.is_lazy, cpu))
+ continue;
+
+ if (++count > threshold)
+ return true;
+ }
+ return false;
+}
+
+/*
+ * Assign a global ASID to the current process, protecting against
+ * races between multiple threads in the process.
+ */
+static void use_global_asid(struct mm_struct *mm)
+{
+ u16 asid;
+
+ guard(raw_spinlock_irqsave)(&global_asid_lock);
+
+ /* This process is already using broadcast TLB invalidation. */
+ if (mm_global_asid(mm))
+ return;
+
+ /*
+ * The last global ASID was consumed while waiting for the lock.
+ *
+ * If this fires, a more aggressive ASID reuse scheme might be
+ * needed.
+ */
+ if (!global_asid_available) {
+ VM_WARN_ONCE(1, "Ran out of global ASIDs\n");
+ return;
+ }
+
+ asid = allocate_global_asid();
+ if (!asid)
+ return;
+
+ mm_assign_global_asid(mm, asid);
+}
+
+void mm_free_global_asid(struct mm_struct *mm)
+{
+ if (!cpu_feature_enabled(X86_FEATURE_INVLPGB))
+ return;
+
+ if (!mm_global_asid(mm))
+ return;
+
+ guard(raw_spinlock_irqsave)(&global_asid_lock);
+
+ /* The global ASID can be re-used only after flush at wrap-around. */
+#ifdef CONFIG_BROADCAST_TLB_FLUSH
+ __set_bit(mm->context.global_asid, global_asid_freed);
+
+ mm->context.global_asid = 0;
+ global_asid_available++;
+#endif
+}
+
+/*
+ * Is the mm transitioning from a CPU-local ASID to a global ASID?
+ */
+static bool mm_needs_global_asid(struct mm_struct *mm, u16 asid)
+{
+ u16 global_asid = mm_global_asid(mm);
+
+ if (!cpu_feature_enabled(X86_FEATURE_INVLPGB))
+ return false;
+
+ /* Process is transitioning to a global ASID */
+ if (global_asid && asid != global_asid)
+ return true;
+
+ return false;
+}
+
+/*
+ * x86 has 4k ASIDs (2k when compiled with KPTI), but the largest x86
+ * systems have over 8k CPUs. Because of this potential ASID shortage,
+ * global ASIDs are handed out to processes that have frequent TLB
+ * flushes and are active on 4 or more CPUs simultaneously.
+ */
+static void consider_global_asid(struct mm_struct *mm)
+{
+ if (!cpu_feature_enabled(X86_FEATURE_INVLPGB))
+ return;
+
+ /* Check every once in a while. */
+ if ((current->pid & 0x1f) != (jiffies & 0x1f))
+ return;
+
+ /*
+ * Assign a global ASID if the process is active on
+ * 4 or more CPUs simultaneously.
+ */
+ if (mm_active_cpus_exceeds(mm, 3))
+ use_global_asid(mm);
+}
+
+static void finish_asid_transition(struct flush_tlb_info *info)
+{
+ struct mm_struct *mm = info->mm;
+ int bc_asid = mm_global_asid(mm);
+ int cpu;
+
+ if (!mm_in_asid_transition(mm))
+ return;
+
+ for_each_cpu(cpu, mm_cpumask(mm)) {
+ /*
+ * The remote CPU is context switching. Wait for that to
+ * finish, to catch the unlikely case of it switching to
+ * the target mm with an out of date ASID.
+ */
+ while (READ_ONCE(per_cpu(cpu_tlbstate.loaded_mm, cpu)) == LOADED_MM_SWITCHING)
+ cpu_relax();
+
+ if (READ_ONCE(per_cpu(cpu_tlbstate.loaded_mm, cpu)) != mm)
+ continue;
+
+ /*
+ * If at least one CPU is not using the global ASID yet,
+ * send a TLB flush IPI. The IPI should cause stragglers
+ * to transition soon.
+ *
+ * This can race with the CPU switching to another task;
+ * that results in a (harmless) extra IPI.
+ */
+ if (READ_ONCE(per_cpu(cpu_tlbstate.loaded_mm_asid, cpu)) != bc_asid) {
+ flush_tlb_multi(mm_cpumask(info->mm), info);
+ return;
+ }
+ }
+
+ /* All the CPUs running this process are using the global ASID. */
+ mm_clear_asid_transition(mm);
+}
+
+static void broadcast_tlb_flush(struct flush_tlb_info *info)
+{
+ bool pmd = info->stride_shift == PMD_SHIFT;
+ unsigned long asid = mm_global_asid(info->mm);
+ unsigned long addr = info->start;
+
+ /*
+ * TLB flushes with INVLPGB are kicked off asynchronously.
+ * The inc_mm_tlb_gen() guarantees page table updates are done
+ * before these TLB flushes happen.
+ */
+ if (info->end == TLB_FLUSH_ALL) {
+ invlpgb_flush_single_pcid_nosync(kern_pcid(asid));
+ /* Do any CPUs supporting INVLPGB need PTI? */
+ if (cpu_feature_enabled(X86_FEATURE_PTI))
+ invlpgb_flush_single_pcid_nosync(user_pcid(asid));
+ } else do {
+ unsigned long nr = 1;
+
+ if (info->stride_shift <= PMD_SHIFT) {
+ nr = (info->end - addr) >> info->stride_shift;
+ nr = clamp_val(nr, 1, invlpgb_count_max);
+ }
+
+ invlpgb_flush_user_nr_nosync(kern_pcid(asid), addr, nr, pmd);
+ if (cpu_feature_enabled(X86_FEATURE_PTI))
+ invlpgb_flush_user_nr_nosync(user_pcid(asid), addr, nr, pmd);
+
+ addr += nr << info->stride_shift;
+ } while (addr < info->end);
+
+ finish_asid_transition(info);
+
+ /* Wait for the INVLPGBs kicked off above to finish. */
+ __tlbsync();
+}
+
+/*
* Given an ASID, flush the corresponding user ASID. We can delay this
* until the next time we switch to it.
*
@@ -447,8 +725,7 @@ static void cond_mitigation(struct task_struct *next)
* different context than the user space task which ran
* last on this CPU.
*/
- if ((prev_mm & ~LAST_USER_MM_SPEC_MASK) !=
- (unsigned long)next->mm)
+ if ((prev_mm & ~LAST_USER_MM_SPEC_MASK) != (unsigned long)next->mm)
indirect_branch_prediction_barrier();
}
@@ -556,7 +833,8 @@ void switch_mm_irqs_off(struct mm_struct *unused, struct mm_struct *next,
*/
if (prev == next) {
/* Not actually switching mm's */
- VM_WARN_ON(this_cpu_read(cpu_tlbstate.ctxs[prev_asid].ctx_id) !=
+ VM_WARN_ON(is_dyn_asid(prev_asid) &&
+ this_cpu_read(cpu_tlbstate.ctxs[prev_asid].ctx_id) !=
next->context.ctx_id);
/*
@@ -573,6 +851,20 @@ void switch_mm_irqs_off(struct mm_struct *unused, struct mm_struct *next,
!cpumask_test_cpu(cpu, mm_cpumask(next))))
cpumask_set_cpu(cpu, mm_cpumask(next));
+ /* Check if the current mm is transitioning to a global ASID */
+ if (mm_needs_global_asid(next, prev_asid)) {
+ next_tlb_gen = atomic64_read(&next->context.tlb_gen);
+ choose_new_asid(next, next_tlb_gen, &new_asid, &need_flush);
+ goto reload_tlb;
+ }
+
+ /*
+ * Broadcast TLB invalidation keeps this ASID up to date
+ * all the time.
+ */
+ if (is_global_asid(prev_asid))
+ return;
+
/*
* If the CPU is not in lazy TLB mode, we are just switching
* from one thread in a process to another thread in the same
@@ -607,6 +899,13 @@ void switch_mm_irqs_off(struct mm_struct *unused, struct mm_struct *next,
cond_mitigation(tsk);
/*
+ * Let nmi_uaccess_okay() and finish_asid_transition()
+ * know that CR3 is changing.
+ */
+ this_cpu_write(cpu_tlbstate.loaded_mm, LOADED_MM_SWITCHING);
+ barrier();
+
+ /*
* Leave this CPU in prev's mm_cpumask. Atomic writes to
* mm_cpumask can be expensive under contention. The CPU
* will be removed lazily at TLB flush time.
@@ -620,14 +919,12 @@ void switch_mm_irqs_off(struct mm_struct *unused, struct mm_struct *next,
next_tlb_gen = atomic64_read(&next->context.tlb_gen);
choose_new_asid(next, next_tlb_gen, &new_asid, &need_flush);
-
- /* Let nmi_uaccess_okay() know that we're changing CR3. */
- this_cpu_write(cpu_tlbstate.loaded_mm, LOADED_MM_SWITCHING);
- barrier();
}
+reload_tlb:
new_lam = mm_lam_cr3_mask(next);
if (need_flush) {
+ VM_WARN_ON_ONCE(is_global_asid(new_asid));
this_cpu_write(cpu_tlbstate.ctxs[new_asid].ctx_id, next->context.ctx_id);
this_cpu_write(cpu_tlbstate.ctxs[new_asid].tlb_gen, next_tlb_gen);
load_new_mm_cr3(next->pgd, new_asid, new_lam, true);
@@ -746,7 +1043,7 @@ static void flush_tlb_func(void *info)
const struct flush_tlb_info *f = info;
struct mm_struct *loaded_mm = this_cpu_read(cpu_tlbstate.loaded_mm);
u32 loaded_mm_asid = this_cpu_read(cpu_tlbstate.loaded_mm_asid);
- u64 local_tlb_gen = this_cpu_read(cpu_tlbstate.ctxs[loaded_mm_asid].tlb_gen);
+ u64 local_tlb_gen;
bool local = smp_processor_id() == f->initiating_cpu;
unsigned long nr_invalidate = 0;
u64 mm_tlb_gen;
@@ -769,6 +1066,16 @@ static void flush_tlb_func(void *info)
if (unlikely(loaded_mm == &init_mm))
return;
+ /* Reload the ASID if transitioning into or out of a global ASID */
+ if (mm_needs_global_asid(loaded_mm, loaded_mm_asid)) {
+ switch_mm_irqs_off(NULL, loaded_mm, NULL);
+ loaded_mm_asid = this_cpu_read(cpu_tlbstate.loaded_mm_asid);
+ }
+
+ /* Broadcast ASIDs are always kept up to date with INVLPGB. */
+ if (is_global_asid(loaded_mm_asid))
+ return;
+
VM_WARN_ON(this_cpu_read(cpu_tlbstate.ctxs[loaded_mm_asid].ctx_id) !=
loaded_mm->context.ctx_id);
@@ -786,6 +1093,8 @@ static void flush_tlb_func(void *info)
return;
}
+ local_tlb_gen = this_cpu_read(cpu_tlbstate.ctxs[loaded_mm_asid].tlb_gen);
+
if (unlikely(f->new_tlb_gen != TLB_GENERATION_INVALID &&
f->new_tlb_gen <= local_tlb_gen)) {
/*
@@ -953,7 +1262,7 @@ STATIC_NOPV void native_flush_tlb_multi(const struct cpumask *cpumask,
* up on the new contents of what used to be page tables, while
* doing a speculative memory access.
*/
- if (info->freed_tables)
+ if (info->freed_tables || mm_in_asid_transition(info->mm))
on_each_cpu_mask(cpumask, flush_tlb_func, (void *)info, true);
else
on_each_cpu_cond_mask(should_flush_tlb, flush_tlb_func,
@@ -1000,6 +1309,15 @@ static struct flush_tlb_info *get_flush_tlb_info(struct mm_struct *mm,
BUG_ON(this_cpu_inc_return(flush_tlb_info_idx) != 1);
#endif
+ /*
+ * If the number of flushes is so large that a full flush
+ * would be faster, do a full flush.
+ */
+ if ((end - start) >> stride_shift > tlb_single_page_flush_ceiling) {
+ start = 0;
+ end = TLB_FLUSH_ALL;
+ }
+
info->start = start;
info->end = end;
info->mm = mm;
@@ -1026,17 +1344,8 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
bool freed_tables)
{
struct flush_tlb_info *info;
+ int cpu = get_cpu();
u64 new_tlb_gen;
- int cpu;
-
- cpu = get_cpu();
-
- /* Should we flush just the requested range? */
- if ((end == TLB_FLUSH_ALL) ||
- ((end - start) >> stride_shift) > tlb_single_page_flush_ceiling) {
- start = 0;
- end = TLB_FLUSH_ALL;
- }
/* This is also a barrier that synchronizes with switch_mm(). */
new_tlb_gen = inc_mm_tlb_gen(mm);
@@ -1049,9 +1358,12 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
* a local TLB flush is needed. Optimize this use-case by calling
* flush_tlb_func_local() directly in this case.
*/
- if (cpumask_any_but(mm_cpumask(mm), cpu) < nr_cpu_ids) {
+ if (mm_global_asid(mm)) {
+ broadcast_tlb_flush(info);
+ } else if (cpumask_any_but(mm_cpumask(mm), cpu) < nr_cpu_ids) {
info->trim_cpumask = should_trim_cpumask(mm);
flush_tlb_multi(mm_cpumask(mm), info);
+ consider_global_asid(mm);
} else if (mm == this_cpu_read(cpu_tlbstate.loaded_mm)) {
lockdep_assert_irqs_enabled();
local_irq_disable();
@@ -1064,7 +1376,6 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
mmu_notifier_arch_invalidate_secondary_tlbs(mm, start, end);
}
-
static void do_flush_tlb_all(void *info)
{
count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
@@ -1074,7 +1385,32 @@ static void do_flush_tlb_all(void *info)
void flush_tlb_all(void)
{
count_vm_tlb_event(NR_TLB_REMOTE_FLUSH);
- on_each_cpu(do_flush_tlb_all, NULL, 1);
+
+ /* First try (faster) hardware-assisted TLB invalidation. */
+ if (cpu_feature_enabled(X86_FEATURE_INVLPGB))
+ invlpgb_flush_all();
+ else
+ /* Fall back to the IPI-based invalidation. */
+ on_each_cpu(do_flush_tlb_all, NULL, 1);
+}
+
+/* Flush an arbitrarily large range of memory with INVLPGB. */
+static void invlpgb_kernel_range_flush(struct flush_tlb_info *info)
+{
+ unsigned long addr, nr;
+
+ for (addr = info->start; addr < info->end; addr += nr << PAGE_SHIFT) {
+ nr = (info->end - addr) >> PAGE_SHIFT;
+
+ /*
+ * INVLPGB has a limit on the size of ranges it can
+ * flush. Break up large flushes.
+ */
+ nr = clamp_val(nr, 1, invlpgb_count_max);
+
+ invlpgb_flush_addr_nosync(addr, nr);
+ }
+ __tlbsync();
}
static void do_kernel_range_flush(void *info)
@@ -1087,24 +1423,37 @@ static void do_kernel_range_flush(void *info)
flush_tlb_one_kernel(addr);
}
-void flush_tlb_kernel_range(unsigned long start, unsigned long end)
+static void kernel_tlb_flush_all(struct flush_tlb_info *info)
{
- /* Balance as user space task's flush, a bit conservative */
- if (end == TLB_FLUSH_ALL ||
- (end - start) > tlb_single_page_flush_ceiling << PAGE_SHIFT) {
+ if (cpu_feature_enabled(X86_FEATURE_INVLPGB))
+ invlpgb_flush_all();
+ else
on_each_cpu(do_flush_tlb_all, NULL, 1);
- } else {
- struct flush_tlb_info *info;
-
- preempt_disable();
- info = get_flush_tlb_info(NULL, start, end, 0, false,
- TLB_GENERATION_INVALID);
+}
+static void kernel_tlb_flush_range(struct flush_tlb_info *info)
+{
+ if (cpu_feature_enabled(X86_FEATURE_INVLPGB))
+ invlpgb_kernel_range_flush(info);
+ else
on_each_cpu(do_kernel_range_flush, info, 1);
+}
- put_flush_tlb_info();
- preempt_enable();
- }
+void flush_tlb_kernel_range(unsigned long start, unsigned long end)
+{
+ struct flush_tlb_info *info;
+
+ guard(preempt)();
+
+ info = get_flush_tlb_info(NULL, start, end, PAGE_SHIFT, false,
+ TLB_GENERATION_INVALID);
+
+ if (info->end == TLB_FLUSH_ALL)
+ kernel_tlb_flush_all(info);
+ else
+ kernel_tlb_flush_range(info);
+
+ put_flush_tlb_info();
}
/*
@@ -1283,7 +1632,10 @@ void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
* a local TLB flush is needed. Optimize this use-case by calling
* flush_tlb_func_local() directly in this case.
*/
- if (cpumask_any_but(&batch->cpumask, cpu) < nr_cpu_ids) {
+ if (cpu_feature_enabled(X86_FEATURE_INVLPGB) && batch->unmapped_pages) {
+ invlpgb_flush_all_nonglobals();
+ batch->unmapped_pages = false;
+ } else if (cpumask_any_but(&batch->cpumask, cpu) < nr_cpu_ids) {
flush_tlb_multi(&batch->cpumask, info);
} else if (cpumask_test_cpu(cpu, &batch->cpumask)) {
lockdep_assert_irqs_enabled();
@@ -1325,7 +1677,7 @@ bool nmi_uaccess_okay(void)
if (loaded_mm != current_mm)
return false;
- VM_WARN_ON_ONCE(current_mm->pgd != __va(read_cr3_pa()));
+ VM_WARN_ON_ONCE(__pa(current_mm->pgd) != read_cr3_pa());
return true;
}
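
A few helpers used throughout this file (mm_global_asid(), mm_assign_global_asid(),
mm_in_asid_transition(), mm_clear_asid_transition()) are defined outside this patch hunk.
Assuming they are thin accessors on mm->context guarded by CONFIG_BROADCAST_TLB_FLUSH,
the read side looks roughly like this (the global_asid field name is taken from
mm_free_global_asid() above):

    static inline u16 mm_global_asid(struct mm_struct *mm)
    {
        if (!IS_ENABLED(CONFIG_BROADCAST_TLB_FLUSH))
            return 0;
        return READ_ONCE(mm->context.global_asid);
    }
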
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index a43fc5af973d..72776dcb75aa 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -410,16 +410,20 @@ static void emit_nops(u8 **pprog, int len)
* Emit the various CFI preambles, see asm/cfi.h and the comments about FineIBT
* in arch/x86/kernel/alternative.c
*/
+static int emit_call(u8 **prog, void *func, void *ip);
-static void emit_fineibt(u8 **pprog, u32 hash)
+static void emit_fineibt(u8 **pprog, u8 *ip, u32 hash, int arity)
{
u8 *prog = *pprog;
EMIT_ENDBR();
EMIT3_off32(0x41, 0x81, 0xea, hash); /* subl $hash, %r10d */
- EMIT2(0x74, 0x07); /* jz.d8 +7 */
- EMIT2(0x0f, 0x0b); /* ud2 */
- EMIT1(0x90); /* nop */
+ if (cfi_bhi) {
+ emit_call(&prog, __bhi_args[arity], ip + 11);
+ } else {
+ EMIT2(0x75, 0xf9); /* jne.d8 .-7 */
+ EMIT3(0x0f, 0x1f, 0x00); /* nop3 */
+ }
EMIT_ENDBR_POISON();
*pprog = prog;
@@ -448,13 +452,13 @@ static void emit_kcfi(u8 **pprog, u32 hash)
*pprog = prog;
}
-static void emit_cfi(u8 **pprog, u32 hash)
+static void emit_cfi(u8 **pprog, u8 *ip, u32 hash, int arity)
{
u8 *prog = *pprog;
switch (cfi_mode) {
case CFI_FINEIBT:
- emit_fineibt(&prog, hash);
+ emit_fineibt(&prog, ip, hash, arity);
break;
case CFI_KCFI:
@@ -505,13 +509,17 @@ static void emit_prologue_tail_call(u8 **pprog, bool is_subprog)
* bpf_tail_call helper will skip the first X86_TAIL_CALL_OFFSET bytes
* while jumping to another program
*/
-static void emit_prologue(u8 **pprog, u32 stack_depth, bool ebpf_from_cbpf,
+static void emit_prologue(u8 **pprog, u8 *ip, u32 stack_depth, bool ebpf_from_cbpf,
bool tail_call_reachable, bool is_subprog,
bool is_exception_cb)
{
u8 *prog = *pprog;
- emit_cfi(&prog, is_subprog ? cfi_bpf_subprog_hash : cfi_bpf_hash);
+ if (is_subprog) {
+ emit_cfi(&prog, ip, cfi_bpf_subprog_hash, 5);
+ } else {
+ emit_cfi(&prog, ip, cfi_bpf_hash, 1);
+ }
/* BPF trampoline can be made to work without these nops,
* but let's waste 5 bytes for now and optimize later
*/
@@ -641,7 +649,7 @@ int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
* See emit_prologue(), for IBT builds the trampoline hook is preceded
* with an ENDBR instruction.
*/
- if (is_endbr(*(u32 *)ip))
+ if (is_endbr(ip))
ip += ENDBR_INSN_SIZE;
return __bpf_arch_text_poke(ip, t, old_addr, new_addr);
@@ -1480,7 +1488,7 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, u8 *rw_image
detect_reg_usage(insn, insn_cnt, callee_regs_used);
- emit_prologue(&prog, stack_depth,
+ emit_prologue(&prog, image, stack_depth,
bpf_prog_was_classic(bpf_prog), tail_call_reachable,
bpf_is_subprog(bpf_prog), bpf_prog->aux->exception_cb);
/* Exception callback will clobber callee regs for its own use, and
@@ -3036,7 +3044,7 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *rw_im
/* skip patched call instruction and point orig_call to actual
* body of the kernel function.
*/
- if (is_endbr(*(u32 *)orig_call))
+ if (is_endbr(orig_call))
orig_call += ENDBR_INSN_SIZE;
orig_call += X86_PATCH_SIZE;
}
@@ -3047,7 +3055,9 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *rw_im
/*
* Indirect call for bpf_struct_ops
*/
- emit_cfi(&prog, cfi_get_func_hash(func_addr));
+ emit_cfi(&prog, image,
+ cfi_get_func_hash(func_addr),
+ cfi_get_func_arity(func_addr));
} else {
/*
* Direct-call fentry stub, as such it needs accounting for the
diff --git a/arch/x86/pci/Makefile b/arch/x86/pci/Makefile
index 48bcada5cabe..4933fb337983 100644
--- a/arch/x86/pci/Makefile
+++ b/arch/x86/pci/Makefile
@@ -12,8 +12,6 @@ obj-$(CONFIG_X86_INTEL_CE) += ce4100.o
obj-$(CONFIG_ACPI) += acpi.o
obj-y += legacy.o irq.o
-obj-$(CONFIG_STA2X11) += sta2x11-fixup.o
-
obj-$(CONFIG_X86_NUMACHIP) += numachip.o
obj-$(CONFIG_X86_INTEL_MID) += intel_mid_pci.o
diff --git a/arch/x86/pci/sta2x11-fixup.c b/arch/x86/pci/sta2x11-fixup.c
deleted file mode 100644
index 8c8ddc4dcc08..000000000000
--- a/arch/x86/pci/sta2x11-fixup.c
+++ /dev/null
@@ -1,233 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * DMA translation between STA2x11 AMBA memory mapping and the x86 memory mapping
- *
- * ST Microelectronics ConneXt (STA2X11/STA2X10)
- *
- * Copyright (c) 2010-2011 Wind River Systems, Inc.
- */
-
-#include <linux/pci.h>
-#include <linux/pci_ids.h>
-#include <linux/export.h>
-#include <linux/list.h>
-#include <linux/dma-map-ops.h>
-#include <linux/swiotlb.h>
-#include <asm/iommu.h>
-#include <asm/sta2x11.h>
-
-#define STA2X11_SWIOTLB_SIZE (4*1024*1024)
-
-/*
- * We build a list of bus numbers that are under the ConneXt. The
- * main bridge hosts 4 busses, which are the 4 endpoints, in order.
- */
-#define STA2X11_NR_EP 4 /* 0..3 included */
-#define STA2X11_NR_FUNCS 8 /* 0..7 included */
-#define STA2X11_AMBA_SIZE (512 << 20)
-
-struct sta2x11_ahb_regs { /* saved during suspend */
- u32 base, pexlbase, pexhbase, crw;
-};
-
-struct sta2x11_mapping {
- int is_suspended;
- struct sta2x11_ahb_regs regs[STA2X11_NR_FUNCS];
-};
-
-struct sta2x11_instance {
- struct list_head list;
- int bus0;
- struct sta2x11_mapping map[STA2X11_NR_EP];
-};
-
-static LIST_HEAD(sta2x11_instance_list);
-
-/* At probe time, record new instances of this bridge (likely one only) */
-static void sta2x11_new_instance(struct pci_dev *pdev)
-{
- struct sta2x11_instance *instance;
-
- instance = kzalloc(sizeof(*instance), GFP_ATOMIC);
- if (!instance)
- return;
- /* This has a subordinate bridge, with 4 more-subordinate ones */
- instance->bus0 = pdev->subordinate->number + 1;
-
- if (list_empty(&sta2x11_instance_list)) {
- int size = STA2X11_SWIOTLB_SIZE;
- /* First instance: register your own swiotlb area */
- dev_info(&pdev->dev, "Using SWIOTLB (size %i)\n", size);
- if (swiotlb_init_late(size, GFP_DMA, NULL))
- dev_emerg(&pdev->dev, "init swiotlb failed\n");
- }
- list_add(&instance->list, &sta2x11_instance_list);
-}
-DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_STMICRO, 0xcc17, sta2x11_new_instance);
-
-/*
- * Utility functions used in this file from below
- */
-static struct sta2x11_instance *sta2x11_pdev_to_instance(struct pci_dev *pdev)
-{
- struct sta2x11_instance *instance;
- int ep;
-
- list_for_each_entry(instance, &sta2x11_instance_list, list) {
- ep = pdev->bus->number - instance->bus0;
- if (ep >= 0 && ep < STA2X11_NR_EP)
- return instance;
- }
- return NULL;
-}
-
-static int sta2x11_pdev_to_ep(struct pci_dev *pdev)
-{
- struct sta2x11_instance *instance;
-
- instance = sta2x11_pdev_to_instance(pdev);
- if (!instance)
- return -1;
-
- return pdev->bus->number - instance->bus0;
-}
-
-/* This is exported, as some devices need to access the MFD registers */
-struct sta2x11_instance *sta2x11_get_instance(struct pci_dev *pdev)
-{
- return sta2x11_pdev_to_instance(pdev);
-}
-EXPORT_SYMBOL(sta2x11_get_instance);
-
-/* At setup time, we use our own ops if the device is a ConneXt one */
-static void sta2x11_setup_pdev(struct pci_dev *pdev)
-{
- struct sta2x11_instance *instance = sta2x11_pdev_to_instance(pdev);
-
- if (!instance) /* either a sta2x11 bridge or another ST device */
- return;
-
- /* We must enable all devices as master, for audio DMA to work */
- pci_set_master(pdev);
-}
-DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_STMICRO, PCI_ANY_ID, sta2x11_setup_pdev);
-
-/*
- * At boot we must set up the mappings for the pcie-to-amba bridge.
- * It involves device access, and the same happens at suspend/resume time
- */
-
-#define AHB_MAPB 0xCA4
-#define AHB_CRW(i) (AHB_MAPB + 0 + (i) * 0x10)
-#define AHB_CRW_SZMASK 0xfffffc00UL
-#define AHB_CRW_ENABLE (1 << 0)
-#define AHB_CRW_WTYPE_MEM (2 << 1)
-#define AHB_CRW_ROE (1UL << 3) /* Relax Order Ena */
-#define AHB_CRW_NSE (1UL << 4) /* No Snoop Enable */
-#define AHB_BASE(i) (AHB_MAPB + 4 + (i) * 0x10)
-#define AHB_PEXLBASE(i) (AHB_MAPB + 8 + (i) * 0x10)
-#define AHB_PEXHBASE(i) (AHB_MAPB + 12 + (i) * 0x10)
-
-/* At probe time, enable mapping for each endpoint, using the pdev */
-static void sta2x11_map_ep(struct pci_dev *pdev)
-{
- struct sta2x11_instance *instance = sta2x11_pdev_to_instance(pdev);
- struct device *dev = &pdev->dev;
- u32 amba_base, max_amba_addr;
- int i, ret;
-
- if (!instance)
- return;
-
- pci_read_config_dword(pdev, AHB_BASE(0), &amba_base);
- max_amba_addr = amba_base + STA2X11_AMBA_SIZE - 1;
-
- ret = dma_direct_set_offset(dev, 0, amba_base, STA2X11_AMBA_SIZE);
- if (ret)
- dev_err(dev, "sta2x11: could not set DMA offset\n");
-
- dev->bus_dma_limit = max_amba_addr;
- dma_set_mask_and_coherent(&pdev->dev, max_amba_addr);
-
- /* Configure AHB mapping */
- pci_write_config_dword(pdev, AHB_PEXLBASE(0), 0);
- pci_write_config_dword(pdev, AHB_PEXHBASE(0), 0);
- pci_write_config_dword(pdev, AHB_CRW(0), STA2X11_AMBA_SIZE |
- AHB_CRW_WTYPE_MEM | AHB_CRW_ENABLE);
-
- /* Disable all the other windows */
- for (i = 1; i < STA2X11_NR_FUNCS; i++)
- pci_write_config_dword(pdev, AHB_CRW(i), 0);
-
- dev_info(&pdev->dev,
- "sta2x11: Map EP %i: AMBA address %#8x-%#8x\n",
- sta2x11_pdev_to_ep(pdev), amba_base, max_amba_addr);
-}
-DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_STMICRO, PCI_ANY_ID, sta2x11_map_ep);
-
-#ifdef CONFIG_PM /* Some register values must be saved and restored */
-
-static struct sta2x11_mapping *sta2x11_pdev_to_mapping(struct pci_dev *pdev)
-{
- struct sta2x11_instance *instance;
- int ep;
-
- instance = sta2x11_pdev_to_instance(pdev);
- if (!instance)
- return NULL;
- ep = sta2x11_pdev_to_ep(pdev);
- return instance->map + ep;
-}
-
-static void suspend_mapping(struct pci_dev *pdev)
-{
- struct sta2x11_mapping *map = sta2x11_pdev_to_mapping(pdev);
- int i;
-
- if (!map)
- return;
-
- if (map->is_suspended)
- return;
- map->is_suspended = 1;
-
- /* Save all window configs */
- for (i = 0; i < STA2X11_NR_FUNCS; i++) {
- struct sta2x11_ahb_regs *regs = map->regs + i;
-
- pci_read_config_dword(pdev, AHB_BASE(i), &regs->base);
- pci_read_config_dword(pdev, AHB_PEXLBASE(i), &regs->pexlbase);
- pci_read_config_dword(pdev, AHB_PEXHBASE(i), &regs->pexhbase);
- pci_read_config_dword(pdev, AHB_CRW(i), &regs->crw);
- }
-}
-DECLARE_PCI_FIXUP_SUSPEND(PCI_VENDOR_ID_STMICRO, PCI_ANY_ID, suspend_mapping);
-
-static void resume_mapping(struct pci_dev *pdev)
-{
- struct sta2x11_mapping *map = sta2x11_pdev_to_mapping(pdev);
- int i;
-
- if (!map)
- return;
-
-
- if (!map->is_suspended)
- goto out;
- map->is_suspended = 0;
-
- /* Restore all window configs */
- for (i = 0; i < STA2X11_NR_FUNCS; i++) {
- struct sta2x11_ahb_regs *regs = map->regs + i;
-
- pci_write_config_dword(pdev, AHB_BASE(i), regs->base);
- pci_write_config_dword(pdev, AHB_PEXLBASE(i), regs->pexlbase);
- pci_write_config_dword(pdev, AHB_PEXHBASE(i), regs->pexhbase);
- pci_write_config_dword(pdev, AHB_CRW(i), regs->crw);
- }
-out:
- pci_set_master(pdev); /* Like at boot, enable master on all devices */
-}
-DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_STMICRO, PCI_ANY_ID, resume_mapping);
-
-#endif /* CONFIG_PM */
diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c
index 0f2fe524f60d..b8755cde2419 100644
--- a/arch/x86/pci/xen.c
+++ b/arch/x86/pci/xen.c
@@ -436,7 +436,8 @@ static struct msi_domain_ops xen_pci_msi_domain_ops = {
};
static struct msi_domain_info xen_pci_msi_domain_info = {
- .flags = MSI_FLAG_PCI_MSIX | MSI_FLAG_FREE_MSI_DESCS | MSI_FLAG_DEV_SYSFS,
+ .flags = MSI_FLAG_PCI_MSIX | MSI_FLAG_FREE_MSI_DESCS |
+ MSI_FLAG_DEV_SYSFS | MSI_FLAG_NO_MASK,
.ops = &xen_pci_msi_domain_ops,
};
@@ -484,11 +485,6 @@ static __init void xen_setup_pci_msi(void)
* in allocating the native domain and never use it.
*/
x86_init.irqs.create_pci_msi_domain = xen_create_pci_msi_domain;
- /*
- * With XEN PIRQ/Eventchannels in use PCI/MSI[-X] masking is solely
- * controlled by the hypervisor.
- */
- pci_msi_ignore_mask = 1;
}
#else /* CONFIG_PCI_MSI */
diff --git a/arch/x86/platform/olpc/olpc-xo1-sci.c b/arch/x86/platform/olpc/olpc-xo1-sci.c
index ccb23c73cbe8..63066e7c8517 100644
--- a/arch/x86/platform/olpc/olpc-xo1-sci.c
+++ b/arch/x86/platform/olpc/olpc-xo1-sci.c
@@ -14,7 +14,6 @@
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
-#include <linux/pm_wakeup.h>
#include <linux/power_supply.h>
#include <linux/suspend.h>
#include <linux/workqueue.h>
diff --git a/arch/x86/platform/olpc/olpc_dt.c b/arch/x86/platform/olpc/olpc_dt.c
index cf5dca2dbb91..e108ce7dad6a 100644
--- a/arch/x86/platform/olpc/olpc_dt.c
+++ b/arch/x86/platform/olpc/olpc_dt.c
@@ -215,13 +215,12 @@ static u32 __init olpc_dt_get_board_revision(void)
static int __init olpc_dt_compatible_match(phandle node, const char *compat)
{
char buf[64], *p;
- int plen, len;
+ int plen;
plen = olpc_dt_getproperty(node, "compatible", buf, sizeof(buf));
if (plen <= 0)
return 0;
- len = strlen(compat);
for (p = buf; p < buf + plen; p += strlen(p) + 1) {
if (strcmp(p, compat) == 0)
return 1;
diff --git a/arch/x86/platform/pvh/head.S b/arch/x86/platform/pvh/head.S
index 4733a5f467b8..cfa18ec7d55f 100644
--- a/arch/x86/platform/pvh/head.S
+++ b/arch/x86/platform/pvh/head.S
@@ -173,10 +173,14 @@ SYM_CODE_START(pvh_start_xen)
1:
UNWIND_HINT_END_OF_STACK
- /* Set base address in stack canary descriptor. */
- mov $MSR_GS_BASE,%ecx
- leal canary(%rip), %eax
- xor %edx, %edx
+ /*
+ * Set up GSBASE.
+ * Note that on SMP the boot CPU uses the init data section until
+ * the per-CPU areas are set up.
+ */
+ movl $MSR_GS_BASE,%ecx
+ xorl %eax, %eax
+ xorl %edx, %edx
wrmsr
/* Call xen_prepare_pvh() via the kernel virtual mapping */
@@ -238,8 +242,6 @@ SYM_DATA_START_LOCAL(gdt_start)
SYM_DATA_END_LABEL(gdt_start, SYM_L_LOCAL, gdt_end)
.balign 16
-SYM_DATA_LOCAL(canary, .fill 48, 1, 0)
-
SYM_DATA_START_LOCAL(early_stack)
.fill BOOT_STACK_SIZE, 1, 0
SYM_DATA_END_LABEL(early_stack, SYM_L_LOCAL, early_stack_end)
diff --git a/arch/x86/power/hibernate_asm_64.S b/arch/x86/power/hibernate_asm_64.S
index 0a0539e1cc81..8c534c36adfa 100644
--- a/arch/x86/power/hibernate_asm_64.S
+++ b/arch/x86/power/hibernate_asm_64.S
@@ -26,6 +26,7 @@
/* code below belongs to the image kernel */
.align PAGE_SIZE
SYM_FUNC_START(restore_registers)
+ ANNOTATE_NOENDBR
/* go back to the original page tables */
movq %r9, %cr3
@@ -119,6 +120,7 @@ SYM_FUNC_END(restore_image)
/* code below has been relocated to a safe page */
SYM_FUNC_START(core_restore_code)
+ ANNOTATE_NOENDBR
/* switch to temporary page tables */
movq %rax, %cr3
/* flush TLB */
diff --git a/arch/x86/realmode/rm/realmode.h b/arch/x86/realmode/rm/realmode.h
index c76041a35397..867e55f1d6af 100644
--- a/arch/x86/realmode/rm/realmode.h
+++ b/arch/x86/realmode/rm/realmode.h
@@ -2,7 +2,7 @@
#ifndef ARCH_X86_REALMODE_RM_REALMODE_H
#define ARCH_X86_REALMODE_RM_REALMODE_H
-#ifdef __ASSEMBLY__
+#ifdef __ASSEMBLER__
/*
* 16-bit ljmpw to the real_mode_seg
@@ -12,7 +12,7 @@
*/
#define LJMPW_RM(to) .byte 0xea ; .word (to), real_mode_seg
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
/*
* Signature at the end of the realmode region
diff --git a/arch/x86/realmode/rm/wakeup.h b/arch/x86/realmode/rm/wakeup.h
index 0e4fd08ae447..3b6d8fa82d3e 100644
--- a/arch/x86/realmode/rm/wakeup.h
+++ b/arch/x86/realmode/rm/wakeup.h
@@ -7,7 +7,7 @@
#ifndef ARCH_X86_KERNEL_ACPI_RM_WAKEUP_H
#define ARCH_X86_KERNEL_ACPI_RM_WAKEUP_H
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <linux/types.h>
/* This must match data at wakeup.S */
diff --git a/arch/x86/tools/cpufeaturemasks.awk b/arch/x86/tools/cpufeaturemasks.awk
new file mode 100755
index 000000000000..173d5bf2d999
--- /dev/null
+++ b/arch/x86/tools/cpufeaturemasks.awk
@@ -0,0 +1,88 @@
+#!/usr/bin/awk
+#
+# Convert cpufeatures.h to a list of compile-time masks
+# Note: this blithely assumes that each word has at least one
+# feature defined in it; if not, something else is wrong!
+#
+
+BEGIN {
+ printf "#ifndef _ASM_X86_CPUFEATUREMASKS_H\n";
+ printf "#define _ASM_X86_CPUFEATUREMASKS_H\n\n";
+
+ file = 0
+}
+
+FNR == 1 {
+ ++file;
+
+ # arch/x86/include/asm/cpufeatures.h
+ if (file == 1)
+ FS = "[ \t()*+]+";
+
+ # .config
+ if (file == 2)
+ FS = "=";
+}
+
+# Create a dictionary of sorts, containing all defined feature bits
+file == 1 && $1 ~ /^#define$/ && $2 ~ /^X86_FEATURE_/ {
+ nfeat = $3 * $4 + $5;
+ feat = $2;
+ sub(/^X86_FEATURE_/, "", feat);
+ feats[nfeat] = feat;
+}
+file == 1 && $1 ~ /^#define$/ && $2 == "NCAPINTS" {
+ ncapints = int($3);
+}
+
+# Create a dictionary featstat[REQUIRED|DISABLED, FEATURE_NAME] = on | off
+file == 2 && $1 ~ /^CONFIG_X86_(REQUIRED|DISABLED)_FEATURE_/ {
+ on = ($2 == "y");
+ if (split($1, fs, "CONFIG_X86_|_FEATURE_") == 3)
+ featstat[fs[2], fs[3]] = on;
+}
+
+END {
+ sets[1] = "REQUIRED";
+ sets[2] = "DISABLED";
+
+ for (ns in sets) {
+ s = sets[ns];
+
+ printf "/*\n";
+ printf " * %s features:\n", s;
+ printf " *\n";
+ fstr = "";
+ for (i = 0; i < ncapints; i++) {
+ mask = 0;
+ for (j = 0; j < 32; j++) {
+ feat = feats[i*32 + j];
+ if (featstat[s, feat]) {
+ nfstr = fstr " " feat;
+ if (length(nfstr) > 72) {
+ printf " * %s\n", fstr;
+ nfstr = " " feat;
+ }
+ fstr = nfstr;
+ mask += (2 ^ j);
+ }
+ }
+ masks[i] = mask;
+ }
+ printf " * %s\n */\n", fstr;
+
+ for (i = 0; i < ncapints; i++)
+ printf "#define %s_MASK%d\t0x%08xU\n", s, i, masks[i];
+
+ printf "\n#define %s_MASK_BIT_SET(x)\t\t\t\\\n", s;
+ printf "\t((\t\t\t\t\t";
+ for (i = 0; i < ncapints; i++) {
+ if (masks[i])
+ printf "\t\\\n\t\t((x) >> 5) == %2d ? %s_MASK%d :", i, s, i;
+ }
+ printf " 0\t\\\n";
+ printf "\t) & (1U << ((x) & 31)))\n\n";
+ }
+
+ printf "#endif /* _ASM_X86_CPUFEATUREMASKS_H */\n";
+}
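
For illustration only, a trimmed sketch of the header this script emits when run over cpufeatures.h and a .config (the feature names and mask values below are hypothetical; the real content depends entirely on the inputs, and the DISABLED set is generated the same way):

    #ifndef _ASM_X86_CPUFEATUREMASKS_H
    #define _ASM_X86_CPUFEATUREMASKS_H

    /*
     * REQUIRED features:
     *  FPU MSR FXSR
     */
    #define REQUIRED_MASK0	0x01000021U
    #define REQUIRED_MASK1	0x00000000U

    #define REQUIRED_MASK_BIT_SET(x)			\
    	((					\
    		((x) >> 5) ==  0 ? REQUIRED_MASK0 :	 0	\
    	) & (1U << ((x) & 31)))

    #endif /* _ASM_X86_CPUFEATUREMASKS_H */
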
diff --git a/arch/x86/tools/insn_decoder_test.c b/arch/x86/tools/insn_decoder_test.c
index 472540aeabc2..6c2986d2ad11 100644
--- a/arch/x86/tools/insn_decoder_test.c
+++ b/arch/x86/tools/insn_decoder_test.c
@@ -10,6 +10,7 @@
#include <assert.h>
#include <unistd.h>
#include <stdarg.h>
+#include <linux/kallsyms.h>
#define unlikely(cond) (cond)
@@ -106,7 +107,7 @@ static void parse_args(int argc, char **argv)
}
}
-#define BUFSIZE 256
+#define BUFSIZE (256 + KSYM_NAME_LEN)
int main(int argc, char **argv)
{
diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c
index e937be979ec8..5778bc498415 100644
--- a/arch/x86/tools/relocs.c
+++ b/arch/x86/tools/relocs.c
@@ -29,9 +29,13 @@ static struct relocs relocs16;
static struct relocs relocs32;
#if ELF_BITS == 64
-static struct relocs relocs32neg;
static struct relocs relocs64;
# define FMT PRIu64
+
+#ifndef R_X86_64_REX_GOTPCRELX
+# define R_X86_64_REX_GOTPCRELX 42
+#endif
+
#else
# define FMT PRIu32
#endif
@@ -86,8 +90,6 @@ static const char * const sym_regex_kernel[S_NSYMTYPES] = {
"__initramfs_start|"
"(jiffies|jiffies_64)|"
#if ELF_BITS == 64
- "__per_cpu_load|"
- "init_per_cpu__.*|"
"__end_rodata_hpage_align|"
#endif
"_end)$"
@@ -227,6 +229,7 @@ static const char *rel_type(unsigned type)
REL_TYPE(R_X86_64_PC16),
REL_TYPE(R_X86_64_8),
REL_TYPE(R_X86_64_PC8),
+ REL_TYPE(R_X86_64_REX_GOTPCRELX),
#else
REL_TYPE(R_386_NONE),
REL_TYPE(R_386_32),
@@ -284,34 +287,6 @@ static const char *sym_name(const char *sym_strtab, Elf_Sym *sym)
return name;
}
-static Elf_Sym *sym_lookup(const char *symname)
-{
- int i;
-
- for (i = 0; i < shnum; i++) {
- struct section *sec = &secs[i];
- long nsyms;
- char *strtab;
- Elf_Sym *symtab;
- Elf_Sym *sym;
-
- if (sec->shdr.sh_type != SHT_SYMTAB)
- continue;
-
- nsyms = sec->shdr.sh_size/sizeof(Elf_Sym);
- symtab = sec->symtab;
- strtab = sec->link->strtab;
-
- for (sym = symtab; --nsyms >= 0; sym++) {
- if (!sym->st_name)
- continue;
- if (strcmp(symname, strtab + sym->st_name) == 0)
- return sym;
- }
- }
- return 0;
-}
-
#if BYTE_ORDER == LITTLE_ENDIAN
# define le16_to_cpu(val) (val)
# define le32_to_cpu(val) (val)
@@ -760,84 +735,8 @@ static void walk_relocs(int (*process)(struct section *sec, Elf_Rel *rel,
}
}
-/*
- * The .data..percpu section is a special case for x86_64 SMP kernels.
- * It is used to initialize the actual per_cpu areas and to provide
- * definitions for the per_cpu variables that correspond to their offsets
- * within the percpu area. Since the values of all of the symbols need
- * to be offsets from the start of the per_cpu area the virtual address
- * (sh_addr) of .data..percpu is 0 in SMP kernels.
- *
- * This means that:
- *
- * Relocations that reference symbols in the per_cpu area do not
- * need further relocation (since the value is an offset relative
- * to the start of the per_cpu area that does not change).
- *
- * Relocations that apply to the per_cpu area need to have their
- * offset adjusted by by the value of __per_cpu_load to make them
- * point to the correct place in the loaded image (because the
- * virtual address of .data..percpu is 0).
- *
- * For non SMP kernels .data..percpu is linked as part of the normal
- * kernel data and does not require special treatment.
- *
- */
-static int per_cpu_shndx = -1;
-static Elf_Addr per_cpu_load_addr;
-
-static void percpu_init(void)
-{
- int i;
-
- for (i = 0; i < shnum; i++) {
- ElfW(Sym) *sym;
-
- if (strcmp(sec_name(i), ".data..percpu"))
- continue;
-
- if (secs[i].shdr.sh_addr != 0) /* non SMP kernel */
- return;
-
- sym = sym_lookup("__per_cpu_load");
- if (!sym)
- die("can't find __per_cpu_load\n");
-
- per_cpu_shndx = i;
- per_cpu_load_addr = sym->st_value;
-
- return;
- }
-}
-
#if ELF_BITS == 64
-/*
- * Check to see if a symbol lies in the .data..percpu section.
- *
- * The linker incorrectly associates some symbols with the
- * .data..percpu section so we also need to check the symbol
- * name to make sure that we classify the symbol correctly.
- *
- * The GNU linker incorrectly associates:
- * __init_begin
- * __per_cpu_load
- *
- * The "gold" linker incorrectly associates:
- * init_per_cpu__fixed_percpu_data
- * init_per_cpu__gdt_page
- */
-static int is_percpu_sym(ElfW(Sym) *sym, const char *symname)
-{
- int shndx = sym_index(sym);
-
- return (shndx == per_cpu_shndx) &&
- strcmp(symname, "__init_begin") &&
- strcmp(symname, "__per_cpu_load") &&
- strncmp(symname, "init_per_cpu_", 13);
-}
-
-
static int do_reloc64(struct section *sec, Elf_Rel *rel, ElfW(Sym) *sym,
const char *symname)
{
@@ -848,12 +747,6 @@ static int do_reloc64(struct section *sec, Elf_Rel *rel, ElfW(Sym) *sym,
if (sym->st_shndx == SHN_UNDEF)
return 0;
- /*
- * Adjust the offset if this reloc applies to the percpu section.
- */
- if (sec->shdr.sh_info == per_cpu_shndx)
- offset += per_cpu_load_addr;
-
switch (r_type) {
case R_X86_64_NONE:
/* NONE can be ignored. */
@@ -861,33 +754,23 @@ static int do_reloc64(struct section *sec, Elf_Rel *rel, ElfW(Sym) *sym,
case R_X86_64_PC32:
case R_X86_64_PLT32:
+ case R_X86_64_REX_GOTPCRELX:
/*
- * PC relative relocations don't need to be adjusted unless
- * referencing a percpu symbol.
+ * PC relative relocations don't need to be adjusted.
*
* NB: R_X86_64_PLT32 can be treated as R_X86_64_PC32.
*/
- if (is_percpu_sym(sym, symname))
- add_reloc(&relocs32neg, offset);
break;
case R_X86_64_PC64:
/*
* Only used by jump labels
*/
- if (is_percpu_sym(sym, symname))
- die("Invalid R_X86_64_PC64 relocation against per-CPU symbol %s\n", symname);
break;
case R_X86_64_32:
case R_X86_64_32S:
case R_X86_64_64:
- /*
- * References to the percpu area don't need to be adjusted.
- */
- if (is_percpu_sym(sym, symname))
- break;
-
if (shn_abs) {
/*
* Whitelisted absolute symbols do not require
@@ -1055,7 +938,8 @@ static int cmp_relocs(const void *va, const void *vb)
static void sort_relocs(struct relocs *r)
{
- qsort(r->offset, r->count, sizeof(r->offset[0]), cmp_relocs);
+ if (r->count)
+ qsort(r->offset, r->count, sizeof(r->offset[0]), cmp_relocs);
}
static int write32(uint32_t v, FILE *f)
@@ -1099,7 +983,6 @@ static void emit_relocs(int as_text, int use_real_mode)
/* Order the relocations for more efficient processing */
sort_relocs(&relocs32);
#if ELF_BITS == 64
- sort_relocs(&relocs32neg);
sort_relocs(&relocs64);
#else
sort_relocs(&relocs16);
@@ -1131,13 +1014,6 @@ static void emit_relocs(int as_text, int use_real_mode)
/* Now print each relocation */
for (i = 0; i < relocs64.count; i++)
write_reloc(relocs64.offset[i], stdout);
-
- /* Print a stop */
- write_reloc(0, stdout);
-
- /* Now print each inverse 32-bit relocation */
- for (i = 0; i < relocs32neg.count; i++)
- write_reloc(relocs32neg.offset[i], stdout);
#endif
/* Print a stop */
@@ -1190,9 +1066,6 @@ void process(FILE *fp, int use_real_mode, int as_text,
read_symtabs(fp);
read_relocs(fp);
- if (ELF_BITS == 64)
- percpu_init();
-
if (show_absolute_syms) {
print_absolute_symbols();
return;
diff --git a/arch/x86/virt/svm/sev.c b/arch/x86/virt/svm/sev.c
index 42e74a5a7d78..fc473ca12c44 100644
--- a/arch/x86/virt/svm/sev.c
+++ b/arch/x86/virt/svm/sev.c
@@ -198,7 +198,6 @@ static void __init __snp_fixup_e820_tables(u64 pa)
pr_info("Reserving start/end of RMP table on a 2MB boundary [0x%016llx]\n", pa);
e820__range_update(pa, PMD_SIZE, E820_TYPE_RAM, E820_TYPE_RESERVED);
e820__range_update_table(e820_table_kexec, pa, PMD_SIZE, E820_TYPE_RAM, E820_TYPE_RESERVED);
- e820__range_update_table(e820_table_firmware, pa, PMD_SIZE, E820_TYPE_RAM, E820_TYPE_RESERVED);
if (!memblock_is_region_reserved(pa, PMD_SIZE))
memblock_reserve(pa, PMD_SIZE);
}
diff --git a/arch/x86/xen/Kconfig b/arch/x86/xen/Kconfig
index 77e788e928cd..98d8a50d2aed 100644
--- a/arch/x86/xen/Kconfig
+++ b/arch/x86/xen/Kconfig
@@ -9,7 +9,7 @@ config XEN
select PARAVIRT_CLOCK
select X86_HV_CALLBACK_VECTOR
depends on X86_64 || (X86_32 && X86_PAE)
- depends on X86_64 || (X86_GENERIC || MPENTIUM4 || MCORE2 || MATOM || MK8)
+ depends on X86_64 || (X86_GENERIC || MPENTIUM4 || MATOM)
depends on X86_LOCAL_APIC && X86_TSC
help
This is the Linux Xen port. Enabling this will allow the
diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c
index 5e57835e999d..dcc2041f8e61 100644
--- a/arch/x86/xen/enlighten_pv.c
+++ b/arch/x86/xen/enlighten_pv.c
@@ -73,6 +73,7 @@
#include <asm/mwait.h>
#include <asm/pci_x86.h>
#include <asm/cpu.h>
+#include <asm/irq_stack.h>
#ifdef CONFIG_X86_IOPL_IOPERM
#include <asm/io_bitmap.h>
#endif
@@ -94,6 +95,44 @@ void *xen_initial_gdt;
static int xen_cpu_up_prepare_pv(unsigned int cpu);
static int xen_cpu_dead_pv(unsigned int cpu);
+#ifndef CONFIG_PREEMPTION
+/*
+ * Some hypercalls issued by the toolstack can take many 10s of
+ * seconds. Allow tasks running hypercalls via the privcmd driver to
+ * be voluntarily preempted even if full kernel preemption is
+ * disabled.
+ *
+ * Such preemptible hypercalls are bracketed by
+ * xen_preemptible_hcall_begin() and xen_preemptible_hcall_end()
+ * calls.
+ */
+DEFINE_PER_CPU(bool, xen_in_preemptible_hcall);
+EXPORT_SYMBOL_GPL(xen_in_preemptible_hcall);
+
+/*
+ * In case of scheduling the flag must be cleared and restored after
+ * returning from schedule as the task might move to a different CPU.
+ */
+static __always_inline bool get_and_clear_inhcall(void)
+{
+ bool inhcall = __this_cpu_read(xen_in_preemptible_hcall);
+
+ __this_cpu_write(xen_in_preemptible_hcall, false);
+ return inhcall;
+}
+
+static __always_inline void restore_inhcall(bool inhcall)
+{
+ __this_cpu_write(xen_in_preemptible_hcall, inhcall);
+}
+
+#else
+
+static __always_inline bool get_and_clear_inhcall(void) { return false; }
+static __always_inline void restore_inhcall(bool inhcall) { }
+
+#endif
+
struct tls_descs {
struct desc_struct desc[3];
};
@@ -687,6 +726,36 @@ DEFINE_IDTENTRY_RAW(xenpv_exc_machine_check)
}
#endif
+static void __xen_pv_evtchn_do_upcall(struct pt_regs *regs)
+{
+ struct pt_regs *old_regs = set_irq_regs(regs);
+
+ inc_irq_stat(irq_hv_callback_count);
+
+ xen_evtchn_do_upcall();
+
+ set_irq_regs(old_regs);
+}
+
+__visible noinstr void xen_pv_evtchn_do_upcall(struct pt_regs *regs)
+{
+ irqentry_state_t state = irqentry_enter(regs);
+ bool inhcall;
+
+ instrumentation_begin();
+ run_sysvec_on_irqstack_cond(__xen_pv_evtchn_do_upcall, regs);
+
+ inhcall = get_and_clear_inhcall();
+ if (inhcall && !WARN_ON_ONCE(state.exit_rcu)) {
+ irqentry_exit_cond_resched();
+ instrumentation_end();
+ restore_inhcall(inhcall);
+ } else {
+ instrumentation_end();
+ irqentry_exit(regs, state);
+ }
+}
+
struct trap_array_entry {
void (*orig)(void);
void (*xen)(void);
diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c
index d078de2c952b..38971c6dcd4b 100644
--- a/arch/x86/xen/mmu_pv.c
+++ b/arch/x86/xen/mmu_pv.c
@@ -2189,7 +2189,6 @@ static const typeof(pv_ops) xen_mmu_ops __initconst = {
.flush_tlb_kernel = xen_flush_tlb,
.flush_tlb_one_user = xen_flush_tlb_one_user,
.flush_tlb_multi = xen_flush_tlb_multi,
- .tlb_remove_table = tlb_remove_table,
.pgd_alloc = xen_pgd_alloc,
.pgd_free = xen_pgd_free,
diff --git a/arch/x86/xen/smp_pv.c b/arch/x86/xen/smp_pv.c
index 6863d3da7dec..688ff59318ae 100644
--- a/arch/x86/xen/smp_pv.c
+++ b/arch/x86/xen/smp_pv.c
@@ -70,7 +70,7 @@ static void cpu_bringup(void)
xen_enable_syscall();
}
cpu = smp_processor_id();
- smp_store_cpu_info(cpu);
+ identify_secondary_cpu(cpu);
set_cpu_sibling_map(cpu);
speculative_store_bypass_ht_init();
diff --git a/arch/x86/xen/xen-asm.S b/arch/x86/xen/xen-asm.S
index b518f36d1ca2..109af12f7647 100644
--- a/arch/x86/xen/xen-asm.S
+++ b/arch/x86/xen/xen-asm.S
@@ -51,6 +51,7 @@ SYM_FUNC_END(xen_hypercall_pv)
* non-zero.
*/
SYM_FUNC_START(xen_irq_disable_direct)
+ ENDBR
movb $1, PER_CPU_VAR(xen_vcpu_info + XEN_vcpu_info_mask)
RET
SYM_FUNC_END(xen_irq_disable_direct)
@@ -90,6 +91,7 @@ SYM_FUNC_END(check_events)
* then enter the hypervisor to get them handled.
*/
SYM_FUNC_START(xen_irq_enable_direct)
+ ENDBR
FRAME_BEGIN
/* Unmask events */
movb $0, PER_CPU_VAR(xen_vcpu_info + XEN_vcpu_info_mask)
@@ -120,6 +122,7 @@ SYM_FUNC_END(xen_irq_enable_direct)
* x86 use opposite senses (mask vs enable).
*/
SYM_FUNC_START(xen_save_fl_direct)
+ ENDBR
testb $0xff, PER_CPU_VAR(xen_vcpu_info + XEN_vcpu_info_mask)
setz %ah
addb %ah, %ah
@@ -127,6 +130,7 @@ SYM_FUNC_START(xen_save_fl_direct)
SYM_FUNC_END(xen_save_fl_direct)
SYM_FUNC_START(xen_read_cr2)
+ ENDBR
FRAME_BEGIN
_ASM_MOV PER_CPU_VAR(xen_vcpu), %_ASM_AX
_ASM_MOV XEN_vcpu_info_arch_cr2(%_ASM_AX), %_ASM_AX
@@ -135,6 +139,7 @@ SYM_FUNC_START(xen_read_cr2)
SYM_FUNC_END(xen_read_cr2);
SYM_FUNC_START(xen_read_cr2_direct)
+ ENDBR
FRAME_BEGIN
_ASM_MOV PER_CPU_VAR(xen_vcpu_info + XEN_vcpu_info_arch_cr2), %_ASM_AX
FRAME_END
diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
index 894edf8d6d62..5dad6c51cdc3 100644
--- a/arch/x86/xen/xen-head.S
+++ b/arch/x86/xen/xen-head.S
@@ -31,16 +31,14 @@ SYM_CODE_START(startup_xen)
leaq __top_init_kernel_stack(%rip), %rsp
- /* Set up %gs.
- *
- * The base of %gs always points to fixed_percpu_data. If the
- * stack protector canary is enabled, it is located at %gs:40.
+ /*
+ * Set up GSBASE.
* Note that, on SMP, the boot cpu uses init data section until
* the per cpu areas are set up.
*/
movl $MSR_GS_BASE,%ecx
- movq $INIT_PER_CPU_VAR(fixed_percpu_data),%rax
- cdq
+ xorl %eax, %eax
+ xorl %edx, %edx
wrmsr
mov %rsi, %rdi
@@ -133,11 +131,13 @@ SYM_FUNC_START(xen_hypercall_hvm)
SYM_FUNC_END(xen_hypercall_hvm)
SYM_FUNC_START(xen_hypercall_amd)
+ ANNOTATE_NOENDBR
vmmcall
RET
SYM_FUNC_END(xen_hypercall_amd)
SYM_FUNC_START(xen_hypercall_intel)
+ ANNOTATE_NOENDBR
vmcall
RET
SYM_FUNC_END(xen_hypercall_intel)
diff --git a/arch/xtensa/kernel/syscalls/syscall.tbl b/arch/xtensa/kernel/syscalls/syscall.tbl
index 37effc1b134e..f657a77314f8 100644
--- a/arch/xtensa/kernel/syscalls/syscall.tbl
+++ b/arch/xtensa/kernel/syscalls/syscall.tbl
@@ -437,3 +437,4 @@
464 common getxattrat sys_getxattrat
465 common listxattrat sys_listxattrat
466 common removexattrat sys_removexattrat
+467 common open_tree_attr sys_open_tree_attr
diff --git a/arch/xtensa/kernel/traps.c b/arch/xtensa/kernel/traps.c
index 38092d21acf8..44c07c4e0833 100644
--- a/arch/xtensa/kernel/traps.c
+++ b/arch/xtensa/kernel/traps.c
@@ -629,15 +629,11 @@ DEFINE_SPINLOCK(die_lock);
void __noreturn die(const char * str, struct pt_regs * regs, long err)
{
static int die_counter;
- const char *pr = "";
-
- if (IS_ENABLED(CONFIG_PREEMPTION))
- pr = IS_ENABLED(CONFIG_PREEMPT_RT) ? " PREEMPT_RT" : " PREEMPT";
console_verbose();
spin_lock_irq(&die_lock);
- pr_info("%s: sig: %ld [#%d]%s\n", str, err, ++die_counter, pr);
+ pr_info("%s: sig: %ld [#%d]\n", str, err, ++die_counter);
show_regs(regs);
if (!user_mode(regs))
show_stack(NULL, (unsigned long *)regs->areg[1], KERN_INFO);
diff --git a/block/Kconfig b/block/Kconfig
index 5b623b876d3b..df8973bc0539 100644
--- a/block/Kconfig
+++ b/block/Kconfig
@@ -63,7 +63,7 @@ config BLK_DEV_BSGLIB
config BLK_DEV_INTEGRITY
bool "Block layer data integrity support"
select CRC_T10DIF
- select CRC64_ROCKSOFT
+ select CRC64
help
Some storage devices allow extra information to be
stored/retrieved to help protect the data. The block layer
diff --git a/block/Makefile b/block/Makefile
index 33748123710b..3a941dc0d27f 100644
--- a/block/Makefile
+++ b/block/Makefile
@@ -26,7 +26,8 @@ obj-$(CONFIG_MQ_IOSCHED_KYBER) += kyber-iosched.o
bfq-y := bfq-iosched.o bfq-wf2q.o bfq-cgroup.o
obj-$(CONFIG_IOSCHED_BFQ) += bfq.o
-obj-$(CONFIG_BLK_DEV_INTEGRITY) += bio-integrity.o blk-integrity.o t10-pi.o
+obj-$(CONFIG_BLK_DEV_INTEGRITY) += bio-integrity.o blk-integrity.o t10-pi.o \
+ bio-integrity-auto.o
obj-$(CONFIG_BLK_DEV_ZONED) += blk-zoned.o
obj-$(CONFIG_BLK_WBT) += blk-wbt.o
obj-$(CONFIG_BLK_DEBUG_FS) += blk-mq-debugfs.o
diff --git a/block/badblocks.c b/block/badblocks.c
index db4ec8b9b2a8..ece64e76fe8f 100644
--- a/block/badblocks.c
+++ b/block/badblocks.c
@@ -528,51 +528,6 @@ out:
}
/*
- * Return 'true' if the range indicated by 'bad' can be backward merged
- * with the bad range (from the bad table) index by 'behind'.
- */
-static bool can_merge_behind(struct badblocks *bb,
- struct badblocks_context *bad, int behind)
-{
- sector_t sectors = bad->len;
- sector_t s = bad->start;
- u64 *p = bb->page;
-
- if ((s < BB_OFFSET(p[behind])) &&
- ((s + sectors) >= BB_OFFSET(p[behind])) &&
- ((BB_END(p[behind]) - s) <= BB_MAX_LEN) &&
- BB_ACK(p[behind]) == bad->ack)
- return true;
- return false;
-}
-
-/*
- * Do backward merge for range indicated by 'bad' and the bad range
- * (from the bad table) indexed by 'behind'. The return value is merged
- * sectors from bad->len.
- */
-static int behind_merge(struct badblocks *bb, struct badblocks_context *bad,
- int behind)
-{
- sector_t sectors = bad->len;
- sector_t s = bad->start;
- u64 *p = bb->page;
- int merged = 0;
-
- WARN_ON(s >= BB_OFFSET(p[behind]));
- WARN_ON((s + sectors) < BB_OFFSET(p[behind]));
-
- if (s < BB_OFFSET(p[behind])) {
- merged = BB_OFFSET(p[behind]) - s;
- p[behind] = BB_MAKE(s, BB_LEN(p[behind]) + merged, bad->ack);
-
- WARN_ON((BB_LEN(p[behind]) + merged) >= BB_MAX_LEN);
- }
-
- return merged;
-}
-
-/*
* Return 'true' if the range indicated by 'bad' can be forward
* merged with the bad range (from the bad table) indexed by 'prev'.
*/
@@ -745,7 +700,7 @@ static bool can_front_overwrite(struct badblocks *bb, int prev,
*extra = 2;
}
- if ((bb->count + (*extra)) >= MAX_BADBLOCKS)
+ if ((bb->count + (*extra)) > MAX_BADBLOCKS)
return false;
return true;
@@ -855,40 +810,60 @@ static void badblocks_update_acked(struct badblocks *bb)
bb->unacked_exist = 0;
}
+/*
+ * Merge the bad range indexed by 'prev' with the immediately following
+ * range in the bad table when the two are adjacent, share the same
+ * acknowledged state, and fit within BB_MAX_LEN.
+ */
+static bool try_adjacent_combine(struct badblocks *bb, int prev)
+{
+ u64 *p = bb->page;
+
+ if (prev >= 0 && (prev + 1) < bb->count &&
+ BB_END(p[prev]) == BB_OFFSET(p[prev + 1]) &&
+ (BB_LEN(p[prev]) + BB_LEN(p[prev + 1])) <= BB_MAX_LEN &&
+ BB_ACK(p[prev]) == BB_ACK(p[prev + 1])) {
+ p[prev] = BB_MAKE(BB_OFFSET(p[prev]),
+ BB_LEN(p[prev]) + BB_LEN(p[prev + 1]),
+ BB_ACK(p[prev]));
+
+ if ((prev + 2) < bb->count)
+ memmove(p + prev + 1, p + prev + 2,
+ (bb->count - (prev + 2)) * 8);
+ bb->count--;
+ return true;
+ }
+ return false;
+}
+
/* Do exact work to set bad block range into the bad block table */
-static int _badblocks_set(struct badblocks *bb, sector_t s, int sectors,
- int acknowledged)
+static bool _badblocks_set(struct badblocks *bb, sector_t s, sector_t sectors,
+ int acknowledged)
{
- int retried = 0, space_desired = 0;
- int orig_len, len = 0, added = 0;
+ int len = 0, added = 0;
struct badblocks_context bad;
int prev = -1, hint = -1;
- sector_t orig_start;
unsigned long flags;
- int rv = 0;
u64 *p;
if (bb->shift < 0)
/* badblocks are disabled */
- return 1;
+ return false;
if (sectors == 0)
/* Invalid sectors number */
- return 1;
+ return false;
if (bb->shift) {
/* round the start down, and the end up */
sector_t next = s + sectors;
- rounddown(s, bb->shift);
- roundup(next, bb->shift);
+ rounddown(s, 1 << bb->shift);
+ roundup(next, 1 << bb->shift);
sectors = next - s;
}
write_seqlock_irqsave(&bb->lock, flags);
- orig_start = s;
- orig_len = sectors;
bad.ack = acknowledged;
p = bb->page;
@@ -897,6 +872,9 @@ re_insert:
bad.len = sectors;
len = 0;
+ if (badblocks_full(bb))
+ goto out;
+
if (badblocks_empty(bb)) {
len = insert_at(bb, 0, &bad);
bb->count++;
@@ -908,32 +886,14 @@ re_insert:
/* start before all badblocks */
if (prev < 0) {
- if (!badblocks_full(bb)) {
- /* insert on the first */
- if (bad.len > (BB_OFFSET(p[0]) - bad.start))
- bad.len = BB_OFFSET(p[0]) - bad.start;
- len = insert_at(bb, 0, &bad);
- bb->count++;
- added++;
- hint = 0;
- goto update_sectors;
- }
-
- /* No sapce, try to merge */
- if (overlap_behind(bb, &bad, 0)) {
- if (can_merge_behind(bb, &bad, 0)) {
- len = behind_merge(bb, &bad, 0);
- added++;
- } else {
- len = BB_OFFSET(p[0]) - s;
- space_desired = 1;
- }
- hint = 0;
- goto update_sectors;
- }
-
- /* no table space and give up */
- goto out;
+ /* insert on the first */
+ if (bad.len > (BB_OFFSET(p[0]) - bad.start))
+ bad.len = BB_OFFSET(p[0]) - bad.start;
+ len = insert_at(bb, 0, &bad);
+ bb->count++;
+ added++;
+ hint = ++prev;
+ goto update_sectors;
}
/* in case p[prev-1] can be merged with p[prev] */
@@ -945,33 +905,6 @@ re_insert:
goto update_sectors;
}
- if (overlap_front(bb, prev, &bad)) {
- if (can_merge_front(bb, prev, &bad)) {
- len = front_merge(bb, prev, &bad);
- added++;
- } else {
- int extra = 0;
-
- if (!can_front_overwrite(bb, prev, &bad, &extra)) {
- len = min_t(sector_t,
- BB_END(p[prev]) - s, sectors);
- hint = prev;
- goto update_sectors;
- }
-
- len = front_overwrite(bb, prev, &bad, extra);
- added++;
- bb->count += extra;
-
- if (can_combine_front(bb, prev, &bad)) {
- front_combine(bb, prev);
- bb->count--;
- }
- }
- hint = prev;
- goto update_sectors;
- }
-
if (can_merge_front(bb, prev, &bad)) {
len = front_merge(bb, prev, &bad);
added++;
@@ -979,21 +912,29 @@ re_insert:
goto update_sectors;
}
- /* if no space in table, still try to merge in the covered range */
- if (badblocks_full(bb)) {
- /* skip the cannot-merge range */
- if (((prev + 1) < bb->count) &&
- overlap_behind(bb, &bad, prev + 1) &&
- ((s + sectors) >= BB_END(p[prev + 1]))) {
- len = BB_END(p[prev + 1]) - s;
- hint = prev + 1;
+ if (overlap_front(bb, prev, &bad)) {
+ int extra = 0;
+
+ if (!can_front_overwrite(bb, prev, &bad, &extra)) {
+ if (extra > 0)
+ goto out;
+
+ len = min_t(sector_t,
+ BB_END(p[prev]) - s, sectors);
+ hint = prev;
goto update_sectors;
}
- /* no retry any more */
- len = sectors;
- space_desired = 1;
- hint = -1;
+ len = front_overwrite(bb, prev, &bad, extra);
+ added++;
+ bb->count += extra;
+
+ if (can_combine_front(bb, prev, &bad)) {
+ front_combine(bb, prev);
+ bb->count--;
+ }
+
+ hint = prev;
goto update_sectors;
}
@@ -1006,7 +947,7 @@ re_insert:
len = insert_at(bb, prev + 1, &bad);
bb->count++;
added++;
- hint = prev + 1;
+ hint = ++prev;
update_sectors:
s += len;
@@ -1015,35 +956,12 @@ update_sectors:
if (sectors > 0)
goto re_insert;
- WARN_ON(sectors < 0);
-
/*
* Check whether the following already set range can be
* merged. (prev < 0) condition is not handled here,
* because it's already complicated enough.
*/
- if (prev >= 0 &&
- (prev + 1) < bb->count &&
- BB_END(p[prev]) == BB_OFFSET(p[prev + 1]) &&
- (BB_LEN(p[prev]) + BB_LEN(p[prev + 1])) <= BB_MAX_LEN &&
- BB_ACK(p[prev]) == BB_ACK(p[prev + 1])) {
- p[prev] = BB_MAKE(BB_OFFSET(p[prev]),
- BB_LEN(p[prev]) + BB_LEN(p[prev + 1]),
- BB_ACK(p[prev]));
-
- if ((prev + 2) < bb->count)
- memmove(p + prev + 1, p + prev + 2,
- (bb->count - (prev + 2)) * 8);
- bb->count--;
- }
-
- if (space_desired && !badblocks_full(bb)) {
- s = orig_start;
- sectors = orig_len;
- space_desired = 0;
- if (retried++ < 3)
- goto re_insert;
- }
+ try_adjacent_combine(bb, prev);
out:
if (added) {
@@ -1057,10 +975,7 @@ out:
write_sequnlock_irqrestore(&bb->lock, flags);
- if (!added)
- rv = 1;
-
- return rv;
+ return sectors == 0;
}
/*
@@ -1131,21 +1046,20 @@ static int front_splitting_clear(struct badblocks *bb, int prev,
}
/* Do the exact work to clear bad block range from the bad block table */
-static int _badblocks_clear(struct badblocks *bb, sector_t s, int sectors)
+static bool _badblocks_clear(struct badblocks *bb, sector_t s, sector_t sectors)
{
struct badblocks_context bad;
int prev = -1, hint = -1;
int len = 0, cleared = 0;
- int rv = 0;
u64 *p;
if (bb->shift < 0)
/* badblocks are disabled */
- return 1;
+ return false;
if (sectors == 0)
/* Invalid sectors number */
- return 1;
+ return false;
if (bb->shift) {
sector_t target;
@@ -1157,8 +1071,8 @@ static int _badblocks_clear(struct badblocks *bb, sector_t s, int sectors)
* isn't than to think a block is not bad when it is.
*/
target = s + sectors;
- roundup(s, bb->shift);
- rounddown(target, bb->shift);
+ roundup(s, 1 << bb->shift);
+ rounddown(target, 1 << bb->shift);
sectors = target - s;
}
@@ -1214,7 +1128,7 @@ re_clear:
if ((BB_OFFSET(p[prev]) < bad.start) &&
(BB_END(p[prev]) > (bad.start + bad.len))) {
/* Splitting */
- if ((bb->count + 1) < MAX_BADBLOCKS) {
+ if ((bb->count + 1) <= MAX_BADBLOCKS) {
len = front_splitting_clear(bb, prev, &bad);
bb->count += 1;
cleared++;
@@ -1255,8 +1169,6 @@ update_sectors:
if (sectors > 0)
goto re_clear;
- WARN_ON(sectors < 0);
-
if (cleared) {
badblocks_update_acked(bb);
set_changed(bb);
@@ -1265,40 +1177,21 @@ update_sectors:
write_sequnlock_irq(&bb->lock);
if (!cleared)
- rv = 1;
+ return false;
- return rv;
+ return true;
}
/* Do the exact work to check bad blocks range from the bad block table */
-static int _badblocks_check(struct badblocks *bb, sector_t s, int sectors,
- sector_t *first_bad, int *bad_sectors)
+static int _badblocks_check(struct badblocks *bb, sector_t s, sector_t sectors,
+ sector_t *first_bad, sector_t *bad_sectors)
{
- int unacked_badblocks, acked_badblocks;
int prev = -1, hint = -1, set = 0;
struct badblocks_context bad;
- unsigned int seq;
+ int unacked_badblocks = 0;
+ int acked_badblocks = 0;
+ u64 *p = bb->page;
int len, rv;
- u64 *p;
-
- WARN_ON(bb->shift < 0 || sectors == 0);
-
- if (bb->shift > 0) {
- sector_t target;
-
- /* round the start down, and the end up */
- target = s + sectors;
- rounddown(s, bb->shift);
- roundup(target, bb->shift);
- sectors = target - s;
- }
-
-retry:
- seq = read_seqbegin(&bb->lock);
-
- p = bb->page;
- unacked_badblocks = 0;
- acked_badblocks = 0;
re_check:
bad.start = s;
@@ -1349,14 +1242,15 @@ re_check:
len = sectors;
update_sectors:
+ /* This situation should never happen */
+ WARN_ON(sectors < len);
+
s += len;
sectors -= len;
if (sectors > 0)
goto re_check;
- WARN_ON(sectors < 0);
-
if (unacked_badblocks > 0)
rv = -1;
else if (acked_badblocks > 0)
@@ -1364,9 +1258,6 @@ update_sectors:
else
rv = 0;
- if (read_seqretry(&bb->lock, seq))
- goto retry;
-
return rv;
}
@@ -1404,10 +1295,30 @@ update_sectors:
* -1: there are bad blocks which have not yet been acknowledged in metadata.
* plus the start/length of the first bad section we overlap.
*/
-int badblocks_check(struct badblocks *bb, sector_t s, int sectors,
- sector_t *first_bad, int *bad_sectors)
+int badblocks_check(struct badblocks *bb, sector_t s, sector_t sectors,
+ sector_t *first_bad, sector_t *bad_sectors)
{
- return _badblocks_check(bb, s, sectors, first_bad, bad_sectors);
+ unsigned int seq;
+ int rv;
+
+ WARN_ON(bb->shift < 0 || sectors == 0);
+
+ if (bb->shift > 0) {
+ /* round the start down, and the end up */
+ sector_t target = s + sectors;
+
+ rounddown(s, 1 << bb->shift);
+ roundup(target, 1 << bb->shift);
+ sectors = target - s;
+ }
+
+retry:
+ seq = read_seqbegin(&bb->lock);
+ rv = _badblocks_check(bb, s, sectors, first_bad, bad_sectors);
+ if (read_seqretry(&bb->lock, seq))
+ goto retry;
+
+ return rv;
}
EXPORT_SYMBOL_GPL(badblocks_check);
@@ -1423,11 +1334,12 @@ EXPORT_SYMBOL_GPL(badblocks_check);
* decide how best to handle it.
*
* Return:
- * 0: success
- * 1: failed to set badblocks (out of space)
+ * true: success
+ * false: failed to set badblocks (out of space). Partial setting will be
+ * treated as failure.
*/
-int badblocks_set(struct badblocks *bb, sector_t s, int sectors,
- int acknowledged)
+bool badblocks_set(struct badblocks *bb, sector_t s, sector_t sectors,
+ int acknowledged)
{
return _badblocks_set(bb, s, sectors, acknowledged);
}
@@ -1444,10 +1356,10 @@ EXPORT_SYMBOL_GPL(badblocks_set);
* drop the remove request.
*
* Return:
- * 0: success
- * 1: failed to clear badblocks
+ * true: success
+ * false: failed to clear badblocks
*/
-int badblocks_clear(struct badblocks *bb, sector_t s, int sectors)
+bool badblocks_clear(struct badblocks *bb, sector_t s, sector_t sectors)
{
return _badblocks_clear(bb, s, sectors);
}
@@ -1479,6 +1391,11 @@ void ack_all_badblocks(struct badblocks *bb)
p[i] = BB_MAKE(start, len, 1);
}
}
+
+ for (i = 0; i < bb->count ; i++)
+ while (try_adjacent_combine(bb, i))
+ ;
+
bb->unacked_exist = 0;
}
write_sequnlock_irq(&bb->lock);
@@ -1564,10 +1481,10 @@ ssize_t badblocks_store(struct badblocks *bb, const char *page, size_t len,
return -EINVAL;
}
- if (badblocks_set(bb, sector, length, !unack))
+ if (!badblocks_set(bb, sector, length, !unack))
return -ENOSPC;
- else
- return len;
+
+ return len;
}
EXPORT_SYMBOL_GPL(badblocks_store);
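
Since badblocks_set() and badblocks_clear() now return bool (true on success, false on failure, with partial setting treated as failure), callers check the result directly rather than testing a 0/1 return code. A minimal caller sketched under that assumption (the function name is illustrative, not from this patch):

    /* Record a bad range; report -ENOSPC when the bad block table is full. */
    static int example_record_badblocks(struct badblocks *bb, sector_t s, sector_t n)
    {
    	if (!badblocks_set(bb, s, n, 1 /* acknowledged */))
    		return -ENOSPC;
    	return 0;
    }
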
diff --git a/block/bdev.c b/block/bdev.c
index 9d73a8fbf7f9..4844d1e27b6f 100644
--- a/block/bdev.c
+++ b/block/bdev.c
@@ -148,6 +148,8 @@ static void set_init_blocksize(struct block_device *bdev)
bsize <<= 1;
}
BD_INODE(bdev)->i_blkbits = blksize_bits(bsize);
+ mapping_set_folio_min_order(BD_INODE(bdev)->i_mapping,
+ get_order(bsize));
}
int set_blocksize(struct file *file, int size)
@@ -169,6 +171,7 @@ int set_blocksize(struct file *file, int size)
if (inode->i_blkbits != blksize_bits(size)) {
sync_blockdev(bdev);
inode->i_blkbits = blksize_bits(size);
+ mapping_set_folio_min_order(inode->i_mapping, get_order(size));
kill_bdev(bdev);
}
return 0;
@@ -178,10 +181,11 @@ EXPORT_SYMBOL(set_blocksize);
int sb_set_blocksize(struct super_block *sb, int size)
{
+ if (!(sb->s_type->fs_flags & FS_LBS) && size > PAGE_SIZE)
+ return 0;
if (set_blocksize(sb->s_bdev_file, size))
return 0;
- /* If we get here, we know size is power of two
- * and it's value is between 512 and PAGE_SIZE */
+ /* If we get here, we know size is validated */
sb->s_blocksize = size;
sb->s_blocksize_bits = blksize_bits(size);
return sb->s_blocksize;
@@ -1274,9 +1278,6 @@ void bdev_statx(struct path *path, struct kstat *stat,
struct inode *backing_inode;
struct block_device *bdev;
- if (!(request_mask & (STATX_DIOALIGN | STATX_WRITE_ATOMIC)))
- return;
-
backing_inode = d_backing_inode(path->dentry);
/*
@@ -1303,6 +1304,8 @@ void bdev_statx(struct path *path, struct kstat *stat,
queue_atomic_write_unit_max_bytes(bd_queue));
}
+ stat->blksize = bdev_io_min(bdev);
+
blkdev_put_no_open(bdev);
}
diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
index 167542201603..abd80dc13562 100644
--- a/block/bfq-iosched.c
+++ b/block/bfq-iosched.c
@@ -7315,9 +7315,8 @@ static int bfq_init_queue(struct request_queue *q, struct elevator_type *e)
INIT_LIST_HEAD(&bfqd->dispatch);
- hrtimer_init(&bfqd->idle_slice_timer, CLOCK_MONOTONIC,
- HRTIMER_MODE_REL);
- bfqd->idle_slice_timer.function = bfq_idle_slice_timer;
+ hrtimer_setup(&bfqd->idle_slice_timer, bfq_idle_slice_timer, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
bfqd->queue_weights_tree = RB_ROOT_CACHED;
#ifdef CONFIG_BFQ_GROUP_IOSCHED
diff --git a/block/bio-integrity-auto.c b/block/bio-integrity-auto.c
new file mode 100644
index 000000000000..e524c609be50
--- /dev/null
+++ b/block/bio-integrity-auto.c
@@ -0,0 +1,191 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2007, 2008, 2009 Oracle Corporation
+ * Written by: Martin K. Petersen <martin.petersen@oracle.com>
+ *
+ * Automatically generate and verify integrity data on PI capable devices if the
+ * bio submitter didn't provide PI itself. This ensures that kernel verifies
+ * data integrity even if the file system (or other user of the block device) is
+ * not aware of PI.
+ */
+#include <linux/blk-integrity.h>
+#include <linux/workqueue.h>
+#include "blk.h"
+
+struct bio_integrity_data {
+ struct bio *bio;
+ struct bvec_iter saved_bio_iter;
+ struct work_struct work;
+ struct bio_integrity_payload bip;
+ struct bio_vec bvec;
+};
+
+static struct kmem_cache *bid_slab;
+static mempool_t bid_pool;
+static struct workqueue_struct *kintegrityd_wq;
+
+static void bio_integrity_finish(struct bio_integrity_data *bid)
+{
+ bid->bio->bi_integrity = NULL;
+ bid->bio->bi_opf &= ~REQ_INTEGRITY;
+ kfree(bvec_virt(bid->bip.bip_vec));
+ mempool_free(bid, &bid_pool);
+}
+
+static void bio_integrity_verify_fn(struct work_struct *work)
+{
+ struct bio_integrity_data *bid =
+ container_of(work, struct bio_integrity_data, work);
+ struct bio *bio = bid->bio;
+
+ blk_integrity_verify_iter(bio, &bid->saved_bio_iter);
+ bio_integrity_finish(bid);
+ bio_endio(bio);
+}
+
+/**
+ * __bio_integrity_endio - Integrity I/O completion function
+ * @bio: Protected bio
+ *
+ * Normally I/O completion is done in interrupt context. However, verifying I/O
+ * integrity is a time-consuming task which must be run in process context.
+ *
+ * This function postpones completion accordingly.
+ */
+bool __bio_integrity_endio(struct bio *bio)
+{
+ struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk);
+ struct bio_integrity_payload *bip = bio_integrity(bio);
+ struct bio_integrity_data *bid =
+ container_of(bip, struct bio_integrity_data, bip);
+
+ if (bio_op(bio) == REQ_OP_READ && !bio->bi_status && bi->csum_type) {
+ INIT_WORK(&bid->work, bio_integrity_verify_fn);
+ queue_work(kintegrityd_wq, &bid->work);
+ return false;
+ }
+
+ bio_integrity_finish(bid);
+ return true;
+}
+
+/**
+ * bio_integrity_prep - Prepare bio for integrity I/O
+ * @bio: bio to prepare
+ *
+ * Checks if the bio already has an integrity payload attached. If it does, the
+ * payload has been generated by another kernel subsystem, and we just pass it
+ * through.
+ * Otherwise an integrity payload is allocated. For writes, the integrity
+ * metadata is generated here; for reads, the completion handler verifies
+ * the metadata.
+ */
+bool bio_integrity_prep(struct bio *bio)
+{
+ struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk);
+ struct bio_integrity_data *bid;
+ gfp_t gfp = GFP_NOIO;
+ unsigned int len;
+ void *buf;
+
+ if (!bi)
+ return true;
+
+ if (!bio_sectors(bio))
+ return true;
+
+ /* Already protected? */
+ if (bio_integrity(bio))
+ return true;
+
+ switch (bio_op(bio)) {
+ case REQ_OP_READ:
+ if (bi->flags & BLK_INTEGRITY_NOVERIFY)
+ return true;
+ break;
+ case REQ_OP_WRITE:
+ if (bi->flags & BLK_INTEGRITY_NOGENERATE)
+ return true;
+
+ /*
+ * Zero the memory allocated to not leak uninitialized kernel
+ * memory to disk for non-integrity metadata where nothing else
+ * initializes the memory.
+ */
+ if (bi->csum_type == BLK_INTEGRITY_CSUM_NONE)
+ gfp |= __GFP_ZERO;
+ break;
+ default:
+ return true;
+ }
+
+ if (WARN_ON_ONCE(bio_has_crypt_ctx(bio)))
+ return true;
+
+ /* Allocate kernel buffer for protection data */
+ len = bio_integrity_bytes(bi, bio_sectors(bio));
+ buf = kmalloc(len, gfp);
+ if (!buf)
+ goto err_end_io;
+ bid = mempool_alloc(&bid_pool, GFP_NOIO);
+ if (!bid)
+ goto err_free_buf;
+ bio_integrity_init(bio, &bid->bip, &bid->bvec, 1);
+
+ bid->bio = bio;
+
+ bid->bip.bip_flags |= BIP_BLOCK_INTEGRITY;
+ bip_set_seed(&bid->bip, bio->bi_iter.bi_sector);
+
+ if (bi->csum_type == BLK_INTEGRITY_CSUM_IP)
+ bid->bip.bip_flags |= BIP_IP_CHECKSUM;
+ if (bi->csum_type)
+ bid->bip.bip_flags |= BIP_CHECK_GUARD;
+ if (bi->flags & BLK_INTEGRITY_REF_TAG)
+ bid->bip.bip_flags |= BIP_CHECK_REFTAG;
+
+ if (bio_integrity_add_page(bio, virt_to_page(buf), len,
+ offset_in_page(buf)) < len)
+ goto err_end_io;
+
+ /* Auto-generate integrity metadata if this is a write */
+ if (bio_data_dir(bio) == WRITE)
+ blk_integrity_generate(bio);
+ else
+ bid->saved_bio_iter = bio->bi_iter;
+ return true;
+
+err_free_buf:
+ kfree(buf);
+err_end_io:
+ bio->bi_status = BLK_STS_RESOURCE;
+ bio_endio(bio);
+ return false;
+}
+EXPORT_SYMBOL(bio_integrity_prep);
+
+void blk_flush_integrity(void)
+{
+ flush_workqueue(kintegrityd_wq);
+}
+
+static int __init blk_integrity_auto_init(void)
+{
+ bid_slab = kmem_cache_create("bio_integrity_data",
+ sizeof(struct bio_integrity_data), 0,
+ SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
+
+ if (mempool_init_slab_pool(&bid_pool, BIO_POOL_SIZE, bid_slab))
+ panic("bio: can't create integrity pool\n");
+
+ /*
+ * kintegrityd won't block much but may burn a lot of CPU cycles.
+ * Make it highpri CPU intensive wq with max concurrency of 1.
+ */
+ kintegrityd_wq = alloc_workqueue("kintegrityd", WQ_MEM_RECLAIM |
+ WQ_HIGHPRI | WQ_CPU_INTENSIVE, 1);
+ if (!kintegrityd_wq)
+ panic("Failed to create kintegrityd\n");
+ return 0;
+}
+subsys_initcall(blk_integrity_auto_init);
diff --git a/block/bio-integrity.c b/block/bio-integrity.c
index 5d81ad9a3d20..608594a154a5 100644
--- a/block/bio-integrity.c
+++ b/block/bio-integrity.c
@@ -7,20 +7,12 @@
*/
#include <linux/blk-integrity.h>
-#include <linux/mempool.h>
-#include <linux/export.h>
-#include <linux/bio.h>
-#include <linux/workqueue.h>
-#include <linux/slab.h>
#include "blk.h"
-static struct kmem_cache *bip_slab;
-static struct workqueue_struct *kintegrityd_wq;
-
-void blk_flush_integrity(void)
-{
- flush_workqueue(kintegrityd_wq);
-}
+struct bio_integrity_alloc {
+ struct bio_integrity_payload bip;
+ struct bio_vec bvecs[];
+};
/**
* bio_integrity_free - Free bio integrity payload
@@ -30,21 +22,23 @@ void blk_flush_integrity(void)
*/
void bio_integrity_free(struct bio *bio)
{
- struct bio_integrity_payload *bip = bio_integrity(bio);
- struct bio_set *bs = bio->bi_pool;
-
- if (bs && mempool_initialized(&bs->bio_integrity_pool)) {
- if (bip->bip_vec)
- bvec_free(&bs->bvec_integrity_pool, bip->bip_vec,
- bip->bip_max_vcnt);
- mempool_free(bip, &bs->bio_integrity_pool);
- } else {
- kfree(bip);
- }
+ kfree(bio_integrity(bio));
bio->bi_integrity = NULL;
bio->bi_opf &= ~REQ_INTEGRITY;
}
+void bio_integrity_init(struct bio *bio, struct bio_integrity_payload *bip,
+ struct bio_vec *bvecs, unsigned int nr_vecs)
+{
+ memset(bip, 0, sizeof(*bip));
+ bip->bip_max_vcnt = nr_vecs;
+ if (nr_vecs)
+ bip->bip_vec = bvecs;
+
+ bio->bi_integrity = bip;
+ bio->bi_opf |= REQ_INTEGRITY;
+}
+
/**
* bio_integrity_alloc - Allocate integrity payload and attach it to bio
* @bio: bio to attach integrity metadata to
@@ -59,48 +53,16 @@ struct bio_integrity_payload *bio_integrity_alloc(struct bio *bio,
gfp_t gfp_mask,
unsigned int nr_vecs)
{
- struct bio_integrity_payload *bip;
- struct bio_set *bs = bio->bi_pool;
- unsigned inline_vecs;
+ struct bio_integrity_alloc *bia;
if (WARN_ON_ONCE(bio_has_crypt_ctx(bio)))
return ERR_PTR(-EOPNOTSUPP);
- if (!bs || !mempool_initialized(&bs->bio_integrity_pool)) {
- bip = kmalloc(struct_size(bip, bip_inline_vecs, nr_vecs), gfp_mask);
- inline_vecs = nr_vecs;
- } else {
- bip = mempool_alloc(&bs->bio_integrity_pool, gfp_mask);
- inline_vecs = BIO_INLINE_VECS;
- }
-
- if (unlikely(!bip))
+ bia = kmalloc(struct_size(bia, bvecs, nr_vecs), gfp_mask);
+ if (unlikely(!bia))
return ERR_PTR(-ENOMEM);
-
- memset(bip, 0, sizeof(*bip));
-
- /* always report as many vecs as asked explicitly, not inline vecs */
- bip->bip_max_vcnt = nr_vecs;
- if (nr_vecs > inline_vecs) {
- bip->bip_vec = bvec_alloc(&bs->bvec_integrity_pool,
- &bip->bip_max_vcnt, gfp_mask);
- if (!bip->bip_vec)
- goto err;
- } else if (nr_vecs) {
- bip->bip_vec = bip->bip_inline_vecs;
- }
-
- bip->bip_bio = bio;
- bio->bi_integrity = bip;
- bio->bi_opf |= REQ_INTEGRITY;
-
- return bip;
-err:
- if (bs && mempool_initialized(&bs->bio_integrity_pool))
- mempool_free(bip, &bs->bio_integrity_pool);
- else
- kfree(bip);
- return ERR_PTR(-ENOMEM);
+ bio_integrity_init(bio, &bia->bip, bia->bvecs, nr_vecs);
+ return &bia->bip;
}
EXPORT_SYMBOL(bio_integrity_alloc);
@@ -414,149 +376,6 @@ int bio_integrity_map_iter(struct bio *bio, struct uio_meta *meta)
}
/**
- * bio_integrity_prep - Prepare bio for integrity I/O
- * @bio: bio to prepare
- *
- * Description: Checks if the bio already has an integrity payload attached.
- * If it does, the payload has been generated by another kernel subsystem,
- * and we just pass it through. Otherwise allocates integrity payload.
- * The bio must have data direction, target device and start sector set priot
- * to calling. In the WRITE case, integrity metadata will be generated using
- * the block device's integrity function. In the READ case, the buffer
- * will be prepared for DMA and a suitable end_io handler set up.
- */
-bool bio_integrity_prep(struct bio *bio)
-{
- struct bio_integrity_payload *bip;
- struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk);
- unsigned int len;
- void *buf;
- gfp_t gfp = GFP_NOIO;
-
- if (!bi)
- return true;
-
- if (!bio_sectors(bio))
- return true;
-
- /* Already protected? */
- if (bio_integrity(bio))
- return true;
-
- switch (bio_op(bio)) {
- case REQ_OP_READ:
- if (bi->flags & BLK_INTEGRITY_NOVERIFY)
- return true;
- break;
- case REQ_OP_WRITE:
- if (bi->flags & BLK_INTEGRITY_NOGENERATE)
- return true;
-
- /*
- * Zero the memory allocated to not leak uninitialized kernel
- * memory to disk for non-integrity metadata where nothing else
- * initializes the memory.
- */
- if (bi->csum_type == BLK_INTEGRITY_CSUM_NONE)
- gfp |= __GFP_ZERO;
- break;
- default:
- return true;
- }
-
- /* Allocate kernel buffer for protection data */
- len = bio_integrity_bytes(bi, bio_sectors(bio));
- buf = kmalloc(len, gfp);
- if (unlikely(buf == NULL)) {
- goto err_end_io;
- }
-
- bip = bio_integrity_alloc(bio, GFP_NOIO, 1);
- if (IS_ERR(bip)) {
- kfree(buf);
- goto err_end_io;
- }
-
- bip->bip_flags |= BIP_BLOCK_INTEGRITY;
- bip_set_seed(bip, bio->bi_iter.bi_sector);
-
- if (bi->csum_type == BLK_INTEGRITY_CSUM_IP)
- bip->bip_flags |= BIP_IP_CHECKSUM;
-
- /* describe what tags to check in payload */
- if (bi->csum_type)
- bip->bip_flags |= BIP_CHECK_GUARD;
- if (bi->flags & BLK_INTEGRITY_REF_TAG)
- bip->bip_flags |= BIP_CHECK_REFTAG;
- if (bio_integrity_add_page(bio, virt_to_page(buf), len,
- offset_in_page(buf)) < len) {
- printk(KERN_ERR "could not attach integrity payload\n");
- goto err_end_io;
- }
-
- /* Auto-generate integrity metadata if this is a write */
- if (bio_data_dir(bio) == WRITE)
- blk_integrity_generate(bio);
- else
- bip->bio_iter = bio->bi_iter;
- return true;
-
-err_end_io:
- bio->bi_status = BLK_STS_RESOURCE;
- bio_endio(bio);
- return false;
-}
-EXPORT_SYMBOL(bio_integrity_prep);
-
-/**
- * bio_integrity_verify_fn - Integrity I/O completion worker
- * @work: Work struct stored in bio to be verified
- *
- * Description: This workqueue function is called to complete a READ
- * request. The function verifies the transferred integrity metadata
- * and then calls the original bio end_io function.
- */
-static void bio_integrity_verify_fn(struct work_struct *work)
-{
- struct bio_integrity_payload *bip =
- container_of(work, struct bio_integrity_payload, bip_work);
- struct bio *bio = bip->bip_bio;
-
- blk_integrity_verify(bio);
-
- kfree(bvec_virt(bip->bip_vec));
- bio_integrity_free(bio);
- bio_endio(bio);
-}
-
-/**
- * __bio_integrity_endio - Integrity I/O completion function
- * @bio: Protected bio
- *
- * Description: Completion for integrity I/O
- *
- * Normally I/O completion is done in interrupt context. However,
- * verifying I/O integrity is a time-consuming task which must be run
- * in process context. This function postpones completion
- * accordingly.
- */
-bool __bio_integrity_endio(struct bio *bio)
-{
- struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk);
- struct bio_integrity_payload *bip = bio_integrity(bio);
-
- if (bio_op(bio) == REQ_OP_READ && !bio->bi_status && bi->csum_type) {
- INIT_WORK(&bip->bip_work, bio_integrity_verify_fn);
- queue_work(kintegrityd_wq, &bip->bip_work);
- return false;
- }
-
- kfree(bvec_virt(bip->bip_vec));
- bio_integrity_free(bio);
- return true;
-}
-
-/**
* bio_integrity_advance - Advance integrity vector
* @bio: bio whose integrity vector to update
* @bytes_done: number of data bytes that have been completed
@@ -617,44 +436,3 @@ int bio_integrity_clone(struct bio *bio, struct bio *bio_src,
return 0;
}
-
-int bioset_integrity_create(struct bio_set *bs, int pool_size)
-{
- if (mempool_initialized(&bs->bio_integrity_pool))
- return 0;
-
- if (mempool_init_slab_pool(&bs->bio_integrity_pool,
- pool_size, bip_slab))
- return -1;
-
- if (biovec_init_pool(&bs->bvec_integrity_pool, pool_size)) {
- mempool_exit(&bs->bio_integrity_pool);
- return -1;
- }
-
- return 0;
-}
-EXPORT_SYMBOL(bioset_integrity_create);
-
-void bioset_integrity_free(struct bio_set *bs)
-{
- mempool_exit(&bs->bio_integrity_pool);
- mempool_exit(&bs->bvec_integrity_pool);
-}
-
-void __init bio_integrity_init(void)
-{
- /*
- * kintegrityd won't block much but may burn a lot of CPU cycles.
- * Make it highpri CPU intensive wq with max concurrency of 1.
- */
- kintegrityd_wq = alloc_workqueue("kintegrityd", WQ_MEM_RECLAIM |
- WQ_HIGHPRI | WQ_CPU_INTENSIVE, 1);
- if (!kintegrityd_wq)
- panic("Failed to create kintegrityd\n");
-
- bip_slab = kmem_cache_create("bio_integrity_payload",
- sizeof(struct bio_integrity_payload) +
- sizeof(struct bio_vec) * BIO_INLINE_VECS,
- 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
-}
diff --git a/block/bio.c b/block/bio.c
index 6ac5983ba51e..4e6c85a33d74 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -1026,9 +1026,10 @@ EXPORT_SYMBOL(bio_add_page);
void bio_add_folio_nofail(struct bio *bio, struct folio *folio, size_t len,
size_t off)
{
+ unsigned long nr = off / PAGE_SIZE;
+
WARN_ON_ONCE(len > UINT_MAX);
- WARN_ON_ONCE(off > UINT_MAX);
- __bio_add_page(bio, &folio->page, len, off);
+ __bio_add_page(bio, folio_page(folio, nr), len, off % PAGE_SIZE);
}
EXPORT_SYMBOL_GPL(bio_add_folio_nofail);
@@ -1049,9 +1050,11 @@ EXPORT_SYMBOL_GPL(bio_add_folio_nofail);
bool bio_add_folio(struct bio *bio, struct folio *folio, size_t len,
size_t off)
{
- if (len > UINT_MAX || off > UINT_MAX)
+ unsigned long nr = off / PAGE_SIZE;
+
+ if (len > UINT_MAX)
return false;
- return bio_add_page(bio, &folio->page, len, off) > 0;
+ return bio_add_page(bio, folio_page(folio, nr), len, off % PAGE_SIZE) > 0;
}
EXPORT_SYMBOL(bio_add_folio);
@@ -1657,7 +1660,6 @@ void bioset_exit(struct bio_set *bs)
mempool_exit(&bs->bio_pool);
mempool_exit(&bs->bvec_pool);
- bioset_integrity_free(bs);
if (bs->bio_slab)
bio_put_slab(bs);
bs->bio_slab = NULL;
@@ -1737,8 +1739,6 @@ static int __init init_bio(void)
BUILD_BUG_ON(BIO_FLAG_LAST > 8 * sizeof_field(struct bio, bi_flags));
- bio_integrity_init();
-
for (i = 0; i < ARRAY_SIZE(bvec_slabs); i++) {
struct biovec_slab *bvs = bvec_slabs + i;
@@ -1754,9 +1754,6 @@ static int __init init_bio(void)
BIOSET_NEED_BVECS | BIOSET_PERCPU_CACHE))
panic("bio: can't allocate bios\n");
- if (bioset_integrity_create(&fs_bio_set, BIO_POOL_SIZE))
- panic("bio: can't create integrity pool\n");
-
return 0;
}
subsys_initcall(init_bio);
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 9ed93d91d754..5905f277057b 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -659,6 +659,7 @@ static int blkcg_reset_stats(struct cgroup_subsys_state *css,
struct blkcg_gq *blkg;
int i;
+ pr_info_once("blkio.%s is deprecated\n", cftype->name);
mutex_lock(&blkcg_pol_mutex);
spin_lock_irq(&blkcg->lock);
@@ -815,6 +816,41 @@ int blkg_conf_open_bdev(struct blkg_conf_ctx *ctx)
ctx->bdev = bdev;
return 0;
}
+/*
+ * Similar to blkg_conf_open_bdev, but additionally freezes the queue,
+ * acquires q->elevator_lock, and ensures the correct locking order
+ * between q->elevator_lock and q->rq_qos_mutex.
+ *
+ * This function returns negative error on failure. On success it returns
+ * memflags which must be saved and later passed to blkg_conf_exit_frozen
+ * for restoring the memalloc scope.
+ */
+unsigned long __must_check blkg_conf_open_bdev_frozen(struct blkg_conf_ctx *ctx)
+{
+ int ret;
+ unsigned long memflags;
+
+ if (ctx->bdev)
+ return -EINVAL;
+
+ ret = blkg_conf_open_bdev(ctx);
+ if (ret < 0)
+ return ret;
+ /*
+ * At this point, we haven't started protecting anything related to QoS,
+ * so we release q->rq_qos_mutex here, which was first acquired in
+ * blkg_conf_open_bdev(). Later, we re-acquire q->rq_qos_mutex after freezing
+ * the queue and acquiring q->elevator_lock to maintain the correct
+ * locking order.
+ */
+ mutex_unlock(&ctx->bdev->bd_queue->rq_qos_mutex);
+
+ memflags = blk_mq_freeze_queue(ctx->bdev->bd_queue);
+ mutex_lock(&ctx->bdev->bd_queue->elevator_lock);
+ mutex_lock(&ctx->bdev->bd_queue->rq_qos_mutex);
+
+ return memflags;
+}
/**
* blkg_conf_prep - parse and prepare for per-blkg config update
@@ -971,6 +1007,22 @@ void blkg_conf_exit(struct blkg_conf_ctx *ctx)
}
EXPORT_SYMBOL_GPL(blkg_conf_exit);
+/*
+ * Similar to blkg_conf_exit, but also unfreezes the queue and releases
+ * q->elevator_lock. Should be used when blkg_conf_open_bdev_frozen
+ * is used to open the bdev.
+ */
+void blkg_conf_exit_frozen(struct blkg_conf_ctx *ctx, unsigned long memflags)
+{
+ if (ctx->bdev) {
+ struct request_queue *q = ctx->bdev->bd_queue;
+
+ blkg_conf_exit(ctx);
+ mutex_unlock(&q->elevator_lock);
+ blk_mq_unfreeze_queue(q, memflags);
+ }
+}
+
static void blkg_iostat_add(struct blkg_iostat *dst, struct blkg_iostat *src)
{
int i;
@@ -1727,27 +1779,27 @@ int blkcg_policy_register(struct blkcg_policy *pol)
struct blkcg *blkcg;
int i, ret;
+ /*
+ * Make sure cpd/pd_alloc_fn and cpd/pd_free_fn in pairs, and policy
+ * without pd_alloc_fn/pd_free_fn can't be activated.
+ */
+ if ((!pol->cpd_alloc_fn ^ !pol->cpd_free_fn) ||
+ (!pol->pd_alloc_fn ^ !pol->pd_free_fn))
+ return -EINVAL;
+
mutex_lock(&blkcg_pol_register_mutex);
mutex_lock(&blkcg_pol_mutex);
/* find an empty slot */
- ret = -ENOSPC;
for (i = 0; i < BLKCG_MAX_POLS; i++)
if (!blkcg_policy[i])
break;
if (i >= BLKCG_MAX_POLS) {
pr_warn("blkcg_policy_register: BLKCG_MAX_POLS too small\n");
+ ret = -ENOSPC;
goto err_unlock;
}
- /*
- * Make sure cpd/pd_alloc_fn and cpd/pd_free_fn in pairs, and policy
- * without pd_alloc_fn/pd_free_fn can't be activated.
- */
- if ((!pol->cpd_alloc_fn ^ !pol->cpd_free_fn) ||
- (!pol->pd_alloc_fn ^ !pol->pd_free_fn))
- goto err_unlock;
-
/* register @pol */
pol->plid = i;
blkcg_policy[pol->plid] = pol;
@@ -1758,8 +1810,10 @@ int blkcg_policy_register(struct blkcg_policy *pol)
struct blkcg_policy_data *cpd;
cpd = pol->cpd_alloc_fn(GFP_KERNEL);
- if (!cpd)
+ if (!cpd) {
+ ret = -ENOMEM;
goto err_free_cpds;
+ }
blkcg->cpd[pol->plid] = cpd;
cpd->blkcg = blkcg;
@@ -1770,12 +1824,15 @@ int blkcg_policy_register(struct blkcg_policy *pol)
mutex_unlock(&blkcg_pol_mutex);
/* everything is in place, add intf files for the new policy */
- if (pol->dfl_cftypes)
+ if (pol->dfl_cftypes == pol->legacy_cftypes) {
+ WARN_ON(cgroup_add_cftypes(&io_cgrp_subsys,
+ pol->dfl_cftypes));
+ } else {
WARN_ON(cgroup_add_dfl_cftypes(&io_cgrp_subsys,
pol->dfl_cftypes));
- if (pol->legacy_cftypes)
WARN_ON(cgroup_add_legacy_cftypes(&io_cgrp_subsys,
pol->legacy_cftypes));
+ }
mutex_unlock(&blkcg_pol_register_mutex);
return 0;
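
For reference, a sketch of how the blkg_conf_open_bdev_frozen()/blkg_conf_exit_frozen() pair added above is intended to be used (the handler name and body are illustrative assumptions, not code from this patch):

    /* Hypothetical per-blkg config write that must run with the queue frozen. */
    static ssize_t example_qos_write(struct blkg_conf_ctx *ctx)
    {
    	unsigned long memflags;

    	memflags = blkg_conf_open_bdev_frozen(ctx);
    	if (IS_ERR_VALUE(memflags))
    		return (ssize_t)memflags;	/* negative errno from open */

    	/* ... update QoS state under elevator_lock and rq_qos_mutex ... */

    	blkg_conf_exit_frozen(ctx, memflags);	/* unlocks and unfreezes */
    	return 0;
    }
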
diff --git a/block/blk-cgroup.h b/block/blk-cgroup.h
index 2c4663bd993a..81868ad86330 100644
--- a/block/blk-cgroup.h
+++ b/block/blk-cgroup.h
@@ -219,9 +219,11 @@ struct blkg_conf_ctx {
void blkg_conf_init(struct blkg_conf_ctx *ctx, char *input);
int blkg_conf_open_bdev(struct blkg_conf_ctx *ctx);
+unsigned long blkg_conf_open_bdev_frozen(struct blkg_conf_ctx *ctx);
int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
struct blkg_conf_ctx *ctx);
void blkg_conf_exit(struct blkg_conf_ctx *ctx);
+void blkg_conf_exit_frozen(struct blkg_conf_ctx *ctx, unsigned long memflags);
/**
* bio_issue_as_root_blkg - see if this bio needs to be issued as root blkg
diff --git a/block/blk-core.c b/block/blk-core.c
index d6c4fa3943b5..4623de79effa 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -429,6 +429,7 @@ struct request_queue *blk_alloc_queue(struct queue_limits *lim, int node_id)
refcount_set(&q->refs, 1);
mutex_init(&q->debugfs_mutex);
+ mutex_init(&q->elevator_lock);
mutex_init(&q->sysfs_lock);
mutex_init(&q->limits_lock);
mutex_init(&q->rq_qos_mutex);
@@ -455,6 +456,12 @@ struct request_queue *blk_alloc_queue(struct queue_limits *lim, int node_id)
lockdep_init_map(&q->q_lockdep_map, "&q->q_usage_counter(queue)",
&q->q_lock_cls_key, 0);
+ /* Teach lockdep about lock ordering (reclaim WRT queue freeze lock). */
+ fs_reclaim_acquire(GFP_KERNEL);
+ rwsem_acquire_read(&q->io_lockdep_map, 0, 0, _RET_IP_);
+ rwsem_release(&q->io_lockdep_map, _RET_IP_);
+ fs_reclaim_release(GFP_KERNEL);
+
q->nr_requests = BLKDEV_DEFAULT_RQ;
return q;
diff --git a/block/blk-crypto-fallback.c b/block/blk-crypto-fallback.c
index 29a205482617..f154be0b575a 100644
--- a/block/blk-crypto-fallback.c
+++ b/block/blk-crypto-fallback.c
@@ -87,7 +87,7 @@ static struct bio_set crypto_bio_split;
* This is the key we set when evicting a keyslot. This *should* be the all 0's
* key, but AES-XTS rejects that key, so we use some random bytes instead.
*/
-static u8 blank_key[BLK_CRYPTO_MAX_KEY_SIZE];
+static u8 blank_key[BLK_CRYPTO_MAX_RAW_KEY_SIZE];
static void blk_crypto_fallback_evict_keyslot(unsigned int slot)
{
@@ -119,7 +119,7 @@ blk_crypto_fallback_keyslot_program(struct blk_crypto_profile *profile,
blk_crypto_fallback_evict_keyslot(slot);
slotp->crypto_mode = crypto_mode;
- err = crypto_skcipher_setkey(slotp->tfms[crypto_mode], key->raw,
+ err = crypto_skcipher_setkey(slotp->tfms[crypto_mode], key->bytes,
key->size);
if (err) {
blk_crypto_fallback_evict_keyslot(slot);
@@ -539,7 +539,7 @@ static int blk_crypto_fallback_init(void)
if (blk_crypto_fallback_inited)
return 0;
- get_random_bytes(blank_key, BLK_CRYPTO_MAX_KEY_SIZE);
+ get_random_bytes(blank_key, sizeof(blank_key));
err = bioset_init(&crypto_bio_split, 64, 0, 0);
if (err)
@@ -561,6 +561,7 @@ static int blk_crypto_fallback_init(void)
blk_crypto_fallback_profile->ll_ops = blk_crypto_fallback_ll_ops;
blk_crypto_fallback_profile->max_dun_bytes_supported = BLK_CRYPTO_MAX_IV_SIZE;
+ blk_crypto_fallback_profile->key_types_supported = BLK_CRYPTO_KEY_TYPE_RAW;
/* All blk-crypto modes have a crypto API fallback. */
for (i = 0; i < BLK_ENCRYPTION_MODE_MAX; i++)
diff --git a/block/blk-crypto-internal.h b/block/blk-crypto-internal.h
index 93a141979694..ccf6dff6ff6b 100644
--- a/block/blk-crypto-internal.h
+++ b/block/blk-crypto-internal.h
@@ -14,6 +14,7 @@ struct blk_crypto_mode {
const char *name; /* name of this mode, shown in sysfs */
const char *cipher_str; /* crypto API name (for fallback case) */
unsigned int keysize; /* key size in bytes */
+ unsigned int security_strength; /* security strength in bytes */
unsigned int ivsize; /* iv size in bytes */
};
@@ -82,6 +83,9 @@ int __blk_crypto_evict_key(struct blk_crypto_profile *profile,
bool __blk_crypto_cfg_supported(struct blk_crypto_profile *profile,
const struct blk_crypto_config *cfg);
+int blk_crypto_ioctl(struct block_device *bdev, unsigned int cmd,
+ void __user *argp);
+
#else /* CONFIG_BLK_INLINE_ENCRYPTION */
static inline int blk_crypto_sysfs_register(struct gendisk *disk)
@@ -129,6 +133,12 @@ static inline bool blk_crypto_rq_has_keyslot(struct request *rq)
return false;
}
+static inline int blk_crypto_ioctl(struct block_device *bdev, unsigned int cmd,
+ void __user *argp)
+{
+ return -ENOTTY;
+}
+
#endif /* CONFIG_BLK_INLINE_ENCRYPTION */
void __bio_crypt_advance(struct bio *bio, unsigned int bytes);
diff --git a/block/blk-crypto-profile.c b/block/blk-crypto-profile.c
index 7fabc883e39f..94a155912bf1 100644
--- a/block/blk-crypto-profile.c
+++ b/block/blk-crypto-profile.c
@@ -352,6 +352,8 @@ bool __blk_crypto_cfg_supported(struct blk_crypto_profile *profile,
return false;
if (profile->max_dun_bytes_supported < cfg->dun_bytes)
return false;
+ if (!(profile->key_types_supported & cfg->key_type))
+ return false;
return true;
}
@@ -463,6 +465,99 @@ bool blk_crypto_register(struct blk_crypto_profile *profile,
EXPORT_SYMBOL_GPL(blk_crypto_register);
/**
+ * blk_crypto_derive_sw_secret() - Derive software secret from wrapped key
+ * @bdev: a block device that supports hardware-wrapped keys
+ * @eph_key: a hardware-wrapped key in ephemerally-wrapped form
+ * @eph_key_size: size of @eph_key in bytes
+ * @sw_secret: (output) the software secret
+ *
+ * Given a hardware-wrapped key in ephemerally-wrapped form (the same form in
+ * which it is used for I/O), ask the hardware to derive the secret that software can
+ * use for cryptographic tasks other than inline encryption. This secret is
+ * guaranteed to be cryptographically isolated from the inline encryption key,
+ * i.e. derived with a different KDF context.
+ *
+ * Return: 0 on success, -EOPNOTSUPP if the block device doesn't support
+ * hardware-wrapped keys, -EBADMSG if the key isn't a valid
+ * ephemerally-wrapped key, or another -errno code.
+ */
+int blk_crypto_derive_sw_secret(struct block_device *bdev,
+ const u8 *eph_key, size_t eph_key_size,
+ u8 sw_secret[BLK_CRYPTO_SW_SECRET_SIZE])
+{
+ struct blk_crypto_profile *profile =
+ bdev_get_queue(bdev)->crypto_profile;
+ int err;
+
+ if (!profile)
+ return -EOPNOTSUPP;
+ if (!(profile->key_types_supported & BLK_CRYPTO_KEY_TYPE_HW_WRAPPED))
+ return -EOPNOTSUPP;
+ if (!profile->ll_ops.derive_sw_secret)
+ return -EOPNOTSUPP;
+ blk_crypto_hw_enter(profile);
+ err = profile->ll_ops.derive_sw_secret(profile, eph_key, eph_key_size,
+ sw_secret);
+ blk_crypto_hw_exit(profile);
+ return err;
+}
+
+int blk_crypto_import_key(struct blk_crypto_profile *profile,
+ const u8 *raw_key, size_t raw_key_size,
+ u8 lt_key[BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE])
+{
+ int ret;
+
+ if (!profile)
+ return -EOPNOTSUPP;
+ if (!(profile->key_types_supported & BLK_CRYPTO_KEY_TYPE_HW_WRAPPED))
+ return -EOPNOTSUPP;
+ if (!profile->ll_ops.import_key)
+ return -EOPNOTSUPP;
+ blk_crypto_hw_enter(profile);
+ ret = profile->ll_ops.import_key(profile, raw_key, raw_key_size,
+ lt_key);
+ blk_crypto_hw_exit(profile);
+ return ret;
+}
+
+int blk_crypto_generate_key(struct blk_crypto_profile *profile,
+ u8 lt_key[BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE])
+{
+ int ret;
+
+ if (!profile)
+ return -EOPNOTSUPP;
+ if (!(profile->key_types_supported & BLK_CRYPTO_KEY_TYPE_HW_WRAPPED))
+ return -EOPNOTSUPP;
+ if (!profile->ll_ops.generate_key)
+ return -EOPNOTSUPP;
+ blk_crypto_hw_enter(profile);
+ ret = profile->ll_ops.generate_key(profile, lt_key);
+ blk_crypto_hw_exit(profile);
+ return ret;
+}
+
+int blk_crypto_prepare_key(struct blk_crypto_profile *profile,
+ const u8 *lt_key, size_t lt_key_size,
+ u8 eph_key[BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE])
+{
+ int ret;
+
+ if (!profile)
+ return -EOPNOTSUPP;
+ if (!(profile->key_types_supported & BLK_CRYPTO_KEY_TYPE_HW_WRAPPED))
+ return -EOPNOTSUPP;
+ if (!profile->ll_ops.prepare_key)
+ return -EOPNOTSUPP;
+ blk_crypto_hw_enter(profile);
+ ret = profile->ll_ops.prepare_key(profile, lt_key, lt_key_size,
+ eph_key);
+ blk_crypto_hw_exit(profile);
+ return ret;
+}
+
+/**
* blk_crypto_intersect_capabilities() - restrict supported crypto capabilities
* by child device
* @parent: the crypto profile for the parent device
@@ -485,10 +580,12 @@ void blk_crypto_intersect_capabilities(struct blk_crypto_profile *parent,
child->max_dun_bytes_supported);
for (i = 0; i < ARRAY_SIZE(child->modes_supported); i++)
parent->modes_supported[i] &= child->modes_supported[i];
+ parent->key_types_supported &= child->key_types_supported;
} else {
parent->max_dun_bytes_supported = 0;
memset(parent->modes_supported, 0,
sizeof(parent->modes_supported));
+ parent->key_types_supported = 0;
}
}
EXPORT_SYMBOL_GPL(blk_crypto_intersect_capabilities);
@@ -521,6 +618,9 @@ bool blk_crypto_has_capabilities(const struct blk_crypto_profile *target,
target->max_dun_bytes_supported)
return false;
+ if (reference->key_types_supported & ~target->key_types_supported)
+ return false;
+
return true;
}
EXPORT_SYMBOL_GPL(blk_crypto_has_capabilities);
@@ -555,5 +655,6 @@ void blk_crypto_update_capabilities(struct blk_crypto_profile *dst,
sizeof(dst->modes_supported));
dst->max_dun_bytes_supported = src->max_dun_bytes_supported;
+ dst->key_types_supported = src->key_types_supported;
}
EXPORT_SYMBOL_GPL(blk_crypto_update_capabilities);
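
For context, a driver whose controller supports hardware-wrapped keys would advertise BLK_CRYPTO_KEY_TYPE_HW_WRAPPED and fill in the ll_ops hooks exercised by the helpers above. A rough sketch; all demo_* hardware helpers are assumed, only the blk-crypto-facing structure and field names come from this patch:

/* Hypothetical driver glue, not part of this patch. */
static int demo_import_key(struct blk_crypto_profile *profile,
			   const u8 *raw_key, size_t raw_key_size,
			   u8 lt_key[BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE])
{
	/*
	 * Ask the hardware to wrap the raw key into long-term form and
	 * return the number of bytes written to lt_key (or -errno).
	 */
	return demo_hw_wrap_key(raw_key, raw_key_size, lt_key);
}

static const struct blk_crypto_ll_ops demo_crypto_ll_ops = {
	.keyslot_program	= demo_keyslot_program,
	.keyslot_evict		= demo_keyslot_evict,
	.derive_sw_secret	= demo_derive_sw_secret,
	.import_key		= demo_import_key,
	.generate_key		= demo_generate_key,
	.prepare_key		= demo_prepare_key,
};

static void demo_init_crypto_profile(struct blk_crypto_profile *profile)
{
	profile->ll_ops = demo_crypto_ll_ops;
	profile->key_types_supported = BLK_CRYPTO_KEY_TYPE_HW_WRAPPED;
}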
diff --git a/block/blk-crypto-sysfs.c b/block/blk-crypto-sysfs.c
index a304434489ba..e832f403f200 100644
--- a/block/blk-crypto-sysfs.c
+++ b/block/blk-crypto-sysfs.c
@@ -31,6 +31,13 @@ static struct blk_crypto_attr *attr_to_crypto_attr(struct attribute *attr)
return container_of(attr, struct blk_crypto_attr, attr);
}
+static ssize_t hw_wrapped_keys_show(struct blk_crypto_profile *profile,
+ struct blk_crypto_attr *attr, char *page)
+{
+ /* Always show supported, since the file doesn't exist otherwise. */
+ return sysfs_emit(page, "supported\n");
+}
+
static ssize_t max_dun_bits_show(struct blk_crypto_profile *profile,
struct blk_crypto_attr *attr, char *page)
{
@@ -43,20 +50,48 @@ static ssize_t num_keyslots_show(struct blk_crypto_profile *profile,
return sysfs_emit(page, "%u\n", profile->num_slots);
}
+static ssize_t raw_keys_show(struct blk_crypto_profile *profile,
+ struct blk_crypto_attr *attr, char *page)
+{
+ /* Always show supported, since the file doesn't exist otherwise. */
+ return sysfs_emit(page, "supported\n");
+}
+
#define BLK_CRYPTO_RO_ATTR(_name) \
static struct blk_crypto_attr _name##_attr = __ATTR_RO(_name)
+BLK_CRYPTO_RO_ATTR(hw_wrapped_keys);
BLK_CRYPTO_RO_ATTR(max_dun_bits);
BLK_CRYPTO_RO_ATTR(num_keyslots);
+BLK_CRYPTO_RO_ATTR(raw_keys);
+
+static umode_t blk_crypto_is_visible(struct kobject *kobj,
+ struct attribute *attr, int n)
+{
+ struct blk_crypto_profile *profile = kobj_to_crypto_profile(kobj);
+ struct blk_crypto_attr *a = attr_to_crypto_attr(attr);
+
+ if (a == &hw_wrapped_keys_attr &&
+ !(profile->key_types_supported & BLK_CRYPTO_KEY_TYPE_HW_WRAPPED))
+ return 0;
+ if (a == &raw_keys_attr &&
+ !(profile->key_types_supported & BLK_CRYPTO_KEY_TYPE_RAW))
+ return 0;
+
+ return 0444;
+}
static struct attribute *blk_crypto_attrs[] = {
+ &hw_wrapped_keys_attr.attr,
&max_dun_bits_attr.attr,
&num_keyslots_attr.attr,
+ &raw_keys_attr.attr,
NULL,
};
static const struct attribute_group blk_crypto_attr_group = {
.attrs = blk_crypto_attrs,
+ .is_visible = blk_crypto_is_visible,
};
/*
diff --git a/block/blk-crypto.c b/block/blk-crypto.c
index 4d760b092deb..4b1ad84d1b5a 100644
--- a/block/blk-crypto.c
+++ b/block/blk-crypto.c
@@ -23,24 +23,28 @@ const struct blk_crypto_mode blk_crypto_modes[] = {
.name = "AES-256-XTS",
.cipher_str = "xts(aes)",
.keysize = 64,
+ .security_strength = 32,
.ivsize = 16,
},
[BLK_ENCRYPTION_MODE_AES_128_CBC_ESSIV] = {
.name = "AES-128-CBC-ESSIV",
.cipher_str = "essiv(cbc(aes),sha256)",
.keysize = 16,
+ .security_strength = 16,
.ivsize = 16,
},
[BLK_ENCRYPTION_MODE_ADIANTUM] = {
.name = "Adiantum",
.cipher_str = "adiantum(xchacha12,aes)",
.keysize = 32,
+ .security_strength = 32,
.ivsize = 32,
},
[BLK_ENCRYPTION_MODE_SM4_XTS] = {
.name = "SM4-XTS",
.cipher_str = "xts(sm4)",
.keysize = 32,
+ .security_strength = 16,
.ivsize = 16,
},
};
@@ -76,9 +80,15 @@ static int __init bio_crypt_ctx_init(void)
/* This is assumed in various places. */
BUILD_BUG_ON(BLK_ENCRYPTION_MODE_INVALID != 0);
- /* Sanity check that no algorithm exceeds the defined limits. */
+ /*
+ * Validate the crypto mode properties. This ideally would be done with
+ * static assertions, but boot-time checks are the next best thing.
+ */
for (i = 0; i < BLK_ENCRYPTION_MODE_MAX; i++) {
- BUG_ON(blk_crypto_modes[i].keysize > BLK_CRYPTO_MAX_KEY_SIZE);
+ BUG_ON(blk_crypto_modes[i].keysize >
+ BLK_CRYPTO_MAX_RAW_KEY_SIZE);
+ BUG_ON(blk_crypto_modes[i].security_strength >
+ blk_crypto_modes[i].keysize);
BUG_ON(blk_crypto_modes[i].ivsize > BLK_CRYPTO_MAX_IV_SIZE);
}
@@ -315,17 +325,20 @@ int __blk_crypto_rq_bio_prep(struct request *rq, struct bio *bio,
/**
* blk_crypto_init_key() - Prepare a key for use with blk-crypto
* @blk_key: Pointer to the blk_crypto_key to initialize.
- * @raw_key: Pointer to the raw key. Must be the correct length for the chosen
- * @crypto_mode; see blk_crypto_modes[].
+ * @key_bytes: the bytes of the key
+ * @key_size: size of the key in bytes
+ * @key_type: type of the key -- either raw or hardware-wrapped
* @crypto_mode: identifier for the encryption algorithm to use
* @dun_bytes: number of bytes that will be used to specify the DUN when this
* key is used
* @data_unit_size: the data unit size to use for en/decryption
*
* Return: 0 on success, -errno on failure. The caller is responsible for
- * zeroizing both blk_key and raw_key when done with them.
+ * zeroizing both blk_key and key_bytes when done with them.
*/
-int blk_crypto_init_key(struct blk_crypto_key *blk_key, const u8 *raw_key,
+int blk_crypto_init_key(struct blk_crypto_key *blk_key,
+ const u8 *key_bytes, size_t key_size,
+ enum blk_crypto_key_type key_type,
enum blk_crypto_mode_num crypto_mode,
unsigned int dun_bytes,
unsigned int data_unit_size)
@@ -338,8 +351,19 @@ int blk_crypto_init_key(struct blk_crypto_key *blk_key, const u8 *raw_key,
return -EINVAL;
mode = &blk_crypto_modes[crypto_mode];
- if (mode->keysize == 0)
+ switch (key_type) {
+ case BLK_CRYPTO_KEY_TYPE_RAW:
+ if (key_size != mode->keysize)
+ return -EINVAL;
+ break;
+ case BLK_CRYPTO_KEY_TYPE_HW_WRAPPED:
+ if (key_size < mode->security_strength ||
+ key_size > BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE)
+ return -EINVAL;
+ break;
+ default:
return -EINVAL;
+ }
if (dun_bytes == 0 || dun_bytes > mode->ivsize)
return -EINVAL;
@@ -350,9 +374,10 @@ int blk_crypto_init_key(struct blk_crypto_key *blk_key, const u8 *raw_key,
blk_key->crypto_cfg.crypto_mode = crypto_mode;
blk_key->crypto_cfg.dun_bytes = dun_bytes;
blk_key->crypto_cfg.data_unit_size = data_unit_size;
+ blk_key->crypto_cfg.key_type = key_type;
blk_key->data_unit_size_bits = ilog2(data_unit_size);
- blk_key->size = mode->keysize;
- memcpy(blk_key->raw, raw_key, mode->keysize);
+ blk_key->size = key_size;
+ memcpy(blk_key->bytes, key_bytes, key_size);
return 0;
}
@@ -372,8 +397,10 @@ bool blk_crypto_config_supported_natively(struct block_device *bdev,
bool blk_crypto_config_supported(struct block_device *bdev,
const struct blk_crypto_config *cfg)
{
- return IS_ENABLED(CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK) ||
- blk_crypto_config_supported_natively(bdev, cfg);
+ if (IS_ENABLED(CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK) &&
+ cfg->key_type == BLK_CRYPTO_KEY_TYPE_RAW)
+ return true;
+ return blk_crypto_config_supported_natively(bdev, cfg);
}
/**
@@ -387,15 +414,21 @@ bool blk_crypto_config_supported(struct block_device *bdev,
* an skcipher, and *should not* be called from the data path, since that might
* cause a deadlock
*
- * Return: 0 on success; -ENOPKG if the hardware doesn't support the key and
- * blk-crypto-fallback is either disabled or the needed algorithm
- * is disabled in the crypto API; or another -errno code.
+ * Return: 0 on success; -EOPNOTSUPP if the key is wrapped but the hardware does
+ * not support wrapped keys; -ENOPKG if the key is a raw key but the
+ * hardware does not support raw keys and blk-crypto-fallback is either
+ * disabled or the needed algorithm is disabled in the crypto API; or
+ * another -errno code if something else went wrong.
*/
int blk_crypto_start_using_key(struct block_device *bdev,
const struct blk_crypto_key *key)
{
if (blk_crypto_config_supported_natively(bdev, &key->crypto_cfg))
return 0;
+ if (key->crypto_cfg.key_type != BLK_CRYPTO_KEY_TYPE_RAW) {
+ pr_warn_ratelimited("%pg: no support for wrapped keys\n", bdev);
+ return -EOPNOTSUPP;
+ }
return blk_crypto_fallback_start_using_mode(key->crypto_cfg.crypto_mode);
}
@@ -436,3 +469,146 @@ void blk_crypto_evict_key(struct block_device *bdev,
pr_warn_ratelimited("%pg: error %d evicting key\n", bdev, err);
}
EXPORT_SYMBOL_GPL(blk_crypto_evict_key);
+
+static int blk_crypto_ioctl_import_key(struct blk_crypto_profile *profile,
+ void __user *argp)
+{
+ struct blk_crypto_import_key_arg arg;
+ u8 raw_key[BLK_CRYPTO_MAX_RAW_KEY_SIZE];
+ u8 lt_key[BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE];
+ int ret;
+
+ if (copy_from_user(&arg, argp, sizeof(arg)))
+ return -EFAULT;
+
+ if (memchr_inv(arg.reserved, 0, sizeof(arg.reserved)))
+ return -EINVAL;
+
+ if (arg.raw_key_size < 16 || arg.raw_key_size > sizeof(raw_key))
+ return -EINVAL;
+
+ if (copy_from_user(raw_key, u64_to_user_ptr(arg.raw_key_ptr),
+ arg.raw_key_size)) {
+ ret = -EFAULT;
+ goto out;
+ }
+ ret = blk_crypto_import_key(profile, raw_key, arg.raw_key_size, lt_key);
+ if (ret < 0)
+ goto out;
+ if (ret > arg.lt_key_size) {
+ ret = -EOVERFLOW;
+ goto out;
+ }
+ arg.lt_key_size = ret;
+ if (copy_to_user(u64_to_user_ptr(arg.lt_key_ptr), lt_key,
+ arg.lt_key_size) ||
+ copy_to_user(argp, &arg, sizeof(arg))) {
+ ret = -EFAULT;
+ goto out;
+ }
+ ret = 0;
+
+out:
+ memzero_explicit(raw_key, sizeof(raw_key));
+ memzero_explicit(lt_key, sizeof(lt_key));
+ return ret;
+}
+
+static int blk_crypto_ioctl_generate_key(struct blk_crypto_profile *profile,
+ void __user *argp)
+{
+ struct blk_crypto_generate_key_arg arg;
+ u8 lt_key[BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE];
+ int ret;
+
+ if (copy_from_user(&arg, argp, sizeof(arg)))
+ return -EFAULT;
+
+ if (memchr_inv(arg.reserved, 0, sizeof(arg.reserved)))
+ return -EINVAL;
+
+ ret = blk_crypto_generate_key(profile, lt_key);
+ if (ret < 0)
+ goto out;
+ if (ret > arg.lt_key_size) {
+ ret = -EOVERFLOW;
+ goto out;
+ }
+ arg.lt_key_size = ret;
+ if (copy_to_user(u64_to_user_ptr(arg.lt_key_ptr), lt_key,
+ arg.lt_key_size) ||
+ copy_to_user(argp, &arg, sizeof(arg))) {
+ ret = -EFAULT;
+ goto out;
+ }
+ ret = 0;
+
+out:
+ memzero_explicit(lt_key, sizeof(lt_key));
+ return ret;
+}
+
+static int blk_crypto_ioctl_prepare_key(struct blk_crypto_profile *profile,
+ void __user *argp)
+{
+ struct blk_crypto_prepare_key_arg arg;
+ u8 lt_key[BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE];
+ u8 eph_key[BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE];
+ int ret;
+
+ if (copy_from_user(&arg, argp, sizeof(arg)))
+ return -EFAULT;
+
+ if (memchr_inv(arg.reserved, 0, sizeof(arg.reserved)))
+ return -EINVAL;
+
+ if (arg.lt_key_size > sizeof(lt_key))
+ return -EINVAL;
+
+ if (copy_from_user(lt_key, u64_to_user_ptr(arg.lt_key_ptr),
+ arg.lt_key_size)) {
+ ret = -EFAULT;
+ goto out;
+ }
+ ret = blk_crypto_prepare_key(profile, lt_key, arg.lt_key_size, eph_key);
+ if (ret < 0)
+ goto out;
+ if (ret > arg.eph_key_size) {
+ ret = -EOVERFLOW;
+ goto out;
+ }
+ arg.eph_key_size = ret;
+ if (copy_to_user(u64_to_user_ptr(arg.eph_key_ptr), eph_key,
+ arg.eph_key_size) ||
+ copy_to_user(argp, &arg, sizeof(arg))) {
+ ret = -EFAULT;
+ goto out;
+ }
+ ret = 0;
+
+out:
+ memzero_explicit(lt_key, sizeof(lt_key));
+ memzero_explicit(eph_key, sizeof(eph_key));
+ return ret;
+}
+
+int blk_crypto_ioctl(struct block_device *bdev, unsigned int cmd,
+ void __user *argp)
+{
+ struct blk_crypto_profile *profile =
+ bdev_get_queue(bdev)->crypto_profile;
+
+ if (!profile)
+ return -EOPNOTSUPP;
+
+ switch (cmd) {
+ case BLKCRYPTOIMPORTKEY:
+ return blk_crypto_ioctl_import_key(profile, argp);
+ case BLKCRYPTOGENERATEKEY:
+ return blk_crypto_ioctl_generate_key(profile, argp);
+ case BLKCRYPTOPREPAREKEY:
+ return blk_crypto_ioctl_prepare_key(profile, argp);
+ default:
+ return -ENOTTY;
+ }
+}
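
From userspace, the intended flow is to import (or generate) a key once, store the returned long-term wrapped blob, and run BLKCRYPTOPREPAREKEY after each boot to obtain the ephemerally-wrapped form used for I/O. A hedged sketch of the import step; the field names come from the code above, while the uapi header location and exact field types are assumptions:

/* Hypothetical userspace usage, error handling trimmed. */
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/blk-crypto.h>	/* assumed home of the new uapi definitions */

static int demo_import(int bdev_fd, const unsigned char *raw, uint32_t raw_len,
			unsigned char *lt_key, uint32_t lt_key_len)
{
	struct blk_crypto_import_key_arg arg = {
		.raw_key_ptr	= (uintptr_t)raw,
		.raw_key_size	= raw_len,
		.lt_key_ptr	= (uintptr_t)lt_key,
		.lt_key_size	= lt_key_len,
		/* .reserved stays zeroed, as required by the kernel. */
	};

	if (ioctl(bdev_fd, BLKCRYPTOIMPORTKEY, &arg) != 0)
		return -1;
	/* On success the kernel shrinks lt_key_size to the actual length. */
	return (int)arg.lt_key_size;
}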
diff --git a/block/blk-flush.c b/block/blk-flush.c
index a72e2a83d075..43d6152897a4 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -95,9 +95,9 @@ static void blk_kick_flush(struct request_queue *q,
struct blk_flush_queue *fq, blk_opf_t flags);
static inline struct blk_flush_queue *
-blk_get_flush_queue(struct request_queue *q, struct blk_mq_ctx *ctx)
+blk_get_flush_queue(struct blk_mq_ctx *ctx)
{
- return blk_mq_map_queue(q, REQ_OP_FLUSH, ctx)->fq;
+ return blk_mq_map_queue(REQ_OP_FLUSH, ctx)->fq;
}
static unsigned int blk_flush_cur_seq(struct request *rq)
@@ -205,7 +205,7 @@ static enum rq_end_io_ret flush_end_io(struct request *flush_rq,
struct list_head *running;
struct request *rq, *n;
unsigned long flags = 0;
- struct blk_flush_queue *fq = blk_get_flush_queue(q, flush_rq->mq_ctx);
+ struct blk_flush_queue *fq = blk_get_flush_queue(flush_rq->mq_ctx);
/* release the tag's ownership to the req cloned from */
spin_lock_irqsave(&fq->mq_flush_lock, flags);
@@ -341,7 +341,7 @@ static enum rq_end_io_ret mq_flush_data_end_io(struct request *rq,
struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
struct blk_mq_ctx *ctx = rq->mq_ctx;
unsigned long flags;
- struct blk_flush_queue *fq = blk_get_flush_queue(q, ctx);
+ struct blk_flush_queue *fq = blk_get_flush_queue(ctx);
if (q->elevator) {
WARN_ON(rq->tag < 0);
@@ -382,7 +382,7 @@ static void blk_rq_init_flush(struct request *rq)
bool blk_insert_flush(struct request *rq)
{
struct request_queue *q = rq->q;
- struct blk_flush_queue *fq = blk_get_flush_queue(q, rq->mq_ctx);
+ struct blk_flush_queue *fq = blk_get_flush_queue(rq->mq_ctx);
bool supports_fua = q->limits.features & BLK_FEAT_FUA;
unsigned int policy = 0;
diff --git a/block/blk-iocost.c b/block/blk-iocost.c
index 65a1d4427ccf..5bfd70311359 100644
--- a/block/blk-iocost.c
+++ b/block/blk-iocost.c
@@ -2718,8 +2718,7 @@ retry_lock:
* All waiters are on iocg->waitq and the wait states are
* synchronized using waitq.lock.
*/
- init_waitqueue_func_entry(&wait.wait, iocg_wake_fn);
- wait.wait.private = current;
+ init_wait_func(&wait.wait, iocg_wake_fn);
wait.bio = bio;
wait.abs_cost = abs_cost;
wait.committed = false; /* will be set true by waker */
@@ -3004,8 +3003,7 @@ static void ioc_pd_init(struct blkg_policy_data *pd)
iocg->hweight_inuse = WEIGHT_ONE;
init_waitqueue_head(&iocg->waitq);
- hrtimer_init(&iocg->waitq_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
- iocg->waitq_timer.function = iocg_waitq_timer_fn;
+ hrtimer_setup(&iocg->waitq_timer, iocg_waitq_timer_fn, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
iocg->level = blkg->blkcg->css.cgroup->level;
@@ -3224,14 +3222,16 @@ static ssize_t ioc_qos_write(struct kernfs_open_file *of, char *input,
u32 qos[NR_QOS_PARAMS];
bool enable, user;
char *body, *p;
- unsigned int memflags;
+ unsigned long memflags;
int ret;
blkg_conf_init(&ctx, input);
- ret = blkg_conf_open_bdev(&ctx);
- if (ret)
+ memflags = blkg_conf_open_bdev_frozen(&ctx);
+ if (IS_ERR_VALUE(memflags)) {
+ ret = memflags;
goto err;
+ }
body = ctx.body;
disk = ctx.bdev->bd_disk;
@@ -3248,7 +3248,6 @@ static ssize_t ioc_qos_write(struct kernfs_open_file *of, char *input,
ioc = q_to_ioc(disk->queue);
}
- memflags = blk_mq_freeze_queue(disk->queue);
blk_mq_quiesce_queue(disk->queue);
spin_lock_irq(&ioc->lock);
@@ -3348,19 +3347,15 @@ static ssize_t ioc_qos_write(struct kernfs_open_file *of, char *input,
wbt_enable_default(disk);
blk_mq_unquiesce_queue(disk->queue);
- blk_mq_unfreeze_queue(disk->queue, memflags);
- blkg_conf_exit(&ctx);
+ blkg_conf_exit_frozen(&ctx, memflags);
return nbytes;
einval:
spin_unlock_irq(&ioc->lock);
-
blk_mq_unquiesce_queue(disk->queue);
- blk_mq_unfreeze_queue(disk->queue, memflags);
-
ret = -EINVAL;
err:
- blkg_conf_exit(&ctx);
+ blkg_conf_exit_frozen(&ctx, memflags);
return ret;
}
diff --git a/block/blk-ioprio.c b/block/blk-ioprio.c
index 8fff7ccc0ac7..13659dc15c3f 100644
--- a/block/blk-ioprio.c
+++ b/block/blk-ioprio.c
@@ -113,27 +113,18 @@ static void ioprio_free_cpd(struct blkcg_policy_data *cpd)
kfree(blkcg);
}
-#define IOPRIO_ATTRS \
- { \
- .name = "prio.class", \
- .seq_show = ioprio_show_prio_policy, \
- .write = ioprio_set_prio_policy, \
- }, \
- { } /* sentinel */
-
-/* cgroup v2 attributes */
static struct cftype ioprio_files[] = {
- IOPRIO_ATTRS
-};
-
-/* cgroup v1 attributes */
-static struct cftype ioprio_legacy_files[] = {
- IOPRIO_ATTRS
+ {
+ .name = "prio.class",
+ .seq_show = ioprio_show_prio_policy,
+ .write = ioprio_set_prio_policy,
+ },
+ { } /* sentinel */
};
static struct blkcg_policy ioprio_policy = {
.dfl_cftypes = ioprio_files,
- .legacy_cftypes = ioprio_legacy_files,
+ .legacy_cftypes = ioprio_files,
.cpd_alloc_fn = ioprio_alloc_cpd,
.cpd_free_fn = ioprio_free_cpd,
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 1d1589c35297..fdd4efb54c6c 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -551,8 +551,8 @@ static inline struct scatterlist *blk_next_sg(struct scatterlist **sg,
* Map a request to scatterlist, return number of sg entries setup. Caller
* must make sure sg can hold rq->nr_phys_segments entries.
*/
-int __blk_rq_map_sg(struct request_queue *q, struct request *rq,
- struct scatterlist *sglist, struct scatterlist **last_sg)
+int __blk_rq_map_sg(struct request *rq, struct scatterlist *sglist,
+ struct scatterlist **last_sg)
{
struct req_iterator iter = {
.bio = rq->bio,
diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c
index adf5f0697b6b..3421b5521fe2 100644
--- a/block/blk-mq-debugfs.c
+++ b/block/blk-mq-debugfs.c
@@ -347,9 +347,14 @@ static int hctx_busy_show(void *data, struct seq_file *m)
{
struct blk_mq_hw_ctx *hctx = data;
struct show_busy_params params = { .m = m, .hctx = hctx };
+ int res;
+ res = mutex_lock_interruptible(&hctx->queue->elevator_lock);
+ if (res)
+ return res;
blk_mq_tagset_busy_iter(hctx->queue->tag_set, hctx_show_busy_rq,
&params);
+ mutex_unlock(&hctx->queue->elevator_lock);
return 0;
}
@@ -400,15 +405,14 @@ static int hctx_tags_show(void *data, struct seq_file *m)
struct request_queue *q = hctx->queue;
int res;
- res = mutex_lock_interruptible(&q->sysfs_lock);
+ res = mutex_lock_interruptible(&q->elevator_lock);
if (res)
- goto out;
+ return res;
if (hctx->tags)
blk_mq_debugfs_tags_show(m, hctx->tags);
- mutex_unlock(&q->sysfs_lock);
+ mutex_unlock(&q->elevator_lock);
-out:
- return res;
+ return 0;
}
static int hctx_tags_bitmap_show(void *data, struct seq_file *m)
@@ -417,15 +421,14 @@ static int hctx_tags_bitmap_show(void *data, struct seq_file *m)
struct request_queue *q = hctx->queue;
int res;
- res = mutex_lock_interruptible(&q->sysfs_lock);
+ res = mutex_lock_interruptible(&q->elevator_lock);
if (res)
- goto out;
+ return res;
if (hctx->tags)
sbitmap_bitmap_show(&hctx->tags->bitmap_tags.sb, m);
- mutex_unlock(&q->sysfs_lock);
+ mutex_unlock(&q->elevator_lock);
-out:
- return res;
+ return 0;
}
static int hctx_sched_tags_show(void *data, struct seq_file *m)
@@ -434,15 +437,14 @@ static int hctx_sched_tags_show(void *data, struct seq_file *m)
struct request_queue *q = hctx->queue;
int res;
- res = mutex_lock_interruptible(&q->sysfs_lock);
+ res = mutex_lock_interruptible(&q->elevator_lock);
if (res)
- goto out;
+ return res;
if (hctx->sched_tags)
blk_mq_debugfs_tags_show(m, hctx->sched_tags);
- mutex_unlock(&q->sysfs_lock);
+ mutex_unlock(&q->elevator_lock);
-out:
- return res;
+ return 0;
}
static int hctx_sched_tags_bitmap_show(void *data, struct seq_file *m)
@@ -451,15 +453,14 @@ static int hctx_sched_tags_bitmap_show(void *data, struct seq_file *m)
struct request_queue *q = hctx->queue;
int res;
- res = mutex_lock_interruptible(&q->sysfs_lock);
+ res = mutex_lock_interruptible(&q->elevator_lock);
if (res)
- goto out;
+ return res;
if (hctx->sched_tags)
sbitmap_bitmap_show(&hctx->sched_tags->bitmap_tags.sb, m);
- mutex_unlock(&q->sysfs_lock);
+ mutex_unlock(&q->elevator_lock);
-out:
- return res;
+ return 0;
}
static int hctx_active_show(void *data, struct seq_file *m)
diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index 7442ca27c2bf..109611445d40 100644
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -349,7 +349,7 @@ bool blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio,
}
ctx = blk_mq_get_ctx(q);
- hctx = blk_mq_map_queue(q, bio->bi_opf, ctx);
+ hctx = blk_mq_map_queue(bio->bi_opf, ctx);
type = hctx->type;
if (list_empty_careful(&ctx->rq_lists[type]))
goto out_put;
diff --git a/block/blk-mq-sysfs.c b/block/blk-mq-sysfs.c
index 3feeeccf8a99..24656980f443 100644
--- a/block/blk-mq-sysfs.c
+++ b/block/blk-mq-sysfs.c
@@ -61,9 +61,9 @@ static ssize_t blk_mq_hw_sysfs_show(struct kobject *kobj,
if (!entry->show)
return -EIO;
- mutex_lock(&q->sysfs_lock);
+ mutex_lock(&q->elevator_lock);
res = entry->show(hctx, page);
- mutex_unlock(&q->sysfs_lock);
+ mutex_unlock(&q->elevator_lock);
return res;
}
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index b9f417d980b4..d880c50629d6 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -190,8 +190,7 @@ unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
sbitmap_finish_wait(bt, ws, &wait);
data->ctx = blk_mq_get_ctx(data->q);
- data->hctx = blk_mq_map_queue(data->q, data->cmd_flags,
- data->ctx);
+ data->hctx = blk_mq_map_queue(data->cmd_flags, data->ctx);
tags = blk_mq_tags_from_data(data);
if (data->flags & BLK_MQ_REQ_RESERVED)
bt = &tags->breserved_tags;
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 40490ac88045..ae8494d88897 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -508,7 +508,7 @@ static struct request *__blk_mq_alloc_requests(struct blk_mq_alloc_data *data)
retry:
data->ctx = blk_mq_get_ctx(q);
- data->hctx = blk_mq_map_queue(q, data->cmd_flags, data->ctx);
+ data->hctx = blk_mq_map_queue(data->cmd_flags, data->ctx);
if (q->elevator) {
/*
@@ -3314,6 +3314,7 @@ int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
rq->special_vec = rq_src->special_vec;
}
rq->nr_phys_segments = rq_src->nr_phys_segments;
+ rq->nr_integrity_segments = rq_src->nr_integrity_segments;
if (rq->bio && blk_crypto_rq_bio_prep(rq, rq->bio, gfp_mask) < 0)
goto free_and_out;
@@ -4094,6 +4095,8 @@ static void blk_mq_map_swqueue(struct request_queue *q)
struct blk_mq_ctx *ctx;
struct blk_mq_tag_set *set = q->tag_set;
+ mutex_lock(&q->elevator_lock);
+
queue_for_each_hw_ctx(q, hctx, i) {
cpumask_clear(hctx->cpumask);
hctx->nr_ctx = 0;
@@ -4198,6 +4201,8 @@ static void blk_mq_map_swqueue(struct request_queue *q)
hctx->next_cpu = blk_mq_first_mapped_cpu(hctx);
hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
}
+
+ mutex_unlock(&q->elevator_lock);
}
/*
@@ -4467,7 +4472,7 @@ static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
unsigned long i, j;
/* protect against switching io scheduler */
- mutex_lock(&q->sysfs_lock);
+ mutex_lock(&q->elevator_lock);
for (i = 0; i < set->nr_hw_queues; i++) {
int old_node;
int node = blk_mq_get_hctx_node(set, i);
@@ -4500,7 +4505,7 @@ static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
xa_for_each_start(&q->hctx_table, j, hctx, j)
blk_mq_exit_hctx(q, set, hctx, j);
- mutex_unlock(&q->sysfs_lock);
+ mutex_unlock(&q->elevator_lock);
/* unregister cpuhp callbacks for exited hctxs */
blk_mq_remove_hw_queues_cpuhp(q);
@@ -4933,10 +4938,9 @@ static bool blk_mq_elv_switch_none(struct list_head *head,
if (!qe)
return false;
- /* q->elevator needs protection from ->sysfs_lock */
- mutex_lock(&q->sysfs_lock);
+ /* Accessing q->elevator needs protection from ->elevator_lock. */
+ mutex_lock(&q->elevator_lock);
- /* the check has to be done with holding sysfs_lock */
if (!q->elevator) {
kfree(qe);
goto unlock;
@@ -4950,7 +4954,7 @@ static bool blk_mq_elv_switch_none(struct list_head *head,
list_add(&qe->node, head);
elevator_disable(q);
unlock:
- mutex_unlock(&q->sysfs_lock);
+ mutex_unlock(&q->elevator_lock);
return true;
}
@@ -4980,11 +4984,11 @@ static void blk_mq_elv_switch_back(struct list_head *head,
list_del(&qe->node);
kfree(qe);
- mutex_lock(&q->sysfs_lock);
+ mutex_lock(&q->elevator_lock);
elevator_switch(q, t);
/* drop the reference acquired in blk_mq_elv_switch_none */
elevator_put(t);
- mutex_unlock(&q->sysfs_lock);
+ mutex_unlock(&q->elevator_lock);
}
static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
diff --git a/block/blk-mq.h b/block/blk-mq.h
index 44979e92b79f..3011a78cf16a 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -100,12 +100,10 @@ static inline enum hctx_type blk_mq_get_hctx_type(blk_opf_t opf)
/*
* blk_mq_map_queue() - map (cmd_flags,type) to hardware queue
- * @q: request queue
* @opf: operation type (REQ_OP_*) and flags (e.g. REQ_POLLED).
* @ctx: software queue cpu ctx
*/
-static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
- blk_opf_t opf,
+static inline struct blk_mq_hw_ctx *blk_mq_map_queue(blk_opf_t opf,
struct blk_mq_ctx *ctx)
{
return ctx->hctxs[blk_mq_get_hctx_type(opf)];
diff --git a/block/blk-rq-qos.c b/block/blk-rq-qos.c
index d4d4f4dc0e23..95982bc46ba1 100644
--- a/block/blk-rq-qos.c
+++ b/block/blk-rq-qos.c
@@ -196,7 +196,6 @@ bool rq_depth_scale_down(struct rq_depth *rqd, bool hard_throttle)
struct rq_qos_wait_data {
struct wait_queue_entry wq;
- struct task_struct *task;
struct rq_wait *rqw;
acquire_inflight_cb_t *cb;
void *private_data;
@@ -218,7 +217,20 @@ static int rq_qos_wake_function(struct wait_queue_entry *curr,
return -1;
data->got_token = true;
- wake_up_process(data->task);
+ /*
+ * autoremove_wake_function() removes the wait entry only when it
+ * actually changed the task state. We always want the entry removed,
+ * so remove it explicitly and use default_wake_function().
+ */
+ default_wake_function(curr, mode, wake_flags, key);
+ /*
+ * The order of operations matters here: finish_wait() checks whether
+ * @curr has been removed without taking the lock, so this removal must
+ * be the last step to avoid a use-after-free of @data. The memory
+ * barrier it implies also guarantees that the waiter sees the latest
+ * @data->got_token once list_empty_careful() in finish_wait() returns
+ * true.
+ */
list_del_init_careful(&curr->entry);
return 1;
}
@@ -244,41 +256,55 @@ void rq_qos_wait(struct rq_wait *rqw, void *private_data,
cleanup_cb_t *cleanup_cb)
{
struct rq_qos_wait_data data = {
- .wq = {
- .func = rq_qos_wake_function,
- .entry = LIST_HEAD_INIT(data.wq.entry),
- },
- .task = current,
- .rqw = rqw,
- .cb = acquire_inflight_cb,
- .private_data = private_data,
+ .rqw = rqw,
+ .cb = acquire_inflight_cb,
+ .private_data = private_data,
+ .got_token = false,
};
- bool has_sleeper;
+ bool first_waiter;
- has_sleeper = wq_has_sleeper(&rqw->wait);
- if (!has_sleeper && acquire_inflight_cb(rqw, private_data))
+ /*
+ * If there are no waiters in the waiting queue, try to increase the
+ * inflight counter if we can. Otherwise, prepare for adding ourselves
+ * to the waiting queue.
+ */
+ if (!waitqueue_active(&rqw->wait) && acquire_inflight_cb(rqw, private_data))
return;
- has_sleeper = !prepare_to_wait_exclusive(&rqw->wait, &data.wq,
+ init_wait_func(&data.wq, rq_qos_wake_function);
+ first_waiter = prepare_to_wait_exclusive(&rqw->wait, &data.wq,
TASK_UNINTERRUPTIBLE);
+ /*
+ * Make sure there is at least one inflight process; otherwise, waiters
+ * will never be woken up. Since there may have been no inflight process
+ * before we added ourselves to the wait queue above, try to take an
+ * inflight slot for ourselves here. It is sufficient that the first
+ * waiter to enter the wait queue re-checks the wait condition before
+ * going to sleep, as that guarantees forward progress.
+ */
+ if (!data.got_token && first_waiter && acquire_inflight_cb(rqw, private_data)) {
+ finish_wait(&rqw->wait, &data.wq);
+ /*
+ * We raced with rq_qos_wake_function() getting a token,
+ * which means we now have two. Put our local token
+ * and wake anyone else potentially waiting for one.
+ *
+ * The memory barrier in list_empty_careful(), called from
+ * finish_wait(), pairs with list_del_init_careful() in
+ * rq_qos_wake_function() and makes sure we see the latest
+ * @data->got_token.
+ */
+ if (data.got_token)
+ cleanup_cb(rqw, private_data);
+ return;
+ }
+
+ /* we are now relying on the waker to increase our inflight counter. */
do {
- /* The memory barrier in set_current_state saves us here. */
if (data.got_token)
break;
- if (!has_sleeper && acquire_inflight_cb(rqw, private_data)) {
- finish_wait(&rqw->wait, &data.wq);
-
- /*
- * We raced with rq_qos_wake_function() getting a token,
- * which means we now have two. Put our local token
- * and wake anyone else potentially waiting for one.
- */
- if (data.got_token)
- cleanup_cb(rqw, private_data);
- return;
- }
io_schedule();
- has_sleeper = true;
set_current_state(TASK_UNINTERRUPTIBLE);
} while (1);
finish_wait(&rqw->wait, &data.wq);
diff --git a/block/blk-settings.c b/block/blk-settings.c
index b9c6f0ec1c49..6b2dbe645d23 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -21,7 +21,7 @@
void blk_queue_rq_timeout(struct request_queue *q, unsigned int timeout)
{
- q->rq_timeout = timeout;
+ WRITE_ONCE(q->rq_timeout, timeout);
}
EXPORT_SYMBOL_GPL(blk_queue_rq_timeout);
@@ -114,9 +114,15 @@ static int blk_validate_integrity_limits(struct queue_limits *lim)
pr_warn("invalid PI settings.\n");
return -EINVAL;
}
+ bi->flags |= BLK_INTEGRITY_NOGENERATE | BLK_INTEGRITY_NOVERIFY;
return 0;
}
+ if (lim->features & BLK_FEAT_BOUNCE_HIGH) {
+ pr_warn("no bounce buffer support for integrity metadata\n");
+ return -EINVAL;
+ }
+
if (!IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY)) {
pr_warn("integrity support disabled.\n");
return -EINVAL;
@@ -867,36 +873,28 @@ bool queue_limits_stack_integrity(struct queue_limits *t,
if (!IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY))
return true;
- if (!ti->tuple_size) {
- /* inherit the settings from the first underlying device */
- if (!(ti->flags & BLK_INTEGRITY_STACKED)) {
- ti->flags = BLK_INTEGRITY_DEVICE_CAPABLE |
- (bi->flags & BLK_INTEGRITY_REF_TAG);
- ti->csum_type = bi->csum_type;
- ti->tuple_size = bi->tuple_size;
- ti->pi_offset = bi->pi_offset;
- ti->interval_exp = bi->interval_exp;
- ti->tag_size = bi->tag_size;
- goto done;
- }
- if (!bi->tuple_size)
- goto done;
+ if (ti->flags & BLK_INTEGRITY_STACKED) {
+ if (ti->tuple_size != bi->tuple_size)
+ goto incompatible;
+ if (ti->interval_exp != bi->interval_exp)
+ goto incompatible;
+ if (ti->tag_size != bi->tag_size)
+ goto incompatible;
+ if (ti->csum_type != bi->csum_type)
+ goto incompatible;
+ if ((ti->flags & BLK_INTEGRITY_REF_TAG) !=
+ (bi->flags & BLK_INTEGRITY_REF_TAG))
+ goto incompatible;
+ } else {
+ ti->flags = BLK_INTEGRITY_STACKED;
+ ti->flags |= (bi->flags & BLK_INTEGRITY_DEVICE_CAPABLE) |
+ (bi->flags & BLK_INTEGRITY_REF_TAG);
+ ti->csum_type = bi->csum_type;
+ ti->tuple_size = bi->tuple_size;
+ ti->pi_offset = bi->pi_offset;
+ ti->interval_exp = bi->interval_exp;
+ ti->tag_size = bi->tag_size;
}
-
- if (ti->tuple_size != bi->tuple_size)
- goto incompatible;
- if (ti->interval_exp != bi->interval_exp)
- goto incompatible;
- if (ti->tag_size != bi->tag_size)
- goto incompatible;
- if (ti->csum_type != bi->csum_type)
- goto incompatible;
- if ((ti->flags & BLK_INTEGRITY_REF_TAG) !=
- (bi->flags & BLK_INTEGRITY_REF_TAG))
- goto incompatible;
-
-done:
- ti->flags |= BLK_INTEGRITY_STACKED;
return true;
incompatible:
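
The restructured branch makes the stacking rule easier to follow: the first component seeds the stacked integrity profile, and every later component must match it field for field, otherwise the stack as a whole loses integrity support. A stacking driver would typically apply it per component, roughly as in this hedged sketch (the iteration and error policy are assumed):

/* Illustrative sketch of a stacking driver combining integrity limits. */
static bool demo_stack_integrity(struct queue_limits *top,
				 struct block_device **parts, int nr)
{
	int i;

	for (i = 0; i < nr; i++) {
		/* First call seeds 'top'; later calls must match exactly. */
		if (!queue_limits_stack_integrity(top,
				&bdev_get_queue(parts[i])->limits))
			return false;
	}
	return true;
}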
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 6f548a4376aa..a2882751f0d2 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -23,10 +23,11 @@
struct queue_sysfs_entry {
struct attribute attr;
ssize_t (*show)(struct gendisk *disk, char *page);
+ ssize_t (*show_limit)(struct gendisk *disk, char *page);
+
ssize_t (*store)(struct gendisk *disk, const char *page, size_t count);
int (*store_limit)(struct gendisk *disk, const char *page,
size_t count, struct queue_limits *lim);
- void (*load_module)(struct gendisk *disk, const char *page, size_t count);
};
static ssize_t
@@ -52,7 +53,12 @@ queue_var_store(unsigned long *var, const char *page, size_t count)
static ssize_t queue_requests_show(struct gendisk *disk, char *page)
{
- return queue_var_show(disk->queue->nr_requests, page);
+ ssize_t ret;
+
+ mutex_lock(&disk->queue->elevator_lock);
+ ret = queue_var_show(disk->queue->nr_requests, page);
+ mutex_unlock(&disk->queue->elevator_lock);
+ return ret;
}
static ssize_t
@@ -60,27 +66,38 @@ queue_requests_store(struct gendisk *disk, const char *page, size_t count)
{
unsigned long nr;
int ret, err;
+ unsigned int memflags;
+ struct request_queue *q = disk->queue;
- if (!queue_is_mq(disk->queue))
+ if (!queue_is_mq(q))
return -EINVAL;
ret = queue_var_store(&nr, page, count);
if (ret < 0)
return ret;
+ memflags = blk_mq_freeze_queue(q);
+ mutex_lock(&q->elevator_lock);
if (nr < BLKDEV_MIN_RQ)
nr = BLKDEV_MIN_RQ;
err = blk_mq_update_nr_requests(disk->queue, nr);
if (err)
- return err;
-
+ ret = err;
+ mutex_unlock(&q->elevator_lock);
+ blk_mq_unfreeze_queue(q, memflags);
return ret;
}
static ssize_t queue_ra_show(struct gendisk *disk, char *page)
{
- return queue_var_show(disk->bdi->ra_pages << (PAGE_SHIFT - 10), page);
+ ssize_t ret;
+
+ mutex_lock(&disk->queue->limits_lock);
+ ret = queue_var_show(disk->bdi->ra_pages << (PAGE_SHIFT - 10), page);
+ mutex_unlock(&disk->queue->limits_lock);
+
+ return ret;
}
static ssize_t
@@ -88,11 +105,22 @@ queue_ra_store(struct gendisk *disk, const char *page, size_t count)
{
unsigned long ra_kb;
ssize_t ret;
+ unsigned int memflags;
+ struct request_queue *q = disk->queue;
ret = queue_var_store(&ra_kb, page, count);
if (ret < 0)
return ret;
+ /*
+ * ->ra_pages is protected by ->limits_lock because it is usually
+ * calculated from the queue limits by queue_limits_commit_update.
+ */
+ mutex_lock(&q->limits_lock);
+ memflags = blk_mq_freeze_queue(q);
disk->bdi->ra_pages = ra_kb >> (PAGE_SHIFT - 10);
+ mutex_unlock(&q->limits_lock);
+ blk_mq_unfreeze_queue(q, memflags);
+
return ret;
}
@@ -238,8 +266,9 @@ static ssize_t queue_poll_show(struct gendisk *disk, char *page)
{
if (queue_is_mq(disk->queue))
return sysfs_emit(page, "%u\n", blk_mq_can_poll(disk->queue));
+
return sysfs_emit(page, "%u\n",
- !!(disk->queue->limits.features & BLK_FEAT_POLL));
+ !!(disk->queue->limits.features & BLK_FEAT_POLL));
}
static ssize_t queue_zoned_show(struct gendisk *disk, char *page)
@@ -286,17 +315,21 @@ static ssize_t queue_nomerges_store(struct gendisk *disk, const char *page,
size_t count)
{
unsigned long nm;
+ unsigned int memflags;
+ struct request_queue *q = disk->queue;
ssize_t ret = queue_var_store(&nm, page, count);
if (ret < 0)
return ret;
- blk_queue_flag_clear(QUEUE_FLAG_NOMERGES, disk->queue);
- blk_queue_flag_clear(QUEUE_FLAG_NOXMERGES, disk->queue);
+ memflags = blk_mq_freeze_queue(q);
+ blk_queue_flag_clear(QUEUE_FLAG_NOMERGES, q);
+ blk_queue_flag_clear(QUEUE_FLAG_NOXMERGES, q);
if (nm == 2)
- blk_queue_flag_set(QUEUE_FLAG_NOMERGES, disk->queue);
+ blk_queue_flag_set(QUEUE_FLAG_NOMERGES, q);
else if (nm)
- blk_queue_flag_set(QUEUE_FLAG_NOXMERGES, disk->queue);
+ blk_queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
+ blk_mq_unfreeze_queue(q, memflags);
return ret;
}
@@ -316,11 +349,19 @@ queue_rq_affinity_store(struct gendisk *disk, const char *page, size_t count)
#ifdef CONFIG_SMP
struct request_queue *q = disk->queue;
unsigned long val;
+ unsigned int memflags;
ret = queue_var_store(&val, page, count);
if (ret < 0)
return ret;
+ /*
+ * Each of the two queue flags is updated with an atomic bitop. Updating
+ * the pair is not atomic as a whole, but that is harmless because the
+ * flags are also read individually with atomic test_bit(), so no lock
+ * is taken while updating them.
+ */
+ memflags = blk_mq_freeze_queue(q);
if (val == 2) {
blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
blk_queue_flag_set(QUEUE_FLAG_SAME_FORCE, q);
@@ -331,6 +372,7 @@ queue_rq_affinity_store(struct gendisk *disk, const char *page, size_t count)
blk_queue_flag_clear(QUEUE_FLAG_SAME_COMP, q);
blk_queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
}
+ blk_mq_unfreeze_queue(q, memflags);
#endif
return ret;
}
@@ -344,29 +386,43 @@ static ssize_t queue_poll_delay_store(struct gendisk *disk, const char *page,
static ssize_t queue_poll_store(struct gendisk *disk, const char *page,
size_t count)
{
- if (!(disk->queue->limits.features & BLK_FEAT_POLL))
- return -EINVAL;
+ unsigned int memflags;
+ ssize_t ret = count;
+ struct request_queue *q = disk->queue;
+
+ memflags = blk_mq_freeze_queue(q);
+ if (!(q->limits.features & BLK_FEAT_POLL)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
pr_info_ratelimited("writes to the poll attribute are ignored.\n");
pr_info_ratelimited("please use driver specific parameters instead.\n");
- return count;
+out:
+ blk_mq_unfreeze_queue(q, memflags);
+ return ret;
}
static ssize_t queue_io_timeout_show(struct gendisk *disk, char *page)
{
- return sysfs_emit(page, "%u\n", jiffies_to_msecs(disk->queue->rq_timeout));
+ return sysfs_emit(page, "%u\n",
+ jiffies_to_msecs(READ_ONCE(disk->queue->rq_timeout)));
}
static ssize_t queue_io_timeout_store(struct gendisk *disk, const char *page,
size_t count)
{
- unsigned int val;
+ unsigned int val, memflags;
int err;
+ struct request_queue *q = disk->queue;
err = kstrtou32(page, 10, &val);
if (err || val == 0)
return -EINVAL;
- blk_queue_rq_timeout(disk->queue, msecs_to_jiffies(val));
+ memflags = blk_mq_freeze_queue(q);
+ blk_queue_rq_timeout(q, msecs_to_jiffies(val));
+ blk_mq_unfreeze_queue(q, memflags);
return count;
}
@@ -412,57 +468,55 @@ static struct queue_sysfs_entry _prefix##_entry = { \
.store = _prefix##_store, \
};
-#define QUEUE_LIM_RW_ENTRY(_prefix, _name) \
+#define QUEUE_LIM_RO_ENTRY(_prefix, _name) \
static struct queue_sysfs_entry _prefix##_entry = { \
- .attr = { .name = _name, .mode = 0644 }, \
- .show = _prefix##_show, \
- .store_limit = _prefix##_store, \
+ .attr = { .name = _name, .mode = 0444 }, \
+ .show_limit = _prefix##_show, \
}
-#define QUEUE_RW_LOAD_MODULE_ENTRY(_prefix, _name) \
-static struct queue_sysfs_entry _prefix##_entry = { \
+#define QUEUE_LIM_RW_ENTRY(_prefix, _name) \
+static struct queue_sysfs_entry _prefix##_entry = { \
.attr = { .name = _name, .mode = 0644 }, \
- .show = _prefix##_show, \
- .load_module = _prefix##_load_module, \
- .store = _prefix##_store, \
+ .show_limit = _prefix##_show, \
+ .store_limit = _prefix##_store, \
}
QUEUE_RW_ENTRY(queue_requests, "nr_requests");
QUEUE_RW_ENTRY(queue_ra, "read_ahead_kb");
QUEUE_LIM_RW_ENTRY(queue_max_sectors, "max_sectors_kb");
-QUEUE_RO_ENTRY(queue_max_hw_sectors, "max_hw_sectors_kb");
-QUEUE_RO_ENTRY(queue_max_segments, "max_segments");
-QUEUE_RO_ENTRY(queue_max_integrity_segments, "max_integrity_segments");
-QUEUE_RO_ENTRY(queue_max_segment_size, "max_segment_size");
-QUEUE_RW_LOAD_MODULE_ENTRY(elv_iosched, "scheduler");
-
-QUEUE_RO_ENTRY(queue_logical_block_size, "logical_block_size");
-QUEUE_RO_ENTRY(queue_physical_block_size, "physical_block_size");
-QUEUE_RO_ENTRY(queue_chunk_sectors, "chunk_sectors");
-QUEUE_RO_ENTRY(queue_io_min, "minimum_io_size");
-QUEUE_RO_ENTRY(queue_io_opt, "optimal_io_size");
-
-QUEUE_RO_ENTRY(queue_max_discard_segments, "max_discard_segments");
-QUEUE_RO_ENTRY(queue_discard_granularity, "discard_granularity");
-QUEUE_RO_ENTRY(queue_max_hw_discard_sectors, "discard_max_hw_bytes");
+QUEUE_LIM_RO_ENTRY(queue_max_hw_sectors, "max_hw_sectors_kb");
+QUEUE_LIM_RO_ENTRY(queue_max_segments, "max_segments");
+QUEUE_LIM_RO_ENTRY(queue_max_integrity_segments, "max_integrity_segments");
+QUEUE_LIM_RO_ENTRY(queue_max_segment_size, "max_segment_size");
+QUEUE_RW_ENTRY(elv_iosched, "scheduler");
+
+QUEUE_LIM_RO_ENTRY(queue_logical_block_size, "logical_block_size");
+QUEUE_LIM_RO_ENTRY(queue_physical_block_size, "physical_block_size");
+QUEUE_LIM_RO_ENTRY(queue_chunk_sectors, "chunk_sectors");
+QUEUE_LIM_RO_ENTRY(queue_io_min, "minimum_io_size");
+QUEUE_LIM_RO_ENTRY(queue_io_opt, "optimal_io_size");
+
+QUEUE_LIM_RO_ENTRY(queue_max_discard_segments, "max_discard_segments");
+QUEUE_LIM_RO_ENTRY(queue_discard_granularity, "discard_granularity");
+QUEUE_LIM_RO_ENTRY(queue_max_hw_discard_sectors, "discard_max_hw_bytes");
QUEUE_LIM_RW_ENTRY(queue_max_discard_sectors, "discard_max_bytes");
QUEUE_RO_ENTRY(queue_discard_zeroes_data, "discard_zeroes_data");
-QUEUE_RO_ENTRY(queue_atomic_write_max_sectors, "atomic_write_max_bytes");
-QUEUE_RO_ENTRY(queue_atomic_write_boundary_sectors,
+QUEUE_LIM_RO_ENTRY(queue_atomic_write_max_sectors, "atomic_write_max_bytes");
+QUEUE_LIM_RO_ENTRY(queue_atomic_write_boundary_sectors,
"atomic_write_boundary_bytes");
-QUEUE_RO_ENTRY(queue_atomic_write_unit_max, "atomic_write_unit_max_bytes");
-QUEUE_RO_ENTRY(queue_atomic_write_unit_min, "atomic_write_unit_min_bytes");
+QUEUE_LIM_RO_ENTRY(queue_atomic_write_unit_max, "atomic_write_unit_max_bytes");
+QUEUE_LIM_RO_ENTRY(queue_atomic_write_unit_min, "atomic_write_unit_min_bytes");
QUEUE_RO_ENTRY(queue_write_same_max, "write_same_max_bytes");
-QUEUE_RO_ENTRY(queue_max_write_zeroes_sectors, "write_zeroes_max_bytes");
-QUEUE_RO_ENTRY(queue_max_zone_append_sectors, "zone_append_max_bytes");
-QUEUE_RO_ENTRY(queue_zone_write_granularity, "zone_write_granularity");
+QUEUE_LIM_RO_ENTRY(queue_max_write_zeroes_sectors, "write_zeroes_max_bytes");
+QUEUE_LIM_RO_ENTRY(queue_max_zone_append_sectors, "zone_append_max_bytes");
+QUEUE_LIM_RO_ENTRY(queue_zone_write_granularity, "zone_write_granularity");
-QUEUE_RO_ENTRY(queue_zoned, "zoned");
+QUEUE_LIM_RO_ENTRY(queue_zoned, "zoned");
QUEUE_RO_ENTRY(queue_nr_zones, "nr_zones");
-QUEUE_RO_ENTRY(queue_max_open_zones, "max_open_zones");
-QUEUE_RO_ENTRY(queue_max_active_zones, "max_active_zones");
+QUEUE_LIM_RO_ENTRY(queue_max_open_zones, "max_open_zones");
+QUEUE_LIM_RO_ENTRY(queue_max_active_zones, "max_active_zones");
QUEUE_RW_ENTRY(queue_nomerges, "nomerges");
QUEUE_LIM_RW_ENTRY(queue_iostats_passthrough, "iostats_passthrough");
@@ -470,16 +524,16 @@ QUEUE_RW_ENTRY(queue_rq_affinity, "rq_affinity");
QUEUE_RW_ENTRY(queue_poll, "io_poll");
QUEUE_RW_ENTRY(queue_poll_delay, "io_poll_delay");
QUEUE_LIM_RW_ENTRY(queue_wc, "write_cache");
-QUEUE_RO_ENTRY(queue_fua, "fua");
-QUEUE_RO_ENTRY(queue_dax, "dax");
+QUEUE_LIM_RO_ENTRY(queue_fua, "fua");
+QUEUE_LIM_RO_ENTRY(queue_dax, "dax");
QUEUE_RW_ENTRY(queue_io_timeout, "io_timeout");
-QUEUE_RO_ENTRY(queue_virt_boundary_mask, "virt_boundary_mask");
-QUEUE_RO_ENTRY(queue_dma_alignment, "dma_alignment");
+QUEUE_LIM_RO_ENTRY(queue_virt_boundary_mask, "virt_boundary_mask");
+QUEUE_LIM_RO_ENTRY(queue_dma_alignment, "dma_alignment");
/* legacy alias for logical_block_size: */
static struct queue_sysfs_entry queue_hw_sector_size_entry = {
- .attr = {.name = "hw_sector_size", .mode = 0444 },
- .show = queue_logical_block_size_show,
+ .attr = {.name = "hw_sector_size", .mode = 0444 },
+ .show_limit = queue_logical_block_size_show,
};
QUEUE_LIM_RW_ENTRY(queue_rotational, "rotational");
@@ -503,14 +557,24 @@ static ssize_t queue_var_store64(s64 *var, const char *page)
static ssize_t queue_wb_lat_show(struct gendisk *disk, char *page)
{
- if (!wbt_rq_qos(disk->queue))
- return -EINVAL;
+ ssize_t ret;
+ struct request_queue *q = disk->queue;
- if (wbt_disabled(disk->queue))
- return sysfs_emit(page, "0\n");
+ mutex_lock(&q->elevator_lock);
+ if (!wbt_rq_qos(q)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (wbt_disabled(q)) {
+ ret = sysfs_emit(page, "0\n");
+ goto out;
+ }
- return sysfs_emit(page, "%llu\n",
- div_u64(wbt_get_min_lat(disk->queue), 1000));
+ ret = sysfs_emit(page, "%llu\n", div_u64(wbt_get_min_lat(q), 1000));
+out:
+ mutex_unlock(&q->elevator_lock);
+ return ret;
}
static ssize_t queue_wb_lat_store(struct gendisk *disk, const char *page,
@@ -520,6 +584,7 @@ static ssize_t queue_wb_lat_store(struct gendisk *disk, const char *page,
struct rq_qos *rqos;
ssize_t ret;
s64 val;
+ unsigned int memflags;
ret = queue_var_store64(&val, page);
if (ret < 0)
@@ -527,20 +592,24 @@ static ssize_t queue_wb_lat_store(struct gendisk *disk, const char *page,
if (val < -1)
return -EINVAL;
+ memflags = blk_mq_freeze_queue(q);
+ mutex_lock(&q->elevator_lock);
+
rqos = wbt_rq_qos(q);
if (!rqos) {
ret = wbt_init(disk);
if (ret)
- return ret;
+ goto out;
}
+ ret = count;
if (val == -1)
val = wbt_default_latency_nsec(q);
else if (val >= 0)
val *= 1000ULL;
if (wbt_get_min_lat(q) == val)
- return count;
+ goto out;
/*
* Ensure that the queue is idled, in case the latency update
@@ -552,8 +621,11 @@ static ssize_t queue_wb_lat_store(struct gendisk *disk, const char *page,
wbt_set_min_lat(q, val);
blk_mq_unquiesce_queue(q);
+out:
+ mutex_unlock(&q->elevator_lock);
+ blk_mq_unfreeze_queue(q, memflags);
- return count;
+ return ret;
}
QUEUE_RW_ENTRY(queue_wb_lat, "wbt_lat_usec");
@@ -561,7 +633,9 @@ QUEUE_RW_ENTRY(queue_wb_lat, "wbt_lat_usec");
/* Common attributes for bio-based and request-based queues. */
static struct attribute *queue_attrs[] = {
- &queue_ra_entry.attr,
+ /*
+ * Attributes which are protected by q->limits_lock.
+ */
&queue_max_hw_sectors_entry.attr,
&queue_max_sectors_entry.attr,
&queue_max_segments_entry.attr,
@@ -577,44 +651,58 @@ static struct attribute *queue_attrs[] = {
&queue_discard_granularity_entry.attr,
&queue_max_discard_sectors_entry.attr,
&queue_max_hw_discard_sectors_entry.attr,
- &queue_discard_zeroes_data_entry.attr,
&queue_atomic_write_max_sectors_entry.attr,
&queue_atomic_write_boundary_sectors_entry.attr,
&queue_atomic_write_unit_min_entry.attr,
&queue_atomic_write_unit_max_entry.attr,
- &queue_write_same_max_entry.attr,
&queue_max_write_zeroes_sectors_entry.attr,
&queue_max_zone_append_sectors_entry.attr,
&queue_zone_write_granularity_entry.attr,
&queue_rotational_entry.attr,
&queue_zoned_entry.attr,
- &queue_nr_zones_entry.attr,
&queue_max_open_zones_entry.attr,
&queue_max_active_zones_entry.attr,
- &queue_nomerges_entry.attr,
&queue_iostats_passthrough_entry.attr,
&queue_iostats_entry.attr,
&queue_stable_writes_entry.attr,
&queue_add_random_entry.attr,
- &queue_poll_entry.attr,
&queue_wc_entry.attr,
&queue_fua_entry.attr,
&queue_dax_entry.attr,
- &queue_poll_delay_entry.attr,
&queue_virt_boundary_mask_entry.attr,
&queue_dma_alignment_entry.attr,
+ &queue_ra_entry.attr,
+
+ /*
+ * Attributes which don't require locking.
+ */
+ &queue_discard_zeroes_data_entry.attr,
+ &queue_write_same_max_entry.attr,
+ &queue_nr_zones_entry.attr,
+ &queue_nomerges_entry.attr,
+ &queue_poll_entry.attr,
+ &queue_poll_delay_entry.attr,
+
NULL,
};
/* Request-based queue attributes that are not relevant for bio-based queues. */
static struct attribute *blk_mq_queue_attrs[] = {
- &queue_requests_entry.attr,
+ /*
+ * Attributes which require some form of locking other than
+ * q->sysfs_lock.
+ */
&elv_iosched_entry.attr,
- &queue_rq_affinity_entry.attr,
- &queue_io_timeout_entry.attr,
+ &queue_requests_entry.attr,
#ifdef CONFIG_BLK_WBT
&queue_wb_lat_entry.attr,
#endif
+ /*
+ * Attributes which don't require locking.
+ */
+ &queue_rq_affinity_entry.attr,
+ &queue_io_timeout_entry.attr,
+
NULL,
};
@@ -664,14 +752,20 @@ queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
struct queue_sysfs_entry *entry = to_queue(attr);
struct gendisk *disk = container_of(kobj, struct gendisk, queue_kobj);
- ssize_t res;
- if (!entry->show)
+ if (!entry->show && !entry->show_limit)
return -EIO;
- mutex_lock(&disk->queue->sysfs_lock);
- res = entry->show(disk, page);
- mutex_unlock(&disk->queue->sysfs_lock);
- return res;
+
+ if (entry->show_limit) {
+ ssize_t res;
+
+ mutex_lock(&disk->queue->limits_lock);
+ res = entry->show_limit(disk, page);
+ mutex_unlock(&disk->queue->limits_lock);
+ return res;
+ }
+
+ return entry->show(disk, page);
}
static ssize_t
@@ -681,21 +775,13 @@ queue_attr_store(struct kobject *kobj, struct attribute *attr,
struct queue_sysfs_entry *entry = to_queue(attr);
struct gendisk *disk = container_of(kobj, struct gendisk, queue_kobj);
struct request_queue *q = disk->queue;
- unsigned int memflags;
- ssize_t res;
if (!entry->store_limit && !entry->store)
return -EIO;
- /*
- * If the attribute needs to load a module, do it before freezing the
- * queue to ensure that the module file can be read when the request
- * queue is the one for the device storing the module file.
- */
- if (entry->load_module)
- entry->load_module(disk, page, length);
-
if (entry->store_limit) {
+ ssize_t res;
+
struct queue_limits lim = queue_limits_start_update(q);
res = entry->store_limit(disk, page, length, &lim);
@@ -710,12 +796,7 @@ queue_attr_store(struct kobject *kobj, struct attribute *attr,
return length;
}
- mutex_lock(&q->sysfs_lock);
- memflags = blk_mq_freeze_queue(q);
- res = entry->store(disk, page, length);
- blk_mq_unfreeze_queue(q, memflags);
- mutex_unlock(&q->sysfs_lock);
- return res;
+ return entry->store(disk, page, length);
}
static const struct sysfs_ops queue_sysfs_ops = {
@@ -784,18 +865,22 @@ int blk_register_queue(struct gendisk *disk)
if (ret)
goto out_debugfs_remove;
+ ret = blk_crypto_sysfs_register(disk);
+ if (ret)
+ goto out_unregister_ia_ranges;
+
+ mutex_lock(&q->elevator_lock);
if (q->elevator) {
ret = elv_register_queue(q, false);
- if (ret)
- goto out_unregister_ia_ranges;
+ if (ret) {
+ mutex_unlock(&q->elevator_lock);
+ goto out_crypto_sysfs_unregister;
+ }
}
-
- ret = blk_crypto_sysfs_register(disk);
- if (ret)
- goto out_elv_unregister;
+ wbt_enable_default(disk);
+ mutex_unlock(&q->elevator_lock);
blk_queue_flag_set(QUEUE_FLAG_REGISTERED, q);
- wbt_enable_default(disk);
/* Now everything is ready and send out KOBJ_ADD uevent */
kobject_uevent(&disk->queue_kobj, KOBJ_ADD);
@@ -817,8 +902,8 @@ int blk_register_queue(struct gendisk *disk)
return ret;
-out_elv_unregister:
- elv_unregister_queue(q);
+out_crypto_sysfs_unregister:
+ blk_crypto_sysfs_unregister(disk);
out_unregister_ia_ranges:
disk_unregister_independent_access_ranges(disk);
out_debugfs_remove:
@@ -864,8 +949,11 @@ void blk_unregister_queue(struct gendisk *disk)
blk_mq_sysfs_unregister(disk);
blk_crypto_sysfs_unregister(disk);
- mutex_lock(&q->sysfs_lock);
+ mutex_lock(&q->elevator_lock);
elv_unregister_queue(q);
+ mutex_unlock(&q->elevator_lock);
+
+ mutex_lock(&q->sysfs_lock);
disk_unregister_independent_access_ranges(disk);
mutex_unlock(&q->sysfs_lock);
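
With show split from show_limit (and store from store_limit), the declaration macro now also selects the attribute's locking model: show_limit attributes are read under q->limits_lock in queue_attr_show(), and limit updates go through queue_limits_start_update()/queue_limits_commit_update(), while plain show/store callbacks provide their own locking. A sketch of what a hypothetical read-only limit attribute would look like under the new scheme (the attribute itself is invented, the macros are the ones defined above):

/* Hypothetical attribute, not part of this patch. */
static ssize_t queue_demo_gran_show(struct gendisk *disk, char *page)
{
	/* ->show_limit callbacks run with q->limits_lock already held. */
	return sysfs_emit(page, "%u\n", disk->queue->limits.io_min);
}

QUEUE_LIM_RO_ENTRY(queue_demo_gran, "demo_granularity");

/* The entry would then be listed in queue_attrs[] next to the other
 * limits_lock-protected attributes. */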
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 8d149aff9fd0..91dab43c65ab 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -478,8 +478,6 @@ static inline void throtl_start_new_slice_with_credit(struct throtl_grp *tg,
{
tg->bytes_disp[rw] = 0;
tg->io_disp[rw] = 0;
- tg->carryover_bytes[rw] = 0;
- tg->carryover_ios[rw] = 0;
/*
* Previous slice has expired. We must have trimmed it after last
@@ -498,16 +496,14 @@ static inline void throtl_start_new_slice_with_credit(struct throtl_grp *tg,
}
static inline void throtl_start_new_slice(struct throtl_grp *tg, bool rw,
- bool clear_carryover)
+ bool clear)
{
- tg->bytes_disp[rw] = 0;
- tg->io_disp[rw] = 0;
+ if (clear) {
+ tg->bytes_disp[rw] = 0;
+ tg->io_disp[rw] = 0;
+ }
tg->slice_start[rw] = jiffies;
tg->slice_end[rw] = jiffies + tg->td->throtl_slice;
- if (clear_carryover) {
- tg->carryover_bytes[rw] = 0;
- tg->carryover_ios[rw] = 0;
- }
throtl_log(&tg->service_queue,
"[%c] new slice start=%lu end=%lu jiffies=%lu",
@@ -599,29 +595,34 @@ static inline void throtl_trim_slice(struct throtl_grp *tg, bool rw)
* sooner, then we need to reduce slice_end. A high bogus slice_end
* is bad because it does not allow new slice to start.
*/
-
throtl_set_slice_end(tg, rw, jiffies + tg->td->throtl_slice);
time_elapsed = rounddown(jiffies - tg->slice_start[rw],
tg->td->throtl_slice);
- if (!time_elapsed)
+ /* Don't trim slice until at least 2 slices are used */
+ if (time_elapsed < tg->td->throtl_slice * 2)
return;
+ /*
+ * The bio submission time may be a few jiffies later than the expected
+ * wait time, both because 'extra_bytes' cannot be divided evenly in
+ * tg_within_bps_limit() and because of timer wakeup delay. In that
+ * case, adjusting slice_start would discard the extra wait time and
+ * yield a lower rate than expected. Therefore, in addition to the
+ * rounddown above, one extra slice is preserved to absorb this
+ * deviation.
+ */
+ time_elapsed -= tg->td->throtl_slice;
bytes_trim = calculate_bytes_allowed(tg_bps_limit(tg, rw),
- time_elapsed) +
- tg->carryover_bytes[rw];
- io_trim = calculate_io_allowed(tg_iops_limit(tg, rw), time_elapsed) +
- tg->carryover_ios[rw];
+ time_elapsed);
+ io_trim = calculate_io_allowed(tg_iops_limit(tg, rw), time_elapsed);
if (bytes_trim <= 0 && io_trim <= 0)
return;
- tg->carryover_bytes[rw] = 0;
if ((long long)tg->bytes_disp[rw] >= bytes_trim)
tg->bytes_disp[rw] -= bytes_trim;
else
tg->bytes_disp[rw] = 0;
- tg->carryover_ios[rw] = 0;
if ((int)tg->io_disp[rw] >= io_trim)
tg->io_disp[rw] -= io_trim;
else
@@ -636,7 +637,8 @@ static inline void throtl_trim_slice(struct throtl_grp *tg, bool rw)
jiffies);
}
-static void __tg_update_carryover(struct throtl_grp *tg, bool rw)
+static void __tg_update_carryover(struct throtl_grp *tg, bool rw,
+ long long *bytes, int *ios)
{
unsigned long jiffy_elapsed = jiffies - tg->slice_start[rw];
u64 bps_limit = tg_bps_limit(tg, rw);
@@ -649,26 +651,28 @@ static void __tg_update_carryover(struct throtl_grp *tg, bool rw)
* configuration.
*/
if (bps_limit != U64_MAX)
- tg->carryover_bytes[rw] +=
- calculate_bytes_allowed(bps_limit, jiffy_elapsed) -
+ *bytes = calculate_bytes_allowed(bps_limit, jiffy_elapsed) -
tg->bytes_disp[rw];
if (iops_limit != UINT_MAX)
- tg->carryover_ios[rw] +=
- calculate_io_allowed(iops_limit, jiffy_elapsed) -
+ *ios = calculate_io_allowed(iops_limit, jiffy_elapsed) -
tg->io_disp[rw];
+ tg->bytes_disp[rw] -= *bytes;
+ tg->io_disp[rw] -= *ios;
}
static void tg_update_carryover(struct throtl_grp *tg)
{
+ long long bytes[2] = {0};
+ int ios[2] = {0};
+
if (tg->service_queue.nr_queued[READ])
- __tg_update_carryover(tg, READ);
+ __tg_update_carryover(tg, READ, &bytes[READ], &ios[READ]);
if (tg->service_queue.nr_queued[WRITE])
- __tg_update_carryover(tg, WRITE);
+ __tg_update_carryover(tg, WRITE, &bytes[WRITE], &ios[WRITE]);
/* see comments in struct throtl_grp for meaning of these fields. */
throtl_log(&tg->service_queue, "%s: %lld %lld %d %d\n", __func__,
- tg->carryover_bytes[READ], tg->carryover_bytes[WRITE],
- tg->carryover_ios[READ], tg->carryover_ios[WRITE]);
+ bytes[READ], bytes[WRITE], ios[READ], ios[WRITE]);
}
static unsigned long tg_within_iops_limit(struct throtl_grp *tg, struct bio *bio,
@@ -686,8 +690,7 @@ static unsigned long tg_within_iops_limit(struct throtl_grp *tg, struct bio *bio
/* Round up to the next throttle slice, wait time must be nonzero */
jiffy_elapsed_rnd = roundup(jiffy_elapsed + 1, tg->td->throtl_slice);
- io_allowed = calculate_io_allowed(iops_limit, jiffy_elapsed_rnd) +
- tg->carryover_ios[rw];
+ io_allowed = calculate_io_allowed(iops_limit, jiffy_elapsed_rnd);
if (io_allowed > 0 && tg->io_disp[rw] + 1 <= io_allowed)
return 0;
@@ -720,8 +723,7 @@ static unsigned long tg_within_bps_limit(struct throtl_grp *tg, struct bio *bio,
jiffy_elapsed_rnd = tg->td->throtl_slice;
jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, tg->td->throtl_slice);
- bytes_allowed = calculate_bytes_allowed(bps_limit, jiffy_elapsed_rnd) +
- tg->carryover_bytes[rw];
+ bytes_allowed = calculate_bytes_allowed(bps_limit, jiffy_elapsed_rnd);
if (bytes_allowed > 0 && tg->bytes_disp[rw] + bio_size <= bytes_allowed)
return 0;
@@ -810,13 +812,10 @@ static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
unsigned int bio_size = throtl_bio_data_size(bio);
/* Charge the bio to the group */
- if (!bio_flagged(bio, BIO_BPS_THROTTLED)) {
+ if (!bio_flagged(bio, BIO_BPS_THROTTLED))
tg->bytes_disp[rw] += bio_size;
- tg->last_bytes_disp[rw] += bio_size;
- }
tg->io_disp[rw]++;
- tg->last_io_disp[rw]++;
}
/**
@@ -1614,13 +1613,6 @@ static bool tg_within_limit(struct throtl_grp *tg, struct bio *bio, bool rw)
return tg_may_dispatch(tg, bio, NULL);
}
-static void tg_dispatch_in_debt(struct throtl_grp *tg, struct bio *bio, bool rw)
-{
- if (!bio_flagged(bio, BIO_BPS_THROTTLED))
- tg->carryover_bytes[rw] -= throtl_bio_data_size(bio);
- tg->carryover_ios[rw]--;
-}
-
bool __blk_throtl_bio(struct bio *bio)
{
struct request_queue *q = bdev_get_queue(bio->bi_bdev);
@@ -1657,10 +1649,12 @@ bool __blk_throtl_bio(struct bio *bio)
/*
* IOs which may cause priority inversions are
* dispatched directly, even if they're over limit.
- * Debts are handled by carryover_bytes/ios while
- * calculating wait time.
+ *
+ * Charge and dispatch the bio directly; the throttle
+ * control algorithm is adaptive, so the extra IO bytes
+ * will be throttled later to pay back the debt.
*/
- tg_dispatch_in_debt(tg, bio, rw);
+ throtl_charge_bio(tg, bio);
} else {
/* if above limits, break to queue */
break;
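To make the revised trim arithmetic in throtl_trim_slice() above concrete, here is a minimal user-space sketch of the rounddown-plus-reserved-slice calculation. The helper body, the HZ assumption, and all sample numbers are illustrative only, not taken from the kernel sources.

#include <stdio.h>
#include <stdint.h>

/* Illustrative stand-in for the kernel helper; assumes HZ == 1000. */
static uint64_t calculate_bytes_allowed(uint64_t bps_limit, unsigned long jiffies_elapsed)
{
	return bps_limit * jiffies_elapsed / 1000;
}

int main(void)
{
	unsigned long throtl_slice = 100;            /* jiffies per slice (example) */
	unsigned long slice_start = 1000, now = 1350;
	uint64_t bps_limit = 1024 * 1024;            /* 1 MiB/s (example) */

	/* rounddown(now - slice_start, throtl_slice): 350 -> 300 */
	unsigned long elapsed = (now - slice_start) / throtl_slice * throtl_slice;

	if (elapsed < throtl_slice * 2)
		return 0;                            /* don't trim until 2 slices are used */

	/* keep one slice's budget for submission/timer-wakeup deviation: 300 -> 200 */
	elapsed -= throtl_slice;

	uint64_t bytes_trim = calculate_bytes_allowed(bps_limit, elapsed);
	printf("trim %llu bytes for %lu jiffies\n",
	       (unsigned long long)bytes_trim, elapsed);
	return 0;
}

With these made-up numbers it prints "trim 209715 bytes for 200 jiffies", i.e. one slice's worth of budget is deliberately left untrimmed.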
diff --git a/block/blk-throttle.h b/block/blk-throttle.h
index 1a36d1278eea..7964cc041e06 100644
--- a/block/blk-throttle.h
+++ b/block/blk-throttle.h
@@ -102,12 +102,9 @@ struct throtl_grp {
unsigned int iops[2];
/* Number of bytes dispatched in current slice */
- uint64_t bytes_disp[2];
+ int64_t bytes_disp[2];
/* Number of bio's dispatched in current slice */
- unsigned int io_disp[2];
-
- uint64_t last_bytes_disp[2];
- unsigned int last_io_disp[2];
+ int io_disp[2];
/*
* The following two fields are updated when new configuration is
diff --git a/block/blk-wbt.c b/block/blk-wbt.c
index 6dfc659d22e2..f1754d07f7e0 100644
--- a/block/blk-wbt.c
+++ b/block/blk-wbt.c
@@ -136,8 +136,9 @@ enum {
RWB_MIN_WRITE_SAMPLES = 3,
/*
- * If we have this number of consecutive windows with not enough
- * information to scale up or down, scale up.
+ * If we have this number of consecutive windows without enough
+ * information to scale up or down, slowly return to center state
+ * (step == 0).
*/
RWB_UNKNOWN_BUMP = 5,
};
@@ -446,9 +447,9 @@ static void wb_timer_fn(struct blk_stat_callback *cb)
break;
case LAT_UNKNOWN_WRITES:
/*
- * We started a the center step, but don't have a valid
- * read/write sample, but we do have writes going on.
- * Allow step to go negative, to increase write perf.
+ * We don't have a valid read/write sample, but we do have
+ * writes going on. Allow step to go negative, to increase
+ * write performance.
*/
scale_up(rwb);
break;
@@ -638,11 +639,7 @@ static void wbt_cleanup(struct rq_qos *rqos, struct bio *bio)
__wbt_done(rqos, flags);
}
-/*
- * May sleep, if we have exceeded the writeback limits. Caller can pass
- * in an irq held spinlock, if it holds one when calling this function.
- * If we do sleep, we'll release and re-grab it.
- */
+/* May sleep, if we have exceeded the writeback limits. */
static void wbt_wait(struct rq_qos *rqos, struct bio *bio)
{
struct rq_wb *rwb = RQWB(rqos);
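A minimal sketch of the "slowly return to center state" behaviour described in the RWB_UNKNOWN_BUMP comment above, as a stand-alone C loop; the starting step value, the window loop, and the counter handling are assumptions for illustration and do not mirror blk-wbt.c line for line.

#include <stdio.h>

int main(void)
{
	const int RWB_UNKNOWN_BUMP = 5;   /* same threshold as the enum above */
	int scale_step = -3;              /* negative step = write perf was scaled up (example) */
	int unknown_cnt = 0;

	for (int window = 0; window < 20; window++) {
		int have_samples = 0;     /* pretend no usable latency samples arrive */

		if (!have_samples && ++unknown_cnt >= RWB_UNKNOWN_BUMP) {
			unknown_cnt = 0;
			/* drift one step back toward the center state (step == 0) */
			if (scale_step < 0)
				scale_step++;
			else if (scale_step > 0)
				scale_step--;
		}
		printf("window %2d: step %d\n", window, scale_step);
	}
	return 0;
}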
diff --git a/block/blk.h b/block/blk.h
index 9cf9a0099416..006e3be433d2 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -715,7 +715,7 @@ int bdev_open(struct block_device *bdev, blk_mode_t mode, void *holder,
int bdev_permission(dev_t dev, blk_mode_t mode, void *holder);
void blk_integrity_generate(struct bio *bio);
-void blk_integrity_verify(struct bio *bio);
+void blk_integrity_verify_iter(struct bio *bio, struct bvec_iter *saved_iter);
void blk_integrity_prepare(struct request *rq);
void blk_integrity_complete(struct request *rq, unsigned int nr_bytes);
diff --git a/block/bounce.c b/block/bounce.c
index 0d898cd5ec49..09a9616cf209 100644
--- a/block/bounce.c
+++ b/block/bounce.c
@@ -41,8 +41,6 @@ static void init_bounce_bioset(void)
ret = bioset_init(&bounce_bio_set, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
BUG_ON(ret);
- if (bioset_integrity_create(&bounce_bio_set, BIO_POOL_SIZE))
- BUG_ON(1);
ret = bioset_init(&bounce_bio_split, BIO_POOL_SIZE, 0, 0);
BUG_ON(ret);
diff --git a/block/bsg-lib.c b/block/bsg-lib.c
index 93523d8f8195..9ceb5d0832f5 100644
--- a/block/bsg-lib.c
+++ b/block/bsg-lib.c
@@ -219,7 +219,7 @@ static int bsg_map_buffer(struct bsg_buffer *buf, struct request *req)
if (!buf->sg_list)
return -ENOMEM;
sg_init_table(buf->sg_list, req->nr_phys_segments);
- buf->sg_cnt = blk_rq_map_sg(req->q, req, buf->sg_list);
+ buf->sg_cnt = blk_rq_map_sg(req, buf->sg_list);
buf->payload_len = blk_rq_bytes(req);
return 0;
}
diff --git a/block/elevator.c b/block/elevator.c
index cd2ce4921601..b4d08026b02c 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -457,7 +457,7 @@ int elv_register_queue(struct request_queue *q, bool uevent)
struct elevator_queue *e = q->elevator;
int error;
- lockdep_assert_held(&q->sysfs_lock);
+ lockdep_assert_held(&q->elevator_lock);
error = kobject_add(&e->kobj, &q->disk->queue_kobj, "iosched");
if (!error) {
@@ -481,7 +481,7 @@ void elv_unregister_queue(struct request_queue *q)
{
struct elevator_queue *e = q->elevator;
- lockdep_assert_held(&q->sysfs_lock);
+ lockdep_assert_held(&q->elevator_lock);
if (e && test_and_clear_bit(ELEVATOR_FLAG_REGISTERED, &e->flags)) {
kobject_uevent(&e->kobj, KOBJ_REMOVE);
@@ -618,7 +618,7 @@ int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
unsigned int memflags;
int ret;
- lockdep_assert_held(&q->sysfs_lock);
+ lockdep_assert_held(&q->elevator_lock);
memflags = blk_mq_freeze_queue(q);
blk_mq_quiesce_queue(q);
@@ -655,7 +655,7 @@ void elevator_disable(struct request_queue *q)
{
unsigned int memflags;
- lockdep_assert_held(&q->sysfs_lock);
+ lockdep_assert_held(&q->elevator_lock);
memflags = blk_mq_freeze_queue(q);
blk_mq_quiesce_queue(q);
@@ -700,34 +700,44 @@ static int elevator_change(struct request_queue *q, const char *elevator_name)
return ret;
}
-void elv_iosched_load_module(struct gendisk *disk, const char *buf,
- size_t count)
+static void elv_iosched_load_module(char *elevator_name)
{
- char elevator_name[ELV_NAME_MAX];
struct elevator_type *found;
- const char *name;
-
- strscpy(elevator_name, buf, sizeof(elevator_name));
- name = strstrip(elevator_name);
spin_lock(&elv_list_lock);
- found = __elevator_find(name);
+ found = __elevator_find(elevator_name);
spin_unlock(&elv_list_lock);
if (!found)
- request_module("%s-iosched", name);
+ request_module("%s-iosched", elevator_name);
}
ssize_t elv_iosched_store(struct gendisk *disk, const char *buf,
size_t count)
{
char elevator_name[ELV_NAME_MAX];
+ char *name;
int ret;
+ unsigned int memflags;
+ struct request_queue *q = disk->queue;
+ /*
+ * If the attribute needs to load a module, do it before freezing the
+ * queue to ensure that the module file can be read when the request
+ * queue is the one for the device storing the module file.
+ */
strscpy(elevator_name, buf, sizeof(elevator_name));
- ret = elevator_change(disk->queue, strstrip(elevator_name));
+ name = strstrip(elevator_name);
+
+ elv_iosched_load_module(name);
+
+ memflags = blk_mq_freeze_queue(q);
+ mutex_lock(&q->elevator_lock);
+ ret = elevator_change(q, name);
if (!ret)
- return count;
+ ret = count;
+ mutex_unlock(&q->elevator_lock);
+ blk_mq_unfreeze_queue(q, memflags);
return ret;
}
@@ -738,6 +748,7 @@ ssize_t elv_iosched_show(struct gendisk *disk, char *name)
struct elevator_type *cur = NULL, *e;
int len = 0;
+ mutex_lock(&q->elevator_lock);
if (!q->elevator) {
len += sprintf(name+len, "[none] ");
} else {
@@ -755,6 +766,8 @@ ssize_t elv_iosched_show(struct gendisk *disk, char *name)
spin_unlock(&elv_list_lock);
len += sprintf(name+len, "\n");
+ mutex_unlock(&q->elevator_lock);
+
return len;
}
diff --git a/block/elevator.h b/block/elevator.h
index e526662c5dbb..e4e44dfac503 100644
--- a/block/elevator.h
+++ b/block/elevator.h
@@ -148,8 +148,6 @@ extern void elv_unregister(struct elevator_type *);
* io scheduler sysfs switching
*/
ssize_t elv_iosched_show(struct gendisk *disk, char *page);
-void elv_iosched_load_module(struct gendisk *disk, const char *page,
- size_t count);
ssize_t elv_iosched_store(struct gendisk *disk, const char *page, size_t count);
extern bool elv_bio_merge_ok(struct request *, struct bio *);
diff --git a/block/genhd.c b/block/genhd.c
index e9375e20d866..c2bd86cd09de 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -565,8 +565,11 @@ out_free_ext_minor:
if (disk->major == BLOCK_EXT_MAJOR)
blk_free_ext_minor(disk->first_minor);
out_exit_elevator:
- if (disk->queue->elevator)
+ if (disk->queue->elevator) {
+ mutex_lock(&disk->queue->elevator_lock);
elevator_exit(disk->queue);
+ mutex_unlock(&disk->queue->elevator_lock);
+ }
return ret;
}
EXPORT_SYMBOL_GPL(add_disk_fwnode);
@@ -742,9 +745,9 @@ void del_gendisk(struct gendisk *disk)
blk_mq_quiesce_queue(q);
if (q->elevator) {
- mutex_lock(&q->sysfs_lock);
+ mutex_lock(&q->elevator_lock);
elevator_exit(q);
- mutex_unlock(&q->sysfs_lock);
+ mutex_unlock(&q->elevator_lock);
}
rq_qos_exit(q);
blk_mq_unquiesce_queue(q);
diff --git a/block/ioctl.c b/block/ioctl.c
index 6554b728bae6..faa40f383e27 100644
--- a/block/ioctl.c
+++ b/block/ioctl.c
@@ -15,6 +15,7 @@
#include <linux/io_uring/cmd.h>
#include <uapi/linux/blkdev.h>
#include "blk.h"
+#include "blk-crypto-internal.h"
static int blkpg_do_ioctl(struct block_device *bdev,
struct blkpg_partition __user *upart, int op)
@@ -620,6 +621,10 @@ static int blkdev_common_ioctl(struct block_device *bdev, blk_mode_t mode,
case BLKTRACESTOP:
case BLKTRACETEARDOWN:
return blk_trace_ioctl(bdev, cmd, argp);
+ case BLKCRYPTOIMPORTKEY:
+ case BLKCRYPTOGENERATEKEY:
+ case BLKCRYPTOPREPAREKEY:
+ return blk_crypto_ioctl(bdev, cmd, argp);
case IOC_PR_REGISTER:
return blkdev_pr_register(bdev, mode, argp);
case IOC_PR_RESERVE:
diff --git a/block/kyber-iosched.c b/block/kyber-iosched.c
index dc31f2dfa414..0f0f8452609a 100644
--- a/block/kyber-iosched.c
+++ b/block/kyber-iosched.c
@@ -568,7 +568,7 @@ static bool kyber_bio_merge(struct request_queue *q, struct bio *bio,
unsigned int nr_segs)
{
struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
- struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, bio->bi_opf, ctx);
+ struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(bio->bi_opf, ctx);
struct kyber_hctx_data *khd = hctx->sched_data;
struct kyber_ctx_queue *kcq = &khd->kcqs[ctx->index_hw[hctx->type]];
unsigned int sched_domain = kyber_sched_domain(bio->bi_opf);
diff --git a/block/partitions/efi.c b/block/partitions/efi.c
index 5e9be13a56a8..7acba66eed48 100644
--- a/block/partitions/efi.c
+++ b/block/partitions/efi.c
@@ -682,7 +682,7 @@ static void utf16_le_to_7bit(const __le16 *in, unsigned int size, u8 *out)
out[size] = 0;
while (i < size) {
- u8 c = le16_to_cpu(in[i]) & 0xff;
+ u8 c = le16_to_cpu(in[i]) & 0x7f;
if (c && !isprint(c))
c = '!';
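The efi.c change above masks each UTF-16LE code unit with 0x7f instead of 0xff, so only 7-bit values ever reach the isprint() check; a small stand-alone comparison, using an arbitrary example code point:

#include <stdio.h>
#include <ctype.h>
#include <stdint.h>

int main(void)
{
	uint16_t code_unit = 0x00e9;               /* U+00E9, picked only as an example */

	uint8_t with_ff = code_unit & 0xff;        /* 0xe9: an 8-bit value, not 7-bit ASCII */
	uint8_t with_7f = code_unit & 0x7f;        /* 0x69: always within the 7-bit range */

	printf("& 0xff -> 0x%02x (isprint=%d)\n", with_ff, isprint(with_ff));
	printf("& 0x7f -> 0x%02x ('%c')\n", with_7f, with_7f);
	return 0;
}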
diff --git a/block/partitions/sgi.c b/block/partitions/sgi.c
index 9cc6b8c1eea4..b5ecddd5181a 100644
--- a/block/partitions/sgi.c
+++ b/block/partitions/sgi.c
@@ -50,8 +50,6 @@ int sgi_partition(struct parsed_partitions *state)
p = &label->partitions[0];
magic = label->magic_mushroom;
if(be32_to_cpu(magic) != SGI_LABEL_MAGIC) {
- /*printk("Dev %s SGI disklabel: bad magic %08x\n",
- state->disk->disk_name, be32_to_cpu(magic));*/
put_dev_sector(sect);
return 0;
}
diff --git a/block/partitions/sun.c b/block/partitions/sun.c
index ddf9e6def4b2..2419af76120f 100644
--- a/block/partitions/sun.c
+++ b/block/partitions/sun.c
@@ -74,8 +74,6 @@ int sun_partition(struct parsed_partitions *state)
p = label->partitions;
if (be16_to_cpu(label->magic) != SUN_LABEL_MAGIC) {
-/* printk(KERN_INFO "Dev %s Sun disklabel: bad magic %04x\n",
- state->disk->disk_name, be16_to_cpu(label->magic)); */
put_dev_sector(sect);
return 0;
}
diff --git a/block/t10-pi.c b/block/t10-pi.c
index 2d05421f0fa5..851db518ee5e 100644
--- a/block/t10-pi.c
+++ b/block/t10-pi.c
@@ -210,7 +210,7 @@ static void t10_pi_type1_complete(struct request *rq, unsigned int nr_bytes)
static __be64 ext_pi_crc64(u64 crc, void *data, unsigned int len)
{
- return cpu_to_be64(crc64_rocksoft_update(crc, data, len));
+ return cpu_to_be64(crc64_nvme(crc, data, len));
}
static void ext_pi_crc64_generate(struct blk_integrity_iter *iter,
@@ -404,7 +404,7 @@ void blk_integrity_generate(struct bio *bio)
}
}
-void blk_integrity_verify(struct bio *bio)
+void blk_integrity_verify_iter(struct bio *bio, struct bvec_iter *saved_iter)
{
struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk);
struct bio_integrity_payload *bip = bio_integrity(bio);
@@ -418,9 +418,9 @@ void blk_integrity_verify(struct bio *bio)
*/
iter.disk_name = bio->bi_bdev->bd_disk->disk_name;
iter.interval = 1 << bi->interval_exp;
- iter.seed = bip->bio_iter.bi_sector;
+ iter.seed = saved_iter->bi_sector;
iter.prot_buf = bvec_virt(bip->bip_vec);
- __bio_for_each_segment(bv, bio, bviter, bip->bio_iter) {
+ __bio_for_each_segment(bv, bio, bviter, *saved_iter) {
void *kaddr = bvec_kmap_local(&bv);
blk_status_t ret = BLK_STS_OK;
diff --git a/crypto/Kconfig b/crypto/Kconfig
index 74ae5f52b784..69bbe696b94d 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -141,6 +141,12 @@ config CRYPTO_ACOMP
select CRYPTO_ALGAPI
select CRYPTO_ACOMP2
+config CRYPTO_HKDF
+ tristate
+ select CRYPTO_SHA256 if !CONFIG_CRYPTO_MANAGER_DISABLE_TESTS
+ select CRYPTO_SHA512 if !CONFIG_CRYPTO_MANAGER_DISABLE_TESTS
+ select CRYPTO_HASH2
+
config CRYPTO_MANAGER
tristate "Cryptographic algorithm manager"
select CRYPTO_MANAGER2
@@ -1081,26 +1087,6 @@ config CRYPTO_CRC32
Used by RoCEv2 and f2fs.
-config CRYPTO_CRCT10DIF
- tristate "CRCT10DIF"
- select CRYPTO_HASH
- select CRC_T10DIF
- help
- CRC16 CRC algorithm used for the T10 (SCSI) Data Integrity Field (DIF)
-
- CRC algorithm used by the SCSI Block Commands standard.
-
-config CRYPTO_CRC64_ROCKSOFT
- tristate "CRC64 based on Rocksoft Model algorithm"
- depends on CRC64
- select CRYPTO_HASH
- help
- CRC64 CRC algorithm based on the Rocksoft Model CRC Algorithm
-
- Used by the NVMe implementation of T10 DIF (BLK_DEV_INTEGRITY)
-
- See https://zlib.net/crc_v3.txt
-
endmenu
menu "Compression"
diff --git a/crypto/Makefile b/crypto/Makefile
index f67e853c4690..c95e95e75ad4 100644
--- a/crypto/Makefile
+++ b/crypto/Makefile
@@ -34,6 +34,7 @@ obj-$(CONFIG_CRYPTO_HASH2) += crypto_hash.o
obj-$(CONFIG_CRYPTO_AKCIPHER2) += akcipher.o
obj-$(CONFIG_CRYPTO_SIG2) += sig.o
obj-$(CONFIG_CRYPTO_KPP2) += kpp.o
+obj-$(CONFIG_CRYPTO_HKDF) += hkdf.o
dh_generic-y := dh.o
dh_generic-y += dh_helper.o
@@ -155,9 +156,6 @@ obj-$(CONFIG_CRYPTO_CRC32C) += crc32c_generic.o
obj-$(CONFIG_CRYPTO_CRC32) += crc32_generic.o
CFLAGS_crc32c_generic.o += -DARCH=$(ARCH)
CFLAGS_crc32_generic.o += -DARCH=$(ARCH)
-obj-$(CONFIG_CRYPTO_CRCT10DIF) += crct10dif_generic.o
-CFLAGS_crct10dif_generic.o += -DARCH=$(ARCH)
-obj-$(CONFIG_CRYPTO_CRC64_ROCKSOFT) += crc64_rocksoft_generic.o
obj-$(CONFIG_CRYPTO_AUTHENC) += authenc.o authencesn.o
obj-$(CONFIG_CRYPTO_LZO) += lzo.o lzo-rle.o
obj-$(CONFIG_CRYPTO_LZ4) += lz4.o
diff --git a/crypto/crc32c_generic.c b/crypto/crc32c_generic.c
index 985da981d6e2..b1a36d32dc50 100644
--- a/crypto/crc32c_generic.c
+++ b/crypto/crc32c_generic.c
@@ -85,7 +85,7 @@ static int chksum_update(struct shash_desc *desc, const u8 *data,
{
struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
- ctx->crc = crc32c_le_base(ctx->crc, data, length);
+ ctx->crc = crc32c_base(ctx->crc, data, length);
return 0;
}
@@ -94,7 +94,7 @@ static int chksum_update_arch(struct shash_desc *desc, const u8 *data,
{
struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
- ctx->crc = __crc32c_le(ctx->crc, data, length);
+ ctx->crc = crc32c(ctx->crc, data, length);
return 0;
}
@@ -108,14 +108,14 @@ static int chksum_final(struct shash_desc *desc, u8 *out)
static int __chksum_finup(u32 *crcp, const u8 *data, unsigned int len, u8 *out)
{
- put_unaligned_le32(~crc32c_le_base(*crcp, data, len), out);
+ put_unaligned_le32(~crc32c_base(*crcp, data, len), out);
return 0;
}
static int __chksum_finup_arch(u32 *crcp, const u8 *data, unsigned int len,
u8 *out)
{
- put_unaligned_le32(~__crc32c_le(*crcp, data, len), out);
+ put_unaligned_le32(~crc32c(*crcp, data, len), out);
return 0;
}
diff --git a/crypto/crc64_rocksoft_generic.c b/crypto/crc64_rocksoft_generic.c
deleted file mode 100644
index ce0f3059b912..000000000000
--- a/crypto/crc64_rocksoft_generic.c
+++ /dev/null
@@ -1,89 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-
-#include <linux/crc64.h>
-#include <linux/module.h>
-#include <crypto/internal/hash.h>
-#include <linux/unaligned.h>
-
-static int chksum_init(struct shash_desc *desc)
-{
- u64 *crc = shash_desc_ctx(desc);
-
- *crc = 0;
-
- return 0;
-}
-
-static int chksum_update(struct shash_desc *desc, const u8 *data,
- unsigned int length)
-{
- u64 *crc = shash_desc_ctx(desc);
-
- *crc = crc64_rocksoft_generic(*crc, data, length);
-
- return 0;
-}
-
-static int chksum_final(struct shash_desc *desc, u8 *out)
-{
- u64 *crc = shash_desc_ctx(desc);
-
- put_unaligned_le64(*crc, out);
- return 0;
-}
-
-static int __chksum_finup(u64 crc, const u8 *data, unsigned int len, u8 *out)
-{
- crc = crc64_rocksoft_generic(crc, data, len);
- put_unaligned_le64(crc, out);
- return 0;
-}
-
-static int chksum_finup(struct shash_desc *desc, const u8 *data,
- unsigned int len, u8 *out)
-{
- u64 *crc = shash_desc_ctx(desc);
-
- return __chksum_finup(*crc, data, len, out);
-}
-
-static int chksum_digest(struct shash_desc *desc, const u8 *data,
- unsigned int length, u8 *out)
-{
- return __chksum_finup(0, data, length, out);
-}
-
-static struct shash_alg alg = {
- .digestsize = sizeof(u64),
- .init = chksum_init,
- .update = chksum_update,
- .final = chksum_final,
- .finup = chksum_finup,
- .digest = chksum_digest,
- .descsize = sizeof(u64),
- .base = {
- .cra_name = CRC64_ROCKSOFT_STRING,
- .cra_driver_name = "crc64-rocksoft-generic",
- .cra_priority = 200,
- .cra_blocksize = 1,
- .cra_module = THIS_MODULE,
- }
-};
-
-static int __init crc64_rocksoft_init(void)
-{
- return crypto_register_shash(&alg);
-}
-
-static void __exit crc64_rocksoft_exit(void)
-{
- crypto_unregister_shash(&alg);
-}
-
-module_init(crc64_rocksoft_init);
-module_exit(crc64_rocksoft_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("Rocksoft model CRC64 calculation.");
-MODULE_ALIAS_CRYPTO("crc64-rocksoft");
-MODULE_ALIAS_CRYPTO("crc64-rocksoft-generic");
diff --git a/crypto/crct10dif_generic.c b/crypto/crct10dif_generic.c
deleted file mode 100644
index 259cb01932cb..000000000000
--- a/crypto/crct10dif_generic.c
+++ /dev/null
@@ -1,168 +0,0 @@
-/*
- * Cryptographic API.
- *
- * T10 Data Integrity Field CRC16 Crypto Transform
- *
- * Copyright (c) 2007 Oracle Corporation. All rights reserved.
- * Written by Martin K. Petersen <martin.petersen@oracle.com>
- * Copyright (C) 2013 Intel Corporation
- * Author: Tim Chen <tim.c.chen@linux.intel.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
-
-#include <linux/module.h>
-#include <linux/crc-t10dif.h>
-#include <crypto/internal/hash.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-
-struct chksum_desc_ctx {
- __u16 crc;
-};
-
-/*
- * Steps through buffer one byte at a time, calculates reflected
- * crc using table.
- */
-
-static int chksum_init(struct shash_desc *desc)
-{
- struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
-
- ctx->crc = 0;
-
- return 0;
-}
-
-static int chksum_update(struct shash_desc *desc, const u8 *data,
- unsigned int length)
-{
- struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
-
- ctx->crc = crc_t10dif_generic(ctx->crc, data, length);
- return 0;
-}
-
-static int chksum_update_arch(struct shash_desc *desc, const u8 *data,
- unsigned int length)
-{
- struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
-
- ctx->crc = crc_t10dif_update(ctx->crc, data, length);
- return 0;
-}
-
-static int chksum_final(struct shash_desc *desc, u8 *out)
-{
- struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
-
- *(__u16 *)out = ctx->crc;
- return 0;
-}
-
-static int __chksum_finup(__u16 crc, const u8 *data, unsigned int len, u8 *out)
-{
- *(__u16 *)out = crc_t10dif_generic(crc, data, len);
- return 0;
-}
-
-static int __chksum_finup_arch(__u16 crc, const u8 *data, unsigned int len,
- u8 *out)
-{
- *(__u16 *)out = crc_t10dif_update(crc, data, len);
- return 0;
-}
-
-static int chksum_finup(struct shash_desc *desc, const u8 *data,
- unsigned int len, u8 *out)
-{
- struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
-
- return __chksum_finup(ctx->crc, data, len, out);
-}
-
-static int chksum_finup_arch(struct shash_desc *desc, const u8 *data,
- unsigned int len, u8 *out)
-{
- struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
-
- return __chksum_finup_arch(ctx->crc, data, len, out);
-}
-
-static int chksum_digest(struct shash_desc *desc, const u8 *data,
- unsigned int length, u8 *out)
-{
- return __chksum_finup(0, data, length, out);
-}
-
-static int chksum_digest_arch(struct shash_desc *desc, const u8 *data,
- unsigned int length, u8 *out)
-{
- return __chksum_finup_arch(0, data, length, out);
-}
-
-static struct shash_alg algs[] = {{
- .digestsize = CRC_T10DIF_DIGEST_SIZE,
- .init = chksum_init,
- .update = chksum_update,
- .final = chksum_final,
- .finup = chksum_finup,
- .digest = chksum_digest,
- .descsize = sizeof(struct chksum_desc_ctx),
- .base.cra_name = "crct10dif",
- .base.cra_driver_name = "crct10dif-generic",
- .base.cra_priority = 100,
- .base.cra_blocksize = CRC_T10DIF_BLOCK_SIZE,
- .base.cra_module = THIS_MODULE,
-}, {
- .digestsize = CRC_T10DIF_DIGEST_SIZE,
- .init = chksum_init,
- .update = chksum_update_arch,
- .final = chksum_final,
- .finup = chksum_finup_arch,
- .digest = chksum_digest_arch,
- .descsize = sizeof(struct chksum_desc_ctx),
- .base.cra_name = "crct10dif",
- .base.cra_driver_name = "crct10dif-" __stringify(ARCH),
- .base.cra_priority = 150,
- .base.cra_blocksize = CRC_T10DIF_BLOCK_SIZE,
- .base.cra_module = THIS_MODULE,
-}};
-
-static int num_algs;
-
-static int __init crct10dif_mod_init(void)
-{
- /* register the arch flavor only if it differs from the generic one */
- num_algs = 1 + crc_t10dif_is_optimized();
-
- return crypto_register_shashes(algs, num_algs);
-}
-
-static void __exit crct10dif_mod_fini(void)
-{
- crypto_unregister_shashes(algs, num_algs);
-}
-
-subsys_initcall(crct10dif_mod_init);
-module_exit(crct10dif_mod_fini);
-
-MODULE_AUTHOR("Tim Chen <tim.c.chen@linux.intel.com>");
-MODULE_DESCRIPTION("T10 DIF CRC calculation.");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS_CRYPTO("crct10dif");
-MODULE_ALIAS_CRYPTO("crct10dif-generic");
diff --git a/crypto/hkdf.c b/crypto/hkdf.c
new file mode 100644
index 000000000000..2434c5c42545
--- /dev/null
+++ b/crypto/hkdf.c
@@ -0,0 +1,573 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Implementation of HKDF ("HMAC-based Extract-and-Expand Key Derivation
+ * Function"), aka RFC 5869. See also the original paper (Krawczyk 2010):
+ * "Cryptographic Extraction and Key Derivation: The HKDF Scheme".
+ *
+ * Copyright 2019 Google LLC
+ */
+
+#include <crypto/internal/hash.h>
+#include <crypto/sha2.h>
+#include <crypto/hkdf.h>
+#include <linux/module.h>
+
+/*
+ * HKDF consists of two steps:
+ *
+ * 1. HKDF-Extract: extract a pseudorandom key from the input keying material
+ * and optional salt.
+ * 2. HKDF-Expand: expand the pseudorandom key into output keying material of
+ * any length, parameterized by an application-specific info string.
+ *
+ */
+
+/**
+ * hkdf_extract - HKDF-Extract (RFC 5869 section 2.2)
+ * @hmac_tfm: an HMAC transform using the hash function desired for HKDF. The
+ * caller is responsible for setting the @prk afterwards.
+ * @ikm: input keying material
+ * @ikmlen: length of @ikm
+ * @salt: input salt value
+ * @saltlen: length of @salt
+ * @prk: resulting pseudorandom key
+ *
+ * Extracts a pseudorandom key @prk from the input keying material
+ * @ikm with length @ikmlen and salt @salt with length @saltlen.
+ * The length of @prk is given by the digest size of @hmac_tfm.
+ * For an 'unsalted' version of HKDF-Extract @salt must be set
+ * to all zeroes and @saltlen must be set to the length of @prk.
+ *
+ * Returns 0 on success with the pseudorandom key stored in @prk,
+ * or a negative errno value otherwise.
+ */
+int hkdf_extract(struct crypto_shash *hmac_tfm, const u8 *ikm,
+ unsigned int ikmlen, const u8 *salt, unsigned int saltlen,
+ u8 *prk)
+{
+ int err;
+
+ err = crypto_shash_setkey(hmac_tfm, salt, saltlen);
+ if (!err)
+ err = crypto_shash_tfm_digest(hmac_tfm, ikm, ikmlen, prk);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(hkdf_extract);
+
+/**
+ * hkdf_expand - HKDF-Expand (RFC 5869 section 2.3)
+ * @hmac_tfm: hash context keyed with pseudorandom key
+ * @info: application-specific information
+ * @infolen: length of @info
+ * @okm: output keying material
+ * @okmlen: length of @okm
+ *
+ * This expands the pseudorandom key, which was already keyed into @hmac_tfm,
+ * into @okmlen bytes of output keying material parameterized by the
+ * application-specific @info of length @infolen bytes.
+ * This is thread-safe and may be called by multiple threads in parallel.
+ *
+ * Returns 0 on success with output keying material stored in @okm,
+ * or a negative errno value otherwise.
+ */
+int hkdf_expand(struct crypto_shash *hmac_tfm,
+ const u8 *info, unsigned int infolen,
+ u8 *okm, unsigned int okmlen)
+{
+ SHASH_DESC_ON_STACK(desc, hmac_tfm);
+ unsigned int i, hashlen = crypto_shash_digestsize(hmac_tfm);
+ int err;
+ const u8 *prev = NULL;
+ u8 counter = 1;
+ u8 tmp[HASH_MAX_DIGESTSIZE] = {};
+
+ if (WARN_ON(okmlen > 255 * hashlen))
+ return -EINVAL;
+
+ desc->tfm = hmac_tfm;
+
+ for (i = 0; i < okmlen; i += hashlen) {
+ err = crypto_shash_init(desc);
+ if (err)
+ goto out;
+
+ if (prev) {
+ err = crypto_shash_update(desc, prev, hashlen);
+ if (err)
+ goto out;
+ }
+
+ if (infolen) {
+ err = crypto_shash_update(desc, info, infolen);
+ if (err)
+ goto out;
+ }
+
+ BUILD_BUG_ON(sizeof(counter) != 1);
+ if (okmlen - i < hashlen) {
+ err = crypto_shash_finup(desc, &counter, 1, tmp);
+ if (err)
+ goto out;
+ memcpy(&okm[i], tmp, okmlen - i);
+ memzero_explicit(tmp, sizeof(tmp));
+ } else {
+ err = crypto_shash_finup(desc, &counter, 1, &okm[i]);
+ if (err)
+ goto out;
+ }
+ counter++;
+ prev = &okm[i];
+ }
+ err = 0;
+out:
+ if (unlikely(err))
+ memzero_explicit(okm, okmlen); /* so caller doesn't need to */
+ shash_desc_zero(desc);
+ memzero_explicit(tmp, HASH_MAX_DIGESTSIZE);
+ return err;
+}
+EXPORT_SYMBOL_GPL(hkdf_expand);
+
+struct hkdf_testvec {
+ const char *test;
+ const u8 *ikm;
+ const u8 *salt;
+ const u8 *info;
+ const u8 *prk;
+ const u8 *okm;
+ u16 ikm_size;
+ u16 salt_size;
+ u16 info_size;
+ u16 prk_size;
+ u16 okm_size;
+};
+
+/*
+ * HKDF test vectors from RFC5869
+ *
+ * Additional HKDF test vectors from
+ * https://github.com/brycx/Test-Vector-Generation/blob/master/HKDF/hkdf-hmac-sha2-test-vectors.md
+ */
+static const struct hkdf_testvec hkdf_sha256_tv[] = {
+ {
+ .test = "basic hkdf test",
+ .ikm = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
+ "\x0b\x0b\x0b\x0b\x0b\x0b",
+ .ikm_size = 22,
+ .salt = "\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c",
+ .salt_size = 13,
+ .info = "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7\xf8\xf9",
+ .info_size = 10,
+ .prk = "\x07\x77\x09\x36\x2c\x2e\x32\xdf\x0d\xdc\x3f\x0d\xc4\x7b\xba\x63"
+ "\x90\xb6\xc7\x3b\xb5\x0f\x9c\x31\x22\xec\x84\x4a\xd7\xc2\xb3\xe5",
+ .prk_size = 32,
+ .okm = "\x3c\xb2\x5f\x25\xfa\xac\xd5\x7a\x90\x43\x4f\x64\xd0\x36\x2f\x2a"
+ "\x2d\x2d\x0a\x90\xcf\x1a\x5a\x4c\x5d\xb0\x2d\x56\xec\xc4\xc5\xbf"
+ "\x34\x00\x72\x08\xd5\xb8\x87\x18\x58\x65",
+ .okm_size = 42,
+ }, {
+ .test = "hkdf test with long input",
+ .ikm = "\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
+ "\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
+ "\x20\x21\x22\x23\x24\x25\x26\x27\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
+ "\x30\x31\x32\x33\x34\x35\x36\x37\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
+ "\x40\x41\x42\x43\x44\x45\x46\x47\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f",
+ .ikm_size = 80,
+ .salt = "\x60\x61\x62\x63\x64\x65\x66\x67\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
+ "\x70\x71\x72\x73\x74\x75\x76\x77\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
+ "\x80\x81\x82\x83\x84\x85\x86\x87\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
+ "\x90\x91\x92\x93\x94\x95\x96\x97\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f"
+ "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7\xa8\xa9\xaa\xab\xac\xad\xae\xaf",
+ .salt_size = 80,
+ .info = "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
+ "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
+ "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf"
+ "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7\xe8\xe9\xea\xeb\xec\xed\xee\xef"
+ "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff",
+ .info_size = 80,
+ .prk = "\x06\xa6\xb8\x8c\x58\x53\x36\x1a\x06\x10\x4c\x9c\xeb\x35\xb4\x5c"
+ "\xef\x76\x00\x14\x90\x46\x71\x01\x4a\x19\x3f\x40\xc1\x5f\xc2\x44",
+ .prk_size = 32,
+ .okm = "\xb1\x1e\x39\x8d\xc8\x03\x27\xa1\xc8\xe7\xf7\x8c\x59\x6a\x49\x34"
+ "\x4f\x01\x2e\xda\x2d\x4e\xfa\xd8\xa0\x50\xcc\x4c\x19\xaf\xa9\x7c"
+ "\x59\x04\x5a\x99\xca\xc7\x82\x72\x71\xcb\x41\xc6\x5e\x59\x0e\x09"
+ "\xda\x32\x75\x60\x0c\x2f\x09\xb8\x36\x77\x93\xa9\xac\xa3\xdb\x71"
+ "\xcc\x30\xc5\x81\x79\xec\x3e\x87\xc1\x4c\x01\xd5\xc1\xf3\x43\x4f"
+ "\x1d\x87",
+ .okm_size = 82,
+ }, {
+ .test = "hkdf test with zero salt and info",
+ .ikm = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
+ "\x0b\x0b\x0b\x0b\x0b\x0b",
+ .ikm_size = 22,
+ .salt = NULL,
+ .salt_size = 0,
+ .info = NULL,
+ .info_size = 0,
+ .prk = "\x19\xef\x24\xa3\x2c\x71\x7b\x16\x7f\x33\xa9\x1d\x6f\x64\x8b\xdf"
+ "\x96\x59\x67\x76\xaf\xdb\x63\x77\xac\x43\x4c\x1c\x29\x3c\xcb\x04",
+ .prk_size = 32,
+ .okm = "\x8d\xa4\xe7\x75\xa5\x63\xc1\x8f\x71\x5f\x80\x2a\x06\x3c\x5a\x31"
+ "\xb8\xa1\x1f\x5c\x5e\xe1\x87\x9e\xc3\x45\x4e\x5f\x3c\x73\x8d\x2d"
+ "\x9d\x20\x13\x95\xfa\xa4\xb6\x1a\x96\xc8",
+ .okm_size = 42,
+ }, {
+ .test = "hkdf test with short input",
+ .ikm = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b",
+ .ikm_size = 11,
+ .salt = "\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c",
+ .salt_size = 13,
+ .info = "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7\xf8\xf9",
+ .info_size = 10,
+ .prk = "\x82\x65\xf6\x9d\x7f\xf7\xe5\x01\x37\x93\x01\x5c\xa0\xef\x92\x0c"
+ "\xb1\x68\x21\x99\xc8\xbc\x3a\x00\xda\x0c\xab\x47\xb7\xb0\x0f\xdf",
+ .prk_size = 32,
+ .okm = "\x58\xdc\xe1\x0d\x58\x01\xcd\xfd\xa8\x31\x72\x6b\xfe\xbc\xb7\x43"
+ "\xd1\x4a\x7e\xe8\x3a\xa0\x57\xa9\x3d\x59\xb0\xa1\x31\x7f\xf0\x9d"
+ "\x10\x5c\xce\xcf\x53\x56\x92\xb1\x4d\xd5",
+ .okm_size = 42,
+ }, {
+ .test = "unsalted hkdf test with zero info",
+ .ikm = "\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c"
+ "\x0c\x0c\x0c\x0c\x0c\x0c",
+ .ikm_size = 22,
+ .salt = "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00",
+ .salt_size = 32,
+ .info = NULL,
+ .info_size = 0,
+ .prk = "\xaa\x84\x1e\x1f\x35\x74\xf3\x2d\x13\xfb\xa8\x00\x5f\xcd\x9b\x8d"
+ "\x77\x67\x82\xa5\xdf\xa1\x92\x38\x92\xfd\x8b\x63\x5d\x3a\x89\xdf",
+ .prk_size = 32,
+ .okm = "\x59\x68\x99\x17\x9a\xb1\xbc\x00\xa7\xc0\x37\x86\xff\x43\xee\x53"
+ "\x50\x04\xbe\x2b\xb9\xbe\x68\xbc\x14\x06\x63\x6f\x54\xbd\x33\x8a"
+ "\x66\xa2\x37\xba\x2a\xcb\xce\xe3\xc9\xa7",
+ .okm_size = 42,
+ }
+};
+
+static const struct hkdf_testvec hkdf_sha384_tv[] = {
+ {
+ .test = "basic hkdf test",
+ .ikm = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
+ "\x0b\x0b\x0b\x0b\x0b\x0b",
+ .ikm_size = 22,
+ .salt = "\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c",
+ .salt_size = 13,
+ .info = "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7\xf8\xf9",
+ .info_size = 10,
+ .prk = "\x70\x4b\x39\x99\x07\x79\xce\x1d\xc5\x48\x05\x2c\x7d\xc3\x9f\x30"
+ "\x35\x70\xdd\x13\xfb\x39\xf7\xac\xc5\x64\x68\x0b\xef\x80\xe8\xde"
+ "\xc7\x0e\xe9\xa7\xe1\xf3\xe2\x93\xef\x68\xec\xeb\x07\x2a\x5a\xde",
+ .prk_size = 48,
+ .okm = "\x9b\x50\x97\xa8\x60\x38\xb8\x05\x30\x90\x76\xa4\x4b\x3a\x9f\x38"
+ "\x06\x3e\x25\xb5\x16\xdc\xbf\x36\x9f\x39\x4c\xfa\xb4\x36\x85\xf7"
+ "\x48\xb6\x45\x77\x63\xe4\xf0\x20\x4f\xc5",
+ .okm_size = 42,
+ }, {
+ .test = "hkdf test with long input",
+ .ikm = "\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
+ "\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
+ "\x20\x21\x22\x23\x24\x25\x26\x27\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
+ "\x30\x31\x32\x33\x34\x35\x36\x37\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
+ "\x40\x41\x42\x43\x44\x45\x46\x47\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f",
+ .ikm_size = 80,
+ .salt = "\x60\x61\x62\x63\x64\x65\x66\x67\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
+ "\x70\x71\x72\x73\x74\x75\x76\x77\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
+ "\x80\x81\x82\x83\x84\x85\x86\x87\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
+ "\x90\x91\x92\x93\x94\x95\x96\x97\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f"
+ "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7\xa8\xa9\xaa\xab\xac\xad\xae\xaf",
+ .salt_size = 80,
+ .info = "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
+ "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
+ "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf"
+ "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7\xe8\xe9\xea\xeb\xec\xed\xee\xef"
+ "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff",
+ .info_size = 80,
+ .prk = "\xb3\x19\xf6\x83\x1d\xff\x93\x14\xef\xb6\x43\xba\xa2\x92\x63\xb3"
+ "\x0e\x4a\x8d\x77\x9f\xe3\x1e\x9c\x90\x1e\xfd\x7d\xe7\x37\xc8\x5b"
+ "\x62\xe6\x76\xd4\xdc\x87\xb0\x89\x5c\x6a\x7d\xc9\x7b\x52\xce\xbb",
+ .prk_size = 48,
+ .okm = "\x48\x4c\xa0\x52\xb8\xcc\x72\x4f\xd1\xc4\xec\x64\xd5\x7b\x4e\x81"
+ "\x8c\x7e\x25\xa8\xe0\xf4\x56\x9e\xd7\x2a\x6a\x05\xfe\x06\x49\xee"
+ "\xbf\x69\xf8\xd5\xc8\x32\x85\x6b\xf4\xe4\xfb\xc1\x79\x67\xd5\x49"
+ "\x75\x32\x4a\x94\x98\x7f\x7f\x41\x83\x58\x17\xd8\x99\x4f\xdb\xd6"
+ "\xf4\xc0\x9c\x55\x00\xdc\xa2\x4a\x56\x22\x2f\xea\x53\xd8\x96\x7a"
+ "\x8b\x2e",
+ .okm_size = 82,
+ }, {
+ .test = "hkdf test with zero salt and info",
+ .ikm = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
+ "\x0b\x0b\x0b\x0b\x0b\x0b",
+ .ikm_size = 22,
+ .salt = NULL,
+ .salt_size = 0,
+ .info = NULL,
+ .info_size = 0,
+ .prk = "\x10\xe4\x0c\xf0\x72\xa4\xc5\x62\x6e\x43\xdd\x22\xc1\xcf\x72\x7d"
+ "\x4b\xb1\x40\x97\x5c\x9a\xd0\xcb\xc8\xe4\x5b\x40\x06\x8f\x8f\x0b"
+ "\xa5\x7c\xdb\x59\x8a\xf9\xdf\xa6\x96\x3a\x96\x89\x9a\xf0\x47\xe5",
+ .prk_size = 48,
+ .okm = "\xc8\xc9\x6e\x71\x0f\x89\xb0\xd7\x99\x0b\xca\x68\xbc\xde\xc8\xcf"
+ "\x85\x40\x62\xe5\x4c\x73\xa7\xab\xc7\x43\xfa\xde\x9b\x24\x2d\xaa"
+ "\xcc\x1c\xea\x56\x70\x41\x5b\x52\x84\x9c",
+ .okm_size = 42,
+ }, {
+ .test = "hkdf test with short input",
+ .ikm = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b",
+ .ikm_size = 11,
+ .salt = "\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c",
+ .salt_size = 13,
+ .info = "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7\xf8\xf9",
+ .info_size = 10,
+ .prk = "\x6d\x31\x69\x98\x28\x79\x80\x88\xb3\x59\xda\xd5\x0b\x8f\x01\xb0"
+ "\x15\xf1\x7a\xa3\xbd\x4e\x27\xa6\xe9\xf8\x73\xb7\x15\x85\xca\x6a"
+ "\x00\xd1\xf0\x82\x12\x8a\xdb\x3c\xf0\x53\x0b\x57\xc0\xf9\xac\x72",
+ .prk_size = 48,
+ .okm = "\xfb\x7e\x67\x43\xeb\x42\xcd\xe9\x6f\x1b\x70\x77\x89\x52\xab\x75"
+ "\x48\xca\xfe\x53\x24\x9f\x7f\xfe\x14\x97\xa1\x63\x5b\x20\x1f\xf1"
+ "\x85\xb9\x3e\x95\x19\x92\xd8\x58\xf1\x1a",
+ .okm_size = 42,
+ }, {
+ .test = "unsalted hkdf test with zero info",
+ .ikm = "\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c"
+ "\x0c\x0c\x0c\x0c\x0c\x0c",
+ .ikm_size = 22,
+ .salt = "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00",
+ .salt_size = 48,
+ .info = NULL,
+ .info_size = 0,
+ .prk = "\x9d\x2d\xa5\x06\x6f\x05\xd1\x6c\x59\xfe\xdf\x6c\x5f\x32\xc7\x5e"
+ "\xda\x9a\x47\xa7\x9c\x93\x6a\xa4\x4c\xb7\x63\xa8\xe2\x2f\xfb\xfc"
+ "\xd8\xfe\x55\x43\x58\x53\x47\x21\x90\x39\xd1\x68\x28\x36\x33\xf5",
+ .prk_size = 48,
+ .okm = "\x6a\xd7\xc7\x26\xc8\x40\x09\x54\x6a\x76\xe0\x54\x5d\xf2\x66\x78"
+ "\x7e\x2b\x2c\xd6\xca\x43\x73\xa1\xf3\x14\x50\xa7\xbd\xf9\x48\x2b"
+ "\xfa\xb8\x11\xf5\x54\x20\x0e\xad\x8f\x53",
+ .okm_size = 42,
+ }
+};
+
+static const struct hkdf_testvec hkdf_sha512_tv[] = {
+ {
+ .test = "basic hkdf test",
+ .ikm = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
+ "\x0b\x0b\x0b\x0b\x0b\x0b",
+ .ikm_size = 22,
+ .salt = "\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c",
+ .salt_size = 13,
+ .info = "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7\xf8\xf9",
+ .info_size = 10,
+ .prk = "\x66\x57\x99\x82\x37\x37\xde\xd0\x4a\x88\xe4\x7e\x54\xa5\x89\x0b"
+ "\xb2\xc3\xd2\x47\xc7\xa4\x25\x4a\x8e\x61\x35\x07\x23\x59\x0a\x26"
+ "\xc3\x62\x38\x12\x7d\x86\x61\xb8\x8c\xf8\x0e\xf8\x02\xd5\x7e\x2f"
+ "\x7c\xeb\xcf\x1e\x00\xe0\x83\x84\x8b\xe1\x99\x29\xc6\x1b\x42\x37",
+ .prk_size = 64,
+ .okm = "\x83\x23\x90\x08\x6c\xda\x71\xfb\x47\x62\x5b\xb5\xce\xb1\x68\xe4"
+ "\xc8\xe2\x6a\x1a\x16\xed\x34\xd9\xfc\x7f\xe9\x2c\x14\x81\x57\x93"
+ "\x38\xda\x36\x2c\xb8\xd9\xf9\x25\xd7\xcb",
+ .okm_size = 42,
+ }, {
+ .test = "hkdf test with long input",
+ .ikm = "\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
+ "\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
+ "\x20\x21\x22\x23\x24\x25\x26\x27\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
+ "\x30\x31\x32\x33\x34\x35\x36\x37\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
+ "\x40\x41\x42\x43\x44\x45\x46\x47\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f",
+ .ikm_size = 80,
+ .salt = "\x60\x61\x62\x63\x64\x65\x66\x67\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
+ "\x70\x71\x72\x73\x74\x75\x76\x77\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
+ "\x80\x81\x82\x83\x84\x85\x86\x87\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
+ "\x90\x91\x92\x93\x94\x95\x96\x97\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f"
+ "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7\xa8\xa9\xaa\xab\xac\xad\xae\xaf",
+ .salt_size = 80,
+ .info = "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
+ "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
+ "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf"
+ "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7\xe8\xe9\xea\xeb\xec\xed\xee\xef"
+ "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff",
+ .info_size = 80,
+ .prk = "\x35\x67\x25\x42\x90\x7d\x4e\x14\x2c\x00\xe8\x44\x99\xe7\x4e\x1d"
+ "\xe0\x8b\xe8\x65\x35\xf9\x24\xe0\x22\x80\x4a\xd7\x75\xdd\xe2\x7e"
+ "\xc8\x6c\xd1\xe5\xb7\xd1\x78\xc7\x44\x89\xbd\xbe\xb3\x07\x12\xbe"
+ "\xb8\x2d\x4f\x97\x41\x6c\x5a\x94\xea\x81\xeb\xdf\x3e\x62\x9e\x4a",
+ .prk_size = 64,
+ .okm = "\xce\x6c\x97\x19\x28\x05\xb3\x46\xe6\x16\x1e\x82\x1e\xd1\x65\x67"
+ "\x3b\x84\xf4\x00\xa2\xb5\x14\xb2\xfe\x23\xd8\x4c\xd1\x89\xdd\xf1"
+ "\xb6\x95\xb4\x8c\xbd\x1c\x83\x88\x44\x11\x37\xb3\xce\x28\xf1\x6a"
+ "\xa6\x4b\xa3\x3b\xa4\x66\xb2\x4d\xf6\xcf\xcb\x02\x1e\xcf\xf2\x35"
+ "\xf6\xa2\x05\x6c\xe3\xaf\x1d\xe4\x4d\x57\x20\x97\xa8\x50\x5d\x9e"
+ "\x7a\x93",
+ .okm_size = 82,
+ }, {
+ .test = "hkdf test with zero salt and info",
+ .ikm = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
+ "\x0b\x0b\x0b\x0b\x0b\x0b",
+ .ikm_size = 22,
+ .salt = NULL,
+ .salt_size = 0,
+ .info = NULL,
+ .info_size = 0,
+ .prk = "\xfd\x20\x0c\x49\x87\xac\x49\x13\x13\xbd\x4a\x2a\x13\x28\x71\x21"
+ "\x24\x72\x39\xe1\x1c\x9e\xf8\x28\x02\x04\x4b\x66\xef\x35\x7e\x5b"
+ "\x19\x44\x98\xd0\x68\x26\x11\x38\x23\x48\x57\x2a\x7b\x16\x11\xde"
+ "\x54\x76\x40\x94\x28\x63\x20\x57\x8a\x86\x3f\x36\x56\x2b\x0d\xf6",
+ .prk_size = 64,
+ .okm = "\xf5\xfa\x02\xb1\x82\x98\xa7\x2a\x8c\x23\x89\x8a\x87\x03\x47\x2c"
+ "\x6e\xb1\x79\xdc\x20\x4c\x03\x42\x5c\x97\x0e\x3b\x16\x4b\xf9\x0f"
+ "\xff\x22\xd0\x48\x36\xd0\xe2\x34\x3b\xac",
+ .okm_size = 42,
+ }, {
+ .test = "hkdf test with short input",
+ .ikm = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b",
+ .ikm_size = 11,
+ .salt = "\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c",
+ .salt_size = 13,
+ .info = "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7\xf8\xf9",
+ .info_size = 10,
+ .prk = "\x67\x40\x9c\x9c\xac\x28\xb5\x2e\xe9\xfa\xd9\x1c\x2f\xda\x99\x9f"
+ "\x7c\xa2\x2e\x34\x34\xf0\xae\x77\x28\x63\x83\x65\x68\xad\x6a\x7f"
+ "\x10\xcf\x11\x3b\xfd\xdd\x56\x01\x29\xa5\x94\xa8\xf5\x23\x85\xc2"
+ "\xd6\x61\xd7\x85\xd2\x9c\xe9\x3a\x11\x40\x0c\x92\x06\x83\x18\x1d",
+ .prk_size = 64,
+ .okm = "\x74\x13\xe8\x99\x7e\x02\x06\x10\xfb\xf6\x82\x3f\x2c\xe1\x4b\xff"
+ "\x01\x87\x5d\xb1\xca\x55\xf6\x8c\xfc\xf3\x95\x4d\xc8\xaf\xf5\x35"
+ "\x59\xbd\x5e\x30\x28\xb0\x80\xf7\xc0\x68",
+ .okm_size = 42,
+ }, {
+ .test = "unsalted hkdf test with zero info",
+ .ikm = "\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c"
+ "\x0c\x0c\x0c\x0c\x0c\x0c",
+ .ikm_size = 22,
+ .salt = "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00",
+ .salt_size = 64,
+ .info = NULL,
+ .info_size = 0,
+ .prk = "\x53\x46\xb3\x76\xbf\x3a\xa9\xf8\x4f\x8f\x6e\xd5\xb1\xc4\xf4\x89"
+ "\x17\x2e\x24\x4d\xac\x30\x3d\x12\xf6\x8e\xcc\x76\x6e\xa6\x00\xaa"
+ "\x88\x49\x5e\x7f\xb6\x05\x80\x31\x22\xfa\x13\x69\x24\xa8\x40\xb1"
+ "\xf0\x71\x9d\x2d\x5f\x68\xe2\x9b\x24\x22\x99\xd7\x58\xed\x68\x0c",
+ .prk_size = 64,
+ .okm = "\x14\x07\xd4\x60\x13\xd9\x8b\xc6\xde\xce\xfc\xfe\xe5\x5f\x0f\x90"
+ "\xb0\xc7\xf6\x3d\x68\xeb\x1a\x80\xea\xf0\x7e\x95\x3c\xfc\x0a\x3a"
+ "\x52\x40\xa1\x55\xd6\xe4\xda\xa9\x65\xbb",
+ .okm_size = 42,
+ }
+};
+
+static int hkdf_test(const char *shash, const struct hkdf_testvec *tv)
+{
+ struct crypto_shash *tfm = NULL;
+ u8 *prk = NULL, *okm = NULL;
+ unsigned int prk_size;
+ const char *driver;
+ int err;
+
+ tfm = crypto_alloc_shash(shash, 0, 0);
+ if (IS_ERR(tfm)) {
+ pr_err("%s(%s): failed to allocate transform: %ld\n",
+ tv->test, shash, PTR_ERR(tfm));
+ return PTR_ERR(tfm);
+ }
+ driver = crypto_shash_driver_name(tfm);
+
+ prk_size = crypto_shash_digestsize(tfm);
+ prk = kzalloc(prk_size, GFP_KERNEL);
+ if (!prk) {
+ err = -ENOMEM;
+ goto out_free;
+ }
+
+ if (tv->prk_size != prk_size) {
+ pr_err("%s(%s): prk size mismatch (vec %u, digest %u)\n",
+ tv->test, driver, tv->prk_size, prk_size);
+ err = -EINVAL;
+ goto out_free;
+ }
+
+ err = hkdf_extract(tfm, tv->ikm, tv->ikm_size,
+ tv->salt, tv->salt_size, prk);
+ if (err) {
+ pr_err("%s(%s): hkdf_extract failed with %d\n",
+ tv->test, driver, err);
+ goto out_free;
+ }
+
+ if (memcmp(prk, tv->prk, tv->prk_size)) {
+ pr_err("%s(%s): hkdf_extract prk mismatch\n",
+ tv->test, driver);
+ print_hex_dump(KERN_ERR, "prk: ", DUMP_PREFIX_NONE,
+ 16, 1, prk, tv->prk_size, false);
+ err = -EINVAL;
+ goto out_free;
+ }
+
+ okm = kzalloc(tv->okm_size, GFP_KERNEL);
+ if (!okm) {
+ err = -ENOMEM;
+ goto out_free;
+ }
+
+ err = crypto_shash_setkey(tfm, tv->prk, tv->prk_size);
+ if (err) {
+ pr_err("%s(%s): failed to set prk, error %d\n",
+ tv->test, driver, err);
+ goto out_free;
+ }
+
+ err = hkdf_expand(tfm, tv->info, tv->info_size,
+ okm, tv->okm_size);
+ if (err) {
+ pr_err("%s(%s): hkdf_expand() failed with %d\n",
+ tv->test, driver, err);
+ } else if (memcmp(okm, tv->okm, tv->okm_size)) {
+ pr_err("%s(%s): hkdf_expand() okm mismatch\n",
+ tv->test, driver);
+ print_hex_dump(KERN_ERR, "okm: ", DUMP_PREFIX_NONE,
+ 16, 1, okm, tv->okm_size, false);
+ err = -EINVAL;
+ }
+out_free:
+ kfree(okm);
+ kfree(prk);
+ crypto_free_shash(tfm);
+ return err;
+}
+
+static int __init crypto_hkdf_module_init(void)
+{
+ int ret = 0, i;
+
+ if (IS_ENABLED(CONFIG_CRYPTO_MANAGER_DISABLE_TESTS))
+ return 0;
+
+ for (i = 0; i < ARRAY_SIZE(hkdf_sha256_tv); i++) {
+ ret = hkdf_test("hmac(sha256)", &hkdf_sha256_tv[i]);
+ if (ret)
+ return ret;
+ }
+ for (i = 0; i < ARRAY_SIZE(hkdf_sha384_tv); i++) {
+ ret = hkdf_test("hmac(sha384)", &hkdf_sha384_tv[i]);
+ if (ret)
+ return ret;
+ }
+ for (i = 0; i < ARRAY_SIZE(hkdf_sha512_tv); i++) {
+ ret = hkdf_test("hmac(sha512)", &hkdf_sha512_tv[i]);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
+static void __exit crypto_hkdf_module_exit(void) {}
+
+module_init(crypto_hkdf_module_init);
+module_exit(crypto_hkdf_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("HMAC-based Key Derivation Function (HKDF)");
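As a quick check on the hkdf_expand() loop added above: the intended call sequence is hkdf_extract() to derive the PRK, crypto_shash_setkey() with that PRK, then hkdf_expand() for the OKM (the same order hkdf_test() uses), and with HMAC-SHA256 the 42-byte OKM of the RFC 5869 vectors takes two T(i) blocks, the second truncated to 10 bytes. A tiny user-space sketch of that block arithmetic, with the digest and OKM sizes as stated assumptions:

#include <stdio.h>

int main(void)
{
	unsigned int hashlen = 32;        /* HMAC-SHA256 digest size */
	unsigned int okmlen = 42;         /* OKM length used by the RFC 5869 vectors */

	if (okmlen > 255u * hashlen) {    /* same limit hkdf_expand() enforces */
		fprintf(stderr, "okmlen too large\n");
		return 1;
	}

	for (unsigned int i = 0, counter = 1; i < okmlen; i += hashlen, counter++) {
		unsigned int take = okmlen - i < hashlen ? okmlen - i : hashlen;
		printf("block T(%u): keep %u of %u bytes\n", counter, take, hashlen);
	}
	return 0;
}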
diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
index e1a74cb2cfbe..879fc21dcc16 100644
--- a/crypto/tcrypt.c
+++ b/crypto/tcrypt.c
@@ -1654,10 +1654,6 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb)
ret = min(ret, tcrypt_test("ghash"));
break;
- case 47:
- ret = min(ret, tcrypt_test("crct10dif"));
- break;
-
case 48:
ret = min(ret, tcrypt_test("sha3-224"));
break;
@@ -2272,10 +2268,6 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb)
test_hash_speed("crc32c", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
fallthrough;
- case 320:
- test_hash_speed("crct10dif", sec, generic_hash_speed_template);
- if (mode > 300 && mode < 400) break;
- fallthrough;
case 321:
test_hash_speed("poly1305", sec, poly1305_speed_template);
if (mode > 300 && mode < 400) break;
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index e61490ba4095..1e42582bc7f1 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -4760,20 +4760,6 @@ static const struct alg_test_desc alg_test_descs[] = {
.hash = __VECS(crc32c_tv_template)
}
}, {
- .alg = "crc64-rocksoft",
- .test = alg_test_hash,
- .fips_allowed = 1,
- .suite = {
- .hash = __VECS(crc64_rocksoft_tv_template)
- }
- }, {
- .alg = "crct10dif",
- .test = alg_test_hash,
- .fips_allowed = 1,
- .suite = {
- .hash = __VECS(crct10dif_tv_template)
- }
- }, {
.alg = "ctr(aes)",
.test = alg_test_skcipher,
.fips_allowed = 1,
diff --git a/crypto/testmgr.h b/crypto/testmgr.h
index d754ab997186..d3a99d15a3e5 100644
--- a/crypto/testmgr.h
+++ b/crypto/testmgr.h
@@ -6017,309 +6017,6 @@ static const struct hash_testvec rmd160_tv_template[] = {
}
};
-static const u8 zeroes[4096] = { [0 ... 4095] = 0 };
-static const u8 ones[4096] = { [0 ... 4095] = 0xff };
-
-static const struct hash_testvec crc64_rocksoft_tv_template[] = {
- {
- .plaintext = zeroes,
- .psize = 4096,
- .digest = "\x4e\xb6\x22\xeb\x67\xd3\x82\x64",
- }, {
- .plaintext = ones,
- .psize = 4096,
- .digest = "\xac\xa3\xec\x02\x73\xba\xdd\xc0",
- }
-};
-
-static const struct hash_testvec crct10dif_tv_template[] = {
- {
- .plaintext = "abc",
- .psize = 3,
- .digest = (u8 *)(u16 []){ 0x443b },
- }, {
- .plaintext = "1234567890123456789012345678901234567890"
- "123456789012345678901234567890123456789",
- .psize = 79,
- .digest = (u8 *)(u16 []){ 0x4b70 },
- }, {
- .plaintext = "abcdddddddddddddddddddddddddddddddddddddddd"
- "ddddddddddddd",
- .psize = 56,
- .digest = (u8 *)(u16 []){ 0x9ce3 },
- }, {
- .plaintext = "1234567890123456789012345678901234567890"
- "1234567890123456789012345678901234567890"
- "1234567890123456789012345678901234567890"
- "1234567890123456789012345678901234567890"
- "1234567890123456789012345678901234567890"
- "1234567890123456789012345678901234567890"
- "1234567890123456789012345678901234567890"
- "123456789012345678901234567890123456789",
- .psize = 319,
- .digest = (u8 *)(u16 []){ 0x44c6 },
- }, {
- .plaintext = "\x6e\x05\x79\x10\xa7\x1b\xb2\x49"
- "\xe0\x54\xeb\x82\x19\x8d\x24\xbb"
- "\x2f\xc6\x5d\xf4\x68\xff\x96\x0a"
- "\xa1\x38\xcf\x43\xda\x71\x08\x7c"
- "\x13\xaa\x1e\xb5\x4c\xe3\x57\xee"
- "\x85\x1c\x90\x27\xbe\x32\xc9\x60"
- "\xf7\x6b\x02\x99\x0d\xa4\x3b\xd2"
- "\x46\xdd\x74\x0b\x7f\x16\xad\x21"
- "\xb8\x4f\xe6\x5a\xf1\x88\x1f\x93"
- "\x2a\xc1\x35\xcc\x63\xfa\x6e\x05"
- "\x9c\x10\xa7\x3e\xd5\x49\xe0\x77"
- "\x0e\x82\x19\xb0\x24\xbb\x52\xe9"
- "\x5d\xf4\x8b\x22\x96\x2d\xc4\x38"
- "\xcf\x66\xfd\x71\x08\x9f\x13\xaa"
- "\x41\xd8\x4c\xe3\x7a\x11\x85\x1c"
- "\xb3\x27\xbe\x55\xec\x60\xf7\x8e"
- "\x02\x99\x30\xc7\x3b\xd2\x69\x00"
- "\x74\x0b\xa2\x16\xad\x44\xdb\x4f"
- "\xe6\x7d\x14\x88\x1f\xb6\x2a\xc1"
- "\x58\xef\x63\xfa\x91\x05\x9c\x33"
- "\xca\x3e\xd5\x6c\x03\x77\x0e\xa5"
- "\x19\xb0\x47\xde\x52\xe9\x80\x17"
- "\x8b\x22\xb9\x2d\xc4\x5b\xf2\x66"
- "\xfd\x94\x08\x9f\x36\xcd\x41\xd8"
- "\x6f\x06\x7a\x11\xa8\x1c\xb3\x4a"
- "\xe1\x55\xec\x83\x1a\x8e\x25\xbc"
- "\x30\xc7\x5e\xf5\x69\x00\x97\x0b"
- "\xa2\x39\xd0\x44\xdb\x72\x09\x7d"
- "\x14\xab\x1f\xb6\x4d\xe4\x58\xef"
- "\x86\x1d\x91\x28\xbf\x33\xca\x61"
- "\xf8\x6c\x03\x9a\x0e\xa5\x3c\xd3"
- "\x47\xde\x75\x0c\x80\x17\xae\x22"
- "\xb9\x50\xe7\x5b\xf2\x89\x20\x94"
- "\x2b\xc2\x36\xcd\x64\xfb\x6f\x06"
- "\x9d\x11\xa8\x3f\xd6\x4a\xe1\x78"
- "\x0f\x83\x1a\xb1\x25\xbc\x53\xea"
- "\x5e\xf5\x8c\x00\x97\x2e\xc5\x39"
- "\xd0\x67\xfe\x72\x09\xa0\x14\xab"
- "\x42\xd9\x4d\xe4\x7b\x12\x86\x1d"
- "\xb4\x28\xbf\x56\xed\x61\xf8\x8f"
- "\x03\x9a\x31\xc8\x3c\xd3\x6a\x01"
- "\x75\x0c\xa3\x17\xae\x45\xdc\x50"
- "\xe7\x7e\x15\x89\x20\xb7\x2b\xc2"
- "\x59\xf0\x64\xfb\x92\x06\x9d\x34"
- "\xcb\x3f\xd6\x6d\x04\x78\x0f\xa6"
- "\x1a\xb1\x48\xdf\x53\xea\x81\x18"
- "\x8c\x23\xba\x2e\xc5\x5c\xf3\x67"
- "\xfe\x95\x09\xa0\x37\xce\x42\xd9"
- "\x70\x07\x7b\x12\xa9\x1d\xb4\x4b"
- "\xe2\x56\xed\x84\x1b\x8f\x26\xbd"
- "\x31\xc8\x5f\xf6\x6a\x01\x98\x0c"
- "\xa3\x3a\xd1\x45\xdc\x73\x0a\x7e"
- "\x15\xac\x20\xb7\x4e\xe5\x59\xf0"
- "\x87\x1e\x92\x29\xc0\x34\xcb\x62"
- "\xf9\x6d\x04\x9b\x0f\xa6\x3d\xd4"
- "\x48\xdf\x76\x0d\x81\x18\xaf\x23"
- "\xba\x51\xe8\x5c\xf3\x8a\x21\x95"
- "\x2c\xc3\x37\xce\x65\xfc\x70\x07"
- "\x9e\x12\xa9\x40\xd7\x4b\xe2\x79"
- "\x10\x84\x1b\xb2\x26\xbd\x54\xeb"
- "\x5f\xf6\x8d\x01\x98\x2f\xc6\x3a"
- "\xd1\x68\xff\x73\x0a\xa1\x15\xac"
- "\x43\xda\x4e\xe5\x7c\x13\x87\x1e"
- "\xb5\x29\xc0\x57\xee\x62\xf9\x90"
- "\x04\x9b\x32\xc9\x3d\xd4\x6b\x02"
- "\x76\x0d\xa4\x18\xaf\x46\xdd\x51"
- "\xe8\x7f\x16\x8a\x21\xb8\x2c\xc3"
- "\x5a\xf1\x65\xfc\x93\x07\x9e\x35"
- "\xcc\x40\xd7\x6e\x05\x79\x10\xa7"
- "\x1b\xb2\x49\xe0\x54\xeb\x82\x19"
- "\x8d\x24\xbb\x2f\xc6\x5d\xf4\x68"
- "\xff\x96\x0a\xa1\x38\xcf\x43\xda"
- "\x71\x08\x7c\x13\xaa\x1e\xb5\x4c"
- "\xe3\x57\xee\x85\x1c\x90\x27\xbe"
- "\x32\xc9\x60\xf7\x6b\x02\x99\x0d"
- "\xa4\x3b\xd2\x46\xdd\x74\x0b\x7f"
- "\x16\xad\x21\xb8\x4f\xe6\x5a\xf1"
- "\x88\x1f\x93\x2a\xc1\x35\xcc\x63"
- "\xfa\x6e\x05\x9c\x10\xa7\x3e\xd5"
- "\x49\xe0\x77\x0e\x82\x19\xb0\x24"
- "\xbb\x52\xe9\x5d\xf4\x8b\x22\x96"
- "\x2d\xc4\x38\xcf\x66\xfd\x71\x08"
- "\x9f\x13\xaa\x41\xd8\x4c\xe3\x7a"
- "\x11\x85\x1c\xb3\x27\xbe\x55\xec"
- "\x60\xf7\x8e\x02\x99\x30\xc7\x3b"
- "\xd2\x69\x00\x74\x0b\xa2\x16\xad"
- "\x44\xdb\x4f\xe6\x7d\x14\x88\x1f"
- "\xb6\x2a\xc1\x58\xef\x63\xfa\x91"
- "\x05\x9c\x33\xca\x3e\xd5\x6c\x03"
- "\x77\x0e\xa5\x19\xb0\x47\xde\x52"
- "\xe9\x80\x17\x8b\x22\xb9\x2d\xc4"
- "\x5b\xf2\x66\xfd\x94\x08\x9f\x36"
- "\xcd\x41\xd8\x6f\x06\x7a\x11\xa8"
- "\x1c\xb3\x4a\xe1\x55\xec\x83\x1a"
- "\x8e\x25\xbc\x30\xc7\x5e\xf5\x69"
- "\x00\x97\x0b\xa2\x39\xd0\x44\xdb"
- "\x72\x09\x7d\x14\xab\x1f\xb6\x4d"
- "\xe4\x58\xef\x86\x1d\x91\x28\xbf"
- "\x33\xca\x61\xf8\x6c\x03\x9a\x0e"
- "\xa5\x3c\xd3\x47\xde\x75\x0c\x80"
- "\x17\xae\x22\xb9\x50\xe7\x5b\xf2"
- "\x89\x20\x94\x2b\xc2\x36\xcd\x64"
- "\xfb\x6f\x06\x9d\x11\xa8\x3f\xd6"
- "\x4a\xe1\x78\x0f\x83\x1a\xb1\x25"
- "\xbc\x53\xea\x5e\xf5\x8c\x00\x97"
- "\x2e\xc5\x39\xd0\x67\xfe\x72\x09"
- "\xa0\x14\xab\x42\xd9\x4d\xe4\x7b"
- "\x12\x86\x1d\xb4\x28\xbf\x56\xed"
- "\x61\xf8\x8f\x03\x9a\x31\xc8\x3c"
- "\xd3\x6a\x01\x75\x0c\xa3\x17\xae"
- "\x45\xdc\x50\xe7\x7e\x15\x89\x20"
- "\xb7\x2b\xc2\x59\xf0\x64\xfb\x92"
- "\x06\x9d\x34\xcb\x3f\xd6\x6d\x04"
- "\x78\x0f\xa6\x1a\xb1\x48\xdf\x53"
- "\xea\x81\x18\x8c\x23\xba\x2e\xc5"
- "\x5c\xf3\x67\xfe\x95\x09\xa0\x37"
- "\xce\x42\xd9\x70\x07\x7b\x12\xa9"
- "\x1d\xb4\x4b\xe2\x56\xed\x84\x1b"
- "\x8f\x26\xbd\x31\xc8\x5f\xf6\x6a"
- "\x01\x98\x0c\xa3\x3a\xd1\x45\xdc"
- "\x73\x0a\x7e\x15\xac\x20\xb7\x4e"
- "\xe5\x59\xf0\x87\x1e\x92\x29\xc0"
- "\x34\xcb\x62\xf9\x6d\x04\x9b\x0f"
- "\xa6\x3d\xd4\x48\xdf\x76\x0d\x81"
- "\x18\xaf\x23\xba\x51\xe8\x5c\xf3"
- "\x8a\x21\x95\x2c\xc3\x37\xce\x65"
- "\xfc\x70\x07\x9e\x12\xa9\x40\xd7"
- "\x4b\xe2\x79\x10\x84\x1b\xb2\x26"
- "\xbd\x54\xeb\x5f\xf6\x8d\x01\x98"
- "\x2f\xc6\x3a\xd1\x68\xff\x73\x0a"
- "\xa1\x15\xac\x43\xda\x4e\xe5\x7c"
- "\x13\x87\x1e\xb5\x29\xc0\x57\xee"
- "\x62\xf9\x90\x04\x9b\x32\xc9\x3d"
- "\xd4\x6b\x02\x76\x0d\xa4\x18\xaf"
- "\x46\xdd\x51\xe8\x7f\x16\x8a\x21"
- "\xb8\x2c\xc3\x5a\xf1\x65\xfc\x93"
- "\x07\x9e\x35\xcc\x40\xd7\x6e\x05"
- "\x79\x10\xa7\x1b\xb2\x49\xe0\x54"
- "\xeb\x82\x19\x8d\x24\xbb\x2f\xc6"
- "\x5d\xf4\x68\xff\x96\x0a\xa1\x38"
- "\xcf\x43\xda\x71\x08\x7c\x13\xaa"
- "\x1e\xb5\x4c\xe3\x57\xee\x85\x1c"
- "\x90\x27\xbe\x32\xc9\x60\xf7\x6b"
- "\x02\x99\x0d\xa4\x3b\xd2\x46\xdd"
- "\x74\x0b\x7f\x16\xad\x21\xb8\x4f"
- "\xe6\x5a\xf1\x88\x1f\x93\x2a\xc1"
- "\x35\xcc\x63\xfa\x6e\x05\x9c\x10"
- "\xa7\x3e\xd5\x49\xe0\x77\x0e\x82"
- "\x19\xb0\x24\xbb\x52\xe9\x5d\xf4"
- "\x8b\x22\x96\x2d\xc4\x38\xcf\x66"
- "\xfd\x71\x08\x9f\x13\xaa\x41\xd8"
- "\x4c\xe3\x7a\x11\x85\x1c\xb3\x27"
- "\xbe\x55\xec\x60\xf7\x8e\x02\x99"
- "\x30\xc7\x3b\xd2\x69\x00\x74\x0b"
- "\xa2\x16\xad\x44\xdb\x4f\xe6\x7d"
- "\x14\x88\x1f\xb6\x2a\xc1\x58\xef"
- "\x63\xfa\x91\x05\x9c\x33\xca\x3e"
- "\xd5\x6c\x03\x77\x0e\xa5\x19\xb0"
- "\x47\xde\x52\xe9\x80\x17\x8b\x22"
- "\xb9\x2d\xc4\x5b\xf2\x66\xfd\x94"
- "\x08\x9f\x36\xcd\x41\xd8\x6f\x06"
- "\x7a\x11\xa8\x1c\xb3\x4a\xe1\x55"
- "\xec\x83\x1a\x8e\x25\xbc\x30\xc7"
- "\x5e\xf5\x69\x00\x97\x0b\xa2\x39"
- "\xd0\x44\xdb\x72\x09\x7d\x14\xab"
- "\x1f\xb6\x4d\xe4\x58\xef\x86\x1d"
- "\x91\x28\xbf\x33\xca\x61\xf8\x6c"
- "\x03\x9a\x0e\xa5\x3c\xd3\x47\xde"
- "\x75\x0c\x80\x17\xae\x22\xb9\x50"
- "\xe7\x5b\xf2\x89\x20\x94\x2b\xc2"
- "\x36\xcd\x64\xfb\x6f\x06\x9d\x11"
- "\xa8\x3f\xd6\x4a\xe1\x78\x0f\x83"
- "\x1a\xb1\x25\xbc\x53\xea\x5e\xf5"
- "\x8c\x00\x97\x2e\xc5\x39\xd0\x67"
- "\xfe\x72\x09\xa0\x14\xab\x42\xd9"
- "\x4d\xe4\x7b\x12\x86\x1d\xb4\x28"
- "\xbf\x56\xed\x61\xf8\x8f\x03\x9a"
- "\x31\xc8\x3c\xd3\x6a\x01\x75\x0c"
- "\xa3\x17\xae\x45\xdc\x50\xe7\x7e"
- "\x15\x89\x20\xb7\x2b\xc2\x59\xf0"
- "\x64\xfb\x92\x06\x9d\x34\xcb\x3f"
- "\xd6\x6d\x04\x78\x0f\xa6\x1a\xb1"
- "\x48\xdf\x53\xea\x81\x18\x8c\x23"
- "\xba\x2e\xc5\x5c\xf3\x67\xfe\x95"
- "\x09\xa0\x37\xce\x42\xd9\x70\x07"
- "\x7b\x12\xa9\x1d\xb4\x4b\xe2\x56"
- "\xed\x84\x1b\x8f\x26\xbd\x31\xc8"
- "\x5f\xf6\x6a\x01\x98\x0c\xa3\x3a"
- "\xd1\x45\xdc\x73\x0a\x7e\x15\xac"
- "\x20\xb7\x4e\xe5\x59\xf0\x87\x1e"
- "\x92\x29\xc0\x34\xcb\x62\xf9\x6d"
- "\x04\x9b\x0f\xa6\x3d\xd4\x48\xdf"
- "\x76\x0d\x81\x18\xaf\x23\xba\x51"
- "\xe8\x5c\xf3\x8a\x21\x95\x2c\xc3"
- "\x37\xce\x65\xfc\x70\x07\x9e\x12"
- "\xa9\x40\xd7\x4b\xe2\x79\x10\x84"
- "\x1b\xb2\x26\xbd\x54\xeb\x5f\xf6"
- "\x8d\x01\x98\x2f\xc6\x3a\xd1\x68"
- "\xff\x73\x0a\xa1\x15\xac\x43\xda"
- "\x4e\xe5\x7c\x13\x87\x1e\xb5\x29"
- "\xc0\x57\xee\x62\xf9\x90\x04\x9b"
- "\x32\xc9\x3d\xd4\x6b\x02\x76\x0d"
- "\xa4\x18\xaf\x46\xdd\x51\xe8\x7f"
- "\x16\x8a\x21\xb8\x2c\xc3\x5a\xf1"
- "\x65\xfc\x93\x07\x9e\x35\xcc\x40"
- "\xd7\x6e\x05\x79\x10\xa7\x1b\xb2"
- "\x49\xe0\x54\xeb\x82\x19\x8d\x24"
- "\xbb\x2f\xc6\x5d\xf4\x68\xff\x96"
- "\x0a\xa1\x38\xcf\x43\xda\x71\x08"
- "\x7c\x13\xaa\x1e\xb5\x4c\xe3\x57"
- "\xee\x85\x1c\x90\x27\xbe\x32\xc9"
- "\x60\xf7\x6b\x02\x99\x0d\xa4\x3b"
- "\xd2\x46\xdd\x74\x0b\x7f\x16\xad"
- "\x21\xb8\x4f\xe6\x5a\xf1\x88\x1f"
- "\x93\x2a\xc1\x35\xcc\x63\xfa\x6e"
- "\x05\x9c\x10\xa7\x3e\xd5\x49\xe0"
- "\x77\x0e\x82\x19\xb0\x24\xbb\x52"
- "\xe9\x5d\xf4\x8b\x22\x96\x2d\xc4"
- "\x38\xcf\x66\xfd\x71\x08\x9f\x13"
- "\xaa\x41\xd8\x4c\xe3\x7a\x11\x85"
- "\x1c\xb3\x27\xbe\x55\xec\x60\xf7"
- "\x8e\x02\x99\x30\xc7\x3b\xd2\x69"
- "\x00\x74\x0b\xa2\x16\xad\x44\xdb"
- "\x4f\xe6\x7d\x14\x88\x1f\xb6\x2a"
- "\xc1\x58\xef\x63\xfa\x91\x05\x9c"
- "\x33\xca\x3e\xd5\x6c\x03\x77\x0e"
- "\xa5\x19\xb0\x47\xde\x52\xe9\x80"
- "\x17\x8b\x22\xb9\x2d\xc4\x5b\xf2"
- "\x66\xfd\x94\x08\x9f\x36\xcd\x41"
- "\xd8\x6f\x06\x7a\x11\xa8\x1c\xb3"
- "\x4a\xe1\x55\xec\x83\x1a\x8e\x25"
- "\xbc\x30\xc7\x5e\xf5\x69\x00\x97"
- "\x0b\xa2\x39\xd0\x44\xdb\x72\x09"
- "\x7d\x14\xab\x1f\xb6\x4d\xe4\x58"
- "\xef\x86\x1d\x91\x28\xbf\x33\xca"
- "\x61\xf8\x6c\x03\x9a\x0e\xa5\x3c"
- "\xd3\x47\xde\x75\x0c\x80\x17\xae"
- "\x22\xb9\x50\xe7\x5b\xf2\x89\x20"
- "\x94\x2b\xc2\x36\xcd\x64\xfb\x6f"
- "\x06\x9d\x11\xa8\x3f\xd6\x4a\xe1"
- "\x78\x0f\x83\x1a\xb1\x25\xbc\x53"
- "\xea\x5e\xf5\x8c\x00\x97\x2e\xc5"
- "\x39\xd0\x67\xfe\x72\x09\xa0\x14"
- "\xab\x42\xd9\x4d\xe4\x7b\x12\x86"
- "\x1d\xb4\x28\xbf\x56\xed\x61\xf8"
- "\x8f\x03\x9a\x31\xc8\x3c\xd3\x6a"
- "\x01\x75\x0c\xa3\x17\xae\x45\xdc"
- "\x50\xe7\x7e\x15\x89\x20\xb7\x2b"
- "\xc2\x59\xf0\x64\xfb\x92\x06\x9d"
- "\x34\xcb\x3f\xd6\x6d\x04\x78\x0f"
- "\xa6\x1a\xb1\x48\xdf\x53\xea\x81"
- "\x18\x8c\x23\xba\x2e\xc5\x5c\xf3"
- "\x67\xfe\x95\x09\xa0\x37\xce\x42"
- "\xd9\x70\x07\x7b\x12\xa9\x1d\xb4"
- "\x4b\xe2\x56\xed\x84\x1b\x8f\x26"
- "\xbd\x31\xc8\x5f\xf6\x6a\x01\x98",
- .psize = 2048,
- .digest = (u8 *)(u16 []){ 0x23ca },
- }
-};
-
/*
* Streebog test vectors from RFC 6986 and GOST R 34.11-2012
*/
diff --git a/drivers/accel/qaic/qaic_data.c b/drivers/accel/qaic/qaic_data.c
index c20eb63750f5..43aba57b48f0 100644
--- a/drivers/accel/qaic/qaic_data.c
+++ b/drivers/accel/qaic/qaic_data.c
@@ -172,9 +172,10 @@ static void free_slice(struct kref *kref)
static int clone_range_of_sgt_for_slice(struct qaic_device *qdev, struct sg_table **sgt_out,
struct sg_table *sgt_in, u64 size, u64 offset)
{
- int total_len, len, nents, offf = 0, offl = 0;
struct scatterlist *sg, *sgn, *sgf, *sgl;
+ unsigned int len, nents, offf, offl;
struct sg_table *sgt;
+ size_t total_len;
int ret, j;
/* find out number of relevant nents needed for this mem */
@@ -182,6 +183,8 @@ static int clone_range_of_sgt_for_slice(struct qaic_device *qdev, struct sg_tabl
sgf = NULL;
sgl = NULL;
nents = 0;
+ offf = 0;
+ offl = 0;
size = size ? size : PAGE_SIZE;
for_each_sgtable_dma_sg(sgt_in, sg, j) {
@@ -554,6 +557,7 @@ static bool invalid_sem(struct qaic_sem *sem)
static int qaic_validate_req(struct qaic_device *qdev, struct qaic_attach_slice_entry *slice_ent,
u32 count, u64 total_size)
{
+ u64 total;
int i;
for (i = 0; i < count; i++) {
@@ -563,7 +567,8 @@ static int qaic_validate_req(struct qaic_device *qdev, struct qaic_attach_slice_
invalid_sem(&slice_ent[i].sem2) || invalid_sem(&slice_ent[i].sem3))
return -EINVAL;
- if (slice_ent[i].offset + slice_ent[i].size > total_size)
+ if (check_add_overflow(slice_ent[i].offset, slice_ent[i].size, &total) ||
+ total > total_size)
return -EINVAL;
}
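
The qaic fix above replaces an open-coded `offset + size > total_size` test, which can silently wrap for large u64 inputs, with check_add_overflow() from <linux/overflow.h>. A minimal sketch of the same overflow-safe bounds check, outside the driver and with a hypothetical helper name:

/* Hypothetical helper mirroring the pattern used in qaic_validate_req(). */
static bool range_fits(u64 offset, u64 size, u64 total_size)
{
	u64 end;

	/* check_add_overflow() returns true if offset + size wrapped around. */
	if (check_add_overflow(offset, size, &end))
		return false;

	return end <= total_size;
}
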
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index d81b55f5068c..7f10aa38269d 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -452,7 +452,7 @@ config ACPI_SBS
the modules will be called sbs and sbshc.
config ACPI_HED
- tristate "Hardware Error Device"
+ bool "Hardware Error Device"
help
This driver supports the Hardware Error Device (PNP0C33),
which is used to report some hardware errors notified via
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index 40208a0f5dfb..797070fc9a3f 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -5,6 +5,10 @@
ccflags-$(CONFIG_ACPI_DEBUG) += -DACPI_DEBUG_OUTPUT
+ifdef CONFIG_TRACE_BRANCH_PROFILING
+CFLAGS_processor_idle.o += -DDISABLE_BRANCH_PROFILING
+endif
+
#
# ACPI Boot-Time Table Parsing
#
diff --git a/drivers/acpi/acpi_pnp.c b/drivers/acpi/acpi_pnp.c
index 01abf26764b0..435ec60a9682 100644
--- a/drivers/acpi/acpi_pnp.c
+++ b/drivers/acpi/acpi_pnp.c
@@ -120,8 +120,6 @@ static const struct acpi_device_id acpi_pnp_device_ids[] = {
{"IBM0071"},
/* smsc-ircc2 */
{"SMCf010"},
- /* sb1000 */
- {"GIC1000"},
/* parport_pc */
{"PNP0400"}, /* Standard LPT Printer Port */
{"PNP0401"}, /* ECP Printer Port */
diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c
index a972831dbd66..efdadc74e3f4 100644
--- a/drivers/acpi/acpi_video.c
+++ b/drivers/acpi/acpi_video.c
@@ -27,6 +27,7 @@
#include <linux/acpi.h>
#include <acpi/video.h>
#include <linux/uaccess.h>
+#include <linux/string_choices.h>
#define ACPI_VIDEO_BUS_NAME "Video Bus"
#define ACPI_VIDEO_DEVICE_NAME "Video Device"
@@ -2039,9 +2040,9 @@ static int acpi_video_bus_add(struct acpi_device *device)
pr_info("%s [%s] (multi-head: %s rom: %s post: %s)\n",
ACPI_VIDEO_DEVICE_NAME, acpi_device_bid(device),
- video->flags.multihead ? "yes" : "no",
- video->flags.rom ? "yes" : "no",
- video->flags.post ? "yes" : "no");
+ str_yes_no(video->flags.multihead),
+ str_yes_no(video->flags.rom),
+ str_yes_no(video->flags.post));
mutex_lock(&video_list_lock);
list_add_tail(&video->entry, &video_bus_head);
mutex_unlock(&video_list_lock);
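
str_yes_no() comes from the <linux/string_choices.h> include added above; it, like the related str_on_off() and str_up_down() helpers used later in this diff, maps a plain truth value to a fixed string, so the format strings stay unchanged while the ternaries go away. A small illustrative use, not taken from the patch and with a hypothetical value:

bool enabled = read_feature_bit();		/* hypothetical source of the flag */

pr_info("feature: %s\n", str_yes_no(enabled));	/* prints "yes" or "no"  */
pr_info("power:   %s\n", str_on_off(enabled));	/* prints "on"  or "off" */
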
diff --git a/drivers/acpi/arm64/dma.c b/drivers/acpi/arm64/dma.c
index 52b2abf88689..f30f138352b7 100644
--- a/drivers/acpi/arm64/dma.c
+++ b/drivers/acpi/arm64/dma.c
@@ -26,6 +26,11 @@ void acpi_arch_dma_setup(struct device *dev)
else
end = (1ULL << 32) - 1;
+ if (dev->dma_range_map) {
+ dev_dbg(dev, "dma_range_map already set\n");
+ return;
+ }
+
ret = acpi_dma_get_range(dev, &map);
if (!ret && map) {
end = dma_range_map_max(map);
diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c
index 7773e6b860e7..90b09840536d 100644
--- a/drivers/acpi/button.c
+++ b/drivers/acpi/button.c
@@ -24,6 +24,7 @@
#define ACPI_BUTTON_CLASS "button"
#define ACPI_BUTTON_FILE_STATE "state"
#define ACPI_BUTTON_TYPE_UNKNOWN 0x00
+#define ACPI_BUTTON_NOTIFY_WAKE 0x02
#define ACPI_BUTTON_NOTIFY_STATUS 0x80
#define ACPI_BUTTON_SUBCLASS_POWER "power"
@@ -443,7 +444,12 @@ static void acpi_button_notify(acpi_handle handle, u32 event, void *data)
struct input_dev *input;
int keycode;
- if (event != ACPI_BUTTON_NOTIFY_STATUS) {
+ switch (event) {
+ case ACPI_BUTTON_NOTIFY_STATUS:
+ break;
+ case ACPI_BUTTON_NOTIFY_WAKE:
+ break;
+ default:
acpi_handle_debug(device->handle, "Unsupported event [0x%x]\n",
event);
return;
@@ -629,7 +635,7 @@ static int acpi_button_add(struct acpi_device *device)
break;
default:
status = acpi_install_notify_handler(device->handle,
- ACPI_DEVICE_NOTIFY, handler,
+ ACPI_ALL_NOTIFY, handler,
device);
break;
}
diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
index 3b4d048c4941..dbd4446025ec 100644
--- a/drivers/acpi/device_pm.c
+++ b/drivers/acpi/device_pm.c
@@ -1161,7 +1161,7 @@ EXPORT_SYMBOL_GPL(acpi_subsys_complete);
*/
int acpi_subsys_suspend(struct device *dev)
{
- if (!dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) ||
+ if (!dev_pm_smart_suspend(dev) ||
acpi_dev_needs_resume(dev, ACPI_COMPANION(dev)))
pm_runtime_resume(dev);
@@ -1320,7 +1320,7 @@ EXPORT_SYMBOL_GPL(acpi_subsys_restore_early);
*/
int acpi_subsys_poweroff(struct device *dev)
{
- if (!dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) ||
+ if (!dev_pm_smart_suspend(dev) ||
acpi_dev_needs_resume(dev, ACPI_COMPANION(dev)))
pm_runtime_resume(dev);
diff --git a/drivers/acpi/fan.h b/drivers/acpi/fan.h
index 488b51e2cb31..15eba1c70e66 100644
--- a/drivers/acpi/fan.h
+++ b/drivers/acpi/fan.h
@@ -49,6 +49,7 @@ struct acpi_fan_fst {
struct acpi_fan {
bool acpi4;
+ bool has_fst;
struct acpi_fan_fif fif;
struct acpi_fan_fps *fps;
int fps_count;
diff --git a/drivers/acpi/fan_attr.c b/drivers/acpi/fan_attr.c
index f4f6e2381f1d..22d29ac2447c 100644
--- a/drivers/acpi/fan_attr.c
+++ b/drivers/acpi/fan_attr.c
@@ -75,15 +75,6 @@ int acpi_fan_create_attributes(struct acpi_device *device)
struct acpi_fan *fan = acpi_driver_data(device);
int i, status;
- sysfs_attr_init(&fan->fine_grain_control.attr);
- fan->fine_grain_control.show = show_fine_grain_control;
- fan->fine_grain_control.store = NULL;
- fan->fine_grain_control.attr.name = "fine_grain_control";
- fan->fine_grain_control.attr.mode = 0444;
- status = sysfs_create_file(&device->dev.kobj, &fan->fine_grain_control.attr);
- if (status)
- return status;
-
/* _FST is present if we are here */
sysfs_attr_init(&fan->fst_speed.attr);
fan->fst_speed.show = show_fan_speed;
@@ -92,7 +83,19 @@ int acpi_fan_create_attributes(struct acpi_device *device)
fan->fst_speed.attr.mode = 0444;
status = sysfs_create_file(&device->dev.kobj, &fan->fst_speed.attr);
if (status)
- goto rem_fine_grain_attr;
+ return status;
+
+ if (!fan->acpi4)
+ return 0;
+
+ sysfs_attr_init(&fan->fine_grain_control.attr);
+ fan->fine_grain_control.show = show_fine_grain_control;
+ fan->fine_grain_control.store = NULL;
+ fan->fine_grain_control.attr.name = "fine_grain_control";
+ fan->fine_grain_control.attr.mode = 0444;
+ status = sysfs_create_file(&device->dev.kobj, &fan->fine_grain_control.attr);
+ if (status)
+ goto rem_fst_attr;
for (i = 0; i < fan->fps_count; ++i) {
struct acpi_fan_fps *fps = &fan->fps[i];
@@ -109,18 +112,18 @@ int acpi_fan_create_attributes(struct acpi_device *device)
for (j = 0; j < i; ++j)
sysfs_remove_file(&device->dev.kobj, &fan->fps[j].dev_attr.attr);
- goto rem_fst_attr;
+ goto rem_fine_grain_attr;
}
}
return 0;
-rem_fst_attr:
- sysfs_remove_file(&device->dev.kobj, &fan->fst_speed.attr);
-
rem_fine_grain_attr:
sysfs_remove_file(&device->dev.kobj, &fan->fine_grain_control.attr);
+rem_fst_attr:
+ sysfs_remove_file(&device->dev.kobj, &fan->fst_speed.attr);
+
return status;
}
@@ -129,9 +132,13 @@ void acpi_fan_delete_attributes(struct acpi_device *device)
struct acpi_fan *fan = acpi_driver_data(device);
int i;
+ sysfs_remove_file(&device->dev.kobj, &fan->fst_speed.attr);
+
+ if (!fan->acpi4)
+ return;
+
for (i = 0; i < fan->fps_count; ++i)
sysfs_remove_file(&device->dev.kobj, &fan->fps[i].dev_attr.attr);
- sysfs_remove_file(&device->dev.kobj, &fan->fst_speed.attr);
sysfs_remove_file(&device->dev.kobj, &fan->fine_grain_control.attr);
}
diff --git a/drivers/acpi/fan_core.c b/drivers/acpi/fan_core.c
index 10016f52f4f4..8ad12ad3aaaf 100644
--- a/drivers/acpi/fan_core.c
+++ b/drivers/acpi/fan_core.c
@@ -203,12 +203,16 @@ static const struct thermal_cooling_device_ops fan_cooling_ops = {
* --------------------------------------------------------------------------
*/
+static bool acpi_fan_has_fst(struct acpi_device *device)
+{
+ return acpi_has_method(device->handle, "_FST");
+}
+
static bool acpi_fan_is_acpi4(struct acpi_device *device)
{
return acpi_has_method(device->handle, "_FIF") &&
acpi_has_method(device->handle, "_FPS") &&
- acpi_has_method(device->handle, "_FSL") &&
- acpi_has_method(device->handle, "_FST");
+ acpi_has_method(device->handle, "_FSL");
}
static int acpi_fan_get_fif(struct acpi_device *device)
@@ -327,7 +331,12 @@ static int acpi_fan_probe(struct platform_device *pdev)
device->driver_data = fan;
platform_set_drvdata(pdev, fan);
- if (acpi_fan_is_acpi4(device)) {
+ if (acpi_fan_has_fst(device)) {
+ fan->has_fst = true;
+ fan->acpi4 = acpi_fan_is_acpi4(device);
+ }
+
+ if (fan->acpi4) {
result = acpi_fan_get_fif(device);
if (result)
return result;
@@ -335,7 +344,9 @@ static int acpi_fan_probe(struct platform_device *pdev)
result = acpi_fan_get_fps(device);
if (result)
return result;
+ }
+ if (fan->has_fst) {
result = devm_acpi_fan_create_hwmon(device);
if (result)
return result;
@@ -343,9 +354,9 @@ static int acpi_fan_probe(struct platform_device *pdev)
result = acpi_fan_create_attributes(device);
if (result)
return result;
+ }
- fan->acpi4 = true;
- } else {
+ if (!fan->acpi4) {
result = acpi_device_update_power(device, NULL);
if (result) {
dev_err(&device->dev, "Failed to set initial power state\n");
@@ -391,7 +402,7 @@ err_remove_link:
err_unregister:
thermal_cooling_device_unregister(cdev);
err_end:
- if (fan->acpi4)
+ if (fan->has_fst)
acpi_fan_delete_attributes(device);
return result;
@@ -401,7 +412,7 @@ static void acpi_fan_remove(struct platform_device *pdev)
{
struct acpi_fan *fan = platform_get_drvdata(pdev);
- if (fan->acpi4) {
+ if (fan->has_fst) {
struct acpi_device *device = ACPI_COMPANION(&pdev->dev);
acpi_fan_delete_attributes(device);
diff --git a/drivers/acpi/fan_hwmon.c b/drivers/acpi/fan_hwmon.c
index bd0d31a398fa..e8d90605106e 100644
--- a/drivers/acpi/fan_hwmon.c
+++ b/drivers/acpi/fan_hwmon.c
@@ -43,6 +43,10 @@ static umode_t acpi_fan_hwmon_is_visible(const void *drvdata, enum hwmon_sensor_
case hwmon_fan_input:
return 0444;
case hwmon_fan_target:
+ /* Only acpi4 fans support fan control. */
+ if (!fan->acpi4)
+ return 0;
+
/*
* When in fine grain control mode, not every fan control value
* has an associated fan performance state.
@@ -57,6 +61,10 @@ static umode_t acpi_fan_hwmon_is_visible(const void *drvdata, enum hwmon_sensor_
case hwmon_power:
switch (attr) {
case hwmon_power_input:
+ /* Only acpi4 fans support fan control. */
+ if (!fan->acpi4)
+ return 0;
+
/*
* When in fine grain control mode, not every fan control value
* has an associated fan performance state.
diff --git a/drivers/acpi/hed.c b/drivers/acpi/hed.c
index 7652515a6be1..3499f86c411e 100644
--- a/drivers/acpi/hed.c
+++ b/drivers/acpi/hed.c
@@ -80,7 +80,12 @@ static struct acpi_driver acpi_hed_driver = {
.remove = acpi_hed_remove,
},
};
-module_acpi_driver(acpi_hed_driver);
+
+static int __init acpi_hed_driver_init(void)
+{
+ return acpi_bus_register_driver(&acpi_hed_driver);
+}
+subsys_initcall(acpi_hed_driver_init);
MODULE_AUTHOR("Huang Ying");
MODULE_DESCRIPTION("ACPI Hardware Error Device Driver");
diff --git a/drivers/acpi/numa/srat.c b/drivers/acpi/numa/srat.c
index 00ac0d7bb8c9..ce815d7cb8f6 100644
--- a/drivers/acpi/numa/srat.c
+++ b/drivers/acpi/numa/srat.c
@@ -51,6 +51,7 @@ int node_to_pxm(int node)
return PXM_INVAL;
return node_to_pxm_map[node];
}
+EXPORT_SYMBOL_GPL(node_to_pxm);
static void __acpi_map_pxm_to_node(int pxm, int node)
{
diff --git a/drivers/acpi/platform_profile.c b/drivers/acpi/platform_profile.c
index 2ad53cc6aae5..671407fc2bd4 100644
--- a/drivers/acpi/platform_profile.c
+++ b/drivers/acpi/platform_profile.c
@@ -21,9 +21,15 @@ struct platform_profile_handler {
struct device dev;
int minor;
unsigned long choices[BITS_TO_LONGS(PLATFORM_PROFILE_LAST)];
+ unsigned long hidden_choices[BITS_TO_LONGS(PLATFORM_PROFILE_LAST)];
const struct platform_profile_ops *ops;
};
+struct aggregate_choices_data {
+ unsigned long aggregate[BITS_TO_LONGS(PLATFORM_PROFILE_LAST)];
+ int count;
+};
+
static const char * const profile_names[] = {
[PLATFORM_PROFILE_LOW_POWER] = "low-power",
[PLATFORM_PROFILE_COOL] = "cool",
@@ -73,7 +79,7 @@ static int _store_class_profile(struct device *dev, void *data)
lockdep_assert_held(&profile_lock);
handler = to_pprof_handler(dev);
- if (!test_bit(*bit, handler->choices))
+ if (!test_bit(*bit, handler->choices) && !test_bit(*bit, handler->hidden_choices))
return -EOPNOTSUPP;
return handler->ops->profile_set(dev, *bit);
@@ -239,53 +245,85 @@ static const struct class platform_profile_class = {
/**
* _aggregate_choices - Aggregate the available profile choices
* @dev: The device
- * @data: The available profile choices
+ * @arg: struct aggregate_choices_data
*
* Return: 0 on success, -errno on failure
*/
-static int _aggregate_choices(struct device *dev, void *data)
+static int _aggregate_choices(struct device *dev, void *arg)
{
+ unsigned long tmp[BITS_TO_LONGS(PLATFORM_PROFILE_LAST)];
+ struct aggregate_choices_data *data = arg;
struct platform_profile_handler *handler;
- unsigned long *aggregate = data;
lockdep_assert_held(&profile_lock);
handler = to_pprof_handler(dev);
- if (test_bit(PLATFORM_PROFILE_LAST, aggregate))
- bitmap_copy(aggregate, handler->choices, PLATFORM_PROFILE_LAST);
+ bitmap_or(tmp, handler->choices, handler->hidden_choices, PLATFORM_PROFILE_LAST);
+ if (test_bit(PLATFORM_PROFILE_LAST, data->aggregate))
+ bitmap_copy(data->aggregate, tmp, PLATFORM_PROFILE_LAST);
else
- bitmap_and(aggregate, handler->choices, aggregate, PLATFORM_PROFILE_LAST);
+ bitmap_and(data->aggregate, tmp, data->aggregate, PLATFORM_PROFILE_LAST);
+ data->count++;
return 0;
}
/**
- * platform_profile_choices_show - Show the available profile choices for legacy sysfs interface
+ * _remove_hidden_choices - Remove hidden choices from aggregate data
* @dev: The device
+ * @arg: struct aggregate_choices_data
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int _remove_hidden_choices(struct device *dev, void *arg)
+{
+ struct aggregate_choices_data *data = arg;
+ struct platform_profile_handler *handler;
+
+ lockdep_assert_held(&profile_lock);
+ handler = to_pprof_handler(dev);
+ bitmap_andnot(data->aggregate, handler->choices,
+ handler->hidden_choices, PLATFORM_PROFILE_LAST);
+
+ return 0;
+}
+
+/**
+ * platform_profile_choices_show - Show the available profile choices for legacy sysfs interface
+ * @kobj: The kobject
* @attr: The attribute
* @buf: The buffer to write to
*
* Return: The number of bytes written
*/
-static ssize_t platform_profile_choices_show(struct device *dev,
- struct device_attribute *attr,
+static ssize_t platform_profile_choices_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
char *buf)
{
- unsigned long aggregate[BITS_TO_LONGS(PLATFORM_PROFILE_LAST)];
+ struct aggregate_choices_data data = {
+ .aggregate = { [0 ... BITS_TO_LONGS(PLATFORM_PROFILE_LAST) - 1] = ~0UL },
+ .count = 0,
+ };
int err;
- set_bit(PLATFORM_PROFILE_LAST, aggregate);
+ set_bit(PLATFORM_PROFILE_LAST, data.aggregate);
scoped_cond_guard(mutex_intr, return -ERESTARTSYS, &profile_lock) {
err = class_for_each_device(&platform_profile_class, NULL,
- aggregate, _aggregate_choices);
+ &data, _aggregate_choices);
if (err)
return err;
+ if (data.count == 1) {
+ err = class_for_each_device(&platform_profile_class, NULL,
+ &data, _remove_hidden_choices);
+ if (err)
+ return err;
+ }
}
/* no profile handler registered any more */
- if (bitmap_empty(aggregate, PLATFORM_PROFILE_LAST))
+ if (bitmap_empty(data.aggregate, PLATFORM_PROFILE_LAST))
return -EINVAL;
- return _commmon_choices_show(aggregate, buf);
+ return _commmon_choices_show(data.aggregate, buf);
}
/**
@@ -333,14 +371,14 @@ static int _store_and_notify(struct device *dev, void *data)
/**
* platform_profile_show - Show the current profile for legacy sysfs interface
- * @dev: The device
+ * @kobj: The kobject
* @attr: The attribute
* @buf: The buffer to write to
*
* Return: The number of bytes written
*/
-static ssize_t platform_profile_show(struct device *dev,
- struct device_attribute *attr,
+static ssize_t platform_profile_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
char *buf)
{
enum platform_profile_option profile = PLATFORM_PROFILE_LAST;
@@ -362,18 +400,21 @@ static ssize_t platform_profile_show(struct device *dev,
/**
* platform_profile_store - Set the profile for legacy sysfs interface
- * @dev: The device
+ * @kobj: The kobject
* @attr: The attribute
* @buf: The buffer to read from
* @count: The number of bytes to read
*
* Return: The number of bytes read
*/
-static ssize_t platform_profile_store(struct device *dev,
- struct device_attribute *attr,
+static ssize_t platform_profile_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
const char *buf, size_t count)
{
- unsigned long choices[BITS_TO_LONGS(PLATFORM_PROFILE_LAST)];
+ struct aggregate_choices_data data = {
+ .aggregate = { [0 ... BITS_TO_LONGS(PLATFORM_PROFILE_LAST) - 1] = ~0UL },
+ .count = 0,
+ };
int ret;
int i;
@@ -381,13 +422,13 @@ static ssize_t platform_profile_store(struct device *dev,
i = sysfs_match_string(profile_names, buf);
if (i < 0 || i == PLATFORM_PROFILE_CUSTOM)
return -EINVAL;
- set_bit(PLATFORM_PROFILE_LAST, choices);
+ set_bit(PLATFORM_PROFILE_LAST, data.aggregate);
scoped_cond_guard(mutex_intr, return -ERESTARTSYS, &profile_lock) {
ret = class_for_each_device(&platform_profile_class, NULL,
- choices, _aggregate_choices);
+ &data, _aggregate_choices);
if (ret)
return ret;
- if (!test_bit(i, choices))
+ if (!test_bit(i, data.aggregate))
return -EOPNOTSUPP;
ret = class_for_each_device(&platform_profile_class, NULL, &i,
@@ -401,12 +442,12 @@ static ssize_t platform_profile_store(struct device *dev,
return count;
}
-static DEVICE_ATTR_RO(platform_profile_choices);
-static DEVICE_ATTR_RW(platform_profile);
+static struct kobj_attribute attr_platform_profile_choices = __ATTR_RO(platform_profile_choices);
+static struct kobj_attribute attr_platform_profile = __ATTR_RW(platform_profile);
static struct attribute *platform_profile_attrs[] = {
- &dev_attr_platform_profile_choices.attr,
- &dev_attr_platform_profile.attr,
+ &attr_platform_profile_choices.attr,
+ &attr_platform_profile.attr,
NULL
};
@@ -453,12 +494,15 @@ EXPORT_SYMBOL_GPL(platform_profile_notify);
*/
int platform_profile_cycle(void)
{
+ struct aggregate_choices_data data = {
+ .aggregate = { [0 ... BITS_TO_LONGS(PLATFORM_PROFILE_LAST) - 1] = ~0UL },
+ .count = 0,
+ };
enum platform_profile_option next = PLATFORM_PROFILE_LAST;
enum platform_profile_option profile = PLATFORM_PROFILE_LAST;
- unsigned long choices[BITS_TO_LONGS(PLATFORM_PROFILE_LAST)];
int err;
- set_bit(PLATFORM_PROFILE_LAST, choices);
+ set_bit(PLATFORM_PROFILE_LAST, data.aggregate);
scoped_cond_guard(mutex_intr, return -ERESTARTSYS, &profile_lock) {
err = class_for_each_device(&platform_profile_class, NULL,
&profile, _aggregate_profiles);
@@ -470,14 +514,14 @@ int platform_profile_cycle(void)
return -EINVAL;
err = class_for_each_device(&platform_profile_class, NULL,
- choices, _aggregate_choices);
+ &data, _aggregate_choices);
if (err)
return err;
/* never iterate into a custom if all drivers supported it */
- clear_bit(PLATFORM_PROFILE_CUSTOM, choices);
+ clear_bit(PLATFORM_PROFILE_CUSTOM, data.aggregate);
- next = find_next_bit_wrap(choices,
+ next = find_next_bit_wrap(data.aggregate,
PLATFORM_PROFILE_LAST,
profile + 1);
@@ -532,6 +576,14 @@ struct device *platform_profile_register(struct device *dev, const char *name,
return ERR_PTR(-EINVAL);
}
+ if (ops->hidden_choices) {
+ err = ops->hidden_choices(drvdata, pprof->hidden_choices);
+ if (err) {
+ dev_err(dev, "platform_profile hidden_choices failed\n");
+ return ERR_PTR(err);
+ }
+ }
+
guard(mutex)(&profile_lock);
/* create class interface for individual handler */
@@ -575,24 +627,23 @@ EXPORT_SYMBOL_GPL(platform_profile_register);
/**
* platform_profile_remove - Unregisters a platform profile class device
* @dev: Class device
- *
- * Return: 0
*/
-int platform_profile_remove(struct device *dev)
+void platform_profile_remove(struct device *dev)
{
- struct platform_profile_handler *pprof = to_pprof_handler(dev);
- int id;
+ struct platform_profile_handler *pprof;
+
+ if (IS_ERR_OR_NULL(dev))
+ return;
+
+ pprof = to_pprof_handler(dev);
+
guard(mutex)(&profile_lock);
- id = pprof->minor;
+ ida_free(&platform_profile_ida, pprof->minor);
device_unregister(&pprof->dev);
- ida_free(&platform_profile_ida, id);
sysfs_notify(acpi_kobj, NULL, "platform_profile");
-
sysfs_update_group(acpi_kobj, &platform_profile_group);
-
- return 0;
}
EXPORT_SYMBOL_GPL(platform_profile_remove);
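
The hidden_choices handling above is ordinary kernel bitmap arithmetic: per handler, the profiles it accepts at all are the union of choices and hidden_choices; across handlers the aggregate is the intersection; and when only one handler is registered, the hidden entries are masked back out before they reach sysfs. A simplified sketch of that aggregation, seeding with bitmap_fill() instead of the PLATFORM_PROFILE_LAST sentinel bit the driver uses:

unsigned long tmp[BITS_TO_LONGS(PLATFORM_PROFILE_LAST)];
unsigned long agg[BITS_TO_LONGS(PLATFORM_PROFILE_LAST)];

bitmap_fill(agg, PLATFORM_PROFILE_LAST);	/* start from "everything" */

/* For each registered handler: union of public and hidden choices ... */
bitmap_or(tmp, handler->choices, handler->hidden_choices, PLATFORM_PROFILE_LAST);
/* ... intersected into the running aggregate. */
bitmap_and(agg, tmp, agg, PLATFORM_PROFILE_LAST);

/* With exactly one handler, drop the hidden profiles again: */
bitmap_andnot(agg, handler->choices, handler->hidden_choices, PLATFORM_PROFILE_LAST);
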
diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c
index 25174c24d3d7..b7243d7563b1 100644
--- a/drivers/acpi/power.c
+++ b/drivers/acpi/power.c
@@ -29,6 +29,7 @@
#include <linux/init.h>
#include <linux/types.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
#include <linux/pm_runtime.h>
#include <linux/sysfs.h>
#include <linux/acpi.h>
@@ -197,7 +198,7 @@ static int __get_state(acpi_handle handle, u8 *state)
cur_state = sta & ACPI_POWER_RESOURCE_STATE_ON;
acpi_handle_debug(handle, "Power resource is %s\n",
- cur_state ? "on" : "off");
+ str_on_off(cur_state));
*state = cur_state;
return 0;
@@ -240,7 +241,7 @@ static int acpi_power_get_list_state(struct list_head *list, u8 *state)
break;
}
- pr_debug("Power resource list is %s\n", cur_state ? "on" : "off");
+ pr_debug("Power resource list is %s\n", str_on_off(cur_state));
*state = cur_state;
return 0;
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 698897b29de2..586cc7d1d8aa 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -590,6 +590,8 @@ static void acpi_idle_play_dead(struct cpuidle_device *dev, int index)
raw_safe_halt();
else if (cx->entry_method == ACPI_CSTATE_SYSTEMIO) {
io_idle(cx->address);
+ } else if (cx->entry_method == ACPI_CSTATE_FFH) {
+ acpi_processor_ffh_play_dead(cx);
} else
return;
}
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 9f4efa8f75a6..fb1fe9f3b1a3 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -1632,13 +1632,6 @@ static int acpi_iommu_configure_id(struct device *dev, const u32 *id_in)
err = viot_iommu_configure(dev);
mutex_unlock(&iommu_probe_device_lock);
- /*
- * If we have reason to believe the IOMMU driver missed the initial
- * iommu_probe_device() call for dev, replay it to get things in order.
- */
- if (!err && dev->bus)
- err = iommu_probe_device(dev);
-
return err;
}
diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
index 95982c098d5b..0c874186f8ae 100644
--- a/drivers/acpi/thermal.c
+++ b/drivers/acpi/thermal.c
@@ -803,7 +803,7 @@ static int acpi_thermal_add(struct acpi_device *device)
acpi_thermal_aml_dependency_fix(tz);
- /* Get trip points [_CRT, _PSV, etc.] (required). */
+ /* Get trip points [_ACi, _PSV, etc.] (required). */
acpi_thermal_get_trip_points(tz);
crit_temp = acpi_thermal_get_critical_trip(tz);
diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c
index 8ef259b4d037..71482d639a6d 100644
--- a/drivers/amba/bus.c
+++ b/drivers/amba/bus.c
@@ -364,7 +364,8 @@ static int amba_dma_configure(struct device *dev)
ret = acpi_dma_configure(dev, attr);
}
- if (!ret && !drv->driver_managed_dma) {
+ /* @drv may not be valid when we're called from the IOMMU layer */
+ if (!ret && dev->driver && !drv->driver_managed_dma) {
ret = iommu_device_use_default_domain(dev);
if (ret)
arch_teardown_dma_ops(dev);
diff --git a/drivers/android/binderfs.c b/drivers/android/binderfs.c
index bc6bae76ccaf..94c6446604fc 100644
--- a/drivers/android/binderfs.c
+++ b/drivers/android/binderfs.c
@@ -274,6 +274,7 @@ static void binderfs_evict_inode(struct inode *inode)
mutex_unlock(&binderfs_minors_mutex);
if (refcount_dec_and_test(&device->ref)) {
+ hlist_del_init(&device->hlist);
kfree(device->context.name);
kfree(device);
}
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index f813dbdc2346..163ac909bd06 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -63,6 +63,7 @@ enum board_ids {
board_ahci_pcs_quirk_no_devslp,
board_ahci_pcs_quirk_no_sntf,
board_ahci_yes_fbs,
+ board_ahci_yes_fbs_atapi_dma,
/* board IDs for specific chipsets in alphabetical order */
board_ahci_al,
@@ -188,6 +189,14 @@ static const struct ata_port_info ahci_port_info[] = {
.udma_mask = ATA_UDMA6,
.port_ops = &ahci_ops,
},
+ [board_ahci_yes_fbs_atapi_dma] = {
+ AHCI_HFLAGS (AHCI_HFLAG_YES_FBS |
+ AHCI_HFLAG_ATAPI_DMA_QUIRK),
+ .flags = AHCI_FLAG_COMMON,
+ .pio_mask = ATA_PIO4,
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &ahci_ops,
+ },
/* by chipsets */
[board_ahci_al] = {
AHCI_HFLAGS (AHCI_HFLAG_NO_PMP | AHCI_HFLAG_NO_MSI),
@@ -589,6 +598,8 @@ static const struct pci_device_id ahci_pci_tbl[] = {
.driver_data = board_ahci_yes_fbs },
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x91a3),
.driver_data = board_ahci_yes_fbs },
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9215),
+ .driver_data = board_ahci_yes_fbs_atapi_dma },
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9230),
.driver_data = board_ahci_yes_fbs },
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9235),
@@ -1665,13 +1676,15 @@ static int ahci_get_irq_vector(struct ata_host *host, int port)
return pci_irq_vector(to_pci_dev(host->dev), port);
}
-static int ahci_init_msi(struct pci_dev *pdev, unsigned int n_ports,
+static void ahci_init_irq(struct pci_dev *pdev, unsigned int n_ports,
struct ahci_host_priv *hpriv)
{
int nvec;
- if (hpriv->flags & AHCI_HFLAG_NO_MSI)
- return -ENODEV;
+ if (hpriv->flags & AHCI_HFLAG_NO_MSI) {
+ pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_INTX);
+ return;
+ }
/*
* If number of MSIs is less than number of ports then Sharing Last
@@ -1685,7 +1698,7 @@ static int ahci_init_msi(struct pci_dev *pdev, unsigned int n_ports,
if (!(readl(hpriv->mmio + HOST_CTL) & HOST_MRSM)) {
hpriv->get_irq_vector = ahci_get_irq_vector;
hpriv->flags |= AHCI_HFLAG_MULTI_MSI;
- return nvec;
+ return;
}
/*
@@ -1700,12 +1713,13 @@ static int ahci_init_msi(struct pci_dev *pdev, unsigned int n_ports,
/*
* If the host is not capable of supporting per-port vectors, fall
- * back to single MSI before finally attempting single MSI-X.
+ * back to single MSI before finally attempting single MSI-X or
+ * a legacy INTx.
*/
nvec = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
if (nvec == 1)
- return nvec;
- return pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSIX);
+ return;
+ pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSIX | PCI_IRQ_INTX);
}
static void ahci_mark_external_port(struct ata_port *ap)
@@ -1985,10 +1999,8 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
}
host->private_data = hpriv;
- if (ahci_init_msi(pdev, n_ports, hpriv) < 0) {
- /* legacy intx interrupts */
- pcim_intx(pdev, 1);
- }
+ ahci_init_irq(pdev, n_ports, hpriv);
+
hpriv->irq = pci_irq_vector(pdev, 0);
if (!(hpriv->cap & HOST_CAP_SSS) || ahci_ignore_sss)
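
The ahci_init_irq() rework leans on the fact that pci_alloc_irq_vectors() accepts a mask of allowed interrupt types and tries them in preference order, so the INTx fallback no longer needs a separate pcim_intx() call. A hedged sketch of the common call pattern in a PCI driver:

int nvec, irq;

/* Try MSI-X, then MSI, then legacy INTx, asking for 1..n_ports vectors. */
nvec = pci_alloc_irq_vectors(pdev, 1, n_ports,
			     PCI_IRQ_MSIX | PCI_IRQ_MSI | PCI_IRQ_INTX);
if (nvec < 0)
	return nvec;

/* Map logical vector 0 to a Linux IRQ number for request_irq(). */
irq = pci_irq_vector(pdev, 0);
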
diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h
index c842e2de6ef9..2c10c8f440d1 100644
--- a/drivers/ata/ahci.h
+++ b/drivers/ata/ahci.h
@@ -246,6 +246,7 @@ enum {
AHCI_HFLAG_NO_SXS = BIT(26), /* SXS not supported */
AHCI_HFLAG_43BIT_ONLY = BIT(27), /* 43bit DMA addr limit */
AHCI_HFLAG_INTEL_PCS_QUIRK = BIT(28), /* apply Intel PCS quirk */
+ AHCI_HFLAG_ATAPI_DMA_QUIRK = BIT(29), /* force ATAPI to use DMA */
/* ap->flags bits */
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
index e7ace4b10f15..22afa4ff860d 100644
--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -1322,6 +1322,10 @@ static void ahci_dev_config(struct ata_device *dev)
{
struct ahci_host_priv *hpriv = dev->link->ap->host->private_data;
+ if ((dev->class == ATA_DEV_ATAPI) &&
+ (hpriv->flags & AHCI_HFLAG_ATAPI_DMA_QUIRK))
+ dev->quirks |= ATA_QUIRK_ATAPI_MOD16_DMA;
+
if (hpriv->flags & AHCI_HFLAG_SECT255) {
dev->max_sectors = 255;
ata_dev_info(dev,
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index c085dd81ebe7..773799cfd443 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -88,6 +88,7 @@ struct ata_force_param {
unsigned int xfer_mask;
unsigned int quirk_on;
unsigned int quirk_off;
+ unsigned int pflags_on;
u16 lflags_on;
u16 lflags_off;
};
@@ -332,6 +333,35 @@ void ata_force_cbl(struct ata_port *ap)
}
/**
+ * ata_force_pflags - force port flags according to libata.force
+ * @ap: ATA port of interest
+ *
+ * Force port flags according to libata.force and whine about it.
+ *
+ * LOCKING:
+ * EH context.
+ */
+static void ata_force_pflags(struct ata_port *ap)
+{
+ int i;
+
+ for (i = ata_force_tbl_size - 1; i >= 0; i--) {
+ const struct ata_force_ent *fe = &ata_force_tbl[i];
+
+ if (fe->port != -1 && fe->port != ap->print_id)
+ continue;
+
+ /* let pflags stack */
+ if (fe->param.pflags_on) {
+ ap->pflags |= fe->param.pflags_on;
+ ata_port_notice(ap,
+ "FORCE: port flag 0x%x forced -> 0x%x\n",
+ fe->param.pflags_on, ap->pflags);
+ }
+ }
+}
+
+/**
* ata_force_link_limits - force link limits according to libata.force
* @link: ATA link of interest
*
@@ -486,6 +516,7 @@ static void ata_force_quirks(struct ata_device *dev)
}
}
#else
+static inline void ata_force_pflags(struct ata_port *ap) { }
static inline void ata_force_link_limits(struct ata_link *link) { }
static inline void ata_force_xfermask(struct ata_device *dev) { }
static inline void ata_force_quirks(struct ata_device *dev) { }
@@ -2243,7 +2274,7 @@ static void ata_dev_config_ncq_non_data(struct ata_device *dev)
if (!ata_log_supported(dev, ATA_LOG_NCQ_NON_DATA)) {
ata_dev_warn(dev,
- "NCQ Send/Recv Log not supported\n");
+ "NCQ Non-Data Log not supported\n");
return;
}
err_mask = ata_read_log_page(dev, ATA_LOG_NCQ_NON_DATA,
@@ -2845,6 +2876,10 @@ int ata_dev_configure(struct ata_device *dev)
(id[ATA_ID_SATA_CAPABILITY] & 0xe) == 0x2)
dev->quirks |= ATA_QUIRK_NOLPM;
+ if (dev->quirks & ATA_QUIRK_NO_LPM_ON_ATI &&
+ ata_dev_check_adapter(dev, PCI_VENDOR_ID_ATI))
+ dev->quirks |= ATA_QUIRK_NOLPM;
+
if (ap->flags & ATA_FLAG_NO_LPM)
dev->quirks |= ATA_QUIRK_NOLPM;
@@ -3897,6 +3932,7 @@ static const char * const ata_quirk_names[] = {
[__ATA_QUIRK_MAX_SEC_1024] = "maxsec1024",
[__ATA_QUIRK_MAX_TRIM_128M] = "maxtrim128m",
[__ATA_QUIRK_NO_NCQ_ON_ATI] = "noncqonati",
+ [__ATA_QUIRK_NO_LPM_ON_ATI] = "nolpmonati",
[__ATA_QUIRK_NO_ID_DEV_LOG] = "noiddevlog",
[__ATA_QUIRK_NO_LOG_DIR] = "nologdir",
[__ATA_QUIRK_NO_FUA] = "nofua",
@@ -4142,13 +4178,16 @@ static const struct ata_dev_quirks_entry __ata_dev_quirks[] = {
ATA_QUIRK_ZERO_AFTER_TRIM },
{ "Samsung SSD 860*", NULL, ATA_QUIRK_NO_NCQ_TRIM |
ATA_QUIRK_ZERO_AFTER_TRIM |
- ATA_QUIRK_NO_NCQ_ON_ATI },
+ ATA_QUIRK_NO_NCQ_ON_ATI |
+ ATA_QUIRK_NO_LPM_ON_ATI },
{ "Samsung SSD 870*", NULL, ATA_QUIRK_NO_NCQ_TRIM |
ATA_QUIRK_ZERO_AFTER_TRIM |
- ATA_QUIRK_NO_NCQ_ON_ATI },
+ ATA_QUIRK_NO_NCQ_ON_ATI |
+ ATA_QUIRK_NO_LPM_ON_ATI },
{ "SAMSUNG*MZ7LH*", NULL, ATA_QUIRK_NO_NCQ_TRIM |
ATA_QUIRK_ZERO_AFTER_TRIM |
- ATA_QUIRK_NO_NCQ_ON_ATI, },
+ ATA_QUIRK_NO_NCQ_ON_ATI |
+ ATA_QUIRK_NO_LPM_ON_ATI },
{ "FCCT*M500*", NULL, ATA_QUIRK_NO_NCQ_TRIM |
ATA_QUIRK_ZERO_AFTER_TRIM },
@@ -4544,7 +4583,7 @@ int atapi_check_dma(struct ata_queued_cmd *qc)
*/
if (!(qc->dev->quirks & ATA_QUIRK_ATAPI_MOD16_DMA) &&
unlikely(qc->nbytes & 15))
- return 1;
+ return -EOPNOTSUPP;
if (ap->ops->check_atapi_dma)
return ap->ops->check_atapi_dma(qc);
@@ -5452,6 +5491,8 @@ struct ata_port *ata_port_alloc(struct ata_host *host)
#endif
ata_sff_port_init(ap);
+ ata_force_pflags(ap);
+
return ap;
}
EXPORT_SYMBOL_GPL(ata_port_alloc);
@@ -6264,6 +6305,9 @@ EXPORT_SYMBOL_GPL(ata_platform_remove_one);
{ "no" #name, .lflags_on = (flags) }, \
{ #name, .lflags_off = (flags) }
+#define force_pflag_on(name, flags) \
+ { #name, .pflags_on = (flags) }
+
#define force_quirk_on(name, flag) \
{ #name, .quirk_on = (flag) }
@@ -6323,6 +6367,8 @@ static const struct ata_force_param force_tbl[] __initconst = {
force_lflag_on(rstonce, ATA_LFLAG_RST_ONCE),
force_lflag_onoff(dbdelay, ATA_LFLAG_NO_DEBOUNCE_DELAY),
+ force_pflag_on(external, ATA_PFLAG_EXTERNAL),
+
force_quirk_onoff(ncq, ATA_QUIRK_NONCQ),
force_quirk_onoff(ncqtrim, ATA_QUIRK_NO_NCQ_TRIM),
force_quirk_onoff(ncqati, ATA_QUIRK_NO_NCQ_ON_ATI),
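
force_pflag_on() is a sibling of the existing force_lflag_*/force_quirk_* table macros: it ties a libata.force keyword to a port-flag mask, which ata_force_pflags() then ORs into ap->pflags when the port is allocated. The new table entry expands to roughly the line below, and, assuming the usual libata.force syntax, the keyword should be selectable per port (e.g. libata.force=1:external on the kernel command line):

/* Expansion of force_pflag_on(external, ATA_PFLAG_EXTERNAL): */
{ "external", .pflags_on = ATA_PFLAG_EXTERNAL },
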
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index 3b303d4ae37a..16cd676eae1f 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -1542,8 +1542,15 @@ unsigned int atapi_eh_request_sense(struct ata_device *dev,
tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
tf.command = ATA_CMD_PACKET;
- /* is it pointless to prefer PIO for "safety reasons"? */
- if (ap->flags & ATA_FLAG_PIO_DMA) {
+ /*
+ * Do not use DMA if the connected device only supports PIO, even if the
+ * port prefers PIO commands via DMA.
+ *
+ * Ideally, we should call atapi_check_dma() to check if it is safe for
+ * the LLD to use DMA for REQUEST_SENSE, but we don't have a qc.
+ * Since we can't check the command, perhaps we should only use pio?
+ */
+ if ((ap->flags & ATA_FLAG_PIO_DMA) && !(dev->flags & ATA_DFLAG_PIO)) {
tf.protocol = ATAPI_PROT_DMA;
tf.feature |= ATAPI_PKT_DMA;
} else {
diff --git a/drivers/ata/pata_octeon_cf.c b/drivers/ata/pata_octeon_cf.c
index dce24806a052..2d32125c16fd 100644
--- a/drivers/ata/pata_octeon_cf.c
+++ b/drivers/ata/pata_octeon_cf.c
@@ -935,9 +935,8 @@ static int octeon_cf_probe(struct platform_device *pdev)
ap->mwdma_mask = enable_dma ? ATA_MWDMA4 : 0;
/* True IDE mode needs a timer to poll for not-busy. */
- hrtimer_init(&cf_port->delayed_finish, CLOCK_MONOTONIC,
- HRTIMER_MODE_REL);
- cf_port->delayed_finish.function = octeon_cf_delayed_finish;
+ hrtimer_setup(&cf_port->delayed_finish, octeon_cf_delayed_finish, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
} else {
/* 16 bit but not True IDE */
base = cs0 + 0x800;
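
hrtimer_setup() is the newer one-call replacement for hrtimer_init() followed by a manual assignment of timer->function; the callback prototype is unchanged. A minimal sketch, assuming a driver-private structure that embeds the timer and a hypothetical callback name:

static enum hrtimer_restart my_poll_fn(struct hrtimer *t)
{
	/* ... poll the hardware ... */
	return HRTIMER_NORESTART;	/* or HRTIMER_RESTART to re-arm */
}

/* Initialize the timer and install the callback in one step. */
hrtimer_setup(&priv->poll_timer, my_poll_fn, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
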
diff --git a/drivers/ata/sata_via.c b/drivers/ata/sata_via.c
index 57cbf2cef618..4ecd8f33b082 100644
--- a/drivers/ata/sata_via.c
+++ b/drivers/ata/sata_via.c
@@ -25,6 +25,7 @@
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
+#include <linux/string_choices.h>
#define DRV_NAME "sata_via"
#define DRV_VERSION "2.6"
@@ -359,7 +360,7 @@ static int vt6420_prereset(struct ata_link *link, unsigned long deadline)
ata_port_info(ap,
"SATA link %s 1.5 Gbps (SStatus %X SControl %X)\n",
- online ? "up" : "down", sstatus, scontrol);
+ str_up_down(online), sstatus, scontrol);
/* SStatus is read one more time */
svia_scr_read(link, SCR_STATUS, &sstatus);
diff --git a/drivers/auxdisplay/Kconfig b/drivers/auxdisplay/Kconfig
index 8934e6ad5772..bedc6133f970 100644
--- a/drivers/auxdisplay/Kconfig
+++ b/drivers/auxdisplay/Kconfig
@@ -503,6 +503,7 @@ config HT16K33
config MAX6959
tristate "Maxim MAX6958/6959 7-segment LED controller"
depends on I2C
+ select BITREVERSE
select REGMAP_I2C
select LINEDISP
help
diff --git a/drivers/auxdisplay/charlcd.c b/drivers/auxdisplay/charlcd.c
index 19b619376d48..09020bb8ad15 100644
--- a/drivers/auxdisplay/charlcd.c
+++ b/drivers/auxdisplay/charlcd.c
@@ -595,18 +595,19 @@ static int charlcd_init(struct charlcd *lcd)
return 0;
}
-struct charlcd *charlcd_alloc(void)
+struct charlcd *charlcd_alloc(unsigned int drvdata_size)
{
struct charlcd_priv *priv;
struct charlcd *lcd;
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ priv = kzalloc(sizeof(*priv) + drvdata_size, GFP_KERNEL);
if (!priv)
return NULL;
priv->esc_seq.len = -1;
lcd = &priv->lcd;
+ lcd->drvdata = priv->drvdata;
return lcd;
}
diff --git a/drivers/auxdisplay/charlcd.h b/drivers/auxdisplay/charlcd.h
index 4d4287209d04..d10b89740bca 100644
--- a/drivers/auxdisplay/charlcd.h
+++ b/drivers/auxdisplay/charlcd.h
@@ -51,7 +51,7 @@ struct charlcd {
unsigned long y;
} addr;
- void *drvdata;
+ void *drvdata; /* Set by charlcd_alloc() */
};
/**
@@ -95,7 +95,8 @@ struct charlcd_ops {
};
void charlcd_backlight(struct charlcd *lcd, enum charlcd_onoff on);
-struct charlcd *charlcd_alloc(void);
+
+struct charlcd *charlcd_alloc(unsigned int drvdata_size);
void charlcd_free(struct charlcd *lcd);
int charlcd_register(struct charlcd *lcd);
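
The charlcd rework switches to a single allocation: charlcd_alloc(drvdata_size) reserves drvdata_size extra bytes behind the private charlcd structure and points lcd->drvdata at them, so callers no longer kzalloc() and kfree() a separate driver-data object. A hedged sketch of the resulting call pattern in a display driver, with struct my_lcd_data standing in for the driver's private type:

struct my_lcd_data {
	struct i2c_client *i2c;
	/* ... */
};

struct charlcd *lcd;
struct my_lcd_data *data;

/* drvdata is carved out of the same allocation as the charlcd itself. */
lcd = charlcd_alloc(sizeof(*data));
if (!lcd)
	return -ENOMEM;

data = lcd->drvdata;		/* already set by charlcd_alloc() */
data->i2c = i2c;

/* Teardown is a single charlcd_free(lcd); no separate kfree(data). */
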
diff --git a/drivers/auxdisplay/hd44780.c b/drivers/auxdisplay/hd44780.c
index 0526f0d90a79..cef42656c4b0 100644
--- a/drivers/auxdisplay/hd44780.c
+++ b/drivers/auxdisplay/hd44780.c
@@ -222,20 +222,17 @@ static int hd44780_probe(struct platform_device *pdev)
return -EINVAL;
}
- hdc = hd44780_common_alloc();
- if (!hdc)
- return -ENOMEM;
-
- lcd = charlcd_alloc();
+ lcd = hd44780_common_alloc();
if (!lcd)
- goto fail1;
+ return -ENOMEM;
hd = kzalloc(sizeof(*hd), GFP_KERNEL);
if (!hd)
goto fail2;
+ hdc = lcd->drvdata;
hdc->hd44780 = hd;
- lcd->drvdata = hdc;
+
for (i = 0; i < ifwidth; i++) {
hd->pins[base + i] = devm_gpiod_get_index(dev, "data", i,
GPIOD_OUT_LOW);
@@ -313,9 +310,7 @@ static int hd44780_probe(struct platform_device *pdev)
fail3:
kfree(hd);
fail2:
- kfree(lcd);
-fail1:
- kfree(hdc);
+ hd44780_common_free(lcd);
return ret;
}
@@ -326,9 +321,7 @@ static void hd44780_remove(struct platform_device *pdev)
charlcd_unregister(lcd);
kfree(hdc->hd44780);
- kfree(lcd->drvdata);
-
- kfree(lcd);
+ hd44780_common_free(lcd);
}
static const struct of_device_id hd44780_of_match[] = {
diff --git a/drivers/auxdisplay/hd44780_common.c b/drivers/auxdisplay/hd44780_common.c
index 4ef87c3118c0..1792fe2a4460 100644
--- a/drivers/auxdisplay/hd44780_common.c
+++ b/drivers/auxdisplay/hd44780_common.c
@@ -351,20 +351,28 @@ int hd44780_common_redefine_char(struct charlcd *lcd, char *esc)
}
EXPORT_SYMBOL_GPL(hd44780_common_redefine_char);
-struct hd44780_common *hd44780_common_alloc(void)
+struct charlcd *hd44780_common_alloc(void)
{
- struct hd44780_common *hd;
+ struct hd44780_common *hdc;
+ struct charlcd *lcd;
- hd = kzalloc(sizeof(*hd), GFP_KERNEL);
- if (!hd)
+ lcd = charlcd_alloc(sizeof(*hdc));
+ if (!lcd)
return NULL;
- hd->ifwidth = 8;
- hd->bwidth = DEFAULT_LCD_BWIDTH;
- hd->hwidth = DEFAULT_LCD_HWIDTH;
- return hd;
+ hdc = lcd->drvdata;
+ hdc->ifwidth = 8;
+ hdc->bwidth = DEFAULT_LCD_BWIDTH;
+ hdc->hwidth = DEFAULT_LCD_HWIDTH;
+ return lcd;
}
EXPORT_SYMBOL_GPL(hd44780_common_alloc);
+void hd44780_common_free(struct charlcd *lcd)
+{
+ charlcd_free(lcd);
+}
+EXPORT_SYMBOL_GPL(hd44780_common_free);
+
MODULE_DESCRIPTION("Common functions for HD44780 (and compatibles) LCD displays");
MODULE_LICENSE("GPL");
diff --git a/drivers/auxdisplay/hd44780_common.h b/drivers/auxdisplay/hd44780_common.h
index a16aa8c29c99..4c87f55722b6 100644
--- a/drivers/auxdisplay/hd44780_common.h
+++ b/drivers/auxdisplay/hd44780_common.h
@@ -30,4 +30,6 @@ int hd44780_common_blink(struct charlcd *lcd, enum charlcd_onoff on);
int hd44780_common_fontsize(struct charlcd *lcd, enum charlcd_fontsize size);
int hd44780_common_lines(struct charlcd *lcd, enum charlcd_lines lines);
int hd44780_common_redefine_char(struct charlcd *lcd, char *esc);
-struct hd44780_common *hd44780_common_alloc(void);
+
+struct charlcd *hd44780_common_alloc(void);
+void hd44780_common_free(struct charlcd *lcd);
diff --git a/drivers/auxdisplay/lcd2s.c b/drivers/auxdisplay/lcd2s.c
index a28daa4ffbf7..045dbef49dee 100644
--- a/drivers/auxdisplay/lcd2s.c
+++ b/drivers/auxdisplay/lcd2s.c
@@ -298,20 +298,18 @@ static int lcd2s_i2c_probe(struct i2c_client *i2c)
I2C_FUNC_SMBUS_WRITE_BLOCK_DATA))
return -EIO;
- lcd2s = devm_kzalloc(&i2c->dev, sizeof(*lcd2s), GFP_KERNEL);
- if (!lcd2s)
- return -ENOMEM;
-
/* Test, if the display is responding */
err = lcd2s_i2c_smbus_write_byte(i2c, LCD2S_CMD_DISPLAY_OFF);
if (err < 0)
return err;
- lcd = charlcd_alloc();
+ lcd = charlcd_alloc(sizeof(*lcd2s));
if (!lcd)
return -ENOMEM;
- lcd->drvdata = lcd2s;
+ lcd->ops = &lcd2s_ops;
+
+ lcd2s = lcd->drvdata;
lcd2s->i2c = i2c;
lcd2s->charlcd = lcd;
@@ -326,8 +324,6 @@ static int lcd2s_i2c_probe(struct i2c_client *i2c)
if (err)
goto fail1;
- lcd->ops = &lcd2s_ops;
-
err = charlcd_register(lcd2s->charlcd);
if (err)
goto fail1;
diff --git a/drivers/auxdisplay/panel.c b/drivers/auxdisplay/panel.c
index a731f28455b4..91ccb9789d43 100644
--- a/drivers/auxdisplay/panel.c
+++ b/drivers/auxdisplay/panel.c
@@ -831,18 +831,12 @@ static void lcd_init(void)
struct charlcd *charlcd;
struct hd44780_common *hdc;
- hdc = hd44780_common_alloc();
- if (!hdc)
+ charlcd = hd44780_common_alloc();
+ if (!charlcd)
return;
- charlcd = charlcd_alloc();
- if (!charlcd) {
- kfree(hdc);
- return;
- }
-
+ hdc = charlcd->drvdata;
hdc->hd44780 = &lcd;
- charlcd->drvdata = hdc;
/*
* Init lcd struct with load-time values to preserve exact
@@ -1664,7 +1658,7 @@ err_lcd_unreg:
if (lcd.enabled)
charlcd_unregister(lcd.charlcd);
err_unreg_device:
- kfree(lcd.charlcd);
+ hd44780_common_free(lcd.charlcd);
lcd.charlcd = NULL;
parport_unregister_device(pprt);
pprt = NULL;
@@ -1691,8 +1685,7 @@ static void panel_detach(struct parport *port)
if (lcd.enabled) {
charlcd_unregister(lcd.charlcd);
lcd.initialized = false;
- kfree(lcd.charlcd->drvdata);
- kfree(lcd.charlcd);
+ hd44780_common_free(lcd.charlcd);
lcd.charlcd = NULL;
}
diff --git a/drivers/auxdisplay/seg-led-gpio.c b/drivers/auxdisplay/seg-led-gpio.c
index f10c25e6bf12..dfb62e9ce9b4 100644
--- a/drivers/auxdisplay/seg-led-gpio.c
+++ b/drivers/auxdisplay/seg-led-gpio.c
@@ -36,8 +36,7 @@ static void seg_led_update(struct work_struct *work)
bitmap_set_value8(values, map_to_seg7(&map->map.seg7, linedisp->buf[0]), 0);
- gpiod_set_array_value_cansleep(priv->segment_gpios->ndescs, priv->segment_gpios->desc,
- priv->segment_gpios->info, values);
+ gpiod_multi_set_value_cansleep(priv->segment_gpios, values);
}
static int seg_led_linedisp_get_map_type(struct linedisp *linedisp)
diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c
index 3ebe77566788..af0029d30dbe 100644
--- a/drivers/base/arch_topology.c
+++ b/drivers/base/arch_topology.c
@@ -11,6 +11,7 @@
#include <linux/cleanup.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
+#include <linux/cpu_smt.h>
#include <linux/device.h>
#include <linux/of.h>
#include <linux/slab.h>
@@ -28,7 +29,7 @@
static DEFINE_PER_CPU(struct scale_freq_data __rcu *, sft_data);
static struct cpumask scale_freq_counters_mask;
static bool scale_freq_invariant;
-DEFINE_PER_CPU(unsigned long, capacity_freq_ref) = 1;
+DEFINE_PER_CPU(unsigned long, capacity_freq_ref) = 0;
EXPORT_PER_CPU_SYMBOL_GPL(capacity_freq_ref);
static bool supports_scale_freq_counters(const struct cpumask *cpus)
@@ -293,13 +294,15 @@ void topology_normalize_cpu_scale(void)
capacity_scale = 1;
for_each_possible_cpu(cpu) {
- capacity = raw_capacity[cpu] * per_cpu(capacity_freq_ref, cpu);
+ capacity = raw_capacity[cpu] *
+ (per_cpu(capacity_freq_ref, cpu) ?: 1);
capacity_scale = max(capacity, capacity_scale);
}
pr_debug("cpu_capacity: capacity_scale=%llu\n", capacity_scale);
for_each_possible_cpu(cpu) {
- capacity = raw_capacity[cpu] * per_cpu(capacity_freq_ref, cpu);
+ capacity = raw_capacity[cpu] *
+ (per_cpu(capacity_freq_ref, cpu) ?: 1);
capacity = div64_u64(capacity << SCHED_CAPACITY_SHIFT,
capacity_scale);
topology_set_cpu_scale(cpu, capacity);
@@ -506,6 +509,10 @@ core_initcall(free_raw_capacity);
#endif
#if defined(CONFIG_ARM64) || defined(CONFIG_RISCV)
+
+/* Used to enable the SMT control */
+static unsigned int max_smt_thread_num = 1;
+
/*
* This function returns the logic cpu number of the node.
* There are basically three kinds of return values:
@@ -565,6 +572,8 @@ static int __init parse_core(struct device_node *core, int package_id,
i++;
} while (1);
+ max_smt_thread_num = max_t(unsigned int, max_smt_thread_num, i);
+
cpu = get_cpu_for_node(core);
if (cpu >= 0) {
if (!leaf) {
@@ -677,6 +686,17 @@ static int __init parse_socket(struct device_node *socket)
if (!has_socket)
ret = parse_cluster(socket, 0, -1, 0);
+ /*
+ * Reset the max_smt_thread_num to 1 on failure. Since on failure
+ * we need to notify the framework the SMT is not supported, but
+ * max_smt_thread_num can be initialized to the SMT thread number
+ * of the cores which are successfully parsed.
+ */
+ if (ret)
+ max_smt_thread_num = 1;
+
+ cpu_smt_set_num_threads(max_smt_thread_num, max_smt_thread_num);
+
return ret;
}
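
The capacity normalization change relies on the GNU `?:` shorthand, where `x ?: y` means `x ? x : y` with `x` evaluated only once, so a still-unset (zero) capacity_freq_ref degrades to a multiplier of 1 instead of zeroing the computed capacity. Spelled out:

unsigned long freq_ref = per_cpu(capacity_freq_ref, cpu);

/* Equivalent to: freq_ref ? freq_ref : 1 */
capacity = raw_capacity[cpu] * (freq_ref ?: 1);
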
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 5a1f05198114..2fde698430df 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -2079,6 +2079,7 @@ static bool __fw_devlink_relax_cycles(struct fwnode_handle *con_handle,
out:
sup_handle->flags &= ~FWNODE_FLAG_VISITED;
put_device(sup_dev);
+ put_device(con_dev);
put_device(par_dev);
return ret;
}
diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
index b848764ef018..6dd1a8860f1c 100644
--- a/drivers/base/devtmpfs.c
+++ b/drivers/base/devtmpfs.c
@@ -63,22 +63,6 @@ __setup("devtmpfs.mount=", mount_param);
static struct vfsmount *mnt;
-static struct dentry *public_dev_mount(struct file_system_type *fs_type, int flags,
- const char *dev_name, void *data)
-{
- struct super_block *s = mnt->mnt_sb;
- int err;
-
- atomic_inc(&s->s_active);
- down_write(&s->s_umount);
- err = reconfigure_single(s, flags, data);
- if (err < 0) {
- deactivate_locked_super(s);
- return ERR_PTR(err);
- }
- return dget(s->s_root);
-}
-
static struct file_system_type internal_fs_type = {
.name = "devtmpfs",
#ifdef CONFIG_TMPFS
@@ -89,9 +73,40 @@ static struct file_system_type internal_fs_type = {
.kill_sb = kill_litter_super,
};
+/* Simply take a ref on the existing mount */
+static int devtmpfs_get_tree(struct fs_context *fc)
+{
+ struct super_block *sb = mnt->mnt_sb;
+
+ atomic_inc(&sb->s_active);
+ down_write(&sb->s_umount);
+ fc->root = dget(sb->s_root);
+ return 0;
+}
+
+/* Ops are filled in during init depending on underlying shmem or ramfs type */
+struct fs_context_operations devtmpfs_context_ops = {};
+
+/* Call the underlying initialization and set to our ops */
+static int devtmpfs_init_fs_context(struct fs_context *fc)
+{
+ int ret;
+#ifdef CONFIG_TMPFS
+ ret = shmem_init_fs_context(fc);
+#else
+ ret = ramfs_init_fs_context(fc);
+#endif
+ if (ret < 0)
+ return ret;
+
+ fc->ops = &devtmpfs_context_ops;
+
+ return 0;
+}
+
static struct file_system_type dev_fs_type = {
.name = "devtmpfs",
- .mount = public_dev_mount,
+ .init_fs_context = devtmpfs_init_fs_context,
};
static int devtmpfs_submit_req(struct req *req, const char *tmp)
@@ -160,18 +175,17 @@ static int dev_mkdir(const char *name, umode_t mode)
{
struct dentry *dentry;
struct path path;
- int err;
dentry = kern_path_create(AT_FDCWD, name, &path, LOOKUP_DIRECTORY);
if (IS_ERR(dentry))
return PTR_ERR(dentry);
- err = vfs_mkdir(&nop_mnt_idmap, d_inode(path.dentry), dentry, mode);
- if (!err)
+ dentry = vfs_mkdir(&nop_mnt_idmap, d_inode(path.dentry), dentry, mode);
+ if (!IS_ERR(dentry))
/* mark as kernel-created inode */
d_inode(dentry)->i_private = &thread;
done_path_create(&path, dentry);
- return err;
+ return PTR_ERR_OR_ZERO(dentry);
}
static int create_path(const char *nodepath)
@@ -245,15 +259,12 @@ static int dev_rmdir(const char *name)
dentry = kern_path_locked(name, &parent);
if (IS_ERR(dentry))
return PTR_ERR(dentry);
- if (d_really_is_positive(dentry)) {
- if (d_inode(dentry)->i_private == &thread)
- err = vfs_rmdir(&nop_mnt_idmap, d_inode(parent.dentry),
- dentry);
- else
- err = -EPERM;
- } else {
- err = -ENOENT;
- }
+ if (d_inode(dentry)->i_private == &thread)
+ err = vfs_rmdir(&nop_mnt_idmap, d_inode(parent.dentry),
+ dentry);
+ else
+ err = -EPERM;
+
dput(dentry);
inode_unlock(d_inode(parent.dentry));
path_put(&parent);
@@ -310,6 +321,8 @@ static int handle_remove(const char *nodename, struct device *dev)
{
struct path parent;
struct dentry *dentry;
+ struct kstat stat;
+ struct path p;
int deleted = 0;
int err;
@@ -317,32 +330,28 @@ static int handle_remove(const char *nodename, struct device *dev)
if (IS_ERR(dentry))
return PTR_ERR(dentry);
- if (d_really_is_positive(dentry)) {
- struct kstat stat;
- struct path p = {.mnt = parent.mnt, .dentry = dentry};
- err = vfs_getattr(&p, &stat, STATX_TYPE | STATX_MODE,
- AT_STATX_SYNC_AS_STAT);
- if (!err && dev_mynode(dev, d_inode(dentry), &stat)) {
- struct iattr newattrs;
- /*
- * before unlinking this node, reset permissions
- * of possible references like hardlinks
- */
- newattrs.ia_uid = GLOBAL_ROOT_UID;
- newattrs.ia_gid = GLOBAL_ROOT_GID;
- newattrs.ia_mode = stat.mode & ~0777;
- newattrs.ia_valid =
- ATTR_UID|ATTR_GID|ATTR_MODE;
- inode_lock(d_inode(dentry));
- notify_change(&nop_mnt_idmap, dentry, &newattrs, NULL);
- inode_unlock(d_inode(dentry));
- err = vfs_unlink(&nop_mnt_idmap, d_inode(parent.dentry),
- dentry, NULL);
- if (!err || err == -ENOENT)
- deleted = 1;
- }
- } else {
- err = -ENOENT;
+ p.mnt = parent.mnt;
+ p.dentry = dentry;
+ err = vfs_getattr(&p, &stat, STATX_TYPE | STATX_MODE,
+ AT_STATX_SYNC_AS_STAT);
+ if (!err && dev_mynode(dev, d_inode(dentry), &stat)) {
+ struct iattr newattrs;
+ /*
+ * before unlinking this node, reset permissions
+ * of possible references like hardlinks
+ */
+ newattrs.ia_uid = GLOBAL_ROOT_UID;
+ newattrs.ia_gid = GLOBAL_ROOT_GID;
+ newattrs.ia_mode = stat.mode & ~0777;
+ newattrs.ia_valid =
+ ATTR_UID|ATTR_GID|ATTR_MODE;
+ inode_lock(d_inode(dentry));
+ notify_change(&nop_mnt_idmap, dentry, &newattrs, NULL);
+ inode_unlock(d_inode(dentry));
+ err = vfs_unlink(&nop_mnt_idmap, d_inode(parent.dentry),
+ dentry, NULL);
+ if (!err || err == -ENOENT)
+ deleted = 1;
}
dput(dentry);
inode_unlock(d_inode(parent.dentry));
@@ -443,6 +452,31 @@ static int __ref devtmpfsd(void *p)
}
/*
+ * Get the underlying (shmem/ramfs) context ops to build ours
+ */
+static int devtmpfs_configure_context(void)
+{
+ struct fs_context *fc;
+
+ fc = fs_context_for_reconfigure(mnt->mnt_root, mnt->mnt_sb->s_flags,
+ MS_RMT_MASK);
+ if (IS_ERR(fc))
+ return PTR_ERR(fc);
+
+ /* Set up devtmpfs_context_ops based on underlying type */
+ devtmpfs_context_ops.free = fc->ops->free;
+ devtmpfs_context_ops.dup = fc->ops->dup;
+ devtmpfs_context_ops.parse_param = fc->ops->parse_param;
+ devtmpfs_context_ops.parse_monolithic = fc->ops->parse_monolithic;
+ devtmpfs_context_ops.get_tree = &devtmpfs_get_tree;
+ devtmpfs_context_ops.reconfigure = fc->ops->reconfigure;
+
+ put_fs_context(fc);
+
+ return 0;
+}
+
+/*
* Create devtmpfs instance, driver-core devices will add their device
* nodes here.
*/
@@ -456,6 +490,13 @@ int __init devtmpfs_init(void)
pr_err("unable to create devtmpfs %ld\n", PTR_ERR(mnt));
return PTR_ERR(mnt);
}
+
+ err = devtmpfs_configure_context();
+ if (err) {
+ pr_err("unable to configure devtmpfs type %d\n", err);
+ return err;
+ }
+
err = register_filesystem(&dev_fs_type);
if (err) {
pr_err("unable to register devtmpfs type %d\n", err);
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index 6f2a33722c52..1813cfd0c4bd 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -1451,7 +1451,8 @@ static int platform_dma_configure(struct device *dev)
attr = acpi_get_dma_attr(to_acpi_device_node(fwnode));
ret = acpi_dma_configure(dev, attr);
}
- if (ret || drv->driver_managed_dma)
+ /* @drv may not be valid when we're called from the IOMMU layer */
+ if (ret || !dev->driver || drv->driver_managed_dma)
return ret;
ret = iommu_device_use_default_domain(dev);
diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c
index e18ba676cdf6..b69bcb37c830 100644
--- a/drivers/base/power/clock_ops.c
+++ b/drivers/base/power/clock_ops.c
@@ -259,39 +259,6 @@ int pm_clk_add_clk(struct device *dev, struct clk *clk)
}
EXPORT_SYMBOL_GPL(pm_clk_add_clk);
-
-/**
- * of_pm_clk_add_clk - Start using a device clock for power management.
- * @dev: Device whose clock is going to be used for power management.
- * @name: Name of clock that is going to be used for power management.
- *
- * Add the clock described in the 'clocks' device-tree node that matches
- * with the 'name' provided, to the list of clocks used for the power
- * management of @dev. On success, returns 0. Returns a negative error
- * code if the clock is not found or cannot be added.
- */
-int of_pm_clk_add_clk(struct device *dev, const char *name)
-{
- struct clk *clk;
- int ret;
-
- if (!dev || !dev->of_node || !name)
- return -EINVAL;
-
- clk = of_clk_get_by_name(dev->of_node, name);
- if (IS_ERR(clk))
- return PTR_ERR(clk);
-
- ret = pm_clk_add_clk(dev, clk);
- if (ret) {
- clk_put(clk);
- return ret;
- }
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(of_pm_clk_add_clk);
-
/**
* of_pm_clk_add_clks - Start using device clock(s) for power management.
* @dev: Device whose clock(s) is going to be used for power management.
@@ -377,46 +344,6 @@ static void __pm_clk_remove(struct pm_clock_entry *ce)
}
/**
- * pm_clk_remove - Stop using a device clock for power management.
- * @dev: Device whose clock should not be used for PM any more.
- * @con_id: Connection ID of the clock.
- *
- * Remove the clock represented by @con_id from the list of clocks used for
- * the power management of @dev.
- */
-void pm_clk_remove(struct device *dev, const char *con_id)
-{
- struct pm_subsys_data *psd = dev_to_psd(dev);
- struct pm_clock_entry *ce;
-
- if (!psd)
- return;
-
- pm_clk_list_lock(psd);
-
- list_for_each_entry(ce, &psd->clock_list, node) {
- if (!con_id && !ce->con_id)
- goto remove;
- else if (!con_id || !ce->con_id)
- continue;
- else if (!strcmp(con_id, ce->con_id))
- goto remove;
- }
-
- pm_clk_list_unlock(psd);
- return;
-
- remove:
- list_del(&ce->node);
- if (ce->enabled_when_prepared)
- psd->clock_op_might_sleep--;
- pm_clk_list_unlock(psd);
-
- __pm_clk_remove(ce);
-}
-EXPORT_SYMBOL_GPL(pm_clk_remove);
-
-/**
* pm_clk_remove_clk - Stop using a device clock for power management.
* @dev: Device whose clock should not be used for PM any more.
* @clk: Clock pointer
diff --git a/drivers/base/power/generic_ops.c b/drivers/base/power/generic_ops.c
index 4fa525668cb7..6502720bb564 100644
--- a/drivers/base/power/generic_ops.c
+++ b/drivers/base/power/generic_ops.c
@@ -115,18 +115,6 @@ int pm_generic_freeze_noirq(struct device *dev)
EXPORT_SYMBOL_GPL(pm_generic_freeze_noirq);
/**
- * pm_generic_freeze_late - Generic freeze_late callback for subsystems.
- * @dev: Device to freeze.
- */
-int pm_generic_freeze_late(struct device *dev)
-{
- const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
- return pm && pm->freeze_late ? pm->freeze_late(dev) : 0;
-}
-EXPORT_SYMBOL_GPL(pm_generic_freeze_late);
-
-/**
* pm_generic_freeze - Generic freeze callback for subsystems.
* @dev: Device to freeze.
*/
@@ -187,18 +175,6 @@ int pm_generic_thaw_noirq(struct device *dev)
EXPORT_SYMBOL_GPL(pm_generic_thaw_noirq);
/**
- * pm_generic_thaw_early - Generic thaw_early callback for subsystems.
- * @dev: Device to thaw.
- */
-int pm_generic_thaw_early(struct device *dev)
-{
- const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
- return pm && pm->thaw_early ? pm->thaw_early(dev) : 0;
-}
-EXPORT_SYMBOL_GPL(pm_generic_thaw_early);
-
-/**
* pm_generic_thaw - Generic thaw callback for subsystems.
* @dev: Device to thaw.
*/
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 40e1d8d8a589..ac2a197c1234 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -249,7 +249,7 @@ static int dpm_wait_fn(struct device *dev, void *async_ptr)
static void dpm_wait_for_children(struct device *dev, bool async)
{
- device_for_each_child(dev, &async, dpm_wait_fn);
+ device_for_each_child(dev, &async, dpm_wait_fn);
}
static void dpm_wait_for_suppliers(struct device *dev, bool async)
@@ -599,27 +599,34 @@ static bool is_async(struct device *dev)
static bool dpm_async_fn(struct device *dev, async_func_t func)
{
- reinit_completion(&dev->power.completion);
+ if (!is_async(dev))
+ return false;
- if (is_async(dev)) {
- dev->power.async_in_progress = true;
+ dev->power.work_in_progress = true;
- get_device(dev);
+ get_device(dev);
+
+ if (async_schedule_dev_nocall(func, dev))
+ return true;
- if (async_schedule_dev_nocall(func, dev))
- return true;
+ put_device(dev);
- put_device(dev);
- }
/*
- * Because async_schedule_dev_nocall() above has returned false or it
- * has not been called at all, func() is not running and it is safe to
- * update the async_in_progress flag without extra synchronization.
+ * async_schedule_dev_nocall() above has returned false, so func() is
+ * not running and it is safe to update power.work_in_progress without
+ * extra synchronization.
*/
- dev->power.async_in_progress = false;
+ dev->power.work_in_progress = false;
+
return false;
}
+static void dpm_clear_async_state(struct device *dev)
+{
+ reinit_completion(&dev->power.completion);
+ dev->power.work_in_progress = false;
+}
+
/**
* device_resume_noirq - Execute a "noirq resume" callback for given device.
* @dev: Device to handle.
@@ -656,15 +663,13 @@ static void device_resume_noirq(struct device *dev, pm_message_t state, bool asy
* so change its status accordingly.
*
* Otherwise, the device is going to be resumed, so set its PM-runtime
- * status to "active" unless its power.set_active flag is clear, in
+ * status to "active" unless its power.smart_suspend flag is clear, in
* which case it is not necessary to update its PM-runtime status.
*/
- if (skip_resume) {
+ if (skip_resume)
pm_runtime_set_suspended(dev);
- } else if (dev->power.set_active) {
+ else if (dev_pm_smart_suspend(dev))
pm_runtime_set_active(dev);
- dev->power.set_active = false;
- }
if (dev->pm_domain) {
info = "noirq power domain ";
@@ -731,14 +736,16 @@ static void dpm_noirq_resume_devices(pm_message_t state)
* Trigger the resume of "async" devices upfront so they don't have to
* wait for the "non-async" ones they don't depend on.
*/
- list_for_each_entry(dev, &dpm_noirq_list, power.entry)
+ list_for_each_entry(dev, &dpm_noirq_list, power.entry) {
+ dpm_clear_async_state(dev);
dpm_async_fn(dev, async_resume_noirq);
+ }
while (!list_empty(&dpm_noirq_list)) {
dev = to_device(dpm_noirq_list.next);
list_move_tail(&dev->power.entry, &dpm_late_early_list);
- if (!dev->power.async_in_progress) {
+ if (!dev->power.work_in_progress) {
get_device(dev);
mutex_unlock(&dpm_list_mtx);
@@ -871,14 +878,16 @@ void dpm_resume_early(pm_message_t state)
* Trigger the resume of "async" devices upfront so they don't have to
* wait for the "non-async" ones they don't depend on.
*/
- list_for_each_entry(dev, &dpm_late_early_list, power.entry)
+ list_for_each_entry(dev, &dpm_late_early_list, power.entry) {
+ dpm_clear_async_state(dev);
dpm_async_fn(dev, async_resume_early);
+ }
while (!list_empty(&dpm_late_early_list)) {
dev = to_device(dpm_late_early_list.next);
list_move_tail(&dev->power.entry, &dpm_suspended_list);
- if (!dev->power.async_in_progress) {
+ if (!dev->power.work_in_progress) {
get_device(dev);
mutex_unlock(&dpm_list_mtx);
@@ -929,7 +938,17 @@ static void device_resume(struct device *dev, pm_message_t state, bool async)
if (dev->power.syscore)
goto Complete;
+ if (!dev->power.is_suspended)
+ goto Complete;
+
if (dev->power.direct_complete) {
+ /*
+ * Allow new children to be added under the device after this
+ * point if it has no PM callbacks.
+ */
+ if (dev->power.no_pm_callbacks)
+ dev->power.is_prepared = false;
+
/* Match the pm_runtime_disable() in device_suspend(). */
pm_runtime_enable(dev);
goto Complete;
@@ -947,9 +966,6 @@ static void device_resume(struct device *dev, pm_message_t state, bool async)
*/
dev->power.is_prepared = false;
- if (!dev->power.is_suspended)
- goto Unlock;
-
if (dev->pm_domain) {
info = "power domain ";
callback = pm_op(&dev->pm_domain->ops, state);
@@ -989,7 +1005,6 @@ static void device_resume(struct device *dev, pm_message_t state, bool async)
error = dpm_run_callback(callback, dev, state, info);
dev->power.is_suspended = false;
- Unlock:
device_unlock(dev);
dpm_watchdog_clear(&wd);
@@ -1037,14 +1052,16 @@ void dpm_resume(pm_message_t state)
* Trigger the resume of "async" devices upfront so they don't have to
* wait for the "non-async" ones they don't depend on.
*/
- list_for_each_entry(dev, &dpm_suspended_list, power.entry)
+ list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
+ dpm_clear_async_state(dev);
dpm_async_fn(dev, async_resume);
+ }
while (!list_empty(&dpm_suspended_list)) {
dev = to_device(dpm_suspended_list.next);
list_move_tail(&dev->power.entry, &dpm_prepared_list);
- if (!dev->power.async_in_progress) {
+ if (!dev->power.work_in_progress) {
get_device(dev);
mutex_unlock(&dpm_list_mtx);
@@ -1109,6 +1126,8 @@ static void device_complete(struct device *dev, pm_message_t state)
device_unlock(dev);
out:
+ /* If enabling runtime PM for the device is blocked, unblock it. */
+ pm_runtime_unblock(dev);
pm_runtime_put(dev);
}
@@ -1270,24 +1289,17 @@ Skip:
dev->power.is_noirq_suspended = true;
/*
- * Skipping the resume of devices that were in use right before the
- * system suspend (as indicated by their PM-runtime usage counters)
- * would be suboptimal. Also resume them if doing that is not allowed
- * to be skipped.
+ * Devices must be resumed unless they are explicitly allowed to be left
+ * in suspend, but even in that case skipping the resume of devices that
+ * were in use right before the system suspend (as indicated by their
+ * runtime PM usage counters and child counters) would be suboptimal.
*/
- if (atomic_read(&dev->power.usage_count) > 1 ||
- !(dev_pm_test_driver_flags(dev, DPM_FLAG_MAY_SKIP_RESUME) &&
- dev->power.may_skip_resume))
+ if (!(dev_pm_test_driver_flags(dev, DPM_FLAG_MAY_SKIP_RESUME) &&
+ dev->power.may_skip_resume) || !pm_runtime_need_not_resume(dev))
dev->power.must_resume = true;
- if (dev->power.must_resume) {
- if (dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND)) {
- dev->power.set_active = true;
- if (dev->parent && !dev->parent->power.ignore_children)
- dev->parent->power.set_active = true;
- }
+ if (dev->power.must_resume)
dpm_superior_set_must_resume(dev);
- }
Complete:
complete_all(&dev->power.completion);
@@ -1320,6 +1332,7 @@ static int dpm_noirq_suspend_devices(pm_message_t state)
list_move(&dev->power.entry, &dpm_noirq_list);
+ dpm_clear_async_state(dev);
if (dpm_async_fn(dev, async_suspend_noirq))
continue;
@@ -1404,6 +1417,10 @@ static int device_suspend_late(struct device *dev, pm_message_t state, bool asyn
TRACE_DEVICE(dev);
TRACE_SUSPEND(0);
+ /*
+ * Disable runtime PM for the device without checking if there is a
+ * pending resume request for it.
+ */
__pm_runtime_disable(dev, false);
dpm_wait_for_subordinate(dev, async);
@@ -1493,6 +1510,7 @@ int dpm_suspend_late(pm_message_t state)
list_move(&dev->power.entry, &dpm_late_early_list);
+ dpm_clear_async_state(dev);
if (dpm_async_fn(dev, async_suspend_late))
continue;
@@ -1650,6 +1668,7 @@ static int device_suspend(struct device *dev, pm_message_t state, bool async)
pm_runtime_disable(dev);
if (pm_runtime_status_suspended(dev)) {
pm_dev_dbg(dev, state, "direct-complete ");
+ dev->power.is_suspended = true;
goto Complete;
}
@@ -1760,6 +1779,7 @@ int dpm_suspend(pm_message_t state)
list_move(&dev->power.entry, &dpm_suspended_list);
+ dpm_clear_async_state(dev);
if (dpm_async_fn(dev, async_suspend))
continue;
@@ -1791,6 +1811,46 @@ int dpm_suspend(pm_message_t state)
return error;
}
+static bool device_prepare_smart_suspend(struct device *dev)
+{
+ struct device_link *link;
+ bool ret = true;
+ int idx;
+
+ /*
+ * The "smart suspend" feature is enabled for devices whose drivers ask
+ * for it and for devices without PM callbacks.
+ *
+ * However, if "smart suspend" is not enabled for the device's parent
+ * or any of its suppliers that take runtime PM into account, it cannot
+ * be enabled for the device either.
+ */
+ if (!dev->power.no_pm_callbacks &&
+ !dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND))
+ return false;
+
+ if (dev->parent && !dev_pm_smart_suspend(dev->parent) &&
+ !dev->parent->power.ignore_children && !pm_runtime_blocked(dev->parent))
+ return false;
+
+ idx = device_links_read_lock();
+
+ list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node) {
+ if (!(link->flags & DL_FLAG_PM_RUNTIME))
+ continue;
+
+ if (!dev_pm_smart_suspend(link->supplier) &&
+ !pm_runtime_blocked(link->supplier)) {
+ ret = false;
+ break;
+ }
+ }
+
+ device_links_read_unlock(idx);
+
+ return ret;
+}
+
/**
* device_prepare - Prepare a device for system power transition.
* @dev: Device to handle.
@@ -1802,6 +1862,7 @@ int dpm_suspend(pm_message_t state)
static int device_prepare(struct device *dev, pm_message_t state)
{
int (*callback)(struct device *) = NULL;
+ bool smart_suspend;
int ret = 0;
/*
@@ -1811,6 +1872,13 @@ static int device_prepare(struct device *dev, pm_message_t state)
* it again during the complete phase.
*/
pm_runtime_get_noresume(dev);
+ /*
+ * If runtime PM is disabled for the device at this point and it has
+ * never been enabled so far, it should not be enabled until this system
+ * suspend-resume cycle is complete, so prepare to trigger a warning on
+ * subsequent attempts to enable it.
+ */
+ smart_suspend = !pm_runtime_block_if_disabled(dev);
if (dev->power.syscore)
return 0;
@@ -1845,6 +1913,13 @@ unlock:
pm_runtime_put(dev);
return ret;
}
+ /* Do not enable "smart suspend" for devices with disabled runtime PM. */
+ if (smart_suspend)
+ smart_suspend = device_prepare_smart_suspend(dev);
+
+ spin_lock_irq(&dev->power.lock);
+
+ dev->power.smart_suspend = smart_suspend;
/*
* A positive return value from ->prepare() means "this device appears
* to be runtime-suspended and its state is fine, so if it really is
@@ -1852,11 +1927,12 @@ unlock:
* will do the same thing with all of its descendants". This only
* applies to suspend transitions, however.
*/
- spin_lock_irq(&dev->power.lock);
dev->power.direct_complete = state.event == PM_EVENT_SUSPEND &&
(ret > 0 || dev->power.no_pm_callbacks) &&
!dev_pm_test_driver_flags(dev, DPM_FLAG_NO_DIRECT_COMPLETE);
+
spin_unlock_irq(&dev->power.lock);
+
return 0;
}
@@ -2020,6 +2096,5 @@ void device_pm_check_callbacks(struct device *dev)
bool dev_pm_skip_suspend(struct device *dev)
{
- return dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) &&
- pm_runtime_status_suspended(dev);
+ return dev_pm_smart_suspend(dev) && pm_runtime_status_suspended(dev);
}
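
As a rough illustration of the reworked "smart suspend" handling above (not part of the patch itself): device_prepare() now derives power.smart_suspend from DPM_FLAG_SMART_SUSPEND, or from the absence of PM callbacks, together with the state of the parent and suppliers, so drivers keep opting in the same way as before. A minimal sketch, assuming a hypothetical "foo" platform driver:

#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

static int foo_probe(struct platform_device *pdev)
{
	/* Allow the PM core to leave the device suspended across system sleep. */
	dev_pm_set_driver_flags(&pdev->dev,
				DPM_FLAG_SMART_SUSPEND | DPM_FLAG_MAY_SKIP_RESUME);
	pm_runtime_enable(&pdev->dev);
	return 0;
}
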
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 2ee45841486b..0e127b0329c0 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -448,8 +448,19 @@ static int rpm_callback(int (*cb)(struct device *), struct device *dev)
retval = __rpm_callback(cb, dev);
}
- dev->power.runtime_error = retval;
- return retval != -EACCES ? retval : -EIO;
+ /*
+ * Since -EACCES means that runtime PM is disabled for the given device,
+ * it should not be returned by runtime PM callbacks. If it is returned
+ * nevertheless, assume it to be a transient error and convert it to
+ * -EAGAIN.
+ */
+ if (retval == -EACCES)
+ retval = -EAGAIN;
+
+ if (retval != -EAGAIN && retval != -EBUSY)
+ dev->power.runtime_error = retval;
+
+ return retval;
}
/**
@@ -725,21 +736,18 @@ static int rpm_suspend(struct device *dev, int rpmflags)
dev->power.deferred_resume = false;
wake_up_all(&dev->power.wait_queue);
- if (retval == -EAGAIN || retval == -EBUSY) {
- dev->power.runtime_error = 0;
+ /*
+ * On transient errors, if the callback routine failed an autosuspend,
+ * and if the last_busy time has been updated so that there is a new
+ * autosuspend expiration time, automatically reschedule another
+ * autosuspend.
+ */
+ if (!dev->power.runtime_error && (rpmflags & RPM_AUTO) &&
+ pm_runtime_autosuspend_expiration(dev) != 0)
+ goto repeat;
+
+ pm_runtime_cancel_pending(dev);
- /*
- * If the callback routine failed an autosuspend, and
- * if the last_busy time has been updated so that there
- * is a new autosuspend expiration time, automatically
- * reschedule another autosuspend.
- */
- if ((rpmflags & RPM_AUTO) &&
- pm_runtime_autosuspend_expiration(dev) != 0)
- goto repeat;
- } else {
- pm_runtime_cancel_pending(dev);
- }
goto out;
}
@@ -1460,20 +1468,31 @@ int pm_runtime_barrier(struct device *dev)
}
EXPORT_SYMBOL_GPL(pm_runtime_barrier);
-/**
- * __pm_runtime_disable - Disable runtime PM of a device.
- * @dev: Device to handle.
- * @check_resume: If set, check if there's a resume request for the device.
- *
- * Increment power.disable_depth for the device and if it was zero previously,
- * cancel all pending runtime PM requests for the device and wait for all
- * operations in progress to complete. The device can be either active or
- * suspended after its runtime PM has been disabled.
- *
- * If @check_resume is set and there's a resume request pending when
- * __pm_runtime_disable() is called and power.disable_depth is zero, the
- * function will wake up the device before disabling its runtime PM.
- */
+bool pm_runtime_block_if_disabled(struct device *dev)
+{
+ bool ret;
+
+ spin_lock_irq(&dev->power.lock);
+
+ ret = !pm_runtime_enabled(dev);
+ if (ret && dev->power.last_status == RPM_INVALID)
+ dev->power.last_status = RPM_BLOCKED;
+
+ spin_unlock_irq(&dev->power.lock);
+
+ return ret;
+}
+
+void pm_runtime_unblock(struct device *dev)
+{
+ spin_lock_irq(&dev->power.lock);
+
+ if (dev->power.last_status == RPM_BLOCKED)
+ dev->power.last_status = RPM_INVALID;
+
+ spin_unlock_irq(&dev->power.lock);
+}
+
void __pm_runtime_disable(struct device *dev, bool check_resume)
{
spin_lock_irq(&dev->power.lock);
@@ -1532,6 +1551,10 @@ void pm_runtime_enable(struct device *dev)
if (--dev->power.disable_depth > 0)
goto out;
+ if (dev->power.last_status == RPM_BLOCKED) {
+ dev_warn(dev, "Attempt to enable runtime PM when it is blocked\n");
+ dump_stack();
+ }
dev->power.last_status = RPM_INVALID;
dev->power.accounting_timestamp = ktime_get_mono_fast_ns();
@@ -1764,8 +1787,8 @@ void pm_runtime_init(struct device *dev)
INIT_WORK(&dev->power.work, pm_runtime_work);
dev->power.timer_expires = 0;
- hrtimer_init(&dev->power.suspend_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
- dev->power.suspend_timer.function = pm_suspend_timer_fn;
+ hrtimer_setup(&dev->power.suspend_timer, pm_suspend_timer_fn, CLOCK_MONOTONIC,
+ HRTIMER_MODE_ABS);
init_waitqueue_head(&dev->power.wait_queue);
}
@@ -1874,7 +1897,7 @@ void pm_runtime_drop_link(struct device_link *link)
pm_request_idle(link->supplier);
}
-static bool pm_runtime_need_not_resume(struct device *dev)
+bool pm_runtime_need_not_resume(struct device *dev)
{
return atomic_read(&dev->power.usage_count) <= 1 &&
(atomic_read(&dev->power.child_count) == 0 ||
@@ -1959,7 +1982,7 @@ int pm_runtime_force_resume(struct device *dev)
int (*callback)(struct device *);
int ret = 0;
- if (!pm_runtime_status_suspended(dev) || !dev->power.needs_force_resume)
+ if (!dev->power.needs_force_resume)
goto out;
/*
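
As a rough illustration of the rpm_callback() error policy above (not part of the patch itself): transient errors (-EAGAIN and -EBUSY) are no longer latched in power.runtime_error, and a stray -EACCES from a callback is converted to -EAGAIN, so a later autosuspend attempt can simply retry. A minimal sketch, where struct foo_device, foo_busy() and foo_save_state() are hypothetical:

#include <linux/pm_runtime.h>

static int foo_runtime_suspend(struct device *dev)
{
	struct foo_device *foo = dev_get_drvdata(dev);	/* hypothetical driver data */

	/* Transient condition: report -EBUSY and let a later autosuspend retry. */
	if (foo_busy(foo))
		return -EBUSY;

	foo_save_state(foo);
	return 0;
}
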
diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c
index b1f8508c3966..f7fcf2de1301 100644
--- a/drivers/base/regmap/regcache.c
+++ b/drivers/base/regmap/regcache.c
@@ -21,6 +21,37 @@ static const struct regcache_ops *cache_types[] = {
&regcache_flat_ops,
};
+static int regcache_defaults_cmp(const void *a, const void *b)
+{
+ const struct reg_default *x = a;
+ const struct reg_default *y = b;
+
+ if (x->reg > y->reg)
+ return 1;
+ else if (x->reg < y->reg)
+ return -1;
+ else
+ return 0;
+}
+
+static void regcache_defaults_swap(void *a, void *b, int size)
+{
+ struct reg_default *x = a;
+ struct reg_default *y = b;
+ struct reg_default tmp;
+
+ tmp = *x;
+ *x = *y;
+ *y = tmp;
+}
+
+void regcache_sort_defaults(struct reg_default *defaults, unsigned int ndefaults)
+{
+ sort(defaults, ndefaults, sizeof(*defaults),
+ regcache_defaults_cmp, regcache_defaults_swap);
+}
+EXPORT_SYMBOL_GPL(regcache_sort_defaults);
+
static int regcache_hw_init(struct regmap *map)
{
int i, j;
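
As a rough usage illustration for the regcache_sort_defaults() helper exported above (not part of the patch itself): a driver that assembles its reg_default table at runtime can sort it before registering the regmap, since the register cache expects defaults in ascending register order. foo_defaults and foo_ndefaults are hypothetical, and the matching declaration is assumed to be added to <linux/regmap.h> by this series:

#include <linux/regmap.h>

static void foo_prepare_regmap_config(struct regmap_config *cfg,
				      struct reg_default *foo_defaults,
				      unsigned int foo_ndefaults)
{
	/* Sort in place so the register cache sees defaults in ascending order. */
	regcache_sort_defaults(foo_defaults, foo_ndefaults);
	cfg->reg_defaults = foo_defaults;
	cfg->num_reg_defaults = foo_ndefaults;
}
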
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index c05fe27a96b6..674527d770dc 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -45,8 +45,6 @@ enum {
Lo_deleting,
};
-struct loop_func_table;
-
struct loop_device {
int lo_number;
loff_t lo_offset;
@@ -54,7 +52,8 @@ struct loop_device {
int lo_flags;
char lo_file_name[LO_NAME_SIZE];
- struct file * lo_backing_file;
+ struct file *lo_backing_file;
+ unsigned int lo_min_dio_size;
struct block_device *lo_device;
gfp_t old_gfp_mask;
@@ -169,29 +168,14 @@ static loff_t get_loop_size(struct loop_device *lo, struct file *file)
* of backing device, and the logical block size of loop is bigger than that of
* the backing device.
*/
-static bool lo_bdev_can_use_dio(struct loop_device *lo,
- struct block_device *backing_bdev)
-{
- unsigned int sb_bsize = bdev_logical_block_size(backing_bdev);
-
- if (queue_logical_block_size(lo->lo_queue) < sb_bsize)
- return false;
- if (lo->lo_offset & (sb_bsize - 1))
- return false;
- return true;
-}
-
static bool lo_can_use_dio(struct loop_device *lo)
{
- struct inode *inode = lo->lo_backing_file->f_mapping->host;
-
if (!(lo->lo_backing_file->f_mode & FMODE_CAN_ODIRECT))
return false;
-
- if (S_ISBLK(inode->i_mode))
- return lo_bdev_can_use_dio(lo, I_BDEV(inode));
- if (inode->i_sb->s_bdev)
- return lo_bdev_can_use_dio(lo, inode->i_sb->s_bdev);
+ if (queue_logical_block_size(lo->lo_queue) < lo->lo_min_dio_size)
+ return false;
+ if (lo->lo_offset & (lo->lo_min_dio_size - 1))
+ return false;
return true;
}
@@ -205,20 +189,12 @@ static bool lo_can_use_dio(struct loop_device *lo)
*/
static inline void loop_update_dio(struct loop_device *lo)
{
- bool dio_in_use = lo->lo_flags & LO_FLAGS_DIRECT_IO;
-
lockdep_assert_held(&lo->lo_mutex);
WARN_ON_ONCE(lo->lo_state == Lo_bound &&
lo->lo_queue->mq_freeze_depth == 0);
- if (lo->lo_backing_file->f_flags & O_DIRECT)
- lo->lo_flags |= LO_FLAGS_DIRECT_IO;
if ((lo->lo_flags & LO_FLAGS_DIRECT_IO) && !lo_can_use_dio(lo))
lo->lo_flags &= ~LO_FLAGS_DIRECT_IO;
-
- /* flush dirty pages before starting to issue direct I/O */
- if ((lo->lo_flags & LO_FLAGS_DIRECT_IO) && !dio_in_use)
- vfs_fsync(lo->lo_backing_file, 0);
}
/**
@@ -541,6 +517,28 @@ static void loop_reread_partitions(struct loop_device *lo)
__func__, lo->lo_number, lo->lo_file_name, rc);
}
+static unsigned int loop_query_min_dio_size(struct loop_device *lo)
+{
+ struct file *file = lo->lo_backing_file;
+ struct block_device *sb_bdev = file->f_mapping->host->i_sb->s_bdev;
+ struct kstat st;
+
+ /*
+ * Use the minimal dio alignment of the file system if provided.
+ */
+ if (!vfs_getattr(&file->f_path, &st, STATX_DIOALIGN, 0) &&
+ (st.result_mask & STATX_DIOALIGN))
+ return st.dio_offset_align;
+
+ /*
+ * In a perfect world this wouldn't be needed, but as of Linux 6.13 only
+ * a handful of file systems support the STATX_DIOALIGN flag.
+ */
+ if (sb_bdev)
+ return bdev_logical_block_size(sb_bdev);
+ return SECTOR_SIZE;
+}
+
static inline int is_loop_device(struct file *file)
{
struct inode *i = file->f_mapping->host;
@@ -573,6 +571,17 @@ static int loop_validate_file(struct file *file, struct block_device *bdev)
return 0;
}
+static void loop_assign_backing_file(struct loop_device *lo, struct file *file)
+{
+ lo->lo_backing_file = file;
+ lo->old_gfp_mask = mapping_gfp_mask(file->f_mapping);
+ mapping_set_gfp_mask(file->f_mapping,
+ lo->old_gfp_mask & ~(__GFP_IO | __GFP_FS));
+ if (lo->lo_backing_file->f_flags & O_DIRECT)
+ lo->lo_flags |= LO_FLAGS_DIRECT_IO;
+ lo->lo_min_dio_size = loop_query_min_dio_size(lo);
+}
+
/*
* loop_change_fd switched the backing store of a loopback device to
* a new file. This is useful for operating system installers to free up
@@ -622,14 +631,18 @@ static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
if (get_loop_size(lo, file) != get_loop_size(lo, old_file))
goto out_err;
+ /*
+ * We might switch to direct I/O mode for the loop device; write back
+ * all dirty data in the page cache now so that the individual I/O
+ * operations don't have to do that.
+ */
+ vfs_fsync(file, 0);
+
/* and ... switch */
disk_force_media_change(lo->lo_disk);
memflags = blk_mq_freeze_queue(lo->lo_queue);
mapping_set_gfp_mask(old_file->f_mapping, lo->old_gfp_mask);
- lo->lo_backing_file = file;
- lo->old_gfp_mask = mapping_gfp_mask(file->f_mapping);
- mapping_set_gfp_mask(file->f_mapping,
- lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS));
+ loop_assign_backing_file(lo, file);
loop_update_dio(lo);
blk_mq_unfreeze_queue(lo->lo_queue, memflags);
partscan = lo->lo_flags & LO_FLAGS_PARTSCAN;
@@ -971,12 +984,11 @@ loop_set_status_from_info(struct loop_device *lo,
return 0;
}
-static unsigned int loop_default_blocksize(struct loop_device *lo,
- struct block_device *backing_bdev)
+static unsigned int loop_default_blocksize(struct loop_device *lo)
{
- /* In case of direct I/O, match underlying block size */
- if ((lo->lo_backing_file->f_flags & O_DIRECT) && backing_bdev)
- return bdev_logical_block_size(backing_bdev);
+ /* In case of direct I/O, match underlying minimum I/O size */
+ if (lo->lo_flags & LO_FLAGS_DIRECT_IO)
+ return lo->lo_min_dio_size;
return SECTOR_SIZE;
}
@@ -994,7 +1006,7 @@ static void loop_update_limits(struct loop_device *lo, struct queue_limits *lim,
backing_bdev = inode->i_sb->s_bdev;
if (!bsize)
- bsize = loop_default_blocksize(lo, backing_bdev);
+ bsize = loop_default_blocksize(lo);
loop_get_discard_config(lo, &granularity, &max_discard_sectors);
@@ -1019,7 +1031,6 @@ static int loop_configure(struct loop_device *lo, blk_mode_t mode,
const struct loop_config *config)
{
struct file *file = fget(config->fd);
- struct address_space *mapping;
struct queue_limits lim;
int error;
loff_t size;
@@ -1055,8 +1066,6 @@ static int loop_configure(struct loop_device *lo, blk_mode_t mode,
if (error)
goto out_unlock;
- mapping = file->f_mapping;
-
if ((config->info.lo_flags & ~LOOP_CONFIGURE_SETTABLE_FLAGS) != 0) {
error = -EINVAL;
goto out_unlock;
@@ -1088,9 +1097,7 @@ static int loop_configure(struct loop_device *lo, blk_mode_t mode,
set_disk_ro(lo->lo_disk, (lo->lo_flags & LO_FLAGS_READ_ONLY) != 0);
lo->lo_device = bdev;
- lo->lo_backing_file = file;
- lo->old_gfp_mask = mapping_gfp_mask(mapping);
- mapping_set_gfp_mask(mapping, lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS));
+ loop_assign_backing_file(lo, file);
lim = queue_limits_start_update(lo->lo_queue);
loop_update_limits(lo, &lim, config->block_size);
@@ -1099,6 +1106,13 @@ static int loop_configure(struct loop_device *lo, blk_mode_t mode,
if (error)
goto out_unlock;
+ /*
+ * We might switch to direct I/O mode for the loop device; write back
+ * all dirty data in the page cache now so that the individual I/O
+ * operations don't have to do that.
+ */
+ vfs_fsync(file, 0);
+
loop_update_dio(lo);
loop_sysfs_init(lo);
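
As a rough illustration of the direct I/O gating above (not part of the patch itself): lo_min_dio_size now comes from STATX_DIOALIGN when the backing file system reports it, and otherwise falls back to the backing block device's logical block size; lo_can_use_dio() then keeps LO_FLAGS_DIRECT_IO only when the loop block size and offset are compatible with that value. The same check, open-coded with hypothetical values in the comment:

#include <linux/types.h>

static bool example_loop_can_use_dio(unsigned int loop_block_size,
				     loff_t loop_offset,
				     unsigned int min_dio_size)
{
	/* e.g. min_dio_size = 512 from STATX_DIOALIGN: a 4096-byte loop block
	 * size at offset 0 passes, while an offset of 256 would not. */
	return loop_block_size >= min_dio_size &&
	       !(loop_offset & (min_dio_size - 1));
}
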
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index 95361099a2dc..0d619df03fa9 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -2056,7 +2056,7 @@ static void mtip_hw_submit_io(struct driver_data *dd, struct request *rq,
unsigned int nents;
/* Map the scatter list for DMA access */
- nents = blk_rq_map_sg(hctx->queue, rq, command->sg);
+ nents = blk_rq_map_sg(rq, command->sg);
nents = dma_map_sg(&dd->pdev->dev, command->sg, nents, dma_dir);
prefetch(&port->flags);
diff --git a/drivers/block/null_blk/main.c b/drivers/block/null_blk/main.c
index d94ef37480bd..3bb9cee0a9b5 100644
--- a/drivers/block/null_blk/main.c
+++ b/drivers/block/null_blk/main.c
@@ -473,6 +473,8 @@ NULLB_DEVICE_ATTR(shared_tags, bool, NULL);
NULLB_DEVICE_ATTR(shared_tag_bitmap, bool, NULL);
NULLB_DEVICE_ATTR(fua, bool, NULL);
NULLB_DEVICE_ATTR(rotational, bool, NULL);
+NULLB_DEVICE_ATTR(badblocks_once, bool, NULL);
+NULLB_DEVICE_ATTR(badblocks_partial_io, bool, NULL);
static ssize_t nullb_device_power_show(struct config_item *item, char *page)
{
@@ -559,14 +561,14 @@ static ssize_t nullb_device_badblocks_store(struct config_item *item,
goto out;
/* enable badblocks */
cmpxchg(&t_dev->badblocks.shift, -1, 0);
- if (buf[0] == '+')
- ret = badblocks_set(&t_dev->badblocks, start,
- end - start + 1, 1);
- else
- ret = badblocks_clear(&t_dev->badblocks, start,
- end - start + 1);
- if (ret == 0)
+ if (buf[0] == '+') {
+ if (badblocks_set(&t_dev->badblocks, start,
+ end - start + 1, 1))
+ ret = count;
+ } else if (badblocks_clear(&t_dev->badblocks, start,
+ end - start + 1)) {
ret = count;
+ }
out:
kfree(orig);
return ret;
@@ -592,41 +594,43 @@ static ssize_t nullb_device_zone_offline_store(struct config_item *item,
CONFIGFS_ATTR_WO(nullb_device_, zone_offline);
static struct configfs_attribute *nullb_device_attrs[] = {
- &nullb_device_attr_size,
+ &nullb_device_attr_badblocks,
+ &nullb_device_attr_badblocks_once,
+ &nullb_device_attr_badblocks_partial_io,
+ &nullb_device_attr_blocking,
+ &nullb_device_attr_blocksize,
+ &nullb_device_attr_cache_size,
&nullb_device_attr_completion_nsec,
- &nullb_device_attr_submit_queues,
- &nullb_device_attr_poll_queues,
+ &nullb_device_attr_discard,
+ &nullb_device_attr_fua,
&nullb_device_attr_home_node,
- &nullb_device_attr_queue_mode,
- &nullb_device_attr_blocksize,
- &nullb_device_attr_max_sectors,
- &nullb_device_attr_irqmode,
&nullb_device_attr_hw_queue_depth,
&nullb_device_attr_index,
- &nullb_device_attr_blocking,
- &nullb_device_attr_use_per_node_hctx,
- &nullb_device_attr_power,
- &nullb_device_attr_memory_backed,
- &nullb_device_attr_discard,
+ &nullb_device_attr_irqmode,
+ &nullb_device_attr_max_sectors,
&nullb_device_attr_mbps,
- &nullb_device_attr_cache_size,
- &nullb_device_attr_badblocks,
- &nullb_device_attr_zoned,
- &nullb_device_attr_zone_size,
+ &nullb_device_attr_memory_backed,
+ &nullb_device_attr_no_sched,
+ &nullb_device_attr_poll_queues,
+ &nullb_device_attr_power,
+ &nullb_device_attr_queue_mode,
+ &nullb_device_attr_rotational,
+ &nullb_device_attr_shared_tag_bitmap,
+ &nullb_device_attr_shared_tags,
+ &nullb_device_attr_size,
+ &nullb_device_attr_submit_queues,
+ &nullb_device_attr_use_per_node_hctx,
+ &nullb_device_attr_virt_boundary,
+ &nullb_device_attr_zone_append_max_sectors,
&nullb_device_attr_zone_capacity,
- &nullb_device_attr_zone_nr_conv,
- &nullb_device_attr_zone_max_open,
+ &nullb_device_attr_zone_full,
&nullb_device_attr_zone_max_active,
- &nullb_device_attr_zone_append_max_sectors,
- &nullb_device_attr_zone_readonly,
+ &nullb_device_attr_zone_max_open,
+ &nullb_device_attr_zone_nr_conv,
&nullb_device_attr_zone_offline,
- &nullb_device_attr_zone_full,
- &nullb_device_attr_virt_boundary,
- &nullb_device_attr_no_sched,
- &nullb_device_attr_shared_tags,
- &nullb_device_attr_shared_tag_bitmap,
- &nullb_device_attr_fua,
- &nullb_device_attr_rotational,
+ &nullb_device_attr_zone_readonly,
+ &nullb_device_attr_zone_size,
+ &nullb_device_attr_zoned,
NULL,
};
@@ -704,16 +708,28 @@ nullb_group_drop_item(struct config_group *group, struct config_item *item)
static ssize_t memb_group_features_show(struct config_item *item, char *page)
{
- return snprintf(page, PAGE_SIZE,
- "badblocks,blocking,blocksize,cache_size,fua,"
- "completion_nsec,discard,home_node,hw_queue_depth,"
- "irqmode,max_sectors,mbps,memory_backed,no_sched,"
- "poll_queues,power,queue_mode,shared_tag_bitmap,"
- "shared_tags,size,submit_queues,use_per_node_hctx,"
- "virt_boundary,zoned,zone_capacity,zone_max_active,"
- "zone_max_open,zone_nr_conv,zone_offline,zone_readonly,"
- "zone_size,zone_append_max_sectors,zone_full,"
- "rotational\n");
+
+ struct configfs_attribute **entry;
+ char delimiter = ',';
+ size_t left = PAGE_SIZE;
+ size_t written = 0;
+ int ret;
+
+ for (entry = &nullb_device_attrs[0]; *entry && left > 0; entry++) {
+ if (!*(entry + 1))
+ delimiter = '\n';
+ ret = snprintf(page + written, left, "%s%c", (*entry)->ca_name,
+ delimiter);
+ if (ret >= left) {
+ WARN_ONCE(1, "Too many null_blk features to print\n");
+ memzero_explicit(page, PAGE_SIZE);
+ return -ENOBUFS;
+ }
+ left -= ret;
+ written += ret;
+ }
+
+ return written;
}
CONFIGFS_ATTR_RO(memb_group_, features);
@@ -1249,25 +1265,37 @@ static int null_transfer(struct nullb *nullb, struct page *page,
return err;
}
-static blk_status_t null_handle_rq(struct nullb_cmd *cmd)
+/*
+ * Transfer data for the given request. The transfer size is capped by the
+ * nr_sectors argument.
+ */
+static blk_status_t null_handle_data_transfer(struct nullb_cmd *cmd,
+ sector_t nr_sectors)
{
struct request *rq = blk_mq_rq_from_pdu(cmd);
struct nullb *nullb = cmd->nq->dev->nullb;
int err = 0;
unsigned int len;
sector_t sector = blk_rq_pos(rq);
+ unsigned int max_bytes = nr_sectors << SECTOR_SHIFT;
+ unsigned int transferred_bytes = 0;
struct req_iterator iter;
struct bio_vec bvec;
spin_lock_irq(&nullb->lock);
rq_for_each_segment(bvec, rq, iter) {
len = bvec.bv_len;
+ if (transferred_bytes + len > max_bytes)
+ len = max_bytes - transferred_bytes;
err = null_transfer(nullb, bvec.bv_page, len, bvec.bv_offset,
op_is_write(req_op(rq)), sector,
rq->cmd_flags & REQ_FUA);
if (err)
break;
sector += len >> SECTOR_SHIFT;
+ transferred_bytes += len;
+ if (transferred_bytes >= max_bytes)
+ break;
}
spin_unlock_irq(&nullb->lock);
@@ -1295,31 +1323,51 @@ static inline blk_status_t null_handle_throttled(struct nullb_cmd *cmd)
return sts;
}
-static inline blk_status_t null_handle_badblocks(struct nullb_cmd *cmd,
- sector_t sector,
- sector_t nr_sectors)
+/*
+ * Check if the command should fail because of bad blocks. If so, return
+ * BLK_STS_IOERR and the number of partial I/O sectors to be written or read,
+ * which may be less than the requested number of sectors.
+ *
+ * @cmd: The command to handle.
+ * @sector: The start sector for I/O.
+ * @nr_sectors: Specifies number of sectors to write or read, and returns the
+ * number of sectors to be written or read.
+ */
+blk_status_t null_handle_badblocks(struct nullb_cmd *cmd, sector_t sector,
+ unsigned int *nr_sectors)
{
struct badblocks *bb = &cmd->nq->dev->badblocks;
- sector_t first_bad;
- int bad_sectors;
+ struct nullb_device *dev = cmd->nq->dev;
+ unsigned int block_sectors = dev->blocksize >> SECTOR_SHIFT;
+ sector_t first_bad, bad_sectors;
+ unsigned int partial_io_sectors = 0;
- if (badblocks_check(bb, sector, nr_sectors, &first_bad, &bad_sectors))
- return BLK_STS_IOERR;
+ if (!badblocks_check(bb, sector, *nr_sectors, &first_bad, &bad_sectors))
+ return BLK_STS_OK;
- return BLK_STS_OK;
+ if (cmd->nq->dev->badblocks_once)
+ badblocks_clear(bb, first_bad, bad_sectors);
+
+ if (cmd->nq->dev->badblocks_partial_io) {
+ if (!IS_ALIGNED(first_bad, block_sectors))
+ first_bad = ALIGN_DOWN(first_bad, block_sectors);
+ if (sector < first_bad)
+ partial_io_sectors = first_bad - sector;
+ }
+ *nr_sectors = partial_io_sectors;
+
+ return BLK_STS_IOERR;
}
-static inline blk_status_t null_handle_memory_backed(struct nullb_cmd *cmd,
- enum req_op op,
- sector_t sector,
- sector_t nr_sectors)
+blk_status_t null_handle_memory_backed(struct nullb_cmd *cmd, enum req_op op,
+ sector_t sector, sector_t nr_sectors)
{
struct nullb_device *dev = cmd->nq->dev;
if (op == REQ_OP_DISCARD)
return null_handle_discard(dev, sector, nr_sectors);
- return null_handle_rq(cmd);
+ return null_handle_data_transfer(cmd, nr_sectors);
}
static void nullb_zero_read_cmd_buffer(struct nullb_cmd *cmd)
@@ -1366,18 +1414,19 @@ blk_status_t null_process_cmd(struct nullb_cmd *cmd, enum req_op op,
sector_t sector, unsigned int nr_sectors)
{
struct nullb_device *dev = cmd->nq->dev;
+ blk_status_t badblocks_ret = BLK_STS_OK;
blk_status_t ret;
- if (dev->badblocks.shift != -1) {
- ret = null_handle_badblocks(cmd, sector, nr_sectors);
+ if (dev->badblocks.shift != -1)
+ badblocks_ret = null_handle_badblocks(cmd, sector, &nr_sectors);
+
+ if (dev->memory_backed && nr_sectors) {
+ ret = null_handle_memory_backed(cmd, op, sector, nr_sectors);
if (ret != BLK_STS_OK)
return ret;
}
- if (dev->memory_backed)
- return null_handle_memory_backed(cmd, op, sector, nr_sectors);
-
- return BLK_STS_OK;
+ return badblocks_ret;
}
static void null_handle_cmd(struct nullb_cmd *cmd, sector_t sector,
@@ -1426,8 +1475,7 @@ static void nullb_setup_bwtimer(struct nullb *nullb)
{
ktime_t timer_interval = ktime_set(0, TIMER_INTERVAL);
- hrtimer_init(&nullb->bw_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- nullb->bw_timer.function = nullb_bwtimer_fn;
+ hrtimer_setup(&nullb->bw_timer, nullb_bwtimer_fn, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
atomic_long_set(&nullb->cur_bytes, mb_per_tick(nullb->dev->mbps));
hrtimer_start(&nullb->bw_timer, timer_interval, HRTIMER_MODE_REL);
}
@@ -1549,8 +1597,8 @@ static int null_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
cmd = blk_mq_rq_to_pdu(req);
cmd->error = null_process_cmd(cmd, req_op(req), blk_rq_pos(req),
blk_rq_sectors(req));
- if (!blk_mq_add_to_batch(req, iob, (__force int) cmd->error,
- blk_mq_end_request_batch))
+ if (!blk_mq_add_to_batch(req, iob, cmd->error != BLK_STS_OK,
+ blk_mq_end_request_batch))
blk_mq_end_request(req, cmd->error);
nr++;
}
@@ -1604,8 +1652,8 @@ static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING);
if (!is_poll && nq->dev->irqmode == NULL_IRQ_TIMER) {
- hrtimer_init(&cmd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- cmd->timer.function = null_cmd_timer_expired;
+ hrtimer_setup(&cmd->timer, null_cmd_timer_expired, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
}
cmd->error = BLK_STS_OK;
cmd->nq = nq;
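
As a rough illustration of the partial-I/O trimming above (not part of the patch itself): with badblocks_partial_io set, null_handle_badblocks() aligns the first bad sector down to the device block size and only the sectors in front of it are transferred before the command fails. The arithmetic, mirrored in a standalone helper with example values in the comment:

#include <linux/align.h>
#include <linux/types.h>

static unsigned int example_partial_io_sectors(sector_t sector, sector_t first_bad,
					       unsigned int block_sectors)
{
	if (!IS_ALIGNED(first_bad, block_sectors))
		first_bad = ALIGN_DOWN(first_bad, block_sectors);

	/* e.g. sector 0, first bad sector 10, 8 sectors per block: first_bad
	 * rounds down to 8, so 8 sectors still transfer and the command then
	 * completes with BLK_STS_IOERR. */
	return sector < first_bad ? first_bad - sector : 0;
}
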
diff --git a/drivers/block/null_blk/null_blk.h b/drivers/block/null_blk/null_blk.h
index 6f9fe6171087..7bb6128dbaaf 100644
--- a/drivers/block/null_blk/null_blk.h
+++ b/drivers/block/null_blk/null_blk.h
@@ -63,6 +63,8 @@ struct nullb_device {
unsigned long flags; /* device flags */
unsigned int curr_cache;
struct badblocks badblocks;
+ bool badblocks_once;
+ bool badblocks_partial_io;
unsigned int nr_zones;
unsigned int nr_zones_imp_open;
@@ -131,6 +133,10 @@ blk_status_t null_handle_discard(struct nullb_device *dev, sector_t sector,
sector_t nr_sectors);
blk_status_t null_process_cmd(struct nullb_cmd *cmd, enum req_op op,
sector_t sector, unsigned int nr_sectors);
+blk_status_t null_handle_badblocks(struct nullb_cmd *cmd, sector_t sector,
+ unsigned int *nr_sectors);
+blk_status_t null_handle_memory_backed(struct nullb_cmd *cmd, enum req_op op,
+ sector_t sector, sector_t nr_sectors);
#ifdef CONFIG_BLK_DEV_ZONED
int null_init_zoned_dev(struct nullb_device *dev, struct queue_limits *lim);
diff --git a/drivers/block/null_blk/zoned.c b/drivers/block/null_blk/zoned.c
index 0d5f9bf95229..4e5728f45989 100644
--- a/drivers/block/null_blk/zoned.c
+++ b/drivers/block/null_blk/zoned.c
@@ -353,6 +353,7 @@ static blk_status_t null_zone_write(struct nullb_cmd *cmd, sector_t sector,
struct nullb_device *dev = cmd->nq->dev;
unsigned int zno = null_zone_no(dev, sector);
struct nullb_zone *zone = &dev->zones[zno];
+ blk_status_t badblocks_ret = BLK_STS_OK;
blk_status_t ret;
trace_nullb_zone_op(cmd, zno, zone->cond);
@@ -412,9 +413,20 @@ static blk_status_t null_zone_write(struct nullb_cmd *cmd, sector_t sector,
zone->cond = BLK_ZONE_COND_IMP_OPEN;
}
- ret = null_process_cmd(cmd, REQ_OP_WRITE, sector, nr_sectors);
- if (ret != BLK_STS_OK)
- goto unlock_zone;
+ if (dev->badblocks.shift != -1) {
+ badblocks_ret = null_handle_badblocks(cmd, sector, &nr_sectors);
+ if (badblocks_ret != BLK_STS_OK && !nr_sectors) {
+ ret = badblocks_ret;
+ goto unlock_zone;
+ }
+ }
+
+ if (dev->memory_backed) {
+ ret = null_handle_memory_backed(cmd, REQ_OP_WRITE, sector,
+ nr_sectors);
+ if (ret != BLK_STS_OK)
+ goto unlock_zone;
+ }
zone->wp += nr_sectors;
if (zone->wp == zone->start + zone->capacity) {
@@ -429,7 +441,7 @@ static blk_status_t null_zone_write(struct nullb_cmd *cmd, sector_t sector,
zone->cond = BLK_ZONE_COND_FULL;
}
- ret = BLK_STS_OK;
+ ret = badblocks_ret;
unlock_zone:
null_unlock_zone(dev, zone);
diff --git a/drivers/block/rnbd/rnbd-clt.c b/drivers/block/rnbd/rnbd-clt.c
index 82467ecde7ec..15627417f12e 100644
--- a/drivers/block/rnbd/rnbd-clt.c
+++ b/drivers/block/rnbd/rnbd-clt.c
@@ -1010,7 +1010,7 @@ static int rnbd_client_xfer_request(struct rnbd_clt_dev *dev,
* See queue limits.
*/
if ((req_op(rq) != REQ_OP_DISCARD) && (req_op(rq) != REQ_OP_WRITE_ZEROES))
- sg_cnt = blk_rq_map_sg(dev->queue, rq, iu->sgt.sgl);
+ sg_cnt = blk_rq_map_sg(rq, iu->sgt.sgl);
if (sg_cnt == 0)
sg_mark_end(&iu->sgt.sgl[0]);
diff --git a/drivers/block/sunvdc.c b/drivers/block/sunvdc.c
index 282f81616a78..2b33fb5b949b 100644
--- a/drivers/block/sunvdc.c
+++ b/drivers/block/sunvdc.c
@@ -485,7 +485,7 @@ static int __send_request(struct request *req)
}
sg_init_table(sg, port->ring_cookies);
- nsg = blk_rq_map_sg(req->q, req, sg);
+ nsg = blk_rq_map_sg(req, sg);
len = 0;
for (i = 0; i < nsg; i++)
diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c
index af5a4ff4bd3d..c060da409ed8 100644
--- a/drivers/block/ublk_drv.c
+++ b/drivers/block/ublk_drv.c
@@ -73,11 +73,10 @@
/* All UBLK_PARAM_TYPE_* should be included here */
#define UBLK_PARAM_TYPE_ALL \
(UBLK_PARAM_TYPE_BASIC | UBLK_PARAM_TYPE_DISCARD | \
- UBLK_PARAM_TYPE_DEVT | UBLK_PARAM_TYPE_ZONED)
+ UBLK_PARAM_TYPE_DEVT | UBLK_PARAM_TYPE_ZONED | \
+ UBLK_PARAM_TYPE_DMA_ALIGN)
struct ublk_rq_data {
- struct llist_node node;
-
struct kref ref;
};
@@ -144,8 +143,6 @@ struct ublk_queue {
struct task_struct *ubq_daemon;
char *io_cmd_buf;
- struct llist_head io_cmds;
-
unsigned long io_addr; /* mapped vm address */
unsigned int max_io_sz;
bool force_abort;
@@ -494,15 +491,17 @@ static wait_queue_head_t ublk_idr_wq; /* wait until one idr is freed */
static DEFINE_MUTEX(ublk_ctl_mutex);
+
+#define UBLK_MAX_UBLKS UBLK_MINORS
+
/*
- * Max ublk devices allowed to add
+ * Max unprivileged ublk devices allowed to add
*
* It can be extended to one per-user limit in future or even controlled
* by cgroup.
*/
-#define UBLK_MAX_UBLKS UBLK_MINORS
-static unsigned int ublks_max = 64;
-static unsigned int ublks_added; /* protected by ublk_ctl_mutex */
+static unsigned int unprivileged_ublks_max = 64;
+static unsigned int unprivileged_ublks_added; /* protected by ublk_ctl_mutex */
static struct miscdevice ublk_misc;
@@ -573,6 +572,16 @@ static int ublk_validate_params(const struct ublk_device *ub)
else if (ublk_dev_is_zoned(ub))
return -EINVAL;
+ if (ub->params.types & UBLK_PARAM_TYPE_DMA_ALIGN) {
+ const struct ublk_param_dma_align *p = &ub->params.dma;
+
+ if (p->alignment >= PAGE_SIZE)
+ return -EINVAL;
+
+ if (!is_power_of_2(p->alignment + 1))
+ return -EINVAL;
+ }
+
return 0;
}
@@ -1100,7 +1109,7 @@ static void ublk_complete_rq(struct kref *ref)
}
/*
- * Since __ublk_rq_task_work always fails requests immediately during
+ * Since ublk_rq_task_work_cb always fails requests immediately during
* exiting, __ublk_fail_req() is only called from abort context during
* exiting. So lock is unnecessary.
*
@@ -1146,11 +1155,14 @@ static inline void __ublk_abort_rq(struct ublk_queue *ubq,
blk_mq_end_request(rq, BLK_STS_IOERR);
}
-static inline void __ublk_rq_task_work(struct request *req,
- unsigned issue_flags)
+static void ublk_rq_task_work_cb(struct io_uring_cmd *cmd,
+ unsigned int issue_flags)
{
- struct ublk_queue *ubq = req->mq_hctx->driver_data;
- int tag = req->tag;
+ struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd);
+ struct ublk_queue *ubq = pdu->ubq;
+ int tag = pdu->tag;
+ struct request *req = blk_mq_tag_to_rq(
+ ubq->dev->tag_set.tags[ubq->q_id], tag);
struct ublk_io *io = &ubq->ios[tag];
unsigned int mapped_bytes;
@@ -1225,34 +1237,11 @@ static inline void __ublk_rq_task_work(struct request *req,
ubq_complete_io_cmd(io, UBLK_IO_RES_OK, issue_flags);
}
-static inline void ublk_forward_io_cmds(struct ublk_queue *ubq,
- unsigned issue_flags)
-{
- struct llist_node *io_cmds = llist_del_all(&ubq->io_cmds);
- struct ublk_rq_data *data, *tmp;
-
- io_cmds = llist_reverse_order(io_cmds);
- llist_for_each_entry_safe(data, tmp, io_cmds, node)
- __ublk_rq_task_work(blk_mq_rq_from_pdu(data), issue_flags);
-}
-
-static void ublk_rq_task_work_cb(struct io_uring_cmd *cmd, unsigned issue_flags)
-{
- struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd);
- struct ublk_queue *ubq = pdu->ubq;
-
- ublk_forward_io_cmds(ubq, issue_flags);
-}
-
static void ublk_queue_cmd(struct ublk_queue *ubq, struct request *rq)
{
- struct ublk_rq_data *data = blk_mq_rq_to_pdu(rq);
-
- if (llist_add(&data->node, &ubq->io_cmds)) {
- struct ublk_io *io = &ubq->ios[rq->tag];
+ struct ublk_io *io = &ubq->ios[rq->tag];
- io_uring_cmd_complete_in_task(io->cmd, ublk_rq_task_work_cb);
- }
+ io_uring_cmd_complete_in_task(io->cmd, ublk_rq_task_work_cb);
}
static enum blk_eh_timer_return ublk_timeout(struct request *rq)
@@ -1445,7 +1434,7 @@ static void ublk_abort_queue(struct ublk_device *ub, struct ublk_queue *ubq)
struct request *rq;
/*
- * Either we fail the request or ublk_rq_task_work_fn
+ * Either we fail the request or ublk_rq_task_work_cb
* will do it
*/
rq = blk_mq_tag_to_rq(ub->tag_set.tags[ubq->q_id], i);
@@ -1911,10 +1900,9 @@ static int __ublk_ch_uring_cmd(struct io_uring_cmd *cmd,
return -EIOCBQUEUED;
out:
- io_uring_cmd_done(cmd, ret, 0, issue_flags);
pr_devel("%s: complete: cmd op %d, tag %d ret %x io_flags %x\n",
__func__, cmd_op, tag, ret, io->flags);
- return -EIOCBQUEUED;
+ return ret;
}
static inline struct request *__ublk_check_and_get_req(struct ublk_device *ub,
@@ -1970,7 +1958,10 @@ static inline int ublk_ch_uring_cmd_local(struct io_uring_cmd *cmd,
static void ublk_ch_uring_cmd_cb(struct io_uring_cmd *cmd,
unsigned int issue_flags)
{
- ublk_ch_uring_cmd_local(cmd, issue_flags);
+ int ret = ublk_ch_uring_cmd_local(cmd, issue_flags);
+
+ if (ret != -EIOCBQUEUED)
+ io_uring_cmd_done(cmd, ret, 0, issue_flags);
}
static int ublk_ch_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
@@ -2235,7 +2226,8 @@ static int ublk_add_chdev(struct ublk_device *ub)
if (ret)
goto fail;
- ublks_added++;
+ if (ub->dev_info.flags & UBLK_F_UNPRIVILEGED_DEV)
+ unprivileged_ublks_added++;
return 0;
fail:
put_device(dev);
@@ -2264,11 +2256,16 @@ static int ublk_add_tag_set(struct ublk_device *ub)
static void ublk_remove(struct ublk_device *ub)
{
+ bool unprivileged;
+
ublk_stop_dev(ub);
cancel_work_sync(&ub->nosrv_work);
cdev_device_del(&ub->cdev, &ub->cdev_dev);
+ unprivileged = ub->dev_info.flags & UBLK_F_UNPRIVILEGED_DEV;
ublk_put_device(ub);
- ublks_added--;
+
+ if (unprivileged)
+ unprivileged_ublks_added--;
}
static struct ublk_device *ublk_get_device_from_id(int idx)
@@ -2343,6 +2340,9 @@ static int ublk_ctrl_start_dev(struct ublk_device *ub, struct io_uring_cmd *cmd)
if (ub->params.basic.attrs & UBLK_ATTR_ROTATIONAL)
lim.features |= BLK_FEAT_ROTATIONAL;
+ if (ub->params.types & UBLK_PARAM_TYPE_DMA_ALIGN)
+ lim.dma_alignment = ub->params.dma.alignment;
+
if (wait_for_completion_interruptible(&ub->completion) != 0)
return -EINTR;
@@ -2530,7 +2530,8 @@ static int ublk_ctrl_add_dev(struct io_uring_cmd *cmd)
return ret;
ret = -EACCES;
- if (ublks_added >= ublks_max)
+ if ((info.flags & UBLK_F_UNPRIVILEGED_DEV) &&
+ unprivileged_ublks_added >= unprivileged_ublks_max)
goto out_unlock;
ret = -ENOMEM;
@@ -2757,9 +2758,12 @@ static int ublk_ctrl_set_params(struct ublk_device *ub,
if (ph.len > sizeof(struct ublk_params))
ph.len = sizeof(struct ublk_params);
- /* parameters can only be changed when device isn't live */
mutex_lock(&ub->mutex);
- if (ub->dev_info.state == UBLK_S_DEV_LIVE) {
+ if (test_bit(UB_STATE_USED, &ub->state)) {
+ /*
+ * Parameters can only be changed when the device hasn't
+ * been started yet.
+ */
ret = -EACCES;
} else if (copy_from_user(&ub->params, argp, ph.len)) {
ret = -EFAULT;
@@ -3098,10 +3102,9 @@ static int ublk_ctrl_uring_cmd(struct io_uring_cmd *cmd,
if (ub)
ublk_put_device(ub);
out:
- io_uring_cmd_done(cmd, ret, 0, issue_flags);
pr_devel("%s: cmd done ret %d cmd_op %x, dev id %d qid %d\n",
__func__, ret, cmd->cmd_op, header->dev_id, header->queue_id);
- return -EIOCBQUEUED;
+ return ret;
}
static const struct file_operations ublk_ctl_fops = {
@@ -3165,23 +3168,26 @@ static void __exit ublk_exit(void)
module_init(ublk_init);
module_exit(ublk_exit);
-static int ublk_set_max_ublks(const char *buf, const struct kernel_param *kp)
+static int ublk_set_max_unprivileged_ublks(const char *buf,
+ const struct kernel_param *kp)
{
return param_set_uint_minmax(buf, kp, 0, UBLK_MAX_UBLKS);
}
-static int ublk_get_max_ublks(char *buf, const struct kernel_param *kp)
+static int ublk_get_max_unprivileged_ublks(char *buf,
+ const struct kernel_param *kp)
{
- return sysfs_emit(buf, "%u\n", ublks_max);
+ return sysfs_emit(buf, "%u\n", unprivileged_ublks_max);
}
-static const struct kernel_param_ops ublk_max_ublks_ops = {
- .set = ublk_set_max_ublks,
- .get = ublk_get_max_ublks,
+static const struct kernel_param_ops ublk_max_unprivileged_ublks_ops = {
+ .set = ublk_set_max_unprivileged_ublks,
+ .get = ublk_get_max_unprivileged_ublks,
};
-module_param_cb(ublks_max, &ublk_max_ublks_ops, &ublks_max, 0644);
-MODULE_PARM_DESC(ublks_max, "max number of ublk devices allowed to add(default: 64)");
+module_param_cb(ublks_max, &ublk_max_unprivileged_ublks_ops,
+ &unprivileged_ublks_max, 0644);
+MODULE_PARM_DESC(ublks_max, "max number of unprivileged ublk devices allowed to add (default: 64)");
MODULE_AUTHOR("Ming Lei <ming.lei@redhat.com>");
MODULE_DESCRIPTION("Userspace block device");
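
As a rough illustration of the UBLK_PARAM_TYPE_DMA_ALIGN validation above (not part of the patch itself): the alignment field is a mask, which is why alignment + 1 has to be a power of two and the value must stay below PAGE_SIZE. A hypothetical ublk server asking for 512-byte DMA alignment would fill the parameters roughly like this before issuing the SET_PARAMS control command:

#include <linux/ublk_cmd.h>

static void example_request_dma_align(struct ublk_params *p)
{
	p->types |= UBLK_PARAM_TYPE_DMA_ALIGN;
	p->dma.alignment = 511;	/* mask: 511 + 1 = 512 is a power of two */
}
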
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 6a61ec35f426..7cffea01d868 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -226,7 +226,7 @@ static int virtblk_map_data(struct blk_mq_hw_ctx *hctx, struct request *req,
if (unlikely(err))
return -ENOMEM;
- return blk_rq_map_sg(hctx->queue, req, vbr->sg_table.sgl);
+ return blk_rq_map_sg(req, vbr->sg_table.sgl);
}
static void virtblk_cleanup_cmd(struct request *req)
@@ -1207,11 +1207,12 @@ static int virtblk_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
while ((vbr = virtqueue_get_buf(vq->vq, &len)) != NULL) {
struct request *req = blk_mq_rq_from_pdu(vbr);
+ u8 status = virtblk_vbr_status(vbr);
found++;
if (!blk_mq_complete_request_remote(req) &&
- !blk_mq_add_to_batch(req, iob, virtblk_vbr_status(vbr),
- virtblk_complete_batch))
+ !blk_mq_add_to_batch(req, iob, status != VIRTIO_BLK_S_OK,
+ virtblk_complete_batch))
virtblk_request_done(req);
}
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index edcd08a9dcef..5babe575c288 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -751,7 +751,7 @@ static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *ri
id = blkif_ring_get_request(rinfo, req, &final_ring_req);
ring_req = &rinfo->shadow[id].req;
- num_sg = blk_rq_map_sg(req->q, req, rinfo->shadow[id].sg);
+ num_sg = blk_rq_map_sg(req, rinfo->shadow[id].sg);
num_grant = 0;
/* Calculate the number of grant used */
for_each_sg(rinfo->shadow[id].sg, sg, num_sg, i)
diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig
index 4ab32abf0f48..7771edf54fb3 100644
--- a/drivers/bluetooth/Kconfig
+++ b/drivers/bluetooth/Kconfig
@@ -56,6 +56,18 @@ config BT_HCIBTUSB_POLL_SYNC
Say Y here to enable USB poll_sync for Bluetooth USB devices by
default.
+config BT_HCIBTUSB_AUTO_ISOC_ALT
+ bool "Automatically adjust alternate setting for Isoc endpoints"
+ depends on BT_HCIBTUSB
+ default y if CHROME_PLATFORMS
+ help
+ Say Y here to automatically adjust the alternate setting for
+ HCI_USER_CHANNEL whenever a SCO link is established.
+
+ When enabled, btusb intercepts the HCI_EV_SYNC_CONN_COMPLETE packets
+ and configures the isoc endpoint alternate setting automatically when
+ HCI_USER_CHANNEL is in use.
+
config BT_HCIBTUSB_BCM
bool "Broadcom protocol support"
depends on BT_HCIBTUSB
diff --git a/drivers/bluetooth/bfusb.c b/drivers/bluetooth/bfusb.c
index cab93935cc7f..0d6ad50da046 100644
--- a/drivers/bluetooth/bfusb.c
+++ b/drivers/bluetooth/bfusb.c
@@ -365,9 +365,8 @@ static void bfusb_rx_complete(struct urb *urb)
buf += 3;
}
- if (count < len) {
+ if (count < len)
bt_dev_err(data->hdev, "block extends over URB buffer ranges");
- }
if ((hdr & 0xe1) == 0xc1)
bfusb_recv_block(data, hdr, buf, len);
diff --git a/drivers/bluetooth/btintel.c b/drivers/bluetooth/btintel.c
index d2540b28bc7a..48e2f400957b 100644
--- a/drivers/bluetooth/btintel.c
+++ b/drivers/bluetooth/btintel.c
@@ -35,6 +35,11 @@ enum {
DSM_SET_RESET_METHOD = 3,
};
+#define BTINTEL_BT_DOMAIN 0x12
+#define BTINTEL_SAR_LEGACY 0
+#define BTINTEL_SAR_INC_PWR 1
+#define BTINTEL_SAR_INC_PWR_SUPPORTED 0
+
#define CMD_WRITE_BOOT_PARAMS 0xfc0e
struct cmd_write_boot_params {
__le32 boot_addr;
@@ -478,6 +483,7 @@ int btintel_version_info_tlv(struct hci_dev *hdev,
case 0x1c: /* Gale Peak (GaP) */
case 0x1d: /* BlazarU (BzrU) */
case 0x1e: /* BlazarI (Bzr) */
+ case 0x1f: /* Scorpious Peak */
break;
default:
bt_dev_err(hdev, "Unsupported Intel hardware variant (0x%x)",
@@ -2756,6 +2762,7 @@ static int btintel_set_dsbr(struct hci_dev *hdev, struct intel_version_tlv *ver)
/* DSBR command needs to be sent for,
* 1. BlazarI or BlazarIW + B0 step product in IML image.
* 2. Gale Peak2 or BlazarU in OP image.
+ * 3. Scorpious Peak in IML image.
*/
switch (cnvi) {
@@ -2771,6 +2778,10 @@ static int btintel_set_dsbr(struct hci_dev *hdev, struct intel_version_tlv *ver)
hdev->bus == HCI_USB)
break;
return 0;
+ case BTINTEL_CNVI_SCP:
+ if (ver->img_type == BTINTEL_IMG_IML)
+ break;
+ return 0;
default:
return 0;
}
@@ -2800,6 +2811,331 @@ static int btintel_set_dsbr(struct hci_dev *hdev, struct intel_version_tlv *ver)
return 0;
}
+#ifdef CONFIG_ACPI
+static acpi_status btintel_evaluate_acpi_method(struct hci_dev *hdev,
+ acpi_string method,
+ union acpi_object **ptr,
+ u8 pkg_size)
+{
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+ union acpi_object *p;
+ acpi_status status;
+ acpi_handle handle;
+
+ handle = ACPI_HANDLE(GET_HCIDEV_DEV(hdev));
+ if (!handle) {
+ bt_dev_dbg(hdev, "ACPI-BT: No ACPI support for Bluetooth device");
+ return AE_NOT_EXIST;
+ }
+
+ status = acpi_evaluate_object(handle, method, NULL, &buffer);
+
+ if (ACPI_FAILURE(status)) {
+ bt_dev_dbg(hdev, "ACPI-BT: ACPI Failure: %s method: %s",
+ acpi_format_exception(status), method);
+ return status;
+ }
+
+ p = buffer.pointer;
+
+ if (p->type != ACPI_TYPE_PACKAGE || p->package.count < pkg_size) {
+ bt_dev_warn(hdev, "ACPI-BT: Invalid object type: %d or package count: %d",
+ p->type, p->package.count);
+ kfree(buffer.pointer);
+ return AE_ERROR;
+ }
+
+ *ptr = buffer.pointer;
+ return 0;
+}
+
+static union acpi_object *btintel_acpi_get_bt_pkg(union acpi_object *buffer)
+{
+ union acpi_object *domain, *bt_pkg;
+ int i;
+
+ for (i = 1; i < buffer->package.count; i++) {
+ bt_pkg = &buffer->package.elements[i];
+ domain = &bt_pkg->package.elements[0];
+ if (domain->type == ACPI_TYPE_INTEGER &&
+ domain->integer.value == BTINTEL_BT_DOMAIN)
+ return bt_pkg;
+ }
+ return ERR_PTR(-ENOENT);
+}
+
+static int btintel_send_sar_ddc(struct hci_dev *hdev, struct btintel_cp_ddc_write *data, u8 len)
+{
+ struct sk_buff *skb;
+
+ skb = __hci_cmd_sync(hdev, 0xfc8b, len, data, HCI_CMD_TIMEOUT);
+ if (IS_ERR(skb)) {
+ bt_dev_warn(hdev, "Failed to send sar ddc id:0x%4.4x (%ld)",
+ le16_to_cpu(data->id), PTR_ERR(skb));
+ return PTR_ERR(skb);
+ }
+ kfree_skb(skb);
+ return 0;
+}
+
+static int btintel_send_edr(struct hci_dev *hdev, struct btintel_cp_ddc_write *cmd,
+ int id, struct btintel_sar_inc_pwr *sar)
+{
+ cmd->len = 5;
+ cmd->id = cpu_to_le16(id);
+ cmd->data[0] = sar->br >> 3;
+ cmd->data[1] = sar->edr2 >> 3;
+ cmd->data[2] = sar->edr3 >> 3;
+ return btintel_send_sar_ddc(hdev, cmd, 6);
+}
+
+static int btintel_send_le(struct hci_dev *hdev, struct btintel_cp_ddc_write *cmd,
+ int id, struct btintel_sar_inc_pwr *sar)
+{
+ cmd->len = 3;
+ cmd->id = cpu_to_le16(id);
+ cmd->data[0] = min3(sar->le, sar->le_lr, sar->le_2mhz) >> 3;
+ return btintel_send_sar_ddc(hdev, cmd, 4);
+}
+
+static int btintel_send_br(struct hci_dev *hdev, struct btintel_cp_ddc_write *cmd,
+ int id, struct btintel_sar_inc_pwr *sar)
+{
+ cmd->len = 3;
+ cmd->id = cpu_to_le16(id);
+ cmd->data[0] = sar->br >> 3;
+ return btintel_send_sar_ddc(hdev, cmd, 4);
+}
+
+static int btintel_send_br_mutual(struct hci_dev *hdev, struct btintel_cp_ddc_write *cmd,
+ int id, struct btintel_sar_inc_pwr *sar)
+{
+ cmd->len = 3;
+ cmd->id = cpu_to_le16(id);
+ cmd->data[0] = sar->br;
+ return btintel_send_sar_ddc(hdev, cmd, 4);
+}
+
+static int btintel_send_edr2(struct hci_dev *hdev, struct btintel_cp_ddc_write *cmd,
+ int id, struct btintel_sar_inc_pwr *sar)
+{
+ cmd->len = 3;
+ cmd->id = cpu_to_le16(id);
+ cmd->data[0] = sar->edr2;
+ return btintel_send_sar_ddc(hdev, cmd, 4);
+}
+
+static int btintel_send_edr3(struct hci_dev *hdev, struct btintel_cp_ddc_write *cmd,
+ int id, struct btintel_sar_inc_pwr *sar)
+{
+ cmd->len = 3;
+ cmd->id = cpu_to_le16(id);
+ cmd->data[0] = sar->edr3;
+ return btintel_send_sar_ddc(hdev, cmd, 4);
+}
+
+static int btintel_set_legacy_sar(struct hci_dev *hdev, struct btintel_sar_inc_pwr *sar)
+{
+ struct btintel_cp_ddc_write *cmd;
+ u8 buffer[64];
+ int ret;
+
+ cmd = (void *)buffer;
+ ret = btintel_send_br(hdev, cmd, 0x0131, sar);
+ if (ret)
+ return ret;
+
+ ret = btintel_send_br(hdev, cmd, 0x0132, sar);
+ if (ret)
+ return ret;
+
+ ret = btintel_send_le(hdev, cmd, 0x0133, sar);
+ if (ret)
+ return ret;
+
+ ret = btintel_send_edr(hdev, cmd, 0x0137, sar);
+ if (ret)
+ return ret;
+
+ ret = btintel_send_edr(hdev, cmd, 0x0138, sar);
+ if (ret)
+ return ret;
+
+ ret = btintel_send_edr(hdev, cmd, 0x013b, sar);
+ if (ret)
+ return ret;
+
+ ret = btintel_send_edr(hdev, cmd, 0x013c, sar);
+
+ return ret;
+}
+
+static int btintel_set_mutual_sar(struct hci_dev *hdev, struct btintel_sar_inc_pwr *sar)
+{
+ struct btintel_cp_ddc_write *cmd;
+ struct sk_buff *skb;
+ u8 buffer[64];
+ bool enable;
+ int ret;
+
+ cmd = (void *)buffer;
+
+ cmd->len = 3;
+ cmd->id = cpu_to_le16(0x019e);
+
+ if (sar->revision == BTINTEL_SAR_INC_PWR &&
+ sar->inc_power_mode == BTINTEL_SAR_INC_PWR_SUPPORTED)
+ cmd->data[0] = 0x01;
+ else
+ cmd->data[0] = 0x00;
+
+ ret = btintel_send_sar_ddc(hdev, cmd, 4);
+ if (ret)
+ return ret;
+
+ if (sar->revision == BTINTEL_SAR_INC_PWR &&
+ sar->inc_power_mode == BTINTEL_SAR_INC_PWR_SUPPORTED) {
+ cmd->len = 3;
+ cmd->id = cpu_to_le16(0x019f);
+ cmd->data[0] = sar->sar_2400_chain_a;
+
+ ret = btintel_send_sar_ddc(hdev, cmd, 4);
+ if (ret)
+ return ret;
+ }
+
+ ret = btintel_send_br_mutual(hdev, cmd, 0x01a0, sar);
+ if (ret)
+ return ret;
+
+ ret = btintel_send_edr2(hdev, cmd, 0x01a1, sar);
+ if (ret)
+ return ret;
+
+ ret = btintel_send_edr3(hdev, cmd, 0x01a2, sar);
+ if (ret)
+ return ret;
+
+ ret = btintel_send_le(hdev, cmd, 0x01a3, sar);
+ if (ret)
+ return ret;
+
+ enable = true;
+ skb = __hci_cmd_sync(hdev, 0xfe25, 1, &enable, HCI_CMD_TIMEOUT);
+ if (IS_ERR(skb)) {
+ bt_dev_warn(hdev, "Failed to send Intel SAR Enable (%ld)", PTR_ERR(skb));
+ return PTR_ERR(skb);
+ }
+
+ kfree_skb(skb);
+ return 0;
+}
+
+static int btintel_sar_send_to_device(struct hci_dev *hdev, struct btintel_sar_inc_pwr *sar,
+ struct intel_version_tlv *ver)
+{
+ u16 cnvi, cnvr;
+ int ret;
+
+ cnvi = ver->cnvi_top & 0xfff;
+ cnvr = ver->cnvr_top & 0xfff;
+
+ if (cnvi < BTINTEL_CNVI_BLAZARI && cnvr < BTINTEL_CNVR_FMP2) {
+ bt_dev_info(hdev, "Applying legacy Bluetooth SAR");
+ ret = btintel_set_legacy_sar(hdev, sar);
+ } else if (cnvi == BTINTEL_CNVI_GAP || cnvr == BTINTEL_CNVR_FMP2) {
+ bt_dev_info(hdev, "Applying mutual Bluetooth SAR");
+ ret = btintel_set_mutual_sar(hdev, sar);
+ } else {
+ ret = -EOPNOTSUPP;
+ }
+
+ return ret;
+}
+
+static int btintel_acpi_set_sar(struct hci_dev *hdev, struct intel_version_tlv *ver)
+{
+ union acpi_object *bt_pkg, *buffer = NULL;
+ struct btintel_sar_inc_pwr sar;
+ acpi_status status;
+ u8 revision;
+ int ret;
+
+ status = btintel_evaluate_acpi_method(hdev, "BRDS", &buffer, 2);
+ if (ACPI_FAILURE(status))
+ return -ENOENT;
+
+ bt_pkg = btintel_acpi_get_bt_pkg(buffer);
+
+ if (IS_ERR(bt_pkg)) {
+ ret = PTR_ERR(bt_pkg);
+ goto error;
+ }
+
+ if (!bt_pkg->package.count) {
+ ret = -EINVAL;
+ goto error;
+ }
+
+ revision = buffer->package.elements[0].integer.value;
+
+ if (revision > BTINTEL_SAR_INC_PWR) {
+ bt_dev_dbg(hdev, "BT_SAR: revision: 0x%2.2x not supported", revision);
+ ret = -EOPNOTSUPP;
+ goto error;
+ }
+
+ memset(&sar, 0, sizeof(sar));
+
+ if (revision == BTINTEL_SAR_LEGACY && bt_pkg->package.count == 8) {
+ sar.revision = revision;
+ sar.bt_sar_bios = bt_pkg->package.elements[1].integer.value;
+ sar.br = bt_pkg->package.elements[2].integer.value;
+ sar.edr2 = bt_pkg->package.elements[3].integer.value;
+ sar.edr3 = bt_pkg->package.elements[4].integer.value;
+ sar.le = bt_pkg->package.elements[5].integer.value;
+ sar.le_2mhz = bt_pkg->package.elements[6].integer.value;
+ sar.le_lr = bt_pkg->package.elements[7].integer.value;
+
+ } else if (revision == BTINTEL_SAR_INC_PWR && bt_pkg->package.count == 10) {
+ sar.revision = revision;
+ sar.bt_sar_bios = bt_pkg->package.elements[1].integer.value;
+ sar.inc_power_mode = bt_pkg->package.elements[2].integer.value;
+ sar.sar_2400_chain_a = bt_pkg->package.elements[3].integer.value;
+ sar.br = bt_pkg->package.elements[4].integer.value;
+ sar.edr2 = bt_pkg->package.elements[5].integer.value;
+ sar.edr3 = bt_pkg->package.elements[6].integer.value;
+ sar.le = bt_pkg->package.elements[7].integer.value;
+ sar.le_2mhz = bt_pkg->package.elements[8].integer.value;
+ sar.le_lr = bt_pkg->package.elements[9].integer.value;
+ } else {
+ ret = -EINVAL;
+ goto error;
+ }
+
+ /* Apply only if it is enabled in BIOS */
+ if (sar.bt_sar_bios != 1) {
+ bt_dev_dbg(hdev, "Bluetooth SAR is not enabled");
+ ret = -EOPNOTSUPP;
+ goto error;
+ }
+
+ ret = btintel_sar_send_to_device(hdev, &sar, ver);
+error:
+ kfree(buffer);
+ return ret;
+}
+#endif /* CONFIG_ACPI */
+
+static int btintel_set_specific_absorption_rate(struct hci_dev *hdev,
+ struct intel_version_tlv *ver)
+{
+#ifdef CONFIG_ACPI
+ return btintel_acpi_set_sar(hdev, ver);
+#endif
+ return 0;
+}
+
int btintel_bootloader_setup_tlv(struct hci_dev *hdev,
struct intel_version_tlv *ver)
{
@@ -2877,6 +3213,9 @@ int btintel_bootloader_setup_tlv(struct hci_dev *hdev,
hci_dev_clear_flag(hdev, HCI_QUALITY_REPORT);
+ /* Send SAR values to controller */
+ btintel_set_specific_absorption_rate(hdev, ver);
+
/* Set PPAG feature */
btintel_set_ppag(hdev, ver);
@@ -2919,6 +3258,7 @@ void btintel_set_msft_opcode(struct hci_dev *hdev, u8 hw_variant)
case 0x1c:
case 0x1d:
case 0x1e:
+ case 0x1f:
hci_set_msft_opcode(hdev, 0xFC1E);
break;
default:
@@ -3258,6 +3598,7 @@ static int btintel_setup_combined(struct hci_dev *hdev)
case 0x1b:
case 0x1d:
case 0x1e:
+ case 0x1f:
/* Display version information of TLV type */
btintel_version_info_tlv(hdev, &ver_tlv);
diff --git a/drivers/bluetooth/btintel.h b/drivers/bluetooth/btintel.h
index fa43eb137821..2aece3effa4e 100644
--- a/drivers/bluetooth/btintel.h
+++ b/drivers/bluetooth/btintel.h
@@ -56,6 +56,10 @@ struct intel_tlv {
#define BTINTEL_CNVI_BLAZARIW 0x901
#define BTINTEL_CNVI_GAP 0x910
#define BTINTEL_CNVI_BLAZARU 0x930
+#define BTINTEL_CNVI_SCP 0xA00
+
+/* CNVR */
+#define BTINTEL_CNVR_FMP2 0x910
#define BTINTEL_IMG_BOOTLOADER 0x01 /* Bootloader image */
#define BTINTEL_IMG_IML 0x02 /* Intermediate image */
@@ -164,6 +168,26 @@ struct hci_ppag_enable_cmd {
#define INTEL_TLV_DEBUG_EXCEPTION 0x02
#define INTEL_TLV_TEST_EXCEPTION 0xDE
+struct btintel_cp_ddc_write {
+ u8 len;
+ __le16 id;
+ u8 data[];
+} __packed;
+
+/* Bluetooth SAR feature (BRDS), Revision 1 */
+struct btintel_sar_inc_pwr {
+ u8 revision;
+ u32 bt_sar_bios; /* Mode of SAR control to be used, 1: enabled in BIOS */
+ u32 inc_power_mode; /* Increased power mode */
+ u8 sar_2400_chain_a; /* SAR power restriction LB */
+ u8 br;
+ u8 edr2;
+ u8 edr3;
+ u8 le;
+ u8 le_2mhz;
+ u8 le_lr;
+};
+
#define INTEL_HW_PLATFORM(cnvx_bt) ((u8)(((cnvx_bt) & 0x0000ff00) >> 8))
#define INTEL_HW_VARIANT(cnvx_bt) ((u8)(((cnvx_bt) & 0x003f0000) >> 16))
#define INTEL_CNVX_TOP_TYPE(cnvx_top) ((cnvx_top) & 0x00000fff)
diff --git a/drivers/bluetooth/btintel_pcie.c b/drivers/bluetooth/btintel_pcie.c
index 091ffe3e1495..c1e69fcc9c4f 100644
--- a/drivers/bluetooth/btintel_pcie.c
+++ b/drivers/bluetooth/btintel_pcie.c
@@ -36,6 +36,7 @@
/* Intel Bluetooth PCIe device id table */
static const struct pci_device_id btintel_pcie_table[] = {
{ BTINTEL_PCI_DEVICE(0xA876, PCI_ANY_ID) },
+ { BTINTEL_PCI_DEVICE(0xE476, PCI_ANY_ID) },
{ 0 }
};
MODULE_DEVICE_TABLE(pci, btintel_pcie_table);
@@ -48,6 +49,19 @@ MODULE_DEVICE_TABLE(pci, btintel_pcie_table);
#define BTINTEL_PCIE_HCI_EVT_PKT 0x00000004
#define BTINTEL_PCIE_HCI_ISO_PKT 0x00000005
+#define BTINTEL_PCIE_MAGIC_NUM 0xA5A5A5A5
+
+#define BTINTEL_PCIE_BLZR_HWEXP_SIZE 1024
+#define BTINTEL_PCIE_BLZR_HWEXP_DMP_ADDR 0xB00A7C00
+
+#define BTINTEL_PCIE_SCP_HWEXP_SIZE 4096
+#define BTINTEL_PCIE_SCP_HWEXP_DMP_ADDR 0xB030F800
+
+#define BTINTEL_PCIE_TRIGGER_REASON_USER_TRIGGER 0x17A2
+#define BTINTEL_PCIE_TRIGGER_REASON_FW_ASSERT 0x1E61
+
/* Alive interrupt context */
enum {
BTINTEL_PCIE_ROM,
@@ -59,6 +73,83 @@ enum {
BTINTEL_PCIE_D3
};
+/* Structure for dbgc fragment buffer
+ * @buf_addr_lsb: LSB of the buffer's physical address
+ * @buf_addr_msb: MSB of the buffer's physical address
+ * @buf_size: Total size of the buffer
+ */
+struct btintel_pcie_dbgc_ctxt_buf {
+ u32 buf_addr_lsb;
+ u32 buf_addr_msb;
+ u32 buf_size;
+};
+
+/* Structure for dbgc fragment
+ * @magic_num: 0xA5A5A5A5
+ * @ver: For Driver-FW compatibility
+ * @total_size: Total size of the payload debug info
+ * @num_buf: Num of allocated debug bufs
+ * @bufs: All buffer's addresses and sizes
+ */
+struct btintel_pcie_dbgc_ctxt {
+ u32 magic_num;
+ u32 ver;
+ u32 total_size;
+ u32 num_buf;
+ struct btintel_pcie_dbgc_ctxt_buf bufs[BTINTEL_PCIE_DBGC_BUFFER_COUNT];
+};
+
+/* This function initializes the memory for DBGC buffers and formats the
+ * DBGC fragment, which consists of header info and each DBGC buffer's LSB,
+ * MSB and size as the payload
+ */
+static int btintel_pcie_setup_dbgc(struct btintel_pcie_data *data)
+{
+ struct btintel_pcie_dbgc_ctxt db_frag;
+ struct data_buf *buf;
+ int i;
+
+ data->dbgc.count = BTINTEL_PCIE_DBGC_BUFFER_COUNT;
+ data->dbgc.bufs = devm_kcalloc(&data->pdev->dev, data->dbgc.count,
+ sizeof(*buf), GFP_KERNEL);
+ if (!data->dbgc.bufs)
+ return -ENOMEM;
+
+ data->dbgc.buf_v_addr = dmam_alloc_coherent(&data->pdev->dev,
+ data->dbgc.count *
+ BTINTEL_PCIE_DBGC_BUFFER_SIZE,
+ &data->dbgc.buf_p_addr,
+ GFP_KERNEL | __GFP_NOWARN);
+ if (!data->dbgc.buf_v_addr)
+ return -ENOMEM;
+
+ data->dbgc.frag_v_addr = dmam_alloc_coherent(&data->pdev->dev,
+ sizeof(struct btintel_pcie_dbgc_ctxt),
+ &data->dbgc.frag_p_addr,
+ GFP_KERNEL | __GFP_NOWARN);
+ if (!data->dbgc.frag_v_addr)
+ return -ENOMEM;
+
+ data->dbgc.frag_size = sizeof(struct btintel_pcie_dbgc_ctxt);
+
+ db_frag.magic_num = BTINTEL_PCIE_MAGIC_NUM;
+ db_frag.ver = BTINTEL_PCIE_DBGC_FRAG_VERSION;
+ db_frag.total_size = BTINTEL_PCIE_DBGC_FRAG_PAYLOAD_SIZE;
+ db_frag.num_buf = BTINTEL_PCIE_DBGC_FRAG_BUFFER_COUNT;
+
+ for (i = 0; i < data->dbgc.count; i++) {
+ buf = &data->dbgc.bufs[i];
+ buf->data_p_addr = data->dbgc.buf_p_addr + i * BTINTEL_PCIE_DBGC_BUFFER_SIZE;
+ buf->data = data->dbgc.buf_v_addr + i * BTINTEL_PCIE_DBGC_BUFFER_SIZE;
+ db_frag.bufs[i].buf_addr_lsb = lower_32_bits(buf->data_p_addr);
+ db_frag.bufs[i].buf_addr_msb = upper_32_bits(buf->data_p_addr);
+ db_frag.bufs[i].buf_size = BTINTEL_PCIE_DBGC_BUFFER_SIZE;
+ }
+
+ memcpy(data->dbgc.frag_v_addr, &db_frag, sizeof(db_frag));
+ return 0;
+}
+
static inline void ipc_print_ia_ring(struct hci_dev *hdev, struct ia *ia,
u16 queue_num)
{
@@ -273,6 +364,271 @@ static int btintel_pcie_reset_bt(struct btintel_pcie_data *data)
return reg == 0 ? 0 : -ENODEV;
}
+static void btintel_pcie_mac_init(struct btintel_pcie_data *data)
+{
+ u32 reg;
+
+ /* Set MAC_INIT bit to start primary bootloader */
+ reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG);
+ reg &= ~(BTINTEL_PCIE_CSR_FUNC_CTRL_FUNC_INIT |
+ BTINTEL_PCIE_CSR_FUNC_CTRL_BUS_MASTER_DISCON |
+ BTINTEL_PCIE_CSR_FUNC_CTRL_SW_RESET);
+ reg |= (BTINTEL_PCIE_CSR_FUNC_CTRL_FUNC_ENA |
+ BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_INIT);
+ btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG, reg);
+}
+
+static int btintel_pcie_add_dmp_data(struct hci_dev *hdev, const void *data, int size)
+{
+ struct sk_buff *skb;
+ int err;
+
+ skb = alloc_skb(size, GFP_ATOMIC);
+ if (!skb)
+ return -ENOMEM;
+
+ skb_put_data(skb, data, size);
+ err = hci_devcd_append(hdev, skb);
+ if (err) {
+ bt_dev_err(hdev, "Failed to append data in the coredump");
+ return err;
+ }
+
+ return 0;
+}
+
+static int btintel_pcie_get_mac_access(struct btintel_pcie_data *data)
+{
+ u32 reg;
+ int retry = 15;
+
+ reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG);
+
+ reg |= BTINTEL_PCIE_CSR_FUNC_CTRL_STOP_MAC_ACCESS_DIS;
+ reg |= BTINTEL_PCIE_CSR_FUNC_CTRL_XTAL_CLK_REQ;
+ if ((reg & BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_ACCESS_STS) == 0)
+ reg |= BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_ACCESS_REQ;
+
+ btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG, reg);
+
+ do {
+ reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG);
+ if (reg & BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_ACCESS_STS)
+ return 0;
+ /* Need delay here for Target Access hardware to settle down */
+ usleep_range(1000, 1200);
+
+ } while (--retry > 0);
+
+ return -ETIME;
+}
+
+static void btintel_pcie_release_mac_access(struct btintel_pcie_data *data)
+{
+ u32 reg;
+
+ reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG);
+
+ if (reg & BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_ACCESS_REQ)
+ reg &= ~BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_ACCESS_REQ;
+
+ if (reg & BTINTEL_PCIE_CSR_FUNC_CTRL_STOP_MAC_ACCESS_DIS)
+ reg &= ~BTINTEL_PCIE_CSR_FUNC_CTRL_STOP_MAC_ACCESS_DIS;
+
+ if (reg & BTINTEL_PCIE_CSR_FUNC_CTRL_XTAL_CLK_REQ)
+ reg &= ~BTINTEL_PCIE_CSR_FUNC_CTRL_XTAL_CLK_REQ;
+
+ btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG, reg);
+}
+
+static void btintel_pcie_copy_tlv(struct sk_buff *skb, enum btintel_pcie_tlv_type type,
+ void *data, int size)
+{
+ struct intel_tlv *tlv;
+
+ tlv = skb_put(skb, sizeof(*tlv) + size);
+ tlv->type = type;
+ tlv->len = size;
+ memcpy(tlv->val, data, tlv->len);
+}
+
+static int btintel_pcie_read_dram_buffers(struct btintel_pcie_data *data)
+{
+ u32 offset, prev_size, wr_ptr_status, dump_size, i;
+ struct btintel_pcie_dbgc *dbgc = &data->dbgc;
+ u8 buf_idx, dump_time_len, fw_build;
+ struct hci_dev *hdev = data->hdev;
+ struct intel_tlv *tlv;
+ struct timespec64 now;
+ struct sk_buff *skb;
+ struct tm tm_now;
+ char buf[256];
+ u16 hdr_len;
+ int ret;
+
+ wr_ptr_status = btintel_pcie_rd_dev_mem(data, BTINTEL_PCIE_DBGC_CUR_DBGBUFF_STATUS);
+ offset = wr_ptr_status & BTINTEL_PCIE_DBG_OFFSET_BIT_MASK;
+
+ buf_idx = BTINTEL_PCIE_DBGC_DBG_BUF_IDX(wr_ptr_status);
+ if (buf_idx > dbgc->count) {
+ bt_dev_warn(hdev, "Buffer index is invalid");
+ return -EINVAL;
+ }
+
+ prev_size = buf_idx * BTINTEL_PCIE_DBGC_BUFFER_SIZE;
+ if (prev_size + offset >= prev_size)
+ data->dmp_hdr.write_ptr = prev_size + offset;
+ else
+ return -EINVAL;
+
+ ktime_get_real_ts64(&now);
+ time64_to_tm(now.tv_sec, 0, &tm_now);
+ dump_time_len = snprintf(buf, sizeof(buf), "Dump Time: %02d-%02d-%04ld %02d:%02d:%02d",
+ tm_now.tm_mday, tm_now.tm_mon + 1, tm_now.tm_year + 1900,
+ tm_now.tm_hour, tm_now.tm_min, tm_now.tm_sec);
+
+ fw_build = snprintf(buf + dump_time_len, sizeof(buf) - dump_time_len,
+ "Firmware Timestamp: Year %u WW %02u buildtype %u build %u",
+ 2000 + (data->dmp_hdr.fw_timestamp >> 8),
+ data->dmp_hdr.fw_timestamp & 0xff, data->dmp_hdr.fw_build_type,
+ data->dmp_hdr.fw_build_num);
+
+ hdr_len = sizeof(*tlv) + sizeof(data->dmp_hdr.cnvi_bt) +
+ sizeof(*tlv) + sizeof(data->dmp_hdr.write_ptr) +
+ sizeof(*tlv) + sizeof(data->dmp_hdr.wrap_ctr) +
+ sizeof(*tlv) + sizeof(data->dmp_hdr.trigger_reason) +
+ sizeof(*tlv) + sizeof(data->dmp_hdr.fw_git_sha1) +
+ sizeof(*tlv) + sizeof(data->dmp_hdr.cnvr_top) +
+ sizeof(*tlv) + sizeof(data->dmp_hdr.cnvi_top) +
+ sizeof(*tlv) + dump_time_len +
+ sizeof(*tlv) + fw_build;
+
+ dump_size = hdr_len + sizeof(hdr_len);
+
+ skb = alloc_skb(dump_size, GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+
+ /* Add debug buffers data length to dump size */
+ dump_size += BTINTEL_PCIE_DBGC_BUFFER_SIZE * dbgc->count;
+
+ ret = hci_devcd_init(hdev, dump_size);
+ if (ret) {
+ bt_dev_err(hdev, "Failed to init devcoredump, err %d", ret);
+ kfree_skb(skb);
+ return ret;
+ }
+
+ skb_put_data(skb, &hdr_len, sizeof(hdr_len));
+
+ btintel_pcie_copy_tlv(skb, BTINTEL_CNVI_BT, &data->dmp_hdr.cnvi_bt,
+ sizeof(data->dmp_hdr.cnvi_bt));
+
+ btintel_pcie_copy_tlv(skb, BTINTEL_WRITE_PTR, &data->dmp_hdr.write_ptr,
+ sizeof(data->dmp_hdr.write_ptr));
+
+ data->dmp_hdr.wrap_ctr = btintel_pcie_rd_dev_mem(data,
+ BTINTEL_PCIE_DBGC_DBGBUFF_WRAP_ARND);
+
+ btintel_pcie_copy_tlv(skb, BTINTEL_WRAP_CTR, &data->dmp_hdr.wrap_ctr,
+ sizeof(data->dmp_hdr.wrap_ctr));
+
+ btintel_pcie_copy_tlv(skb, BTINTEL_TRIGGER_REASON, &data->dmp_hdr.trigger_reason,
+ sizeof(data->dmp_hdr.trigger_reason));
+
+ btintel_pcie_copy_tlv(skb, BTINTEL_FW_SHA, &data->dmp_hdr.fw_git_sha1,
+ sizeof(data->dmp_hdr.fw_git_sha1));
+
+ btintel_pcie_copy_tlv(skb, BTINTEL_CNVR_TOP, &data->dmp_hdr.cnvr_top,
+ sizeof(data->dmp_hdr.cnvr_top));
+
+ btintel_pcie_copy_tlv(skb, BTINTEL_CNVI_TOP, &data->dmp_hdr.cnvi_top,
+ sizeof(data->dmp_hdr.cnvi_top));
+
+ btintel_pcie_copy_tlv(skb, BTINTEL_DUMP_TIME, buf, dump_time_len);
+
+ btintel_pcie_copy_tlv(skb, BTINTEL_FW_BUILD, buf + dump_time_len, fw_build);
+
+ ret = hci_devcd_append(hdev, skb);
+ if (ret)
+ goto exit_err;
+
+ for (i = 0; i < dbgc->count; i++) {
+ ret = btintel_pcie_add_dmp_data(hdev, dbgc->bufs[i].data,
+ BTINTEL_PCIE_DBGC_BUFFER_SIZE);
+ if (ret)
+ break;
+ }
+
+exit_err:
+ hci_devcd_complete(hdev);
+ return ret;
+}
+
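For reference, here is a minimal sketch of how a consumer might walk the metadata block that btintel_pcie_read_dram_buffers() places in front of the raw DBGC buffers. It assumes struct intel_tlv keeps its usual { u8 type; u8 len; u8 val[]; } layout and ignores whatever the devcoredump core prepends; walk_dump_header() is a hypothetical helper, not part of the driver:

static void walk_dump_header(const u8 *dump, size_t size)
{
	u16 hdr_len;
	size_t off = sizeof(hdr_len);

	if (size < sizeof(hdr_len))
		return;

	/* Host-endian, as written by skb_put_data() above */
	memcpy(&hdr_len, dump, sizeof(hdr_len));

	/* TLVs occupy hdr_len bytes right after the length field */
	while (off + 2 <= sizeof(hdr_len) + hdr_len && off + 2 <= size) {
		u8 type = dump[off];		/* enum btintel_pcie_tlv_type */
		u8 len = dump[off + 1];

		pr_debug("tlv type %u, len %u\n", type, len);
		off += 2 + len;
	}

	/* The raw DBGC buffer contents follow the metadata block */
}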
+static void btintel_pcie_dump_traces(struct hci_dev *hdev)
+{
+ struct btintel_pcie_data *data = hci_get_drvdata(hdev);
+ int ret = 0;
+
+ ret = btintel_pcie_get_mac_access(data);
+ if (ret) {
+ bt_dev_err(hdev, "Failed to get mac access: (%d)", ret);
+ return;
+ }
+
+ ret = btintel_pcie_read_dram_buffers(data);
+
+ btintel_pcie_release_mac_access(data);
+
+ if (ret)
+ bt_dev_err(hdev, "Failed to dump traces: (%d)", ret);
+}
+
+static void btintel_pcie_dump_hdr(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ struct btintel_pcie_data *data = hci_get_drvdata(hdev);
+ u16 len = skb->len;
+ u16 *hdrlen_ptr;
+ char buf[80];
+
+ hdrlen_ptr = skb_put_zero(skb, sizeof(len));
+
+ snprintf(buf, sizeof(buf), "Controller Name: 0x%X\n",
+ INTEL_HW_VARIANT(data->dmp_hdr.cnvi_bt));
+ skb_put_data(skb, buf, strlen(buf));
+
+ snprintf(buf, sizeof(buf), "Firmware Build Number: %u\n",
+ data->dmp_hdr.fw_build_num);
+ skb_put_data(skb, buf, strlen(buf));
+
+ snprintf(buf, sizeof(buf), "Driver: %s\n", data->dmp_hdr.driver_name);
+ skb_put_data(skb, buf, strlen(buf));
+
+ snprintf(buf, sizeof(buf), "Vendor: Intel\n");
+ skb_put_data(skb, buf, strlen(buf));
+
+ *hdrlen_ptr = skb->len - len;
+}
+
+static void btintel_pcie_dump_notify(struct hci_dev *hdev, int state)
+{
+ struct btintel_pcie_data *data = hci_get_drvdata(hdev);
+
+ switch (state) {
+ case HCI_DEVCOREDUMP_IDLE:
+ data->dmp_hdr.state = HCI_DEVCOREDUMP_IDLE;
+ break;
+ case HCI_DEVCOREDUMP_ACTIVE:
+ data->dmp_hdr.state = HCI_DEVCOREDUMP_ACTIVE;
+ break;
+ case HCI_DEVCOREDUMP_TIMEOUT:
+ case HCI_DEVCOREDUMP_ABORT:
+ case HCI_DEVCOREDUMP_DONE:
+ data->dmp_hdr.state = HCI_DEVCOREDUMP_IDLE;
+ break;
+ }
+}
+
/* This function enables BT function by setting BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_INIT bit in
* BTINTEL_PCIE_CSR_FUNC_CTRL_REG register and wait for MSI-X with
* BTINTEL_PCIE_MSIX_HW_INT_CAUSES_GP0.
@@ -329,20 +685,6 @@ static int btintel_pcie_enable_bt(struct btintel_pcie_data *data)
return 0;
}
-/* BIT(0) - ROM, BIT(1) - IML and BIT(3) - OP
- * Sometimes during firmware image switching from ROM to IML or IML to OP image,
- * the previous image bit is not cleared by firmware when alive interrupt is
- * received. Driver needs to take care of these sticky bits when deciding the
- * current image running on controller.
- * Ex: 0x10 and 0x11 - both represents that controller is running IML
- */
-static inline bool btintel_pcie_in_rom(struct btintel_pcie_data *data)
-{
- return data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_ROM &&
- !(data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_IML) &&
- !(data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_OPFW);
-}
-
static inline bool btintel_pcie_in_op(struct btintel_pcie_data *data)
{
return data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_OPFW;
@@ -393,6 +735,27 @@ static inline char *btintel_pcie_alivectxt_state2str(u32 alive_intr_ctxt)
}
}
+static int btintel_pcie_read_device_mem(struct btintel_pcie_data *data,
+ void *buf, u32 dev_addr, int len)
+{
+ int err;
+ u32 *val = buf;
+
+ /* Get device mac access */
+ err = btintel_pcie_get_mac_access(data);
+ if (err) {
+ bt_dev_err(data->hdev, "Failed to get mac access %d", err);
+ return err;
+ }
+
+ for (; len > 0; len -= 4, dev_addr += 4, val++)
+ *val = btintel_pcie_rd_dev_mem(data, dev_addr);
+
+ btintel_pcie_release_mac_access(data);
+
+ return 0;
+}
+
/* This function handles the MSI-X interrupt for gp0 cause (bit 0 in
* BTINTEL_PCIE_CSR_MSIX_HW_INT_CAUSES) which is sent for boot stage and image response.
*/
@@ -714,6 +1077,126 @@ exit_error:
return ret;
}
+static void btintel_pcie_read_hwexp(struct btintel_pcie_data *data)
+{
+ int len, err, offset, pending;
+ struct sk_buff *skb;
+ u8 *buf, prefix[64];
+ u32 addr, val;
+ u16 pkt_len;
+
+ struct tlv {
+ u8 type;
+ __le16 len;
+ u8 val[];
+ } __packed;
+
+ struct tlv *tlv;
+
+ switch (data->dmp_hdr.cnvi_top & 0xfff) {
+ case BTINTEL_CNVI_BLAZARI:
+ case BTINTEL_CNVI_BLAZARIW:
+ /* only from step B0 onwards */
+ if (INTEL_CNVX_TOP_STEP(data->dmp_hdr.cnvi_top) != 0x01)
+ return;
+ len = BTINTEL_PCIE_BLZR_HWEXP_SIZE; /* exception data length */
+ addr = BTINTEL_PCIE_BLZR_HWEXP_DMP_ADDR;
+ break;
+ case BTINTEL_CNVI_SCP:
+ len = BTINTEL_PCIE_SCP_HWEXP_SIZE;
+ addr = BTINTEL_PCIE_SCP_HWEXP_DMP_ADDR;
+ break;
+ default:
+ bt_dev_err(data->hdev, "Unsupported cnvi 0x%8.8x", data->dmp_hdr.cnvi_top);
+ return;
+ }
+
+ buf = kzalloc(len, GFP_KERNEL);
+ if (!buf)
+ goto exit_on_error;
+
+ btintel_pcie_mac_init(data);
+
+ err = btintel_pcie_read_device_mem(data, buf, addr, len);
+ if (err)
+ goto exit_on_error;
+
+ val = get_unaligned_le32(buf);
+ if (val != BTINTEL_PCIE_MAGIC_NUM) {
+ bt_dev_err(data->hdev, "Invalid exception dump signature: 0x%8.8x",
+ val);
+ goto exit_on_error;
+ }
+
+ snprintf(prefix, sizeof(prefix), "Bluetooth: %s: ", bt_dev_name(data->hdev));
+
+ offset = 4;
+ do {
+ pending = len - offset;
+ if (pending < sizeof(*tlv))
+ break;
+ tlv = (struct tlv *)(buf + offset);
+
+ /* If type == 0, then there are no more TLVs to be parsed */
+ if (!tlv->type) {
+ bt_dev_dbg(data->hdev, "Invalid TLV type 0");
+ break;
+ }
+ pkt_len = le16_to_cpu(tlv->len);
+ offset += sizeof(*tlv);
+ pending = len - offset;
+ if (pkt_len > pending)
+ break;
+
+ offset += pkt_len;
+
+ /* Only TLVs of type == 1 are HCI events, no need to process other
+ * TLVs
+ */
+ if (tlv->type != 1)
+ continue;
+
+ bt_dev_dbg(data->hdev, "TLV packet length: %u", pkt_len);
+ if (pkt_len > HCI_MAX_EVENT_SIZE)
+ break;
+ skb = bt_skb_alloc(pkt_len, GFP_KERNEL);
+ if (!skb)
+ goto exit_on_error;
+ hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
+ skb_put_data(skb, tlv->val, pkt_len);
+
+ /* Copy Intel-specific PCIe packet type */
+ val = BTINTEL_PCIE_HCI_EVT_PKT;
+ memcpy(skb_push(skb, BTINTEL_PCIE_HCI_TYPE_LEN), &val,
+ BTINTEL_PCIE_HCI_TYPE_LEN);
+
+ print_hex_dump(KERN_DEBUG, prefix, DUMP_PREFIX_OFFSET, 16, 1,
+ tlv->val, pkt_len, false);
+
+ btintel_pcie_recv_frame(data, skb);
+ } while (offset < len);
+
+exit_on_error:
+ kfree(buf);
+}
+
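The exception region read above is self-describing; its layout, as derived from the parser in btintel_pcie_read_hwexp() (summary only, not part of the patch):

/*
 * Exception dump region:
 *
 *   u32 magic;              little endian, must be 0xA5A5A5A5
 *   repeated {
 *       u8   type;          0 terminates the list, 1 = HCI event
 *       le16 len;           payload length; capped at HCI_MAX_EVENT_SIZE
 *                           for type 1
 *       u8   val[len];
 *   }
 *
 * Type 1 payloads are re-injected into the HCI core as event packets;
 * every other type is skipped.
 */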
+static void btintel_pcie_msix_hw_exp_handler(struct btintel_pcie_data *data)
+{
+ bt_dev_err(data->hdev, "Received hw exception interrupt");
+
+ if (test_and_set_bit(BTINTEL_PCIE_CORE_HALTED, &data->flags))
+ return;
+
+ if (test_and_set_bit(BTINTEL_PCIE_HWEXP_INPROGRESS, &data->flags))
+ return;
+
+ /* Trigger device core dump when there is HW exception */
+ if (!test_and_set_bit(BTINTEL_PCIE_COREDUMP_INPROGRESS, &data->flags))
+ data->dmp_hdr.trigger_reason = BTINTEL_PCIE_TRIGGER_REASON_FW_ASSERT;
+
+ queue_work(data->workqueue, &data->rx_work);
+}
+
static void btintel_pcie_rx_work(struct work_struct *work)
{
struct btintel_pcie_data *data = container_of(work,
@@ -722,6 +1205,23 @@ static void btintel_pcie_rx_work(struct work_struct *work)
int err;
struct hci_dev *hdev = data->hdev;
+ if (test_bit(BTINTEL_PCIE_HWEXP_INPROGRESS, &data->flags)) {
+ /* Unlike USB products, the controller will not send a hardware
+ * exception event on exception. Instead, the controller writes the
+ * hardware exception event to device memory along with optional
+ * debug events, raises an MSI-X interrupt and halts. The driver
+ * shall read the exception event from device memory and pass it to
+ * the stack for further processing.
+ */
+ btintel_pcie_read_hwexp(data);
+ clear_bit(BTINTEL_PCIE_HWEXP_INPROGRESS, &data->flags);
+ }
+
+ if (test_bit(BTINTEL_PCIE_COREDUMP_INPROGRESS, &data->flags)) {
+ btintel_pcie_dump_traces(data->hdev);
+ clear_bit(BTINTEL_PCIE_COREDUMP_INPROGRESS, &data->flags);
+ }
+
/* Process the sk_buf in queue and send to the HCI layer */
while ((skb = skb_dequeue(&data->rx_skb_q))) {
err = btintel_pcie_recv_frame(data, skb);
@@ -840,6 +1340,10 @@ static irqreturn_t btintel_pcie_irq_msix_handler(int irq, void *dev_id)
return IRQ_NONE;
}
+ /* This interrupt is raised when there is a hardware exception */
+ if (intr_hw & BTINTEL_PCIE_MSIX_HW_INT_CAUSES_HWEXP)
+ btintel_pcie_msix_hw_exp_handler(data);
+
/* This interrupt is triggered by the firmware after updating
* boot_stage register and image_response register
*/
@@ -920,7 +1424,8 @@ struct btintel_pcie_causes_list {
static struct btintel_pcie_causes_list causes_list[] = {
{ BTINTEL_PCIE_MSIX_FH_INT_CAUSES_0, BTINTEL_PCIE_CSR_MSIX_FH_INT_MASK, 0x00 },
{ BTINTEL_PCIE_MSIX_FH_INT_CAUSES_1, BTINTEL_PCIE_CSR_MSIX_FH_INT_MASK, 0x01 },
- { BTINTEL_PCIE_MSIX_HW_INT_CAUSES_GP0, BTINTEL_PCIE_CSR_MSIX_HW_INT_MASK, 0x20 },
+ { BTINTEL_PCIE_MSIX_HW_INT_CAUSES_GP0, BTINTEL_PCIE_CSR_MSIX_HW_INT_MASK, 0x20 },
+ { BTINTEL_PCIE_MSIX_HW_INT_CAUSES_HWEXP, BTINTEL_PCIE_CSR_MSIX_HW_INT_MASK, 0x23 },
};
/* This function configures the interrupt masks for both HW_INT_CAUSES and
@@ -1007,6 +1512,11 @@ static void btintel_pcie_init_ci(struct btintel_pcie_data *data,
ci->addr_urbdq1 = data->rxq.urbd1s_p_addr;
ci->num_urbdq1 = data->rxq.count;
ci->urbdq_db_vec = BTINTEL_PCIE_RXQ_NUM;
+
+ ci->dbg_output_mode = 0x01;
+ ci->dbgc_addr = data->dbgc.frag_p_addr;
+ ci->dbgc_size = data->dbgc.frag_size;
+ ci->dbg_preset = 0x00;
}
static void btintel_pcie_free_txq_bufs(struct btintel_pcie_data *data,
@@ -1219,6 +1729,11 @@ static int btintel_pcie_alloc(struct btintel_pcie_data *data)
/* Setup Index Array */
btintel_pcie_setup_ia(data, p_addr, v_addr, &data->ia);
+ /* Setup data buffers for dbgc */
+ err = btintel_pcie_setup_dbgc(data);
+ if (err)
+ goto exit_error_txq;
+
/* Setup Context Information */
p_addr += sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES * 4;
v_addr += sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES * 4;
@@ -1392,6 +1907,7 @@ static void btintel_pcie_release_hdev(struct btintel_pcie_data *data)
static int btintel_pcie_setup_internal(struct hci_dev *hdev)
{
+ struct btintel_pcie_data *data = hci_get_drvdata(hdev);
const u8 param[1] = { 0xFF };
struct intel_version_tlv ver_tlv;
struct sk_buff *skb;
@@ -1449,6 +1965,7 @@ static int btintel_pcie_setup_internal(struct hci_dev *hdev)
*/
switch (INTEL_HW_VARIANT(ver_tlv.cnvi_bt)) {
case 0x1e: /* BzrI */
+ case 0x1f: /* ScP */
/* Display version information of TLV type */
btintel_version_info_tlv(hdev, &ver_tlv);
@@ -1474,6 +1991,23 @@ static int btintel_pcie_setup_internal(struct hci_dev *hdev)
break;
}
+ data->dmp_hdr.cnvi_top = ver_tlv.cnvi_top;
+ data->dmp_hdr.cnvr_top = ver_tlv.cnvr_top;
+ data->dmp_hdr.fw_timestamp = ver_tlv.timestamp;
+ data->dmp_hdr.fw_build_type = ver_tlv.build_type;
+ data->dmp_hdr.fw_build_num = ver_tlv.build_num;
+ data->dmp_hdr.cnvi_bt = ver_tlv.cnvi_bt;
+
+ if (ver_tlv.img_type == 0x02 || ver_tlv.img_type == 0x03)
+ data->dmp_hdr.fw_git_sha1 = ver_tlv.git_sha1;
+
+ err = hci_devcd_register(hdev, btintel_pcie_dump_traces, btintel_pcie_dump_hdr,
+ btintel_pcie_dump_notify);
+ if (err) {
+ bt_dev_err(hdev, "Failed to register coredump (%d)", err);
+ goto exit_error;
+ }
+
btintel_print_fseq_info(hdev);
exit_error:
kfree_skb(skb);
@@ -1538,6 +2072,7 @@ static int btintel_pcie_setup_hdev(struct btintel_pcie_data *data)
goto exit_error;
}
+ data->dmp_hdr.driver_name = KBUILD_MODNAME;
return 0;
exit_error:
@@ -1650,11 +2185,28 @@ static void btintel_pcie_remove(struct pci_dev *pdev)
pci_set_drvdata(pdev, NULL);
}
+#ifdef CONFIG_DEV_COREDUMP
+static void btintel_pcie_coredump(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct btintel_pcie_data *data = pci_get_drvdata(pdev);
+
+ if (test_and_set_bit(BTINTEL_PCIE_COREDUMP_INPROGRESS, &data->flags))
+ return;
+
+ data->dmp_hdr.trigger_reason = BTINTEL_PCIE_TRIGGER_REASON_USER_TRIGGER;
+ queue_work(data->workqueue, &data->rx_work);
+}
+#endif
+
static struct pci_driver btintel_pcie_driver = {
.name = KBUILD_MODNAME,
.id_table = btintel_pcie_table,
.probe = btintel_pcie_probe,
.remove = btintel_pcie_remove,
+#ifdef CONFIG_DEV_COREDUMP
+ .driver.coredump = btintel_pcie_coredump
+#endif
};
module_pci_driver(btintel_pcie_driver);
diff --git a/drivers/bluetooth/btintel_pcie.h b/drivers/bluetooth/btintel_pcie.h
index f9aada0543c4..873178019cad 100644
--- a/drivers/bluetooth/btintel_pcie.h
+++ b/drivers/bluetooth/btintel_pcie.h
@@ -16,6 +16,8 @@
#define BTINTEL_PCIE_CSR_CI_ADDR_LSB_REG (BTINTEL_PCIE_CSR_BASE + 0x118)
#define BTINTEL_PCIE_CSR_CI_ADDR_MSB_REG (BTINTEL_PCIE_CSR_BASE + 0x11C)
#define BTINTEL_PCIE_CSR_IMG_RESPONSE_REG (BTINTEL_PCIE_CSR_BASE + 0x12C)
+#define BTINTEL_PCIE_PRPH_DEV_ADDR_REG (BTINTEL_PCIE_CSR_BASE + 0x440)
+#define BTINTEL_PCIE_PRPH_DEV_RD_REG (BTINTEL_PCIE_CSR_BASE + 0x458)
#define BTINTEL_PCIE_CSR_HBUS_TARG_WRPTR (BTINTEL_PCIE_CSR_BASE + 0x460)
/* BTINTEL_PCIE_CSR Function Control Register */
@@ -23,6 +25,12 @@
#define BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_INIT (BIT(6))
#define BTINTEL_PCIE_CSR_FUNC_CTRL_FUNC_INIT (BIT(7))
#define BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_ACCESS_STS (BIT(20))
+
+#define BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_ACCESS_REQ (BIT(21))
+/* Stop MAC Access disconnection request */
+#define BTINTEL_PCIE_CSR_FUNC_CTRL_STOP_MAC_ACCESS_DIS (BIT(22))
+#define BTINTEL_PCIE_CSR_FUNC_CTRL_XTAL_CLK_REQ (BIT(23))
+
#define BTINTEL_PCIE_CSR_FUNC_CTRL_BUS_MASTER_STS (BIT(28))
#define BTINTEL_PCIE_CSR_FUNC_CTRL_BUS_MASTER_DISCON (BIT(29))
#define BTINTEL_PCIE_CSR_FUNC_CTRL_SW_RESET (BIT(31))
@@ -48,6 +56,30 @@
#define BTINTEL_PCIE_CSR_MSIX_IVAR_BASE (BTINTEL_PCIE_CSR_MSIX_BASE + 0x0880)
#define BTINTEL_PCIE_CSR_MSIX_IVAR(cause) (BTINTEL_PCIE_CSR_MSIX_IVAR_BASE + (cause))
+/* IOSF Debug Register */
+#define BTINTEL_PCIE_DBGC_BASE_ADDR (0xf3800300)
+#define BTINTEL_PCIE_DBGC_CUR_DBGBUFF_STATUS (BTINTEL_PCIE_DBGC_BASE_ADDR + 0x1C)
+#define BTINTEL_PCIE_DBGC_DBGBUFF_WRAP_ARND (BTINTEL_PCIE_DBGC_BASE_ADDR + 0x2C)
+
+#define BTINTEL_PCIE_DBG_IDX_BIT_MASK 0x0F
+#define BTINTEL_PCIE_DBGC_DBG_BUF_IDX(data) (((data) >> 24) & BTINTEL_PCIE_DBG_IDX_BIT_MASK)
+#define BTINTEL_PCIE_DBG_OFFSET_BIT_MASK 0xFFFFFF
+
+/* The DRAM buffer count, each buffer size, and
+ * fragment buffer size
+ */
+#define BTINTEL_PCIE_DBGC_BUFFER_COUNT 16
+#define BTINTEL_PCIE_DBGC_BUFFER_SIZE (256 * 1024) /* 256 KB */
+
+#define BTINTEL_PCIE_DBGC_FRAG_VERSION 1
+#define BTINTEL_PCIE_DBGC_FRAG_BUFFER_COUNT BTINTEL_PCIE_DBGC_BUFFER_COUNT
+
+/* Magic number (4), version (4), total payload size (4) */
+#define BTINTEL_PCIE_DBGC_FRAG_HEADER_SIZE 12
+
+/* Number of allocated debug buffers (4) + (LSB(4), MSB(4), Size(4)) for each buffer */
+#define BTINTEL_PCIE_DBGC_FRAG_PAYLOAD_SIZE 196
+
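As a quick sanity check on the constant above (assuming the 16-buffer count defined earlier): the fragment payload is the num_buf field (4 bytes) plus 16 descriptors of LSB/MSB/size at 12 bytes each, i.e. 4 + 16 * 12 = 196, which matches BTINTEL_PCIE_DBGC_FRAG_PAYLOAD_SIZE. Adding the 12-byte header gives the 208-byte struct btintel_pcie_dbgc_ctxt that btintel_pcie_setup_dbgc() maps for DMA.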
/* Causes for the FH register interrupts */
enum msix_fh_int_causes {
BTINTEL_PCIE_MSIX_FH_INT_CAUSES_0 = BIT(0), /* cause 0 */
@@ -57,6 +89,7 @@ enum msix_fh_int_causes {
/* Causes for the HW register interrupts */
enum msix_hw_int_causes {
BTINTEL_PCIE_MSIX_HW_INT_CAUSES_GP0 = BIT(0), /* cause 32 */
+ BTINTEL_PCIE_MSIX_HW_INT_CAUSES_HWEXP = BIT(3), /* cause 35 */
};
/* PCIe device states
@@ -69,6 +102,25 @@ enum {
BTINTEL_PCIE_STATE_D3_HOT = 2,
BTINTEL_PCIE_STATE_D3_COLD = 3,
};
+
+enum {
+ BTINTEL_PCIE_CORE_HALTED,
+ BTINTEL_PCIE_HWEXP_INPROGRESS,
+ BTINTEL_PCIE_COREDUMP_INPROGRESS
+};
+
+enum btintel_pcie_tlv_type {
+ BTINTEL_CNVI_BT,
+ BTINTEL_WRITE_PTR,
+ BTINTEL_WRAP_CTR,
+ BTINTEL_TRIGGER_REASON,
+ BTINTEL_FW_SHA,
+ BTINTEL_CNVR_TOP,
+ BTINTEL_CNVI_TOP,
+ BTINTEL_DUMP_TIME,
+ BTINTEL_FW_BUILD,
+};
+
#define BTINTEL_PCIE_MSIX_NON_AUTO_CLEAR_CAUSE BIT(7)
/* Minimum and Maximum number of MSI-X Vector
@@ -325,6 +377,37 @@ struct rxq {
struct data_buf *bufs;
};
+/* Structure for DRAM Buffer
+ * @count: Number of descriptors
+ * @bufs: Array of data_buf structures
+ */
+struct btintel_pcie_dbgc {
+ u16 count;
+
+ void *frag_v_addr;
+ dma_addr_t frag_p_addr;
+ u16 frag_size;
+
+ dma_addr_t buf_p_addr;
+ void *buf_v_addr;
+ struct data_buf *bufs;
+};
+
+struct btintel_pcie_dump_header {
+ const char *driver_name;
+ u32 cnvi_top;
+ u32 cnvr_top;
+ u16 fw_timestamp;
+ u8 fw_build_type;
+ u32 fw_build_num;
+ u32 fw_git_sha1;
+ u32 cnvi_bt;
+ u32 write_ptr;
+ u32 wrap_ctr;
+ u16 trigger_reason;
+ int state;
+};
+
/* struct btintel_pcie_data
* @pdev: pci device
* @hdev: hdev device
@@ -405,6 +488,8 @@ struct btintel_pcie_data {
struct txq txq;
struct rxq rxq;
u32 alive_intr_ctxt;
+ struct btintel_pcie_dbgc dbgc;
+ struct btintel_pcie_dump_header dmp_hdr;
};
static inline u32 btintel_pcie_rd_reg32(struct btintel_pcie_data *data,
@@ -444,3 +529,11 @@ static inline void btintel_pcie_clr_reg_bits(struct btintel_pcie_data *data,
r &= ~bits;
iowrite32(r, data->base_addr + offset);
}
+
+static inline u32 btintel_pcie_rd_dev_mem(struct btintel_pcie_data *data,
+ u32 addr)
+{
+ btintel_pcie_wr_reg32(data, BTINTEL_PCIE_PRPH_DEV_ADDR_REG, addr);
+ return btintel_pcie_rd_reg32(data, BTINTEL_PCIE_PRPH_DEV_RD_REG);
+}
+
diff --git a/drivers/bluetooth/btmtk.c b/drivers/bluetooth/btmtk.c
index 68846c5bd4f7..4390fd571dbd 100644
--- a/drivers/bluetooth/btmtk.c
+++ b/drivers/bluetooth/btmtk.c
@@ -1330,13 +1330,6 @@ int btmtk_usb_setup(struct hci_dev *hdev)
break;
case 0x7922:
case 0x7925:
- /* Reset the device to ensure it's in the initial state before
- * downloading the firmware to ensure.
- */
-
- if (!test_bit(BTMTK_FIRMWARE_LOADED, &btmtk_data->flags))
- btmtk_usb_subsys_reset(hdev, dev_id);
- fallthrough;
case 0x7961:
btmtk_fw_get_filename(fw_bin_name, sizeof(fw_bin_name), dev_id,
fw_version, fw_flavor);
@@ -1345,12 +1338,9 @@ int btmtk_usb_setup(struct hci_dev *hdev)
btmtk_usb_hci_wmt_sync);
if (err < 0) {
bt_dev_err(hdev, "Failed to set up firmware (%d)", err);
- clear_bit(BTMTK_FIRMWARE_LOADED, &btmtk_data->flags);
return err;
}
- set_bit(BTMTK_FIRMWARE_LOADED, &btmtk_data->flags);
-
/* It's Device EndPoint Reset Option Register */
err = btmtk_usb_uhw_reg_write(hdev, MTK_EP_RST_OPT,
MTK_EP_RST_IN_OUT_OPT);
diff --git a/drivers/bluetooth/btmtksdio.c b/drivers/bluetooth/btmtksdio.c
index bd5464bde174..edd5eead1e93 100644
--- a/drivers/bluetooth/btmtksdio.c
+++ b/drivers/bluetooth/btmtksdio.c
@@ -610,7 +610,8 @@ static void btmtksdio_txrx_work(struct work_struct *work)
} while (int_status || time_is_before_jiffies(txrx_timeout));
/* Enable interrupt */
- sdio_writel(bdev->func, C_INT_EN_SET, MTK_REG_CHLPCR, NULL);
+ if (bdev->func->irq_handler)
+ sdio_writel(bdev->func, C_INT_EN_SET, MTK_REG_CHLPCR, NULL);
sdio_release_host(bdev->func);
diff --git a/drivers/bluetooth/btnxpuart.c b/drivers/bluetooth/btnxpuart.c
index aa5ec1d444a9..5091dea762a0 100644
--- a/drivers/bluetooth/btnxpuart.c
+++ b/drivers/bluetooth/btnxpuart.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* NXP Bluetooth driver
- * Copyright 2023 NXP
+ * Copyright 2023-2025 NXP
*/
#include <linux/module.h>
@@ -31,6 +31,7 @@
#define BTNXPUART_SERDEV_OPEN 4
#define BTNXPUART_IR_IN_PROGRESS 5
#define BTNXPUART_FW_DOWNLOAD_ABORT 6
+#define BTNXPUART_FW_DUMP_IN_PROGRESS 7
/* NXP HW err codes */
#define BTNXPUART_IR_HW_ERR 0xb0
@@ -98,14 +99,19 @@
#define PS_STATE_AWAKE 0
#define PS_STATE_SLEEP 1
-/* Bluetooth vendor command : Sleep mode */
+/* NXP Vendor Commands. Refer to user manual UM11628 on nxp.com */
+/* Set custom BD Address */
+#define HCI_NXP_SET_BD_ADDR 0xfc22
+/* Set Auto-Sleep mode */
#define HCI_NXP_AUTO_SLEEP_MODE 0xfc23
-/* Bluetooth vendor command : Wakeup method */
+/* Set Wakeup method */
#define HCI_NXP_WAKEUP_METHOD 0xfc53
-/* Bluetooth vendor command : Set operational baudrate */
+/* Set operational baudrate */
#define HCI_NXP_SET_OPER_SPEED 0xfc09
-/* Bluetooth vendor command: Independent Reset */
+/* Independent Reset (Soft Reset) */
#define HCI_NXP_IND_RESET 0xfcfc
+/* Bluetooth vendor command: Trigger FW dump */
+#define HCI_NXP_TRIGGER_DUMP 0xfe91
/* Bluetooth Power State : Vendor cmd params */
#define BT_PS_ENABLE 0x02
@@ -162,6 +168,12 @@ struct btnxpuart_data {
const char *fw_name_old;
};
+enum bootloader_param_change {
+ not_changed,
+ cmd_sent,
+ changed
+};
+
struct btnxpuart_dev {
struct hci_dev *hdev;
struct serdev_device *serdev;
@@ -177,6 +189,7 @@ struct btnxpuart_dev {
u32 fw_v1_sent_bytes;
u32 fw_dnld_v3_offset;
u32 fw_v3_offset_correction;
+ u32 fw_v3_prev_sent;
u32 fw_v1_expected_len;
u32 boot_reg_offset;
wait_queue_head_t fw_dnld_done_wait_q;
@@ -185,8 +198,8 @@ struct btnxpuart_dev {
u32 new_baudrate;
u32 current_baudrate;
u32 fw_init_baudrate;
- bool timeout_changed;
- bool baudrate_changed;
+ enum bootloader_param_change timeout_changed;
+ enum bootloader_param_change baudrate_changed;
bool helper_downloaded;
struct ps_data psdata;
@@ -204,10 +217,11 @@ struct btnxpuart_dev {
#define NXP_NAK_V3 0x7b
#define NXP_CRC_ERROR_V3 0x7c
-/* Bootloader signature error codes */
-#define NXP_ACK_RX_TIMEOUT 0x0002 /* ACK not received from host */
-#define NXP_HDR_RX_TIMEOUT 0x0003 /* FW Header chunk not received */
-#define NXP_DATA_RX_TIMEOUT 0x0004 /* FW Data chunk not received */
+/* Bootloader signature error codes: refer to AN12820 from nxp.com */
+#define NXP_CRC_RX_ERROR BIT(0) /* CRC error in previous packet */
+#define NXP_ACK_RX_TIMEOUT BIT(2) /* ACK not received from host */
+#define NXP_HDR_RX_TIMEOUT BIT(3) /* FW Header chunk not received */
+#define NXP_DATA_RX_TIMEOUT BIT(4) /* FW Data chunk not received */
#define HDR_LEN 16
@@ -310,6 +324,35 @@ union nxp_v3_rx_timeout_nak_u {
u8 buf[6];
};
+struct nxp_v3_crc_nak {
+ u8 nak;
+ u8 crc;
+} __packed;
+
+union nxp_v3_crc_nak_u {
+ struct nxp_v3_crc_nak pkt;
+ u8 buf[2];
+};
+
+/* FW dump */
+#define NXP_FW_DUMP_SIZE (1024 * 1000)
+
+struct nxp_fw_dump_hdr {
+ __le16 seq_num;
+ __le16 reserved;
+ __le16 buf_type;
+ __le16 buf_len;
+};
+
+union nxp_set_bd_addr_payload {
+ struct {
+ u8 param_id;
+ u8 param_len;
+ u8 param[6];
+ } __packed data;
+ u8 buf[8];
+};
+
static u8 crc8_table[CRC8_TABLE_SIZE];
/* Default configurations */
@@ -447,8 +490,14 @@ static int ps_setup(struct hci_dev *hdev)
return PTR_ERR(psdata->h2c_ps_gpio);
}
- if (!psdata->h2c_ps_gpio)
+ if (device_property_read_u8(&serdev->dev, "nxp,wakein-pin", &psdata->h2c_wakeup_gpio)) {
+ psdata->h2c_wakeup_gpio = 0xff; /* 0xff: use default pin/gpio */
+ } else if (!psdata->h2c_ps_gpio) {
+ bt_dev_warn(hdev, "nxp,wakein-pin property without device-wakeup GPIO");
psdata->h2c_wakeup_gpio = 0xff;
+ }
+
+ device_property_read_u8(&serdev->dev, "nxp,wakeout-pin", &psdata->c2h_wakeup_gpio);
psdata->hdev = hdev;
INIT_WORK(&psdata->work, ps_work_func);
@@ -540,9 +589,11 @@ static int send_wakeup_method_cmd(struct hci_dev *hdev, void *data)
pcmd.c2h_wakeupmode = psdata->c2h_wakeupmode;
pcmd.c2h_wakeup_gpio = psdata->c2h_wakeup_gpio;
+ pcmd.h2c_wakeup_gpio = 0xff;
switch (psdata->h2c_wakeupmode) {
case WAKEUP_METHOD_GPIO:
pcmd.h2c_wakeupmode = BT_CTRL_WAKEUP_METHOD_GPIO;
+ pcmd.h2c_wakeup_gpio = psdata->h2c_wakeup_gpio;
break;
case WAKEUP_METHOD_DTR:
pcmd.h2c_wakeupmode = BT_CTRL_WAKEUP_METHOD_DSR;
@@ -552,7 +603,6 @@ static int send_wakeup_method_cmd(struct hci_dev *hdev, void *data)
pcmd.h2c_wakeupmode = BT_CTRL_WAKEUP_METHOD_BREAK;
break;
}
- pcmd.h2c_wakeup_gpio = 0xff;
skb = nxp_drv_send_cmd(hdev, HCI_NXP_WAKEUP_METHOD, sizeof(pcmd), &pcmd);
if (IS_ERR(skb)) {
@@ -586,8 +636,13 @@ static void ps_init(struct hci_dev *hdev)
usleep_range(5000, 10000);
psdata->ps_state = PS_STATE_AWAKE;
- psdata->c2h_wakeupmode = BT_HOST_WAKEUP_METHOD_NONE;
- psdata->c2h_wakeup_gpio = 0xff;
+
+ if (psdata->c2h_wakeup_gpio) {
+ psdata->c2h_wakeupmode = BT_HOST_WAKEUP_METHOD_GPIO;
+ } else {
+ psdata->c2h_wakeupmode = BT_HOST_WAKEUP_METHOD_NONE;
+ psdata->c2h_wakeup_gpio = 0xff;
+ }
psdata->cur_h2c_wakeupmode = WAKEUP_METHOD_INVALID;
if (psdata->h2c_ps_gpio)
@@ -618,11 +673,6 @@ static void ps_init(struct hci_dev *hdev)
psdata->cur_psmode = PS_MODE_DISABLE;
psdata->target_ps_mode = DEFAULT_PS_MODE;
-
- if (psdata->cur_h2c_wakeupmode != psdata->h2c_wakeupmode)
- hci_cmd_sync_queue(hdev, send_wakeup_method_cmd, NULL, NULL);
- if (psdata->cur_psmode != psdata->target_ps_mode)
- hci_cmd_sync_queue(hdev, send_ps_cmd, NULL, NULL);
}
/* NXP Firmware Download Feature */
@@ -637,8 +687,8 @@ static int nxp_download_firmware(struct hci_dev *hdev)
nxpdev->boot_reg_offset = 0;
nxpdev->fw_dnld_v3_offset = 0;
nxpdev->fw_v3_offset_correction = 0;
- nxpdev->baudrate_changed = false;
- nxpdev->timeout_changed = false;
+ nxpdev->baudrate_changed = not_changed;
+ nxpdev->timeout_changed = not_changed;
nxpdev->helper_downloaded = false;
serdev_device_set_baudrate(nxpdev->serdev, HCI_NXP_PRI_BAUDRATE);
@@ -651,8 +701,10 @@ static int nxp_download_firmware(struct hci_dev *hdev)
&nxpdev->tx_state),
msecs_to_jiffies(60000));
- release_firmware(nxpdev->fw);
- memset(nxpdev->fw_name, 0, sizeof(nxpdev->fw_name));
+ if (nxpdev->fw && strlen(nxpdev->fw_name)) {
+ release_firmware(nxpdev->fw);
+ memset(nxpdev->fw_name, 0, sizeof(nxpdev->fw_name));
+ }
if (err == 0) {
bt_dev_err(hdev, "FW Download Timeout. offset: %d",
@@ -767,6 +819,16 @@ static bool is_fw_downloading(struct btnxpuart_dev *nxpdev)
return test_bit(BTNXPUART_FW_DOWNLOADING, &nxpdev->tx_state);
}
+static bool ind_reset_in_progress(struct btnxpuart_dev *nxpdev)
+{
+ return test_bit(BTNXPUART_IR_IN_PROGRESS, &nxpdev->tx_state);
+}
+
+static bool fw_dump_in_progress(struct btnxpuart_dev *nxpdev)
+{
+ return test_bit(BTNXPUART_FW_DUMP_IN_PROGRESS, &nxpdev->tx_state);
+}
+
static bool process_boot_signature(struct btnxpuart_dev *nxpdev)
{
if (test_bit(BTNXPUART_CHECK_BOOT_SIGNATURE, &nxpdev->tx_state)) {
@@ -860,15 +922,14 @@ static int nxp_recv_fw_req_v1(struct hci_dev *hdev, struct sk_buff *skb)
len = __le16_to_cpu(req->len);
if (!nxp_data->helper_fw_name) {
- if (!nxpdev->timeout_changed) {
- nxpdev->timeout_changed = nxp_fw_change_timeout(hdev,
- len);
+ if (nxpdev->timeout_changed != changed) {
+ nxp_fw_change_timeout(hdev, len);
+ nxpdev->timeout_changed = changed;
goto free_skb;
}
- if (!nxpdev->baudrate_changed) {
- nxpdev->baudrate_changed = nxp_fw_change_baudrate(hdev,
- len);
- if (nxpdev->baudrate_changed) {
+ if (nxpdev->baudrate_changed != changed) {
+ if (nxp_fw_change_baudrate(hdev, len)) {
+ nxpdev->baudrate_changed = changed;
serdev_device_set_baudrate(nxpdev->serdev,
HCI_NXP_SEC_BAUDRATE);
serdev_device_set_flow_control(nxpdev->serdev, true);
@@ -1047,32 +1108,35 @@ static void nxp_handle_fw_download_error(struct hci_dev *hdev, struct v3_data_re
struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev);
__u32 offset = __le32_to_cpu(req->offset);
__u16 err = __le16_to_cpu(req->error);
- union nxp_v3_rx_timeout_nak_u nak_tx_buf;
-
- switch (err) {
- case NXP_ACK_RX_TIMEOUT:
- case NXP_HDR_RX_TIMEOUT:
- case NXP_DATA_RX_TIMEOUT:
- nak_tx_buf.pkt.nak = NXP_NAK_V3;
- nak_tx_buf.pkt.offset = __cpu_to_le32(offset);
- nak_tx_buf.pkt.crc = crc8(crc8_table, nak_tx_buf.buf,
- sizeof(nak_tx_buf) - 1, 0xff);
- serdev_device_write_buf(nxpdev->serdev, nak_tx_buf.buf,
- sizeof(nak_tx_buf));
- break;
- default:
- bt_dev_dbg(hdev, "Unknown bootloader error code: %d", err);
- break;
-
+ union nxp_v3_rx_timeout_nak_u timeout_nak_buf;
+ union nxp_v3_crc_nak_u crc_nak_buf;
+
+ if (err & NXP_CRC_RX_ERROR) {
+ crc_nak_buf.pkt.nak = NXP_CRC_ERROR_V3;
+ crc_nak_buf.pkt.crc = crc8(crc8_table, crc_nak_buf.buf,
+ sizeof(crc_nak_buf) - 1, 0xff);
+ serdev_device_write_buf(nxpdev->serdev, crc_nak_buf.buf,
+ sizeof(crc_nak_buf));
+ } else if (err & NXP_ACK_RX_TIMEOUT ||
+ err & NXP_HDR_RX_TIMEOUT ||
+ err & NXP_DATA_RX_TIMEOUT) {
+ timeout_nak_buf.pkt.nak = NXP_NAK_V3;
+ timeout_nak_buf.pkt.offset = __cpu_to_le32(offset);
+ timeout_nak_buf.pkt.crc = crc8(crc8_table, timeout_nak_buf.buf,
+ sizeof(timeout_nak_buf) - 1, 0xff);
+ serdev_device_write_buf(nxpdev->serdev, timeout_nak_buf.buf,
+ sizeof(timeout_nak_buf));
+ } else {
+ bt_dev_err(hdev, "Unknown bootloader error code: %d", err);
}
-
}
static int nxp_recv_fw_req_v3(struct hci_dev *hdev, struct sk_buff *skb)
{
struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev);
struct v3_data_req *req;
- __u16 len;
+ __u16 len = 0;
+ __u16 err = 0;
__u32 offset;
if (!process_boot_signature(nxpdev))
@@ -1082,23 +1146,40 @@ static int nxp_recv_fw_req_v3(struct hci_dev *hdev, struct sk_buff *skb)
if (!req || !nxpdev->fw)
goto free_skb;
- if (!req->error) {
+ err = __le16_to_cpu(req->error);
+
+ if (!err) {
nxp_send_ack(NXP_ACK_V3, hdev);
+ if (nxpdev->timeout_changed == cmd_sent)
+ nxpdev->timeout_changed = changed;
+ if (nxpdev->baudrate_changed == cmd_sent)
+ nxpdev->baudrate_changed = changed;
} else {
nxp_handle_fw_download_error(hdev, req);
+ if (nxpdev->timeout_changed == cmd_sent &&
+ err == NXP_CRC_RX_ERROR) {
+ nxpdev->fw_v3_offset_correction -= nxpdev->fw_v3_prev_sent;
+ nxpdev->timeout_changed = not_changed;
+ }
+ if (nxpdev->baudrate_changed == cmd_sent &&
+ err == NXP_CRC_RX_ERROR) {
+ nxpdev->fw_v3_offset_correction -= nxpdev->fw_v3_prev_sent;
+ nxpdev->baudrate_changed = not_changed;
+ }
goto free_skb;
}
len = __le16_to_cpu(req->len);
- if (!nxpdev->timeout_changed) {
- nxpdev->timeout_changed = nxp_fw_change_timeout(hdev, len);
+ if (nxpdev->timeout_changed != changed) {
+ nxp_fw_change_timeout(hdev, len);
+ nxpdev->timeout_changed = cmd_sent;
goto free_skb;
}
- if (!nxpdev->baudrate_changed) {
- nxpdev->baudrate_changed = nxp_fw_change_baudrate(hdev, len);
- if (nxpdev->baudrate_changed) {
+ if (nxpdev->baudrate_changed != changed) {
+ if (nxp_fw_change_baudrate(hdev, len)) {
+ nxpdev->baudrate_changed = cmd_sent;
serdev_device_set_baudrate(nxpdev->serdev,
HCI_NXP_SEC_BAUDRATE);
serdev_device_set_flow_control(nxpdev->serdev, true);
@@ -1130,6 +1211,7 @@ static int nxp_recv_fw_req_v3(struct hci_dev *hdev, struct sk_buff *skb)
nxpdev->fw_dnld_v3_offset, len);
free_skb:
+ nxpdev->fw_v3_prev_sent = len;
kfree_skb(skb);
return 0;
}
@@ -1168,7 +1250,7 @@ static int nxp_set_baudrate_cmd(struct hci_dev *hdev, void *data)
static int nxp_check_boot_sign(struct btnxpuart_dev *nxpdev)
{
serdev_device_set_baudrate(nxpdev->serdev, HCI_NXP_PRI_BAUDRATE);
- if (test_bit(BTNXPUART_IR_IN_PROGRESS, &nxpdev->tx_state))
+ if (ind_reset_in_progress(nxpdev))
serdev_device_set_flow_control(nxpdev->serdev, false);
else
serdev_device_set_flow_control(nxpdev->serdev, true);
@@ -1197,6 +1279,102 @@ static int nxp_set_ind_reset(struct hci_dev *hdev, void *data)
return hci_recv_frame(hdev, skb);
}
+/* Firmware dump */
+static void nxp_coredump(struct hci_dev *hdev)
+{
+ struct sk_buff *skb;
+ u8 pcmd = 2;
+
+ skb = nxp_drv_send_cmd(hdev, HCI_NXP_TRIGGER_DUMP, 1, &pcmd);
+ if (!IS_ERR(skb))
+ kfree_skb(skb);
+}
+
+static void nxp_coredump_hdr(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ /* Nothing to be added in FW dump header */
+}
+
+static int nxp_process_fw_dump(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ struct hci_acl_hdr *acl_hdr = (struct hci_acl_hdr *)skb_pull_data(skb,
+ sizeof(*acl_hdr));
+ struct nxp_fw_dump_hdr *fw_dump_hdr = (struct nxp_fw_dump_hdr *)skb->data;
+ struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev);
+ __u16 seq_num = __le16_to_cpu(fw_dump_hdr->seq_num);
+ __u16 buf_len = __le16_to_cpu(fw_dump_hdr->buf_len);
+ int err;
+
+ if (seq_num == 0x0001) {
+ if (test_and_set_bit(BTNXPUART_FW_DUMP_IN_PROGRESS, &nxpdev->tx_state)) {
+ bt_dev_err(hdev, "FW dump already in progress");
+ goto free_skb;
+ }
+ bt_dev_warn(hdev, "==== Start FW dump ===");
+ err = hci_devcd_init(hdev, NXP_FW_DUMP_SIZE);
+ if (err < 0)
+ goto free_skb;
+
+ schedule_delayed_work(&hdev->dump.dump_timeout,
+ msecs_to_jiffies(20000));
+ }
+
+ err = hci_devcd_append(hdev, skb_clone(skb, GFP_ATOMIC));
+ if (err < 0)
+ goto free_skb;
+
+ if (buf_len == 0) {
+ bt_dev_warn(hdev, "==== FW dump complete ===");
+ clear_bit(BTNXPUART_FW_DUMP_IN_PROGRESS, &nxpdev->tx_state);
+ hci_devcd_complete(hdev);
+ nxp_set_ind_reset(hdev, NULL);
+ }
+
+free_skb:
+ kfree_skb(skb);
+ return 0;
+}
+
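Each FW dump chunk handled above is an ordinary ACL packet; a sketch of the chunk layout as the code interprets it (illustration only):

/*
 * FW dump chunk, after the H:4 packet type byte:
 *
 *   struct hci_acl_hdr hdr;        (handle & 0x0FFF) == 0xFFF marks a dump chunk
 *   struct nxp_fw_dump_hdr {
 *       le16 seq_num;              0x0001 opens a dump; the devcoredump is
 *                                  initialised with NXP_FW_DUMP_SIZE
 *       le16 reserved;
 *       le16 buf_type;
 *       le16 buf_len;              0 marks the final chunk; the dump is then
 *                                  completed and an independent reset issued
 *   };
 *   u8 payload[buf_len];
 *
 * The ACL header is stripped; the dump header and payload are appended
 * verbatim to the devcoredump.
 */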
+static int nxp_recv_acl_pkt(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ __u16 handle = __le16_to_cpu(hci_acl_hdr(skb)->handle);
+
+ /* FW dump chunks are ACL packets with conn handle 0xfff */
+ if ((handle & 0x0FFF) == 0xFFF)
+ return nxp_process_fw_dump(hdev, skb);
+ else
+ return hci_recv_frame(hdev, skb);
+}
+
+static int nxp_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr)
+{
+ union nxp_set_bd_addr_payload pcmd;
+ int err;
+
+ pcmd.data.param_id = 0xfe;
+ pcmd.data.param_len = 6;
+ memcpy(pcmd.data.param, bdaddr, 6);
+
+ /* BD address can be assigned only after the first reset command. */
+ err = __hci_cmd_sync_status(hdev, HCI_OP_RESET, 0, NULL,
+ HCI_INIT_TIMEOUT);
+ if (err) {
+ bt_dev_err(hdev,
+ "Reset before setting local-bd-addr failed (%d)",
+ err);
+ return err;
+ }
+
+ err = __hci_cmd_sync_status(hdev, HCI_NXP_SET_BD_ADDR, sizeof(pcmd),
+ pcmd.buf, HCI_CMD_TIMEOUT);
+ if (err) {
+ bt_dev_err(hdev, "Changing device address failed (%d)", err);
+ return err;
+ }
+
+ return 0;
+}
+
/* NXP protocol */
static int nxp_setup(struct hci_dev *hdev)
{
@@ -1216,11 +1394,6 @@ static int nxp_setup(struct hci_dev *hdev)
serdev_device_set_baudrate(nxpdev->serdev, nxpdev->fw_init_baudrate);
nxpdev->current_baudrate = nxpdev->fw_init_baudrate;
- if (nxpdev->current_baudrate != HCI_NXP_SEC_BAUDRATE) {
- nxpdev->new_baudrate = HCI_NXP_SEC_BAUDRATE;
- hci_cmd_sync_queue(hdev, nxp_set_baudrate_cmd, NULL, NULL);
- }
-
ps_init(hdev);
if (test_and_clear_bit(BTNXPUART_IR_IN_PROGRESS, &nxpdev->tx_state))
@@ -1229,6 +1402,22 @@ static int nxp_setup(struct hci_dev *hdev)
return 0;
}
+static int nxp_post_init(struct hci_dev *hdev)
+{
+ struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev);
+ struct ps_data *psdata = &nxpdev->psdata;
+
+ if (nxpdev->current_baudrate != HCI_NXP_SEC_BAUDRATE) {
+ nxpdev->new_baudrate = HCI_NXP_SEC_BAUDRATE;
+ nxp_set_baudrate_cmd(hdev, NULL);
+ }
+ if (psdata->cur_h2c_wakeupmode != psdata->h2c_wakeupmode)
+ send_wakeup_method_cmd(hdev, NULL);
+ if (psdata->cur_psmode != psdata->target_ps_mode)
+ send_ps_cmd(hdev, NULL);
+ return 0;
+}
+
static void nxp_hw_err(struct hci_dev *hdev, u8 code)
{
struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev);
@@ -1247,25 +1436,44 @@ static int nxp_shutdown(struct hci_dev *hdev)
{
struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev);
struct sk_buff *skb;
- u8 *status;
u8 pcmd = 0;
- if (test_bit(BTNXPUART_IR_IN_PROGRESS, &nxpdev->tx_state)) {
+ if (ind_reset_in_progress(nxpdev)) {
skb = nxp_drv_send_cmd(hdev, HCI_NXP_IND_RESET, 1, &pcmd);
- if (IS_ERR(skb))
- return PTR_ERR(skb);
-
- status = skb_pull_data(skb, 1);
- if (status) {
- serdev_device_set_flow_control(nxpdev->serdev, false);
- set_bit(BTNXPUART_FW_DOWNLOADING, &nxpdev->tx_state);
- }
- kfree_skb(skb);
+ serdev_device_set_flow_control(nxpdev->serdev, false);
+ set_bit(BTNXPUART_FW_DOWNLOADING, &nxpdev->tx_state);
+ /* HCI_NXP_IND_RESET command may not return any response */
+ if (!IS_ERR(skb))
+ kfree_skb(skb);
+ } else if (nxpdev->current_baudrate != nxpdev->fw_init_baudrate) {
+ nxpdev->new_baudrate = nxpdev->fw_init_baudrate;
+ nxp_set_baudrate_cmd(hdev, NULL);
}
return 0;
}
+static bool nxp_wakeup(struct hci_dev *hdev)
+{
+ struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev);
+ struct ps_data *psdata = &nxpdev->psdata;
+
+ if (psdata->c2h_wakeupmode != BT_HOST_WAKEUP_METHOD_NONE)
+ return true;
+
+ return false;
+}
+
+static void nxp_reset(struct hci_dev *hdev)
+{
+ struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev);
+
+ if (!ind_reset_in_progress(nxpdev) && !fw_dump_in_progress(nxpdev)) {
+ bt_dev_dbg(hdev, "CMD Timeout detected. Resetting.");
+ nxp_set_ind_reset(hdev, NULL);
+ }
+}
+
static int btnxpuart_queue_skb(struct hci_dev *hdev, struct sk_buff *skb)
{
struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev);
@@ -1286,6 +1494,9 @@ static int nxp_enqueue(struct hci_dev *hdev, struct sk_buff *skb)
struct wakeup_cmd_payload wakeup_parm;
__le32 baudrate_parm;
+ if (fw_dump_in_progress(nxpdev))
+ return -EBUSY;
+
/* if vendor commands are received from user space (e.g. hcitool), update
* driver flags accordingly and ask driver to re-send the command to FW.
* In case the payload for any command does not match expected payload
@@ -1454,7 +1665,7 @@ static int btnxpuart_flush(struct hci_dev *hdev)
}
static const struct h4_recv_pkt nxp_recv_pkts[] = {
- { H4_RECV_ACL, .recv = hci_recv_frame },
+ { H4_RECV_ACL, .recv = nxp_recv_acl_pkt },
{ H4_RECV_SCO, .recv = hci_recv_frame },
{ H4_RECV_EVENT, .recv = hci_recv_frame },
{ H4_RECV_ISO, .recv = hci_recv_frame },
@@ -1476,11 +1687,13 @@ static size_t btnxpuart_receive_buf(struct serdev_device *serdev,
if (IS_ERR(nxpdev->rx_skb)) {
int err = PTR_ERR(nxpdev->rx_skb);
/* Safe to ignore out-of-sync bootloader signatures */
- if (!is_fw_downloading(nxpdev))
+ if (!is_fw_downloading(nxpdev) &&
+ !ind_reset_in_progress(nxpdev))
bt_dev_err(nxpdev->hdev, "Frame reassembly failed (%d)", err);
return count;
}
- if (!is_fw_downloading(nxpdev))
+ if (!is_fw_downloading(nxpdev) &&
+ !ind_reset_in_progress(nxpdev))
nxpdev->hdev->stat.byte_rx += count;
return count;
}
@@ -1499,6 +1712,7 @@ static int nxp_serdev_probe(struct serdev_device *serdev)
{
struct hci_dev *hdev;
struct btnxpuart_dev *nxpdev;
+ bdaddr_t ba = {0};
nxpdev = devm_kzalloc(&serdev->dev, sizeof(*nxpdev), GFP_KERNEL);
if (!nxpdev)
@@ -1543,11 +1757,21 @@ static int nxp_serdev_probe(struct serdev_device *serdev)
hdev->close = btnxpuart_close;
hdev->flush = btnxpuart_flush;
hdev->setup = nxp_setup;
+ hdev->post_init = nxp_post_init;
hdev->send = nxp_enqueue;
hdev->hw_error = nxp_hw_err;
hdev->shutdown = nxp_shutdown;
+ hdev->wakeup = nxp_wakeup;
+ hdev->reset = nxp_reset;
+ hdev->set_bdaddr = nxp_set_bdaddr;
SET_HCIDEV_DEV(hdev, &serdev->dev);
+ device_property_read_u8_array(&nxpdev->serdev->dev,
+ "local-bd-address",
+ (u8 *)&ba, sizeof(ba));
+ if (bacmp(&ba, BDADDR_ANY))
+ set_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks);
+
if (hci_register_dev(hdev) < 0) {
dev_err(&serdev->dev, "Can't register HCI device\n");
goto probe_fail;
@@ -1556,6 +1780,8 @@ static int nxp_serdev_probe(struct serdev_device *serdev)
if (ps_setup(hdev))
goto probe_fail;
+ hci_devcd_register(hdev, nxp_coredump, nxp_coredump_hdr, NULL);
+
return 0;
probe_fail:
@@ -1573,16 +1799,15 @@ static void nxp_serdev_remove(struct serdev_device *serdev)
clear_bit(BTNXPUART_FW_DOWNLOADING, &nxpdev->tx_state);
wake_up_interruptible(&nxpdev->check_boot_sign_wait_q);
wake_up_interruptible(&nxpdev->fw_dnld_done_wait_q);
- } else {
- /* Restore FW baudrate to fw_init_baudrate if changed.
- * This will ensure FW baudrate is in sync with
- * driver baudrate in case this driver is re-inserted.
+ }
+
+ if (test_bit(HCI_RUNNING, &hdev->flags)) {
+ /* Ensure the shutdown callback is executed before unregistering, so
+ * that the baudrate is reset to its initial value.
*/
- if (nxpdev->current_baudrate != nxpdev->fw_init_baudrate) {
- nxpdev->new_baudrate = nxpdev->fw_init_baudrate;
- nxp_set_baudrate_cmd(hdev, NULL);
- }
+ nxp_shutdown(hdev);
}
+
ps_cleanup(nxpdev);
hci_unregister_dev(hdev);
hci_free_dev(hdev);
@@ -1608,6 +1833,17 @@ static int nxp_serdev_resume(struct device *dev)
}
#endif
+#ifdef CONFIG_DEV_COREDUMP
+static void nxp_serdev_coredump(struct device *dev)
+{
+ struct btnxpuart_dev *nxpdev = dev_get_drvdata(dev);
+ struct hci_dev *hdev = nxpdev->hdev;
+
+ if (hdev->dump.coredump)
+ hdev->dump.coredump(hdev);
+}
+#endif
+
static struct btnxpuart_data w8987_data __maybe_unused = {
.helper_fw_name = NULL,
.fw_name = FIRMWARE_W8987,
@@ -1638,6 +1874,9 @@ static struct serdev_device_driver nxp_serdev_driver = {
.name = "btnxpuart",
.of_match_table = of_match_ptr(nxpuart_of_match_table),
.pm = &nxp_pm_ops,
+#ifdef CONFIG_DEV_COREDUMP
+ .coredump = nxp_serdev_coredump,
+#endif
},
};
diff --git a/drivers/bluetooth/btqca.c b/drivers/bluetooth/btqca.c
index cdf09d9a9ad2..3d6778b95e00 100644
--- a/drivers/bluetooth/btqca.c
+++ b/drivers/bluetooth/btqca.c
@@ -785,6 +785,7 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
const char *firmware_name, const char *rampatch_name)
{
struct qca_fw_config config = {};
+ const char *variant = "";
int err;
u8 rom_ver = 0;
u32 soc_ver;
@@ -815,6 +816,10 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
snprintf(config.fwname, sizeof(config.fwname), "qca/%s", rampatch_name);
} else {
switch (soc_type) {
+ case QCA_WCN3950:
+ snprintf(config.fwname, sizeof(config.fwname),
+ "qca/cmbtfw%02x.tlv", rom_ver);
+ break;
case QCA_WCN3990:
case QCA_WCN3991:
case QCA_WCN3998:
@@ -880,16 +885,23 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
}
} else {
switch (soc_type) {
+ case QCA_WCN3950:
+ if (le32_to_cpu(ver.soc_id) == QCA_WCN3950_SOC_ID_T)
+ variant = "t";
+ else if (le32_to_cpu(ver.soc_id) == QCA_WCN3950_SOC_ID_S)
+ variant = "u";
+
+ snprintf(config.fwname, sizeof(config.fwname),
+ "qca/cmnv%02x%s.bin", rom_ver, variant);
+ break;
case QCA_WCN3990:
case QCA_WCN3991:
case QCA_WCN3998:
- if (le32_to_cpu(ver.soc_id) == QCA_WCN3991_SOC_ID) {
- snprintf(config.fwname, sizeof(config.fwname),
- "qca/crnv%02xu.bin", rom_ver);
- } else {
- snprintf(config.fwname, sizeof(config.fwname),
- "qca/crnv%02x.bin", rom_ver);
- }
+ if (le32_to_cpu(ver.soc_id) == QCA_WCN3991_SOC_ID)
+ variant = "u";
+
+ snprintf(config.fwname, sizeof(config.fwname),
+ "qca/crnv%02x%s.bin", rom_ver, variant);
break;
case QCA_WCN3988:
snprintf(config.fwname, sizeof(config.fwname),
@@ -948,6 +960,7 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
* VsMsftOpCode.
*/
switch (soc_type) {
+ case QCA_WCN3950:
case QCA_WCN3988:
case QCA_WCN3990:
case QCA_WCN3991:
diff --git a/drivers/bluetooth/btqca.h b/drivers/bluetooth/btqca.h
index 9d28c8800225..8f3c1b1c77b3 100644
--- a/drivers/bluetooth/btqca.h
+++ b/drivers/bluetooth/btqca.h
@@ -41,6 +41,9 @@
#define QCA_WCN3991_SOC_ID 0x40014320
+#define QCA_WCN3950_SOC_ID_T 0x40074130
+#define QCA_WCN3950_SOC_ID_S 0x40075130
+
/* QCA chipset version can be decided by patch and SoC
* version, combination with upper 2 bytes from SoC
* and lower 2 bytes from patch will be used.
@@ -145,6 +148,7 @@ enum qca_btsoc_type {
QCA_INVALID = -1,
QCA_AR3002,
QCA_ROME,
+ QCA_WCN3950,
QCA_WCN3988,
QCA_WCN3990,
QCA_WCN3998,
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 8149e53fd0a7..5012b5ff92c8 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -34,6 +34,7 @@ static bool force_scofix;
static bool enable_autosuspend = IS_ENABLED(CONFIG_BT_HCIBTUSB_AUTOSUSPEND);
static bool enable_poll_sync = IS_ENABLED(CONFIG_BT_HCIBTUSB_POLL_SYNC);
static bool reset = true;
+static bool auto_isoc_alt = IS_ENABLED(CONFIG_BT_HCIBTUSB_AUTO_ISOC_ALT);
static struct usb_driver btusb_driver;
@@ -375,10 +376,38 @@ static const struct usb_device_id quirks_table[] = {
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x0489, 0xe0f3), .driver_info = BTUSB_QCA_WCN6855 |
BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x0489, 0xe100), .driver_info = BTUSB_QCA_WCN6855 |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x0489, 0xe103), .driver_info = BTUSB_QCA_WCN6855 |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x0489, 0xe10a), .driver_info = BTUSB_QCA_WCN6855 |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x0489, 0xe10d), .driver_info = BTUSB_QCA_WCN6855 |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x0489, 0xe11b), .driver_info = BTUSB_QCA_WCN6855 |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x0489, 0xe11c), .driver_info = BTUSB_QCA_WCN6855 |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x0489, 0xe11f), .driver_info = BTUSB_QCA_WCN6855 |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x0489, 0xe141), .driver_info = BTUSB_QCA_WCN6855 |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x0489, 0xe14a), .driver_info = BTUSB_QCA_WCN6855 |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x0489, 0xe14b), .driver_info = BTUSB_QCA_WCN6855 |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x0489, 0xe14d), .driver_info = BTUSB_QCA_WCN6855 |
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x13d3, 0x3623), .driver_info = BTUSB_QCA_WCN6855 |
BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x13d3, 0x3624), .driver_info = BTUSB_QCA_WCN6855 |
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x2c7c, 0x0130), .driver_info = BTUSB_QCA_WCN6855 |
BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x2c7c, 0x0131), .driver_info = BTUSB_QCA_WCN6855 |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x2c7c, 0x0132), .driver_info = BTUSB_QCA_WCN6855 |
+ BTUSB_WIDEBAND_SPEECH },
/* Broadcom BCM2035 */
{ USB_DEVICE(0x0a5c, 0x2009), .driver_info = BTUSB_BCM92035 },
@@ -639,6 +668,10 @@ static const struct usb_device_id quirks_table[] = {
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x0489, 0xe102), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x0489, 0xe152), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x0489, 0xe153), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x04ca, 0x3804), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x04ca, 0x38e4), .driver_info = BTUSB_MEDIATEK |
@@ -1085,6 +1118,42 @@ static inline void btusb_free_frags(struct btusb_data *data)
spin_unlock_irqrestore(&data->rxlock, flags);
}
+static void btusb_sco_connected(struct btusb_data *data, struct sk_buff *skb)
+{
+ struct hci_event_hdr *hdr = (void *) skb->data;
+ struct hci_ev_sync_conn_complete *ev =
+ (void *) skb->data + sizeof(*hdr);
+ struct hci_dev *hdev = data->hdev;
+ unsigned int notify_air_mode;
+
+ if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT)
+ return;
+
+ if (skb->len < sizeof(*hdr) || hdr->evt != HCI_EV_SYNC_CONN_COMPLETE)
+ return;
+
+ if (skb->len != sizeof(*hdr) + sizeof(*ev) || ev->status)
+ return;
+
+ switch (ev->air_mode) {
+ case BT_CODEC_CVSD:
+ notify_air_mode = HCI_NOTIFY_ENABLE_SCO_CVSD;
+ break;
+
+ case BT_CODEC_TRANSPARENT:
+ notify_air_mode = HCI_NOTIFY_ENABLE_SCO_TRANSP;
+ break;
+
+ default:
+ return;
+ }
+
+ bt_dev_info(hdev, "enabling SCO with air mode %u", ev->air_mode);
+ data->sco_num = 1;
+ data->air_mode = notify_air_mode;
+ schedule_work(&data->work);
+}
+
static int btusb_recv_event(struct btusb_data *data, struct sk_buff *skb)
{
if (data->intr_interval) {
@@ -1092,6 +1161,10 @@ static int btusb_recv_event(struct btusb_data *data, struct sk_buff *skb)
schedule_delayed_work(&data->rx_work, 0);
}
+ /* Configure altsetting for HCI_USER_CHANNEL on SCO connected */
+ if (auto_isoc_alt && hci_dev_test_flag(data->hdev, HCI_USER_CHANNEL))
+ btusb_sco_connected(data, skb);
+
return data->recv_event(data->hdev, skb);
}
@@ -2436,6 +2509,8 @@ static int btusb_setup_csr(struct hci_dev *hdev)
set_bit(HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, &hdev->quirks);
set_bit(HCI_QUIRK_BROKEN_FILTER_CLEAR_ALL, &hdev->quirks);
set_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks);
+ set_bit(HCI_QUIRK_BROKEN_READ_VOICE_SETTING, &hdev->quirks);
+ set_bit(HCI_QUIRK_BROKEN_READ_PAGE_SCAN_TYPE, &hdev->quirks);
/* Clear the reset quirk since this is not an actual
* early Bluetooth 1.1 device from CSR.
@@ -2647,7 +2722,7 @@ static void btusb_mtk_claim_iso_intf(struct btusb_data *data)
device_unlock(&btmtk_data->isopkt_intf->dev);
if (err < 0) {
btmtk_data->isopkt_intf = NULL;
- bt_dev_err(data->hdev, "Failed to claim iso interface");
+ bt_dev_err(data->hdev, "Failed to claim iso interface: %d", err);
return;
}
@@ -3644,6 +3719,7 @@ static ssize_t force_poll_sync_write(struct file *file,
}
static const struct file_operations force_poll_sync_fops = {
+ .owner = THIS_MODULE,
.open = simple_open,
.read = force_poll_sync_read,
.write = force_poll_sync_write,
diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c
index d2d6ba8d2f8b..acba83156de9 100644
--- a/drivers/bluetooth/hci_ldisc.c
+++ b/drivers/bluetooth/hci_ldisc.c
@@ -102,7 +102,8 @@ static inline struct sk_buff *hci_uart_dequeue(struct hci_uart *hu)
if (!skb) {
percpu_down_read(&hu->proto_lock);
- if (test_bit(HCI_UART_PROTO_READY, &hu->flags))
+ if (test_bit(HCI_UART_PROTO_READY, &hu->flags) ||
+ test_bit(HCI_UART_PROTO_INIT, &hu->flags))
skb = hu->proto->dequeue(hu);
percpu_up_read(&hu->proto_lock);
@@ -124,7 +125,8 @@ int hci_uart_tx_wakeup(struct hci_uart *hu)
if (!percpu_down_read_trylock(&hu->proto_lock))
return 0;
- if (!test_bit(HCI_UART_PROTO_READY, &hu->flags))
+ if (!test_bit(HCI_UART_PROTO_READY, &hu->flags) &&
+ !test_bit(HCI_UART_PROTO_INIT, &hu->flags))
goto no_schedule;
set_bit(HCI_UART_TX_WAKEUP, &hu->tx_state);
@@ -278,7 +280,8 @@ static int hci_uart_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
percpu_down_read(&hu->proto_lock);
- if (!test_bit(HCI_UART_PROTO_READY, &hu->flags)) {
+ if (!test_bit(HCI_UART_PROTO_READY, &hu->flags) &&
+ !test_bit(HCI_UART_PROTO_INIT, &hu->flags)) {
percpu_up_read(&hu->proto_lock);
return -EUNATCH;
}
@@ -585,7 +588,8 @@ static void hci_uart_tty_wakeup(struct tty_struct *tty)
if (tty != hu->tty)
return;
- if (test_bit(HCI_UART_PROTO_READY, &hu->flags))
+ if (test_bit(HCI_UART_PROTO_READY, &hu->flags) ||
+ test_bit(HCI_UART_PROTO_INIT, &hu->flags))
hci_uart_tx_wakeup(hu);
}
@@ -611,7 +615,8 @@ static void hci_uart_tty_receive(struct tty_struct *tty, const u8 *data,
percpu_down_read(&hu->proto_lock);
- if (!test_bit(HCI_UART_PROTO_READY, &hu->flags)) {
+ if (!test_bit(HCI_UART_PROTO_READY, &hu->flags) &&
+ !test_bit(HCI_UART_PROTO_INIT, &hu->flags)) {
percpu_up_read(&hu->proto_lock);
return;
}
@@ -707,12 +712,16 @@ static int hci_uart_set_proto(struct hci_uart *hu, int id)
hu->proto = p;
+ set_bit(HCI_UART_PROTO_INIT, &hu->flags);
+
err = hci_uart_register_dev(hu);
if (err) {
return err;
}
set_bit(HCI_UART_PROTO_READY, &hu->flags);
+ clear_bit(HCI_UART_PROTO_INIT, &hu->flags);
+
return 0;
}
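
The hci_ldisc.c hunks above repeat the same two-flag test: UART traffic is allowed either once the protocol is fully registered (HCI_UART_PROTO_READY) or while hci_uart_register_dev() is still running with the new HCI_UART_PROTO_INIT bit set, so the proto can exchange frames during setup. A minimal sketch of that predicate as a helper, illustrative only and not part of the patch:

	static inline bool hci_uart_proto_usable(struct hci_uart *hu)
	{
		/* frames may flow while the proto is initialising or ready */
		return test_bit(HCI_UART_PROTO_READY, &hu->flags) ||
		       test_bit(HCI_UART_PROTO_INIT, &hu->flags);
	}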
diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
index 0ac2168f1dc4..f2558506a02c 100644
--- a/drivers/bluetooth/hci_qca.c
+++ b/drivers/bluetooth/hci_qca.c
@@ -623,6 +623,7 @@ static int qca_open(struct hci_uart *hu)
qcadev = serdev_device_get_drvdata(hu->serdev);
switch (qcadev->btsoc_type) {
+ case QCA_WCN3950:
case QCA_WCN3988:
case QCA_WCN3990:
case QCA_WCN3991:
@@ -1366,6 +1367,7 @@ static int qca_set_baudrate(struct hci_dev *hdev, uint8_t baudrate)
/* Give the controller time to process the request */
switch (qca_soc_type(hu)) {
+ case QCA_WCN3950:
case QCA_WCN3988:
case QCA_WCN3990:
case QCA_WCN3991:
@@ -1452,6 +1454,7 @@ static unsigned int qca_get_speed(struct hci_uart *hu,
static int qca_check_speeds(struct hci_uart *hu)
{
switch (qca_soc_type(hu)) {
+ case QCA_WCN3950:
case QCA_WCN3988:
case QCA_WCN3990:
case QCA_WCN3991:
@@ -1494,6 +1497,7 @@ static int qca_set_speed(struct hci_uart *hu, enum qca_speed_type speed_type)
* changing the baudrate of chip and host.
*/
switch (soc_type) {
+ case QCA_WCN3950:
case QCA_WCN3988:
case QCA_WCN3990:
case QCA_WCN3991:
@@ -1528,6 +1532,7 @@ static int qca_set_speed(struct hci_uart *hu, enum qca_speed_type speed_type)
error:
switch (soc_type) {
+ case QCA_WCN3950:
case QCA_WCN3988:
case QCA_WCN3990:
case QCA_WCN3991:
@@ -1746,6 +1751,7 @@ static int qca_regulator_init(struct hci_uart *hu)
}
switch (soc_type) {
+ case QCA_WCN3950:
case QCA_WCN3988:
case QCA_WCN3990:
case QCA_WCN3991:
@@ -1776,6 +1782,7 @@ static int qca_regulator_init(struct hci_uart *hu)
qca_set_speed(hu, QCA_INIT_SPEED);
switch (soc_type) {
+ case QCA_WCN3950:
case QCA_WCN3988:
case QCA_WCN3990:
case QCA_WCN3991:
@@ -1807,6 +1814,7 @@ static int qca_power_on(struct hci_dev *hdev)
return 0;
switch (soc_type) {
+ case QCA_WCN3950:
case QCA_WCN3988:
case QCA_WCN3990:
case QCA_WCN3991:
@@ -1891,6 +1899,7 @@ static int qca_setup(struct hci_uart *hu)
soc_name = "qca2066";
break;
+ case QCA_WCN3950:
case QCA_WCN3988:
case QCA_WCN3990:
case QCA_WCN3991:
@@ -1925,6 +1934,7 @@ retry:
clear_bit(QCA_SSR_TRIGGERED, &qca->flags);
switch (soc_type) {
+ case QCA_WCN3950:
case QCA_WCN3988:
case QCA_WCN3990:
case QCA_WCN3991:
@@ -1958,6 +1968,7 @@ retry:
}
switch (soc_type) {
+ case QCA_WCN3950:
case QCA_WCN3988:
case QCA_WCN3990:
case QCA_WCN3991:
@@ -2046,6 +2057,17 @@ static const struct hci_uart_proto qca_proto = {
.dequeue = qca_dequeue,
};
+static const struct qca_device_data qca_soc_data_wcn3950 __maybe_unused = {
+ .soc_type = QCA_WCN3950,
+ .vregs = (struct qca_vreg []) {
+ { "vddio", 15000 },
+ { "vddxo", 60000 },
+ { "vddrf", 155000 },
+ { "vddch0", 585000 },
+ },
+ .num_vregs = 4,
+};
+
static const struct qca_device_data qca_soc_data_wcn3988 __maybe_unused = {
.soc_type = QCA_WCN3988,
.vregs = (struct qca_vreg []) {
@@ -2338,6 +2360,7 @@ static int qca_serdev_probe(struct serdev_device *serdev)
qcadev->btsoc_type = QCA_ROME;
switch (qcadev->btsoc_type) {
+ case QCA_WCN3950:
case QCA_WCN3988:
case QCA_WCN3990:
case QCA_WCN3991:
@@ -2359,6 +2382,7 @@ static int qca_serdev_probe(struct serdev_device *serdev)
switch (qcadev->btsoc_type) {
case QCA_WCN6855:
case QCA_WCN7850:
+ case QCA_WCN6750:
if (!device_property_present(&serdev->dev, "enable-gpios")) {
/*
* Backward compatibility with old DT sources. If the
@@ -2374,11 +2398,11 @@ static int qca_serdev_probe(struct serdev_device *serdev)
break;
}
fallthrough;
+ case QCA_WCN3950:
case QCA_WCN3988:
case QCA_WCN3990:
case QCA_WCN3991:
case QCA_WCN3998:
- case QCA_WCN6750:
qcadev->bt_power->dev = &serdev->dev;
err = qca_init_regulators(qcadev->bt_power, data->vregs,
data->num_vregs);
@@ -2683,6 +2707,7 @@ static const struct of_device_id qca_bluetooth_of_match[] = {
{ .compatible = "qcom,qca6174-bt" },
{ .compatible = "qcom,qca6390-bt", .data = &qca_soc_data_qca6390},
{ .compatible = "qcom,qca9377-bt" },
+ { .compatible = "qcom,wcn3950-bt", .data = &qca_soc_data_wcn3950},
{ .compatible = "qcom,wcn3988-bt", .data = &qca_soc_data_wcn3988},
{ .compatible = "qcom,wcn3990-bt", .data = &qca_soc_data_wcn3990},
{ .compatible = "qcom,wcn3991-bt", .data = &qca_soc_data_wcn3991},
diff --git a/drivers/bluetooth/hci_uart.h b/drivers/bluetooth/hci_uart.h
index fbf3079b92a5..5ea5dd80e297 100644
--- a/drivers/bluetooth/hci_uart.h
+++ b/drivers/bluetooth/hci_uart.h
@@ -90,6 +90,7 @@ struct hci_uart {
#define HCI_UART_REGISTERED 1
#define HCI_UART_PROTO_READY 2
#define HCI_UART_NO_SUSPEND_NOTIFIER 3
+#define HCI_UART_PROTO_INIT 4
/* TX states */
#define HCI_UART_SENDING 1
diff --git a/drivers/bluetooth/hci_vhci.c b/drivers/bluetooth/hci_vhci.c
index 7651321d351c..a51935d37e5d 100644
--- a/drivers/bluetooth/hci_vhci.c
+++ b/drivers/bluetooth/hci_vhci.c
@@ -316,7 +316,7 @@ static inline void force_devcd_timeout(struct hci_dev *hdev,
unsigned int timeout)
{
#ifdef CONFIG_DEV_COREDUMP
- hdev->dump.timeout = msecs_to_jiffies(timeout * 1000);
+ hdev->dump.timeout = secs_to_jiffies(timeout);
#endif
}
@@ -416,6 +416,7 @@ static int __vhci_create_device(struct vhci_data *data, __u8 opcode)
hdev->wakeup = vhci_wakeup;
hdev->setup = vhci_setup;
set_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks);
+ set_bit(HCI_QUIRK_SYNC_FLOWCTL_SUPPORTED, &hdev->quirks);
/* bit 6 is for external configuration */
if (opcode & 0x40)
@@ -645,7 +646,7 @@ static int vhci_open(struct inode *inode, struct file *file)
file->private_data = data;
nonseekable_open(inode, file);
- schedule_delayed_work(&data->open_timeout, msecs_to_jiffies(1000));
+ schedule_delayed_work(&data->open_timeout, secs_to_jiffies(1));
return 0;
}
diff --git a/drivers/bus/fsl-mc/fsl-mc-bus.c b/drivers/bus/fsl-mc/fsl-mc-bus.c
index d1f3d327ddd1..a8be8cf246fb 100644
--- a/drivers/bus/fsl-mc/fsl-mc-bus.c
+++ b/drivers/bus/fsl-mc/fsl-mc-bus.c
@@ -153,7 +153,8 @@ static int fsl_mc_dma_configure(struct device *dev)
else
ret = acpi_dma_configure_id(dev, DEV_DMA_COHERENT, &input_id);
- if (!ret && !mc_drv->driver_managed_dma) {
+ /* @mc_drv may not be valid when we're called from the IOMMU layer */
+ if (!ret && dev->driver && !mc_drv->driver_managed_dma) {
ret = iommu_device_use_default_domain(dev);
if (ret)
arch_teardown_dma_ops(dev);
diff --git a/drivers/bus/mhi/host/pci_generic.c b/drivers/bus/mhi/host/pci_generic.c
index c41119b9079f..7ffea0f98162 100644
--- a/drivers/bus/mhi/host/pci_generic.c
+++ b/drivers/bus/mhi/host/pci_generic.c
@@ -1095,8 +1095,9 @@ static void mhi_pci_recovery_work(struct work_struct *work)
err_unprepare:
mhi_unprepare_after_power_down(mhi_cntrl);
err_try_reset:
- if (pci_reset_function(pdev))
- dev_err(&pdev->dev, "Recovery failed\n");
+ err = pci_try_reset_function(pdev);
+ if (err)
+ dev_err(&pdev->dev, "Recovery failed: %d\n", err);
}
static void health_check(struct timer_list *t)
diff --git a/drivers/bus/qcom-ssc-block-bus.c b/drivers/bus/qcom-ssc-block-bus.c
index 85d781a32df4..7f5fd4e0940d 100644
--- a/drivers/bus/qcom-ssc-block-bus.c
+++ b/drivers/bus/qcom-ssc-block-bus.c
@@ -264,18 +264,6 @@ static int qcom_ssc_block_bus_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, data);
- data->pd_names = qcom_ssc_block_pd_names;
- data->num_pds = ARRAY_SIZE(qcom_ssc_block_pd_names);
-
- /* power domains */
- ret = qcom_ssc_block_bus_pds_attach(&pdev->dev, data->pds, data->pd_names, data->num_pds);
- if (ret < 0)
- return dev_err_probe(&pdev->dev, ret, "error when attaching power domains\n");
-
- ret = qcom_ssc_block_bus_pds_enable(data->pds, data->num_pds);
- if (ret < 0)
- return dev_err_probe(&pdev->dev, ret, "error when enabling power domains\n");
-
/* low level overrides for when the HW logic doesn't "just work" */
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mpm_sscaon_config0");
data->reg_mpm_sscaon_config0 = devm_ioremap_resource(&pdev->dev, res);
@@ -343,11 +331,30 @@ static int qcom_ssc_block_bus_probe(struct platform_device *pdev)
data->ssc_axi_halt = halt_args.args[0];
+ /* power domains */
+ data->pd_names = qcom_ssc_block_pd_names;
+ data->num_pds = ARRAY_SIZE(qcom_ssc_block_pd_names);
+
+ ret = qcom_ssc_block_bus_pds_attach(&pdev->dev, data->pds, data->pd_names, data->num_pds);
+ if (ret < 0)
+ return dev_err_probe(&pdev->dev, ret, "error when attaching power domains\n");
+
+ ret = qcom_ssc_block_bus_pds_enable(data->pds, data->num_pds);
+ if (ret < 0) {
+ dev_err_probe(&pdev->dev, ret, "error when enabling power domains\n");
+ goto err_detach_pds_bus;
+ }
+
qcom_ssc_block_bus_init(&pdev->dev);
of_platform_populate(np, NULL, NULL, &pdev->dev);
return 0;
+
+err_detach_pds_bus:
+ qcom_ssc_block_bus_pds_detach(&pdev->dev, data->pds, data->num_pds);
+
+ return ret;
}
static void qcom_ssc_block_bus_remove(struct platform_device *pdev)
@@ -356,9 +363,6 @@ static void qcom_ssc_block_bus_remove(struct platform_device *pdev)
qcom_ssc_block_bus_deinit(&pdev->dev);
- iounmap(data->reg_mpm_sscaon_config0);
- iounmap(data->reg_mpm_sscaon_config1);
-
qcom_ssc_block_bus_pds_disable(data->pds, data->num_pds);
qcom_ssc_block_bus_pds_detach(&pdev->dev, data->pds, data->num_pds);
pm_runtime_disable(&pdev->dev);
diff --git a/drivers/bus/simple-pm-bus.c b/drivers/bus/simple-pm-bus.c
index 5dea31769f9a..d8e029e7e53f 100644
--- a/drivers/bus/simple-pm-bus.c
+++ b/drivers/bus/simple-pm-bus.c
@@ -109,9 +109,29 @@ static int simple_pm_bus_runtime_resume(struct device *dev)
return 0;
}
+static int simple_pm_bus_suspend(struct device *dev)
+{
+ struct simple_pm_bus *bus = dev_get_drvdata(dev);
+
+ if (!bus)
+ return 0;
+
+ return pm_runtime_force_suspend(dev);
+}
+
+static int simple_pm_bus_resume(struct device *dev)
+{
+ struct simple_pm_bus *bus = dev_get_drvdata(dev);
+
+ if (!bus)
+ return 0;
+
+ return pm_runtime_force_resume(dev);
+}
+
static const struct dev_pm_ops simple_pm_bus_pm_ops = {
RUNTIME_PM_OPS(simple_pm_bus_runtime_suspend, simple_pm_bus_runtime_resume, NULL)
- NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
+ NOIRQ_SYSTEM_SLEEP_PM_OPS(simple_pm_bus_suspend, simple_pm_bus_resume)
};
#define ONLY_BUS ((void *) 1) /* Match if the device is only a bus. */
diff --git a/drivers/cdx/cdx.c b/drivers/cdx/cdx.c
index c573ed2ee71a..092306ca2541 100644
--- a/drivers/cdx/cdx.c
+++ b/drivers/cdx/cdx.c
@@ -360,7 +360,8 @@ static int cdx_dma_configure(struct device *dev)
return ret;
}
- if (!ret && !cdx_drv->driver_managed_dma) {
+ /* @cdx_drv may not be valid when we're called from the IOMMU layer */
+ if (!ret && dev->driver && !cdx_drv->driver_managed_dma) {
ret = iommu_device_use_default_domain(dev);
if (ret)
arch_teardown_dma_ops(dev);
@@ -473,8 +474,12 @@ static ssize_t driver_override_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct cdx_device *cdx_dev = to_cdx_device(dev);
+ ssize_t len;
- return sysfs_emit(buf, "%s\n", cdx_dev->driver_override);
+ device_lock(dev);
+ len = sysfs_emit(buf, "%s\n", cdx_dev->driver_override);
+ device_unlock(dev);
+ return len;
}
static DEVICE_ATTR_RW(driver_override);
diff --git a/drivers/char/hw_random/timeriomem-rng.c b/drivers/char/hw_random/timeriomem-rng.c
index 7174bfccc7b3..b95f6d0f17ed 100644
--- a/drivers/char/hw_random/timeriomem-rng.c
+++ b/drivers/char/hw_random/timeriomem-rng.c
@@ -152,8 +152,7 @@ static int timeriomem_rng_probe(struct platform_device *pdev)
priv->period = ns_to_ktime(period * NSEC_PER_USEC);
init_completion(&priv->completion);
- hrtimer_init(&priv->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
- priv->timer.function = timeriomem_rng_trigger;
+ hrtimer_setup(&priv->timer, timeriomem_rng_trigger, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
priv->rng_ops.name = dev_name(&pdev->dev);
priv->rng_ops.read = timeriomem_rng_read;
diff --git a/drivers/char/misc.c b/drivers/char/misc.c
index 2cf595d2e10b..f7dd455dd0dd 100644
--- a/drivers/char/misc.c
+++ b/drivers/char/misc.c
@@ -264,8 +264,8 @@ int misc_register(struct miscdevice *misc)
device_create_with_groups(&misc_class, misc->parent, dev,
misc, misc->groups, "%s", misc->name);
if (IS_ERR(misc->this_device)) {
+ misc_minor_free(misc->minor);
if (is_dynamic) {
- misc_minor_free(misc->minor);
misc->minor = MISC_DYNAMIC_MINOR;
}
err = PTR_ERR(misc->this_device);
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 2581186fa61b..92cbd24a36d8 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -278,7 +278,7 @@ static void crng_reseed(struct work_struct *work)
WRITE_ONCE(base_crng.generation, next_gen);
#ifdef CONFIG_VDSO_GETRANDOM
/* base_crng.generation's invalid value is ULONG_MAX, while
- * _vdso_rng_data.generation's invalid value is 0, so add one to the
+ * vdso_k_rng_data->generation's invalid value is 0, so add one to the
* former to arrive at the latter. Use smp_store_release so that this
* is ordered with the write above to base_crng.generation. Pairs with
* the smp_rmb() before the syscall in the vDSO code.
@@ -290,7 +290,7 @@ static void crng_reseed(struct work_struct *work)
* because the vDSO side only checks whether the value changed, without
* actually using or interpreting the value.
*/
- smp_store_release((unsigned long *)&__arch_get_k_vdso_rng_data()->generation, next_gen + 1);
+ smp_store_release((unsigned long *)&vdso_k_rng_data->generation, next_gen + 1);
#endif
if (!static_branch_likely(&crng_is_ready))
crng_init = CRNG_READY;
@@ -743,7 +743,7 @@ static void __cold _credit_init_bits(size_t bits)
queue_work(system_unbound_wq, &set_ready);
atomic_notifier_call_chain(&random_ready_notifier, 0, NULL);
#ifdef CONFIG_VDSO_GETRANDOM
- WRITE_ONCE(__arch_get_k_vdso_rng_data()->is_ready, true);
+ WRITE_ONCE(vdso_k_rng_data->is_ready, true);
#endif
wake_up_interruptible(&crng_init_wait);
kill_fasync(&fasync, SIGIO, POLL_IN);
diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
index f887569fd3d0..677bb5ac950a 100644
--- a/drivers/char/sonypi.c
+++ b/drivers/char/sonypi.c
@@ -37,6 +37,7 @@
#include <linux/kfifo.h>
#include <linux/platform_device.h>
#include <linux/gfp.h>
+#include <linux/string_choices.h>
#include <linux/uaccess.h>
#include <asm/io.h>
@@ -1268,12 +1269,12 @@ static void sonypi_display_info(void)
"compat = %s, mask = 0x%08lx, useinput = %s, acpi = %s\n",
sonypi_device.model,
verbose,
- fnkeyinit ? "on" : "off",
- camera ? "on" : "off",
- compat ? "on" : "off",
+ str_on_off(fnkeyinit),
+ str_on_off(camera),
+ str_on_off(compat),
mask,
- useinput ? "on" : "off",
- SONYPI_ACPI_ACTIVE ? "on" : "off");
+ str_on_off(useinput),
+ str_on_off(SONYPI_ACPI_ACTIVE));
printk(KERN_INFO "sonypi: enabled at irq=%d, port1=0x%x, port2=0x%x\n",
sonypi_device.irq,
sonypi_device.ioport1, sonypi_device.ioport2);
diff --git a/drivers/char/tpm/Kconfig b/drivers/char/tpm/Kconfig
index 0fc9a510e059..fe4f3a609934 100644
--- a/drivers/char/tpm/Kconfig
+++ b/drivers/char/tpm/Kconfig
@@ -210,6 +210,15 @@ config TCG_CRB
from within Linux. To compile this driver as a module, choose
M here; the module will be called tpm_crb.
+config TCG_ARM_CRB_FFA
+ tristate "TPM CRB over Arm FF-A Transport"
+ depends on ARM_FFA_TRANSPORT && TCG_CRB
+ default TCG_CRB
+ help
+ If the Arm FF-A transport is used to access the TPM, say Yes.
+ To compile this driver as a module, choose M here; the module
+ will be called tpm_crb_ffa.
+
config TCG_VTPM_PROXY
tristate "VTPM Proxy Interface"
depends on TCG_TPM
diff --git a/drivers/char/tpm/Makefile b/drivers/char/tpm/Makefile
index 9bb142c75243..2b004df8c04b 100644
--- a/drivers/char/tpm/Makefile
+++ b/drivers/char/tpm/Makefile
@@ -42,5 +42,6 @@ obj-$(CONFIG_TCG_IBMVTPM) += tpm_ibmvtpm.o
obj-$(CONFIG_TCG_TIS_ST33ZP24) += st33zp24/
obj-$(CONFIG_TCG_XEN) += xen-tpmfront.o
obj-$(CONFIG_TCG_CRB) += tpm_crb.o
+obj-$(CONFIG_TCG_ARM_CRB_FFA) += tpm_crb_ffa.o
obj-$(CONFIG_TCG_VTPM_PROXY) += tpm_vtpm_proxy.o
obj-$(CONFIG_TCG_FTPM_TEE) += tpm_ftpm_tee.o
diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c
index 7df7abaf3e52..e25daf2396d3 100644
--- a/drivers/char/tpm/tpm-chip.c
+++ b/drivers/char/tpm/tpm-chip.c
@@ -168,6 +168,11 @@ int tpm_try_get_ops(struct tpm_chip *chip)
goto out_ops;
mutex_lock(&chip->tpm_mutex);
+
+ /* tpm_chip_start may issue IO that is denied while suspended */
+ if (chip->flags & TPM_CHIP_FLAG_SUSPENDED)
+ goto out_lock;
+
rc = tpm_chip_start(chip);
if (rc)
goto out_lock;
@@ -300,6 +305,7 @@ int tpm_class_shutdown(struct device *dev)
down_write(&chip->ops_sem);
if (chip->flags & TPM_CHIP_FLAG_TPM2) {
if (!tpm_chip_start(chip)) {
+ tpm2_end_auth_session(chip);
tpm2_shutdown(chip, TPM2_SU_CLEAR);
tpm_chip_stop(chip);
}
diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c
index b1daa0d7b341..8d7e4da6ed53 100644
--- a/drivers/char/tpm/tpm-interface.c
+++ b/drivers/char/tpm/tpm-interface.c
@@ -58,6 +58,30 @@ unsigned long tpm_calc_ordinal_duration(struct tpm_chip *chip, u32 ordinal)
}
EXPORT_SYMBOL_GPL(tpm_calc_ordinal_duration);
+static void tpm_chip_cancel(struct tpm_chip *chip)
+{
+ if (!chip->ops->cancel)
+ return;
+
+ chip->ops->cancel(chip);
+}
+
+static u8 tpm_chip_status(struct tpm_chip *chip)
+{
+ if (!chip->ops->status)
+ return 0;
+
+ return chip->ops->status(chip);
+}
+
+static bool tpm_chip_req_canceled(struct tpm_chip *chip, u8 status)
+{
+ if (!chip->ops->req_canceled)
+ return false;
+
+ return chip->ops->req_canceled(chip, status);
+}
+
static ssize_t tpm_try_transmit(struct tpm_chip *chip, void *buf, size_t bufsiz)
{
struct tpm_header *header = buf;
@@ -104,12 +128,12 @@ static ssize_t tpm_try_transmit(struct tpm_chip *chip, void *buf, size_t bufsiz)
stop = jiffies + tpm_calc_ordinal_duration(chip, ordinal);
do {
- u8 status = chip->ops->status(chip);
+ u8 status = tpm_chip_status(chip);
if ((status & chip->ops->req_complete_mask) ==
chip->ops->req_complete_val)
goto out_recv;
- if (chip->ops->req_canceled(chip, status)) {
+ if (tpm_chip_req_canceled(chip, status)) {
dev_err(&chip->dev, "Operation Canceled\n");
return -ECANCELED;
}
@@ -118,7 +142,7 @@ static ssize_t tpm_try_transmit(struct tpm_chip *chip, void *buf, size_t bufsiz)
rmb();
} while (time_before(jiffies, stop));
- chip->ops->cancel(chip);
+ tpm_chip_cancel(chip);
dev_err(&chip->dev, "Operation Timed out\n");
return -ETIME;
@@ -445,18 +469,11 @@ int tpm_get_random(struct tpm_chip *chip, u8 *out, size_t max)
if (!chip)
return -ENODEV;
- /* Give back zero bytes, as TPM chip has not yet fully resumed: */
- if (chip->flags & TPM_CHIP_FLAG_SUSPENDED) {
- rc = 0;
- goto out;
- }
-
if (chip->flags & TPM_CHIP_FLAG_TPM2)
rc = tpm2_get_random(chip, out, max);
else
rc = tpm1_get_random(chip, out, max);
-out:
tpm_put_ops(chip);
return rc;
}
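
With the NULL-checking wrappers added above, the .cancel, .status and .req_canceled callbacks of struct tpm_class_ops become optional; the tpm_ftpm_tee change later in this series drops its empty stubs for exactly that reason. As an illustration only (example_recv and example_send are hypothetical), a minimal ops table can now look like:

	static const struct tpm_class_ops example_tpm_ops = {
		.flags = TPM_OPS_AUTO_STARTUP,
		.recv  = example_recv,
		.send  = example_send,
		/* .cancel, .status and .req_canceled may be left unset */
	};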
diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c
index dfdcbd009720..524d802ede26 100644
--- a/drivers/char/tpm/tpm2-cmd.c
+++ b/drivers/char/tpm/tpm2-cmd.c
@@ -359,7 +359,6 @@ int tpm2_get_random(struct tpm_chip *chip, u8 *dest, size_t max)
} while (retries-- && total < max);
tpm_buf_destroy(&buf);
- tpm2_end_auth_session(chip);
return total ? total : -EIO;
out:
diff --git a/drivers/char/tpm/tpm2-sessions.c b/drivers/char/tpm/tpm2-sessions.c
index b70165b588ec..3f89635ba5e8 100644
--- a/drivers/char/tpm/tpm2-sessions.c
+++ b/drivers/char/tpm/tpm2-sessions.c
@@ -982,7 +982,7 @@ int tpm2_start_auth_session(struct tpm_chip *chip)
int rc;
if (chip->auth) {
- dev_warn_once(&chip->dev, "auth session is active\n");
+ dev_dbg_once(&chip->dev, "auth session is active\n");
return 0;
}
diff --git a/drivers/char/tpm/tpm_crb.c b/drivers/char/tpm/tpm_crb.c
index ea085b14ab7c..876edf2705ab 100644
--- a/drivers/char/tpm/tpm_crb.c
+++ b/drivers/char/tpm/tpm_crb.c
@@ -19,6 +19,7 @@
#ifdef CONFIG_ARM64
#include <linux/arm-smccc.h>
#endif
+#include "tpm_crb_ffa.h"
#include "tpm.h"
#define ACPI_SIG_TPM2 "TPM2"
@@ -100,6 +101,8 @@ struct crb_priv {
u32 smc_func_id;
u32 __iomem *pluton_start_addr;
u32 __iomem *pluton_reply_addr;
+ u8 ffa_flags;
+ u8 ffa_attributes;
};
struct tpm2_crb_smc {
@@ -110,11 +113,30 @@ struct tpm2_crb_smc {
u32 smc_func_id;
};
+/* CRB over FFA start method parameters in TCG2 ACPI table */
+struct tpm2_crb_ffa {
+ u8 flags;
+ u8 attributes;
+ u16 partition_id;
+ u8 reserved[8];
+};
+
struct tpm2_crb_pluton {
u64 start_addr;
u64 reply_addr;
};
+/*
+ * Returns true if the start method supports idle.
+ */
+static inline bool tpm_crb_has_idle(u32 start_method)
+{
+ return !(start_method == ACPI_TPM2_START_METHOD ||
+ start_method == ACPI_TPM2_COMMAND_BUFFER_WITH_START_METHOD ||
+ start_method == ACPI_TPM2_COMMAND_BUFFER_WITH_ARM_SMC ||
+ start_method == ACPI_TPM2_CRB_WITH_ARM_FFA);
+}
+
static bool crb_wait_for_reg_32(u32 __iomem *reg, u32 mask, u32 value,
unsigned long timeout)
{
@@ -173,9 +195,7 @@ static int __crb_go_idle(struct device *dev, struct crb_priv *priv)
{
int rc;
- if ((priv->sm == ACPI_TPM2_START_METHOD) ||
- (priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_START_METHOD) ||
- (priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_ARM_SMC))
+ if (!tpm_crb_has_idle(priv->sm))
return 0;
iowrite32(CRB_CTRL_REQ_GO_IDLE, &priv->regs_t->ctrl_req);
@@ -222,9 +242,7 @@ static int __crb_cmd_ready(struct device *dev, struct crb_priv *priv)
{
int rc;
- if ((priv->sm == ACPI_TPM2_START_METHOD) ||
- (priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_START_METHOD) ||
- (priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_ARM_SMC))
+ if (!tpm_crb_has_idle(priv->sm))
return 0;
iowrite32(CRB_CTRL_REQ_CMD_READY, &priv->regs_t->ctrl_req);
@@ -255,13 +273,20 @@ static int crb_cmd_ready(struct tpm_chip *chip)
static int __crb_request_locality(struct device *dev,
struct crb_priv *priv, int loc)
{
- u32 value = CRB_LOC_STATE_LOC_ASSIGNED |
- CRB_LOC_STATE_TPM_REG_VALID_STS;
+ u32 value = CRB_LOC_STATE_LOC_ASSIGNED | CRB_LOC_STATE_TPM_REG_VALID_STS;
+ int rc;
if (!priv->regs_h)
return 0;
iowrite32(CRB_LOC_CTRL_REQUEST_ACCESS, &priv->regs_h->loc_ctrl);
+
+ if (priv->sm == ACPI_TPM2_CRB_WITH_ARM_FFA) {
+ rc = tpm_crb_ffa_start(CRB_FFA_START_TYPE_LOCALITY_REQUEST, loc);
+ if (rc)
+ return rc;
+ }
+
if (!crb_wait_for_reg_32(&priv->regs_h->loc_state, value, value,
TPM2_TIMEOUT_C)) {
dev_warn(dev, "TPM_LOC_STATE_x.requestAccess timed out\n");
@@ -281,14 +306,21 @@ static int crb_request_locality(struct tpm_chip *chip, int loc)
static int __crb_relinquish_locality(struct device *dev,
struct crb_priv *priv, int loc)
{
- u32 mask = CRB_LOC_STATE_LOC_ASSIGNED |
- CRB_LOC_STATE_TPM_REG_VALID_STS;
+ u32 mask = CRB_LOC_STATE_LOC_ASSIGNED | CRB_LOC_STATE_TPM_REG_VALID_STS;
u32 value = CRB_LOC_STATE_TPM_REG_VALID_STS;
+ int rc;
if (!priv->regs_h)
return 0;
iowrite32(CRB_LOC_CTRL_RELINQUISH, &priv->regs_h->loc_ctrl);
+
+ if (priv->sm == ACPI_TPM2_CRB_WITH_ARM_FFA) {
+ rc = tpm_crb_ffa_start(CRB_FFA_START_TYPE_LOCALITY_REQUEST, loc);
+ if (rc)
+ return rc;
+ }
+
if (!crb_wait_for_reg_32(&priv->regs_h->loc_state, mask, value,
TPM2_TIMEOUT_C)) {
dev_warn(dev, "TPM_LOC_STATE_x.Relinquish timed out\n");
@@ -423,13 +455,13 @@ static int crb_send(struct tpm_chip *chip, u8 *buf, size_t len)
* report only ACPI start but in practice seems to require both
* CRB start, hence invoking CRB start method if hid == MSFT0101.
*/
- if ((priv->sm == ACPI_TPM2_COMMAND_BUFFER) ||
- (priv->sm == ACPI_TPM2_MEMORY_MAPPED) ||
- (!strcmp(priv->hid, "MSFT0101")))
+ if (priv->sm == ACPI_TPM2_COMMAND_BUFFER ||
+ priv->sm == ACPI_TPM2_MEMORY_MAPPED ||
+ !strcmp(priv->hid, "MSFT0101"))
iowrite32(CRB_START_INVOKE, &priv->regs_t->ctrl_start);
- if ((priv->sm == ACPI_TPM2_START_METHOD) ||
- (priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_START_METHOD))
+ if (priv->sm == ACPI_TPM2_START_METHOD ||
+ priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_START_METHOD)
rc = crb_do_acpi_start(chip);
if (priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_ARM_SMC) {
@@ -437,6 +469,11 @@ static int crb_send(struct tpm_chip *chip, u8 *buf, size_t len)
rc = tpm_crb_smc_start(&chip->dev, priv->smc_func_id);
}
+ if (priv->sm == ACPI_TPM2_CRB_WITH_ARM_FFA) {
+ iowrite32(CRB_START_INVOKE, &priv->regs_t->ctrl_start);
+ rc = tpm_crb_ffa_start(CRB_FFA_START_TYPE_COMMAND, chip->locality);
+ }
+
if (rc)
return rc;
@@ -446,13 +483,20 @@ static int crb_send(struct tpm_chip *chip, u8 *buf, size_t len)
static void crb_cancel(struct tpm_chip *chip)
{
struct crb_priv *priv = dev_get_drvdata(&chip->dev);
+ int rc;
iowrite32(CRB_CANCEL_INVOKE, &priv->regs_t->ctrl_cancel);
- if (((priv->sm == ACPI_TPM2_START_METHOD) ||
- (priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_START_METHOD)) &&
+ if ((priv->sm == ACPI_TPM2_START_METHOD ||
+ priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_START_METHOD) &&
crb_do_acpi_start(chip))
dev_err(&chip->dev, "ACPI Start failed\n");
+
+ if (priv->sm == ACPI_TPM2_CRB_WITH_ARM_FFA) {
+ rc = tpm_crb_ffa_start(CRB_FFA_START_TYPE_COMMAND, chip->locality);
+ if (rc)
+ dev_err(&chip->dev, "FF-A Start failed\n");
+ }
}
static bool crb_req_canceled(struct tpm_chip *chip, u8 status)
@@ -609,8 +653,9 @@ static int crb_map_io(struct acpi_device *device, struct crb_priv *priv,
* the control area, as one nice sane region except for some older
* stuff that puts the control area outside the ACPI IO region.
*/
- if ((priv->sm == ACPI_TPM2_COMMAND_BUFFER) ||
- (priv->sm == ACPI_TPM2_MEMORY_MAPPED)) {
+ if (priv->sm == ACPI_TPM2_COMMAND_BUFFER ||
+ priv->sm == ACPI_TPM2_CRB_WITH_ARM_FFA ||
+ priv->sm == ACPI_TPM2_MEMORY_MAPPED) {
if (iores &&
buf->control_address == iores->start +
sizeof(*priv->regs_h))
@@ -731,6 +776,7 @@ static int crb_acpi_add(struct acpi_device *device)
struct tpm_chip *chip;
struct device *dev = &device->dev;
struct tpm2_crb_smc *crb_smc;
+ struct tpm2_crb_ffa *crb_ffa;
struct tpm2_crb_pluton *crb_pluton;
acpi_status status;
u32 sm;
@@ -769,6 +815,27 @@ static int crb_acpi_add(struct acpi_device *device)
priv->smc_func_id = crb_smc->smc_func_id;
}
+ if (sm == ACPI_TPM2_CRB_WITH_ARM_FFA) {
+ if (buf->header.length < (sizeof(*buf) + sizeof(*crb_ffa))) {
+ dev_err(dev,
+ FW_BUG "TPM2 ACPI table has wrong size %u for start method type %d\n",
+ buf->header.length,
+ ACPI_TPM2_CRB_WITH_ARM_FFA);
+ rc = -EINVAL;
+ goto out;
+ }
+ crb_ffa = ACPI_ADD_PTR(struct tpm2_crb_ffa, buf, sizeof(*buf));
+ priv->ffa_flags = crb_ffa->flags;
+ priv->ffa_attributes = crb_ffa->attributes;
+ rc = tpm_crb_ffa_init();
+ if (rc) {
+ /* If FF-A driver is not available yet, request probe retry */
+ if (rc == -ENOENT)
+ rc = -EPROBE_DEFER;
+ goto out;
+ }
+ }
+
if (sm == ACPI_TPM2_COMMAND_BUFFER_WITH_PLUTON) {
if (buf->header.length < (sizeof(*buf) + sizeof(*crb_pluton))) {
dev_err(dev,
diff --git a/drivers/char/tpm/tpm_crb_ffa.c b/drivers/char/tpm/tpm_crb_ffa.c
new file mode 100644
index 000000000000..3169a87a56b6
--- /dev/null
+++ b/drivers/char/tpm/tpm_crb_ffa.c
@@ -0,0 +1,348 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2024 Arm Ltd.
+ *
+ * This device driver implements the TPM CRB start method
+ * as defined in the TPM Service Command Response Buffer
+ * Interface Over FF-A (DEN0138).
+ */
+
+#define pr_fmt(fmt) "CRB_FFA: " fmt
+
+#include <linux/arm_ffa.h>
+#include "tpm_crb_ffa.h"
+
+/* TPM service function status codes */
+#define CRB_FFA_OK 0x05000001
+#define CRB_FFA_OK_RESULTS_RETURNED 0x05000002
+#define CRB_FFA_NOFUNC 0x8e000001
+#define CRB_FFA_NOTSUP 0x8e000002
+#define CRB_FFA_INVARG 0x8e000005
+#define CRB_FFA_INV_CRB_CTRL_DATA 0x8e000006
+#define CRB_FFA_ALREADY 0x8e000009
+#define CRB_FFA_DENIED 0x8e00000a
+#define CRB_FFA_NOMEM 0x8e00000b
+
+#define CRB_FFA_VERSION_MAJOR 1
+#define CRB_FFA_VERSION_MINOR 0
+
+/* version encoding */
+#define CRB_FFA_MAJOR_VERSION_MASK GENMASK(30, 16)
+#define CRB_FFA_MINOR_VERSION_MASK GENMASK(15, 0)
+#define CRB_FFA_MAJOR_VERSION(x) ((u16)(FIELD_GET(CRB_FFA_MAJOR_VERSION_MASK, (x))))
+#define CRB_FFA_MINOR_VERSION(x) ((u16)(FIELD_GET(CRB_FFA_MINOR_VERSION_MASK, (x))))
+
+/*
+ * Normal world sends requests with FFA_MSG_SEND_DIRECT_REQ and
+ * responses are returned with FFA_MSG_SEND_DIRECT_RESP for normal
+ * messages.
+ *
+ * All requests with FFA_MSG_SEND_DIRECT_REQ and FFA_MSG_SEND_DIRECT_RESP
+ * are using the AArch32 SMC calling convention with register usage as
+ * defined in FF-A specification:
+ * w0: Function ID (0x8400006F or 0x84000070)
+ * w1: Source/Destination IDs
+ * w2: Reserved (MBZ)
+ * w3-w7: Implementation defined, free to be used below
+ */
+
+/*
+ * Returns the version of the interface that is available
+ * Call register usage:
+ * w3: Not used (MBZ)
+ * w4: TPM service function ID, CRB_FFA_GET_INTERFACE_VERSION
+ * w5-w7: Reserved (MBZ)
+ *
+ * Return register usage:
+ * w3: Not used (MBZ)
+ * w4: TPM service function status
+ * w5: TPM service interface version
+ * Bits[31:16]: major version
+ * Bits[15:0]: minor version
+ * w6-w7: Reserved (MBZ)
+ *
+ * Possible function status codes in register w4:
+ * CRB_FFA_OK_RESULTS_RETURNED: The version of the interface has been
+ * returned.
+ */
+#define CRB_FFA_GET_INTERFACE_VERSION 0x0f000001
+
+/*
+ * Notifies the TPM service that a command or locality request is ready to be processed
+ * Call register usage:
+ * w3: Not used (MBZ)
+ * w4: TPM service function ID, CRB_FFA_START
+ * w5: Start function qualifier
+ * Bits[31:8] (MBZ)
+ * Bits[7:0]
+ * 0: Notifies TPM that a command is ready to be processed
+ * 1: Notifies TPM that a locality request is ready to be processed
+ * w6: TPM locality, one of 0..4
+ * - If the start function qualifier is 0, identifies the locality
+ * from which the command originated.
+ * - If the start function qualifier is 1, identifies the locality
+ * of the locality request.
+ * w7: Reserved (MBZ)
+ *
+ * Return register usage:
+ * w3: Not used (MBZ)
+ * w4: TPM service function status
+ * w5-w7: Reserved (MBZ)
+ *
+ * Possible function status codes in register w4:
+ * CRB_FFA_OK: the TPM service has been notified successfully
+ * CRB_FFA_INVARG: one or more arguments are not valid
+ * CRB_FFA_INV_CRB_CTRL_DATA: CRB control data or locality control
+ * data at the given TPM locality is not valid
+ * CRB_FFA_DENIED: the TPM has previously disabled locality requests and
+ * command processing at the given locality
+ */
+#define CRB_FFA_START 0x0f000201
+
+struct tpm_crb_ffa {
+ struct ffa_device *ffa_dev;
+ u16 major_version;
+ u16 minor_version;
+ /* lock to protect sending of FF-A messages: */
+ struct mutex msg_data_lock;
+ struct ffa_send_direct_data direct_msg_data;
+};
+
+static struct tpm_crb_ffa *tpm_crb_ffa;
+
+static int tpm_crb_ffa_to_linux_errno(int errno)
+{
+ int rc;
+
+ switch (errno) {
+ case CRB_FFA_OK:
+ rc = 0;
+ break;
+ case CRB_FFA_OK_RESULTS_RETURNED:
+ rc = 0;
+ break;
+ case CRB_FFA_NOFUNC:
+ rc = -ENOENT;
+ break;
+ case CRB_FFA_NOTSUP:
+ rc = -EPERM;
+ break;
+ case CRB_FFA_INVARG:
+ rc = -EINVAL;
+ break;
+ case CRB_FFA_INV_CRB_CTRL_DATA:
+ rc = -ENOEXEC;
+ break;
+ case CRB_FFA_ALREADY:
+ rc = -EEXIST;
+ break;
+ case CRB_FFA_DENIED:
+ rc = -EACCES;
+ break;
+ case CRB_FFA_NOMEM:
+ rc = -ENOMEM;
+ break;
+ default:
+ rc = -EINVAL;
+ }
+
+ return rc;
+}
+
+/**
+ * tpm_crb_ffa_init - called by the CRB driver to do any needed initialization
+ *
+ * This function is called by the tpm_crb driver during the tpm_crb
+ * driver's initialization. If the tpm_crb_ffa driver has not been probed
+ * yet, this returns -ENOENT in order to force a retry. If the ffa-crb
+ * driver has been probed but failed with an error, this returns -ENODEV
+ * in order to prevent further retries.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+int tpm_crb_ffa_init(void)
+{
+ if (!tpm_crb_ffa)
+ return -ENOENT;
+
+ if (IS_ERR_VALUE(tpm_crb_ffa))
+ return -ENODEV;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(tpm_crb_ffa_init);
+
+static int __tpm_crb_ffa_send_recieve(unsigned long func_id,
+ unsigned long a0,
+ unsigned long a1,
+ unsigned long a2)
+{
+ const struct ffa_msg_ops *msg_ops;
+ int ret;
+
+ if (!tpm_crb_ffa)
+ return -ENOENT;
+
+ msg_ops = tpm_crb_ffa->ffa_dev->ops->msg_ops;
+
+ memset(&tpm_crb_ffa->direct_msg_data, 0x00,
+ sizeof(struct ffa_send_direct_data));
+
+ tpm_crb_ffa->direct_msg_data.data1 = func_id;
+ tpm_crb_ffa->direct_msg_data.data2 = a0;
+ tpm_crb_ffa->direct_msg_data.data3 = a1;
+ tpm_crb_ffa->direct_msg_data.data4 = a2;
+
+ ret = msg_ops->sync_send_receive(tpm_crb_ffa->ffa_dev,
+ &tpm_crb_ffa->direct_msg_data);
+ if (!ret)
+ ret = tpm_crb_ffa_to_linux_errno(tpm_crb_ffa->direct_msg_data.data1);
+
+ return ret;
+}
+
+/**
+ * tpm_crb_ffa_get_interface_version() - gets the ABI version of the TPM service
+ * @major: Pointer to caller-allocated buffer to hold the major version
+ * number of the ABI
+ * @minor: Pointer to caller-allocated buffer to hold the minor version
+ * number of the ABI
+ *
+ * Returns the major and minor version of the ABI of the FF-A based TPM.
+ * Allows the caller to evaluate its compatibility with the version of
+ * the ABI.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+int tpm_crb_ffa_get_interface_version(u16 *major, u16 *minor)
+{
+ int rc;
+
+ if (!tpm_crb_ffa)
+ return -ENOENT;
+
+ if (IS_ERR_VALUE(tpm_crb_ffa))
+ return -ENODEV;
+
+ if (!major || !minor)
+ return -EINVAL;
+
+ guard(mutex)(&tpm_crb_ffa->msg_data_lock);
+
+ rc = __tpm_crb_ffa_send_recieve(CRB_FFA_GET_INTERFACE_VERSION, 0x00, 0x00, 0x00);
+ if (!rc) {
+ *major = CRB_FFA_MAJOR_VERSION(tpm_crb_ffa->direct_msg_data.data2);
+ *minor = CRB_FFA_MINOR_VERSION(tpm_crb_ffa->direct_msg_data.data2);
+ }
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(tpm_crb_ffa_get_interface_version);
+
+/**
+ * tpm_crb_ffa_start() - signals the TPM that a field has changed in the CRB
+ * @request_type: Identifies whether the change to the CRB is in the command
+ * fields or locality fields.
+ * @locality: Specifies the locality number.
+ *
+ * Used by the CRB driver to notify the TPM service that the command
+ * buffer or the locality control fields have been updated and are
+ * ready to be processed.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+int tpm_crb_ffa_start(int request_type, int locality)
+{
+ if (!tpm_crb_ffa)
+ return -ENOENT;
+
+ if (IS_ERR_VALUE(tpm_crb_ffa))
+ return -ENODEV;
+
+ guard(mutex)(&tpm_crb_ffa->msg_data_lock);
+
+ return __tpm_crb_ffa_send_recieve(CRB_FFA_START, request_type, locality, 0x00);
+}
+EXPORT_SYMBOL_GPL(tpm_crb_ffa_start);
+
+static int tpm_crb_ffa_probe(struct ffa_device *ffa_dev)
+{
+ struct tpm_crb_ffa *p;
+ int rc;
+
+ /* only one instance of a TPM partition is supported */
+ if (tpm_crb_ffa && !IS_ERR_VALUE(tpm_crb_ffa))
+ return -EEXIST;
+
+ tpm_crb_ffa = ERR_PTR(-ENODEV); /* set tpm_crb_ffa so we can detect probe failure */
+
+ if (!ffa_partition_supports_direct_recv(ffa_dev)) {
+ pr_err("TPM partition doesn't support direct message receive.\n");
+ return -EINVAL;
+ }
+
+ p = kzalloc(sizeof(*tpm_crb_ffa), GFP_KERNEL);
+ if (!p)
+ return -ENOMEM;
+ tpm_crb_ffa = p;
+
+ mutex_init(&tpm_crb_ffa->msg_data_lock);
+ tpm_crb_ffa->ffa_dev = ffa_dev;
+ ffa_dev_set_drvdata(ffa_dev, tpm_crb_ffa);
+
+ /* if TPM is aarch32 use 32-bit SMCs */
+ if (!ffa_partition_check_property(ffa_dev, FFA_PARTITION_AARCH64_EXEC))
+ ffa_dev->ops->msg_ops->mode_32bit_set(ffa_dev);
+
+ /* verify compatibility of TPM service version number */
+ rc = tpm_crb_ffa_get_interface_version(&tpm_crb_ffa->major_version,
+ &tpm_crb_ffa->minor_version);
+ if (rc) {
+ pr_err("failed to get crb interface version. rc:%d", rc);
+ goto out;
+ }
+
+ pr_info("ABI version %u.%u", tpm_crb_ffa->major_version,
+ tpm_crb_ffa->minor_version);
+
+ if (tpm_crb_ffa->major_version != CRB_FFA_VERSION_MAJOR ||
+ (tpm_crb_ffa->minor_version > 0 &&
+ tpm_crb_ffa->minor_version < CRB_FFA_VERSION_MINOR)) {
+ pr_err("Incompatible ABI version");
+ goto out;
+ }
+
+ return 0;
+
+out:
+ kfree(tpm_crb_ffa);
+ tpm_crb_ffa = ERR_PTR(-ENODEV);
+ return -EINVAL;
+}
+
+static void tpm_crb_ffa_remove(struct ffa_device *ffa_dev)
+{
+ kfree(tpm_crb_ffa);
+ tpm_crb_ffa = NULL;
+}
+
+static const struct ffa_device_id tpm_crb_ffa_device_id[] = {
+ /* 17b862a4-1806-4faf-86b3-089a58353861 */
+ { UUID_INIT(0x17b862a4, 0x1806, 0x4faf,
+ 0x86, 0xb3, 0x08, 0x9a, 0x58, 0x35, 0x38, 0x61) },
+ {}
+};
+
+static struct ffa_driver tpm_crb_ffa_driver = {
+ .name = "ffa-crb",
+ .probe = tpm_crb_ffa_probe,
+ .remove = tpm_crb_ffa_remove,
+ .id_table = tpm_crb_ffa_device_id,
+};
+
+module_ffa_driver(tpm_crb_ffa_driver);
+
+MODULE_AUTHOR("Arm");
+MODULE_DESCRIPTION("TPM CRB FFA driver");
+MODULE_LICENSE("GPL");
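
For reference, the interface version returned in w5 by CRB_FFA_GET_INTERFACE_VERSION decodes through the GENMASK()/FIELD_GET() based macros defined in this file; a small worked example with an illustrative value:

	u32 w5 = 0x00010000;			/* as reported by the TPM service */
	u16 major = CRB_FFA_MAJOR_VERSION(w5);	/* bits [30:16] -> 1 */
	u16 minor = CRB_FFA_MINOR_VERSION(w5);	/* bits [15:0]  -> 0, i.e. ABI 1.0 */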
diff --git a/drivers/char/tpm/tpm_crb_ffa.h b/drivers/char/tpm/tpm_crb_ffa.h
new file mode 100644
index 000000000000..645c41ede10e
--- /dev/null
+++ b/drivers/char/tpm/tpm_crb_ffa.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2024 Arm Ltd.
+ *
+ * This device driver implements the TPM CRB start method
+ * as defined in the TPM Service Command Response Buffer
+ * Interface Over FF-A (DEN0138).
+ */
+#ifndef _TPM_CRB_FFA_H
+#define _TPM_CRB_FFA_H
+
+#if IS_REACHABLE(CONFIG_TCG_ARM_CRB_FFA)
+int tpm_crb_ffa_init(void);
+int tpm_crb_ffa_get_interface_version(u16 *major, u16 *minor);
+int tpm_crb_ffa_start(int request_type, int locality);
+#else
+static inline int tpm_crb_ffa_init(void) { return 0; }
+static inline int tpm_crb_ffa_get_interface_version(u16 *major, u16 *minor) { return 0; }
+static inline int tpm_crb_ffa_start(int request_type, int locality) { return 0; }
+#endif
+
+#define CRB_FFA_START_TYPE_COMMAND 0
+#define CRB_FFA_START_TYPE_LOCALITY_REQUEST 1
+
+#endif
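
The stubs above keep the FF-A calls compiled out when TCG_ARM_CRB_FFA is not reachable, so tpm_crb.c can invoke them unconditionally. A condensed sketch of the caller side, restating what the tpm_crb.c hunks earlier in this patch already do (error handling trimmed):

	/* probe time: defer until the ffa-crb driver has bound */
	rc = tpm_crb_ffa_init();
	if (rc == -ENOENT)
		rc = -EPROBE_DEFER;

	/* command submission with the FF-A start method */
	if (priv->sm == ACPI_TPM2_CRB_WITH_ARM_FFA) {
		iowrite32(CRB_START_INVOKE, &priv->regs_t->ctrl_start);
		rc = tpm_crb_ffa_start(CRB_FFA_START_TYPE_COMMAND, chip->locality);
	}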
diff --git a/drivers/char/tpm/tpm_ftpm_tee.c b/drivers/char/tpm/tpm_ftpm_tee.c
index 139556b21cc6..53ba28ccd5d3 100644
--- a/drivers/char/tpm/tpm_ftpm_tee.c
+++ b/drivers/char/tpm/tpm_ftpm_tee.c
@@ -164,30 +164,10 @@ static int ftpm_tee_tpm_op_send(struct tpm_chip *chip, u8 *buf, size_t len)
return 0;
}
-static void ftpm_tee_tpm_op_cancel(struct tpm_chip *chip)
-{
- /* not supported */
-}
-
-static u8 ftpm_tee_tpm_op_status(struct tpm_chip *chip)
-{
- return 0;
-}
-
-static bool ftpm_tee_tpm_req_canceled(struct tpm_chip *chip, u8 status)
-{
- return false;
-}
-
static const struct tpm_class_ops ftpm_tee_tpm_ops = {
.flags = TPM_OPS_AUTO_STARTUP,
.recv = ftpm_tee_tpm_op_recv,
.send = ftpm_tee_tpm_op_send,
- .cancel = ftpm_tee_tpm_op_cancel,
- .status = ftpm_tee_tpm_op_status,
- .req_complete_mask = 0,
- .req_complete_val = 0,
- .req_canceled = ftpm_tee_tpm_req_canceled,
};
/*
@@ -362,7 +342,7 @@ MODULE_DEVICE_TABLE(of, of_ftpm_tee_ids);
static struct platform_driver ftpm_tee_plat_driver = {
.driver = {
.name = "ftpm-tee",
- .of_match_table = of_match_ptr(of_ftpm_tee_ids),
+ .of_match_table = of_ftpm_tee_ids,
},
.shutdown = ftpm_plat_tee_shutdown,
.probe = ftpm_plat_tee_probe,
diff --git a/drivers/char/tpm/tpm_ftpm_tee.h b/drivers/char/tpm/tpm_ftpm_tee.h
index f98daa7bf68c..e39903b7ea07 100644
--- a/drivers/char/tpm/tpm_ftpm_tee.h
+++ b/drivers/char/tpm/tpm_ftpm_tee.h
@@ -21,7 +21,6 @@
/**
* struct ftpm_tee_private - fTPM's private data
* @chip: struct tpm_chip instance registered with tpm framework.
- * @state: internal state
* @session: fTPM TA session identifier.
* @resp_len: cached response buffer length.
* @resp_buf: cached response buffer.
diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c
index fdef214b9f6b..ed0d3d8449b3 100644
--- a/drivers/char/tpm/tpm_tis_core.c
+++ b/drivers/char/tpm/tpm_tis_core.c
@@ -114,11 +114,10 @@ again:
return 0;
/* process status changes without irq support */
do {
+ usleep_range(priv->timeout_min, priv->timeout_max);
status = chip->ops->status(chip);
if ((status & mask) == mask)
return 0;
- usleep_range(priv->timeout_min,
- priv->timeout_max);
} while (time_before(jiffies, stop));
return -ETIME;
}
@@ -464,7 +463,10 @@ static int tpm_tis_send_data(struct tpm_chip *chip, const u8 *buf, size_t len)
if (wait_for_tpm_stat(chip, TPM_STS_VALID, chip->timeout_c,
&priv->int_queue, false) < 0) {
- rc = -ETIME;
+ if (test_bit(TPM_TIS_STATUS_VALID_RETRY, &priv->flags))
+ rc = -EAGAIN;
+ else
+ rc = -ETIME;
goto out_err;
}
status = tpm_tis_status(chip);
@@ -481,7 +483,10 @@ static int tpm_tis_send_data(struct tpm_chip *chip, const u8 *buf, size_t len)
if (wait_for_tpm_stat(chip, TPM_STS_VALID, chip->timeout_c,
&priv->int_queue, false) < 0) {
- rc = -ETIME;
+ if (test_bit(TPM_TIS_STATUS_VALID_RETRY, &priv->flags))
+ rc = -EAGAIN;
+ else
+ rc = -ETIME;
goto out_err;
}
status = tpm_tis_status(chip);
@@ -546,9 +551,11 @@ static int tpm_tis_send_main(struct tpm_chip *chip, const u8 *buf, size_t len)
if (rc >= 0)
/* Data transfer done successfully */
break;
- else if (rc != -EIO)
+ else if (rc != -EAGAIN && rc != -EIO)
/* Data transfer failed, not recoverable */
return rc;
+
+ usleep_range(priv->timeout_min, priv->timeout_max);
}
/* go and do it */
@@ -1144,6 +1151,9 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
priv->timeout_max = TIS_TIMEOUT_MAX_ATML;
}
+ if (priv->manufacturer_id == TPM_VID_IFX)
+ set_bit(TPM_TIS_STATUS_VALID_RETRY, &priv->flags);
+
if (is_bsw()) {
priv->ilb_base_addr = ioremap(INTEL_LEGACY_BLK_BASE_ADDR,
ILB_REMAP_SIZE);
diff --git a/drivers/char/tpm/tpm_tis_core.h b/drivers/char/tpm/tpm_tis_core.h
index 690ad8e9b731..970d02c337c7 100644
--- a/drivers/char/tpm/tpm_tis_core.h
+++ b/drivers/char/tpm/tpm_tis_core.h
@@ -89,6 +89,7 @@ enum tpm_tis_flags {
TPM_TIS_INVALID_STATUS = 1,
TPM_TIS_DEFAULT_CANCELLATION = 2,
TPM_TIS_IRQ_TESTED = 3,
+ TPM_TIS_STATUS_VALID_RETRY = 4,
};
struct tpm_tis_data {
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index 24442485e73e..18f92dd44d45 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -923,14 +923,14 @@ static ssize_t port_fops_splice_write(struct pipe_inode_info *pipe,
pipe_lock(pipe);
ret = 0;
- if (pipe_empty(pipe->head, pipe->tail))
+ if (pipe_is_empty(pipe))
goto error_out;
ret = wait_port_writable(port, filp->f_flags & O_NONBLOCK);
if (ret < 0)
goto error_out;
- occupancy = pipe_occupancy(pipe->head, pipe->tail);
+ occupancy = pipe_buf_usage(pipe);
buf = alloc_buf(port->portdev->vdev, 0, occupancy);
if (!buf) {
diff --git a/drivers/clk/qcom/dispcc-sm8750.c b/drivers/clk/qcom/dispcc-sm8750.c
index 0358dff91da5..e9bca179998b 100644
--- a/drivers/clk/qcom/dispcc-sm8750.c
+++ b/drivers/clk/qcom/dispcc-sm8750.c
@@ -827,7 +827,6 @@ static struct clk_regmap_div disp_cc_mdss_byte0_div_clk_src = {
&disp_cc_mdss_byte0_clk_src.clkr.hw,
},
.num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
.ops = &clk_regmap_div_ops,
},
};
@@ -842,7 +841,6 @@ static struct clk_regmap_div disp_cc_mdss_byte1_div_clk_src = {
&disp_cc_mdss_byte1_clk_src.clkr.hw,
},
.num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
.ops = &clk_regmap_div_ops,
},
};
diff --git a/drivers/clk/samsung/clk-gs101.c b/drivers/clk/samsung/clk-gs101.c
index 86b39edba122..08b867ae3ed9 100644
--- a/drivers/clk/samsung/clk-gs101.c
+++ b/drivers/clk/samsung/clk-gs101.c
@@ -382,17 +382,9 @@ static const unsigned long cmu_top_clk_regs[] __initconst = {
EARLY_WAKEUP_DPU_DEST,
EARLY_WAKEUP_CSIS_DEST,
EARLY_WAKEUP_SW_TRIG_APM,
- EARLY_WAKEUP_SW_TRIG_APM_SET,
- EARLY_WAKEUP_SW_TRIG_APM_CLEAR,
EARLY_WAKEUP_SW_TRIG_CLUSTER0,
- EARLY_WAKEUP_SW_TRIG_CLUSTER0_SET,
- EARLY_WAKEUP_SW_TRIG_CLUSTER0_CLEAR,
EARLY_WAKEUP_SW_TRIG_DPU,
- EARLY_WAKEUP_SW_TRIG_DPU_SET,
- EARLY_WAKEUP_SW_TRIG_DPU_CLEAR,
EARLY_WAKEUP_SW_TRIG_CSIS,
- EARLY_WAKEUP_SW_TRIG_CSIS_SET,
- EARLY_WAKEUP_SW_TRIG_CSIS_CLEAR,
CLK_CON_MUX_MUX_CLKCMU_BO_BUS,
CLK_CON_MUX_MUX_CLKCMU_BUS0_BUS,
CLK_CON_MUX_MUX_CLKCMU_BUS1_BUS,
diff --git a/drivers/clk/samsung/clk-pll.c b/drivers/clk/samsung/clk-pll.c
index 2e94bba6c396..023a25af73c4 100644
--- a/drivers/clk/samsung/clk-pll.c
+++ b/drivers/clk/samsung/clk-pll.c
@@ -206,6 +206,7 @@ static const struct clk_ops samsung_pll3000_clk_ops = {
*/
/* Maximum lock time can be 270 * PDIV cycles */
#define PLL35XX_LOCK_FACTOR (270)
+#define PLL142XX_LOCK_FACTOR (150)
#define PLL35XX_MDIV_MASK (0x3FF)
#define PLL35XX_PDIV_MASK (0x3F)
@@ -272,7 +273,11 @@ static int samsung_pll35xx_set_rate(struct clk_hw *hw, unsigned long drate,
}
/* Set PLL lock time. */
- writel_relaxed(rate->pdiv * PLL35XX_LOCK_FACTOR,
+ if (pll->type == pll_142xx)
+ writel_relaxed(rate->pdiv * PLL142XX_LOCK_FACTOR,
+ pll->lock_reg);
+ else
+ writel_relaxed(rate->pdiv * PLL35XX_LOCK_FACTOR,
pll->lock_reg);
/* Change PLL PMS values */
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
index 808f259781fd..981a578043a5 100644
--- a/drivers/clocksource/arm_arch_timer.c
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -842,7 +842,7 @@ static u64 __arch_timer_check_delta(void)
{},
};
- if (is_midr_in_range_list(read_cpuid_id(), broken_cval_midrs)) {
+ if (is_midr_in_range_list(broken_cval_midrs)) {
pr_warn_once("Broken CNTx_CVAL_EL1, using 31 bit TVAL instead.\n");
return CLOCKSOURCE_MASK(31);
}
diff --git a/drivers/clocksource/exynos_mct.c b/drivers/clocksource/exynos_mct.c
index e6a02e351d77..da09f467a6bb 100644
--- a/drivers/clocksource/exynos_mct.c
+++ b/drivers/clocksource/exynos_mct.c
@@ -238,7 +238,7 @@ static cycles_t exynos4_read_current_timer(void)
static int __init exynos4_clocksource_init(bool frc_shared)
{
/*
- * When the frc is shared, the main processer should have already
+ * When the frc is shared, the main processor should have already
* turned it on and we shouldn't be writing to TCON.
*/
if (frc_shared)
diff --git a/drivers/clocksource/hyperv_timer.c b/drivers/clocksource/hyperv_timer.c
index f00019b078a7..09549451dd51 100644
--- a/drivers/clocksource/hyperv_timer.c
+++ b/drivers/clocksource/hyperv_timer.c
@@ -582,7 +582,7 @@ static void __init hv_init_tsc_clocksource(void)
* mapped.
*/
tsc_msr.as_uint64 = hv_get_msr(HV_MSR_REFERENCE_TSC);
- if (hv_root_partition)
+ if (hv_root_partition())
tsc_pfn = tsc_msr.pfn;
else
tsc_pfn = HVPFN_DOWN(virt_to_phys(tsc_page));
@@ -627,7 +627,7 @@ void __init hv_remap_tsc_clocksource(void)
if (!(ms_hyperv.features & HV_MSR_REFERENCE_TSC_AVAILABLE))
return;
- if (!hv_root_partition) {
+ if (!hv_root_partition()) {
WARN(1, "%s: attempt to remap TSC page in guest partition\n",
__func__);
return;
diff --git a/drivers/clocksource/timer-stm32-lp.c b/drivers/clocksource/timer-stm32-lp.c
index a4c95161cb22..928da2f6de69 100644
--- a/drivers/clocksource/timer-stm32-lp.c
+++ b/drivers/clocksource/timer-stm32-lp.c
@@ -24,7 +24,9 @@ struct stm32_lp_private {
struct regmap *reg;
struct clock_event_device clkevt;
unsigned long period;
+ u32 psc;
struct device *dev;
+ struct clk *clk;
};
static struct stm32_lp_private*
@@ -120,6 +122,27 @@ static void stm32_clkevent_lp_set_prescaler(struct stm32_lp_private *priv,
/* Adjust rate and period given the prescaler value */
*rate = DIV_ROUND_CLOSEST(*rate, (1 << i));
priv->period = DIV_ROUND_UP(*rate, HZ);
+ priv->psc = i;
+}
+
+static void stm32_clkevent_lp_suspend(struct clock_event_device *clkevt)
+{
+ struct stm32_lp_private *priv = to_priv(clkevt);
+
+ stm32_clkevent_lp_shutdown(clkevt);
+
+ /* balance clk_prepare_enable() from the probe */
+ clk_disable_unprepare(priv->clk);
+}
+
+static void stm32_clkevent_lp_resume(struct clock_event_device *clkevt)
+{
+ struct stm32_lp_private *priv = to_priv(clkevt);
+
+ clk_prepare_enable(priv->clk);
+
+ /* restore prescaler */
+ regmap_write(priv->reg, STM32_LPTIM_CFGR, priv->psc << CFGR_PSC_OFFSET);
}
static void stm32_clkevent_lp_init(struct stm32_lp_private *priv,
@@ -134,6 +157,8 @@ static void stm32_clkevent_lp_init(struct stm32_lp_private *priv,
priv->clkevt.set_state_oneshot = stm32_clkevent_lp_set_oneshot;
priv->clkevt.set_next_event = stm32_clkevent_lp_set_next_event;
priv->clkevt.rating = STM32_LP_RATING;
+ priv->clkevt.suspend = stm32_clkevent_lp_suspend;
+ priv->clkevt.resume = stm32_clkevent_lp_resume;
clockevents_config_and_register(&priv->clkevt, rate, 0x1,
STM32_LPTIM_MAX_ARR);
@@ -151,11 +176,12 @@ static int stm32_clkevent_lp_probe(struct platform_device *pdev)
return -ENOMEM;
priv->reg = ddata->regmap;
- ret = clk_prepare_enable(ddata->clk);
+ priv->clk = ddata->clk;
+ ret = clk_prepare_enable(priv->clk);
if (ret)
return -EINVAL;
- rate = clk_get_rate(ddata->clk);
+ rate = clk_get_rate(priv->clk);
if (!rate) {
ret = -EINVAL;
goto out_clk_disable;
@@ -168,9 +194,7 @@ static int stm32_clkevent_lp_probe(struct platform_device *pdev)
}
if (of_property_read_bool(pdev->dev.parent->of_node, "wakeup-source")) {
- ret = device_init_wakeup(&pdev->dev, true);
- if (ret)
- goto out_clk_disable;
+ device_set_wakeup_capable(&pdev->dev, true);
ret = dev_pm_set_wake_irq(&pdev->dev, irq);
if (ret)
@@ -191,7 +215,7 @@ static int stm32_clkevent_lp_probe(struct platform_device *pdev)
return 0;
out_clk_disable:
- clk_disable_unprepare(ddata->clk);
+ clk_disable_unprepare(priv->clk);
return ret;
}
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index 9e46960f6a86..4f9cb943d945 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -254,7 +254,7 @@ config ARM_TEGRA186_CPUFREQ
config ARM_TEGRA194_CPUFREQ
tristate "Tegra194 CPUFreq support"
- depends on ARCH_TEGRA_194_SOC || (64BIT && COMPILE_TEST)
+ depends on ARCH_TEGRA_194_SOC || ARCH_TEGRA_234_SOC || (64BIT && COMPILE_TEST)
depends on TEGRA_BPMP
default y
help
diff --git a/drivers/cpufreq/Kconfig.powerpc b/drivers/cpufreq/Kconfig.powerpc
index eb678fa5260a..551e65d35a1d 100644
--- a/drivers/cpufreq/Kconfig.powerpc
+++ b/drivers/cpufreq/Kconfig.powerpc
@@ -1,22 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
-config CPU_FREQ_CBE
- tristate "CBE frequency scaling"
- depends on CBE_RAS && PPC_CELL
- default m
- help
- This adds the cpufreq driver for Cell BE processors.
- For details, take a look at <file:Documentation/cpu-freq/>.
- If you don't have such processor, say N
-
-config CPU_FREQ_CBE_PMI
- bool "CBE frequency scaling using PMI interface"
- depends on CPU_FREQ_CBE
- default n
- help
- Select this, if you want to use the PMI interface to switch
- frequencies. Using PMI, the processor will not only be able to run at
- lower speed, but also at lower core voltage.
-
config CPU_FREQ_PMAC
bool "Support for Apple PowerBooks"
depends on ADB_PMU && PPC32
diff --git a/drivers/cpufreq/Kconfig.x86 b/drivers/cpufreq/Kconfig.x86
index 97c2d4f15d76..2c5c228408bf 100644
--- a/drivers/cpufreq/Kconfig.x86
+++ b/drivers/cpufreq/Kconfig.x86
@@ -340,3 +340,15 @@ config X86_SPEEDSTEP_RELAXED_CAP_CHECK
option lets the probing code bypass some of those checks if the
parameter "relaxed_check=1" is passed to the module.
+config CPUFREQ_ARCH_CUR_FREQ
+ default y
+ bool "Current frequency derived from HW provided feedback"
+ help
+ This determines whether the scaling_cur_freq sysfs attribute returns
+ the last requested frequency or a more precise value based on hardware
+ provided feedback (such as architected counters).
+ Since a more precise frequency can now be provided via the
+ cpuinfo_avg_freq attribute, enabling this option keeps
+ scaling_cur_freq reporting a counter-based frequency for
+ compatibility reasons.
+
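
A small user-space sketch of the distinction the help text draws: scaling_cur_freq may report either the last requested frequency or a counter-derived one, while cpuinfo_avg_freq (where present) exposes the averaged, hardware-fed value. The sysfs paths and the existence of cpuinfo_avg_freq are assumptions that depend on kernel version and hardware.

#include <stdio.h>

static long read_khz(const char *path)
{
	FILE *f = fopen(path, "r");
	long khz = -1;

	if (!f)
		return -1;
	if (fscanf(f, "%ld", &khz) != 1)
		khz = -1;
	fclose(f);
	return khz;
}

int main(void)
{
	long cur = read_khz("/sys/devices/system/cpu/cpu0/cpufreq/scaling_cur_freq");
	long avg = read_khz("/sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_avg_freq");

	printf("scaling_cur_freq: %ld kHz\n", cur);
	printf("cpuinfo_avg_freq: %ld kHz (may be absent)\n", avg);
	return 0;
}
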
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index 890fff99f37d..22ab45209f9b 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -91,9 +91,6 @@ obj-$(CONFIG_ARM_VEXPRESS_SPC_CPUFREQ) += vexpress-spc-cpufreq.o
##################################################################################
# PowerPC platform drivers
-obj-$(CONFIG_CPU_FREQ_CBE) += ppc-cbe-cpufreq.o
-ppc-cbe-cpufreq-y += ppc_cbe_cpufreq_pervasive.o ppc_cbe_cpufreq.o
-obj-$(CONFIG_CPU_FREQ_CBE_PMI) += ppc_cbe_cpufreq_pmi.o
obj-$(CONFIG_QORIQ_CPUFREQ) += qoriq-cpufreq.o
obj-$(CONFIG_CPU_FREQ_PMAC) += pmac32-cpufreq.o
obj-$(CONFIG_CPU_FREQ_PMAC64) += pmac64-cpufreq.o
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
index 463b69a2dff5..924314cdeebc 100644
--- a/drivers/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -909,6 +909,9 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
if (perf->states[0].core_frequency * 1000 != freq_table[0].frequency)
pr_warn(FW_WARN "P-state 0 is not max freq\n");
+ if (acpi_cpufreq_driver.set_boost)
+ policy->boost_supported = true;
+
return result;
err_unreg:
@@ -949,7 +952,6 @@ static int acpi_cpufreq_resume(struct cpufreq_policy *policy)
}
static struct freq_attr *acpi_cpufreq_attr[] = {
- &cpufreq_freq_attr_scaling_available_freqs,
&freqdomain_cpus,
#ifdef CONFIG_X86_ACPI_CPUFREQ_CPB
&cpb,
diff --git a/drivers/cpufreq/amd-pstate-trace.h b/drivers/cpufreq/amd-pstate-trace.h
index 8d692415d905..32e1bdc588c5 100644
--- a/drivers/cpufreq/amd-pstate-trace.h
+++ b/drivers/cpufreq/amd-pstate-trace.h
@@ -24,9 +24,9 @@
TRACE_EVENT(amd_pstate_perf,
- TP_PROTO(unsigned long min_perf,
- unsigned long target_perf,
- unsigned long capacity,
+ TP_PROTO(u8 min_perf,
+ u8 target_perf,
+ u8 capacity,
u64 freq,
u64 mperf,
u64 aperf,
@@ -47,9 +47,9 @@ TRACE_EVENT(amd_pstate_perf,
),
TP_STRUCT__entry(
- __field(unsigned long, min_perf)
- __field(unsigned long, target_perf)
- __field(unsigned long, capacity)
+ __field(u8, min_perf)
+ __field(u8, target_perf)
+ __field(u8, capacity)
__field(unsigned long long, freq)
__field(unsigned long long, mperf)
__field(unsigned long long, aperf)
@@ -70,10 +70,10 @@ TRACE_EVENT(amd_pstate_perf,
__entry->fast_switch = fast_switch;
),
- TP_printk("amd_min_perf=%lu amd_des_perf=%lu amd_max_perf=%lu freq=%llu mperf=%llu aperf=%llu tsc=%llu cpu_id=%u fast_switch=%s",
- (unsigned long)__entry->min_perf,
- (unsigned long)__entry->target_perf,
- (unsigned long)__entry->capacity,
+ TP_printk("amd_min_perf=%hhu amd_des_perf=%hhu amd_max_perf=%hhu freq=%llu mperf=%llu aperf=%llu tsc=%llu cpu_id=%u fast_switch=%s",
+ (u8)__entry->min_perf,
+ (u8)__entry->target_perf,
+ (u8)__entry->capacity,
(unsigned long long)__entry->freq,
(unsigned long long)__entry->mperf,
(unsigned long long)__entry->aperf,
@@ -86,11 +86,12 @@ TRACE_EVENT(amd_pstate_perf,
TRACE_EVENT(amd_pstate_epp_perf,
TP_PROTO(unsigned int cpu_id,
- unsigned int highest_perf,
- unsigned int epp,
- unsigned int min_perf,
- unsigned int max_perf,
- bool boost
+ u8 highest_perf,
+ u8 epp,
+ u8 min_perf,
+ u8 max_perf,
+ bool boost,
+ bool changed
),
TP_ARGS(cpu_id,
@@ -98,15 +99,17 @@ TRACE_EVENT(amd_pstate_epp_perf,
epp,
min_perf,
max_perf,
- boost),
+ boost,
+ changed),
TP_STRUCT__entry(
__field(unsigned int, cpu_id)
- __field(unsigned int, highest_perf)
- __field(unsigned int, epp)
- __field(unsigned int, min_perf)
- __field(unsigned int, max_perf)
+ __field(u8, highest_perf)
+ __field(u8, epp)
+ __field(u8, min_perf)
+ __field(u8, max_perf)
__field(bool, boost)
+ __field(bool, changed)
),
TP_fast_assign(
@@ -116,15 +119,17 @@ TRACE_EVENT(amd_pstate_epp_perf,
__entry->min_perf = min_perf;
__entry->max_perf = max_perf;
__entry->boost = boost;
+ __entry->changed = changed;
),
- TP_printk("cpu%u: [%u<->%u]/%u, epp=%u, boost=%u",
+ TP_printk("cpu%u: [%hhu<->%hhu]/%hhu, epp=%hhu, boost=%u, changed=%u",
(unsigned int)__entry->cpu_id,
- (unsigned int)__entry->min_perf,
- (unsigned int)__entry->max_perf,
- (unsigned int)__entry->highest_perf,
- (unsigned int)__entry->epp,
- (bool)__entry->boost
+ (u8)__entry->min_perf,
+ (u8)__entry->max_perf,
+ (u8)__entry->highest_perf,
+ (u8)__entry->epp,
+ (bool)__entry->boost,
+ (bool)__entry->changed
)
);
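
The switch from unsigned long to u8 in the trace events above works because CPPC min/des/max perf and EPP each occupy an 8-bit field of the 64-bit request value, matching the AMD_CPPC_*_PERF_MASK definitions visible in the amd-pstate.c hunks further down. A standalone sketch of that packing, with illustrative values only:

#include <stdint.h>
#include <stdio.h>

#define MAX_PERF_SHIFT	0	/* bits 7:0  */
#define MIN_PERF_SHIFT	8	/* bits 15:8 */
#define DES_PERF_SHIFT	16	/* bits 23:16 */
#define EPP_SHIFT	24	/* bits 31:24 */

static uint64_t pack_req(uint8_t max, uint8_t min, uint8_t des, uint8_t epp)
{
	return ((uint64_t)max << MAX_PERF_SHIFT) |
	       ((uint64_t)min << MIN_PERF_SHIFT) |
	       ((uint64_t)des << DES_PERF_SHIFT) |
	       ((uint64_t)epp << EPP_SHIFT);
}

int main(void)
{
	uint64_t req = pack_req(255, 40, 0, 128);

	printf("req=0x%016llx min=%u\n",
	       (unsigned long long)req,
	       (unsigned)((req >> MIN_PERF_SHIFT) & 0xff));
	return 0;
}
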
diff --git a/drivers/cpufreq/amd-pstate-ut.c b/drivers/cpufreq/amd-pstate-ut.c
index 3a0a380c3590..e671bc7d1550 100644
--- a/drivers/cpufreq/amd-pstate-ut.c
+++ b/drivers/cpufreq/amd-pstate-ut.c
@@ -22,39 +22,31 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/bitfield.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/fs.h>
+#include <linux/cleanup.h>
#include <acpi/cppc_acpi.h>
#include "amd-pstate.h"
-/*
- * Abbreviations:
- * amd_pstate_ut: used as a shortform for AMD P-State unit test.
- * It helps to keep variable names smaller, simpler
- */
-enum amd_pstate_ut_result {
- AMD_PSTATE_UT_RESULT_PASS,
- AMD_PSTATE_UT_RESULT_FAIL,
-};
struct amd_pstate_ut_struct {
const char *name;
- void (*func)(u32 index);
- enum amd_pstate_ut_result result;
+ int (*func)(u32 index);
};
/*
* Kernel module for the AMD P-State unit tests
*/
-static void amd_pstate_ut_acpi_cpc_valid(u32 index);
-static void amd_pstate_ut_check_enabled(u32 index);
-static void amd_pstate_ut_check_perf(u32 index);
-static void amd_pstate_ut_check_freq(u32 index);
-static void amd_pstate_ut_check_driver(u32 index);
+static int amd_pstate_ut_acpi_cpc_valid(u32 index);
+static int amd_pstate_ut_check_enabled(u32 index);
+static int amd_pstate_ut_check_perf(u32 index);
+static int amd_pstate_ut_check_freq(u32 index);
+static int amd_pstate_ut_check_driver(u32 index);
static struct amd_pstate_ut_struct amd_pstate_ut_cases[] = {
{"amd_pstate_ut_acpi_cpc_valid", amd_pstate_ut_acpi_cpc_valid },
@@ -77,71 +69,67 @@ static bool get_shared_mem(void)
/*
* check the _CPC object is present in SBIOS.
*/
-static void amd_pstate_ut_acpi_cpc_valid(u32 index)
+static int amd_pstate_ut_acpi_cpc_valid(u32 index)
{
- if (acpi_cpc_valid())
- amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_PASS;
- else {
- amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL;
+ if (!acpi_cpc_valid()) {
pr_err("%s the _CPC object is not present in SBIOS!\n", __func__);
+ return -EINVAL;
}
+
+ return 0;
}
-static void amd_pstate_ut_pstate_enable(u32 index)
+/*
+ * check if amd pstate is enabled
+ */
+static int amd_pstate_ut_check_enabled(u32 index)
{
- int ret = 0;
u64 cppc_enable = 0;
+ int ret;
+
+ if (get_shared_mem())
+ return 0;
ret = rdmsrl_safe(MSR_AMD_CPPC_ENABLE, &cppc_enable);
if (ret) {
- amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL;
pr_err("%s rdmsrl_safe MSR_AMD_CPPC_ENABLE ret=%d error!\n", __func__, ret);
- return;
+ return ret;
}
- if (cppc_enable)
- amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_PASS;
- else {
- amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL;
+
+ if (!cppc_enable) {
pr_err("%s amd pstate must be enabled!\n", __func__);
+ return -EINVAL;
}
-}
-/*
- * check if amd pstate is enabled
- */
-static void amd_pstate_ut_check_enabled(u32 index)
-{
- if (get_shared_mem())
- amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_PASS;
- else
- amd_pstate_ut_pstate_enable(index);
+ return 0;
}
/*
* check if performance values are reasonable.
* highest_perf >= nominal_perf > lowest_nonlinear_perf > lowest_perf > 0
*/
-static void amd_pstate_ut_check_perf(u32 index)
+static int amd_pstate_ut_check_perf(u32 index)
{
int cpu = 0, ret = 0;
u32 highest_perf = 0, nominal_perf = 0, lowest_nonlinear_perf = 0, lowest_perf = 0;
u64 cap1 = 0;
struct cppc_perf_caps cppc_perf;
- struct cpufreq_policy *policy = NULL;
- struct amd_cpudata *cpudata = NULL;
+ union perf_cached cur_perf;
+
+ for_each_online_cpu(cpu) {
+ struct cpufreq_policy *policy __free(put_cpufreq_policy) = NULL;
+ struct amd_cpudata *cpudata;
- for_each_possible_cpu(cpu) {
policy = cpufreq_cpu_get(cpu);
if (!policy)
- break;
+ continue;
cpudata = policy->driver_data;
if (get_shared_mem()) {
ret = cppc_get_perf_caps(cpu, &cppc_perf);
if (ret) {
- amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL;
pr_err("%s cppc_get_perf_caps ret=%d error!\n", __func__, ret);
- goto skip_test;
+ return ret;
}
highest_perf = cppc_perf.highest_perf;
@@ -151,50 +139,44 @@ static void amd_pstate_ut_check_perf(u32 index)
} else {
ret = rdmsrl_safe_on_cpu(cpu, MSR_AMD_CPPC_CAP1, &cap1);
if (ret) {
- amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL;
pr_err("%s read CPPC_CAP1 ret=%d error!\n", __func__, ret);
- goto skip_test;
+ return ret;
}
- highest_perf = AMD_CPPC_HIGHEST_PERF(cap1);
- nominal_perf = AMD_CPPC_NOMINAL_PERF(cap1);
- lowest_nonlinear_perf = AMD_CPPC_LOWNONLIN_PERF(cap1);
- lowest_perf = AMD_CPPC_LOWEST_PERF(cap1);
+ highest_perf = FIELD_GET(AMD_CPPC_HIGHEST_PERF_MASK, cap1);
+ nominal_perf = FIELD_GET(AMD_CPPC_NOMINAL_PERF_MASK, cap1);
+ lowest_nonlinear_perf = FIELD_GET(AMD_CPPC_LOWNONLIN_PERF_MASK, cap1);
+ lowest_perf = FIELD_GET(AMD_CPPC_LOWEST_PERF_MASK, cap1);
}
- if (highest_perf != READ_ONCE(cpudata->highest_perf) && !cpudata->hw_prefcore) {
+ cur_perf = READ_ONCE(cpudata->perf);
+ if (highest_perf != cur_perf.highest_perf && !cpudata->hw_prefcore) {
pr_err("%s cpu%d highest=%d %d highest perf doesn't match\n",
- __func__, cpu, highest_perf, cpudata->highest_perf);
- goto skip_test;
+ __func__, cpu, highest_perf, cur_perf.highest_perf);
+ return -EINVAL;
}
- if ((nominal_perf != READ_ONCE(cpudata->nominal_perf)) ||
- (lowest_nonlinear_perf != READ_ONCE(cpudata->lowest_nonlinear_perf)) ||
- (lowest_perf != READ_ONCE(cpudata->lowest_perf))) {
- amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL;
+ if (nominal_perf != cur_perf.nominal_perf ||
+ (lowest_nonlinear_perf != cur_perf.lowest_nonlinear_perf) ||
+ (lowest_perf != cur_perf.lowest_perf)) {
pr_err("%s cpu%d nominal=%d %d lowest_nonlinear=%d %d lowest=%d %d, they should be equal!\n",
- __func__, cpu, nominal_perf, cpudata->nominal_perf,
- lowest_nonlinear_perf, cpudata->lowest_nonlinear_perf,
- lowest_perf, cpudata->lowest_perf);
- goto skip_test;
+ __func__, cpu, nominal_perf, cur_perf.nominal_perf,
+ lowest_nonlinear_perf, cur_perf.lowest_nonlinear_perf,
+ lowest_perf, cur_perf.lowest_perf);
+ return -EINVAL;
}
if (!((highest_perf >= nominal_perf) &&
(nominal_perf > lowest_nonlinear_perf) &&
- (lowest_nonlinear_perf > lowest_perf) &&
+ (lowest_nonlinear_perf >= lowest_perf) &&
(lowest_perf > 0))) {
- amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL;
pr_err("%s cpu%d highest=%d >= nominal=%d > lowest_nonlinear=%d > lowest=%d > 0, the formula is incorrect!\n",
__func__, cpu, highest_perf, nominal_perf,
lowest_nonlinear_perf, lowest_perf);
- goto skip_test;
+ return -EINVAL;
}
- cpufreq_cpu_put(policy);
}
- amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_PASS;
- return;
-skip_test:
- cpufreq_cpu_put(policy);
+ return 0;
}
/*
@@ -202,59 +184,50 @@ skip_test:
* max_freq >= nominal_freq > lowest_nonlinear_freq > min_freq > 0
* check max freq when set support boost mode.
*/
-static void amd_pstate_ut_check_freq(u32 index)
+static int amd_pstate_ut_check_freq(u32 index)
{
int cpu = 0;
- struct cpufreq_policy *policy = NULL;
- struct amd_cpudata *cpudata = NULL;
- for_each_possible_cpu(cpu) {
+ for_each_online_cpu(cpu) {
+ struct cpufreq_policy *policy __free(put_cpufreq_policy) = NULL;
+ struct amd_cpudata *cpudata;
+
policy = cpufreq_cpu_get(cpu);
if (!policy)
- break;
+ continue;
cpudata = policy->driver_data;
- if (!((cpudata->max_freq >= cpudata->nominal_freq) &&
+ if (!((policy->cpuinfo.max_freq >= cpudata->nominal_freq) &&
(cpudata->nominal_freq > cpudata->lowest_nonlinear_freq) &&
- (cpudata->lowest_nonlinear_freq > cpudata->min_freq) &&
- (cpudata->min_freq > 0))) {
- amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL;
+ (cpudata->lowest_nonlinear_freq >= policy->cpuinfo.min_freq) &&
+ (policy->cpuinfo.min_freq > 0))) {
pr_err("%s cpu%d max=%d >= nominal=%d > lowest_nonlinear=%d > min=%d > 0, the formula is incorrect!\n",
- __func__, cpu, cpudata->max_freq, cpudata->nominal_freq,
- cpudata->lowest_nonlinear_freq, cpudata->min_freq);
- goto skip_test;
+ __func__, cpu, policy->cpuinfo.max_freq, cpudata->nominal_freq,
+ cpudata->lowest_nonlinear_freq, policy->cpuinfo.min_freq);
+ return -EINVAL;
}
if (cpudata->lowest_nonlinear_freq != policy->min) {
- amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL;
pr_err("%s cpu%d cpudata_lowest_nonlinear_freq=%d policy_min=%d, they should be equal!\n",
__func__, cpu, cpudata->lowest_nonlinear_freq, policy->min);
- goto skip_test;
+ return -EINVAL;
}
if (cpudata->boost_supported) {
- if ((policy->max == cpudata->max_freq) ||
- (policy->max == cpudata->nominal_freq))
- amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_PASS;
- else {
- amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL;
+ if ((policy->max != policy->cpuinfo.max_freq) &&
+ (policy->max != cpudata->nominal_freq)) {
pr_err("%s cpu%d policy_max=%d should be equal cpu_max=%d or cpu_nominal=%d !\n",
- __func__, cpu, policy->max, cpudata->max_freq,
+ __func__, cpu, policy->max, policy->cpuinfo.max_freq,
cpudata->nominal_freq);
- goto skip_test;
+ return -EINVAL;
}
} else {
- amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL;
pr_err("%s cpu%d must support boost!\n", __func__, cpu);
- goto skip_test;
+ return -EINVAL;
}
- cpufreq_cpu_put(policy);
}
- amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_PASS;
- return;
-skip_test:
- cpufreq_cpu_put(policy);
+ return 0;
}
static int amd_pstate_set_mode(enum amd_pstate_mode mode)
@@ -266,32 +239,28 @@ static int amd_pstate_set_mode(enum amd_pstate_mode mode)
return amd_pstate_update_status(mode_str, strlen(mode_str));
}
-static void amd_pstate_ut_check_driver(u32 index)
+static int amd_pstate_ut_check_driver(u32 index)
{
enum amd_pstate_mode mode1, mode2 = AMD_PSTATE_DISABLE;
- int ret;
for (mode1 = AMD_PSTATE_DISABLE; mode1 < AMD_PSTATE_MAX; mode1++) {
- ret = amd_pstate_set_mode(mode1);
+ int ret = amd_pstate_set_mode(mode1);
if (ret)
- goto out;
+ return ret;
for (mode2 = AMD_PSTATE_DISABLE; mode2 < AMD_PSTATE_MAX; mode2++) {
if (mode1 == mode2)
continue;
ret = amd_pstate_set_mode(mode2);
- if (ret)
- goto out;
+ if (ret) {
+ pr_err("%s: failed to update status for %s->%s\n", __func__,
+ amd_pstate_get_mode_string(mode1),
+ amd_pstate_get_mode_string(mode2));
+ return ret;
+ }
}
}
-out:
- if (ret)
- pr_warn("%s: failed to update status for %s->%s: %d\n", __func__,
- amd_pstate_get_mode_string(mode1),
- amd_pstate_get_mode_string(mode2), ret);
-
- amd_pstate_ut_cases[index].result = ret ?
- AMD_PSTATE_UT_RESULT_FAIL :
- AMD_PSTATE_UT_RESULT_PASS;
+
+ return 0;
}
static int __init amd_pstate_ut_init(void)
@@ -299,16 +268,12 @@ static int __init amd_pstate_ut_init(void)
u32 i = 0, arr_size = ARRAY_SIZE(amd_pstate_ut_cases);
for (i = 0; i < arr_size; i++) {
- amd_pstate_ut_cases[i].func(i);
- switch (amd_pstate_ut_cases[i].result) {
- case AMD_PSTATE_UT_RESULT_PASS:
+ int ret = amd_pstate_ut_cases[i].func(i);
+
+ if (ret)
+ pr_err("%-4d %-20s\t fail: %d!\n", i+1, amd_pstate_ut_cases[i].name, ret);
+ else
pr_info("%-4d %-20s\t success!\n", i+1, amd_pstate_ut_cases[i].name);
- break;
- case AMD_PSTATE_UT_RESULT_FAIL:
- default:
- pr_info("%-4d %-20s\t fail!\n", i+1, amd_pstate_ut_cases[i].name);
- break;
- }
}
return 0;
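
The restructuring above replaces the per-case result enum with plain int returns checked by the runner. A minimal sketch of the same pattern outside the kernel, with made-up test names and bodies:

#include <stdio.h>
#include <errno.h>
#include <stddef.h>

struct ut_case {
	const char *name;
	int (*func)(unsigned int index);
};

static int ut_always_pass(unsigned int index)
{
	return 0;
}

static int ut_always_fail(unsigned int index)
{
	return -EINVAL;
}

static struct ut_case cases[] = {
	{ "always_pass", ut_always_pass },
	{ "always_fail", ut_always_fail },
};

int main(void)
{
	for (size_t i = 0; i < sizeof(cases) / sizeof(cases[0]); i++) {
		int ret = cases[i].func(i);

		if (ret)
			printf("%-4zu %-20s fail: %d\n", i + 1, cases[i].name, ret);
		else
			printf("%-4zu %-20s success\n", i + 1, cases[i].name);
	}
	return 0;
}
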
diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c
index 313550fa62d4..6789eed1bb5b 100644
--- a/drivers/cpufreq/amd-pstate.c
+++ b/drivers/cpufreq/amd-pstate.c
@@ -85,15 +85,9 @@ static struct cpufreq_driver *current_pstate_driver;
static struct cpufreq_driver amd_pstate_driver;
static struct cpufreq_driver amd_pstate_epp_driver;
static int cppc_state = AMD_PSTATE_UNDEFINED;
-static bool cppc_enabled;
static bool amd_pstate_prefcore = true;
static struct quirk_entry *quirks;
-#define AMD_CPPC_MAX_PERF_MASK GENMASK(7, 0)
-#define AMD_CPPC_MIN_PERF_MASK GENMASK(15, 8)
-#define AMD_CPPC_DES_PERF_MASK GENMASK(23, 16)
-#define AMD_CPPC_EPP_PERF_MASK GENMASK(31, 24)
-
/*
* AMD Energy Preference Performance (EPP)
* The EPP is used in the CCLK DPM controller to drive
@@ -142,6 +136,19 @@ static struct quirk_entry quirk_amd_7k62 = {
.lowest_freq = 550,
};
+static inline u8 freq_to_perf(union perf_cached perf, u32 nominal_freq, unsigned int freq_val)
+{
+ u32 perf_val = DIV_ROUND_UP_ULL((u64)freq_val * perf.nominal_perf, nominal_freq);
+
+ return (u8)clamp(perf_val, perf.lowest_perf, perf.highest_perf);
+}
+
+static inline u32 perf_to_freq(union perf_cached perf, u32 nominal_freq, u8 perf_val)
+{
+ return DIV_ROUND_UP_ULL((u64)nominal_freq * perf_val,
+ perf.nominal_perf);
+}
+
static int __init dmi_matched_7k62_bios_bug(const struct dmi_system_id *dmi)
{
/**
@@ -183,10 +190,9 @@ static inline int get_mode_idx_from_str(const char *str, size_t size)
return -EINVAL;
}
-static DEFINE_MUTEX(amd_pstate_limits_lock);
static DEFINE_MUTEX(amd_pstate_driver_lock);
-static s16 msr_get_epp(struct amd_cpudata *cpudata)
+static u8 msr_get_epp(struct amd_cpudata *cpudata)
{
u64 value;
int ret;
@@ -207,7 +213,7 @@ static inline s16 amd_pstate_get_epp(struct amd_cpudata *cpudata)
return static_call(amd_pstate_get_epp)(cpudata);
}
-static s16 shmem_get_epp(struct amd_cpudata *cpudata)
+static u8 shmem_get_epp(struct amd_cpudata *cpudata)
{
u64 epp;
int ret;
@@ -218,12 +224,13 @@ static s16 shmem_get_epp(struct amd_cpudata *cpudata)
return ret;
}
- return (s16)(epp & 0xff);
+ return FIELD_GET(AMD_CPPC_EPP_PERF_MASK, epp);
}
-static int msr_update_perf(struct amd_cpudata *cpudata, u32 min_perf,
- u32 des_perf, u32 max_perf, u32 epp, bool fast_switch)
+static int msr_update_perf(struct cpufreq_policy *policy, u8 min_perf,
+ u8 des_perf, u8 max_perf, u8 epp, bool fast_switch)
{
+ struct amd_cpudata *cpudata = policy->driver_data;
u64 value, prev;
value = prev = READ_ONCE(cpudata->cppc_req_cached);
@@ -235,6 +242,18 @@ static int msr_update_perf(struct amd_cpudata *cpudata, u32 min_perf,
value |= FIELD_PREP(AMD_CPPC_MIN_PERF_MASK, min_perf);
value |= FIELD_PREP(AMD_CPPC_EPP_PERF_MASK, epp);
+ if (trace_amd_pstate_epp_perf_enabled()) {
+ union perf_cached perf = READ_ONCE(cpudata->perf);
+
+ trace_amd_pstate_epp_perf(cpudata->cpu,
+ perf.highest_perf,
+ epp,
+ min_perf,
+ max_perf,
+ policy->boost_enabled,
+ value != prev);
+ }
+
if (value == prev)
return 0;
@@ -249,24 +268,24 @@ static int msr_update_perf(struct amd_cpudata *cpudata, u32 min_perf,
}
WRITE_ONCE(cpudata->cppc_req_cached, value);
- WRITE_ONCE(cpudata->epp_cached, epp);
return 0;
}
DEFINE_STATIC_CALL(amd_pstate_update_perf, msr_update_perf);
-static inline int amd_pstate_update_perf(struct amd_cpudata *cpudata,
- u32 min_perf, u32 des_perf,
- u32 max_perf, u32 epp,
+static inline int amd_pstate_update_perf(struct cpufreq_policy *policy,
+ u8 min_perf, u8 des_perf,
+ u8 max_perf, u8 epp,
bool fast_switch)
{
- return static_call(amd_pstate_update_perf)(cpudata, min_perf, des_perf,
+ return static_call(amd_pstate_update_perf)(policy, min_perf, des_perf,
max_perf, epp, fast_switch);
}
-static int msr_set_epp(struct amd_cpudata *cpudata, u32 epp)
+static int msr_set_epp(struct cpufreq_policy *policy, u8 epp)
{
+ struct amd_cpudata *cpudata = policy->driver_data;
u64 value, prev;
int ret;
@@ -274,6 +293,19 @@ static int msr_set_epp(struct amd_cpudata *cpudata, u32 epp)
value &= ~AMD_CPPC_EPP_PERF_MASK;
value |= FIELD_PREP(AMD_CPPC_EPP_PERF_MASK, epp);
+ if (trace_amd_pstate_epp_perf_enabled()) {
+ union perf_cached perf = cpudata->perf;
+
+ trace_amd_pstate_epp_perf(cpudata->cpu, perf.highest_perf,
+ epp,
+ FIELD_GET(AMD_CPPC_MIN_PERF_MASK,
+ cpudata->cppc_req_cached),
+ FIELD_GET(AMD_CPPC_MAX_PERF_MASK,
+ cpudata->cppc_req_cached),
+ policy->boost_enabled,
+ value != prev);
+ }
+
if (value == prev)
return 0;
@@ -284,7 +316,6 @@ static int msr_set_epp(struct amd_cpudata *cpudata, u32 epp)
}
/* update both so that msr_update_perf() can effectively check */
- WRITE_ONCE(cpudata->epp_cached, epp);
WRITE_ONCE(cpudata->cppc_req_cached, value);
return ret;
@@ -292,17 +323,35 @@ static int msr_set_epp(struct amd_cpudata *cpudata, u32 epp)
DEFINE_STATIC_CALL(amd_pstate_set_epp, msr_set_epp);
-static inline int amd_pstate_set_epp(struct amd_cpudata *cpudata, u32 epp)
+static inline int amd_pstate_set_epp(struct cpufreq_policy *policy, u8 epp)
{
- return static_call(amd_pstate_set_epp)(cpudata, epp);
+ return static_call(amd_pstate_set_epp)(policy, epp);
}
-static int shmem_set_epp(struct amd_cpudata *cpudata, u32 epp)
+static int shmem_set_epp(struct cpufreq_policy *policy, u8 epp)
{
- int ret;
+ struct amd_cpudata *cpudata = policy->driver_data;
struct cppc_perf_ctrls perf_ctrls;
+ u8 epp_cached;
+ u64 value;
+ int ret;
+
+
+ epp_cached = FIELD_GET(AMD_CPPC_EPP_PERF_MASK, cpudata->cppc_req_cached);
+ if (trace_amd_pstate_epp_perf_enabled()) {
+ union perf_cached perf = cpudata->perf;
+
+ trace_amd_pstate_epp_perf(cpudata->cpu, perf.highest_perf,
+ epp,
+ FIELD_GET(AMD_CPPC_MIN_PERF_MASK,
+ cpudata->cppc_req_cached),
+ FIELD_GET(AMD_CPPC_MAX_PERF_MASK,
+ cpudata->cppc_req_cached),
+ policy->boost_enabled,
+ epp != epp_cached);
+ }
- if (epp == cpudata->epp_cached)
+ if (epp == epp_cached)
return 0;
perf_ctrls.energy_perf = epp;
@@ -311,106 +360,35 @@ static int shmem_set_epp(struct amd_cpudata *cpudata, u32 epp)
pr_debug("failed to set energy perf value (%d)\n", ret);
return ret;
}
- WRITE_ONCE(cpudata->epp_cached, epp);
- return ret;
-}
-
-static int amd_pstate_set_energy_pref_index(struct cpufreq_policy *policy,
- int pref_index)
-{
- struct amd_cpudata *cpudata = policy->driver_data;
- int epp;
-
- if (!pref_index)
- epp = cpudata->epp_default;
- else
- epp = epp_values[pref_index];
-
- if (epp > 0 && cpudata->policy == CPUFREQ_POLICY_PERFORMANCE) {
- pr_debug("EPP cannot be set under performance policy\n");
- return -EBUSY;
- }
-
- if (trace_amd_pstate_epp_perf_enabled()) {
- trace_amd_pstate_epp_perf(cpudata->cpu, cpudata->highest_perf,
- epp,
- FIELD_GET(AMD_CPPC_MIN_PERF_MASK, cpudata->cppc_req_cached),
- FIELD_GET(AMD_CPPC_MAX_PERF_MASK, cpudata->cppc_req_cached),
- policy->boost_enabled);
- }
+ value = READ_ONCE(cpudata->cppc_req_cached);
+ value &= ~AMD_CPPC_EPP_PERF_MASK;
+ value |= FIELD_PREP(AMD_CPPC_EPP_PERF_MASK, epp);
+ WRITE_ONCE(cpudata->cppc_req_cached, value);
- return amd_pstate_set_epp(cpudata, epp);
+ return ret;
}
-static inline int msr_cppc_enable(bool enable)
+static inline int msr_cppc_enable(struct cpufreq_policy *policy)
{
- int ret, cpu;
- unsigned long logical_proc_id_mask = 0;
-
- /*
- * MSR_AMD_CPPC_ENABLE is write-once, once set it cannot be cleared.
- */
- if (!enable)
- return 0;
-
- if (enable == cppc_enabled)
- return 0;
-
- for_each_present_cpu(cpu) {
- unsigned long logical_id = topology_logical_package_id(cpu);
-
- if (test_bit(logical_id, &logical_proc_id_mask))
- continue;
-
- set_bit(logical_id, &logical_proc_id_mask);
-
- ret = wrmsrl_safe_on_cpu(cpu, MSR_AMD_CPPC_ENABLE,
- enable);
- if (ret)
- return ret;
- }
-
- cppc_enabled = enable;
- return 0;
+ return wrmsrl_safe_on_cpu(policy->cpu, MSR_AMD_CPPC_ENABLE, 1);
}
-static int shmem_cppc_enable(bool enable)
+static int shmem_cppc_enable(struct cpufreq_policy *policy)
{
- int cpu, ret = 0;
- struct cppc_perf_ctrls perf_ctrls;
-
- if (enable == cppc_enabled)
- return 0;
-
- for_each_present_cpu(cpu) {
- ret = cppc_set_enable(cpu, enable);
- if (ret)
- return ret;
-
- /* Enable autonomous mode for EPP */
- if (cppc_state == AMD_PSTATE_ACTIVE) {
- /* Set desired perf as zero to allow EPP firmware control */
- perf_ctrls.desired_perf = 0;
- ret = cppc_set_perf(cpu, &perf_ctrls);
- if (ret)
- return ret;
- }
- }
-
- cppc_enabled = enable;
- return ret;
+ return cppc_set_enable(policy->cpu, 1);
}
DEFINE_STATIC_CALL(amd_pstate_cppc_enable, msr_cppc_enable);
-static inline int amd_pstate_cppc_enable(bool enable)
+static inline int amd_pstate_cppc_enable(struct cpufreq_policy *policy)
{
- return static_call(amd_pstate_cppc_enable)(enable);
+ return static_call(amd_pstate_cppc_enable)(policy);
}
static int msr_init_perf(struct amd_cpudata *cpudata)
{
+ union perf_cached perf = READ_ONCE(cpudata->perf);
u64 cap1, numerator;
int ret = rdmsrl_safe_on_cpu(cpudata->cpu, MSR_AMD_CPPC_CAP1,
@@ -422,19 +400,22 @@ static int msr_init_perf(struct amd_cpudata *cpudata)
if (ret)
return ret;
- WRITE_ONCE(cpudata->highest_perf, numerator);
- WRITE_ONCE(cpudata->max_limit_perf, numerator);
- WRITE_ONCE(cpudata->nominal_perf, AMD_CPPC_NOMINAL_PERF(cap1));
- WRITE_ONCE(cpudata->lowest_nonlinear_perf, AMD_CPPC_LOWNONLIN_PERF(cap1));
- WRITE_ONCE(cpudata->lowest_perf, AMD_CPPC_LOWEST_PERF(cap1));
- WRITE_ONCE(cpudata->prefcore_ranking, AMD_CPPC_HIGHEST_PERF(cap1));
- WRITE_ONCE(cpudata->min_limit_perf, AMD_CPPC_LOWEST_PERF(cap1));
+ perf.highest_perf = numerator;
+ perf.max_limit_perf = numerator;
+ perf.min_limit_perf = FIELD_GET(AMD_CPPC_LOWEST_PERF_MASK, cap1);
+ perf.nominal_perf = FIELD_GET(AMD_CPPC_NOMINAL_PERF_MASK, cap1);
+ perf.lowest_nonlinear_perf = FIELD_GET(AMD_CPPC_LOWNONLIN_PERF_MASK, cap1);
+ perf.lowest_perf = FIELD_GET(AMD_CPPC_LOWEST_PERF_MASK, cap1);
+ WRITE_ONCE(cpudata->perf, perf);
+ WRITE_ONCE(cpudata->prefcore_ranking, FIELD_GET(AMD_CPPC_HIGHEST_PERF_MASK, cap1));
+
return 0;
}
static int shmem_init_perf(struct amd_cpudata *cpudata)
{
struct cppc_perf_caps cppc_perf;
+ union perf_cached perf = READ_ONCE(cpudata->perf);
u64 numerator;
int ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf);
@@ -445,14 +426,14 @@ static int shmem_init_perf(struct amd_cpudata *cpudata)
if (ret)
return ret;
- WRITE_ONCE(cpudata->highest_perf, numerator);
- WRITE_ONCE(cpudata->max_limit_perf, numerator);
- WRITE_ONCE(cpudata->nominal_perf, cppc_perf.nominal_perf);
- WRITE_ONCE(cpudata->lowest_nonlinear_perf,
- cppc_perf.lowest_nonlinear_perf);
- WRITE_ONCE(cpudata->lowest_perf, cppc_perf.lowest_perf);
+ perf.highest_perf = numerator;
+ perf.max_limit_perf = numerator;
+ perf.min_limit_perf = cppc_perf.lowest_perf;
+ perf.nominal_perf = cppc_perf.nominal_perf;
+ perf.lowest_nonlinear_perf = cppc_perf.lowest_nonlinear_perf;
+ perf.lowest_perf = cppc_perf.lowest_perf;
+ WRITE_ONCE(cpudata->perf, perf);
WRITE_ONCE(cpudata->prefcore_ranking, cppc_perf.highest_perf);
- WRITE_ONCE(cpudata->min_limit_perf, cppc_perf.lowest_perf);
if (cppc_state == AMD_PSTATE_ACTIVE)
return 0;
@@ -479,23 +460,56 @@ static inline int amd_pstate_init_perf(struct amd_cpudata *cpudata)
return static_call(amd_pstate_init_perf)(cpudata);
}
-static int shmem_update_perf(struct amd_cpudata *cpudata, u32 min_perf,
- u32 des_perf, u32 max_perf, u32 epp, bool fast_switch)
+static int shmem_update_perf(struct cpufreq_policy *policy, u8 min_perf,
+ u8 des_perf, u8 max_perf, u8 epp, bool fast_switch)
{
+ struct amd_cpudata *cpudata = policy->driver_data;
struct cppc_perf_ctrls perf_ctrls;
+ u64 value, prev;
+ int ret;
if (cppc_state == AMD_PSTATE_ACTIVE) {
- int ret = shmem_set_epp(cpudata, epp);
+ int ret = shmem_set_epp(policy, epp);
if (ret)
return ret;
}
+ value = prev = READ_ONCE(cpudata->cppc_req_cached);
+
+ value &= ~(AMD_CPPC_MAX_PERF_MASK | AMD_CPPC_MIN_PERF_MASK |
+ AMD_CPPC_DES_PERF_MASK | AMD_CPPC_EPP_PERF_MASK);
+ value |= FIELD_PREP(AMD_CPPC_MAX_PERF_MASK, max_perf);
+ value |= FIELD_PREP(AMD_CPPC_DES_PERF_MASK, des_perf);
+ value |= FIELD_PREP(AMD_CPPC_MIN_PERF_MASK, min_perf);
+ value |= FIELD_PREP(AMD_CPPC_EPP_PERF_MASK, epp);
+
+ if (trace_amd_pstate_epp_perf_enabled()) {
+ union perf_cached perf = READ_ONCE(cpudata->perf);
+
+ trace_amd_pstate_epp_perf(cpudata->cpu,
+ perf.highest_perf,
+ epp,
+ min_perf,
+ max_perf,
+ policy->boost_enabled,
+ value != prev);
+ }
+
+ if (value == prev)
+ return 0;
+
perf_ctrls.max_perf = max_perf;
perf_ctrls.min_perf = min_perf;
perf_ctrls.desired_perf = des_perf;
- return cppc_set_perf(cpudata->cpu, &perf_ctrls);
+ ret = cppc_set_perf(cpudata->cpu, &perf_ctrls);
+ if (ret)
+ return ret;
+
+ WRITE_ONCE(cpudata->cppc_req_cached, value);
+
+ return 0;
}
static inline bool amd_pstate_sample(struct amd_cpudata *cpudata)
@@ -531,17 +545,18 @@ static inline bool amd_pstate_sample(struct amd_cpudata *cpudata)
return true;
}
-static void amd_pstate_update(struct amd_cpudata *cpudata, u32 min_perf,
- u32 des_perf, u32 max_perf, bool fast_switch, int gov_flags)
+static void amd_pstate_update(struct amd_cpudata *cpudata, u8 min_perf,
+ u8 des_perf, u8 max_perf, bool fast_switch, int gov_flags)
{
- unsigned long max_freq;
- struct cpufreq_policy *policy = cpufreq_cpu_get(cpudata->cpu);
- u32 nominal_perf = READ_ONCE(cpudata->nominal_perf);
+ struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpudata->cpu);
+ union perf_cached perf = READ_ONCE(cpudata->perf);
+
+ if (!policy)
+ return;
- des_perf = clamp_t(unsigned long, des_perf, min_perf, max_perf);
+ des_perf = clamp_t(u8, des_perf, min_perf, max_perf);
- max_freq = READ_ONCE(cpudata->max_limit_freq);
- policy->cur = div_u64(des_perf * max_freq, max_perf);
+ policy->cur = perf_to_freq(perf, cpudata->nominal_freq, des_perf);
if ((cppc_state == AMD_PSTATE_GUIDED) && (gov_flags & CPUFREQ_GOV_DYNAMIC_SWITCHING)) {
min_perf = des_perf;
@@ -550,7 +565,7 @@ static void amd_pstate_update(struct amd_cpudata *cpudata, u32 min_perf,
/* limit the max perf when core performance boost feature is disabled */
if (!cpudata->boost_supported)
- max_perf = min_t(unsigned long, nominal_perf, max_perf);
+ max_perf = min_t(u8, perf.nominal_perf, max_perf);
if (trace_amd_pstate_perf_enabled() && amd_pstate_sample(cpudata)) {
trace_amd_pstate_perf(min_perf, des_perf, max_perf, cpudata->freq,
@@ -558,9 +573,7 @@ static void amd_pstate_update(struct amd_cpudata *cpudata, u32 min_perf,
cpudata->cpu, fast_switch);
}
- amd_pstate_update_perf(cpudata, min_perf, des_perf, max_perf, 0, fast_switch);
-
- cpufreq_cpu_put(policy);
+ amd_pstate_update_perf(policy, min_perf, des_perf, max_perf, 0, fast_switch);
}
static int amd_pstate_verify(struct cpufreq_policy_data *policy_data)
@@ -572,7 +585,8 @@ static int amd_pstate_verify(struct cpufreq_policy_data *policy_data)
* amd-pstate qos_requests.
*/
if (policy_data->min == FREQ_QOS_MIN_DEFAULT_VALUE) {
- struct cpufreq_policy *policy = cpufreq_cpu_get(policy_data->cpu);
+ struct cpufreq_policy *policy __free(put_cpufreq_policy) =
+ cpufreq_cpu_get(policy_data->cpu);
struct amd_cpudata *cpudata;
if (!policy)
@@ -580,58 +594,48 @@ static int amd_pstate_verify(struct cpufreq_policy_data *policy_data)
cpudata = policy->driver_data;
policy_data->min = cpudata->lowest_nonlinear_freq;
- cpufreq_cpu_put(policy);
}
cpufreq_verify_within_cpu_limits(policy_data);
- pr_debug("policy_max =%d, policy_min=%d\n", policy_data->max, policy_data->min);
return 0;
}
-static int amd_pstate_update_min_max_limit(struct cpufreq_policy *policy)
+static void amd_pstate_update_min_max_limit(struct cpufreq_policy *policy)
{
- u32 max_limit_perf, min_limit_perf, max_perf, max_freq;
struct amd_cpudata *cpudata = policy->driver_data;
+ union perf_cached perf = READ_ONCE(cpudata->perf);
- max_perf = READ_ONCE(cpudata->highest_perf);
- max_freq = READ_ONCE(cpudata->max_freq);
- max_limit_perf = div_u64(policy->max * max_perf, max_freq);
- min_limit_perf = div_u64(policy->min * max_perf, max_freq);
+ perf.max_limit_perf = freq_to_perf(perf, cpudata->nominal_freq, policy->max);
+ perf.min_limit_perf = freq_to_perf(perf, cpudata->nominal_freq, policy->min);
if (cpudata->policy == CPUFREQ_POLICY_PERFORMANCE)
- min_limit_perf = min(cpudata->nominal_perf, max_limit_perf);
+ perf.min_limit_perf = min(perf.nominal_perf, perf.max_limit_perf);
- WRITE_ONCE(cpudata->max_limit_perf, max_limit_perf);
- WRITE_ONCE(cpudata->min_limit_perf, min_limit_perf);
WRITE_ONCE(cpudata->max_limit_freq, policy->max);
WRITE_ONCE(cpudata->min_limit_freq, policy->min);
-
- return 0;
+ WRITE_ONCE(cpudata->perf, perf);
}
static int amd_pstate_update_freq(struct cpufreq_policy *policy,
unsigned int target_freq, bool fast_switch)
{
struct cpufreq_freqs freqs;
- struct amd_cpudata *cpudata = policy->driver_data;
- unsigned long max_perf, min_perf, des_perf, cap_perf;
+ struct amd_cpudata *cpudata;
+ union perf_cached perf;
+ u8 des_perf;
- if (!cpudata->max_freq)
- return -ENODEV;
+ cpudata = policy->driver_data;
if (policy->min != cpudata->min_limit_freq || policy->max != cpudata->max_limit_freq)
amd_pstate_update_min_max_limit(policy);
- cap_perf = READ_ONCE(cpudata->highest_perf);
- min_perf = READ_ONCE(cpudata->lowest_perf);
- max_perf = cap_perf;
+ perf = READ_ONCE(cpudata->perf);
freqs.old = policy->cur;
freqs.new = target_freq;
- des_perf = DIV_ROUND_CLOSEST(target_freq * cap_perf,
- cpudata->max_freq);
+ des_perf = freq_to_perf(perf, cpudata->nominal_freq, target_freq);
WARN_ON(fast_switch && !policy->fast_switch_enabled);
/*
@@ -642,8 +646,9 @@ static int amd_pstate_update_freq(struct cpufreq_policy *policy,
if (!fast_switch)
cpufreq_freq_transition_begin(policy, &freqs);
- amd_pstate_update(cpudata, min_perf, des_perf,
- max_perf, fast_switch, policy->governor->flags);
+ amd_pstate_update(cpudata, perf.min_limit_perf, des_perf,
+ perf.max_limit_perf, fast_switch,
+ policy->governor->flags);
if (!fast_switch)
cpufreq_freq_transition_end(policy, &freqs, false);
@@ -671,10 +676,10 @@ static void amd_pstate_adjust_perf(unsigned int cpu,
unsigned long target_perf,
unsigned long capacity)
{
- unsigned long max_perf, min_perf, des_perf,
- cap_perf, lowest_nonlinear_perf;
- struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
+ u8 max_perf, min_perf, des_perf, cap_perf;
+ struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu);
struct amd_cpudata *cpudata;
+ union perf_cached perf;
if (!policy)
return;
@@ -684,40 +689,38 @@ static void amd_pstate_adjust_perf(unsigned int cpu,
if (policy->min != cpudata->min_limit_freq || policy->max != cpudata->max_limit_freq)
amd_pstate_update_min_max_limit(policy);
-
- cap_perf = READ_ONCE(cpudata->highest_perf);
- lowest_nonlinear_perf = READ_ONCE(cpudata->lowest_nonlinear_perf);
+ perf = READ_ONCE(cpudata->perf);
+ cap_perf = perf.highest_perf;
des_perf = cap_perf;
if (target_perf < capacity)
des_perf = DIV_ROUND_UP(cap_perf * target_perf, capacity);
- min_perf = READ_ONCE(cpudata->lowest_perf);
if (_min_perf < capacity)
min_perf = DIV_ROUND_UP(cap_perf * _min_perf, capacity);
+ else
+ min_perf = cap_perf;
- if (min_perf < lowest_nonlinear_perf)
- min_perf = lowest_nonlinear_perf;
+ if (min_perf < perf.min_limit_perf)
+ min_perf = perf.min_limit_perf;
- max_perf = cpudata->max_limit_perf;
+ max_perf = perf.max_limit_perf;
if (max_perf < min_perf)
max_perf = min_perf;
- des_perf = clamp_t(unsigned long, des_perf, min_perf, max_perf);
-
amd_pstate_update(cpudata, min_perf, des_perf, max_perf, true,
policy->governor->flags);
- cpufreq_cpu_put(policy);
}
static int amd_pstate_cpu_boost_update(struct cpufreq_policy *policy, bool on)
{
struct amd_cpudata *cpudata = policy->driver_data;
+ union perf_cached perf = READ_ONCE(cpudata->perf);
u32 nominal_freq, max_freq;
int ret = 0;
nominal_freq = READ_ONCE(cpudata->nominal_freq);
- max_freq = READ_ONCE(cpudata->max_freq);
+ max_freq = perf_to_freq(perf, cpudata->nominal_freq, perf.highest_perf);
if (on)
policy->cpuinfo.max_freq = max_freq;
@@ -744,7 +747,6 @@ static int amd_pstate_set_boost(struct cpufreq_policy *policy, int state)
pr_err("Boost mode is not supported by this processor or SBIOS\n");
return -EOPNOTSUPP;
}
- guard(mutex)(&amd_pstate_driver_lock);
ret = amd_pstate_cpu_boost_update(policy, state);
refresh_frequency_limits(policy);
@@ -821,28 +823,21 @@ static void amd_pstate_init_prefcore(struct amd_cpudata *cpudata)
static void amd_pstate_update_limits(unsigned int cpu)
{
- struct cpufreq_policy *policy = NULL;
+ struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu);
struct amd_cpudata *cpudata;
u32 prev_high = 0, cur_high = 0;
- int ret;
bool highest_perf_changed = false;
if (!amd_pstate_prefcore)
return;
- policy = cpufreq_cpu_get(cpu);
if (!policy)
return;
- cpudata = policy->driver_data;
-
- guard(mutex)(&amd_pstate_driver_lock);
-
- ret = amd_get_highest_perf(cpu, &cur_high);
- if (ret) {
- cpufreq_cpu_put(policy);
+ if (amd_get_highest_perf(cpu, &cur_high))
return;
- }
+
+ cpudata = policy->driver_data;
prev_high = READ_ONCE(cpudata->prefcore_ranking);
highest_perf_changed = (prev_high != cur_high);
@@ -852,11 +847,6 @@ static void amd_pstate_update_limits(unsigned int cpu)
if (cur_high < CPPC_MAX_PERF)
sched_set_itmt_core_prio((int)cur_high, cpu);
}
- cpufreq_cpu_put(policy);
-
- if (!highest_perf_changed)
- cpufreq_update_policy(cpu);
-
}
/*
@@ -894,48 +884,45 @@ static u32 amd_pstate_get_transition_latency(unsigned int cpu)
}
/*
- * amd_pstate_init_freq: Initialize the max_freq, min_freq,
- * nominal_freq and lowest_nonlinear_freq for
- * the @cpudata object.
+ * amd_pstate_init_freq: Initialize the nominal_freq and lowest_nonlinear_freq
+ * for the @cpudata object.
*
- * Requires: highest_perf, lowest_perf, nominal_perf and
- * lowest_nonlinear_perf members of @cpudata to be
- * initialized.
+ * Requires: all perf members of @cpudata to be initialized.
*
- * Returns 0 on success, non-zero value on failure.
+ * Returns 0 on success, non-zero value on failure.
*/
static int amd_pstate_init_freq(struct amd_cpudata *cpudata)
{
- int ret;
- u32 min_freq, max_freq;
- u32 highest_perf, nominal_perf, nominal_freq;
- u32 lowest_nonlinear_perf, lowest_nonlinear_freq;
+ u32 min_freq, max_freq, nominal_freq, lowest_nonlinear_freq;
struct cppc_perf_caps cppc_perf;
+ union perf_cached perf;
+ int ret;
ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf);
if (ret)
return ret;
-
- if (quirks && quirks->lowest_freq)
- min_freq = quirks->lowest_freq;
- else
- min_freq = cppc_perf.lowest_freq;
+ perf = READ_ONCE(cpudata->perf);
if (quirks && quirks->nominal_freq)
nominal_freq = quirks->nominal_freq;
else
nominal_freq = cppc_perf.nominal_freq;
+ nominal_freq *= 1000;
+
+ if (quirks && quirks->lowest_freq) {
+ min_freq = quirks->lowest_freq;
+ perf.lowest_perf = freq_to_perf(perf, nominal_freq, min_freq);
+ WRITE_ONCE(cpudata->perf, perf);
+ } else
+ min_freq = cppc_perf.lowest_freq;
- highest_perf = READ_ONCE(cpudata->highest_perf);
- nominal_perf = READ_ONCE(cpudata->nominal_perf);
- max_freq = div_u64((u64)highest_perf * nominal_freq, nominal_perf);
+ min_freq *= 1000;
- lowest_nonlinear_perf = READ_ONCE(cpudata->lowest_nonlinear_perf);
- lowest_nonlinear_freq = div_u64((u64)nominal_freq * lowest_nonlinear_perf, nominal_perf);
- WRITE_ONCE(cpudata->min_freq, min_freq * 1000);
- WRITE_ONCE(cpudata->lowest_nonlinear_freq, lowest_nonlinear_freq * 1000);
- WRITE_ONCE(cpudata->nominal_freq, nominal_freq * 1000);
- WRITE_ONCE(cpudata->max_freq, max_freq * 1000);
+ WRITE_ONCE(cpudata->nominal_freq, nominal_freq);
+
+ max_freq = perf_to_freq(perf, nominal_freq, perf.highest_perf);
+ lowest_nonlinear_freq = perf_to_freq(perf, nominal_freq, perf.lowest_nonlinear_perf);
+ WRITE_ONCE(cpudata->lowest_nonlinear_freq, lowest_nonlinear_freq);
/**
* Below values need to be initialized correctly, otherwise the driver will fail to load
@@ -960,9 +947,10 @@ static int amd_pstate_init_freq(struct amd_cpudata *cpudata)
static int amd_pstate_cpu_init(struct cpufreq_policy *policy)
{
- int min_freq, max_freq, ret;
- struct device *dev;
struct amd_cpudata *cpudata;
+ union perf_cached perf;
+ struct device *dev;
+ int ret;
/*
* Resetting PERF_CTL_MSR will put the CPU in P0 frequency,
@@ -993,19 +981,23 @@ static int amd_pstate_cpu_init(struct cpufreq_policy *policy)
if (ret)
goto free_cpudata1;
- min_freq = READ_ONCE(cpudata->min_freq);
- max_freq = READ_ONCE(cpudata->max_freq);
-
policy->cpuinfo.transition_latency = amd_pstate_get_transition_latency(policy->cpu);
policy->transition_delay_us = amd_pstate_get_transition_delay_us(policy->cpu);
- policy->min = min_freq;
- policy->max = max_freq;
+ perf = READ_ONCE(cpudata->perf);
- policy->cpuinfo.min_freq = min_freq;
- policy->cpuinfo.max_freq = max_freq;
+ policy->cpuinfo.min_freq = policy->min = perf_to_freq(perf,
+ cpudata->nominal_freq,
+ perf.lowest_perf);
+ policy->cpuinfo.max_freq = policy->max = perf_to_freq(perf,
+ cpudata->nominal_freq,
+ perf.highest_perf);
- policy->boost_enabled = READ_ONCE(cpudata->boost_supported);
+ ret = amd_pstate_cppc_enable(policy);
+ if (ret)
+ goto free_cpudata1;
+
+ policy->boost_supported = READ_ONCE(cpudata->boost_supported);
/* It will be updated by governor */
policy->cur = policy->cpuinfo.min_freq;
@@ -1027,9 +1019,6 @@ static int amd_pstate_cpu_init(struct cpufreq_policy *policy)
goto free_cpudata2;
}
- cpudata->max_limit_freq = max_freq;
- cpudata->min_limit_freq = min_freq;
-
policy->driver_data = cpudata;
if (!current_pstate_driver->adjust_perf)
@@ -1040,6 +1029,7 @@ static int amd_pstate_cpu_init(struct cpufreq_policy *policy)
free_cpudata2:
freq_qos_remove_request(&cpudata->req[0]);
free_cpudata1:
+ pr_warn("Failed to initialize CPU %d: %d\n", policy->cpu, ret);
kfree(cpudata);
return ret;
}
@@ -1054,28 +1044,6 @@ static void amd_pstate_cpu_exit(struct cpufreq_policy *policy)
kfree(cpudata);
}
-static int amd_pstate_cpu_resume(struct cpufreq_policy *policy)
-{
- int ret;
-
- ret = amd_pstate_cppc_enable(true);
- if (ret)
- pr_err("failed to enable amd-pstate during resume, return %d\n", ret);
-
- return ret;
-}
-
-static int amd_pstate_cpu_suspend(struct cpufreq_policy *policy)
-{
- int ret;
-
- ret = amd_pstate_cppc_enable(false);
- if (ret)
- pr_err("failed to disable amd-pstate during suspend, return %d\n", ret);
-
- return ret;
-}
-
/* Sysfs attributes */
/*
@@ -1086,27 +1054,27 @@ static int amd_pstate_cpu_suspend(struct cpufreq_policy *policy)
static ssize_t show_amd_pstate_max_freq(struct cpufreq_policy *policy,
char *buf)
{
- int max_freq;
- struct amd_cpudata *cpudata = policy->driver_data;
+ struct amd_cpudata *cpudata;
+ union perf_cached perf;
- max_freq = READ_ONCE(cpudata->max_freq);
- if (max_freq < 0)
- return max_freq;
+ cpudata = policy->driver_data;
+ perf = READ_ONCE(cpudata->perf);
- return sysfs_emit(buf, "%u\n", max_freq);
+ return sysfs_emit(buf, "%u\n",
+ perf_to_freq(perf, cpudata->nominal_freq, perf.highest_perf));
}
static ssize_t show_amd_pstate_lowest_nonlinear_freq(struct cpufreq_policy *policy,
char *buf)
{
- int freq;
- struct amd_cpudata *cpudata = policy->driver_data;
+ struct amd_cpudata *cpudata;
+ union perf_cached perf;
- freq = READ_ONCE(cpudata->lowest_nonlinear_freq);
- if (freq < 0)
- return freq;
+ cpudata = policy->driver_data;
+ perf = READ_ONCE(cpudata->perf);
- return sysfs_emit(buf, "%u\n", freq);
+ return sysfs_emit(buf, "%u\n",
+ perf_to_freq(perf, cpudata->nominal_freq, perf.lowest_nonlinear_perf));
}
/*
@@ -1116,18 +1084,17 @@ static ssize_t show_amd_pstate_lowest_nonlinear_freq(struct cpufreq_policy *poli
static ssize_t show_amd_pstate_highest_perf(struct cpufreq_policy *policy,
char *buf)
{
- u32 perf;
- struct amd_cpudata *cpudata = policy->driver_data;
+ struct amd_cpudata *cpudata;
- perf = READ_ONCE(cpudata->highest_perf);
+ cpudata = policy->driver_data;
- return sysfs_emit(buf, "%u\n", perf);
+ return sysfs_emit(buf, "%u\n", cpudata->perf.highest_perf);
}
static ssize_t show_amd_pstate_prefcore_ranking(struct cpufreq_policy *policy,
char *buf)
{
- u32 perf;
+ u8 perf;
struct amd_cpudata *cpudata = policy->driver_data;
perf = READ_ONCE(cpudata->prefcore_ranking);
@@ -1168,8 +1135,10 @@ static ssize_t show_energy_performance_available_preferences(
static ssize_t store_energy_performance_preference(
struct cpufreq_policy *policy, const char *buf, size_t count)
{
+ struct amd_cpudata *cpudata = policy->driver_data;
char str_preference[21];
ssize_t ret;
+ u8 epp;
ret = sscanf(buf, "%20s", str_preference);
if (ret != 1)
@@ -1179,9 +1148,17 @@ static ssize_t store_energy_performance_preference(
if (ret < 0)
return -EINVAL;
- guard(mutex)(&amd_pstate_limits_lock);
+ if (!ret)
+ epp = cpudata->epp_default;
+ else
+ epp = epp_values[ret];
- ret = amd_pstate_set_energy_pref_index(policy, ret);
+ if (epp > 0 && policy->policy == CPUFREQ_POLICY_PERFORMANCE) {
+ pr_debug("EPP cannot be set under performance policy\n");
+ return -EBUSY;
+ }
+
+ ret = amd_pstate_set_epp(policy, epp);
return ret ? ret : count;
}
@@ -1190,9 +1167,11 @@ static ssize_t show_energy_performance_preference(
struct cpufreq_policy *policy, char *buf)
{
struct amd_cpudata *cpudata = policy->driver_data;
- int preference;
+ u8 preference, epp;
+
+ epp = FIELD_GET(AMD_CPPC_EPP_PERF_MASK, cpudata->cppc_req_cached);
- switch (cpudata->epp_cached) {
+ switch (epp) {
case AMD_CPPC_EPP_PERFORMANCE:
preference = EPP_INDEX_PERFORMANCE;
break;
@@ -1214,7 +1193,6 @@ static ssize_t show_energy_performance_preference(
static void amd_pstate_driver_cleanup(void)
{
- amd_pstate_cppc_enable(false);
cppc_state = AMD_PSTATE_DISABLE;
current_pstate_driver = NULL;
}
@@ -1248,14 +1226,6 @@ static int amd_pstate_register_driver(int mode)
cppc_state = mode;
- ret = amd_pstate_cppc_enable(true);
- if (ret) {
- pr_err("failed to enable cppc during amd-pstate driver registration, return %d\n",
- ret);
- amd_pstate_driver_cleanup();
- return ret;
- }
-
/* at least one CPU supports CPB */
current_pstate_driver->boost_enabled = cpu_feature_enabled(X86_FEATURE_CPB);
@@ -1353,8 +1323,10 @@ int amd_pstate_update_status(const char *buf, size_t size)
if (mode_idx < 0 || mode_idx >= AMD_PSTATE_MAX)
return -EINVAL;
- if (mode_state_machine[cppc_state][mode_idx])
+ if (mode_state_machine[cppc_state][mode_idx]) {
+ guard(mutex)(&amd_pstate_driver_lock);
return mode_state_machine[cppc_state][mode_idx](mode_idx);
+ }
return 0;
}
@@ -1375,7 +1347,6 @@ static ssize_t status_store(struct device *a, struct device_attribute *b,
char *p = memchr(buf, '\n', count);
int ret;
- guard(mutex)(&amd_pstate_driver_lock);
ret = amd_pstate_update_status(buf, p ? p - buf : count);
return ret < 0 ? ret : count;
@@ -1451,10 +1422,11 @@ static bool amd_pstate_acpi_pm_profile_undefined(void)
static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy)
{
- int min_freq, max_freq, ret;
struct amd_cpudata *cpudata;
+ union perf_cached perf;
struct device *dev;
u64 value;
+ int ret;
/*
* Resetting PERF_CTL_MSR will put the CPU in P0 frequency,
@@ -1485,20 +1457,25 @@ static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy)
if (ret)
goto free_cpudata1;
- min_freq = READ_ONCE(cpudata->min_freq);
- max_freq = READ_ONCE(cpudata->max_freq);
+ perf = READ_ONCE(cpudata->perf);
+
+ policy->cpuinfo.min_freq = policy->min = perf_to_freq(perf,
+ cpudata->nominal_freq,
+ perf.lowest_perf);
+ policy->cpuinfo.max_freq = policy->max = perf_to_freq(perf,
+ cpudata->nominal_freq,
+ perf.highest_perf);
+ policy->driver_data = cpudata;
+
+ ret = amd_pstate_cppc_enable(policy);
+ if (ret)
+ goto free_cpudata1;
- policy->cpuinfo.min_freq = min_freq;
- policy->cpuinfo.max_freq = max_freq;
/* It will be updated by governor */
policy->cur = policy->cpuinfo.min_freq;
- policy->driver_data = cpudata;
-
- policy->min = policy->cpuinfo.min_freq;
- policy->max = policy->cpuinfo.max_freq;
- policy->boost_enabled = READ_ONCE(cpudata->boost_supported);
+ policy->boost_supported = READ_ONCE(cpudata->boost_supported);
/*
* Set the policy to provide a valid fallback value in case
@@ -1518,13 +1495,8 @@ static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy)
if (ret)
return ret;
WRITE_ONCE(cpudata->cppc_req_cached, value);
-
- ret = rdmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_CAP1, &value);
- if (ret)
- return ret;
- WRITE_ONCE(cpudata->cppc_cap1_cached, value);
}
- ret = amd_pstate_set_epp(cpudata, cpudata->epp_default);
+ ret = amd_pstate_set_epp(policy, cpudata->epp_default);
if (ret)
return ret;
@@ -1533,6 +1505,7 @@ static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy)
return 0;
free_cpudata1:
+ pr_warn("Failed to initialize CPU %d: %d\n", policy->cpu, ret);
kfree(cpudata);
return ret;
}
@@ -1552,24 +1525,21 @@ static void amd_pstate_epp_cpu_exit(struct cpufreq_policy *policy)
static int amd_pstate_epp_update_limit(struct cpufreq_policy *policy)
{
struct amd_cpudata *cpudata = policy->driver_data;
- u32 epp;
+ union perf_cached perf;
+ u8 epp;
- amd_pstate_update_min_max_limit(policy);
+ if (policy->min != cpudata->min_limit_freq || policy->max != cpudata->max_limit_freq)
+ amd_pstate_update_min_max_limit(policy);
if (cpudata->policy == CPUFREQ_POLICY_PERFORMANCE)
epp = 0;
else
- epp = READ_ONCE(cpudata->epp_cached);
+ epp = FIELD_GET(AMD_CPPC_EPP_PERF_MASK, cpudata->cppc_req_cached);
- if (trace_amd_pstate_epp_perf_enabled()) {
- trace_amd_pstate_epp_perf(cpudata->cpu, cpudata->highest_perf, epp,
- cpudata->min_limit_perf,
- cpudata->max_limit_perf,
- policy->boost_enabled);
- }
+ perf = READ_ONCE(cpudata->perf);
- return amd_pstate_update_perf(cpudata, cpudata->min_limit_perf, 0U,
- cpudata->max_limit_perf, epp, false);
+ return amd_pstate_update_perf(policy, perf.min_limit_perf, 0U,
+ perf.max_limit_perf, epp, false);
}
static int amd_pstate_epp_set_policy(struct cpufreq_policy *policy)
@@ -1580,9 +1550,6 @@ static int amd_pstate_epp_set_policy(struct cpufreq_policy *policy)
if (!policy->cpuinfo.max_freq)
return -ENODEV;
- pr_debug("set_policy: cpuinfo.max %u policy->max %u\n",
- policy->cpuinfo.max_freq, policy->max);
-
cpudata->policy = policy->policy;
ret = amd_pstate_epp_update_limit(policy);
@@ -1598,82 +1565,28 @@ static int amd_pstate_epp_set_policy(struct cpufreq_policy *policy)
return 0;
}
-static int amd_pstate_epp_reenable(struct cpufreq_policy *policy)
-{
- struct amd_cpudata *cpudata = policy->driver_data;
- u64 max_perf;
- int ret;
-
- ret = amd_pstate_cppc_enable(true);
- if (ret)
- pr_err("failed to enable amd pstate during resume, return %d\n", ret);
-
- max_perf = READ_ONCE(cpudata->highest_perf);
-
- if (trace_amd_pstate_epp_perf_enabled()) {
- trace_amd_pstate_epp_perf(cpudata->cpu, cpudata->highest_perf,
- cpudata->epp_cached,
- FIELD_GET(AMD_CPPC_MIN_PERF_MASK, cpudata->cppc_req_cached),
- max_perf, policy->boost_enabled);
- }
-
- return amd_pstate_update_perf(cpudata, 0, 0, max_perf, cpudata->epp_cached, false);
-}
-
static int amd_pstate_epp_cpu_online(struct cpufreq_policy *policy)
{
- struct amd_cpudata *cpudata = policy->driver_data;
- int ret;
+ pr_debug("AMD CPU Core %d going online\n", policy->cpu);
- pr_debug("AMD CPU Core %d going online\n", cpudata->cpu);
-
- ret = amd_pstate_epp_reenable(policy);
- if (ret)
- return ret;
- cpudata->suspended = false;
-
- return 0;
+ return amd_pstate_cppc_enable(policy);
}
static int amd_pstate_epp_cpu_offline(struct cpufreq_policy *policy)
{
- struct amd_cpudata *cpudata = policy->driver_data;
- int min_perf;
-
- if (cpudata->suspended)
- return 0;
-
- min_perf = READ_ONCE(cpudata->lowest_perf);
-
- guard(mutex)(&amd_pstate_limits_lock);
-
- if (trace_amd_pstate_epp_perf_enabled()) {
- trace_amd_pstate_epp_perf(cpudata->cpu, cpudata->highest_perf,
- AMD_CPPC_EPP_BALANCE_POWERSAVE,
- min_perf, min_perf, policy->boost_enabled);
- }
-
- return amd_pstate_update_perf(cpudata, min_perf, 0, min_perf,
- AMD_CPPC_EPP_BALANCE_POWERSAVE, false);
+ return 0;
}
static int amd_pstate_epp_suspend(struct cpufreq_policy *policy)
{
struct amd_cpudata *cpudata = policy->driver_data;
- int ret;
- /* avoid suspending when EPP is not enabled */
- if (cppc_state != AMD_PSTATE_ACTIVE)
- return 0;
+ /* invalidate to ensure it's rewritten during resume */
+ cpudata->cppc_req_cached = 0;
/* set this flag to avoid setting core offline*/
cpudata->suspended = true;
- /* disable CPPC in lowlevel firmware */
- ret = amd_pstate_cppc_enable(false);
- if (ret)
- pr_err("failed to suspend, return %d\n", ret);
-
return 0;
}
@@ -1682,10 +1595,12 @@ static int amd_pstate_epp_resume(struct cpufreq_policy *policy)
struct amd_cpudata *cpudata = policy->driver_data;
if (cpudata->suspended) {
- guard(mutex)(&amd_pstate_limits_lock);
+ int ret;
/* enable amd pstate from suspend state*/
- amd_pstate_epp_reenable(policy);
+ ret = amd_pstate_epp_update_limit(policy);
+ if (ret)
+ return ret;
cpudata->suspended = false;
}
@@ -1700,8 +1615,6 @@ static struct cpufreq_driver amd_pstate_driver = {
.fast_switch = amd_pstate_fast_switch,
.init = amd_pstate_cpu_init,
.exit = amd_pstate_cpu_exit,
- .suspend = amd_pstate_cpu_suspend,
- .resume = amd_pstate_cpu_resume,
.set_boost = amd_pstate_set_boost,
.update_limits = amd_pstate_update_limits,
.name = "amd-pstate",
@@ -1868,7 +1781,6 @@ static int __init amd_pstate_init(void)
global_attr_free:
cpufreq_unregister_driver(current_pstate_driver);
- amd_pstate_cppc_enable(false);
return ret;
}
device_initcall(amd_pstate_init);
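
A standalone sketch of the freq_to_perf()/perf_to_freq() helpers introduced in this file: both scale linearly through the nominal operating point, and the perf result is clamped to the [lowest_perf, highest_perf] range. The capability and frequency numbers below are made up.

#include <stdint.h>
#include <stdio.h>

#define DIV_ROUND_UP_ULL(x, d)	(((x) + (d) - 1) / (d))

struct perf_caps {
	uint8_t highest_perf;
	uint8_t nominal_perf;
	uint8_t lowest_perf;
};

static uint8_t freq_to_perf(struct perf_caps p, uint32_t nominal_khz, uint32_t freq_khz)
{
	uint64_t perf = DIV_ROUND_UP_ULL((uint64_t)freq_khz * p.nominal_perf, nominal_khz);

	if (perf < p.lowest_perf)
		perf = p.lowest_perf;
	if (perf > p.highest_perf)
		perf = p.highest_perf;
	return (uint8_t)perf;
}

static uint32_t perf_to_freq(struct perf_caps p, uint32_t nominal_khz, uint8_t perf)
{
	return (uint32_t)DIV_ROUND_UP_ULL((uint64_t)nominal_khz * perf, p.nominal_perf);
}

int main(void)
{
	struct perf_caps p = { .highest_perf = 166, .nominal_perf = 120, .lowest_perf = 20 };
	uint32_t nominal_khz = 3000000;		/* 3.0 GHz nominal, for illustration */

	printf("4.2 GHz -> perf %u\n", freq_to_perf(p, nominal_khz, 4200000));
	printf("perf 166 -> %u kHz\n", perf_to_freq(p, nominal_khz, 166));
	return 0;
}
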
diff --git a/drivers/cpufreq/amd-pstate.h b/drivers/cpufreq/amd-pstate.h
index 9747e3be6cee..fbe1c08d3f06 100644
--- a/drivers/cpufreq/amd-pstate.h
+++ b/drivers/cpufreq/amd-pstate.h
@@ -13,6 +13,36 @@
/*********************************************************************
* AMD P-state INTERFACE *
*********************************************************************/
+
+/**
+ * union perf_cached - A union to cache performance-related data.
+ * @highest_perf: the maximum performance an individual processor may reach,
+ * assuming ideal conditions
+ * For platforms that support the preferred core feature, the highest_perf value may be
+ * configured to any value in the range 166-255 by the firmware (because the preferred
+ * core ranking is encoded in the highest_perf value). To maintain consistency across
+ * all platforms, we split the highest_perf and preferred core ranking values into
+ * cpudata->perf.highest_perf and cpudata->prefcore_ranking.
+ * @nominal_perf: the maximum sustained performance level of the processor,
+ * assuming ideal operating conditions
+ * @lowest_nonlinear_perf: the lowest performance level at which nonlinear power
+ * savings are achieved
+ * @lowest_perf: the absolute lowest performance level of the processor
+ * @min_limit_perf: Cached value of the performance corresponding to policy->min
+ * @max_limit_perf: Cached value of the performance corresponding to policy->max
+ */
+union perf_cached {
+ struct {
+ u8 highest_perf;
+ u8 nominal_perf;
+ u8 lowest_nonlinear_perf;
+ u8 lowest_perf;
+ u8 min_limit_perf;
+ u8 max_limit_perf;
+ };
+ u64 val;
+};
+
/**
* struct amd_aperf_mperf
* @aperf: actual performance frequency clock count
@@ -30,24 +60,11 @@ struct amd_aperf_mperf {
* @cpu: CPU number
* @req: constraint request to apply
* @cppc_req_cached: cached performance request hints
- * @highest_perf: the maximum performance an individual processor may reach,
- * assuming ideal conditions
- * For platforms that do not support the preferred core feature, the
- * highest_pef may be configured with 166 or 255, to avoid max frequency
- * calculated wrongly. we take the fixed value as the highest_perf.
- * @nominal_perf: the maximum sustained performance level of the processor,
- * assuming ideal operating conditions
- * @lowest_nonlinear_perf: the lowest performance level at which nonlinear power
- * savings are achieved
- * @lowest_perf: the absolute lowest performance level of the processor
+ * @perf: cached performance-related data
* @prefcore_ranking: the preferred core ranking, the higher value indicates a higher
* priority.
- * @min_limit_perf: Cached value of the performance corresponding to policy->min
- * @max_limit_perf: Cached value of the performance corresponding to policy->max
* @min_limit_freq: Cached value of policy->min (in khz)
* @max_limit_freq: Cached value of policy->max (in khz)
- * @max_freq: the frequency (in khz) that mapped to highest_perf
- * @min_freq: the frequency (in khz) that mapped to lowest_perf
* @nominal_freq: the frequency (in khz) that mapped to nominal_perf
* @lowest_nonlinear_freq: the frequency (in khz) that mapped to lowest_nonlinear_perf
* @cur: Difference of Aperf/Mperf/tsc count between last and current sample
@@ -59,7 +76,6 @@ struct amd_aperf_mperf {
* AMD P-State driver supports preferred core feature.
* @epp_cached: Cached CPPC energy-performance preference value
* @policy: Cpufreq policy value
- * @cppc_cap1_cached Cached MSR_AMD_CPPC_CAP1 register value
*
* The amd_cpudata is key private data for each CPU thread in AMD P-State, and
* represents all the attributes and goals that AMD P-State requests at runtime.
@@ -70,18 +86,11 @@ struct amd_cpudata {
struct freq_qos_request req[2];
u64 cppc_req_cached;
- u32 highest_perf;
- u32 nominal_perf;
- u32 lowest_nonlinear_perf;
- u32 lowest_perf;
- u32 prefcore_ranking;
- u32 min_limit_perf;
- u32 max_limit_perf;
- u32 min_limit_freq;
- u32 max_limit_freq;
+ union perf_cached perf;
- u32 max_freq;
- u32 min_freq;
+ u8 prefcore_ranking;
+ u32 min_limit_freq;
+ u32 max_limit_freq;
u32 nominal_freq;
u32 lowest_nonlinear_freq;
@@ -93,11 +102,9 @@ struct amd_cpudata {
bool hw_prefcore;
/* EPP feature related attributes*/
- s16 epp_cached;
u32 policy;
- u64 cppc_cap1_cached;
bool suspended;
- s16 epp_default;
+ u8 epp_default;
};
/*
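
The union perf_cached added above packs six u8 performance levels into a single u64, so the whole set can be snapshotted or copied in one access of .val. Below is a minimal user-space sketch of the same layout, assuming nothing beyond standard C11; the field names mirror the kernel struct, while the values and the print are purely illustrative:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Same idea as the driver's union perf_cached: individual u8 fields
 * overlay a single u64 so they can be copied or compared at once. */
union perf_cached {
        struct {
                uint8_t highest_perf;
                uint8_t nominal_perf;
                uint8_t lowest_nonlinear_perf;
                uint8_t lowest_perf;
                uint8_t min_limit_perf;
                uint8_t max_limit_perf;
        };
        uint64_t val;
};

int main(void)
{
        union perf_cached perf = { .val = 0 };

        assert(sizeof(perf) == sizeof(uint64_t));

        perf.highest_perf = 228;
        perf.nominal_perf = 166;
        perf.lowest_perf = 20;

        /* One 64-bit copy moves every cached level at once. */
        union perf_cached snapshot = { .val = perf.val };

        printf("highest=%u nominal=%u lowest=%u\n",
               (unsigned)snapshot.highest_perf,
               (unsigned)snapshot.nominal_perf,
               (unsigned)snapshot.lowest_perf);
        return 0;
}
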
diff --git a/drivers/cpufreq/apple-soc-cpufreq.c b/drivers/cpufreq/apple-soc-cpufreq.c
index 269b18c62d04..4994c86feb57 100644
--- a/drivers/cpufreq/apple-soc-cpufreq.c
+++ b/drivers/cpufreq/apple-soc-cpufreq.c
@@ -229,12 +229,6 @@ static int apple_soc_cpufreq_find_cluster(struct cpufreq_policy *policy,
return 0;
}
-static struct freq_attr *apple_soc_cpufreq_hw_attr[] = {
- &cpufreq_freq_attr_scaling_available_freqs,
- NULL, /* Filled in below if boost is enabled */
- NULL,
-};
-
static int apple_soc_cpufreq_init(struct cpufreq_policy *policy)
{
int ret, i;
@@ -316,16 +310,6 @@ static int apple_soc_cpufreq_init(struct cpufreq_policy *policy)
policy->fast_switch_possible = true;
policy->suspend_freq = freq_table[0].frequency;
- if (policy_has_boost_freq(policy)) {
- ret = cpufreq_enable_boost_support();
- if (ret) {
- dev_warn(cpu_dev, "failed to enable boost: %d\n", ret);
- } else {
- apple_soc_cpufreq_hw_attr[1] = &cpufreq_freq_attr_scaling_boost_freqs;
- apple_soc_cpufreq_driver.boost_enabled = true;
- }
- }
-
return 0;
out_free_cpufreq_table:
@@ -360,7 +344,7 @@ static struct cpufreq_driver apple_soc_cpufreq_driver = {
.target_index = apple_soc_cpufreq_set_target,
.fast_switch = apple_soc_cpufreq_fast_switch,
.register_em = cpufreq_register_em_with_opp,
- .attr = apple_soc_cpufreq_hw_attr,
+ .set_boost = cpufreq_boost_set_sw,
.suspend = cpufreq_generic_suspend,
};
diff --git a/drivers/cpufreq/armada-37xx-cpufreq.c b/drivers/cpufreq/armada-37xx-cpufreq.c
index bea41ccabf1f..f28a4435fba7 100644
--- a/drivers/cpufreq/armada-37xx-cpufreq.c
+++ b/drivers/cpufreq/armada-37xx-cpufreq.c
@@ -102,11 +102,7 @@ struct armada_37xx_dvfs {
};
static struct armada_37xx_dvfs armada_37xx_dvfs[] = {
- /*
- * The cpufreq scaling for 1.2 GHz variant of the SOC is currently
- * unstable because we do not know how to configure it properly.
- */
- /* {.cpu_freq_max = 1200*1000*1000, .divider = {1, 2, 4, 6} }, */
+ {.cpu_freq_max = 1200*1000*1000, .divider = {1, 2, 4, 6} },
{.cpu_freq_max = 1000*1000*1000, .divider = {1, 2, 4, 5} },
{.cpu_freq_max = 800*1000*1000, .divider = {1, 2, 3, 4} },
{.cpu_freq_max = 600*1000*1000, .divider = {2, 4, 5, 6} },
diff --git a/drivers/cpufreq/armada-8k-cpufreq.c b/drivers/cpufreq/armada-8k-cpufreq.c
index 7a979db81f09..5a3545bd0d8d 100644
--- a/drivers/cpufreq/armada-8k-cpufreq.c
+++ b/drivers/cpufreq/armada-8k-cpufreq.c
@@ -47,7 +47,7 @@ static void __init armada_8k_get_sharing_cpus(struct clk *cur_clk,
{
int cpu;
- for_each_possible_cpu(cpu) {
+ for_each_present_cpu(cpu) {
struct device *cpu_dev;
struct clk *clk;
diff --git a/drivers/cpufreq/bmips-cpufreq.c b/drivers/cpufreq/bmips-cpufreq.c
index 17a4c174553d..36051880640b 100644
--- a/drivers/cpufreq/bmips-cpufreq.c
+++ b/drivers/cpufreq/bmips-cpufreq.c
@@ -150,7 +150,6 @@ static struct cpufreq_driver bmips_cpufreq_driver = {
.get = bmips_cpufreq_get,
.init = bmips_cpufreq_init,
.exit = bmips_cpufreq_exit,
- .attr = cpufreq_generic_attr,
.name = BMIPS_CPUFREQ_PREFIX,
};
diff --git a/drivers/cpufreq/brcmstb-avs-cpufreq.c b/drivers/cpufreq/brcmstb-avs-cpufreq.c
index 2fd0f6be6fa3..7b841a086acc 100644
--- a/drivers/cpufreq/brcmstb-avs-cpufreq.c
+++ b/drivers/cpufreq/brcmstb-avs-cpufreq.c
@@ -720,7 +720,6 @@ cpufreq_freq_attr_ro(brcm_avs_voltage);
cpufreq_freq_attr_ro(brcm_avs_frequency);
static struct freq_attr *brcm_avs_cpufreq_attr[] = {
- &cpufreq_freq_attr_scaling_available_freqs,
&brcm_avs_pstate,
&brcm_avs_mode,
&brcm_avs_pmap,
diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c
index 8f512448382f..b3d74f9adcf0 100644
--- a/drivers/cpufreq/cppc_cpufreq.c
+++ b/drivers/cpufreq/cppc_cpufreq.c
@@ -34,8 +34,6 @@
*/
static LIST_HEAD(cpu_data_list);
-static bool boost_supported;
-
static struct cpufreq_driver cppc_cpufreq_driver;
#ifdef CONFIG_ACPI_CPPC_CPUFREQ_FIE
@@ -653,7 +651,7 @@ static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy)
* is supported.
*/
if (caps->highest_perf > caps->nominal_perf)
- boost_supported = true;
+ policy->boost_supported = true;
/* Set policy->cur to max now. The governors will adjust later. */
policy->cur = cppc_perf_to_khz(caps, caps->highest_perf);
@@ -791,11 +789,6 @@ static int cppc_cpufreq_set_boost(struct cpufreq_policy *policy, int state)
struct cppc_perf_caps *caps = &cpu_data->perf_caps;
int ret;
- if (!boost_supported) {
- pr_err("BOOST not supported by CPU or firmware\n");
- return -EINVAL;
- }
-
if (state)
policy->max = cppc_perf_to_khz(caps, caps->highest_perf);
else
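
With the per-policy flag, whether boost can be toggled is decided once at init from the CPPC capabilities (highest_perf above nominal_perf) rather than via a driver-global variable. A hedged user-space sketch of that decision and of how a set_boost callback then picks the frequency ceiling; the types and perf_to_khz() conversion here are stand-ins, not the kernel helpers:

#include <stdbool.h>
#include <stdio.h>

struct perf_caps {
        unsigned int highest_perf;
        unsigned int nominal_perf;
};

/* Stand-in for the kernel's perf-to-frequency conversion. */
static unsigned int perf_to_khz(unsigned int perf)
{
        return perf * 100000 / 255;     /* illustrative scaling only */
}

int main(void)
{
        struct perf_caps caps = { .highest_perf = 300, .nominal_perf = 240 };

        /* Boost is supported only if the hardware exposes headroom. */
        bool boost_supported = caps.highest_perf > caps.nominal_perf;

        /* set_boost(state): pick the policy ceiling accordingly. */
        for (int state = 0; state <= 1; state++) {
                unsigned int max_khz = state ? perf_to_khz(caps.highest_perf)
                                             : perf_to_khz(caps.nominal_perf);
                printf("boost %s -> max %u kHz (supported=%d)\n",
                       state ? "on" : "off", max_khz, (int)boost_supported);
        }
        return 0;
}
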
diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c
index 3a7c3372bda7..e80dd982a3e2 100644
--- a/drivers/cpufreq/cpufreq-dt.c
+++ b/drivers/cpufreq/cpufreq-dt.c
@@ -36,12 +36,6 @@ struct private_data {
static LIST_HEAD(priv_list);
-static struct freq_attr *cpufreq_dt_attr[] = {
- &cpufreq_freq_attr_scaling_available_freqs,
- NULL, /* Extra space for boost-attr if required */
- NULL,
-};
-
static struct private_data *cpufreq_dt_find_data(int cpu)
{
struct private_data *priv;
@@ -120,21 +114,7 @@ static int cpufreq_init(struct cpufreq_policy *policy)
policy->cpuinfo.transition_latency = transition_latency;
policy->dvfs_possible_from_any_cpu = true;
- /* Support turbo/boost mode */
- if (policy_has_boost_freq(policy)) {
- /* This gets disabled by core on driver unregister */
- ret = cpufreq_enable_boost_support();
- if (ret)
- goto out_clk_put;
- cpufreq_dt_attr[1] = &cpufreq_freq_attr_scaling_boost_freqs;
- }
-
return 0;
-
-out_clk_put:
- clk_put(cpu_clk);
-
- return ret;
}
static int cpufreq_online(struct cpufreq_policy *policy)
@@ -169,7 +149,7 @@ static struct cpufreq_driver dt_cpufreq_driver = {
.offline = cpufreq_offline,
.register_em = cpufreq_register_em_with_opp,
.name = "cpufreq-dt",
- .attr = cpufreq_dt_attr,
+ .set_boost = cpufreq_boost_set_sw,
.suspend = cpufreq_generic_suspend,
};
@@ -303,7 +283,7 @@ static int dt_cpufreq_probe(struct platform_device *pdev)
int ret, cpu;
/* Request resources early so we can return in case of -EPROBE_DEFER */
- for_each_possible_cpu(cpu) {
+ for_each_present_cpu(cpu) {
ret = dt_cpufreq_early_init(&pdev->dev, cpu);
if (ret)
goto err;
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 30ffbddc7ece..0cf5a320bb5e 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -88,6 +88,7 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
struct cpufreq_governor *new_gov,
unsigned int new_pol);
static bool cpufreq_boost_supported(void);
+static int cpufreq_boost_trigger_state(int state);
/*
* Two notifier lists: the "policy" list is involved in the
@@ -631,6 +632,9 @@ static ssize_t store_local_boost(struct cpufreq_policy *policy,
if (!cpufreq_driver->boost_enabled)
return -EINVAL;
+ if (!policy->boost_supported)
+ return -EINVAL;
+
if (policy->boost_enabled == enable)
return count;
@@ -729,18 +733,26 @@ show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
show_one(scaling_min_freq, min);
show_one(scaling_max_freq, max);
-__weak unsigned int arch_freq_get_on_cpu(int cpu)
+__weak int arch_freq_get_on_cpu(int cpu)
{
- return 0;
+ return -EOPNOTSUPP;
+}
+
+static inline bool cpufreq_avg_freq_supported(struct cpufreq_policy *policy)
+{
+ return arch_freq_get_on_cpu(policy->cpu) != -EOPNOTSUPP;
}
static ssize_t show_scaling_cur_freq(struct cpufreq_policy *policy, char *buf)
{
ssize_t ret;
- unsigned int freq;
+ int freq;
- freq = arch_freq_get_on_cpu(policy->cpu);
- if (freq)
+ freq = IS_ENABLED(CONFIG_CPUFREQ_ARCH_CUR_FREQ)
+ ? arch_freq_get_on_cpu(policy->cpu)
+ : 0;
+
+ if (freq > 0)
ret = sysfs_emit(buf, "%u\n", freq);
else if (cpufreq_driver->setpolicy && cpufreq_driver->get)
ret = sysfs_emit(buf, "%u\n", cpufreq_driver->get(policy->cpu));
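
The reworked show_scaling_cur_freq() tries the architecture counter first (and only when CONFIG_CPUFREQ_ARCH_CUR_FREQ is enabled), then a setpolicy driver's ->get() callback, then the cached policy value. A simplified user-space sketch of that fallback order follows; both callbacks are stubs standing in for the kernel hooks, and the frequencies are invented:

#include <stdio.h>

#define EOPNOTSUPP 95

/* Stub standing in for arch_freq_get_on_cpu(). */
static int arch_freq_get_on_cpu(int cpu)
{
        (void)cpu;
        return -EOPNOTSUPP;     /* pretend the arch has no cycle counters */
}

/* Stub standing in for a setpolicy driver's ->get() hook. */
static unsigned int driver_get(int cpu)
{
        (void)cpu;
        return 2400000;         /* kHz */
}

static unsigned int scaling_cur_freq(int cpu, unsigned int policy_cur,
                                     int have_setpolicy_get)
{
        int freq = arch_freq_get_on_cpu(cpu);

        if (freq > 0)                   /* arch counter available */
                return (unsigned int)freq;
        if (have_setpolicy_get)         /* HWP-style drivers */
                return driver_get(cpu);
        return policy_cur;              /* last software-requested frequency */
}

int main(void)
{
        printf("%u kHz\n", scaling_cur_freq(0, 1800000, 1));
        return 0;
}
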
@@ -785,6 +797,19 @@ static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
}
/*
+ * show_cpuinfo_avg_freq - average CPU frequency as detected by hardware
+ */
+static ssize_t show_cpuinfo_avg_freq(struct cpufreq_policy *policy,
+ char *buf)
+{
+ int avg_freq = arch_freq_get_on_cpu(policy->cpu);
+
+ if (avg_freq > 0)
+ return sysfs_emit(buf, "%u\n", avg_freq);
+ return avg_freq != 0 ? avg_freq : -EINVAL;
+}
+
+/*
* show_scaling_governor - show the current policy for the specified CPU
*/
static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
@@ -946,6 +971,7 @@ static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
}
cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
+cpufreq_freq_attr_ro(cpuinfo_avg_freq);
cpufreq_freq_attr_ro(cpuinfo_min_freq);
cpufreq_freq_attr_ro(cpuinfo_max_freq);
cpufreq_freq_attr_ro(cpuinfo_transition_latency);
@@ -1059,6 +1085,21 @@ static int cpufreq_add_dev_interface(struct cpufreq_policy *policy)
struct freq_attr **drv_attr;
int ret = 0;
+ /* Attributes that need freq_table */
+ if (policy->freq_table) {
+ ret = sysfs_create_file(&policy->kobj,
+ &cpufreq_freq_attr_scaling_available_freqs.attr);
+ if (ret)
+ return ret;
+
+ if (cpufreq_boost_supported()) {
+ ret = sysfs_create_file(&policy->kobj,
+ &cpufreq_freq_attr_scaling_boost_freqs.attr);
+ if (ret)
+ return ret;
+ }
+ }
+
/* set up files for this cpu device */
drv_attr = cpufreq_driver->attr;
while (drv_attr && *drv_attr) {
@@ -1073,6 +1114,12 @@ static int cpufreq_add_dev_interface(struct cpufreq_policy *policy)
return ret;
}
+ if (cpufreq_avg_freq_supported(policy)) {
+ ret = sysfs_create_file(&policy->kobj, &cpuinfo_avg_freq.attr);
+ if (ret)
+ return ret;
+ }
+
ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
if (ret)
return ret;

@@ -1571,14 +1618,14 @@ static int cpufreq_online(unsigned int cpu)
policy->cdev = of_cpufreq_cooling_register(policy);
/* Let the per-policy boost flag mirror the cpufreq_driver boost during init */
- if (cpufreq_driver->set_boost &&
+ if (cpufreq_driver->set_boost && policy->boost_supported &&
policy->boost_enabled != cpufreq_boost_enabled()) {
policy->boost_enabled = cpufreq_boost_enabled();
ret = cpufreq_driver->set_boost(policy, policy->boost_enabled);
if (ret) {
/* If the set_boost fails, the online operation is not affected */
pr_info("%s: CPU%d: Cannot %s BOOST\n", __func__, policy->cpu,
- policy->boost_enabled ? "enable" : "disable");
+ str_enable_disable(policy->boost_enabled));
policy->boost_enabled = !policy->boost_enabled;
}
}
@@ -2772,7 +2819,7 @@ EXPORT_SYMBOL_GPL(cpufreq_update_limits);
/*********************************************************************
* BOOST *
*********************************************************************/
-static int cpufreq_boost_set_sw(struct cpufreq_policy *policy, int state)
+int cpufreq_boost_set_sw(struct cpufreq_policy *policy, int state)
{
int ret;
@@ -2791,8 +2838,9 @@ static int cpufreq_boost_set_sw(struct cpufreq_policy *policy, int state)
return 0;
}
+EXPORT_SYMBOL_GPL(cpufreq_boost_set_sw);
-int cpufreq_boost_trigger_state(int state)
+static int cpufreq_boost_trigger_state(int state)
{
struct cpufreq_policy *policy;
unsigned long flags;
@@ -2807,6 +2855,9 @@ int cpufreq_boost_trigger_state(int state)
cpus_read_lock();
for_each_active_policy(policy) {
+ if (!policy->boost_supported)
+ continue;
+
policy->boost_enabled = state;
ret = cpufreq_driver->set_boost(policy, state);
if (ret) {
@@ -2854,21 +2905,6 @@ static void remove_boost_sysfs_file(void)
sysfs_remove_file(cpufreq_global_kobject, &boost.attr);
}
-int cpufreq_enable_boost_support(void)
-{
- if (!cpufreq_driver)
- return -EINVAL;
-
- if (cpufreq_boost_supported())
- return 0;
-
- cpufreq_driver->set_boost = cpufreq_boost_set_sw;
-
- /* This will get removed on driver unregister */
- return create_boost_sysfs_file();
-}
-EXPORT_SYMBOL_GPL(cpufreq_enable_boost_support);
-
bool cpufreq_boost_enabled(void)
{
return cpufreq_driver->boost_enabled;
diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
index af44ee6a6430..1a7fcaf39cc9 100644
--- a/drivers/cpufreq/cpufreq_governor.c
+++ b/drivers/cpufreq/cpufreq_governor.c
@@ -145,7 +145,23 @@ unsigned int dbs_update(struct cpufreq_policy *policy)
time_elapsed = update_time - j_cdbs->prev_update_time;
j_cdbs->prev_update_time = update_time;
- idle_time = cur_idle_time - j_cdbs->prev_cpu_idle;
+ /*
+ * cur_idle_time could be smaller than j_cdbs->prev_cpu_idle if
+ * it's obtained from get_cpu_idle_time_jiffy() when NOHZ is
+ * off, where idle_time is calculated as the difference between
+ * the time elapsed in jiffies and the "busy time" obtained from CPU
+ * statistics. If a CPU is 100% busy, the time elapsed and the busy
+ * time should grow by the same amount in two consecutive
+ * samples, but in practice there can be a tiny difference,
+ * occasionally making the accumulated idle time decrease. Hence,
+ * in this case, idle_time should be regarded as 0 so that the
+ * subsequent calculations remain correct.
+ */
+ if (cur_idle_time > j_cdbs->prev_cpu_idle)
+ idle_time = cur_idle_time - j_cdbs->prev_cpu_idle;
+ else
+ idle_time = 0;
+
j_cdbs->prev_cpu_idle = cur_idle_time;
if (ignore_nice) {
@@ -162,7 +178,7 @@ unsigned int dbs_update(struct cpufreq_policy *policy)
* calls, so the previous load value can be used then.
*/
load = j_cdbs->prev_load;
- } else if (unlikely((int)idle_time > 2 * sampling_rate &&
+ } else if (unlikely(idle_time > 2 * sampling_rate &&
j_cdbs->prev_load)) {
/*
* If the CPU had gone completely idle and a task has
@@ -189,30 +205,15 @@ unsigned int dbs_update(struct cpufreq_policy *policy)
load = j_cdbs->prev_load;
j_cdbs->prev_load = 0;
} else {
- if (time_elapsed >= idle_time) {
+ if (time_elapsed > idle_time)
load = 100 * (time_elapsed - idle_time) / time_elapsed;
- } else {
- /*
- * That can happen if idle_time is returned by
- * get_cpu_idle_time_jiffy(). In that case
- * idle_time is roughly equal to the difference
- * between time_elapsed and "busy time" obtained
- * from CPU statistics. Then, the "busy time"
- * can end up being greater than time_elapsed
- * (for example, if jiffies_64 and the CPU
- * statistics are updated by different CPUs),
- * so idle_time may in fact be negative. That
- * means, though, that the CPU was busy all
- * the time (on the rough average) during the
- * last sampling interval and 100 can be
- * returned as the load.
- */
- load = (int)idle_time < 0 ? 100 : 0;
- }
+ else
+ load = 0;
+
j_cdbs->prev_load = load;
}
- if (unlikely((int)idle_time > 2 * sampling_rate)) {
+ if (unlikely(idle_time > 2 * sampling_rate)) {
unsigned int periods = idle_time / sampling_rate;
if (periods < idle_periods)
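
With idle_time now unsigned and clamped to zero, the load computation reduces to two branches: elapsed time minus idle time when there was any slack, and full load when the CPU never went idle in the window. A small worked sketch of the arithmetic, with microsecond values chosen purely for illustration:

#include <stdio.h>

/* Mirror of the simplified load math in dbs_update(). */
static unsigned int compute_load(unsigned int time_elapsed,
                                 unsigned int idle_time)
{
        if (time_elapsed > idle_time)
                return 100 * (time_elapsed - idle_time) / time_elapsed;
        return 0;       /* idle for the whole window */
}

int main(void)
{
        /* 10 ms window, 2.5 ms idle -> 75% load */
        printf("load = %u%%\n", compute_load(10000, 2500));

        /* fully busy window (idle counter did not advance, clamped to 0) -> 100% */
        printf("load = %u%%\n", compute_load(10000, 0));
        return 0;
}
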
diff --git a/drivers/cpufreq/davinci-cpufreq.c b/drivers/cpufreq/davinci-cpufreq.c
index 8736be3a06ce..2c277eb3795a 100644
--- a/drivers/cpufreq/davinci-cpufreq.c
+++ b/drivers/cpufreq/davinci-cpufreq.c
@@ -101,7 +101,6 @@ static struct cpufreq_driver davinci_driver = {
.get = cpufreq_generic_get,
.init = davinci_cpu_init,
.name = "davinci",
- .attr = cpufreq_generic_attr,
};
static int __init davinci_cpufreq_probe(struct platform_device *pdev)
diff --git a/drivers/cpufreq/e_powersaver.c b/drivers/cpufreq/e_powersaver.c
index 6e958b09e1b5..d23a97ba6478 100644
--- a/drivers/cpufreq/e_powersaver.c
+++ b/drivers/cpufreq/e_powersaver.c
@@ -376,7 +376,6 @@ static struct cpufreq_driver eps_driver = {
.exit = eps_cpu_exit,
.get = eps_get,
.name = "e_powersaver",
- .attr = cpufreq_generic_attr,
};
diff --git a/drivers/cpufreq/elanfreq.c b/drivers/cpufreq/elanfreq.c
index 4ce5eb35dc46..36494b855e41 100644
--- a/drivers/cpufreq/elanfreq.c
+++ b/drivers/cpufreq/elanfreq.c
@@ -194,7 +194,6 @@ static struct cpufreq_driver elanfreq_driver = {
.target_index = elanfreq_target,
.init = elanfreq_cpu_init,
.name = "elanfreq",
- .attr = cpufreq_generic_attr,
};
static const struct x86_cpu_id elan_id[] = {
diff --git a/drivers/cpufreq/freq_table.c b/drivers/cpufreq/freq_table.c
index 10e80d912b8d..c03a91502f84 100644
--- a/drivers/cpufreq/freq_table.c
+++ b/drivers/cpufreq/freq_table.c
@@ -14,7 +14,7 @@
* FREQUENCY TABLE HELPERS *
*********************************************************************/
-bool policy_has_boost_freq(struct cpufreq_policy *policy)
+static bool policy_has_boost_freq(struct cpufreq_policy *policy)
{
struct cpufreq_frequency_table *pos, *table = policy->freq_table;
@@ -27,7 +27,6 @@ bool policy_has_boost_freq(struct cpufreq_policy *policy)
return false;
}
-EXPORT_SYMBOL_GPL(policy_has_boost_freq);
int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy,
struct cpufreq_frequency_table *table)
@@ -276,7 +275,6 @@ static ssize_t scaling_available_frequencies_show(struct cpufreq_policy *policy,
return show_available_freqs(policy, buf, false);
}
cpufreq_attr_available_freq(scaling_available);
-EXPORT_SYMBOL_GPL(cpufreq_freq_attr_scaling_available_freqs);
/*
* scaling_boost_frequencies_show - show available boost frequencies for
@@ -288,13 +286,6 @@ static ssize_t scaling_boost_frequencies_show(struct cpufreq_policy *policy,
return show_available_freqs(policy, buf, true);
}
cpufreq_attr_available_freq(scaling_boost);
-EXPORT_SYMBOL_GPL(cpufreq_freq_attr_scaling_boost_freqs);
-
-struct freq_attr *cpufreq_generic_attr[] = {
- &cpufreq_freq_attr_scaling_available_freqs,
- NULL,
-};
-EXPORT_SYMBOL_GPL(cpufreq_generic_attr);
static int set_freq_table_sorted(struct cpufreq_policy *policy)
{
@@ -367,6 +358,10 @@ int cpufreq_table_validate_and_sort(struct cpufreq_policy *policy)
if (ret)
return ret;
+ /* Drivers may have set this field already */
+ if (policy_has_boost_freq(policy))
+ policy->boost_supported = true;
+
return set_freq_table_sorted(policy);
}
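
Since policy_has_boost_freq() is now called from cpufreq_table_validate_and_sort(), a driver whose frequency table marks entries with CPUFREQ_BOOST_FREQ gets policy->boost_supported set automatically and only needs a ->set_boost callback (several conversions in this series use cpufreq_boost_set_sw). A hedged sketch of the detection over a table; the flag value and table layout are simplified stand-ins, not the kernel definitions:

#include <stdbool.h>
#include <stdio.h>

#define FREQ_TABLE_END  ~0u
#define BOOST_FREQ      (1u << 0)       /* stand-in for CPUFREQ_BOOST_FREQ */

struct freq_entry {
        unsigned int flags;
        unsigned int frequency;         /* kHz, FREQ_TABLE_END terminates */
};

static bool table_has_boost_freq(const struct freq_entry *table)
{
        for (const struct freq_entry *pos = table;
             pos->frequency != FREQ_TABLE_END; pos++)
                if (pos->flags & BOOST_FREQ)
                        return true;
        return false;
}

int main(void)
{
        struct freq_entry table[] = {
                { BOOST_FREQ, 2800000 },        /* turbo entry */
                { 0,          2400000 },
                { 0,          1600000 },
                { 0,          FREQ_TABLE_END },
        };

        printf("boost_supported = %d\n", (int)table_has_boost_freq(table));
        return 0;
}
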
diff --git a/drivers/cpufreq/imx6q-cpufreq.c b/drivers/cpufreq/imx6q-cpufreq.c
index f3c99f378ad6..db1c88e9d3f9 100644
--- a/drivers/cpufreq/imx6q-cpufreq.c
+++ b/drivers/cpufreq/imx6q-cpufreq.c
@@ -207,7 +207,6 @@ static struct cpufreq_driver imx6q_cpufreq_driver = {
.init = imx6q_cpufreq_init,
.register_em = cpufreq_register_em_with_opp,
.name = "imx6q-cpufreq",
- .attr = cpufreq_generic_attr,
.suspend = cpufreq_generic_suspend,
};
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 9c4cc01fd51a..4aad79d26c64 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -936,6 +936,8 @@ static struct freq_attr *hwp_cpufreq_attrs[] = {
NULL,
};
+static bool no_cas __ro_after_init;
+
static struct cpudata *hybrid_max_perf_cpu __read_mostly;
/*
* Protects hybrid_max_perf_cpu, the capacity_perf fields in struct cpudata,
@@ -1041,6 +1043,10 @@ static void hybrid_refresh_cpu_capacity_scaling(void)
static void hybrid_init_cpu_capacity_scaling(bool refresh)
{
+ /* Bail out if enabling capacity-aware scheduling is prohibited. */
+ if (no_cas)
+ return;
+
/*
* If hybrid_max_perf_cpu is set at this point, the hybrid CPU capacity
* scaling has been enabled already and the driver is just changing the
@@ -2200,28 +2206,20 @@ static int knl_get_turbo_pstate(int cpu)
return ret;
}
-static void hybrid_get_type(void *data)
-{
- u8 *cpu_type = data;
-
- *cpu_type = get_this_hybrid_cpu_type();
-}
-
static int hwp_get_cpu_scaling(int cpu)
{
if (hybrid_scaling_factor) {
- u8 cpu_type = 0;
-
- smp_call_function_single(cpu, hybrid_get_type, &cpu_type, 1);
+ struct cpuinfo_x86 *c = &cpu_data(smp_processor_id());
+ u8 cpu_type = c->topo.intel_type;
/*
* Return the hybrid scaling factor for P-cores and use the
* default core scaling for E-cores.
*/
- if (cpu_type == 0x40)
+ if (cpu_type == INTEL_CPU_TYPE_CORE)
return hybrid_scaling_factor;
- if (cpu_type == 0x20)
+ if (cpu_type == INTEL_CPU_TYPE_ATOM)
return core_get_scaling();
}
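
hwp_get_cpu_scaling() now reads the hybrid CPU type from the cached topology instead of sending an IPI, and compares against named INTEL_CPU_TYPE_* constants rather than the raw 0x40/0x20 values. A small sketch of the selection logic; the enum and the scaling numbers below are illustrative placeholders, not the kernel's definitions:

#include <stdio.h>

enum cpu_type { CPU_TYPE_ATOM, CPU_TYPE_CORE };         /* illustrative only */

static const int hybrid_scaling_factor = 78741;         /* example P-core factor */
static const int default_core_scaling = 100000;         /* example default */

static int get_cpu_scaling(enum cpu_type type)
{
        /* P-cores get the hybrid factor, E-cores keep the default. */
        if (type == CPU_TYPE_CORE)
                return hybrid_scaling_factor;
        return default_core_scaling;
}

int main(void)
{
        printf("P-core scaling: %d\n", get_cpu_scaling(CPU_TYPE_CORE));
        printf("E-core scaling: %d\n", get_cpu_scaling(CPU_TYPE_ATOM));
        return 0;
}
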
@@ -3688,6 +3686,15 @@ static int __init intel_pstate_init(void)
if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
return -ENODEV;
+ /*
+ * The Intel pstate driver will be ignored if the platform
+ * firmware has its own power management modes.
+ */
+ if (intel_pstate_platform_pwr_mgmt_exists()) {
+ pr_info("P-states controlled by the platform\n");
+ return -ENODEV;
+ }
+
id = x86_match_cpu(hwp_support_ids);
if (id) {
hwp_forced = intel_pstate_hwp_is_enabled();
@@ -3743,15 +3750,6 @@ static int __init intel_pstate_init(void)
default_driver = &intel_cpufreq;
hwp_cpu_matched:
- /*
- * The Intel pstate driver will be ignored if the platform
- * firmware has its own power management modes.
- */
- if (intel_pstate_platform_pwr_mgmt_exists()) {
- pr_info("P-states controlled by the platform\n");
- return -ENODEV;
- }
-
if (!hwp_active && hwp_only)
return -ENOTSUPP;
@@ -3835,6 +3833,9 @@ static int __init intel_pstate_setup(char *str)
if (!strcmp(str, "no_hwp"))
no_hwp = 1;
+ if (!strcmp(str, "no_cas"))
+ no_cas = true;
+
if (!strcmp(str, "force"))
force_load = 1;
if (!strcmp(str, "hwp_only"))
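
The new no_cas keyword is parsed by intel_pstate_setup() alongside the existing flags, so hybrid capacity-aware scheduling support can be disabled from the kernel command line, for example:

        intel_pstate=no_cas
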
diff --git a/drivers/cpufreq/kirkwood-cpufreq.c b/drivers/cpufreq/kirkwood-cpufreq.c
index 312f2654d1d5..24b285cbeb8d 100644
--- a/drivers/cpufreq/kirkwood-cpufreq.c
+++ b/drivers/cpufreq/kirkwood-cpufreq.c
@@ -96,7 +96,6 @@ static struct cpufreq_driver kirkwood_cpufreq_driver = {
.target_index = kirkwood_cpufreq_target,
.init = kirkwood_cpufreq_cpu_init,
.name = "kirkwood-cpufreq",
- .attr = cpufreq_generic_attr,
};
static int kirkwood_cpufreq_probe(struct platform_device *pdev)
diff --git a/drivers/cpufreq/longhaul.c b/drivers/cpufreq/longhaul.c
index bd6fe8638d39..68ccd73c8129 100644
--- a/drivers/cpufreq/longhaul.c
+++ b/drivers/cpufreq/longhaul.c
@@ -906,7 +906,6 @@ static struct cpufreq_driver longhaul_driver = {
.get = longhaul_get,
.init = longhaul_cpu_init,
.name = "longhaul",
- .attr = cpufreq_generic_attr,
};
static const struct x86_cpu_id longhaul_id[] = {
diff --git a/drivers/cpufreq/loongson2_cpufreq.c b/drivers/cpufreq/loongson2_cpufreq.c
index ed1a6dbad638..39a6c4315a60 100644
--- a/drivers/cpufreq/loongson2_cpufreq.c
+++ b/drivers/cpufreq/loongson2_cpufreq.c
@@ -91,7 +91,6 @@ static struct cpufreq_driver loongson2_cpufreq_driver = {
.verify = cpufreq_generic_frequency_table_verify,
.target_index = loongson2_cpufreq_target,
.get = cpufreq_generic_get,
- .attr = cpufreq_generic_attr,
};
static const struct platform_device_id platform_device_ids[] = {
diff --git a/drivers/cpufreq/loongson3_cpufreq.c b/drivers/cpufreq/loongson3_cpufreq.c
index bd34bf0fafa5..1e8715ea1b77 100644
--- a/drivers/cpufreq/loongson3_cpufreq.c
+++ b/drivers/cpufreq/loongson3_cpufreq.c
@@ -299,15 +299,6 @@ static int loongson3_cpufreq_cpu_init(struct cpufreq_policy *policy)
per_cpu(freq_data, i) = per_cpu(freq_data, cpu);
}
- if (policy_has_boost_freq(policy)) {
- ret = cpufreq_enable_boost_support();
- if (ret < 0) {
- pr_warn("cpufreq: Failed to enable boost: %d\n", ret);
- return ret;
- }
- loongson3_cpufreq_driver.boost_enabled = true;
- }
-
return 0;
}
@@ -337,8 +328,8 @@ static struct cpufreq_driver loongson3_cpufreq_driver = {
.offline = loongson3_cpufreq_cpu_offline,
.get = loongson3_cpufreq_get,
.target_index = loongson3_cpufreq_target,
- .attr = cpufreq_generic_attr,
.verify = cpufreq_generic_frequency_table_verify,
+ .set_boost = cpufreq_boost_set_sw,
.suspend = cpufreq_generic_suspend,
};
diff --git a/drivers/cpufreq/mediatek-cpufreq-hw.c b/drivers/cpufreq/mediatek-cpufreq-hw.c
index 9252ebd60373..74f1b4c796e4 100644
--- a/drivers/cpufreq/mediatek-cpufreq-hw.c
+++ b/drivers/cpufreq/mediatek-cpufreq-hw.c
@@ -293,7 +293,6 @@ static struct cpufreq_driver cpufreq_mtk_hw_driver = {
.register_em = mtk_cpufreq_register_em,
.fast_switch = mtk_cpufreq_hw_fast_switch,
.name = "mtk-cpufreq-hw",
- .attr = cpufreq_generic_attr,
};
static int mtk_cpufreq_hw_driver_probe(struct platform_device *pdev)
@@ -304,7 +303,7 @@ static int mtk_cpufreq_hw_driver_probe(struct platform_device *pdev)
struct regulator *cpu_reg;
/* Make sure that all CPU supplies are available before proceeding. */
- for_each_possible_cpu(cpu) {
+ for_each_present_cpu(cpu) {
cpu_dev = get_cpu_device(cpu);
if (!cpu_dev)
return dev_err_probe(&pdev->dev, -EPROBE_DEFER,
diff --git a/drivers/cpufreq/mediatek-cpufreq.c b/drivers/cpufreq/mediatek-cpufreq.c
index 663f61565cf7..f3f02c4b6888 100644
--- a/drivers/cpufreq/mediatek-cpufreq.c
+++ b/drivers/cpufreq/mediatek-cpufreq.c
@@ -618,7 +618,6 @@ static struct cpufreq_driver mtk_cpufreq_driver = {
.exit = mtk_cpufreq_exit,
.register_em = cpufreq_register_em_with_opp,
.name = "mtk-cpufreq",
- .attr = cpufreq_generic_attr,
};
static int mtk_cpufreq_probe(struct platform_device *pdev)
@@ -632,7 +631,7 @@ static int mtk_cpufreq_probe(struct platform_device *pdev)
return dev_err_probe(&pdev->dev, -ENODEV,
"failed to get mtk cpufreq platform data\n");
- for_each_possible_cpu(cpu) {
+ for_each_present_cpu(cpu) {
info = mtk_cpu_dvfs_info_lookup(cpu);
if (info)
continue;
diff --git a/drivers/cpufreq/mvebu-cpufreq.c b/drivers/cpufreq/mvebu-cpufreq.c
index 7f3cfe668f30..2aad4c04673c 100644
--- a/drivers/cpufreq/mvebu-cpufreq.c
+++ b/drivers/cpufreq/mvebu-cpufreq.c
@@ -56,7 +56,7 @@ static int __init armada_xp_pmsu_cpufreq_init(void)
* it), and registers the clock notifier that will take care
* of doing the PMSU part of a frequency transition.
*/
- for_each_possible_cpu(cpu) {
+ for_each_present_cpu(cpu) {
struct device *cpu_dev;
struct clk *clk;
int ret;
diff --git a/drivers/cpufreq/omap-cpufreq.c b/drivers/cpufreq/omap-cpufreq.c
index 106220c0fd11..bbb01d93b54b 100644
--- a/drivers/cpufreq/omap-cpufreq.c
+++ b/drivers/cpufreq/omap-cpufreq.c
@@ -147,7 +147,6 @@ static struct cpufreq_driver omap_driver = {
.exit = omap_cpu_exit,
.register_em = cpufreq_register_em_with_opp,
.name = "omap",
- .attr = cpufreq_generic_attr,
};
static int omap_cpufreq_probe(struct platform_device *pdev)
diff --git a/drivers/cpufreq/p4-clockmod.c b/drivers/cpufreq/p4-clockmod.c
index ef0a3216a386..69c19233fcd4 100644
--- a/drivers/cpufreq/p4-clockmod.c
+++ b/drivers/cpufreq/p4-clockmod.c
@@ -227,7 +227,6 @@ static struct cpufreq_driver p4clockmod_driver = {
.init = cpufreq_p4_cpu_init,
.get = cpufreq_p4_get,
.name = "p4-clockmod",
- .attr = cpufreq_generic_attr,
};
static const struct x86_cpu_id cpufreq_p4_id[] = {
diff --git a/drivers/cpufreq/pasemi-cpufreq.c b/drivers/cpufreq/pasemi-cpufreq.c
index 5fc9cb480516..a3931349360f 100644
--- a/drivers/cpufreq/pasemi-cpufreq.c
+++ b/drivers/cpufreq/pasemi-cpufreq.c
@@ -245,7 +245,6 @@ static struct cpufreq_driver pas_cpufreq_driver = {
.exit = pas_cpufreq_cpu_exit,
.verify = cpufreq_generic_frequency_table_verify,
.target_index = pas_cpufreq_target,
- .attr = cpufreq_generic_attr,
};
/*
diff --git a/drivers/cpufreq/pmac32-cpufreq.c b/drivers/cpufreq/pmac32-cpufreq.c
index 6c9f0888a2a7..a22c22bd693a 100644
--- a/drivers/cpufreq/pmac32-cpufreq.c
+++ b/drivers/cpufreq/pmac32-cpufreq.c
@@ -439,7 +439,6 @@ static struct cpufreq_driver pmac_cpufreq_driver = {
.suspend = pmac_cpufreq_suspend,
.resume = pmac_cpufreq_resume,
.flags = CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING,
- .attr = cpufreq_generic_attr,
.name = "powermac",
};
diff --git a/drivers/cpufreq/pmac64-cpufreq.c b/drivers/cpufreq/pmac64-cpufreq.c
index 74ff6c47df29..80897ec8f00e 100644
--- a/drivers/cpufreq/pmac64-cpufreq.c
+++ b/drivers/cpufreq/pmac64-cpufreq.c
@@ -332,7 +332,6 @@ static struct cpufreq_driver g5_cpufreq_driver = {
.verify = cpufreq_generic_frequency_table_verify,
.target_index = g5_cpufreq_target,
.get = g5_cpufreq_get_speed,
- .attr = cpufreq_generic_attr,
};
diff --git a/drivers/cpufreq/powernow-k6.c b/drivers/cpufreq/powernow-k6.c
index f0a4a6c31204..99d2244e03b0 100644
--- a/drivers/cpufreq/powernow-k6.c
+++ b/drivers/cpufreq/powernow-k6.c
@@ -253,7 +253,6 @@ static struct cpufreq_driver powernow_k6_driver = {
.exit = powernow_k6_cpu_exit,
.get = powernow_k6_get,
.name = "powernow-k6",
- .attr = cpufreq_generic_attr,
};
static const struct x86_cpu_id powernow_k6_ids[] = {
diff --git a/drivers/cpufreq/powernow-k7.c b/drivers/cpufreq/powernow-k7.c
index 4271446c8725..fb2197dc170f 100644
--- a/drivers/cpufreq/powernow-k7.c
+++ b/drivers/cpufreq/powernow-k7.c
@@ -667,7 +667,6 @@ static struct cpufreq_driver powernow_driver = {
.init = powernow_cpu_init,
.exit = powernow_cpu_exit,
.name = "powernow-k7",
- .attr = cpufreq_generic_attr,
};
static int __init powernow_init(void)
diff --git a/drivers/cpufreq/powernow-k8.c b/drivers/cpufreq/powernow-k8.c
index a01170f7d01c..4e3ba6e68c32 100644
--- a/drivers/cpufreq/powernow-k8.c
+++ b/drivers/cpufreq/powernow-k8.c
@@ -1143,7 +1143,6 @@ static struct cpufreq_driver cpufreq_amd64_driver = {
.exit = powernowk8_cpu_exit,
.get = powernowk8_get,
.name = "powernow-k8",
- .attr = cpufreq_generic_attr,
};
static void __request_acpi_cpufreq(void)
diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c
index ae79d909943b..6094c530bf57 100644
--- a/drivers/cpufreq/powernv-cpufreq.c
+++ b/drivers/cpufreq/powernv-cpufreq.c
@@ -386,12 +386,8 @@ static ssize_t cpuinfo_nominal_freq_show(struct cpufreq_policy *policy,
static struct freq_attr cpufreq_freq_attr_cpuinfo_nominal_freq =
__ATTR_RO(cpuinfo_nominal_freq);
-#define SCALING_BOOST_FREQS_ATTR_INDEX 2
-
static struct freq_attr *powernv_cpu_freq_attr[] = {
- &cpufreq_freq_attr_scaling_available_freqs,
&cpufreq_freq_attr_cpuinfo_nominal_freq,
- &cpufreq_freq_attr_scaling_boost_freqs,
NULL,
};
@@ -1128,9 +1124,7 @@ static int __init powernv_cpufreq_init(void)
goto out;
if (powernv_pstate_info.wof_enabled)
- powernv_cpufreq_driver.boost_enabled = true;
- else
- powernv_cpu_freq_attr[SCALING_BOOST_FREQS_ATTR_INDEX] = NULL;
+ powernv_cpufreq_driver.set_boost = cpufreq_boost_set_sw;
rc = cpufreq_register_driver(&powernv_cpufreq_driver);
if (rc) {
@@ -1138,9 +1132,6 @@ static int __init powernv_cpufreq_init(void)
goto cleanup;
}
- if (powernv_pstate_info.wof_enabled)
- cpufreq_enable_boost_support();
-
register_reboot_notifier(&powernv_cpufreq_reboot_nb);
opal_message_notifier_register(OPAL_MSG_OCC, &powernv_cpufreq_opal_nb);
diff --git a/drivers/cpufreq/ppc_cbe_cpufreq.c b/drivers/cpufreq/ppc_cbe_cpufreq.c
deleted file mode 100644
index 98595b3ea13f..000000000000
--- a/drivers/cpufreq/ppc_cbe_cpufreq.c
+++ /dev/null
@@ -1,173 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * cpufreq driver for the cell processor
- *
- * (C) Copyright IBM Deutschland Entwicklung GmbH 2005-2007
- *
- * Author: Christian Krafft <krafft@de.ibm.com>
- */
-
-#include <linux/cpufreq.h>
-#include <linux/module.h>
-#include <linux/of.h>
-
-#include <asm/machdep.h>
-#include <asm/cell-regs.h>
-
-#include "ppc_cbe_cpufreq.h"
-
-/* the CBE supports an 8 step frequency scaling */
-static struct cpufreq_frequency_table cbe_freqs[] = {
- {0, 1, 0},
- {0, 2, 0},
- {0, 3, 0},
- {0, 4, 0},
- {0, 5, 0},
- {0, 6, 0},
- {0, 8, 0},
- {0, 10, 0},
- {0, 0, CPUFREQ_TABLE_END},
-};
-
-/*
- * hardware specific functions
- */
-
-static int set_pmode(unsigned int cpu, unsigned int slow_mode)
-{
- int rc;
-
- if (cbe_cpufreq_has_pmi)
- rc = cbe_cpufreq_set_pmode_pmi(cpu, slow_mode);
- else
- rc = cbe_cpufreq_set_pmode(cpu, slow_mode);
-
- pr_debug("register contains slow mode %d\n", cbe_cpufreq_get_pmode(cpu));
-
- return rc;
-}
-
-/*
- * cpufreq functions
- */
-
-static int cbe_cpufreq_cpu_init(struct cpufreq_policy *policy)
-{
- struct cpufreq_frequency_table *pos;
- const u32 *max_freqp;
- u32 max_freq;
- int cur_pmode;
- struct device_node *cpu;
-
- cpu = of_get_cpu_node(policy->cpu, NULL);
-
- if (!cpu)
- return -ENODEV;
-
- pr_debug("init cpufreq on CPU %d\n", policy->cpu);
-
- /*
- * Let's check we can actually get to the CELL regs
- */
- if (!cbe_get_cpu_pmd_regs(policy->cpu) ||
- !cbe_get_cpu_mic_tm_regs(policy->cpu)) {
- pr_info("invalid CBE regs pointers for cpufreq\n");
- of_node_put(cpu);
- return -EINVAL;
- }
-
- max_freqp = of_get_property(cpu, "clock-frequency", NULL);
-
- of_node_put(cpu);
-
- if (!max_freqp)
- return -EINVAL;
-
- /* we need the freq in kHz */
- max_freq = *max_freqp / 1000;
-
- pr_debug("max clock-frequency is at %u kHz\n", max_freq);
- pr_debug("initializing frequency table\n");
-
- /* initialize frequency table */
- cpufreq_for_each_entry(pos, cbe_freqs) {
- pos->frequency = max_freq / pos->driver_data;
- pr_debug("%d: %d\n", (int)(pos - cbe_freqs), pos->frequency);
- }
-
- /* if DEBUG is enabled set_pmode() measures the latency
- * of a transition */
- policy->cpuinfo.transition_latency = 25000;
-
- cur_pmode = cbe_cpufreq_get_pmode(policy->cpu);
- pr_debug("current pmode is at %d\n",cur_pmode);
-
- policy->cur = cbe_freqs[cur_pmode].frequency;
-
-#ifdef CONFIG_SMP
- cpumask_copy(policy->cpus, cpu_sibling_mask(policy->cpu));
-#endif
-
- policy->freq_table = cbe_freqs;
- cbe_cpufreq_pmi_policy_init(policy);
- return 0;
-}
-
-static void cbe_cpufreq_cpu_exit(struct cpufreq_policy *policy)
-{
- cbe_cpufreq_pmi_policy_exit(policy);
-}
-
-static int cbe_cpufreq_target(struct cpufreq_policy *policy,
- unsigned int cbe_pmode_new)
-{
- pr_debug("setting frequency for cpu %d to %d kHz, " \
- "1/%d of max frequency\n",
- policy->cpu,
- cbe_freqs[cbe_pmode_new].frequency,
- cbe_freqs[cbe_pmode_new].driver_data);
-
- return set_pmode(policy->cpu, cbe_pmode_new);
-}
-
-static struct cpufreq_driver cbe_cpufreq_driver = {
- .verify = cpufreq_generic_frequency_table_verify,
- .target_index = cbe_cpufreq_target,
- .init = cbe_cpufreq_cpu_init,
- .exit = cbe_cpufreq_cpu_exit,
- .name = "cbe-cpufreq",
- .flags = CPUFREQ_CONST_LOOPS,
-};
-
-/*
- * module init and destoy
- */
-
-static int __init cbe_cpufreq_init(void)
-{
- int ret;
-
- if (!machine_is(cell))
- return -ENODEV;
-
- cbe_cpufreq_pmi_init();
-
- ret = cpufreq_register_driver(&cbe_cpufreq_driver);
- if (ret)
- cbe_cpufreq_pmi_exit();
-
- return ret;
-}
-
-static void __exit cbe_cpufreq_exit(void)
-{
- cpufreq_unregister_driver(&cbe_cpufreq_driver);
- cbe_cpufreq_pmi_exit();
-}
-
-module_init(cbe_cpufreq_init);
-module_exit(cbe_cpufreq_exit);
-
-MODULE_DESCRIPTION("cpufreq driver for Cell BE processors");
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Christian Krafft <krafft@de.ibm.com>");
diff --git a/drivers/cpufreq/ppc_cbe_cpufreq.h b/drivers/cpufreq/ppc_cbe_cpufreq.h
deleted file mode 100644
index 00cd8633b0d9..000000000000
--- a/drivers/cpufreq/ppc_cbe_cpufreq.h
+++ /dev/null
@@ -1,33 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * ppc_cbe_cpufreq.h
- *
- * This file contains the definitions used by the cbe_cpufreq driver.
- *
- * (C) Copyright IBM Deutschland Entwicklung GmbH 2005-2007
- *
- * Author: Christian Krafft <krafft@de.ibm.com>
- *
- */
-
-#include <linux/cpufreq.h>
-#include <linux/types.h>
-
-int cbe_cpufreq_set_pmode(int cpu, unsigned int pmode);
-int cbe_cpufreq_get_pmode(int cpu);
-
-int cbe_cpufreq_set_pmode_pmi(int cpu, unsigned int pmode);
-
-#if IS_ENABLED(CONFIG_CPU_FREQ_CBE_PMI)
-extern bool cbe_cpufreq_has_pmi;
-void cbe_cpufreq_pmi_policy_init(struct cpufreq_policy *policy);
-void cbe_cpufreq_pmi_policy_exit(struct cpufreq_policy *policy);
-void cbe_cpufreq_pmi_init(void);
-void cbe_cpufreq_pmi_exit(void);
-#else
-#define cbe_cpufreq_has_pmi (0)
-static inline void cbe_cpufreq_pmi_policy_init(struct cpufreq_policy *policy) {}
-static inline void cbe_cpufreq_pmi_policy_exit(struct cpufreq_policy *policy) {}
-static inline void cbe_cpufreq_pmi_init(void) {}
-static inline void cbe_cpufreq_pmi_exit(void) {}
-#endif
diff --git a/drivers/cpufreq/ppc_cbe_cpufreq_pervasive.c b/drivers/cpufreq/ppc_cbe_cpufreq_pervasive.c
deleted file mode 100644
index 04830cd95333..000000000000
--- a/drivers/cpufreq/ppc_cbe_cpufreq_pervasive.c
+++ /dev/null
@@ -1,102 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * pervasive backend for the cbe_cpufreq driver
- *
- * This driver makes use of the pervasive unit to
- * engage the desired frequency.
- *
- * (C) Copyright IBM Deutschland Entwicklung GmbH 2005-2007
- *
- * Author: Christian Krafft <krafft@de.ibm.com>
- */
-
-#include <linux/io.h>
-#include <linux/kernel.h>
-#include <linux/time.h>
-#include <asm/machdep.h>
-#include <asm/hw_irq.h>
-#include <asm/cell-regs.h>
-
-#include "ppc_cbe_cpufreq.h"
-
-/* to write to MIC register */
-static u64 MIC_Slow_Fast_Timer_table[] = {
- [0 ... 7] = 0x007fc00000000000ull,
-};
-
-/* more values for the MIC */
-static u64 MIC_Slow_Next_Timer_table[] = {
- 0x0000240000000000ull,
- 0x0000268000000000ull,
- 0x000029C000000000ull,
- 0x00002D0000000000ull,
- 0x0000300000000000ull,
- 0x0000334000000000ull,
- 0x000039C000000000ull,
- 0x00003FC000000000ull,
-};
-
-
-int cbe_cpufreq_set_pmode(int cpu, unsigned int pmode)
-{
- struct cbe_pmd_regs __iomem *pmd_regs;
- struct cbe_mic_tm_regs __iomem *mic_tm_regs;
- unsigned long flags;
- u64 value;
-#ifdef DEBUG
- long time;
-#endif
-
- local_irq_save(flags);
-
- mic_tm_regs = cbe_get_cpu_mic_tm_regs(cpu);
- pmd_regs = cbe_get_cpu_pmd_regs(cpu);
-
-#ifdef DEBUG
- time = jiffies;
-#endif
-
- out_be64(&mic_tm_regs->slow_fast_timer_0, MIC_Slow_Fast_Timer_table[pmode]);
- out_be64(&mic_tm_regs->slow_fast_timer_1, MIC_Slow_Fast_Timer_table[pmode]);
-
- out_be64(&mic_tm_regs->slow_next_timer_0, MIC_Slow_Next_Timer_table[pmode]);
- out_be64(&mic_tm_regs->slow_next_timer_1, MIC_Slow_Next_Timer_table[pmode]);
-
- value = in_be64(&pmd_regs->pmcr);
- /* set bits to zero */
- value &= 0xFFFFFFFFFFFFFFF8ull;
- /* set bits to next pmode */
- value |= pmode;
-
- out_be64(&pmd_regs->pmcr, value);
-
-#ifdef DEBUG
- /* wait until new pmode appears in status register */
- value = in_be64(&pmd_regs->pmsr) & 0x07;
- while (value != pmode) {
- cpu_relax();
- value = in_be64(&pmd_regs->pmsr) & 0x07;
- }
-
- time = jiffies - time;
- time = jiffies_to_msecs(time);
- pr_debug("had to wait %lu ms for a transition using " \
- "pervasive unit\n", time);
-#endif
- local_irq_restore(flags);
-
- return 0;
-}
-
-
-int cbe_cpufreq_get_pmode(int cpu)
-{
- int ret;
- struct cbe_pmd_regs __iomem *pmd_regs;
-
- pmd_regs = cbe_get_cpu_pmd_regs(cpu);
- ret = in_be64(&pmd_regs->pmsr) & 0x07;
-
- return ret;
-}
-
diff --git a/drivers/cpufreq/ppc_cbe_cpufreq_pmi.c b/drivers/cpufreq/ppc_cbe_cpufreq_pmi.c
deleted file mode 100644
index 6f0c32592416..000000000000
--- a/drivers/cpufreq/ppc_cbe_cpufreq_pmi.c
+++ /dev/null
@@ -1,150 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * pmi backend for the cbe_cpufreq driver
- *
- * (C) Copyright IBM Deutschland Entwicklung GmbH 2005-2007
- *
- * Author: Christian Krafft <krafft@de.ibm.com>
- */
-
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/timer.h>
-#include <linux/init.h>
-#include <linux/pm_qos.h>
-#include <linux/slab.h>
-
-#include <asm/processor.h>
-#include <asm/pmi.h>
-#include <asm/cell-regs.h>
-
-#ifdef DEBUG
-#include <asm/time.h>
-#endif
-
-#include "ppc_cbe_cpufreq.h"
-
-bool cbe_cpufreq_has_pmi = false;
-EXPORT_SYMBOL_GPL(cbe_cpufreq_has_pmi);
-
-/*
- * hardware specific functions
- */
-
-int cbe_cpufreq_set_pmode_pmi(int cpu, unsigned int pmode)
-{
- int ret;
- pmi_message_t pmi_msg;
-#ifdef DEBUG
- long time;
-#endif
- pmi_msg.type = PMI_TYPE_FREQ_CHANGE;
- pmi_msg.data1 = cbe_cpu_to_node(cpu);
- pmi_msg.data2 = pmode;
-
-#ifdef DEBUG
- time = jiffies;
-#endif
- pmi_send_message(pmi_msg);
-
-#ifdef DEBUG
- time = jiffies - time;
- time = jiffies_to_msecs(time);
- pr_debug("had to wait %lu ms for a transition using " \
- "PMI\n", time);
-#endif
- ret = pmi_msg.data2;
- pr_debug("PMI returned slow mode %d\n", ret);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(cbe_cpufreq_set_pmode_pmi);
-
-
-static void cbe_cpufreq_handle_pmi(pmi_message_t pmi_msg)
-{
- struct cpufreq_policy *policy;
- struct freq_qos_request *req;
- u8 node, slow_mode;
- int cpu, ret;
-
- BUG_ON(pmi_msg.type != PMI_TYPE_FREQ_CHANGE);
-
- node = pmi_msg.data1;
- slow_mode = pmi_msg.data2;
-
- cpu = cbe_node_to_cpu(node);
-
- pr_debug("cbe_handle_pmi: node: %d max_freq: %d\n", node, slow_mode);
-
- policy = cpufreq_cpu_get(cpu);
- if (!policy) {
- pr_warn("cpufreq policy not found cpu%d\n", cpu);
- return;
- }
-
- req = policy->driver_data;
-
- ret = freq_qos_update_request(req,
- policy->freq_table[slow_mode].frequency);
- if (ret < 0)
- pr_warn("Failed to update freq constraint: %d\n", ret);
- else
- pr_debug("limiting node %d to slow mode %d\n", node, slow_mode);
-
- cpufreq_cpu_put(policy);
-}
-
-static struct pmi_handler cbe_pmi_handler = {
- .type = PMI_TYPE_FREQ_CHANGE,
- .handle_pmi_message = cbe_cpufreq_handle_pmi,
-};
-
-void cbe_cpufreq_pmi_policy_init(struct cpufreq_policy *policy)
-{
- struct freq_qos_request *req;
- int ret;
-
- if (!cbe_cpufreq_has_pmi)
- return;
-
- req = kzalloc(sizeof(*req), GFP_KERNEL);
- if (!req)
- return;
-
- ret = freq_qos_add_request(&policy->constraints, req, FREQ_QOS_MAX,
- policy->freq_table[0].frequency);
- if (ret < 0) {
- pr_err("Failed to add freq constraint (%d)\n", ret);
- kfree(req);
- return;
- }
-
- policy->driver_data = req;
-}
-EXPORT_SYMBOL_GPL(cbe_cpufreq_pmi_policy_init);
-
-void cbe_cpufreq_pmi_policy_exit(struct cpufreq_policy *policy)
-{
- struct freq_qos_request *req = policy->driver_data;
-
- if (cbe_cpufreq_has_pmi) {
- freq_qos_remove_request(req);
- kfree(req);
- }
-}
-EXPORT_SYMBOL_GPL(cbe_cpufreq_pmi_policy_exit);
-
-void cbe_cpufreq_pmi_init(void)
-{
- if (!pmi_register_handler(&cbe_pmi_handler))
- cbe_cpufreq_has_pmi = true;
-}
-EXPORT_SYMBOL_GPL(cbe_cpufreq_pmi_init);
-
-void cbe_cpufreq_pmi_exit(void)
-{
- pmi_unregister_handler(&cbe_pmi_handler);
- cbe_cpufreq_has_pmi = false;
-}
-EXPORT_SYMBOL_GPL(cbe_cpufreq_pmi_exit);
diff --git a/drivers/cpufreq/qcom-cpufreq-hw.c b/drivers/cpufreq/qcom-cpufreq-hw.c
index b2e7e89feaac..8422704a3b10 100644
--- a/drivers/cpufreq/qcom-cpufreq-hw.c
+++ b/drivers/cpufreq/qcom-cpufreq-hw.c
@@ -306,7 +306,7 @@ static void qcom_get_related_cpus(int index, struct cpumask *m)
struct of_phandle_args args;
int cpu, ret;
- for_each_possible_cpu(cpu) {
+ for_each_present_cpu(cpu) {
cpu_np = of_cpu_device_node_get(cpu);
if (!cpu_np)
continue;
@@ -566,12 +566,6 @@ static int qcom_cpufreq_hw_cpu_init(struct cpufreq_policy *policy)
return -ENODEV;
}
- if (policy_has_boost_freq(policy)) {
- ret = cpufreq_enable_boost_support();
- if (ret)
- dev_warn(cpu_dev, "failed to enable boost: %d\n", ret);
- }
-
return qcom_cpufreq_hw_lmh_init(policy, index);
}
@@ -595,12 +589,6 @@ static void qcom_cpufreq_ready(struct cpufreq_policy *policy)
enable_irq(data->throttle_irq);
}
-static struct freq_attr *qcom_cpufreq_hw_attr[] = {
- &cpufreq_freq_attr_scaling_available_freqs,
- &cpufreq_freq_attr_scaling_boost_freqs,
- NULL
-};
-
static struct cpufreq_driver cpufreq_qcom_hw_driver = {
.flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK |
CPUFREQ_HAVE_GOVERNOR_PER_POLICY |
@@ -615,8 +603,8 @@ static struct cpufreq_driver cpufreq_qcom_hw_driver = {
.register_em = cpufreq_register_em_with_opp,
.fast_switch = qcom_cpufreq_hw_fast_switch,
.name = "qcom-cpufreq-hw",
- .attr = qcom_cpufreq_hw_attr,
.ready = qcom_cpufreq_ready,
+ .set_boost = cpufreq_boost_set_sw,
};
static unsigned long qcom_cpufreq_hw_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
diff --git a/drivers/cpufreq/qcom-cpufreq-nvmem.c b/drivers/cpufreq/qcom-cpufreq-nvmem.c
index 3a8ed723a23e..54f8117103c8 100644
--- a/drivers/cpufreq/qcom-cpufreq-nvmem.c
+++ b/drivers/cpufreq/qcom-cpufreq-nvmem.c
@@ -489,7 +489,7 @@ static int qcom_cpufreq_probe(struct platform_device *pdev)
nvmem_cell_put(speedbin_nvmem);
}
- for_each_possible_cpu(cpu) {
+ for_each_present_cpu(cpu) {
struct dev_pm_opp_config config = {
.supported_hw = NULL,
};
@@ -543,7 +543,7 @@ static int qcom_cpufreq_probe(struct platform_device *pdev)
dev_err(cpu_dev, "Failed to register platform device\n");
free_opp:
- for_each_possible_cpu(cpu) {
+ for_each_present_cpu(cpu) {
dev_pm_domain_detach_list(drv->cpus[cpu].pd_list);
dev_pm_opp_clear_config(drv->cpus[cpu].opp_token);
}
@@ -557,7 +557,7 @@ static void qcom_cpufreq_remove(struct platform_device *pdev)
platform_device_unregister(cpufreq_dt_pdev);
- for_each_possible_cpu(cpu) {
+ for_each_present_cpu(cpu) {
dev_pm_domain_detach_list(drv->cpus[cpu].pd_list);
dev_pm_opp_clear_config(drv->cpus[cpu].opp_token);
}
@@ -568,7 +568,7 @@ static int qcom_cpufreq_suspend(struct device *dev)
struct qcom_cpufreq_drv *drv = dev_get_drvdata(dev);
unsigned int cpu;
- for_each_possible_cpu(cpu)
+ for_each_present_cpu(cpu)
qcom_cpufreq_suspend_pd_devs(drv, cpu);
return 0;
diff --git a/drivers/cpufreq/qoriq-cpufreq.c b/drivers/cpufreq/qoriq-cpufreq.c
index a37ce051236c..8d1f5ac59132 100644
--- a/drivers/cpufreq/qoriq-cpufreq.c
+++ b/drivers/cpufreq/qoriq-cpufreq.c
@@ -254,7 +254,6 @@ static struct cpufreq_driver qoriq_cpufreq_driver = {
.verify = cpufreq_generic_frequency_table_verify,
.target_index = qoriq_cpufreq_target,
.get = cpufreq_generic_get,
- .attr = cpufreq_generic_attr,
};
static const struct of_device_id qoriq_cpufreq_blacklist[] = {
diff --git a/drivers/cpufreq/sc520_freq.c b/drivers/cpufreq/sc520_freq.c
index 330c8d6cf93c..103d2519dff7 100644
--- a/drivers/cpufreq/sc520_freq.c
+++ b/drivers/cpufreq/sc520_freq.c
@@ -92,7 +92,6 @@ static struct cpufreq_driver sc520_freq_driver = {
.target_index = sc520_freq_target,
.init = sc520_freq_cpu_init,
.name = "sc520_freq",
- .attr = cpufreq_generic_attr,
};
static const struct x86_cpu_id sc520_ids[] = {
diff --git a/drivers/cpufreq/scmi-cpufreq.c b/drivers/cpufreq/scmi-cpufreq.c
index b8fe758aeb01..c310aeebc8f3 100644
--- a/drivers/cpufreq/scmi-cpufreq.c
+++ b/drivers/cpufreq/scmi-cpufreq.c
@@ -104,7 +104,7 @@ scmi_get_sharing_cpus(struct device *cpu_dev, int domain,
int cpu, tdomain;
struct device *tcpu_dev;
- for_each_possible_cpu(cpu) {
+ for_each_present_cpu(cpu) {
if (cpu == cpu_dev->id)
continue;
@@ -171,12 +171,6 @@ scmi_get_rate_limit(u32 domain, bool has_fast_switch)
return rate_limit;
}
-static struct freq_attr *scmi_cpufreq_hw_attr[] = {
- &cpufreq_freq_attr_scaling_available_freqs,
- NULL,
- NULL,
-};
-
static int scmi_limit_notify_cb(struct notifier_block *nb, unsigned long event, void *data)
{
struct scmi_data *priv = container_of(nb, struct scmi_data, limit_notify_nb);
@@ -303,17 +297,6 @@ static int scmi_cpufreq_init(struct cpufreq_policy *policy)
policy->transition_delay_us =
scmi_get_rate_limit(domain, policy->fast_switch_possible);
- if (policy_has_boost_freq(policy)) {
- ret = cpufreq_enable_boost_support();
- if (ret) {
- dev_warn(cpu_dev, "failed to enable boost: %d\n", ret);
- goto out_free_table;
- } else {
- scmi_cpufreq_hw_attr[1] = &cpufreq_freq_attr_scaling_boost_freqs;
- scmi_cpufreq_driver.boost_enabled = true;
- }
- }
-
ret = freq_qos_add_request(&policy->constraints, &priv->limits_freq_req, FREQ_QOS_MAX,
FREQ_QOS_MAX_DEFAULT_VALUE);
if (ret < 0) {
@@ -395,13 +378,13 @@ static struct cpufreq_driver scmi_cpufreq_driver = {
CPUFREQ_NEED_INITIAL_FREQ_CHECK |
CPUFREQ_IS_COOLING_DEV,
.verify = cpufreq_generic_frequency_table_verify,
- .attr = scmi_cpufreq_hw_attr,
.target_index = scmi_cpufreq_set_target,
.fast_switch = scmi_cpufreq_fast_switch,
.get = scmi_cpufreq_get_rate,
.init = scmi_cpufreq_init,
.exit = scmi_cpufreq_exit,
.register_em = scmi_cpufreq_register_em,
+ .set_boost = cpufreq_boost_set_sw,
};
static int scmi_cpufreq_probe(struct scmi_device *sdev)
diff --git a/drivers/cpufreq/scpi-cpufreq.c b/drivers/cpufreq/scpi-cpufreq.c
index cd89c1b9832c..17cda84f00df 100644
--- a/drivers/cpufreq/scpi-cpufreq.c
+++ b/drivers/cpufreq/scpi-cpufreq.c
@@ -39,8 +39,9 @@ static unsigned int scpi_cpufreq_get_rate(unsigned int cpu)
static int
scpi_cpufreq_set_target(struct cpufreq_policy *policy, unsigned int index)
{
- u64 rate = policy->freq_table[index].frequency * 1000;
+ unsigned long freq_khz = policy->freq_table[index].frequency;
struct scpi_data *priv = policy->driver_data;
+ unsigned long rate = freq_khz * 1000;
int ret;
ret = clk_set_rate(priv->clk, rate);
@@ -48,7 +49,7 @@ scpi_cpufreq_set_target(struct cpufreq_policy *policy, unsigned int index)
if (ret)
return ret;
- if (clk_get_rate(priv->clk) != rate)
+ if (clk_get_rate(priv->clk) / 1000 != freq_khz)
return -EIO;
return 0;
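
Comparing the clock readback in kHz rather than raw Hz avoids a spurious -EIO when the clock framework rounds the requested rate by a few Hz. A tiny sketch of why the old equality check could fail; the 3 Hz offset is an arbitrary illustration of PLL rounding:

#include <stdio.h>

int main(void)
{
        unsigned long freq_khz = 1000000;               /* requested: 1 GHz */
        unsigned long requested_hz = freq_khz * 1000;
        unsigned long actual_hz = requested_hz + 3;     /* PLL rounded the rate */

        /* old check: exact Hz match -> false -> -EIO */
        printf("hz match:  %d\n", actual_hz == requested_hz);

        /* new check: compare at kHz granularity -> true */
        printf("khz match: %d\n", actual_hz / 1000 == freq_khz);
        return 0;
}
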
@@ -64,7 +65,7 @@ scpi_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask)
if (domain < 0)
return domain;
- for_each_possible_cpu(cpu) {
+ for_each_present_cpu(cpu) {
if (cpu == cpu_dev->id)
continue;
@@ -183,7 +184,6 @@ static struct cpufreq_driver scpi_cpufreq_driver = {
CPUFREQ_NEED_INITIAL_FREQ_CHECK |
CPUFREQ_IS_COOLING_DEV,
.verify = cpufreq_generic_frequency_table_verify,
- .attr = cpufreq_generic_attr,
.get = scpi_cpufreq_get_rate,
.init = scpi_cpufreq_init,
.exit = scpi_cpufreq_exit,
diff --git a/drivers/cpufreq/sh-cpufreq.c b/drivers/cpufreq/sh-cpufreq.c
index aa74036d0420..9c0b01e00508 100644
--- a/drivers/cpufreq/sh-cpufreq.c
+++ b/drivers/cpufreq/sh-cpufreq.c
@@ -151,7 +151,6 @@ static struct cpufreq_driver sh_cpufreq_driver = {
.verify = sh_cpufreq_verify,
.init = sh_cpufreq_cpu_init,
.exit = sh_cpufreq_cpu_exit,
- .attr = cpufreq_generic_attr,
};
static int __init sh_cpufreq_module_init(void)
diff --git a/drivers/cpufreq/spear-cpufreq.c b/drivers/cpufreq/spear-cpufreq.c
index d8ab5b01d46d..707c71090cc3 100644
--- a/drivers/cpufreq/spear-cpufreq.c
+++ b/drivers/cpufreq/spear-cpufreq.c
@@ -165,7 +165,6 @@ static struct cpufreq_driver spear_cpufreq_driver = {
.target_index = spear_cpufreq_target,
.get = cpufreq_generic_get,
.init = spear_cpufreq_init,
- .attr = cpufreq_generic_attr,
};
static int spear_cpufreq_probe(struct platform_device *pdev)
diff --git a/drivers/cpufreq/speedstep-centrino.c b/drivers/cpufreq/speedstep-centrino.c
index 3fafedb983b5..3e6e85a92212 100644
--- a/drivers/cpufreq/speedstep-centrino.c
+++ b/drivers/cpufreq/speedstep-centrino.c
@@ -507,7 +507,6 @@ static struct cpufreq_driver centrino_driver = {
.verify = cpufreq_generic_frequency_table_verify,
.target_index = centrino_target,
.get = get_cur_freq,
- .attr = cpufreq_generic_attr,
};
/*
diff --git a/drivers/cpufreq/speedstep-ich.c b/drivers/cpufreq/speedstep-ich.c
index f2076d72bf39..262cfbde9ca7 100644
--- a/drivers/cpufreq/speedstep-ich.c
+++ b/drivers/cpufreq/speedstep-ich.c
@@ -315,7 +315,6 @@ static struct cpufreq_driver speedstep_driver = {
.target_index = speedstep_target,
.init = speedstep_cpu_init,
.get = speedstep_get,
- .attr = cpufreq_generic_attr,
};
static const struct x86_cpu_id ss_smi_ids[] = {
diff --git a/drivers/cpufreq/speedstep-smi.c b/drivers/cpufreq/speedstep-smi.c
index 0ce9d4b6dfcc..39265884c3f1 100644
--- a/drivers/cpufreq/speedstep-smi.c
+++ b/drivers/cpufreq/speedstep-smi.c
@@ -295,7 +295,6 @@ static struct cpufreq_driver speedstep_driver = {
.init = speedstep_cpu_init,
.get = speedstep_get,
.resume = speedstep_resume,
- .attr = cpufreq_generic_attr,
};
static const struct x86_cpu_id ss_smi_ids[] = {
diff --git a/drivers/cpufreq/sun50i-cpufreq-nvmem.c b/drivers/cpufreq/sun50i-cpufreq-nvmem.c
index 17d6a149f580..47d6840b3489 100644
--- a/drivers/cpufreq/sun50i-cpufreq-nvmem.c
+++ b/drivers/cpufreq/sun50i-cpufreq-nvmem.c
@@ -262,7 +262,7 @@ static int sun50i_cpufreq_nvmem_probe(struct platform_device *pdev)
snprintf(name, sizeof(name), "speed%d", speed);
config.prop_name = name;
- for_each_possible_cpu(cpu) {
+ for_each_present_cpu(cpu) {
struct device *cpu_dev = get_cpu_device(cpu);
if (!cpu_dev) {
@@ -288,7 +288,7 @@ static int sun50i_cpufreq_nvmem_probe(struct platform_device *pdev)
pr_err("Failed to register platform device\n");
free_opp:
- for_each_possible_cpu(cpu)
+ for_each_present_cpu(cpu)
dev_pm_opp_clear_config(opp_tokens[cpu]);
kfree(opp_tokens);
@@ -302,7 +302,7 @@ static void sun50i_cpufreq_nvmem_remove(struct platform_device *pdev)
platform_device_unregister(cpufreq_dt_pdev);
- for_each_possible_cpu(cpu)
+ for_each_present_cpu(cpu)
dev_pm_opp_clear_config(opp_tokens[cpu]);
kfree(opp_tokens);
diff --git a/drivers/cpufreq/tegra186-cpufreq.c b/drivers/cpufreq/tegra186-cpufreq.c
index c7761eb99f3c..cbabb726c664 100644
--- a/drivers/cpufreq/tegra186-cpufreq.c
+++ b/drivers/cpufreq/tegra186-cpufreq.c
@@ -73,11 +73,18 @@ static int tegra186_cpufreq_init(struct cpufreq_policy *policy)
{
struct tegra186_cpufreq_data *data = cpufreq_get_driver_data();
unsigned int cluster = data->cpus[policy->cpu].bpmp_cluster_id;
+ u32 cpu;
policy->freq_table = data->clusters[cluster].table;
policy->cpuinfo.transition_latency = 300 * 1000;
policy->driver_data = NULL;
+ /* set same policy for all cpus in a cluster */
+ for (cpu = 0; cpu < ARRAY_SIZE(tegra186_cpus); cpu++) {
+ if (data->cpus[cpu].bpmp_cluster_id == cluster)
+ cpumask_set_cpu(cpu, policy->cpus);
+ }
+
return 0;
}
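
The init callback now walks the static CPU table and adds every CPU sharing the policy CPU's BPMP cluster to policy->cpus, so a single policy covers the whole cluster. A hedged sketch of the same grouping using a plain bitmask; the CPU-to-cluster map below is made up for illustration:

#include <stdio.h>

struct cpu_info { unsigned int cluster_id; };

/* Made-up 6-CPU topology: CPUs 0-1 in cluster 0, 2-3 in 1, 4-5 in 2. */
static const struct cpu_info cpus[] = {
        { 0 }, { 0 }, { 1 }, { 1 }, { 2 }, { 2 },
};

int main(void)
{
        unsigned int policy_cpu = 3;
        unsigned int cluster = cpus[policy_cpu].cluster_id;
        unsigned long mask = 0;

        /* Set the same policy for every CPU in the cluster. */
        for (unsigned int cpu = 0; cpu < sizeof(cpus) / sizeof(cpus[0]); cpu++)
                if (cpus[cpu].cluster_id == cluster)
                        mask |= 1UL << cpu;

        printf("policy->cpus mask for CPU %u: 0x%lx\n", policy_cpu, mask);
        return 0;
}
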
@@ -123,7 +130,6 @@ static struct cpufreq_driver tegra186_cpufreq_driver = {
.verify = cpufreq_generic_frequency_table_verify,
.target_index = tegra186_cpufreq_set_target,
.init = tegra186_cpufreq_init,
- .attr = cpufreq_generic_attr,
};
static struct cpufreq_frequency_table *init_vhint_table(
diff --git a/drivers/cpufreq/tegra194-cpufreq.c b/drivers/cpufreq/tegra194-cpufreq.c
index 9055dd398e7f..9b4f516f313e 100644
--- a/drivers/cpufreq/tegra194-cpufreq.c
+++ b/drivers/cpufreq/tegra194-cpufreq.c
@@ -589,7 +589,6 @@ static struct cpufreq_driver tegra194_cpufreq_driver = {
.exit = tegra194_cpufreq_exit,
.online = tegra194_cpufreq_online,
.offline = tegra194_cpufreq_offline,
- .attr = cpufreq_generic_attr,
};
static struct tegra_cpufreq_ops tegra194_cpufreq_ops = {
diff --git a/drivers/cpufreq/vexpress-spc-cpufreq.c b/drivers/cpufreq/vexpress-spc-cpufreq.c
index 0f86cdb7ec8a..65fea47b82e6 100644
--- a/drivers/cpufreq/vexpress-spc-cpufreq.c
+++ b/drivers/cpufreq/vexpress-spc-cpufreq.c
@@ -471,7 +471,6 @@ static struct cpufreq_driver ve_spc_cpufreq_driver = {
.init = ve_spc_cpufreq_init,
.exit = ve_spc_cpufreq_exit,
.register_em = cpufreq_register_em_with_opp,
- .attr = cpufreq_generic_attr,
};
#ifdef CONFIG_BL_SWITCHER
diff --git a/drivers/cpufreq/virtual-cpufreq.c b/drivers/cpufreq/virtual-cpufreq.c
index a050b3a6737f..7dd1b0c263c7 100644
--- a/drivers/cpufreq/virtual-cpufreq.c
+++ b/drivers/cpufreq/virtual-cpufreq.c
@@ -138,7 +138,7 @@ static int virt_cpufreq_get_sharing_cpus(struct cpufreq_policy *policy)
cur_perf_domain = readl_relaxed(base + policy->cpu *
PER_CPU_OFFSET + REG_PERF_DOMAIN_OFFSET);
- for_each_possible_cpu(cpu) {
+ for_each_present_cpu(cpu) {
cpu_dev = get_cpu_device(cpu);
if (!cpu_dev)
continue;
@@ -265,7 +265,6 @@ static struct cpufreq_driver cpufreq_virt_driver = {
.verify = virt_cpufreq_verify_policy,
.target = virt_cpufreq_target,
.fast_switch = virt_cpufreq_fast_switch,
- .attr = cpufreq_generic_attr,
};
static int virt_cpufreq_driver_probe(struct platform_device *pdev)
diff --git a/drivers/cpuidle/Makefile b/drivers/cpuidle/Makefile
index d103342b7cfc..1de9e92c5b0f 100644
--- a/drivers/cpuidle/Makefile
+++ b/drivers/cpuidle/Makefile
@@ -3,6 +3,9 @@
# Makefile for cpuidle.
#
+# Branch profiling isn't noinstr-safe
+ccflags-$(CONFIG_TRACE_BRANCH_PROFILING) += -DDISABLE_BRANCH_PROFILING
+
obj-y += cpuidle.o driver.o governor.o sysfs.o governors/
obj-$(CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED) += coupled.o
obj-$(CONFIG_DT_IDLE_STATES) += dt_idle_states.o
diff --git a/drivers/cpuidle/cpuidle-arm.c b/drivers/cpuidle/cpuidle-arm.c
index caba6f4bb1b7..e044fefdb816 100644
--- a/drivers/cpuidle/cpuidle-arm.c
+++ b/drivers/cpuidle/cpuidle-arm.c
@@ -137,9 +137,9 @@ out_kfree_drv:
/*
* arm_idle_init - Initializes arm cpuidle driver
*
- * Initializes arm cpuidle driver for all CPUs, if any CPU fails
- * to register cpuidle driver then rollback to cancel all CPUs
- * registration.
+ * Initializes arm cpuidle driver for all present CPUs, if any
+ * CPU fails to register cpuidle driver then rollback to cancel
+ * all CPUs registration.
*/
static int __init arm_idle_init(void)
{
@@ -147,7 +147,7 @@ static int __init arm_idle_init(void)
struct cpuidle_driver *drv;
struct cpuidle_device *dev;
- for_each_possible_cpu(cpu) {
+ for_each_present_cpu(cpu) {
ret = arm_idle_init_cpu(cpu);
if (ret)
goto out_fail;
diff --git a/drivers/cpuidle/cpuidle-big_little.c b/drivers/cpuidle/cpuidle-big_little.c
index 74972deda0ea..4abba42fcc31 100644
--- a/drivers/cpuidle/cpuidle-big_little.c
+++ b/drivers/cpuidle/cpuidle-big_little.c
@@ -148,7 +148,7 @@ static int __init bl_idle_driver_init(struct cpuidle_driver *drv, int part_id)
if (!cpumask)
return -ENOMEM;
- for_each_possible_cpu(cpu)
+ for_each_present_cpu(cpu)
if (smp_cpuid_part(cpu) == part_id)
cpumask_set_cpu(cpu, cpumask);
diff --git a/drivers/cpuidle/cpuidle-psci.c b/drivers/cpuidle/cpuidle-psci.c
index 2562dc001fc1..b46a83f5ffe4 100644
--- a/drivers/cpuidle/cpuidle-psci.c
+++ b/drivers/cpuidle/cpuidle-psci.c
@@ -25,6 +25,7 @@
#include <linux/syscore_ops.h>
#include <asm/cpuidle.h>
+#include <trace/events/power.h>
#include "cpuidle-psci.h"
#include "dt_idle_states.h"
@@ -74,7 +75,9 @@ static __cpuidle int __psci_enter_domain_idle_state(struct cpuidle_device *dev,
if (!state)
state = states[idx];
+ trace_psci_domain_idle_enter(dev->cpu, state, s2idle);
ret = psci_cpu_suspend_enter(state) ? -1 : idx;
+ trace_psci_domain_idle_exit(dev->cpu, state, s2idle);
if (s2idle)
dev_pm_genpd_resume(pd_dev);
@@ -400,7 +403,7 @@ deinit:
/*
* psci_idle_probe - Initializes PSCI cpuidle driver
*
- * Initializes PSCI cpuidle driver for all CPUs, if any CPU fails
+ * Initializes PSCI cpuidle driver for all present CPUs, if any CPU fails
* to register cpuidle driver then rollback to cancel all CPUs
* registration.
*/
@@ -410,7 +413,7 @@ static int psci_cpuidle_probe(struct platform_device *pdev)
struct cpuidle_driver *drv;
struct cpuidle_device *dev;
- for_each_possible_cpu(cpu) {
+ for_each_present_cpu(cpu) {
ret = psci_idle_init_cpu(&pdev->dev, cpu);
if (ret)
goto out_fail;
diff --git a/drivers/cpuidle/cpuidle-qcom-spm.c b/drivers/cpuidle/cpuidle-qcom-spm.c
index 3ab240e0e122..5f386761b156 100644
--- a/drivers/cpuidle/cpuidle-qcom-spm.c
+++ b/drivers/cpuidle/cpuidle-qcom-spm.c
@@ -135,7 +135,7 @@ static int spm_cpuidle_drv_probe(struct platform_device *pdev)
if (ret)
return dev_err_probe(&pdev->dev, ret, "set warm boot addr failed");
- for_each_possible_cpu(cpu) {
+ for_each_present_cpu(cpu) {
ret = spm_cpuidle_register(&pdev->dev, cpu);
if (ret && ret != -ENODEV) {
dev_err(&pdev->dev,
diff --git a/drivers/cpuidle/cpuidle-riscv-sbi.c b/drivers/cpuidle/cpuidle-riscv-sbi.c
index 0c92a628bbd4..0fe1ece9fbdc 100644
--- a/drivers/cpuidle/cpuidle-riscv-sbi.c
+++ b/drivers/cpuidle/cpuidle-riscv-sbi.c
@@ -529,8 +529,8 @@ static int sbi_cpuidle_probe(struct platform_device *pdev)
return ret;
}
- /* Initialize CPU idle driver for each CPU */
- for_each_possible_cpu(cpu) {
+ /* Initialize CPU idle driver for each present CPU */
+ for_each_present_cpu(cpu) {
ret = sbi_cpuidle_init_cpu(&pdev->dev, cpu);
if (ret) {
pr_debug("HART%ld: idle driver init failed\n",
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
index 28363bfa3e4c..39aa0aea61c6 100644
--- a/drivers/cpuidle/governors/menu.c
+++ b/drivers/cpuidle/governors/menu.c
@@ -41,7 +41,7 @@
* the C state is required to actually break even on this cost. CPUIDLE
* provides us this duration in the "target_residency" field. So all that we
* need is a good prediction of how long we'll be idle. Like the traditional
- * menu governor, we start with the actual known "next timer event" time.
+ * menu governor, we take the actual known "next timer event" time.
*
* Since there are other source of wakeups (interrupts for example) than
* the next timer event, this estimation is rather optimistic. To get a
@@ -50,30 +50,21 @@
* duration always was 50% of the next timer tick, the correction factor will
* be 0.5.
*
- * menu uses a running average for this correction factor, however it uses a
- * set of factors, not just a single factor. This stems from the realization
- * that the ratio is dependent on the order of magnitude of the expected
- * duration; if we expect 500 milliseconds of idle time the likelihood of
- * getting an interrupt very early is much higher than if we expect 50 micro
- * seconds of idle time. A second independent factor that has big impact on
- * the actual factor is if there is (disk) IO outstanding or not.
- * (as a special twist, we consider every sleep longer than 50 milliseconds
- * as perfect; there are no power gains for sleeping longer than this)
- *
- * For these two reasons we keep an array of 12 independent factors, that gets
- * indexed based on the magnitude of the expected duration as well as the
- * "is IO outstanding" property.
+ * menu uses a running average for this correction factor, but it uses a set of
+ * factors, not just a single factor. This stems from the realization that the
+ * ratio is dependent on the order of magnitude of the expected duration; if we
+ * expect 500 milliseconds of idle time the likelihood of getting an interrupt
+ * very early is much higher than if we expect 50 microseconds of idle time.
+ * For this reason, menu keeps an array of 6 independent factors that gets
+ * indexed based on the magnitude of the expected duration.
*
* Repeatable-interval-detector
* ----------------------------
* There are some cases where "next timer" is a completely unusable predictor:
* Those cases where the interval is fixed, for example due to hardware
- * interrupt mitigation, but also due to fixed transfer rate devices such as
- * mice.
+ * interrupt mitigation, but also due to fixed transfer rate devices like mice.
* For this, we use a different predictor: We track the duration of the last 8
- * intervals and if the stand deviation of these 8 intervals is below a
- * threshold value, we use the average of these intervals as prediction.
- *
+ * intervals and use them to estimate the duration of the next one.
*/
struct menu_device {
@@ -116,53 +107,52 @@ static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev);
*/
static unsigned int get_typical_interval(struct menu_device *data)
{
- int i, divisor;
- unsigned int min, max, thresh, avg;
- uint64_t sum, variance;
-
- thresh = INT_MAX; /* Discard outliers above this value */
+ s64 value, min_thresh = -1, max_thresh = UINT_MAX;
+ unsigned int max, min, divisor;
+ u64 avg, variance, avg_sq;
+ int i;
again:
-
- /* First calculate the average of past intervals */
- min = UINT_MAX;
+ /* Compute the average and variance of past intervals. */
max = 0;
- sum = 0;
+ min = UINT_MAX;
+ avg = 0;
+ variance = 0;
divisor = 0;
for (i = 0; i < INTERVALS; i++) {
- unsigned int value = data->intervals[i];
- if (value <= thresh) {
- sum += value;
- divisor++;
- if (value > max)
- max = value;
-
- if (value < min)
- min = value;
- }
+ value = data->intervals[i];
+ /*
+ * Discard the samples outside the interval between the min and
+ * max thresholds.
+ */
+ if (value <= min_thresh || value >= max_thresh)
+ continue;
+
+ divisor++;
+
+ avg += value;
+ variance += value * value;
+
+ if (value > max)
+ max = value;
+
+ if (value < min)
+ min = value;
}
if (!max)
return UINT_MAX;
- if (divisor == INTERVALS)
- avg = sum >> INTERVAL_SHIFT;
- else
- avg = div_u64(sum, divisor);
-
- /* Then try to determine variance */
- variance = 0;
- for (i = 0; i < INTERVALS; i++) {
- unsigned int value = data->intervals[i];
- if (value <= thresh) {
- int64_t diff = (int64_t)value - avg;
- variance += diff * diff;
- }
- }
- if (divisor == INTERVALS)
+ if (divisor == INTERVALS) {
+ avg >>= INTERVAL_SHIFT;
variance >>= INTERVAL_SHIFT;
- else
+ } else {
+ do_div(avg, divisor);
do_div(variance, divisor);
+ }
+
+ avg_sq = avg * avg;
+ variance -= avg_sq;
/*
* The typical interval is obtained when standard deviation is
@@ -177,25 +167,40 @@ again:
* Use this result only if there is no timer to wake us up sooner.
*/
if (likely(variance <= U64_MAX/36)) {
- if ((((u64)avg*avg > variance*36) && (divisor * 4 >= INTERVALS * 3))
- || variance <= 400) {
+ if ((avg_sq > variance * 36 && divisor * 4 >= INTERVALS * 3) ||
+ variance <= 400)
return avg;
- }
}
/*
- * If we have outliers to the upside in our distribution, discard
- * those by setting the threshold to exclude these outliers, then
+ * If there are outliers, discard them by setting thresholds to exclude
+ * data points at a large enough distance from the average, then
* calculate the average and standard deviation again. Once we get
- * down to the bottom 3/4 of our samples, stop excluding samples.
+ * down to the last 3/4 of our samples, stop excluding samples.
*
* This can deal with workloads that have long pauses interspersed
* with sporadic activity with a bunch of short pauses.
*/
- if ((divisor * 4) <= INTERVALS * 3)
+ if (divisor * 4 <= INTERVALS * 3) {
+ /*
+ * If there are sufficiently many data points still under
+ * consideration after the outliers have been eliminated,
+ * returning without a prediction would be a mistake because it
+ * is likely that the next interval will not exceed the current
+ * maximum, so return the latter in that case.
+ */
+ if (divisor >= INTERVALS / 2)
+ return max;
+
return UINT_MAX;
+ }
+
+ /* Update the thresholds for the next round. */
+ if (avg - min > max - avg)
+ min_thresh = min;
+ else
+ max_thresh = max;
- thresh = max - 1;
goto again;
}
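
The rewritten get_typical_interval() above folds the old two-pass average/variance computation into a single pass and replaces the single upper outlier threshold with a pair of min/max thresholds. A rough user-space model of that loop, under the same INTERVALS=8 assumption (the sample data and function name are invented; this is a sketch, not the kernel code, and it omits the kernel's U64_MAX/36 overflow guard):

#include <stdint.h>
#include <stdio.h>

#define INTERVALS 8

static unsigned int typical_interval(const uint32_t *intervals)
{
	int64_t min_thresh = -1, max_thresh = UINT32_MAX;

	for (;;) {
		uint64_t avg = 0, variance = 0;
		uint32_t min = UINT32_MAX, max = 0;
		unsigned int divisor = 0;
		int i;

		for (i = 0; i < INTERVALS; i++) {
			uint64_t value = intervals[i];

			/* Drop samples outside the (min_thresh, max_thresh) window. */
			if ((int64_t)value <= min_thresh || (int64_t)value >= max_thresh)
				continue;

			divisor++;
			avg += value;
			variance += value * value;
			if (value > max)
				max = value;
			if (value < min)
				min = value;
		}
		if (!max)
			return UINT32_MAX;

		avg /= divisor;
		variance = variance / divisor - avg * avg;

		/* Accept when the standard deviation is small enough. */
		if ((avg * avg > variance * 36 && divisor * 4 >= INTERVALS * 3) ||
		    variance <= 400)
			return (unsigned int)avg;

		/* Too few samples left: fall back to the current maximum. */
		if (divisor * 4 <= INTERVALS * 3)
			return divisor >= INTERVALS / 2 ? max : UINT32_MAX;

		/* Exclude whichever extreme is farther from the average, retry. */
		if (avg - min > max - avg)
			min_thresh = min;
		else
			max_thresh = max;
	}
}

int main(void)
{
	const uint32_t samples[INTERVALS] = { 90, 110, 100, 95, 4000, 105, 98, 102 };

	/* The 4000us outlier is discarded on the second pass; prints 100. */
	printf("predicted interval: %u\n", typical_interval(samples));
	return 0;
}
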
diff --git a/drivers/crypto/stm32/stm32-crc32.c b/drivers/crypto/stm32/stm32-crc32.c
index de4d0402f133..fd29785a3ecf 100644
--- a/drivers/crypto/stm32/stm32-crc32.c
+++ b/drivers/crypto/stm32/stm32-crc32.c
@@ -162,7 +162,7 @@ static int burst_update(struct shash_desc *desc, const u8 *d8,
if (mctx->poly == CRC32_POLY_LE)
ctx->partial = crc32_le(ctx->partial, d8, length);
else
- ctx->partial = __crc32c_le(ctx->partial, d8, length);
+ ctx->partial = crc32c(ctx->partial, d8, length);
goto pm_out;
}
diff --git a/drivers/devfreq/event/rockchip-dfi.c b/drivers/devfreq/event/rockchip-dfi.c
index e2a1e4463b6f..0470d7c175f4 100644
--- a/drivers/devfreq/event/rockchip-dfi.c
+++ b/drivers/devfreq/event/rockchip-dfi.c
@@ -642,8 +642,7 @@ static int rockchip_ddr_perf_init(struct rockchip_dfi *dfi)
if (ret)
return ret;
- hrtimer_init(&dfi->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- dfi->timer.function = rockchip_dfi_timer;
+ hrtimer_setup(&dfi->timer, rockchip_dfi_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
switch (dfi->ddr_type) {
case ROCKCHIP_DDRTYPE_LPDDR2:
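
The rockchip-dfi change above is one instance of a tree-wide conversion from the two-step hrtimer_init() plus function-pointer assignment to the combined hrtimer_setup() call. A minimal sketch of the same conversion on an invented timer (all names here are hypothetical):

#include <linux/hrtimer.h>
#include <linux/ktime.h>

static struct hrtimer example_timer;

static enum hrtimer_restart example_timer_fn(struct hrtimer *t)
{
	/* Real work would go here; this sketch does not rearm the timer. */
	return HRTIMER_NORESTART;
}

static void example_timer_start(void)
{
	/*
	 * Old form:
	 *	hrtimer_init(&example_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	 *	example_timer.function = example_timer_fn;
	 * Combined form, as used in the hunk above:
	 */
	hrtimer_setup(&example_timer, example_timer_fn, CLOCK_MONOTONIC,
		      HRTIMER_MODE_REL);
	hrtimer_start(&example_timer, ms_to_ktime(100), HRTIMER_MODE_REL);
}
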
diff --git a/drivers/dpll/dpll_core.c b/drivers/dpll/dpll_core.c
index 32019dc33cca..20bdc52f63a5 100644
--- a/drivers/dpll/dpll_core.c
+++ b/drivers/dpll/dpll_core.c
@@ -443,8 +443,11 @@ static void dpll_pin_prop_free(struct dpll_pin_properties *prop)
static int dpll_pin_prop_dup(const struct dpll_pin_properties *src,
struct dpll_pin_properties *dst)
{
+ if (WARN_ON(src->freq_supported && !src->freq_supported_num))
+ return -EINVAL;
+
memcpy(dst, src, sizeof(*dst));
- if (src->freq_supported && src->freq_supported_num) {
+ if (src->freq_supported) {
size_t freq_size = src->freq_supported_num *
sizeof(*src->freq_supported);
dst->freq_supported = kmemdup(src->freq_supported,
@@ -505,7 +508,7 @@ dpll_pin_alloc(u64 clock_id, u32 pin_idx, struct module *module,
xa_init_flags(&pin->parent_refs, XA_FLAGS_ALLOC);
ret = xa_alloc_cyclic(&dpll_pin_xa, &pin->id, pin, xa_limit_32b,
&dpll_pin_xa_id, GFP_KERNEL);
- if (ret)
+ if (ret < 0)
goto err_xa_alloc;
return pin;
err_xa_alloc:
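
The dpll_pin_alloc() change above matters because xa_alloc_cyclic() returns 1, not 0, when the entry is stored successfully but the cyclic ID counter has wrapped; only a negative return is an error. A small sketch of the corrected convention (the xarray and helper names are invented):

#include <linux/xarray.h>

static DEFINE_XARRAY_ALLOC(example_pins);
static u32 example_next_id;

/* Store an entry and hand back its ID. A return of 1 from
 * xa_alloc_cyclic() only means the ID space wrapped around,
 * which is still a success here.
 */
static int example_pin_store(void *pin, u32 *id)
{
	int ret = xa_alloc_cyclic(&example_pins, id, pin, xa_limit_32b,
				  &example_next_id, GFP_KERNEL);

	return ret < 0 ? ret : 0;
}
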
diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig
index 2051a7c944a5..19ad3c3b675d 100644
--- a/drivers/edac/Kconfig
+++ b/drivers/edac/Kconfig
@@ -75,6 +75,34 @@ config EDAC_GHES
In doubt, say 'Y'.
+config EDAC_SCRUB
+ bool "EDAC scrub feature"
+ help
+ The EDAC scrub feature is optional and is designed to control the
+ memory scrubbers in the system. The common sysfs scrub interface
+ abstracts the control of various arbitrary scrubbing functionalities
+ into a unified set of functions.
+ Say 'y/n' to enable/disable EDAC scrub feature.
+
+config EDAC_ECS
+ bool "EDAC ECS (Error Check Scrub) feature"
+ help
+ The EDAC ECS feature is optional and is designed to control on-die
+ error check scrub (e.g., DDR5 ECS) in the system. The common sysfs
+ ECS interface abstracts the control of various ECS functionalities
+ into a unified set of functions.
+ Say 'y/n' to enable/disable EDAC ECS feature.
+
+config EDAC_MEM_REPAIR
+ bool "EDAC memory repair feature"
+ help
+ The EDAC memory repair feature is optional and is designed to control
+ the memory devices with repair features, such as Post Package Repair
+ (PPR), memory sparing etc. The common sysfs memory repair interface
+ abstracts the control of various memory repair functionalities into
+ a unified set of functions.
+ Say 'y/n' to enable/disable EDAC memory repair feature.
+
config EDAC_AMD64
tristate "AMD64 (Opteron, Athlon64)"
depends on AMD_NB && EDAC_DECODE_MCE
@@ -168,7 +196,7 @@ config EDAC_I3200
config EDAC_IE31200
tristate "Intel e312xx"
- depends on PCI && X86
+ depends on PCI && X86 && X86_MCE_INTEL
help
Support for error detection and correction on the Intel
E3-1200 based DRAM controllers.
diff --git a/drivers/edac/Makefile b/drivers/edac/Makefile
index 89789ba8275f..a8f2d8f6c894 100644
--- a/drivers/edac/Makefile
+++ b/drivers/edac/Makefile
@@ -12,6 +12,9 @@ edac_core-y := edac_mc.o edac_device.o edac_mc_sysfs.o
edac_core-y += edac_module.o edac_device_sysfs.o wq.o
edac_core-$(CONFIG_EDAC_DEBUG) += debugfs.o
+edac_core-$(CONFIG_EDAC_SCRUB) += scrub.o
+edac_core-$(CONFIG_EDAC_ECS) += ecs.o
+edac_core-$(CONFIG_EDAC_MEM_REPAIR) += mem_repair.o
ifdef CONFIG_PCI
edac_core-y += edac_pci.o edac_pci_sysfs.o
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index 8414ceb43e4a..90f0eb7cc5b9 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/ras.h>
+#include <linux/string_choices.h>
#include "amd64_edac.h"
#include <asm/amd_nb.h>
#include <asm/amd_node.h>
@@ -1171,22 +1172,21 @@ static void debug_dump_dramcfg_low(struct amd64_pvt *pvt, u32 dclr, int chan)
edac_dbg(1, " LRDIMM %dx rank multiply\n", (dcsm & 0x3));
}
- edac_dbg(1, "All DIMMs support ECC:%s\n",
- (dclr & BIT(19)) ? "yes" : "no");
+ edac_dbg(1, "All DIMMs support ECC: %s\n", str_yes_no(dclr & BIT(19)));
edac_dbg(1, " PAR/ERR parity: %s\n",
- (dclr & BIT(8)) ? "enabled" : "disabled");
+ str_enabled_disabled(dclr & BIT(8)));
if (pvt->fam == 0x10)
edac_dbg(1, " DCT 128bit mode width: %s\n",
(dclr & BIT(11)) ? "128b" : "64b");
edac_dbg(1, " x4 logical DIMMs present: L0: %s L1: %s L2: %s L3: %s\n",
- (dclr & BIT(12)) ? "yes" : "no",
- (dclr & BIT(13)) ? "yes" : "no",
- (dclr & BIT(14)) ? "yes" : "no",
- (dclr & BIT(15)) ? "yes" : "no");
+ str_yes_no(dclr & BIT(12)),
+ str_yes_no(dclr & BIT(13)),
+ str_yes_no(dclr & BIT(14)),
+ str_yes_no(dclr & BIT(15)));
}
#define CS_EVEN_PRIMARY BIT(0)
@@ -1353,14 +1353,14 @@ static void umc_dump_misc_regs(struct amd64_pvt *pvt)
edac_dbg(1, "UMC%d UMC cap high: 0x%x\n", i, umc->umc_cap_hi);
edac_dbg(1, "UMC%d ECC capable: %s, ChipKill ECC capable: %s\n",
- i, (umc->umc_cap_hi & BIT(30)) ? "yes" : "no",
- (umc->umc_cap_hi & BIT(31)) ? "yes" : "no");
+ i, str_yes_no(umc->umc_cap_hi & BIT(30)),
+ str_yes_no(umc->umc_cap_hi & BIT(31)));
edac_dbg(1, "UMC%d All DIMMs support ECC: %s\n",
- i, (umc->umc_cfg & BIT(12)) ? "yes" : "no");
+ i, str_yes_no(umc->umc_cfg & BIT(12)));
edac_dbg(1, "UMC%d x4 DIMMs present: %s\n",
- i, (umc->dimm_cfg & BIT(6)) ? "yes" : "no");
+ i, str_yes_no(umc->dimm_cfg & BIT(6)));
edac_dbg(1, "UMC%d x16 DIMMs present: %s\n",
- i, (umc->dimm_cfg & BIT(7)) ? "yes" : "no");
+ i, str_yes_no(umc->dimm_cfg & BIT(7)));
umc_debug_display_dimm_sizes(pvt, i);
}
@@ -1371,11 +1371,11 @@ static void dct_dump_misc_regs(struct amd64_pvt *pvt)
edac_dbg(1, "F3xE8 (NB Cap): 0x%08x\n", pvt->nbcap);
edac_dbg(1, " NB two channel DRAM capable: %s\n",
- (pvt->nbcap & NBCAP_DCT_DUAL) ? "yes" : "no");
+ str_yes_no(pvt->nbcap & NBCAP_DCT_DUAL));
edac_dbg(1, " ECC capable: %s, ChipKill ECC capable: %s\n",
- (pvt->nbcap & NBCAP_SECDED) ? "yes" : "no",
- (pvt->nbcap & NBCAP_CHIPKILL) ? "yes" : "no");
+ str_yes_no(pvt->nbcap & NBCAP_SECDED),
+ str_yes_no(pvt->nbcap & NBCAP_CHIPKILL));
debug_dump_dramcfg_low(pvt, pvt->dclr0, 0);
@@ -1398,7 +1398,7 @@ static void dct_dump_misc_regs(struct amd64_pvt *pvt)
if (!dct_ganging_enabled(pvt))
debug_dump_dramcfg_low(pvt, pvt->dclr1, 1);
- edac_dbg(1, " DramHoleValid: %s\n", dhar_valid(pvt) ? "yes" : "no");
+ edac_dbg(1, " DramHoleValid: %s\n", str_yes_no(dhar_valid(pvt)));
amd64_info("using x%u syndromes.\n", pvt->ecc_sym_sz);
}
@@ -2027,15 +2027,15 @@ static void read_dram_ctl_register(struct amd64_pvt *pvt)
if (!dct_ganging_enabled(pvt))
edac_dbg(0, " Address range split per DCT: %s\n",
- (dct_high_range_enabled(pvt) ? "yes" : "no"));
+ str_yes_no(dct_high_range_enabled(pvt)));
edac_dbg(0, " data interleave for ECC: %s, DRAM cleared since last warm reset: %s\n",
- (dct_data_intlv_enabled(pvt) ? "enabled" : "disabled"),
- (dct_memory_cleared(pvt) ? "yes" : "no"));
+ str_enabled_disabled(dct_data_intlv_enabled(pvt)),
+ str_yes_no(dct_memory_cleared(pvt)));
edac_dbg(0, " channel interleave: %s, "
"interleave bits selector: 0x%x\n",
- (dct_interleave_enabled(pvt) ? "enabled" : "disabled"),
+ str_enabled_disabled(dct_interleave_enabled(pvt)),
dct_sel_interleave_addr(pvt));
}
@@ -3208,8 +3208,7 @@ static bool nb_mce_bank_enabled_on_node(u16 nid)
nbe = reg->l & MSR_MCGCTL_NBE;
edac_dbg(0, "core: %u, MCG_CTL: 0x%llx, NB MSR is %s\n",
- cpu, reg->q,
- (nbe ? "enabled" : "disabled"));
+ cpu, reg->q, str_enabled_disabled(nbe));
if (!nbe)
goto out;
@@ -3353,12 +3352,9 @@ static bool dct_ecc_enabled(struct amd64_pvt *pvt)
edac_dbg(0, "NB MCE bank disabled, set MSR 0x%08x[4] on node %d to enable.\n",
MSR_IA32_MCG_CTL, nid);
- edac_dbg(3, "Node %d: DRAM ECC %s.\n", nid, (ecc_en ? "enabled" : "disabled"));
+ edac_dbg(3, "Node %d: DRAM ECC %s.\n", nid, str_enabled_disabled(ecc_en));
- if (!ecc_en || !nb_mce_en)
- return false;
- else
- return true;
+ return ecc_en && nb_mce_en;
}
static bool umc_ecc_enabled(struct amd64_pvt *pvt)
@@ -3378,7 +3374,7 @@ static bool umc_ecc_enabled(struct amd64_pvt *pvt)
}
}
- edac_dbg(3, "Node %d: DRAM ECC %s.\n", pvt->mc_node_id, (ecc_en ? "enabled" : "disabled"));
+ edac_dbg(3, "Node %d: DRAM ECC %s.\n", pvt->mc_node_id, str_enabled_disabled(ecc_en));
return ecc_en;
}
diff --git a/drivers/edac/debugfs.c b/drivers/edac/debugfs.c
index 4804332d9946..8195fc9c9354 100644
--- a/drivers/edac/debugfs.c
+++ b/drivers/edac/debugfs.c
@@ -1,4 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/string_choices.h>
+
#include "edac_module.h"
static struct dentry *edac_debugfs;
@@ -22,7 +25,7 @@ static ssize_t edac_fake_inject_write(struct file *file,
"Generating %d %s fake error%s to %d.%d.%d to test core handling. NOTE: this won't test the driver-specific decoding logic.\n",
errcount,
(type == HW_EVENT_ERR_UNCORRECTED) ? "UE" : "CE",
- errcount > 1 ? "s" : "",
+ str_plural(errcount),
mci->fake_inject_layer[0],
mci->fake_inject_layer[1],
mci->fake_inject_layer[2]
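
Most of the EDAC string conversions above and below are mechanical replacements of open-coded ternaries with the helpers from <linux/string_choices.h>. A tiny sketch of how they read in a log statement (the function and values are invented):

#include <linux/string_choices.h>
#include <linux/printk.h>

static void example_report(bool ecc_en, bool intlv_en, int errcount)
{
	pr_debug("ECC: %s, interleave: %s, %d error%s seen\n",
		 str_yes_no(ecc_en),			/* "yes" / "no" */
		 str_enabled_disabled(intlv_en),	/* "enabled" / "disabled" */
		 errcount, str_plural(errcount));	/* "" only when errcount == 1 */
}
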
diff --git a/drivers/edac/ecs.c b/drivers/edac/ecs.c
new file mode 100755
index 000000000000..1d51838a60c1
--- /dev/null
+++ b/drivers/edac/ecs.c
@@ -0,0 +1,205 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * The generic ECS driver is designed to support control of on-die error
+ * check scrub (e.g., DDR5 ECS). The common sysfs ECS interface abstracts
+ * the control of various ECS functionalities into a unified set of functions.
+ *
+ * Copyright (c) 2024-2025 HiSilicon Limited.
+ */
+
+#include <linux/edac.h>
+
+#define EDAC_ECS_FRU_NAME "ecs_fru"
+
+enum edac_ecs_attributes {
+ ECS_LOG_ENTRY_TYPE,
+ ECS_MODE,
+ ECS_RESET,
+ ECS_THRESHOLD,
+ ECS_MAX_ATTRS
+};
+
+struct edac_ecs_dev_attr {
+ struct device_attribute dev_attr;
+ int fru_id;
+};
+
+struct edac_ecs_fru_context {
+ char name[EDAC_FEAT_NAME_LEN];
+ struct edac_ecs_dev_attr dev_attr[ECS_MAX_ATTRS];
+ struct attribute *ecs_attrs[ECS_MAX_ATTRS + 1];
+ struct attribute_group group;
+};
+
+struct edac_ecs_context {
+ u16 num_media_frus;
+ struct edac_ecs_fru_context *fru_ctxs;
+};
+
+#define TO_ECS_DEV_ATTR(_dev_attr) \
+ container_of(_dev_attr, struct edac_ecs_dev_attr, dev_attr)
+
+#define EDAC_ECS_ATTR_SHOW(attrib, cb, type, format) \
+static ssize_t attrib##_show(struct device *ras_feat_dev, \
+ struct device_attribute *attr, char *buf) \
+{ \
+ struct edac_ecs_dev_attr *dev_attr = TO_ECS_DEV_ATTR(attr); \
+ struct edac_dev_feat_ctx *ctx = dev_get_drvdata(ras_feat_dev); \
+ const struct edac_ecs_ops *ops = ctx->ecs.ecs_ops; \
+ type data; \
+ int ret; \
+ \
+ ret = ops->cb(ras_feat_dev->parent, ctx->ecs.private, \
+ dev_attr->fru_id, &data); \
+ if (ret) \
+ return ret; \
+ \
+ return sysfs_emit(buf, format, data); \
+}
+
+EDAC_ECS_ATTR_SHOW(log_entry_type, get_log_entry_type, u32, "%u\n")
+EDAC_ECS_ATTR_SHOW(mode, get_mode, u32, "%u\n")
+EDAC_ECS_ATTR_SHOW(threshold, get_threshold, u32, "%u\n")
+
+#define EDAC_ECS_ATTR_STORE(attrib, cb, type, conv_func) \
+static ssize_t attrib##_store(struct device *ras_feat_dev, \
+ struct device_attribute *attr, \
+ const char *buf, size_t len) \
+{ \
+ struct edac_ecs_dev_attr *dev_attr = TO_ECS_DEV_ATTR(attr); \
+ struct edac_dev_feat_ctx *ctx = dev_get_drvdata(ras_feat_dev); \
+ const struct edac_ecs_ops *ops = ctx->ecs.ecs_ops; \
+ type data; \
+ int ret; \
+ \
+ ret = conv_func(buf, 0, &data); \
+ if (ret < 0) \
+ return ret; \
+ \
+ ret = ops->cb(ras_feat_dev->parent, ctx->ecs.private, \
+ dev_attr->fru_id, data); \
+ if (ret) \
+ return ret; \
+ \
+ return len; \
+}
+
+EDAC_ECS_ATTR_STORE(log_entry_type, set_log_entry_type, unsigned long, kstrtoul)
+EDAC_ECS_ATTR_STORE(mode, set_mode, unsigned long, kstrtoul)
+EDAC_ECS_ATTR_STORE(reset, reset, unsigned long, kstrtoul)
+EDAC_ECS_ATTR_STORE(threshold, set_threshold, unsigned long, kstrtoul)
+
+static umode_t ecs_attr_visible(struct kobject *kobj, struct attribute *a, int attr_id)
+{
+ struct device *ras_feat_dev = kobj_to_dev(kobj);
+ struct edac_dev_feat_ctx *ctx = dev_get_drvdata(ras_feat_dev);
+ const struct edac_ecs_ops *ops = ctx->ecs.ecs_ops;
+
+ switch (attr_id) {
+ case ECS_LOG_ENTRY_TYPE:
+ if (ops->get_log_entry_type) {
+ if (ops->set_log_entry_type)
+ return a->mode;
+ else
+ return 0444;
+ }
+ break;
+ case ECS_MODE:
+ if (ops->get_mode) {
+ if (ops->set_mode)
+ return a->mode;
+ else
+ return 0444;
+ }
+ break;
+ case ECS_RESET:
+ if (ops->reset)
+ return a->mode;
+ break;
+ case ECS_THRESHOLD:
+ if (ops->get_threshold) {
+ if (ops->set_threshold)
+ return a->mode;
+ else
+ return 0444;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+#define EDAC_ECS_ATTR_RO(_name, _fru_id) \
+ ((struct edac_ecs_dev_attr) { .dev_attr = __ATTR_RO(_name), \
+ .fru_id = _fru_id })
+
+#define EDAC_ECS_ATTR_WO(_name, _fru_id) \
+ ((struct edac_ecs_dev_attr) { .dev_attr = __ATTR_WO(_name), \
+ .fru_id = _fru_id })
+
+#define EDAC_ECS_ATTR_RW(_name, _fru_id) \
+ ((struct edac_ecs_dev_attr) { .dev_attr = __ATTR_RW(_name), \
+ .fru_id = _fru_id })
+
+static int ecs_create_desc(struct device *ecs_dev, const struct attribute_group **attr_groups,
+ u16 num_media_frus)
+{
+ struct edac_ecs_context *ecs_ctx;
+ u32 fru;
+
+ ecs_ctx = devm_kzalloc(ecs_dev, sizeof(*ecs_ctx), GFP_KERNEL);
+ if (!ecs_ctx)
+ return -ENOMEM;
+
+ ecs_ctx->num_media_frus = num_media_frus;
+ ecs_ctx->fru_ctxs = devm_kcalloc(ecs_dev, num_media_frus,
+ sizeof(*ecs_ctx->fru_ctxs),
+ GFP_KERNEL);
+ if (!ecs_ctx->fru_ctxs)
+ return -ENOMEM;
+
+ for (fru = 0; fru < num_media_frus; fru++) {
+ struct edac_ecs_fru_context *fru_ctx = &ecs_ctx->fru_ctxs[fru];
+ struct attribute_group *group = &fru_ctx->group;
+ int i;
+
+ fru_ctx->dev_attr[ECS_LOG_ENTRY_TYPE] = EDAC_ECS_ATTR_RW(log_entry_type, fru);
+ fru_ctx->dev_attr[ECS_MODE] = EDAC_ECS_ATTR_RW(mode, fru);
+ fru_ctx->dev_attr[ECS_RESET] = EDAC_ECS_ATTR_WO(reset, fru);
+ fru_ctx->dev_attr[ECS_THRESHOLD] = EDAC_ECS_ATTR_RW(threshold, fru);
+
+ for (i = 0; i < ECS_MAX_ATTRS; i++)
+ fru_ctx->ecs_attrs[i] = &fru_ctx->dev_attr[i].dev_attr.attr;
+
+ sprintf(fru_ctx->name, "%s%d", EDAC_ECS_FRU_NAME, fru);
+ group->name = fru_ctx->name;
+ group->attrs = fru_ctx->ecs_attrs;
+ group->is_visible = ecs_attr_visible;
+
+ attr_groups[fru] = group;
+ }
+
+ return 0;
+}
+
+/**
+ * edac_ecs_get_desc - get EDAC ECS descriptors
+ * @ecs_dev: client device, supports ECS feature
+ * @attr_groups: pointer to attribute group container
+ * @num_media_frus: number of media FRUs in the device
+ *
+ * Return:
+ * * %0 - Success.
+ * * %-EINVAL - Invalid parameters passed.
+ * * %-ENOMEM - Dynamic memory allocation failed.
+ */
+int edac_ecs_get_desc(struct device *ecs_dev,
+ const struct attribute_group **attr_groups, u16 num_media_frus)
+{
+ if (!ecs_dev || !attr_groups || !num_media_frus)
+ return -EINVAL;
+
+ return ecs_create_desc(ecs_dev, attr_groups, num_media_frus);
+}
diff --git a/drivers/edac/edac_device.c b/drivers/edac/edac_device.c
index 621dc2a5d034..0734909b08a4 100644
--- a/drivers/edac/edac_device.c
+++ b/drivers/edac/edac_device.c
@@ -570,3 +570,188 @@ void edac_device_handle_ue_count(struct edac_device_ctl_info *edac_dev,
block ? block->name : "N/A", count, msg);
}
EXPORT_SYMBOL_GPL(edac_device_handle_ue_count);
+
+static void edac_dev_release(struct device *dev)
+{
+ struct edac_dev_feat_ctx *ctx = container_of(dev, struct edac_dev_feat_ctx, dev);
+
+ kfree(ctx->mem_repair);
+ kfree(ctx->scrub);
+ kfree(ctx->dev.groups);
+ kfree(ctx);
+}
+
+static const struct device_type edac_dev_type = {
+ .name = "edac_dev",
+ .release = edac_dev_release,
+};
+
+static void edac_dev_unreg(void *data)
+{
+ device_unregister(data);
+}
+
+/**
+ * edac_dev_register - register device for RAS features with EDAC
+ * @parent: parent device.
+ * @name: name for the folder in the /sys/bus/edac/devices/,
+ * which is derived from the parent device.
+ * e.g. /sys/bus/edac/devices/cxl_mem0/
+ * @private: parent driver's data to store in the context if any.
+ * @num_features: number of RAS features to register.
+ * @ras_features: list of RAS features to register.
+ *
+ * Return:
+ * * %0 - Success.
+ * * %-EINVAL - Invalid parameters passed.
+ * * %-ENOMEM - Dynamic memory allocation failed.
+ *
+ */
+int edac_dev_register(struct device *parent, char *name,
+ void *private, int num_features,
+ const struct edac_dev_feature *ras_features)
+{
+ const struct attribute_group **ras_attr_groups;
+ struct edac_dev_data *dev_data;
+ struct edac_dev_feat_ctx *ctx;
+ int mem_repair_cnt = 0;
+ int attr_gcnt = 0;
+ int ret = -ENOMEM;
+ int scrub_cnt = 0;
+ int feat;
+
+ if (!parent || !name || !num_features || !ras_features)
+ return -EINVAL;
+
+ /* Double parse to make space for attributes */
+ for (feat = 0; feat < num_features; feat++) {
+ switch (ras_features[feat].ft_type) {
+ case RAS_FEAT_SCRUB:
+ attr_gcnt++;
+ scrub_cnt++;
+ break;
+ case RAS_FEAT_ECS:
+ attr_gcnt += ras_features[feat].ecs_info.num_media_frus;
+ break;
+ case RAS_FEAT_MEM_REPAIR:
+ attr_gcnt++;
+ mem_repair_cnt++;
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ras_attr_groups = kcalloc(attr_gcnt + 1, sizeof(*ras_attr_groups), GFP_KERNEL);
+ if (!ras_attr_groups)
+ goto ctx_free;
+
+ if (scrub_cnt) {
+ ctx->scrub = kcalloc(scrub_cnt, sizeof(*ctx->scrub), GFP_KERNEL);
+ if (!ctx->scrub)
+ goto groups_free;
+ }
+
+ if (mem_repair_cnt) {
+ ctx->mem_repair = kcalloc(mem_repair_cnt, sizeof(*ctx->mem_repair), GFP_KERNEL);
+ if (!ctx->mem_repair)
+ goto data_mem_free;
+ }
+
+ attr_gcnt = 0;
+ scrub_cnt = 0;
+ mem_repair_cnt = 0;
+ for (feat = 0; feat < num_features; feat++, ras_features++) {
+ switch (ras_features->ft_type) {
+ case RAS_FEAT_SCRUB:
+ if (!ras_features->scrub_ops || scrub_cnt != ras_features->instance) {
+ ret = -EINVAL;
+ goto data_mem_free;
+ }
+
+ dev_data = &ctx->scrub[scrub_cnt];
+ dev_data->instance = scrub_cnt;
+ dev_data->scrub_ops = ras_features->scrub_ops;
+ dev_data->private = ras_features->ctx;
+ ret = edac_scrub_get_desc(parent, &ras_attr_groups[attr_gcnt],
+ ras_features->instance);
+ if (ret)
+ goto data_mem_free;
+
+ scrub_cnt++;
+ attr_gcnt++;
+ break;
+ case RAS_FEAT_ECS:
+ if (!ras_features->ecs_ops) {
+ ret = -EINVAL;
+ goto data_mem_free;
+ }
+
+ dev_data = &ctx->ecs;
+ dev_data->ecs_ops = ras_features->ecs_ops;
+ dev_data->private = ras_features->ctx;
+ ret = edac_ecs_get_desc(parent, &ras_attr_groups[attr_gcnt],
+ ras_features->ecs_info.num_media_frus);
+ if (ret)
+ goto data_mem_free;
+
+ attr_gcnt += ras_features->ecs_info.num_media_frus;
+ break;
+ case RAS_FEAT_MEM_REPAIR:
+ if (!ras_features->mem_repair_ops ||
+ mem_repair_cnt != ras_features->instance) {
+ ret = -EINVAL;
+ goto data_mem_free;
+ }
+
+ dev_data = &ctx->mem_repair[mem_repair_cnt];
+ dev_data->instance = mem_repair_cnt;
+ dev_data->mem_repair_ops = ras_features->mem_repair_ops;
+ dev_data->private = ras_features->ctx;
+ ret = edac_mem_repair_get_desc(parent, &ras_attr_groups[attr_gcnt],
+ ras_features->instance);
+ if (ret)
+ goto data_mem_free;
+
+ mem_repair_cnt++;
+ attr_gcnt++;
+ break;
+ default:
+ ret = -EINVAL;
+ goto data_mem_free;
+ }
+ }
+
+ ctx->dev.parent = parent;
+ ctx->dev.bus = edac_get_sysfs_subsys();
+ ctx->dev.type = &edac_dev_type;
+ ctx->dev.groups = ras_attr_groups;
+ ctx->private = private;
+ dev_set_drvdata(&ctx->dev, ctx);
+
+ ret = dev_set_name(&ctx->dev, "%s", name);
+ if (ret)
+ goto data_mem_free;
+
+ ret = device_register(&ctx->dev);
+ if (ret) {
+ put_device(&ctx->dev);
+ return ret;
+ }
+
+ return devm_add_action_or_reset(parent, edac_dev_unreg, &ctx->dev);
+
+data_mem_free:
+ kfree(ctx->mem_repair);
+ kfree(ctx->scrub);
+groups_free:
+ kfree(ras_attr_groups);
+ctx_free:
+ kfree(ctx);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(edac_dev_register);
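
To show how the registration path added above is meant to be consumed, here is a hedged sketch of a client driver registering a single ECS feature. The callback bodies, device name and FRU count are invented, and the edac_ecs_ops prototypes are assumed from the ops->get_mode()/ops->set_mode() call sites in ecs.c rather than quoted from <linux/edac.h>:

#include <linux/edac.h>

/* Hypothetical per-FRU callbacks; prototypes assumed, see note above. */
static int my_ecs_get_mode(struct device *dev, void *drv_data, int fru_id, u32 *mode)
{
	*mode = 0;
	return 0;
}

static int my_ecs_set_mode(struct device *dev, void *drv_data, int fru_id, u32 mode)
{
	return 0;
}

static const struct edac_ecs_ops my_ecs_ops = {
	.get_mode = my_ecs_get_mode,
	.set_mode = my_ecs_set_mode,
};

static int my_ras_probe(struct device *dev)
{
	char name[] = "my_mem0";	/* folder under /sys/bus/edac/devices/ */
	struct edac_dev_feature feat = {
		.ft_type = RAS_FEAT_ECS,
		.ecs_ops = &my_ecs_ops,
		.ecs_info.num_media_frus = 2,	/* creates ecs_fru0 and ecs_fru1 groups */
		.ctx = NULL,
	};

	return edac_dev_register(dev, name, NULL, 1, &feat);
}
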
diff --git a/drivers/edac/i10nm_base.c b/drivers/edac/i10nm_base.c
index f45d849d3f15..355a977019e9 100644
--- a/drivers/edac/i10nm_base.c
+++ b/drivers/edac/i10nm_base.c
@@ -751,6 +751,8 @@ static int i10nm_get_ddr_munits(void)
continue;
} else {
d->imc[lmc].mdev = mdev;
+ if (res_cfg->type == SPR)
+ skx_set_mc_mapping(d, i, lmc);
lmc++;
}
}
diff --git a/drivers/edac/i5400_edac.c b/drivers/edac/i5400_edac.c
index 49b4499269fb..b5cf25905b05 100644
--- a/drivers/edac/i5400_edac.c
+++ b/drivers/edac/i5400_edac.c
@@ -31,6 +31,7 @@
#include <linux/slab.h>
#include <linux/edac.h>
#include <linux/mmzone.h>
+#include <linux/string_choices.h>
#include "edac_module.h"
@@ -899,7 +900,7 @@ static void decode_mtr(int slot_row, u16 mtr)
edac_dbg(2, "\t\tWIDTH: x%d\n", MTR_DRAM_WIDTH(mtr));
edac_dbg(2, "\t\tELECTRICAL THROTTLING is %s\n",
- MTR_DIMMS_ETHROTTLE(mtr) ? "enabled" : "disabled");
+ str_enabled_disabled(MTR_DIMMS_ETHROTTLE(mtr)));
edac_dbg(2, "\t\tNUMBANK: %d bank(s)\n", MTR_DRAM_BANKS(mtr));
edac_dbg(2, "\t\tNUMRANK: %s\n",
diff --git a/drivers/edac/i7300_edac.c b/drivers/edac/i7300_edac.c
index 61adaa872ba7..69068f8d0cad 100644
--- a/drivers/edac/i7300_edac.c
+++ b/drivers/edac/i7300_edac.c
@@ -23,6 +23,7 @@
#include <linux/slab.h>
#include <linux/edac.h>
#include <linux/mmzone.h>
+#include <linux/string_choices.h>
#include "edac_module.h"
@@ -620,7 +621,7 @@ static int decode_mtr(struct i7300_pvt *pvt,
edac_dbg(2, "\t\tWIDTH: x%d\n", MTR_DRAM_WIDTH(mtr));
edac_dbg(2, "\t\tELECTRICAL THROTTLING is %s\n",
- MTR_DIMMS_ETHROTTLE(mtr) ? "enabled" : "disabled");
+ str_enabled_disabled(MTR_DIMMS_ETHROTTLE(mtr)));
edac_dbg(2, "\t\tNUMBANK: %d bank(s)\n", MTR_DRAM_BANKS(mtr));
edac_dbg(2, "\t\tNUMRANK: %s\n",
@@ -871,9 +872,9 @@ static int i7300_get_mc_regs(struct mem_ctl_info *mci)
IS_MIRRORED(pvt->mc_settings) ? "" : "non-");
edac_dbg(0, "Error detection is %s\n",
- IS_ECC_ENABLED(pvt->mc_settings) ? "enabled" : "disabled");
+ str_enabled_disabled(IS_ECC_ENABLED(pvt->mc_settings)));
edac_dbg(0, "Retry is %s\n",
- IS_RETRY_ENABLED(pvt->mc_settings) ? "enabled" : "disabled");
+ str_enabled_disabled(IS_RETRY_ENABLED(pvt->mc_settings)));
/* Get Memory Interleave Range registers */
pci_read_config_word(pvt->pci_dev_16_1_fsb_addr_map, MIR0,
diff --git a/drivers/edac/ie31200_edac.c b/drivers/edac/ie31200_edac.c
index 4fc16922dc1a..204834149579 100644
--- a/drivers/edac/ie31200_edac.c
+++ b/drivers/edac/ie31200_edac.c
@@ -51,6 +51,7 @@
#include <linux/edac.h>
#include <linux/io-64-nonatomic-lo-hi.h>
+#include <asm/mce.h>
#include "edac_module.h"
#define EDAC_MOD_STR "ie31200_edac"
@@ -84,44 +85,23 @@
#define PCI_DEVICE_ID_INTEL_IE31200_HB_CFL_9 0x3ec6
#define PCI_DEVICE_ID_INTEL_IE31200_HB_CFL_10 0x3eca
-/* Test if HB is for Skylake or later. */
-#define DEVICE_ID_SKYLAKE_OR_LATER(did) \
- (((did) == PCI_DEVICE_ID_INTEL_IE31200_HB_8) || \
- ((did) == PCI_DEVICE_ID_INTEL_IE31200_HB_9) || \
- ((did) == PCI_DEVICE_ID_INTEL_IE31200_HB_10) || \
- ((did) == PCI_DEVICE_ID_INTEL_IE31200_HB_11) || \
- ((did) == PCI_DEVICE_ID_INTEL_IE31200_HB_12) || \
- (((did) & PCI_DEVICE_ID_INTEL_IE31200_HB_CFL_MASK) == \
- PCI_DEVICE_ID_INTEL_IE31200_HB_CFL_MASK))
-
-#define IE31200_DIMMS 4
-#define IE31200_RANKS 8
-#define IE31200_RANKS_PER_CHANNEL 4
+/* Raptor Lake-S */
+#define PCI_DEVICE_ID_INTEL_IE31200_RPL_S_1 0xa703
+#define PCI_DEVICE_ID_INTEL_IE31200_RPL_S_2 0x4640
+#define PCI_DEVICE_ID_INTEL_IE31200_RPL_S_3 0x4630
+
+#define IE31200_RANKS_PER_CHANNEL 8
#define IE31200_DIMMS_PER_CHANNEL 2
#define IE31200_CHANNELS 2
+#define IE31200_IMC_NUM 2
/* Intel IE31200 register addresses - device 0 function 0 - DRAM Controller */
#define IE31200_MCHBAR_LOW 0x48
#define IE31200_MCHBAR_HIGH 0x4c
-#define IE31200_MCHBAR_MASK GENMASK_ULL(38, 15)
-#define IE31200_MMR_WINDOW_SIZE BIT(15)
/*
* Error Status Register (16b)
*
- * 15 reserved
- * 14 Isochronous TBWRR Run Behind FIFO Full
- * (ITCV)
- * 13 Isochronous TBWRR Run Behind FIFO Put
- * (ITSTV)
- * 12 reserved
- * 11 MCH Thermal Sensor Event
- * for SMI/SCI/SERR (GTSE)
- * 10 reserved
- * 9 LOCK to non-DRAM Memory Flag (LCKF)
- * 8 reserved
- * 7 DRAM Throttle Flag (DTF)
- * 6:2 reserved
* 1 Multi-bit DRAM ECC Error Flag (DMERR)
* 0 Single-bit DRAM ECC Error Flag (DSERR)
*/
@@ -130,68 +110,60 @@
#define IE31200_ERRSTS_CE BIT(0)
#define IE31200_ERRSTS_BITS (IE31200_ERRSTS_UE | IE31200_ERRSTS_CE)
-/*
- * Channel 0 ECC Error Log (64b)
- *
- * 63:48 Error Column Address (ERRCOL)
- * 47:32 Error Row Address (ERRROW)
- * 31:29 Error Bank Address (ERRBANK)
- * 28:27 Error Rank Address (ERRRANK)
- * 26:24 reserved
- * 23:16 Error Syndrome (ERRSYND)
- * 15: 2 reserved
- * 1 Multiple Bit Error Status (MERRSTS)
- * 0 Correctable Error Status (CERRSTS)
- */
-
-#define IE31200_C0ECCERRLOG 0x40c8
-#define IE31200_C1ECCERRLOG 0x44c8
-#define IE31200_C0ECCERRLOG_SKL 0x4048
-#define IE31200_C1ECCERRLOG_SKL 0x4448
-#define IE31200_ECCERRLOG_CE BIT(0)
-#define IE31200_ECCERRLOG_UE BIT(1)
-#define IE31200_ECCERRLOG_RANK_BITS GENMASK_ULL(28, 27)
-#define IE31200_ECCERRLOG_RANK_SHIFT 27
-#define IE31200_ECCERRLOG_SYNDROME_BITS GENMASK_ULL(23, 16)
-#define IE31200_ECCERRLOG_SYNDROME_SHIFT 16
-
-#define IE31200_ECCERRLOG_SYNDROME(log) \
- ((log & IE31200_ECCERRLOG_SYNDROME_BITS) >> \
- IE31200_ECCERRLOG_SYNDROME_SHIFT)
-
#define IE31200_CAPID0 0xe4
#define IE31200_CAPID0_PDCD BIT(4)
#define IE31200_CAPID0_DDPCD BIT(6)
#define IE31200_CAPID0_ECC BIT(1)
-#define IE31200_MAD_DIMM_0_OFFSET 0x5004
-#define IE31200_MAD_DIMM_0_OFFSET_SKL 0x500C
-#define IE31200_MAD_DIMM_SIZE GENMASK_ULL(7, 0)
-#define IE31200_MAD_DIMM_A_RANK BIT(17)
-#define IE31200_MAD_DIMM_A_RANK_SHIFT 17
-#define IE31200_MAD_DIMM_A_RANK_SKL BIT(10)
-#define IE31200_MAD_DIMM_A_RANK_SKL_SHIFT 10
-#define IE31200_MAD_DIMM_A_WIDTH BIT(19)
-#define IE31200_MAD_DIMM_A_WIDTH_SHIFT 19
-#define IE31200_MAD_DIMM_A_WIDTH_SKL GENMASK_ULL(9, 8)
-#define IE31200_MAD_DIMM_A_WIDTH_SKL_SHIFT 8
-
-/* Skylake reports 1GB increments, everything else is 256MB */
-#define IE31200_PAGES(n, skl) \
- (n << (28 + (2 * skl) - PAGE_SHIFT))
+/* Non-constant mask variant of FIELD_GET() */
+#define field_get(_mask, _reg) (((_reg) & (_mask)) >> (ffs(_mask) - 1))
static int nr_channels;
static struct pci_dev *mci_pdev;
static int ie31200_registered = 1;
+struct res_config {
+ enum mem_type mtype;
+ bool cmci;
+ int imc_num;
+ /* Host MMIO configuration register */
+ u64 reg_mchbar_mask;
+ u64 reg_mchbar_window_size;
+ /* ECC error log register */
+ u64 reg_eccerrlog_offset[IE31200_CHANNELS];
+ u64 reg_eccerrlog_ce_mask;
+ u64 reg_eccerrlog_ce_ovfl_mask;
+ u64 reg_eccerrlog_ue_mask;
+ u64 reg_eccerrlog_ue_ovfl_mask;
+ u64 reg_eccerrlog_rank_mask;
+ u64 reg_eccerrlog_syndrome_mask;
+ /* MSR to clear ECC error log register */
+ u32 msr_clear_eccerrlog_offset;
+ /* DIMM characteristics register */
+ u64 reg_mad_dimm_size_granularity;
+ u64 reg_mad_dimm_offset[IE31200_CHANNELS];
+ u32 reg_mad_dimm_size_mask[IE31200_DIMMS_PER_CHANNEL];
+ u32 reg_mad_dimm_rank_mask[IE31200_DIMMS_PER_CHANNEL];
+ u32 reg_mad_dimm_width_mask[IE31200_DIMMS_PER_CHANNEL];
+};
+
struct ie31200_priv {
void __iomem *window;
void __iomem *c0errlog;
void __iomem *c1errlog;
+ struct res_config *cfg;
+ struct mem_ctl_info *mci;
+ struct pci_dev *pdev;
+ struct device dev;
};
+static struct ie31200_pvt {
+ struct ie31200_priv *priv[IE31200_IMC_NUM];
+} ie31200_pvt;
+
enum ie31200_chips {
IE31200 = 0,
+ IE31200_1 = 1,
};
struct ie31200_dev_info {
@@ -202,18 +174,22 @@ struct ie31200_error_info {
u16 errsts;
u16 errsts2;
u64 eccerrlog[IE31200_CHANNELS];
+ u64 erraddr;
};
static const struct ie31200_dev_info ie31200_devs[] = {
[IE31200] = {
.ctl_name = "IE31200"
},
+ [IE31200_1] = {
+ .ctl_name = "IE31200_1"
+ },
};
struct dimm_data {
- u8 size; /* in multiples of 256MB, except Skylake is 1GB */
- u8 dual_rank : 1,
- x16_width : 2; /* 0 means x8 width */
+ u64 size; /* in bytes */
+ u8 ranks;
+ enum dev_type dtype;
};
static int how_many_channels(struct pci_dev *pdev)
@@ -251,29 +227,54 @@ static bool ecc_capable(struct pci_dev *pdev)
return true;
}
-static int eccerrlog_row(u64 log)
-{
- return ((log & IE31200_ECCERRLOG_RANK_BITS) >>
- IE31200_ECCERRLOG_RANK_SHIFT);
-}
+#define mci_to_pci_dev(mci) (((struct ie31200_priv *)(mci)->pvt_info)->pdev)
static void ie31200_clear_error_info(struct mem_ctl_info *mci)
{
+ struct ie31200_priv *priv = mci->pvt_info;
+ struct res_config *cfg = priv->cfg;
+
+ /*
+ * The PCI ERRSTS register is deprecated. Write the MSR to clear
+ * the ECC error log registers in all memory controllers.
+ */
+ if (cfg->msr_clear_eccerrlog_offset) {
+ if (wrmsr_safe(cfg->msr_clear_eccerrlog_offset,
+ cfg->reg_eccerrlog_ce_mask |
+ cfg->reg_eccerrlog_ce_ovfl_mask |
+ cfg->reg_eccerrlog_ue_mask |
+ cfg->reg_eccerrlog_ue_ovfl_mask, 0) < 0)
+ ie31200_printk(KERN_ERR, "Failed to wrmsr.\n");
+
+ return;
+ }
+
/*
* Clear any error bits.
* (Yes, we really clear bits by writing 1 to them.)
*/
- pci_write_bits16(to_pci_dev(mci->pdev), IE31200_ERRSTS,
+ pci_write_bits16(mci_to_pci_dev(mci), IE31200_ERRSTS,
IE31200_ERRSTS_BITS, IE31200_ERRSTS_BITS);
}
static void ie31200_get_and_clear_error_info(struct mem_ctl_info *mci,
struct ie31200_error_info *info)
{
- struct pci_dev *pdev;
+ struct pci_dev *pdev = mci_to_pci_dev(mci);
struct ie31200_priv *priv = mci->pvt_info;
- pdev = to_pci_dev(mci->pdev);
+ /*
+ * The PCI ERRSTS register is deprecated; read the MMIO-mapped
+ * ECC error log registers directly.
+ */
+ if (priv->cfg->msr_clear_eccerrlog_offset) {
+ info->eccerrlog[0] = lo_hi_readq(priv->c0errlog);
+ if (nr_channels == 2)
+ info->eccerrlog[1] = lo_hi_readq(priv->c1errlog);
+
+ ie31200_clear_error_info(mci);
+ return;
+ }
/*
* This is a mess because there is no atomic way to read all the
@@ -309,46 +310,56 @@ static void ie31200_get_and_clear_error_info(struct mem_ctl_info *mci,
static void ie31200_process_error_info(struct mem_ctl_info *mci,
struct ie31200_error_info *info)
{
+ struct ie31200_priv *priv = mci->pvt_info;
+ struct res_config *cfg = priv->cfg;
int channel;
u64 log;
- if (!(info->errsts & IE31200_ERRSTS_BITS))
- return;
+ if (!cfg->msr_clear_eccerrlog_offset) {
+ if (!(info->errsts & IE31200_ERRSTS_BITS))
+ return;
- if ((info->errsts ^ info->errsts2) & IE31200_ERRSTS_BITS) {
- edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, 0, 0, 0,
- -1, -1, -1, "UE overwrote CE", "");
- info->errsts = info->errsts2;
+ if ((info->errsts ^ info->errsts2) & IE31200_ERRSTS_BITS) {
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, 0, 0, 0,
+ -1, -1, -1, "UE overwrote CE", "");
+ info->errsts = info->errsts2;
+ }
}
for (channel = 0; channel < nr_channels; channel++) {
log = info->eccerrlog[channel];
- if (log & IE31200_ECCERRLOG_UE) {
+ if (log & cfg->reg_eccerrlog_ue_mask) {
edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
- 0, 0, 0,
- eccerrlog_row(log),
+ info->erraddr >> PAGE_SHIFT, 0, 0,
+ field_get(cfg->reg_eccerrlog_rank_mask, log),
channel, -1,
"ie31200 UE", "");
- } else if (log & IE31200_ECCERRLOG_CE) {
+ } else if (log & cfg->reg_eccerrlog_ce_mask) {
edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
- 0, 0,
- IE31200_ECCERRLOG_SYNDROME(log),
- eccerrlog_row(log),
+ info->erraddr >> PAGE_SHIFT, 0,
+ field_get(cfg->reg_eccerrlog_syndrome_mask, log),
+ field_get(cfg->reg_eccerrlog_rank_mask, log),
channel, -1,
"ie31200 CE", "");
}
}
}
-static void ie31200_check(struct mem_ctl_info *mci)
+static void __ie31200_check(struct mem_ctl_info *mci, struct mce *mce)
{
struct ie31200_error_info info;
+ info.erraddr = mce ? mce->addr : 0;
ie31200_get_and_clear_error_info(mci, &info);
ie31200_process_error_info(mci, &info);
}
-static void __iomem *ie31200_map_mchbar(struct pci_dev *pdev)
+static void ie31200_check(struct mem_ctl_info *mci)
+{
+ __ie31200_check(mci, NULL);
+}
+
+static void __iomem *ie31200_map_mchbar(struct pci_dev *pdev, struct res_config *cfg, int mc)
{
union {
u64 mchbar;
@@ -361,7 +372,8 @@ static void __iomem *ie31200_map_mchbar(struct pci_dev *pdev)
pci_read_config_dword(pdev, IE31200_MCHBAR_LOW, &u.mchbar_low);
pci_read_config_dword(pdev, IE31200_MCHBAR_HIGH, &u.mchbar_high);
- u.mchbar &= IE31200_MCHBAR_MASK;
+ u.mchbar &= cfg->reg_mchbar_mask;
+ u.mchbar += cfg->reg_mchbar_window_size * mc;
if (u.mchbar != (resource_size_t)u.mchbar) {
ie31200_printk(KERN_ERR, "mmio space beyond accessible range (0x%llx)\n",
@@ -369,7 +381,7 @@ static void __iomem *ie31200_map_mchbar(struct pci_dev *pdev)
return NULL;
}
- window = ioremap(u.mchbar, IE31200_MMR_WINDOW_SIZE);
+ window = ioremap(u.mchbar, cfg->reg_mchbar_window_size);
if (!window)
ie31200_printk(KERN_ERR, "Cannot map mmio space at 0x%llx\n",
(unsigned long long)u.mchbar);
@@ -377,155 +389,108 @@ static void __iomem *ie31200_map_mchbar(struct pci_dev *pdev)
return window;
}
-static void __skl_populate_dimm_info(struct dimm_data *dd, u32 addr_decode,
- int chan)
+static void populate_dimm_info(struct dimm_data *dd, u32 addr_decode, int dimm,
+ struct res_config *cfg)
{
- dd->size = (addr_decode >> (chan << 4)) & IE31200_MAD_DIMM_SIZE;
- dd->dual_rank = (addr_decode & (IE31200_MAD_DIMM_A_RANK_SKL << (chan << 4))) ? 1 : 0;
- dd->x16_width = ((addr_decode & (IE31200_MAD_DIMM_A_WIDTH_SKL << (chan << 4))) >>
- (IE31200_MAD_DIMM_A_WIDTH_SKL_SHIFT + (chan << 4)));
+ dd->size = field_get(cfg->reg_mad_dimm_size_mask[dimm], addr_decode) * cfg->reg_mad_dimm_size_granularity;
+ dd->ranks = field_get(cfg->reg_mad_dimm_rank_mask[dimm], addr_decode) + 1;
+ dd->dtype = field_get(cfg->reg_mad_dimm_width_mask[dimm], addr_decode) + DEV_X8;
}
-static void __populate_dimm_info(struct dimm_data *dd, u32 addr_decode,
- int chan)
+static void ie31200_get_dimm_config(struct mem_ctl_info *mci, void __iomem *window,
+ struct res_config *cfg, int mc)
{
- dd->size = (addr_decode >> (chan << 3)) & IE31200_MAD_DIMM_SIZE;
- dd->dual_rank = (addr_decode & (IE31200_MAD_DIMM_A_RANK << chan)) ? 1 : 0;
- dd->x16_width = (addr_decode & (IE31200_MAD_DIMM_A_WIDTH << chan)) ? 1 : 0;
-}
+ struct dimm_data dimm_info;
+ struct dimm_info *dimm;
+ unsigned long nr_pages;
+ u32 addr_decode;
+ int i, j, k;
-static void populate_dimm_info(struct dimm_data *dd, u32 addr_decode, int chan,
- bool skl)
-{
- if (skl)
- __skl_populate_dimm_info(dd, addr_decode, chan);
- else
- __populate_dimm_info(dd, addr_decode, chan);
-}
+ for (i = 0; i < IE31200_CHANNELS; i++) {
+ addr_decode = readl(window + cfg->reg_mad_dimm_offset[i]);
+ edac_dbg(0, "addr_decode: 0x%x\n", addr_decode);
+ for (j = 0; j < IE31200_DIMMS_PER_CHANNEL; j++) {
+ populate_dimm_info(&dimm_info, addr_decode, j, cfg);
+ edac_dbg(0, "mc: %d, channel: %d, dimm: %d, size: %lld MiB, ranks: %d, DRAM chip type: %d\n",
+ mc, i, j, dimm_info.size >> 20,
+ dimm_info.ranks,
+ dimm_info.dtype);
-static int ie31200_probe1(struct pci_dev *pdev, int dev_idx)
+ nr_pages = MiB_TO_PAGES(dimm_info.size >> 20);
+ if (nr_pages == 0)
+ continue;
+
+ nr_pages = nr_pages / dimm_info.ranks;
+ for (k = 0; k < dimm_info.ranks; k++) {
+ dimm = edac_get_dimm(mci, (j * dimm_info.ranks) + k, i, 0);
+ dimm->nr_pages = nr_pages;
+ edac_dbg(0, "set nr pages: 0x%lx\n", nr_pages);
+ dimm->grain = 8; /* just a guess */
+ dimm->mtype = cfg->mtype;
+ dimm->dtype = dimm_info.dtype;
+ dimm->edac_mode = EDAC_UNKNOWN;
+ }
+ }
+ }
+}
+
+static int ie31200_register_mci(struct pci_dev *pdev, struct res_config *cfg, int mc)
{
- int i, j, ret;
- struct mem_ctl_info *mci = NULL;
struct edac_mc_layer layers[2];
- struct dimm_data dimm_info[IE31200_CHANNELS][IE31200_DIMMS_PER_CHANNEL];
- void __iomem *window;
struct ie31200_priv *priv;
- u32 addr_decode, mad_offset;
-
- /*
- * Kaby Lake, Coffee Lake seem to work like Skylake. Please re-visit
- * this logic when adding new CPU support.
- */
- bool skl = DEVICE_ID_SKYLAKE_OR_LATER(pdev->device);
-
- edac_dbg(0, "MC:\n");
-
- if (!ecc_capable(pdev)) {
- ie31200_printk(KERN_INFO, "No ECC support\n");
- return -ENODEV;
- }
+ struct mem_ctl_info *mci;
+ void __iomem *window;
+ int ret;
nr_channels = how_many_channels(pdev);
layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
- layers[0].size = IE31200_DIMMS;
+ layers[0].size = IE31200_RANKS_PER_CHANNEL;
layers[0].is_virt_csrow = true;
layers[1].type = EDAC_MC_LAYER_CHANNEL;
layers[1].size = nr_channels;
layers[1].is_virt_csrow = false;
- mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers,
+ mci = edac_mc_alloc(mc, ARRAY_SIZE(layers), layers,
sizeof(struct ie31200_priv));
if (!mci)
return -ENOMEM;
- window = ie31200_map_mchbar(pdev);
+ window = ie31200_map_mchbar(pdev, cfg, mc);
if (!window) {
ret = -ENODEV;
goto fail_free;
}
edac_dbg(3, "MC: init mci\n");
- mci->pdev = &pdev->dev;
- if (skl)
- mci->mtype_cap = MEM_FLAG_DDR4;
- else
- mci->mtype_cap = MEM_FLAG_DDR3;
+ mci->mtype_cap = BIT(cfg->mtype);
mci->edac_ctl_cap = EDAC_FLAG_SECDED;
mci->edac_cap = EDAC_FLAG_SECDED;
mci->mod_name = EDAC_MOD_STR;
- mci->ctl_name = ie31200_devs[dev_idx].ctl_name;
+ mci->ctl_name = ie31200_devs[mc].ctl_name;
mci->dev_name = pci_name(pdev);
- mci->edac_check = ie31200_check;
+ mci->edac_check = cfg->cmci ? NULL : ie31200_check;
mci->ctl_page_to_phys = NULL;
priv = mci->pvt_info;
priv->window = window;
- if (skl) {
- priv->c0errlog = window + IE31200_C0ECCERRLOG_SKL;
- priv->c1errlog = window + IE31200_C1ECCERRLOG_SKL;
- mad_offset = IE31200_MAD_DIMM_0_OFFSET_SKL;
- } else {
- priv->c0errlog = window + IE31200_C0ECCERRLOG;
- priv->c1errlog = window + IE31200_C1ECCERRLOG;
- mad_offset = IE31200_MAD_DIMM_0_OFFSET;
- }
-
- /* populate DIMM info */
- for (i = 0; i < IE31200_CHANNELS; i++) {
- addr_decode = readl(window + mad_offset +
- (i * 4));
- edac_dbg(0, "addr_decode: 0x%x\n", addr_decode);
- for (j = 0; j < IE31200_DIMMS_PER_CHANNEL; j++) {
- populate_dimm_info(&dimm_info[i][j], addr_decode, j,
- skl);
- edac_dbg(0, "size: 0x%x, rank: %d, width: %d\n",
- dimm_info[i][j].size,
- dimm_info[i][j].dual_rank,
- dimm_info[i][j].x16_width);
- }
- }
-
+ priv->c0errlog = window + cfg->reg_eccerrlog_offset[0];
+ priv->c1errlog = window + cfg->reg_eccerrlog_offset[1];
+ priv->cfg = cfg;
+ priv->mci = mci;
+ priv->pdev = pdev;
+ device_initialize(&priv->dev);
/*
- * The dram rank boundary (DRB) reg values are boundary addresses
- * for each DRAM rank with a granularity of 64MB. DRB regs are
- * cumulative; the last one will contain the total memory
- * contained in all ranks.
+ * The EDAC core uses mci->pdev (pointer to the structure device)
+ * as the memory controller ID. The SoCs attach one or more memory
+ * controllers to a single pci_dev (a single pci_dev->dev can
+ * correspond to multiple memory controllers).
+ *
+ * To make mci->pdev unique, assign pci_dev->dev to mci->pdev
+ * for the first memory controller and assign a unique priv->dev
+ * to mci->pdev for each additional memory controller.
*/
- for (i = 0; i < IE31200_DIMMS_PER_CHANNEL; i++) {
- for (j = 0; j < IE31200_CHANNELS; j++) {
- struct dimm_info *dimm;
- unsigned long nr_pages;
-
- nr_pages = IE31200_PAGES(dimm_info[j][i].size, skl);
- if (nr_pages == 0)
- continue;
-
- if (dimm_info[j][i].dual_rank) {
- nr_pages = nr_pages / 2;
- dimm = edac_get_dimm(mci, (i * 2) + 1, j, 0);
- dimm->nr_pages = nr_pages;
- edac_dbg(0, "set nr pages: 0x%lx\n", nr_pages);
- dimm->grain = 8; /* just a guess */
- if (skl)
- dimm->mtype = MEM_DDR4;
- else
- dimm->mtype = MEM_DDR3;
- dimm->dtype = DEV_UNKNOWN;
- dimm->edac_mode = EDAC_UNKNOWN;
- }
- dimm = edac_get_dimm(mci, i * 2, j, 0);
- dimm->nr_pages = nr_pages;
- edac_dbg(0, "set nr pages: 0x%lx\n", nr_pages);
- dimm->grain = 8; /* same guess */
- if (skl)
- dimm->mtype = MEM_DDR4;
- else
- dimm->mtype = MEM_DDR3;
- dimm->dtype = DEV_UNKNOWN;
- dimm->edac_mode = EDAC_UNKNOWN;
- }
- }
+ mci->pdev = mc ? &priv->dev : &pdev->dev;
+ ie31200_get_dimm_config(mci, window, cfg, mc);
ie31200_clear_error_info(mci);
if (edac_mc_add_mc(mci)) {
@@ -534,16 +499,115 @@ static int ie31200_probe1(struct pci_dev *pdev, int dev_idx)
goto fail_unmap;
}
- /* get this far and it's successful */
- edac_dbg(3, "MC: success\n");
+ ie31200_pvt.priv[mc] = priv;
return 0;
-
fail_unmap:
iounmap(window);
-
fail_free:
edac_mc_free(mci);
+ return ret;
+}
+
+static void mce_check(struct mce *mce)
+{
+ struct ie31200_priv *priv;
+ int i;
+
+ for (i = 0; i < IE31200_IMC_NUM; i++) {
+ priv = ie31200_pvt.priv[i];
+ if (!priv)
+ continue;
+
+ __ie31200_check(priv->mci, mce);
+ }
+}
+
+static int mce_handler(struct notifier_block *nb, unsigned long val, void *data)
+{
+ struct mce *mce = (struct mce *)data;
+ char *type;
+
+ if (mce->kflags & MCE_HANDLED_CEC)
+ return NOTIFY_DONE;
+
+ /*
+ * Ignore unless this is a memory related error.
+ * Don't check MCI_STATUS_ADDRV since it's not set on some CPUs.
+ */
+ if ((mce->status & 0xefff) >> 7 != 1)
+ return NOTIFY_DONE;
+
+ type = mce->mcgstatus & MCG_STATUS_MCIP ? "Exception" : "Event";
+
+ edac_dbg(0, "CPU %d: Machine Check %s: 0x%llx Bank %d: 0x%llx\n",
+ mce->extcpu, type, mce->mcgstatus,
+ mce->bank, mce->status);
+ edac_dbg(0, "TSC 0x%llx\n", mce->tsc);
+ edac_dbg(0, "ADDR 0x%llx\n", mce->addr);
+ edac_dbg(0, "MISC 0x%llx\n", mce->misc);
+ edac_dbg(0, "PROCESSOR %u:0x%x TIME %llu SOCKET %u APIC 0x%x\n",
+ mce->cpuvendor, mce->cpuid, mce->time,
+ mce->socketid, mce->apicid);
+
+ mce_check(mce);
+ mce->kflags |= MCE_HANDLED_EDAC;
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block ie31200_mce_dec = {
+ .notifier_call = mce_handler,
+ .priority = MCE_PRIO_EDAC,
+};
+
+static void ie31200_unregister_mcis(void)
+{
+ struct ie31200_priv *priv;
+ struct mem_ctl_info *mci;
+ int i;
+
+ for (i = 0; i < IE31200_IMC_NUM; i++) {
+ priv = ie31200_pvt.priv[i];
+ if (!priv)
+ continue;
+ mci = priv->mci;
+ edac_mc_del_mc(mci->pdev);
+ iounmap(priv->window);
+ edac_mc_free(mci);
+ }
+}
+
+static int ie31200_probe1(struct pci_dev *pdev, struct res_config *cfg)
+{
+ int i, ret;
+
+ edac_dbg(0, "MC:\n");
+
+ if (!ecc_capable(pdev)) {
+ ie31200_printk(KERN_INFO, "No ECC support\n");
+ return -ENODEV;
+ }
+
+ for (i = 0; i < cfg->imc_num; i++) {
+ ret = ie31200_register_mci(pdev, cfg, i);
+ if (ret)
+ goto fail_register;
+ }
+
+ if (cfg->cmci) {
+ mce_register_decode_chain(&ie31200_mce_dec);
+ edac_op_state = EDAC_OPSTATE_INT;
+ } else {
+ edac_op_state = EDAC_OPSTATE_POLL;
+ }
+
+ /* get this far and it's successful. */
+ edac_dbg(3, "MC: success\n");
+ return 0;
+
+fail_register:
+ ie31200_unregister_mcis();
return ret;
}
@@ -555,7 +619,7 @@ static int ie31200_init_one(struct pci_dev *pdev,
edac_dbg(0, "MC:\n");
if (pci_enable_device(pdev) < 0)
return -EIO;
- rc = ie31200_probe1(pdev, ent->driver_data);
+ rc = ie31200_probe1(pdev, (struct res_config *)ent->driver_data);
if (rc == 0 && !mci_pdev)
mci_pdev = pci_dev_get(pdev);
@@ -564,43 +628,112 @@ static int ie31200_init_one(struct pci_dev *pdev,
static void ie31200_remove_one(struct pci_dev *pdev)
{
- struct mem_ctl_info *mci;
- struct ie31200_priv *priv;
+ struct ie31200_priv *priv = ie31200_pvt.priv[0];
edac_dbg(0, "\n");
pci_dev_put(mci_pdev);
mci_pdev = NULL;
- mci = edac_mc_del_mc(&pdev->dev);
- if (!mci)
- return;
- priv = mci->pvt_info;
- iounmap(priv->window);
- edac_mc_free(mci);
+ if (priv->cfg->cmci)
+ mce_unregister_decode_chain(&ie31200_mce_dec);
+ ie31200_unregister_mcis();
}
+static struct res_config snb_cfg = {
+ .mtype = MEM_DDR3,
+ .imc_num = 1,
+ .reg_mchbar_mask = GENMASK_ULL(38, 15),
+ .reg_mchbar_window_size = BIT_ULL(15),
+ .reg_eccerrlog_offset[0] = 0x40c8,
+ .reg_eccerrlog_offset[1] = 0x44c8,
+ .reg_eccerrlog_ce_mask = BIT_ULL(0),
+ .reg_eccerrlog_ue_mask = BIT_ULL(1),
+ .reg_eccerrlog_rank_mask = GENMASK_ULL(28, 27),
+ .reg_eccerrlog_syndrome_mask = GENMASK_ULL(23, 16),
+ .reg_mad_dimm_size_granularity = BIT_ULL(28),
+ .reg_mad_dimm_offset[0] = 0x5004,
+ .reg_mad_dimm_offset[1] = 0x5008,
+ .reg_mad_dimm_size_mask[0] = GENMASK(7, 0),
+ .reg_mad_dimm_size_mask[1] = GENMASK(15, 8),
+ .reg_mad_dimm_rank_mask[0] = BIT(17),
+ .reg_mad_dimm_rank_mask[1] = BIT(18),
+ .reg_mad_dimm_width_mask[0] = BIT(19),
+ .reg_mad_dimm_width_mask[1] = BIT(20),
+};
+
+static struct res_config skl_cfg = {
+ .mtype = MEM_DDR4,
+ .imc_num = 1,
+ .reg_mchbar_mask = GENMASK_ULL(38, 15),
+ .reg_mchbar_window_size = BIT_ULL(15),
+ .reg_eccerrlog_offset[0] = 0x4048,
+ .reg_eccerrlog_offset[1] = 0x4448,
+ .reg_eccerrlog_ce_mask = BIT_ULL(0),
+ .reg_eccerrlog_ue_mask = BIT_ULL(1),
+ .reg_eccerrlog_rank_mask = GENMASK_ULL(28, 27),
+ .reg_eccerrlog_syndrome_mask = GENMASK_ULL(23, 16),
+ .reg_mad_dimm_size_granularity = BIT_ULL(30),
+ .reg_mad_dimm_offset[0] = 0x500c,
+ .reg_mad_dimm_offset[1] = 0x5010,
+ .reg_mad_dimm_size_mask[0] = GENMASK(5, 0),
+ .reg_mad_dimm_size_mask[1] = GENMASK(21, 16),
+ .reg_mad_dimm_rank_mask[0] = BIT(10),
+ .reg_mad_dimm_rank_mask[1] = BIT(26),
+ .reg_mad_dimm_width_mask[0] = GENMASK(9, 8),
+ .reg_mad_dimm_width_mask[1] = GENMASK(25, 24),
+};
+
+struct res_config rpl_s_cfg = {
+ .mtype = MEM_DDR5,
+ .cmci = true,
+ .imc_num = 2,
+ .reg_mchbar_mask = GENMASK_ULL(41, 17),
+ .reg_mchbar_window_size = BIT_ULL(16),
+ .reg_eccerrlog_offset[0] = 0xe048,
+ .reg_eccerrlog_offset[1] = 0xe848,
+ .reg_eccerrlog_ce_mask = BIT_ULL(0),
+ .reg_eccerrlog_ce_ovfl_mask = BIT_ULL(1),
+ .reg_eccerrlog_ue_mask = BIT_ULL(2),
+ .reg_eccerrlog_ue_ovfl_mask = BIT_ULL(3),
+ .reg_eccerrlog_rank_mask = GENMASK_ULL(28, 27),
+ .reg_eccerrlog_syndrome_mask = GENMASK_ULL(23, 16),
+ .msr_clear_eccerrlog_offset = 0x791,
+ .reg_mad_dimm_offset[0] = 0xd80c,
+ .reg_mad_dimm_offset[1] = 0xd810,
+ .reg_mad_dimm_size_granularity = BIT_ULL(29),
+ .reg_mad_dimm_size_mask[0] = GENMASK(6, 0),
+ .reg_mad_dimm_size_mask[1] = GENMASK(22, 16),
+ .reg_mad_dimm_rank_mask[0] = GENMASK(10, 9),
+ .reg_mad_dimm_rank_mask[1] = GENMASK(27, 26),
+ .reg_mad_dimm_width_mask[0] = GENMASK(8, 7),
+ .reg_mad_dimm_width_mask[1] = GENMASK(25, 24),
+};
+
static const struct pci_device_id ie31200_pci_tbl[] = {
- { PCI_VEND_DEV(INTEL, IE31200_HB_1), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
- { PCI_VEND_DEV(INTEL, IE31200_HB_2), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
- { PCI_VEND_DEV(INTEL, IE31200_HB_3), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
- { PCI_VEND_DEV(INTEL, IE31200_HB_4), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
- { PCI_VEND_DEV(INTEL, IE31200_HB_5), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
- { PCI_VEND_DEV(INTEL, IE31200_HB_6), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
- { PCI_VEND_DEV(INTEL, IE31200_HB_7), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
- { PCI_VEND_DEV(INTEL, IE31200_HB_8), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
- { PCI_VEND_DEV(INTEL, IE31200_HB_9), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
- { PCI_VEND_DEV(INTEL, IE31200_HB_10), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
- { PCI_VEND_DEV(INTEL, IE31200_HB_11), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
- { PCI_VEND_DEV(INTEL, IE31200_HB_12), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
- { PCI_VEND_DEV(INTEL, IE31200_HB_CFL_1), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
- { PCI_VEND_DEV(INTEL, IE31200_HB_CFL_2), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
- { PCI_VEND_DEV(INTEL, IE31200_HB_CFL_3), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
- { PCI_VEND_DEV(INTEL, IE31200_HB_CFL_4), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
- { PCI_VEND_DEV(INTEL, IE31200_HB_CFL_5), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
- { PCI_VEND_DEV(INTEL, IE31200_HB_CFL_6), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
- { PCI_VEND_DEV(INTEL, IE31200_HB_CFL_7), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
- { PCI_VEND_DEV(INTEL, IE31200_HB_CFL_8), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
- { PCI_VEND_DEV(INTEL, IE31200_HB_CFL_9), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
- { PCI_VEND_DEV(INTEL, IE31200_HB_CFL_10), PCI_ANY_ID, PCI_ANY_ID, 0, 0, IE31200 },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_HB_1), (kernel_ulong_t)&snb_cfg },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_HB_2), (kernel_ulong_t)&snb_cfg },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_HB_3), (kernel_ulong_t)&snb_cfg },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_HB_4), (kernel_ulong_t)&snb_cfg },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_HB_5), (kernel_ulong_t)&snb_cfg },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_HB_6), (kernel_ulong_t)&snb_cfg },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_HB_7), (kernel_ulong_t)&snb_cfg },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_HB_8), (kernel_ulong_t)&skl_cfg },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_HB_9), (kernel_ulong_t)&skl_cfg },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_HB_10), (kernel_ulong_t)&skl_cfg },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_HB_11), (kernel_ulong_t)&skl_cfg },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_HB_12), (kernel_ulong_t)&skl_cfg },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_HB_CFL_1), (kernel_ulong_t)&skl_cfg },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_HB_CFL_2), (kernel_ulong_t)&skl_cfg },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_HB_CFL_3), (kernel_ulong_t)&skl_cfg },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_HB_CFL_4), (kernel_ulong_t)&skl_cfg },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_HB_CFL_5), (kernel_ulong_t)&skl_cfg },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_HB_CFL_6), (kernel_ulong_t)&skl_cfg },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_HB_CFL_7), (kernel_ulong_t)&skl_cfg },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_HB_CFL_8), (kernel_ulong_t)&skl_cfg },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_HB_CFL_9), (kernel_ulong_t)&skl_cfg },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_HB_CFL_10), (kernel_ulong_t)&skl_cfg },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_RPL_S_1), (kernel_ulong_t)&rpl_s_cfg },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_RPL_S_2), (kernel_ulong_t)&rpl_s_cfg },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_RPL_S_3), (kernel_ulong_t)&rpl_s_cfg },
{ 0, } /* 0 terminated list. */
};
MODULE_DEVICE_TABLE(pci, ie31200_pci_tbl);
@@ -617,12 +750,10 @@ static int __init ie31200_init(void)
int pci_rc, i;
edac_dbg(3, "MC:\n");
- /* Ensure that the OPSTATE is set correctly for POLL or NMI */
- opstate_init();
pci_rc = pci_register_driver(&ie31200_driver);
if (pci_rc < 0)
- goto fail0;
+ return pci_rc;
if (!mci_pdev) {
ie31200_registered = 0;
@@ -633,11 +764,13 @@ static int __init ie31200_init(void)
if (mci_pdev)
break;
}
+
if (!mci_pdev) {
edac_dbg(0, "ie31200 pci_get_device fail\n");
pci_rc = -ENODEV;
- goto fail1;
+ goto fail0;
}
+
pci_rc = ie31200_init_one(mci_pdev, &ie31200_pci_tbl[i]);
if (pci_rc < 0) {
edac_dbg(0, "ie31200 init fail\n");
@@ -645,12 +778,12 @@ static int __init ie31200_init(void)
goto fail1;
}
}
- return 0;
+ return 0;
fail1:
- pci_unregister_driver(&ie31200_driver);
-fail0:
pci_dev_put(mci_pdev);
+fail0:
+ pci_unregister_driver(&ie31200_driver);
return pci_rc;
}
diff --git a/drivers/edac/igen6_edac.c b/drivers/edac/igen6_edac.c
index fdf3a84fe698..5807517ee32d 100644
--- a/drivers/edac/igen6_edac.c
+++ b/drivers/edac/igen6_edac.c
@@ -125,7 +125,7 @@
#define MEM_SLICE_HASH_MASK(v) (GET_BITFIELD(v, 6, 19) << 6)
#define MEM_SLICE_HASH_LSB_MASK_BIT(v) GET_BITFIELD(v, 24, 26)
-static struct res_config {
+static const struct res_config {
bool machine_check;
int num_imc;
u32 imc_base;
@@ -472,7 +472,7 @@ static u64 rpl_p_err_addr(u64 ecclog)
return ECC_ERROR_LOG_ADDR45(ecclog);
}
-static struct res_config ehl_cfg = {
+static const struct res_config ehl_cfg = {
.num_imc = 1,
.imc_base = 0x5000,
.ibecc_base = 0xdc00,
@@ -482,7 +482,7 @@ static struct res_config ehl_cfg = {
.err_addr_to_imc_addr = ehl_err_addr_to_imc_addr,
};
-static struct res_config icl_cfg = {
+static const struct res_config icl_cfg = {
.num_imc = 1,
.imc_base = 0x5000,
.ibecc_base = 0xd800,
@@ -492,7 +492,7 @@ static struct res_config icl_cfg = {
.err_addr_to_imc_addr = ehl_err_addr_to_imc_addr,
};
-static struct res_config tgl_cfg = {
+static const struct res_config tgl_cfg = {
.machine_check = true,
.num_imc = 2,
.imc_base = 0x5000,
@@ -506,7 +506,7 @@ static struct res_config tgl_cfg = {
.err_addr_to_imc_addr = tgl_err_addr_to_imc_addr,
};
-static struct res_config adl_cfg = {
+static const struct res_config adl_cfg = {
.machine_check = true,
.num_imc = 2,
.imc_base = 0xd800,
@@ -517,7 +517,7 @@ static struct res_config adl_cfg = {
.err_addr_to_imc_addr = adl_err_addr_to_imc_addr,
};
-static struct res_config adl_n_cfg = {
+static const struct res_config adl_n_cfg = {
.machine_check = true,
.num_imc = 1,
.imc_base = 0xd800,
@@ -528,7 +528,7 @@ static struct res_config adl_n_cfg = {
.err_addr_to_imc_addr = adl_err_addr_to_imc_addr,
};
-static struct res_config rpl_p_cfg = {
+static const struct res_config rpl_p_cfg = {
.machine_check = true,
.num_imc = 2,
.imc_base = 0xd800,
@@ -540,7 +540,7 @@ static struct res_config rpl_p_cfg = {
.err_addr_to_imc_addr = adl_err_addr_to_imc_addr,
};
-static struct res_config mtl_ps_cfg = {
+static const struct res_config mtl_ps_cfg = {
.machine_check = true,
.num_imc = 2,
.imc_base = 0xd800,
@@ -551,7 +551,7 @@ static struct res_config mtl_ps_cfg = {
.err_addr_to_imc_addr = adl_err_addr_to_imc_addr,
};
-static struct res_config mtl_p_cfg = {
+static const struct res_config mtl_p_cfg = {
.machine_check = true,
.num_imc = 2,
.imc_base = 0xd800,
@@ -785,13 +785,22 @@ static u64 ecclog_read_and_clear(struct igen6_imc *imc)
{
u64 ecclog = readq(imc->window + ECC_ERROR_LOG_OFFSET);
- if (ecclog & (ECC_ERROR_LOG_CE | ECC_ERROR_LOG_UE)) {
- /* Clear CE/UE bits by writing 1s */
- writeq(ecclog, imc->window + ECC_ERROR_LOG_OFFSET);
- return ecclog;
- }
+ /*
+ * Quirk: The ECC_ERROR_LOG register of certain SoCs may contain
+ * the invalid value ~0. This will result in a flood of invalid
+ * error reports in polling mode. Skip it.
+ */
+ if (ecclog == ~0)
+ return 0;
- return 0;
+ /* Neither a CE nor a UE. Skip it. */
+ if (!(ecclog & (ECC_ERROR_LOG_CE | ECC_ERROR_LOG_UE)))
+ return 0;
+
+ /* Clear CE/UE bits by writing 1s */
+ writeq(ecclog, imc->window + ECC_ERROR_LOG_OFFSET);
+
+ return ecclog;
}
static void errsts_clear(struct igen6_imc *imc)
@@ -1374,7 +1383,7 @@ static void unregister_err_handler(void)
unregister_nmi_handler(NMI_SERR, IGEN6_NMI_NAME);
}
-static void opstate_set(struct res_config *cfg, const struct pci_device_id *ent)
+static void opstate_set(const struct res_config *cfg, const struct pci_device_id *ent)
{
/*
* Quirk: Certain SoCs' error reporting interrupts don't work.
diff --git a/drivers/edac/mem_repair.c b/drivers/edac/mem_repair.c
new file mode 100644
index 000000000000..3b1a845457b0
--- /dev/null
+++ b/drivers/edac/mem_repair.c
@@ -0,0 +1,359 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * The generic EDAC memory repair driver is designed to control memory
+ * devices with memory repair features, such as Post Package Repair (PPR),
+ * memory sparing, etc. The common sysfs memory repair interface abstracts
+ * the control of various arbitrary memory repair functionalities into a
+ * unified set of functions.
+ *
+ * Copyright (c) 2024-2025 HiSilicon Limited.
+ */
+
+#include <linux/edac.h>
+
+enum edac_mem_repair_attributes {
+ MR_TYPE,
+ MR_PERSIST_MODE,
+ MR_SAFE_IN_USE,
+ MR_HPA,
+ MR_MIN_HPA,
+ MR_MAX_HPA,
+ MR_DPA,
+ MR_MIN_DPA,
+ MR_MAX_DPA,
+ MR_NIBBLE_MASK,
+ MR_BANK_GROUP,
+ MR_BANK,
+ MR_RANK,
+ MR_ROW,
+ MR_COLUMN,
+ MR_CHANNEL,
+ MR_SUB_CHANNEL,
+ MEM_DO_REPAIR,
+ MR_MAX_ATTRS
+};
+
+struct edac_mem_repair_dev_attr {
+ struct device_attribute dev_attr;
+ u8 instance;
+};
+
+struct edac_mem_repair_context {
+ char name[EDAC_FEAT_NAME_LEN];
+ struct edac_mem_repair_dev_attr mem_repair_dev_attr[MR_MAX_ATTRS];
+ struct attribute *mem_repair_attrs[MR_MAX_ATTRS + 1];
+ struct attribute_group group;
+};
+
+#define TO_MR_DEV_ATTR(_dev_attr) \
+ container_of(_dev_attr, struct edac_mem_repair_dev_attr, dev_attr)
+
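+/*
+ * Generate a sysfs show() handler that forwards to the per-instance
+ * ops-><cb>() callback and prints the returned value with <format>.
+ */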
+#define MR_ATTR_SHOW(attrib, cb, type, format) \
+static ssize_t attrib##_show(struct device *ras_feat_dev, \
+ struct device_attribute *attr, char *buf) \
+{ \
+ u8 inst = TO_MR_DEV_ATTR(attr)->instance; \
+ struct edac_dev_feat_ctx *ctx = dev_get_drvdata(ras_feat_dev); \
+ const struct edac_mem_repair_ops *ops = \
+ ctx->mem_repair[inst].mem_repair_ops; \
+ type data; \
+ int ret; \
+ \
+ ret = ops->cb(ras_feat_dev->parent, ctx->mem_repair[inst].private, \
+ &data); \
+ if (ret) \
+ return ret; \
+ \
+ return sysfs_emit(buf, format, data); \
+}
+
+MR_ATTR_SHOW(repair_type, get_repair_type, const char *, "%s\n")
+MR_ATTR_SHOW(persist_mode, get_persist_mode, bool, "%u\n")
+MR_ATTR_SHOW(repair_safe_when_in_use, get_repair_safe_when_in_use, bool, "%u\n")
+MR_ATTR_SHOW(hpa, get_hpa, u64, "0x%llx\n")
+MR_ATTR_SHOW(min_hpa, get_min_hpa, u64, "0x%llx\n")
+MR_ATTR_SHOW(max_hpa, get_max_hpa, u64, "0x%llx\n")
+MR_ATTR_SHOW(dpa, get_dpa, u64, "0x%llx\n")
+MR_ATTR_SHOW(min_dpa, get_min_dpa, u64, "0x%llx\n")
+MR_ATTR_SHOW(max_dpa, get_max_dpa, u64, "0x%llx\n")
+MR_ATTR_SHOW(nibble_mask, get_nibble_mask, u32, "0x%x\n")
+MR_ATTR_SHOW(bank_group, get_bank_group, u32, "%u\n")
+MR_ATTR_SHOW(bank, get_bank, u32, "%u\n")
+MR_ATTR_SHOW(rank, get_rank, u32, "%u\n")
+MR_ATTR_SHOW(row, get_row, u32, "0x%x\n")
+MR_ATTR_SHOW(column, get_column, u32, "%u\n")
+MR_ATTR_SHOW(channel, get_channel, u32, "%u\n")
+MR_ATTR_SHOW(sub_channel, get_sub_channel, u32, "%u\n")
+
+#define MR_ATTR_STORE(attrib, cb, type, conv_func) \
+static ssize_t attrib##_store(struct device *ras_feat_dev, \
+ struct device_attribute *attr, \
+ const char *buf, size_t len) \
+{ \
+ u8 inst = TO_MR_DEV_ATTR(attr)->instance; \
+ struct edac_dev_feat_ctx *ctx = dev_get_drvdata(ras_feat_dev); \
+ const struct edac_mem_repair_ops *ops = \
+ ctx->mem_repair[inst].mem_repair_ops; \
+ type data; \
+ int ret; \
+ \
+ ret = conv_func(buf, 0, &data); \
+ if (ret < 0) \
+ return ret; \
+ \
+ ret = ops->cb(ras_feat_dev->parent, ctx->mem_repair[inst].private, \
+ data); \
+ if (ret) \
+ return ret; \
+ \
+ return len; \
+}
+
+MR_ATTR_STORE(persist_mode, set_persist_mode, unsigned long, kstrtoul)
+MR_ATTR_STORE(hpa, set_hpa, u64, kstrtou64)
+MR_ATTR_STORE(dpa, set_dpa, u64, kstrtou64)
+MR_ATTR_STORE(nibble_mask, set_nibble_mask, unsigned long, kstrtoul)
+MR_ATTR_STORE(bank_group, set_bank_group, unsigned long, kstrtoul)
+MR_ATTR_STORE(bank, set_bank, unsigned long, kstrtoul)
+MR_ATTR_STORE(rank, set_rank, unsigned long, kstrtoul)
+MR_ATTR_STORE(row, set_row, unsigned long, kstrtoul)
+MR_ATTR_STORE(column, set_column, unsigned long, kstrtoul)
+MR_ATTR_STORE(channel, set_channel, unsigned long, kstrtoul)
+MR_ATTR_STORE(sub_channel, set_sub_channel, unsigned long, kstrtoul)
+
+#define MR_DO_OP(attrib, cb) \
+static ssize_t attrib##_store(struct device *ras_feat_dev, \
+ struct device_attribute *attr, \
+ const char *buf, size_t len) \
+{ \
+ u8 inst = TO_MR_DEV_ATTR(attr)->instance; \
+ struct edac_dev_feat_ctx *ctx = dev_get_drvdata(ras_feat_dev); \
+ const struct edac_mem_repair_ops *ops = ctx->mem_repair[inst].mem_repair_ops; \
+ unsigned long data; \
+ int ret; \
+ \
+ ret = kstrtoul(buf, 0, &data); \
+ if (ret < 0) \
+ return ret; \
+ \
+ ret = ops->cb(ras_feat_dev->parent, ctx->mem_repair[inst].private, data); \
+ if (ret) \
+ return ret; \
+ \
+ return len; \
+}
+
+MR_DO_OP(repair, do_repair)
+
+static umode_t mem_repair_attr_visible(struct kobject *kobj, struct attribute *a, int attr_id)
+{
+ struct device *ras_feat_dev = kobj_to_dev(kobj);
+ struct device_attribute *dev_attr = container_of(a, struct device_attribute, attr);
+ struct edac_dev_feat_ctx *ctx = dev_get_drvdata(ras_feat_dev);
+ u8 inst = TO_MR_DEV_ATTR(dev_attr)->instance;
+ const struct edac_mem_repair_ops *ops = ctx->mem_repair[inst].mem_repair_ops;
+
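+ /*
+ * Attributes backed only by a get_*() callback are exposed read-only
+ * (0444); attributes with both get and set callbacks keep their
+ * default mode; attributes without a getter are hidden.
+ */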
+ switch (attr_id) {
+ case MR_TYPE:
+ if (ops->get_repair_type)
+ return a->mode;
+ break;
+ case MR_PERSIST_MODE:
+ if (ops->get_persist_mode) {
+ if (ops->set_persist_mode)
+ return a->mode;
+ else
+ return 0444;
+ }
+ break;
+ case MR_SAFE_IN_USE:
+ if (ops->get_repair_safe_when_in_use)
+ return a->mode;
+ break;
+ case MR_HPA:
+ if (ops->get_hpa) {
+ if (ops->set_hpa)
+ return a->mode;
+ else
+ return 0444;
+ }
+ break;
+ case MR_MIN_HPA:
+ if (ops->get_min_hpa)
+ return a->mode;
+ break;
+ case MR_MAX_HPA:
+ if (ops->get_max_hpa)
+ return a->mode;
+ break;
+ case MR_DPA:
+ if (ops->get_dpa) {
+ if (ops->set_dpa)
+ return a->mode;
+ else
+ return 0444;
+ }
+ break;
+ case MR_MIN_DPA:
+ if (ops->get_min_dpa)
+ return a->mode;
+ break;
+ case MR_MAX_DPA:
+ if (ops->get_max_dpa)
+ return a->mode;
+ break;
+ case MR_NIBBLE_MASK:
+ if (ops->get_nibble_mask) {
+ if (ops->set_nibble_mask)
+ return a->mode;
+ else
+ return 0444;
+ }
+ break;
+ case MR_BANK_GROUP:
+ if (ops->get_bank_group) {
+ if (ops->set_bank_group)
+ return a->mode;
+ else
+ return 0444;
+ }
+ break;
+ case MR_BANK:
+ if (ops->get_bank) {
+ if (ops->set_bank)
+ return a->mode;
+ else
+ return 0444;
+ }
+ break;
+ case MR_RANK:
+ if (ops->get_rank) {
+ if (ops->set_rank)
+ return a->mode;
+ else
+ return 0444;
+ }
+ break;
+ case MR_ROW:
+ if (ops->get_row) {
+ if (ops->set_row)
+ return a->mode;
+ else
+ return 0444;
+ }
+ break;
+ case MR_COLUMN:
+ if (ops->get_column) {
+ if (ops->set_column)
+ return a->mode;
+ else
+ return 0444;
+ }
+ break;
+ case MR_CHANNEL:
+ if (ops->get_channel) {
+ if (ops->set_channel)
+ return a->mode;
+ else
+ return 0444;
+ }
+ break;
+ case MR_SUB_CHANNEL:
+ if (ops->get_sub_channel) {
+ if (ops->set_sub_channel)
+ return a->mode;
+ else
+ return 0444;
+ }
+ break;
+ case MEM_DO_REPAIR:
+ if (ops->do_repair)
+ return a->mode;
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+#define MR_ATTR_RO(_name, _instance) \
+ ((struct edac_mem_repair_dev_attr) { .dev_attr = __ATTR_RO(_name), \
+ .instance = _instance })
+
+#define MR_ATTR_WO(_name, _instance) \
+ ((struct edac_mem_repair_dev_attr) { .dev_attr = __ATTR_WO(_name), \
+ .instance = _instance })
+
+#define MR_ATTR_RW(_name, _instance) \
+ ((struct edac_mem_repair_dev_attr) { .dev_attr = __ATTR_RW(_name), \
+ .instance = _instance })
+
+static int mem_repair_create_desc(struct device *dev,
+ const struct attribute_group **attr_groups,
+ u8 instance)
+{
+ struct edac_mem_repair_context *ctx;
+ struct attribute_group *group;
+ int i;
+ struct edac_mem_repair_dev_attr dev_attr[] = {
+ [MR_TYPE] = MR_ATTR_RO(repair_type, instance),
+ [MR_PERSIST_MODE] = MR_ATTR_RW(persist_mode, instance),
+ [MR_SAFE_IN_USE] = MR_ATTR_RO(repair_safe_when_in_use, instance),
+ [MR_HPA] = MR_ATTR_RW(hpa, instance),
+ [MR_MIN_HPA] = MR_ATTR_RO(min_hpa, instance),
+ [MR_MAX_HPA] = MR_ATTR_RO(max_hpa, instance),
+ [MR_DPA] = MR_ATTR_RW(dpa, instance),
+ [MR_MIN_DPA] = MR_ATTR_RO(min_dpa, instance),
+ [MR_MAX_DPA] = MR_ATTR_RO(max_dpa, instance),
+ [MR_NIBBLE_MASK] = MR_ATTR_RW(nibble_mask, instance),
+ [MR_BANK_GROUP] = MR_ATTR_RW(bank_group, instance),
+ [MR_BANK] = MR_ATTR_RW(bank, instance),
+ [MR_RANK] = MR_ATTR_RW(rank, instance),
+ [MR_ROW] = MR_ATTR_RW(row, instance),
+ [MR_COLUMN] = MR_ATTR_RW(column, instance),
+ [MR_CHANNEL] = MR_ATTR_RW(channel, instance),
+ [MR_SUB_CHANNEL] = MR_ATTR_RW(sub_channel, instance),
+ [MEM_DO_REPAIR] = MR_ATTR_WO(repair, instance)
+ };
+
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
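+ /*
+ * Copy the on-stack attribute templates into the devm-allocated
+ * context so they stay valid for the lifetime of the device.
+ */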
+ for (i = 0; i < MR_MAX_ATTRS; i++) {
+ memcpy(&ctx->mem_repair_dev_attr[i],
+ &dev_attr[i], sizeof(dev_attr[i]));
+ ctx->mem_repair_attrs[i] =
+ &ctx->mem_repair_dev_attr[i].dev_attr.attr;
+ }
+
+ sprintf(ctx->name, "%s%d", "mem_repair", instance);
+ group = &ctx->group;
+ group->name = ctx->name;
+ group->attrs = ctx->mem_repair_attrs;
+ group->is_visible = mem_repair_attr_visible;
+ attr_groups[0] = group;
+
+ return 0;
+}
+
+/**
+ * edac_mem_repair_get_desc - get EDAC memory repair descriptors
+ * @dev: client device with memory repair feature
+ * @attr_groups: pointer to attribute group container
+ * @instance: device's memory repair instance number.
+ *
+ * Return:
+ * * %0 - Success.
+ * * %-EINVAL - Invalid parameters passed.
+ * * %-ENOMEM - Dynamic memory allocation failed.
+ */
+int edac_mem_repair_get_desc(struct device *dev,
+ const struct attribute_group **attr_groups, u8 instance)
+{
+ if (!dev || !attr_groups)
+ return -EINVAL;
+
+ return mem_repair_create_desc(dev, attr_groups, instance);
+}
diff --git a/drivers/edac/pnd2_edac.c b/drivers/edac/pnd2_edac.c
index f93f2f2b1cf2..af14c8a3279f 100644
--- a/drivers/edac/pnd2_edac.c
+++ b/drivers/edac/pnd2_edac.c
@@ -372,7 +372,7 @@ static int gen_asym_mask(struct b_cr_slice_channel_hash *p,
struct b_cr_asym_mem_region1_mchbar *as1,
struct b_cr_asym_2way_mem_region_mchbar *as2way)
{
- const int intlv[] = { 0x5, 0xA, 0x3, 0xC };
+ static const int intlv[] = { 0x5, 0xA, 0x3, 0xC };
int mask = 0;
if (as2way->asym_2way_interleave_enable)
@@ -489,7 +489,7 @@ static int dnv_get_registers(void)
*/
static int get_registers(void)
{
- const int intlv[] = { 10, 11, 12, 12 };
+ static const int intlv[] = { 10, 11, 12, 12 };
if (RD_REG(&tolud, b_cr_tolud_pci) ||
RD_REG(&touud_lo, b_cr_touud_lo_pci) ||
diff --git a/drivers/edac/scrub.c b/drivers/edac/scrub.c
new file mode 100644
index 000000000000..e421d3ebd959
--- /dev/null
+++ b/drivers/edac/scrub.c
@@ -0,0 +1,209 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * The generic EDAC scrub driver controls the memory scrubbers in the
+ * system. The common sysfs scrub interface abstracts the control of
+ * various arbitrary scrubbing functionalities into a unified set of
+ * functions.
+ *
+ * Copyright (c) 2024-2025 HiSilicon Limited.
+ */
+
+#include <linux/edac.h>
+
+enum edac_scrub_attributes {
+ SCRUB_ADDRESS,
+ SCRUB_SIZE,
+ SCRUB_ENABLE_BACKGROUND,
+ SCRUB_MIN_CYCLE_DURATION,
+ SCRUB_MAX_CYCLE_DURATION,
+ SCRUB_CUR_CYCLE_DURATION,
+ SCRUB_MAX_ATTRS
+};
+
+struct edac_scrub_dev_attr {
+ struct device_attribute dev_attr;
+ u8 instance;
+};
+
+struct edac_scrub_context {
+ char name[EDAC_FEAT_NAME_LEN];
+ struct edac_scrub_dev_attr scrub_dev_attr[SCRUB_MAX_ATTRS];
+ struct attribute *scrub_attrs[SCRUB_MAX_ATTRS + 1];
+ struct attribute_group group;
+};
+
+#define TO_SCRUB_DEV_ATTR(_dev_attr) \
+ container_of(_dev_attr, struct edac_scrub_dev_attr, dev_attr)
+
+#define EDAC_SCRUB_ATTR_SHOW(attrib, cb, type, format) \
+static ssize_t attrib##_show(struct device *ras_feat_dev, \
+ struct device_attribute *attr, char *buf) \
+{ \
+ u8 inst = TO_SCRUB_DEV_ATTR(attr)->instance; \
+ struct edac_dev_feat_ctx *ctx = dev_get_drvdata(ras_feat_dev); \
+ const struct edac_scrub_ops *ops = ctx->scrub[inst].scrub_ops; \
+ type data; \
+ int ret; \
+ \
+ ret = ops->cb(ras_feat_dev->parent, ctx->scrub[inst].private, &data); \
+ if (ret) \
+ return ret; \
+ \
+ return sysfs_emit(buf, format, data); \
+}
+
+EDAC_SCRUB_ATTR_SHOW(addr, read_addr, u64, "0x%llx\n")
+EDAC_SCRUB_ATTR_SHOW(size, read_size, u64, "0x%llx\n")
+EDAC_SCRUB_ATTR_SHOW(enable_background, get_enabled_bg, bool, "%u\n")
+EDAC_SCRUB_ATTR_SHOW(min_cycle_duration, get_min_cycle, u32, "%u\n")
+EDAC_SCRUB_ATTR_SHOW(max_cycle_duration, get_max_cycle, u32, "%u\n")
+EDAC_SCRUB_ATTR_SHOW(current_cycle_duration, get_cycle_duration, u32, "%u\n")
+
+#define EDAC_SCRUB_ATTR_STORE(attrib, cb, type, conv_func) \
+static ssize_t attrib##_store(struct device *ras_feat_dev, \
+ struct device_attribute *attr, \
+ const char *buf, size_t len) \
+{ \
+ u8 inst = TO_SCRUB_DEV_ATTR(attr)->instance; \
+ struct edac_dev_feat_ctx *ctx = dev_get_drvdata(ras_feat_dev); \
+ const struct edac_scrub_ops *ops = ctx->scrub[inst].scrub_ops; \
+ type data; \
+ int ret; \
+ \
+ ret = conv_func(buf, 0, &data); \
+ if (ret < 0) \
+ return ret; \
+ \
+ ret = ops->cb(ras_feat_dev->parent, ctx->scrub[inst].private, data); \
+ if (ret) \
+ return ret; \
+ \
+ return len; \
+}
+
+EDAC_SCRUB_ATTR_STORE(addr, write_addr, u64, kstrtou64)
+EDAC_SCRUB_ATTR_STORE(size, write_size, u64, kstrtou64)
+EDAC_SCRUB_ATTR_STORE(enable_background, set_enabled_bg, unsigned long, kstrtoul)
+EDAC_SCRUB_ATTR_STORE(current_cycle_duration, set_cycle_duration, unsigned long, kstrtoul)
+
+static umode_t scrub_attr_visible(struct kobject *kobj, struct attribute *a, int attr_id)
+{
+ struct device *ras_feat_dev = kobj_to_dev(kobj);
+ struct device_attribute *dev_attr = container_of(a, struct device_attribute, attr);
+ u8 inst = TO_SCRUB_DEV_ATTR(dev_attr)->instance;
+ struct edac_dev_feat_ctx *ctx = dev_get_drvdata(ras_feat_dev);
+ const struct edac_scrub_ops *ops = ctx->scrub[inst].scrub_ops;
+
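+ /*
+ * Scrub attributes with only a read/get callback are exposed
+ * read-only; those with a matching write/set callback keep their
+ * default mode; attributes without a getter are hidden.
+ */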
+ switch (attr_id) {
+ case SCRUB_ADDRESS:
+ if (ops->read_addr) {
+ if (ops->write_addr)
+ return a->mode;
+ else
+ return 0444;
+ }
+ break;
+ case SCRUB_SIZE:
+ if (ops->read_size) {
+ if (ops->write_size)
+ return a->mode;
+ else
+ return 0444;
+ }
+ break;
+ case SCRUB_ENABLE_BACKGROUND:
+ if (ops->get_enabled_bg) {
+ if (ops->set_enabled_bg)
+ return a->mode;
+ else
+ return 0444;
+ }
+ break;
+ case SCRUB_MIN_CYCLE_DURATION:
+ if (ops->get_min_cycle)
+ return a->mode;
+ break;
+ case SCRUB_MAX_CYCLE_DURATION:
+ if (ops->get_max_cycle)
+ return a->mode;
+ break;
+ case SCRUB_CUR_CYCLE_DURATION:
+ if (ops->get_cycle_duration) {
+ if (ops->set_cycle_duration)
+ return a->mode;
+ else
+ return 0444;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+#define EDAC_SCRUB_ATTR_RO(_name, _instance) \
+ ((struct edac_scrub_dev_attr) { .dev_attr = __ATTR_RO(_name), \
+ .instance = _instance })
+
+#define EDAC_SCRUB_ATTR_WO(_name, _instance) \
+ ((struct edac_scrub_dev_attr) { .dev_attr = __ATTR_WO(_name), \
+ .instance = _instance })
+
+#define EDAC_SCRUB_ATTR_RW(_name, _instance) \
+ ((struct edac_scrub_dev_attr) { .dev_attr = __ATTR_RW(_name), \
+ .instance = _instance })
+
+static int scrub_create_desc(struct device *scrub_dev,
+ const struct attribute_group **attr_groups, u8 instance)
+{
+ struct edac_scrub_context *scrub_ctx;
+ struct attribute_group *group;
+ int i;
+ struct edac_scrub_dev_attr dev_attr[] = {
+ [SCRUB_ADDRESS] = EDAC_SCRUB_ATTR_RW(addr, instance),
+ [SCRUB_SIZE] = EDAC_SCRUB_ATTR_RW(size, instance),
+ [SCRUB_ENABLE_BACKGROUND] = EDAC_SCRUB_ATTR_RW(enable_background, instance),
+ [SCRUB_MIN_CYCLE_DURATION] = EDAC_SCRUB_ATTR_RO(min_cycle_duration, instance),
+ [SCRUB_MAX_CYCLE_DURATION] = EDAC_SCRUB_ATTR_RO(max_cycle_duration, instance),
+ [SCRUB_CUR_CYCLE_DURATION] = EDAC_SCRUB_ATTR_RW(current_cycle_duration, instance)
+ };
+
+ scrub_ctx = devm_kzalloc(scrub_dev, sizeof(*scrub_ctx), GFP_KERNEL);
+ if (!scrub_ctx)
+ return -ENOMEM;
+
+ group = &scrub_ctx->group;
+ for (i = 0; i < SCRUB_MAX_ATTRS; i++) {
+ memcpy(&scrub_ctx->scrub_dev_attr[i], &dev_attr[i], sizeof(dev_attr[i]));
+ scrub_ctx->scrub_attrs[i] = &scrub_ctx->scrub_dev_attr[i].dev_attr.attr;
+ }
+ sprintf(scrub_ctx->name, "%s%d", "scrub", instance);
+ group->name = scrub_ctx->name;
+ group->attrs = scrub_ctx->scrub_attrs;
+ group->is_visible = scrub_attr_visible;
+
+ attr_groups[0] = group;
+
+ return 0;
+}
+
+/**
+ * edac_scrub_get_desc - get EDAC scrub descriptors
+ * @scrub_dev: client device with scrub support
+ * @attr_groups: pointer to attribute group container
+ * @instance: device's scrub instance number.
+ *
+ * Return:
+ * * %0 - Success.
+ * * %-EINVAL - Invalid parameters passed.
+ * * %-ENOMEM - Dynamic memory allocation failed.
+ */
+int edac_scrub_get_desc(struct device *scrub_dev,
+ const struct attribute_group **attr_groups, u8 instance)
+{
+ if (!scrub_dev || !attr_groups)
+ return -EINVAL;
+
+ return scrub_create_desc(scrub_dev, attr_groups, instance);
+}
diff --git a/drivers/edac/skx_common.c b/drivers/edac/skx_common.c
index f7bd930e058f..fa5b442b1844 100644
--- a/drivers/edac/skx_common.c
+++ b/drivers/edac/skx_common.c
@@ -121,6 +121,35 @@ void skx_adxl_put(void)
}
EXPORT_SYMBOL_GPL(skx_adxl_put);
+static void skx_init_mc_mapping(struct skx_dev *d)
+{
+ /*
+ * By default, the BIOS presents all memory controllers within each
+ * socket to the EDAC driver. The physical indices are the same as
+ * the logical indices of the memory controllers enumerated by the
+ * EDAC driver.
+ */
+ for (int i = 0; i < NUM_IMC; i++)
+ d->mc_mapping[i] = i;
+}
+
+void skx_set_mc_mapping(struct skx_dev *d, u8 pmc, u8 lmc)
+{
+ edac_dbg(0, "Set the mapping of mc phy idx to logical idx: %02d -> %02d\n",
+ pmc, lmc);
+
+ d->mc_mapping[pmc] = lmc;
+}
+EXPORT_SYMBOL_GPL(skx_set_mc_mapping);
+
+static u8 skx_get_mc_mapping(struct skx_dev *d, u8 pmc)
+{
+ edac_dbg(0, "Get the mapping of mc phy idx to logical idx: %02d -> %02d\n",
+ pmc, d->mc_mapping[pmc]);
+
+ return d->mc_mapping[pmc];
+}
+
static bool skx_adxl_decode(struct decoded_addr *res, enum error_source err_src)
{
struct skx_dev *d;
@@ -188,6 +217,8 @@ static bool skx_adxl_decode(struct decoded_addr *res, enum error_source err_src)
return false;
}
+ res->imc = skx_get_mc_mapping(d, res->imc);
+
for (i = 0; i < adxl_component_count; i++) {
if (adxl_values[i] == ~0x0ull)
continue;
@@ -326,6 +357,8 @@ int skx_get_all_bus_mappings(struct res_config *cfg, struct list_head **list)
d->bus[0], d->bus[1], d->bus[2], d->bus[3]);
list_add_tail(&d->list, &dev_edac_list);
prev = pdev;
+
+ skx_init_mc_mapping(d);
}
if (list)
diff --git a/drivers/edac/skx_common.h b/drivers/edac/skx_common.h
index b0845bdd4516..ca5408803f87 100644
--- a/drivers/edac/skx_common.h
+++ b/drivers/edac/skx_common.h
@@ -93,6 +93,16 @@ struct skx_dev {
struct pci_dev *uracu; /* for i10nm CPU */
struct pci_dev *pcu_cr3; /* for HBM memory detection */
u32 mcroute;
+ /*
+ * Some server BIOS may hide certain memory controllers, and the
+ * EDAC driver skips those hidden memory controllers. However, the
+ * ADXL still decodes memory error addresses using physical memory
+ * controller indices. The mapping table is used to convert the
+ * physical indices (reported by ADXL) to the logical indices (used
+ * by the EDAC driver) of present memory controllers during the
+ * error handling process.
+ */
+ u8 mc_mapping[NUM_IMC];
struct skx_imc {
struct mem_ctl_info *mci;
struct pci_dev *mdev; /* for i10nm CPU */
@@ -242,6 +252,7 @@ void skx_adxl_put(void);
void skx_set_decode(skx_decode_f decode, skx_show_retry_log_f show_retry_log);
void skx_set_mem_cfg(bool mem_cfg_2lm);
void skx_set_res_cfg(struct res_config *cfg);
+void skx_set_mc_mapping(struct skx_dev *d, u8 pmc, u8 lmc);
int skx_get_src_id(struct skx_dev *d, int off, u8 *id);
diff --git a/drivers/edac/xgene_edac.c b/drivers/edac/xgene_edac.c
index 699c7d29d80c..9955396c9a52 100644
--- a/drivers/edac/xgene_edac.c
+++ b/drivers/edac/xgene_edac.c
@@ -15,6 +15,7 @@
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/regmap.h>
+#include <linux/string_choices.h>
#include "edac_module.h"
@@ -1407,7 +1408,7 @@ static void xgene_edac_iob_gic_report(struct edac_device_ctl_info *edac_dev)
dev_err(edac_dev->dev, "Multiple XGIC write size error\n");
info = readl(ctx->dev_csr + XGICTRANSERRREQINFO);
dev_err(edac_dev->dev, "XGIC %s access @ 0x%08X (0x%08X)\n",
- info & REQTYPE_MASK ? "read" : "write", ERRADDR_RD(info),
+ str_read_write(info & REQTYPE_MASK), ERRADDR_RD(info),
info);
writel(reg, ctx->dev_csr + XGICTRANSERRINTSTS);
@@ -1489,19 +1490,19 @@ static void xgene_edac_rb_report(struct edac_device_ctl_info *edac_dev)
if (reg & AGENT_OFFLINE_ERR_MASK)
dev_err(edac_dev->dev,
"IOB bus %s access to offline agent error\n",
- write ? "write" : "read");
+ str_write_read(write));
if (reg & UNIMPL_RBPAGE_ERR_MASK)
dev_err(edac_dev->dev,
"IOB bus %s access to unimplemented page error\n",
- write ? "write" : "read");
+ str_write_read(write));
if (reg & WORD_ALIGNED_ERR_MASK)
dev_err(edac_dev->dev,
"IOB bus %s word aligned access error\n",
- write ? "write" : "read");
+ str_write_read(write));
if (reg & PAGE_ACCESS_ERR_MASK)
dev_err(edac_dev->dev,
"IOB bus %s to page out of range access error\n",
- write ? "write" : "read");
+ str_write_read(write));
if (regmap_write(ctx->edac->rb_map, RBEIR, 0))
return;
if (regmap_write(ctx->edac->rb_map, RBCSR, 0))
@@ -1560,7 +1561,7 @@ rb_skip:
err_addr_lo = readl(ctx->dev_csr + IOBBATRANSERRREQINFOL);
err_addr_hi = readl(ctx->dev_csr + IOBBATRANSERRREQINFOH);
dev_err(edac_dev->dev, "IOB BA %s access at 0x%02X.%08X (0x%08X)\n",
- REQTYPE_F2_RD(err_addr_hi) ? "read" : "write",
+ str_read_write(REQTYPE_F2_RD(err_addr_hi)),
ERRADDRH_F2_RD(err_addr_hi), err_addr_lo, err_addr_hi);
if (reg & WRERR_RESP_MASK)
dev_err(edac_dev->dev, "IOB BA requestor ID 0x%08X\n",
@@ -1611,7 +1612,7 @@ chk_iob_axi0:
dev_err(edac_dev->dev,
"%sAXI slave 0 illegal %s access @ 0x%02X.%08X (0x%08X)\n",
reg & IOBAXIS0_M_ILLEGAL_ACCESS_MASK ? "Multiple " : "",
- REQTYPE_RD(err_addr_hi) ? "read" : "write",
+ str_read_write(REQTYPE_RD(err_addr_hi)),
ERRADDRH_RD(err_addr_hi), err_addr_lo, err_addr_hi);
writel(reg, ctx->dev_csr + IOBAXIS0TRANSERRINTSTS);
@@ -1625,7 +1626,7 @@ chk_iob_axi1:
dev_err(edac_dev->dev,
"%sAXI slave 1 illegal %s access @ 0x%02X.%08X (0x%08X)\n",
reg & IOBAXIS0_M_ILLEGAL_ACCESS_MASK ? "Multiple " : "",
- REQTYPE_RD(err_addr_hi) ? "read" : "write",
+ str_read_write(REQTYPE_RD(err_addr_hi)),
ERRADDRH_RD(err_addr_hi), err_addr_lo, err_addr_hi);
writel(reg, ctx->dev_csr + IOBAXIS1TRANSERRINTSTS);
}
diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig
index 9f35f69e0f9e..aadc395ee168 100644
--- a/drivers/firmware/Kconfig
+++ b/drivers/firmware/Kconfig
@@ -212,9 +212,20 @@ config SYSFB_SIMPLEFB
If unsure, say Y.
+config TH1520_AON_PROTOCOL
+ tristate "Always-On firmware protocol"
+ depends on ARCH_THEAD || COMPILE_TEST
+ depends on MAILBOX
+ help
+ Power, clock, and resource management capabilities on the TH1520 SoC are
+ managed by the E902 core. Firmware running on this core communicates with
+ the kernel through the Always-On protocol, using a hardware mailbox as
+ the transport medium. Say Y if you need such capabilities.
+
config TI_SCI_PROTOCOL
tristate "TI System Control Interface (TISCI) Message Protocol"
depends on TI_MESSAGE_MANAGER
+ default ARCH_K3
help
TI System Control Interface (TISCI) Message Protocol is used to manage
compute systems such as ARM, DSP etc with the system controller in
@@ -267,6 +278,7 @@ source "drivers/firmware/meson/Kconfig"
source "drivers/firmware/microchip/Kconfig"
source "drivers/firmware/psci/Kconfig"
source "drivers/firmware/qcom/Kconfig"
+source "drivers/firmware/samsung/Kconfig"
source "drivers/firmware/smccc/Kconfig"
source "drivers/firmware/tegra/Kconfig"
source "drivers/firmware/xilinx/Kconfig"
diff --git a/drivers/firmware/Makefile b/drivers/firmware/Makefile
index 7a8d486e718f..4ddec2820c96 100644
--- a/drivers/firmware/Makefile
+++ b/drivers/firmware/Makefile
@@ -18,6 +18,7 @@ obj-$(CONFIG_RASPBERRYPI_FIRMWARE) += raspberrypi.o
obj-$(CONFIG_FW_CFG_SYSFS) += qemu_fw_cfg.o
obj-$(CONFIG_SYSFB) += sysfb.o
obj-$(CONFIG_SYSFB_SIMPLEFB) += sysfb_simplefb.o
+obj-$(CONFIG_TH1520_AON_PROTOCOL) += thead,th1520-aon.o
obj-$(CONFIG_TI_SCI_PROTOCOL) += ti_sci.o
obj-$(CONFIG_TRUSTED_FOUNDATIONS) += trusted_foundations.o
obj-$(CONFIG_TURRIS_MOX_RWTM) += turris-mox-rwtm.o
@@ -33,6 +34,7 @@ obj-y += efi/
obj-y += imx/
obj-y += psci/
obj-y += qcom/
+obj-y += samsung/
obj-y += smccc/
obj-y += tegra/
obj-y += xilinx/
diff --git a/drivers/firmware/arm_ffa/bus.c b/drivers/firmware/arm_ffa/bus.c
index dfda5ffc14db..50bfe56c755e 100644
--- a/drivers/firmware/arm_ffa/bus.c
+++ b/drivers/firmware/arm_ffa/bus.c
@@ -15,7 +15,7 @@
#include "common.h"
-#define SCMI_UEVENT_MODALIAS_FMT "arm_ffa:%04x:%pUb"
+#define FFA_UEVENT_MODALIAS_FMT "arm_ffa:%04x:%pUb"
static DEFINE_IDA(ffa_bus_id);
@@ -68,7 +68,7 @@ static int ffa_device_uevent(const struct device *dev, struct kobj_uevent_env *e
{
const struct ffa_device *ffa_dev = to_ffa_dev(dev);
- return add_uevent_var(env, "MODALIAS=" SCMI_UEVENT_MODALIAS_FMT,
+ return add_uevent_var(env, "MODALIAS=" FFA_UEVENT_MODALIAS_FMT,
ffa_dev->vm_id, &ffa_dev->uuid);
}
@@ -77,7 +77,7 @@ static ssize_t modalias_show(struct device *dev,
{
struct ffa_device *ffa_dev = to_ffa_dev(dev);
- return sysfs_emit(buf, SCMI_UEVENT_MODALIAS_FMT, ffa_dev->vm_id,
+ return sysfs_emit(buf, FFA_UEVENT_MODALIAS_FMT, ffa_dev->vm_id,
&ffa_dev->uuid);
}
static DEVICE_ATTR_RO(modalias);
@@ -160,11 +160,12 @@ static int __ffa_devices_unregister(struct device *dev, void *data)
return 0;
}
-static void ffa_devices_unregister(void)
+void ffa_devices_unregister(void)
{
bus_for_each_dev(&ffa_bus_type, NULL, NULL,
__ffa_devices_unregister);
}
+EXPORT_SYMBOL_GPL(ffa_devices_unregister);
bool ffa_device_is_valid(struct ffa_device *ffa_dev)
{
@@ -192,7 +193,6 @@ ffa_device_register(const struct ffa_partition_info *part_info,
const struct ffa_ops *ops)
{
int id, ret;
- uuid_t uuid;
struct device *dev;
struct ffa_device *ffa_dev;
@@ -212,14 +212,14 @@ ffa_device_register(const struct ffa_partition_info *part_info,
dev = &ffa_dev->dev;
dev->bus = &ffa_bus_type;
dev->release = ffa_release_device;
+ dev->dma_mask = &dev->coherent_dma_mask;
dev_set_name(&ffa_dev->dev, "arm-ffa-%d", id);
ffa_dev->id = id;
ffa_dev->vm_id = part_info->id;
ffa_dev->properties = part_info->properties;
ffa_dev->ops = ops;
- import_uuid(&uuid, (u8 *)part_info->uuid);
- uuid_copy(&ffa_dev->uuid, &uuid);
+ uuid_copy(&ffa_dev->uuid, &part_info->uuid);
ret = device_register(&ffa_dev->dev);
if (ret) {
diff --git a/drivers/firmware/arm_ffa/driver.c b/drivers/firmware/arm_ffa/driver.c
index 2c2ec3c35f15..19295282de24 100644
--- a/drivers/firmware/arm_ffa/driver.c
+++ b/drivers/firmware/arm_ffa/driver.c
@@ -44,7 +44,7 @@
#include "common.h"
-#define FFA_DRIVER_VERSION FFA_VERSION_1_1
+#define FFA_DRIVER_VERSION FFA_VERSION_1_2
#define FFA_MIN_VERSION FFA_VERSION_1_0
#define SENDER_ID_MASK GENMASK(31, 16)
@@ -114,7 +114,6 @@ struct ffa_drv_info {
};
static struct ffa_drv_info *drv_info;
-static void ffa_partitions_cleanup(void);
/*
* The driver must be able to support all the versions from the earliest
@@ -145,11 +144,19 @@ static int ffa_version_check(u32 *version)
.a0 = FFA_VERSION, .a1 = FFA_DRIVER_VERSION,
}, &ver);
- if (ver.a0 == FFA_RET_NOT_SUPPORTED) {
+ if ((s32)ver.a0 == FFA_RET_NOT_SUPPORTED) {
pr_info("FFA_VERSION returned not supported\n");
return -EOPNOTSUPP;
}
+ if (FFA_MAJOR_VERSION(ver.a0) > FFA_MAJOR_VERSION(FFA_DRIVER_VERSION)) {
+ pr_err("Incompatible v%d.%d! Latest supported v%d.%d\n",
+ FFA_MAJOR_VERSION(ver.a0), FFA_MINOR_VERSION(ver.a0),
+ FFA_MAJOR_VERSION(FFA_DRIVER_VERSION),
+ FFA_MINOR_VERSION(FFA_DRIVER_VERSION));
+ return -EINVAL;
+ }
+
if (ver.a0 < FFA_MIN_VERSION) {
pr_err("Incompatible v%d.%d! Earliest supported v%d.%d\n",
FFA_MAJOR_VERSION(ver.a0), FFA_MINOR_VERSION(ver.a0),
@@ -276,9 +283,21 @@ __ffa_partition_info_get(u32 uuid0, u32 uuid1, u32 uuid2, u32 uuid3,
}
if (buffer && count <= num_partitions)
- for (idx = 0; idx < count; idx++)
- memcpy(buffer + idx, drv_info->rx_buffer + idx * sz,
- buf_sz);
+ for (idx = 0; idx < count; idx++) {
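+ /*
+ * The RX buffer holds little-endian partition records; convert
+ * each field to CPU endianness rather than copying verbatim.
+ */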
+ struct ffa_partition_info_le {
+ __le16 id;
+ __le16 exec_ctxt;
+ __le32 properties;
+ uuid_t uuid;
+ } *rx_buf = drv_info->rx_buffer + idx * sz;
+ struct ffa_partition_info *buf = buffer + idx;
+
+ buf->id = le16_to_cpu(rx_buf->id);
+ buf->exec_ctxt = le16_to_cpu(rx_buf->exec_ctxt);
+ buf->properties = le32_to_cpu(rx_buf->properties);
+ if (buf_sz > 8)
+ import_uuid(&buf->uuid, (u8 *)&rx_buf->uuid);
+ }
ffa_rx_release();
@@ -295,14 +314,24 @@ __ffa_partition_info_get(u32 uuid0, u32 uuid1, u32 uuid2, u32 uuid3,
#define CURRENT_INDEX(x) ((u16)(FIELD_GET(CURRENT_INDEX_MASK, (x))))
#define UUID_INFO_TAG(x) ((u16)(FIELD_GET(UUID_INFO_TAG_MASK, (x))))
#define PARTITION_INFO_SZ(x) ((u16)(FIELD_GET(PARTITION_INFO_SZ_MASK, (x))))
+#define PART_INFO_ID_MASK GENMASK(15, 0)
+#define PART_INFO_EXEC_CXT_MASK GENMASK(31, 16)
+#define PART_INFO_PROPS_MASK GENMASK(63, 32)
+#define PART_INFO_ID(x) ((u16)(FIELD_GET(PART_INFO_ID_MASK, (x))))
+#define PART_INFO_EXEC_CXT(x) ((u16)(FIELD_GET(PART_INFO_EXEC_CXT_MASK, (x))))
+#define PART_INFO_PROPERTIES(x) ((u32)(FIELD_GET(PART_INFO_PROPS_MASK, (x))))
static int
__ffa_partition_info_get_regs(u32 uuid0, u32 uuid1, u32 uuid2, u32 uuid3,
struct ffa_partition_info *buffer, int num_parts)
{
u16 buf_sz, start_idx, cur_idx, count = 0, prev_idx = 0, tag = 0;
+ struct ffa_partition_info *buf = buffer;
ffa_value_t partition_info;
do {
+ __le64 *regs;
+ int idx;
+
start_idx = prev_idx ? prev_idx + 1 : 0;
invoke_ffa_fn((ffa_value_t){
@@ -326,8 +355,25 @@ __ffa_partition_info_get_regs(u32 uuid0, u32 uuid1, u32 uuid2, u32 uuid3,
if (buf_sz > sizeof(*buffer))
buf_sz = sizeof(*buffer);
- memcpy(buffer + prev_idx * buf_sz, &partition_info.a3,
- (cur_idx - start_idx + 1) * buf_sz);
+ regs = (void *)&partition_info.a3;
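+ /*
+ * Each partition descriptor spans three 64-bit registers: the ID,
+ * execution context count and properties packed into the first,
+ * followed by the UUID in the next two.
+ */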
+ for (idx = 0; idx < cur_idx - start_idx + 1; idx++, buf++) {
+ union {
+ uuid_t uuid;
+ u64 regs[2];
+ } uuid_regs = {
+ .regs = {
+ le64_to_cpu(*(regs + 1)),
+ le64_to_cpu(*(regs + 2)),
+ }
+ };
+ u64 val = *(u64 *)regs;
+
+ buf->id = PART_INFO_ID(val);
+ buf->exec_ctxt = PART_INFO_EXEC_CXT(val);
+ buf->properties = PART_INFO_PROPERTIES(val);
+ uuid_copy(&buf->uuid, &uuid_regs.uuid);
+ regs += 3;
+ }
prev_idx = cur_idx;
} while (cur_idx < (count - 1));
@@ -445,9 +491,9 @@ static int ffa_msg_send_direct_req(u16 src_id, u16 dst_id, bool mode_32bit,
return -EINVAL;
}
-static int ffa_msg_send2(u16 src_id, u16 dst_id, void *buf, size_t sz)
+static int ffa_msg_send2(struct ffa_device *dev, u16 src_id, void *buf, size_t sz)
{
- u32 src_dst_ids = PACK_TARGET_INFO(src_id, dst_id);
+ u32 src_dst_ids = PACK_TARGET_INFO(src_id, dev->vm_id);
struct ffa_indirect_msg_hdr *msg;
ffa_value_t ret;
int retval = 0;
@@ -463,6 +509,7 @@ static int ffa_msg_send2(u16 src_id, u16 dst_id, void *buf, size_t sz)
msg->offset = sizeof(*msg);
msg->send_recv_id = src_dst_ids;
msg->size = sz;
+ uuid_copy(&msg->uuid, &dev->uuid);
memcpy((u8 *)msg + msg->offset, buf, sz);
/* flags = 0, sender VMID = 0 works for both physical/virtual NS */
@@ -760,6 +807,13 @@ static int ffa_notification_bitmap_destroy(void)
return 0;
}
+enum notify_type {
+ SECURE_PARTITION,
+ NON_SECURE_VM,
+ SPM_FRAMEWORK,
+ NS_HYP_FRAMEWORK,
+};
+
#define NOTIFICATION_LOW_MASK GENMASK(31, 0)
#define NOTIFICATION_HIGH_MASK GENMASK(63, 32)
#define NOTIFICATION_BITMAP_HIGH(x) \
@@ -783,10 +837,22 @@ static int ffa_notification_bitmap_destroy(void)
#define MAX_IDS_32 10
#define PER_VCPU_NOTIFICATION_FLAG BIT(0)
-#define SECURE_PARTITION_BITMAP BIT(0)
-#define NON_SECURE_VM_BITMAP BIT(1)
-#define SPM_FRAMEWORK_BITMAP BIT(2)
-#define NS_HYP_FRAMEWORK_BITMAP BIT(3)
+#define SECURE_PARTITION_BITMAP_ENABLE BIT(SECURE_PARTITION)
+#define NON_SECURE_VM_BITMAP_ENABLE BIT(NON_SECURE_VM)
+#define SPM_FRAMEWORK_BITMAP_ENABLE BIT(SPM_FRAMEWORK)
+#define NS_HYP_FRAMEWORK_BITMAP_ENABLE BIT(NS_HYP_FRAMEWORK)
+#define FFA_BITMAP_SECURE_ENABLE_MASK \
+ (SECURE_PARTITION_BITMAP_ENABLE | SPM_FRAMEWORK_BITMAP_ENABLE)
+#define FFA_BITMAP_NS_ENABLE_MASK \
+ (NON_SECURE_VM_BITMAP_ENABLE | NS_HYP_FRAMEWORK_BITMAP_ENABLE)
+#define FFA_BITMAP_ALL_ENABLE_MASK \
+ (FFA_BITMAP_SECURE_ENABLE_MASK | FFA_BITMAP_NS_ENABLE_MASK)
+
+#define FFA_SECURE_PARTITION_ID_FLAG BIT(15)
+
+#define SPM_FRAMEWORK_BITMAP(x) NOTIFICATION_BITMAP_LOW(x)
+#define NS_HYP_FRAMEWORK_BITMAP(x) NOTIFICATION_BITMAP_HIGH(x)
+#define FRAMEWORK_NOTIFY_RX_BUFFER_FULL BIT(0)
static int ffa_notification_bind_common(u16 dst_id, u64 bitmap,
u32 flags, bool is_bind)
@@ -852,9 +918,15 @@ static int ffa_notification_get(u32 flags, struct ffa_notify_bitmaps *notify)
else if (ret.a0 != FFA_SUCCESS)
return -EINVAL; /* Something else went wrong. */
- notify->sp_map = PACK_NOTIFICATION_BITMAP(ret.a2, ret.a3);
- notify->vm_map = PACK_NOTIFICATION_BITMAP(ret.a4, ret.a5);
- notify->arch_map = PACK_NOTIFICATION_BITMAP(ret.a6, ret.a7);
+ if (flags & SECURE_PARTITION_BITMAP_ENABLE)
+ notify->sp_map = PACK_NOTIFICATION_BITMAP(ret.a2, ret.a3);
+ if (flags & NON_SECURE_VM_BITMAP_ENABLE)
+ notify->vm_map = PACK_NOTIFICATION_BITMAP(ret.a4, ret.a5);
+ if (flags & SPM_FRAMEWORK_BITMAP_ENABLE)
+ notify->arch_map = SPM_FRAMEWORK_BITMAP(ret.a6);
+ if (flags & NS_HYP_FRAMEWORK_BITMAP_ENABLE)
+ notify->arch_map = PACK_NOTIFICATION_BITMAP(notify->arch_map,
+ ret.a7);
return 0;
}
@@ -863,27 +935,32 @@ struct ffa_dev_part_info {
ffa_sched_recv_cb callback;
void *cb_data;
rwlock_t rw_lock;
+ struct ffa_device *dev;
+ struct list_head node;
};
static void __do_sched_recv_cb(u16 part_id, u16 vcpu, bool is_per_vcpu)
{
- struct ffa_dev_part_info *partition;
+ struct ffa_dev_part_info *partition = NULL, *tmp;
ffa_sched_recv_cb callback;
+ struct list_head *phead;
void *cb_data;
- partition = xa_load(&drv_info->partition_info, part_id);
- if (!partition) {
+ phead = xa_load(&drv_info->partition_info, part_id);
+ if (!phead) {
pr_err("%s: Invalid partition ID 0x%x\n", __func__, part_id);
return;
}
- read_lock(&partition->rw_lock);
- callback = partition->callback;
- cb_data = partition->cb_data;
- read_unlock(&partition->rw_lock);
+ list_for_each_entry_safe(partition, tmp, phead, node) {
+ read_lock(&partition->rw_lock);
+ callback = partition->callback;
+ cb_data = partition->cb_data;
+ read_unlock(&partition->rw_lock);
- if (callback)
- callback(vcpu, is_per_vcpu, cb_data);
+ if (callback)
+ callback(vcpu, is_per_vcpu, cb_data);
+ }
}
static void ffa_notification_info_get(void)
@@ -899,7 +976,7 @@ static void ffa_notification_info_get(void)
}, &ret);
if (ret.a0 != FFA_FN_NATIVE(SUCCESS) && ret.a0 != FFA_SUCCESS) {
- if (ret.a2 != FFA_RET_NO_DATA)
+ if ((s32)ret.a2 != FFA_RET_NO_DATA)
pr_err("Notification Info fetch failed: 0x%lx (0x%lx)",
ret.a0, ret.a2);
return;
@@ -935,7 +1012,7 @@ static void ffa_notification_info_get(void)
}
/* Per vCPU Notification */
- for (idx = 0; idx < ids_count[list]; idx++) {
+ for (idx = 1; idx < ids_count[list]; idx++) {
if (ids_processed >= max_ids - 1)
break;
@@ -1015,17 +1092,17 @@ static int ffa_sync_send_receive(struct ffa_device *dev,
static int ffa_indirect_msg_send(struct ffa_device *dev, void *buf, size_t sz)
{
- return ffa_msg_send2(drv_info->vm_id, dev->vm_id, buf, sz);
+ return ffa_msg_send2(dev, drv_info->vm_id, buf, sz);
}
-static int ffa_sync_send_receive2(struct ffa_device *dev, const uuid_t *uuid,
+static int ffa_sync_send_receive2(struct ffa_device *dev,
struct ffa_send_direct_data2 *data)
{
if (!drv_info->msg_direct_req2_supp)
return -EOPNOTSUPP;
return ffa_msg_send_direct_req2(drv_info->vm_id, dev->vm_id,
- uuid, data);
+ &dev->uuid, data);
}
static int ffa_memory_share(struct ffa_mem_ops_args *args)
@@ -1051,35 +1128,39 @@ static int ffa_memory_lend(struct ffa_mem_ops_args *args)
return ffa_memory_ops(FFA_MEM_LEND, args);
}
-#define FFA_SECURE_PARTITION_ID_FLAG BIT(15)
-
#define ffa_notifications_disabled() (!drv_info->notif_enabled)
-enum notify_type {
- NON_SECURE_VM,
- SECURE_PARTITION,
- FRAMEWORK,
-};
-
struct notifier_cb_info {
struct hlist_node hnode;
+ struct ffa_device *dev;
+ ffa_fwk_notifier_cb fwk_cb;
ffa_notifier_cb cb;
void *cb_data;
- enum notify_type type;
};
-static int ffa_sched_recv_cb_update(u16 part_id, ffa_sched_recv_cb callback,
- void *cb_data, bool is_registration)
+static int
+ffa_sched_recv_cb_update(struct ffa_device *dev, ffa_sched_recv_cb callback,
+ void *cb_data, bool is_registration)
{
- struct ffa_dev_part_info *partition;
+ struct ffa_dev_part_info *partition = NULL, *tmp;
+ struct list_head *phead;
bool cb_valid;
if (ffa_notifications_disabled())
return -EOPNOTSUPP;
- partition = xa_load(&drv_info->partition_info, part_id);
+ phead = xa_load(&drv_info->partition_info, dev->vm_id);
+ if (!phead) {
+ pr_err("%s: Invalid partition ID 0x%x\n", __func__, dev->vm_id);
+ return -EINVAL;
+ }
+
+ list_for_each_entry_safe(partition, tmp, phead, node)
+ if (partition->dev == dev)
+ break;
+
if (!partition) {
- pr_err("%s: Invalid partition ID 0x%x\n", __func__, part_id);
+ pr_err("%s: No such partition ID 0x%x\n", __func__, dev->vm_id);
return -EINVAL;
}
@@ -1101,12 +1182,12 @@ static int ffa_sched_recv_cb_update(u16 part_id, ffa_sched_recv_cb callback,
static int ffa_sched_recv_cb_register(struct ffa_device *dev,
ffa_sched_recv_cb cb, void *cb_data)
{
- return ffa_sched_recv_cb_update(dev->vm_id, cb, cb_data, true);
+ return ffa_sched_recv_cb_update(dev, cb, cb_data, true);
}
static int ffa_sched_recv_cb_unregister(struct ffa_device *dev)
{
- return ffa_sched_recv_cb_update(dev->vm_id, NULL, NULL, false);
+ return ffa_sched_recv_cb_update(dev, NULL, NULL, false);
}
static int ffa_notification_bind(u16 dst_id, u64 bitmap, u32 flags)
@@ -1119,27 +1200,69 @@ static int ffa_notification_unbind(u16 dst_id, u64 bitmap)
return ffa_notification_bind_common(dst_id, bitmap, 0, false);
}
-/* Should be called while the notify_lock is taken */
+static enum notify_type ffa_notify_type_get(u16 vm_id)
+{
+ if (vm_id & FFA_SECURE_PARTITION_ID_FLAG)
+ return SECURE_PARTITION;
+ else
+ return NON_SECURE_VM;
+}
+
+/* notifier_hnode_get* should be called with notify_lock held */
static struct notifier_cb_info *
-notifier_hash_node_get(u16 notify_id, enum notify_type type)
+notifier_hnode_get_by_vmid(u16 notify_id, int vmid)
{
struct notifier_cb_info *node;
hash_for_each_possible(drv_info->notifier_hash, node, hnode, notify_id)
- if (type == node->type)
+ if (node->fwk_cb && vmid == node->dev->vm_id)
+ return node;
+
+ return NULL;
+}
+
+static struct notifier_cb_info *
+notifier_hnode_get_by_vmid_uuid(u16 notify_id, int vmid, const uuid_t *uuid)
+{
+ struct notifier_cb_info *node;
+
+ if (uuid_is_null(uuid))
+ return notifier_hnode_get_by_vmid(notify_id, vmid);
+
+ hash_for_each_possible(drv_info->notifier_hash, node, hnode, notify_id)
+ if (node->fwk_cb && vmid == node->dev->vm_id &&
+ uuid_equal(&node->dev->uuid, uuid))
+ return node;
+
+ return NULL;
+}
+
+static struct notifier_cb_info *
+notifier_hnode_get_by_type(u16 notify_id, enum notify_type type)
+{
+ struct notifier_cb_info *node;
+
+ hash_for_each_possible(drv_info->notifier_hash, node, hnode, notify_id)
+ if (node->cb && type == ffa_notify_type_get(node->dev->vm_id))
return node;
return NULL;
}
static int
-update_notifier_cb(int notify_id, enum notify_type type, ffa_notifier_cb cb,
- void *cb_data, bool is_registration)
+update_notifier_cb(struct ffa_device *dev, int notify_id, void *cb,
+ void *cb_data, bool is_registration, bool is_framework)
{
struct notifier_cb_info *cb_info = NULL;
+ enum notify_type type = ffa_notify_type_get(dev->vm_id);
bool cb_found;
- cb_info = notifier_hash_node_get(notify_id, type);
+ if (is_framework)
+ cb_info = notifier_hnode_get_by_vmid_uuid(notify_id, dev->vm_id,
+ &dev->uuid);
+ else
+ cb_info = notifier_hnode_get_by_type(notify_id, type);
+
cb_found = !!cb_info;
if (!(is_registration ^ cb_found))
@@ -1150,9 +1273,12 @@ update_notifier_cb(int notify_id, enum notify_type type, ffa_notifier_cb cb,
if (!cb_info)
return -ENOMEM;
- cb_info->type = type;
- cb_info->cb = cb;
+ cb_info->dev = dev;
cb_info->cb_data = cb_data;
+ if (is_framework)
+ cb_info->fwk_cb = cb;
+ else
+ cb_info->cb = cb;
hash_add(drv_info->notifier_hash, &cb_info->hnode, notify_id);
} else {
@@ -1162,18 +1288,10 @@ update_notifier_cb(int notify_id, enum notify_type type, ffa_notifier_cb cb,
return 0;
}
-static enum notify_type ffa_notify_type_get(u16 vm_id)
-{
- if (vm_id & FFA_SECURE_PARTITION_ID_FLAG)
- return SECURE_PARTITION;
- else
- return NON_SECURE_VM;
-}
-
-static int ffa_notify_relinquish(struct ffa_device *dev, int notify_id)
+static int __ffa_notify_relinquish(struct ffa_device *dev, int notify_id,
+ bool is_framework)
{
int rc;
- enum notify_type type = ffa_notify_type_get(dev->vm_id);
if (ffa_notifications_disabled())
return -EOPNOTSUPP;
@@ -1183,26 +1301,38 @@ static int ffa_notify_relinquish(struct ffa_device *dev, int notify_id)
mutex_lock(&drv_info->notify_lock);
- rc = update_notifier_cb(notify_id, type, NULL, NULL, false);
+ rc = update_notifier_cb(dev, notify_id, NULL, NULL, false,
+ is_framework);
if (rc) {
pr_err("Could not unregister notification callback\n");
mutex_unlock(&drv_info->notify_lock);
return rc;
}
- rc = ffa_notification_unbind(dev->vm_id, BIT(notify_id));
+ if (!is_framework)
+ rc = ffa_notification_unbind(dev->vm_id, BIT(notify_id));
mutex_unlock(&drv_info->notify_lock);
return rc;
}
-static int ffa_notify_request(struct ffa_device *dev, bool is_per_vcpu,
- ffa_notifier_cb cb, void *cb_data, int notify_id)
+static int ffa_notify_relinquish(struct ffa_device *dev, int notify_id)
+{
+ return __ffa_notify_relinquish(dev, notify_id, false);
+}
+
+static int ffa_fwk_notify_relinquish(struct ffa_device *dev, int notify_id)
+{
+ return __ffa_notify_relinquish(dev, notify_id, true);
+}
+
+static int __ffa_notify_request(struct ffa_device *dev, bool is_per_vcpu,
+ void *cb, void *cb_data,
+ int notify_id, bool is_framework)
{
int rc;
u32 flags = 0;
- enum notify_type type = ffa_notify_type_get(dev->vm_id);
if (ffa_notifications_disabled())
return -EOPNOTSUPP;
@@ -1212,26 +1342,44 @@ static int ffa_notify_request(struct ffa_device *dev, bool is_per_vcpu,
mutex_lock(&drv_info->notify_lock);
- if (is_per_vcpu)
- flags = PER_VCPU_NOTIFICATION_FLAG;
+ if (!is_framework) {
+ if (is_per_vcpu)
+ flags = PER_VCPU_NOTIFICATION_FLAG;
- rc = ffa_notification_bind(dev->vm_id, BIT(notify_id), flags);
- if (rc) {
- mutex_unlock(&drv_info->notify_lock);
- return rc;
+ rc = ffa_notification_bind(dev->vm_id, BIT(notify_id), flags);
+ if (rc) {
+ mutex_unlock(&drv_info->notify_lock);
+ return rc;
+ }
}
- rc = update_notifier_cb(notify_id, type, cb, cb_data, true);
+ rc = update_notifier_cb(dev, notify_id, cb, cb_data, true,
+ is_framework);
if (rc) {
pr_err("Failed to register callback for %d - %d\n",
notify_id, rc);
- ffa_notification_unbind(dev->vm_id, BIT(notify_id));
+ if (!is_framework)
+ ffa_notification_unbind(dev->vm_id, BIT(notify_id));
}
mutex_unlock(&drv_info->notify_lock);
return rc;
}
+static int ffa_notify_request(struct ffa_device *dev, bool is_per_vcpu,
+ ffa_notifier_cb cb, void *cb_data, int notify_id)
+{
+ return __ffa_notify_request(dev, is_per_vcpu, cb, cb_data, notify_id,
+ false);
+}
+
+static int
+ffa_fwk_notify_request(struct ffa_device *dev, ffa_fwk_notifier_cb cb,
+ void *cb_data, int notify_id)
+{
+ return __ffa_notify_request(dev, false, cb, cb_data, notify_id, true);
+}
+
static int ffa_notify_send(struct ffa_device *dev, int notify_id,
bool is_per_vcpu, u16 vcpu)
{
@@ -1258,7 +1406,7 @@ static void handle_notif_callbacks(u64 bitmap, enum notify_type type)
continue;
mutex_lock(&drv_info->notify_lock);
- cb_info = notifier_hash_node_get(notify_id, type);
+ cb_info = notifier_hnode_get_by_type(notify_id, type);
mutex_unlock(&drv_info->notify_lock);
if (cb_info && cb_info->cb)
@@ -1266,21 +1414,68 @@ static void handle_notif_callbacks(u64 bitmap, enum notify_type type)
}
}
-static void notif_get_and_handle(void *unused)
+static void handle_fwk_notif_callbacks(u32 bitmap)
+{
+ void *buf;
+ uuid_t uuid;
+ int notify_id = 0, target;
+ struct ffa_indirect_msg_hdr *msg;
+ struct notifier_cb_info *cb_info = NULL;
+
+ /* Only one framework notification defined and supported for now */
+ if (!(bitmap & FRAMEWORK_NOTIFY_RX_BUFFER_FULL))
+ return;
+
+ mutex_lock(&drv_info->rx_lock);
+
+ msg = drv_info->rx_buffer;
+ buf = kmemdup((void *)msg + msg->offset, msg->size, GFP_KERNEL);
+ if (!buf) {
+ mutex_unlock(&drv_info->rx_lock);
+ return;
+ }
+
+ target = SENDER_ID(msg->send_recv_id);
+ if (msg->offset >= sizeof(*msg))
+ uuid_copy(&uuid, &msg->uuid);
+ else
+ uuid_copy(&uuid, &uuid_null);
+
+ mutex_unlock(&drv_info->rx_lock);
+
+ ffa_rx_release();
+
+ mutex_lock(&drv_info->notify_lock);
+ cb_info = notifier_hnode_get_by_vmid_uuid(notify_id, target, &uuid);
+ mutex_unlock(&drv_info->notify_lock);
+
+ if (cb_info && cb_info->fwk_cb)
+ cb_info->fwk_cb(notify_id, cb_info->cb_data, buf);
+ kfree(buf);
+}
+
+static void notif_get_and_handle(void *cb_data)
{
int rc;
- struct ffa_notify_bitmaps bitmaps;
+ u32 flags;
+ struct ffa_drv_info *info = cb_data;
+ struct ffa_notify_bitmaps bitmaps = { 0 };
+
+ if (info->vm_id == 0) /* Non secure physical instance */
+ flags = FFA_BITMAP_SECURE_ENABLE_MASK;
+ else
+ flags = FFA_BITMAP_ALL_ENABLE_MASK;
- rc = ffa_notification_get(SECURE_PARTITION_BITMAP |
- SPM_FRAMEWORK_BITMAP, &bitmaps);
+ rc = ffa_notification_get(flags, &bitmaps);
if (rc) {
pr_err("Failed to retrieve notifications with %d!\n", rc);
return;
}
+ handle_fwk_notif_callbacks(SPM_FRAMEWORK_BITMAP(bitmaps.arch_map));
+ handle_fwk_notif_callbacks(NS_HYP_FRAMEWORK_BITMAP(bitmaps.arch_map));
handle_notif_callbacks(bitmaps.vm_map, NON_SECURE_VM);
handle_notif_callbacks(bitmaps.sp_map, SECURE_PARTITION);
- handle_notif_callbacks(bitmaps.arch_map, FRAMEWORK);
}
static void
@@ -1329,6 +1524,8 @@ static const struct ffa_notifier_ops ffa_drv_notifier_ops = {
.sched_recv_cb_unregister = ffa_sched_recv_cb_unregister,
.notify_request = ffa_notify_request,
.notify_relinquish = ffa_notify_relinquish,
+ .fwk_notify_request = ffa_fwk_notify_request,
+ .fwk_notify_relinquish = ffa_fwk_notify_relinquish,
.notify_send = ffa_notify_send,
};
@@ -1384,11 +1581,110 @@ static struct notifier_block ffa_bus_nb = {
.notifier_call = ffa_bus_notifier,
};
+static int ffa_xa_add_partition_info(struct ffa_device *dev)
+{
+ struct ffa_dev_part_info *info;
+ struct list_head *head, *phead;
+ int ret = -ENOMEM;
+
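+ /*
+ * Partition info is kept as a per-partition-ID list so that a single
+ * partition ID can back multiple ffa_devices (e.g. one per UUID).
+ */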
+ phead = xa_load(&drv_info->partition_info, dev->vm_id);
+ if (phead) {
+ head = phead;
+ list_for_each_entry(info, head, node) {
+ if (info->dev == dev) {
+ pr_err("%s: duplicate dev %p part ID 0x%x\n",
+ __func__, dev, dev->vm_id);
+ return -EEXIST;
+ }
+ }
+ }
+
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return ret;
+
+ rwlock_init(&info->rw_lock);
+ info->dev = dev;
+
+ if (!phead) {
+ phead = kzalloc(sizeof(*phead), GFP_KERNEL);
+ if (!phead)
+ goto free_out;
+
+ INIT_LIST_HEAD(phead);
+
+ ret = xa_insert(&drv_info->partition_info, dev->vm_id, phead,
+ GFP_KERNEL);
+ if (ret) {
+ pr_err("%s: failed to save part ID 0x%x Ret:%d\n",
+ __func__, dev->vm_id, ret);
+ goto free_out;
+ }
+ }
+ list_add(&info->node, phead);
+ return 0;
+
+free_out:
+ kfree(phead);
+ kfree(info);
+ return ret;
+}
+
+static int ffa_setup_host_partition(int vm_id)
+{
+ struct ffa_partition_info buf = { 0 };
+ struct ffa_device *ffa_dev;
+ int ret;
+
+ buf.id = vm_id;
+ ffa_dev = ffa_device_register(&buf, &ffa_drv_ops);
+ if (!ffa_dev) {
+ pr_err("%s: failed to register host partition ID 0x%x\n",
+ __func__, vm_id);
+ return -EINVAL;
+ }
+
+ ret = ffa_xa_add_partition_info(ffa_dev);
+ if (ret)
+ return ret;
+
+ if (ffa_notifications_disabled())
+ return 0;
+
+ ret = ffa_sched_recv_cb_update(ffa_dev, ffa_self_notif_handle,
+ drv_info, true);
+ if (ret)
+ pr_info("Failed to register driver sched callback %d\n", ret);
+
+ return ret;
+}
+
+static void ffa_partitions_cleanup(void)
+{
+ struct list_head *phead;
+ unsigned long idx;
+
+ /* Clean up/free all registered devices */
+ ffa_devices_unregister();
+
+ xa_for_each(&drv_info->partition_info, idx, phead) {
+ struct ffa_dev_part_info *info, *tmp;
+
+ xa_erase(&drv_info->partition_info, idx);
+ list_for_each_entry_safe(info, tmp, phead, node) {
+ list_del(&info->node);
+ kfree(info);
+ }
+ kfree(phead);
+ }
+
+ xa_destroy(&drv_info->partition_info);
+}
+
static int ffa_setup_partitions(void)
{
int count, idx, ret;
struct ffa_device *ffa_dev;
- struct ffa_dev_part_info *info;
struct ffa_partition_info *pbuf, *tpbuf;
if (drv_info->version == FFA_VERSION_1_0) {
@@ -1422,59 +1718,30 @@ static int ffa_setup_partitions(void)
!(tpbuf->properties & FFA_PARTITION_AARCH64_EXEC))
ffa_mode_32bit_set(ffa_dev);
- info = kzalloc(sizeof(*info), GFP_KERNEL);
- if (!info) {
+ if (ffa_xa_add_partition_info(ffa_dev)) {
ffa_device_unregister(ffa_dev);
continue;
}
- rwlock_init(&info->rw_lock);
- ret = xa_insert(&drv_info->partition_info, tpbuf->id,
- info, GFP_KERNEL);
- if (ret) {
- pr_err("%s: failed to save partition ID 0x%x - ret:%d\n",
- __func__, tpbuf->id, ret);
- ffa_device_unregister(ffa_dev);
- kfree(info);
- }
}
kfree(pbuf);
- /* Allocate for the host */
- info = kzalloc(sizeof(*info), GFP_KERNEL);
- if (!info) {
- /* Already registered devices are freed on bus_exit */
- ffa_partitions_cleanup();
- return -ENOMEM;
- }
+ /*
+ * Check if the host is already added as part of the partition info.
+ * The host cannot have multiple UUIDs, so checking for an existing
+ * entry is sufficient.
+ */
+ if (xa_load(&drv_info->partition_info, drv_info->vm_id))
+ return 0;
- rwlock_init(&info->rw_lock);
- ret = xa_insert(&drv_info->partition_info, drv_info->vm_id,
- info, GFP_KERNEL);
- if (ret) {
- pr_err("%s: failed to save Host partition ID 0x%x - ret:%d. Abort.\n",
- __func__, drv_info->vm_id, ret);
- kfree(info);
- /* Already registered devices are freed on bus_exit */
+ /* Allocate for the host */
+ ret = ffa_setup_host_partition(drv_info->vm_id);
+ if (ret)
ffa_partitions_cleanup();
- }
return ret;
}
-static void ffa_partitions_cleanup(void)
-{
- struct ffa_dev_part_info *info;
- unsigned long idx;
-
- xa_for_each(&drv_info->partition_info, idx, info) {
- xa_erase(&drv_info->partition_info, idx);
- kfree(info);
- }
-
- xa_destroy(&drv_info->partition_info);
-}
-
/* FFA FEATURE IDs */
#define FFA_FEAT_NOTIFICATION_PENDING_INT (1)
#define FFA_FEAT_SCHEDULE_RECEIVER_INT (2)
@@ -1777,19 +2044,10 @@ static int __init ffa_init(void)
ffa_notifications_setup();
ret = ffa_setup_partitions();
- if (ret) {
- pr_err("failed to setup partitions\n");
- goto cleanup_notifs;
- }
-
- ret = ffa_sched_recv_cb_update(drv_info->vm_id, ffa_self_notif_handle,
- drv_info, true);
- if (ret)
- pr_info("Failed to register driver sched callback %d\n", ret);
-
- return 0;
+ if (!ret)
+ return ret;
-cleanup_notifs:
+ pr_err("failed to setup partitions\n");
ffa_notifications_cleanup();
free_pages:
if (drv_info->tx_buffer)
diff --git a/drivers/firmware/arm_scmi/bus.c b/drivers/firmware/arm_scmi/bus.c
index a3386bf36de5..7af01664ce7e 100644
--- a/drivers/firmware/arm_scmi/bus.c
+++ b/drivers/firmware/arm_scmi/bus.c
@@ -17,6 +17,8 @@
#include "common.h"
+#define SCMI_UEVENT_MODALIAS_FMT "%s:%02x:%s"
+
BLOCKING_NOTIFIER_HEAD(scmi_requested_devices_nh);
EXPORT_SYMBOL_GPL(scmi_requested_devices_nh);
@@ -42,7 +44,7 @@ static atomic_t scmi_syspower_registered = ATOMIC_INIT(0);
* This helper lets an SCMI driver request specific devices identified by the
* @id_table to be created for each active SCMI instance.
*
- * The requested device name MUST NOT be already existent for any protocol;
+ * The requested device name MUST NOT already exist for this protocol;
* at first the freshly requested @id_table is annotated in the IDR table
* @scmi_requested_devices and then the requested device is advertised to any
* registered party via the @scmi_requested_devices_nh notification chain.
@@ -52,7 +54,6 @@ static atomic_t scmi_syspower_registered = ATOMIC_INIT(0);
static int scmi_protocol_device_request(const struct scmi_device_id *id_table)
{
int ret = 0;
- unsigned int id = 0;
struct list_head *head, *phead = NULL;
struct scmi_requested_dev *rdev;
@@ -67,19 +68,13 @@ static int scmi_protocol_device_request(const struct scmi_device_id *id_table)
}
/*
- * Search for the matching protocol rdev list and then search
- * of any existent equally named device...fails if any duplicate found.
+ * Find the matching protocol rdev list and then search it for any
+ * existing device with the same name; fail if a duplicate is found.
*/
mutex_lock(&scmi_requested_devices_mtx);
- idr_for_each_entry(&scmi_requested_devices, head, id) {
- if (!phead) {
- /* A list found registered in the IDR is never empty */
- rdev = list_first_entry(head, struct scmi_requested_dev,
- node);
- if (rdev->id_table->protocol_id ==
- id_table->protocol_id)
- phead = head;
- }
+ phead = idr_find(&scmi_requested_devices, id_table->protocol_id);
+ if (phead) {
+ head = phead;
list_for_each_entry(rdev, head, node) {
if (!strcmp(rdev->id_table->name, id_table->name)) {
pr_err("Ignoring duplicate request [%d] %s\n",
@@ -283,11 +278,59 @@ static void scmi_dev_remove(struct device *dev)
scmi_drv->remove(scmi_dev);
}
+static int scmi_device_uevent(const struct device *dev, struct kobj_uevent_env *env)
+{
+ const struct scmi_device *scmi_dev = to_scmi_dev(dev);
+
+ return add_uevent_var(env, "MODALIAS=" SCMI_UEVENT_MODALIAS_FMT,
+ dev_name(&scmi_dev->dev), scmi_dev->protocol_id,
+ scmi_dev->name);
+}
+
+static ssize_t modalias_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct scmi_device *scmi_dev = to_scmi_dev(dev);
+
+ return sysfs_emit(buf, SCMI_UEVENT_MODALIAS_FMT,
+ dev_name(&scmi_dev->dev), scmi_dev->protocol_id,
+ scmi_dev->name);
+}
+static DEVICE_ATTR_RO(modalias);
+
+static ssize_t protocol_id_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct scmi_device *scmi_dev = to_scmi_dev(dev);
+
+ return sprintf(buf, "0x%02x\n", scmi_dev->protocol_id);
+}
+static DEVICE_ATTR_RO(protocol_id);
+
+static ssize_t name_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct scmi_device *scmi_dev = to_scmi_dev(dev);
+
+ return sprintf(buf, "%s\n", scmi_dev->name);
+}
+static DEVICE_ATTR_RO(name);
+
+static struct attribute *scmi_device_attributes_attrs[] = {
+ &dev_attr_protocol_id.attr,
+ &dev_attr_name.attr,
+ &dev_attr_modalias.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(scmi_device_attributes);
+
const struct bus_type scmi_bus_type = {
.name = "scmi_protocol",
.match = scmi_dev_match,
.probe = scmi_dev_probe,
.remove = scmi_dev_remove,
+ .uevent = scmi_device_uevent,
+ .dev_groups = scmi_device_attributes_groups,
};
EXPORT_SYMBOL_GPL(scmi_bus_type);
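With SCMI_UEVENT_MODALIAS_FMT set to "%s:%02x:%s", both the uevent variable and the
sysfs modalias attribute expand to <device name>:<protocol id>:<protocol name>. As a
purely illustrative example (the device name, protocol number and protocol name are
assumed, not taken from the hunk above), a device bound to protocol 0x15 would report:

    MODALIAS=scmi_dev.1:15:hwmon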
diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c
index 60050da54bf2..1c75a4c9c371 100644
--- a/drivers/firmware/arm_scmi/driver.c
+++ b/drivers/firmware/arm_scmi/driver.c
@@ -1997,17 +1997,7 @@ static void scmi_common_fastchannel_db_ring(struct scmi_fc_db_info *db)
else if (db->width == 4)
SCMI_PROTO_FC_RING_DB(32);
else /* db->width == 8 */
-#ifdef CONFIG_64BIT
SCMI_PROTO_FC_RING_DB(64);
-#else
- {
- u64 val = 0;
-
- if (db->mask)
- val = ioread64_hi_lo(db->addr) & db->mask;
- iowrite64_hi_lo(db->set | val, db->addr);
- }
-#endif
}
/**
diff --git a/drivers/firmware/cirrus/test/cs_dsp_test_bin_error.c b/drivers/firmware/cirrus/test/cs_dsp_test_bin_error.c
index 5dcf62f19faf..8748874f0552 100644
--- a/drivers/firmware/cirrus/test/cs_dsp_test_bin_error.c
+++ b/drivers/firmware/cirrus/test/cs_dsp_test_bin_error.c
@@ -534,11 +534,6 @@ static int cs_dsp_bin_err_test_adsp2_16bit_init(struct kunit *test)
return cs_dsp_bin_err_test_common_init(test, dsp, 1);
}
-static struct kunit_case cs_dsp_bin_err_test_cases_halo[] = {
-
- { } /* terminator */
-};
-
static void cs_dsp_bin_err_block_types_desc(const struct cs_dsp_bin_test_param *param,
char *desc)
{
@@ -560,7 +555,7 @@ KUNIT_ARRAY_PARAM(bin_test_block_types,
bin_test_block_types_cases,
cs_dsp_bin_err_block_types_desc);
-static struct kunit_case cs_dsp_bin_err_test_cases_adsp2[] = {
+static struct kunit_case cs_dsp_bin_err_test_cases[] = {
KUNIT_CASE(bin_load_with_unknown_blocks),
KUNIT_CASE(bin_err_wrong_magic),
KUNIT_CASE(bin_err_too_short_for_header),
@@ -578,21 +573,21 @@ static struct kunit_suite cs_dsp_bin_err_test_halo = {
.name = "cs_dsp_bin_err_halo",
.init = cs_dsp_bin_err_test_halo_init,
.exit = cs_dsp_bin_err_test_exit,
- .test_cases = cs_dsp_bin_err_test_cases_halo,
+ .test_cases = cs_dsp_bin_err_test_cases,
};
static struct kunit_suite cs_dsp_bin_err_test_adsp2_32bit = {
.name = "cs_dsp_bin_err_adsp2_32bit",
.init = cs_dsp_bin_err_test_adsp2_32bit_init,
.exit = cs_dsp_bin_err_test_exit,
- .test_cases = cs_dsp_bin_err_test_cases_adsp2,
+ .test_cases = cs_dsp_bin_err_test_cases,
};
static struct kunit_suite cs_dsp_bin_err_test_adsp2_16bit = {
.name = "cs_dsp_bin_err_adsp2_16bit",
.init = cs_dsp_bin_err_test_adsp2_16bit_init,
.exit = cs_dsp_bin_err_test_exit,
- .test_cases = cs_dsp_bin_err_test_cases_adsp2,
+ .test_cases = cs_dsp_bin_err_test_cases,
};
kunit_test_suites(&cs_dsp_bin_err_test_halo,
diff --git a/drivers/firmware/cirrus/test/cs_dsp_test_control_parse.c b/drivers/firmware/cirrus/test/cs_dsp_test_control_parse.c
index cb90964740ea..942ba1af5e7c 100644
--- a/drivers/firmware/cirrus/test/cs_dsp_test_control_parse.c
+++ b/drivers/firmware/cirrus/test/cs_dsp_test_control_parse.c
@@ -73,6 +73,18 @@ static const struct cs_dsp_mock_coeff_def mock_coeff_template = {
.length_bytes = 4,
};
+static char *cs_dsp_ctl_alloc_test_string(struct kunit *test, char c, size_t len)
+{
+ char *str;
+
+ str = kunit_kmalloc(test, len + 1, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, str);
+ memset(str, c, len);
+ str[len] = '\0';
+
+ return str;
+}
+
/* Algorithm info block without controls should load */
static void cs_dsp_ctl_parse_no_coeffs(struct kunit *test)
{
@@ -160,12 +172,8 @@ static void cs_dsp_ctl_parse_max_v1_name(struct kunit *test)
struct cs_dsp_mock_coeff_def def = mock_coeff_template;
struct cs_dsp_coeff_ctl *ctl;
struct firmware *wmfw;
- char *name;
- name = kunit_kzalloc(test, 256, GFP_KERNEL);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, name);
- memset(name, 'A', 255);
- def.fullname = name;
+ def.fullname = cs_dsp_ctl_alloc_test_string(test, 'A', 255);
cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
cs_dsp_ctl_parse_test_algs[0].id,
@@ -252,14 +260,9 @@ static void cs_dsp_ctl_parse_max_short_name(struct kunit *test)
struct cs_dsp_test_local *local = priv->local;
struct cs_dsp_mock_coeff_def def = mock_coeff_template;
struct cs_dsp_coeff_ctl *ctl;
- char *name;
struct firmware *wmfw;
- name = kunit_kmalloc(test, 255, GFP_KERNEL);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, name);
- memset(name, 'A', 255);
-
- def.shortname = name;
+ def.shortname = cs_dsp_ctl_alloc_test_string(test, 'A', 255);
cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
cs_dsp_ctl_parse_test_algs[0].id,
@@ -273,7 +276,7 @@ static void cs_dsp_ctl_parse_max_short_name(struct kunit *test)
ctl = list_first_entry_or_null(&priv->dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
KUNIT_ASSERT_NOT_NULL(test, ctl);
KUNIT_EXPECT_EQ(test, ctl->subname_len, 255);
- KUNIT_EXPECT_MEMEQ(test, ctl->subname, name, ctl->subname_len);
+ KUNIT_EXPECT_MEMEQ(test, ctl->subname, def.shortname, ctl->subname_len);
KUNIT_EXPECT_EQ(test, ctl->flags, def.flags);
KUNIT_EXPECT_EQ(test, ctl->type, def.type);
KUNIT_EXPECT_EQ(test, ctl->len, def.length_bytes);
@@ -323,12 +326,8 @@ static void cs_dsp_ctl_parse_with_max_fullname(struct kunit *test)
struct cs_dsp_mock_coeff_def def = mock_coeff_template;
struct cs_dsp_coeff_ctl *ctl;
struct firmware *wmfw;
- char *fullname;
- fullname = kunit_kmalloc(test, 255, GFP_KERNEL);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, fullname);
- memset(fullname, 'A', 255);
- def.fullname = fullname;
+ def.fullname = cs_dsp_ctl_alloc_test_string(test, 'A', 255);
cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
cs_dsp_ctl_parse_test_algs[0].id,
@@ -392,12 +391,8 @@ static void cs_dsp_ctl_parse_with_max_description(struct kunit *test)
struct cs_dsp_mock_coeff_def def = mock_coeff_template;
struct cs_dsp_coeff_ctl *ctl;
struct firmware *wmfw;
- char *description;
- description = kunit_kmalloc(test, 65535, GFP_KERNEL);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, description);
- memset(description, 'A', 65535);
- def.description = description;
+ def.description = cs_dsp_ctl_alloc_test_string(test, 'A', 65535);
cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
cs_dsp_ctl_parse_test_algs[0].id,
@@ -429,17 +424,9 @@ static void cs_dsp_ctl_parse_with_max_fullname_and_description(struct kunit *tes
struct cs_dsp_mock_coeff_def def = mock_coeff_template;
struct cs_dsp_coeff_ctl *ctl;
struct firmware *wmfw;
- char *fullname, *description;
-
- fullname = kunit_kmalloc(test, 255, GFP_KERNEL);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, fullname);
- memset(fullname, 'A', 255);
- def.fullname = fullname;
- description = kunit_kmalloc(test, 65535, GFP_KERNEL);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, description);
- memset(description, 'A', 65535);
- def.description = description;
+ def.fullname = cs_dsp_ctl_alloc_test_string(test, 'A', 255);
+ def.description = cs_dsp_ctl_alloc_test_string(test, 'A', 65535);
cs_dsp_mock_wmfw_start_alg_info_block(local->wmfw_builder,
cs_dsp_ctl_parse_test_algs[0].id,
diff --git a/drivers/firmware/efi/libstub/randomalloc.c b/drivers/firmware/efi/libstub/randomalloc.c
index 5a732018be36..fd80b2f3233a 100644
--- a/drivers/firmware/efi/libstub/randomalloc.c
+++ b/drivers/firmware/efi/libstub/randomalloc.c
@@ -75,6 +75,10 @@ efi_status_t efi_random_alloc(unsigned long size,
if (align < EFI_ALLOC_ALIGN)
align = EFI_ALLOC_ALIGN;
+ /* Avoid address 0x0, as it can be mistaken for NULL */
+ if (alloc_min == 0)
+ alloc_min = align;
+
size = round_up(size, EFI_ALLOC_ALIGN);
/* count the suitable slots in each memory map entry */
diff --git a/drivers/firmware/imx/imx-scu.c b/drivers/firmware/imx/imx-scu.c
index 1dd4362ef9a3..8c28e25ddc8a 100644
--- a/drivers/firmware/imx/imx-scu.c
+++ b/drivers/firmware/imx/imx-scu.c
@@ -280,6 +280,7 @@ static int imx_scu_probe(struct platform_device *pdev)
return ret;
sc_ipc->fast_ipc = of_device_is_compatible(args.np, "fsl,imx8-mu-scu");
+ of_node_put(args.np);
num_channel = sc_ipc->fast_ipc ? 2 : SCU_MU_CHAN_NUM;
for (i = 0; i < num_channel; i++) {
diff --git a/drivers/firmware/qcom/qcom_qseecom_uefisecapp.c b/drivers/firmware/qcom/qcom_qseecom_uefisecapp.c
index 447246bd04be..98a463e9774b 100644
--- a/drivers/firmware/qcom/qcom_qseecom_uefisecapp.c
+++ b/drivers/firmware/qcom/qcom_qseecom_uefisecapp.c
@@ -814,15 +814,6 @@ static int qcom_uefisecapp_probe(struct auxiliary_device *aux_dev,
qcuefi->client = container_of(aux_dev, struct qseecom_client, aux_dev);
- auxiliary_set_drvdata(aux_dev, qcuefi);
- status = qcuefi_set_reference(qcuefi);
- if (status)
- return status;
-
- status = efivars_register(&qcuefi->efivars, &qcom_efivar_ops);
- if (status)
- qcuefi_set_reference(NULL);
-
memset(&pool_config, 0, sizeof(pool_config));
pool_config.initial_size = SZ_4K;
pool_config.policy = QCOM_TZMEM_POLICY_MULTIPLIER;
@@ -833,6 +824,15 @@ static int qcom_uefisecapp_probe(struct auxiliary_device *aux_dev,
if (IS_ERR(qcuefi->mempool))
return PTR_ERR(qcuefi->mempool);
+ auxiliary_set_drvdata(aux_dev, qcuefi);
+ status = qcuefi_set_reference(qcuefi);
+ if (status)
+ return status;
+
+ status = efivars_register(&qcuefi->efivars, &qcom_efivar_ops);
+ if (status)
+ qcuefi_set_reference(NULL);
+
return status;
}
diff --git a/drivers/firmware/qcom/qcom_scm.c b/drivers/firmware/qcom/qcom_scm.c
index f0569bb9411f..fc4d67e4c4a6 100644
--- a/drivers/firmware/qcom/qcom_scm.c
+++ b/drivers/firmware/qcom/qcom_scm.c
@@ -2301,8 +2301,8 @@ static int qcom_scm_probe(struct platform_device *pdev)
__scm->mempool = devm_qcom_tzmem_pool_new(__scm->dev, &pool_config);
if (IS_ERR(__scm->mempool)) {
- dev_err_probe(__scm->dev, PTR_ERR(__scm->mempool),
- "Failed to create the SCM memory pool\n");
+ ret = dev_err_probe(__scm->dev, PTR_ERR(__scm->mempool),
+ "Failed to create the SCM memory pool\n");
goto err;
}
diff --git a/drivers/firmware/samsung/Kconfig b/drivers/firmware/samsung/Kconfig
new file mode 100644
index 000000000000..16d81aeb1d41
--- /dev/null
+++ b/drivers/firmware/samsung/Kconfig
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+config EXYNOS_ACPM_PROTOCOL
+ tristate "Exynos Alive Clock and Power Manager (ACPM) Message Protocol"
+ depends on ARCH_EXYNOS || COMPILE_TEST
+ depends on MAILBOX
+ help
+ The Alive Clock and Power Manager (ACPM) Message Protocol defines the
+ communication between the ACPM firmware and masters (AP, AOC, ...).
+ The ACPM firmware runs on the Active Power Management (APM) module,
+ which handles the overall power activities.
+
+ This protocol driver provides an interface for all the client drivers
+ making use of the features offered by the APM.
diff --git a/drivers/firmware/samsung/Makefile b/drivers/firmware/samsung/Makefile
new file mode 100644
index 000000000000..7b4c9f6f34f5
--- /dev/null
+++ b/drivers/firmware/samsung/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+acpm-protocol-objs := exynos-acpm.o exynos-acpm-pmic.o
+obj-$(CONFIG_EXYNOS_ACPM_PROTOCOL) += acpm-protocol.o
diff --git a/drivers/firmware/samsung/exynos-acpm-pmic.c b/drivers/firmware/samsung/exynos-acpm-pmic.c
new file mode 100644
index 000000000000..85e90d236da2
--- /dev/null
+++ b/drivers/firmware/samsung/exynos-acpm-pmic.c
@@ -0,0 +1,224 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2020 Samsung Electronics Co., Ltd.
+ * Copyright 2020 Google LLC.
+ * Copyright 2024 Linaro Ltd.
+ */
+#include <linux/bitfield.h>
+#include <linux/firmware/samsung/exynos-acpm-protocol.h>
+#include <linux/ktime.h>
+#include <linux/types.h>
+
+#include "exynos-acpm.h"
+#include "exynos-acpm-pmic.h"
+
+#define ACPM_PMIC_CHANNEL GENMASK(15, 12)
+#define ACPM_PMIC_TYPE GENMASK(11, 8)
+#define ACPM_PMIC_REG GENMASK(7, 0)
+
+#define ACPM_PMIC_RETURN GENMASK(31, 24)
+#define ACPM_PMIC_MASK GENMASK(23, 16)
+#define ACPM_PMIC_VALUE GENMASK(15, 8)
+#define ACPM_PMIC_FUNC GENMASK(7, 0)
+
+#define ACPM_PMIC_BULK_SHIFT 8
+#define ACPM_PMIC_BULK_MASK GENMASK(7, 0)
+#define ACPM_PMIC_BULK_MAX_COUNT 8
+
+enum exynos_acpm_pmic_func {
+ ACPM_PMIC_READ,
+ ACPM_PMIC_WRITE,
+ ACPM_PMIC_UPDATE,
+ ACPM_PMIC_BULK_READ,
+ ACPM_PMIC_BULK_WRITE,
+};
+
+static inline u32 acpm_pmic_set_bulk(u32 data, unsigned int i)
+{
+ return (data & ACPM_PMIC_BULK_MASK) << (ACPM_PMIC_BULK_SHIFT * i);
+}
+
+static inline u32 acpm_pmic_get_bulk(u32 data, unsigned int i)
+{
+ return (data >> (ACPM_PMIC_BULK_SHIFT * i)) & ACPM_PMIC_BULK_MASK;
+}
+
+static void acpm_pmic_set_xfer(struct acpm_xfer *xfer, u32 *cmd,
+ unsigned int acpm_chan_id)
+{
+ xfer->txd = cmd;
+ xfer->rxd = cmd;
+ xfer->txlen = sizeof(cmd);
+ xfer->rxlen = sizeof(cmd);
+ xfer->acpm_chan_id = acpm_chan_id;
+}
+
+static void acpm_pmic_init_read_cmd(u32 cmd[4], u8 type, u8 reg, u8 chan)
+{
+ cmd[0] = FIELD_PREP(ACPM_PMIC_TYPE, type) |
+ FIELD_PREP(ACPM_PMIC_REG, reg) |
+ FIELD_PREP(ACPM_PMIC_CHANNEL, chan);
+ cmd[1] = FIELD_PREP(ACPM_PMIC_FUNC, ACPM_PMIC_READ);
+ cmd[3] = ktime_to_ms(ktime_get());
+}
+
+int acpm_pmic_read_reg(const struct acpm_handle *handle,
+ unsigned int acpm_chan_id, u8 type, u8 reg, u8 chan,
+ u8 *buf)
+{
+ struct acpm_xfer xfer;
+ u32 cmd[4] = {0};
+ int ret;
+
+ acpm_pmic_init_read_cmd(cmd, type, reg, chan);
+ acpm_pmic_set_xfer(&xfer, cmd, acpm_chan_id);
+
+ ret = acpm_do_xfer(handle, &xfer);
+ if (ret)
+ return ret;
+
+ *buf = FIELD_GET(ACPM_PMIC_VALUE, xfer.rxd[1]);
+
+ return FIELD_GET(ACPM_PMIC_RETURN, xfer.rxd[1]);
+}
+
+static void acpm_pmic_init_bulk_read_cmd(u32 cmd[4], u8 type, u8 reg, u8 chan,
+ u8 count)
+{
+ cmd[0] = FIELD_PREP(ACPM_PMIC_TYPE, type) |
+ FIELD_PREP(ACPM_PMIC_REG, reg) |
+ FIELD_PREP(ACPM_PMIC_CHANNEL, chan);
+ cmd[1] = FIELD_PREP(ACPM_PMIC_FUNC, ACPM_PMIC_BULK_READ) |
+ FIELD_PREP(ACPM_PMIC_VALUE, count);
+}
+
+int acpm_pmic_bulk_read(const struct acpm_handle *handle,
+ unsigned int acpm_chan_id, u8 type, u8 reg, u8 chan,
+ u8 count, u8 *buf)
+{
+ struct acpm_xfer xfer;
+ u32 cmd[4] = {0};
+ int i, ret;
+
+ if (count > ACPM_PMIC_BULK_MAX_COUNT)
+ return -EINVAL;
+
+ acpm_pmic_init_bulk_read_cmd(cmd, type, reg, chan, count);
+ acpm_pmic_set_xfer(&xfer, cmd, acpm_chan_id);
+
+ ret = acpm_do_xfer(handle, &xfer);
+ if (ret)
+ return ret;
+
+ ret = FIELD_GET(ACPM_PMIC_RETURN, xfer.rxd[1]);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < count; i++) {
+ if (i < 4)
+ buf[i] = acpm_pmic_get_bulk(xfer.rxd[2], i);
+ else
+ buf[i] = acpm_pmic_get_bulk(xfer.rxd[3], i - 4);
+ }
+
+ return 0;
+}
+
+static void acpm_pmic_init_write_cmd(u32 cmd[4], u8 type, u8 reg, u8 chan,
+ u8 value)
+{
+ cmd[0] = FIELD_PREP(ACPM_PMIC_TYPE, type) |
+ FIELD_PREP(ACPM_PMIC_REG, reg) |
+ FIELD_PREP(ACPM_PMIC_CHANNEL, chan);
+ cmd[1] = FIELD_PREP(ACPM_PMIC_FUNC, ACPM_PMIC_WRITE) |
+ FIELD_PREP(ACPM_PMIC_VALUE, value);
+ cmd[3] = ktime_to_ms(ktime_get());
+}
+
+int acpm_pmic_write_reg(const struct acpm_handle *handle,
+ unsigned int acpm_chan_id, u8 type, u8 reg, u8 chan,
+ u8 value)
+{
+ struct acpm_xfer xfer;
+ u32 cmd[4] = {0};
+ int ret;
+
+ acpm_pmic_init_write_cmd(cmd, type, reg, chan, value);
+ acpm_pmic_set_xfer(&xfer, cmd, acpm_chan_id);
+
+ ret = acpm_do_xfer(handle, &xfer);
+ if (ret)
+ return ret;
+
+ return FIELD_GET(ACPM_PMIC_RETURN, xfer.rxd[1]);
+}
+
+static void acpm_pmic_init_bulk_write_cmd(u32 cmd[4], u8 type, u8 reg, u8 chan,
+ u8 count, const u8 *buf)
+{
+ int i;
+
+ cmd[0] = FIELD_PREP(ACPM_PMIC_TYPE, type) |
+ FIELD_PREP(ACPM_PMIC_REG, reg) |
+ FIELD_PREP(ACPM_PMIC_CHANNEL, chan);
+ cmd[1] = FIELD_PREP(ACPM_PMIC_FUNC, ACPM_PMIC_BULK_WRITE) |
+ FIELD_PREP(ACPM_PMIC_VALUE, count);
+
+ for (i = 0; i < count; i++) {
+ if (i < 4)
+ cmd[2] |= acpm_pmic_set_bulk(buf[i], i);
+ else
+ cmd[3] |= acpm_pmic_set_bulk(buf[i], i - 4);
+ }
+}
+
+int acpm_pmic_bulk_write(const struct acpm_handle *handle,
+ unsigned int acpm_chan_id, u8 type, u8 reg, u8 chan,
+ u8 count, const u8 *buf)
+{
+ struct acpm_xfer xfer;
+ u32 cmd[4] = {0};
+ int ret;
+
+ if (count > ACPM_PMIC_BULK_MAX_COUNT)
+ return -EINVAL;
+
+ acpm_pmic_init_bulk_write_cmd(cmd, type, reg, chan, count, buf);
+ acpm_pmic_set_xfer(&xfer, cmd, acpm_chan_id);
+
+ ret = acpm_do_xfer(handle, &xfer);
+ if (ret)
+ return ret;
+
+ return FIELD_GET(ACPM_PMIC_RETURN, xfer.rxd[1]);
+}
+
+static void acpm_pmic_init_update_cmd(u32 cmd[4], u8 type, u8 reg, u8 chan,
+ u8 value, u8 mask)
+{
+ cmd[0] = FIELD_PREP(ACPM_PMIC_TYPE, type) |
+ FIELD_PREP(ACPM_PMIC_REG, reg) |
+ FIELD_PREP(ACPM_PMIC_CHANNEL, chan);
+ cmd[1] = FIELD_PREP(ACPM_PMIC_FUNC, ACPM_PMIC_UPDATE) |
+ FIELD_PREP(ACPM_PMIC_VALUE, value) |
+ FIELD_PREP(ACPM_PMIC_MASK, mask);
+ cmd[3] = ktime_to_ms(ktime_get());
+}
+
+int acpm_pmic_update_reg(const struct acpm_handle *handle,
+ unsigned int acpm_chan_id, u8 type, u8 reg, u8 chan,
+ u8 value, u8 mask)
+{
+ struct acpm_xfer xfer;
+ u32 cmd[4] = {0};
+ int ret;
+
+ acpm_pmic_init_update_cmd(cmd, type, reg, chan, value, mask);
+ acpm_pmic_set_xfer(&xfer, cmd, acpm_chan_id);
+
+ ret = acpm_do_xfer(handle, &xfer);
+ if (ret)
+ return ret;
+
+ return FIELD_GET(ACPM_PMIC_RETURN, xfer.rxd[1]);
+}
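A brief worked example of the command-word packing defined above; the type, register
and channel values are illustrative assumptions rather than real PMIC parameters:

/*
 * Worked example: a read of register 0x05, type 0x1, PMIC channel 0,
 * packed as in acpm_pmic_init_read_cmd():
 *
 *   cmd[0] = FIELD_PREP(ACPM_PMIC_TYPE, 0x1) |
 *            FIELD_PREP(ACPM_PMIC_REG, 0x05) |
 *            FIELD_PREP(ACPM_PMIC_CHANNEL, 0);             => 0x00000105
 *   cmd[1] = FIELD_PREP(ACPM_PMIC_FUNC, ACPM_PMIC_READ);   => 0x00000000
 *
 * On completion, the register value is FIELD_GET(ACPM_PMIC_VALUE, rxd[1])
 * and the firmware status is FIELD_GET(ACPM_PMIC_RETURN, rxd[1]).
 */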
diff --git a/drivers/firmware/samsung/exynos-acpm-pmic.h b/drivers/firmware/samsung/exynos-acpm-pmic.h
new file mode 100644
index 000000000000..078421888a14
--- /dev/null
+++ b/drivers/firmware/samsung/exynos-acpm-pmic.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2020 Samsung Electronics Co., Ltd.
+ * Copyright 2020 Google LLC.
+ * Copyright 2024 Linaro Ltd.
+ */
+#ifndef __EXYNOS_ACPM_PMIC_H__
+#define __EXYNOS_ACPM_PMIC_H__
+
+#include <linux/types.h>
+
+struct acpm_handle;
+
+int acpm_pmic_read_reg(const struct acpm_handle *handle,
+ unsigned int acpm_chan_id, u8 type, u8 reg, u8 chan,
+ u8 *buf);
+int acpm_pmic_bulk_read(const struct acpm_handle *handle,
+ unsigned int acpm_chan_id, u8 type, u8 reg, u8 chan,
+ u8 count, u8 *buf);
+int acpm_pmic_write_reg(const struct acpm_handle *handle,
+ unsigned int acpm_chan_id, u8 type, u8 reg, u8 chan,
+ u8 value);
+int acpm_pmic_bulk_write(const struct acpm_handle *handle,
+ unsigned int acpm_chan_id, u8 type, u8 reg, u8 chan,
+ u8 count, const u8 *buf);
+int acpm_pmic_update_reg(const struct acpm_handle *handle,
+ unsigned int acpm_chan_id, u8 type, u8 reg, u8 chan,
+ u8 value, u8 mask);
+#endif /* __EXYNOS_ACPM_PMIC_H__ */
diff --git a/drivers/firmware/samsung/exynos-acpm.c b/drivers/firmware/samsung/exynos-acpm.c
new file mode 100644
index 000000000000..a85b2dbdd9f0
--- /dev/null
+++ b/drivers/firmware/samsung/exynos-acpm.c
@@ -0,0 +1,769 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2020 Samsung Electronics Co., Ltd.
+ * Copyright 2020 Google LLC.
+ * Copyright 2024 Linaro Ltd.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bitmap.h>
+#include <linux/bits.h>
+#include <linux/cleanup.h>
+#include <linux/container_of.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/firmware/samsung/exynos-acpm-protocol.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/mailbox/exynos-message.h>
+#include <linux/mailbox_client.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/math.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#include "exynos-acpm.h"
+#include "exynos-acpm-pmic.h"
+
+#define ACPM_PROTOCOL_SEQNUM GENMASK(21, 16)
+
+/* The poll loop counts in units of 20 us: 5000 * 20 us = 100 ms. */
+#define ACPM_POLL_TIMEOUT 5000
+#define ACPM_TX_TIMEOUT_US 500000
+
+#define ACPM_GS101_INITDATA_BASE 0xa000
+
+/**
+ * struct acpm_shmem - shared memory configuration information.
+ * @reserved: unused fields.
+ * @chans: offset to array of struct acpm_chan_shmem.
+ * @reserved1: unused fields.
+ * @num_chans: number of channels.
+ */
+struct acpm_shmem {
+ u32 reserved[2];
+ u32 chans;
+ u32 reserved1[3];
+ u32 num_chans;
+};
+
+/**
+ * struct acpm_chan_shmem - descriptor of a shared memory channel.
+ *
+ * @id: channel ID.
+ * @reserved: unused fields.
+ * @rx_rear: rear pointer of APM RX queue (TX for AP).
+ * @rx_front: front pointer of APM RX queue (TX for AP).
+ * @rx_base: base address of APM RX queue (TX for AP).
+ * @reserved1: unused fields.
+ * @tx_rear: rear pointer of APM TX queue (RX for AP).
+ * @tx_front: front pointer of APM TX queue (RX for AP).
+ * @tx_base: base address of APM TX queue (RX for AP).
+ * @qlen: queue length. Applies to both TX/RX queues.
+ * @mlen: message length. Applies to both TX/RX queues.
+ * @reserved2: unused fields.
+ * @poll_completion: true when the channel works on polling.
+ */
+struct acpm_chan_shmem {
+ u32 id;
+ u32 reserved[3];
+ u32 rx_rear;
+ u32 rx_front;
+ u32 rx_base;
+ u32 reserved1[3];
+ u32 tx_rear;
+ u32 tx_front;
+ u32 tx_base;
+ u32 qlen;
+ u32 mlen;
+ u32 reserved2[2];
+ u32 poll_completion;
+};
+
+/**
+ * struct acpm_queue - exynos acpm queue.
+ *
+ * @rear: rear address of the queue.
+ * @front: front address of the queue.
+ * @base: base address of the queue.
+ */
+struct acpm_queue {
+ void __iomem *rear;
+ void __iomem *front;
+ void __iomem *base;
+};
+
+/**
+ * struct acpm_rx_data - RX queue data.
+ *
+ * @cmd: pointer to where the data shall be saved.
+ * @n_cmd: number of 32-bit commands.
+ * @response: true if the client expects the RX data.
+ */
+struct acpm_rx_data {
+ u32 *cmd;
+ size_t n_cmd;
+ bool response;
+};
+
+#define ACPM_SEQNUM_MAX 64
+
+/**
+ * struct acpm_chan - driver internal representation of a channel.
+ * @cl: mailbox client.
+ * @chan: mailbox channel.
+ * @acpm: pointer to driver private data.
+ * @tx: TX queue. The enqueue is done by the host.
+ * - front index is written by the host.
+ * - rear index is written by the firmware.
+ *
+ * @rx: RX queue. The enqueue is done by the firmware.
+ * - front index is written by the firmware.
+ * - rear index is written by the host.
+ * @tx_lock: protects TX queue.
+ * @rx_lock: protects RX queue.
+ * @qlen: queue length. Applies to both TX/RX queues.
+ * @mlen: message length. Applies to both TX/RX queues.
+ * @seqnum: sequence number of the last message enqueued on TX queue.
+ * @id: channel ID.
+ * @poll_completion: indicates if the transfer needs to be polled for
+ * completion or interrupt mode is used.
+ * @bitmap_seqnum: bitmap that tracks the messages on the TX/RX queues.
+ * @rx_data: internal buffer used to drain the RX queue.
+ */
+struct acpm_chan {
+ struct mbox_client cl;
+ struct mbox_chan *chan;
+ struct acpm_info *acpm;
+ struct acpm_queue tx;
+ struct acpm_queue rx;
+ struct mutex tx_lock;
+ struct mutex rx_lock;
+
+ unsigned int qlen;
+ unsigned int mlen;
+ u8 seqnum;
+ u8 id;
+ bool poll_completion;
+
+ DECLARE_BITMAP(bitmap_seqnum, ACPM_SEQNUM_MAX - 1);
+ struct acpm_rx_data rx_data[ACPM_SEQNUM_MAX];
+};
+
+/**
+ * struct acpm_info - driver's private data.
+ * @shmem: pointer to the SRAM configuration data.
+ * @sram_base: base address of SRAM.
+ * @chans: pointer to the ACPM channel parameters retrieved from SRAM.
+ * @dev: pointer to the exynos-acpm device.
+ * @handle: instance of acpm_handle to send to clients.
+ * @num_chans: number of channels available for this controller.
+ */
+struct acpm_info {
+ struct acpm_shmem __iomem *shmem;
+ void __iomem *sram_base;
+ struct acpm_chan *chans;
+ struct device *dev;
+ struct acpm_handle handle;
+ u32 num_chans;
+};
+
+/**
+ * struct acpm_match_data - of_device_id data.
+ * @initdata_base: offset in SRAM where the channels configuration resides.
+ */
+struct acpm_match_data {
+ loff_t initdata_base;
+};
+
+#define client_to_acpm_chan(c) container_of(c, struct acpm_chan, cl)
+#define handle_to_acpm_info(h) container_of(h, struct acpm_info, handle)
+
+/**
+ * acpm_get_rx() - get response from RX queue.
+ * @achan: ACPM channel info.
+ * @xfer: reference to the transfer to get response for.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int acpm_get_rx(struct acpm_chan *achan, const struct acpm_xfer *xfer)
+{
+ u32 rx_front, rx_seqnum, tx_seqnum, seqnum;
+ const void __iomem *base, *addr;
+ struct acpm_rx_data *rx_data;
+ u32 i, val, mlen;
+ bool rx_set = false;
+
+ guard(mutex)(&achan->rx_lock);
+
+ rx_front = readl(achan->rx.front);
+ i = readl(achan->rx.rear);
+
+ /* Bail out if RX is empty. */
+ if (i == rx_front)
+ return 0;
+
+ base = achan->rx.base;
+ mlen = achan->mlen;
+
+ tx_seqnum = FIELD_GET(ACPM_PROTOCOL_SEQNUM, xfer->txd[0]);
+
+ /* Drain RX queue. */
+ do {
+ /* Read RX seqnum. */
+ addr = base + mlen * i;
+ val = readl(addr);
+
+ rx_seqnum = FIELD_GET(ACPM_PROTOCOL_SEQNUM, val);
+ if (!rx_seqnum)
+ return -EIO;
+ /*
+ * Message seqnum starts at 1, whereas the driver indexes the
+ * first message at 0.
+ */
+ seqnum = rx_seqnum - 1;
+ rx_data = &achan->rx_data[seqnum];
+
+ if (rx_data->response) {
+ if (rx_seqnum == tx_seqnum) {
+ __ioread32_copy(xfer->rxd, addr,
+ xfer->rxlen / 4);
+ rx_set = true;
+ clear_bit(seqnum, achan->bitmap_seqnum);
+ } else {
+ /*
+ * The RX data corresponds to another request.
+ * Save the data to drain the queue, but don't
+ * clear yet the bitmap. It will be cleared
+ * after the response is copied to the request.
+ */
+ __ioread32_copy(rx_data->cmd, addr,
+ xfer->rxlen / 4);
+ }
+ } else {
+ clear_bit(seqnum, achan->bitmap_seqnum);
+ }
+
+ i = (i + 1) % achan->qlen;
+ } while (i != rx_front);
+
+ /* We saved all responses, mark RX empty. */
+ writel(rx_front, achan->rx.rear);
+
+ /*
+ * If the response was not in this iteration of the queue, check if the
+ * RX data was previously saved.
+ */
+ rx_data = &achan->rx_data[tx_seqnum - 1];
+ if (!rx_set && rx_data->response) {
+ rx_seqnum = FIELD_GET(ACPM_PROTOCOL_SEQNUM,
+ rx_data->cmd[0]);
+
+ if (rx_seqnum == tx_seqnum) {
+ memcpy(xfer->rxd, rx_data->cmd, xfer->rxlen);
+ clear_bit(rx_seqnum - 1, achan->bitmap_seqnum);
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * acpm_dequeue_by_polling() - RX dequeue by polling.
+ * @achan: ACPM channel info.
+ * @xfer: reference to the transfer being waited for.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int acpm_dequeue_by_polling(struct acpm_chan *achan,
+ const struct acpm_xfer *xfer)
+{
+ struct device *dev = achan->acpm->dev;
+ unsigned int cnt_20us = 0;
+ u32 seqnum;
+ int ret;
+
+ seqnum = FIELD_GET(ACPM_PROTOCOL_SEQNUM, xfer->txd[0]);
+
+ do {
+ ret = acpm_get_rx(achan, xfer);
+ if (ret)
+ return ret;
+
+ if (!test_bit(seqnum - 1, achan->bitmap_seqnum))
+ return 0;
+
+ /* Determined experimentally. */
+ usleep_range(20, 30);
+ cnt_20us++;
+ } while (cnt_20us < ACPM_POLL_TIMEOUT);
+
+ dev_err(dev, "Timeout! ch:%u s:%u bitmap:%lx, cnt_20us = %d.\n",
+ achan->id, seqnum, achan->bitmap_seqnum[0], cnt_20us);
+
+ return -ETIME;
+}
+
+/**
+ * acpm_wait_for_queue_slots() - wait for queue slots.
+ *
+ * @achan: ACPM channel info.
+ * @next_tx_front: next front index of the TX queue.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int acpm_wait_for_queue_slots(struct acpm_chan *achan, u32 next_tx_front)
+{
+ u32 val;
+ int ret;
+
+ /*
+ * Wait for RX front to keep up with TX front. Make sure there's at
+ * least one element between them.
+ */
+ ret = readl_poll_timeout(achan->rx.front, val, next_tx_front != val, 0,
+ ACPM_TX_TIMEOUT_US);
+ if (ret) {
+ dev_err(achan->acpm->dev, "RX front can not keep up with TX front.\n");
+ return ret;
+ }
+
+ ret = readl_poll_timeout(achan->tx.rear, val, next_tx_front != val, 0,
+ ACPM_TX_TIMEOUT_US);
+ if (ret)
+ dev_err(achan->acpm->dev, "TX queue is full.\n");
+
+ return ret;
+}
+
+/**
+ * acpm_prepare_xfer() - prepare a transfer before writing the message to the
+ * TX queue.
+ * @achan: ACPM channel info.
+ * @xfer: reference to the transfer being prepared.
+ */
+static void acpm_prepare_xfer(struct acpm_chan *achan,
+ const struct acpm_xfer *xfer)
+{
+ struct acpm_rx_data *rx_data;
+ u32 *txd = (u32 *)xfer->txd;
+
+ /* Prevent chan->seqnum from being re-used */
+ do {
+ if (++achan->seqnum == ACPM_SEQNUM_MAX)
+ achan->seqnum = 1;
+ } while (test_bit(achan->seqnum - 1, achan->bitmap_seqnum));
+
+ txd[0] |= FIELD_PREP(ACPM_PROTOCOL_SEQNUM, achan->seqnum);
+
+ /* Clear data for upcoming responses */
+ rx_data = &achan->rx_data[achan->seqnum - 1];
+ memset(rx_data->cmd, 0, sizeof(*rx_data->cmd) * rx_data->n_cmd);
+ if (xfer->rxd)
+ rx_data->response = true;
+
+ /* Flag the index based on seqnum. (seqnum: 1~63, bitmap: 0~62) */
+ set_bit(achan->seqnum - 1, achan->bitmap_seqnum);
+}
+
+/**
+ * acpm_wait_for_message_response() - a helper to group all possible ways of
+ * waiting for a synchronous message response.
+ *
+ * @achan: ACPM channel info.
+ * @xfer: reference to the transfer being waited for.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int acpm_wait_for_message_response(struct acpm_chan *achan,
+ const struct acpm_xfer *xfer)
+{
+ /* Just polling mode supported for now. */
+ return acpm_dequeue_by_polling(achan, xfer);
+}
+
+/**
+ * acpm_do_xfer() - do one transfer.
+ * @handle: pointer to the acpm handle.
+ * @xfer: transfer to initiate and wait for response.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+int acpm_do_xfer(const struct acpm_handle *handle, const struct acpm_xfer *xfer)
+{
+ struct acpm_info *acpm = handle_to_acpm_info(handle);
+ struct exynos_mbox_msg msg;
+ struct acpm_chan *achan;
+ u32 idx, tx_front;
+ int ret;
+
+ if (xfer->acpm_chan_id >= acpm->num_chans)
+ return -EINVAL;
+
+ achan = &acpm->chans[xfer->acpm_chan_id];
+
+ if (!xfer->txd || xfer->txlen > achan->mlen || xfer->rxlen > achan->mlen)
+ return -EINVAL;
+
+ if (!achan->poll_completion) {
+ dev_err(achan->acpm->dev, "Interrupt mode not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ scoped_guard(mutex, &achan->tx_lock) {
+ tx_front = readl(achan->tx.front);
+ idx = (tx_front + 1) % achan->qlen;
+
+ ret = acpm_wait_for_queue_slots(achan, idx);
+ if (ret)
+ return ret;
+
+ acpm_prepare_xfer(achan, xfer);
+
+ /* Write TX command. */
+ __iowrite32_copy(achan->tx.base + achan->mlen * tx_front,
+ xfer->txd, xfer->txlen / 4);
+
+ /* Advance TX front. */
+ writel(idx, achan->tx.front);
+ }
+
+ msg.chan_id = xfer->acpm_chan_id;
+ msg.chan_type = EXYNOS_MBOX_CHAN_TYPE_DOORBELL;
+ ret = mbox_send_message(achan->chan, (void *)&msg);
+ if (ret < 0)
+ return ret;
+
+ ret = acpm_wait_for_message_response(achan, xfer);
+
+ /*
+ * NOTE: we might prefer not to need the mailbox ticker to manage the
+ * transfer queueing since the protocol layer queues things by itself.
+ * Unfortunately, we have to kick the mailbox framework after we have
+ * received our message.
+ */
+ mbox_client_txdone(achan->chan, ret);
+
+ return ret;
+}
+
+/**
+ * acpm_chan_shmem_get_params() - get channel parameters and addresses of the
+ * TX/RX queues.
+ * @achan: ACPM channel info.
+ * @chan_shmem: __iomem pointer to a channel described in shared memory.
+ */
+static void acpm_chan_shmem_get_params(struct acpm_chan *achan,
+ struct acpm_chan_shmem __iomem *chan_shmem)
+{
+ void __iomem *base = achan->acpm->sram_base;
+ struct acpm_queue *rx = &achan->rx;
+ struct acpm_queue *tx = &achan->tx;
+
+ achan->mlen = readl(&chan_shmem->mlen);
+ achan->poll_completion = readl(&chan_shmem->poll_completion);
+ achan->id = readl(&chan_shmem->id);
+ achan->qlen = readl(&chan_shmem->qlen);
+
+ tx->base = base + readl(&chan_shmem->rx_base);
+ tx->rear = base + readl(&chan_shmem->rx_rear);
+ tx->front = base + readl(&chan_shmem->rx_front);
+
+ rx->base = base + readl(&chan_shmem->tx_base);
+ rx->rear = base + readl(&chan_shmem->tx_rear);
+ rx->front = base + readl(&chan_shmem->tx_front);
+
+ dev_vdbg(achan->acpm->dev, "ID = %d poll = %d, mlen = %d, qlen = %d\n",
+ achan->id, achan->poll_completion, achan->mlen, achan->qlen);
+}
+
+/**
+ * acpm_achan_alloc_cmds() - allocate buffers for retrieving data from the ACPM
+ * firmware.
+ * @achan: ACPM channel info.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int acpm_achan_alloc_cmds(struct acpm_chan *achan)
+{
+ struct device *dev = achan->acpm->dev;
+ struct acpm_rx_data *rx_data;
+ size_t cmd_size, n_cmd;
+ int i;
+
+ if (achan->mlen == 0)
+ return 0;
+
+ cmd_size = sizeof(*(achan->rx_data[0].cmd));
+ n_cmd = DIV_ROUND_UP_ULL(achan->mlen, cmd_size);
+
+ for (i = 0; i < ACPM_SEQNUM_MAX; i++) {
+ rx_data = &achan->rx_data[i];
+ rx_data->n_cmd = n_cmd;
+ rx_data->cmd = devm_kcalloc(dev, n_cmd, cmd_size, GFP_KERNEL);
+ if (!rx_data->cmd)
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+/**
+ * acpm_free_mbox_chans() - free mailbox channels.
+ * @acpm: pointer to driver data.
+ */
+static void acpm_free_mbox_chans(struct acpm_info *acpm)
+{
+ int i;
+
+ for (i = 0; i < acpm->num_chans; i++)
+ if (!IS_ERR_OR_NULL(acpm->chans[i].chan))
+ mbox_free_channel(acpm->chans[i].chan);
+}
+
+/**
+ * acpm_channels_init() - initialize channels based on the configuration data in
+ * the shared memory.
+ * @acpm: pointer to driver data.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int acpm_channels_init(struct acpm_info *acpm)
+{
+ struct acpm_shmem __iomem *shmem = acpm->shmem;
+ struct acpm_chan_shmem __iomem *chans_shmem;
+ struct device *dev = acpm->dev;
+ int i, ret;
+
+ acpm->num_chans = readl(&shmem->num_chans);
+ acpm->chans = devm_kcalloc(dev, acpm->num_chans, sizeof(*acpm->chans),
+ GFP_KERNEL);
+ if (!acpm->chans)
+ return -ENOMEM;
+
+ chans_shmem = acpm->sram_base + readl(&shmem->chans);
+
+ for (i = 0; i < acpm->num_chans; i++) {
+ struct acpm_chan_shmem __iomem *chan_shmem = &chans_shmem[i];
+ struct acpm_chan *achan = &acpm->chans[i];
+ struct mbox_client *cl = &achan->cl;
+
+ achan->acpm = acpm;
+
+ acpm_chan_shmem_get_params(achan, chan_shmem);
+
+ ret = acpm_achan_alloc_cmds(achan);
+ if (ret)
+ return ret;
+
+ mutex_init(&achan->rx_lock);
+ mutex_init(&achan->tx_lock);
+
+ cl->dev = dev;
+
+ achan->chan = mbox_request_channel(cl, 0);
+ if (IS_ERR(achan->chan)) {
+ acpm_free_mbox_chans(acpm);
+ return PTR_ERR(achan->chan);
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * acpm_setup_ops() - setup the operations structures.
+ * @acpm: pointer to the driver data.
+ */
+static void acpm_setup_ops(struct acpm_info *acpm)
+{
+ struct acpm_pmic_ops *pmic_ops = &acpm->handle.ops.pmic_ops;
+
+ pmic_ops->read_reg = acpm_pmic_read_reg;
+ pmic_ops->bulk_read = acpm_pmic_bulk_read;
+ pmic_ops->write_reg = acpm_pmic_write_reg;
+ pmic_ops->bulk_write = acpm_pmic_bulk_write;
+ pmic_ops->update_reg = acpm_pmic_update_reg;
+}
+
+static int acpm_probe(struct platform_device *pdev)
+{
+ const struct acpm_match_data *match_data;
+ struct device *dev = &pdev->dev;
+ struct device_node *shmem;
+ struct acpm_info *acpm;
+ resource_size_t size;
+ struct resource res;
+ int ret;
+
+ acpm = devm_kzalloc(dev, sizeof(*acpm), GFP_KERNEL);
+ if (!acpm)
+ return -ENOMEM;
+
+ shmem = of_parse_phandle(dev->of_node, "shmem", 0);
+ ret = of_address_to_resource(shmem, 0, &res);
+ of_node_put(shmem);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Failed to get shared memory.\n");
+
+ size = resource_size(&res);
+ acpm->sram_base = devm_ioremap(dev, res.start, size);
+ if (!acpm->sram_base)
+ return dev_err_probe(dev, -ENOMEM,
+ "Failed to ioremap shared memory.\n");
+
+ match_data = of_device_get_match_data(dev);
+ if (!match_data)
+ return dev_err_probe(dev, -EINVAL,
+ "Failed to get match data.\n");
+
+ acpm->shmem = acpm->sram_base + match_data->initdata_base;
+ acpm->dev = dev;
+
+ ret = acpm_channels_init(acpm);
+ if (ret)
+ return ret;
+
+ acpm_setup_ops(acpm);
+
+ platform_set_drvdata(pdev, acpm);
+
+ return 0;
+}
+
+/**
+ * acpm_handle_put() - release the handle acquired by acpm_get_by_phandle.
+ * @handle: Handle acquired by acpm_get_by_phandle.
+ */
+static void acpm_handle_put(const struct acpm_handle *handle)
+{
+ struct acpm_info *acpm = handle_to_acpm_info(handle);
+ struct device *dev = acpm->dev;
+
+ module_put(dev->driver->owner);
+ /* Drop reference taken with of_find_device_by_node(). */
+ put_device(dev);
+}
+
+/**
+ * devm_acpm_release() - devres release method.
+ * @dev: pointer to device.
+ * @res: pointer to resource.
+ */
+static void devm_acpm_release(struct device *dev, void *res)
+{
+ acpm_handle_put(*(struct acpm_handle **)res);
+}
+
+/**
+ * acpm_get_by_phandle() - get the ACPM handle using DT phandle.
+ * @dev: device pointer requesting ACPM handle.
+ * @property: name of the property containing the phandle to the ACPM node.
+ *
+ * Return: pointer to handle on success, ERR_PTR(-errno) otherwise.
+ */
+static const struct acpm_handle *acpm_get_by_phandle(struct device *dev,
+ const char *property)
+{
+ struct platform_device *pdev;
+ struct device_node *acpm_np;
+ struct device_link *link;
+ struct acpm_info *acpm;
+
+ acpm_np = of_parse_phandle(dev->of_node, property, 0);
+ if (!acpm_np)
+ return ERR_PTR(-ENODEV);
+
+ pdev = of_find_device_by_node(acpm_np);
+ if (!pdev) {
+ dev_err(dev, "Cannot find device node %s\n", acpm_np->name);
+ of_node_put(acpm_np);
+ return ERR_PTR(-EPROBE_DEFER);
+ }
+
+ of_node_put(acpm_np);
+
+ acpm = platform_get_drvdata(pdev);
+ if (!acpm) {
+ dev_err(dev, "Cannot get drvdata from %s\n",
+ dev_name(&pdev->dev));
+ platform_device_put(pdev);
+ return ERR_PTR(-EPROBE_DEFER);
+ }
+
+ if (!try_module_get(pdev->dev.driver->owner)) {
+ dev_err(dev, "Cannot get module reference.\n");
+ platform_device_put(pdev);
+ return ERR_PTR(-EPROBE_DEFER);
+ }
+
+ link = device_link_add(dev, &pdev->dev, DL_FLAG_AUTOREMOVE_SUPPLIER);
+ if (!link) {
+ dev_err(&pdev->dev,
+ "Failed to create device link to consumer %s.\n",
+ dev_name(dev));
+ platform_device_put(pdev);
+ module_put(pdev->dev.driver->owner);
+ return ERR_PTR(-EINVAL);
+ }
+
+ return &acpm->handle;
+}
+
+/**
+ * devm_acpm_get_by_phandle() - managed get handle using phandle.
+ * @dev: device pointer requesting ACPM handle.
+ * @property: name of the property containing the phandle to the ACPM node.
+ *
+ * Return: pointer to handle on success, ERR_PTR(-errno) otherwise.
+ */
+const struct acpm_handle *devm_acpm_get_by_phandle(struct device *dev,
+ const char *property)
+{
+ const struct acpm_handle **ptr, *handle;
+
+ ptr = devres_alloc(devm_acpm_release, sizeof(*ptr), GFP_KERNEL);
+ if (!ptr)
+ return ERR_PTR(-ENOMEM);
+
+ handle = acpm_get_by_phandle(dev, property);
+ if (!IS_ERR(handle)) {
+ *ptr = handle;
+ devres_add(dev, ptr);
+ } else {
+ devres_free(ptr);
+ }
+
+ return handle;
+}
+
+static const struct acpm_match_data acpm_gs101 = {
+ .initdata_base = ACPM_GS101_INITDATA_BASE,
+};
+
+static const struct of_device_id acpm_match[] = {
+ {
+ .compatible = "google,gs101-acpm-ipc",
+ .data = &acpm_gs101,
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, acpm_match);
+
+static struct platform_driver acpm_driver = {
+ .probe = acpm_probe,
+ .driver = {
+ .name = "exynos-acpm-protocol",
+ .of_match_table = acpm_match,
+ },
+};
+module_platform_driver(acpm_driver);
+
+MODULE_AUTHOR("Tudor Ambarus <tudor.ambarus@linaro.org>");
+MODULE_DESCRIPTION("Samsung Exynos ACPM mailbox protocol driver");
+MODULE_LICENSE("GPL");
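A minimal consumer sketch for the handle API above, assuming a hypothetical client
platform driver, a hypothetical "exynos,acpm" phandle property, and illustrative ACPM
channel/type/register numbers; it also assumes the pmic_ops function pointers keep the
signatures wired up in acpm_setup_ops(). None of these names or values come from the
code above:

/* Hypothetical consumer; property name, channel and register values are assumed. */
static int example_client_probe(struct platform_device *pdev)
{
	const struct acpm_handle *acpm;
	u8 val;
	int ret;

	acpm = devm_acpm_get_by_phandle(&pdev->dev, "exynos,acpm");
	if (IS_ERR(acpm))
		return PTR_ERR(acpm);

	/* Read PMIC register 0x05 (type 0, PMIC channel 0) over ACPM channel 2. */
	ret = acpm->ops.pmic_ops.read_reg(acpm, 2, 0, 0x05, 0, &val);
	if (ret)
		return ret;

	dev_info(&pdev->dev, "PMIC reg 0x05 = 0x%02x\n", val);

	return 0;
}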
diff --git a/drivers/firmware/samsung/exynos-acpm.h b/drivers/firmware/samsung/exynos-acpm.h
new file mode 100644
index 000000000000..2d14cb58f98c
--- /dev/null
+++ b/drivers/firmware/samsung/exynos-acpm.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2020 Samsung Electronics Co., Ltd.
+ * Copyright 2020 Google LLC.
+ * Copyright 2024 Linaro Ltd.
+ */
+#ifndef __EXYNOS_ACPM_H__
+#define __EXYNOS_ACPM_H__
+
+struct acpm_xfer {
+ const u32 *txd;
+ u32 *rxd;
+ size_t txlen;
+ size_t rxlen;
+ unsigned int acpm_chan_id;
+};
+
+struct acpm_handle;
+
+int acpm_do_xfer(const struct acpm_handle *handle,
+ const struct acpm_xfer *xfer);
+
+#endif /* __EXYNOS_ACPM_H__ */
diff --git a/drivers/firmware/smccc/kvm_guest.c b/drivers/firmware/smccc/kvm_guest.c
index f3319be20b36..5767aed25cdc 100644
--- a/drivers/firmware/smccc/kvm_guest.c
+++ b/drivers/firmware/smccc/kvm_guest.c
@@ -6,8 +6,11 @@
#include <linux/bitmap.h>
#include <linux/cache.h>
#include <linux/kernel.h>
+#include <linux/memblock.h>
#include <linux/string.h>
+#include <uapi/linux/psci.h>
+
#include <asm/hypervisor.h>
static DECLARE_BITMAP(__kvm_arm_hyp_services, ARM_SMCCC_KVM_NUM_FUNCS) __ro_after_init = { };
@@ -51,3 +54,66 @@ bool kvm_arm_hyp_service_available(u32 func_id)
return test_bit(func_id, __kvm_arm_hyp_services);
}
EXPORT_SYMBOL_GPL(kvm_arm_hyp_service_available);
+
+#ifdef CONFIG_ARM64
+void __init kvm_arm_target_impl_cpu_init(void)
+{
+ int i;
+ u32 ver;
+ u64 max_cpus;
+ struct arm_smccc_res res;
+ struct target_impl_cpu *target;
+
+ if (!kvm_arm_hyp_service_available(ARM_SMCCC_KVM_FUNC_DISCOVER_IMPL_VER) ||
+ !kvm_arm_hyp_service_available(ARM_SMCCC_KVM_FUNC_DISCOVER_IMPL_CPUS))
+ return;
+
+ arm_smccc_1_1_invoke(ARM_SMCCC_VENDOR_HYP_KVM_DISCOVER_IMPL_VER_FUNC_ID,
+ 0, &res);
+ if (res.a0 != SMCCC_RET_SUCCESS)
+ return;
+
+ /* Version info is in lower 32 bits and is in SMCCC_VERSION format */
+ ver = lower_32_bits(res.a1);
+ if (PSCI_VERSION_MAJOR(ver) != 1) {
+ pr_warn("Unsupported target CPU implementation version v%d.%d\n",
+ PSCI_VERSION_MAJOR(ver), PSCI_VERSION_MINOR(ver));
+ return;
+ }
+
+ if (!res.a2) {
+ pr_warn("No target implementation CPUs specified\n");
+ return;
+ }
+
+ max_cpus = res.a2;
+ target = memblock_alloc(sizeof(*target) * max_cpus, __alignof__(*target));
+ if (!target) {
+ pr_warn("Not enough memory for struct target_impl_cpu\n");
+ return;
+ }
+
+ for (i = 0; i < max_cpus; i++) {
+ arm_smccc_1_1_invoke(ARM_SMCCC_VENDOR_HYP_KVM_DISCOVER_IMPL_CPUS_FUNC_ID,
+ i, &res);
+ if (res.a0 != SMCCC_RET_SUCCESS) {
+ pr_warn("Discovering target implementation CPUs failed\n");
+ goto mem_free;
+ }
+ target[i].midr = res.a1;
+ target[i].revidr = res.a2;
+ target[i].aidr = res.a3;
+ }
+
+ if (!cpu_errata_set_target_impl(max_cpus, target)) {
+ pr_warn("Failed to set target implementation CPUs\n");
+ goto mem_free;
+ }
+
+ pr_info("Number of target implementation CPUs is %lld\n", max_cpus);
+ return;
+
+mem_free:
+ memblock_free(target, sizeof(*target) * max_cpus);
+}
+#endif
diff --git a/drivers/firmware/smccc/soc_id.c b/drivers/firmware/smccc/soc_id.c
index 1990263fbba0..c24b3fca1cfe 100644
--- a/drivers/firmware/smccc/soc_id.c
+++ b/drivers/firmware/smccc/soc_id.c
@@ -32,6 +32,85 @@
static struct soc_device *soc_dev;
static struct soc_device_attribute *soc_dev_attr;
+#ifdef CONFIG_ARM64
+
+static char __ro_after_init smccc_soc_id_name[136] = "";
+
+static inline void str_fragment_from_reg(char *dst, unsigned long reg)
+{
+ dst[0] = (reg >> 0) & 0xff;
+ dst[1] = (reg >> 8) & 0xff;
+ dst[2] = (reg >> 16) & 0xff;
+ dst[3] = (reg >> 24) & 0xff;
+ dst[4] = (reg >> 32) & 0xff;
+ dst[5] = (reg >> 40) & 0xff;
+ dst[6] = (reg >> 48) & 0xff;
+ dst[7] = (reg >> 56) & 0xff;
+}
+
+static char __init *smccc_soc_name_init(void)
+{
+ struct arm_smccc_1_2_regs args;
+ struct arm_smccc_1_2_regs res;
+ size_t len;
+
+ /*
+ * Issue Number 1.6 of the Arm SMC Calling Convention
+ * specification introduces an optional "name" string
+ * to the ARM_SMCCC_ARCH_SOC_ID function. Fetch it if
+ * available.
+ */
+ args.a0 = ARM_SMCCC_ARCH_SOC_ID;
+ args.a1 = 2; /* SOC_ID name */
+ arm_smccc_1_2_invoke(&args, &res);
+
+ if ((u32)res.a0 == 0) {
+ /*
+ * Copy res.a1..res.a17 to the smccc_soc_id_name string
+ * 8 bytes at a time. As per Issue 1.6 of the Arm SMC
+ * Calling Convention, the string will be NUL terminated
+ * and padded, from the end of the string to the end of the
+ * 136 byte buffer, with NULs.
+ */
+ str_fragment_from_reg(smccc_soc_id_name + 8 * 0, res.a1);
+ str_fragment_from_reg(smccc_soc_id_name + 8 * 1, res.a2);
+ str_fragment_from_reg(smccc_soc_id_name + 8 * 2, res.a3);
+ str_fragment_from_reg(smccc_soc_id_name + 8 * 3, res.a4);
+ str_fragment_from_reg(smccc_soc_id_name + 8 * 4, res.a5);
+ str_fragment_from_reg(smccc_soc_id_name + 8 * 5, res.a6);
+ str_fragment_from_reg(smccc_soc_id_name + 8 * 6, res.a7);
+ str_fragment_from_reg(smccc_soc_id_name + 8 * 7, res.a8);
+ str_fragment_from_reg(smccc_soc_id_name + 8 * 8, res.a9);
+ str_fragment_from_reg(smccc_soc_id_name + 8 * 9, res.a10);
+ str_fragment_from_reg(smccc_soc_id_name + 8 * 10, res.a11);
+ str_fragment_from_reg(smccc_soc_id_name + 8 * 11, res.a12);
+ str_fragment_from_reg(smccc_soc_id_name + 8 * 12, res.a13);
+ str_fragment_from_reg(smccc_soc_id_name + 8 * 13, res.a14);
+ str_fragment_from_reg(smccc_soc_id_name + 8 * 14, res.a15);
+ str_fragment_from_reg(smccc_soc_id_name + 8 * 15, res.a16);
+ str_fragment_from_reg(smccc_soc_id_name + 8 * 16, res.a17);
+
+ len = strnlen(smccc_soc_id_name, sizeof(smccc_soc_id_name));
+ if (len) {
+ if (len == sizeof(smccc_soc_id_name))
+ pr_warn(FW_BUG "Ignoring improperly formatted name\n");
+ else
+ return smccc_soc_id_name;
+ }
+ }
+
+ return NULL;
+}
+
+#else
+
+static char __init *smccc_soc_name_init(void)
+{
+ return NULL;
+}
+
+#endif
+
static int __init smccc_soc_init(void)
{
int soc_id_rev, soc_id_version;
@@ -72,6 +151,7 @@ static int __init smccc_soc_init(void)
soc_dev_attr->soc_id = soc_id_str;
soc_dev_attr->revision = soc_id_rev_str;
soc_dev_attr->family = soc_id_jep106_id_str;
+ soc_dev_attr->machine = smccc_soc_name_init();
soc_dev = soc_device_register(soc_dev_attr);
if (IS_ERR(soc_dev)) {
diff --git a/drivers/firmware/thead,th1520-aon.c b/drivers/firmware/thead,th1520-aon.c
new file mode 100644
index 000000000000..38f812ac9920
--- /dev/null
+++ b/drivers/firmware/thead,th1520-aon.c
@@ -0,0 +1,250 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 Alibaba Group Holding Limited.
+ * Copyright (c) 2024 Samsung Electronics Co., Ltd.
+ * Author: Michal Wilczynski <m.wilczynski@samsung.com>
+ */
+
+#include <linux/device.h>
+#include <linux/firmware/thead/thead,th1520-aon.h>
+#include <linux/mailbox_client.h>
+#include <linux/mailbox_controller.h>
+#include <linux/slab.h>
+
+#define MAX_RX_TIMEOUT (msecs_to_jiffies(3000))
+#define MAX_TX_TIMEOUT 500
+
+struct th1520_aon_chan {
+ struct mbox_chan *ch;
+ struct th1520_aon_rpc_ack_common ack_msg;
+ struct mbox_client cl;
+ struct completion done;
+
+ /* make sure only one RPC is performed at a time */
+ struct mutex transaction_lock;
+};
+
+struct th1520_aon_msg_req_set_resource_power_mode {
+ struct th1520_aon_rpc_msg_hdr hdr;
+ u16 resource;
+ u16 mode;
+ u16 reserved[10];
+} __packed __aligned(1);
+
+/*
+ * This type is used to indicate the error response for most functions.
+ */
+enum th1520_aon_error_codes {
+ LIGHT_AON_ERR_NONE = 0, /* Success */
+ LIGHT_AON_ERR_VERSION = 1, /* Incompatible API version */
+ LIGHT_AON_ERR_CONFIG = 2, /* Configuration error */
+ LIGHT_AON_ERR_PARM = 3, /* Bad parameter */
+ LIGHT_AON_ERR_NOACCESS = 4, /* Permission error (no access) */
+ LIGHT_AON_ERR_LOCKED = 5, /* Permission error (locked) */
+ LIGHT_AON_ERR_UNAVAILABLE = 6, /* Unavailable (out of resources) */
+ LIGHT_AON_ERR_NOTFOUND = 7, /* Not found */
+ LIGHT_AON_ERR_NOPOWER = 8, /* No power */
+ LIGHT_AON_ERR_IPC = 9, /* Generic IPC error */
+ LIGHT_AON_ERR_BUSY = 10, /* Resource is currently busy/active */
+ LIGHT_AON_ERR_FAIL = 11, /* General I/O failure */
+ LIGHT_AON_ERR_LAST
+};
+
+static int th1520_aon_linux_errmap[LIGHT_AON_ERR_LAST] = {
+ 0, /* LIGHT_AON_ERR_NONE */
+ -EINVAL, /* LIGHT_AON_ERR_VERSION */
+ -EINVAL, /* LIGHT_AON_ERR_CONFIG */
+ -EINVAL, /* LIGHT_AON_ERR_PARM */
+ -EACCES, /* LIGHT_AON_ERR_NOACCESS */
+ -EACCES, /* LIGHT_AON_ERR_LOCKED */
+ -ERANGE, /* LIGHT_AON_ERR_UNAVAILABLE */
+ -EEXIST, /* LIGHT_AON_ERR_NOTFOUND */
+ -EPERM, /* LIGHT_AON_ERR_NOPOWER */
+ -EPIPE, /* LIGHT_AON_ERR_IPC */
+ -EBUSY, /* LIGHT_AON_ERR_BUSY */
+ -EIO, /* LIGHT_AON_ERR_FAIL */
+};
+
+static inline int th1520_aon_to_linux_errno(int errno)
+{
+ if (errno >= LIGHT_AON_ERR_NONE && errno < LIGHT_AON_ERR_LAST)
+ return th1520_aon_linux_errmap[errno];
+
+ return -EIO;
+}
+
+static void th1520_aon_rx_callback(struct mbox_client *c, void *rx_msg)
+{
+ struct th1520_aon_chan *aon_chan =
+ container_of(c, struct th1520_aon_chan, cl);
+ struct th1520_aon_rpc_msg_hdr *hdr =
+ (struct th1520_aon_rpc_msg_hdr *)rx_msg;
+ u8 recv_size = sizeof(struct th1520_aon_rpc_msg_hdr) + hdr->size;
+
+ if (recv_size != sizeof(struct th1520_aon_rpc_ack_common)) {
+ dev_err(c->dev, "Invalid ack size, not completing\n");
+ return;
+ }
+
+ memcpy(&aon_chan->ack_msg, rx_msg, recv_size);
+ complete(&aon_chan->done);
+}
+
+/**
+ * th1520_aon_call_rpc() - Send an RPC request to the TH1520 AON subsystem
+ * @aon_chan: Pointer to the AON channel structure
+ * @msg: Pointer to the message (RPC payload) that will be sent
+ *
+ * This function sends an RPC message to the TH1520 AON subsystem via mailbox.
+ * It takes the provided @msg buffer, formats it with version and service flags,
+ * then blocks until the RPC completes or times out. The completion is signaled
+ * by the `aon_chan->done` completion, which is waited upon for a duration
+ * defined by `MAX_RX_TIMEOUT`.
+ *
+ * Return:
+ * * 0 on success
+ * * -ETIMEDOUT if the RPC call times out
+ * * A negative error code if the mailbox send fails or if AON responds with
+ * a non-zero error code (converted via th1520_aon_to_linux_errno()).
+ */
+int th1520_aon_call_rpc(struct th1520_aon_chan *aon_chan, void *msg)
+{
+ struct th1520_aon_rpc_msg_hdr *hdr = msg;
+ int ret;
+
+ mutex_lock(&aon_chan->transaction_lock);
+ reinit_completion(&aon_chan->done);
+
+ RPC_SET_VER(hdr, TH1520_AON_RPC_VERSION);
+ RPC_SET_SVC_ID(hdr, hdr->svc);
+ RPC_SET_SVC_FLAG_MSG_TYPE(hdr, RPC_SVC_MSG_TYPE_DATA);
+ RPC_SET_SVC_FLAG_ACK_TYPE(hdr, RPC_SVC_MSG_NEED_ACK);
+
+ ret = mbox_send_message(aon_chan->ch, msg);
+ if (ret < 0) {
+ dev_err(aon_chan->cl.dev, "RPC send msg failed: %d\n", ret);
+ goto out;
+ }
+
+ if (!wait_for_completion_timeout(&aon_chan->done, MAX_RX_TIMEOUT)) {
+ dev_err(aon_chan->cl.dev, "RPC send msg timeout\n");
+ mutex_unlock(&aon_chan->transaction_lock);
+ return -ETIMEDOUT;
+ }
+
+ ret = aon_chan->ack_msg.err_code;
+
+out:
+ mutex_unlock(&aon_chan->transaction_lock);
+
+ return th1520_aon_to_linux_errno(ret);
+}
+EXPORT_SYMBOL_GPL(th1520_aon_call_rpc);
+
+/**
+ * th1520_aon_power_update() - Change power state of a resource via TH1520 AON
+ * @aon_chan: Pointer to the AON channel structure
+ * @rsrc: Resource ID whose power state needs to be updated
+ * @power_on: Boolean indicating whether the resource should be powered on (true)
+ * or powered off (false)
+ *
+ * This function requests the TH1520 AON subsystem to set the power mode of the
+ * given resource (@rsrc) to either on or off. It constructs the message in
+ * `struct th1520_aon_msg_req_set_resource_power_mode` and then invokes
+ * th1520_aon_call_rpc() to make the request. If the AON call fails, an error
+ * message is logged along with the specific return code.
+ *
+ * Return:
+ * * 0 on success
+ * * A negative error code in case of failures (propagated from
+ * th1520_aon_call_rpc()).
+ */
+int th1520_aon_power_update(struct th1520_aon_chan *aon_chan, u16 rsrc,
+ bool power_on)
+{
+ struct th1520_aon_msg_req_set_resource_power_mode msg = {};
+ struct th1520_aon_rpc_msg_hdr *hdr = &msg.hdr;
+ int ret;
+
+ hdr->svc = TH1520_AON_RPC_SVC_PM;
+ hdr->func = TH1520_AON_PM_FUNC_SET_RESOURCE_POWER_MODE;
+ hdr->size = TH1520_AON_RPC_MSG_NUM;
+
+ RPC_SET_BE16(&msg.resource, 0, rsrc);
+ RPC_SET_BE16(&msg.resource, 2,
+ (power_on ? TH1520_AON_PM_PW_MODE_ON :
+ TH1520_AON_PM_PW_MODE_OFF));
+
+ ret = th1520_aon_call_rpc(aon_chan, &msg);
+ if (ret)
+ dev_err(aon_chan->cl.dev, "failed to power %s resource %d ret %d\n",
+ power_on ? "on" : "off", rsrc, ret);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(th1520_aon_power_update);
+
+/**
+ * th1520_aon_init() - Initialize TH1520 AON firmware protocol interface
+ * @dev: Device pointer for the AON subsystem
+ *
+ * This function initializes the TH1520 AON firmware protocol interface by:
+ * - Allocating and initializing the AON channel structure
+ * - Setting up the mailbox client
+ * - Requesting the AON mailbox channel
+ * - Initializing synchronization primitives
+ *
+ * Return:
+ * * Valid pointer to th1520_aon_chan structure on success
+ * * ERR_PTR(-ENOMEM) if memory allocation fails
+ * * ERR_PTR() with other negative error codes from mailbox operations
+ */
+struct th1520_aon_chan *th1520_aon_init(struct device *dev)
+{
+ struct th1520_aon_chan *aon_chan;
+ struct mbox_client *cl;
+ int ret;
+
+ aon_chan = kzalloc(sizeof(*aon_chan), GFP_KERNEL);
+ if (!aon_chan)
+ return ERR_PTR(-ENOMEM);
+
+ cl = &aon_chan->cl;
+ cl->dev = dev;
+ cl->tx_block = true;
+ cl->tx_tout = MAX_TX_TIMEOUT;
+ cl->rx_callback = th1520_aon_rx_callback;
+
+ aon_chan->ch = mbox_request_channel_byname(cl, "aon");
+ if (IS_ERR(aon_chan->ch)) {
+ dev_err(dev, "Failed to request aon mbox chan\n");
+ ret = PTR_ERR(aon_chan->ch);
+ kfree(aon_chan);
+ return ERR_PTR(ret);
+ }
+
+ mutex_init(&aon_chan->transaction_lock);
+ init_completion(&aon_chan->done);
+
+ return aon_chan;
+}
+EXPORT_SYMBOL_GPL(th1520_aon_init);
+
+/**
+ * th1520_aon_deinit() - Clean up TH1520 AON firmware protocol interface
+ * @aon_chan: Pointer to the AON channel structure to clean up
+ *
+ * This function cleans up resources allocated by th1520_aon_init():
+ * - Frees the mailbox channel
+ * - Frees the AON channel
+ */
+void th1520_aon_deinit(struct th1520_aon_chan *aon_chan)
+{
+ mbox_free_channel(aon_chan->ch);
+ kfree(aon_chan);
+}
+EXPORT_SYMBOL_GPL(th1520_aon_deinit);
+
+MODULE_AUTHOR("Michal Wilczynski <m.wilczynski@samsung.com>");
+MODULE_DESCRIPTION("T-HEAD TH1520 Always-On firmware protocol library");
+MODULE_LICENSE("GPL");
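
For reference, a minimal consumer sketch of the API exported above (th1520_aon_init(), th1520_aon_power_update(), th1520_aon_deinit()); the probe function and EXAMPLE_RSRC_ID are hypothetical and only illustrate the intended call sequence:

	static int example_probe(struct platform_device *pdev)
	{
		struct th1520_aon_chan *aon;
		int ret;

		aon = th1520_aon_init(&pdev->dev);
		if (IS_ERR(aon))
			return PTR_ERR(aon);

		/* EXAMPLE_RSRC_ID is a made-up resource number. */
		ret = th1520_aon_power_update(aon, EXAMPLE_RSRC_ID, true);
		if (ret) {
			th1520_aon_deinit(aon);
			return ret;
		}

		platform_set_drvdata(pdev, aon);
		return 0;
	}
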
diff --git a/drivers/firmware/xilinx/zynqmp.c b/drivers/firmware/xilinx/zynqmp.c
index 720fa8b5d8e9..7356e860e65c 100644
--- a/drivers/firmware/xilinx/zynqmp.c
+++ b/drivers/firmware/xilinx/zynqmp.c
@@ -1139,17 +1139,13 @@ EXPORT_SYMBOL_GPL(zynqmp_pm_fpga_get_status);
int zynqmp_pm_fpga_get_config_status(u32 *value)
{
u32 ret_payload[PAYLOAD_ARG_CNT];
- u32 buf, lower_addr, upper_addr;
int ret;
if (!value)
return -EINVAL;
- lower_addr = lower_32_bits((u64)&buf);
- upper_addr = upper_32_bits((u64)&buf);
-
ret = zynqmp_pm_invoke_fn(PM_FPGA_READ, ret_payload, 4,
- XILINX_ZYNQMP_PM_FPGA_CONFIG_STAT_OFFSET, lower_addr, upper_addr,
+ XILINX_ZYNQMP_PM_FPGA_CONFIG_STAT_OFFSET, 0, 0,
XILINX_ZYNQMP_PM_FPGA_READ_CONFIG_REG);
*value = ret_payload[1];
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 98b4d1633b25..f2c39bbff83a 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -757,6 +757,7 @@ config GPIO_VF610
default y if SOC_VF610
depends on ARCH_MXC || COMPILE_TEST
select GPIOLIB_IRQCHIP
+ select GPIO_GENERIC
help
Say yes here to support i.MX or Vybrid vf610 GPIOs.
@@ -1670,7 +1671,7 @@ config GPIO_AMD8111
config GPIO_BT8XX
tristate "BT8XX GPIO abuser"
- depends on VIDEO_BT848=n
+ depends on VIDEO_BT848=n || COMPILE_TEST
help
The BT8xx frame grabber chip has 24 GPIO pins that can be abused
as a cheap PCI GPIO card.
@@ -1791,7 +1792,6 @@ menu "SPI GPIO expanders"
config GPIO_74X164
tristate "74x164 serial-in/parallel-out 8-bits shift register"
- depends on OF_GPIO
help
Driver for 74x164 compatible serial-in/parallel-out 8-outputs
shift registers. This driver can be used to provide access
@@ -1911,6 +1911,7 @@ config GPIO_SIM
tristate "GPIO Simulator Module"
select IRQ_SIM
select CONFIGFS_FS
+ select DEV_SYNC_PROBE
help
This enables the GPIO simulator - a configfs-based GPIO testing
driver.
@@ -1939,6 +1940,7 @@ config GPIO_VIRTUSER
select DEBUG_FS
select CONFIGFS_FS
select IRQ_WORK
+ select DEV_SYNC_PROBE
help
Say Y here to enable the configurable, configfs-based virtual GPIO
consumer testing driver.
@@ -1949,3 +1951,6 @@ config GPIO_VIRTUSER
endmenu
endif
+
+config DEV_SYNC_PROBE
+ tristate
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index af3ba4d81b58..af130882ffee 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -19,6 +19,9 @@ obj-$(CONFIG_GPIO_GENERIC) += gpio-generic.o
# directly supported by gpio-generic
gpio-generic-$(CONFIG_GPIO_GENERIC) += gpio-mmio.o
+# Utilities for drivers that need synchronous fake device creation
+obj-$(CONFIG_DEV_SYNC_PROBE) += dev-sync-probe.o
+
obj-$(CONFIG_GPIO_104_DIO_48E) += gpio-104-dio-48e.o
obj-$(CONFIG_GPIO_104_IDI_48) += gpio-104-idi-48.o
obj-$(CONFIG_GPIO_104_IDIO_16) += gpio-104-idio-16.o
diff --git a/drivers/gpio/TODO b/drivers/gpio/TODO
index 942d1cd2bd3c..b5f0a7a2e1bf 100644
--- a/drivers/gpio/TODO
+++ b/drivers/gpio/TODO
@@ -1,6 +1,7 @@
This is a place for planning the ongoing long-term work in the GPIO
subsystem.
+===============================================================================
GPIO descriptors
@@ -48,6 +49,7 @@ Work items:
numberspace accessors from <linux/gpio.h> and eventually delete
<linux/gpio.h> altogether.
+-------------------------------------------------------------------------------
Get rid of <linux/of_gpio.h>
@@ -75,6 +77,7 @@ Work items:
- Delete <linux/of_gpio.h> when all the above is complete and everything
uses <linux/gpio/consumer.h> or <linux/gpio/driver.h> instead.
+-------------------------------------------------------------------------------
Get rid of <linux/gpio/legacy-of-mm-gpiochip.h>
@@ -85,15 +88,7 @@ Work items:
to_of_mm_gpio_chip(), of_mm_gpiochip_add_data(), of_mm_gpiochip_remove(),
CONFIG_OF_GPIO_MM_GPIOCHIP from the kernel.
-
-Get rid of <linux/gpio.h>
-
-This legacy header is a one stop shop for anything GPIO is closely tied
-to the global GPIO numberspace. The endgame of the above refactorings will
-be the removal of <linux/gpio.h> and from that point only the specialized
-headers under <linux/gpio/*.h> will be used. This requires all the above to
-be completed and is expected to take a long time.
-
+-------------------------------------------------------------------------------
Collect drivers
@@ -108,6 +103,7 @@ At the same time it makes sense to get rid of code duplication in existing or
new coming drivers. For example, gpio-ml-ioh should be incorporated into
gpio-pch.
+-------------------------------------------------------------------------------
Generic MMIO GPIO
@@ -128,6 +124,7 @@ Work items:
helpers (x86 inb()/outb()) and convert port-mapped I/O drivers to use
this with dry-coding and sending to maintainers to test
+-------------------------------------------------------------------------------
Generic regmap GPIO
@@ -135,6 +132,7 @@ In the very similar way to Generic MMIO GPIO convert the users which can
take advantage of using regmap over direct IO accessors. Note, even in
MMIO case the regmap MMIO with gpio-regmap.c is preferable over gpio-mmio.c.
+-------------------------------------------------------------------------------
GPIOLIB irqchip
@@ -144,53 +142,7 @@ try to cover any generic kind of irqchip cascaded from a GPIO.
- Look over and identify any remaining easily converted drivers and
dry-code conversions to gpiolib irqchip for maintainers to test
-
-Increase integration with pin control
-
-There are already ways to use pin control as back-end for GPIO and
-it may make sense to bring these subsystems closer. One reason for
-creating pin control as its own subsystem was that we could avoid any
-use of the global GPIO numbers. Once the above is complete, it may
-make sense to simply join the subsystems into one and make pin
-multiplexing, pin configuration, GPIO, etc selectable options in one
-and the same pin control and GPIO subsystem.
-
-
-Debugfs in place of sysfs
-
-The old sysfs code that enables simple uses of GPIOs from the
-command line is still popular despite the existance of the proper
-character device. The reason is that it is simple to use on
-root filesystems where you only have a minimal set of tools such
-as "cat", "echo" etc.
-
-The old sysfs still need to be strongly deprecated and removed
-as it relies on the global GPIO numberspace that assume a strict
-order of global GPIO numbers that do not change between boots
-and is independent of probe order.
-
-To solve this and provide an ABI that people can use for hacks
-and development, implement a debugfs interface to manipulate
-GPIO lines that can do everything that sysfs can do today: one
-directory per gpiochip and one file entry per line:
-
-/sys/kernel/debug/gpiochip/gpiochip0
-/sys/kernel/debug/gpiochip/gpiochip0/gpio0
-/sys/kernel/debug/gpiochip/gpiochip0/gpio1
-/sys/kernel/debug/gpiochip/gpiochip0/gpio2
-/sys/kernel/debug/gpiochip/gpiochip0/gpio3
-...
-/sys/kernel/debug/gpiochip/gpiochip1
-/sys/kernel/debug/gpiochip/gpiochip1/gpio0
-/sys/kernel/debug/gpiochip/gpiochip1/gpio1
-...
-
-The exact files and design of the debugfs interface can be
-discussed but the idea is to provide a low-level access point
-for debugging and hacking and to expose all lines without the
-need of any exporting. Also provide ample ammunition to shoot
-oneself in the foot, because this is debugfs after all.
-
+-------------------------------------------------------------------------------
Moving over to immutable irq_chip structures
@@ -209,3 +161,28 @@ A small number of drivers have been converted (pl061, tegra186, msm,
amd, apple), and can be used as examples of how to proceed with this
conversion. Note that drivers using the generic irqchip framework
cannot be converted yet, but watch this space!
+
+-------------------------------------------------------------------------------
+
+Convert all GPIO chips to using the new, value returning line setters
+
+struct gpio_chip's set() and set_multiple() callbacks are now deprecated. They
+return void and thus do not allow drivers to indicate failure to set the line
+value back to the caller.
+
+We've now added new variants - set_rv() and set_multiple_rv() that return an
+integer. Let's convert all GPIO drivers treewide to use the new callbacks,
+remove the old ones and finally rename the new ones back to the old names.
+
+-------------------------------------------------------------------------------
+
+Extend the sysfs ABI to allow exporting lines by their HW offsets
+
+The need to support the sysfs GPIO class is one of the main obstacles to
+removing the global GPIO numberspace from the kernel. In order to wean users
+off using global numbers from user-space, extend the existing interface with
+new per-gpiochip export/unexport attributes that allow referring to GPIOs using
+their hardware offsets within the chip.
+
+Encourage users to switch to using them and eventually remove the existing
+global export/unexport attributes.
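
The "value returning line setters" TODO item above is what drives most of the per-driver diffs that follow; a minimal before/after sketch of such a conversion (the foo_* names are illustrative, the gpio-74x164 hunks below are a real instance):

	/* Before: void setter, any write error is silently dropped. */
	static void foo_gpio_set(struct gpio_chip *gc, unsigned int offset, int value)
	{
		foo_write_bit(gpiochip_get_data(gc), offset, value);
	}

	/* After: the setter returns the error and is registered via set_rv. */
	static int foo_gpio_set(struct gpio_chip *gc, unsigned int offset, int value)
	{
		return foo_write_bit(gpiochip_get_data(gc), offset, value);
	}

		...
		gc->set_rv = foo_gpio_set;
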
diff --git a/drivers/gpio/dev-sync-probe.c b/drivers/gpio/dev-sync-probe.c
new file mode 100644
index 000000000000..9ea733b863b2
--- /dev/null
+++ b/drivers/gpio/dev-sync-probe.c
@@ -0,0 +1,97 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Common code for drivers creating fake platform devices.
+ *
+ * Provides synchronous device creation: waits for probe completion and
+ * returns the probe success or error status to the device creator.
+ *
+ * Copyright (C) 2021 Bartosz Golaszewski <brgl@bgdev.pl>
+ * Copyright (C) 2025 Koichiro Den <koichiro.den@canonical.com>
+ */
+
+#include <linux/device.h>
+#include <linux/slab.h>
+
+#include "dev-sync-probe.h"
+
+static int dev_sync_probe_notifier_call(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct dev_sync_probe_data *pdata;
+ struct device *dev = data;
+
+ pdata = container_of(nb, struct dev_sync_probe_data, bus_notifier);
+ if (!device_match_name(dev, pdata->name))
+ return NOTIFY_DONE;
+
+ switch (action) {
+ case BUS_NOTIFY_BOUND_DRIVER:
+ pdata->driver_bound = true;
+ break;
+ case BUS_NOTIFY_DRIVER_NOT_BOUND:
+ pdata->driver_bound = false;
+ break;
+ default:
+ return NOTIFY_DONE;
+ }
+
+ complete(&pdata->probe_completion);
+ return NOTIFY_OK;
+}
+
+void dev_sync_probe_init(struct dev_sync_probe_data *data)
+{
+ memset(data, 0, sizeof(*data));
+ init_completion(&data->probe_completion);
+ data->bus_notifier.notifier_call = dev_sync_probe_notifier_call;
+}
+EXPORT_SYMBOL_GPL(dev_sync_probe_init);
+
+int dev_sync_probe_register(struct dev_sync_probe_data *data,
+ struct platform_device_info *pdevinfo)
+{
+ struct platform_device *pdev;
+ char *name;
+
+ name = kasprintf(GFP_KERNEL, "%s.%d", pdevinfo->name, pdevinfo->id);
+ if (!name)
+ return -ENOMEM;
+
+ data->driver_bound = false;
+ data->name = name;
+ reinit_completion(&data->probe_completion);
+ bus_register_notifier(&platform_bus_type, &data->bus_notifier);
+
+ pdev = platform_device_register_full(pdevinfo);
+ if (IS_ERR(pdev)) {
+ bus_unregister_notifier(&platform_bus_type, &data->bus_notifier);
+ kfree(data->name);
+ return PTR_ERR(pdev);
+ }
+
+ wait_for_completion(&data->probe_completion);
+ bus_unregister_notifier(&platform_bus_type, &data->bus_notifier);
+
+ if (!data->driver_bound) {
+ platform_device_unregister(pdev);
+ kfree(data->name);
+ return -ENXIO;
+ }
+
+ data->pdev = pdev;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dev_sync_probe_register);
+
+void dev_sync_probe_unregister(struct dev_sync_probe_data *data)
+{
+ platform_device_unregister(data->pdev);
+ kfree(data->name);
+ data->pdev = NULL;
+}
+EXPORT_SYMBOL_GPL(dev_sync_probe_unregister);
+
+MODULE_AUTHOR("Bartosz Golaszewski <brgl@bgdev.pl>");
+MODULE_AUTHOR("Koichiro Den <koichiro.den@canonical.com>");
+MODULE_DESCRIPTION("Utilities for synchronous fake device creation");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpio/dev-sync-probe.h b/drivers/gpio/dev-sync-probe.h
new file mode 100644
index 000000000000..4b3d52b70519
--- /dev/null
+++ b/drivers/gpio/dev-sync-probe.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef DEV_SYNC_PROBE_H
+#define DEV_SYNC_PROBE_H
+
+#include <linux/completion.h>
+#include <linux/notifier.h>
+#include <linux/platform_device.h>
+
+struct dev_sync_probe_data {
+ struct platform_device *pdev;
+ const char *name;
+
+ /* Synchronize with probe */
+ struct notifier_block bus_notifier;
+ struct completion probe_completion;
+ bool driver_bound;
+};
+
+void dev_sync_probe_init(struct dev_sync_probe_data *data);
+int dev_sync_probe_register(struct dev_sync_probe_data *data,
+ struct platform_device_info *pdevinfo);
+void dev_sync_probe_unregister(struct dev_sync_probe_data *data);
+
+#endif /* DEV_SYNC_PROBE_H */
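
An illustrative sketch of how a configfs-driven testing driver might use these helpers; the "gpio-example" name and the example_* wrappers are hypothetical, only the dev_sync_probe_*() calls come from the files above:

	static struct dev_sync_probe_data probe_data;

	static int example_create_device(int id)
	{
		struct platform_device_info pdevinfo = {
			.name = "gpio-example",	/* hypothetical platform driver name */
			.id = id,
		};

		dev_sync_probe_init(&probe_data);

		/* Returns only after the driver's probe() has succeeded or failed. */
		return dev_sync_probe_register(&probe_data, &pdevinfo);
	}

	static void example_destroy_device(void)
	{
		dev_sync_probe_unregister(&probe_data);
	}
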
diff --git a/drivers/gpio/gpio-74x164.c b/drivers/gpio/gpio-74x164.c
index fca6cd2eb1dd..4dd5c2c330bb 100644
--- a/drivers/gpio/gpio-74x164.c
+++ b/drivers/gpio/gpio-74x164.c
@@ -7,6 +7,7 @@
*/
#include <linux/bitops.h>
+#include <linux/cleanup.h>
#include <linux/gpio/consumer.h>
#include <linux/gpio/driver.h>
#include <linux/module.h>
@@ -29,7 +30,7 @@ struct gen_74x164_chip {
* register at the end of the transfer. So, to have a logical
* numbering, store the bytes in reverse order.
*/
- u8 buffer[];
+ u8 buffer[] __counted_by(registers);
};
static int __gen_74x164_write_config(struct gen_74x164_chip *chip)
@@ -43,34 +44,31 @@ static int gen_74x164_get_value(struct gpio_chip *gc, unsigned offset)
struct gen_74x164_chip *chip = gpiochip_get_data(gc);
u8 bank = chip->registers - 1 - offset / 8;
u8 pin = offset % 8;
- int ret;
- mutex_lock(&chip->lock);
- ret = (chip->buffer[bank] >> pin) & 0x1;
- mutex_unlock(&chip->lock);
+ guard(mutex)(&chip->lock);
- return ret;
+ return !!(chip->buffer[bank] & BIT(pin));
}
-static void gen_74x164_set_value(struct gpio_chip *gc,
- unsigned offset, int val)
+static int gen_74x164_set_value(struct gpio_chip *gc,
+ unsigned int offset, int val)
{
struct gen_74x164_chip *chip = gpiochip_get_data(gc);
u8 bank = chip->registers - 1 - offset / 8;
u8 pin = offset % 8;
- mutex_lock(&chip->lock);
+ guard(mutex)(&chip->lock);
+
if (val)
- chip->buffer[bank] |= (1 << pin);
+ chip->buffer[bank] |= BIT(pin);
else
- chip->buffer[bank] &= ~(1 << pin);
+ chip->buffer[bank] &= ~BIT(pin);
- __gen_74x164_write_config(chip);
- mutex_unlock(&chip->lock);
+ return __gen_74x164_write_config(chip);
}
-static void gen_74x164_set_multiple(struct gpio_chip *gc, unsigned long *mask,
- unsigned long *bits)
+static int gen_74x164_set_multiple(struct gpio_chip *gc, unsigned long *mask,
+ unsigned long *bits)
{
struct gen_74x164_chip *chip = gpiochip_get_data(gc);
unsigned long offset;
@@ -78,7 +76,8 @@ static void gen_74x164_set_multiple(struct gpio_chip *gc, unsigned long *mask,
size_t bank;
unsigned long bitmask;
- mutex_lock(&chip->lock);
+ guard(mutex)(&chip->lock);
+
for_each_set_clump8(offset, bankmask, mask, chip->registers * 8) {
bank = chip->registers - 1 - offset / 8;
bitmask = bitmap_get_value8(bits, offset) & bankmask;
@@ -86,8 +85,7 @@ static void gen_74x164_set_multiple(struct gpio_chip *gc, unsigned long *mask,
chip->buffer[bank] &= ~bankmask;
chip->buffer[bank] |= bitmask;
}
- __gen_74x164_write_config(chip);
- mutex_unlock(&chip->lock);
+ return __gen_74x164_write_config(chip);
}
static int gen_74x164_direction_output(struct gpio_chip *gc,
@@ -97,8 +95,22 @@ static int gen_74x164_direction_output(struct gpio_chip *gc,
return 0;
}
+static void gen_74x164_deactivate(void *data)
+{
+ struct gen_74x164_chip *chip = data;
+
+ gpiod_set_value_cansleep(chip->gpiod_oe, 0);
+}
+
+static int gen_74x164_activate(struct device *dev, struct gen_74x164_chip *chip)
+{
+ gpiod_set_value_cansleep(chip->gpiod_oe, 1);
+ return devm_add_action_or_reset(dev, gen_74x164_deactivate, chip);
+}
+
static int gen_74x164_probe(struct spi_device *spi)
{
+ struct device *dev = &spi->dev;
struct gen_74x164_chip *chip;
u32 nregs;
int ret;
@@ -112,55 +124,44 @@ static int gen_74x164_probe(struct spi_device *spi)
if (ret < 0)
return ret;
- ret = device_property_read_u32(&spi->dev, "registers-number", &nregs);
- if (ret) {
- dev_err(&spi->dev, "Missing 'registers-number' property.\n");
- return -EINVAL;
- }
+ ret = device_property_read_u32(dev, "registers-number", &nregs);
+ if (ret)
+ return dev_err_probe(dev, ret, "Missing 'registers-number' property.\n");
- chip = devm_kzalloc(&spi->dev, sizeof(*chip) + nregs, GFP_KERNEL);
+ chip = devm_kzalloc(dev, struct_size(chip, buffer, nregs), GFP_KERNEL);
if (!chip)
return -ENOMEM;
- chip->gpiod_oe = devm_gpiod_get_optional(&spi->dev, "enable",
- GPIOD_OUT_LOW);
+ chip->registers = nregs;
+
+ chip->gpiod_oe = devm_gpiod_get_optional(dev, "enable", GPIOD_OUT_LOW);
if (IS_ERR(chip->gpiod_oe))
return PTR_ERR(chip->gpiod_oe);
- spi_set_drvdata(spi, chip);
-
chip->gpio_chip.label = spi->modalias;
chip->gpio_chip.direction_output = gen_74x164_direction_output;
chip->gpio_chip.get = gen_74x164_get_value;
- chip->gpio_chip.set = gen_74x164_set_value;
- chip->gpio_chip.set_multiple = gen_74x164_set_multiple;
+ chip->gpio_chip.set_rv = gen_74x164_set_value;
+ chip->gpio_chip.set_multiple_rv = gen_74x164_set_multiple;
chip->gpio_chip.base = -1;
-
- chip->registers = nregs;
chip->gpio_chip.ngpio = GEN_74X164_NUMBER_GPIOS * chip->registers;
-
chip->gpio_chip.can_sleep = true;
- chip->gpio_chip.parent = &spi->dev;
+ chip->gpio_chip.parent = dev;
chip->gpio_chip.owner = THIS_MODULE;
- ret = devm_mutex_init(&spi->dev, &chip->lock);
+ ret = devm_mutex_init(dev, &chip->lock);
if (ret)
return ret;
ret = __gen_74x164_write_config(chip);
if (ret)
- return dev_err_probe(&spi->dev, ret, "Config write failed\n");
-
- gpiod_set_value_cansleep(chip->gpiod_oe, 1);
-
- return devm_gpiochip_add_data(&spi->dev, &chip->gpio_chip, chip);
-}
+ return dev_err_probe(dev, ret, "Config write failed\n");
-static void gen_74x164_remove(struct spi_device *spi)
-{
- struct gen_74x164_chip *chip = spi_get_drvdata(spi);
+ ret = gen_74x164_activate(dev, chip);
+ if (ret)
+ return ret;
- gpiod_set_value_cansleep(chip->gpiod_oe, 0);
+ return devm_gpiochip_add_data(dev, &chip->gpio_chip, chip);
}
static const struct spi_device_id gen_74x164_spi_ids[] = {
@@ -183,7 +184,6 @@ static struct spi_driver gen_74x164_driver = {
.of_match_table = gen_74x164_dt_ids,
},
.probe = gen_74x164_probe,
- .remove = gen_74x164_remove,
.id_table = gen_74x164_spi_ids,
};
module_spi_driver(gen_74x164_driver);
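
Two recurring patterns in this conversion, sketched here with illustrative foo_* names: guard()/scoped_guard() from <linux/cleanup.h> release the lock automatically on every return path, and a devm action replaces the explicit .remove() callback:

	/* guard(): the mutex is dropped whenever the function returns. */
	static int foo_get(struct foo *foo, unsigned int offset)
	{
		guard(mutex)(&foo->lock);

		return foo_read(foo, offset);
	}

	/* scoped_guard(): the lock is held only for the attached statement. */
	static void foo_refresh(struct foo *foo)
	{
		scoped_guard(mutex, &foo->lock)
			foo_reload_cache(foo);

		foo_notify_listeners(foo);	/* runs unlocked */
	}

	/* devm action: undo work at driver detach without a .remove() callback. */
	static void foo_disable(void *data)
	{
		foo_power_off(data);
	}

	static int foo_enable(struct device *dev, struct foo *foo)
	{
		foo_power_on(foo);
		return devm_add_action_or_reset(dev, foo_disable, foo);
	}
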
diff --git a/drivers/gpio/gpio-adnp.c b/drivers/gpio/gpio-adnp.c
index 6dafab0cf964..dc2b941c3726 100644
--- a/drivers/gpio/gpio-adnp.c
+++ b/drivers/gpio/gpio-adnp.c
@@ -3,11 +3,13 @@
* Copyright (C) 2011-2012 Avionic Design GmbH
*/
+#include <linux/cleanup.h>
#include <linux/gpio/driver.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
+#include <linux/mutex.h>
#include <linux/property.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
@@ -78,7 +80,7 @@ static int adnp_gpio_get(struct gpio_chip *chip, unsigned offset)
return (value & BIT(pos)) ? 1 : 0;
}
-static void __adnp_gpio_set(struct adnp *adnp, unsigned offset, int value)
+static int __adnp_gpio_set(struct adnp *adnp, unsigned int offset, int value)
{
unsigned int reg = offset >> adnp->reg_shift;
unsigned int pos = offset & 7;
@@ -87,23 +89,23 @@ static void __adnp_gpio_set(struct adnp *adnp, unsigned offset, int value)
err = adnp_read(adnp, GPIO_PLR(adnp) + reg, &val);
if (err < 0)
- return;
+ return err;
if (value)
val |= BIT(pos);
else
val &= ~BIT(pos);
- adnp_write(adnp, GPIO_PLR(adnp) + reg, val);
+ return adnp_write(adnp, GPIO_PLR(adnp) + reg, val);
}
-static void adnp_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+static int adnp_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
{
struct adnp *adnp = gpiochip_get_data(chip);
- mutex_lock(&adnp->i2c_lock);
- __adnp_gpio_set(adnp, offset, value);
- mutex_unlock(&adnp->i2c_lock);
+ guard(mutex)(&adnp->i2c_lock);
+
+ return __adnp_gpio_set(adnp, offset, value);
}
static int adnp_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
@@ -114,32 +116,26 @@ static int adnp_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
u8 value;
int err;
- mutex_lock(&adnp->i2c_lock);
+ guard(mutex)(&adnp->i2c_lock);
err = adnp_read(adnp, GPIO_DDR(adnp) + reg, &value);
if (err < 0)
- goto out;
+ return err;
value &= ~BIT(pos);
err = adnp_write(adnp, GPIO_DDR(adnp) + reg, value);
if (err < 0)
- goto out;
+ return err;
err = adnp_read(adnp, GPIO_DDR(adnp) + reg, &value);
if (err < 0)
- goto out;
-
- if (value & BIT(pos)) {
- err = -EPERM;
- goto out;
- }
+ return err;
- err = 0;
+ if (value & BIT(pos))
+ return -EPERM;
-out:
- mutex_unlock(&adnp->i2c_lock);
- return err;
+ return 0;
}
static int adnp_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
@@ -151,33 +147,28 @@ static int adnp_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
int err;
u8 val;
- mutex_lock(&adnp->i2c_lock);
+ guard(mutex)(&adnp->i2c_lock);
err = adnp_read(adnp, GPIO_DDR(adnp) + reg, &val);
if (err < 0)
- goto out;
+ return err;
val |= BIT(pos);
err = adnp_write(adnp, GPIO_DDR(adnp) + reg, val);
if (err < 0)
- goto out;
+ return err;
err = adnp_read(adnp, GPIO_DDR(adnp) + reg, &val);
if (err < 0)
- goto out;
+ return err;
- if (!(val & BIT(pos))) {
- err = -EPERM;
- goto out;
- }
+ if (!(val & BIT(pos)))
+ return -EPERM;
__adnp_gpio_set(adnp, offset, value);
- err = 0;
-out:
- mutex_unlock(&adnp->i2c_lock);
- return err;
+ return 0;
}
static void adnp_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
@@ -187,27 +178,26 @@ static void adnp_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
int err;
for (i = 0; i < num_regs; i++) {
- u8 ddr, plr, ier, isr;
+ u8 ddr = 0, plr = 0, ier = 0, isr = 0;
- mutex_lock(&adnp->i2c_lock);
+ scoped_guard(mutex, &adnp->i2c_lock) {
+ err = adnp_read(adnp, GPIO_DDR(adnp) + i, &ddr);
+ if (err < 0)
+ return;
- err = adnp_read(adnp, GPIO_DDR(adnp) + i, &ddr);
- if (err < 0)
- goto unlock;
+ err = adnp_read(adnp, GPIO_PLR(adnp) + i, &plr);
+ if (err < 0)
+ return;
- err = adnp_read(adnp, GPIO_PLR(adnp) + i, &plr);
- if (err < 0)
- goto unlock;
+ err = adnp_read(adnp, GPIO_IER(adnp) + i, &ier);
+ if (err < 0)
+ return;
- err = adnp_read(adnp, GPIO_IER(adnp) + i, &ier);
- if (err < 0)
- goto unlock;
-
- err = adnp_read(adnp, GPIO_ISR(adnp) + i, &isr);
- if (err < 0)
- goto unlock;
+ err = adnp_read(adnp, GPIO_ISR(adnp) + i, &isr);
+ if (err < 0)
+ return;
- mutex_unlock(&adnp->i2c_lock);
+ }
for (j = 0; j < 8; j++) {
unsigned int bit = (i << adnp->reg_shift) + j;
@@ -232,11 +222,6 @@ static void adnp_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
direction, level, interrupt, pending);
}
}
-
- return;
-
-unlock:
- mutex_unlock(&adnp->i2c_lock);
}
static irqreturn_t adnp_irq(int irq, void *data)
@@ -248,32 +233,24 @@ static irqreturn_t adnp_irq(int irq, void *data)
for (i = 0; i < num_regs; i++) {
unsigned int base = i << adnp->reg_shift, bit;
- u8 changed, level, isr, ier;
+ u8 changed, level = 0, isr = 0, ier = 0;
unsigned long pending;
int err;
- mutex_lock(&adnp->i2c_lock);
+ scoped_guard(mutex, &adnp->i2c_lock) {
+ err = adnp_read(adnp, GPIO_PLR(adnp) + i, &level);
+ if (err < 0)
+ continue;
- err = adnp_read(adnp, GPIO_PLR(adnp) + i, &level);
- if (err < 0) {
- mutex_unlock(&adnp->i2c_lock);
- continue;
- }
+ err = adnp_read(adnp, GPIO_ISR(adnp) + i, &isr);
+ if (err < 0)
+ continue;
- err = adnp_read(adnp, GPIO_ISR(adnp) + i, &isr);
- if (err < 0) {
- mutex_unlock(&adnp->i2c_lock);
- continue;
+ err = adnp_read(adnp, GPIO_IER(adnp) + i, &ier);
+ if (err < 0)
+ continue;
}
- err = adnp_read(adnp, GPIO_IER(adnp) + i, &ier);
- if (err < 0) {
- mutex_unlock(&adnp->i2c_lock);
- continue;
- }
-
- mutex_unlock(&adnp->i2c_lock);
-
/* determine pins that changed levels */
changed = level ^ adnp->irq_level[i];
@@ -365,12 +342,12 @@ static void adnp_irq_bus_unlock(struct irq_data *d)
struct adnp *adnp = gpiochip_get_data(gc);
unsigned int num_regs = 1 << adnp->reg_shift, i;
- mutex_lock(&adnp->i2c_lock);
-
- for (i = 0; i < num_regs; i++)
- adnp_write(adnp, GPIO_IER(adnp) + i, adnp->irq_enable[i]);
+ scoped_guard(mutex, &adnp->i2c_lock) {
+ for (i = 0; i < num_regs; i++)
+ adnp_write(adnp, GPIO_IER(adnp) + i,
+ adnp->irq_enable[i]);
+ }
- mutex_unlock(&adnp->i2c_lock);
mutex_unlock(&adnp->irq_lock);
}
@@ -453,7 +430,7 @@ static int adnp_gpio_setup(struct adnp *adnp, unsigned int num_gpios,
chip->direction_input = adnp_gpio_direction_input;
chip->direction_output = adnp_gpio_direction_output;
chip->get = adnp_gpio_get;
- chip->set = adnp_gpio_set;
+ chip->set_rv = adnp_gpio_set;
chip->can_sleep = true;
if (IS_ENABLED(CONFIG_DEBUG_FS))
@@ -506,7 +483,10 @@ static int adnp_i2c_probe(struct i2c_client *client)
if (!adnp)
return -ENOMEM;
- mutex_init(&adnp->i2c_lock);
+ err = devm_mutex_init(&client->dev, &adnp->i2c_lock);
+ if (err)
+ return err;
+
adnp->client = client;
err = adnp_gpio_setup(adnp, num_gpios, device_property_read_bool(dev, "interrupt-controller"));
diff --git a/drivers/gpio/gpio-adp5520.c b/drivers/gpio/gpio-adp5520.c
index c55e821c63b6..57d12c10cbda 100644
--- a/drivers/gpio/gpio-adp5520.c
+++ b/drivers/gpio/gpio-adp5520.c
@@ -40,16 +40,18 @@ static int adp5520_gpio_get_value(struct gpio_chip *chip, unsigned off)
return !!(reg_val & dev->lut[off]);
}
-static void adp5520_gpio_set_value(struct gpio_chip *chip,
- unsigned off, int val)
+static int adp5520_gpio_set_value(struct gpio_chip *chip,
+ unsigned int off, int val)
{
struct adp5520_gpio *dev;
dev = gpiochip_get_data(chip);
if (val)
- adp5520_set_bits(dev->master, ADP5520_GPIO_OUT, dev->lut[off]);
+ return adp5520_set_bits(dev->master, ADP5520_GPIO_OUT,
+ dev->lut[off]);
else
- adp5520_clr_bits(dev->master, ADP5520_GPIO_OUT, dev->lut[off]);
+ return adp5520_clr_bits(dev->master, ADP5520_GPIO_OUT,
+ dev->lut[off]);
}
static int adp5520_gpio_direction_input(struct gpio_chip *chip, unsigned off)
@@ -120,7 +122,7 @@ static int adp5520_gpio_probe(struct platform_device *pdev)
gc->direction_input = adp5520_gpio_direction_input;
gc->direction_output = adp5520_gpio_direction_output;
gc->get = adp5520_gpio_get_value;
- gc->set = adp5520_gpio_set_value;
+ gc->set_rv = adp5520_gpio_set_value;
gc->can_sleep = true;
gc->base = pdata->gpio_start;
diff --git a/drivers/gpio/gpio-adp5585.c b/drivers/gpio/gpio-adp5585.c
index 000d31f09671..d5c0f1b267c8 100644
--- a/drivers/gpio/gpio-adp5585.c
+++ b/drivers/gpio/gpio-adp5585.c
@@ -86,14 +86,16 @@ static int adp5585_gpio_get_value(struct gpio_chip *chip, unsigned int off)
return !!(val & bit);
}
-static void adp5585_gpio_set_value(struct gpio_chip *chip, unsigned int off, int val)
+static int adp5585_gpio_set_value(struct gpio_chip *chip, unsigned int off,
+ int val)
{
struct adp5585_gpio_dev *adp5585_gpio = gpiochip_get_data(chip);
unsigned int bank = ADP5585_BANK(off);
unsigned int bit = ADP5585_BIT(off);
- regmap_update_bits(adp5585_gpio->regmap, ADP5585_GPO_DATA_OUT_A + bank,
- bit, val ? bit : 0);
+ return regmap_update_bits(adp5585_gpio->regmap,
+ ADP5585_GPO_DATA_OUT_A + bank,
+ bit, val ? bit : 0);
}
static int adp5585_gpio_set_bias(struct adp5585_gpio_dev *adp5585_gpio,
@@ -192,7 +194,7 @@ static int adp5585_gpio_probe(struct platform_device *pdev)
gc->direction_input = adp5585_gpio_direction_input;
gc->direction_output = adp5585_gpio_direction_output;
gc->get = adp5585_gpio_get_value;
- gc->set = adp5585_gpio_set_value;
+ gc->set_rv = adp5585_gpio_set_value;
gc->set_config = adp5585_gpio_set_config;
gc->can_sleep = true;
diff --git a/drivers/gpio/gpio-aggregator.c b/drivers/gpio/gpio-aggregator.c
index 65f41cc3eafc..d232ea865356 100644
--- a/drivers/gpio/gpio-aggregator.c
+++ b/drivers/gpio/gpio-aggregator.c
@@ -119,10 +119,15 @@ static ssize_t new_device_store(struct device_driver *driver, const char *buf,
struct platform_device *pdev;
int res, id;
+ if (!try_module_get(THIS_MODULE))
+ return -ENOENT;
+
/* kernfs guarantees string termination, so count + 1 is safe */
aggr = kzalloc(sizeof(*aggr) + count + 1, GFP_KERNEL);
- if (!aggr)
- return -ENOMEM;
+ if (!aggr) {
+ res = -ENOMEM;
+ goto put_module;
+ }
memcpy(aggr->args, buf, count + 1);
@@ -161,6 +166,7 @@ static ssize_t new_device_store(struct device_driver *driver, const char *buf,
}
aggr->pdev = pdev;
+ module_put(THIS_MODULE);
return count;
remove_table:
@@ -175,6 +181,8 @@ free_table:
kfree(aggr->lookups);
free_ga:
kfree(aggr);
+put_module:
+ module_put(THIS_MODULE);
return res;
}
@@ -203,13 +211,19 @@ static ssize_t delete_device_store(struct device_driver *driver,
if (error)
return error;
+ if (!try_module_get(THIS_MODULE))
+ return -ENOENT;
+
mutex_lock(&gpio_aggregator_lock);
aggr = idr_remove(&gpio_aggregator_idr, id);
mutex_unlock(&gpio_aggregator_lock);
- if (!aggr)
+ if (!aggr) {
+ module_put(THIS_MODULE);
return -ENOENT;
+ }
gpio_aggregator_free(aggr);
+ module_put(THIS_MODULE);
return count;
}
static DRIVER_ATTR_WO(delete_device);
@@ -358,25 +372,30 @@ static void gpio_fwd_delay(struct gpio_chip *chip, unsigned int offset, int valu
udelay(delay_us);
}
-static void gpio_fwd_set(struct gpio_chip *chip, unsigned int offset, int value)
+static int gpio_fwd_set(struct gpio_chip *chip, unsigned int offset, int value)
{
struct gpiochip_fwd *fwd = gpiochip_get_data(chip);
+ int ret;
if (chip->can_sleep)
- gpiod_set_value_cansleep(fwd->descs[offset], value);
+ ret = gpiod_set_value_cansleep(fwd->descs[offset], value);
else
- gpiod_set_value(fwd->descs[offset], value);
+ ret = gpiod_set_value(fwd->descs[offset], value);
+ if (ret)
+ return ret;
if (fwd->delay_timings)
gpio_fwd_delay(chip, offset, value);
+
+ return ret;
}
-static void gpio_fwd_set_multiple(struct gpiochip_fwd *fwd, unsigned long *mask,
- unsigned long *bits)
+static int gpio_fwd_set_multiple(struct gpiochip_fwd *fwd, unsigned long *mask,
+ unsigned long *bits)
{
struct gpio_desc **descs = fwd_tmp_descs(fwd);
unsigned long *values = fwd_tmp_values(fwd);
- unsigned int i, j = 0;
+ unsigned int i, j = 0, ret;
for_each_set_bit(i, mask, fwd->chip.ngpio) {
__assign_bit(j, values, test_bit(i, bits));
@@ -384,26 +403,31 @@ static void gpio_fwd_set_multiple(struct gpiochip_fwd *fwd, unsigned long *mask,
}
if (fwd->chip.can_sleep)
- gpiod_set_array_value_cansleep(j, descs, NULL, values);
+ ret = gpiod_set_array_value_cansleep(j, descs, NULL, values);
else
- gpiod_set_array_value(j, descs, NULL, values);
+ ret = gpiod_set_array_value(j, descs, NULL, values);
+
+ return ret;
}
-static void gpio_fwd_set_multiple_locked(struct gpio_chip *chip,
- unsigned long *mask, unsigned long *bits)
+static int gpio_fwd_set_multiple_locked(struct gpio_chip *chip,
+ unsigned long *mask, unsigned long *bits)
{
struct gpiochip_fwd *fwd = gpiochip_get_data(chip);
unsigned long flags;
+ int ret;
if (chip->can_sleep) {
mutex_lock(&fwd->mlock);
- gpio_fwd_set_multiple(fwd, mask, bits);
+ ret = gpio_fwd_set_multiple(fwd, mask, bits);
mutex_unlock(&fwd->mlock);
} else {
spin_lock_irqsave(&fwd->slock, flags);
- gpio_fwd_set_multiple(fwd, mask, bits);
+ ret = gpio_fwd_set_multiple(fwd, mask, bits);
spin_unlock_irqrestore(&fwd->slock, flags);
}
+
+ return ret;
}
static int gpio_fwd_set_config(struct gpio_chip *chip, unsigned int offset,
@@ -533,8 +557,8 @@ static struct gpiochip_fwd *gpiochip_fwd_create(struct device *dev,
chip->direction_output = gpio_fwd_direction_output;
chip->get = gpio_fwd_get;
chip->get_multiple = gpio_fwd_get_multiple_locked;
- chip->set = gpio_fwd_set;
- chip->set_multiple = gpio_fwd_set_multiple_locked;
+ chip->set_rv = gpio_fwd_set;
+ chip->set_multiple_rv = gpio_fwd_set_multiple_locked;
chip->to_irq = gpio_fwd_to_irq;
chip->base = -1;
chip->ngpio = ngpios;
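
The try_module_get()/module_put() additions above guard the driver-attribute store callbacks against the module being unloaded while a write is in flight; a minimal sketch of the pattern (example_do_work() is a hypothetical helper returning 0 or a negative errno):

	static ssize_t example_store(struct device_driver *drv, const char *buf,
				     size_t count)
	{
		int ret;

		/* Pin the module for the duration of the operation. */
		if (!try_module_get(THIS_MODULE))
			return -ENOENT;

		ret = example_do_work(buf, count);

		module_put(THIS_MODULE);

		return ret ?: count;
	}
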
diff --git a/drivers/gpio/gpio-altera-a10sr.c b/drivers/gpio/gpio-altera-a10sr.c
index 11edf1fe6c90..77a674cf99e4 100644
--- a/drivers/gpio/gpio-altera-a10sr.c
+++ b/drivers/gpio/gpio-altera-a10sr.c
@@ -35,15 +35,15 @@ static int altr_a10sr_gpio_get(struct gpio_chip *chip, unsigned int offset)
return !!(val & BIT(offset - ALTR_A10SR_LED_VALID_SHIFT));
}
-static void altr_a10sr_gpio_set(struct gpio_chip *chip, unsigned int offset,
- int value)
+static int altr_a10sr_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
struct altr_a10sr_gpio *gpio = gpiochip_get_data(chip);
- regmap_update_bits(gpio->regmap, ALTR_A10SR_LED_REG,
- BIT(ALTR_A10SR_LED_VALID_SHIFT + offset),
- value ? BIT(ALTR_A10SR_LED_VALID_SHIFT + offset)
- : 0);
+ return regmap_update_bits(gpio->regmap, ALTR_A10SR_LED_REG,
+ BIT(ALTR_A10SR_LED_VALID_SHIFT + offset),
+ value ?
+ BIT(ALTR_A10SR_LED_VALID_SHIFT + offset) : 0);
}
static int altr_a10sr_gpio_direction_input(struct gpio_chip *gc,
@@ -69,7 +69,7 @@ static const struct gpio_chip altr_a10sr_gc = {
.label = "altr_a10sr_gpio",
.owner = THIS_MODULE,
.get = altr_a10sr_gpio_get,
- .set = altr_a10sr_gpio_set,
+ .set_rv = altr_a10sr_gpio_set,
.direction_input = altr_a10sr_gpio_direction_input,
.direction_output = altr_a10sr_gpio_direction_output,
.can_sleep = true,
diff --git a/drivers/gpio/gpio-altera.c b/drivers/gpio/gpio-altera.c
index 17ab039c7413..1b28525726d7 100644
--- a/drivers/gpio/gpio-altera.c
+++ b/drivers/gpio/gpio-altera.c
@@ -113,7 +113,7 @@ static int altera_gpio_get(struct gpio_chip *gc, unsigned offset)
return !!(readl(altera_gc->regs + ALTERA_GPIO_DATA) & BIT(offset));
}
-static void altera_gpio_set(struct gpio_chip *gc, unsigned offset, int value)
+static int altera_gpio_set(struct gpio_chip *gc, unsigned int offset, int value)
{
struct altera_gpio_chip *altera_gc = gpiochip_get_data(gc);
unsigned long flags;
@@ -127,6 +127,8 @@ static void altera_gpio_set(struct gpio_chip *gc, unsigned offset, int value)
data_reg &= ~BIT(offset);
writel(data_reg, altera_gc->regs + ALTERA_GPIO_DATA);
raw_spin_unlock_irqrestore(&altera_gc->gpio_lock, flags);
+
+ return 0;
}
static int altera_gpio_direction_input(struct gpio_chip *gc, unsigned offset)
@@ -257,7 +259,7 @@ static int altera_gpio_probe(struct platform_device *pdev)
altera_gc->gc.direction_input = altera_gpio_direction_input;
altera_gc->gc.direction_output = altera_gpio_direction_output;
altera_gc->gc.get = altera_gpio_get;
- altera_gc->gc.set = altera_gpio_set;
+ altera_gc->gc.set_rv = altera_gpio_set;
altera_gc->gc.owner = THIS_MODULE;
altera_gc->gc.parent = &pdev->dev;
altera_gc->gc.base = -1;
diff --git a/drivers/gpio/gpio-amd-fch.c b/drivers/gpio/gpio-amd-fch.c
index 2a21354ed6a0..f8d0cea46049 100644
--- a/drivers/gpio/gpio-amd-fch.c
+++ b/drivers/gpio/gpio-amd-fch.c
@@ -95,8 +95,7 @@ static int amd_fch_gpio_get_direction(struct gpio_chip *gc, unsigned int gpio)
return ret ? GPIO_LINE_DIRECTION_OUT : GPIO_LINE_DIRECTION_IN;
}
-static void amd_fch_gpio_set(struct gpio_chip *gc,
- unsigned int gpio, int value)
+static int amd_fch_gpio_set(struct gpio_chip *gc, unsigned int gpio, int value)
{
unsigned long flags;
struct amd_fch_gpio_priv *priv = gpiochip_get_data(gc);
@@ -113,6 +112,8 @@ static void amd_fch_gpio_set(struct gpio_chip *gc,
writel_relaxed(mask, ptr);
spin_unlock_irqrestore(&priv->lock, flags);
+
+ return 0;
}
static int amd_fch_gpio_get(struct gpio_chip *gc,
@@ -164,7 +165,7 @@ static int amd_fch_gpio_probe(struct platform_device *pdev)
priv->gc.direction_output = amd_fch_gpio_direction_output;
priv->gc.get_direction = amd_fch_gpio_get_direction;
priv->gc.get = amd_fch_gpio_get;
- priv->gc.set = amd_fch_gpio_set;
+ priv->gc.set_rv = amd_fch_gpio_set;
spin_lock_init(&priv->lock);
diff --git a/drivers/gpio/gpio-amd8111.c b/drivers/gpio/gpio-amd8111.c
index 3377667a28de..425d8472f744 100644
--- a/drivers/gpio/gpio-amd8111.c
+++ b/drivers/gpio/gpio-amd8111.c
@@ -94,7 +94,7 @@ static void amd_gpio_free(struct gpio_chip *chip, unsigned offset)
iowrite8(agp->orig[offset], agp->pm + AMD_REG_GPIO(offset));
}
-static void amd_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+static int amd_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
{
struct amd_gpio *agp = gpiochip_get_data(chip);
u8 temp;
@@ -107,6 +107,8 @@ static void amd_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
spin_unlock_irqrestore(&agp->lock, flags);
dev_dbg(&agp->pdev->dev, "Setting gpio %d, value %d, reg=%02x\n", offset, !!value, temp);
+
+ return 0;
}
static int amd_gpio_get(struct gpio_chip *chip, unsigned offset)
@@ -163,7 +165,7 @@ static struct amd_gpio gp = {
.ngpio = 32,
.request = amd_gpio_request,
.free = amd_gpio_free,
- .set = amd_gpio_set,
+ .set_rv = amd_gpio_set,
.get = amd_gpio_get,
.direction_output = amd_gpio_dirout,
.direction_input = amd_gpio_dirin,
diff --git a/drivers/gpio/gpio-arizona.c b/drivers/gpio/gpio-arizona.c
index c15fda99120a..e530c94dcce8 100644
--- a/drivers/gpio/gpio-arizona.c
+++ b/drivers/gpio/gpio-arizona.c
@@ -121,7 +121,8 @@ static int arizona_gpio_direction_out(struct gpio_chip *chip,
ARIZONA_GPN_DIR | ARIZONA_GPN_LVL, value);
}
-static void arizona_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+static int arizona_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
struct arizona_gpio *arizona_gpio = gpiochip_get_data(chip);
struct arizona *arizona = arizona_gpio->arizona;
@@ -129,8 +130,8 @@ static void arizona_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
if (value)
value = ARIZONA_GPN_LVL;
- regmap_update_bits(arizona->regmap, ARIZONA_GPIO1_CTRL + offset,
- ARIZONA_GPN_LVL, value);
+ return regmap_update_bits(arizona->regmap, ARIZONA_GPIO1_CTRL + offset,
+ ARIZONA_GPN_LVL, value);
}
static const struct gpio_chip template_chip = {
@@ -139,7 +140,7 @@ static const struct gpio_chip template_chip = {
.direction_input = arizona_gpio_direction_in,
.get = arizona_gpio_get,
.direction_output = arizona_gpio_direction_out,
- .set = arizona_gpio_set,
+ .set_rv = arizona_gpio_set,
.can_sleep = true,
};
diff --git a/drivers/gpio/gpio-aspeed-sgpio.c b/drivers/gpio/gpio-aspeed-sgpio.c
index 34eb26298e32..00b31497ecff 100644
--- a/drivers/gpio/gpio-aspeed-sgpio.c
+++ b/drivers/gpio/gpio-aspeed-sgpio.c
@@ -6,6 +6,7 @@
*/
#include <linux/bitfield.h>
+#include <linux/cleanup.h>
#include <linux/clk.h>
#include <linux/gpio/driver.h>
#include <linux/hashtable.h>
@@ -170,17 +171,14 @@ static int aspeed_sgpio_get(struct gpio_chip *gc, unsigned int offset)
{
struct aspeed_sgpio *gpio = gpiochip_get_data(gc);
const struct aspeed_sgpio_bank *bank = to_bank(offset);
- unsigned long flags;
enum aspeed_sgpio_reg reg;
int rc = 0;
- raw_spin_lock_irqsave(&gpio->lock, flags);
+ guard(raw_spinlock_irqsave)(&gpio->lock);
reg = aspeed_sgpio_is_input(offset) ? reg_val : reg_rdata;
rc = !!(ioread32(bank_reg(gpio, bank, reg)) & GPIO_BIT(offset));
- raw_spin_unlock_irqrestore(&gpio->lock, flags);
-
return rc;
}
@@ -211,16 +209,13 @@ static int sgpio_set_value(struct gpio_chip *gc, unsigned int offset, int val)
return 0;
}
-static void aspeed_sgpio_set(struct gpio_chip *gc, unsigned int offset, int val)
+static int aspeed_sgpio_set(struct gpio_chip *gc, unsigned int offset, int val)
{
struct aspeed_sgpio *gpio = gpiochip_get_data(gc);
- unsigned long flags;
-
- raw_spin_lock_irqsave(&gpio->lock, flags);
- sgpio_set_value(gc, offset, val);
+ guard(raw_spinlock_irqsave)(&gpio->lock);
- raw_spin_unlock_irqrestore(&gpio->lock, flags);
+ return sgpio_set_value(gc, offset, val);
}
static int aspeed_sgpio_dir_in(struct gpio_chip *gc, unsigned int offset)
@@ -231,15 +226,14 @@ static int aspeed_sgpio_dir_in(struct gpio_chip *gc, unsigned int offset)
static int aspeed_sgpio_dir_out(struct gpio_chip *gc, unsigned int offset, int val)
{
struct aspeed_sgpio *gpio = gpiochip_get_data(gc);
- unsigned long flags;
int rc;
/* No special action is required for setting the direction; we'll
* error-out in sgpio_set_value if this isn't an output GPIO */
- raw_spin_lock_irqsave(&gpio->lock, flags);
+ guard(raw_spinlock_irqsave)(&gpio->lock);
+
rc = sgpio_set_value(gc, offset, val);
- raw_spin_unlock_irqrestore(&gpio->lock, flags);
return rc;
}
@@ -269,7 +263,6 @@ static void aspeed_sgpio_irq_ack(struct irq_data *d)
{
const struct aspeed_sgpio_bank *bank;
struct aspeed_sgpio *gpio;
- unsigned long flags;
void __iomem *status_addr;
int offset;
u32 bit;
@@ -278,18 +271,15 @@ static void aspeed_sgpio_irq_ack(struct irq_data *d)
status_addr = bank_reg(gpio, bank, reg_irq_status);
- raw_spin_lock_irqsave(&gpio->lock, flags);
+ guard(raw_spinlock_irqsave)(&gpio->lock);
iowrite32(bit, status_addr);
-
- raw_spin_unlock_irqrestore(&gpio->lock, flags);
}
static void aspeed_sgpio_irq_set_mask(struct irq_data *d, bool set)
{
const struct aspeed_sgpio_bank *bank;
struct aspeed_sgpio *gpio;
- unsigned long flags;
u32 reg, bit;
void __iomem *addr;
int offset;
@@ -301,17 +291,15 @@ static void aspeed_sgpio_irq_set_mask(struct irq_data *d, bool set)
if (set)
gpiochip_enable_irq(&gpio->chip, irqd_to_hwirq(d));
- raw_spin_lock_irqsave(&gpio->lock, flags);
-
- reg = ioread32(addr);
- if (set)
- reg |= bit;
- else
- reg &= ~bit;
-
- iowrite32(reg, addr);
+ scoped_guard(raw_spinlock_irqsave, &gpio->lock) {
+ reg = ioread32(addr);
+ if (set)
+ reg |= bit;
+ else
+ reg &= ~bit;
- raw_spin_unlock_irqrestore(&gpio->lock, flags);
+ iowrite32(reg, addr);
+ }
/* Masking the IRQ */
if (!set)
@@ -339,7 +327,6 @@ static int aspeed_sgpio_set_type(struct irq_data *d, unsigned int type)
const struct aspeed_sgpio_bank *bank;
irq_flow_handler_t handler;
struct aspeed_sgpio *gpio;
- unsigned long flags;
void __iomem *addr;
int offset;
@@ -366,24 +353,22 @@ static int aspeed_sgpio_set_type(struct irq_data *d, unsigned int type)
return -EINVAL;
}
- raw_spin_lock_irqsave(&gpio->lock, flags);
-
- addr = bank_reg(gpio, bank, reg_irq_type0);
- reg = ioread32(addr);
- reg = (reg & ~bit) | type0;
- iowrite32(reg, addr);
-
- addr = bank_reg(gpio, bank, reg_irq_type1);
- reg = ioread32(addr);
- reg = (reg & ~bit) | type1;
- iowrite32(reg, addr);
-
- addr = bank_reg(gpio, bank, reg_irq_type2);
- reg = ioread32(addr);
- reg = (reg & ~bit) | type2;
- iowrite32(reg, addr);
-
- raw_spin_unlock_irqrestore(&gpio->lock, flags);
+ scoped_guard(raw_spinlock_irqsave, &gpio->lock) {
+ addr = bank_reg(gpio, bank, reg_irq_type0);
+ reg = ioread32(addr);
+ reg = (reg & ~bit) | type0;
+ iowrite32(reg, addr);
+
+ addr = bank_reg(gpio, bank, reg_irq_type1);
+ reg = ioread32(addr);
+ reg = (reg & ~bit) | type1;
+ iowrite32(reg, addr);
+
+ addr = bank_reg(gpio, bank, reg_irq_type2);
+ reg = ioread32(addr);
+ reg = (reg & ~bit) | type2;
+ iowrite32(reg, addr);
+ }
irq_set_handler_locked(d, handler);
@@ -487,13 +472,12 @@ static int aspeed_sgpio_reset_tolerance(struct gpio_chip *chip,
unsigned int offset, bool enable)
{
struct aspeed_sgpio *gpio = gpiochip_get_data(chip);
- unsigned long flags;
void __iomem *reg;
u32 val;
reg = bank_reg(gpio, to_bank(offset), reg_tolerance);
- raw_spin_lock_irqsave(&gpio->lock, flags);
+ guard(raw_spinlock_irqsave)(&gpio->lock);
val = readl(reg);
@@ -504,8 +488,6 @@ static int aspeed_sgpio_reset_tolerance(struct gpio_chip *chip,
writel(val, reg);
- raw_spin_unlock_irqrestore(&gpio->lock, flags);
-
return 0;
}
@@ -614,7 +596,7 @@ static int __init aspeed_sgpio_probe(struct platform_device *pdev)
gpio->chip.request = NULL;
gpio->chip.free = NULL;
gpio->chip.get = aspeed_sgpio_get;
- gpio->chip.set = aspeed_sgpio_set;
+ gpio->chip.set_rv = aspeed_sgpio_set;
gpio->chip.set_config = aspeed_sgpio_set_config;
gpio->chip.label = dev_name(&pdev->dev);
gpio->chip.base = -1;
diff --git a/drivers/gpio/gpio-aspeed.c b/drivers/gpio/gpio-aspeed.c
index 40c1bd80f8b0..2d340a343a17 100644
--- a/drivers/gpio/gpio-aspeed.c
+++ b/drivers/gpio/gpio-aspeed.c
@@ -5,6 +5,7 @@
* Joel Stanley <joel@jms.id.au>
*/
+#include <linux/cleanup.h>
#include <linux/clk.h>
#include <linux/gpio/aspeed.h>
#include <linux/gpio/driver.h>
@@ -423,41 +424,38 @@ static void __aspeed_gpio_set(struct gpio_chip *gc, unsigned int offset,
gpio->config->llops->reg_bit_get(gpio, offset, reg_val);
}
-static void aspeed_gpio_set(struct gpio_chip *gc, unsigned int offset,
- int val)
+static int aspeed_gpio_set(struct gpio_chip *gc, unsigned int offset, int val)
{
struct aspeed_gpio *gpio = gpiochip_get_data(gc);
- unsigned long flags;
bool copro = false;
- raw_spin_lock_irqsave(&gpio->lock, flags);
+ guard(raw_spinlock_irqsave)(&gpio->lock);
+
copro = aspeed_gpio_copro_request(gpio, offset);
__aspeed_gpio_set(gc, offset, val);
if (copro)
aspeed_gpio_copro_release(gpio, offset);
- raw_spin_unlock_irqrestore(&gpio->lock, flags);
+
+ return 0;
}
static int aspeed_gpio_dir_in(struct gpio_chip *gc, unsigned int offset)
{
struct aspeed_gpio *gpio = gpiochip_get_data(gc);
- unsigned long flags;
bool copro = false;
if (!have_input(gpio, offset))
return -ENOTSUPP;
- raw_spin_lock_irqsave(&gpio->lock, flags);
+ guard(raw_spinlock_irqsave)(&gpio->lock);
copro = aspeed_gpio_copro_request(gpio, offset);
gpio->config->llops->reg_bit_set(gpio, offset, reg_dir, 0);
if (copro)
aspeed_gpio_copro_release(gpio, offset);
- raw_spin_unlock_irqrestore(&gpio->lock, flags);
-
return 0;
}
@@ -465,13 +463,12 @@ static int aspeed_gpio_dir_out(struct gpio_chip *gc,
unsigned int offset, int val)
{
struct aspeed_gpio *gpio = gpiochip_get_data(gc);
- unsigned long flags;
bool copro = false;
if (!have_output(gpio, offset))
return -ENOTSUPP;
- raw_spin_lock_irqsave(&gpio->lock, flags);
+ guard(raw_spinlock_irqsave)(&gpio->lock);
copro = aspeed_gpio_copro_request(gpio, offset);
__aspeed_gpio_set(gc, offset, val);
@@ -479,7 +476,6 @@ static int aspeed_gpio_dir_out(struct gpio_chip *gc,
if (copro)
aspeed_gpio_copro_release(gpio, offset);
- raw_spin_unlock_irqrestore(&gpio->lock, flags);
return 0;
}
@@ -487,7 +483,6 @@ static int aspeed_gpio_dir_out(struct gpio_chip *gc,
static int aspeed_gpio_get_direction(struct gpio_chip *gc, unsigned int offset)
{
struct aspeed_gpio *gpio = gpiochip_get_data(gc);
- unsigned long flags;
u32 val;
if (!have_input(gpio, offset))
@@ -496,12 +491,10 @@ static int aspeed_gpio_get_direction(struct gpio_chip *gc, unsigned int offset)
if (!have_output(gpio, offset))
return GPIO_LINE_DIRECTION_IN;
- raw_spin_lock_irqsave(&gpio->lock, flags);
+ guard(raw_spinlock_irqsave)(&gpio->lock);
val = gpio->config->llops->reg_bit_get(gpio, offset, reg_dir);
- raw_spin_unlock_irqrestore(&gpio->lock, flags);
-
return val ? GPIO_LINE_DIRECTION_OUT : GPIO_LINE_DIRECTION_IN;
}
@@ -527,7 +520,6 @@ static inline int irqd_to_aspeed_gpio_data(struct irq_data *d,
static void aspeed_gpio_irq_ack(struct irq_data *d)
{
struct aspeed_gpio *gpio;
- unsigned long flags;
int rc, offset;
bool copro = false;
@@ -535,20 +527,19 @@ static void aspeed_gpio_irq_ack(struct irq_data *d)
if (rc)
return;
- raw_spin_lock_irqsave(&gpio->lock, flags);
+ guard(raw_spinlock_irqsave)(&gpio->lock);
+
copro = aspeed_gpio_copro_request(gpio, offset);
gpio->config->llops->reg_bit_set(gpio, offset, reg_irq_status, 1);
if (copro)
aspeed_gpio_copro_release(gpio, offset);
- raw_spin_unlock_irqrestore(&gpio->lock, flags);
}
static void aspeed_gpio_irq_set_mask(struct irq_data *d, bool set)
{
struct aspeed_gpio *gpio;
- unsigned long flags;
int rc, offset;
bool copro = false;
@@ -560,14 +551,14 @@ static void aspeed_gpio_irq_set_mask(struct irq_data *d, bool set)
if (set)
gpiochip_enable_irq(&gpio->chip, irqd_to_hwirq(d));
- raw_spin_lock_irqsave(&gpio->lock, flags);
+ guard(raw_spinlock_irqsave)(&gpio->lock);
+
copro = aspeed_gpio_copro_request(gpio, offset);
gpio->config->llops->reg_bit_set(gpio, offset, reg_irq_enable, set);
if (copro)
aspeed_gpio_copro_release(gpio, offset);
- raw_spin_unlock_irqrestore(&gpio->lock, flags);
/* Masking the IRQ */
if (!set)
@@ -591,7 +582,6 @@ static int aspeed_gpio_set_type(struct irq_data *d, unsigned int type)
u32 type2 = 0;
irq_flow_handler_t handler;
struct aspeed_gpio *gpio;
- unsigned long flags;
int rc, offset;
bool copro = false;
@@ -620,16 +610,19 @@ static int aspeed_gpio_set_type(struct irq_data *d, unsigned int type)
return -EINVAL;
}
- raw_spin_lock_irqsave(&gpio->lock, flags);
- copro = aspeed_gpio_copro_request(gpio, offset);
+ scoped_guard(raw_spinlock_irqsave, &gpio->lock) {
+ copro = aspeed_gpio_copro_request(gpio, offset);
- gpio->config->llops->reg_bit_set(gpio, offset, reg_irq_type0, type0);
- gpio->config->llops->reg_bit_set(gpio, offset, reg_irq_type1, type1);
- gpio->config->llops->reg_bit_set(gpio, offset, reg_irq_type2, type2);
+ gpio->config->llops->reg_bit_set(gpio, offset, reg_irq_type0,
+ type0);
+ gpio->config->llops->reg_bit_set(gpio, offset, reg_irq_type1,
+ type1);
+ gpio->config->llops->reg_bit_set(gpio, offset, reg_irq_type2,
+ type2);
- if (copro)
- aspeed_gpio_copro_release(gpio, offset);
- raw_spin_unlock_irqrestore(&gpio->lock, flags);
+ if (copro)
+ aspeed_gpio_copro_release(gpio, offset);
+ }
irq_set_handler_locked(d, handler);
@@ -686,17 +679,16 @@ static int aspeed_gpio_reset_tolerance(struct gpio_chip *chip,
unsigned int offset, bool enable)
{
struct aspeed_gpio *gpio = gpiochip_get_data(chip);
- unsigned long flags;
bool copro = false;
- raw_spin_lock_irqsave(&gpio->lock, flags);
+ guard(raw_spinlock_irqsave)(&gpio->lock);
+
copro = aspeed_gpio_copro_request(gpio, offset);
gpio->config->llops->reg_bit_set(gpio, offset, reg_tolerance, enable);
if (copro)
aspeed_gpio_copro_release(gpio, offset);
- raw_spin_unlock_irqrestore(&gpio->lock, flags);
return 0;
}
@@ -798,7 +790,6 @@ static int enable_debounce(struct gpio_chip *chip, unsigned int offset,
{
struct aspeed_gpio *gpio = gpiochip_get_data(chip);
u32 requested_cycles;
- unsigned long flags;
int rc;
int i;
@@ -812,12 +803,12 @@ static int enable_debounce(struct gpio_chip *chip, unsigned int offset,
return rc;
}
- raw_spin_lock_irqsave(&gpio->lock, flags);
+ guard(raw_spinlock_irqsave)(&gpio->lock);
if (timer_allocation_registered(gpio, offset)) {
rc = unregister_allocated_timer(gpio, offset);
if (rc < 0)
- goto out;
+ return rc;
}
/* Try to find a timer already configured for the debounce period */
@@ -855,7 +846,7 @@ static int enable_debounce(struct gpio_chip *chip, unsigned int offset,
* consistency.
*/
configure_timer(gpio, offset, 0);
- goto out;
+ return rc;
}
i = j;
@@ -863,34 +854,26 @@ static int enable_debounce(struct gpio_chip *chip, unsigned int offset,
iowrite32(requested_cycles, gpio->base + gpio->config->debounce_timers_array[i]);
}
- if (WARN(i == 0, "Cannot register index of disabled timer\n")) {
- rc = -EINVAL;
- goto out;
- }
+ if (WARN(i == 0, "Cannot register index of disabled timer\n"))
+ return -EINVAL;
register_allocated_timer(gpio, offset, i);
configure_timer(gpio, offset, i);
-out:
- raw_spin_unlock_irqrestore(&gpio->lock, flags);
-
return rc;
}
static int disable_debounce(struct gpio_chip *chip, unsigned int offset)
{
struct aspeed_gpio *gpio = gpiochip_get_data(chip);
- unsigned long flags;
int rc;
- raw_spin_lock_irqsave(&gpio->lock, flags);
+ guard(raw_spinlock_irqsave)(&gpio->lock);
rc = unregister_allocated_timer(gpio, offset);
if (!rc)
configure_timer(gpio, offset, 0);
- raw_spin_unlock_irqrestore(&gpio->lock, flags);
-
return rc;
}
@@ -961,7 +944,6 @@ int aspeed_gpio_copro_grab_gpio(struct gpio_desc *desc,
struct aspeed_gpio *gpio = gpiochip_get_data(chip);
int rc = 0, bindex, offset = gpio_chip_hwgpio(desc);
const struct aspeed_gpio_bank *bank = to_bank(offset);
- unsigned long flags;
if (!aspeed_gpio_support_copro(gpio))
return -EOPNOTSUPP;
@@ -974,13 +956,12 @@ int aspeed_gpio_copro_grab_gpio(struct gpio_desc *desc,
return -EINVAL;
bindex = offset >> 3;
- raw_spin_lock_irqsave(&gpio->lock, flags);
+ guard(raw_spinlock_irqsave)(&gpio->lock);
/* Sanity check, this shouldn't happen */
- if (gpio->cf_copro_bankmap[bindex] == 0xff) {
- rc = -EIO;
- goto bail;
- }
+ if (gpio->cf_copro_bankmap[bindex] == 0xff)
+ return -EIO;
+
gpio->cf_copro_bankmap[bindex]++;
/* Switch command source */
@@ -994,8 +975,6 @@ int aspeed_gpio_copro_grab_gpio(struct gpio_desc *desc,
*dreg_offset = bank->rdata_reg;
if (bit)
*bit = GPIO_OFFSET(offset);
- bail:
- raw_spin_unlock_irqrestore(&gpio->lock, flags);
return rc;
}
EXPORT_SYMBOL_GPL(aspeed_gpio_copro_grab_gpio);
@@ -1009,7 +988,6 @@ int aspeed_gpio_copro_release_gpio(struct gpio_desc *desc)
struct gpio_chip *chip = gpiod_to_chip(desc);
struct aspeed_gpio *gpio = gpiochip_get_data(chip);
int rc = 0, bindex, offset = gpio_chip_hwgpio(desc);
- unsigned long flags;
if (!aspeed_gpio_support_copro(gpio))
return -EOPNOTSUPP;
@@ -1021,21 +999,19 @@ int aspeed_gpio_copro_release_gpio(struct gpio_desc *desc)
return -EINVAL;
bindex = offset >> 3;
- raw_spin_lock_irqsave(&gpio->lock, flags);
+ guard(raw_spinlock_irqsave)(&gpio->lock);
/* Sanity check, this shouldn't happen */
- if (gpio->cf_copro_bankmap[bindex] == 0) {
- rc = -EIO;
- goto bail;
- }
+ if (gpio->cf_copro_bankmap[bindex] == 0)
+ return -EIO;
+
gpio->cf_copro_bankmap[bindex]--;
/* Switch command source */
if (gpio->cf_copro_bankmap[bindex] == 0)
aspeed_gpio_change_cmd_source(gpio, offset,
GPIO_CMDSRC_ARM);
- bail:
- raw_spin_unlock_irqrestore(&gpio->lock, flags);
+
return rc;
}
EXPORT_SYMBOL_GPL(aspeed_gpio_copro_release_gpio);
@@ -1376,7 +1352,7 @@ static int aspeed_gpio_probe(struct platform_device *pdev)
gpio->chip.request = aspeed_gpio_request;
gpio->chip.free = aspeed_gpio_free;
gpio->chip.get = aspeed_gpio_get;
- gpio->chip.set = aspeed_gpio_set;
+ gpio->chip.set_rv = aspeed_gpio_set;
gpio->chip.set_config = aspeed_gpio_set_config;
gpio->chip.label = dev_name(&pdev->dev);
gpio->chip.base = -1;
diff --git a/drivers/gpio/gpio-bcm-kona.c b/drivers/gpio/gpio-bcm-kona.c
index 64908f1a5e7f..17c287dc7471 100644
--- a/drivers/gpio/gpio-bcm-kona.c
+++ b/drivers/gpio/gpio-bcm-kona.c
@@ -7,6 +7,7 @@
*/
#include <linux/bitops.h>
+#include <linux/cleanup.h>
#include <linux/err.h>
#include <linux/gpio/driver.h>
#include <linux/init.h>
@@ -100,7 +101,6 @@ static void bcm_kona_gpio_lock_gpio(struct bcm_kona_gpio *kona_gpio,
unsigned gpio)
{
u32 val;
- unsigned long flags;
int bank_id = GPIO_BANK(gpio);
int bit = GPIO_BIT(gpio);
struct bcm_kona_gpio_bank *bank = &kona_gpio->banks[bank_id];
@@ -112,13 +112,11 @@ static void bcm_kona_gpio_lock_gpio(struct bcm_kona_gpio *kona_gpio,
}
if (--bank->gpio_unlock_count[bit] == 0) {
- raw_spin_lock_irqsave(&kona_gpio->lock, flags);
+ guard(raw_spinlock_irqsave)(&kona_gpio->lock);
val = readl(kona_gpio->reg_base + GPIO_PWD_STATUS(bank_id));
val |= BIT(bit);
bcm_kona_gpio_write_lock_regs(kona_gpio->reg_base, bank_id, val);
-
- raw_spin_unlock_irqrestore(&kona_gpio->lock, flags);
}
}
@@ -126,19 +124,16 @@ static void bcm_kona_gpio_unlock_gpio(struct bcm_kona_gpio *kona_gpio,
unsigned gpio)
{
u32 val;
- unsigned long flags;
int bank_id = GPIO_BANK(gpio);
int bit = GPIO_BIT(gpio);
struct bcm_kona_gpio_bank *bank = &kona_gpio->banks[bank_id];
if (bank->gpio_unlock_count[bit] == 0) {
- raw_spin_lock_irqsave(&kona_gpio->lock, flags);
+ guard(raw_spinlock_irqsave)(&kona_gpio->lock);
val = readl(kona_gpio->reg_base + GPIO_PWD_STATUS(bank_id));
val &= ~BIT(bit);
bcm_kona_gpio_write_lock_regs(kona_gpio->reg_base, bank_id, val);
-
- raw_spin_unlock_irqrestore(&kona_gpio->lock, flags);
}
++bank->gpio_unlock_count[bit];
@@ -154,22 +149,23 @@ static int bcm_kona_gpio_get_dir(struct gpio_chip *chip, unsigned gpio)
return val ? GPIO_LINE_DIRECTION_IN : GPIO_LINE_DIRECTION_OUT;
}
-static void bcm_kona_gpio_set(struct gpio_chip *chip, unsigned gpio, int value)
+static int bcm_kona_gpio_set(struct gpio_chip *chip, unsigned int gpio,
+ int value)
{
struct bcm_kona_gpio *kona_gpio;
void __iomem *reg_base;
int bank_id = GPIO_BANK(gpio);
int bit = GPIO_BIT(gpio);
u32 val, reg_offset;
- unsigned long flags;
kona_gpio = gpiochip_get_data(chip);
reg_base = kona_gpio->reg_base;
- raw_spin_lock_irqsave(&kona_gpio->lock, flags);
+
+ guard(raw_spinlock_irqsave)(&kona_gpio->lock);
/* this function only applies to output pin */
if (bcm_kona_gpio_get_dir(chip, gpio) == GPIO_LINE_DIRECTION_IN)
- goto out;
+ return 0;
reg_offset = value ? GPIO_OUT_SET(bank_id) : GPIO_OUT_CLEAR(bank_id);
@@ -177,8 +173,7 @@ static void bcm_kona_gpio_set(struct gpio_chip *chip, unsigned gpio, int value)
val |= BIT(bit);
writel(val, reg_base + reg_offset);
-out:
- raw_spin_unlock_irqrestore(&kona_gpio->lock, flags);
+ return 0;
}
static int bcm_kona_gpio_get(struct gpio_chip *chip, unsigned gpio)
@@ -188,11 +183,11 @@ static int bcm_kona_gpio_get(struct gpio_chip *chip, unsigned gpio)
int bank_id = GPIO_BANK(gpio);
int bit = GPIO_BIT(gpio);
u32 val, reg_offset;
- unsigned long flags;
kona_gpio = gpiochip_get_data(chip);
reg_base = kona_gpio->reg_base;
- raw_spin_lock_irqsave(&kona_gpio->lock, flags);
+
+ guard(raw_spinlock_irqsave)(&kona_gpio->lock);
if (bcm_kona_gpio_get_dir(chip, gpio) == GPIO_LINE_DIRECTION_IN)
reg_offset = GPIO_IN_STATUS(bank_id);
@@ -202,8 +197,6 @@ static int bcm_kona_gpio_get(struct gpio_chip *chip, unsigned gpio)
/* read the GPIO bank status */
val = readl(reg_base + reg_offset);
- raw_spin_unlock_irqrestore(&kona_gpio->lock, flags);
-
/* return the specified bit status */
return !!(val & BIT(bit));
}
@@ -228,19 +221,17 @@ static int bcm_kona_gpio_direction_input(struct gpio_chip *chip, unsigned gpio)
struct bcm_kona_gpio *kona_gpio;
void __iomem *reg_base;
u32 val;
- unsigned long flags;
kona_gpio = gpiochip_get_data(chip);
reg_base = kona_gpio->reg_base;
- raw_spin_lock_irqsave(&kona_gpio->lock, flags);
+
+ guard(raw_spinlock_irqsave)(&kona_gpio->lock);
val = readl(reg_base + GPIO_CONTROL(gpio));
val &= ~GPIO_GPCTR0_IOTR_MASK;
val |= GPIO_GPCTR0_IOTR_CMD_INPUT;
writel(val, reg_base + GPIO_CONTROL(gpio));
- raw_spin_unlock_irqrestore(&kona_gpio->lock, flags);
-
return 0;
}
@@ -252,11 +243,11 @@ static int bcm_kona_gpio_direction_output(struct gpio_chip *chip,
int bank_id = GPIO_BANK(gpio);
int bit = GPIO_BIT(gpio);
u32 val, reg_offset;
- unsigned long flags;
kona_gpio = gpiochip_get_data(chip);
reg_base = kona_gpio->reg_base;
- raw_spin_lock_irqsave(&kona_gpio->lock, flags);
+
+ guard(raw_spinlock_irqsave)(&kona_gpio->lock);
val = readl(reg_base + GPIO_CONTROL(gpio));
val &= ~GPIO_GPCTR0_IOTR_MASK;
@@ -268,8 +259,6 @@ static int bcm_kona_gpio_direction_output(struct gpio_chip *chip,
val |= BIT(bit);
writel(val, reg_base + reg_offset);
- raw_spin_unlock_irqrestore(&kona_gpio->lock, flags);
-
return 0;
}
@@ -289,7 +278,6 @@ static int bcm_kona_gpio_set_debounce(struct gpio_chip *chip, unsigned gpio,
struct bcm_kona_gpio *kona_gpio;
void __iomem *reg_base;
u32 val, res;
- unsigned long flags;
kona_gpio = gpiochip_get_data(chip);
reg_base = kona_gpio->reg_base;
@@ -312,7 +300,7 @@ static int bcm_kona_gpio_set_debounce(struct gpio_chip *chip, unsigned gpio,
}
/* spin lock for read-modify-write of the GPIO register */
- raw_spin_lock_irqsave(&kona_gpio->lock, flags);
+ guard(raw_spinlock_irqsave)(&kona_gpio->lock);
val = readl(reg_base + GPIO_CONTROL(gpio));
val &= ~GPIO_GPCTR0_DBR_MASK;
@@ -327,8 +315,6 @@ static int bcm_kona_gpio_set_debounce(struct gpio_chip *chip, unsigned gpio,
writel(val, reg_base + GPIO_CONTROL(gpio));
- raw_spin_unlock_irqrestore(&kona_gpio->lock, flags);
-
return 0;
}
@@ -353,7 +339,7 @@ static const struct gpio_chip template_chip = {
.direction_input = bcm_kona_gpio_direction_input,
.get = bcm_kona_gpio_get,
.direction_output = bcm_kona_gpio_direction_output,
- .set = bcm_kona_gpio_set,
+ .set_rv = bcm_kona_gpio_set,
.set_config = bcm_kona_gpio_set_config,
.to_irq = bcm_kona_gpio_to_irq,
.base = 0,
@@ -367,17 +353,15 @@ static void bcm_kona_gpio_irq_ack(struct irq_data *d)
int bank_id = GPIO_BANK(gpio);
int bit = GPIO_BIT(gpio);
u32 val;
- unsigned long flags;
kona_gpio = irq_data_get_irq_chip_data(d);
reg_base = kona_gpio->reg_base;
- raw_spin_lock_irqsave(&kona_gpio->lock, flags);
+
+ guard(raw_spinlock_irqsave)(&kona_gpio->lock);
val = readl(reg_base + GPIO_INT_STATUS(bank_id));
val |= BIT(bit);
writel(val, reg_base + GPIO_INT_STATUS(bank_id));
-
- raw_spin_unlock_irqrestore(&kona_gpio->lock, flags);
}
static void bcm_kona_gpio_irq_mask(struct irq_data *d)
@@ -388,19 +372,16 @@ static void bcm_kona_gpio_irq_mask(struct irq_data *d)
int bank_id = GPIO_BANK(gpio);
int bit = GPIO_BIT(gpio);
u32 val;
- unsigned long flags;
kona_gpio = irq_data_get_irq_chip_data(d);
reg_base = kona_gpio->reg_base;
- raw_spin_lock_irqsave(&kona_gpio->lock, flags);
+ guard(raw_spinlock_irqsave)(&kona_gpio->lock);
val = readl(reg_base + GPIO_INT_MASK(bank_id));
val |= BIT(bit);
writel(val, reg_base + GPIO_INT_MASK(bank_id));
gpiochip_disable_irq(&kona_gpio->gpio_chip, gpio);
-
- raw_spin_unlock_irqrestore(&kona_gpio->lock, flags);
}
static void bcm_kona_gpio_irq_unmask(struct irq_data *d)
@@ -411,19 +392,16 @@ static void bcm_kona_gpio_irq_unmask(struct irq_data *d)
int bank_id = GPIO_BANK(gpio);
int bit = GPIO_BIT(gpio);
u32 val;
- unsigned long flags;
kona_gpio = irq_data_get_irq_chip_data(d);
reg_base = kona_gpio->reg_base;
- raw_spin_lock_irqsave(&kona_gpio->lock, flags);
+ guard(raw_spinlock_irqsave)(&kona_gpio->lock);
val = readl(reg_base + GPIO_INT_MSKCLR(bank_id));
val |= BIT(bit);
writel(val, reg_base + GPIO_INT_MSKCLR(bank_id));
gpiochip_enable_irq(&kona_gpio->gpio_chip, gpio);
-
- raw_spin_unlock_irqrestore(&kona_gpio->lock, flags);
}
static int bcm_kona_gpio_irq_set_type(struct irq_data *d, unsigned int type)
@@ -433,7 +411,6 @@ static int bcm_kona_gpio_irq_set_type(struct irq_data *d, unsigned int type)
unsigned gpio = d->hwirq;
u32 lvl_type;
u32 val;
- unsigned long flags;
kona_gpio = irq_data_get_irq_chip_data(d);
reg_base = kona_gpio->reg_base;
@@ -459,15 +436,13 @@ static int bcm_kona_gpio_irq_set_type(struct irq_data *d, unsigned int type)
return -EINVAL;
}
- raw_spin_lock_irqsave(&kona_gpio->lock, flags);
+ guard(raw_spinlock_irqsave)(&kona_gpio->lock);
val = readl(reg_base + GPIO_CONTROL(gpio));
val &= ~GPIO_GPCTR0_ITR_MASK;
val |= lvl_type << GPIO_GPCTR0_ITR_SHIFT;
writel(val, reg_base + GPIO_CONTROL(gpio));
- raw_spin_unlock_irqrestore(&kona_gpio->lock, flags);
-
return 0;
}
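
The bcm-kona changes also replace open-coded raw_spin_lock_irqsave()/raw_spin_unlock_irqrestore() pairs with the scope-based guard() helper from <linux/cleanup.h>: the lock is released automatically when the guard goes out of scope, which removes the flags variable and the unlock-on-every-exit-path bookkeeping (see the early return in bcm_kona_gpio_set() above). A small sketch under those assumptions, with foo_update() and foo_lock as hypothetical names:

#include <linux/cleanup.h>
#include <linux/io.h>
#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(foo_lock);	/* hypothetical lock */

static int foo_update(void __iomem *reg, u32 bit, bool set)
{
	u32 val;

	/* The lock is dropped automatically on every return path. */
	guard(raw_spinlock_irqsave)(&foo_lock);

	val = readl(reg);
	if (set)
		val |= bit;
	else
		val &= ~bit;
	writel(val, reg);

	return 0;
}
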
diff --git a/drivers/gpio/gpio-bd71815.c b/drivers/gpio/gpio-bd71815.c
index 08ff2857256f..36701500925e 100644
--- a/drivers/gpio/gpio-bd71815.c
+++ b/drivers/gpio/gpio-bd71815.c
@@ -37,21 +37,18 @@ static int bd71815gpo_get(struct gpio_chip *chip, unsigned int offset)
return (val >> offset) & 1;
}
-static void bd71815gpo_set(struct gpio_chip *chip, unsigned int offset,
- int value)
+static int bd71815gpo_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
struct bd71815_gpio *bd71815 = gpiochip_get_data(chip);
- int ret, bit;
+ int bit;
bit = BIT(offset);
if (value)
- ret = regmap_set_bits(bd71815->regmap, BD71815_REG_GPO, bit);
- else
- ret = regmap_clear_bits(bd71815->regmap, BD71815_REG_GPO, bit);
+ return regmap_set_bits(bd71815->regmap, BD71815_REG_GPO, bit);
- if (ret)
- dev_warn(bd71815->dev, "failed to toggle GPO\n");
+ return regmap_clear_bits(bd71815->regmap, BD71815_REG_GPO, bit);
}
static int bd71815_gpio_set_config(struct gpio_chip *chip, unsigned int offset,
@@ -88,7 +85,7 @@ static const struct gpio_chip bd71815gpo_chip = {
.owner = THIS_MODULE,
.get = bd71815gpo_get,
.get_direction = bd71815gpo_direction_get,
- .set = bd71815gpo_set,
+ .set_rv = bd71815gpo_set,
.set_config = bd71815_gpio_set_config,
.can_sleep = true,
};
diff --git a/drivers/gpio/gpio-bd71828.c b/drivers/gpio/gpio-bd71828.c
index b2ccc320c7b5..4ba151e5cf25 100644
--- a/drivers/gpio/gpio-bd71828.c
+++ b/drivers/gpio/gpio-bd71828.c
@@ -16,10 +16,9 @@ struct bd71828_gpio {
struct gpio_chip gpio;
};
-static void bd71828_gpio_set(struct gpio_chip *chip, unsigned int offset,
- int value)
+static int bd71828_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
- int ret;
struct bd71828_gpio *bdgpio = gpiochip_get_data(chip);
u8 val = (value) ? BD71828_GPIO_OUT_HI : BD71828_GPIO_OUT_LO;
@@ -28,12 +27,10 @@ static void bd71828_gpio_set(struct gpio_chip *chip, unsigned int offset,
* we are dealing with - then we are done
*/
if (offset == HALL_GPIO_OFFSET)
- return;
+ return 0;
- ret = regmap_update_bits(bdgpio->regmap, GPIO_OUT_REG(offset),
- BD71828_GPIO_OUT_MASK, val);
- if (ret)
- dev_err(bdgpio->dev, "Could not set gpio to %d\n", value);
+ return regmap_update_bits(bdgpio->regmap, GPIO_OUT_REG(offset),
+ BD71828_GPIO_OUT_MASK, val);
}
static int bd71828_gpio_get(struct gpio_chip *chip, unsigned int offset)
@@ -112,7 +109,7 @@ static int bd71828_probe(struct platform_device *pdev)
bdgpio->gpio.set_config = bd71828_gpio_set_config;
bdgpio->gpio.can_sleep = true;
bdgpio->gpio.get = bd71828_gpio_get;
- bdgpio->gpio.set = bd71828_gpio_set;
+ bdgpio->gpio.set_rv = bd71828_gpio_set;
bdgpio->gpio.base = -1;
/*
diff --git a/drivers/gpio/gpio-bd9571mwv.c b/drivers/gpio/gpio-bd9571mwv.c
index 9a4d55f703bb..8df1361e3e84 100644
--- a/drivers/gpio/gpio-bd9571mwv.c
+++ b/drivers/gpio/gpio-bd9571mwv.c
@@ -72,13 +72,13 @@ static int bd9571mwv_gpio_get(struct gpio_chip *chip, unsigned int offset)
return val & BIT(offset);
}
-static void bd9571mwv_gpio_set(struct gpio_chip *chip, unsigned int offset,
+static int bd9571mwv_gpio_set(struct gpio_chip *chip, unsigned int offset,
int value)
{
struct bd9571mwv_gpio *gpio = gpiochip_get_data(chip);
- regmap_update_bits(gpio->regmap, BD9571MWV_GPIO_OUT,
- BIT(offset), value ? BIT(offset) : 0);
+ return regmap_update_bits(gpio->regmap, BD9571MWV_GPIO_OUT,
+ BIT(offset), value ? BIT(offset) : 0);
}
static const struct gpio_chip template_chip = {
@@ -88,7 +88,7 @@ static const struct gpio_chip template_chip = {
.direction_input = bd9571mwv_gpio_direction_input,
.direction_output = bd9571mwv_gpio_direction_output,
.get = bd9571mwv_gpio_get,
- .set = bd9571mwv_gpio_set,
+ .set_rv = bd9571mwv_gpio_set,
.base = -1,
.ngpio = 2,
.can_sleep = true,
diff --git a/drivers/gpio/gpio-brcmstb.c b/drivers/gpio/gpio-brcmstb.c
index 491b529d25f8..ca3472977431 100644
--- a/drivers/gpio/gpio-brcmstb.c
+++ b/drivers/gpio/gpio-brcmstb.c
@@ -9,6 +9,7 @@
#include <linux/irqchip/chained_irq.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
+#include <linux/string_choices.h>
enum gio_reg_index {
GIO_REG_ODEN = 0,
@@ -224,7 +225,7 @@ static int brcmstb_gpio_priv_set_wake(struct brcmstb_gpio_priv *priv,
ret = disable_irq_wake(priv->parent_wake_irq);
if (ret)
dev_err(&priv->pdev->dev, "failed to %s wake-up interrupt\n",
- enable ? "enable" : "disable");
+ str_enable_disable(enable));
return ret;
}
diff --git a/drivers/gpio/gpio-bt8xx.c b/drivers/gpio/gpio-bt8xx.c
index 7920cf256798..7c9e81fea37a 100644
--- a/drivers/gpio/gpio-bt8xx.c
+++ b/drivers/gpio/gpio-bt8xx.c
@@ -31,6 +31,7 @@
*/
+#include <linux/cleanup.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
@@ -69,10 +70,9 @@ MODULE_PARM_DESC(gpiobase, "The GPIO number base. -1 means dynamic, which is the
static int bt8xxgpio_gpio_direction_input(struct gpio_chip *gpio, unsigned nr)
{
struct bt8xxgpio *bg = gpiochip_get_data(gpio);
- unsigned long flags;
u32 outen, data;
- spin_lock_irqsave(&bg->lock, flags);
+ guard(spinlock_irqsave)(&bg->lock);
data = bgread(BT848_GPIO_DATA);
data &= ~(1 << nr);
@@ -82,20 +82,17 @@ static int bt8xxgpio_gpio_direction_input(struct gpio_chip *gpio, unsigned nr)
outen &= ~(1 << nr);
bgwrite(outen, BT848_GPIO_OUT_EN);
- spin_unlock_irqrestore(&bg->lock, flags);
-
return 0;
}
static int bt8xxgpio_gpio_get(struct gpio_chip *gpio, unsigned nr)
{
struct bt8xxgpio *bg = gpiochip_get_data(gpio);
- unsigned long flags;
u32 val;
- spin_lock_irqsave(&bg->lock, flags);
+ guard(spinlock_irqsave)(&bg->lock);
+
val = bgread(BT848_GPIO_DATA);
- spin_unlock_irqrestore(&bg->lock, flags);
return !!(val & (1 << nr));
}
@@ -104,10 +101,9 @@ static int bt8xxgpio_gpio_direction_output(struct gpio_chip *gpio,
unsigned nr, int val)
{
struct bt8xxgpio *bg = gpiochip_get_data(gpio);
- unsigned long flags;
u32 outen, data;
- spin_lock_irqsave(&bg->lock, flags);
+ guard(spinlock_irqsave)(&bg->lock);
outen = bgread(BT848_GPIO_OUT_EN);
outen |= (1 << nr);
@@ -120,19 +116,15 @@ static int bt8xxgpio_gpio_direction_output(struct gpio_chip *gpio,
data &= ~(1 << nr);
bgwrite(data, BT848_GPIO_DATA);
- spin_unlock_irqrestore(&bg->lock, flags);
-
return 0;
}
-static void bt8xxgpio_gpio_set(struct gpio_chip *gpio,
- unsigned nr, int val)
+static int bt8xxgpio_gpio_set(struct gpio_chip *gpio, unsigned int nr, int val)
{
struct bt8xxgpio *bg = gpiochip_get_data(gpio);
- unsigned long flags;
u32 data;
- spin_lock_irqsave(&bg->lock, flags);
+ guard(spinlock_irqsave)(&bg->lock);
data = bgread(BT848_GPIO_DATA);
if (val)
@@ -141,7 +133,7 @@ static void bt8xxgpio_gpio_set(struct gpio_chip *gpio,
data &= ~(1 << nr);
bgwrite(data, BT848_GPIO_DATA);
- spin_unlock_irqrestore(&bg->lock, flags);
+ return 0;
}
static void bt8xxgpio_gpio_setup(struct bt8xxgpio *bg)
@@ -153,7 +145,7 @@ static void bt8xxgpio_gpio_setup(struct bt8xxgpio *bg)
c->direction_input = bt8xxgpio_gpio_direction_input;
c->get = bt8xxgpio_gpio_get;
c->direction_output = bt8xxgpio_gpio_direction_output;
- c->set = bt8xxgpio_gpio_set;
+ c->set_rv = bt8xxgpio_gpio_set;
c->dbg_show = NULL;
c->base = modparam_gpiobase;
c->ngpio = BT8XXGPIO_NR_GPIOS;
@@ -236,18 +228,15 @@ static void bt8xxgpio_remove(struct pci_dev *pdev)
static int bt8xxgpio_suspend(struct pci_dev *pdev, pm_message_t state)
{
struct bt8xxgpio *bg = pci_get_drvdata(pdev);
- unsigned long flags;
-
- spin_lock_irqsave(&bg->lock, flags);
- bg->saved_outen = bgread(BT848_GPIO_OUT_EN);
- bg->saved_data = bgread(BT848_GPIO_DATA);
+ scoped_guard(spinlock_irqsave, &bg->lock) {
+ bg->saved_outen = bgread(BT848_GPIO_OUT_EN);
+ bg->saved_data = bgread(BT848_GPIO_DATA);
- bgwrite(0, BT848_INT_MASK);
- bgwrite(~0x0, BT848_INT_STAT);
- bgwrite(0x0, BT848_GPIO_OUT_EN);
-
- spin_unlock_irqrestore(&bg->lock, flags);
+ bgwrite(0, BT848_INT_MASK);
+ bgwrite(~0x0, BT848_INT_STAT);
+ bgwrite(0x0, BT848_GPIO_OUT_EN);
+ }
pci_save_state(pdev);
pci_disable_device(pdev);
@@ -259,7 +248,6 @@ static int bt8xxgpio_suspend(struct pci_dev *pdev, pm_message_t state)
static int bt8xxgpio_resume(struct pci_dev *pdev)
{
struct bt8xxgpio *bg = pci_get_drvdata(pdev);
- unsigned long flags;
int err;
pci_set_power_state(pdev, PCI_D0);
@@ -268,7 +256,7 @@ static int bt8xxgpio_resume(struct pci_dev *pdev)
return err;
pci_restore_state(pdev);
- spin_lock_irqsave(&bg->lock, flags);
+ guard(spinlock_irqsave)(&bg->lock);
bgwrite(0, BT848_INT_MASK);
bgwrite(0, BT848_GPIO_DMA_CTL);
@@ -277,8 +265,6 @@ static int bt8xxgpio_resume(struct pci_dev *pdev)
bgwrite(bg->saved_data & bg->saved_outen,
BT848_GPIO_DATA);
- spin_unlock_irqrestore(&bg->lock, flags);
-
return 0;
}
#else
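
Where work must continue after the critical section, as in bt8xxgpio_suspend() above, the series uses scoped_guard() instead, so the lock is held only for the explicitly braced block. A minimal sketch with bar_suspend() and bar_lock as hypothetical names:

#include <linux/cleanup.h>
#include <linux/spinlock.h>
#include <linux/types.h>

static DEFINE_SPINLOCK(bar_lock);	/* hypothetical lock */
static u32 bar_saved_state;

static int bar_suspend(void)
{
	/* The lock is held only for the braced block below. */
	scoped_guard(spinlock_irqsave, &bar_lock) {
		bar_saved_state = 0xdeadbeef;	/* stand-in for register reads */
	}

	/* Further suspend work runs with the lock already released. */
	return 0;
}
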
diff --git a/drivers/gpio/gpio-cgbc.c b/drivers/gpio/gpio-cgbc.c
index 9213faa11522..1495bec62456 100644
--- a/drivers/gpio/gpio-cgbc.c
+++ b/drivers/gpio/gpio-cgbc.c
@@ -51,8 +51,8 @@ static int cgbc_gpio_get(struct gpio_chip *chip, unsigned int offset)
return (int)(val & (u8)BIT(offset));
}
-static void __cgbc_gpio_set(struct gpio_chip *chip,
- unsigned int offset, int value)
+static int __cgbc_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
struct cgbc_gpio_data *gpio = gpiochip_get_data(chip);
struct cgbc_device_data *cgbc = gpio->cgbc;
@@ -61,23 +61,23 @@ static void __cgbc_gpio_set(struct gpio_chip *chip,
ret = cgbc_gpio_cmd(cgbc, CGBC_GPIO_CMD_GET, (offset > 7) ? 1 : 0, 0, &val);
if (ret)
- return;
+ return ret;
if (value)
val |= BIT(offset % 8);
else
val &= ~(BIT(offset % 8));
- cgbc_gpio_cmd(cgbc, CGBC_GPIO_CMD_SET, (offset > 7) ? 1 : 0, val, &val);
+ return cgbc_gpio_cmd(cgbc, CGBC_GPIO_CMD_SET, (offset > 7) ? 1 : 0, val, &val);
}
-static void cgbc_gpio_set(struct gpio_chip *chip,
- unsigned int offset, int value)
+static int cgbc_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
{
struct cgbc_gpio_data *gpio = gpiochip_get_data(chip);
- scoped_guard(mutex, &gpio->lock)
- __cgbc_gpio_set(chip, offset, value);
+ guard(mutex)(&gpio->lock);
+
+ return __cgbc_gpio_set(chip, offset, value);
}
static int cgbc_gpio_direction_set(struct gpio_chip *chip,
@@ -116,10 +116,14 @@ static int cgbc_gpio_direction_output(struct gpio_chip *chip,
unsigned int offset, int value)
{
struct cgbc_gpio_data *gpio = gpiochip_get_data(chip);
+ int ret;
guard(mutex)(&gpio->lock);
- __cgbc_gpio_set(chip, offset, value);
+ ret = __cgbc_gpio_set(chip, offset, value);
+ if (ret)
+ return ret;
+
return cgbc_gpio_direction_set(chip, offset, GPIO_LINE_DIRECTION_OUT);
}
@@ -167,7 +171,7 @@ static int cgbc_gpio_probe(struct platform_device *pdev)
chip->direction_output = cgbc_gpio_direction_output;
chip->get_direction = cgbc_gpio_get_direction;
chip->get = cgbc_gpio_get;
- chip->set = cgbc_gpio_set;
+ chip->set_rv = cgbc_gpio_set;
chip->ngpio = CGBC_GPIO_NGPIO;
ret = devm_mutex_init(dev, &gpio->lock);
diff --git a/drivers/gpio/gpio-creg-snps.c b/drivers/gpio/gpio-creg-snps.c
index 4968232f70f2..8b49f02c7896 100644
--- a/drivers/gpio/gpio-creg-snps.c
+++ b/drivers/gpio/gpio-creg-snps.c
@@ -27,7 +27,7 @@ struct creg_gpio {
const struct creg_layout *layout;
};
-static void creg_gpio_set(struct gpio_chip *gc, unsigned int offset, int val)
+static int creg_gpio_set(struct gpio_chip *gc, unsigned int offset, int val)
{
struct creg_gpio *hcg = gpiochip_get_data(gc);
const struct creg_layout *layout = hcg->layout;
@@ -47,13 +47,13 @@ static void creg_gpio_set(struct gpio_chip *gc, unsigned int offset, int val)
reg |= (value << reg_shift);
writel(reg, hcg->regs);
spin_unlock_irqrestore(&hcg->lock, flags);
+
+ return 0;
}
static int creg_gpio_dir_out(struct gpio_chip *gc, unsigned int offset, int val)
{
- creg_gpio_set(gc, offset, val);
-
- return 0;
+ return creg_gpio_set(gc, offset, val);
}
static int creg_gpio_validate_pg(struct device *dev, struct creg_gpio *hcg,
@@ -167,7 +167,7 @@ static int creg_gpio_probe(struct platform_device *pdev)
hcg->gc.label = dev_name(dev);
hcg->gc.base = -1;
hcg->gc.ngpio = ngpios;
- hcg->gc.set = creg_gpio_set;
+ hcg->gc.set_rv = creg_gpio_set;
hcg->gc.direction_output = creg_gpio_dir_out;
ret = devm_gpiochip_add_data(dev, &hcg->gc, hcg);
diff --git a/drivers/gpio/gpio-cros-ec.c b/drivers/gpio/gpio-cros-ec.c
index 0c09bb54dc0c..53cd5ff6247b 100644
--- a/drivers/gpio/gpio-cros-ec.c
+++ b/drivers/gpio/gpio-cros-ec.c
@@ -24,24 +24,21 @@
static const char cros_ec_gpio_prefix[] = "EC:";
/* Setting gpios is only supported when the system is unlocked */
-static void cros_ec_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
+static int cros_ec_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
{
const char *name = gc->names[gpio] + strlen(cros_ec_gpio_prefix);
struct cros_ec_device *cros_ec = gpiochip_get_data(gc);
struct ec_params_gpio_set params = {
.val = val,
};
- int ret;
ssize_t copied;
copied = strscpy(params.name, name, sizeof(params.name));
if (copied < 0)
- return;
+ return copied;
- ret = cros_ec_cmd(cros_ec, 0, EC_CMD_GPIO_SET, &params,
- sizeof(params), NULL, 0);
- if (ret < 0)
- dev_err(gc->parent, "error setting gpio%d (%s) on EC: %d\n", gpio, name, ret);
+ return cros_ec_cmd(cros_ec, 0, EC_CMD_GPIO_SET, &params,
+ sizeof(params), NULL, 0);
}
static int cros_ec_gpio_get(struct gpio_chip *gc, unsigned int gpio)
@@ -191,7 +188,7 @@ static int cros_ec_gpio_probe(struct platform_device *pdev)
gc->can_sleep = true;
gc->label = dev_name(dev);
gc->base = -1;
- gc->set = cros_ec_gpio_set;
+ gc->set_rv = cros_ec_gpio_set;
gc->get = cros_ec_gpio_get;
gc->get_direction = cros_ec_gpio_get_direction;
diff --git a/drivers/gpio/gpio-crystalcove.c b/drivers/gpio/gpio-crystalcove.c
index 25db014494a4..8db7cca3a060 100644
--- a/drivers/gpio/gpio-crystalcove.c
+++ b/drivers/gpio/gpio-crystalcove.c
@@ -15,6 +15,7 @@
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/seq_file.h>
+#include <linux/string_choices.h>
#include <linux/types.h>
#define CRYSTALCOVE_GPIO_NUM 16
@@ -167,18 +168,18 @@ static int crystalcove_gpio_get(struct gpio_chip *chip, unsigned int gpio)
return val & 0x1;
}
-static void crystalcove_gpio_set(struct gpio_chip *chip, unsigned int gpio, int value)
+static int crystalcove_gpio_set(struct gpio_chip *chip, unsigned int gpio, int value)
{
struct crystalcove_gpio *cg = gpiochip_get_data(chip);
int reg = to_reg(gpio, CTRL_OUT);
if (reg < 0)
- return;
+ return 0;
if (value)
- regmap_update_bits(cg->regmap, reg, 1, 1);
- else
- regmap_update_bits(cg->regmap, reg, 1, 0);
+ return regmap_update_bits(cg->regmap, reg, 1, 1);
+
+ return regmap_update_bits(cg->regmap, reg, 1, 0);
}
static int crystalcove_irq_type(struct irq_data *data, unsigned int type)
@@ -317,7 +318,7 @@ static void crystalcove_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip
offset = gpio % 8;
seq_printf(s, " gpio-%-2d %s %s %s %s ctlo=%2x,%s %s %s\n",
gpio, ctlo & CTLO_DIR_OUT ? "out" : "in ",
- ctli & 0x1 ? "hi" : "lo",
+ str_hi_lo(ctli & 0x1),
ctli & CTLI_INTCNT_NE ? "fall" : " ",
ctli & CTLI_INTCNT_PE ? "rise" : " ",
ctlo,
@@ -348,7 +349,7 @@ static int crystalcove_gpio_probe(struct platform_device *pdev)
cg->chip.direction_input = crystalcove_gpio_dir_in;
cg->chip.direction_output = crystalcove_gpio_dir_out;
cg->chip.get = crystalcove_gpio_get;
- cg->chip.set = crystalcove_gpio_set;
+ cg->chip.set_rv = crystalcove_gpio_set;
cg->chip.base = -1;
cg->chip.ngpio = CRYSTALCOVE_VGPIO_NUM;
cg->chip.can_sleep = true;
diff --git a/drivers/gpio/gpio-cs5535.c b/drivers/gpio/gpio-cs5535.c
index 6da3a247614a..143d1f4173a6 100644
--- a/drivers/gpio/gpio-cs5535.c
+++ b/drivers/gpio/gpio-cs5535.c
@@ -232,12 +232,14 @@ static int chip_gpio_get(struct gpio_chip *chip, unsigned offset)
return cs5535_gpio_isset(offset, GPIO_READ_BACK);
}
-static void chip_gpio_set(struct gpio_chip *chip, unsigned offset, int val)
+static int chip_gpio_set(struct gpio_chip *chip, unsigned int offset, int val)
{
if (val)
cs5535_gpio_set(offset, GPIO_OUTPUT_VAL);
else
cs5535_gpio_clear(offset, GPIO_OUTPUT_VAL);
+
+ return 0;
}
static int chip_direction_input(struct gpio_chip *c, unsigned offset)
@@ -294,7 +296,7 @@ static struct cs5535_gpio_chip cs5535_gpio_chip = {
.request = chip_gpio_request,
.get = chip_gpio_get,
- .set = chip_gpio_set,
+ .set_rv = chip_gpio_set,
.direction_input = chip_direction_input,
.direction_output = chip_direction_output,
diff --git a/drivers/gpio/gpio-da9052.c b/drivers/gpio/gpio-da9052.c
index 6f3905f1b8f5..6482c5b267db 100644
--- a/drivers/gpio/gpio-da9052.c
+++ b/drivers/gpio/gpio-da9052.c
@@ -89,30 +89,20 @@ static int da9052_gpio_get(struct gpio_chip *gc, unsigned offset)
}
}
-static void da9052_gpio_set(struct gpio_chip *gc, unsigned offset, int value)
+static int da9052_gpio_set(struct gpio_chip *gc, unsigned int offset, int value)
{
struct da9052_gpio *gpio = gpiochip_get_data(gc);
- int ret;
- if (da9052_gpio_port_odd(offset)) {
- ret = da9052_reg_update(gpio->da9052, (offset >> 1) +
- DA9052_GPIO_0_1_REG,
- DA9052_GPIO_ODD_PORT_MODE,
- value << DA9052_GPIO_ODD_SHIFT);
- if (ret != 0)
- dev_err(gpio->da9052->dev,
- "Failed to updated gpio odd reg,%d",
- ret);
- } else {
- ret = da9052_reg_update(gpio->da9052, (offset >> 1) +
- DA9052_GPIO_0_1_REG,
- DA9052_GPIO_EVEN_PORT_MODE,
- value << DA9052_GPIO_EVEN_SHIFT);
- if (ret != 0)
- dev_err(gpio->da9052->dev,
- "Failed to updated gpio even reg,%d",
- ret);
- }
+ if (da9052_gpio_port_odd(offset))
+ return da9052_reg_update(gpio->da9052, (offset >> 1) +
+ DA9052_GPIO_0_1_REG,
+ DA9052_GPIO_ODD_PORT_MODE,
+ value << DA9052_GPIO_ODD_SHIFT);
+
+ return da9052_reg_update(gpio->da9052,
+ (offset >> 1) + DA9052_GPIO_0_1_REG,
+ DA9052_GPIO_EVEN_PORT_MODE,
+ value << DA9052_GPIO_EVEN_SHIFT);
}
static int da9052_gpio_direction_input(struct gpio_chip *gc, unsigned offset)
@@ -182,7 +172,7 @@ static const struct gpio_chip reference_gp = {
.label = "da9052-gpio",
.owner = THIS_MODULE,
.get = da9052_gpio_get,
- .set = da9052_gpio_set,
+ .set_rv = da9052_gpio_set,
.direction_input = da9052_gpio_direction_input,
.direction_output = da9052_gpio_direction_output,
.to_irq = da9052_gpio_to_irq,
diff --git a/drivers/gpio/gpio-da9055.c b/drivers/gpio/gpio-da9055.c
index 49446a030f10..3d9d0c700100 100644
--- a/drivers/gpio/gpio-da9055.c
+++ b/drivers/gpio/gpio-da9055.c
@@ -59,14 +59,12 @@ static int da9055_gpio_get(struct gpio_chip *gc, unsigned offset)
}
-static void da9055_gpio_set(struct gpio_chip *gc, unsigned offset, int value)
+static int da9055_gpio_set(struct gpio_chip *gc, unsigned int offset, int value)
{
struct da9055_gpio *gpio = gpiochip_get_data(gc);
- da9055_reg_update(gpio->da9055,
- DA9055_REG_GPIO_MODE0_2,
- 1 << offset,
- value << offset);
+ return da9055_reg_update(gpio->da9055, DA9055_REG_GPIO_MODE0_2,
+ 1 << offset, value << offset);
}
static int da9055_gpio_direction_input(struct gpio_chip *gc, unsigned offset)
@@ -102,9 +100,7 @@ static int da9055_gpio_direction_output(struct gpio_chip *gc,
if (ret < 0)
return ret;
- da9055_gpio_set(gc, offset, value);
-
- return 0;
+ return da9055_gpio_set(gc, offset, value);
}
static int da9055_gpio_to_irq(struct gpio_chip *gc, u32 offset)
@@ -120,7 +116,7 @@ static const struct gpio_chip reference_gp = {
.label = "da9055-gpio",
.owner = THIS_MODULE,
.get = da9055_gpio_get,
- .set = da9055_gpio_set,
+ .set_rv = da9055_gpio_set,
.direction_input = da9055_gpio_direction_input,
.direction_output = da9055_gpio_direction_output,
.to_irq = da9055_gpio_to_irq,
diff --git a/drivers/gpio/gpio-davinci.c b/drivers/gpio/gpio-davinci.c
index 8c033e8cf3c9..63fc7888c1d4 100644
--- a/drivers/gpio/gpio-davinci.c
+++ b/drivers/gpio/gpio-davinci.c
@@ -139,7 +139,7 @@ static int davinci_gpio_get(struct gpio_chip *chip, unsigned offset)
/*
* Assuming the pin is muxed as a gpio output, set its output value.
*/
-static void
+static int
davinci_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
{
struct davinci_gpio_controller *d = gpiochip_get_data(chip);
@@ -150,6 +150,8 @@ davinci_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
writel_relaxed(__gpio_mask(offset),
value ? &g->set_data : &g->clr_data);
+
+ return 0;
}
static int davinci_gpio_probe(struct platform_device *pdev)
@@ -209,7 +211,7 @@ static int davinci_gpio_probe(struct platform_device *pdev)
chips->chip.direction_input = davinci_direction_in;
chips->chip.get = davinci_gpio_get;
chips->chip.direction_output = davinci_direction_out;
- chips->chip.set = davinci_gpio_set;
+ chips->chip.set_rv = davinci_gpio_set;
chips->chip.ngpio = ngpio;
chips->chip.base = -1;
diff --git a/drivers/gpio/gpio-grgpio.c b/drivers/gpio/gpio-grgpio.c
index 169f33c41c59..30a0522ae735 100644
--- a/drivers/gpio/gpio-grgpio.c
+++ b/drivers/gpio/gpio-grgpio.c
@@ -30,6 +30,7 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
+#include <linux/string_choices.h>
#define GRGPIO_MAX_NGPIO 32
@@ -438,7 +439,7 @@ static int grgpio_probe(struct platform_device *ofdev)
}
dev_info(dev, "regs=0x%p, base=%d, ngpio=%d, irqs=%s\n",
- priv->regs, gc->base, gc->ngpio, priv->domain ? "on" : "off");
+ priv->regs, gc->base, gc->ngpio, str_on_off(priv->domain));
return 0;
}
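
grgpio above, like brcmstb, crystalcove, mvebu and nomadik later in the series, drops open-coded cond ? "on" : "off" ternaries in favour of the helpers from <linux/string_choices.h>, which read better in format strings and avoid duplicating string literals. A short sketch of the idiom, with foo_report() as a hypothetical caller:

#include <linux/printk.h>
#include <linux/string_choices.h>
#include <linux/types.h>

static void foo_report(bool enabled, bool level)
{
	/* str_on_off()/str_hi_lo() replace cond ? "on" : "off" ternaries. */
	pr_info("irqs=%s, line is %s\n", str_on_off(enabled), str_hi_lo(level));
}
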
diff --git a/drivers/gpio/gpio-latch.c b/drivers/gpio/gpio-latch.c
index d7c3b20c8482..3d0ff09284fb 100644
--- a/drivers/gpio/gpio-latch.c
+++ b/drivers/gpio/gpio-latch.c
@@ -38,12 +38,14 @@
* in the corresponding device tree properties.
*/
+#include <linux/cleanup.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>
#include <linux/gpio/driver.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/delay.h>
#include "gpiolib.h"
@@ -71,46 +73,46 @@ static int gpio_latch_get_direction(struct gpio_chip *gc, unsigned int offset)
return GPIO_LINE_DIRECTION_OUT;
}
-static void gpio_latch_set_unlocked(struct gpio_latch_priv *priv,
- void (*set)(struct gpio_desc *desc, int value),
- unsigned int offset, bool val)
+static int gpio_latch_set_unlocked(struct gpio_latch_priv *priv,
+ int (*set)(struct gpio_desc *desc, int value),
+ unsigned int offset, bool val)
{
- int latch = offset / priv->n_latched_gpios;
- int i;
+ int latch = offset / priv->n_latched_gpios, i, ret;
assign_bit(offset, priv->shadow, val);
- for (i = 0; i < priv->n_latched_gpios; i++)
- set(priv->latched_gpios->desc[i],
- test_bit(latch * priv->n_latched_gpios + i, priv->shadow));
+ for (i = 0; i < priv->n_latched_gpios; i++) {
+ ret = set(priv->latched_gpios->desc[i],
+ test_bit(latch * priv->n_latched_gpios + i,
+ priv->shadow));
+ if (ret)
+ return ret;
+ }
ndelay(priv->setup_duration_ns);
set(priv->clk_gpios->desc[latch], 1);
ndelay(priv->clock_duration_ns);
set(priv->clk_gpios->desc[latch], 0);
+
+ return 0;
}
-static void gpio_latch_set(struct gpio_chip *gc, unsigned int offset, int val)
+static int gpio_latch_set(struct gpio_chip *gc, unsigned int offset, int val)
{
struct gpio_latch_priv *priv = gpiochip_get_data(gc);
- unsigned long flags;
-
- spin_lock_irqsave(&priv->spinlock, flags);
- gpio_latch_set_unlocked(priv, gpiod_set_value, offset, val);
+ guard(spinlock_irqsave)(&priv->spinlock);
- spin_unlock_irqrestore(&priv->spinlock, flags);
+ return gpio_latch_set_unlocked(priv, gpiod_set_value, offset, val);
}
-static void gpio_latch_set_can_sleep(struct gpio_chip *gc, unsigned int offset, int val)
+static int gpio_latch_set_can_sleep(struct gpio_chip *gc, unsigned int offset, int val)
{
struct gpio_latch_priv *priv = gpiochip_get_data(gc);
- mutex_lock(&priv->mutex);
-
- gpio_latch_set_unlocked(priv, gpiod_set_value_cansleep, offset, val);
+ guard(mutex)(&priv->mutex);
- mutex_unlock(&priv->mutex);
+ return gpio_latch_set_unlocked(priv, gpiod_set_value_cansleep, offset, val);
}
static bool gpio_latch_can_sleep(struct gpio_latch_priv *priv, unsigned int n_latches)
@@ -138,50 +140,52 @@ static bool gpio_latch_can_sleep(struct gpio_latch_priv *priv, unsigned int n_la
static int gpio_latch_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
struct gpio_latch_priv *priv;
unsigned int n_latches;
- struct device_node *np = pdev->dev.of_node;
- priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
- priv->clk_gpios = devm_gpiod_get_array(&pdev->dev, "clk", GPIOD_OUT_LOW);
+ priv->clk_gpios = devm_gpiod_get_array(dev, "clk", GPIOD_OUT_LOW);
if (IS_ERR(priv->clk_gpios))
return PTR_ERR(priv->clk_gpios);
- priv->latched_gpios = devm_gpiod_get_array(&pdev->dev, "latched", GPIOD_OUT_LOW);
+ priv->latched_gpios = devm_gpiod_get_array(dev, "latched", GPIOD_OUT_LOW);
if (IS_ERR(priv->latched_gpios))
return PTR_ERR(priv->latched_gpios);
n_latches = priv->clk_gpios->ndescs;
priv->n_latched_gpios = priv->latched_gpios->ndescs;
- priv->shadow = devm_bitmap_zalloc(&pdev->dev, n_latches * priv->n_latched_gpios,
+ priv->shadow = devm_bitmap_zalloc(dev, n_latches * priv->n_latched_gpios,
GFP_KERNEL);
if (!priv->shadow)
return -ENOMEM;
if (gpio_latch_can_sleep(priv, n_latches)) {
priv->gc.can_sleep = true;
- priv->gc.set = gpio_latch_set_can_sleep;
+ priv->gc.set_rv = gpio_latch_set_can_sleep;
mutex_init(&priv->mutex);
} else {
priv->gc.can_sleep = false;
- priv->gc.set = gpio_latch_set;
+ priv->gc.set_rv = gpio_latch_set;
spin_lock_init(&priv->spinlock);
}
- of_property_read_u32(np, "setup-duration-ns", &priv->setup_duration_ns);
+ device_property_read_u32(dev, "setup-duration-ns",
+ &priv->setup_duration_ns);
if (priv->setup_duration_ns > DURATION_NS_MAX) {
- dev_warn(&pdev->dev, "setup-duration-ns too high, limit to %d\n",
+ dev_warn(dev, "setup-duration-ns too high, limit to %d\n",
DURATION_NS_MAX);
priv->setup_duration_ns = DURATION_NS_MAX;
}
- of_property_read_u32(np, "clock-duration-ns", &priv->clock_duration_ns);
+ device_property_read_u32(dev, "clock-duration-ns",
+ &priv->clock_duration_ns);
if (priv->clock_duration_ns > DURATION_NS_MAX) {
- dev_warn(&pdev->dev, "clock-duration-ns too high, limit to %d\n",
+ dev_warn(dev, "clock-duration-ns too high, limit to %d\n",
DURATION_NS_MAX);
priv->clock_duration_ns = DURATION_NS_MAX;
}
@@ -190,11 +194,11 @@ static int gpio_latch_probe(struct platform_device *pdev)
priv->gc.ngpio = n_latches * priv->n_latched_gpios;
priv->gc.owner = THIS_MODULE;
priv->gc.base = -1;
- priv->gc.parent = &pdev->dev;
+ priv->gc.parent = dev;
platform_set_drvdata(pdev, priv);
- return devm_gpiochip_add_data(&pdev->dev, &priv->gc, priv);
+ return devm_gpiochip_add_data(dev, &priv->gc, priv);
}
static const struct of_device_id gpio_latch_ids[] = {
diff --git a/drivers/gpio/gpio-loongson-64bit.c b/drivers/gpio/gpio-loongson-64bit.c
index 7f4d78fd800e..a9a93036f08f 100644
--- a/drivers/gpio/gpio-loongson-64bit.c
+++ b/drivers/gpio/gpio-loongson-64bit.c
@@ -31,7 +31,6 @@ struct loongson_gpio_chip_data {
struct loongson_gpio_chip {
struct gpio_chip chip;
- struct fwnode_handle *fwnode;
spinlock_t lock;
void __iomem *reg_base;
const struct loongson_gpio_chip_data *chip_data;
@@ -138,7 +137,6 @@ static int loongson_gpio_init(struct device *dev, struct loongson_gpio_chip *lgp
void __iomem *reg_base)
{
int ret;
- u32 ngpios;
lgpio->reg_base = reg_base;
if (lgpio->chip_data->mode == BIT_CTRL_MODE) {
@@ -159,8 +157,6 @@ static int loongson_gpio_init(struct device *dev, struct loongson_gpio_chip *lgp
lgpio->chip.direction_output = loongson_gpio_direction_output;
lgpio->chip.set = loongson_gpio_set;
lgpio->chip.parent = dev;
- device_property_read_u32(dev, "ngpios", &ngpios);
- lgpio->chip.ngpio = ngpios;
spin_lock_init(&lgpio->lock);
}
@@ -258,6 +254,33 @@ static const struct loongson_gpio_chip_data loongson_gpio_ls7a_data = {
.out_offset = 0x900,
};
+/* LS7A2000 chipset GPIO */
+static const struct loongson_gpio_chip_data loongson_gpio_ls7a2000_data0 = {
+ .label = "ls7a2000_gpio",
+ .mode = BYTE_CTRL_MODE,
+ .conf_offset = 0x800,
+ .in_offset = 0xa00,
+ .out_offset = 0x900,
+};
+
+/* LS7A2000 ACPI GPIO */
+static const struct loongson_gpio_chip_data loongson_gpio_ls7a2000_data1 = {
+ .label = "ls7a2000_gpio",
+ .mode = BYTE_CTRL_MODE,
+ .conf_offset = 0x4,
+ .in_offset = 0x8,
+ .out_offset = 0x0,
+};
+
+/* Loongson-3A6000 node GPIO */
+static const struct loongson_gpio_chip_data loongson_gpio_ls3a6000_data = {
+ .label = "ls3a6000_gpio",
+ .mode = BIT_CTRL_MODE,
+ .conf_offset = 0x0,
+ .in_offset = 0xc,
+ .out_offset = 0x8,
+};
+
static const struct of_device_id loongson_gpio_of_match[] = {
{
.compatible = "loongson,ls2k-gpio",
@@ -291,6 +314,18 @@ static const struct of_device_id loongson_gpio_of_match[] = {
.compatible = "loongson,ls7a-gpio",
.data = &loongson_gpio_ls7a_data,
},
+ {
+ .compatible = "loongson,ls7a2000-gpio1",
+ .data = &loongson_gpio_ls7a2000_data0,
+ },
+ {
+ .compatible = "loongson,ls7a2000-gpio2",
+ .data = &loongson_gpio_ls7a2000_data1,
+ },
+ {
+ .compatible = "loongson,ls3a6000-gpio",
+ .data = &loongson_gpio_ls3a6000_data,
+ },
{}
};
MODULE_DEVICE_TABLE(of, loongson_gpio_of_match);
@@ -316,6 +351,18 @@ static const struct acpi_device_id loongson_gpio_acpi_match[] = {
.id = "LOON000C",
.driver_data = (kernel_ulong_t)&loongson_gpio_ls2k2000_data2,
},
+ {
+ .id = "LOON000D",
+ .driver_data = (kernel_ulong_t)&loongson_gpio_ls7a2000_data0,
+ },
+ {
+ .id = "LOON000E",
+ .driver_data = (kernel_ulong_t)&loongson_gpio_ls7a2000_data1,
+ },
+ {
+ .id = "LOON000F",
+ .driver_data = (kernel_ulong_t)&loongson_gpio_ls3a6000_data,
+ },
{}
};
MODULE_DEVICE_TABLE(acpi, loongson_gpio_acpi_match);
diff --git a/drivers/gpio/gpio-max3191x.c b/drivers/gpio/gpio-max3191x.c
index bbacc714632b..fc0708ab5192 100644
--- a/drivers/gpio/gpio-max3191x.c
+++ b/drivers/gpio/gpio-max3191x.c
@@ -309,23 +309,21 @@ static int max3191x_set_config(struct gpio_chip *gpio, unsigned int offset,
return 0;
}
-static void gpiod_set_array_single_value_cansleep(unsigned int ndescs,
- struct gpio_desc **desc,
- struct gpio_array *info,
+static void max3191x_gpiod_multi_set_single_value(struct gpio_descs *descs,
int value)
{
unsigned long *values;
- values = bitmap_alloc(ndescs, GFP_KERNEL);
+ values = bitmap_alloc(descs->ndescs, GFP_KERNEL);
if (!values)
return;
if (value)
- bitmap_fill(values, ndescs);
+ bitmap_fill(values, descs->ndescs);
else
- bitmap_zero(values, ndescs);
+ bitmap_zero(values, descs->ndescs);
- gpiod_set_array_value_cansleep(ndescs, desc, info, values);
+ gpiod_multi_set_value_cansleep(descs, values);
bitmap_free(values);
}
@@ -396,10 +394,8 @@ static int max3191x_probe(struct spi_device *spi)
max3191x->mode = device_property_read_bool(dev, "maxim,modesel-8bit")
? STATUS_BYTE_DISABLED : STATUS_BYTE_ENABLED;
if (max3191x->modesel_pins)
- gpiod_set_array_single_value_cansleep(
- max3191x->modesel_pins->ndescs,
- max3191x->modesel_pins->desc,
- max3191x->modesel_pins->info, max3191x->mode);
+ max3191x_gpiod_multi_set_single_value(max3191x->modesel_pins,
+ max3191x->mode);
max3191x->ignore_uv = device_property_read_bool(dev,
"maxim,ignore-undervoltage");
diff --git a/drivers/gpio/gpio-max77650.c b/drivers/gpio/gpio-max77650.c
index 3075f2513c6f..a553e141059f 100644
--- a/drivers/gpio/gpio-max77650.c
+++ b/drivers/gpio/gpio-max77650.c
@@ -62,18 +62,16 @@ static int max77650_gpio_direction_output(struct gpio_chip *gc,
MAX77650_REG_CNFG_GPIO, mask, regval);
}
-static void max77650_gpio_set_value(struct gpio_chip *gc,
- unsigned int offset, int value)
+static int max77650_gpio_set_value(struct gpio_chip *gc,
+ unsigned int offset, int value)
{
struct max77650_gpio_chip *chip = gpiochip_get_data(gc);
- int rv, regval;
+ int regval;
regval = value ? MAX77650_GPIO_OUT_HIGH : MAX77650_GPIO_OUT_LOW;
- rv = regmap_update_bits(chip->map, MAX77650_REG_CNFG_GPIO,
- MAX77650_GPIO_OUTVAL_MASK, regval);
- if (rv)
- dev_err(gc->parent, "cannot set GPIO value: %d\n", rv);
+ return regmap_update_bits(chip->map, MAX77650_REG_CNFG_GPIO,
+ MAX77650_GPIO_OUTVAL_MASK, regval);
}
static int max77650_gpio_get_value(struct gpio_chip *gc,
@@ -168,7 +166,7 @@ static int max77650_gpio_probe(struct platform_device *pdev)
chip->gc.direction_input = max77650_gpio_direction_input;
chip->gc.direction_output = max77650_gpio_direction_output;
- chip->gc.set = max77650_gpio_set_value;
+ chip->gc.set_rv = max77650_gpio_set_value;
chip->gc.get = max77650_gpio_get_value;
chip->gc.get_direction = max77650_gpio_get_direction;
chip->gc.set_config = max77650_gpio_set_config;
diff --git a/drivers/gpio/gpio-mmio.c b/drivers/gpio/gpio-mmio.c
index d89e78f0ead3..4841e4ebe7a6 100644
--- a/drivers/gpio/gpio-mmio.c
+++ b/drivers/gpio/gpio-mmio.c
@@ -49,6 +49,7 @@ o ` ~~~~\___/~~~~ ` controller in FPGA is ,.`
#include <linux/log2.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
+#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/slab.h>
@@ -323,9 +324,20 @@ static void bgpio_set_multiple_with_clear(struct gpio_chip *gc,
gc->write_reg(gc->reg_clr, clear_mask);
}
+static int bgpio_dir_return(struct gpio_chip *gc, unsigned int gpio, bool dir_out)
+{
+ if (!gc->bgpio_pinctrl)
+ return 0;
+
+ if (dir_out)
+ return pinctrl_gpio_direction_output(gc, gpio);
+ else
+ return pinctrl_gpio_direction_input(gc, gpio);
+}
+
static int bgpio_simple_dir_in(struct gpio_chip *gc, unsigned int gpio)
{
- return 0;
+ return bgpio_dir_return(gc, gpio, false);
}
static int bgpio_dir_out_err(struct gpio_chip *gc, unsigned int gpio,
@@ -339,7 +351,7 @@ static int bgpio_simple_dir_out(struct gpio_chip *gc, unsigned int gpio,
{
gc->set(gc, gpio, val);
- return 0;
+ return bgpio_dir_return(gc, gpio, true);
}
static int bgpio_dir_in(struct gpio_chip *gc, unsigned int gpio)
@@ -357,7 +369,7 @@ static int bgpio_dir_in(struct gpio_chip *gc, unsigned int gpio)
raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags);
- return 0;
+ return bgpio_dir_return(gc, gpio, false);
}
static int bgpio_get_dir(struct gpio_chip *gc, unsigned int gpio)
@@ -403,7 +415,7 @@ static int bgpio_dir_out_dir_first(struct gpio_chip *gc, unsigned int gpio,
{
bgpio_dir_out(gc, gpio, val);
gc->set(gc, gpio, val);
- return 0;
+ return bgpio_dir_return(gc, gpio, true);
}
static int bgpio_dir_out_val_first(struct gpio_chip *gc, unsigned int gpio,
@@ -411,7 +423,7 @@ static int bgpio_dir_out_val_first(struct gpio_chip *gc, unsigned int gpio,
{
gc->set(gc, gpio, val);
bgpio_dir_out(gc, gpio, val);
- return 0;
+ return bgpio_dir_return(gc, gpio, true);
}
static int bgpio_setup_accessors(struct device *dev,
@@ -562,10 +574,13 @@ static int bgpio_setup_direction(struct gpio_chip *gc,
static int bgpio_request(struct gpio_chip *chip, unsigned gpio_pin)
{
- if (gpio_pin < chip->ngpio)
- return 0;
+ if (gpio_pin >= chip->ngpio)
+ return -EINVAL;
- return -EINVAL;
+ if (chip->bgpio_pinctrl)
+ return gpiochip_generic_request(chip, gpio_pin);
+
+ return 0;
}
/**
@@ -632,6 +647,12 @@ int bgpio_init(struct gpio_chip *gc, struct device *dev,
if (ret)
return ret;
+ if (flags & BGPIOF_PINCTRL_BACKEND) {
+ gc->bgpio_pinctrl = true;
+ /* Currently this callback is only used for pin control */
+ gc->free = gpiochip_generic_free;
+ }
+
gc->bgpio_data = gc->read_reg(gc->reg_dat);
if (gc->set == bgpio_set_set &&
!(flags & BGPIOF_UNREADABLE_REG_SET))
diff --git a/drivers/gpio/gpio-mockup.c b/drivers/gpio/gpio-mockup.c
index d39c6618bade..266c0953d914 100644
--- a/drivers/gpio/gpio-mockup.c
+++ b/drivers/gpio/gpio-mockup.c
@@ -122,7 +122,7 @@ static void __gpio_mockup_set(struct gpio_mockup_chip *chip,
chip->lines[offset].value = !!value;
}
-static void gpio_mockup_set(struct gpio_chip *gc,
+static int gpio_mockup_set(struct gpio_chip *gc,
unsigned int offset, int value)
{
struct gpio_mockup_chip *chip = gpiochip_get_data(gc);
@@ -130,10 +130,12 @@ static void gpio_mockup_set(struct gpio_chip *gc,
guard(mutex)(&chip->lock);
__gpio_mockup_set(chip, offset, value);
+
+ return 0;
}
-static void gpio_mockup_set_multiple(struct gpio_chip *gc,
- unsigned long *mask, unsigned long *bits)
+static int gpio_mockup_set_multiple(struct gpio_chip *gc,
+ unsigned long *mask, unsigned long *bits)
{
struct gpio_mockup_chip *chip = gpiochip_get_data(gc);
unsigned int bit;
@@ -142,6 +144,8 @@ static void gpio_mockup_set_multiple(struct gpio_chip *gc,
for_each_set_bit(bit, mask, gc->ngpio)
__gpio_mockup_set(chip, bit, test_bit(bit, bits));
+
+ return 0;
}
static int gpio_mockup_apply_pull(struct gpio_mockup_chip *chip,
@@ -445,9 +449,9 @@ static int gpio_mockup_probe(struct platform_device *pdev)
gc->owner = THIS_MODULE;
gc->parent = dev;
gc->get = gpio_mockup_get;
- gc->set = gpio_mockup_set;
+ gc->set_rv = gpio_mockup_set;
gc->get_multiple = gpio_mockup_get_multiple;
- gc->set_multiple = gpio_mockup_set_multiple;
+ gc->set_multiple_rv = gpio_mockup_set_multiple;
gc->direction_output = gpio_mockup_dirout;
gc->direction_input = gpio_mockup_dirin;
gc->get_direction = gpio_mockup_get_direction;
diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c
index 5ffb332e9849..3604abcb6fec 100644
--- a/drivers/gpio/gpio-mvebu.c
+++ b/drivers/gpio/gpio-mvebu.c
@@ -49,6 +49,7 @@
#include <linux/pwm.h>
#include <linux/regmap.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
/*
* GPIO unit register offsets.
@@ -297,12 +298,12 @@ static unsigned int mvebu_pwmreg_blink_off_duration(struct mvebu_pwm *mvpwm)
/*
* Functions implementing the gpio_chip methods
*/
-static void mvebu_gpio_set(struct gpio_chip *chip, unsigned int pin, int value)
+static int mvebu_gpio_set(struct gpio_chip *chip, unsigned int pin, int value)
{
struct mvebu_gpio_chip *mvchip = gpiochip_get_data(chip);
- regmap_update_bits(mvchip->regs, GPIO_OUT_OFF + mvchip->offset,
- BIT(pin), value ? BIT(pin) : 0);
+ return regmap_update_bits(mvchip->regs, GPIO_OUT_OFF + mvchip->offset,
+ BIT(pin), value ? BIT(pin) : 0);
}
static int mvebu_gpio_get(struct gpio_chip *chip, unsigned int pin)
@@ -907,14 +908,14 @@ static void mvebu_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
if (is_out) {
seq_printf(s, " out %s %s\n",
- out & msk ? "hi" : "lo",
+ str_hi_lo(out & msk),
blink & msk ? "(blink )" : "");
continue;
}
seq_printf(s, " in %s (act %s) - IRQ",
- (data_in ^ in_pol) & msk ? "hi" : "lo",
- in_pol & msk ? "lo" : "hi");
+ str_hi_lo((data_in ^ in_pol) & msk),
+ str_lo_hi(in_pol & msk));
if (!((edg_msk | lvl_msk) & msk)) {
seq_puts(s, " disabled\n");
continue;
@@ -1172,7 +1173,7 @@ static int mvebu_gpio_probe(struct platform_device *pdev)
mvchip->chip.direction_input = mvebu_gpio_direction_input;
mvchip->chip.get = mvebu_gpio_get;
mvchip->chip.direction_output = mvebu_gpio_direction_output;
- mvchip->chip.set = mvebu_gpio_set;
+ mvchip->chip.set_rv = mvebu_gpio_set;
if (have_irqs)
mvchip->chip.to_irq = mvebu_gpio_to_irq;
mvchip->chip.base = id * MVEBU_MAX_GPIO_PER_BANK;
diff --git a/drivers/gpio/gpio-nomadik.c b/drivers/gpio/gpio-nomadik.c
index 836f1cc760c2..fa19a44943fd 100644
--- a/drivers/gpio/gpio-nomadik.c
+++ b/drivers/gpio/gpio-nomadik.c
@@ -30,6 +30,7 @@
#include <linux/reset.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
#include <linux/types.h>
#include <linux/gpio/gpio-nomadik.h>
@@ -430,7 +431,7 @@ void nmk_gpio_dbg_show_one(struct seq_file *s, struct pinctrl_dev *pctldev,
seq_printf(s, " gpio-%-3d (%-20.20s) out %s %s",
gpio,
label ?: "(none)",
- data_out ? "hi" : "lo",
+ str_hi_lo(data_out),
(mode < 0) ? "unknown" : modes[mode]);
} else {
int irq = chip->to_irq(chip, offset);
diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
index d63c1030e6ac..442435ded020 100644
--- a/drivers/gpio/gpio-pca953x.c
+++ b/drivers/gpio/gpio-pca953x.c
@@ -570,7 +570,8 @@ static int pca953x_gpio_get_value(struct gpio_chip *gc, unsigned off)
return !!(reg_val & bit);
}
-static void pca953x_gpio_set_value(struct gpio_chip *gc, unsigned off, int val)
+static int pca953x_gpio_set_value(struct gpio_chip *gc, unsigned int off,
+ int val)
{
struct pca953x_chip *chip = gpiochip_get_data(gc);
u8 outreg = chip->recalc_addr(chip, chip->regs->output, off);
@@ -578,7 +579,7 @@ static void pca953x_gpio_set_value(struct gpio_chip *gc, unsigned off, int val)
guard(mutex)(&chip->i2c_lock);
- regmap_write_bits(chip->regmap, outreg, bit, val ? bit : 0);
+ return regmap_write_bits(chip->regmap, outreg, bit, val ? bit : 0);
}
static int pca953x_gpio_get_direction(struct gpio_chip *gc, unsigned off)
@@ -616,8 +617,8 @@ static int pca953x_gpio_get_multiple(struct gpio_chip *gc,
return 0;
}
-static void pca953x_gpio_set_multiple(struct gpio_chip *gc,
- unsigned long *mask, unsigned long *bits)
+static int pca953x_gpio_set_multiple(struct gpio_chip *gc,
+ unsigned long *mask, unsigned long *bits)
{
struct pca953x_chip *chip = gpiochip_get_data(gc);
DECLARE_BITMAP(reg_val, MAX_LINE);
@@ -627,11 +628,11 @@ static void pca953x_gpio_set_multiple(struct gpio_chip *gc,
ret = pca953x_read_regs(chip, chip->regs->output, reg_val);
if (ret)
- return;
+ return ret;
bitmap_replace(reg_val, reg_val, bits, mask, gc->ngpio);
- pca953x_write_regs(chip, chip->regs->output, reg_val);
+ return pca953x_write_regs(chip, chip->regs->output, reg_val);
}
static int pca953x_gpio_set_pull_up_down(struct pca953x_chip *chip,
@@ -693,10 +694,10 @@ static void pca953x_setup_gpio(struct pca953x_chip *chip, int gpios)
gc->direction_input = pca953x_gpio_direction_input;
gc->direction_output = pca953x_gpio_direction_output;
gc->get = pca953x_gpio_get_value;
- gc->set = pca953x_gpio_set_value;
+ gc->set_rv = pca953x_gpio_set_value;
gc->get_direction = pca953x_gpio_get_direction;
gc->get_multiple = pca953x_gpio_get_multiple;
- gc->set_multiple = pca953x_gpio_set_multiple;
+ gc->set_multiple_rv = pca953x_gpio_set_multiple;
gc->set_config = pca953x_gpio_set_config;
gc->can_sleep = true;
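
pca953x gives the bulk setter the same treatment; gpio-mockup and gpio-sim below follow suit, wiring the int-returning variant through the new set_multiple_rv callback so a failed bulk register write is reported rather than silently dropped. A minimal sketch with foo_* names as hypothetical placeholders and a 32-line chip assumed:

#include <linux/gpio/driver.h>
#include <linux/regmap.h>

struct foo_chip {
	struct regmap *regmap;
	struct gpio_chip gc;
};

static int foo_set_multiple(struct gpio_chip *gc, unsigned long *mask,
			    unsigned long *bits)
{
	struct foo_chip *foo = gpiochip_get_data(gc);

	/* Report a failed bulk write to gpiolib instead of ignoring it. */
	return regmap_update_bits(foo->regmap, 0x00 /* hypothetical OUT reg */,
				  mask[0], bits[0]);
}

static void foo_setup(struct foo_chip *foo)
{
	foo->gc.set_multiple_rv = foo_set_multiple;	/* was: .set_multiple */
}
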
diff --git a/drivers/gpio/gpio-pcf857x.c b/drivers/gpio/gpio-pcf857x.c
index 7c57eaeb0afe..2e5f5d7f8865 100644
--- a/drivers/gpio/gpio-pcf857x.c
+++ b/drivers/gpio/gpio-pcf857x.c
@@ -5,6 +5,8 @@
* Copyright (C) 2007 David Brownell
*/
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
#include <linux/gpio/driver.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
@@ -272,12 +274,11 @@ static const struct irq_chip pcf857x_irq_chip = {
static int pcf857x_probe(struct i2c_client *client)
{
+ struct gpio_desc *reset_gpio;
struct pcf857x *gpio;
unsigned int n_latch = 0;
int status;
- device_property_read_u32(&client->dev, "lines-initial-states", &n_latch);
-
/* Allocate, initialize, and register this gpio_chip. */
gpio = devm_kzalloc(&client->dev, sizeof(*gpio), GFP_KERNEL);
if (!gpio)
@@ -297,6 +298,30 @@ static int pcf857x_probe(struct i2c_client *client)
gpio->chip.direction_output = pcf857x_output;
gpio->chip.ngpio = (uintptr_t)i2c_get_match_data(client);
+ reset_gpio = devm_gpiod_get_optional(&client->dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(reset_gpio))
+ return dev_err_probe(&client->dev, PTR_ERR(reset_gpio),
+ "failed to get reset GPIO\n");
+
+ if (reset_gpio) {
+ /* Reset is already asserted by devm_gpiod_get_optional() with GPIOD_OUT_HIGH */
+ fsleep(4); /* tw(rst) > 4us */
+ gpiod_set_value_cansleep(reset_gpio, 0);
+ fsleep(100); /* trst > 100us */
+
+ /*
+ * Performing a reset means "The PCA9670 registers and I2C-bus
+ * state machine will be held in their default state until the
+ * RESET input is once again HIGH".
+ *
+ * This is the same as writing 1 for all pins, which is the same
+ * as n_latch=0, the default value of the variable.
+ */
+ } else {
+ device_property_read_u32(&client->dev, "lines-initial-states",
+ &n_latch);
+ }
+
/* NOTE: the OnSemi jlc1562b is also largely compatible with
* these parts, notably for output. It has a low-resolution
* DAC instead of pin change IRQs; and its inputs can be the
diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c
index 2ecee3269a0c..18c965ee02c8 100644
--- a/drivers/gpio/gpio-rcar.c
+++ b/drivers/gpio/gpio-rcar.c
@@ -40,7 +40,7 @@ struct gpio_rcar_info {
struct gpio_rcar_priv {
void __iomem *base;
- spinlock_t lock;
+ raw_spinlock_t lock;
struct device *dev;
struct gpio_chip gpio_chip;
unsigned int irq_parent;
@@ -123,7 +123,7 @@ static void gpio_rcar_config_interrupt_input_mode(struct gpio_rcar_priv *p,
* "Setting Level-Sensitive Interrupt Input Mode"
*/
- spin_lock_irqsave(&p->lock, flags);
+ raw_spin_lock_irqsave(&p->lock, flags);
/* Configure positive or negative logic in POSNEG */
gpio_rcar_modify_bit(p, POSNEG, hwirq, !active_high_rising_edge);
@@ -142,7 +142,7 @@ static void gpio_rcar_config_interrupt_input_mode(struct gpio_rcar_priv *p,
if (!level_trigger)
gpio_rcar_write(p, INTCLR, BIT(hwirq));
- spin_unlock_irqrestore(&p->lock, flags);
+ raw_spin_unlock_irqrestore(&p->lock, flags);
}
static int gpio_rcar_irq_set_type(struct irq_data *d, unsigned int type)
@@ -246,7 +246,7 @@ static void gpio_rcar_config_general_input_output_mode(struct gpio_chip *chip,
* "Setting General Input Mode"
*/
- spin_lock_irqsave(&p->lock, flags);
+ raw_spin_lock_irqsave(&p->lock, flags);
/* Configure positive logic in POSNEG */
gpio_rcar_modify_bit(p, POSNEG, gpio, false);
@@ -261,7 +261,7 @@ static void gpio_rcar_config_general_input_output_mode(struct gpio_chip *chip,
if (p->info.has_outdtsel && output)
gpio_rcar_modify_bit(p, OUTDTSEL, gpio, false);
- spin_unlock_irqrestore(&p->lock, flags);
+ raw_spin_unlock_irqrestore(&p->lock, flags);
}
static int gpio_rcar_request(struct gpio_chip *chip, unsigned offset)
@@ -336,9 +336,6 @@ static int gpio_rcar_get_multiple(struct gpio_chip *chip, unsigned long *mask,
unsigned long flags;
bankmask = mask[0] & GENMASK(chip->ngpio - 1, 0);
- if (chip->valid_mask)
- bankmask &= chip->valid_mask[0];
-
if (!bankmask)
return 0;
@@ -347,7 +344,7 @@ static int gpio_rcar_get_multiple(struct gpio_chip *chip, unsigned long *mask,
return 0;
}
- spin_lock_irqsave(&p->lock, flags);
+ raw_spin_lock_irqsave(&p->lock, flags);
outputs = gpio_rcar_read(p, INOUTSEL);
m = outputs & bankmask;
if (m)
@@ -356,7 +353,7 @@ static int gpio_rcar_get_multiple(struct gpio_chip *chip, unsigned long *mask,
m = ~outputs & bankmask;
if (m)
val |= gpio_rcar_read(p, INDT) & m;
- spin_unlock_irqrestore(&p->lock, flags);
+ raw_spin_unlock_irqrestore(&p->lock, flags);
bits[0] = val;
return 0;
@@ -367,9 +364,9 @@ static void gpio_rcar_set(struct gpio_chip *chip, unsigned offset, int value)
struct gpio_rcar_priv *p = gpiochip_get_data(chip);
unsigned long flags;
- spin_lock_irqsave(&p->lock, flags);
+ raw_spin_lock_irqsave(&p->lock, flags);
gpio_rcar_modify_bit(p, OUTDT, offset, value);
- spin_unlock_irqrestore(&p->lock, flags);
+ raw_spin_unlock_irqrestore(&p->lock, flags);
}
static void gpio_rcar_set_multiple(struct gpio_chip *chip, unsigned long *mask,
@@ -380,18 +377,15 @@ static void gpio_rcar_set_multiple(struct gpio_chip *chip, unsigned long *mask,
u32 val, bankmask;
bankmask = mask[0] & GENMASK(chip->ngpio - 1, 0);
- if (chip->valid_mask)
- bankmask &= chip->valid_mask[0];
-
if (!bankmask)
return;
- spin_lock_irqsave(&p->lock, flags);
+ raw_spin_lock_irqsave(&p->lock, flags);
val = gpio_rcar_read(p, OUTDT);
val &= ~bankmask;
val |= (bankmask & bits[0]);
gpio_rcar_write(p, OUTDT, val);
- spin_unlock_irqrestore(&p->lock, flags);
+ raw_spin_unlock_irqrestore(&p->lock, flags);
}
static int gpio_rcar_direction_output(struct gpio_chip *chip, unsigned offset,
@@ -468,7 +462,12 @@ static int gpio_rcar_parse_dt(struct gpio_rcar_priv *p, unsigned int *npins)
p->info = *info;
ret = of_parse_phandle_with_fixed_args(np, "gpio-ranges", 3, 0, &args);
- *npins = ret == 0 ? args.args[2] : RCAR_MAX_GPIO_PER_BANK;
+ if (ret) {
+ *npins = RCAR_MAX_GPIO_PER_BANK;
+ } else {
+ *npins = args.args[2];
+ of_node_put(args.np);
+ }
if (*npins == 0 || *npins > RCAR_MAX_GPIO_PER_BANK) {
dev_warn(p->dev, "Invalid number of gpio lines %u, using %u\n",
@@ -482,10 +481,13 @@ static int gpio_rcar_parse_dt(struct gpio_rcar_priv *p, unsigned int *npins)
static void gpio_rcar_enable_inputs(struct gpio_rcar_priv *p)
{
u32 mask = GENMASK(p->gpio_chip.ngpio - 1, 0);
+ const unsigned long *valid_mask;
+
+ valid_mask = gpiochip_query_valid_mask(&p->gpio_chip);
/* Select "Input Enable" in INEN */
- if (p->gpio_chip.valid_mask)
- mask &= p->gpio_chip.valid_mask[0];
+ if (valid_mask)
+ mask &= valid_mask[0];
if (mask)
gpio_rcar_write(p, INEN, gpio_rcar_read(p, INEN) | mask);
}
@@ -505,7 +507,7 @@ static int gpio_rcar_probe(struct platform_device *pdev)
return -ENOMEM;
p->dev = dev;
- spin_lock_init(&p->lock);
+ raw_spin_lock_init(&p->lock);
/* Get device configuration from DT node */
ret = gpio_rcar_parse_dt(p, &npins);
diff --git a/drivers/gpio/gpio-regmap.c b/drivers/gpio/gpio-regmap.c
index 05f8781b5204..87c4225784cf 100644
--- a/drivers/gpio/gpio-regmap.c
+++ b/drivers/gpio/gpio-regmap.c
@@ -17,6 +17,8 @@
#include <linux/gpio/driver.h>
#include <linux/gpio/regmap.h>
+#include "gpiolib.h"
+
struct gpio_regmap {
struct device *parent;
struct regmap *regmap;
@@ -81,33 +83,43 @@ static int gpio_regmap_get(struct gpio_chip *chip, unsigned int offset)
return !!(val & mask);
}
-static void gpio_regmap_set(struct gpio_chip *chip, unsigned int offset,
- int val)
+static int gpio_regmap_set(struct gpio_chip *chip, unsigned int offset,
+ int val)
{
struct gpio_regmap *gpio = gpiochip_get_data(chip);
unsigned int base = gpio_regmap_addr(gpio->reg_set_base);
unsigned int reg, mask;
+ int ret;
+
+ ret = gpio->reg_mask_xlate(gpio, base, offset, &reg, &mask);
+ if (ret)
+ return ret;
- gpio->reg_mask_xlate(gpio, base, offset, &reg, &mask);
if (val)
- regmap_update_bits(gpio->regmap, reg, mask, mask);
+ ret = regmap_update_bits(gpio->regmap, reg, mask, mask);
else
- regmap_update_bits(gpio->regmap, reg, mask, 0);
+ ret = regmap_update_bits(gpio->regmap, reg, mask, 0);
+
+ return ret;
}
-static void gpio_regmap_set_with_clear(struct gpio_chip *chip,
- unsigned int offset, int val)
+static int gpio_regmap_set_with_clear(struct gpio_chip *chip,
+ unsigned int offset, int val)
{
struct gpio_regmap *gpio = gpiochip_get_data(chip);
unsigned int base, reg, mask;
+ int ret;
if (val)
base = gpio_regmap_addr(gpio->reg_set_base);
else
base = gpio_regmap_addr(gpio->reg_clr_base);
- gpio->reg_mask_xlate(gpio, base, offset, &reg, &mask);
- regmap_write(gpio->regmap, reg, mask);
+ ret = gpio->reg_mask_xlate(gpio, base, offset, &reg, &mask);
+ if (ret)
+ return ret;
+
+ return regmap_write(gpio->regmap, reg, mask);
}
static int gpio_regmap_get_direction(struct gpio_chip *chip,
@@ -210,9 +222,6 @@ struct gpio_regmap *gpio_regmap_register(const struct gpio_regmap_config *config
if (!config->parent)
return ERR_PTR(-EINVAL);
- if (!config->ngpio)
- return ERR_PTR(-EINVAL);
-
/* we need at least one */
if (!config->reg_dat_base && !config->reg_set_base)
return ERR_PTR(-EINVAL);
@@ -233,31 +242,16 @@ struct gpio_regmap *gpio_regmap_register(const struct gpio_regmap_config *config
gpio->parent = config->parent;
gpio->driver_data = config->drvdata;
gpio->regmap = config->regmap;
- gpio->ngpio_per_reg = config->ngpio_per_reg;
- gpio->reg_stride = config->reg_stride;
- gpio->reg_mask_xlate = config->reg_mask_xlate;
gpio->reg_dat_base = config->reg_dat_base;
gpio->reg_set_base = config->reg_set_base;
gpio->reg_clr_base = config->reg_clr_base;
gpio->reg_dir_in_base = config->reg_dir_in_base;
gpio->reg_dir_out_base = config->reg_dir_out_base;
- /* if not set, assume there is only one register */
- if (!gpio->ngpio_per_reg)
- gpio->ngpio_per_reg = config->ngpio;
-
- /* if not set, assume they are consecutive */
- if (!gpio->reg_stride)
- gpio->reg_stride = 1;
-
- if (!gpio->reg_mask_xlate)
- gpio->reg_mask_xlate = gpio_regmap_simple_xlate;
-
chip = &gpio->gpio_chip;
chip->parent = config->parent;
chip->fwnode = config->fwnode;
chip->base = -1;
- chip->ngpio = config->ngpio;
chip->names = config->names;
chip->label = config->label ?: dev_name(config->parent);
chip->can_sleep = regmap_might_sleep(config->regmap);
@@ -266,9 +260,9 @@ struct gpio_regmap *gpio_regmap_register(const struct gpio_regmap_config *config
chip->free = gpiochip_generic_free;
chip->get = gpio_regmap_get;
if (gpio->reg_set_base && gpio->reg_clr_base)
- chip->set = gpio_regmap_set_with_clear;
+ chip->set_rv = gpio_regmap_set_with_clear;
else if (gpio->reg_set_base)
- chip->set = gpio_regmap_set;
+ chip->set_rv = gpio_regmap_set;
chip->get_direction = gpio_regmap_get_direction;
if (gpio->reg_dir_in_base || gpio->reg_dir_out_base) {
@@ -276,6 +270,27 @@ struct gpio_regmap *gpio_regmap_register(const struct gpio_regmap_config *config
chip->direction_output = gpio_regmap_direction_output;
}
+ chip->ngpio = config->ngpio;
+ if (!chip->ngpio) {
+ ret = gpiochip_get_ngpios(chip, chip->parent);
+ if (ret)
+ return ERR_PTR(ret);
+ }
+
+ /* if not set, assume there is only one register */
+ gpio->ngpio_per_reg = config->ngpio_per_reg;
+ if (!gpio->ngpio_per_reg)
+ gpio->ngpio_per_reg = config->ngpio;
+
+ /* if not set, assume they are consecutive */
+ gpio->reg_stride = config->reg_stride;
+ if (!gpio->reg_stride)
+ gpio->reg_stride = 1;
+
+ gpio->reg_mask_xlate = config->reg_mask_xlate;
+ if (!gpio->reg_mask_xlate)
+ gpio->reg_mask_xlate = gpio_regmap_simple_xlate;
+
ret = gpiochip_add_data(chip, gpio);
if (ret < 0)
goto err_free_gpio;
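
The gpio-regmap conversion above also shows the core of this series: gpio_chip gains int-returning set_rv() and set_multiple_rv() callbacks so that errors from the underlying register access can propagate to the caller instead of being silently dropped. Below is a minimal sketch of a driver-side setter written for the new callback; the foo_gpio names and FOO_OUT_REG are hypothetical, only the gpio_chip members come from this series.

#include <linux/bits.h>
#include <linux/gpio/driver.h>
#include <linux/regmap.h>

struct foo_gpio {
        struct regmap *regmap;
};

#define FOO_OUT_REG     0x04    /* hypothetical output data register */

static int foo_gpio_set(struct gpio_chip *gc, unsigned int offset, int value)
{
        struct foo_gpio *foo = gpiochip_get_data(gc);

        /* A regmap/bus error now reaches the gpiolib core and the consumer. */
        return regmap_update_bits(foo->regmap, FOO_OUT_REG, BIT(offset),
                                  value ? BIT(offset) : 0);
}

At registration time the driver assigns gc->set_rv = foo_gpio_set instead of gc->set; gpiolib rejects chips that set both, as the gpiochip_add_data_with_key() check further down in gpiolib.c shows.
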
diff --git a/drivers/gpio/gpio-sim.c b/drivers/gpio/gpio-sim.c
index b6c230fab840..f638219a7c4f 100644
--- a/drivers/gpio/gpio-sim.c
+++ b/drivers/gpio/gpio-sim.c
@@ -10,7 +10,6 @@
#include <linux/array_size.h>
#include <linux/bitmap.h>
#include <linux/cleanup.h>
-#include <linux/completion.h>
#include <linux/configfs.h>
#include <linux/device.h>
#include <linux/err.h>
@@ -37,6 +36,8 @@
#include <linux/sysfs.h>
#include <linux/types.h>
+#include "dev-sync-probe.h"
+
#define GPIO_SIM_NGPIO_MAX 1024
#define GPIO_SIM_PROP_MAX 4 /* Max 3 properties + sentinel. */
#define GPIO_SIM_NUM_ATTRS 3 /* value, pull and sentinel */
@@ -119,12 +120,14 @@ static int gpio_sim_get(struct gpio_chip *gc, unsigned int offset)
return !!test_bit(offset, chip->value_map);
}
-static void gpio_sim_set(struct gpio_chip *gc, unsigned int offset, int value)
+static int gpio_sim_set(struct gpio_chip *gc, unsigned int offset, int value)
{
struct gpio_sim_chip *chip = gpiochip_get_data(gc);
scoped_guard(mutex, &chip->lock)
__assign_bit(offset, chip->value_map, value);
+
+ return 0;
}
static int gpio_sim_get_multiple(struct gpio_chip *gc,
@@ -138,14 +141,16 @@ static int gpio_sim_get_multiple(struct gpio_chip *gc,
return 0;
}
-static void gpio_sim_set_multiple(struct gpio_chip *gc,
- unsigned long *mask, unsigned long *bits)
+static int gpio_sim_set_multiple(struct gpio_chip *gc,
+ unsigned long *mask, unsigned long *bits)
{
struct gpio_sim_chip *chip = gpiochip_get_data(gc);
scoped_guard(mutex, &chip->lock)
bitmap_replace(chip->value_map, chip->value_map, bits, mask,
gc->ngpio);
+
+ return 0;
}
static int gpio_sim_direction_output(struct gpio_chip *gc,
@@ -481,9 +486,9 @@ static int gpio_sim_add_bank(struct fwnode_handle *swnode, struct device *dev)
gc->parent = dev;
gc->fwnode = swnode;
gc->get = gpio_sim_get;
- gc->set = gpio_sim_set;
+ gc->set_rv = gpio_sim_set;
gc->get_multiple = gpio_sim_get_multiple;
- gc->set_multiple = gpio_sim_set_multiple;
+ gc->set_multiple_rv = gpio_sim_set_multiple;
gc->direction_output = gpio_sim_direction_output;
gc->direction_input = gpio_sim_direction_input;
gc->get_direction = gpio_sim_get_direction;
@@ -541,14 +546,9 @@ static struct platform_driver gpio_sim_driver = {
};
struct gpio_sim_device {
+ struct dev_sync_probe_data probe_data;
struct config_group group;
- /*
- * If pdev is NULL, the device is 'pending' (waiting for configuration).
- * Once the pointer is assigned, the device has been created and the
- * item is 'live'.
- */
- struct platform_device *pdev;
int id;
/*
@@ -562,46 +562,11 @@ struct gpio_sim_device {
*/
struct mutex lock;
- /*
- * This is used to synchronously wait for the driver's probe to complete
- * and notify the user-space about any errors.
- */
- struct notifier_block bus_notifier;
- struct completion probe_completion;
- bool driver_bound;
-
struct gpiod_hog *hogs;
struct list_head bank_list;
};
-/* This is called with dev->lock already taken. */
-static int gpio_sim_bus_notifier_call(struct notifier_block *nb,
- unsigned long action, void *data)
-{
- struct gpio_sim_device *simdev = container_of(nb,
- struct gpio_sim_device,
- bus_notifier);
- struct device *dev = data;
- char devname[32];
-
- snprintf(devname, sizeof(devname), "gpio-sim.%u", simdev->id);
-
- if (!device_match_name(dev, devname))
- return NOTIFY_DONE;
-
- if (action == BUS_NOTIFY_BOUND_DRIVER)
- simdev->driver_bound = true;
- else if (action == BUS_NOTIFY_DRIVER_NOT_BOUND)
- simdev->driver_bound = false;
- else
- return NOTIFY_DONE;
-
- complete(&simdev->probe_completion);
-
- return NOTIFY_OK;
-}
-
static struct gpio_sim_device *to_gpio_sim_device(struct config_item *item)
{
struct config_group *group = to_config_group(item);
@@ -708,7 +673,7 @@ static bool gpio_sim_device_is_live(struct gpio_sim_device *dev)
{
lockdep_assert_held(&dev->lock);
- return !!dev->pdev;
+ return !!dev->probe_data.pdev;
}
static char *gpio_sim_strdup_trimmed(const char *str, size_t count)
@@ -730,7 +695,7 @@ static ssize_t gpio_sim_device_config_dev_name_show(struct config_item *item,
guard(mutex)(&dev->lock);
- pdev = dev->pdev;
+ pdev = dev->probe_data.pdev;
if (pdev)
return sprintf(page, "%s\n", dev_name(&pdev->dev));
@@ -939,7 +904,6 @@ static int gpio_sim_device_activate(struct gpio_sim_device *dev)
{
struct platform_device_info pdevinfo;
struct fwnode_handle *swnode;
- struct platform_device *pdev;
struct gpio_sim_bank *bank;
int ret;
@@ -981,31 +945,13 @@ static int gpio_sim_device_activate(struct gpio_sim_device *dev)
pdevinfo.fwnode = swnode;
pdevinfo.id = dev->id;
- reinit_completion(&dev->probe_completion);
- dev->driver_bound = false;
- bus_register_notifier(&platform_bus_type, &dev->bus_notifier);
-
- pdev = platform_device_register_full(&pdevinfo);
- if (IS_ERR(pdev)) {
- bus_unregister_notifier(&platform_bus_type, &dev->bus_notifier);
- gpio_sim_remove_hogs(dev);
- gpio_sim_remove_swnode_recursive(swnode);
- return PTR_ERR(pdev);
- }
-
- wait_for_completion(&dev->probe_completion);
- bus_unregister_notifier(&platform_bus_type, &dev->bus_notifier);
-
- if (!dev->driver_bound) {
- /* Probe failed, check kernel log. */
- platform_device_unregister(pdev);
+ ret = dev_sync_probe_register(&dev->probe_data, &pdevinfo);
+ if (ret) {
gpio_sim_remove_hogs(dev);
gpio_sim_remove_swnode_recursive(swnode);
- return -ENXIO;
+ return ret;
}
- dev->pdev = pdev;
-
return 0;
}
@@ -1015,11 +961,10 @@ static void gpio_sim_device_deactivate(struct gpio_sim_device *dev)
lockdep_assert_held(&dev->lock);
- swnode = dev_fwnode(&dev->pdev->dev);
- platform_device_unregister(dev->pdev);
+ swnode = dev_fwnode(&dev->probe_data.pdev->dev);
+ dev_sync_probe_unregister(&dev->probe_data);
gpio_sim_remove_hogs(dev);
gpio_sim_remove_swnode_recursive(swnode);
- dev->pdev = NULL;
}
static void
@@ -1120,7 +1065,7 @@ static ssize_t gpio_sim_bank_config_chip_name_show(struct config_item *item,
guard(mutex)(&dev->lock);
if (gpio_sim_device_is_live(dev))
- return device_for_each_child(&dev->pdev->dev, &ctx,
+ return device_for_each_child(&dev->probe_data.pdev->dev, &ctx,
gpio_sim_emit_chip_name);
return sprintf(page, "none\n");
@@ -1561,8 +1506,7 @@ gpio_sim_config_make_device_group(struct config_group *group, const char *name)
mutex_init(&dev->lock);
INIT_LIST_HEAD(&dev->bank_list);
- dev->bus_notifier.notifier_call = gpio_sim_bus_notifier_call;
- init_completion(&dev->probe_completion);
+ dev_sync_probe_init(&dev->probe_data);
return &no_free_ptr(dev)->group;
}
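
Both gpio-sim here and gpio-virtuser further down swap their private bus-notifier-plus-completion machinery for the shared dev-sync-probe helpers, which register a platform device and only return once its driver has bound (or report an error if it has not). A condensed sketch of the lifecycle as used above; the my_dev wrapper is illustrative and the configfs plumbing is omitted.

#include <linux/platform_device.h>

#include "dev-sync-probe.h"

struct my_dev {
        struct dev_sync_probe_data probe_data;
        /* device-specific state */
};

static int my_dev_activate(struct my_dev *my,
                           struct platform_device_info *pdevinfo)
{
        int ret;

        /* dev_sync_probe_init(&my->probe_data) was called at creation time. */
        ret = dev_sync_probe_register(&my->probe_data, pdevinfo);
        if (ret)
                return ret;     /* the platform driver did not bind */

        /* my->probe_data.pdev now points at the live, bound device. */
        return 0;
}

static void my_dev_deactivate(struct my_dev *my)
{
        dev_sync_probe_unregister(&my->probe_data);
}
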
diff --git a/drivers/gpio/gpio-stmpe.c b/drivers/gpio/gpio-stmpe.c
index 222279a9d82b..dce8ff322e47 100644
--- a/drivers/gpio/gpio-stmpe.c
+++ b/drivers/gpio/gpio-stmpe.c
@@ -15,6 +15,7 @@
#include <linux/platform_device.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
/*
* These registers are modified under the irq bus lock and cached to avoid
@@ -282,8 +283,7 @@ static void stmpe_dbg_show_one(struct seq_file *s,
if (dir) {
seq_printf(s, " gpio-%-3d (%-20.20s) out %s",
- gpio, label ?: "(none)",
- val ? "hi" : "lo");
+ gpio, label ?: "(none)", str_hi_lo(val));
} else {
u8 edge_det_reg;
u8 rise_reg;
@@ -352,7 +352,7 @@ static void stmpe_dbg_show_one(struct seq_file *s,
seq_printf(s, " gpio-%-3d (%-20.20s) in %s %13s %13s %25s %25s",
gpio, label ?: "(none)",
- val ? "hi" : "lo",
+ str_hi_lo(val),
edge_det_values[edge_det],
irqen ? "IRQ-enabled" : "IRQ-disabled",
rise_values[rise],
diff --git a/drivers/gpio/gpio-vf610.c b/drivers/gpio/gpio-vf610.c
index c36a9dbccd4d..4dad7ce0c4dc 100644
--- a/drivers/gpio/gpio-vf610.c
+++ b/drivers/gpio/gpio-vf610.c
@@ -36,7 +36,6 @@ struct vf610_gpio_port {
struct clk *clk_port;
struct clk *clk_gpio;
int irq;
- spinlock_t lock; /* protect gpio direction registers */
};
#define GPIO_PDOR 0x00
@@ -94,78 +93,6 @@ static inline u32 vf610_gpio_readl(void __iomem *reg)
return readl_relaxed(reg);
}
-static int vf610_gpio_get(struct gpio_chip *gc, unsigned int gpio)
-{
- struct vf610_gpio_port *port = gpiochip_get_data(gc);
- u32 mask = BIT(gpio);
- unsigned long offset = GPIO_PDIR;
-
- if (port->sdata->have_paddr) {
- mask &= vf610_gpio_readl(port->gpio_base + GPIO_PDDR);
- if (mask)
- offset = GPIO_PDOR;
- }
-
- return !!(vf610_gpio_readl(port->gpio_base + offset) & BIT(gpio));
-}
-
-static void vf610_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
-{
- struct vf610_gpio_port *port = gpiochip_get_data(gc);
- u32 mask = BIT(gpio);
- unsigned long offset = val ? GPIO_PSOR : GPIO_PCOR;
-
- vf610_gpio_writel(mask, port->gpio_base + offset);
-}
-
-static int vf610_gpio_direction_input(struct gpio_chip *chip, unsigned int gpio)
-{
- struct vf610_gpio_port *port = gpiochip_get_data(chip);
- u32 mask = BIT(gpio);
- u32 val;
-
- if (port->sdata->have_paddr) {
- guard(spinlock_irqsave)(&port->lock);
- val = vf610_gpio_readl(port->gpio_base + GPIO_PDDR);
- val &= ~mask;
- vf610_gpio_writel(val, port->gpio_base + GPIO_PDDR);
- }
-
- return pinctrl_gpio_direction_input(chip, gpio);
-}
-
-static int vf610_gpio_direction_output(struct gpio_chip *chip, unsigned int gpio,
- int value)
-{
- struct vf610_gpio_port *port = gpiochip_get_data(chip);
- u32 mask = BIT(gpio);
- u32 val;
-
- vf610_gpio_set(chip, gpio, value);
-
- if (port->sdata->have_paddr) {
- guard(spinlock_irqsave)(&port->lock);
- val = vf610_gpio_readl(port->gpio_base + GPIO_PDDR);
- val |= mask;
- vf610_gpio_writel(val, port->gpio_base + GPIO_PDDR);
- }
-
- return pinctrl_gpio_direction_output(chip, gpio);
-}
-
-static int vf610_gpio_get_direction(struct gpio_chip *gc, unsigned int gpio)
-{
- struct vf610_gpio_port *port = gpiochip_get_data(gc);
- u32 mask = BIT(gpio);
-
- mask &= vf610_gpio_readl(port->gpio_base + GPIO_PDDR);
-
- if (mask)
- return GPIO_LINE_DIRECTION_OUT;
-
- return GPIO_LINE_DIRECTION_IN;
-}
-
static void vf610_gpio_irq_handler(struct irq_desc *desc)
{
struct vf610_gpio_port *port =
@@ -291,6 +218,7 @@ static int vf610_gpio_probe(struct platform_device *pdev)
struct vf610_gpio_port *port;
struct gpio_chip *gc;
struct gpio_irq_chip *girq;
+ unsigned long flags;
int i;
int ret;
bool dual_base;
@@ -300,7 +228,6 @@ static int vf610_gpio_probe(struct platform_device *pdev)
return -ENOMEM;
port->sdata = device_get_match_data(dev);
- spin_lock_init(&port->lock);
dual_base = port->sdata->have_dual_base;
@@ -367,23 +294,25 @@ static int vf610_gpio_probe(struct platform_device *pdev)
}
gc = &port->gc;
- gc->parent = dev;
- gc->label = dev_name(dev);
- gc->ngpio = VF610_GPIO_PER_PORT;
- gc->base = -1;
-
- gc->request = gpiochip_generic_request;
- gc->free = gpiochip_generic_free;
- gc->direction_input = vf610_gpio_direction_input;
- gc->get = vf610_gpio_get;
- gc->direction_output = vf610_gpio_direction_output;
- gc->set = vf610_gpio_set;
+ flags = BGPIOF_PINCTRL_BACKEND;
/*
- * only IP has Port Data Direction Register(PDDR) can
- * support get direction
+ * We only read the output register for the current value on output
+ * lines if the direction register is available, i.e. when we can
+ * actually switch direction.
*/
if (port->sdata->have_paddr)
- gc->get_direction = vf610_gpio_get_direction;
+ flags |= BGPIOF_READ_OUTPUT_REG_SET;
+ ret = bgpio_init(gc, dev, 4,
+ port->gpio_base + GPIO_PDIR,
+ port->gpio_base + GPIO_PDOR,
+ NULL,
+ port->sdata->have_paddr ? port->gpio_base + GPIO_PDDR : NULL,
+ NULL,
+ flags);
+ if (ret)
+ return dev_err_probe(dev, ret, "unable to init generic GPIO\n");
+ gc->label = dev_name(dev);
+ gc->base = -1;
/* Mask all GPIO interrupts */
for (i = 0; i < gc->ngpio; i++)
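
gpio-vf610 stops hand-rolling its get/set/direction callbacks and hands the register layout to bgpio_init(), the generic MMIO GPIO helper, which fills in the gpio_chip operations (and ngpio) from the register addresses. For reference, this is how the arguments of the call above map onto bgpio_init()'s parameters; the register names are the driver's own and this is only a restatement of the call in the hunk.

        ret = bgpio_init(gc, dev,
                         4,                             /* sz: register width in bytes */
                         port->gpio_base + GPIO_PDIR,   /* dat: data input register */
                         port->gpio_base + GPIO_PDOR,   /* set: data output register */
                         NULL,                          /* clr: no separate clear register */
                         port->sdata->have_paddr ?      /* dirout: direction register, */
                                port->gpio_base + GPIO_PDDR : NULL, /* if the IP has one */
                         NULL,                          /* dirin: no input-direction register */
                         flags);                        /* BGPIOF_PINCTRL_BACKEND, etc. */
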
diff --git a/drivers/gpio/gpio-virtio.c b/drivers/gpio/gpio-virtio.c
index 93544ff62513..ac39da17a29b 100644
--- a/drivers/gpio/gpio-virtio.c
+++ b/drivers/gpio/gpio-virtio.c
@@ -350,19 +350,6 @@ static void virtio_gpio_irq_bus_sync_unlock(struct irq_data *d)
mutex_unlock(&vgpio->irq_lock);
}
-static struct irq_chip vgpio_irq_chip = {
- .name = "virtio-gpio",
- .irq_enable = virtio_gpio_irq_enable,
- .irq_disable = virtio_gpio_irq_disable,
- .irq_mask = virtio_gpio_irq_mask,
- .irq_unmask = virtio_gpio_irq_unmask,
- .irq_set_type = virtio_gpio_irq_set_type,
-
- /* These are required to implement irqchip for slow busses */
- .irq_bus_lock = virtio_gpio_irq_bus_lock,
- .irq_bus_sync_unlock = virtio_gpio_irq_bus_sync_unlock,
-};
-
static bool ignore_irq(struct virtio_gpio *vgpio, int gpio,
struct vgpio_irq_line *irq_line)
{
@@ -542,6 +529,7 @@ static int virtio_gpio_probe(struct virtio_device *vdev)
struct virtio_gpio_config config;
struct device *dev = &vdev->dev;
struct virtio_gpio *vgpio;
+ struct irq_chip *gpio_irq_chip;
u32 gpio_names_size;
u16 ngpio;
int ret, i;
@@ -591,13 +579,26 @@ static int virtio_gpio_probe(struct virtio_device *vdev)
if (!vgpio->irq_lines)
return -ENOMEM;
+ gpio_irq_chip = devm_kzalloc(dev, sizeof(*gpio_irq_chip), GFP_KERNEL);
+ if (!gpio_irq_chip)
+ return -ENOMEM;
+
+ gpio_irq_chip->name = dev_name(dev);
+ gpio_irq_chip->irq_enable = virtio_gpio_irq_enable;
+ gpio_irq_chip->irq_disable = virtio_gpio_irq_disable;
+ gpio_irq_chip->irq_mask = virtio_gpio_irq_mask;
+ gpio_irq_chip->irq_unmask = virtio_gpio_irq_unmask;
+ gpio_irq_chip->irq_set_type = virtio_gpio_irq_set_type;
+ gpio_irq_chip->irq_bus_lock = virtio_gpio_irq_bus_lock;
+ gpio_irq_chip->irq_bus_sync_unlock = virtio_gpio_irq_bus_sync_unlock;
+
/* The event comes from the outside so no parent handler */
vgpio->gc.irq.parent_handler = NULL;
vgpio->gc.irq.num_parents = 0;
vgpio->gc.irq.parents = NULL;
vgpio->gc.irq.default_type = IRQ_TYPE_NONE;
vgpio->gc.irq.handler = handle_level_irq;
- vgpio->gc.irq.chip = &vgpio_irq_chip;
+ vgpio->gc.irq.chip = gpio_irq_chip;
for (i = 0; i < ngpio; i++) {
vgpio->irq_lines[i].type = VIRTIO_GPIO_IRQ_TYPE_NONE;
diff --git a/drivers/gpio/gpio-virtuser.c b/drivers/gpio/gpio-virtuser.c
index e89f299f2140..13407fd4f0eb 100644
--- a/drivers/gpio/gpio-virtuser.c
+++ b/drivers/gpio/gpio-virtuser.c
@@ -11,7 +11,6 @@
#include <linux/atomic.h>
#include <linux/bitmap.h>
#include <linux/cleanup.h>
-#include <linux/completion.h>
#include <linux/configfs.h>
#include <linux/debugfs.h>
#include <linux/device.h>
@@ -37,6 +36,8 @@
#include <linux/string_helpers.h>
#include <linux/types.h>
+#include "dev-sync-probe.h"
+
#define GPIO_VIRTUSER_NAME_BUF_LEN 32
static DEFINE_IDA(gpio_virtuser_ida);
@@ -973,49 +974,17 @@ static struct platform_driver gpio_virtuser_driver = {
};
struct gpio_virtuser_device {
+ struct dev_sync_probe_data probe_data;
struct config_group group;
- struct platform_device *pdev;
int id;
struct mutex lock;
- struct notifier_block bus_notifier;
- struct completion probe_completion;
- bool driver_bound;
-
struct gpiod_lookup_table *lookup_table;
struct list_head lookup_list;
};
-static int gpio_virtuser_bus_notifier_call(struct notifier_block *nb,
- unsigned long action, void *data)
-{
- struct gpio_virtuser_device *vdev;
- struct device *dev = data;
- char devname[32];
-
- vdev = container_of(nb, struct gpio_virtuser_device, bus_notifier);
- snprintf(devname, sizeof(devname), "gpio-virtuser.%d", vdev->id);
-
- if (!device_match_name(dev, devname))
- return NOTIFY_DONE;
-
- switch (action) {
- case BUS_NOTIFY_BOUND_DRIVER:
- vdev->driver_bound = true;
- break;
- case BUS_NOTIFY_DRIVER_NOT_BOUND:
- vdev->driver_bound = false;
- break;
- default:
- return NOTIFY_DONE;
- }
-
- complete(&vdev->probe_completion);
- return NOTIFY_OK;
-}
-
static struct gpio_virtuser_device *
to_gpio_virtuser_device(struct config_item *item)
{
@@ -1029,7 +998,7 @@ gpio_virtuser_device_is_live(struct gpio_virtuser_device *dev)
{
lockdep_assert_held(&dev->lock);
- return !!dev->pdev;
+ return !!dev->probe_data.pdev;
}
struct gpio_virtuser_lookup {
@@ -1369,7 +1338,7 @@ gpio_virtuser_device_config_dev_name_show(struct config_item *item,
guard(mutex)(&dev->lock);
- pdev = dev->pdev;
+ pdev = dev->probe_data.pdev;
if (pdev)
return sprintf(page, "%s\n", dev_name(&pdev->dev));
@@ -1478,7 +1447,6 @@ gpio_virtuser_device_activate(struct gpio_virtuser_device *dev)
{
struct platform_device_info pdevinfo;
struct fwnode_handle *swnode;
- struct platform_device *pdev;
int ret;
lockdep_assert_held(&dev->lock);
@@ -1499,31 +1467,12 @@ gpio_virtuser_device_activate(struct gpio_virtuser_device *dev)
if (ret)
goto err_remove_swnode;
- reinit_completion(&dev->probe_completion);
- dev->driver_bound = false;
- bus_register_notifier(&platform_bus_type, &dev->bus_notifier);
-
- pdev = platform_device_register_full(&pdevinfo);
- if (IS_ERR(pdev)) {
- ret = PTR_ERR(pdev);
- bus_unregister_notifier(&platform_bus_type, &dev->bus_notifier);
+ ret = dev_sync_probe_register(&dev->probe_data, &pdevinfo);
+ if (ret)
goto err_remove_lookup_table;
- }
-
- wait_for_completion(&dev->probe_completion);
- bus_unregister_notifier(&platform_bus_type, &dev->bus_notifier);
-
- if (!dev->driver_bound) {
- ret = -ENXIO;
- goto err_unregister_pdev;
- }
-
- dev->pdev = pdev;
return 0;
-err_unregister_pdev:
- platform_device_unregister(pdev);
err_remove_lookup_table:
gpio_virtuser_remove_lookup_table(dev);
err_remove_swnode:
@@ -1539,11 +1488,10 @@ gpio_virtuser_device_deactivate(struct gpio_virtuser_device *dev)
lockdep_assert_held(&dev->lock);
- swnode = dev_fwnode(&dev->pdev->dev);
- platform_device_unregister(dev->pdev);
+ swnode = dev_fwnode(&dev->probe_data.pdev->dev);
+ dev_sync_probe_unregister(&dev->probe_data);
gpio_virtuser_remove_lookup_table(dev);
fwnode_remove_software_node(swnode);
- dev->pdev = NULL;
}
static void
@@ -1772,8 +1720,7 @@ gpio_virtuser_config_make_device_group(struct config_group *group,
&gpio_virtuser_device_config_group_type);
mutex_init(&dev->lock);
INIT_LIST_HEAD(&dev->lookup_list);
- dev->bus_notifier.notifier_call = gpio_virtuser_bus_notifier_call;
- init_completion(&dev->probe_completion);
+ dev_sync_probe_init(&dev->probe_data);
return &no_free_ptr(dev)->group;
}
diff --git a/drivers/gpio/gpio-wcove.c b/drivers/gpio/gpio-wcove.c
index 94ca9d03c094..1ec24f6f9300 100644
--- a/drivers/gpio/gpio-wcove.c
+++ b/drivers/gpio/gpio-wcove.c
@@ -15,6 +15,7 @@
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/seq_file.h>
+#include <linux/string_choices.h>
/*
* Whiskey Cove PMIC has 13 physical GPIO pins divided into 3 banks:
@@ -393,7 +394,7 @@ static void wcove_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
seq_printf(s, " gpio-%-2d %s %s %s %s ctlo=%2x,%s %s\n",
gpio, ctlo & CTLO_DIR_OUT ? "out" : "in ",
- ctli & 0x1 ? "hi" : "lo",
+ str_hi_lo(ctli & 0x1),
ctli & CTLI_INTCNT_NE ? "fall" : " ",
ctli & CTLI_INTCNT_PE ? "rise" : " ",
ctlo,
diff --git a/drivers/gpio/gpio-wm831x.c b/drivers/gpio/gpio-wm831x.c
index f7d5120ff8f1..61bb83a1e8ae 100644
--- a/drivers/gpio/gpio-wm831x.c
+++ b/drivers/gpio/gpio-wm831x.c
@@ -16,6 +16,7 @@
#include <linux/mfd/core.h>
#include <linux/platform_device.h>
#include <linux/seq_file.h>
+#include <linux/string_choices.h>
#include <linux/mfd/wm831x/core.h>
#include <linux/mfd/wm831x/pdata.h>
@@ -234,7 +235,7 @@ static void wm831x_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
seq_printf(s, " %s %s %s %s%s\n"
" %s%s (0x%4x)\n",
reg & WM831X_GPN_DIR ? "in" : "out",
- wm831x_gpio_get(chip, i) ? "high" : "low",
+ str_high_low(wm831x_gpio_get(chip, i)),
pull,
powerdomain,
reg & WM831X_GPN_POL ? "" : " inverted",
diff --git a/drivers/gpio/gpio-xilinx.c b/drivers/gpio/gpio-xilinx.c
index 792d94c49077..c58a7e1349b4 100644
--- a/drivers/gpio/gpio-xilinx.c
+++ b/drivers/gpio/gpio-xilinx.c
@@ -45,8 +45,7 @@
* struct xgpio_instance - Stores information about GPIO device
* @gc: GPIO chip
* @regs: register block
- * @hw_map: GPIO pin mapping on hardware side
- * @sw_map: GPIO pin mapping on software side
+ * @map: GPIO pin mapping on hardware side
* @state: GPIO write state shadow register
* @last_irq_read: GPIO read state register from last interrupt
* @dir: GPIO direction shadow register
@@ -60,8 +59,7 @@
struct xgpio_instance {
struct gpio_chip gc;
void __iomem *regs;
- DECLARE_BITMAP(hw_map, 64);
- DECLARE_BITMAP(sw_map, 64);
+ DECLARE_BITMAP(map, 64);
DECLARE_BITMAP(state, 64);
DECLARE_BITMAP(last_irq_read, 64);
DECLARE_BITMAP(dir, 64);
@@ -73,33 +71,6 @@ struct xgpio_instance {
struct clk *clk;
};
-static inline int xgpio_from_bit(struct xgpio_instance *chip, int bit)
-{
- return bitmap_bitremap(bit, chip->hw_map, chip->sw_map, 64);
-}
-
-static inline int xgpio_to_bit(struct xgpio_instance *chip, int gpio)
-{
- return bitmap_bitremap(gpio, chip->sw_map, chip->hw_map, 64);
-}
-
-static inline u32 xgpio_get_value32(const unsigned long *map, int bit)
-{
- const size_t index = BIT_WORD(bit);
- const unsigned long offset = (bit % BITS_PER_LONG) & BIT(5);
-
- return (map[index] >> offset) & 0xFFFFFFFFul;
-}
-
-static inline void xgpio_set_value32(unsigned long *map, int bit, u32 v)
-{
- const size_t index = BIT_WORD(bit);
- const unsigned long offset = (bit % BITS_PER_LONG) & BIT(5);
-
- map[index] &= ~(0xFFFFFFFFul << offset);
- map[index] |= (unsigned long)v << offset;
-}
-
static inline int xgpio_regoffset(struct xgpio_instance *chip, int ch)
{
switch (ch) {
@@ -115,20 +86,23 @@ static inline int xgpio_regoffset(struct xgpio_instance *chip, int ch)
static void xgpio_read_ch(struct xgpio_instance *chip, int reg, int bit, unsigned long *a)
{
void __iomem *addr = chip->regs + reg + xgpio_regoffset(chip, bit / 32);
+ unsigned long value = xgpio_readreg(addr);
- xgpio_set_value32(a, bit, xgpio_readreg(addr));
+ bitmap_write(a, value, round_down(bit, 32), 32);
}
static void xgpio_write_ch(struct xgpio_instance *chip, int reg, int bit, unsigned long *a)
{
void __iomem *addr = chip->regs + reg + xgpio_regoffset(chip, bit / 32);
+ unsigned long value = bitmap_read(a, round_down(bit, 32), 32);
- xgpio_writereg(addr, xgpio_get_value32(a, bit));
+ xgpio_writereg(addr, value);
}
static void xgpio_read_ch_all(struct xgpio_instance *chip, int reg, unsigned long *a)
{
- int bit, lastbit = xgpio_to_bit(chip, chip->gc.ngpio - 1);
+ unsigned long lastbit = find_nth_bit(chip->map, 64, chip->gc.ngpio - 1);
+ int bit;
for (bit = 0; bit <= lastbit ; bit += 32)
xgpio_read_ch(chip, reg, bit, a);
@@ -136,7 +110,8 @@ static void xgpio_read_ch_all(struct xgpio_instance *chip, int reg, unsigned lon
static void xgpio_write_ch_all(struct xgpio_instance *chip, int reg, unsigned long *a)
{
- int bit, lastbit = xgpio_to_bit(chip, chip->gc.ngpio - 1);
+ unsigned long lastbit = find_nth_bit(chip->map, 64, chip->gc.ngpio - 1);
+ int bit;
for (bit = 0; bit <= lastbit ; bit += 32)
xgpio_write_ch(chip, reg, bit, a);
@@ -156,7 +131,7 @@ static void xgpio_write_ch_all(struct xgpio_instance *chip, int reg, unsigned lo
static int xgpio_get(struct gpio_chip *gc, unsigned int gpio)
{
struct xgpio_instance *chip = gpiochip_get_data(gc);
- int bit = xgpio_to_bit(chip, gpio);
+ unsigned long bit = find_nth_bit(chip->map, 64, gpio);
DECLARE_BITMAP(state, 64);
xgpio_read_ch(chip, XGPIO_DATA_OFFSET, bit, state);
@@ -177,7 +152,7 @@ static void xgpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
{
unsigned long flags;
struct xgpio_instance *chip = gpiochip_get_data(gc);
- int bit = xgpio_to_bit(chip, gpio);
+ unsigned long bit = find_nth_bit(chip->map, 64, gpio);
raw_spin_lock_irqsave(&chip->gpio_lock, flags);
@@ -207,8 +182,8 @@ static void xgpio_set_multiple(struct gpio_chip *gc, unsigned long *mask,
unsigned long flags;
struct xgpio_instance *chip = gpiochip_get_data(gc);
- bitmap_remap(hw_mask, mask, chip->sw_map, chip->hw_map, 64);
- bitmap_remap(hw_bits, bits, chip->sw_map, chip->hw_map, 64);
+ bitmap_scatter(hw_mask, mask, chip->map, 64);
+ bitmap_scatter(hw_bits, bits, chip->map, 64);
raw_spin_lock_irqsave(&chip->gpio_lock, flags);
@@ -234,7 +209,7 @@ static int xgpio_dir_in(struct gpio_chip *gc, unsigned int gpio)
{
unsigned long flags;
struct xgpio_instance *chip = gpiochip_get_data(gc);
- int bit = xgpio_to_bit(chip, gpio);
+ unsigned long bit = find_nth_bit(chip->map, 64, gpio);
raw_spin_lock_irqsave(&chip->gpio_lock, flags);
@@ -263,7 +238,7 @@ static int xgpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
{
unsigned long flags;
struct xgpio_instance *chip = gpiochip_get_data(gc);
- int bit = xgpio_to_bit(chip, gpio);
+ unsigned long bit = find_nth_bit(chip->map, 64, gpio);
raw_spin_lock_irqsave(&chip->gpio_lock, flags);
@@ -395,14 +370,15 @@ static void xgpio_irq_mask(struct irq_data *irq_data)
unsigned long flags;
struct xgpio_instance *chip = irq_data_get_irq_chip_data(irq_data);
int irq_offset = irqd_to_hwirq(irq_data);
- int bit = xgpio_to_bit(chip, irq_offset);
+ unsigned long bit = find_nth_bit(chip->map, 64, irq_offset), enable;
u32 mask = BIT(bit / 32), temp;
raw_spin_lock_irqsave(&chip->gpio_lock, flags);
__clear_bit(bit, chip->enable);
- if (xgpio_get_value32(chip->enable, bit) == 0) {
+ enable = bitmap_read(chip->enable, round_down(bit, 32), 32);
+ if (enable == 0) {
/* Disable per channel interrupt */
temp = xgpio_readreg(chip->regs + XGPIO_IPIER_OFFSET);
temp &= ~mask;
@@ -422,17 +398,15 @@ static void xgpio_irq_unmask(struct irq_data *irq_data)
unsigned long flags;
struct xgpio_instance *chip = irq_data_get_irq_chip_data(irq_data);
int irq_offset = irqd_to_hwirq(irq_data);
- int bit = xgpio_to_bit(chip, irq_offset);
- u32 old_enable = xgpio_get_value32(chip->enable, bit);
+ unsigned long bit = find_nth_bit(chip->map, 64, irq_offset), enable;
u32 mask = BIT(bit / 32), val;
gpiochip_enable_irq(&chip->gc, irq_offset);
raw_spin_lock_irqsave(&chip->gpio_lock, flags);
- __set_bit(bit, chip->enable);
-
- if (old_enable == 0) {
+ enable = bitmap_read(chip->enable, round_down(bit, 32), 32);
+ if (enable == 0) {
/* Clear any existing per-channel interrupts */
val = xgpio_readreg(chip->regs + XGPIO_IPISR_OFFSET);
val &= mask;
@@ -447,6 +421,8 @@ static void xgpio_irq_unmask(struct irq_data *irq_data)
xgpio_writereg(chip->regs + XGPIO_IPIER_OFFSET, val);
}
+ __set_bit(bit, chip->enable);
+
raw_spin_unlock_irqrestore(&chip->gpio_lock, flags);
}
@@ -462,7 +438,7 @@ static int xgpio_set_irq_type(struct irq_data *irq_data, unsigned int type)
{
struct xgpio_instance *chip = irq_data_get_irq_chip_data(irq_data);
int irq_offset = irqd_to_hwirq(irq_data);
- int bit = xgpio_to_bit(chip, irq_offset);
+ unsigned long bit = find_nth_bit(chip->map, 64, irq_offset);
/*
* The Xilinx GPIO hardware provides a single interrupt status
@@ -502,10 +478,10 @@ static void xgpio_irqhandler(struct irq_desc *desc)
struct irq_chip *irqchip = irq_desc_get_chip(desc);
DECLARE_BITMAP(rising, 64);
DECLARE_BITMAP(falling, 64);
- DECLARE_BITMAP(all, 64);
+ DECLARE_BITMAP(hw, 64);
+ DECLARE_BITMAP(sw, 64);
int irq_offset;
u32 status;
- u32 bit;
status = xgpio_readreg(chip->regs + XGPIO_IPISR_OFFSET);
xgpio_writereg(chip->regs + XGPIO_IPISR_OFFSET, status);
@@ -514,29 +490,28 @@ static void xgpio_irqhandler(struct irq_desc *desc)
raw_spin_lock(&chip->gpio_lock);
- xgpio_read_ch_all(chip, XGPIO_DATA_OFFSET, all);
+ xgpio_read_ch_all(chip, XGPIO_DATA_OFFSET, hw);
bitmap_complement(rising, chip->last_irq_read, 64);
- bitmap_and(rising, rising, all, 64);
+ bitmap_and(rising, rising, hw, 64);
bitmap_and(rising, rising, chip->enable, 64);
bitmap_and(rising, rising, chip->rising_edge, 64);
- bitmap_complement(falling, all, 64);
+ bitmap_complement(falling, hw, 64);
bitmap_and(falling, falling, chip->last_irq_read, 64);
bitmap_and(falling, falling, chip->enable, 64);
bitmap_and(falling, falling, chip->falling_edge, 64);
- bitmap_copy(chip->last_irq_read, all, 64);
- bitmap_or(all, rising, falling, 64);
+ bitmap_copy(chip->last_irq_read, hw, 64);
+ bitmap_or(hw, rising, falling, 64);
raw_spin_unlock(&chip->gpio_lock);
dev_dbg(gc->parent, "IRQ rising %*pb falling %*pb\n", 64, rising, 64, falling);
- for_each_set_bit(bit, all, 64) {
- irq_offset = xgpio_from_bit(chip, bit);
+ bitmap_gather(sw, hw, chip->map, 64);
+ for_each_set_bit(irq_offset, sw, 64)
generic_handle_domain_irq(gc->irq.domain, irq_offset);
- }
chained_irq_exit(irqchip, desc);
}
@@ -613,17 +588,14 @@ static int xgpio_probe(struct platform_device *pdev)
if (width[1] > 32)
return -EINVAL;
- /* Setup software pin mapping */
- bitmap_set(chip->sw_map, 0, width[0] + width[1]);
-
/* Setup hardware pin mapping */
- bitmap_set(chip->hw_map, 0, width[0]);
- bitmap_set(chip->hw_map, 32, width[1]);
+ bitmap_set(chip->map, 0, width[0]);
+ bitmap_set(chip->map, 32, width[1]);
raw_spin_lock_init(&chip->gpio_lock);
chip->gc.base = -1;
- chip->gc.ngpio = bitmap_weight(chip->hw_map, 64);
+ chip->gc.ngpio = bitmap_weight(chip->map, 64);
chip->gc.parent = dev;
chip->gc.direction_input = xgpio_dir_in;
chip->gc.direction_output = xgpio_dir_out;
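
The gpio-xilinx rework drops the driver-private remap and 32-bit access helpers in favour of the generic bitmap API: find_nth_bit() maps a dense software offset to its sparse hardware bit, bitmap_read()/bitmap_write() move 32-bit register values in and out of the 64-bit maps, and bitmap_scatter()/bitmap_gather() translate whole masks between the two numberings. A small worked sketch, assuming a device with two 4-bit channels so that only hardware bits 0-3 and 32-35 are valid:

#include <linux/bitmap.h>
#include <linux/find.h>

static void xgpio_map_demo(void)
{
        DECLARE_BITMAP(map, 64);
        DECLARE_BITMAP(sw, 64);
        DECLARE_BITMAP(hw, 64);
        unsigned long bit;

        bitmap_zero(map, 64);
        bitmap_set(map, 0, 4);          /* channel 1: hw bits 0..3  */
        bitmap_set(map, 32, 4);         /* channel 2: hw bits 32..35 */

        /* Software line 5 is the sixth valid line -> hardware bit 33. */
        bit = find_nth_bit(map, 64, 5);

        /* Dense software mask {4,5} lands on sparse hw positions {32,33}... */
        bitmap_zero(sw, 64);
        bitmap_set(sw, 4, 2);
        bitmap_scatter(hw, sw, map, 64);

        /* ...and is gathered back into the dense software numbering. */
        bitmap_gather(sw, hw, map, 64);
}

(bit, hw and sw end up as 33, {32,33} and {4,5} respectively.)
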
diff --git a/drivers/gpio/gpio-xra1403.c b/drivers/gpio/gpio-xra1403.c
index dc2710c21c50..842cf875bb92 100644
--- a/drivers/gpio/gpio-xra1403.c
+++ b/drivers/gpio/gpio-xra1403.c
@@ -13,6 +13,7 @@
#include <linux/mutex.h>
#include <linux/seq_file.h>
#include <linux/spi/spi.h>
+#include <linux/string_choices.h>
#include <linux/regmap.h>
/* XRA1403 registers */
@@ -140,7 +141,7 @@ static void xra1403_dbg_show(struct seq_file *s, struct gpio_chip *chip)
seq_printf(s, " gpio-%-3d (%-12s) %s %s\n",
chip->base + i, label,
(gcr & BIT(i)) ? "in" : "out",
- (gsr & BIT(i)) ? "hi" : "lo");
+ str_hi_lo(gsr & BIT(i)));
}
}
#else
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
index f7746c57ba76..69caa35c58df 100644
--- a/drivers/gpio/gpiolib-acpi.c
+++ b/drivers/gpio/gpiolib-acpi.c
@@ -994,7 +994,7 @@ __acpi_find_gpio(struct fwnode_handle *fwnode, const char *con_id, unsigned int
desc = acpi_get_gpiod_from_data(fwnode,
propname, idx, info);
if (PTR_ERR(desc) == -EPROBE_DEFER)
- return ERR_CAST(desc);
+ return desc;
if (!IS_ERR(desc))
return desc;
diff --git a/drivers/gpio/gpiolib-cdev.c b/drivers/gpio/gpiolib-cdev.c
index 40f76a90fd7d..107d75558b5a 100644
--- a/drivers/gpio/gpiolib-cdev.c
+++ b/drivers/gpio/gpiolib-cdev.c
@@ -2729,8 +2729,9 @@ static int gpio_chrdev_open(struct inode *inode, struct file *file)
cdev->gdev = gpio_device_get(gdev);
cdev->lineinfo_changed_nb.notifier_call = lineinfo_changed_notify;
- ret = atomic_notifier_chain_register(&gdev->line_state_notifier,
- &cdev->lineinfo_changed_nb);
+ scoped_guard(write_lock_irqsave, &gdev->line_state_lock)
+ ret = raw_notifier_chain_register(&gdev->line_state_notifier,
+ &cdev->lineinfo_changed_nb);
if (ret)
goto out_free_bitmap;
@@ -2754,8 +2755,9 @@ out_unregister_device_notifier:
blocking_notifier_chain_unregister(&gdev->device_notifier,
&cdev->device_unregistered_nb);
out_unregister_line_notifier:
- atomic_notifier_chain_unregister(&gdev->line_state_notifier,
- &cdev->lineinfo_changed_nb);
+ scoped_guard(write_lock_irqsave, &gdev->line_state_lock)
+ raw_notifier_chain_unregister(&gdev->line_state_notifier,
+ &cdev->lineinfo_changed_nb);
out_free_bitmap:
gpio_device_put(gdev);
bitmap_free(cdev->watched_lines);
@@ -2779,8 +2781,9 @@ static int gpio_chrdev_release(struct inode *inode, struct file *file)
blocking_notifier_chain_unregister(&gdev->device_notifier,
&cdev->device_unregistered_nb);
- atomic_notifier_chain_unregister(&gdev->line_state_notifier,
- &cdev->lineinfo_changed_nb);
+ scoped_guard(write_lock_irqsave, &gdev->line_state_lock)
+ raw_notifier_chain_unregister(&gdev->line_state_notifier,
+ &cdev->lineinfo_changed_nb);
bitmap_free(cdev->watched_lines);
gpio_device_put(gdev);
kfree(cdev);
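
The character device now registers on a raw notifier chain and serialises it with the new line_state_lock rwlock in struct gpio_device, taken through the scoped_guard() helpers from <linux/cleanup.h>. A condensed sketch of the pattern, with the struct layout abridged and the read-side guard on the emit path assumed to mirror the write-side one shown in the hunks above:

struct gpio_device_sketch {
        rwlock_t line_state_lock;
        struct raw_notifier_head line_state_notifier;
};

/* Writer side: adding or removing a listener, as in gpio_chrdev_open(). */
scoped_guard(write_lock_irqsave, &gdev->line_state_lock)
        ret = raw_notifier_chain_register(&gdev->line_state_notifier, nb);

/* Reader side: emitting a line-state event only needs the lock for reading. */
scoped_guard(read_lock_irqsave, &gdev->line_state_lock)
        raw_notifier_call_chain(&gdev->line_state_notifier, action, desc);
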
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index 2e537ee979f3..eb667f8f1ead 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -203,6 +203,15 @@ static void of_gpio_try_fixup_polarity(const struct device_node *np,
*/
{ "qi,lb60", "rb-gpios", true },
#endif
+#if IS_ENABLED(CONFIG_IEEE802154_CA8210)
+ /*
+ * According to the datasheet, the NRST pin 27 is an active-low
+ * signal. However, the device tree schema and admittedly
+ * the out-of-tree implementations have been used for a long
+ * time incorrectly by describing reset GPIO as active-high.
+ */
+ { "cascoda,ca8210", "reset-gpio", false },
+#endif
#if IS_ENABLED(CONFIG_PCI_LANTIQ)
/*
* According to the PCI specification, the RST# pin is an
@@ -929,7 +938,7 @@ struct notifier_block gpio_of_notifier = {
#endif /* CONFIG_OF_DYNAMIC */
/**
- * of_gpio_simple_xlate - translate gpiospec to the GPIO number and flags
+ * of_gpio_twocell_xlate - translate twocell gpiospec to the GPIO number and flags
* @gc: pointer to the gpio_chip structure
* @gpiospec: GPIO specifier as found in the device tree
* @flags: a flags pointer to fill in
@@ -941,9 +950,9 @@ struct notifier_block gpio_of_notifier = {
* Returns:
* GPIO number (>= 0) on success, negative errno on failure.
*/
-static int of_gpio_simple_xlate(struct gpio_chip *gc,
- const struct of_phandle_args *gpiospec,
- u32 *flags)
+static int of_gpio_twocell_xlate(struct gpio_chip *gc,
+ const struct of_phandle_args *gpiospec,
+ u32 *flags)
{
/*
* We're discouraging gpio_cells < 2, since that way you'll have to
@@ -951,7 +960,7 @@ static int of_gpio_simple_xlate(struct gpio_chip *gc,
* number and the flags from a single gpio cell -- this is possible,
* but not recommended).
*/
- if (gc->of_gpio_n_cells < 2) {
+ if (gc->of_gpio_n_cells != 2) {
WARN_ON(1);
return -EINVAL;
}
@@ -968,6 +977,49 @@ static int of_gpio_simple_xlate(struct gpio_chip *gc,
return gpiospec->args[0];
}
+/**
+ * of_gpio_threecell_xlate - translate threecell gpiospec to the GPIO number and flags
+ * @gc: pointer to the gpio_chip structure
+ * @gpiospec: GPIO specifier as found in the device tree
+ * @flags: a flags pointer to fill in
+ *
+ * This is a simple translation function, suitable for most 1:n mapped
+ * GPIO chips, i.e. several GPIO chip instances from one device tree node.
+ * In this case the following binding is implied:
+ *
+ * foo-gpios = <&gpio instance offset flags>;
+ *
+ * Returns:
+ * GPIO number (>= 0) on success, negative errno on failure.
+ */
+static int of_gpio_threecell_xlate(struct gpio_chip *gc,
+ const struct of_phandle_args *gpiospec,
+ u32 *flags)
+{
+ if (gc->of_gpio_n_cells != 3) {
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ if (WARN_ON(gpiospec->args_count != 3))
+ return -EINVAL;
+
+ /*
+ * Check chip instance number, the driver responds with true if
+ * this is the chip we are looking for.
+ */
+ if (!gc->of_node_instance_match(gc, gpiospec->args[0]))
+ return -EINVAL;
+
+ if (gpiospec->args[1] >= gc->ngpio)
+ return -EINVAL;
+
+ if (flags)
+ *flags = gpiospec->args[2];
+
+ return gpiospec->args[1];
+}
+
#if IS_ENABLED(CONFIG_OF_GPIO_MM_GPIOCHIP)
#include <linux/gpio/legacy-of-mm-gpiochip.h>
/**
@@ -1057,6 +1109,9 @@ static int of_gpiochip_add_pin_range(struct gpio_chip *chip)
const char *name;
static const char group_names_propname[] = "gpio-ranges-group-names";
bool has_group_names;
+ int offset; /* Offset of the first GPIO line on the chip */
+ int pin; /* Pin base number in the range */
+ int count; /* Number of pins/GPIO lines to map */
np = dev_of_node(&chip->gpiodev->dev);
if (!np)
@@ -1065,7 +1120,15 @@ static int of_gpiochip_add_pin_range(struct gpio_chip *chip)
has_group_names = of_property_present(np, group_names_propname);
for (;; index++) {
- ret = of_parse_phandle_with_fixed_args(np, "gpio-ranges", 3,
+ /*
+ * Ordinary phandles contain 2-3 cells:
+ * gpios = <&gpio [instance] offset flags>;
+ * Ranges always contain one more cell:
+ * gpio-ranges = <&pinctrl [gpio_instance] gpio_offset pin_offset count>;
+ * This is why we parse chip->of_gpio_n_cells + 1 cells.
+ */
+ ret = of_parse_phandle_with_fixed_args(np, "gpio-ranges",
+ chip->of_gpio_n_cells + 1,
index, &pinspec);
if (ret)
break;
@@ -1075,13 +1138,33 @@ static int of_gpiochip_add_pin_range(struct gpio_chip *chip)
if (!pctldev)
return -EPROBE_DEFER;
+ if (chip->of_gpio_n_cells == 3) {
+ /* First cell is the gpiochip instance number */
+ offset = pinspec.args[1];
+ pin = pinspec.args[2];
+ count = pinspec.args[3];
+ } else {
+ offset = pinspec.args[0];
+ pin = pinspec.args[1];
+ count = pinspec.args[2];
+ }
+
+ /*
+ * With multiple GPIO chips per node, check that this chip is the
+ * right instance.
+ */
+ if (chip->of_node_instance_match &&
+ (chip->of_gpio_n_cells == 3) &&
+ !chip->of_node_instance_match(chip, pinspec.args[0]))
+ continue;
+
/* Ignore ranges outside of this GPIO chip */
- if (pinspec.args[0] >= (chip->offset + chip->ngpio))
+ if (offset >= (chip->offset + chip->ngpio))
continue;
- if (pinspec.args[0] + pinspec.args[2] <= chip->offset)
+ if (offset + count <= chip->offset)
continue;
- if (pinspec.args[2]) {
+ if (count) {
/* npins != 0: linear range */
if (has_group_names) {
of_property_read_string_index(np,
@@ -1095,27 +1178,27 @@ static int of_gpiochip_add_pin_range(struct gpio_chip *chip)
}
/* Trim the range to fit this GPIO chip */
- if (chip->offset > pinspec.args[0]) {
- trim = chip->offset - pinspec.args[0];
- pinspec.args[2] -= trim;
- pinspec.args[1] += trim;
- pinspec.args[0] = 0;
+ if (chip->offset > offset) {
+ trim = chip->offset - offset;
+ count -= trim;
+ pin += trim;
+ offset = 0;
} else {
- pinspec.args[0] -= chip->offset;
+ offset -= chip->offset;
}
- if ((pinspec.args[0] + pinspec.args[2]) > chip->ngpio)
- pinspec.args[2] = chip->ngpio - pinspec.args[0];
+ if ((offset + count) > chip->ngpio)
+ count = chip->ngpio - offset;
ret = gpiochip_add_pin_range(chip,
pinctrl_dev_get_devname(pctldev),
- pinspec.args[0],
- pinspec.args[1],
- pinspec.args[2]);
+ offset,
+ pin,
+ count);
if (ret)
return ret;
} else {
/* npins == 0: special range */
- if (pinspec.args[1]) {
+ if (pin) {
pr_err("%pOF: Illegal gpio-range format.\n",
np);
break;
@@ -1140,7 +1223,7 @@ static int of_gpiochip_add_pin_range(struct gpio_chip *chip)
}
ret = gpiochip_add_pingroup_range(chip, pctldev,
- pinspec.args[0], name);
+ offset, name);
if (ret)
return ret;
}
@@ -1163,8 +1246,14 @@ int of_gpiochip_add(struct gpio_chip *chip)
return 0;
if (!chip->of_xlate) {
- chip->of_gpio_n_cells = 2;
- chip->of_xlate = of_gpio_simple_xlate;
+ if (chip->of_gpio_n_cells == 3) {
+ if (!chip->of_node_instance_match)
+ return -EINVAL;
+ chip->of_xlate = of_gpio_threecell_xlate;
+ } else {
+ chip->of_gpio_n_cells = 2;
+ chip->of_xlate = of_gpio_twocell_xlate;
+ }
}
if (chip->of_gpio_n_cells > MAX_PHANDLE_ARGS)
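
gpiolib-of gains a three-cell GPIO specifier path where the first cell selects one of several gpio_chips registered from a single device tree node, and the provider identifies itself through the new of_node_instance_match() callback. A sketch of what such a provider would set up; the foo_bank bookkeeping is hypothetical, only the two gpio_chip fields are new:

#include <linux/gpio/driver.h>

struct foo_bank {
        struct gpio_chip gc;
        unsigned int index;     /* which instance behind the shared DT node */
};

static bool foo_of_node_instance_match(struct gpio_chip *gc,
                                       unsigned int instance)
{
        struct foo_bank *bank = gpiochip_get_data(gc);

        return bank->index == instance;
}

In probe, before gpiochip_add_data(), the driver would then set bank->gc.of_gpio_n_cells = 3 and bank->gc.of_node_instance_match = foo_of_node_instance_match, and consumers reference the lines as foo-gpios = <&gpio_node instance offset flags>;.
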
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 8741600af7ef..b8197502a5ac 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -26,6 +26,7 @@
#include <linux/slab.h>
#include <linux/srcu.h>
#include <linux/string.h>
+#include <linux/string_choices.h>
#include <linux/gpio.h>
#include <linux/gpio/driver.h>
@@ -341,6 +342,25 @@ static int gpiochip_find_base_unlocked(u16 ngpio)
}
}
+static int gpiochip_get_direction(struct gpio_chip *gc, unsigned int offset)
+{
+ int ret;
+
+ lockdep_assert_held(&gc->gpiodev->srcu);
+
+ if (WARN_ON(!gc->get_direction))
+ return -EOPNOTSUPP;
+
+ ret = gc->get_direction(gc, offset);
+ if (ret < 0)
+ return ret;
+
+ if (ret != GPIO_LINE_DIRECTION_OUT && ret != GPIO_LINE_DIRECTION_IN)
+ ret = -EBADE;
+
+ return ret;
+}
+
/**
* gpiod_get_direction - return the current direction of a GPIO
* @desc: GPIO to get the direction of
@@ -381,7 +401,7 @@ int gpiod_get_direction(struct gpio_desc *desc)
if (!guard.gc->get_direction)
return -ENOTSUPP;
- ret = guard.gc->get_direction(guard.gc, offset);
+ ret = gpiochip_get_direction(guard.gc, offset);
if (ret < 0)
return ret;
@@ -652,7 +672,7 @@ static int gpiochip_apply_reserved_ranges(struct gpio_chip *gc)
if (start >= gc->ngpio || start + count > gc->ngpio)
continue;
- bitmap_clear(gc->valid_mask, start, count);
+ bitmap_clear(gc->gpiodev->valid_mask, start, count);
}
kfree(ranges);
@@ -666,8 +686,8 @@ static int gpiochip_init_valid_mask(struct gpio_chip *gc)
if (!(gpiochip_count_reserved_ranges(gc) || gc->init_valid_mask))
return 0;
- gc->valid_mask = gpiochip_allocate_mask(gc);
- if (!gc->valid_mask)
+ gc->gpiodev->valid_mask = gpiochip_allocate_mask(gc);
+ if (!gc->gpiodev->valid_mask)
return -ENOMEM;
ret = gpiochip_apply_reserved_ranges(gc);
@@ -676,7 +696,7 @@ static int gpiochip_init_valid_mask(struct gpio_chip *gc)
if (gc->init_valid_mask)
return gc->init_valid_mask(gc,
- gc->valid_mask,
+ gc->gpiodev->valid_mask,
gc->ngpio);
return 0;
@@ -684,7 +704,7 @@ static int gpiochip_init_valid_mask(struct gpio_chip *gc)
static void gpiochip_free_valid_mask(struct gpio_chip *gc)
{
- gpiochip_free_mask(&gc->valid_mask);
+ gpiochip_free_mask(&gc->gpiodev->valid_mask);
}
static int gpiochip_add_pin_ranges(struct gpio_chip *gc)
@@ -703,13 +723,29 @@ static int gpiochip_add_pin_ranges(struct gpio_chip *gc)
return 0;
}
+/**
+ * gpiochip_query_valid_mask - return the GPIO validity information
+ * @gc: gpio chip whose validity information is queried
+ *
+ * Returns: bitmap representing valid GPIOs or NULL if all GPIOs are valid
+ *
+ * Some GPIO chips may support configurations where some of the pins aren't
+ * available. These chips can have valid_mask set to represent the valid
+ * GPIOs. This function can be used to retrieve this information.
+ */
+const unsigned long *gpiochip_query_valid_mask(const struct gpio_chip *gc)
+{
+ return gc->gpiodev->valid_mask;
+}
+EXPORT_SYMBOL_GPL(gpiochip_query_valid_mask);
+
bool gpiochip_line_is_valid(const struct gpio_chip *gc,
unsigned int offset)
{
/* No mask means all valid */
- if (likely(!gc->valid_mask))
+ if (likely(!gc->gpiodev->valid_mask))
return true;
- return test_bit(offset, gc->valid_mask);
+ return test_bit(offset, gc->gpiodev->valid_mask);
}
EXPORT_SYMBOL_GPL(gpiochip_line_is_valid);
@@ -882,13 +918,29 @@ void *gpiochip_get_data(struct gpio_chip *gc)
}
EXPORT_SYMBOL_GPL(gpiochip_get_data);
+/*
+ * If the calling driver provides the specific firmware node,
+ * use it. Otherwise use the one from the parent device, if any.
+ */
+static struct fwnode_handle *gpiochip_choose_fwnode(struct gpio_chip *gc)
+{
+ if (gc->fwnode)
+ return gc->fwnode;
+
+ if (gc->parent)
+ return dev_fwnode(gc->parent);
+
+ return NULL;
+}
+
int gpiochip_get_ngpios(struct gpio_chip *gc, struct device *dev)
{
+ struct fwnode_handle *fwnode = gpiochip_choose_fwnode(gc);
u32 ngpios = gc->ngpio;
int ret;
if (ngpios == 0) {
- ret = device_property_read_u32(dev, "ngpios", &ngpios);
+ ret = fwnode_property_read_u32(fwnode, "ngpios", &ngpios);
if (ret == -ENODATA)
/*
* -ENODATA means that there is no property found and
@@ -925,6 +977,11 @@ int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data,
int base = 0;
int ret = 0;
+ /* Only allow one set() and one set_multiple(). */
+ if ((gc->set && gc->set_rv) ||
+ (gc->set_multiple && gc->set_multiple_rv))
+ return -EINVAL;
+
/*
* First: allocate and populate the internal stat container, and
* set up the struct device.
@@ -941,14 +998,7 @@ int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data,
gc->gpiodev = gdev;
gpiochip_set_data(gc, data);
- /*
- * If the calling driver did not initialize firmware node,
- * do it here using the parent device, if any.
- */
- if (gc->fwnode)
- device_set_node(&gdev->dev, gc->fwnode);
- else if (gc->parent)
- device_set_node(&gdev->dev, dev_fwnode(gc->parent));
+ device_set_node(&gdev->dev, gpiochip_choose_fwnode(gc));
gdev->id = ida_alloc(&gpio_ida, GFP_KERNEL);
if (gdev->id < 0) {
@@ -1025,7 +1075,8 @@ int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data,
}
}
- ATOMIC_INIT_NOTIFIER_HEAD(&gdev->line_state_notifier);
+ rwlock_init(&gdev->line_state_lock);
+ RAW_INIT_NOTIFIER_HEAD(&gdev->line_state_notifier);
BLOCKING_INIT_NOTIFIER_HEAD(&gdev->device_notifier);
ret = init_srcu_struct(&gdev->srcu);
@@ -1056,24 +1107,19 @@ int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data,
desc->gdev = gdev;
- if (gc->get_direction && gpiochip_line_is_valid(gc, desc_index)) {
- ret = gc->get_direction(gc, desc_index);
- if (ret < 0)
- /*
- * FIXME: Bail-out here once all GPIO drivers
- * are updated to not return errors in
- * situations that can be considered normal
- * operation.
- */
- dev_warn(&gdev->dev,
- "%s: get_direction failed: %d\n",
- __func__, ret);
-
- assign_bit(FLAG_IS_OUT, &desc->flags, !ret);
- } else {
+ /*
+ * We would typically want to use gpiochip_get_direction() here
+ * but we must not check the return value and bail out, as pin
+ * controllers can have pins configured to alternate functions
+ * and return -EINVAL. Also: there's no need to take the SRCU
+ * lock here.
+ */
+ if (gc->get_direction && gpiochip_line_is_valid(gc, desc_index))
+ assign_bit(FLAG_IS_OUT, &desc->flags,
+ !gc->get_direction(gc, desc_index));
+ else
assign_bit(FLAG_IS_OUT,
&desc->flags, !gc->direction_input);
- }
}
ret = of_gpiochip_add(gc);
@@ -2329,16 +2375,18 @@ static int gpiod_request_commit(struct gpio_desc *desc, const char *label)
if (test_and_set_bit(FLAG_REQUESTED, &desc->flags))
return -EBUSY;
+ offset = gpio_chip_hwgpio(desc);
+ if (!gpiochip_line_is_valid(guard.gc, offset))
+ return -EINVAL;
+
/* NOTE: gpio_request() can be called in early boot,
* before IRQs are enabled, for non-sleeping (SOC) GPIOs.
*/
if (guard.gc->request) {
- offset = gpio_chip_hwgpio(desc);
- if (gpiochip_line_is_valid(guard.gc, offset))
- ret = guard.gc->request(guard.gc, offset);
- else
- ret = -EINVAL;
+ ret = guard.gc->request(guard.gc, offset);
+ if (ret > 0)
+ ret = -EBADE;
if (ret)
goto out_clear_bit;
}
@@ -2581,6 +2629,9 @@ int gpio_do_set_config(struct gpio_desc *desc, unsigned long config)
return -ENOTSUPP;
ret = guard.gc->set_config(guard.gc, gpio_chip_hwgpio(desc), config);
+ if (ret > 0)
+ ret = -EBADE;
+
#ifdef CONFIG_GPIO_CDEV
/*
* Special case - if we're setting debounce period, we need to store
@@ -2686,6 +2737,39 @@ int gpio_set_debounce_timeout(struct gpio_desc *desc, unsigned int debounce)
return ret;
}
+static int gpiochip_direction_input(struct gpio_chip *gc, unsigned int offset)
+{
+ int ret;
+
+ lockdep_assert_held(&gc->gpiodev->srcu);
+
+ if (WARN_ON(!gc->direction_input))
+ return -EOPNOTSUPP;
+
+ ret = gc->direction_input(gc, offset);
+ if (ret > 0)
+ ret = -EBADE;
+
+ return ret;
+}
+
+static int gpiochip_direction_output(struct gpio_chip *gc, unsigned int offset,
+ int value)
+{
+ int ret;
+
+ lockdep_assert_held(&gc->gpiodev->srcu);
+
+ if (WARN_ON(!gc->direction_output))
+ return -EOPNOTSUPP;
+
+ ret = gc->direction_output(gc, offset, value);
+ if (ret > 0)
+ ret = -EBADE;
+
+ return ret;
+}
+
/**
* gpiod_direction_input - set the GPIO direction to input
* @desc: GPIO to set to input
@@ -2737,11 +2821,10 @@ int gpiod_direction_input_nonotify(struct gpio_desc *desc)
* assume we are in input mode after this.
*/
if (guard.gc->direction_input) {
- ret = guard.gc->direction_input(guard.gc,
- gpio_chip_hwgpio(desc));
+ ret = gpiochip_direction_input(guard.gc,
+ gpio_chip_hwgpio(desc));
} else if (guard.gc->get_direction) {
- dir = guard.gc->get_direction(guard.gc,
- gpio_chip_hwgpio(desc));
+ dir = gpiochip_get_direction(guard.gc, gpio_chip_hwgpio(desc));
if (dir < 0)
return dir;
@@ -2762,6 +2845,27 @@ int gpiod_direction_input_nonotify(struct gpio_desc *desc)
return ret;
}
+static int gpiochip_set(struct gpio_chip *gc, unsigned int offset, int value)
+{
+ int ret;
+
+ lockdep_assert_held(&gc->gpiodev->srcu);
+
+ if (WARN_ON(unlikely(!gc->set && !gc->set_rv)))
+ return -EOPNOTSUPP;
+
+ if (gc->set_rv) {
+ ret = gc->set_rv(gc, offset, value);
+ if (ret > 0)
+ ret = -EBADE;
+
+ return ret;
+ }
+
+ gc->set(gc, offset, value);
+ return 0;
+}
+
static int gpiod_direction_output_raw_commit(struct gpio_desc *desc, int value)
{
int val = !!value, ret = 0, dir;
@@ -2783,13 +2887,13 @@ static int gpiod_direction_output_raw_commit(struct gpio_desc *desc, int value)
}
if (guard.gc->direction_output) {
- ret = guard.gc->direction_output(guard.gc,
- gpio_chip_hwgpio(desc), val);
+ ret = gpiochip_direction_output(guard.gc,
+ gpio_chip_hwgpio(desc), val);
} else {
/* Check that we are in output mode if we can */
if (guard.gc->get_direction) {
- dir = guard.gc->get_direction(guard.gc,
- gpio_chip_hwgpio(desc));
+ dir = gpiochip_get_direction(guard.gc,
+ gpio_chip_hwgpio(desc));
if (dir < 0)
return dir;
@@ -2804,7 +2908,9 @@ static int gpiod_direction_output_raw_commit(struct gpio_desc *desc, int value)
* If we can't actively set the direction, we are some
* output-only chip, so just drive the output as desired.
*/
- guard.gc->set(guard.gc, gpio_chip_hwgpio(desc), val);
+ ret = gpiochip_set(guard.gc, gpio_chip_hwgpio(desc), val);
+ if (ret)
+ return ret;
}
if (!ret)
@@ -2894,19 +3000,15 @@ int gpiod_direction_output_nonotify(struct gpio_desc *desc, int value)
if (!ret)
goto set_output_value;
/* Emulate open drain by not actively driving the line high */
- if (value) {
- ret = gpiod_direction_input_nonotify(desc);
+ if (value)
goto set_output_flag;
- }
} else if (test_bit(FLAG_OPEN_SOURCE, &flags)) {
ret = gpio_set_config(desc, PIN_CONFIG_DRIVE_OPEN_SOURCE);
if (!ret)
goto set_output_value;
/* Emulate open source by not actively driving the line low */
- if (!value) {
- ret = gpiod_direction_input_nonotify(desc);
+ if (!value)
goto set_output_flag;
- }
} else {
gpio_set_config(desc, PIN_CONFIG_DRIVE_PUSH_PULL);
}
@@ -2918,17 +3020,20 @@ set_output_value:
return gpiod_direction_output_raw_commit(desc, value);
set_output_flag:
+ ret = gpiod_direction_input_nonotify(desc);
+ if (ret)
+ return ret;
/*
* When emulating open-source or open-drain functionalities by not
* actively driving the line (setting mode to input) we still need to
* set the IS_OUT flag or otherwise we won't be able to set the line
* value anymore.
*/
- if (ret == 0)
- set_bit(FLAG_IS_OUT, &desc->flags);
- return ret;
+ set_bit(FLAG_IS_OUT, &desc->flags);
+ return 0;
}
+#if IS_ENABLED(CONFIG_HTE)
/**
* gpiod_enable_hw_timestamp_ns - Enable hardware timestamp in nanoseconds.
*
@@ -2994,6 +3099,7 @@ int gpiod_disable_hw_timestamp_ns(struct gpio_desc *desc, unsigned long flags)
return ret;
}
EXPORT_SYMBOL_GPL(gpiod_disable_hw_timestamp_ns);
+#endif /* CONFIG_HTE */
/**
* gpiod_set_config - sets @config for a GPIO
@@ -3100,9 +3206,23 @@ void gpiod_toggle_active_low(struct gpio_desc *desc)
}
EXPORT_SYMBOL_GPL(gpiod_toggle_active_low);
+static int gpiochip_get(struct gpio_chip *gc, unsigned int offset)
+{
+ int ret;
+
+ lockdep_assert_held(&gc->gpiodev->srcu);
+
+ /* Make sure this is called after checking for gc->get(). */
+ ret = gc->get(gc, offset);
+ if (ret > 1)
+ ret = -EBADE;
+
+ return ret;
+}
+
static int gpio_chip_get_value(struct gpio_chip *gc, const struct gpio_desc *desc)
{
- return gc->get ? gc->get(gc, gpio_chip_hwgpio(desc)) : -EIO;
+ return gc->get ? gpiochip_get(gc, gpio_chip_hwgpio(desc)) : -EIO;
}
/* I/O calls are only valid after configuration completed; the relevant
@@ -3151,15 +3271,21 @@ static int gpiod_get_raw_value_commit(const struct gpio_desc *desc)
static int gpio_chip_get_multiple(struct gpio_chip *gc,
unsigned long *mask, unsigned long *bits)
{
+ int ret;
+
lockdep_assert_held(&gc->gpiodev->srcu);
- if (gc->get_multiple)
- return gc->get_multiple(gc, mask, bits);
+ if (gc->get_multiple) {
+ ret = gc->get_multiple(gc, mask, bits);
+ if (ret > 0)
+ return -EBADE;
+
+ return ret;
+ }
+
if (gc->get) {
int i, value;
for_each_set_bit(i, mask, gc->ngpio) {
- value = gc->get(gc, i);
+ value = gpiochip_get(gc, i);
if (value < 0)
return value;
__assign_bit(i, bits, value);
@@ -3412,18 +3538,18 @@ EXPORT_SYMBOL_GPL(gpiod_get_array_value);
* @desc: gpio descriptor whose state need to be set.
* @value: Non-zero for setting it HIGH otherwise it will set to LOW.
*/
-static void gpio_set_open_drain_value_commit(struct gpio_desc *desc, bool value)
+static int gpio_set_open_drain_value_commit(struct gpio_desc *desc, bool value)
{
int ret = 0, offset = gpio_chip_hwgpio(desc);
CLASS(gpio_chip_guard, guard)(desc);
if (!guard.gc)
- return;
+ return -ENODEV;
if (value) {
- ret = guard.gc->direction_input(guard.gc, offset);
+ ret = gpiochip_direction_input(guard.gc, offset);
} else {
- ret = guard.gc->direction_output(guard.gc, offset, 0);
+ ret = gpiochip_direction_output(guard.gc, offset, 0);
if (!ret)
set_bit(FLAG_IS_OUT, &desc->flags);
}
@@ -3432,6 +3558,8 @@ static void gpio_set_open_drain_value_commit(struct gpio_desc *desc, bool value)
gpiod_err(desc,
"%s: Error in set_value for open drain err %d\n",
__func__, ret);
+
+ return ret;
}
/*
@@ -3439,36 +3567,38 @@ static void gpio_set_open_drain_value_commit(struct gpio_desc *desc, bool value)
* @desc: gpio descriptor whose state need to be set.
* @value: Non-zero for setting it HIGH otherwise it will set to LOW.
*/
-static void gpio_set_open_source_value_commit(struct gpio_desc *desc, bool value)
+static int gpio_set_open_source_value_commit(struct gpio_desc *desc, bool value)
{
int ret = 0, offset = gpio_chip_hwgpio(desc);
CLASS(gpio_chip_guard, guard)(desc);
if (!guard.gc)
- return;
+ return -ENODEV;
if (value) {
- ret = guard.gc->direction_output(guard.gc, offset, 1);
+ ret = gpiochip_direction_output(guard.gc, offset, 1);
if (!ret)
set_bit(FLAG_IS_OUT, &desc->flags);
} else {
- ret = guard.gc->direction_input(guard.gc, offset);
+ ret = gpiochip_direction_input(guard.gc, offset);
}
trace_gpio_direction(desc_to_gpio(desc), !value, ret);
if (ret < 0)
gpiod_err(desc,
"%s: Error in set_value for open source err %d\n",
__func__, ret);
+
+ return ret;
}
-static void gpiod_set_raw_value_commit(struct gpio_desc *desc, bool value)
+static int gpiod_set_raw_value_commit(struct gpio_desc *desc, bool value)
{
CLASS(gpio_chip_guard, guard)(desc);
if (!guard.gc)
- return;
+ return -ENODEV;
trace_gpio_value(desc_to_gpio(desc), 0, value);
- guard.gc->set(guard.gc, gpio_chip_hwgpio(desc), value);
+ return gpiochip_set(guard.gc, gpio_chip_hwgpio(desc), value);
}
/*
@@ -3480,21 +3610,38 @@ static void gpiod_set_raw_value_commit(struct gpio_desc *desc, bool value)
* defines which outputs are to be changed
* @bits: bit value array; one bit per output; BITS_PER_LONG bits per word
* defines the values the outputs specified by mask are to be set to
+ *
+ * Returns: 0 on success, negative error number on failure.
*/
-static void gpio_chip_set_multiple(struct gpio_chip *gc,
- unsigned long *mask, unsigned long *bits)
+static int gpiochip_set_multiple(struct gpio_chip *gc,
+ unsigned long *mask, unsigned long *bits)
{
+ unsigned int i;
+ int ret;
+
lockdep_assert_held(&gc->gpiodev->srcu);
+ if (gc->set_multiple_rv) {
+ ret = gc->set_multiple_rv(gc, mask, bits);
+ if (ret > 0)
+ ret = -EBADE;
+
+ return ret;
+ }
+
if (gc->set_multiple) {
gc->set_multiple(gc, mask, bits);
- } else {
- unsigned int i;
+ return 0;
+ }
- /* set outputs if the corresponding mask bit is set */
- for_each_set_bit(i, mask, gc->ngpio)
- gc->set(gc, i, test_bit(i, bits));
+ /* set outputs if the corresponding mask bit is set */
+ for_each_set_bit(i, mask, gc->ngpio) {
+ ret = gpiochip_set(gc, i, test_bit(i, bits));
+ if (ret)
+ break;
}
+
+ return ret;
}
int gpiod_set_array_value_complex(bool raw, bool can_sleep,
@@ -3504,7 +3651,7 @@ int gpiod_set_array_value_complex(bool raw, bool can_sleep,
unsigned long *value_bitmap)
{
struct gpio_chip *gc;
- int i = 0;
+ int i = 0, ret;
/*
* Validate array_info against desc_array and its size.
@@ -3527,7 +3674,10 @@ int gpiod_set_array_value_complex(bool raw, bool can_sleep,
bitmap_xor(value_bitmap, value_bitmap,
array_info->invert_mask, array_size);
- gpio_chip_set_multiple(gc, array_info->set_mask, value_bitmap);
+ ret = gpiochip_set_multiple(gc, array_info->set_mask,
+ value_bitmap);
+ if (ret)
+ return ret;
i = find_first_zero_bit(array_info->set_mask, array_size);
if (i == array_size)
@@ -3604,8 +3754,11 @@ int gpiod_set_array_value_complex(bool raw, bool can_sleep,
} while ((i < array_size) &&
gpio_device_chip_cmp(desc_array[i]->gdev, guard.gc));
/* push collected bits to outputs */
- if (count != 0)
- gpio_chip_set_multiple(guard.gc, mask, bits);
+ if (count != 0) {
+ ret = gpiochip_set_multiple(guard.gc, mask, bits);
+ if (ret)
+ return ret;
+ }
if (mask != fastpath_mask)
bitmap_free(mask);
@@ -3625,13 +3778,16 @@ int gpiod_set_array_value_complex(bool raw, bool can_sleep,
*
* This function can be called from contexts where we cannot sleep, and will
* complain if the GPIO chip functions potentially sleep.
+ *
+ * Returns:
+ * 0 on success, negative error number on failure.
*/
-void gpiod_set_raw_value(struct gpio_desc *desc, int value)
+int gpiod_set_raw_value(struct gpio_desc *desc, int value)
{
- VALIDATE_DESC_VOID(desc);
+ VALIDATE_DESC(desc);
/* Should be using gpiod_set_raw_value_cansleep() */
WARN_ON(desc->gdev->can_sleep);
- gpiod_set_raw_value_commit(desc, value);
+ return gpiod_set_raw_value_commit(desc, value);
}
EXPORT_SYMBOL_GPL(gpiod_set_raw_value);
@@ -3643,17 +3799,21 @@ EXPORT_SYMBOL_GPL(gpiod_set_raw_value);
* This sets the value of a GPIO line backing a descriptor, applying
* different semantic quirks like active low and open drain/source
* handling.
+ *
+ * Returns:
+ * 0 on success, negative error number on failure.
*/
-static void gpiod_set_value_nocheck(struct gpio_desc *desc, int value)
+static int gpiod_set_value_nocheck(struct gpio_desc *desc, int value)
{
if (test_bit(FLAG_ACTIVE_LOW, &desc->flags))
value = !value;
+
if (test_bit(FLAG_OPEN_DRAIN, &desc->flags))
- gpio_set_open_drain_value_commit(desc, value);
+ return gpio_set_open_drain_value_commit(desc, value);
else if (test_bit(FLAG_OPEN_SOURCE, &desc->flags))
- gpio_set_open_source_value_commit(desc, value);
- else
- gpiod_set_raw_value_commit(desc, value);
+ return gpio_set_open_source_value_commit(desc, value);
+
+ return gpiod_set_raw_value_commit(desc, value);
}
/**
@@ -3666,13 +3826,16 @@ static void gpiod_set_value_nocheck(struct gpio_desc *desc, int value)
*
* This function can be called from contexts where we cannot sleep, and will
* complain if the GPIO chip functions potentially sleep.
+ *
+ * Returns:
+ * 0 on success, negative error number on failure.
*/
-void gpiod_set_value(struct gpio_desc *desc, int value)
+int gpiod_set_value(struct gpio_desc *desc, int value)
{
- VALIDATE_DESC_VOID(desc);
+ VALIDATE_DESC(desc);
/* Should be using gpiod_set_value_cansleep() */
WARN_ON(desc->gdev->can_sleep);
- gpiod_set_value_nocheck(desc, value);
+ return gpiod_set_value_nocheck(desc, value);
}
EXPORT_SYMBOL_GPL(gpiod_set_value);
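With gpiod_set_value() (and its raw/cansleep variants) now returning an int, consumers can propagate a failed line write instead of losing it. A minimal consumer-side sketch; the foo_device structure, its reset_gpio member and the surrounding error handling are illustrative only:

/* Illustrative consumer: forward a failed GPIO write to the caller. */
static int foo_assert_reset(struct foo_device *foo)
{
	int ret;

	ret = gpiod_set_value(foo->reset_gpio, 1);
	if (ret)
		dev_err(foo->dev, "failed to assert reset: %d\n", ret);

	return ret;
}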
@@ -4090,12 +4253,15 @@ EXPORT_SYMBOL_GPL(gpiod_get_array_value_cansleep);
* regard for its ACTIVE_LOW status.
*
* This function is to be called from contexts that can sleep.
+ *
+ * Returns:
+ * 0 on success, negative error number on failure.
*/
-void gpiod_set_raw_value_cansleep(struct gpio_desc *desc, int value)
+int gpiod_set_raw_value_cansleep(struct gpio_desc *desc, int value)
{
might_sleep();
- VALIDATE_DESC_VOID(desc);
- gpiod_set_raw_value_commit(desc, value);
+ VALIDATE_DESC(desc);
+ return gpiod_set_raw_value_commit(desc, value);
}
EXPORT_SYMBOL_GPL(gpiod_set_raw_value_cansleep);
@@ -4108,12 +4274,15 @@ EXPORT_SYMBOL_GPL(gpiod_set_raw_value_cansleep);
* account
*
* This function is to be called from contexts that can sleep.
+ *
+ * Returns:
+ * 0 on success, negative error number on failure.
*/
-void gpiod_set_value_cansleep(struct gpio_desc *desc, int value)
+int gpiod_set_value_cansleep(struct gpio_desc *desc, int value)
{
might_sleep();
- VALIDATE_DESC_VOID(desc);
- gpiod_set_value_nocheck(desc, value);
+ VALIDATE_DESC(desc);
+ return gpiod_set_value_nocheck(desc, value);
}
EXPORT_SYMBOL_GPL(gpiod_set_value_cansleep);
@@ -4193,8 +4362,9 @@ EXPORT_SYMBOL_GPL(gpiod_set_array_value_cansleep);
void gpiod_line_state_notify(struct gpio_desc *desc, unsigned long action)
{
- atomic_notifier_call_chain(&desc->gdev->line_state_notifier,
- action, desc);
+ guard(read_lock_irqsave)(&desc->gdev->line_state_lock);
+
+ raw_notifier_call_chain(&desc->gdev->line_state_notifier, action, desc);
}
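The line-state notifier is now a raw notifier chain whose only serialization is the new rwlock: gpiod_line_state_notify() fires it under the read lock, so registration and unregistration have to happen under the write lock. A sketch of the registration side, assuming the rwlock guard helpers from <linux/spinlock.h>; the wrapper function itself is hypothetical:

/* Hypothetical subscriber registration paired with the read-side guard above. */
static int foo_register_line_state_handler(struct gpio_device *gdev,
					   struct notifier_block *nb)
{
	guard(write_lock_irqsave)(&gdev->line_state_lock);

	return raw_notifier_chain_register(&gdev->line_state_notifier, nb);
}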
/**
@@ -4749,10 +4919,10 @@ int gpiod_hog(struct gpio_desc *desc, const char *name,
return ret;
}
- gpiod_dbg(desc, "hogged as %s%s\n",
+ gpiod_dbg(desc, "hogged as %s/%s\n",
(dflags & GPIOD_FLAGS_BIT_DIR_OUT) ? "output" : "input",
(dflags & GPIOD_FLAGS_BIT_DIR_OUT) ?
- (dflags & GPIOD_FLAGS_BIT_DIR_VAL) ? "/high" : "/low" : "");
+ str_high_low(dflags & GPIOD_FLAGS_BIT_DIR_VAL) : "?");
return 0;
}
@@ -5026,6 +5196,7 @@ static void gpiolib_dbg_show(struct seq_file *s, struct gpio_device *gdev)
unsigned int gpio = gdev->base;
struct gpio_desc *desc;
struct gpio_chip *gc;
+ unsigned long flags;
int value;
guard(srcu)(&gdev->srcu);
@@ -5038,16 +5209,17 @@ static void gpiolib_dbg_show(struct seq_file *s, struct gpio_device *gdev)
for_each_gpio_desc(gc, desc) {
guard(srcu)(&desc->gdev->desc_srcu);
- is_irq = test_bit(FLAG_USED_AS_IRQ, &desc->flags);
- if (is_irq || test_bit(FLAG_REQUESTED, &desc->flags)) {
+ flags = READ_ONCE(desc->flags);
+ is_irq = test_bit(FLAG_USED_AS_IRQ, &flags);
+ if (is_irq || test_bit(FLAG_REQUESTED, &flags)) {
gpiod_get_direction(desc);
- is_out = test_bit(FLAG_IS_OUT, &desc->flags);
+ is_out = test_bit(FLAG_IS_OUT, &flags);
value = gpio_chip_get_value(gc, desc);
- active_low = test_bit(FLAG_ACTIVE_LOW, &desc->flags);
+ active_low = test_bit(FLAG_ACTIVE_LOW, &flags);
seq_printf(s, " gpio-%-3u (%-20.20s|%-20.20s) %s %s %s%s\n",
gpio, desc->name ?: "", gpiod_get_label(desc),
is_out ? "out" : "in ",
- value >= 0 ? (value ? "hi" : "lo") : "? ",
+ value >= 0 ? str_hi_lo(value) : "? ",
is_irq ? "IRQ " : "",
active_low ? "ACTIVE LOW" : "");
} else if (desc->name) {
diff --git a/drivers/gpio/gpiolib.h b/drivers/gpio/gpiolib.h
index 147156ec502b..58f64056de77 100644
--- a/drivers/gpio/gpiolib.h
+++ b/drivers/gpio/gpiolib.h
@@ -16,6 +16,7 @@
#include <linux/gpio/driver.h>
#include <linux/module.h>
#include <linux/notifier.h>
+#include <linux/spinlock.h>
#include <linux/srcu.h>
#include <linux/workqueue.h>
@@ -32,6 +33,8 @@
* @chip: pointer to the corresponding gpiochip, holding static
* data for this device
* @descs: array of ngpio descriptors.
+ * @valid_mask: If not %NULL, holds bitmask of GPIOs which are valid to be
+ * used from the chip.
* @desc_srcu: ensures consistent state of GPIO descriptors exposed to users
* @ngpio: the number of GPIO lines on this GPIO device, equal to the size
* of the @descs array.
@@ -45,6 +48,7 @@
* @list: links gpio_device:s together for traversal
* @line_state_notifier: used to notify subscribers about lines being
* requested, released or reconfigured
+ * @line_state_lock: RW-spinlock protecting the line state notifier
* @line_state_wq: used to emit line state events from a separate thread in
* process context
* @device_notifier: used to notify character device wait queues about the GPIO
@@ -65,6 +69,7 @@ struct gpio_device {
struct module *owner;
struct gpio_chip __rcu *chip;
struct gpio_desc *descs;
+ unsigned long *valid_mask;
struct srcu_struct desc_srcu;
unsigned int base;
u16 ngpio;
@@ -72,7 +77,8 @@ struct gpio_device {
const char *label;
void *data;
struct list_head list;
- struct atomic_notifier_head line_state_notifier;
+ struct raw_notifier_head line_state_notifier;
+ rwlock_t line_state_lock;
struct workqueue_struct *line_state_wq;
struct blocking_notifier_head device_notifier;
struct srcu_struct srcu;
@@ -183,24 +189,24 @@ struct gpio_desc {
struct gpio_device *gdev;
unsigned long flags;
/* flag symbols are bit numbers */
-#define FLAG_REQUESTED 0
-#define FLAG_IS_OUT 1
-#define FLAG_EXPORT 2 /* protected by sysfs_lock */
-#define FLAG_SYSFS 3 /* exported via /sys/class/gpio/control */
-#define FLAG_ACTIVE_LOW 6 /* value has active low */
-#define FLAG_OPEN_DRAIN 7 /* Gpio is open drain type */
-#define FLAG_OPEN_SOURCE 8 /* Gpio is open source type */
-#define FLAG_USED_AS_IRQ 9 /* GPIO is connected to an IRQ */
-#define FLAG_IRQ_IS_ENABLED 10 /* GPIO is connected to an enabled IRQ */
-#define FLAG_IS_HOGGED 11 /* GPIO is hogged */
-#define FLAG_TRANSITORY 12 /* GPIO may lose value in sleep or reset */
-#define FLAG_PULL_UP 13 /* GPIO has pull up enabled */
-#define FLAG_PULL_DOWN 14 /* GPIO has pull down enabled */
-#define FLAG_BIAS_DISABLE 15 /* GPIO has pull disabled */
-#define FLAG_EDGE_RISING 16 /* GPIO CDEV detects rising edge events */
-#define FLAG_EDGE_FALLING 17 /* GPIO CDEV detects falling edge events */
-#define FLAG_EVENT_CLOCK_REALTIME 18 /* GPIO CDEV reports REALTIME timestamps in events */
-#define FLAG_EVENT_CLOCK_HTE 19 /* GPIO CDEV reports hardware timestamps in events */
+#define FLAG_REQUESTED 0
+#define FLAG_IS_OUT 1
+#define FLAG_EXPORT 2 /* protected by sysfs_lock */
+#define FLAG_SYSFS 3 /* exported via /sys/class/gpio/control */
+#define FLAG_ACTIVE_LOW 6 /* value has active low */
+#define FLAG_OPEN_DRAIN 7 /* Gpio is open drain type */
+#define FLAG_OPEN_SOURCE 8 /* Gpio is open source type */
+#define FLAG_USED_AS_IRQ 9 /* GPIO is connected to an IRQ */
+#define FLAG_IRQ_IS_ENABLED 10 /* GPIO is connected to an enabled IRQ */
+#define FLAG_IS_HOGGED 11 /* GPIO is hogged */
+#define FLAG_TRANSITORY 12 /* GPIO may lose value in sleep or reset */
+#define FLAG_PULL_UP 13 /* GPIO has pull up enabled */
+#define FLAG_PULL_DOWN 14 /* GPIO has pull down enabled */
+#define FLAG_BIAS_DISABLE 15 /* GPIO has pull disabled */
+#define FLAG_EDGE_RISING 16 /* GPIO CDEV detects rising edge events */
+#define FLAG_EDGE_FALLING 17 /* GPIO CDEV detects falling edge events */
+#define FLAG_EVENT_CLOCK_REALTIME 18 /* GPIO CDEV reports REALTIME timestamps in events */
+#define FLAG_EVENT_CLOCK_HTE 19 /* GPIO CDEV reports hardware timestamps in events */
/* Connection label */
struct gpio_desc_label __rcu *label;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 95a05b03f799..c0ddbe7d6f0b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -2555,7 +2555,6 @@ static int amdgpu_pmops_freeze(struct device *dev)
int r;
r = amdgpu_device_suspend(drm_dev, true);
- adev->in_s4 = false;
if (r)
return r;
@@ -2567,8 +2566,13 @@ static int amdgpu_pmops_freeze(struct device *dev)
static int amdgpu_pmops_thaw(struct device *dev)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(drm_dev);
+ int r;
- return amdgpu_device_resume(drm_dev, true);
+ r = amdgpu_device_resume(drm_dev, true);
+ adev->in_s4 = false;
+
+ return r;
}
static int amdgpu_pmops_poweroff(struct device *dev)
@@ -2581,6 +2585,9 @@ static int amdgpu_pmops_poweroff(struct device *dev)
static int amdgpu_pmops_restore(struct device *dev)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(drm_dev);
+
+ adev->in_s4 = false;
return amdgpu_device_resume(drm_dev, true);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
index 03308261f894..7507d9443028 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
@@ -188,8 +188,8 @@ static int amdgpu_vkms_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
amdgpu_crtc->connector = NULL;
amdgpu_crtc->vsync_timer_enabled = AMDGPU_IRQ_STATE_DISABLE;
- hrtimer_init(&amdgpu_crtc->vblank_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- amdgpu_crtc->vblank_timer.function = &amdgpu_vkms_vblank_simulate;
+ hrtimer_setup(&amdgpu_crtc->vblank_timer, &amdgpu_vkms_vblank_simulate, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
return ret;
}
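hrtimer_setup() collapses the old two-step pattern (hrtimer_init() followed by an open-coded .function assignment) into a single call that takes the callback up front. The general conversion pattern, shown on a hypothetical driver timer:

/* Before: two-step initialisation with a manual callback assignment. */
hrtimer_init(&foo->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
foo->timer.function = foo_timer_fn;

/* After: the callback is supplied when the timer is set up. */
hrtimer_setup(&foo->timer, foo_timer_fn, CLOCK_MONOTONIC, HRTIMER_MODE_REL);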
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
index 2523221a2519..48ff00427882 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
@@ -2437,7 +2437,7 @@ static int gfx_v12_0_cp_gfx_load_me_microcode_rs64(struct amdgpu_device *adev)
(void **)&adev->gfx.me.me_fw_data_ptr);
if (r) {
dev_err(adev->dev, "(%d) failed to create me data bo\n", r);
- gfx_v12_0_pfp_fini(adev);
+ gfx_v12_0_me_fini(adev);
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c
index b749f1c3f6a9..c3c144a4f45e 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c
@@ -501,9 +501,6 @@ static void gmc_v12_0_get_vm_pte(struct amdgpu_device *adev,
uint64_t *flags)
{
struct amdgpu_bo *bo = mapping->bo_va->base.bo;
- struct amdgpu_device *bo_adev;
- bool coherent, is_system;
-
*flags &= ~AMDGPU_PTE_EXECUTABLE;
*flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;
@@ -519,25 +516,11 @@ static void gmc_v12_0_get_vm_pte(struct amdgpu_device *adev,
*flags &= ~AMDGPU_PTE_VALID;
}
- if (!bo)
- return;
-
- if (bo->flags & (AMDGPU_GEM_CREATE_COHERENT |
- AMDGPU_GEM_CREATE_UNCACHED))
- *flags = AMDGPU_PTE_MTYPE_GFX12(*flags, MTYPE_UC);
-
- bo_adev = amdgpu_ttm_adev(bo->tbo.bdev);
- coherent = bo->flags & AMDGPU_GEM_CREATE_COHERENT;
- is_system = (bo->tbo.resource->mem_type == TTM_PL_TT) ||
- (bo->tbo.resource->mem_type == AMDGPU_PL_PREEMPT);
-
if (bo && bo->flags & AMDGPU_GEM_CREATE_GFX12_DCC)
*flags |= AMDGPU_PTE_DCC;
- /* WA for HW bug */
- if (is_system || ((bo_adev != adev) && coherent))
- *flags = AMDGPU_PTE_MTYPE_GFX12(*flags, MTYPE_NC);
-
+ if (bo && bo->flags & AMDGPU_GEM_CREATE_UNCACHED)
+ *flags = AMDGPU_PTE_MTYPE_GFX12(*flags, MTYPE_UC);
}
static unsigned gmc_v12_0_get_vbios_fb_size(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c
index 47db483c3516..95c609317a8d 100644
--- a/drivers/gpu/drm/amd/amdgpu/nv.c
+++ b/drivers/gpu/drm/amd/amdgpu/nv.c
@@ -78,12 +78,12 @@ static const struct amdgpu_video_codecs nv_video_codecs_encode = {
/* Navi1x */
static const struct amdgpu_video_codec_info nv_video_codecs_decode_array[] = {
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 1920, 1088, 3)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 1920, 1088, 5)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 1920, 1088, 4)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 8192, 8192, 0)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
};
@@ -104,10 +104,10 @@ static const struct amdgpu_video_codecs sc_video_codecs_encode = {
};
static const struct amdgpu_video_codec_info sc_video_codecs_decode_array_vcn0[] = {
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 1920, 1088, 3)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 1920, 1088, 5)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 1920, 1088, 4)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 16384, 16384, 0)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
@@ -115,10 +115,10 @@ static const struct amdgpu_video_codec_info sc_video_codecs_decode_array_vcn0[]
};
static const struct amdgpu_video_codec_info sc_video_codecs_decode_array_vcn1[] = {
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 1920, 1088, 3)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 1920, 1088, 5)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 1920, 1088, 4)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 16384, 16384, 0)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index a59b4c36cad7..e98fb3fa36a8 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -103,12 +103,11 @@ static const struct amdgpu_video_codecs vega_video_codecs_encode =
/* Vega */
static const struct amdgpu_video_codec_info vega_video_codecs_decode_array[] =
{
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 1920, 1088, 3)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 1920, 1088, 5)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 1920, 1088, 4)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 4096, 186)},
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
};
static const struct amdgpu_video_codecs vega_video_codecs_decode =
@@ -120,12 +119,12 @@ static const struct amdgpu_video_codecs vega_video_codecs_decode =
/* Raven */
static const struct amdgpu_video_codec_info rv_video_codecs_decode_array[] =
{
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 1920, 1088, 3)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 1920, 1088, 5)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 1920, 1088, 4)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 4096, 186)},
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 8192, 8192, 0)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 4096, 4096, 0)},
};
@@ -138,10 +137,10 @@ static const struct amdgpu_video_codecs rv_video_codecs_decode =
/* Renoir, Arcturus */
static const struct amdgpu_video_codec_info rn_video_codecs_decode_array[] =
{
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 1920, 1088, 3)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 1920, 1088, 5)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 1920, 1088, 4)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 16384, 16384, 0)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
index c633b7ff2943..09fd6ef99b3d 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
@@ -284,7 +284,7 @@ static int vce_v2_0_stop(struct amdgpu_device *adev)
return 0;
}
- ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_VCN);
+ ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_VCE);
if (!ip_block)
return -EINVAL;
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index 06615f160331..0c9c4d8b7b71 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -167,16 +167,16 @@ static const struct amdgpu_video_codec_info tonga_video_codecs_decode_array[] =
{
{
.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2,
- .max_width = 4096,
- .max_height = 4096,
- .max_pixels_per_frame = 4096 * 4096,
+ .max_width = 1920,
+ .max_height = 1088,
+ .max_pixels_per_frame = 1920 * 1088,
.max_level = 3,
},
{
.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4,
- .max_width = 4096,
- .max_height = 4096,
- .max_pixels_per_frame = 4096 * 4096,
+ .max_width = 1920,
+ .max_height = 1088,
+ .max_pixels_per_frame = 1920 * 1088,
.max_level = 5,
},
{
@@ -188,9 +188,9 @@ static const struct amdgpu_video_codec_info tonga_video_codecs_decode_array[] =
},
{
.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1,
- .max_width = 4096,
- .max_height = 4096,
- .max_pixels_per_frame = 4096 * 4096,
+ .max_width = 1920,
+ .max_height = 1088,
+ .max_pixels_per_frame = 1920 * 1088,
.max_level = 4,
},
};
@@ -206,16 +206,16 @@ static const struct amdgpu_video_codec_info cz_video_codecs_decode_array[] =
{
{
.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2,
- .max_width = 4096,
- .max_height = 4096,
- .max_pixels_per_frame = 4096 * 4096,
+ .max_width = 1920,
+ .max_height = 1088,
+ .max_pixels_per_frame = 1920 * 1088,
.max_level = 3,
},
{
.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4,
- .max_width = 4096,
- .max_height = 4096,
- .max_pixels_per_frame = 4096 * 4096,
+ .max_width = 1920,
+ .max_height = 1088,
+ .max_pixels_per_frame = 1920 * 1088,
.max_level = 5,
},
{
@@ -227,9 +227,9 @@ static const struct amdgpu_video_codec_info cz_video_codecs_decode_array[] =
},
{
.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1,
- .max_width = 4096,
- .max_height = 4096,
- .max_pixels_per_frame = 4096 * 4096,
+ .max_width = 1920,
+ .max_height = 1088,
+ .max_pixels_per_frame = 1920 * 1088,
.max_level = 4,
},
{
@@ -239,13 +239,6 @@ static const struct amdgpu_video_codec_info cz_video_codecs_decode_array[] =
.max_pixels_per_frame = 4096 * 4096,
.max_level = 186,
},
- {
- .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG,
- .max_width = 4096,
- .max_height = 4096,
- .max_pixels_per_frame = 4096 * 4096,
- .max_level = 0,
- },
};
static const struct amdgpu_video_codecs cz_video_codecs_decode =
diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h
index 651660958e5b..0320163b6e74 100644
--- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h
+++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h
@@ -3644,7 +3644,7 @@ static const uint32_t cwsr_trap_gfx9_4_3_hex[] = {
};
static const uint32_t cwsr_trap_gfx12_hex[] = {
- 0xbfa00001, 0xbfa0024b,
+ 0xbfa00001, 0xbfa002a2,
0xb0804009, 0xb8f8f804,
0x9178ff78, 0x00008c00,
0xb8fbf811, 0x8b6eff78,
@@ -3718,7 +3718,15 @@ static const uint32_t cwsr_trap_gfx12_hex[] = {
0x00011677, 0xd7610000,
0x00011a79, 0xd7610000,
0x00011c7e, 0xd7610000,
- 0x00011e7f, 0xbefe00ff,
+ 0x00011e7f, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xbefe00ff,
0x00003fff, 0xbeff0080,
0xee0a407a, 0x000c0000,
0x00004000, 0xd760007a,
@@ -3755,38 +3763,46 @@ static const uint32_t cwsr_trap_gfx12_hex[] = {
0x00000200, 0xbef600ff,
0x01000000, 0x7e000280,
0x7e020280, 0x7e040280,
- 0xbefd0080, 0xbe804ec2,
- 0xbf94fffe, 0xb8faf804,
- 0x8b7a847a, 0x91788478,
- 0x8c787a78, 0xd7610002,
- 0x0000fa71, 0x807d817d,
- 0xd7610002, 0x0000fa6c,
- 0x807d817d, 0x917aff6d,
- 0x80000000, 0xd7610002,
- 0x0000fa7a, 0x807d817d,
- 0xd7610002, 0x0000fa6e,
- 0x807d817d, 0xd7610002,
- 0x0000fa6f, 0x807d817d,
- 0xd7610002, 0x0000fa78,
- 0x807d817d, 0xb8faf811,
- 0xd7610002, 0x0000fa7a,
- 0x807d817d, 0xd7610002,
- 0x0000fa7b, 0x807d817d,
- 0xb8f1f801, 0xd7610002,
- 0x0000fa71, 0x807d817d,
- 0xb8f1f814, 0xd7610002,
- 0x0000fa71, 0x807d817d,
- 0xb8f1f815, 0xd7610002,
- 0x0000fa71, 0x807d817d,
- 0xb8f1f812, 0xd7610002,
- 0x0000fa71, 0x807d817d,
- 0xb8f1f813, 0xd7610002,
- 0x0000fa71, 0x807d817d,
+ 0xbe804ec2, 0xbf94fffe,
+ 0xb8faf804, 0x8b7a847a,
+ 0x91788478, 0x8c787a78,
+ 0x917aff6d, 0x80000000,
+ 0xd7610002, 0x00010071,
+ 0xd7610002, 0x0001026c,
+ 0xd7610002, 0x0001047a,
+ 0xd7610002, 0x0001066e,
+ 0xd7610002, 0x0001086f,
+ 0xd7610002, 0x00010a78,
+ 0xd7610002, 0x00010e7b,
+ 0xd8500000, 0x00000000,
+ 0xd8500000, 0x00000000,
+ 0xd8500000, 0x00000000,
+ 0xd8500000, 0x00000000,
+ 0xd8500000, 0x00000000,
+ 0xd8500000, 0x00000000,
+ 0xd8500000, 0x00000000,
+ 0xd8500000, 0x00000000,
+ 0xb8faf811, 0xd7610002,
+ 0x00010c7a, 0xb8faf801,
+ 0xd7610002, 0x0001107a,
+ 0xb8faf814, 0xd7610002,
+ 0x0001127a, 0xb8faf815,
+ 0xd7610002, 0x0001147a,
+ 0xb8faf812, 0xd7610002,
+ 0x0001167a, 0xb8faf813,
+ 0xd7610002, 0x0001187a,
0xb8faf802, 0xd7610002,
- 0x0000fa7a, 0x807d817d,
- 0xbefa50c1, 0xbfc70000,
- 0xd7610002, 0x0000fa7a,
- 0x807d817d, 0xbefe00ff,
+ 0x00011a7a, 0xbefa50c1,
+ 0xbfc70000, 0xd7610002,
+ 0x00011c7a, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xbefe00ff,
0x0000ffff, 0xbeff0080,
0xc4068070, 0x008ce802,
0x00000000, 0xbefe00c1,
@@ -3801,331 +3817,358 @@ static const uint32_t cwsr_trap_gfx12_hex[] = {
0xbe824102, 0xbe844104,
0xbe864106, 0xbe884108,
0xbe8a410a, 0xbe8c410c,
- 0xbe8e410e, 0xd7610002,
- 0x0000f200, 0x80798179,
- 0xd7610002, 0x0000f201,
- 0x80798179, 0xd7610002,
- 0x0000f202, 0x80798179,
- 0xd7610002, 0x0000f203,
- 0x80798179, 0xd7610002,
- 0x0000f204, 0x80798179,
- 0xd7610002, 0x0000f205,
- 0x80798179, 0xd7610002,
- 0x0000f206, 0x80798179,
- 0xd7610002, 0x0000f207,
- 0x80798179, 0xd7610002,
- 0x0000f208, 0x80798179,
- 0xd7610002, 0x0000f209,
- 0x80798179, 0xd7610002,
- 0x0000f20a, 0x80798179,
- 0xd7610002, 0x0000f20b,
- 0x80798179, 0xd7610002,
- 0x0000f20c, 0x80798179,
- 0xd7610002, 0x0000f20d,
- 0x80798179, 0xd7610002,
- 0x0000f20e, 0x80798179,
- 0xd7610002, 0x0000f20f,
- 0x80798179, 0xbf06a079,
- 0xbfa10007, 0xc4068070,
+ 0xbe8e410e, 0xbf068079,
+ 0xbfa10032, 0xd7610002,
+ 0x00010000, 0xd7610002,
+ 0x00010201, 0xd7610002,
+ 0x00010402, 0xd7610002,
+ 0x00010603, 0xd7610002,
+ 0x00010804, 0xd7610002,
+ 0x00010a05, 0xd7610002,
+ 0x00010c06, 0xd7610002,
+ 0x00010e07, 0xd7610002,
+ 0x00011008, 0xd7610002,
+ 0x00011209, 0xd7610002,
+ 0x0001140a, 0xd7610002,
+ 0x0001160b, 0xd7610002,
+ 0x0001180c, 0xd7610002,
+ 0x00011a0d, 0xd7610002,
+ 0x00011c0e, 0xd7610002,
+ 0x00011e0f, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0x80799079,
+ 0xbfa00038, 0xd7610002,
+ 0x00012000, 0xd7610002,
+ 0x00012201, 0xd7610002,
+ 0x00012402, 0xd7610002,
+ 0x00012603, 0xd7610002,
+ 0x00012804, 0xd7610002,
+ 0x00012a05, 0xd7610002,
+ 0x00012c06, 0xd7610002,
+ 0x00012e07, 0xd7610002,
+ 0x00013008, 0xd7610002,
+ 0x00013209, 0xd7610002,
+ 0x0001340a, 0xd7610002,
+ 0x0001360b, 0xd7610002,
+ 0x0001380c, 0xd7610002,
+ 0x00013a0d, 0xd7610002,
+ 0x00013c0e, 0xd7610002,
+ 0x00013e0f, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0x80799079,
+ 0xc4068070, 0x008ce802,
+ 0x00000000, 0x8070ff70,
+ 0x00000080, 0xbef90080,
+ 0x7e040280, 0x807d907d,
+ 0xbf0aff7d, 0x00000060,
+ 0xbfa2ff88, 0xbe804100,
+ 0xbe824102, 0xbe844104,
+ 0xbe864106, 0xbe884108,
+ 0xbe8a410a, 0xd7610002,
+ 0x00010000, 0xd7610002,
+ 0x00010201, 0xd7610002,
+ 0x00010402, 0xd7610002,
+ 0x00010603, 0xd7610002,
+ 0x00010804, 0xd7610002,
+ 0x00010a05, 0xd7610002,
+ 0x00010c06, 0xd7610002,
+ 0x00010e07, 0xd7610002,
+ 0x00011008, 0xd7610002,
+ 0x00011209, 0xd7610002,
+ 0x0001140a, 0xd7610002,
+ 0x0001160b, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xd8500000,
+ 0x00000000, 0xc4068070,
0x008ce802, 0x00000000,
+ 0xbefe00c1, 0x857d9973,
+ 0x8b7d817d, 0xbf06817d,
+ 0xbfa20002, 0xbeff0080,
+ 0xbfa00001, 0xbeff00c1,
+ 0xb8fb4306, 0x8b7bc17b,
+ 0xbfa10044, 0x8b7aff6d,
+ 0x80000000, 0xbfa10041,
+ 0x847b897b, 0xbef6007b,
+ 0xb8f03b05, 0x80708170,
+ 0xbf0d9973, 0xbfa20002,
+ 0x84708970, 0xbfa00001,
+ 0x84708a70, 0xb8fa1e06,
+ 0x847a8a7a, 0x80707a70,
+ 0x8070ff70, 0x00000200,
0x8070ff70, 0x00000080,
- 0xbef90080, 0x7e040280,
- 0x807d907d, 0xbf0aff7d,
- 0x00000060, 0xbfa2ffbb,
- 0xbe804100, 0xbe824102,
- 0xbe844104, 0xbe864106,
- 0xbe884108, 0xbe8a410a,
- 0xd7610002, 0x0000f200,
- 0x80798179, 0xd7610002,
- 0x0000f201, 0x80798179,
- 0xd7610002, 0x0000f202,
- 0x80798179, 0xd7610002,
- 0x0000f203, 0x80798179,
- 0xd7610002, 0x0000f204,
- 0x80798179, 0xd7610002,
- 0x0000f205, 0x80798179,
- 0xd7610002, 0x0000f206,
- 0x80798179, 0xd7610002,
- 0x0000f207, 0x80798179,
- 0xd7610002, 0x0000f208,
- 0x80798179, 0xd7610002,
- 0x0000f209, 0x80798179,
- 0xd7610002, 0x0000f20a,
- 0x80798179, 0xd7610002,
- 0x0000f20b, 0x80798179,
- 0xc4068070, 0x008ce802,
- 0x00000000, 0xbefe00c1,
- 0x857d9973, 0x8b7d817d,
- 0xbf06817d, 0xbfa20002,
- 0xbeff0080, 0xbfa00001,
- 0xbeff00c1, 0xb8fb4306,
- 0x8b7bc17b, 0xbfa10044,
- 0x8b7aff6d, 0x80000000,
- 0xbfa10041, 0x847b897b,
- 0xbef6007b, 0xb8f03b05,
- 0x80708170, 0xbf0d9973,
- 0xbfa20002, 0x84708970,
- 0xbfa00001, 0x84708a70,
- 0xb8fa1e06, 0x847a8a7a,
- 0x80707a70, 0x8070ff70,
- 0x00000200, 0x8070ff70,
- 0x00000080, 0xbef600ff,
- 0x01000000, 0xd71f0000,
- 0x000100c1, 0xd7200000,
- 0x000200c1, 0x16000084,
- 0x857d9973, 0x8b7d817d,
- 0xbf06817d, 0xbefd0080,
- 0xbfa20013, 0xbe8300ff,
- 0x00000080, 0xbf800000,
- 0xbf800000, 0xbf800000,
- 0xd8d80000, 0x01000000,
- 0xbf8a0000, 0xc4068070,
- 0x008ce801, 0x00000000,
- 0x807d037d, 0x80700370,
- 0xd5250000, 0x0001ff00,
- 0x00000080, 0xbf0a7b7d,
- 0xbfa2fff3, 0xbfa00012,
- 0xbe8300ff, 0x00000100,
+ 0xbef600ff, 0x01000000,
+ 0xd71f0000, 0x000100c1,
+ 0xd7200000, 0x000200c1,
+ 0x16000084, 0x857d9973,
+ 0x8b7d817d, 0xbf06817d,
+ 0xbefd0080, 0xbfa20013,
+ 0xbe8300ff, 0x00000080,
0xbf800000, 0xbf800000,
0xbf800000, 0xd8d80000,
0x01000000, 0xbf8a0000,
0xc4068070, 0x008ce801,
0x00000000, 0x807d037d,
0x80700370, 0xd5250000,
- 0x0001ff00, 0x00000100,
+ 0x0001ff00, 0x00000080,
0xbf0a7b7d, 0xbfa2fff3,
- 0xbefe00c1, 0x857d9973,
- 0x8b7d817d, 0xbf06817d,
- 0xbfa20004, 0xbef000ff,
- 0x00000200, 0xbeff0080,
- 0xbfa00003, 0xbef000ff,
- 0x00000400, 0xbeff00c1,
- 0xb8fb3b05, 0x807b817b,
- 0x847b827b, 0x857d9973,
- 0x8b7d817d, 0xbf06817d,
- 0xbfa2001b, 0xbef600ff,
- 0x01000000, 0xbefd0084,
- 0xbf0a7b7d, 0xbfa10040,
- 0x7e008700, 0x7e028701,
- 0x7e048702, 0x7e068703,
- 0xc4068070, 0x008ce800,
- 0x00000000, 0xc4068070,
- 0x008ce801, 0x00008000,
- 0xc4068070, 0x008ce802,
- 0x00010000, 0xc4068070,
- 0x008ce803, 0x00018000,
- 0x807d847d, 0x8070ff70,
- 0x00000200, 0xbf0a7b7d,
- 0xbfa2ffeb, 0xbfa0002a,
+ 0xbfa00012, 0xbe8300ff,
+ 0x00000100, 0xbf800000,
+ 0xbf800000, 0xbf800000,
+ 0xd8d80000, 0x01000000,
+ 0xbf8a0000, 0xc4068070,
+ 0x008ce801, 0x00000000,
+ 0x807d037d, 0x80700370,
+ 0xd5250000, 0x0001ff00,
+ 0x00000100, 0xbf0a7b7d,
+ 0xbfa2fff3, 0xbefe00c1,
+ 0x857d9973, 0x8b7d817d,
+ 0xbf06817d, 0xbfa20004,
+ 0xbef000ff, 0x00000200,
+ 0xbeff0080, 0xbfa00003,
+ 0xbef000ff, 0x00000400,
+ 0xbeff00c1, 0xb8fb3b05,
+ 0x807b817b, 0x847b827b,
+ 0x857d9973, 0x8b7d817d,
+ 0xbf06817d, 0xbfa2001b,
0xbef600ff, 0x01000000,
0xbefd0084, 0xbf0a7b7d,
- 0xbfa10015, 0x7e008700,
+ 0xbfa10040, 0x7e008700,
0x7e028701, 0x7e048702,
0x7e068703, 0xc4068070,
0x008ce800, 0x00000000,
0xc4068070, 0x008ce801,
- 0x00010000, 0xc4068070,
- 0x008ce802, 0x00020000,
+ 0x00008000, 0xc4068070,
+ 0x008ce802, 0x00010000,
0xc4068070, 0x008ce803,
- 0x00030000, 0x807d847d,
- 0x8070ff70, 0x00000400,
+ 0x00018000, 0x807d847d,
+ 0x8070ff70, 0x00000200,
0xbf0a7b7d, 0xbfa2ffeb,
- 0xb8fb1e06, 0x8b7bc17b,
- 0xbfa1000d, 0x847b837b,
- 0x807b7d7b, 0xbefe00c1,
- 0xbeff0080, 0x7e008700,
+ 0xbfa0002a, 0xbef600ff,
+ 0x01000000, 0xbefd0084,
+ 0xbf0a7b7d, 0xbfa10015,
+ 0x7e008700, 0x7e028701,
+ 0x7e048702, 0x7e068703,
0xc4068070, 0x008ce800,
- 0x00000000, 0x807d817d,
- 0x8070ff70, 0x00000080,
- 0xbf0a7b7d, 0xbfa2fff7,
- 0xbfa0016e, 0xbef4007e,
- 0x8b75ff7f, 0x0000ffff,
- 0x8c75ff75, 0x00040000,
- 0xbef60080, 0xbef700ff,
- 0x10807fac, 0xbef1007f,
- 0xb8f20742, 0x84729972,
- 0x8b6eff7f, 0x04000000,
- 0xbfa1003b, 0xbefe00c1,
- 0x857d9972, 0x8b7d817d,
- 0xbf06817d, 0xbfa20002,
- 0xbeff0080, 0xbfa00001,
- 0xbeff00c1, 0xb8ef4306,
- 0x8b6fc16f, 0xbfa10030,
- 0x846f896f, 0xbef6006f,
+ 0x00000000, 0xc4068070,
+ 0x008ce801, 0x00010000,
+ 0xc4068070, 0x008ce802,
+ 0x00020000, 0xc4068070,
+ 0x008ce803, 0x00030000,
+ 0x807d847d, 0x8070ff70,
+ 0x00000400, 0xbf0a7b7d,
+ 0xbfa2ffeb, 0xb8fb1e06,
+ 0x8b7bc17b, 0xbfa1000d,
+ 0x847b837b, 0x807b7d7b,
+ 0xbefe00c1, 0xbeff0080,
+ 0x7e008700, 0xc4068070,
+ 0x008ce800, 0x00000000,
+ 0x807d817d, 0x8070ff70,
+ 0x00000080, 0xbf0a7b7d,
+ 0xbfa2fff7, 0xbfa0016e,
+ 0xbef4007e, 0x8b75ff7f,
+ 0x0000ffff, 0x8c75ff75,
+ 0x00040000, 0xbef60080,
+ 0xbef700ff, 0x10807fac,
+ 0xbef1007f, 0xb8f20742,
+ 0x84729972, 0x8b6eff7f,
+ 0x04000000, 0xbfa1003b,
+ 0xbefe00c1, 0x857d9972,
+ 0x8b7d817d, 0xbf06817d,
+ 0xbfa20002, 0xbeff0080,
+ 0xbfa00001, 0xbeff00c1,
+ 0xb8ef4306, 0x8b6fc16f,
+ 0xbfa10030, 0x846f896f,
+ 0xbef6006f, 0xb8f83b05,
+ 0x80788178, 0xbf0d9972,
+ 0xbfa20002, 0x84788978,
+ 0xbfa00001, 0x84788a78,
+ 0xb8ee1e06, 0x846e8a6e,
+ 0x80786e78, 0x8078ff78,
+ 0x00000200, 0x8078ff78,
+ 0x00000080, 0xbef600ff,
+ 0x01000000, 0x857d9972,
+ 0x8b7d817d, 0xbf06817d,
+ 0xbefd0080, 0xbfa2000d,
+ 0xc4050078, 0x0080e800,
+ 0x00000000, 0xbf8a0000,
+ 0xdac00000, 0x00000000,
+ 0x807dff7d, 0x00000080,
+ 0x8078ff78, 0x00000080,
+ 0xbf0a6f7d, 0xbfa2fff4,
+ 0xbfa0000c, 0xc4050078,
+ 0x0080e800, 0x00000000,
+ 0xbf8a0000, 0xdac00000,
+ 0x00000000, 0x807dff7d,
+ 0x00000100, 0x8078ff78,
+ 0x00000100, 0xbf0a6f7d,
+ 0xbfa2fff4, 0xbef80080,
+ 0xbefe00c1, 0x857d9972,
+ 0x8b7d817d, 0xbf06817d,
+ 0xbfa20002, 0xbeff0080,
+ 0xbfa00001, 0xbeff00c1,
+ 0xb8ef3b05, 0x806f816f,
+ 0x846f826f, 0x857d9972,
+ 0x8b7d817d, 0xbf06817d,
+ 0xbfa2002c, 0xbef600ff,
+ 0x01000000, 0xbeee0078,
+ 0x8078ff78, 0x00000200,
+ 0xbefd0084, 0xbf0a6f7d,
+ 0xbfa10061, 0xc4050078,
+ 0x008ce800, 0x00000000,
+ 0xc4050078, 0x008ce801,
+ 0x00008000, 0xc4050078,
+ 0x008ce802, 0x00010000,
+ 0xc4050078, 0x008ce803,
+ 0x00018000, 0xbf8a0000,
+ 0x7e008500, 0x7e028501,
+ 0x7e048502, 0x7e068503,
+ 0x807d847d, 0x8078ff78,
+ 0x00000200, 0xbf0a6f7d,
+ 0xbfa2ffea, 0xc405006e,
+ 0x008ce800, 0x00000000,
+ 0xc405006e, 0x008ce801,
+ 0x00008000, 0xc405006e,
+ 0x008ce802, 0x00010000,
+ 0xc405006e, 0x008ce803,
+ 0x00018000, 0xbf8a0000,
+ 0xbfa0003d, 0xbef600ff,
+ 0x01000000, 0xbeee0078,
+ 0x8078ff78, 0x00000400,
+ 0xbefd0084, 0xbf0a6f7d,
+ 0xbfa10016, 0xc4050078,
+ 0x008ce800, 0x00000000,
+ 0xc4050078, 0x008ce801,
+ 0x00010000, 0xc4050078,
+ 0x008ce802, 0x00020000,
+ 0xc4050078, 0x008ce803,
+ 0x00030000, 0xbf8a0000,
+ 0x7e008500, 0x7e028501,
+ 0x7e048502, 0x7e068503,
+ 0x807d847d, 0x8078ff78,
+ 0x00000400, 0xbf0a6f7d,
+ 0xbfa2ffea, 0xb8ef1e06,
+ 0x8b6fc16f, 0xbfa1000f,
+ 0x846f836f, 0x806f7d6f,
+ 0xbefe00c1, 0xbeff0080,
+ 0xc4050078, 0x008ce800,
+ 0x00000000, 0xbf8a0000,
+ 0x7e008500, 0x807d817d,
+ 0x8078ff78, 0x00000080,
+ 0xbf0a6f7d, 0xbfa2fff6,
+ 0xbeff00c1, 0xc405006e,
+ 0x008ce800, 0x00000000,
+ 0xc405006e, 0x008ce801,
+ 0x00010000, 0xc405006e,
+ 0x008ce802, 0x00020000,
+ 0xc405006e, 0x008ce803,
+ 0x00030000, 0xbf8a0000,
0xb8f83b05, 0x80788178,
0xbf0d9972, 0xbfa20002,
0x84788978, 0xbfa00001,
0x84788a78, 0xb8ee1e06,
0x846e8a6e, 0x80786e78,
0x8078ff78, 0x00000200,
- 0x8078ff78, 0x00000080,
- 0xbef600ff, 0x01000000,
- 0x857d9972, 0x8b7d817d,
- 0xbf06817d, 0xbefd0080,
- 0xbfa2000d, 0xc4050078,
- 0x0080e800, 0x00000000,
- 0xbf8a0000, 0xdac00000,
- 0x00000000, 0x807dff7d,
- 0x00000080, 0x8078ff78,
- 0x00000080, 0xbf0a6f7d,
- 0xbfa2fff4, 0xbfa0000c,
- 0xc4050078, 0x0080e800,
- 0x00000000, 0xbf8a0000,
- 0xdac00000, 0x00000000,
- 0x807dff7d, 0x00000100,
- 0x8078ff78, 0x00000100,
- 0xbf0a6f7d, 0xbfa2fff4,
- 0xbef80080, 0xbefe00c1,
- 0x857d9972, 0x8b7d817d,
- 0xbf06817d, 0xbfa20002,
- 0xbeff0080, 0xbfa00001,
- 0xbeff00c1, 0xb8ef3b05,
- 0x806f816f, 0x846f826f,
- 0x857d9972, 0x8b7d817d,
- 0xbf06817d, 0xbfa2002c,
+ 0x80f8ff78, 0x00000050,
0xbef600ff, 0x01000000,
- 0xbeee0078, 0x8078ff78,
- 0x00000200, 0xbefd0084,
- 0xbf0a6f7d, 0xbfa10061,
- 0xc4050078, 0x008ce800,
- 0x00000000, 0xc4050078,
- 0x008ce801, 0x00008000,
- 0xc4050078, 0x008ce802,
- 0x00010000, 0xc4050078,
- 0x008ce803, 0x00018000,
- 0xbf8a0000, 0x7e008500,
- 0x7e028501, 0x7e048502,
- 0x7e068503, 0x807d847d,
+ 0xbefd00ff, 0x0000006c,
+ 0x80f89078, 0xf462403a,
+ 0xf0000000, 0xbf8a0000,
+ 0x80fd847d, 0xbf800000,
+ 0xbe804300, 0xbe824302,
+ 0x80f8a078, 0xf462603a,
+ 0xf0000000, 0xbf8a0000,
+ 0x80fd887d, 0xbf800000,
+ 0xbe804300, 0xbe824302,
+ 0xbe844304, 0xbe864306,
+ 0x80f8c078, 0xf462803a,
+ 0xf0000000, 0xbf8a0000,
+ 0x80fd907d, 0xbf800000,
+ 0xbe804300, 0xbe824302,
+ 0xbe844304, 0xbe864306,
+ 0xbe884308, 0xbe8a430a,
+ 0xbe8c430c, 0xbe8e430e,
+ 0xbf06807d, 0xbfa1fff0,
+ 0xb980f801, 0x00000000,
+ 0xb8f83b05, 0x80788178,
+ 0xbf0d9972, 0xbfa20002,
+ 0x84788978, 0xbfa00001,
+ 0x84788a78, 0xb8ee1e06,
+ 0x846e8a6e, 0x80786e78,
0x8078ff78, 0x00000200,
- 0xbf0a6f7d, 0xbfa2ffea,
- 0xc405006e, 0x008ce800,
- 0x00000000, 0xc405006e,
- 0x008ce801, 0x00008000,
- 0xc405006e, 0x008ce802,
- 0x00010000, 0xc405006e,
- 0x008ce803, 0x00018000,
- 0xbf8a0000, 0xbfa0003d,
0xbef600ff, 0x01000000,
- 0xbeee0078, 0x8078ff78,
- 0x00000400, 0xbefd0084,
- 0xbf0a6f7d, 0xbfa10016,
- 0xc4050078, 0x008ce800,
- 0x00000000, 0xc4050078,
- 0x008ce801, 0x00010000,
- 0xc4050078, 0x008ce802,
- 0x00020000, 0xc4050078,
- 0x008ce803, 0x00030000,
- 0xbf8a0000, 0x7e008500,
- 0x7e028501, 0x7e048502,
- 0x7e068503, 0x807d847d,
- 0x8078ff78, 0x00000400,
- 0xbf0a6f7d, 0xbfa2ffea,
- 0xb8ef1e06, 0x8b6fc16f,
- 0xbfa1000f, 0x846f836f,
- 0x806f7d6f, 0xbefe00c1,
- 0xbeff0080, 0xc4050078,
- 0x008ce800, 0x00000000,
- 0xbf8a0000, 0x7e008500,
- 0x807d817d, 0x8078ff78,
- 0x00000080, 0xbf0a6f7d,
- 0xbfa2fff6, 0xbeff00c1,
- 0xc405006e, 0x008ce800,
- 0x00000000, 0xc405006e,
- 0x008ce801, 0x00010000,
- 0xc405006e, 0x008ce802,
- 0x00020000, 0xc405006e,
- 0x008ce803, 0x00030000,
- 0xbf8a0000, 0xb8f83b05,
- 0x80788178, 0xbf0d9972,
- 0xbfa20002, 0x84788978,
- 0xbfa00001, 0x84788a78,
- 0xb8ee1e06, 0x846e8a6e,
- 0x80786e78, 0x8078ff78,
- 0x00000200, 0x80f8ff78,
- 0x00000050, 0xbef600ff,
- 0x01000000, 0xbefd00ff,
- 0x0000006c, 0x80f89078,
- 0xf462403a, 0xf0000000,
- 0xbf8a0000, 0x80fd847d,
- 0xbf800000, 0xbe804300,
- 0xbe824302, 0x80f8a078,
- 0xf462603a, 0xf0000000,
- 0xbf8a0000, 0x80fd887d,
- 0xbf800000, 0xbe804300,
- 0xbe824302, 0xbe844304,
- 0xbe864306, 0x80f8c078,
- 0xf462803a, 0xf0000000,
- 0xbf8a0000, 0x80fd907d,
- 0xbf800000, 0xbe804300,
- 0xbe824302, 0xbe844304,
- 0xbe864306, 0xbe884308,
- 0xbe8a430a, 0xbe8c430c,
- 0xbe8e430e, 0xbf06807d,
- 0xbfa1fff0, 0xb980f801,
- 0x00000000, 0xb8f83b05,
- 0x80788178, 0xbf0d9972,
- 0xbfa20002, 0x84788978,
- 0xbfa00001, 0x84788a78,
- 0xb8ee1e06, 0x846e8a6e,
- 0x80786e78, 0x8078ff78,
- 0x00000200, 0xbef600ff,
- 0x01000000, 0xbeff0071,
- 0xf4621bfa, 0xf0000000,
- 0x80788478, 0xf4621b3a,
+ 0xbeff0071, 0xf4621bfa,
0xf0000000, 0x80788478,
- 0xf4621b7a, 0xf0000000,
- 0x80788478, 0xf4621c3a,
+ 0xf4621b3a, 0xf0000000,
+ 0x80788478, 0xf4621b7a,
0xf0000000, 0x80788478,
- 0xf4621c7a, 0xf0000000,
- 0x80788478, 0xf4621eba,
+ 0xf4621c3a, 0xf0000000,
+ 0x80788478, 0xf4621c7a,
0xf0000000, 0x80788478,
- 0xf4621efa, 0xf0000000,
- 0x80788478, 0xf4621e7a,
+ 0xf4621eba, 0xf0000000,
+ 0x80788478, 0xf4621efa,
0xf0000000, 0x80788478,
- 0xf4621cfa, 0xf0000000,
- 0x80788478, 0xf4621bba,
+ 0xf4621e7a, 0xf0000000,
+ 0x80788478, 0xf4621cfa,
0xf0000000, 0x80788478,
- 0xbf8a0000, 0xb96ef814,
0xf4621bba, 0xf0000000,
0x80788478, 0xbf8a0000,
- 0xb96ef815, 0xf4621bba,
+ 0xb96ef814, 0xf4621bba,
0xf0000000, 0x80788478,
- 0xbf8a0000, 0xb96ef812,
+ 0xbf8a0000, 0xb96ef815,
0xf4621bba, 0xf0000000,
0x80788478, 0xbf8a0000,
- 0xb96ef813, 0x8b6eff7f,
- 0x04000000, 0xbfa1000d,
- 0x80788478, 0xf4621bba,
+ 0xb96ef812, 0xf4621bba,
0xf0000000, 0x80788478,
- 0xbf8a0000, 0xbf0d806e,
- 0xbfa10006, 0x856e906e,
- 0x8b6e6e6e, 0xbfa10003,
- 0xbe804ec1, 0x816ec16e,
- 0xbfa0fffb, 0xbefd006f,
- 0xbefe0070, 0xbeff0071,
- 0xb97b2011, 0x857b867b,
- 0xb97b0191, 0x857b827b,
- 0xb97bba11, 0xb973f801,
- 0xb8ee3b05, 0x806e816e,
- 0xbf0d9972, 0xbfa20002,
- 0x846e896e, 0xbfa00001,
- 0x846e8a6e, 0xb8ef1e06,
- 0x846f8a6f, 0x806e6f6e,
- 0x806eff6e, 0x00000200,
- 0x806e746e, 0x826f8075,
- 0x8b6fff6f, 0x0000ffff,
- 0xf4605c37, 0xf8000050,
- 0xf4605d37, 0xf8000060,
- 0xf4601e77, 0xf8000074,
- 0xbf8a0000, 0x8b6dff6d,
- 0x0000ffff, 0x8bfe7e7e,
- 0x8bea6a6a, 0xb97af804,
+ 0xbf8a0000, 0xb96ef813,
+ 0x8b6eff7f, 0x04000000,
+ 0xbfa1000d, 0x80788478,
+ 0xf4621bba, 0xf0000000,
+ 0x80788478, 0xbf8a0000,
+ 0xbf0d806e, 0xbfa10006,
+ 0x856e906e, 0x8b6e6e6e,
+ 0xbfa10003, 0xbe804ec1,
+ 0x816ec16e, 0xbfa0fffb,
+ 0xbefd006f, 0xbefe0070,
+ 0xbeff0071, 0xb97b2011,
+ 0x857b867b, 0xb97b0191,
+ 0x857b827b, 0xb97bba11,
+ 0xb973f801, 0xb8ee3b05,
+ 0x806e816e, 0xbf0d9972,
+ 0xbfa20002, 0x846e896e,
+ 0xbfa00001, 0x846e8a6e,
+ 0xb8ef1e06, 0x846f8a6f,
+ 0x806e6f6e, 0x806eff6e,
+ 0x00000200, 0x806e746e,
+ 0x826f8075, 0x8b6fff6f,
+ 0x0000ffff, 0xf4605c37,
+ 0xf8000050, 0xf4605d37,
+ 0xf8000060, 0xf4601e77,
+ 0xf8000074, 0xbf8a0000,
+ 0x8b6dff6d, 0x0000ffff,
+ 0x8bfe7e7e, 0x8bea6a6a,
+ 0xb97af804, 0xbe804ec2,
+ 0xbf94fffe, 0xbe804a6c,
0xbe804ec2, 0xbf94fffe,
- 0xbe804a6c, 0xbe804ec2,
- 0xbf94fffe, 0xbfb10000,
+ 0xbfb10000, 0xbf9f0000,
0xbf9f0000, 0xbf9f0000,
0xbf9f0000, 0xbf9f0000,
- 0xbf9f0000, 0x00000000,
};
static const uint32_t cwsr_trap_gfx9_5_0_hex[] = {
diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx12.asm b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx12.asm
index 7b9d36e5fa43..5a1a1b1f897f 100644
--- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx12.asm
+++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx12.asm
@@ -30,6 +30,7 @@
#define CHIP_GFX12 37
#define SINGLE_STEP_MISSED_WORKAROUND 1 //workaround for lost TRAP_AFTER_INST exception when SAVECTX raised
+#define HAVE_VALU_SGPR_HAZARD (ASIC_FAMILY == CHIP_GFX12)
var SQ_WAVE_STATE_PRIV_BARRIER_COMPLETE_MASK = 0x4
var SQ_WAVE_STATE_PRIV_SCC_SHIFT = 9
@@ -351,6 +352,7 @@ L_HAVE_VGPRS:
v_writelane_b32 v0, ttmp13, 0xD
v_writelane_b32 v0, exec_lo, 0xE
v_writelane_b32 v0, exec_hi, 0xF
+ valu_sgpr_hazard()
s_mov_b32 exec_lo, 0x3FFF
s_mov_b32 exec_hi, 0x0
@@ -417,7 +419,6 @@ L_SAVE_HWREG:
v_mov_b32 v0, 0x0 //Offset[31:0] from buffer resource
v_mov_b32 v1, 0x0 //Offset[63:32] from buffer resource
v_mov_b32 v2, 0x0 //Set of SGPRs for TCP store
- s_mov_b32 m0, 0x0 //Next lane of v2 to write to
// Ensure no further changes to barrier or LDS state.
// STATE_PRIV.BARRIER_COMPLETE may change up to this point.
@@ -430,40 +431,41 @@ L_SAVE_HWREG:
s_andn2_b32 s_save_state_priv, s_save_state_priv, SQ_WAVE_STATE_PRIV_BARRIER_COMPLETE_MASK
s_or_b32 s_save_state_priv, s_save_state_priv, s_save_tmp
- write_hwreg_to_v2(s_save_m0)
- write_hwreg_to_v2(s_save_pc_lo)
s_andn2_b32 s_save_tmp, s_save_pc_hi, S_SAVE_PC_HI_FIRST_WAVE_MASK
- write_hwreg_to_v2(s_save_tmp)
- write_hwreg_to_v2(s_save_exec_lo)
- write_hwreg_to_v2(s_save_exec_hi)
- write_hwreg_to_v2(s_save_state_priv)
+ v_writelane_b32 v2, s_save_m0, 0x0
+ v_writelane_b32 v2, s_save_pc_lo, 0x1
+ v_writelane_b32 v2, s_save_tmp, 0x2
+ v_writelane_b32 v2, s_save_exec_lo, 0x3
+ v_writelane_b32 v2, s_save_exec_hi, 0x4
+ v_writelane_b32 v2, s_save_state_priv, 0x5
+ v_writelane_b32 v2, s_save_xnack_mask, 0x7
+ valu_sgpr_hazard()
s_getreg_b32 s_save_tmp, hwreg(HW_REG_WAVE_EXCP_FLAG_PRIV)
- write_hwreg_to_v2(s_save_tmp)
+ v_writelane_b32 v2, s_save_tmp, 0x6
- write_hwreg_to_v2(s_save_xnack_mask)
+ s_getreg_b32 s_save_tmp, hwreg(HW_REG_WAVE_MODE)
+ v_writelane_b32 v2, s_save_tmp, 0x8
- s_getreg_b32 s_save_m0, hwreg(HW_REG_WAVE_MODE)
- write_hwreg_to_v2(s_save_m0)
+ s_getreg_b32 s_save_tmp, hwreg(HW_REG_WAVE_SCRATCH_BASE_LO)
+ v_writelane_b32 v2, s_save_tmp, 0x9
- s_getreg_b32 s_save_m0, hwreg(HW_REG_WAVE_SCRATCH_BASE_LO)
- write_hwreg_to_v2(s_save_m0)
+ s_getreg_b32 s_save_tmp, hwreg(HW_REG_WAVE_SCRATCH_BASE_HI)
+ v_writelane_b32 v2, s_save_tmp, 0xA
- s_getreg_b32 s_save_m0, hwreg(HW_REG_WAVE_SCRATCH_BASE_HI)
- write_hwreg_to_v2(s_save_m0)
+ s_getreg_b32 s_save_tmp, hwreg(HW_REG_WAVE_EXCP_FLAG_USER)
+ v_writelane_b32 v2, s_save_tmp, 0xB
- s_getreg_b32 s_save_m0, hwreg(HW_REG_WAVE_EXCP_FLAG_USER)
- write_hwreg_to_v2(s_save_m0)
-
- s_getreg_b32 s_save_m0, hwreg(HW_REG_WAVE_TRAP_CTRL)
- write_hwreg_to_v2(s_save_m0)
+ s_getreg_b32 s_save_tmp, hwreg(HW_REG_WAVE_TRAP_CTRL)
+ v_writelane_b32 v2, s_save_tmp, 0xC
s_getreg_b32 s_save_tmp, hwreg(HW_REG_WAVE_STATUS)
- write_hwreg_to_v2(s_save_tmp)
+ v_writelane_b32 v2, s_save_tmp, 0xD
s_get_barrier_state s_save_tmp, -1
s_wait_kmcnt (0)
- write_hwreg_to_v2(s_save_tmp)
+ v_writelane_b32 v2, s_save_tmp, 0xE
+ valu_sgpr_hazard()
// Write HWREGs with 16 VGPR lanes. TTMPs occupy space after this.
s_mov_b32 exec_lo, 0xFFFF
@@ -497,10 +499,12 @@ L_SAVE_SGPR_LOOP:
s_movrels_b64 s12, s12 //s12 = s[12+m0], s13 = s[13+m0]
s_movrels_b64 s14, s14 //s14 = s[14+m0], s15 = s[15+m0]
- write_16sgpr_to_v2(s0)
-
- s_cmp_eq_u32 ttmp13, 0x20 //have 32 VGPR lanes filled?
- s_cbranch_scc0 L_SAVE_SGPR_SKIP_TCP_STORE
+ s_cmp_eq_u32 ttmp13, 0x0
+ s_cbranch_scc0 L_WRITE_V2_SECOND_HALF
+ write_16sgpr_to_v2(s0, 0x0)
+ s_branch L_SAVE_SGPR_SKIP_TCP_STORE
+L_WRITE_V2_SECOND_HALF:
+ write_16sgpr_to_v2(s0, 0x10)
buffer_store_dword v2, v0, s_save_buf_rsrc0, s_save_mem_offset scope:SCOPE_SYS
s_add_u32 s_save_mem_offset, s_save_mem_offset, 0x80
@@ -1056,27 +1060,21 @@ L_END_PGM:
s_endpgm_saved
end
-function write_hwreg_to_v2(s)
- // Copy into VGPR for later TCP store.
- v_writelane_b32 v2, s, m0
- s_add_u32 m0, m0, 0x1
-end
-
-
-function write_16sgpr_to_v2(s)
+function write_16sgpr_to_v2(s, lane_offset)
// Copy into VGPR for later TCP store.
for var sgpr_idx = 0; sgpr_idx < 16; sgpr_idx ++
- v_writelane_b32 v2, s[sgpr_idx], ttmp13
- s_add_u32 ttmp13, ttmp13, 0x1
+ v_writelane_b32 v2, s[sgpr_idx], sgpr_idx + lane_offset
end
+ valu_sgpr_hazard()
+ s_add_u32 ttmp13, ttmp13, 0x10
end
function write_12sgpr_to_v2(s)
// Copy into VGPR for later TCP store.
for var sgpr_idx = 0; sgpr_idx < 12; sgpr_idx ++
- v_writelane_b32 v2, s[sgpr_idx], ttmp13
- s_add_u32 ttmp13, ttmp13, 0x1
+ v_writelane_b32 v2, s[sgpr_idx], sgpr_idx
end
+ valu_sgpr_hazard()
end
function read_hwreg_from_mem(s, s_rsrc, s_mem_offset)
@@ -1128,3 +1126,11 @@ function get_wave_size2(s_reg)
s_getreg_b32 s_reg, hwreg(HW_REG_WAVE_STATUS,SQ_WAVE_STATUS_WAVE64_SHIFT,SQ_WAVE_STATUS_WAVE64_SIZE)
s_lshl_b32 s_reg, s_reg, S_WAVE_SIZE
end
+
+function valu_sgpr_hazard
+#if HAVE_VALU_SGPR_HAZARD
+ for var rep = 0; rep < 8; rep ++
+ ds_nop
+ end
+#endif
+end
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index d4593374e7a1..34c2c42c0f95 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -1230,11 +1230,13 @@ static int evict_process_queues_cpsch(struct device_queue_manager *dqm,
decrement_queue_count(dqm, qpd, q);
if (dqm->dev->kfd->shared_resources.enable_mes) {
- retval = remove_queue_mes(dqm, q, qpd);
- if (retval) {
+ int err;
+
+ err = remove_queue_mes(dqm, q, qpd);
+ if (err) {
dev_err(dev, "Failed to evict queue %d\n",
q->properties.queue_id);
- goto out;
+ retval = err;
}
}
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_queue.c
index ecccd7adbab4..4afff7094caf 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_queue.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_queue.c
@@ -233,6 +233,7 @@ void kfd_queue_buffer_put(struct amdgpu_bo **bo)
int kfd_queue_acquire_buffers(struct kfd_process_device *pdd, struct queue_properties *properties)
{
struct kfd_topology_device *topo_dev;
+ u64 expected_queue_size;
struct amdgpu_vm *vm;
u32 total_cwsr_size;
int err;
@@ -241,6 +242,15 @@ int kfd_queue_acquire_buffers(struct kfd_process_device *pdd, struct queue_prope
if (!topo_dev)
return -EINVAL;
+ /* AQL queues on GFX7 and GFX8 appear twice their actual size */
+ if (properties->type == KFD_QUEUE_TYPE_COMPUTE &&
+ properties->format == KFD_QUEUE_FORMAT_AQL &&
+ topo_dev->node_props.gfx_target_version >= 70000 &&
+ topo_dev->node_props.gfx_target_version < 90000)
+ expected_queue_size = properties->queue_size / 2;
+ else
+ expected_queue_size = properties->queue_size;
+
vm = drm_priv_to_vm(pdd->drm_priv);
err = amdgpu_bo_reserve(vm->root.bo, false);
if (err)
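For the GFX7/GFX8 AQL case above, the size used for the ring buffer BO lookup is half of what user space reports. A quick worked example with an assumed queue size:

/* Assumed example: user space reports a 64 KiB AQL queue on a GFX8 part. */
properties->queue_size = 64 * 1024;
expected_queue_size = properties->queue_size / 2;	/* BO lookup uses 32 KiB */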
@@ -255,7 +265,7 @@ int kfd_queue_acquire_buffers(struct kfd_process_device *pdd, struct queue_prope
goto out_err_unreserve;
err = kfd_queue_buffer_get(vm, (void *)properties->queue_address,
- &properties->ring_bo, properties->queue_size);
+ &properties->ring_bo, expected_queue_size);
if (err)
goto out_err_unreserve;
@@ -266,8 +276,8 @@ int kfd_queue_acquire_buffers(struct kfd_process_device *pdd, struct queue_prope
/* EOP buffer is not required for all ASICs */
if (properties->eop_ring_buffer_address) {
if (properties->eop_ring_buffer_size != topo_dev->node_props.eop_buffer_size) {
- pr_debug("queue eop bo size 0x%lx not equal to node eop buf size 0x%x\n",
- properties->eop_buf_bo->tbo.base.size,
+ pr_debug("queue eop bo size 0x%x not equal to node eop buf size 0x%x\n",
+ properties->eop_ring_buffer_size,
topo_dev->node_props.eop_buffer_size);
err = -EINVAL;
goto out_err_unreserve;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
index bd3e20d981e0..9477a4adcd36 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
@@ -1286,13 +1286,7 @@ svm_range_get_pte_flags(struct kfd_node *node,
break;
case IP_VERSION(12, 0, 0):
case IP_VERSION(12, 0, 1):
- if (domain == SVM_RANGE_VRAM_DOMAIN) {
- if (bo_node != node)
- mapping_flags |= AMDGPU_VM_MTYPE_NC;
- } else {
- mapping_flags |= coherent ?
- AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
- }
+ mapping_flags |= AMDGPU_VM_MTYPE_NC;
break;
default:
mapping_flags |= coherent ?
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 9d9645a2d18e..39df45f652b3 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -245,6 +245,10 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector);
static void handle_hpd_rx_irq(void *param);
+static void amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
+ int bl_idx,
+ u32 user_brightness);
+
static bool
is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
struct drm_crtc_state *new_crtc_state);
@@ -1741,7 +1745,7 @@ static void retrieve_dmi_info(struct amdgpu_display_manager *dm, struct dc_init_
}
if (quirk_entries.support_edp0_on_dp1) {
init_data->flags.support_edp0_on_dp1 = true;
- drm_info(dev, "aux_hpd_discon_quirk attached\n");
+ drm_info(dev, "support_edp0_on_dp1 attached\n");
}
}
@@ -3371,8 +3375,19 @@ static int dm_resume(struct amdgpu_ip_block *ip_block)
mutex_unlock(&dm->dc_lock);
+ /* set the backlight after a reset */
+ for (i = 0; i < dm->num_of_edps; i++) {
+ if (dm->backlight_dev[i])
+ amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]);
+ }
+
return 0;
}
+
+ /* leave display off for S4 sequence */
+ if (adev->in_s4)
+ return 0;
+
/* Recreate dc_state - DC invalidates it when setting power state to S3. */
dc_state_release(dm_state->context);
dm_state->context = dc_state_create(dm->dc, NULL);
@@ -4906,6 +4921,7 @@ amdgpu_dm_register_backlight_device(struct amdgpu_dm_connector *aconnector)
dm->backlight_dev[aconnector->bl_idx] =
backlight_device_register(bl_name, aconnector->base.kdev, dm,
&amdgpu_dm_backlight_ops, &props);
+ dm->brightness[aconnector->bl_idx] = props.brightness;
if (IS_ERR(dm->backlight_dev[aconnector->bl_idx])) {
DRM_ERROR("DM: Backlight registration failed!\n");
@@ -4973,7 +4989,6 @@ static void setup_backlight_device(struct amdgpu_display_manager *dm,
aconnector->bl_idx = bl_idx;
amdgpu_dm_update_backlight_caps(dm, bl_idx);
- dm->brightness[bl_idx] = AMDGPU_MAX_BL_LEVEL;
dm->backlight_link[bl_idx] = link;
dm->num_of_edps++;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
index e339c7a8d541..c0dc23244049 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
@@ -455,6 +455,7 @@ void hdcp_destroy(struct kobject *kobj, struct hdcp_workqueue *hdcp_work)
for (i = 0; i < hdcp_work->max_link; i++) {
cancel_delayed_work_sync(&hdcp_work[i].callback_dwork);
cancel_delayed_work_sync(&hdcp_work[i].watchdog_timer_dwork);
+ cancel_delayed_work_sync(&hdcp_work[i].property_validate_dwork);
}
sysfs_remove_bin_file(kobj, &hdcp_work[0].attr);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
index c4a7fd453e5f..a215234151ac 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
@@ -894,8 +894,16 @@ void amdgpu_dm_hpd_init(struct amdgpu_device *adev)
struct drm_device *dev = adev_to_drm(adev);
struct drm_connector *connector;
struct drm_connector_list_iter iter;
+ int irq_type;
int i;
+ /* First, clear all hpd and hpdrx interrupts */
+ for (i = DC_IRQ_SOURCE_HPD1; i <= DC_IRQ_SOURCE_HPD6RX; i++) {
+ if (!dc_interrupt_set(adev->dm.dc, i, false))
+ drm_err(dev, "Failed to clear hpd(rx) source=%d on init\n",
+ i);
+ }
+
drm_connector_list_iter_begin(dev, &iter);
drm_for_each_connector_iter(connector, &iter) {
struct amdgpu_dm_connector *amdgpu_dm_connector;
@@ -908,10 +916,31 @@ void amdgpu_dm_hpd_init(struct amdgpu_device *adev)
dc_link = amdgpu_dm_connector->dc_link;
+ /*
+ * Get a base driver irq reference for hpd ints for the lifetime
+ * of dm. Note that only hpd interrupt types are registered with
+ * base driver; hpd_rx types aren't. IOW, amdgpu_irq_get/put on
+ * hpd_rx isn't available. DM currently controls hpd_rx
+ * explicitly with dc_interrupt_set()
+ */
if (dc_link->irq_source_hpd != DC_IRQ_SOURCE_INVALID) {
- dc_interrupt_set(adev->dm.dc,
- dc_link->irq_source_hpd,
- true);
+ irq_type = dc_link->irq_source_hpd - DC_IRQ_SOURCE_HPD1;
+ /*
+ * TODO: There's a mismatch between mode_info.num_hpd
+ * and what bios reports as the # of connectors with hpd
+ * sources. Since the # of hpd source types registered
+ * with base driver == mode_info.num_hpd, we have to
+ * fallback to dc_interrupt_set for the remaining types.
+ */
+ if (irq_type < adev->mode_info.num_hpd) {
+ if (amdgpu_irq_get(adev, &adev->hpd_irq, irq_type))
+ drm_err(dev, "DM_IRQ: Failed get HPD for source=%d)!\n",
+ dc_link->irq_source_hpd);
+ } else {
+ dc_interrupt_set(adev->dm.dc,
+ dc_link->irq_source_hpd,
+ true);
+ }
}
if (dc_link->irq_source_hpd_rx != DC_IRQ_SOURCE_INVALID) {
@@ -921,12 +950,6 @@ void amdgpu_dm_hpd_init(struct amdgpu_device *adev)
}
}
drm_connector_list_iter_end(&iter);
-
- /* Update reference counts for HPDs */
- for (i = DC_IRQ_SOURCE_HPD1; i <= adev->mode_info.num_hpd; i++) {
- if (amdgpu_irq_get(adev, &adev->hpd_irq, i - DC_IRQ_SOURCE_HPD1))
- drm_err(dev, "DM_IRQ: Failed get HPD for source=%d)!\n", i);
- }
}
/**
@@ -942,7 +965,7 @@ void amdgpu_dm_hpd_fini(struct amdgpu_device *adev)
struct drm_device *dev = adev_to_drm(adev);
struct drm_connector *connector;
struct drm_connector_list_iter iter;
- int i;
+ int irq_type;
drm_connector_list_iter_begin(dev, &iter);
drm_for_each_connector_iter(connector, &iter) {
@@ -956,9 +979,18 @@ void amdgpu_dm_hpd_fini(struct amdgpu_device *adev)
dc_link = amdgpu_dm_connector->dc_link;
if (dc_link->irq_source_hpd != DC_IRQ_SOURCE_INVALID) {
- dc_interrupt_set(adev->dm.dc,
- dc_link->irq_source_hpd,
- false);
+ irq_type = dc_link->irq_source_hpd - DC_IRQ_SOURCE_HPD1;
+
+ /* TODO: See same TODO in amdgpu_dm_hpd_init() */
+ if (irq_type < adev->mode_info.num_hpd) {
+ if (amdgpu_irq_put(adev, &adev->hpd_irq, irq_type))
+ drm_err(dev, "DM_IRQ: Failed put HPD for source=%d!\n",
+ dc_link->irq_source_hpd);
+ } else {
+ dc_interrupt_set(adev->dm.dc,
+ dc_link->irq_source_hpd,
+ false);
+ }
}
if (dc_link->irq_source_hpd_rx != DC_IRQ_SOURCE_INVALID) {
@@ -968,10 +1000,4 @@ void amdgpu_dm_hpd_fini(struct amdgpu_device *adev)
}
}
drm_connector_list_iter_end(&iter);
-
- /* Update reference counts for HPDs */
- for (i = DC_IRQ_SOURCE_HPD1; i <= adev->mode_info.num_hpd; i++) {
- if (amdgpu_irq_put(adev, &adev->hpd_irq, i - DC_IRQ_SOURCE_HPD1))
- drm_err(dev, "DM_IRQ: Failed put HPD for source=%d!\n", i);
- }
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
index 774cc3f4f3fd..92472109f84a 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
@@ -277,8 +277,11 @@ static int amdgpu_dm_plane_validate_dcc(struct amdgpu_device *adev,
if (!dcc->enable)
return 0;
- if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
- !dc->cap_funcs.get_dcc_compression_cap)
+ if (adev->family < AMDGPU_FAMILY_GC_12_0_0 &&
+ format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
+ return -EINVAL;
+
+ if (!dc->cap_funcs.get_dcc_compression_cap)
return -EINVAL;
input.format = format;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index 520a34a42827..298668e9729c 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -1455,7 +1455,8 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger);
/* Invalid input */
- if (!plane_state->dst_rect.width ||
+ if (!plane_state ||
+ !plane_state->dst_rect.width ||
!plane_state->dst_rect.height ||
!plane_state->src_rect.width ||
!plane_state->src_rect.height) {
@@ -3388,10 +3389,13 @@ static int get_norm_pix_clk(const struct dc_crtc_timing *timing)
break;
case COLOR_DEPTH_121212:
normalized_pix_clk = (pix_clk * 36) / 24;
- break;
+ break;
+ case COLOR_DEPTH_141414:
+ normalized_pix_clk = (pix_clk * 42) / 24;
+ break;
case COLOR_DEPTH_161616:
normalized_pix_clk = (pix_clk * 48) / 24;
- break;
+ break;
default:
ASSERT(0);
break;
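The new COLOR_DEPTH_141414 case above follows the same normalization as its neighbours: the pixel clock is scaled by bits per pixel relative to 24-bit (8 bpc) RGB, so 14 bpc uses 42/24 = 1.75, sitting between the existing 12 bpc (36/24 = 1.5) and 16 bpc (48/24 = 2.0) cases.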
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c
index bf636b28e3e1..6e2fce329d73 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c
@@ -69,5 +69,16 @@ bool should_use_dmub_lock(struct dc_link *link)
if (link->replay_settings.replay_feature_enabled)
return true;
+ /* only use HW lock for PSR1 on single eDP */
+ if (link->psr_settings.psr_version == DC_PSR_VERSION_1) {
+ struct dc_link *edp_links[MAX_NUM_EDP];
+ int edp_num;
+
+ dc_get_edp_links(link->dc, edp_links, &edp_num);
+
+ if (edp_num == 1)
+ return true;
+ }
+
return false;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dce60/dce60_timing_generator.c b/drivers/gpu/drm/amd/display/dc/dce60/dce60_timing_generator.c
index e5fb0e8333e4..e691a1cf3356 100644
--- a/drivers/gpu/drm/amd/display/dc/dce60/dce60_timing_generator.c
+++ b/drivers/gpu/drm/amd/display/dc/dce60/dce60_timing_generator.c
@@ -239,6 +239,7 @@ static const struct timing_generator_funcs dce60_tg_funcs = {
dce60_timing_generator_enable_advanced_request,
.configure_crc = dce60_configure_crc,
.get_crc = dce110_get_crc,
+ .is_two_pixels_per_container = dce110_is_two_pixels_per_container,
};
void dce60_timing_generator_construct(
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
index 15ea216e903d..6157886f4802 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
@@ -704,7 +704,7 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub,
cw6.region.base = DMUB_CW6_BASE;
cw6.region.top = cw6.region.base + fw_state_fb->size;
- dmub->fw_state = fw_state_fb->cpu_addr;
+ dmub->fw_state = (void *)((uintptr_t)(fw_state_fb->cpu_addr) + DMUB_DEBUG_FW_STATE_OFFSET);
region6.offset.quad_part = shared_state_fb->gpu_addr;
region6.region.base = DMUB_CW6_BASE;
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
index e8ae7681bf0a..77b1f061bbf0 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
@@ -2421,6 +2421,8 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
case IP_VERSION(11, 0, 1):
case IP_VERSION(11, 0, 2):
case IP_VERSION(11, 0, 3):
+ case IP_VERSION(12, 0, 0):
+ case IP_VERSION(12, 0, 1):
*states = ATTR_STATE_SUPPORTED;
break;
default:
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c
index 9b2f4fe1578b..ddb6444406d2 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c
@@ -1895,16 +1895,6 @@ static int smu_v14_0_allow_ih_interrupt(struct smu_context *smu)
NULL);
}
-static int smu_v14_0_process_pending_interrupt(struct smu_context *smu)
-{
- int ret = 0;
-
- if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_ACDC_BIT))
- ret = smu_v14_0_allow_ih_interrupt(smu);
-
- return ret;
-}
-
int smu_v14_0_enable_thermal_alert(struct smu_context *smu)
{
int ret = 0;
@@ -1916,7 +1906,7 @@ int smu_v14_0_enable_thermal_alert(struct smu_context *smu)
if (ret)
return ret;
- return smu_v14_0_process_pending_interrupt(smu);
+ return smu_v14_0_allow_ih_interrupt(smu);
}
int smu_v14_0_disable_thermal_alert(struct smu_context *smu)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
index 5cad09c5f2ff..3f1fcf8c4ee8 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
@@ -1193,16 +1193,9 @@ static int smu_v14_0_2_print_clk_levels(struct smu_context *smu,
PP_OD_FEATURE_GFXCLK_BIT))
break;
- PPTable_t *pptable = smu->smu_table.driver_pptable;
- const OverDriveLimits_t * const overdrive_upperlimits =
- &pptable->SkuTable.OverDriveLimitsBasicMax;
- const OverDriveLimits_t * const overdrive_lowerlimits =
- &pptable->SkuTable.OverDriveLimitsBasicMin;
-
size += sysfs_emit_at(buf, size, "OD_SCLK_OFFSET:\n");
- size += sysfs_emit_at(buf, size, "0: %dMhz\n1: %uMhz\n",
- overdrive_lowerlimits->GfxclkFoffset,
- overdrive_upperlimits->GfxclkFoffset);
+ size += sysfs_emit_at(buf, size, "%dMhz\n",
+ od_table->OverDriveTable.GfxclkFoffset);
break;
case SMU_OD_MCLK:
@@ -1337,12 +1330,8 @@ static int smu_v14_0_2_print_clk_levels(struct smu_context *smu,
if (smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_GFXCLK_BIT)) {
smu_v14_0_2_get_od_setting_limits(smu,
- PP_OD_FEATURE_GFXCLK_FMIN,
- &min_value,
- NULL);
- smu_v14_0_2_get_od_setting_limits(smu,
PP_OD_FEATURE_GFXCLK_FMAX,
- NULL,
+ &min_value,
&max_value);
size += sysfs_emit_at(buf, size, "SCLK_OFFSET: %7dMhz %10uMhz\n",
min_value, max_value);
@@ -1627,6 +1616,39 @@ out:
adev->unique_id = ((uint64_t)upper32 << 32) | lower32;
}
+static int smu_v14_0_2_get_fan_speed_pwm(struct smu_context *smu,
+ uint32_t *speed)
+{
+ int ret;
+
+ if (!speed)
+ return -EINVAL;
+
+ ret = smu_v14_0_2_get_smu_metrics_data(smu,
+ METRICS_CURR_FANPWM,
+ speed);
+ if (ret) {
+ dev_err(smu->adev->dev, "Failed to get fan speed(PWM)!");
+ return ret;
+ }
+
+ /* Convert the PMFW output which is in percent to pwm(255) based */
+ *speed = min(*speed * 255 / 100, (uint32_t)255);
+
+ return 0;
+}
+
+static int smu_v14_0_2_get_fan_speed_rpm(struct smu_context *smu,
+ uint32_t *speed)
+{
+ if (!speed)
+ return -EINVAL;
+
+ return smu_v14_0_2_get_smu_metrics_data(smu,
+ METRICS_CURR_FANSPEED,
+ speed);
+}
+
static int smu_v14_0_2_get_power_limit(struct smu_context *smu,
uint32_t *current_power_limit,
uint32_t *default_power_limit,
@@ -2417,36 +2439,24 @@ static int smu_v14_0_2_od_edit_dpm_table(struct smu_context *smu,
return -ENOTSUPP;
}
- for (i = 0; i < size; i += 2) {
- if (i + 2 > size) {
- dev_info(adev->dev, "invalid number of input parameters %d\n", size);
- return -EINVAL;
- }
-
- switch (input[i]) {
- case 1:
- smu_v14_0_2_get_od_setting_limits(smu,
- PP_OD_FEATURE_GFXCLK_FMAX,
- &minimum,
- &maximum);
- if (input[i + 1] < minimum ||
- input[i + 1] > maximum) {
- dev_info(adev->dev, "GfxclkFmax (%ld) must be within [%u, %u]!\n",
- input[i + 1], minimum, maximum);
- return -EINVAL;
- }
-
- od_table->OverDriveTable.GfxclkFoffset = input[i + 1];
- od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_GFXCLK_BIT;
- break;
+ if (size != 1) {
+ dev_info(adev->dev, "invalid number of input parameters %d\n", size);
+ return -EINVAL;
+ }
- default:
- dev_info(adev->dev, "Invalid SCLK_VDDC_TABLE index: %ld\n", input[i]);
- dev_info(adev->dev, "Supported indices: [0:min,1:max]\n");
- return -EINVAL;
- }
+ smu_v14_0_2_get_od_setting_limits(smu,
+ PP_OD_FEATURE_GFXCLK_FMAX,
+ &minimum,
+ &maximum);
+ if (input[0] < minimum ||
+ input[0] > maximum) {
+ dev_info(adev->dev, "GfxclkFoffset must be within [%d, %u]!\n",
+ minimum, maximum);
+ return -EINVAL;
}
+ od_table->OverDriveTable.GfxclkFoffset = input[0];
+ od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_GFXCLK_BIT;
break;
case PP_OD_EDIT_MCLK_VDDC_TABLE:
@@ -2804,6 +2814,8 @@ static const struct pptable_funcs smu_v14_0_2_ppt_funcs = {
.set_performance_level = smu_v14_0_set_performance_level,
.gfx_off_control = smu_v14_0_gfx_off_control,
.get_unique_id = smu_v14_0_2_get_unique_id,
+ .get_fan_speed_pwm = smu_v14_0_2_get_fan_speed_pwm,
+ .get_fan_speed_rpm = smu_v14_0_2_get_fan_speed_rpm,
.get_power_limit = smu_v14_0_2_get_power_limit,
.set_power_limit = smu_v14_0_2_set_power_limit,
.get_power_profile_mode = smu_v14_0_2_get_power_profile_mode,
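The smu_v14_0_2_get_fan_speed_pwm() helper added above converts the PMFW percentage reading into the 0-255 PWM scale expected by hwmon: a reading of 40 percent, for example, becomes 40 * 255 / 100 = 102, and the min() against 255 clamps any out-of-range reading above 100 percent.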
diff --git a/drivers/gpu/drm/clients/drm_log.c b/drivers/gpu/drm/clients/drm_log.c
index 379850c83e51..d239f1e3c456 100644
--- a/drivers/gpu/drm/clients/drm_log.c
+++ b/drivers/gpu/drm/clients/drm_log.c
@@ -323,7 +323,7 @@ static int drm_log_client_suspend(struct drm_client_dev *client, bool _console_l
{
struct drm_log *dlog = client_to_drm_log(client);
- console_stop(&dlog->con);
+ console_suspend(&dlog->con);
return 0;
}
@@ -332,7 +332,7 @@ static int drm_log_client_resume(struct drm_client_dev *client, bool _console_lo
{
struct drm_log *dlog = client_to_drm_log(client);
- console_start(&dlog->con);
+ console_resume(&dlog->con);
return 0;
}
diff --git a/drivers/gpu/drm/display/drm_dp_mst_topology.c b/drivers/gpu/drm/display/drm_dp_mst_topology.c
index 06c91c5b7f7c..6d09bef671da 100644
--- a/drivers/gpu/drm/display/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/display/drm_dp_mst_topology.c
@@ -4025,6 +4025,22 @@ out:
return 0;
}
+static bool primary_mstb_probing_is_done(struct drm_dp_mst_topology_mgr *mgr)
+{
+ bool probing_done = false;
+
+ mutex_lock(&mgr->lock);
+
+ if (mgr->mst_primary && drm_dp_mst_topology_try_get_mstb(mgr->mst_primary)) {
+ probing_done = mgr->mst_primary->link_address_sent;
+ drm_dp_mst_topology_put_mstb(mgr->mst_primary);
+ }
+
+ mutex_unlock(&mgr->lock);
+
+ return probing_done;
+}
+
static inline bool
drm_dp_mst_process_up_req(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_pending_up_req *up_req)
@@ -4055,8 +4071,12 @@ drm_dp_mst_process_up_req(struct drm_dp_mst_topology_mgr *mgr,
/* TODO: Add missing handler for DP_RESOURCE_STATUS_NOTIFY events */
if (msg->req_type == DP_CONNECTION_STATUS_NOTIFY) {
- dowork = drm_dp_mst_handle_conn_stat(mstb, &msg->u.conn_stat);
- hotplug = true;
+ if (!primary_mstb_probing_is_done(mgr)) {
+ drm_dbg_kms(mgr->dev, "Got CSN before finish topology probing. Skip it.\n");
+ } else {
+ dowork = drm_dp_mst_handle_conn_stat(mstb, &msg->u.conn_stat);
+ hotplug = true;
+ }
}
drm_dp_mst_topology_put_mstb(mstb);
@@ -4138,10 +4158,11 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
drm_dp_send_up_ack_reply(mgr, mst_primary, up_req->msg.req_type,
false);
+ drm_dp_mst_topology_put_mstb(mst_primary);
+
if (up_req->msg.req_type == DP_CONNECTION_STATUS_NOTIFY) {
const struct drm_dp_connection_status_notify *conn_stat =
&up_req->msg.u.conn_stat;
- bool handle_csn;
drm_dbg_kms(mgr->dev, "Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n",
conn_stat->port_number,
@@ -4150,16 +4171,6 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
conn_stat->message_capability_status,
conn_stat->input_port,
conn_stat->peer_device_type);
-
- mutex_lock(&mgr->probe_lock);
- handle_csn = mst_primary->link_address_sent;
- mutex_unlock(&mgr->probe_lock);
-
- if (!handle_csn) {
- drm_dbg_kms(mgr->dev, "Got CSN before finish topology probing. Skip it.");
- kfree(up_req);
- goto out_put_primary;
- }
} else if (up_req->msg.req_type == DP_RESOURCE_STATUS_NOTIFY) {
const struct drm_dp_resource_status_notify *res_stat =
&up_req->msg.u.resource_stat;
@@ -4174,9 +4185,6 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
list_add_tail(&up_req->next, &mgr->up_req_list);
mutex_unlock(&mgr->up_req_lock);
queue_work(system_long_wq, &mgr->up_req_work);
-
-out_put_primary:
- drm_dp_mst_topology_put_mstb(mst_primary);
out_clear_reply:
reset_msg_rx_state(&mgr->up_req_recv);
return ret;
diff --git a/drivers/gpu/drm/drm_atomic_uapi.c b/drivers/gpu/drm/drm_atomic_uapi.c
index 370dc676e3aa..fd36b8fd54e9 100644
--- a/drivers/gpu/drm/drm_atomic_uapi.c
+++ b/drivers/gpu/drm/drm_atomic_uapi.c
@@ -956,6 +956,10 @@ int drm_atomic_connector_commit_dpms(struct drm_atomic_state *state,
if (mode != DRM_MODE_DPMS_ON)
mode = DRM_MODE_DPMS_OFF;
+
+ if (connector->dpms == mode)
+ goto out;
+
connector->dpms = mode;
crtc = connector->state->crtc;
diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c
index 5f24d6b41cc6..48b08c9611a7 100644
--- a/drivers/gpu/drm/drm_connector.c
+++ b/drivers/gpu/drm/drm_connector.c
@@ -1427,6 +1427,10 @@ EXPORT_SYMBOL(drm_hdmi_connector_get_output_format_name);
* callback. For atomic drivers the remapping to the "ACTIVE" property is
* implemented in the DRM core.
*
+ * On atomic drivers any DPMS setproperty ioctl where the value does not
+ * change is completely skipped, otherwise a full atomic commit will occur.
+ * On legacy drivers the exact behavior is driver specific.
+ *
* Note that this property cannot be set through the MODE_ATOMIC ioctl,
* userspace must use "ACTIVE" on the CRTC instead.
*
diff --git a/drivers/gpu/drm/drm_draw.c b/drivers/gpu/drm/drm_draw.c
index cb2ad12bce57..385eb5e10047 100644
--- a/drivers/gpu/drm/drm_draw.c
+++ b/drivers/gpu/drm/drm_draw.c
@@ -5,6 +5,8 @@
*/
#include <linux/bits.h>
+#include <linux/bug.h>
+#include <linux/export.h>
#include <linux/iosys-map.h>
#include <linux/types.h>
diff --git a/drivers/gpu/drm/drm_panic_qr.rs b/drivers/gpu/drm/drm_panic_qr.rs
index bcf248f69252..6903e2010cb9 100644
--- a/drivers/gpu/drm/drm_panic_qr.rs
+++ b/drivers/gpu/drm/drm_panic_qr.rs
@@ -545,7 +545,7 @@ impl EncodedMsg<'_> {
}
self.push(&mut offset, (MODE_STOP, 4));
- let pad_offset = (offset + 7) / 8;
+ let pad_offset = offset.div_ceil(8);
for i in pad_offset..self.version.max_data() {
self.data[i] = PADDING[(i & 1) ^ (pad_offset & 1)];
}
@@ -659,7 +659,7 @@ struct QrImage<'a> {
impl QrImage<'_> {
fn new<'a, 'b>(em: &'b EncodedMsg<'b>, qrdata: &'a mut [u8]) -> QrImage<'a> {
let width = em.version.width();
- let stride = (width + 7) / 8;
+ let stride = width.div_ceil(8);
let data = qrdata;
let mut qr_image = QrImage {
@@ -911,16 +911,16 @@ impl QrImage<'_> {
///
/// * `url`: The base URL of the QR code. It will be encoded as Binary segment.
/// * `data`: A pointer to the binary data, to be encoded. if URL is NULL, it
-/// will be encoded as binary segment, otherwise it will be encoded
-/// efficiently as a numeric segment, and appended to the URL.
+/// will be encoded as binary segment, otherwise it will be encoded
+/// efficiently as a numeric segment, and appended to the URL.
/// * `data_len`: Length of the data, that needs to be encoded, must be less
-/// than data_size.
+/// than data_size.
/// * `data_size`: Size of data buffer, it should be at least 4071 bytes to hold
-/// a V40 QR code. It will then be overwritten with the QR code image.
+/// a V40 QR code. It will then be overwritten with the QR code image.
/// * `tmp`: A temporary buffer that the QR code encoder will use, to write the
-/// segments and ECC.
+/// segments and ECC.
/// * `tmp_size`: Size of the temporary buffer, it must be at least 3706 bytes
-/// long for V40.
+/// long for V40.
///
/// # Safety
///
diff --git a/drivers/gpu/drm/gma500/mid_bios.c b/drivers/gpu/drm/gma500/mid_bios.c
index 7e76790c6a81..cba97d7db131 100644
--- a/drivers/gpu/drm/gma500/mid_bios.c
+++ b/drivers/gpu/drm/gma500/mid_bios.c
@@ -279,6 +279,11 @@ static void mid_get_vbt_data(struct drm_psb_private *dev_priv)
0, PCI_DEVFN(2, 0));
int ret = -1;
+ if (pci_gfx_root == NULL) {
+ WARN_ON(1);
+ return;
+ }
+
/* Get the address of the platform config vbt */
pci_read_config_dword(pci_gfx_root, 0xFC, &addr);
pci_dev_put(pci_gfx_root);
diff --git a/drivers/gpu/drm/hyperv/hyperv_drm_drv.c b/drivers/gpu/drm/hyperv/hyperv_drm_drv.c
index f59abfa7622a..0d49f168a919 100644
--- a/drivers/gpu/drm/hyperv/hyperv_drm_drv.c
+++ b/drivers/gpu/drm/hyperv/hyperv_drm_drv.c
@@ -154,6 +154,7 @@ static int hyperv_vmbus_probe(struct hv_device *hdev,
return 0;
err_free_mmio:
+ iounmap(hv->vram);
vmbus_free_mmio(hv->mem->start, hv->fb_size);
err_vmbus_close:
vmbus_close(hdev->channel);
@@ -172,6 +173,7 @@ static void hyperv_vmbus_remove(struct hv_device *hdev)
vmbus_close(hdev->channel);
hv_set_drvdata(hdev, NULL);
+ iounmap(hv->vram);
vmbus_free_mmio(hv->mem->start, hv->fb_size);
}
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index 41128469f12a..c9dcf2bbd4c7 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -7830,9 +7830,6 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
intel_program_dpkgc_latency(state);
- if (state->modeset)
- intel_set_cdclk_post_plane_update(state);
-
intel_wait_for_vblank_workers(state);
/* FIXME: We should call drm_atomic_helper_commit_hw_done() here
@@ -7906,6 +7903,8 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
intel_verify_planes(state);
intel_sagv_post_plane_update(state);
+ if (state->modeset)
+ intel_set_cdclk_post_plane_update(state);
intel_pmdemand_post_plane_update(state);
drm_atomic_helper_commit_hw_done(&state->base);
diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c
index a65cf97ad12d..86d6185fda50 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c
@@ -1867,7 +1867,8 @@ intel_dp_mst_encoder_init(struct intel_digital_port *dig_port, int conn_base_id)
/* create encoders */
mst_stream_encoders_create(dig_port);
ret = drm_dp_mst_topology_mgr_init(&intel_dp->mst_mgr, display->drm,
- &intel_dp->aux, 16, 3, conn_base_id);
+ &intel_dp->aux, 16,
+ INTEL_NUM_PIPES(display), conn_base_id);
if (ret) {
intel_dp->mst_mgr.cbs = NULL;
return ret;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
index 21274aa9bddd..c3dabb857960 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
@@ -164,6 +164,9 @@ static unsigned int tile_row_pages(const struct drm_i915_gem_object *obj)
* 4 - Support multiple fault handlers per object depending on object's
* backing storage (a.k.a. MMAP_OFFSET).
*
+ * 5 - Support multiple partial mmaps(mmap part of BO + unmap a offset, multiple
+ * times with different size and offset).
+ *
* Restrictions:
*
* * snoopable objects cannot be accessed via the GTT. It can cause machine
@@ -191,7 +194,7 @@ static unsigned int tile_row_pages(const struct drm_i915_gem_object *obj)
*/
int i915_gem_mmap_gtt_version(void)
{
- return 4;
+ return 5;
}
static inline struct i915_gtt_view
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc.c b/drivers/gpu/drm/i915/gt/uc/intel_huc.c
index b3cbf85c00cb..00d00c480cc5 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_huc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_huc.c
@@ -231,8 +231,8 @@ static void delayed_huc_load_init(struct intel_huc *huc)
sw_fence_dummy_notify);
i915_sw_fence_commit(&huc->delayed_load.fence);
- hrtimer_init(&huc->delayed_load.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- huc->delayed_load.timer.function = huc_delayed_load_timer_callback;
+ hrtimer_setup(&huc->delayed_load.timer, huc_delayed_load_timer_callback, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
}
static void delayed_huc_load_fini(struct intel_huc *huc)
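This hunk is the first of a run of mechanical conversions in this series (gvt/display.c, gvt/sched_policy.c, i915_perf.c, i915_pmu.c, i915_request.c, intel_uncore.c, msm_fence.c, msm_io_utils.c, vkms_crtc.c, vmwgfx_vkms.c and xe_oa.c below) that replace the two-step hrtimer_init() plus manual ".function" assignment with a single hrtimer_setup() call, which takes the callback as its second argument. A minimal sketch of the pattern, using a hypothetical "demo" structure and callback that are not part of the patch:

    /* Hypothetical example only; "demo" and demo_timer_cb() do not exist in the patch. */
    static enum hrtimer_restart demo_timer_cb(struct hrtimer *t)
    {
            return HRTIMER_NORESTART;
    }

    /* Old style: initialize the timer, then assign the callback separately. */
    hrtimer_init(&demo->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
    demo->timer.function = demo_timer_cb;

    /* New style: one call wires up callback, clock and mode together. */
    hrtimer_setup(&demo->timer, demo_timer_cb, CLOCK_MONOTONIC, HRTIMER_MODE_REL);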
diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c
index 95570cabdf27..f668cd9487f1 100644
--- a/drivers/gpu/drm/i915/gvt/display.c
+++ b/drivers/gpu/drm/i915/gvt/display.c
@@ -581,8 +581,7 @@ static int setup_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num,
vgpu->display.port_num = port_num;
/* Init hrtimer based on default refresh rate */
- hrtimer_init(&vblank_timer->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
- vblank_timer->timer.function = vblank_timer_fn;
+ hrtimer_setup(&vblank_timer->timer, vblank_timer_fn, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
vblank_timer->vrefresh_k = port->vrefresh_k;
vblank_timer->period = DIV64_U64_ROUND_CLOSEST(NSEC_PER_SEC * MSEC_PER_SEC, vblank_timer->vrefresh_k);
diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.c b/drivers/gpu/drm/i915/gvt/sched_policy.c
index c077fb4674f0..9f97f743aa71 100644
--- a/drivers/gpu/drm/i915/gvt/sched_policy.c
+++ b/drivers/gpu/drm/i915/gvt/sched_policy.c
@@ -286,8 +286,7 @@ static int tbs_sched_init(struct intel_gvt *gvt)
return -ENOMEM;
INIT_LIST_HEAD(&data->lru_runq_head);
- hrtimer_init(&data->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
- data->timer.function = tbs_timer_fn;
+ hrtimer_setup(&data->timer, tbs_timer_fn, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
data->period = GVT_DEFAULT_TIME_SLICE;
data->gvt = gvt;
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index 5384d1bb4923..279e266b4b06 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -3359,9 +3359,8 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
"opening stream oa config uuid=%s\n",
stream->oa_config->uuid);
- hrtimer_init(&stream->poll_check_timer,
- CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- stream->poll_check_timer.function = oa_poll_check_timer_cb;
+ hrtimer_setup(&stream->poll_check_timer, oa_poll_check_timer_cb, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
init_waitqueue_head(&stream->poll_wq);
spin_lock_init(&stream->oa_buffer.ptr_lock);
mutex_init(&stream->lock);
diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c
index e55db036be1b..0ce87f188d11 100644
--- a/drivers/gpu/drm/i915/i915_pmu.c
+++ b/drivers/gpu/drm/i915/i915_pmu.c
@@ -1264,8 +1264,7 @@ void i915_pmu_register(struct drm_i915_private *i915)
int ret = -ENOMEM;
spin_lock_init(&pmu->lock);
- hrtimer_init(&pmu->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- pmu->timer.function = i915_sample;
+ hrtimer_setup(&pmu->timer, i915_sample, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
pmu->cpuhp.cpu = -1;
init_rc6(pmu);
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index 8f62cfa23fb7..ea0b8e7e4828 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -293,8 +293,7 @@ static void __rq_init_watchdog(struct i915_request *rq)
{
struct i915_request_watchdog *wdg = &rq->watchdog;
- hrtimer_init(&wdg->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- wdg->timer.function = __rq_watchdog_expired;
+ hrtimer_setup(&wdg->timer, __rq_watchdog_expired, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
}
static void __rq_arm_watchdog(struct i915_request *rq)
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index eed4937c3ff3..bdcfcae83b52 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -2103,8 +2103,7 @@ static int __fw_domain_init(struct intel_uncore *uncore,
d->mask = BIT(domain_id);
- hrtimer_init(&d->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- d->timer.function = intel_uncore_fw_release_timer;
+ hrtimer_setup(&d->timer, intel_uncore_fw_release_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
uncore->fw_domains |= BIT(domain_id);
diff --git a/drivers/gpu/drm/imagination/pvr_fw_meta.c b/drivers/gpu/drm/imagination/pvr_fw_meta.c
index c39beb70c317..6d13864851fc 100644
--- a/drivers/gpu/drm/imagination/pvr_fw_meta.c
+++ b/drivers/gpu/drm/imagination/pvr_fw_meta.c
@@ -527,8 +527,10 @@ pvr_meta_vm_map(struct pvr_device *pvr_dev, struct pvr_fw_object *fw_obj)
static void
pvr_meta_vm_unmap(struct pvr_device *pvr_dev, struct pvr_fw_object *fw_obj)
{
- pvr_vm_unmap(pvr_dev->kernel_vm_ctx, fw_obj->fw_mm_node.start,
- fw_obj->fw_mm_node.size);
+ struct pvr_gem_object *pvr_obj = fw_obj->gem;
+
+ pvr_vm_unmap_obj(pvr_dev->kernel_vm_ctx, pvr_obj,
+ fw_obj->fw_mm_node.start, fw_obj->fw_mm_node.size);
}
static bool
diff --git a/drivers/gpu/drm/imagination/pvr_fw_trace.c b/drivers/gpu/drm/imagination/pvr_fw_trace.c
index 73707daa4e52..5dbb636d7d4f 100644
--- a/drivers/gpu/drm/imagination/pvr_fw_trace.c
+++ b/drivers/gpu/drm/imagination/pvr_fw_trace.c
@@ -333,8 +333,8 @@ static int fw_trace_seq_show(struct seq_file *s, void *v)
if (sf_id == ROGUE_FW_SF_LAST)
return -EINVAL;
- timestamp = read_fw_trace(trace_seq_data, 1) |
- ((u64)read_fw_trace(trace_seq_data, 2) << 32);
+ timestamp = ((u64)read_fw_trace(trace_seq_data, 1) << 32) |
+ read_fw_trace(trace_seq_data, 2);
timestamp = (timestamp & ~ROGUE_FWT_TIMESTAMP_TIME_CLRMSK) >>
ROGUE_FWT_TIMESTAMP_TIME_SHIFT;
diff --git a/drivers/gpu/drm/imagination/pvr_queue.c b/drivers/gpu/drm/imagination/pvr_queue.c
index c4f08432882b..43411be930a2 100644
--- a/drivers/gpu/drm/imagination/pvr_queue.c
+++ b/drivers/gpu/drm/imagination/pvr_queue.c
@@ -109,12 +109,20 @@ pvr_queue_fence_get_driver_name(struct dma_fence *f)
return PVR_DRIVER_NAME;
}
+static void pvr_queue_fence_release_work(struct work_struct *w)
+{
+ struct pvr_queue_fence *fence = container_of(w, struct pvr_queue_fence, release_work);
+
+ pvr_context_put(fence->queue->ctx);
+ dma_fence_free(&fence->base);
+}
+
static void pvr_queue_fence_release(struct dma_fence *f)
{
struct pvr_queue_fence *fence = container_of(f, struct pvr_queue_fence, base);
+ struct pvr_device *pvr_dev = fence->queue->ctx->pvr_dev;
- pvr_context_put(fence->queue->ctx);
- dma_fence_free(f);
+ queue_work(pvr_dev->sched_wq, &fence->release_work);
}
static const char *
@@ -268,6 +276,7 @@ pvr_queue_fence_init(struct dma_fence *f,
pvr_context_get(queue->ctx);
fence->queue = queue;
+ INIT_WORK(&fence->release_work, pvr_queue_fence_release_work);
dma_fence_init(&fence->base, fence_ops,
&fence_ctx->lock, fence_ctx->id,
atomic_inc_return(&fence_ctx->seqno));
@@ -304,8 +313,9 @@ pvr_queue_cccb_fence_init(struct dma_fence *fence, struct pvr_queue *queue)
static void
pvr_queue_job_fence_init(struct dma_fence *fence, struct pvr_queue *queue)
{
- pvr_queue_fence_init(fence, queue, &pvr_queue_job_fence_ops,
- &queue->job_fence_ctx);
+ if (!fence->ops)
+ pvr_queue_fence_init(fence, queue, &pvr_queue_job_fence_ops,
+ &queue->job_fence_ctx);
}
/**
diff --git a/drivers/gpu/drm/imagination/pvr_queue.h b/drivers/gpu/drm/imagination/pvr_queue.h
index e06ced69302f..93fe9ac9f58c 100644
--- a/drivers/gpu/drm/imagination/pvr_queue.h
+++ b/drivers/gpu/drm/imagination/pvr_queue.h
@@ -5,6 +5,7 @@
#define PVR_QUEUE_H
#include <drm/gpu_scheduler.h>
+#include <linux/workqueue.h>
#include "pvr_cccb.h"
#include "pvr_device.h"
@@ -63,6 +64,9 @@ struct pvr_queue_fence {
/** @queue: Queue that created this fence. */
struct pvr_queue *queue;
+
+ /** @release_work: Fence release work structure. */
+ struct work_struct release_work;
};
/**
diff --git a/drivers/gpu/drm/imagination/pvr_vm.c b/drivers/gpu/drm/imagination/pvr_vm.c
index 363f885a7098..2896fa7501b1 100644
--- a/drivers/gpu/drm/imagination/pvr_vm.c
+++ b/drivers/gpu/drm/imagination/pvr_vm.c
@@ -293,8 +293,9 @@ err_bind_op_fini:
static int
pvr_vm_bind_op_unmap_init(struct pvr_vm_bind_op *bind_op,
- struct pvr_vm_context *vm_ctx, u64 device_addr,
- u64 size)
+ struct pvr_vm_context *vm_ctx,
+ struct pvr_gem_object *pvr_obj,
+ u64 device_addr, u64 size)
{
int err;
@@ -318,6 +319,7 @@ pvr_vm_bind_op_unmap_init(struct pvr_vm_bind_op *bind_op,
goto err_bind_op_fini;
}
+ bind_op->pvr_obj = pvr_obj;
bind_op->vm_ctx = vm_ctx;
bind_op->device_addr = device_addr;
bind_op->size = size;
@@ -598,20 +600,6 @@ err_free:
}
/**
- * pvr_vm_unmap_all() - Unmap all mappings associated with a VM context.
- * @vm_ctx: Target VM context.
- *
- * This function ensures that no mappings are left dangling by unmapping them
- * all in order of ascending device-virtual address.
- */
-void
-pvr_vm_unmap_all(struct pvr_vm_context *vm_ctx)
-{
- WARN_ON(pvr_vm_unmap(vm_ctx, vm_ctx->gpuvm_mgr.mm_start,
- vm_ctx->gpuvm_mgr.mm_range));
-}
-
-/**
* pvr_vm_context_release() - Teardown a VM context.
* @ref_count: Pointer to reference counter of the VM context.
*
@@ -703,11 +691,7 @@ pvr_vm_lock_extra(struct drm_gpuvm_exec *vm_exec)
struct pvr_vm_bind_op *bind_op = vm_exec->extra.priv;
struct pvr_gem_object *pvr_obj = bind_op->pvr_obj;
- /* Unmap operations don't have an object to lock. */
- if (!pvr_obj)
- return 0;
-
- /* Acquire lock on the GEM being mapped. */
+ /* Acquire lock on the GEM object being mapped/unmapped. */
return drm_exec_lock_obj(&vm_exec->exec, gem_from_pvr_gem(pvr_obj));
}
@@ -772,8 +756,10 @@ err_cleanup:
}
/**
- * pvr_vm_unmap() - Unmap an already mapped section of device-virtual memory.
+ * pvr_vm_unmap_obj_locked() - Unmap an already mapped section of device-virtual
+ * memory.
* @vm_ctx: Target VM context.
+ * @pvr_obj: Target PowerVR memory object.
* @device_addr: Virtual device address at the start of the target mapping.
* @size: Size of the target mapping.
*
@@ -784,9 +770,13 @@ err_cleanup:
* * Any error encountered while performing internal operations required to
* destroy the mapping (returned from pvr_vm_gpuva_unmap or
* pvr_vm_gpuva_remap).
+ *
+ * The vm_ctx->lock must be held when calling this function.
*/
-int
-pvr_vm_unmap(struct pvr_vm_context *vm_ctx, u64 device_addr, u64 size)
+static int
+pvr_vm_unmap_obj_locked(struct pvr_vm_context *vm_ctx,
+ struct pvr_gem_object *pvr_obj,
+ u64 device_addr, u64 size)
{
struct pvr_vm_bind_op bind_op = {0};
struct drm_gpuvm_exec vm_exec = {
@@ -799,11 +789,13 @@ pvr_vm_unmap(struct pvr_vm_context *vm_ctx, u64 device_addr, u64 size)
},
};
- int err = pvr_vm_bind_op_unmap_init(&bind_op, vm_ctx, device_addr,
- size);
+ int err = pvr_vm_bind_op_unmap_init(&bind_op, vm_ctx, pvr_obj,
+ device_addr, size);
if (err)
return err;
+ pvr_gem_object_get(pvr_obj);
+
err = drm_gpuvm_exec_lock(&vm_exec);
if (err)
goto err_cleanup;
@@ -818,6 +810,96 @@ err_cleanup:
return err;
}
+/**
+ * pvr_vm_unmap_obj() - Unmap an already mapped section of device-virtual
+ * memory.
+ * @vm_ctx: Target VM context.
+ * @pvr_obj: Target PowerVR memory object.
+ * @device_addr: Virtual device address at the start of the target mapping.
+ * @size: Size of the target mapping.
+ *
+ * Return:
+ * * 0 on success,
+ * * Any error encountered by pvr_vm_unmap_obj_locked.
+ */
+int
+pvr_vm_unmap_obj(struct pvr_vm_context *vm_ctx, struct pvr_gem_object *pvr_obj,
+ u64 device_addr, u64 size)
+{
+ int err;
+
+ mutex_lock(&vm_ctx->lock);
+ err = pvr_vm_unmap_obj_locked(vm_ctx, pvr_obj, device_addr, size);
+ mutex_unlock(&vm_ctx->lock);
+
+ return err;
+}
+
+/**
+ * pvr_vm_unmap() - Unmap an already mapped section of device-virtual memory.
+ * @vm_ctx: Target VM context.
+ * @device_addr: Virtual device address at the start of the target mapping.
+ * @size: Size of the target mapping.
+ *
+ * Return:
+ * * 0 on success,
+ * * Any error encountered by drm_gpuva_find,
+ * * Any error encountered by pvr_vm_unmap_obj_locked.
+ */
+int
+pvr_vm_unmap(struct pvr_vm_context *vm_ctx, u64 device_addr, u64 size)
+{
+ struct pvr_gem_object *pvr_obj;
+ struct drm_gpuva *va;
+ int err;
+
+ mutex_lock(&vm_ctx->lock);
+
+ va = drm_gpuva_find(&vm_ctx->gpuvm_mgr, device_addr, size);
+ if (va) {
+ pvr_obj = gem_to_pvr_gem(va->gem.obj);
+ err = pvr_vm_unmap_obj_locked(vm_ctx, pvr_obj,
+ va->va.addr, va->va.range);
+ } else {
+ err = -ENOENT;
+ }
+
+ mutex_unlock(&vm_ctx->lock);
+
+ return err;
+}
+
+/**
+ * pvr_vm_unmap_all() - Unmap all mappings associated with a VM context.
+ * @vm_ctx: Target VM context.
+ *
+ * This function ensures that no mappings are left dangling by unmapping them
+ * all in order of ascending device-virtual address.
+ */
+void
+pvr_vm_unmap_all(struct pvr_vm_context *vm_ctx)
+{
+ mutex_lock(&vm_ctx->lock);
+
+ for (;;) {
+ struct pvr_gem_object *pvr_obj;
+ struct drm_gpuva *va;
+
+ va = drm_gpuva_find_first(&vm_ctx->gpuvm_mgr,
+ vm_ctx->gpuvm_mgr.mm_start,
+ vm_ctx->gpuvm_mgr.mm_range);
+ if (!va)
+ break;
+
+ pvr_obj = gem_to_pvr_gem(va->gem.obj);
+
+ WARN_ON(pvr_vm_unmap_obj_locked(vm_ctx, pvr_obj,
+ va->va.addr, va->va.range));
+ }
+
+ mutex_unlock(&vm_ctx->lock);
+}
+
/* Static data areas are determined by firmware. */
static const struct drm_pvr_static_data_area static_data_areas[] = {
{
diff --git a/drivers/gpu/drm/imagination/pvr_vm.h b/drivers/gpu/drm/imagination/pvr_vm.h
index 79406243617c..b0528dffa7f1 100644
--- a/drivers/gpu/drm/imagination/pvr_vm.h
+++ b/drivers/gpu/drm/imagination/pvr_vm.h
@@ -38,6 +38,9 @@ struct pvr_vm_context *pvr_vm_create_context(struct pvr_device *pvr_dev,
int pvr_vm_map(struct pvr_vm_context *vm_ctx,
struct pvr_gem_object *pvr_obj, u64 pvr_obj_offset,
u64 device_addr, u64 size);
+int pvr_vm_unmap_obj(struct pvr_vm_context *vm_ctx,
+ struct pvr_gem_object *pvr_obj,
+ u64 device_addr, u64 size);
int pvr_vm_unmap(struct pvr_vm_context *vm_ctx, u64 device_addr, u64 size);
void pvr_vm_unmap_all(struct pvr_vm_context *vm_ctx);
diff --git a/drivers/gpu/drm/msm/msm_fence.c b/drivers/gpu/drm/msm/msm_fence.c
index 1a5d4f1c8b42..d41e5a6bbee0 100644
--- a/drivers/gpu/drm/msm/msm_fence.c
+++ b/drivers/gpu/drm/msm/msm_fence.c
@@ -65,8 +65,7 @@ msm_fence_context_alloc(struct drm_device *dev, volatile uint32_t *fenceptr,
fctx->completed_fence = fctx->last_fence;
*fctx->fenceptr = fctx->last_fence;
- hrtimer_init(&fctx->deadline_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
- fctx->deadline_timer.function = deadline_timer;
+ hrtimer_setup(&fctx->deadline_timer, deadline_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
kthread_init_work(&fctx->deadline_work, deadline_work);
diff --git a/drivers/gpu/drm/msm/msm_io_utils.c b/drivers/gpu/drm/msm/msm_io_utils.c
index afedd61c3e28..a6efe1eac271 100644
--- a/drivers/gpu/drm/msm/msm_io_utils.c
+++ b/drivers/gpu/drm/msm/msm_io_utils.c
@@ -135,8 +135,7 @@ void msm_hrtimer_work_init(struct msm_hrtimer_work *work,
clockid_t clock_id,
enum hrtimer_mode mode)
{
- hrtimer_init(&work->timer, clock_id, mode);
- work->timer.function = msm_hrtimer_worktimer;
+ hrtimer_setup(&work->timer, msm_hrtimer_worktimer, clock_id, mode);
work->worker = worker;
kthread_init_work(&work->work, fn);
}
diff --git a/drivers/gpu/drm/nouveau/Kconfig b/drivers/gpu/drm/nouveau/Kconfig
index ce840300578d..1050a4617fc1 100644
--- a/drivers/gpu/drm/nouveau/Kconfig
+++ b/drivers/gpu/drm/nouveau/Kconfig
@@ -4,6 +4,7 @@ config DRM_NOUVEAU
depends on DRM && PCI && MMU
select IOMMU_API
select FW_LOADER
+ select FW_CACHE if PM_SLEEP
select DRM_CLIENT_SELECTION
select DRM_DISPLAY_DP_HELPER
select DRM_DISPLAY_HDMI_HELPER
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index 05c13102a8cb..d22889fbfa9c 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -359,7 +359,8 @@ int r300_mc_wait_for_idle(struct radeon_device *rdev)
return -1;
}
-static void r300_gpu_init(struct radeon_device *rdev)
+/* rs400_gpu_init also calls this! */
+void r300_gpu_init(struct radeon_device *rdev)
{
uint32_t gb_tile_config, tmp;
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index 1e00f6b99f94..8f5e07834fcc 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -165,6 +165,7 @@ void r200_set_safe_registers(struct radeon_device *rdev);
*/
extern int r300_init(struct radeon_device *rdev);
extern void r300_fini(struct radeon_device *rdev);
+extern void r300_gpu_init(struct radeon_device *rdev);
extern int r300_suspend(struct radeon_device *rdev);
extern int r300_resume(struct radeon_device *rdev);
extern int r300_asic_reset(struct radeon_device *rdev, bool hard);
diff --git a/drivers/gpu/drm/radeon/radeon_vce.c b/drivers/gpu/drm/radeon/radeon_vce.c
index d1871af967d4..2355a78e1b69 100644
--- a/drivers/gpu/drm/radeon/radeon_vce.c
+++ b/drivers/gpu/drm/radeon/radeon_vce.c
@@ -557,7 +557,7 @@ int radeon_vce_cs_parse(struct radeon_cs_parser *p)
{
int session_idx = -1;
bool destroyed = false, created = false, allocated = false;
- uint32_t tmp, handle = 0;
+ uint32_t tmp = 0, handle = 0;
uint32_t *size = &tmp;
int i, r = 0;
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
index d6c18fd740ec..13cd0a688a65 100644
--- a/drivers/gpu/drm/radeon/rs400.c
+++ b/drivers/gpu/drm/radeon/rs400.c
@@ -256,8 +256,22 @@ int rs400_mc_wait_for_idle(struct radeon_device *rdev)
static void rs400_gpu_init(struct radeon_device *rdev)
{
- /* FIXME: is this correct ? */
- r420_pipes_init(rdev);
+ /* Earlier code was calling r420_pipes_init and then
+ * rs400_mc_wait_for_idle(rdev). The problem is that
+ * at least on my Mobility Radeon Xpress 200M RC410 card
+ * that ends up in this code path ends up num_gb_pipes == 3
+ * while the card seems to have only one pipe. With the
+ * r420 pipe initialization method.
+ *
+ * Problems shown up as HyperZ glitches, see:
+ * https://bugs.freedesktop.org/show_bug.cgi?id=110897
+ *
+ * Delegating initialization to r300 code seems to work
+ * and results in proper pipe numbers. The rs400 cards
+ * are said to be not r400, but r300 kind of cards.
+ */
+ r300_gpu_init(rdev);
+
if (rs400_mc_wait_for_idle(rdev)) {
pr_warn("rs400: Failed to wait MC idle while programming pipes. Bad things might happen. %08x\n",
RREG32(RADEON_MC_STATUS));
diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h b/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h
index c75302ca3427..f56e77e7f6d0 100644
--- a/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h
+++ b/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h
@@ -21,7 +21,7 @@
*
*/
-#if !defined(_GPU_SCHED_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#if !defined(_GPU_SCHED_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
#define _GPU_SCHED_TRACE_H_
#include <linux/stringify.h>
@@ -106,7 +106,7 @@ TRACE_EVENT(drm_sched_job_wait_dep,
__entry->seqno)
);
-#endif
+#endif /* _GPU_SCHED_TRACE_H_ */
/* This part must be outside protection */
#undef TRACE_INCLUDE_PATH
diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c
index 69bcf0e99d57..da00572d7d42 100644
--- a/drivers/gpu/drm/scheduler/sched_entity.c
+++ b/drivers/gpu/drm/scheduler/sched_entity.c
@@ -259,9 +259,16 @@ static void drm_sched_entity_kill(struct drm_sched_entity *entity)
struct drm_sched_fence *s_fence = job->s_fence;
dma_fence_get(&s_fence->finished);
- if (!prev || dma_fence_add_callback(prev, &job->finish_cb,
- drm_sched_entity_kill_jobs_cb))
+ if (!prev ||
+ dma_fence_add_callback(prev, &job->finish_cb,
+ drm_sched_entity_kill_jobs_cb)) {
+ /*
+ * Adding callback above failed.
+ * dma_fence_put() checks for NULL.
+ */
+ dma_fence_put(prev);
drm_sched_entity_kill_jobs_cb(NULL, &job->finish_cb);
+ }
prev = &s_fence->finished;
}
diff --git a/drivers/gpu/drm/tiny/bochs.c b/drivers/gpu/drm/tiny/bochs.c
index c67e1f906785..8706763af8fb 100644
--- a/drivers/gpu/drm/tiny/bochs.c
+++ b/drivers/gpu/drm/tiny/bochs.c
@@ -335,8 +335,6 @@ static void bochs_hw_setmode(struct bochs_device *bochs, struct drm_display_mode
bochs->xres, bochs->yres, bochs->bpp,
bochs->yres_virtual);
- bochs_hw_blank(bochs, false);
-
bochs_dispi_write(bochs, VBE_DISPI_INDEX_ENABLE, 0);
bochs_dispi_write(bochs, VBE_DISPI_INDEX_BPP, bochs->bpp);
bochs_dispi_write(bochs, VBE_DISPI_INDEX_XRES, bochs->xres);
@@ -506,6 +504,9 @@ static int bochs_crtc_helper_atomic_check(struct drm_crtc *crtc,
static void bochs_crtc_helper_atomic_enable(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
+ struct bochs_device *bochs = to_bochs_device(crtc->dev);
+
+ bochs_hw_blank(bochs, false);
}
static void bochs_crtc_helper_atomic_disable(struct drm_crtc *crtc,
diff --git a/drivers/gpu/drm/v3d/v3d_sched.c b/drivers/gpu/drm/v3d/v3d_sched.c
index da08ddb01d21..05608c894ed9 100644
--- a/drivers/gpu/drm/v3d/v3d_sched.c
+++ b/drivers/gpu/drm/v3d/v3d_sched.c
@@ -226,8 +226,12 @@ static struct dma_fence *v3d_bin_job_run(struct drm_sched_job *sched_job)
struct dma_fence *fence;
unsigned long irqflags;
- if (unlikely(job->base.base.s_fence->finished.error))
+ if (unlikely(job->base.base.s_fence->finished.error)) {
+ spin_lock_irqsave(&v3d->job_lock, irqflags);
+ v3d->bin_job = NULL;
+ spin_unlock_irqrestore(&v3d->job_lock, irqflags);
return NULL;
+ }
/* Lock required around bin_job update vs
* v3d_overflow_mem_work().
@@ -281,8 +285,10 @@ static struct dma_fence *v3d_render_job_run(struct drm_sched_job *sched_job)
struct drm_device *dev = &v3d->drm;
struct dma_fence *fence;
- if (unlikely(job->base.base.s_fence->finished.error))
+ if (unlikely(job->base.base.s_fence->finished.error)) {
+ v3d->render_job = NULL;
return NULL;
+ }
v3d->render_job = job;
@@ -327,11 +333,17 @@ v3d_tfu_job_run(struct drm_sched_job *sched_job)
struct drm_device *dev = &v3d->drm;
struct dma_fence *fence;
+ if (unlikely(job->base.base.s_fence->finished.error)) {
+ v3d->tfu_job = NULL;
+ return NULL;
+ }
+
+ v3d->tfu_job = job;
+
fence = v3d_fence_create(v3d, V3D_TFU);
if (IS_ERR(fence))
return NULL;
- v3d->tfu_job = job;
if (job->base.irq_fence)
dma_fence_put(job->base.irq_fence);
job->base.irq_fence = dma_fence_get(fence);
@@ -369,6 +381,11 @@ v3d_csd_job_run(struct drm_sched_job *sched_job)
struct dma_fence *fence;
int i, csd_cfg0_reg;
+ if (unlikely(job->base.base.s_fence->finished.error)) {
+ v3d->csd_job = NULL;
+ return NULL;
+ }
+
v3d->csd_job = job;
v3d_invalidate_caches(v3d);
diff --git a/drivers/gpu/drm/vkms/vkms_crtc.c b/drivers/gpu/drm/vkms/vkms_crtc.c
index 28a57ae109fc..ae4e36bc337c 100644
--- a/drivers/gpu/drm/vkms/vkms_crtc.c
+++ b/drivers/gpu/drm/vkms/vkms_crtc.c
@@ -64,8 +64,8 @@ static int vkms_enable_vblank(struct drm_crtc *crtc)
struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(crtc);
struct vkms_output *out = drm_crtc_to_vkms_output(crtc);
- hrtimer_init(&out->vblank_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- out->vblank_hrtimer.function = &vkms_vblank_simulate;
+ hrtimer_setup(&out->vblank_hrtimer, &vkms_vblank_simulate, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
out->period_ns = ktime_set(0, vblank->framedur_ns);
hrtimer_start(&out->vblank_hrtimer, out->period_ns, HRTIMER_MODE_REL);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_vkms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_vkms.c
index 8651b788e98b..aec774fa4d7b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_vkms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_vkms.c
@@ -290,8 +290,8 @@ vmw_vkms_enable_vblank(struct drm_crtc *crtc)
drm_calc_timestamping_constants(crtc, &crtc->mode);
- hrtimer_init(&du->vkms.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- du->vkms.timer.function = &vmw_vkms_vblank_simulate;
+ hrtimer_setup(&du->vkms.timer, &vmw_vkms_vblank_simulate, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
du->vkms.period_ns = ktime_set(0, vblank->framedur_ns);
hrtimer_start(&du->vkms.timer, du->vkms.period_ns, HRTIMER_MODE_REL);
diff --git a/drivers/gpu/drm/xe/display/xe_plane_initial.c b/drivers/gpu/drm/xe/display/xe_plane_initial.c
index 2eb9633f163a..2a2f250fa495 100644
--- a/drivers/gpu/drm/xe/display/xe_plane_initial.c
+++ b/drivers/gpu/drm/xe/display/xe_plane_initial.c
@@ -194,8 +194,6 @@ intel_find_initial_plane_obj(struct intel_crtc *crtc,
to_intel_plane(crtc->base.primary);
struct intel_plane_state *plane_state =
to_intel_plane_state(plane->base.state);
- struct intel_crtc_state *crtc_state =
- to_intel_crtc_state(crtc->base.state);
struct drm_framebuffer *fb;
struct i915_vma *vma;
@@ -241,14 +239,6 @@ intel_find_initial_plane_obj(struct intel_crtc *crtc,
atomic_or(plane->frontbuffer_bit, &to_intel_frontbuffer(fb)->bits);
plane_config->vma = vma;
-
- /*
- * Flip to the newly created mapping ASAP, so we can re-use the
- * first part of GGTT for WOPCM, prevent flickering, and prevent
- * the lookup of sysmem scratch pages.
- */
- plane->check_plane(crtc_state, plane_state);
- plane->async_flip(NULL, plane, crtc_state, plane_state, true);
return;
nofb:
diff --git a/drivers/gpu/drm/xe/xe_bo.h b/drivers/gpu/drm/xe/xe_bo.h
index d9386ab03140..43bf6f140d40 100644
--- a/drivers/gpu/drm/xe/xe_bo.h
+++ b/drivers/gpu/drm/xe/xe_bo.h
@@ -341,7 +341,6 @@ static inline unsigned int xe_sg_segment_size(struct device *dev)
return round_down(max / 2, PAGE_SIZE);
}
-#if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
/**
* xe_bo_is_mem_type - Whether the bo currently resides in the given
* TTM memory type
@@ -356,4 +355,3 @@ static inline bool xe_bo_is_mem_type(struct xe_bo *bo, u32 mem_type)
return bo->ttm.resource->mem_type == mem_type;
}
#endif
-#endif
diff --git a/drivers/gpu/drm/xe/xe_dma_buf.c b/drivers/gpu/drm/xe/xe_dma_buf.c
index c5b95470fa32..f67803e15a0e 100644
--- a/drivers/gpu/drm/xe/xe_dma_buf.c
+++ b/drivers/gpu/drm/xe/xe_dma_buf.c
@@ -58,7 +58,7 @@ static int xe_dma_buf_pin(struct dma_buf_attachment *attach)
* 1) Avoid pinning in a placement not accessible to some importers.
* 2) Pinning in VRAM requires PIN accounting which is a to-do.
*/
- if (xe_bo_is_pinned(bo) && bo->ttm.resource->placement != XE_PL_TT) {
+ if (xe_bo_is_pinned(bo) && !xe_bo_is_mem_type(bo, XE_PL_TT)) {
drm_dbg(&xe->drm, "Can't migrate pinned bo for dma-buf pin.\n");
return -EINVAL;
}
diff --git a/drivers/gpu/drm/xe/xe_gt.c b/drivers/gpu/drm/xe/xe_gt.c
index 5d6fb79957b6..9f4f27d1ef4a 100644
--- a/drivers/gpu/drm/xe/xe_gt.c
+++ b/drivers/gpu/drm/xe/xe_gt.c
@@ -380,9 +380,7 @@ int xe_gt_init_early(struct xe_gt *gt)
if (err)
return err;
- xe_wa_process_gt(gt);
xe_wa_process_oob(gt);
- xe_tuning_process_gt(gt);
xe_force_wake_init_gt(gt, gt_to_fw(gt));
spin_lock_init(&gt->global_invl_lock);
@@ -474,6 +472,8 @@ static int all_fw_domain_init(struct xe_gt *gt)
}
xe_gt_mcr_set_implicit_defaults(gt);
+ xe_wa_process_gt(gt);
+ xe_tuning_process_gt(gt);
xe_reg_sr_apply_mmio(&gt->reg_sr, gt);
err = xe_gt_clock_init(gt);
diff --git a/drivers/gpu/drm/xe/xe_guc_pc.c b/drivers/gpu/drm/xe/xe_guc_pc.c
index df7f130fb663..b995d1d51aed 100644
--- a/drivers/gpu/drm/xe/xe_guc_pc.c
+++ b/drivers/gpu/drm/xe/xe_guc_pc.c
@@ -6,6 +6,7 @@
#include "xe_guc_pc.h"
#include <linux/delay.h>
+#include <linux/ktime.h>
#include <drm/drm_managed.h>
#include <generated/xe_wa_oob.h>
@@ -19,6 +20,7 @@
#include "xe_gt.h"
#include "xe_gt_idle.h"
#include "xe_gt_printk.h"
+#include "xe_gt_throttle.h"
#include "xe_gt_types.h"
#include "xe_guc.h"
#include "xe_guc_ct.h"
@@ -49,6 +51,9 @@
#define LNL_MERT_FREQ_CAP 800
#define BMG_MERT_FREQ_CAP 2133
+#define SLPC_RESET_TIMEOUT_MS 5 /* roughly 5ms, but no need for precision */
+#define SLPC_RESET_EXTENDED_TIMEOUT_MS 1000 /* To be used only at pc_start */
+
/**
* DOC: GuC Power Conservation (PC)
*
@@ -113,9 +118,10 @@ static struct iosys_map *pc_to_maps(struct xe_guc_pc *pc)
FIELD_PREP(HOST2GUC_PC_SLPC_REQUEST_MSG_1_EVENT_ARGC, count))
static int wait_for_pc_state(struct xe_guc_pc *pc,
- enum slpc_global_state state)
+ enum slpc_global_state state,
+ int timeout_ms)
{
- int timeout_us = 5000; /* rought 5ms, but no need for precision */
+ int timeout_us = 1000 * timeout_ms;
int slept, wait = 10;
xe_device_assert_mem_access(pc_to_xe(pc));
@@ -164,7 +170,8 @@ static int pc_action_query_task_state(struct xe_guc_pc *pc)
};
int ret;
- if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING))
+ if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING,
+ SLPC_RESET_TIMEOUT_MS))
return -EAGAIN;
/* Blocking here to ensure the results are ready before reading them */
@@ -187,7 +194,8 @@ static int pc_action_set_param(struct xe_guc_pc *pc, u8 id, u32 value)
};
int ret;
- if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING))
+ if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING,
+ SLPC_RESET_TIMEOUT_MS))
return -EAGAIN;
ret = xe_guc_ct_send(ct, action, ARRAY_SIZE(action), 0, 0);
@@ -208,7 +216,8 @@ static int pc_action_unset_param(struct xe_guc_pc *pc, u8 id)
struct xe_guc_ct *ct = &pc_to_guc(pc)->ct;
int ret;
- if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING))
+ if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING,
+ SLPC_RESET_TIMEOUT_MS))
return -EAGAIN;
ret = xe_guc_ct_send(ct, action, ARRAY_SIZE(action), 0, 0);
@@ -440,6 +449,15 @@ u32 xe_guc_pc_get_act_freq(struct xe_guc_pc *pc)
return freq;
}
+static u32 get_cur_freq(struct xe_gt *gt)
+{
+ u32 freq;
+
+ freq = xe_mmio_read32(&gt->mmio, RPNSWREQ);
+ freq = REG_FIELD_GET(REQ_RATIO_MASK, freq);
+ return decode_freq(freq);
+}
+
/**
* xe_guc_pc_get_cur_freq - Get Current requested frequency
* @pc: The GuC PC
@@ -463,10 +481,7 @@ int xe_guc_pc_get_cur_freq(struct xe_guc_pc *pc, u32 *freq)
return -ETIMEDOUT;
}
- *freq = xe_mmio_read32(&gt->mmio, RPNSWREQ);
-
- *freq = REG_FIELD_GET(REQ_RATIO_MASK, *freq);
- *freq = decode_freq(*freq);
+ *freq = get_cur_freq(gt);
xe_force_wake_put(gt_to_fw(gt), fw_ref);
return 0;
@@ -1002,6 +1017,7 @@ int xe_guc_pc_start(struct xe_guc_pc *pc)
struct xe_gt *gt = pc_to_gt(pc);
u32 size = PAGE_ALIGN(sizeof(struct slpc_shared_data));
unsigned int fw_ref;
+ ktime_t earlier;
int ret;
xe_gt_assert(gt, xe_device_uc_enabled(xe));
@@ -1026,14 +1042,25 @@ int xe_guc_pc_start(struct xe_guc_pc *pc)
memset(pc->bo->vmap.vaddr, 0, size);
slpc_shared_data_write(pc, header.size, size);
+ earlier = ktime_get();
ret = pc_action_reset(pc);
if (ret)
goto out;
- if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING)) {
- xe_gt_err(gt, "GuC PC Start failed\n");
- ret = -EIO;
- goto out;
+ if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING,
+ SLPC_RESET_TIMEOUT_MS)) {
+ xe_gt_warn(gt, "GuC PC start taking longer than normal [freq = %dMHz (req = %dMHz), perf_limit_reasons = 0x%08X]\n",
+ xe_guc_pc_get_act_freq(pc), get_cur_freq(gt),
+ xe_gt_throttle_get_limit_reasons(gt));
+
+ if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING,
+ SLPC_RESET_EXTENDED_TIMEOUT_MS)) {
+ xe_gt_err(gt, "GuC PC Start failed: Dynamic GT frequency control and GT sleep states are now disabled.\n");
+ goto out;
+ }
+
+ xe_gt_warn(gt, "GuC PC excessive start time: %lldms",
+ ktime_ms_delta(ktime_get(), earlier));
}
ret = pc_init_freqs(pc);
diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c
index b6a2dd742ebd..1a5fe4822a62 100644
--- a/drivers/gpu/drm/xe/xe_guc_submit.c
+++ b/drivers/gpu/drm/xe/xe_guc_submit.c
@@ -1246,11 +1246,11 @@ static void __guc_exec_queue_fini_async(struct work_struct *w)
xe_pm_runtime_get(guc_to_xe(guc));
trace_xe_exec_queue_destroy(q);
+ release_guc_id(guc, q);
if (xe_exec_queue_is_lr(q))
cancel_work_sync(&ge->lr_tdr);
/* Confirm no work left behind accessing device structures */
cancel_delayed_work_sync(&ge->sched.base.work_tdr);
- release_guc_id(guc, q);
xe_sched_entity_fini(&ge->entity);
xe_sched_fini(&ge->sched);
diff --git a/drivers/gpu/drm/xe/xe_hmm.c b/drivers/gpu/drm/xe/xe_hmm.c
index 089834467880..c3cc0fa105e8 100644
--- a/drivers/gpu/drm/xe/xe_hmm.c
+++ b/drivers/gpu/drm/xe/xe_hmm.c
@@ -19,11 +19,10 @@ static u64 xe_npages_in_range(unsigned long start, unsigned long end)
return (end - start) >> PAGE_SHIFT;
}
-/*
+/**
* xe_mark_range_accessed() - mark a range is accessed, so core mm
* have such information for memory eviction or write back to
* hard disk
- *
* @range: the range to mark
* @write: if write to this range, we mark pages in this range
* as dirty
@@ -43,15 +42,51 @@ static void xe_mark_range_accessed(struct hmm_range *range, bool write)
}
}
-/*
+static int xe_alloc_sg(struct xe_device *xe, struct sg_table *st,
+ struct hmm_range *range, struct rw_semaphore *notifier_sem)
+{
+ unsigned long i, npages, hmm_pfn;
+ unsigned long num_chunks = 0;
+ int ret;
+
+ /* HMM docs says this is needed. */
+ ret = down_read_interruptible(notifier_sem);
+ if (ret)
+ return ret;
+
+ if (mmu_interval_read_retry(range->notifier, range->notifier_seq)) {
+ up_read(notifier_sem);
+ return -EAGAIN;
+ }
+
+ npages = xe_npages_in_range(range->start, range->end);
+ for (i = 0; i < npages;) {
+ unsigned long len;
+
+ hmm_pfn = range->hmm_pfns[i];
+ xe_assert(xe, hmm_pfn & HMM_PFN_VALID);
+
+ len = 1UL << hmm_pfn_to_map_order(hmm_pfn);
+
+ /* If order > 0 the page may extend beyond range->start */
+ len -= (hmm_pfn & ~HMM_PFN_FLAGS) & (len - 1);
+ i += len;
+ num_chunks++;
+ }
+ up_read(notifier_sem);
+
+ return sg_alloc_table(st, num_chunks, GFP_KERNEL);
+}
+
+/**
* xe_build_sg() - build a scatter gather table for all the physical pages/pfn
* in a hmm_range. dma-map pages if necessary. dma-address is save in sg table
* and will be used to program GPU page table later.
- *
* @xe: the xe device who will access the dma-address in sg table
* @range: the hmm range that we build the sg table from. range->hmm_pfns[]
* has the pfn numbers of pages that back up this hmm address range.
* @st: pointer to the sg table.
+ * @notifier_sem: The xe notifier lock.
* @write: whether we write to this range. This decides dma map direction
* for system pages. If write we map it bi-diretional; otherwise
* DMA_TO_DEVICE
@@ -78,43 +113,88 @@ static void xe_mark_range_accessed(struct hmm_range *range, bool write)
* Returns 0 if successful; -ENOMEM if fails to allocate memory
*/
static int xe_build_sg(struct xe_device *xe, struct hmm_range *range,
- struct sg_table *st, bool write)
+ struct sg_table *st,
+ struct rw_semaphore *notifier_sem,
+ bool write)
{
+ unsigned long npages = xe_npages_in_range(range->start, range->end);
struct device *dev = xe->drm.dev;
- struct page **pages;
- u64 i, npages;
- int ret;
+ struct scatterlist *sgl;
+ struct page *page;
+ unsigned long i, j;
- npages = xe_npages_in_range(range->start, range->end);
- pages = kvmalloc_array(npages, sizeof(*pages), GFP_KERNEL);
- if (!pages)
- return -ENOMEM;
+ lockdep_assert_held(notifier_sem);
- for (i = 0; i < npages; i++) {
- pages[i] = hmm_pfn_to_page(range->hmm_pfns[i]);
- xe_assert(xe, !is_device_private_page(pages[i]));
+ i = 0;
+ for_each_sg(st->sgl, sgl, st->nents, j) {
+ unsigned long hmm_pfn, size;
+
+ hmm_pfn = range->hmm_pfns[i];
+ page = hmm_pfn_to_page(hmm_pfn);
+ xe_assert(xe, !is_device_private_page(page));
+
+ size = 1UL << hmm_pfn_to_map_order(hmm_pfn);
+ size -= page_to_pfn(page) & (size - 1);
+ i += size;
+
+ if (unlikely(j == st->nents - 1)) {
+ xe_assert(xe, i >= npages);
+ if (i > npages)
+ size -= (i - npages);
+
+ sg_mark_end(sgl);
+ } else {
+ xe_assert(xe, i < npages);
+ }
+
+ sg_set_page(sgl, page, size << PAGE_SHIFT, 0);
}
- ret = sg_alloc_table_from_pages_segment(st, pages, npages, 0, npages << PAGE_SHIFT,
- xe_sg_segment_size(dev), GFP_KERNEL);
- if (ret)
- goto free_pages;
+ return dma_map_sgtable(dev, st, write ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE,
+ DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_NO_KERNEL_MAPPING);
+}
+
+static void xe_hmm_userptr_set_mapped(struct xe_userptr_vma *uvma)
+{
+ struct xe_userptr *userptr = &uvma->userptr;
+ struct xe_vm *vm = xe_vma_vm(&uvma->vma);
- ret = dma_map_sgtable(dev, st, write ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE,
- DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_NO_KERNEL_MAPPING);
- if (ret) {
- sg_free_table(st);
- st = NULL;
+ lockdep_assert_held_write(&vm->lock);
+ lockdep_assert_held(&vm->userptr.notifier_lock);
+
+ mutex_lock(&userptr->unmap_mutex);
+ xe_assert(vm->xe, !userptr->mapped);
+ userptr->mapped = true;
+ mutex_unlock(&userptr->unmap_mutex);
+}
+
+void xe_hmm_userptr_unmap(struct xe_userptr_vma *uvma)
+{
+ struct xe_userptr *userptr = &uvma->userptr;
+ struct xe_vma *vma = &uvma->vma;
+ bool write = !xe_vma_read_only(vma);
+ struct xe_vm *vm = xe_vma_vm(vma);
+ struct xe_device *xe = vm->xe;
+
+ if (!lockdep_is_held_type(&vm->userptr.notifier_lock, 0) &&
+ !lockdep_is_held_type(&vm->lock, 0) &&
+ !(vma->gpuva.flags & XE_VMA_DESTROYED)) {
+ /* Don't unmap in exec critical section. */
+ xe_vm_assert_held(vm);
+ /* Don't unmap while mapping the sg. */
+ lockdep_assert_held(&vm->lock);
}
-free_pages:
- kvfree(pages);
- return ret;
+ mutex_lock(&userptr->unmap_mutex);
+ if (userptr->sg && userptr->mapped)
+ dma_unmap_sgtable(xe->drm.dev, userptr->sg,
+ write ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE, 0);
+ userptr->mapped = false;
+ mutex_unlock(&userptr->unmap_mutex);
}
-/*
+/**
* xe_hmm_userptr_free_sg() - Free the scatter gather table of userptr
- *
* @uvma: the userptr vma which hold the scatter gather table
*
* With function xe_userptr_populate_range, we allocate storage of
@@ -124,16 +204,9 @@ free_pages:
void xe_hmm_userptr_free_sg(struct xe_userptr_vma *uvma)
{
struct xe_userptr *userptr = &uvma->userptr;
- struct xe_vma *vma = &uvma->vma;
- bool write = !xe_vma_read_only(vma);
- struct xe_vm *vm = xe_vma_vm(vma);
- struct xe_device *xe = vm->xe;
- struct device *dev = xe->drm.dev;
-
- xe_assert(xe, userptr->sg);
- dma_unmap_sgtable(dev, userptr->sg,
- write ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE, 0);
+ xe_assert(xe_vma_vm(&uvma->vma)->xe, userptr->sg);
+ xe_hmm_userptr_unmap(uvma);
sg_free_table(userptr->sg);
userptr->sg = NULL;
}
@@ -166,13 +239,20 @@ int xe_hmm_userptr_populate_range(struct xe_userptr_vma *uvma,
{
unsigned long timeout =
jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
- unsigned long *pfns, flags = HMM_PFN_REQ_FAULT;
+ unsigned long *pfns;
struct xe_userptr *userptr;
struct xe_vma *vma = &uvma->vma;
u64 userptr_start = xe_vma_userptr(vma);
u64 userptr_end = userptr_start + xe_vma_size(vma);
struct xe_vm *vm = xe_vma_vm(vma);
- struct hmm_range hmm_range;
+ struct hmm_range hmm_range = {
+ .pfn_flags_mask = 0, /* ignore pfns */
+ .default_flags = HMM_PFN_REQ_FAULT,
+ .start = userptr_start,
+ .end = userptr_end,
+ .notifier = &uvma->userptr.notifier,
+ .dev_private_owner = vm->xe,
+ };
bool write = !xe_vma_read_only(vma);
unsigned long notifier_seq;
u64 npages;
@@ -199,19 +279,14 @@ int xe_hmm_userptr_populate_range(struct xe_userptr_vma *uvma,
return -ENOMEM;
if (write)
- flags |= HMM_PFN_REQ_WRITE;
+ hmm_range.default_flags |= HMM_PFN_REQ_WRITE;
if (!mmget_not_zero(userptr->notifier.mm)) {
ret = -EFAULT;
goto free_pfns;
}
- hmm_range.default_flags = flags;
hmm_range.hmm_pfns = pfns;
- hmm_range.notifier = &userptr->notifier;
- hmm_range.start = userptr_start;
- hmm_range.end = userptr_end;
- hmm_range.dev_private_owner = vm->xe;
while (true) {
hmm_range.notifier_seq = mmu_interval_read_begin(&userptr->notifier);
@@ -238,16 +313,37 @@ int xe_hmm_userptr_populate_range(struct xe_userptr_vma *uvma,
if (ret)
goto free_pfns;
- ret = xe_build_sg(vm->xe, &hmm_range, &userptr->sgt, write);
+ ret = xe_alloc_sg(vm->xe, &userptr->sgt, &hmm_range, &vm->userptr.notifier_lock);
if (ret)
goto free_pfns;
+ ret = down_read_interruptible(&vm->userptr.notifier_lock);
+ if (ret)
+ goto free_st;
+
+ if (mmu_interval_read_retry(hmm_range.notifier, hmm_range.notifier_seq)) {
+ ret = -EAGAIN;
+ goto out_unlock;
+ }
+
+ ret = xe_build_sg(vm->xe, &hmm_range, &userptr->sgt,
+ &vm->userptr.notifier_lock, write);
+ if (ret)
+ goto out_unlock;
+
xe_mark_range_accessed(&hmm_range, write);
userptr->sg = &userptr->sgt;
+ xe_hmm_userptr_set_mapped(uvma);
userptr->notifier_seq = hmm_range.notifier_seq;
+ up_read(&vm->userptr.notifier_lock);
+ kvfree(pfns);
+ return 0;
+out_unlock:
+ up_read(&vm->userptr.notifier_lock);
+free_st:
+ sg_free_table(&userptr->sgt);
free_pfns:
kvfree(pfns);
return ret;
}
-
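
For readability, the flow that the xe_hmm.c hunks above converge on in xe_hmm_userptr_populate_range(), condensed into a sketch (not part of the patch; error paths trimmed): the sg table is allocated outside the notifier lock, and is only built and dma-mapped while vm->userptr.notifier_lock is held in read mode and the notifier sequence is still current.

    ret = xe_alloc_sg(vm->xe, &userptr->sgt, &hmm_range, &vm->userptr.notifier_lock);
    if (ret)
            goto free_pfns;

    ret = down_read_interruptible(&vm->userptr.notifier_lock);
    if (ret)
            goto free_st;

    /* anything invalidated since hmm_range_fault()? then repin */
    if (mmu_interval_read_retry(hmm_range.notifier, hmm_range.notifier_seq)) {
            ret = -EAGAIN;
            goto out_unlock;
    }

    /* map only under the lock so the notifier cannot unmap underneath us */
    ret = xe_build_sg(vm->xe, &hmm_range, &userptr->sgt,
                      &vm->userptr.notifier_lock, write);
    if (ret)
            goto out_unlock;

    userptr->sg = &userptr->sgt;
    xe_hmm_userptr_set_mapped(uvma);
    userptr->notifier_seq = hmm_range.notifier_seq;
    up_read(&vm->userptr.notifier_lock);
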
diff --git a/drivers/gpu/drm/xe/xe_hmm.h b/drivers/gpu/drm/xe/xe_hmm.h
index 909dc2bdcd97..0ea98d8e7bbc 100644
--- a/drivers/gpu/drm/xe/xe_hmm.h
+++ b/drivers/gpu/drm/xe/xe_hmm.h
@@ -3,9 +3,16 @@
* Copyright © 2024 Intel Corporation
*/
+#ifndef _XE_HMM_H_
+#define _XE_HMM_H_
+
#include <linux/types.h>
struct xe_userptr_vma;
int xe_hmm_userptr_populate_range(struct xe_userptr_vma *uvma, bool is_mm_mmap_locked);
+
void xe_hmm_userptr_free_sg(struct xe_userptr_vma *uvma);
+
+void xe_hmm_userptr_unmap(struct xe_userptr_vma *uvma);
+#endif
diff --git a/drivers/gpu/drm/xe/xe_oa.c b/drivers/gpu/drm/xe/xe_oa.c
index eb6cd91e1e22..1fa46a04425e 100644
--- a/drivers/gpu/drm/xe/xe_oa.c
+++ b/drivers/gpu/drm/xe/xe_oa.c
@@ -1766,8 +1766,8 @@ static int xe_oa_stream_init(struct xe_oa_stream *stream,
WRITE_ONCE(u->exclusive_stream, stream);
- hrtimer_init(&stream->poll_check_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- stream->poll_check_timer.function = xe_oa_poll_check_timer_cb;
+ hrtimer_setup(&stream->poll_check_timer, xe_oa_poll_check_timer_cb, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
init_waitqueue_head(&stream->poll_wq);
spin_lock_init(&stream->oa_buffer.ptr_lock);
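
The xe_oa.c change is a mechanical conversion to the combined initializer: hrtimer_setup() takes the callback up front instead of assigning timer.function separately. Side by side, with the arguments exactly as in the hunk:

    /* before */
    hrtimer_init(&stream->poll_check_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
    stream->poll_check_timer.function = xe_oa_poll_check_timer_cb;

    /* after */
    hrtimer_setup(&stream->poll_check_timer, xe_oa_poll_check_timer_cb,
                  CLOCK_MONOTONIC, HRTIMER_MODE_REL);
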
diff --git a/drivers/gpu/drm/xe/xe_pm.c b/drivers/gpu/drm/xe/xe_pm.c
index c9cc0c091dfd..89fd2c043136 100644
--- a/drivers/gpu/drm/xe/xe_pm.c
+++ b/drivers/gpu/drm/xe/xe_pm.c
@@ -267,6 +267,15 @@ int xe_pm_init_early(struct xe_device *xe)
}
ALLOW_ERROR_INJECTION(xe_pm_init_early, ERRNO); /* See xe_pci_probe() */
+static u32 vram_threshold_value(struct xe_device *xe)
+{
+ /* FIXME: D3Cold temporarily disabled by default on BMG */
+ if (xe->info.platform == XE_BATTLEMAGE)
+ return 0;
+
+ return DEFAULT_VRAM_THRESHOLD;
+}
+
/**
* xe_pm_init - Initialize Xe Power Management
* @xe: xe device instance
@@ -277,6 +286,7 @@ ALLOW_ERROR_INJECTION(xe_pm_init_early, ERRNO); /* See xe_pci_probe() */
*/
int xe_pm_init(struct xe_device *xe)
{
+ u32 vram_threshold;
int err;
/* For now suspend/resume is only allowed with GuC */
@@ -290,7 +300,8 @@ int xe_pm_init(struct xe_device *xe)
if (err)
return err;
- err = xe_pm_set_vram_threshold(xe, DEFAULT_VRAM_THRESHOLD);
+ vram_threshold = vram_threshold_value(xe);
+ err = xe_pm_set_vram_threshold(xe, vram_threshold);
if (err)
return err;
}
diff --git a/drivers/gpu/drm/xe/xe_pt.c b/drivers/gpu/drm/xe/xe_pt.c
index 1ddcc7e79a93..dc24baa84092 100644
--- a/drivers/gpu/drm/xe/xe_pt.c
+++ b/drivers/gpu/drm/xe/xe_pt.c
@@ -28,6 +28,8 @@ struct xe_pt_dir {
struct xe_pt pt;
/** @children: Array of page-table child nodes */
struct xe_ptw *children[XE_PDES];
+ /** @staging: Array of page-table staging nodes */
+ struct xe_ptw *staging[XE_PDES];
};
#if IS_ENABLED(CONFIG_DRM_XE_DEBUG_VM)
@@ -48,9 +50,10 @@ static struct xe_pt_dir *as_xe_pt_dir(struct xe_pt *pt)
return container_of(pt, struct xe_pt_dir, pt);
}
-static struct xe_pt *xe_pt_entry(struct xe_pt_dir *pt_dir, unsigned int index)
+static struct xe_pt *
+xe_pt_entry_staging(struct xe_pt_dir *pt_dir, unsigned int index)
{
- return container_of(pt_dir->children[index], struct xe_pt, base);
+ return container_of(pt_dir->staging[index], struct xe_pt, base);
}
static u64 __xe_pt_empty_pte(struct xe_tile *tile, struct xe_vm *vm,
@@ -125,6 +128,7 @@ struct xe_pt *xe_pt_create(struct xe_vm *vm, struct xe_tile *tile,
}
pt->bo = bo;
pt->base.children = level ? as_xe_pt_dir(pt)->children : NULL;
+ pt->base.staging = level ? as_xe_pt_dir(pt)->staging : NULL;
if (vm->xef)
xe_drm_client_add_bo(vm->xef->client, pt->bo);
@@ -206,8 +210,8 @@ void xe_pt_destroy(struct xe_pt *pt, u32 flags, struct llist_head *deferred)
struct xe_pt_dir *pt_dir = as_xe_pt_dir(pt);
for (i = 0; i < XE_PDES; i++) {
- if (xe_pt_entry(pt_dir, i))
- xe_pt_destroy(xe_pt_entry(pt_dir, i), flags,
+ if (xe_pt_entry_staging(pt_dir, i))
+ xe_pt_destroy(xe_pt_entry_staging(pt_dir, i), flags,
deferred);
}
}
@@ -376,8 +380,10 @@ xe_pt_insert_entry(struct xe_pt_stage_bind_walk *xe_walk, struct xe_pt *parent,
/* Continue building a non-connected subtree. */
struct iosys_map *map = &parent->bo->vmap;
- if (unlikely(xe_child))
+ if (unlikely(xe_child)) {
parent->base.children[offset] = &xe_child->base;
+ parent->base.staging[offset] = &xe_child->base;
+ }
xe_pt_write(xe_walk->vm->xe, map, offset, pte);
parent->num_live++;
@@ -614,6 +620,7 @@ xe_pt_stage_bind(struct xe_tile *tile, struct xe_vma *vma,
.ops = &xe_pt_stage_bind_ops,
.shifts = xe_normal_pt_shifts,
.max_level = XE_PT_HIGHEST_LEVEL,
+ .staging = true,
},
.vm = xe_vma_vm(vma),
.tile = tile,
@@ -873,7 +880,7 @@ static void xe_pt_cancel_bind(struct xe_vma *vma,
}
}
-static void xe_pt_commit_locks_assert(struct xe_vma *vma)
+static void xe_pt_commit_prepare_locks_assert(struct xe_vma *vma)
{
struct xe_vm *vm = xe_vma_vm(vma);
@@ -885,6 +892,16 @@ static void xe_pt_commit_locks_assert(struct xe_vma *vma)
xe_vm_assert_held(vm);
}
+static void xe_pt_commit_locks_assert(struct xe_vma *vma)
+{
+ struct xe_vm *vm = xe_vma_vm(vma);
+
+ xe_pt_commit_prepare_locks_assert(vma);
+
+ if (xe_vma_is_userptr(vma))
+ lockdep_assert_held_read(&vm->userptr.notifier_lock);
+}
+
static void xe_pt_commit(struct xe_vma *vma,
struct xe_vm_pgtable_update *entries,
u32 num_entries, struct llist_head *deferred)
@@ -895,13 +912,17 @@ static void xe_pt_commit(struct xe_vma *vma,
for (i = 0; i < num_entries; i++) {
struct xe_pt *pt = entries[i].pt;
+ struct xe_pt_dir *pt_dir;
if (!pt->level)
continue;
+ pt_dir = as_xe_pt_dir(pt);
for (j = 0; j < entries[i].qwords; j++) {
struct xe_pt *oldpte = entries[i].pt_entries[j].pt;
+ int j_ = j + entries[i].ofs;
+ pt_dir->children[j_] = pt_dir->staging[j_];
xe_pt_destroy(oldpte, xe_vma_vm(vma)->flags, deferred);
}
}
@@ -913,7 +934,7 @@ static void xe_pt_abort_bind(struct xe_vma *vma,
{
int i, j;
- xe_pt_commit_locks_assert(vma);
+ xe_pt_commit_prepare_locks_assert(vma);
for (i = num_entries - 1; i >= 0; --i) {
struct xe_pt *pt = entries[i].pt;
@@ -928,10 +949,10 @@ static void xe_pt_abort_bind(struct xe_vma *vma,
pt_dir = as_xe_pt_dir(pt);
for (j = 0; j < entries[i].qwords; j++) {
u32 j_ = j + entries[i].ofs;
- struct xe_pt *newpte = xe_pt_entry(pt_dir, j_);
+ struct xe_pt *newpte = xe_pt_entry_staging(pt_dir, j_);
struct xe_pt *oldpte = entries[i].pt_entries[j].pt;
- pt_dir->children[j_] = oldpte ? &oldpte->base : 0;
+ pt_dir->staging[j_] = oldpte ? &oldpte->base : 0;
xe_pt_destroy(newpte, xe_vma_vm(vma)->flags, NULL);
}
}
@@ -943,7 +964,7 @@ static void xe_pt_commit_prepare_bind(struct xe_vma *vma,
{
u32 i, j;
- xe_pt_commit_locks_assert(vma);
+ xe_pt_commit_prepare_locks_assert(vma);
for (i = 0; i < num_entries; i++) {
struct xe_pt *pt = entries[i].pt;
@@ -961,10 +982,10 @@ static void xe_pt_commit_prepare_bind(struct xe_vma *vma,
struct xe_pt *newpte = entries[i].pt_entries[j].pt;
struct xe_pt *oldpte = NULL;
- if (xe_pt_entry(pt_dir, j_))
- oldpte = xe_pt_entry(pt_dir, j_);
+ if (xe_pt_entry_staging(pt_dir, j_))
+ oldpte = xe_pt_entry_staging(pt_dir, j_);
- pt_dir->children[j_] = &newpte->base;
+ pt_dir->staging[j_] = &newpte->base;
entries[i].pt_entries[j].pt = oldpte;
}
}
@@ -1213,42 +1234,22 @@ static int vma_check_userptr(struct xe_vm *vm, struct xe_vma *vma,
return 0;
uvma = to_userptr_vma(vma);
- notifier_seq = uvma->userptr.notifier_seq;
+ if (xe_pt_userptr_inject_eagain(uvma))
+ xe_vma_userptr_force_invalidate(uvma);
- if (uvma->userptr.initial_bind && !xe_vm_in_fault_mode(vm))
- return 0;
+ notifier_seq = uvma->userptr.notifier_seq;
if (!mmu_interval_read_retry(&uvma->userptr.notifier,
- notifier_seq) &&
- !xe_pt_userptr_inject_eagain(uvma))
+ notifier_seq))
return 0;
- if (xe_vm_in_fault_mode(vm)) {
+ if (xe_vm_in_fault_mode(vm))
return -EAGAIN;
- } else {
- spin_lock(&vm->userptr.invalidated_lock);
- list_move_tail(&uvma->userptr.invalidate_link,
- &vm->userptr.invalidated);
- spin_unlock(&vm->userptr.invalidated_lock);
-
- if (xe_vm_in_preempt_fence_mode(vm)) {
- struct dma_resv_iter cursor;
- struct dma_fence *fence;
- long err;
-
- dma_resv_iter_begin(&cursor, xe_vm_resv(vm),
- DMA_RESV_USAGE_BOOKKEEP);
- dma_resv_for_each_fence_unlocked(&cursor, fence)
- dma_fence_enable_sw_signaling(fence);
- dma_resv_iter_end(&cursor);
-
- err = dma_resv_wait_timeout(xe_vm_resv(vm),
- DMA_RESV_USAGE_BOOKKEEP,
- false, MAX_SCHEDULE_TIMEOUT);
- XE_WARN_ON(err <= 0);
- }
- }
+ /*
+ * Just continue the operation since exec or rebind worker
+ * will take care of rebinding.
+ */
return 0;
}
@@ -1514,6 +1515,7 @@ static unsigned int xe_pt_stage_unbind(struct xe_tile *tile, struct xe_vma *vma,
.ops = &xe_pt_stage_unbind_ops,
.shifts = xe_normal_pt_shifts,
.max_level = XE_PT_HIGHEST_LEVEL,
+ .staging = true,
},
.tile = tile,
.modified_start = xe_vma_start(vma),
@@ -1555,7 +1557,7 @@ static void xe_pt_abort_unbind(struct xe_vma *vma,
{
int i, j;
- xe_pt_commit_locks_assert(vma);
+ xe_pt_commit_prepare_locks_assert(vma);
for (i = num_entries - 1; i >= 0; --i) {
struct xe_vm_pgtable_update *entry = &entries[i];
@@ -1568,7 +1570,7 @@ static void xe_pt_abort_unbind(struct xe_vma *vma,
continue;
for (j = entry->ofs; j < entry->ofs + entry->qwords; j++)
- pt_dir->children[j] =
+ pt_dir->staging[j] =
entries[i].pt_entries[j - entry->ofs].pt ?
&entries[i].pt_entries[j - entry->ofs].pt->base : NULL;
}
@@ -1581,7 +1583,7 @@ xe_pt_commit_prepare_unbind(struct xe_vma *vma,
{
int i, j;
- xe_pt_commit_locks_assert(vma);
+ xe_pt_commit_prepare_locks_assert(vma);
for (i = 0; i < num_entries; ++i) {
struct xe_vm_pgtable_update *entry = &entries[i];
@@ -1595,8 +1597,8 @@ xe_pt_commit_prepare_unbind(struct xe_vma *vma,
pt_dir = as_xe_pt_dir(pt);
for (j = entry->ofs; j < entry->ofs + entry->qwords; j++) {
entry->pt_entries[j - entry->ofs].pt =
- xe_pt_entry(pt_dir, j);
- pt_dir->children[j] = NULL;
+ xe_pt_entry_staging(pt_dir, j);
+ pt_dir->staging[j] = NULL;
}
}
}
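
Net effect of the xe_pt.c hunks above, as a condensed sketch (index name j_ taken from the diff): page-table updates are first built into the new staging[] array, and are only copied into the CPU-walkable children[] array at commit time; for userptr VMAs the commit step additionally requires the notifier lock in read mode (see xe_pt_commit_locks_assert() added above).

    /* prepare: vm->lock and vm->resv held, staging copy only */
    pt_dir->staging[j_] = &newpte->base;

    /* commit: same locks, plus notifier_lock (read) for userptr VMAs */
    pt_dir->children[j_] = pt_dir->staging[j_];
    xe_pt_destroy(oldpte, xe_vma_vm(vma)->flags, deferred);
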
diff --git a/drivers/gpu/drm/xe/xe_pt_walk.c b/drivers/gpu/drm/xe/xe_pt_walk.c
index b8b3d2aea492..be602a763ff3 100644
--- a/drivers/gpu/drm/xe/xe_pt_walk.c
+++ b/drivers/gpu/drm/xe/xe_pt_walk.c
@@ -74,7 +74,8 @@ int xe_pt_walk_range(struct xe_ptw *parent, unsigned int level,
u64 addr, u64 end, struct xe_pt_walk *walk)
{
pgoff_t offset = xe_pt_offset(addr, level, walk);
- struct xe_ptw **entries = parent->children ? parent->children : NULL;
+ struct xe_ptw **entries = walk->staging ? (parent->staging ?: NULL) :
+ (parent->children ?: NULL);
const struct xe_pt_walk_ops *ops = walk->ops;
enum page_walk_action action;
struct xe_ptw *child;
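
Walkers opt in to the staging view explicitly; for example, the stage-bind/unbind walks in the xe_pt.c hunks above set it in their base walk:

    .base = {
            .ops = &xe_pt_stage_bind_ops,
            .shifts = xe_normal_pt_shifts,
            .max_level = XE_PT_HIGHEST_LEVEL,
            .staging = true,        /* walk staging[], not the live children[] */
    },
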
diff --git a/drivers/gpu/drm/xe/xe_pt_walk.h b/drivers/gpu/drm/xe/xe_pt_walk.h
index 5ecc4d2f0f65..5c02c244f7de 100644
--- a/drivers/gpu/drm/xe/xe_pt_walk.h
+++ b/drivers/gpu/drm/xe/xe_pt_walk.h
@@ -11,12 +11,14 @@
/**
* struct xe_ptw - base class for driver pagetable subclassing.
* @children: Pointer to an array of children if any.
+ * @staging: Pointer to an array of staging entries, if any.
*
* Drivers could subclass this, and if it's a page-directory, typically
* embed an array of xe_ptw pointers.
*/
struct xe_ptw {
struct xe_ptw **children;
+ struct xe_ptw **staging;
};
/**
@@ -41,6 +43,8 @@ struct xe_pt_walk {
* as shared pagetables.
*/
bool shared_pt_mode;
+ /** @staging: Walk staging PT structure */
+ bool staging;
};
/**
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index 30259eba450b..5956631c0d40 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -579,51 +579,26 @@ out_unlock_outer:
trace_xe_vm_rebind_worker_exit(vm);
}
-static bool vma_userptr_invalidate(struct mmu_interval_notifier *mni,
- const struct mmu_notifier_range *range,
- unsigned long cur_seq)
+static void __vma_userptr_invalidate(struct xe_vm *vm, struct xe_userptr_vma *uvma)
{
- struct xe_userptr *userptr = container_of(mni, typeof(*userptr), notifier);
- struct xe_userptr_vma *uvma = container_of(userptr, typeof(*uvma), userptr);
+ struct xe_userptr *userptr = &uvma->userptr;
struct xe_vma *vma = &uvma->vma;
- struct xe_vm *vm = xe_vma_vm(vma);
struct dma_resv_iter cursor;
struct dma_fence *fence;
long err;
- xe_assert(vm->xe, xe_vma_is_userptr(vma));
- trace_xe_vma_userptr_invalidate(vma);
-
- if (!mmu_notifier_range_blockable(range))
- return false;
-
- vm_dbg(&xe_vma_vm(vma)->xe->drm,
- "NOTIFIER: addr=0x%016llx, range=0x%016llx",
- xe_vma_start(vma), xe_vma_size(vma));
-
- down_write(&vm->userptr.notifier_lock);
- mmu_interval_set_seq(mni, cur_seq);
-
- /* No need to stop gpu access if the userptr is not yet bound. */
- if (!userptr->initial_bind) {
- up_write(&vm->userptr.notifier_lock);
- return true;
- }
-
/*
* Tell exec and rebind worker they need to repin and rebind this
* userptr.
*/
if (!xe_vm_in_fault_mode(vm) &&
- !(vma->gpuva.flags & XE_VMA_DESTROYED) && vma->tile_present) {
+ !(vma->gpuva.flags & XE_VMA_DESTROYED)) {
spin_lock(&vm->userptr.invalidated_lock);
list_move_tail(&userptr->invalidate_link,
&vm->userptr.invalidated);
spin_unlock(&vm->userptr.invalidated_lock);
}
- up_write(&vm->userptr.notifier_lock);
-
/*
* Preempt fences turn into schedule disables, pipeline these.
* Note that even in fault mode, we need to wait for binds and
@@ -641,11 +616,37 @@ static bool vma_userptr_invalidate(struct mmu_interval_notifier *mni,
false, MAX_SCHEDULE_TIMEOUT);
XE_WARN_ON(err <= 0);
- if (xe_vm_in_fault_mode(vm)) {
+ if (xe_vm_in_fault_mode(vm) && userptr->initial_bind) {
err = xe_vm_invalidate_vma(vma);
XE_WARN_ON(err);
}
+ xe_hmm_userptr_unmap(uvma);
+}
+
+static bool vma_userptr_invalidate(struct mmu_interval_notifier *mni,
+ const struct mmu_notifier_range *range,
+ unsigned long cur_seq)
+{
+ struct xe_userptr_vma *uvma = container_of(mni, typeof(*uvma), userptr.notifier);
+ struct xe_vma *vma = &uvma->vma;
+ struct xe_vm *vm = xe_vma_vm(vma);
+
+ xe_assert(vm->xe, xe_vma_is_userptr(vma));
+ trace_xe_vma_userptr_invalidate(vma);
+
+ if (!mmu_notifier_range_blockable(range))
+ return false;
+
+ vm_dbg(&xe_vma_vm(vma)->xe->drm,
+ "NOTIFIER: addr=0x%016llx, range=0x%016llx",
+ xe_vma_start(vma), xe_vma_size(vma));
+
+ down_write(&vm->userptr.notifier_lock);
+ mmu_interval_set_seq(mni, cur_seq);
+
+ __vma_userptr_invalidate(vm, uvma);
+ up_write(&vm->userptr.notifier_lock);
trace_xe_vma_userptr_invalidate_complete(vma);
return true;
@@ -655,6 +656,34 @@ static const struct mmu_interval_notifier_ops vma_userptr_notifier_ops = {
.invalidate = vma_userptr_invalidate,
};
+#if IS_ENABLED(CONFIG_DRM_XE_USERPTR_INVAL_INJECT)
+/**
+ * xe_vma_userptr_force_invalidate() - force invalidate a userptr
+ * @uvma: The userptr vma to invalidate
+ *
+ * Perform a forced userptr invalidation for testing purposes.
+ */
+void xe_vma_userptr_force_invalidate(struct xe_userptr_vma *uvma)
+{
+ struct xe_vm *vm = xe_vma_vm(&uvma->vma);
+
+ /* Protect against concurrent userptr pinning */
+ lockdep_assert_held(&vm->lock);
+ /* Protect against concurrent notifiers */
+ lockdep_assert_held(&vm->userptr.notifier_lock);
+ /*
+ * Protect against concurrent instances of this function and
+ * the critical exec sections
+ */
+ xe_vm_assert_held(vm);
+
+ if (!mmu_interval_read_retry(&uvma->userptr.notifier,
+ uvma->userptr.notifier_seq))
+ uvma->userptr.notifier_seq -= 2;
+ __vma_userptr_invalidate(vm, uvma);
+}
+#endif
+
int xe_vm_userptr_pin(struct xe_vm *vm)
{
struct xe_userptr_vma *uvma, *next;
@@ -1012,6 +1041,7 @@ static struct xe_vma *xe_vma_create(struct xe_vm *vm,
INIT_LIST_HEAD(&userptr->invalidate_link);
INIT_LIST_HEAD(&userptr->repin_link);
vma->gpuva.gem.offset = bo_offset_or_userptr;
+ mutex_init(&userptr->unmap_mutex);
err = mmu_interval_notifier_insert(&userptr->notifier,
current->mm,
@@ -1053,6 +1083,7 @@ static void xe_vma_destroy_late(struct xe_vma *vma)
* them anymore
*/
mmu_interval_notifier_remove(&userptr->notifier);
+ mutex_destroy(&userptr->unmap_mutex);
xe_vm_put(vm);
} else if (xe_vma_is_null(vma)) {
xe_vm_put(vm);
@@ -1778,9 +1809,6 @@ int xe_vm_create_ioctl(struct drm_device *dev, void *data,
args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE))
return -EINVAL;
- if (XE_IOCTL_DBG(xe, args->extensions))
- return -EINVAL;
-
if (args->flags & DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE)
flags |= XE_VM_FLAG_SCRATCH_PAGE;
if (args->flags & DRM_XE_VM_CREATE_FLAG_LR_MODE)
@@ -2286,8 +2314,17 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
break;
}
case DRM_GPUVA_OP_UNMAP:
+ xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask);
+ break;
case DRM_GPUVA_OP_PREFETCH:
- /* FIXME: Need to skip some prefetch ops */
+ vma = gpuva_to_vma(op->base.prefetch.va);
+
+ if (xe_vma_is_userptr(vma)) {
+ err = xe_vma_userptr_pin_pages(to_userptr_vma(vma));
+ if (err)
+ return err;
+ }
+
xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask);
break;
default:
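
The error-injection path now reuses the regular invalidation machinery. Condensed from the vma_check_userptr() hunk in xe_pt.c and the xe_vma_userptr_force_invalidate() helper added above (not a verbatim copy):

    if (xe_pt_userptr_inject_eagain(uvma))
            xe_vma_userptr_force_invalidate(uvma);  /* CONFIG_DRM_XE_USERPTR_INVAL_INJECT only */

    notifier_seq = uvma->userptr.notifier_seq;
    if (!mmu_interval_read_retry(&uvma->userptr.notifier, notifier_seq))
            return 0;                       /* still valid */

    if (xe_vm_in_fault_mode(vm))
            return -EAGAIN;                 /* fault handler retries */

    /* otherwise the exec/rebind worker repins and rebinds */
    return 0;
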
diff --git a/drivers/gpu/drm/xe/xe_vm.h b/drivers/gpu/drm/xe/xe_vm.h
index 23adb7442881..b882bfb31bd0 100644
--- a/drivers/gpu/drm/xe/xe_vm.h
+++ b/drivers/gpu/drm/xe/xe_vm.h
@@ -274,9 +274,17 @@ static inline void vm_dbg(const struct drm_device *dev,
const char *format, ...)
{ /* noop */ }
#endif
-#endif
struct xe_vm_snapshot *xe_vm_snapshot_capture(struct xe_vm *vm);
void xe_vm_snapshot_capture_delayed(struct xe_vm_snapshot *snap);
void xe_vm_snapshot_print(struct xe_vm_snapshot *snap, struct drm_printer *p);
void xe_vm_snapshot_free(struct xe_vm_snapshot *snap);
+
+#if IS_ENABLED(CONFIG_DRM_XE_USERPTR_INVAL_INJECT)
+void xe_vma_userptr_force_invalidate(struct xe_userptr_vma *uvma);
+#else
+static inline void xe_vma_userptr_force_invalidate(struct xe_userptr_vma *uvma)
+{
+}
+#endif
+#endif
diff --git a/drivers/gpu/drm/xe/xe_vm_types.h b/drivers/gpu/drm/xe/xe_vm_types.h
index 7f9a303e51d8..a4b4091cfd0d 100644
--- a/drivers/gpu/drm/xe/xe_vm_types.h
+++ b/drivers/gpu/drm/xe/xe_vm_types.h
@@ -59,12 +59,16 @@ struct xe_userptr {
struct sg_table *sg;
/** @notifier_seq: notifier sequence number */
unsigned long notifier_seq;
+ /** @unmap_mutex: Mutex protecting dma-unmapping */
+ struct mutex unmap_mutex;
/**
* @initial_bind: user pointer has been bound at least once.
* write: vm->userptr.notifier_lock in read mode and vm->resv held.
* read: vm->userptr.notifier_lock in write mode or vm->resv held.
*/
bool initial_bind;
+ /** @mapped: Whether the @sgt sg-table is dma-mapped. Protected by @unmap_mutex. */
+ bool mapped;
#if IS_ENABLED(CONFIG_DRM_XE_USERPTR_INVAL_INJECT)
u32 divisor;
#endif
@@ -227,8 +231,8 @@ struct xe_vm {
* up for revalidation. Protected from access with the
* @invalidated_lock. Removing items from the list
* additionally requires @lock in write mode, and adding
- * items to the list requires the @userptr.notifer_lock in
- * write mode.
+ * items to the list requires either the @userptr.notifier_lock in
+ * write mode, OR @lock in write mode.
*/
struct list_head invalidated;
} userptr;
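
The two new fields line up with the xe_hmm.c hunks earlier in this diff: xe_hmm_userptr_set_mapped() and xe_hmm_userptr_unmap() both take unmap_mutex, and mapped makes the unmap side idempotent, so the notifier path and the free_sg path can both call the unmap helper safely. The unmap side, condensed:

    mutex_lock(&userptr->unmap_mutex);
    if (userptr->sg && userptr->mapped)
            dma_unmap_sgtable(xe->drm.dev, userptr->sg,
                              write ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE, 0);
    userptr->mapped = false;        /* a second unmap becomes a no-op */
    mutex_unlock(&userptr->unmap_mutex);
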
diff --git a/drivers/gpu/host1x/dev.c b/drivers/gpu/host1x/dev.c
index 46cae925b095..1f93e5e276c0 100644
--- a/drivers/gpu/host1x/dev.c
+++ b/drivers/gpu/host1x/dev.c
@@ -361,6 +361,10 @@ static bool host1x_wants_iommu(struct host1x *host1x)
return true;
}
+/*
+ * Returns ERR_PTR on failure, NULL if the translation is IDENTITY, otherwise a
+ * valid paging domain.
+ */
static struct iommu_domain *host1x_iommu_attach(struct host1x *host)
{
struct iommu_domain *domain = iommu_get_domain_for_dev(host->dev);
@@ -385,6 +389,8 @@ static struct iommu_domain *host1x_iommu_attach(struct host1x *host)
* Similarly, if host1x is already attached to an IOMMU (via the DMA
* API), don't try to attach again.
*/
+ if (domain && domain->type == IOMMU_DOMAIN_IDENTITY)
+ domain = NULL;
if (!host1x_wants_iommu(host) || domain)
return domain;
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index dfc245867a46..a503252702b7 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -148,6 +148,31 @@ config HID_APPLEIR
Say Y here if you want support for Apple infrared remote control.
+config HID_APPLETB_BL
+ tristate "Apple Touch Bar Backlight"
+ depends on BACKLIGHT_CLASS_DEVICE
+ help
+ Say Y here if you want support for the backlight of Touch Bars on x86
+ MacBook Pros.
+
+ To compile this driver as a module, choose M here: the
+ module will be called hid-appletb-bl.
+
+config HID_APPLETB_KBD
+ tristate "Apple Touch Bar Keyboard Mode"
+ depends on USB_HID
+ depends on BACKLIGHT_CLASS_DEVICE
+ depends on INPUT
+ select INPUT_SPARSEKMAP
+ select HID_APPLETB_BL
+ help
+ Say Y here if you want support for the keyboard mode (escape,
+ function, media and brightness keys) of Touch Bars on x86 MacBook
+ Pros.
+
+ To compile this driver as a module, choose M here: the
+ module will be called hid-appletb-kbd.
+
config HID_ASUS
tristate "Asus"
depends on USB_HID
@@ -603,6 +628,7 @@ config HID_LOGITECH
tristate "Logitech devices"
depends on USB_HID
depends on LEDS_CLASS
+ depends on LEDS_CLASS_MULTICOLOR
default !EXPERT
help
Support for Logitech devices that are not fully compliant with HID standard.
@@ -1220,6 +1246,20 @@ config HID_U2FZERO
allow setting the brightness to anything but 1, which will
trigger a single blink and immediately reset back to 0.
+config HID_UNIVERSAL_PIDFF
+ tristate "universal-pidff: extended USB PID driver compatibility and usage"
+ depends on USB_HID
+ depends on HID_PID
+ help
+ Extended PID support for selected devices.
+
+ Contains report fixups, extended usable button range and
+ pidff quirk management to extend compatibility with slightly
+ non-compliant USB PID devices and better fuzz/flat values for
+ high precision direct drive devices.
+
+ Supports Moza Racing, Cammus, VRS, FFBeast and more.
+
config HID_WACOM
tristate "Wacom Intuos/Graphire tablet support (USB)"
depends on USB_HID
diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile
index 482b096eea28..10ae5dedbd84 100644
--- a/drivers/hid/Makefile
+++ b/drivers/hid/Makefile
@@ -29,6 +29,8 @@ obj-$(CONFIG_HID_ALPS) += hid-alps.o
obj-$(CONFIG_HID_ACRUX) += hid-axff.o
obj-$(CONFIG_HID_APPLE) += hid-apple.o
obj-$(CONFIG_HID_APPLEIR) += hid-appleir.o
+obj-$(CONFIG_HID_APPLETB_BL) += hid-appletb-bl.o
+obj-$(CONFIG_HID_APPLETB_KBD) += hid-appletb-kbd.o
obj-$(CONFIG_HID_CREATIVE_SB0540) += hid-creative-sb0540.o
obj-$(CONFIG_HID_ASUS) += hid-asus.o
obj-$(CONFIG_HID_AUREAL) += hid-aureal.o
@@ -140,6 +142,7 @@ hid-uclogic-objs := hid-uclogic-core.o \
hid-uclogic-params.o
obj-$(CONFIG_HID_UCLOGIC) += hid-uclogic.o
obj-$(CONFIG_HID_UDRAW_PS3) += hid-udraw-ps3.o
+obj-$(CONFIG_HID_UNIVERSAL_PIDFF) += hid-universal-pidff.o
obj-$(CONFIG_HID_LED) += hid-led.o
obj-$(CONFIG_HID_XIAOMI) += hid-xiaomi.o
obj-$(CONFIG_HID_XINMO) += hid-xinmo.o
@@ -166,7 +169,6 @@ obj-$(CONFIG_USB_KBD) += usbhid/
obj-$(CONFIG_I2C_HID_CORE) += i2c-hid/
obj-$(CONFIG_INTEL_ISH_HID) += intel-ish-hid/
-obj-$(INTEL_ISH_FIRMWARE_DOWNLOADER) += intel-ish-hid/
obj-$(CONFIG_AMD_SFH_HID) += amd-sfh-hid/
diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_common.h b/drivers/hid/amd-sfh-hid/amd_sfh_common.h
index 799b8686a88a..f44a3bb2fbd4 100644
--- a/drivers/hid/amd-sfh-hid/amd_sfh_common.h
+++ b/drivers/hid/amd-sfh-hid/amd_sfh_common.h
@@ -42,6 +42,7 @@ struct amd_mp2_sensor_info {
struct sfh_dev_status {
bool is_hpd_present;
+ bool is_hpd_enabled;
bool is_als_present;
bool is_sra_present;
};
diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
index 48cfd0c58241..1c1fd63330c9 100644
--- a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
+++ b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
@@ -18,6 +18,7 @@
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
#include "amd_sfh_pcie.h"
#include "sfh1_1/amd_sfh_init.h"
@@ -330,6 +331,57 @@ static const struct dmi_system_id dmi_nodevs[] = {
{ }
};
+static ssize_t hpd_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct amd_mp2_dev *mp2 = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%s\n", str_enabled_disabled(mp2->dev_en.is_hpd_enabled));
+}
+
+static ssize_t hpd_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct amd_mp2_dev *mp2 = dev_get_drvdata(dev);
+ bool enabled;
+ int ret;
+
+ ret = kstrtobool(buf, &enabled);
+ if (ret)
+ return ret;
+
+ mp2->sfh1_1_ops->toggle_hpd(mp2, enabled);
+
+ return count;
+}
+static DEVICE_ATTR_RW(hpd);
+
+static umode_t sfh_attr_is_visible(struct kobject *kobj, struct attribute *attr, int idx)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct amd_mp2_dev *mp2 = dev_get_drvdata(dev);
+
+ if (!mp2->sfh1_1_ops || !mp2->dev_en.is_hpd_present)
+ return 0;
+
+ return attr->mode;
+}
+
+static struct attribute *sfh_attrs[] = {
+ &dev_attr_hpd.attr,
+ NULL,
+};
+
+static struct attribute_group sfh_attr_group = {
+ .attrs = sfh_attrs,
+ .is_visible = sfh_attr_is_visible,
+};
+
+static const struct attribute_group *amd_sfh_groups[] = {
+ &sfh_attr_group,
+ NULL,
+};
+
static void sfh1_1_init_work(struct work_struct *work)
{
struct amd_mp2_dev *mp2 = container_of(work, struct amd_mp2_dev, work);
@@ -341,6 +393,11 @@ static void sfh1_1_init_work(struct work_struct *work)
amd_sfh_clear_intr(mp2);
mp2->init_done = 1;
+
+ rc = sysfs_update_group(&mp2->pdev->dev.kobj, &sfh_attr_group);
+ if (rc)
+ dev_warn(&mp2->pdev->dev, "failed to update sysfs group\n");
+
}
static void sfh_init_work(struct work_struct *work)
@@ -487,6 +544,7 @@ static struct pci_driver amd_mp2_pci_driver = {
.driver.pm = &amd_mp2_pm_ops,
.shutdown = amd_sfh_shutdown,
.remove = amd_sfh_remove,
+ .dev_groups = amd_sfh_groups,
};
module_pci_driver(amd_mp2_pci_driver);
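
Putting the amd_sfh hunks together: the hpd attribute is registered statically through dev_groups, stays hidden until the init worker has discovered HPD hardware (sysfs_update_group() re-evaluates is_visible), and a write simply routes through the sfh1_1 ops. Condensed:

    /* sfh_attr_is_visible(): hide until HPD is known to exist */
    if (!mp2->sfh1_1_ops || !mp2->dev_en.is_hpd_present)
            return 0;

    /* hpd_store(): "0"/"1" (or "on"/"off") toggles the sensor */
    ret = kstrtobool(buf, &enabled);
    if (ret)
            return ret;
    mp2->sfh1_1_ops->toggle_hpd(mp2, enabled);      /* amd_sfh_toggle_hpd() */
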
diff --git a/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.c b/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.c
index e9929c4aa72e..25f0ebfcbd5f 100644
--- a/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.c
+++ b/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.c
@@ -212,6 +212,8 @@ static int amd_sfh1_1_hid_client_init(struct amd_mp2_dev *privdata)
switch (cl_data->sensor_idx[i]) {
case HPD_IDX:
privdata->dev_en.is_hpd_present = true;
+ privdata->dev_en.is_hpd_enabled = true;
+ amd_sfh_toggle_hpd(privdata, false);
break;
case ALS_IDX:
privdata->dev_en.is_als_present = true;
@@ -255,6 +257,10 @@ static void amd_sfh_resume(struct amd_mp2_dev *mp2)
}
for (i = 0; i < cl_data->num_hid_devices; i++) {
+ /* leave HPD alone; policy is controlled by sysfs */
+ if (cl_data->sensor_idx[i] == HPD_IDX)
+ continue;
+
if (cl_data->sensor_sts[i] == SENSOR_DISABLED) {
info.sensor_idx = cl_data->sensor_idx[i];
mp2->mp2_ops->start(mp2, info);
@@ -285,8 +291,10 @@ static void amd_sfh_suspend(struct amd_mp2_dev *mp2)
}
for (i = 0; i < cl_data->num_hid_devices; i++) {
- if (cl_data->sensor_idx[i] != HPD_IDX &&
- cl_data->sensor_sts[i] == SENSOR_ENABLED) {
+ /* leave HPD alone; policy is controlled by sysfs */
+ if (cl_data->sensor_idx[i] == HPD_IDX)
+ continue;
+ if (cl_data->sensor_sts[i] == SENSOR_ENABLED) {
mp2->mp2_ops->stop(mp2, cl_data->sensor_idx[i]);
status = amd_sfh_wait_for_response
(mp2, cl_data->sensor_idx[i], DISABLE_SENSOR);
@@ -304,6 +312,44 @@ static void amd_sfh_suspend(struct amd_mp2_dev *mp2)
amd_sfh_clear_intr(mp2);
}
+void amd_sfh_toggle_hpd(struct amd_mp2_dev *mp2, bool enabled)
+{
+ struct amdtp_cl_data *cl_data = mp2->cl_data;
+ struct amd_mp2_sensor_info info;
+ int i, status;
+
+ if (mp2->dev_en.is_hpd_enabled == enabled)
+ return;
+
+ for (i = 0; i < cl_data->num_hid_devices; i++) {
+ if (cl_data->sensor_idx[i] != HPD_IDX)
+ continue;
+ info.sensor_idx = cl_data->sensor_idx[i];
+ if (enabled) {
+ mp2->mp2_ops->start(mp2, info);
+ status = amd_sfh_wait_for_response
+ (mp2, cl_data->sensor_idx[i], ENABLE_SENSOR);
+ if (status == 0)
+ status = SENSOR_ENABLED;
+ if (status == SENSOR_ENABLED)
+ cl_data->sensor_sts[i] = SENSOR_ENABLED;
+ } else {
+ mp2->mp2_ops->stop(mp2, cl_data->sensor_idx[i]);
+ status = amd_sfh_wait_for_response
+ (mp2, cl_data->sensor_idx[i], DISABLE_SENSOR);
+ if (status == 0)
+ status = SENSOR_DISABLED;
+ if (status != SENSOR_ENABLED)
+ cl_data->sensor_sts[i] = SENSOR_DISABLED;
+ }
+ dev_dbg(&mp2->pdev->dev, "toggle sid 0x%x (%s) status 0x%x\n",
+ cl_data->sensor_idx[i], get_sensor_name(cl_data->sensor_idx[i]),
+ cl_data->sensor_sts[i]);
+ break;
+ }
+ mp2->dev_en.is_hpd_enabled = enabled;
+}
+
static void amd_mp2_pci_remove(void *privdata)
{
struct amd_mp2_dev *mp2 = privdata;
diff --git a/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.h b/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.h
index 21c44990bbeb..797d206641c6 100644
--- a/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.h
+++ b/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.h
@@ -15,12 +15,15 @@
struct amd_sfh1_1_ops {
int (*init)(struct amd_mp2_dev *mp2);
+ void (*toggle_hpd)(struct amd_mp2_dev *mp2, bool enable);
};
int amd_sfh1_1_init(struct amd_mp2_dev *mp2);
+void amd_sfh_toggle_hpd(struct amd_mp2_dev *mp2, bool enabled);
static const struct amd_sfh1_1_ops __maybe_unused sfh1_1_ops = {
.init = amd_sfh1_1_init,
+ .toggle_hpd = amd_sfh_toggle_hpd,
};
#endif
diff --git a/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_interface.c b/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_interface.c
index ffb98b4c36cb..837d59e7a661 100644
--- a/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_interface.c
+++ b/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_interface.c
@@ -129,7 +129,7 @@ static int amd_sfh_hpd_info(u8 *user_present)
if (!user_present)
return -EINVAL;
- if (!emp2 || !emp2->dev_en.is_hpd_present)
+ if (!emp2 || !emp2->dev_en.is_hpd_present || !emp2->dev_en.is_hpd_enabled)
return -ENODEV;
hpdstatus.val = readl(emp2->mmio + amd_get_c2p_val(emp2, 4));
diff --git a/drivers/hid/bpf/progs/Huion__Kamvas-Pro-19.bpf.c b/drivers/hid/bpf/progs/Huion__Kamvas-Pro-19.bpf.c
index a4a4f324aedd..489cb4fcc2cd 100644
--- a/drivers/hid/bpf/progs/Huion__Kamvas-Pro-19.bpf.c
+++ b/drivers/hid/bpf/progs/Huion__Kamvas-Pro-19.bpf.c
@@ -41,7 +41,7 @@ static const __u8 fixed_rdesc[] = {
0x15, 0x00, // Logical Minimum (0) 22
0x25, 0x01, // Logical Maximum (1) 24
0x75, 0x01, // Report Size (1) 26
- 0x95, 0x05, // Report Count (5) 28
+ 0x95, 0x05, // Report Count (5) 28 /* changed (was 6) */
0x81, 0x02, // Input (Data,Var,Abs) 30
0x05, 0x09, // Usage Page (Button) /* inserted */
0x09, 0x4a, // Usage (0x4a) /* inserted to be translated as input usage 0x149: BTN_STYLUS3 */
@@ -189,8 +189,68 @@ static const __u8 fixed_rdesc[] = {
0x96, 0x00, 0x01, // Report Count (256) 322
0xb1, 0x02, // Feature (Data,Var,Abs) 325
0xc0, // End Collection 327
+ /* New in Firmware Version: HUION_M220_240524 */
+ 0x05, 0x01, // Usage Page (Generic Desktop) 328
+ 0x09, 0x01, // Usage (Pointer) 330
+ 0xa1, 0x01, // Collection (Application) 332
+ 0x09, 0x01, // Usage (Pointer) 334
+ 0xa1, 0x00, // Collection (Physical) 336
+ 0x05, 0x09, // Usage Page (Button) 338
+ 0x19, 0x01, // UsageMinimum (1) 340
+ 0x29, 0x03, // UsageMaximum (3) 342
+ 0x15, 0x00, // Logical Minimum (0) 344
+ 0x25, 0x01, // Logical Maximum (1) 346
+ 0x85, 0x02, // Report ID (2) 348
+ 0x95, 0x03, // Report Count (3) 350
+ 0x75, 0x01, // Report Size (1) 352
+ 0x81, 0x02, // Input (Data,Var,Abs) 354
+ 0x95, 0x01, // Report Count (1) 356
+ 0x75, 0x05, // Report Size (5) 358
+ 0x81, 0x01, // Input (Cnst,Arr,Abs) 360
+ 0x05, 0x01, // Usage Page (Generic Desktop) 362
+ 0x09, 0x30, // Usage (X) 364
+ 0x09, 0x31, // Usage (Y) 366
+ 0x15, 0x81, // Logical Minimum (-127) 368
+ 0x25, 0x7f, // Logical Maximum (127) 370
+ 0x75, 0x08, // Report Size (8) 372
+ 0x95, 0x02, // Report Count (2) 374
+ 0x81, 0x06, // Input (Data,Var,Rel) 376
+ 0x95, 0x04, // Report Count (4) 378
+ 0x75, 0x08, // Report Size (8) 380
+ 0x81, 0x01, // Input (Cnst,Arr,Abs) 382
+ 0xc0, // End Collection 384
+ 0xc0, // End Collection 385
+ 0x05, 0x0d, // Usage Page (Digitizers) 386
+ 0x09, 0x05, // Usage (Touch Pad) 388
+ 0xa1, 0x01, // Collection (Application) 390
+ 0x06, 0x00, 0xff, // Usage Page (Vendor Defined Page FF00) 392
+ 0x09, 0x0c, // Usage (Vendor Usage 0x0c) 395
+ 0x15, 0x00, // Logical Minimum (0) 397
+ 0x26, 0xff, 0x00, // Logical Maximum (255) 399
+ 0x75, 0x08, // Report Size (8) 402
+ 0x95, 0x10, // Report Count (16) 404
+ 0x85, 0x3f, // Report ID (63) 406
+ 0x81, 0x22, // Input (Data,Var,Abs,NoPref) 408
+ 0xc0, // End Collection 410
+ 0x06, 0x00, 0xff, // Usage Page (Vendor Defined Page FF00) 411
+ 0x09, 0x0c, // Usage (Vendor Usage 0x0c) 414
+ 0xa1, 0x01, // Collection (Application) 416
+ 0x06, 0x00, 0xff, // Usage Page (Vendor Defined Page FF00) 418
+ 0x09, 0x0c, // Usage (Vendor Usage 0x0c) 421
+ 0x15, 0x00, // Logical Minimum (0) 423
+ 0x26, 0xff, 0x00, // Logical Maximum (255) 425
+ 0x85, 0x44, // Report ID (68) 428
+ 0x75, 0x08, // Report Size (8) 430
+ 0x96, 0x6b, 0x05, // Report Count (1387) 432
+ 0x81, 0x00, // Input (Data,Arr,Abs) 435
+ 0xc0, // End Collection 437
};
+#define PRE_240524_RDESC_SIZE 328
+#define PRE_240524_RDESC_FIXED_SIZE 338 /* The original bits of the descriptor */
+#define FW_240524_RDESC_SIZE 438
+#define FW_240524_RDESC_FIXED_SIZE sizeof(fixed_rdesc)
+
SEC(HID_BPF_RDESC_FIXUP)
int BPF_PROG(hid_fix_rdesc_huion_kamvas_pro_19, struct hid_bpf_ctx *hctx)
{
@@ -199,9 +259,14 @@ int BPF_PROG(hid_fix_rdesc_huion_kamvas_pro_19, struct hid_bpf_ctx *hctx)
if (!data)
return 0; /* EPERM check */
- __builtin_memcpy(data, fixed_rdesc, sizeof(fixed_rdesc));
+ if (hctx->size == FW_240524_RDESC_SIZE) {
+ __builtin_memcpy(data, fixed_rdesc, FW_240524_RDESC_FIXED_SIZE);
+ return sizeof(fixed_rdesc);
+ }
+
+ __builtin_memcpy(data, fixed_rdesc, PRE_240524_RDESC_FIXED_SIZE);
- return sizeof(fixed_rdesc);
+ return PRE_240524_RDESC_FIXED_SIZE;
}
/*
@@ -263,7 +328,9 @@ HID_BPF_OPS(huion_Kamvas_pro_19) = {
SEC("syscall")
int probe(struct hid_bpf_probe_args *ctx)
{
- ctx->retval = ctx->rdesc_size != 328;
+
+ ctx->retval = !((ctx->rdesc_size == PRE_240524_RDESC_SIZE) ||
+ (ctx->rdesc_size == FW_240524_RDESC_SIZE));
if (ctx->retval)
ctx->retval = -EINVAL;
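
Summary of the rdesc fixup logic after this change (condensed from the hunk; sizes are the #defines above): the 438-byte descriptor of the 240524 firmware gets the full fixed table including the new trailing items, while the older 328-byte descriptor only gets the original 338-byte portion.

    if (hctx->size == FW_240524_RDESC_SIZE) {       /* 438 */
            __builtin_memcpy(data, fixed_rdesc, FW_240524_RDESC_FIXED_SIZE);
            return sizeof(fixed_rdesc);
    }
    __builtin_memcpy(data, fixed_rdesc, PRE_240524_RDESC_FIXED_SIZE);
    return PRE_240524_RDESC_FIXED_SIZE;             /* 338 */
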
diff --git a/drivers/hid/bpf/progs/Huion__KeydialK20.bpf.c b/drivers/hid/bpf/progs/Huion__KeydialK20.bpf.c
new file mode 100644
index 000000000000..ec360d71130f
--- /dev/null
+++ b/drivers/hid/bpf/progs/Huion__KeydialK20.bpf.c
@@ -0,0 +1,531 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2024 Red Hat, Inc
+ */
+
+#include "vmlinux.h"
+#include "hid_bpf.h"
+#include "hid_bpf_helpers.h"
+#include "hid_report_helpers.h"
+#include <bpf/bpf_tracing.h>
+
+#define VID_HUION 0x256C
+#define PID_KEYDIAL_K20 0x0069
+
+HID_BPF_CONFIG(
+ HID_DEVICE(BUS_USB, HID_GROUP_GENERIC, VID_HUION, PID_KEYDIAL_K20),
+);
+
+/* Filled in by udev-hid-bpf */
+char UDEV_PROP_HUION_FIRMWARE_ID[64];
+
+/* The prefix of the firmware ID we expect for this device. The full firmware
+ * string has a date suffix, e.g. HUION_T21h_230511
+ */
+char EXPECTED_FIRMWARE_ID[] = "HUION_T21h_";
+
+/* How this BPF program works: the tablet has two modes, firmware mode and
+ * tablet mode. In firmware mode (out of the box) the tablet sends button events
+ * as keyboard shortcuts and the dial as wheel but it's not forwarded by the kernel.
+ * In tablet mode it uses a vendor specific hid report to report everything instead.
+ * Depending on the mode some hid reports are never sent and the corresponding
+ * devices are mute.
+ *
+ * To switch the tablet use e.g. https://github.com/whot/huion-switcher
+ * or one of the tools from the digimend project
+ *
+ * This BPF currently works in both modes. The huion-switcher tool sets the
+ * HUION_FIRMWARE_ID udev property - if that is set then we disable the firmware
+ * pad and puck reports (by making them vendor collections that are ignored).
+ * If that property is not set we fix all hidraw nodes so the tablet works in
+ * either mode, though the drawback is that the device will show up twice if
+ * you bind it to all event nodes.
+ *
+ * Default report descriptor for the first exposed hidraw node:
+ *
+ * # HUION Huion Keydial_K20
+ * # Report descriptor length: 18 bytes
+ * # 0x06, 0x00, 0xff, // Usage Page (Vendor Defined Page 0xFF00) 0
+ * # 0x09, 0x01, // Usage (Vendor Usage 0x01) 3
+ * # 0xa1, 0x01, // Collection (Application) 5
+ * # 0x85, 0x08, // Report ID (8) 7
+ * # 0x75, 0x58, // Report Size (88) 9
+ * # 0x95, 0x01, // Report Count (1) 11
+ * # 0x09, 0x01, // Usage (Vendor Usage 0x01) 13
+ * # 0x81, 0x02, // Input (Data,Var,Abs) 15
+ * # 0xc0, // End Collection 17
+ * R: 18 06 00 ff 09 01 a1 01 85 08 75 58 95 01 09 01 81 02 c0
+ *
+ * This report descriptor appears to be identical for all Huion devices.
+ *
+ * Second hidraw node is the Pad. This one sends the button events until the tablet is
+ * switched to raw mode, then it's mute.
+ *
+ * # HUION Huion Keydial_K20
+ * # Report descriptor length: 135 bytes
+ * # 0x05, 0x01, // Usage Page (Generic Desktop) 0
+ * # 0x09, 0x06, // Usage (Keyboard) 2
+ * # 0xa1, 0x01, // Collection (Application) 4
+ * # 0x85, 0x03, // Report ID (3) 6
+ * # 0x05, 0x07, // Usage Page (Keyboard/Keypad) 8
+ * # 0x19, 0xe0, // UsageMinimum (224) 10
+ * # 0x29, 0xe7, // UsageMaximum (231) 12
+ * # 0x15, 0x00, // Logical Minimum (0) 14
+ * # 0x25, 0x01, // Logical Maximum (1) 16
+ * # 0x75, 0x01, // Report Size (1) 18
+ * # 0x95, 0x08, // Report Count (8) 20
+ * # 0x81, 0x02, // Input (Data,Var,Abs) 22
+ * # 0x05, 0x07, // Usage Page (Keyboard/Keypad) 24
+ * # 0x19, 0x00, // UsageMinimum (0) 26
+ * # 0x29, 0xff, // UsageMaximum (255) 28
+ * # 0x26, 0xff, 0x00, // Logical Maximum (255) 30
+ * # 0x75, 0x08, // Report Size (8) 33
+ * # 0x95, 0x06, // Report Count (6) 35
+ * # 0x81, 0x00, // Input (Data,Arr,Abs) 37
+ * # 0xc0, // End Collection 39
+ * # 0x05, 0x0c, // Usage Page (Consumer) 40
+ * # 0x09, 0x01, // Usage (Consumer Control) 42
+ * # 0xa1, 0x01, // Collection (Application) 44
+ * # 0x85, 0x04, // Report ID (4) 46
+ * # 0x05, 0x0c, // Usage Page (Consumer) 48
+ * # 0x19, 0x00, // UsageMinimum (0) 50
+ * # 0x2a, 0x80, 0x03, // UsageMaximum (896) 52
+ * # 0x15, 0x00, // Logical Minimum (0) 55
+ * # 0x26, 0x80, 0x03, // Logical Maximum (896) 57
+ * # 0x75, 0x10, // Report Size (16) 60
+ * # 0x95, 0x01, // Report Count (1) 62
+ * # 0x81, 0x00, // Input (Data,Arr,Abs) 64
+ * # 0xc0, // End Collection 66
+ * # 0x05, 0x01, // Usage Page (Generic Desktop) 67
+ * # 0x09, 0x02, // Usage (Mouse) 69
+ * # 0xa1, 0x01, // Collection (Application) 71
+ * # 0x09, 0x01, // Usage (Pointer) 73
+ * # 0x85, 0x05, // Report ID (5) 75
+ * # 0xa1, 0x00, // Collection (Physical) 77
+ * # 0x05, 0x09, // Usage Page (Button) 79
+ * # 0x19, 0x01, // UsageMinimum (1) 81
+ * # 0x29, 0x05, // UsageMaximum (5) 83
+ * # 0x15, 0x00, // Logical Minimum (0) 85
+ * # 0x25, 0x01, // Logical Maximum (1) 87
+ * # 0x95, 0x05, // Report Count (5) 89
+ * # 0x75, 0x01, // Report Size (1) 91
+ * # 0x81, 0x02, // Input (Data,Var,Abs) 93
+ * # 0x95, 0x01, // Report Count (1) 95
+ * # 0x75, 0x03, // Report Size (3) 97
+ * # 0x81, 0x01, // Input (Cnst,Arr,Abs) 99
+ * # 0x05, 0x01, // Usage Page (Generic Desktop) 101
+ * # 0x09, 0x30, // Usage (X) 103
+ * # 0x09, 0x31, // Usage (Y) 105
+ * # 0x16, 0x00, 0x80, // Logical Minimum (-32768) 107
+ * # 0x26, 0xff, 0x7f, // Logical Maximum (32767) 110
+ * # 0x75, 0x10, // Report Size (16) 113
+ * # 0x95, 0x02, // Report Count (2) 115
+ * # 0x81, 0x06, // Input (Data,Var,Rel) 117
+ * # 0x95, 0x01, // Report Count (1) 119
+ * # 0x75, 0x08, // Report Size (8) 121
+ * # 0x05, 0x01, // Usage Page (Generic Desktop) 123
+ * # 0x09, 0x38, // Usage (Wheel) 125
+ * # 0x15, 0x81, // Logical Minimum (-127) 127
+ * # 0x25, 0x7f, // Logical Maximum (127) 129
+ * # 0x81, 0x06, // Input (Data,Var,Rel) 131
+ * # 0xc0, // End Collection 133
+ * # 0xc0, // End Collection 134
+ * R: 135 05 01 09 06 a1 01 85 03 05 07 19 e0 29 e7 15 00 25 01 75 01 95 08 81 02 05 07 19 00 29 ff 26 ff 00 75 08 95 06 81 00 c0 05 0c 09 01 a1 01 85 04 05 0c 19 00 2a 80 03 15 00 26 80 03 75 10 95 01 81 00 c0 05 01 09 02 a1 01 09 01 85 05 a1 00 05 09 19 01 29 05 15 00 25 01 95 05 75 01 81 02 95 01 75 03 81 01 05 01 09 30 09 31 16 00 80 26 ff 7f 75 10 95 02 81 06 95 01 75 08 05 01 09 38 15 81 25 7f 81 06 c0 c0
+ *
+ * Third hidraw node is a multi-axis controller which sends the dial events
+ * and the button inside the dial. If the tablet is switched to raw mode it is mute.
+ *
+ * # HUION Huion Keydial_K20
+ * # Report descriptor length: 108 bytes
+ * # 0x05, 0x01, // Usage Page (Generic Desktop) 0
+ * # 0x09, 0x0e, // Usage (System Multi-Axis Controller) 2
+ * # 0xa1, 0x01, // Collection (Application) 4
+ * # 0x85, 0x11, // Report ID (17) 6
+ * # 0x05, 0x0d, // Usage Page (Digitizers) 8
+ * # 0x09, 0x21, // Usage (Puck) 10
+ * # 0xa1, 0x02, // Collection (Logical) 12
+ * # 0x15, 0x00, // Logical Minimum (0) 14
+ * # 0x25, 0x01, // Logical Maximum (1) 16
+ * # 0x75, 0x01, // Report Size (1) 18
+ * # 0x95, 0x01, // Report Count (1) 20
+ * # 0xa1, 0x00, // Collection (Physical) 22
+ * # 0x05, 0x09, // Usage Page (Button) 24
+ * # 0x09, 0x01, // Usage (Button 1) 26
+ * # 0x81, 0x02, // Input (Data,Var,Abs) 28
+ * # 0x05, 0x0d, // Usage Page (Digitizers) 30
+ * # 0x09, 0x33, // Usage (Touch) 32
+ * # 0x81, 0x02, // Input (Data,Var,Abs) 34
+ * # 0x95, 0x06, // Report Count (6) 36
+ * # 0x81, 0x03, // Input (Cnst,Var,Abs) 38
+ * # 0xa1, 0x02, // Collection (Logical) 40
+ * # 0x05, 0x01, // Usage Page (Generic Desktop) 42
+ * # 0x09, 0x37, // Usage (Dial) 44
+ * # 0x16, 0x00, 0x80, // Logical Minimum (-32768) 46
+ * # 0x26, 0xff, 0x7f, // Logical Maximum (32767) 49
+ * # 0x75, 0x10, // Report Size (16) 52
+ * # 0x95, 0x01, // Report Count (1) 54
+ * # 0x81, 0x06, // Input (Data,Var,Rel) 56
+ * # 0x35, 0x00, // Physical Minimum (0) 58
+ * # 0x46, 0x10, 0x0e, // Physical Maximum (3600) 60
+ * # 0x15, 0x00, // Logical Minimum (0) 63
+ * # 0x26, 0x10, 0x0e, // Logical Maximum (3600) 65
+ * # 0x09, 0x48, // Usage (Resolution Multiplier) 68
+ * # 0xb1, 0x02, // Feature (Data,Var,Abs) 70
+ * # 0x45, 0x00, // Physical Maximum (0) 72
+ * # 0xc0, // End Collection 74
+ * # 0x75, 0x08, // Report Size (8) 75
+ * # 0x95, 0x01, // Report Count (1) 77
+ * # 0x81, 0x01, // Input (Cnst,Arr,Abs) 79
+ * # 0x75, 0x08, // Report Size (8) 81
+ * # 0x95, 0x01, // Report Count (1) 83
+ * # 0x81, 0x01, // Input (Cnst,Arr,Abs) 85
+ * # 0x75, 0x08, // Report Size (8) 87
+ * # 0x95, 0x01, // Report Count (1) 89
+ * # 0x81, 0x01, // Input (Cnst,Arr,Abs) 91
+ * # 0x75, 0x08, // Report Size (8) 93
+ * # 0x95, 0x01, // Report Count (1) 95
+ * # 0x81, 0x01, // Input (Cnst,Arr,Abs) 97
+ * # 0x75, 0x08, // Report Size (8) 99
+ * # 0x95, 0x01, // Report Count (1) 101
+ * # 0x81, 0x01, // Input (Cnst,Arr,Abs) 103
+ * # 0xc0, // End Collection 105
+ * # 0xc0, // End Collection 106
+ * # 0xc0, // End Collection 107
+ * R: 108 05 01 09 0e a1 01 85 11 05 0d 09 21 a1 02 15 00 25 01 75 01 95 01 a1 00 05 09 09 01 81 02 05 0d 09 33 81 02 95 06 81 03 a1 02 05 01 09 37 16 00 80 26 ff 7f 75 10 95 01 81 06 35 00 46 10 0e 15 00 26 10 0e 09 48 b1 02 45 00 c0 75 08 95 01 81 01 75 08 95 01 81 01 75 08 95 01 81 01 75 08 95 01 81 01 75 08 95 01 81 01 c0 c0 c0
+ *
+ */
+
+#define PAD_REPORT_DESCRIPTOR_LENGTH 135
+#define PUCK_REPORT_DESCRIPTOR_LENGTH 108
+#define VENDOR_REPORT_DESCRIPTOR_LENGTH 18
+#define PAD_KBD_REPORT_ID 3
+#define PAD_CC_REPORT_ID 4 // never sends events
+#define PAD_MOUSE_REPORT_ID 5 // never sends events
+#define PUCK_REPORT_ID 17
+#define VENDOR_REPORT_ID 8
+#define PAD_KBD_REPORT_LENGTH 8
+#define PAD_CC_REPORT_LENGTH 3
+#define PAD_MOUSE_REPORT_LENGTH 7
+#define PUCK_REPORT_LENGTH 9
+#define VENDOR_REPORT_LENGTH 12
+
+__u32 last_button_state;
+
+static const __u8 disabled_rdesc_puck[] = {
+ FixedSizeVendorReport(PUCK_REPORT_LENGTH)
+};
+
+static const __u8 disabled_rdesc_pad[] = {
+ FixedSizeVendorReport(PAD_KBD_REPORT_LENGTH)
+ FixedSizeVendorReport(PAD_CC_REPORT_LENGTH)
+ FixedSizeVendorReport(PAD_MOUSE_REPORT_LENGTH)
+};
+
+static const __u8 fixed_rdesc_vendor[] = {
+ UsagePage_GenericDesktop
+ Usage_GD_Keypad
+ CollectionApplication(
+ // Byte 0
+ // We send our pad events on the vendor report id because why not
+ ReportId(VENDOR_REPORT_ID)
+ UsagePage_Digitizers
+ Usage_Dig_TabletFunctionKeys
+ CollectionPhysical(
+ // Byte 1 is a button so we look like a tablet
+ Usage_Dig_BarrelSwitch // BTN_STYLUS, needed so we get to be a tablet pad
+ ReportCount(1)
+ ReportSize(1)
+ Input(Var|Abs)
+ ReportCount(7) // Padding
+ Input(Const)
+ // Bytes 2/3 - x/y just exist so we get to be a tablet pad
+ UsagePage_GenericDesktop
+ Usage_GD_X
+ Usage_GD_Y
+ LogicalMinimum_i8(0x0)
+ LogicalMaximum_i8(0x1)
+ ReportCount(2)
+ ReportSize(8)
+ Input(Var|Abs)
+ // Bytes 4-7 are the button state for 19 buttons + pad out to u32
+ // We send the first 10 buttons as buttons 1-10 which is BTN_0 -> BTN_9
+ UsagePage_Button
+ UsageMinimum_i8(1)
+ UsageMaximum_i8(10)
+ LogicalMinimum_i8(0x0)
+ LogicalMaximum_i8(0x1)
+ ReportCount(10)
+ ReportSize(1)
+ Input(Var|Abs)
+ // We send the other 9 buttons as buttons 0x31 and above -> BTN_A - BTN_TL2
+ UsageMinimum_i8(0x31)
+ UsageMaximum_i8(0x3a)
+ ReportCount(9)
+ ReportSize(1)
+ Input(Var|Abs)
+ ReportCount(13)
+ ReportSize(1)
+ Input(Const) // padding
+ // Byte 8 is the wheel
+ UsagePage_GenericDesktop
+ Usage_GD_Wheel
+ LogicalMinimum_i8(-1)
+ LogicalMaximum_i8(1)
+ ReportCount(1)
+ ReportSize(8)
+ Input(Var|Rel)
+ )
+ // Make sure we match our original report length
+ FixedSizeVendorReport(VENDOR_REPORT_LENGTH)
+ )
+};
+
+/* Identical to fixed_rdesc_pad but with different FixedSizeVendorReport */
+static const __u8 fixed_rdesc_pad[] = {
+ UsagePage_GenericDesktop
+ Usage_GD_Keypad
+ CollectionApplication(
+ // Byte 0
+ // We send our pad events on the vendor report id because why not
+ ReportId(VENDOR_REPORT_ID)
+ UsagePage_Digitizers
+ Usage_Dig_TabletFunctionKeys
+ CollectionPhysical(
+ // Byte 1 is a button so we look like a tablet
+ Usage_Dig_BarrelSwitch // BTN_STYLUS, needed so we get to be a tablet pad
+ ReportCount(1)
+ ReportSize(1)
+ Input(Var|Abs)
+ ReportCount(7) // Padding
+ Input(Const)
+ // Bytes 2/3 - x/y just exist so we get to be a tablet pad
+ UsagePage_GenericDesktop
+ Usage_GD_X
+ Usage_GD_Y
+ LogicalMinimum_i8(0x0)
+ LogicalMaximum_i8(0x1)
+ ReportCount(2)
+ ReportSize(8)
+ Input(Var|Abs)
+ // Bytes 4-7 are the button state for 19 buttons + pad out to u32
+ // We send the first 10 buttons as buttons 1-10 which is BTN_0 -> BTN_9
+ UsagePage_Button
+ UsageMinimum_i8(1)
+ UsageMaximum_i8(10)
+ LogicalMinimum_i8(0x0)
+ LogicalMaximum_i8(0x1)
+ ReportCount(10)
+ ReportSize(1)
+ Input(Var|Abs)
+ // We send the other 9 buttons as buttons 0x31 and above -> BTN_A - BTN_TL2
+ UsageMinimum_i8(0x31)
+ UsageMaximum_i8(0x3a)
+ ReportCount(9)
+ ReportSize(1)
+ Input(Var|Abs)
+ ReportCount(13)
+ ReportSize(1)
+ Input(Const) // padding
+ // Byte 8 is the wheel
+ UsagePage_GenericDesktop
+ Usage_GD_Wheel
+ LogicalMinimum_i8(-1)
+ LogicalMaximum_i8(1)
+ ReportCount(1)
+ ReportSize(8)
+ Input(Var|Rel)
+ )
+ // Make sure we match our original report lengths
+ FixedSizeVendorReport(PAD_KBD_REPORT_LENGTH)
+ FixedSizeVendorReport(PAD_CC_REPORT_LENGTH)
+ FixedSizeVendorReport(PAD_MOUSE_REPORT_LENGTH)
+ )
+};
+
+SEC(HID_BPF_RDESC_FIXUP)
+int BPF_PROG(k20_fix_rdesc, struct hid_bpf_ctx *hctx)
+{
+ __u8 *data = hid_bpf_get_data(hctx, 0 /* offset */, HID_MAX_DESCRIPTOR_SIZE /* size */);
+ __s32 rdesc_size = hctx->size;
+ __u8 have_fw_id;
+
+ if (!data)
+ return 0; /* EPERM check */
+
+ /* If we have a firmware ID and it matches our expected prefix, we
+ * disable the default pad/puck nodes. They won't send events
+ * but cause duplicate devices.
+ */
+ have_fw_id = __builtin_memcmp(UDEV_PROP_HUION_FIRMWARE_ID,
+ EXPECTED_FIRMWARE_ID,
+ sizeof(EXPECTED_FIRMWARE_ID) - 1) == 0;
+ if (rdesc_size == PAD_REPORT_DESCRIPTOR_LENGTH) {
+ if (have_fw_id) {
+ __builtin_memcpy(data, disabled_rdesc_pad, sizeof(disabled_rdesc_pad));
+ return sizeof(disabled_rdesc_pad);
+ } else {
+ __builtin_memcpy(data, fixed_rdesc_pad, sizeof(fixed_rdesc_pad));
+ return sizeof(fixed_rdesc_pad);
+
+ }
+ }
+ if (rdesc_size == PUCK_REPORT_DESCRIPTOR_LENGTH) {
+ if (have_fw_id) {
+ __builtin_memcpy(data, disabled_rdesc_puck, sizeof(disabled_rdesc_puck));
+ return sizeof(disabled_rdesc_puck);
+ }
+ }
+ /* Always fix the vendor mode so the tablet will work even if nothing sets
+ * the udev property (e.g. huion-switcher run manually)
+ */
+ if (rdesc_size == VENDOR_REPORT_DESCRIPTOR_LENGTH) {
+ __builtin_memcpy(data, fixed_rdesc_vendor, sizeof(fixed_rdesc_vendor));
+ return sizeof(fixed_rdesc_vendor);
+
+ }
+ return 0;
+}
+
+SEC(HID_BPF_DEVICE_EVENT)
+int BPF_PROG(k20_fix_events, struct hid_bpf_ctx *hctx)
+{
+ __u8 *data = hid_bpf_get_data(hctx, 0 /* offset */, 10 /* size */);
+
+ if (!data)
+ return 0; /* EPERM check */
+
+ /* Only sent if tablet is in raw mode */
+ if (data[0] == VENDOR_REPORT_ID) {
+ /* See fixed_rdesc_pad */
+ struct pad_report {
+ __u8 report_id;
+ __u8 btn_stylus:1;
+ __u8 pad:7;
+ __u8 x;
+ __u8 y;
+ __u32 buttons;
+ __u8 wheel;
+ } __attribute__((packed)) *pad_report;
+
+ __u8 wheel = 0;
+
+ /* Wheel report */
+ if (data[1] == 0xf1) {
+ if (data[5] == 2)
+ wheel = 0xff;
+ else
+ wheel = data[5];
+ } else {
+ /* data[4..6] are the buttons, mapped correctly */
+ last_button_state = data[4] | (data[5] << 8) | (data[6] << 16);
+ wheel = 0; // wheel
+ }
+
+ pad_report = (struct pad_report *)data;
+ pad_report->report_id = VENDOR_REPORT_ID;
+ pad_report->btn_stylus = 0;
+ pad_report->x = 0;
+ pad_report->y = 0;
+ pad_report->buttons = last_button_state;
+ pad_report->wheel = wheel;
+
+ return sizeof(struct pad_report);
+ }
+
+ if (data[0] == PAD_KBD_REPORT_ID) {
+ const __u8 button_mapping[] = {
+ 0x0e, /* Button 1: K */
+ 0x0a, /* Button 2: G */
+ 0x0f, /* Button 3: L */
+ 0x4c, /* Button 4: Delete */
+ 0x0c, /* Button 5: I */
+ 0x07, /* Button 6: D */
+ 0x05, /* Button 7: B */
+ 0x08, /* Button 8: E */
+ 0x16, /* Button 9: S */
+ 0x1d, /* Button 10: Z */
+ 0x06, /* Button 11: C */
+ 0x19, /* Button 12: V */
+ 0xff, /* Button 13: LeftControl */
+ 0xff, /* Button 14: LeftAlt */
+ 0xff, /* Button 15: LeftShift */
+ 0x28, /* Button 16: Return Enter */
+ 0x2c, /* Button 17: Spacebar */
+ 0x11, /* Button 18: N */
+ };
+ /* See fixed_rdesc_pad */
+ struct pad_report {
+ __u8 report_id;
+ __u8 btn_stylus:1;
+ __u8 pad:7;
+ __u8 x;
+ __u8 y;
+ __u32 buttons;
+ __u8 wheel;
+ } __attribute__((packed)) *pad_report;
+ int i, b;
+ __u8 modifiers = data[1];
+ __u32 buttons = 0;
+
+ if (modifiers & 0x01) { /* Control */
+ buttons |= BIT(12);
+ }
+ if (modifiers & 0x02) { /* Shift */
+ buttons |= BIT(14);
+ }
+ if (modifiers & 0x04) { /* Alt */
+ buttons |= BIT(13);
+ }
+
+ for (i = 2; i < PAD_KBD_REPORT_LENGTH; i++) {
+ if (!data[i])
+ break;
+
+ for (b = 0; b < ARRAY_SIZE(button_mapping); b++) {
+ if (data[i] == button_mapping[b]) {
+ buttons |= BIT(b);
+ break;
+ }
+ }
+ data[i] = 0;
+ }
+
+ pad_report = (struct pad_report *)data;
+ pad_report->report_id = VENDOR_REPORT_ID;
+ pad_report->btn_stylus = 0;
+ pad_report->x = 0;
+ pad_report->y = 0;
+ pad_report->buttons = buttons;
+ // The wheel happens on a different hidraw node but its
+ // values are unreliable (as is the button inside the wheel).
+ // So the wheel is simply always zero, if you want the wheel
+ // to work reliably, use the tablet mode.
+ pad_report->wheel = 0;
+
+ return sizeof(struct pad_report);
+ }
+
+ return 0;
+}
+
+HID_BPF_OPS(keydial_k20) = {
+ .hid_device_event = (void *)k20_fix_events,
+ .hid_rdesc_fixup = (void *)k20_fix_rdesc,
+};
+
+SEC("syscall")
+int probe(struct hid_bpf_probe_args *ctx)
+{
+ switch (ctx->rdesc_size) {
+ case PAD_REPORT_DESCRIPTOR_LENGTH:
+ case PUCK_REPORT_DESCRIPTOR_LENGTH:
+ case VENDOR_REPORT_DESCRIPTOR_LENGTH:
+ ctx->retval = 0;
+ break;
+ default:
+ ctx->retval = -EINVAL;
+ }
+
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
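
A worked example of the firmware-mode translation in k20_fix_events() above, using the button_mapping table (the input bytes are hypothetical but follow the layout documented in the comment block): report ID 3, modifier byte 0x01 (LControl) and usage 0x0e ('K', i.e. button 1).

    __u8 kbd[PAD_KBD_REPORT_LENGTH] = { 0x03, 0x01, 0x0e, 0, 0, 0, 0, 0 };
    /* modifiers 0x01 -> LControl          -> buttons |= BIT(12) */
    /* data[2] == 0x0e matches mapping[0]  -> buttons |= BIT(0)  */
    /* emitted pad_report: report_id = 8 (VENDOR_REPORT_ID),
     * buttons = BIT(12) | BIT(0), wheel = 0
     */
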
diff --git a/drivers/hid/bpf/progs/TUXEDO__Sirius-16-Gen1-and-Gen2.bpf.c b/drivers/hid/bpf/progs/TUXEDO__Sirius-16-Gen1-and-Gen2.bpf.c
new file mode 100644
index 000000000000..a123003fb5fd
--- /dev/null
+++ b/drivers/hid/bpf/progs/TUXEDO__Sirius-16-Gen1-and-Gen2.bpf.c
@@ -0,0 +1,47 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Copyright (c) 2025 TUXEDO Computers GmbH
+ */
+
+#include "vmlinux.h"
+#include "hid_bpf.h"
+#include "hid_bpf_helpers.h"
+#include <bpf/bpf_tracing.h>
+
+HID_BPF_CONFIG(
+ HID_DEVICE(BUS_USB, HID_GROUP_GENERIC, 0x048D, 0x8910)
+);
+
+SEC(HID_BPF_DEVICE_EVENT)
+int BPF_PROG(ignore_key_fix_event, struct hid_bpf_ctx *hid_ctx)
+{
+ const int expected_length = 37;
+ const int expected_report_id = 1;
+ __u8 *data;
+ int i;
+
+ if (hid_ctx->size < expected_length)
+ return 0;
+
+ data = hid_bpf_get_data(hid_ctx, 0, expected_length);
+ if (!data || data[0] != expected_report_id)
+ return 0;
+
+ // Zero out F13 (HID usage ID: 0x68) key press.
+ // The first 6 parallel key presses (excluding modifier keys) are
+ // encoded in an array containing usage IDs.
+ for (i = 3; i < 9; ++i)
+ if (data[i] == 0x68)
+ data[i] = 0x00;
+ // Additional parallel key presses starting with the 7th (excluding
+ // modifier keys) are encoded as a bit flag with the offset being
+ // the usage ID.
+ data[22] &= 0xfe;
+
+ return 0;
+}
+
+HID_BPF_OPS(ignore_button) = {
+ .hid_device_event = (void *)ignore_key_fix_event,
+};
+
+char _license[] SEC("license") = "GPL";
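
A quick sanity check on the bitmap offset in the TUXEDO fix above (the byte-9 start is an inference from the loop bounds, not stated in the patch): with the 6-slot usage array at bytes 3-8 and the overflow bitmap following it, usage 0x68 (F13, decimal 104) falls exactly on the bit the fix clears.

    /* assumed layout: bitmap starts at byte 9 */
    byte = 9 + 104 / 8;     /* = 22 */
    bit  = 104 % 8;         /* = 0  */
    /* matches the fix: data[22] &= 0xfe clears bit 0 of byte 22 */
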
diff --git a/drivers/hid/bpf/progs/XPPen__ACK05.bpf.c b/drivers/hid/bpf/progs/XPPen__ACK05.bpf.c
new file mode 100644
index 000000000000..1a0aeea6a081
--- /dev/null
+++ b/drivers/hid/bpf/progs/XPPen__ACK05.bpf.c
@@ -0,0 +1,330 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2024 Red Hat, Inc
+ */
+
+#include "vmlinux.h"
+#include "hid_bpf.h"
+#include "hid_bpf_helpers.h"
+#include "hid_report_helpers.h"
+#include <bpf/bpf_tracing.h>
+
+#define HID_BPF_ASYNC_MAX_CTX 1
+#include "hid_bpf_async.h"
+
+#define VID_UGEE 0x28BD
+/* same PID whether connected directly or through the provided dongle: */
+#define PID_ACK05_REMOTE 0x0202
+
+
+HID_BPF_CONFIG(
+ HID_DEVICE(BUS_USB, HID_GROUP_GENERIC, VID_UGEE, PID_ACK05_REMOTE),
+);
+
+/*
+ * By default, the pad reports the buttons through a set of key sequences.
+ *
+ * The pad reports a classic keyboard report descriptor:
+ * # HANVON UGEE Shortcut Remote
+ * Report descriptor length: 102 bytes
+ * 0x05, 0x01, // Usage Page (Generic Desktop) 0
+ * 0x09, 0x02, // Usage (Mouse) 2
+ * 0xa1, 0x01, // Collection (Application) 4
+ * 0x85, 0x09, // Report ID (9) 6
+ * 0x09, 0x01, // Usage (Pointer) 8
+ * 0xa1, 0x00, // Collection (Physical) 10
+ * 0x05, 0x09, // Usage Page (Button) 12
+ * 0x19, 0x01, // UsageMinimum (1) 14
+ * 0x29, 0x03, // UsageMaximum (3) 16
+ * 0x15, 0x00, // Logical Minimum (0) 18
+ * 0x25, 0x01, // Logical Maximum (1) 20
+ * 0x95, 0x03, // Report Count (3) 22
+ * 0x75, 0x01, // Report Size (1) 24
+ * 0x81, 0x02, // Input (Data,Var,Abs) 26
+ * 0x95, 0x05, // Report Count (5) 28
+ * 0x81, 0x01, // Input (Cnst,Arr,Abs) 30
+ * 0x05, 0x01, // Usage Page (Generic Desktop) 32
+ * 0x09, 0x30, // Usage (X) 34
+ * 0x09, 0x31, // Usage (Y) 36
+ * 0x26, 0xff, 0x7f, // Logical Maximum (32767) 38
+ * 0x95, 0x02, // Report Count (2) 41
+ * 0x75, 0x10, // Report Size (16) 43
+ * 0x81, 0x02, // Input (Data,Var,Abs) 45
+ * 0x05, 0x0d, // Usage Page (Digitizers) 47
+ * 0x09, 0x30, // Usage (Tip Pressure) 49
+ * 0x26, 0xff, 0x07, // Logical Maximum (2047) 51
+ * 0x95, 0x01, // Report Count (1) 54
+ * 0x75, 0x10, // Report Size (16) 56
+ * 0x81, 0x02, // Input (Data,Var,Abs) 58
+ * 0xc0, // End Collection 60
+ * 0xc0, // End Collection 61
+ * 0x05, 0x01, // Usage Page (Generic Desktop) 62
+ * 0x09, 0x06, // Usage (Keyboard) 64
+ * 0xa1, 0x01, // Collection (Application) 66
+ * 0x85, 0x06, // Report ID (6) 68
+ * 0x05, 0x07, // Usage Page (Keyboard/Keypad) 70
+ * 0x19, 0xe0, // UsageMinimum (224) 72
+ * 0x29, 0xe7, // UsageMaximum (231) 74
+ * 0x15, 0x00, // Logical Minimum (0) 76
+ * 0x25, 0x01, // Logical Maximum (1) 78
+ * 0x75, 0x01, // Report Size (1) 80
+ * 0x95, 0x08, // Report Count (8) 82
+ * 0x81, 0x02, // Input (Data,Var,Abs) 84
+ * 0x05, 0x07, // Usage Page (Keyboard/Keypad) 86
+ * 0x19, 0x00, // UsageMinimum (0) 88
+ * 0x29, 0xff, // UsageMaximum (255) 90
+ * 0x26, 0xff, 0x00, // Logical Maximum (255) 92
+ * 0x75, 0x08, // Report Size (8) 95
+ * 0x95, 0x06, // Report Count (6) 97
+ * 0x81, 0x00, // Input (Data,Arr,Abs) 99
+ * 0xc0, // End Collection 101
+ *
+ * Each button gets assigned the following events:
+ *
+ * Buttons released: 06 00 00 00 00 00 00 00
+ * Button 1: 06 01 12 00 00 00 00 00 -> LControl + o
+ * Button 2: 06 01 11 00 00 00 00 00 -> LControl + n
+ * Button 3: 06 00 3e 00 00 00 00 00 -> F5
+ * Button 4: 06 02 00 00 00 00 00 00 -> LShift
+ * Button 5: 06 01 00 00 00 00 00 00 -> LControl
+ * Button 6: 06 04 00 00 00 00 00 00 -> LAlt
+ * Button 7: 06 01 16 00 00 00 00 00 -> LControl + s
+ * Button 8: 06 01 1d 00 00 00 00 00 -> LControl + z
+ * Button 9: 06 00 2c 00 00 00 00 00 -> Space
+ * Button 10: 06 03 1d 00 00 00 00 00 -> LControl + LShift + z
+ * Wheel: 06 01 57 00 00 00 00 00 -> clockwise rotation (LControl + Keypad Plus)
+ * Wheel: 06 01 56 00 00 00 00 00 -> counter-clockwise rotation
+ * (LControl + Keypad Minus)
+ *
+ * However, multiple buttons can be pressed at the same time, and when this happens,
+ * each button gets assigned a new slot in the Input (Data,Arr,Abs):
+ *
+ * Button 1 + 3: 06 01 12 3e 00 00 00 00 -> LControl + o + F5
+ *
+ * When a modifier is pressed (Button 4, 5, or 6), the assigned key is set to 00:
+ *
+ * Button 5 + 7: 06 01 00 16 00 00 00 00 -> LControl + s
+ *
+ * This is mostly fine, but with Button 8 and Button 10 sharing the same
+ * key value ("z"), there are cases where we cannot know which is which.
+ *
+ */
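+
+/*
+ * For illustration only, decoding those default key-sequence reports would
+ * look roughly like this (this driver instead switches the pad to its raw
+ * vendor mode, see switch_to_raw_mode() below):
+ *
+ *   __u8 mods = data[1];   // bit 0 LControl, bit 1 LShift, bit 2 LAlt
+ *   __u8 *keys = &data[2]; // up to 6 parallel usage IDs
+ *   // e.g. Button 1 <=> (mods & 0x01) && one of keys[] == 0x12 ('o')
+ *
+ * and the ambiguity mentioned above is concrete: Button 4 (LShift) pressed
+ * together with Button 8 (LControl + z) produces 06 03 1d 00 00 00 00 00,
+ * exactly the same report as Button 10 alone.
+ */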
+
+#define PAD_WIRED_DESCRIPTOR_LENGTH 102
+#define PAD_DONGLE_DESCRIPTOR_LENGTH 177
+#define STYLUS_DESCRIPTOR_LENGTH 109
+#define VENDOR_DESCRIPTOR_LENGTH 36
+#define PAD_REPORT_ID 6
+#define RAW_PAD_REPORT_ID 0xf0
+#define RAW_BATTERY_REPORT_ID 0xf2
+#define VENDOR_REPORT_ID 2
+#define PAD_REPORT_LENGTH 8
+#define VENDOR_REPORT_LENGTH 12
+
+__u16 last_button_state;
+
+static const __u8 disabled_rdesc[] = {
+ // Make sure we match our original report length
+ FixedSizeVendorReport(VENDOR_REPORT_LENGTH)
+};
+
+static const __u8 fixed_rdesc_vendor[] = {
+ UsagePage_GenericDesktop
+ Usage_GD_Keypad
+ CollectionApplication(
+ // -- Byte 0 in report
+ ReportId(RAW_PAD_REPORT_ID)
+		// Byte 1 in report - same as the report ID
+ ReportCount(1)
+ ReportSize(8)
+ Input(Const) // padding (internal report ID)
+		LogicalMinimum_i8(0)
+ LogicalMaximum_i8(1)
+ UsagePage_Digitizers
+ Usage_Dig_TabletFunctionKeys
+ CollectionPhysical(
+ // Byte 2-3 is the button state
+ UsagePage_Button
+ UsageMinimum_i8(0x01)
+ UsageMaximum_i8(0x0a)
+ LogicalMinimum_i8(0x0)
+ LogicalMaximum_i8(0x1)
+ ReportCount(10)
+ ReportSize(1)
+ Input(Var|Abs)
+ Usage_i8(0x31) // will be mapped as BTN_A / BTN_SOUTH
+ ReportCount(1)
+ Input(Var|Abs)
+ ReportCount(5) // padding
+ Input(Const)
+ // Byte 4 in report - just exists so we get to be a tablet pad
+ Usage_Dig_BarrelSwitch // BTN_STYLUS
+ ReportCount(1)
+ ReportSize(1)
+ Input(Var|Abs)
+ ReportCount(7) // padding
+ Input(Const)
+ // Bytes 5/6 in report - just exists so we get to be a tablet pad
+ UsagePage_GenericDesktop
+ Usage_GD_X
+ Usage_GD_Y
+ ReportCount(2)
+ ReportSize(8)
+ Input(Var|Abs)
+ // Byte 7 in report is the dial
+ Usage_GD_Wheel
+ LogicalMinimum_i8(-1)
+ LogicalMaximum_i8(1)
+ ReportCount(1)
+ ReportSize(8)
+ Input(Var|Rel)
+ )
+ // -- Byte 0 in report
+ ReportId(RAW_BATTERY_REPORT_ID)
+	// Byte 1 in report - same as the report ID
+ ReportCount(1)
+ ReportSize(8)
+ Input(Const) // padding (internal report ID)
+ // Byte 2 in report - always 0x01
+	Input(Const) // padding (always 0x01)
+ UsagePage_Digitizers
+ /*
+ * We represent the device as a stylus to force the kernel to not
+ * directly query its battery state. Instead the kernel will rely
+ * only on the provided events.
+ */
+ Usage_Dig_Stylus
+ CollectionPhysical(
+ // Byte 3 in report - battery value
+ UsagePage_BatterySystem
+ Usage_BS_AbsoluteStateOfCharge
+ LogicalMinimum_i8(0)
+ LogicalMaximum_i8(100)
+ ReportCount(1)
+ ReportSize(8)
+ Input(Var|Abs)
+ // Byte 4 in report - charging state
+ Usage_BS_Charging
+ LogicalMinimum_i8(0)
+ LogicalMaximum_i8(1)
+ ReportCount(1)
+ ReportSize(8)
+ Input(Var|Abs)
+ )
+ )
+};
+
+SEC(HID_BPF_RDESC_FIXUP)
+int BPF_PROG(ack05_fix_rdesc, struct hid_bpf_ctx *hctx)
+{
+ __u8 *data = hid_bpf_get_data(hctx, 0 /* offset */, HID_MAX_DESCRIPTOR_SIZE /* size */);
+ __s32 rdesc_size = hctx->size;
+
+ if (!data)
+ return 0; /* EPERM check */
+
+ if (rdesc_size == VENDOR_DESCRIPTOR_LENGTH) {
+ /*
+ * The vendor fixed rdesc is appended after the current one,
+ * to keep the output reports working.
+ */
+ __builtin_memcpy(data + rdesc_size, fixed_rdesc_vendor, sizeof(fixed_rdesc_vendor));
+ return sizeof(fixed_rdesc_vendor) + rdesc_size;
+ }
+
+ hid_set_name(hctx->hid, "Disabled by HID-BPF Hanvon Ugee Shortcut Remote");
+
+ __builtin_memcpy(data, disabled_rdesc, sizeof(disabled_rdesc));
+ return sizeof(disabled_rdesc);
+}
+
+static int HID_BPF_ASYNC_FUN(switch_to_raw_mode)(struct hid_bpf_ctx *hid)
+{
+ static __u8 magic_0[32] = {0x02, 0xb0, 0x04, 0x00, 0x00};
+ int err;
+
+ /*
+ * The proprietary driver sends the 3 following packets after the
+ * above one.
+ * These don't seem to have any effect, so we don't send them to save
+ * some processing time.
+ *
+ * static __u8 magic_1[32] = {0x02, 0xb4, 0x01, 0x00, 0x01};
+ * static __u8 magic_2[32] = {0x02, 0xb4, 0x01, 0x00, 0xff};
+ * static __u8 magic_3[32] = {0x02, 0xb8, 0x04, 0x00, 0x00};
+ */
+
+ err = hid_bpf_hw_output_report(hid, magic_0, sizeof(magic_0));
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
+SEC(HID_BPF_DEVICE_EVENT)
+int BPF_PROG(ack05_fix_events, struct hid_bpf_ctx *hctx)
+{
+ __u8 *data = hid_bpf_get_data(hctx, 0 /* offset */, PAD_REPORT_LENGTH);
+ int ret = 0;
+
+ if (!data)
+ return 0; /* EPERM check */
+
+ if (data[0] != VENDOR_REPORT_ID)
+ return 0;
+
+ /* reconnect event */
+	if (data[1] == 0xf8 && data[2] == 0x02 && data[3] == 0x01)
+ HID_BPF_ASYNC_DELAYED_CALL(switch_to_raw_mode, hctx, 10);
+
+ /* button event */
+ if (data[1] == RAW_PAD_REPORT_ID) {
+ data[0] = data[1];
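+		/*
+		 * Byte 7 is the dial: presumably 0x02 means counter-clockwise,
+		 * so remap it to 0xff (-1) to fit the Wheel's -1..1 relative
+		 * range declared in fixed_rdesc_vendor.
+		 */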
+ if (data[7] == 0x02)
+ data[7] = 0xff;
+ ret = 8;
+ } else if (data[1] == RAW_BATTERY_REPORT_ID) {
+ data[0] = data[1];
+ ret = 5;
+ }
+
+ return ret;
+}
+
+HID_BPF_OPS(xppen_ack05_remote) = {
+ .hid_device_event = (void *)ack05_fix_events,
+ .hid_rdesc_fixup = (void *)ack05_fix_rdesc,
+};
+
+SEC("syscall")
+int probe(struct hid_bpf_probe_args *ctx)
+{
+ switch (ctx->rdesc_size) {
+ case PAD_WIRED_DESCRIPTOR_LENGTH:
+ case PAD_DONGLE_DESCRIPTOR_LENGTH:
+ case STYLUS_DESCRIPTOR_LENGTH:
+ case VENDOR_DESCRIPTOR_LENGTH:
+ ctx->retval = 0;
+ break;
+ default:
+ ctx->retval = -EINVAL;
+ break;
+ }
+
+ if (ctx->rdesc_size == VENDOR_DESCRIPTOR_LENGTH) {
+ struct hid_bpf_ctx *hctx = hid_bpf_allocate_context(ctx->hid);
+
+ if (!hctx) {
+ ctx->retval = -EINVAL;
+ return 0;
+ }
+
+ ctx->retval = HID_BPF_ASYNC_INIT(switch_to_raw_mode) ||
+ switch_to_raw_mode(hctx);
+
+ hid_bpf_release_context(hctx);
+ }
+
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/drivers/hid/bpf/progs/XPPen__ArtistPro16Gen2.bpf.c b/drivers/hid/bpf/progs/XPPen__ArtistPro16Gen2.bpf.c
index a669525691aa..0c7e5cc5dc7e 100644
--- a/drivers/hid/bpf/progs/XPPen__ArtistPro16Gen2.bpf.c
+++ b/drivers/hid/bpf/progs/XPPen__ArtistPro16Gen2.bpf.c
@@ -10,10 +10,12 @@
#define VID_UGEE 0x28BD /* VID is shared with SinoWealth and Glorious and prob others */
#define PID_ARTIST_PRO14_GEN2 0x095A
#define PID_ARTIST_PRO16_GEN2 0x095B
+#define PID_ARTIST_PRO19_GEN2 0x096A
HID_BPF_CONFIG(
HID_DEVICE(BUS_USB, HID_GROUP_GENERIC, VID_UGEE, PID_ARTIST_PRO14_GEN2),
- HID_DEVICE(BUS_USB, HID_GROUP_GENERIC, VID_UGEE, PID_ARTIST_PRO16_GEN2)
+ HID_DEVICE(BUS_USB, HID_GROUP_GENERIC, VID_UGEE, PID_ARTIST_PRO16_GEN2),
+ HID_DEVICE(BUS_USB, HID_GROUP_GENERIC, VID_UGEE, PID_ARTIST_PRO19_GEN2)
);
/*
@@ -22,7 +24,7 @@ HID_BPF_CONFIG(
* - when the eraser button is pressed and the stylus is touching the tablet,
* the device sends Tip Switch instead of sending Eraser
*
- * This descriptor uses physical dimensions of the 16" device.
+ * This descriptor uses the physical dimensions of the 16" device.
*/
static const __u8 fixed_rdesc[] = {
0x05, 0x0d, // Usage Page (Digitizers) 0
@@ -100,6 +102,12 @@ int BPF_PROG(hid_fix_rdesc_xppen_artistpro16gen2, struct hid_bpf_ctx *hctx)
data[62] = 0x62;
data[73] = 0x1c;
data[72] = 0xfd;
+ } else if (hctx->hid->product == PID_ARTIST_PRO19_GEN2) {
+ /* 19" screen reports 16.101" x 9.057" */
+ data[63] = 0x3e;
+ data[62] = 0xe5;
+ data[73] = 0x23;
+ data[72] = 0x61;
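+		/* i.e. 0x3ee5 = 16101 and 0x2361 = 9057, presumably the
+		 * physical maxima in thousandths of an inch, stored
+		 * little-endian at descriptor bytes 62/63 and 72/73 as for
+		 * the other models above.
+		 */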
}
return sizeof(fixed_rdesc);
@@ -177,6 +185,27 @@ static const __u16 angle_offsets_vertical_16[128] = {
188, 186, 184, 182, 180, 178, 176, 174, 172
};
+/* 19" screen 16.101" x 9.057" */
+static const __u16 angle_offsets_horizontal_19[128] = {
+ 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 25, 27, 29, 31, 33, 35, 37, 39, 41,
+ 42, 44, 46, 48, 50, 51, 53, 55, 57, 58, 60, 62, 63, 65, 67, 68, 70, 71, 73, 74, 76,
+ 77, 79, 80, 82, 83, 84, 86, 87, 88, 89, 90, 92, 93, 94, 95, 96, 97, 98, 99, 100,
+ 101, 102, 103, 104, 104, 105, 106, 106, 107, 108, 108, 109, 109, 110, 110, 111,
+ 111, 112, 112, 112, 112, 113, 113, 113, 113, 113, 113, 113, 113, 113, 113, 113,
+ 113, 113, 112, 112, 112, 112, 111, 111, 110, 110, 109, 109, 108, 108, 107, 106,
+ 106, 105, 104, 104, 103, 102, 101, 100, 99, 98, 97, 96, 95, 94, 93, 92, 90
+};
+static const __u16 angle_offsets_vertical_19[128] = {
+ 0, 4, 7, 11, 14, 18, 21, 25, 28, 32, 35, 38, 42, 45, 49, 52, 56, 59, 62, 66, 69, 72,
+ 75, 79, 82, 85, 88, 91, 95, 98, 101, 104, 107, 110, 113, 116, 118, 121, 124, 127,
+ 129, 132, 135, 137, 140, 142, 145, 147, 150, 152, 154, 157, 159, 161, 163, 165, 167,
+ 169, 171, 173, 174, 176, 178, 179, 181, 183, 184, 185, 187, 188, 189, 190, 192, 193,
+ 194, 195, 195, 196, 197, 198, 198, 199, 199, 200, 200, 201, 201, 201, 201, 201, 201,
+ 201, 201, 201, 201, 201, 200, 200, 199, 199, 198, 198, 197, 196, 195, 195, 194, 193,
+ 192, 190, 189, 188, 187, 185, 184, 183, 181, 179, 178, 176, 174, 173, 171, 169, 167,
+ 165, 163, 161
+};
+
static void compensate_coordinates_by_tilt(__u8 *data, const __u8 idx,
const __s8 tilt, const __u16 (*compensation_table)[128])
{
@@ -241,12 +270,19 @@ static int xppen_16_fix_angle_offset(struct hid_bpf_ctx *hctx)
__s8 tilt_x = (__s8) data[8];
__s8 tilt_y = (__s8) data[9];
- if (hctx->hid->product == PID_ARTIST_PRO14_GEN2) {
+ switch (hctx->hid->product) {
+ case PID_ARTIST_PRO14_GEN2:
compensate_coordinates_by_tilt(data, 2, tilt_x, &angle_offsets_horizontal_14);
compensate_coordinates_by_tilt(data, 4, tilt_y, &angle_offsets_vertical_14);
- } else if (hctx->hid->product == PID_ARTIST_PRO16_GEN2) {
+ break;
+ case PID_ARTIST_PRO16_GEN2:
compensate_coordinates_by_tilt(data, 2, tilt_x, &angle_offsets_horizontal_16);
compensate_coordinates_by_tilt(data, 4, tilt_y, &angle_offsets_vertical_16);
+ break;
+ case PID_ARTIST_PRO19_GEN2:
+ compensate_coordinates_by_tilt(data, 2, tilt_x, &angle_offsets_horizontal_19);
+ compensate_coordinates_by_tilt(data, 4, tilt_y, &angle_offsets_vertical_19);
+ break;
}
return 0;
diff --git a/drivers/hid/bpf/progs/hid_bpf_async.h b/drivers/hid/bpf/progs/hid_bpf_async.h
new file mode 100644
index 000000000000..9ab585434239
--- /dev/null
+++ b/drivers/hid/bpf/progs/hid_bpf_async.h
@@ -0,0 +1,219 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ * Copyright (c) 2024 Benjamin Tissoires
+ */
+
+#ifndef __HID_BPF_ASYNC_H__
+#define __HID_BPF_ASYNC_H__
+
+#ifndef HID_BPF_ASYNC_MAX_CTX
+#error "HID_BPF_ASYNC_MAX_CTX should be set to the maximum number of concurrent async functions"
+#endif /* HID_BPF_ASYNC_MAX_CTX */
+
+#define CLOCK_MONOTONIC 1
+
+typedef int (*hid_bpf_async_callback_t)(void *map, int *key, void *value);
+
+enum hid_bpf_async_state {
+ HID_BPF_ASYNC_STATE_UNSET = 0,
+ HID_BPF_ASYNC_STATE_INITIALIZING,
+ HID_BPF_ASYNC_STATE_INITIALIZED,
+ HID_BPF_ASYNC_STATE_STARTING,
+ HID_BPF_ASYNC_STATE_RUNNING,
+};
+
+struct hid_bpf_async_map_elem {
+ struct bpf_spin_lock lock;
+ enum hid_bpf_async_state state;
+ struct bpf_timer t;
+ struct bpf_wq wq;
+ u32 hid;
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, HID_BPF_ASYNC_MAX_CTX);
+ __type(key, u32);
+ __type(value, struct hid_bpf_async_map_elem);
+} hid_bpf_async_ctx_map SEC(".maps");
+
+/**
+ * HID_BPF_ASYNC_CB: macro to define an async callback used in a bpf_wq
+ *
+ * The caller is responsible for allocating a key in the async map
+ * with hid_bpf_async_get_ctx().
+ */
+#define HID_BPF_ASYNC_CB(cb) \
+cb(void *map, int *key, void *value); \
+static __always_inline int \
+____##cb(struct hid_bpf_ctx *ctx); \
+typeof(cb(0, 0, 0)) cb(void *map, int *key, void *value) \
+{ \
+ struct hid_bpf_async_map_elem *e; \
+ struct hid_bpf_ctx *ctx; \
+ \
+ e = (struct hid_bpf_async_map_elem *)value; \
+ ctx = hid_bpf_allocate_context(e->hid); \
+ if (!ctx) \
+ return 0; /* EPERM check */ \
+ \
+ e->state = HID_BPF_ASYNC_STATE_RUNNING; \
+ \
+ ____##cb(ctx); \
+ \
+ e->state = HID_BPF_ASYNC_STATE_INITIALIZED; \
+ hid_bpf_release_context(ctx); \
+ return 0; \
+} \
+static __always_inline int \
+____##cb
+
+/**
+ * HID_BPF_ASYNC_FUN: macro to automatically handle async callback contexts
+ *
+ * Needs to be used in conjunction with HID_BPF_ASYNC_INIT and HID_BPF_ASYNC_DELAYED_CALL
+ */
+#define HID_BPF_ASYNC_FUN(fun) \
+fun(struct hid_bpf_ctx *ctx); \
+int ____key__##fun; \
+static int ____async_init_##fun(void) \
+{ \
+ ____key__##fun = hid_bpf_async_get_ctx(); \
+ if (____key__##fun < 0) \
+ return ____key__##fun; \
+ return 0; \
+} \
+static int HID_BPF_ASYNC_CB(____##fun##_cb)(struct hid_bpf_ctx *hctx) \
+{ \
+ return fun(hctx); \
+} \
+typeof(fun(0)) fun
+
+#define HID_BPF_ASYNC_INIT(fun) ____async_init_##fun()
+#define HID_BPF_ASYNC_DELAYED_CALL(fun, ctx, delay) \
+ hid_bpf_async_delayed_call(ctx, delay, ____key__##fun, ____##fun##_cb)
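+
+/*
+ * Minimal usage sketch (mirroring the XP-Pen ACK05 driver; the function name
+ * is illustrative):
+ *
+ *   #define HID_BPF_ASYNC_MAX_CTX 1
+ *   #include "hid_bpf_async.h"
+ *
+ *   static int HID_BPF_ASYNC_FUN(my_delayed_work)(struct hid_bpf_ctx *hid)
+ *   {
+ *           // runs from a bpf_wq with a freshly allocated HID context
+ *           return 0;
+ *   }
+ *
+ *   // once, typically from the "syscall" probe: reserve a slot in the map
+ *   err = HID_BPF_ASYNC_INIT(my_delayed_work);
+ *
+ *   // later, e.g. from a device event: run it roughly 10 ms from now
+ *   HID_BPF_ASYNC_DELAYED_CALL(my_delayed_work, hctx, 10);
+ */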
+
+/*
+ * internal cb for starting the delayed work callback in a workqueue.
+ */
+static int __start_wq_timer_cb(void *map, int *key, void *value)
+{
+ struct hid_bpf_async_map_elem *e = (struct hid_bpf_async_map_elem *)value;
+
+ bpf_wq_start(&e->wq, 0);
+
+ return 0;
+}
+
+static int hid_bpf_async_find_empty_key(void)
+{
+ int i;
+
+ bpf_for(i, 0, HID_BPF_ASYNC_MAX_CTX) {
+ struct hid_bpf_async_map_elem *elem;
+ int key = i;
+
+ elem = bpf_map_lookup_elem(&hid_bpf_async_ctx_map, &key);
+ if (!elem)
+ return -ENOMEM; /* should never happen */
+
+ bpf_spin_lock(&elem->lock);
+
+ if (elem->state == HID_BPF_ASYNC_STATE_UNSET) {
+ elem->state = HID_BPF_ASYNC_STATE_INITIALIZING;
+ bpf_spin_unlock(&elem->lock);
+ return i;
+ }
+
+ bpf_spin_unlock(&elem->lock);
+ }
+
+ return -EINVAL;
+}
+
+static int hid_bpf_async_get_ctx(void)
+{
+ int key = hid_bpf_async_find_empty_key();
+ struct hid_bpf_async_map_elem *elem;
+ int err;
+
+ if (key < 0)
+ return key;
+
+ elem = bpf_map_lookup_elem(&hid_bpf_async_ctx_map, &key);
+ if (!elem)
+ return -EINVAL;
+
+ err = bpf_timer_init(&elem->t, &hid_bpf_async_ctx_map, CLOCK_MONOTONIC);
+ if (err)
+ return err;
+
+ err = bpf_timer_set_callback(&elem->t, __start_wq_timer_cb);
+ if (err)
+ return err;
+
+ err = bpf_wq_init(&elem->wq, &hid_bpf_async_ctx_map, 0);
+ if (err)
+ return err;
+
+ elem->state = HID_BPF_ASYNC_STATE_INITIALIZED;
+
+ return key;
+}
+
+static inline u64 ms_to_ns(u64 milliseconds)
+{
+ return (u64)milliseconds * 1000UL * 1000UL;
+}
+
+static int hid_bpf_async_delayed_call(struct hid_bpf_ctx *hctx, u64 milliseconds, int key,
+ hid_bpf_async_callback_t wq_cb)
+{
+ struct hid_bpf_async_map_elem *elem;
+ int err;
+
+ elem = bpf_map_lookup_elem(&hid_bpf_async_ctx_map, &key);
+ if (!elem)
+ return -EINVAL;
+
+ bpf_spin_lock(&elem->lock);
+ /* The wq must be:
+	/* The wq state must be either:
+	 * - HID_BPF_ASYNC_STATE_INITIALIZED -> it has been initialized and is ready to be called
+ */
+ if (elem->state != HID_BPF_ASYNC_STATE_INITIALIZED &&
+ elem->state != HID_BPF_ASYNC_STATE_RUNNING) {
+ bpf_spin_unlock(&elem->lock);
+ return -EINVAL;
+ }
+ elem->state = HID_BPF_ASYNC_STATE_STARTING;
+ bpf_spin_unlock(&elem->lock);
+
+ elem->hid = hctx->hid->id;
+
+ err = bpf_wq_set_callback(&elem->wq, wq_cb, 0);
+ if (err)
+ return err;
+
+ if (milliseconds) {
+ /* needed for every call because a cancel might unset this */
+ err = bpf_timer_set_callback(&elem->t, __start_wq_timer_cb);
+ if (err)
+ return err;
+
+ err = bpf_timer_start(&elem->t, ms_to_ns(milliseconds), 0);
+ if (err)
+ return err;
+
+ return 0;
+ }
+
+ return bpf_wq_start(&elem->wq, 0);
+}
+
+static inline int hid_bpf_async_call(struct hid_bpf_ctx *ctx, int key,
+ hid_bpf_async_callback_t wq_cb)
+{
+ return hid_bpf_async_delayed_call(ctx, 0, key, wq_cb);
+}
+
+#endif /* __HID_BPF_ASYNC_H__ */
diff --git a/drivers/hid/bpf/progs/hid_bpf_helpers.h b/drivers/hid/bpf/progs/hid_bpf_helpers.h
index 3ba24d125a08..bf19785a6b06 100644
--- a/drivers/hid/bpf/progs/hid_bpf_helpers.h
+++ b/drivers/hid/bpf/progs/hid_bpf_helpers.h
@@ -19,6 +19,25 @@ extern int hid_bpf_hw_request(struct hid_bpf_ctx *ctx,
size_t buf__sz,
enum hid_report_type type,
enum hid_class_request reqtype) __ksym;
+extern int hid_bpf_hw_output_report(struct hid_bpf_ctx *ctx,
+ __u8 *buf, size_t buf__sz) __weak __ksym;
+extern int hid_bpf_input_report(struct hid_bpf_ctx *ctx,
+ enum hid_report_type type,
+ __u8 *data,
+ size_t buf__sz) __weak __ksym;
+extern int hid_bpf_try_input_report(struct hid_bpf_ctx *ctx,
+ enum hid_report_type type,
+ __u8 *data,
+ size_t buf__sz) __weak __ksym;
+
+/* bpf_wq implementation */
+extern int bpf_wq_init(struct bpf_wq *wq, void *p__map, unsigned int flags) __weak __ksym;
+extern int bpf_wq_start(struct bpf_wq *wq, unsigned int flags) __weak __ksym;
+extern int bpf_wq_set_callback_impl(struct bpf_wq *wq,
+ int (callback_fn)(void *map, int *key, void *value),
+ unsigned int flags__k, void *aux__ign) __ksym;
+#define bpf_wq_set_callback(wq, cb, flags) \
+ bpf_wq_set_callback_impl(wq, cb, flags, NULL)
#define HID_MAX_DESCRIPTOR_SIZE 4096
#define HID_IGNORE_EVENT -1
diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c
index 49812a76b7ed..d900dd05c335 100644
--- a/drivers/hid/hid-apple.c
+++ b/drivers/hid/hid-apple.c
@@ -378,6 +378,12 @@ static bool apple_is_non_apple_keyboard(struct hid_device *hdev)
return false;
}
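+
+/*
+ * The Omoton KB066 clone reuses Apple's ALU Wireless (ANSI) product ID but
+ * identifies itself with the generic name "Bluetooth Keyboard", so match on
+ * both to tell it apart from a real Apple keyboard.
+ */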
+static bool apple_is_omoton_kb066(struct hid_device *hdev)
+{
+ return hdev->product == USB_DEVICE_ID_APPLE_ALU_WIRELESS_ANSI &&
+ strcmp(hdev->name, "Bluetooth Keyboard") == 0;
+}
+
static inline void apple_setup_key_translation(struct input_dev *input,
const struct apple_key_translation *table)
{
@@ -546,9 +552,6 @@ static int hidinput_apple_event(struct hid_device *hid, struct input_dev *input,
}
}
- if (usage->hid == 0xc0301) /* Omoton KB066 quirk */
- code = KEY_F6;
-
if (usage->code != code) {
input_event_with_scancode(input, usage->type, code, usage->hid, value);
@@ -728,7 +731,7 @@ static int apple_input_configured(struct hid_device *hdev,
{
struct apple_sc *asc = hid_get_drvdata(hdev);
- if ((asc->quirks & APPLE_HAS_FN) && !asc->fn_found) {
+ if (((asc->quirks & APPLE_HAS_FN) && !asc->fn_found) || apple_is_omoton_kb066(hdev)) {
hid_info(hdev, "Fn key not found (Apple Wireless Keyboard clone?), disabling Fn key handling\n");
asc->quirks &= ~APPLE_HAS_FN;
}
diff --git a/drivers/hid/hid-appleir.c b/drivers/hid/hid-appleir.c
index 8deded185725..c45e5aa569d2 100644
--- a/drivers/hid/hid-appleir.c
+++ b/drivers/hid/hid-appleir.c
@@ -188,7 +188,7 @@ static int appleir_raw_event(struct hid_device *hid, struct hid_report *report,
static const u8 flatbattery[] = { 0x25, 0x87, 0xe0 };
unsigned long flags;
- if (len != 5)
+ if (len != 5 || !(hid->claimed & HID_CLAIMED_INPUT))
goto out;
if (!memcmp(data, keydown, sizeof(keydown))) {
diff --git a/drivers/hid/hid-appletb-bl.c b/drivers/hid/hid-appletb-bl.c
new file mode 100644
index 000000000000..bad2aead8780
--- /dev/null
+++ b/drivers/hid/hid-appletb-bl.c
@@ -0,0 +1,204 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Apple Touch Bar Backlight Driver
+ *
+ * Copyright (c) 2017-2018 Ronald Tschalär
+ * Copyright (c) 2022-2023 Kerem Karabay <kekrby@gmail.com>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/hid.h>
+#include <linux/backlight.h>
+#include <linux/device.h>
+
+#include "hid-ids.h"
+
+#define APPLETB_BL_ON 1
+#define APPLETB_BL_DIM 3
+#define APPLETB_BL_OFF 4
+
+#define HID_UP_APPLEVENDOR_TB_BL 0xff120000
+
+#define HID_VD_APPLE_TB_BRIGHTNESS 0xff120001
+#define HID_USAGE_AUX1 0xff120020
+#define HID_USAGE_BRIGHTNESS 0xff120021
+
+static int appletb_bl_def_brightness = 2;
+module_param_named(brightness, appletb_bl_def_brightness, int, 0444);
+MODULE_PARM_DESC(brightness, "Default brightness:\n"
+ " 0 - Touchbar is off\n"
+ " 1 - Dim brightness\n"
+ " [2] - Full brightness");
+
+struct appletb_bl {
+ struct hid_field *aux1_field, *brightness_field;
+ struct backlight_device *bdev;
+
+ bool full_on;
+};
+
+static const u8 appletb_bl_brightness_map[] = {
+ APPLETB_BL_OFF,
+ APPLETB_BL_DIM,
+ APPLETB_BL_ON,
+};
+
+static int appletb_bl_set_brightness(struct appletb_bl *bl, u8 brightness)
+{
+ struct hid_report *report = bl->brightness_field->report;
+ struct hid_device *hdev = report->device;
+ int ret;
+
+ ret = hid_set_field(bl->aux1_field, 0, 1);
+ if (ret) {
+ hid_err(hdev, "Failed to set auxiliary field (%pe)\n", ERR_PTR(ret));
+ return ret;
+ }
+
+ ret = hid_set_field(bl->brightness_field, 0, brightness);
+ if (ret) {
+ hid_err(hdev, "Failed to set brightness field (%pe)\n", ERR_PTR(ret));
+ return ret;
+ }
+
+ if (!bl->full_on) {
+ ret = hid_hw_power(hdev, PM_HINT_FULLON);
+ if (ret < 0) {
+ hid_err(hdev, "Device didn't power on (%pe)\n", ERR_PTR(ret));
+ return ret;
+ }
+
+ bl->full_on = true;
+ }
+
+ hid_hw_request(hdev, report, HID_REQ_SET_REPORT);
+
+ if (brightness == APPLETB_BL_OFF) {
+ hid_hw_power(hdev, PM_HINT_NORMAL);
+ bl->full_on = false;
+ }
+
+ return 0;
+}
+
+static int appletb_bl_update_status(struct backlight_device *bdev)
+{
+ struct appletb_bl *bl = bl_get_data(bdev);
+ u8 brightness;
+
+ if (backlight_is_blank(bdev))
+ brightness = APPLETB_BL_OFF;
+ else
+ brightness = appletb_bl_brightness_map[backlight_get_brightness(bdev)];
+
+ return appletb_bl_set_brightness(bl, brightness);
+}
+
+static const struct backlight_ops appletb_bl_backlight_ops = {
+ .options = BL_CORE_SUSPENDRESUME,
+ .update_status = appletb_bl_update_status,
+};
+
+static int appletb_bl_probe(struct hid_device *hdev, const struct hid_device_id *id)
+{
+ struct hid_field *aux1_field, *brightness_field;
+ struct backlight_properties bl_props = { 0 };
+ struct device *dev = &hdev->dev;
+ struct appletb_bl *bl;
+ int ret;
+
+ ret = hid_parse(hdev);
+ if (ret)
+ return dev_err_probe(dev, ret, "HID parse failed\n");
+
+ aux1_field = hid_find_field(hdev, HID_FEATURE_REPORT,
+ HID_VD_APPLE_TB_BRIGHTNESS, HID_USAGE_AUX1);
+
+ brightness_field = hid_find_field(hdev, HID_FEATURE_REPORT,
+ HID_VD_APPLE_TB_BRIGHTNESS, HID_USAGE_BRIGHTNESS);
+
+ if (!aux1_field || !brightness_field)
+ return -ENODEV;
+
+ if (aux1_field->report != brightness_field->report)
+ return dev_err_probe(dev, -ENODEV, "Encountered unexpected report structure\n");
+
+ bl = devm_kzalloc(dev, sizeof(*bl), GFP_KERNEL);
+ if (!bl)
+ return -ENOMEM;
+
+ ret = hid_hw_start(hdev, HID_CONNECT_DRIVER);
+ if (ret)
+ return dev_err_probe(dev, ret, "HID hardware start failed\n");
+
+ ret = hid_hw_open(hdev);
+ if (ret) {
+ dev_err_probe(dev, ret, "HID hardware open failed\n");
+ goto stop_hw;
+ }
+
+ bl->aux1_field = aux1_field;
+ bl->brightness_field = brightness_field;
+
+ ret = appletb_bl_set_brightness(bl,
+ appletb_bl_brightness_map[(appletb_bl_def_brightness > 2) ? 2 : appletb_bl_def_brightness]);
+
+ if (ret) {
+ dev_err_probe(dev, ret, "Failed to set default touch bar brightness to %d\n",
+ appletb_bl_def_brightness);
+ goto close_hw;
+ }
+
+ bl_props.type = BACKLIGHT_RAW;
+ bl_props.max_brightness = ARRAY_SIZE(appletb_bl_brightness_map) - 1;
+
+ bl->bdev = devm_backlight_device_register(dev, "appletb_backlight", dev, bl,
+ &appletb_bl_backlight_ops, &bl_props);
+ if (IS_ERR(bl->bdev)) {
+ ret = PTR_ERR(bl->bdev);
+ dev_err_probe(dev, ret, "Failed to register backlight device\n");
+ goto close_hw;
+ }
+
+ hid_set_drvdata(hdev, bl);
+
+ return 0;
+
+close_hw:
+ hid_hw_close(hdev);
+stop_hw:
+ hid_hw_stop(hdev);
+
+ return ret;
+}
+
+static void appletb_bl_remove(struct hid_device *hdev)
+{
+ struct appletb_bl *bl = hid_get_drvdata(hdev);
+
+ appletb_bl_set_brightness(bl, APPLETB_BL_OFF);
+
+ hid_hw_close(hdev);
+ hid_hw_stop(hdev);
+}
+
+static const struct hid_device_id appletb_bl_hid_ids[] = {
+	/* MacBook Pros (2018, 2019) with T2 chip: iBridge DFR Brightness */
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_TOUCHBAR_BACKLIGHT) },
+ { }
+};
+MODULE_DEVICE_TABLE(hid, appletb_bl_hid_ids);
+
+static struct hid_driver appletb_bl_hid_driver = {
+ .name = "hid-appletb-bl",
+ .id_table = appletb_bl_hid_ids,
+ .probe = appletb_bl_probe,
+ .remove = appletb_bl_remove,
+};
+module_hid_driver(appletb_bl_hid_driver);
+
+MODULE_AUTHOR("Ronald Tschalär");
+MODULE_AUTHOR("Kerem Karabay <kekrby@gmail.com>");
+MODULE_DESCRIPTION("MacBook Pro Touch Bar Backlight driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-appletb-kbd.c b/drivers/hid/hid-appletb-kbd.c
new file mode 100644
index 000000000000..d4b95aa3eecb
--- /dev/null
+++ b/drivers/hid/hid-appletb-kbd.c
@@ -0,0 +1,507 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Apple Touch Bar Keyboard Mode Driver
+ *
+ * Copyright (c) 2017-2018 Ronald Tschalär
+ * Copyright (c) 2022-2023 Kerem Karabay <kekrby@gmail.com>
+ * Copyright (c) 2024-2025 Aditya Garg <gargaditya08@live.com>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/hid.h>
+#include <linux/usb.h>
+#include <linux/input.h>
+#include <linux/sysfs.h>
+#include <linux/bitops.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/backlight.h>
+#include <linux/timer.h>
+#include <linux/input/sparse-keymap.h>
+
+#include "hid-ids.h"
+
+#define APPLETB_KBD_MODE_ESC 0
+#define APPLETB_KBD_MODE_FN 1
+#define APPLETB_KBD_MODE_SPCL 2
+#define APPLETB_KBD_MODE_OFF 3
+#define APPLETB_KBD_MODE_MAX APPLETB_KBD_MODE_OFF
+
+#define APPLETB_DEVID_KEYBOARD 1
+#define APPLETB_DEVID_TRACKPAD 2
+
+#define HID_USAGE_MODE 0x00ff0004
+
+static int appletb_tb_def_mode = APPLETB_KBD_MODE_SPCL;
+module_param_named(mode, appletb_tb_def_mode, int, 0444);
+MODULE_PARM_DESC(mode, "Default touchbar mode:\n"
+ " 0 - escape key only\n"
+ " 1 - function-keys\n"
+ " [2] - special keys");
+
+static bool appletb_tb_fn_toggle = true;
+module_param_named(fntoggle, appletb_tb_fn_toggle, bool, 0644);
+MODULE_PARM_DESC(fntoggle, "Switch between Fn and media controls on pressing Fn key");
+
+static bool appletb_tb_autodim = true;
+module_param_named(autodim, appletb_tb_autodim, bool, 0644);
+MODULE_PARM_DESC(autodim, "Automatically dim and turn off the Touch Bar after some time");
+
+static int appletb_tb_dim_timeout = 60;
+module_param_named(dim_timeout, appletb_tb_dim_timeout, int, 0644);
+MODULE_PARM_DESC(dim_timeout, "Dim timeout in sec");
+
+static int appletb_tb_idle_timeout = 15;
+module_param_named(idle_timeout, appletb_tb_idle_timeout, int, 0644);
+MODULE_PARM_DESC(idle_timeout, "Idle timeout in sec");
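+
+/*
+ * Hypothetical usage example: load with function keys as the default layer
+ * and a shorter dim timeout:
+ *
+ *   modprobe hid-appletb-kbd mode=1 dim_timeout=30
+ *
+ * The 0644 parameters (fntoggle, autodim, dim_timeout, idle_timeout) can
+ * presumably also be changed at runtime through
+ * /sys/module/hid_appletb_kbd/parameters/.
+ */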
+
+struct appletb_kbd {
+ struct hid_field *mode_field;
+ struct input_handler inp_handler;
+ struct input_handle kbd_handle;
+ struct input_handle tpd_handle;
+ struct backlight_device *backlight_dev;
+ struct timer_list inactivity_timer;
+ bool has_dimmed;
+ bool has_turned_off;
+ u8 saved_mode;
+ u8 current_mode;
+};
+
+static const struct key_entry appletb_kbd_keymap[] = {
+ { KE_KEY, KEY_ESC, { KEY_ESC } },
+ { KE_KEY, KEY_F1, { KEY_BRIGHTNESSDOWN } },
+ { KE_KEY, KEY_F2, { KEY_BRIGHTNESSUP } },
+ { KE_KEY, KEY_F3, { KEY_RESERVED } },
+ { KE_KEY, KEY_F4, { KEY_RESERVED } },
+ { KE_KEY, KEY_F5, { KEY_KBDILLUMDOWN } },
+ { KE_KEY, KEY_F6, { KEY_KBDILLUMUP } },
+ { KE_KEY, KEY_F7, { KEY_PREVIOUSSONG } },
+ { KE_KEY, KEY_F8, { KEY_PLAYPAUSE } },
+ { KE_KEY, KEY_F9, { KEY_NEXTSONG } },
+ { KE_KEY, KEY_F10, { KEY_MUTE } },
+ { KE_KEY, KEY_F11, { KEY_VOLUMEDOWN } },
+ { KE_KEY, KEY_F12, { KEY_VOLUMEUP } },
+ { KE_END, 0 }
+};
+
+static int appletb_kbd_set_mode(struct appletb_kbd *kbd, u8 mode)
+{
+ struct hid_report *report = kbd->mode_field->report;
+ struct hid_device *hdev = report->device;
+ int ret;
+
+ ret = hid_hw_power(hdev, PM_HINT_FULLON);
+ if (ret) {
+ hid_err(hdev, "Device didn't resume (%pe)\n", ERR_PTR(ret));
+ return ret;
+ }
+
+ ret = hid_set_field(kbd->mode_field, 0, mode);
+ if (ret) {
+ hid_err(hdev, "Failed to set mode field to %u (%pe)\n", mode, ERR_PTR(ret));
+ goto power_normal;
+ }
+
+ hid_hw_request(hdev, report, HID_REQ_SET_REPORT);
+
+ kbd->current_mode = mode;
+
+power_normal:
+ hid_hw_power(hdev, PM_HINT_NORMAL);
+
+ return ret;
+}
+
+static ssize_t mode_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct appletb_kbd *kbd = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%d\n", kbd->current_mode);
+}
+
+static ssize_t mode_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ struct appletb_kbd *kbd = dev_get_drvdata(dev);
+ u8 mode;
+ int ret;
+
+ ret = kstrtou8(buf, 0, &mode);
+ if (ret)
+ return ret;
+
+ if (mode > APPLETB_KBD_MODE_MAX)
+ return -EINVAL;
+
+ ret = appletb_kbd_set_mode(kbd, mode);
+
+ return ret < 0 ? ret : size;
+}
+static DEVICE_ATTR_RW(mode);
+
+static struct attribute *appletb_kbd_attrs[] = {
+ &dev_attr_mode.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(appletb_kbd);
+
+static int appletb_tb_key_to_slot(unsigned int code)
+{
+ switch (code) {
+ case KEY_ESC:
+ return 0;
+ case KEY_F1 ... KEY_F10:
+ return code - KEY_F1 + 1;
+ case KEY_F11 ... KEY_F12:
+ return code - KEY_F11 + 11;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static void appletb_inactivity_timer(struct timer_list *t)
+{
+ struct appletb_kbd *kbd = from_timer(kbd, t, inactivity_timer);
+
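+	/*
+	 * First expiry dims the Touch Bar and re-arms the timer for
+	 * idle_timeout; the second expiry then turns the backlight off
+	 * completely.
+	 */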
+ if (kbd->backlight_dev && appletb_tb_autodim) {
+ if (!kbd->has_dimmed) {
+ backlight_device_set_brightness(kbd->backlight_dev, 1);
+ kbd->has_dimmed = true;
+ mod_timer(&kbd->inactivity_timer, jiffies + msecs_to_jiffies(appletb_tb_idle_timeout * 1000));
+ } else if (!kbd->has_turned_off) {
+ backlight_device_set_brightness(kbd->backlight_dev, 0);
+ kbd->has_turned_off = true;
+ }
+ }
+}
+
+static void reset_inactivity_timer(struct appletb_kbd *kbd)
+{
+ if (kbd->backlight_dev && appletb_tb_autodim) {
+ if (kbd->has_dimmed || kbd->has_turned_off) {
+ backlight_device_set_brightness(kbd->backlight_dev, 2);
+ kbd->has_dimmed = false;
+ kbd->has_turned_off = false;
+ }
+ mod_timer(&kbd->inactivity_timer, jiffies + msecs_to_jiffies(appletb_tb_dim_timeout * 1000));
+ }
+}
+
+static int appletb_kbd_hid_event(struct hid_device *hdev, struct hid_field *field,
+ struct hid_usage *usage, __s32 value)
+{
+ struct appletb_kbd *kbd = hid_get_drvdata(hdev);
+ struct key_entry *translation;
+ struct input_dev *input;
+ int slot;
+
+ if ((usage->hid & HID_USAGE_PAGE) != HID_UP_KEYBOARD || usage->type != EV_KEY)
+ return 0;
+
+ input = field->hidinput->input;
+
+ /*
+ * Skip non-touch-bar keys.
+ *
+	 * Either the touch bar itself or usbhid generates a slew of key-down
+	 * events for all the meta keys, none of which we are interested in.
+ */
+ slot = appletb_tb_key_to_slot(usage->code);
+ if (slot < 0)
+ return 0;
+
+ reset_inactivity_timer(kbd);
+
+ translation = sparse_keymap_entry_from_scancode(input, usage->code);
+
+ if (translation && kbd->current_mode == APPLETB_KBD_MODE_SPCL) {
+ input_event(input, usage->type, translation->keycode, value);
+
+ return 1;
+ }
+
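+	/*
+	 * In OFF mode return 1 to consume the event so nothing is emitted;
+	 * otherwise return 0 and let hid-input deliver the plain function or
+	 * escape key.
+	 */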
+ return kbd->current_mode == APPLETB_KBD_MODE_OFF;
+}
+
+static void appletb_kbd_inp_event(struct input_handle *handle, unsigned int type,
+ unsigned int code, int value)
+{
+ struct appletb_kbd *kbd = handle->private;
+
+ reset_inactivity_timer(kbd);
+
+ if (type == EV_KEY && code == KEY_FN && appletb_tb_fn_toggle &&
+ (kbd->current_mode == APPLETB_KBD_MODE_SPCL ||
+ kbd->current_mode == APPLETB_KBD_MODE_FN)) {
+ if (value == 1) {
+ kbd->saved_mode = kbd->current_mode;
+ appletb_kbd_set_mode(kbd, kbd->current_mode == APPLETB_KBD_MODE_SPCL
+ ? APPLETB_KBD_MODE_FN : APPLETB_KBD_MODE_SPCL);
+ } else if (value == 0) {
+ if (kbd->saved_mode != kbd->current_mode)
+ appletb_kbd_set_mode(kbd, kbd->saved_mode);
+ }
+ }
+}
+
+static int appletb_kbd_inp_connect(struct input_handler *handler,
+ struct input_dev *dev,
+ const struct input_device_id *id)
+{
+ struct appletb_kbd *kbd = handler->private;
+ struct input_handle *handle;
+ int rc;
+
+ if (id->driver_info == APPLETB_DEVID_KEYBOARD) {
+ handle = &kbd->kbd_handle;
+ handle->name = "tbkbd";
+ } else if (id->driver_info == APPLETB_DEVID_TRACKPAD) {
+ handle = &kbd->tpd_handle;
+ handle->name = "tbtpd";
+ } else {
+ return -ENOENT;
+ }
+
+ if (handle->dev)
+ return -EEXIST;
+
+ handle->open = 0;
+ handle->dev = input_get_device(dev);
+ handle->handler = handler;
+ handle->private = kbd;
+
+ rc = input_register_handle(handle);
+ if (rc)
+ goto err_free_dev;
+
+ rc = input_open_device(handle);
+ if (rc)
+ goto err_unregister_handle;
+
+ return 0;
+
+ err_unregister_handle:
+ input_unregister_handle(handle);
+ err_free_dev:
+ input_put_device(handle->dev);
+ handle->dev = NULL;
+ return rc;
+}
+
+static void appletb_kbd_inp_disconnect(struct input_handle *handle)
+{
+ input_close_device(handle);
+ input_unregister_handle(handle);
+
+ input_put_device(handle->dev);
+ handle->dev = NULL;
+}
+
+static int appletb_kbd_input_configured(struct hid_device *hdev, struct hid_input *hidinput)
+{
+ int idx;
+ struct input_dev *input = hidinput->input;
+
+ /*
+ * Clear various input capabilities that are blindly set by the hid
+ * driver (usbkbd.c)
+ */
+ memset(input->evbit, 0, sizeof(input->evbit));
+ memset(input->keybit, 0, sizeof(input->keybit));
+ memset(input->ledbit, 0, sizeof(input->ledbit));
+
+ __set_bit(EV_REP, input->evbit);
+
+ sparse_keymap_setup(input, appletb_kbd_keymap, NULL);
+
+ for (idx = 0; appletb_kbd_keymap[idx].type != KE_END; idx++)
+ input_set_capability(input, EV_KEY, appletb_kbd_keymap[idx].code);
+
+ return 0;
+}
+
+static const struct input_device_id appletb_kbd_input_devices[] = {
+ {
+ .flags = INPUT_DEVICE_ID_MATCH_BUS |
+ INPUT_DEVICE_ID_MATCH_VENDOR |
+ INPUT_DEVICE_ID_MATCH_KEYBIT,
+ .bustype = BUS_USB,
+ .vendor = USB_VENDOR_ID_APPLE,
+ .keybit = { [BIT_WORD(KEY_FN)] = BIT_MASK(KEY_FN) },
+ .driver_info = APPLETB_DEVID_KEYBOARD,
+ },
+ {
+ .flags = INPUT_DEVICE_ID_MATCH_BUS |
+ INPUT_DEVICE_ID_MATCH_VENDOR |
+ INPUT_DEVICE_ID_MATCH_KEYBIT,
+ .bustype = BUS_USB,
+ .vendor = USB_VENDOR_ID_APPLE,
+ .keybit = { [BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH) },
+ .driver_info = APPLETB_DEVID_TRACKPAD,
+ },
+ { }
+};
+
+static bool appletb_kbd_match_internal_device(struct input_handler *handler,
+ struct input_dev *inp_dev)
+{
+ struct device *dev = &inp_dev->dev;
+
+ /* in kernel: dev && !is_usb_device(dev) */
+ while (dev && !(dev->type && dev->type->name &&
+ !strcmp(dev->type->name, "usb_device")))
+ dev = dev->parent;
+
+ /*
+	 * Apple labels all their internal keyboards and trackpads as such;
+	 * instead of maintaining an ever-expanding list of product IDs we
+	 * just look at the device's product name.
+ */
+ if (dev)
+ return !!strstr(to_usb_device(dev)->product, "Internal Keyboard");
+
+ return false;
+}
+
+static int appletb_kbd_probe(struct hid_device *hdev, const struct hid_device_id *id)
+{
+ struct appletb_kbd *kbd;
+ struct device *dev = &hdev->dev;
+ struct hid_field *mode_field;
+ int ret;
+
+ ret = hid_parse(hdev);
+ if (ret)
+ return dev_err_probe(dev, ret, "HID parse failed\n");
+
+ mode_field = hid_find_field(hdev, HID_OUTPUT_REPORT,
+ HID_GD_KEYBOARD, HID_USAGE_MODE);
+ if (!mode_field)
+ return -ENODEV;
+
+ kbd = devm_kzalloc(dev, sizeof(*kbd), GFP_KERNEL);
+ if (!kbd)
+ return -ENOMEM;
+
+ kbd->mode_field = mode_field;
+
+ ret = hid_hw_start(hdev, HID_CONNECT_HIDINPUT);
+ if (ret)
+ return dev_err_probe(dev, ret, "HID hw start failed\n");
+
+ ret = hid_hw_open(hdev);
+ if (ret) {
+ dev_err_probe(dev, ret, "HID hw open failed\n");
+ goto stop_hw;
+ }
+
+ kbd->backlight_dev = backlight_device_get_by_name("appletb_backlight");
+ if (!kbd->backlight_dev) {
+ dev_err_probe(dev, -ENODEV, "Failed to get backlight device\n");
+ } else {
+ backlight_device_set_brightness(kbd->backlight_dev, 2);
+ timer_setup(&kbd->inactivity_timer, appletb_inactivity_timer, 0);
+ mod_timer(&kbd->inactivity_timer, jiffies + msecs_to_jiffies(appletb_tb_dim_timeout * 1000));
+ }
+
+ kbd->inp_handler.event = appletb_kbd_inp_event;
+ kbd->inp_handler.connect = appletb_kbd_inp_connect;
+ kbd->inp_handler.disconnect = appletb_kbd_inp_disconnect;
+ kbd->inp_handler.name = "appletb";
+ kbd->inp_handler.id_table = appletb_kbd_input_devices;
+ kbd->inp_handler.match = appletb_kbd_match_internal_device;
+ kbd->inp_handler.private = kbd;
+
+ ret = input_register_handler(&kbd->inp_handler);
+ if (ret) {
+ dev_err_probe(dev, ret, "Unable to register keyboard handler\n");
+ goto close_hw;
+ }
+
+ ret = appletb_kbd_set_mode(kbd, appletb_tb_def_mode);
+ if (ret) {
+ dev_err_probe(dev, ret, "Failed to set touchbar mode\n");
+ goto close_hw;
+ }
+
+ hid_set_drvdata(hdev, kbd);
+
+ return 0;
+
+close_hw:
+ hid_hw_close(hdev);
+stop_hw:
+ hid_hw_stop(hdev);
+ return ret;
+}
+
+static void appletb_kbd_remove(struct hid_device *hdev)
+{
+ struct appletb_kbd *kbd = hid_get_drvdata(hdev);
+
+ appletb_kbd_set_mode(kbd, APPLETB_KBD_MODE_OFF);
+
+ input_unregister_handler(&kbd->inp_handler);
+ del_timer_sync(&kbd->inactivity_timer);
+
+ hid_hw_close(hdev);
+ hid_hw_stop(hdev);
+}
+
+#ifdef CONFIG_PM
+static int appletb_kbd_suspend(struct hid_device *hdev, pm_message_t msg)
+{
+ struct appletb_kbd *kbd = hid_get_drvdata(hdev);
+
+ kbd->saved_mode = kbd->current_mode;
+ appletb_kbd_set_mode(kbd, APPLETB_KBD_MODE_OFF);
+
+ return 0;
+}
+
+static int appletb_kbd_reset_resume(struct hid_device *hdev)
+{
+ struct appletb_kbd *kbd = hid_get_drvdata(hdev);
+
+ appletb_kbd_set_mode(kbd, kbd->saved_mode);
+
+ return 0;
+}
+#endif
+
+static const struct hid_device_id appletb_kbd_hid_ids[] = {
+	/* MacBook Pros (2018, 2019) with T2 chip: iBridge Display */
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_TOUCHBAR_DISPLAY) },
+ { }
+};
+MODULE_DEVICE_TABLE(hid, appletb_kbd_hid_ids);
+
+static struct hid_driver appletb_kbd_hid_driver = {
+ .name = "hid-appletb-kbd",
+ .id_table = appletb_kbd_hid_ids,
+ .probe = appletb_kbd_probe,
+ .remove = appletb_kbd_remove,
+ .event = appletb_kbd_hid_event,
+ .input_configured = appletb_kbd_input_configured,
+#ifdef CONFIG_PM
+ .suspend = appletb_kbd_suspend,
+ .reset_resume = appletb_kbd_reset_resume,
+#endif
+ .driver.dev_groups = appletb_kbd_groups,
+};
+module_hid_driver(appletb_kbd_hid_driver);
+
+/* The backlight driver should be loaded before the keyboard driver is initialised */
+MODULE_SOFTDEP("pre: hid_appletb_bl");
+
+MODULE_AUTHOR("Ronald Tschalär");
+MODULE_AUTHOR("Kerem Karabay <kekrby@gmail.com>");
+MODULE_AUTHOR("Aditya Garg <gargaditya08@live.com>");
+MODULE_DESCRIPTION("MacBook Pro Touch Bar Keyboard Mode driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 4497b50799db..4741ff626771 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -657,7 +657,11 @@ static int hid_parser_main(struct hid_parser *parser, struct hid_item *item)
ret = hid_add_field(parser, HID_FEATURE_REPORT, data);
break;
default:
- hid_warn(parser->device, "unknown main item tag 0x%x\n", item->tag);
+ if (item->tag >= HID_MAIN_ITEM_TAG_RESERVED_MIN &&
+ item->tag <= HID_MAIN_ITEM_TAG_RESERVED_MAX)
+ hid_warn(parser->device, "reserved main item tag 0x%x\n", item->tag);
+ else
+ hid_warn(parser->device, "unknown main item tag 0x%x\n", item->tag);
ret = 0;
}
diff --git a/drivers/hid/hid-corsair-void.c b/drivers/hid/hid-corsair-void.c
index 56e858066c3c..afbd67aa9719 100644
--- a/drivers/hid/hid-corsair-void.c
+++ b/drivers/hid/hid-corsair-void.c
@@ -71,11 +71,9 @@
#include <linux/bitfield.h>
#include <linux/bitops.h>
-#include <linux/cleanup.h>
#include <linux/device.h>
#include <linux/hid.h>
#include <linux/module.h>
-#include <linux/mutex.h>
#include <linux/power_supply.h>
#include <linux/usb.h>
#include <linux/workqueue.h>
@@ -120,6 +118,12 @@ enum {
CORSAIR_VOID_BATTERY_CHARGING = 5,
};
+enum {
+ CORSAIR_VOID_ADD_BATTERY = 0,
+ CORSAIR_VOID_REMOVE_BATTERY = 1,
+ CORSAIR_VOID_UPDATE_BATTERY = 2,
+};
+
static enum power_supply_property corsair_void_battery_props[] = {
POWER_SUPPLY_PROP_STATUS,
POWER_SUPPLY_PROP_PRESENT,
@@ -155,12 +159,12 @@ struct corsair_void_drvdata {
struct power_supply *battery;
struct power_supply_desc battery_desc;
- struct mutex battery_mutex;
struct delayed_work delayed_status_work;
struct delayed_work delayed_firmware_work;
- struct work_struct battery_remove_work;
- struct work_struct battery_add_work;
+
+ unsigned long battery_work_flags;
+ struct work_struct battery_work;
};
/*
@@ -260,11 +264,9 @@ success:
/* Inform power supply if battery values changed */
if (memcmp(&orig_battery_data, battery_data, sizeof(*battery_data))) {
- scoped_guard(mutex, &drvdata->battery_mutex) {
- if (drvdata->battery) {
- power_supply_changed(drvdata->battery);
- }
- }
+ set_bit(CORSAIR_VOID_UPDATE_BATTERY,
+ &drvdata->battery_work_flags);
+ schedule_work(&drvdata->battery_work);
}
}
@@ -536,29 +538,11 @@ static void corsair_void_firmware_work_handler(struct work_struct *work)
}
-static void corsair_void_battery_remove_work_handler(struct work_struct *work)
-{
- struct corsair_void_drvdata *drvdata;
-
- drvdata = container_of(work, struct corsair_void_drvdata,
- battery_remove_work);
- scoped_guard(mutex, &drvdata->battery_mutex) {
- if (drvdata->battery) {
- power_supply_unregister(drvdata->battery);
- drvdata->battery = NULL;
- }
- }
-}
-
-static void corsair_void_battery_add_work_handler(struct work_struct *work)
+static void corsair_void_add_battery(struct corsair_void_drvdata *drvdata)
{
- struct corsair_void_drvdata *drvdata;
struct power_supply_config psy_cfg = {};
struct power_supply *new_supply;
- drvdata = container_of(work, struct corsair_void_drvdata,
- battery_add_work);
- guard(mutex)(&drvdata->battery_mutex);
if (drvdata->battery)
return;
@@ -583,16 +567,42 @@ static void corsair_void_battery_add_work_handler(struct work_struct *work)
drvdata->battery = new_supply;
}
+static void corsair_void_battery_work_handler(struct work_struct *work)
+{
+ struct corsair_void_drvdata *drvdata = container_of(work,
+ struct corsair_void_drvdata, battery_work);
+
+ bool add_battery = test_and_clear_bit(CORSAIR_VOID_ADD_BATTERY,
+ &drvdata->battery_work_flags);
+ bool remove_battery = test_and_clear_bit(CORSAIR_VOID_REMOVE_BATTERY,
+ &drvdata->battery_work_flags);
+ bool update_battery = test_and_clear_bit(CORSAIR_VOID_UPDATE_BATTERY,
+ &drvdata->battery_work_flags);
+
+ if (add_battery && !remove_battery) {
+ corsair_void_add_battery(drvdata);
+ } else if (remove_battery && !add_battery && drvdata->battery) {
+ power_supply_unregister(drvdata->battery);
+ drvdata->battery = NULL;
+ }
+
+ if (update_battery && drvdata->battery)
+ power_supply_changed(drvdata->battery);
+}
+
static void corsair_void_headset_connected(struct corsair_void_drvdata *drvdata)
{
- schedule_work(&drvdata->battery_add_work);
+ set_bit(CORSAIR_VOID_ADD_BATTERY, &drvdata->battery_work_flags);
+ schedule_work(&drvdata->battery_work);
schedule_delayed_work(&drvdata->delayed_firmware_work,
msecs_to_jiffies(100));
}
static void corsair_void_headset_disconnected(struct corsair_void_drvdata *drvdata)
{
- schedule_work(&drvdata->battery_remove_work);
+ set_bit(CORSAIR_VOID_REMOVE_BATTERY, &drvdata->battery_work_flags);
+ schedule_work(&drvdata->battery_work);
corsair_void_set_unknown_wireless_data(drvdata);
corsair_void_set_unknown_batt(drvdata);
@@ -678,13 +688,7 @@ static int corsair_void_probe(struct hid_device *hid_dev,
drvdata->battery_desc.get_property = corsair_void_battery_get_property;
drvdata->battery = NULL;
- INIT_WORK(&drvdata->battery_remove_work,
- corsair_void_battery_remove_work_handler);
- INIT_WORK(&drvdata->battery_add_work,
- corsair_void_battery_add_work_handler);
- ret = devm_mutex_init(drvdata->dev, &drvdata->battery_mutex);
- if (ret)
- return ret;
+ INIT_WORK(&drvdata->battery_work, corsair_void_battery_work_handler);
ret = sysfs_create_group(&hid_dev->dev.kobj, &corsair_void_attr_group);
if (ret)
@@ -721,8 +725,7 @@ static void corsair_void_remove(struct hid_device *hid_dev)
struct corsair_void_drvdata *drvdata = hid_get_drvdata(hid_dev);
hid_hw_stop(hid_dev);
- cancel_work_sync(&drvdata->battery_remove_work);
- cancel_work_sync(&drvdata->battery_add_work);
+ cancel_work_sync(&drvdata->battery_work);
if (drvdata->battery)
power_supply_unregister(drvdata->battery);
diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c
index 541d682af15a..8433306148d5 100644
--- a/drivers/hid/hid-debug.c
+++ b/drivers/hid/hid-debug.c
@@ -3450,7 +3450,7 @@ static const char *keys[KEY_MAX + 1] = {
[KEY_MACRO_RECORD_START] = "MacroRecordStart",
[KEY_MACRO_RECORD_STOP] = "MacroRecordStop",
[KEY_MARK_WAYPOINT] = "MarkWayPoint", [KEY_MEDIA_REPEAT] = "MediaRepeat",
- [KEY_MEDIA_TOP_MENU] = "MediaTopMenu", [KEY_MESSENGER] = "Messanger",
+ [KEY_MEDIA_TOP_MENU] = "MediaTopMenu", [KEY_MESSENGER] = "Messenger",
[KEY_NAV_CHART] = "NavChar", [KEY_NAV_INFO] = "NavInfo",
[KEY_NEWS] = "News", [KEY_NEXT_ELEMENT] = "NextElement",
[KEY_NEXT_FAVORITE] = "NextFavorite", [KEY_NOTIFICATION_CENTER] = "NotificationCenter",
diff --git a/drivers/hid/hid-google-hammer.c b/drivers/hid/hid-google-hammer.c
index 0f292b5d3e26..4c1ccf7a267a 100644
--- a/drivers/hid/hid-google-hammer.c
+++ b/drivers/hid/hid-google-hammer.c
@@ -22,7 +22,6 @@
#include <linux/platform_data/cros_ec_commands.h>
#include <linux/platform_data/cros_ec_proto.h>
#include <linux/platform_device.h>
-#include <linux/pm_wakeup.h>
#include <linux/unaligned.h>
#include "hid-ids.h"
@@ -268,11 +267,13 @@ static void cbas_ec_remove(struct platform_device *pdev)
mutex_unlock(&cbas_ec_reglock);
}
+#ifdef CONFIG_ACPI
static const struct acpi_device_id cbas_ec_acpi_ids[] = {
{ "GOOG000B", 0 },
{ }
};
MODULE_DEVICE_TABLE(acpi, cbas_ec_acpi_ids);
+#endif
#ifdef CONFIG_OF
static const struct of_device_id cbas_ec_of_match[] = {
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 7e400624908e..288a2b864cc4 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -190,6 +190,12 @@
#define USB_DEVICE_ID_APPLE_TOUCHBAR_BACKLIGHT 0x8102
#define USB_DEVICE_ID_APPLE_TOUCHBAR_DISPLAY 0x8302
+#define USB_VENDOR_ID_ASETEK 0x2433
+#define USB_DEVICE_ID_ASETEK_INVICTA 0xf300
+#define USB_DEVICE_ID_ASETEK_FORTE 0xf301
+#define USB_DEVICE_ID_ASETEK_LA_PRIMA 0xf303
+#define USB_DEVICE_ID_ASETEK_TONY_KANAAN 0xf306
+
#define USB_VENDOR_ID_ASUS 0x0486
#define USB_DEVICE_ID_ASUS_T91MT 0x0185
#define USB_DEVICE_ID_ASUSTEK_MULTITOUCH_YFO 0x0186
@@ -262,6 +268,10 @@
#define USB_DEVICE_ID_BTC_EMPREX_REMOTE 0x5578
#define USB_DEVICE_ID_BTC_EMPREX_REMOTE_2 0x5577
+#define USB_VENDOR_ID_CAMMUS 0x3416
+#define USB_DEVICE_ID_CAMMUS_C5 0x0301
+#define USB_DEVICE_ID_CAMMUS_C12 0x0302
+
#define USB_VENDOR_ID_CANDO 0x2087
#define USB_DEVICE_ID_CANDO_PIXCIR_MULTI_TOUCH 0x0703
#define USB_DEVICE_ID_CANDO_MULTI_TOUCH 0x0a01
@@ -453,6 +463,11 @@
#define USB_VENDOR_ID_EVISION 0x320f
#define USB_DEVICE_ID_EVISION_ICL01 0x5041
+#define USB_VENDOR_ID_FFBEAST 0x045b
+#define USB_DEVICE_ID_FFBEAST_JOYSTICK 0x58f9
+#define USB_DEVICE_ID_FFBEAST_RUDDER 0x5968
+#define USB_DEVICE_ID_FFBEAST_WHEEL 0x59d7
+
#define USB_VENDOR_ID_FLATFROG 0x25b5
#define USB_DEVICE_ID_MULTITOUCH_3200 0x0002
@@ -817,6 +832,13 @@
#define I2C_DEVICE_ID_LG_8001 0x8001
#define I2C_DEVICE_ID_LG_7010 0x7010
+#define USB_VENDOR_ID_LITE_STAR 0x11ff
+#define USB_DEVICE_ID_PXN_V10 0x3245
+#define USB_DEVICE_ID_PXN_V12 0x1212
+#define USB_DEVICE_ID_PXN_V12_LITE 0x1112
+#define USB_DEVICE_ID_PXN_V12_LITE_2 0x1211
+#define USB_DEVICE_LITE_STAR_GT987_FF 0x2141
+
#define USB_VENDOR_ID_LOGITECH 0x046d
#define USB_DEVICE_ID_LOGITECH_Z_10_SPK 0x0a07
#define USB_DEVICE_ID_LOGITECH_AUDIOHUB 0x0a0e
@@ -964,6 +986,18 @@
#define USB_VENDOR_ID_MONTEREY 0x0566
#define USB_DEVICE_ID_GENIUS_KB29E 0x3004
+#define USB_VENDOR_ID_MOZA 0x346e
+#define USB_DEVICE_ID_MOZA_R3 0x0005
+#define USB_DEVICE_ID_MOZA_R3_2 0x0015
+#define USB_DEVICE_ID_MOZA_R5 0x0004
+#define USB_DEVICE_ID_MOZA_R5_2 0x0014
+#define USB_DEVICE_ID_MOZA_R9 0x0002
+#define USB_DEVICE_ID_MOZA_R9_2 0x0012
+#define USB_DEVICE_ID_MOZA_R12 0x0006
+#define USB_DEVICE_ID_MOZA_R12_2 0x0016
+#define USB_DEVICE_ID_MOZA_R16_R21 0x0000
+#define USB_DEVICE_ID_MOZA_R16_R21_2 0x0010
+
#define USB_VENDOR_ID_MSI 0x1770
#define USB_DEVICE_ID_MSI_GT683R_LED_PANEL 0xff00
@@ -1377,6 +1411,9 @@
#define USB_DEVICE_ID_VELLEMAN_K8061_FIRST 0x8061
#define USB_DEVICE_ID_VELLEMAN_K8061_LAST 0x8068
+#define USB_VENDOR_ID_VRS 0x0483
+#define USB_DEVICE_ID_VRS_DFP 0xa355
+
#define USB_VENDOR_ID_VTL 0x0306
#define USB_DEVICE_ID_VTL_MULTITOUCH_FF3F 0xff3f
diff --git a/drivers/hid/hid-lenovo.c b/drivers/hid/hid-lenovo.c
index a7d9ca02779e..af29ba840522 100644
--- a/drivers/hid/hid-lenovo.c
+++ b/drivers/hid/hid-lenovo.c
@@ -728,11 +728,9 @@ static int lenovo_raw_event_TP_X12_tab(struct hid_device *hdev, u32 raw_data)
if (hdev->product == USB_DEVICE_ID_LENOVO_X12_TAB) {
report_key_event(input, KEY_RFKILL);
return 1;
- } else {
- platform_profile_cycle();
- return 1;
}
- return 0;
+ platform_profile_cycle();
+ return 1;
case TP_X12_RAW_HOTKEY_FN_F10:
/* TAB1 has PICKUP Phone and TAB2 use Snipping tool*/
(hdev->product == USB_DEVICE_ID_LENOVO_X12_TAB) ?
@@ -778,7 +776,7 @@ static int lenovo_raw_event(struct hid_device *hdev,
if (unlikely((hdev->product == USB_DEVICE_ID_LENOVO_X12_TAB
|| hdev->product == USB_DEVICE_ID_LENOVO_X12_TAB2)
&& size >= 3 && report->id == 0x03))
- return lenovo_raw_event_TP_X12_tab(hdev, le32_to_cpu(*(u32 *)data));
+ return lenovo_raw_event_TP_X12_tab(hdev, le32_to_cpu(*(__le32 *)data));
return 0;
}
diff --git a/drivers/hid/hid-lg-g15.c b/drivers/hid/hid-lg-g15.c
index 53e7b90f9cc3..f8605656257b 100644
--- a/drivers/hid/hid-lg-g15.c
+++ b/drivers/hid/hid-lg-g15.c
@@ -8,11 +8,13 @@
#include <linux/device.h>
#include <linux/hid.h>
#include <linux/leds.h>
+#include <linux/led-class-multicolor.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/usb.h>
#include <linux/wait.h>
+#include <dt-bindings/leds/common.h>
#include "hid-ids.h"
@@ -44,9 +46,13 @@ enum lg_g15_led_type {
};
struct lg_g15_led {
- struct led_classdev cdev;
+ union {
+ struct led_classdev cdev;
+ struct led_classdev_mc mcdev;
+ };
enum led_brightness brightness;
enum lg_g15_led_type led;
+ /* Used to store initial color intensities before subled_info is allocated */
u8 red, green, blue;
};
@@ -229,15 +235,15 @@ static int lg_g510_kbd_led_write(struct lg_g15_data *g15,
struct lg_g15_led *g15_led,
enum led_brightness brightness)
{
+ struct mc_subled *subleds = g15_led->mcdev.subled_info;
int ret;
+ led_mc_calc_color_components(&g15_led->mcdev, brightness);
+
g15->transfer_buf[0] = 5 + g15_led->led;
- g15->transfer_buf[1] =
- DIV_ROUND_CLOSEST(g15_led->red * brightness, 255);
- g15->transfer_buf[2] =
- DIV_ROUND_CLOSEST(g15_led->green * brightness, 255);
- g15->transfer_buf[3] =
- DIV_ROUND_CLOSEST(g15_led->blue * brightness, 255);
+ g15->transfer_buf[1] = subleds[0].brightness;
+ g15->transfer_buf[2] = subleds[1].brightness;
+ g15->transfer_buf[3] = subleds[2].brightness;
ret = hid_hw_raw_request(g15->hdev,
LG_G510_FEATURE_BACKLIGHT_RGB + g15_led->led,
@@ -258,8 +264,9 @@ static int lg_g510_kbd_led_write(struct lg_g15_data *g15,
static int lg_g510_kbd_led_set(struct led_classdev *led_cdev,
enum led_brightness brightness)
{
+ struct led_classdev_mc *mc = lcdev_to_mccdev(led_cdev);
struct lg_g15_led *g15_led =
- container_of(led_cdev, struct lg_g15_led, cdev);
+ container_of(mc, struct lg_g15_led, mcdev);
struct lg_g15_data *g15 = dev_get_drvdata(led_cdev->dev->parent);
int ret;
@@ -276,82 +283,20 @@ static int lg_g510_kbd_led_set(struct led_classdev *led_cdev,
static enum led_brightness lg_g510_kbd_led_get(struct led_classdev *led_cdev)
{
+ struct led_classdev_mc *mc = lcdev_to_mccdev(led_cdev);
struct lg_g15_led *g15_led =
- container_of(led_cdev, struct lg_g15_led, cdev);
+ container_of(mc, struct lg_g15_led, mcdev);
return g15_led->brightness;
}
-static ssize_t color_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct led_classdev *led_cdev = dev_get_drvdata(dev);
- struct lg_g15_led *g15_led =
- container_of(led_cdev, struct lg_g15_led, cdev);
- struct lg_g15_data *g15 = dev_get_drvdata(led_cdev->dev->parent);
- unsigned long value;
- int ret;
-
- if (count < 7 || (count == 8 && buf[7] != '\n') || count > 8)
- return -EINVAL;
-
- if (buf[0] != '#')
- return -EINVAL;
-
- ret = kstrtoul(buf + 1, 16, &value);
- if (ret)
- return ret;
-
- mutex_lock(&g15->mutex);
- g15_led->red = (value & 0xff0000) >> 16;
- g15_led->green = (value & 0x00ff00) >> 8;
- g15_led->blue = (value & 0x0000ff);
- ret = lg_g510_kbd_led_write(g15, g15_led, g15_led->brightness);
- mutex_unlock(&g15->mutex);
-
- return (ret < 0) ? ret : count;
-}
-
-static ssize_t color_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct led_classdev *led_cdev = dev_get_drvdata(dev);
- struct lg_g15_led *g15_led =
- container_of(led_cdev, struct lg_g15_led, cdev);
- struct lg_g15_data *g15 = dev_get_drvdata(led_cdev->dev->parent);
- ssize_t ret;
-
- mutex_lock(&g15->mutex);
- ret = sprintf(buf, "#%02x%02x%02x\n",
- g15_led->red, g15_led->green, g15_led->blue);
- mutex_unlock(&g15->mutex);
-
- return ret;
-}
-
-static DEVICE_ATTR_RW(color);
-
-static struct attribute *lg_g510_kbd_led_attrs[] = {
- &dev_attr_color.attr,
- NULL,
-};
-
-static const struct attribute_group lg_g510_kbd_led_group = {
- .attrs = lg_g510_kbd_led_attrs,
-};
-
-static const struct attribute_group *lg_g510_kbd_led_groups[] = {
- &lg_g510_kbd_led_group,
- NULL,
-};
-
static void lg_g510_leds_sync_work(struct work_struct *work)
{
struct lg_g15_data *g15 = container_of(work, struct lg_g15_data, work);
+ struct lg_g15_led *g15_led = &g15->leds[LG_G15_KBD_BRIGHTNESS];
mutex_lock(&g15->mutex);
- lg_g510_kbd_led_write(g15, &g15->leds[LG_G15_KBD_BRIGHTNESS],
- g15->leds[LG_G15_KBD_BRIGHTNESS].brightness);
+ lg_g510_kbd_led_write(g15, g15_led, g15_led->brightness);
mutex_unlock(&g15->mutex);
}
@@ -667,8 +612,46 @@ static void lg_g15_input_close(struct input_dev *dev)
hid_hw_close(hdev);
}
+static void lg_g15_setup_led_rgb(struct lg_g15_data *g15, int index)
+{
+ int i;
+ struct mc_subled *subled_info;
+
+ g15->leds[index].mcdev.led_cdev.brightness_set_blocking =
+ lg_g510_kbd_led_set;
+ g15->leds[index].mcdev.led_cdev.brightness_get =
+ lg_g510_kbd_led_get;
+ g15->leds[index].mcdev.led_cdev.max_brightness = 255;
+ g15->leds[index].mcdev.num_colors = 3;
+
+ subled_info = devm_kcalloc(&g15->hdev->dev, 3, sizeof(*subled_info), GFP_KERNEL);
+ if (!subled_info)
+ return;
+
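+	/*
+	 * Channel i is assigned color i + 1, which relies on LED_COLOR_ID_RED,
+	 * LED_COLOR_ID_GREEN and LED_COLOR_ID_BLUE being 1, 2 and 3 in
+	 * dt-bindings/leds/common.h.
+	 */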
+ for (i = 0; i < 3; i++) {
+ switch (i + 1) {
+ case LED_COLOR_ID_RED:
+ subled_info[i].color_index = LED_COLOR_ID_RED;
+ subled_info[i].intensity = g15->leds[index].red;
+ break;
+ case LED_COLOR_ID_GREEN:
+ subled_info[i].color_index = LED_COLOR_ID_GREEN;
+ subled_info[i].intensity = g15->leds[index].green;
+ break;
+ case LED_COLOR_ID_BLUE:
+ subled_info[i].color_index = LED_COLOR_ID_BLUE;
+ subled_info[i].intensity = g15->leds[index].blue;
+ break;
+ }
+ subled_info[i].channel = i;
+ }
+ g15->leds[index].mcdev.subled_info = subled_info;
+}
+
static int lg_g15_register_led(struct lg_g15_data *g15, int i, const char *name)
{
+ int ret;
+
g15->leds[i].led = i;
g15->leds[i].cdev.name = name;
@@ -685,6 +668,7 @@ static int lg_g15_register_led(struct lg_g15_data *g15, int i, const char *name)
} else {
g15->leds[i].cdev.max_brightness = 1;
}
+ ret = devm_led_classdev_register(&g15->hdev->dev, &g15->leds[i].cdev);
break;
case LG_G510:
case LG_G510_USB_AUDIO:
@@ -697,12 +681,11 @@ static int lg_g15_register_led(struct lg_g15_data *g15, int i, const char *name)
g15->leds[i].cdev.name = "g15::power_on_backlight_val";
fallthrough;
case LG_G15_KBD_BRIGHTNESS:
- g15->leds[i].cdev.brightness_set_blocking =
- lg_g510_kbd_led_set;
- g15->leds[i].cdev.brightness_get =
- lg_g510_kbd_led_get;
- g15->leds[i].cdev.max_brightness = 255;
- g15->leds[i].cdev.groups = lg_g510_kbd_led_groups;
+ /* register multicolor LED */
+ lg_g15_setup_led_rgb(g15, i);
+ ret = devm_led_classdev_multicolor_register_ext(&g15->hdev->dev,
+ &g15->leds[i].mcdev,
+ NULL);
break;
default:
g15->leds[i].cdev.brightness_set_blocking =
@@ -710,11 +693,12 @@ static int lg_g15_register_led(struct lg_g15_data *g15, int i, const char *name)
g15->leds[i].cdev.brightness_get =
lg_g510_mkey_led_get;
g15->leds[i].cdev.max_brightness = 1;
+ ret = devm_led_classdev_register(&g15->hdev->dev, &g15->leds[i].cdev);
}
break;
}
- return devm_led_classdev_register(&g15->hdev->dev, &g15->leds[i].cdev);
+ return ret;
}
/* Common input device init code shared between keyboards and Z-10 speaker handling */
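For reference, the led_classdev_mc pattern the hid-lg-g15 hunks above switch to boils down to the following minimal sketch. It is illustrative only, not the driver's exact code; the names my_rgb_led, my_rgb_set and my_rgb_register are made up, and it assumes a devm-managed parent device with CONFIG_LEDS_CLASS_MULTICOLOR enabled.

/*
 * Minimal sketch of registering an RGB backlight as a multicolor LED.
 * Hypothetical names; not part of the driver above.
 */
#include <linux/device.h>
#include <linux/leds.h>
#include <linux/led-class-multicolor.h>
#include <dt-bindings/leds/common.h>

struct my_rgb_led {
	struct led_classdev_mc mcdev;
	struct mc_subled subled[3];
};

static int my_rgb_set(struct led_classdev *cdev, enum led_brightness b)
{
	/* Walk back through the multicolor classdev, not led_classdev itself */
	struct led_classdev_mc *mc = lcdev_to_mccdev(cdev);
	struct my_rgb_led *led = container_of(mc, struct my_rgb_led, mcdev);

	/* mc->subled_info[i].intensity holds the per-channel values here */
	(void)led;
	return 0; /* write the RGB values to the hardware */
}

static int my_rgb_register(struct device *parent, struct my_rgb_led *led)
{
	static const int colors[3] = {
		LED_COLOR_ID_RED, LED_COLOR_ID_GREEN, LED_COLOR_ID_BLUE,
	};
	int i;

	for (i = 0; i < 3; i++) {
		led->subled[i].color_index = colors[i];
		led->subled[i].channel = i;
	}

	led->mcdev.subled_info = led->subled;
	led->mcdev.num_colors = 3;
	led->mcdev.led_cdev.name = "rgb:kbd_backlight";
	led->mcdev.led_cdev.max_brightness = 255;
	led->mcdev.led_cdev.brightness_set_blocking = my_rgb_set;

	return devm_led_classdev_multicolor_register_ext(parent, &led->mcdev, NULL);
}

With this pattern the per-channel intensities replace the old custom "color" sysfs attribute, which is why the attribute group is removed in the hunks above.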
diff --git a/drivers/hid/hid-nintendo.c b/drivers/hid/hid-nintendo.c
index 11ac246176ae..839d5bcd72b1 100644
--- a/drivers/hid/hid-nintendo.c
+++ b/drivers/hid/hid-nintendo.c
@@ -457,13 +457,13 @@ static const struct joycon_ctlr_button_mapping snescon_button_mappings[] = {
};
static const struct joycon_ctlr_button_mapping gencon_button_mappings[] = {
- { BTN_A, JC_BTN_A, },
- { BTN_B, JC_BTN_B, },
- { BTN_C, JC_BTN_R, },
- { BTN_X, JC_BTN_X, }, /* MD/GEN 6B Only */
- { BTN_Y, JC_BTN_Y, }, /* MD/GEN 6B Only */
- { BTN_Z, JC_BTN_L, }, /* MD/GEN 6B Only */
- { BTN_SELECT, JC_BTN_ZR, },
+ { BTN_WEST, JC_BTN_A, }, /* A */
+ { BTN_SOUTH, JC_BTN_B, }, /* B */
+ { BTN_EAST, JC_BTN_R, }, /* C */
+ { BTN_TL, JC_BTN_X, }, /* X MD/GEN 6B Only */
+ { BTN_NORTH, JC_BTN_Y, }, /* Y MD/GEN 6B Only */
+ { BTN_TR, JC_BTN_L, }, /* Z MD/GEN 6B Only */
+ { BTN_SELECT, JC_BTN_ZR, }, /* Mode */
{ BTN_START, JC_BTN_PLUS, },
{ BTN_MODE, JC_BTN_HOME, },
{ BTN_Z, JC_BTN_CAP, },
diff --git a/drivers/hid/hid-plantronics.c b/drivers/hid/hid-plantronics.c
index 25cfd964dc25..acb9eb18f7cc 100644
--- a/drivers/hid/hid-plantronics.c
+++ b/drivers/hid/hid-plantronics.c
@@ -6,9 +6,6 @@
* Copyright (c) 2015-2018 Terry Junge <terry.junge@plantronics.com>
*/
-/*
- */
-
#include "hid-ids.h"
#include <linux/hid.h>
@@ -23,30 +20,28 @@
#define PLT_VOL_UP 0x00b1
#define PLT_VOL_DOWN 0x00b2
+#define PLT_MIC_MUTE 0x00b5
#define PLT1_VOL_UP (PLT_HID_1_0_PAGE | PLT_VOL_UP)
#define PLT1_VOL_DOWN (PLT_HID_1_0_PAGE | PLT_VOL_DOWN)
+#define PLT1_MIC_MUTE (PLT_HID_1_0_PAGE | PLT_MIC_MUTE)
#define PLT2_VOL_UP (PLT_HID_2_0_PAGE | PLT_VOL_UP)
#define PLT2_VOL_DOWN (PLT_HID_2_0_PAGE | PLT_VOL_DOWN)
+#define PLT2_MIC_MUTE (PLT_HID_2_0_PAGE | PLT_MIC_MUTE)
+#define HID_TELEPHONY_MUTE (HID_UP_TELEPHONY | 0x2f)
+#define HID_CONSUMER_MUTE (HID_UP_CONSUMER | 0xe2)
#define PLT_DA60 0xda60
#define PLT_BT300_MIN 0x0413
#define PLT_BT300_MAX 0x0418
-
-#define PLT_ALLOW_CONSUMER (field->application == HID_CP_CONSUMERCONTROL && \
- (usage->hid & HID_USAGE_PAGE) == HID_UP_CONSUMER)
-
-#define PLT_QUIRK_DOUBLE_VOLUME_KEYS BIT(0)
-#define PLT_QUIRK_FOLLOWED_OPPOSITE_VOLUME_KEYS BIT(1)
-
#define PLT_DOUBLE_KEY_TIMEOUT 5 /* ms */
-#define PLT_FOLLOWED_OPPOSITE_KEY_TIMEOUT 220 /* ms */
struct plt_drv_data {
unsigned long device_type;
- unsigned long last_volume_key_ts;
- u32 quirks;
+ unsigned long last_key_ts;
+ unsigned long double_key_to;
+ __u16 last_key;
};
static int plantronics_input_mapping(struct hid_device *hdev,
@@ -58,34 +53,43 @@ static int plantronics_input_mapping(struct hid_device *hdev,
unsigned short mapped_key;
struct plt_drv_data *drv_data = hid_get_drvdata(hdev);
unsigned long plt_type = drv_data->device_type;
+ int allow_mute = usage->hid == HID_TELEPHONY_MUTE;
+ int allow_consumer = field->application == HID_CP_CONSUMERCONTROL &&
+ (usage->hid & HID_USAGE_PAGE) == HID_UP_CONSUMER &&
+ usage->hid != HID_CONSUMER_MUTE;
/* special case for PTT products */
if (field->application == HID_GD_JOYSTICK)
goto defaulted;
- /* handle volume up/down mapping */
/* non-standard types or multi-HID interfaces - plt_type is PID */
if (!(plt_type & HID_USAGE_PAGE)) {
switch (plt_type) {
case PLT_DA60:
- if (PLT_ALLOW_CONSUMER)
+ if (allow_consumer)
goto defaulted;
- goto ignored;
+ if (usage->hid == HID_CONSUMER_MUTE) {
+ mapped_key = KEY_MICMUTE;
+ goto mapped;
+ }
+ break;
default:
- if (PLT_ALLOW_CONSUMER)
+ if (allow_consumer || allow_mute)
goto defaulted;
}
+ goto ignored;
}
- /* handle standard types - plt_type is 0xffa0uuuu or 0xffa2uuuu */
- /* 'basic telephony compliant' - allow default consumer page map */
- else if ((plt_type & HID_USAGE) >= PLT_BASIC_TELEPHONY &&
- (plt_type & HID_USAGE) != PLT_BASIC_EXCEPTION) {
- if (PLT_ALLOW_CONSUMER)
- goto defaulted;
- }
- /* not 'basic telephony' - apply legacy mapping */
- /* only map if the field is in the device's primary vendor page */
- else if (!((field->application ^ plt_type) & HID_USAGE_PAGE)) {
+
+ /* handle standard consumer control mapping */
+ /* and standard telephony mic mute mapping */
+ if (allow_consumer || allow_mute)
+ goto defaulted;
+
+ /* handle vendor unique types - plt_type is 0xffa0uuuu or 0xffa2uuuu */
+ /* if not 'basic telephony compliant' - map vendor unique controls */
+ if (!((plt_type & HID_USAGE) >= PLT_BASIC_TELEPHONY &&
+ (plt_type & HID_USAGE) != PLT_BASIC_EXCEPTION) &&
+ !((field->application ^ plt_type) & HID_USAGE_PAGE))
switch (usage->hid) {
case PLT1_VOL_UP:
case PLT2_VOL_UP:
@@ -95,8 +99,11 @@ static int plantronics_input_mapping(struct hid_device *hdev,
case PLT2_VOL_DOWN:
mapped_key = KEY_VOLUMEDOWN;
goto mapped;
+ case PLT1_MIC_MUTE:
+ case PLT2_MIC_MUTE:
+ mapped_key = KEY_MICMUTE;
+ goto mapped;
}
- }
/*
* Future mapping of call control or other usages,
@@ -105,6 +112,8 @@ static int plantronics_input_mapping(struct hid_device *hdev,
*/
ignored:
+ hid_dbg(hdev, "usage: %08x (appl: %08x) - ignored\n",
+ usage->hid, field->application);
return -1;
defaulted:
@@ -123,38 +132,26 @@ static int plantronics_event(struct hid_device *hdev, struct hid_field *field,
struct hid_usage *usage, __s32 value)
{
struct plt_drv_data *drv_data = hid_get_drvdata(hdev);
+ unsigned long prev_tsto, cur_ts;
+ __u16 prev_key, cur_key;
- if (drv_data->quirks & PLT_QUIRK_DOUBLE_VOLUME_KEYS) {
- unsigned long prev_ts, cur_ts;
+ /* Usages are filtered in plantronics_usages. */
- /* Usages are filtered in plantronics_usages. */
+ /* HZ too low for ms resolution - double key detection disabled */
+ /* or it is a key release - handle key presses only. */
+ if (!drv_data->double_key_to || !value)
+ return 0;
- if (!value) /* Handle key presses only. */
- return 0;
+ prev_tsto = drv_data->last_key_ts + drv_data->double_key_to;
+ cur_ts = drv_data->last_key_ts = jiffies;
+ prev_key = drv_data->last_key;
+ cur_key = drv_data->last_key = usage->code;
- prev_ts = drv_data->last_volume_key_ts;
- cur_ts = jiffies;
- if (jiffies_to_msecs(cur_ts - prev_ts) <= PLT_DOUBLE_KEY_TIMEOUT)
- return 1; /* Ignore the repeated key. */
-
- drv_data->last_volume_key_ts = cur_ts;
+ /* If the same key occurs in <= double_key_to -- ignore it */
+ if (prev_key == cur_key && time_before_eq(cur_ts, prev_tsto)) {
+ hid_dbg(hdev, "double key %d ignored\n", cur_key);
+ return 1; /* Ignore the repeated key. */
}
- if (drv_data->quirks & PLT_QUIRK_FOLLOWED_OPPOSITE_VOLUME_KEYS) {
- unsigned long prev_ts, cur_ts;
-
- /* Usages are filtered in plantronics_usages. */
-
- if (!value) /* Handle key presses only. */
- return 0;
-
- prev_ts = drv_data->last_volume_key_ts;
- cur_ts = jiffies;
- if (jiffies_to_msecs(cur_ts - prev_ts) <= PLT_FOLLOWED_OPPOSITE_KEY_TIMEOUT)
- return 1; /* Ignore the followed opposite volume key. */
-
- drv_data->last_volume_key_ts = cur_ts;
- }
-
return 0;
}
@@ -196,12 +193,16 @@ static int plantronics_probe(struct hid_device *hdev,
ret = hid_parse(hdev);
if (ret) {
hid_err(hdev, "parse failed\n");
- goto err;
+ return ret;
}
drv_data->device_type = plantronics_device_type(hdev);
- drv_data->quirks = id->driver_data;
- drv_data->last_volume_key_ts = jiffies - msecs_to_jiffies(PLT_DOUBLE_KEY_TIMEOUT);
+ drv_data->double_key_to = msecs_to_jiffies(PLT_DOUBLE_KEY_TIMEOUT);
+ drv_data->last_key_ts = jiffies - drv_data->double_key_to;
+
+ /* if HZ does not allow ms resolution - disable double key detection */
+ if (drv_data->double_key_to < PLT_DOUBLE_KEY_TIMEOUT)
+ drv_data->double_key_to = 0;
hid_set_drvdata(hdev, drv_data);
@@ -210,29 +211,10 @@ static int plantronics_probe(struct hid_device *hdev,
if (ret)
hid_err(hdev, "hw start failed\n");
-err:
return ret;
}
static const struct hid_device_id plantronics_devices[] = {
- { HID_USB_DEVICE(USB_VENDOR_ID_PLANTRONICS,
- USB_DEVICE_ID_PLANTRONICS_BLACKWIRE_3210_SERIES),
- .driver_data = PLT_QUIRK_DOUBLE_VOLUME_KEYS },
- { HID_USB_DEVICE(USB_VENDOR_ID_PLANTRONICS,
- USB_DEVICE_ID_PLANTRONICS_BLACKWIRE_3220_SERIES),
- .driver_data = PLT_QUIRK_DOUBLE_VOLUME_KEYS },
- { HID_USB_DEVICE(USB_VENDOR_ID_PLANTRONICS,
- USB_DEVICE_ID_PLANTRONICS_BLACKWIRE_3215_SERIES),
- .driver_data = PLT_QUIRK_DOUBLE_VOLUME_KEYS },
- { HID_USB_DEVICE(USB_VENDOR_ID_PLANTRONICS,
- USB_DEVICE_ID_PLANTRONICS_BLACKWIRE_3225_SERIES),
- .driver_data = PLT_QUIRK_DOUBLE_VOLUME_KEYS },
- { HID_USB_DEVICE(USB_VENDOR_ID_PLANTRONICS,
- USB_DEVICE_ID_PLANTRONICS_BLACKWIRE_3325_SERIES),
- .driver_data = PLT_QUIRK_FOLLOWED_OPPOSITE_VOLUME_KEYS },
- { HID_USB_DEVICE(USB_VENDOR_ID_PLANTRONICS,
- USB_DEVICE_ID_PLANTRONICS_ENCOREPRO_500_SERIES),
- .driver_data = PLT_QUIRK_FOLLOWED_OPPOSITE_VOLUME_KEYS },
{ HID_USB_DEVICE(USB_VENDOR_ID_PLANTRONICS, HID_ANY_ID) },
{ }
};
@@ -241,6 +223,14 @@ MODULE_DEVICE_TABLE(hid, plantronics_devices);
static const struct hid_usage_id plantronics_usages[] = {
{ HID_CP_VOLUMEUP, EV_KEY, HID_ANY_ID },
{ HID_CP_VOLUMEDOWN, EV_KEY, HID_ANY_ID },
+ { HID_TELEPHONY_MUTE, EV_KEY, HID_ANY_ID },
+ { HID_CONSUMER_MUTE, EV_KEY, HID_ANY_ID },
+ { PLT2_VOL_UP, EV_KEY, HID_ANY_ID },
+ { PLT2_VOL_DOWN, EV_KEY, HID_ANY_ID },
+ { PLT2_MIC_MUTE, EV_KEY, HID_ANY_ID },
+ { PLT1_VOL_UP, EV_KEY, HID_ANY_ID },
+ { PLT1_VOL_DOWN, EV_KEY, HID_ANY_ID },
+ { PLT1_MIC_MUTE, EV_KEY, HID_ANY_ID },
{ HID_TERMINATOR, HID_TERMINATOR, HID_TERMINATOR }
};
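The hid-plantronics changes above replace the per-device quirk flags with a single jiffies-based repeat filter. A rough standalone sketch of that idea follows; my_state and my_is_repeat are illustrative names, not the driver's.

#include <linux/jiffies.h>
#include <linux/types.h>

struct my_state {
	unsigned long last_key_ts;	/* jiffies of the previous key press */
	unsigned long double_key_to;	/* repeat window, in jiffies; 0 = off */
	u16 last_key;
};

/* Return true if this press repeats the previous one within the window. */
static bool my_is_repeat(struct my_state *s, u16 key)
{
	unsigned long deadline = s->last_key_ts + s->double_key_to;
	unsigned long now = jiffies;
	bool repeat;

	if (!s->double_key_to)
		return false;	/* HZ too coarse, filtering disabled */

	repeat = (key == s->last_key) && time_before_eq(now, deadline);
	s->last_key_ts = now;
	s->last_key = key;
	return repeat;
}

The probe hunk computes double_key_to with msecs_to_jiffies(PLT_DOUBLE_KEY_TIMEOUT) and zeroes it when the converted value cannot represent the 5 ms window, which is why the event handler can simply bail out when it is zero.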
diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c
index 5d7a418ccdbe..646171598e41 100644
--- a/drivers/hid/hid-quirks.c
+++ b/drivers/hid/hid-quirks.c
@@ -328,8 +328,6 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_2021) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_FINGERPRINT_2021) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_TOUCHBAR_BACKLIGHT) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_TOUCHBAR_DISPLAY) },
#endif
#if IS_ENABLED(CONFIG_HID_APPLEIR)
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL) },
@@ -338,6 +336,12 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL4) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL5) },
#endif
+#if IS_ENABLED(CONFIG_HID_APPLETB_BL)
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_TOUCHBAR_BACKLIGHT) },
+#endif
+#if IS_ENABLED(CONFIG_HID_APPLETB_KBD)
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_TOUCHBAR_DISPLAY) },
+#endif
#if IS_ENABLED(CONFIG_HID_ASUS)
{ HID_I2C_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_I2C_KEYBOARD) },
{ HID_I2C_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_I2C_TOUCHPAD) },
@@ -595,6 +599,17 @@ static const struct hid_device_id hid_have_special_driver[] = {
#if IS_ENABLED(CONFIG_HID_PLANTRONICS)
{ HID_USB_DEVICE(USB_VENDOR_ID_PLANTRONICS, HID_ANY_ID) },
#endif
+#if IS_ENABLED(CONFIG_HID_PLAYSTATION)
+ { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER_2) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER_2) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER_DONGLE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS5_CONTROLLER) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS5_CONTROLLER) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS5_CONTROLLER_2) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS5_CONTROLLER_2) },
+#endif
#if IS_ENABLED(CONFIG_HID_PRIMAX)
{ HID_USB_DEVICE(USB_VENDOR_ID_PRIMAX, USB_DEVICE_ID_PRIMAX_KEYBOARD) },
#endif
@@ -664,11 +679,6 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_BDREMOTE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_CONTROLLER) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_CONTROLLER) },
- { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER) },
- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER) },
- { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER_2) },
- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER_2) },
- { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER_DONGLE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGX_MOUSE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGP_MOUSE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SINO_LITE, USB_DEVICE_ID_SINO_LITE_CONTROLLER) },
diff --git a/drivers/hid/hid-steam.c b/drivers/hid/hid-steam.c
index c9e65e9088b3..dfd9d22ed559 100644
--- a/drivers/hid/hid-steam.c
+++ b/drivers/hid/hid-steam.c
@@ -559,15 +559,13 @@ static void steam_set_lizard_mode(struct steam_device *steam, bool enable)
if (steam->gamepad_mode)
enable = false;
+ mutex_lock(&steam->report_mutex);
if (enable) {
- mutex_lock(&steam->report_mutex);
/* enable esc, enter, cursors */
steam_send_report_byte(steam, ID_SET_DEFAULT_DIGITAL_MAPPINGS);
/* reset settings */
steam_send_report_byte(steam, ID_LOAD_DEFAULT_SETTINGS);
- mutex_unlock(&steam->report_mutex);
} else {
- mutex_lock(&steam->report_mutex);
/* disable esc, enter, cursor */
steam_send_report_byte(steam, ID_CLEAR_DIGITAL_MAPPINGS);
@@ -579,15 +577,14 @@ static void steam_set_lizard_mode(struct steam_device *steam, bool enable)
SETTING_RIGHT_TRACKPAD_CLICK_PRESSURE, 0xFFFF, /* disable haptic click */
SETTING_STEAM_WATCHDOG_ENABLE, 0, /* disable watchdog that tests if Steam is active */
0);
- mutex_unlock(&steam->report_mutex);
} else {
steam_write_settings(steam,
SETTING_LEFT_TRACKPAD_MODE, TRACKPAD_NONE, /* disable mouse */
SETTING_RIGHT_TRACKPAD_MODE, TRACKPAD_NONE, /* disable mouse */
0);
- mutex_unlock(&steam->report_mutex);
}
}
+ mutex_unlock(&steam->report_mutex);
}
static int steam_input_open(struct input_dev *dev)
@@ -1327,11 +1324,11 @@ static void steam_remove(struct hid_device *hdev)
return;
}
+ hid_destroy_device(steam->client_hdev);
cancel_delayed_work_sync(&steam->mode_switch);
cancel_work_sync(&steam->work_connect);
cancel_work_sync(&steam->rumble_work);
cancel_work_sync(&steam->unregister_work);
- hid_destroy_device(steam->client_hdev);
steam->client_hdev = NULL;
steam->client_opened = 0;
if (steam->quirks & STEAM_QUIRK_WIRELESS) {
diff --git a/drivers/hid/hid-universal-pidff.c b/drivers/hid/hid-universal-pidff.c
new file mode 100644
index 000000000000..001a0f5efb9d
--- /dev/null
+++ b/drivers/hid/hid-universal-pidff.c
@@ -0,0 +1,202 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * HID UNIVERSAL PIDFF
+ * hid-pidff wrapper for PID-enabled devices
+ * Handles device reports, quirks and extends usable button range
+ *
+ * Copyright (c) 2024, 2025 Oleg Makarenko
+ * Copyright (c) 2024, 2025 Tomasz Pakuła
+ */
+
+#include <linux/device.h>
+#include <linux/hid.h>
+#include <linux/module.h>
+#include <linux/input-event-codes.h>
+#include "hid-ids.h"
+#include "usbhid/hid-pidff.h"
+
+#define JOY_RANGE (BTN_DEAD - BTN_JOYSTICK + 1)
+
+/*
+ * Map buttons manually to extend the default joystick button limit
+ */
+static int universal_pidff_input_mapping(struct hid_device *hdev,
+ struct hid_input *hi, struct hid_field *field, struct hid_usage *usage,
+ unsigned long **bit, int *max)
+{
+ if ((usage->hid & HID_USAGE_PAGE) != HID_UP_BUTTON)
+ return 0;
+
+ if (field->application != HID_GD_JOYSTICK)
+ return 0;
+
+ int button = ((usage->hid - 1) & HID_USAGE);
+ int code = button + BTN_JOYSTICK;
+
+ /* Detect the end of JOYSTICK buttons range */
+ if (code > BTN_DEAD)
+ code = button + KEY_NEXT_FAVORITE - JOY_RANGE;
+
+ /*
+ * Map overflowing buttons to KEY_RESERVED to not ignore
+ * them and let them still trigger MSC_SCAN
+ */
+ if (code > KEY_MAX)
+ code = KEY_RESERVED;
+
+ hid_map_usage(hi, usage, bit, max, EV_KEY, code);
+ hid_dbg(hdev, "Button %d: usage %d", button, code);
+ return 1;
+}
+
+/*
+ * Check if the device is PID and initialize it
+ * Add quirks after initialisation
+ */
+static int universal_pidff_probe(struct hid_device *hdev,
+ const struct hid_device_id *id)
+{
+ int i, error;
+ error = hid_parse(hdev);
+ if (error) {
+ hid_err(hdev, "HID parse failed\n");
+ goto err;
+ }
+
+ error = hid_hw_start(hdev, HID_CONNECT_DEFAULT & ~HID_CONNECT_FF);
+ if (error) {
+ hid_err(hdev, "HID hw start failed\n");
+ goto err;
+ }
+
+ /* Check if device contains PID usage page */
+ error = 1;
+ for (i = 0; i < hdev->collection_size; i++)
+ if ((hdev->collection[i].usage & HID_USAGE_PAGE) == HID_UP_PID) {
+ error = 0;
+ hid_dbg(hdev, "PID usage page found\n");
+ break;
+ }
+
+ /*
+ * Do not fail as this might be the second "device"
+ * just for additional buttons/axes. Exit cleanly if force
+ * feedback usage page wasn't found (included devices were
+ * tested and confirmed to be USB PID after all).
+ */
+ if (error) {
+ hid_dbg(hdev, "PID usage page not found in the descriptor\n");
+ return 0;
+ }
+
+ /* Check if HID_PID support is enabled */
+ int (*init_function)(struct hid_device *, u32);
+ init_function = hid_pidff_init_with_quirks;
+
+ if (!init_function) {
+ hid_warn(hdev, "HID_PID support not enabled!\n");
+ return 0;
+ }
+
+ error = init_function(hdev, id->driver_data);
+ if (error) {
+ hid_warn(hdev, "Error initialising force feedback\n");
+ goto err;
+ }
+
+ hid_info(hdev, "Universal pidff driver loaded successfully!");
+
+ return 0;
+err:
+ return error;
+}
+
+static int universal_pidff_input_configured(struct hid_device *hdev,
+ struct hid_input *hidinput)
+{
+ int axis;
+ struct input_dev *input = hidinput->input;
+
+ if (!input->absinfo)
+ return 0;
+
+ /* Decrease fuzz and deadzone on available axes */
+ for (axis = ABS_X; axis <= ABS_BRAKE; axis++) {
+ if (!test_bit(axis, input->absbit))
+ continue;
+
+ input_set_abs_params(input, axis,
+ input->absinfo[axis].minimum,
+ input->absinfo[axis].maximum,
+ axis == ABS_X ? 0 : 8, 0);
+ }
+
+ /* Remove fuzz and deadzone from the second joystick axis */
+ if (hdev->vendor == USB_VENDOR_ID_FFBEAST &&
+ hdev->product == USB_DEVICE_ID_FFBEAST_JOYSTICK)
+ input_set_abs_params(input, ABS_Y,
+ input->absinfo[ABS_Y].minimum,
+ input->absinfo[ABS_Y].maximum, 0, 0);
+
+ return 0;
+}
+
+static const struct hid_device_id universal_pidff_devices[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_MOZA, USB_DEVICE_ID_MOZA_R3),
+ .driver_data = HID_PIDFF_QUIRK_FIX_WHEEL_DIRECTION },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MOZA, USB_DEVICE_ID_MOZA_R3_2),
+ .driver_data = HID_PIDFF_QUIRK_FIX_WHEEL_DIRECTION },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MOZA, USB_DEVICE_ID_MOZA_R5),
+ .driver_data = HID_PIDFF_QUIRK_FIX_WHEEL_DIRECTION },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MOZA, USB_DEVICE_ID_MOZA_R5_2),
+ .driver_data = HID_PIDFF_QUIRK_FIX_WHEEL_DIRECTION },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MOZA, USB_DEVICE_ID_MOZA_R9),
+ .driver_data = HID_PIDFF_QUIRK_FIX_WHEEL_DIRECTION },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MOZA, USB_DEVICE_ID_MOZA_R9_2),
+ .driver_data = HID_PIDFF_QUIRK_FIX_WHEEL_DIRECTION },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MOZA, USB_DEVICE_ID_MOZA_R12),
+ .driver_data = HID_PIDFF_QUIRK_FIX_WHEEL_DIRECTION },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MOZA, USB_DEVICE_ID_MOZA_R12_2),
+ .driver_data = HID_PIDFF_QUIRK_FIX_WHEEL_DIRECTION },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MOZA, USB_DEVICE_ID_MOZA_R16_R21),
+ .driver_data = HID_PIDFF_QUIRK_FIX_WHEEL_DIRECTION },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MOZA, USB_DEVICE_ID_MOZA_R16_R21_2),
+ .driver_data = HID_PIDFF_QUIRK_FIX_WHEEL_DIRECTION },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CAMMUS, USB_DEVICE_ID_CAMMUS_C5) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CAMMUS, USB_DEVICE_ID_CAMMUS_C12) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_VRS, USB_DEVICE_ID_VRS_DFP),
+ .driver_data = HID_PIDFF_QUIRK_PERMISSIVE_CONTROL },
+ { HID_USB_DEVICE(USB_VENDOR_ID_FFBEAST, USB_DEVICE_ID_FFBEAST_JOYSTICK), },
+ { HID_USB_DEVICE(USB_VENDOR_ID_FFBEAST, USB_DEVICE_ID_FFBEAST_RUDDER), },
+ { HID_USB_DEVICE(USB_VENDOR_ID_FFBEAST, USB_DEVICE_ID_FFBEAST_WHEEL) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LITE_STAR, USB_DEVICE_ID_PXN_V10),
+ .driver_data = HID_PIDFF_QUIRK_PERIODIC_SINE_ONLY },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LITE_STAR, USB_DEVICE_ID_PXN_V12),
+ .driver_data = HID_PIDFF_QUIRK_PERIODIC_SINE_ONLY },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LITE_STAR, USB_DEVICE_ID_PXN_V12_LITE),
+ .driver_data = HID_PIDFF_QUIRK_PERIODIC_SINE_ONLY },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LITE_STAR, USB_DEVICE_ID_PXN_V12_LITE_2),
+ .driver_data = HID_PIDFF_QUIRK_PERIODIC_SINE_ONLY },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LITE_STAR, USB_DEVICE_LITE_STAR_GT987_FF),
+ .driver_data = HID_PIDFF_QUIRK_PERIODIC_SINE_ONLY },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ASETEK, USB_DEVICE_ID_ASETEK_INVICTA) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ASETEK, USB_DEVICE_ID_ASETEK_FORTE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ASETEK, USB_DEVICE_ID_ASETEK_LA_PRIMA) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ASETEK, USB_DEVICE_ID_ASETEK_TONY_KANAAN) },
+ { }
+};
+MODULE_DEVICE_TABLE(hid, universal_pidff_devices);
+
+static struct hid_driver universal_pidff = {
+ .name = "hid-universal-pidff",
+ .id_table = universal_pidff_devices,
+ .input_mapping = universal_pidff_input_mapping,
+ .probe = universal_pidff_probe,
+ .input_configured = universal_pidff_input_configured
+};
+module_hid_driver(universal_pidff);
+
+MODULE_DESCRIPTION("Universal driver for USB PID Force Feedback devices");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Oleg Makarenko <oleg@makarenk.ooo>");
+MODULE_AUTHOR("Tomasz Pakuła <tomasz.pakula.oficjalny@gmail.com>");
diff --git a/drivers/hid/i2c-hid/i2c-hid-core.c b/drivers/hid/i2c-hid/i2c-hid-core.c
index 75544448c239..d3912e3f2f13 100644
--- a/drivers/hid/i2c-hid/i2c-hid-core.c
+++ b/drivers/hid/i2c-hid/i2c-hid-core.c
@@ -290,7 +290,7 @@ static int i2c_hid_get_report(struct i2c_hid *ihid,
ihid->rawbuf, recv_len + sizeof(__le16));
if (error) {
dev_err(&ihid->client->dev,
- "failed to set a report to device: %d\n", error);
+ "failed to get a report from device: %d\n", error);
return error;
}
diff --git a/drivers/hid/intel-ish-hid/ishtp-hid-client.c b/drivers/hid/intel-ish-hid/ishtp-hid-client.c
index cb04cd1d980b..6550ad5bfbb5 100644
--- a/drivers/hid/intel-ish-hid/ishtp-hid-client.c
+++ b/drivers/hid/intel-ish-hid/ishtp-hid-client.c
@@ -832,9 +832,9 @@ static void hid_ishtp_cl_remove(struct ishtp_cl_device *cl_device)
hid_ishtp_cl);
dev_dbg(ishtp_device(cl_device), "%s\n", __func__);
- hid_ishtp_cl_deinit(hid_ishtp_cl);
ishtp_put_device(cl_device);
ishtp_hid_remove(client_data);
+ hid_ishtp_cl_deinit(hid_ishtp_cl);
hid_ishtp_cl = NULL;
diff --git a/drivers/hid/intel-ish-hid/ishtp-hid.c b/drivers/hid/intel-ish-hid/ishtp-hid.c
index 00c6f0ebf356..be2c62fc8251 100644
--- a/drivers/hid/intel-ish-hid/ishtp-hid.c
+++ b/drivers/hid/intel-ish-hid/ishtp-hid.c
@@ -261,12 +261,14 @@ err_hid_data:
*/
void ishtp_hid_remove(struct ishtp_cl_data *client_data)
{
+ void *data;
int i;
for (i = 0; i < client_data->num_hid_devices; ++i) {
if (client_data->hid_sensor_hubs[i]) {
- kfree(client_data->hid_sensor_hubs[i]->driver_data);
+ data = client_data->hid_sensor_hubs[i]->driver_data;
hid_destroy_device(client_data->hid_sensor_hubs[i]);
+ kfree(data);
client_data->hid_sensor_hubs[i] = NULL;
}
}
diff --git a/drivers/hid/intel-thc-hid/intel-quicki2c/pci-quicki2c.c b/drivers/hid/intel-thc-hid/intel-quicki2c/pci-quicki2c.c
index 2de93f4a25ca..fa51155ebe39 100644
--- a/drivers/hid/intel-thc-hid/intel-quicki2c/pci-quicki2c.c
+++ b/drivers/hid/intel-thc-hid/intel-quicki2c/pci-quicki2c.c
@@ -557,20 +557,19 @@ static int quicki2c_probe(struct pci_dev *pdev,
pci_set_master(pdev);
- ret = pcim_iomap_regions(pdev, BIT(0), KBUILD_MODNAME);
+ mem_addr = pcim_iomap_region(pdev, 0, KBUILD_MODNAME);
+ ret = PTR_ERR_OR_ZERO(mem_addr);
if (ret) {
dev_err_once(&pdev->dev, "Failed to get PCI regions, ret = %d.\n", ret);
goto disable_pci_device;
}
- mem_addr = pcim_iomap_table(pdev)[0];
-
ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (ret) {
ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (ret) {
dev_err_once(&pdev->dev, "No usable DMA configuration %d\n", ret);
- goto unmap_io_region;
+ goto disable_pci_device;
}
}
@@ -578,7 +577,7 @@ static int quicki2c_probe(struct pci_dev *pdev,
if (ret < 0) {
dev_err_once(&pdev->dev,
"Failed to allocate IRQ vectors. ret = %d\n", ret);
- goto unmap_io_region;
+ goto disable_pci_device;
}
pdev->irq = pci_irq_vector(pdev, 0);
@@ -587,7 +586,7 @@ static int quicki2c_probe(struct pci_dev *pdev,
if (IS_ERR(qcdev)) {
dev_err_once(&pdev->dev, "QuickI2C device init failed\n");
ret = PTR_ERR(qcdev);
- goto unmap_io_region;
+ goto disable_pci_device;
}
pci_set_drvdata(pdev, qcdev);
@@ -666,8 +665,6 @@ dma_deinit:
quicki2c_dma_deinit(qcdev);
dev_deinit:
quicki2c_dev_deinit(qcdev);
-unmap_io_region:
- pcim_iounmap_regions(pdev, BIT(0));
disable_pci_device:
pci_clear_master(pdev);
@@ -697,7 +694,6 @@ static void quicki2c_remove(struct pci_dev *pdev)
quicki2c_dev_deinit(qcdev);
- pcim_iounmap_regions(pdev, BIT(0));
pci_clear_master(pdev);
}
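Both THC probe paths move from pcim_iomap_regions()/pcim_iomap_table() to pcim_iomap_region(), which hands back the mapping directly and is released by devres, so the unmap_io_region error label can go away. A minimal sketch of the new pattern, assuming <linux/pci.h> and <linux/err.h>, with my_probe_map as an illustrative name:

static int my_probe_map(struct pci_dev *pdev)
{
	void __iomem *base;

	base = pcim_iomap_region(pdev, 0, KBUILD_MODNAME);
	if (IS_ERR(base))
		return PTR_ERR(base);	/* nothing to unwind, devres owns it */

	/* use base; the mapping is torn down automatically on detach */
	return 0;
}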
diff --git a/drivers/hid/intel-thc-hid/intel-quickspi/pci-quickspi.c b/drivers/hid/intel-thc-hid/intel-quickspi/pci-quickspi.c
index 4641e818dfa4..d4f89f44c3b4 100644
--- a/drivers/hid/intel-thc-hid/intel-quickspi/pci-quickspi.c
+++ b/drivers/hid/intel-thc-hid/intel-quickspi/pci-quickspi.c
@@ -426,7 +426,7 @@ static struct quickspi_device *quickspi_dev_init(struct pci_dev *pdev, void __io
thc_interrupt_enable(qsdev->thc_hw, true);
- qsdev->state = QUICKSPI_INITED;
+ qsdev->state = QUICKSPI_INITIATED;
return qsdev;
}
@@ -575,20 +575,19 @@ static int quickspi_probe(struct pci_dev *pdev,
pci_set_master(pdev);
- ret = pcim_iomap_regions(pdev, BIT(0), KBUILD_MODNAME);
+ mem_addr = pcim_iomap_region(pdev, 0, KBUILD_MODNAME);
+ ret = PTR_ERR_OR_ZERO(mem_addr);
if (ret) {
dev_err(&pdev->dev, "Failed to get PCI regions, ret = %d.\n", ret);
goto disable_pci_device;
}
- mem_addr = pcim_iomap_table(pdev)[0];
-
ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (ret) {
ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (ret) {
dev_err(&pdev->dev, "No usable DMA configuration %d\n", ret);
- goto unmap_io_region;
+ goto disable_pci_device;
}
}
@@ -596,7 +595,7 @@ static int quickspi_probe(struct pci_dev *pdev,
if (ret < 0) {
dev_err(&pdev->dev,
"Failed to allocate IRQ vectors. ret = %d\n", ret);
- goto unmap_io_region;
+ goto disable_pci_device;
}
pdev->irq = pci_irq_vector(pdev, 0);
@@ -605,7 +604,7 @@ static int quickspi_probe(struct pci_dev *pdev,
if (IS_ERR(qsdev)) {
dev_err(&pdev->dev, "QuickSPI device init failed\n");
ret = PTR_ERR(qsdev);
- goto unmap_io_region;
+ goto disable_pci_device;
}
pci_set_drvdata(pdev, qsdev);
@@ -668,8 +667,6 @@ dma_deinit:
quickspi_dma_deinit(qsdev);
dev_deinit:
quickspi_dev_deinit(qsdev);
-unmap_io_region:
- pcim_iounmap_regions(pdev, BIT(0));
disable_pci_device:
pci_clear_master(pdev);
@@ -699,7 +696,6 @@ static void quickspi_remove(struct pci_dev *pdev)
quickspi_dev_deinit(qsdev);
- pcim_iounmap_regions(pdev, BIT(0));
pci_clear_master(pdev);
}
@@ -909,6 +905,8 @@ static int quickspi_restore(struct device *device)
thc_change_ltr_mode(qsdev->thc_hw, THC_LTR_MODE_ACTIVE);
+ qsdev->state = QUICKSPI_ENABLED;
+
return 0;
}
diff --git a/drivers/hid/intel-thc-hid/intel-quickspi/quickspi-dev.h b/drivers/hid/intel-thc-hid/intel-quickspi/quickspi-dev.h
index 75179bb26767..6fdf674b21c5 100644
--- a/drivers/hid/intel-thc-hid/intel-quickspi/quickspi-dev.h
+++ b/drivers/hid/intel-thc-hid/intel-quickspi/quickspi-dev.h
@@ -57,9 +57,9 @@
enum quickspi_dev_state {
QUICKSPI_NONE,
+ QUICKSPI_INITIATED,
QUICKSPI_RESETING,
- QUICKSPI_RESETED,
- QUICKSPI_INITED,
+ QUICKSPI_RESET,
QUICKSPI_ENABLED,
QUICKSPI_DISABLED,
};
diff --git a/drivers/hid/intel-thc-hid/intel-quickspi/quickspi-protocol.c b/drivers/hid/intel-thc-hid/intel-quickspi/quickspi-protocol.c
index 7373238ceb18..e6ba2ddcc9cb 100644
--- a/drivers/hid/intel-thc-hid/intel-quickspi/quickspi-protocol.c
+++ b/drivers/hid/intel-thc-hid/intel-quickspi/quickspi-protocol.c
@@ -107,7 +107,7 @@ static int quickspi_get_device_descriptor(struct quickspi_device *qsdev)
return 0;
}
- dev_err_once(qsdev->dev, "Unexpected intput report type: %d\n", input_rep_type);
+ dev_err_once(qsdev->dev, "Unexpected input report type: %d\n", input_rep_type);
return -EINVAL;
}
@@ -333,7 +333,7 @@ int reset_tic(struct quickspi_device *qsdev)
return -EINVAL;
}
- qsdev->state = QUICKSPI_RESETED;
+ qsdev->state = QUICKSPI_RESET;
ret = quickspi_get_device_descriptor(qsdev);
if (ret)
diff --git a/drivers/hid/intel-thc-hid/intel-thc/intel-thc-dma.c b/drivers/hid/intel-thc-hid/intel-thc/intel-thc-dma.c
index eb23bea77686..8f97e71df7f4 100644
--- a/drivers/hid/intel-thc-hid/intel-thc/intel-thc-dma.c
+++ b/drivers/hid/intel-thc-hid/intel-thc/intel-thc-dma.c
@@ -295,7 +295,7 @@ static void release_dma_buffers(struct thc_device *dev,
return;
for (i = 0; i < config->prd_tbl_num; i++) {
- if (!config->sgls[i] | !config->sgls_nent[i])
+ if (!config->sgls[i] || !config->sgls_nent[i])
continue;
dma_unmap_sg(dev->dev, config->sgls[i],
diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
index a6eb6fe6130d..44c2351b870f 100644
--- a/drivers/hid/usbhid/hid-core.c
+++ b/drivers/hid/usbhid/hid-core.c
@@ -35,6 +35,7 @@
#include <linux/hid-debug.h>
#include <linux/hidraw.h>
#include "usbhid.h"
+#include "hid-pidff.h"
/*
* Version Information
diff --git a/drivers/hid/usbhid/hid-pidff.c b/drivers/hid/usbhid/hid-pidff.c
index 3b4ee21cd811..8dfd2c554a27 100644
--- a/drivers/hid/usbhid/hid-pidff.c
+++ b/drivers/hid/usbhid/hid-pidff.c
@@ -3,27 +3,27 @@
* Force feedback driver for USB HID PID compliant devices
*
* Copyright (c) 2005, 2006 Anssi Hannula <anssi.hannula@gmail.com>
+ * Upgraded 2025 by Oleg Makarenko and Tomasz Pakuła
*/
-/*
- */
-
-/* #define DEBUG */
-
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include "hid-pidff.h"
#include <linux/input.h>
#include <linux/slab.h>
#include <linux/usb.h>
-
#include <linux/hid.h>
+#include <linux/minmax.h>
-#include "usbhid.h"
#define PID_EFFECTS_MAX 64
+#define PID_INFINITE U16_MAX
-/* Report usage table used to put reports into an array */
+/* Linux Force Feedback API uses milliseconds as the time unit */
+#define FF_TIME_EXPONENT -3
+#define FF_INFINITE 0
+/* Report usage table used to put reports into an array */
#define PID_SET_EFFECT 0
#define PID_EFFECT_OPERATION 1
#define PID_DEVICE_GAIN 2
@@ -44,12 +44,19 @@ static const u8 pidff_reports[] = {
0x21, 0x77, 0x7d, 0x7f, 0x89, 0x90, 0x96, 0xab,
0x5a, 0x5f, 0x6e, 0x73, 0x74
};
+/*
+ * device_control is really 0x95, but 0x96 specified
+ * as it is the usage of the only field in that report.
+ */
-/* device_control is really 0x95, but 0x96 specified as it is the usage of
-the only field in that report */
+/* PID special fields */
+#define PID_EFFECT_TYPE 0x25
+#define PID_DIRECTION 0x57
+#define PID_EFFECT_OPERATION_ARRAY 0x78
+#define PID_BLOCK_LOAD_STATUS 0x8b
+#define PID_DEVICE_CONTROL_ARRAY 0x96
/* Value usage tables used to put fields and values into arrays */
-
#define PID_EFFECT_BLOCK_INDEX 0
#define PID_DURATION 1
@@ -107,10 +114,13 @@ static const u8 pidff_device_gain[] = { 0x7e };
static const u8 pidff_pool[] = { 0x80, 0x83, 0xa9 };
/* Special field key tables used to put special field keys into arrays */
-
#define PID_ENABLE_ACTUATORS 0
-#define PID_RESET 1
-static const u8 pidff_device_control[] = { 0x97, 0x9a };
+#define PID_DISABLE_ACTUATORS 1
+#define PID_STOP_ALL_EFFECTS 2
+#define PID_RESET 3
+#define PID_PAUSE 4
+#define PID_CONTINUE 5
+static const u8 pidff_device_control[] = { 0x97, 0x98, 0x99, 0x9a, 0x9b, 0x9c };
#define PID_CONSTANT 0
#define PID_RAMP 1
@@ -130,12 +140,16 @@ static const u8 pidff_effect_types[] = {
#define PID_BLOCK_LOAD_SUCCESS 0
#define PID_BLOCK_LOAD_FULL 1
-static const u8 pidff_block_load_status[] = { 0x8c, 0x8d };
+#define PID_BLOCK_LOAD_ERROR 2
+static const u8 pidff_block_load_status[] = { 0x8c, 0x8d, 0x8e};
#define PID_EFFECT_START 0
#define PID_EFFECT_STOP 1
static const u8 pidff_effect_operation_status[] = { 0x79, 0x7b };
+/* Polar direction 90 degrees (East) */
+#define PIDFF_FIXED_WHEEL_DIRECTION 0x4000
+
struct pidff_usage {
struct hid_field *field;
s32 *value;
@@ -159,8 +173,10 @@ struct pidff_device {
struct pidff_usage effect_operation[sizeof(pidff_effect_operation)];
struct pidff_usage block_free[sizeof(pidff_block_free)];
- /* Special field is a field that is not composed of
- usage<->value pairs that pidff_usage values are */
+ /*
+ * Special field is a field that is not composed of
+ * usage<->value pairs that pidff_usage values are
+ */
/* Special field in create_new_effect */
struct hid_field *create_new_effect_type;
@@ -184,30 +200,61 @@ struct pidff_device {
int operation_id[sizeof(pidff_effect_operation_status)];
int pid_id[PID_EFFECTS_MAX];
+
+ u32 quirks;
+ u8 effect_count;
};
/*
+ * Clamp value for a given field
+ */
+static s32 pidff_clamp(s32 i, struct hid_field *field)
+{
+ s32 clamped = clamp(i, field->logical_minimum, field->logical_maximum);
+ pr_debug("clamped from %d to %d", i, clamped);
+ return clamped;
+}
+
+/*
* Scale an unsigned value with range 0..max for the given field
*/
static int pidff_rescale(int i, int max, struct hid_field *field)
{
return i * (field->logical_maximum - field->logical_minimum) / max +
- field->logical_minimum;
+ field->logical_minimum;
}
/*
- * Scale a signed value in range -0x8000..0x7fff for the given field
+ * Scale a signed value in range S16_MIN..S16_MAX for the given field
*/
static int pidff_rescale_signed(int i, struct hid_field *field)
{
- return i == 0 ? 0 : i >
- 0 ? i * field->logical_maximum / 0x7fff : i *
- field->logical_minimum / -0x8000;
+ if (i > 0) return i * field->logical_maximum / S16_MAX;
+ if (i < 0) return i * field->logical_minimum / S16_MIN;
+ return 0;
+}
+
+/*
+ * Scale time value from Linux default (ms) to field units
+ */
+static u32 pidff_rescale_time(u16 time, struct hid_field *field)
+{
+ u32 scaled_time = time;
+ int exponent = field->unit_exponent;
+ pr_debug("time field exponent: %d\n", exponent);
+
+ for (;exponent < FF_TIME_EXPONENT; exponent++)
+ scaled_time *= 10;
+ for (;exponent > FF_TIME_EXPONENT; exponent--)
+ scaled_time /= 10;
+
+ pr_debug("time calculated from %d to %d\n", time, scaled_time);
+ return scaled_time;
}
static void pidff_set(struct pidff_usage *usage, u16 value)
{
- usage->value[0] = pidff_rescale(value, 0xffff, usage->field);
+ usage->value[0] = pidff_rescale(value, U16_MAX, usage->field);
pr_debug("calculated from %d to %d\n", value, usage->value[0]);
}
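The new pidff_rescale_time() above converts Linux FF times, which are always milliseconds (exponent -3), into whatever power-of-ten unit the HID time field declares via its unit exponent. A standalone sketch of the same arithmetic with a worked example follows; it is plain userspace C with illustrative names, not driver code.

#include <stdio.h>

#define FF_TIME_EXPONENT -3	/* Linux FF times are in milliseconds */

static unsigned int rescale_time(unsigned int ms, int field_exponent)
{
	unsigned int v = ms;

	for (; field_exponent < FF_TIME_EXPONENT; field_exponent++)
		v *= 10;	/* field counts finer units than a millisecond */
	for (; field_exponent > FF_TIME_EXPONENT; field_exponent--)
		v /= 10;	/* field counts coarser units */
	return v;
}

int main(void)
{
	/* A field with unit exponent -4 counts 0.1 ms steps: 50 ms -> 500 */
	printf("%u\n", rescale_time(50, -4));
	return 0;
}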
@@ -218,14 +265,35 @@ static void pidff_set_signed(struct pidff_usage *usage, s16 value)
else {
if (value < 0)
usage->value[0] =
- pidff_rescale(-value, 0x8000, usage->field);
+ pidff_rescale(-value, -S16_MIN, usage->field);
else
usage->value[0] =
- pidff_rescale(value, 0x7fff, usage->field);
+ pidff_rescale(value, S16_MAX, usage->field);
}
pr_debug("calculated from %d to %d\n", value, usage->value[0]);
}
+static void pidff_set_time(struct pidff_usage *usage, u16 time)
+{
+ u32 modified_time = pidff_rescale_time(time, usage->field);
+ usage->value[0] = pidff_clamp(modified_time, usage->field);
+}
+
+static void pidff_set_duration(struct pidff_usage *usage, u16 duration)
+{
+ /* Infinite value conversion from Linux API -> PID */
+ if (duration == FF_INFINITE)
+ duration = PID_INFINITE;
+
+ /* PID defines INFINITE as the max possible value for duration field */
+ if (duration == PID_INFINITE) {
+ usage->value[0] = (1U << usage->field->report_size) - 1;
+ return;
+ }
+
+ pidff_set_time(usage, duration);
+}
+
/*
* Send envelope report to the device
*/
@@ -233,19 +301,21 @@ static void pidff_set_envelope_report(struct pidff_device *pidff,
struct ff_envelope *envelope)
{
pidff->set_envelope[PID_EFFECT_BLOCK_INDEX].value[0] =
- pidff->block_load[PID_EFFECT_BLOCK_INDEX].value[0];
+ pidff->block_load[PID_EFFECT_BLOCK_INDEX].value[0];
pidff->set_envelope[PID_ATTACK_LEVEL].value[0] =
- pidff_rescale(envelope->attack_level >
- 0x7fff ? 0x7fff : envelope->attack_level, 0x7fff,
- pidff->set_envelope[PID_ATTACK_LEVEL].field);
+ pidff_rescale(envelope->attack_level >
+ S16_MAX ? S16_MAX : envelope->attack_level, S16_MAX,
+ pidff->set_envelope[PID_ATTACK_LEVEL].field);
pidff->set_envelope[PID_FADE_LEVEL].value[0] =
- pidff_rescale(envelope->fade_level >
- 0x7fff ? 0x7fff : envelope->fade_level, 0x7fff,
- pidff->set_envelope[PID_FADE_LEVEL].field);
+ pidff_rescale(envelope->fade_level >
+ S16_MAX ? S16_MAX : envelope->fade_level, S16_MAX,
+ pidff->set_envelope[PID_FADE_LEVEL].field);
- pidff->set_envelope[PID_ATTACK_TIME].value[0] = envelope->attack_length;
- pidff->set_envelope[PID_FADE_TIME].value[0] = envelope->fade_length;
+ pidff_set_time(&pidff->set_envelope[PID_ATTACK_TIME],
+ envelope->attack_length);
+ pidff_set_time(&pidff->set_envelope[PID_FADE_TIME],
+ envelope->fade_length);
hid_dbg(pidff->hid, "attack %u => %d\n",
envelope->attack_level,
@@ -261,10 +331,22 @@ static void pidff_set_envelope_report(struct pidff_device *pidff,
static int pidff_needs_set_envelope(struct ff_envelope *envelope,
struct ff_envelope *old)
{
- return envelope->attack_level != old->attack_level ||
- envelope->fade_level != old->fade_level ||
+ bool needs_new_envelope;
+ needs_new_envelope = envelope->attack_level != 0 ||
+ envelope->fade_level != 0 ||
+ envelope->attack_length != 0 ||
+ envelope->fade_length != 0;
+
+ if (!needs_new_envelope)
+ return false;
+
+ if (!old)
+ return needs_new_envelope;
+
+ return envelope->attack_level != old->attack_level ||
+ envelope->fade_level != old->fade_level ||
envelope->attack_length != old->attack_length ||
- envelope->fade_length != old->fade_length;
+ envelope->fade_length != old->fade_length;
}
/*
@@ -301,17 +383,27 @@ static void pidff_set_effect_report(struct pidff_device *pidff,
pidff->block_load[PID_EFFECT_BLOCK_INDEX].value[0];
pidff->set_effect_type->value[0] =
pidff->create_new_effect_type->value[0];
- pidff->set_effect[PID_DURATION].value[0] = effect->replay.length;
+
+ pidff_set_duration(&pidff->set_effect[PID_DURATION],
+ effect->replay.length);
+
pidff->set_effect[PID_TRIGGER_BUTTON].value[0] = effect->trigger.button;
- pidff->set_effect[PID_TRIGGER_REPEAT_INT].value[0] =
- effect->trigger.interval;
+ pidff_set_time(&pidff->set_effect[PID_TRIGGER_REPEAT_INT],
+ effect->trigger.interval);
pidff->set_effect[PID_GAIN].value[0] =
pidff->set_effect[PID_GAIN].field->logical_maximum;
pidff->set_effect[PID_DIRECTION_ENABLE].value[0] = 1;
- pidff->effect_direction->value[0] =
- pidff_rescale(effect->direction, 0xffff,
- pidff->effect_direction);
- pidff->set_effect[PID_START_DELAY].value[0] = effect->replay.delay;
+
+ /* Use fixed direction if needed */
+ pidff->effect_direction->value[0] = pidff_rescale(
+ pidff->quirks & HID_PIDFF_QUIRK_FIX_WHEEL_DIRECTION ?
+ PIDFF_FIXED_WHEEL_DIRECTION : effect->direction,
+ U16_MAX, pidff->effect_direction);
+
+ /* Omit setting delay field if it's missing */
+ if (!(pidff->quirks & HID_PIDFF_QUIRK_MISSING_DELAY))
+ pidff_set_time(&pidff->set_effect[PID_START_DELAY],
+ effect->replay.delay);
hid_hw_request(pidff->hid, pidff->reports[PID_SET_EFFECT],
HID_REQ_SET_REPORT);
@@ -343,11 +435,11 @@ static void pidff_set_periodic_report(struct pidff_device *pidff,
pidff_set_signed(&pidff->set_periodic[PID_OFFSET],
effect->u.periodic.offset);
pidff_set(&pidff->set_periodic[PID_PHASE], effect->u.periodic.phase);
- pidff->set_periodic[PID_PERIOD].value[0] = effect->u.periodic.period;
+ pidff_set_time(&pidff->set_periodic[PID_PERIOD],
+ effect->u.periodic.period);
hid_hw_request(pidff->hid, pidff->reports[PID_SET_PERIODIC],
HID_REQ_SET_REPORT);
-
}
/*
@@ -368,13 +460,19 @@ static int pidff_needs_set_periodic(struct ff_effect *effect,
static void pidff_set_condition_report(struct pidff_device *pidff,
struct ff_effect *effect)
{
- int i;
+ int i, max_axis;
+
+ /* Devices missing Parameter Block Offset can only have one axis */
+ max_axis = pidff->quirks & HID_PIDFF_QUIRK_MISSING_PBO ? 1 : 2;
pidff->set_condition[PID_EFFECT_BLOCK_INDEX].value[0] =
pidff->block_load[PID_EFFECT_BLOCK_INDEX].value[0];
- for (i = 0; i < 2; i++) {
- pidff->set_condition[PID_PARAM_BLOCK_OFFSET].value[0] = i;
+ for (i = 0; i < max_axis; i++) {
+ /* Omit Parameter Block Offset if missing */
+ if (!(pidff->quirks & HID_PIDFF_QUIRK_MISSING_PBO))
+ pidff->set_condition[PID_PARAM_BLOCK_OFFSET].value[0] = i;
+
pidff_set_signed(&pidff->set_condition[PID_CP_OFFSET],
effect->u.condition[i].center);
pidff_set_signed(&pidff->set_condition[PID_POS_COEFFICIENT],
@@ -442,8 +540,103 @@ static int pidff_needs_set_ramp(struct ff_effect *effect, struct ff_effect *old)
}
/*
+ * Set device gain
+ */
+static void pidff_set_gain_report(struct pidff_device *pidff, u16 gain)
+{
+ if (!pidff->device_gain[PID_DEVICE_GAIN_FIELD].field)
+ return;
+
+ pidff_set(&pidff->device_gain[PID_DEVICE_GAIN_FIELD], gain);
+ hid_hw_request(pidff->hid, pidff->reports[PID_DEVICE_GAIN],
+ HID_REQ_SET_REPORT);
+}
+
+/*
+ * Send device control report to the device
+ */
+static void pidff_set_device_control(struct pidff_device *pidff, int field)
+{
+ int i, index;
+ int field_index = pidff->control_id[field];
+
+ if (field_index < 1)
+ return;
+
+ /* Detect if the field is a bitmask variable or an array */
+ if (pidff->device_control->flags & HID_MAIN_ITEM_VARIABLE) {
+ hid_dbg(pidff->hid, "DEVICE_CONTROL is a bitmask\n");
+
+ /* Clear current bitmask */
+ for(i = 0; i < sizeof(pidff_device_control); i++) {
+ index = pidff->control_id[i];
+ if (index < 1)
+ continue;
+
+ pidff->device_control->value[index - 1] = 0;
+ }
+
+ pidff->device_control->value[field_index - 1] = 1;
+ } else {
+ hid_dbg(pidff->hid, "DEVICE_CONTROL is an array\n");
+ pidff->device_control->value[0] = field_index;
+ }
+
+ hid_hw_request(pidff->hid, pidff->reports[PID_DEVICE_CONTROL], HID_REQ_SET_REPORT);
+ hid_hw_wait(pidff->hid);
+}
+
+/*
+ * Modify actuators state
+ */
+static void pidff_set_actuators(struct pidff_device *pidff, bool enable)
+{
+ hid_dbg(pidff->hid, "%s actuators\n", enable ? "Enable" : "Disable");
+ pidff_set_device_control(pidff,
+ enable ? PID_ENABLE_ACTUATORS : PID_DISABLE_ACTUATORS);
+}
+
+/*
+ * Reset the device, stop all effects, enable actuators
+ */
+static void pidff_reset(struct pidff_device *pidff)
+{
+ /* We reset twice as sometimes hid_wait_io isn't waiting long enough */
+ pidff_set_device_control(pidff, PID_RESET);
+ pidff_set_device_control(pidff, PID_RESET);
+ pidff->effect_count = 0;
+
+ pidff_set_device_control(pidff, PID_STOP_ALL_EFFECTS);
+ pidff_set_actuators(pidff, 1);
+}
+
+/*
+ * Fetch pool report
+ */
+static void pidff_fetch_pool(struct pidff_device *pidff)
+{
+ int i;
+ struct hid_device *hid = pidff->hid;
+
+ /* Repeat if PID_SIMULTANEOUS_MAX < 2 to make sure it's correct */
+ for(i = 0; i < 20; i++) {
+ hid_hw_request(hid, pidff->reports[PID_POOL], HID_REQ_GET_REPORT);
+ hid_hw_wait(hid);
+
+ if (!pidff->pool[PID_SIMULTANEOUS_MAX].value)
+ return;
+ if (pidff->pool[PID_SIMULTANEOUS_MAX].value[0] >= 2)
+ return;
+ }
+ hid_warn(hid, "device reports %d simultaneous effects\n",
+ pidff->pool[PID_SIMULTANEOUS_MAX].value[0]);
+}
+
+/*
* Send a request for effect upload to the device
*
+ * Reset and enable actuators if no effects were present on the device
+ *
* Returns 0 if device reported success, -ENOSPC if the device reported memory
* is full. Upon unknown response the function will retry for 60 times, if
* still unsuccessful -EIO is returned.
@@ -452,6 +645,9 @@ static int pidff_request_effect_upload(struct pidff_device *pidff, int efnum)
{
int j;
+ if (!pidff->effect_count)
+ pidff_reset(pidff);
+
pidff->create_new_effect_type->value[0] = efnum;
hid_hw_request(pidff->hid, pidff->reports[PID_CREATE_NEW_EFFECT],
HID_REQ_SET_REPORT);
@@ -471,6 +667,8 @@ static int pidff_request_effect_upload(struct pidff_device *pidff, int efnum)
hid_dbg(pidff->hid, "device reported free memory: %d bytes\n",
pidff->block_load[PID_RAM_POOL_AVAILABLE].value ?
pidff->block_load[PID_RAM_POOL_AVAILABLE].value[0] : -1);
+
+ pidff->effect_count++;
return 0;
}
if (pidff->block_load_status->value[0] ==
@@ -480,6 +678,11 @@ static int pidff_request_effect_upload(struct pidff_device *pidff, int efnum)
pidff->block_load[PID_RAM_POOL_AVAILABLE].value[0] : -1);
return -ENOSPC;
}
+ if (pidff->block_load_status->value[0] ==
+ pidff->status_id[PID_BLOCK_LOAD_ERROR]) {
+ hid_dbg(pidff->hid, "device error during effect creation\n");
+ return -EREMOTEIO;
+ }
}
hid_err(pidff->hid, "pid_block_load failed 60 times\n");
return -EIO;
@@ -498,7 +701,8 @@ static void pidff_playback_pid(struct pidff_device *pidff, int pid_id, int n)
} else {
pidff->effect_operation_status->value[0] =
pidff->operation_id[PID_EFFECT_START];
- pidff->effect_operation[PID_LOOP_COUNT].value[0] = n;
+ pidff->effect_operation[PID_LOOP_COUNT].value[0] =
+ pidff_clamp(n, pidff->effect_operation[PID_LOOP_COUNT].field);
}
hid_hw_request(pidff->hid, pidff->reports[PID_EFFECT_OPERATION],
@@ -511,20 +715,22 @@ static void pidff_playback_pid(struct pidff_device *pidff, int pid_id, int n)
static int pidff_playback(struct input_dev *dev, int effect_id, int value)
{
struct pidff_device *pidff = dev->ff->private;
-
pidff_playback_pid(pidff, pidff->pid_id[effect_id], value);
-
return 0;
}
/*
* Erase effect with PID id
+ * Decrease the device effect counter
*/
static void pidff_erase_pid(struct pidff_device *pidff, int pid_id)
{
pidff->block_free[PID_EFFECT_BLOCK_INDEX].value[0] = pid_id;
hid_hw_request(pidff->hid, pidff->reports[PID_BLOCK_FREE],
HID_REQ_SET_REPORT);
+
+ if (pidff->effect_count > 0)
+ pidff->effect_count--;
}
/*
@@ -537,8 +743,11 @@ static int pidff_erase_effect(struct input_dev *dev, int effect_id)
hid_dbg(pidff->hid, "starting to erase %d/%d\n",
effect_id, pidff->pid_id[effect_id]);
- /* Wait for the queue to clear. We do not want a full fifo to
- prevent the effect removal. */
+
+ /*
+ * Wait for the queue to clear. We do not want
+ * a full fifo to prevent the effect removal.
+ */
hid_hw_wait(pidff->hid);
pidff_playback_pid(pidff, pid_id, 0);
pidff_erase_pid(pidff, pid_id);
@@ -574,11 +783,9 @@ static int pidff_upload_effect(struct input_dev *dev, struct ff_effect *effect,
pidff_set_effect_report(pidff, effect);
if (!old || pidff_needs_set_constant(effect, old))
pidff_set_constant_force_report(pidff, effect);
- if (!old ||
- pidff_needs_set_envelope(&effect->u.constant.envelope,
- &old->u.constant.envelope))
- pidff_set_envelope_report(pidff,
- &effect->u.constant.envelope);
+ if (pidff_needs_set_envelope(&effect->u.constant.envelope,
+ old ? &old->u.constant.envelope : NULL))
+ pidff_set_envelope_report(pidff, &effect->u.constant.envelope);
break;
case FF_PERIODIC:
@@ -604,6 +811,9 @@ static int pidff_upload_effect(struct input_dev *dev, struct ff_effect *effect,
return -EINVAL;
}
+ if (pidff->quirks & HID_PIDFF_QUIRK_PERIODIC_SINE_ONLY)
+ type_id = PID_SINE;
+
error = pidff_request_effect_upload(pidff,
pidff->type_id[type_id]);
if (error)
@@ -613,11 +823,9 @@ static int pidff_upload_effect(struct input_dev *dev, struct ff_effect *effect,
pidff_set_effect_report(pidff, effect);
if (!old || pidff_needs_set_periodic(effect, old))
pidff_set_periodic_report(pidff, effect);
- if (!old ||
- pidff_needs_set_envelope(&effect->u.periodic.envelope,
- &old->u.periodic.envelope))
- pidff_set_envelope_report(pidff,
- &effect->u.periodic.envelope);
+ if (pidff_needs_set_envelope(&effect->u.periodic.envelope,
+ old ? &old->u.periodic.envelope : NULL))
+ pidff_set_envelope_report(pidff, &effect->u.periodic.envelope);
break;
case FF_RAMP:
@@ -631,56 +839,32 @@ static int pidff_upload_effect(struct input_dev *dev, struct ff_effect *effect,
pidff_set_effect_report(pidff, effect);
if (!old || pidff_needs_set_ramp(effect, old))
pidff_set_ramp_force_report(pidff, effect);
- if (!old ||
- pidff_needs_set_envelope(&effect->u.ramp.envelope,
- &old->u.ramp.envelope))
- pidff_set_envelope_report(pidff,
- &effect->u.ramp.envelope);
+ if (pidff_needs_set_envelope(&effect->u.ramp.envelope,
+ old ? &old->u.ramp.envelope : NULL))
+ pidff_set_envelope_report(pidff, &effect->u.ramp.envelope);
break;
case FF_SPRING:
- if (!old) {
- error = pidff_request_effect_upload(pidff,
- pidff->type_id[PID_SPRING]);
- if (error)
- return error;
- }
- if (!old || pidff_needs_set_effect(effect, old))
- pidff_set_effect_report(pidff, effect);
- if (!old || pidff_needs_set_condition(effect, old))
- pidff_set_condition_report(pidff, effect);
- break;
-
- case FF_FRICTION:
- if (!old) {
- error = pidff_request_effect_upload(pidff,
- pidff->type_id[PID_FRICTION]);
- if (error)
- return error;
- }
- if (!old || pidff_needs_set_effect(effect, old))
- pidff_set_effect_report(pidff, effect);
- if (!old || pidff_needs_set_condition(effect, old))
- pidff_set_condition_report(pidff, effect);
- break;
-
case FF_DAMPER:
- if (!old) {
- error = pidff_request_effect_upload(pidff,
- pidff->type_id[PID_DAMPER]);
- if (error)
- return error;
- }
- if (!old || pidff_needs_set_effect(effect, old))
- pidff_set_effect_report(pidff, effect);
- if (!old || pidff_needs_set_condition(effect, old))
- pidff_set_condition_report(pidff, effect);
- break;
-
case FF_INERTIA:
+ case FF_FRICTION:
if (!old) {
+ switch(effect->type) {
+ case FF_SPRING:
+ type_id = PID_SPRING;
+ break;
+ case FF_DAMPER:
+ type_id = PID_DAMPER;
+ break;
+ case FF_INERTIA:
+ type_id = PID_INERTIA;
+ break;
+ case FF_FRICTION:
+ type_id = PID_FRICTION;
+ break;
+ }
error = pidff_request_effect_upload(pidff,
- pidff->type_id[PID_INERTIA]);
+ pidff->type_id[type_id]);
if (error)
return error;
}
@@ -709,11 +893,7 @@ static int pidff_upload_effect(struct input_dev *dev, struct ff_effect *effect,
*/
static void pidff_set_gain(struct input_dev *dev, u16 gain)
{
- struct pidff_device *pidff = dev->ff->private;
-
- pidff_set(&pidff->device_gain[PID_DEVICE_GAIN_FIELD], gain);
- hid_hw_request(pidff->hid, pidff->reports[PID_DEVICE_GAIN],
- HID_REQ_SET_REPORT);
+ pidff_set_gain_report(dev->ff->private, gain);
}
static void pidff_autocenter(struct pidff_device *pidff, u16 magnitude)
@@ -736,7 +916,10 @@ static void pidff_autocenter(struct pidff_device *pidff, u16 magnitude)
pidff->set_effect[PID_TRIGGER_REPEAT_INT].value[0] = 0;
pidff_set(&pidff->set_effect[PID_GAIN], magnitude);
pidff->set_effect[PID_DIRECTION_ENABLE].value[0] = 1;
- pidff->set_effect[PID_START_DELAY].value[0] = 0;
+
+ /* Omit setting delay field if it's missing */
+ if (!(pidff->quirks & HID_PIDFF_QUIRK_MISSING_DELAY))
+ pidff->set_effect[PID_START_DELAY].value[0] = 0;
hid_hw_request(pidff->hid, pidff->reports[PID_SET_EFFECT],
HID_REQ_SET_REPORT);
@@ -747,9 +930,7 @@ static void pidff_autocenter(struct pidff_device *pidff, u16 magnitude)
*/
static void pidff_set_autocenter(struct input_dev *dev, u16 magnitude)
{
- struct pidff_device *pidff = dev->ff->private;
-
- pidff_autocenter(pidff, magnitude);
+ pidff_autocenter(dev->ff->private, magnitude);
}
/*
@@ -758,7 +939,13 @@ static void pidff_set_autocenter(struct input_dev *dev, u16 magnitude)
static int pidff_find_fields(struct pidff_usage *usage, const u8 *table,
struct hid_report *report, int count, int strict)
{
+ if (!report) {
+ pr_debug("pidff_find_fields, null report\n");
+ return -1;
+ }
+
int i, j, k, found;
+ int return_value = 0;
for (k = 0; k < count; k++) {
found = 0;
@@ -783,12 +970,22 @@ static int pidff_find_fields(struct pidff_usage *usage, const u8 *table,
if (found)
break;
}
- if (!found && strict) {
+ if (!found && table[k] == pidff_set_effect[PID_START_DELAY]) {
+ pr_debug("Delay field not found, but that's OK\n");
+ pr_debug("Setting MISSING_DELAY quirk\n");
+ return_value |= HID_PIDFF_QUIRK_MISSING_DELAY;
+ }
+ else if (!found && table[k] == pidff_set_condition[PID_PARAM_BLOCK_OFFSET]) {
+ pr_debug("PBO field not found, but that's OK\n");
+ pr_debug("Setting MISSING_PBO quirk\n");
+ return_value |= HID_PIDFF_QUIRK_MISSING_PBO;
+ }
+ else if (!found && strict) {
pr_debug("failed to locate %d\n", k);
return -1;
}
}
- return 0;
+ return return_value;
}
/*
@@ -871,6 +1068,11 @@ static int pidff_reports_ok(struct pidff_device *pidff)
static struct hid_field *pidff_find_special_field(struct hid_report *report,
int usage, int enforce_min)
{
+ if (!report) {
+ pr_debug("pidff_find_special_field, null report\n");
+ return NULL;
+ }
+
int i;
for (i = 0; i < report->maxfield; i++) {
@@ -923,22 +1125,24 @@ static int pidff_find_special_fields(struct pidff_device *pidff)
pidff->create_new_effect_type =
pidff_find_special_field(pidff->reports[PID_CREATE_NEW_EFFECT],
- 0x25, 1);
+ PID_EFFECT_TYPE, 1);
pidff->set_effect_type =
pidff_find_special_field(pidff->reports[PID_SET_EFFECT],
- 0x25, 1);
+ PID_EFFECT_TYPE, 1);
pidff->effect_direction =
pidff_find_special_field(pidff->reports[PID_SET_EFFECT],
- 0x57, 0);
+ PID_DIRECTION, 0);
pidff->device_control =
pidff_find_special_field(pidff->reports[PID_DEVICE_CONTROL],
- 0x96, 1);
+ PID_DEVICE_CONTROL_ARRAY,
+ !(pidff->quirks & HID_PIDFF_QUIRK_PERMISSIVE_CONTROL));
+
pidff->block_load_status =
pidff_find_special_field(pidff->reports[PID_BLOCK_LOAD],
- 0x8b, 1);
+ PID_BLOCK_LOAD_STATUS, 1);
pidff->effect_operation_status =
pidff_find_special_field(pidff->reports[PID_EFFECT_OPERATION],
- 0x78, 1);
+ PID_EFFECT_OPERATION_ARRAY, 1);
hid_dbg(pidff->hid, "search done\n");
@@ -967,10 +1171,6 @@ static int pidff_find_special_fields(struct pidff_device *pidff)
return -1;
}
- pidff_find_special_keys(pidff->control_id, pidff->device_control,
- pidff_device_control,
- sizeof(pidff_device_control));
-
PIDFF_FIND_SPECIAL_KEYS(control_id, device_control, device_control);
if (!PIDFF_FIND_SPECIAL_KEYS(type_id, create_new_effect_type,
@@ -1049,7 +1249,6 @@ static int pidff_find_effects(struct pidff_device *pidff,
set_bit(FF_FRICTION, dev->ffbit);
return 0;
-
}
#define PIDFF_FIND_FIELDS(name, report, strict) \
@@ -1062,12 +1261,19 @@ static int pidff_find_effects(struct pidff_device *pidff,
*/
static int pidff_init_fields(struct pidff_device *pidff, struct input_dev *dev)
{
- int envelope_ok = 0;
+ int status = 0;
- if (PIDFF_FIND_FIELDS(set_effect, PID_SET_EFFECT, 1)) {
+ /* Save info about the device not having the DELAY ffb field. */
+ status = PIDFF_FIND_FIELDS(set_effect, PID_SET_EFFECT, 1);
+ if (status == -1) {
hid_err(pidff->hid, "unknown set_effect report layout\n");
return -ENODEV;
}
+ pidff->quirks |= status;
+
+ if (status & HID_PIDFF_QUIRK_MISSING_DELAY)
+ hid_dbg(pidff->hid, "Adding MISSING_DELAY quirk\n");
+
PIDFF_FIND_FIELDS(block_load, PID_BLOCK_LOAD, 0);
if (!pidff->block_load[PID_EFFECT_BLOCK_INDEX].value) {
@@ -1085,13 +1291,10 @@ static int pidff_init_fields(struct pidff_device *pidff, struct input_dev *dev)
return -ENODEV;
}
- if (!PIDFF_FIND_FIELDS(set_envelope, PID_SET_ENVELOPE, 1))
- envelope_ok = 1;
-
if (pidff_find_special_fields(pidff) || pidff_find_effects(pidff, dev))
return -ENODEV;
- if (!envelope_ok) {
+ if (PIDFF_FIND_FIELDS(set_envelope, PID_SET_ENVELOPE, 1)) {
if (test_and_clear_bit(FF_CONSTANT, dev->ffbit))
hid_warn(pidff->hid,
"has constant effect but no envelope\n");
@@ -1116,16 +1319,20 @@ static int pidff_init_fields(struct pidff_device *pidff, struct input_dev *dev)
clear_bit(FF_RAMP, dev->ffbit);
}
- if ((test_bit(FF_SPRING, dev->ffbit) ||
- test_bit(FF_DAMPER, dev->ffbit) ||
- test_bit(FF_FRICTION, dev->ffbit) ||
- test_bit(FF_INERTIA, dev->ffbit)) &&
- PIDFF_FIND_FIELDS(set_condition, PID_SET_CONDITION, 1)) {
- hid_warn(pidff->hid, "unknown condition effect layout\n");
- clear_bit(FF_SPRING, dev->ffbit);
- clear_bit(FF_DAMPER, dev->ffbit);
- clear_bit(FF_FRICTION, dev->ffbit);
- clear_bit(FF_INERTIA, dev->ffbit);
+ if (test_bit(FF_SPRING, dev->ffbit) ||
+ test_bit(FF_DAMPER, dev->ffbit) ||
+ test_bit(FF_FRICTION, dev->ffbit) ||
+ test_bit(FF_INERTIA, dev->ffbit)) {
+ status = PIDFF_FIND_FIELDS(set_condition, PID_SET_CONDITION, 1);
+
+ if (status < 0) {
+ hid_warn(pidff->hid, "unknown condition effect layout\n");
+ clear_bit(FF_SPRING, dev->ffbit);
+ clear_bit(FF_DAMPER, dev->ffbit);
+ clear_bit(FF_FRICTION, dev->ffbit);
+ clear_bit(FF_INERTIA, dev->ffbit);
+ } else {
+ pidff->quirks |= status;
+ }
}
if (test_bit(FF_PERIODIC, dev->ffbit) &&
@@ -1143,46 +1350,6 @@ static int pidff_init_fields(struct pidff_device *pidff, struct input_dev *dev)
}
/*
- * Reset the device
- */
-static void pidff_reset(struct pidff_device *pidff)
-{
- struct hid_device *hid = pidff->hid;
- int i = 0;
-
- pidff->device_control->value[0] = pidff->control_id[PID_RESET];
- /* We reset twice as sometimes hid_wait_io isn't waiting long enough */
- hid_hw_request(hid, pidff->reports[PID_DEVICE_CONTROL], HID_REQ_SET_REPORT);
- hid_hw_wait(hid);
- hid_hw_request(hid, pidff->reports[PID_DEVICE_CONTROL], HID_REQ_SET_REPORT);
- hid_hw_wait(hid);
-
- pidff->device_control->value[0] =
- pidff->control_id[PID_ENABLE_ACTUATORS];
- hid_hw_request(hid, pidff->reports[PID_DEVICE_CONTROL], HID_REQ_SET_REPORT);
- hid_hw_wait(hid);
-
- /* pool report is sometimes messed up, refetch it */
- hid_hw_request(hid, pidff->reports[PID_POOL], HID_REQ_GET_REPORT);
- hid_hw_wait(hid);
-
- if (pidff->pool[PID_SIMULTANEOUS_MAX].value) {
- while (pidff->pool[PID_SIMULTANEOUS_MAX].value[0] < 2) {
- if (i++ > 20) {
- hid_warn(pidff->hid,
- "device reports %d simultaneous effects\n",
- pidff->pool[PID_SIMULTANEOUS_MAX].value[0]);
- break;
- }
- hid_dbg(pidff->hid, "pid_pool requested again\n");
- hid_hw_request(hid, pidff->reports[PID_POOL],
- HID_REQ_GET_REPORT);
- hid_hw_wait(hid);
- }
- }
-}
-
-/*
* Test if autocenter modification is using the supported method
*/
static int pidff_check_autocenter(struct pidff_device *pidff,
@@ -1206,24 +1373,23 @@ static int pidff_check_autocenter(struct pidff_device *pidff,
if (pidff->block_load[PID_EFFECT_BLOCK_INDEX].value[0] ==
pidff->block_load[PID_EFFECT_BLOCK_INDEX].field->logical_minimum + 1) {
- pidff_autocenter(pidff, 0xffff);
+ pidff_autocenter(pidff, U16_MAX);
set_bit(FF_AUTOCENTER, dev->ffbit);
} else {
hid_notice(pidff->hid,
"device has unknown autocenter control method\n");
}
-
pidff_erase_pid(pidff,
pidff->block_load[PID_EFFECT_BLOCK_INDEX].value[0]);
return 0;
-
}
/*
* Check if the device is PID and initialize it
+ * Set initial quirks
*/
-int hid_pidff_init(struct hid_device *hid)
+int hid_pidff_init_with_quirks(struct hid_device *hid, u32 initial_quirks)
{
struct pidff_device *pidff;
struct hid_input *hidinput = list_entry(hid->inputs.next,
@@ -1245,6 +1411,8 @@ int hid_pidff_init(struct hid_device *hid)
return -ENOMEM;
pidff->hid = hid;
+ pidff->quirks = initial_quirks;
+ pidff->effect_count = 0;
hid_device_io_start(hid);
@@ -1261,14 +1429,9 @@ int hid_pidff_init(struct hid_device *hid)
if (error)
goto fail;
- pidff_reset(pidff);
-
- if (test_bit(FF_GAIN, dev->ffbit)) {
- pidff_set(&pidff->device_gain[PID_DEVICE_GAIN_FIELD], 0xffff);
- hid_hw_request(hid, pidff->reports[PID_DEVICE_GAIN],
- HID_REQ_SET_REPORT);
- }
-
+ /* pool report is sometimes messed up, refetch it */
+ pidff_fetch_pool(pidff);
+ pidff_set_gain_report(pidff, U16_MAX);
error = pidff_check_autocenter(pidff, dev);
if (error)
goto fail;
@@ -1311,6 +1474,7 @@ int hid_pidff_init(struct hid_device *hid)
ff->playback = pidff_playback;
hid_info(dev, "Force feedback for USB HID PID devices by Anssi Hannula <anssi.hannula@gmail.com>\n");
+ hid_dbg(dev, "Active quirks mask: 0x%x\n", pidff->quirks);
hid_device_io_stop(hid);
@@ -1322,3 +1486,14 @@ int hid_pidff_init(struct hid_device *hid)
kfree(pidff);
return error;
}
+EXPORT_SYMBOL_GPL(hid_pidff_init_with_quirks);
+
+/*
+ * Check if the device is PID and initialize it
+ * Wrapper kept for compatibility with the old
+ * init function
+ */
+int hid_pidff_init(struct hid_device *hid)
+{
+ return hid_pidff_init_with_quirks(hid, 0);
+}
diff --git a/drivers/hid/usbhid/hid-pidff.h b/drivers/hid/usbhid/hid-pidff.h
new file mode 100644
index 000000000000..dda571e0a5bd
--- /dev/null
+++ b/drivers/hid/usbhid/hid-pidff.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef __HID_PIDFF_H
+#define __HID_PIDFF_H
+
+#include <linux/hid.h>
+
+/* HID PIDFF quirks */
+
+/* Delay field (0xA7) missing. Skip it during set effect report upload */
+#define HID_PIDFF_QUIRK_MISSING_DELAY BIT(0)
+
+/* Missing Parameter block offset (0x23). Skip it during SET_CONDITION
+ * report upload */
+#define HID_PIDFF_QUIRK_MISSING_PBO BIT(1)
+
+/* Initialise device control field even if logical_minimum != 1 */
+#define HID_PIDFF_QUIRK_PERMISSIVE_CONTROL BIT(2)
+
+/* Use fixed 0x4000 direction during SET_EFFECT report upload */
+#define HID_PIDFF_QUIRK_FIX_WHEEL_DIRECTION BIT(3)
+
+/* Force all periodic effects to be uploaded as SINE */
+#define HID_PIDFF_QUIRK_PERIODIC_SINE_ONLY BIT(4)
+
+#ifdef CONFIG_HID_PID
+int hid_pidff_init(struct hid_device *hid);
+int hid_pidff_init_with_quirks(struct hid_device *hid, u32 initial_quirks);
+#else
+#define hid_pidff_init NULL
+#define hid_pidff_init_with_quirks NULL
+#endif
+
+#endif
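A minimal sketch of how a HID driver is expected to consume this header: parse and start the device, then hand the device-specific quirk bits to hid_pidff_init_with_quirks(). The probe function and the quirk selection below are hypothetical, not part of the patch.

#include <linux/hid.h>
#include "hid-pidff.h"

static int example_ff_probe(struct hid_device *hdev,
			    const struct hid_device_id *id)
{
	int ret;

	ret = hid_parse(hdev);
	if (ret)
		return ret;

	/* Claim the device but keep force-feedback setup for ourselves */
	ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT & ~HID_CONNECT_FF);
	if (ret)
		return ret;

	/* Hypothetical device: omits the delay field, needs fixed direction */
	return hid_pidff_init_with_quirks(hdev,
					  HID_PIDFF_QUIRK_MISSING_DELAY |
					  HID_PIDFF_QUIRK_FIX_WHEEL_DIRECTION);
}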
diff --git a/drivers/hid/usbhid/usbkbd.c b/drivers/hid/usbhid/usbkbd.c
index c439ed2f16db..af6bc76dbf64 100644
--- a/drivers/hid/usbhid/usbkbd.c
+++ b/drivers/hid/usbhid/usbkbd.c
@@ -160,7 +160,7 @@ static int usb_kbd_event(struct input_dev *dev, unsigned int type,
return -1;
spin_lock_irqsave(&kbd->leds_lock, flags);
- kbd->newleds = (!!test_bit(LED_KANA, dev->led) << 3) | (!!test_bit(LED_COMPOSE, dev->led) << 3) |
+ kbd->newleds = (!!test_bit(LED_KANA, dev->led) << 4) | (!!test_bit(LED_COMPOSE, dev->led) << 3) |
(!!test_bit(LED_SCROLLL, dev->led) << 2) | (!!test_bit(LED_CAPSL, dev->led) << 1) |
(!!test_bit(LED_NUML, dev->led));
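For context on the shift change above: the HID boot-protocol keyboard LED output report assigns one bit per LED, which is why Kana belongs at bit 4 while Compose stays at bit 3. A reference-only sketch of that layout (the macro names are illustrative, not kernel defines):

#include <linux/bits.h>

#define EX_KBD_LED_NUML		BIT(0)
#define EX_KBD_LED_CAPSL	BIT(1)
#define EX_KBD_LED_SCROLLL	BIT(2)
#define EX_KBD_LED_COMPOSE	BIT(3)
#define EX_KBD_LED_KANA		BIT(4)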
diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c
index 8125383932ec..97393a3083ca 100644
--- a/drivers/hid/wacom_sys.c
+++ b/drivers/hid/wacom_sys.c
@@ -69,16 +69,27 @@ static void wacom_wac_queue_flush(struct hid_device *hdev,
struct kfifo_rec_ptr_2 *fifo)
{
while (!kfifo_is_empty(fifo)) {
- u8 buf[WACOM_PKGLEN_MAX];
- int size;
+ int size = kfifo_peek_len(fifo);
+ u8 *buf = kzalloc(size, GFP_KERNEL);
+ unsigned int count;
int err;
- size = kfifo_out(fifo, buf, sizeof(buf));
+ if (!buf) {
+ kfifo_skip(fifo);
+ continue;
+ }
+
+ count = kfifo_out(fifo, buf, size);
+ if (count != size) {
+ // Hard to say what is the "right" action in this
+ // circumstance. Skipping the entry and continuing
+ // to flush seems reasonable enough, however.
+ hid_warn(hdev, "%s: removed fifo entry with unexpected size\n",
+ __func__);
+ kfree(buf);
+ continue;
+ }
err = hid_report_raw_event(hdev, HID_INPUT_REPORT, buf, size, false);
if (err) {
hid_warn(hdev, "%s: unable to flush event due to error %d\n",
__func__, err);
}
+
+ kfree(buf);
}
}
@@ -158,13 +169,10 @@ static int wacom_raw_event(struct hid_device *hdev, struct hid_report *report,
if (wacom->wacom_wac.features.type == BOOTLOADER)
return 0;
- if (size > WACOM_PKGLEN_MAX)
- return 1;
-
if (wacom_wac_pen_serial_enforce(hdev, report, raw_data, size))
return -1;
- memcpy(wacom->wacom_wac.data, raw_data, size);
+ wacom->wacom_wac.data = raw_data;
wacom_wac_irq(&wacom->wacom_wac, size);
@@ -1286,6 +1294,7 @@ static void wacom_devm_kfifo_release(struct device *dev, void *res)
static int wacom_devm_kfifo_alloc(struct wacom *wacom)
{
struct wacom_wac *wacom_wac = &wacom->wacom_wac;
+ int fifo_size = min(PAGE_SIZE, 10 * wacom_wac->features.pktlen);
struct kfifo_rec_ptr_2 *pen_fifo;
int error;
@@ -1296,7 +1305,7 @@ static int wacom_devm_kfifo_alloc(struct wacom *wacom)
if (!pen_fifo)
return -ENOMEM;
- error = kfifo_alloc(pen_fifo, WACOM_PKGLEN_MAX, GFP_KERNEL);
+ error = kfifo_alloc(pen_fifo, fifo_size, GFP_KERNEL);
if (error) {
devres_free(pen_fifo);
return error;
@@ -2352,12 +2361,14 @@ static int wacom_parse_and_register(struct wacom *wacom, bool wireless)
unsigned int connect_mask = HID_CONNECT_HIDRAW;
features->pktlen = wacom_compute_pktlen(hdev);
- if (features->pktlen > WACOM_PKGLEN_MAX)
- return -EINVAL;
if (!devres_open_group(&hdev->dev, wacom, GFP_KERNEL))
return -ENOMEM;
+ error = wacom_devm_kfifo_alloc(wacom);
+ if (error)
+ goto fail;
+
wacom->resources = true;
error = wacom_allocate_inputs(wacom);
@@ -2821,10 +2832,6 @@ static int wacom_probe(struct hid_device *hdev,
if (features->check_for_hid_type && features->hid_type != hdev->type)
return -ENODEV;
- error = wacom_devm_kfifo_alloc(wacom);
- if (error)
- return error;
-
wacom_wac->hid_data.inputmode = -1;
wacom_wac->mode_report = -1;
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index b60bfafc6a8f..5107a676e24f 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -1201,12 +1201,10 @@ static void wacom_intuos_bt_process_data(struct wacom_wac *wacom,
static int wacom_intuos_bt_irq(struct wacom_wac *wacom, size_t len)
{
- unsigned char data[WACOM_PKGLEN_MAX];
+ u8 *data = kmemdup(wacom->data, len, GFP_KERNEL);
int i = 1;
unsigned power_raw, battery_capacity, bat_charging, ps_connected;
- memcpy(data, wacom->data, len);
-
switch (data[0]) {
case 0x04:
wacom_intuos_bt_process_data(wacom, data + i);
@@ -1230,8 +1228,10 @@ static int wacom_intuos_bt_irq(struct wacom_wac *wacom, size_t len)
dev_dbg(wacom->pen_input->dev.parent,
"Unknown report: %d,%d size:%zu\n",
data[0], data[1], len);
- return 0;
+ break;
}
+
+ kfree(data);
return 0;
}
diff --git a/drivers/hid/wacom_wac.h b/drivers/hid/wacom_wac.h
index 0c3c6a6aaae9..d4f7d8ca1e7e 100644
--- a/drivers/hid/wacom_wac.h
+++ b/drivers/hid/wacom_wac.h
@@ -7,9 +7,6 @@
#include <linux/hid.h>
#include <linux/kfifo.h>
-/* maximum packet length for USB/BT devices */
-#define WACOM_PKGLEN_MAX 361
-
#define WACOM_NAME_MAX 64
#define WACOM_MAX_REMOTES 5
#define WACOM_STATUS_UNKNOWN 255
@@ -277,7 +274,7 @@ struct wacom_features {
unsigned touch_max;
int oVid;
int oPid;
- int pktlen;
+ unsigned int pktlen;
bool check_for_hid_type;
int hid_type;
};
@@ -341,7 +338,7 @@ struct wacom_wac {
char pen_name[WACOM_NAME_MAX];
char touch_name[WACOM_NAME_MAX];
char pad_name[WACOM_NAME_MAX];
- unsigned char data[WACOM_PKGLEN_MAX];
+ u8 *data;
int tool[2];
int id[2];
__u64 serial[2];
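The wacom changes above replace the fixed WACOM_PKGLEN_MAX buffer with per-record sizing. A standalone sketch of the record-kfifo pattern they rely on (the helper name is hypothetical): kfifo_peek_len() reports the size of the next record so the consumer can allocate exactly that much before copying it out.

#include <linux/kfifo.h>
#include <linux/slab.h>

static void drain_records(struct kfifo_rec_ptr_2 *fifo)
{
	while (!kfifo_is_empty(fifo)) {
		unsigned int len = kfifo_peek_len(fifo);
		u8 *rec = kmalloc(len, GFP_KERNEL);

		if (!rec) {
			/* cannot copy this record, drop it and move on */
			kfifo_skip(fifo);
			continue;
		}
		if (kfifo_out(fifo, rec, len) == len)
			pr_debug("record of %u bytes\n", len);
		kfree(rec);
	}
}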
diff --git a/drivers/hv/Kconfig b/drivers/hv/Kconfig
index 862c47b191af..6c1416167bd2 100644
--- a/drivers/hv/Kconfig
+++ b/drivers/hv/Kconfig
@@ -55,4 +55,21 @@ config HYPERV_BALLOON
help
Select this option to enable Hyper-V Balloon driver.
+config MSHV_ROOT
+ tristate "Microsoft Hyper-V root partition support"
+ depends on HYPERV && (X86_64 || ARM64)
+ depends on !HYPERV_VTL_MODE
+ # The hypervisor interface operates on 4k pages. Enforcing it here
+ # simplifies many assumptions in the root partition code.
+ # e.g. When withdrawing memory, the hypervisor gives back 4k pages in
+ # no particular order, making it impossible to reassemble larger pages
+ depends on PAGE_SIZE_4KB
+ select EVENTFD
+ default n
+ help
+ Select this option to enable support for booting and running as root
+ partition on Microsoft Hyper-V.
+
+ If unsure, say N.
+
endmenu
diff --git a/drivers/hv/Makefile b/drivers/hv/Makefile
index b992c0ed182b..976189c725dc 100644
--- a/drivers/hv/Makefile
+++ b/drivers/hv/Makefile
@@ -2,6 +2,7 @@
obj-$(CONFIG_HYPERV) += hv_vmbus.o
obj-$(CONFIG_HYPERV_UTILS) += hv_utils.o
obj-$(CONFIG_HYPERV_BALLOON) += hv_balloon.o
+obj-$(CONFIG_MSHV_ROOT) += mshv_root.o
CFLAGS_hv_trace.o = -I$(src)
CFLAGS_hv_balloon.o = -I$(src)
@@ -11,6 +12,9 @@ hv_vmbus-y := vmbus_drv.o \
channel_mgmt.o ring_buffer.o hv_trace.o
hv_vmbus-$(CONFIG_HYPERV_TESTING) += hv_debugfs.o
hv_utils-y := hv_util.o hv_kvp.o hv_snapshot.o hv_utils_transport.o
+mshv_root-y := mshv_root_main.o mshv_synic.o mshv_eventfd.o mshv_irq.o \
+ mshv_root_hv_call.o mshv_portid_table.o
# Code that must be built-in
obj-$(subst m,y,$(CONFIG_HYPERV)) += hv_common.o
+obj-$(subst m,y,$(CONFIG_MSHV_ROOT)) += hv_proc.o mshv_common.o
diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
index 36d9ba097ff5..308c8f279df8 100644
--- a/drivers/hv/hv.c
+++ b/drivers/hv/hv.c
@@ -144,7 +144,7 @@ int hv_synic_alloc(void)
* Synic message and event pages are allocated by paravisor.
* Skip these pages allocation here.
*/
- if (!ms_hyperv.paravisor_present && !hv_root_partition) {
+ if (!ms_hyperv.paravisor_present && !hv_root_partition()) {
hv_cpu->synic_message_page =
(void *)get_zeroed_page(GFP_ATOMIC);
if (!hv_cpu->synic_message_page) {
@@ -272,7 +272,7 @@ void hv_synic_enable_regs(unsigned int cpu)
simp.as_uint64 = hv_get_msr(HV_MSR_SIMP);
simp.simp_enabled = 1;
- if (ms_hyperv.paravisor_present || hv_root_partition) {
+ if (ms_hyperv.paravisor_present || hv_root_partition()) {
/* Mask out vTOM bit. ioremap_cache() maps decrypted */
u64 base = (simp.base_simp_gpa << HV_HYP_PAGE_SHIFT) &
~ms_hyperv.shared_gpa_boundary;
@@ -291,7 +291,7 @@ void hv_synic_enable_regs(unsigned int cpu)
siefp.as_uint64 = hv_get_msr(HV_MSR_SIEFP);
siefp.siefp_enabled = 1;
- if (ms_hyperv.paravisor_present || hv_root_partition) {
+ if (ms_hyperv.paravisor_present || hv_root_partition()) {
/* Mask out vTOM bit. ioremap_cache() maps decrypted */
u64 base = (siefp.base_siefp_gpa << HV_HYP_PAGE_SHIFT) &
~ms_hyperv.shared_gpa_boundary;
@@ -313,17 +313,7 @@ void hv_synic_enable_regs(unsigned int cpu)
shared_sint.vector = vmbus_interrupt;
shared_sint.masked = false;
-
- /*
- * On architectures where Hyper-V doesn't support AEOI (e.g., ARM64),
- * it doesn't provide a recommendation flag and AEOI must be disabled.
- */
-#ifdef HV_DEPRECATING_AEOI_RECOMMENDED
- shared_sint.auto_eoi =
- !(ms_hyperv.hints & HV_DEPRECATING_AEOI_RECOMMENDED);
-#else
- shared_sint.auto_eoi = 0;
-#endif
+ shared_sint.auto_eoi = hv_recommend_using_aeoi();
hv_set_msr(HV_MSR_SINT0 + VMBUS_MESSAGE_SINT, shared_sint.as_uint64);
/* Enable the global synic bit */
@@ -367,7 +357,7 @@ void hv_synic_disable_regs(unsigned int cpu)
* addresses.
*/
simp.simp_enabled = 0;
- if (ms_hyperv.paravisor_present || hv_root_partition) {
+ if (ms_hyperv.paravisor_present || hv_root_partition()) {
iounmap(hv_cpu->synic_message_page);
hv_cpu->synic_message_page = NULL;
} else {
@@ -379,7 +369,7 @@ void hv_synic_disable_regs(unsigned int cpu)
siefp.as_uint64 = hv_get_msr(HV_MSR_SIEFP);
siefp.siefp_enabled = 0;
- if (ms_hyperv.paravisor_present || hv_root_partition) {
+ if (ms_hyperv.paravisor_present || hv_root_partition()) {
iounmap(hv_cpu->synic_event_page);
hv_cpu->synic_event_page = NULL;
} else {
@@ -433,13 +423,47 @@ retry:
return pending;
}
+static int hv_pick_new_cpu(struct vmbus_channel *channel)
+{
+ int ret = -EBUSY;
+ int start;
+ int cpu;
+
+ lockdep_assert_cpus_held();
+ lockdep_assert_held(&vmbus_connection.channel_mutex);
+
+ /*
+ * We can't assume that the relevant interrupts will be sent before
+ * the cpu is offlined on older versions of hyperv.
+ */
+ if (vmbus_proto_version < VERSION_WIN10_V5_3)
+ return -EBUSY;
+
+ start = get_random_u32_below(nr_cpu_ids);
+
+ for_each_cpu_wrap(cpu, cpu_online_mask, start) {
+ if (channel->target_cpu == cpu ||
+ channel->target_cpu == VMBUS_CONNECT_CPU)
+ continue;
+
+ ret = vmbus_channel_set_cpu(channel, cpu);
+ if (!ret)
+ break;
+ }
+
+ if (ret)
+ ret = vmbus_channel_set_cpu(channel, VMBUS_CONNECT_CPU);
+
+ return ret;
+}
+
/*
* hv_synic_cleanup - Cleanup routine for hv_synic_init().
*/
int hv_synic_cleanup(unsigned int cpu)
{
struct vmbus_channel *channel, *sc;
- bool channel_found = false;
+ int ret = 0;
if (vmbus_connection.conn_state != CONNECTED)
goto always_cleanup;
@@ -456,38 +480,34 @@ int hv_synic_cleanup(unsigned int cpu)
/*
* Search for channels which are bound to the CPU we're about to
- * cleanup. In case we find one and vmbus is still connected, we
- * fail; this will effectively prevent CPU offlining.
- *
- * TODO: Re-bind the channels to different CPUs.
+ * cleanup.
*/
mutex_lock(&vmbus_connection.channel_mutex);
list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
if (channel->target_cpu == cpu) {
- channel_found = true;
- break;
+ ret = hv_pick_new_cpu(channel);
+ if (ret) {
+ mutex_unlock(&vmbus_connection.channel_mutex);
+ return ret;
+ }
}
list_for_each_entry(sc, &channel->sc_list, sc_list) {
if (sc->target_cpu == cpu) {
- channel_found = true;
- break;
+ ret = hv_pick_new_cpu(sc);
+ if (ret) {
+ mutex_unlock(&vmbus_connection.channel_mutex);
+ return ret;
+ }
}
}
- if (channel_found)
- break;
}
mutex_unlock(&vmbus_connection.channel_mutex);
- if (channel_found)
- return -EBUSY;
-
/*
- * channel_found == false means that any channels that were previously
- * assigned to the CPU have been reassigned elsewhere with a call of
- * vmbus_send_modifychannel(). Scan the event flags page looking for
- * bits that are set and waiting with a timeout for vmbus_chan_sched()
- * to process such bits. If bits are still set after this operation
- * and VMBus is connected, fail the CPU offlining operation.
+ * Scan the event flags page looking for bits that are set and waiting
+ * with a timeout for vmbus_chan_sched() to process such bits. If bits
+ * are still set after this operation and VMBus is connected, fail the
+ * CPU offlining operation.
*/
if (vmbus_proto_version >= VERSION_WIN10_V4_1 && hv_synic_event_pending())
return -EBUSY;
@@ -497,5 +517,5 @@ always_cleanup:
hv_synic_disable_regs(cpu);
- return 0;
+ return ret;
}
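A condensed sketch of the iteration pattern hv_pick_new_cpu() introduces above: start at a random online CPU and wrap around the mask so rebound channels spread out instead of piling onto CPU 0. The helper name is illustrative.

#include <linux/cpumask.h>
#include <linux/random.h>

static int pick_any_other_online_cpu(int avoid_cpu)
{
	int start = get_random_u32_below(nr_cpu_ids);
	int cpu;

	for_each_cpu_wrap(cpu, cpu_online_mask, start) {
		if (cpu != avoid_cpu)
			return cpu;
	}

	return -EBUSY;	/* no other CPU is online */
}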
diff --git a/drivers/hv/hv_common.c b/drivers/hv/hv_common.c
index f2e6f55d6ca6..b3b11be11650 100644
--- a/drivers/hv/hv_common.c
+++ b/drivers/hv/hv_common.c
@@ -31,8 +31,14 @@
#include <hyperv/hvhdk.h>
#include <asm/mshyperv.h>
+u64 hv_current_partition_id = HV_PARTITION_ID_SELF;
+EXPORT_SYMBOL_GPL(hv_current_partition_id);
+
+enum hv_partition_type hv_curr_partition_type;
+EXPORT_SYMBOL_GPL(hv_curr_partition_type);
+
/*
- * hv_root_partition, ms_hyperv and hv_nested are defined here with other
+ * ms_hyperv and hv_nested are defined here with other
* Hyper-V specific globals so they are shared across all architectures and are
* built only when CONFIG_HYPERV is defined. But on x86,
* ms_hyperv_init_platform() is built even when CONFIG_HYPERV is not
@@ -40,9 +46,6 @@
* here, allowing for an overriding definition in the module containing
* ms_hyperv_init_platform().
*/
-bool __weak hv_root_partition;
-EXPORT_SYMBOL_GPL(hv_root_partition);
-
bool __weak hv_nested;
EXPORT_SYMBOL_GPL(hv_nested);
@@ -66,6 +69,16 @@ static void hv_kmsg_dump_unregister(void);
static struct ctl_table_header *hv_ctl_table_hdr;
/*
+ * Per-cpu array holding the tail pointer for the SynIC event ring buffer
+ * for each SINT.
+ *
+ * We cannot maintain this in the mshv driver because the tail pointer should
+ * persist even if the mshv driver is unloaded.
+ */
+u8 * __percpu *hv_synic_eventring_tail;
+EXPORT_SYMBOL_GPL(hv_synic_eventring_tail);
+
+/*
* Hyper-V specific initialization and shutdown code that is
* common across all architectures. Called from architecture
* specific initialization functions.
@@ -87,6 +100,9 @@ void __init hv_common_free(void)
free_percpu(hyperv_pcpu_input_arg);
hyperv_pcpu_input_arg = NULL;
+
+ free_percpu(hv_synic_eventring_tail);
+ hv_synic_eventring_tail = NULL;
}
/*
@@ -280,7 +296,26 @@ static void hv_kmsg_dump_register(void)
static inline bool hv_output_page_exists(void)
{
- return hv_root_partition || IS_ENABLED(CONFIG_HYPERV_VTL_MODE);
+ return hv_root_partition() || IS_ENABLED(CONFIG_HYPERV_VTL_MODE);
+}
+
+void __init hv_get_partition_id(void)
+{
+ struct hv_output_get_partition_id *output;
+ unsigned long flags;
+ u64 status, pt_id;
+
+ local_irq_save(flags);
+ output = *this_cpu_ptr(hyperv_pcpu_input_arg);
+ status = hv_do_hypercall(HVCALL_GET_PARTITION_ID, NULL, output);
+ pt_id = output->partition_id;
+ local_irq_restore(flags);
+
+ if (hv_result_success(status))
+ hv_current_partition_id = pt_id;
+ else
+ pr_err("Hyper-V: failed to get partition ID: %#x\n",
+ hv_result(status));
}
int __init hv_common_init(void)
@@ -350,6 +385,11 @@ int __init hv_common_init(void)
BUG_ON(!hyperv_pcpu_output_arg);
}
+ if (hv_root_partition()) {
+ hv_synic_eventring_tail = alloc_percpu(u8 *);
+ BUG_ON(!hv_synic_eventring_tail);
+ }
+
hv_vp_index = kmalloc_array(nr_cpu_ids, sizeof(*hv_vp_index),
GFP_KERNEL);
if (!hv_vp_index) {
@@ -438,11 +478,12 @@ error:
int hv_common_cpu_init(unsigned int cpu)
{
void **inputarg, **outputarg;
+ u8 **synic_eventring_tail;
u64 msr_vp_index;
gfp_t flags;
const int pgcount = hv_output_page_exists() ? 2 : 1;
void *mem;
- int ret;
+ int ret = 0;
/* hv_cpu_init() can be called with IRQs disabled from hv_resume() */
flags = irqs_disabled() ? GFP_ATOMIC : GFP_KERNEL;
@@ -450,8 +491,8 @@ int hv_common_cpu_init(unsigned int cpu)
inputarg = (void **)this_cpu_ptr(hyperv_pcpu_input_arg);
/*
- * hyperv_pcpu_input_arg and hyperv_pcpu_output_arg memory is already
- * allocated if this CPU was previously online and then taken offline
+ * The per-cpu memory is already allocated if this CPU was previously
+ * online and then taken offline
*/
if (!*inputarg) {
mem = kmalloc(pgcount * HV_HYP_PAGE_SIZE, flags);
@@ -498,11 +539,21 @@ int hv_common_cpu_init(unsigned int cpu)
if (msr_vp_index > hv_max_vp_index)
hv_max_vp_index = msr_vp_index;
- return 0;
+ if (hv_root_partition()) {
+ synic_eventring_tail = (u8 **)this_cpu_ptr(hv_synic_eventring_tail);
+ *synic_eventring_tail = kcalloc(HV_SYNIC_SINT_COUNT,
+ sizeof(u8), flags);
+ /* No need to unwind any of the above on failure here */
+ if (unlikely(!*synic_eventring_tail))
+ ret = -ENOMEM;
+ }
+
+ return ret;
}
int hv_common_cpu_die(unsigned int cpu)
{
+ u8 **synic_eventring_tail;
/*
* The hyperv_pcpu_input_arg and hyperv_pcpu_output_arg memory
* is not freed when the CPU goes offline as the hyperv_pcpu_input_arg
@@ -515,6 +566,10 @@ int hv_common_cpu_die(unsigned int cpu)
* originally allocated memory is reused in hv_common_cpu_init().
*/
+ synic_eventring_tail = this_cpu_ptr(hv_synic_eventring_tail);
+ kfree(*synic_eventring_tail);
+ *synic_eventring_tail = NULL;
+
return 0;
}
@@ -572,7 +627,7 @@ EXPORT_SYMBOL_GPL(hv_setup_dma_ops);
bool hv_is_hibernation_supported(void)
{
- return !hv_root_partition && acpi_sleep_state_supported(ACPI_STATE_S4);
+ return !hv_root_partition() && acpi_sleep_state_supported(ACPI_STATE_S4);
}
EXPORT_SYMBOL_GPL(hv_is_hibernation_supported);
@@ -625,6 +680,11 @@ void __weak hv_remove_vmbus_handler(void)
}
EXPORT_SYMBOL_GPL(hv_remove_vmbus_handler);
+void __weak hv_setup_mshv_handler(void (*handler)(void))
+{
+}
+EXPORT_SYMBOL_GPL(hv_setup_mshv_handler);
+
void __weak hv_setup_kexec_handler(void (*handler)(void))
{
}
@@ -661,3 +721,121 @@ u64 __weak hv_tdx_hypercall(u64 control, u64 param1, u64 param2)
return HV_STATUS_INVALID_PARAMETER;
}
EXPORT_SYMBOL_GPL(hv_tdx_hypercall);
+
+void hv_identify_partition_type(void)
+{
+ /* Assume guest role */
+ hv_curr_partition_type = HV_PARTITION_TYPE_GUEST;
+ /*
+ * Check partition creation and cpu management privileges
+ *
+ * Hyper-V should never specify running as root and as a Confidential
+ * VM. But to protect against a compromised/malicious Hyper-V trying
+ * to exploit root behavior to expose Confidential VM memory, ignore
+ * the root partition setting if also a Confidential VM.
+ */
+ if ((ms_hyperv.priv_high & HV_CREATE_PARTITIONS) &&
+ (ms_hyperv.priv_high & HV_CPU_MANAGEMENT) &&
+ !(ms_hyperv.priv_high & HV_ISOLATION)) {
+ pr_info("Hyper-V: running as root partition\n");
+ if (IS_ENABLED(CONFIG_MSHV_ROOT))
+ hv_curr_partition_type = HV_PARTITION_TYPE_ROOT;
+ else
+ pr_crit("Hyper-V: CONFIG_MSHV_ROOT not enabled!\n");
+ }
+}
+
+struct hv_status_info {
+ char *string;
+ int errno;
+ u16 code;
+};
+
+/*
+ * Note on the errno mappings:
+ * A failed hypercall is usually only recoverable (or loggable) near
+ * the call site where the HV_STATUS_* code is known. So the errno
+ * it gets converted to is not too useful further up the stack.
+ * Provide a few mappings that could be useful, and revert to -EIO
+ * as a fallback.
+ */
+static const struct hv_status_info hv_status_infos[] = {
+#define _STATUS_INFO(status, errno) { #status, (errno), (status) }
+ _STATUS_INFO(HV_STATUS_SUCCESS, 0),
+ _STATUS_INFO(HV_STATUS_INVALID_HYPERCALL_CODE, -EINVAL),
+ _STATUS_INFO(HV_STATUS_INVALID_HYPERCALL_INPUT, -EINVAL),
+ _STATUS_INFO(HV_STATUS_INVALID_ALIGNMENT, -EIO),
+ _STATUS_INFO(HV_STATUS_INVALID_PARAMETER, -EINVAL),
+ _STATUS_INFO(HV_STATUS_ACCESS_DENIED, -EIO),
+ _STATUS_INFO(HV_STATUS_INVALID_PARTITION_STATE, -EIO),
+ _STATUS_INFO(HV_STATUS_OPERATION_DENIED, -EIO),
+ _STATUS_INFO(HV_STATUS_UNKNOWN_PROPERTY, -EIO),
+ _STATUS_INFO(HV_STATUS_PROPERTY_VALUE_OUT_OF_RANGE, -EIO),
+ _STATUS_INFO(HV_STATUS_INSUFFICIENT_MEMORY, -ENOMEM),
+ _STATUS_INFO(HV_STATUS_INVALID_PARTITION_ID, -EINVAL),
+ _STATUS_INFO(HV_STATUS_INVALID_VP_INDEX, -EINVAL),
+ _STATUS_INFO(HV_STATUS_NOT_FOUND, -EIO),
+ _STATUS_INFO(HV_STATUS_INVALID_PORT_ID, -EINVAL),
+ _STATUS_INFO(HV_STATUS_INVALID_CONNECTION_ID, -EINVAL),
+ _STATUS_INFO(HV_STATUS_INSUFFICIENT_BUFFERS, -EIO),
+ _STATUS_INFO(HV_STATUS_NOT_ACKNOWLEDGED, -EIO),
+ _STATUS_INFO(HV_STATUS_INVALID_VP_STATE, -EIO),
+ _STATUS_INFO(HV_STATUS_NO_RESOURCES, -EIO),
+ _STATUS_INFO(HV_STATUS_PROCESSOR_FEATURE_NOT_SUPPORTED, -EIO),
+ _STATUS_INFO(HV_STATUS_INVALID_LP_INDEX, -EINVAL),
+ _STATUS_INFO(HV_STATUS_INVALID_REGISTER_VALUE, -EINVAL),
+ _STATUS_INFO(HV_STATUS_OPERATION_FAILED, -EIO),
+ _STATUS_INFO(HV_STATUS_TIME_OUT, -EIO),
+ _STATUS_INFO(HV_STATUS_CALL_PENDING, -EIO),
+ _STATUS_INFO(HV_STATUS_VTL_ALREADY_ENABLED, -EIO),
+#undef _STATUS_INFO
+};
+
+static inline const struct hv_status_info *find_hv_status_info(u64 hv_status)
+{
+ int i;
+ u16 code = hv_result(hv_status);
+
+ for (i = 0; i < ARRAY_SIZE(hv_status_infos); ++i) {
+ const struct hv_status_info *info = &hv_status_infos[i];
+
+ if (info->code == code)
+ return info;
+ }
+
+ return NULL;
+}
+
+/* Convert a hypercall result into a linux-friendly error code. */
+int hv_result_to_errno(u64 status)
+{
+ const struct hv_status_info *info;
+
+ /* hv_do_hypercall() may return U64_MAX, hypercalls aren't possible */
+ if (unlikely(status == U64_MAX))
+ return -EOPNOTSUPP;
+
+ info = find_hv_status_info(status);
+ if (info)
+ return info->errno;
+
+ return -EIO;
+}
+EXPORT_SYMBOL_GPL(hv_result_to_errno);
+
+const char *hv_result_to_string(u64 status)
+{
+ const struct hv_status_info *info;
+
+ if (unlikely(status == U64_MAX))
+ return "Hypercall page missing!";
+
+ info = find_hv_status_info(status);
+ if (info)
+ return info->string;
+
+ return "Unknown";
+}
+EXPORT_SYMBOL_GPL(hv_result_to_string);
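Illustrative call-site pattern for the two helpers added above (a sketch only; the wrapper name is hypothetical): log the symbolic hypercall status, then hand a conventional errno back to the caller.

static int example_do_hypercall(u64 control, void *input, void *output)
{
	u64 status = hv_do_hypercall(control, input, output);

	if (!hv_result_success(status)) {
		pr_err("hypercall %#llx failed: %s\n", control,
		       hv_result_to_string(status));
		return hv_result_to_errno(status);
	}

	return 0;
}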
diff --git a/arch/x86/hyperv/hv_proc.c b/drivers/hv/hv_proc.c
index ac4c834d4435..7d7ecb6f6137 100644
--- a/arch/x86/hyperv/hv_proc.c
+++ b/drivers/hv/hv_proc.c
@@ -6,11 +6,7 @@
#include <linux/slab.h>
#include <linux/cpuhotplug.h>
#include <linux/minmax.h>
-#include <asm/hypervisor.h>
#include <asm/mshyperv.h>
-#include <asm/apic.h>
-
-#include <asm/trace/hyperv.h>
/*
* See struct hv_deposit_memory. The first u64 is partition ID, the rest
@@ -91,8 +87,8 @@ int hv_call_deposit_pages(int node, u64 partition_id, u32 num_pages)
page_count, 0, input_page, NULL);
local_irq_restore(flags);
if (!hv_result_success(status)) {
- pr_err("Failed to deposit pages: %lld\n", status);
- ret = hv_result(status);
+ hv_status_err(status, "\n");
+ ret = hv_result_to_errno(status);
goto err_free_allocations;
}
@@ -111,6 +107,7 @@ free_buf:
kfree(counts);
return ret;
}
+EXPORT_SYMBOL_GPL(hv_call_deposit_pages);
int hv_call_add_logical_proc(int node, u32 lp_index, u32 apic_id)
{
@@ -118,7 +115,7 @@ int hv_call_add_logical_proc(int node, u32 lp_index, u32 apic_id)
struct hv_output_add_logical_processor *output;
u64 status;
unsigned long flags;
- int ret = HV_STATUS_SUCCESS;
+ int ret = 0;
/*
* When adding a logical processor, the hypervisor may return
@@ -141,9 +138,9 @@ int hv_call_add_logical_proc(int node, u32 lp_index, u32 apic_id)
if (hv_result(status) != HV_STATUS_INSUFFICIENT_MEMORY) {
if (!hv_result_success(status)) {
- pr_err("%s: cpu %u apic ID %u, %lld\n", __func__,
- lp_index, apic_id, status);
- ret = hv_result(status);
+ hv_status_err(status, "cpu %u apic ID: %u\n",
+ lp_index, apic_id);
+ ret = hv_result_to_errno(status);
}
break;
}
@@ -158,7 +155,7 @@ int hv_call_create_vp(int node, u64 partition_id, u32 vp_index, u32 flags)
struct hv_create_vp *input;
u64 status;
unsigned long irq_flags;
- int ret = HV_STATUS_SUCCESS;
+ int ret = 0;
/* Root VPs don't seem to need pages deposited */
if (partition_id != hv_current_partition_id) {
@@ -183,9 +180,9 @@ int hv_call_create_vp(int node, u64 partition_id, u32 vp_index, u32 flags)
if (hv_result(status) != HV_STATUS_INSUFFICIENT_MEMORY) {
if (!hv_result_success(status)) {
- pr_err("%s: vcpu %u, lp %u, %lld\n", __func__,
- vp_index, flags, status);
- ret = hv_result(status);
+ hv_status_err(status, "vcpu: %u, lp: %u\n",
+ vp_index, flags);
+ ret = hv_result_to_errno(status);
}
break;
}
@@ -195,4 +192,4 @@ int hv_call_create_vp(int node, u64 partition_id, u32 vp_index, u32 flags)
return ret;
}
-
+EXPORT_SYMBOL_GPL(hv_call_create_vp);
diff --git a/drivers/hv/mshv.h b/drivers/hv/mshv.h
new file mode 100644
index 000000000000..0340a67acd0a
--- /dev/null
+++ b/drivers/hv/mshv.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2023, Microsoft Corporation.
+ */
+
+#ifndef _MSHV_H_
+#define _MSHV_H_
+
+#include <linux/stddef.h>
+#include <linux/string.h>
+#include <hyperv/hvhdk.h>
+
+#define mshv_field_nonzero(STRUCT, MEMBER) \
+ memchr_inv(&((STRUCT).MEMBER), \
+ 0, sizeof_field(typeof(STRUCT), MEMBER))
+
+int hv_call_get_vp_registers(u32 vp_index, u64 partition_id, u16 count,
+ union hv_input_vtl input_vtl,
+ struct hv_register_assoc *registers);
+
+int hv_call_set_vp_registers(u32 vp_index, u64 partition_id, u16 count,
+ union hv_input_vtl input_vtl,
+ struct hv_register_assoc *registers);
+
+int hv_call_get_partition_property(u64 partition_id, u64 property_code,
+ u64 *property_value);
+
+int mshv_do_pre_guest_mode_work(ulong th_flags);
+
+#endif /* _MSHV_H */
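A short sketch of how mshv_field_nonzero() is meant to be used: reject user-supplied structures whose reserved bytes are not zero. The struct and function names here are hypothetical.

#include <linux/errno.h>
#include <linux/types.h>
#include "mshv.h"

struct example_user_args {	/* hypothetical UAPI-style argument block */
	u32 flags;
	u8 rsvd[4];
};

static int example_validate(const struct example_user_args *args)
{
	if (mshv_field_nonzero(*args, rsvd))
		return -EINVAL;	/* reserved bytes must be zero */

	return 0;
}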
diff --git a/drivers/hv/mshv_common.c b/drivers/hv/mshv_common.c
new file mode 100644
index 000000000000..2575e6d7a71f
--- /dev/null
+++ b/drivers/hv/mshv_common.c
@@ -0,0 +1,161 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2024, Microsoft Corporation.
+ *
+ * This file contains functions that will be called from one or more modules.
+ * If any of these modules are configured to build, this file is built and just
+ * statically linked in.
+ *
+ * Authors: Microsoft Linux virtualization team
+ */
+
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <asm/mshyperv.h>
+#include <linux/resume_user_mode.h>
+
+#include "mshv.h"
+
+#define HV_GET_REGISTER_BATCH_SIZE \
+ (HV_HYP_PAGE_SIZE / sizeof(union hv_register_value))
+#define HV_SET_REGISTER_BATCH_SIZE \
+ ((HV_HYP_PAGE_SIZE - sizeof(struct hv_input_set_vp_registers)) \
+ / sizeof(struct hv_register_assoc))
+
+int hv_call_get_vp_registers(u32 vp_index, u64 partition_id, u16 count,
+ union hv_input_vtl input_vtl,
+ struct hv_register_assoc *registers)
+{
+ struct hv_input_get_vp_registers *input_page;
+ union hv_register_value *output_page;
+ u16 completed = 0;
+ unsigned long remaining = count;
+ int rep_count, i;
+ u64 status = HV_STATUS_SUCCESS;
+ unsigned long flags;
+
+ local_irq_save(flags);
+
+ input_page = *this_cpu_ptr(hyperv_pcpu_input_arg);
+ output_page = *this_cpu_ptr(hyperv_pcpu_output_arg);
+
+ input_page->partition_id = partition_id;
+ input_page->vp_index = vp_index;
+ input_page->input_vtl.as_uint8 = input_vtl.as_uint8;
+ input_page->rsvd_z8 = 0;
+ input_page->rsvd_z16 = 0;
+
+ while (remaining) {
+ rep_count = min(remaining, HV_GET_REGISTER_BATCH_SIZE);
+ for (i = 0; i < rep_count; ++i)
+ input_page->names[i] = registers[i].name;
+
+ status = hv_do_rep_hypercall(HVCALL_GET_VP_REGISTERS, rep_count,
+ 0, input_page, output_page);
+ if (!hv_result_success(status))
+ break;
+
+ completed = hv_repcomp(status);
+ for (i = 0; i < completed; ++i)
+ registers[i].value = output_page[i];
+
+ registers += completed;
+ remaining -= completed;
+ }
+ local_irq_restore(flags);
+
+ return hv_result_to_errno(status);
+}
+EXPORT_SYMBOL_GPL(hv_call_get_vp_registers);
+
+int hv_call_set_vp_registers(u32 vp_index, u64 partition_id, u16 count,
+ union hv_input_vtl input_vtl,
+ struct hv_register_assoc *registers)
+{
+ struct hv_input_set_vp_registers *input_page;
+ u16 completed = 0;
+ unsigned long remaining = count;
+ int rep_count;
+ u64 status = HV_STATUS_SUCCESS;
+ unsigned long flags;
+
+ local_irq_save(flags);
+ input_page = *this_cpu_ptr(hyperv_pcpu_input_arg);
+
+ input_page->partition_id = partition_id;
+ input_page->vp_index = vp_index;
+ input_page->input_vtl.as_uint8 = input_vtl.as_uint8;
+ input_page->rsvd_z8 = 0;
+ input_page->rsvd_z16 = 0;
+
+ while (remaining) {
+ rep_count = min(remaining, HV_SET_REGISTER_BATCH_SIZE);
+ memcpy(input_page->elements, registers,
+ sizeof(struct hv_register_assoc) * rep_count);
+
+ status = hv_do_rep_hypercall(HVCALL_SET_VP_REGISTERS, rep_count,
+ 0, input_page, NULL);
+ if (!hv_result_success(status))
+ break;
+
+ completed = hv_repcomp(status);
+ registers += completed;
+ remaining -= completed;
+ }
+
+ local_irq_restore(flags);
+
+ return hv_result_to_errno(status);
+}
+EXPORT_SYMBOL_GPL(hv_call_set_vp_registers);
+
+int hv_call_get_partition_property(u64 partition_id,
+ u64 property_code,
+ u64 *property_value)
+{
+ u64 status;
+ unsigned long flags;
+ struct hv_input_get_partition_property *input;
+ struct hv_output_get_partition_property *output;
+
+ local_irq_save(flags);
+ input = *this_cpu_ptr(hyperv_pcpu_input_arg);
+ output = *this_cpu_ptr(hyperv_pcpu_output_arg);
+ memset(input, 0, sizeof(*input));
+ input->partition_id = partition_id;
+ input->property_code = property_code;
+ status = hv_do_hypercall(HVCALL_GET_PARTITION_PROPERTY, input, output);
+
+ if (!hv_result_success(status)) {
+ local_irq_restore(flags);
+ return hv_result_to_errno(status);
+ }
+ *property_value = output->property_value;
+
+ local_irq_restore(flags);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(hv_call_get_partition_property);
+
+/*
+ * Handle any pre-processing before going into the guest mode on this cpu, most
+ * notably call schedule(). Must be invoked with both preemption and
+ * interrupts enabled.
+ *
+ * Returns: 0 on success, -errno on error.
+ */
+int mshv_do_pre_guest_mode_work(ulong th_flags)
+{
+ if (th_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL))
+ return -EINTR;
+
+ if (th_flags & _TIF_NEED_RESCHED)
+ schedule();
+
+ if (th_flags & _TIF_NOTIFY_RESUME)
+ resume_user_mode_work(NULL);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mshv_do_pre_guest_mode_work);
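A minimal sketch of reading one VP register through the batched helper above. The wrapper name is hypothetical; the register name is supplied by the caller, and an input_vtl of zero targets the current VTL.

static int example_read_vp_reg(u64 partition_id, u32 vp_index,
			       u32 reg_name, u64 *val)
{
	struct hv_register_assoc reg = { .name = reg_name };
	union hv_input_vtl vtl = { .as_uint8 = 0 };
	int ret;

	ret = hv_call_get_vp_registers(vp_index, partition_id, 1, vtl, &reg);
	if (!ret)
		*val = reg.value.reg64;

	return ret;
}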
diff --git a/drivers/hv/mshv_eventfd.c b/drivers/hv/mshv_eventfd.c
new file mode 100644
index 000000000000..8dd22be2ca0b
--- /dev/null
+++ b/drivers/hv/mshv_eventfd.c
@@ -0,0 +1,833 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * eventfd support for mshv
+ *
+ * Heavily inspired by the KVM implementation of irqfd/ioeventfd. The basic
+ * framework code is taken from the KVM implementation.
+ *
+ * All credit to the KVM developers.
+ */
+
+#include <linux/syscalls.h>
+#include <linux/wait.h>
+#include <linux/poll.h>
+#include <linux/file.h>
+#include <linux/list.h>
+#include <linux/workqueue.h>
+#include <linux/eventfd.h>
+
+#if IS_ENABLED(CONFIG_X86_64)
+#include <asm/apic.h>
+#endif
+#include <asm/mshyperv.h>
+
+#include "mshv_eventfd.h"
+#include "mshv.h"
+#include "mshv_root.h"
+
+static struct workqueue_struct *irqfd_cleanup_wq;
+
+void mshv_register_irq_ack_notifier(struct mshv_partition *partition,
+ struct mshv_irq_ack_notifier *mian)
+{
+ mutex_lock(&partition->pt_irq_lock);
+ hlist_add_head_rcu(&mian->link, &partition->irq_ack_notifier_list);
+ mutex_unlock(&partition->pt_irq_lock);
+}
+
+void mshv_unregister_irq_ack_notifier(struct mshv_partition *partition,
+ struct mshv_irq_ack_notifier *mian)
+{
+ mutex_lock(&partition->pt_irq_lock);
+ hlist_del_init_rcu(&mian->link);
+ mutex_unlock(&partition->pt_irq_lock);
+ synchronize_rcu();
+}
+
+bool mshv_notify_acked_gsi(struct mshv_partition *partition, int gsi)
+{
+ struct mshv_irq_ack_notifier *mian;
+ bool acked = false;
+
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(mian, &partition->irq_ack_notifier_list,
+ link) {
+ if (mian->irq_ack_gsi == gsi) {
+ mian->irq_acked(mian);
+ acked = true;
+ }
+ }
+ rcu_read_unlock();
+
+ return acked;
+}
+
+#if IS_ENABLED(CONFIG_ARM64)
+static inline bool hv_should_clear_interrupt(enum hv_interrupt_type type)
+{
+ return false;
+}
+#elif IS_ENABLED(CONFIG_X86_64)
+static inline bool hv_should_clear_interrupt(enum hv_interrupt_type type)
+{
+ return type == HV_X64_INTERRUPT_TYPE_EXTINT;
+}
+#endif
+
+static void mshv_irqfd_resampler_ack(struct mshv_irq_ack_notifier *mian)
+{
+ struct mshv_irqfd_resampler *resampler;
+ struct mshv_partition *partition;
+ struct mshv_irqfd *irqfd;
+ int idx;
+
+ resampler = container_of(mian, struct mshv_irqfd_resampler,
+ rsmplr_notifier);
+ partition = resampler->rsmplr_partn;
+
+ idx = srcu_read_lock(&partition->pt_irq_srcu);
+
+ hlist_for_each_entry_rcu(irqfd, &resampler->rsmplr_irqfd_list,
+ irqfd_resampler_hnode) {
+ if (hv_should_clear_interrupt(irqfd->irqfd_lapic_irq.lapic_control.interrupt_type))
+ hv_call_clear_virtual_interrupt(partition->pt_id);
+
+ eventfd_signal(irqfd->irqfd_resamplefd);
+ }
+
+ srcu_read_unlock(&partition->pt_irq_srcu, idx);
+}
+
+#if IS_ENABLED(CONFIG_X86_64)
+static bool
+mshv_vp_irq_vector_injected(union hv_vp_register_page_interrupt_vectors iv,
+ u32 vector)
+{
+ int i;
+
+ for (i = 0; i < iv.vector_count; i++) {
+ if (iv.vector[i] == vector)
+ return true;
+ }
+
+ return false;
+}
+
+static int mshv_vp_irq_try_set_vector(struct mshv_vp *vp, u32 vector)
+{
+ union hv_vp_register_page_interrupt_vectors iv, new_iv;
+
+ iv = vp->vp_register_page->interrupt_vectors;
+ new_iv = iv;
+
+ if (mshv_vp_irq_vector_injected(iv, vector))
+ return 0;
+
+ if (iv.vector_count >= HV_VP_REGISTER_PAGE_MAX_VECTOR_COUNT)
+ return -ENOSPC;
+
+ new_iv.vector[new_iv.vector_count++] = vector;
+
+ if (cmpxchg(&vp->vp_register_page->interrupt_vectors.as_uint64,
+ iv.as_uint64, new_iv.as_uint64) != iv.as_uint64)
+ return -EAGAIN;
+
+ return 0;
+}
+
+static int mshv_vp_irq_set_vector(struct mshv_vp *vp, u32 vector)
+{
+ int ret;
+
+ do {
+ ret = mshv_vp_irq_try_set_vector(vp, vector);
+ } while (ret == -EAGAIN && !need_resched());
+
+ return ret;
+}
+
+/*
+ * Try to raise irq for guest via shared vector array. hyp does the actual
+ * inject of the interrupt.
+ */
+static int mshv_try_assert_irq_fast(struct mshv_irqfd *irqfd)
+{
+ struct mshv_partition *partition = irqfd->irqfd_partn;
+ struct mshv_lapic_irq *irq = &irqfd->irqfd_lapic_irq;
+ struct mshv_vp *vp;
+
+ if (!(ms_hyperv.ext_features &
+ HV_VP_DISPATCH_INTERRUPT_INJECTION_AVAILABLE))
+ return -EOPNOTSUPP;
+
+ if (hv_scheduler_type != HV_SCHEDULER_TYPE_ROOT)
+ return -EOPNOTSUPP;
+
+ if (irq->lapic_control.logical_dest_mode)
+ return -EOPNOTSUPP;
+
+ vp = partition->pt_vp_array[irq->lapic_apic_id];
+
+ if (!vp->vp_register_page)
+ return -EOPNOTSUPP;
+
+ if (mshv_vp_irq_set_vector(vp, irq->lapic_vector))
+ return -EINVAL;
+
+ if (vp->run.flags.root_sched_dispatched &&
+ vp->vp_register_page->interrupt_vectors.as_uint64)
+ return -EBUSY;
+
+ wake_up(&vp->run.vp_suspend_queue);
+
+ return 0;
+}
+#else /* CONFIG_X86_64 */
+static int mshv_try_assert_irq_fast(struct mshv_irqfd *irqfd)
+{
+ return -EOPNOTSUPP;
+}
+#endif
+
+static void mshv_assert_irq_slow(struct mshv_irqfd *irqfd)
+{
+ struct mshv_partition *partition = irqfd->irqfd_partn;
+ struct mshv_lapic_irq *irq = &irqfd->irqfd_lapic_irq;
+ unsigned int seq;
+ int idx;
+
+ WARN_ON(irqfd->irqfd_resampler &&
+ !irq->lapic_control.level_triggered);
+
+ idx = srcu_read_lock(&partition->pt_irq_srcu);
+ if (irqfd->irqfd_girq_ent.guest_irq_num) {
+ if (!irqfd->irqfd_girq_ent.girq_entry_valid) {
+ srcu_read_unlock(&partition->pt_irq_srcu, idx);
+ return;
+ }
+
+ do {
+ seq = read_seqcount_begin(&irqfd->irqfd_irqe_sc);
+ } while (read_seqcount_retry(&irqfd->irqfd_irqe_sc, seq));
+ }
+
+ hv_call_assert_virtual_interrupt(irqfd->irqfd_partn->pt_id,
+ irq->lapic_vector, irq->lapic_apic_id,
+ irq->lapic_control);
+ srcu_read_unlock(&partition->pt_irq_srcu, idx);
+}
+
+static void mshv_irqfd_resampler_shutdown(struct mshv_irqfd *irqfd)
+{
+ struct mshv_irqfd_resampler *rp = irqfd->irqfd_resampler;
+ struct mshv_partition *pt = rp->rsmplr_partn;
+
+ mutex_lock(&pt->irqfds_resampler_lock);
+
+ hlist_del_rcu(&irqfd->irqfd_resampler_hnode);
+ synchronize_srcu(&pt->pt_irq_srcu);
+
+ if (hlist_empty(&rp->rsmplr_irqfd_list)) {
+ hlist_del(&rp->rsmplr_hnode);
+ mshv_unregister_irq_ack_notifier(pt, &rp->rsmplr_notifier);
+ kfree(rp);
+ }
+
+ mutex_unlock(&pt->irqfds_resampler_lock);
+}
+
+/*
+ * Race-free decouple logic (ordering is critical)
+ */
+static void mshv_irqfd_shutdown(struct work_struct *work)
+{
+ struct mshv_irqfd *irqfd =
+ container_of(work, struct mshv_irqfd, irqfd_shutdown);
+
+ /*
+ * Synchronize with the wait-queue and unhook ourselves to prevent
+ * further events.
+ */
+ remove_wait_queue(irqfd->irqfd_wqh, &irqfd->irqfd_wait);
+
+ if (irqfd->irqfd_resampler) {
+ mshv_irqfd_resampler_shutdown(irqfd);
+ eventfd_ctx_put(irqfd->irqfd_resamplefd);
+ }
+
+ /*
+ * It is now safe to release the object's resources
+ */
+ eventfd_ctx_put(irqfd->irqfd_eventfd_ctx);
+ kfree(irqfd);
+}
+
+/* assumes partition->pt_irqfds_lock is held */
+static bool mshv_irqfd_is_active(struct mshv_irqfd *irqfd)
+{
+ return !hlist_unhashed(&irqfd->irqfd_hnode);
+}
+
+/*
+ * Mark the irqfd as inactive and schedule it for removal
+ *
+ * assumes partition->pt_irqfds_lock is held
+ */
+static void mshv_irqfd_deactivate(struct mshv_irqfd *irqfd)
+{
+ if (!mshv_irqfd_is_active(irqfd))
+ return;
+
+ hlist_del(&irqfd->irqfd_hnode);
+
+ queue_work(irqfd_cleanup_wq, &irqfd->irqfd_shutdown);
+}
+
+/*
+ * Called with wqh->lock held and interrupts disabled
+ */
+static int mshv_irqfd_wakeup(wait_queue_entry_t *wait, unsigned int mode,
+ int sync, void *key)
+{
+ struct mshv_irqfd *irqfd = container_of(wait, struct mshv_irqfd,
+ irqfd_wait);
+ unsigned long flags = (unsigned long)key;
+ int idx;
+ unsigned int seq;
+ struct mshv_partition *pt = irqfd->irqfd_partn;
+ int ret = 0;
+
+ if (flags & POLLIN) {
+ u64 cnt;
+
+ eventfd_ctx_do_read(irqfd->irqfd_eventfd_ctx, &cnt);
+ idx = srcu_read_lock(&pt->pt_irq_srcu);
+ do {
+ seq = read_seqcount_begin(&irqfd->irqfd_irqe_sc);
+ } while (read_seqcount_retry(&irqfd->irqfd_irqe_sc, seq));
+
+ /* An event has been signaled, raise an interrupt */
+ ret = mshv_try_assert_irq_fast(irqfd);
+ if (ret)
+ mshv_assert_irq_slow(irqfd);
+
+ srcu_read_unlock(&pt->pt_irq_srcu, idx);
+
+ ret = 1;
+ }
+
+ if (flags & POLLHUP) {
+ /* The eventfd is closing, detach from the partition */
+ unsigned long flags;
+
+ spin_lock_irqsave(&pt->pt_irqfds_lock, flags);
+
+ /*
+ * We must check if someone deactivated the irqfd before
+ * we could acquire the pt_irqfds_lock since the item is
+ * deactivated from the mshv side before it is unhooked from
+ * the wait-queue. If it is already deactivated, we can
+ * simply return knowing the other side will cleanup for us.
+ * We cannot race against the irqfd going away since the
+ * other side is required to acquire wqh->lock, which we hold
+ */
+ if (mshv_irqfd_is_active(irqfd))
+ mshv_irqfd_deactivate(irqfd);
+
+ spin_unlock_irqrestore(&pt->pt_irqfds_lock, flags);
+ }
+
+ return ret;
+}
+
+/* Must be called under pt_irqfds_lock */
+static void mshv_irqfd_update(struct mshv_partition *pt,
+ struct mshv_irqfd *irqfd)
+{
+ write_seqcount_begin(&irqfd->irqfd_irqe_sc);
+ irqfd->irqfd_girq_ent = mshv_ret_girq_entry(pt,
+ irqfd->irqfd_irqnum);
+ mshv_copy_girq_info(&irqfd->irqfd_girq_ent, &irqfd->irqfd_lapic_irq);
+ write_seqcount_end(&irqfd->irqfd_irqe_sc);
+}
+
+void mshv_irqfd_routing_update(struct mshv_partition *pt)
+{
+ struct mshv_irqfd *irqfd;
+
+ spin_lock_irq(&pt->pt_irqfds_lock);
+ hlist_for_each_entry(irqfd, &pt->pt_irqfds_list, irqfd_hnode)
+ mshv_irqfd_update(pt, irqfd);
+ spin_unlock_irq(&pt->pt_irqfds_lock);
+}
+
+static void mshv_irqfd_queue_proc(struct file *file, wait_queue_head_t *wqh,
+ poll_table *polltbl)
+{
+ struct mshv_irqfd *irqfd =
+ container_of(polltbl, struct mshv_irqfd, irqfd_polltbl);
+
+ irqfd->irqfd_wqh = wqh;
+ add_wait_queue_priority(wqh, &irqfd->irqfd_wait);
+}
+
+static int mshv_irqfd_assign(struct mshv_partition *pt,
+ struct mshv_user_irqfd *args)
+{
+ struct eventfd_ctx *eventfd = NULL, *resamplefd = NULL;
+ struct mshv_irqfd *irqfd, *tmp;
+ unsigned int events;
+ struct fd f;
+ int ret;
+ int idx;
+
+ irqfd = kzalloc(sizeof(*irqfd), GFP_KERNEL);
+ if (!irqfd)
+ return -ENOMEM;
+
+ irqfd->irqfd_partn = pt;
+ irqfd->irqfd_irqnum = args->gsi;
+ INIT_WORK(&irqfd->irqfd_shutdown, mshv_irqfd_shutdown);
+ seqcount_spinlock_init(&irqfd->irqfd_irqe_sc, &pt->pt_irqfds_lock);
+
+ f = fdget(args->fd);
+ if (!fd_file(f)) {
+ ret = -EBADF;
+ goto out;
+ }
+
+ eventfd = eventfd_ctx_fileget(fd_file(f));
+ if (IS_ERR(eventfd)) {
+ ret = PTR_ERR(eventfd);
+ goto fail;
+ }
+
+ irqfd->irqfd_eventfd_ctx = eventfd;
+
+ if (args->flags & BIT(MSHV_IRQFD_BIT_RESAMPLE)) {
+ struct mshv_irqfd_resampler *rp;
+
+ resamplefd = eventfd_ctx_fdget(args->resamplefd);
+ if (IS_ERR(resamplefd)) {
+ ret = PTR_ERR(resamplefd);
+ goto fail;
+ }
+
+ irqfd->irqfd_resamplefd = resamplefd;
+
+ mutex_lock(&pt->irqfds_resampler_lock);
+
+ hlist_for_each_entry(rp, &pt->irqfds_resampler_list,
+ rsmplr_hnode) {
+ if (rp->rsmplr_notifier.irq_ack_gsi ==
+ irqfd->irqfd_irqnum) {
+ irqfd->irqfd_resampler = rp;
+ break;
+ }
+ }
+
+ if (!irqfd->irqfd_resampler) {
+ rp = kzalloc(sizeof(*rp), GFP_KERNEL_ACCOUNT);
+ if (!rp) {
+ ret = -ENOMEM;
+ mutex_unlock(&pt->irqfds_resampler_lock);
+ goto fail;
+ }
+
+ rp->rsmplr_partn = pt;
+ INIT_HLIST_HEAD(&rp->rsmplr_irqfd_list);
+ rp->rsmplr_notifier.irq_ack_gsi = irqfd->irqfd_irqnum;
+ rp->rsmplr_notifier.irq_acked =
+ mshv_irqfd_resampler_ack;
+
+ hlist_add_head(&rp->rsmplr_hnode,
+ &pt->irqfds_resampler_list);
+ mshv_register_irq_ack_notifier(pt,
+ &rp->rsmplr_notifier);
+ irqfd->irqfd_resampler = rp;
+ }
+
+ hlist_add_head_rcu(&irqfd->irqfd_resampler_hnode,
+ &irqfd->irqfd_resampler->rsmplr_irqfd_list);
+
+ mutex_unlock(&pt->irqfds_resampler_lock);
+ }
+
+ /*
+ * Install our own custom wake-up handling so we are notified via
+ * a callback whenever someone signals the underlying eventfd
+ */
+ init_waitqueue_func_entry(&irqfd->irqfd_wait, mshv_irqfd_wakeup);
+ init_poll_funcptr(&irqfd->irqfd_polltbl, mshv_irqfd_queue_proc);
+
+ spin_lock_irq(&pt->pt_irqfds_lock);
+ if (args->flags & BIT(MSHV_IRQFD_BIT_RESAMPLE) &&
+ !irqfd->irqfd_lapic_irq.lapic_control.level_triggered) {
+ /*
+ * A resample fd is only valid for a level-triggered interrupt;
+ * otherwise return with failure.
+ */
+ spin_unlock_irq(&pt->pt_irqfds_lock);
+ ret = -EINVAL;
+ goto fail;
+ }
+ ret = 0;
+ hlist_for_each_entry(tmp, &pt->pt_irqfds_list, irqfd_hnode) {
+ if (irqfd->irqfd_eventfd_ctx != tmp->irqfd_eventfd_ctx)
+ continue;
+ /* This fd is used for another irq already. */
+ ret = -EBUSY;
+ spin_unlock_irq(&pt->pt_irqfds_lock);
+ goto fail;
+ }
+
+ idx = srcu_read_lock(&pt->pt_irq_srcu);
+ mshv_irqfd_update(pt, irqfd);
+ hlist_add_head(&irqfd->irqfd_hnode, &pt->pt_irqfds_list);
+ spin_unlock_irq(&pt->pt_irqfds_lock);
+
+ /*
+ * Check if there was an event already pending on the eventfd
+ * before we registered, and trigger it as if we didn't miss it.
+ */
+ events = vfs_poll(fd_file(f), &irqfd->irqfd_polltbl);
+
+ if (events & POLLIN)
+ mshv_assert_irq_slow(irqfd);
+
+ srcu_read_unlock(&pt->pt_irq_srcu, idx);
+ /*
+ * do not drop the file until the irqfd is fully initialized, otherwise
+ * we might race against the POLLHUP
+ */
+ fdput(f);
+
+ return 0;
+
+fail:
+ if (irqfd->irqfd_resampler)
+ mshv_irqfd_resampler_shutdown(irqfd);
+
+ if (resamplefd && !IS_ERR(resamplefd))
+ eventfd_ctx_put(resamplefd);
+
+ if (eventfd && !IS_ERR(eventfd))
+ eventfd_ctx_put(eventfd);
+
+ fdput(f);
+
+out:
+ kfree(irqfd);
+ return ret;
+}
+
+/*
+ * shutdown any irqfd's that match fd+gsi
+ */
+static int mshv_irqfd_deassign(struct mshv_partition *pt,
+ struct mshv_user_irqfd *args)
+{
+ struct mshv_irqfd *irqfd;
+ struct hlist_node *n;
+ struct eventfd_ctx *eventfd;
+
+ eventfd = eventfd_ctx_fdget(args->fd);
+ if (IS_ERR(eventfd))
+ return PTR_ERR(eventfd);
+
+ hlist_for_each_entry_safe(irqfd, n, &pt->pt_irqfds_list,
+ irqfd_hnode) {
+ if (irqfd->irqfd_eventfd_ctx == eventfd &&
+ irqfd->irqfd_irqnum == args->gsi)
+
+ mshv_irqfd_deactivate(irqfd);
+ }
+
+ eventfd_ctx_put(eventfd);
+
+ /*
+ * Block until we know all outstanding shutdown jobs have completed
+ * so that we guarantee there will not be any more interrupts on this
+ * gsi once this deassign function returns.
+ */
+ flush_workqueue(irqfd_cleanup_wq);
+
+ return 0;
+}
+
+int mshv_set_unset_irqfd(struct mshv_partition *pt,
+ struct mshv_user_irqfd *args)
+{
+ if (args->flags & ~MSHV_IRQFD_FLAGS_MASK)
+ return -EINVAL;
+
+ if (args->flags & BIT(MSHV_IRQFD_BIT_DEASSIGN))
+ return mshv_irqfd_deassign(pt, args);
+
+ return mshv_irqfd_assign(pt, args);
+}
+
+/*
+ * This function is called as the mshv VM fd is being released.
+ * Shutdown all irqfds that still remain open
+ */
+static void mshv_irqfd_release(struct mshv_partition *pt)
+{
+ struct mshv_irqfd *irqfd;
+ struct hlist_node *n;
+
+ spin_lock_irq(&pt->pt_irqfds_lock);
+
+ hlist_for_each_entry_safe(irqfd, n, &pt->pt_irqfds_list, irqfd_hnode)
+ mshv_irqfd_deactivate(irqfd);
+
+ spin_unlock_irq(&pt->pt_irqfds_lock);
+
+ /*
+ * Block until we know all outstanding shutdown jobs have completed
+ * since we do not take a mshv_partition* reference.
+ */
+ flush_workqueue(irqfd_cleanup_wq);
+}
+
+int mshv_irqfd_wq_init(void)
+{
+ irqfd_cleanup_wq = alloc_workqueue("mshv-irqfd-cleanup", 0, 0);
+ if (!irqfd_cleanup_wq)
+ return -ENOMEM;
+
+ return 0;
+}
+
+void mshv_irqfd_wq_cleanup(void)
+{
+ destroy_workqueue(irqfd_cleanup_wq);
+}
+
+/*
+ * --------------------------------------------------------------------
+ * ioeventfd: translate an MMIO memory write to an eventfd signal.
+ *
+ * Userspace can register an MMIO address with an eventfd and receive a
+ * notification when that memory has been written.
+ * --------------------------------------------------------------------
+ */
+
+static void ioeventfd_release(struct mshv_ioeventfd *p, u64 partition_id)
+{
+ if (p->iovntfd_doorbell_id > 0)
+ mshv_unregister_doorbell(partition_id, p->iovntfd_doorbell_id);
+ eventfd_ctx_put(p->iovntfd_eventfd);
+ kfree(p);
+}
+
+/* MMIO writes trigger an event if the addr/val match */
+static void ioeventfd_mmio_write(int doorbell_id, void *data)
+{
+ struct mshv_partition *partition = (struct mshv_partition *)data;
+ struct mshv_ioeventfd *p;
+
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(p, &partition->ioeventfds_list, iovntfd_hnode)
+ if (p->iovntfd_doorbell_id == doorbell_id) {
+ eventfd_signal(p->iovntfd_eventfd);
+ break;
+ }
+
+ rcu_read_unlock();
+}
+
+static bool ioeventfd_check_collision(struct mshv_partition *pt,
+ struct mshv_ioeventfd *p)
+ __must_hold(&pt->mutex)
+{
+ struct mshv_ioeventfd *_p;
+
+ hlist_for_each_entry(_p, &pt->ioeventfds_list, iovntfd_hnode)
+ if (_p->iovntfd_addr == p->iovntfd_addr &&
+ _p->iovntfd_length == p->iovntfd_length &&
+ (_p->iovntfd_wildcard || p->iovntfd_wildcard ||
+ _p->iovntfd_datamatch == p->iovntfd_datamatch))
+ return true;
+
+ return false;
+}
+
+static int mshv_assign_ioeventfd(struct mshv_partition *pt,
+ struct mshv_user_ioeventfd *args)
+ __must_hold(&pt->mutex)
+{
+ struct mshv_ioeventfd *p;
+ struct eventfd_ctx *eventfd;
+ u64 doorbell_flags = 0;
+ int ret;
+
+ /* This mutex is currently protecting ioeventfd.items list */
+ WARN_ON_ONCE(!mutex_is_locked(&pt->pt_mutex));
+
+ if (args->flags & BIT(MSHV_IOEVENTFD_BIT_PIO))
+ return -EOPNOTSUPP;
+
+ /* must be natural-word sized */
+ switch (args->len) {
+ case 0:
+ doorbell_flags = HV_DOORBELL_FLAG_TRIGGER_SIZE_ANY;
+ break;
+ case 1:
+ doorbell_flags = HV_DOORBELL_FLAG_TRIGGER_SIZE_BYTE;
+ break;
+ case 2:
+ doorbell_flags = HV_DOORBELL_FLAG_TRIGGER_SIZE_WORD;
+ break;
+ case 4:
+ doorbell_flags = HV_DOORBELL_FLAG_TRIGGER_SIZE_DWORD;
+ break;
+ case 8:
+ doorbell_flags = HV_DOORBELL_FLAG_TRIGGER_SIZE_QWORD;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* check for range overflow */
+ if (args->addr + args->len < args->addr)
+ return -EINVAL;
+
+ /* check for extra flags that we don't understand */
+ if (args->flags & ~MSHV_IOEVENTFD_FLAGS_MASK)
+ return -EINVAL;
+
+ eventfd = eventfd_ctx_fdget(args->fd);
+ if (IS_ERR(eventfd))
+ return PTR_ERR(eventfd);
+
+ p = kzalloc(sizeof(*p), GFP_KERNEL);
+ if (!p) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ p->iovntfd_addr = args->addr;
+ p->iovntfd_length = args->len;
+ p->iovntfd_eventfd = eventfd;
+
+ /* The datamatch feature is optional, otherwise this is a wildcard */
+ if (args->flags & BIT(MSHV_IOEVENTFD_BIT_DATAMATCH)) {
+ p->iovntfd_datamatch = args->datamatch;
+ } else {
+ p->iovntfd_wildcard = true;
+ doorbell_flags |= HV_DOORBELL_FLAG_TRIGGER_ANY_VALUE;
+ }
+
+ if (ioeventfd_check_collision(pt, p)) {
+ ret = -EEXIST;
+ goto unlock_fail;
+ }
+
+ ret = mshv_register_doorbell(pt->pt_id, ioeventfd_mmio_write,
+ (void *)pt, p->iovntfd_addr,
+ p->iovntfd_datamatch, doorbell_flags);
+ if (ret < 0)
+ goto unlock_fail;
+
+ p->iovntfd_doorbell_id = ret;
+
+ hlist_add_head_rcu(&p->iovntfd_hnode, &pt->ioeventfds_list);
+
+ return 0;
+
+unlock_fail:
+ kfree(p);
+
+fail:
+ eventfd_ctx_put(eventfd);
+
+ return ret;
+}
+
+static int mshv_deassign_ioeventfd(struct mshv_partition *pt,
+ struct mshv_user_ioeventfd *args)
+ __must_hold(&pt->mutex)
+{
+ struct mshv_ioeventfd *p;
+ struct eventfd_ctx *eventfd;
+ struct hlist_node *n;
+ int ret = -ENOENT;
+
+ /* This mutex is currently protecting ioeventfd.items list */
+ WARN_ON_ONCE(!mutex_is_locked(&pt->pt_mutex));
+
+ eventfd = eventfd_ctx_fdget(args->fd);
+ if (IS_ERR(eventfd))
+ return PTR_ERR(eventfd);
+
+ hlist_for_each_entry_safe(p, n, &pt->ioeventfds_list, iovntfd_hnode) {
+ bool wildcard = !(args->flags & BIT(MSHV_IOEVENTFD_BIT_DATAMATCH));
+
+ if (p->iovntfd_eventfd != eventfd ||
+ p->iovntfd_addr != args->addr ||
+ p->iovntfd_length != args->len ||
+ p->iovntfd_wildcard != wildcard)
+ continue;
+
+ if (!p->iovntfd_wildcard &&
+ p->iovntfd_datamatch != args->datamatch)
+ continue;
+
+ hlist_del_rcu(&p->iovntfd_hnode);
+ synchronize_rcu();
+ ioeventfd_release(p, pt->pt_id);
+ ret = 0;
+ break;
+ }
+
+ eventfd_ctx_put(eventfd);
+
+ return ret;
+}
+
+int mshv_set_unset_ioeventfd(struct mshv_partition *pt,
+ struct mshv_user_ioeventfd *args)
+ __must_hold(&pt->mutex)
+{
+ if ((args->flags & ~MSHV_IOEVENTFD_FLAGS_MASK) ||
+ mshv_field_nonzero(*args, rsvd))
+ return -EINVAL;
+
+ /* PIO not yet implemented */
+ if (args->flags & BIT(MSHV_IOEVENTFD_BIT_PIO))
+ return -EOPNOTSUPP;
+
+ if (args->flags & BIT(MSHV_IOEVENTFD_BIT_DEASSIGN))
+ return mshv_deassign_ioeventfd(pt, args);
+
+ return mshv_assign_ioeventfd(pt, args);
+}
+
+void mshv_eventfd_init(struct mshv_partition *pt)
+{
+ spin_lock_init(&pt->pt_irqfds_lock);
+ INIT_HLIST_HEAD(&pt->pt_irqfds_list);
+
+ INIT_HLIST_HEAD(&pt->irqfds_resampler_list);
+ mutex_init(&pt->irqfds_resampler_lock);
+
+ INIT_HLIST_HEAD(&pt->ioeventfds_list);
+}
+
+void mshv_eventfd_release(struct mshv_partition *pt)
+{
+ struct hlist_head items;
+ struct hlist_node *n;
+ struct mshv_ioeventfd *p;
+
+ hlist_move_list(&pt->ioeventfds_list, &items);
+ synchronize_rcu();
+
+ hlist_for_each_entry_safe(p, n, &items, iovntfd_hnode) {
+ hlist_del(&p->iovntfd_hnode);
+ ioeventfd_release(p, pt->pt_id);
+ }
+
+ mshv_irqfd_release(pt);
+}
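A stripped-down sketch of the eventfd wait-queue hookup that mshv_irqfd_assign() performs above: install a custom wakeup callback, register it on the eventfd's wait queue through a poll-table proc, and let vfs_poll() report any event that was already pending. All names here are illustrative, not part of the patch.

#include <linux/eventfd.h>
#include <linux/file.h>
#include <linux/poll.h>
#include <linux/wait.h>

struct example_watch {
	struct eventfd_ctx *ctx;
	wait_queue_entry_t wait;
	poll_table pt;
};

static int example_wakeup(wait_queue_entry_t *wait, unsigned int mode,
			  int sync, void *key)
{
	struct example_watch *w = container_of(wait, struct example_watch, wait);
	u64 cnt;

	if ((unsigned long)key & POLLIN) {
		/* consume the counter so the eventfd can be signalled again */
		eventfd_ctx_do_read(w->ctx, &cnt);
		pr_debug("eventfd signalled\n");
	}

	return 0;
}

static void example_queue_proc(struct file *file, wait_queue_head_t *wqh,
			       poll_table *pt)
{
	struct example_watch *w = container_of(pt, struct example_watch, pt);

	add_wait_queue_priority(wqh, &w->wait);
}

/* caller holds a reference on @f; w->ctx was set via eventfd_ctx_fileget(f) */
static void example_watch_arm(struct example_watch *w, struct file *f)
{
	__poll_t events;

	init_waitqueue_func_entry(&w->wait, example_wakeup);
	init_poll_funcptr(&w->pt, example_queue_proc);

	/* registers the callback and reports events already pending */
	events = vfs_poll(f, &w->pt);
	if (events & EPOLLIN)
		pr_debug("event was already pending at registration\n");
}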
diff --git a/drivers/hv/mshv_eventfd.h b/drivers/hv/mshv_eventfd.h
new file mode 100644
index 000000000000..332e7670a344
--- /dev/null
+++ b/drivers/hv/mshv_eventfd.h
@@ -0,0 +1,71 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * irqfd: Allows an fd to be used to inject an interrupt into the guest.
+ * ioeventfd: Allows an fd to be used to receive a signal from the guest.
+ * All credit goes to kvm developers.
+ */
+
+#ifndef __LINUX_MSHV_EVENTFD_H
+#define __LINUX_MSHV_EVENTFD_H
+
+#include <linux/poll.h>
+
+#include "mshv.h"
+#include "mshv_root.h"
+
+/*
+ * Struct containing the list of irqfds sharing an irq. Updates are protected
+ * by the partition's irqfds_resampler_lock.
+ */
+struct mshv_irqfd_resampler {
+ struct mshv_partition *rsmplr_partn;
+ struct hlist_head rsmplr_irqfd_list;
+ struct mshv_irq_ack_notifier rsmplr_notifier;
+ struct hlist_node rsmplr_hnode;
+};
+
+struct mshv_irqfd {
+ struct mshv_partition *irqfd_partn;
+ struct eventfd_ctx *irqfd_eventfd_ctx;
+ struct mshv_guest_irq_ent irqfd_girq_ent;
+ seqcount_spinlock_t irqfd_irqe_sc;
+ u32 irqfd_irqnum;
+ struct mshv_lapic_irq irqfd_lapic_irq;
+ struct hlist_node irqfd_hnode;
+ poll_table irqfd_polltbl;
+ wait_queue_head_t *irqfd_wqh;
+ wait_queue_entry_t irqfd_wait;
+ struct work_struct irqfd_shutdown;
+ struct mshv_irqfd_resampler *irqfd_resampler;
+ struct eventfd_ctx *irqfd_resamplefd;
+ struct hlist_node irqfd_resampler_hnode;
+};
+
+void mshv_eventfd_init(struct mshv_partition *partition);
+void mshv_eventfd_release(struct mshv_partition *partition);
+
+void mshv_register_irq_ack_notifier(struct mshv_partition *partition,
+ struct mshv_irq_ack_notifier *mian);
+void mshv_unregister_irq_ack_notifier(struct mshv_partition *partition,
+ struct mshv_irq_ack_notifier *mian);
+bool mshv_notify_acked_gsi(struct mshv_partition *partition, int gsi);
+
+int mshv_set_unset_irqfd(struct mshv_partition *partition,
+ struct mshv_user_irqfd *args);
+
+int mshv_irqfd_wq_init(void);
+void mshv_irqfd_wq_cleanup(void);
+
+struct mshv_ioeventfd {
+ struct hlist_node iovntfd_hnode;
+ u64 iovntfd_addr;
+ int iovntfd_length;
+ struct eventfd_ctx *iovntfd_eventfd;
+ u64 iovntfd_datamatch;
+ int iovntfd_doorbell_id;
+ bool iovntfd_wildcard;
+};
+
+int mshv_set_unset_ioeventfd(struct mshv_partition *pt,
+ struct mshv_user_ioeventfd *args);
+
+#endif /* __LINUX_MSHV_EVENTFD_H */
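The assign and deassign paths above are driven entirely by the MSHV_IOEVENTFD_BIT_* flag bits and the fd/addr/len/datamatch fields of struct mshv_user_ioeventfd from uapi/linux/mshv.h. A minimal userspace sketch of a datamatch registration follows; the ioctl request name and the eventfd handling are assumptions made for illustration, not taken from this patch.

    #include <stdint.h>
    #include <sys/eventfd.h>
    #include <sys/ioctl.h>
    #include <linux/mshv.h>    /* struct mshv_user_ioeventfd, MSHV_IOEVENTFD_BIT_* */

    /*
     * Hypothetical sketch: signal an eventfd whenever the guest writes the
     * 4-byte value `magic` to the MMIO doorbell at `addr`. The ioctl request
     * name MSHV_IOEVENTFD is assumed here and may differ in the real uapi.
     */
    static int doorbell_assign(int partition_fd, uint64_t addr, uint64_t magic)
    {
            int efd = eventfd(0, EFD_NONBLOCK);
            struct mshv_user_ioeventfd args = {
                    .fd        = efd,
                    .addr      = addr,
                    .len       = 4,
                    .datamatch = magic,
                    .flags     = 1u << MSHV_IOEVENTFD_BIT_DATAMATCH,
            };

            if (efd < 0)
                    return -1;
            /* Deassign later with the same fd/addr/len plus MSHV_IOEVENTFD_BIT_DEASSIGN */
            return ioctl(partition_fd, MSHV_IOEVENTFD, &args);
    }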
diff --git a/drivers/hv/mshv_irq.c b/drivers/hv/mshv_irq.c
new file mode 100644
index 000000000000..d0fb9ef734f4
--- /dev/null
+++ b/drivers/hv/mshv_irq.c
@@ -0,0 +1,124 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2023, Microsoft Corporation.
+ *
+ * Authors: Microsoft Linux virtualization team
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <asm/mshyperv.h>
+
+#include "mshv_eventfd.h"
+#include "mshv.h"
+#include "mshv_root.h"
+
+/* called from the ioctl code, user wants to update the guest irq table */
+int mshv_update_routing_table(struct mshv_partition *partition,
+ const struct mshv_user_irq_entry *ue,
+ unsigned int numents)
+{
+ struct mshv_girq_routing_table *new = NULL, *old;
+ u32 i, nr_rt_entries = 0;
+ int r = 0;
+
+ if (numents == 0)
+ goto swap_routes;
+
+ for (i = 0; i < numents; i++) {
+ if (ue[i].gsi >= MSHV_MAX_GUEST_IRQS)
+ return -EINVAL;
+
+ if (ue[i].address_hi)
+ return -EINVAL;
+
+ nr_rt_entries = max(nr_rt_entries, ue[i].gsi);
+ }
+ nr_rt_entries += 1;
+
+ new = kzalloc(struct_size(new, mshv_girq_info_tbl, nr_rt_entries),
+ GFP_KERNEL_ACCOUNT);
+ if (!new)
+ return -ENOMEM;
+
+ new->num_rt_entries = nr_rt_entries;
+ for (i = 0; i < numents; i++) {
+ struct mshv_guest_irq_ent *girq;
+
+ girq = &new->mshv_girq_info_tbl[ue[i].gsi];
+
+ /*
+ * Allow only a one-to-one mapping between GSI and MSI routing.
+ */
+ if (girq->guest_irq_num != 0) {
+ r = -EINVAL;
+ goto out;
+ }
+
+ girq->guest_irq_num = ue[i].gsi;
+ girq->girq_addr_lo = ue[i].address_lo;
+ girq->girq_addr_hi = ue[i].address_hi;
+ girq->girq_irq_data = ue[i].data;
+ girq->girq_entry_valid = true;
+ }
+
+swap_routes:
+ mutex_lock(&partition->pt_irq_lock);
+ old = rcu_dereference_protected(partition->pt_girq_tbl, 1);
+ rcu_assign_pointer(partition->pt_girq_tbl, new);
+ mshv_irqfd_routing_update(partition);
+ mutex_unlock(&partition->pt_irq_lock);
+
+ synchronize_srcu_expedited(&partition->pt_irq_srcu);
+ new = old;
+
+out:
+ kfree(new);
+
+ return r;
+}
+
+/* The VM is going away; free the irq routing table */
+void mshv_free_routing_table(struct mshv_partition *partition)
+{
+ struct mshv_girq_routing_table *rt =
+ rcu_access_pointer(partition->pt_girq_tbl);
+
+ kfree(rt);
+}
+
+struct mshv_guest_irq_ent
+mshv_ret_girq_entry(struct mshv_partition *partition, u32 irqnum)
+{
+ struct mshv_guest_irq_ent entry = { 0 };
+ struct mshv_girq_routing_table *girq_tbl;
+
+ girq_tbl = srcu_dereference_check(partition->pt_girq_tbl,
+ &partition->pt_irq_srcu,
+ lockdep_is_held(&partition->pt_irq_lock));
+ if (!girq_tbl || irqnum >= girq_tbl->num_rt_entries) {
+ /*
+ * Premature irqfd registration: girq_entry_valid is left at 0,
+ * so this entry will be ignored anyway
+ */
+ entry.guest_irq_num = irqnum;
+ return entry;
+ }
+
+ return girq_tbl->mshv_girq_info_tbl[irqnum];
+}
+
+void mshv_copy_girq_info(struct mshv_guest_irq_ent *ent,
+ struct mshv_lapic_irq *lirq)
+{
+ memset(lirq, 0, sizeof(*lirq));
+ if (!ent || !ent->girq_entry_valid)
+ return;
+
+ lirq->lapic_vector = ent->girq_irq_data & 0xFF;
+ lirq->lapic_apic_id = (ent->girq_addr_lo >> 12) & 0xFF;
+ lirq->lapic_control.interrupt_type = (ent->girq_irq_data & 0x700) >> 8;
+ lirq->lapic_control.level_triggered = (ent->girq_irq_data >> 15) & 0x1;
+ lirq->lapic_control.logical_dest_mode = (ent->girq_addr_lo >> 2) & 0x1;
+}
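mshv_copy_girq_info() above decodes the routing entry with the conventional x86 MSI layout: destination APIC ID in address bits 19:12, logical destination mode in address bit 2, vector in data bits 7:0, delivery type in data bits 10:8 and trigger mode in data bit 15. A small worked example, with values chosen purely for illustration:

    struct mshv_guest_irq_ent ent = {
            .girq_entry_valid = 1,
            .girq_addr_lo     = 0xFEE0100C,
            .girq_irq_data    = 0x4123,
    };
    struct mshv_lapic_irq lirq;

    mshv_copy_girq_info(&ent, &lirq);
    /*
     * lirq.lapic_vector                    == 0x23  (data & 0xFF)
     * lirq.lapic_apic_id                   == 0x01  ((addr_lo >> 12) & 0xFF)
     * lirq.lapic_control.interrupt_type    == 1     ((data & 0x700) >> 8)
     * lirq.lapic_control.level_triggered   == 0     ((data >> 15) & 0x1)
     * lirq.lapic_control.logical_dest_mode == 1     ((addr_lo >> 2) & 0x1)
     */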
diff --git a/drivers/hv/mshv_portid_table.c b/drivers/hv/mshv_portid_table.c
new file mode 100644
index 000000000000..c349af1f0aaa
--- /dev/null
+++ b/drivers/hv/mshv_portid_table.c
@@ -0,0 +1,83 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/idr.h>
+#include <asm/mshyperv.h>
+
+#include "mshv.h"
+#include "mshv_root.h"
+
+/*
+ * Ports and connections are hypervisor constructs used for inter-partition
+ * communication. A port represents the source and a connection represents
+ * the destination. Partitions are responsible for managing the port and
+ * connection ids.
+ */
+
+#define PORTID_MIN 1
+#define PORTID_MAX INT_MAX
+
+static DEFINE_IDR(port_table_idr);
+
+void
+mshv_port_table_fini(void)
+{
+ struct port_table_info *port_info;
+ unsigned long i, tmp;
+
+ idr_lock(&port_table_idr);
+ if (!idr_is_empty(&port_table_idr)) {
+ idr_for_each_entry_ul(&port_table_idr, port_info, tmp, i) {
+ port_info = idr_remove(&port_table_idr, i);
+ kfree_rcu(port_info, portbl_rcu);
+ }
+ }
+ idr_unlock(&port_table_idr);
+}
+
+int
+mshv_portid_alloc(struct port_table_info *info)
+{
+ int ret = 0;
+
+ idr_lock(&port_table_idr);
+ ret = idr_alloc(&port_table_idr, info, PORTID_MIN,
+ PORTID_MAX, GFP_KERNEL);
+ idr_unlock(&port_table_idr);
+
+ return ret;
+}
+
+void
+mshv_portid_free(int port_id)
+{
+ struct port_table_info *info;
+
+ idr_lock(&port_table_idr);
+ info = idr_remove(&port_table_idr, port_id);
+ WARN_ON(!info);
+ idr_unlock(&port_table_idr);
+
+ synchronize_rcu();
+ kfree(info);
+}
+
+int
+mshv_portid_lookup(int port_id, struct port_table_info *info)
+{
+ struct port_table_info *_info;
+ int ret = -ENOENT;
+
+ rcu_read_lock();
+ _info = idr_find(&port_table_idr, port_id);
+ rcu_read_unlock();
+
+ if (_info) {
+ *info = *_info;
+ ret = 0;
+ }
+
+ return ret;
+}
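A sketch of the intended lifecycle of these helpers, assuming the caller allocates the port_table_info and hands ownership to the table (mshv_portid_free() eventually frees it after an RCU grace period). The doorbell callback, its data pointer and the HV_PORT_TYPE_DOORBELL value are assumptions made for illustration:

    struct port_table_info *info;
    struct port_table_info snapshot;
    int port_id;

    info = kzalloc(sizeof(*info), GFP_KERNEL);
    if (!info)
            return -ENOMEM;

    info->hv_port_type = HV_PORT_TYPE_DOORBELL;            /* enum value assumed */
    info->hv_port_doorbell.doorbell_cb = my_doorbell_cb;   /* hypothetical callback */
    info->hv_port_doorbell.data = my_data;

    port_id = mshv_portid_alloc(info);      /* returns the new id or a negative errno */
    if (port_id < 0) {
            kfree(info);
            return port_id;
    }

    /* Hot path (e.g. the synic ISR): copy out a stable snapshot */
    if (!mshv_portid_lookup(port_id, &snapshot))
            snapshot.hv_port_doorbell.doorbell_cb(port_id,
                                                  snapshot.hv_port_doorbell.data);

    /* Teardown: removes the entry and frees info after a grace period */
    mshv_portid_free(port_id);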
diff --git a/drivers/hv/mshv_root.h b/drivers/hv/mshv_root.h
new file mode 100644
index 000000000000..e3931b0f1269
--- /dev/null
+++ b/drivers/hv/mshv_root.h
@@ -0,0 +1,311 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2023, Microsoft Corporation.
+ */
+
+#ifndef _MSHV_ROOT_H_
+#define _MSHV_ROOT_H_
+
+#include <linux/spinlock.h>
+#include <linux/mutex.h>
+#include <linux/semaphore.h>
+#include <linux/sched.h>
+#include <linux/srcu.h>
+#include <linux/wait.h>
+#include <linux/hashtable.h>
+#include <linux/dev_printk.h>
+#include <linux/build_bug.h>
+#include <uapi/linux/mshv.h>
+
+/*
+ * The hypervisor version must be between these numbers (inclusive)
+ * to guarantee compatibility
+ */
+#define MSHV_HV_MIN_VERSION (27744)
+#define MSHV_HV_MAX_VERSION (27751)
+
+static_assert(HV_HYP_PAGE_SIZE == MSHV_HV_PAGE_SIZE);
+
+#define MSHV_MAX_VPS 256
+
+#define MSHV_PARTITIONS_HASH_BITS 9
+
+#define MSHV_PIN_PAGES_BATCH_SIZE (0x10000000ULL / HV_HYP_PAGE_SIZE)
+
+struct mshv_vp {
+ u32 vp_index;
+ struct mshv_partition *vp_partition;
+ struct mutex vp_mutex;
+ struct hv_vp_register_page *vp_register_page;
+ struct hv_message *vp_intercept_msg_page;
+ void *vp_ghcb_page;
+ struct hv_stats_page *vp_stats_pages[2];
+ struct {
+ atomic64_t vp_signaled_count;
+ struct {
+ u64 intercept_suspend: 1;
+ u64 root_sched_blocked: 1; /* root scheduler only */
+ u64 root_sched_dispatched: 1; /* root scheduler only */
+ u64 reserved: 61;
+ } flags;
+ unsigned int kicked_by_hv;
+ wait_queue_head_t vp_suspend_queue;
+ } run;
+};
+
+#define vp_fmt(fmt) "p%lluvp%u: " fmt
+#define vp_devprintk(level, v, fmt, ...) \
+do { \
+ const struct mshv_vp *__vp = (v); \
+ const struct mshv_partition *__pt = __vp->vp_partition; \
+ dev_##level(__pt->pt_module_dev, vp_fmt(fmt), __pt->pt_id, \
+ __vp->vp_index, ##__VA_ARGS__); \
+} while (0)
+#define vp_emerg(v, fmt, ...) vp_devprintk(emerg, v, fmt, ##__VA_ARGS__)
+#define vp_crit(v, fmt, ...) vp_devprintk(crit, v, fmt, ##__VA_ARGS__)
+#define vp_alert(v, fmt, ...) vp_devprintk(alert, v, fmt, ##__VA_ARGS__)
+#define vp_err(v, fmt, ...) vp_devprintk(err, v, fmt, ##__VA_ARGS__)
+#define vp_warn(v, fmt, ...) vp_devprintk(warn, v, fmt, ##__VA_ARGS__)
+#define vp_notice(v, fmt, ...) vp_devprintk(notice, v, fmt, ##__VA_ARGS__)
+#define vp_info(v, fmt, ...) vp_devprintk(info, v, fmt, ##__VA_ARGS__)
+#define vp_dbg(v, fmt, ...) vp_devprintk(dbg, v, fmt, ##__VA_ARGS__)
+
+struct mshv_mem_region {
+ struct hlist_node hnode;
+ u64 nr_pages;
+ u64 start_gfn;
+ u64 start_uaddr;
+ u32 hv_map_flags;
+ struct {
+ u64 large_pages: 1; /* 2MiB */
+ u64 range_pinned: 1;
+ u64 reserved: 62;
+ } flags;
+ struct mshv_partition *partition;
+ struct page *pages[];
+};
+
+struct mshv_irq_ack_notifier {
+ struct hlist_node link;
+ unsigned int irq_ack_gsi;
+ void (*irq_acked)(struct mshv_irq_ack_notifier *mian);
+};
+
+struct mshv_partition {
+ struct device *pt_module_dev;
+
+ struct hlist_node pt_hnode;
+ u64 pt_id;
+ refcount_t pt_ref_count;
+ struct mutex pt_mutex;
+ struct hlist_head pt_mem_regions; // not ordered
+
+ u32 pt_vp_count;
+ struct mshv_vp *pt_vp_array[MSHV_MAX_VPS];
+
+ struct mutex pt_irq_lock;
+ struct srcu_struct pt_irq_srcu;
+ struct hlist_head irq_ack_notifier_list;
+
+ struct hlist_head pt_devices;
+
+ /*
+ * MSHV does not support more than one async hypercall in flight
+ * for a single partition. Thus, it is okay to define per-partition
+ * async hypercall status.
+ */
+ struct completion async_hypercall;
+ u64 async_hypercall_status;
+
+ spinlock_t pt_irqfds_lock;
+ struct hlist_head pt_irqfds_list;
+ struct mutex irqfds_resampler_lock;
+ struct hlist_head irqfds_resampler_list;
+
+ struct hlist_head ioeventfds_list;
+
+ struct mshv_girq_routing_table __rcu *pt_girq_tbl;
+ u64 isolation_type;
+ bool import_completed;
+ bool pt_initialized;
+};
+
+#define pt_fmt(fmt) "p%llu: " fmt
+#define pt_devprintk(level, p, fmt, ...) \
+do { \
+ const struct mshv_partition *__pt = (p); \
+ dev_##level(__pt->pt_module_dev, pt_fmt(fmt), __pt->pt_id, \
+ ##__VA_ARGS__); \
+} while (0)
+#define pt_emerg(p, fmt, ...) pt_devprintk(emerg, p, fmt, ##__VA_ARGS__)
+#define pt_crit(p, fmt, ...) pt_devprintk(crit, p, fmt, ##__VA_ARGS__)
+#define pt_alert(p, fmt, ...) pt_devprintk(alert, p, fmt, ##__VA_ARGS__)
+#define pt_err(p, fmt, ...) pt_devprintk(err, p, fmt, ##__VA_ARGS__)
+#define pt_warn(p, fmt, ...) pt_devprintk(warn, p, fmt, ##__VA_ARGS__)
+#define pt_notice(p, fmt, ...) pt_devprintk(notice, p, fmt, ##__VA_ARGS__)
+#define pt_info(p, fmt, ...) pt_devprintk(info, p, fmt, ##__VA_ARGS__)
+#define pt_dbg(p, fmt, ...) pt_devprintk(dbg, p, fmt, ##__VA_ARGS__)
+
+struct mshv_lapic_irq {
+ u32 lapic_vector;
+ u64 lapic_apic_id;
+ union hv_interrupt_control lapic_control;
+};
+
+#define MSHV_MAX_GUEST_IRQS 4096
+
+/* representation of one guest irq entry, either msi or legacy */
+struct mshv_guest_irq_ent {
+ u32 girq_entry_valid; /* vfio looks at this */
+ u32 guest_irq_num; /* a unique number for each irq */
+ u32 girq_addr_lo; /* guest irq msi address info */
+ u32 girq_addr_hi;
+ u32 girq_irq_data; /* idt vector in some cases */
+};
+
+struct mshv_girq_routing_table {
+ u32 num_rt_entries;
+ struct mshv_guest_irq_ent mshv_girq_info_tbl[];
+};
+
+struct hv_synic_pages {
+ struct hv_message_page *synic_message_page;
+ struct hv_synic_event_flags_page *synic_event_flags_page;
+ struct hv_synic_event_ring_page *synic_event_ring_page;
+};
+
+struct mshv_root {
+ struct hv_synic_pages __percpu *synic_pages;
+ spinlock_t pt_ht_lock;
+ DECLARE_HASHTABLE(pt_htable, MSHV_PARTITIONS_HASH_BITS);
+};
+
+/*
+ * Callback for doorbell events.
+ * NOTE: This is called in interrupt context. The callback
+ * must defer any slow or sleeping work to a later context.
+ */
+typedef void (*doorbell_cb_t) (int doorbell_id, void *);
+
+/*
+ * port table information
+ */
+struct port_table_info {
+ struct rcu_head portbl_rcu;
+ enum hv_port_type hv_port_type;
+ union {
+ struct {
+ u64 reserved[2];
+ } hv_port_message;
+ struct {
+ u64 reserved[2];
+ } hv_port_event;
+ struct {
+ u64 reserved[2];
+ } hv_port_monitor;
+ struct {
+ doorbell_cb_t doorbell_cb;
+ void *data;
+ } hv_port_doorbell;
+ };
+};
+
+int mshv_update_routing_table(struct mshv_partition *partition,
+ const struct mshv_user_irq_entry *entries,
+ unsigned int numents);
+void mshv_free_routing_table(struct mshv_partition *partition);
+
+struct mshv_guest_irq_ent mshv_ret_girq_entry(struct mshv_partition *partition,
+ u32 irq_num);
+
+void mshv_copy_girq_info(struct mshv_guest_irq_ent *src_irq,
+ struct mshv_lapic_irq *dest_irq);
+
+void mshv_irqfd_routing_update(struct mshv_partition *partition);
+
+void mshv_port_table_fini(void);
+int mshv_portid_alloc(struct port_table_info *info);
+int mshv_portid_lookup(int port_id, struct port_table_info *info);
+void mshv_portid_free(int port_id);
+
+int mshv_register_doorbell(u64 partition_id, doorbell_cb_t doorbell_cb,
+ void *data, u64 gpa, u64 val, u64 flags);
+void mshv_unregister_doorbell(u64 partition_id, int doorbell_portid);
+
+void mshv_isr(void);
+int mshv_synic_init(unsigned int cpu);
+int mshv_synic_cleanup(unsigned int cpu);
+
+static inline bool mshv_partition_encrypted(struct mshv_partition *partition)
+{
+ return partition->isolation_type == HV_PARTITION_ISOLATION_TYPE_SNP;
+}
+
+struct mshv_partition *mshv_partition_get(struct mshv_partition *partition);
+void mshv_partition_put(struct mshv_partition *partition);
+struct mshv_partition *mshv_partition_find(u64 partition_id) __must_hold(RCU);
+
+/* hypercalls */
+
+int hv_call_withdraw_memory(u64 count, int node, u64 partition_id);
+int hv_call_create_partition(u64 flags,
+ struct hv_partition_creation_properties creation_properties,
+ union hv_partition_isolation_properties isolation_properties,
+ u64 *partition_id);
+int hv_call_initialize_partition(u64 partition_id);
+int hv_call_finalize_partition(u64 partition_id);
+int hv_call_delete_partition(u64 partition_id);
+int hv_call_map_mmio_pages(u64 partition_id, u64 gfn, u64 mmio_spa, u64 numpgs);
+int hv_call_map_gpa_pages(u64 partition_id, u64 gpa_target, u64 page_count,
+ u32 flags, struct page **pages);
+int hv_call_unmap_gpa_pages(u64 partition_id, u64 gpa_target, u64 page_count,
+ u32 flags);
+int hv_call_delete_vp(u64 partition_id, u32 vp_index);
+int hv_call_assert_virtual_interrupt(u64 partition_id, u32 vector,
+ u64 dest_addr,
+ union hv_interrupt_control control);
+int hv_call_clear_virtual_interrupt(u64 partition_id);
+int hv_call_get_gpa_access_states(u64 partition_id, u32 count, u64 gpa_base_pfn,
+ union hv_gpa_page_access_state_flags state_flags,
+ int *written_total,
+ union hv_gpa_page_access_state *states);
+int hv_call_get_vp_state(u32 vp_index, u64 partition_id,
+ struct hv_vp_state_data state_data,
+ /* Choose between pages and ret_output */
+ u64 page_count, struct page **pages,
+ union hv_output_get_vp_state *ret_output);
+int hv_call_set_vp_state(u32 vp_index, u64 partition_id,
+ /* Choose between pages and bytes */
+ struct hv_vp_state_data state_data, u64 page_count,
+ struct page **pages, u32 num_bytes, u8 *bytes);
+int hv_call_map_vp_state_page(u64 partition_id, u32 vp_index, u32 type,
+ union hv_input_vtl input_vtl,
+ struct page **state_page);
+int hv_call_unmap_vp_state_page(u64 partition_id, u32 vp_index, u32 type,
+ union hv_input_vtl input_vtl);
+int hv_call_create_port(u64 port_partition_id, union hv_port_id port_id,
+ u64 connection_partition_id, struct hv_port_info *port_info,
+ u8 port_vtl, u8 min_connection_vtl, int node);
+int hv_call_delete_port(u64 port_partition_id, union hv_port_id port_id);
+int hv_call_connect_port(u64 port_partition_id, union hv_port_id port_id,
+ u64 connection_partition_id,
+ union hv_connection_id connection_id,
+ struct hv_connection_info *connection_info,
+ u8 connection_vtl, int node);
+int hv_call_disconnect_port(u64 connection_partition_id,
+ union hv_connection_id connection_id);
+int hv_call_notify_port_ring_empty(u32 sint_index);
+int hv_call_map_stat_page(enum hv_stats_object_type type,
+ const union hv_stats_object_identity *identity,
+ void **addr);
+int hv_call_unmap_stat_page(enum hv_stats_object_type type,
+ const union hv_stats_object_identity *identity);
+int hv_call_modify_spa_host_access(u64 partition_id, struct page **pages,
+ u64 page_struct_count, u32 host_access,
+ u32 flags, u8 acquire);
+
+extern struct mshv_root mshv_root;
+extern enum hv_scheduler_type hv_scheduler_type;
+extern u8 * __percpu *hv_synic_eventring_tail;
+
+#endif /* _MSHV_ROOT_H_ */
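To make the doorbell plumbing concrete, a minimal sketch of a consumer of mshv_register_doorbell()/mshv_unregister_doorbell() is shown below, modelled on the ioeventfd path earlier in this patch; everything named example_* is hypothetical:

    static void example_doorbell_cb(int doorbell_id, void *data)
    {
            struct mshv_partition *pt = data;

            /* Runs in interrupt context: only signal, defer real work */
            pt_dbg(pt, "doorbell %d rang\n", doorbell_id);
    }

    static int example_register_doorbell(struct mshv_partition *pt, u64 gpa)
    {
            int id;

            /* Trigger on any value written to gpa */
            id = mshv_register_doorbell(pt->pt_id, example_doorbell_cb, pt, gpa,
                                        0, HV_DOORBELL_FLAG_TRIGGER_ANY_VALUE);
            if (id < 0)
                    return id;

            /* Remember id; later: mshv_unregister_doorbell(pt->pt_id, id) */
            return 0;
    }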
diff --git a/drivers/hv/mshv_root_hv_call.c b/drivers/hv/mshv_root_hv_call.c
new file mode 100644
index 000000000000..a222a16107f6
--- /dev/null
+++ b/drivers/hv/mshv_root_hv_call.c
@@ -0,0 +1,849 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2023, Microsoft Corporation.
+ *
+ * Hypercall helper functions used by the mshv_root module.
+ *
+ * Authors: Microsoft Linux virtualization team
+ */
+
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <asm/mshyperv.h>
+
+#include "mshv_root.h"
+
+/* Determined empirically */
+#define HV_INIT_PARTITION_DEPOSIT_PAGES 208
+#define HV_MAP_GPA_DEPOSIT_PAGES 256
+#define HV_UMAP_GPA_PAGES 512
+
+#define HV_PAGE_COUNT_2M_ALIGNED(pg_count) (!((pg_count) & (0x200 - 1)))
+
+#define HV_WITHDRAW_BATCH_SIZE (HV_HYP_PAGE_SIZE / sizeof(u64))
+#define HV_MAP_GPA_BATCH_SIZE \
+ ((HV_HYP_PAGE_SIZE - sizeof(struct hv_input_map_gpa_pages)) \
+ / sizeof(u64))
+#define HV_GET_VP_STATE_BATCH_SIZE \
+ ((HV_HYP_PAGE_SIZE - sizeof(struct hv_input_get_vp_state)) \
+ / sizeof(u64))
+#define HV_SET_VP_STATE_BATCH_SIZE \
+ ((HV_HYP_PAGE_SIZE - sizeof(struct hv_input_set_vp_state)) \
+ / sizeof(u64))
+#define HV_GET_GPA_ACCESS_STATES_BATCH_SIZE \
+ ((HV_HYP_PAGE_SIZE - sizeof(union hv_gpa_page_access_state)) \
+ / sizeof(union hv_gpa_page_access_state))
+#define HV_MODIFY_SPARSE_SPA_PAGE_HOST_ACCESS_MAX_PAGE_COUNT \
+ ((HV_HYP_PAGE_SIZE - \
+ sizeof(struct hv_input_modify_sparse_spa_page_host_access)) / \
+ sizeof(u64))
+
+int hv_call_withdraw_memory(u64 count, int node, u64 partition_id)
+{
+ struct hv_input_withdraw_memory *input_page;
+ struct hv_output_withdraw_memory *output_page;
+ struct page *page;
+ u16 completed;
+ unsigned long remaining = count;
+ u64 status;
+ int i;
+ unsigned long flags;
+
+ page = alloc_page(GFP_KERNEL);
+ if (!page)
+ return -ENOMEM;
+ output_page = page_address(page);
+
+ while (remaining) {
+ local_irq_save(flags);
+
+ input_page = *this_cpu_ptr(hyperv_pcpu_input_arg);
+
+ memset(input_page, 0, sizeof(*input_page));
+ input_page->partition_id = partition_id;
+ status = hv_do_rep_hypercall(HVCALL_WITHDRAW_MEMORY,
+ min(remaining, HV_WITHDRAW_BATCH_SIZE),
+ 0, input_page, output_page);
+
+ local_irq_restore(flags);
+
+ completed = hv_repcomp(status);
+
+ for (i = 0; i < completed; i++)
+ __free_page(pfn_to_page(output_page->gpa_page_list[i]));
+
+ if (!hv_result_success(status)) {
+ if (hv_result(status) == HV_STATUS_NO_RESOURCES)
+ status = HV_STATUS_SUCCESS;
+ break;
+ }
+
+ remaining -= completed;
+ }
+ free_page((unsigned long)output_page);
+
+ return hv_result_to_errno(status);
+}
+
+int hv_call_create_partition(u64 flags,
+ struct hv_partition_creation_properties creation_properties,
+ union hv_partition_isolation_properties isolation_properties,
+ u64 *partition_id)
+{
+ struct hv_input_create_partition *input;
+ struct hv_output_create_partition *output;
+ u64 status;
+ int ret;
+ unsigned long irq_flags;
+
+ do {
+ local_irq_save(irq_flags);
+ input = *this_cpu_ptr(hyperv_pcpu_input_arg);
+ output = *this_cpu_ptr(hyperv_pcpu_output_arg);
+
+ memset(input, 0, sizeof(*input));
+ input->flags = flags;
+ input->compatibility_version = HV_COMPATIBILITY_21_H2;
+
+ memcpy(&input->partition_creation_properties, &creation_properties,
+ sizeof(creation_properties));
+
+ memcpy(&input->isolation_properties, &isolation_properties,
+ sizeof(isolation_properties));
+
+ status = hv_do_hypercall(HVCALL_CREATE_PARTITION,
+ input, output);
+
+ if (hv_result(status) != HV_STATUS_INSUFFICIENT_MEMORY) {
+ if (hv_result_success(status))
+ *partition_id = output->partition_id;
+ local_irq_restore(irq_flags);
+ ret = hv_result_to_errno(status);
+ break;
+ }
+ local_irq_restore(irq_flags);
+ ret = hv_call_deposit_pages(NUMA_NO_NODE,
+ hv_current_partition_id, 1);
+ } while (!ret);
+
+ return ret;
+}
+
+int hv_call_initialize_partition(u64 partition_id)
+{
+ struct hv_input_initialize_partition input;
+ u64 status;
+ int ret;
+
+ input.partition_id = partition_id;
+
+ ret = hv_call_deposit_pages(NUMA_NO_NODE, partition_id,
+ HV_INIT_PARTITION_DEPOSIT_PAGES);
+ if (ret)
+ return ret;
+
+ do {
+ status = hv_do_fast_hypercall8(HVCALL_INITIALIZE_PARTITION,
+ *(u64 *)&input);
+
+ if (hv_result(status) != HV_STATUS_INSUFFICIENT_MEMORY) {
+ ret = hv_result_to_errno(status);
+ break;
+ }
+ ret = hv_call_deposit_pages(NUMA_NO_NODE, partition_id, 1);
+ } while (!ret);
+
+ return ret;
+}
+
+int hv_call_finalize_partition(u64 partition_id)
+{
+ struct hv_input_finalize_partition input;
+ u64 status;
+
+ input.partition_id = partition_id;
+ status = hv_do_fast_hypercall8(HVCALL_FINALIZE_PARTITION,
+ *(u64 *)&input);
+
+ return hv_result_to_errno(status);
+}
+
+int hv_call_delete_partition(u64 partition_id)
+{
+ struct hv_input_delete_partition input;
+ u64 status;
+
+ input.partition_id = partition_id;
+ status = hv_do_fast_hypercall8(HVCALL_DELETE_PARTITION, *(u64 *)&input);
+
+ return hv_result_to_errno(status);
+}
+
+/* Ask the hypervisor to map guest ram pages or the guest mmio space */
+static int hv_do_map_gpa_hcall(u64 partition_id, u64 gfn, u64 page_struct_count,
+ u32 flags, struct page **pages, u64 mmio_spa)
+{
+ struct hv_input_map_gpa_pages *input_page;
+ u64 status, *pfnlist;
+ unsigned long irq_flags, large_shift = 0;
+ int ret = 0, done = 0;
+ u64 page_count = page_struct_count;
+
+ if (page_count == 0 || (pages && mmio_spa))
+ return -EINVAL;
+
+ if (flags & HV_MAP_GPA_LARGE_PAGE) {
+ if (mmio_spa)
+ return -EINVAL;
+
+ if (!HV_PAGE_COUNT_2M_ALIGNED(page_count))
+ return -EINVAL;
+
+ large_shift = HV_HYP_LARGE_PAGE_SHIFT - HV_HYP_PAGE_SHIFT;
+ page_count >>= large_shift;
+ }
+
+ while (done < page_count) {
+ ulong i, completed, remain = page_count - done;
+ int rep_count = min(remain, HV_MAP_GPA_BATCH_SIZE);
+
+ local_irq_save(irq_flags);
+ input_page = *this_cpu_ptr(hyperv_pcpu_input_arg);
+
+ input_page->target_partition_id = partition_id;
+ input_page->target_gpa_base = gfn + (done << large_shift);
+ input_page->map_flags = flags;
+ pfnlist = input_page->source_gpa_page_list;
+
+ for (i = 0; i < rep_count; i++)
+ if (flags & HV_MAP_GPA_NO_ACCESS) {
+ pfnlist[i] = 0;
+ } else if (pages) {
+ u64 index = (done + i) << large_shift;
+
+ if (index >= page_struct_count) {
+ ret = -EINVAL;
+ break;
+ }
+ pfnlist[i] = page_to_pfn(pages[index]);
+ } else {
+ pfnlist[i] = mmio_spa + done + i;
+ }
+ if (ret)
+ break;
+
+ status = hv_do_rep_hypercall(HVCALL_MAP_GPA_PAGES, rep_count, 0,
+ input_page, NULL);
+ local_irq_restore(irq_flags);
+
+ completed = hv_repcomp(status);
+
+ if (hv_result(status) == HV_STATUS_INSUFFICIENT_MEMORY) {
+ ret = hv_call_deposit_pages(NUMA_NO_NODE, partition_id,
+ HV_MAP_GPA_DEPOSIT_PAGES);
+ if (ret)
+ break;
+
+ } else if (!hv_result_success(status)) {
+ ret = hv_result_to_errno(status);
+ break;
+ }
+
+ done += completed;
+ }
+
+ if (ret && done) {
+ u32 unmap_flags = 0;
+
+ if (flags & HV_MAP_GPA_LARGE_PAGE)
+ unmap_flags |= HV_UNMAP_GPA_LARGE_PAGE;
+ hv_call_unmap_gpa_pages(partition_id, gfn, done, unmap_flags);
+ }
+
+ return ret;
+}
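For reference, the large-page arithmetic in hv_do_map_gpa_hcall() works out as follows, assuming the usual 4 KiB base page and 2 MiB large page (HV_HYP_PAGE_SHIFT = 12, HV_HYP_LARGE_PAGE_SHIFT = 21):

    page_struct_count = 0x400           /* 1024 x 4 KiB struct pages = 4 MiB */
    flags            |= HV_MAP_GPA_LARGE_PAGE

    large_shift = 21 - 12 = 9
    page_count  = 0x400 >> 9 = 2        /* hypercall reps, one per 2 MiB run */

Each rep i then supplies pfnlist[i] = page_to_pfn(pages[(done + i) << 9]), i.e. the head struct page of each 2 MiB run, while HV_PAGE_COUNT_2M_ALIGNED() rejects any count that is not a multiple of 512 (the 0x200 - 1 mask).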
+
+/* Ask the hypervisor to map guest ram pages */
+int hv_call_map_gpa_pages(u64 partition_id, u64 gpa_target, u64 page_count,
+ u32 flags, struct page **pages)
+{
+ return hv_do_map_gpa_hcall(partition_id, gpa_target, page_count,
+ flags, pages, 0);
+}
+
+/* Ask the hypervisor to map guest mmio space */
+int hv_call_map_mmio_pages(u64 partition_id, u64 gfn, u64 mmio_spa, u64 numpgs)
+{
+ int i;
+ u32 flags = HV_MAP_GPA_READABLE | HV_MAP_GPA_WRITABLE |
+ HV_MAP_GPA_NOT_CACHED;
+
+ for (i = 0; i < numpgs; i++)
+ if (page_is_ram(mmio_spa + i))
+ return -EINVAL;
+
+ return hv_do_map_gpa_hcall(partition_id, gfn, numpgs, flags, NULL,
+ mmio_spa);
+}
+
+int hv_call_unmap_gpa_pages(u64 partition_id, u64 gfn, u64 page_count_4k,
+ u32 flags)
+{
+ struct hv_input_unmap_gpa_pages *input_page;
+ u64 status, page_count = page_count_4k;
+ unsigned long irq_flags, large_shift = 0;
+ int ret = 0, done = 0;
+
+ if (page_count == 0)
+ return -EINVAL;
+
+ if (flags & HV_UNMAP_GPA_LARGE_PAGE) {
+ if (!HV_PAGE_COUNT_2M_ALIGNED(page_count))
+ return -EINVAL;
+
+ large_shift = HV_HYP_LARGE_PAGE_SHIFT - HV_HYP_PAGE_SHIFT;
+ page_count >>= large_shift;
+ }
+
+ while (done < page_count) {
+ ulong completed, remain = page_count - done;
+ int rep_count = min(remain, HV_UMAP_GPA_PAGES);
+
+ local_irq_save(irq_flags);
+ input_page = *this_cpu_ptr(hyperv_pcpu_input_arg);
+
+ input_page->target_partition_id = partition_id;
+ input_page->target_gpa_base = gfn + (done << large_shift);
+ input_page->unmap_flags = flags;
+ status = hv_do_rep_hypercall(HVCALL_UNMAP_GPA_PAGES, rep_count,
+ 0, input_page, NULL);
+ local_irq_restore(irq_flags);
+
+ completed = hv_repcomp(status);
+ if (!hv_result_success(status)) {
+ ret = hv_result_to_errno(status);
+ break;
+ }
+
+ done += completed;
+ }
+
+ return ret;
+}
+
+int hv_call_get_gpa_access_states(u64 partition_id, u32 count, u64 gpa_base_pfn,
+ union hv_gpa_page_access_state_flags state_flags,
+ int *written_total,
+ union hv_gpa_page_access_state *states)
+{
+ struct hv_input_get_gpa_pages_access_state *input_page;
+ union hv_gpa_page_access_state *output_page;
+ int completed = 0;
+ unsigned long remaining = count;
+ int rep_count, i;
+ u64 status = 0;
+ unsigned long flags;
+
+ *written_total = 0;
+ while (remaining) {
+ local_irq_save(flags);
+ input_page = *this_cpu_ptr(hyperv_pcpu_input_arg);
+ output_page = *this_cpu_ptr(hyperv_pcpu_output_arg);
+
+ input_page->partition_id = partition_id;
+ input_page->hv_gpa_page_number = gpa_base_pfn + *written_total;
+ input_page->flags = state_flags;
+ rep_count = min(remaining, HV_GET_GPA_ACCESS_STATES_BATCH_SIZE);
+
+ status = hv_do_rep_hypercall(HVCALL_GET_GPA_PAGES_ACCESS_STATES, rep_count,
+ 0, input_page, output_page);
+ if (!hv_result_success(status)) {
+ local_irq_restore(flags);
+ break;
+ }
+ completed = hv_repcomp(status);
+ for (i = 0; i < completed; ++i)
+ states[i].as_uint8 = output_page[i].as_uint8;
+
+ local_irq_restore(flags);
+ states += completed;
+ *written_total += completed;
+ remaining -= completed;
+ }
+
+ return hv_result_to_errno(status);
+}
+
+int hv_call_assert_virtual_interrupt(u64 partition_id, u32 vector,
+ u64 dest_addr,
+ union hv_interrupt_control control)
+{
+ struct hv_input_assert_virtual_interrupt *input;
+ unsigned long flags;
+ u64 status;
+
+ local_irq_save(flags);
+ input = *this_cpu_ptr(hyperv_pcpu_input_arg);
+ memset(input, 0, sizeof(*input));
+ input->partition_id = partition_id;
+ input->vector = vector;
+ input->dest_addr = dest_addr;
+ input->control = control;
+ status = hv_do_hypercall(HVCALL_ASSERT_VIRTUAL_INTERRUPT, input, NULL);
+ local_irq_restore(flags);
+
+ return hv_result_to_errno(status);
+}
+
+int hv_call_delete_vp(u64 partition_id, u32 vp_index)
+{
+ union hv_input_delete_vp input = {};
+ u64 status;
+
+ input.partition_id = partition_id;
+ input.vp_index = vp_index;
+
+ status = hv_do_fast_hypercall16(HVCALL_DELETE_VP,
+ input.as_uint64[0], input.as_uint64[1]);
+
+ return hv_result_to_errno(status);
+}
+EXPORT_SYMBOL_GPL(hv_call_delete_vp);
+
+int hv_call_get_vp_state(u32 vp_index, u64 partition_id,
+ struct hv_vp_state_data state_data,
+ /* Choose between pages and ret_output */
+ u64 page_count, struct page **pages,
+ union hv_output_get_vp_state *ret_output)
+{
+ struct hv_input_get_vp_state *input;
+ union hv_output_get_vp_state *output;
+ u64 status;
+ int i;
+ u64 control;
+ unsigned long flags;
+ int ret = 0;
+
+ if (page_count > HV_GET_VP_STATE_BATCH_SIZE)
+ return -EINVAL;
+
+ if (!page_count && !ret_output)
+ return -EINVAL;
+
+ do {
+ local_irq_save(flags);
+ input = *this_cpu_ptr(hyperv_pcpu_input_arg);
+ output = *this_cpu_ptr(hyperv_pcpu_output_arg);
+ memset(input, 0, sizeof(*input));
+ memset(output, 0, sizeof(*output));
+
+ input->partition_id = partition_id;
+ input->vp_index = vp_index;
+ input->state_data = state_data;
+ for (i = 0; i < page_count; i++)
+ input->output_data_pfns[i] = page_to_pfn(pages[i]);
+
+ control = (HVCALL_GET_VP_STATE) |
+ (page_count << HV_HYPERCALL_VARHEAD_OFFSET);
+
+ status = hv_do_hypercall(control, input, output);
+
+ if (hv_result(status) != HV_STATUS_INSUFFICIENT_MEMORY) {
+ if (hv_result_success(status) && ret_output)
+ memcpy(ret_output, output, sizeof(*output));
+
+ local_irq_restore(flags);
+ ret = hv_result_to_errno(status);
+ break;
+ }
+ local_irq_restore(flags);
+
+ ret = hv_call_deposit_pages(NUMA_NO_NODE,
+ partition_id, 1);
+ } while (!ret);
+
+ return ret;
+}
+
+int hv_call_set_vp_state(u32 vp_index, u64 partition_id,
+ /* Choose between pages and bytes */
+ struct hv_vp_state_data state_data, u64 page_count,
+ struct page **pages, u32 num_bytes, u8 *bytes)
+{
+ struct hv_input_set_vp_state *input;
+ u64 status;
+ int i;
+ u64 control;
+ unsigned long flags;
+ int ret = 0;
+ u16 varhead_sz;
+
+ if (page_count > HV_SET_VP_STATE_BATCH_SIZE)
+ return -EINVAL;
+ if (sizeof(*input) + num_bytes > HV_HYP_PAGE_SIZE)
+ return -EINVAL;
+
+ if (num_bytes)
+ /* round up to 8 and divide by 8 */
+ varhead_sz = (num_bytes + 7) >> 3;
+ else if (page_count)
+ varhead_sz = page_count;
+ else
+ return -EINVAL;
+
+ do {
+ local_irq_save(flags);
+ input = *this_cpu_ptr(hyperv_pcpu_input_arg);
+ memset(input, 0, sizeof(*input));
+
+ input->partition_id = partition_id;
+ input->vp_index = vp_index;
+ input->state_data = state_data;
+ if (num_bytes) {
+ memcpy((u8 *)input->data, bytes, num_bytes);
+ } else {
+ for (i = 0; i < page_count; i++)
+ input->data[i].pfns = page_to_pfn(pages[i]);
+ }
+
+ control = (HVCALL_SET_VP_STATE) |
+ (varhead_sz << HV_HYPERCALL_VARHEAD_OFFSET);
+
+ status = hv_do_hypercall(control, input, NULL);
+
+ if (hv_result(status) != HV_STATUS_INSUFFICIENT_MEMORY) {
+ local_irq_restore(flags);
+ ret = hv_result_to_errno(status);
+ break;
+ }
+ local_irq_restore(flags);
+
+ ret = hv_call_deposit_pages(NUMA_NO_NODE,
+ partition_id, 1);
+ } while (!ret);
+
+ return ret;
+}
+
+int hv_call_map_vp_state_page(u64 partition_id, u32 vp_index, u32 type,
+ union hv_input_vtl input_vtl,
+ struct page **state_page)
+{
+ struct hv_input_map_vp_state_page *input;
+ struct hv_output_map_vp_state_page *output;
+ u64 status;
+ int ret;
+ unsigned long flags;
+
+ do {
+ local_irq_save(flags);
+
+ input = *this_cpu_ptr(hyperv_pcpu_input_arg);
+ output = *this_cpu_ptr(hyperv_pcpu_output_arg);
+
+ input->partition_id = partition_id;
+ input->vp_index = vp_index;
+ input->type = type;
+ input->input_vtl = input_vtl;
+
+ status = hv_do_hypercall(HVCALL_MAP_VP_STATE_PAGE, input, output);
+
+ if (hv_result(status) != HV_STATUS_INSUFFICIENT_MEMORY) {
+ if (hv_result_success(status))
+ *state_page = pfn_to_page(output->map_location);
+ local_irq_restore(flags);
+ ret = hv_result_to_errno(status);
+ break;
+ }
+
+ local_irq_restore(flags);
+
+ ret = hv_call_deposit_pages(NUMA_NO_NODE, partition_id, 1);
+ } while (!ret);
+
+ return ret;
+}
+
+int hv_call_unmap_vp_state_page(u64 partition_id, u32 vp_index, u32 type,
+ union hv_input_vtl input_vtl)
+{
+ unsigned long flags;
+ u64 status;
+ struct hv_input_unmap_vp_state_page *input;
+
+ local_irq_save(flags);
+
+ input = *this_cpu_ptr(hyperv_pcpu_input_arg);
+
+ memset(input, 0, sizeof(*input));
+
+ input->partition_id = partition_id;
+ input->vp_index = vp_index;
+ input->type = type;
+ input->input_vtl = input_vtl;
+
+ status = hv_do_hypercall(HVCALL_UNMAP_VP_STATE_PAGE, input, NULL);
+
+ local_irq_restore(flags);
+
+ return hv_result_to_errno(status);
+}
+
+int
+hv_call_clear_virtual_interrupt(u64 partition_id)
+{
+ int status;
+
+ status = hv_do_fast_hypercall8(HVCALL_CLEAR_VIRTUAL_INTERRUPT,
+ partition_id);
+
+ return hv_result_to_errno(status);
+}
+
+int
+hv_call_create_port(u64 port_partition_id, union hv_port_id port_id,
+ u64 connection_partition_id,
+ struct hv_port_info *port_info,
+ u8 port_vtl, u8 min_connection_vtl, int node)
+{
+ struct hv_input_create_port *input;
+ unsigned long flags;
+ int ret = 0;
+ int status;
+
+ do {
+ local_irq_save(flags);
+ input = *this_cpu_ptr(hyperv_pcpu_input_arg);
+ memset(input, 0, sizeof(*input));
+
+ input->port_partition_id = port_partition_id;
+ input->port_id = port_id;
+ input->connection_partition_id = connection_partition_id;
+ input->port_info = *port_info;
+ input->port_vtl = port_vtl;
+ input->min_connection_vtl = min_connection_vtl;
+ input->proximity_domain_info = hv_numa_node_to_pxm_info(node);
+ status = hv_do_hypercall(HVCALL_CREATE_PORT, input, NULL);
+ local_irq_restore(flags);
+ if (hv_result_success(status))
+ break;
+
+ if (hv_result(status) != HV_STATUS_INSUFFICIENT_MEMORY) {
+ ret = hv_result_to_errno(status);
+ break;
+ }
+ ret = hv_call_deposit_pages(NUMA_NO_NODE, port_partition_id, 1);
+
+ } while (!ret);
+
+ return ret;
+}
+
+int
+hv_call_delete_port(u64 port_partition_id, union hv_port_id port_id)
+{
+ union hv_input_delete_port input = { 0 };
+ int status;
+
+ input.port_partition_id = port_partition_id;
+ input.port_id = port_id;
+ status = hv_do_fast_hypercall16(HVCALL_DELETE_PORT,
+ input.as_uint64[0],
+ input.as_uint64[1]);
+
+ return hv_result_to_errno(status);
+}
+
+int
+hv_call_connect_port(u64 port_partition_id, union hv_port_id port_id,
+ u64 connection_partition_id,
+ union hv_connection_id connection_id,
+ struct hv_connection_info *connection_info,
+ u8 connection_vtl, int node)
+{
+ struct hv_input_connect_port *input;
+ unsigned long flags;
+ int ret = 0, status;
+
+ do {
+ local_irq_save(flags);
+ input = *this_cpu_ptr(hyperv_pcpu_input_arg);
+ memset(input, 0, sizeof(*input));
+ input->port_partition_id = port_partition_id;
+ input->port_id = port_id;
+ input->connection_partition_id = connection_partition_id;
+ input->connection_id = connection_id;
+ input->connection_info = *connection_info;
+ input->connection_vtl = connection_vtl;
+ input->proximity_domain_info = hv_numa_node_to_pxm_info(node);
+ status = hv_do_hypercall(HVCALL_CONNECT_PORT, input, NULL);
+
+ local_irq_restore(flags);
+ if (hv_result_success(status))
+ break;
+
+ if (hv_result(status) != HV_STATUS_INSUFFICIENT_MEMORY) {
+ ret = hv_result_to_errno(status);
+ break;
+ }
+ ret = hv_call_deposit_pages(NUMA_NO_NODE,
+ connection_partition_id, 1);
+ } while (!ret);
+
+ return ret;
+}
+
+int
+hv_call_disconnect_port(u64 connection_partition_id,
+ union hv_connection_id connection_id)
+{
+ union hv_input_disconnect_port input = { 0 };
+ int status;
+
+ input.connection_partition_id = connection_partition_id;
+ input.connection_id = connection_id;
+ input.is_doorbell = 1;
+ status = hv_do_fast_hypercall16(HVCALL_DISCONNECT_PORT,
+ input.as_uint64[0],
+ input.as_uint64[1]);
+
+ return hv_result_to_errno(status);
+}
+
+int
+hv_call_notify_port_ring_empty(u32 sint_index)
+{
+ union hv_input_notify_port_ring_empty input = { 0 };
+ int status;
+
+ input.sint_index = sint_index;
+ status = hv_do_fast_hypercall8(HVCALL_NOTIFY_PORT_RING_EMPTY,
+ input.as_uint64);
+
+ return hv_result_to_errno(status);
+}
+
+int hv_call_map_stat_page(enum hv_stats_object_type type,
+ const union hv_stats_object_identity *identity,
+ void **addr)
+{
+ unsigned long flags;
+ struct hv_input_map_stats_page *input;
+ struct hv_output_map_stats_page *output;
+ u64 status, pfn;
+ int ret = 0;
+
+ do {
+ local_irq_save(flags);
+ input = *this_cpu_ptr(hyperv_pcpu_input_arg);
+ output = *this_cpu_ptr(hyperv_pcpu_output_arg);
+
+ memset(input, 0, sizeof(*input));
+ input->type = type;
+ input->identity = *identity;
+
+ status = hv_do_hypercall(HVCALL_MAP_STATS_PAGE, input, output);
+ pfn = output->map_location;
+
+ local_irq_restore(flags);
+ if (hv_result(status) != HV_STATUS_INSUFFICIENT_MEMORY) {
+ ret = hv_result_to_errno(status);
+ if (hv_result_success(status))
+ break;
+ return ret;
+ }
+
+ ret = hv_call_deposit_pages(NUMA_NO_NODE,
+ hv_current_partition_id, 1);
+ if (ret)
+ return ret;
+ } while (!ret);
+
+ *addr = page_address(pfn_to_page(pfn));
+
+ return ret;
+}
+
+int hv_call_unmap_stat_page(enum hv_stats_object_type type,
+ const union hv_stats_object_identity *identity)
+{
+ unsigned long flags;
+ struct hv_input_unmap_stats_page *input;
+ u64 status;
+
+ local_irq_save(flags);
+ input = *this_cpu_ptr(hyperv_pcpu_input_arg);
+
+ memset(input, 0, sizeof(*input));
+ input->type = type;
+ input->identity = *identity;
+
+ status = hv_do_hypercall(HVCALL_UNMAP_STATS_PAGE, input, NULL);
+ local_irq_restore(flags);
+
+ return hv_result_to_errno(status);
+}
+
+int hv_call_modify_spa_host_access(u64 partition_id, struct page **pages,
+ u64 page_struct_count, u32 host_access,
+ u32 flags, u8 acquire)
+{
+ struct hv_input_modify_sparse_spa_page_host_access *input_page;
+ u64 status;
+ int done = 0;
+ unsigned long irq_flags, large_shift = 0;
+ u64 page_count = page_struct_count;
+ u16 code = acquire ? HVCALL_ACQUIRE_SPARSE_SPA_PAGE_HOST_ACCESS :
+ HVCALL_RELEASE_SPARSE_SPA_PAGE_HOST_ACCESS;
+
+ if (page_count == 0)
+ return -EINVAL;
+
+ if (flags & HV_MODIFY_SPA_PAGE_HOST_ACCESS_LARGE_PAGE) {
+ if (!HV_PAGE_COUNT_2M_ALIGNED(page_count))
+ return -EINVAL;
+ large_shift = HV_HYP_LARGE_PAGE_SHIFT - HV_HYP_PAGE_SHIFT;
+ page_count >>= large_shift;
+ }
+
+ while (done < page_count) {
+ ulong i, completed, remain = page_count - done;
+ int rep_count = min(remain,
+ HV_MODIFY_SPARSE_SPA_PAGE_HOST_ACCESS_MAX_PAGE_COUNT);
+
+ local_irq_save(irq_flags);
+ input_page = *this_cpu_ptr(hyperv_pcpu_input_arg);
+
+ memset(input_page, 0, sizeof(*input_page));
+ /*
+ * Only set the partition id if you are making the pages exclusive
+ */
+ if (flags & HV_MODIFY_SPA_PAGE_HOST_ACCESS_MAKE_EXCLUSIVE)
+ input_page->partition_id = partition_id;
+ input_page->flags = flags;
+ input_page->host_access = host_access;
+
+ for (i = 0; i < rep_count; i++) {
+ u64 index = (done + i) << large_shift;
+
+ if (index >= page_struct_count)
+ return -EINVAL;
+
+ input_page->spa_page_list[i] =
+ page_to_pfn(pages[index]);
+ }
+
+ status = hv_do_rep_hypercall(code, rep_count, 0, input_page,
+ NULL);
+ local_irq_restore(irq_flags);
+
+ completed = hv_repcomp(status);
+
+ if (!hv_result_success(status))
+ return hv_result_to_errno(status);
+
+ done += completed;
+ }
+
+ return 0;
+}
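Most helpers in this file share the same retry shape: issue the hypercall with interrupts disabled, and on HV_STATUS_INSUFFICIENT_MEMORY deposit a page into the target partition and try again. A distilled sketch of that pattern follows; the input layout here is a placeholder for illustration, not a real hypervisor ABI type:

    static int example_hvcall_with_deposit(u64 partition_id, u16 code)
    {
            struct {
                    u64 partition_id;       /* placeholder input layout */
            } *input;
            unsigned long flags;
            u64 status;
            int ret;

            do {
                    local_irq_save(flags);
                    input = *this_cpu_ptr(hyperv_pcpu_input_arg);
                    memset(input, 0, sizeof(*input));
                    input->partition_id = partition_id;

                    status = hv_do_hypercall(code, input, NULL);
                    local_irq_restore(flags);

                    /* Any result other than "needs memory" is final */
                    if (hv_result(status) != HV_STATUS_INSUFFICIENT_MEMORY)
                            return hv_result_to_errno(status);

                    /* Give the hypervisor a page for this partition and retry */
                    ret = hv_call_deposit_pages(NUMA_NO_NODE, partition_id, 1);
            } while (!ret);

            return ret;
    }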
diff --git a/drivers/hv/mshv_root_main.c b/drivers/hv/mshv_root_main.c
new file mode 100644
index 000000000000..72df774e410a
--- /dev/null
+++ b/drivers/hv/mshv_root_main.c
@@ -0,0 +1,2307 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2024, Microsoft Corporation.
+ *
+ * The main part of the mshv_root module, providing APIs to create
+ * and manage guest partitions.
+ *
+ * Authors: Microsoft Linux virtualization team
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <linux/slab.h>
+#include <linux/file.h>
+#include <linux/anon_inodes.h>
+#include <linux/mm.h>
+#include <linux/io.h>
+#include <linux/cpuhotplug.h>
+#include <linux/random.h>
+#include <asm/mshyperv.h>
+#include <linux/hyperv.h>
+#include <linux/notifier.h>
+#include <linux/reboot.h>
+#include <linux/kexec.h>
+#include <linux/page-flags.h>
+#include <linux/crash_dump.h>
+#include <linux/panic_notifier.h>
+#include <linux/vmalloc.h>
+
+#include "mshv_eventfd.h"
+#include "mshv.h"
+#include "mshv_root.h"
+
+MODULE_AUTHOR("Microsoft");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Microsoft Hyper-V root partition VMM interface /dev/mshv");
+
+/* TODO move this to mshyperv.h when needed outside driver */
+static inline bool hv_parent_partition(void)
+{
+ return hv_root_partition();
+}
+
+/* TODO move this to another file when debugfs code is added */
+enum hv_stats_vp_counters { /* HV_THREAD_COUNTER */
+#if defined(CONFIG_X86)
+ VpRootDispatchThreadBlocked = 201,
+#elif defined(CONFIG_ARM64)
+ VpRootDispatchThreadBlocked = 94,
+#endif
+ VpStatsMaxCounter
+};
+
+struct hv_stats_page {
+ union {
+ u64 vp_cntrs[VpStatsMaxCounter]; /* VP counters */
+ u8 data[HV_HYP_PAGE_SIZE];
+ };
+} __packed;
+
+struct mshv_root mshv_root;
+
+enum hv_scheduler_type hv_scheduler_type;
+
+/* Once the fast extended hypercall ABI is implemented, these per-CPU buffers can go away. */
+static void * __percpu *root_scheduler_input;
+static void * __percpu *root_scheduler_output;
+
+static long mshv_dev_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg);
+static int mshv_dev_open(struct inode *inode, struct file *filp);
+static int mshv_dev_release(struct inode *inode, struct file *filp);
+static int mshv_vp_release(struct inode *inode, struct file *filp);
+static long mshv_vp_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg);
+static int mshv_partition_release(struct inode *inode, struct file *filp);
+static long mshv_partition_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg);
+static int mshv_vp_mmap(struct file *file, struct vm_area_struct *vma);
+static vm_fault_t mshv_vp_fault(struct vm_fault *vmf);
+static int mshv_init_async_handler(struct mshv_partition *partition);
+static void mshv_async_hvcall_handler(void *data, u64 *status);
+
+static const union hv_input_vtl input_vtl_zero;
+static const union hv_input_vtl input_vtl_normal = {
+ .target_vtl = HV_NORMAL_VTL,
+ .use_target_vtl = 1,
+};
+
+static const struct vm_operations_struct mshv_vp_vm_ops = {
+ .fault = mshv_vp_fault,
+};
+
+static const struct file_operations mshv_vp_fops = {
+ .owner = THIS_MODULE,
+ .release = mshv_vp_release,
+ .unlocked_ioctl = mshv_vp_ioctl,
+ .llseek = noop_llseek,
+ .mmap = mshv_vp_mmap,
+};
+
+static const struct file_operations mshv_partition_fops = {
+ .owner = THIS_MODULE,
+ .release = mshv_partition_release,
+ .unlocked_ioctl = mshv_partition_ioctl,
+ .llseek = noop_llseek,
+};
+
+static const struct file_operations mshv_dev_fops = {
+ .owner = THIS_MODULE,
+ .open = mshv_dev_open,
+ .release = mshv_dev_release,
+ .unlocked_ioctl = mshv_dev_ioctl,
+ .llseek = noop_llseek,
+};
+
+static struct miscdevice mshv_dev = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "mshv",
+ .fops = &mshv_dev_fops,
+ .mode = 0600,
+};
+
+/*
+ * Only allow hypercalls that have a u64 partition id as the first member of
+ * the input structure.
+ * These are sorted by value.
+ */
+static u16 mshv_passthru_hvcalls[] = {
+ HVCALL_GET_PARTITION_PROPERTY,
+ HVCALL_SET_PARTITION_PROPERTY,
+ HVCALL_INSTALL_INTERCEPT,
+ HVCALL_GET_VP_REGISTERS,
+ HVCALL_SET_VP_REGISTERS,
+ HVCALL_TRANSLATE_VIRTUAL_ADDRESS,
+ HVCALL_CLEAR_VIRTUAL_INTERRUPT,
+ HVCALL_REGISTER_INTERCEPT_RESULT,
+ HVCALL_ASSERT_VIRTUAL_INTERRUPT,
+ HVCALL_GET_GPA_PAGES_ACCESS_STATES,
+ HVCALL_SIGNAL_EVENT_DIRECT,
+ HVCALL_POST_MESSAGE_DIRECT,
+ HVCALL_GET_VP_CPUID_VALUES,
+};
+
+static bool mshv_hvcall_is_async(u16 code)
+{
+ switch (code) {
+ case HVCALL_SET_PARTITION_PROPERTY:
+ return true;
+ default:
+ break;
+ }
+ return false;
+}
+
+static int mshv_ioctl_passthru_hvcall(struct mshv_partition *partition,
+ bool partition_locked,
+ void __user *user_args)
+{
+ u64 status;
+ int ret = 0, i;
+ bool is_async;
+ struct mshv_root_hvcall args;
+ struct page *page;
+ unsigned int pages_order;
+ void *input_pg = NULL;
+ void *output_pg = NULL;
+
+ if (copy_from_user(&args, user_args, sizeof(args)))
+ return -EFAULT;
+
+ if (args.status || !args.in_ptr || args.in_sz < sizeof(u64) ||
+ mshv_field_nonzero(args, rsvd) || args.in_sz > HV_HYP_PAGE_SIZE)
+ return -EINVAL;
+
+ if (args.out_ptr && (!args.out_sz || args.out_sz > HV_HYP_PAGE_SIZE))
+ return -EINVAL;
+
+ for (i = 0; i < ARRAY_SIZE(mshv_passthru_hvcalls); ++i)
+ if (args.code == mshv_passthru_hvcalls[i])
+ break;
+
+ if (i >= ARRAY_SIZE(mshv_passthru_hvcalls))
+ return -EINVAL;
+
+ is_async = mshv_hvcall_is_async(args.code);
+ if (is_async) {
+ /* async hypercalls can only be called from partition fd */
+ if (!partition_locked)
+ return -EINVAL;
+ ret = mshv_init_async_handler(partition);
+ if (ret)
+ return ret;
+ }
+
+ pages_order = args.out_ptr ? 1 : 0;
+ page = alloc_pages(GFP_KERNEL, pages_order);
+ if (!page)
+ return -ENOMEM;
+ input_pg = page_address(page);
+
+ if (args.out_ptr)
+ output_pg = (char *)input_pg + PAGE_SIZE;
+ else
+ output_pg = NULL;
+
+ if (copy_from_user(input_pg, (void __user *)args.in_ptr,
+ args.in_sz)) {
+ ret = -EFAULT;
+ goto free_pages_out;
+ }
+
+ /*
+ * NOTE: This only works because all the allowed hypercalls' input
+ * structs begin with a u64 partition_id field.
+ */
+ *(u64 *)input_pg = partition->pt_id;
+
+ if (args.reps)
+ status = hv_do_rep_hypercall(args.code, args.reps, 0,
+ input_pg, output_pg);
+ else
+ status = hv_do_hypercall(args.code, input_pg, output_pg);
+
+ if (hv_result(status) == HV_STATUS_CALL_PENDING) {
+ if (is_async) {
+ mshv_async_hvcall_handler(partition, &status);
+ } else { /* Paranoia check. This shouldn't happen! */
+ ret = -EBADFD;
+ goto free_pages_out;
+ }
+ }
+
+ if (hv_result(status) == HV_STATUS_INSUFFICIENT_MEMORY) {
+ ret = hv_call_deposit_pages(NUMA_NO_NODE, partition->pt_id, 1);
+ if (!ret)
+ ret = -EAGAIN;
+ } else if (!hv_result_success(status)) {
+ ret = hv_result_to_errno(status);
+ }
+
+ /*
+ * Always return the status and output data regardless of result.
+ * The VMM may need it to determine how to proceed. E.g. the status may
+ * contain the number of reps completed if a rep hypercall partially
+ * succeeded.
+ */
+ args.status = hv_result(status);
+ args.reps = args.reps ? hv_repcomp(status) : 0;
+ if (copy_to_user(user_args, &args, sizeof(args)))
+ ret = -EFAULT;
+
+ if (output_pg &&
+ copy_to_user((void __user *)args.out_ptr, output_pg, args.out_sz))
+ ret = -EFAULT;
+
+free_pages_out:
+ free_pages((unsigned long)input_pg, pages_order);
+
+ return ret;
+}
+
+static inline bool is_ghcb_mapping_available(void)
+{
+#if IS_ENABLED(CONFIG_X86_64)
+ return ms_hyperv.ext_features & HV_VP_GHCB_ROOT_MAPPING_AVAILABLE;
+#else
+ return false;
+#endif
+}
+
+static int mshv_get_vp_registers(u32 vp_index, u64 partition_id, u16 count,
+ struct hv_register_assoc *registers)
+{
+ return hv_call_get_vp_registers(vp_index, partition_id,
+ count, input_vtl_zero, registers);
+}
+
+static int mshv_set_vp_registers(u32 vp_index, u64 partition_id, u16 count,
+ struct hv_register_assoc *registers)
+{
+ return hv_call_set_vp_registers(vp_index, partition_id,
+ count, input_vtl_zero, registers);
+}
+
+/*
+ * Explicit guest vCPU suspend is asynchronous by nature (as it is requested by
+ * dom0 vCPU for guest vCPU) and thus it can race with "intercept" suspend,
+ * done by the hypervisor.
+ * "Intercept" suspend leads to asynchronous message delivery to dom0 which
+ * should be awaited to keep the VP loop consistent (i.e. no message pending
+ * upon VP resume).
+ * VP intercept suspend can't be done when the VP is explicitly suspended
+ * already, and thus there can be only two possible race scenarios:
+ * 1. implicit suspend bit set -> explicit suspend bit set -> message sent
+ * 2. implicit suspend bit set -> message sent -> explicit suspend bit set
+ * Checking for the implicit suspend bit being set after the explicit suspend
+ * request has succeeded allows us, in either case, to reliably identify
+ * whether there is a message to receive and deliver to the VMM.
+ */
+static int
+mshv_suspend_vp(const struct mshv_vp *vp, bool *message_in_flight)
+{
+ struct hv_register_assoc explicit_suspend = {
+ .name = HV_REGISTER_EXPLICIT_SUSPEND
+ };
+ struct hv_register_assoc intercept_suspend = {
+ .name = HV_REGISTER_INTERCEPT_SUSPEND
+ };
+ union hv_explicit_suspend_register *es =
+ &explicit_suspend.value.explicit_suspend;
+ union hv_intercept_suspend_register *is =
+ &intercept_suspend.value.intercept_suspend;
+ int ret;
+
+ es->suspended = 1;
+
+ ret = mshv_set_vp_registers(vp->vp_index, vp->vp_partition->pt_id,
+ 1, &explicit_suspend);
+ if (ret) {
+ vp_err(vp, "Failed to explicitly suspend vCPU\n");
+ return ret;
+ }
+
+ ret = mshv_get_vp_registers(vp->vp_index, vp->vp_partition->pt_id,
+ 1, &intercept_suspend);
+ if (ret) {
+ vp_err(vp, "Failed to get intercept suspend state\n");
+ return ret;
+ }
+
+ *message_in_flight = is->suspended;
+
+ return 0;
+}
+
+/*
+ * This function is used when VPs are scheduled by the hypervisor's
+ * scheduler.
+ *
+ * Caller has to make sure the registers contain cleared
+ * HV_REGISTER_INTERCEPT_SUSPEND and HV_REGISTER_EXPLICIT_SUSPEND registers
+ * exactly in this order (the hypervisor clears them sequentially), to avoid
+ * invalidly clearing a newly arrived HV_REGISTER_INTERCEPT_SUSPEND after the
+ * VP is released from HV_REGISTER_EXPLICIT_SUSPEND, which could happen with
+ * the opposite order.
+ */
+static long mshv_run_vp_with_hyp_scheduler(struct mshv_vp *vp)
+{
+ long ret;
+ struct hv_register_assoc suspend_regs[2] = {
+ { .name = HV_REGISTER_INTERCEPT_SUSPEND },
+ { .name = HV_REGISTER_EXPLICIT_SUSPEND }
+ };
+ size_t count = ARRAY_SIZE(suspend_regs);
+
+ /* Resume VP execution */
+ ret = mshv_set_vp_registers(vp->vp_index, vp->vp_partition->pt_id,
+ count, suspend_regs);
+ if (ret) {
+ vp_err(vp, "Failed to resume vp execution. %lx\n", ret);
+ return ret;
+ }
+
+ ret = wait_event_interruptible(vp->run.vp_suspend_queue,
+ vp->run.kicked_by_hv == 1);
+ if (ret) {
+ bool message_in_flight;
+
+ /*
+ * The wait was interrupted by a signal: suspend the vCPU
+ * explicitly and copy the message in flight (if any).
+ */
+ ret = mshv_suspend_vp(vp, &message_in_flight);
+ if (ret)
+ return ret;
+
+ /* Return if no message in flight */
+ if (!message_in_flight)
+ return -EINTR;
+
+ /* Wait for the message in flight. */
+ wait_event(vp->run.vp_suspend_queue, vp->run.kicked_by_hv == 1);
+ }
+
+ /*
+ * Reset the flag to make the wait_event call above work
+ * next time.
+ */
+ vp->run.kicked_by_hv = 0;
+
+ return 0;
+}
+
+static int
+mshv_vp_dispatch(struct mshv_vp *vp, u32 flags,
+ struct hv_output_dispatch_vp *res)
+{
+ struct hv_input_dispatch_vp *input;
+ struct hv_output_dispatch_vp *output;
+ u64 status;
+
+ preempt_disable();
+ input = *this_cpu_ptr(root_scheduler_input);
+ output = *this_cpu_ptr(root_scheduler_output);
+
+ memset(input, 0, sizeof(*input));
+ memset(output, 0, sizeof(*output));
+
+ input->partition_id = vp->vp_partition->pt_id;
+ input->vp_index = vp->vp_index;
+ input->time_slice = 0; /* Run forever until something happens */
+ input->spec_ctrl = 0; /* TODO: set sensible flags */
+ input->flags = flags;
+
+ vp->run.flags.root_sched_dispatched = 1;
+ status = hv_do_hypercall(HVCALL_DISPATCH_VP, input, output);
+ vp->run.flags.root_sched_dispatched = 0;
+
+ *res = *output;
+ preempt_enable();
+
+ if (!hv_result_success(status))
+ vp_err(vp, "%s: status %s\n", __func__,
+ hv_result_to_string(status));
+
+ return hv_result_to_errno(status);
+}
+
+static int
+mshv_vp_clear_explicit_suspend(struct mshv_vp *vp)
+{
+ struct hv_register_assoc explicit_suspend = {
+ .name = HV_REGISTER_EXPLICIT_SUSPEND,
+ .value.explicit_suspend.suspended = 0,
+ };
+ int ret;
+
+ ret = mshv_set_vp_registers(vp->vp_index, vp->vp_partition->pt_id,
+ 1, &explicit_suspend);
+
+ if (ret)
+ vp_err(vp, "Failed to unsuspend\n");
+
+ return ret;
+}
+
+#if IS_ENABLED(CONFIG_X86_64)
+static u64 mshv_vp_interrupt_pending(struct mshv_vp *vp)
+{
+ if (!vp->vp_register_page)
+ return 0;
+ return vp->vp_register_page->interrupt_vectors.as_uint64;
+}
+#else
+static u64 mshv_vp_interrupt_pending(struct mshv_vp *vp)
+{
+ return 0;
+}
+#endif
+
+static bool mshv_vp_dispatch_thread_blocked(struct mshv_vp *vp)
+{
+ struct hv_stats_page **stats = vp->vp_stats_pages;
+ u64 *self_vp_cntrs = stats[HV_STATS_AREA_SELF]->vp_cntrs;
+ u64 *parent_vp_cntrs = stats[HV_STATS_AREA_PARENT]->vp_cntrs;
+
+ if (self_vp_cntrs[VpRootDispatchThreadBlocked])
+ return self_vp_cntrs[VpRootDispatchThreadBlocked];
+ return parent_vp_cntrs[VpRootDispatchThreadBlocked];
+}
+
+static int
+mshv_vp_wait_for_hv_kick(struct mshv_vp *vp)
+{
+ int ret;
+
+ ret = wait_event_interruptible(vp->run.vp_suspend_queue,
+ (vp->run.kicked_by_hv == 1 &&
+ !mshv_vp_dispatch_thread_blocked(vp)) ||
+ mshv_vp_interrupt_pending(vp));
+ if (ret)
+ return -EINTR;
+
+ vp->run.flags.root_sched_blocked = 0;
+ vp->run.kicked_by_hv = 0;
+
+ return 0;
+}
+
+static int mshv_pre_guest_mode_work(struct mshv_vp *vp)
+{
+ const ulong work_flags = _TIF_NOTIFY_SIGNAL | _TIF_SIGPENDING |
+ _TIF_NEED_RESCHED | _TIF_NOTIFY_RESUME;
+ ulong th_flags;
+
+ th_flags = read_thread_flags();
+ while (th_flags & work_flags) {
+ int ret;
+
+ /* nb: following will call schedule */
+ ret = mshv_do_pre_guest_mode_work(th_flags);
+
+ if (ret)
+ return ret;
+
+ th_flags = read_thread_flags();
+ }
+
+ return 0;
+}
+
+/* Must be called with interrupts enabled */
+static long mshv_run_vp_with_root_scheduler(struct mshv_vp *vp)
+{
+ long ret;
+
+ if (vp->run.flags.root_sched_blocked) {
+ /*
+ * Dispatch state of this VP is blocked. Need to wait
+ * for the hypervisor to clear the blocked state before
+ * dispatching it.
+ */
+ ret = mshv_vp_wait_for_hv_kick(vp);
+ if (ret)
+ return ret;
+ }
+
+ do {
+ u32 flags = 0;
+ struct hv_output_dispatch_vp output;
+
+ ret = mshv_pre_guest_mode_work(vp);
+ if (ret)
+ break;
+
+ if (vp->run.flags.intercept_suspend)
+ flags |= HV_DISPATCH_VP_FLAG_CLEAR_INTERCEPT_SUSPEND;
+
+ if (mshv_vp_interrupt_pending(vp))
+ flags |= HV_DISPATCH_VP_FLAG_SCAN_INTERRUPT_INJECTION;
+
+ ret = mshv_vp_dispatch(vp, flags, &output);
+ if (ret)
+ break;
+
+ vp->run.flags.intercept_suspend = 0;
+
+ if (output.dispatch_state == HV_VP_DISPATCH_STATE_BLOCKED) {
+ if (output.dispatch_event ==
+ HV_VP_DISPATCH_EVENT_SUSPEND) {
+ /*
+ * TODO: remove the warning once VP canceling
+ * is supported
+ */
+ WARN_ONCE(atomic64_read(&vp->run.vp_signaled_count),
+ "%s: vp#%d: unexpected explicit suspend\n",
+ __func__, vp->vp_index);
+ /*
+ * Need to clear explicit suspend before
+ * dispatching.
+ * Explicit suspend is either:
+ * - set right after the first VP dispatch or
+ * - set explicitly via hypercall
+ * Since the latter case is not yet supported,
+ * simply clear it here.
+ */
+ ret = mshv_vp_clear_explicit_suspend(vp);
+ if (ret)
+ break;
+
+ ret = mshv_vp_wait_for_hv_kick(vp);
+ if (ret)
+ break;
+ } else {
+ vp->run.flags.root_sched_blocked = 1;
+ ret = mshv_vp_wait_for_hv_kick(vp);
+ if (ret)
+ break;
+ }
+ } else {
+ /* HV_VP_DISPATCH_STATE_READY */
+ if (output.dispatch_event ==
+ HV_VP_DISPATCH_EVENT_INTERCEPT)
+ vp->run.flags.intercept_suspend = 1;
+ }
+ } while (!vp->run.flags.intercept_suspend);
+
+ return ret;
+}
+
+static_assert(sizeof(struct hv_message) <= MSHV_RUN_VP_BUF_SZ,
+ "sizeof(struct hv_message) must not exceed MSHV_RUN_VP_BUF_SZ");
+
+static long mshv_vp_ioctl_run_vp(struct mshv_vp *vp, void __user *ret_msg)
+{
+ long rc;
+
+ if (hv_scheduler_type == HV_SCHEDULER_TYPE_ROOT)
+ rc = mshv_run_vp_with_root_scheduler(vp);
+ else
+ rc = mshv_run_vp_with_hyp_scheduler(vp);
+
+ if (rc)
+ return rc;
+
+ if (copy_to_user(ret_msg, vp->vp_intercept_msg_page,
+ sizeof(struct hv_message)))
+ rc = -EFAULT;
+
+ return rc;
+}
+
+static int
+mshv_vp_ioctl_get_set_state_pfn(struct mshv_vp *vp,
+ struct hv_vp_state_data state_data,
+ unsigned long user_pfn, size_t page_count,
+ bool is_set)
+{
+ int completed, ret = 0;
+ unsigned long check;
+ struct page **pages;
+
+ if (page_count > INT_MAX)
+ return -EINVAL;
+ /*
+ * Check the arithmetic for wraparound/overflow.
+ * The last page address in the buffer is:
+ * (user_pfn + (page_count - 1)) * PAGE_SIZE
+ */
+ if (check_add_overflow(user_pfn, (page_count - 1), &check))
+ return -EOVERFLOW;
+ if (check_mul_overflow(check, PAGE_SIZE, &check))
+ return -EOVERFLOW;
+
+ /* Pin user pages so hypervisor can copy directly to them */
+ pages = kcalloc(page_count, sizeof(struct page *), GFP_KERNEL);
+ if (!pages)
+ return -ENOMEM;
+
+ for (completed = 0; completed < page_count; completed += ret) {
+ unsigned long user_addr = (user_pfn + completed) * PAGE_SIZE;
+ int remaining = page_count - completed;
+
+ ret = pin_user_pages_fast(user_addr, remaining, FOLL_WRITE,
+ &pages[completed]);
+ if (ret < 0) {
+ vp_err(vp, "%s: Failed to pin user pages error %i\n",
+ __func__, ret);
+ goto unpin_pages;
+ }
+ }
+
+ if (is_set)
+ ret = hv_call_set_vp_state(vp->vp_index,
+ vp->vp_partition->pt_id,
+ state_data, page_count, pages,
+ 0, NULL);
+ else
+ ret = hv_call_get_vp_state(vp->vp_index,
+ vp->vp_partition->pt_id,
+ state_data, page_count, pages,
+ NULL);
+
+unpin_pages:
+ unpin_user_pages(pages, completed);
+ kfree(pages);
+ return ret;
+}
+
+static long
+mshv_vp_ioctl_get_set_state(struct mshv_vp *vp,
+ struct mshv_get_set_vp_state __user *user_args,
+ bool is_set)
+{
+ struct mshv_get_set_vp_state args;
+ long ret = 0;
+ union hv_output_get_vp_state vp_state;
+ u32 data_sz;
+ struct hv_vp_state_data state_data = {};
+
+ if (copy_from_user(&args, user_args, sizeof(args)))
+ return -EFAULT;
+
+ if (args.type >= MSHV_VP_STATE_COUNT || mshv_field_nonzero(args, rsvd) ||
+ !args.buf_sz || !PAGE_ALIGNED(args.buf_sz) ||
+ !PAGE_ALIGNED(args.buf_ptr))
+ return -EINVAL;
+
+ if (!access_ok((void __user *)args.buf_ptr, args.buf_sz))
+ return -EFAULT;
+
+ switch (args.type) {
+ case MSHV_VP_STATE_LAPIC:
+ state_data.type = HV_GET_SET_VP_STATE_LAPIC_STATE;
+ data_sz = HV_HYP_PAGE_SIZE;
+ break;
+ case MSHV_VP_STATE_XSAVE:
+ {
+ u64 data_sz_64;
+
+ ret = hv_call_get_partition_property(vp->vp_partition->pt_id,
+ HV_PARTITION_PROPERTY_XSAVE_STATES,
+ &state_data.xsave.states.as_uint64);
+ if (ret)
+ return ret;
+
+ ret = hv_call_get_partition_property(vp->vp_partition->pt_id,
+ HV_PARTITION_PROPERTY_MAX_XSAVE_DATA_SIZE,
+ &data_sz_64);
+ if (ret)
+ return ret;
+
+ data_sz = (u32)data_sz_64;
+ state_data.xsave.flags = 0;
+ /* Always request legacy states */
+ state_data.xsave.states.legacy_x87 = 1;
+ state_data.xsave.states.legacy_sse = 1;
+ state_data.type = HV_GET_SET_VP_STATE_XSAVE;
+ break;
+ }
+ case MSHV_VP_STATE_SIMP:
+ state_data.type = HV_GET_SET_VP_STATE_SIM_PAGE;
+ data_sz = HV_HYP_PAGE_SIZE;
+ break;
+ case MSHV_VP_STATE_SIEFP:
+ state_data.type = HV_GET_SET_VP_STATE_SIEF_PAGE;
+ data_sz = HV_HYP_PAGE_SIZE;
+ break;
+ case MSHV_VP_STATE_SYNTHETIC_TIMERS:
+ state_data.type = HV_GET_SET_VP_STATE_SYNTHETIC_TIMERS;
+ data_sz = sizeof(vp_state.synthetic_timers_state);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (copy_to_user(&user_args->buf_sz, &data_sz, sizeof(user_args->buf_sz)))
+ return -EFAULT;
+
+ if (data_sz > args.buf_sz)
+ return -EINVAL;
+
+ /* If the data is transmitted via pfns, delegate to helper */
+ if (state_data.type & HV_GET_SET_VP_STATE_TYPE_PFN) {
+ unsigned long user_pfn = PFN_DOWN(args.buf_ptr);
+ size_t page_count = PFN_DOWN(args.buf_sz);
+
+ return mshv_vp_ioctl_get_set_state_pfn(vp, state_data, user_pfn,
+ page_count, is_set);
+ }
+
+ /* Paranoia check - this shouldn't happen! */
+ if (data_sz > sizeof(vp_state)) {
+ vp_err(vp, "Invalid vp state data size!\n");
+ return -EINVAL;
+ }
+
+ if (is_set) {
+ if (copy_from_user(&vp_state, (void __user *)args.buf_ptr, data_sz))
+ return -EFAULT;
+
+ return hv_call_set_vp_state(vp->vp_index,
+ vp->vp_partition->pt_id,
+ state_data, 0, NULL,
+ sizeof(vp_state), (u8 *)&vp_state);
+ }
+
+ ret = hv_call_get_vp_state(vp->vp_index, vp->vp_partition->pt_id,
+ state_data, 0, NULL, &vp_state);
+ if (ret)
+ return ret;
+
+ if (copy_to_user((void __user *)args.buf_ptr, &vp_state, data_sz))
+ return -EFAULT;
+
+ return 0;
+}
+
+static long
+mshv_vp_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
+{
+ struct mshv_vp *vp = filp->private_data;
+ long r = -ENOTTY;
+
+ if (mutex_lock_killable(&vp->vp_mutex))
+ return -EINTR;
+
+ switch (ioctl) {
+ case MSHV_RUN_VP:
+ r = mshv_vp_ioctl_run_vp(vp, (void __user *)arg);
+ break;
+ case MSHV_GET_VP_STATE:
+ r = mshv_vp_ioctl_get_set_state(vp, (void __user *)arg, false);
+ break;
+ case MSHV_SET_VP_STATE:
+ r = mshv_vp_ioctl_get_set_state(vp, (void __user *)arg, true);
+ break;
+ case MSHV_ROOT_HVCALL:
+ r = mshv_ioctl_passthru_hvcall(vp->vp_partition, false,
+ (void __user *)arg);
+ break;
+ default:
+ vp_warn(vp, "Invalid ioctl: %#x\n", ioctl);
+ break;
+ }
+ mutex_unlock(&vp->vp_mutex);
+
+ return r;
+}
+
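+/*
+ * Fault handler for VP mmaps: back the faulting offset with the
+ * corresponding mapped state page (registers, intercept message or GHCB).
+ */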
+static vm_fault_t mshv_vp_fault(struct vm_fault *vmf)
+{
+ struct mshv_vp *vp = vmf->vma->vm_file->private_data;
+
+ switch (vmf->vma->vm_pgoff) {
+ case MSHV_VP_MMAP_OFFSET_REGISTERS:
+ vmf->page = virt_to_page(vp->vp_register_page);
+ break;
+ case MSHV_VP_MMAP_OFFSET_INTERCEPT_MESSAGE:
+ vmf->page = virt_to_page(vp->vp_intercept_msg_page);
+ break;
+ case MSHV_VP_MMAP_OFFSET_GHCB:
+ vmf->page = virt_to_page(vp->vp_ghcb_page);
+ break;
+ default:
+ return VM_FAULT_SIGBUS;
+ }
+
+ get_page(vmf->page);
+
+ return 0;
+}
+
+static int mshv_vp_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ struct mshv_vp *vp = file->private_data;
+
+ switch (vma->vm_pgoff) {
+ case MSHV_VP_MMAP_OFFSET_REGISTERS:
+ if (!vp->vp_register_page)
+ return -ENODEV;
+ break;
+ case MSHV_VP_MMAP_OFFSET_INTERCEPT_MESSAGE:
+ if (!vp->vp_intercept_msg_page)
+ return -ENODEV;
+ break;
+ case MSHV_VP_MMAP_OFFSET_GHCB:
+ if (!vp->vp_ghcb_page)
+ return -ENODEV;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ vma->vm_ops = &mshv_vp_vm_ops;
+ return 0;
+}
+
+static int
+mshv_vp_release(struct inode *inode, struct file *filp)
+{
+ struct mshv_vp *vp = filp->private_data;
+
+ /* Rest of VP cleanup happens in destroy_partition() */
+ mshv_partition_put(vp->vp_partition);
+ return 0;
+}
+
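+/* Unmap both the self and parent stats pages of the VP from the hypervisor */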
+static void mshv_vp_stats_unmap(u64 partition_id, u32 vp_index)
+{
+ union hv_stats_object_identity identity = {
+ .vp.partition_id = partition_id,
+ .vp.vp_index = vp_index,
+ };
+
+ identity.vp.stats_area_type = HV_STATS_AREA_SELF;
+ hv_call_unmap_stat_page(HV_STATS_OBJECT_VP, &identity);
+
+ identity.vp.stats_area_type = HV_STATS_AREA_PARENT;
+ hv_call_unmap_stat_page(HV_STATS_OBJECT_VP, &identity);
+}
+
+static int mshv_vp_stats_map(u64 partition_id, u32 vp_index,
+ void *stats_pages[])
+{
+ union hv_stats_object_identity identity = {
+ .vp.partition_id = partition_id,
+ .vp.vp_index = vp_index,
+ };
+ int err;
+
+ identity.vp.stats_area_type = HV_STATS_AREA_SELF;
+ err = hv_call_map_stat_page(HV_STATS_OBJECT_VP, &identity,
+ &stats_pages[HV_STATS_AREA_SELF]);
+ if (err)
+ return err;
+
+ identity.vp.stats_area_type = HV_STATS_AREA_PARENT;
+ err = hv_call_map_stat_page(HV_STATS_OBJECT_VP, &identity,
+ &stats_pages[HV_STATS_AREA_PARENT]);
+ if (err)
+ goto unmap_self;
+
+ return 0;
+
+unmap_self:
+ identity.vp.stats_area_type = HV_STATS_AREA_SELF;
+ hv_call_unmap_stat_page(HV_STATS_OBJECT_VP, &identity);
+ return err;
+}
+
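+/*
+ * Create a VP in the hypervisor, map its state pages (intercept message,
+ * registers or GHCB, and stats where applicable) and expose it to user
+ * space as an anonymous inode fd.
+ */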
+static long
+mshv_partition_ioctl_create_vp(struct mshv_partition *partition,
+ void __user *arg)
+{
+ struct mshv_create_vp args;
+ struct mshv_vp *vp;
+ struct page *intercept_message_page, *register_page, *ghcb_page;
+ void *stats_pages[2];
+ long ret;
+
+ if (copy_from_user(&args, arg, sizeof(args)))
+ return -EFAULT;
+
+ if (args.vp_index >= MSHV_MAX_VPS)
+ return -EINVAL;
+
+ if (partition->pt_vp_array[args.vp_index])
+ return -EEXIST;
+
+ ret = hv_call_create_vp(NUMA_NO_NODE, partition->pt_id, args.vp_index,
+ 0 /* Only valid for root partition VPs */);
+ if (ret)
+ return ret;
+
+ ret = hv_call_map_vp_state_page(partition->pt_id, args.vp_index,
+ HV_VP_STATE_PAGE_INTERCEPT_MESSAGE,
+ input_vtl_zero,
+ &intercept_message_page);
+ if (ret)
+ goto destroy_vp;
+
+ if (!mshv_partition_encrypted(partition)) {
+ ret = hv_call_map_vp_state_page(partition->pt_id, args.vp_index,
+ HV_VP_STATE_PAGE_REGISTERS,
+ input_vtl_zero,
+ &register_page);
+ if (ret)
+ goto unmap_intercept_message_page;
+ }
+
+ if (mshv_partition_encrypted(partition) &&
+ is_ghcb_mapping_available()) {
+ ret = hv_call_map_vp_state_page(partition->pt_id, args.vp_index,
+ HV_VP_STATE_PAGE_GHCB,
+ input_vtl_normal,
+ &ghcb_page);
+ if (ret)
+ goto unmap_register_page;
+ }
+
+ if (hv_parent_partition()) {
+ ret = mshv_vp_stats_map(partition->pt_id, args.vp_index,
+ stats_pages);
+ if (ret)
+ goto unmap_ghcb_page;
+ }
+
+ vp = kzalloc(sizeof(*vp), GFP_KERNEL);
+ if (!vp) {
+ ret = -ENOMEM;
+ goto unmap_stats_pages;
+ }
+
+ vp->vp_partition = mshv_partition_get(partition);
+ if (!vp->vp_partition) {
+ ret = -EBADF;
+ goto free_vp;
+ }
+
+ mutex_init(&vp->vp_mutex);
+ init_waitqueue_head(&vp->run.vp_suspend_queue);
+ atomic64_set(&vp->run.vp_signaled_count, 0);
+
+ vp->vp_index = args.vp_index;
+ vp->vp_intercept_msg_page = page_to_virt(intercept_message_page);
+ if (!mshv_partition_encrypted(partition))
+ vp->vp_register_page = page_to_virt(register_page);
+
+ if (mshv_partition_encrypted(partition) && is_ghcb_mapping_available())
+ vp->vp_ghcb_page = page_to_virt(ghcb_page);
+
+ if (hv_parent_partition())
+ memcpy(vp->vp_stats_pages, stats_pages, sizeof(stats_pages));
+
+ /*
+ * Keep anon_inode_getfd last: once the fd is installed, the VP state
+ * becomes accessible from user space.
+ */
+ ret = anon_inode_getfd("mshv_vp", &mshv_vp_fops, vp,
+ O_RDWR | O_CLOEXEC);
+ if (ret < 0)
+ goto put_partition;
+
+ /* Serialized with other partition ioctls by the partition mutex */
+ partition->pt_vp_count++;
+ partition->pt_vp_array[args.vp_index] = vp;
+
+ return ret;
+
+put_partition:
+ mshv_partition_put(partition);
+free_vp:
+ kfree(vp);
+unmap_stats_pages:
+ if (hv_parent_partition())
+ mshv_vp_stats_unmap(partition->pt_id, args.vp_index);
+unmap_ghcb_page:
+ if (mshv_partition_encrypted(partition) && is_ghcb_mapping_available()) {
+ hv_call_unmap_vp_state_page(partition->pt_id, args.vp_index,
+ HV_VP_STATE_PAGE_GHCB,
+ input_vtl_normal);
+ }
+unmap_register_page:
+ if (!mshv_partition_encrypted(partition)) {
+ hv_call_unmap_vp_state_page(partition->pt_id, args.vp_index,
+ HV_VP_STATE_PAGE_REGISTERS,
+ input_vtl_zero);
+ }
+unmap_intercept_message_page:
+ hv_call_unmap_vp_state_page(partition->pt_id, args.vp_index,
+ HV_VP_STATE_PAGE_INTERCEPT_MESSAGE,
+ input_vtl_zero);
+destroy_vp:
+ hv_call_delete_vp(partition->pt_id, args.vp_index);
+ return ret;
+}
+
+static int mshv_init_async_handler(struct mshv_partition *partition)
+{
+ if (completion_done(&partition->async_hypercall)) {
+ pt_err(partition,
+ "Cannot issue async hypercall while another one in progress!\n");
+ return -EPERM;
+ }
+
+ reinit_completion(&partition->async_hypercall);
+ return 0;
+}
+
+static void mshv_async_hvcall_handler(void *data, u64 *status)
+{
+ struct mshv_partition *partition = data;
+
+ wait_for_completion(&partition->async_hypercall);
+ pt_dbg(partition, "Async hypercall completed!\n");
+
+ *status = partition->async_hypercall_status;
+}
+
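+/* Restore host (root partition) read/write access to the region's pages */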
+static int
+mshv_partition_region_share(struct mshv_mem_region *region)
+{
+ u32 flags = HV_MODIFY_SPA_PAGE_HOST_ACCESS_MAKE_SHARED;
+
+ if (region->flags.large_pages)
+ flags |= HV_MODIFY_SPA_PAGE_HOST_ACCESS_LARGE_PAGE;
+
+ return hv_call_modify_spa_host_access(region->partition->pt_id,
+ region->pages, region->nr_pages,
+ HV_MAP_GPA_READABLE | HV_MAP_GPA_WRITABLE,
+ flags, true);
+}
+
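+/* Revoke host access to the region's pages, making them guest-exclusive */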
+static int
+mshv_partition_region_unshare(struct mshv_mem_region *region)
+{
+ u32 flags = HV_MODIFY_SPA_PAGE_HOST_ACCESS_MAKE_EXCLUSIVE;
+
+ if (region->flags.large_pages)
+ flags |= HV_MODIFY_SPA_PAGE_HOST_ACCESS_LARGE_PAGE;
+
+ return hv_call_modify_spa_host_access(region->partition->pt_id,
+ region->pages, region->nr_pages,
+ 0,
+ flags, false);
+}
+
+static int
+mshv_region_remap_pages(struct mshv_mem_region *region, u32 map_flags,
+ u64 page_offset, u64 page_count)
+{
+ if (page_offset + page_count > region->nr_pages)
+ return -EINVAL;
+
+ if (region->flags.large_pages)
+ map_flags |= HV_MAP_GPA_LARGE_PAGE;
+
+ /* ask the hypervisor to map guest ram */
+ return hv_call_map_gpa_pages(region->partition->pt_id,
+ region->start_gfn + page_offset,
+ page_count, map_flags,
+ region->pages + page_offset);
+}
+
+static int
+mshv_region_map(struct mshv_mem_region *region)
+{
+ u32 map_flags = region->hv_map_flags;
+
+ return mshv_region_remap_pages(region, map_flags,
+ 0, region->nr_pages);
+}
+
+static void
+mshv_region_evict_pages(struct mshv_mem_region *region,
+ u64 page_offset, u64 page_count)
+{
+ if (region->flags.range_pinned)
+ unpin_user_pages(region->pages + page_offset, page_count);
+
+ memset(region->pages + page_offset, 0,
+ page_count * sizeof(struct page *));
+}
+
+static void
+mshv_region_evict(struct mshv_mem_region *region)
+{
+ mshv_region_evict_pages(region, 0, region->nr_pages);
+}
+
+static int
+mshv_region_populate_pages(struct mshv_mem_region *region,
+ u64 page_offset, u64 page_count)
+{
+ u64 done_count, nr_pages;
+ struct page **pages;
+ __u64 userspace_addr;
+ int ret;
+
+ if (page_offset + page_count > region->nr_pages)
+ return -EINVAL;
+
+ for (done_count = 0; done_count < page_count; done_count += ret) {
+ pages = region->pages + page_offset + done_count;
+ userspace_addr = region->start_uaddr +
+ (page_offset + done_count) *
+ HV_HYP_PAGE_SIZE;
+ nr_pages = min(page_count - done_count,
+ MSHV_PIN_PAGES_BATCH_SIZE);
+
+ /*
+ * Pinning with 4K page granularity works for large pages too;
+ * all page structs within the large page are returned.
+ *
+ * Pin requests are batched because pin_user_pages_fast
+ * with the FOLL_LONGTERM flag does a large temporary
+ * allocation of contiguous memory.
+ */
+ if (region->flags.range_pinned)
+ ret = pin_user_pages_fast(userspace_addr,
+ nr_pages,
+ FOLL_WRITE | FOLL_LONGTERM,
+ pages);
+ else
+ ret = -EOPNOTSUPP;
+
+ if (ret < 0)
+ goto release_pages;
+ }
+
+ if (PageHuge(region->pages[page_offset]))
+ region->flags.large_pages = true;
+
+ return 0;
+
+release_pages:
+ mshv_region_evict_pages(region, page_offset, done_count);
+ return ret;
+}
+
+static int
+mshv_region_populate(struct mshv_mem_region *region)
+{
+ return mshv_region_populate_pages(region, 0, region->nr_pages);
+}
+
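+/* Find the memory region containing the given guest frame number, if any */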
+static struct mshv_mem_region *
+mshv_partition_region_by_gfn(struct mshv_partition *partition, u64 gfn)
+{
+ struct mshv_mem_region *region;
+
+ hlist_for_each_entry(region, &partition->pt_mem_regions, hnode) {
+ if (gfn >= region->start_gfn &&
+ gfn < region->start_gfn + region->nr_pages)
+ return region;
+ }
+
+ return NULL;
+}
+
+static struct mshv_mem_region *
+mshv_partition_region_by_uaddr(struct mshv_partition *partition, u64 uaddr)
+{
+ struct mshv_mem_region *region;
+
+ hlist_for_each_entry(region, &partition->pt_mem_regions, hnode) {
+ if (uaddr >= region->start_uaddr &&
+ uaddr < region->start_uaddr +
+ (region->nr_pages << HV_HYP_PAGE_SHIFT))
+ return region;
+ }
+
+ return NULL;
+}
+
+/*
+ * NB: caller checks and makes sure mem->size is page aligned
+ * Returns: 0 with regionpp updated on success, or -errno
+ */
+static int mshv_partition_create_region(struct mshv_partition *partition,
+ struct mshv_user_mem_region *mem,
+ struct mshv_mem_region **regionpp,
+ bool is_mmio)
+{
+ struct mshv_mem_region *region;
+ u64 nr_pages = HVPFN_DOWN(mem->size);
+
+ /* Reject overlapping regions */
+ if (mshv_partition_region_by_gfn(partition, mem->guest_pfn) ||
+ mshv_partition_region_by_gfn(partition, mem->guest_pfn + nr_pages - 1) ||
+ mshv_partition_region_by_uaddr(partition, mem->userspace_addr) ||
+ mshv_partition_region_by_uaddr(partition, mem->userspace_addr + mem->size - 1))
+ return -EEXIST;
+
+ region = vzalloc(sizeof(*region) + sizeof(struct page *) * nr_pages);
+ if (!region)
+ return -ENOMEM;
+
+ region->nr_pages = nr_pages;
+ region->start_gfn = mem->guest_pfn;
+ region->start_uaddr = mem->userspace_addr;
+ region->hv_map_flags = HV_MAP_GPA_READABLE | HV_MAP_GPA_ADJUSTABLE;
+ if (mem->flags & BIT(MSHV_SET_MEM_BIT_WRITABLE))
+ region->hv_map_flags |= HV_MAP_GPA_WRITABLE;
+ if (mem->flags & BIT(MSHV_SET_MEM_BIT_EXECUTABLE))
+ region->hv_map_flags |= HV_MAP_GPA_EXECUTABLE;
+
+ /* Note: large_pages flag populated when we pin the pages */
+ if (!is_mmio)
+ region->flags.range_pinned = true;
+
+ region->partition = partition;
+
+ *regionpp = region;
+
+ return 0;
+}
+
+/*
+ * Map guest RAM. For SNP partitions, release host access to the region
+ * before mapping it for the guest.
+ * Side effects: in case of failure, pages are unpinned when feasible.
+ */
+static int
+mshv_partition_mem_region_map(struct mshv_mem_region *region)
+{
+ struct mshv_partition *partition = region->partition;
+ int ret;
+
+ ret = mshv_region_populate(region);
+ if (ret) {
+ pt_err(partition, "Failed to populate memory region: %d\n",
+ ret);
+ goto err_out;
+ }
+
+ /*
+ * For an SNP partition it is a requirement that for every memory region
+ * that we are going to map for this partition we should make sure that
+ * host access to that region is released. This is ensured by doing an
+ * additional hypercall which will update the SLAT to release host
+ * access to guest memory regions.
+ */
+ if (mshv_partition_encrypted(partition)) {
+ ret = mshv_partition_region_unshare(region);
+ if (ret) {
+ pt_err(partition,
+ "Failed to unshare memory region (guest_pfn: %llu): %d\n",
+ region->start_gfn, ret);
+ goto evict_region;
+ }
+ }
+
+ ret = mshv_region_map(region);
+ if (ret && mshv_partition_encrypted(partition)) {
+ int shrc;
+
+ shrc = mshv_partition_region_share(region);
+ if (!shrc)
+ goto evict_region;
+
+ pt_err(partition,
+ "Failed to share memory region (guest_pfn: %llu): %d\n",
+ region->start_gfn, shrc);
+ /*
+ * Don't unpin if marking shared failed: the pages are no
+ * longer mapped in the host (i.e. the root partition).
+ */
+ goto err_out;
+ }
+
+ return 0;
+
+evict_region:
+ mshv_region_evict(region);
+err_out:
+ return ret;
+}
+
+/*
+ * This maps two things: guest RAM and MMIO space for PCI passthrough.
+ *
+ * mmio:
+ * - vfio overloads vm_pgoff to store the mmio start pfn/spa.
+ * - Two mappings are needed for an mmio range:
+ * 1. into the uaddr, so the VMM can access it.
+ * 2. into the hwpt (gfn <-> mmio phys addr), so the guest can access it.
+ *
+ * This function takes care of the second; the first is managed by vfio
+ * and handled via vfio_pci_mmap_fault().
+ */
+static long
+mshv_map_user_memory(struct mshv_partition *partition,
+ struct mshv_user_mem_region mem)
+{
+ struct mshv_mem_region *region;
+ struct vm_area_struct *vma;
+ bool is_mmio;
+ ulong mmio_pfn;
+ long ret;
+
+ if (mem.flags & BIT(MSHV_SET_MEM_BIT_UNMAP) ||
+ !access_ok((const void *)mem.userspace_addr, mem.size))
+ return -EINVAL;
+
+ mmap_read_lock(current->mm);
+ vma = vma_lookup(current->mm, mem.userspace_addr);
+ is_mmio = vma ? !!(vma->vm_flags & (VM_IO | VM_PFNMAP)) : false;
+ mmio_pfn = is_mmio ? vma->vm_pgoff : 0;
+ mmap_read_unlock(current->mm);
+
+ if (!vma)
+ return -EINVAL;
+
+ ret = mshv_partition_create_region(partition, &mem, &region,
+ is_mmio);
+ if (ret)
+ return ret;
+
+ if (is_mmio)
+ ret = hv_call_map_mmio_pages(partition->pt_id, mem.guest_pfn,
+ mmio_pfn, HVPFN_DOWN(mem.size));
+ else
+ ret = mshv_partition_mem_region_map(region);
+
+ if (ret)
+ goto errout;
+
+ /* Install the new region */
+ hlist_add_head(&region->hnode, &partition->pt_mem_regions);
+
+ return 0;
+
+errout:
+ vfree(region);
+ return ret;
+}
+
+/* Called for unmapping both the guest ram and the mmio space */
+static long
+mshv_unmap_user_memory(struct mshv_partition *partition,
+ struct mshv_user_mem_region mem)
+{
+ struct mshv_mem_region *region;
+ u32 unmap_flags = 0;
+
+ if (!(mem.flags & BIT(MSHV_SET_MEM_BIT_UNMAP)))
+ return -EINVAL;
+
+ region = mshv_partition_region_by_gfn(partition, mem.guest_pfn);
+ if (!region)
+ return -EINVAL;
+
+ /* Paranoia check */
+ if (region->start_uaddr != mem.userspace_addr ||
+ region->start_gfn != mem.guest_pfn ||
+ region->nr_pages != HVPFN_DOWN(mem.size))
+ return -EINVAL;
+
+ hlist_del(&region->hnode);
+
+ if (region->flags.large_pages)
+ unmap_flags |= HV_UNMAP_GPA_LARGE_PAGE;
+
+ /* Ignore unmap failures and continue, as the process may be exiting */
+ hv_call_unmap_gpa_pages(partition->pt_id, region->start_gfn,
+ region->nr_pages, unmap_flags);
+
+ mshv_region_evict(region);
+
+ vfree(region);
+ return 0;
+}
+
+static long
+mshv_partition_ioctl_set_memory(struct mshv_partition *partition,
+ struct mshv_user_mem_region __user *user_mem)
+{
+ struct mshv_user_mem_region mem;
+
+ if (copy_from_user(&mem, user_mem, sizeof(mem)))
+ return -EFAULT;
+
+ if (!mem.size ||
+ !PAGE_ALIGNED(mem.size) ||
+ !PAGE_ALIGNED(mem.userspace_addr) ||
+ (mem.flags & ~MSHV_SET_MEM_FLAGS_MASK) ||
+ mshv_field_nonzero(mem, rsvd))
+ return -EINVAL;
+
+ if (mem.flags & BIT(MSHV_SET_MEM_BIT_UNMAP))
+ return mshv_unmap_user_memory(partition, mem);
+
+ return mshv_map_user_memory(partition, mem);
+}
+
+static long
+mshv_partition_ioctl_ioeventfd(struct mshv_partition *partition,
+ void __user *user_args)
+{
+ struct mshv_user_ioeventfd args;
+
+ if (copy_from_user(&args, user_args, sizeof(args)))
+ return -EFAULT;
+
+ return mshv_set_unset_ioeventfd(partition, &args);
+}
+
+static long
+mshv_partition_ioctl_irqfd(struct mshv_partition *partition,
+ void __user *user_args)
+{
+ struct mshv_user_irqfd args;
+
+ if (copy_from_user(&args, user_args, sizeof(args)))
+ return -EFAULT;
+
+ return mshv_set_unset_irqfd(partition, &args);
+}
+
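+/*
+ * Report the accessed or dirty state of a range of guest pages as a
+ * bitmap, optionally clearing or setting the state while reading it.
+ */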
+static long
+mshv_partition_ioctl_get_gpap_access_bitmap(struct mshv_partition *partition,
+ void __user *user_args)
+{
+ struct mshv_gpap_access_bitmap args;
+ union hv_gpa_page_access_state *states;
+ long ret, i;
+ union hv_gpa_page_access_state_flags hv_flags = {};
+ u8 hv_type_mask;
+ ulong bitmap_buf_sz, states_buf_sz;
+ int written = 0;
+
+ if (copy_from_user(&args, user_args, sizeof(args)))
+ return -EFAULT;
+
+ if (args.access_type >= MSHV_GPAP_ACCESS_TYPE_COUNT ||
+ args.access_op >= MSHV_GPAP_ACCESS_OP_COUNT ||
+ mshv_field_nonzero(args, rsvd) || !args.page_count ||
+ !args.bitmap_ptr)
+ return -EINVAL;
+
+ if (check_mul_overflow(args.page_count, sizeof(*states), &states_buf_sz))
+ return -E2BIG;
+
+ /* Num bytes needed to store bitmap; one bit per page rounded up */
+ bitmap_buf_sz = DIV_ROUND_UP(args.page_count, 8);
+
+ /* Sanity check */
+ if (bitmap_buf_sz > states_buf_sz)
+ return -EBADFD;
+
+ switch (args.access_type) {
+ case MSHV_GPAP_ACCESS_TYPE_ACCESSED:
+ hv_type_mask = 1;
+ if (args.access_op == MSHV_GPAP_ACCESS_OP_CLEAR) {
+ hv_flags.clear_accessed = 1;
+ /* not accessed implies not dirty */
+ hv_flags.clear_dirty = 1;
+ } else { /* MSHV_GPAP_ACCESS_OP_SET */
+ hv_flags.set_accessed = 1;
+ }
+ break;
+ case MSHV_GPAP_ACCESS_TYPE_DIRTY:
+ hv_type_mask = 2;
+ if (args.access_op == MSHV_GPAP_ACCESS_OP_CLEAR) {
+ hv_flags.clear_dirty = 1;
+ } else { /* MSHV_GPAP_ACCESS_OP_SET */
+ hv_flags.set_dirty = 1;
+ /* dirty implies accessed */
+ hv_flags.set_accessed = 1;
+ }
+ break;
+ }
+
+ states = vzalloc(states_buf_sz);
+ if (!states)
+ return -ENOMEM;
+
+ ret = hv_call_get_gpa_access_states(partition->pt_id, args.page_count,
+ args.gpap_base, hv_flags, &written,
+ states);
+ if (ret)
+ goto free_return;
+
+ /*
+ * Overwrite states buffer with bitmap - the bits in hv_type_mask
+ * correspond to bitfields in hv_gpa_page_access_state
+ */
+ for (i = 0; i < written; ++i)
+ __assign_bit(i, (ulong *)states,
+ states[i].as_uint8 & hv_type_mask);
+
+ /* zero the unused bits in the last byte(s) of the returned bitmap */
+ for (i = written; i < bitmap_buf_sz * 8; ++i)
+ __clear_bit(i, (ulong *)states);
+
+ if (copy_to_user((void __user *)args.bitmap_ptr, states, bitmap_buf_sz))
+ ret = -EFAULT;
+
+free_return:
+ vfree(states);
+ return ret;
+}
+
+static long
+mshv_partition_ioctl_set_msi_routing(struct mshv_partition *partition,
+ void __user *user_args)
+{
+ struct mshv_user_irq_entry *entries = NULL;
+ struct mshv_user_irq_table args;
+ long ret;
+
+ if (copy_from_user(&args, user_args, sizeof(args)))
+ return -EFAULT;
+
+ if (args.nr > MSHV_MAX_GUEST_IRQS ||
+ mshv_field_nonzero(args, rsvd))
+ return -EINVAL;
+
+ if (args.nr) {
+ struct mshv_user_irq_table __user *urouting = user_args;
+
+ entries = vmemdup_user(urouting->entries,
+ array_size(sizeof(*entries),
+ args.nr));
+ if (IS_ERR(entries))
+ return PTR_ERR(entries);
+ }
+ ret = mshv_update_routing_table(partition, entries, args.nr);
+ kvfree(entries);
+
+ return ret;
+}
+
+static long
+mshv_partition_ioctl_initialize(struct mshv_partition *partition)
+{
+ long ret;
+
+ if (partition->pt_initialized)
+ return 0;
+
+ ret = hv_call_initialize_partition(partition->pt_id);
+ if (ret)
+ goto withdraw_mem;
+
+ partition->pt_initialized = true;
+
+ return 0;
+
+withdraw_mem:
+ hv_call_withdraw_memory(U64_MAX, NUMA_NO_NODE, partition->pt_id);
+
+ return ret;
+}
+
+static long
+mshv_partition_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
+{
+ struct mshv_partition *partition = filp->private_data;
+ long ret;
+ void __user *uarg = (void __user *)arg;
+
+ if (mutex_lock_killable(&partition->pt_mutex))
+ return -EINTR;
+
+ switch (ioctl) {
+ case MSHV_INITIALIZE_PARTITION:
+ ret = mshv_partition_ioctl_initialize(partition);
+ break;
+ case MSHV_SET_GUEST_MEMORY:
+ ret = mshv_partition_ioctl_set_memory(partition, uarg);
+ break;
+ case MSHV_CREATE_VP:
+ ret = mshv_partition_ioctl_create_vp(partition, uarg);
+ break;
+ case MSHV_IRQFD:
+ ret = mshv_partition_ioctl_irqfd(partition, uarg);
+ break;
+ case MSHV_IOEVENTFD:
+ ret = mshv_partition_ioctl_ioeventfd(partition, uarg);
+ break;
+ case MSHV_SET_MSI_ROUTING:
+ ret = mshv_partition_ioctl_set_msi_routing(partition, uarg);
+ break;
+ case MSHV_GET_GPAP_ACCESS_BITMAP:
+ ret = mshv_partition_ioctl_get_gpap_access_bitmap(partition,
+ uarg);
+ break;
+ case MSHV_ROOT_HVCALL:
+ ret = mshv_ioctl_passthru_hvcall(partition, true, uarg);
+ break;
+ default:
+ ret = -ENOTTY;
+ }
+
+ mutex_unlock(&partition->pt_mutex);
+ return ret;
+}
+
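+/* Tell the hypervisor to stop dispatching the VP */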
+static int
+disable_vp_dispatch(struct mshv_vp *vp)
+{
+ int ret;
+ struct hv_register_assoc dispatch_suspend = {
+ .name = HV_REGISTER_DISPATCH_SUSPEND,
+ .value.dispatch_suspend.suspended = 1,
+ };
+
+ ret = mshv_set_vp_registers(vp->vp_index, vp->vp_partition->pt_id,
+ 1, &dispatch_suspend);
+ if (ret)
+ vp_err(vp, "failed to suspend\n");
+
+ return ret;
+}
+
+static int
+get_vp_signaled_count(struct mshv_vp *vp, u64 *count)
+{
+ int ret;
+ struct hv_register_assoc root_signal_count = {
+ .name = HV_REGISTER_VP_ROOT_SIGNAL_COUNT,
+ };
+
+ ret = mshv_get_vp_registers(vp->vp_index, vp->vp_partition->pt_id,
+ 1, &root_signal_count);
+
+ if (ret) {
+ vp_err(vp, "Failed to get root signal count");
+ *count = 0;
+ return ret;
+ }
+
+ *count = root_signal_count.value.reg64;
+
+ return ret;
+}
+
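+/*
+ * Consume outstanding hypervisor signals for the VP until the local
+ * signal count matches the count reported by the hypervisor.
+ */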
+static void
+drain_vp_signals(struct mshv_vp *vp)
+{
+ u64 hv_signal_count;
+ u64 vp_signal_count;
+
+ get_vp_signaled_count(vp, &hv_signal_count);
+
+ vp_signal_count = atomic64_read(&vp->run.vp_signaled_count);
+
+ /*
+ * There should be at most 1 outstanding notification, but be extra
+ * careful anyway.
+ */
+ while (hv_signal_count != vp_signal_count) {
+ WARN_ON(hv_signal_count - vp_signal_count != 1);
+
+ if (wait_event_interruptible(vp->run.vp_suspend_queue,
+ vp->run.kicked_by_hv == 1))
+ break;
+ vp->run.kicked_by_hv = 0;
+ vp_signal_count = atomic64_read(&vp->run.vp_signaled_count);
+ }
+}
+
+static void drain_all_vps(const struct mshv_partition *partition)
+{
+ int i;
+ struct mshv_vp *vp;
+
+ /*
+ * VPs are reachable from ISR. It is safe to not take the partition
+ * lock because nobody else can enter this function and drop the
+ * partition from the list.
+ */
+ for (i = 0; i < MSHV_MAX_VPS; i++) {
+ vp = partition->pt_vp_array[i];
+ if (!vp)
+ continue;
+ /*
+ * Disable dispatching of the VP in the hypervisor. After this
+ * the hypervisor guarantees it won't generate any signals for
+ * the VP and the hypervisor's VP signal count won't change.
+ */
+ disable_vp_dispatch(vp);
+ drain_vp_signals(vp);
+ }
+}
+
+static void
+remove_partition(struct mshv_partition *partition)
+{
+ spin_lock(&mshv_root.pt_ht_lock);
+ hlist_del_rcu(&partition->pt_hnode);
+ spin_unlock(&mshv_root.pt_ht_lock);
+
+ synchronize_rcu();
+}
+
+/*
+ * Tear down a partition and remove it from the list.
+ * Partition's refcount must be 0
+ */
+static void destroy_partition(struct mshv_partition *partition)
+{
+ struct mshv_vp *vp;
+ struct mshv_mem_region *region;
+ int i, ret;
+ struct hlist_node *n;
+
+ if (refcount_read(&partition->pt_ref_count)) {
+ pt_err(partition,
+ "Attempt to destroy partition but refcount > 0\n");
+ return;
+ }
+
+ if (partition->pt_initialized) {
+ /*
+ * We only need to drain signals for the root scheduler. This should be
+ * done before removing the partition from the partition list.
+ */
+ if (hv_scheduler_type == HV_SCHEDULER_TYPE_ROOT)
+ drain_all_vps(partition);
+
+ /* Remove vps */
+ for (i = 0; i < MSHV_MAX_VPS; ++i) {
+ vp = partition->pt_vp_array[i];
+ if (!vp)
+ continue;
+
+ if (hv_parent_partition())
+ mshv_vp_stats_unmap(partition->pt_id, vp->vp_index);
+
+ if (vp->vp_register_page) {
+ (void)hv_call_unmap_vp_state_page(partition->pt_id,
+ vp->vp_index,
+ HV_VP_STATE_PAGE_REGISTERS,
+ input_vtl_zero);
+ vp->vp_register_page = NULL;
+ }
+
+ (void)hv_call_unmap_vp_state_page(partition->pt_id,
+ vp->vp_index,
+ HV_VP_STATE_PAGE_INTERCEPT_MESSAGE,
+ input_vtl_zero);
+ vp->vp_intercept_msg_page = NULL;
+
+ if (vp->vp_ghcb_page) {
+ (void)hv_call_unmap_vp_state_page(partition->pt_id,
+ vp->vp_index,
+ HV_VP_STATE_PAGE_GHCB,
+ input_vtl_normal);
+ vp->vp_ghcb_page = NULL;
+ }
+
+ kfree(vp);
+
+ partition->pt_vp_array[i] = NULL;
+ }
+
+ /* Deallocates and unmaps everything including vcpus, GPA mappings etc */
+ hv_call_finalize_partition(partition->pt_id);
+
+ partition->pt_initialized = false;
+ }
+
+ remove_partition(partition);
+
+ /* Remove regions, regain access to the memory and unpin the pages */
+ hlist_for_each_entry_safe(region, n, &partition->pt_mem_regions,
+ hnode) {
+ hlist_del(&region->hnode);
+
+ if (mshv_partition_encrypted(partition)) {
+ ret = mshv_partition_region_share(region);
+ if (ret) {
+ pt_err(partition,
+ "Failed to regain access to memory, unpinning user pages will fail and crash the host error: %d\n",
+ ret);
+ return;
+ }
+ }
+
+ mshv_region_evict(region);
+
+ vfree(region);
+ }
+
+ /* Withdraw and free all pages we deposited */
+ hv_call_withdraw_memory(U64_MAX, NUMA_NO_NODE, partition->pt_id);
+ hv_call_delete_partition(partition->pt_id);
+
+ mshv_free_routing_table(partition);
+ kfree(partition);
+}
+
+struct
+mshv_partition *mshv_partition_get(struct mshv_partition *partition)
+{
+ if (refcount_inc_not_zero(&partition->pt_ref_count))
+ return partition;
+ return NULL;
+}
+
+struct
+mshv_partition *mshv_partition_find(u64 partition_id)
+ __must_hold(RCU)
+{
+ struct mshv_partition *p;
+
+ hash_for_each_possible_rcu(mshv_root.pt_htable, p, pt_hnode,
+ partition_id)
+ if (p->pt_id == partition_id)
+ return p;
+
+ return NULL;
+}
+
+void
+mshv_partition_put(struct mshv_partition *partition)
+{
+ if (refcount_dec_and_test(&partition->pt_ref_count))
+ destroy_partition(partition);
+}
+
+static int
+mshv_partition_release(struct inode *inode, struct file *filp)
+{
+ struct mshv_partition *partition = filp->private_data;
+
+ mshv_eventfd_release(partition);
+
+ cleanup_srcu_struct(&partition->pt_irq_srcu);
+
+ mshv_partition_put(partition);
+
+ return 0;
+}
+
+static int
+add_partition(struct mshv_partition *partition)
+{
+ spin_lock(&mshv_root.pt_ht_lock);
+
+ hash_add_rcu(mshv_root.pt_htable, &partition->pt_hnode,
+ partition->pt_id);
+
+ spin_unlock(&mshv_root.pt_ht_lock);
+
+ return 0;
+}
+
+static long
+mshv_ioctl_create_partition(void __user *user_arg, struct device *module_dev)
+{
+ struct mshv_create_partition args;
+ u64 creation_flags;
+ struct hv_partition_creation_properties creation_properties = {};
+ union hv_partition_isolation_properties isolation_properties = {};
+ struct mshv_partition *partition;
+ struct file *file;
+ int fd;
+ long ret;
+
+ if (copy_from_user(&args, user_arg, sizeof(args)))
+ return -EFAULT;
+
+ if ((args.pt_flags & ~MSHV_PT_FLAGS_MASK) ||
+ args.pt_isolation >= MSHV_PT_ISOLATION_COUNT)
+ return -EINVAL;
+
+ /* Only support EXO partitions */
+ creation_flags = HV_PARTITION_CREATION_FLAG_EXO_PARTITION |
+ HV_PARTITION_CREATION_FLAG_INTERCEPT_MESSAGE_PAGE_ENABLED;
+
+ if (args.pt_flags & BIT(MSHV_PT_BIT_LAPIC))
+ creation_flags |= HV_PARTITION_CREATION_FLAG_LAPIC_ENABLED;
+ if (args.pt_flags & BIT(MSHV_PT_BIT_X2APIC))
+ creation_flags |= HV_PARTITION_CREATION_FLAG_X2APIC_CAPABLE;
+ if (args.pt_flags & BIT(MSHV_PT_BIT_GPA_SUPER_PAGES))
+ creation_flags |= HV_PARTITION_CREATION_FLAG_GPA_SUPER_PAGES_ENABLED;
+
+ switch (args.pt_isolation) {
+ case MSHV_PT_ISOLATION_NONE:
+ isolation_properties.isolation_type =
+ HV_PARTITION_ISOLATION_TYPE_NONE;
+ break;
+ }
+
+ partition = kzalloc(sizeof(*partition), GFP_KERNEL);
+ if (!partition)
+ return -ENOMEM;
+
+ partition->pt_module_dev = module_dev;
+ partition->isolation_type = isolation_properties.isolation_type;
+
+ refcount_set(&partition->pt_ref_count, 1);
+
+ mutex_init(&partition->pt_mutex);
+
+ mutex_init(&partition->pt_irq_lock);
+
+ init_completion(&partition->async_hypercall);
+
+ INIT_HLIST_HEAD(&partition->irq_ack_notifier_list);
+
+ INIT_HLIST_HEAD(&partition->pt_devices);
+
+ INIT_HLIST_HEAD(&partition->pt_mem_regions);
+
+ mshv_eventfd_init(partition);
+
+ ret = init_srcu_struct(&partition->pt_irq_srcu);
+ if (ret)
+ goto free_partition;
+
+ ret = hv_call_create_partition(creation_flags,
+ creation_properties,
+ isolation_properties,
+ &partition->pt_id);
+ if (ret)
+ goto cleanup_irq_srcu;
+
+ ret = add_partition(partition);
+ if (ret)
+ goto delete_partition;
+
+ ret = mshv_init_async_handler(partition);
+ if (ret)
+ goto remove_partition;
+
+ fd = get_unused_fd_flags(O_CLOEXEC);
+ if (fd < 0) {
+ ret = fd;
+ goto remove_partition;
+ }
+
+ file = anon_inode_getfile("mshv_partition", &mshv_partition_fops,
+ partition, O_RDWR);
+ if (IS_ERR(file)) {
+ ret = PTR_ERR(file);
+ goto put_fd;
+ }
+
+ fd_install(fd, file);
+
+ return fd;
+
+put_fd:
+ put_unused_fd(fd);
+remove_partition:
+ remove_partition(partition);
+delete_partition:
+ hv_call_delete_partition(partition->pt_id);
+cleanup_irq_srcu:
+ cleanup_srcu_struct(&partition->pt_irq_srcu);
+free_partition:
+ kfree(partition);
+
+ return ret;
+}
+
+static long mshv_dev_ioctl(struct file *filp, unsigned int ioctl,
+ unsigned long arg)
+{
+ struct miscdevice *misc = filp->private_data;
+
+ switch (ioctl) {
+ case MSHV_CREATE_PARTITION:
+ return mshv_ioctl_create_partition((void __user *)arg,
+ misc->this_device);
+ }
+
+ return -ENOTTY;
+}
+
+static int
+mshv_dev_open(struct inode *inode, struct file *filp)
+{
+ return 0;
+}
+
+static int
+mshv_dev_release(struct inode *inode, struct file *filp)
+{
+ return 0;
+}
+
+static int mshv_cpuhp_online;
+static int mshv_root_sched_online;
+
+static const char *scheduler_type_to_string(enum hv_scheduler_type type)
+{
+ switch (type) {
+ case HV_SCHEDULER_TYPE_LP:
+ return "classic scheduler without SMT";
+ case HV_SCHEDULER_TYPE_LP_SMT:
+ return "classic scheduler with SMT";
+ case HV_SCHEDULER_TYPE_CORE_SMT:
+ return "core scheduler";
+ case HV_SCHEDULER_TYPE_ROOT:
+ return "root scheduler";
+ default:
+ return "unknown scheduler";
+ }
+}
+
+/* TODO move this to hv_common.c when needed outside */
+static int __init hv_retrieve_scheduler_type(enum hv_scheduler_type *out)
+{
+ struct hv_input_get_system_property *input;
+ struct hv_output_get_system_property *output;
+ unsigned long flags;
+ u64 status;
+
+ local_irq_save(flags);
+ input = *this_cpu_ptr(hyperv_pcpu_input_arg);
+ output = *this_cpu_ptr(hyperv_pcpu_output_arg);
+
+ memset(input, 0, sizeof(*input));
+ memset(output, 0, sizeof(*output));
+ input->property_id = HV_SYSTEM_PROPERTY_SCHEDULER_TYPE;
+
+ status = hv_do_hypercall(HVCALL_GET_SYSTEM_PROPERTY, input, output);
+ if (!hv_result_success(status)) {
+ local_irq_restore(flags);
+ pr_err("%s: %s\n", __func__, hv_result_to_string(status));
+ return hv_result_to_errno(status);
+ }
+
+ *out = output->scheduler_type;
+ local_irq_restore(flags);
+
+ return 0;
+}
+
+/* Retrieve and stash the supported scheduler type */
+static int __init mshv_retrieve_scheduler_type(struct device *dev)
+{
+ int ret;
+
+ ret = hv_retrieve_scheduler_type(&hv_scheduler_type);
+ if (ret)
+ return ret;
+
+ dev_info(dev, "Hypervisor using %s\n",
+ scheduler_type_to_string(hv_scheduler_type));
+
+ switch (hv_scheduler_type) {
+ case HV_SCHEDULER_TYPE_CORE_SMT:
+ case HV_SCHEDULER_TYPE_LP_SMT:
+ case HV_SCHEDULER_TYPE_ROOT:
+ case HV_SCHEDULER_TYPE_LP:
+ /* Supported scheduler, nothing to do */
+ break;
+ default:
+ dev_err(dev, "unsupported scheduler 0x%x, bailing.\n",
+ hv_scheduler_type);
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int mshv_root_scheduler_init(unsigned int cpu)
+{
+ void **inputarg, **outputarg, *p;
+
+ inputarg = (void **)this_cpu_ptr(root_scheduler_input);
+ outputarg = (void **)this_cpu_ptr(root_scheduler_output);
+
+ /* Allocate two consecutive pages. One for input, one for output. */
+ p = kmalloc(2 * HV_HYP_PAGE_SIZE, GFP_KERNEL);
+ if (!p)
+ return -ENOMEM;
+
+ *inputarg = p;
+ *outputarg = (char *)p + HV_HYP_PAGE_SIZE;
+
+ return 0;
+}
+
+static int mshv_root_scheduler_cleanup(unsigned int cpu)
+{
+ void *p, **inputarg, **outputarg;
+
+ inputarg = (void **)this_cpu_ptr(root_scheduler_input);
+ outputarg = (void **)this_cpu_ptr(root_scheduler_output);
+
+ p = *inputarg;
+
+ *inputarg = NULL;
+ *outputarg = NULL;
+
+ kfree(p);
+
+ return 0;
+}
+
+/* Must be called after retrieving the scheduler type */
+static int
+root_scheduler_init(struct device *dev)
+{
+ int ret;
+
+ if (hv_scheduler_type != HV_SCHEDULER_TYPE_ROOT)
+ return 0;
+
+ root_scheduler_input = alloc_percpu(void *);
+ root_scheduler_output = alloc_percpu(void *);
+
+ if (!root_scheduler_input || !root_scheduler_output) {
+ dev_err(dev, "Failed to allocate root scheduler buffers\n");
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "mshv_root_sched",
+ mshv_root_scheduler_init,
+ mshv_root_scheduler_cleanup);
+
+ if (ret < 0) {
+ dev_err(dev, "Failed to setup root scheduler state: %i\n", ret);
+ goto out;
+ }
+
+ mshv_root_sched_online = ret;
+
+ return 0;
+
+out:
+ free_percpu(root_scheduler_input);
+ free_percpu(root_scheduler_output);
+ return ret;
+}
+
+static void
+root_scheduler_deinit(void)
+{
+ if (hv_scheduler_type != HV_SCHEDULER_TYPE_ROOT)
+ return;
+
+ cpuhp_remove_state(mshv_root_sched_online);
+ free_percpu(root_scheduler_input);
+ free_percpu(root_scheduler_output);
+}
+
+static int mshv_reboot_notify(struct notifier_block *nb,
+ unsigned long code, void *unused)
+{
+ cpuhp_remove_state(mshv_cpuhp_online);
+ return 0;
+}
+
+struct notifier_block mshv_reboot_nb = {
+ .notifier_call = mshv_reboot_notify,
+};
+
+static void mshv_root_partition_exit(void)
+{
+ unregister_reboot_notifier(&mshv_reboot_nb);
+ root_scheduler_deinit();
+}
+
+static int __init mshv_root_partition_init(struct device *dev)
+{
+ int err;
+
+ if (mshv_retrieve_scheduler_type(dev))
+ return -ENODEV;
+
+ err = root_scheduler_init(dev);
+ if (err)
+ return err;
+
+ err = register_reboot_notifier(&mshv_reboot_nb);
+ if (err)
+ goto root_sched_deinit;
+
+ return 0;
+
+root_sched_deinit:
+ root_scheduler_deinit();
+ return err;
+}
+
+static int __init mshv_parent_partition_init(void)
+{
+ int ret;
+ struct device *dev;
+ union hv_hypervisor_version_info version_info;
+
+ if (!hv_root_partition() || is_kdump_kernel())
+ return -ENODEV;
+
+ if (hv_get_hypervisor_version(&version_info))
+ return -ENODEV;
+
+ ret = misc_register(&mshv_dev);
+ if (ret)
+ return ret;
+
+ dev = mshv_dev.this_device;
+
+ if (version_info.build_number < MSHV_HV_MIN_VERSION ||
+ version_info.build_number > MSHV_HV_MAX_VERSION) {
+ dev_err(dev, "Running on unvalidated Hyper-V version\n");
+ dev_err(dev, "Versions: current: %u min: %u max: %u\n",
+ version_info.build_number, MSHV_HV_MIN_VERSION,
+ MSHV_HV_MAX_VERSION);
+ }
+
+ mshv_root.synic_pages = alloc_percpu(struct hv_synic_pages);
+ if (!mshv_root.synic_pages) {
+ dev_err(dev, "Failed to allocate percpu synic page\n");
+ ret = -ENOMEM;
+ goto device_deregister;
+ }
+
+ ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "mshv_synic",
+ mshv_synic_init,
+ mshv_synic_cleanup);
+ if (ret < 0) {
+ dev_err(dev, "Failed to setup cpu hotplug state: %i\n", ret);
+ goto free_synic_pages;
+ }
+
+ mshv_cpuhp_online = ret;
+
+ ret = mshv_root_partition_init(dev);
+ if (ret)
+ goto remove_cpu_state;
+
+ ret = mshv_irqfd_wq_init();
+ if (ret)
+ goto exit_partition;
+
+ spin_lock_init(&mshv_root.pt_ht_lock);
+ hash_init(mshv_root.pt_htable);
+
+ hv_setup_mshv_handler(mshv_isr);
+
+ return 0;
+
+exit_partition:
+ if (hv_root_partition())
+ mshv_root_partition_exit();
+remove_cpu_state:
+ cpuhp_remove_state(mshv_cpuhp_online);
+free_synic_pages:
+ free_percpu(mshv_root.synic_pages);
+device_deregister:
+ misc_deregister(&mshv_dev);
+ return ret;
+}
+
+static void __exit mshv_parent_partition_exit(void)
+{
+ hv_setup_mshv_handler(NULL);
+ mshv_port_table_fini();
+ misc_deregister(&mshv_dev);
+ mshv_irqfd_wq_cleanup();
+ if (hv_root_partition())
+ mshv_root_partition_exit();
+ cpuhp_remove_state(mshv_cpuhp_online);
+ free_percpu(mshv_root.synic_pages);
+}
+
+module_init(mshv_parent_partition_init);
+module_exit(mshv_parent_partition_exit);
diff --git a/drivers/hv/mshv_synic.c b/drivers/hv/mshv_synic.c
new file mode 100644
index 000000000000..e6b6381b7c36
--- /dev/null
+++ b/drivers/hv/mshv_synic.c
@@ -0,0 +1,665 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2023, Microsoft Corporation.
+ *
+ * mshv_root module's main interrupt handler and associated functionality.
+ *
+ * Authors: Microsoft Linux virtualization team
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/io.h>
+#include <linux/random.h>
+#include <asm/mshyperv.h>
+
+#include "mshv_eventfd.h"
+#include "mshv.h"
+
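+/*
+ * Pop the next queued port number from this cpu's SynIC event ring for
+ * the given SINT, or return 0 if the ring is empty.
+ */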
+static u32 synic_event_ring_get_queued_port(u32 sint_index)
+{
+ struct hv_synic_event_ring_page **event_ring_page;
+ volatile struct hv_synic_event_ring *ring;
+ struct hv_synic_pages *spages;
+ u8 **synic_eventring_tail;
+ u32 message;
+ u8 tail;
+
+ spages = this_cpu_ptr(mshv_root.synic_pages);
+ event_ring_page = &spages->synic_event_ring_page;
+ synic_eventring_tail = (u8 **)this_cpu_ptr(hv_synic_eventring_tail);
+
+ if (unlikely(!*synic_eventring_tail)) {
+ pr_debug("Missing synic event ring tail!\n");
+ return 0;
+ }
+ tail = (*synic_eventring_tail)[sint_index];
+
+ if (unlikely(!*event_ring_page)) {
+ pr_debug("Missing synic event ring page!\n");
+ return 0;
+ }
+
+ ring = &(*event_ring_page)->sint_event_ring[sint_index];
+
+ /*
+ * Get the message.
+ */
+ message = ring->data[tail];
+
+ if (!message) {
+ if (ring->ring_full) {
+ /*
+ * Ring is marked full, but we would have consumed all
+ * the messages. Notify the hypervisor that ring is now
+ * empty and check again.
+ */
+ ring->ring_full = 0;
+ hv_call_notify_port_ring_empty(sint_index);
+ message = ring->data[tail];
+ }
+
+ if (!message) {
+ ring->signal_masked = 0;
+ /*
+ * Unmask the signal and sync with hypervisor
+ * before one last check for any message.
+ */
+ mb();
+ message = ring->data[tail];
+
+ /*
+ * Still nothing; bail out.
+ */
+ if (!message)
+ return 0;
+ }
+
+ ring->signal_masked = 1;
+ }
+
+ /*
+ * Clear the message in the ring buffer.
+ */
+ ring->data[tail] = 0;
+
+ if (++tail == HV_SYNIC_EVENT_RING_MESSAGE_COUNT)
+ tail = 0;
+
+ (*synic_eventring_tail)[sint_index] = tail;
+
+ return message;
+}
+
+static bool
+mshv_doorbell_isr(struct hv_message *msg)
+{
+ struct hv_notification_message_payload *notification;
+ u32 port;
+
+ if (msg->header.message_type != HVMSG_SYNIC_SINT_INTERCEPT)
+ return false;
+
+ notification = (struct hv_notification_message_payload *)msg->u.payload;
+ if (notification->sint_index != HV_SYNIC_DOORBELL_SINT_INDEX)
+ return false;
+
+ while ((port = synic_event_ring_get_queued_port(HV_SYNIC_DOORBELL_SINT_INDEX))) {
+ struct port_table_info ptinfo = { 0 };
+
+ if (mshv_portid_lookup(port, &ptinfo)) {
+ pr_debug("Failed to get port info from port_table!\n");
+ continue;
+ }
+
+ if (ptinfo.hv_port_type != HV_PORT_TYPE_DOORBELL) {
+ pr_debug("Not a doorbell port!, port: %d, port_type: %d\n",
+ port, ptinfo.hv_port_type);
+ continue;
+ }
+
+ /* Invoke the callback */
+ ptinfo.hv_port_doorbell.doorbell_cb(port,
+ ptinfo.hv_port_doorbell.data);
+ }
+
+ return true;
+}
+
+static bool mshv_async_call_completion_isr(struct hv_message *msg)
+{
+ bool handled = false;
+ struct hv_async_completion_message_payload *async_msg;
+ struct mshv_partition *partition;
+ u64 partition_id;
+
+ if (msg->header.message_type != HVMSG_ASYNC_CALL_COMPLETION)
+ goto out;
+
+ async_msg =
+ (struct hv_async_completion_message_payload *)msg->u.payload;
+
+ partition_id = async_msg->partition_id;
+
+ /*
+ * Hold the RCU read lock for the rest of the isr, because the
+ * partition could be released at any time.
+ * e.g. the MSHV_RUN_VP thread could wake on another cpu; it could
+ * release the partition unless we hold this.
+ */
+ rcu_read_lock();
+
+ partition = mshv_partition_find(partition_id);
+
+ if (unlikely(!partition)) {
+ pr_debug("failed to find partition %llu\n", partition_id);
+ goto unlock_out;
+ }
+
+ partition->async_hypercall_status = async_msg->status;
+ complete(&partition->async_hypercall);
+
+ handled = true;
+
+unlock_out:
+ rcu_read_unlock();
+out:
+ return handled;
+}
+
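+/* Record the hypervisor signal and wake up the VP's run thread */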
+static void kick_vp(struct mshv_vp *vp)
+{
+ atomic64_inc(&vp->run.vp_signaled_count);
+ vp->run.kicked_by_hv = 1;
+ wake_up(&vp->run.vp_suspend_queue);
+}
+
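+/* Kick every VP named in the sparse VP set of a scheduler bitset message */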
+static void
+handle_bitset_message(const struct hv_vp_signal_bitset_scheduler_message *msg)
+{
+ int bank_idx, vps_signaled = 0, bank_mask_size;
+ struct mshv_partition *partition;
+ const struct hv_vpset *vpset;
+ const u64 *bank_contents;
+ u64 partition_id = msg->partition_id;
+
+ if (msg->vp_bitset.bitset.format != HV_GENERIC_SET_SPARSE_4K) {
+ pr_debug("scheduler message format is not HV_GENERIC_SET_SPARSE_4K");
+ return;
+ }
+
+ if (msg->vp_count == 0) {
+ pr_debug("scheduler message with no VP specified");
+ return;
+ }
+
+ rcu_read_lock();
+
+ partition = mshv_partition_find(partition_id);
+ if (unlikely(!partition)) {
+ pr_debug("failed to find partition %llu\n", partition_id);
+ goto unlock_out;
+ }
+
+ vpset = &msg->vp_bitset.bitset;
+
+ bank_idx = -1;
+ bank_contents = vpset->bank_contents;
+ bank_mask_size = sizeof(vpset->valid_bank_mask) * BITS_PER_BYTE;
+
+ while (true) {
+ int vp_bank_idx = -1;
+ int vp_bank_size = sizeof(*bank_contents) * BITS_PER_BYTE;
+ int vp_index;
+
+ bank_idx = find_next_bit((unsigned long *)&vpset->valid_bank_mask,
+ bank_mask_size, bank_idx + 1);
+ if (bank_idx == bank_mask_size)
+ break;
+
+ while (true) {
+ struct mshv_vp *vp;
+
+ vp_bank_idx = find_next_bit((unsigned long *)bank_contents,
+ vp_bank_size, vp_bank_idx + 1);
+ if (vp_bank_idx == vp_bank_size)
+ break;
+
+ vp_index = (bank_idx * vp_bank_size) + vp_bank_idx;
+
+ /* This shouldn't happen, but just in case. */
+ if (unlikely(vp_index >= MSHV_MAX_VPS)) {
+ pr_debug("VP index %u out of bounds\n",
+ vp_index);
+ goto unlock_out;
+ }
+
+ vp = partition->pt_vp_array[vp_index];
+ if (unlikely(!vp)) {
+ pr_debug("failed to find VP %u\n", vp_index);
+ goto unlock_out;
+ }
+
+ kick_vp(vp);
+ vps_signaled++;
+ }
+
+ bank_contents++;
+ }
+
+unlock_out:
+ rcu_read_unlock();
+
+ if (vps_signaled != msg->vp_count)
+ pr_debug("asked to signal %u VPs but only did %u\n",
+ msg->vp_count, vps_signaled);
+}
+
+static void
+handle_pair_message(const struct hv_vp_signal_pair_scheduler_message *msg)
+{
+ struct mshv_partition *partition = NULL;
+ struct mshv_vp *vp;
+ int idx;
+
+ rcu_read_lock();
+
+ for (idx = 0; idx < msg->vp_count; idx++) {
+ u64 partition_id = msg->partition_ids[idx];
+ u32 vp_index = msg->vp_indexes[idx];
+
+ if (idx == 0 || partition->pt_id != partition_id) {
+ partition = mshv_partition_find(partition_id);
+ if (unlikely(!partition)) {
+ pr_debug("failed to find partition %llu\n",
+ partition_id);
+ break;
+ }
+ }
+
+ /* This shouldn't happen, but just in case. */
+ if (unlikely(vp_index >= MSHV_MAX_VPS)) {
+ pr_debug("VP index %u out of bounds\n", vp_index);
+ break;
+ }
+
+ vp = partition->pt_vp_array[vp_index];
+ if (!vp) {
+ pr_debug("failed to find VP %u\n", vp_index);
+ break;
+ }
+
+ kick_vp(vp);
+ }
+
+ rcu_read_unlock();
+}
+
+static bool
+mshv_scheduler_isr(struct hv_message *msg)
+{
+ if (msg->header.message_type != HVMSG_SCHEDULER_VP_SIGNAL_BITSET &&
+ msg->header.message_type != HVMSG_SCHEDULER_VP_SIGNAL_PAIR)
+ return false;
+
+ if (msg->header.message_type == HVMSG_SCHEDULER_VP_SIGNAL_BITSET)
+ handle_bitset_message((struct hv_vp_signal_bitset_scheduler_message *)
+ msg->u.payload);
+ else
+ handle_pair_message((struct hv_vp_signal_pair_scheduler_message *)
+ msg->u.payload);
+
+ return true;
+}
+
+static bool
+mshv_intercept_isr(struct hv_message *msg)
+{
+ struct mshv_partition *partition;
+ bool handled = false;
+ struct mshv_vp *vp;
+ u64 partition_id;
+ u32 vp_index;
+
+ partition_id = msg->header.sender;
+
+ rcu_read_lock();
+
+ partition = mshv_partition_find(partition_id);
+ if (unlikely(!partition)) {
+ pr_debug("failed to find partition %llu\n",
+ partition_id);
+ goto unlock_out;
+ }
+
+ if (msg->header.message_type == HVMSG_X64_APIC_EOI) {
+ /*
+ * Check if this gsi is registered in the
+ * ack_notifier list and invoke the callback
+ * if registered.
+ */
+
+ /*
+ * If there is a notifier, the ack callback is supposed
+ * to handle the VMEXIT. So we need not pass this message
+ * to vcpu thread.
+ */
+ struct hv_x64_apic_eoi_message *eoi_msg =
+ (struct hv_x64_apic_eoi_message *)&msg->u.payload[0];
+
+ if (mshv_notify_acked_gsi(partition, eoi_msg->interrupt_vector)) {
+ handled = true;
+ goto unlock_out;
+ }
+ }
+
+ /*
+ * We should get an opaque intercept message here for all intercept
+ * messages, since we're using the mapped VP intercept message page.
+ *
+ * The intercept message will have been placed in intercept message
+ * page at this point.
+ *
+ * Make sure the message type matches our expectation.
+ */
+ if (msg->header.message_type != HVMSG_OPAQUE_INTERCEPT) {
+ pr_debug("wrong message type %d", msg->header.message_type);
+ goto unlock_out;
+ }
+
+ /*
+ * Since we directly index the vp, and it has to exist for us to be here
+ * (because the vp is only deleted when the partition is), no additional
+ * locking is needed here
+ */
+ vp_index =
+ ((struct hv_opaque_intercept_message *)msg->u.payload)->vp_index;
+ vp = partition->pt_vp_array[vp_index];
+ if (unlikely(!vp)) {
+ pr_debug("failed to find VP %u\n", vp_index);
+ goto unlock_out;
+ }
+
+ kick_vp(vp);
+
+ handled = true;
+
+unlock_out:
+ rcu_read_unlock();
+
+ return handled;
+}
+
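+/*
+ * Main SynIC interrupt handler for the root partition: dispatch the
+ * pending message to the doorbell, scheduler, async-completion or
+ * intercept handler, then clear the message slot and signal EOM to the
+ * hypervisor if another message is pending.
+ */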
+void mshv_isr(void)
+{
+ struct hv_synic_pages *spages = this_cpu_ptr(mshv_root.synic_pages);
+ struct hv_message_page **msg_page = &spages->synic_message_page;
+ struct hv_message *msg;
+ bool handled;
+
+ if (unlikely(!(*msg_page))) {
+ pr_debug("Missing synic page!\n");
+ return;
+ }
+
+ msg = &((*msg_page)->sint_message[HV_SYNIC_INTERCEPTION_SINT_INDEX]);
+
+ /*
+ * If the type isn't set, there isn't really a message;
+ * it may be some other Hyper-V interrupt.
+ */
+ if (msg->header.message_type == HVMSG_NONE)
+ return;
+
+ handled = mshv_doorbell_isr(msg);
+
+ if (!handled)
+ handled = mshv_scheduler_isr(msg);
+
+ if (!handled)
+ handled = mshv_async_call_completion_isr(msg);
+
+ if (!handled)
+ handled = mshv_intercept_isr(msg);
+
+ if (handled) {
+ /*
+ * Acknowledge message with hypervisor if another message is
+ * pending.
+ */
+ msg->header.message_type = HVMSG_NONE;
+ /*
+ * Ensure the write is complete so the hypervisor will deliver
+ * the next message if available.
+ */
+ mb();
+ if (msg->header.message_flags.msg_pending)
+ hv_set_non_nested_msr(HV_MSR_EOM, 0);
+
+#ifdef HYPERVISOR_CALLBACK_VECTOR
+ add_interrupt_randomness(HYPERVISOR_CALLBACK_VECTOR);
+#endif
+ } else {
+ pr_warn_once("%s: unknown message type 0x%x\n", __func__,
+ msg->header.message_type);
+ }
+}
+
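+/*
+ * Per-cpu SynIC setup: map the message, event flags and event ring pages,
+ * enable the interception and doorbell SINTs, and set the global SynIC
+ * enable bit.
+ */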
+int mshv_synic_init(unsigned int cpu)
+{
+ union hv_synic_simp simp;
+ union hv_synic_siefp siefp;
+ union hv_synic_sirbp sirbp;
+#ifdef HYPERVISOR_CALLBACK_VECTOR
+ union hv_synic_sint sint;
+#endif
+ union hv_synic_scontrol sctrl;
+ struct hv_synic_pages *spages = this_cpu_ptr(mshv_root.synic_pages);
+ struct hv_message_page **msg_page = &spages->synic_message_page;
+ struct hv_synic_event_flags_page **event_flags_page =
+ &spages->synic_event_flags_page;
+ struct hv_synic_event_ring_page **event_ring_page =
+ &spages->synic_event_ring_page;
+
+ /* Setup the Synic's message page */
+ simp.as_uint64 = hv_get_non_nested_msr(HV_MSR_SIMP);
+ simp.simp_enabled = true;
+ *msg_page = memremap(simp.base_simp_gpa << HV_HYP_PAGE_SHIFT,
+ HV_HYP_PAGE_SIZE,
+ MEMREMAP_WB);
+
+ if (!(*msg_page))
+ return -EFAULT;
+
+ hv_set_non_nested_msr(HV_MSR_SIMP, simp.as_uint64);
+
+ /* Setup the Synic's event flags page */
+ siefp.as_uint64 = hv_get_non_nested_msr(HV_MSR_SIEFP);
+ siefp.siefp_enabled = true;
+ *event_flags_page = memremap(siefp.base_siefp_gpa << PAGE_SHIFT,
+ PAGE_SIZE, MEMREMAP_WB);
+
+ if (!(*event_flags_page))
+ goto cleanup;
+
+ hv_set_non_nested_msr(HV_MSR_SIEFP, siefp.as_uint64);
+
+ /* Setup the Synic's event ring page */
+ sirbp.as_uint64 = hv_get_non_nested_msr(HV_MSR_SIRBP);
+ sirbp.sirbp_enabled = true;
+ *event_ring_page = memremap(sirbp.base_sirbp_gpa << PAGE_SHIFT,
+ PAGE_SIZE, MEMREMAP_WB);
+
+ if (!(*event_ring_page))
+ goto cleanup;
+
+ hv_set_non_nested_msr(HV_MSR_SIRBP, sirbp.as_uint64);
+
+#ifdef HYPERVISOR_CALLBACK_VECTOR
+ /* Enable intercepts */
+ sint.as_uint64 = 0;
+ sint.vector = HYPERVISOR_CALLBACK_VECTOR;
+ sint.masked = false;
+ sint.auto_eoi = hv_recommend_using_aeoi();
+ hv_set_non_nested_msr(HV_MSR_SINT0 + HV_SYNIC_INTERCEPTION_SINT_INDEX,
+ sint.as_uint64);
+
+ /* Doorbell SINT */
+ sint.as_uint64 = 0;
+ sint.vector = HYPERVISOR_CALLBACK_VECTOR;
+ sint.masked = false;
+ sint.as_intercept = 1;
+ sint.auto_eoi = hv_recommend_using_aeoi();
+ hv_set_non_nested_msr(HV_MSR_SINT0 + HV_SYNIC_DOORBELL_SINT_INDEX,
+ sint.as_uint64);
+#endif
+
+ /* Enable global synic bit */
+ sctrl.as_uint64 = hv_get_non_nested_msr(HV_MSR_SCONTROL);
+ sctrl.enable = 1;
+ hv_set_non_nested_msr(HV_MSR_SCONTROL, sctrl.as_uint64);
+
+ return 0;
+
+cleanup:
+ if (*event_ring_page) {
+ sirbp.sirbp_enabled = false;
+ hv_set_non_nested_msr(HV_MSR_SIRBP, sirbp.as_uint64);
+ memunmap(*event_ring_page);
+ }
+ if (*event_flags_page) {
+ siefp.siefp_enabled = false;
+ hv_set_non_nested_msr(HV_MSR_SIEFP, siefp.as_uint64);
+ memunmap(*event_flags_page);
+ }
+ if (*msg_page) {
+ simp.simp_enabled = false;
+ hv_set_non_nested_msr(HV_MSR_SIMP, simp.as_uint64);
+ memunmap(*msg_page);
+ }
+
+ return -EFAULT;
+}
+
+int mshv_synic_cleanup(unsigned int cpu)
+{
+ union hv_synic_sint sint;
+ union hv_synic_simp simp;
+ union hv_synic_siefp siefp;
+ union hv_synic_sirbp sirbp;
+ union hv_synic_scontrol sctrl;
+ struct hv_synic_pages *spages = this_cpu_ptr(mshv_root.synic_pages);
+ struct hv_message_page **msg_page = &spages->synic_message_page;
+ struct hv_synic_event_flags_page **event_flags_page =
+ &spages->synic_event_flags_page;
+ struct hv_synic_event_ring_page **event_ring_page =
+ &spages->synic_event_ring_page;
+
+ /* Disable the interrupt */
+ sint.as_uint64 = hv_get_non_nested_msr(HV_MSR_SINT0 + HV_SYNIC_INTERCEPTION_SINT_INDEX);
+ sint.masked = true;
+ hv_set_non_nested_msr(HV_MSR_SINT0 + HV_SYNIC_INTERCEPTION_SINT_INDEX,
+ sint.as_uint64);
+
+ /* Disable Doorbell SINT */
+ sint.as_uint64 = hv_get_non_nested_msr(HV_MSR_SINT0 + HV_SYNIC_DOORBELL_SINT_INDEX);
+ sint.masked = true;
+ hv_set_non_nested_msr(HV_MSR_SINT0 + HV_SYNIC_DOORBELL_SINT_INDEX,
+ sint.as_uint64);
+
+ /* Disable Synic's event ring page */
+ sirbp.as_uint64 = hv_get_non_nested_msr(HV_MSR_SIRBP);
+ sirbp.sirbp_enabled = false;
+ hv_set_non_nested_msr(HV_MSR_SIRBP, sirbp.as_uint64);
+ memunmap(*event_ring_page);
+
+ /* Disable Synic's event flags page */
+ siefp.as_uint64 = hv_get_non_nested_msr(HV_MSR_SIEFP);
+ siefp.siefp_enabled = false;
+ hv_set_non_nested_msr(HV_MSR_SIEFP, siefp.as_uint64);
+ memunmap(*event_flags_page);
+
+ /* Disable Synic's message page */
+ simp.as_uint64 = hv_get_non_nested_msr(HV_MSR_SIMP);
+ simp.simp_enabled = false;
+ hv_set_non_nested_msr(HV_MSR_SIMP, simp.as_uint64);
+ memunmap(*msg_page);
+
+ /* Disable global synic bit */
+ sctrl.as_uint64 = hv_get_non_nested_msr(HV_MSR_SCONTROL);
+ sctrl.enable = 0;
+ hv_set_non_nested_msr(HV_MSR_SCONTROL, sctrl.as_uint64);
+
+ return 0;
+}
+
+int
+mshv_register_doorbell(u64 partition_id, doorbell_cb_t doorbell_cb, void *data,
+ u64 gpa, u64 val, u64 flags)
+{
+ struct hv_connection_info connection_info = { 0 };
+ union hv_connection_id connection_id = { 0 };
+ struct port_table_info *port_table_info;
+ struct hv_port_info port_info = { 0 };
+ union hv_port_id port_id = { 0 };
+ int ret;
+
+ port_table_info = kmalloc(sizeof(*port_table_info), GFP_KERNEL);
+ if (!port_table_info)
+ return -ENOMEM;
+
+ port_table_info->hv_port_type = HV_PORT_TYPE_DOORBELL;
+ port_table_info->hv_port_doorbell.doorbell_cb = doorbell_cb;
+ port_table_info->hv_port_doorbell.data = data;
+ ret = mshv_portid_alloc(port_table_info);
+ if (ret < 0) {
+ kfree(port_table_info);
+ return ret;
+ }
+
+ port_id.u.id = ret;
+ port_info.port_type = HV_PORT_TYPE_DOORBELL;
+ port_info.doorbell_port_info.target_sint = HV_SYNIC_DOORBELL_SINT_INDEX;
+ port_info.doorbell_port_info.target_vp = HV_ANY_VP;
+ ret = hv_call_create_port(hv_current_partition_id, port_id, partition_id,
+ &port_info,
+ 0, 0, NUMA_NO_NODE);
+
+ if (ret < 0) {
+ mshv_portid_free(port_id.u.id);
+ return ret;
+ }
+
+ connection_id.u.id = port_id.u.id;
+ connection_info.port_type = HV_PORT_TYPE_DOORBELL;
+ connection_info.doorbell_connection_info.gpa = gpa;
+ connection_info.doorbell_connection_info.trigger_value = val;
+ connection_info.doorbell_connection_info.flags = flags;
+
+ ret = hv_call_connect_port(hv_current_partition_id, port_id, partition_id,
+ connection_id, &connection_info, 0, NUMA_NO_NODE);
+ if (ret < 0) {
+ hv_call_delete_port(hv_current_partition_id, port_id);
+ mshv_portid_free(port_id.u.id);
+ return ret;
+ }
+
+ /* Use the port_id as the doorbell_id */
+ return port_id.u.id;
+}
+
+void
+mshv_unregister_doorbell(u64 partition_id, int doorbell_portid)
+{
+ union hv_port_id port_id = { 0 };
+ union hv_connection_id connection_id = { 0 };
+
+ connection_id.u.id = doorbell_portid;
+ hv_call_disconnect_port(partition_id, connection_id);
+
+ port_id.u.id = doorbell_portid;
+ hv_call_delete_port(hv_current_partition_id, port_id);
+
+ mshv_portid_free(doorbell_portid);
+}
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index 0f6cd44fff29..8d3cff42bdbb 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -1611,16 +1611,16 @@ static ssize_t target_cpu_show(struct vmbus_channel *channel, char *buf)
{
return sprintf(buf, "%u\n", channel->target_cpu);
}
-static ssize_t target_cpu_store(struct vmbus_channel *channel,
- const char *buf, size_t count)
+
+int vmbus_channel_set_cpu(struct vmbus_channel *channel, u32 target_cpu)
{
- u32 target_cpu, origin_cpu;
- ssize_t ret = count;
+ u32 origin_cpu;
+ int ret = 0;
- if (vmbus_proto_version < VERSION_WIN10_V4_1)
- return -EIO;
+ lockdep_assert_cpus_held();
+ lockdep_assert_held(&vmbus_connection.channel_mutex);
- if (sscanf(buf, "%uu", &target_cpu) != 1)
+ if (vmbus_proto_version < VERSION_WIN10_V4_1)
return -EIO;
/* Validate target_cpu for the cpumask_test_cpu() operation below. */
@@ -1630,22 +1630,17 @@ static ssize_t target_cpu_store(struct vmbus_channel *channel,
if (!cpumask_test_cpu(target_cpu, housekeeping_cpumask(HK_TYPE_MANAGED_IRQ)))
return -EINVAL;
- /* No CPUs should come up or down during this. */
- cpus_read_lock();
-
- if (!cpu_online(target_cpu)) {
- cpus_read_unlock();
+ if (!cpu_online(target_cpu))
return -EINVAL;
- }
/*
- * Synchronizes target_cpu_store() and channel closure:
+ * Synchronizes vmbus_channel_set_cpu() and channel closure:
*
* { Initially: state = CHANNEL_OPENED }
*
* CPU1 CPU2
*
- * [target_cpu_store()] [vmbus_disconnect_ring()]
+ * [vmbus_channel_set_cpu()] [vmbus_disconnect_ring()]
*
* LOCK channel_mutex LOCK channel_mutex
* LOAD r1 = state LOAD r2 = state
@@ -1660,7 +1655,6 @@ static ssize_t target_cpu_store(struct vmbus_channel *channel,
* Note. The host processes the channel messages "sequentially", in
* the order in which they are received on a per-partition basis.
*/
- mutex_lock(&vmbus_connection.channel_mutex);
/*
* Hyper-V will ignore MODIFYCHANNEL messages for "non-open" channels;
@@ -1668,17 +1662,17 @@ static ssize_t target_cpu_store(struct vmbus_channel *channel,
*/
if (channel->state != CHANNEL_OPENED_STATE) {
ret = -EIO;
- goto cpu_store_unlock;
+ goto end;
}
origin_cpu = channel->target_cpu;
if (target_cpu == origin_cpu)
- goto cpu_store_unlock;
+ goto end;
if (vmbus_send_modifychannel(channel,
hv_cpu_number_to_vp_number(target_cpu))) {
ret = -EIO;
- goto cpu_store_unlock;
+ goto end;
}
/*
@@ -1708,10 +1702,26 @@ static ssize_t target_cpu_store(struct vmbus_channel *channel,
origin_cpu, target_cpu);
}
-cpu_store_unlock:
+end:
+ return ret;
+}
+
+static ssize_t target_cpu_store(struct vmbus_channel *channel,
+ const char *buf, size_t count)
+{
+ u32 target_cpu;
+ ssize_t ret;
+
+ if (sscanf(buf, "%uu", &target_cpu) != 1)
+ return -EIO;
+
+ cpus_read_lock();
+ mutex_lock(&vmbus_connection.channel_mutex);
+ ret = vmbus_channel_set_cpu(channel, target_cpu);
mutex_unlock(&vmbus_connection.channel_mutex);
cpus_read_unlock();
- return ret;
+
+ return ret ?: count;
}
static VMBUS_CHAN_ATTR(cpu, 0644, target_cpu_show, target_cpu_store);
@@ -2262,12 +2272,25 @@ void vmbus_free_mmio(resource_size_t start, resource_size_t size)
struct resource *iter;
mutex_lock(&hyperv_mmio_lock);
+
+ /*
+ * If all bytes of the MMIO range to be released are within the
+ * special case fb_mmio shadow region, skip releasing the shadow
+ * region since no corresponding __request_region() was done
+ * in vmbus_allocate_mmio().
+ */
+ if (fb_mmio && start >= fb_mmio->start &&
+ (start + size - 1 <= fb_mmio->end))
+ goto skip_shadow_release;
+
for (iter = hyperv_mmio; iter; iter = iter->sibling) {
if ((iter->start >= start + size) || (iter->end <= start))
continue;
__release_region(iter, start, size);
}
+
+skip_shadow_release:
release_mem_region(start, size);
mutex_unlock(&hyperv_mmio_lock);
@@ -2646,7 +2669,7 @@ static int __init hv_acpi_init(void)
if (!hv_is_hyperv_initialized())
return -ENODEV;
- if (hv_root_partition && !hv_nested)
+ if (hv_root_partition() && !hv_nested)
return 0;
/*
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 4cbaba15d86e..f91f713b0105 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -463,6 +463,16 @@ config SENSORS_BT1_PVT_ALARMS
the data conversion will be periodically performed and the data will be
saved in the internal driver cache.
+config SENSORS_CGBC
+ tristate "Congatec Board Controller Sensors"
+ depends on MFD_CGBC
+ help
+ Enable sensor support for the Congatec Board Controller. It has
+ temperature, voltage, current and fan sensors.
+
+ This driver can also be built as a module. If so, the module will be
+ called cgbc-hwmon.
+
config SENSORS_CHIPCAP2
tristate "Amphenol ChipCap 2 relative humidity and temperature sensor"
depends on I2C
@@ -789,6 +799,17 @@ config SENSORS_HS3001
This driver can also be built as a module. If so, the module
will be called hs3001.
+config SENSORS_HTU31
+ tristate "Measurement Specialties HTU31 humidity and temperature sensor"
+ depends on I2C
+ select CRC8
+ help
+ If you say yes here you get support for the HTU31 humidity
+ and temperature sensors.
+
+ This driver can also be built as a module. If so, the module
+ will be called htu31.
+
config SENSORS_IBMAEM
tristate "IBM Active Energy Manager temperature/power sensors and control"
select IPMI_SI
@@ -1519,7 +1540,7 @@ config SENSORS_LM90
MAX6657, MAX6658, MAX6659, MAX6680, MAX6681, MAX6692, MAX6695,
MAX6696,
ON Semiconductor NCT1008, NCT210, NCT72, NCT214, NCT218,
- Winbond/Nuvoton W83L771W/G/AWG/ASG,
+ Winbond/Nuvoton W83L771W/G/AWG/ASG, NCT7716, NCT7717 and NCT7718,
Philips NE1618, SA56004, GMT G781, Texas Instruments TMP451 and TMP461
sensor chips.
@@ -1625,7 +1646,7 @@ config SENSORS_NTC_THERMISTOR
B57891S0103 from EPCOS.
This driver can also be built as a module. If so, the module
- will be called ntc-thermistor.
+ will be called ntc_thermistor.
config SENSORS_NCT6683
tristate "Nuvoton NCT6683D"
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
index b7ef0f0562d3..766c652ef22b 100644
--- a/drivers/hwmon/Makefile
+++ b/drivers/hwmon/Makefile
@@ -59,6 +59,7 @@ obj-$(CONFIG_SENSORS_ASUS_ROG_RYUJIN) += asus_rog_ryujin.o
obj-$(CONFIG_SENSORS_ATXP1) += atxp1.o
obj-$(CONFIG_SENSORS_AXI_FAN_CONTROL) += axi-fan-control.o
obj-$(CONFIG_SENSORS_BT1_PVT) += bt1-pvt.o
+obj-$(CONFIG_SENSORS_CGBC) += cgbc-hwmon.o
obj-$(CONFIG_SENSORS_CHIPCAP2) += chipcap2.o
obj-$(CONFIG_SENSORS_CORETEMP) += coretemp.o
obj-$(CONFIG_SENSORS_CORSAIR_CPRO) += corsair-cpro.o
@@ -91,6 +92,7 @@ obj-$(CONFIG_SENSORS_GPIO_FAN) += gpio-fan.o
obj-$(CONFIG_SENSORS_GXP_FAN_CTRL) += gxp-fan-ctrl.o
obj-$(CONFIG_SENSORS_HIH6130) += hih6130.o
obj-$(CONFIG_SENSORS_HS3001) += hs3001.o
+obj-$(CONFIG_SENSORS_HTU31) += htu31.o
obj-$(CONFIG_SENSORS_ULTRA45) += ultra45_env.o
obj-$(CONFIG_SENSORS_I5500) += i5500_temp.o
obj-$(CONFIG_SENSORS_I5K_AMB) += i5k_amb.o
diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c
index 44afb07409a4..29ccdc2fb7ff 100644
--- a/drivers/hwmon/acpi_power_meter.c
+++ b/drivers/hwmon/acpi_power_meter.c
@@ -87,25 +87,14 @@ struct acpi_power_meter_resource {
bool power_alarm;
int sensors_valid;
unsigned long sensors_last_updated;
- struct sensor_device_attribute sensors[NUM_SENSORS];
- int num_sensors;
+#define POWER_METER_TRIP_AVERAGE_MIN_IDX 0
+#define POWER_METER_TRIP_AVERAGE_MAX_IDX 1
s64 trip[2];
int num_domain_devices;
struct acpi_device **domain_devices;
struct kobject *holders_dir;
};
-struct sensor_template {
- char *label;
- ssize_t (*show)(struct device *dev,
- struct device_attribute *devattr,
- char *buf);
- ssize_t (*set)(struct device *dev,
- struct device_attribute *devattr,
- const char *buf, size_t count);
- int index;
-};
-
/* Averaging interval */
static int update_avg_interval(struct acpi_power_meter_resource *resource)
{
@@ -124,62 +113,6 @@ static int update_avg_interval(struct acpi_power_meter_resource *resource)
return 0;
}
-static ssize_t show_avg_interval(struct device *dev,
- struct device_attribute *devattr,
- char *buf)
-{
- struct acpi_device *acpi_dev = to_acpi_device(dev);
- struct acpi_power_meter_resource *resource = acpi_dev->driver_data;
-
- mutex_lock(&resource->lock);
- update_avg_interval(resource);
- mutex_unlock(&resource->lock);
-
- return sprintf(buf, "%llu\n", resource->avg_interval);
-}
-
-static ssize_t set_avg_interval(struct device *dev,
- struct device_attribute *devattr,
- const char *buf, size_t count)
-{
- struct acpi_device *acpi_dev = to_acpi_device(dev);
- struct acpi_power_meter_resource *resource = acpi_dev->driver_data;
- union acpi_object arg0 = { ACPI_TYPE_INTEGER };
- struct acpi_object_list args = { 1, &arg0 };
- int res;
- unsigned long temp;
- unsigned long long data;
- acpi_status status;
-
- res = kstrtoul(buf, 10, &temp);
- if (res)
- return res;
-
- if (temp > resource->caps.max_avg_interval ||
- temp < resource->caps.min_avg_interval)
- return -EINVAL;
- arg0.integer.value = temp;
-
- mutex_lock(&resource->lock);
- status = acpi_evaluate_integer(resource->acpi_dev->handle, "_PAI",
- &args, &data);
- if (ACPI_SUCCESS(status))
- resource->avg_interval = temp;
- mutex_unlock(&resource->lock);
-
- if (ACPI_FAILURE(status)) {
- acpi_evaluation_failure_warn(resource->acpi_dev->handle, "_PAI",
- status);
- return -EINVAL;
- }
-
- /* _PAI returns 0 on success, nonzero otherwise */
- if (data)
- return -EINVAL;
-
- return count;
-}
-
/* Cap functions */
static int update_cap(struct acpi_power_meter_resource *resource)
{
@@ -198,61 +131,6 @@ static int update_cap(struct acpi_power_meter_resource *resource)
return 0;
}
-static ssize_t show_cap(struct device *dev,
- struct device_attribute *devattr,
- char *buf)
-{
- struct acpi_device *acpi_dev = to_acpi_device(dev);
- struct acpi_power_meter_resource *resource = acpi_dev->driver_data;
-
- mutex_lock(&resource->lock);
- update_cap(resource);
- mutex_unlock(&resource->lock);
-
- return sprintf(buf, "%llu\n", resource->cap * 1000);
-}
-
-static ssize_t set_cap(struct device *dev, struct device_attribute *devattr,
- const char *buf, size_t count)
-{
- struct acpi_device *acpi_dev = to_acpi_device(dev);
- struct acpi_power_meter_resource *resource = acpi_dev->driver_data;
- union acpi_object arg0 = { ACPI_TYPE_INTEGER };
- struct acpi_object_list args = { 1, &arg0 };
- int res;
- unsigned long temp;
- unsigned long long data;
- acpi_status status;
-
- res = kstrtoul(buf, 10, &temp);
- if (res)
- return res;
-
- temp = DIV_ROUND_CLOSEST(temp, 1000);
- if (temp > resource->caps.max_cap || temp < resource->caps.min_cap)
- return -EINVAL;
- arg0.integer.value = temp;
-
- mutex_lock(&resource->lock);
- status = acpi_evaluate_integer(resource->acpi_dev->handle, "_SHL",
- &args, &data);
- if (ACPI_SUCCESS(status))
- resource->cap = temp;
- mutex_unlock(&resource->lock);
-
- if (ACPI_FAILURE(status)) {
- acpi_evaluation_failure_warn(resource->acpi_dev->handle, "_SHL",
- status);
- return -EINVAL;
- }
-
- /* _SHL returns 0 on success, nonzero otherwise */
- if (data)
- return -EINVAL;
-
- return count;
-}
-
/* Power meter trip points */
static int set_acpi_trip(struct acpi_power_meter_resource *resource)
{
@@ -287,34 +165,6 @@ static int set_acpi_trip(struct acpi_power_meter_resource *resource)
return 0;
}
-static ssize_t set_trip(struct device *dev, struct device_attribute *devattr,
- const char *buf, size_t count)
-{
- struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
- struct acpi_device *acpi_dev = to_acpi_device(dev);
- struct acpi_power_meter_resource *resource = acpi_dev->driver_data;
- unsigned long temp, trip_bk;
- int res;
-
- res = kstrtoul(buf, 10, &temp);
- if (res)
- return res;
-
- temp = DIV_ROUND_CLOSEST(temp, 1000);
-
- guard(mutex)(&resource->lock);
-
- trip_bk = resource->trip[attr->index - 7];
- resource->trip[attr->index - 7] = temp;
- res = set_acpi_trip(resource);
- if (res) {
- resource->trip[attr->index - 7] = trip_bk;
- return res;
- }
-
- return count;
-}
-
/* Power meter */
static int update_meter(struct acpi_power_meter_resource *resource)
{
@@ -341,202 +191,6 @@ static int update_meter(struct acpi_power_meter_resource *resource)
return 0;
}
-static ssize_t show_power(struct device *dev,
- struct device_attribute *devattr,
- char *buf)
-{
- struct acpi_device *acpi_dev = to_acpi_device(dev);
- struct acpi_power_meter_resource *resource = acpi_dev->driver_data;
-
- mutex_lock(&resource->lock);
- update_meter(resource);
- mutex_unlock(&resource->lock);
-
- if (resource->power == UNKNOWN_POWER)
- return -ENODATA;
-
- return sprintf(buf, "%llu\n", resource->power * 1000);
-}
-
-/* Miscellaneous */
-static ssize_t show_str(struct device *dev,
- struct device_attribute *devattr,
- char *buf)
-{
- struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
- struct acpi_device *acpi_dev = to_acpi_device(dev);
- struct acpi_power_meter_resource *resource = acpi_dev->driver_data;
- acpi_string val;
- int ret;
-
- mutex_lock(&resource->lock);
- switch (attr->index) {
- case 0:
- val = resource->model_number;
- break;
- case 1:
- val = resource->serial_number;
- break;
- case 2:
- val = resource->oem_info;
- break;
- default:
- WARN(1, "Implementation error: unexpected attribute index %d\n",
- attr->index);
- val = "";
- break;
- }
- ret = sprintf(buf, "%s\n", val);
- mutex_unlock(&resource->lock);
- return ret;
-}
-
-static ssize_t show_val(struct device *dev,
- struct device_attribute *devattr,
- char *buf)
-{
- struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
- struct acpi_device *acpi_dev = to_acpi_device(dev);
- struct acpi_power_meter_resource *resource = acpi_dev->driver_data;
- u64 val = 0;
- int ret;
-
- guard(mutex)(&resource->lock);
-
- switch (attr->index) {
- case 0:
- val = resource->caps.min_avg_interval;
- break;
- case 1:
- val = resource->caps.max_avg_interval;
- break;
- case 2:
- val = resource->caps.min_cap * 1000;
- break;
- case 3:
- val = resource->caps.max_cap * 1000;
- break;
- case 4:
- if (resource->caps.hysteresis == UNKNOWN_HYSTERESIS)
- return sprintf(buf, "unknown\n");
-
- val = resource->caps.hysteresis * 1000;
- break;
- case 5:
- if (resource->caps.flags & POWER_METER_IS_BATTERY)
- val = 1;
- else
- val = 0;
- break;
- case 6:
- ret = update_meter(resource);
- if (ret)
- return ret;
- /* need to update cap if not to support the notification. */
- if (!(resource->caps.flags & POWER_METER_CAN_NOTIFY)) {
- ret = update_cap(resource);
- if (ret)
- return ret;
- }
- val = resource->power_alarm || resource->power > resource->cap;
- resource->power_alarm = resource->power > resource->cap;
- break;
- case 7:
- case 8:
- if (resource->trip[attr->index - 7] < 0)
- return sprintf(buf, "unknown\n");
-
- val = resource->trip[attr->index - 7] * 1000;
- break;
- default:
- WARN(1, "Implementation error: unexpected attribute index %d\n",
- attr->index);
- break;
- }
-
- return sprintf(buf, "%llu\n", val);
-}
-
-static ssize_t show_accuracy(struct device *dev,
- struct device_attribute *devattr,
- char *buf)
-{
- struct acpi_device *acpi_dev = to_acpi_device(dev);
- struct acpi_power_meter_resource *resource = acpi_dev->driver_data;
- unsigned int acc = resource->caps.accuracy;
-
- return sprintf(buf, "%u.%u%%\n", acc / 1000, acc % 1000);
-}
-
-static ssize_t show_name(struct device *dev,
- struct device_attribute *devattr,
- char *buf)
-{
- return sprintf(buf, "%s\n", ACPI_POWER_METER_NAME);
-}
-
-#define RO_SENSOR_TEMPLATE(_label, _show, _index) \
- { \
- .label = _label, \
- .show = _show, \
- .index = _index, \
- }
-
-#define RW_SENSOR_TEMPLATE(_label, _show, _set, _index) \
- { \
- .label = _label, \
- .show = _show, \
- .set = _set, \
- .index = _index, \
- }
-
-/* Sensor descriptions. If you add a sensor, update NUM_SENSORS above! */
-static struct sensor_template meter_attrs[] = {
- RO_SENSOR_TEMPLATE(POWER_AVERAGE_NAME, show_power, 0),
- RO_SENSOR_TEMPLATE("power1_accuracy", show_accuracy, 0),
- RO_SENSOR_TEMPLATE("power1_average_interval_min", show_val, 0),
- RO_SENSOR_TEMPLATE("power1_average_interval_max", show_val, 1),
- RO_SENSOR_TEMPLATE("power1_is_battery", show_val, 5),
- RW_SENSOR_TEMPLATE(POWER_AVG_INTERVAL_NAME, show_avg_interval,
- set_avg_interval, 0),
- {},
-};
-
-static struct sensor_template misc_cap_attrs[] = {
- RO_SENSOR_TEMPLATE("power1_cap_min", show_val, 2),
- RO_SENSOR_TEMPLATE("power1_cap_max", show_val, 3),
- RO_SENSOR_TEMPLATE("power1_cap_hyst", show_val, 4),
- RO_SENSOR_TEMPLATE(POWER_ALARM_NAME, show_val, 6),
- {},
-};
-
-static struct sensor_template ro_cap_attrs[] = {
- RO_SENSOR_TEMPLATE(POWER_CAP_NAME, show_cap, 0),
- {},
-};
-
-static struct sensor_template rw_cap_attrs[] = {
- RW_SENSOR_TEMPLATE(POWER_CAP_NAME, show_cap, set_cap, 0),
- {},
-};
-
-static struct sensor_template trip_attrs[] = {
- RW_SENSOR_TEMPLATE("power1_average_min", show_val, set_trip, 7),
- RW_SENSOR_TEMPLATE("power1_average_max", show_val, set_trip, 8),
- {},
-};
-
-static struct sensor_template misc_attrs[] = {
- RO_SENSOR_TEMPLATE("name", show_name, 0),
- RO_SENSOR_TEMPLATE("power1_model_number", show_str, 0),
- RO_SENSOR_TEMPLATE("power1_oem_info", show_str, 2),
- RO_SENSOR_TEMPLATE("power1_serial_number", show_str, 1),
- {},
-};
-
-#undef RO_SENSOR_TEMPLATE
-#undef RW_SENSOR_TEMPLATE
-
/* Read power domain data */
static void remove_domain_devices(struct acpi_power_meter_resource *resource)
{
@@ -638,109 +292,434 @@ end:
return res;
}
-/* Registration and deregistration */
-static int register_attrs(struct acpi_power_meter_resource *resource,
- struct sensor_template *attrs)
+static int set_trip(struct acpi_power_meter_resource *resource, u16 trip_idx,
+ unsigned long trip)
{
- struct device *dev = &resource->acpi_dev->dev;
- struct sensor_device_attribute *sensors =
- &resource->sensors[resource->num_sensors];
- int res = 0;
+ unsigned long trip_bk;
+ int ret;
- while (attrs->label) {
- sensors->dev_attr.attr.name = attrs->label;
- sensors->dev_attr.attr.mode = 0444;
- sensors->dev_attr.show = attrs->show;
- sensors->index = attrs->index;
+ trip = DIV_ROUND_CLOSEST(trip, 1000);
+ trip_bk = resource->trip[trip_idx];
- if (attrs->set) {
- sensors->dev_attr.attr.mode |= 0200;
- sensors->dev_attr.store = attrs->set;
- }
+ resource->trip[trip_idx] = trip;
+ ret = set_acpi_trip(resource);
+ if (ret) {
+ dev_err(&resource->acpi_dev->dev, "set %s failed.\n",
+ (trip_idx == POWER_METER_TRIP_AVERAGE_MIN_IDX) ?
+ "power1_average_min" : "power1_average_max");
+ resource->trip[trip_idx] = trip_bk;
+ }
- sysfs_attr_init(&sensors->dev_attr.attr);
- res = device_create_file(dev, &sensors->dev_attr);
- if (res) {
- sensors->dev_attr.attr.name = NULL;
- goto error;
- }
- sensors++;
- resource->num_sensors++;
- attrs++;
+ return ret;
+}
+
+static int set_cap(struct acpi_power_meter_resource *resource,
+ unsigned long cap)
+{
+ union acpi_object arg0 = { ACPI_TYPE_INTEGER };
+ struct acpi_object_list args = { 1, &arg0 };
+ unsigned long long data;
+ acpi_status status;
+
+ cap = DIV_ROUND_CLOSEST(cap, 1000);
+ if (cap > resource->caps.max_cap || cap < resource->caps.min_cap)
+ return -EINVAL;
+
+ arg0.integer.value = cap;
+ status = acpi_evaluate_integer(resource->acpi_dev->handle, "_SHL",
+ &args, &data);
+ if (ACPI_FAILURE(status)) {
+ acpi_evaluation_failure_warn(resource->acpi_dev->handle, "_SHL",
+ status);
+ return -EINVAL;
}
+ resource->cap = cap;
-error:
- return res;
+ /* _SHL returns 0 on success, nonzero otherwise */
+ if (data)
+ return -EINVAL;
+
+ return 0;
}
-static void remove_attrs(struct acpi_power_meter_resource *resource)
+static int set_avg_interval(struct acpi_power_meter_resource *resource,
+ unsigned long val)
{
- int i;
+ union acpi_object arg0 = { ACPI_TYPE_INTEGER };
+ struct acpi_object_list args = { 1, &arg0 };
+ unsigned long long data;
+ acpi_status status;
- for (i = 0; i < resource->num_sensors; i++) {
- if (!resource->sensors[i].dev_attr.attr.name)
- continue;
- device_remove_file(&resource->acpi_dev->dev,
- &resource->sensors[i].dev_attr);
+ if (val > resource->caps.max_avg_interval ||
+ val < resource->caps.min_avg_interval)
+ return -EINVAL;
+
+ arg0.integer.value = val;
+ status = acpi_evaluate_integer(resource->acpi_dev->handle, "_PAI",
+ &args, &data);
+ if (ACPI_FAILURE(status)) {
+ acpi_evaluation_failure_warn(resource->acpi_dev->handle, "_PAI",
+ status);
+ return -EINVAL;
}
+ resource->avg_interval = val;
- remove_domain_devices(resource);
+ /* _PAI returns 0 on success, nonzero otherwise */
+ if (data)
+ return -EINVAL;
- resource->num_sensors = 0;
+ return 0;
}
-static int setup_attrs(struct acpi_power_meter_resource *resource)
+static int get_power_alarm_state(struct acpi_power_meter_resource *resource,
+ long *val)
{
- int res = 0;
+ int ret;
- /* _PMD method is optional. */
- res = read_domain_devices(resource);
- if (res && res != -ENODEV)
- return res;
+ ret = update_meter(resource);
+ if (ret)
+ return ret;
- if (resource->caps.flags & POWER_METER_CAN_MEASURE) {
- res = register_attrs(resource, meter_attrs);
- if (res)
- goto error;
+ /* Need to update the cap if notifications are not supported. */
+ if (!(resource->caps.flags & POWER_METER_CAN_NOTIFY)) {
+ ret = update_cap(resource);
+ if (ret)
+ return ret;
+ resource->power_alarm = resource->power > resource->cap;
+ *val = resource->power_alarm;
+ } else {
+ *val = resource->power_alarm || resource->power > resource->cap;
+ resource->power_alarm = resource->power > resource->cap;
}
- if (resource->caps.flags & POWER_METER_CAN_CAP) {
- if (!can_cap_in_hardware()) {
- dev_warn(&resource->acpi_dev->dev,
- "Ignoring unsafe software power cap!\n");
- goto skip_unsafe_cap;
+ return 0;
+}
+
+static umode_t power_meter_is_visible(const void *data,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ const struct acpi_power_meter_resource *res = data;
+
+ if (type != hwmon_power)
+ return 0;
+
+ switch (attr) {
+ case hwmon_power_average:
+ case hwmon_power_average_interval_min:
+ case hwmon_power_average_interval_max:
+ if (res->caps.flags & POWER_METER_CAN_MEASURE)
+ return 0444;
+ break;
+ case hwmon_power_average_interval:
+ if (res->caps.flags & POWER_METER_CAN_MEASURE)
+ return 0644;
+ break;
+ case hwmon_power_cap_min:
+ case hwmon_power_cap_max:
+ case hwmon_power_alarm:
+ if (res->caps.flags & POWER_METER_CAN_CAP && can_cap_in_hardware())
+ return 0444;
+ break;
+ case hwmon_power_cap:
+ if (res->caps.flags & POWER_METER_CAN_CAP && can_cap_in_hardware()) {
+ if (res->caps.configurable_cap)
+ return 0644;
+ else
+ return 0444;
}
+ break;
+ default:
+ break;
+ }
- if (resource->caps.configurable_cap)
- res = register_attrs(resource, rw_cap_attrs);
- else
- res = register_attrs(resource, ro_cap_attrs);
+ return 0;
+}
- if (res)
- goto error;
+static int power_meter_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ struct acpi_power_meter_resource *res = dev_get_drvdata(dev);
+ int ret = 0;
- res = register_attrs(resource, misc_cap_attrs);
- if (res)
- goto error;
+ if (type != hwmon_power)
+ return -EINVAL;
+
+ guard(mutex)(&res->lock);
+
+ switch (attr) {
+ case hwmon_power_average:
+ ret = update_meter(res);
+ if (ret)
+ return ret;
+ if (res->power == UNKNOWN_POWER)
+ return -ENODATA;
+ *val = res->power * 1000;
+ break;
+ case hwmon_power_average_interval_min:
+ *val = res->caps.min_avg_interval;
+ break;
+ case hwmon_power_average_interval_max:
+ *val = res->caps.max_avg_interval;
+ break;
+ case hwmon_power_average_interval:
+ ret = update_avg_interval(res);
+ if (ret)
+ return ret;
+ *val = res->avg_interval;
+ break;
+ case hwmon_power_cap_min:
+ *val = res->caps.min_cap * 1000;
+ break;
+ case hwmon_power_cap_max:
+ *val = res->caps.max_cap * 1000;
+ break;
+ case hwmon_power_alarm:
+ ret = get_power_alarm_state(res, val);
+ if (ret)
+ return ret;
+ break;
+ case hwmon_power_cap:
+ ret = update_cap(res);
+ if (ret)
+ return ret;
+ *val = res->cap * 1000;
+ break;
+ default:
+ break;
}
-skip_unsafe_cap:
- if (resource->caps.flags & POWER_METER_CAN_TRIP) {
- res = register_attrs(resource, trip_attrs);
- if (res)
- goto error;
+ return 0;
+}
+
+static int power_meter_write(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long val)
+{
+ struct acpi_power_meter_resource *res = dev_get_drvdata(dev);
+ int ret;
+
+ if (type != hwmon_power)
+ return -EINVAL;
+
+ guard(mutex)(&res->lock);
+ switch (attr) {
+ case hwmon_power_cap:
+ ret = set_cap(res, val);
+ break;
+ case hwmon_power_average_interval:
+ ret = set_avg_interval(res, val);
+ break;
+ default:
+ ret = -EOPNOTSUPP;
}
- res = register_attrs(resource, misc_attrs);
- if (res)
- goto error;
+ return ret;
+}
- return res;
-error:
- remove_attrs(resource);
- return res;
+static const struct hwmon_channel_info * const power_meter_info[] = {
+ HWMON_CHANNEL_INFO(power, HWMON_P_AVERAGE |
+ HWMON_P_AVERAGE_INTERVAL | HWMON_P_AVERAGE_INTERVAL_MIN |
+ HWMON_P_AVERAGE_INTERVAL_MAX | HWMON_P_CAP | HWMON_P_CAP_MIN |
+ HWMON_P_CAP_MAX | HWMON_P_ALARM),
+ NULL
+};
+
+static const struct hwmon_ops power_meter_ops = {
+ .is_visible = power_meter_is_visible,
+ .read = power_meter_read,
+ .write = power_meter_write,
+};
+
+static const struct hwmon_chip_info power_meter_chip_info = {
+ .ops = &power_meter_ops,
+ .info = power_meter_info,
+};
+
+static ssize_t power1_average_max_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct acpi_power_meter_resource *res = dev_get_drvdata(dev);
+ unsigned long trip;
+ int ret;
+
+ ret = kstrtoul(buf, 10, &trip);
+ if (ret)
+ return ret;
+
+ mutex_lock(&res->lock);
+ ret = set_trip(res, POWER_METER_TRIP_AVERAGE_MAX_IDX, trip);
+ mutex_unlock(&res->lock);
+
+ return ret == 0 ? count : ret;
+}
+
+static ssize_t power1_average_min_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct acpi_power_meter_resource *res = dev_get_drvdata(dev);
+ unsigned long trip;
+ int ret;
+
+ ret = kstrtoul(buf, 10, &trip);
+ if (ret)
+ return ret;
+
+ mutex_lock(&res->lock);
+ ret = set_trip(res, POWER_METER_TRIP_AVERAGE_MIN_IDX, trip);
+ mutex_unlock(&res->lock);
+
+ return ret == 0 ? count : ret;
+}
+
+static ssize_t power1_average_min_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct acpi_power_meter_resource *res = dev_get_drvdata(dev);
+
+ if (res->trip[POWER_METER_TRIP_AVERAGE_MIN_IDX] < 0)
+ return sysfs_emit(buf, "unknown\n");
+
+ return sysfs_emit(buf, "%lld\n",
+ res->trip[POWER_METER_TRIP_AVERAGE_MIN_IDX] * 1000);
+}
+
+static ssize_t power1_average_max_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct acpi_power_meter_resource *res = dev_get_drvdata(dev);
+
+ if (res->trip[POWER_METER_TRIP_AVERAGE_MAX_IDX] < 0)
+ return sysfs_emit(buf, "unknown\n");
+
+ return sysfs_emit(buf, "%lld\n",
+ res->trip[POWER_METER_TRIP_AVERAGE_MAX_IDX] * 1000);
+}
+
+static ssize_t power1_cap_hyst_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct acpi_power_meter_resource *res = dev_get_drvdata(dev);
+
+ if (res->caps.hysteresis == UNKNOWN_HYSTERESIS)
+ return sysfs_emit(buf, "unknown\n");
+
+ return sysfs_emit(buf, "%llu\n", res->caps.hysteresis * 1000);
+}
+
+static ssize_t power1_accuracy_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct acpi_power_meter_resource *res = dev_get_drvdata(dev);
+ unsigned int acc = res->caps.accuracy;
+
+ return sysfs_emit(buf, "%u.%u%%\n", acc / 1000, acc % 1000);
+}
+
+static ssize_t power1_is_battery_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct acpi_power_meter_resource *res = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%u\n",
+ res->caps.flags & POWER_METER_IS_BATTERY ? 1 : 0);
+}
+
+static ssize_t power1_model_number_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct acpi_power_meter_resource *res = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%s\n", res->model_number);
+}
+
+static ssize_t power1_oem_info_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct acpi_power_meter_resource *res = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%s\n", res->oem_info);
+}
+
+static ssize_t power1_serial_number_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct acpi_power_meter_resource *res = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%s\n", res->serial_number);
}
+/* depend on POWER_METER_CAN_TRIP */
+static DEVICE_ATTR_RW(power1_average_max);
+static DEVICE_ATTR_RW(power1_average_min);
+
+/* depend on POWER_METER_CAN_CAP */
+static DEVICE_ATTR_RO(power1_cap_hyst);
+
+/* depend on POWER_METER_CAN_MEASURE */
+static DEVICE_ATTR_RO(power1_accuracy);
+static DEVICE_ATTR_RO(power1_is_battery);
+
+static DEVICE_ATTR_RO(power1_model_number);
+static DEVICE_ATTR_RO(power1_oem_info);
+static DEVICE_ATTR_RO(power1_serial_number);
+
+static umode_t power_extra_is_visible(struct kobject *kobj,
+ struct attribute *attr, int idx)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct acpi_power_meter_resource *res = dev_get_drvdata(dev);
+
+ if (attr == &dev_attr_power1_is_battery.attr ||
+ attr == &dev_attr_power1_accuracy.attr) {
+ if ((res->caps.flags & POWER_METER_CAN_MEASURE) == 0)
+ return 0;
+ }
+
+ if (attr == &dev_attr_power1_cap_hyst.attr) {
+ if ((res->caps.flags & POWER_METER_CAN_CAP) == 0) {
+ return 0;
+ } else if (!can_cap_in_hardware()) {
+ dev_warn(&res->acpi_dev->dev,
+ "Ignoring unsafe software power cap!\n");
+ return 0;
+ }
+ }
+
+ if (attr == &dev_attr_power1_average_max.attr ||
+ attr == &dev_attr_power1_average_min.attr) {
+ if ((res->caps.flags & POWER_METER_CAN_TRIP) == 0)
+ return 0;
+ }
+
+ return attr->mode;
+}
+
+static struct attribute *power_extra_attrs[] = {
+ &dev_attr_power1_average_max.attr,
+ &dev_attr_power1_average_min.attr,
+ &dev_attr_power1_cap_hyst.attr,
+ &dev_attr_power1_accuracy.attr,
+ &dev_attr_power1_is_battery.attr,
+ &dev_attr_power1_model_number.attr,
+ &dev_attr_power1_oem_info.attr,
+ &dev_attr_power1_serial_number.attr,
+ NULL
+};
+
+static const struct attribute_group power_extra_group = {
+ .attrs = power_extra_attrs,
+ .is_visible = power_extra_is_visible,
+};
+
+__ATTRIBUTE_GROUPS(power_extra);
+
static void free_capabilities(struct acpi_power_meter_resource *resource)
{
acpi_string *str;
@@ -848,13 +827,23 @@ static void acpi_power_meter_notify(struct acpi_device *device, u32 event)
case METER_NOTIFY_CONFIG:
mutex_lock(&resource->lock);
free_capabilities(resource);
+ remove_domain_devices(resource);
+ hwmon_device_unregister(resource->hwmon_dev);
res = read_capabilities(resource);
- mutex_unlock(&resource->lock);
if (res)
- break;
-
- remove_attrs(resource);
- setup_attrs(resource);
+ dev_err_once(&device->dev, "read capabilities failed.\n");
+ res = read_domain_devices(resource);
+ if (res && res != -ENODEV)
+ dev_err_once(&device->dev, "read domain devices failed.\n");
+ resource->hwmon_dev =
+ hwmon_device_register_with_info(&device->dev,
+ ACPI_POWER_METER_NAME,
+ resource,
+ &power_meter_chip_info,
+ power_extra_groups);
+ if (IS_ERR(resource->hwmon_dev))
+ dev_err_once(&device->dev, "register hwmon device failed.\n");
+ mutex_unlock(&resource->lock);
break;
case METER_NOTIFY_TRIP:
sysfs_notify(&device->dev.kobj, NULL, POWER_AVERAGE_NAME);
@@ -916,7 +905,7 @@ static int acpi_power_meter_add(struct acpi_device *device)
struct acpi_device *ipi_device = acpi_dev_get_first_match_dev("IPI0001", NULL, -1);
if (ipi_device && acpi_wait_for_acpi_ipmi())
- dev_warn(&device->dev, "Waiting for ACPI IPMI timeout");
+ dev_warn(&device->dev, "Waiting for ACPI IPMI timeout");
acpi_dev_put(ipi_device);
}
#endif
@@ -928,11 +917,16 @@ static int acpi_power_meter_add(struct acpi_device *device)
resource->trip[0] = -1;
resource->trip[1] = -1;
- res = setup_attrs(resource);
- if (res)
+ /* _PMD method is optional. */
+ res = read_domain_devices(resource);
+ if (res && res != -ENODEV)
goto exit_free_capability;
- resource->hwmon_dev = hwmon_device_register(&device->dev);
+ resource->hwmon_dev =
+ hwmon_device_register_with_info(&device->dev,
+ ACPI_POWER_METER_NAME, resource,
+ &power_meter_chip_info,
+ power_extra_groups);
if (IS_ERR(resource->hwmon_dev)) {
res = PTR_ERR(resource->hwmon_dev);
goto exit_remove;
@@ -942,7 +936,7 @@ static int acpi_power_meter_add(struct acpi_device *device)
goto exit;
exit_remove:
- remove_attrs(resource);
+ remove_domain_devices(resource);
exit_free_capability:
free_capabilities(resource);
exit_free:
@@ -961,7 +955,7 @@ static void acpi_power_meter_remove(struct acpi_device *device)
resource = acpi_driver_data(device);
hwmon_device_unregister(resource->hwmon_dev);
- remove_attrs(resource);
+ remove_domain_devices(resource);
free_capabilities(resource);
kfree(resource);
diff --git a/drivers/hwmon/ad7314.c b/drivers/hwmon/ad7314.c
index 7802bbf5f958..59424103f634 100644
--- a/drivers/hwmon/ad7314.c
+++ b/drivers/hwmon/ad7314.c
@@ -22,11 +22,13 @@
*/
#define AD7314_TEMP_MASK 0x7FE0
#define AD7314_TEMP_SHIFT 5
+#define AD7314_LEADING_ZEROS_MASK BIT(15)
/*
* ADT7301 and ADT7302 temperature masks
*/
#define ADT7301_TEMP_MASK 0x3FFF
+#define ADT7301_LEADING_ZEROS_MASK (BIT(15) | BIT(14))
enum ad7314_variant {
adt7301,
@@ -65,12 +67,20 @@ static ssize_t ad7314_temperature_show(struct device *dev,
return ret;
switch (spi_get_device_id(chip->spi_dev)->driver_data) {
case ad7314:
+ if (ret & AD7314_LEADING_ZEROS_MASK) {
+ /* Invalid read-out, leading zero part is missing */
+ return -EIO;
+ }
data = (ret & AD7314_TEMP_MASK) >> AD7314_TEMP_SHIFT;
data = sign_extend32(data, 9);
return sprintf(buf, "%d\n", 250 * data);
case adt7301:
case adt7302:
+ if (ret & ADT7301_LEADING_ZEROS_MASK) {
+ /* Invalid read-out, leading zero part is missing */
+ return -EIO;
+ }
/*
* Documented as a 13 bit twos complement register
* with a sign bit - which is a 14 bit 2's complement
diff --git a/drivers/hwmon/asus-ec-sensors.c b/drivers/hwmon/asus-ec-sensors.c
index 43e54dc513da..006ced5ab6e6 100644
--- a/drivers/hwmon/asus-ec-sensors.c
+++ b/drivers/hwmon/asus-ec-sensors.c
@@ -316,6 +316,14 @@ static const struct ec_board_info board_info_prime_x570_pro = {
.family = family_amd_500_series,
};
+static const struct ec_board_info board_info_prime_x670e_pro_wifi = {
+ .sensors = SENSOR_TEMP_CPU | SENSOR_TEMP_CPU_PACKAGE |
+ SENSOR_TEMP_MB | SENSOR_TEMP_VRM |
+ SENSOR_TEMP_T_SENSOR | SENSOR_FAN_CPU_OPT,
+ .mutex_path = ACPI_GLOBAL_LOCK_PSEUDO_PATH,
+ .family = family_amd_600_series,
+};
+
static const struct ec_board_info board_info_pro_art_x570_creator_wifi = {
.sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB | SENSOR_TEMP_VRM |
SENSOR_TEMP_T_SENSOR | SENSOR_FAN_CPU_OPT |
@@ -503,6 +511,8 @@ static const struct dmi_system_id dmi_table[] = {
&board_info_prime_x470_pro),
DMI_EXACT_MATCH_ASUS_BOARD_NAME("PRIME X570-PRO",
&board_info_prime_x570_pro),
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("PRIME X670E-PRO WIFI",
+ &board_info_prime_x670e_pro_wifi),
DMI_EXACT_MATCH_ASUS_BOARD_NAME("ProArt X570-CREATOR WIFI",
&board_info_pro_art_x570_creator_wifi),
DMI_EXACT_MATCH_ASUS_BOARD_NAME("ProArt X670E-CREATOR WIFI",
diff --git a/drivers/hwmon/cgbc-hwmon.c b/drivers/hwmon/cgbc-hwmon.c
new file mode 100644
index 000000000000..772f44d56ccf
--- /dev/null
+++ b/drivers/hwmon/cgbc-hwmon.c
@@ -0,0 +1,304 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * cgbc-hwmon - Congatec Board Controller hardware monitoring driver
+ *
+ * Copyright (C) 2024 Thomas Richard <thomas.richard@bootlin.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/device.h>
+#include <linux/hwmon.h>
+#include <linux/mfd/cgbc.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#define CGBC_HWMON_CMD_SENSOR 0x77
+#define CGBC_HWMON_CMD_SENSOR_DATA_SIZE 0x05
+
+#define CGBC_HWMON_TYPE_MASK GENMASK(6, 5)
+#define CGBC_HWMON_ID_MASK GENMASK(4, 0)
+#define CGBC_HWMON_ACTIVE_BIT BIT(7)
+
+struct cgbc_hwmon_sensor {
+ enum hwmon_sensor_types type;
+ bool active;
+ unsigned int index;
+ unsigned int channel;
+ const char *label;
+};
+
+struct cgbc_hwmon_data {
+ struct cgbc_device_data *cgbc;
+ unsigned int nb_sensors;
+ struct cgbc_hwmon_sensor *sensors;
+};
+
+enum cgbc_sensor_types {
+ CGBC_HWMON_TYPE_TEMP = 1,
+ CGBC_HWMON_TYPE_IN,
+ CGBC_HWMON_TYPE_FAN
+};
+
+static const char * const cgbc_hwmon_labels_temp[] = {
+ "CPU Temperature",
+ "Box Temperature",
+ "Ambient Temperature",
+ "Board Temperature",
+ "Carrier Temperature",
+ "Chipset Temperature",
+ "Video Temperature",
+ "Other Temperature",
+ "TOPDIM Temperature",
+ "BOTTOMDIM Temperature",
+};
+
+static const struct {
+ enum hwmon_sensor_types type;
+ const char *label;
+} cgbc_hwmon_labels_in[] = {
+ { hwmon_in, "CPU Voltage" },
+ { hwmon_in, "DC Runtime Voltage" },
+ { hwmon_in, "DC Standby Voltage" },
+ { hwmon_in, "CMOS Battery Voltage" },
+ { hwmon_in, "Battery Voltage" },
+ { hwmon_in, "AC Voltage" },
+ { hwmon_in, "Other Voltage" },
+ { hwmon_in, "5V Voltage" },
+ { hwmon_in, "5V Standby Voltage" },
+ { hwmon_in, "3V3 Voltage" },
+ { hwmon_in, "3V3 Standby Voltage" },
+ { hwmon_in, "VCore A Voltage" },
+ { hwmon_in, "VCore B Voltage" },
+ { hwmon_in, "12V Voltage" },
+ { hwmon_curr, "DC Current" },
+ { hwmon_curr, "5V Current" },
+ { hwmon_curr, "12V Current" },
+};
+
+#define CGBC_HWMON_NB_IN_SENSORS 14
+
+static const char * const cgbc_hwmon_labels_fan[] = {
+ "CPU Fan",
+ "Box Fan",
+ "Ambient Fan",
+ "Chipset Fan",
+ "Video Fan",
+ "Other Fan",
+};
+
+static int cgbc_hwmon_cmd(struct cgbc_device_data *cgbc, u8 index, u8 *data)
+{
+ u8 cmd[2] = {CGBC_HWMON_CMD_SENSOR, index};
+
+ return cgbc_command(cgbc, cmd, sizeof(cmd), data, CGBC_HWMON_CMD_SENSOR_DATA_SIZE, NULL);
+}
+
+static int cgbc_hwmon_probe_sensors(struct device *dev, struct cgbc_hwmon_data *hwmon)
+{
+ struct cgbc_device_data *cgbc = hwmon->cgbc;
+ struct cgbc_hwmon_sensor *sensor = hwmon->sensors;
+ u8 data[CGBC_HWMON_CMD_SENSOR_DATA_SIZE], nb_sensors, i;
+ int ret;
+
+ ret = cgbc_hwmon_cmd(cgbc, 0, &data[0]);
+ if (ret)
+ return ret;
+
+ nb_sensors = data[0];
+
+ hwmon->sensors = devm_kzalloc(dev, sizeof(*hwmon->sensors) * nb_sensors, GFP_KERNEL);
+ if (!hwmon->sensors)
+ return -ENOMEM;
+
+ sensor = hwmon->sensors;
+
+ for (i = 0; i < nb_sensors; i++) {
+ enum cgbc_sensor_types type;
+ unsigned int channel;
+
+ /*
+ * No need to request data for the first sensor.
+ * We already received its data when we asked the Board Controller
+ * for the number of sensors.
+ */
+ if (i) {
+ ret = cgbc_hwmon_cmd(cgbc, i, &data[0]);
+ if (ret)
+ return ret;
+ }
+
+ type = FIELD_GET(CGBC_HWMON_TYPE_MASK, data[1]);
+ channel = FIELD_GET(CGBC_HWMON_ID_MASK, data[1]) - 1;
+
+ if (type == CGBC_HWMON_TYPE_TEMP && channel < ARRAY_SIZE(cgbc_hwmon_labels_temp)) {
+ sensor->type = hwmon_temp;
+ sensor->label = cgbc_hwmon_labels_temp[channel];
+ } else if (type == CGBC_HWMON_TYPE_IN &&
+ channel < ARRAY_SIZE(cgbc_hwmon_labels_in)) {
+ /*
+ * The Board Controller doesn't differentiate current and voltage sensors.
+ * Get the sensor type from cgbc_hwmon_labels_in[channel].type instead.
+ */
+ sensor->type = cgbc_hwmon_labels_in[channel].type;
+ sensor->label = cgbc_hwmon_labels_in[channel].label;
+ } else if (type == CGBC_HWMON_TYPE_FAN &&
+ channel < ARRAY_SIZE(cgbc_hwmon_labels_fan)) {
+ sensor->type = hwmon_fan;
+ sensor->label = cgbc_hwmon_labels_fan[channel];
+ } else {
+ dev_warn(dev, "Board Controller returned an unknown sensor (type=%d, channel=%d), ignoring it\n",
+ type, channel);
+ continue;
+ }
+
+ sensor->active = FIELD_GET(CGBC_HWMON_ACTIVE_BIT, data[1]);
+ sensor->channel = channel;
+ sensor->index = i;
+ sensor++;
+ hwmon->nb_sensors++;
+ }
+
+ return 0;
+}
+
+static struct cgbc_hwmon_sensor *cgbc_hwmon_find_sensor(struct cgbc_hwmon_data *hwmon,
+ enum hwmon_sensor_types type, int channel)
+{
+ struct cgbc_hwmon_sensor *sensor = NULL;
+ int i;
+
+ /*
+ * The Board Controller doesn't differentiate current and voltage sensors.
+ * The channel value (from the Board Controller's point of view) must be
+ * offset for current sensors.
+ */
+ if (type == hwmon_curr)
+ channel += CGBC_HWMON_NB_IN_SENSORS;
+
+ for (i = 0; i < hwmon->nb_sensors; i++) {
+ if (hwmon->sensors[i].type == type && hwmon->sensors[i].channel == channel) {
+ sensor = &hwmon->sensors[i];
+ break;
+ }
+ }
+
+ return sensor;
+}
+
+static int cgbc_hwmon_read(struct device *dev, enum hwmon_sensor_types type, u32 attr, int channel,
+ long *val)
+{
+ struct cgbc_hwmon_data *hwmon = dev_get_drvdata(dev);
+ struct cgbc_hwmon_sensor *sensor = cgbc_hwmon_find_sensor(hwmon, type, channel);
+ struct cgbc_device_data *cgbc = hwmon->cgbc;
+ u8 data[CGBC_HWMON_CMD_SENSOR_DATA_SIZE];
+ int ret;
+
+ ret = cgbc_hwmon_cmd(cgbc, sensor->index, &data[0]);
+ if (ret)
+ return ret;
+
+ *val = (data[3] << 8) | data[2];
+
+ /*
+ * For the Board Controller 1 LSB = 0.1 degrees Celsius.
+ * Other units are as expected.
+ */
+ if (sensor->type == hwmon_temp)
+ *val *= 100;
+
+ return 0;
+}
+
+static umode_t cgbc_hwmon_is_visible(const void *_data, enum hwmon_sensor_types type, u32 attr,
+ int channel)
+{
+ struct cgbc_hwmon_data *data = (struct cgbc_hwmon_data *)_data;
+ struct cgbc_hwmon_sensor *sensor;
+
+ sensor = cgbc_hwmon_find_sensor(data, type, channel);
+ if (!sensor)
+ return 0;
+
+ return sensor->active ? 0444 : 0;
+}
+
+static int cgbc_hwmon_read_string(struct device *dev, enum hwmon_sensor_types type, u32 attr,
+ int channel, const char **str)
+{
+ struct cgbc_hwmon_data *hwmon = dev_get_drvdata(dev);
+ struct cgbc_hwmon_sensor *sensor = cgbc_hwmon_find_sensor(hwmon, type, channel);
+
+ *str = sensor->label;
+
+ return 0;
+}
+
+static const struct hwmon_channel_info * const cgbc_hwmon_info[] = {
+ HWMON_CHANNEL_INFO(temp,
+ HWMON_T_INPUT | HWMON_T_LABEL, HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL, HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL, HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL, HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL, HWMON_T_INPUT | HWMON_T_LABEL),
+ HWMON_CHANNEL_INFO(in,
+ HWMON_I_INPUT | HWMON_I_LABEL, HWMON_I_INPUT | HWMON_I_LABEL,
+ HWMON_I_INPUT | HWMON_I_LABEL, HWMON_I_INPUT | HWMON_I_LABEL,
+ HWMON_I_INPUT | HWMON_I_LABEL, HWMON_I_INPUT | HWMON_I_LABEL,
+ HWMON_I_INPUT | HWMON_I_LABEL, HWMON_I_INPUT | HWMON_I_LABEL,
+ HWMON_I_INPUT | HWMON_I_LABEL, HWMON_I_INPUT | HWMON_I_LABEL,
+ HWMON_I_INPUT | HWMON_I_LABEL, HWMON_I_INPUT | HWMON_I_LABEL,
+ HWMON_I_INPUT | HWMON_I_LABEL, HWMON_I_INPUT | HWMON_I_LABEL),
+ HWMON_CHANNEL_INFO(curr,
+ HWMON_C_INPUT | HWMON_C_LABEL, HWMON_C_INPUT | HWMON_C_LABEL,
+ HWMON_C_INPUT | HWMON_C_LABEL),
+ HWMON_CHANNEL_INFO(fan,
+ HWMON_F_INPUT | HWMON_F_LABEL, HWMON_F_INPUT | HWMON_F_LABEL,
+ HWMON_F_INPUT | HWMON_F_LABEL, HWMON_F_INPUT | HWMON_F_LABEL,
+ HWMON_F_INPUT | HWMON_F_LABEL, HWMON_F_INPUT | HWMON_F_LABEL),
+ NULL
+};
+
+static const struct hwmon_ops cgbc_hwmon_ops = {
+ .is_visible = cgbc_hwmon_is_visible,
+ .read = cgbc_hwmon_read,
+ .read_string = cgbc_hwmon_read_string,
+};
+
+static const struct hwmon_chip_info cgbc_chip_info = {
+ .ops = &cgbc_hwmon_ops,
+ .info = cgbc_hwmon_info,
+};
+
+static int cgbc_hwmon_probe(struct platform_device *pdev)
+{
+ struct cgbc_device_data *cgbc = dev_get_drvdata(pdev->dev.parent);
+ struct device *dev = &pdev->dev;
+ struct cgbc_hwmon_data *data;
+ struct device *hwmon_dev;
+ int ret;
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->cgbc = cgbc;
+
+ ret = cgbc_hwmon_probe_sensors(dev, data);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to probe sensors");
+
+ hwmon_dev = devm_hwmon_device_register_with_info(dev, "cgbc_hwmon", data, &cgbc_chip_info,
+ NULL);
+ return PTR_ERR_OR_ZERO(hwmon_dev);
+}
+
+static struct platform_driver cgbc_hwmon_driver = {
+ .driver = {
+ .name = "cgbc-hwmon",
+ },
+ .probe = cgbc_hwmon_probe,
+};
+
+module_platform_driver(cgbc_hwmon_driver);
+
+MODULE_AUTHOR("Thomas Richard <thomas.richard@bootlin.com>");
+MODULE_DESCRIPTION("Congatec Board Controller Hardware Monitoring Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/dell-smm-hwmon.c b/drivers/hwmon/dell-smm-hwmon.c
index cd00adaad1b4..79e5606e6d2f 100644
--- a/drivers/hwmon/dell-smm-hwmon.c
+++ b/drivers/hwmon/dell-smm-hwmon.c
@@ -73,7 +73,7 @@
#define DELL_SMM_LEGACY_EXECUTE 0x1
#define DELL_SMM_NO_TEMP 10
-#define DELL_SMM_NO_FANS 3
+#define DELL_SMM_NO_FANS 4
struct smm_regs {
unsigned int eax;
@@ -1074,11 +1074,14 @@ static const struct hwmon_channel_info * const dell_smm_info[] = {
HWMON_F_INPUT | HWMON_F_LABEL | HWMON_F_MIN | HWMON_F_MAX |
HWMON_F_TARGET,
HWMON_F_INPUT | HWMON_F_LABEL | HWMON_F_MIN | HWMON_F_MAX |
+ HWMON_F_TARGET,
+ HWMON_F_INPUT | HWMON_F_LABEL | HWMON_F_MIN | HWMON_F_MAX |
HWMON_F_TARGET
),
HWMON_CHANNEL_INFO(pwm,
HWMON_PWM_INPUT | HWMON_PWM_ENABLE,
HWMON_PWM_INPUT,
+ HWMON_PWM_INPUT,
HWMON_PWM_INPUT
),
NULL
diff --git a/drivers/hwmon/emc2305.c b/drivers/hwmon/emc2305.c
index 4d39fbd83769..234c54956a4b 100644
--- a/drivers/hwmon/emc2305.c
+++ b/drivers/hwmon/emc2305.c
@@ -112,8 +112,6 @@ static char *emc2305_fan_name[] = {
"emc2305_fan5",
};
-static void emc2305_unset_tz(struct device *dev);
-
static int emc2305_get_max_channel(const struct emc2305_data *data)
{
return data->pwm_num;
@@ -293,8 +291,9 @@ static int emc2305_set_single_tz(struct device *dev, int idx)
pwm = data->pwm_min[cdev_idx];
data->cdev_data[cdev_idx].cdev =
- thermal_cooling_device_register(emc2305_fan_name[idx], data,
- &emc2305_cooling_ops);
+ devm_thermal_of_cooling_device_register(dev, dev->of_node,
+ emc2305_fan_name[idx], data,
+ &emc2305_cooling_ops);
if (IS_ERR(data->cdev_data[cdev_idx].cdev)) {
dev_err(dev, "Failed to register cooling device %s\n", emc2305_fan_name[idx]);
@@ -332,24 +331,9 @@ static int emc2305_set_tz(struct device *dev)
for (i = 0; i < data->pwm_num; i++) {
ret = emc2305_set_single_tz(dev, i + 1);
if (ret)
- goto thermal_cooling_device_register_fail;
+ return ret;
}
return 0;
-
-thermal_cooling_device_register_fail:
- emc2305_unset_tz(dev);
- return ret;
-}
-
-static void emc2305_unset_tz(struct device *dev)
-{
- struct emc2305_data *data = dev_get_drvdata(dev);
- int i;
-
- /* Unregister cooling device. */
- for (i = 0; i < EMC2305_PWM_MAX; i++)
- if (data->cdev_data[i].cdev)
- thermal_cooling_device_unregister(data->cdev_data[i].cdev);
}
static umode_t
@@ -599,20 +583,18 @@ static int emc2305_probe(struct i2c_client *client)
return 0;
}
-static void emc2305_remove(struct i2c_client *client)
-{
- struct device *dev = &client->dev;
-
- if (IS_REACHABLE(CONFIG_THERMAL))
- emc2305_unset_tz(dev);
-}
+static const struct of_device_id of_emc2305_match_table[] = {
+ { .compatible = "microchip,emc2305", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, of_emc2305_match_table);
static struct i2c_driver emc2305_driver = {
.driver = {
.name = "emc2305",
+ .of_match_table = of_emc2305_match_table,
},
.probe = emc2305_probe,
- .remove = emc2305_remove,
.id_table = emc2305_ids,
};
diff --git a/drivers/hwmon/gpio-fan.c b/drivers/hwmon/gpio-fan.c
index d92c536be9af..b779240328d5 100644
--- a/drivers/hwmon/gpio-fan.c
+++ b/drivers/hwmon/gpio-fan.c
@@ -393,7 +393,12 @@ static int gpio_fan_set_cur_state(struct thermal_cooling_device *cdev,
if (state >= fan_data->num_speed)
return -EINVAL;
+ mutex_lock(&fan_data->lock);
+
set_fan_speed(fan_data, state);
+
+ mutex_unlock(&fan_data->lock);
+
return 0;
}
@@ -489,7 +494,11 @@ MODULE_DEVICE_TABLE(of, of_gpio_fan_match);
static void gpio_fan_stop(void *data)
{
+ struct gpio_fan_data *fan_data = data;
+
+ mutex_lock(&fan_data->lock);
set_fan_speed(data, 0);
+ mutex_unlock(&fan_data->lock);
}
static int gpio_fan_probe(struct platform_device *pdev)
@@ -562,7 +571,9 @@ static int gpio_fan_suspend(struct device *dev)
if (fan_data->gpios) {
fan_data->resume_speed = fan_data->speed_index;
+ mutex_lock(&fan_data->lock);
set_fan_speed(fan_data, 0);
+ mutex_unlock(&fan_data->lock);
}
return 0;
@@ -572,8 +583,11 @@ static int gpio_fan_resume(struct device *dev)
{
struct gpio_fan_data *fan_data = dev_get_drvdata(dev);
- if (fan_data->gpios)
+ if (fan_data->gpios) {
+ mutex_lock(&fan_data->lock);
set_fan_speed(fan_data, fan_data->resume_speed);
+ mutex_unlock(&fan_data->lock);
+ }
return 0;
}
diff --git a/drivers/hwmon/gsc-hwmon.c b/drivers/hwmon/gsc-hwmon.c
index 14a6385cd7cc..0f9af82cebec 100644
--- a/drivers/hwmon/gsc-hwmon.c
+++ b/drivers/hwmon/gsc-hwmon.c
@@ -47,7 +47,6 @@ static const struct regmap_bus gsc_hwmon_regmap_bus = {
static const struct regmap_config gsc_hwmon_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
- .cache_type = REGCACHE_NONE,
};
static ssize_t pwm_auto_point_temp_show(struct device *dev,
diff --git a/drivers/hwmon/hp-wmi-sensors.c b/drivers/hwmon/hp-wmi-sensors.c
index d6bdad26feb1..03c684ba83bd 100644
--- a/drivers/hwmon/hp-wmi-sensors.c
+++ b/drivers/hwmon/hp-wmi-sensors.c
@@ -1197,7 +1197,7 @@ static int hp_wmi_update_info(struct hp_wmi_sensors *state,
if (time_after(jiffies, info->last_updated + HZ)) {
mutex_lock(&state->lock);
- wobj = hp_wmi_get_wobj(HP_WMI_NUMERIC_SENSOR_GUID, instance);
+ wobj = wmidev_block_query(state->wdev, instance);
if (!wobj) {
ret = -EIO;
goto out_unlock;
@@ -1745,7 +1745,7 @@ static int init_numeric_sensors(struct hp_wmi_sensors *state,
return -ENOMEM;
for (i = 0, info = info_arr; i < icount; i++, info++) {
- wobj = hp_wmi_get_wobj(HP_WMI_NUMERIC_SENSOR_GUID, i);
+ wobj = wmidev_block_query(state->wdev, i);
if (!wobj)
return -EIO;
diff --git a/drivers/hwmon/htu31.c b/drivers/hwmon/htu31.c
new file mode 100644
index 000000000000..7521a371aa6c
--- /dev/null
+++ b/drivers/hwmon/htu31.c
@@ -0,0 +1,350 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * The driver for Measurement Specialties HTU31 Temperature and Humidity sensor.
+ *
+ * Copyright (C) 2025
+ * Author: Andrei Lalaev <andrey.lalaev@gmail.com>
+ */
+
+#include <linux/array_size.h>
+#include <linux/cleanup.h>
+#include <linux/crc8.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/module.h>
+
+#define HTU31_READ_TEMP_HUM_CMD 0x00
+#define HTU31_READ_SERIAL_CMD 0x0a
+#define HTU31_CONVERSION_CMD 0x5e
+#define HTU31_HEATER_OFF_CMD 0x02
+#define HTU31_HEATER_ON_CMD 0x04
+
+#define HTU31_TEMP_HUM_LEN 6
+
+/* Conversion time for the highest resolution */
+#define HTU31_HUMIDITY_CONV_TIME 10000 /* us */
+#define HTU31_TEMPERATURE_CONV_TIME 15000 /* us */
+
+#define HTU31_SERIAL_NUMBER_LEN 3
+#define HTU31_SERIAL_NUMBER_CRC_LEN 1
+#define HTU31_SERIAL_NUMBER_CRC_OFFSET 3
+
+#define HTU31_CRC8_INIT_VAL 0
+#define HTU31_CRC8_POLYNOMIAL 0x31
+DECLARE_CRC8_TABLE(htu31_crc8_table);
+
+/**
+ * struct htu31_data - all the data required to operate a HTU31 chip
+ * @client: the i2c client associated with the HTU31
+ * @lock: a mutex to prevent parallel access to the data
+ * @wait_time: the time needed by sensor to convert values
+ * @temperature: the latest temperature value in millidegrees
+ * @humidity: the latest relative humidity value in millipercent
+ * @serial_number: the serial number of the sensor
+ * @heater_enable: the internal state of the heater
+ */
+struct htu31_data {
+ struct i2c_client *client;
+ struct mutex lock; /* Used to protect against parallel data updates */
+ long wait_time;
+ long temperature;
+ long humidity;
+ u8 serial_number[HTU31_SERIAL_NUMBER_LEN];
+ bool heater_enable;
+};
+
+static long htu31_temp_to_millicelsius(u16 val)
+{
+ return -40000 + DIV_ROUND_CLOSEST_ULL(165000ULL * val, 65535);
+}
+
+static long htu31_relative_humidity(u16 val)
+{
+ return DIV_ROUND_CLOSEST_ULL(100000ULL * val, 65535);
+}
+
+static int htu31_data_fetch_command(struct htu31_data *data)
+{
+ struct i2c_client *client = data->client;
+ u8 conversion_on = HTU31_CONVERSION_CMD;
+ u8 read_data_cmd = HTU31_READ_TEMP_HUM_CMD;
+ u8 t_h_buf[HTU31_TEMP_HUM_LEN] = {};
+ struct i2c_msg msgs[] = {
+ {
+ .addr = client->addr,
+ .flags = 0,
+ .len = 1,
+ .buf = &read_data_cmd,
+ },
+ {
+ .addr = client->addr,
+ .flags = I2C_M_RD,
+ .len = sizeof(t_h_buf),
+ .buf = t_h_buf,
+ },
+ };
+ int ret;
+ u8 crc;
+
+ guard(mutex)(&data->lock);
+
+ ret = i2c_master_send(client, &conversion_on, 1);
+ if (ret != 1) {
+ ret = ret < 0 ? ret : -EIO;
+ dev_err(&client->dev,
+ "Conversion command is failed. Error code: %d\n", ret);
+ return ret;
+ }
+
+ fsleep(data->wait_time);
+
+ ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
+ if (ret != ARRAY_SIZE(msgs)) {
+ ret = ret < 0 ? ret : -EIO;
+ dev_err(&client->dev,
+ "T&H command is failed. Error code: %d\n", ret);
+ return ret;
+ }
+
+ crc = crc8(htu31_crc8_table, &t_h_buf[0], 2, HTU31_CRC8_INIT_VAL);
+ if (crc != t_h_buf[2]) {
+ dev_err(&client->dev, "Temperature CRC mismatch\n");
+ return -EIO;
+ }
+
+ crc = crc8(htu31_crc8_table, &t_h_buf[3], 2, HTU31_CRC8_INIT_VAL);
+ if (crc != t_h_buf[5]) {
+ dev_err(&client->dev, "Humidity CRC mismatch\n");
+ return -EIO;
+ }
+
+ data->temperature = htu31_temp_to_millicelsius(be16_to_cpup((__be16 *)&t_h_buf[0]));
+ data->humidity = htu31_relative_humidity(be16_to_cpup((__be16 *)&t_h_buf[3]));
+
+ return 0;
+}
+
+static umode_t htu31_is_visible(const void *data, enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ switch (type) {
+ case hwmon_temp:
+ case hwmon_humidity:
+ return 0444;
+ default:
+ return 0;
+ }
+}
+
+static int htu31_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ struct htu31_data *data = dev_get_drvdata(dev);
+ int ret;
+
+ ret = htu31_data_fetch_command(data);
+ if (ret < 0)
+ return ret;
+
+ switch (type) {
+ case hwmon_temp:
+ if (attr != hwmon_temp_input)
+ return -EINVAL;
+
+ *val = data->temperature;
+ break;
+ case hwmon_humidity:
+ if (attr != hwmon_humidity_input)
+ return -EINVAL;
+
+ *val = data->humidity;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int htu31_read_serial_number(struct htu31_data *data)
+{
+ struct i2c_client *client = data->client;
+ u8 read_sn_cmd = HTU31_READ_SERIAL_CMD;
+ u8 sn_buf[HTU31_SERIAL_NUMBER_LEN + HTU31_SERIAL_NUMBER_CRC_LEN];
+ struct i2c_msg msgs[] = {
+ {
+ .addr = client->addr,
+ .flags = 0,
+ .len = 1,
+ .buf = &read_sn_cmd,
+ },
+ {
+ .addr = client->addr,
+ .flags = I2C_M_RD,
+ .len = sizeof(sn_buf),
+ .buf = sn_buf,
+ },
+ };
+ int ret;
+ u8 crc;
+
+ ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
+ if (ret < 0)
+ return ret;
+
+ crc = crc8(htu31_crc8_table, sn_buf, HTU31_SERIAL_NUMBER_LEN, HTU31_CRC8_INIT_VAL);
+ if (crc != sn_buf[HTU31_SERIAL_NUMBER_CRC_OFFSET]) {
+ dev_err(&client->dev, "Serial number CRC mismatch\n");
+ return -EIO;
+ }
+
+ memcpy(data->serial_number, sn_buf, HTU31_SERIAL_NUMBER_LEN);
+
+ return 0;
+}
+
+static ssize_t heater_enable_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct htu31_data *data = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%d\n", data->heater_enable);
+}
+
+static ssize_t heater_enable_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct htu31_data *data = dev_get_drvdata(dev);
+ u8 heater_cmd;
+ bool status;
+ int ret;
+
+ ret = kstrtobool(buf, &status);
+ if (ret)
+ return ret;
+
+ heater_cmd = status ? HTU31_HEATER_ON_CMD : HTU31_HEATER_OFF_CMD;
+
+ guard(mutex)(&data->lock);
+
+ ret = i2c_master_send(data->client, &heater_cmd, 1);
+ if (ret < 0)
+ return ret;
+
+ data->heater_enable = status;
+
+ return count;
+}
+
+static DEVICE_ATTR_RW(heater_enable);
+
+static int serial_number_show(struct seq_file *seq_file,
+ void *unused)
+{
+ struct htu31_data *data = seq_file->private;
+
+ seq_printf(seq_file, "%X%X%X\n", data->serial_number[0],
+ data->serial_number[1], data->serial_number[2]);
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(serial_number);
+
+static struct attribute *htu31_attrs[] = {
+ &dev_attr_heater_enable.attr,
+ NULL
+};
+
+ATTRIBUTE_GROUPS(htu31);
+
+static const struct hwmon_channel_info * const htu31_info[] = {
+ HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT),
+ HWMON_CHANNEL_INFO(humidity, HWMON_H_INPUT),
+ NULL
+};
+
+static const struct hwmon_ops htu31_hwmon_ops = {
+ .is_visible = htu31_is_visible,
+ .read = htu31_read,
+};
+
+static const struct hwmon_chip_info htu31_chip_info = {
+ .info = htu31_info,
+ .ops = &htu31_hwmon_ops,
+};
+
+static int htu31_probe(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ struct device *hwmon_dev;
+ struct htu31_data *data;
+ int ret;
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->client = client;
+ data->wait_time = HTU31_TEMPERATURE_CONV_TIME + HTU31_HUMIDITY_CONV_TIME;
+
+ ret = devm_mutex_init(dev, &data->lock);
+ if (ret)
+ return ret;
+
+ crc8_populate_msb(htu31_crc8_table, HTU31_CRC8_POLYNOMIAL);
+
+ ret = htu31_read_serial_number(data);
+ if (ret) {
+ dev_err(dev, "Failed to read serial number\n");
+ return ret;
+ }
+
+ debugfs_create_file("serial_number",
+ 0444,
+ client->debugfs,
+ data,
+ &serial_number_fops);
+
+ hwmon_dev = devm_hwmon_device_register_with_info(dev,
+ client->name,
+ data,
+ &htu31_chip_info,
+ htu31_groups);
+
+ return PTR_ERR_OR_ZERO(hwmon_dev);
+}
+
+static const struct i2c_device_id htu31_id[] = {
+ { "htu31" },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, htu31_id);
+
+#if IS_ENABLED(CONFIG_OF)
+static const struct of_device_id htu31_of_match[] = {
+ { .compatible = "meas,htu31" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, htu31_of_match);
+#endif
+
+static struct i2c_driver htu31_driver = {
+ .driver = {
+ .name = "htu31",
+ .of_match_table = of_match_ptr(htu31_of_match),
+ },
+ .probe = htu31_probe,
+ .id_table = htu31_id,
+};
+module_i2c_driver(htu31_driver);
+
+MODULE_AUTHOR("Andrei Lalaev <andrey.lalaev@gmail.com>");
+MODULE_DESCRIPTION("HTU31 Temperature and Humidity sensor driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/hwmon.c b/drivers/hwmon/hwmon.c
index 9703d60e9bbf..1688c210888a 100644
--- a/drivers/hwmon/hwmon.c
+++ b/drivers/hwmon/hwmon.c
@@ -646,8 +646,8 @@ static const char * const hwmon_power_attr_templates[] = {
[hwmon_power_enable] = "power%d_enable",
[hwmon_power_average] = "power%d_average",
[hwmon_power_average_interval] = "power%d_average_interval",
- [hwmon_power_average_interval_max] = "power%d_interval_max",
- [hwmon_power_average_interval_min] = "power%d_interval_min",
+ [hwmon_power_average_interval_max] = "power%d_average_interval_max",
+ [hwmon_power_average_interval_min] = "power%d_average_interval_min",
[hwmon_power_average_highest] = "power%d_average_highest",
[hwmon_power_average_lowest] = "power%d_average_lowest",
[hwmon_power_average_max] = "power%d_average_max",
diff --git a/drivers/hwmon/ina3221.c b/drivers/hwmon/ina3221.c
index 1bf479a0f793..ce0e3f214f5b 100644
--- a/drivers/hwmon/ina3221.c
+++ b/drivers/hwmon/ina3221.c
@@ -116,7 +116,6 @@ struct ina3221_input {
* @fields: Register fields of the device
* @inputs: Array of channel input source specific structures
* @lock: mutex lock to serialize sysfs attribute accesses
- * @debugfs: Pointer to debugfs entry for device
* @reg_config: Register value of INA3221_CONFIG
* @summation_shunt_resistor: equivalent shunt resistor value for summation
* @summation_channel_control: Value written to SCC field in INA3221_MASK_ENABLE
@@ -128,7 +127,6 @@ struct ina3221_data {
struct regmap_field *fields[F_MAX_FIELDS];
struct ina3221_input inputs[INA3221_NUM_CHANNELS];
struct mutex lock;
- struct dentry *debugfs;
u32 reg_config;
int summation_shunt_resistor;
u32 summation_channel_control;
@@ -913,12 +911,9 @@ static int ina3221_probe(struct i2c_client *client)
goto fail;
}
- scnprintf(name, sizeof(name), "%s-%s", INA3221_DRIVER_NAME, dev_name(dev));
- ina->debugfs = debugfs_create_dir(name, NULL);
-
for (i = 0; i < INA3221_NUM_CHANNELS; i++) {
scnprintf(name, sizeof(name), "in%d_summation_disable", i);
- debugfs_create_bool(name, 0400, ina->debugfs,
+ debugfs_create_bool(name, 0400, client->debugfs,
&ina->inputs[i].summation_disable);
}
@@ -940,8 +935,6 @@ static void ina3221_remove(struct i2c_client *client)
struct ina3221_data *ina = dev_get_drvdata(&client->dev);
int i;
- debugfs_remove_recursive(ina->debugfs);
-
pm_runtime_disable(ina->pm_dev);
pm_runtime_set_suspended(ina->pm_dev);
diff --git a/drivers/hwmon/isl28022.c b/drivers/hwmon/isl28022.c
index 3f9b4520b53e..1fb9864635db 100644
--- a/drivers/hwmon/isl28022.c
+++ b/drivers/hwmon/isl28022.c
@@ -324,26 +324,6 @@ static int shunt_voltage_show(struct seq_file *seqf, void *unused)
}
DEFINE_SHOW_ATTRIBUTE(shunt_voltage);
-static struct dentry *isl28022_debugfs_root;
-
-static void isl28022_debugfs_remove(void *res)
-{
- debugfs_remove_recursive(res);
-}
-
-static void isl28022_debugfs_init(struct i2c_client *client, struct isl28022_data *data)
-{
- char name[16];
- struct dentry *debugfs;
-
- scnprintf(name, sizeof(name), "%d-%04hx", client->adapter->nr, client->addr);
-
- debugfs = debugfs_create_dir(name, isl28022_debugfs_root);
- debugfs_create_file("shunt_voltage", 0444, debugfs, data, &shunt_voltage_fops);
-
- devm_add_action_or_reset(&client->dev, isl28022_debugfs_remove, debugfs);
-}
-
/*
* read property values and make consistency checks.
*
@@ -475,7 +455,7 @@ static int isl28022_probe(struct i2c_client *client)
if (err)
return err;
- isl28022_debugfs_init(client, data);
+ debugfs_create_file("shunt_voltage", 0444, client->debugfs, data, &shunt_voltage_fops);
hwmon_dev = devm_hwmon_device_register_with_info(dev, client->name,
data, &isl28022_chip_info, NULL);
@@ -505,27 +485,7 @@ static struct i2c_driver isl28022_driver = {
.probe = isl28022_probe,
.id_table = isl28022_ids,
};
-
-static int __init isl28022_init(void)
-{
- int err;
-
- isl28022_debugfs_root = debugfs_create_dir("isl28022", NULL);
- err = i2c_add_driver(&isl28022_driver);
- if (!err)
- return 0;
-
- debugfs_remove_recursive(isl28022_debugfs_root);
- return err;
-}
-module_init(isl28022_init);
-
-static void __exit isl28022_exit(void)
-{
- i2c_del_driver(&isl28022_driver);
- debugfs_remove_recursive(isl28022_debugfs_root);
-}
-module_exit(isl28022_exit);
+module_i2c_driver(isl28022_driver);
MODULE_AUTHOR("Carsten Spieß <mail@carsten-spiess.de>");
MODULE_DESCRIPTION("ISL28022 driver");
diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c
index d0b4cc9a5011..3685906cc57c 100644
--- a/drivers/hwmon/k10temp.c
+++ b/drivers/hwmon/k10temp.c
@@ -467,6 +467,7 @@ static int k10temp_probe(struct pci_dev *pdev, const struct pci_device_id *id)
k10temp_get_ccd_support(data, 4);
break;
case 0x31: /* Zen2 Threadripper */
+ case 0x47: /* Cyan Skillfish */
case 0x60: /* Renoir */
case 0x68: /* Lucienne */
case 0x71: /* Zen2 */
@@ -535,6 +536,7 @@ static const struct pci_device_id k10temp_id_table[] = {
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_DF_F3) },
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_M10H_DF_F3) },
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_M30H_DF_F3) },
+ { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_M40H_DF_F3) },
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_M60H_DF_F3) },
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_M70H_DF_F3) },
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_MA0H_DF_F3) },
diff --git a/drivers/hwmon/lm90.c b/drivers/hwmon/lm90.c
index 511d95a0efb3..75f09553fd67 100644
--- a/drivers/hwmon/lm90.c
+++ b/drivers/hwmon/lm90.c
@@ -90,6 +90,9 @@
* This driver also supports NE1618 from Philips. It is similar to NE1617
* but supports 11 bit external temperature values.
*
+ * This driver also supports NCT7716, NCT7717 and NCT7718 from Nuvoton.
+ * The NCT7716 is similar to the NCT7717 but supports one additional address.
+ *
* Since the LM90 was the first chipset supported by this driver, most
* comments will refer to this chipset, but are actually general and
* concern all supported chipsets, unless mentioned otherwise.
@@ -119,13 +122,15 @@
* Address is fully defined internally and cannot be changed except for
* MAX6659, MAX6680 and MAX6681.
* LM86, LM89, LM90, LM99, ADM1032, ADM1032-1, ADT7461, ADT7461A, MAX6649,
- * MAX6657, MAX6658, NCT1008 and W83L771 have address 0x4c.
+ * MAX6657, MAX6658, NCT1008, NCT7718 and W83L771 have address 0x4c.
* ADM1032-2, ADT7461-2, ADT7461A-2, LM89-1, LM99-1, MAX6646, and NCT1008D
* have address 0x4d.
* MAX6647 has address 0x4e.
* MAX6659 can have address 0x4c, 0x4d or 0x4e.
* MAX6654, MAX6680, and MAX6681 can have address 0x18, 0x19, 0x1a, 0x29,
* 0x2a, 0x2b, 0x4c, 0x4d or 0x4e.
+ * NCT7716 can have address 0x48 or 0x49.
+ * NCT7717 has address 0x48.
* SA56004 can have address 0x48 through 0x4F.
*/
@@ -136,7 +141,7 @@ static const unsigned short normal_i2c[] = {
enum chips { adm1023, adm1032, adt7461, adt7461a, adt7481,
g781, lm84, lm90, lm99,
max1617, max6642, max6646, max6648, max6654, max6657, max6659, max6680, max6696,
- nct210, nct72, ne1618, sa56004, tmp451, tmp461, w83l771,
+ nct210, nct72, nct7716, nct7717, nct7718, ne1618, sa56004, tmp451, tmp461, w83l771,
};
/*
@@ -191,6 +196,9 @@ enum chips { adm1023, adm1032, adt7461, adt7461a, adt7481,
#define ADT7481_REG_MAN_ID 0x3e
#define ADT7481_REG_CHIP_ID 0x3d
+/* NCT7716/7717/7718 registers */
+#define NCT7716_REG_CHIP_ID 0xFD
+
/* Device features */
#define LM90_HAVE_EXTENDED_TEMP BIT(0) /* extended temperature support */
#define LM90_HAVE_OFFSET BIT(1) /* temperature offset register */
@@ -275,6 +283,9 @@ static const struct i2c_device_id lm90_id[] = {
{ "nct214", nct72 },
{ "nct218", nct72 },
{ "nct72", nct72 },
+ { "nct7716", nct7716 },
+ { "nct7717", nct7717 },
+ { "nct7718", nct7718 },
{ "ne1618", ne1618 },
{ "w83l771", w83l771 },
{ "sa56004", sa56004 },
@@ -383,6 +394,18 @@ static const struct of_device_id __maybe_unused lm90_of_match[] = {
.data = (void *)nct72
},
{
+ .compatible = "nuvoton,nct7716",
+ .data = (void *)nct7716
+ },
+ {
+ .compatible = "nuvoton,nct7717",
+ .data = (void *)nct7717
+ },
+ {
+ .compatible = "nuvoton,nct7718",
+ .data = (void *)nct7718
+ },
+ {
.compatible = "winbond,w83l771",
.data = (void *)w83l771
},
@@ -601,6 +624,26 @@ static const struct lm90_params lm90_params[] = {
.resolution = 11,
.max_convrate = 7,
},
+ [nct7716] = {
+ .flags = LM90_HAVE_ALARMS | LM90_HAVE_CONVRATE,
+ .alert_alarms = 0x40,
+ .resolution = 8,
+ .max_convrate = 8,
+ },
+ [nct7717] = {
+ .flags = LM90_HAVE_ALARMS | LM90_HAVE_CONVRATE,
+ .alert_alarms = 0x40,
+ .resolution = 8,
+ .max_convrate = 8,
+ },
+ [nct7718] = {
+ .flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT | LM90_HAVE_CRIT
+ | LM90_HAVE_ALARMS | LM90_HAVE_LOW | LM90_HAVE_CONVRATE
+ | LM90_HAVE_REMOTE_EXT,
+ .alert_alarms = 0x7c,
+ .resolution = 11,
+ .max_convrate = 8,
+ },
[ne1618] = {
.flags = LM90_PAUSE_FOR_CONFIG | LM90_HAVE_BROKEN_ALERT
| LM90_HAVE_LOW | LM90_HAVE_CONVRATE | LM90_HAVE_REMOTE_EXT,
@@ -2300,6 +2343,38 @@ static const char *lm90_detect_nuvoton(struct i2c_client *client, int chip_id,
return name;
}
+static const char *lm90_detect_nuvoton_50(struct i2c_client *client, int chip_id,
+ int config1, int convrate)
+{
+ int chip_id2 = i2c_smbus_read_byte_data(client, NCT7716_REG_CHIP_ID);
+ int config2 = i2c_smbus_read_byte_data(client, LM90_REG_CONFIG2);
+ int address = client->addr;
+ const char *name = NULL;
+
+ if (chip_id2 < 0 || config2 < 0)
+ return NULL;
+
+ if (chip_id2 != 0x50 || convrate > 0x08)
+ return NULL;
+
+ switch (chip_id) {
+ case 0x90:
+ if (address == 0x48 && !(config1 & 0x3e) && !(config2 & 0xfe))
+ name = "nct7717";
+ break;
+ case 0x91:
+ if ((address == 0x48 || address == 0x49) && !(config1 & 0x3e) &&
+ !(config2 & 0xfe))
+ name = "nct7716";
+ else if (address == 0x4c && !(config1 & 0x38) && !(config2 & 0xf8))
+ name = "nct7718";
+ break;
+ default:
+ break;
+ }
+ return name;
+}
+
static const char *lm90_detect_nxp(struct i2c_client *client, bool common_address,
int chip_id, int config1, int convrate)
{
@@ -2484,6 +2559,9 @@ static int lm90_detect(struct i2c_client *client, struct i2c_board_info *info)
name = lm90_detect_maxim(client, common_address, chip_id,
config1, convrate);
break;
+ case 0x50:
+ name = lm90_detect_nuvoton_50(client, chip_id, config1, convrate);
+ break;
case 0x54: /* ON MC1066, Microchip TC1068, TCM1617 (originally TelCom) */
if (common_address && !(config1 & 0x3f) && !(convrate & 0xf8))
name = "mc1066";
diff --git a/drivers/hwmon/ltc4282.c b/drivers/hwmon/ltc4282.c
index 4f608a3790fb..7f38d2696239 100644
--- a/drivers/hwmon/ltc4282.c
+++ b/drivers/hwmon/ltc4282.c
@@ -1674,47 +1674,19 @@ static int ltc4282_show_power1_bad_fault_log(void *arg, u64 *val)
DEFINE_DEBUGFS_ATTRIBUTE(ltc4282_power1_bad_fault_log,
ltc4282_show_power1_bad_fault_log, NULL, "%llu\n");
-static void ltc4282_debugfs_remove(void *dir)
+static void ltc4282_debugfs_init(struct ltc4282_state *st, struct i2c_client *i2c)
{
- debugfs_remove_recursive(dir);
-}
-
-static void ltc4282_debugfs_init(struct ltc4282_state *st,
- struct i2c_client *i2c,
- const struct device *hwmon)
-{
- const char *debugfs_name;
- struct dentry *dentry;
- int ret;
-
- if (!IS_ENABLED(CONFIG_DEBUG_FS))
- return;
-
- debugfs_name = devm_kasprintf(&i2c->dev, GFP_KERNEL, "ltc4282-%s",
- dev_name(hwmon));
- if (!debugfs_name)
- return;
-
- dentry = debugfs_create_dir(debugfs_name, NULL);
- if (IS_ERR(dentry))
- return;
-
- ret = devm_add_action_or_reset(&i2c->dev, ltc4282_debugfs_remove,
- dentry);
- if (ret)
- return;
-
- debugfs_create_file_unsafe("power1_bad_fault_log", 0400, dentry, st,
+ debugfs_create_file_unsafe("power1_bad_fault_log", 0400, i2c->debugfs, st,
&ltc4282_power1_bad_fault_log);
- debugfs_create_file_unsafe("in0_fet_short_fault_log", 0400, dentry, st,
+ debugfs_create_file_unsafe("in0_fet_short_fault_log", 0400, i2c->debugfs, st,
&ltc4282_fet_short_fault_log);
- debugfs_create_file_unsafe("in0_fet_bad_fault_log", 0400, dentry, st,
+ debugfs_create_file_unsafe("in0_fet_bad_fault_log", 0400, i2c->debugfs, st,
&ltc4282_fet_bad_fault_log);
- debugfs_create_file_unsafe("in1_crit_fault_log", 0400, dentry, st,
+ debugfs_create_file_unsafe("in1_crit_fault_log", 0400, i2c->debugfs, st,
&ltc4282_in1_crit_fault_log);
- debugfs_create_file_unsafe("in1_lcrit_fault_log", 0400, dentry, st,
+ debugfs_create_file_unsafe("in1_lcrit_fault_log", 0400, i2c->debugfs, st,
&ltc4282_in1_lcrit_fault_log);
- debugfs_create_file_unsafe("curr1_crit_fault_log", 0400, dentry, st,
+ debugfs_create_file_unsafe("curr1_crit_fault_log", 0400, i2c->debugfs, st,
&ltc4282_curr1_crit_fault_log);
}
@@ -1757,7 +1729,7 @@ static int ltc4282_probe(struct i2c_client *i2c)
if (IS_ERR(hwmon))
return PTR_ERR(hwmon);
- ltc4282_debugfs_init(st, i2c, hwmon);
+ ltc4282_debugfs_init(st, i2c);
return 0;
}
diff --git a/drivers/hwmon/nct6683.c b/drivers/hwmon/nct6683.c
index 416ac02e9f74..6cda35388b24 100644
--- a/drivers/hwmon/nct6683.c
+++ b/drivers/hwmon/nct6683.c
@@ -176,6 +176,7 @@ superio_exit(int ioreg)
#define NCT6683_CUSTOMER_ID_MSI2 0x200
#define NCT6683_CUSTOMER_ID_MSI3 0x207
#define NCT6683_CUSTOMER_ID_MSI4 0x20d
+#define NCT6683_CUSTOMER_ID_AMD 0x162b
#define NCT6683_CUSTOMER_ID_ASROCK 0xe2c
#define NCT6683_CUSTOMER_ID_ASROCK2 0xe1b
#define NCT6683_CUSTOMER_ID_ASROCK3 0x1631
@@ -1231,6 +1232,8 @@ static int nct6683_probe(struct platform_device *pdev)
break;
case NCT6683_CUSTOMER_ID_MSI4:
break;
+ case NCT6683_CUSTOMER_ID_AMD:
+ break;
case NCT6683_CUSTOMER_ID_ASROCK:
break;
case NCT6683_CUSTOMER_ID_ASROCK2:
diff --git a/drivers/hwmon/nct6775-core.c b/drivers/hwmon/nct6775-core.c
index fa3351351825..79bc67ffb998 100644
--- a/drivers/hwmon/nct6775-core.c
+++ b/drivers/hwmon/nct6775-core.c
@@ -273,8 +273,8 @@ static const s8 NCT6776_BEEP_BITS[NUM_BEEP_BITS] = {
static const u16 NCT6776_REG_TOLERANCE_H[] = {
0x10c, 0x20c, 0x30c, 0x80c, 0x90c, 0xa0c, 0xb0c };
-static const u8 NCT6776_REG_PWM_MODE[] = { 0x04, 0, 0, 0, 0, 0 };
-static const u8 NCT6776_PWM_MODE_MASK[] = { 0x01, 0, 0, 0, 0, 0 };
+static const u8 NCT6776_REG_PWM_MODE[] = { 0x04, 0, 0, 0, 0, 0, 0 };
+static const u8 NCT6776_PWM_MODE_MASK[] = { 0x01, 0, 0, 0, 0, 0, 0 };
static const u16 NCT6776_REG_FAN_MIN[] = {
0x63a, 0x63c, 0x63e, 0x640, 0x642, 0x64a, 0x64c };
diff --git a/drivers/hwmon/ntc_thermistor.c b/drivers/hwmon/ntc_thermistor.c
index b5352900463f..d21f7266c411 100644
--- a/drivers/hwmon/ntc_thermistor.c
+++ b/drivers/hwmon/ntc_thermistor.c
@@ -181,40 +181,40 @@ static const struct ntc_compensation ncpXXwf104[] = {
};
static const struct ntc_compensation ncpXXxh103[] = {
- { .temp_c = -40, .ohm = 247565 },
- { .temp_c = -35, .ohm = 181742 },
- { .temp_c = -30, .ohm = 135128 },
- { .temp_c = -25, .ohm = 101678 },
- { .temp_c = -20, .ohm = 77373 },
- { .temp_c = -15, .ohm = 59504 },
- { .temp_c = -10, .ohm = 46222 },
- { .temp_c = -5, .ohm = 36244 },
- { .temp_c = 0, .ohm = 28674 },
- { .temp_c = 5, .ohm = 22878 },
- { .temp_c = 10, .ohm = 18399 },
- { .temp_c = 15, .ohm = 14910 },
- { .temp_c = 20, .ohm = 12169 },
+ { .temp_c = -40, .ohm = 195652 },
+ { .temp_c = -35, .ohm = 148171 },
+ { .temp_c = -30, .ohm = 113347 },
+ { .temp_c = -25, .ohm = 87559 },
+ { .temp_c = -20, .ohm = 68237 },
+ { .temp_c = -15, .ohm = 53650 },
+ { .temp_c = -10, .ohm = 42506 },
+ { .temp_c = -5, .ohm = 33892 },
+ { .temp_c = 0, .ohm = 27219 },
+ { .temp_c = 5, .ohm = 22021 },
+ { .temp_c = 10, .ohm = 17926 },
+ { .temp_c = 15, .ohm = 14674 },
+ { .temp_c = 20, .ohm = 12081 },
{ .temp_c = 25, .ohm = 10000 },
- { .temp_c = 30, .ohm = 8271 },
- { .temp_c = 35, .ohm = 6883 },
- { .temp_c = 40, .ohm = 5762 },
- { .temp_c = 45, .ohm = 4851 },
- { .temp_c = 50, .ohm = 4105 },
- { .temp_c = 55, .ohm = 3492 },
- { .temp_c = 60, .ohm = 2985 },
- { .temp_c = 65, .ohm = 2563 },
- { .temp_c = 70, .ohm = 2211 },
- { .temp_c = 75, .ohm = 1915 },
- { .temp_c = 80, .ohm = 1666 },
- { .temp_c = 85, .ohm = 1454 },
- { .temp_c = 90, .ohm = 1275 },
- { .temp_c = 95, .ohm = 1121 },
- { .temp_c = 100, .ohm = 990 },
- { .temp_c = 105, .ohm = 876 },
- { .temp_c = 110, .ohm = 779 },
- { .temp_c = 115, .ohm = 694 },
- { .temp_c = 120, .ohm = 620 },
- { .temp_c = 125, .ohm = 556 },
+ { .temp_c = 30, .ohm = 8315 },
+ { .temp_c = 35, .ohm = 6948 },
+ { .temp_c = 40, .ohm = 5834 },
+ { .temp_c = 45, .ohm = 4917 },
+ { .temp_c = 50, .ohm = 4161 },
+ { .temp_c = 55, .ohm = 3535 },
+ { .temp_c = 60, .ohm = 3014 },
+ { .temp_c = 65, .ohm = 2586 },
+ { .temp_c = 70, .ohm = 2228 },
+ { .temp_c = 75, .ohm = 1925 },
+ { .temp_c = 80, .ohm = 1669 },
+ { .temp_c = 85, .ohm = 1452 },
+ { .temp_c = 90, .ohm = 1268 },
+ { .temp_c = 95, .ohm = 1110 },
+ { .temp_c = 100, .ohm = 974 },
+ { .temp_c = 105, .ohm = 858 },
+ { .temp_c = 110, .ohm = 758 },
+ { .temp_c = 115, .ohm = 672 },
+ { .temp_c = 120, .ohm = 596 },
+ { .temp_c = 125, .ohm = 531 },
};
/*
@@ -387,12 +387,9 @@ static int get_ohm_of_thermistor(struct ntc_data *data, unsigned int uv)
puo = data->pullup_ohm;
pdo = data->pulldown_ohm;
- if (uv == 0)
- return (data->connect == NTC_CONNECTED_POSITIVE) ?
- INT_MAX : 0;
- if (uv >= puv)
- return (data->connect == NTC_CONNECTED_POSITIVE) ?
- 0 : INT_MAX;
+ /* faulty adc value */
+ if (uv == 0 || uv >= puv)
+ return -ENODATA;
if (data->connect == NTC_CONNECTED_POSITIVE && puo == 0)
n = div_u64(pdo * (puv - uv), uv);
@@ -404,8 +401,10 @@ static int get_ohm_of_thermistor(struct ntc_data *data, unsigned int uv)
else
n = div64_u64_safe(pdo * puo * uv, pdo * (puv - uv) - puo * uv);
- if (n > INT_MAX)
- n = INT_MAX;
+ /* sensor out of bounds */
+ if (n > data->comp[0].ohm || n < data->comp[data->n_comp - 1].ohm)
+ return -ENODATA;
+
return n;
}
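
get_ohm_of_thermistor() now treats a zero or saturated ADC reading, and any resistance outside the compensation table, as -ENODATA instead of clamping to 0 or INT_MAX. A small sketch of the pull-down-only branch (NTC_CONNECTED_POSITIVE with pullup_ohm == 0), using made-up divider values and the updated ncpXXxh103 table bounds:

#include <stdio.h>
#include <stdint.h>
#include <errno.h>

/*
 * Sketch of the NTC_CONNECTED_POSITIVE, pullup_ohm == 0 case: the thermistor
 * sits between the supply (puv) and the sense node (uv), with a pull-down
 * resistor pdo to ground, so R_ntc = pdo * (puv - uv) / uv.  Readings outside
 * the compensation table are reported as -ENODATA, as in the patch.
 */
static long long ntc_ohm(uint64_t puv, uint64_t uv, uint64_t pdo,
			 uint64_t ohm_max, uint64_t ohm_min)
{
	uint64_t n;

	if (uv == 0 || uv >= puv)
		return -ENODATA;		/* faulty ADC value */

	n = pdo * (puv - uv) / uv;
	if (n > ohm_max || n < ohm_min)
		return -ENODATA;		/* sensor out of bounds */
	return n;
}

int main(void)
{
	/* hypothetical: 1.8 V supply, 0.9 V at the node, 10 kOhm pull-down */
	printf("%lld ohm\n", ntc_ohm(1800000, 900000, 10000, 195652, 531));
	return 0;
}
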
diff --git a/drivers/hwmon/peci/dimmtemp.c b/drivers/hwmon/peci/dimmtemp.c
index d6762259dd69..fbe82d9852e0 100644
--- a/drivers/hwmon/peci/dimmtemp.c
+++ b/drivers/hwmon/peci/dimmtemp.c
@@ -127,8 +127,6 @@ static int update_thresholds(struct peci_dimmtemp *priv, int dimm_no)
return 0;
ret = priv->gen_info->read_thresholds(priv, dimm_order, chan_rank, &data);
- if (ret == -ENODATA) /* Use default or previous value */
- return 0;
if (ret)
return ret;
@@ -509,11 +507,11 @@ read_thresholds_icx(struct peci_dimmtemp *priv, int dimm_order, int chan_rank, u
ret = peci_ep_pci_local_read(priv->peci_dev, 0, 13, 0, 2, 0xd4, &reg_val);
if (ret || !(reg_val & BIT(31)))
- return -ENODATA; /* Use default or previous value */
+ return -ENODATA;
ret = peci_ep_pci_local_read(priv->peci_dev, 0, 13, 0, 2, 0xd0, &reg_val);
if (ret)
- return -ENODATA; /* Use default or previous value */
+ return -ENODATA;
/*
* Device 26, Offset 224e0: IMC 0 channel 0 -> rank 0
@@ -546,11 +544,11 @@ read_thresholds_spr(struct peci_dimmtemp *priv, int dimm_order, int chan_rank, u
ret = peci_ep_pci_local_read(priv->peci_dev, 0, 30, 0, 2, 0xd4, &reg_val);
if (ret || !(reg_val & BIT(31)))
- return -ENODATA; /* Use default or previous value */
+ return -ENODATA;
ret = peci_ep_pci_local_read(priv->peci_dev, 0, 30, 0, 2, 0xd0, &reg_val);
if (ret)
- return -ENODATA; /* Use default or previous value */
+ return -ENODATA;
/*
* Device 26, Offset 219a8: IMC 0 channel 0 -> rank 0
diff --git a/drivers/hwmon/pmbus/Kconfig b/drivers/hwmon/pmbus/Kconfig
index 419469f40ba0..c9b3c3149982 100644
--- a/drivers/hwmon/pmbus/Kconfig
+++ b/drivers/hwmon/pmbus/Kconfig
@@ -133,6 +133,15 @@ config SENSORS_DPS920AB
This driver can also be built as a module. If so, the module will
be called dps920ab.
+config SENSORS_INA233
+ tristate "Texas Instruments INA233 and compatibles"
+ help
+ If you say yes here you get hardware monitoring support for Texas
+ Instruments INA233.
+
+ This driver can also be built as a module. If so, the module will
+ be called ina233.
+
config SENSORS_INSPUR_IPSPS
tristate "INSPUR Power System Power Supply"
help
@@ -233,9 +242,9 @@ config SENSORS_LTC2978_REGULATOR
depends on SENSORS_LTC2978 && REGULATOR
help
If you say yes here you get regulator support for Linear Technology
- LTC3880, LTC3883, LTC3884, LTC3886, LTC3887, LTC3889, LTC7841,
- LTC7880, LTM4644, LTM4675, LTM4676, LTM4677, LTM4678, LTM4680,
- LTM4686, and LTM4700.
+ LT7170, LT7171, LTC3880, LTC3883, LTC3884, LTC3886, LTC3887, LTC3889,
+ LTC7841, LTC7880, LTM4644, LTM4673, LTM4675, LTM4676, LTM4677,
+ LTM4678, LTM4680, LTM4686, and LTM4700.
config SENSORS_LTC3815
tristate "Linear Technologies LTC3815"
diff --git a/drivers/hwmon/pmbus/Makefile b/drivers/hwmon/pmbus/Makefile
index c7eb7739b7f8..56f128c4653e 100644
--- a/drivers/hwmon/pmbus/Makefile
+++ b/drivers/hwmon/pmbus/Makefile
@@ -15,6 +15,7 @@ obj-$(CONFIG_SENSORS_DELTA_AHE50DC_FAN) += delta-ahe50dc-fan.o
obj-$(CONFIG_SENSORS_FSP_3Y) += fsp-3y.o
obj-$(CONFIG_SENSORS_IBM_CFFPS) += ibm-cffps.o
obj-$(CONFIG_SENSORS_DPS920AB) += dps920ab.o
+obj-$(CONFIG_SENSORS_INA233) += ina233.o
obj-$(CONFIG_SENSORS_INSPUR_IPSPS) += inspur-ipsps.o
obj-$(CONFIG_SENSORS_IR35221) += ir35221.o
obj-$(CONFIG_SENSORS_IR36021) += ir36021.o
diff --git a/drivers/hwmon/pmbus/ina233.c b/drivers/hwmon/pmbus/ina233.c
new file mode 100644
index 000000000000..dde1e1678394
--- /dev/null
+++ b/drivers/hwmon/pmbus/ina233.c
@@ -0,0 +1,191 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Hardware monitoring driver for ina233
+ *
+ * Copyright (c) 2025 Leo Yang
+ */
+
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include "pmbus.h"
+
+#define MFR_READ_VSHUNT 0xd1
+#define MFR_CALIBRATION 0xd4
+
+#define INA233_MAX_CURRENT_DEFAULT 32768000 /* uA */
+#define INA233_RSHUNT_DEFAULT 2000 /* uOhm */
+
+#define MAX_M_VAL 32767
+
+static void calculate_coef(int *m, int *R, u32 current_lsb, int power_coef)
+{
+ u64 scaled_m;
+ int scale_factor = 0;
+ int scale_coef = 1;
+
+ /*
+ * The factor of 1000000 converts Current_LSB from A to uA.
+ * scale_coef scales the intermediate value up to minimize rounding
+ * errors; if there is no fractional part, no scaling is needed.
+ */
+ if (1000000 % current_lsb) {
+ /* Scaling to keep integer precision */
+ scale_factor = -3;
+ scale_coef = 1000;
+ }
+
+ /*
+ * Convert units (Current_LSB A->uA) and apply scaling (scale_factor)
+ * to keep integer precision.
+ * Formula referenced from the spec.
+ */
+ scaled_m = div64_u64(1000000 * scale_coef, (u64)current_lsb * power_coef);
+
+ /* Maximize while keeping it bounded. */
+ while (scaled_m > MAX_M_VAL) {
+ scaled_m = div_u64(scaled_m, 10);
+ scale_factor++;
+ }
+ /* Scale up only if fractional part exists. */
+ while (scaled_m * 10 < MAX_M_VAL && scale_coef != 1) {
+ scaled_m *= 10;
+ scale_factor--;
+ }
+
+ *m = scaled_m;
+ *R = scale_factor;
+}
+
+static int ina233_read_word_data(struct i2c_client *client, int page,
+ int phase, int reg)
+{
+ int ret;
+
+ switch (reg) {
+ case PMBUS_VIRT_READ_VMON:
+ ret = pmbus_read_word_data(client, 0, 0xff, MFR_READ_VSHUNT);
+
+ /* Adjust returned value to match VIN coefficients */
+ /* VIN: 1.25 mV VSHUNT: 2.5 uV LSB */
+ ret = DIV_ROUND_CLOSEST(ret * 25, 12500);
+ break;
+ default:
+ ret = -ENODATA;
+ break;
+ }
+ return ret;
+}
+
+static int ina233_probe(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ int ret, m, R;
+ u32 rshunt;
+ u32 max_current;
+ u32 current_lsb;
+ u16 calibration;
+ struct pmbus_driver_info *info;
+
+ info = devm_kzalloc(dev, sizeof(struct pmbus_driver_info),
+ GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ info->pages = 1;
+ info->format[PSC_VOLTAGE_IN] = direct;
+ info->format[PSC_VOLTAGE_OUT] = direct;
+ info->format[PSC_CURRENT_OUT] = direct;
+ info->format[PSC_POWER] = direct;
+ info->func[0] = PMBUS_HAVE_VIN | PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_INPUT
+ | PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT
+ | PMBUS_HAVE_POUT
+ | PMBUS_HAVE_VMON | PMBUS_HAVE_STATUS_VMON;
+ info->m[PSC_VOLTAGE_IN] = 8;
+ info->R[PSC_VOLTAGE_IN] = 2;
+ info->m[PSC_VOLTAGE_OUT] = 8;
+ info->R[PSC_VOLTAGE_OUT] = 2;
+ info->read_word_data = ina233_read_word_data;
+
+ /* If INA233 skips current/power, shunt-resistor and current-lsb aren't needed. */
+ /* read rshunt value (uOhm) */
+ ret = device_property_read_u32(dev, "shunt-resistor", &rshunt);
+ if (ret) {
+ if (ret != -EINVAL)
+ return dev_err_probe(dev, ret, "Shunt resistor property read fail.\n");
+ rshunt = INA233_RSHUNT_DEFAULT;
+ }
+ if (!rshunt)
+ return dev_err_probe(dev, -EINVAL,
+ "Shunt resistor cannot be zero.\n");
+
+ /* read Maximum expected current value (uA) */
+ ret = device_property_read_u32(dev, "ti,maximum-expected-current-microamp", &max_current);
+ if (ret) {
+ if (ret != -EINVAL)
+ return dev_err_probe(dev, ret,
+ "Maximum expected current property read fail.\n");
+ max_current = INA233_MAX_CURRENT_DEFAULT;
+ }
+ if (max_current < 32768)
+ return dev_err_probe(dev, -EINVAL,
+ "Maximum expected current cannot less then 32768.\n");
+
+ /* Calculate Current_LSB according to the spec formula */
+ current_lsb = max_current / 32768;
+
+ /* calculate current coefficient */
+ calculate_coef(&m, &R, current_lsb, 1);
+ info->m[PSC_CURRENT_OUT] = m;
+ info->R[PSC_CURRENT_OUT] = R;
+
+ /* calculate power coefficient */
+ calculate_coef(&m, &R, current_lsb, 25);
+ info->m[PSC_POWER] = m;
+ info->R[PSC_POWER] = R;
+
+ /* Write MFR_CALIBRATION register; apply formula from spec with unit scaling. */
+ calibration = div64_u64(5120000000ULL, (u64)rshunt * current_lsb);
+ if (calibration > 0x7FFF)
+ return dev_err_probe(dev, -EINVAL,
+ "Product of Current_LSB %u and shunt resistor %u too small, MFR_CALIBRATION reg exceeds 0x7FFF.\n",
+ current_lsb, rshunt);
+ ret = i2c_smbus_write_word_data(client, MFR_CALIBRATION, calibration);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "Unable to write calibration.\n");
+
+ dev_dbg(dev, "power monitor %s (Rshunt = %u uOhm, Current_LSB = %u uA/bit)\n",
+ client->name, rshunt, current_lsb);
+
+ return pmbus_do_probe(client, info);
+}
+
+static const struct i2c_device_id ina233_id[] = {
+ {"ina233", 0},
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, ina233_id);
+
+static const struct of_device_id __maybe_unused ina233_of_match[] = {
+ { .compatible = "ti,ina233" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, ina233_of_match);
+
+static struct i2c_driver ina233_driver = {
+ .driver = {
+ .name = "ina233",
+ .of_match_table = of_match_ptr(ina233_of_match),
+ },
+ .probe = ina233_probe,
+ .id_table = ina233_id,
+};
+
+module_i2c_driver(ina233_driver);
+
+MODULE_AUTHOR("Leo Yang <leo.yang.sy0@gmail.com>");
+MODULE_DESCRIPTION("PMBus driver for INA233 and compatible chips");
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS("PMBUS");
diff --git a/drivers/hwmon/pmbus/ltc2978.c b/drivers/hwmon/pmbus/ltc2978.c
index 4c306943383a..8f5be520a15d 100644
--- a/drivers/hwmon/pmbus/ltc2978.c
+++ b/drivers/hwmon/pmbus/ltc2978.c
@@ -23,11 +23,11 @@ enum chips {
/* Managers */
ltc2972, ltc2974, ltc2975, ltc2977, ltc2978, ltc2979, ltc2980,
/* Controllers */
- ltc3880, ltc3882, ltc3883, ltc3884, ltc3886, ltc3887, ltc3889, ltc7132,
- ltc7841, ltc7880,
+ lt7170, lt7171, ltc3880, ltc3882, ltc3883, ltc3884, ltc3886, ltc3887,
+ ltc3889, ltc7132, ltc7841, ltc7880,
/* Modules */
- ltm2987, ltm4664, ltm4675, ltm4676, ltm4677, ltm4678, ltm4680, ltm4686,
- ltm4700,
+ ltm2987, ltm4664, ltm4673, ltm4675, ltm4676, ltm4677, ltm4678, ltm4680,
+ ltm4686, ltm4700,
};
/* Common for all chips */
@@ -62,6 +62,7 @@ enum chips {
#define LTC2978_ID_MASK 0xfff0
+#define LT7170_ID 0x1C10
#define LTC2972_ID 0x0310
#define LTC2974_ID 0x0210
#define LTC2975_ID 0x0220
@@ -86,6 +87,8 @@ enum chips {
#define LTM2987_ID_A 0x8010 /* A/B for two die IDs */
#define LTM2987_ID_B 0x8020
#define LTM4664_ID 0x4120
+#define LTM4673_ID_REV1 0x0230
+#define LTM4673_ID 0x4480
#define LTM4675_ID 0x47a0
#define LTM4676_ID_REV1 0x4400
#define LTM4676_ID_REV2 0x4480
@@ -535,6 +538,8 @@ static int ltc2978_write_word_data(struct i2c_client *client, int page,
}
static const struct i2c_device_id ltc2978_id[] = {
+ {"lt7170", lt7170},
+ {"lt7171", lt7171},
{"ltc2972", ltc2972},
{"ltc2974", ltc2974},
{"ltc2975", ltc2975},
@@ -554,6 +559,7 @@ static const struct i2c_device_id ltc2978_id[] = {
{"ltc7880", ltc7880},
{"ltm2987", ltm2987},
{"ltm4664", ltm4664},
+ {"ltm4673", ltm4673},
{"ltm4675", ltm4675},
{"ltm4676", ltm4676},
{"ltm4677", ltm4677},
@@ -612,7 +618,7 @@ static int ltc2978_get_id(struct i2c_client *client)
ret = i2c_smbus_read_block_data(client, PMBUS_MFR_ID, buf);
if (ret < 0)
return ret;
- if (ret < 3 || strncmp(buf, "LTC", 3))
+ if (ret < 3 || (strncmp(buf, "LTC", 3) && strncmp(buf, "ADI", 3)))
return -ENODEV;
ret = i2c_smbus_read_block_data(client, PMBUS_MFR_MODEL, buf);
@@ -627,6 +633,25 @@ static int ltc2978_get_id(struct i2c_client *client)
chip_id &= LTC2978_ID_MASK;
+ if (chip_id == LT7170_ID) {
+ u8 buf[I2C_SMBUS_BLOCK_MAX];
+ int ret;
+
+ ret = i2c_smbus_read_i2c_block_data(client, PMBUS_IC_DEVICE_ID,
+ sizeof(buf), buf);
+ if (ret < 0)
+ return ret;
+
+ if (!strncmp(buf + 1, "LT7170", 6) ||
+ !strncmp(buf + 1, "LT7170-1", 8))
+ return lt7170;
+ if (!strncmp(buf + 1, "LT7171", 6) ||
+ !strncmp(buf + 1, "LT7171-1", 8))
+ return lt7171;
+
+ return -ENODEV;
+ }
+
if (chip_id == LTC2972_ID)
return ltc2972;
else if (chip_id == LTC2974_ID)
@@ -665,6 +690,8 @@ static int ltc2978_get_id(struct i2c_client *client)
return ltm2987;
else if (chip_id == LTM4664_ID)
return ltm4664;
+ else if (chip_id == LTM4673_ID || chip_id == LTM4673_ID_REV1)
+ return ltm4673;
else if (chip_id == LTM4675_ID)
return ltm4675;
else if (chip_id == LTM4676_ID_REV1 || chip_id == LTM4676_ID_REV2 ||
@@ -736,6 +763,20 @@ static int ltc2978_probe(struct i2c_client *client)
data->temp2_max = 0x7c00;
switch (data->id) {
+ case lt7170:
+ case lt7171:
+ data->features |= FEAT_CLEAR_PEAKS | FEAT_NEEDS_POLLING;
+ info->read_word_data = ltc3883_read_word_data;
+ info->pages = LTC3883_NUM_PAGES;
+ info->format[PSC_VOLTAGE_IN] = ieee754;
+ info->format[PSC_VOLTAGE_OUT] = ieee754;
+ info->format[PSC_CURRENT_OUT] = ieee754;
+ info->format[PSC_TEMPERATURE] = ieee754;
+ info->func[0] = PMBUS_HAVE_VIN | PMBUS_HAVE_STATUS_INPUT
+ | PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT
+ | PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT
+ | PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP;
+ break;
case ltc2972:
info->read_word_data = ltc2975_read_word_data;
info->pages = LTC2972_NUM_PAGES;
@@ -869,6 +910,21 @@ static int ltc2978_probe(struct i2c_client *client)
| PMBUS_HAVE_IOUT
| PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP;
break;
+ case ltm4673:
+ data->features |= FEAT_NEEDS_POLLING;
+ info->read_word_data = ltc2975_read_word_data;
+ info->pages = LTC2974_NUM_PAGES;
+ info->func[0] = PMBUS_HAVE_VIN | PMBUS_HAVE_STATUS_INPUT
+ | PMBUS_HAVE_TEMP2;
+ for (i = 0; i < info->pages; i++) {
+ info->func[i] |= PMBUS_HAVE_IIN
+ | PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT
+ | PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT
+ | PMBUS_HAVE_PIN
+ | PMBUS_HAVE_POUT
+ | PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP;
+ }
+ break;
default:
return -ENODEV;
}
@@ -907,6 +963,8 @@ static int ltc2978_probe(struct i2c_client *client)
#ifdef CONFIG_OF
static const struct of_device_id ltc2978_of_match[] = {
+ { .compatible = "lltc,lt7170" },
+ { .compatible = "lltc,lt7171" },
{ .compatible = "lltc,ltc2972" },
{ .compatible = "lltc,ltc2974" },
{ .compatible = "lltc,ltc2975" },
@@ -926,6 +984,7 @@ static const struct of_device_id ltc2978_of_match[] = {
{ .compatible = "lltc,ltc7880" },
{ .compatible = "lltc,ltm2987" },
{ .compatible = "lltc,ltm4664" },
+ { .compatible = "lltc,ltm4673" },
{ .compatible = "lltc,ltm4675" },
{ .compatible = "lltc,ltm4676" },
{ .compatible = "lltc,ltm4677" },
diff --git a/drivers/hwmon/pmbus/pmbus.c b/drivers/hwmon/pmbus/pmbus.c
index 77cf268e7d2d..920cd5408141 100644
--- a/drivers/hwmon/pmbus/pmbus.c
+++ b/drivers/hwmon/pmbus/pmbus.c
@@ -103,6 +103,8 @@ static int pmbus_identify(struct i2c_client *client,
if (pmbus_check_byte_register(client, 0, PMBUS_PAGE)) {
int page;
+ info->pages = PMBUS_PAGES;
+
for (page = 1; page < PMBUS_PAGES; page++) {
if (pmbus_set_page(client, page, 0xff) < 0)
break;
diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
index 787683e83db6..cfeba2e4c5c3 100644
--- a/drivers/hwmon/pmbus/pmbus_core.c
+++ b/drivers/hwmon/pmbus/pmbus_core.c
@@ -8,6 +8,7 @@
#include <linux/debugfs.h>
#include <linux/delay.h>
+#include <linux/dcache.h>
#include <linux/kernel.h>
#include <linux/math64.h>
#include <linux/module.h>
@@ -44,8 +45,7 @@ struct pmbus_sensor {
enum pmbus_sensor_classes class; /* sensor class */
bool update; /* runtime sensor update needed */
bool convert; /* Whether or not to apply linear/vid/direct */
- int data; /* Sensor data.
- Negative if there was a read error */
+ int data; /* Sensor data; negative if there was a read error */
};
#define to_pmbus_sensor(_attr) \
container_of(_attr, struct pmbus_sensor, attribute)
@@ -100,7 +100,6 @@ struct pmbus_data {
int num_attributes;
struct attribute_group group;
const struct attribute_group **groups;
- struct dentry *debugfs; /* debugfs device directory */
struct pmbus_sensor *sensors;
@@ -192,11 +191,10 @@ static void pmbus_update_ts(struct i2c_client *client, bool write_op)
struct pmbus_data *data = i2c_get_clientdata(client);
const struct pmbus_driver_info *info = data->info;
- if (info->access_delay) {
+ if (info->access_delay)
data->access_time = ktime_get();
- } else if (info->write_delay && write_op) {
+ else if (info->write_delay && write_op)
data->write_time = ktime_get();
- }
}
int pmbus_set_page(struct i2c_client *client, int page, int phase)
@@ -292,7 +290,6 @@ int pmbus_write_word_data(struct i2c_client *client, int page, u8 reg,
}
EXPORT_SYMBOL_NS_GPL(pmbus_write_word_data, "PMBUS");
-
static int pmbus_write_virt_reg(struct i2c_client *client, int page, int reg,
u16 word)
{
@@ -381,14 +378,14 @@ int pmbus_update_fan(struct i2c_client *client, int page, int id,
u8 to;
from = _pmbus_read_byte_data(client, page,
- pmbus_fan_config_registers[id]);
+ pmbus_fan_config_registers[id]);
if (from < 0)
return from;
to = (from & ~mask) | (config & mask);
if (to != from) {
rv = _pmbus_write_byte_data(client, page,
- pmbus_fan_config_registers[id], to);
+ pmbus_fan_config_registers[id], to);
if (rv < 0)
return rv;
}
@@ -563,7 +560,7 @@ static int pmbus_get_fan_rate(struct i2c_client *client, int page, int id,
}
config = _pmbus_read_byte_data(client, page,
- pmbus_fan_config_registers[id]);
+ pmbus_fan_config_registers[id]);
if (config < 0)
return config;
@@ -788,7 +785,7 @@ static s64 pmbus_reg2data_linear(struct pmbus_data *data,
if (sensor->class == PSC_VOLTAGE_OUT) { /* LINEAR16 */
exponent = data->exponent[sensor->page];
- mantissa = (u16) sensor->data;
+ mantissa = (u16)sensor->data;
} else { /* LINEAR11 */
exponent = ((s16)sensor->data) >> 11;
mantissa = ((s16)((sensor->data & 0x7ff) << 5)) >> 5;
@@ -1173,7 +1170,6 @@ static int pmbus_get_boolean(struct i2c_client *client, struct pmbus_boolean *b,
} else {
pmbus_clear_fault_page(client, page);
}
-
}
if (s1 && s2) {
s64 v1, v2;
@@ -1470,8 +1466,7 @@ static int pmbus_add_label(struct pmbus_data *data,
snprintf(label->name, sizeof(label->name), "%s%d_label", name, seq);
if (!index) {
if (phase == 0xff)
- strncpy(label->label, lstring,
- sizeof(label->label) - 1);
+ strscpy(label->label, lstring);
else
snprintf(label->label, sizeof(label->label), "%s.%d",
lstring, phase);
@@ -1500,8 +1495,7 @@ struct pmbus_limit_attr {
u16 reg; /* Limit register */
u16 sbit; /* Alarm attribute status bit */
bool update; /* True if register needs updates */
- bool low; /* True if low limit; for limits with compare
- functions only */
+ bool low; /* True if low limit; for limits with compare functions only */
const char *attr; /* Attribute name */
const char *alarm; /* Alarm attribute name */
};
@@ -2212,8 +2206,8 @@ static const u32 pmbus_fan_status_flags[] = {
/* Precondition: FAN_CONFIG_x_y and FAN_COMMAND_x must exist for the fan ID */
static int pmbus_add_fan_ctrl(struct i2c_client *client,
- struct pmbus_data *data, int index, int page, int id,
- u8 config)
+ struct pmbus_data *data, int index, int page,
+ int id, u8 config)
{
struct pmbus_sensor *sensor;
@@ -2225,7 +2219,7 @@ static int pmbus_add_fan_ctrl(struct i2c_client *client,
return -ENOMEM;
if (!((data->info->func[page] & PMBUS_HAVE_PWM12) ||
- (data->info->func[page] & PMBUS_HAVE_PWM34)))
+ (data->info->func[page] & PMBUS_HAVE_PWM34)))
return 0;
sensor = pmbus_add_sensor(data, "pwm", NULL, index, page,
@@ -2935,7 +2929,7 @@ static void pmbus_notify(struct pmbus_data *data, int page, int reg, int flags)
}
static int _pmbus_get_flags(struct pmbus_data *data, u8 page, unsigned int *flags,
- unsigned int *event, bool notify)
+ unsigned int *event, bool notify)
{
int i, status;
const struct pmbus_status_category *cat;
@@ -2964,7 +2958,6 @@ static int _pmbus_get_flags(struct pmbus_data *data, u8 page, unsigned int *flag
if (notify && status)
pmbus_notify(data, page, cat->reg, status);
-
}
/*
@@ -3015,7 +3008,6 @@ static int _pmbus_get_flags(struct pmbus_data *data, u8 page, unsigned int *flag
*event |= REGULATOR_EVENT_OVER_TEMP_WARN;
}
-
return 0;
}
@@ -3228,7 +3220,7 @@ static int pmbus_regulator_set_voltage(struct regulator_dev *rdev, int min_uv,
}
static int pmbus_regulator_list_voltage(struct regulator_dev *rdev,
- unsigned int selector)
+ unsigned int selector)
{
struct device *dev = rdev_get_dev(rdev);
struct i2c_client *client = to_i2c_client(dev->parent);
@@ -3320,17 +3312,16 @@ static int pmbus_regulator_register(struct pmbus_data *data)
return 0;
}
-static int pmbus_regulator_notify(struct pmbus_data *data, int page, int event)
+static void pmbus_regulator_notify(struct pmbus_data *data, int page, int event)
{
- int j;
+ int j;
- for (j = 0; j < data->info->num_regulators; j++) {
- if (page == rdev_get_id(data->rdevs[j])) {
- regulator_notifier_call_chain(data->rdevs[j], event, NULL);
- break;
- }
+ for (j = 0; j < data->info->num_regulators; j++) {
+ if (page == rdev_get_id(data->rdevs[j])) {
+ regulator_notifier_call_chain(data->rdevs[j], event, NULL);
+ break;
}
- return 0;
+ }
}
#else
static int pmbus_regulator_register(struct pmbus_data *data)
@@ -3338,9 +3329,8 @@ static int pmbus_regulator_register(struct pmbus_data *data)
return 0;
}
-static int pmbus_regulator_notify(struct pmbus_data *data, int page, int event)
+static void pmbus_regulator_notify(struct pmbus_data *data, int page, int event)
{
- return 0;
}
#endif
@@ -3363,8 +3353,8 @@ static irqreturn_t pmbus_fault_handler(int irq, void *pdata)
{
struct pmbus_data *data = pdata;
struct i2c_client *client = to_i2c_client(data->dev);
-
int i, status, event;
+
mutex_lock(&data->update_lock);
for (i = 0; i < data->info->pages; i++) {
_pmbus_get_flags(data, i, &status, &event, true);
@@ -3428,7 +3418,6 @@ static int pmbus_irq_setup(struct i2c_client *client, struct pmbus_data *data)
static struct dentry *pmbus_debugfs_dir; /* pmbus debugfs directory */
-#if IS_ENABLED(CONFIG_DEBUG_FS)
static int pmbus_debugfs_get(void *data, u64 *val)
{
int rc;
@@ -3471,8 +3460,8 @@ static int pmbus_debugfs_get_status(void *data, u64 *val)
DEFINE_DEBUGFS_ATTRIBUTE(pmbus_debugfs_ops_status, pmbus_debugfs_get_status,
NULL, "0x%04llx\n");
-static ssize_t pmbus_debugfs_mfr_read(struct file *file, char __user *buf,
- size_t count, loff_t *ppos)
+static ssize_t pmbus_debugfs_block_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
{
int rc;
struct pmbus_debugfs_entry *entry = file->private_data;
@@ -3497,51 +3486,98 @@ static ssize_t pmbus_debugfs_mfr_read(struct file *file, char __user *buf,
return simple_read_from_buffer(buf, count, ppos, data, rc);
}
-static const struct file_operations pmbus_debugfs_ops_mfr = {
+static const struct file_operations pmbus_debugfs_block_ops = {
.llseek = noop_llseek,
- .read = pmbus_debugfs_mfr_read,
+ .read = pmbus_debugfs_block_read,
.write = NULL,
.open = simple_open,
};
-static void pmbus_remove_debugfs(void *data)
+static void pmbus_remove_symlink(void *symlink)
{
- struct dentry *entry = data;
-
- debugfs_remove_recursive(entry);
+ debugfs_remove(symlink);
}
-static int pmbus_init_debugfs(struct i2c_client *client,
- struct pmbus_data *data)
+struct pmbus_debugfs_data {
+ u8 reg;
+ u32 flag;
+ const char *name;
+};
+
+static const struct pmbus_debugfs_data pmbus_debugfs_block_data[] = {
+ { .reg = PMBUS_MFR_ID, .name = "mfr_id" },
+ { .reg = PMBUS_MFR_MODEL, .name = "mfr_model" },
+ { .reg = PMBUS_MFR_REVISION, .name = "mfr_revision" },
+ { .reg = PMBUS_MFR_LOCATION, .name = "mfr_location" },
+ { .reg = PMBUS_MFR_DATE, .name = "mfr_date" },
+ { .reg = PMBUS_MFR_SERIAL, .name = "mfr_serial" },
+};
+
+static const struct pmbus_debugfs_data pmbus_debugfs_status_data[] = {
+ { .reg = PMBUS_STATUS_VOUT, .flag = PMBUS_HAVE_STATUS_VOUT, .name = "status%d_vout" },
+ { .reg = PMBUS_STATUS_IOUT, .flag = PMBUS_HAVE_STATUS_IOUT, .name = "status%d_iout" },
+ { .reg = PMBUS_STATUS_INPUT, .flag = PMBUS_HAVE_STATUS_INPUT, .name = "status%d_input" },
+ { .reg = PMBUS_STATUS_TEMPERATURE, .flag = PMBUS_HAVE_STATUS_TEMP,
+ .name = "status%d_temp" },
+ { .reg = PMBUS_STATUS_FAN_12, .flag = PMBUS_HAVE_STATUS_FAN12, .name = "status%d_fan12" },
+ { .reg = PMBUS_STATUS_FAN_34, .flag = PMBUS_HAVE_STATUS_FAN34, .name = "status%d_fan34" },
+ { .reg = PMBUS_STATUS_CML, .name = "status%d_cml" },
+ { .reg = PMBUS_STATUS_OTHER, .name = "status%d_other" },
+ { .reg = PMBUS_STATUS_MFR_SPECIFIC, .name = "status%d_mfr" },
+};
+
+static void pmbus_init_debugfs(struct i2c_client *client,
+ struct pmbus_data *data)
{
- int i, idx = 0;
- char name[PMBUS_NAME_SIZE];
+ struct dentry *symlink_d, *debugfs = client->debugfs;
struct pmbus_debugfs_entry *entries;
+ const char *pathname, *symlink;
+ char name[PMBUS_NAME_SIZE];
+ int page, i, idx = 0;
- if (!pmbus_debugfs_dir)
- return -ENODEV;
+ /*
+ * client->debugfs may be NULL or an ERR_PTR(). dentry_path_raw()
+ * does not check if its parameters are valid, so validate
+ * client->debugfs before using it.
+ */
+ if (!pmbus_debugfs_dir || IS_ERR_OR_NULL(debugfs))
+ return;
/*
- * Create the debugfs directory for this device. Use the hwmon device
- * name to avoid conflicts (hwmon numbers are globally unique).
+ * Backwards compatibility: Create symlink from /pmbus/<hwmon_device>
+ * to i2c debugfs directory.
*/
- data->debugfs = debugfs_create_dir(dev_name(data->hwmon_dev),
- pmbus_debugfs_dir);
- if (IS_ERR_OR_NULL(data->debugfs)) {
- data->debugfs = NULL;
- return -ENODEV;
- }
+ pathname = dentry_path_raw(debugfs, name, sizeof(name));
+ if (IS_ERR(pathname))
+ return;
+
+ /*
+ * The path returned by dentry_path_raw() starts with '/'. Prepend it
+ * with ".." to get the symlink relative to the pmbus root directory.
+ */
+ symlink = kasprintf(GFP_KERNEL, "..%s", pathname);
+ if (!symlink)
+ return;
+
+ symlink_d = debugfs_create_symlink(dev_name(data->hwmon_dev),
+ pmbus_debugfs_dir, symlink);
+ kfree(symlink);
+
+ devm_add_action_or_reset(data->dev, pmbus_remove_symlink, symlink_d);
/*
* Allocate the max possible entries we need.
- * 7 entries device-specific
- * 10 entries page-specific
+ * device specific:
+ * ARRAY_SIZE(pmbus_debugfs_block_data) + 2
+ * page specific:
+ * ARRAY_SIZE(pmbus_debugfs_status_data) + 1
*/
entries = devm_kcalloc(data->dev,
- 7 + data->info->pages * 10, sizeof(*entries),
- GFP_KERNEL);
+ ARRAY_SIZE(pmbus_debugfs_block_data) + 2 +
+ data->info->pages * (ARRAY_SIZE(pmbus_debugfs_status_data) + 1),
+ sizeof(*entries), GFP_KERNEL);
if (!entries)
- return -ENOMEM;
+ return;
/*
* Add device-specific entries.
@@ -3551,184 +3587,67 @@ static int pmbus_init_debugfs(struct i2c_client *client,
* assume that values of the following registers are the same for all
* pages and report values only for page 0.
*/
- if (pmbus_check_byte_register(client, 0, PMBUS_REVISION)) {
+ if (!(data->flags & PMBUS_NO_CAPABILITY) &&
+ pmbus_check_byte_register(client, 0, PMBUS_CAPABILITY)) {
entries[idx].client = client;
entries[idx].page = 0;
- entries[idx].reg = PMBUS_REVISION;
- debugfs_create_file("revision", 0444, data->debugfs,
+ entries[idx].reg = PMBUS_CAPABILITY;
+ debugfs_create_file("capability", 0444, debugfs,
&entries[idx++],
&pmbus_debugfs_ops);
}
-
- if (pmbus_check_block_register(client, 0, PMBUS_MFR_ID)) {
- entries[idx].client = client;
- entries[idx].page = 0;
- entries[idx].reg = PMBUS_MFR_ID;
- debugfs_create_file("mfr_id", 0444, data->debugfs,
- &entries[idx++],
- &pmbus_debugfs_ops_mfr);
- }
-
- if (pmbus_check_block_register(client, 0, PMBUS_MFR_MODEL)) {
- entries[idx].client = client;
- entries[idx].page = 0;
- entries[idx].reg = PMBUS_MFR_MODEL;
- debugfs_create_file("mfr_model", 0444, data->debugfs,
- &entries[idx++],
- &pmbus_debugfs_ops_mfr);
- }
-
- if (pmbus_check_block_register(client, 0, PMBUS_MFR_REVISION)) {
- entries[idx].client = client;
- entries[idx].page = 0;
- entries[idx].reg = PMBUS_MFR_REVISION;
- debugfs_create_file("mfr_revision", 0444, data->debugfs,
- &entries[idx++],
- &pmbus_debugfs_ops_mfr);
- }
-
- if (pmbus_check_block_register(client, 0, PMBUS_MFR_LOCATION)) {
+ if (pmbus_check_byte_register(client, 0, PMBUS_REVISION)) {
entries[idx].client = client;
entries[idx].page = 0;
- entries[idx].reg = PMBUS_MFR_LOCATION;
- debugfs_create_file("mfr_location", 0444, data->debugfs,
+ entries[idx].reg = PMBUS_REVISION;
+ debugfs_create_file("pmbus_revision", 0444, debugfs,
&entries[idx++],
- &pmbus_debugfs_ops_mfr);
+ &pmbus_debugfs_ops);
}
- if (pmbus_check_block_register(client, 0, PMBUS_MFR_DATE)) {
- entries[idx].client = client;
- entries[idx].page = 0;
- entries[idx].reg = PMBUS_MFR_DATE;
- debugfs_create_file("mfr_date", 0444, data->debugfs,
- &entries[idx++],
- &pmbus_debugfs_ops_mfr);
- }
+ for (i = 0; i < ARRAY_SIZE(pmbus_debugfs_block_data); i++) {
+ const struct pmbus_debugfs_data *d = &pmbus_debugfs_block_data[i];
- if (pmbus_check_block_register(client, 0, PMBUS_MFR_SERIAL)) {
- entries[idx].client = client;
- entries[idx].page = 0;
- entries[idx].reg = PMBUS_MFR_SERIAL;
- debugfs_create_file("mfr_serial", 0444, data->debugfs,
- &entries[idx++],
- &pmbus_debugfs_ops_mfr);
+ if (pmbus_check_block_register(client, 0, d->reg)) {
+ entries[idx].client = client;
+ entries[idx].page = 0;
+ entries[idx].reg = d->reg;
+ debugfs_create_file(d->name, 0444, debugfs,
+ &entries[idx++],
+ &pmbus_debugfs_block_ops);
+ }
}
/* Add page specific entries */
- for (i = 0; i < data->info->pages; ++i) {
+ for (page = 0; page < data->info->pages; ++page) {
/* Check accessibility of status register if it's not page 0 */
- if (!i || pmbus_check_status_register(client, i)) {
+ if (!page || pmbus_check_status_register(client, page)) {
/* No need to set reg as we have special read op. */
entries[idx].client = client;
- entries[idx].page = i;
- scnprintf(name, PMBUS_NAME_SIZE, "status%d", i);
- debugfs_create_file(name, 0444, data->debugfs,
+ entries[idx].page = page;
+ scnprintf(name, PMBUS_NAME_SIZE, "status%d", page);
+ debugfs_create_file(name, 0444, debugfs,
&entries[idx++],
&pmbus_debugfs_ops_status);
}
- if (data->info->func[i] & PMBUS_HAVE_STATUS_VOUT) {
- entries[idx].client = client;
- entries[idx].page = i;
- entries[idx].reg = PMBUS_STATUS_VOUT;
- scnprintf(name, PMBUS_NAME_SIZE, "status%d_vout", i);
- debugfs_create_file(name, 0444, data->debugfs,
- &entries[idx++],
- &pmbus_debugfs_ops);
- }
-
- if (data->info->func[i] & PMBUS_HAVE_STATUS_IOUT) {
- entries[idx].client = client;
- entries[idx].page = i;
- entries[idx].reg = PMBUS_STATUS_IOUT;
- scnprintf(name, PMBUS_NAME_SIZE, "status%d_iout", i);
- debugfs_create_file(name, 0444, data->debugfs,
- &entries[idx++],
- &pmbus_debugfs_ops);
- }
-
- if (data->info->func[i] & PMBUS_HAVE_STATUS_INPUT) {
- entries[idx].client = client;
- entries[idx].page = i;
- entries[idx].reg = PMBUS_STATUS_INPUT;
- scnprintf(name, PMBUS_NAME_SIZE, "status%d_input", i);
- debugfs_create_file(name, 0444, data->debugfs,
- &entries[idx++],
- &pmbus_debugfs_ops);
- }
-
- if (data->info->func[i] & PMBUS_HAVE_STATUS_TEMP) {
- entries[idx].client = client;
- entries[idx].page = i;
- entries[idx].reg = PMBUS_STATUS_TEMPERATURE;
- scnprintf(name, PMBUS_NAME_SIZE, "status%d_temp", i);
- debugfs_create_file(name, 0444, data->debugfs,
- &entries[idx++],
- &pmbus_debugfs_ops);
- }
-
- if (pmbus_check_byte_register(client, i, PMBUS_STATUS_CML)) {
- entries[idx].client = client;
- entries[idx].page = i;
- entries[idx].reg = PMBUS_STATUS_CML;
- scnprintf(name, PMBUS_NAME_SIZE, "status%d_cml", i);
- debugfs_create_file(name, 0444, data->debugfs,
- &entries[idx++],
- &pmbus_debugfs_ops);
- }
-
- if (pmbus_check_byte_register(client, i, PMBUS_STATUS_OTHER)) {
- entries[idx].client = client;
- entries[idx].page = i;
- entries[idx].reg = PMBUS_STATUS_OTHER;
- scnprintf(name, PMBUS_NAME_SIZE, "status%d_other", i);
- debugfs_create_file(name, 0444, data->debugfs,
- &entries[idx++],
- &pmbus_debugfs_ops);
- }
-
- if (pmbus_check_byte_register(client, i,
- PMBUS_STATUS_MFR_SPECIFIC)) {
- entries[idx].client = client;
- entries[idx].page = i;
- entries[idx].reg = PMBUS_STATUS_MFR_SPECIFIC;
- scnprintf(name, PMBUS_NAME_SIZE, "status%d_mfr", i);
- debugfs_create_file(name, 0444, data->debugfs,
- &entries[idx++],
- &pmbus_debugfs_ops);
- }
-
- if (data->info->func[i] & PMBUS_HAVE_STATUS_FAN12) {
- entries[idx].client = client;
- entries[idx].page = i;
- entries[idx].reg = PMBUS_STATUS_FAN_12;
- scnprintf(name, PMBUS_NAME_SIZE, "status%d_fan12", i);
- debugfs_create_file(name, 0444, data->debugfs,
- &entries[idx++],
- &pmbus_debugfs_ops);
- }
-
- if (data->info->func[i] & PMBUS_HAVE_STATUS_FAN34) {
- entries[idx].client = client;
- entries[idx].page = i;
- entries[idx].reg = PMBUS_STATUS_FAN_34;
- scnprintf(name, PMBUS_NAME_SIZE, "status%d_fan34", i);
- debugfs_create_file(name, 0444, data->debugfs,
- &entries[idx++],
- &pmbus_debugfs_ops);
+ for (i = 0; i < ARRAY_SIZE(pmbus_debugfs_status_data); i++) {
+ const struct pmbus_debugfs_data *d =
+ &pmbus_debugfs_status_data[i];
+
+ if ((data->info->func[page] & d->flag) ||
+ (!d->flag && pmbus_check_byte_register(client, page, d->reg))) {
+ entries[idx].client = client;
+ entries[idx].page = page;
+ entries[idx].reg = d->reg;
+ scnprintf(name, PMBUS_NAME_SIZE, d->name, page);
+ debugfs_create_file(name, 0444, debugfs,
+ &entries[idx++],
+ &pmbus_debugfs_ops);
+ }
}
}
-
- return devm_add_action_or_reset(data->dev,
- pmbus_remove_debugfs, data->debugfs);
-}
-#else
-static int pmbus_init_debugfs(struct i2c_client *client,
- struct pmbus_data *data)
-{
- return 0;
}
-#endif /* IS_ENABLED(CONFIG_DEBUG_FS) */
int pmbus_do_probe(struct i2c_client *client, struct pmbus_driver_info *info)
{
@@ -3800,8 +3719,8 @@ int pmbus_do_probe(struct i2c_client *client, struct pmbus_driver_info *info)
data->groups[0] = &data->group;
memcpy(data->groups + 1, info->groups, sizeof(void *) * groups_num);
- data->hwmon_dev = devm_hwmon_device_register_with_groups(dev,
- name, data, data->groups);
+ data->hwmon_dev = devm_hwmon_device_register_with_groups(dev, name,
+ data, data->groups);
if (IS_ERR(data->hwmon_dev)) {
dev_err(dev, "Failed to register hwmon device\n");
return PTR_ERR(data->hwmon_dev);
@@ -3815,9 +3734,7 @@ int pmbus_do_probe(struct i2c_client *client, struct pmbus_driver_info *info)
if (ret)
return ret;
- ret = pmbus_init_debugfs(client, data);
- if (ret)
- dev_warn(dev, "Failed to register debugfs\n");
+ pmbus_init_debugfs(client, data);
return 0;
}
@@ -3825,9 +3742,15 @@ EXPORT_SYMBOL_NS_GPL(pmbus_do_probe, "PMBUS");
struct dentry *pmbus_get_debugfs_dir(struct i2c_client *client)
{
- struct pmbus_data *data = i2c_get_clientdata(client);
-
- return data->debugfs;
+ /*
+ * client->debugfs may be an ERR_PTR(). Returning that to
+ * the calling code would potentially require additional
+ * complexity in the calling code and otherwise add no
+ * value. Return NULL in that case.
+ */
+ if (IS_ERR_OR_NULL(client->debugfs))
+ return NULL;
+ return client->debugfs;
}
EXPORT_SYMBOL_NS_GPL(pmbus_get_debugfs_dir, "PMBUS");
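
The backwards-compatibility entry under the pmbus debugfs root is simply a symlink whose target is the client's debugfs dentry path with ".." prepended. A trivial sketch of that string handling, with a made-up dentry path standing in for the value returned by dentry_path_raw():

#include <stdio.h>

int main(void)
{
	/* hypothetical path returned by dentry_path_raw() for the client */
	const char *pathname = "/i2c/i2c-1/1-0040";
	char symlink[64];

	/* same construction as the kasprintf("..%s", pathname) in the patch */
	snprintf(symlink, sizeof(symlink), "..%s", pathname);

	/* the link lives in the pmbus debugfs root, so ".." resolves there */
	printf("pmbus/hwmon3 -> %s\n", symlink);
	return 0;
}
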
diff --git a/drivers/hwmon/pt5161l.c b/drivers/hwmon/pt5161l.c
index a9f0b23f9e76..20e3cfa625f1 100644
--- a/drivers/hwmon/pt5161l.c
+++ b/drivers/hwmon/pt5161l.c
@@ -63,7 +63,6 @@ struct pt5161l_fw_ver {
/* Each client has this additional data */
struct pt5161l_data {
struct i2c_client *client;
- struct dentry *debugfs;
struct pt5161l_fw_ver fw_ver;
struct mutex lock; /* for atomic I2C transactions */
bool init_done;
@@ -72,8 +71,6 @@ struct pt5161l_data {
bool mm_wide_reg_access; /* MM assisted wide register access */
};
-static struct dentry *pt5161l_debugfs_dir;
-
/*
* Write multiple data bytes to Aries over I2C
*/
@@ -568,21 +565,16 @@ static const struct file_operations pt5161l_debugfs_ops_hb_sts = {
.open = simple_open,
};
-static int pt5161l_init_debugfs(struct pt5161l_data *data)
+static void pt5161l_init_debugfs(struct i2c_client *client, struct pt5161l_data *data)
{
- data->debugfs = debugfs_create_dir(dev_name(&data->client->dev),
- pt5161l_debugfs_dir);
-
- debugfs_create_file("fw_ver", 0444, data->debugfs, data,
+ debugfs_create_file("fw_ver", 0444, client->debugfs, data,
&pt5161l_debugfs_ops_fw_ver);
- debugfs_create_file("fw_load_status", 0444, data->debugfs, data,
+ debugfs_create_file("fw_load_status", 0444, client->debugfs, data,
&pt5161l_debugfs_ops_fw_load_sts);
- debugfs_create_file("heartbeat_status", 0444, data->debugfs, data,
+ debugfs_create_file("heartbeat_status", 0444, client->debugfs, data,
&pt5161l_debugfs_ops_hb_sts);
-
- return 0;
}
static int pt5161l_probe(struct i2c_client *client)
@@ -604,17 +596,12 @@ static int pt5161l_probe(struct i2c_client *client)
data,
&pt5161l_chip_info,
NULL);
+ if (IS_ERR(hwmon_dev))
+ return PTR_ERR(hwmon_dev);
- pt5161l_init_debugfs(data);
-
- return PTR_ERR_OR_ZERO(hwmon_dev);
-}
-
-static void pt5161l_remove(struct i2c_client *client)
-{
- struct pt5161l_data *data = i2c_get_clientdata(client);
+ pt5161l_init_debugfs(client, data);
- debugfs_remove_recursive(data->debugfs);
+ return 0;
}
static const struct of_device_id __maybe_unused pt5161l_of_match[] = {
@@ -643,24 +630,9 @@ static struct i2c_driver pt5161l_driver = {
.acpi_match_table = ACPI_PTR(pt5161l_acpi_match),
},
.probe = pt5161l_probe,
- .remove = pt5161l_remove,
.id_table = pt5161l_id,
};
-
-static int __init pt5161l_init(void)
-{
- pt5161l_debugfs_dir = debugfs_create_dir("pt5161l", NULL);
- return i2c_add_driver(&pt5161l_driver);
-}
-
-static void __exit pt5161l_exit(void)
-{
- i2c_del_driver(&pt5161l_driver);
- debugfs_remove_recursive(pt5161l_debugfs_dir);
-}
-
-module_init(pt5161l_init);
-module_exit(pt5161l_exit);
+module_i2c_driver(pt5161l_driver);
MODULE_AUTHOR("Cosmo Chou <cosmo.chou@quantatw.com>");
MODULE_DESCRIPTION("Hwmon driver for Astera Labs Aries PCIe retimer");
diff --git a/drivers/hwmon/sg2042-mcu.c b/drivers/hwmon/sg2042-mcu.c
index aa3fb773602c..105131c4acf7 100644
--- a/drivers/hwmon/sg2042-mcu.c
+++ b/drivers/hwmon/sg2042-mcu.c
@@ -50,12 +50,9 @@
struct sg2042_mcu_data {
struct i2c_client *client;
- struct dentry *debugfs;
struct mutex mutex;
};
-static struct dentry *sgmcu_debugfs;
-
static ssize_t reset_count_show(struct device *dev,
struct device_attribute *attr,
char *buf)
@@ -292,18 +289,15 @@ static const struct hwmon_chip_info sg2042_mcu_chip_info = {
.info = sg2042_mcu_info,
};
-static void sg2042_mcu_debugfs_init(struct sg2042_mcu_data *mcu,
- struct device *dev)
+static void sg2042_mcu_debugfs_init(struct sg2042_mcu_data *mcu)
{
- mcu->debugfs = debugfs_create_dir(dev_name(dev), sgmcu_debugfs);
-
- debugfs_create_file("firmware_version", 0444, mcu->debugfs,
+ debugfs_create_file("firmware_version", 0444, mcu->client->debugfs,
mcu, &firmware_version_fops);
- debugfs_create_file("pcb_version", 0444, mcu->debugfs, mcu,
+ debugfs_create_file("pcb_version", 0444, mcu->client->debugfs, mcu,
&pcb_version_fops);
- debugfs_create_file("mcu_type", 0444, mcu->debugfs, mcu,
+ debugfs_create_file("mcu_type", 0444, mcu->client->debugfs, mcu,
&mcu_type_fops);
- debugfs_create_file("board_type", 0444, mcu->debugfs, mcu,
+ debugfs_create_file("board_type", 0444, mcu->client->debugfs, mcu,
&board_type_fops);
}
@@ -333,18 +327,11 @@ static int sg2042_mcu_i2c_probe(struct i2c_client *client)
if (IS_ERR(hwmon_dev))
return PTR_ERR(hwmon_dev);
- sg2042_mcu_debugfs_init(mcu, dev);
+ sg2042_mcu_debugfs_init(mcu);
return 0;
}
-static void sg2042_mcu_i2c_remove(struct i2c_client *client)
-{
- struct sg2042_mcu_data *mcu = i2c_get_clientdata(client);
-
- debugfs_remove_recursive(mcu->debugfs);
-}
-
static const struct i2c_device_id sg2042_mcu_id[] = {
{ "sg2042-hwmon-mcu" },
{ }
@@ -364,24 +351,9 @@ static struct i2c_driver sg2042_mcu_driver = {
.dev_groups = sg2042_mcu_groups,
},
.probe = sg2042_mcu_i2c_probe,
- .remove = sg2042_mcu_i2c_remove,
.id_table = sg2042_mcu_id,
};
-
-static int __init sg2042_mcu_init(void)
-{
- sgmcu_debugfs = debugfs_create_dir("sg2042-mcu", NULL);
- return i2c_add_driver(&sg2042_mcu_driver);
-}
-
-static void __exit sg2042_mcu_exit(void)
-{
- debugfs_remove_recursive(sgmcu_debugfs);
- i2c_del_driver(&sg2042_mcu_driver);
-}
-
-module_init(sg2042_mcu_init);
-module_exit(sg2042_mcu_exit);
+module_i2c_driver(sg2042_mcu_driver);
MODULE_AUTHOR("Inochi Amaoto <inochiama@outlook.com>");
MODULE_DESCRIPTION("MCU I2C driver for SG2042 soc platform");
diff --git a/drivers/hwmon/sht3x.c b/drivers/hwmon/sht3x.c
index 650b0bcc2359..557ad3e7752a 100644
--- a/drivers/hwmon/sht3x.c
+++ b/drivers/hwmon/sht3x.c
@@ -44,8 +44,6 @@ static const unsigned char sht3x_cmd_read_status_reg[] = { 0xf3, 0x2d };
static const unsigned char sht3x_cmd_clear_status_reg[] = { 0x30, 0x41 };
static const unsigned char sht3x_cmd_read_serial_number[] = { 0x37, 0x80 };
-static struct dentry *debugfs;
-
/* delays for single-shot mode i2c commands, both in us */
#define SHT3X_SINGLE_WAIT_TIME_HPM 15000
#define SHT3X_SINGLE_WAIT_TIME_MPM 6000
@@ -167,7 +165,6 @@ struct sht3x_data {
enum sht3x_chips chip_id;
struct mutex i2c_lock; /* lock for sending i2c commands */
struct mutex data_lock; /* lock for updating driver data */
- struct dentry *sensor_dir;
u8 mode;
const unsigned char *command;
@@ -837,23 +834,7 @@ static int sht3x_write(struct device *dev, enum hwmon_sensor_types type,
}
}
-static void sht3x_debugfs_init(struct sht3x_data *data)
-{
- char name[32];
-
- snprintf(name, sizeof(name), "i2c%u-%02x",
- data->client->adapter->nr, data->client->addr);
- data->sensor_dir = debugfs_create_dir(name, debugfs);
- debugfs_create_u32("serial_number", 0444,
- data->sensor_dir, &data->serial_number);
-}
-
-static void sht3x_debugfs_remove(void *sensor_dir)
-{
- debugfs_remove_recursive(sensor_dir);
-}
-
-static int sht3x_serial_number_read(struct sht3x_data *data)
+static void sht3x_serial_number_read(struct sht3x_data *data)
{
int ret;
char buffer[SHT3X_RESPONSE_LENGTH];
@@ -864,11 +845,12 @@ static int sht3x_serial_number_read(struct sht3x_data *data)
buffer,
SHT3X_RESPONSE_LENGTH, 0);
if (ret)
- return ret;
+ return;
data->serial_number = (buffer[0] << 24) | (buffer[1] << 16) |
(buffer[3] << 8) | buffer[4];
- return ret;
+
+ debugfs_create_u32("serial_number", 0444, client->debugfs, &data->serial_number);
}
static const struct hwmon_ops sht3x_ops = {
@@ -930,28 +912,14 @@ static int sht3x_probe(struct i2c_client *client)
if (ret)
return ret;
- ret = sht3x_serial_number_read(data);
- if (ret) {
- dev_dbg(dev, "unable to read serial number\n");
- } else {
- sht3x_debugfs_init(data);
- ret = devm_add_action_or_reset(dev,
- sht3x_debugfs_remove,
- data->sensor_dir);
- if (ret)
- return ret;
- }
-
- hwmon_dev = devm_hwmon_device_register_with_info(dev,
- client->name,
- data,
- &sht3x_chip_info,
- sht3x_groups);
-
+ hwmon_dev = devm_hwmon_device_register_with_info(dev, client->name, data,
+ &sht3x_chip_info, sht3x_groups);
if (IS_ERR(hwmon_dev))
- dev_dbg(dev, "unable to register hwmon device\n");
+ return PTR_ERR(hwmon_dev);
+
+ sht3x_serial_number_read(data);
- return PTR_ERR_OR_ZERO(hwmon_dev);
+ return 0;
}
/* device ID table */
@@ -968,20 +936,7 @@ static struct i2c_driver sht3x_i2c_driver = {
.probe = sht3x_probe,
.id_table = sht3x_ids,
};
-
-static int __init sht3x_init(void)
-{
- debugfs = debugfs_create_dir("sht3x", NULL);
- return i2c_add_driver(&sht3x_i2c_driver);
-}
-module_init(sht3x_init);
-
-static void __exit sht3x_cleanup(void)
-{
- debugfs_remove_recursive(debugfs);
- i2c_del_driver(&sht3x_i2c_driver);
-}
-module_exit(sht3x_cleanup);
+module_i2c_driver(sht3x_i2c_driver);
MODULE_AUTHOR("David Frey <david.frey@sensirion.com>");
MODULE_AUTHOR("Pascal Sachs <pascal.sachs@sensirion.com>");
diff --git a/drivers/hwmon/tps23861.c b/drivers/hwmon/tps23861.c
index 80fb03f30c30..4cb3960d5170 100644
--- a/drivers/hwmon/tps23861.c
+++ b/drivers/hwmon/tps23861.c
@@ -114,7 +114,6 @@ struct tps23861_data {
struct regmap *regmap;
u32 shunt_resistor;
struct i2c_client *client;
- struct dentry *debugfs_dir;
};
static const struct regmap_config tps23861_regmap_config = {
@@ -503,25 +502,6 @@ static int tps23861_port_status_show(struct seq_file *s, void *data)
DEFINE_SHOW_ATTRIBUTE(tps23861_port_status);
-static void tps23861_init_debugfs(struct tps23861_data *data,
- struct device *hwmon_dev)
-{
- const char *debugfs_name;
-
- debugfs_name = devm_kasprintf(&data->client->dev, GFP_KERNEL, "%s-%s",
- data->client->name, dev_name(hwmon_dev));
- if (!debugfs_name)
- return;
-
- data->debugfs_dir = debugfs_create_dir(debugfs_name, NULL);
-
- debugfs_create_file("port_status",
- 0400,
- data->debugfs_dir,
- data,
- &tps23861_port_status_fops);
-}
-
static int tps23861_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
@@ -562,18 +542,12 @@ static int tps23861_probe(struct i2c_client *client)
if (IS_ERR(hwmon_dev))
return PTR_ERR(hwmon_dev);
- tps23861_init_debugfs(data, hwmon_dev);
+ debugfs_create_file("port_status", 0400, client->debugfs, data,
+ &tps23861_port_status_fops);
return 0;
}
-static void tps23861_remove(struct i2c_client *client)
-{
- struct tps23861_data *data = i2c_get_clientdata(client);
-
- debugfs_remove_recursive(data->debugfs_dir);
-}
-
static const struct of_device_id __maybe_unused tps23861_of_match[] = {
{ .compatible = "ti,tps23861", },
{ },
@@ -582,7 +556,6 @@ MODULE_DEVICE_TABLE(of, tps23861_of_match);
static struct i2c_driver tps23861_driver = {
.probe = tps23861_probe,
- .remove = tps23861_remove,
.driver = {
.name = "tps23861",
.of_match_table = of_match_ptr(tps23861_of_match),
diff --git a/drivers/hwmon/xgene-hwmon.c b/drivers/hwmon/xgene-hwmon.c
index 1e3bd129a922..2cdbd5f107a2 100644
--- a/drivers/hwmon/xgene-hwmon.c
+++ b/drivers/hwmon/xgene-hwmon.c
@@ -105,7 +105,7 @@ struct xgene_hwmon_dev {
phys_addr_t comm_base_addr;
void *pcc_comm_addr;
- u64 usecs_lat;
+ unsigned int usecs_lat;
};
/*
@@ -706,7 +706,7 @@ static int xgene_hwmon_probe(struct platform_device *pdev)
goto out;
}
- if (!ctx->pcc_comm_addr) {
+ if (IS_ERR_OR_NULL(ctx->pcc_comm_addr)) {
dev_err(&pdev->dev,
"Failed to ioremap PCC comm region\n");
rc = -ENOMEM;
diff --git a/drivers/hwtracing/coresight/coresight-etm4x-core.c b/drivers/hwtracing/coresight/coresight-etm4x-core.c
index 2c1a60577728..3d98e3371fff 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x-core.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x-core.c
@@ -1216,7 +1216,7 @@ static void etm4_fixup_wrong_ccitmin(struct etmv4_drvdata *drvdata)
* recorded value for 'drvdata->ccitmin' to workaround
* this problem.
*/
- if (is_midr_in_range_list(read_cpuid_id(), etm_wrong_ccitmin_cpus)) {
+ if (is_midr_in_range_list(etm_wrong_ccitmin_cpus)) {
if (drvdata->ccitmin == 256)
drvdata->ccitmin = 4;
}
diff --git a/drivers/hwtracing/intel_th/msu.c b/drivers/hwtracing/intel_th/msu.c
index 66123d684ac9..bf99d79a4192 100644
--- a/drivers/hwtracing/intel_th/msu.c
+++ b/drivers/hwtracing/intel_th/msu.c
@@ -105,23 +105,32 @@ struct msc_iter {
/**
* struct msc - MSC device representation
- * @reg_base: register window base address
+ * @reg_base: register window base address for the entire MSU
+ * @msu_base: register window base address for this MSC
* @thdev: intel_th_device pointer
* @mbuf: MSU buffer, if assigned
- * @mbuf_priv MSU buffer's private data, if @mbuf
+ * @mbuf_priv: MSU buffer's private data, if @mbuf
+ * @work: a work to stop the trace when the buffer is full
* @win_list: list of windows in multiblock mode
* @single_sgt: single mode buffer
* @cur_win: current window
+ * @switch_on_unlock: window to switch to when it becomes available
* @nr_pages: total number of pages allocated for this buffer
* @single_sz: amount of data in single mode
* @single_wrap: single mode wrap occurred
* @base: buffer's base pointer
* @base_addr: buffer's base address
+ * @orig_addr: MSC0 buffer's base address
+ * @orig_sz: MSC0 buffer's size
* @user_count: number of users of the buffer
* @mmap_count: number of mappings
* @buf_mutex: mutex to serialize access to buffer-related bits
+ * @iter_list: list of open file descriptor iterators
+ * @stop_on_full: stop the trace if the current window is full
* @enabled: MSC is enabled
* @wrap: wrapping is enabled
+ * @do_irq: IRQ resource is available, handle interrupts
+ * @multi_is_broken: multiblock mode enabled (not disabled by PCI drvdata)
* @mode: MSC operating mode
* @burst_len: write burst length
* @index: number of this MSC in the MSU
diff --git a/drivers/hwtracing/intel_th/pci.c b/drivers/hwtracing/intel_th/pci.c
index e9d8d28e055f..e3def163d5cf 100644
--- a/drivers/hwtracing/intel_th/pci.c
+++ b/drivers/hwtracing/intel_th/pci.c
@@ -335,6 +335,21 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
.driver_data = (kernel_ulong_t)&intel_th_2x,
},
{
+ /* Arrow Lake */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7724),
+ .driver_data = (kernel_ulong_t)&intel_th_2x,
+ },
+ {
+ /* Panther Lake-H */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe324),
+ .driver_data = (kernel_ulong_t)&intel_th_2x,
+ },
+ {
+ /* Panther Lake-P/U */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe424),
+ .driver_data = (kernel_ulong_t)&intel_th_2x,
+ },
+ {
/* Alder Lake CPU */
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x466f),
.driver_data = (kernel_ulong_t)&intel_th_2x,
diff --git a/drivers/hwtracing/stm/heartbeat.c b/drivers/hwtracing/stm/heartbeat.c
index e9496fe97baa..495eb1dc8ac5 100644
--- a/drivers/hwtracing/stm/heartbeat.c
+++ b/drivers/hwtracing/stm/heartbeat.c
@@ -81,10 +81,8 @@ static int stm_heartbeat_init(void)
stm_heartbeat[i].data.type = STM_USER;
stm_heartbeat[i].data.link = stm_heartbeat_link;
stm_heartbeat[i].data.unlink = stm_heartbeat_unlink;
- hrtimer_init(&stm_heartbeat[i].hrtimer, CLOCK_MONOTONIC,
- HRTIMER_MODE_ABS);
- stm_heartbeat[i].hrtimer.function =
- stm_heartbeat_hrtimer_handler;
+ hrtimer_setup(&stm_heartbeat[i].hrtimer, stm_heartbeat_hrtimer_handler,
+ CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
ret = stm_source_register_device(NULL, &stm_heartbeat[i].data);
if (ret)
diff --git a/drivers/i2c/busses/i2c-ali1535.c b/drivers/i2c/busses/i2c-ali1535.c
index 544c94e86b89..1eac35838040 100644
--- a/drivers/i2c/busses/i2c-ali1535.c
+++ b/drivers/i2c/busses/i2c-ali1535.c
@@ -485,6 +485,8 @@ MODULE_DEVICE_TABLE(pci, ali1535_ids);
static int ali1535_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
+ int ret;
+
if (ali1535_setup(dev)) {
dev_warn(&dev->dev,
"ALI1535 not detected, module not inserted.\n");
@@ -496,7 +498,15 @@ static int ali1535_probe(struct pci_dev *dev, const struct pci_device_id *id)
snprintf(ali1535_adapter.name, sizeof(ali1535_adapter.name),
"SMBus ALI1535 adapter at %04x", ali1535_offset);
- return i2c_add_adapter(&ali1535_adapter);
+ ret = i2c_add_adapter(&ali1535_adapter);
+ if (ret)
+ goto release_region;
+
+ return 0;
+
+release_region:
+ release_region(ali1535_smba, ALI1535_SMB_IOSIZE);
+ return ret;
}
static void ali1535_remove(struct pci_dev *dev)
diff --git a/drivers/i2c/busses/i2c-ali15x3.c b/drivers/i2c/busses/i2c-ali15x3.c
index 4761c7208102..418d11266671 100644
--- a/drivers/i2c/busses/i2c-ali15x3.c
+++ b/drivers/i2c/busses/i2c-ali15x3.c
@@ -472,6 +472,8 @@ MODULE_DEVICE_TABLE (pci, ali15x3_ids);
static int ali15x3_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
+ int ret;
+
if (ali15x3_setup(dev)) {
dev_err(&dev->dev,
"ALI15X3 not detected, module not inserted.\n");
@@ -483,7 +485,15 @@ static int ali15x3_probe(struct pci_dev *dev, const struct pci_device_id *id)
snprintf(ali15x3_adapter.name, sizeof(ali15x3_adapter.name),
"SMBus ALI15X3 adapter at %04x", ali15x3_smba);
- return i2c_add_adapter(&ali15x3_adapter);
+ ret = i2c_add_adapter(&ali15x3_adapter);
+ if (ret)
+ goto release_region;
+
+ return 0;
+
+release_region:
+ release_region(ali15x3_smba, ALI15X3_SMB_IOSIZE);
+ return ret;
}
static void ali15x3_remove(struct pci_dev *dev)
diff --git a/drivers/i2c/busses/i2c-amd-mp2-pci.c b/drivers/i2c/busses/i2c-amd-mp2-pci.c
index 143165300949..ef7370d3dbea 100644
--- a/drivers/i2c/busses/i2c-amd-mp2-pci.c
+++ b/drivers/i2c/busses/i2c-amd-mp2-pci.c
@@ -327,13 +327,11 @@ static int amd_mp2_pci_init(struct amd_mp2_dev *privdata,
amd_mp2_irq_isr, irq_flag, dev_name(&pci_dev->dev), privdata);
if (rc) {
pci_err(pci_dev, "Failure requesting irq %i: %d\n", privdata->dev_irq, rc);
- goto free_irq_vectors;
+ goto err_dma_mask;
}
return rc;
-free_irq_vectors:
- free_irq(privdata->dev_irq, privdata);
err_dma_mask:
pci_clear_master(pci_dev);
err_pci_enable:
@@ -376,7 +374,6 @@ static void amd_mp2_pci_remove(struct pci_dev *pci_dev)
pm_runtime_forbid(&pci_dev->dev);
pm_runtime_get_noresume(&pci_dev->dev);
- free_irq(privdata->dev_irq, privdata);
pci_clear_master(pci_dev);
amd_mp2_clear_reg(privdata);
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
index ee0d25b498cb..9e5d454d8318 100644
--- a/drivers/i2c/busses/i2c-imx.c
+++ b/drivers/i2c/busses/i2c-imx.c
@@ -1723,8 +1723,8 @@ static int i2c_imx_probe(struct platform_device *pdev)
return -ENOMEM;
spin_lock_init(&i2c_imx->slave_lock);
- hrtimer_init(&i2c_imx->slave_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
- i2c_imx->slave_timer.function = i2c_imx_slave_timeout;
+ hrtimer_setup(&i2c_imx->slave_timer, i2c_imx_slave_timeout, CLOCK_MONOTONIC,
+ HRTIMER_MODE_ABS);
match = device_get_match_data(&pdev->dev);
if (match)
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
index 92faf03d64cf..f18c3e74b076 100644
--- a/drivers/i2c/busses/i2c-omap.c
+++ b/drivers/i2c/busses/i2c-omap.c
@@ -1048,23 +1048,6 @@ static int omap_i2c_transmit_data(struct omap_i2c_dev *omap, u8 num_bytes,
return 0;
}
-static irqreturn_t
-omap_i2c_isr(int irq, void *dev_id)
-{
- struct omap_i2c_dev *omap = dev_id;
- irqreturn_t ret = IRQ_HANDLED;
- u16 mask;
- u16 stat;
-
- stat = omap_i2c_read_reg(omap, OMAP_I2C_STAT_REG);
- mask = omap_i2c_read_reg(omap, OMAP_I2C_IE_REG) & ~OMAP_I2C_STAT_NACK;
-
- if (stat & mask)
- ret = IRQ_WAKE_THREAD;
-
- return ret;
-}
-
static int omap_i2c_xfer_data(struct omap_i2c_dev *omap)
{
u16 bits;
@@ -1095,8 +1078,13 @@ static int omap_i2c_xfer_data(struct omap_i2c_dev *omap)
}
if (stat & OMAP_I2C_STAT_NACK) {
- err |= OMAP_I2C_STAT_NACK;
+ omap->cmd_err |= OMAP_I2C_STAT_NACK;
omap_i2c_ack_stat(omap, OMAP_I2C_STAT_NACK);
+
+ if (!(stat & ~OMAP_I2C_STAT_NACK)) {
+ err = -EAGAIN;
+ break;
+ }
}
if (stat & OMAP_I2C_STAT_AL) {
@@ -1472,7 +1460,7 @@ omap_i2c_probe(struct platform_device *pdev)
IRQF_NO_SUSPEND, pdev->name, omap);
else
r = devm_request_threaded_irq(&pdev->dev, omap->irq,
- omap_i2c_isr, omap_i2c_isr_thread,
+ NULL, omap_i2c_isr_thread,
IRQF_NO_SUSPEND | IRQF_ONESHOT,
pdev->name, omap);
diff --git a/drivers/i2c/busses/i2c-sis630.c b/drivers/i2c/busses/i2c-sis630.c
index 3505cf29cedd..a19c3d251804 100644
--- a/drivers/i2c/busses/i2c-sis630.c
+++ b/drivers/i2c/busses/i2c-sis630.c
@@ -509,6 +509,8 @@ MODULE_DEVICE_TABLE(pci, sis630_ids);
static int sis630_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
+ int ret;
+
if (sis630_setup(dev)) {
dev_err(&dev->dev,
"SIS630 compatible bus not detected, "
@@ -522,7 +524,15 @@ static int sis630_probe(struct pci_dev *dev, const struct pci_device_id *id)
snprintf(sis630_adapter.name, sizeof(sis630_adapter.name),
"SMBus SIS630 adapter at %04x", smbus_base + SMB_STS);
- return i2c_add_adapter(&sis630_adapter);
+ ret = i2c_add_adapter(&sis630_adapter);
+ if (ret)
+ goto release_region;
+
+ return 0;
+
+release_region:
+ release_region(smbus_base + SMB_STS, SIS630_SMB_IOREGION);
+ return ret;
}
static void sis630_remove(struct pci_dev *dev)
diff --git a/drivers/idle/Makefile b/drivers/idle/Makefile
index 0a3c37510079..a34af1ba09bd 100644
--- a/drivers/idle/Makefile
+++ b/drivers/idle/Makefile
@@ -1,3 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
-obj-$(CONFIG_INTEL_IDLE) += intel_idle.o
+# Branch profiling isn't noinstr-safe
+ccflags-$(CONFIG_TRACE_BRANCH_PROFILING) += -DDISABLE_BRANCH_PROFILING
+
+obj-$(CONFIG_INTEL_IDLE) += intel_idle.o
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index 0fdb1d1316c4..976f5be54e36 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -58,6 +58,7 @@
#include <asm/spec-ctrl.h>
#include <asm/tsc.h>
#include <asm/fpu/api.h>
+#include <asm/smp.h>
#define INTEL_IDLE_VERSION "0.5.1"
@@ -90,7 +91,6 @@ struct idle_cpu {
* Indicate which enable bits to clear here.
*/
unsigned long auto_demotion_disable_flags;
- bool byt_auto_demotion_disable_flag;
bool disable_promotion_to_c1e;
bool use_acpi;
};
@@ -229,6 +229,15 @@ static __cpuidle int intel_idle_s2idle(struct cpuidle_device *dev,
return 0;
}
+static void intel_idle_enter_dead(struct cpuidle_device *dev, int index)
+{
+ struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
+ struct cpuidle_state *state = &drv->states[index];
+ unsigned long eax = flg2MWAIT(state->flags);
+
+ mwait_play_dead(eax);
+}
+
/*
* States are indexed by the cstate number,
* which is also the index into the MWAIT hint array.
@@ -1464,13 +1473,11 @@ static const struct idle_cpu idle_cpu_snx __initconst = {
static const struct idle_cpu idle_cpu_byt __initconst = {
.state_table = byt_cstates,
.disable_promotion_to_c1e = true,
- .byt_auto_demotion_disable_flag = true,
};
static const struct idle_cpu idle_cpu_cht __initconst = {
.state_table = cht_cstates,
.disable_promotion_to_c1e = true,
- .byt_auto_demotion_disable_flag = true,
};
static const struct idle_cpu idle_cpu_ivb __initconst = {
@@ -1696,6 +1703,10 @@ static bool force_use_acpi __read_mostly; /* No effect if no_acpi is set. */
module_param_named(use_acpi, force_use_acpi, bool, 0444);
MODULE_PARM_DESC(use_acpi, "Use ACPI _CST for building the idle states list");
+static bool no_native __read_mostly; /* No effect if no_acpi is set. */
+module_param_named(no_native, no_native, bool, 0444);
+MODULE_PARM_DESC(no_native, "Ignore cpu specific (native) idle states in lieu of ACPI idle states");
+
static struct acpi_processor_power acpi_state_table __initdata;
/**
@@ -1804,6 +1815,7 @@ static void __init intel_idle_init_cstates_acpi(struct cpuidle_driver *drv)
mark_tsc_unstable("TSC halts in idle");
state->enter = intel_idle;
+ state->enter_dead = intel_idle_enter_dead;
state->enter_s2idle = intel_idle_s2idle;
}
}
@@ -1838,6 +1850,11 @@ static bool __init intel_idle_off_by_default(unsigned int flags, u32 mwait_hint)
}
return true;
}
+
+static inline bool ignore_native(void)
+{
+ return no_native && !no_acpi;
+}
#else /* !CONFIG_ACPI_PROCESSOR_CSTATE */
#define force_use_acpi (false)
@@ -1847,6 +1864,7 @@ static inline bool intel_idle_off_by_default(unsigned int flags, u32 mwait_hint)
{
return false;
}
+static inline bool ignore_native(void) { return false; }
#endif /* !CONFIG_ACPI_PROCESSOR_CSTATE */
/**
@@ -2059,6 +2077,15 @@ static void __init spr_idle_state_table_update(void)
}
}
+/**
+ * byt_cht_auto_demotion_disable - Disable Bay/Cherry Trail auto-demotion.
+ */
+static void __init byt_cht_auto_demotion_disable(void)
+{
+ wrmsrl(MSR_CC6_DEMOTION_POLICY_CONFIG, 0);
+ wrmsrl(MSR_MC6_DEMOTION_POLICY_CONFIG, 0);
+}
+
static bool __init intel_idle_verify_cstate(unsigned int mwait_hint)
{
unsigned int mwait_cstate = (MWAIT_HINT2CSTATE(mwait_hint) + 1) &
@@ -2140,6 +2167,10 @@ static void __init intel_idle_init_cstates_icpu(struct cpuidle_driver *drv)
case INTEL_ATOM_GRACEMONT:
adl_idle_state_table_update();
break;
+ case INTEL_ATOM_SILVERMONT:
+ case INTEL_ATOM_AIRMONT:
+ byt_cht_auto_demotion_disable();
+ break;
}
for (cstate = 0; cstate < CPUIDLE_STATE_MAX; ++cstate) {
@@ -2153,6 +2184,9 @@ static void __init intel_idle_init_cstates_icpu(struct cpuidle_driver *drv)
!cpuidle_state_table[cstate].enter_s2idle)
break;
+ if (!cpuidle_state_table[cstate].enter_dead)
+ cpuidle_state_table[cstate].enter_dead = intel_idle_enter_dead;
+
/* If marked as unusable, skip this state. */
if (cpuidle_state_table[cstate].flags & CPUIDLE_FLAG_UNUSABLE) {
pr_debug("state %s is disabled\n",
@@ -2182,11 +2216,6 @@ static void __init intel_idle_init_cstates_icpu(struct cpuidle_driver *drv)
drv->state_count++;
}
-
- if (icpu->byt_auto_demotion_disable_flag) {
- wrmsrl(MSR_CC6_DEMOTION_POLICY_CONFIG, 0);
- wrmsrl(MSR_MC6_DEMOTION_POLICY_CONFIG, 0);
- }
}
/**
@@ -2332,6 +2361,10 @@ static int __init intel_idle_init(void)
pr_debug("MWAIT substates: 0x%x\n", mwait_substates);
icpu = (const struct idle_cpu *)id->driver_data;
+ if (icpu && ignore_native()) {
+ pr_debug("ignoring native CPU idle states\n");
+ icpu = NULL;
+ }
if (icpu) {
if (icpu->state_table)
cpuidle_state_table = icpu->state_table;
diff --git a/drivers/iio/adc/ad7192.c b/drivers/iio/adc/ad7192.c
index e96a5ae92375..cfaf8f7e0a07 100644
--- a/drivers/iio/adc/ad7192.c
+++ b/drivers/iio/adc/ad7192.c
@@ -1084,7 +1084,7 @@ static int ad7192_update_scan_mode(struct iio_dev *indio_dev, const unsigned lon
conf &= ~AD7192_CONF_CHAN_MASK;
for_each_set_bit(i, scan_mask, 8)
- conf |= FIELD_PREP(AD7192_CONF_CHAN_MASK, i);
+ conf |= FIELD_PREP(AD7192_CONF_CHAN_MASK, BIT(i));
ret = ad_sd_write_reg(&st->sd, AD7192_REG_CONF, 3, conf);
if (ret < 0)
diff --git a/drivers/iio/adc/ad7606.c b/drivers/iio/adc/ad7606.c
index d8e3c7a43678..d39354afd539 100644
--- a/drivers/iio/adc/ad7606.c
+++ b/drivers/iio/adc/ad7606.c
@@ -1047,7 +1047,7 @@ static int ad7606_read_avail(struct iio_dev *indio_dev,
cs = &st->chan_scales[ch];
*vals = (int *)cs->scale_avail;
- *length = cs->num_scales;
+ *length = cs->num_scales * 2;
*type = IIO_VAL_INT_PLUS_MICRO;
return IIO_AVAIL_LIST;
diff --git a/drivers/iio/adc/at91-sama5d2_adc.c b/drivers/iio/adc/at91-sama5d2_adc.c
index 8e5aaf15a921..c3a1dea2aa82 100644
--- a/drivers/iio/adc/at91-sama5d2_adc.c
+++ b/drivers/iio/adc/at91-sama5d2_adc.c
@@ -329,7 +329,7 @@ static const struct at91_adc_reg_layout sama7g5_layout = {
#define AT91_HWFIFO_MAX_SIZE_STR "128"
#define AT91_HWFIFO_MAX_SIZE 128
-#define AT91_SAMA5D2_CHAN_SINGLE(index, num, addr) \
+#define AT91_SAMA_CHAN_SINGLE(index, num, addr, rbits) \
{ \
.type = IIO_VOLTAGE, \
.channel = num, \
@@ -337,7 +337,7 @@ static const struct at91_adc_reg_layout sama7g5_layout = {
.scan_index = index, \
.scan_type = { \
.sign = 'u', \
- .realbits = 14, \
+ .realbits = rbits, \
.storagebits = 16, \
}, \
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
@@ -350,7 +350,13 @@ static const struct at91_adc_reg_layout sama7g5_layout = {
.indexed = 1, \
}
-#define AT91_SAMA5D2_CHAN_DIFF(index, num, num2, addr) \
+#define AT91_SAMA5D2_CHAN_SINGLE(index, num, addr) \
+ AT91_SAMA_CHAN_SINGLE(index, num, addr, 14)
+
+#define AT91_SAMA7G5_CHAN_SINGLE(index, num, addr) \
+ AT91_SAMA_CHAN_SINGLE(index, num, addr, 16)
+
+#define AT91_SAMA_CHAN_DIFF(index, num, num2, addr, rbits) \
{ \
.type = IIO_VOLTAGE, \
.differential = 1, \
@@ -360,7 +366,7 @@ static const struct at91_adc_reg_layout sama7g5_layout = {
.scan_index = index, \
.scan_type = { \
.sign = 's', \
- .realbits = 14, \
+ .realbits = rbits, \
.storagebits = 16, \
}, \
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
@@ -373,6 +379,12 @@ static const struct at91_adc_reg_layout sama7g5_layout = {
.indexed = 1, \
}
+#define AT91_SAMA5D2_CHAN_DIFF(index, num, num2, addr) \
+ AT91_SAMA_CHAN_DIFF(index, num, num2, addr, 14)
+
+#define AT91_SAMA7G5_CHAN_DIFF(index, num, num2, addr) \
+ AT91_SAMA_CHAN_DIFF(index, num, num2, addr, 16)
+
#define AT91_SAMA5D2_CHAN_TOUCH(num, name, mod) \
{ \
.type = IIO_POSITIONRELATIVE, \
@@ -666,30 +678,30 @@ static const struct iio_chan_spec at91_sama5d2_adc_channels[] = {
};
static const struct iio_chan_spec at91_sama7g5_adc_channels[] = {
- AT91_SAMA5D2_CHAN_SINGLE(0, 0, 0x60),
- AT91_SAMA5D2_CHAN_SINGLE(1, 1, 0x64),
- AT91_SAMA5D2_CHAN_SINGLE(2, 2, 0x68),
- AT91_SAMA5D2_CHAN_SINGLE(3, 3, 0x6c),
- AT91_SAMA5D2_CHAN_SINGLE(4, 4, 0x70),
- AT91_SAMA5D2_CHAN_SINGLE(5, 5, 0x74),
- AT91_SAMA5D2_CHAN_SINGLE(6, 6, 0x78),
- AT91_SAMA5D2_CHAN_SINGLE(7, 7, 0x7c),
- AT91_SAMA5D2_CHAN_SINGLE(8, 8, 0x80),
- AT91_SAMA5D2_CHAN_SINGLE(9, 9, 0x84),
- AT91_SAMA5D2_CHAN_SINGLE(10, 10, 0x88),
- AT91_SAMA5D2_CHAN_SINGLE(11, 11, 0x8c),
- AT91_SAMA5D2_CHAN_SINGLE(12, 12, 0x90),
- AT91_SAMA5D2_CHAN_SINGLE(13, 13, 0x94),
- AT91_SAMA5D2_CHAN_SINGLE(14, 14, 0x98),
- AT91_SAMA5D2_CHAN_SINGLE(15, 15, 0x9c),
- AT91_SAMA5D2_CHAN_DIFF(16, 0, 1, 0x60),
- AT91_SAMA5D2_CHAN_DIFF(17, 2, 3, 0x68),
- AT91_SAMA5D2_CHAN_DIFF(18, 4, 5, 0x70),
- AT91_SAMA5D2_CHAN_DIFF(19, 6, 7, 0x78),
- AT91_SAMA5D2_CHAN_DIFF(20, 8, 9, 0x80),
- AT91_SAMA5D2_CHAN_DIFF(21, 10, 11, 0x88),
- AT91_SAMA5D2_CHAN_DIFF(22, 12, 13, 0x90),
- AT91_SAMA5D2_CHAN_DIFF(23, 14, 15, 0x98),
+ AT91_SAMA7G5_CHAN_SINGLE(0, 0, 0x60),
+ AT91_SAMA7G5_CHAN_SINGLE(1, 1, 0x64),
+ AT91_SAMA7G5_CHAN_SINGLE(2, 2, 0x68),
+ AT91_SAMA7G5_CHAN_SINGLE(3, 3, 0x6c),
+ AT91_SAMA7G5_CHAN_SINGLE(4, 4, 0x70),
+ AT91_SAMA7G5_CHAN_SINGLE(5, 5, 0x74),
+ AT91_SAMA7G5_CHAN_SINGLE(6, 6, 0x78),
+ AT91_SAMA7G5_CHAN_SINGLE(7, 7, 0x7c),
+ AT91_SAMA7G5_CHAN_SINGLE(8, 8, 0x80),
+ AT91_SAMA7G5_CHAN_SINGLE(9, 9, 0x84),
+ AT91_SAMA7G5_CHAN_SINGLE(10, 10, 0x88),
+ AT91_SAMA7G5_CHAN_SINGLE(11, 11, 0x8c),
+ AT91_SAMA7G5_CHAN_SINGLE(12, 12, 0x90),
+ AT91_SAMA7G5_CHAN_SINGLE(13, 13, 0x94),
+ AT91_SAMA7G5_CHAN_SINGLE(14, 14, 0x98),
+ AT91_SAMA7G5_CHAN_SINGLE(15, 15, 0x9c),
+ AT91_SAMA7G5_CHAN_DIFF(16, 0, 1, 0x60),
+ AT91_SAMA7G5_CHAN_DIFF(17, 2, 3, 0x68),
+ AT91_SAMA7G5_CHAN_DIFF(18, 4, 5, 0x70),
+ AT91_SAMA7G5_CHAN_DIFF(19, 6, 7, 0x78),
+ AT91_SAMA7G5_CHAN_DIFF(20, 8, 9, 0x80),
+ AT91_SAMA7G5_CHAN_DIFF(21, 10, 11, 0x88),
+ AT91_SAMA7G5_CHAN_DIFF(22, 12, 13, 0x90),
+ AT91_SAMA7G5_CHAN_DIFF(23, 14, 15, 0x98),
IIO_CHAN_SOFT_TIMESTAMP(24),
AT91_SAMA5D2_CHAN_TEMP(AT91_SAMA7G5_ADC_TEMP_CHANNEL, "temp", 0xdc),
};
diff --git a/drivers/iio/adc/pac1921.c b/drivers/iio/adc/pac1921.c
index 90f61c47b1c4..63f518215156 100644
--- a/drivers/iio/adc/pac1921.c
+++ b/drivers/iio/adc/pac1921.c
@@ -1198,11 +1198,11 @@ static int pac1921_match_acpi_device(struct iio_dev *indio_dev)
label = devm_kstrdup(dev, status->package.elements[0].string.pointer,
GFP_KERNEL);
+ ACPI_FREE(status);
if (!label)
return -ENOMEM;
indio_dev->label = label;
- ACPI_FREE(status);
return 0;
}
diff --git a/drivers/iio/adc/ti-tsc2046.c b/drivers/iio/adc/ti-tsc2046.c
index 7dde5713973f..49560059f4b7 100644
--- a/drivers/iio/adc/ti-tsc2046.c
+++ b/drivers/iio/adc/ti-tsc2046.c
@@ -812,9 +812,7 @@ static int tsc2046_adc_probe(struct spi_device *spi)
spin_lock_init(&priv->state_lock);
priv->state = TSC2046_STATE_SHUTDOWN;
- hrtimer_init(&priv->trig_timer, CLOCK_MONOTONIC,
- HRTIMER_MODE_REL_SOFT);
- priv->trig_timer.function = tsc2046_adc_timer;
+ hrtimer_setup(&priv->trig_timer, tsc2046_adc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_SOFT);
ret = devm_iio_trigger_register(dev, trig);
if (ret) {
diff --git a/drivers/iio/dac/ad3552r.c b/drivers/iio/dac/ad3552r.c
index e7206af53af6..7944f5c1d264 100644
--- a/drivers/iio/dac/ad3552r.c
+++ b/drivers/iio/dac/ad3552r.c
@@ -410,6 +410,12 @@ static int ad3552r_reset(struct ad3552r_desc *dac)
return ret;
}
+ /* Clear reset error flag, see ad3552r manual, rev B table 38. */
+ ret = ad3552r_write_reg(dac, AD3552R_REG_ADDR_ERR_STATUS,
+ AD3552R_MASK_RESET_STATUS);
+ if (ret)
+ return ret;
+
return ad3552r_update_reg_field(dac,
AD3552R_REG_ADDR_INTERFACE_CONFIG_A,
AD3552R_MASK_ADDR_ASCENSION,
diff --git a/drivers/iio/filter/admv8818.c b/drivers/iio/filter/admv8818.c
index 848baa6e3bbf..d85b7d3de866 100644
--- a/drivers/iio/filter/admv8818.c
+++ b/drivers/iio/filter/admv8818.c
@@ -574,21 +574,15 @@ static int admv8818_init(struct admv8818_state *st)
struct spi_device *spi = st->spi;
unsigned int chip_id;
- ret = regmap_update_bits(st->regmap, ADMV8818_REG_SPI_CONFIG_A,
- ADMV8818_SOFTRESET_N_MSK |
- ADMV8818_SOFTRESET_MSK,
- FIELD_PREP(ADMV8818_SOFTRESET_N_MSK, 1) |
- FIELD_PREP(ADMV8818_SOFTRESET_MSK, 1));
+ ret = regmap_write(st->regmap, ADMV8818_REG_SPI_CONFIG_A,
+ ADMV8818_SOFTRESET_N_MSK | ADMV8818_SOFTRESET_MSK);
if (ret) {
dev_err(&spi->dev, "ADMV8818 Soft Reset failed.\n");
return ret;
}
- ret = regmap_update_bits(st->regmap, ADMV8818_REG_SPI_CONFIG_A,
- ADMV8818_SDOACTIVE_N_MSK |
- ADMV8818_SDOACTIVE_MSK,
- FIELD_PREP(ADMV8818_SDOACTIVE_N_MSK, 1) |
- FIELD_PREP(ADMV8818_SDOACTIVE_MSK, 1));
+ ret = regmap_write(st->regmap, ADMV8818_REG_SPI_CONFIG_A,
+ ADMV8818_SDOACTIVE_N_MSK | ADMV8818_SDOACTIVE_MSK);
if (ret) {
dev_err(&spi->dev, "ADMV8818 SDO Enable failed.\n");
return ret;
diff --git a/drivers/iio/imu/st_lsm9ds0/st_lsm9ds0_i2c.c b/drivers/iio/imu/st_lsm9ds0/st_lsm9ds0_i2c.c
index 0732cfa258c4..8cc071463249 100644
--- a/drivers/iio/imu/st_lsm9ds0/st_lsm9ds0_i2c.c
+++ b/drivers/iio/imu/st_lsm9ds0/st_lsm9ds0_i2c.c
@@ -7,7 +7,7 @@
* Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
*/
-#include <linux/device.h>
+#include <linux/device/devres.h>
#include <linux/err.h>
#include <linux/gfp_types.h>
#include <linux/i2c.h>
diff --git a/drivers/iio/imu/st_lsm9ds0/st_lsm9ds0_spi.c b/drivers/iio/imu/st_lsm9ds0/st_lsm9ds0_spi.c
index 43ec57c1e604..806e55f75f65 100644
--- a/drivers/iio/imu/st_lsm9ds0/st_lsm9ds0_spi.c
+++ b/drivers/iio/imu/st_lsm9ds0/st_lsm9ds0_spi.c
@@ -7,7 +7,7 @@
* Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
*/
-#include <linux/device.h>
+#include <linux/device/devres.h>
#include <linux/err.h>
#include <linux/gfp_types.h>
#include <linux/module.h>
diff --git a/drivers/iio/light/apds9306.c b/drivers/iio/light/apds9306.c
index 69a0d609cffc..5ed7e17f49e7 100644
--- a/drivers/iio/light/apds9306.c
+++ b/drivers/iio/light/apds9306.c
@@ -108,11 +108,11 @@ static const struct part_id_gts_multiplier apds9306_gts_mul[] = {
{
.part_id = 0xB1,
.max_scale_int = 16,
- .max_scale_nano = 3264320,
+ .max_scale_nano = 326432000,
}, {
.part_id = 0xB3,
.max_scale_int = 14,
- .max_scale_nano = 9712000,
+ .max_scale_nano = 97120000,
},
};
diff --git a/drivers/iio/light/hid-sensor-prox.c b/drivers/iio/light/hid-sensor-prox.c
index 7ab64f5c623c..76b76d12b388 100644
--- a/drivers/iio/light/hid-sensor-prox.c
+++ b/drivers/iio/light/hid-sensor-prox.c
@@ -49,9 +49,10 @@ static const u32 prox_sensitivity_addresses[] = {
#define PROX_CHANNEL(_is_proximity, _channel) \
{\
.type = _is_proximity ? IIO_PROXIMITY : IIO_ATTENTION,\
- .info_mask_separate = _is_proximity ? BIT(IIO_CHAN_INFO_RAW) :\
- BIT(IIO_CHAN_INFO_PROCESSED),\
- .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_OFFSET) |\
+ .info_mask_separate = \
+ (_is_proximity ? BIT(IIO_CHAN_INFO_RAW) :\
+ BIT(IIO_CHAN_INFO_PROCESSED)) |\
+ BIT(IIO_CHAN_INFO_OFFSET) |\
BIT(IIO_CHAN_INFO_SCALE) |\
BIT(IIO_CHAN_INFO_SAMP_FREQ) |\
BIT(IIO_CHAN_INFO_HYSTERESIS),\
diff --git a/drivers/iio/proximity/hx9023s.c b/drivers/iio/proximity/hx9023s.c
index e092a935dbac..5aa8e5a22f32 100644
--- a/drivers/iio/proximity/hx9023s.c
+++ b/drivers/iio/proximity/hx9023s.c
@@ -1036,12 +1036,13 @@ static int hx9023s_send_cfg(const struct firmware *fw, struct hx9023s_data *data
return -ENOMEM;
memcpy(bin->data, fw->data, fw->size);
- release_firmware(fw);
bin->fw_size = fw->size;
bin->fw_ver = bin->data[FW_VER_OFFSET];
bin->reg_count = get_unaligned_le16(bin->data + FW_REG_CNT_OFFSET);
+ release_firmware(fw);
+
return hx9023s_bin_load(data, bin);
}
diff --git a/drivers/iio/trigger/iio-trig-hrtimer.c b/drivers/iio/trigger/iio-trig-hrtimer.c
index 716c795d08fb..82c72baccb62 100644
--- a/drivers/iio/trigger/iio-trig-hrtimer.c
+++ b/drivers/iio/trigger/iio-trig-hrtimer.c
@@ -145,8 +145,8 @@ static struct iio_sw_trigger *iio_trig_hrtimer_probe(const char *name)
trig_info->swt.trigger->ops = &iio_hrtimer_trigger_ops;
trig_info->swt.trigger->dev.groups = iio_hrtimer_attr_groups;
- hrtimer_init(&trig_info->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
- trig_info->timer.function = iio_hrtimer_trig_handler;
+ hrtimer_setup(&trig_info->timer, iio_hrtimer_trig_handler, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL_HARD);
trig_info->sampling_frequency[0] = HRTIMER_DEFAULT_SAMPLING_FREQUENCY;
trig_info->period = NSEC_PER_SEC / trig_info->sampling_frequency[0];
diff --git a/drivers/infiniband/hw/bnxt_re/bnxt_re.h b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
index 3721446c6ba4..502a79136d4d 100644
--- a/drivers/infiniband/hw/bnxt_re/bnxt_re.h
+++ b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
@@ -53,12 +53,6 @@
#define BNXT_RE_MAX_MR_SIZE_HIGH BIT_ULL(39)
#define BNXT_RE_MAX_MR_SIZE BNXT_RE_MAX_MR_SIZE_HIGH
-#define BNXT_RE_MAX_QPC_COUNT (64 * 1024)
-#define BNXT_RE_MAX_MRW_COUNT (64 * 1024)
-#define BNXT_RE_MAX_SRQC_COUNT (64 * 1024)
-#define BNXT_RE_MAX_CQ_COUNT (64 * 1024)
-#define BNXT_RE_MAX_MRW_COUNT_64K (64 * 1024)
-#define BNXT_RE_MAX_MRW_COUNT_256K (256 * 1024)
/* Number of MRs to reserve for PF, leaving remainder for VFs */
#define BNXT_RE_RESVD_MR_FOR_PF (32 * 1024)
diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
index a94c8c5387d9..4659a2f73364 100644
--- a/drivers/infiniband/hw/bnxt_re/main.c
+++ b/drivers/infiniband/hw/bnxt_re/main.c
@@ -2130,8 +2130,7 @@ static int bnxt_re_dev_init(struct bnxt_re_dev *rdev, u8 op_type)
* memory for the function and all child VFs
*/
rc = bnxt_qplib_alloc_rcfw_channel(&rdev->qplib_res, &rdev->rcfw,
- &rdev->qplib_ctx,
- BNXT_RE_MAX_QPC_COUNT);
+ &rdev->qplib_ctx);
if (rc) {
ibdev_err(&rdev->ibdev,
"Failed to allocate RCFW Channel: %#x\n", rc);
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
index 5336f74297f8..457eecb99f96 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
@@ -1217,8 +1217,6 @@ static void __modify_flags_from_init_state(struct bnxt_qplib_qp *qp)
qp->path_mtu =
CMDQ_MODIFY_QP_PATH_MTU_MTU_2048;
}
- qp->modify_flags &=
- ~CMDQ_MODIFY_QP_MODIFY_MASK_VLAN_ID;
/* Bono FW require the max_dest_rd_atomic to be >= 1 */
if (qp->max_dest_rd_atomic < 1)
qp->max_dest_rd_atomic = 1;
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
index 17e62f22683b..d23074383428 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
@@ -915,7 +915,6 @@ skip_ctx_setup:
void bnxt_qplib_free_rcfw_channel(struct bnxt_qplib_rcfw *rcfw)
{
- kfree(rcfw->qp_tbl);
kfree(rcfw->crsqe_tbl);
bnxt_qplib_free_hwq(rcfw->res, &rcfw->cmdq.hwq);
bnxt_qplib_free_hwq(rcfw->res, &rcfw->creq.hwq);
@@ -924,8 +923,7 @@ void bnxt_qplib_free_rcfw_channel(struct bnxt_qplib_rcfw *rcfw)
int bnxt_qplib_alloc_rcfw_channel(struct bnxt_qplib_res *res,
struct bnxt_qplib_rcfw *rcfw,
- struct bnxt_qplib_ctx *ctx,
- int qp_tbl_sz)
+ struct bnxt_qplib_ctx *ctx)
{
struct bnxt_qplib_hwq_attr hwq_attr = {};
struct bnxt_qplib_sg_info sginfo = {};
@@ -969,12 +967,6 @@ int bnxt_qplib_alloc_rcfw_channel(struct bnxt_qplib_res *res,
if (!rcfw->crsqe_tbl)
goto fail;
- /* Allocate one extra to hold the QP1 entries */
- rcfw->qp_tbl_size = qp_tbl_sz + 1;
- rcfw->qp_tbl = kcalloc(rcfw->qp_tbl_size, sizeof(struct bnxt_qplib_qp_node),
- GFP_KERNEL);
- if (!rcfw->qp_tbl)
- goto fail;
spin_lock_init(&rcfw->tbl_lock);
rcfw->max_timeout = res->cctx->hwrm_cmd_max_timeout;
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
index 88814cb3aa74..ff873c5f1b25 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
@@ -262,8 +262,7 @@ static inline void bnxt_qplib_fill_cmdqmsg(struct bnxt_qplib_cmdqmsg *msg,
void bnxt_qplib_free_rcfw_channel(struct bnxt_qplib_rcfw *rcfw);
int bnxt_qplib_alloc_rcfw_channel(struct bnxt_qplib_res *res,
struct bnxt_qplib_rcfw *rcfw,
- struct bnxt_qplib_ctx *ctx,
- int qp_tbl_sz);
+ struct bnxt_qplib_ctx *ctx);
void bnxt_qplib_rcfw_stop_irq(struct bnxt_qplib_rcfw *rcfw, bool kill);
void bnxt_qplib_disable_rcfw_channel(struct bnxt_qplib_rcfw *rcfw);
int bnxt_qplib_rcfw_start_irq(struct bnxt_qplib_rcfw *rcfw, int msix_vector,
@@ -285,9 +284,10 @@ int bnxt_qplib_deinit_rcfw(struct bnxt_qplib_rcfw *rcfw);
int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw,
struct bnxt_qplib_ctx *ctx, int is_virtfn);
void bnxt_qplib_mark_qp_error(void *qp_handle);
+
static inline u32 map_qp_id_to_tbl_indx(u32 qid, struct bnxt_qplib_rcfw *rcfw)
{
/* Last index of the qp_tbl is for QP1 ie. qp_tbl_size - 1*/
- return (qid == 1) ? rcfw->qp_tbl_size - 1 : qid % rcfw->qp_tbl_size - 2;
+ return (qid == 1) ? rcfw->qp_tbl_size - 1 : (qid % (rcfw->qp_tbl_size - 2));
}
#endif /* __BNXT_QPLIB_RCFW_H__ */
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.c b/drivers/infiniband/hw/bnxt_re/qplib_res.c
index 02922a0987ad..6cd05207ffed 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_res.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_res.c
@@ -871,6 +871,7 @@ int bnxt_qplib_init_res(struct bnxt_qplib_res *res)
void bnxt_qplib_free_res(struct bnxt_qplib_res *res)
{
+ kfree(res->rcfw->qp_tbl);
bnxt_qplib_free_sgid_tbl(res, &res->sgid_tbl);
bnxt_qplib_free_pd_tbl(&res->pd_tbl);
bnxt_qplib_free_dpi_tbl(res, &res->dpi_tbl);
@@ -878,12 +879,20 @@ void bnxt_qplib_free_res(struct bnxt_qplib_res *res)
int bnxt_qplib_alloc_res(struct bnxt_qplib_res *res, struct net_device *netdev)
{
+ struct bnxt_qplib_rcfw *rcfw = res->rcfw;
struct bnxt_qplib_dev_attr *dev_attr;
int rc;
res->netdev = netdev;
dev_attr = res->dattr;
+ /* Allocate one extra to hold the QP1 entries */
+ rcfw->qp_tbl_size = max_t(u32, BNXT_RE_MAX_QPC_COUNT + 1, dev_attr->max_qp);
+ rcfw->qp_tbl = kcalloc(rcfw->qp_tbl_size, sizeof(struct bnxt_qplib_qp_node),
+ GFP_KERNEL);
+ if (!rcfw->qp_tbl)
+ return -ENOMEM;
+
rc = bnxt_qplib_alloc_sgid_tbl(res, &res->sgid_tbl, dev_attr->max_sgid);
if (rc)
goto fail;
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.h b/drivers/infiniband/hw/bnxt_re/qplib_res.h
index 711990232de1..6a13927674b4 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_res.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_res.h
@@ -49,6 +49,13 @@ extern const struct bnxt_qplib_gid bnxt_qplib_gid_zero;
#define CHIP_NUM_58818 0xd818
#define CHIP_NUM_57608 0x1760
+#define BNXT_RE_MAX_QPC_COUNT (64 * 1024)
+#define BNXT_RE_MAX_MRW_COUNT (64 * 1024)
+#define BNXT_RE_MAX_SRQC_COUNT (64 * 1024)
+#define BNXT_RE_MAX_CQ_COUNT (64 * 1024)
+#define BNXT_RE_MAX_MRW_COUNT_64K (64 * 1024)
+#define BNXT_RE_MAX_MRW_COUNT_256K (256 * 1024)
+
#define BNXT_QPLIB_DBR_VALID (0x1UL << 26)
#define BNXT_QPLIB_DBR_EPOCH_SHIFT 24
#define BNXT_QPLIB_DBR_TOGGLE_SHIFT 25
@@ -600,4 +607,9 @@ static inline bool _is_cq_coalescing_supported(u16 dev_cap_ext_flags2)
return dev_cap_ext_flags2 & CREQ_QUERY_FUNC_RESP_SB_CQ_COALESCING_SUPPORTED;
}
+static inline bool _is_max_srq_ext_supported(u16 dev_cap_ext_flags_2)
+{
+ return !!(dev_cap_ext_flags_2 & CREQ_QUERY_FUNC_RESP_SB_MAX_SRQ_EXTENDED);
+}
+
#endif /* __BNXT_QPLIB_RES_H__ */
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.c b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
index 4ccd4405355a..f231e886ad9d 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
@@ -176,6 +176,9 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw)
attr->dev_cap_flags = le16_to_cpu(sb->dev_cap_flags);
attr->dev_cap_flags2 = le16_to_cpu(sb->dev_cap_ext_flags_2);
+ if (_is_max_srq_ext_supported(attr->dev_cap_flags2))
+ attr->max_srq += le16_to_cpu(sb->max_srq_ext);
+
bnxt_qplib_query_version(rcfw, attr->fw_ver);
for (i = 0; i < MAX_TQM_ALLOC_REQ / 4; i++) {
diff --git a/drivers/infiniband/hw/bnxt_re/roce_hsi.h b/drivers/infiniband/hw/bnxt_re/roce_hsi.h
index 0ee60fdc18b3..7eceb3e9f4ce 100644
--- a/drivers/infiniband/hw/bnxt_re/roce_hsi.h
+++ b/drivers/infiniband/hw/bnxt_re/roce_hsi.h
@@ -2215,11 +2215,12 @@ struct creq_query_func_resp_sb {
#define CREQ_QUERY_FUNC_RESP_SB_REQ_RETRANSMISSION_SUPPORT_IQM_MSN_TABLE (0x2UL << 4)
#define CREQ_QUERY_FUNC_RESP_SB_REQ_RETRANSMISSION_SUPPORT_LAST \
CREQ_QUERY_FUNC_RESP_SB_REQ_RETRANSMISSION_SUPPORT_IQM_MSN_TABLE
+ #define CREQ_QUERY_FUNC_RESP_SB_MAX_SRQ_EXTENDED 0x40UL
#define CREQ_QUERY_FUNC_RESP_SB_MIN_RNR_RTR_RTS_OPT_SUPPORTED 0x1000UL
__le16 max_xp_qp_size;
__le16 create_qp_batch_size;
__le16 destroy_qp_batch_size;
- __le16 reserved16;
+ __le16 max_srq_ext;
__le64 reserved64;
};
diff --git a/drivers/infiniband/hw/hfi1/init.c b/drivers/infiniband/hw/hfi1/init.c
index cbac4a442d9e..d6fbd9c2b8b4 100644
--- a/drivers/infiniband/hw/hfi1/init.c
+++ b/drivers/infiniband/hw/hfi1/init.c
@@ -635,12 +635,11 @@ void hfi1_init_pportdata(struct pci_dev *pdev, struct hfi1_pportdata *ppd,
spin_lock_init(&ppd->cca_timer_lock);
for (i = 0; i < OPA_MAX_SLS; i++) {
- hrtimer_init(&ppd->cca_timer[i].hrtimer, CLOCK_MONOTONIC,
- HRTIMER_MODE_REL);
ppd->cca_timer[i].ppd = ppd;
ppd->cca_timer[i].sl = i;
ppd->cca_timer[i].ccti = 0;
- ppd->cca_timer[i].hrtimer.function = cca_timer_fn;
+ hrtimer_setup(&ppd->cca_timer[i].hrtimer, cca_timer_fn, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
}
ppd->cc_max_table_entries = IB_CC_TABLE_CAP_DEFAULT;
diff --git a/drivers/infiniband/hw/hns/hns_roce_alloc.c b/drivers/infiniband/hw/hns/hns_roce_alloc.c
index 950c133d4220..6ee911f6885b 100644
--- a/drivers/infiniband/hw/hns/hns_roce_alloc.c
+++ b/drivers/infiniband/hw/hns/hns_roce_alloc.c
@@ -175,8 +175,10 @@ void hns_roce_cleanup_bitmap(struct hns_roce_dev *hr_dev)
if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_XRC)
ida_destroy(&hr_dev->xrcd_ida.ida);
- if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ)
+ if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ) {
ida_destroy(&hr_dev->srq_table.srq_ida.ida);
+ xa_destroy(&hr_dev->srq_table.xa);
+ }
hns_roce_cleanup_qp_table(hr_dev);
hns_roce_cleanup_cq_table(hr_dev);
ida_destroy(&hr_dev->mr_table.mtpt_ida.ida);
diff --git a/drivers/infiniband/hw/hns/hns_roce_cq.c b/drivers/infiniband/hw/hns/hns_roce_cq.c
index 4106423a1b39..3a5c93c9fb3e 100644
--- a/drivers/infiniband/hw/hns/hns_roce_cq.c
+++ b/drivers/infiniband/hw/hns/hns_roce_cq.c
@@ -537,5 +537,6 @@ void hns_roce_cleanup_cq_table(struct hns_roce_dev *hr_dev)
for (i = 0; i < HNS_ROCE_CQ_BANK_NUM; i++)
ida_destroy(&hr_dev->cq_table.bank[i].ida);
+ xa_destroy(&hr_dev->cq_table.array);
mutex_destroy(&hr_dev->cq_table.bank_mutex);
}
diff --git a/drivers/infiniband/hw/hns/hns_roce_hem.c b/drivers/infiniband/hw/hns/hns_roce_hem.c
index 605562122ecc..ca0798224e56 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hem.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hem.c
@@ -1361,6 +1361,11 @@ static int hem_list_alloc_root_bt(struct hns_roce_dev *hr_dev,
return ret;
}
+/* Number of bottom-level bt pages of a 100G MR on a 4K page size OS,
+ * assuming the bt page size is not expanded by cal_best_bt_pg_sz()
+ */
+#define RESCHED_LOOP_CNT_THRESHOLD_ON_4K 12800
+
/* construct the base address table and link them by address hop config */
int hns_roce_hem_list_request(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_list *hem_list,
@@ -1369,6 +1374,7 @@ int hns_roce_hem_list_request(struct hns_roce_dev *hr_dev,
{
const struct hns_roce_buf_region *r;
int ofs, end;
+ int loop;
int unit;
int ret;
int i;
@@ -1386,7 +1392,10 @@ int hns_roce_hem_list_request(struct hns_roce_dev *hr_dev,
continue;
end = r->offset + r->count;
- for (ofs = r->offset; ofs < end; ofs += unit) {
+ for (ofs = r->offset, loop = 1; ofs < end; ofs += unit, loop++) {
+ if (!(loop % RESCHED_LOOP_CNT_THRESHOLD_ON_4K))
+ cond_resched();
+
ret = hem_list_alloc_mid_bt(hr_dev, r, unit, ofs,
hem_list->mid_bt[i],
&hem_list->btm_bt);
@@ -1443,9 +1452,14 @@ void *hns_roce_hem_list_find_mtt(struct hns_roce_dev *hr_dev,
struct list_head *head = &hem_list->btm_bt;
struct hns_roce_hem_item *hem, *temp_hem;
void *cpu_base = NULL;
+ int loop = 1;
int nr = 0;
list_for_each_entry_safe(hem, temp_hem, head, sibling) {
+ if (!(loop % RESCHED_LOOP_CNT_THRESHOLD_ON_4K))
+ cond_resched();
+ loop++;
+
if (hem_list_page_is_in_range(hem, offset)) {
nr = offset - hem->start;
cpu_base = hem->addr + nr * BA_BYTE_LEN;
diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c
index ae24c81c9812..cf89a8db4f64 100644
--- a/drivers/infiniband/hw/hns/hns_roce_main.c
+++ b/drivers/infiniband/hw/hns/hns_roce_main.c
@@ -183,7 +183,7 @@ static int hns_roce_query_device(struct ib_device *ib_dev,
IB_DEVICE_RC_RNR_NAK_GEN;
props->max_send_sge = hr_dev->caps.max_sq_sg;
props->max_recv_sge = hr_dev->caps.max_rq_sg;
- props->max_sge_rd = 1;
+ props->max_sge_rd = hr_dev->caps.max_sq_sg;
props->max_cq = hr_dev->caps.num_cqs;
props->max_cqe = hr_dev->caps.max_cqes;
props->max_mr = hr_dev->caps.num_mtpts;
diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
index 9e2e76c59406..8901c142c1b6 100644
--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
+++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
@@ -868,12 +868,14 @@ static int alloc_user_qp_db(struct hns_roce_dev *hr_dev,
struct hns_roce_ib_create_qp *ucmd,
struct hns_roce_ib_create_qp_resp *resp)
{
+ bool has_sdb = user_qp_has_sdb(hr_dev, init_attr, udata, resp, ucmd);
struct hns_roce_ucontext *uctx = rdma_udata_to_drv_context(udata,
struct hns_roce_ucontext, ibucontext);
+ bool has_rdb = user_qp_has_rdb(hr_dev, init_attr, udata, resp);
struct ib_device *ibdev = &hr_dev->ib_dev;
int ret;
- if (user_qp_has_sdb(hr_dev, init_attr, udata, resp, ucmd)) {
+ if (has_sdb) {
ret = hns_roce_db_map_user(uctx, ucmd->sdb_addr, &hr_qp->sdb);
if (ret) {
ibdev_err(ibdev,
@@ -884,7 +886,7 @@ static int alloc_user_qp_db(struct hns_roce_dev *hr_dev,
hr_qp->en_flags |= HNS_ROCE_QP_CAP_SQ_RECORD_DB;
}
- if (user_qp_has_rdb(hr_dev, init_attr, udata, resp)) {
+ if (has_rdb) {
ret = hns_roce_db_map_user(uctx, ucmd->db_addr, &hr_qp->rdb);
if (ret) {
ibdev_err(ibdev,
@@ -898,7 +900,7 @@ static int alloc_user_qp_db(struct hns_roce_dev *hr_dev,
return 0;
err_sdb:
- if (hr_qp->en_flags & HNS_ROCE_QP_CAP_SQ_RECORD_DB)
+ if (has_sdb)
hns_roce_db_unmap_user(uctx, &hr_qp->sdb);
err_out:
return ret;
@@ -1119,24 +1121,23 @@ static int set_qp_param(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
ibucontext);
hr_qp->config = uctx->config;
ret = set_user_sq_size(hr_dev, &init_attr->cap, hr_qp, ucmd);
- if (ret)
+ if (ret) {
ibdev_err(ibdev,
"failed to set user SQ size, ret = %d.\n",
ret);
+ return ret;
+ }
ret = set_congest_param(hr_dev, hr_qp, ucmd);
- if (ret)
- return ret;
} else {
if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09)
hr_qp->config = HNS_ROCE_EXSGE_FLAGS;
+ default_congest_type(hr_dev, hr_qp);
ret = set_kernel_sq_size(hr_dev, &init_attr->cap, hr_qp);
if (ret)
ibdev_err(ibdev,
"failed to set kernel SQ size, ret = %d.\n",
ret);
-
- default_congest_type(hr_dev, hr_qp);
}
return ret;
@@ -1219,7 +1220,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
min(udata->outlen, sizeof(resp)));
if (ret) {
ibdev_err(ibdev, "copy qp resp failed!\n");
- goto err_store;
+ goto err_flow_ctrl;
}
}
@@ -1602,6 +1603,7 @@ void hns_roce_cleanup_qp_table(struct hns_roce_dev *hr_dev)
for (i = 0; i < HNS_ROCE_QP_BANK_NUM; i++)
ida_destroy(&hr_dev->qp_table.bank[i].ida);
xa_destroy(&hr_dev->qp_table.dip_xa);
+ xa_destroy(&hr_dev->qp_table_xa);
mutex_destroy(&hr_dev->qp_table.bank_mutex);
mutex_destroy(&hr_dev->qp_table.scc_mutex);
}
diff --git a/drivers/infiniband/hw/irdma/hw.c b/drivers/infiniband/hw/irdma/hw.c
index ad50b77282f8..69ce1862eabe 100644
--- a/drivers/infiniband/hw/irdma/hw.c
+++ b/drivers/infiniband/hw/irdma/hw.c
@@ -498,8 +498,6 @@ static int irdma_save_msix_info(struct irdma_pci_f *rf)
iw_qvlist->num_vectors = rf->msix_count;
if (rf->msix_count <= num_online_cpus())
rf->msix_shared = true;
- else if (rf->msix_count > num_online_cpus() + 1)
- rf->msix_count = num_online_cpus() + 1;
pmsix = rf->msix_entries;
for (i = 0, ceq_idx = 0; i < rf->msix_count; i++, iw_qvinfo++) {
diff --git a/drivers/infiniband/hw/irdma/main.c b/drivers/infiniband/hw/irdma/main.c
index 3f13200ff71b..1ee8969595d3 100644
--- a/drivers/infiniband/hw/irdma/main.c
+++ b/drivers/infiniband/hw/irdma/main.c
@@ -206,6 +206,43 @@ static void irdma_lan_unregister_qset(struct irdma_sc_vsi *vsi,
ibdev_dbg(&iwdev->ibdev, "WS: LAN free_res for rdma qset failed.\n");
}
+static int irdma_init_interrupts(struct irdma_pci_f *rf, struct ice_pf *pf)
+{
+ int i;
+
+ rf->msix_count = num_online_cpus() + IRDMA_NUM_AEQ_MSIX;
+ rf->msix_entries = kcalloc(rf->msix_count, sizeof(*rf->msix_entries),
+ GFP_KERNEL);
+ if (!rf->msix_entries)
+ return -ENOMEM;
+
+ for (i = 0; i < rf->msix_count; i++)
+ if (ice_alloc_rdma_qvector(pf, &rf->msix_entries[i]))
+ break;
+
+ if (i < IRDMA_MIN_MSIX) {
+ for (; i > 0; i--)
+ ice_free_rdma_qvector(pf, &rf->msix_entries[i]);
+
+ kfree(rf->msix_entries);
+ return -ENOMEM;
+ }
+
+ rf->msix_count = i;
+
+ return 0;
+}
+
+static void irdma_deinit_interrupts(struct irdma_pci_f *rf, struct ice_pf *pf)
+{
+ int i;
+
+ for (i = 0; i < rf->msix_count; i++)
+ ice_free_rdma_qvector(pf, &rf->msix_entries[i]);
+
+ kfree(rf->msix_entries);
+}
+
static void irdma_remove(struct auxiliary_device *aux_dev)
{
struct iidc_auxiliary_dev *iidc_adev = container_of(aux_dev,
@@ -216,6 +253,7 @@ static void irdma_remove(struct auxiliary_device *aux_dev)
irdma_ib_unregister_device(iwdev);
ice_rdma_update_vsi_filter(pf, iwdev->vsi_num, false);
+ irdma_deinit_interrupts(iwdev->rf, pf);
pr_debug("INIT: Gen2 PF[%d] device remove success\n", PCI_FUNC(pf->pdev->devfn));
}
@@ -230,9 +268,7 @@ static void irdma_fill_device_info(struct irdma_device *iwdev, struct ice_pf *pf
rf->gen_ops.unregister_qset = irdma_lan_unregister_qset;
rf->hw.hw_addr = pf->hw.hw_addr;
rf->pcidev = pf->pdev;
- rf->msix_count = pf->num_rdma_msix;
rf->pf_id = pf->hw.pf_id;
- rf->msix_entries = &pf->msix_entries[pf->rdma_base_vector];
rf->default_vsi.vsi_idx = vsi->vsi_num;
rf->protocol_used = pf->rdma_mode & IIDC_RDMA_PROTOCOL_ROCEV2 ?
IRDMA_ROCE_PROTOCOL_ONLY : IRDMA_IWARP_PROTOCOL_ONLY;
@@ -281,6 +317,10 @@ static int irdma_probe(struct auxiliary_device *aux_dev, const struct auxiliary_
irdma_fill_device_info(iwdev, pf, vsi);
rf = iwdev->rf;
+ err = irdma_init_interrupts(rf, pf);
+ if (err)
+ goto err_init_interrupts;
+
err = irdma_ctrl_init_hw(rf);
if (err)
goto err_ctrl_init;
@@ -311,6 +351,8 @@ err_ibreg:
err_rt_init:
irdma_ctrl_deinit_hw(rf);
err_ctrl_init:
+ irdma_deinit_interrupts(rf, pf);
+err_init_interrupts:
kfree(iwdev->rf);
ib_dealloc_device(&iwdev->ibdev);
diff --git a/drivers/infiniband/hw/irdma/main.h b/drivers/infiniband/hw/irdma/main.h
index 9f0ed6e84471..ef9a9b79d711 100644
--- a/drivers/infiniband/hw/irdma/main.h
+++ b/drivers/infiniband/hw/irdma/main.h
@@ -117,6 +117,9 @@ extern struct auxiliary_driver i40iw_auxiliary_drv;
#define IRDMA_IRQ_NAME_STR_LEN (64)
+#define IRDMA_NUM_AEQ_MSIX 1
+#define IRDMA_MIN_MSIX 2
+
enum init_completion_state {
INVALID_STATE = 0,
INITIAL_STATE,
diff --git a/drivers/infiniband/hw/mlx5/ah.c b/drivers/infiniband/hw/mlx5/ah.c
index 99036afb3aef..531a57f9ee7e 100644
--- a/drivers/infiniband/hw/mlx5/ah.c
+++ b/drivers/infiniband/hw/mlx5/ah.c
@@ -50,11 +50,12 @@ static __be16 mlx5_ah_get_udp_sport(const struct mlx5_ib_dev *dev,
return sport;
}
-static void create_ib_ah(struct mlx5_ib_dev *dev, struct mlx5_ib_ah *ah,
+static int create_ib_ah(struct mlx5_ib_dev *dev, struct mlx5_ib_ah *ah,
struct rdma_ah_init_attr *init_attr)
{
struct rdma_ah_attr *ah_attr = init_attr->ah_attr;
enum ib_gid_type gid_type;
+ int rate_val;
if (rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH) {
const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr);
@@ -67,8 +68,10 @@ static void create_ib_ah(struct mlx5_ib_dev *dev, struct mlx5_ib_ah *ah,
ah->av.tclass = grh->traffic_class;
}
- ah->av.stat_rate_sl =
- (mlx5r_ib_rate(dev, rdma_ah_get_static_rate(ah_attr)) << 4);
+ rate_val = mlx5r_ib_rate(dev, rdma_ah_get_static_rate(ah_attr));
+ if (rate_val < 0)
+ return rate_val;
+ ah->av.stat_rate_sl = rate_val << 4;
if (ah_attr->type == RDMA_AH_ATTR_TYPE_ROCE) {
if (init_attr->xmit_slave)
@@ -89,6 +92,8 @@ static void create_ib_ah(struct mlx5_ib_dev *dev, struct mlx5_ib_ah *ah,
ah->av.fl_mlid = rdma_ah_get_path_bits(ah_attr) & 0x7f;
ah->av.stat_rate_sl |= (rdma_ah_get_sl(ah_attr) & 0xf);
}
+
+ return 0;
}
int mlx5_ib_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr,
@@ -121,8 +126,7 @@ int mlx5_ib_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr,
return err;
}
- create_ib_ah(dev, ah, init_attr);
- return 0;
+ return create_ib_ah(dev, ah, init_attr);
}
int mlx5_ib_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr)
diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c
index e6203e26cc06..614009fb9632 100644
--- a/drivers/infiniband/sw/rdmavt/qp.c
+++ b/drivers/infiniband/sw/rdmavt/qp.c
@@ -1107,9 +1107,8 @@ int rvt_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *init_attr,
}
/* initialize timers needed for rc qp */
timer_setup(&qp->s_timer, rvt_rc_timeout, 0);
- hrtimer_init(&qp->s_rnr_timer, CLOCK_MONOTONIC,
- HRTIMER_MODE_REL);
- qp->s_rnr_timer.function = rvt_rc_rnr_retry;
+ hrtimer_setup(&qp->s_rnr_timer, rvt_rc_rnr_retry, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
/*
* Driver needs to set up it's private QP structure and do any
diff --git a/drivers/infiniband/sw/rxe/rxe.c b/drivers/infiniband/sw/rxe/rxe.c
index 1ba4a0c8726a..e27478fe9456 100644
--- a/drivers/infiniband/sw/rxe/rxe.c
+++ b/drivers/infiniband/sw/rxe/rxe.c
@@ -38,10 +38,8 @@ void rxe_dealloc(struct ib_device *ib_dev)
}
/* initialize rxe device parameters */
-static void rxe_init_device_param(struct rxe_dev *rxe)
+static void rxe_init_device_param(struct rxe_dev *rxe, struct net_device *ndev)
{
- struct net_device *ndev;
-
rxe->max_inline_data = RXE_MAX_INLINE_DATA;
rxe->attr.vendor_id = RXE_VENDOR_ID;
@@ -74,15 +72,9 @@ static void rxe_init_device_param(struct rxe_dev *rxe)
rxe->attr.max_pkeys = RXE_MAX_PKEYS;
rxe->attr.local_ca_ack_delay = RXE_LOCAL_CA_ACK_DELAY;
- ndev = rxe_ib_device_get_netdev(&rxe->ib_dev);
- if (!ndev)
- return;
-
addrconf_addr_eui48((unsigned char *)&rxe->attr.sys_image_guid,
ndev->dev_addr);
- dev_put(ndev);
-
rxe->max_ucontext = RXE_MAX_UCONTEXT;
}
@@ -115,18 +107,13 @@ static void rxe_init_port_param(struct rxe_port *port)
/* initialize port state, note IB convention that HCA ports are always
* numbered from 1
*/
-static void rxe_init_ports(struct rxe_dev *rxe)
+static void rxe_init_ports(struct rxe_dev *rxe, struct net_device *ndev)
{
struct rxe_port *port = &rxe->port;
- struct net_device *ndev;
rxe_init_port_param(port);
- ndev = rxe_ib_device_get_netdev(&rxe->ib_dev);
- if (!ndev)
- return;
addrconf_addr_eui48((unsigned char *)&port->port_guid,
ndev->dev_addr);
- dev_put(ndev);
spin_lock_init(&port->port_lock);
}
@@ -144,12 +131,12 @@ static void rxe_init_pools(struct rxe_dev *rxe)
}
/* initialize rxe device state */
-static void rxe_init(struct rxe_dev *rxe)
+static void rxe_init(struct rxe_dev *rxe, struct net_device *ndev)
{
/* init default device parameters */
- rxe_init_device_param(rxe);
+ rxe_init_device_param(rxe, ndev);
- rxe_init_ports(rxe);
+ rxe_init_ports(rxe, ndev);
rxe_init_pools(rxe);
/* init pending mmap list */
@@ -184,7 +171,7 @@ void rxe_set_mtu(struct rxe_dev *rxe, unsigned int ndev_mtu)
int rxe_add(struct rxe_dev *rxe, unsigned int mtu, const char *ibdev_name,
struct net_device *ndev)
{
- rxe_init(rxe);
+ rxe_init(rxe, ndev);
rxe_set_mtu(rxe, mtu);
return rxe_register_device(rxe, ibdev_name, ndev);
diff --git a/drivers/infiniband/sw/siw/siw.h b/drivers/infiniband/sw/siw/siw.h
index ea5eee50dc39..4e692de1da93 100644
--- a/drivers/infiniband/sw/siw/siw.h
+++ b/drivers/infiniband/sw/siw/siw.h
@@ -676,8 +676,8 @@ static inline __wsum siw_csum_update(const void *buff, int len, __wsum sum)
static inline __wsum siw_csum_combine(__wsum csum, __wsum csum2, int offset,
int len)
{
- return (__force __wsum)__crc32c_le_combine((__force __u32)csum,
- (__force __u32)csum2, len);
+ return (__force __wsum)crc32c_combine((__force __u32)csum,
+ (__force __u32)csum2, len);
}
static inline void siw_crc_skb(struct siw_rx_stream *srx, unsigned int len)
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_netlink.c b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
index 9ad8d9856275..53db7c8191e3 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
@@ -97,10 +97,13 @@ out_err:
return ret;
}
-static int ipoib_new_child_link(struct net *src_net, struct net_device *dev,
- struct nlattr *tb[], struct nlattr *data[],
+static int ipoib_new_child_link(struct net_device *dev,
+ struct rtnl_newlink_params *params,
struct netlink_ext_ack *extack)
{
+ struct net *link_net = rtnl_newlink_link_net(params);
+ struct nlattr **data = params->data;
+ struct nlattr **tb = params->tb;
struct net_device *pdev;
struct ipoib_dev_priv *ppriv;
u16 child_pkey;
@@ -109,7 +112,7 @@ static int ipoib_new_child_link(struct net *src_net, struct net_device *dev,
if (!tb[IFLA_LINK])
return -EINVAL;
- pdev = __dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK]));
+ pdev = __dev_get_by_index(link_net, nla_get_u32(tb[IFLA_LINK]));
if (!pdev || pdev->type != ARPHRD_INFINIBAND)
return -ENODEV;
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index 8fe2a51df649..c33e6f33265b 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -140,6 +140,7 @@ static const struct xpad_device {
{ 0x044f, 0x0f00, "Thrustmaster Wheel", 0, XTYPE_XBOX },
{ 0x044f, 0x0f03, "Thrustmaster Wheel", 0, XTYPE_XBOX },
{ 0x044f, 0x0f07, "Thrustmaster, Inc. Controller", 0, XTYPE_XBOX },
+ { 0x044f, 0xd01e, "ThrustMaster, Inc. ESWAP X 2 ELDEN RING EDITION", 0, XTYPE_XBOXONE },
{ 0x044f, 0x0f10, "Thrustmaster Modena GT Wheel", 0, XTYPE_XBOX },
{ 0x044f, 0xb326, "Thrustmaster Gamepad GP XID", 0, XTYPE_XBOX360 },
{ 0x045e, 0x0202, "Microsoft X-Box pad v1 (US)", 0, XTYPE_XBOX },
@@ -177,6 +178,7 @@ static const struct xpad_device {
{ 0x06a3, 0x0200, "Saitek Racing Wheel", 0, XTYPE_XBOX },
{ 0x06a3, 0x0201, "Saitek Adrenalin", 0, XTYPE_XBOX },
{ 0x06a3, 0xf51a, "Saitek P3600", 0, XTYPE_XBOX360 },
+ { 0x0738, 0x4503, "Mad Catz Racing Wheel", 0, XTYPE_XBOXONE },
{ 0x0738, 0x4506, "Mad Catz 4506 Wireless Controller", 0, XTYPE_XBOX },
{ 0x0738, 0x4516, "Mad Catz Control Pad", 0, XTYPE_XBOX },
{ 0x0738, 0x4520, "Mad Catz Control Pad Pro", 0, XTYPE_XBOX },
@@ -238,6 +240,7 @@ static const struct xpad_device {
{ 0x0e6f, 0x0146, "Rock Candy Wired Controller for Xbox One", 0, XTYPE_XBOXONE },
{ 0x0e6f, 0x0147, "PDP Marvel Xbox One Controller", 0, XTYPE_XBOXONE },
{ 0x0e6f, 0x015c, "PDP Xbox One Arcade Stick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOXONE },
+ { 0x0e6f, 0x015d, "PDP Mirror's Edge Official Wired Controller for Xbox One", 0, XTYPE_XBOXONE },
{ 0x0e6f, 0x0161, "PDP Xbox One Controller", 0, XTYPE_XBOXONE },
{ 0x0e6f, 0x0162, "PDP Xbox One Controller", 0, XTYPE_XBOXONE },
{ 0x0e6f, 0x0163, "PDP Xbox One Controller", 0, XTYPE_XBOXONE },
@@ -276,12 +279,15 @@ static const struct xpad_device {
{ 0x0f0d, 0x0078, "Hori Real Arcade Pro V Kai Xbox One", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOXONE },
{ 0x0f0d, 0x00c5, "Hori Fighting Commander ONE", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOXONE },
{ 0x0f0d, 0x00dc, "HORIPAD FPS for Nintendo Switch", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
+ { 0x0f0d, 0x0151, "Hori Racing Wheel Overdrive for Xbox Series X", 0, XTYPE_XBOXONE },
+ { 0x0f0d, 0x0152, "Hori Racing Wheel Overdrive for Xbox Series X", 0, XTYPE_XBOXONE },
{ 0x0f30, 0x010b, "Philips Recoil", 0, XTYPE_XBOX },
{ 0x0f30, 0x0202, "Joytech Advanced Controller", 0, XTYPE_XBOX },
{ 0x0f30, 0x8888, "BigBen XBMiniPad Controller", 0, XTYPE_XBOX },
{ 0x102c, 0xff0c, "Joytech Wireless Advanced Controller", 0, XTYPE_XBOX },
{ 0x1038, 0x1430, "SteelSeries Stratus Duo", 0, XTYPE_XBOX360 },
{ 0x1038, 0x1431, "SteelSeries Stratus Duo", 0, XTYPE_XBOX360 },
+ { 0x10f5, 0x7005, "Turtle Beach Recon Controller", 0, XTYPE_XBOXONE },
{ 0x11c9, 0x55f0, "Nacon GC-100XF", 0, XTYPE_XBOX360 },
{ 0x11ff, 0x0511, "PXN V900", 0, XTYPE_XBOX360 },
{ 0x1209, 0x2882, "Ardwiino Controller", 0, XTYPE_XBOX360 },
@@ -306,7 +312,7 @@ static const struct xpad_device {
{ 0x1689, 0xfe00, "Razer Sabertooth", 0, XTYPE_XBOX360 },
{ 0x17ef, 0x6182, "Lenovo Legion Controller for Windows", 0, XTYPE_XBOX360 },
{ 0x1949, 0x041a, "Amazon Game Controller", 0, XTYPE_XBOX360 },
- { 0x1a86, 0xe310, "QH Electronics Controller", 0, XTYPE_XBOX360 },
+ { 0x1a86, 0xe310, "Legion Go S", 0, XTYPE_XBOX360 },
{ 0x1bad, 0x0002, "Harmonix Rock Band Guitar", 0, XTYPE_XBOX360 },
{ 0x1bad, 0x0003, "Harmonix Rock Band Drumkit", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
{ 0x1bad, 0x0130, "Ion Drum Rocker", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
@@ -343,6 +349,7 @@ static const struct xpad_device {
{ 0x1bad, 0xfa01, "MadCatz GamePad", 0, XTYPE_XBOX360 },
{ 0x1bad, 0xfd00, "Razer Onza TE", 0, XTYPE_XBOX360 },
{ 0x1bad, 0xfd01, "Razer Onza", 0, XTYPE_XBOX360 },
+ { 0x1ee9, 0x1590, "ZOTAC Gaming Zone", 0, XTYPE_XBOX360 },
{ 0x20d6, 0x2001, "BDA Xbox Series X Wired Controller", 0, XTYPE_XBOXONE },
{ 0x20d6, 0x2009, "PowerA Enhanced Wired Controller for Xbox Series X|S", 0, XTYPE_XBOXONE },
{ 0x20d6, 0x281f, "PowerA Wired Controller For Xbox 360", 0, XTYPE_XBOX360 },
@@ -366,6 +373,7 @@ static const struct xpad_device {
{ 0x24c6, 0x5510, "Hori Fighting Commander ONE (Xbox 360/PC Mode)", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
{ 0x24c6, 0x551a, "PowerA FUSION Pro Controller", 0, XTYPE_XBOXONE },
{ 0x24c6, 0x561a, "PowerA FUSION Controller", 0, XTYPE_XBOXONE },
+ { 0x24c6, 0x581a, "ThrustMaster XB1 Classic Controller", 0, XTYPE_XBOXONE },
{ 0x24c6, 0x5b00, "ThrustMaster Ferrari 458 Racing Wheel", 0, XTYPE_XBOX360 },
{ 0x24c6, 0x5b02, "Thrustmaster, Inc. GPX Controller", 0, XTYPE_XBOX360 },
{ 0x24c6, 0x5b03, "Thrustmaster Ferrari 458 Racing Wheel", 0, XTYPE_XBOX360 },
@@ -374,10 +382,15 @@ static const struct xpad_device {
{ 0x2563, 0x058d, "OneXPlayer Gamepad", 0, XTYPE_XBOX360 },
{ 0x294b, 0x3303, "Snakebyte GAMEPAD BASE X", 0, XTYPE_XBOXONE },
{ 0x294b, 0x3404, "Snakebyte GAMEPAD RGB X", 0, XTYPE_XBOXONE },
+ { 0x2993, 0x2001, "TECNO Pocket Go", 0, XTYPE_XBOX360 },
{ 0x2dc8, 0x2000, "8BitDo Pro 2 Wired Controller fox Xbox", 0, XTYPE_XBOXONE },
{ 0x2dc8, 0x3106, "8BitDo Ultimate Wireless / Pro 2 Wired Controller", 0, XTYPE_XBOX360 },
+ { 0x2dc8, 0x3109, "8BitDo Ultimate Wireless Bluetooth", 0, XTYPE_XBOX360 },
{ 0x2dc8, 0x310a, "8BitDo Ultimate 2C Wireless Controller", 0, XTYPE_XBOX360 },
+ { 0x2dc8, 0x6001, "8BitDo SN30 Pro", 0, XTYPE_XBOX360 },
{ 0x2e24, 0x0652, "Hyperkin Duke X-Box One pad", 0, XTYPE_XBOXONE },
+ { 0x2e24, 0x1688, "Hyperkin X91 X-Box One pad", 0, XTYPE_XBOXONE },
+ { 0x2e95, 0x0504, "SCUF Gaming Controller", MAP_SELECT_BUTTON, XTYPE_XBOXONE },
{ 0x31e3, 0x1100, "Wooting One", 0, XTYPE_XBOX360 },
{ 0x31e3, 0x1200, "Wooting Two", 0, XTYPE_XBOX360 },
{ 0x31e3, 0x1210, "Wooting Lekker", 0, XTYPE_XBOX360 },
@@ -385,11 +398,16 @@ static const struct xpad_device {
{ 0x31e3, 0x1230, "Wooting Two HE (ARM)", 0, XTYPE_XBOX360 },
{ 0x31e3, 0x1300, "Wooting 60HE (AVR)", 0, XTYPE_XBOX360 },
{ 0x31e3, 0x1310, "Wooting 60HE (ARM)", 0, XTYPE_XBOX360 },
+ { 0x3285, 0x0603, "Nacon Pro Compact controller for Xbox", 0, XTYPE_XBOXONE },
{ 0x3285, 0x0607, "Nacon GC-100", 0, XTYPE_XBOX360 },
+ { 0x3285, 0x0614, "Nacon Pro Compact", 0, XTYPE_XBOXONE },
{ 0x3285, 0x0646, "Nacon Pro Compact", 0, XTYPE_XBOXONE },
+ { 0x3285, 0x0662, "Nacon Revolution5 Pro", 0, XTYPE_XBOX360 },
{ 0x3285, 0x0663, "Nacon Evol-X", 0, XTYPE_XBOXONE },
{ 0x3537, 0x1004, "GameSir T4 Kaleid", 0, XTYPE_XBOX360 },
+ { 0x3537, 0x1010, "GameSir G7 SE", 0, XTYPE_XBOXONE },
{ 0x3767, 0x0101, "Fanatec Speedster 3 Forceshock Wheel", 0, XTYPE_XBOX },
+ { 0x413d, 0x2104, "Black Shark Green Ghost Gamepad", 0, XTYPE_XBOX360 },
{ 0xffff, 0xffff, "Chinese-made Xbox Controller", 0, XTYPE_XBOX },
{ 0x0000, 0x0000, "Generic X-Box pad", 0, XTYPE_UNKNOWN }
};
@@ -488,6 +506,7 @@ static const struct usb_device_id xpad_table[] = {
XPAD_XBOX360_VENDOR(0x03f0), /* HP HyperX Xbox 360 controllers */
XPAD_XBOXONE_VENDOR(0x03f0), /* HP HyperX Xbox One controllers */
XPAD_XBOX360_VENDOR(0x044f), /* Thrustmaster Xbox 360 controllers */
+ XPAD_XBOXONE_VENDOR(0x044f), /* Thrustmaster Xbox One controllers */
XPAD_XBOX360_VENDOR(0x045e), /* Microsoft Xbox 360 controllers */
XPAD_XBOXONE_VENDOR(0x045e), /* Microsoft Xbox One controllers */
XPAD_XBOX360_VENDOR(0x046d), /* Logitech Xbox 360-style controllers */
@@ -519,8 +538,9 @@ static const struct usb_device_id xpad_table[] = {
XPAD_XBOX360_VENDOR(0x1689), /* Razer Onza */
XPAD_XBOX360_VENDOR(0x17ef), /* Lenovo */
XPAD_XBOX360_VENDOR(0x1949), /* Amazon controllers */
- XPAD_XBOX360_VENDOR(0x1a86), /* QH Electronics */
+ XPAD_XBOX360_VENDOR(0x1a86), /* Nanjing Qinheng Microelectronics (WCH) */
XPAD_XBOX360_VENDOR(0x1bad), /* Harmonix Rock Band guitar and drums */
+ XPAD_XBOX360_VENDOR(0x1ee9), /* ZOTAC Technology Limited */
XPAD_XBOX360_VENDOR(0x20d6), /* PowerA controllers */
XPAD_XBOXONE_VENDOR(0x20d6), /* PowerA controllers */
XPAD_XBOX360_VENDOR(0x2345), /* Machenike Controllers */
@@ -528,17 +548,20 @@ static const struct usb_device_id xpad_table[] = {
XPAD_XBOXONE_VENDOR(0x24c6), /* PowerA controllers */
XPAD_XBOX360_VENDOR(0x2563), /* OneXPlayer Gamepad */
XPAD_XBOX360_VENDOR(0x260d), /* Dareu H101 */
- XPAD_XBOXONE_VENDOR(0x294b), /* Snakebyte */
+ XPAD_XBOXONE_VENDOR(0x294b), /* Snakebyte */
+ XPAD_XBOX360_VENDOR(0x2993), /* TECNO Mobile */
XPAD_XBOX360_VENDOR(0x2c22), /* Qanba Controllers */
- XPAD_XBOX360_VENDOR(0x2dc8), /* 8BitDo Pro 2 Wired Controller */
- XPAD_XBOXONE_VENDOR(0x2dc8), /* 8BitDo Pro 2 Wired Controller for Xbox */
- XPAD_XBOXONE_VENDOR(0x2e24), /* Hyperkin Duke Xbox One pad */
- XPAD_XBOX360_VENDOR(0x2f24), /* GameSir controllers */
+ XPAD_XBOX360_VENDOR(0x2dc8), /* 8BitDo Controllers */
+ XPAD_XBOXONE_VENDOR(0x2dc8), /* 8BitDo Controllers */
+ XPAD_XBOXONE_VENDOR(0x2e24), /* Hyperkin Controllers */
+ XPAD_XBOX360_VENDOR(0x2f24), /* GameSir Controllers */
+ XPAD_XBOXONE_VENDOR(0x2e95), /* SCUF Gaming Controller */
XPAD_XBOX360_VENDOR(0x31e3), /* Wooting Keyboards */
XPAD_XBOX360_VENDOR(0x3285), /* Nacon GC-100 */
XPAD_XBOXONE_VENDOR(0x3285), /* Nacon Evol-X */
XPAD_XBOX360_VENDOR(0x3537), /* GameSir Controllers */
XPAD_XBOXONE_VENDOR(0x3537), /* GameSir Controllers */
+ XPAD_XBOX360_VENDOR(0x413d), /* Black Shark Green Ghost Controller */
{ }
};
@@ -691,7 +714,9 @@ static const struct xboxone_init_packet xboxone_init_packets[] = {
XBOXONE_INIT_PKT(0x045e, 0x0b00, xboxone_s_init),
XBOXONE_INIT_PKT(0x045e, 0x0b00, extra_input_packet_init),
XBOXONE_INIT_PKT(0x0e6f, 0x0000, xboxone_pdp_led_on),
+ XBOXONE_INIT_PKT(0x20d6, 0xa01a, xboxone_pdp_led_on),
XBOXONE_INIT_PKT(0x0e6f, 0x0000, xboxone_pdp_auth),
+ XBOXONE_INIT_PKT(0x20d6, 0xa01a, xboxone_pdp_auth),
XBOXONE_INIT_PKT(0x24c6, 0x541a, xboxone_rumblebegin_init),
XBOXONE_INIT_PKT(0x24c6, 0x542a, xboxone_rumblebegin_init),
XBOXONE_INIT_PKT(0x24c6, 0x543a, xboxone_rumblebegin_init),
diff --git a/drivers/input/misc/iqs7222.c b/drivers/input/misc/iqs7222.c
index 22022d11470d..80b917944b51 100644
--- a/drivers/input/misc/iqs7222.c
+++ b/drivers/input/misc/iqs7222.c
@@ -100,11 +100,11 @@ enum iqs7222_reg_key_id {
enum iqs7222_reg_grp_id {
IQS7222_REG_GRP_STAT,
- IQS7222_REG_GRP_FILT,
IQS7222_REG_GRP_CYCLE,
IQS7222_REG_GRP_GLBL,
IQS7222_REG_GRP_BTN,
IQS7222_REG_GRP_CHAN,
+ IQS7222_REG_GRP_FILT,
IQS7222_REG_GRP_SLDR,
IQS7222_REG_GRP_TPAD,
IQS7222_REG_GRP_GPIO,
@@ -286,6 +286,7 @@ static const struct iqs7222_event_desc iqs7222_tp_events[] = {
struct iqs7222_reg_grp_desc {
u16 base;
+ u16 val_len;
int num_row;
int num_col;
};
@@ -342,6 +343,7 @@ static const struct iqs7222_dev_desc iqs7222_devs[] = {
},
[IQS7222_REG_GRP_FILT] = {
.base = 0xAC00,
+ .val_len = 3,
.num_row = 1,
.num_col = 2,
},
@@ -400,6 +402,7 @@ static const struct iqs7222_dev_desc iqs7222_devs[] = {
},
[IQS7222_REG_GRP_FILT] = {
.base = 0xAC00,
+ .val_len = 3,
.num_row = 1,
.num_col = 2,
},
@@ -454,6 +457,7 @@ static const struct iqs7222_dev_desc iqs7222_devs[] = {
},
[IQS7222_REG_GRP_FILT] = {
.base = 0xC400,
+ .val_len = 3,
.num_row = 1,
.num_col = 2,
},
@@ -496,6 +500,7 @@ static const struct iqs7222_dev_desc iqs7222_devs[] = {
},
[IQS7222_REG_GRP_FILT] = {
.base = 0xC400,
+ .val_len = 3,
.num_row = 1,
.num_col = 2,
},
@@ -543,6 +548,7 @@ static const struct iqs7222_dev_desc iqs7222_devs[] = {
},
[IQS7222_REG_GRP_FILT] = {
.base = 0xAA00,
+ .val_len = 3,
.num_row = 1,
.num_col = 2,
},
@@ -600,6 +606,7 @@ static const struct iqs7222_dev_desc iqs7222_devs[] = {
},
[IQS7222_REG_GRP_FILT] = {
.base = 0xAA00,
+ .val_len = 3,
.num_row = 1,
.num_col = 2,
},
@@ -656,6 +663,7 @@ static const struct iqs7222_dev_desc iqs7222_devs[] = {
},
[IQS7222_REG_GRP_FILT] = {
.base = 0xAE00,
+ .val_len = 3,
.num_row = 1,
.num_col = 2,
},
@@ -712,6 +720,7 @@ static const struct iqs7222_dev_desc iqs7222_devs[] = {
},
[IQS7222_REG_GRP_FILT] = {
.base = 0xAE00,
+ .val_len = 3,
.num_row = 1,
.num_col = 2,
},
@@ -768,6 +777,7 @@ static const struct iqs7222_dev_desc iqs7222_devs[] = {
},
[IQS7222_REG_GRP_FILT] = {
.base = 0xAE00,
+ .val_len = 3,
.num_row = 1,
.num_col = 2,
},
@@ -1604,7 +1614,7 @@ static int iqs7222_force_comms(struct iqs7222_private *iqs7222)
}
static int iqs7222_read_burst(struct iqs7222_private *iqs7222,
- u16 reg, void *val, u16 num_val)
+ u16 reg, void *val, u16 val_len)
{
u8 reg_buf[sizeof(__be16)];
int ret, i;
@@ -1619,7 +1629,7 @@ static int iqs7222_read_burst(struct iqs7222_private *iqs7222,
{
.addr = client->addr,
.flags = I2C_M_RD,
- .len = num_val * sizeof(__le16),
+ .len = val_len,
.buf = (u8 *)val,
},
};
@@ -1675,7 +1685,7 @@ static int iqs7222_read_word(struct iqs7222_private *iqs7222, u16 reg, u16 *val)
__le16 val_buf;
int error;
- error = iqs7222_read_burst(iqs7222, reg, &val_buf, 1);
+ error = iqs7222_read_burst(iqs7222, reg, &val_buf, sizeof(val_buf));
if (error)
return error;
@@ -1685,10 +1695,9 @@ static int iqs7222_read_word(struct iqs7222_private *iqs7222, u16 reg, u16 *val)
}
static int iqs7222_write_burst(struct iqs7222_private *iqs7222,
- u16 reg, const void *val, u16 num_val)
+ u16 reg, const void *val, u16 val_len)
{
int reg_len = reg > U8_MAX ? sizeof(reg) : sizeof(u8);
- int val_len = num_val * sizeof(__le16);
int msg_len = reg_len + val_len;
int ret, i;
struct i2c_client *client = iqs7222->client;
@@ -1747,7 +1756,7 @@ static int iqs7222_write_word(struct iqs7222_private *iqs7222, u16 reg, u16 val)
{
__le16 val_buf = cpu_to_le16(val);
- return iqs7222_write_burst(iqs7222, reg, &val_buf, 1);
+ return iqs7222_write_burst(iqs7222, reg, &val_buf, sizeof(val_buf));
}
static int iqs7222_ati_trigger(struct iqs7222_private *iqs7222)
@@ -1831,30 +1840,14 @@ static int iqs7222_dev_init(struct iqs7222_private *iqs7222, int dir)
/*
* Acknowledge reset before writing any registers in case the device
- * suffers a spurious reset during initialization. Because this step
- * may change the reserved fields of the second filter beta register,
- * its cache must be updated.
- *
- * Writing the second filter beta register, in turn, may clobber the
- * system status register. As such, the filter beta register pair is
- * written first to protect against this hazard.
+ * suffers a spurious reset during initialization.
*/
if (dir == WRITE) {
- u16 reg = dev_desc->reg_grps[IQS7222_REG_GRP_FILT].base + 1;
- u16 filt_setup;
-
error = iqs7222_write_word(iqs7222, IQS7222_SYS_SETUP,
iqs7222->sys_setup[0] |
IQS7222_SYS_SETUP_ACK_RESET);
if (error)
return error;
-
- error = iqs7222_read_word(iqs7222, reg, &filt_setup);
- if (error)
- return error;
-
- iqs7222->filt_setup[1] &= GENMASK(7, 0);
- iqs7222->filt_setup[1] |= (filt_setup & ~GENMASK(7, 0));
}
/*
@@ -1883,6 +1876,7 @@ static int iqs7222_dev_init(struct iqs7222_private *iqs7222, int dir)
int num_col = dev_desc->reg_grps[i].num_col;
u16 reg = dev_desc->reg_grps[i].base;
__le16 *val_buf;
+ u16 val_len = dev_desc->reg_grps[i].val_len ? : num_col * sizeof(*val_buf);
u16 *val;
if (!num_col)
@@ -1900,7 +1894,7 @@ static int iqs7222_dev_init(struct iqs7222_private *iqs7222, int dir)
switch (dir) {
case READ:
error = iqs7222_read_burst(iqs7222, reg,
- val_buf, num_col);
+ val_buf, val_len);
for (k = 0; k < num_col; k++)
val[k] = le16_to_cpu(val_buf[k]);
break;
@@ -1909,7 +1903,7 @@ static int iqs7222_dev_init(struct iqs7222_private *iqs7222, int dir)
for (k = 0; k < num_col; k++)
val_buf[k] = cpu_to_le16(val[k]);
error = iqs7222_write_burst(iqs7222, reg,
- val_buf, num_col);
+ val_buf, val_len);
break;
default:
@@ -1962,7 +1956,7 @@ static int iqs7222_dev_info(struct iqs7222_private *iqs7222)
int error, i;
error = iqs7222_read_burst(iqs7222, IQS7222_PROD_NUM, dev_id,
- ARRAY_SIZE(dev_id));
+ sizeof(dev_id));
if (error)
return error;
@@ -2915,7 +2909,7 @@ static int iqs7222_report(struct iqs7222_private *iqs7222)
__le16 status[IQS7222_MAX_COLS_STAT];
error = iqs7222_read_burst(iqs7222, IQS7222_SYS_STATUS, status,
- num_stat);
+ num_stat * sizeof(*status));
if (error)
return error;
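The iqs7222 hunks above switch iqs7222_read_burst()/iqs7222_write_burst() from a count of 16-bit words to an explicit byte length and give the filter register group a three-byte val_len; transferring only three of that group's four bytes leaves the reserved upper byte of the second filter register untouched, which is presumably why the read-back/merge workaround in iqs7222_dev_init() can be dropped. A standalone sketch of the length selection, mirroring the val_len ?: num_col * sizeof(*val_buf) fallback in the hunk (the struct here is illustrative, not the driver's):

/* Editorial sketch (not driver code): per-group byte-length override with a
 * "columns * sizeof(u16)" fallback. */
#include <stdint.h>
#include <stdio.h>

struct reg_grp {
	uint16_t base;
	uint16_t val_len;	/* 0 = derive from num_col */
	int num_row;
	int num_col;
};

static unsigned int grp_burst_len(const struct reg_grp *grp)
{
	if (grp->val_len)
		return grp->val_len;			/* e.g. 3-byte filter group */
	return grp->num_col * sizeof(uint16_t);		/* one __le16 per column */
}

int main(void)
{
	struct reg_grp filt = { .base = 0xAC00, .val_len = 3, .num_row = 1, .num_col = 2 };
	struct reg_grp chan = { .base = 0x1000, .num_row = 4, .num_col = 6 };

	printf("filt burst = %u bytes, chan burst = %u bytes\n",
	       grp_burst_len(&filt), grp_burst_len(&chan));	/* 3 and 12 */
	return 0;
}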
diff --git a/drivers/input/serio/i8042-acpipnpio.h b/drivers/input/serio/i8042-acpipnpio.h
index 127cfdc8668a..6ed9fc34948c 100644
--- a/drivers/input/serio/i8042-acpipnpio.h
+++ b/drivers/input/serio/i8042-acpipnpio.h
@@ -1080,16 +1080,14 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = {
DMI_MATCH(DMI_BOARD_VENDOR, "TUXEDO"),
DMI_MATCH(DMI_BOARD_NAME, "AURA1501"),
},
- .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
- SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
+ .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE)
},
{
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "TUXEDO"),
DMI_MATCH(DMI_BOARD_NAME, "EDUBOOK1502"),
},
- .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
- SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
+ .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE)
},
{
/* Mivvy M310 */
@@ -1159,9 +1157,7 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = {
},
/*
* A lot of modern Clevo barebones have touchpad and/or keyboard issues
- * after suspend fixable with nomux + reset + noloop + nopnp. Luckily,
- * none of them have an external PS/2 port so this can safely be set for
- * all of them.
+ * after suspend fixable with the forcenorestore quirk.
* Clevo barebones come with board_vendor and/or system_vendor set to
* either the very generic string "Notebook" and/or a different value
* for each individual reseller. The only somewhat universal way to
@@ -1171,29 +1167,25 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = {
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "LAPQC71A"),
},
- .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
- SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
+ .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE)
},
{
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "LAPQC71B"),
},
- .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
- SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
+ .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE)
},
{
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "N140CU"),
},
- .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
- SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
+ .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE)
},
{
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "N141CU"),
},
- .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
- SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
+ .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE)
},
{
.matches = {
@@ -1205,29 +1197,19 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = {
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "NH5xAx"),
},
- .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
- SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
+ .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE)
},
{
- /*
- * Setting SERIO_QUIRK_NOMUX or SERIO_QUIRK_RESET_ALWAYS makes
- * the keyboard very laggy for ~5 seconds after boot and
- * sometimes also after resume.
- * However both are required for the keyboard to not fail
- * completely sometimes after boot or resume.
- */
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "NHxxRZQ"),
},
- .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
- SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
+ .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE)
},
{
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "NL5xRU"),
},
- .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
- SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
+ .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE)
},
/*
* At least one modern Clevo barebone has the touchpad connected both
@@ -1243,17 +1225,15 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = {
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "NS50MU"),
},
- .driver_data = (void *)(SERIO_QUIRK_NOAUX | SERIO_QUIRK_NOMUX |
- SERIO_QUIRK_RESET_ALWAYS | SERIO_QUIRK_NOLOOP |
- SERIO_QUIRK_NOPNP)
+ .driver_data = (void *)(SERIO_QUIRK_NOAUX |
+ SERIO_QUIRK_FORCENORESTORE)
},
{
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "NS50_70MU"),
},
- .driver_data = (void *)(SERIO_QUIRK_NOAUX | SERIO_QUIRK_NOMUX |
- SERIO_QUIRK_RESET_ALWAYS | SERIO_QUIRK_NOLOOP |
- SERIO_QUIRK_NOPNP)
+ .driver_data = (void *)(SERIO_QUIRK_NOAUX |
+ SERIO_QUIRK_FORCENORESTORE)
},
{
.matches = {
@@ -1265,8 +1245,13 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = {
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "NJ50_70CU"),
},
- .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
- SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
+ .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE)
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "P640RE"),
+ },
+ .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE)
},
{
/*
@@ -1277,16 +1262,14 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = {
.matches = {
DMI_MATCH(DMI_PRODUCT_NAME, "P65xH"),
},
- .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
- SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
+ .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE)
},
{
/* Clevo P650RS, 650RP6, Sager NP8152-S, and others */
.matches = {
DMI_MATCH(DMI_PRODUCT_NAME, "P65xRP"),
},
- .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
- SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
+ .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE)
},
{
/*
@@ -1297,8 +1280,7 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = {
.matches = {
DMI_MATCH(DMI_PRODUCT_NAME, "P65_P67H"),
},
- .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
- SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
+ .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE)
},
{
/*
@@ -1309,8 +1291,7 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = {
.matches = {
DMI_MATCH(DMI_PRODUCT_NAME, "P65_67RP"),
},
- .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
- SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
+ .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE)
},
{
/*
@@ -1321,8 +1302,7 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = {
.matches = {
DMI_MATCH(DMI_PRODUCT_NAME, "P65_67RS"),
},
- .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
- SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
+ .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE)
},
{
/*
@@ -1333,22 +1313,43 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = {
.matches = {
DMI_MATCH(DMI_PRODUCT_NAME, "P67xRP"),
},
- .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
- SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
+ .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE)
},
{
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "PB50_70DFx,DDx"),
},
- .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
- SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
+ .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE)
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "PB51RF"),
+ },
+ .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE)
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "PB71RD"),
+ },
+ .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE)
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "PC70DR"),
+ },
+ .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE)
},
{
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "PCX0DX"),
},
- .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
- SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
+ .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE)
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "PCX0DX_GN20"),
+ },
+ .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE)
},
/* See comment on TUXEDO InfinityBook S17 Gen6 / Clevo NS70MU above */
{
@@ -1361,15 +1362,13 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = {
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "X170SM"),
},
- .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
- SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
+ .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE)
},
{
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "X170KM-G"),
},
- .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
- SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
+ .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE)
},
{
/*
diff --git a/drivers/input/touchscreen/ads7846.c b/drivers/input/touchscreen/ads7846.c
index 066dc04003fa..67264c5b49cb 100644
--- a/drivers/input/touchscreen/ads7846.c
+++ b/drivers/input/touchscreen/ads7846.c
@@ -1021,7 +1021,7 @@ static int ads7846_setup_pendown(struct spi_device *spi,
if (pdata->get_pendown_state) {
ts->get_pendown_state = pdata->get_pendown_state;
} else {
- ts->gpio_pendown = gpiod_get(&spi->dev, "pendown", GPIOD_IN);
+ ts->gpio_pendown = devm_gpiod_get(&spi->dev, "pendown", GPIOD_IN);
if (IS_ERR(ts->gpio_pendown)) {
dev_err(&spi->dev, "failed to request pendown GPIO\n");
return PTR_ERR(ts->gpio_pendown);
diff --git a/drivers/input/touchscreen/goodix_berlin_core.c b/drivers/input/touchscreen/goodix_berlin_core.c
index 3fc03cf0ca23..7f8cfdd106fa 100644
--- a/drivers/input/touchscreen/goodix_berlin_core.c
+++ b/drivers/input/touchscreen/goodix_berlin_core.c
@@ -165,7 +165,7 @@ struct goodix_berlin_core {
struct device *dev;
struct regmap *regmap;
struct regulator *avdd;
- struct regulator *iovdd;
+ struct regulator *vddio;
struct gpio_desc *reset_gpio;
struct touchscreen_properties props;
struct goodix_berlin_fw_version fw_version;
@@ -248,22 +248,22 @@ static int goodix_berlin_power_on(struct goodix_berlin_core *cd)
{
int error;
- error = regulator_enable(cd->iovdd);
+ error = regulator_enable(cd->vddio);
if (error) {
- dev_err(cd->dev, "Failed to enable iovdd: %d\n", error);
+ dev_err(cd->dev, "Failed to enable vddio: %d\n", error);
return error;
}
- /* Vendor waits 3ms for IOVDD to settle */
+ /* Vendor waits 3ms for VDDIO to settle */
usleep_range(3000, 3100);
error = regulator_enable(cd->avdd);
if (error) {
dev_err(cd->dev, "Failed to enable avdd: %d\n", error);
- goto err_iovdd_disable;
+ goto err_vddio_disable;
}
- /* Vendor waits 15ms for IOVDD to settle */
+ /* Vendor waits 15ms for AVDD to settle */
usleep_range(15000, 15100);
gpiod_set_value_cansleep(cd->reset_gpio, 0);
@@ -283,8 +283,8 @@ static int goodix_berlin_power_on(struct goodix_berlin_core *cd)
err_dev_reset:
gpiod_set_value_cansleep(cd->reset_gpio, 1);
regulator_disable(cd->avdd);
-err_iovdd_disable:
- regulator_disable(cd->iovdd);
+err_vddio_disable:
+ regulator_disable(cd->vddio);
return error;
}
@@ -292,7 +292,7 @@ static void goodix_berlin_power_off(struct goodix_berlin_core *cd)
{
gpiod_set_value_cansleep(cd->reset_gpio, 1);
regulator_disable(cd->avdd);
- regulator_disable(cd->iovdd);
+ regulator_disable(cd->vddio);
}
static int goodix_berlin_read_version(struct goodix_berlin_core *cd)
@@ -744,10 +744,10 @@ int goodix_berlin_probe(struct device *dev, int irq, const struct input_id *id,
return dev_err_probe(dev, PTR_ERR(cd->avdd),
"Failed to request avdd regulator\n");
- cd->iovdd = devm_regulator_get(dev, "iovdd");
- if (IS_ERR(cd->iovdd))
- return dev_err_probe(dev, PTR_ERR(cd->iovdd),
- "Failed to request iovdd regulator\n");
+ cd->vddio = devm_regulator_get(dev, "vddio");
+ if (IS_ERR(cd->vddio))
+ return dev_err_probe(dev, PTR_ERR(cd->vddio),
+ "Failed to request vddio regulator\n");
error = goodix_berlin_power_on(cd);
if (error) {
diff --git a/drivers/input/touchscreen/imagis.c b/drivers/input/touchscreen/imagis.c
index abeae9102323..3c8bbe284b73 100644
--- a/drivers/input/touchscreen/imagis.c
+++ b/drivers/input/touchscreen/imagis.c
@@ -22,6 +22,7 @@
#define IST3032C_WHOAMI 0x32c
#define IST3038C_WHOAMI 0x38c
+#define IST3038H_WHOAMI 0x38d
#define IST3038B_REG_CHIPID 0x30
#define IST3038B_WHOAMI 0x30380b
@@ -428,11 +429,19 @@ static const struct imagis_properties imagis_3038c_data = {
.protocol_b = true,
};
+static const struct imagis_properties imagis_3038h_data = {
+ .interrupt_msg_cmd = IST3038C_REG_INTR_MESSAGE,
+ .touch_coord_cmd = IST3038C_REG_TOUCH_COORD,
+ .whoami_cmd = IST3038C_REG_CHIPID,
+ .whoami_val = IST3038H_WHOAMI,
+};
+
static const struct of_device_id imagis_of_match[] = {
{ .compatible = "imagis,ist3032c", .data = &imagis_3032c_data },
{ .compatible = "imagis,ist3038", .data = &imagis_3038_data },
{ .compatible = "imagis,ist3038b", .data = &imagis_3038b_data },
{ .compatible = "imagis,ist3038c", .data = &imagis_3038c_data },
+ { .compatible = "imagis,ist3038h", .data = &imagis_3038h_data },
{ },
};
MODULE_DEVICE_TABLE(of, imagis_of_match);
diff --git a/drivers/input/touchscreen/wdt87xx_i2c.c b/drivers/input/touchscreen/wdt87xx_i2c.c
index 27941245e962..88d376090e6e 100644
--- a/drivers/input/touchscreen/wdt87xx_i2c.c
+++ b/drivers/input/touchscreen/wdt87xx_i2c.c
@@ -1153,11 +1153,13 @@ static const struct i2c_device_id wdt87xx_dev_id[] = {
};
MODULE_DEVICE_TABLE(i2c, wdt87xx_dev_id);
+#ifdef CONFIG_ACPI
static const struct acpi_device_id wdt87xx_acpi_id[] = {
{ "WDHT0001", 0 },
{ }
};
MODULE_DEVICE_TABLE(acpi, wdt87xx_acpi_id);
+#endif
static struct i2c_driver wdt87xx_driver = {
.probe = wdt87xx_ts_probe,
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index ec1b5e32b972..cd750f512dee 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -154,7 +154,6 @@ config IOMMU_DMA
select DMA_OPS_HELPERS
select IOMMU_API
select IOMMU_IOVA
- select IRQ_MSI_IOMMU
select NEED_SG_DMA_LENGTH
select NEED_SG_DMA_FLAGS if SWIOTLB
@@ -483,8 +482,7 @@ config MTK_IOMMU
config MTK_IOMMU_V1
tristate "MediaTek IOMMU Version 1 (M4U gen1) Support"
- depends on ARM
- depends on ARCH_MEDIATEK || COMPILE_TEST
+ depends on (ARCH_MEDIATEK && ARM) || COMPILE_TEST
select ARM_DMA_USE_IOMMU
select IOMMU_API
select MEMORY
diff --git a/drivers/iommu/amd/amd_iommu.h b/drivers/iommu/amd/amd_iommu.h
index 68debf5ee2d7..220c598b7e14 100644
--- a/drivers/iommu/amd/amd_iommu.h
+++ b/drivers/iommu/amd/amd_iommu.h
@@ -47,7 +47,6 @@ extern unsigned long amd_iommu_pgsize_bitmap;
/* Protection domain ops */
void amd_iommu_init_identity_domain(void);
struct protection_domain *protection_domain_alloc(void);
-void protection_domain_free(struct protection_domain *domain);
struct iommu_domain *amd_iommu_domain_alloc_sva(struct device *dev,
struct mm_struct *mm);
void amd_iommu_domain_free(struct iommu_domain *dom);
@@ -176,12 +175,11 @@ void amd_iommu_apply_ivrs_quirks(void);
#else
static inline void amd_iommu_apply_ivrs_quirks(void) { }
#endif
+struct dev_table_entry *amd_iommu_get_ivhd_dte_flags(u16 segid, u16 devid);
void amd_iommu_domain_set_pgtable(struct protection_domain *domain,
u64 *root, int mode);
struct dev_table_entry *get_dev_table(struct amd_iommu *iommu);
-
-#endif
-
-struct dev_table_entry *amd_iommu_get_ivhd_dte_flags(u16 segid, u16 devid);
struct iommu_dev_data *search_dev_data(struct amd_iommu *iommu, u16 devid);
+
+#endif /* AMD_IOMMU_H */
diff --git a/drivers/iommu/amd/amd_iommu_types.h b/drivers/iommu/amd/amd_iommu_types.h
index 23caea22f8dc..5089b58e528a 100644
--- a/drivers/iommu/amd/amd_iommu_types.h
+++ b/drivers/iommu/amd/amd_iommu_types.h
@@ -112,6 +112,10 @@
#define FEATURE_SNPAVICSUP_GAM(x) \
(FIELD_GET(FEATURE_SNPAVICSUP, x) == 0x1)
+#define FEATURE_NUM_INT_REMAP_SUP GENMASK_ULL(9, 8)
+#define FEATURE_NUM_INT_REMAP_SUP_2K(x) \
+ (FIELD_GET(FEATURE_NUM_INT_REMAP_SUP, x) == 0x1)
+
/* Note:
* The current driver only support 16-bit PASID.
* Currently, hardware only implement upto 16-bit PASID
@@ -175,13 +179,16 @@
#define CONTROL_GAM_EN 25
#define CONTROL_GALOG_EN 28
#define CONTROL_GAINT_EN 29
+#define CONTROL_NUM_INT_REMAP_MODE 43
+#define CONTROL_NUM_INT_REMAP_MODE_MASK 0x03
+#define CONTROL_NUM_INT_REMAP_MODE_2K 0x01
#define CONTROL_EPH_EN 45
#define CONTROL_XT_EN 50
#define CONTROL_INTCAPXT_EN 51
#define CONTROL_IRTCACHEDIS 59
#define CONTROL_SNPAVIC_EN 61
-#define CTRL_INV_TO_MASK (7 << CONTROL_INV_TIMEOUT)
+#define CTRL_INV_TO_MASK 7
#define CTRL_INV_TO_NONE 0
#define CTRL_INV_TO_1MS 1
#define CTRL_INV_TO_10MS 2
@@ -309,15 +316,13 @@
#define DTE_IRQ_REMAP_INTCTL (2ULL << 60)
#define DTE_IRQ_REMAP_ENABLE 1ULL
-/*
- * AMD IOMMU hardware only support 512 IRTEs despite
- * the architectural limitation of 2048 entries.
- */
-#define DTE_INTTAB_ALIGNMENT 128
-#define DTE_INTTABLEN_VALUE 9ULL
-#define DTE_INTTABLEN (DTE_INTTABLEN_VALUE << 1)
#define DTE_INTTABLEN_MASK (0xfULL << 1)
-#define MAX_IRQS_PER_TABLE (1 << DTE_INTTABLEN_VALUE)
+#define DTE_INTTABLEN_VALUE_512 9ULL
+#define DTE_INTTABLEN_512 (DTE_INTTABLEN_VALUE_512 << 1)
+#define MAX_IRQS_PER_TABLE_512 BIT(DTE_INTTABLEN_VALUE_512)
+#define DTE_INTTABLEN_VALUE_2K 11ULL
+#define DTE_INTTABLEN_2K (DTE_INTTABLEN_VALUE_2K << 1)
+#define MAX_IRQS_PER_TABLE_2K BIT(DTE_INTTABLEN_VALUE_2K)
#define PAGE_MODE_NONE 0x00
#define PAGE_MODE_1_LEVEL 0x01
@@ -492,9 +497,6 @@ extern const struct iommu_ops amd_iommu_ops;
/* IVRS indicates that pre-boot remapping was enabled */
extern bool amdr_ivrs_remap_support;
-/* kmem_cache to get tables with 128 byte alignement */
-extern struct kmem_cache *amd_iommu_irq_cache;
-
#define PCI_SBDF_TO_SEGID(sbdf) (((sbdf) >> 16) & 0xffff)
#define PCI_SBDF_TO_DEVID(sbdf) ((sbdf) & 0xffff)
#define PCI_SEG_DEVID_TO_SBDF(seg, devid) ((((u32)(seg) & 0xffff) << 16) | \
@@ -851,6 +853,7 @@ struct iommu_dev_data {
struct device *dev;
u16 devid; /* PCI Device ID */
+ unsigned int max_irqs; /* Maximum IRQs supported by device */
u32 max_pasids; /* Max supported PASIDs */
u32 flags; /* Holds AMD_IOMMU_DEVICE_FLAG_<*> */
int ats_qdep;
@@ -928,9 +931,6 @@ struct unity_map_entry {
* Data structures for device handling
*/
-/* size of the dma_ops aperture as power of 2 */
-extern unsigned amd_iommu_aperture_order;
-
extern bool amd_iommu_force_isolation;
/* Max levels of glxval supported */
diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
index cb536d372b12..dd9e26b7b718 100644
--- a/drivers/iommu/amd/init.c
+++ b/drivers/iommu/amd/init.c
@@ -12,7 +12,6 @@
#include <linux/acpi.h>
#include <linux/list.h>
#include <linux/bitmap.h>
-#include <linux/slab.h>
#include <linux/syscore_ops.h>
#include <linux/interrupt.h>
#include <linux/msi.h>
@@ -219,7 +218,6 @@ static bool __initdata cmdline_maps;
static enum iommu_init_state init_state = IOMMU_START_STATE;
static int amd_iommu_enable_interrupts(void);
-static int __init iommu_go_to_state(enum iommu_init_state state);
static void init_device_table_dma(struct amd_iommu_pci_seg *pci_seg);
static bool amd_iommu_pre_enabled = true;
@@ -412,33 +410,26 @@ static void iommu_set_device_table(struct amd_iommu *iommu)
&entry, sizeof(entry));
}
-/* Generic functions to enable/disable certain features of the IOMMU. */
-void iommu_feature_enable(struct amd_iommu *iommu, u8 bit)
+static void iommu_feature_set(struct amd_iommu *iommu, u64 val, u64 mask, u8 shift)
{
u64 ctrl;
ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
- ctrl |= (1ULL << bit);
+ mask <<= shift;
+ ctrl &= ~mask;
+ ctrl |= (val << shift) & mask;
writeq(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
}
-static void iommu_feature_disable(struct amd_iommu *iommu, u8 bit)
+/* Generic functions to enable/disable certain features of the IOMMU. */
+void iommu_feature_enable(struct amd_iommu *iommu, u8 bit)
{
- u64 ctrl;
-
- ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
- ctrl &= ~(1ULL << bit);
- writeq(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
+ iommu_feature_set(iommu, 1ULL, 1ULL, bit);
}
-static void iommu_set_inv_tlb_timeout(struct amd_iommu *iommu, int timeout)
+static void iommu_feature_disable(struct amd_iommu *iommu, u8 bit)
{
- u64 ctrl;
-
- ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
- ctrl &= ~CTRL_INV_TO_MASK;
- ctrl |= (timeout << CONTROL_INV_TIMEOUT) & CTRL_INV_TO_MASK;
- writeq(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
+ iommu_feature_set(iommu, 0ULL, 1ULL, bit);
}
/* Function to enable the hardware */
@@ -1069,7 +1060,8 @@ static bool __copy_device_table(struct amd_iommu *iommu)
int_tab_len = old_devtb[devid].data[2] & DTE_INTTABLEN_MASK;
if (irq_v && (int_ctl || int_tab_len)) {
if ((int_ctl != DTE_IRQ_REMAP_INTCTL) ||
- (int_tab_len != DTE_INTTABLEN)) {
+ (int_tab_len != DTE_INTTABLEN_512 &&
+ int_tab_len != DTE_INTTABLEN_2K)) {
pr_err("Wrong old irq remapping flag: %#x\n", devid);
memunmap(old_devtb);
return false;
@@ -2652,7 +2644,7 @@ static void iommu_init_flags(struct amd_iommu *iommu)
iommu_feature_enable(iommu, CONTROL_COHERENT_EN);
/* Set IOTLB invalidation timeout to 1s */
- iommu_set_inv_tlb_timeout(iommu, CTRL_INV_TO_1S);
+ iommu_feature_set(iommu, CTRL_INV_TO_1S, CTRL_INV_TO_MASK, CONTROL_INV_TIMEOUT);
/* Enable Enhanced Peripheral Page Request Handling */
if (check_feature(FEATURE_EPHSUP))
@@ -2745,6 +2737,17 @@ static void iommu_enable_irtcachedis(struct amd_iommu *iommu)
iommu->irtcachedis_enabled ? "disabled" : "enabled");
}
+static void iommu_enable_2k_int(struct amd_iommu *iommu)
+{
+ if (!FEATURE_NUM_INT_REMAP_SUP_2K(amd_iommu_efr2))
+ return;
+
+ iommu_feature_set(iommu,
+ CONTROL_NUM_INT_REMAP_MODE_2K,
+ CONTROL_NUM_INT_REMAP_MODE_MASK,
+ CONTROL_NUM_INT_REMAP_MODE);
+}
+
static void early_enable_iommu(struct amd_iommu *iommu)
{
iommu_disable(iommu);
@@ -2757,6 +2760,7 @@ static void early_enable_iommu(struct amd_iommu *iommu)
iommu_enable_ga(iommu);
iommu_enable_xt(iommu);
iommu_enable_irtcachedis(iommu);
+ iommu_enable_2k_int(iommu);
iommu_enable(iommu);
amd_iommu_flush_all_caches(iommu);
}
@@ -2813,6 +2817,7 @@ static void early_enable_iommus(void)
iommu_enable_ga(iommu);
iommu_enable_xt(iommu);
iommu_enable_irtcachedis(iommu);
+ iommu_enable_2k_int(iommu);
iommu_set_device_table(iommu);
amd_iommu_flush_all_caches(iommu);
}
@@ -2939,9 +2944,6 @@ static struct syscore_ops amd_iommu_syscore_ops = {
static void __init free_iommu_resources(void)
{
- kmem_cache_destroy(amd_iommu_irq_cache);
- amd_iommu_irq_cache = NULL;
-
free_iommu_all();
free_pci_segments();
}
@@ -3040,7 +3042,7 @@ static void __init ivinfo_init(void *ivrs)
static int __init early_amd_iommu_init(void)
{
struct acpi_table_header *ivrs_base;
- int remap_cache_sz, ret;
+ int ret;
acpi_status status;
if (!amd_iommu_detected)
@@ -3102,22 +3104,7 @@ static int __init early_amd_iommu_init(void)
if (amd_iommu_irq_remap) {
struct amd_iommu_pci_seg *pci_seg;
- /*
- * Interrupt remapping enabled, create kmem_cache for the
- * remapping tables.
- */
ret = -ENOMEM;
- if (!AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir))
- remap_cache_sz = MAX_IRQS_PER_TABLE * sizeof(u32);
- else
- remap_cache_sz = MAX_IRQS_PER_TABLE * (sizeof(u64) * 2);
- amd_iommu_irq_cache = kmem_cache_create("irq_remap_cache",
- remap_cache_sz,
- DTE_INTTAB_ALIGNMENT,
- 0, NULL);
- if (!amd_iommu_irq_cache)
- goto out;
-
for_each_pci_segment(pci_seg) {
if (alloc_irq_lookup_table(pci_seg))
goto out;
diff --git a/drivers/iommu/amd/io_pgtable.c b/drivers/iommu/amd/io_pgtable.c
index f3399087859f..26cf562dde11 100644
--- a/drivers/iommu/amd/io_pgtable.c
+++ b/drivers/iommu/amd/io_pgtable.c
@@ -47,13 +47,6 @@ static u64 *first_pte_l7(u64 *pte, unsigned long *page_size,
return fpte;
}
-/****************************************************************************
- *
- * The functions below are used the create the page table mappings for
- * unity mapped regions.
- *
- ****************************************************************************/
-
static void free_pt_page(u64 *pt, struct list_head *freelist)
{
struct page *p = virt_to_page(pt);
diff --git a/drivers/iommu/amd/io_pgtable_v2.c b/drivers/iommu/amd/io_pgtable_v2.c
index c616de2c5926..a56a27396305 100644
--- a/drivers/iommu/amd/io_pgtable_v2.c
+++ b/drivers/iommu/amd/io_pgtable_v2.c
@@ -254,7 +254,7 @@ static int iommu_v2_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
pte = v2_alloc_pte(cfg->amd.nid, pgtable->pgd,
iova, map_size, gfp, &updated);
if (!pte) {
- ret = -EINVAL;
+ ret = -ENOMEM;
goto out;
}
diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
index cd5116d8c3b2..be8761bbef0f 100644
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -75,8 +75,6 @@ struct iommu_cmd {
*/
DEFINE_IDA(pdom_ids);
-struct kmem_cache *amd_iommu_irq_cache;
-
static int amd_iommu_attach_device(struct iommu_domain *dom,
struct device *dev);
@@ -868,7 +866,7 @@ static void iommu_print_event(struct amd_iommu *iommu, void *__evt)
int type, devid, flags, tag;
volatile u32 *event = __evt;
int count = 0;
- u64 address;
+ u64 address, ctrl;
u32 pasid;
retry:
@@ -878,6 +876,7 @@ retry:
(event[1] & EVENT_DOMID_MASK_LO);
flags = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK;
address = (u64)(((u64)event[3]) << 32) | event[2];
+ ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
if (type == 0) {
/* Did we hit the erratum? */
@@ -899,6 +898,7 @@ retry:
dev_err(dev, "Event logged [ILLEGAL_DEV_TABLE_ENTRY device=%04x:%02x:%02x.%x pasid=0x%05x address=0x%llx flags=0x%04x]\n",
iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
pasid, address, flags);
+ dev_err(dev, "Control Reg : 0x%llx\n", ctrl);
dump_dte_entry(iommu, devid);
break;
case EVENT_TYPE_DEV_TAB_ERR:
@@ -2394,8 +2394,14 @@ static struct iommu_device *amd_iommu_probe_device(struct device *dev)
}
out_err:
+
iommu_completion_wait(iommu);
+ if (FEATURE_NUM_INT_REMAP_SUP_2K(amd_iommu_efr2))
+ dev_data->max_irqs = MAX_IRQS_PER_TABLE_2K;
+ else
+ dev_data->max_irqs = MAX_IRQS_PER_TABLE_512;
+
if (dev_is_pci(dev))
pci_prepare_ats(to_pci_dev(dev), PAGE_SHIFT);
@@ -2432,15 +2438,6 @@ static struct iommu_group *amd_iommu_device_group(struct device *dev)
*
*****************************************************************************/
-void protection_domain_free(struct protection_domain *domain)
-{
- WARN_ON(!list_empty(&domain->dev_list));
- if (domain->domain.type & __IOMMU_DOMAIN_PAGING)
- free_io_pgtable_ops(&domain->iop.pgtbl.ops);
- pdom_id_free(domain->id);
- kfree(domain);
-}
-
static void protection_domain_init(struct protection_domain *domain)
{
spin_lock_init(&domain->lock);
@@ -2578,7 +2575,11 @@ void amd_iommu_domain_free(struct iommu_domain *dom)
{
struct protection_domain *domain = to_pdomain(dom);
- protection_domain_free(domain);
+ WARN_ON(!list_empty(&domain->dev_list));
+ if (domain->domain.type & __IOMMU_DOMAIN_PAGING)
+ free_io_pgtable_ops(&domain->iop.pgtbl.ops);
+ pdom_id_free(domain->id);
+ kfree(domain);
}
static int blocked_domain_attach_device(struct iommu_domain *domain,
@@ -3081,6 +3082,13 @@ out:
raw_spin_unlock_irqrestore(&iommu->lock, flags);
}
+static inline u8 iommu_get_int_tablen(struct iommu_dev_data *dev_data)
+{
+ if (dev_data && dev_data->max_irqs == MAX_IRQS_PER_TABLE_2K)
+ return DTE_INTTABLEN_2K;
+ return DTE_INTTABLEN_512;
+}
+
static void set_dte_irq_entry(struct amd_iommu *iommu, u16 devid,
struct irq_remap_table *table)
{
@@ -3095,7 +3103,7 @@ static void set_dte_irq_entry(struct amd_iommu *iommu, u16 devid,
new &= ~DTE_IRQ_PHYS_ADDR_MASK;
new |= iommu_virt_to_phys(table->table);
new |= DTE_IRQ_REMAP_INTCTL;
- new |= DTE_INTTABLEN;
+ new |= iommu_get_int_tablen(dev_data);
new |= DTE_IRQ_REMAP_ENABLE;
WRITE_ONCE(dte->data[2], new);
@@ -3121,7 +3129,7 @@ static struct irq_remap_table *get_irq_table(struct amd_iommu *iommu, u16 devid)
return table;
}
-static struct irq_remap_table *__alloc_irq_table(void)
+static struct irq_remap_table *__alloc_irq_table(int nid, int order)
{
struct irq_remap_table *table;
@@ -3129,19 +3137,13 @@ static struct irq_remap_table *__alloc_irq_table(void)
if (!table)
return NULL;
- table->table = kmem_cache_alloc(amd_iommu_irq_cache, GFP_KERNEL);
+ table->table = iommu_alloc_pages_node(nid, GFP_KERNEL, order);
if (!table->table) {
kfree(table);
return NULL;
}
raw_spin_lock_init(&table->lock);
- if (!AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir))
- memset(table->table, 0,
- MAX_IRQS_PER_TABLE * sizeof(u32));
- else
- memset(table->table, 0,
- (MAX_IRQS_PER_TABLE * (sizeof(u64) * 2)));
return table;
}
@@ -3173,13 +3175,24 @@ static int set_remap_table_entry_alias(struct pci_dev *pdev, u16 alias,
return 0;
}
+static inline size_t get_irq_table_size(unsigned int max_irqs)
+{
+ if (!AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir))
+ return max_irqs * sizeof(u32);
+
+ return max_irqs * (sizeof(u64) * 2);
+}
+
static struct irq_remap_table *alloc_irq_table(struct amd_iommu *iommu,
- u16 devid, struct pci_dev *pdev)
+ u16 devid, struct pci_dev *pdev,
+ unsigned int max_irqs)
{
struct irq_remap_table *table = NULL;
struct irq_remap_table *new_table = NULL;
struct amd_iommu_pci_seg *pci_seg;
unsigned long flags;
+ int order = get_order(get_irq_table_size(max_irqs));
+ int nid = iommu && iommu->dev ? dev_to_node(&iommu->dev->dev) : NUMA_NO_NODE;
u16 alias;
spin_lock_irqsave(&iommu_table_lock, flags);
@@ -3198,7 +3211,7 @@ static struct irq_remap_table *alloc_irq_table(struct amd_iommu *iommu,
spin_unlock_irqrestore(&iommu_table_lock, flags);
/* Nothing there yet, allocate new irq remapping table */
- new_table = __alloc_irq_table();
+ new_table = __alloc_irq_table(nid, order);
if (!new_table)
return NULL;
@@ -3233,20 +3246,21 @@ out_unlock:
spin_unlock_irqrestore(&iommu_table_lock, flags);
if (new_table) {
- kmem_cache_free(amd_iommu_irq_cache, new_table->table);
+ iommu_free_pages(new_table->table, order);
kfree(new_table);
}
return table;
}
static int alloc_irq_index(struct amd_iommu *iommu, u16 devid, int count,
- bool align, struct pci_dev *pdev)
+ bool align, struct pci_dev *pdev,
+ unsigned long max_irqs)
{
struct irq_remap_table *table;
int index, c, alignment = 1;
unsigned long flags;
- table = alloc_irq_table(iommu, devid, pdev);
+ table = alloc_irq_table(iommu, devid, pdev, max_irqs);
if (!table)
return -ENODEV;
@@ -3257,7 +3271,7 @@ static int alloc_irq_index(struct amd_iommu *iommu, u16 devid, int count,
/* Scan table for free entries */
for (index = ALIGN(table->min_index, alignment), c = 0;
- index < MAX_IRQS_PER_TABLE;) {
+ index < max_irqs;) {
if (!iommu->irte_ops->is_allocated(table, index)) {
c += 1;
} else {
@@ -3527,6 +3541,14 @@ static void fill_msi_msg(struct msi_msg *msg, u32 index)
msg->data = index;
msg->address_lo = 0;
msg->arch_addr_lo.base_address = X86_MSI_BASE_ADDRESS_LOW;
+ /*
+ * The struct msi_msg.dest_mode_logical is used to set the DM bit
+ * in the MSI Message Address Register. For devices with 2K int-remap support,
+ * this bit must be set to 1 regardless of the actual destination
+ * mode, which is signified by the IRTE[DM].
+ */
+ if (FEATURE_NUM_INT_REMAP_SUP_2K(amd_iommu_efr2))
+ msg->arch_addr_lo.dest_mode_logical = true;
msg->address_hi = X86_MSI_BASE_ADDRESS_HIGH;
}
@@ -3589,6 +3611,8 @@ static int irq_remapping_alloc(struct irq_domain *domain, unsigned int virq,
struct amd_ir_data *data = NULL;
struct amd_iommu *iommu;
struct irq_cfg *cfg;
+ struct iommu_dev_data *dev_data;
+ unsigned long max_irqs;
int i, ret, devid, seg, sbdf;
int index;
@@ -3607,6 +3631,9 @@ static int irq_remapping_alloc(struct irq_domain *domain, unsigned int virq,
if (!iommu)
return -EINVAL;
+ dev_data = search_dev_data(iommu, devid);
+ max_irqs = dev_data ? dev_data->max_irqs : MAX_IRQS_PER_TABLE_512;
+
ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg);
if (ret < 0)
return ret;
@@ -3614,7 +3641,7 @@ static int irq_remapping_alloc(struct irq_domain *domain, unsigned int virq,
if (info->type == X86_IRQ_ALLOC_TYPE_IOAPIC) {
struct irq_remap_table *table;
- table = alloc_irq_table(iommu, devid, NULL);
+ table = alloc_irq_table(iommu, devid, NULL, max_irqs);
if (table) {
if (!table->min_index) {
/*
@@ -3635,9 +3662,11 @@ static int irq_remapping_alloc(struct irq_domain *domain, unsigned int virq,
bool align = (info->type == X86_IRQ_ALLOC_TYPE_PCI_MSI);
index = alloc_irq_index(iommu, devid, nr_irqs, align,
- msi_desc_to_pci_dev(info->desc));
+ msi_desc_to_pci_dev(info->desc),
+ max_irqs);
} else {
- index = alloc_irq_index(iommu, devid, nr_irqs, false, NULL);
+ index = alloc_irq_index(iommu, devid, nr_irqs, false, NULL,
+ max_irqs);
}
if (index < 0) {
diff --git a/drivers/iommu/amd/pasid.c b/drivers/iommu/amd/pasid.c
index 11150cfd6718..77c8e9a91cbc 100644
--- a/drivers/iommu/amd/pasid.c
+++ b/drivers/iommu/amd/pasid.c
@@ -195,7 +195,7 @@ struct iommu_domain *amd_iommu_domain_alloc_sva(struct device *dev,
ret = mmu_notifier_register(&pdom->mn, mm);
if (ret) {
- protection_domain_free(pdom);
+ amd_iommu_domain_free(&pdom->domain);
return ERR_PTR(ret);
}
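The AMD IOMMU changes above add support for 2K-entry interrupt remapping tables: the DTE IntTabLen field carries log2 of the entry count (value 9 = 512 entries, 11 = 2048), the per-device table is now page-allocated and sized per request instead of carved from a fixed kmem_cache, and a control-register field selects the 2K numbering mode when EFR2 advertises it. A standalone sketch of the size arithmetic, mirroring get_irq_table_size() from the hunk (the 4-byte and 16-byte IRTE sizes correspond to the non-GA and GA formats respectively):

/* Editorial sketch (not kernel code): IntTabLen value -> entries -> bytes. */
#include <stddef.h>
#include <stdio.h>

static size_t irq_table_bytes(unsigned int inttablen_value, int ga_mode)
{
	size_t entries = 1UL << inttablen_value;	/* 9 -> 512, 11 -> 2048 */

	return entries * (ga_mode ? 16 : 4);		/* bytes per IRTE */
}

int main(void)
{
	printf("512-entry table, GA mode : %zu bytes\n", irq_table_bytes(9, 1));	/* 8192 */
	printf("2K-entry table,  GA mode : %zu bytes\n", irq_table_bytes(11, 1));	/* 32768 */
	printf("2K-entry table,  legacy  : %zu bytes\n", irq_table_bytes(11, 0));	/* 8192 */
	return 0;
}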
diff --git a/drivers/iommu/apple-dart.c b/drivers/iommu/apple-dart.c
index 95ba3caeb401..e13501541fdd 100644
--- a/drivers/iommu/apple-dart.c
+++ b/drivers/iommu/apple-dart.c
@@ -36,7 +36,7 @@
#define DART_MAX_STREAMS 256
#define DART_MAX_TTBR 4
-#define MAX_DARTS_PER_DEVICE 2
+#define MAX_DARTS_PER_DEVICE 3
/* Common registers */
@@ -277,6 +277,9 @@ struct apple_dart_domain {
* @streams: streams for this device
*/
struct apple_dart_master_cfg {
+ /* Intersection of DART capabilities */
+ u32 supports_bypass : 1;
+
struct apple_dart_stream_map stream_maps[MAX_DARTS_PER_DEVICE];
};
@@ -684,7 +687,7 @@ static int apple_dart_attach_dev_identity(struct iommu_domain *domain,
struct apple_dart_stream_map *stream_map;
int i;
- if (!cfg->stream_maps[0].dart->supports_bypass)
+ if (!cfg->supports_bypass)
return -EINVAL;
for_each_stream_map(i, cfg, stream_map)
@@ -792,20 +795,23 @@ static int apple_dart_of_xlate(struct device *dev,
return -EINVAL;
sid = args->args[0];
- if (!cfg)
+ if (!cfg) {
cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
- if (!cfg)
- return -ENOMEM;
+ if (!cfg)
+ return -ENOMEM;
+ /* Will be ANDed with DART capabilities */
+ cfg->supports_bypass = true;
+ }
dev_iommu_priv_set(dev, cfg);
cfg_dart = cfg->stream_maps[0].dart;
if (cfg_dart) {
- if (cfg_dart->supports_bypass != dart->supports_bypass)
- return -EINVAL;
if (cfg_dart->pgsize != dart->pgsize)
return -EINVAL;
}
+ cfg->supports_bypass &= dart->supports_bypass;
+
for (i = 0; i < MAX_DARTS_PER_DEVICE; ++i) {
if (cfg->stream_maps[i].dart == dart) {
set_bit(sid, cfg->stream_maps[i].sidmap);
@@ -945,7 +951,7 @@ static int apple_dart_def_domain_type(struct device *dev)
if (cfg->stream_maps[0].dart->pgsize > PAGE_SIZE)
return IOMMU_DOMAIN_IDENTITY;
- if (!cfg->stream_maps[0].dart->supports_bypass)
+ if (!cfg->supports_bypass)
return IOMMU_DOMAIN_DMA;
return 0;
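The apple-dart change above turns bypass support into a per-master intersection: cfg->supports_bypass starts true when the config is allocated and is ANDed with each DART's capability during of_xlate, so an identity domain is only offered when every DART behind the device supports bypass. A standalone sketch of that fold (types here are illustrative, not the driver's):

/* Editorial sketch (not driver code): AND-fold a per-instance capability. */
#include <stdbool.h>
#include <stdio.h>

struct dart_inst { bool supports_bypass; };

static bool master_supports_bypass(const struct dart_inst *darts, int n)
{
	bool bypass = true;	/* start permissive, AND each instance in */
	int i;

	for (i = 0; i < n; i++)
		bypass &= darts[i].supports_bypass;
	return bypass;
}

int main(void)
{
	struct dart_inst darts[] = { { true }, { true }, { false } };

	printf("bypass allowed: %d\n", master_supports_bypass(darts, 3));	/* 0 */
	return 0;
}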
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu.c b/drivers/iommu/arm/arm-smmu/arm-smmu.c
index de205a34ffc6..8f439c265a23 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu.c
@@ -79,8 +79,11 @@ static inline int arm_smmu_rpm_get(struct arm_smmu_device *smmu)
static inline void arm_smmu_rpm_put(struct arm_smmu_device *smmu)
{
- if (pm_runtime_enabled(smmu->dev))
- pm_runtime_put_autosuspend(smmu->dev);
+ if (pm_runtime_enabled(smmu->dev)) {
+ pm_runtime_mark_last_busy(smmu->dev);
+ __pm_runtime_put_autosuspend(smmu->dev);
+ }
}
static void arm_smmu_rpm_use_autosuspend(struct arm_smmu_device *smmu)
@@ -1195,7 +1198,6 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
/* Looks ok, so add the device to the domain */
arm_smmu_master_install_s2crs(cfg, S2CR_TYPE_TRANS,
smmu_domain->cfg.cbndx, fwspec);
- arm_smmu_rpm_use_autosuspend(smmu);
rpm_put:
arm_smmu_rpm_put(smmu);
return ret;
@@ -1218,7 +1220,6 @@ static int arm_smmu_attach_dev_type(struct device *dev,
return ret;
arm_smmu_master_install_s2crs(cfg, type, 0, fwspec);
- arm_smmu_rpm_use_autosuspend(smmu);
arm_smmu_rpm_put(smmu);
return 0;
}
@@ -1486,7 +1487,6 @@ static struct iommu_device *arm_smmu_probe_device(struct device *dev)
out_cfg_free:
kfree(cfg);
out_free:
- iommu_fwspec_free(dev);
return ERR_PTR(ret);
}
@@ -2246,6 +2246,7 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
if (dev->pm_domain) {
pm_runtime_set_active(dev);
pm_runtime_enable(dev);
+ arm_smmu_rpm_use_autosuspend(smmu);
}
return 0;
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 2a9fa0c8cc00..0832998eca38 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -24,6 +24,7 @@
#include <linux/memremap.h>
#include <linux/mm.h>
#include <linux/mutex.h>
+#include <linux/msi.h>
#include <linux/of_iommu.h>
#include <linux/pci.h>
#include <linux/scatterlist.h>
@@ -86,7 +87,6 @@ struct iommu_dma_cookie {
struct iommu_domain *fq_domain;
/* Options for dma-iommu use */
struct iommu_dma_options options;
- struct mutex mutex;
};
static DEFINE_STATIC_KEY_FALSE(iommu_deferred_attach_enabled);
@@ -102,6 +102,9 @@ static int __init iommu_dma_forcedac_setup(char *str)
}
early_param("iommu.forcedac", iommu_dma_forcedac_setup);
+static int iommu_dma_sw_msi(struct iommu_domain *domain, struct msi_desc *desc,
+ phys_addr_t msi_addr);
+
/* Number of entries per flush queue */
#define IOVA_DEFAULT_FQ_SIZE 256
#define IOVA_SINGLE_FQ_SIZE 32768
@@ -397,7 +400,7 @@ int iommu_get_dma_cookie(struct iommu_domain *domain)
if (!domain->iova_cookie)
return -ENOMEM;
- mutex_init(&domain->iova_cookie->mutex);
+ iommu_domain_set_sw_msi(domain, iommu_dma_sw_msi);
return 0;
}
@@ -429,6 +432,7 @@ int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base)
cookie->msi_iova = base;
domain->iova_cookie = cookie;
+ iommu_domain_set_sw_msi(domain, iommu_dma_sw_msi);
return 0;
}
EXPORT_SYMBOL(iommu_get_msi_cookie);
@@ -443,6 +447,11 @@ void iommu_put_dma_cookie(struct iommu_domain *domain)
struct iommu_dma_cookie *cookie = domain->iova_cookie;
struct iommu_dma_msi_page *msi, *tmp;
+#if IS_ENABLED(CONFIG_IRQ_MSI_IOMMU)
+ if (domain->sw_msi != iommu_dma_sw_msi)
+ return;
+#endif
+
if (!cookie)
return;
@@ -698,23 +707,20 @@ static int iommu_dma_init_domain(struct iommu_domain *domain, struct device *dev
domain->geometry.aperture_start >> order);
/* start_pfn is always nonzero for an already-initialised domain */
- mutex_lock(&cookie->mutex);
if (iovad->start_pfn) {
if (1UL << order != iovad->granule ||
base_pfn != iovad->start_pfn) {
pr_warn("Incompatible range for DMA domain\n");
- ret = -EFAULT;
- goto done_unlock;
+ return -EFAULT;
}
- ret = 0;
- goto done_unlock;
+ return 0;
}
init_iova_domain(iovad, 1UL << order, base_pfn);
ret = iova_domain_init_rcaches(iovad);
if (ret)
- goto done_unlock;
+ return ret;
iommu_dma_init_options(&cookie->options, dev);
@@ -723,11 +729,7 @@ static int iommu_dma_init_domain(struct iommu_domain *domain, struct device *dev
(!device_iommu_capable(dev, IOMMU_CAP_DEFERRED_FLUSH) || iommu_dma_init_fq(domain)))
domain->type = IOMMU_DOMAIN_DMA;
- ret = iova_reserve_iommu_regions(dev, domain);
-
-done_unlock:
- mutex_unlock(&cookie->mutex);
- return ret;
+ return iova_reserve_iommu_regions(dev, domain);
}
/**
@@ -1800,60 +1802,26 @@ out_free_page:
return NULL;
}
-/**
- * iommu_dma_prepare_msi() - Map the MSI page in the IOMMU domain
- * @desc: MSI descriptor, will store the MSI page
- * @msi_addr: MSI target address to be mapped
- *
- * Return: 0 on success or negative error code if the mapping failed.
- */
-int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr)
+static int iommu_dma_sw_msi(struct iommu_domain *domain, struct msi_desc *desc,
+ phys_addr_t msi_addr)
{
struct device *dev = msi_desc_to_dev(desc);
- struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
- struct iommu_dma_msi_page *msi_page;
- static DEFINE_MUTEX(msi_prepare_lock); /* see below */
+ const struct iommu_dma_msi_page *msi_page;
- if (!domain || !domain->iova_cookie) {
- desc->iommu_cookie = NULL;
+ if (!domain->iova_cookie) {
+ msi_desc_set_iommu_msi_iova(desc, 0, 0);
return 0;
}
- /*
- * In fact the whole prepare operation should already be serialised by
- * irq_domain_mutex further up the callchain, but that's pretty subtle
- * on its own, so consider this locking as failsafe documentation...
- */
- mutex_lock(&msi_prepare_lock);
+ iommu_group_mutex_assert(dev);
msi_page = iommu_dma_get_msi_page(dev, msi_addr, domain);
- mutex_unlock(&msi_prepare_lock);
-
- msi_desc_set_iommu_cookie(desc, msi_page);
-
if (!msi_page)
return -ENOMEM;
- return 0;
-}
-/**
- * iommu_dma_compose_msi_msg() - Apply translation to an MSI message
- * @desc: MSI descriptor prepared by iommu_dma_prepare_msi()
- * @msg: MSI message containing target physical address
- */
-void iommu_dma_compose_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
-{
- struct device *dev = msi_desc_to_dev(desc);
- const struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
- const struct iommu_dma_msi_page *msi_page;
-
- msi_page = msi_desc_get_iommu_cookie(desc);
-
- if (!domain || !domain->iova_cookie || WARN_ON(!msi_page))
- return;
-
- msg->address_hi = upper_32_bits(msi_page->iova);
- msg->address_lo &= cookie_msi_granule(domain->iova_cookie) - 1;
- msg->address_lo += lower_32_bits(msi_page->iova);
+ msi_desc_set_iommu_msi_iova(
+ desc, msi_page->iova,
+ ilog2(cookie_msi_granule(domain->iova_cookie)));
+ return 0;
}
static int iommu_dma_init(void)
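With the per-domain sw_msi callback, iommu_dma_sw_msi() only records the MSI IOVA and the cookie granule shift in the descriptor via msi_desc_set_iommu_msi_iova(); the address rewrite that the removed iommu_dma_compose_msi_msg() used to do is pure arithmetic on those two values. A minimal userspace sketch of that composition, assuming a 4 KiB granule (the function name here is illustrative, not kernel API):

#include <stdint.h>
#include <stdio.h>

/*
 * Illustrative only: how an MSI doorbell address is rewritten once the
 * IOMMU mapping is known. "granule_shift" stands in for the shift passed
 * to msi_desc_set_iommu_msi_iova(); the function name is hypothetical.
 */
static void compose_msi_addr(uint64_t msi_iova, unsigned int granule_shift,
                             uint32_t *address_hi, uint32_t *address_lo)
{
        uint64_t granule = 1ULL << granule_shift;

        *address_hi = (uint32_t)(msi_iova >> 32);
        /* keep the doorbell offset within the granule, rebase onto the IOVA */
        *address_lo &= granule - 1;
        *address_lo += (uint32_t)(msi_iova & 0xffffffff);
}

int main(void)
{
        uint32_t hi = 0, lo = 0xfee00014;       /* example physical doorbell */

        compose_msi_addr(0x80000000, 12, &hi, &lo);
        printf("address_hi=%#x address_lo=%#x\n", hi, lo);
        return 0;
}

Only the page is retargeted; the low bits of the original doorbell address survive as the offset within the granule.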
diff --git a/drivers/iommu/hyperv-iommu.c b/drivers/iommu/hyperv-iommu.c
index 2a86aa5d54c6..761ab647f372 100644
--- a/drivers/iommu/hyperv-iommu.c
+++ b/drivers/iommu/hyperv-iommu.c
@@ -130,7 +130,7 @@ static int __init hyperv_prepare_irq_remapping(void)
x86_init.hyper.msi_ext_dest_id())
return -ENODEV;
- if (hv_root_partition) {
+ if (hv_root_partition()) {
name = "HYPERV-ROOT-IR";
ops = &hyperv_root_ir_domain_ops;
} else {
@@ -151,7 +151,7 @@ static int __init hyperv_prepare_irq_remapping(void)
return -ENOMEM;
}
- if (hv_root_partition)
+ if (hv_root_partition())
return 0; /* The rest is only relevant to guests */
/*
@@ -217,7 +217,7 @@ hyperv_root_ir_compose_msi_msg(struct irq_data *irq_data, struct msi_msg *msg)
status = hv_unmap_ioapic_interrupt(ioapic_id, &entry);
if (status != HV_STATUS_SUCCESS)
- pr_debug("%s: unexpected unmap status %lld\n", __func__, status);
+ hv_status_debug(status, "failed to unmap\n");
data->entry.ioapic_rte.as_uint64 = 0;
data->entry.source = 0; /* Invalid source */
@@ -228,7 +228,7 @@ hyperv_root_ir_compose_msi_msg(struct irq_data *irq_data, struct msi_msg *msg)
vector, &entry);
if (status != HV_STATUS_SUCCESS) {
- pr_err("%s: map hypercall failed, status %lld\n", __func__, status);
+ hv_status_err(status, "map failed\n");
return;
}
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index bf1f0c814348..ec2f385ae25b 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -737,7 +737,8 @@ static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
return NULL;
domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
- pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
+ pteval = virt_to_phys(tmp_page) | DMA_PTE_READ |
+ DMA_PTE_WRITE;
if (domain->use_first_level)
pteval |= DMA_FL_PTE_US | DMA_FL_PTE_ACCESS;
@@ -1172,32 +1173,59 @@ static bool dev_needs_extra_dtlb_flush(struct pci_dev *pdev)
return true;
}
-static void iommu_enable_pci_caps(struct device_domain_info *info)
+static void iommu_enable_pci_ats(struct device_domain_info *info)
{
struct pci_dev *pdev;
- if (!dev_is_pci(info->dev))
+ if (!info->ats_supported)
return;
pdev = to_pci_dev(info->dev);
- if (info->ats_supported && pci_ats_page_aligned(pdev) &&
- !pci_enable_ats(pdev, VTD_PAGE_SHIFT))
+ if (!pci_ats_page_aligned(pdev))
+ return;
+
+ if (!pci_enable_ats(pdev, VTD_PAGE_SHIFT))
info->ats_enabled = 1;
}
-static void iommu_disable_pci_caps(struct device_domain_info *info)
+static void iommu_disable_pci_ats(struct device_domain_info *info)
+{
+ if (!info->ats_enabled)
+ return;
+
+ pci_disable_ats(to_pci_dev(info->dev));
+ info->ats_enabled = 0;
+}
+
+static void iommu_enable_pci_pri(struct device_domain_info *info)
{
struct pci_dev *pdev;
- if (!dev_is_pci(info->dev))
+ if (!info->ats_enabled || !info->pri_supported)
return;
pdev = to_pci_dev(info->dev);
+ /* PASID is required in PRG Response Message. */
+ if (info->pasid_enabled && !pci_prg_resp_pasid_required(pdev))
+ return;
- if (info->ats_enabled) {
- pci_disable_ats(pdev);
- info->ats_enabled = 0;
- }
+ if (pci_reset_pri(pdev))
+ return;
+
+ if (!pci_enable_pri(pdev, PRQ_DEPTH))
+ info->pri_enabled = 1;
+}
+
+static void iommu_disable_pci_pri(struct device_domain_info *info)
+{
+ if (!info->pri_enabled)
+ return;
+
+ if (WARN_ON(info->iopf_refcount))
+ iopf_queue_remove_device(info->iommu->iopf_queue, info->dev);
+
+ pci_disable_pri(to_pci_dev(info->dev));
+ info->pri_enabled = 0;
}
static void intel_flush_iotlb_all(struct iommu_domain *domain)
@@ -1556,12 +1584,19 @@ domain_context_mapping(struct dmar_domain *domain, struct device *dev)
struct device_domain_info *info = dev_iommu_priv_get(dev);
struct intel_iommu *iommu = info->iommu;
u8 bus = info->bus, devfn = info->devfn;
+ int ret;
if (!dev_is_pci(dev))
return domain_context_mapping_one(domain, iommu, bus, devfn);
- return pci_for_each_dma_alias(to_pci_dev(dev),
- domain_context_mapping_cb, domain);
+ ret = pci_for_each_dma_alias(to_pci_dev(dev),
+ domain_context_mapping_cb, domain);
+ if (ret)
+ return ret;
+
+ iommu_enable_pci_ats(info);
+
+ return 0;
}
/* Return largest possible superpage level for a given mapping */
@@ -1748,7 +1783,7 @@ static void domain_context_clear_one(struct device_domain_info *info, u8 bus, u8
context_clear_entry(context);
__iommu_flush_cache(iommu, context, sizeof(*context));
spin_unlock(&iommu->lock);
- intel_context_flush_present(info, context, did, true);
+ intel_context_flush_no_pasid(info, context, did);
}
int __domain_setup_first_level(struct intel_iommu *iommu,
@@ -1843,8 +1878,6 @@ static int dmar_domain_attach_device(struct dmar_domain *domain,
if (ret)
goto out_block_translation;
- iommu_enable_pci_caps(info);
-
ret = cache_tag_assign_domain(domain, dev, IOMMU_NO_PASID);
if (ret)
goto out_block_translation;
@@ -2871,16 +2904,19 @@ void intel_iommu_shutdown(void)
if (no_iommu || dmar_disabled)
return;
- down_write(&dmar_global_lock);
+ /*
+	 * All other CPUs were brought down and hotplug interrupts were disabled,
+	 * so no locking or RCU checking is needed anymore.
+ */
+ list_for_each_entry(drhd, &dmar_drhd_units, list) {
+ iommu = drhd->iommu;
- /* Disable PMRs explicitly here. */
- for_each_iommu(iommu, drhd)
+ /* Disable PMRs explicitly here. */
iommu_disable_protect_mem_regions(iommu);
- /* Make sure the IOMMUs are switched off */
- intel_disable_iommus();
-
- up_write(&dmar_global_lock);
+ /* Make sure the IOMMUs are switched off */
+ iommu_disable_translation(iommu);
+ }
}
static struct intel_iommu *dev_to_intel_iommu(struct device *dev)
@@ -3013,6 +3049,7 @@ static int __init probe_acpi_namespace_devices(void)
if (dev->bus != &acpi_bus_type)
continue;
+ up_read(&dmar_global_lock);
adev = to_acpi_device(dev);
mutex_lock(&adev->physical_node_lock);
list_for_each_entry(pn,
@@ -3022,6 +3059,7 @@ static int __init probe_acpi_namespace_devices(void)
break;
}
mutex_unlock(&adev->physical_node_lock);
+ down_read(&dmar_global_lock);
if (ret)
return ret;
@@ -3205,6 +3243,7 @@ static void domain_context_clear(struct device_domain_info *info)
pci_for_each_dma_alias(to_pci_dev(info->dev),
&domain_context_clear_one_cb, info);
+ iommu_disable_pci_ats(info);
}
/*
@@ -3221,7 +3260,6 @@ void device_block_translation(struct device *dev)
if (info->domain)
cache_tag_unassign_domain(info->domain, dev, IOMMU_NO_PASID);
- iommu_disable_pci_caps(info);
if (!dev_is_real_dma_subdevice(dev)) {
if (sm_supported(iommu))
intel_pasid_tear_down_entry(iommu, dev,
@@ -3756,6 +3794,10 @@ static struct iommu_device *intel_iommu_probe_device(struct device *dev)
!pci_enable_pasid(pdev, info->pasid_supported & ~1))
info->pasid_enabled = 1;
+ if (sm_supported(iommu))
+ iommu_enable_pci_ats(info);
+ iommu_enable_pci_pri(info);
+
return &iommu->iommu;
free_table:
intel_pasid_free_table(dev);
@@ -3772,6 +3814,9 @@ static void intel_iommu_release_device(struct device *dev)
struct device_domain_info *info = dev_iommu_priv_get(dev);
struct intel_iommu *iommu = info->iommu;
+ iommu_disable_pci_pri(info);
+ iommu_disable_pci_ats(info);
+
if (info->pasid_enabled) {
pci_disable_pasid(to_pci_dev(dev));
info->pasid_enabled = 0;
@@ -3858,151 +3903,41 @@ static struct iommu_group *intel_iommu_device_group(struct device *dev)
return generic_device_group(dev);
}
-static int intel_iommu_enable_sva(struct device *dev)
+int intel_iommu_enable_iopf(struct device *dev)
{
struct device_domain_info *info = dev_iommu_priv_get(dev);
- struct intel_iommu *iommu;
-
- if (!info || dmar_disabled)
- return -EINVAL;
-
- iommu = info->iommu;
- if (!iommu)
- return -EINVAL;
-
- if (!(iommu->flags & VTD_FLAG_SVM_CAPABLE))
- return -ENODEV;
-
- if (!info->pasid_enabled || !info->ats_enabled)
- return -EINVAL;
-
- /*
- * Devices having device-specific I/O fault handling should not
- * support PCI/PRI. The IOMMU side has no means to check the
- * capability of device-specific IOPF. Therefore, IOMMU can only
- * default that if the device driver enables SVA on a non-PRI
- * device, it will handle IOPF in its own way.
- */
- if (!info->pri_supported)
- return 0;
-
- /* Devices supporting PRI should have it enabled. */
- if (!info->pri_enabled)
- return -EINVAL;
-
- return 0;
-}
-
-static int context_flip_pri(struct device_domain_info *info, bool enable)
-{
struct intel_iommu *iommu = info->iommu;
- u8 bus = info->bus, devfn = info->devfn;
- struct context_entry *context;
- u16 did;
-
- spin_lock(&iommu->lock);
- if (context_copied(iommu, bus, devfn)) {
- spin_unlock(&iommu->lock);
- return -EINVAL;
- }
-
- context = iommu_context_addr(iommu, bus, devfn, false);
- if (!context || !context_present(context)) {
- spin_unlock(&iommu->lock);
- return -ENODEV;
- }
- did = context_domain_id(context);
-
- if (enable)
- context_set_sm_pre(context);
- else
- context_clear_sm_pre(context);
-
- if (!ecap_coherent(iommu->ecap))
- clflush_cache_range(context, sizeof(*context));
- intel_context_flush_present(info, context, did, true);
- spin_unlock(&iommu->lock);
-
- return 0;
-}
-
-static int intel_iommu_enable_iopf(struct device *dev)
-{
- struct pci_dev *pdev = dev_is_pci(dev) ? to_pci_dev(dev) : NULL;
- struct device_domain_info *info = dev_iommu_priv_get(dev);
- struct intel_iommu *iommu;
int ret;
- if (!pdev || !info || !info->ats_enabled || !info->pri_supported)
+ if (!info->pri_enabled)
return -ENODEV;
- if (info->pri_enabled)
- return -EBUSY;
-
- iommu = info->iommu;
- if (!iommu)
- return -EINVAL;
-
- /* PASID is required in PRG Response Message. */
- if (info->pasid_enabled && !pci_prg_resp_pasid_required(pdev))
- return -EINVAL;
-
- ret = pci_reset_pri(pdev);
- if (ret)
- return ret;
+ if (info->iopf_refcount) {
+ info->iopf_refcount++;
+ return 0;
+ }
ret = iopf_queue_add_device(iommu->iopf_queue, dev);
if (ret)
return ret;
- ret = context_flip_pri(info, true);
- if (ret)
- goto err_remove_device;
-
- ret = pci_enable_pri(pdev, PRQ_DEPTH);
- if (ret)
- goto err_clear_pri;
-
- info->pri_enabled = 1;
+ info->iopf_refcount = 1;
return 0;
-err_clear_pri:
- context_flip_pri(info, false);
-err_remove_device:
- iopf_queue_remove_device(iommu->iopf_queue, dev);
-
- return ret;
}
-static int intel_iommu_disable_iopf(struct device *dev)
+void intel_iommu_disable_iopf(struct device *dev)
{
struct device_domain_info *info = dev_iommu_priv_get(dev);
struct intel_iommu *iommu = info->iommu;
- if (!info->pri_enabled)
- return -EINVAL;
+ if (WARN_ON(!info->pri_enabled || !info->iopf_refcount))
+ return;
- /* Disable new PRI reception: */
- context_flip_pri(info, false);
+ if (--info->iopf_refcount)
+ return;
- /*
- * Remove device from fault queue and acknowledge all outstanding
- * PRQs to the device:
- */
iopf_queue_remove_device(iommu->iopf_queue, dev);
-
- /*
- * PCIe spec states that by clearing PRI enable bit, the Page
- * Request Interface will not issue new page requests, but has
- * outstanding page requests that have been transmitted or are
- * queued for transmission. This is supposed to be called after
- * the device driver has stopped DMA, all PASIDs have been
- * unbound and the outstanding PRQs have been drained.
- */
- pci_disable_pri(to_pci_dev(dev));
- info->pri_enabled = 0;
-
- return 0;
}
static int
@@ -4013,7 +3948,7 @@ intel_iommu_dev_enable_feat(struct device *dev, enum iommu_dev_features feat)
return intel_iommu_enable_iopf(dev);
case IOMMU_DEV_FEAT_SVA:
- return intel_iommu_enable_sva(dev);
+ return 0;
default:
return -ENODEV;
@@ -4025,7 +3960,8 @@ intel_iommu_dev_disable_feat(struct device *dev, enum iommu_dev_features feat)
{
switch (feat) {
case IOMMU_DEV_FEAT_IOPF:
- return intel_iommu_disable_iopf(dev);
+ intel_iommu_disable_iopf(dev);
+ return 0;
case IOMMU_DEV_FEAT_SVA:
return 0;
@@ -4410,13 +4346,10 @@ static int identity_domain_attach_dev(struct iommu_domain *domain, struct device
if (dev_is_real_dma_subdevice(dev))
return 0;
- if (sm_supported(iommu)) {
+ if (sm_supported(iommu))
ret = intel_pasid_setup_pass_through(iommu, dev, IOMMU_NO_PASID);
- if (!ret)
- iommu_enable_pci_caps(info);
- } else {
+ else
ret = device_setup_pass_through(dev);
- }
return ret;
}
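On the VT-d side, PRI is now switched on once at probe time and I/O page fault users are tracked with iopf_refcount: the first intel_iommu_enable_iopf() call adds the device to the fault queue, the last intel_iommu_disable_iopf() removes it. A self-contained sketch of that counting discipline, with hypothetical names (this shows the pattern, not the driver code):

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

/* hypothetical device state mirroring the pri_enabled/iopf_refcount pair */
struct dev_state {
        bool pri_enabled;               /* switched on once at probe time */
        unsigned int iopf_refcount;     /* users of the fault queue */
};

static int iopf_enable(struct dev_state *d)
{
        if (!d->pri_enabled)
                return -1;              /* nothing to enable without PRI */
        if (d->iopf_refcount++)
                return 0;               /* already on the fault queue */
        printf("add device to the fault queue\n");
        return 0;
}

static void iopf_disable(struct dev_state *d)
{
        assert(d->pri_enabled && d->iopf_refcount);
        if (--d->iopf_refcount)
                return;                 /* other users remain */
        printf("remove device from the fault queue\n");
}

int main(void)
{
        struct dev_state d = { .pri_enabled = true };

        iopf_enable(&d);        /* first user: queue add */
        iopf_enable(&d);        /* nested user: refcount only */
        iopf_disable(&d);
        iopf_disable(&d);       /* last user: queue remove */
        return 0;
}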
diff --git a/drivers/iommu/intel/iommu.h b/drivers/iommu/intel/iommu.h
index 6ea7bbe26b19..c4916886da5a 100644
--- a/drivers/iommu/intel/iommu.h
+++ b/drivers/iommu/intel/iommu.h
@@ -774,6 +774,7 @@ struct device_domain_info {
u8 ats_enabled:1;
u8 dtlb_extra_inval:1; /* Quirk for devices need extra flush */
u8 ats_qdep;
+ unsigned int iopf_refcount;
struct device *dev; /* it's NULL for PCIe-to-PCI bridge */
struct intel_iommu *iommu; /* IOMMU used by this device */
struct dmar_domain *domain; /* pointer to domain */
@@ -953,25 +954,6 @@ static inline unsigned long lvl_to_nr_pages(unsigned int lvl)
return 1UL << min_t(int, (lvl - 1) * LEVEL_STRIDE, MAX_AGAW_PFN_WIDTH);
}
-/* VT-d pages must always be _smaller_ than MM pages. Otherwise things
- are never going to work. */
-static inline unsigned long mm_to_dma_pfn_start(unsigned long mm_pfn)
-{
- return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
-}
-static inline unsigned long mm_to_dma_pfn_end(unsigned long mm_pfn)
-{
- return ((mm_pfn + 1) << (PAGE_SHIFT - VTD_PAGE_SHIFT)) - 1;
-}
-static inline unsigned long page_to_dma_pfn(struct page *pg)
-{
- return mm_to_dma_pfn_start(page_to_pfn(pg));
-}
-static inline unsigned long virt_to_dma_pfn(void *p)
-{
- return page_to_dma_pfn(virt_to_page(p));
-}
-
static inline void context_set_present(struct context_entry *context)
{
context->lo |= 1;
@@ -1304,9 +1286,8 @@ void cache_tag_flush_all(struct dmar_domain *domain);
void cache_tag_flush_range_np(struct dmar_domain *domain, unsigned long start,
unsigned long end);
-void intel_context_flush_present(struct device_domain_info *info,
- struct context_entry *context,
- u16 did, bool affect_domains);
+void intel_context_flush_no_pasid(struct device_domain_info *info,
+ struct context_entry *context, u16 did);
int intel_iommu_enable_prq(struct intel_iommu *iommu);
int intel_iommu_finish_prq(struct intel_iommu *iommu);
@@ -1314,6 +1295,9 @@ void intel_iommu_page_response(struct device *dev, struct iopf_fault *evt,
struct iommu_page_response *msg);
void intel_iommu_drain_pasid_prq(struct device *dev, u32 pasid);
+int intel_iommu_enable_iopf(struct device *dev);
+void intel_iommu_disable_iopf(struct device *dev);
+
#ifdef CONFIG_INTEL_IOMMU_SVM
void intel_svm_check(struct intel_iommu *iommu);
struct iommu_domain *intel_svm_domain_alloc(struct device *dev,
diff --git a/drivers/iommu/intel/irq_remapping.c b/drivers/iommu/intel/irq_remapping.c
index ad795c772f21..ea3ca5203919 100644
--- a/drivers/iommu/intel/irq_remapping.c
+++ b/drivers/iommu/intel/irq_remapping.c
@@ -25,11 +25,6 @@
#include "../irq_remapping.h"
#include "../iommu-pages.h"
-enum irq_mode {
- IRQ_REMAPPING,
- IRQ_POSTING,
-};
-
struct ioapic_scope {
struct intel_iommu *iommu;
unsigned int id;
@@ -49,8 +44,8 @@ struct irq_2_iommu {
u16 irte_index;
u16 sub_handle;
u8 irte_mask;
- enum irq_mode mode;
bool posted_msi;
+ bool posted_vcpu;
};
struct intel_ir_data {
@@ -138,7 +133,6 @@ static int alloc_irte(struct intel_iommu *iommu,
irq_iommu->irte_index = index;
irq_iommu->sub_handle = 0;
irq_iommu->irte_mask = mask;
- irq_iommu->mode = IRQ_REMAPPING;
}
raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
@@ -193,8 +187,6 @@ static int modify_irte(struct irq_2_iommu *irq_iommu,
rc = qi_flush_iec(iommu, index, 0);
- /* Update iommu mode according to the IRTE mode */
- irq_iommu->mode = irte->pst ? IRQ_POSTING : IRQ_REMAPPING;
raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
return rc;
@@ -1169,7 +1161,26 @@ static void intel_ir_reconfigure_irte_posted(struct irq_data *irqd)
static inline void intel_ir_reconfigure_irte_posted(struct irq_data *irqd) {}
#endif
-static void intel_ir_reconfigure_irte(struct irq_data *irqd, bool force)
+static void __intel_ir_reconfigure_irte(struct irq_data *irqd, bool force_host)
+{
+ struct intel_ir_data *ir_data = irqd->chip_data;
+
+ /*
+ * Don't modify IRTEs for IRQs that are being posted to vCPUs if the
+ * host CPU affinity changes.
+ */
+ if (ir_data->irq_2_iommu.posted_vcpu && !force_host)
+ return;
+
+ ir_data->irq_2_iommu.posted_vcpu = false;
+
+ if (ir_data->irq_2_iommu.posted_msi)
+ intel_ir_reconfigure_irte_posted(irqd);
+ else
+ modify_irte(&ir_data->irq_2_iommu, &ir_data->irte_entry);
+}
+
+static void intel_ir_reconfigure_irte(struct irq_data *irqd, bool force_host)
{
struct intel_ir_data *ir_data = irqd->chip_data;
struct irte *irte = &ir_data->irte_entry;
@@ -1182,10 +1193,7 @@ static void intel_ir_reconfigure_irte(struct irq_data *irqd, bool force)
irte->vector = cfg->vector;
irte->dest_id = IRTE_DEST(cfg->dest_apicid);
- if (ir_data->irq_2_iommu.posted_msi)
- intel_ir_reconfigure_irte_posted(irqd);
- else if (force || ir_data->irq_2_iommu.mode == IRQ_REMAPPING)
- modify_irte(&ir_data->irq_2_iommu, irte);
+ __intel_ir_reconfigure_irte(irqd, force_host);
}
/*
@@ -1240,7 +1248,7 @@ static int intel_ir_set_vcpu_affinity(struct irq_data *data, void *info)
/* stop posting interrupts, back to the default mode */
if (!vcpu_pi_info) {
- modify_irte(&ir_data->irq_2_iommu, &ir_data->irte_entry);
+ __intel_ir_reconfigure_irte(data, true);
} else {
struct irte irte_pi;
@@ -1263,6 +1271,7 @@ static int intel_ir_set_vcpu_affinity(struct irq_data *data, void *info)
irte_pi.pda_h = (vcpu_pi_info->pi_desc_addr >> 32) &
~(-1UL << PDA_HIGH_BIT);
+ ir_data->irq_2_iommu.posted_vcpu = true;
modify_irte(&ir_data->irq_2_iommu, &irte_pi);
}
@@ -1489,6 +1498,9 @@ static void intel_irq_remapping_deactivate(struct irq_domain *domain,
struct intel_ir_data *data = irq_data->chip_data;
struct irte entry;
+ WARN_ON_ONCE(data->irq_2_iommu.posted_vcpu);
+ data->irq_2_iommu.posted_vcpu = false;
+
memset(&entry, 0, sizeof(entry));
modify_irte(&data->irq_2_iommu, &entry);
}
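The new posted_vcpu flag makes the rule explicit: once an IRTE has been handed to a vCPU for posting, host affinity changes must not rewrite it; only an explicit request (vCPU unposting or deactivation) may force it back to remapped mode. A stripped-down sketch of that guard, with all hardware detail removed and hypothetical state names:

#include <stdbool.h>
#include <stdio.h>

/* hypothetical per-IRQ state mirroring posted_msi/posted_vcpu */
struct irq_state {
        bool posted_msi;
        bool posted_vcpu;
};

static void reconfigure(struct irq_state *s, bool force_host)
{
        /* never clobber an entry a vCPU is consuming unless explicitly forced */
        if (s->posted_vcpu && !force_host)
                return;

        s->posted_vcpu = false;

        if (s->posted_msi)
                printf("rewrite posted-MSI entry\n");
        else
                printf("rewrite remapped entry\n");
}

int main(void)
{
        struct irq_state s = { .posted_vcpu = true };

        reconfigure(&s, false); /* host affinity change: no-op while posted */
        reconfigure(&s, true);  /* vCPU unposting: back to remapped mode */
        return 0;
}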
diff --git a/drivers/iommu/intel/pasid.c b/drivers/iommu/intel/pasid.c
index fb59a7d35958..7ee18bb48bd4 100644
--- a/drivers/iommu/intel/pasid.c
+++ b/drivers/iommu/intel/pasid.c
@@ -932,7 +932,7 @@ static void device_pasid_table_teardown(struct device *dev, u8 bus, u8 devfn)
context_clear_entry(context);
__iommu_flush_cache(iommu, context, sizeof(*context));
spin_unlock(&iommu->lock);
- intel_context_flush_present(info, context, did, false);
+ intel_context_flush_no_pasid(info, context, did);
}
static int pci_pasid_table_teardown(struct pci_dev *pdev, u16 alias, void *data)
@@ -992,6 +992,8 @@ static int context_entry_set_pasid_table(struct context_entry *context,
context_set_sm_dte(context);
if (info->pasid_supported)
context_set_pasid(context);
+ if (info->pri_supported)
+ context_set_sm_pre(context);
context_set_fault_enable(context);
context_set_present(context);
@@ -1117,17 +1119,15 @@ static void __context_flush_dev_iotlb(struct device_domain_info *info)
/*
* Cache invalidations after change in a context table entry that was present
- * according to the Spec 6.5.3.3 (Guidance to Software for Invalidations). If
- * IOMMU is in scalable mode and all PASID table entries of the device were
- * non-present, set flush_domains to false. Otherwise, true.
+ * according to the Spec 6.5.3.3 (Guidance to Software for Invalidations).
+ * This helper can only be used when IOMMU is working in the legacy mode or
+ * IOMMU is in scalable mode but all PASID table entries of the device are
+ * non-present.
*/
-void intel_context_flush_present(struct device_domain_info *info,
- struct context_entry *context,
- u16 did, bool flush_domains)
+void intel_context_flush_no_pasid(struct device_domain_info *info,
+ struct context_entry *context, u16 did)
{
struct intel_iommu *iommu = info->iommu;
- struct pasid_entry *pte;
- int i;
/*
* Device-selective context-cache invalidation. The Domain-ID field
@@ -1150,30 +1150,5 @@ void intel_context_flush_present(struct device_domain_info *info,
return;
}
- /*
- * For scalable mode:
- * - Domain-selective PASID-cache invalidation to affected domains
- * - Domain-selective IOTLB invalidation to affected domains
- * - Global Device-TLB invalidation to affected functions
- */
- if (flush_domains) {
- /*
- * If the IOMMU is running in scalable mode and there might
- * be potential PASID translations, the caller should hold
- * the lock to ensure that context changes and cache flushes
- * are atomic.
- */
- assert_spin_locked(&iommu->lock);
- for (i = 0; i < info->pasid_table->max_pasid; i++) {
- pte = intel_pasid_get_entry(info->dev, i);
- if (!pte || !pasid_pte_is_present(pte))
- continue;
-
- did = pasid_get_domain_id(pte);
- qi_flush_pasid_cache(iommu, did, QI_PC_ALL_PASIDS, 0);
- iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH);
- }
- }
-
__context_flush_dev_iotlb(info);
}
diff --git a/drivers/iommu/intel/prq.c b/drivers/iommu/intel/prq.c
index 064194399b38..5b6a64d96850 100644
--- a/drivers/iommu/intel/prq.c
+++ b/drivers/iommu/intel/prq.c
@@ -67,7 +67,7 @@ void intel_iommu_drain_pasid_prq(struct device *dev, u32 pasid)
u16 sid, did;
info = dev_iommu_priv_get(dev);
- if (!info->pri_enabled)
+ if (!info->iopf_refcount)
return;
iommu = info->iommu;
diff --git a/drivers/iommu/intel/svm.c b/drivers/iommu/intel/svm.c
index f5569347591f..ba93123cb4eb 100644
--- a/drivers/iommu/intel/svm.c
+++ b/drivers/iommu/intel/svm.c
@@ -110,6 +110,41 @@ static const struct mmu_notifier_ops intel_mmuops = {
.free_notifier = intel_mm_free_notifier,
};
+static int intel_iommu_sva_supported(struct device *dev)
+{
+ struct device_domain_info *info = dev_iommu_priv_get(dev);
+ struct intel_iommu *iommu;
+
+ if (!info || dmar_disabled)
+ return -EINVAL;
+
+ iommu = info->iommu;
+ if (!iommu)
+ return -EINVAL;
+
+ if (!(iommu->flags & VTD_FLAG_SVM_CAPABLE))
+ return -ENODEV;
+
+ if (!info->pasid_enabled || !info->ats_enabled)
+ return -EINVAL;
+
+ /*
+ * Devices having device-specific I/O fault handling should not
+ * support PCI/PRI. The IOMMU side has no means to check the
+ * capability of device-specific IOPF. Therefore, IOMMU can only
+ * default that if the device driver enables SVA on a non-PRI
+ * device, it will handle IOPF in its own way.
+ */
+ if (!info->pri_supported)
+ return 0;
+
+ /* Devices supporting PRI should have it enabled. */
+ if (!info->pri_enabled)
+ return -EINVAL;
+
+ return 0;
+}
+
static int intel_svm_set_dev_pasid(struct iommu_domain *domain,
struct device *dev, ioasid_t pasid,
struct iommu_domain *old)
@@ -121,6 +156,10 @@ static int intel_svm_set_dev_pasid(struct iommu_domain *domain,
unsigned long sflags;
int ret = 0;
+ ret = intel_iommu_sva_supported(dev);
+ if (ret)
+ return ret;
+
dev_pasid = domain_add_dev_pasid(domain, dev, pasid);
if (IS_ERR(dev_pasid))
return PTR_ERR(dev_pasid);
@@ -161,6 +200,10 @@ struct iommu_domain *intel_svm_domain_alloc(struct device *dev,
struct dmar_domain *domain;
int ret;
+ ret = intel_iommu_sva_supported(dev);
+ if (ret)
+ return ERR_PTR(ret);
+
domain = kzalloc(sizeof(*domain), GFP_KERNEL);
if (!domain)
return ERR_PTR(-ENOMEM);
diff --git a/drivers/iommu/io-pgtable-dart.c b/drivers/iommu/io-pgtable-dart.c
index c004640640ee..06aca9ab52f9 100644
--- a/drivers/iommu/io-pgtable-dart.c
+++ b/drivers/iommu/io-pgtable-dart.c
@@ -135,7 +135,6 @@ static int dart_init_pte(struct dart_io_pgtable *data,
pte |= FIELD_PREP(APPLE_DART_PTE_SUBPAGE_START, 0);
pte |= FIELD_PREP(APPLE_DART_PTE_SUBPAGE_END, 0xfff);
- pte |= APPLE_DART1_PTE_PROT_SP_DIS;
pte |= APPLE_DART_PTE_VALID;
for (i = 0; i < num_entries; i++)
@@ -211,6 +210,7 @@ static dart_iopte dart_prot_to_pte(struct dart_io_pgtable *data,
dart_iopte pte = 0;
if (data->iop.fmt == APPLE_DART) {
+ pte |= APPLE_DART1_PTE_PROT_SP_DIS;
if (!(prot & IOMMU_WRITE))
pte |= APPLE_DART1_PTE_PROT_NO_WRITE;
if (!(prot & IOMMU_READ))
diff --git a/drivers/iommu/iommu-priv.h b/drivers/iommu/iommu-priv.h
index de5b54eaa8bf..05fa6e682e88 100644
--- a/drivers/iommu/iommu-priv.h
+++ b/drivers/iommu/iommu-priv.h
@@ -17,6 +17,8 @@ static inline const struct iommu_ops *dev_iommu_ops(struct device *dev)
return dev->iommu->iommu_dev->ops;
}
+void dev_iommu_free(struct device *dev);
+
const struct iommu_ops *iommu_ops_from_fwnode(const struct fwnode_handle *fwnode);
static inline const struct iommu_ops *iommu_fwspec_ops(struct iommu_fwspec *fwspec)
@@ -24,8 +26,7 @@ static inline const struct iommu_ops *iommu_fwspec_ops(struct iommu_fwspec *fwsp
return iommu_ops_from_fwnode(fwspec ? fwspec->iommu_fwnode : NULL);
}
-int iommu_group_replace_domain(struct iommu_group *group,
- struct iommu_domain *new_domain);
+void iommu_fwspec_free(struct device *dev);
int iommu_device_register_bus(struct iommu_device *iommu,
const struct iommu_ops *ops,
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 60aed01e54f2..9e1b444246f8 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -45,6 +45,9 @@ static unsigned int iommu_def_domain_type __read_mostly;
static bool iommu_dma_strict __read_mostly = IS_ENABLED(CONFIG_IOMMU_DEFAULT_DMA_STRICT);
static u32 iommu_cmd_line __read_mostly;
+/* Tags used with xa_tag_pointer() in group->pasid_array */
+enum { IOMMU_PASID_ARRAY_DOMAIN = 0, IOMMU_PASID_ARRAY_HANDLE = 1 };
+
struct iommu_group {
struct kobject kobj;
struct kobject *devices_kobj;
@@ -352,7 +355,7 @@ static struct dev_iommu *dev_iommu_get(struct device *dev)
return param;
}
-static void dev_iommu_free(struct device *dev)
+void dev_iommu_free(struct device *dev)
{
struct dev_iommu *param = dev->iommu;
@@ -404,14 +407,40 @@ EXPORT_SYMBOL_GPL(dev_iommu_priv_set);
* Init the dev->iommu and dev->iommu_group in the struct device and get the
* driver probed
*/
-static int iommu_init_device(struct device *dev, const struct iommu_ops *ops)
+static int iommu_init_device(struct device *dev)
{
+ const struct iommu_ops *ops;
struct iommu_device *iommu_dev;
struct iommu_group *group;
int ret;
if (!dev_iommu_get(dev))
return -ENOMEM;
+ /*
+ * For FDT-based systems and ACPI IORT/VIOT, the common firmware parsing
+ * is buried in the bus dma_configure path. Properly unpicking that is
+ * still a big job, so for now just invoke the whole thing. The device
+ * already having a driver bound means dma_configure has already run and
+ * either found no IOMMU to wait for, or we're in its replay call right
+ * now, so either way there's no point calling it again.
+ */
+ if (!dev->driver && dev->bus->dma_configure) {
+ mutex_unlock(&iommu_probe_device_lock);
+ dev->bus->dma_configure(dev);
+ mutex_lock(&iommu_probe_device_lock);
+ }
+ /*
+ * At this point, relevant devices either now have a fwspec which will
+ * match ops registered with a non-NULL fwnode, or we can reasonably
+ * assume that only one of Intel, AMD, s390, PAMU or legacy SMMUv2 can
+ * be present, and that any of their registered instances has suitable
+ * ops for probing, and thus cheekily co-opt the same mechanism.
+ */
+ ops = iommu_fwspec_ops(dev->iommu->fwspec);
+ if (!ops) {
+ ret = -ENODEV;
+ goto err_free;
+ }
if (!try_module_get(ops->owner)) {
ret = -EINVAL;
@@ -514,23 +543,11 @@ DEFINE_MUTEX(iommu_probe_device_lock);
static int __iommu_probe_device(struct device *dev, struct list_head *group_list)
{
- const struct iommu_ops *ops;
struct iommu_group *group;
struct group_device *gdev;
int ret;
/*
- * For FDT-based systems and ACPI IORT/VIOT, drivers register IOMMU
- * instances with non-NULL fwnodes, and client devices should have been
- * identified with a fwspec by this point. Otherwise, we can currently
- * assume that only one of Intel, AMD, s390, PAMU or legacy SMMUv2 can
- * be present, and that any of their registered instances has suitable
- * ops for probing, and thus cheekily co-opt the same mechanism.
- */
- ops = iommu_fwspec_ops(dev_iommu_fwspec_get(dev));
- if (!ops)
- return -ENODEV;
- /*
* Serialise to avoid races between IOMMU drivers registering in
* parallel and/or the "replay" calls from ACPI/OF code via client
* driver probe. Once the latter have been cleaned up we should
@@ -543,9 +560,15 @@ static int __iommu_probe_device(struct device *dev, struct list_head *group_list
if (dev->iommu_group)
return 0;
- ret = iommu_init_device(dev, ops);
+ ret = iommu_init_device(dev);
if (ret)
return ret;
+ /*
+ * And if we do now see any replay calls, they would indicate someone
+ * misusing the dma_configure path outside bus code.
+ */
+ if (dev->driver)
+ dev_WARN(dev, "late IOMMU probe at driver bind, something fishy here!\n");
group = dev->iommu_group;
gdev = iommu_group_alloc_device(group, dev);
@@ -2147,6 +2170,17 @@ struct iommu_domain *iommu_get_dma_domain(struct device *dev)
return dev->iommu_group->default_domain;
}
+static void *iommu_make_pasid_array_entry(struct iommu_domain *domain,
+ struct iommu_attach_handle *handle)
+{
+ if (handle) {
+ handle->domain = domain;
+ return xa_tag_pointer(handle, IOMMU_PASID_ARRAY_HANDLE);
+ }
+
+ return xa_tag_pointer(domain, IOMMU_PASID_ARRAY_DOMAIN);
+}
+
static int __iommu_attach_group(struct iommu_domain *domain,
struct iommu_group *group)
{
@@ -2187,32 +2221,6 @@ int iommu_attach_group(struct iommu_domain *domain, struct iommu_group *group)
}
EXPORT_SYMBOL_GPL(iommu_attach_group);
-/**
- * iommu_group_replace_domain - replace the domain that a group is attached to
- * @group: IOMMU group that will be attached to the new domain
- * @new_domain: new IOMMU domain to replace with
- *
- * This API allows the group to switch domains without being forced to go to
- * the blocking domain in-between.
- *
- * If the currently attached domain is a core domain (e.g. a default_domain),
- * it will act just like the iommu_attach_group().
- */
-int iommu_group_replace_domain(struct iommu_group *group,
- struct iommu_domain *new_domain)
-{
- int ret;
-
- if (!new_domain)
- return -EINVAL;
-
- mutex_lock(&group->mutex);
- ret = __iommu_group_set_domain(group, new_domain);
- mutex_unlock(&group->mutex);
- return ret;
-}
-EXPORT_SYMBOL_NS_GPL(iommu_group_replace_domain, "IOMMUFD_INTERNAL");
-
static int __iommu_device_set_domain(struct iommu_group *group,
struct device *dev,
struct iommu_domain *new_domain,
@@ -2849,7 +2857,6 @@ void iommu_fwspec_free(struct device *dev)
dev_iommu_fwspec_set(dev, NULL);
}
}
-EXPORT_SYMBOL_GPL(iommu_fwspec_free);
int iommu_fwspec_add_ids(struct device *dev, const u32 *ids, int num_ids)
{
@@ -3097,6 +3104,11 @@ int iommu_device_use_default_domain(struct device *dev)
return 0;
mutex_lock(&group->mutex);
+ /* We may race against bus_iommu_probe() finalising groups here */
+ if (!group->default_domain) {
+ ret = -EPROBE_DEFER;
+ goto unlock_out;
+ }
if (group->owner_cnt) {
if (group->domain != group->default_domain || group->owner ||
!xa_empty(&group->pasid_array)) {
@@ -3374,6 +3386,7 @@ int iommu_attach_device_pasid(struct iommu_domain *domain,
struct iommu_group *group = dev->iommu_group;
struct group_device *device;
const struct iommu_ops *ops;
+ void *entry;
int ret;
if (!group)
@@ -3397,16 +3410,31 @@ int iommu_attach_device_pasid(struct iommu_domain *domain,
}
}
- if (handle)
- handle->domain = domain;
+ entry = iommu_make_pasid_array_entry(domain, handle);
- ret = xa_insert(&group->pasid_array, pasid, handle, GFP_KERNEL);
+ /*
+ * Entry present is a failure case. Use xa_insert() instead of
+ * xa_reserve().
+ */
+ ret = xa_insert(&group->pasid_array, pasid, XA_ZERO_ENTRY, GFP_KERNEL);
if (ret)
goto out_unlock;
ret = __iommu_set_group_pasid(domain, group, pasid);
- if (ret)
- xa_erase(&group->pasid_array, pasid);
+ if (ret) {
+ xa_release(&group->pasid_array, pasid);
+ goto out_unlock;
+ }
+
+ /*
+ * The xa_insert() above reserved the memory, and the group->mutex is
+	 * held, so this cannot fail. The new domain cannot be visible until the
+ * operation succeeds as we cannot tolerate PRIs becoming concurrently
+ * queued and then failing attach.
+ */
+ WARN_ON(xa_is_err(xa_store(&group->pasid_array,
+ pasid, entry, GFP_KERNEL)));
+
out_unlock:
mutex_unlock(&group->mutex);
return ret;
@@ -3480,13 +3508,17 @@ struct iommu_attach_handle *
iommu_attach_handle_get(struct iommu_group *group, ioasid_t pasid, unsigned int type)
{
struct iommu_attach_handle *handle;
+ void *entry;
xa_lock(&group->pasid_array);
- handle = xa_load(&group->pasid_array, pasid);
- if (!handle)
+ entry = xa_load(&group->pasid_array, pasid);
+ if (!entry || xa_pointer_tag(entry) != IOMMU_PASID_ARRAY_HANDLE) {
handle = ERR_PTR(-ENOENT);
- else if (type && handle->domain->type != type)
- handle = ERR_PTR(-EBUSY);
+ } else {
+ handle = xa_untag_pointer(entry);
+ if (type && handle->domain->type != type)
+ handle = ERR_PTR(-EBUSY);
+ }
xa_unlock(&group->pasid_array);
return handle;
@@ -3509,25 +3541,35 @@ int iommu_attach_group_handle(struct iommu_domain *domain,
struct iommu_group *group,
struct iommu_attach_handle *handle)
{
+ void *entry;
int ret;
- if (handle)
- handle->domain = domain;
+ if (!handle)
+ return -EINVAL;
mutex_lock(&group->mutex);
- ret = xa_insert(&group->pasid_array, IOMMU_NO_PASID, handle, GFP_KERNEL);
+ entry = iommu_make_pasid_array_entry(domain, handle);
+ ret = xa_insert(&group->pasid_array,
+ IOMMU_NO_PASID, XA_ZERO_ENTRY, GFP_KERNEL);
if (ret)
- goto err_unlock;
+ goto out_unlock;
ret = __iommu_attach_group(domain, group);
- if (ret)
- goto err_erase;
- mutex_unlock(&group->mutex);
+ if (ret) {
+ xa_release(&group->pasid_array, IOMMU_NO_PASID);
+ goto out_unlock;
+ }
- return 0;
-err_erase:
- xa_erase(&group->pasid_array, IOMMU_NO_PASID);
-err_unlock:
+ /*
+ * The xa_insert() above reserved the memory, and the group->mutex is
+	 * held, so this cannot fail. The new domain cannot be visible until the
+ * operation succeeds as we cannot tolerate PRIs becoming concurrently
+ * queued and then failing attach.
+ */
+ WARN_ON(xa_is_err(xa_store(&group->pasid_array,
+ IOMMU_NO_PASID, entry, GFP_KERNEL)));
+
+out_unlock:
mutex_unlock(&group->mutex);
return ret;
}
@@ -3557,33 +3599,34 @@ EXPORT_SYMBOL_NS_GPL(iommu_detach_group_handle, "IOMMUFD_INTERNAL");
* @new_domain: new IOMMU domain to replace with
* @handle: attach handle
*
- * This is a variant of iommu_group_replace_domain(). It allows the caller to
- * provide an attach handle for the new domain and use it when the domain is
- * attached.
+ * This API allows the group to switch domains without being forced to go to
+ * the blocking domain in-between. It allows the caller to provide an attach
+ * handle for the new domain and use it when the domain is attached.
+ *
+ * If the currently attached domain is a core domain (e.g. a default_domain),
+ * it will act just like the iommu_attach_group_handle().
*/
int iommu_replace_group_handle(struct iommu_group *group,
struct iommu_domain *new_domain,
struct iommu_attach_handle *handle)
{
- void *curr;
+ void *curr, *entry;
int ret;
- if (!new_domain)
+ if (!new_domain || !handle)
return -EINVAL;
mutex_lock(&group->mutex);
- if (handle) {
- ret = xa_reserve(&group->pasid_array, IOMMU_NO_PASID, GFP_KERNEL);
- if (ret)
- goto err_unlock;
- handle->domain = new_domain;
- }
+ entry = iommu_make_pasid_array_entry(new_domain, handle);
+ ret = xa_reserve(&group->pasid_array, IOMMU_NO_PASID, GFP_KERNEL);
+ if (ret)
+ goto err_unlock;
ret = __iommu_group_set_domain(group, new_domain);
if (ret)
goto err_release;
- curr = xa_store(&group->pasid_array, IOMMU_NO_PASID, handle, GFP_KERNEL);
+ curr = xa_store(&group->pasid_array, IOMMU_NO_PASID, entry, GFP_KERNEL);
WARN_ON(xa_is_err(curr));
mutex_unlock(&group->mutex);
@@ -3596,3 +3639,32 @@ err_unlock:
return ret;
}
EXPORT_SYMBOL_NS_GPL(iommu_replace_group_handle, "IOMMUFD_INTERNAL");
+
+#if IS_ENABLED(CONFIG_IRQ_MSI_IOMMU)
+/**
+ * iommu_dma_prepare_msi() - Map the MSI page in the IOMMU domain
+ * @desc: MSI descriptor, will store the MSI page
+ * @msi_addr: MSI target address to be mapped
+ *
+ * The implementation of sw_msi() should take msi_addr and map it to
+ * an IOVA in the domain and call msi_desc_set_iommu_msi_iova() with the
+ * mapping information.
+ *
+ * Return: 0 on success or negative error code if the mapping failed.
+ */
+int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr)
+{
+ struct device *dev = msi_desc_to_dev(desc);
+ struct iommu_group *group = dev->iommu_group;
+ int ret = 0;
+
+ if (!group)
+ return 0;
+
+ mutex_lock(&group->mutex);
+ if (group->domain && group->domain->sw_msi)
+ ret = group->domain->sw_msi(group->domain, desc, msi_addr);
+ mutex_unlock(&group->mutex);
+ return ret;
+}
+#endif /* CONFIG_IRQ_MSI_IOMMU */
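group->pasid_array entries are now tagged pointers: the same slot can hold either a bare domain (IOMMU_PASID_ARRAY_DOMAIN) or an attach handle (IOMMU_PASID_ARRAY_HANDLE), and readers such as iommu_attach_handle_get() check the tag before dereferencing. The same idea in plain C, using the low bit of an aligned pointer as the tag (illustrative only; the kernel uses xa_tag_pointer()/xa_untag_pointer() for this):

#include <stdint.h>
#include <stdio.h>

/* illustrative low-bit tagging of aligned pointers, like xa_tag_pointer() */
enum { TAG_DOMAIN = 0, TAG_HANDLE = 1 };

struct domain { int id; };
struct handle { struct domain *domain; };

static void *tag_pointer(void *p, unsigned int tag)
{
        return (void *)((uintptr_t)p | tag);
}

static unsigned int pointer_tag(void *entry)
{
        return (uintptr_t)entry & 1;
}

static void *untag_pointer(void *entry)
{
        return (void *)((uintptr_t)entry & ~(uintptr_t)1);
}

int main(void)
{
        struct domain dom = { .id = 7 };
        struct handle h = { .domain = &dom };
        void *slot_domain = tag_pointer(&dom, TAG_DOMAIN);
        void *slot_handle = tag_pointer(&h, TAG_HANDLE);

        /* a reader checks the tag before deciding what the entry means */
        if (pointer_tag(slot_handle) == TAG_HANDLE) {
                struct handle *hp = untag_pointer(slot_handle);
                printf("handle -> domain %d\n", hp->domain->id);
        }
        if (pointer_tag(slot_domain) == TAG_DOMAIN) {
                struct domain *dp = untag_pointer(slot_domain);
                printf("bare domain %d\n", dp->id);
        }
        return 0;
}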
diff --git a/drivers/iommu/iommufd/device.c b/drivers/iommu/iommufd/device.c
index dfd0898fb6c1..4e107f69f951 100644
--- a/drivers/iommu/iommufd/device.c
+++ b/drivers/iommu/iommufd/device.c
@@ -5,6 +5,7 @@
#include <linux/iommufd.h>
#include <linux/slab.h>
#include <uapi/linux/iommufd.h>
+#include <linux/msi.h>
#include "../iommu-priv.h"
#include "io_pagetable.h"
@@ -293,36 +294,152 @@ u32 iommufd_device_to_id(struct iommufd_device *idev)
}
EXPORT_SYMBOL_NS_GPL(iommufd_device_to_id, "IOMMUFD");
+/*
+ * Get an iommufd_sw_msi_map for the MSI physical address requested by the irq
+ * layer. The mapping to IOVA is global to the iommufd file descriptor, every
+ * domain that is attached to a device using the same MSI parameters will use
+ * the same IOVA.
+ */
+static __maybe_unused struct iommufd_sw_msi_map *
+iommufd_sw_msi_get_map(struct iommufd_ctx *ictx, phys_addr_t msi_addr,
+ phys_addr_t sw_msi_start)
+{
+ struct iommufd_sw_msi_map *cur;
+ unsigned int max_pgoff = 0;
+
+ lockdep_assert_held(&ictx->sw_msi_lock);
+
+ list_for_each_entry(cur, &ictx->sw_msi_list, sw_msi_item) {
+ if (cur->sw_msi_start != sw_msi_start)
+ continue;
+ max_pgoff = max(max_pgoff, cur->pgoff + 1);
+ if (cur->msi_addr == msi_addr)
+ return cur;
+ }
+
+ if (ictx->sw_msi_id >=
+ BITS_PER_BYTE * sizeof_field(struct iommufd_sw_msi_maps, bitmap))
+ return ERR_PTR(-EOVERFLOW);
+
+ cur = kzalloc(sizeof(*cur), GFP_KERNEL);
+ if (!cur)
+ return ERR_PTR(-ENOMEM);
+
+ cur->sw_msi_start = sw_msi_start;
+ cur->msi_addr = msi_addr;
+ cur->pgoff = max_pgoff;
+ cur->id = ictx->sw_msi_id++;
+ list_add_tail(&cur->sw_msi_item, &ictx->sw_msi_list);
+ return cur;
+}
+
+static int iommufd_sw_msi_install(struct iommufd_ctx *ictx,
+ struct iommufd_hwpt_paging *hwpt_paging,
+ struct iommufd_sw_msi_map *msi_map)
+{
+ unsigned long iova;
+
+ lockdep_assert_held(&ictx->sw_msi_lock);
+
+ iova = msi_map->sw_msi_start + msi_map->pgoff * PAGE_SIZE;
+ if (!test_bit(msi_map->id, hwpt_paging->present_sw_msi.bitmap)) {
+ int rc;
+
+ rc = iommu_map(hwpt_paging->common.domain, iova,
+ msi_map->msi_addr, PAGE_SIZE,
+ IOMMU_WRITE | IOMMU_READ | IOMMU_MMIO,
+ GFP_KERNEL_ACCOUNT);
+ if (rc)
+ return rc;
+ __set_bit(msi_map->id, hwpt_paging->present_sw_msi.bitmap);
+ }
+ return 0;
+}
+
+/*
+ * Called by the irq code if the platform translates the MSI address through the
+ * IOMMU. msi_addr is the physical address of the MSI page. iommufd will
+ * allocate a fd global iova for the physical page that is the same on all
+ * domains and devices.
+ */
+#ifdef CONFIG_IRQ_MSI_IOMMU
+int iommufd_sw_msi(struct iommu_domain *domain, struct msi_desc *desc,
+ phys_addr_t msi_addr)
+{
+ struct device *dev = msi_desc_to_dev(desc);
+ struct iommufd_hwpt_paging *hwpt_paging;
+ struct iommu_attach_handle *raw_handle;
+ struct iommufd_attach_handle *handle;
+ struct iommufd_sw_msi_map *msi_map;
+ struct iommufd_ctx *ictx;
+ unsigned long iova;
+ int rc;
+
+ /*
+ * It is safe to call iommu_attach_handle_get() here because the iommu
+ * core code invokes this under the group mutex which also prevents any
+ * change of the attach handle for the duration of this function.
+ */
+ iommu_group_mutex_assert(dev);
+
+ raw_handle =
+ iommu_attach_handle_get(dev->iommu_group, IOMMU_NO_PASID, 0);
+ if (IS_ERR(raw_handle))
+ return 0;
+ hwpt_paging = find_hwpt_paging(domain->iommufd_hwpt);
+
+ handle = to_iommufd_handle(raw_handle);
+ /* No IOMMU_RESV_SW_MSI means no change to the msi_msg */
+ if (handle->idev->igroup->sw_msi_start == PHYS_ADDR_MAX)
+ return 0;
+
+ ictx = handle->idev->ictx;
+ guard(mutex)(&ictx->sw_msi_lock);
+ /*
+	 * The input msi_addr is the exact byte offset of the MSI doorbell; we
+	 * assume the caller has checked that it is contained within an MMIO region
+	 * that is secure to map at PAGE_SIZE.
+ */
+ msi_map = iommufd_sw_msi_get_map(handle->idev->ictx,
+ msi_addr & PAGE_MASK,
+ handle->idev->igroup->sw_msi_start);
+ if (IS_ERR(msi_map))
+ return PTR_ERR(msi_map);
+
+ rc = iommufd_sw_msi_install(ictx, hwpt_paging, msi_map);
+ if (rc)
+ return rc;
+ __set_bit(msi_map->id, handle->idev->igroup->required_sw_msi.bitmap);
+
+ iova = msi_map->sw_msi_start + msi_map->pgoff * PAGE_SIZE;
+ msi_desc_set_iommu_msi_iova(desc, iova, PAGE_SHIFT);
+ return 0;
+}
+#endif
+
static int iommufd_group_setup_msi(struct iommufd_group *igroup,
struct iommufd_hwpt_paging *hwpt_paging)
{
- phys_addr_t sw_msi_start = igroup->sw_msi_start;
- int rc;
+ struct iommufd_ctx *ictx = igroup->ictx;
+ struct iommufd_sw_msi_map *cur;
+
+ if (igroup->sw_msi_start == PHYS_ADDR_MAX)
+ return 0;
/*
- * If the IOMMU driver gives a IOMMU_RESV_SW_MSI then it is asking us to
- * call iommu_get_msi_cookie() on its behalf. This is necessary to setup
- * the MSI window so iommu_dma_prepare_msi() can install pages into our
- * domain after request_irq(). If it is not done interrupts will not
- * work on this domain.
- *
- * FIXME: This is conceptually broken for iommufd since we want to allow
- * userspace to change the domains, eg switch from an identity IOAS to a
- * DMA IOAS. There is currently no way to create a MSI window that
- * matches what the IRQ layer actually expects in a newly created
- * domain.
+ * Install all the MSI pages the device has been using into the domain
*/
- if (sw_msi_start != PHYS_ADDR_MAX && !hwpt_paging->msi_cookie) {
- rc = iommu_get_msi_cookie(hwpt_paging->common.domain,
- sw_msi_start);
+ guard(mutex)(&ictx->sw_msi_lock);
+ list_for_each_entry(cur, &ictx->sw_msi_list, sw_msi_item) {
+ int rc;
+
+ if (cur->sw_msi_start != igroup->sw_msi_start ||
+ !test_bit(cur->id, igroup->required_sw_msi.bitmap))
+ continue;
+
+ rc = iommufd_sw_msi_install(ictx, hwpt_paging, cur);
if (rc)
return rc;
-
- /*
- * iommu_get_msi_cookie() can only be called once per domain,
- * it returns -EBUSY on later calls.
- */
- hwpt_paging->msi_cookie = true;
}
return 0;
}
@@ -352,6 +469,111 @@ iommufd_device_attach_reserved_iova(struct iommufd_device *idev,
return 0;
}
+/* The device attach/detach/replace helpers for attach_handle */
+
+static int iommufd_hwpt_attach_device(struct iommufd_hw_pagetable *hwpt,
+ struct iommufd_device *idev)
+{
+ struct iommufd_attach_handle *handle;
+ int rc;
+
+ lockdep_assert_held(&idev->igroup->lock);
+
+ handle = kzalloc(sizeof(*handle), GFP_KERNEL);
+ if (!handle)
+ return -ENOMEM;
+
+ if (hwpt->fault) {
+ rc = iommufd_fault_iopf_enable(idev);
+ if (rc)
+ goto out_free_handle;
+ }
+
+ handle->idev = idev;
+ rc = iommu_attach_group_handle(hwpt->domain, idev->igroup->group,
+ &handle->handle);
+ if (rc)
+ goto out_disable_iopf;
+
+ return 0;
+
+out_disable_iopf:
+ if (hwpt->fault)
+ iommufd_fault_iopf_disable(idev);
+out_free_handle:
+ kfree(handle);
+ return rc;
+}
+
+static struct iommufd_attach_handle *
+iommufd_device_get_attach_handle(struct iommufd_device *idev)
+{
+ struct iommu_attach_handle *handle;
+
+ lockdep_assert_held(&idev->igroup->lock);
+
+ handle =
+ iommu_attach_handle_get(idev->igroup->group, IOMMU_NO_PASID, 0);
+ if (IS_ERR(handle))
+ return NULL;
+ return to_iommufd_handle(handle);
+}
+
+static void iommufd_hwpt_detach_device(struct iommufd_hw_pagetable *hwpt,
+ struct iommufd_device *idev)
+{
+ struct iommufd_attach_handle *handle;
+
+ handle = iommufd_device_get_attach_handle(idev);
+ iommu_detach_group_handle(hwpt->domain, idev->igroup->group);
+ if (hwpt->fault) {
+ iommufd_auto_response_faults(hwpt, handle);
+ iommufd_fault_iopf_disable(idev);
+ }
+ kfree(handle);
+}
+
+static int iommufd_hwpt_replace_device(struct iommufd_device *idev,
+ struct iommufd_hw_pagetable *hwpt,
+ struct iommufd_hw_pagetable *old)
+{
+ struct iommufd_attach_handle *handle, *old_handle =
+ iommufd_device_get_attach_handle(idev);
+ int rc;
+
+ handle = kzalloc(sizeof(*handle), GFP_KERNEL);
+ if (!handle)
+ return -ENOMEM;
+
+ if (hwpt->fault && !old->fault) {
+ rc = iommufd_fault_iopf_enable(idev);
+ if (rc)
+ goto out_free_handle;
+ }
+
+ handle->idev = idev;
+ rc = iommu_replace_group_handle(idev->igroup->group, hwpt->domain,
+ &handle->handle);
+ if (rc)
+ goto out_disable_iopf;
+
+ if (old->fault) {
+ iommufd_auto_response_faults(hwpt, old_handle);
+ if (!hwpt->fault)
+ iommufd_fault_iopf_disable(idev);
+ }
+ kfree(old_handle);
+
+ return 0;
+
+out_disable_iopf:
+ if (hwpt->fault && !old->fault)
+ iommufd_fault_iopf_disable(idev);
+out_free_handle:
+ kfree(handle);
+ return rc;
+}
+
int iommufd_hw_pagetable_attach(struct iommufd_hw_pagetable *hwpt,
struct iommufd_device *idev)
{
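iommufd now keeps a per-file-descriptor list of MSI doorbell pages: within a given IOMMU_RESV_SW_MSI window each distinct doorbell physical page is assigned the next page slot, so every domain attached through that fd maps the page at the same IOVA, and iommufd_group_setup_msi() can replay the required mappings into a newly attached domain. A self-contained sketch of the slot assignment done by iommufd_sw_msi_get_map(), with hypothetical names and no locking:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE 4096u

/* hypothetical analogue of iommufd_sw_msi_map: one entry per doorbell page */
struct msi_map {
        struct msi_map *next;
        uint64_t window_start;  /* IOMMU_RESV_SW_MSI base for this group */
        uint64_t phys_page;     /* doorbell physical page */
        unsigned int pgoff;     /* page slot inside the window */
};

static struct msi_map *map_list;

static struct msi_map *get_map(uint64_t window_start, uint64_t phys_page)
{
        unsigned int max_pgoff = 0;
        struct msi_map *cur;

        for (cur = map_list; cur; cur = cur->next) {
                if (cur->window_start != window_start)
                        continue;
                if (cur->pgoff + 1 > max_pgoff)
                        max_pgoff = cur->pgoff + 1;
                if (cur->phys_page == phys_page)
                        return cur;     /* same doorbell page: reuse its slot */
        }

        cur = calloc(1, sizeof(*cur));
        if (!cur)
                exit(1);
        cur->window_start = window_start;
        cur->phys_page = phys_page;
        cur->pgoff = max_pgoff;         /* next free page in the window */
        cur->next = map_list;
        map_list = cur;
        return cur;
}

int main(void)
{
        uint64_t win = 0x8000000;
        struct msi_map *a = get_map(win, 0xfee00000);
        struct msi_map *b = get_map(win, 0xfef00000);
        struct msi_map *c = get_map(win, 0xfee00000);   /* reuses a's slot */

        printf("iova a=%#llx b=%#llx c=%#llx\n",
               (unsigned long long)(win + a->pgoff * PAGE_SIZE),
               (unsigned long long)(win + b->pgoff * PAGE_SIZE),
               (unsigned long long)(win + c->pgoff * PAGE_SIZE));
        return 0;
}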
diff --git a/drivers/iommu/iommufd/fault.c b/drivers/iommu/iommufd/fault.c
index d9a937450e55..c48d72c9668c 100644
--- a/drivers/iommu/iommufd/fault.c
+++ b/drivers/iommu/iommufd/fault.c
@@ -17,7 +17,7 @@
#include "../iommu-priv.h"
#include "iommufd_private.h"
-static int iommufd_fault_iopf_enable(struct iommufd_device *idev)
+int iommufd_fault_iopf_enable(struct iommufd_device *idev)
{
struct device *dev = idev->dev;
int ret;
@@ -50,7 +50,7 @@ static int iommufd_fault_iopf_enable(struct iommufd_device *idev)
return ret;
}
-static void iommufd_fault_iopf_disable(struct iommufd_device *idev)
+void iommufd_fault_iopf_disable(struct iommufd_device *idev)
{
mutex_lock(&idev->iopf_lock);
if (!WARN_ON(idev->iopf_enabled == 0)) {
@@ -60,46 +60,8 @@ static void iommufd_fault_iopf_disable(struct iommufd_device *idev)
mutex_unlock(&idev->iopf_lock);
}
-static int __fault_domain_attach_dev(struct iommufd_hw_pagetable *hwpt,
- struct iommufd_device *idev)
-{
- struct iommufd_attach_handle *handle;
- int ret;
-
- handle = kzalloc(sizeof(*handle), GFP_KERNEL);
- if (!handle)
- return -ENOMEM;
-
- handle->idev = idev;
- ret = iommu_attach_group_handle(hwpt->domain, idev->igroup->group,
- &handle->handle);
- if (ret)
- kfree(handle);
-
- return ret;
-}
-
-int iommufd_fault_domain_attach_dev(struct iommufd_hw_pagetable *hwpt,
- struct iommufd_device *idev)
-{
- int ret;
-
- if (!hwpt->fault)
- return -EINVAL;
-
- ret = iommufd_fault_iopf_enable(idev);
- if (ret)
- return ret;
-
- ret = __fault_domain_attach_dev(hwpt, idev);
- if (ret)
- iommufd_fault_iopf_disable(idev);
-
- return ret;
-}
-
-static void iommufd_auto_response_faults(struct iommufd_hw_pagetable *hwpt,
- struct iommufd_attach_handle *handle)
+void iommufd_auto_response_faults(struct iommufd_hw_pagetable *hwpt,
+ struct iommufd_attach_handle *handle)
{
struct iommufd_fault *fault = hwpt->fault;
struct iopf_group *group, *next;
@@ -135,88 +97,6 @@ static void iommufd_auto_response_faults(struct iommufd_hw_pagetable *hwpt,
mutex_unlock(&fault->mutex);
}
-static struct iommufd_attach_handle *
-iommufd_device_get_attach_handle(struct iommufd_device *idev)
-{
- struct iommu_attach_handle *handle;
-
- handle = iommu_attach_handle_get(idev->igroup->group, IOMMU_NO_PASID, 0);
- if (IS_ERR(handle))
- return NULL;
-
- return to_iommufd_handle(handle);
-}
-
-void iommufd_fault_domain_detach_dev(struct iommufd_hw_pagetable *hwpt,
- struct iommufd_device *idev)
-{
- struct iommufd_attach_handle *handle;
-
- handle = iommufd_device_get_attach_handle(idev);
- iommu_detach_group_handle(hwpt->domain, idev->igroup->group);
- iommufd_auto_response_faults(hwpt, handle);
- iommufd_fault_iopf_disable(idev);
- kfree(handle);
-}
-
-static int __fault_domain_replace_dev(struct iommufd_device *idev,
- struct iommufd_hw_pagetable *hwpt,
- struct iommufd_hw_pagetable *old)
-{
- struct iommufd_attach_handle *handle, *curr = NULL;
- int ret;
-
- if (old->fault)
- curr = iommufd_device_get_attach_handle(idev);
-
- if (hwpt->fault) {
- handle = kzalloc(sizeof(*handle), GFP_KERNEL);
- if (!handle)
- return -ENOMEM;
-
- handle->idev = idev;
- ret = iommu_replace_group_handle(idev->igroup->group,
- hwpt->domain, &handle->handle);
- } else {
- ret = iommu_replace_group_handle(idev->igroup->group,
- hwpt->domain, NULL);
- }
-
- if (!ret && curr) {
- iommufd_auto_response_faults(old, curr);
- kfree(curr);
- }
-
- return ret;
-}
-
-int iommufd_fault_domain_replace_dev(struct iommufd_device *idev,
- struct iommufd_hw_pagetable *hwpt,
- struct iommufd_hw_pagetable *old)
-{
- bool iopf_off = !hwpt->fault && old->fault;
- bool iopf_on = hwpt->fault && !old->fault;
- int ret;
-
- if (iopf_on) {
- ret = iommufd_fault_iopf_enable(idev);
- if (ret)
- return ret;
- }
-
- ret = __fault_domain_replace_dev(idev, hwpt, old);
- if (ret) {
- if (iopf_on)
- iommufd_fault_iopf_disable(idev);
- return ret;
- }
-
- if (iopf_off)
- iommufd_fault_iopf_disable(idev);
-
- return 0;
-}
-
void iommufd_fault_destroy(struct iommufd_object *obj)
{
struct iommufd_fault *fault = container_of(obj, struct iommufd_fault, obj);
@@ -449,7 +329,7 @@ int iommufd_fault_iopf_handler(struct iopf_group *group)
struct iommufd_hw_pagetable *hwpt;
struct iommufd_fault *fault;
- hwpt = group->attach_handle->domain->fault_data;
+ hwpt = group->attach_handle->domain->iommufd_hwpt;
fault = hwpt->fault;
spin_lock(&fault->lock);
diff --git a/drivers/iommu/iommufd/hw_pagetable.c b/drivers/iommu/iommufd/hw_pagetable.c
index 598be26a14e2..7de6e914232e 100644
--- a/drivers/iommu/iommufd/hw_pagetable.c
+++ b/drivers/iommu/iommufd/hw_pagetable.c
@@ -156,6 +156,7 @@ iommufd_hwpt_paging_alloc(struct iommufd_ctx *ictx, struct iommufd_ioas *ioas,
goto out_abort;
}
}
+ iommu_domain_set_sw_msi(hwpt->domain, iommufd_sw_msi);
/*
* Set the coherency mode before we do iopt_table_add_domain() as some
@@ -251,6 +252,7 @@ iommufd_hwpt_nested_alloc(struct iommufd_ctx *ictx,
goto out_abort;
}
hwpt->domain->owner = ops;
+ iommu_domain_set_sw_msi(hwpt->domain, iommufd_sw_msi);
if (WARN_ON_ONCE(hwpt->domain->type != IOMMU_DOMAIN_NESTED)) {
rc = -EINVAL;
@@ -307,6 +309,7 @@ iommufd_viommu_alloc_hwpt_nested(struct iommufd_viommu *viommu, u32 flags,
goto out_abort;
}
hwpt->domain->owner = viommu->iommu_dev->ops;
+ iommu_domain_set_sw_msi(hwpt->domain, iommufd_sw_msi);
if (WARN_ON_ONCE(hwpt->domain->type != IOMMU_DOMAIN_NESTED)) {
rc = -EINVAL;
@@ -406,10 +409,10 @@ int iommufd_hwpt_alloc(struct iommufd_ucmd *ucmd)
}
hwpt->fault = fault;
hwpt->domain->iopf_handler = iommufd_fault_iopf_handler;
- hwpt->domain->fault_data = hwpt;
refcount_inc(&fault->obj.users);
iommufd_put_object(ucmd->ictx, &fault->obj);
}
+ hwpt->domain->iommufd_hwpt = hwpt;
cmd->out_hwpt_id = hwpt->obj.id;
rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
diff --git a/drivers/iommu/iommufd/iommufd_private.h b/drivers/iommu/iommufd/iommufd_private.h
index 0b1bafc7fd99..246297452a44 100644
--- a/drivers/iommu/iommufd/iommufd_private.h
+++ b/drivers/iommu/iommufd/iommufd_private.h
@@ -19,6 +19,22 @@ struct iommu_group;
struct iommu_option;
struct iommufd_device;
+struct iommufd_sw_msi_map {
+ struct list_head sw_msi_item;
+ phys_addr_t sw_msi_start;
+ phys_addr_t msi_addr;
+ unsigned int pgoff;
+ unsigned int id;
+};
+
+/* Bitmap of struct iommufd_sw_msi_map::id */
+struct iommufd_sw_msi_maps {
+ DECLARE_BITMAP(bitmap, 64);
+};
+
+int iommufd_sw_msi(struct iommu_domain *domain, struct msi_desc *desc,
+ phys_addr_t msi_addr);
+
struct iommufd_ctx {
struct file *file;
struct xarray objects;
@@ -26,6 +42,10 @@ struct iommufd_ctx {
wait_queue_head_t destroy_wait;
struct rw_semaphore ioas_creation_lock;
+ struct mutex sw_msi_lock;
+ struct list_head sw_msi_list;
+ unsigned int sw_msi_id;
+
u8 account_mode;
/* Compatibility with VFIO no iommu */
u8 no_iommu_mode;
@@ -283,10 +303,10 @@ struct iommufd_hwpt_paging {
struct iommufd_ioas *ioas;
bool auto_domain : 1;
bool enforce_cache_coherency : 1;
- bool msi_cookie : 1;
bool nest_parent : 1;
/* Head at iommufd_ioas::hwpt_list */
struct list_head hwpt_item;
+ struct iommufd_sw_msi_maps present_sw_msi;
};
struct iommufd_hwpt_nested {
@@ -383,6 +403,7 @@ struct iommufd_group {
struct iommu_group *group;
struct iommufd_hw_pagetable *hwpt;
struct list_head device_list;
+ struct iommufd_sw_msi_maps required_sw_msi;
phys_addr_t sw_msi_start;
};
@@ -496,43 +517,10 @@ int iommufd_fault_alloc(struct iommufd_ucmd *ucmd);
void iommufd_fault_destroy(struct iommufd_object *obj);
int iommufd_fault_iopf_handler(struct iopf_group *group);
-int iommufd_fault_domain_attach_dev(struct iommufd_hw_pagetable *hwpt,
- struct iommufd_device *idev);
-void iommufd_fault_domain_detach_dev(struct iommufd_hw_pagetable *hwpt,
- struct iommufd_device *idev);
-int iommufd_fault_domain_replace_dev(struct iommufd_device *idev,
- struct iommufd_hw_pagetable *hwpt,
- struct iommufd_hw_pagetable *old);
-
-static inline int iommufd_hwpt_attach_device(struct iommufd_hw_pagetable *hwpt,
- struct iommufd_device *idev)
-{
- if (hwpt->fault)
- return iommufd_fault_domain_attach_dev(hwpt, idev);
-
- return iommu_attach_group(hwpt->domain, idev->igroup->group);
-}
-
-static inline void iommufd_hwpt_detach_device(struct iommufd_hw_pagetable *hwpt,
- struct iommufd_device *idev)
-{
- if (hwpt->fault) {
- iommufd_fault_domain_detach_dev(hwpt, idev);
- return;
- }
-
- iommu_detach_group(hwpt->domain, idev->igroup->group);
-}
-
-static inline int iommufd_hwpt_replace_device(struct iommufd_device *idev,
- struct iommufd_hw_pagetable *hwpt,
- struct iommufd_hw_pagetable *old)
-{
- if (old->fault || hwpt->fault)
- return iommufd_fault_domain_replace_dev(idev, hwpt, old);
-
- return iommu_group_replace_domain(idev->igroup->group, hwpt->domain);
-}
+int iommufd_fault_iopf_enable(struct iommufd_device *idev);
+void iommufd_fault_iopf_disable(struct iommufd_device *idev);
+void iommufd_auto_response_faults(struct iommufd_hw_pagetable *hwpt,
+ struct iommufd_attach_handle *handle);
static inline struct iommufd_viommu *
iommufd_get_viommu(struct iommufd_ucmd *ucmd, u32 id)
diff --git a/drivers/iommu/iommufd/main.c b/drivers/iommu/iommufd/main.c
index ccf616462a1c..b6fa9fd11bc1 100644
--- a/drivers/iommu/iommufd/main.c
+++ b/drivers/iommu/iommufd/main.c
@@ -227,6 +227,8 @@ static int iommufd_fops_open(struct inode *inode, struct file *filp)
xa_init(&ictx->groups);
ictx->file = filp;
init_waitqueue_head(&ictx->destroy_wait);
+ mutex_init(&ictx->sw_msi_lock);
+ INIT_LIST_HEAD(&ictx->sw_msi_list);
filp->private_data = ictx;
return 0;
}
@@ -234,6 +236,8 @@ static int iommufd_fops_open(struct inode *inode, struct file *filp)
static int iommufd_fops_release(struct inode *inode, struct file *filp)
{
struct iommufd_ctx *ictx = filp->private_data;
+ struct iommufd_sw_msi_map *next;
+ struct iommufd_sw_msi_map *cur;
struct iommufd_object *obj;
/*
@@ -262,6 +266,11 @@ static int iommufd_fops_release(struct inode *inode, struct file *filp)
break;
}
WARN_ON(!xa_empty(&ictx->groups));
+
+ mutex_destroy(&ictx->sw_msi_lock);
+ list_for_each_entry_safe(cur, next, &ictx->sw_msi_list, sw_msi_item)
+ kfree(cur);
+
kfree(ictx);
return 0;
}
diff --git a/drivers/iommu/mtk_iommu_v1.c b/drivers/iommu/mtk_iommu_v1.c
index a565b9e40f4a..66824982e05f 100644
--- a/drivers/iommu/mtk_iommu_v1.c
+++ b/drivers/iommu/mtk_iommu_v1.c
@@ -27,11 +27,20 @@
#include <linux/spinlock.h>
#include <linux/string_choices.h>
#include <asm/barrier.h>
-#include <asm/dma-iommu.h>
#include <dt-bindings/memory/mtk-memory-port.h>
#include <dt-bindings/memory/mt2701-larb-port.h>
#include <soc/mediatek/smi.h>
+#if defined(CONFIG_ARM)
+#include <asm/dma-iommu.h>
+#else
+#define arm_iommu_create_mapping(...) NULL
+#define arm_iommu_attach_device(...) -ENODEV
+struct dma_iommu_mapping {
+ struct iommu_domain *domain;
+};
+#endif
+
#define REG_MMU_PT_BASE_ADDR 0x000
#define F_ALL_INVLD 0x2
@@ -446,22 +455,13 @@ static int mtk_iommu_v1_create_mapping(struct device *dev,
static struct iommu_device *mtk_iommu_v1_probe_device(struct device *dev)
{
- struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
+ struct iommu_fwspec *fwspec = NULL;
struct of_phandle_args iommu_spec;
struct mtk_iommu_v1_data *data;
int err, idx = 0, larbid, larbidx;
struct device_link *link;
struct device *larbdev;
- /*
- * In the deferred case, free the existed fwspec.
- * Always initialize the fwspec internally.
- */
- if (fwspec) {
- iommu_fwspec_free(dev);
- fwspec = dev_iommu_fwspec_get(dev);
- }
-
while (!of_parse_phandle_with_args(dev->of_node, "iommus",
"#iommu-cells",
idx, &iommu_spec)) {
@@ -476,6 +476,9 @@ static struct iommu_device *mtk_iommu_v1_probe_device(struct device *dev)
idx++;
}
+ if (!fwspec)
+ return ERR_PTR(-ENODEV);
+
data = dev_iommu_priv_get(dev);
/* Link the consumer device with the smi-larb device(supplier) */
diff --git a/drivers/iommu/of_iommu.c b/drivers/iommu/of_iommu.c
index 97987cd78da9..6b989a62def2 100644
--- a/drivers/iommu/of_iommu.c
+++ b/drivers/iommu/of_iommu.c
@@ -116,6 +116,7 @@ static void of_pci_check_device_ats(struct device *dev, struct device_node *np)
int of_iommu_configure(struct device *dev, struct device_node *master_np,
const u32 *id)
{
+ bool dev_iommu_present;
int err;
if (!master_np)
@@ -127,6 +128,7 @@ int of_iommu_configure(struct device *dev, struct device_node *master_np,
mutex_unlock(&iommu_probe_device_lock);
return 0;
}
+ dev_iommu_present = dev->iommu;
/*
* We don't currently walk up the tree looking for a parent IOMMU.
@@ -147,11 +149,18 @@ int of_iommu_configure(struct device *dev, struct device_node *master_np,
err = of_iommu_configure_device(master_np, dev, id);
}
- if (err)
+ if (err && dev_iommu_present)
iommu_fwspec_free(dev);
+ else if (err && dev->iommu)
+ dev_iommu_free(dev);
mutex_unlock(&iommu_probe_device_lock);
- if (!err && dev->bus)
+ /*
+ * If we're not on the iommu_probe_device() path (as indicated by the
+ * initial dev->iommu) then try to simulate it. This should no longer
+ * happen unless of_dma_configure() is being misused outside bus code.
+ */
+ if (!err && dev->bus && !dev_iommu_present)
err = iommu_probe_device(dev);
if (err && err != -EPROBE_DEFER)
diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c
index 323cc665c357..af4cc91b2bbf 100644
--- a/drivers/iommu/rockchip-iommu.c
+++ b/drivers/iommu/rockchip-iommu.c
@@ -88,6 +88,7 @@ struct rk_iommu_domain {
dma_addr_t dt_dma;
spinlock_t iommus_lock; /* lock for iommus list */
spinlock_t dt_lock; /* lock for modifying page directory table */
+ struct device *dma_dev;
struct iommu_domain domain;
};
@@ -123,7 +124,6 @@ struct rk_iommudata {
struct rk_iommu *iommu;
};
-static struct device *dma_dev;
static const struct rk_iommu_ops *rk_ops;
static struct iommu_domain rk_identity_domain;
@@ -132,7 +132,7 @@ static inline void rk_table_flush(struct rk_iommu_domain *dom, dma_addr_t dma,
{
size_t size = count * sizeof(u32); /* count of u32 entry */
- dma_sync_single_for_device(dma_dev, dma, size, DMA_TO_DEVICE);
+ dma_sync_single_for_device(dom->dma_dev, dma, size, DMA_TO_DEVICE);
}
static struct rk_iommu_domain *to_rk_domain(struct iommu_domain *dom)
@@ -734,9 +734,9 @@ static u32 *rk_dte_get_page_table(struct rk_iommu_domain *rk_domain,
if (!page_table)
return ERR_PTR(-ENOMEM);
- pt_dma = dma_map_single(dma_dev, page_table, SPAGE_SIZE, DMA_TO_DEVICE);
- if (dma_mapping_error(dma_dev, pt_dma)) {
- dev_err(dma_dev, "DMA mapping error while allocating page table\n");
+ pt_dma = dma_map_single(rk_domain->dma_dev, page_table, SPAGE_SIZE, DMA_TO_DEVICE);
+ if (dma_mapping_error(rk_domain->dma_dev, pt_dma)) {
+ dev_err(rk_domain->dma_dev, "DMA mapping error while allocating page table\n");
iommu_free_page(page_table);
return ERR_PTR(-ENOMEM);
}
@@ -1051,9 +1051,7 @@ static int rk_iommu_attach_device(struct iommu_domain *domain,
static struct iommu_domain *rk_iommu_domain_alloc_paging(struct device *dev)
{
struct rk_iommu_domain *rk_domain;
-
- if (!dma_dev)
- return NULL;
+ struct rk_iommu *iommu;
rk_domain = kzalloc(sizeof(*rk_domain), GFP_KERNEL);
if (!rk_domain)
@@ -1068,10 +1066,12 @@ static struct iommu_domain *rk_iommu_domain_alloc_paging(struct device *dev)
if (!rk_domain->dt)
goto err_free_domain;
- rk_domain->dt_dma = dma_map_single(dma_dev, rk_domain->dt,
+ iommu = rk_iommu_from_dev(dev);
+ rk_domain->dma_dev = iommu->dev;
+ rk_domain->dt_dma = dma_map_single(rk_domain->dma_dev, rk_domain->dt,
SPAGE_SIZE, DMA_TO_DEVICE);
- if (dma_mapping_error(dma_dev, rk_domain->dt_dma)) {
- dev_err(dma_dev, "DMA map error for DT\n");
+ if (dma_mapping_error(rk_domain->dma_dev, rk_domain->dt_dma)) {
+ dev_err(rk_domain->dma_dev, "DMA map error for DT\n");
goto err_free_dt;
}
@@ -1105,13 +1105,13 @@ static void rk_iommu_domain_free(struct iommu_domain *domain)
if (rk_dte_is_pt_valid(dte)) {
phys_addr_t pt_phys = rk_ops->pt_address(dte);
u32 *page_table = phys_to_virt(pt_phys);
- dma_unmap_single(dma_dev, pt_phys,
+ dma_unmap_single(rk_domain->dma_dev, pt_phys,
SPAGE_SIZE, DMA_TO_DEVICE);
iommu_free_page(page_table);
}
}
- dma_unmap_single(dma_dev, rk_domain->dt_dma,
+ dma_unmap_single(rk_domain->dma_dev, rk_domain->dt_dma,
SPAGE_SIZE, DMA_TO_DEVICE);
iommu_free_page(rk_domain->dt);
@@ -1148,12 +1148,12 @@ static int rk_iommu_of_xlate(struct device *dev,
struct platform_device *iommu_dev;
struct rk_iommudata *data;
- data = devm_kzalloc(dma_dev, sizeof(*data), GFP_KERNEL);
+ iommu_dev = of_find_device_by_node(args->np);
+
+ data = devm_kzalloc(&iommu_dev->dev, sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
- iommu_dev = of_find_device_by_node(args->np);
-
data->iommu = platform_get_drvdata(iommu_dev);
data->iommu->domain = &rk_identity_domain;
dev_iommu_priv_set(dev, data);
@@ -1256,22 +1256,6 @@ static int rk_iommu_probe(struct platform_device *pdev)
if (err)
return err;
- err = iommu_device_sysfs_add(&iommu->iommu, dev, NULL, dev_name(dev));
- if (err)
- goto err_unprepare_clocks;
-
- err = iommu_device_register(&iommu->iommu, &rk_iommu_ops, dev);
- if (err)
- goto err_remove_sysfs;
-
- /*
- * Use the first registered IOMMU device for domain to use with DMA
- * API, since a domain might not physically correspond to a single
- * IOMMU device..
- */
- if (!dma_dev)
- dma_dev = &pdev->dev;
-
pm_runtime_enable(dev);
for (i = 0; i < iommu->num_irq; i++) {
@@ -1290,12 +1274,19 @@ static int rk_iommu_probe(struct platform_device *pdev)
dma_set_mask_and_coherent(dev, rk_ops->dma_bit_mask);
+ err = iommu_device_sysfs_add(&iommu->iommu, dev, NULL, dev_name(dev));
+ if (err)
+ goto err_pm_disable;
+
+ err = iommu_device_register(&iommu->iommu, &rk_iommu_ops, dev);
+ if (err)
+ goto err_remove_sysfs;
+
return 0;
-err_pm_disable:
- pm_runtime_disable(dev);
err_remove_sysfs:
iommu_device_sysfs_remove(&iommu->iommu);
-err_unprepare_clocks:
+err_pm_disable:
+ pm_runtime_disable(dev);
clk_bulk_unprepare(iommu->num_clocks, iommu->clocks);
return err;
}
diff --git a/drivers/iommu/s390-iommu.c b/drivers/iommu/s390-iommu.c
index fbdeded3d48b..e1c76e0f9c2b 100644
--- a/drivers/iommu/s390-iommu.c
+++ b/drivers/iommu/s390-iommu.c
@@ -16,7 +16,7 @@
#include "dma-iommu.h"
-static const struct iommu_ops s390_iommu_ops;
+static const struct iommu_ops s390_iommu_ops, s390_iommu_rtr_ops;
static struct kmem_cache *dma_region_table_cache;
static struct kmem_cache *dma_page_table_cache;
@@ -381,6 +381,46 @@ static void zdev_s390_domain_update(struct zpci_dev *zdev,
spin_unlock_irqrestore(&zdev->dom_lock, flags);
}
+static int s390_iommu_domain_reg_ioat(struct zpci_dev *zdev,
+ struct iommu_domain *domain, u8 *status)
+{
+ struct s390_domain *s390_domain;
+ int rc = 0;
+ u64 iota;
+
+ switch (domain->type) {
+ case IOMMU_DOMAIN_IDENTITY:
+ rc = zpci_register_ioat(zdev, 0, zdev->start_dma,
+ zdev->end_dma, 0, status);
+ break;
+ case IOMMU_DOMAIN_BLOCKED:
+ /* Nothing to do in this case */
+ break;
+ default:
+ s390_domain = to_s390_domain(domain);
+ iota = virt_to_phys(s390_domain->dma_table) |
+ ZPCI_IOTA_RTTO_FLAG;
+ rc = zpci_register_ioat(zdev, 0, zdev->start_dma,
+ zdev->end_dma, iota, status);
+ }
+
+ return rc;
+}
+
+int zpci_iommu_register_ioat(struct zpci_dev *zdev, u8 *status)
+{
+ unsigned long flags;
+ int rc;
+
+ spin_lock_irqsave(&zdev->dom_lock, flags);
+
+ rc = s390_iommu_domain_reg_ioat(zdev, zdev->s390_domain, status);
+
+ spin_unlock_irqrestore(&zdev->dom_lock, flags);
+
+ return rc;
+}
+
static int blocking_domain_attach_device(struct iommu_domain *domain,
struct device *dev)
{
@@ -392,9 +432,11 @@ static int blocking_domain_attach_device(struct iommu_domain *domain,
return 0;
s390_domain = to_s390_domain(zdev->s390_domain);
- spin_lock_irqsave(&s390_domain->list_lock, flags);
- list_del_rcu(&zdev->iommu_list);
- spin_unlock_irqrestore(&s390_domain->list_lock, flags);
+ if (zdev->dma_table) {
+ spin_lock_irqsave(&s390_domain->list_lock, flags);
+ list_del_rcu(&zdev->iommu_list);
+ spin_unlock_irqrestore(&s390_domain->list_lock, flags);
+ }
zpci_unregister_ioat(zdev, 0);
zdev->dma_table = NULL;
@@ -422,8 +464,7 @@ static int s390_iommu_attach_device(struct iommu_domain *domain,
blocking_domain_attach_device(&blocking_domain, dev);
/* If we fail now DMA remains blocked via blocking domain */
- cc = zpci_register_ioat(zdev, 0, zdev->start_dma, zdev->end_dma,
- virt_to_phys(s390_domain->dma_table), &status);
+ cc = s390_iommu_domain_reg_ioat(zdev, domain, &status);
if (cc && status != ZPCI_PCI_ST_FUNC_NOT_AVAIL)
return -EIO;
zdev->dma_table = s390_domain->dma_table;
@@ -723,7 +764,13 @@ int zpci_init_iommu(struct zpci_dev *zdev)
if (rc)
goto out_err;
- rc = iommu_device_register(&zdev->iommu_dev, &s390_iommu_ops, NULL);
+ if (zdev->rtr_avail) {
+ rc = iommu_device_register(&zdev->iommu_dev,
+ &s390_iommu_rtr_ops, NULL);
+ } else {
+ rc = iommu_device_register(&zdev->iommu_dev, &s390_iommu_ops,
+ NULL);
+ }
if (rc)
goto out_sysfs;
@@ -787,6 +834,39 @@ static int __init s390_iommu_init(void)
}
subsys_initcall(s390_iommu_init);
+static int s390_attach_dev_identity(struct iommu_domain *domain,
+ struct device *dev)
+{
+ struct zpci_dev *zdev = to_zpci_dev(dev);
+ u8 status;
+ int cc;
+
+ blocking_domain_attach_device(&blocking_domain, dev);
+
+ /* If we fail now DMA remains blocked via blocking domain */
+ cc = s390_iommu_domain_reg_ioat(zdev, domain, &status);
+
+ /*
+ * If the device is undergoing error recovery the reset code
+ * will re-establish the new domain.
+ */
+ if (cc && status != ZPCI_PCI_ST_FUNC_NOT_AVAIL)
+ return -EIO;
+
+ zdev_s390_domain_update(zdev, domain);
+
+ return 0;
+}
+
+static const struct iommu_domain_ops s390_identity_ops = {
+ .attach_dev = s390_attach_dev_identity,
+};
+
+static struct iommu_domain s390_identity_domain = {
+ .type = IOMMU_DOMAIN_IDENTITY,
+ .ops = &s390_identity_ops,
+};
+
static struct iommu_domain blocking_domain = {
.type = IOMMU_DOMAIN_BLOCKED,
.ops = &(const struct iommu_domain_ops) {
@@ -794,23 +874,31 @@ static struct iommu_domain blocking_domain = {
}
};
-static const struct iommu_ops s390_iommu_ops = {
- .blocked_domain = &blocking_domain,
- .release_domain = &blocking_domain,
- .capable = s390_iommu_capable,
- .domain_alloc_paging = s390_domain_alloc_paging,
- .probe_device = s390_iommu_probe_device,
- .device_group = generic_device_group,
- .pgsize_bitmap = SZ_4K,
- .get_resv_regions = s390_iommu_get_resv_regions,
- .default_domain_ops = &(const struct iommu_domain_ops) {
- .attach_dev = s390_iommu_attach_device,
- .map_pages = s390_iommu_map_pages,
- .unmap_pages = s390_iommu_unmap_pages,
- .flush_iotlb_all = s390_iommu_flush_iotlb_all,
- .iotlb_sync = s390_iommu_iotlb_sync,
- .iotlb_sync_map = s390_iommu_iotlb_sync_map,
- .iova_to_phys = s390_iommu_iova_to_phys,
- .free = s390_domain_free,
+#define S390_IOMMU_COMMON_OPS() \
+ .blocked_domain = &blocking_domain, \
+ .release_domain = &blocking_domain, \
+ .capable = s390_iommu_capable, \
+ .domain_alloc_paging = s390_domain_alloc_paging, \
+ .probe_device = s390_iommu_probe_device, \
+ .device_group = generic_device_group, \
+ .pgsize_bitmap = SZ_4K, \
+ .get_resv_regions = s390_iommu_get_resv_regions, \
+ .default_domain_ops = &(const struct iommu_domain_ops) { \
+ .attach_dev = s390_iommu_attach_device, \
+ .map_pages = s390_iommu_map_pages, \
+ .unmap_pages = s390_iommu_unmap_pages, \
+ .flush_iotlb_all = s390_iommu_flush_iotlb_all, \
+ .iotlb_sync = s390_iommu_iotlb_sync, \
+ .iotlb_sync_map = s390_iommu_iotlb_sync_map, \
+ .iova_to_phys = s390_iommu_iova_to_phys, \
+ .free = s390_domain_free, \
}
+
+static const struct iommu_ops s390_iommu_ops = {
+ S390_IOMMU_COMMON_OPS()
+};
+
+static const struct iommu_ops s390_iommu_rtr_ops = {
+ .identity_domain = &s390_identity_domain,
+ S390_IOMMU_COMMON_OPS()
};
diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c
index 7f633bb5efef..69d353e1df84 100644
--- a/drivers/iommu/tegra-smmu.c
+++ b/drivers/iommu/tegra-smmu.c
@@ -846,7 +846,6 @@ static int tegra_smmu_configure(struct tegra_smmu *smmu, struct device *dev,
err = ops->of_xlate(dev, args);
if (err < 0) {
dev_err(dev, "failed to parse SW group ID: %d\n", err);
- iommu_fwspec_free(dev);
return err;
}
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index c11b9965c4ad..4d707d4f252e 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -28,6 +28,7 @@ config ARM_GIC_V2M
select ARM_GIC
select IRQ_MSI_LIB
select PCI_MSI
+ select IRQ_MSI_IOMMU
config GIC_NON_BANKED
bool
@@ -38,12 +39,14 @@ config ARM_GIC_V3
select PARTITION_PERCPU
select GENERIC_IRQ_EFFECTIVE_AFF_MASK if SMP
select HAVE_ARM_SMCCC_DISCOVERY
+ select IRQ_MSI_IOMMU
config ARM_GIC_V3_ITS
bool
select GENERIC_MSI_IRQ
select IRQ_MSI_LIB
default ARM_GIC_V3
+ select IRQ_MSI_IOMMU
config ARM_GIC_V3_ITS_FSL_MC
bool
@@ -408,6 +411,7 @@ config LS_EXTIRQ
config LS_SCFG_MSI
def_bool y if SOC_LS1021A || ARCH_LAYERSCAPE
+ select IRQ_MSI_IOMMU
depends on PCI_MSI
config PARTITION_PERCPU
@@ -590,13 +594,7 @@ config RISCV_IMSIC
select IRQ_DOMAIN_HIERARCHY
select GENERIC_IRQ_MATRIX_ALLOCATOR
select GENERIC_MSI_IRQ
-
-config RISCV_IMSIC_PCI
- bool
- depends on RISCV_IMSIC
- depends on PCI
- depends on PCI_MSI
- default RISCV_IMSIC
+ select IRQ_MSI_LIB
config SIFIVE_PLIC
bool
@@ -752,6 +750,18 @@ config MCHP_EIC
help
Support for Microchip External Interrupt Controller.
+config SOPHGO_SG2042_MSI
+ bool "Sophgo SG2042 MSI Controller"
+ depends on ARCH_SOPHGO || COMPILE_TEST
+ depends on PCI
+ select IRQ_DOMAIN_HIERARCHY
+ select IRQ_MSI_LIB
+ select PCI_MSI
+ help
+ Support for the Sophgo SG2042 MSI Controller.
+ This on-chip interrupt controller enables MSI sources to be
+	  routed to the primary PLIC controller on the SoC.
+
config SUNPLUS_SP7021_INTC
bool "Sunplus SP7021 interrupt controller" if COMPILE_TEST
default SOC_SP7021
diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile
index 25e9ad29b8c4..dd60e597491d 100644
--- a/drivers/irqchip/Makefile
+++ b/drivers/irqchip/Makefile
@@ -128,4 +128,5 @@ obj-$(CONFIG_WPCM450_AIC) += irq-wpcm450-aic.o
obj-$(CONFIG_IRQ_IDT3243X) += irq-idt3243x.o
obj-$(CONFIG_APPLE_AIC) += irq-apple-aic.o
obj-$(CONFIG_MCHP_EIC) += irq-mchp-eic.o
+obj-$(CONFIG_SOPHGO_SG2042_MSI) += irq-sg2042-msi.o
obj-$(CONFIG_SUNPLUS_SP7021_INTC) += irq-sp7021-intc.o
diff --git a/drivers/irqchip/irq-apple-aic.c b/drivers/irqchip/irq-apple-aic.c
index 2b1684c60e3c..974dc088c853 100644
--- a/drivers/irqchip/irq-apple-aic.c
+++ b/drivers/irqchip/irq-apple-aic.c
@@ -409,15 +409,15 @@ static void __exception_irq_entry aic_handle_irq(struct pt_regs *regs)
* in use, and be cleared when coming back from the handler.
*/
if (is_kernel_in_hyp_mode() &&
- (read_sysreg_s(SYS_ICH_HCR_EL2) & ICH_HCR_EN) &&
+ (read_sysreg_s(SYS_ICH_HCR_EL2) & ICH_HCR_EL2_En) &&
read_sysreg_s(SYS_ICH_MISR_EL2) != 0) {
generic_handle_domain_irq(aic_irqc->hw_domain,
AIC_FIQ_HWIRQ(AIC_VGIC_MI));
- if (unlikely((read_sysreg_s(SYS_ICH_HCR_EL2) & ICH_HCR_EN) &&
+ if (unlikely((read_sysreg_s(SYS_ICH_HCR_EL2) & ICH_HCR_EL2_En) &&
read_sysreg_s(SYS_ICH_MISR_EL2))) {
pr_err_ratelimited("vGIC IRQ fired and not handled by KVM, disabling.\n");
- sysreg_clear_set_s(SYS_ICH_HCR_EL2, ICH_HCR_EN, 0);
+ sysreg_clear_set_s(SYS_ICH_HCR_EL2, ICH_HCR_EL2_En, 0);
}
}
}
@@ -841,7 +841,7 @@ static int aic_init_cpu(unsigned int cpu)
VM_TMR_FIQ_ENABLE_V | VM_TMR_FIQ_ENABLE_P, 0);
/* vGIC maintenance IRQ */
- sysreg_clear_set_s(SYS_ICH_HCR_EL2, ICH_HCR_EN, 0);
+ sysreg_clear_set_s(SYS_ICH_HCR_EL2, ICH_HCR_EL2_En, 0);
}
/* PMC FIQ */
diff --git a/drivers/irqchip/irq-davinci-cp-intc.c b/drivers/irqchip/irq-davinci-cp-intc.c
index f4f8e9fadbbf..d7948c55f542 100644
--- a/drivers/irqchip/irq-davinci-cp-intc.c
+++ b/drivers/irqchip/irq-davinci-cp-intc.c
@@ -11,7 +11,6 @@
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
-#include <linux/irqchip/irq-davinci-cp-intc.h>
#include <linux/irqdomain.h>
#include <linux/io.h>
#include <linux/of.h>
@@ -154,24 +153,20 @@ static const struct irq_domain_ops davinci_cp_intc_irq_domain_ops = {
.xlate = irq_domain_xlate_onetwocell,
};
-static int __init
-davinci_cp_intc_do_init(const struct davinci_cp_intc_config *config,
- struct device_node *node)
+static int __init davinci_cp_intc_do_init(struct resource *res, unsigned int num_irqs,
+ struct device_node *node)
{
- unsigned int num_regs = BITS_TO_LONGS(config->num_irqs);
+ unsigned int num_regs = BITS_TO_LONGS(num_irqs);
int offset, irq_base;
void __iomem *req;
- req = request_mem_region(config->reg.start,
- resource_size(&config->reg),
- "davinci-cp-intc");
+ req = request_mem_region(res->start, resource_size(res), "davinci-cp-intc");
if (!req) {
pr_err("%s: register range busy\n", __func__);
return -EBUSY;
}
- davinci_cp_intc_base = ioremap(config->reg.start,
- resource_size(&config->reg));
+ davinci_cp_intc_base = ioremap(res->start, resource_size(res));
if (!davinci_cp_intc_base) {
pr_err("%s: unable to ioremap register range\n", __func__);
return -EINVAL;
@@ -184,8 +179,7 @@ davinci_cp_intc_do_init(const struct davinci_cp_intc_config *config,
/* Disable system interrupts */
for (offset = 0; offset < num_regs; offset++)
- davinci_cp_intc_write(~0,
- DAVINCI_CP_INTC_SYS_ENABLE_CLR(offset));
+ davinci_cp_intc_write(~0, DAVINCI_CP_INTC_SYS_ENABLE_CLR(offset));
/* Set to normal mode, no nesting, no priority hold */
davinci_cp_intc_write(0, DAVINCI_CP_INTC_CTRL);
@@ -193,28 +187,25 @@ davinci_cp_intc_do_init(const struct davinci_cp_intc_config *config,
/* Clear system interrupt status */
for (offset = 0; offset < num_regs; offset++)
- davinci_cp_intc_write(~0,
- DAVINCI_CP_INTC_SYS_STAT_CLR(offset));
+ davinci_cp_intc_write(~0, DAVINCI_CP_INTC_SYS_STAT_CLR(offset));
/* Enable nIRQ (what about nFIQ?) */
davinci_cp_intc_write(1, DAVINCI_CP_INTC_HOST_ENABLE_IDX_SET);
+ /* 4 channels per register */
+ num_regs = (num_irqs + 3) >> 2;
/* Default all priorities to channel 7. */
- num_regs = (config->num_irqs + 3) >> 2; /* 4 channels per register */
for (offset = 0; offset < num_regs; offset++)
- davinci_cp_intc_write(0x07070707,
- DAVINCI_CP_INTC_CHAN_MAP(offset));
+ davinci_cp_intc_write(0x07070707, DAVINCI_CP_INTC_CHAN_MAP(offset));
- irq_base = irq_alloc_descs(-1, 0, config->num_irqs, 0);
+ irq_base = irq_alloc_descs(-1, 0, num_irqs, 0);
if (irq_base < 0) {
- pr_err("%s: unable to allocate interrupt descriptors: %d\n",
- __func__, irq_base);
+ pr_err("%s: unable to allocate interrupt descriptors: %d\n", __func__, irq_base);
return irq_base;
}
- davinci_cp_intc_irq_domain = irq_domain_add_legacy(
- node, config->num_irqs, irq_base, 0,
- &davinci_cp_intc_irq_domain_ops, NULL);
+ davinci_cp_intc_irq_domain = irq_domain_add_legacy(node, num_irqs, irq_base, 0,
+ &davinci_cp_intc_irq_domain_ops, NULL);
if (!davinci_cp_intc_irq_domain) {
pr_err("%s: unable to create an interrupt domain\n", __func__);
@@ -229,31 +220,25 @@ davinci_cp_intc_do_init(const struct davinci_cp_intc_config *config,
return 0;
}
-int __init davinci_cp_intc_init(const struct davinci_cp_intc_config *config)
-{
- return davinci_cp_intc_do_init(config, NULL);
-}
-
static int __init davinci_cp_intc_of_init(struct device_node *node,
struct device_node *parent)
{
- struct davinci_cp_intc_config config = { };
+ unsigned int num_irqs;
+ struct resource res;
int ret;
- ret = of_address_to_resource(node, 0, &config.reg);
+ ret = of_address_to_resource(node, 0, &res);
if (ret) {
- pr_err("%s: unable to get the register range from device-tree\n",
- __func__);
+ pr_err("%s: unable to get the register range from device-tree\n", __func__);
return ret;
}
- ret = of_property_read_u32(node, "ti,intc-size", &config.num_irqs);
+ ret = of_property_read_u32(node, "ti,intc-size", &num_irqs);
if (ret) {
- pr_err("%s: unable to read the 'ti,intc-size' property\n",
- __func__);
+ pr_err("%s: unable to read the 'ti,intc-size' property\n", __func__);
return ret;
}
- return davinci_cp_intc_do_init(&config, node);
+ return davinci_cp_intc_do_init(&res, num_irqs, node);
}
IRQCHIP_DECLARE(cp_intc, "ti,cp-intc", davinci_cp_intc_of_init);
diff --git a/drivers/irqchip/irq-gic-v2m.c b/drivers/irqchip/irq-gic-v2m.c
index be35c5349986..c69894861866 100644
--- a/drivers/irqchip/irq-gic-v2m.c
+++ b/drivers/irqchip/irq-gic-v2m.c
@@ -87,9 +87,6 @@ static void gicv2m_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
struct v2m_data *v2m = irq_data_get_irq_chip_data(data);
phys_addr_t addr = gicv2m_get_msi_addr(v2m, data->hwirq);
- msg->address_hi = upper_32_bits(addr);
- msg->address_lo = lower_32_bits(addr);
-
if (v2m->flags & GICV2M_GRAVITON_ADDRESS_ONLY)
msg->data = 0;
else
@@ -97,7 +94,7 @@ static void gicv2m_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
if (v2m->flags & GICV2M_NEEDS_SPI_OFFSET)
msg->data -= v2m->spi_offset;
- iommu_dma_compose_msi_msg(irq_data_get_msi_desc(data), msg);
+ msi_msg_set_addr(irq_data_get_msi_desc(data), msg, addr);
}
static struct irq_chip gicv2m_irq_chip = {
@@ -255,6 +252,7 @@ static void __init gicv2m_teardown(void)
static struct msi_parent_ops gicv2m_msi_parent_ops = {
.supported_flags = GICV2M_MSI_FLAGS_SUPPORTED,
.required_flags = GICV2M_MSI_FLAGS_REQUIRED,
+ .chip_flags = MSI_CHIP_FLAG_SET_EOI | MSI_CHIP_FLAG_SET_ACK,
.bus_select_token = DOMAIN_BUS_NEXUS,
.bus_select_mask = MATCH_PCI_MSI | MATCH_PLATFORM_MSI,
.prefix = "GICv2m-",
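
The gicv2m conversion above is the template for the other irqchip hunks in this series: the open-coded address_hi/address_lo split followed by iommu_dma_compose_msi_msg() becomes a single msi_msg_set_addr() call. A minimal sketch of a composer callback using the new helper, assuming only what the removed and added lines here show (the doorbell address value and the function name are illustrative, not taken from any specific driver):

#include <linux/irq.h>
#include <linux/msi.h>

static void example_compose_msi_msg(struct irq_data *d, struct msi_msg *msg)
{
	phys_addr_t addr = 0x08020040;	/* hypothetical doorbell address */

	msg->data = d->hwirq;

	/*
	 * Previously (see the '-' lines above):
	 *   msg->address_hi = upper_32_bits(addr);
	 *   msg->address_lo = lower_32_bits(addr);
	 *   iommu_dma_compose_msi_msg(irq_data_get_msi_desc(d), msg);
	 *
	 * Now a single helper fills the address fields and applies whatever
	 * MSI translation the IOMMU layer recorded in the msi_desc.
	 */
	msi_msg_set_addr(irq_data_get_msi_desc(d), msg, addr);
}
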
diff --git a/drivers/irqchip/irq-gic-v3-its-msi-parent.c b/drivers/irqchip/irq-gic-v3-its-msi-parent.c
index e150365fbe89..bdb04c808148 100644
--- a/drivers/irqchip/irq-gic-v3-its-msi-parent.c
+++ b/drivers/irqchip/irq-gic-v3-its-msi-parent.c
@@ -203,6 +203,7 @@ static bool its_init_dev_msi_info(struct device *dev, struct irq_domain *domain,
const struct msi_parent_ops gic_v3_its_msi_parent_ops = {
.supported_flags = ITS_MSI_FLAGS_SUPPORTED,
.required_flags = ITS_MSI_FLAGS_REQUIRED,
+ .chip_flags = MSI_CHIP_FLAG_SET_EOI | MSI_CHIP_FLAG_SET_ACK,
.bus_select_token = DOMAIN_BUS_NEXUS,
.bus_select_mask = MATCH_PCI_MSI | MATCH_PLATFORM_MSI,
.prefix = "ITS-",
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index 8c3ec5734f1e..0115ad6c8259 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -205,13 +205,15 @@ static DEFINE_IDA(its_vpeid_ida);
#define gic_data_rdist_rd_base() (gic_data_rdist()->rd_base)
#define gic_data_rdist_vlpi_base() (gic_data_rdist_rd_base() + SZ_128K)
+static gfp_t gfp_flags_quirk;
+
static struct page *its_alloc_pages_node(int node, gfp_t gfp,
unsigned int order)
{
struct page *page;
int ret = 0;
- page = alloc_pages_node(node, gfp, order);
+ page = alloc_pages_node(node, gfp | gfp_flags_quirk, order);
if (!page)
return NULL;
@@ -1809,17 +1811,10 @@ static u64 its_irq_get_msi_base(struct its_device *its_dev)
static void its_irq_compose_msi_msg(struct irq_data *d, struct msi_msg *msg)
{
struct its_device *its_dev = irq_data_get_irq_chip_data(d);
- struct its_node *its;
- u64 addr;
-
- its = its_dev->its;
- addr = its->get_msi_base(its_dev);
-
- msg->address_lo = lower_32_bits(addr);
- msg->address_hi = upper_32_bits(addr);
- msg->data = its_get_event_id(d);
- iommu_dma_compose_msi_msg(irq_data_get_msi_desc(d), msg);
+ msg->data = its_get_event_id(d);
+ msi_msg_set_addr(irq_data_get_msi_desc(d), msg,
+ its_dev->its->get_msi_base(its_dev));
}
static int its_irq_set_irqchip_state(struct irq_data *d,
@@ -4887,6 +4882,17 @@ static bool __maybe_unused its_enable_quirk_hip09_162100801(void *data)
return true;
}
+static bool __maybe_unused its_enable_rk3568002(void *data)
+{
+ if (!of_machine_is_compatible("rockchip,rk3566") &&
+ !of_machine_is_compatible("rockchip,rk3568"))
+ return false;
+
+ gfp_flags_quirk |= GFP_DMA32;
+
+ return true;
+}
+
static const struct gic_quirk its_quirks[] = {
#ifdef CONFIG_CAVIUM_ERRATUM_22375
{
@@ -4954,6 +4960,14 @@ static const struct gic_quirk its_quirks[] = {
.property = "dma-noncoherent",
.init = its_set_non_coherent,
},
+#ifdef CONFIG_ROCKCHIP_ERRATUM_3568002
+ {
+ .desc = "ITS: Rockchip erratum RK3568002",
+ .iidr = 0x0201743b,
+ .mask = 0xffffffff,
+ .init = its_enable_rk3568002,
+ },
+#endif
{
}
};
diff --git a/drivers/irqchip/irq-gic-v3-mbi.c b/drivers/irqchip/irq-gic-v3-mbi.c
index 3fe870f8ee17..34e9ca77a8c3 100644
--- a/drivers/irqchip/irq-gic-v3-mbi.c
+++ b/drivers/irqchip/irq-gic-v3-mbi.c
@@ -147,22 +147,18 @@ static const struct irq_domain_ops mbi_domain_ops = {
static void mbi_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
- msg[0].address_hi = upper_32_bits(mbi_phys_base + GICD_SETSPI_NSR);
- msg[0].address_lo = lower_32_bits(mbi_phys_base + GICD_SETSPI_NSR);
msg[0].data = data->parent_data->hwirq;
-
- iommu_dma_compose_msi_msg(irq_data_get_msi_desc(data), msg);
+ msi_msg_set_addr(irq_data_get_msi_desc(data), &msg[0],
+ mbi_phys_base + GICD_SETSPI_NSR);
}
static void mbi_compose_mbi_msg(struct irq_data *data, struct msi_msg *msg)
{
mbi_compose_msi_msg(data, msg);
- msg[1].address_hi = upper_32_bits(mbi_phys_base + GICD_CLRSPI_NSR);
- msg[1].address_lo = lower_32_bits(mbi_phys_base + GICD_CLRSPI_NSR);
msg[1].data = data->parent_data->hwirq;
-
- iommu_dma_compose_msi_msg(irq_data_get_msi_desc(data), &msg[1]);
+ msi_msg_set_addr(irq_data_get_msi_desc(data), &msg[1],
+ mbi_phys_base + GICD_CLRSPI_NSR);
}
static bool mbi_init_dev_msi_info(struct device *dev, struct irq_domain *domain,
@@ -201,6 +197,7 @@ static bool mbi_init_dev_msi_info(struct device *dev, struct irq_domain *domain,
static const struct msi_parent_ops gic_v3_mbi_msi_parent_ops = {
.supported_flags = MBI_MSI_FLAGS_SUPPORTED,
.required_flags = MBI_MSI_FLAGS_REQUIRED,
+ .chip_flags = MSI_CHIP_FLAG_SET_EOI | MSI_CHIP_FLAG_SET_ACK,
.bus_select_token = DOMAIN_BUS_NEXUS,
.bus_select_mask = MATCH_PCI_MSI | MATCH_PLATFORM_MSI,
.prefix = "MBI-",
diff --git a/drivers/irqchip/irq-imx-irqsteer.c b/drivers/irqchip/irq-imx-irqsteer.c
index b0e9788c0045..afbfcce3b1e3 100644
--- a/drivers/irqchip/irq-imx-irqsteer.c
+++ b/drivers/irqchip/irq-imx-irqsteer.c
@@ -24,7 +24,7 @@
#define CHAN_MINTDIS(t) (CTRL_STRIDE_OFF(t, 3) + 0x4)
#define CHAN_MASTRSTAT(t) (CTRL_STRIDE_OFF(t, 3) + 0x8)
-#define CHAN_MAX_OUTPUT_INT 0x8
+#define CHAN_MAX_OUTPUT_INT 0xF
struct irqsteer_data {
void __iomem *regs;
@@ -228,10 +228,8 @@ static int imx_irqsteer_probe(struct platform_device *pdev)
for (i = 0; i < data->irq_count; i++) {
data->irq[i] = irq_of_parse_and_map(np, i);
- if (!data->irq[i]) {
- ret = -EINVAL;
- goto out;
- }
+ if (!data->irq[i])
+ break;
irq_set_chained_handler_and_data(data->irq[i],
imx_irqsteer_irq_handler,
@@ -254,9 +252,13 @@ static void imx_irqsteer_remove(struct platform_device *pdev)
struct irqsteer_data *irqsteer_data = platform_get_drvdata(pdev);
int i;
- for (i = 0; i < irqsteer_data->irq_count; i++)
+ for (i = 0; i < irqsteer_data->irq_count; i++) {
+ if (!irqsteer_data->irq[i])
+ break;
+
irq_set_chained_handler_and_data(irqsteer_data->irq[i],
NULL, NULL);
+ }
irq_domain_remove(irqsteer_data->domain);
diff --git a/drivers/irqchip/irq-imx-mu-msi.c b/drivers/irqchip/irq-imx-mu-msi.c
index 4342a21de1eb..69aacdfc8bef 100644
--- a/drivers/irqchip/irq-imx-mu-msi.c
+++ b/drivers/irqchip/irq-imx-mu-msi.c
@@ -214,6 +214,7 @@ static void imx_mu_msi_irq_handler(struct irq_desc *desc)
static const struct msi_parent_ops imx_mu_msi_parent_ops = {
.supported_flags = IMX_MU_MSI_FLAGS_SUPPORTED,
.required_flags = IMX_MU_MSI_FLAGS_REQUIRED,
+ .chip_flags = MSI_CHIP_FLAG_SET_EOI | MSI_CHIP_FLAG_SET_ACK,
.bus_select_token = DOMAIN_BUS_NEXUS,
.bus_select_mask = MATCH_PLATFORM_MSI,
.prefix = "MU-MSI-",
diff --git a/drivers/irqchip/irq-loongson-pch-msi.c b/drivers/irqchip/irq-loongson-pch-msi.c
index bd337ecddb40..9c62108b3ad5 100644
--- a/drivers/irqchip/irq-loongson-pch-msi.c
+++ b/drivers/irqchip/irq-loongson-pch-msi.c
@@ -146,6 +146,7 @@ static const struct irq_domain_ops pch_msi_middle_domain_ops = {
static struct msi_parent_ops pch_msi_parent_ops = {
.required_flags = PCH_MSI_FLAGS_REQUIRED,
.supported_flags = PCH_MSI_FLAGS_SUPPORTED,
+ .chip_flags = MSI_CHIP_FLAG_SET_EOI | MSI_CHIP_FLAG_SET_ACK,
.bus_select_mask = MATCH_PCI_MSI,
.bus_select_token = DOMAIN_BUS_NEXUS,
.prefix = "PCH-",
diff --git a/drivers/irqchip/irq-ls-scfg-msi.c b/drivers/irqchip/irq-ls-scfg-msi.c
index c0e1aafe468c..3cb80796cc7c 100644
--- a/drivers/irqchip/irq-ls-scfg-msi.c
+++ b/drivers/irqchip/irq-ls-scfg-msi.c
@@ -87,8 +87,6 @@ static void ls_scfg_msi_compose_msg(struct irq_data *data, struct msi_msg *msg)
{
struct ls_scfg_msi *msi_data = irq_data_get_irq_chip_data(data);
- msg->address_hi = upper_32_bits(msi_data->msiir_addr);
- msg->address_lo = lower_32_bits(msi_data->msiir_addr);
msg->data = data->hwirq;
if (msi_affinity_flag) {
@@ -98,7 +96,8 @@ static void ls_scfg_msi_compose_msg(struct irq_data *data, struct msi_msg *msg)
msg->data |= cpumask_first(mask);
}
- iommu_dma_compose_msi_msg(irq_data_get_msi_desc(data), msg);
+ msi_msg_set_addr(irq_data_get_msi_desc(data), msg,
+ msi_data->msiir_addr);
}
static int ls_scfg_msi_set_affinity(struct irq_data *irq_data,
diff --git a/drivers/irqchip/irq-meson-gpio.c b/drivers/irqchip/irq-meson-gpio.c
index cd789fa51519..0a25536a5d07 100644
--- a/drivers/irqchip/irq-meson-gpio.c
+++ b/drivers/irqchip/irq-meson-gpio.c
@@ -26,8 +26,6 @@
/* use for A1 like chips */
#define REG_PIN_A1_SEL 0x04
-/* Used for s4 chips */
-#define REG_EDGE_POL_S4 0x1c
/*
* Note: The S905X3 datasheet reports that BOTH_EDGE is controlled by
@@ -72,6 +70,7 @@ struct meson_gpio_irq_params {
bool support_edge_both;
unsigned int edge_both_offset;
unsigned int edge_single_offset;
+ unsigned int edge_pol_reg;
unsigned int pol_low_offset;
unsigned int pin_sel_mask;
struct irq_ctl_ops ops;
@@ -105,6 +104,18 @@ struct meson_gpio_irq_params {
.pin_sel_mask = 0x7f, \
.nr_channels = 8, \
+#define INIT_MESON_A4_AO_COMMON_DATA(irqs) \
+ INIT_MESON_COMMON(irqs, meson_a1_gpio_irq_init, \
+ meson_a1_gpio_irq_sel_pin, \
+ meson_s4_gpio_irq_set_type) \
+ .support_edge_both = true, \
+ .edge_both_offset = 0, \
+ .edge_single_offset = 12, \
+ .edge_pol_reg = 0x8, \
+ .pol_low_offset = 0, \
+ .pin_sel_mask = 0xff, \
+ .nr_channels = 2, \
+
#define INIT_MESON_S4_COMMON_DATA(irqs) \
INIT_MESON_COMMON(irqs, meson_a1_gpio_irq_init, \
meson_a1_gpio_irq_sel_pin, \
@@ -112,6 +123,7 @@ struct meson_gpio_irq_params {
.support_edge_both = true, \
.edge_both_offset = 0, \
.edge_single_offset = 12, \
+ .edge_pol_reg = 0x1c, \
.pol_low_offset = 0, \
.pin_sel_mask = 0xff, \
.nr_channels = 12, \
@@ -146,6 +158,18 @@ static const struct meson_gpio_irq_params a1_params = {
INIT_MESON_A1_COMMON_DATA(62)
};
+static const struct meson_gpio_irq_params a4_params = {
+ INIT_MESON_S4_COMMON_DATA(81)
+};
+
+static const struct meson_gpio_irq_params a4_ao_params = {
+ INIT_MESON_A4_AO_COMMON_DATA(8)
+};
+
+static const struct meson_gpio_irq_params a5_params = {
+ INIT_MESON_S4_COMMON_DATA(99)
+};
+
static const struct meson_gpio_irq_params s4_params = {
INIT_MESON_S4_COMMON_DATA(82)
};
@@ -168,6 +192,9 @@ static const struct of_device_id meson_irq_gpio_matches[] __maybe_unused = {
{ .compatible = "amlogic,meson-sm1-gpio-intc", .data = &sm1_params },
{ .compatible = "amlogic,meson-a1-gpio-intc", .data = &a1_params },
{ .compatible = "amlogic,meson-s4-gpio-intc", .data = &s4_params },
+ { .compatible = "amlogic,a4-gpio-ao-intc", .data = &a4_ao_params },
+ { .compatible = "amlogic,a4-gpio-intc", .data = &a4_params },
+ { .compatible = "amlogic,a5-gpio-intc", .data = &a5_params },
{ .compatible = "amlogic,c3-gpio-intc", .data = &c3_params },
{ .compatible = "amlogic,t7-gpio-intc", .data = &t7_params },
{ }
@@ -299,11 +326,10 @@ meson_gpio_irq_release_channel(struct meson_gpio_irq_controller *ctl,
static int meson8_gpio_irq_set_type(struct meson_gpio_irq_controller *ctl,
unsigned int type, u32 *channel_hwirq)
{
- u32 val = 0;
+ const struct meson_gpio_irq_params *params = ctl->params;
unsigned int idx;
- const struct meson_gpio_irq_params *params;
+ u32 val = 0;
- params = ctl->params;
idx = meson_gpio_irq_get_channel_idx(ctl, channel_hwirq);
/*
@@ -356,19 +382,19 @@ static int meson8_gpio_irq_set_type(struct meson_gpio_irq_controller *ctl,
static int meson_s4_gpio_irq_set_type(struct meson_gpio_irq_controller *ctl,
unsigned int type, u32 *channel_hwirq)
{
- u32 val = 0;
+ const struct meson_gpio_irq_params *params = ctl->params;
unsigned int idx;
+ u32 val = 0;
idx = meson_gpio_irq_get_channel_idx(ctl, channel_hwirq);
type &= IRQ_TYPE_SENSE_MASK;
- meson_gpio_irq_update_bits(ctl, REG_EDGE_POL_S4, BIT(idx), 0);
+ meson_gpio_irq_update_bits(ctl, params->edge_pol_reg, BIT(idx), 0);
if (type == IRQ_TYPE_EDGE_BOTH) {
- val |= BIT(ctl->params->edge_both_offset + idx);
- meson_gpio_irq_update_bits(ctl, REG_EDGE_POL_S4,
- BIT(ctl->params->edge_both_offset + idx), val);
+ val = BIT(ctl->params->edge_both_offset + idx);
+ meson_gpio_irq_update_bits(ctl, params->edge_pol_reg, val, val);
return 0;
}
@@ -378,7 +404,7 @@ static int meson_s4_gpio_irq_set_type(struct meson_gpio_irq_controller *ctl,
if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING))
val |= BIT(ctl->params->edge_single_offset + idx);
- meson_gpio_irq_update_bits(ctl, REG_EDGE_POL,
+ meson_gpio_irq_update_bits(ctl, params->edge_pol_reg,
BIT(idx) | BIT(12 + idx), val);
return 0;
};
diff --git a/drivers/irqchip/irq-msi-lib.c b/drivers/irqchip/irq-msi-lib.c
index d8e29fc0d406..51464c6257f3 100644
--- a/drivers/irqchip/irq-msi-lib.c
+++ b/drivers/irqchip/irq-msi-lib.c
@@ -28,6 +28,7 @@ bool msi_lib_init_dev_msi_info(struct device *dev, struct irq_domain *domain,
struct msi_domain_info *info)
{
const struct msi_parent_ops *pops = real_parent->msi_parent_ops;
+ struct irq_chip *chip = info->chip;
u32 required_flags;
/* Parent ops available? */
@@ -92,10 +93,10 @@ bool msi_lib_init_dev_msi_info(struct device *dev, struct irq_domain *domain,
info->flags |= required_flags;
/* Chip updates for all child bus types */
- if (!info->chip->irq_eoi)
- info->chip->irq_eoi = irq_chip_eoi_parent;
- if (!info->chip->irq_ack)
- info->chip->irq_ack = irq_chip_ack_parent;
+ if (!chip->irq_eoi && (pops->chip_flags & MSI_CHIP_FLAG_SET_EOI))
+ chip->irq_eoi = irq_chip_eoi_parent;
+ if (!chip->irq_ack && (pops->chip_flags & MSI_CHIP_FLAG_SET_ACK))
+ chip->irq_ack = irq_chip_ack_parent;
/*
* The device MSI domain can never have a set affinity callback. It
@@ -105,7 +106,7 @@ bool msi_lib_init_dev_msi_info(struct device *dev, struct irq_domain *domain,
* device MSI domain aside of mask/unmask which is provided e.g. by
* PCI/MSI device domains.
*/
- info->chip->irq_set_affinity = msi_domain_set_affinity;
+ chip->irq_set_affinity = msi_domain_set_affinity;
return true;
}
EXPORT_SYMBOL_GPL(msi_lib_init_dev_msi_info);
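
With this change msi_lib_init_dev_msi_info() only wires irq_eoi/irq_ack through to the parent chip when the parent domain asks for it via msi_parent_ops::chip_flags, which is why the GICv2m, ITS, MBI, i.MX, Loongson and Marvell hunks in this series all add MSI_CHIP_FLAG_SET_EOI | MSI_CHIP_FLAG_SET_ACK. A condensed, hypothetical parent declaration showing both halves together (the flag values are placeholders, not from a real driver):

static const struct msi_parent_ops example_msi_parent_ops = {
	.supported_flags	= MSI_GENERIC_FLAGS_MASK,	/* placeholder */
	.required_flags		= MSI_FLAG_USE_DEF_DOM_OPS,	/* placeholder */
	/* Opt in: let msi-lib install irq_chip_eoi_parent/irq_chip_ack_parent. */
	.chip_flags		= MSI_CHIP_FLAG_SET_EOI | MSI_CHIP_FLAG_SET_ACK,
	.bus_select_token	= DOMAIN_BUS_NEXUS,
	.bus_select_mask	= MATCH_PCI_MSI | MATCH_PLATFORM_MSI,
	.prefix			= "EXAMPLE-",
	.init_dev_msi_info	= msi_lib_init_dev_msi_info,
};

A parent that leaves chip_flags at zero keeps its child chips' own irq_eoi/irq_ack callbacks (or none at all), which the pre-patch code did not allow.
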
diff --git a/drivers/irqchip/irq-mvebu-gicp.c b/drivers/irqchip/irq-mvebu-gicp.c
index 2b6183919ea4..d67f93f6d750 100644
--- a/drivers/irqchip/irq-mvebu-gicp.c
+++ b/drivers/irqchip/irq-mvebu-gicp.c
@@ -161,6 +161,7 @@ static const struct irq_domain_ops gicp_domain_ops = {
static const struct msi_parent_ops gicp_msi_parent_ops = {
.supported_flags = GICP_MSI_FLAGS_SUPPORTED,
.required_flags = GICP_MSI_FLAGS_REQUIRED,
+ .chip_flags = MSI_CHIP_FLAG_SET_EOI | MSI_CHIP_FLAG_SET_ACK,
.bus_select_token = DOMAIN_BUS_GENERIC_MSI,
.bus_select_mask = MATCH_PLATFORM_MSI,
.prefix = "GICP-",
diff --git a/drivers/irqchip/irq-mvebu-odmi.c b/drivers/irqchip/irq-mvebu-odmi.c
index ff19bfd258dc..28f7e81df94f 100644
--- a/drivers/irqchip/irq-mvebu-odmi.c
+++ b/drivers/irqchip/irq-mvebu-odmi.c
@@ -157,6 +157,7 @@ static const struct irq_domain_ops odmi_domain_ops = {
static const struct msi_parent_ops odmi_msi_parent_ops = {
.supported_flags = ODMI_MSI_FLAGS_SUPPORTED,
.required_flags = ODMI_MSI_FLAGS_REQUIRED,
+ .chip_flags = MSI_CHIP_FLAG_SET_EOI | MSI_CHIP_FLAG_SET_ACK,
.bus_select_token = DOMAIN_BUS_GENERIC_MSI,
.bus_select_mask = MATCH_PLATFORM_MSI,
.prefix = "ODMI-",
diff --git a/drivers/irqchip/irq-mvebu-sei.c b/drivers/irqchip/irq-mvebu-sei.c
index 065166ab5dbc..ebd4a9014e8d 100644
--- a/drivers/irqchip/irq-mvebu-sei.c
+++ b/drivers/irqchip/irq-mvebu-sei.c
@@ -356,6 +356,7 @@ static void mvebu_sei_reset(struct mvebu_sei *sei)
static const struct msi_parent_ops sei_msi_parent_ops = {
.supported_flags = SEI_MSI_FLAGS_SUPPORTED,
.required_flags = SEI_MSI_FLAGS_REQUIRED,
+ .chip_flags = MSI_CHIP_FLAG_SET_EOI | MSI_CHIP_FLAG_SET_ACK,
.bus_select_mask = MATCH_PLATFORM_MSI,
.bus_select_token = DOMAIN_BUS_GENERIC_MSI,
.prefix = "SEI-",
diff --git a/drivers/irqchip/irq-renesas-rzg2l.c b/drivers/irqchip/irq-renesas-rzg2l.c
index 99e27e01b0b1..6a2e41f02446 100644
--- a/drivers/irqchip/irq-renesas-rzg2l.c
+++ b/drivers/irqchip/irq-renesas-rzg2l.c
@@ -541,43 +541,36 @@ static int rzg2l_irqc_common_init(struct device_node *node, struct device_node *
return -ENODEV;
parent_domain = irq_find_host(parent);
- if (!parent_domain) {
- dev_err(&pdev->dev, "cannot find parent domain\n");
- return -ENODEV;
- }
+ if (!parent_domain)
+ return dev_err_probe(dev, -ENODEV, "cannot find parent domain\n");
- rzg2l_irqc_data = devm_kzalloc(&pdev->dev, sizeof(*rzg2l_irqc_data), GFP_KERNEL);
+ rzg2l_irqc_data = devm_kzalloc(dev, sizeof(*rzg2l_irqc_data), GFP_KERNEL);
if (!rzg2l_irqc_data)
return -ENOMEM;
rzg2l_irqc_data->irqchip = irq_chip;
- rzg2l_irqc_data->base = devm_of_iomap(&pdev->dev, pdev->dev.of_node, 0, NULL);
+ rzg2l_irqc_data->base = devm_of_iomap(dev, dev->of_node, 0, NULL);
if (IS_ERR(rzg2l_irqc_data->base))
return PTR_ERR(rzg2l_irqc_data->base);
ret = rzg2l_irqc_parse_interrupts(rzg2l_irqc_data, node);
- if (ret) {
- dev_err(&pdev->dev, "cannot parse interrupts: %d\n", ret);
- return ret;
- }
-
- resetn = devm_reset_control_get_exclusive(&pdev->dev, NULL);
- if (IS_ERR(resetn))
- return PTR_ERR(resetn);
+ if (ret)
+ return dev_err_probe(dev, ret, "cannot parse interrupts: %d\n", ret);
- ret = reset_control_deassert(resetn);
- if (ret) {
- dev_err(&pdev->dev, "failed to deassert resetn pin, %d\n", ret);
- return ret;
+ resetn = devm_reset_control_get_exclusive_deasserted(dev, NULL);
+ if (IS_ERR(resetn)) {
+ return dev_err_probe(dev, PTR_ERR(resetn),
+ "failed to acquire deasserted reset: %d\n", ret);
}
- pm_runtime_enable(&pdev->dev);
- ret = pm_runtime_resume_and_get(&pdev->dev);
- if (ret < 0) {
- dev_err(&pdev->dev, "pm_runtime_resume_and_get failed: %d\n", ret);
- goto pm_disable;
- }
+ ret = devm_pm_runtime_enable(dev);
+ if (ret)
+ return dev_err_probe(dev, ret, "devm_pm_runtime_enable failed: %d\n", ret);
+
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret)
+ return dev_err_probe(dev, ret, "pm_runtime_resume_and_get failed: %d\n", ret);
raw_spin_lock_init(&rzg2l_irqc_data->lock);
@@ -585,9 +578,8 @@ static int rzg2l_irqc_common_init(struct device_node *node, struct device_node *
node, &rzg2l_irqc_domain_ops,
rzg2l_irqc_data);
if (!irq_domain) {
- dev_err(&pdev->dev, "failed to add irq domain\n");
- ret = -ENOMEM;
- goto pm_put;
+ pm_runtime_put(dev);
+ return dev_err_probe(dev, -ENOMEM, "failed to add irq domain\n");
}
register_syscore_ops(&rzg2l_irqc_syscore_ops);
@@ -604,13 +596,6 @@ static int rzg2l_irqc_common_init(struct device_node *node, struct device_node *
dev = NULL;
return 0;
-
-pm_put:
- pm_runtime_put(&pdev->dev);
-pm_disable:
- pm_runtime_disable(&pdev->dev);
- reset_control_assert(resetn);
- return ret;
}
static int __init rzg2l_irqc_init(struct device_node *node,
diff --git a/drivers/irqchip/irq-renesas-rzv2h.c b/drivers/irqchip/irq-renesas-rzv2h.c
index fe2d29e91026..3d5b5fdf9bde 100644
--- a/drivers/irqchip/irq-renesas-rzv2h.c
+++ b/drivers/irqchip/irq-renesas-rzv2h.c
@@ -64,11 +64,18 @@
#define ICU_TINT_LEVEL_HIGH 2
#define ICU_TINT_LEVEL_LOW 3
-#define ICU_TSSR_K(tint_nr) ((tint_nr) / 4)
-#define ICU_TSSR_TSSEL_N(tint_nr) ((tint_nr) % 4)
-#define ICU_TSSR_TSSEL_PREP(tssel, n) ((tssel) << ((n) * 8))
-#define ICU_TSSR_TSSEL_MASK(n) ICU_TSSR_TSSEL_PREP(0x7F, n)
-#define ICU_TSSR_TIEN(n) (BIT(7) << ((n) * 8))
+#define ICU_TSSR_TSSEL_PREP(tssel, n, field_width) ((tssel) << ((n) * (field_width)))
+#define ICU_TSSR_TSSEL_MASK(n, field_width) \
+({\
+ typeof(field_width) (_field_width) = (field_width); \
+ ICU_TSSR_TSSEL_PREP((GENMASK(((_field_width) - 2), 0)), (n), _field_width); \
+})
+
+#define ICU_TSSR_TIEN(n, field_width) \
+({\
+ typeof(field_width) (_field_width) = (field_width); \
+ BIT((_field_width) - 1) << ((n) * (_field_width)); \
+})
#define ICU_TITSR_K(tint_nr) ((tint_nr) / 16)
#define ICU_TITSR_TITSEL_N(tint_nr) ((tint_nr) % 16)
@@ -78,20 +85,36 @@
#define ICU_TINT_EXTRACT_HWIRQ(x) FIELD_GET(GENMASK(15, 0), (x))
#define ICU_TINT_EXTRACT_GPIOINT(x) FIELD_GET(GENMASK(31, 16), (x))
-#define ICU_PB5_TINT 0x55
+#define ICU_RZG3E_TINT_OFFSET 0x800
+#define ICU_RZG3E_TSSEL_MAX_VAL 0x8c
+#define ICU_RZV2H_TSSEL_MAX_VAL 0x55
+
+/**
+ * struct rzv2h_hw_info - Interrupt Control Unit controller hardware info structure.
+ * @tssel_lut: TINT lookup table
+ * @t_offs: TINT offset
+ * @max_tssel: TSSEL max value
+ * @field_width: TSSR field width
+ */
+struct rzv2h_hw_info {
+ const u8 *tssel_lut;
+ u16 t_offs;
+ u8 max_tssel;
+ u8 field_width;
+};
/**
* struct rzv2h_icu_priv - Interrupt Control Unit controller private data structure.
* @base: Controller's base address
- * @irqchip: Pointer to struct irq_chip
* @fwspec: IRQ firmware specific data
* @lock: Lock to serialize access to hardware registers
+ * @info: Pointer to struct rzv2h_hw_info
*/
struct rzv2h_icu_priv {
void __iomem *base;
- const struct irq_chip *irqchip;
struct irq_fwspec fwspec[ICU_NUM_IRQ];
raw_spinlock_t lock;
+ const struct rzv2h_hw_info *info;
};
static inline struct rzv2h_icu_priv *irq_data_to_priv(struct irq_data *data)
@@ -111,7 +134,7 @@ static void rzv2h_icu_eoi(struct irq_data *d)
tintirq_nr = hw_irq - ICU_TINT_START;
bit = BIT(tintirq_nr);
if (!irqd_is_level_type(d))
- writel_relaxed(bit, priv->base + ICU_TSCLR);
+ writel_relaxed(bit, priv->base + priv->info->t_offs + ICU_TSCLR);
} else if (hw_irq >= ICU_IRQ_START) {
tintirq_nr = hw_irq - ICU_IRQ_START;
bit = BIT(tintirq_nr);
@@ -130,21 +153,23 @@ static void rzv2h_tint_irq_endisable(struct irq_data *d, bool enable)
struct rzv2h_icu_priv *priv = irq_data_to_priv(d);
unsigned int hw_irq = irqd_to_hwirq(d);
u32 tint_nr, tssel_n, k, tssr;
+ u8 nr_tint;
if (hw_irq < ICU_TINT_START)
return;
tint_nr = hw_irq - ICU_TINT_START;
- k = ICU_TSSR_K(tint_nr);
- tssel_n = ICU_TSSR_TSSEL_N(tint_nr);
+ nr_tint = 32 / priv->info->field_width;
+ k = tint_nr / nr_tint;
+ tssel_n = tint_nr % nr_tint;
guard(raw_spinlock)(&priv->lock);
- tssr = readl_relaxed(priv->base + ICU_TSSR(k));
+ tssr = readl_relaxed(priv->base + priv->info->t_offs + ICU_TSSR(k));
if (enable)
- tssr |= ICU_TSSR_TIEN(tssel_n);
+ tssr |= ICU_TSSR_TIEN(tssel_n, priv->info->field_width);
else
- tssr &= ~ICU_TSSR_TIEN(tssel_n);
- writel_relaxed(tssr, priv->base + ICU_TSSR(k));
+ tssr &= ~ICU_TSSR_TIEN(tssel_n, priv->info->field_width);
+ writel_relaxed(tssr, priv->base + priv->info->t_offs + ICU_TSSR(k));
}
static void rzv2h_icu_irq_disable(struct irq_data *d)
@@ -247,8 +272,8 @@ static void rzv2h_clear_tint_int(struct rzv2h_icu_priv *priv, unsigned int hwirq
u32 bit = BIT(tint_nr);
int k = tint_nr / 16;
- tsctr = readl_relaxed(priv->base + ICU_TSCTR);
- titsr = readl_relaxed(priv->base + ICU_TITSR(k));
+ tsctr = readl_relaxed(priv->base + priv->info->t_offs + ICU_TSCTR);
+ titsr = readl_relaxed(priv->base + priv->info->t_offs + ICU_TITSR(k));
titsel = ICU_TITSR_TITSEL_GET(titsr, titsel_n);
/*
@@ -257,7 +282,7 @@ static void rzv2h_clear_tint_int(struct rzv2h_icu_priv *priv, unsigned int hwirq
*/
if ((tsctr & bit) && ((titsel == ICU_TINT_EDGE_RISING) ||
(titsel == ICU_TINT_EDGE_FALLING)))
- writel_relaxed(bit, priv->base + ICU_TSCLR);
+ writel_relaxed(bit, priv->base + priv->info->t_offs + ICU_TSCLR);
}
static int rzv2h_tint_set_type(struct irq_data *d, unsigned int type)
@@ -268,6 +293,7 @@ static int rzv2h_tint_set_type(struct irq_data *d, unsigned int type)
unsigned int hwirq;
u32 tint, sense;
int tint_nr;
+ u8 nr_tint;
switch (type & IRQ_TYPE_SENSE_MASK) {
case IRQ_TYPE_LEVEL_LOW:
@@ -290,39 +316,42 @@ static int rzv2h_tint_set_type(struct irq_data *d, unsigned int type)
return -EINVAL;
}
+ priv = irq_data_to_priv(d);
tint = (u32)(uintptr_t)irq_data_get_irq_chip_data(d);
- if (tint > ICU_PB5_TINT)
+ if (tint > priv->info->max_tssel)
return -EINVAL;
- priv = irq_data_to_priv(d);
- hwirq = irqd_to_hwirq(d);
+ if (priv->info->tssel_lut)
+ tint = priv->info->tssel_lut[tint];
+ hwirq = irqd_to_hwirq(d);
tint_nr = hwirq - ICU_TINT_START;
- tssr_k = ICU_TSSR_K(tint_nr);
- tssel_n = ICU_TSSR_TSSEL_N(tint_nr);
+ nr_tint = 32 / priv->info->field_width;
+ tssr_k = tint_nr / nr_tint;
+ tssel_n = tint_nr % nr_tint;
+ tien = ICU_TSSR_TIEN(tssel_n, priv->info->field_width);
titsr_k = ICU_TITSR_K(tint_nr);
titsel_n = ICU_TITSR_TITSEL_N(tint_nr);
- tien = ICU_TSSR_TIEN(titsel_n);
guard(raw_spinlock)(&priv->lock);
- tssr = readl_relaxed(priv->base + ICU_TSSR(tssr_k));
- tssr &= ~(ICU_TSSR_TSSEL_MASK(tssel_n) | tien);
- tssr |= ICU_TSSR_TSSEL_PREP(tint, tssel_n);
+ tssr = readl_relaxed(priv->base + priv->info->t_offs + ICU_TSSR(tssr_k));
+ tssr &= ~(ICU_TSSR_TSSEL_MASK(tssel_n, priv->info->field_width) | tien);
+ tssr |= ICU_TSSR_TSSEL_PREP(tint, tssel_n, priv->info->field_width);
- writel_relaxed(tssr, priv->base + ICU_TSSR(tssr_k));
+ writel_relaxed(tssr, priv->base + priv->info->t_offs + ICU_TSSR(tssr_k));
- titsr = readl_relaxed(priv->base + ICU_TITSR(titsr_k));
+ titsr = readl_relaxed(priv->base + priv->info->t_offs + ICU_TITSR(titsr_k));
titsr &= ~ICU_TITSR_TITSEL_MASK(titsel_n);
titsr |= ICU_TITSR_TITSEL_PREP(sense, titsel_n);
- writel_relaxed(titsr, priv->base + ICU_TITSR(titsr_k));
+ writel_relaxed(titsr, priv->base + priv->info->t_offs + ICU_TITSR(titsr_k));
rzv2h_clear_tint_int(priv, hwirq);
- writel_relaxed(tssr | tien, priv->base + ICU_TSSR(tssr_k));
+ writel_relaxed(tssr | tien, priv->base + priv->info->t_offs + ICU_TSSR(tssr_k));
return 0;
}
@@ -390,7 +419,7 @@ static int rzv2h_icu_alloc(struct irq_domain *domain, unsigned int virq, unsigne
if (hwirq > (ICU_NUM_IRQ - 1))
return -EINVAL;
- ret = irq_domain_set_hwirq_and_chip(domain, virq, hwirq, priv->irqchip,
+ ret = irq_domain_set_hwirq_and_chip(domain, virq, hwirq, &rzv2h_icu_chip,
(void *)(uintptr_t)tint);
if (ret)
return ret;
@@ -421,7 +450,13 @@ static int rzv2h_icu_parse_interrupts(struct rzv2h_icu_priv *priv, struct device
return 0;
}
-static int rzv2h_icu_init(struct device_node *node, struct device_node *parent)
+static void rzv2h_icu_put_device(void *data)
+{
+ put_device(data);
+}
+
+static int rzv2h_icu_init_common(struct device_node *node, struct device_node *parent,
+ const struct rzv2h_hw_info *hw_info)
{
struct irq_domain *irq_domain, *parent_domain;
struct rzv2h_icu_priv *rzv2h_icu_data;
@@ -433,50 +468,48 @@ static int rzv2h_icu_init(struct device_node *node, struct device_node *parent)
if (!pdev)
return -ENODEV;
+ ret = devm_add_action_or_reset(&pdev->dev, rzv2h_icu_put_device,
+ &pdev->dev);
+ if (ret < 0)
+ return ret;
+
parent_domain = irq_find_host(parent);
if (!parent_domain) {
dev_err(&pdev->dev, "cannot find parent domain\n");
- ret = -ENODEV;
- goto put_dev;
+ return -ENODEV;
}
rzv2h_icu_data = devm_kzalloc(&pdev->dev, sizeof(*rzv2h_icu_data), GFP_KERNEL);
- if (!rzv2h_icu_data) {
- ret = -ENOMEM;
- goto put_dev;
- }
-
- rzv2h_icu_data->irqchip = &rzv2h_icu_chip;
+ if (!rzv2h_icu_data)
+ return -ENOMEM;
rzv2h_icu_data->base = devm_of_iomap(&pdev->dev, pdev->dev.of_node, 0, NULL);
- if (IS_ERR(rzv2h_icu_data->base)) {
- ret = PTR_ERR(rzv2h_icu_data->base);
- goto put_dev;
- }
+ if (IS_ERR(rzv2h_icu_data->base))
+ return PTR_ERR(rzv2h_icu_data->base);
ret = rzv2h_icu_parse_interrupts(rzv2h_icu_data, node);
if (ret) {
dev_err(&pdev->dev, "cannot parse interrupts: %d\n", ret);
- goto put_dev;
+ return ret;
}
- resetn = devm_reset_control_get_exclusive(&pdev->dev, NULL);
+ resetn = devm_reset_control_get_exclusive_deasserted(&pdev->dev, NULL);
if (IS_ERR(resetn)) {
ret = PTR_ERR(resetn);
- goto put_dev;
+ dev_err(&pdev->dev, "failed to acquire deasserted reset: %d\n", ret);
+ return ret;
}
- ret = reset_control_deassert(resetn);
- if (ret) {
- dev_err(&pdev->dev, "failed to deassert resetn pin, %d\n", ret);
- goto put_dev;
+ ret = devm_pm_runtime_enable(&pdev->dev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "devm_pm_runtime_enable failed, %d\n", ret);
+ return ret;
}
- pm_runtime_enable(&pdev->dev);
ret = pm_runtime_resume_and_get(&pdev->dev);
if (ret < 0) {
dev_err(&pdev->dev, "pm_runtime_resume_and_get failed: %d\n", ret);
- goto pm_disable;
+ return ret;
}
raw_spin_lock_init(&rzv2h_icu_data->lock);
@@ -489,6 +522,8 @@ static int rzv2h_icu_init(struct device_node *node, struct device_node *parent)
goto pm_put;
}
+ rzv2h_icu_data->info = hw_info;
+
/*
* coccicheck complains about a missing put_device call before returning, but it's a false
* positive. We still need &pdev->dev after successfully returning from this function.
@@ -497,16 +532,61 @@ static int rzv2h_icu_init(struct device_node *node, struct device_node *parent)
pm_put:
pm_runtime_put(&pdev->dev);
-pm_disable:
- pm_runtime_disable(&pdev->dev);
- reset_control_assert(resetn);
-put_dev:
- put_device(&pdev->dev);
return ret;
}
+/* Mapping based on port index on Table 4.2-6 and TSSEL bits on Table 4.6-4 */
+static const u8 rzg3e_tssel_lut[] = {
+ 81, 82, 83, 84, 85, 86, 87, 88, /* P00-P07 */
+ 89, 90, 91, 92, 93, 94, 95, 96, /* P10-P17 */
+ 111, 112, /* P20-P21 */
+ 97, 98, 99, 100, 101, 102, 103, 104, /* P30-P37 */
+ 105, 106, 107, 108, 109, 110, /* P40-P45 */
+ 113, 114, 115, 116, 117, 118, 119, /* P50-P56 */
+ 120, 121, 122, 123, 124, 125, 126, /* P60-P66 */
+ 127, 128, 129, 130, 131, 132, 133, 134, /* P70-P77 */
+ 135, 136, 137, 138, 139, 140, /* P80-P85 */
+ 43, 44, 45, 46, 47, 48, 49, 50, /* PA0-PA7 */
+ 51, 52, 53, 54, 55, 56, 57, 58, /* PB0-PB7 */
+ 59, 60, 61, /* PC0-PC2 */
+ 62, 63, 64, 65, 66, 67, 68, 69, /* PD0-PD7 */
+ 70, 71, 72, 73, 74, 75, 76, 77, /* PE0-PE7 */
+ 78, 79, 80, /* PF0-PF2 */
+ 25, 26, 27, 28, 29, 30, 31, 32, /* PG0-PG7 */
+ 33, 34, 35, 36, 37, 38, /* PH0-PH5 */
+ 4, 5, 6, 7, 8, /* PJ0-PJ4 */
+ 39, 40, 41, 42, /* PK0-PK3 */
+ 9, 10, 11, 12, 21, 22, 23, 24, /* PL0-PL7 */
+ 13, 14, 15, 16, 17, 18, 19, 20, /* PM0-PM7 */
+ 0, 1, 2, 3 /* PS0-PS3 */
+};
+
+static const struct rzv2h_hw_info rzg3e_hw_params = {
+ .tssel_lut = rzg3e_tssel_lut,
+ .t_offs = ICU_RZG3E_TINT_OFFSET,
+ .max_tssel = ICU_RZG3E_TSSEL_MAX_VAL,
+ .field_width = 16,
+};
+
+static const struct rzv2h_hw_info rzv2h_hw_params = {
+ .t_offs = 0,
+ .max_tssel = ICU_RZV2H_TSSEL_MAX_VAL,
+ .field_width = 8,
+};
+
+static int rzg3e_icu_init(struct device_node *node, struct device_node *parent)
+{
+ return rzv2h_icu_init_common(node, parent, &rzg3e_hw_params);
+}
+
+static int rzv2h_icu_init(struct device_node *node, struct device_node *parent)
+{
+ return rzv2h_icu_init_common(node, parent, &rzv2h_hw_params);
+}
+
IRQCHIP_PLATFORM_DRIVER_BEGIN(rzv2h_icu)
+IRQCHIP_MATCH("renesas,r9a09g047-icu", rzg3e_icu_init)
IRQCHIP_MATCH("renesas,r9a09g057-icu", rzv2h_icu_init)
IRQCHIP_PLATFORM_DRIVER_END(rzv2h_icu)
MODULE_AUTHOR("Fabrizio Castro <fabrizio.castro.jz@renesas.com>");
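
The field_width parametrisation above replaces the fixed RZ/V2H layout (four TINT slots per TSSR register, TIEN in bit 7 of each byte) with per-SoC arithmetic: 32/field_width slots per register, the enable bit at the top of each slot, and the select value in the remaining low bits. A small standalone sketch of that arithmetic for the two widths used here (8 for RZ/V2H, 16 for RZ/G3E); the values can be checked against the ICU_TSSR_TIEN()/ICU_TSSR_TSSEL_MASK() macros in the hunk:

#include <stdio.h>

/* Same computation as ICU_TSSR_TIEN() and ICU_TSSR_TSSEL_MASK() above. */
static unsigned int tssr_tien(unsigned int n, unsigned int width)
{
	return (1u << (width - 1)) << (n * width);
}

static unsigned int tssr_tssel_mask(unsigned int n, unsigned int width)
{
	return ((1u << (width - 1)) - 1u) << (n * width);
}

int main(void)
{
	/* RZ/V2H: field_width = 8 -> 4 TINT slots per 32-bit TSSR register */
	printf("w=8  n=1: TIEN=0x%08x SEL mask=0x%08x\n",
	       tssr_tien(1, 8), tssr_tssel_mask(1, 8));
	/* prints TIEN=0x00008000 SEL mask=0x00007f00 */

	/* RZ/G3E: field_width = 16 -> 2 TINT slots per 32-bit TSSR register */
	printf("w=16 n=1: TIEN=0x%08x SEL mask=0x%08x\n",
	       tssr_tien(1, 16), tssr_tssel_mask(1, 16));
	/* prints TIEN=0x80000000 SEL mask=0x7fff0000 */
	return 0;
}
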
diff --git a/drivers/irqchip/irq-riscv-aplic-direct.c b/drivers/irqchip/irq-riscv-aplic-direct.c
index 7cd6b646774b..205ad61d15e4 100644
--- a/drivers/irqchip/irq-riscv-aplic-direct.c
+++ b/drivers/irqchip/irq-riscv-aplic-direct.c
@@ -31,7 +31,7 @@ struct aplic_direct {
};
struct aplic_idc {
- unsigned int hart_index;
+ u32 hart_index;
void __iomem *regs;
struct aplic_direct *direct;
};
@@ -219,6 +219,20 @@ static int aplic_direct_parse_parent_hwirq(struct device *dev, u32 index,
return 0;
}
+static int aplic_direct_get_hart_index(struct device *dev, u32 logical_index,
+ u32 *hart_index)
+{
+ const char *prop_hart_index = "riscv,hart-indexes";
+ struct device_node *np = to_of_node(dev->fwnode);
+
+ if (!np || !of_property_present(np, prop_hart_index)) {
+ *hart_index = logical_index;
+ return 0;
+ }
+
+ return of_property_read_u32_index(np, prop_hart_index, logical_index, hart_index);
+}
+
int aplic_direct_setup(struct device *dev, void __iomem *regs)
{
int i, j, rc, cpu, current_cpu, setup_count = 0;
@@ -265,8 +279,12 @@ int aplic_direct_setup(struct device *dev, void __iomem *regs)
cpumask_set_cpu(cpu, &direct->lmask);
idc = per_cpu_ptr(&aplic_idcs, cpu);
- idc->hart_index = i;
- idc->regs = priv->regs + APLIC_IDC_BASE + i * APLIC_IDC_SIZE;
+ rc = aplic_direct_get_hart_index(dev, i, &idc->hart_index);
+ if (rc) {
+ dev_warn(dev, "hart index not found for IDC%d\n", i);
+ continue;
+ }
+ idc->regs = priv->regs + APLIC_IDC_BASE + idc->hart_index * APLIC_IDC_SIZE;
idc->direct = direct;
aplic_idc_set_delivery(idc, true);
diff --git a/drivers/irqchip/irq-riscv-imsic-early.c b/drivers/irqchip/irq-riscv-imsic-early.c
index 275df5005705..d9ae87808651 100644
--- a/drivers/irqchip/irq-riscv-imsic-early.c
+++ b/drivers/irqchip/irq-riscv-imsic-early.c
@@ -73,10 +73,16 @@ static int __init imsic_ipi_domain_init(void) { return 0; }
static void imsic_handle_irq(struct irq_desc *desc)
{
struct irq_chip *chip = irq_desc_get_chip(desc);
- int err, cpu = smp_processor_id();
+ int cpu = smp_processor_id();
struct imsic_vector *vec;
unsigned long local_id;
+ /*
+ * Process pending local synchronization instead of waiting
+ * for per-CPU local timer to expire.
+ */
+ imsic_local_sync_all(false);
+
chained_irq_enter(chip, desc);
while ((local_id = csr_swap(CSR_TOPEI, 0))) {
@@ -97,9 +103,7 @@ static void imsic_handle_irq(struct irq_desc *desc)
continue;
}
- err = generic_handle_domain_irq(imsic->base_domain, vec->hwirq);
- if (unlikely(err))
- pr_warn_ratelimited("hwirq 0x%x mapping not found\n", vec->hwirq);
+ generic_handle_irq(vec->irq);
}
chained_irq_exit(chip, desc);
@@ -120,7 +124,7 @@ static int imsic_starting_cpu(unsigned int cpu)
* Interrupts identities might have been enabled/disabled while
* this CPU was not running so sync-up local enable/disable state.
*/
- imsic_local_sync_all();
+ imsic_local_sync_all(true);
/* Enable local interrupt delivery */
imsic_local_delivery(true);
diff --git a/drivers/irqchip/irq-riscv-imsic-platform.c b/drivers/irqchip/irq-riscv-imsic-platform.c
index c708780e8760..b8ae67c25b37 100644
--- a/drivers/irqchip/irq-riscv-imsic-platform.c
+++ b/drivers/irqchip/irq-riscv-imsic-platform.c
@@ -20,6 +20,7 @@
#include <linux/spinlock.h>
#include <linux/smp.h>
+#include "irq-msi-lib.h"
#include "irq-riscv-imsic-state.h"
static bool imsic_cpu_page_phys(unsigned int cpu, unsigned int guest_index,
@@ -63,6 +64,11 @@ static int imsic_irq_retrigger(struct irq_data *d)
return 0;
}
+static void imsic_irq_ack(struct irq_data *d)
+{
+ irq_move_irq(d);
+}
+
static void imsic_irq_compose_vector_msg(struct imsic_vector *vec, struct msi_msg *msg)
{
phys_addr_t msi_addr;
@@ -96,9 +102,23 @@ static int imsic_irq_set_affinity(struct irq_data *d, const struct cpumask *mask
bool force)
{
struct imsic_vector *old_vec, *new_vec;
- struct irq_data *pd = d->parent_data;
+ struct imsic_vector tmp_vec;
+
+ /*
+ * Requirements for the downstream irqdomains (or devices):
+ *
+ * 1) Downstream irqdomains (or devices) with atomic MSI update can
+ * happily do imsic_irq_set_affinity() in the process-context on
+ * any CPU so the irqchip of such irqdomains must not set the
+ * IRQCHIP_MOVE_DEFERRED flag.
+ *
+ * 2) Downstream irqdomains (or devices) with non-atomic MSI update
+ * must use imsic_irq_set_affinity() in the interrupt-context upon
+ * the next device interrupt so the irqchip of such irqdomains
+ * must set the IRQCHIP_MOVE_DEFERRED flag.
+ */
- old_vec = irq_data_get_irq_chip_data(pd);
+ old_vec = irq_data_get_irq_chip_data(d);
if (WARN_ON(!old_vec))
return -ENOENT;
@@ -111,34 +131,95 @@ static int imsic_irq_set_affinity(struct irq_data *d, const struct cpumask *mask
return -EBUSY;
/* Get a new vector on the desired set of CPUs */
- new_vec = imsic_vector_alloc(old_vec->hwirq, mask_val);
+ new_vec = imsic_vector_alloc(old_vec->irq, mask_val);
if (!new_vec)
return -ENOSPC;
+ /*
+ * Device having non-atomic MSI update might see an intermediate
+ * state when changing target IMSIC vector from one CPU to another.
+ *
+ * To avoid losing interrupt to such intermediate state, do the
+ * following (just like x86 APIC):
+ *
+ * 1) First write a temporary IMSIC vector to the device which
+ * has MSI address same as the old IMSIC vector but MSI data
+ * matches the new IMSIC vector.
+ *
+ * 2) Next write the new IMSIC vector to the device.
+ *
+ * Based on the above, __imsic_local_sync() must check pending
+ * status of both old MSI data and new MSI data on the old CPU.
+ */
+ if (!irq_can_move_in_process_context(d) &&
+ new_vec->local_id != old_vec->local_id) {
+ /* Setup temporary vector */
+ tmp_vec.cpu = old_vec->cpu;
+ tmp_vec.local_id = new_vec->local_id;
+
+ /* Point device to the temporary vector */
+ imsic_msi_update_msg(irq_get_irq_data(d->irq), &tmp_vec);
+ }
+
/* Point device to the new vector */
- imsic_msi_update_msg(d, new_vec);
+ imsic_msi_update_msg(irq_get_irq_data(d->irq), new_vec);
/* Update irq descriptors with the new vector */
- pd->chip_data = new_vec;
+ d->chip_data = new_vec;
- /* Update effective affinity of parent irq data */
- irq_data_update_effective_affinity(pd, cpumask_of(new_vec->cpu));
+ /* Update effective affinity */
+ irq_data_update_effective_affinity(d, cpumask_of(new_vec->cpu));
/* Move state of the old vector to the new vector */
imsic_vector_move(old_vec, new_vec);
return IRQ_SET_MASK_OK_DONE;
}
+
+static void imsic_irq_force_complete_move(struct irq_data *d)
+{
+ struct imsic_vector *mvec, *vec = irq_data_get_irq_chip_data(d);
+ unsigned int cpu = smp_processor_id();
+
+ if (WARN_ON(!vec))
+ return;
+
+ /* Do nothing if there is no in-flight move */
+ mvec = imsic_vector_get_move(vec);
+ if (!mvec)
+ return;
+
+ /* Do nothing if the old IMSIC vector does not belong to current CPU */
+ if (mvec->cpu != cpu)
+ return;
+
+ /*
+ * The best we can do is force cleanup the old IMSIC vector.
+ *
+ * The challenges over here are same as x86 vector domain so
+ * refer to the comments in irq_force_complete_move() function
+ * implemented at arch/x86/kernel/apic/vector.c.
+ */
+
+ /* Force cleanup in-flight move */
+ pr_info("IRQ fixup: irq %d move in progress, old vector cpu %d local_id %d\n",
+ d->irq, mvec->cpu, mvec->local_id);
+ imsic_vector_force_move_cleanup(vec);
+}
#endif
static struct irq_chip imsic_irq_base_chip = {
- .name = "IMSIC",
- .irq_mask = imsic_irq_mask,
- .irq_unmask = imsic_irq_unmask,
- .irq_retrigger = imsic_irq_retrigger,
- .irq_compose_msi_msg = imsic_irq_compose_msg,
- .flags = IRQCHIP_SKIP_SET_WAKE |
- IRQCHIP_MASK_ON_SUSPEND,
+ .name = "IMSIC",
+ .irq_mask = imsic_irq_mask,
+ .irq_unmask = imsic_irq_unmask,
+#ifdef CONFIG_SMP
+ .irq_set_affinity = imsic_irq_set_affinity,
+ .irq_force_complete_move = imsic_irq_force_complete_move,
+#endif
+ .irq_retrigger = imsic_irq_retrigger,
+ .irq_ack = imsic_irq_ack,
+ .irq_compose_msi_msg = imsic_irq_compose_msg,
+ .flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_MASK_ON_SUSPEND,
};
static int imsic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
@@ -155,7 +236,7 @@ static int imsic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
return -ENOSPC;
irq_domain_set_info(domain, virq, virq, &imsic_irq_base_chip, vec,
- handle_simple_irq, NULL, NULL);
+ handle_edge_irq, NULL, NULL);
irq_set_noprobe(virq);
irq_set_affinity(virq, cpu_online_mask);
irq_data_update_effective_affinity(irq_get_irq_data(virq), cpumask_of(vec->cpu));
@@ -172,22 +253,6 @@ static void imsic_irq_domain_free(struct irq_domain *domain, unsigned int virq,
irq_domain_free_irqs_parent(domain, virq, nr_irqs);
}
-static int imsic_irq_domain_select(struct irq_domain *domain, struct irq_fwspec *fwspec,
- enum irq_domain_bus_token bus_token)
-{
- const struct msi_parent_ops *ops = domain->msi_parent_ops;
- u32 busmask = BIT(bus_token);
-
- if (fwspec->fwnode != domain->fwnode || fwspec->param_count != 0)
- return 0;
-
- /* Handle pure domain searches */
- if (bus_token == ops->bus_select_token)
- return 1;
-
- return !!(ops->bus_select_mask & busmask);
-}
-
#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
static void imsic_irq_debug_show(struct seq_file *m, struct irq_domain *d,
struct irq_data *irqd, int ind)
@@ -204,107 +269,37 @@ static void imsic_irq_debug_show(struct seq_file *m, struct irq_domain *d,
static const struct irq_domain_ops imsic_base_domain_ops = {
.alloc = imsic_irq_domain_alloc,
.free = imsic_irq_domain_free,
- .select = imsic_irq_domain_select,
+ .select = msi_lib_irq_domain_select,
#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
.debug_show = imsic_irq_debug_show,
#endif
};
-#ifdef CONFIG_RISCV_IMSIC_PCI
-
-static void imsic_pci_mask_irq(struct irq_data *d)
-{
- pci_msi_mask_irq(d);
- irq_chip_mask_parent(d);
-}
-
-static void imsic_pci_unmask_irq(struct irq_data *d)
+static bool imsic_init_dev_msi_info(struct device *dev, struct irq_domain *domain,
+ struct irq_domain *real_parent, struct msi_domain_info *info)
{
- irq_chip_unmask_parent(d);
- pci_msi_unmask_irq(d);
-}
-
-#define MATCH_PCI_MSI BIT(DOMAIN_BUS_PCI_MSI)
-
-#else
-
-#define MATCH_PCI_MSI 0
-
-#endif
-
-static bool imsic_init_dev_msi_info(struct device *dev,
- struct irq_domain *domain,
- struct irq_domain *real_parent,
- struct msi_domain_info *info)
-{
- const struct msi_parent_ops *pops = real_parent->msi_parent_ops;
-
- /* MSI parent domain specific settings */
- switch (real_parent->bus_token) {
- case DOMAIN_BUS_NEXUS:
- if (WARN_ON_ONCE(domain != real_parent))
- return false;
-#ifdef CONFIG_SMP
- info->chip->irq_set_affinity = imsic_irq_set_affinity;
-#endif
- break;
- default:
- WARN_ON_ONCE(1);
+ if (!msi_lib_init_dev_msi_info(dev, domain, real_parent, info))
return false;
- }
- /* Is the target supported? */
switch (info->bus_token) {
-#ifdef CONFIG_RISCV_IMSIC_PCI
case DOMAIN_BUS_PCI_DEVICE_MSI:
case DOMAIN_BUS_PCI_DEVICE_MSIX:
- info->chip->irq_mask = imsic_pci_mask_irq;
- info->chip->irq_unmask = imsic_pci_unmask_irq;
- break;
-#endif
- case DOMAIN_BUS_DEVICE_MSI:
- /*
- * Per-device MSI should never have any MSI feature bits
- * set. It's sole purpose is to create a dumb interrupt
- * chip which has a device specific irq_write_msi_msg()
- * callback.
- */
- if (WARN_ON_ONCE(info->flags))
- return false;
-
- /* Core managed MSI descriptors */
- info->flags |= MSI_FLAG_ALLOC_SIMPLE_MSI_DESCS |
- MSI_FLAG_FREE_MSI_DESCS;
- break;
- case DOMAIN_BUS_WIRED_TO_MSI:
+ info->chip->flags |= IRQCHIP_MOVE_DEFERRED;
break;
default:
- WARN_ON_ONCE(1);
- return false;
+ break;
}
- /* Use hierarchial chip operations re-trigger */
- info->chip->irq_retrigger = irq_chip_retrigger_hierarchy;
-
- /*
- * Mask out the domain specific MSI feature flags which are not
- * supported by the real parent.
- */
- info->flags &= pops->supported_flags;
-
- /* Enforce the required flags */
- info->flags |= pops->required_flags;
-
return true;
}
-#define MATCH_PLATFORM_MSI BIT(DOMAIN_BUS_PLATFORM_MSI)
-
static const struct msi_parent_ops imsic_msi_parent_ops = {
.supported_flags = MSI_GENERIC_FLAGS_MASK |
MSI_FLAG_PCI_MSIX,
.required_flags = MSI_FLAG_USE_DEF_DOM_OPS |
- MSI_FLAG_USE_DEF_CHIP_OPS,
+ MSI_FLAG_USE_DEF_CHIP_OPS |
+ MSI_FLAG_PCI_MSI_MASK_PARENT,
+ .chip_flags = MSI_CHIP_FLAG_SET_ACK,
.bus_select_token = DOMAIN_BUS_NEXUS,
.bus_select_mask = MATCH_PCI_MSI | MATCH_PLATFORM_MSI,
.init_dev_msi_info = imsic_init_dev_msi_info,
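
imsic_irq_set_affinity() above copes with devices whose MSI address/data update is not atomic by first pointing the device at a temporary vector (old CPU, new local ID) and only then at the final vector, so an interrupt raised mid-update still lands where the old CPU can notice and forward it. A minimal sketch of that ordering; struct vec, write_msi() and the helper name are placeholders, not the driver's real types.

/* Illustrative sketch only -- placeholder types and callback. */
struct vec {
        unsigned int cpu;
        unsigned int local_id;
};

static void retarget_nonatomic_msi(const struct vec *old, const struct vec *new,
                                   void (*write_msi)(const struct vec *))
{
        struct vec tmp = { .cpu = old->cpu, .local_id = new->local_id };

        /* Step 1: keep the old MSI address (old CPU), switch the MSI data. */
        if (new->local_id != old->local_id)
                write_msi(&tmp);

        /* Step 2: write the final vector (new CPU and new local ID). */
        write_msi(new);

        /*
         * The old CPU must later check the pending bits of both
         * old->local_id and tmp.local_id, which is what the reworked
         * __imsic_local_sync() does.
         */
}
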
diff --git a/drivers/irqchip/irq-riscv-imsic-state.c b/drivers/irqchip/irq-riscv-imsic-state.c
index b97e6cd89ed7..bdf5cd2037f2 100644
--- a/drivers/irqchip/irq-riscv-imsic-state.c
+++ b/drivers/irqchip/irq-riscv-imsic-state.c
@@ -124,10 +124,11 @@ void __imsic_eix_update(unsigned long base_id, unsigned long num_id, bool pend,
}
}
-static void __imsic_local_sync(struct imsic_local_priv *lpriv)
+static bool __imsic_local_sync(struct imsic_local_priv *lpriv)
{
- struct imsic_local_config *mlocal;
- struct imsic_vector *vec, *mvec;
+ struct imsic_local_config *tlocal, *mlocal;
+ struct imsic_vector *vec, *tvec, *mvec;
+ bool ret = true;
int i;
lockdep_assert_held(&lpriv->lock);
@@ -143,35 +144,97 @@ static void __imsic_local_sync(struct imsic_local_priv *lpriv)
__imsic_id_clear_enable(i);
/*
- * If the ID was being moved to a new ID on some other CPU
- * then we can get a MSI during the movement so check the
- * ID pending bit and re-trigger the new ID on other CPU
- * using MMIO write.
+ * Clear the previous vector pointer of the new vector only
+ * after the movement is complete on the old CPU.
*/
- mvec = READ_ONCE(vec->move);
- WRITE_ONCE(vec->move, NULL);
- if (mvec && mvec != vec) {
- if (__imsic_id_read_clear_pending(i)) {
+ mvec = READ_ONCE(vec->move_prev);
+ if (mvec) {
+ /*
+ * If the old vector has not been updated then
+ * try again in the next sync-up call.
+ */
+ if (READ_ONCE(mvec->move_next)) {
+ ret = false;
+ continue;
+ }
+
+ WRITE_ONCE(vec->move_prev, NULL);
+ }
+
+ /*
+ * If a vector was being moved to a new vector on some other
+ * CPU then we can get a MSI during the movement so check the
+ * ID pending bit and re-trigger the new ID on other CPU using
+ * MMIO write.
+ */
+ mvec = READ_ONCE(vec->move_next);
+ if (mvec) {
+ /*
+ * Devices having non-atomic MSI update might see
+ * an intermediate state so check both old ID and
+ * new ID for pending interrupts.
+ *
+ * For details, see imsic_irq_set_affinity().
+ */
+ tvec = vec->local_id == mvec->local_id ?
+ NULL : &lpriv->vectors[mvec->local_id];
+
+ if (tvec && !irq_can_move_in_process_context(irq_get_irq_data(vec->irq)) &&
+ __imsic_id_read_clear_pending(tvec->local_id)) {
+ /* Retrigger temporary vector if it was already in-use */
+ if (READ_ONCE(tvec->enable)) {
+ tlocal = per_cpu_ptr(imsic->global.local, tvec->cpu);
+ writel_relaxed(tvec->local_id, tlocal->msi_va);
+ }
+
+ mlocal = per_cpu_ptr(imsic->global.local, mvec->cpu);
+ writel_relaxed(mvec->local_id, mlocal->msi_va);
+ }
+
+ if (__imsic_id_read_clear_pending(vec->local_id)) {
mlocal = per_cpu_ptr(imsic->global.local, mvec->cpu);
writel_relaxed(mvec->local_id, mlocal->msi_va);
}
- imsic_vector_free(&lpriv->vectors[i]);
+ WRITE_ONCE(vec->move_next, NULL);
+ imsic_vector_free(vec);
}
skip:
bitmap_clear(lpriv->dirty_bitmap, i, 1);
}
+
+ return ret;
}
-void imsic_local_sync_all(void)
+#ifdef CONFIG_SMP
+static void __imsic_local_timer_start(struct imsic_local_priv *lpriv)
+{
+ lockdep_assert_held(&lpriv->lock);
+
+ if (!timer_pending(&lpriv->timer)) {
+ lpriv->timer.expires = jiffies + 1;
+ add_timer_on(&lpriv->timer, smp_processor_id());
+ }
+}
+#else
+static inline void __imsic_local_timer_start(struct imsic_local_priv *lpriv)
+{
+}
+#endif
+
+void imsic_local_sync_all(bool force_all)
{
struct imsic_local_priv *lpriv = this_cpu_ptr(imsic->lpriv);
unsigned long flags;
raw_spin_lock_irqsave(&lpriv->lock, flags);
- bitmap_fill(lpriv->dirty_bitmap, imsic->global.nr_ids + 1);
- __imsic_local_sync(lpriv);
+
+ if (force_all)
+ bitmap_fill(lpriv->dirty_bitmap, imsic->global.nr_ids + 1);
+ if (!__imsic_local_sync(lpriv))
+ __imsic_local_timer_start(lpriv);
+
raw_spin_unlock_irqrestore(&lpriv->lock, flags);
}
@@ -190,12 +253,7 @@ void imsic_local_delivery(bool enable)
#ifdef CONFIG_SMP
static void imsic_local_timer_callback(struct timer_list *timer)
{
- struct imsic_local_priv *lpriv = this_cpu_ptr(imsic->lpriv);
- unsigned long flags;
-
- raw_spin_lock_irqsave(&lpriv->lock, flags);
- __imsic_local_sync(lpriv);
- raw_spin_unlock_irqrestore(&lpriv->lock, flags);
+ imsic_local_sync_all(false);
}
static void __imsic_remote_sync(struct imsic_local_priv *lpriv, unsigned int cpu)
@@ -216,14 +274,11 @@ static void __imsic_remote_sync(struct imsic_local_priv *lpriv, unsigned int cpu
*/
if (cpu_online(cpu)) {
if (cpu == smp_processor_id()) {
- __imsic_local_sync(lpriv);
- return;
+ if (__imsic_local_sync(lpriv))
+ return;
}
- if (!timer_pending(&lpriv->timer)) {
- lpriv->timer.expires = jiffies + 1;
- add_timer_on(&lpriv->timer, cpu);
- }
+ __imsic_local_timer_start(lpriv);
}
}
#else
@@ -278,8 +333,26 @@ void imsic_vector_unmask(struct imsic_vector *vec)
raw_spin_unlock(&lpriv->lock);
}
-static bool imsic_vector_move_update(struct imsic_local_priv *lpriv, struct imsic_vector *vec,
- bool new_enable, struct imsic_vector *new_move)
+void imsic_vector_force_move_cleanup(struct imsic_vector *vec)
+{
+ struct imsic_local_priv *lpriv;
+ struct imsic_vector *mvec;
+ unsigned long flags;
+
+ lpriv = per_cpu_ptr(imsic->lpriv, vec->cpu);
+ raw_spin_lock_irqsave(&lpriv->lock, flags);
+
+ mvec = READ_ONCE(vec->move_prev);
+ WRITE_ONCE(vec->move_prev, NULL);
+ if (mvec)
+ imsic_vector_free(mvec);
+
+ raw_spin_unlock_irqrestore(&lpriv->lock, flags);
+}
+
+static bool imsic_vector_move_update(struct imsic_local_priv *lpriv,
+ struct imsic_vector *vec, bool is_old_vec,
+ bool new_enable, struct imsic_vector *move_vec)
{
unsigned long flags;
bool enabled;
@@ -289,7 +362,10 @@ static bool imsic_vector_move_update(struct imsic_local_priv *lpriv, struct imsi
/* Update enable and move details */
enabled = READ_ONCE(vec->enable);
WRITE_ONCE(vec->enable, new_enable);
- WRITE_ONCE(vec->move, new_move);
+ if (is_old_vec)
+ WRITE_ONCE(vec->move_next, move_vec);
+ else
+ WRITE_ONCE(vec->move_prev, move_vec);
/* Mark the vector as dirty and synchronize */
bitmap_set(lpriv->dirty_bitmap, vec->local_id, 1);
@@ -322,8 +398,8 @@ void imsic_vector_move(struct imsic_vector *old_vec, struct imsic_vector *new_ve
* interrupt on the old vector while device was being moved
* to the new vector.
*/
- enabled = imsic_vector_move_update(old_lpriv, old_vec, false, new_vec);
- imsic_vector_move_update(new_lpriv, new_vec, enabled, new_vec);
+ enabled = imsic_vector_move_update(old_lpriv, old_vec, true, false, new_vec);
+ imsic_vector_move_update(new_lpriv, new_vec, false, enabled, old_vec);
}
#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
@@ -368,7 +444,7 @@ struct imsic_vector *imsic_vector_from_local_id(unsigned int cpu, unsigned int l
return &lpriv->vectors[local_id];
}
-struct imsic_vector *imsic_vector_alloc(unsigned int hwirq, const struct cpumask *mask)
+struct imsic_vector *imsic_vector_alloc(unsigned int irq, const struct cpumask *mask)
{
struct imsic_vector *vec = NULL;
struct imsic_local_priv *lpriv;
@@ -384,9 +460,10 @@ struct imsic_vector *imsic_vector_alloc(unsigned int hwirq, const struct cpumask
lpriv = per_cpu_ptr(imsic->lpriv, cpu);
vec = &lpriv->vectors[local_id];
- vec->hwirq = hwirq;
+ vec->irq = irq;
vec->enable = false;
- vec->move = NULL;
+ vec->move_next = NULL;
+ vec->move_prev = NULL;
return vec;
}
@@ -396,7 +473,7 @@ void imsic_vector_free(struct imsic_vector *vec)
unsigned long flags;
raw_spin_lock_irqsave(&imsic->matrix_lock, flags);
- vec->hwirq = UINT_MAX;
+ vec->irq = 0;
irq_matrix_free(imsic->matrix, vec->cpu, vec->local_id, false);
raw_spin_unlock_irqrestore(&imsic->matrix_lock, flags);
}
@@ -455,7 +532,7 @@ static int __init imsic_local_init(void)
vec = &lpriv->vectors[i];
vec->cpu = cpu;
vec->local_id = i;
- vec->hwirq = UINT_MAX;
+ vec->irq = 0;
}
}
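
A vector move in the hunks above is tracked with two pointers: move_next on the old vector and move_prev on the new one. The old CPU clears move_next only after it has forwarded any pending interrupt, and the new CPU refuses to drop move_prev (returning false so the 1-jiffy timer retries) until that has happened. A stripped-down sketch of the handshake, using placeholder names and the kernel's READ_ONCE()/WRITE_ONCE():

/* Illustrative sketch only -- not the driver's real structures. */
struct moving_vec {
        struct moving_vec *move_next;   /* set on the old vector */
        struct moving_vec *move_prev;   /* set on the new vector */
};

/* Old CPU: runs once pending interrupts have been forwarded. */
static void old_cpu_finish_move(struct moving_vec *old)
{
        WRITE_ONCE(old->move_next, NULL);       /* movement complete */
}

/* New CPU: returns false to request a retry from the sync timer. */
static bool new_cpu_finish_move(struct moving_vec *new)
{
        struct moving_vec *prev = READ_ONCE(new->move_prev);

        if (prev && READ_ONCE(prev->move_next))
                return false;                   /* old CPU is not done yet */

        WRITE_ONCE(new->move_prev, NULL);
        return true;
}
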
diff --git a/drivers/irqchip/irq-riscv-imsic-state.h b/drivers/irqchip/irq-riscv-imsic-state.h
index 391e44280827..3202ffa4e849 100644
--- a/drivers/irqchip/irq-riscv-imsic-state.h
+++ b/drivers/irqchip/irq-riscv-imsic-state.h
@@ -20,10 +20,11 @@ struct imsic_vector {
unsigned int cpu;
unsigned int local_id;
/* Details saved by driver in the vector */
- unsigned int hwirq;
+ unsigned int irq;
/* Details accessed using local lock held */
bool enable;
- struct imsic_vector *move;
+ struct imsic_vector *move_next;
+ struct imsic_vector *move_prev;
};
struct imsic_local_priv {
@@ -74,7 +75,7 @@ static inline void __imsic_id_clear_enable(unsigned long id)
__imsic_eix_update(id, 1, false, false);
}
-void imsic_local_sync_all(void);
+void imsic_local_sync_all(bool force_all);
void imsic_local_delivery(bool enable);
void imsic_vector_mask(struct imsic_vector *vec);
@@ -87,14 +88,15 @@ static inline bool imsic_vector_isenabled(struct imsic_vector *vec)
static inline struct imsic_vector *imsic_vector_get_move(struct imsic_vector *vec)
{
- return READ_ONCE(vec->move);
+ return READ_ONCE(vec->move_prev);
}
+void imsic_vector_force_move_cleanup(struct imsic_vector *vec);
void imsic_vector_move(struct imsic_vector *old_vec, struct imsic_vector *new_vec);
struct imsic_vector *imsic_vector_from_local_id(unsigned int cpu, unsigned int local_id);
-struct imsic_vector *imsic_vector_alloc(unsigned int hwirq, const struct cpumask *mask);
+struct imsic_vector *imsic_vector_alloc(unsigned int irq, const struct cpumask *mask);
void imsic_vector_free(struct imsic_vector *vector);
void imsic_vector_debug_show(struct seq_file *m, struct imsic_vector *vec, int ind);
diff --git a/drivers/irqchip/irq-sg2042-msi.c b/drivers/irqchip/irq-sg2042-msi.c
new file mode 100644
index 000000000000..ee682e87eb8b
--- /dev/null
+++ b/drivers/irqchip/irq-sg2042-msi.c
@@ -0,0 +1,249 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * SG2042 MSI Controller
+ *
+ * Copyright (C) 2024 Sophgo Technology Inc.
+ * Copyright (C) 2024 Chen Wang <unicorn_wang@outlook.com>
+ */
+
+#include <linux/cleanup.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/msi.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/slab.h>
+
+#include "irq-msi-lib.h"
+
+#define SG2042_MAX_MSI_VECTOR 32
+
+struct sg2042_msi_chipdata {
+ void __iomem *reg_clr; // clear reg, see TRM, 10.1.33, GP_INTR0_CLR
+
+ phys_addr_t doorbell_addr; // see TRM, 10.1.32, GP_INTR0_SET
+
+ u32 irq_first; // The vector number where MSIs start
+ u32 num_irqs; // The number of vectors for MSIs
+
+ DECLARE_BITMAP(msi_map, SG2042_MAX_MSI_VECTOR);
+ struct mutex msi_map_lock; // lock for msi_map
+};
+
+static int sg2042_msi_allocate_hwirq(struct sg2042_msi_chipdata *data, int num_req)
+{
+ int first;
+
+ guard(mutex)(&data->msi_map_lock);
+ first = bitmap_find_free_region(data->msi_map, data->num_irqs,
+ get_count_order(num_req));
+ return first >= 0 ? first : -ENOSPC;
+}
+
+static void sg2042_msi_free_hwirq(struct sg2042_msi_chipdata *data, int hwirq, int num_req)
+{
+ guard(mutex)(&data->msi_map_lock);
+ bitmap_release_region(data->msi_map, hwirq, get_count_order(num_req));
+}
+
+static void sg2042_msi_irq_ack(struct irq_data *d)
+{
+ struct sg2042_msi_chipdata *data = irq_data_get_irq_chip_data(d);
+ int bit_off = d->hwirq;
+
+ writel(1 << bit_off, data->reg_clr);
+
+ irq_chip_ack_parent(d);
+}
+
+static void sg2042_msi_irq_compose_msi_msg(struct irq_data *d, struct msi_msg *msg)
+{
+ struct sg2042_msi_chipdata *data = irq_data_get_irq_chip_data(d);
+
+ msg->address_hi = upper_32_bits(data->doorbell_addr);
+ msg->address_lo = lower_32_bits(data->doorbell_addr);
+ msg->data = 1 << d->hwirq;
+}
+
+static const struct irq_chip sg2042_msi_middle_irq_chip = {
+ .name = "SG2042 MSI",
+ .irq_ack = sg2042_msi_irq_ack,
+ .irq_mask = irq_chip_mask_parent,
+ .irq_unmask = irq_chip_unmask_parent,
+#ifdef CONFIG_SMP
+ .irq_set_affinity = irq_chip_set_affinity_parent,
+#endif
+ .irq_compose_msi_msg = sg2042_msi_irq_compose_msi_msg,
+};
+
+static int sg2042_msi_parent_domain_alloc(struct irq_domain *domain, unsigned int virq, int hwirq)
+{
+ struct sg2042_msi_chipdata *data = domain->host_data;
+ struct irq_fwspec fwspec;
+ struct irq_data *d;
+ int ret;
+
+ fwspec.fwnode = domain->parent->fwnode;
+ fwspec.param_count = 2;
+ fwspec.param[0] = data->irq_first + hwirq;
+ fwspec.param[1] = IRQ_TYPE_EDGE_RISING;
+
+ ret = irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
+ if (ret)
+ return ret;
+
+ d = irq_domain_get_irq_data(domain->parent, virq);
+ return d->chip->irq_set_type(d, IRQ_TYPE_EDGE_RISING);
+}
+
+static int sg2042_msi_middle_domain_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs, void *args)
+{
+ struct sg2042_msi_chipdata *data = domain->host_data;
+ int hwirq, err, i;
+
+ hwirq = sg2042_msi_allocate_hwirq(data, nr_irqs);
+ if (hwirq < 0)
+ return hwirq;
+
+ for (i = 0; i < nr_irqs; i++) {
+ err = sg2042_msi_parent_domain_alloc(domain, virq + i, hwirq + i);
+ if (err)
+ goto err_hwirq;
+
+ irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i,
+ &sg2042_msi_middle_irq_chip, data);
+ }
+
+ return 0;
+
+err_hwirq:
+ sg2042_msi_free_hwirq(data, hwirq, nr_irqs);
+ irq_domain_free_irqs_parent(domain, virq, i);
+
+ return err;
+}
+
+static void sg2042_msi_middle_domain_free(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs)
+{
+ struct irq_data *d = irq_domain_get_irq_data(domain, virq);
+ struct sg2042_msi_chipdata *data = irq_data_get_irq_chip_data(d);
+
+ irq_domain_free_irqs_parent(domain, virq, nr_irqs);
+ sg2042_msi_free_hwirq(data, d->hwirq, nr_irqs);
+}
+
+static const struct irq_domain_ops sg2042_msi_middle_domain_ops = {
+ .alloc = sg2042_msi_middle_domain_alloc,
+ .free = sg2042_msi_middle_domain_free,
+ .select = msi_lib_irq_domain_select,
+};
+
+#define SG2042_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \
+ MSI_FLAG_USE_DEF_CHIP_OPS)
+
+#define SG2042_MSI_FLAGS_SUPPORTED MSI_GENERIC_FLAGS_MASK
+
+static const struct msi_parent_ops sg2042_msi_parent_ops = {
+ .required_flags = SG2042_MSI_FLAGS_REQUIRED,
+ .supported_flags = SG2042_MSI_FLAGS_SUPPORTED,
+ .bus_select_mask = MATCH_PCI_MSI,
+ .bus_select_token = DOMAIN_BUS_NEXUS,
+ .prefix = "SG2042-",
+ .init_dev_msi_info = msi_lib_init_dev_msi_info,
+};
+
+static int sg2042_msi_init_domains(struct sg2042_msi_chipdata *data,
+ struct irq_domain *plic_domain, struct device *dev)
+{
+ struct fwnode_handle *fwnode = dev_fwnode(dev);
+ struct irq_domain *middle_domain;
+
+ middle_domain = irq_domain_create_hierarchy(plic_domain, 0, data->num_irqs, fwnode,
+ &sg2042_msi_middle_domain_ops, data);
+ if (!middle_domain) {
+ pr_err("Failed to create the MSI middle domain\n");
+ return -ENOMEM;
+ }
+
+ irq_domain_update_bus_token(middle_domain, DOMAIN_BUS_NEXUS);
+
+ middle_domain->flags |= IRQ_DOMAIN_FLAG_MSI_PARENT;
+ middle_domain->msi_parent_ops = &sg2042_msi_parent_ops;
+
+ return 0;
+}
+
+static int sg2042_msi_probe(struct platform_device *pdev)
+{
+ struct fwnode_reference_args args = { };
+ struct sg2042_msi_chipdata *data;
+ struct device *dev = &pdev->dev;
+ struct irq_domain *plic_domain;
+ struct resource *res;
+ int ret;
+
+ data = devm_kzalloc(dev, sizeof(struct sg2042_msi_chipdata), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->reg_clr = devm_platform_ioremap_resource_byname(pdev, "clr");
+ if (IS_ERR(data->reg_clr)) {
+ dev_err(dev, "Failed to map clear register\n");
+ return PTR_ERR(data->reg_clr);
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "doorbell");
+ if (!res) {
+ dev_err(dev, "Failed to get resource from set\n");
+ return -EINVAL;
+ }
+ data->doorbell_addr = res->start;
+
+ ret = fwnode_property_get_reference_args(dev_fwnode(dev), "msi-ranges",
+ "#interrupt-cells", 0, 0, &args);
+ if (ret) {
+ dev_err(dev, "Unable to parse MSI vec base\n");
+ return ret;
+ }
+ fwnode_handle_put(args.fwnode);
+
+ ret = fwnode_property_get_reference_args(dev_fwnode(dev), "msi-ranges", NULL,
+ args.nargs + 1, 0, &args);
+ if (ret) {
+ dev_err(dev, "Unable to parse MSI vec number\n");
+ return ret;
+ }
+
+ plic_domain = irq_find_matching_fwnode(args.fwnode, DOMAIN_BUS_ANY);
+ fwnode_handle_put(args.fwnode);
+ if (!plic_domain) {
+ pr_err("Failed to find the PLIC domain\n");
+ return -ENXIO;
+ }
+
+ data->irq_first = (u32)args.args[0];
+ data->num_irqs = (u32)args.args[args.nargs - 1];
+
+ mutex_init(&data->msi_map_lock);
+
+ return sg2042_msi_init_domains(data, plic_domain, dev);
+}
+
+static const struct of_device_id sg2042_msi_of_match[] = {
+ { .compatible = "sophgo,sg2042-msi" },
+ { }
+};
+
+static struct platform_driver sg2042_msi_driver = {
+ .driver = {
+ .name = "sg2042-msi",
+ .of_match_table = sg2042_msi_of_match,
+ },
+ .probe = sg2042_msi_probe,
+};
+builtin_platform_driver(sg2042_msi_driver);
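
One detail of the new SG2042 driver worth calling out: bitmap_find_free_region() and bitmap_release_region() work on power-of-two block sizes, so sg2042_msi_allocate_hwirq() rounds a multi-vector request up through get_count_order(). A short usage sketch with illustrative values:

int order = get_count_order(num_req);   /* num_req = 3  ->  order = 2 (4 bits) */
int first = bitmap_find_free_region(data->msi_map, data->num_irqs, order);

if (first >= 0) {
        /* hwirqs first .. first + (1 << order) - 1 now belong to this request */
        bitmap_release_region(data->msi_map, first, order); /* free with the same order */
}
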
diff --git a/drivers/irqchip/irq-sunxi-nmi.c b/drivers/irqchip/irq-sunxi-nmi.c
index 0b4312152024..01b0d8321728 100644
--- a/drivers/irqchip/irq-sunxi-nmi.c
+++ b/drivers/irqchip/irq-sunxi-nmi.c
@@ -48,32 +48,41 @@ enum {
SUNXI_SRC_TYPE_EDGE_RISING,
};
-struct sunxi_sc_nmi_reg_offs {
- u32 ctrl;
- u32 pend;
- u32 enable;
+struct sunxi_sc_nmi_data {
+ struct {
+ u32 ctrl;
+ u32 pend;
+ u32 enable;
+ } reg_offs;
+ u32 enable_val;
};
-static const struct sunxi_sc_nmi_reg_offs sun6i_reg_offs __initconst = {
- .ctrl = SUN6I_NMI_CTRL,
- .pend = SUN6I_NMI_PENDING,
- .enable = SUN6I_NMI_ENABLE,
+static const struct sunxi_sc_nmi_data sun6i_data __initconst = {
+ .reg_offs.ctrl = SUN6I_NMI_CTRL,
+ .reg_offs.pend = SUN6I_NMI_PENDING,
+ .reg_offs.enable = SUN6I_NMI_ENABLE,
};
-static const struct sunxi_sc_nmi_reg_offs sun7i_reg_offs __initconst = {
- .ctrl = SUN7I_NMI_CTRL,
- .pend = SUN7I_NMI_PENDING,
- .enable = SUN7I_NMI_ENABLE,
+static const struct sunxi_sc_nmi_data sun7i_data __initconst = {
+ .reg_offs.ctrl = SUN7I_NMI_CTRL,
+ .reg_offs.pend = SUN7I_NMI_PENDING,
+ .reg_offs.enable = SUN7I_NMI_ENABLE,
};
-static const struct sunxi_sc_nmi_reg_offs sun9i_reg_offs __initconst = {
- .ctrl = SUN9I_NMI_CTRL,
- .pend = SUN9I_NMI_PENDING,
- .enable = SUN9I_NMI_ENABLE,
+static const struct sunxi_sc_nmi_data sun9i_data __initconst = {
+ .reg_offs.ctrl = SUN9I_NMI_CTRL,
+ .reg_offs.pend = SUN9I_NMI_PENDING,
+ .reg_offs.enable = SUN9I_NMI_ENABLE,
};
-static inline void sunxi_sc_nmi_write(struct irq_chip_generic *gc, u32 off,
- u32 val)
+static const struct sunxi_sc_nmi_data sun55i_a523_data __initconst = {
+ .reg_offs.ctrl = SUN9I_NMI_CTRL,
+ .reg_offs.pend = SUN9I_NMI_PENDING,
+ .reg_offs.enable = SUN9I_NMI_ENABLE,
+ .enable_val = BIT(31),
+};
+
+static inline void sunxi_sc_nmi_write(struct irq_chip_generic *gc, u32 off, u32 val)
{
irq_reg_writel(gc, val, off);
}
@@ -143,15 +152,13 @@ static int sunxi_sc_nmi_set_type(struct irq_data *data, unsigned int flow_type)
}
static int __init sunxi_sc_nmi_irq_init(struct device_node *node,
- const struct sunxi_sc_nmi_reg_offs *reg_offs)
+ const struct sunxi_sc_nmi_data *data)
{
- struct irq_domain *domain;
+ unsigned int irq, clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
struct irq_chip_generic *gc;
- unsigned int irq;
- unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
+ struct irq_domain *domain;
int ret;
-
domain = irq_domain_add_linear(node, 1, &irq_generic_chip_ops, NULL);
if (!domain) {
pr_err("Could not register interrupt domain.\n");
@@ -186,27 +193,28 @@ static int __init sunxi_sc_nmi_irq_init(struct device_node *node,
gc->chip_types[0].chip.irq_unmask = irq_gc_mask_set_bit;
gc->chip_types[0].chip.irq_eoi = irq_gc_ack_set_bit;
gc->chip_types[0].chip.irq_set_type = sunxi_sc_nmi_set_type;
- gc->chip_types[0].chip.flags = IRQCHIP_EOI_THREADED | IRQCHIP_EOI_IF_HANDLED |
+ gc->chip_types[0].chip.flags = IRQCHIP_EOI_THREADED |
+ IRQCHIP_EOI_IF_HANDLED |
IRQCHIP_SKIP_SET_WAKE;
- gc->chip_types[0].regs.ack = reg_offs->pend;
- gc->chip_types[0].regs.mask = reg_offs->enable;
- gc->chip_types[0].regs.type = reg_offs->ctrl;
+ gc->chip_types[0].regs.ack = data->reg_offs.pend;
+ gc->chip_types[0].regs.mask = data->reg_offs.enable;
+ gc->chip_types[0].regs.type = data->reg_offs.ctrl;
gc->chip_types[1].type = IRQ_TYPE_EDGE_BOTH;
gc->chip_types[1].chip.irq_ack = irq_gc_ack_set_bit;
gc->chip_types[1].chip.irq_mask = irq_gc_mask_clr_bit;
gc->chip_types[1].chip.irq_unmask = irq_gc_mask_set_bit;
gc->chip_types[1].chip.irq_set_type = sunxi_sc_nmi_set_type;
- gc->chip_types[1].regs.ack = reg_offs->pend;
- gc->chip_types[1].regs.mask = reg_offs->enable;
- gc->chip_types[1].regs.type = reg_offs->ctrl;
+ gc->chip_types[1].regs.ack = data->reg_offs.pend;
+ gc->chip_types[1].regs.mask = data->reg_offs.enable;
+ gc->chip_types[1].regs.type = data->reg_offs.ctrl;
gc->chip_types[1].handler = handle_edge_irq;
/* Disable any active interrupts */
- sunxi_sc_nmi_write(gc, reg_offs->enable, 0);
+ sunxi_sc_nmi_write(gc, data->reg_offs.enable, data->enable_val);
/* Clear any pending NMI interrupts */
- sunxi_sc_nmi_write(gc, reg_offs->pend, SUNXI_NMI_IRQ_BIT);
+ sunxi_sc_nmi_write(gc, data->reg_offs.pend, SUNXI_NMI_IRQ_BIT);
irq_set_chained_handler_and_data(irq, sunxi_sc_nmi_handle_irq, domain);
@@ -221,20 +229,27 @@ fail_irqd_remove:
static int __init sun6i_sc_nmi_irq_init(struct device_node *node,
struct device_node *parent)
{
- return sunxi_sc_nmi_irq_init(node, &sun6i_reg_offs);
+ return sunxi_sc_nmi_irq_init(node, &sun6i_data);
}
IRQCHIP_DECLARE(sun6i_sc_nmi, "allwinner,sun6i-a31-sc-nmi", sun6i_sc_nmi_irq_init);
static int __init sun7i_sc_nmi_irq_init(struct device_node *node,
struct device_node *parent)
{
- return sunxi_sc_nmi_irq_init(node, &sun7i_reg_offs);
+ return sunxi_sc_nmi_irq_init(node, &sun7i_data);
}
IRQCHIP_DECLARE(sun7i_sc_nmi, "allwinner,sun7i-a20-sc-nmi", sun7i_sc_nmi_irq_init);
static int __init sun9i_nmi_irq_init(struct device_node *node,
struct device_node *parent)
{
- return sunxi_sc_nmi_irq_init(node, &sun9i_reg_offs);
+ return sunxi_sc_nmi_irq_init(node, &sun9i_data);
}
IRQCHIP_DECLARE(sun9i_nmi, "allwinner,sun9i-a80-nmi", sun9i_nmi_irq_init);
+
+static int __init sun55i_nmi_irq_init(struct device_node *node,
+ struct device_node *parent)
+{
+ return sunxi_sc_nmi_irq_init(node, &sun55i_a523_data);
+}
+IRQCHIP_DECLARE(sun55i_nmi, "allwinner,sun55i-a523-nmi", sun55i_nmi_irq_init);
diff --git a/drivers/leds/leds-aw200xx.c b/drivers/leds/leds-aw200xx.c
index 08cca128458c..fe223d363a5d 100644
--- a/drivers/leds/leds-aw200xx.c
+++ b/drivers/leds/leds-aw200xx.c
@@ -379,7 +379,7 @@ static void aw200xx_enable(const struct aw200xx *const chip)
static void aw200xx_disable(const struct aw200xx *const chip)
{
- return gpiod_set_value_cansleep(chip->hwen, 0);
+ gpiod_set_value_cansleep(chip->hwen, 0);
}
static int aw200xx_probe_get_display_rows(struct device *dev,
diff --git a/drivers/leds/leds-st1202.c b/drivers/leds/leds-st1202.c
index b691c4886993..e894b3f9a0f4 100644
--- a/drivers/leds/leds-st1202.c
+++ b/drivers/leds/leds-st1202.c
@@ -261,8 +261,6 @@ static int st1202_dt_init(struct st1202_chip *chip)
int err, reg;
for_each_available_child_of_node_scoped(dev_of_node(dev), child) {
- struct led_init_data init_data = {};
-
err = of_property_read_u32(child, "reg", &reg);
if (err)
return dev_err_probe(dev, err, "Invalid register\n");
@@ -276,15 +274,6 @@ static int st1202_dt_init(struct st1202_chip *chip)
led->led_cdev.pattern_set = st1202_led_pattern_set;
led->led_cdev.pattern_clear = st1202_led_pattern_clear;
led->led_cdev.default_trigger = "pattern";
-
- init_data.fwnode = led->fwnode;
- init_data.devicename = "st1202";
- init_data.default_label = ":";
-
- err = devm_led_classdev_register_ext(dev, &led->led_cdev, &init_data);
- if (err < 0)
- return dev_err_probe(dev, err, "Failed to register LED class device\n");
-
led->led_cdev.brightness_set = st1202_brightness_set;
led->led_cdev.brightness_get = st1202_brightness_get;
}
@@ -368,6 +357,7 @@ static int st1202_probe(struct i2c_client *client)
return ret;
for (int i = 0; i < ST1202_MAX_LEDS; i++) {
+ struct led_init_data init_data = {};
led = &chip->leds[i];
led->chip = chip;
led->led_num = i;
@@ -384,6 +374,15 @@ static int st1202_probe(struct i2c_client *client)
if (ret < 0)
return dev_err_probe(&client->dev, ret,
"Failed to clear LED pattern\n");
+
+ init_data.fwnode = led->fwnode;
+ init_data.devicename = "st1202";
+ init_data.default_label = ":";
+
+ ret = devm_led_classdev_register_ext(&client->dev, &led->led_cdev, &init_data);
+ if (ret < 0)
+ return dev_err_probe(&client->dev, ret,
+ "Failed to register LED class device\n");
}
return 0;
diff --git a/drivers/leds/trigger/ledtrig-pattern.c b/drivers/leds/trigger/ledtrig-pattern.c
index aad48c2540fc..a594bd5e2233 100644
--- a/drivers/leds/trigger/ledtrig-pattern.c
+++ b/drivers/leds/trigger/ledtrig-pattern.c
@@ -483,8 +483,8 @@ static int pattern_trig_activate(struct led_classdev *led_cdev)
data->led_cdev = led_cdev;
led_set_trigger_data(led_cdev, data);
timer_setup(&data->timer, pattern_trig_timer_function, 0);
- hrtimer_init(&data->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- data->hrtimer.function = pattern_trig_hrtimer_function;
+ hrtimer_setup(&data->hrtimer, pattern_trig_hrtimer_function, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
led_cdev->activated = true;
if (led_cdev->flags & LED_INIT_DEFAULT_TRIGGER) {
diff --git a/drivers/mailbox/mailbox.c b/drivers/mailbox/mailbox.c
index d3d26a2c9895..118beaf447aa 100644
--- a/drivers/mailbox/mailbox.c
+++ b/drivers/mailbox/mailbox.c
@@ -534,9 +534,7 @@ int mbox_controller_register(struct mbox_controller *mbox)
return -EINVAL;
}
- hrtimer_init(&mbox->poll_hrt, CLOCK_MONOTONIC,
- HRTIMER_MODE_REL);
- mbox->poll_hrt.function = txdone_hrtimer;
+ hrtimer_setup(&mbox->poll_hrt, txdone_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
spin_lock_init(&mbox->poll_hrt_lock);
}
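
The ledtrig-pattern and mailbox hunks above are part of a tree-wide conversion from hrtimer_init() followed by a separate .function assignment to hrtimer_setup(), which takes the callback up front. The shape of the change, shown with a generic callback name for illustration:

/* Old style: two steps, callback assigned after initialization. */
hrtimer_init(&timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
timer.function = my_callback;

/* New style: one call, callback handed to hrtimer_setup() directly. */
hrtimer_setup(&timer, my_callback, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
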
diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c
index 731467d4ed10..b690905ab89f 100644
--- a/drivers/md/dm-flakey.c
+++ b/drivers/md/dm-flakey.c
@@ -426,7 +426,7 @@ static struct bio *clone_bio(struct dm_target *ti, struct flakey_c *fc, struct b
if (!clone)
return NULL;
- bio_init(clone, fc->dev->bdev, bio->bi_inline_vecs, nr_iovecs, bio->bi_opf);
+ bio_init(clone, fc->dev->bdev, clone->bi_inline_vecs, nr_iovecs, bio->bi_opf);
clone->bi_iter.bi_sector = flakey_map_sector(ti, bio->bi_iter.bi_sector);
clone->bi_private = bio;
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index c45464b6576a..c8c1a00e7d80 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -4811,23 +4811,11 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned int argc, char **argv
ti->error = "Cannot allocate bio set";
goto bad;
}
- r = bioset_integrity_create(&ic->recheck_bios, RECHECK_POOL_SIZE);
- if (r) {
- ti->error = "Cannot allocate bio integrity set";
- r = -ENOMEM;
- goto bad;
- }
r = bioset_init(&ic->recalc_bios, 1, 0, BIOSET_NEED_BVECS);
if (r) {
ti->error = "Cannot allocate bio set";
goto bad;
}
- r = bioset_integrity_create(&ic->recalc_bios, 1);
- if (r) {
- ti->error = "Cannot allocate bio integrity set";
- r = -ENOMEM;
- goto bad;
- }
}
ic->metadata_wq = alloc_workqueue("dm-integrity-metadata",
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 0ef5203387b2..453803f1edf5 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -1081,15 +1081,9 @@ static int dm_table_alloc_md_mempools(struct dm_table *t, struct mapped_device *
__alignof__(struct dm_io)) + DM_IO_BIO_OFFSET;
if (bioset_init(&pools->io_bs, pool_size, io_front_pad, bioset_flags))
goto out_free_pools;
- if (mempool_needs_integrity &&
- bioset_integrity_create(&pools->io_bs, pool_size))
- goto out_free_pools;
init_bs:
if (bioset_init(&pools->bs, pool_size, front_pad, 0))
goto out_free_pools;
- if (mempool_needs_integrity &&
- bioset_integrity_create(&pools->bs, pool_size))
- goto out_free_pools;
t->mempools = pools;
return 0;
@@ -1250,6 +1244,7 @@ static int dm_table_construct_crypto_profile(struct dm_table *t)
profile->max_dun_bytes_supported = UINT_MAX;
memset(profile->modes_supported, 0xFF,
sizeof(profile->modes_supported));
+ profile->key_types_supported = ~0;
for (i = 0; i < t->num_targets; i++) {
struct dm_target *ti = dm_table_get_target(t, i);
diff --git a/drivers/md/md-bitmap.c b/drivers/md/md-bitmap.c
index 23c09d22fcdb..44ec9b17cfd3 100644
--- a/drivers/md/md-bitmap.c
+++ b/drivers/md/md-bitmap.c
@@ -29,8 +29,10 @@
#include <linux/buffer_head.h>
#include <linux/seq_file.h>
#include <trace/events/block.h>
+
#include "md.h"
#include "md-bitmap.h"
+#include "md-cluster.h"
#define BITMAP_MAJOR_LO 3
/* version 4 insists the bitmap is in little-endian order
@@ -426,8 +428,8 @@ static int __write_sb_page(struct md_rdev *rdev, struct bitmap *bitmap,
struct block_device *bdev;
struct mddev *mddev = bitmap->mddev;
struct bitmap_storage *store = &bitmap->storage;
- unsigned int bitmap_limit = (bitmap->storage.file_pages - pg_index) <<
- PAGE_SHIFT;
+ unsigned long num_pages = bitmap->storage.file_pages;
+ unsigned int bitmap_limit = (num_pages - pg_index % num_pages) << PAGE_SHIFT;
loff_t sboff, offset = mddev->bitmap_info.offset;
sector_t ps = pg_index * PAGE_SIZE / SECTOR_SIZE;
unsigned int size = PAGE_SIZE;
@@ -436,7 +438,7 @@ static int __write_sb_page(struct md_rdev *rdev, struct bitmap *bitmap,
bdev = (rdev->meta_bdev) ? rdev->meta_bdev : rdev->bdev;
/* we compare length (page numbers), not page offset. */
- if ((pg_index - store->sb_index) == store->file_pages - 1) {
+ if ((pg_index - store->sb_index) == num_pages - 1) {
unsigned int last_page_size = store->bytes & (PAGE_SIZE - 1);
if (last_page_size == 0)
@@ -942,7 +944,7 @@ out:
bmname(bitmap), err);
goto out_no_sb;
}
- bitmap->cluster_slot = md_cluster_ops->slot_number(bitmap->mddev);
+ bitmap->cluster_slot = bitmap->mddev->cluster_ops->slot_number(bitmap->mddev);
goto re_read;
}
@@ -2021,7 +2023,7 @@ static void md_bitmap_free(void *data)
sysfs_put(bitmap->sysfs_can_clear);
if (mddev_is_clustered(bitmap->mddev) && bitmap->mddev->cluster_info &&
- bitmap->cluster_slot == md_cluster_ops->slot_number(bitmap->mddev))
+ bitmap->cluster_slot == bitmap->mddev->cluster_ops->slot_number(bitmap->mddev))
md_cluster_stop(bitmap->mddev);
/* Shouldn't be needed - but just in case.... */
@@ -2229,7 +2231,7 @@ static int bitmap_load(struct mddev *mddev)
mddev_create_serial_pool(mddev, rdev);
if (mddev_is_clustered(mddev))
- md_cluster_ops->load_bitmaps(mddev, mddev->bitmap_info.nodes);
+ mddev->cluster_ops->load_bitmaps(mddev, mddev->bitmap_info.nodes);
/* Clear out old bitmap info first: Either there is none, or we
* are resuming after someone else has possibly changed things,
diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c
index 6595f89becdb..94221d964d4f 100644
--- a/drivers/md/md-cluster.c
+++ b/drivers/md/md-cluster.c
@@ -1166,7 +1166,7 @@ static int resize_bitmaps(struct mddev *mddev, sector_t newsize, sector_t oldsiz
struct dlm_lock_resource *bm_lockres;
char str[64];
- if (i == md_cluster_ops->slot_number(mddev))
+ if (i == slot_number(mddev))
continue;
bitmap = mddev->bitmap_ops->get_from_slot(mddev, i);
@@ -1216,7 +1216,7 @@ out:
*/
static int cluster_check_sync_size(struct mddev *mddev)
{
- int current_slot = md_cluster_ops->slot_number(mddev);
+ int current_slot = slot_number(mddev);
int node_num = mddev->bitmap_info.nodes;
struct dlm_lock_resource *bm_lockres;
struct md_bitmap_stats stats;
@@ -1612,7 +1612,14 @@ out:
return err;
}
-static const struct md_cluster_operations cluster_ops = {
+static struct md_cluster_operations cluster_ops = {
+ .head = {
+ .type = MD_CLUSTER,
+ .id = ID_CLUSTER,
+ .name = "cluster",
+ .owner = THIS_MODULE,
+ },
+
.join = join,
.leave = leave,
.slot_number = slot_number,
@@ -1642,13 +1649,12 @@ static int __init cluster_init(void)
{
pr_warn("md-cluster: support raid1 and raid10 (limited support)\n");
pr_info("Registering Cluster MD functions\n");
- register_md_cluster_operations(&cluster_ops, THIS_MODULE);
- return 0;
+ return register_md_submodule(&cluster_ops.head);
}
static void cluster_exit(void)
{
- unregister_md_cluster_operations();
+ unregister_md_submodule(&cluster_ops.head);
}
module_init(cluster_init);
diff --git a/drivers/md/md-cluster.h b/drivers/md/md-cluster.h
index 470bf18ffde5..8fb06d853173 100644
--- a/drivers/md/md-cluster.h
+++ b/drivers/md/md-cluster.h
@@ -10,6 +10,8 @@ struct mddev;
struct md_rdev;
struct md_cluster_operations {
+ struct md_submodule_head head;
+
int (*join)(struct mddev *mddev, int nodes);
int (*leave)(struct mddev *mddev);
int (*slot_number)(struct mddev *mddev);
@@ -35,4 +37,8 @@ struct md_cluster_operations {
void (*update_size)(struct mddev *mddev, sector_t old_dev_sectors);
};
+extern int md_setup_cluster(struct mddev *mddev, int nodes);
+extern void md_cluster_stop(struct mddev *mddev);
+extern void md_reload_sb(struct mddev *mddev, int raid_disk);
+
#endif /* _MD_CLUSTER_H */
diff --git a/drivers/md/md-linear.c b/drivers/md/md-linear.c
index 369aed044b40..5d9b08115375 100644
--- a/drivers/md/md-linear.c
+++ b/drivers/md/md-linear.c
@@ -5,7 +5,6 @@
*/
#include <linux/blkdev.h>
-#include <linux/raid/md_u.h>
#include <linux/seq_file.h>
#include <linux/module.h>
#include <linux/slab.h>
@@ -320,9 +319,13 @@ static void linear_quiesce(struct mddev *mddev, int state)
}
static struct md_personality linear_personality = {
- .name = "linear",
- .level = LEVEL_LINEAR,
- .owner = THIS_MODULE,
+ .head = {
+ .type = MD_PERSONALITY,
+ .id = ID_LINEAR,
+ .name = "linear",
+ .owner = THIS_MODULE,
+ },
+
.make_request = linear_make_request,
.run = linear_run,
.free = linear_free,
@@ -335,12 +338,12 @@ static struct md_personality linear_personality = {
static int __init linear_init(void)
{
- return register_md_personality(&linear_personality);
+ return register_md_submodule(&linear_personality.head);
}
static void linear_exit(void)
{
- unregister_md_personality(&linear_personality);
+ unregister_md_submodule(&linear_personality.head);
}
module_init(linear_init);
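
With both personalities and the cluster operations embedding a struct md_submodule_head, the md core in the following hunks keeps them in a single xarray keyed by the submodule id and pins the owning module before handing the entry out. A minimal model of that lookup-and-pin step; the helper name is invented for the sketch and simply mirrors get_pers()/get_cluster_ops() below.

/* Illustrative only -- mirrors the shape of the new md.c lookups. */
static struct md_submodule_head *lookup_submodule(struct xarray *xa,
                                                  unsigned long id)
{
        struct md_submodule_head *head;

        xa_lock(xa);
        head = xa_load(xa, id);
        if (head && !try_module_get(head->owner))
                head = NULL;            /* owner is unloading; do not use it */
        xa_unlock(xa);

        return head;                    /* caller drops it with module_put() */
}
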
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 30b3dbbce2d2..438e71e45c16 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -79,16 +79,10 @@ static const char *action_name[NR_SYNC_ACTIONS] = {
[ACTION_IDLE] = "idle",
};
-/* pers_list is a list of registered personalities protected by pers_lock. */
-static LIST_HEAD(pers_list);
-static DEFINE_SPINLOCK(pers_lock);
+static DEFINE_XARRAY(md_submodule);
static const struct kobj_type md_ktype;
-const struct md_cluster_operations *md_cluster_ops;
-EXPORT_SYMBOL(md_cluster_ops);
-static struct module *md_cluster_mod;
-
static DECLARE_WAIT_QUEUE_HEAD(resync_wait);
static struct workqueue_struct *md_wq;
@@ -629,6 +623,12 @@ static void __mddev_put(struct mddev *mddev)
queue_work(md_misc_wq, &mddev->del_work);
}
+static void mddev_put_locked(struct mddev *mddev)
+{
+ if (atomic_dec_and_test(&mddev->active))
+ __mddev_put(mddev);
+}
+
void mddev_put(struct mddev *mddev)
{
if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock))
@@ -888,16 +888,40 @@ struct md_rdev *md_find_rdev_rcu(struct mddev *mddev, dev_t dev)
}
EXPORT_SYMBOL_GPL(md_find_rdev_rcu);
-static struct md_personality *find_pers(int level, char *clevel)
+static struct md_personality *get_pers(int level, char *clevel)
{
- struct md_personality *pers;
- list_for_each_entry(pers, &pers_list, list) {
- if (level != LEVEL_NONE && pers->level == level)
- return pers;
- if (strcmp(pers->name, clevel)==0)
- return pers;
+ struct md_personality *ret = NULL;
+ struct md_submodule_head *head;
+ unsigned long i;
+
+ xa_lock(&md_submodule);
+ xa_for_each(&md_submodule, i, head) {
+ if (head->type != MD_PERSONALITY)
+ continue;
+ if ((level != LEVEL_NONE && head->id == level) ||
+ !strcmp(head->name, clevel)) {
+ if (try_module_get(head->owner))
+ ret = (void *)head;
+ break;
+ }
}
- return NULL;
+ xa_unlock(&md_submodule);
+
+ if (!ret) {
+ if (level != LEVEL_NONE)
+ pr_warn("md: personality for level %d is not loaded!\n",
+ level);
+ else
+ pr_warn("md: personality for level %s is not loaded!\n",
+ clevel);
+ }
+
+ return ret;
+}
+
+static void put_pers(struct md_personality *pers)
+{
+ module_put(pers->head.owner);
}
/* return the offset of the super block in 512byte sectors */
@@ -1180,7 +1204,7 @@ int md_check_no_bitmap(struct mddev *mddev)
if (!mddev->bitmap_info.file && !mddev->bitmap_info.offset)
return 0;
pr_warn("%s: bitmaps are not supported for %s\n",
- mdname(mddev), mddev->pers->name);
+ mdname(mddev), mddev->pers->head.name);
return 1;
}
EXPORT_SYMBOL(md_check_no_bitmap);
@@ -1748,7 +1772,7 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
count <<= sb->bblog_shift;
if (bb + 1 == 0)
break;
- if (badblocks_set(&rdev->badblocks, sector, count, 1))
+ if (!badblocks_set(&rdev->badblocks, sector, count, 1))
return -EINVAL;
}
} else if (sb->bblog_offset != 0)
@@ -2359,19 +2383,6 @@ int md_integrity_register(struct mddev *mddev)
return 0; /* shouldn't register */
pr_debug("md: data integrity enabled on %s\n", mdname(mddev));
- if (bioset_integrity_create(&mddev->bio_set, BIO_POOL_SIZE) ||
- (mddev->level != 1 && mddev->level != 10 &&
- bioset_integrity_create(&mddev->io_clone_set, BIO_POOL_SIZE))) {
- /*
- * No need to handle the failure of bioset_integrity_create,
- * because the function is called by md_run() -> pers->run(),
- * md_run calls bioset_exit -> bioset_integrity_free in case
- * of failure case.
- */
- pr_err("md: failed to create integrity pool for %s\n",
- mdname(mddev));
- return -EINVAL;
- }
return 0;
}
EXPORT_SYMBOL(md_integrity_register);
@@ -2639,11 +2650,11 @@ repeat:
force_change = 1;
if (test_and_clear_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags))
nospares = 1;
- ret = md_cluster_ops->metadata_update_start(mddev);
+ ret = mddev->cluster_ops->metadata_update_start(mddev);
/* Has someone else has updated the sb */
if (!does_sb_need_changing(mddev)) {
if (ret == 0)
- md_cluster_ops->metadata_update_cancel(mddev);
+ mddev->cluster_ops->metadata_update_cancel(mddev);
bit_clear_unless(&mddev->sb_flags, BIT(MD_SB_CHANGE_PENDING),
BIT(MD_SB_CHANGE_DEVS) |
BIT(MD_SB_CHANGE_CLEAN));
@@ -2783,7 +2794,7 @@ rewrite:
/* if there was a failure, MD_SB_CHANGE_DEVS was set, and we re-write super */
if (mddev_is_clustered(mddev) && ret == 0)
- md_cluster_ops->metadata_update_finish(mddev);
+ mddev->cluster_ops->metadata_update_finish(mddev);
if (mddev->in_sync != sync_req ||
!bit_clear_unless(&mddev->sb_flags, BIT(MD_SB_CHANGE_PENDING),
@@ -2942,7 +2953,7 @@ state_store(struct md_rdev *rdev, const char *buf, size_t len)
else {
err = 0;
if (mddev_is_clustered(mddev))
- err = md_cluster_ops->remove_disk(mddev, rdev);
+ err = mddev->cluster_ops->remove_disk(mddev, rdev);
if (err == 0) {
md_kick_rdev_from_array(rdev);
@@ -3052,7 +3063,7 @@ state_store(struct md_rdev *rdev, const char *buf, size_t len)
* by this node eventually
*/
if (!mddev_is_clustered(rdev->mddev) ||
- (err = md_cluster_ops->gather_bitmaps(rdev)) == 0) {
+ (err = mddev->cluster_ops->gather_bitmaps(rdev)) == 0) {
clear_bit(Faulty, &rdev->flags);
err = add_bound_rdev(rdev);
}
@@ -3860,7 +3871,7 @@ level_show(struct mddev *mddev, char *page)
spin_lock(&mddev->lock);
p = mddev->pers;
if (p)
- ret = sprintf(page, "%s\n", p->name);
+ ret = sprintf(page, "%s\n", p->head.name);
else if (mddev->clevel[0])
ret = sprintf(page, "%s\n", mddev->clevel);
else if (mddev->level != LEVEL_NONE)
@@ -3917,7 +3928,7 @@ level_store(struct mddev *mddev, const char *buf, size_t len)
rv = -EINVAL;
if (!mddev->pers->quiesce) {
pr_warn("md: %s: %s does not support online personality change\n",
- mdname(mddev), mddev->pers->name);
+ mdname(mddev), mddev->pers->head.name);
goto out_unlock;
}
@@ -3931,24 +3942,20 @@ level_store(struct mddev *mddev, const char *buf, size_t len)
if (request_module("md-%s", clevel) != 0)
request_module("md-level-%s", clevel);
- spin_lock(&pers_lock);
- pers = find_pers(level, clevel);
- if (!pers || !try_module_get(pers->owner)) {
- spin_unlock(&pers_lock);
- pr_warn("md: personality %s not loaded\n", clevel);
+ pers = get_pers(level, clevel);
+ if (!pers) {
rv = -EINVAL;
goto out_unlock;
}
- spin_unlock(&pers_lock);
if (pers == mddev->pers) {
/* Nothing to do! */
- module_put(pers->owner);
+ put_pers(pers);
rv = len;
goto out_unlock;
}
if (!pers->takeover) {
- module_put(pers->owner);
+ put_pers(pers);
pr_warn("md: %s: %s does not support personality takeover\n",
mdname(mddev), clevel);
rv = -EINVAL;
@@ -3969,7 +3976,7 @@ level_store(struct mddev *mddev, const char *buf, size_t len)
mddev->raid_disks -= mddev->delta_disks;
mddev->delta_disks = 0;
mddev->reshape_backwards = 0;
- module_put(pers->owner);
+ put_pers(pers);
pr_warn("md: %s: %s would not accept array\n",
mdname(mddev), clevel);
rv = PTR_ERR(priv);
@@ -3984,7 +3991,7 @@ level_store(struct mddev *mddev, const char *buf, size_t len)
oldpriv = mddev->private;
mddev->pers = pers;
mddev->private = priv;
- strscpy(mddev->clevel, pers->name, sizeof(mddev->clevel));
+ strscpy(mddev->clevel, pers->head.name, sizeof(mddev->clevel));
mddev->level = mddev->new_level;
mddev->layout = mddev->new_layout;
mddev->chunk_sectors = mddev->new_chunk_sectors;
@@ -4026,7 +4033,7 @@ level_store(struct mddev *mddev, const char *buf, size_t len)
mddev->to_remove = &md_redundancy_group;
}
- module_put(oldpers->owner);
+ put_pers(oldpers);
rdev_for_each(rdev, mddev) {
if (rdev->raid_disk < 0)
@@ -5584,7 +5591,7 @@ __ATTR(fail_last_dev, S_IRUGO | S_IWUSR, fail_last_dev_show,
static ssize_t serialize_policy_show(struct mddev *mddev, char *page)
{
- if (mddev->pers == NULL || (mddev->pers->level != 1))
+ if (mddev->pers == NULL || (mddev->pers->head.id != ID_RAID1))
return sprintf(page, "n/a\n");
else
return sprintf(page, "%d\n", mddev->serialize_policy);
@@ -5610,7 +5617,7 @@ serialize_policy_store(struct mddev *mddev, const char *buf, size_t len)
err = mddev_suspend_and_lock(mddev);
if (err)
return err;
- if (mddev->pers == NULL || (mddev->pers->level != 1)) {
+ if (mddev->pers == NULL || (mddev->pers->head.id != ID_RAID1)) {
pr_err("md: serialize_policy is only effective for raid1\n");
err = -EINVAL;
goto unlock;
@@ -6096,30 +6103,21 @@ int md_run(struct mddev *mddev)
goto exit_sync_set;
}
- spin_lock(&pers_lock);
- pers = find_pers(mddev->level, mddev->clevel);
- if (!pers || !try_module_get(pers->owner)) {
- spin_unlock(&pers_lock);
- if (mddev->level != LEVEL_NONE)
- pr_warn("md: personality for level %d is not loaded!\n",
- mddev->level);
- else
- pr_warn("md: personality for level %s is not loaded!\n",
- mddev->clevel);
+ pers = get_pers(mddev->level, mddev->clevel);
+ if (!pers) {
err = -EINVAL;
goto abort;
}
- spin_unlock(&pers_lock);
- if (mddev->level != pers->level) {
- mddev->level = pers->level;
- mddev->new_level = pers->level;
+ if (mddev->level != pers->head.id) {
+ mddev->level = pers->head.id;
+ mddev->new_level = pers->head.id;
}
- strscpy(mddev->clevel, pers->name, sizeof(mddev->clevel));
+ strscpy(mddev->clevel, pers->head.name, sizeof(mddev->clevel));
if (mddev->reshape_position != MaxSector &&
pers->start_reshape == NULL) {
/* This personality cannot handle reshaping... */
- module_put(pers->owner);
+ put_pers(pers);
err = -EINVAL;
goto abort;
}
@@ -6246,7 +6244,7 @@ bitmap_abort:
if (mddev->private)
pers->free(mddev, mddev->private);
mddev->private = NULL;
- module_put(pers->owner);
+ put_pers(pers);
mddev->bitmap_ops->destroy(mddev);
abort:
bioset_exit(&mddev->io_clone_set);
@@ -6467,7 +6465,7 @@ static void __md_stop(struct mddev *mddev)
mddev->private = NULL;
if (pers->sync_request && mddev->to_remove == NULL)
mddev->to_remove = &md_redundancy_group;
- module_put(pers->owner);
+ put_pers(pers);
clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
bioset_exit(&mddev->bio_set);
@@ -6983,7 +6981,7 @@ int md_add_new_disk(struct mddev *mddev, struct mdu_disk_info_s *info)
set_bit(Candidate, &rdev->flags);
else if (info->state & (1 << MD_DISK_CLUSTER_ADD)) {
/* --add initiated by this node */
- err = md_cluster_ops->add_new_disk(mddev, rdev);
+ err = mddev->cluster_ops->add_new_disk(mddev, rdev);
if (err) {
export_rdev(rdev, mddev);
return err;
@@ -7000,14 +6998,14 @@ int md_add_new_disk(struct mddev *mddev, struct mdu_disk_info_s *info)
if (mddev_is_clustered(mddev)) {
if (info->state & (1 << MD_DISK_CANDIDATE)) {
if (!err) {
- err = md_cluster_ops->new_disk_ack(mddev,
- err == 0);
+ err = mddev->cluster_ops->new_disk_ack(
+ mddev, err == 0);
if (err)
md_kick_rdev_from_array(rdev);
}
} else {
if (err)
- md_cluster_ops->add_new_disk_cancel(mddev);
+ mddev->cluster_ops->add_new_disk_cancel(mddev);
else
err = add_bound_rdev(rdev);
}
@@ -7087,10 +7085,9 @@ static int hot_remove_disk(struct mddev *mddev, dev_t dev)
goto busy;
kick_rdev:
- if (mddev_is_clustered(mddev)) {
- if (md_cluster_ops->remove_disk(mddev, rdev))
- goto busy;
- }
+ if (mddev_is_clustered(mddev) &&
+ mddev->cluster_ops->remove_disk(mddev, rdev))
+ goto busy;
md_kick_rdev_from_array(rdev);
set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
@@ -7393,7 +7390,7 @@ static int update_size(struct mddev *mddev, sector_t num_sectors)
rv = mddev->pers->resize(mddev, num_sectors);
if (!rv) {
if (mddev_is_clustered(mddev))
- md_cluster_ops->update_size(mddev, old_dev_sectors);
+ mddev->cluster_ops->update_size(mddev, old_dev_sectors);
else if (!mddev_is_dm(mddev))
set_capacity_and_notify(mddev->gendisk,
mddev->array_sectors);
@@ -7441,6 +7438,28 @@ static int update_raid_disks(struct mddev *mddev, int raid_disks)
return rv;
}
+static int get_cluster_ops(struct mddev *mddev)
+{
+ xa_lock(&md_submodule);
+ mddev->cluster_ops = xa_load(&md_submodule, ID_CLUSTER);
+ if (mddev->cluster_ops &&
+ !try_module_get(mddev->cluster_ops->head.owner))
+ mddev->cluster_ops = NULL;
+ xa_unlock(&md_submodule);
+
+ return mddev->cluster_ops == NULL ? -ENOENT : 0;
+}
+
+static void put_cluster_ops(struct mddev *mddev)
+{
+ if (!mddev->cluster_ops)
+ return;
+
+ mddev->cluster_ops->leave(mddev);
+ module_put(mddev->cluster_ops->head.owner);
+ mddev->cluster_ops = NULL;
+}
+
/*
* update_array_info is used to change the configuration of an
* on-line array.
@@ -7549,16 +7568,15 @@ static int update_array_info(struct mddev *mddev, mdu_array_info_t *info)
if (mddev->bitmap_info.nodes) {
/* hold PW on all the bitmap lock */
- if (md_cluster_ops->lock_all_bitmaps(mddev) <= 0) {
+ if (mddev->cluster_ops->lock_all_bitmaps(mddev) <= 0) {
pr_warn("md: can't change bitmap to none since the array is in use by more than one node\n");
rv = -EPERM;
- md_cluster_ops->unlock_all_bitmaps(mddev);
+ mddev->cluster_ops->unlock_all_bitmaps(mddev);
goto err;
}
mddev->bitmap_info.nodes = 0;
- md_cluster_ops->leave(mddev);
- module_put(md_cluster_mod);
+ put_cluster_ops(mddev);
mddev->safemode_delay = DEFAULT_SAFEMODE_DELAY;
}
mddev->bitmap_ops->destroy(mddev);
@@ -7842,7 +7860,7 @@ static int md_ioctl(struct block_device *bdev, blk_mode_t mode,
case CLUSTERED_DISK_NACK:
if (mddev_is_clustered(mddev))
- md_cluster_ops->new_disk_ack(mddev, false);
+ mddev->cluster_ops->new_disk_ack(mddev, false);
else
err = -EINVAL;
goto unlock;
@@ -8124,7 +8142,8 @@ void md_error(struct mddev *mddev, struct md_rdev *rdev)
return;
mddev->pers->error_handler(mddev, rdev);
- if (mddev->pers->level == 0 || mddev->pers->level == LEVEL_LINEAR)
+ if (mddev->pers->head.id == ID_RAID0 ||
+ mddev->pers->head.id == ID_LINEAR)
return;
if (mddev->degraded && !test_bit(MD_BROKEN, &mddev->flags))
@@ -8162,14 +8181,17 @@ static void status_unused(struct seq_file *seq)
static void status_personalities(struct seq_file *seq)
{
- struct md_personality *pers;
+ struct md_submodule_head *head;
+ unsigned long i;
seq_puts(seq, "Personalities : ");
- spin_lock(&pers_lock);
- list_for_each_entry(pers, &pers_list, list)
- seq_printf(seq, "[%s] ", pers->name);
- spin_unlock(&pers_lock);
+ xa_lock(&md_submodule);
+ xa_for_each(&md_submodule, i, head)
+ if (head->type == MD_PERSONALITY)
+ seq_printf(seq, "[%s] ", head->name);
+ xa_unlock(&md_submodule);
+
seq_puts(seq, "\n");
}
@@ -8392,7 +8414,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
seq_printf(seq, " (read-only)");
if (mddev->ro == MD_AUTO_READ)
seq_printf(seq, " (auto-read-only)");
- seq_printf(seq, " %s", mddev->pers->name);
+ seq_printf(seq, " %s", mddev->pers->head.name);
} else {
seq_printf(seq, "inactive");
}
@@ -8461,9 +8483,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
if (mddev == list_last_entry(&all_mddevs, struct mddev, all_mddevs))
status_unused(seq);
- if (atomic_dec_and_test(&mddev->active))
- __mddev_put(mddev);
-
+ mddev_put_locked(mddev);
return 0;
}
@@ -8514,67 +8534,34 @@ static const struct proc_ops mdstat_proc_ops = {
.proc_poll = mdstat_poll,
};
-int register_md_personality(struct md_personality *p)
-{
- pr_debug("md: %s personality registered for level %d\n",
- p->name, p->level);
- spin_lock(&pers_lock);
- list_add_tail(&p->list, &pers_list);
- spin_unlock(&pers_lock);
- return 0;
-}
-EXPORT_SYMBOL(register_md_personality);
-
-int unregister_md_personality(struct md_personality *p)
+int register_md_submodule(struct md_submodule_head *msh)
{
- pr_debug("md: %s personality unregistered\n", p->name);
- spin_lock(&pers_lock);
- list_del_init(&p->list);
- spin_unlock(&pers_lock);
- return 0;
+ return xa_insert(&md_submodule, msh->id, msh, GFP_KERNEL);
}
-EXPORT_SYMBOL(unregister_md_personality);
+EXPORT_SYMBOL_GPL(register_md_submodule);
-int register_md_cluster_operations(const struct md_cluster_operations *ops,
- struct module *module)
+void unregister_md_submodule(struct md_submodule_head *msh)
{
- int ret = 0;
- spin_lock(&pers_lock);
- if (md_cluster_ops != NULL)
- ret = -EALREADY;
- else {
- md_cluster_ops = ops;
- md_cluster_mod = module;
- }
- spin_unlock(&pers_lock);
- return ret;
+ xa_erase(&md_submodule, msh->id);
}
-EXPORT_SYMBOL(register_md_cluster_operations);
-
-int unregister_md_cluster_operations(void)
-{
- spin_lock(&pers_lock);
- md_cluster_ops = NULL;
- spin_unlock(&pers_lock);
- return 0;
-}
-EXPORT_SYMBOL(unregister_md_cluster_operations);
+EXPORT_SYMBOL_GPL(unregister_md_submodule);
int md_setup_cluster(struct mddev *mddev, int nodes)
{
- int ret;
- if (!md_cluster_ops)
+ int ret = get_cluster_ops(mddev);
+
+ if (ret) {
request_module("md-cluster");
- spin_lock(&pers_lock);
+ ret = get_cluster_ops(mddev);
+ }
+
/* ensure module won't be unloaded */
- if (!md_cluster_ops || !try_module_get(md_cluster_mod)) {
+ if (ret) {
pr_warn("can't find md-cluster module or get its reference.\n");
- spin_unlock(&pers_lock);
- return -ENOENT;
+ return ret;
}
- spin_unlock(&pers_lock);
- ret = md_cluster_ops->join(mddev, nodes);
+ ret = mddev->cluster_ops->join(mddev, nodes);
if (!ret)
mddev->safemode_delay = 0;
return ret;
@@ -8582,10 +8569,7 @@ int md_setup_cluster(struct mddev *mddev, int nodes)
void md_cluster_stop(struct mddev *mddev)
{
- if (!md_cluster_ops)
- return;
- md_cluster_ops->leave(mddev);
- module_put(md_cluster_mod);
+ put_cluster_ops(mddev);
}
static int is_mddev_idle(struct mddev *mddev, int init)
@@ -8978,7 +8962,7 @@ void md_do_sync(struct md_thread *thread)
}
if (mddev_is_clustered(mddev)) {
- ret = md_cluster_ops->resync_start(mddev);
+ ret = mddev->cluster_ops->resync_start(mddev);
if (ret)
goto skip;
@@ -9005,7 +8989,7 @@ void md_do_sync(struct md_thread *thread)
*
*/
if (mddev_is_clustered(mddev))
- md_cluster_ops->resync_start_notify(mddev);
+ mddev->cluster_ops->resync_start_notify(mddev);
do {
int mddev2_minor = -1;
mddev->curr_resync = MD_RESYNC_DELAYED;
@@ -9460,6 +9444,13 @@ static bool md_choose_sync_action(struct mddev *mddev, int *spares)
return true;
}
+ /* Check if resync is in progress. */
+ if (mddev->recovery_cp < MaxSector) {
+ set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
+ clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
+ return true;
+ }
+
/*
* Remove any failed drives, then add spares if possible. Spares are
* also removed and re-added, to allow the personality to fail the
@@ -9476,13 +9467,6 @@ static bool md_choose_sync_action(struct mddev *mddev, int *spares)
return true;
}
- /* Check if recovery is in progress. */
- if (mddev->recovery_cp < MaxSector) {
- set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
- clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
- return true;
- }
-
/* Delay to choose resync/check/repair in md_do_sync(). */
if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
return true;
@@ -9789,7 +9773,7 @@ void md_reap_sync_thread(struct mddev *mddev)
* call resync_finish here if MD_CLUSTER_RESYNC_LOCKED is set by
* clustered raid */
if (test_and_clear_bit(MD_CLUSTER_RESYNC_LOCKED, &mddev->flags))
- md_cluster_ops->resync_finish(mddev);
+ mddev->cluster_ops->resync_finish(mddev);
clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
@@ -9797,13 +9781,13 @@ void md_reap_sync_thread(struct mddev *mddev)
clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
/*
- * We call md_cluster_ops->update_size here because sync_size could
+ * We call mddev->cluster_ops->update_size here because sync_size could
* be changed by md_update_sb, and MD_RECOVERY_RESHAPE is cleared,
* so it is time to update size across cluster.
*/
if (mddev_is_clustered(mddev) && is_reshaped
&& !test_bit(MD_CLOSING, &mddev->flags))
- md_cluster_ops->update_size(mddev, old_dev_sectors);
+ mddev->cluster_ops->update_size(mddev, old_dev_sectors);
/* flag recovery needed just to double check */
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
sysfs_notify_dirent_safe(mddev->sysfs_completed);
@@ -9841,12 +9825,11 @@ EXPORT_SYMBOL(md_finish_reshape);
/* Bad block management */
-/* Returns 1 on success, 0 on failure */
-int rdev_set_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
- int is_new)
+/* Returns true on success, false on failure */
+bool rdev_set_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
+ int is_new)
{
struct mddev *mddev = rdev->mddev;
- int rv;
/*
* Recording new badblocks for faulty rdev will force unnecessary
@@ -9856,50 +9839,51 @@ int rdev_set_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
* avoid it.
*/
if (test_bit(Faulty, &rdev->flags))
- return 1;
+ return true;
if (is_new)
s += rdev->new_data_offset;
else
s += rdev->data_offset;
- rv = badblocks_set(&rdev->badblocks, s, sectors, 0);
- if (rv == 0) {
- /* Make sure they get written out promptly */
- if (test_bit(ExternalBbl, &rdev->flags))
- sysfs_notify_dirent_safe(rdev->sysfs_unack_badblocks);
- sysfs_notify_dirent_safe(rdev->sysfs_state);
- set_mask_bits(&mddev->sb_flags, 0,
- BIT(MD_SB_CHANGE_CLEAN) | BIT(MD_SB_CHANGE_PENDING));
- md_wakeup_thread(rdev->mddev->thread);
- return 1;
- } else
- return 0;
+
+ if (!badblocks_set(&rdev->badblocks, s, sectors, 0))
+ return false;
+
+ /* Make sure they get written out promptly */
+ if (test_bit(ExternalBbl, &rdev->flags))
+ sysfs_notify_dirent_safe(rdev->sysfs_unack_badblocks);
+ sysfs_notify_dirent_safe(rdev->sysfs_state);
+ set_mask_bits(&mddev->sb_flags, 0,
+ BIT(MD_SB_CHANGE_CLEAN) | BIT(MD_SB_CHANGE_PENDING));
+ md_wakeup_thread(rdev->mddev->thread);
+ return true;
}
EXPORT_SYMBOL_GPL(rdev_set_badblocks);
-int rdev_clear_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
- int is_new)
+void rdev_clear_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
+ int is_new)
{
- int rv;
if (is_new)
s += rdev->new_data_offset;
else
s += rdev->data_offset;
- rv = badblocks_clear(&rdev->badblocks, s, sectors);
- if ((rv == 0) && test_bit(ExternalBbl, &rdev->flags))
+
+ if (!badblocks_clear(&rdev->badblocks, s, sectors))
+ return;
+
+ if (test_bit(ExternalBbl, &rdev->flags))
sysfs_notify_dirent_safe(rdev->sysfs_badblocks);
- return rv;
}
EXPORT_SYMBOL_GPL(rdev_clear_badblocks);
static int md_notify_reboot(struct notifier_block *this,
unsigned long code, void *x)
{
- struct mddev *mddev, *n;
+ struct mddev *mddev;
int need_delay = 0;
spin_lock(&all_mddevs_lock);
- list_for_each_entry_safe(mddev, n, &all_mddevs, all_mddevs) {
+ list_for_each_entry(mddev, &all_mddevs, all_mddevs) {
if (!mddev_get(mddev))
continue;
spin_unlock(&all_mddevs_lock);
@@ -9911,8 +9895,8 @@ static int md_notify_reboot(struct notifier_block *this,
mddev_unlock(mddev);
}
need_delay = 1;
- mddev_put(mddev);
spin_lock(&all_mddevs_lock);
+ mddev_put_locked(mddev);
}
spin_unlock(&all_mddevs_lock);
@@ -10029,7 +10013,7 @@ static void check_sb_changes(struct mddev *mddev, struct md_rdev *rdev)
if (rdev2->raid_disk == -1 && role != MD_DISK_ROLE_SPARE &&
!(le32_to_cpu(sb->feature_map) &
MD_FEATURE_RESHAPE_ACTIVE) &&
- !md_cluster_ops->resync_status_get(mddev)) {
+ !mddev->cluster_ops->resync_status_get(mddev)) {
/*
* -1 to make raid1_add_disk() set conf->fullsync
* to 1. This could avoid skipping sync when the
@@ -10245,7 +10229,7 @@ void md_autostart_arrays(int part)
static __exit void md_exit(void)
{
- struct mddev *mddev, *n;
+ struct mddev *mddev;
int delay = 1;
unregister_blkdev(MD_MAJOR,"md");
@@ -10266,7 +10250,7 @@ static __exit void md_exit(void)
remove_proc_entry("mdstat", NULL);
spin_lock(&all_mddevs_lock);
- list_for_each_entry_safe(mddev, n, &all_mddevs, all_mddevs) {
+ list_for_each_entry(mddev, &all_mddevs, all_mddevs) {
if (!mddev_get(mddev))
continue;
spin_unlock(&all_mddevs_lock);
@@ -10278,8 +10262,8 @@ static __exit void md_exit(void)
* the mddev for destruction by a workqueue, and the
* destroy_workqueue() below will wait for that to complete.
*/
- mddev_put(mddev);
spin_lock(&all_mddevs_lock);
+ mddev_put_locked(mddev);
}
spin_unlock(&all_mddevs_lock);
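
The md.c hunks above replace the pers_lock/pers_list pair and the md_cluster_ops/md_cluster_mod globals with a single xarray keyed by submodule id, with lookups taking a module reference under xa_lock(). A minimal, self-contained sketch of that registry pattern follows; it assumes kernel context, and the demo_* names are illustrative only (the real code uses the md_submodule xarray and the struct md_submodule_head declared in md.h below).

#include <linux/module.h>
#include <linux/xarray.h>

struct demo_head {
	unsigned int type;	/* e.g. MD_PERSONALITY vs MD_CLUSTER */
	unsigned long id;	/* key into the xarray */
	const char *name;
	struct module *owner;
};

static DEFINE_XARRAY(demo_registry);

static int demo_register(struct demo_head *head)
{
	/* Fails with -EBUSY if the id is already taken, replacing the old
	 * "walk pers_list under pers_lock" duplicate check. */
	return xa_insert(&demo_registry, head->id, head, GFP_KERNEL);
}

static void demo_unregister(struct demo_head *head)
{
	xa_erase(&demo_registry, head->id);
}

static struct demo_head *demo_get(unsigned long id, unsigned int type)
{
	struct demo_head *head;

	/* Look up and pin the owning module under xa_lock(), in the same
	 * way get_cluster_ops() and status_personalities() do above. */
	xa_lock(&demo_registry);
	head = xa_load(&demo_registry, id);
	if (head && (head->type != type || !try_module_get(head->owner)))
		head = NULL;
	xa_unlock(&demo_registry);

	return head;
}
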
diff --git a/drivers/md/md.h b/drivers/md/md.h
index def808064ad8..1cf00a04bcdd 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -18,11 +18,37 @@
#include <linux/timer.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
+#include <linux/raid/md_u.h>
#include <trace/events/block.h>
-#include "md-cluster.h"
#define MaxSector (~(sector_t)0)
+enum md_submodule_type {
+ MD_PERSONALITY = 0,
+ MD_CLUSTER,
+ MD_BITMAP, /* TODO */
+};
+
+enum md_submodule_id {
+ ID_LINEAR = LEVEL_LINEAR,
+ ID_RAID0 = 0,
+ ID_RAID1 = 1,
+ ID_RAID4 = 4,
+ ID_RAID5 = 5,
+ ID_RAID6 = 6,
+ ID_RAID10 = 10,
+ ID_CLUSTER,
+ ID_BITMAP, /* TODO */
+ ID_LLBITMAP, /* TODO */
+};
+
+struct md_submodule_head {
+ enum md_submodule_type type;
+ enum md_submodule_id id;
+ const char *name;
+ struct module *owner;
+};
+
/*
* These flags should really be called "NO_RETRY" rather than
* "FAILFAST" because they don't make any promise about time lapse,
@@ -266,8 +292,8 @@ enum flag_bits {
Nonrot, /* non-rotational device (SSD) */
};
-static inline int is_badblock(struct md_rdev *rdev, sector_t s, int sectors,
- sector_t *first_bad, int *bad_sectors)
+static inline int is_badblock(struct md_rdev *rdev, sector_t s, sector_t sectors,
+ sector_t *first_bad, sector_t *bad_sectors)
{
if (unlikely(rdev->badblocks.count)) {
int rv = badblocks_check(&rdev->badblocks, rdev->data_offset + s,
@@ -284,16 +310,17 @@ static inline int rdev_has_badblock(struct md_rdev *rdev, sector_t s,
int sectors)
{
sector_t first_bad;
- int bad_sectors;
+ sector_t bad_sectors;
return is_badblock(rdev, s, sectors, &first_bad, &bad_sectors);
}
-extern int rdev_set_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
- int is_new);
-extern int rdev_clear_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
- int is_new);
+extern bool rdev_set_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
+ int is_new);
+extern void rdev_clear_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
+ int is_new);
struct md_cluster_info;
+struct md_cluster_operations;
/**
* enum mddev_flags - md device flags.
@@ -576,6 +603,7 @@ struct mddev {
mempool_t *serial_info_pool;
void (*sync_super)(struct mddev *mddev, struct md_rdev *rdev);
struct md_cluster_info *cluster_info;
+ struct md_cluster_operations *cluster_ops;
unsigned int good_device_nr; /* good device num within cluster raid */
unsigned int noio_flag; /* for memalloc scope API */
@@ -699,10 +727,8 @@ static inline void md_sync_acct_bio(struct bio *bio, unsigned long nr_sectors)
struct md_personality
{
- char *name;
- int level;
- struct list_head list;
- struct module *owner;
+ struct md_submodule_head head;
+
bool __must_check (*make_request)(struct mddev *mddev, struct bio *bio);
/*
* start up works that do NOT require md_thread. tasks that
@@ -843,13 +869,9 @@ static inline void safe_put_page(struct page *p)
if (p) put_page(p);
}
-extern int register_md_personality(struct md_personality *p);
-extern int unregister_md_personality(struct md_personality *p);
-extern int register_md_cluster_operations(const struct md_cluster_operations *ops,
- struct module *module);
-extern int unregister_md_cluster_operations(void);
-extern int md_setup_cluster(struct mddev *mddev, int nodes);
-extern void md_cluster_stop(struct mddev *mddev);
+int register_md_submodule(struct md_submodule_head *msh);
+void unregister_md_submodule(struct md_submodule_head *msh);
+
extern struct md_thread *md_register_thread(
void (*run)(struct md_thread *thread),
struct mddev *mddev,
@@ -906,7 +928,6 @@ extern void md_idle_sync_thread(struct mddev *mddev);
extern void md_frozen_sync_thread(struct mddev *mddev);
extern void md_unfrozen_sync_thread(struct mddev *mddev);
-extern void md_reload_sb(struct mddev *mddev, int raid_disk);
extern void md_update_sb(struct mddev *mddev, int force);
extern void mddev_create_serial_pool(struct mddev *mddev, struct md_rdev *rdev);
extern void mddev_destroy_serial_pool(struct mddev *mddev,
@@ -928,7 +949,6 @@ static inline void rdev_dec_pending(struct md_rdev *rdev, struct mddev *mddev)
}
}
-extern const struct md_cluster_operations *md_cluster_ops;
static inline int mddev_is_clustered(struct mddev *mddev)
{
return mddev->cluster_info && mddev->bitmap_info.nodes > 1;
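
Given struct md_submodule_head above, each personality embeds the head as its first member and registers it at module init; the raid0, raid1, raid10 and raid456 hunks below all follow this shape. As a compressed, illustrative stub (the demo_* names are not part of the patch, and the real ops table is elided):

#include <linux/module.h>

#include "md.h"

static struct md_personality demo_personality = {
	.head = {
		.type	= MD_PERSONALITY,
		.id	= ID_RAID0,	/* the level this module implements */
		.name	= "demo",
		.owner	= THIS_MODULE,
	},
	/* .make_request, .run, .free, ... unchanged from before */
};

static int __init demo_init(void)
{
	/* Keyed by head.id; fails with -EBUSY if the level is already registered. */
	return register_md_submodule(&demo_personality.head);
}

static void __exit demo_exit(void)
{
	unregister_md_submodule(&demo_personality.head);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
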
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index 70bcc3cdf2cd..d8f639f4ae12 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -809,9 +809,13 @@ static void raid0_quiesce(struct mddev *mddev, int quiesce)
static struct md_personality raid0_personality=
{
- .name = "raid0",
- .level = 0,
- .owner = THIS_MODULE,
+ .head = {
+ .type = MD_PERSONALITY,
+ .id = ID_RAID0,
+ .name = "raid0",
+ .owner = THIS_MODULE,
+ },
+
.make_request = raid0_make_request,
.run = raid0_run,
.free = raid0_free,
@@ -822,14 +826,14 @@ static struct md_personality raid0_personality=
.error_handler = raid0_error,
};
-static int __init raid0_init (void)
+static int __init raid0_init(void)
{
- return register_md_personality (&raid0_personality);
+ return register_md_submodule(&raid0_personality.head);
}
-static void raid0_exit (void)
+static void __exit raid0_exit(void)
{
- unregister_md_personality (&raid0_personality);
+ unregister_md_submodule(&raid0_personality.head);
}
module_init(raid0_init);
diff --git a/drivers/md/raid1-10.c b/drivers/md/raid1-10.c
index 4378d3250bd7..c7efd8aab675 100644
--- a/drivers/md/raid1-10.c
+++ b/drivers/md/raid1-10.c
@@ -247,7 +247,7 @@ static inline int raid1_check_read_range(struct md_rdev *rdev,
sector_t this_sector, int *len)
{
sector_t first_bad;
- int bad_sectors;
+ sector_t bad_sectors;
/* no bad block overlap */
if (!is_badblock(rdev, this_sector, *len, &first_bad, &bad_sectors))
@@ -287,8 +287,8 @@ static inline bool raid1_should_read_first(struct mddev *mddev,
return true;
if (mddev_is_clustered(mddev) &&
- md_cluster_ops->area_resyncing(mddev, READ, this_sector,
- this_sector + len))
+ mddev->cluster_ops->area_resyncing(mddev, READ, this_sector,
+ this_sector + len))
return true;
return false;
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 10ea3af40991..0efc03cea24e 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -36,6 +36,7 @@
#include "md.h"
#include "raid1.h"
#include "md-bitmap.h"
+#include "md-cluster.h"
#define UNSUPPORTED_MDDEV_FLAGS \
((1L << MD_HAS_JOURNAL) | \
@@ -45,6 +46,7 @@
static void allow_barrier(struct r1conf *conf, sector_t sector_nr);
static void lower_barrier(struct r1conf *conf, sector_t sector_nr);
+static void raid1_free(struct mddev *mddev, void *priv);
#define RAID_1_10_NAME "raid1"
#include "raid1-10.c"
@@ -1315,8 +1317,6 @@ static void raid1_read_request(struct mddev *mddev, struct bio *bio,
struct r1conf *conf = mddev->private;
struct raid1_info *mirror;
struct bio *read_bio;
- const enum req_op op = bio_op(bio);
- const blk_opf_t do_sync = bio->bi_opf & REQ_SYNC;
int max_sectors;
int rdisk, error;
bool r1bio_existed = !!r1_bio;
@@ -1404,7 +1404,6 @@ static void raid1_read_request(struct mddev *mddev, struct bio *bio,
read_bio->bi_iter.bi_sector = r1_bio->sector +
mirror->rdev->data_offset;
read_bio->bi_end_io = raid1_end_read_request;
- read_bio->bi_opf = op | do_sync;
if (test_bit(FailFast, &mirror->rdev->flags) &&
test_bit(R1BIO_FailFast, &r1_bio->state))
read_bio->bi_opf |= MD_FAILFAST;
@@ -1467,7 +1466,7 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
bool is_discard = (bio_op(bio) == REQ_OP_DISCARD);
if (mddev_is_clustered(mddev) &&
- md_cluster_ops->area_resyncing(mddev, WRITE,
+ mddev->cluster_ops->area_resyncing(mddev, WRITE,
bio->bi_iter.bi_sector, bio_end_sector(bio))) {
DEFINE_WAIT(w);
@@ -1478,7 +1477,7 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
for (;;) {
prepare_to_wait(&conf->wait_barrier,
&w, TASK_IDLE);
- if (!md_cluster_ops->area_resyncing(mddev, WRITE,
+ if (!mddev->cluster_ops->area_resyncing(mddev, WRITE,
bio->bi_iter.bi_sector,
bio_end_sector(bio)))
break;
@@ -1537,7 +1536,7 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
atomic_inc(&rdev->nr_pending);
if (test_bit(WriteErrorSeen, &rdev->flags)) {
sector_t first_bad;
- int bad_sectors;
+ sector_t bad_sectors;
int is_bad;
is_bad = is_badblock(rdev, r1_bio->sector, max_sectors,
@@ -1653,8 +1652,6 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
mbio->bi_iter.bi_sector = (r1_bio->sector + rdev->data_offset);
mbio->bi_end_io = raid1_end_write_request;
- mbio->bi_opf = bio_op(bio) |
- (bio->bi_opf & (REQ_SYNC | REQ_FUA | REQ_ATOMIC));
if (test_bit(FailFast, &rdev->flags) &&
!test_bit(WriteMostly, &rdev->flags) &&
conf->raid_disks - mddev->degraded > 1)
@@ -2486,7 +2483,7 @@ static void fix_read_error(struct r1conf *conf, struct r1bio *r1_bio)
}
}
-static int narrow_write_error(struct r1bio *r1_bio, int i)
+static bool narrow_write_error(struct r1bio *r1_bio, int i)
{
struct mddev *mddev = r1_bio->mddev;
struct r1conf *conf = mddev->private;
@@ -2507,10 +2504,10 @@ static int narrow_write_error(struct r1bio *r1_bio, int i)
sector_t sector;
int sectors;
int sect_to_write = r1_bio->sectors;
- int ok = 1;
+ bool ok = true;
if (rdev->badblocks.shift < 0)
- return 0;
+ return false;
block_sectors = roundup(1 << rdev->badblocks.shift,
bdev_logical_block_size(rdev->bdev) >> 9);
@@ -2886,7 +2883,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
} else {
/* may need to read from here */
sector_t first_bad = MaxSector;
- int bad_sectors;
+ sector_t bad_sectors;
if (is_badblock(rdev, sector_nr, good_sectors,
&first_bad, &bad_sectors)) {
@@ -3038,9 +3035,9 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
conf->cluster_sync_low = mddev->curr_resync_completed;
conf->cluster_sync_high = conf->cluster_sync_low + CLUSTER_RESYNC_WINDOW_SECTORS;
/* Send resync message */
- md_cluster_ops->resync_info_update(mddev,
- conf->cluster_sync_low,
- conf->cluster_sync_high);
+ mddev->cluster_ops->resync_info_update(mddev,
+ conf->cluster_sync_low,
+ conf->cluster_sync_high);
}
/* For a user-requested sync, we read all readable devices and do a
@@ -3256,8 +3253,11 @@ static int raid1_run(struct mddev *mddev)
if (!mddev_is_dm(mddev)) {
ret = raid1_set_limits(mddev);
- if (ret)
+ if (ret) {
+ if (!mddev->private)
+ raid1_free(mddev, conf);
return ret;
+ }
}
mddev->degraded = 0;
@@ -3271,6 +3271,8 @@ static int raid1_run(struct mddev *mddev)
*/
if (conf->raid_disks - mddev->degraded < 1) {
md_unregister_thread(mddev, &conf->thread);
+ if (!mddev->private)
+ raid1_free(mddev, conf);
return -EINVAL;
}
@@ -3491,9 +3493,13 @@ static void *raid1_takeover(struct mddev *mddev)
static struct md_personality raid1_personality =
{
- .name = "raid1",
- .level = 1,
- .owner = THIS_MODULE,
+ .head = {
+ .type = MD_PERSONALITY,
+ .id = ID_RAID1,
+ .name = "raid1",
+ .owner = THIS_MODULE,
+ },
+
.make_request = raid1_make_request,
.run = raid1_run,
.free = raid1_free,
@@ -3510,18 +3516,18 @@ static struct md_personality raid1_personality =
.takeover = raid1_takeover,
};
-static int __init raid_init(void)
+static int __init raid1_init(void)
{
- return register_md_personality(&raid1_personality);
+ return register_md_submodule(&raid1_personality.head);
}
-static void raid_exit(void)
+static void __exit raid1_exit(void)
{
- unregister_md_personality(&raid1_personality);
+ unregister_md_submodule(&raid1_personality.head);
}
-module_init(raid_init);
-module_exit(raid_exit);
+module_init(raid1_init);
+module_exit(raid1_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("RAID1 (mirroring) personality for MD");
MODULE_ALIAS("md-personality-3"); /* RAID1 */
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 15b9ae5bf84d..846c5f29486e 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -24,6 +24,7 @@
#include "raid10.h"
#include "raid0.h"
#include "md-bitmap.h"
+#include "md-cluster.h"
/*
* RAID10 provides a combination of RAID0 and RAID1 functionality.
@@ -747,7 +748,7 @@ static struct md_rdev *read_balance(struct r10conf *conf,
for (slot = 0; slot < conf->copies ; slot++) {
sector_t first_bad;
- int bad_sectors;
+ sector_t bad_sectors;
sector_t dev_sector;
unsigned int pending;
bool nonrot;
@@ -1146,8 +1147,6 @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio,
{
struct r10conf *conf = mddev->private;
struct bio *read_bio;
- const enum req_op op = bio_op(bio);
- const blk_opf_t do_sync = bio->bi_opf & REQ_SYNC;
int max_sectors;
struct md_rdev *rdev;
char b[BDEVNAME_SIZE];
@@ -1228,7 +1227,6 @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio,
read_bio->bi_iter.bi_sector = r10_bio->devs[slot].addr +
choose_data_offset(r10_bio, rdev);
read_bio->bi_end_io = raid10_end_read_request;
- read_bio->bi_opf = op | do_sync;
if (test_bit(FailFast, &rdev->flags) &&
test_bit(R10BIO_FailFast, &r10_bio->state))
read_bio->bi_opf |= MD_FAILFAST;
@@ -1247,10 +1245,6 @@ static void raid10_write_one_disk(struct mddev *mddev, struct r10bio *r10_bio,
struct bio *bio, bool replacement,
int n_copy)
{
- const enum req_op op = bio_op(bio);
- const blk_opf_t do_sync = bio->bi_opf & REQ_SYNC;
- const blk_opf_t do_fua = bio->bi_opf & REQ_FUA;
- const blk_opf_t do_atomic = bio->bi_opf & REQ_ATOMIC;
unsigned long flags;
struct r10conf *conf = mddev->private;
struct md_rdev *rdev;
@@ -1269,7 +1263,6 @@ static void raid10_write_one_disk(struct mddev *mddev, struct r10bio *r10_bio,
mbio->bi_iter.bi_sector = (r10_bio->devs[n_copy].addr +
choose_data_offset(r10_bio, rdev));
mbio->bi_end_io = raid10_end_write_request;
- mbio->bi_opf = op | do_sync | do_fua | do_atomic;
if (!replacement && test_bit(FailFast,
&conf->mirrors[devnum].rdev->flags)
&& enough(conf, devnum))
@@ -1355,9 +1348,9 @@ static void raid10_write_request(struct mddev *mddev, struct bio *bio,
int error;
if ((mddev_is_clustered(mddev) &&
- md_cluster_ops->area_resyncing(mddev, WRITE,
- bio->bi_iter.bi_sector,
- bio_end_sector(bio)))) {
+ mddev->cluster_ops->area_resyncing(mddev, WRITE,
+ bio->bi_iter.bi_sector,
+ bio_end_sector(bio)))) {
DEFINE_WAIT(w);
/* Bail out if REQ_NOWAIT is set for the bio */
if (bio->bi_opf & REQ_NOWAIT) {
@@ -1367,7 +1360,7 @@ static void raid10_write_request(struct mddev *mddev, struct bio *bio,
for (;;) {
prepare_to_wait(&conf->wait_barrier,
&w, TASK_IDLE);
- if (!md_cluster_ops->area_resyncing(mddev, WRITE,
+ if (!mddev->cluster_ops->area_resyncing(mddev, WRITE,
bio->bi_iter.bi_sector, bio_end_sector(bio)))
break;
schedule();
@@ -1438,7 +1431,7 @@ static void raid10_write_request(struct mddev *mddev, struct bio *bio,
if (rdev && test_bit(WriteErrorSeen, &rdev->flags)) {
sector_t first_bad;
sector_t dev_sector = r10_bio->devs[i].addr;
- int bad_sectors;
+ sector_t bad_sectors;
int is_bad;
is_bad = is_badblock(rdev, dev_sector, max_sectors,
@@ -1631,11 +1624,10 @@ static int raid10_handle_discard(struct mddev *mddev, struct bio *bio)
if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
return -EAGAIN;
- if (WARN_ON_ONCE(bio->bi_opf & REQ_NOWAIT)) {
+ if (!wait_barrier(conf, bio->bi_opf & REQ_NOWAIT)) {
bio_wouldblock_error(bio);
return 0;
}
- wait_barrier(conf, false);
/*
* Check reshape again to avoid reshape happens after checking
@@ -2786,7 +2778,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
}
}
-static int narrow_write_error(struct r10bio *r10_bio, int i)
+static bool narrow_write_error(struct r10bio *r10_bio, int i)
{
struct bio *bio = r10_bio->master_bio;
struct mddev *mddev = r10_bio->mddev;
@@ -2807,10 +2799,10 @@ static int narrow_write_error(struct r10bio *r10_bio, int i)
sector_t sector;
int sectors;
int sect_to_write = r10_bio->sectors;
- int ok = 1;
+ bool ok = true;
if (rdev->badblocks.shift < 0)
- return 0;
+ return false;
block_sectors = roundup(1 << rdev->badblocks.shift,
bdev_logical_block_size(rdev->bdev) >> 9);
@@ -3413,7 +3405,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
sector_t from_addr, to_addr;
struct md_rdev *rdev = conf->mirrors[d].rdev;
sector_t sector, first_bad;
- int bad_sectors;
+ sector_t bad_sectors;
if (!rdev ||
!test_bit(In_sync, &rdev->flags))
continue;
@@ -3609,7 +3601,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
for (i = 0; i < conf->copies; i++) {
int d = r10_bio->devs[i].devnum;
sector_t first_bad, sector;
- int bad_sectors;
+ sector_t bad_sectors;
struct md_rdev *rdev;
if (r10_bio->devs[i].repl_bio)
@@ -3716,7 +3708,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
conf->cluster_sync_low = mddev->curr_resync_completed;
raid10_set_cluster_sync_high(conf);
/* Send resync message */
- md_cluster_ops->resync_info_update(mddev,
+ mddev->cluster_ops->resync_info_update(mddev,
conf->cluster_sync_low,
conf->cluster_sync_high);
}
@@ -3749,7 +3741,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
}
if (broadcast_msg) {
raid10_set_cluster_sync_high(conf);
- md_cluster_ops->resync_info_update(mddev,
+ mddev->cluster_ops->resync_info_update(mddev,
conf->cluster_sync_low,
conf->cluster_sync_high);
}
@@ -4541,7 +4533,7 @@ static int raid10_start_reshape(struct mddev *mddev)
if (ret)
goto abort;
- ret = md_cluster_ops->resize_bitmaps(mddev, newsize, oldsize);
+ ret = mddev->cluster_ops->resize_bitmaps(mddev, newsize, oldsize);
if (ret) {
mddev->bitmap_ops->resize(mddev, oldsize, 0, false);
goto abort;
@@ -4832,7 +4824,7 @@ read_more:
conf->cluster_sync_low = sb_reshape_pos;
}
- md_cluster_ops->resync_info_update(mddev, conf->cluster_sync_low,
+ mddev->cluster_ops->resync_info_update(mddev, conf->cluster_sync_low,
conf->cluster_sync_high);
}
@@ -4977,7 +4969,7 @@ static void raid10_update_reshape_pos(struct mddev *mddev)
struct r10conf *conf = mddev->private;
sector_t lo, hi;
- md_cluster_ops->resync_info_get(mddev, &lo, &hi);
+ mddev->cluster_ops->resync_info_get(mddev, &lo, &hi);
if (((mddev->reshape_position <= hi) && (mddev->reshape_position >= lo))
|| mddev->reshape_position == MaxSector)
conf->reshape_progress = mddev->reshape_position;
@@ -5123,9 +5115,13 @@ static void raid10_finish_reshape(struct mddev *mddev)
static struct md_personality raid10_personality =
{
- .name = "raid10",
- .level = 10,
- .owner = THIS_MODULE,
+ .head = {
+ .type = MD_PERSONALITY,
+ .id = ID_RAID10,
+ .name = "raid10",
+ .owner = THIS_MODULE,
+ },
+
.make_request = raid10_make_request,
.run = raid10_run,
.free = raid10_free,
@@ -5145,18 +5141,18 @@ static struct md_personality raid10_personality =
.update_reshape_pos = raid10_update_reshape_pos,
};
-static int __init raid_init(void)
+static int __init raid10_init(void)
{
- return register_md_personality(&raid10_personality);
+ return register_md_submodule(&raid10_personality.head);
}
-static void raid_exit(void)
+static void __exit raid10_exit(void)
{
- unregister_md_personality(&raid10_personality);
+ unregister_md_submodule(&raid10_personality.head);
}
-module_init(raid_init);
-module_exit(raid_exit);
+module_init(raid10_init);
+module_exit(raid10_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("RAID10 (striped mirror) personality for MD");
MODULE_ALIAS("md-personality-9"); /* RAID10 */
diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c
index e530271cb86b..ba768ca7f422 100644
--- a/drivers/md/raid5-cache.c
+++ b/drivers/md/raid5-cache.c
@@ -714,7 +714,7 @@ static void r5l_submit_current_io(struct r5l_log *log)
block = page_address(io->meta_page);
block->meta_size = cpu_to_le32(io->meta_offset);
- crc = crc32c_le(log->uuid_checksum, block, PAGE_SIZE);
+ crc = crc32c(log->uuid_checksum, block, PAGE_SIZE);
block->checksum = cpu_to_le32(crc);
log->current_io = NULL;
@@ -1020,8 +1020,8 @@ int r5l_write_stripe(struct r5l_log *log, struct stripe_head *sh)
if (test_bit(STRIPE_LOG_TRAPPED, &sh->state))
continue;
addr = kmap_local_page(sh->dev[i].page);
- sh->dev[i].log_checksum = crc32c_le(log->uuid_checksum,
- addr, PAGE_SIZE);
+ sh->dev[i].log_checksum = crc32c(log->uuid_checksum,
+ addr, PAGE_SIZE);
kunmap_local(addr);
}
parity_pages = 1 + !!(sh->qd_idx >= 0);
@@ -1741,7 +1741,7 @@ static int r5l_recovery_read_meta_block(struct r5l_log *log,
le64_to_cpu(mb->position) != ctx->pos)
return -EINVAL;
- crc = crc32c_le(log->uuid_checksum, mb, PAGE_SIZE);
+ crc = crc32c(log->uuid_checksum, mb, PAGE_SIZE);
if (stored_crc != crc)
return -EINVAL;
@@ -1780,8 +1780,7 @@ static int r5l_log_write_empty_meta_block(struct r5l_log *log, sector_t pos,
return -ENOMEM;
r5l_recovery_create_empty_meta_block(log, page, pos, seq);
mb = page_address(page);
- mb->checksum = cpu_to_le32(crc32c_le(log->uuid_checksum,
- mb, PAGE_SIZE));
+ mb->checksum = cpu_to_le32(crc32c(log->uuid_checksum, mb, PAGE_SIZE));
if (!sync_page_io(log->rdev, pos, PAGE_SIZE, page, REQ_OP_WRITE |
REQ_SYNC | REQ_FUA, false)) {
__free_page(page);
@@ -1976,7 +1975,7 @@ r5l_recovery_verify_data_checksum(struct r5l_log *log,
r5l_recovery_read_page(log, ctx, page, log_offset);
addr = kmap_local_page(page);
- checksum = crc32c_le(log->uuid_checksum, addr, PAGE_SIZE);
+ checksum = crc32c(log->uuid_checksum, addr, PAGE_SIZE);
kunmap_local(addr);
return (le32_to_cpu(log_checksum) == checksum) ? 0 : -EINVAL;
}
@@ -2379,8 +2378,8 @@ r5c_recovery_rewrite_data_only_stripes(struct r5l_log *log,
raid5_compute_blocknr(sh, i, 0));
addr = kmap_local_page(dev->page);
payload->checksum[0] = cpu_to_le32(
- crc32c_le(log->uuid_checksum, addr,
- PAGE_SIZE));
+ crc32c(log->uuid_checksum, addr,
+ PAGE_SIZE));
kunmap_local(addr);
sync_page_io(log->rdev, write_pos, PAGE_SIZE,
dev->page, REQ_OP_WRITE, false);
@@ -2392,8 +2391,8 @@ r5c_recovery_rewrite_data_only_stripes(struct r5l_log *log,
}
}
mb->meta_size = cpu_to_le32(offset);
- mb->checksum = cpu_to_le32(crc32c_le(log->uuid_checksum,
- mb, PAGE_SIZE));
+ mb->checksum = cpu_to_le32(crc32c(log->uuid_checksum,
+ mb, PAGE_SIZE));
sync_page_io(log->rdev, ctx->pos, PAGE_SIZE, page,
REQ_OP_WRITE | REQ_SYNC | REQ_FUA, false);
sh->log_start = ctx->pos;
@@ -2885,8 +2884,8 @@ int r5c_cache_data(struct r5l_log *log, struct stripe_head *sh)
if (!test_bit(R5_Wantwrite, &sh->dev[i].flags))
continue;
addr = kmap_local_page(sh->dev[i].page);
- sh->dev[i].log_checksum = crc32c_le(log->uuid_checksum,
- addr, PAGE_SIZE);
+ sh->dev[i].log_checksum = crc32c(log->uuid_checksum,
+ addr, PAGE_SIZE);
kunmap_local(addr);
pages++;
}
@@ -2969,7 +2968,7 @@ static int r5l_load_log(struct r5l_log *log)
}
stored_crc = le32_to_cpu(mb->checksum);
mb->checksum = 0;
- expected_crc = crc32c_le(log->uuid_checksum, mb, PAGE_SIZE);
+ expected_crc = crc32c(log->uuid_checksum, mb, PAGE_SIZE);
if (stored_crc != expected_crc) {
create_super = true;
goto create;
@@ -3077,8 +3076,8 @@ int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev)
return -ENOMEM;
log->rdev = rdev;
log->need_cache_flush = bdev_write_cache(rdev->bdev);
- log->uuid_checksum = crc32c_le(~0, rdev->mddev->uuid,
- sizeof(rdev->mddev->uuid));
+ log->uuid_checksum = crc32c(~0, rdev->mddev->uuid,
+ sizeof(rdev->mddev->uuid));
mutex_init(&log->io_mutex);
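
The raid5-cache.c conversions above, and the raid5-ppl.c hunks that follow, are a mechanical rename from crc32c_le() to crc32c(); both compute the same CRC32C checksum. A one-function sketch of the call as it is used here, assuming kernel context (the seed and buffer names are illustrative):

#include <linux/crc32c.h>
#include <linux/mm.h>

/* Seed with the per-array uuid checksum, then fold in one page of metadata,
 * as r5l_submit_current_io() does above. */
static u32 demo_meta_crc(u32 uuid_checksum, const void *meta_page)
{
	return crc32c(uuid_checksum, meta_page, PAGE_SIZE);
}
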
diff --git a/drivers/md/raid5-ppl.c b/drivers/md/raid5-ppl.c
index 37c4da5311ca..c0fb335311aa 100644
--- a/drivers/md/raid5-ppl.c
+++ b/drivers/md/raid5-ppl.c
@@ -346,9 +346,9 @@ static int ppl_log_stripe(struct ppl_log *log, struct stripe_head *sh)
if (!test_bit(STRIPE_FULL_WRITE, &sh->state)) {
le32_add_cpu(&e->pp_size, PAGE_SIZE);
io->pp_size += PAGE_SIZE;
- e->checksum = cpu_to_le32(crc32c_le(le32_to_cpu(e->checksum),
- page_address(sh->ppl_page),
- PAGE_SIZE));
+ e->checksum = cpu_to_le32(crc32c(le32_to_cpu(e->checksum),
+ page_address(sh->ppl_page),
+ PAGE_SIZE));
}
list_add_tail(&sh->log_list, &io->stripe_list);
@@ -454,7 +454,7 @@ static void ppl_submit_iounit(struct ppl_io_unit *io)
}
pplhdr->entries_count = cpu_to_le32(io->entries_count);
- pplhdr->checksum = cpu_to_le32(~crc32c_le(~0, pplhdr, PPL_HEADER_SIZE));
+ pplhdr->checksum = cpu_to_le32(~crc32c(~0, pplhdr, PPL_HEADER_SIZE));
 /* Rewind the buffer if current PPL is larger than remaining space */
if (log->use_multippl &&
@@ -998,7 +998,7 @@ static int ppl_recover(struct ppl_log *log, struct ppl_header *pplhdr,
goto out;
}
- crc = crc32c_le(crc, page_address(page), s);
+ crc = crc32c(crc, page_address(page), s);
pp_size -= s;
sector += s >> 9;
@@ -1052,7 +1052,7 @@ static int ppl_write_empty_header(struct ppl_log *log)
log->rdev->ppl.size, GFP_NOIO, 0);
memset(pplhdr->reserved, 0xff, PPL_HDR_RESERVED);
pplhdr->signature = cpu_to_le32(log->ppl_conf->signature);
- pplhdr->checksum = cpu_to_le32(~crc32c_le(~0, pplhdr, PAGE_SIZE));
+ pplhdr->checksum = cpu_to_le32(~crc32c(~0, pplhdr, PAGE_SIZE));
if (!sync_page_io(rdev, rdev->ppl.sector - rdev->data_offset,
PPL_HEADER_SIZE, page, REQ_OP_WRITE | REQ_SYNC |
@@ -1106,7 +1106,7 @@ static int ppl_load_distributed(struct ppl_log *log)
/* check header validity */
crc_stored = le32_to_cpu(pplhdr->checksum);
pplhdr->checksum = 0;
- crc = ~crc32c_le(~0, pplhdr, PAGE_SIZE);
+ crc = ~crc32c(~0, pplhdr, PAGE_SIZE);
if (crc_stored != crc) {
pr_debug("%s: ppl header crc does not match: stored: 0x%x calculated: 0x%x (offset: %llu)\n",
@@ -1390,7 +1390,7 @@ int ppl_init_log(struct r5conf *conf)
spin_lock_init(&ppl_conf->no_mem_stripes_lock);
if (!mddev->external) {
- ppl_conf->signature = ~crc32c_le(~0, mddev->uuid, sizeof(mddev->uuid));
+ ppl_conf->signature = ~crc32c(~0, mddev->uuid, sizeof(mddev->uuid));
ppl_conf->block_size = 512;
} else {
ppl_conf->block_size =
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 5c79429acc64..6389383166c0 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -5858,6 +5858,9 @@ static enum reshape_loc get_reshape_loc(struct mddev *mddev,
struct r5conf *conf, sector_t logical_sector)
{
sector_t reshape_progress, reshape_safe;
+
+ if (likely(conf->reshape_progress == MaxSector))
+ return LOC_NO_RESHAPE;
/*
* Spinlock is needed as reshape_progress may be
* 64bit on a 32bit platform, and so it might be
@@ -5935,22 +5938,19 @@ static enum stripe_result make_stripe_request(struct mddev *mddev,
const int rw = bio_data_dir(bi);
enum stripe_result ret;
struct stripe_head *sh;
+ enum reshape_loc loc;
sector_t new_sector;
int previous = 0, flags = 0;
int seq, dd_idx;
seq = read_seqcount_begin(&conf->gen_lock);
-
- if (unlikely(conf->reshape_progress != MaxSector)) {
- enum reshape_loc loc = get_reshape_loc(mddev, conf,
- logical_sector);
- if (loc == LOC_INSIDE_RESHAPE) {
- ret = STRIPE_SCHEDULE_AND_RETRY;
- goto out;
- }
- if (loc == LOC_AHEAD_OF_RESHAPE)
- previous = 1;
+ loc = get_reshape_loc(mddev, conf, logical_sector);
+ if (loc == LOC_INSIDE_RESHAPE) {
+ ret = STRIPE_SCHEDULE_AND_RETRY;
+ goto out;
}
+ if (loc == LOC_AHEAD_OF_RESHAPE)
+ previous = 1;
new_sector = raid5_compute_sector(conf, logical_sector, previous,
&dd_idx, NULL);
@@ -6127,7 +6127,6 @@ static bool raid5_make_request(struct mddev *mddev, struct bio * bi)
/* Bail out if conflicts with reshape and REQ_NOWAIT is set */
if ((bi->bi_opf & REQ_NOWAIT) &&
- (conf->reshape_progress != MaxSector) &&
get_reshape_loc(mddev, conf, logical_sector) == LOC_INSIDE_RESHAPE) {
bio_wouldblock_error(bi);
if (rw == WRITE)
@@ -8954,9 +8953,13 @@ static void raid5_prepare_suspend(struct mddev *mddev)
static struct md_personality raid6_personality =
{
- .name = "raid6",
- .level = 6,
- .owner = THIS_MODULE,
+ .head = {
+ .type = MD_PERSONALITY,
+ .id = ID_RAID6,
+ .name = "raid6",
+ .owner = THIS_MODULE,
+ },
+
.make_request = raid5_make_request,
.run = raid5_run,
.start = raid5_start,
@@ -8980,9 +8983,13 @@ static struct md_personality raid6_personality =
};
static struct md_personality raid5_personality =
{
- .name = "raid5",
- .level = 5,
- .owner = THIS_MODULE,
+ .head = {
+ .type = MD_PERSONALITY,
+ .id = ID_RAID5,
+ .name = "raid5",
+ .owner = THIS_MODULE,
+ },
+
.make_request = raid5_make_request,
.run = raid5_run,
.start = raid5_start,
@@ -9007,9 +9014,13 @@ static struct md_personality raid5_personality =
static struct md_personality raid4_personality =
{
- .name = "raid4",
- .level = 4,
- .owner = THIS_MODULE,
+ .head = {
+ .type = MD_PERSONALITY,
+ .id = ID_RAID4,
+ .name = "raid4",
+ .owner = THIS_MODULE,
+ },
+
.make_request = raid5_make_request,
.run = raid5_run,
.start = raid5_start,
@@ -9045,21 +9056,39 @@ static int __init raid5_init(void)
"md/raid5:prepare",
raid456_cpu_up_prepare,
raid456_cpu_dead);
- if (ret) {
- destroy_workqueue(raid5_wq);
- return ret;
- }
- register_md_personality(&raid6_personality);
- register_md_personality(&raid5_personality);
- register_md_personality(&raid4_personality);
+ if (ret)
+ goto err_destroy_wq;
+
+ ret = register_md_submodule(&raid6_personality.head);
+ if (ret)
+ goto err_cpuhp_remove;
+
+ ret = register_md_submodule(&raid5_personality.head);
+ if (ret)
+ goto err_unregister_raid6;
+
+ ret = register_md_submodule(&raid4_personality.head);
+ if (ret)
+ goto err_unregister_raid5;
+
return 0;
+
+err_unregister_raid5:
+ unregister_md_submodule(&raid5_personality.head);
+err_unregister_raid6:
+ unregister_md_submodule(&raid6_personality.head);
+err_cpuhp_remove:
+ cpuhp_remove_multi_state(CPUHP_MD_RAID5_PREPARE);
+err_destroy_wq:
+ destroy_workqueue(raid5_wq);
+ return ret;
}
-static void raid5_exit(void)
+static void __exit raid5_exit(void)
{
- unregister_md_personality(&raid6_personality);
- unregister_md_personality(&raid5_personality);
- unregister_md_personality(&raid4_personality);
+ unregister_md_submodule(&raid6_personality.head);
+ unregister_md_submodule(&raid5_personality.head);
+ unregister_md_submodule(&raid4_personality.head);
cpuhp_remove_multi_state(CPUHP_MD_RAID5_PREPARE);
destroy_workqueue(raid5_wq);
}
diff --git a/drivers/media/cec/core/cec-api.c b/drivers/media/cec/core/cec-api.c
index c50299246fc4..2b50578d107e 100644
--- a/drivers/media/cec/core/cec-api.c
+++ b/drivers/media/cec/core/cec-api.c
@@ -222,7 +222,7 @@ static long cec_transmit(struct cec_adapter *adap, struct cec_fh *fh,
mutex_lock(&adap->lock);
if (adap->log_addrs.num_log_addrs == 0)
err = -EPERM;
- else if (adap->is_configuring)
+ else if (adap->is_configuring && !msg_is_raw(&msg))
err = -ENONET;
else if (cec_is_busy(adap, fh))
err = -EBUSY;
diff --git a/drivers/media/cec/core/cec-pin.c b/drivers/media/cec/core/cec-pin.c
index a70451d99ebc..59ac12113f3a 100644
--- a/drivers/media/cec/core/cec-pin.c
+++ b/drivers/media/cec/core/cec-pin.c
@@ -873,19 +873,19 @@ static enum hrtimer_restart cec_pin_timer(struct hrtimer *timer)
if (pin->wait_usecs > 150) {
pin->wait_usecs -= 100;
pin->timer_ts = ktime_add_us(ts, 100);
- hrtimer_forward_now(timer, ns_to_ktime(100000));
+ hrtimer_forward_now(timer, us_to_ktime(100));
return HRTIMER_RESTART;
}
if (pin->wait_usecs > 100) {
pin->wait_usecs /= 2;
pin->timer_ts = ktime_add_us(ts, pin->wait_usecs);
hrtimer_forward_now(timer,
- ns_to_ktime(pin->wait_usecs * 1000));
+ us_to_ktime(pin->wait_usecs));
return HRTIMER_RESTART;
}
pin->timer_ts = ktime_add_us(ts, pin->wait_usecs);
hrtimer_forward_now(timer,
- ns_to_ktime(pin->wait_usecs * 1000));
+ us_to_ktime(pin->wait_usecs));
pin->wait_usecs = 0;
return HRTIMER_RESTART;
}
@@ -1020,13 +1020,12 @@ static enum hrtimer_restart cec_pin_timer(struct hrtimer *timer)
if (!adap->monitor_pin_cnt || usecs <= 150) {
pin->wait_usecs = 0;
pin->timer_ts = ktime_add_us(ts, usecs);
- hrtimer_forward_now(timer,
- ns_to_ktime(usecs * 1000));
+ hrtimer_forward_now(timer, us_to_ktime(usecs));
return HRTIMER_RESTART;
}
pin->wait_usecs = usecs - 100;
pin->timer_ts = ktime_add_us(ts, 100);
- hrtimer_forward_now(timer, ns_to_ktime(100000));
+ hrtimer_forward_now(timer, us_to_ktime(100));
return HRTIMER_RESTART;
}
@@ -1346,9 +1345,8 @@ struct cec_adapter *cec_pin_allocate_adapter(const struct cec_pin_ops *pin_ops,
if (pin == NULL)
return ERR_PTR(-ENOMEM);
pin->ops = pin_ops;
- hrtimer_init(&pin->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
atomic_set(&pin->work_pin_num_events, 0);
- pin->timer.function = cec_pin_timer;
+ hrtimer_setup(&pin->timer, cec_pin_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
init_waitqueue_head(&pin->kthread_waitq);
pin->tx_custom_low_usecs = CEC_TIM_CUSTOM_DEFAULT;
pin->tx_custom_high_usecs = CEC_TIM_CUSTOM_DEFAULT;
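
The cec-pin.c hunks above swap open-coded ns_to_ktime(x * 1000) for us_to_ktime(x) and fold hrtimer_init() plus the callback assignment into hrtimer_setup(). A minimal sketch of the resulting pattern, assuming kernel context (the demo_* names are illustrative):

#include <linux/hrtimer.h>
#include <linux/ktime.h>

static enum hrtimer_restart demo_timer_fn(struct hrtimer *t)
{
	/* Re-arm 100us from now, as the 100us polling branches above do. */
	hrtimer_forward_now(t, us_to_ktime(100));
	return HRTIMER_RESTART;
}

static void demo_timer_init(struct hrtimer *t)
{
	/* One call sets the callback, clock and mode. */
	hrtimer_setup(t, demo_timer_fn, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
}
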
diff --git a/drivers/media/common/siano/smsdvb-main.c b/drivers/media/common/siano/smsdvb-main.c
index 44d8fe8b220e..9b1a650ed055 100644
--- a/drivers/media/common/siano/smsdvb-main.c
+++ b/drivers/media/common/siano/smsdvb-main.c
@@ -1243,6 +1243,8 @@ static int __init smsdvb_module_init(void)
smsdvb_debugfs_register();
rc = smscore_register_hotplug(smsdvb_hotplug);
+ if (rc)
+ smsdvb_debugfs_unregister();
pr_debug("\n");
diff --git a/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c b/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c
index ded11cd8dbf7..931e5dc453b9 100644
--- a/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c
+++ b/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c
@@ -2249,10 +2249,10 @@ void tpg_log_status(struct tpg_data *tpg)
tpg->src_width, tpg->src_height,
tpg_color_enc_str(tpg->color_enc));
pr_info("tpg field: %u\n", tpg->field);
- pr_info("tpg crop: %ux%u@%dx%d\n", tpg->crop.width, tpg->crop.height,
- tpg->crop.left, tpg->crop.top);
- pr_info("tpg compose: %ux%u@%dx%d\n", tpg->compose.width, tpg->compose.height,
- tpg->compose.left, tpg->compose.top);
+ pr_info("tpg crop: (%d,%d)/%ux%u\n", tpg->crop.left, tpg->crop.top,
+ tpg->crop.width, tpg->crop.height);
+ pr_info("tpg compose: (%d,%d)/%ux%u\n", tpg->compose.left, tpg->compose.top,
+ tpg->compose.width, tpg->compose.height);
pr_info("tpg colorspace: %d\n", tpg->colorspace);
pr_info("tpg transfer function: %d/%d\n", tpg->xfer_func, tpg->real_xfer_func);
if (tpg->color_enc == TGP_COLOR_ENC_HSV)
diff --git a/drivers/media/dvb-frontends/dibx000_common.c b/drivers/media/dvb-frontends/dibx000_common.c
index 63a4c6a4afb5..bd5c5d7223aa 100644
--- a/drivers/media/dvb-frontends/dibx000_common.c
+++ b/drivers/media/dvb-frontends/dibx000_common.c
@@ -250,12 +250,12 @@ static int dibx000_i2c_master_xfer_gpio34(struct i2c_adapter *i2c_adap, struct i
return num;
}
-static struct i2c_algorithm dibx000_i2c_master_gpio12_xfer_algo = {
+static const struct i2c_algorithm dibx000_i2c_master_gpio12_xfer_algo = {
.master_xfer = dibx000_i2c_master_xfer_gpio12,
.functionality = dibx000_i2c_func,
};
-static struct i2c_algorithm dibx000_i2c_master_gpio34_xfer_algo = {
+static const struct i2c_algorithm dibx000_i2c_master_gpio34_xfer_algo = {
.master_xfer = dibx000_i2c_master_xfer_gpio34,
.functionality = dibx000_i2c_func,
};
@@ -324,7 +324,7 @@ static int dibx000_i2c_gated_gpio67_xfer(struct i2c_adapter *i2c_adap,
return ret;
}
-static struct i2c_algorithm dibx000_i2c_gated_gpio67_algo = {
+static const struct i2c_algorithm dibx000_i2c_gated_gpio67_algo = {
.master_xfer = dibx000_i2c_gated_gpio67_xfer,
.functionality = dibx000_i2c_func,
};
@@ -369,7 +369,7 @@ static int dibx000_i2c_gated_tuner_xfer(struct i2c_adapter *i2c_adap,
return ret;
}
-static struct i2c_algorithm dibx000_i2c_gated_tuner_algo = {
+static const struct i2c_algorithm dibx000_i2c_gated_tuner_algo = {
.master_xfer = dibx000_i2c_gated_tuner_xfer,
.functionality = dibx000_i2c_func,
};
@@ -422,7 +422,7 @@ void dibx000_reset_i2c_master(struct dibx000_i2c_master *mst)
EXPORT_SYMBOL(dibx000_reset_i2c_master);
static int i2c_adapter_init(struct i2c_adapter *i2c_adap,
- struct i2c_algorithm *algo, const char *name,
+ const struct i2c_algorithm *algo, const char *name,
struct dibx000_i2c_master *mst)
{
strscpy(i2c_adap->name, name, sizeof(i2c_adap->name));
diff --git a/drivers/media/dvb-frontends/rtl2832_sdr.c b/drivers/media/dvb-frontends/rtl2832_sdr.c
index 05254d8717db..0357624968f1 100644
--- a/drivers/media/dvb-frontends/rtl2832_sdr.c
+++ b/drivers/media/dvb-frontends/rtl2832_sdr.c
@@ -1363,6 +1363,7 @@ static int rtl2832_sdr_probe(struct platform_device *pdev)
dev->vb_queue.ops = &rtl2832_sdr_vb2_ops;
dev->vb_queue.mem_ops = &vb2_vmalloc_memops;
dev->vb_queue.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ dev->vb_queue.lock = &dev->vb_queue_lock;
ret = vb2_queue_init(&dev->vb_queue);
if (ret) {
dev_err(&pdev->dev, "Could not initialize vb2 queue\n");
@@ -1421,7 +1422,6 @@ static int rtl2832_sdr_probe(struct platform_device *pdev)
/* Init video_device structure */
dev->vdev = rtl2832_sdr_template;
dev->vdev.queue = &dev->vb_queue;
- dev->vdev.queue->lock = &dev->vb_queue_lock;
video_set_drvdata(&dev->vdev, dev);
/* Register the v4l2_device structure */
diff --git a/drivers/media/dvb-frontends/stv0299.c b/drivers/media/dvb-frontends/stv0299.c
index da7ff2c2e8e5..ba4bb3685095 100644
--- a/drivers/media/dvb-frontends/stv0299.c
+++ b/drivers/media/dvb-frontends/stv0299.c
@@ -250,7 +250,7 @@ static int stv0299_get_symbolrate (struct stv0299_state* state)
offset /= 128;
dprintk ("%s : srate = %i\n", __func__, srate);
- dprintk ("%s : ofset = %i\n", __func__, offset);
+ dprintk ("%s : offset = %i\n", __func__, offset);
srate += offset;
diff --git a/drivers/media/dvb-frontends/tda10048.c b/drivers/media/dvb-frontends/tda10048.c
index 3e725cdcc66b..1f87eb0dcf2a 100644
--- a/drivers/media/dvb-frontends/tda10048.c
+++ b/drivers/media/dvb-frontends/tda10048.c
@@ -328,7 +328,8 @@ static int tda10048_set_wref(struct dvb_frontend *fe, u32 sample_freq_hz,
u32 bw)
{
struct tda10048_state *state = fe->demodulator_priv;
- u64 t, z;
+ u64 t;
+ u32 z;
dprintk(1, "%s()\n", __func__);
@@ -341,6 +342,11 @@ static int tda10048_set_wref(struct dvb_frontend *fe, u32 sample_freq_hz,
/* t *= 2147483648 on 32bit platforms */
t *= (2048 * 1024);
t *= 1024;
+
+ /*
+ * Sample frequency is typically 55 MHz, with a theoretical maximum of
+ * 69 MHz. With a 32 bit z we have enough accuracy for up to 613 MHz.
+ */
z = 7 * sample_freq_hz;
do_div(t, z);
t += 5;
diff --git a/drivers/media/i2c/Kconfig b/drivers/media/i2c/Kconfig
index 8ba096b8ebca..e576b213084d 100644
--- a/drivers/media/i2c/Kconfig
+++ b/drivers/media/i2c/Kconfig
@@ -140,6 +140,7 @@ config VIDEO_IMX214
tristate "Sony IMX214 sensor support"
depends on GPIOLIB
select REGMAP_I2C
+ select V4L2_CCI_I2C
help
This is a Video4Linux2 sensor driver for the Sony
IMX214 camera.
@@ -1146,6 +1147,17 @@ config VIDEO_ISL7998X
Support for Intersil ISL7998x analog to MIPI-CSI2 or
BT.656 decoder.
+config VIDEO_LT6911UXE
+ tristate "Lontium LT6911UXE decoder"
+ depends on ACPI && VIDEO_DEV
+ select V4L2_FWNODE
+ help
+ This is a Video4Linux2 sensor-level driver for the Lontium
+ LT6911UXE HDMI to MIPI CSI-2 bridge.
+
+ To compile this driver as a module, choose M here: the
+ module will be called lt6911uxe.
+
config VIDEO_KS0127
tristate "KS0127 video decoder"
depends on VIDEO_DEV && I2C
diff --git a/drivers/media/i2c/Makefile b/drivers/media/i2c/Makefile
index fbb988bd067a..6c23a4463527 100644
--- a/drivers/media/i2c/Makefile
+++ b/drivers/media/i2c/Makefile
@@ -64,6 +64,7 @@ obj-$(CONFIG_VIDEO_ISL7998X) += isl7998x.o
obj-$(CONFIG_VIDEO_KS0127) += ks0127.o
obj-$(CONFIG_VIDEO_LM3560) += lm3560.o
obj-$(CONFIG_VIDEO_LM3646) += lm3646.o
+obj-$(CONFIG_VIDEO_LT6911UXE) += lt6911uxe.o
obj-$(CONFIG_VIDEO_M52790) += m52790.o
obj-$(CONFIG_VIDEO_MAX9271_LIB) += max9271.o
obj-$(CONFIG_VIDEO_MAX9286) += max9286.o
diff --git a/drivers/media/i2c/adv7180.c b/drivers/media/i2c/adv7180.c
index ff7dfa0278a7..6e50b14f888f 100644
--- a/drivers/media/i2c/adv7180.c
+++ b/drivers/media/i2c/adv7180.c
@@ -195,6 +195,7 @@ struct adv7180_state;
#define ADV7180_FLAG_V2 BIT(1)
#define ADV7180_FLAG_MIPI_CSI2 BIT(2)
#define ADV7180_FLAG_I2P BIT(3)
+#define ADV7180_FLAG_TEST_PATTERN BIT(4)
struct adv7180_chip_info {
unsigned int flags;
@@ -682,11 +683,15 @@ static int adv7180_init_controls(struct adv7180_state *state)
ADV7180_HUE_MAX, 1, ADV7180_HUE_DEF);
v4l2_ctrl_new_custom(&state->ctrl_hdl, &adv7180_ctrl_fast_switch, NULL);
- v4l2_ctrl_new_std_menu_items(&state->ctrl_hdl, &adv7180_ctrl_ops,
- V4L2_CID_TEST_PATTERN,
- ARRAY_SIZE(test_pattern_menu) - 1,
- 0, ARRAY_SIZE(test_pattern_menu) - 1,
- test_pattern_menu);
+ if (state->chip_info->flags & ADV7180_FLAG_TEST_PATTERN) {
+ v4l2_ctrl_new_std_menu_items(&state->ctrl_hdl,
+ &adv7180_ctrl_ops,
+ V4L2_CID_TEST_PATTERN,
+ ARRAY_SIZE(test_pattern_menu) - 1,
+ 0,
+ ARRAY_SIZE(test_pattern_menu) - 1,
+ test_pattern_menu);
+ }
state->sd.ctrl_handler = &state->ctrl_hdl;
if (state->ctrl_hdl.error) {
@@ -1221,7 +1226,7 @@ static const struct adv7180_chip_info adv7182_info = {
};
static const struct adv7180_chip_info adv7280_info = {
- .flags = ADV7180_FLAG_V2 | ADV7180_FLAG_I2P,
+ .flags = ADV7180_FLAG_V2 | ADV7180_FLAG_I2P | ADV7180_FLAG_TEST_PATTERN,
.valid_input_mask = BIT(ADV7182_INPUT_CVBS_AIN1) |
BIT(ADV7182_INPUT_CVBS_AIN2) |
BIT(ADV7182_INPUT_CVBS_AIN3) |
@@ -1235,7 +1240,8 @@ static const struct adv7180_chip_info adv7280_info = {
};
static const struct adv7180_chip_info adv7280_m_info = {
- .flags = ADV7180_FLAG_V2 | ADV7180_FLAG_MIPI_CSI2 | ADV7180_FLAG_I2P,
+ .flags = ADV7180_FLAG_V2 | ADV7180_FLAG_MIPI_CSI2 | ADV7180_FLAG_I2P |
+ ADV7180_FLAG_TEST_PATTERN,
.valid_input_mask = BIT(ADV7182_INPUT_CVBS_AIN1) |
BIT(ADV7182_INPUT_CVBS_AIN2) |
BIT(ADV7182_INPUT_CVBS_AIN3) |
@@ -1256,7 +1262,8 @@ static const struct adv7180_chip_info adv7280_m_info = {
};
static const struct adv7180_chip_info adv7281_info = {
- .flags = ADV7180_FLAG_V2 | ADV7180_FLAG_MIPI_CSI2,
+ .flags = ADV7180_FLAG_V2 | ADV7180_FLAG_MIPI_CSI2 |
+ ADV7180_FLAG_TEST_PATTERN,
.valid_input_mask = BIT(ADV7182_INPUT_CVBS_AIN1) |
BIT(ADV7182_INPUT_CVBS_AIN2) |
BIT(ADV7182_INPUT_CVBS_AIN7) |
@@ -1271,7 +1278,8 @@ static const struct adv7180_chip_info adv7281_info = {
};
static const struct adv7180_chip_info adv7281_m_info = {
- .flags = ADV7180_FLAG_V2 | ADV7180_FLAG_MIPI_CSI2,
+ .flags = ADV7180_FLAG_V2 | ADV7180_FLAG_MIPI_CSI2 |
+ ADV7180_FLAG_TEST_PATTERN,
.valid_input_mask = BIT(ADV7182_INPUT_CVBS_AIN1) |
BIT(ADV7182_INPUT_CVBS_AIN2) |
BIT(ADV7182_INPUT_CVBS_AIN3) |
@@ -1291,7 +1299,8 @@ static const struct adv7180_chip_info adv7281_m_info = {
};
static const struct adv7180_chip_info adv7281_ma_info = {
- .flags = ADV7180_FLAG_V2 | ADV7180_FLAG_MIPI_CSI2,
+ .flags = ADV7180_FLAG_V2 | ADV7180_FLAG_MIPI_CSI2 |
+ ADV7180_FLAG_TEST_PATTERN,
.valid_input_mask = BIT(ADV7182_INPUT_CVBS_AIN1) |
BIT(ADV7182_INPUT_CVBS_AIN2) |
BIT(ADV7182_INPUT_CVBS_AIN3) |
@@ -1316,7 +1325,7 @@ static const struct adv7180_chip_info adv7281_ma_info = {
};
static const struct adv7180_chip_info adv7282_info = {
- .flags = ADV7180_FLAG_V2 | ADV7180_FLAG_I2P,
+ .flags = ADV7180_FLAG_V2 | ADV7180_FLAG_I2P | ADV7180_FLAG_TEST_PATTERN,
.valid_input_mask = BIT(ADV7182_INPUT_CVBS_AIN1) |
BIT(ADV7182_INPUT_CVBS_AIN2) |
BIT(ADV7182_INPUT_CVBS_AIN7) |
@@ -1331,7 +1340,8 @@ static const struct adv7180_chip_info adv7282_info = {
};
static const struct adv7180_chip_info adv7282_m_info = {
- .flags = ADV7180_FLAG_V2 | ADV7180_FLAG_MIPI_CSI2 | ADV7180_FLAG_I2P,
+ .flags = ADV7180_FLAG_V2 | ADV7180_FLAG_MIPI_CSI2 | ADV7180_FLAG_I2P |
+ ADV7180_FLAG_TEST_PATTERN,
.valid_input_mask = BIT(ADV7182_INPUT_CVBS_AIN1) |
BIT(ADV7182_INPUT_CVBS_AIN2) |
BIT(ADV7182_INPUT_CVBS_AIN3) |
diff --git a/drivers/media/i2c/adv748x/adv748x.h b/drivers/media/i2c/adv748x/adv748x.h
index 9bc0121d0eff..2c1db5968af8 100644
--- a/drivers/media/i2c/adv748x/adv748x.h
+++ b/drivers/media/i2c/adv748x/adv748x.h
@@ -320,7 +320,7 @@ struct adv748x_state {
/* Free run pattern select */
#define ADV748X_SDP_FRP 0x14
-#define ADV748X_SDP_FRP_MASK GENMASK(3, 1)
+#define ADV748X_SDP_FRP_MASK GENMASK(2, 0)
/* Saturation */
#define ADV748X_SDP_SD_SAT_U 0xe3 /* user_map_rw_reg_e3 */
diff --git a/drivers/media/i2c/adv7511-v4l2.c b/drivers/media/i2c/adv7511-v4l2.c
index 4036972af3a6..f95a99d85360 100644
--- a/drivers/media/i2c/adv7511-v4l2.c
+++ b/drivers/media/i2c/adv7511-v4l2.c
@@ -1664,7 +1664,9 @@ static bool adv7511_check_edid_status(struct v4l2_subdev *sd)
if (!err) {
adv7511_dbg_dump_edid(2, debug, sd, segment, &state->edid.data[segment * 256]);
if (segment == 0) {
- state->edid.blocks = state->edid.data[0x7e] + 1;
+ state->edid.blocks =
+ v4l2_num_edid_blocks(state->edid.data,
+ EDID_MAX_SEGM * 2);
v4l2_dbg(1, debug, sd, "%s: %d blocks in total\n",
__func__, state->edid.blocks);
}
@@ -1682,7 +1684,7 @@ static bool adv7511_check_edid_status(struct v4l2_subdev *sd)
/* one more segment read ok */
state->edid.segments = segment + 1;
v4l2_ctrl_s_ctrl(state->have_edid0_ctrl, 0x1);
- if (((state->edid.data[0x7e] >> 1) + 1) > state->edid.segments) {
+ if (state->edid.blocks > state->edid.segments * 2) {
/* Request next EDID segment */
v4l2_dbg(1, debug, sd, "%s: request segment %d\n", __func__, state->edid.segments);
adv7511_wr(sd, 0xc9, 0xf);
diff --git a/drivers/media/i2c/ccs-pll.c b/drivers/media/i2c/ccs-pll.c
index cf8858cb13d4..34ccda666524 100644
--- a/drivers/media/i2c/ccs-pll.c
+++ b/drivers/media/i2c/ccs-pll.c
@@ -75,11 +75,11 @@ static const char *pll_string(unsigned int which)
#define PLL_FL(f) CCS_PLL_FLAG_##f
-static void print_pll(struct device *dev, struct ccs_pll *pll)
+static void print_pll(struct device *dev, const struct ccs_pll *pll)
{
const struct {
- struct ccs_pll_branch_fr *fr;
- struct ccs_pll_branch_bk *bk;
+ const struct ccs_pll_branch_fr *fr;
+ const struct ccs_pll_branch_bk *bk;
unsigned int which;
} branches[] = {
{ &pll->vt_fr, &pll->vt_bk, PLL_VT },
@@ -150,10 +150,10 @@ static u32 op_pix_ddr(u32 flags)
static int check_fr_bounds(struct device *dev,
const struct ccs_pll_limits *lim,
- struct ccs_pll *pll, unsigned int which)
+ const struct ccs_pll *pll, unsigned int which)
{
const struct ccs_pll_branch_limits_fr *lim_fr;
- struct ccs_pll_branch_fr *pll_fr;
+ const struct ccs_pll_branch_fr *pll_fr;
const char *s = pll_string(which);
int rval;
@@ -190,10 +190,10 @@ static int check_fr_bounds(struct device *dev,
static int check_bk_bounds(struct device *dev,
const struct ccs_pll_limits *lim,
- struct ccs_pll *pll, unsigned int which)
+ const struct ccs_pll *pll, unsigned int which)
{
const struct ccs_pll_branch_limits_bk *lim_bk;
- struct ccs_pll_branch_bk *pll_bk;
+ const struct ccs_pll_branch_bk *pll_bk;
const char *s = pll_string(which);
int rval;
@@ -230,7 +230,7 @@ static int check_bk_bounds(struct device *dev,
return rval;
}
-static int check_ext_bounds(struct device *dev, struct ccs_pll *pll)
+static int check_ext_bounds(struct device *dev, const struct ccs_pll *pll)
{
if (!(pll->flags & CCS_PLL_FLAG_FIFO_DERATING) &&
pll->pixel_rate_pixel_array > pll->pixel_rate_csi) {
diff --git a/drivers/media/i2c/ccs/ccs-core.c b/drivers/media/i2c/ccs/ccs-core.c
index 2cdab2f3d9dc..004d28c33287 100644
--- a/drivers/media/i2c/ccs/ccs-core.c
+++ b/drivers/media/i2c/ccs/ccs-core.c
@@ -3566,6 +3566,7 @@ static int ccs_probe(struct i2c_client *client)
out_disable_runtime_pm:
pm_runtime_put_noidle(&client->dev);
pm_runtime_disable(&client->dev);
+ pm_runtime_set_suspended(&client->dev);
out_cleanup:
ccs_cleanup(sensor);
@@ -3595,9 +3596,10 @@ static void ccs_remove(struct i2c_client *client)
v4l2_async_unregister_subdev(subdev);
pm_runtime_disable(&client->dev);
- if (!pm_runtime_status_suspended(&client->dev))
+ if (!pm_runtime_status_suspended(&client->dev)) {
ccs_power_off(&client->dev);
- pm_runtime_set_suspended(&client->dev);
+ pm_runtime_set_suspended(&client->dev);
+ }
for (i = 0; i < sensor->ssds_used; i++)
v4l2_device_unregister_subdev(&sensor->ssds[i].sd);
diff --git a/drivers/media/i2c/dw9719.c b/drivers/media/i2c/dw9719.c
index c626ed845928..032fbcb981f2 100644
--- a/drivers/media/i2c/dw9719.c
+++ b/drivers/media/i2c/dw9719.c
@@ -2,8 +2,10 @@
// Copyright (c) 2012 Intel Corporation
/*
- * Based on linux/modules/camera/drivers/media/i2c/imx/dw9719.c in this repo:
- * https://github.com/ZenfoneArea/android_kernel_asus_zenfone5
+ * Based on linux/modules/camera/drivers/media/i2c/imx/dw9719.c from:
+ * https://github.com/ZenfoneArea/android_kernel_asus_zenfone5 and
+ * latte-l-oss/drivers/external_drivers/camera/drivers/media/i2c/micam/dw9761.c
+ * from: https://github.com/MiCode/Xiaomi_Kernel_OpenSource/
*/
#include <linux/delay.h>
@@ -23,26 +25,45 @@
#define DW9719_INFO CCI_REG8(0)
#define DW9719_ID 0xF1
+#define DW9761_ID 0xF4
#define DW9719_CONTROL CCI_REG8(2)
+#define DW9719_STANDBY 0x00
+#define DW9719_SHUTDOWN 0x01
#define DW9719_ENABLE_RINGING 0x02
#define DW9719_VCM_CURRENT CCI_REG16(3)
+#define DW9719_STATUS CCI_REG16(5)
+#define DW9719_STATUS_BUSY BIT(0)
+
#define DW9719_MODE CCI_REG8(6)
#define DW9719_MODE_SAC_SHIFT 4
-#define DW9719_MODE_SAC3 4
+#define DW9719_DEFAULT_SAC 4
+#define DW9761_DEFAULT_SAC 6
#define DW9719_VCM_FREQ CCI_REG8(7)
#define DW9719_DEFAULT_VCM_FREQ 0x60
+#define DW9761_DEFAULT_VCM_FREQ 0x3E
+
+#define DW9761_VCM_PRELOAD CCI_REG8(8)
+#define DW9761_DEFAULT_VCM_PRELOAD 0x73
+
#define to_dw9719_device(x) container_of(x, struct dw9719_device, sd)
+enum dw9719_model {
+ DW9719,
+ DW9761,
+};
+
struct dw9719_device {
struct v4l2_subdev sd;
struct device *dev;
struct regmap *regmap;
struct regulator *regulator;
+ enum dw9719_model model;
+ u32 mode_low_bits;
u32 sac_mode;
u32 vcm_freq;
@@ -52,30 +73,14 @@ struct dw9719_device {
} ctrls;
};
-static int dw9719_detect(struct dw9719_device *dw9719)
-{
- int ret;
- u64 val;
-
- ret = cci_read(dw9719->regmap, DW9719_INFO, &val, NULL);
- if (ret < 0)
- return ret;
-
- if (val != DW9719_ID) {
- dev_err(dw9719->dev, "Failed to detect correct id\n");
- return -ENXIO;
- }
-
- return 0;
-}
-
static int dw9719_power_down(struct dw9719_device *dw9719)
{
return regulator_disable(dw9719->regulator);
}
-static int dw9719_power_up(struct dw9719_device *dw9719)
+static int dw9719_power_up(struct dw9719_device *dw9719, bool detect)
{
+ u64 val;
int ret;
ret = regulator_enable(dw9719->regulator);
@@ -83,16 +88,54 @@ static int dw9719_power_up(struct dw9719_device *dw9719)
return ret;
/* Jiggle SCL pin to wake up device */
- cci_write(dw9719->regmap, DW9719_CONTROL, 1, &ret);
-
+ cci_write(dw9719->regmap, DW9719_CONTROL, DW9719_SHUTDOWN, &ret);
+ fsleep(100);
+ cci_write(dw9719->regmap, DW9719_CONTROL, DW9719_STANDBY, &ret);
/* Need 100us to transition from SHUTDOWN to STANDBY */
fsleep(100);
+ if (detect) {
+ ret = cci_read(dw9719->regmap, DW9719_INFO, &val, NULL);
+ if (ret < 0)
+ return ret;
+
+ switch (val) {
+ case DW9719_ID:
+ dw9719->model = DW9719;
+ dw9719->mode_low_bits = 0x00;
+ dw9719->sac_mode = DW9719_DEFAULT_SAC;
+ dw9719->vcm_freq = DW9719_DEFAULT_VCM_FREQ;
+ break;
+ case DW9761_ID:
+ dw9719->model = DW9761;
+ dw9719->mode_low_bits = 0x01;
+ dw9719->sac_mode = DW9761_DEFAULT_SAC;
+ dw9719->vcm_freq = DW9761_DEFAULT_VCM_FREQ;
+ break;
+ default:
+ dev_err(dw9719->dev,
+ "Error unknown device id 0x%02llx\n", val);
+ return -ENXIO;
+ }
+
+ /* Optional indication of SAC mode select */
+ device_property_read_u32(dw9719->dev, "dongwoon,sac-mode",
+ &dw9719->sac_mode);
+
+ /* Optional indication of VCM frequency */
+ device_property_read_u32(dw9719->dev, "dongwoon,vcm-freq",
+ &dw9719->vcm_freq);
+ }
+
cci_write(dw9719->regmap, DW9719_CONTROL, DW9719_ENABLE_RINGING, &ret);
- cci_write(dw9719->regmap, DW9719_MODE,
- dw9719->sac_mode << DW9719_MODE_SAC_SHIFT, &ret);
+ cci_write(dw9719->regmap, DW9719_MODE, dw9719->mode_low_bits |
+ (dw9719->sac_mode << DW9719_MODE_SAC_SHIFT), &ret);
cci_write(dw9719->regmap, DW9719_VCM_FREQ, dw9719->vcm_freq, &ret);
+ if (dw9719->model == DW9761)
+ cci_write(dw9719->regmap, DW9761_VCM_PRELOAD,
+ DW9761_DEFAULT_VCM_PRELOAD, &ret);
+
if (ret)
dw9719_power_down(dw9719);
@@ -159,7 +202,7 @@ static int dw9719_resume(struct device *dev)
int ret;
int val;
- ret = dw9719_power_up(dw9719);
+ ret = dw9719_power_up(dw9719, false);
if (ret)
return ret;
@@ -237,16 +280,6 @@ static int dw9719_probe(struct i2c_client *client)
return PTR_ERR(dw9719->regmap);
dw9719->dev = &client->dev;
- dw9719->sac_mode = DW9719_MODE_SAC3;
- dw9719->vcm_freq = DW9719_DEFAULT_VCM_FREQ;
-
- /* Optional indication of SAC mode select */
- device_property_read_u32(&client->dev, "dongwoon,sac-mode",
- &dw9719->sac_mode);
-
- /* Optional indication of VCM frequency */
- device_property_read_u32(&client->dev, "dongwoon,vcm-freq",
- &dw9719->vcm_freq);
dw9719->regulator = devm_regulator_get(&client->dev, "vdd");
if (IS_ERR(dw9719->regulator))
@@ -274,14 +307,10 @@ static int dw9719_probe(struct i2c_client *client)
* will work.
*/
- ret = dw9719_power_up(dw9719);
+ ret = dw9719_power_up(dw9719, true);
if (ret)
goto err_cleanup_media;
- ret = dw9719_detect(dw9719);
- if (ret)
- goto err_powerdown;
-
pm_runtime_set_active(&client->dev);
pm_runtime_get_noresume(&client->dev);
pm_runtime_enable(&client->dev);
@@ -299,7 +328,6 @@ static int dw9719_probe(struct i2c_client *client)
err_pm_runtime:
pm_runtime_disable(&client->dev);
pm_runtime_put_noidle(&client->dev);
-err_powerdown:
dw9719_power_down(dw9719);
err_cleanup_media:
media_entity_cleanup(&dw9719->sd.entity);
@@ -327,6 +355,7 @@ static void dw9719_remove(struct i2c_client *client)
static const struct i2c_device_id dw9719_id_table[] = {
{ "dw9719" },
+ { "dw9761" },
{ }
};
MODULE_DEVICE_TABLE(i2c, dw9719_id_table);
diff --git a/drivers/media/i2c/hi556.c b/drivers/media/i2c/hi556.c
index 3ac42d1ab8b4..aed258211b8a 100644
--- a/drivers/media/i2c/hi556.c
+++ b/drivers/media/i2c/hi556.c
@@ -719,7 +719,7 @@ static int hi556_write_reg_list(struct hi556 *hi556,
r_list->regs[i].val);
if (ret) {
dev_err_ratelimited(&client->dev,
- "failed to write reg 0x%4.4x. error = %d",
+ "failed to write reg 0x%4.4x. error = %d\n",
r_list->regs[i].address, ret);
return ret;
}
@@ -926,7 +926,7 @@ static int hi556_identify_module(struct hi556 *hi556)
return ret;
if (val != HI556_CHIP_ID) {
- dev_err(&client->dev, "chip id mismatch: %x!=%x",
+ dev_err(&client->dev, "chip id mismatch: %x!=%x\n",
HI556_CHIP_ID, val);
return -ENXIO;
}
@@ -1002,14 +1002,14 @@ static int hi556_start_streaming(struct hi556 *hi556)
reg_list = &link_freq_configs[link_freq_index].reg_list;
ret = hi556_write_reg_list(hi556, reg_list);
if (ret) {
- dev_err(&client->dev, "failed to set plls");
+ dev_err(&client->dev, "failed to set plls\n");
return ret;
}
reg_list = &hi556->cur_mode->reg_list;
ret = hi556_write_reg_list(hi556, reg_list);
if (ret) {
- dev_err(&client->dev, "failed to set mode");
+ dev_err(&client->dev, "failed to set mode\n");
return ret;
}
@@ -1021,7 +1021,7 @@ static int hi556_start_streaming(struct hi556 *hi556)
HI556_REG_VALUE_16BIT, HI556_MODE_STREAMING);
if (ret) {
- dev_err(&client->dev, "failed to set stream");
+ dev_err(&client->dev, "failed to set stream\n");
return ret;
}
@@ -1034,7 +1034,7 @@ static void hi556_stop_streaming(struct hi556 *hi556)
if (hi556_write_reg(hi556, HI556_REG_MODE_SELECT,
HI556_REG_VALUE_16BIT, HI556_MODE_STANDBY))
- dev_err(&client->dev, "failed to set stream");
+ dev_err(&client->dev, "failed to set stream\n");
}
static int hi556_set_stream(struct v4l2_subdev *sd, int enable)
@@ -1053,7 +1053,6 @@ static int hi556_set_stream(struct v4l2_subdev *sd, int enable)
ret = hi556_start_streaming(hi556);
if (ret) {
- enable = 0;
hi556_stop_streaming(hi556);
pm_runtime_put(&client->dev);
}
@@ -1220,33 +1219,35 @@ static int hi556_check_hwcfg(struct device *dev)
*/
ep = fwnode_graph_get_next_endpoint(fwnode, NULL);
if (!ep)
- return -EPROBE_DEFER;
+ return dev_err_probe(dev, -EPROBE_DEFER,
+ "waiting for fwnode graph endpoint\n");
ret = v4l2_fwnode_endpoint_alloc_parse(ep, &bus_cfg);
fwnode_handle_put(ep);
if (ret)
- return ret;
+ return dev_err_probe(dev, ret, "parsing endpoint failed\n");
ret = fwnode_property_read_u32(fwnode, "clock-frequency", &mclk);
if (ret) {
- dev_err(dev, "can't get clock frequency");
- return ret;
+ dev_err(dev, "can't get clock frequency\n");
+ goto check_hwcfg_error;
}
if (mclk != HI556_MCLK) {
- dev_err(dev, "external clock %d is not supported", mclk);
- return -EINVAL;
+ dev_err(dev, "external clock %d is not supported\n", mclk);
+ ret = -EINVAL;
+ goto check_hwcfg_error;
}
if (bus_cfg.bus.mipi_csi2.num_data_lanes != 2) {
- dev_err(dev, "number of CSI2 data lanes %d is not supported",
+ dev_err(dev, "number of CSI2 data lanes %d is not supported\n",
bus_cfg.bus.mipi_csi2.num_data_lanes);
ret = -EINVAL;
goto check_hwcfg_error;
}
if (!bus_cfg.nr_of_link_frequencies) {
- dev_err(dev, "no link frequencies defined");
+ dev_err(dev, "no link frequencies defined\n");
ret = -EINVAL;
goto check_hwcfg_error;
}
@@ -1259,7 +1260,7 @@ static int hi556_check_hwcfg(struct device *dev)
}
if (j == bus_cfg.nr_of_link_frequencies) {
- dev_err(dev, "no link frequency %lld supported",
+ dev_err(dev, "no link frequency %lld supported\n",
link_freq_menu_items[i]);
ret = -EINVAL;
goto check_hwcfg_error;
@@ -1332,11 +1333,8 @@ static int hi556_probe(struct i2c_client *client)
int ret;
ret = hi556_check_hwcfg(&client->dev);
- if (ret) {
- dev_err(&client->dev, "failed to check HW configuration: %d",
- ret);
+ if (ret)
return ret;
- }
hi556 = devm_kzalloc(&client->dev, sizeof(*hi556), GFP_KERNEL);
if (!hi556)
@@ -1371,7 +1369,7 @@ static int hi556_probe(struct i2c_client *client)
ret = hi556_identify_module(hi556);
if (ret) {
- dev_err(&client->dev, "failed to find sensor: %d", ret);
+ dev_err(&client->dev, "failed to find sensor: %d\n", ret);
goto probe_error_power_off;
}
}
@@ -1380,7 +1378,7 @@ static int hi556_probe(struct i2c_client *client)
hi556->cur_mode = &supported_modes[0];
ret = hi556_init_controls(hi556);
if (ret) {
- dev_err(&client->dev, "failed to init controls: %d", ret);
+ dev_err(&client->dev, "failed to init controls: %d\n", ret);
goto probe_error_v4l2_ctrl_handler_free;
}
@@ -1391,13 +1389,13 @@ static int hi556_probe(struct i2c_client *client)
hi556->pad.flags = MEDIA_PAD_FL_SOURCE;
ret = media_entity_pads_init(&hi556->sd.entity, 1, &hi556->pad);
if (ret) {
- dev_err(&client->dev, "failed to init entity pads: %d", ret);
+ dev_err(&client->dev, "failed to init entity pads: %d\n", ret);
goto probe_error_v4l2_ctrl_handler_free;
}
ret = v4l2_async_register_subdev_sensor(&hi556->sd);
if (ret < 0) {
- dev_err(&client->dev, "failed to register V4L2 subdev: %d",
+ dev_err(&client->dev, "failed to register V4L2 subdev: %d\n",
ret);
goto probe_error_media_entity_cleanup;
}
diff --git a/drivers/media/i2c/imx214.c b/drivers/media/i2c/imx214.c
index 4962cfe7c83d..dd7bc45523d8 100644
--- a/drivers/media/i2c/imx214.c
+++ b/drivers/media/i2c/imx214.c
@@ -15,26 +15,186 @@
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <media/media-entity.h>
+#include <media/v4l2-cci.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-fwnode.h>
#include <media/v4l2-subdev.h>
-#define IMX214_REG_MODE_SELECT 0x0100
+/* Chip ID */
+#define IMX214_REG_CHIP_ID CCI_REG16(0x0016)
+#define IMX214_CHIP_ID 0x0214
+
+#define IMX214_REG_MODE_SELECT CCI_REG8(0x0100)
#define IMX214_MODE_STANDBY 0x00
#define IMX214_MODE_STREAMING 0x01
+#define IMX214_REG_FAST_STANDBY_CTRL CCI_REG8(0x0106)
+
#define IMX214_DEFAULT_CLK_FREQ 24000000
-#define IMX214_DEFAULT_LINK_FREQ 480000000
+#define IMX214_DEFAULT_LINK_FREQ 600000000
+/* Keep wrong link frequency for backward compatibility */
+#define IMX214_DEFAULT_LINK_FREQ_LEGACY 480000000
#define IMX214_DEFAULT_PIXEL_RATE ((IMX214_DEFAULT_LINK_FREQ * 8LL) / 10)
#define IMX214_FPS 30
-#define IMX214_MBUS_CODE MEDIA_BUS_FMT_SRGGB10_1X10
+
+/* V-TIMING internal */
+#define IMX214_REG_FRM_LENGTH_LINES CCI_REG16(0x0340)
+#define IMX214_VTS_MAX 0xffff
+
+#define IMX214_VBLANK_MIN 890
+
+/* HBLANK control - read only */
+#define IMX214_PPL_DEFAULT 5008
/* Exposure control */
-#define IMX214_REG_EXPOSURE 0x0202
-#define IMX214_EXPOSURE_MIN 0
-#define IMX214_EXPOSURE_MAX 3184
+#define IMX214_REG_EXPOSURE CCI_REG16(0x0202)
+#define IMX214_EXPOSURE_OFFSET 10
+#define IMX214_EXPOSURE_MIN 1
#define IMX214_EXPOSURE_STEP 1
#define IMX214_EXPOSURE_DEFAULT 3184
+#define IMX214_REG_EXPOSURE_RATIO CCI_REG8(0x0222)
+#define IMX214_REG_SHORT_EXPOSURE CCI_REG16(0x0224)
+
+/* Analog gain control */
+#define IMX214_REG_ANALOG_GAIN CCI_REG16(0x0204)
+#define IMX214_REG_SHORT_ANALOG_GAIN CCI_REG16(0x0216)
+#define IMX214_ANA_GAIN_MIN 0
+#define IMX214_ANA_GAIN_MAX 448
+#define IMX214_ANA_GAIN_STEP 1
+#define IMX214_ANA_GAIN_DEFAULT 0x0
+
+/* Digital gain control */
+#define IMX214_REG_DIG_GAIN_GREENR CCI_REG16(0x020e)
+#define IMX214_REG_DIG_GAIN_RED CCI_REG16(0x0210)
+#define IMX214_REG_DIG_GAIN_BLUE CCI_REG16(0x0212)
+#define IMX214_REG_DIG_GAIN_GREENB CCI_REG16(0x0214)
+#define IMX214_DGTL_GAIN_MIN 0x0100
+#define IMX214_DGTL_GAIN_MAX 0x0fff
+#define IMX214_DGTL_GAIN_DEFAULT 0x0100
+#define IMX214_DGTL_GAIN_STEP 1
+
+#define IMX214_REG_ORIENTATION CCI_REG8(0x0101)
+
+#define IMX214_REG_MASK_CORR_FRAMES CCI_REG8(0x0105)
+#define IMX214_CORR_FRAMES_TRANSMIT 0
+#define IMX214_CORR_FRAMES_MASK 1
+
+#define IMX214_REG_CSI_DATA_FORMAT CCI_REG16(0x0112)
+#define IMX214_CSI_DATA_FORMAT_RAW8 0x0808
+#define IMX214_CSI_DATA_FORMAT_RAW10 0x0A0A
+#define IMX214_CSI_DATA_FORMAT_COMP6 0x0A06
+#define IMX214_CSI_DATA_FORMAT_COMP8 0x0A08
+
+#define IMX214_REG_CSI_LANE_MODE CCI_REG8(0x0114)
+#define IMX214_CSI_2_LANE_MODE 1
+#define IMX214_CSI_4_LANE_MODE 3
+
+#define IMX214_REG_EXCK_FREQ CCI_REG16(0x0136)
+#define IMX214_EXCK_FREQ(n) ((n) * 256) /* n expressed in MHz */
+
+#define IMX214_REG_TEMP_SENSOR_CONTROL CCI_REG8(0x0138)
+
+#define IMX214_REG_HDR_MODE CCI_REG8(0x0220)
+#define IMX214_HDR_MODE_OFF 0
+#define IMX214_HDR_MODE_ON 1
+
+#define IMX214_REG_HDR_RES_REDUCTION CCI_REG8(0x0221)
+#define IMX214_HDR_RES_REDU_THROUGH 0x11
+#define IMX214_HDR_RES_REDU_2_BINNING 0x22
+
+/* PLL settings */
+#define IMX214_REG_VTPXCK_DIV CCI_REG8(0x0301)
+#define IMX214_REG_VTSYCK_DIV CCI_REG8(0x0303)
+#define IMX214_REG_PREPLLCK_VT_DIV CCI_REG8(0x0305)
+#define IMX214_REG_PLL_VT_MPY CCI_REG16(0x0306)
+#define IMX214_REG_OPPXCK_DIV CCI_REG8(0x0309)
+#define IMX214_REG_OPSYCK_DIV CCI_REG8(0x030b)
+#define IMX214_REG_PLL_MULT_DRIV CCI_REG8(0x0310)
+#define IMX214_PLL_SINGLE 0
+#define IMX214_PLL_DUAL 1
+
+#define IMX214_REG_LINE_LENGTH_PCK CCI_REG16(0x0342)
+#define IMX214_REG_X_ADD_STA CCI_REG16(0x0344)
+#define IMX214_REG_Y_ADD_STA CCI_REG16(0x0346)
+#define IMX214_REG_X_ADD_END CCI_REG16(0x0348)
+#define IMX214_REG_Y_ADD_END CCI_REG16(0x034a)
+#define IMX214_REG_X_OUTPUT_SIZE CCI_REG16(0x034c)
+#define IMX214_REG_Y_OUTPUT_SIZE CCI_REG16(0x034e)
+#define IMX214_REG_X_EVEN_INC CCI_REG8(0x0381)
+#define IMX214_REG_X_ODD_INC CCI_REG8(0x0383)
+#define IMX214_REG_Y_EVEN_INC CCI_REG8(0x0385)
+#define IMX214_REG_Y_ODD_INC CCI_REG8(0x0387)
+
+#define IMX214_REG_SCALE_MODE CCI_REG8(0x0401)
+#define IMX214_SCALE_NONE 0
+#define IMX214_SCALE_HORIZONTAL 1
+#define IMX214_SCALE_FULL 2
+#define IMX214_REG_SCALE_M CCI_REG16(0x0404)
+
+#define IMX214_REG_DIG_CROP_X_OFFSET CCI_REG16(0x0408)
+#define IMX214_REG_DIG_CROP_Y_OFFSET CCI_REG16(0x040a)
+#define IMX214_REG_DIG_CROP_WIDTH CCI_REG16(0x040c)
+#define IMX214_REG_DIG_CROP_HEIGHT CCI_REG16(0x040e)
+
+#define IMX214_REG_REQ_LINK_BIT_RATE CCI_REG32(0x0820)
+#define IMX214_LINK_BIT_RATE_MBPS(n) ((n) << 16)
+
+/* Binning mode */
+#define IMX214_REG_BINNING_MODE CCI_REG8(0x0900)
+#define IMX214_BINNING_NONE 0
+#define IMX214_BINNING_ENABLE 1
+#define IMX214_REG_BINNING_TYPE CCI_REG8(0x0901)
+#define IMX214_REG_BINNING_WEIGHTING CCI_REG8(0x0902)
+#define IMX214_BINNING_AVERAGE 0x00
+#define IMX214_BINNING_SUMMED 0x01
+#define IMX214_BINNING_BAYER 0x02
+
+#define IMX214_REG_SING_DEF_CORR_EN CCI_REG8(0x0b06)
+#define IMX214_SING_DEF_CORR_OFF 0
+#define IMX214_SING_DEF_CORR_ON 1
+
+/* AWB control */
+#define IMX214_REG_ABS_GAIN_GREENR CCI_REG16(0x0b8e)
+#define IMX214_REG_ABS_GAIN_RED CCI_REG16(0x0b90)
+#define IMX214_REG_ABS_GAIN_BLUE CCI_REG16(0x0b92)
+#define IMX214_REG_ABS_GAIN_GREENB CCI_REG16(0x0b94)
+
+#define IMX214_REG_RMSC_NR_MODE CCI_REG8(0x3001)
+#define IMX214_REG_STATS_OUT_EN CCI_REG8(0x3013)
+#define IMX214_STATS_OUT_OFF 0
+#define IMX214_STATS_OUT_ON 1
+
+/* Chroma noise reduction */
+#define IMX214_REG_NML_NR_EN CCI_REG8(0x30a2)
+#define IMX214_NML_NR_OFF 0
+#define IMX214_NML_NR_ON 1
+
+#define IMX214_REG_EBD_SIZE_V CCI_REG8(0x5041)
+#define IMX214_EBD_NO 0
+#define IMX214_EBD_4_LINE 4
+
+#define IMX214_REG_RG_STATS_LMT CCI_REG16(0x6d12)
+#define IMX214_RG_STATS_LMT_10_BIT 0x03FF
+#define IMX214_RG_STATS_LMT_14_BIT 0x3FFF
+
+#define IMX214_REG_ATR_FAST_MOVE CCI_REG8(0x9300)
+
+/* Test Pattern Control */
+#define IMX214_REG_TEST_PATTERN CCI_REG16(0x0600)
+#define IMX214_TEST_PATTERN_DISABLE 0
+#define IMX214_TEST_PATTERN_SOLID_COLOR 1
+#define IMX214_TEST_PATTERN_COLOR_BARS 2
+#define IMX214_TEST_PATTERN_GREY_COLOR 3
+#define IMX214_TEST_PATTERN_PN9 4
+
+/* Test pattern colour components */
+#define IMX214_REG_TESTP_RED CCI_REG16(0x0602)
+#define IMX214_REG_TESTP_GREENR CCI_REG16(0x0604)
+#define IMX214_REG_TESTP_BLUE CCI_REG16(0x0606)
+#define IMX214_REG_TESTP_GREENB CCI_REG16(0x0608)
+#define IMX214_TESTP_COLOUR_MIN 0
+#define IMX214_TESTP_COLOUR_MAX 0x03ff
+#define IMX214_TESTP_COLOUR_STEP 1
/* IMX214 native and active pixel array size */
#define IMX214_NATIVE_WIDTH 4224U
@@ -52,6 +212,38 @@ static const char * const imx214_supply_name[] = {
#define IMX214_NUM_SUPPLIES ARRAY_SIZE(imx214_supply_name)
+/*
+ * The supported formats.
+ * This table MUST contain 4 entries per format, to cover the various flip
+ * combinations in the order
+ * - no flip
+ * - h flip
+ * - v flip
+ * - h&v flips
+ */
+static const u32 imx214_mbus_formats[] = {
+ MEDIA_BUS_FMT_SRGGB10_1X10,
+ MEDIA_BUS_FMT_SGRBG10_1X10,
+ MEDIA_BUS_FMT_SGBRG10_1X10,
+ MEDIA_BUS_FMT_SBGGR10_1X10,
+};
+
+static const char * const imx214_test_pattern_menu[] = {
+ "Disabled",
+ "Color Bars",
+ "Solid Color",
+ "Grey Color Bars",
+ "PN9"
+};
+
+static const int imx214_test_pattern_val[] = {
+ IMX214_TEST_PATTERN_DISABLE,
+ IMX214_TEST_PATTERN_COLOR_BARS,
+ IMX214_TEST_PATTERN_SOLID_COLOR,
+ IMX214_TEST_PATTERN_GREY_COLOR,
+ IMX214_TEST_PATTERN_PN9,
+};
+
struct imx214 {
struct device *dev;
struct clk *xclk;
@@ -59,365 +251,262 @@ struct imx214 {
struct v4l2_subdev sd;
struct media_pad pad;
- struct v4l2_mbus_framefmt fmt;
- struct v4l2_rect crop;
struct v4l2_ctrl_handler ctrls;
struct v4l2_ctrl *pixel_rate;
struct v4l2_ctrl *link_freq;
+ struct v4l2_ctrl *vblank;
+ struct v4l2_ctrl *hblank;
struct v4l2_ctrl *exposure;
struct v4l2_ctrl *unit_size;
+ struct {
+ struct v4l2_ctrl *hflip;
+ struct v4l2_ctrl *vflip;
+ };
struct regulator_bulk_data supplies[IMX214_NUM_SUPPLIES];
struct gpio_desc *enable_gpio;
-
- /*
- * Serialize control access, get/set format, get selection
- * and start streaming.
- */
- struct mutex mutex;
-};
-
-struct reg_8 {
- u16 addr;
- u8 val;
-};
-
-enum {
- IMX214_TABLE_WAIT_MS = 0,
- IMX214_TABLE_END,
- IMX214_MAX_RETRIES,
- IMX214_WAIT_MS
};
/* From imx214_mode_tbls.h */
-static const struct reg_8 mode_4096x2304[] = {
- {0x0114, 0x03},
- {0x0220, 0x00},
- {0x0221, 0x11},
- {0x0222, 0x01},
- {0x0340, 0x0C},
- {0x0341, 0x7A},
- {0x0342, 0x13},
- {0x0343, 0x90},
- {0x0344, 0x00},
- {0x0345, 0x38},
- {0x0346, 0x01},
- {0x0347, 0x98},
- {0x0348, 0x10},
- {0x0349, 0x37},
- {0x034A, 0x0A},
- {0x034B, 0x97},
- {0x0381, 0x01},
- {0x0383, 0x01},
- {0x0385, 0x01},
- {0x0387, 0x01},
- {0x0900, 0x00},
- {0x0901, 0x00},
- {0x0902, 0x00},
- {0x3000, 0x35},
- {0x3054, 0x01},
- {0x305C, 0x11},
-
- {0x0112, 0x0A},
- {0x0113, 0x0A},
- {0x034C, 0x10},
- {0x034D, 0x00},
- {0x034E, 0x09},
- {0x034F, 0x00},
- {0x0401, 0x00},
- {0x0404, 0x00},
- {0x0405, 0x10},
- {0x0408, 0x00},
- {0x0409, 0x00},
- {0x040A, 0x00},
- {0x040B, 0x00},
- {0x040C, 0x10},
- {0x040D, 0x00},
- {0x040E, 0x09},
- {0x040F, 0x00},
-
- {0x0301, 0x05},
- {0x0303, 0x02},
- {0x0305, 0x03},
- {0x0306, 0x00},
- {0x0307, 0x96},
- {0x0309, 0x0A},
- {0x030B, 0x01},
- {0x0310, 0x00},
-
- {0x0820, 0x12},
- {0x0821, 0xC0},
- {0x0822, 0x00},
- {0x0823, 0x00},
-
- {0x3A03, 0x09},
- {0x3A04, 0x50},
- {0x3A05, 0x01},
-
- {0x0B06, 0x01},
- {0x30A2, 0x00},
-
- {0x30B4, 0x00},
-
- {0x3A02, 0xFF},
-
- {0x3011, 0x00},
- {0x3013, 0x01},
-
- {0x0202, 0x0C},
- {0x0203, 0x70},
- {0x0224, 0x01},
- {0x0225, 0xF4},
-
- {0x0204, 0x00},
- {0x0205, 0x00},
- {0x020E, 0x01},
- {0x020F, 0x00},
- {0x0210, 0x01},
- {0x0211, 0x00},
- {0x0212, 0x01},
- {0x0213, 0x00},
- {0x0214, 0x01},
- {0x0215, 0x00},
- {0x0216, 0x00},
- {0x0217, 0x00},
-
- {0x4170, 0x00},
- {0x4171, 0x10},
- {0x4176, 0x00},
- {0x4177, 0x3C},
- {0xAE20, 0x04},
- {0xAE21, 0x5C},
-
- {IMX214_TABLE_WAIT_MS, 10},
- {0x0138, 0x01},
- {IMX214_TABLE_END, 0x00}
+static const struct cci_reg_sequence mode_4096x2304[] = {
+ { IMX214_REG_HDR_MODE, IMX214_HDR_MODE_OFF },
+ { IMX214_REG_HDR_RES_REDUCTION, IMX214_HDR_RES_REDU_THROUGH },
+ { IMX214_REG_EXPOSURE_RATIO, 1 },
+ { IMX214_REG_X_ADD_STA, 56 },
+ { IMX214_REG_Y_ADD_STA, 408 },
+ { IMX214_REG_X_ADD_END, 4151 },
+ { IMX214_REG_Y_ADD_END, 2711 },
+ { IMX214_REG_X_EVEN_INC, 1 },
+ { IMX214_REG_X_ODD_INC, 1 },
+ { IMX214_REG_Y_EVEN_INC, 1 },
+ { IMX214_REG_Y_ODD_INC, 1 },
+ { IMX214_REG_BINNING_MODE, IMX214_BINNING_NONE },
+ { IMX214_REG_BINNING_TYPE, 0 },
+ { IMX214_REG_BINNING_WEIGHTING, IMX214_BINNING_AVERAGE },
+ { CCI_REG8(0x3000), 0x35 },
+ { CCI_REG8(0x3054), 0x01 },
+ { CCI_REG8(0x305C), 0x11 },
+
+ { IMX214_REG_CSI_DATA_FORMAT, IMX214_CSI_DATA_FORMAT_RAW10 },
+ { IMX214_REG_X_OUTPUT_SIZE, 4096 },
+ { IMX214_REG_Y_OUTPUT_SIZE, 2304 },
+ { IMX214_REG_SCALE_MODE, IMX214_SCALE_NONE },
+ { IMX214_REG_SCALE_M, 2 },
+ { IMX214_REG_DIG_CROP_X_OFFSET, 0 },
+ { IMX214_REG_DIG_CROP_Y_OFFSET, 0 },
+ { IMX214_REG_DIG_CROP_WIDTH, 4096 },
+ { IMX214_REG_DIG_CROP_HEIGHT, 2304 },
+
+ { IMX214_REG_VTPXCK_DIV, 5 },
+ { IMX214_REG_VTSYCK_DIV, 2 },
+ { IMX214_REG_PREPLLCK_VT_DIV, 3 },
+ { IMX214_REG_PLL_VT_MPY, 150 },
+ { IMX214_REG_OPPXCK_DIV, 10 },
+ { IMX214_REG_OPSYCK_DIV, 1 },
+ { IMX214_REG_PLL_MULT_DRIV, IMX214_PLL_SINGLE },
+
+ { IMX214_REG_REQ_LINK_BIT_RATE, IMX214_LINK_BIT_RATE_MBPS(4800) },
+
+ { CCI_REG8(0x3A03), 0x09 },
+ { CCI_REG8(0x3A04), 0x50 },
+ { CCI_REG8(0x3A05), 0x01 },
+
+ { IMX214_REG_SING_DEF_CORR_EN, IMX214_SING_DEF_CORR_ON },
+ { IMX214_REG_NML_NR_EN, IMX214_NML_NR_OFF },
+
+ { CCI_REG8(0x30B4), 0x00 },
+
+ { CCI_REG8(0x3A02), 0xFF },
+
+ { CCI_REG8(0x3011), 0x00 },
+ { IMX214_REG_STATS_OUT_EN, IMX214_STATS_OUT_ON },
+
+ { IMX214_REG_SHORT_EXPOSURE, 500 },
+
+ { CCI_REG8(0x4170), 0x00 },
+ { CCI_REG8(0x4171), 0x10 },
+ { CCI_REG8(0x4176), 0x00 },
+ { CCI_REG8(0x4177), 0x3C },
+ { CCI_REG8(0xAE20), 0x04 },
+ { CCI_REG8(0xAE21), 0x5C },
};
-static const struct reg_8 mode_1920x1080[] = {
- {0x0114, 0x03},
- {0x0220, 0x00},
- {0x0221, 0x11},
- {0x0222, 0x01},
- {0x0340, 0x0C},
- {0x0341, 0x7A},
- {0x0342, 0x13},
- {0x0343, 0x90},
- {0x0344, 0x04},
- {0x0345, 0x78},
- {0x0346, 0x03},
- {0x0347, 0xFC},
- {0x0348, 0x0B},
- {0x0349, 0xF7},
- {0x034A, 0x08},
- {0x034B, 0x33},
- {0x0381, 0x01},
- {0x0383, 0x01},
- {0x0385, 0x01},
- {0x0387, 0x01},
- {0x0900, 0x00},
- {0x0901, 0x00},
- {0x0902, 0x00},
- {0x3000, 0x35},
- {0x3054, 0x01},
- {0x305C, 0x11},
-
- {0x0112, 0x0A},
- {0x0113, 0x0A},
- {0x034C, 0x07},
- {0x034D, 0x80},
- {0x034E, 0x04},
- {0x034F, 0x38},
- {0x0401, 0x00},
- {0x0404, 0x00},
- {0x0405, 0x10},
- {0x0408, 0x00},
- {0x0409, 0x00},
- {0x040A, 0x00},
- {0x040B, 0x00},
- {0x040C, 0x07},
- {0x040D, 0x80},
- {0x040E, 0x04},
- {0x040F, 0x38},
-
- {0x0301, 0x05},
- {0x0303, 0x02},
- {0x0305, 0x03},
- {0x0306, 0x00},
- {0x0307, 0x96},
- {0x0309, 0x0A},
- {0x030B, 0x01},
- {0x0310, 0x00},
-
- {0x0820, 0x12},
- {0x0821, 0xC0},
- {0x0822, 0x00},
- {0x0823, 0x00},
-
- {0x3A03, 0x04},
- {0x3A04, 0xF8},
- {0x3A05, 0x02},
-
- {0x0B06, 0x01},
- {0x30A2, 0x00},
-
- {0x30B4, 0x00},
-
- {0x3A02, 0xFF},
-
- {0x3011, 0x00},
- {0x3013, 0x01},
-
- {0x0202, 0x0C},
- {0x0203, 0x70},
- {0x0224, 0x01},
- {0x0225, 0xF4},
-
- {0x0204, 0x00},
- {0x0205, 0x00},
- {0x020E, 0x01},
- {0x020F, 0x00},
- {0x0210, 0x01},
- {0x0211, 0x00},
- {0x0212, 0x01},
- {0x0213, 0x00},
- {0x0214, 0x01},
- {0x0215, 0x00},
- {0x0216, 0x00},
- {0x0217, 0x00},
-
- {0x4170, 0x00},
- {0x4171, 0x10},
- {0x4176, 0x00},
- {0x4177, 0x3C},
- {0xAE20, 0x04},
- {0xAE21, 0x5C},
-
- {IMX214_TABLE_WAIT_MS, 10},
- {0x0138, 0x01},
- {IMX214_TABLE_END, 0x00}
+static const struct cci_reg_sequence mode_1920x1080[] = {
+ { IMX214_REG_HDR_MODE, IMX214_HDR_MODE_OFF },
+ { IMX214_REG_HDR_RES_REDUCTION, IMX214_HDR_RES_REDU_THROUGH },
+ { IMX214_REG_EXPOSURE_RATIO, 1 },
+ { IMX214_REG_X_ADD_STA, 1144 },
+ { IMX214_REG_Y_ADD_STA, 1020 },
+ { IMX214_REG_X_ADD_END, 3063 },
+ { IMX214_REG_Y_ADD_END, 2099 },
+ { IMX214_REG_X_EVEN_INC, 1 },
+ { IMX214_REG_X_ODD_INC, 1 },
+ { IMX214_REG_Y_EVEN_INC, 1 },
+ { IMX214_REG_Y_ODD_INC, 1 },
+ { IMX214_REG_BINNING_MODE, IMX214_BINNING_NONE },
+ { IMX214_REG_BINNING_TYPE, 0 },
+ { IMX214_REG_BINNING_WEIGHTING, IMX214_BINNING_AVERAGE },
+ { CCI_REG8(0x3000), 0x35 },
+ { CCI_REG8(0x3054), 0x01 },
+ { CCI_REG8(0x305C), 0x11 },
+
+ { IMX214_REG_CSI_DATA_FORMAT, IMX214_CSI_DATA_FORMAT_RAW10 },
+ { IMX214_REG_X_OUTPUT_SIZE, 1920 },
+ { IMX214_REG_Y_OUTPUT_SIZE, 1080 },
+ { IMX214_REG_SCALE_MODE, IMX214_SCALE_NONE },
+ { IMX214_REG_SCALE_M, 2 },
+ { IMX214_REG_DIG_CROP_X_OFFSET, 0 },
+ { IMX214_REG_DIG_CROP_Y_OFFSET, 0 },
+ { IMX214_REG_DIG_CROP_WIDTH, 1920 },
+ { IMX214_REG_DIG_CROP_HEIGHT, 1080 },
+
+ { IMX214_REG_VTPXCK_DIV, 5 },
+ { IMX214_REG_VTSYCK_DIV, 2 },
+ { IMX214_REG_PREPLLCK_VT_DIV, 3 },
+ { IMX214_REG_PLL_VT_MPY, 150 },
+ { IMX214_REG_OPPXCK_DIV, 10 },
+ { IMX214_REG_OPSYCK_DIV, 1 },
+ { IMX214_REG_PLL_MULT_DRIV, IMX214_PLL_SINGLE },
+
+ { IMX214_REG_REQ_LINK_BIT_RATE, IMX214_LINK_BIT_RATE_MBPS(4800) },
+
+ { CCI_REG8(0x3A03), 0x04 },
+ { CCI_REG8(0x3A04), 0xF8 },
+ { CCI_REG8(0x3A05), 0x02 },
+
+ { IMX214_REG_SING_DEF_CORR_EN, IMX214_SING_DEF_CORR_ON },
+ { IMX214_REG_NML_NR_EN, IMX214_NML_NR_OFF },
+
+ { CCI_REG8(0x30B4), 0x00 },
+
+ { CCI_REG8(0x3A02), 0xFF },
+
+ { CCI_REG8(0x3011), 0x00 },
+ { IMX214_REG_STATS_OUT_EN, IMX214_STATS_OUT_ON },
+
+ { IMX214_REG_SHORT_EXPOSURE, 500 },
+
+ { CCI_REG8(0x4170), 0x00 },
+ { CCI_REG8(0x4171), 0x10 },
+ { CCI_REG8(0x4176), 0x00 },
+ { CCI_REG8(0x4177), 0x3C },
+ { CCI_REG8(0xAE20), 0x04 },
+ { CCI_REG8(0xAE21), 0x5C },
};
-static const struct reg_8 mode_table_common[] = {
+static const struct cci_reg_sequence mode_table_common[] = {
/* software reset */
/* software standby settings */
- {0x0100, 0x00},
+ { IMX214_REG_MODE_SELECT, IMX214_MODE_STANDBY },
/* ATR setting */
- {0x9300, 0x02},
+ { IMX214_REG_ATR_FAST_MOVE, 2 },
/* external clock setting */
- {0x0136, 0x18},
- {0x0137, 0x00},
+ { IMX214_REG_EXCK_FREQ, IMX214_EXCK_FREQ(IMX214_DEFAULT_CLK_FREQ / 1000000) },
/* global setting */
/* basic config */
- {0x0101, 0x00},
- {0x0105, 0x01},
- {0x0106, 0x01},
- {0x4550, 0x02},
- {0x4601, 0x00},
- {0x4642, 0x05},
- {0x6227, 0x11},
- {0x6276, 0x00},
- {0x900E, 0x06},
- {0xA802, 0x90},
- {0xA803, 0x11},
- {0xA804, 0x62},
- {0xA805, 0x77},
- {0xA806, 0xAE},
- {0xA807, 0x34},
- {0xA808, 0xAE},
- {0xA809, 0x35},
- {0xA80A, 0x62},
- {0xA80B, 0x83},
- {0xAE33, 0x00},
+ { IMX214_REG_MASK_CORR_FRAMES, IMX214_CORR_FRAMES_MASK },
+ { IMX214_REG_FAST_STANDBY_CTRL, 1 },
+ { IMX214_REG_LINE_LENGTH_PCK, IMX214_PPL_DEFAULT },
+ { CCI_REG8(0x4550), 0x02 },
+ { CCI_REG8(0x4601), 0x00 },
+ { CCI_REG8(0x4642), 0x05 },
+ { CCI_REG8(0x6227), 0x11 },
+ { CCI_REG8(0x6276), 0x00 },
+ { CCI_REG8(0x900E), 0x06 },
+ { CCI_REG8(0xA802), 0x90 },
+ { CCI_REG8(0xA803), 0x11 },
+ { CCI_REG8(0xA804), 0x62 },
+ { CCI_REG8(0xA805), 0x77 },
+ { CCI_REG8(0xA806), 0xAE },
+ { CCI_REG8(0xA807), 0x34 },
+ { CCI_REG8(0xA808), 0xAE },
+ { CCI_REG8(0xA809), 0x35 },
+ { CCI_REG8(0xA80A), 0x62 },
+ { CCI_REG8(0xA80B), 0x83 },
+ { CCI_REG8(0xAE33), 0x00 },
/* analog setting */
- {0x4174, 0x00},
- {0x4175, 0x11},
- {0x4612, 0x29},
- {0x461B, 0x12},
- {0x461F, 0x06},
- {0x4635, 0x07},
- {0x4637, 0x30},
- {0x463F, 0x18},
- {0x4641, 0x0D},
- {0x465B, 0x12},
- {0x465F, 0x11},
- {0x4663, 0x11},
- {0x4667, 0x0F},
- {0x466F, 0x0F},
- {0x470E, 0x09},
- {0x4909, 0xAB},
- {0x490B, 0x95},
- {0x4915, 0x5D},
- {0x4A5F, 0xFF},
- {0x4A61, 0xFF},
- {0x4A73, 0x62},
- {0x4A85, 0x00},
- {0x4A87, 0xFF},
+ { CCI_REG8(0x4174), 0x00 },
+ { CCI_REG8(0x4175), 0x11 },
+ { CCI_REG8(0x4612), 0x29 },
+ { CCI_REG8(0x461B), 0x12 },
+ { CCI_REG8(0x461F), 0x06 },
+ { CCI_REG8(0x4635), 0x07 },
+ { CCI_REG8(0x4637), 0x30 },
+ { CCI_REG8(0x463F), 0x18 },
+ { CCI_REG8(0x4641), 0x0D },
+ { CCI_REG8(0x465B), 0x12 },
+ { CCI_REG8(0x465F), 0x11 },
+ { CCI_REG8(0x4663), 0x11 },
+ { CCI_REG8(0x4667), 0x0F },
+ { CCI_REG8(0x466F), 0x0F },
+ { CCI_REG8(0x470E), 0x09 },
+ { CCI_REG8(0x4909), 0xAB },
+ { CCI_REG8(0x490B), 0x95 },
+ { CCI_REG8(0x4915), 0x5D },
+ { CCI_REG8(0x4A5F), 0xFF },
+ { CCI_REG8(0x4A61), 0xFF },
+ { CCI_REG8(0x4A73), 0x62 },
+ { CCI_REG8(0x4A85), 0x00 },
+ { CCI_REG8(0x4A87), 0xFF },
/* embedded data */
- {0x5041, 0x04},
- {0x583C, 0x04},
- {0x620E, 0x04},
- {0x6EB2, 0x01},
- {0x6EB3, 0x00},
- {0x9300, 0x02},
+ { IMX214_REG_EBD_SIZE_V, IMX214_EBD_4_LINE },
+ { CCI_REG8(0x583C), 0x04 },
+ { CCI_REG8(0x620E), 0x04 },
+ { CCI_REG8(0x6EB2), 0x01 },
+ { CCI_REG8(0x6EB3), 0x00 },
+ { IMX214_REG_ATR_FAST_MOVE, 2 },
/* image quality */
/* HDR setting */
- {0x3001, 0x07},
- {0x6D12, 0x3F},
- {0x6D13, 0xFF},
- {0x9344, 0x03},
- {0x9706, 0x10},
- {0x9707, 0x03},
- {0x9708, 0x03},
- {0x9E04, 0x01},
- {0x9E05, 0x00},
- {0x9E0C, 0x01},
- {0x9E0D, 0x02},
- {0x9E24, 0x00},
- {0x9E25, 0x8C},
- {0x9E26, 0x00},
- {0x9E27, 0x94},
- {0x9E28, 0x00},
- {0x9E29, 0x96},
+ { IMX214_REG_RMSC_NR_MODE, 0x07 },
+ { IMX214_REG_RG_STATS_LMT, IMX214_RG_STATS_LMT_14_BIT },
+ { CCI_REG8(0x9344), 0x03 },
+ { CCI_REG8(0x9706), 0x10 },
+ { CCI_REG8(0x9707), 0x03 },
+ { CCI_REG8(0x9708), 0x03 },
+ { CCI_REG8(0x9E04), 0x01 },
+ { CCI_REG8(0x9E05), 0x00 },
+ { CCI_REG8(0x9E0C), 0x01 },
+ { CCI_REG8(0x9E0D), 0x02 },
+ { CCI_REG8(0x9E24), 0x00 },
+ { CCI_REG8(0x9E25), 0x8C },
+ { CCI_REG8(0x9E26), 0x00 },
+ { CCI_REG8(0x9E27), 0x94 },
+ { CCI_REG8(0x9E28), 0x00 },
+ { CCI_REG8(0x9E29), 0x96 },
/* CNR parameter setting */
- {0x69DB, 0x01},
+ { CCI_REG8(0x69DB), 0x01 },
/* Moire reduction */
- {0x6957, 0x01},
+ { CCI_REG8(0x6957), 0x01 },
/* image enhancement */
- {0x6987, 0x17},
- {0x698A, 0x03},
- {0x698B, 0x03},
+ { CCI_REG8(0x6987), 0x17 },
+ { CCI_REG8(0x698A), 0x03 },
+ { CCI_REG8(0x698B), 0x03 },
/* white balance */
- {0x0B8E, 0x01},
- {0x0B8F, 0x00},
- {0x0B90, 0x01},
- {0x0B91, 0x00},
- {0x0B92, 0x01},
- {0x0B93, 0x00},
- {0x0B94, 0x01},
- {0x0B95, 0x00},
+ { IMX214_REG_ABS_GAIN_GREENR, 0x0100 },
+ { IMX214_REG_ABS_GAIN_RED, 0x0100 },
+ { IMX214_REG_ABS_GAIN_BLUE, 0x0100 },
+ { IMX214_REG_ABS_GAIN_GREENB, 0x0100 },
/* ATR setting */
- {0x6E50, 0x00},
- {0x6E51, 0x32},
- {0x9340, 0x00},
- {0x9341, 0x3C},
- {0x9342, 0x03},
- {0x9343, 0xFF},
- {IMX214_TABLE_END, 0x00}
+ { CCI_REG8(0x6E50), 0x00 },
+ { CCI_REG8(0x6E51), 0x32 },
+ { CCI_REG8(0x9340), 0x00 },
+ { CCI_REG8(0x9341), 0x3C },
+ { CCI_REG8(0x9342), 0x03 },
+ { CCI_REG8(0x9343), 0xFF },
};
/*
@@ -427,16 +516,25 @@ static const struct reg_8 mode_table_common[] = {
static const struct imx214_mode {
u32 width;
u32 height;
- const struct reg_8 *reg_table;
+
+ /* V-timing */
+ unsigned int vts_def;
+
+ unsigned int num_of_regs;
+ const struct cci_reg_sequence *reg_table;
} imx214_modes[] = {
{
.width = 4096,
.height = 2304,
+ .vts_def = 3194,
+ .num_of_regs = ARRAY_SIZE(mode_4096x2304),
.reg_table = mode_4096x2304,
},
{
.width = 1920,
.height = 1080,
+ .vts_def = 3194,
+ .num_of_regs = ARRAY_SIZE(mode_1920x1080),
.reg_table = mode_1920x1080,
},
};
@@ -490,14 +588,42 @@ static int __maybe_unused imx214_power_off(struct device *dev)
return 0;
}
+/* Get bayer order based on flip setting. */
+static u32 imx214_get_format_code(struct imx214 *imx214)
+{
+ unsigned int i;
+
+ i = (imx214->vflip->val ? 2 : 0) | (imx214->hflip->val ? 1 : 0);
+
+ return imx214_mbus_formats[i];
+}
+
+static void imx214_update_pad_format(struct imx214 *imx214,
+ const struct imx214_mode *mode,
+ struct v4l2_mbus_framefmt *fmt, u32 code)
+{
+ fmt->code = imx214_get_format_code(imx214);
+ fmt->width = mode->width;
+ fmt->height = mode->height;
+ fmt->field = V4L2_FIELD_NONE;
+ fmt->colorspace = V4L2_COLORSPACE_SRGB;
+ fmt->ycbcr_enc = V4L2_MAP_YCBCR_ENC_DEFAULT(fmt->colorspace);
+ fmt->quantization = V4L2_MAP_QUANTIZATION_DEFAULT(true,
+ fmt->colorspace,
+ fmt->ycbcr_enc);
+ fmt->xfer_func = V4L2_MAP_XFER_FUNC_DEFAULT(fmt->colorspace);
+}
+
static int imx214_enum_mbus_code(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
- if (code->index > 0)
+ struct imx214 *imx214 = to_imx214(sd);
+
+ if (code->index >= (ARRAY_SIZE(imx214_mbus_formats) / 4))
return -EINVAL;
- code->code = IMX214_MBUS_CODE;
+ code->code = imx214_get_format_code(imx214);
return 0;
}
@@ -506,7 +632,11 @@ static int imx214_enum_frame_size(struct v4l2_subdev *subdev,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
- if (fse->code != IMX214_MBUS_CODE)
+ struct imx214 *imx214 = to_imx214(subdev);
+ u32 code;
+
+ code = imx214_get_format_code(imx214);
+ if (fse->code != code)
return -EINVAL;
if (fse->index >= ARRAY_SIZE(imx214_modes))
@@ -549,52 +679,6 @@ static const struct v4l2_subdev_core_ops imx214_core_ops = {
#endif
};
-static struct v4l2_mbus_framefmt *
-__imx214_get_pad_format(struct imx214 *imx214,
- struct v4l2_subdev_state *sd_state,
- unsigned int pad,
- enum v4l2_subdev_format_whence which)
-{
- switch (which) {
- case V4L2_SUBDEV_FORMAT_TRY:
- return v4l2_subdev_state_get_format(sd_state, pad);
- case V4L2_SUBDEV_FORMAT_ACTIVE:
- return &imx214->fmt;
- default:
- return NULL;
- }
-}
-
-static int imx214_get_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *sd_state,
- struct v4l2_subdev_format *format)
-{
- struct imx214 *imx214 = to_imx214(sd);
-
- mutex_lock(&imx214->mutex);
- format->format = *__imx214_get_pad_format(imx214, sd_state,
- format->pad,
- format->which);
- mutex_unlock(&imx214->mutex);
-
- return 0;
-}
-
-static struct v4l2_rect *
-__imx214_get_pad_crop(struct imx214 *imx214,
- struct v4l2_subdev_state *sd_state,
- unsigned int pad, enum v4l2_subdev_format_whence which)
-{
- switch (which) {
- case V4L2_SUBDEV_FORMAT_TRY:
- return v4l2_subdev_state_get_crop(sd_state, pad);
- case V4L2_SUBDEV_FORMAT_ACTIVE:
- return &imx214->crop;
- default:
- return NULL;
- }
-}
-
static int imx214_set_format(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
@@ -604,34 +688,48 @@ static int imx214_set_format(struct v4l2_subdev *sd,
struct v4l2_rect *__crop;
const struct imx214_mode *mode;
- mutex_lock(&imx214->mutex);
-
- __crop = __imx214_get_pad_crop(imx214, sd_state, format->pad,
- format->which);
-
mode = v4l2_find_nearest_size(imx214_modes,
ARRAY_SIZE(imx214_modes), width, height,
format->format.width,
format->format.height);
- __crop->width = mode->width;
- __crop->height = mode->height;
+ imx214_update_pad_format(imx214, mode, &format->format,
+ format->format.code);
+ __format = v4l2_subdev_state_get_format(sd_state, 0);
- __format = __imx214_get_pad_format(imx214, sd_state, format->pad,
- format->which);
- __format->width = __crop->width;
- __format->height = __crop->height;
- __format->code = IMX214_MBUS_CODE;
- __format->field = V4L2_FIELD_NONE;
- __format->colorspace = V4L2_COLORSPACE_SRGB;
- __format->ycbcr_enc = V4L2_MAP_YCBCR_ENC_DEFAULT(__format->colorspace);
- __format->quantization = V4L2_MAP_QUANTIZATION_DEFAULT(true,
- __format->colorspace, __format->ycbcr_enc);
- __format->xfer_func = V4L2_MAP_XFER_FUNC_DEFAULT(__format->colorspace);
+ *__format = format->format;
- format->format = *__format;
+ __crop = v4l2_subdev_state_get_crop(sd_state, 0);
+ __crop->width = mode->width;
+ __crop->height = mode->height;
- mutex_unlock(&imx214->mutex);
+ if (format->which == V4L2_SUBDEV_FORMAT_ACTIVE) {
+ int exposure_max;
+ int exposure_def;
+ int hblank;
+
+ /* Update blank limits */
+ __v4l2_ctrl_modify_range(imx214->vblank, IMX214_VBLANK_MIN,
+ IMX214_VTS_MAX - mode->height, 2,
+ mode->vts_def - mode->height);
+
+ /* Update max exposure while meeting expected vblanking */
+ exposure_max = mode->vts_def - IMX214_EXPOSURE_OFFSET;
+ exposure_def = min(exposure_max, IMX214_EXPOSURE_DEFAULT);
+ __v4l2_ctrl_modify_range(imx214->exposure,
+ imx214->exposure->minimum,
+ exposure_max, imx214->exposure->step,
+ exposure_def);
+
+ /*
+ * Currently PPL is fixed to IMX214_PPL_DEFAULT, so hblank
+ * depends on mode->width only, and is not changeable in any
+ * way other than changing the mode.
+ */
+ hblank = IMX214_PPL_DEFAULT - mode->width;
+ __v4l2_ctrl_modify_range(imx214->hblank, hblank, hblank, 1,
+ hblank);
+ }
return 0;
}
@@ -640,14 +738,9 @@ static int imx214_get_selection(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
- struct imx214 *imx214 = to_imx214(sd);
-
switch (sel->target) {
case V4L2_SEL_TGT_CROP:
- mutex_lock(&imx214->mutex);
- sel->r = *__imx214_get_pad_crop(imx214, sd_state, sel->pad,
- sel->which);
- mutex_unlock(&imx214->mutex);
+ sel->r = *v4l2_subdev_state_get_crop(sd_state, 0);
return 0;
case V4L2_SEL_TGT_NATIVE_SIZE:
@@ -675,6 +768,7 @@ static int imx214_entity_init_state(struct v4l2_subdev *subdev,
struct v4l2_subdev_format fmt = { };
fmt.which = sd_state ? V4L2_SUBDEV_FORMAT_TRY : V4L2_SUBDEV_FORMAT_ACTIVE;
+ fmt.format.code = MEDIA_BUS_FMT_SRGGB10_1X10;
fmt.format.width = imx214_modes[0].width;
fmt.format.height = imx214_modes[0].height;
@@ -683,12 +777,41 @@ static int imx214_entity_init_state(struct v4l2_subdev *subdev,
return 0;
}
+static int imx214_update_digital_gain(struct imx214 *imx214, u32 val)
+{
+ int ret = 0;
+
+ cci_write(imx214->regmap, IMX214_REG_DIG_GAIN_GREENR, val, &ret);
+ cci_write(imx214->regmap, IMX214_REG_DIG_GAIN_RED, val, &ret);
+ cci_write(imx214->regmap, IMX214_REG_DIG_GAIN_BLUE, val, &ret);
+ cci_write(imx214->regmap, IMX214_REG_DIG_GAIN_GREENB, val, &ret);
+
+ return ret;
+}
+
static int imx214_set_ctrl(struct v4l2_ctrl *ctrl)
{
struct imx214 *imx214 = container_of(ctrl->handler,
struct imx214, ctrls);
- u8 vals[2];
- int ret;
+ const struct v4l2_mbus_framefmt *format = NULL;
+ struct v4l2_subdev_state *state;
+ int ret = 0;
+
+ if (ctrl->id == V4L2_CID_VBLANK) {
+ int exposure_max, exposure_def;
+
+ state = v4l2_subdev_get_locked_active_state(&imx214->sd);
+ format = v4l2_subdev_state_get_format(state, 0);
+
+ /* Update max exposure while meeting expected vblanking */
+ exposure_max =
+ format->height + ctrl->val - IMX214_EXPOSURE_OFFSET;
+ exposure_def = min(exposure_max, IMX214_EXPOSURE_DEFAULT);
+ __v4l2_ctrl_modify_range(imx214->exposure,
+ imx214->exposure->minimum,
+ exposure_max, imx214->exposure->step,
+ exposure_def);
+ }
/*
* Applying V4L2 control value only happens
@@ -698,15 +821,47 @@ static int imx214_set_ctrl(struct v4l2_ctrl *ctrl)
return 0;
switch (ctrl->id) {
+ case V4L2_CID_ANALOGUE_GAIN:
+ cci_write(imx214->regmap, IMX214_REG_ANALOG_GAIN,
+ ctrl->val, &ret);
+ cci_write(imx214->regmap, IMX214_REG_SHORT_ANALOG_GAIN,
+ ctrl->val, &ret);
+ break;
+ case V4L2_CID_DIGITAL_GAIN:
+ ret = imx214_update_digital_gain(imx214, ctrl->val);
+ break;
case V4L2_CID_EXPOSURE:
- vals[1] = ctrl->val;
- vals[0] = ctrl->val >> 8;
- ret = regmap_bulk_write(imx214->regmap, IMX214_REG_EXPOSURE, vals, 2);
- if (ret < 0)
- dev_err(imx214->dev, "Error %d\n", ret);
- ret = 0;
+ cci_write(imx214->regmap, IMX214_REG_EXPOSURE, ctrl->val, &ret);
+ break;
+ case V4L2_CID_HFLIP:
+ case V4L2_CID_VFLIP:
+ cci_write(imx214->regmap, IMX214_REG_ORIENTATION,
+ imx214->hflip->val | imx214->vflip->val << 1, &ret);
+ break;
+ case V4L2_CID_VBLANK:
+ cci_write(imx214->regmap, IMX214_REG_FRM_LENGTH_LINES,
+ format->height + ctrl->val, &ret);
+ break;
+ case V4L2_CID_TEST_PATTERN:
+ cci_write(imx214->regmap, IMX214_REG_TEST_PATTERN,
+ imx214_test_pattern_val[ctrl->val], &ret);
+ break;
+ case V4L2_CID_TEST_PATTERN_RED:
+ cci_write(imx214->regmap, IMX214_REG_TESTP_RED,
+ ctrl->val, &ret);
+ break;
+ case V4L2_CID_TEST_PATTERN_GREENR:
+ cci_write(imx214->regmap, IMX214_REG_TESTP_GREENR,
+ ctrl->val, &ret);
+ break;
+ case V4L2_CID_TEST_PATTERN_BLUE:
+ cci_write(imx214->regmap, IMX214_REG_TESTP_BLUE,
+ ctrl->val, &ret);
+ break;
+ case V4L2_CID_TEST_PATTERN_GREENB:
+ cci_write(imx214->regmap, IMX214_REG_TESTP_GREENB,
+ ctrl->val, &ret);
break;
-
default:
ret = -EINVAL;
}
@@ -729,16 +884,19 @@ static int imx214_ctrls_init(struct imx214 *imx214)
.width = 1120,
.height = 1120,
};
+ const struct imx214_mode *mode = &imx214_modes[0];
struct v4l2_fwnode_device_properties props;
struct v4l2_ctrl_handler *ctrl_hdlr;
- int ret;
+ int exposure_max, exposure_def;
+ int hblank;
+ int i, ret;
ret = v4l2_fwnode_device_parse(imx214->dev, &props);
if (ret < 0)
return ret;
ctrl_hdlr = &imx214->ctrls;
- ret = v4l2_ctrl_handler_init(&imx214->ctrls, 6);
+ ret = v4l2_ctrl_handler_init(&imx214->ctrls, 13);
if (ret)
return ret;
@@ -764,17 +922,75 @@ static int imx214_ctrls_init(struct imx214 *imx214)
*
* Yours sincerely, Ricardo.
*/
+
+ /* Initial vblank/hblank/exposure parameters based on current mode */
+ imx214->vblank = v4l2_ctrl_new_std(ctrl_hdlr, &imx214_ctrl_ops,
+ V4L2_CID_VBLANK, IMX214_VBLANK_MIN,
+ IMX214_VTS_MAX - mode->height, 2,
+ mode->vts_def - mode->height);
+
+ hblank = IMX214_PPL_DEFAULT - mode->width;
+ imx214->hblank = v4l2_ctrl_new_std(ctrl_hdlr, &imx214_ctrl_ops,
+ V4L2_CID_HBLANK, hblank, hblank,
+ 1, hblank);
+ if (imx214->hblank)
+ imx214->hblank->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+
+ exposure_max = mode->vts_def - IMX214_EXPOSURE_OFFSET;
+ exposure_def = min(exposure_max, IMX214_EXPOSURE_DEFAULT);
imx214->exposure = v4l2_ctrl_new_std(ctrl_hdlr, &imx214_ctrl_ops,
V4L2_CID_EXPOSURE,
IMX214_EXPOSURE_MIN,
- IMX214_EXPOSURE_MAX,
+ exposure_max,
IMX214_EXPOSURE_STEP,
- IMX214_EXPOSURE_DEFAULT);
+ exposure_def);
+
+ v4l2_ctrl_new_std(ctrl_hdlr, &imx214_ctrl_ops, V4L2_CID_ANALOGUE_GAIN,
+ IMX214_ANA_GAIN_MIN, IMX214_ANA_GAIN_MAX,
+ IMX214_ANA_GAIN_STEP, IMX214_ANA_GAIN_DEFAULT);
+
+ v4l2_ctrl_new_std(ctrl_hdlr, &imx214_ctrl_ops, V4L2_CID_DIGITAL_GAIN,
+ IMX214_DGTL_GAIN_MIN, IMX214_DGTL_GAIN_MAX,
+ IMX214_DGTL_GAIN_STEP, IMX214_DGTL_GAIN_DEFAULT);
+
+ imx214->hflip = v4l2_ctrl_new_std(ctrl_hdlr, &imx214_ctrl_ops,
+ V4L2_CID_HFLIP, 0, 1, 1, 0);
+ if (imx214->hflip)
+ imx214->hflip->flags |= V4L2_CTRL_FLAG_MODIFY_LAYOUT;
+
+ imx214->vflip = v4l2_ctrl_new_std(ctrl_hdlr, &imx214_ctrl_ops,
+ V4L2_CID_VFLIP, 0, 1, 1, 0);
+ if (imx214->vflip)
+ imx214->vflip->flags |= V4L2_CTRL_FLAG_MODIFY_LAYOUT;
+
+ v4l2_ctrl_cluster(2, &imx214->hflip);
+
+ v4l2_ctrl_new_std_menu_items(ctrl_hdlr, &imx214_ctrl_ops,
+ V4L2_CID_TEST_PATTERN,
+ ARRAY_SIZE(imx214_test_pattern_menu) - 1,
+ 0, 0, imx214_test_pattern_menu);
+ for (i = 0; i < 4; i++) {
+ /*
+ * The assumption is that
+ * V4L2_CID_TEST_PATTERN_GREENR == V4L2_CID_TEST_PATTERN_RED + 1
+ * V4L2_CID_TEST_PATTERN_BLUE == V4L2_CID_TEST_PATTERN_RED + 2
+ * V4L2_CID_TEST_PATTERN_GREENB == V4L2_CID_TEST_PATTERN_RED + 3
+ */
+ v4l2_ctrl_new_std(ctrl_hdlr, &imx214_ctrl_ops,
+ V4L2_CID_TEST_PATTERN_RED + i,
+ IMX214_TESTP_COLOUR_MIN,
+ IMX214_TESTP_COLOUR_MAX,
+ IMX214_TESTP_COLOUR_STEP,
+ IMX214_TESTP_COLOUR_MAX);
+ /* The "Solid color" pattern is white by default */
+ }
imx214->unit_size = v4l2_ctrl_new_std_compound(ctrl_hdlr,
NULL,
V4L2_CID_UNIT_CELL_SIZE,
- v4l2_ctrl_ptr_create((void *)&unit_size));
+ v4l2_ctrl_ptr_create((void *)&unit_size),
+ v4l2_ctrl_ptr_create(NULL),
+ v4l2_ctrl_ptr_create(NULL));
v4l2_ctrl_new_fwnode_properties(ctrl_hdlr, &imx214_ctrl_ops, &props);
@@ -790,76 +1006,52 @@ static int imx214_ctrls_init(struct imx214 *imx214)
return 0;
};
-#define MAX_CMD 4
-static int imx214_write_table(struct imx214 *imx214,
- const struct reg_8 table[])
-{
- u8 vals[MAX_CMD];
- int i;
- int ret;
-
- for (; table->addr != IMX214_TABLE_END ; table++) {
- if (table->addr == IMX214_TABLE_WAIT_MS) {
- usleep_range(table->val * 1000,
- table->val * 1000 + 500);
- continue;
- }
-
- for (i = 0; i < MAX_CMD; i++) {
- if (table[i].addr != (table[0].addr + i))
- break;
- vals[i] = table[i].val;
- }
-
- ret = regmap_bulk_write(imx214->regmap, table->addr, vals, i);
-
- if (ret) {
- dev_err(imx214->dev, "write_table error: %d\n", ret);
- return ret;
- }
-
- table += i - 1;
- }
-
- return 0;
-}
-
static int imx214_start_streaming(struct imx214 *imx214)
{
+ const struct v4l2_mbus_framefmt *fmt;
+ struct v4l2_subdev_state *state;
const struct imx214_mode *mode;
int ret;
- mutex_lock(&imx214->mutex);
- ret = imx214_write_table(imx214, mode_table_common);
+ ret = cci_multi_reg_write(imx214->regmap, mode_table_common,
+ ARRAY_SIZE(mode_table_common), NULL);
if (ret < 0) {
dev_err(imx214->dev, "could not sent common table %d\n", ret);
- goto error;
+ return ret;
}
- mode = v4l2_find_nearest_size(imx214_modes,
- ARRAY_SIZE(imx214_modes), width, height,
- imx214->fmt.width, imx214->fmt.height);
- ret = imx214_write_table(imx214, mode->reg_table);
+ ret = cci_write(imx214->regmap, IMX214_REG_CSI_LANE_MODE,
+ IMX214_CSI_4_LANE_MODE, NULL);
+ if (ret) {
+ dev_err(imx214->dev, "failed to configure lanes\n");
+ return ret;
+ }
+
+ state = v4l2_subdev_get_locked_active_state(&imx214->sd);
+ fmt = v4l2_subdev_state_get_format(state, 0);
+ mode = v4l2_find_nearest_size(imx214_modes, ARRAY_SIZE(imx214_modes),
+ width, height, fmt->width, fmt->height);
+ ret = cci_multi_reg_write(imx214->regmap, mode->reg_table,
+ mode->num_of_regs, NULL);
if (ret < 0) {
dev_err(imx214->dev, "could not sent mode table %d\n", ret);
- goto error;
+ return ret;
}
+
+ usleep_range(10000, 10500);
+
+ cci_write(imx214->regmap, IMX214_REG_TEMP_SENSOR_CONTROL, 0x01, NULL);
+
ret = __v4l2_ctrl_handler_setup(&imx214->ctrls);
if (ret < 0) {
dev_err(imx214->dev, "could not sync v4l2 controls\n");
- goto error;
+ return ret;
}
- ret = regmap_write(imx214->regmap, IMX214_REG_MODE_SELECT, IMX214_MODE_STREAMING);
- if (ret < 0) {
+ ret = cci_write(imx214->regmap, IMX214_REG_MODE_SELECT,
+ IMX214_MODE_STREAMING, NULL);
+ if (ret < 0)
dev_err(imx214->dev, "could not sent start table %d\n", ret);
- goto error;
- }
-
- mutex_unlock(&imx214->mutex);
- return 0;
-error:
- mutex_unlock(&imx214->mutex);
return ret;
}
@@ -867,7 +1059,8 @@ static int imx214_stop_streaming(struct imx214 *imx214)
{
int ret;
- ret = regmap_write(imx214->regmap, IMX214_REG_MODE_SELECT, IMX214_MODE_STANDBY);
+ ret = cci_write(imx214->regmap, IMX214_REG_MODE_SELECT,
+ IMX214_MODE_STANDBY, NULL);
if (ret < 0)
dev_err(imx214->dev, "could not sent stop table %d\n", ret);
@@ -877,6 +1070,7 @@ static int imx214_stop_streaming(struct imx214 *imx214)
static int imx214_s_stream(struct v4l2_subdev *subdev, int enable)
{
struct imx214 *imx214 = to_imx214(subdev);
+ struct v4l2_subdev_state *state;
int ret;
if (enable) {
@@ -884,7 +1078,9 @@ static int imx214_s_stream(struct v4l2_subdev *subdev, int enable)
if (ret < 0)
return ret;
+ state = v4l2_subdev_lock_and_get_active_state(subdev);
ret = imx214_start_streaming(imx214);
+ v4l2_subdev_unlock_state(state);
if (ret < 0)
goto err_rpm_put;
} else {
@@ -918,12 +1114,22 @@ static int imx214_get_frame_interval(struct v4l2_subdev *subdev,
return 0;
}
+/*
+ * Raw sensors should be using the VBLANK and HBLANK controls to determine
+ * the frame rate. However this driver was initially added using the
+ * [S|G|ENUM]_FRAME_INTERVAL ioctls with a fixed rate of 30fps.
+ * Retain the frame_interval ops for backwards compatibility, but they do
+ * nothing.
+ */
static int imx214_enum_frame_interval(struct v4l2_subdev *subdev,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_interval_enum *fie)
{
+ struct imx214 *imx214 = to_imx214(subdev);
const struct imx214_mode *mode;
+ dev_warn_once(imx214->dev, "frame_interval functions return an unreliable value for compatibility reasons. Use the VBLANK and HBLANK controls to determine the correct frame rate.\n");
+
if (fie->index != 0)
return -EINVAL;
@@ -931,7 +1137,7 @@ static int imx214_enum_frame_interval(struct v4l2_subdev *subdev,
ARRAY_SIZE(imx214_modes), width, height,
fie->width, fie->height);
- fie->code = IMX214_MBUS_CODE;
+ fie->code = imx214_get_format_code(imx214);
fie->width = mode->width;
fie->height = mode->height;
fie->interval.numerator = 1;
@@ -948,7 +1154,7 @@ static const struct v4l2_subdev_pad_ops imx214_subdev_pad_ops = {
.enum_mbus_code = imx214_enum_mbus_code,
.enum_frame_size = imx214_enum_frame_size,
.enum_frame_interval = imx214_enum_frame_interval,
- .get_fmt = imx214_get_format,
+ .get_fmt = v4l2_subdev_get_fmt,
.set_fmt = imx214_set_format,
.get_selection = imx214_get_selection,
.get_frame_interval = imx214_get_frame_interval,
@@ -965,12 +1171,6 @@ static const struct v4l2_subdev_internal_ops imx214_internal_ops = {
.init_state = imx214_entity_init_state,
};
-static const struct regmap_config sensor_regmap_config = {
- .reg_bits = 16,
- .val_bits = 8,
- .cache_type = REGCACHE_MAPLE,
-};
-
static int imx214_get_regulators(struct device *dev, struct imx214 *imx214)
{
unsigned int i;
@@ -982,6 +1182,27 @@ static int imx214_get_regulators(struct device *dev, struct imx214 *imx214)
imx214->supplies);
}
+/* Verify chip ID */
+static int imx214_identify_module(struct imx214 *imx214)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&imx214->sd);
+ int ret;
+ u64 val;
+
+ ret = cci_read(imx214->regmap, IMX214_REG_CHIP_ID, &val, NULL);
+ if (ret)
+ return dev_err_probe(&client->dev, ret,
+ "failed to read chip id %x\n",
+ IMX214_CHIP_ID);
+
+ if (val != IMX214_CHIP_ID)
+ return dev_err_probe(&client->dev, -EIO,
+ "chip id mismatch: %x!=%llx\n",
+ IMX214_CHIP_ID, val);
+
+ return 0;
+}
+
static int imx214_parse_fwnode(struct device *dev)
{
struct fwnode_handle *endpoint;
@@ -992,28 +1213,42 @@ static int imx214_parse_fwnode(struct device *dev)
int ret;
endpoint = fwnode_graph_get_next_endpoint(dev_fwnode(dev), NULL);
- if (!endpoint) {
- dev_err(dev, "endpoint node not found\n");
- return -EINVAL;
- }
+ if (!endpoint)
+ return dev_err_probe(dev, -EINVAL, "endpoint node not found\n");
ret = v4l2_fwnode_endpoint_alloc_parse(endpoint, &bus_cfg);
if (ret) {
- dev_err(dev, "parsing endpoint node failed\n");
+ dev_err_probe(dev, ret, "parsing endpoint node failed\n");
goto done;
}
- for (i = 0; i < bus_cfg.nr_of_link_frequencies; i++)
+ /* Check the number of MIPI CSI2 data lanes */
+ if (bus_cfg.bus.mipi_csi2.num_data_lanes != 4) {
+ ret = dev_err_probe(dev, -EINVAL,
+ "only 4 data lanes are currently supported\n");
+ goto done;
+ }
+
+ if (bus_cfg.nr_of_link_frequencies != 1)
+ dev_warn(dev, "Only one link-frequency supported, please review your DT. Continuing anyway\n");
+
+ for (i = 0; i < bus_cfg.nr_of_link_frequencies; i++) {
if (bus_cfg.link_frequencies[i] == IMX214_DEFAULT_LINK_FREQ)
break;
-
- if (i == bus_cfg.nr_of_link_frequencies) {
- dev_err(dev, "link-frequencies %d not supported, Please review your DT\n",
- IMX214_DEFAULT_LINK_FREQ);
- ret = -EINVAL;
- goto done;
+ if (bus_cfg.link_frequencies[i] ==
+ IMX214_DEFAULT_LINK_FREQ_LEGACY) {
+ dev_warn(dev,
+ "link-frequencies %d not supported, please review your DT. Continuing anyway\n",
+ IMX214_DEFAULT_LINK_FREQ);
+ break;
+ }
}
+ if (i == bus_cfg.nr_of_link_frequencies)
+ ret = dev_err_probe(dev, -EINVAL,
+ "link-frequencies %d not supported, please review your DT\n",
+ IMX214_DEFAULT_LINK_FREQ);
+
done:
v4l2_fwnode_endpoint_free(&bus_cfg);
fwnode_handle_put(endpoint);
@@ -1037,34 +1272,28 @@ static int imx214_probe(struct i2c_client *client)
imx214->dev = dev;
imx214->xclk = devm_clk_get(dev, NULL);
- if (IS_ERR(imx214->xclk)) {
- dev_err(dev, "could not get xclk");
- return PTR_ERR(imx214->xclk);
- }
+ if (IS_ERR(imx214->xclk))
+ return dev_err_probe(dev, PTR_ERR(imx214->xclk),
+ "failed to get xclk\n");
ret = clk_set_rate(imx214->xclk, IMX214_DEFAULT_CLK_FREQ);
- if (ret) {
- dev_err(dev, "could not set xclk frequency\n");
- return ret;
- }
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "failed to set xclk frequency\n");
ret = imx214_get_regulators(dev, imx214);
- if (ret < 0) {
- dev_err(dev, "cannot get regulators\n");
- return ret;
- }
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "failed to get regulators\n");
imx214->enable_gpio = devm_gpiod_get(dev, "enable", GPIOD_OUT_LOW);
- if (IS_ERR(imx214->enable_gpio)) {
- dev_err(dev, "cannot get enable gpio\n");
- return PTR_ERR(imx214->enable_gpio);
- }
+ if (IS_ERR(imx214->enable_gpio))
+ return dev_err_probe(dev, PTR_ERR(imx214->enable_gpio),
+ "failed to get enable gpio\n");
- imx214->regmap = devm_regmap_init_i2c(client, &sensor_regmap_config);
- if (IS_ERR(imx214->regmap)) {
- dev_err(dev, "regmap init failed\n");
- return PTR_ERR(imx214->regmap);
- }
+ imx214->regmap = devm_cci_regmap_init_i2c(client, 16);
+ if (IS_ERR(imx214->regmap))
+ return dev_err_probe(dev, PTR_ERR(imx214->regmap),
+ "failed to initialize CCI\n");
v4l2_i2c_subdev_init(&imx214->sd, client, &imx214_subdev_ops);
imx214->sd.internal_ops = &imx214_internal_ops;
@@ -1075,17 +1304,14 @@ static int imx214_probe(struct i2c_client *client)
*/
imx214_power_on(imx214->dev);
- pm_runtime_set_active(imx214->dev);
- pm_runtime_enable(imx214->dev);
- pm_runtime_idle(imx214->dev);
+ ret = imx214_identify_module(imx214);
+ if (ret)
+ goto error_power_off;
ret = imx214_ctrls_init(imx214);
if (ret < 0)
goto error_power_off;
- mutex_init(&imx214->mutex);
- imx214->ctrls.lock = &imx214->mutex;
-
imx214->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
imx214->pad.flags = MEDIA_PAD_FL_SOURCE;
imx214->sd.dev = &client->dev;
@@ -1093,28 +1319,44 @@ static int imx214_probe(struct i2c_client *client)
ret = media_entity_pads_init(&imx214->sd.entity, 1, &imx214->pad);
if (ret < 0) {
- dev_err(dev, "could not register media entity\n");
+ dev_err_probe(dev, ret, "failed to init entity pads\n");
goto free_ctrl;
}
- imx214_entity_init_state(&imx214->sd, NULL);
+ imx214->sd.state_lock = imx214->ctrls.lock;
+ ret = v4l2_subdev_init_finalize(&imx214->sd);
+ if (ret < 0) {
+ dev_err_probe(dev, ret, "subdev init error\n");
+ goto free_entity;
+ }
+
+ pm_runtime_set_active(imx214->dev);
+ pm_runtime_enable(imx214->dev);
ret = v4l2_async_register_subdev_sensor(&imx214->sd);
if (ret < 0) {
- dev_err(dev, "could not register v4l2 device\n");
- goto free_entity;
+ dev_err_probe(dev, ret,
+ "failed to register sensor sub-device\n");
+ goto error_subdev_cleanup;
}
+ pm_runtime_idle(imx214->dev);
+
return 0;
+error_subdev_cleanup:
+ pm_runtime_disable(imx214->dev);
+ pm_runtime_set_suspended(&client->dev);
+ v4l2_subdev_cleanup(&imx214->sd);
+
free_entity:
media_entity_cleanup(&imx214->sd.entity);
+
free_ctrl:
- mutex_destroy(&imx214->mutex);
v4l2_ctrl_handler_free(&imx214->ctrls);
+
error_power_off:
- pm_runtime_disable(imx214->dev);
- regulator_bulk_disable(IMX214_NUM_SUPPLIES, imx214->supplies);
+ imx214_power_off(imx214->dev);
return ret;
}
@@ -1125,13 +1367,14 @@ static void imx214_remove(struct i2c_client *client)
struct imx214 *imx214 = to_imx214(sd);
v4l2_async_unregister_subdev(&imx214->sd);
+ v4l2_subdev_cleanup(sd);
media_entity_cleanup(&imx214->sd.entity);
v4l2_ctrl_handler_free(&imx214->ctrls);
-
pm_runtime_disable(&client->dev);
- pm_runtime_set_suspended(&client->dev);
-
- mutex_destroy(&imx214->mutex);
+ if (!pm_runtime_status_suspended(&client->dev)) {
+ imx214_power_off(imx214->dev);
+ pm_runtime_set_suspended(&client->dev);
+ }
}
static const struct of_device_id imx214_of_match[] = {
diff --git a/drivers/media/i2c/imx219.c b/drivers/media/i2c/imx219.c
index 2d54cea113e1..04262bbf6306 100644
--- a/drivers/media/i2c/imx219.c
+++ b/drivers/media/i2c/imx219.c
@@ -70,15 +70,14 @@
#define IMX219_EXPOSURE_MAX 65535
/* V_TIMING internal */
-#define IMX219_REG_VTS CCI_REG16(0x0160)
-#define IMX219_VTS_MAX 0xffff
-
-#define IMX219_VBLANK_MIN 4
-
-/* HBLANK control - read only */
-#define IMX219_PPL_DEFAULT 3448
-
+#define IMX219_REG_FRM_LENGTH_A CCI_REG16(0x0160)
+#define IMX219_FLL_MAX 0xffff
+#define IMX219_VBLANK_MIN 32
#define IMX219_REG_LINE_LENGTH_A CCI_REG16(0x0162)
+#define IMX219_LLP_MIN 0x0d78
+#define IMX219_BINNED_LLP_MIN 0x0de8
+#define IMX219_LLP_MAX 0x7ff0
+
#define IMX219_REG_X_ADD_STA_A CCI_REG16(0x0164)
#define IMX219_REG_X_ADD_END_A CCI_REG16(0x0166)
#define IMX219_REG_Y_ADD_STA_A CCI_REG16(0x0168)
@@ -133,10 +132,11 @@
/* Pixel rate is fixed for all the modes */
#define IMX219_PIXEL_RATE 182400000
-#define IMX219_PIXEL_RATE_4LANE 280800000
+#define IMX219_PIXEL_RATE_4LANE 281600000
#define IMX219_DEFAULT_LINK_FREQ 456000000
-#define IMX219_DEFAULT_LINK_FREQ_4LANE 363000000
+#define IMX219_DEFAULT_LINK_FREQ_4LANE_UNSUPPORTED 363000000
+#define IMX219_DEFAULT_LINK_FREQ_4LANE 364000000
/* IMX219 native and active pixel array size. */
#define IMX219_NATIVE_WIDTH 3296U
@@ -154,7 +154,7 @@ struct imx219_mode {
unsigned int height;
/* V-timing */
- unsigned int vts_def;
+ unsigned int fll_def;
};
static const struct cci_reg_sequence imx219_common_regs[] = {
@@ -168,15 +168,6 @@ static const struct cci_reg_sequence imx219_common_regs[] = {
{ CCI_REG8(0x30eb), 0x05 },
{ CCI_REG8(0x30eb), 0x09 },
- /* PLL Clock Table */
- { IMX219_REG_VTPXCK_DIV, 5 },
- { IMX219_REG_VTSYCK_DIV, 1 },
- { IMX219_REG_PREPLLCK_VT_DIV, 3 }, /* 0x03 = AUTO set */
- { IMX219_REG_PREPLLCK_OP_DIV, 3 }, /* 0x03 = AUTO set */
- { IMX219_REG_PLL_VT_MPY, 57 },
- { IMX219_REG_OPSYCK_DIV, 1 },
- { IMX219_REG_PLL_OP_MPY, 114 },
-
/* Undocumented registers */
{ CCI_REG8(0x455e), 0x00 },
{ CCI_REG8(0x471e), 0x4b },
@@ -192,7 +183,6 @@ static const struct cci_reg_sequence imx219_common_regs[] = {
{ CCI_REG8(0x479b), 0x0e },
/* Frame Bank Register Group "A" */
- { IMX219_REG_LINE_LENGTH_A, 3448 },
{ IMX219_REG_X_ODD_INC_A, 1 },
{ IMX219_REG_Y_ODD_INC_A, 1 },
@@ -201,12 +191,45 @@ static const struct cci_reg_sequence imx219_common_regs[] = {
{ IMX219_REG_EXCK_FREQ, IMX219_EXCK_FREQ(IMX219_XCLK_FREQ / 1000000) },
};
+static const struct cci_reg_sequence imx219_2lane_regs[] = {
+ /* PLL Clock Table */
+ { IMX219_REG_VTPXCK_DIV, 5 },
+ { IMX219_REG_VTSYCK_DIV, 1 },
+ { IMX219_REG_PREPLLCK_VT_DIV, 3 }, /* 0x03 = AUTO set */
+ { IMX219_REG_PREPLLCK_OP_DIV, 3 }, /* 0x03 = AUTO set */
+ { IMX219_REG_PLL_VT_MPY, 57 },
+ { IMX219_REG_OPSYCK_DIV, 1 },
+ { IMX219_REG_PLL_OP_MPY, 114 },
+
+ /* 2-Lane CSI Mode */
+ { IMX219_REG_CSI_LANE_MODE, IMX219_CSI_2_LANE_MODE },
+};
+
+static const struct cci_reg_sequence imx219_4lane_regs[] = {
+ /* PLL Clock Table */
+ { IMX219_REG_VTPXCK_DIV, 5 },
+ { IMX219_REG_VTSYCK_DIV, 1 },
+ { IMX219_REG_PREPLLCK_VT_DIV, 3 }, /* 0x03 = AUTO set */
+ { IMX219_REG_PREPLLCK_OP_DIV, 3 }, /* 0x03 = AUTO set */
+ { IMX219_REG_PLL_VT_MPY, 88 },
+ { IMX219_REG_OPSYCK_DIV, 1 },
+ { IMX219_REG_PLL_OP_MPY, 91 },
+
+ /* 4-Lane CSI Mode */
+ { IMX219_REG_CSI_LANE_MODE, IMX219_CSI_4_LANE_MODE },
+};
+
static const s64 imx219_link_freq_menu[] = {
IMX219_DEFAULT_LINK_FREQ,
};
static const s64 imx219_link_freq_4lane_menu[] = {
IMX219_DEFAULT_LINK_FREQ_4LANE,
+ /*
+ * This will never be advertised to userspace, but is used by
+ * v4l2_link_freq_to_bitmap().
+ */
+ IMX219_DEFAULT_LINK_FREQ_4LANE_UNSUPPORTED,
};
static const char * const imx219_test_pattern_menu[] = {
@@ -289,25 +312,25 @@ static const struct imx219_mode supported_modes[] = {
/* 8MPix 15fps mode */
.width = 3280,
.height = 2464,
- .vts_def = 3526,
+ .fll_def = 3526,
},
{
/* 1080P 30fps cropped */
.width = 1920,
.height = 1080,
- .vts_def = 1763,
+ .fll_def = 1763,
},
{
- /* 2x2 binned 30fps mode */
+ /* 2x2 binned 60fps mode */
.width = 1640,
.height = 1232,
- .vts_def = 1763,
+ .fll_def = 1707,
},
{
- /* 640x480 30fps mode */
+ /* 640x480 60fps mode */
.width = 640,
.height = 480,
- .vts_def = 1763,
+ .fll_def = 1707,
},
};
@@ -359,6 +382,62 @@ static u32 imx219_get_format_code(struct imx219 *imx219, u32 code)
return imx219_mbus_formats[i];
}
+static u32 imx219_get_format_bpp(const struct v4l2_mbus_framefmt *format)
+{
+ switch (format->code) {
+ case MEDIA_BUS_FMT_SRGGB8_1X8:
+ case MEDIA_BUS_FMT_SGRBG8_1X8:
+ case MEDIA_BUS_FMT_SGBRG8_1X8:
+ case MEDIA_BUS_FMT_SBGGR8_1X8:
+ return 8;
+
+ case MEDIA_BUS_FMT_SRGGB10_1X10:
+ case MEDIA_BUS_FMT_SGRBG10_1X10:
+ case MEDIA_BUS_FMT_SGBRG10_1X10:
+ case MEDIA_BUS_FMT_SBGGR10_1X10:
+ default:
+ return 10;
+ }
+}
+
+static void imx219_get_binning(struct v4l2_subdev_state *state, u8 *bin_h,
+ u8 *bin_v)
+{
+ const struct v4l2_mbus_framefmt *format =
+ v4l2_subdev_state_get_format(state, 0);
+ const struct v4l2_rect *crop = v4l2_subdev_state_get_crop(state, 0);
+ u32 hbin = crop->width / format->width;
+ u32 vbin = crop->height / format->height;
+
+ *bin_h = IMX219_BINNING_NONE;
+ *bin_v = IMX219_BINNING_NONE;
+
+ /*
+ * Use analog binning only if both dimensions are binned, as it crops
+ * the other dimension.
+ */
+ if (hbin == 2 && vbin == 2) {
+ *bin_h = IMX219_BINNING_X2_ANALOG;
+ *bin_v = IMX219_BINNING_X2_ANALOG;
+
+ return;
+ }
+
+ if (hbin == 2)
+ *bin_h = IMX219_BINNING_X2;
+ if (vbin == 2)
+ *bin_v = IMX219_BINNING_X2;
+}
+
+static inline u32 imx219_get_rate_factor(struct v4l2_subdev_state *state)
+{
+ u8 bin_h, bin_v;
+
+ imx219_get_binning(state, &bin_h, &bin_v);
+
+ return (bin_h & bin_v) == IMX219_BINNING_X2_ANALOG ? 2 : 1;
+}
+
/* -----------------------------------------------------------------------------
* Controls
*/
@@ -370,10 +449,12 @@ static int imx219_set_ctrl(struct v4l2_ctrl *ctrl)
struct i2c_client *client = v4l2_get_subdevdata(&imx219->sd);
const struct v4l2_mbus_framefmt *format;
struct v4l2_subdev_state *state;
+ u32 rate_factor;
int ret = 0;
state = v4l2_subdev_get_locked_active_state(&imx219->sd);
format = v4l2_subdev_state_get_format(state, 0);
+ rate_factor = imx219_get_rate_factor(state);
if (ctrl->id == V4L2_CID_VBLANK) {
int exposure_max, exposure_def;
@@ -402,7 +483,7 @@ static int imx219_set_ctrl(struct v4l2_ctrl *ctrl)
break;
case V4L2_CID_EXPOSURE:
cci_write(imx219->regmap, IMX219_REG_EXPOSURE,
- ctrl->val, &ret);
+ ctrl->val / rate_factor, &ret);
break;
case V4L2_CID_DIGITAL_GAIN:
cci_write(imx219->regmap, IMX219_REG_DIGITAL_GAIN,
@@ -418,8 +499,12 @@ static int imx219_set_ctrl(struct v4l2_ctrl *ctrl)
imx219->hflip->val | imx219->vflip->val << 1, &ret);
break;
case V4L2_CID_VBLANK:
- cci_write(imx219->regmap, IMX219_REG_VTS,
- format->height + ctrl->val, &ret);
+ cci_write(imx219->regmap, IMX219_REG_FRM_LENGTH_A,
+ (format->height + ctrl->val) / rate_factor, &ret);
+ break;
+ case V4L2_CID_HBLANK:
+ cci_write(imx219->regmap, IMX219_REG_LINE_LENGTH_A,
+ format->width + ctrl->val, &ret);
break;
case V4L2_CID_TEST_PATTERN_RED:
cci_write(imx219->regmap, IMX219_REG_TESTP_RED,
@@ -466,7 +551,7 @@ static int imx219_init_controls(struct imx219 *imx219)
const struct imx219_mode *mode = &supported_modes[0];
struct v4l2_ctrl_handler *ctrl_hdlr;
struct v4l2_fwnode_device_properties props;
- int exposure_max, exposure_def, hblank;
+ int exposure_max, exposure_def;
int i, ret;
ctrl_hdlr = &imx219->ctrl_handler;
@@ -490,18 +575,17 @@ static int imx219_init_controls(struct imx219 *imx219)
if (imx219->link_freq)
imx219->link_freq->flags |= V4L2_CTRL_FLAG_READ_ONLY;
- /* Initial vblank/hblank/exposure parameters based on current mode */
+ /* Initial blanking and exposure. Limits are updated during set_fmt */
imx219->vblank = v4l2_ctrl_new_std(ctrl_hdlr, &imx219_ctrl_ops,
V4L2_CID_VBLANK, IMX219_VBLANK_MIN,
- IMX219_VTS_MAX - mode->height, 1,
- mode->vts_def - mode->height);
- hblank = IMX219_PPL_DEFAULT - mode->width;
+ IMX219_FLL_MAX - mode->height, 1,
+ mode->fll_def - mode->height);
imx219->hblank = v4l2_ctrl_new_std(ctrl_hdlr, &imx219_ctrl_ops,
- V4L2_CID_HBLANK, hblank, hblank,
- 1, hblank);
- if (imx219->hblank)
- imx219->hblank->flags |= V4L2_CTRL_FLAG_READ_ONLY;
- exposure_max = mode->vts_def - 4;
+ V4L2_CID_HBLANK,
+ IMX219_LLP_MIN - mode->width,
+ IMX219_LLP_MAX - mode->width, 1,
+ IMX219_LLP_MIN - mode->width);
+ exposure_max = mode->fll_def - 4;
exposure_def = (exposure_max < IMX219_EXPOSURE_DEFAULT) ?
exposure_max : IMX219_EXPOSURE_DEFAULT;
imx219->exposure = v4l2_ctrl_new_std(ctrl_hdlr, &imx219_ctrl_ops,
@@ -587,29 +671,13 @@ static int imx219_set_framefmt(struct imx219 *imx219,
{
const struct v4l2_mbus_framefmt *format;
const struct v4l2_rect *crop;
- unsigned int bpp;
- u64 bin_h, bin_v;
+ u8 bin_h, bin_v;
+ u32 bpp;
int ret = 0;
format = v4l2_subdev_state_get_format(state, 0);
crop = v4l2_subdev_state_get_crop(state, 0);
-
- switch (format->code) {
- case MEDIA_BUS_FMT_SRGGB8_1X8:
- case MEDIA_BUS_FMT_SGRBG8_1X8:
- case MEDIA_BUS_FMT_SGBRG8_1X8:
- case MEDIA_BUS_FMT_SBGGR8_1X8:
- bpp = 8;
- break;
-
- case MEDIA_BUS_FMT_SRGGB10_1X10:
- case MEDIA_BUS_FMT_SGRBG10_1X10:
- case MEDIA_BUS_FMT_SGBRG10_1X10:
- case MEDIA_BUS_FMT_SBGGR10_1X10:
- default:
- bpp = 10;
- break;
- }
+ bpp = imx219_get_format_bpp(format);
cci_write(imx219->regmap, IMX219_REG_X_ADD_STA_A,
crop->left - IMX219_PIXEL_ARRAY_LEFT, &ret);
@@ -620,26 +688,7 @@ static int imx219_set_framefmt(struct imx219 *imx219,
cci_write(imx219->regmap, IMX219_REG_Y_ADD_END_A,
crop->top - IMX219_PIXEL_ARRAY_TOP + crop->height - 1, &ret);
- switch (crop->width / format->width) {
- case 1:
- default:
- bin_h = IMX219_BINNING_NONE;
- break;
- case 2:
- bin_h = bpp == 8 ? IMX219_BINNING_X2_ANALOG : IMX219_BINNING_X2;
- break;
- }
-
- switch (crop->height / format->height) {
- case 1:
- default:
- bin_v = IMX219_BINNING_NONE;
- break;
- case 2:
- bin_v = bpp == 8 ? IMX219_BINNING_X2_ANALOG : IMX219_BINNING_X2;
- break;
- }
-
+ imx219_get_binning(state, &bin_h, &bin_v);
cci_write(imx219->regmap, IMX219_REG_BINNING_MODE_H, bin_h, &ret);
cci_write(imx219->regmap, IMX219_REG_BINNING_MODE_V, bin_v, &ret);
@@ -662,9 +711,11 @@ static int imx219_set_framefmt(struct imx219 *imx219,
static int imx219_configure_lanes(struct imx219 *imx219)
{
- return cci_write(imx219->regmap, IMX219_REG_CSI_LANE_MODE,
- imx219->lanes == 2 ? IMX219_CSI_2_LANE_MODE :
- IMX219_CSI_4_LANE_MODE, NULL);
+ /* Write the appropriate PLL settings for the number of MIPI lanes */
+ return cci_multi_reg_write(imx219->regmap,
+ imx219->lanes == 2 ? imx219_2lane_regs : imx219_4lane_regs,
+ imx219->lanes == 2 ? ARRAY_SIZE(imx219_2lane_regs) :
+ ARRAY_SIZE(imx219_4lane_regs), NULL);
};
static int imx219_start_streaming(struct imx219 *imx219,
@@ -815,7 +866,11 @@ static int imx219_set_pad_format(struct v4l2_subdev *sd,
const struct imx219_mode *mode;
struct v4l2_mbus_framefmt *format;
struct v4l2_rect *crop;
- unsigned int bin_h, bin_v;
+ u8 bin_h, bin_v;
+ u32 prev_line_len;
+
+ format = v4l2_subdev_state_get_format(state, 0);
+ prev_line_len = format->width + imx219->hblank->val;
mode = v4l2_find_nearest_size(supported_modes,
ARRAY_SIZE(supported_modes),
@@ -823,8 +878,6 @@ static int imx219_set_pad_format(struct v4l2_subdev *sd,
fmt->format.width, fmt->format.height);
imx219_update_pad_format(imx219, mode, &fmt->format, fmt->format.code);
-
- format = v4l2_subdev_state_get_format(state, 0);
*format = fmt->format;
/*
@@ -843,30 +896,51 @@ static int imx219_set_pad_format(struct v4l2_subdev *sd,
if (fmt->which == V4L2_SUBDEV_FORMAT_ACTIVE) {
int exposure_max;
int exposure_def;
- int hblank;
+ int hblank, llp_min;
+ int pixel_rate;
/* Update limits and set FPS to default */
__v4l2_ctrl_modify_range(imx219->vblank, IMX219_VBLANK_MIN,
- IMX219_VTS_MAX - mode->height, 1,
- mode->vts_def - mode->height);
+ IMX219_FLL_MAX - mode->height, 1,
+ mode->fll_def - mode->height);
__v4l2_ctrl_s_ctrl(imx219->vblank,
- mode->vts_def - mode->height);
+ mode->fll_def - mode->height);
/* Update max exposure while meeting expected vblanking */
- exposure_max = mode->vts_def - 4;
+ exposure_max = mode->fll_def - 4;
exposure_def = (exposure_max < IMX219_EXPOSURE_DEFAULT) ?
exposure_max : IMX219_EXPOSURE_DEFAULT;
__v4l2_ctrl_modify_range(imx219->exposure,
imx219->exposure->minimum,
exposure_max, imx219->exposure->step,
exposure_def);
+
/*
- * Currently PPL is fixed to IMX219_PPL_DEFAULT, so hblank
- * depends on mode->width only, and is not changeble in any
- * way other than changing the mode.
+ * With analog binning the default minimum line length of 3448
+ * can cause artefacts with RAW10 formats, because the ADC
+ * operates on two lines together. So we switch to a higher
+ * minimum of 3560.
*/
- hblank = IMX219_PPL_DEFAULT - mode->width;
- __v4l2_ctrl_modify_range(imx219->hblank, hblank, hblank, 1,
- hblank);
+ imx219_get_binning(state, &bin_h, &bin_v);
+ llp_min = (bin_h & bin_v) == IMX219_BINNING_X2_ANALOG ?
+ IMX219_BINNED_LLP_MIN : IMX219_LLP_MIN;
+ __v4l2_ctrl_modify_range(imx219->hblank, llp_min - mode->width,
+ IMX219_LLP_MAX - mode->width, 1,
+ llp_min - mode->width);
+ /*
+ * Retain PPL setting from previous mode so that the
+ * line time does not change on a mode change.
+ * Limits have to be recomputed as the controls define
+ * the blanking only, so PPL values need to have the
+ * mode width subtracted.
+ */
+ hblank = prev_line_len - mode->width;
+ __v4l2_ctrl_s_ctrl(imx219->hblank, hblank);
+
+ /* Scale the pixel rate based on the mode specific factor */
+ pixel_rate = imx219_get_pixel_rate(imx219) *
+ imx219_get_rate_factor(state);
+ __v4l2_ctrl_modify_range(imx219->pixel_rate, pixel_rate,
+ pixel_rate, 1, pixel_rate);
}
return 0;
@@ -877,10 +951,9 @@ static int imx219_get_selection(struct v4l2_subdev *sd,
struct v4l2_subdev_selection *sel)
{
switch (sel->target) {
- case V4L2_SEL_TGT_CROP: {
+ case V4L2_SEL_TGT_CROP:
sel->r = *v4l2_subdev_state_get_crop(state, 0);
return 0;
- }
case V4L2_SEL_TGT_NATIVE_SIZE:
sel->r.top = 0;
@@ -1035,6 +1108,7 @@ static int imx219_check_hwcfg(struct device *dev, struct imx219 *imx219)
struct v4l2_fwnode_endpoint ep_cfg = {
.bus_type = V4L2_MBUS_CSI2_DPHY
};
+ unsigned long link_freq_bitmap;
int ret = -EINVAL;
endpoint = fwnode_graph_get_next_endpoint(dev_fwnode(dev), NULL);
@@ -1056,23 +1130,40 @@ static int imx219_check_hwcfg(struct device *dev, struct imx219 *imx219)
imx219->lanes = ep_cfg.bus.mipi_csi2.num_data_lanes;
/* Check the link frequency set in device tree */
- if (!ep_cfg.nr_of_link_frequencies) {
- dev_err_probe(dev, -EINVAL,
- "link-frequency property not found in DT\n");
- goto error_out;
+ switch (imx219->lanes) {
+ case 2:
+ ret = v4l2_link_freq_to_bitmap(dev,
+ ep_cfg.link_frequencies,
+ ep_cfg.nr_of_link_frequencies,
+ imx219_link_freq_menu,
+ ARRAY_SIZE(imx219_link_freq_menu),
+ &link_freq_bitmap);
+ break;
+ case 4:
+ ret = v4l2_link_freq_to_bitmap(dev,
+ ep_cfg.link_frequencies,
+ ep_cfg.nr_of_link_frequencies,
+ imx219_link_freq_4lane_menu,
+ ARRAY_SIZE(imx219_link_freq_4lane_menu),
+ &link_freq_bitmap);
+
+ if (!ret && (link_freq_bitmap & BIT(1))) {
+ dev_warn(dev, "Link frequency of %d not supported, but has been incorrectly advertised previously\n",
+ IMX219_DEFAULT_LINK_FREQ_4LANE_UNSUPPORTED);
+ dev_warn(dev, "Using link frequency of %d\n",
+ IMX219_DEFAULT_LINK_FREQ_4LANE);
+ link_freq_bitmap |= BIT(0);
+ }
+ break;
}
- if (ep_cfg.nr_of_link_frequencies != 1 ||
- (ep_cfg.link_frequencies[0] != ((imx219->lanes == 2) ?
- IMX219_DEFAULT_LINK_FREQ : IMX219_DEFAULT_LINK_FREQ_4LANE))) {
+ if (ret || !(link_freq_bitmap & BIT(0))) {
+ ret = -EINVAL;
dev_err_probe(dev, -EINVAL,
"Link frequency not supported: %lld\n",
ep_cfg.link_frequencies[0]);
- goto error_out;
}
- ret = 0;
-
error_out:
v4l2_fwnode_endpoint_free(&ep_cfg);
fwnode_handle_put(endpoint);
@@ -1178,6 +1269,9 @@ static int imx219_probe(struct i2c_client *client)
goto error_media_entity;
}
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+
ret = v4l2_async_register_subdev_sensor(&imx219->sd);
if (ret < 0) {
dev_err_probe(dev, ret,
@@ -1185,15 +1279,14 @@ static int imx219_probe(struct i2c_client *client)
goto error_subdev_cleanup;
}
- /* Enable runtime PM and turn off the device */
- pm_runtime_set_active(dev);
- pm_runtime_enable(dev);
pm_runtime_idle(dev);
return 0;
error_subdev_cleanup:
v4l2_subdev_cleanup(&imx219->sd);
+ pm_runtime_disable(dev);
+ pm_runtime_set_suspended(dev);
error_media_entity:
media_entity_cleanup(&imx219->sd.entity);
@@ -1218,9 +1311,10 @@ static void imx219_remove(struct i2c_client *client)
imx219_free_controls(imx219);
pm_runtime_disable(&client->dev);
- if (!pm_runtime_status_suspended(&client->dev))
+ if (!pm_runtime_status_suspended(&client->dev)) {
imx219_power_off(&client->dev);
- pm_runtime_set_suspended(&client->dev);
+ pm_runtime_set_suspended(&client->dev);
+ }
}
static const struct of_device_id imx219_dt_ids[] = {
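Because the 2x2 analog binned modes halve the effective line period, the imx219 changes keep the controls in full-resolution line units and divide by the rate factor only when writing the registers. A small illustrative sketch of that conversion (not part of the patch, example values hypothetical):

/* Illustrative only: line-count registers are scaled by the binning rate factor. */
static u32 imx219_lines_to_reg(u32 lines, u32 rate_factor)
{
	/* rate_factor is 2 for 2x2 analog binning, 1 otherwise. */
	return lines / rate_factor;
}

/*
 * Example: the binned 60fps mode has fll_def = 1707 on a 1232-line format,
 * so the default VBLANK of 475 lines is written as (1232 + 475) / 2.
 */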
diff --git a/drivers/media/i2c/imx283.c b/drivers/media/i2c/imx283.c
index f676faf4b301..beb9169f93ad 100644
--- a/drivers/media/i2c/imx283.c
+++ b/drivers/media/i2c/imx283.c
@@ -1170,8 +1170,10 @@ static int imx283_disable_streams(struct v4l2_subdev *sd,
}
/* Power/clock management functions */
-static int imx283_power_on(struct imx283 *imx283)
+static int imx283_power_on(struct device *dev)
{
+ struct v4l2_subdev *sd = dev_get_drvdata(dev);
+ struct imx283 *imx283 = to_imx283(sd);
int ret;
ret = regulator_bulk_enable(ARRAY_SIZE(imx283_supply_name),
@@ -1199,29 +1201,14 @@ reg_off:
return ret;
}
-static int imx283_power_off(struct imx283 *imx283)
-{
- gpiod_set_value_cansleep(imx283->reset_gpio, 1);
- regulator_bulk_disable(ARRAY_SIZE(imx283_supply_name), imx283->supplies);
- clk_disable_unprepare(imx283->xclk);
-
- return 0;
-}
-
-static int imx283_runtime_resume(struct device *dev)
+static int imx283_power_off(struct device *dev)
{
struct v4l2_subdev *sd = dev_get_drvdata(dev);
struct imx283 *imx283 = to_imx283(sd);
- return imx283_power_on(imx283);
-}
-
-static int imx283_runtime_suspend(struct device *dev)
-{
- struct v4l2_subdev *sd = dev_get_drvdata(dev);
- struct imx283 *imx283 = to_imx283(sd);
-
- imx283_power_off(imx283);
+ gpiod_set_value_cansleep(imx283->reset_gpio, 1);
+ regulator_bulk_disable(ARRAY_SIZE(imx283_supply_name), imx283->supplies);
+ clk_disable_unprepare(imx283->xclk);
return 0;
}
@@ -1516,7 +1503,7 @@ static int imx283_probe(struct i2c_client *client)
* The sensor must be powered for imx283_identify_module()
* to be able to read the CHIP_ID register
*/
- ret = imx283_power_on(imx283);
+ ret = imx283_power_on(imx283->dev);
if (ret)
return ret;
@@ -1589,7 +1576,7 @@ error_pm:
pm_runtime_disable(imx283->dev);
pm_runtime_set_suspended(imx283->dev);
error_power_off:
- imx283_power_off(imx283);
+ imx283_power_off(imx283->dev);
return ret;
}
@@ -1606,12 +1593,12 @@ static void imx283_remove(struct i2c_client *client)
pm_runtime_disable(imx283->dev);
if (!pm_runtime_status_suspended(imx283->dev))
- imx283_power_off(imx283);
+ imx283_power_off(imx283->dev);
pm_runtime_set_suspended(imx283->dev);
}
-static DEFINE_RUNTIME_DEV_PM_OPS(imx283_pm_ops, imx283_runtime_suspend,
- imx283_runtime_resume, NULL);
+static DEFINE_RUNTIME_DEV_PM_OPS(imx283_pm_ops, imx283_power_off,
+ imx283_power_on, NULL);
static const struct of_device_id imx283_dt_ids[] = {
{ .compatible = "sony,imx283" },
diff --git a/drivers/media/i2c/imx319.c b/drivers/media/i2c/imx319.c
index dd1b4ff983dc..701840f4a5cc 100644
--- a/drivers/media/i2c/imx319.c
+++ b/drivers/media/i2c/imx319.c
@@ -2442,17 +2442,19 @@ static int imx319_probe(struct i2c_client *client)
if (full_power)
pm_runtime_set_active(&client->dev);
pm_runtime_enable(&client->dev);
- pm_runtime_idle(&client->dev);
ret = v4l2_async_register_subdev_sensor(&imx319->sd);
if (ret < 0)
goto error_media_entity_pm;
+ pm_runtime_idle(&client->dev);
+
return 0;
error_media_entity_pm:
pm_runtime_disable(&client->dev);
- pm_runtime_set_suspended(&client->dev);
+ if (full_power)
+ pm_runtime_set_suspended(&client->dev);
media_entity_cleanup(&imx319->sd.entity);
error_handler_free:
@@ -2474,7 +2476,8 @@ static void imx319_remove(struct i2c_client *client)
v4l2_ctrl_handler_free(sd->ctrl_handler);
pm_runtime_disable(&client->dev);
- pm_runtime_set_suspended(&client->dev);
+ if (!pm_runtime_status_suspended(&client->dev))
+ pm_runtime_set_suspended(&client->dev);
mutex_destroy(&imx319->mutex);
}
diff --git a/drivers/media/i2c/imx335.c b/drivers/media/i2c/imx335.c
index fcfd1d851bd4..0beb80b8c458 100644
--- a/drivers/media/i2c/imx335.c
+++ b/drivers/media/i2c/imx335.c
@@ -559,12 +559,14 @@ static int imx335_set_ctrl(struct v4l2_ctrl *ctrl)
imx335->vblank,
imx335->vblank + imx335->cur_mode->height);
- return __v4l2_ctrl_modify_range(imx335->exp_ctrl,
- IMX335_EXPOSURE_MIN,
- imx335->vblank +
- imx335->cur_mode->height -
- IMX335_EXPOSURE_OFFSET,
- 1, IMX335_EXPOSURE_DEFAULT);
+ ret = __v4l2_ctrl_modify_range(imx335->exp_ctrl,
+ IMX335_EXPOSURE_MIN,
+ imx335->vblank +
+ imx335->cur_mode->height -
+ IMX335_EXPOSURE_OFFSET,
+ 1, IMX335_EXPOSURE_DEFAULT);
+ if (ret)
+ return ret;
}
/*
@@ -575,6 +577,13 @@ static int imx335_set_ctrl(struct v4l2_ctrl *ctrl)
return 0;
switch (ctrl->id) {
+ case V4L2_CID_VBLANK:
+ exposure = imx335->exp_ctrl->val;
+ analog_gain = imx335->again_ctrl->val;
+
+ ret = imx335_update_exp_gain(imx335, exposure, analog_gain);
+
+ break;
case V4L2_CID_EXPOSURE:
exposure = ctrl->val;
analog_gain = imx335->again_ctrl->val;
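For clarity, the exposure ceiling that the imx335 VBLANK handler keeps in sync reduces to a one-liner (illustrative sketch using the constants already present in the driver, not part of the patch):

/* Illustrative only: maximum exposure allowed for a given VBLANK setting. */
static u32 imx335_exposure_max(u32 vblank, u32 mode_height)
{
	return vblank + mode_height - IMX335_EXPOSURE_OFFSET;
}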
diff --git a/drivers/media/i2c/imx415.c b/drivers/media/i2c/imx415.c
index 3f7924aa1bd3..9f37779bd611 100644
--- a/drivers/media/i2c/imx415.c
+++ b/drivers/media/i2c/imx415.c
@@ -26,6 +26,10 @@
#define IMX415_PIXEL_ARRAY_WIDTH 3864
#define IMX415_PIXEL_ARRAY_HEIGHT 2192
#define IMX415_PIXEL_ARRAY_VBLANK 58
+#define IMX415_EXPOSURE_OFFSET 8
+
+#define IMX415_PIXEL_RATE_74_25MHZ 891000000
+#define IMX415_PIXEL_RATE_72MHZ 864000000
#define IMX415_NUM_CLK_PARAM_REGS 11
@@ -51,7 +55,10 @@
#define IMX415_OUTSEL CCI_REG8(0x30c0)
#define IMX415_DRV CCI_REG8(0x30c1)
#define IMX415_VMAX CCI_REG24_LE(0x3024)
+#define IMX415_VMAX_MAX 0xfffff
#define IMX415_HMAX CCI_REG16_LE(0x3028)
+#define IMX415_HMAX_MAX 0xffff
+#define IMX415_HMAX_MULTIPLIER 12
#define IMX415_SHR0 CCI_REG24_LE(0x3050)
#define IMX415_GAIN_PCG_0 CCI_REG16_LE(0x3090)
#define IMX415_AGAIN_MIN 0
@@ -445,11 +452,8 @@ static const struct imx415_clk_params imx415_clk_params[] = {
},
};
-/* all-pixel 2-lane 720 Mbps 15.74 Hz mode */
-static const struct cci_reg_sequence imx415_mode_2_720[] = {
- { IMX415_VMAX, 0x08CA },
- { IMX415_HMAX, 0x07F0 },
- { IMX415_LANEMODE, IMX415_LANEMODE_2 },
+/* 720 Mbps CSI configuration */
+static const struct cci_reg_sequence imx415_linkrate_720mbps[] = {
{ IMX415_TCLKPOST, 0x006F },
{ IMX415_TCLKPREPARE, 0x002F },
{ IMX415_TCLKTRAIL, 0x002F },
@@ -461,11 +465,8 @@ static const struct cci_reg_sequence imx415_mode_2_720[] = {
{ IMX415_TLPX, 0x0027 },
};
-/* all-pixel 2-lane 1440 Mbps 30.01 Hz mode */
-static const struct cci_reg_sequence imx415_mode_2_1440[] = {
- { IMX415_VMAX, 0x08CA },
- { IMX415_HMAX, 0x042A },
- { IMX415_LANEMODE, IMX415_LANEMODE_2 },
+/* 1440 Mbps CSI configuration */
+static const struct cci_reg_sequence imx415_linkrate_1440mbps[] = {
{ IMX415_TCLKPOST, 0x009F },
{ IMX415_TCLKPREPARE, 0x0057 },
{ IMX415_TCLKTRAIL, 0x0057 },
@@ -477,11 +478,8 @@ static const struct cci_reg_sequence imx415_mode_2_1440[] = {
{ IMX415_TLPX, 0x004F },
};
-/* all-pixel 4-lane 891 Mbps 30 Hz mode */
-static const struct cci_reg_sequence imx415_mode_4_891[] = {
- { IMX415_VMAX, 0x08CA },
- { IMX415_HMAX, 0x044C },
- { IMX415_LANEMODE, IMX415_LANEMODE_4 },
+/* 891 Mbps CSI configuration */
+static const struct cci_reg_sequence imx415_linkrate_891mbps[] = {
{ IMX415_TCLKPOST, 0x007F },
{ IMX415_TCLKPREPARE, 0x0037 },
{ IMX415_TCLKTRAIL, 0x0037 },
@@ -498,39 +496,9 @@ struct imx415_mode_reg_list {
const struct cci_reg_sequence *regs;
};
-/*
- * Mode : number of lanes, lane rate and frame rate dependent settings
- *
- * pixel_rate and hmax_pix are needed to calculate hblank for the v4l2 ctrl
- * interface. These values can not be found in the data sheet and should be
- * treated as virtual values. Use following table when adding new modes.
- *
- * lane_rate lanes fps hmax_pix pixel_rate
- *
- * 594 2 10.000 4400 99000000
- * 891 2 15.000 4400 148500000
- * 720 2 15.748 4064 144000000
- * 1782 2 30.000 4400 297000000
- * 2079 2 30.000 4400 297000000
- * 1440 2 30.019 4510 304615385
- *
- * 594 4 20.000 5500 247500000
- * 594 4 25.000 4400 247500000
- * 720 4 25.000 4400 247500000
- * 720 4 30.019 4510 304615385
- * 891 4 30.000 4400 297000000
- * 1440 4 30.019 4510 304615385
- * 1440 4 60.038 4510 609230769
- * 1485 4 60.000 4400 594000000
- * 1782 4 60.000 4400 594000000
- * 2079 4 60.000 4400 594000000
- * 2376 4 90.164 4392 891000000
- */
struct imx415_mode {
u64 lane_rate;
- u32 lanes;
- u32 hmax_pix;
- u64 pixel_rate;
+ u32 hmax_min[2];
struct imx415_mode_reg_list reg_list;
};
@@ -538,32 +506,26 @@ struct imx415_mode {
static const struct imx415_mode supported_modes[] = {
{
.lane_rate = 720000000,
- .lanes = 2,
- .hmax_pix = 4064,
- .pixel_rate = 144000000,
+ .hmax_min = { 2032, 1066 },
.reg_list = {
- .num_of_regs = ARRAY_SIZE(imx415_mode_2_720),
- .regs = imx415_mode_2_720,
+ .num_of_regs = ARRAY_SIZE(imx415_linkrate_720mbps),
+ .regs = imx415_linkrate_720mbps,
},
},
{
.lane_rate = 1440000000,
- .lanes = 2,
- .hmax_pix = 4510,
- .pixel_rate = 304615385,
+ .hmax_min = { 1066, 533 },
.reg_list = {
- .num_of_regs = ARRAY_SIZE(imx415_mode_2_1440),
- .regs = imx415_mode_2_1440,
+ .num_of_regs = ARRAY_SIZE(imx415_linkrate_1440mbps),
+ .regs = imx415_linkrate_1440mbps,
},
},
{
.lane_rate = 891000000,
- .lanes = 4,
- .hmax_pix = 4400,
- .pixel_rate = 297000000,
+ .hmax_min = { 2200, 1100 },
.reg_list = {
- .num_of_regs = ARRAY_SIZE(imx415_mode_4_891),
- .regs = imx415_mode_4_891,
+ .num_of_regs = ARRAY_SIZE(imx415_linkrate_891mbps),
+ .regs = imx415_linkrate_891mbps,
},
},
};
@@ -587,6 +549,7 @@ static const char *const imx415_test_pattern_menu[] = {
struct imx415 {
struct device *dev;
struct clk *clk;
+ unsigned long pixel_rate;
struct regulator_bulk_data supplies[ARRAY_SIZE(imx415_supply_names)];
struct gpio_desc *reset;
struct regmap *regmap;
@@ -598,8 +561,10 @@ struct imx415 {
struct v4l2_ctrl_handler ctrls;
struct v4l2_ctrl *vblank;
+ struct v4l2_ctrl *hblank;
struct v4l2_ctrl *hflip;
struct v4l2_ctrl *vflip;
+ struct v4l2_ctrl *exposure;
unsigned int cur_mode;
unsigned int num_data_lanes;
@@ -730,17 +695,38 @@ static int imx415_s_ctrl(struct v4l2_ctrl *ctrl)
ctrls);
const struct v4l2_mbus_framefmt *format;
struct v4l2_subdev_state *state;
+ u32 exposure_max;
unsigned int vmax;
unsigned int flip;
int ret;
- if (!pm_runtime_get_if_in_use(sensor->dev))
- return 0;
-
state = v4l2_subdev_get_locked_active_state(&sensor->subdev);
format = v4l2_subdev_state_get_format(state, 0);
+ if (ctrl->id == V4L2_CID_VBLANK) {
+ exposure_max = format->height + ctrl->val -
+ IMX415_EXPOSURE_OFFSET;
+ __v4l2_ctrl_modify_range(sensor->exposure,
+ sensor->exposure->minimum,
+ exposure_max, sensor->exposure->step,
+ sensor->exposure->default_value);
+ }
+
+ if (!pm_runtime_get_if_in_use(sensor->dev))
+ return 0;
+
switch (ctrl->id) {
+ case V4L2_CID_VBLANK:
+ ret = cci_write(sensor->regmap, IMX415_VMAX,
+ format->height + ctrl->val, NULL);
+ if (ret)
+ return ret;
+ /*
+ * Exposure is set based on VMAX, which has just changed, so
+ * program the exposure register as well.
+ */
+ ctrl = sensor->exposure;
+ fallthrough;
case V4L2_CID_EXPOSURE:
/* clamp the exposure value to VMAX. */
vmax = format->height + sensor->vblank->cur.val;
@@ -766,6 +752,13 @@ static int imx415_s_ctrl(struct v4l2_ctrl *ctrl)
ret = imx415_set_testpattern(sensor, ctrl->val);
break;
+ case V4L2_CID_HBLANK:
+ ret = cci_write(sensor->regmap, IMX415_HMAX,
+ (format->width + ctrl->val) /
+ IMX415_HMAX_MULTIPLIER,
+ NULL);
+ break;
+
default:
ret = -EINVAL;
break;
@@ -784,11 +777,12 @@ static int imx415_ctrls_init(struct imx415 *sensor)
{
struct v4l2_fwnode_device_properties props;
struct v4l2_ctrl *ctrl;
- u64 pixel_rate = supported_modes[sensor->cur_mode].pixel_rate;
- u64 lane_rate = supported_modes[sensor->cur_mode].lane_rate;
+ const struct imx415_mode *cur_mode = &supported_modes[sensor->cur_mode];
+ u64 lane_rate = cur_mode->lane_rate;
u32 exposure_max = IMX415_PIXEL_ARRAY_HEIGHT +
- IMX415_PIXEL_ARRAY_VBLANK - 8;
- u32 hblank;
+ IMX415_PIXEL_ARRAY_VBLANK -
+ IMX415_EXPOSURE_OFFSET;
+ u32 hblank_min, hblank_max;
unsigned int i;
int ret;
@@ -816,36 +810,33 @@ static int imx415_ctrls_init(struct imx415 *sensor)
if (ctrl)
ctrl->flags |= V4L2_CTRL_FLAG_READ_ONLY;
- v4l2_ctrl_new_std(&sensor->ctrls, &imx415_ctrl_ops, V4L2_CID_EXPOSURE,
- 4, exposure_max, 1, exposure_max);
+ sensor->exposure = v4l2_ctrl_new_std(&sensor->ctrls, &imx415_ctrl_ops,
+ V4L2_CID_EXPOSURE, 4,
+ exposure_max, 1, exposure_max);
v4l2_ctrl_new_std(&sensor->ctrls, &imx415_ctrl_ops,
V4L2_CID_ANALOGUE_GAIN, IMX415_AGAIN_MIN,
IMX415_AGAIN_MAX, IMX415_AGAIN_STEP,
IMX415_AGAIN_MIN);
- hblank = supported_modes[sensor->cur_mode].hmax_pix -
- IMX415_PIXEL_ARRAY_WIDTH;
+ hblank_min = (cur_mode->hmax_min[sensor->num_data_lanes == 2 ? 0 : 1] *
+ IMX415_HMAX_MULTIPLIER) - IMX415_PIXEL_ARRAY_WIDTH;
+ hblank_max = (IMX415_HMAX_MAX * IMX415_HMAX_MULTIPLIER) -
+ IMX415_PIXEL_ARRAY_WIDTH;
ctrl = v4l2_ctrl_new_std(&sensor->ctrls, &imx415_ctrl_ops,
- V4L2_CID_HBLANK, hblank, hblank, 1, hblank);
- if (ctrl)
- ctrl->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+ V4L2_CID_HBLANK, hblank_min,
+ hblank_max, IMX415_HMAX_MULTIPLIER,
+ hblank_min);
sensor->vblank = v4l2_ctrl_new_std(&sensor->ctrls, &imx415_ctrl_ops,
V4L2_CID_VBLANK,
IMX415_PIXEL_ARRAY_VBLANK,
- IMX415_PIXEL_ARRAY_VBLANK, 1,
- IMX415_PIXEL_ARRAY_VBLANK);
- if (sensor->vblank)
- sensor->vblank->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+ IMX415_VMAX_MAX - IMX415_PIXEL_ARRAY_HEIGHT,
+ 1, IMX415_PIXEL_ARRAY_VBLANK);
- /*
- * The pixel rate used here is a virtual value and can be used for
- * calculating the frame rate together with hblank. It may not
- * necessarily be the physically correct pixel clock.
- */
- v4l2_ctrl_new_std(&sensor->ctrls, NULL, V4L2_CID_PIXEL_RATE, pixel_rate,
- pixel_rate, 1, pixel_rate);
+ v4l2_ctrl_new_std(&sensor->ctrls, NULL, V4L2_CID_PIXEL_RATE,
+ sensor->pixel_rate, sensor->pixel_rate, 1,
+ sensor->pixel_rate);
sensor->hflip = v4l2_ctrl_new_std(&sensor->ctrls, &imx415_ctrl_ops,
V4L2_CID_HFLIP, 0, 1, 1, 0);
@@ -890,7 +881,12 @@ static int imx415_set_mode(struct imx415 *sensor, int mode)
IMX415_NUM_CLK_PARAM_REGS,
&ret);
- return 0;
+ ret = cci_write(sensor->regmap, IMX415_LANEMODE,
+ sensor->num_data_lanes == 2 ? IMX415_LANEMODE_2 :
+ IMX415_LANEMODE_4,
+ NULL);
+
+ return ret;
}
static int imx415_setup(struct imx415 *sensor, struct v4l2_subdev_state *state)
@@ -1301,8 +1297,6 @@ static int imx415_parse_hw_config(struct imx415 *sensor)
}
for (j = 0; j < ARRAY_SIZE(supported_modes); ++j) {
- if (sensor->num_data_lanes != supported_modes[j].lanes)
- continue;
if (bus_cfg.link_frequencies[i] * 2 !=
supported_modes[j].lane_rate)
continue;
@@ -1317,6 +1311,17 @@ static int imx415_parse_hw_config(struct imx415 *sensor)
"no valid sensor mode defined\n");
goto done_endpoint_free;
}
+ switch (inck) {
+ case 27000000:
+ case 37125000:
+ case 74250000:
+ sensor->pixel_rate = IMX415_PIXEL_RATE_74_25MHZ;
+ break;
+ case 24000000:
+ case 72000000:
+ sensor->pixel_rate = IMX415_PIXEL_RATE_72MHZ;
+ break;
+ }
lane_rate = supported_modes[sensor->cur_mode].lane_rate;
for (i = 0; i < ARRAY_SIZE(imx415_clk_params); ++i) {
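The HBLANK control added for the imx415 maps onto the HMAX register in units of IMX415_HMAX_MULTIPLIER pixels; a sketch of that mapping, mirroring the V4L2_CID_HBLANK case above (illustrative only, not part of the patch):

/* Illustrative only: HMAX register value for a given HBLANK control value. */
static u32 imx415_hblank_to_hmax(u32 width, u32 hblank)
{
	/* One HMAX unit corresponds to IMX415_HMAX_MULTIPLIER (12) pixels. */
	return (width + hblank) / IMX415_HMAX_MULTIPLIER;
}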
diff --git a/drivers/media/i2c/lt6911uxe.c b/drivers/media/i2c/lt6911uxe.c
new file mode 100644
index 000000000000..c5b40bb58a37
--- /dev/null
+++ b/drivers/media/i2c/lt6911uxe.c
@@ -0,0 +1,707 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2023 - 2025 Intel Corporation.
+
+#include <linux/acpi.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <linux/v4l2-dv-timings.h>
+
+#include <media/v4l2-cci.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-dv-timings.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-fwnode.h>
+
+#define LT6911UXE_CHIP_ID 0x2102
+#define REG_CHIP_ID CCI_REG16(0xe100)
+
+#define REG_ENABLE_I2C CCI_REG8(0xe0ee)
+#define REG_HALF_PIX_CLK CCI_REG24(0xe085)
+#define REG_BYTE_CLK CCI_REG24(0xe092)
+#define REG_HALF_H_TOTAL CCI_REG16(0xe088)
+#define REG_V_TOTAL CCI_REG16(0xe08a)
+#define REG_HALF_H_ACTIVE CCI_REG16(0xe08c)
+#define REG_V_ACTIVE CCI_REG16(0xe08e)
+#define REG_MIPI_FORMAT CCI_REG8(0xe096)
+#define REG_MIPI_TX_CTRL CCI_REG8(0xe0b0)
+
+/* Interrupts */
+#define REG_INT_HDMI CCI_REG8(0xe084)
+#define INT_VIDEO_DISAPPEAR 0x0
+#define INT_VIDEO_READY 0x1
+
+#define LT6911UXE_DEFAULT_LANES 4
+#define LT6911_PAGE_CONTROL 0xff
+#define YUV422_8_BIT 0x7
+
+static const struct v4l2_dv_timings_cap lt6911uxe_timings_cap_4kp30 = {
+ .type = V4L2_DV_BT_656_1120,
+ /* keep this initialization for compatibility with CLANG */
+ .reserved = { 0 },
+ /* Pixel clock from REF_01 p. 20. Min/max height/width are unknown */
+ V4L2_INIT_BT_TIMINGS(160, 3840, /* min/max width */
+ 120, 2160, /* min/max height */
+ 50000000, 594000000, /* min/max pixelclock */
+ V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT |
+ V4L2_DV_BT_STD_CVT,
+ V4L2_DV_BT_CAP_PROGRESSIVE |
+ V4L2_DV_BT_CAP_CUSTOM |
+ V4L2_DV_BT_CAP_REDUCED_BLANKING)
+};
+
+static const struct regmap_range_cfg lt6911uxe_ranges[] = {
+ {
+ .name = "register_range",
+ .range_min = 0,
+ .range_max = 0xffff,
+ .selector_reg = LT6911_PAGE_CONTROL,
+ .selector_mask = 0xff,
+ .selector_shift = 0,
+ .window_start = 0,
+ .window_len = 0x100,
+ },
+};
+
+static const struct regmap_config lt6911uxe_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = 0xffff,
+ .ranges = lt6911uxe_ranges,
+ .num_ranges = ARRAY_SIZE(lt6911uxe_ranges),
+};
+
+struct lt6911uxe_mode {
+ u32 width;
+ u32 height;
+ u32 htotal;
+ u32 vtotal;
+ u32 code;
+ u32 fps;
+ u32 lanes;
+ s64 link_freq;
+ u64 pixel_clk;
+};
+
+struct lt6911uxe {
+ struct v4l2_subdev sd;
+ struct media_pad pad;
+ struct v4l2_ctrl_handler ctrl_handler;
+ struct v4l2_ctrl *pixel_rate;
+ struct v4l2_dv_timings timings;
+ struct lt6911uxe_mode cur_mode;
+ struct regmap *regmap;
+ struct gpio_desc *reset_gpio;
+ struct gpio_desc *irq_gpio;
+};
+
+static const struct v4l2_event lt6911uxe_ev_source_change = {
+ .type = V4L2_EVENT_SOURCE_CHANGE,
+ .u.src_change.changes = V4L2_EVENT_SRC_CH_RESOLUTION,
+};
+
+static inline struct lt6911uxe *to_lt6911uxe(struct v4l2_subdev *sd)
+{
+ return container_of(sd, struct lt6911uxe, sd);
+}
+
+static s64 get_pixel_rate(struct lt6911uxe *lt6911uxe)
+{
+ s64 pixel_rate;
+
+ pixel_rate = (s64)lt6911uxe->cur_mode.width *
+ lt6911uxe->cur_mode.height *
+ lt6911uxe->cur_mode.fps * 16;
+ do_div(pixel_rate, lt6911uxe->cur_mode.lanes);
+
+ return pixel_rate;
+}
+
+static int lt6911uxe_get_detected_timings(struct v4l2_subdev *sd,
+ struct v4l2_dv_timings *timings)
+{
+ struct lt6911uxe *lt6911uxe = to_lt6911uxe(sd);
+ struct v4l2_bt_timings *bt = &timings->bt;
+
+ memset(timings, 0, sizeof(struct v4l2_dv_timings));
+
+ timings->type = V4L2_DV_BT_656_1120;
+
+ bt->width = lt6911uxe->cur_mode.width;
+ bt->height = lt6911uxe->cur_mode.height;
+ bt->vsync = lt6911uxe->cur_mode.vtotal - lt6911uxe->cur_mode.height;
+ bt->hsync = lt6911uxe->cur_mode.htotal - lt6911uxe->cur_mode.width;
+ bt->pixelclock = lt6911uxe->cur_mode.pixel_clk;
+
+ return 0;
+}
+
+static int lt6911uxe_s_dv_timings(struct v4l2_subdev *sd, unsigned int pad,
+ struct v4l2_dv_timings *timings)
+{
+ struct lt6911uxe *lt6911uxe = to_lt6911uxe(sd);
+ struct v4l2_subdev_state *state;
+
+ state = v4l2_subdev_lock_and_get_active_state(sd);
+ if (v4l2_match_dv_timings(&lt6911uxe->timings, timings, 0, false)) {
+ v4l2_subdev_unlock_state(state);
+ return 0;
+ }
+
+ if (!v4l2_valid_dv_timings(timings, &lt6911uxe_timings_cap_4kp30,
+ NULL, NULL)) {
+ v4l2_subdev_unlock_state(state);
+ return -ERANGE;
+ }
+ lt6911uxe->timings = *timings;
+ v4l2_subdev_unlock_state(state);
+
+ return 0;
+}
+
+static int lt6911uxe_g_dv_timings(struct v4l2_subdev *sd, unsigned int pad,
+ struct v4l2_dv_timings *timings)
+{
+ struct lt6911uxe *lt6911uxe = to_lt6911uxe(sd);
+ struct v4l2_subdev_state *state;
+
+ state = v4l2_subdev_lock_and_get_active_state(sd);
+
+ *timings = lt6911uxe->timings;
+ v4l2_subdev_unlock_state(state);
+
+ return 0;
+}
+
+static int lt6911uxe_query_dv_timings(struct v4l2_subdev *sd, unsigned int pad,
+ struct v4l2_dv_timings *timings)
+{
+ struct v4l2_subdev_state *state;
+ int ret;
+
+ state = v4l2_subdev_lock_and_get_active_state(sd);
+ ret = lt6911uxe_get_detected_timings(sd, timings);
+ if (ret) {
+ v4l2_subdev_unlock_state(state);
+ return ret;
+ }
+
+ if (!v4l2_valid_dv_timings(timings, &lt6911uxe_timings_cap_4kp30,
+ NULL, NULL)) {
+ v4l2_subdev_unlock_state(state);
+ return -ERANGE;
+ }
+
+ v4l2_subdev_unlock_state(state);
+ return 0;
+}
+
+static int lt6911uxe_enum_dv_timings(struct v4l2_subdev *sd,
+ struct v4l2_enum_dv_timings *timings)
+{
+ return v4l2_enum_dv_timings_cap(timings,
+ &lt6911uxe_timings_cap_4kp30, NULL, NULL);
+}
+
+static int lt6911uxe_dv_timings_cap(struct v4l2_subdev *sd,
+ struct v4l2_dv_timings_cap *cap)
+{
+ *cap = lt6911uxe_timings_cap_4kp30;
+ return 0;
+}
+
+static int lt6911uxe_status_update(struct lt6911uxe *lt6911uxe)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&lt6911uxe->sd);
+ u64 int_event;
+ u64 byte_clk, half_pix_clk, fps, format;
+ u64 half_htotal, vtotal, half_width, height;
+ int ret = 0;
+
+ /* Read interrupt event */
+ cci_read(lt6911uxe->regmap, REG_INT_HDMI, &int_event, &ret);
+ if (ret) {
+ dev_err(&client->dev, "failed to read interrupt event: %d\n",
+ ret);
+ return ret;
+ }
+
+ switch (int_event) {
+ case INT_VIDEO_READY:
+ cci_read(lt6911uxe->regmap, REG_BYTE_CLK, &byte_clk, &ret);
+ byte_clk *= 1000;
+ cci_read(lt6911uxe->regmap, REG_HALF_PIX_CLK,
+ &half_pix_clk, &ret);
+ half_pix_clk *= 1000;
+
+ if (ret || byte_clk == 0 || half_pix_clk == 0) {
+ dev_dbg(&client->dev,
+ "invalid ByteClock or PixelClock\n");
+ return -EINVAL;
+ }
+
+ cci_read(lt6911uxe->regmap, REG_HALF_H_TOTAL,
+ &half_htotal, &ret);
+ cci_read(lt6911uxe->regmap, REG_V_TOTAL, &vtotal, &ret);
+ if (ret || half_htotal == 0 || vtotal == 0) {
+ dev_dbg(&client->dev, "invalid htotal or vtotal\n");
+ return -EINVAL;
+ }
+
+ fps = div_u64(half_pix_clk, half_htotal * vtotal);
+ if (fps > 60) {
+ dev_dbg(&client->dev,
+ "max fps is 60, current fps: %llu\n", fps);
+ return -EINVAL;
+ }
+
+ cci_read(lt6911uxe->regmap, REG_HALF_H_ACTIVE,
+ &half_width, &ret);
+ cci_read(lt6911uxe->regmap, REG_V_ACTIVE, &height, &ret);
+ if (ret || half_width == 0 || half_width * 2 > 3840 ||
+ height == 0 || height > 2160) {
+ dev_dbg(&client->dev, "invalid width or height\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Get the MIPI format; the lt6911uxe is expected to report YUV422_8_BIT.
+ */
+ cci_read(lt6911uxe->regmap, REG_MIPI_FORMAT, &format, &ret);
+ if (format != YUV422_8_BIT) {
+ dev_dbg(&client->dev, "invalid MIPI format\n");
+ return -EINVAL;
+ }
+
+ lt6911uxe->cur_mode.height = height;
+ lt6911uxe->cur_mode.width = half_width * 2;
+ lt6911uxe->cur_mode.fps = fps;
+ /* MIPI Clock Rate = ByteClock × 4, defined in lt6911uxe spec */
+ lt6911uxe->cur_mode.link_freq = byte_clk * 4;
+ lt6911uxe->cur_mode.pixel_clk = half_pix_clk * 2;
+ lt6911uxe->cur_mode.vtotal = vtotal;
+ lt6911uxe->cur_mode.htotal = half_htotal * 2;
+ break;
+
+ case INT_VIDEO_DISAPPEAR:
+ cci_write(lt6911uxe->regmap, REG_MIPI_TX_CTRL, 0x0, &ret);
+ lt6911uxe->cur_mode.height = 0;
+ lt6911uxe->cur_mode.width = 0;
+ lt6911uxe->cur_mode.fps = 0;
+ lt6911uxe->cur_mode.link_freq = 0;
+ break;
+
+ default:
+ ret = -ENOLINK;
+ }
+ v4l2_subdev_notify_event(&lt6911uxe->sd, &lt6911uxe_ev_source_change);
+ return ret;
+}
+
+static int lt6911uxe_init_controls(struct lt6911uxe *lt6911uxe)
+{
+ struct v4l2_ctrl_handler *ctrl_hdlr;
+ s64 pixel_rate;
+ int ret;
+
+ ctrl_hdlr = &lt6911uxe->ctrl_handler;
+ ret = v4l2_ctrl_handler_init(ctrl_hdlr, 8);
+ if (ret)
+ return ret;
+
+ pixel_rate = get_pixel_rate(lt6911uxe);
+ lt6911uxe->pixel_rate = v4l2_ctrl_new_std(ctrl_hdlr, NULL,
+ V4L2_CID_PIXEL_RATE,
+ pixel_rate, pixel_rate, 1,
+ pixel_rate);
+
+ if (ctrl_hdlr->error) {
+ ret = ctrl_hdlr->error;
+ goto hdlr_free;
+ }
+ lt6911uxe->sd.ctrl_handler = ctrl_hdlr;
+
+ return 0;
+
+hdlr_free:
+ v4l2_ctrl_handler_free(ctrl_hdlr);
+ return ret;
+}
+
+static void lt6911uxe_update_pad_format(const struct lt6911uxe_mode *mode,
+ struct v4l2_mbus_framefmt *fmt)
+{
+ fmt->width = mode->width;
+ fmt->height = mode->height;
+ fmt->code = mode->code;
+ fmt->field = V4L2_FIELD_NONE;
+}
+
+static int lt6911uxe_enable_streams(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ u32 pad, u64 streams_mask)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct lt6911uxe *lt6911uxe = to_lt6911uxe(sd);
+ int ret;
+
+ ret = pm_runtime_resume_and_get(&client->dev);
+ if (ret < 0)
+ return ret;
+
+ cci_write(lt6911uxe->regmap, REG_MIPI_TX_CTRL, 0x1, &ret);
+ if (ret) {
+ dev_err(&client->dev, "failed to start stream: %d\n", ret);
+ goto err_rpm_put;
+ }
+
+ return 0;
+
+err_rpm_put:
+ pm_runtime_put(&client->dev);
+ return ret;
+}
+
+static int lt6911uxe_disable_streams(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ u32 pad, u64 streams_mask)
+{
+ struct lt6911uxe *lt6911uxe = to_lt6911uxe(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(&lt6911uxe->sd);
+ int ret;
+
+ ret = cci_write(lt6911uxe->regmap, REG_MIPI_TX_CTRL, 0x0, NULL);
+ if (ret)
+ dev_err(&client->dev, "failed to stop stream: %d\n", ret);
+
+ pm_runtime_put(&client->dev);
+ return 0;
+}
+
+static int lt6911uxe_set_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *fmt)
+{
+ struct lt6911uxe *lt6911uxe = to_lt6911uxe(sd);
+ u64 pixel_rate;
+
+ lt6911uxe_update_pad_format(&lt6911uxe->cur_mode, &fmt->format);
+ *v4l2_subdev_state_get_format(sd_state, fmt->pad) = fmt->format;
+ if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
+ return 0;
+
+ pixel_rate = get_pixel_rate(lt6911uxe);
+ __v4l2_ctrl_modify_range(lt6911uxe->pixel_rate, pixel_rate,
+ pixel_rate, 1, pixel_rate);
+
+ return 0;
+}
+
+static int lt6911uxe_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ struct lt6911uxe *lt6911uxe = to_lt6911uxe(sd);
+
+ if (code->index)
+ return -EINVAL;
+
+ code->code = lt6911uxe->cur_mode.code;
+
+ return 0;
+}
+
+static int lt6911uxe_get_mbus_config(struct v4l2_subdev *sd,
+ unsigned int pad,
+ struct v4l2_mbus_config *cfg)
+{
+ struct lt6911uxe *lt6911uxe = to_lt6911uxe(sd);
+ struct v4l2_subdev_state *state;
+
+ state = v4l2_subdev_lock_and_get_active_state(sd);
+ cfg->type = V4L2_MBUS_CSI2_DPHY;
+ cfg->link_freq = lt6911uxe->cur_mode.link_freq;
+ v4l2_subdev_unlock_state(state);
+
+ return 0;
+}
+
+static int lt6911uxe_init_state(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state)
+{
+ struct v4l2_subdev_format fmt = {
+ .which = sd_state ? V4L2_SUBDEV_FORMAT_TRY
+ : V4L2_SUBDEV_FORMAT_ACTIVE,
+ };
+
+ return lt6911uxe_set_format(sd, sd_state, &fmt);
+}
+
+static const struct v4l2_subdev_video_ops lt6911uxe_video_ops = {
+ .s_stream = v4l2_subdev_s_stream_helper,
+};
+
+/*
+ * lt6911uxe provides an editable EDID for customers, but it can only be
+ * changed by reflashing the device. Due to this limitation, EDID support
+ * is not implemented.
+ */
+static const struct v4l2_subdev_pad_ops lt6911uxe_pad_ops = {
+ .set_fmt = lt6911uxe_set_format,
+ .get_fmt = v4l2_subdev_get_fmt,
+ .enable_streams = lt6911uxe_enable_streams,
+ .disable_streams = lt6911uxe_disable_streams,
+ .enum_mbus_code = lt6911uxe_enum_mbus_code,
+ .get_frame_interval = v4l2_subdev_get_frame_interval,
+ .s_dv_timings = lt6911uxe_s_dv_timings,
+ .g_dv_timings = lt6911uxe_g_dv_timings,
+ .query_dv_timings = lt6911uxe_query_dv_timings,
+ .enum_dv_timings = lt6911uxe_enum_dv_timings,
+ .dv_timings_cap = lt6911uxe_dv_timings_cap,
+ .get_mbus_config = lt6911uxe_get_mbus_config,
+};
+
+static const struct v4l2_subdev_core_ops lt6911uxe_subdev_core_ops = {
+ .subscribe_event = v4l2_ctrl_subdev_subscribe_event,
+ .unsubscribe_event = v4l2_event_subdev_unsubscribe,
+};
+
+static const struct v4l2_subdev_ops lt6911uxe_subdev_ops = {
+ .core = &lt6911uxe_subdev_core_ops,
+ .video = &lt6911uxe_video_ops,
+ .pad = &lt6911uxe_pad_ops,
+};
+
+static const struct media_entity_operations lt6911uxe_subdev_entity_ops = {
+ .link_validate = v4l2_subdev_link_validate,
+};
+
+static const struct v4l2_subdev_internal_ops lt6911uxe_internal_ops = {
+ .init_state = lt6911uxe_init_state,
+};
+
+static int lt6911uxe_fwnode_parse(struct lt6911uxe *lt6911uxe,
+ struct device *dev)
+{
+ struct fwnode_handle *endpoint;
+ struct v4l2_fwnode_endpoint bus_cfg = {
+ .bus_type = V4L2_MBUS_CSI2_DPHY,
+ };
+ int ret;
+
+ endpoint = fwnode_graph_get_endpoint_by_id(dev_fwnode(dev), 0, 0,
+ FWNODE_GRAPH_ENDPOINT_NEXT);
+ if (!endpoint)
+ return dev_err_probe(dev, -EPROBE_DEFER,
+ "endpoint node not found\n");
+
+ ret = v4l2_fwnode_endpoint_parse(endpoint, &bus_cfg);
+ fwnode_handle_put(endpoint);
+ if (ret) {
+ dev_err(dev, "failed to parse endpoint node: %d\n", ret);
+ goto out_err;
+ }
+
+ /*
+ * Check the number of MIPI CSI2 data lanes:
+ * the lt6911uxe only supports 4 lanes.
+ */
+ if (bus_cfg.bus.mipi_csi2.num_data_lanes != LT6911UXE_DEFAULT_LANES) {
+ dev_err(dev, "only 4 data lanes are currently supported\n");
+ ret = -EINVAL;
+ goto out_err;
+ }
+ lt6911uxe->cur_mode.lanes = bus_cfg.bus.mipi_csi2.num_data_lanes;
+ lt6911uxe->cur_mode.code = MEDIA_BUS_FMT_UYVY8_1X16;
+
+ return 0;
+
+out_err:
+ v4l2_fwnode_endpoint_free(&bus_cfg);
+ return ret;
+}
+
+static int lt6911uxe_identify_module(struct lt6911uxe *lt6911uxe,
+ struct device *dev)
+{
+ u64 val;
+ int ret = 0;
+
+ /* Chip ID should be confirmed when the I2C slave is active */
+ cci_write(lt6911uxe->regmap, REG_ENABLE_I2C, 0x1, &ret);
+ cci_read(lt6911uxe->regmap, REG_CHIP_ID, &val, &ret);
+ cci_write(lt6911uxe->regmap, REG_ENABLE_I2C, 0x0, &ret);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to read chip id\n");
+
+ if (val != LT6911UXE_CHIP_ID) {
+ return dev_err_probe(dev, -ENXIO, "chip id mismatch: %x!=%x\n",
+ LT6911UXE_CHIP_ID, (u16)val);
+ }
+
+ return 0;
+}
+
+static irqreturn_t lt6911uxe_threaded_irq_fn(int irq, void *dev_id)
+{
+ struct v4l2_subdev *sd = dev_id;
+ struct lt6911uxe *lt6911uxe = to_lt6911uxe(sd);
+ struct v4l2_subdev_state *state;
+ struct v4l2_subdev_format fmt = {
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE
+ };
+
+ lt6911uxe_status_update(lt6911uxe);
+ state = v4l2_subdev_lock_and_get_active_state(sd);
+ /*
+ * As an HDMI to CSI2 bridge, the driver needs to update the format
+ * as soon as the HDMI source changes.
+ */
+ lt6911uxe_set_format(sd, state, &fmt);
+ v4l2_subdev_unlock_state(state);
+
+ return IRQ_HANDLED;
+}
+
+static void lt6911uxe_remove(struct i2c_client *client)
+{
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct lt6911uxe *lt6911uxe = to_lt6911uxe(sd);
+
+ free_irq(gpiod_to_irq(lt6911uxe->irq_gpio), lt6911uxe);
+ v4l2_async_unregister_subdev(sd);
+ v4l2_subdev_cleanup(sd);
+ media_entity_cleanup(&sd->entity);
+ v4l2_ctrl_handler_free(&lt6911uxe->ctrl_handler);
+ pm_runtime_disable(&client->dev);
+ pm_runtime_set_suspended(&client->dev);
+}
+
+static int lt6911uxe_probe(struct i2c_client *client)
+{
+ struct lt6911uxe *lt6911uxe;
+ struct device *dev = &client->dev;
+ int ret;
+
+ lt6911uxe = devm_kzalloc(dev, sizeof(*lt6911uxe), GFP_KERNEL);
+ if (!lt6911uxe)
+ return -ENOMEM;
+
+ lt6911uxe->regmap = devm_regmap_init_i2c(client,
+ &lt6911uxe_regmap_config);
+ if (IS_ERR(lt6911uxe->regmap))
+ return dev_err_probe(dev, PTR_ERR(lt6911uxe->regmap),
+ "failed to init CCI\n");
+
+ v4l2_i2c_subdev_init(&lt6911uxe->sd, client, &lt6911uxe_subdev_ops);
+
+ lt6911uxe->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_IN);
+ if (IS_ERR(lt6911uxe->reset_gpio))
+ return dev_err_probe(dev, PTR_ERR(lt6911uxe->reset_gpio),
+ "failed to get reset gpio\n");
+
+ lt6911uxe->irq_gpio = devm_gpiod_get(dev, "readystat", GPIOD_IN);
+ if (IS_ERR(lt6911uxe->irq_gpio))
+ return dev_err_probe(dev, PTR_ERR(lt6911uxe->irq_gpio),
+ "failed to get ready_stat gpio\n");
+
+ ret = lt6911uxe_fwnode_parse(lt6911uxe, dev);
+ if (ret)
+ return ret;
+
+ usleep_range(10000, 10500);
+
+ ret = lt6911uxe_identify_module(lt6911uxe, dev);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to find chip\n");
+
+ ret = lt6911uxe_init_controls(lt6911uxe);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to init control\n");
+
+ lt6911uxe->sd.dev = dev;
+ lt6911uxe->sd.internal_ops = &lt6911uxe_internal_ops;
+ lt6911uxe->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ lt6911uxe->sd.entity.ops = &lt6911uxe_subdev_entity_ops;
+ lt6911uxe->sd.entity.function = MEDIA_ENT_F_VID_IF_BRIDGE;
+ lt6911uxe->pad.flags = MEDIA_PAD_FL_SOURCE;
+ ret = media_entity_pads_init(&lt6911uxe->sd.entity, 1, &lt6911uxe->pad);
+ if (ret) {
+ dev_err(dev, "failed to init entity pads: %d\n", ret);
+ goto v4l2_ctrl_handler_free;
+ }
+
+ /*
+ * Device is already turned on by i2c-core with ACPI domain PM.
+ * Enable runtime PM and turn off the device.
+ */
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+ pm_runtime_idle(dev);
+
+ ret = v4l2_subdev_init_finalize(&lt6911uxe->sd);
+ if (ret) {
+ dev_err(dev, "failed to init v4l2 subdev: %d\n", ret);
+ goto media_entity_cleanup;
+ }
+
+ /* Setting irq */
+ ret = request_threaded_irq(gpiod_to_irq(lt6911uxe->irq_gpio), NULL,
+ lt6911uxe_threaded_irq_fn,
+ IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING |
+ IRQF_ONESHOT, NULL, lt6911uxe);
+ if (ret) {
+ dev_err(dev, "failed to request IRQ: %d\n", ret);
+ goto subdev_cleanup;
+ }
+
+ ret = v4l2_async_register_subdev_sensor(&lt6911uxe->sd);
+ if (ret) {
+ dev_err(dev, "failed to register V4L2 subdev: %d\n", ret);
+ goto free_irq;
+ }
+
+ return 0;
+
+free_irq:
+ free_irq(gpiod_to_irq(lt6911uxe->irq_gpio), lt6911uxe);
+
+subdev_cleanup:
+ v4l2_subdev_cleanup(&lt6911uxe->sd);
+
+media_entity_cleanup:
+ pm_runtime_disable(dev);
+ pm_runtime_set_suspended(dev);
+ media_entity_cleanup(&lt6911uxe->sd.entity);
+
+v4l2_ctrl_handler_free:
+ v4l2_ctrl_handler_free(lt6911uxe->sd.ctrl_handler);
+
+ return ret;
+}
+
+static const struct acpi_device_id lt6911uxe_acpi_ids[] = {
+ { "INTC10C5" },
+ {}
+};
+MODULE_DEVICE_TABLE(acpi, lt6911uxe_acpi_ids);
+
+static struct i2c_driver lt6911uxe_i2c_driver = {
+ .driver = {
+ .name = "lt6911uxe",
+ .acpi_match_table = ACPI_PTR(lt6911uxe_acpi_ids),
+ },
+ .probe = lt6911uxe_probe,
+ .remove = lt6911uxe_remove,
+};
+
+module_i2c_driver(lt6911uxe_i2c_driver);
+
+MODULE_AUTHOR("Yan Dongcheng <dongcheng.yan@intel.com>");
+MODULE_DESCRIPTION("Lontium lt6911uxe HDMI to MIPI Bridge Driver");
+MODULE_LICENSE("GPL");
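The timing values the new bridge driver derives from its half-rate registers reduce to the following relations (illustrative sketch mirroring lt6911uxe_status_update() and get_pixel_rate(); the driver itself uses div_u64() for the divisions):

/* Illustrative only: frame rate and per-lane pixel rate as derived above. */
static u64 lt6911uxe_fps_from_regs(u64 half_pix_clk, u64 half_htotal, u64 vtotal)
{
	/* The chip reports half of the pixel clock and half of htotal. */
	return half_pix_clk / (half_htotal * vtotal);
}

static u64 lt6911uxe_pixel_rate_est(u32 width, u32 height, u32 fps, u32 lanes)
{
	/* 16 bits per UYVY8_1X16 sample, divided across the CSI-2 lanes. */
	return ((u64)width * height * fps * 16) / lanes;
}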
diff --git a/drivers/media/i2c/ov08x40.c b/drivers/media/i2c/ov08x40.c
index b9682264e2f5..cf0e41fc3071 100644
--- a/drivers/media/i2c/ov08x40.c
+++ b/drivers/media/i2c/ov08x40.c
@@ -1322,9 +1322,6 @@ static int ov08x40_power_on(struct device *dev)
struct ov08x40 *ov08x = to_ov08x40(sd);
int ret;
- if (is_acpi_node(dev_fwnode(dev)))
- return 0;
-
ret = clk_prepare_enable(ov08x->xvclk);
if (ret < 0) {
dev_err(dev, "failed to enable xvclk\n");
@@ -1360,9 +1357,6 @@ static int ov08x40_power_off(struct device *dev)
struct v4l2_subdev *sd = dev_get_drvdata(dev);
struct ov08x40 *ov08x = to_ov08x40(sd);
- if (is_acpi_node(dev_fwnode(dev)))
- return 0;
-
gpiod_set_value_cansleep(ov08x->reset_gpio, 1);
regulator_bulk_disable(ARRAY_SIZE(ov08x40_supply_names),
ov08x->supplies);
@@ -1400,7 +1394,7 @@ static int ov08x40_read_reg(struct ov08x40 *ov08x,
ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
if (ret != ARRAY_SIZE(msgs))
- return -EIO;
+ return ret < 0 ? ret : -EIO;
*val = be32_to_cpu(data_be);
@@ -1469,7 +1463,7 @@ static int ov08x40_write_reg(struct ov08x40 *ov08x,
u16 reg, u32 len, u32 __val)
{
struct i2c_client *client = v4l2_get_subdevdata(&ov08x->sd);
- int buf_i, val_i;
+ int buf_i, val_i, ret;
u8 buf[6], *val_p;
__be32 val;
@@ -1487,8 +1481,9 @@ static int ov08x40_write_reg(struct ov08x40 *ov08x,
while (val_i < 4)
buf[buf_i++] = val_p[val_i++];
- if (i2c_master_send(client, buf, len + 2) != len + 2)
- return -EIO;
+ ret = i2c_master_send(client, buf, len + 2);
+ if (ret != len + 2)
+ return ret < 0 ? ret : -EIO;
return 0;
}
@@ -1937,6 +1932,35 @@ static int ov08x40_stop_streaming(struct ov08x40 *ov08x)
OV08X40_REG_VALUE_08BIT, OV08X40_MODE_STANDBY);
}
+/* Verify chip ID */
+static int ov08x40_identify_module(struct ov08x40 *ov08x)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&ov08x->sd);
+ int ret;
+ u32 val;
+
+ if (ov08x->identified)
+ return 0;
+
+ ret = ov08x40_read_reg(ov08x, OV08X40_REG_CHIP_ID,
+ OV08X40_REG_VALUE_24BIT, &val);
+ if (ret) {
+ dev_err(&client->dev, "error reading chip-id register: %d\n", ret);
+ return ret;
+ }
+
+ if (val != OV08X40_CHIP_ID) {
+ dev_err(&client->dev, "chip id mismatch: %x!=%x\n",
+ OV08X40_CHIP_ID, val);
+ return -ENXIO;
+ }
+
+ dev_dbg(&client->dev, "chip id 0x%x\n", val);
+ ov08x->identified = true;
+
+ return 0;
+}
+
static int ov08x40_set_stream(struct v4l2_subdev *sd, int enable)
{
struct ov08x40 *ov08x = to_ov08x40(sd);
@@ -1950,6 +1974,10 @@ static int ov08x40_set_stream(struct v4l2_subdev *sd, int enable)
if (ret < 0)
goto err_unlock;
+ ret = ov08x40_identify_module(ov08x);
+ if (ret)
+ goto err_rpm_put;
+
/*
* Apply default & customized values
* and then start streaming.
@@ -1974,32 +2002,6 @@ err_unlock:
return ret;
}
-/* Verify chip ID */
-static int ov08x40_identify_module(struct ov08x40 *ov08x)
-{
- struct i2c_client *client = v4l2_get_subdevdata(&ov08x->sd);
- int ret;
- u32 val;
-
- if (ov08x->identified)
- return 0;
-
- ret = ov08x40_read_reg(ov08x, OV08X40_REG_CHIP_ID,
- OV08X40_REG_VALUE_24BIT, &val);
- if (ret)
- return ret;
-
- if (val != OV08X40_CHIP_ID) {
- dev_err(&client->dev, "chip id mismatch: %x!=%x\n",
- OV08X40_CHIP_ID, val);
- return -ENXIO;
- }
-
- ov08x->identified = true;
-
- return 0;
-}
-
static const struct v4l2_subdev_video_ops ov08x40_video_ops = {
.s_stream = ov08x40_set_stream,
};
@@ -2151,65 +2153,69 @@ static int ov08x40_check_hwcfg(struct ov08x40 *ov08x, struct device *dev)
int ret;
u32 xvclk_rate;
- if (!fwnode)
- return -ENXIO;
+ /*
+ * Sometimes the fwnode graph is initialized by the bridge driver.
+ * Bridge drivers doing this also add the sensor properties, so wait for them.
+ */
+ ep = fwnode_graph_get_next_endpoint(fwnode, NULL);
+ if (!ep)
+ return dev_err_probe(dev, -EPROBE_DEFER,
+ "waiting for fwnode graph endpoint\n");
- if (!is_acpi_node(fwnode)) {
- ov08x->xvclk = devm_clk_get(dev, NULL);
- if (IS_ERR(ov08x->xvclk)) {
- dev_err(dev, "could not get xvclk clock (%pe)\n",
- ov08x->xvclk);
- return PTR_ERR(ov08x->xvclk);
- }
+ ret = v4l2_fwnode_endpoint_alloc_parse(ep, &bus_cfg);
+ fwnode_handle_put(ep);
+ if (ret)
+ return dev_err_probe(dev, ret, "parsing endpoint failed\n");
- xvclk_rate = clk_get_rate(ov08x->xvclk);
+ ov08x->reset_gpio = devm_gpiod_get_optional(dev, "reset",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(ov08x->reset_gpio)) {
+ ret = dev_err_probe(dev, PTR_ERR(ov08x->reset_gpio),
+ "getting reset GPIO\n");
+ goto out_err;
+ }
- ov08x->reset_gpio = devm_gpiod_get_optional(dev, "reset",
- GPIOD_OUT_LOW);
- if (IS_ERR(ov08x->reset_gpio))
- return PTR_ERR(ov08x->reset_gpio);
+ for (i = 0; i < ARRAY_SIZE(ov08x40_supply_names); i++)
+ ov08x->supplies[i].supply = ov08x40_supply_names[i];
- for (i = 0; i < ARRAY_SIZE(ov08x40_supply_names); i++)
- ov08x->supplies[i].supply = ov08x40_supply_names[i];
+ ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(ov08x40_supply_names),
+ ov08x->supplies);
+ if (ret)
+ goto out_err;
- ret = devm_regulator_bulk_get(dev,
- ARRAY_SIZE(ov08x40_supply_names),
- ov08x->supplies);
- if (ret)
- return ret;
+ ov08x->xvclk = devm_clk_get_optional(dev, NULL);
+ if (IS_ERR(ov08x->xvclk)) {
+ ret = dev_err_probe(dev, PTR_ERR(ov08x->xvclk),
+ "getting xvclk\n");
+ goto out_err;
+ }
+ if (ov08x->xvclk) {
+ xvclk_rate = clk_get_rate(ov08x->xvclk);
} else {
ret = fwnode_property_read_u32(dev_fwnode(dev), "clock-frequency",
&xvclk_rate);
if (ret) {
- dev_err(dev, "can't get clock frequency");
- return ret;
+ dev_err(dev, "can't get clock frequency\n");
+ goto out_err;
}
}
if (xvclk_rate != OV08X40_XVCLK) {
- dev_err(dev, "external clock %d is not supported",
+ dev_err(dev, "external clock %d is not supported\n",
xvclk_rate);
- return -EINVAL;
+ ret = -EINVAL;
+ goto out_err;
}
- ep = fwnode_graph_get_next_endpoint(fwnode, NULL);
- if (!ep)
- return -ENXIO;
-
- ret = v4l2_fwnode_endpoint_alloc_parse(ep, &bus_cfg);
- fwnode_handle_put(ep);
- if (ret)
- return ret;
-
if (bus_cfg.bus.mipi_csi2.num_data_lanes != OV08X40_DATA_LANES) {
- dev_err(dev, "number of CSI2 data lanes %d is not supported",
+ dev_err(dev, "number of CSI2 data lanes %d is not supported\n",
bus_cfg.bus.mipi_csi2.num_data_lanes);
ret = -EINVAL;
goto out_err;
}
if (!bus_cfg.nr_of_link_frequencies) {
- dev_err(dev, "no link frequencies defined");
+ dev_err(dev, "no link frequencies defined\n");
ret = -EINVAL;
goto out_err;
}
@@ -2222,7 +2228,7 @@ static int ov08x40_check_hwcfg(struct ov08x40 *ov08x, struct device *dev)
}
if (j == bus_cfg.nr_of_link_frequencies) {
- dev_err(dev, "no link frequency %lld supported",
+ dev_err(dev, "no link frequency %lld supported\n",
link_freq_menu_items[i]);
ret = -EINVAL;
goto out_err;
@@ -2246,10 +2252,8 @@ static int ov08x40_probe(struct i2c_client *client)
/* Check HW config */
ret = ov08x40_check_hwcfg(ov08x, &client->dev);
- if (ret) {
- dev_err(&client->dev, "failed to check hwcfg: %d", ret);
+ if (ret)
return ret;
- }
/* Initialize subdev */
v4l2_i2c_subdev_init(&ov08x->sd, client, &ov08x40_subdev_ops);
@@ -2264,10 +2268,8 @@ static int ov08x40_probe(struct i2c_client *client)
/* Check module identity */
ret = ov08x40_identify_module(ov08x);
- if (ret) {
- dev_err(&client->dev, "failed to find sensor: %d\n", ret);
+ if (ret)
goto probe_power_off;
- }
}
/* Set default mode to max resolution */
@@ -2324,11 +2326,14 @@ static void ov08x40_remove(struct i2c_client *client)
ov08x40_free_controls(ov08x);
pm_runtime_disable(&client->dev);
+ if (!pm_runtime_status_suspended(&client->dev))
+ ov08x40_power_off(&client->dev);
pm_runtime_set_suspended(&client->dev);
-
- ov08x40_power_off(&client->dev);
}
+static DEFINE_RUNTIME_DEV_PM_OPS(ov08x40_pm_ops, ov08x40_power_off,
+ ov08x40_power_on, NULL);
+
#ifdef CONFIG_ACPI
static const struct acpi_device_id ov08x40_acpi_ids[] = {
{"OVTI08F4"},
@@ -2349,6 +2354,7 @@ static struct i2c_driver ov08x40_i2c_driver = {
.name = "ov08x40",
.acpi_match_table = ACPI_PTR(ov08x40_acpi_ids),
.of_match_table = ov08x40_of_match,
+ .pm = pm_sleep_ptr(&ov08x40_pm_ops),
},
.probe = ov08x40_probe,
.remove = ov08x40_remove,
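The error-handling change to the ov08x40 register accessors, which propagates the real errno from the I2C core instead of flattening every failure to -EIO, follows this shape (illustrative helper, not part of the patch; the actual hunks are above):

/* Illustrative only: keep the i2c core's errno, fall back to -EIO on short transfers. */
static int ov08x40_xfer_status(int ret, int expected)
{
	if (ret == expected)
		return 0;
	return ret < 0 ? ret : -EIO;
}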
diff --git a/drivers/media/i2c/ov2740.c b/drivers/media/i2c/ov2740.c
index 9a5d118b87b0..80d151e8ae29 100644
--- a/drivers/media/i2c/ov2740.c
+++ b/drivers/media/i2c/ov2740.c
@@ -83,8 +83,6 @@ static const char * const ov2740_supply_name[] = {
"DVDD",
};
-#define OV2740_NUM_SUPPLIES ARRAY_SIZE(ov2740_supply_name)
-
struct nvm_data {
struct nvmem_device *nvmem;
struct regmap *regmap;
@@ -536,7 +534,7 @@ struct ov2740 {
struct gpio_desc *reset_gpio;
struct gpio_desc *powerdown_gpio;
struct clk *clk;
- struct regulator_bulk_data supplies[OV2740_NUM_SUPPLIES];
+ struct regulator_bulk_data supplies[ARRAY_SIZE(ov2740_supply_name)];
/* Current mode */
const struct ov2740_mode *cur_mode;
@@ -655,7 +653,7 @@ static int ov2740_identify_module(struct ov2740 *ov2740)
return -ENXIO;
}
- dev_dbg(&client->dev, "chip id: %x\n", val);
+ dev_dbg(&client->dev, "chip id: 0x%x\n", val);
ov2740->identified = true;
@@ -828,8 +826,10 @@ static int ov2740_init_controls(struct ov2740 *ov2740)
0, 0, ov2740_test_pattern_menu);
ret = v4l2_fwnode_device_parse(&client->dev, &props);
- if (ret)
+ if (ret) {
+ v4l2_ctrl_handler_free(ctrl_hdlr);
return ret;
+ }
v4l2_ctrl_new_fwnode_properties(ctrl_hdlr, &ov2740_ctrl_ops, &props);
@@ -1319,7 +1319,8 @@ static int ov2740_suspend(struct device *dev)
gpiod_set_value_cansleep(ov2740->reset_gpio, 1);
gpiod_set_value_cansleep(ov2740->powerdown_gpio, 1);
clk_disable_unprepare(ov2740->clk);
- regulator_bulk_disable(OV2740_NUM_SUPPLIES, ov2740->supplies);
+ regulator_bulk_disable(ARRAY_SIZE(ov2740_supply_name),
+ ov2740->supplies);
return 0;
}
@@ -1329,13 +1330,15 @@ static int ov2740_resume(struct device *dev)
struct ov2740 *ov2740 = to_ov2740(sd);
int ret;
- ret = regulator_bulk_enable(OV2740_NUM_SUPPLIES, ov2740->supplies);
+ ret = regulator_bulk_enable(ARRAY_SIZE(ov2740_supply_name),
+ ov2740->supplies);
if (ret)
return ret;
ret = clk_prepare_enable(ov2740->clk);
if (ret) {
- regulator_bulk_disable(OV2740_NUM_SUPPLIES, ov2740->supplies);
+ regulator_bulk_disable(ARRAY_SIZE(ov2740_supply_name),
+ ov2740->supplies);
return ret;
}
@@ -1351,7 +1354,8 @@ static int ov2740_probe(struct i2c_client *client)
struct device *dev = &client->dev;
struct ov2740 *ov2740;
bool full_power;
- int i, ret;
+ unsigned int i;
+ int ret;
ov2740 = devm_kzalloc(&client->dev, sizeof(*ov2740), GFP_KERNEL);
if (!ov2740)
@@ -1389,10 +1393,11 @@ static int ov2740_probe(struct i2c_client *client)
return dev_err_probe(dev, PTR_ERR(ov2740->clk),
"failed to get clock\n");
- for (i = 0; i < OV2740_NUM_SUPPLIES; i++)
+ for (i = 0; i < ARRAY_SIZE(ov2740_supply_name); i++)
ov2740->supplies[i].supply = ov2740_supply_name[i];
- ret = devm_regulator_bulk_get(dev, OV2740_NUM_SUPPLIES, ov2740->supplies);
+ ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(ov2740_supply_name),
+ ov2740->supplies);
if (ret)
return dev_err_probe(dev, ret, "failed to get regulators\n");
diff --git a/drivers/media/i2c/ov7251.c b/drivers/media/i2c/ov7251.c
index 30f61e04ecaf..3226888d77e9 100644
--- a/drivers/media/i2c/ov7251.c
+++ b/drivers/media/i2c/ov7251.c
@@ -922,6 +922,8 @@ static int ov7251_set_power_on(struct device *dev)
return ret;
}
+ usleep_range(1000, 1100);
+
gpiod_set_value_cansleep(ov7251->enable_gpio, 1);
/* wait at least 65536 external clock cycles */
@@ -1696,7 +1698,7 @@ static int ov7251_probe(struct i2c_client *client)
return PTR_ERR(ov7251->analog_regulator);
}
- ov7251->enable_gpio = devm_gpiod_get(dev, "enable", GPIOD_OUT_HIGH);
+ ov7251->enable_gpio = devm_gpiod_get(dev, "enable", GPIOD_OUT_LOW);
if (IS_ERR(ov7251->enable_gpio)) {
dev_err(dev, "cannot get enable gpio\n");
return PTR_ERR(ov7251->enable_gpio);
diff --git a/drivers/media/i2c/ov9282.c b/drivers/media/i2c/ov9282.c
index 87e5d7ce5a47..c882a021cf18 100644
--- a/drivers/media/i2c/ov9282.c
+++ b/drivers/media/i2c/ov9282.c
@@ -44,6 +44,15 @@
#define OV9282_EXPOSURE_STEP 1
#define OV9282_EXPOSURE_DEFAULT 0x0282
+/* AEC/AGC manual */
+#define OV9282_REG_AEC_MANUAL 0x3503
+#define OV9282_DIGFRAC_GAIN_DELAY BIT(6)
+#define OV9282_GAIN_CHANGE_DELAY BIT(5)
+#define OV9282_GAIN_DELAY BIT(4)
+#define OV9282_GAIN_PREC16_EN BIT(3)
+#define OV9282_GAIN_MANUAL_AS_SENSGAIN BIT(2)
+#define OV9282_AEC_MANUAL_DEFAULT 0x00
+
/* Analog gain control */
#define OV9282_REG_AGAIN 0x3509
#define OV9282_AGAIN_MIN 0x10
@@ -214,7 +223,7 @@ static const struct ov9282_reg common_regs[] = {
{0x3030, 0x10},
{0x3039, 0x32},
{0x303a, 0x00},
- {0x3503, 0x08},
+ {OV9282_REG_AEC_MANUAL, OV9282_GAIN_PREC16_EN},
{0x3505, 0x8c},
{0x3507, 0x03},
{0x3508, 0x00},
@@ -296,8 +305,8 @@ static const struct ov9282_reg mode_1280x800_regs[] = {
{0x3813, 0x08},
{0x3814, 0x11},
{0x3815, 0x11},
- {0x3820, 0x40},
- {0x3821, 0x00},
+ {OV9282_REG_TIMING_FORMAT_1, 0x40},
+ {OV9282_REG_TIMING_FORMAT_2, 0x00},
{0x4003, 0x40},
{0x4008, 0x04},
{0x4009, 0x0b},
@@ -327,8 +336,8 @@ static const struct ov9282_reg mode_1280x720_regs[] = {
{0x3813, 0x08},
{0x3814, 0x11},
{0x3815, 0x11},
- {0x3820, 0x3c},
- {0x3821, 0x84},
+ {OV9282_REG_TIMING_FORMAT_1, 0x3c},
+ {OV9282_REG_TIMING_FORMAT_2, 0x84},
{0x4003, 0x40},
{0x4008, 0x02},
{0x4009, 0x05},
@@ -358,8 +367,8 @@ static const struct ov9282_reg mode_640x400_regs[] = {
{0x3813, 0x04},
{0x3814, 0x31},
{0x3815, 0x22},
- {0x3820, 0x60},
- {0x3821, 0x01},
+ {OV9282_REG_TIMING_FORMAT_1, 0x60},
+ {OV9282_REG_TIMING_FORMAT_2, 0x01},
{0x4008, 0x02},
{0x4009, 0x05},
{0x400c, 0x00},
diff --git a/drivers/media/i2c/st-mipid02.c b/drivers/media/i2c/st-mipid02.c
index f08db3cfe076..f4568e87f018 100644
--- a/drivers/media/i2c/st-mipid02.c
+++ b/drivers/media/i2c/st-mipid02.c
@@ -301,8 +301,9 @@ static int mipid02_detect(struct mipid02_dev *bridge)
static int mipid02_configure_from_rx_speed(struct mipid02_dev *bridge,
struct v4l2_mbus_framefmt *fmt)
{
+ struct media_pad *remote =
+ &bridge->s_subdev->entity.pads[bridge->s_subdev_pad_id];
struct i2c_client *client = bridge->i2c_client;
- struct v4l2_subdev *subdev = bridge->s_subdev;
struct v4l2_fwnode_endpoint *ep = &bridge->rx;
u32 bpp = bpp_from_code(fmt->code);
/*
@@ -312,7 +313,7 @@ static int mipid02_configure_from_rx_speed(struct mipid02_dev *bridge,
u64 ui_4 = 2000000000;
s64 link_freq;
- link_freq = v4l2_get_link_freq(subdev->ctrl_handler, bpp,
+ link_freq = v4l2_get_link_freq(remote, bpp,
2 * ep->bus.mipi_csi2.num_data_lanes);
if (link_freq < 0) {
dev_err(&client->dev, "Failed to get link frequency");
diff --git a/drivers/media/i2c/tc358746.c b/drivers/media/i2c/tc358746.c
index 389582420ba7..143aa1359aba 100644
--- a/drivers/media/i2c/tc358746.c
+++ b/drivers/media/i2c/tc358746.c
@@ -161,10 +161,6 @@ struct tc358746 {
u16 pll_pre_div;
u16 pll_mul;
-#define TC358746_VB_MAX_SIZE (511 * 32)
-#define TC358746_VB_DEFAULT_SIZE (1 * 32)
- unsigned int vb_size; /* Video buffer size in bits */
-
struct phy_configure_opts_mipi_dphy dphy_cfg;
};
@@ -202,6 +198,15 @@ enum {
PDFORMAT_YUV444,
};
+#define TC358746_FORMAT_RAW(_bpp, _code) \
+{ \
+ .code = _code, \
+ .bus_width = _bpp, \
+ .bpp = _bpp, \
+ .pdformat = PDFORMAT_RAW##_bpp, \
+ .pdataf = PDATAF_MODE0, /* don't care */ \
+}
+
/* Check tc358746_src_mbus_code() if you add new formats */
static const struct tc358746_format tc358746_formats[] = {
{
@@ -230,7 +235,23 @@ static const struct tc358746_format tc358746_formats[] = {
.bpp = 20,
.pdformat = PDFORMAT_YUV422_10BIT,
.pdataf = PDATAF_MODE0, /* don't care */
- }
+ },
+ TC358746_FORMAT_RAW(8, MEDIA_BUS_FMT_SBGGR8_1X8),
+ TC358746_FORMAT_RAW(8, MEDIA_BUS_FMT_SGBRG8_1X8),
+ TC358746_FORMAT_RAW(8, MEDIA_BUS_FMT_SGRBG8_1X8),
+ TC358746_FORMAT_RAW(8, MEDIA_BUS_FMT_SRGGB8_1X8),
+ TC358746_FORMAT_RAW(10, MEDIA_BUS_FMT_SBGGR10_1X10),
+ TC358746_FORMAT_RAW(10, MEDIA_BUS_FMT_SGBRG10_1X10),
+ TC358746_FORMAT_RAW(10, MEDIA_BUS_FMT_SGRBG10_1X10),
+ TC358746_FORMAT_RAW(10, MEDIA_BUS_FMT_SRGGB10_1X10),
+ TC358746_FORMAT_RAW(12, MEDIA_BUS_FMT_SBGGR12_1X12),
+ TC358746_FORMAT_RAW(12, MEDIA_BUS_FMT_SGBRG12_1X12),
+ TC358746_FORMAT_RAW(12, MEDIA_BUS_FMT_SGRBG12_1X12),
+ TC358746_FORMAT_RAW(12, MEDIA_BUS_FMT_SRGGB12_1X12),
+ TC358746_FORMAT_RAW(14, MEDIA_BUS_FMT_SBGGR14_1X14),
+ TC358746_FORMAT_RAW(14, MEDIA_BUS_FMT_SGBRG14_1X14),
+ TC358746_FORMAT_RAW(14, MEDIA_BUS_FMT_SGRBG14_1X14),
+ TC358746_FORMAT_RAW(14, MEDIA_BUS_FMT_SRGGB14_1X14),
};
/* Get n-th format for pad */
@@ -415,6 +436,70 @@ tc358746_apply_pll_config(struct tc358746 *tc358746)
return tc358746_set_bits(tc358746, PLLCTL1_REG, CKEN);
}
+#define TC358746_VB_PRECISION 10
+#define TC358746_VB_MAX_SIZE (511 * 32)
+#define TC358746_VB_DEFAULT_SIZE (1 * 32)
+
+static int tc358746_calc_vb_size(struct tc358746 *tc358746,
+ s64 source_link_freq,
+ const struct v4l2_mbus_framefmt *mbusfmt,
+ const struct tc358746_format *fmt)
+{
+ unsigned long csi_bitrate, source_bitrate;
+ unsigned int fifo_sz, tmp, n;
+ int vb_size; /* Video buffer size in bits */
+
+ source_bitrate = source_link_freq * fmt->bus_width;
+
+ csi_bitrate = tc358746->dphy_cfg.lanes * tc358746->pll_rate;
+
+ dev_dbg(tc358746->sd.dev,
+ "Fifo settings params: source-bitrate:%lu csi-bitrate:%lu",
+ source_bitrate, csi_bitrate);
+
+ /* Avoid possible FIFO overflows */
+ if (csi_bitrate < source_bitrate)
+ return -EINVAL;
+
+ /* Best case */
+ if (csi_bitrate == source_bitrate) {
+ fifo_sz = TC358746_VB_DEFAULT_SIZE;
+ vb_size = TC358746_VB_DEFAULT_SIZE;
+ } else {
+ /*
+ * Avoid possible FIFO underflow in case of
+ * csi_bitrate > source_bitrate. In that case the chip has an internal
+ * fifo which can be used to delay the line output.
+ *
+ * Fifo size calculation (excluding precision):
+ *
+ * fifo-sz, image-width - in bits
+ * sbr - source_bitrate in bits/s
+ * csir - csi_bitrate in bits/s
+ *
+ * image-width / csir >= (image-width - fifo-sz) / sbr
+ * image-width * sbr / csir >= image-width - fifo-sz
+ * fifo-sz >= image-width - image-width * sbr / csir; with n = csir/sbr
+ * fifo-sz >= image-width - image-width / n
+ */
+ source_bitrate /= TC358746_VB_PRECISION;
+ n = csi_bitrate / source_bitrate;
+ tmp = (mbusfmt->width * TC358746_VB_PRECISION) / n;
+ fifo_sz = mbusfmt->width - tmp;
+ fifo_sz *= fmt->bpp;
+ vb_size = round_up(fifo_sz, 32);
+ }
+
+ dev_dbg(tc358746->sd.dev,
+ "Found FIFO size[bits]:%u -> aligned to size[bits]:%u\n",
+ fifo_sz, vb_size);
+
+ if (vb_size > TC358746_VB_MAX_SIZE)
+ return -EINVAL;
+
+ return vb_size;
+}
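As a worked instance of the fifo-sz formula in the comment above, the standalone program below plugs in made-up numbers (800 Mbit/s source, 1 Gbit/s CSI link, a 1280-pixel-wide 10-bit line); none of these values come from the driver, they only illustrate the arithmetic.

#include <stdio.h>

#define PRECISION	10
#define ROUND_UP(x, a)	((((x) + (a) - 1) / (a)) * (a))

int main(void)
{
	unsigned long source_bitrate = 800000000UL;	/* bits/s, assumed */
	unsigned long csi_bitrate = 1000000000UL;	/* bits/s, assumed */
	unsigned int width = 1280;			/* pixels, assumed */
	unsigned int bpp = 10;				/* bits per pixel, assumed */
	unsigned int n, tmp, fifo_sz;

	source_bitrate /= PRECISION;
	n = csi_bitrate / source_bitrate;		/* csir / sbr, scaled */
	tmp = (width * PRECISION) / n;
	fifo_sz = (width - tmp) * bpp;			/* bits to buffer per line */

	printf("fifo_sz = %u bits, aligned = %u bits\n",
	       fifo_sz, ROUND_UP(fifo_sz, 32));
	return 0;
}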
+
static int tc358746_apply_misc_config(struct tc358746 *tc358746)
{
const struct v4l2_mbus_framefmt *mbusfmt;
@@ -422,6 +507,9 @@ static int tc358746_apply_misc_config(struct tc358746 *tc358746)
struct v4l2_subdev_state *sink_state;
const struct tc358746_format *fmt;
struct device *dev = sd->dev;
+ struct media_pad *source_pad;
+ s64 source_link_freq;
+ int vb_size;
u32 val;
int err;
@@ -430,6 +518,21 @@ static int tc358746_apply_misc_config(struct tc358746 *tc358746)
mbusfmt = v4l2_subdev_state_get_format(sink_state, TC358746_SINK);
fmt = tc358746_get_format_by_code(TC358746_SINK, mbusfmt->code);
+ source_pad = media_entity_remote_source_pad_unique(&sd->entity);
+ if (IS_ERR(source_pad)) {
+ dev_err(dev, "Failed to get source pad of %s\n", sd->name);
+ err = PTR_ERR(source_pad);
+ goto out;
+ }
+ source_link_freq = v4l2_get_link_freq(source_pad, 0, 0);
+ if (source_link_freq <= 0) {
+ dev_err(dev,
+ "Failed to query or invalid source link frequency\n");
+ /* Return -EINVAL if source_link_freq is 0 */
+ err = source_link_freq ?: -EINVAL;
+ goto out;
+ }
+
/* Self defined CSI user data type id's are not supported yet */
val = PDFMT(fmt->pdformat);
dev_dbg(dev, "DATAFMT: 0x%x\n", val);
@@ -443,7 +546,13 @@ static int tc358746_apply_misc_config(struct tc358746 *tc358746)
if (err)
goto out;
- val = tc358746->vb_size / 32;
+ vb_size = tc358746_calc_vb_size(tc358746, source_link_freq, mbusfmt, fmt);
+ if (vb_size < 0) {
+ err = vb_size;
+ goto out;
+ }
+
+ val = vb_size / 32;
dev_dbg(dev, "FIFOCTL: %u (0x%x)\n", val, val);
err = tc358746_write(tc358746, FIFOCTL_REG, val);
if (err)
@@ -460,24 +569,20 @@ out:
return err;
}
-/* Use MHz as base so the div needs no u64 */
-static u32 tc358746_cfg_to_cnt(unsigned int cfg_val,
- unsigned int clk_mhz,
- unsigned int time_base)
+static u32 tc358746_cfg_to_cnt(unsigned long cfg_val, unsigned long clk_hz,
+ unsigned long long time_base)
{
- return DIV_ROUND_UP(cfg_val * clk_mhz, time_base);
+ return div64_u64((u64)cfg_val * clk_hz + time_base - 1, time_base);
}
-static u32 tc358746_ps_to_cnt(unsigned int cfg_val,
- unsigned int clk_mhz)
+static u32 tc358746_ps_to_cnt(unsigned long cfg_val, unsigned long clk_hz)
{
- return tc358746_cfg_to_cnt(cfg_val, clk_mhz, USEC_PER_SEC);
+ return tc358746_cfg_to_cnt(cfg_val, clk_hz, PSEC_PER_SEC);
}
-static u32 tc358746_us_to_cnt(unsigned int cfg_val,
- unsigned int clk_mhz)
+static u32 tc358746_us_to_cnt(unsigned long cfg_val, unsigned long clk_hz)
{
- return tc358746_cfg_to_cnt(cfg_val, clk_mhz, 1);
+ return tc358746_cfg_to_cnt(cfg_val, clk_hz, USEC_PER_SEC);
}
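The three converted helpers above all reduce to one round-up division, count = ceil(cfg_val * clk_hz / time_base), with time_base = 10^6 for microsecond inputs and 10^12 for picosecond inputs. The snippet below is a userspace sketch of that arithmetic with assumed example clocks, not driver code.

#include <stdint.h>
#include <stdio.h>

static uint32_t cfg_to_cnt(uint64_t cfg_val, uint64_t clk_hz, uint64_t time_base)
{
	/* Round-up division, as in the converted helper. */
	return (uint32_t)((cfg_val * clk_hz + time_base - 1) / time_base);
}

int main(void)
{
	/* Assumed: 100 us interval counted with a 31.25 MHz clock. */
	printf("us: %u cycles\n",
	       (unsigned int)cfg_to_cnt(100, 31250000, 1000000ULL));
	/* Assumed: 8000 ps interval counted with a 250 MHz clock. */
	printf("ps: %u cycles\n",
	       (unsigned int)cfg_to_cnt(8000, 250000000, 1000000000000ULL));
	return 0;
}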
static int tc358746_apply_dphy_config(struct tc358746 *tc358746)
@@ -492,7 +597,6 @@ static int tc358746_apply_dphy_config(struct tc358746 *tc358746)
/* The hs_byte_clk is also called SYSCLK in the excel sheet */
hs_byte_clk = cfg->hs_clk_rate / 8;
- hs_byte_clk /= HZ_PER_MHZ;
hf_clk = hs_byte_clk / 2;
val = tc358746_us_to_cnt(cfg->init, hf_clk) - 1;
@@ -882,97 +986,6 @@ static unsigned long tc358746_find_pll_settings(struct tc358746 *tc358746,
return best_freq;
}
-#define TC358746_PRECISION 10
-
-static int
-tc358746_link_validate(struct v4l2_subdev *sd, struct media_link *link,
- struct v4l2_subdev_format *source_fmt,
- struct v4l2_subdev_format *sink_fmt)
-{
- struct tc358746 *tc358746 = to_tc358746(sd);
- unsigned long csi_bitrate, source_bitrate;
- struct v4l2_subdev_state *sink_state;
- struct v4l2_mbus_framefmt *mbusfmt;
- const struct tc358746_format *fmt;
- unsigned int fifo_sz, tmp, n;
- struct v4l2_subdev *source;
- s64 source_link_freq;
- int err;
-
- err = v4l2_subdev_link_validate_default(sd, link, source_fmt, sink_fmt);
- if (err)
- return err;
-
- sink_state = v4l2_subdev_lock_and_get_active_state(sd);
- mbusfmt = v4l2_subdev_state_get_format(sink_state, TC358746_SINK);
-
- /* Check the FIFO settings */
- fmt = tc358746_get_format_by_code(TC358746_SINK, mbusfmt->code);
-
- source = media_entity_to_v4l2_subdev(link->source->entity);
- source_link_freq = v4l2_get_link_freq(source->ctrl_handler, 0, 0);
- if (source_link_freq <= 0) {
- dev_err(tc358746->sd.dev,
- "Failed to query or invalid source link frequency\n");
- v4l2_subdev_unlock_state(sink_state);
- /* Return -EINVAL in case of source_link_freq is 0 */
- return source_link_freq ? : -EINVAL;
- }
- source_bitrate = source_link_freq * fmt->bus_width;
-
- csi_bitrate = tc358746->dphy_cfg.lanes * tc358746->pll_rate;
-
- dev_dbg(tc358746->sd.dev,
- "Fifo settings params: source-bitrate:%lu csi-bitrate:%lu",
- source_bitrate, csi_bitrate);
-
- /* Avoid possible FIFO overflows */
- if (csi_bitrate < source_bitrate) {
- v4l2_subdev_unlock_state(sink_state);
- return -EINVAL;
- }
-
- /* Best case */
- if (csi_bitrate == source_bitrate) {
- fifo_sz = TC358746_VB_DEFAULT_SIZE;
- tc358746->vb_size = TC358746_VB_DEFAULT_SIZE;
- goto out;
- }
-
- /*
- * Avoid possible FIFO underflow in case of
- * csi_bitrate > source_bitrate. For such case the chip has a internal
- * fifo which can be used to delay the line output.
- *
- * Fifo size calculation (excluding precision):
- *
- * fifo-sz, image-width - in bits
- * sbr - source_bitrate in bits/s
- * csir - csi_bitrate in bits/s
- *
- * image-width / csir >= (image-width - fifo-sz) / sbr
- * image-width * sbr / csir >= image-width - fifo-sz
- * fifo-sz >= image-width - image-width * sbr / csir; with n = csir/sbr
- * fifo-sz >= image-width - image-width / n
- */
-
- source_bitrate /= TC358746_PRECISION;
- n = csi_bitrate / source_bitrate;
- tmp = (mbusfmt->width * TC358746_PRECISION) / n;
- fifo_sz = mbusfmt->width - tmp;
- fifo_sz *= fmt->bpp;
- tc358746->vb_size = round_up(fifo_sz, 32);
-
-out:
- dev_dbg(tc358746->sd.dev,
- "Found FIFO size[bits]:%u -> aligned to size[bits]:%u\n",
- fifo_sz, tc358746->vb_size);
-
- v4l2_subdev_unlock_state(sink_state);
-
- return tc358746->vb_size > TC358746_VB_MAX_SIZE ? -EINVAL : 0;
-}
-
static int tc358746_get_mbus_config(struct v4l2_subdev *sd, unsigned int pad,
struct v4l2_mbus_config *config)
{
@@ -1040,7 +1053,7 @@ static const struct v4l2_subdev_pad_ops tc358746_pad_ops = {
.enum_mbus_code = tc358746_enum_mbus_code,
.set_fmt = tc358746_set_fmt,
.get_fmt = v4l2_subdev_get_fmt,
- .link_validate = tc358746_link_validate,
+ .link_validate = v4l2_subdev_link_validate_default,
.get_mbus_config = tc358746_get_mbus_config,
};
@@ -1352,8 +1365,6 @@ tc358746_init_output_port(struct tc358746 *tc358746, unsigned long refclk)
if (err)
goto err;
- tc358746->vb_size = TC358746_VB_DEFAULT_SIZE;
-
return 0;
err:
diff --git a/drivers/media/i2c/tda1997x.c b/drivers/media/i2c/tda1997x.c
index 3b7e5ff5b010..959590afc80f 100644
--- a/drivers/media/i2c/tda1997x.c
+++ b/drivers/media/i2c/tda1997x.c
@@ -2315,11 +2315,10 @@ static int tda1997x_parse_dt(struct tda1997x_state *state)
return -EINVAL;
ret = v4l2_fwnode_endpoint_parse(of_fwnode_handle(ep), &bus_cfg);
- if (ret) {
- of_node_put(ep);
- return ret;
- }
of_node_put(ep);
+ if (ret)
+ return ret;
+
pdata->vidout_bus_type = bus_cfg.bus_type;
/* polarity of HS/VS/DE */
diff --git a/drivers/media/i2c/vgxy61.c b/drivers/media/i2c/vgxy61.c
index d77468c8587b..5b0479f3a3c0 100644
--- a/drivers/media/i2c/vgxy61.c
+++ b/drivers/media/i2c/vgxy61.c
@@ -892,8 +892,8 @@ static u32 vgxy61_get_expo_long_max(struct vgxy61_dev *sensor,
third_rot_max_expo = (sensor->frame_length / 71) * short_expo_ratio;
/* Take the minimum from all rules */
- return min(min(first_rot_max_expo, second_rot_max_expo),
- third_rot_max_expo);
+ return min3(first_rot_max_expo, second_rot_max_expo,
+ third_rot_max_expo);
}
static int vgxy61_update_exposure(struct vgxy61_dev *sensor, u16 new_expo_long,
diff --git a/drivers/media/i2c/video-i2c.c b/drivers/media/i2c/video-i2c.c
index 036a6375627a..0dd991d70d53 100644
--- a/drivers/media/i2c/video-i2c.c
+++ b/drivers/media/i2c/video-i2c.c
@@ -264,18 +264,8 @@ static int amg88xx_set_power(struct video_i2c_data *data, bool on)
#if IS_REACHABLE(CONFIG_HWMON)
-static const u32 amg88xx_temp_config[] = {
- HWMON_T_INPUT,
- 0
-};
-
-static const struct hwmon_channel_info amg88xx_temp = {
- .type = hwmon_temp,
- .config = amg88xx_temp_config,
-};
-
static const struct hwmon_channel_info * const amg88xx_info[] = {
- &amg88xx_temp,
+ HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT),
NULL
};
diff --git a/drivers/media/pci/cx23885/cx23885-cards.c b/drivers/media/pci/cx23885/cx23885-cards.c
index 9244b4320558..da23e7dfeef5 100644
--- a/drivers/media/pci/cx23885/cx23885-cards.c
+++ b/drivers/media/pci/cx23885/cx23885-cards.c
@@ -860,6 +860,31 @@ struct cx23885_board cx23885_boards[] = {
.amux = CX25840_AUDIO7,
} },
},
+ [CX23885_BOARD_AVERMEDIA_H789C] = {
+ .name = "AVerMedia H789-C",
+ .porta = CX23885_ANALOG_VIDEO,
+ .tuner_type = TUNER_NXP_TDA18271,
+ .tuner_addr = 0x63, /* 0xc0 >> 1 */
+ .tuner_bus = 1,
+ .input = {{
+ .type = CX23885_VMUX_TELEVISION,
+ .vmux = CX25840_VIN7_CH3 |
+ CX25840_VIN5_CH2 |
+ CX25840_VIN2_CH1 |
+ CX25840_DIF_ON,
+ .amux = CX25840_AUDIO8,
+ }, {
+ .type = CX23885_VMUX_COMPOSITE1,
+ .vmux = CX25840_VIN8_CH1,
+ .amux = CX25840_AUDIO7,
+ }, {
+ .type = CX23885_VMUX_SVIDEO,
+ .vmux = CX25840_VIN8_CH1 |
+ CX25840_VIN7_CH3 |
+ CX25840_SVIDEO_ON,
+ .amux = CX25840_AUDIO7,
+ }, },
+ },
};
const unsigned int cx23885_bcount = ARRAY_SIZE(cx23885_boards);
@@ -1187,6 +1212,10 @@ struct cx23885_subid cx23885_subids[] = {
.subvendor = 0x1461,
.subdevice = 0x3100,
.card = CX23885_BOARD_AVERMEDIA_CE310B,
+ }, {
+ .subvendor = 0x1461,
+ .subdevice = 0xe139,
+ .card = CX23885_BOARD_AVERMEDIA_H789C,
},
};
const unsigned int cx23885_idcount = ARRAY_SIZE(cx23885_subids);
@@ -2413,6 +2442,7 @@ void cx23885_card_setup(struct cx23885_dev *dev)
case CX23885_BOARD_VIEWCAST_260E:
case CX23885_BOARD_VIEWCAST_460E:
case CX23885_BOARD_AVERMEDIA_CE310B:
+ case CX23885_BOARD_AVERMEDIA_H789C:
dev->sd_cx25840 = v4l2_i2c_new_subdev(&dev->v4l2_dev,
&dev->i2c_bus[2].i2c_adap,
"cx25840", 0x88 >> 1, NULL);
diff --git a/drivers/media/pci/cx23885/cx23885-core.c b/drivers/media/pci/cx23885/cx23885-core.c
index c8705d786cdd..a39f445ce22a 100644
--- a/drivers/media/pci/cx23885/cx23885-core.c
+++ b/drivers/media/pci/cx23885/cx23885-core.c
@@ -2231,6 +2231,28 @@ static void cx23885_finidev(struct pci_dev *pci_dev)
kfree(dev);
}
+static int __maybe_unused cx23885_suspend(struct device *dev_d)
+{
+ struct pci_dev *pci_dev = to_pci_dev(dev_d);
+ struct v4l2_device *v4l2_dev = pci_get_drvdata(pci_dev);
+ struct cx23885_dev *dev = to_cx23885(v4l2_dev);
+
+ cx23885_shutdown(dev);
+
+ return 0;
+}
+
+static int __maybe_unused cx23885_resume(struct device *dev_d)
+{
+ struct pci_dev *pci_dev = to_pci_dev(dev_d);
+ struct v4l2_device *v4l2_dev = pci_get_drvdata(pci_dev);
+ struct cx23885_dev *dev = to_cx23885(v4l2_dev);
+
+ cx23885_reset(dev);
+
+ return 0;
+}
+
static const struct pci_device_id cx23885_pci_tbl[] = {
{
/* CX23885 */
@@ -2250,11 +2272,14 @@ static const struct pci_device_id cx23885_pci_tbl[] = {
};
MODULE_DEVICE_TABLE(pci, cx23885_pci_tbl);
+static SIMPLE_DEV_PM_OPS(cx23885_pm_ops, cx23885_suspend, cx23885_resume);
+
static struct pci_driver cx23885_pci_driver = {
- .name = "cx23885",
- .id_table = cx23885_pci_tbl,
- .probe = cx23885_initdev,
- .remove = cx23885_finidev,
+ .name = "cx23885",
+ .id_table = cx23885_pci_tbl,
+ .probe = cx23885_initdev,
+ .remove = cx23885_finidev,
+ .driver.pm = &cx23885_pm_ops,
};
static int __init cx23885_init(void)
diff --git a/drivers/media/pci/cx23885/cx23885-video.c b/drivers/media/pci/cx23885/cx23885-video.c
index 35d58328db56..14d219fd1d8a 100644
--- a/drivers/media/pci/cx23885/cx23885-video.c
+++ b/drivers/media/pci/cx23885/cx23885-video.c
@@ -261,7 +261,8 @@ static int cx23885_video_mux(struct cx23885_dev *dev, unsigned int input)
(dev->board == CX23885_BOARD_AVERMEDIA_HC81R) ||
(dev->board == CX23885_BOARD_VIEWCAST_260E) ||
(dev->board == CX23885_BOARD_VIEWCAST_460E) ||
- (dev->board == CX23885_BOARD_AVERMEDIA_CE310B)) {
+ (dev->board == CX23885_BOARD_AVERMEDIA_CE310B) ||
+ (dev->board == CX23885_BOARD_AVERMEDIA_H789C)) {
/* Configure audio routing */
v4l2_subdev_call(dev->sd_cx25840, audio, s_routing,
INPUT(input)->amux, 0, 0);
diff --git a/drivers/media/pci/cx23885/cx23885.h b/drivers/media/pci/cx23885/cx23885.h
index 349462ee2c48..8ba1f306238c 100644
--- a/drivers/media/pci/cx23885/cx23885.h
+++ b/drivers/media/pci/cx23885/cx23885.h
@@ -102,6 +102,7 @@
#define CX23885_BOARD_HAUPPAUGE_QUADHD_DVB_885 60
#define CX23885_BOARD_HAUPPAUGE_QUADHD_ATSC_885 61
#define CX23885_BOARD_AVERMEDIA_CE310B 62
+#define CX23885_BOARD_AVERMEDIA_H789C 63
#define GPIO_0 0x00000001
#define GPIO_1 0x00000002
diff --git a/drivers/media/pci/cx23885/netup-eeprom.c b/drivers/media/pci/cx23885/netup-eeprom.c
index 26dd5b3884e6..335d150d81de 100644
--- a/drivers/media/pci/cx23885/netup-eeprom.c
+++ b/drivers/media/pci/cx23885/netup-eeprom.c
@@ -49,35 +49,6 @@ int netup_eeprom_read(struct i2c_adapter *i2c_adap, u8 addr)
return buf[1];
};
-int netup_eeprom_write(struct i2c_adapter *i2c_adap, u8 addr, u8 data)
-{
- int ret;
- unsigned char bufw[2];
-
- /* Write into EEPROM */
- struct i2c_msg msg[] = {
- {
- .addr = EEPROM_I2C_ADDR,
- .flags = 0,
- .buf = &bufw[0],
- .len = 2
- }
- };
-
- bufw[0] = addr;
- bufw[1] = data;
-
- ret = i2c_transfer(i2c_adap, msg, 1);
-
- if (ret != 1) {
- pr_err("eeprom i2c write error, status=%d\n", ret);
- return -1;
- }
-
- mdelay(10); /* prophylactic delay, datasheet write cycle time = 5 ms */
- return 0;
-};
-
void netup_get_card_info(struct i2c_adapter *i2c_adap,
struct netup_card_info *cinfo)
{
diff --git a/drivers/media/pci/cx23885/netup-eeprom.h b/drivers/media/pci/cx23885/netup-eeprom.h
index 549a033679f7..fb8217eb455c 100644
--- a/drivers/media/pci/cx23885/netup-eeprom.h
+++ b/drivers/media/pci/cx23885/netup-eeprom.h
@@ -21,7 +21,6 @@ struct netup_card_info {
};
extern int netup_eeprom_read(struct i2c_adapter *i2c_adap, u8 addr);
-extern int netup_eeprom_write(struct i2c_adapter *i2c_adap, u8 addr, u8 data);
extern void netup_get_card_info(struct i2c_adapter *i2c_adap,
struct netup_card_info *cinfo);
diff --git a/drivers/media/pci/cx88/cx88-input.c b/drivers/media/pci/cx88/cx88-input.c
index a04a1d33fadb..b9f2c14d62b4 100644
--- a/drivers/media/pci/cx88/cx88-input.c
+++ b/drivers/media/pci/cx88/cx88-input.c
@@ -190,8 +190,7 @@ static int __cx88_ir_start(void *priv)
ir = core->ir;
if (ir->polling) {
- hrtimer_init(&ir->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- ir->timer.function = cx88_ir_work;
+ hrtimer_setup(&ir->timer, cx88_ir_work, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
hrtimer_start(&ir->timer,
ktime_set(0, ir->polling * 1000000),
HRTIMER_MODE_REL);
diff --git a/drivers/media/pci/intel/ipu3/ipu3-cio2.c b/drivers/media/pci/intel/ipu3/ipu3-cio2.c
index dd73d534ac49..0c365eb59085 100644
--- a/drivers/media/pci/intel/ipu3/ipu3-cio2.c
+++ b/drivers/media/pci/intel/ipu3/ipu3-cio2.c
@@ -309,12 +309,17 @@ static int cio2_csi2_calc_timing(struct cio2_device *cio2, struct cio2_queue *q,
unsigned int bpp, unsigned int lanes)
{
struct device *dev = &cio2->pci_dev->dev;
+ struct media_pad *src_pad;
s64 freq;
- if (!q->sensor)
- return -ENODEV;
+ src_pad = media_entity_remote_source_pad_unique(&q->subdev.entity);
+ if (IS_ERR(src_pad)) {
+ dev_err(dev, "can't get source pad of %s (%ld)\n",
+ q->subdev.name, PTR_ERR(src_pad));
+ return PTR_ERR(src_pad);
+ }
- freq = v4l2_get_link_freq(q->sensor->ctrl_handler, bpp, lanes * 2);
+ freq = v4l2_get_link_freq(src_pad, bpp, lanes * 2);
if (freq < 0) {
dev_err(dev, "error %lld, invalid link_freq\n", freq);
return freq;
diff --git a/drivers/media/pci/intel/ipu6/ipu6-dma.c b/drivers/media/pci/intel/ipu6/ipu6-dma.c
index b34022bad83b..1ca60ca79dba 100644
--- a/drivers/media/pci/intel/ipu6/ipu6-dma.c
+++ b/drivers/media/pci/intel/ipu6/ipu6-dma.c
@@ -457,36 +457,3 @@ void ipu6_dma_unmap_sgtable(struct ipu6_bus_device *sys, struct sg_table *sgt,
ipu6_dma_unmap_sg(sys, sgt->sgl, sgt->nents, dir, attrs);
}
EXPORT_SYMBOL_NS_GPL(ipu6_dma_unmap_sgtable, "INTEL_IPU6");
-
-/*
- * Create scatter-list for the already allocated DMA buffer
- */
-int ipu6_dma_get_sgtable(struct ipu6_bus_device *sys, struct sg_table *sgt,
- void *cpu_addr, dma_addr_t handle, size_t size,
- unsigned long attrs)
-{
- struct device *dev = &sys->auxdev.dev;
- struct ipu6_mmu *mmu = sys->mmu;
- struct vm_info *info;
- int n_pages;
- int ret = 0;
-
- info = get_vm_info(mmu, handle);
- if (!info)
- return -EFAULT;
-
- if (!info->vaddr)
- return -EFAULT;
-
- if (WARN_ON(!info->pages))
- return -ENOMEM;
-
- n_pages = PFN_UP(size);
-
- ret = sg_alloc_table_from_pages(sgt, info->pages, n_pages, 0, size,
- GFP_KERNEL);
- if (ret)
- dev_warn(dev, "get sgt table failed\n");
-
- return ret;
-}
diff --git a/drivers/media/pci/intel/ipu6/ipu6-dma.h b/drivers/media/pci/intel/ipu6/ipu6-dma.h
index b51244add9e6..2882850d9366 100644
--- a/drivers/media/pci/intel/ipu6/ipu6-dma.h
+++ b/drivers/media/pci/intel/ipu6/ipu6-dma.h
@@ -43,7 +43,4 @@ int ipu6_dma_map_sgtable(struct ipu6_bus_device *sys, struct sg_table *sgt,
enum dma_data_direction dir, unsigned long attrs);
void ipu6_dma_unmap_sgtable(struct ipu6_bus_device *sys, struct sg_table *sgt,
enum dma_data_direction dir, unsigned long attrs);
-int ipu6_dma_get_sgtable(struct ipu6_bus_device *sys, struct sg_table *sgt,
- void *cpu_addr, dma_addr_t handle, size_t size,
- unsigned long attrs);
#endif /* IPU6_DMA_H */
diff --git a/drivers/media/pci/intel/ipu6/ipu6-isys-csi2.c b/drivers/media/pci/intel/ipu6/ipu6-isys-csi2.c
index 051898ce53f4..da8581a37e22 100644
--- a/drivers/media/pci/intel/ipu6/ipu6-isys-csi2.c
+++ b/drivers/media/pci/intel/ipu6/ipu6-isys-csi2.c
@@ -80,25 +80,19 @@ static const struct ipu6_csi2_error dphy_rx_errors[] = {
s64 ipu6_isys_csi2_get_link_freq(struct ipu6_isys_csi2 *csi2)
{
struct media_pad *src_pad;
- struct v4l2_subdev *ext_sd;
- struct device *dev;
if (!csi2)
return -EINVAL;
- dev = &csi2->isys->adev->auxdev.dev;
src_pad = media_entity_remote_source_pad_unique(&csi2->asd.sd.entity);
if (IS_ERR(src_pad)) {
- dev_err(dev, "can't get source pad of %s (%ld)\n",
+ dev_err(&csi2->isys->adev->auxdev.dev,
+ "can't get source pad of %s (%ld)\n",
csi2->asd.sd.name, PTR_ERR(src_pad));
return PTR_ERR(src_pad);
}
- ext_sd = media_entity_to_v4l2_subdev(src_pad->entity);
- if (WARN(!ext_sd, "Failed to get subdev for %s\n", csi2->asd.sd.name))
- return -ENODEV;
-
- return v4l2_get_link_freq(ext_sd->ctrl_handler, 0, 0);
+ return v4l2_get_link_freq(src_pad, 0, 0);
}
static int csi2_subscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh,
diff --git a/drivers/media/pci/intel/ipu6/ipu6-isys-queue.c b/drivers/media/pci/intel/ipu6/ipu6-isys-queue.c
index bbb66b56ee88..72f5f987ef48 100644
--- a/drivers/media/pci/intel/ipu6/ipu6-isys-queue.c
+++ b/drivers/media/pci/intel/ipu6/ipu6-isys-queue.c
@@ -804,8 +804,6 @@ void ipu6_isys_queue_buf_ready(struct ipu6_isys_stream *stream,
static const struct vb2_ops ipu6_isys_queue_ops = {
.queue_setup = ipu6_isys_queue_setup,
- .wait_prepare = vb2_ops_wait_prepare,
- .wait_finish = vb2_ops_wait_finish,
.buf_init = ipu6_isys_buf_init,
.buf_prepare = ipu6_isys_buf_prepare,
.buf_cleanup = ipu6_isys_buf_cleanup,
diff --git a/drivers/media/pci/intel/ipu6/ipu6-isys-video.c b/drivers/media/pci/intel/ipu6/ipu6-isys-video.c
index 387963529adb..959869a88556 100644
--- a/drivers/media/pci/intel/ipu6/ipu6-isys-video.c
+++ b/drivers/media/pci/intel/ipu6/ipu6-isys-video.c
@@ -1296,6 +1296,7 @@ int ipu6_isys_video_init(struct ipu6_isys_video *av)
av->vdev.release = video_device_release_empty;
av->vdev.fops = &isys_fops;
av->vdev.v4l2_dev = &av->isys->v4l2_dev;
+ av->vdev.dev_parent = &av->isys->adev->isp->pdev->dev;
if (!av->vdev.ioctl_ops)
av->vdev.ioctl_ops = &ipu6_v4l2_ioctl_ops;
av->vdev.queue = &av->aq.vbq;
diff --git a/drivers/media/pci/intel/ivsc/mei_csi.c b/drivers/media/pci/intel/ivsc/mei_csi.c
index 6a893c4547b2..92d871a378ba 100644
--- a/drivers/media/pci/intel/ivsc/mei_csi.c
+++ b/drivers/media/pci/intel/ivsc/mei_csi.c
@@ -35,8 +35,6 @@
#define MEI_CSI_ENTITY_NAME "Intel IVSC CSI"
-#define MEI_CSI_LINK_FREQ_400MHZ 400000000ULL
-
/* the 5s used here is based on experiment */
#define CSI_CMD_TIMEOUT (5 * HZ)
/* to setup CSI-2 link an extra delay needed and determined experimentally */
@@ -121,14 +119,13 @@ struct mei_csi {
struct mutex lock;
struct v4l2_subdev subdev;
- struct v4l2_subdev *remote;
+ struct media_pad *remote;
struct v4l2_async_notifier notifier;
struct v4l2_ctrl_handler ctrl_handler;
struct v4l2_ctrl *freq_ctrl;
struct v4l2_ctrl *privacy_ctrl;
/* lock for v4l2 controls */
struct mutex ctrl_lock;
- unsigned int remote_pad;
/* start streaming or not */
int streaming;
@@ -147,10 +144,6 @@ static const struct v4l2_mbus_framefmt mei_csi_format_mbus_default = {
.field = V4L2_FIELD_NONE,
};
-static s64 link_freq_menu_items[] = {
- MEI_CSI_LINK_FREQ_400MHZ
-};
-
static inline struct mei_csi *notifier_to_csi(struct v4l2_async_notifier *n)
{
return container_of(n, struct mei_csi, notifier);
@@ -161,11 +154,6 @@ static inline struct mei_csi *sd_to_csi(struct v4l2_subdev *sd)
return container_of(sd, struct mei_csi, subdev);
}
-static inline struct mei_csi *ctrl_to_csi(struct v4l2_ctrl *ctrl)
-{
- return container_of(ctrl->handler, struct mei_csi, ctrl_handler);
-}
-
/* send a command to firmware and mutex must be held by caller */
static int mei_csi_send(struct mei_csi *csi, u8 *buf, size_t len)
{
@@ -286,11 +274,13 @@ static void mei_csi_rx(struct mei_cl_device *cldev)
static int mei_csi_set_stream(struct v4l2_subdev *sd, int enable)
{
struct mei_csi *csi = sd_to_csi(sd);
+ struct v4l2_subdev *remote_sd =
+ media_entity_to_v4l2_subdev(csi->remote->entity);
s64 freq;
int ret;
if (enable && csi->streaming == 0) {
- freq = v4l2_get_link_freq(csi->remote->ctrl_handler, 0, 0);
+ freq = v4l2_get_link_freq(csi->remote, 0, 0);
if (freq < 0) {
dev_err(&csi->cldev->dev,
"error %lld, invalid link_freq\n", freq);
@@ -309,11 +299,11 @@ static int mei_csi_set_stream(struct v4l2_subdev *sd, int enable)
if (ret < 0)
goto err_switch;
- ret = v4l2_subdev_call(csi->remote, video, s_stream, 1);
+ ret = v4l2_subdev_call(remote_sd, video, s_stream, 1);
if (ret)
goto err_switch;
} else if (!enable && csi->streaming == 1) {
- v4l2_subdev_call(csi->remote, video, s_stream, 0);
+ v4l2_subdev_call(remote_sd, video, s_stream, 0);
/* switch CSI-2 link to IVSC */
ret = csi_set_link_owner(csi, CSI_LINK_IVSC);
@@ -470,34 +460,30 @@ static int mei_csi_set_fmt(struct v4l2_subdev *sd,
return 0;
}
-static int mei_csi_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
+static int mei_csi_get_mbus_config(struct v4l2_subdev *sd, unsigned int pad,
+ struct v4l2_mbus_config *mbus_config)
{
- struct mei_csi *csi = ctrl_to_csi(ctrl);
+ struct mei_csi *csi = sd_to_csi(sd);
+ unsigned int i;
s64 freq;
- if (ctrl->id == V4L2_CID_LINK_FREQ) {
- if (!csi->remote)
- return -EINVAL;
+ mbus_config->type = V4L2_MBUS_CSI2_DPHY;
+ for (i = 0; i < V4L2_MBUS_CSI2_MAX_DATA_LANES; i++)
+ mbus_config->bus.mipi_csi2.data_lanes[i] = i + 1;
+ mbus_config->bus.mipi_csi2.num_data_lanes = csi->nr_of_lanes;
- freq = v4l2_get_link_freq(csi->remote->ctrl_handler, 0, 0);
- if (freq < 0) {
- dev_err(&csi->cldev->dev,
- "error %lld, invalid link_freq\n", freq);
- return -EINVAL;
- }
-
- link_freq_menu_items[0] = freq;
- ctrl->val = 0;
-
- return 0;
+ freq = v4l2_get_link_freq(csi->remote, 0, 0);
+ if (freq < 0) {
+ dev_err(&csi->cldev->dev,
+ "error %lld, invalid link_freq\n", freq);
+ return -EINVAL;
}
- return -EINVAL;
-}
+ csi->link_freq = freq;
+ mbus_config->link_freq = freq;
-static const struct v4l2_ctrl_ops mei_csi_ctrl_ops = {
- .g_volatile_ctrl = mei_csi_g_volatile_ctrl,
-};
+ return 0;
+}
static const struct v4l2_subdev_video_ops mei_csi_video_ops = {
.s_stream = mei_csi_set_stream,
@@ -506,6 +492,7 @@ static const struct v4l2_subdev_video_ops mei_csi_video_ops = {
static const struct v4l2_subdev_pad_ops mei_csi_pad_ops = {
.get_fmt = v4l2_subdev_get_fmt,
.set_fmt = mei_csi_set_fmt,
+ .get_mbus_config = mei_csi_get_mbus_config,
};
static const struct v4l2_subdev_ops mei_csi_subdev_ops = {
@@ -533,8 +520,7 @@ static int mei_csi_notify_bound(struct v4l2_async_notifier *notifier,
if (pad < 0)
return pad;
- csi->remote = subdev;
- csi->remote_pad = pad;
+ csi->remote = &subdev->entity.pads[pad];
return media_create_pad_link(&subdev->entity, pad,
&csi->subdev.entity, CSI_PAD_SINK,
@@ -558,28 +544,16 @@ static const struct v4l2_async_notifier_operations mei_csi_notify_ops = {
static int mei_csi_init_controls(struct mei_csi *csi)
{
- u32 max;
int ret;
mutex_init(&csi->ctrl_lock);
- ret = v4l2_ctrl_handler_init(&csi->ctrl_handler, 2);
+ ret = v4l2_ctrl_handler_init(&csi->ctrl_handler, 1);
if (ret)
return ret;
csi->ctrl_handler.lock = &csi->ctrl_lock;
- max = ARRAY_SIZE(link_freq_menu_items) - 1;
- csi->freq_ctrl = v4l2_ctrl_new_int_menu(&csi->ctrl_handler,
- &mei_csi_ctrl_ops,
- V4L2_CID_LINK_FREQ,
- max,
- 0,
- link_freq_menu_items);
- if (csi->freq_ctrl)
- csi->freq_ctrl->flags |= V4L2_CTRL_FLAG_READ_ONLY |
- V4L2_CTRL_FLAG_VOLATILE;
-
csi->privacy_ctrl = v4l2_ctrl_new_std(&csi->ctrl_handler, NULL,
V4L2_CID_PRIVACY, 0, 1, 1, 0);
if (csi->privacy_ctrl)
diff --git a/drivers/media/pci/mgb4/mgb4_cmt.c b/drivers/media/pci/mgb4/mgb4_cmt.c
index a25b68403bc6..c22ef51436ed 100644
--- a/drivers/media/pci/mgb4/mgb4_cmt.c
+++ b/drivers/media/pci/mgb4/mgb4_cmt.c
@@ -135,8 +135,8 @@ static const u16 cmt_vals_out[][15] = {
};
static const u16 cmt_vals_in[][13] = {
- {0x1082, 0x0000, 0x5104, 0x0000, 0x11C7, 0x0000, 0x1041, 0x02BC, 0x7C01, 0xFFE9, 0x9900, 0x9908, 0x8100},
{0x1104, 0x0000, 0x9208, 0x0000, 0x138E, 0x0000, 0x1041, 0x015E, 0x7C01, 0xFFE9, 0x0100, 0x0908, 0x1000},
+ {0x1082, 0x0000, 0x5104, 0x0000, 0x11C7, 0x0000, 0x1041, 0x02BC, 0x7C01, 0xFFE9, 0x9900, 0x9908, 0x8100},
};
static const u32 cmt_addrs_out[][15] = {
@@ -206,10 +206,11 @@ u32 mgb4_cmt_set_vout_freq(struct mgb4_vout_dev *voutdev, unsigned int freq)
mgb4_write_reg(video, regs->config, 0x1 | (config & ~0x3));
+ mgb4_mask_reg(video, regs->config, 0x100, 0x100);
+
for (i = 0; i < ARRAY_SIZE(cmt_addrs_out[0]); i++)
mgb4_write_reg(&voutdev->mgbdev->cmt, addr[i], reg_set[i]);
- mgb4_mask_reg(video, regs->config, 0x100, 0x100);
mgb4_mask_reg(video, regs->config, 0x100, 0x0);
mgb4_write_reg(video, regs->config, config & ~0x1);
@@ -236,10 +237,11 @@ void mgb4_cmt_set_vin_freq_range(struct mgb4_vin_dev *vindev,
mgb4_write_reg(video, regs->config, 0x1 | (config & ~0x3));
+ mgb4_mask_reg(video, regs->config, 0x1000, 0x1000);
+
for (i = 0; i < ARRAY_SIZE(cmt_addrs_in[0]); i++)
mgb4_write_reg(&vindev->mgbdev->cmt, addr[i], reg_set[i]);
- mgb4_mask_reg(video, regs->config, 0x1000, 0x1000);
mgb4_mask_reg(video, regs->config, 0x1000, 0x0);
mgb4_write_reg(video, regs->config, config & ~0x1);
diff --git a/drivers/media/pci/mgb4/mgb4_core.c b/drivers/media/pci/mgb4/mgb4_core.c
index f90ffc4dad52..3ce6b717ca32 100644
--- a/drivers/media/pci/mgb4/mgb4_core.c
+++ b/drivers/media/pci/mgb4/mgb4_core.c
@@ -406,8 +406,9 @@ static int get_module_version(struct mgb4_dev *mgbdev)
dev_err(dev, "unknown module type\n");
return -EINVAL;
}
- fw_version = mgb4_read_reg(&mgbdev->video, 0xC4);
- if (fw_version >> 24 != mgbdev->module_version >> 4) {
+ fw_version = mgb4_read_reg(&mgbdev->video, 0xC4) >> 24;
+ if ((MGB4_IS_FPDL3(mgbdev) && fw_version != 1) ||
+ (MGB4_IS_GMSL(mgbdev) && fw_version != 2)) {
dev_err(dev, "module/firmware type mismatch\n");
return -EINVAL;
}
@@ -599,14 +600,18 @@ static int mgb4_probe(struct pci_dev *pdev, const struct pci_device_id *id)
rv = get_module_version(mgbdev);
if (rv < 0)
goto exit;
+ /* Propagate the module type (version) to the FPGA */
+ mgb4_write_reg(&mgbdev->video, 0xD4, mgbdev->module_version);
/* Video input v4l2 devices */
for (i = 0; i < MGB4_VIN_DEVICES; i++)
mgbdev->vin[i] = mgb4_vin_create(mgbdev, i);
/* Video output v4l2 devices */
- for (i = 0; i < MGB4_VOUT_DEVICES; i++)
- mgbdev->vout[i] = mgb4_vout_create(mgbdev, i);
+ if (MGB4_HAS_VOUT(mgbdev)) {
+ for (i = 0; i < MGB4_VOUT_DEVICES; i++)
+ mgbdev->vout[i] = mgb4_vout_create(mgbdev, i);
+ }
/* Triggers */
mgbdev->indio_dev = mgb4_trigger_create(mgbdev);
diff --git a/drivers/media/pci/mgb4/mgb4_core.h b/drivers/media/pci/mgb4/mgb4_core.h
index e86742d7b6c4..cc24068400a2 100644
--- a/drivers/media/pci/mgb4/mgb4_core.h
+++ b/drivers/media/pci/mgb4/mgb4_core.h
@@ -19,9 +19,13 @@
#define MGB4_VOUT_DEVICES 2
#define MGB4_IS_GMSL(mgbdev) \
- ((mgbdev)->module_version >> 4 == 2)
+ ((((mgbdev)->module_version >> 4) >= 2) && \
+ (((mgbdev)->module_version >> 4) <= 4))
#define MGB4_IS_FPDL3(mgbdev) \
- ((mgbdev)->module_version >> 4 == 1)
+ (((mgbdev)->module_version >> 4) == 1)
+#define MGB4_HAS_VOUT(mgbdev) \
+ ((((mgbdev)->module_version >> 4) >= 1) && \
+ (((mgbdev)->module_version >> 4) <= 3))
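Read together, the macros above decode the upper nibble of module_version: 1 means FPDL3, 2 to 4 mean GMSL, and only types 1 to 3 expose video outputs. The program below is a small sketch of that decoding with an assumed example value; it mirrors the macros rather than quoting driver code.

#include <stdbool.h>
#include <stdio.h>

static bool is_fpdl3(unsigned int module_version)
{
	return (module_version >> 4) == 1;
}

static bool is_gmsl(unsigned int module_version)
{
	unsigned int type = module_version >> 4;

	return type >= 2 && type <= 4;
}

static bool has_vout(unsigned int module_version)
{
	unsigned int type = module_version >> 4;

	return type >= 1 && type <= 3;
}

int main(void)
{
	unsigned int v = 0x21;	/* assumed example: type 2, revision 1 */

	printf("fpdl3=%d gmsl=%d vout=%d\n", is_fpdl3(v), is_gmsl(v), has_vout(v));
	return 0;
}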
struct mgb4_dma_channel {
struct dma_chan *chan;
diff --git a/drivers/media/pci/mgb4/mgb4_regs.c b/drivers/media/pci/mgb4/mgb4_regs.c
index 31befd722d72..b45537dbfafa 100644
--- a/drivers/media/pci/mgb4/mgb4_regs.c
+++ b/drivers/media/pci/mgb4/mgb4_regs.c
@@ -5,6 +5,7 @@
*/
#include <linux/ioport.h>
+#include <linux/errno.h>
#include "mgb4_regs.h"
int mgb4_regs_map(struct resource *res, struct mgb4_regs *regs)
diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig
index 85d2627776b6..9287faafdce5 100644
--- a/drivers/media/platform/Kconfig
+++ b/drivers/media/platform/Kconfig
@@ -85,6 +85,7 @@ source "drivers/media/platform/rockchip/Kconfig"
source "drivers/media/platform/samsung/Kconfig"
source "drivers/media/platform/st/Kconfig"
source "drivers/media/platform/sunxi/Kconfig"
+source "drivers/media/platform/synopsys/Kconfig"
source "drivers/media/platform/ti/Kconfig"
source "drivers/media/platform/verisilicon/Kconfig"
source "drivers/media/platform/via/Kconfig"
diff --git a/drivers/media/platform/Makefile b/drivers/media/platform/Makefile
index ace4e34483dd..6fd7db0541c7 100644
--- a/drivers/media/platform/Makefile
+++ b/drivers/media/platform/Makefile
@@ -28,6 +28,7 @@ obj-y += rockchip/
obj-y += samsung/
obj-y += st/
obj-y += sunxi/
+obj-y += synopsys/
obj-y += ti/
obj-y += verisilicon/
obj-y += via/
diff --git a/drivers/media/platform/allegro-dvt/allegro-core.c b/drivers/media/platform/allegro-dvt/allegro-core.c
index e491399afcc9..eb03df0d8652 100644
--- a/drivers/media/platform/allegro-dvt/allegro-core.c
+++ b/drivers/media/platform/allegro-dvt/allegro-core.c
@@ -3912,6 +3912,7 @@ static int allegro_probe(struct platform_device *pdev)
if (ret < 0) {
v4l2_err(&dev->v4l2_dev,
"failed to request firmware: %d\n", ret);
+ v4l2_device_unregister(&dev->v4l2_dev);
return ret;
}
diff --git a/drivers/media/platform/cadence/cdns-csi2rx.c b/drivers/media/platform/cadence/cdns-csi2rx.c
index 4d64df829e75..cebcae196eec 100644
--- a/drivers/media/platform/cadence/cdns-csi2rx.c
+++ b/drivers/media/platform/cadence/cdns-csi2rx.c
@@ -164,6 +164,8 @@ static void csi2rx_reset(struct csi2rx_priv *csi2rx)
static int csi2rx_configure_ext_dphy(struct csi2rx_priv *csi2rx)
{
+ struct media_pad *src_pad =
+ &csi2rx->source_subdev->entity.pads[csi2rx->source_pad];
union phy_configure_opts opts = { };
struct phy_configure_opts_mipi_dphy *cfg = &opts.mipi_dphy;
struct v4l2_subdev_format sd_fmt = {
@@ -181,7 +183,7 @@ static int csi2rx_configure_ext_dphy(struct csi2rx_priv *csi2rx)
fmt = csi2rx_get_fmt_by_code(sd_fmt.format.code);
- link_freq = v4l2_get_link_freq(csi2rx->source_subdev->ctrl_handler,
+ link_freq = v4l2_get_link_freq(src_pad,
fmt->bpp, 2 * csi2rx->num_lanes);
if (link_freq < 0)
return link_freq;
diff --git a/drivers/media/platform/chips-media/coda/coda-common.c b/drivers/media/platform/chips-media/coda/coda-common.c
index 289a076c3bcc..e6e3f5ec24f6 100644
--- a/drivers/media/platform/chips-media/coda/coda-common.c
+++ b/drivers/media/platform/chips-media/coda/coda-common.c
@@ -3340,6 +3340,7 @@ static int coda_runtime_resume(struct device *dev)
static const struct dev_pm_ops coda_pm_ops = {
SET_RUNTIME_PM_OPS(NULL, coda_runtime_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
};
static struct platform_driver coda_driver = {
diff --git a/drivers/media/platform/chips-media/wave5/wave5-hw.c b/drivers/media/platform/chips-media/wave5/wave5-hw.c
index c8a905994109..d94cf84c3ee5 100644
--- a/drivers/media/platform/chips-media/wave5/wave5-hw.c
+++ b/drivers/media/platform/chips-media/wave5/wave5-hw.c
@@ -585,7 +585,7 @@ int wave5_vpu_build_up_dec_param(struct vpu_instance *inst,
vpu_write_reg(inst->dev, W5_CMD_NUM_CQ_DEPTH_M1,
WAVE521_COMMAND_QUEUE_DEPTH - 1);
}
-
+ vpu_write_reg(inst->dev, W5_CMD_ERR_CONCEAL, 0);
ret = send_firmware_command(inst, W5_CREATE_INSTANCE, true, NULL, NULL);
if (ret) {
wave5_vdi_free_dma_memory(vpu_dev, &p_dec_info->vb_work);
diff --git a/drivers/media/platform/chips-media/wave5/wave5-vpu-dec.c b/drivers/media/platform/chips-media/wave5/wave5-vpu-dec.c
index d3ff420c52ce..fd71f0c43ac3 100644
--- a/drivers/media/platform/chips-media/wave5/wave5-vpu-dec.c
+++ b/drivers/media/platform/chips-media/wave5/wave5-vpu-dec.c
@@ -1345,10 +1345,24 @@ static int wave5_vpu_dec_start_streaming(struct vb2_queue *q, unsigned int count
if (ret)
goto free_bitstream_vbuf;
} else if (q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+ struct dec_initial_info *initial_info =
+ &inst->codec_info->dec_info.initial_info;
+
if (inst->state == VPU_INST_STATE_STOP)
ret = switch_state(inst, VPU_INST_STATE_INIT_SEQ);
if (ret)
goto return_buffers;
+
+ if (inst->state == VPU_INST_STATE_INIT_SEQ &&
+ inst->dev->product_code == WAVE521C_CODE) {
+ if (initial_info->luma_bitdepth != 8) {
+ dev_info(inst->dev->dev, "%s: no support for %d bit depth",
+ __func__, initial_info->luma_bitdepth);
+ ret = -EINVAL;
+ goto return_buffers;
+ }
+ }
+
}
pm_runtime_mark_last_busy(inst->dev->dev);
pm_runtime_put_autosuspend(inst->dev->dev);
@@ -1369,6 +1383,16 @@ static int streamoff_output(struct vb2_queue *q)
struct vb2_v4l2_buffer *buf;
int ret;
dma_addr_t new_rd_ptr;
+ struct dec_output_info dec_info;
+ unsigned int i;
+
+ for (i = 0; i < v4l2_m2m_num_dst_bufs_ready(m2m_ctx); i++) {
+ ret = wave5_vpu_dec_set_disp_flag(inst, i);
+ if (ret)
+ dev_dbg(inst->dev->dev,
+ "%s: Setting display flag of buf index: %u, fail: %d\n",
+ __func__, i, ret);
+ }
while ((buf = v4l2_m2m_src_buf_remove(m2m_ctx))) {
dev_dbg(inst->dev->dev, "%s: (Multiplanar) buf type %4u | index %4u\n",
@@ -1376,6 +1400,11 @@ static int streamoff_output(struct vb2_queue *q)
v4l2_m2m_buf_done(buf, VB2_BUF_STATE_ERROR);
}
+ while (wave5_vpu_dec_get_output_info(inst, &dec_info) == 0) {
+ if (dec_info.index_frame_display >= 0)
+ wave5_vpu_dec_set_disp_flag(inst, dec_info.index_frame_display);
+ }
+
ret = wave5_vpu_flush_instance(inst);
if (ret)
return ret;
@@ -1459,7 +1488,7 @@ static void wave5_vpu_dec_stop_streaming(struct vb2_queue *q)
break;
if (wave5_vpu_dec_get_output_info(inst, &dec_output_info))
- dev_dbg(inst->dev->dev, "Getting decoding results from fw, fail\n");
+ dev_dbg(inst->dev->dev, "there is no output info\n");
}
v4l2_m2m_update_stop_streaming_state(m2m_ctx, q);
diff --git a/drivers/media/platform/chips-media/wave5/wave5-vpu.c b/drivers/media/platform/chips-media/wave5/wave5-vpu.c
index d1320298a0f7..e1715d3f43b0 100644
--- a/drivers/media/platform/chips-media/wave5/wave5-vpu.c
+++ b/drivers/media/platform/chips-media/wave5/wave5-vpu.c
@@ -55,12 +55,12 @@ static void wave5_vpu_handle_irq(void *dev_id)
struct vpu_device *dev = dev_id;
irq_reason = wave5_vdi_read_register(dev, W5_VPU_VINT_REASON);
+ seq_done = wave5_vdi_read_register(dev, W5_RET_SEQ_DONE_INSTANCE_INFO);
+ cmd_done = wave5_vdi_read_register(dev, W5_RET_QUEUE_CMD_DONE_INST);
wave5_vdi_write_register(dev, W5_VPU_VINT_REASON_CLR, irq_reason);
wave5_vdi_write_register(dev, W5_VPU_VINT_CLEAR, 0x1);
list_for_each_entry(inst, &dev->instances, list) {
- seq_done = wave5_vdi_read_register(dev, W5_RET_SEQ_DONE_INSTANCE_INFO);
- cmd_done = wave5_vdi_read_register(dev, W5_RET_QUEUE_CMD_DONE_INST);
if (irq_reason & BIT(INT_WAVE5_INIT_SEQ) ||
irq_reason & BIT(INT_WAVE5_ENC_SET_PARAM)) {
@@ -269,8 +269,8 @@ static int wave5_vpu_probe(struct platform_device *pdev)
dev->irq = platform_get_irq(pdev, 0);
if (dev->irq < 0) {
dev_err(&pdev->dev, "failed to get irq resource, falling back to polling\n");
- hrtimer_init(&dev->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
- dev->hrtimer.function = &wave5_vpu_timer_callback;
+ hrtimer_setup(&dev->hrtimer, &wave5_vpu_timer_callback, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL_PINNED);
dev->worker = kthread_run_worker(0, "vpu_irq_thread");
if (IS_ERR(dev->worker)) {
dev_err(&pdev->dev, "failed to create vpu irq worker\n");
diff --git a/drivers/media/platform/chips-media/wave5/wave5-vpuapi.c b/drivers/media/platform/chips-media/wave5/wave5-vpuapi.c
index e16b990041c2..e5e879a13e8b 100644
--- a/drivers/media/platform/chips-media/wave5/wave5-vpuapi.c
+++ b/drivers/media/platform/chips-media/wave5/wave5-vpuapi.c
@@ -75,6 +75,16 @@ int wave5_vpu_flush_instance(struct vpu_instance *inst)
inst->type == VPU_INST_TYPE_DEC ? "DECODER" : "ENCODER", inst->id);
mutex_unlock(&inst->dev->hw_lock);
return -ETIMEDOUT;
+ } else if (ret == -EBUSY) {
+ struct dec_output_info dec_info;
+
+ mutex_unlock(&inst->dev->hw_lock);
+ wave5_vpu_dec_get_output_info(inst, &dec_info);
+ ret = mutex_lock_interruptible(&inst->dev->hw_lock);
+ if (ret)
+ return ret;
+ if (dec_info.index_frame_display > 0)
+ wave5_vpu_dec_set_disp_flag(inst, dec_info.index_frame_display);
}
} while (ret != 0);
mutex_unlock(&inst->dev->hw_lock);
diff --git a/drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_fw_scp.c b/drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_fw_scp.c
index ff23b225db70..1b0bc47355c0 100644
--- a/drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_fw_scp.c
+++ b/drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_fw_scp.c
@@ -79,8 +79,11 @@ struct mtk_vcodec_fw *mtk_vcodec_fw_scp_init(void *priv, enum mtk_vcodec_fw_use
}
fw = devm_kzalloc(&plat_dev->dev, sizeof(*fw), GFP_KERNEL);
- if (!fw)
+ if (!fw) {
+ scp_put(scp);
return ERR_PTR(-ENOMEM);
+ }
+
fw->type = SCP;
fw->ops = &mtk_vcodec_rproc_msg;
fw->scp = scp;
diff --git a/drivers/media/platform/mediatek/vcodec/encoder/venc/venc_h264_if.c b/drivers/media/platform/mediatek/vcodec/encoder/venc/venc_h264_if.c
index f8145998fcaf..8522f71fc901 100644
--- a/drivers/media/platform/mediatek/vcodec/encoder/venc/venc_h264_if.c
+++ b/drivers/media/platform/mediatek/vcodec/encoder/venc/venc_h264_if.c
@@ -594,7 +594,11 @@ static int h264_enc_init(struct mtk_vcodec_enc_ctx *ctx)
inst->ctx = ctx;
inst->vpu_inst.ctx = ctx;
- inst->vpu_inst.id = is_ext ? SCP_IPI_VENC_H264 : IPI_VENC_H264;
+ if (is_ext)
+ inst->vpu_inst.id = SCP_IPI_VENC_H264;
+ else
+ inst->vpu_inst.id = IPI_VENC_H264;
+
inst->hw_base = mtk_vcodec_get_reg_addr(inst->ctx->dev->reg_base, VENC_SYS);
ret = vpu_enc_init(&inst->vpu_inst);
diff --git a/drivers/media/platform/nuvoton/npcm-video.c b/drivers/media/platform/nuvoton/npcm-video.c
index 024cd8ee1709..7a9d8928ae40 100644
--- a/drivers/media/platform/nuvoton/npcm-video.c
+++ b/drivers/media/platform/nuvoton/npcm-video.c
@@ -1648,8 +1648,8 @@ rel_ctrl_handler:
static int npcm_video_ece_init(struct npcm_video *video)
{
+ struct device_node *ece_node __free(device_node) = NULL;
struct device *dev = video->dev;
- struct device_node *ece_node;
struct platform_device *ece_pdev;
void __iomem *regs;
@@ -1669,7 +1669,7 @@ static int npcm_video_ece_init(struct npcm_video *video)
dev_err(dev, "Failed to find ECE device\n");
return -ENODEV;
}
- of_node_put(ece_node);
+ struct device *ece_dev __free(put_device) = &ece_pdev->dev;
regs = devm_platform_ioremap_resource(ece_pdev, 0);
if (IS_ERR(regs)) {
@@ -1684,7 +1684,7 @@ static int npcm_video_ece_init(struct npcm_video *video)
return PTR_ERR(video->ece.regmap);
}
- video->ece.reset = devm_reset_control_get(&ece_pdev->dev, NULL);
+ video->ece.reset = devm_reset_control_get(ece_dev, NULL);
if (IS_ERR(video->ece.reset)) {
dev_err(dev, "Failed to get ECE reset control in DTS\n");
return PTR_ERR(video->ece.reset);
diff --git a/drivers/media/platform/nxp/imx-mipi-csis.c b/drivers/media/platform/nxp/imx-mipi-csis.c
index 29523bb84d95..d060eadebc7a 100644
--- a/drivers/media/platform/nxp/imx-mipi-csis.c
+++ b/drivers/media/platform/nxp/imx-mipi-csis.c
@@ -597,12 +597,13 @@ static void __mipi_csis_set_format(struct mipi_csis_device *csis,
static int mipi_csis_calculate_params(struct mipi_csis_device *csis,
const struct csis_pix_format *csis_fmt)
{
+ struct media_pad *src_pad =
+ &csis->source.sd->entity.pads[csis->source.pad->index];
s64 link_freq;
u32 lane_rate;
/* Calculate the line rate from the pixel rate. */
- link_freq = v4l2_get_link_freq(csis->source.sd->ctrl_handler,
- csis_fmt->width,
+ link_freq = v4l2_get_link_freq(src_pad, csis_fmt->width,
csis->bus.num_data_lanes * 2);
if (link_freq < 0) {
dev_err(csis->dev, "Unable to obtain link frequency: %d\n",
diff --git a/drivers/media/platform/nxp/imx8mq-mipi-csi2.c b/drivers/media/platform/nxp/imx8mq-mipi-csi2.c
index 1f2657cf6e82..a8bcf60e2f37 100644
--- a/drivers/media/platform/nxp/imx8mq-mipi-csi2.c
+++ b/drivers/media/platform/nxp/imx8mq-mipi-csi2.c
@@ -287,6 +287,7 @@ static int imx8mq_mipi_csi_calc_hs_settle(struct csi_state *state,
struct v4l2_subdev_state *sd_state,
u32 *hs_settle)
{
+ struct media_pad *src_pad;
s64 link_freq;
u32 lane_rate;
unsigned long esc_clk_rate;
@@ -294,13 +295,19 @@ static int imx8mq_mipi_csi_calc_hs_settle(struct csi_state *state,
const struct v4l2_mbus_framefmt *fmt;
const struct csi2_pix_format *csi2_fmt;
+ src_pad = media_entity_remote_source_pad_unique(&sd_state->sd->entity);
+ if (IS_ERR(src_pad)) {
+ dev_err(state->dev, "can't get source pad of %s (%ld)\n",
+ sd_state->sd->name, PTR_ERR(src_pad));
+ return PTR_ERR(src_pad);
+ }
+
/* Calculate the line rate from the pixel rate. */
fmt = v4l2_subdev_state_get_format(sd_state, MIPI_CSI2_PAD_SINK);
csi2_fmt = find_csi2_format(fmt->code);
- link_freq = v4l2_get_link_freq(state->src_sd->ctrl_handler,
- csi2_fmt->width,
+ link_freq = v4l2_get_link_freq(src_pad, csi2_fmt->width,
state->bus.num_data_lanes * 2);
if (link_freq < 0) {
dev_err(state->dev, "Unable to obtain link frequency: %d\n",
diff --git a/drivers/media/platform/qcom/Kconfig b/drivers/media/platform/qcom/Kconfig
index cc5799b9ea00..4f4d3a68e6e5 100644
--- a/drivers/media/platform/qcom/Kconfig
+++ b/drivers/media/platform/qcom/Kconfig
@@ -3,4 +3,5 @@
comment "Qualcomm media platform drivers"
source "drivers/media/platform/qcom/camss/Kconfig"
+source "drivers/media/platform/qcom/iris/Kconfig"
source "drivers/media/platform/qcom/venus/Kconfig"
diff --git a/drivers/media/platform/qcom/Makefile b/drivers/media/platform/qcom/Makefile
index 4f055c396e04..ea2221a202c0 100644
--- a/drivers/media/platform/qcom/Makefile
+++ b/drivers/media/platform/qcom/Makefile
@@ -1,3 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-y += camss/
+obj-y += iris/
obj-y += venus/
diff --git a/drivers/media/platform/qcom/camss/Makefile b/drivers/media/platform/qcom/camss/Makefile
index e636968a1126..f6db5b3b5ace 100644
--- a/drivers/media/platform/qcom/camss/Makefile
+++ b/drivers/media/platform/qcom/camss/Makefile
@@ -7,6 +7,7 @@ qcom-camss-objs += \
camss-csid-4-1.o \
camss-csid-4-7.o \
camss-csid-gen2.o \
+ camss-csid-780.o \
camss-csiphy-2ph-1-0.o \
camss-csiphy-3ph-1-0.o \
camss-csiphy.o \
@@ -16,6 +17,7 @@ qcom-camss-objs += \
camss-vfe-4-8.o \
camss-vfe-17x.o \
camss-vfe-480.o \
+ camss-vfe-780.o \
camss-vfe-gen1.o \
camss-vfe.o \
camss-video.o \
diff --git a/drivers/media/platform/qcom/camss/camss-csid-4-1.c b/drivers/media/platform/qcom/camss/camss-csid-4-1.c
index c95861420502..6998e1c52895 100644
--- a/drivers/media/platform/qcom/camss/camss-csid-4-1.c
+++ b/drivers/media/platform/qcom/camss/camss-csid-4-1.c
@@ -17,7 +17,6 @@
#include "camss-csid-gen1.h"
#include "camss.h"
-#define CAMSS_CSID_HW_VERSION 0x0
#define CAMSS_CSID_CORE_CTRL_0 0x004
#define CAMSS_CSID_CORE_CTRL_1 0x008
#define CAMSS_CSID_RST_CMD 0x00c
@@ -139,15 +138,6 @@ static int csid_configure_testgen_pattern(struct csid_device *csid, s32 val)
return 0;
}
-static u32 csid_hw_version(struct csid_device *csid)
-{
- u32 hw_version = readl_relaxed(csid->base + CAMSS_CSID_HW_VERSION);
-
- dev_dbg(csid->camss->dev, "CSID HW Version = 0x%08x\n", hw_version);
-
- return hw_version;
-}
-
static irqreturn_t csid_isr(int irq, void *dev)
{
struct csid_device *csid = dev;
@@ -180,15 +170,6 @@ static int csid_reset(struct csid_device *csid)
return 0;
}
-static u32 csid_src_pad_code(struct csid_device *csid, u32 sink_code,
- unsigned int match_format_idx, u32 match_code)
-{
- if (match_format_idx > 0)
- return 0;
-
- return sink_code;
-}
-
static void csid_subdev_init(struct csid_device *csid)
{
csid->testgen.modes = csid_testgen_modes;
diff --git a/drivers/media/platform/qcom/camss/camss-csid-4-7.c b/drivers/media/platform/qcom/camss/camss-csid-4-7.c
index 08578a143688..66054d4872e6 100644
--- a/drivers/media/platform/qcom/camss/camss-csid-4-7.c
+++ b/drivers/media/platform/qcom/camss/camss-csid-4-7.c
@@ -16,7 +16,6 @@
#include "camss-csid-gen1.h"
#include "camss.h"
-#define CAMSS_CSID_HW_VERSION 0x0
#define CAMSS_CSID_CORE_CTRL_0 0x004
#define CAMSS_CSID_CORE_CTRL_1 0x008
#define CAMSS_CSID_RST_CMD 0x010
@@ -151,15 +150,6 @@ static int csid_configure_testgen_pattern(struct csid_device *csid, s32 val)
return 0;
}
-static u32 csid_hw_version(struct csid_device *csid)
-{
- u32 hw_version = readl_relaxed(csid->base + CAMSS_CSID_HW_VERSION);
-
- dev_dbg(csid->camss->dev, "CSID HW Version = 0x%08x\n", hw_version);
-
- return hw_version;
-}
-
/*
* isr - CSID module interrupt service routine
* @irq: Interrupt line
@@ -205,38 +195,6 @@ static int csid_reset(struct csid_device *csid)
return 0;
}
-static u32 csid_src_pad_code(struct csid_device *csid, u32 sink_code,
- unsigned int match_format_idx, u32 match_code)
-{
- switch (sink_code) {
- case MEDIA_BUS_FMT_SBGGR10_1X10:
- {
- u32 src_code[] = {
- MEDIA_BUS_FMT_SBGGR10_1X10,
- MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_LE,
- };
-
- return csid_find_code(src_code, ARRAY_SIZE(src_code),
- match_format_idx, match_code);
- }
- case MEDIA_BUS_FMT_Y10_1X10:
- {
- u32 src_code[] = {
- MEDIA_BUS_FMT_Y10_1X10,
- MEDIA_BUS_FMT_Y10_2X8_PADHI_LE,
- };
-
- return csid_find_code(src_code, ARRAY_SIZE(src_code),
- match_format_idx, match_code);
- }
- default:
- if (match_format_idx > 0)
- return 0;
-
- return sink_code;
- }
-}
-
static void csid_subdev_init(struct csid_device *csid)
{
csid->testgen.modes = csid_testgen_modes;
diff --git a/drivers/media/platform/qcom/camss/camss-csid-780.c b/drivers/media/platform/qcom/camss/camss-csid-780.c
new file mode 100644
index 000000000000..4c720d177731
--- /dev/null
+++ b/drivers/media/platform/qcom/camss/camss-csid-780.c
@@ -0,0 +1,337 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Qualcomm MSM Camera Subsystem - CSID (CSI Decoder) Module
+ *
+ * Copyright (c) 2024 Qualcomm Technologies, Inc.
+ */
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+
+#include "camss.h"
+#include "camss-csid.h"
+#include "camss-csid-780.h"
+
+#define CSID_IO_PATH_CFG0(csid) (0x4 * (csid))
+#define OUTPUT_IFE_EN 0x100
+#define INTERNAL_CSID 1
+
+#define CSID_RST_CFG 0xC
+#define RST_MODE BIT(0)
+#define RST_LOCATION BIT(4)
+
+#define CSID_RST_CMD 0x10
+#define SELECT_HW_RST BIT(0)
+#define SELECT_IRQ_RST BIT(2)
+
+#define CSID_IRQ_CMD 0x14
+#define IRQ_CMD_CLEAR BIT(0)
+
+#define CSID_RUP_AUP_CMD 0x18
+#define CSID_RUP_AUP_RDI(rdi) ((BIT(4) | BIT(20)) << (rdi))
+
+#define CSID_TOP_IRQ_STATUS 0x7C
+#define TOP_IRQ_STATUS_RESET_DONE BIT(0)
+
+#define CSID_TOP_IRQ_MASK 0x80
+#define CSID_TOP_IRQ_CLEAR 0x84
+#define CSID_TOP_IRQ_SET 0x88
+
+#define CSID_CSI2_RX_IRQ_STATUS 0x9C
+#define CSID_CSI2_RX_IRQ_MASK 0xA0
+#define CSID_CSI2_RX_IRQ_CLEAR 0xA4
+#define CSID_CSI2_RX_IRQ_SET 0xA8
+
+#define CSID_BUF_DONE_IRQ_STATUS 0x8C
+#define BUF_DONE_IRQ_STATUS_RDI_OFFSET (csid_is_lite(csid) ? 1 : 14)
+#define CSID_BUF_DONE_IRQ_MASK 0x90
+#define CSID_BUF_DONE_IRQ_CLEAR 0x94
+#define CSID_BUF_DONE_IRQ_SET 0x98
+
+#define CSID_CSI2_RDIN_IRQ_STATUS(rdi) (0xEC + 0x10 * (rdi))
+#define RUP_DONE_IRQ_STATUS BIT(23)
+
+#define CSID_CSI2_RDIN_IRQ_CLEAR(rdi) (0xF4 + 0x10 * (rdi))
+#define CSID_CSI2_RDIN_IRQ_SET(rdi) (0xF8 + 0x10 * (rdi))
+
+#define CSID_CSI2_RX_CFG0 0x200
+#define CSI2_RX_CFG0_NUM_ACTIVE_LANES 0
+#define CSI2_RX_CFG0_DL0_INPUT_SEL 4
+#define CSI2_RX_CFG0_PHY_NUM_SEL 20
+
+#define CSID_CSI2_RX_CFG1 0x204
+#define CSI2_RX_CFG1_ECC_CORRECTION_EN BIT(0)
+#define CSI2_RX_CFG1_VC_MODE BIT(2)
+
+#define CSID_RDI_CFG0(rdi) (0x500 + 0x100 * (rdi))
+#define RDI_CFG0_TIMESTAMP_EN BIT(6)
+#define RDI_CFG0_TIMESTAMP_STB_SEL BIT(8)
+#define RDI_CFG0_DECODE_FORMAT 12
+#define RDI_CFG0_DT 16
+#define RDI_CFG0_VC 22
+#define RDI_CFG0_DT_ID 27
+#define RDI_CFG0_EN BIT(31)
+
+#define CSID_RDI_CTRL(rdi) (0x504 + 0x100 * (rdi))
+#define RDI_CTRL_START_CMD BIT(0)
+
+#define CSID_RDI_CFG1(rdi) (0x510 + 0x100 * (rdi))
+#define RDI_CFG1_DROP_H_EN BIT(5)
+#define RDI_CFG1_DROP_V_EN BIT(6)
+#define RDI_CFG1_CROP_H_EN BIT(7)
+#define RDI_CFG1_CROP_V_EN BIT(8)
+#define RDI_CFG1_PIX_STORE BIT(10)
+#define RDI_CFG1_PACKING_FORMAT_MIPI BIT(15)
+
+#define CSID_RDI_IRQ_SUBSAMPLE_PATTERN(rdi) (0x548 + 0x100 * (rdi))
+#define CSID_RDI_IRQ_SUBSAMPLE_PERIOD(rdi) (0x54C + 0x100 * (rdi))
+
+#define CSI2_RX_CFG0_PHY_SEL_BASE_IDX 1
+
+static void __csid_configure_rx(struct csid_device *csid,
+ struct csid_phy_config *phy, int vc)
+{
+ int val;
+
+ val = (phy->lane_cnt - 1) << CSI2_RX_CFG0_NUM_ACTIVE_LANES;
+ val |= phy->lane_assign << CSI2_RX_CFG0_DL0_INPUT_SEL;
+ val |= (phy->csiphy_id + CSI2_RX_CFG0_PHY_SEL_BASE_IDX) << CSI2_RX_CFG0_PHY_NUM_SEL;
+
+ writel(val, csid->base + CSID_CSI2_RX_CFG0);
+
+ val = CSI2_RX_CFG1_ECC_CORRECTION_EN;
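+ /* VCs above 3 require the extended virtual channel mode */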
+ if (vc > 3)
+ val |= CSI2_RX_CFG1_VC_MODE;
+
+ writel(val, csid->base + CSID_CSI2_RX_CFG1);
+}
+
+static void __csid_ctrl_rdi(struct csid_device *csid, int enable, u8 rdi)
+{
+ int val = 0;
+
+ if (enable)
+ val = RDI_CTRL_START_CMD;
+
+ writel(val, csid->base + CSID_RDI_CTRL(rdi));
+}
+
+static void __csid_configure_wrapper(struct csid_device *csid)
+{
+ u32 val;
+
+ /* CSID lite does not need the top wrapper register configured */
+ if (csid->res->is_lite)
+ return;
+
+ val = OUTPUT_IFE_EN | INTERNAL_CSID;
+ writel(val, csid->camss->csid_wrapper_base + CSID_IO_PATH_CFG0(csid->id));
+}
+
+static void __csid_configure_rdi_stream(struct csid_device *csid, u8 enable, u8 vc)
+{
+ u32 val;
+ u8 lane_cnt = csid->phy.lane_cnt;
+ /* Source pads map to RDI channels in hardware: pad 1 -> RDI0, pad 2 -> RDI1, etc. */
+ struct v4l2_mbus_framefmt *input_format = &csid->fmt[MSM_CSID_PAD_FIRST_SRC + vc];
+ const struct csid_format_info *format = csid_get_fmt_entry(csid->res->formats->formats,
+ csid->res->formats->nformats,
+ input_format->code);
+
+ if (!lane_cnt)
+ lane_cnt = 4;
+
+ /*
+ * DT_ID is a two bit bitfield that is concatenated with
+ * the four least significant bits of the five bit VC
+ * bitfield to generate an internal CID value.
+ *
+ * CSID_RDI_CFG0(vc)
+ * DT_ID : 28:27
+ * VC : 26:22
+ * DT : 21:16
+ *
+ * CID : VC 3:0 << 2 | DT_ID 1:0
+ */
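+ /* For example, vc = 2 gives dt_id = 2, hence CID = (2 << 2) | 2 = 0xa. */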
+ u8 dt_id = vc & 0x03;
+
+ val = RDI_CFG0_TIMESTAMP_EN;
+ val |= RDI_CFG0_TIMESTAMP_STB_SEL;
+ /* note: for non-RDI path, this should be format->decode_format */
+ val |= DECODE_FORMAT_PAYLOAD_ONLY << RDI_CFG0_DECODE_FORMAT;
+ val |= vc << RDI_CFG0_VC;
+ val |= format->data_type << RDI_CFG0_DT;
+ val |= dt_id << RDI_CFG0_DT_ID;
+
+ writel(val, csid->base + CSID_RDI_CFG0(vc));
+
+ val = RDI_CFG1_PACKING_FORMAT_MIPI;
+ val |= RDI_CFG1_PIX_STORE;
+ val |= RDI_CFG1_DROP_H_EN;
+ val |= RDI_CFG1_DROP_V_EN;
+ val |= RDI_CFG1_CROP_H_EN;
+ val |= RDI_CFG1_CROP_V_EN;
+
+ writel(val, csid->base + CSID_RDI_CFG1(vc));
+
+ val = 0;
+ writel(val, csid->base + CSID_RDI_IRQ_SUBSAMPLE_PERIOD(vc));
+
+ val = 1;
+ writel(val, csid->base + CSID_RDI_IRQ_SUBSAMPLE_PATTERN(vc));
+
+ val = 0;
+ writel(val, csid->base + CSID_RDI_CTRL(vc));
+
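+ /* Read back CFG0 and set the enable bit last, once the rest of the path is configured */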
+ val = readl(csid->base + CSID_RDI_CFG0(vc));
+
+ if (enable)
+ val |= RDI_CFG0_EN;
+ writel(val, csid->base + CSID_RDI_CFG0(vc));
+}
+
+static void csid_configure_stream(struct csid_device *csid, u8 enable)
+{
+ u8 i;
+
+ __csid_configure_wrapper(csid);
+
+ /* Loop through all enabled VCs and configure stream for each */
+ for (i = 0; i < MSM_CSID_MAX_SRC_STREAMS; i++)
+ if (csid->phy.en_vc & BIT(i)) {
+ __csid_configure_rdi_stream(csid, enable, i);
+ __csid_configure_rx(csid, &csid->phy, i);
+ __csid_ctrl_rdi(csid, enable, i);
+ }
+}
+
+static int csid_configure_testgen_pattern(struct csid_device *csid, s32 val)
+{
+ return 0;
+}
+
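+ /*
+ * csid_subdev_reg_update - Set or clear the RUP/AUP request for an RDI port
+ * @csid: CSID device
+ * @port_id: RDI port id
+ * @clear: true to drop the pending request, false to latch it and write the command register
+ */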
+static void csid_subdev_reg_update(struct csid_device *csid, int port_id, bool clear)
+{
+ if (clear) {
+ csid->reg_update &= ~CSID_RUP_AUP_RDI(port_id);
+ } else {
+ csid->reg_update |= CSID_RUP_AUP_RDI(port_id);
+ writel(csid->reg_update, csid->base + CSID_RUP_AUP_CMD);
+ }
+}
+
+/*
+ * csid_isr - CSID module interrupt service routine
+ * @irq: Interrupt line
+ * @dev: CSID device
+ *
+ * Return IRQ_HANDLED on success
+ */
+static irqreturn_t csid_isr(int irq, void *dev)
+{
+ struct csid_device *csid = dev;
+ u32 val, buf_done_val;
+ u8 reset_done;
+ int i;
+
+ val = readl(csid->base + CSID_TOP_IRQ_STATUS);
+ writel(val, csid->base + CSID_TOP_IRQ_CLEAR);
+ reset_done = val & TOP_IRQ_STATUS_RESET_DONE;
+
+ val = readl(csid->base + CSID_CSI2_RX_IRQ_STATUS);
+ writel(val, csid->base + CSID_CSI2_RX_IRQ_CLEAR);
+
+ buf_done_val = readl(csid->base + CSID_BUF_DONE_IRQ_STATUS);
+ writel(buf_done_val, csid->base + CSID_BUF_DONE_IRQ_CLEAR);
+
+ /* Read and clear IRQ status for each enabled RDI channel */
+ for (i = 0; i < MSM_CSID_MAX_SRC_STREAMS; i++)
+ if (csid->phy.en_vc & BIT(i)) {
+ val = readl(csid->base + CSID_CSI2_RDIN_IRQ_STATUS(i));
+ writel(val, csid->base + CSID_CSI2_RDIN_IRQ_CLEAR(i));
+
+ if (val & RUP_DONE_IRQ_STATUS)
+ /* clear the reg update bit */
+ csid_subdev_reg_update(csid, i, true);
+
+ if (buf_done_val & BIT(BUF_DONE_IRQ_STATUS_RDI_OFFSET + i)) {
+ /*
+ * For Titan 780, the buf done and RUP IRQs have been moved
+ * from the VFE to the CSID. When the CSID receives a buf done
+ * event, notify the VFE so it can run its buf done handling.
+ */
+ camss_buf_done(csid->camss, csid->id, i);
+ }
+ }
+
+ val = IRQ_CMD_CLEAR;
+ writel(val, csid->base + CSID_IRQ_CMD);
+
+ if (reset_done)
+ complete(&csid->reset_complete);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * csid_reset - Trigger reset on CSID module and wait to complete
+ * @csid: CSID device
+ *
+ * Return 0 on success or a negative error code otherwise
+ */
+static int csid_reset(struct csid_device *csid)
+{
+ unsigned long time;
+ u32 val;
+ int i;
+
+ reinit_completion(&csid->reset_complete);
+
+ writel(1, csid->base + CSID_TOP_IRQ_CLEAR);
+ writel(1, csid->base + CSID_IRQ_CMD);
+ writel(1, csid->base + CSID_TOP_IRQ_MASK);
+
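+ /* Clear and unmask the buf done IRQ for each enabled RDI before asserting the reset */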
+ for (i = 0; i < MSM_CSID_MAX_SRC_STREAMS; i++)
+ if (csid->phy.en_vc & BIT(i)) {
+ writel(BIT(BUF_DONE_IRQ_STATUS_RDI_OFFSET + i),
+ csid->base + CSID_BUF_DONE_IRQ_CLEAR);
+ writel(IRQ_CMD_CLEAR, csid->base + CSID_IRQ_CMD);
+ writel(BIT(BUF_DONE_IRQ_STATUS_RDI_OFFSET + i),
+ csid->base + CSID_BUF_DONE_IRQ_MASK);
+ }
+
+ /* preserve registers */
+ val = RST_LOCATION | RST_MODE;
+ writel(val, csid->base + CSID_RST_CFG);
+
+ val = SELECT_HW_RST | SELECT_IRQ_RST;
+ writel(val, csid->base + CSID_RST_CMD);
+
+ time = wait_for_completion_timeout(&csid->reset_complete,
+ msecs_to_jiffies(CSID_RESET_TIMEOUT_MS));
+ if (!time) {
+ dev_err(csid->camss->dev, "CSID reset timeout\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static void csid_subdev_init(struct csid_device *csid)
+{
+ csid->testgen.nmodes = CSID_PAYLOAD_MODE_DISABLED;
+}
+
+const struct csid_hw_ops csid_ops_780 = {
+ .configure_stream = csid_configure_stream,
+ .configure_testgen_pattern = csid_configure_testgen_pattern,
+ .hw_version = csid_hw_version,
+ .isr = csid_isr,
+ .reset = csid_reset,
+ .src_pad_code = csid_src_pad_code,
+ .subdev_init = csid_subdev_init,
+ .reg_update = csid_subdev_reg_update,
+};
diff --git a/drivers/media/platform/qcom/camss/camss-csid-780.h b/drivers/media/platform/qcom/camss/camss-csid-780.h
new file mode 100644
index 000000000000..a990c66a60ff
--- /dev/null
+++ b/drivers/media/platform/qcom/camss/camss-csid-780.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * camss-csid-780.h
+ *
+ * Qualcomm MSM Camera Subsystem - CSID (CSI Decoder) Module Generation 3
+ *
+ * Copyright (c) 2024 Qualcomm Technologies, Inc.
+ */
+#ifndef __QC_MSM_CAMSS_CSID_780_H__
+#define __QC_MSM_CAMSS_CSID_780_H__
+
+#define DECODE_FORMAT_UNCOMPRESSED_8_BIT 0x1
+#define DECODE_FORMAT_UNCOMPRESSED_10_BIT 0x2
+#define DECODE_FORMAT_UNCOMPRESSED_12_BIT 0x3
+#define DECODE_FORMAT_UNCOMPRESSED_14_BIT 0x4
+#define DECODE_FORMAT_UNCOMPRESSED_16_BIT 0x5
+#define DECODE_FORMAT_UNCOMPRESSED_20_BIT 0x6
+#define DECODE_FORMAT_UNCOMPRESSED_24_BIT 0x7
+#define DECODE_FORMAT_PAYLOAD_ONLY 0xf
+
+#define PLAIN_FORMAT_PLAIN8 0x0 /* supports DPCM, UNCOMPRESSED_6/8_BIT */
+#define PLAIN_FORMAT_PLAIN16 0x1 /* supports DPCM, UNCOMPRESSED_10/16_BIT */
+#define PLAIN_FORMAT_PLAIN32 0x2 /* supports UNCOMPRESSED_20_BIT */
+
+#endif /* __QC_MSM_CAMSS_CSID_780_H__ */
diff --git a/drivers/media/platform/qcom/camss/camss-csid-gen2.c b/drivers/media/platform/qcom/camss/camss-csid-gen2.c
index e1c757933e27..2a1746dcc1c5 100644
--- a/drivers/media/platform/qcom/camss/camss-csid-gen2.c
+++ b/drivers/media/platform/qcom/camss/camss-csid-gen2.c
@@ -22,11 +22,6 @@
* alternate register layout.
*/
-#define CSID_HW_VERSION 0x0
-#define HW_VERSION_STEPPING 0
-#define HW_VERSION_REVISION 16
-#define HW_VERSION_GENERATION 28
-
#define CSID_RST_STROBES 0x10
#define RST_STROBES 0
@@ -352,29 +347,6 @@ static int csid_configure_testgen_pattern(struct csid_device *csid, s32 val)
}
/*
- * csid_hw_version - CSID hardware version query
- * @csid: CSID device
- *
- * Return HW version or error
- */
-static u32 csid_hw_version(struct csid_device *csid)
-{
- u32 hw_version;
- u32 hw_gen;
- u32 hw_rev;
- u32 hw_step;
-
- hw_version = readl_relaxed(csid->base + CSID_HW_VERSION);
- hw_gen = (hw_version >> HW_VERSION_GENERATION) & 0xF;
- hw_rev = (hw_version >> HW_VERSION_REVISION) & 0xFFF;
- hw_step = (hw_version >> HW_VERSION_STEPPING) & 0xFFFF;
- dev_dbg(csid->camss->dev, "CSID HW Version = %u.%u.%u\n",
- hw_gen, hw_rev, hw_step);
-
- return hw_version;
-}
-
-/*
* csid_isr - CSID module interrupt service routine
* @irq: Interrupt line
* @dev: CSID device
@@ -443,38 +415,6 @@ static int csid_reset(struct csid_device *csid)
return 0;
}
-static u32 csid_src_pad_code(struct csid_device *csid, u32 sink_code,
- unsigned int match_format_idx, u32 match_code)
-{
- switch (sink_code) {
- case MEDIA_BUS_FMT_SBGGR10_1X10:
- {
- u32 src_code[] = {
- MEDIA_BUS_FMT_SBGGR10_1X10,
- MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_LE,
- };
-
- return csid_find_code(src_code, ARRAY_SIZE(src_code),
- match_format_idx, match_code);
- }
- case MEDIA_BUS_FMT_Y10_1X10:
- {
- u32 src_code[] = {
- MEDIA_BUS_FMT_Y10_1X10,
- MEDIA_BUS_FMT_Y10_2X8_PADHI_LE,
- };
-
- return csid_find_code(src_code, ARRAY_SIZE(src_code),
- match_format_idx, match_code);
- }
- default:
- if (match_format_idx > 0)
- return 0;
-
- return sink_code;
- }
-}
-
static void csid_subdev_init(struct csid_device *csid)
{
csid->testgen.modes = csid_testgen_modes;
diff --git a/drivers/media/platform/qcom/camss/camss-csid.c b/drivers/media/platform/qcom/camss/camss-csid.c
index 858db5d4ca75..d08117f46f3b 100644
--- a/drivers/media/platform/qcom/camss/camss-csid.c
+++ b/drivers/media/platform/qcom/camss/camss-csid.c
@@ -17,6 +17,7 @@
#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
#include <media/media-entity.h>
+#include <media/mipi-csi2.h>
#include <media/v4l2-device.h>
#include <media/v4l2-event.h>
#include <media/v4l2-subdev.h>
@@ -29,6 +30,11 @@
#define VFE_480_CSID_OFFSET 0x1200
#define VFE_480_LITE_CSID_OFFSET 0x200
+#define CSID_HW_VERSION 0x0
+#define HW_VERSION_STEPPING 0
+#define HW_VERSION_REVISION 16
+#define HW_VERSION_GENERATION 28
+
#define MSM_CSID_NAME "msm_csid"
const char * const csid_testgen_modes[] = {
@@ -48,119 +54,119 @@ const char * const csid_testgen_modes[] = {
static const struct csid_format_info formats_4_1[] = {
{
MEDIA_BUS_FMT_UYVY8_1X16,
- DATA_TYPE_YUV422_8BIT,
+ MIPI_CSI2_DT_YUV422_8B,
DECODE_FORMAT_UNCOMPRESSED_8_BIT,
8,
2,
},
{
MEDIA_BUS_FMT_VYUY8_1X16,
- DATA_TYPE_YUV422_8BIT,
+ MIPI_CSI2_DT_YUV422_8B,
DECODE_FORMAT_UNCOMPRESSED_8_BIT,
8,
2,
},
{
MEDIA_BUS_FMT_YUYV8_1X16,
- DATA_TYPE_YUV422_8BIT,
+ MIPI_CSI2_DT_YUV422_8B,
DECODE_FORMAT_UNCOMPRESSED_8_BIT,
8,
2,
},
{
MEDIA_BUS_FMT_YVYU8_1X16,
- DATA_TYPE_YUV422_8BIT,
+ MIPI_CSI2_DT_YUV422_8B,
DECODE_FORMAT_UNCOMPRESSED_8_BIT,
8,
2,
},
{
MEDIA_BUS_FMT_SBGGR8_1X8,
- DATA_TYPE_RAW_8BIT,
+ MIPI_CSI2_DT_RAW8,
DECODE_FORMAT_UNCOMPRESSED_8_BIT,
8,
1,
},
{
MEDIA_BUS_FMT_SGBRG8_1X8,
- DATA_TYPE_RAW_8BIT,
+ MIPI_CSI2_DT_RAW8,
DECODE_FORMAT_UNCOMPRESSED_8_BIT,
8,
1,
},
{
MEDIA_BUS_FMT_SGRBG8_1X8,
- DATA_TYPE_RAW_8BIT,
+ MIPI_CSI2_DT_RAW8,
DECODE_FORMAT_UNCOMPRESSED_8_BIT,
8,
1,
},
{
MEDIA_BUS_FMT_SRGGB8_1X8,
- DATA_TYPE_RAW_8BIT,
+ MIPI_CSI2_DT_RAW8,
DECODE_FORMAT_UNCOMPRESSED_8_BIT,
8,
1,
},
{
MEDIA_BUS_FMT_SBGGR10_1X10,
- DATA_TYPE_RAW_10BIT,
+ MIPI_CSI2_DT_RAW10,
DECODE_FORMAT_UNCOMPRESSED_10_BIT,
10,
1,
},
{
MEDIA_BUS_FMT_SGBRG10_1X10,
- DATA_TYPE_RAW_10BIT,
+ MIPI_CSI2_DT_RAW10,
DECODE_FORMAT_UNCOMPRESSED_10_BIT,
10,
1,
},
{
MEDIA_BUS_FMT_SGRBG10_1X10,
- DATA_TYPE_RAW_10BIT,
+ MIPI_CSI2_DT_RAW10,
DECODE_FORMAT_UNCOMPRESSED_10_BIT,
10,
1,
},
{
MEDIA_BUS_FMT_SRGGB10_1X10,
- DATA_TYPE_RAW_10BIT,
+ MIPI_CSI2_DT_RAW10,
DECODE_FORMAT_UNCOMPRESSED_10_BIT,
10,
1,
},
{
MEDIA_BUS_FMT_SBGGR12_1X12,
- DATA_TYPE_RAW_12BIT,
+ MIPI_CSI2_DT_RAW12,
DECODE_FORMAT_UNCOMPRESSED_12_BIT,
12,
1,
},
{
MEDIA_BUS_FMT_SGBRG12_1X12,
- DATA_TYPE_RAW_12BIT,
+ MIPI_CSI2_DT_RAW12,
DECODE_FORMAT_UNCOMPRESSED_12_BIT,
12,
1,
},
{
MEDIA_BUS_FMT_SGRBG12_1X12,
- DATA_TYPE_RAW_12BIT,
+ MIPI_CSI2_DT_RAW12,
DECODE_FORMAT_UNCOMPRESSED_12_BIT,
12,
1,
},
{
MEDIA_BUS_FMT_SRGGB12_1X12,
- DATA_TYPE_RAW_12BIT,
+ MIPI_CSI2_DT_RAW12,
DECODE_FORMAT_UNCOMPRESSED_12_BIT,
12,
1,
},
{
MEDIA_BUS_FMT_Y10_1X10,
- DATA_TYPE_RAW_10BIT,
+ MIPI_CSI2_DT_RAW10,
DECODE_FORMAT_UNCOMPRESSED_10_BIT,
10,
1,
@@ -170,147 +176,147 @@ static const struct csid_format_info formats_4_1[] = {
static const struct csid_format_info formats_4_7[] = {
{
MEDIA_BUS_FMT_UYVY8_1X16,
- DATA_TYPE_YUV422_8BIT,
+ MIPI_CSI2_DT_YUV422_8B,
DECODE_FORMAT_UNCOMPRESSED_8_BIT,
8,
2,
},
{
MEDIA_BUS_FMT_VYUY8_1X16,
- DATA_TYPE_YUV422_8BIT,
+ MIPI_CSI2_DT_YUV422_8B,
DECODE_FORMAT_UNCOMPRESSED_8_BIT,
8,
2,
},
{
MEDIA_BUS_FMT_YUYV8_1X16,
- DATA_TYPE_YUV422_8BIT,
+ MIPI_CSI2_DT_YUV422_8B,
DECODE_FORMAT_UNCOMPRESSED_8_BIT,
8,
2,
},
{
MEDIA_BUS_FMT_YVYU8_1X16,
- DATA_TYPE_YUV422_8BIT,
+ MIPI_CSI2_DT_YUV422_8B,
DECODE_FORMAT_UNCOMPRESSED_8_BIT,
8,
2,
},
{
MEDIA_BUS_FMT_SBGGR8_1X8,
- DATA_TYPE_RAW_8BIT,
+ MIPI_CSI2_DT_RAW8,
DECODE_FORMAT_UNCOMPRESSED_8_BIT,
8,
1,
},
{
MEDIA_BUS_FMT_SGBRG8_1X8,
- DATA_TYPE_RAW_8BIT,
+ MIPI_CSI2_DT_RAW8,
DECODE_FORMAT_UNCOMPRESSED_8_BIT,
8,
1,
},
{
MEDIA_BUS_FMT_SGRBG8_1X8,
- DATA_TYPE_RAW_8BIT,
+ MIPI_CSI2_DT_RAW8,
DECODE_FORMAT_UNCOMPRESSED_8_BIT,
8,
1,
},
{
MEDIA_BUS_FMT_SRGGB8_1X8,
- DATA_TYPE_RAW_8BIT,
+ MIPI_CSI2_DT_RAW8,
DECODE_FORMAT_UNCOMPRESSED_8_BIT,
8,
1,
},
{
MEDIA_BUS_FMT_SBGGR10_1X10,
- DATA_TYPE_RAW_10BIT,
+ MIPI_CSI2_DT_RAW10,
DECODE_FORMAT_UNCOMPRESSED_10_BIT,
10,
1,
},
{
MEDIA_BUS_FMT_SGBRG10_1X10,
- DATA_TYPE_RAW_10BIT,
+ MIPI_CSI2_DT_RAW10,
DECODE_FORMAT_UNCOMPRESSED_10_BIT,
10,
1,
},
{
MEDIA_BUS_FMT_SGRBG10_1X10,
- DATA_TYPE_RAW_10BIT,
+ MIPI_CSI2_DT_RAW10,
DECODE_FORMAT_UNCOMPRESSED_10_BIT,
10,
1,
},
{
MEDIA_BUS_FMT_SRGGB10_1X10,
- DATA_TYPE_RAW_10BIT,
+ MIPI_CSI2_DT_RAW10,
DECODE_FORMAT_UNCOMPRESSED_10_BIT,
10,
1,
},
{
MEDIA_BUS_FMT_SBGGR12_1X12,
- DATA_TYPE_RAW_12BIT,
+ MIPI_CSI2_DT_RAW12,
DECODE_FORMAT_UNCOMPRESSED_12_BIT,
12,
1,
},
{
MEDIA_BUS_FMT_SGBRG12_1X12,
- DATA_TYPE_RAW_12BIT,
+ MIPI_CSI2_DT_RAW12,
DECODE_FORMAT_UNCOMPRESSED_12_BIT,
12,
1,
},
{
MEDIA_BUS_FMT_SGRBG12_1X12,
- DATA_TYPE_RAW_12BIT,
+ MIPI_CSI2_DT_RAW12,
DECODE_FORMAT_UNCOMPRESSED_12_BIT,
12,
1,
},
{
MEDIA_BUS_FMT_SRGGB12_1X12,
- DATA_TYPE_RAW_12BIT,
+ MIPI_CSI2_DT_RAW12,
DECODE_FORMAT_UNCOMPRESSED_12_BIT,
12,
1,
},
{
MEDIA_BUS_FMT_SBGGR14_1X14,
- DATA_TYPE_RAW_14BIT,
+ MIPI_CSI2_DT_RAW14,
DECODE_FORMAT_UNCOMPRESSED_14_BIT,
14,
1,
},
{
MEDIA_BUS_FMT_SGBRG14_1X14,
- DATA_TYPE_RAW_14BIT,
+ MIPI_CSI2_DT_RAW14,
DECODE_FORMAT_UNCOMPRESSED_14_BIT,
14,
1,
},
{
MEDIA_BUS_FMT_SGRBG14_1X14,
- DATA_TYPE_RAW_14BIT,
+ MIPI_CSI2_DT_RAW14,
DECODE_FORMAT_UNCOMPRESSED_14_BIT,
14,
1,
},
{
MEDIA_BUS_FMT_SRGGB14_1X14,
- DATA_TYPE_RAW_14BIT,
+ MIPI_CSI2_DT_RAW14,
DECODE_FORMAT_UNCOMPRESSED_14_BIT,
14,
1,
},
{
MEDIA_BUS_FMT_Y10_1X10,
- DATA_TYPE_RAW_10BIT,
+ MIPI_CSI2_DT_RAW10,
DECODE_FORMAT_UNCOMPRESSED_10_BIT,
10,
1,
@@ -320,154 +326,154 @@ static const struct csid_format_info formats_4_7[] = {
static const struct csid_format_info formats_gen2[] = {
{
MEDIA_BUS_FMT_UYVY8_1X16,
- DATA_TYPE_YUV422_8BIT,
+ MIPI_CSI2_DT_YUV422_8B,
DECODE_FORMAT_UNCOMPRESSED_8_BIT,
8,
2,
},
{
MEDIA_BUS_FMT_VYUY8_1X16,
- DATA_TYPE_YUV422_8BIT,
+ MIPI_CSI2_DT_YUV422_8B,
DECODE_FORMAT_UNCOMPRESSED_8_BIT,
8,
2,
},
{
MEDIA_BUS_FMT_YUYV8_1X16,
- DATA_TYPE_YUV422_8BIT,
+ MIPI_CSI2_DT_YUV422_8B,
DECODE_FORMAT_UNCOMPRESSED_8_BIT,
8,
2,
},
{
MEDIA_BUS_FMT_YVYU8_1X16,
- DATA_TYPE_YUV422_8BIT,
+ MIPI_CSI2_DT_YUV422_8B,
DECODE_FORMAT_UNCOMPRESSED_8_BIT,
8,
2,
},
{
MEDIA_BUS_FMT_SBGGR8_1X8,
- DATA_TYPE_RAW_8BIT,
+ MIPI_CSI2_DT_RAW8,
DECODE_FORMAT_UNCOMPRESSED_8_BIT,
8,
1,
},
{
MEDIA_BUS_FMT_SGBRG8_1X8,
- DATA_TYPE_RAW_8BIT,
+ MIPI_CSI2_DT_RAW8,
DECODE_FORMAT_UNCOMPRESSED_8_BIT,
8,
1,
},
{
MEDIA_BUS_FMT_SGRBG8_1X8,
- DATA_TYPE_RAW_8BIT,
+ MIPI_CSI2_DT_RAW8,
DECODE_FORMAT_UNCOMPRESSED_8_BIT,
8,
1,
},
{
MEDIA_BUS_FMT_SRGGB8_1X8,
- DATA_TYPE_RAW_8BIT,
+ MIPI_CSI2_DT_RAW8,
DECODE_FORMAT_UNCOMPRESSED_8_BIT,
8,
1,
},
{
MEDIA_BUS_FMT_SBGGR10_1X10,
- DATA_TYPE_RAW_10BIT,
+ MIPI_CSI2_DT_RAW10,
DECODE_FORMAT_UNCOMPRESSED_10_BIT,
10,
1,
},
{
MEDIA_BUS_FMT_SGBRG10_1X10,
- DATA_TYPE_RAW_10BIT,
+ MIPI_CSI2_DT_RAW10,
DECODE_FORMAT_UNCOMPRESSED_10_BIT,
10,
1,
},
{
MEDIA_BUS_FMT_SGRBG10_1X10,
- DATA_TYPE_RAW_10BIT,
+ MIPI_CSI2_DT_RAW10,
DECODE_FORMAT_UNCOMPRESSED_10_BIT,
10,
1,
},
{
MEDIA_BUS_FMT_SRGGB10_1X10,
- DATA_TYPE_RAW_10BIT,
+ MIPI_CSI2_DT_RAW10,
DECODE_FORMAT_UNCOMPRESSED_10_BIT,
10,
1,
},
{
MEDIA_BUS_FMT_Y8_1X8,
- DATA_TYPE_RAW_8BIT,
+ MIPI_CSI2_DT_RAW8,
DECODE_FORMAT_UNCOMPRESSED_8_BIT,
8,
1,
},
{
MEDIA_BUS_FMT_Y10_1X10,
- DATA_TYPE_RAW_10BIT,
+ MIPI_CSI2_DT_RAW10,
DECODE_FORMAT_UNCOMPRESSED_10_BIT,
10,
1,
},
{
MEDIA_BUS_FMT_SBGGR12_1X12,
- DATA_TYPE_RAW_12BIT,
+ MIPI_CSI2_DT_RAW12,
DECODE_FORMAT_UNCOMPRESSED_12_BIT,
12,
1,
},
{
MEDIA_BUS_FMT_SGBRG12_1X12,
- DATA_TYPE_RAW_12BIT,
+ MIPI_CSI2_DT_RAW12,
DECODE_FORMAT_UNCOMPRESSED_12_BIT,
12,
1,
},
{
MEDIA_BUS_FMT_SGRBG12_1X12,
- DATA_TYPE_RAW_12BIT,
+ MIPI_CSI2_DT_RAW12,
DECODE_FORMAT_UNCOMPRESSED_12_BIT,
12,
1,
},
{
MEDIA_BUS_FMT_SRGGB12_1X12,
- DATA_TYPE_RAW_12BIT,
+ MIPI_CSI2_DT_RAW12,
DECODE_FORMAT_UNCOMPRESSED_12_BIT,
12,
1,
},
{
MEDIA_BUS_FMT_SBGGR14_1X14,
- DATA_TYPE_RAW_14BIT,
+ MIPI_CSI2_DT_RAW14,
DECODE_FORMAT_UNCOMPRESSED_14_BIT,
14,
1,
},
{
MEDIA_BUS_FMT_SGBRG14_1X14,
- DATA_TYPE_RAW_14BIT,
+ MIPI_CSI2_DT_RAW14,
DECODE_FORMAT_UNCOMPRESSED_14_BIT,
14,
1,
},
{
MEDIA_BUS_FMT_SGRBG14_1X14,
- DATA_TYPE_RAW_14BIT,
+ MIPI_CSI2_DT_RAW14,
DECODE_FORMAT_UNCOMPRESSED_14_BIT,
14,
1,
},
{
MEDIA_BUS_FMT_SRGGB14_1X14,
- DATA_TYPE_RAW_14BIT,
+ MIPI_CSI2_DT_RAW14,
DECODE_FORMAT_UNCOMPRESSED_14_BIT,
14,
1,
@@ -591,6 +597,78 @@ static int csid_set_clock_rates(struct csid_device *csid)
}
/*
+ * csid_hw_version - CSID hardware version query
+ * @csid: CSID device
+ *
+ * Return HW version or error
+ */
+u32 csid_hw_version(struct csid_device *csid)
+{
+ u32 hw_version;
+ u32 hw_gen;
+ u32 hw_rev;
+ u32 hw_step;
+
+ hw_version = readl_relaxed(csid->base + CSID_HW_VERSION);
+ hw_gen = (hw_version >> HW_VERSION_GENERATION) & 0xF;
+ hw_rev = (hw_version >> HW_VERSION_REVISION) & 0xFFF;
+ hw_step = (hw_version >> HW_VERSION_STEPPING) & 0xFFFF;
+ dev_info(csid->camss->dev, "CSID:%d HW Version = %u.%u.%u\n",
+ csid->id, hw_gen, hw_rev, hw_step);
+
+ return hw_version;
+}
+
+/*
+ * csid_src_pad_code - Pick an output/src format based on the input/sink format
+ * @csid: CSID device
+ * @sink_code: The sink format of the input
+ * @match_format_idx: Request preferred index, as defined by subdevice csid
+ * format. Set @match_code to 0 if used.
+ * @match_code: Request preferred code, set @match_format_idx to 0 if used
+ *
+ * Return 0 on failure or src format code otherwise
+ */
+u32 csid_src_pad_code(struct csid_device *csid, u32 sink_code,
+ unsigned int match_format_idx, u32 match_code)
+{
+ if (csid->camss->res->version == CAMSS_8x16) {
+ if (match_format_idx > 0)
+ return 0;
+
+ return sink_code;
+ }
+
+ switch (sink_code) {
+ case MEDIA_BUS_FMT_SBGGR10_1X10:
+ {
+ u32 src_code[] = {
+ MEDIA_BUS_FMT_SBGGR10_1X10,
+ MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_LE,
+ };
+
+ return csid_find_code(src_code, ARRAY_SIZE(src_code),
+ match_format_idx, match_code);
+ }
+ case MEDIA_BUS_FMT_Y10_1X10:
+ {
+ u32 src_code[] = {
+ MEDIA_BUS_FMT_Y10_1X10,
+ MEDIA_BUS_FMT_Y10_2X8_PADHI_LE,
+ };
+
+ return csid_find_code(src_code, ARRAY_SIZE(src_code),
+ match_format_idx, match_code);
+ }
+ default:
+ if (match_format_idx > 0)
+ return 0;
+
+ return sink_code;
+ }
+}
+
+/*
* csid_set_power - Power on/off CSID module
* @sd: CSID V4L2 subdevice
* @on: Requested power state
@@ -683,11 +761,13 @@ static int csid_set_stream(struct v4l2_subdev *sd, int enable)
int ret;
if (enable) {
- ret = v4l2_ctrl_handler_setup(&csid->ctrls);
- if (ret < 0) {
- dev_err(csid->camss->dev,
- "could not sync v4l2 controls: %d\n", ret);
- return ret;
+ if (csid->testgen.nmodes != CSID_PAYLOAD_MODE_DISABLED) {
+ ret = v4l2_ctrl_handler_setup(&csid->ctrls);
+ if (ret < 0) {
+ dev_err(csid->camss->dev,
+ "could not sync v4l2 controls: %d\n", ret);
+ return ret;
+ }
}
if (!csid->testgen.enabled &&
@@ -761,7 +841,8 @@ static void csid_try_format(struct csid_device *csid,
break;
case MSM_CSID_PAD_SRC:
- if (csid->testgen_mode->cur.val == 0) {
+ if (csid->testgen.nmodes == CSID_PAYLOAD_MODE_DISABLED ||
+ csid->testgen_mode->cur.val == 0) {
/* Test generator is disabled, */
/* keep pad formats in sync */
u32 code = fmt->code;
@@ -811,7 +892,8 @@ static int csid_enum_mbus_code(struct v4l2_subdev *sd,
code->code = csid->res->formats->formats[code->index].code;
} else {
- if (csid->testgen_mode->cur.val == 0) {
+ if (csid->testgen.nmodes == CSID_PAYLOAD_MODE_DISABLED ||
+ csid->testgen_mode->cur.val == 0) {
struct v4l2_mbus_framefmt *sink_fmt;
sink_fmt = __csid_get_format(csid, sd_state,
@@ -1190,7 +1272,8 @@ static int csid_link_setup(struct media_entity *entity,
/* If test generator is enabled */
/* do not allow a link from CSIPHY to CSID */
- if (csid->testgen_mode->cur.val != 0)
+ if (csid->testgen.nmodes != CSID_PAYLOAD_MODE_DISABLED &&
+ csid->testgen_mode->cur.val != 0)
return -EBUSY;
sd = media_entity_to_v4l2_subdev(remote->entity);
@@ -1283,24 +1366,27 @@ int msm_csid_register_entity(struct csid_device *csid,
MSM_CSID_NAME, csid->id);
v4l2_set_subdevdata(sd, csid);
- ret = v4l2_ctrl_handler_init(&csid->ctrls, 1);
- if (ret < 0) {
- dev_err(dev, "Failed to init ctrl handler: %d\n", ret);
- return ret;
- }
+ if (csid->testgen.nmodes != CSID_PAYLOAD_MODE_DISABLED) {
+ ret = v4l2_ctrl_handler_init(&csid->ctrls, 1);
+ if (ret < 0) {
+ dev_err(dev, "Failed to init ctrl handler: %d\n", ret);
+ return ret;
+ }
- csid->testgen_mode = v4l2_ctrl_new_std_menu_items(&csid->ctrls,
- &csid_ctrl_ops, V4L2_CID_TEST_PATTERN,
- csid->testgen.nmodes, 0, 0,
- csid->testgen.modes);
+ csid->testgen_mode =
+ v4l2_ctrl_new_std_menu_items(&csid->ctrls,
+ &csid_ctrl_ops, V4L2_CID_TEST_PATTERN,
+ csid->testgen.nmodes, 0, 0,
+ csid->testgen.modes);
- if (csid->ctrls.error) {
- dev_err(dev, "Failed to init ctrl: %d\n", csid->ctrls.error);
- ret = csid->ctrls.error;
- goto free_ctrl;
- }
+ if (csid->ctrls.error) {
+ dev_err(dev, "Failed to init ctrl: %d\n", csid->ctrls.error);
+ ret = csid->ctrls.error;
+ goto free_ctrl;
+ }
- csid->subdev.ctrl_handler = &csid->ctrls;
+ csid->subdev.ctrl_handler = &csid->ctrls;
+ }
ret = csid_init_formats(sd, NULL);
if (ret < 0) {
@@ -1331,7 +1417,8 @@ int msm_csid_register_entity(struct csid_device *csid,
media_cleanup:
media_entity_cleanup(&sd->entity);
free_ctrl:
- v4l2_ctrl_handler_free(&csid->ctrls);
+ if (csid->testgen.nmodes != CSID_PAYLOAD_MODE_DISABLED)
+ v4l2_ctrl_handler_free(&csid->ctrls);
return ret;
}
@@ -1344,7 +1431,8 @@ void msm_csid_unregister_entity(struct csid_device *csid)
{
v4l2_device_unregister_subdev(&csid->subdev);
media_entity_cleanup(&csid->subdev.entity);
- v4l2_ctrl_handler_free(&csid->ctrls);
+ if (csid->testgen.nmodes != CSID_PAYLOAD_MODE_DISABLED)
+ v4l2_ctrl_handler_free(&csid->ctrls);
}
inline bool csid_is_lite(struct csid_device *csid)
diff --git a/drivers/media/platform/qcom/camss/camss-csid.h b/drivers/media/platform/qcom/camss/camss-csid.h
index 8cdae98e4dca..90b8fc5852be 100644
--- a/drivers/media/platform/qcom/camss/camss-csid.h
+++ b/drivers/media/platform/qcom/camss/camss-csid.h
@@ -27,29 +27,6 @@
/* CSID hardware can demultiplex up to 4 outputs */
#define MSM_CSID_MAX_SRC_STREAMS 4
-#define DATA_TYPE_EMBEDDED_DATA_8BIT 0x12
-#define DATA_TYPE_YUV420_8BIT 0x18
-#define DATA_TYPE_YUV420_10BIT 0x19
-#define DATA_TYPE_YUV420_8BIT_LEGACY 0x1a
-#define DATA_TYPE_YUV420_8BIT_SHIFTED 0x1c /* Chroma Shifted Pixel Sampling */
-#define DATA_TYPE_YUV420_10BIT_SHIFTED 0x1d /* Chroma Shifted Pixel Sampling */
-#define DATA_TYPE_YUV422_8BIT 0x1e
-#define DATA_TYPE_YUV422_10BIT 0x1f
-#define DATA_TYPE_RGB444 0x20
-#define DATA_TYPE_RGB555 0x21
-#define DATA_TYPE_RGB565 0x22
-#define DATA_TYPE_RGB666 0x23
-#define DATA_TYPE_RGB888 0x24
-#define DATA_TYPE_RAW_24BIT 0x27
-#define DATA_TYPE_RAW_6BIT 0x28
-#define DATA_TYPE_RAW_7BIT 0x29
-#define DATA_TYPE_RAW_8BIT 0x2a
-#define DATA_TYPE_RAW_10BIT 0x2b
-#define DATA_TYPE_RAW_12BIT 0x2c
-#define DATA_TYPE_RAW_14BIT 0x2d
-#define DATA_TYPE_RAW_16BIT 0x2e
-#define DATA_TYPE_RAW_20BIT 0x2f
-
#define CSID_RESET_TIMEOUT_MS 500
enum csid_testgen_mode {
@@ -152,6 +129,14 @@ struct csid_hw_ops {
* @csid: CSID device
*/
void (*subdev_init)(struct csid_device *csid);
+
+ /*
+ * reg_update - Receive a register update (RUP/AUP) request from another sub-device
+ * @csid: CSID device
+ * @port_id: Port id
+ * @is_clear: Indicates whether the pending register update is being cleared or set
+ */
+ void (*reg_update)(struct csid_device *csid, int port_id, bool is_clear);
};
struct csid_subdev_resources {
@@ -169,6 +154,7 @@ struct csid_device {
void __iomem *base;
u32 irq;
char irq_name[30];
+ u32 reg_update;
struct camss_clock *clock;
int nclocks;
struct regulator_bulk_data *supplies;
@@ -228,6 +214,7 @@ extern const struct csid_formats csid_formats_gen2;
extern const struct csid_hw_ops csid_ops_4_1;
extern const struct csid_hw_ops csid_ops_4_7;
extern const struct csid_hw_ops csid_ops_gen2;
+extern const struct csid_hw_ops csid_ops_780;
/*
* csid_is_lite - Check if CSID is CSID lite.
@@ -237,4 +224,25 @@ extern const struct csid_hw_ops csid_ops_gen2;
*/
bool csid_is_lite(struct csid_device *csid);
+/*
+ * csid_hw_version - CSID hardware version query
+ * @csid: CSID device
+ *
+ * Return HW version or error
+ */
+u32 csid_hw_version(struct csid_device *csid);
+
+/*
+ * csid_src_pad_code - Pick an output/src format based on the input/sink format
+ * @csid: CSID device
+ * @sink_code: The sink format of the input
+ * @match_format_idx: Request preferred index, as defined by subdevice csid
+ * format. Set @match_code to 0 if used.
+ * @match_code: Request preferred code, set @match_format_idx to 0 if used
+ *
+ * Return 0 on failure or src format code otherwise
+ */
+u32 csid_src_pad_code(struct csid_device *csid, u32 sink_code,
+ unsigned int match_format_idx, u32 match_code);
+
#endif /* QC_MSM_CAMSS_CSID_H */
diff --git a/drivers/media/platform/qcom/camss/camss-csiphy-2ph-1-0.c b/drivers/media/platform/qcom/camss/camss-csiphy-2ph-1-0.c
index cd4a8c369234..9d67e7fa6366 100644
--- a/drivers/media/platform/qcom/camss/camss-csiphy-2ph-1-0.c
+++ b/drivers/media/platform/qcom/camss/camss-csiphy-2ph-1-0.c
@@ -180,6 +180,11 @@ static irqreturn_t csiphy_isr(int irq, void *dev)
return IRQ_HANDLED;
}
+static int csiphy_init(struct csiphy_device *csiphy)
+{
+ return 0;
+}
+
const struct csiphy_hw_ops csiphy_ops_2ph_1_0 = {
.get_lane_mask = csiphy_get_lane_mask,
.hw_version_read = csiphy_hw_version_read,
@@ -187,4 +192,5 @@ const struct csiphy_hw_ops csiphy_ops_2ph_1_0 = {
.lanes_enable = csiphy_lanes_enable,
.lanes_disable = csiphy_lanes_disable,
.isr = csiphy_isr,
+ .init = csiphy_init,
};
diff --git a/drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c b/drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c
index f341f7b7fd8a..a6cc957b986e 100644
--- a/drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c
+++ b/drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c
@@ -42,21 +42,21 @@
#define CSIPHY_3PH_LNn_CSI_LANE_CTRL15(n) (0x03c + 0x100 * (n))
#define CSIPHY_3PH_LNn_CSI_LANE_CTRL15_SWI_SOT_SYMBOL 0xb8
-#define CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(n) (0x800 + 0x4 * (n))
+#define CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(offset, n) ((offset) + 0x4 * (n))
#define CSIPHY_3PH_CMN_CSI_COMMON_CTRL5_CLK_ENABLE BIT(7)
#define CSIPHY_3PH_CMN_CSI_COMMON_CTRL6_COMMON_PWRDN_B BIT(0)
#define CSIPHY_3PH_CMN_CSI_COMMON_CTRL6_SHOW_REV_ID BIT(1)
-#define CSIPHY_3PH_CMN_CSI_COMMON_STATUSn(n) (0x8b0 + 0x4 * (n))
+#define CSIPHY_3PH_CMN_CSI_COMMON_STATUSn(offset, n) ((offset) + 0xb0 + 0x4 * (n))
-#define CSIPHY_DEFAULT_PARAMS 0
-#define CSIPHY_LANE_ENABLE 1
-#define CSIPHY_SETTLE_CNT_LOWER_BYTE 2
-#define CSIPHY_SETTLE_CNT_HIGHER_BYTE 3
-#define CSIPHY_DNP_PARAMS 4
-#define CSIPHY_2PH_REGS 5
-#define CSIPHY_3PH_REGS 6
+#define CSIPHY_DEFAULT_PARAMS 0
+#define CSIPHY_LANE_ENABLE 1
+#define CSIPHY_SETTLE_CNT_LOWER_BYTE 2
+#define CSIPHY_SETTLE_CNT_HIGHER_BYTE 3
+#define CSIPHY_DNP_PARAMS 4
+#define CSIPHY_2PH_REGS 5
+#define CSIPHY_3PH_REGS 6
-struct csiphy_reg_t {
+struct csiphy_lane_regs {
s32 reg_addr;
s32 reg_data;
s32 delay;
@@ -65,305 +65,381 @@ struct csiphy_reg_t {
/* GEN2 1.0 2PH */
static const struct
-csiphy_reg_t lane_regs_sdm845[5][14] = {
- {
- {0x0004, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x002C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0034, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x001C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0014, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0028, 0x00, 0x00, CSIPHY_DNP_PARAMS},
- {0x003C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0000, 0x91, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0008, 0x00, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
- {0x000c, 0x00, 0x00, CSIPHY_DNP_PARAMS},
- {0x0010, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0038, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0060, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0064, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
- },
- {
- {0x0704, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x072C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0734, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x071C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0714, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0728, 0x04, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x073C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0700, 0x80, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0708, 0x14, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
- {0x070C, 0xA5, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0710, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0738, 0x1F, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0760, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0764, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
- },
- {
- {0x0204, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x022C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0234, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x021C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0214, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0228, 0x00, 0x00, CSIPHY_DNP_PARAMS},
- {0x023C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0200, 0x91, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
- {0x020C, 0x00, 0x00, CSIPHY_DNP_PARAMS},
- {0x0210, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0238, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0260, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0264, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
- },
- {
- {0x0404, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x042C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0434, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x041C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0414, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0428, 0x00, 0x00, CSIPHY_DNP_PARAMS},
- {0x043C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0400, 0x91, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0408, 0x00, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
- {0x040C, 0x00, 0x00, CSIPHY_DNP_PARAMS},
- {0x0410, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0438, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0460, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0464, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
- },
- {
- {0x0604, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x062C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0634, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x061C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0614, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0628, 0x00, 0x00, CSIPHY_DNP_PARAMS},
- {0x063C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0600, 0x91, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
- {0x060C, 0x00, 0x00, CSIPHY_DNP_PARAMS},
- {0x0610, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0638, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0660, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0664, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
- },
+csiphy_lane_regs lane_regs_sdm845[] = {
+ {0x0004, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x002C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0034, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x001C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0014, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0028, 0x00, 0x00, CSIPHY_DNP_PARAMS},
+ {0x003C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0000, 0x91, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0008, 0x00, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+ {0x000c, 0x00, 0x00, CSIPHY_DNP_PARAMS},
+ {0x0010, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0038, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0060, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0064, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0704, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x072C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0734, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x071C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0714, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0728, 0x04, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x073C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0700, 0x80, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0708, 0x14, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+ {0x070C, 0xA5, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0710, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0738, 0x1F, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0760, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0764, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0204, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x022C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0234, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x021C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0214, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0228, 0x00, 0x00, CSIPHY_DNP_PARAMS},
+ {0x023C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0200, 0x91, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+ {0x020C, 0x00, 0x00, CSIPHY_DNP_PARAMS},
+ {0x0210, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0238, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0260, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0264, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0404, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x042C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0434, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x041C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0414, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0428, 0x00, 0x00, CSIPHY_DNP_PARAMS},
+ {0x043C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0400, 0x91, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0408, 0x00, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+ {0x040C, 0x00, 0x00, CSIPHY_DNP_PARAMS},
+ {0x0410, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0438, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0460, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0464, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0604, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x062C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0634, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x061C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0614, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0628, 0x00, 0x00, CSIPHY_DNP_PARAMS},
+ {0x063C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0600, 0x91, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+ {0x060C, 0x00, 0x00, CSIPHY_DNP_PARAMS},
+ {0x0610, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0638, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0660, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0664, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
};
/* GEN2 1.1 2PH */
static const struct
-csiphy_reg_t lane_regs_sc8280xp[5][14] = {
- {
- {0x0004, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x002C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0034, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x001C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0014, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0028, 0x00, 0x00, CSIPHY_DNP_PARAMS},
- {0x003C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0000, 0x90, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0008, 0x0E, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
- {0x000C, 0x00, 0x00, CSIPHY_DNP_PARAMS},
- {0x0010, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0038, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0060, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0064, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
- },
- {
- {0x0704, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x072C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0734, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x071C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0714, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0728, 0x04, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x073C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0700, 0x80, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0708, 0x0E, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
- {0x070C, 0xA5, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0710, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0738, 0x1F, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0760, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0764, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
- },
- {
- {0x0204, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x022C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0234, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x021C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0214, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0228, 0x00, 0x00, CSIPHY_DNP_PARAMS},
- {0x023C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0200, 0x90, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0208, 0x0E, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
- {0x020C, 0x00, 0x00, CSIPHY_DNP_PARAMS},
- {0x0210, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0238, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0260, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0264, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
- },
- {
- {0x0404, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x042C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0434, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x041C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0414, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0428, 0x00, 0x00, CSIPHY_DNP_PARAMS},
- {0x043C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0400, 0x90, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0408, 0x0E, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
- {0x040C, 0x00, 0x00, CSIPHY_DNP_PARAMS},
- {0x0410, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0438, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0460, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0464, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
- },
- {
- {0x0604, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x062C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0634, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x061C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0614, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0628, 0x00, 0x00, CSIPHY_DNP_PARAMS},
- {0x063C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0600, 0x90, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0608, 0x0E, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
- {0x060C, 0x00, 0x00, CSIPHY_DNP_PARAMS},
- {0x0610, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0638, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0660, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0664, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
- },
+csiphy_lane_regs lane_regs_sc8280xp[] = {
+ {0x0004, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x002C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0034, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x001C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0014, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0028, 0x00, 0x00, CSIPHY_DNP_PARAMS},
+ {0x003C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0000, 0x90, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0008, 0x0E, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+ {0x000C, 0x00, 0x00, CSIPHY_DNP_PARAMS},
+ {0x0010, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0038, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0060, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0064, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0704, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x072C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0734, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x071C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0714, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0728, 0x04, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x073C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0700, 0x80, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0708, 0x0E, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+ {0x070C, 0xA5, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0710, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0738, 0x1F, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0760, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0764, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0204, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x022C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0234, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x021C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0214, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0228, 0x00, 0x00, CSIPHY_DNP_PARAMS},
+ {0x023C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0200, 0x90, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0208, 0x0E, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+ {0x020C, 0x00, 0x00, CSIPHY_DNP_PARAMS},
+ {0x0210, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0238, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0260, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0264, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0404, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x042C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0434, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x041C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0414, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0428, 0x00, 0x00, CSIPHY_DNP_PARAMS},
+ {0x043C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0400, 0x90, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0408, 0x0E, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+ {0x040C, 0x00, 0x00, CSIPHY_DNP_PARAMS},
+ {0x0410, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0438, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0460, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0464, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0604, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x062C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0634, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x061C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0614, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0628, 0x00, 0x00, CSIPHY_DNP_PARAMS},
+ {0x063C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0600, 0x90, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0608, 0x0E, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+ {0x060C, 0x00, 0x00, CSIPHY_DNP_PARAMS},
+ {0x0610, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0638, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0660, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0664, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
};
/* GEN2 1.2.1 2PH */
static const struct
-csiphy_reg_t lane_regs_sm8250[5][20] = {
- {
- {0x0030, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0900, 0x05, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0908, 0x10, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0904, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0904, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0004, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x002C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0034, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0010, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x001C, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x003C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0008, 0x10, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
- {0x0000, 0x8D, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x000c, 0x00, 0x00, CSIPHY_DNP_PARAMS},
- {0x0038, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0014, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0028, 0x00, 0x00, CSIPHY_DNP_PARAMS},
- {0x0024, 0x00, 0x00, CSIPHY_DNP_PARAMS},
- {0x0800, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0884, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
- },
- {
- {0x0730, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0C80, 0x05, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0C88, 0x10, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0C84, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0C84, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0704, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x072C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0734, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0710, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x071C, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x073C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0708, 0x10, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
- {0x0700, 0x80, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x070c, 0xA5, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0738, 0x1F, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0714, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0728, 0x04, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0724, 0x00, 0x00, CSIPHY_DNP_PARAMS},
- {0x0800, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0884, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
- },
- {
- {0x0230, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0A00, 0x05, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0A08, 0x10, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0A04, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0A04, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0204, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x022C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0234, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0210, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x021C, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x023C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0208, 0x10, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
- {0x0200, 0x8D, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x020c, 0x00, 0x00, CSIPHY_DNP_PARAMS},
- {0x0238, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0214, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0228, 0x00, 0x00, CSIPHY_DNP_PARAMS},
- {0x0224, 0x00, 0x00, CSIPHY_DNP_PARAMS},
- {0x0800, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0884, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
- },
- {
- {0x0430, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0B00, 0x05, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0B08, 0x10, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0B04, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0B04, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0404, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x042C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0434, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0410, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x041C, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x043C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0408, 0x10, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
- {0x0400, 0x8D, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x040c, 0x00, 0x00, CSIPHY_DNP_PARAMS},
- {0x0438, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0414, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0428, 0x00, 0x00, CSIPHY_DNP_PARAMS},
- {0x0424, 0x00, 0x00, CSIPHY_DNP_PARAMS},
- {0x0800, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0884, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
- },
- {
- {0x0630, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0C00, 0x05, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0C08, 0x10, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0C04, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0C04, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0604, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x062C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0634, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0610, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x061C, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x063C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0608, 0x10, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
- {0x0600, 0x8D, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x060c, 0x00, 0x00, CSIPHY_DNP_PARAMS},
- {0x0638, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0614, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0628, 0x00, 0x00, CSIPHY_DNP_PARAMS},
- {0x0624, 0x00, 0x00, CSIPHY_DNP_PARAMS},
- {0x0800, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
- {0x0884, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
- },
+csiphy_lane_regs lane_regs_sm8250[] = {
+ {0x0030, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0900, 0x05, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0908, 0x10, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0904, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0904, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0004, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x002C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0034, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0010, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x001C, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x003C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0008, 0x10, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+ {0x0000, 0x8D, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x000c, 0x00, 0x00, CSIPHY_DNP_PARAMS},
+ {0x0038, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0014, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0028, 0x00, 0x00, CSIPHY_DNP_PARAMS},
+ {0x0024, 0x00, 0x00, CSIPHY_DNP_PARAMS},
+ {0x0800, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0884, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0730, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0C80, 0x05, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0C88, 0x10, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0C84, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0C84, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0704, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x072C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0734, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0710, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x071C, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x073C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0708, 0x10, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+ {0x0700, 0x80, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x070c, 0xA5, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0738, 0x1F, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0714, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0728, 0x04, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0724, 0x00, 0x00, CSIPHY_DNP_PARAMS},
+ {0x0800, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0884, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0230, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0A00, 0x05, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0A08, 0x10, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0A04, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0A04, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0204, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x022C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0234, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0210, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x021C, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x023C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0208, 0x10, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+ {0x0200, 0x8D, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x020c, 0x00, 0x00, CSIPHY_DNP_PARAMS},
+ {0x0238, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0214, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0228, 0x00, 0x00, CSIPHY_DNP_PARAMS},
+ {0x0224, 0x00, 0x00, CSIPHY_DNP_PARAMS},
+ {0x0800, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0884, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0430, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0B00, 0x05, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0B08, 0x10, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0B04, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0B04, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0404, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x042C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0434, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0410, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x041C, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x043C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0408, 0x10, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+ {0x0400, 0x8D, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x040c, 0x00, 0x00, CSIPHY_DNP_PARAMS},
+ {0x0438, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0414, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0428, 0x00, 0x00, CSIPHY_DNP_PARAMS},
+ {0x0424, 0x00, 0x00, CSIPHY_DNP_PARAMS},
+ {0x0800, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0884, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0630, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0C00, 0x05, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0C08, 0x10, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0C04, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0C04, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0604, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x062C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0634, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0610, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x061C, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x063C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0608, 0x10, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+ {0x0600, 0x8D, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x060c, 0x00, 0x00, CSIPHY_DNP_PARAMS},
+ {0x0638, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0614, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0628, 0x00, 0x00, CSIPHY_DNP_PARAMS},
+ {0x0624, 0x00, 0x00, CSIPHY_DNP_PARAMS},
+ {0x0800, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0884, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+};
+
+/* GEN2 2.1.2 2PH DPHY mode */
+static const struct
+csiphy_lane_regs lane_regs_sm8550[] = {
+ {0x0E90, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0E98, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0E94, 0x07, 0x01, CSIPHY_DEFAULT_PARAMS},
+ {0x00A0, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0090, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0098, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0094, 0x07, 0x01, CSIPHY_DEFAULT_PARAMS},
+ {0x0494, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x04A0, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0490, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0498, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0494, 0x07, 0x01, CSIPHY_DEFAULT_PARAMS},
+ {0x0894, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x08A0, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0890, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0898, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0894, 0x07, 0x01, CSIPHY_DEFAULT_PARAMS},
+ {0x0C94, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0CA0, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0C90, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0C98, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0C94, 0x07, 0x01, CSIPHY_DEFAULT_PARAMS},
+ {0x0E30, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0E28, 0x04, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0E00, 0x80, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0E0C, 0xFF, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0E38, 0x1F, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0E2C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0E34, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0E1C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0E14, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0E3C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0E04, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0E20, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0E08, 0x10, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+ {0x0E10, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0030, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0000, 0x8E, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0038, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x002C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0034, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x001C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0014, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x003C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0004, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0020, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0008, 0x10, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+ {0x0010, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0430, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0400, 0x8E, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0438, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x042C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0434, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x041C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0414, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x043C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0404, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0420, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0408, 0x10, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+ {0x0410, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0830, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0800, 0x8E, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0838, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x082C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0834, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x081C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0814, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x083C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0804, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0820, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0808, 0x10, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+ {0x0810, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0C30, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0C00, 0x8E, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0C38, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0C2C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0C34, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0C1C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0C14, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0C3C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0C04, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0C20, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0C08, 0x10, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+ {0x0C10, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0094, 0xD7, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x005C, 0x04, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0060, 0xBD, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0064, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0494, 0xD7, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x045C, 0x04, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0460, 0xBD, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0464, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0894, 0xD7, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x085C, 0x04, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0860, 0xBD, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0864, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0C94, 0xD7, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0C5C, 0x04, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0C60, 0xBD, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0C64, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
};
static void csiphy_hw_version_read(struct csiphy_device *csiphy,
struct device *dev)
{
+ struct csiphy_device_regs *regs = csiphy->regs;
u32 hw_version;
- writel(CSIPHY_3PH_CMN_CSI_COMMON_CTRL6_SHOW_REV_ID,
- csiphy->base + CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(6));
+ writel(CSIPHY_3PH_CMN_CSI_COMMON_CTRL6_SHOW_REV_ID, csiphy->base +
+ CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(regs->offset, 6));
hw_version = readl_relaxed(csiphy->base +
- CSIPHY_3PH_CMN_CSI_COMMON_STATUSn(12));
+ CSIPHY_3PH_CMN_CSI_COMMON_STATUSn(regs->offset, 12));
hw_version |= readl_relaxed(csiphy->base +
- CSIPHY_3PH_CMN_CSI_COMMON_STATUSn(13)) << 8;
+ CSIPHY_3PH_CMN_CSI_COMMON_STATUSn(regs->offset, 13)) << 8;
hw_version |= readl_relaxed(csiphy->base +
- CSIPHY_3PH_CMN_CSI_COMMON_STATUSn(14)) << 16;
+ CSIPHY_3PH_CMN_CSI_COMMON_STATUSn(regs->offset, 14)) << 16;
hw_version |= readl_relaxed(csiphy->base +
- CSIPHY_3PH_CMN_CSI_COMMON_STATUSn(15)) << 24;
+ CSIPHY_3PH_CMN_CSI_COMMON_STATUSn(regs->offset, 15)) << 24;
dev_dbg(dev, "CSIPHY 3PH HW Version = 0x%08x\n", hw_version);
}
@@ -374,31 +450,39 @@ static void csiphy_hw_version_read(struct csiphy_device *csiphy,
*/
static void csiphy_reset(struct csiphy_device *csiphy)
{
- writel_relaxed(0x1, csiphy->base + CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(0));
+ struct csiphy_device_regs *regs = csiphy->regs;
+
+ writel_relaxed(0x1, csiphy->base +
+ CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(regs->offset, 0));
usleep_range(5000, 8000);
- writel_relaxed(0x0, csiphy->base + CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(0));
+ writel_relaxed(0x0, csiphy->base +
+ CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(regs->offset, 0));
}
static irqreturn_t csiphy_isr(int irq, void *dev)
{
struct csiphy_device *csiphy = dev;
+ struct csiphy_device_regs *regs = csiphy->regs;
int i;
for (i = 0; i < 11; i++) {
int c = i + 22;
u8 val = readl_relaxed(csiphy->base +
- CSIPHY_3PH_CMN_CSI_COMMON_STATUSn(i));
+ CSIPHY_3PH_CMN_CSI_COMMON_STATUSn(regs->offset, i));
writel_relaxed(val, csiphy->base +
- CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(c));
+ CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(regs->offset, c));
}
- writel_relaxed(0x1, csiphy->base + CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(10));
- writel_relaxed(0x0, csiphy->base + CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(10));
+ writel_relaxed(0x1, csiphy->base +
+ CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(regs->offset, 10));
+ writel_relaxed(0x0, csiphy->base +
+ CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(regs->offset, 10));
- for (i = 22; i < 33; i++)
+ for (i = 22; i < 33; i++) {
writel_relaxed(0x0, csiphy->base +
- CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(i));
+ CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(regs->offset, i));
+ }
return IRQ_HANDLED;
}
@@ -500,46 +584,22 @@ static void csiphy_gen1_config_lanes(struct csiphy_device *csiphy,
static void csiphy_gen2_config_lanes(struct csiphy_device *csiphy,
u8 settle_cnt)
{
- const struct csiphy_reg_t *r;
- int i, l, array_size;
+ const struct csiphy_lane_regs *r = csiphy->regs->lane_regs;
+ int i, array_size = csiphy->regs->lane_array_size;
u32 val;
- switch (csiphy->camss->res->version) {
- case CAMSS_7280:
- r = &lane_regs_sm8250[0][0];
- array_size = ARRAY_SIZE(lane_regs_sm8250[0]);
- break;
- case CAMSS_8250:
- r = &lane_regs_sm8250[0][0];
- array_size = ARRAY_SIZE(lane_regs_sm8250[0]);
- break;
- case CAMSS_8280XP:
- r = &lane_regs_sc8280xp[0][0];
- array_size = ARRAY_SIZE(lane_regs_sc8280xp[0]);
- break;
- case CAMSS_845:
- r = &lane_regs_sdm845[0][0];
- array_size = ARRAY_SIZE(lane_regs_sdm845[0]);
- break;
- default:
- WARN(1, "unknown cspi version\n");
- return;
- }
-
- for (l = 0; l < 5; l++) {
- for (i = 0; i < array_size; i++, r++) {
- switch (r->csiphy_param_type) {
- case CSIPHY_SETTLE_CNT_LOWER_BYTE:
- val = settle_cnt & 0xff;
- break;
- case CSIPHY_DNP_PARAMS:
- continue;
- default:
- val = r->reg_data;
- break;
- }
- writel_relaxed(val, csiphy->base + r->reg_addr);
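+ /* walk the per-SoC lane init table; patch in the settle count, skip DNP entries */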
+ for (i = 0; i < array_size; i++, r++) {
+ switch (r->csiphy_param_type) {
+ case CSIPHY_SETTLE_CNT_LOWER_BYTE:
+ val = settle_cnt & 0xff;
+ break;
+ case CSIPHY_DNP_PARAMS:
+ continue;
+ default:
+ val = r->reg_data;
+ break;
}
+ writel_relaxed(val, csiphy->base + r->reg_addr);
}
}
@@ -565,6 +625,7 @@ static bool csiphy_is_gen2(u32 version)
case CAMSS_8250:
case CAMSS_8280XP:
case CAMSS_845:
+ case CAMSS_8550:
ret = true;
break;
}
@@ -577,6 +638,7 @@ static void csiphy_lanes_enable(struct csiphy_device *csiphy,
s64 link_freq, u8 lane_mask)
{
struct csiphy_lanes_cfg *c = &cfg->csi2->lane_cfg;
+ struct csiphy_device_regs *regs = csiphy->regs;
u8 settle_cnt;
u8 val;
int i;
@@ -587,16 +649,20 @@ static void csiphy_lanes_enable(struct csiphy_device *csiphy,
for (i = 0; i < c->num_data; i++)
val |= BIT(c->data[i].pos * 2);
- writel_relaxed(val, csiphy->base + CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(5));
+ writel_relaxed(val, csiphy->base +
+ CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(regs->offset, 5));
val = CSIPHY_3PH_CMN_CSI_COMMON_CTRL6_COMMON_PWRDN_B;
- writel_relaxed(val, csiphy->base + CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(6));
+ writel_relaxed(val, csiphy->base +
+ CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(regs->offset, 6));
val = 0x02;
- writel_relaxed(val, csiphy->base + CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(7));
+ writel_relaxed(val, csiphy->base +
+ CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(regs->offset, 7));
val = 0x00;
- writel_relaxed(val, csiphy->base + CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(0));
+ writel_relaxed(val, csiphy->base +
+ CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(regs->offset, 0));
if (csiphy_is_gen2(csiphy->camss->res->version))
csiphy_gen2_config_lanes(csiphy, settle_cnt);
@@ -604,18 +670,61 @@ static void csiphy_lanes_enable(struct csiphy_device *csiphy,
csiphy_gen1_config_lanes(csiphy, cfg, settle_cnt);
/* IRQ_MASK registers - disable all interrupts */
- for (i = 11; i < 22; i++)
- writel_relaxed(0, csiphy->base + CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(i));
+ for (i = 11; i < 22; i++) {
+ writel_relaxed(0, csiphy->base +
+ CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(regs->offset, i));
+ }
}
static void csiphy_lanes_disable(struct csiphy_device *csiphy,
struct csiphy_config *cfg)
{
+ struct csiphy_device_regs *regs = csiphy->regs;
+
writel_relaxed(0, csiphy->base +
- CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(5));
+ CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(regs->offset, 5));
writel_relaxed(0, csiphy->base +
- CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(6));
+ CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(regs->offset, 6));
+}
+
+static int csiphy_init(struct csiphy_device *csiphy)
+{
+ struct device *dev = csiphy->camss->dev;
+ struct csiphy_device_regs *regs;
+
+ regs = devm_kmalloc(dev, sizeof(*regs), GFP_KERNEL);
+ if (!regs)
+ return -ENOMEM;
+
+ csiphy->regs = regs;
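+ /* offset of the 3PH common register block; SM8550 overrides this to 0x1000 below */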
+ regs->offset = 0x800;
+
+ switch (csiphy->camss->res->version) {
+ case CAMSS_845:
+ regs->lane_regs = &lane_regs_sdm845[0];
+ regs->lane_array_size = ARRAY_SIZE(lane_regs_sdm845);
+ break;
+ case CAMSS_7280:
+ case CAMSS_8250:
+ regs->lane_regs = &lane_regs_sm8250[0];
+ regs->lane_array_size = ARRAY_SIZE(lane_regs_sm8250);
+ break;
+ case CAMSS_8280XP:
+ regs->lane_regs = &lane_regs_sc8280xp[0];
+ regs->lane_array_size = ARRAY_SIZE(lane_regs_sc8280xp);
+ break;
+ case CAMSS_8550:
+ regs->lane_regs = &lane_regs_sm8550[0];
+ regs->lane_array_size = ARRAY_SIZE(lane_regs_sm8550);
+ regs->offset = 0x1000;
+ break;
+ default:
+ WARN(1, "unknown csiphy version\n");
+ return -ENODEV;
+ }
+
+ return 0;
}
const struct csiphy_hw_ops csiphy_ops_3ph_1_0 = {
@@ -625,4 +734,5 @@ const struct csiphy_hw_ops csiphy_ops_3ph_1_0 = {
.lanes_enable = csiphy_lanes_enable,
.lanes_disable = csiphy_lanes_disable,
.isr = csiphy_isr,
+ .init = csiphy_init,
};
diff --git a/drivers/media/platform/qcom/camss/camss-csiphy.c b/drivers/media/platform/qcom/camss/camss-csiphy.c
index 3791c2d8a6cf..c053616558a7 100644
--- a/drivers/media/platform/qcom/camss/camss-csiphy.c
+++ b/drivers/media/platform/qcom/camss/camss-csiphy.c
@@ -594,6 +594,10 @@ int msm_csiphy_subdev_init(struct camss *camss,
csiphy->cfg.combo_mode = 0;
csiphy->res = &res->csiphy;
+ ret = csiphy->res->hw_ops->init(csiphy);
+ if (ret)
+ return ret;
+
/* Memory */
csiphy->base = devm_platform_ioremap_resource_byname(pdev, res->reg[0]);
diff --git a/drivers/media/platform/qcom/camss/camss-csiphy.h b/drivers/media/platform/qcom/camss/camss-csiphy.h
index 90cc3f976643..86b98b37838e 100644
--- a/drivers/media/platform/qcom/camss/camss-csiphy.h
+++ b/drivers/media/platform/qcom/camss/camss-csiphy.h
@@ -77,6 +77,7 @@ struct csiphy_hw_ops {
void (*lanes_disable)(struct csiphy_device *csiphy,
struct csiphy_config *cfg);
irqreturn_t (*isr)(int irq, void *dev);
+ int (*init)(struct csiphy_device *csiphy);
};
struct csiphy_subdev_resources {
@@ -84,6 +85,12 @@ struct csiphy_subdev_resources {
const struct csiphy_formats *formats;
};
+struct csiphy_device_regs {
+ const struct csiphy_lane_regs *lane_regs;
+ int lane_array_size;
+ u32 offset;
+};
+
struct csiphy_device {
struct camss *camss;
u8 id;
@@ -102,6 +109,7 @@ struct csiphy_device {
struct csiphy_config cfg;
struct v4l2_mbus_framefmt fmt[MSM_CSIPHY_PADS_NUM];
const struct csiphy_subdev_resources *res;
+ struct csiphy_device_regs *regs;
};
struct camss_subdev_resources;
diff --git a/drivers/media/platform/qcom/camss/camss-vfe-17x.c b/drivers/media/platform/qcom/camss/camss-vfe-17x.c
index 380c99321030..e5ee7e717b3b 100644
--- a/drivers/media/platform/qcom/camss/camss-vfe-17x.c
+++ b/drivers/media/platform/qcom/camss/camss-vfe-17x.c
@@ -14,8 +14,6 @@
#include "camss.h"
#include "camss-vfe.h"
-#define VFE_HW_VERSION (0x000)
-
#define VFE_GLOBAL_RESET_CMD (0x018)
#define GLOBAL_RESET_CMD_CORE BIT(0)
#define GLOBAL_RESET_CMD_CAMIF BIT(1)
@@ -176,20 +174,6 @@
#define VFE_BUS_WM_FRAME_INC(n) (0x2258 + (n) * 0x100)
#define VFE_BUS_WM_BURST_LIMIT(n) (0x225c + (n) * 0x100)
-static u32 vfe_hw_version(struct vfe_device *vfe)
-{
- u32 hw_version = readl_relaxed(vfe->base + VFE_HW_VERSION);
-
- u32 gen = (hw_version >> 28) & 0xF;
- u32 rev = (hw_version >> 16) & 0xFFF;
- u32 step = hw_version & 0xFFFF;
-
- dev_dbg(vfe->camss->dev, "VFE HW Version = %u.%u.%u\n",
- gen, rev, step);
-
- return hw_version;
-}
-
static inline void vfe_reg_set(struct vfe_device *vfe, u32 reg, u32 set_bits)
{
u32 bits = readl_relaxed(vfe->base + reg);
@@ -438,62 +422,6 @@ error:
return -EINVAL;
}
-static int vfe_enable_output(struct vfe_line *line)
-{
- struct vfe_device *vfe = to_vfe(line);
- struct vfe_output *output = &line->output;
- const struct vfe_hw_ops *ops = vfe->res->hw_ops;
- struct media_entity *sensor;
- unsigned long flags;
- unsigned int frame_skip = 0;
- unsigned int i;
-
- sensor = camss_find_sensor(&line->subdev.entity);
- if (sensor) {
- struct v4l2_subdev *subdev = media_entity_to_v4l2_subdev(sensor);
-
- v4l2_subdev_call(subdev, sensor, g_skip_frames, &frame_skip);
- /* Max frame skip is 29 frames */
- if (frame_skip > VFE_FRAME_DROP_VAL - 1)
- frame_skip = VFE_FRAME_DROP_VAL - 1;
- }
-
- spin_lock_irqsave(&vfe->output_lock, flags);
-
- ops->reg_update_clear(vfe, line->id);
-
- if (output->state > VFE_OUTPUT_RESERVED) {
- dev_err(vfe->camss->dev, "Output is not in reserved state %d\n",
- output->state);
- spin_unlock_irqrestore(&vfe->output_lock, flags);
- return -EINVAL;
- }
-
- WARN_ON(output->gen2.active_num);
-
- output->state = VFE_OUTPUT_ON;
-
- output->sequence = 0;
- output->wait_reg_update = 0;
- reinit_completion(&output->reg_update);
-
- vfe_wm_start(vfe, output->wm_idx[0], line);
-
- for (i = 0; i < 2; i++) {
- output->buf[i] = vfe_buf_get_pending(output);
- if (!output->buf[i])
- break;
- output->gen2.active_num++;
- vfe_wm_update(vfe, output->wm_idx[0], output->buf[i]->addr[0], line);
- }
-
- ops->reg_update(vfe, line->id);
-
- spin_unlock_irqrestore(&vfe->output_lock, flags);
-
- return 0;
-}
-
/*
* vfe_enable - Enable streaming on VFE line
* @line: VFE line
@@ -518,7 +446,7 @@ static int vfe_enable(struct vfe_line *line)
if (ret < 0)
goto error_get_output;
- ret = vfe_enable_output(line);
+ ret = vfe_enable_output_v2(line);
if (ret < 0)
goto error_enable_output;
@@ -627,40 +555,6 @@ out_unlock:
spin_unlock_irqrestore(&vfe->output_lock, flags);
}
-/*
- * vfe_queue_buffer - Add empty buffer
- * @vid: Video device structure
- * @buf: Buffer to be enqueued
- *
- * Add an empty buffer - depending on the current number of buffers it will be
- * put in pending buffer queue or directly given to the hardware to be filled.
- *
- * Return 0 on success or a negative error code otherwise
- */
-static int vfe_queue_buffer(struct camss_video *vid,
- struct camss_buffer *buf)
-{
- struct vfe_line *line = container_of(vid, struct vfe_line, video_out);
- struct vfe_device *vfe = to_vfe(line);
- struct vfe_output *output;
- unsigned long flags;
-
- output = &line->output;
-
- spin_lock_irqsave(&vfe->output_lock, flags);
-
- if (output->state == VFE_OUTPUT_ON && output->gen2.active_num < 2) {
- output->buf[output->gen2.active_num++] = buf;
- vfe_wm_update(vfe, output->wm_idx[0], buf->addr[0], line);
- } else {
- vfe_buf_add_pending(output, buf);
- }
-
- spin_unlock_irqrestore(&vfe->output_lock, flags);
-
- return 0;
-}
-
static const struct vfe_isr_ops vfe_isr_ops_170 = {
.reset_ack = vfe_isr_reset_ack,
.halt_ack = vfe_isr_halt_ack,
@@ -671,7 +565,7 @@ static const struct vfe_isr_ops vfe_isr_ops_170 = {
};
static const struct camss_video_ops vfe_video_ops_170 = {
- .queue_buffer = vfe_queue_buffer,
+ .queue_buffer = vfe_queue_buffer_v2,
.flush_buffers = vfe_flush_buffers,
};
@@ -695,5 +589,7 @@ const struct vfe_hw_ops vfe_ops_170 = {
.vfe_enable = vfe_enable,
.vfe_halt = vfe_halt,
.violation_read = vfe_violation_read,
+ .vfe_wm_start = vfe_wm_start,
.vfe_wm_stop = vfe_wm_stop,
+ .vfe_wm_update = vfe_wm_update,
};
diff --git a/drivers/media/platform/qcom/camss/camss-vfe-4-1.c b/drivers/media/platform/qcom/camss/camss-vfe-4-1.c
index 9a9007c3ff33..901677293d97 100644
--- a/drivers/media/platform/qcom/camss/camss-vfe-4-1.c
+++ b/drivers/media/platform/qcom/camss/camss-vfe-4-1.c
@@ -210,15 +210,6 @@
#define MSM_VFE_VFE0_UB_SIZE 1023
#define MSM_VFE_VFE0_UB_SIZE_RDI (MSM_VFE_VFE0_UB_SIZE / 3)
-static u32 vfe_hw_version(struct vfe_device *vfe)
-{
- u32 hw_version = readl_relaxed(vfe->base + VFE_0_HW_VERSION);
-
- dev_dbg(vfe->camss->dev, "VFE HW Version = 0x%08x\n", hw_version);
-
- return hw_version;
-}
-
static u16 vfe_get_ub_size(u8 vfe_id)
{
if (vfe_id == 0)
diff --git a/drivers/media/platform/qcom/camss/camss-vfe-4-7.c b/drivers/media/platform/qcom/camss/camss-vfe-4-7.c
index ce0719106bd3..76729607db02 100644
--- a/drivers/media/platform/qcom/camss/camss-vfe-4-7.c
+++ b/drivers/media/platform/qcom/camss/camss-vfe-4-7.c
@@ -18,8 +18,6 @@
#include "camss-vfe-gen1.h"
-#define VFE_0_HW_VERSION 0x000
-
#define VFE_0_GLOBAL_RESET_CMD 0x018
#define VFE_0_GLOBAL_RESET_CMD_CORE BIT(0)
#define VFE_0_GLOBAL_RESET_CMD_CAMIF BIT(1)
@@ -254,15 +252,6 @@
#define MSM_VFE_VFE1_UB_SIZE 1535
#define MSM_VFE_VFE1_UB_SIZE_RDI (MSM_VFE_VFE1_UB_SIZE / 3)
-static u32 vfe_hw_version(struct vfe_device *vfe)
-{
- u32 hw_version = readl_relaxed(vfe->base + VFE_0_HW_VERSION);
-
- dev_dbg(vfe->camss->dev, "VFE HW Version = 0x%08x\n", hw_version);
-
- return hw_version;
-}
-
static u16 vfe_get_ub_size(u8 vfe_id)
{
if (vfe_id == 0)
diff --git a/drivers/media/platform/qcom/camss/camss-vfe-4-8.c b/drivers/media/platform/qcom/camss/camss-vfe-4-8.c
index 6b59c8107a3c..b2f7d855d8dd 100644
--- a/drivers/media/platform/qcom/camss/camss-vfe-4-8.c
+++ b/drivers/media/platform/qcom/camss/camss-vfe-4-8.c
@@ -17,8 +17,6 @@
#include "camss-vfe.h"
#include "camss-vfe-gen1.h"
-#define VFE_0_HW_VERSION 0x000
-
#define VFE_0_GLOBAL_RESET_CMD 0x018
#define VFE_0_GLOBAL_RESET_CMD_CORE BIT(0)
#define VFE_0_GLOBAL_RESET_CMD_CAMIF BIT(1)
@@ -247,15 +245,6 @@
#define MSM_VFE_VFE1_UB_SIZE 1535
#define MSM_VFE_VFE1_UB_SIZE_RDI (MSM_VFE_VFE1_UB_SIZE / 3)
-static u32 vfe_hw_version(struct vfe_device *vfe)
-{
- u32 hw_version = readl_relaxed(vfe->base + VFE_0_HW_VERSION);
-
- dev_dbg(vfe->camss->dev, "VFE HW Version = 0x%08x\n", hw_version);
-
- return hw_version;
-}
-
static inline void vfe_reg_clr(struct vfe_device *vfe, u32 reg, u32 clr_bits)
{
u32 bits = readl_relaxed(vfe->base + reg);
diff --git a/drivers/media/platform/qcom/camss/camss-vfe-480.c b/drivers/media/platform/qcom/camss/camss-vfe-480.c
index dc2735476c82..4feea590a47b 100644
--- a/drivers/media/platform/qcom/camss/camss-vfe-480.c
+++ b/drivers/media/platform/qcom/camss/camss-vfe-480.c
@@ -15,8 +15,6 @@
#include "camss.h"
#include "camss-vfe.h"
-#define VFE_HW_VERSION (0x00)
-
#define VFE_GLOBAL_RESET_CMD (vfe_is_lite(vfe) ? 0x0c : 0x1c)
#define GLOBAL_RESET_HW_AND_REG (vfe_is_lite(vfe) ? BIT(1) : BIT(0))
@@ -92,19 +90,6 @@ static inline int bus_irq_mask_0_comp_done(struct vfe_device *vfe, int n)
#define MAX_VFE_OUTPUT_LINES 4
-static u32 vfe_hw_version(struct vfe_device *vfe)
-{
- u32 hw_version = readl_relaxed(vfe->base + VFE_HW_VERSION);
-
- u32 gen = (hw_version >> 28) & 0xF;
- u32 rev = (hw_version >> 16) & 0xFFF;
- u32 step = hw_version & 0xFFFF;
-
- dev_dbg(vfe->camss->dev, "VFE HW Version = %u.%u.%u\n", gen, rev, step);
-
- return hw_version;
-}
-
static void vfe_global_reset(struct vfe_device *vfe)
{
writel_relaxed(IRQ_MASK_0_RESET_ACK, vfe->base + VFE_IRQ_MASK(0));
@@ -167,18 +152,16 @@ static inline void vfe_reg_update_clear(struct vfe_device *vfe,
vfe->reg_update &= ~REG_UPDATE_RDI(vfe, line_id);
}
-static void vfe_enable_irq_common(struct vfe_device *vfe)
-{
- /* enable reset ack IRQ and top BUS status IRQ */
- writel_relaxed(IRQ_MASK_0_RESET_ACK | IRQ_MASK_0_BUS_TOP_IRQ,
- vfe->base + VFE_IRQ_MASK(0));
-}
-
-static void vfe_enable_lines_irq(struct vfe_device *vfe)
+static void vfe_enable_irq(struct vfe_device *vfe)
{
int i;
u32 bus_irq_mask = 0;
+ if (!vfe->stream_count)
+ /* enable reset ack IRQ and top BUS status IRQ */
+ writel(IRQ_MASK_0_RESET_ACK | IRQ_MASK_0_BUS_TOP_IRQ,
+ vfe->base + VFE_IRQ_MASK(0));
+
for (i = 0; i < MAX_VFE_OUTPUT_LINES; i++) {
 /* Enable IRQ for newly added lines, but also keep already running lines' IRQs */
if (vfe->line[i].output.state == VFE_OUTPUT_RESERVED ||
@@ -188,11 +171,10 @@ static void vfe_enable_lines_irq(struct vfe_device *vfe)
}
}
- writel_relaxed(bus_irq_mask, vfe->base + VFE_BUS_IRQ_MASK(0));
+ writel(bus_irq_mask, vfe->base + VFE_BUS_IRQ_MASK(0));
}
static void vfe_isr_reg_update(struct vfe_device *vfe, enum vfe_line_id line_id);
-static void vfe_isr_wm_done(struct vfe_device *vfe, u8 wm);
/*
* vfe_isr - VFE module interrupt handler
@@ -226,7 +208,7 @@ static irqreturn_t vfe_isr(int irq, void *dev)
vfe_isr_reg_update(vfe, i);
if (status & BUS_IRQ_MASK_0_COMP_DONE(vfe, RDI_COMP_GROUP(i)))
- vfe_isr_wm_done(vfe, i);
+ vfe_buf_done(vfe, i);
}
}
@@ -245,132 +227,6 @@ static int vfe_halt(struct vfe_device *vfe)
return 0;
}
-static int vfe_get_output(struct vfe_line *line)
-{
- struct vfe_device *vfe = to_vfe(line);
- struct vfe_output *output;
- unsigned long flags;
-
- spin_lock_irqsave(&vfe->output_lock, flags);
-
- output = &line->output;
- if (output->state > VFE_OUTPUT_RESERVED) {
- dev_err(vfe->camss->dev, "Output is running\n");
- goto error;
- }
-
- output->wm_num = 1;
-
- /* Correspondence between VFE line number and WM number.
- * line 0 -> RDI 0, line 1 -> RDI1, line 2 -> RDI2, line 3 -> PIX/RDI3
- * Note this 1:1 mapping will not work for PIX streams.
- */
- output->wm_idx[0] = line->id;
- vfe->wm_output_map[line->id] = line->id;
-
- output->drop_update_idx = 0;
-
- spin_unlock_irqrestore(&vfe->output_lock, flags);
-
- return 0;
-
-error:
- spin_unlock_irqrestore(&vfe->output_lock, flags);
- output->state = VFE_OUTPUT_OFF;
-
- return -EINVAL;
-}
-
-static int vfe_enable_output(struct vfe_line *line)
-{
- struct vfe_device *vfe = to_vfe(line);
- struct vfe_output *output = &line->output;
- unsigned long flags;
- unsigned int i;
-
- spin_lock_irqsave(&vfe->output_lock, flags);
-
- vfe_reg_update_clear(vfe, line->id);
-
- if (output->state > VFE_OUTPUT_RESERVED) {
- dev_err(vfe->camss->dev, "Output is not in reserved state %d\n",
- output->state);
- spin_unlock_irqrestore(&vfe->output_lock, flags);
- return -EINVAL;
- }
-
- WARN_ON(output->gen2.active_num);
-
- output->state = VFE_OUTPUT_ON;
-
- output->sequence = 0;
- output->wait_reg_update = 0;
- reinit_completion(&output->reg_update);
-
- vfe_wm_start(vfe, output->wm_idx[0], line);
-
- for (i = 0; i < 2; i++) {
- output->buf[i] = vfe_buf_get_pending(output);
- if (!output->buf[i])
- break;
- output->gen2.active_num++;
- vfe_wm_update(vfe, output->wm_idx[0], output->buf[i]->addr[0], line);
- }
-
- vfe_reg_update(vfe, line->id);
-
- spin_unlock_irqrestore(&vfe->output_lock, flags);
-
- return 0;
-}
-
-/*
- * vfe_enable - Enable streaming on VFE line
- * @line: VFE line
- *
- * Return 0 on success or a negative error code otherwise
- */
-static int vfe_enable(struct vfe_line *line)
-{
- struct vfe_device *vfe = to_vfe(line);
- int ret;
-
- mutex_lock(&vfe->stream_lock);
-
- if (!vfe->stream_count)
- vfe_enable_irq_common(vfe);
-
- vfe->stream_count++;
-
- vfe_enable_lines_irq(vfe);
-
- mutex_unlock(&vfe->stream_lock);
-
- ret = vfe_get_output(line);
- if (ret < 0)
- goto error_get_output;
-
- ret = vfe_enable_output(line);
- if (ret < 0)
- goto error_enable_output;
-
- vfe->was_streaming = 1;
-
- return 0;
-
-error_enable_output:
- vfe_put_output(line);
-
-error_get_output:
- mutex_lock(&vfe->stream_lock);
-
- vfe->stream_count--;
-
- mutex_unlock(&vfe->stream_lock);
-
- return ret;
-}
-
/*
* vfe_isr_reg_update - Process reg update interrupt
* @vfe: VFE Device
@@ -394,114 +250,48 @@ static void vfe_isr_reg_update(struct vfe_device *vfe, enum vfe_line_id line_id)
spin_unlock_irqrestore(&vfe->output_lock, flags);
}
-/*
- * vfe_isr_wm_done - Process write master done interrupt
- * @vfe: VFE Device
- * @wm: Write master id
- */
-static void vfe_isr_wm_done(struct vfe_device *vfe, u8 wm)
-{
- struct vfe_line *line = &vfe->line[vfe->wm_output_map[wm]];
- struct camss_buffer *ready_buf;
- struct vfe_output *output;
- unsigned long flags;
- u32 index;
- u64 ts = ktime_get_ns();
-
- spin_lock_irqsave(&vfe->output_lock, flags);
-
- if (vfe->wm_output_map[wm] == VFE_LINE_NONE) {
- dev_err_ratelimited(vfe->camss->dev,
- "Received wm done for unmapped index\n");
- goto out_unlock;
- }
- output = &vfe->line[vfe->wm_output_map[wm]].output;
-
- ready_buf = output->buf[0];
- if (!ready_buf) {
- dev_err_ratelimited(vfe->camss->dev,
- "Missing ready buf %d!\n", output->state);
- goto out_unlock;
- }
-
- ready_buf->vb.vb2_buf.timestamp = ts;
- ready_buf->vb.sequence = output->sequence++;
-
- index = 0;
- output->buf[0] = output->buf[1];
- if (output->buf[0])
- index = 1;
-
- output->buf[index] = vfe_buf_get_pending(output);
-
- if (output->buf[index])
- vfe_wm_update(vfe, output->wm_idx[0], output->buf[index]->addr[0], line);
- else
- output->gen2.active_num--;
-
- spin_unlock_irqrestore(&vfe->output_lock, flags);
-
- vb2_buffer_done(&ready_buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
-
- return;
+static const struct camss_video_ops vfe_video_ops_480 = {
+ .queue_buffer = vfe_queue_buffer_v2,
+ .flush_buffers = vfe_flush_buffers,
+};
-out_unlock:
- spin_unlock_irqrestore(&vfe->output_lock, flags);
+static void vfe_subdev_init(struct device *dev, struct vfe_device *vfe)
+{
+ vfe->video_ops = vfe_video_ops_480;
}
-/*
- * vfe_queue_buffer - Add empty buffer
- * @vid: Video device structure
- * @buf: Buffer to be enqueued
- *
- * Add an empty buffer - depending on the current number of buffers it will be
- * put in pending buffer queue or directly given to the hardware to be filled.
- *
- * Return 0 on success or a negative error code otherwise
- */
-static int vfe_queue_buffer(struct camss_video *vid,
- struct camss_buffer *buf)
+static void vfe_isr_read(struct vfe_device *vfe, u32 *value0, u32 *value1)
{
- struct vfe_line *line = container_of(vid, struct vfe_line, video_out);
- struct vfe_device *vfe = to_vfe(line);
- struct vfe_output *output;
- unsigned long flags;
-
- output = &line->output;
-
- spin_lock_irqsave(&vfe->output_lock, flags);
-
- if (output->state == VFE_OUTPUT_ON && output->gen2.active_num < 2) {
- output->buf[output->gen2.active_num++] = buf;
- vfe_wm_update(vfe, output->wm_idx[0], buf->addr[0], line);
- } else {
- vfe_buf_add_pending(output, buf);
- }
-
- spin_unlock_irqrestore(&vfe->output_lock, flags);
-
- return 0;
+ /* nop */
}
-static const struct camss_video_ops vfe_video_ops_480 = {
- .queue_buffer = vfe_queue_buffer,
- .flush_buffers = vfe_flush_buffers,
-};
+static void vfe_violation_read(struct vfe_device *vfe)
+{
+ /* nop */
+}
-static void vfe_subdev_init(struct device *dev, struct vfe_device *vfe)
+static void vfe_buf_done_480(struct vfe_device *vfe, int port_id)
{
- vfe->video_ops = vfe_video_ops_480;
+ /* nop */
}
const struct vfe_hw_ops vfe_ops_480 = {
+ .enable_irq = vfe_enable_irq,
.global_reset = vfe_global_reset,
.hw_version = vfe_hw_version,
.isr = vfe_isr,
+ .isr_read = vfe_isr_read,
+ .reg_update = vfe_reg_update,
+ .reg_update_clear = vfe_reg_update_clear,
.pm_domain_off = vfe_pm_domain_off,
.pm_domain_on = vfe_pm_domain_on,
.subdev_init = vfe_subdev_init,
.vfe_disable = vfe_disable,
- .vfe_enable = vfe_enable,
+ .vfe_enable = vfe_enable_v2,
.vfe_halt = vfe_halt,
+ .violation_read = vfe_violation_read,
+ .vfe_wm_start = vfe_wm_start,
.vfe_wm_stop = vfe_wm_stop,
+ .vfe_buf_done = vfe_buf_done_480,
+ .vfe_wm_update = vfe_wm_update,
};
diff --git a/drivers/media/platform/qcom/camss/camss-vfe-780.c b/drivers/media/platform/qcom/camss/camss-vfe-780.c
new file mode 100644
index 000000000000..b9812d70f91b
--- /dev/null
+++ b/drivers/media/platform/qcom/camss/camss-vfe-780.c
@@ -0,0 +1,159 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Qualcomm MSM Camera Subsystem - VFE (Video Front End) Module v780 (SM8550)
+ *
+ * Copyright (c) 2024 Qualcomm Technologies, Inc.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+
+#include "camss.h"
+#include "camss-vfe.h"
+
+#define BUS_REG_BASE (vfe_is_lite(vfe) ? 0x200 : 0xC00)
+
+#define VFE_BUS_WM_CGC_OVERRIDE (BUS_REG_BASE + 0x08)
+#define WM_CGC_OVERRIDE_ALL (0x7FFFFFF)
+
+#define VFE_BUS_WM_TEST_BUS_CTRL (BUS_REG_BASE + 0xDC)
+
+#define VFE_BUS_WM_CFG(n) (BUS_REG_BASE + 0x200 + (n) * 0x100)
+#define WM_CFG_EN BIT(0)
+#define WM_VIR_FRM_EN BIT(1)
+#define WM_CFG_MODE BIT(16)
+#define VFE_BUS_WM_IMAGE_ADDR(n) (BUS_REG_BASE + 0x204 + (n) * 0x100)
+#define VFE_BUS_WM_FRAME_INCR(n) (BUS_REG_BASE + 0x208 + (n) * 0x100)
+#define VFE_BUS_WM_IMAGE_CFG_0(n) (BUS_REG_BASE + 0x20c + (n) * 0x100)
+#define WM_IMAGE_CFG_0_DEFAULT_WIDTH (0xFFFF)
+#define VFE_BUS_WM_IMAGE_CFG_2(n) (BUS_REG_BASE + 0x214 + (n) * 0x100)
+#define WM_IMAGE_CFG_2_DEFAULT_STRIDE (0xFFFF)
+#define VFE_BUS_WM_PACKER_CFG(n) (BUS_REG_BASE + 0x218 + (n) * 0x100)
+
+#define VFE_BUS_WM_IRQ_SUBSAMPLE_PERIOD(n) (BUS_REG_BASE + 0x230 + (n) * 0x100)
+#define VFE_BUS_WM_IRQ_SUBSAMPLE_PATTERN(n) (BUS_REG_BASE + 0x234 + (n) * 0x100)
+#define VFE_BUS_WM_FRAMEDROP_PERIOD(n) (BUS_REG_BASE + 0x238 + (n) * 0x100)
+#define VFE_BUS_WM_FRAMEDROP_PATTERN(n) (BUS_REG_BASE + 0x23c + (n) * 0x100)
+
+#define VFE_BUS_WM_MMU_PREFETCH_CFG(n) (BUS_REG_BASE + 0x260 + (n) * 0x100)
+#define VFE_BUS_WM_MMU_PREFETCH_MAX_OFFSET(n) (BUS_REG_BASE + 0x264 + (n) * 0x100)
+
+/*
+ * Bus client mapping:
+ *
+ * Full VFE:
+ * 23 = RDI0, 24 = RDI1, 25 = RDI2
+ *
+ * VFE LITE:
+ * 0 = RDI0, 1 = RDI1, 2 = RDI3, 4 = RDI4
+ */
+#define RDI_WM(n) ((vfe_is_lite(vfe) ? 0x0 : 0x17) + (n))
+
+static void vfe_wm_start(struct vfe_device *vfe, u8 wm, struct vfe_line *line)
+{
+ struct v4l2_pix_format_mplane *pix =
+ &line->video_out.active_fmt.fmt.pix_mp;
+
+ wm = RDI_WM(wm);
+
+ /* no clock gating at bus input */
+ writel(WM_CGC_OVERRIDE_ALL, vfe->base + VFE_BUS_WM_CGC_OVERRIDE);
+
+ writel(0x0, vfe->base + VFE_BUS_WM_TEST_BUS_CTRL);
+
+ writel(ALIGN(pix->plane_fmt[0].bytesperline, 16) * pix->height >> 8,
+ vfe->base + VFE_BUS_WM_FRAME_INCR(wm));
+ writel((WM_IMAGE_CFG_0_DEFAULT_WIDTH & 0xFFFF),
+ vfe->base + VFE_BUS_WM_IMAGE_CFG_0(wm));
+ writel(WM_IMAGE_CFG_2_DEFAULT_STRIDE,
+ vfe->base + VFE_BUS_WM_IMAGE_CFG_2(wm));
+ writel(0, vfe->base + VFE_BUS_WM_PACKER_CFG(wm));
+
+ /* no dropped frames, one irq per frame */
+ writel(0, vfe->base + VFE_BUS_WM_FRAMEDROP_PERIOD(wm));
+ writel(1, vfe->base + VFE_BUS_WM_FRAMEDROP_PATTERN(wm));
+ writel(0, vfe->base + VFE_BUS_WM_IRQ_SUBSAMPLE_PERIOD(wm));
+ writel(1, vfe->base + VFE_BUS_WM_IRQ_SUBSAMPLE_PATTERN(wm));
+
+ writel(1, vfe->base + VFE_BUS_WM_MMU_PREFETCH_CFG(wm));
+ writel(0xFFFFFFFF, vfe->base + VFE_BUS_WM_MMU_PREFETCH_MAX_OFFSET(wm));
+
+ writel(WM_CFG_EN | WM_CFG_MODE, vfe->base + VFE_BUS_WM_CFG(wm));
+}
+
+static void vfe_wm_stop(struct vfe_device *vfe, u8 wm)
+{
+ wm = RDI_WM(wm);
+ writel(0, vfe->base + VFE_BUS_WM_CFG(wm));
+}
+
+static void vfe_wm_update(struct vfe_device *vfe, u8 wm, u32 addr,
+ struct vfe_line *line)
+{
+ wm = RDI_WM(wm);
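+ /* the image address register takes the buffer address shifted right by 8 bits */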
+ writel((addr >> 8) & 0xFFFFFFFF, vfe->base + VFE_BUS_WM_IMAGE_ADDR(wm));
+
+ dev_dbg(vfe->camss->dev, "wm:%d, image buf addr:0x%x\n",
+ wm, addr);
+}
+
+static void vfe_reg_update(struct vfe_device *vfe, enum vfe_line_id line_id)
+{
+ int port_id = line_id;
+
+ camss_reg_update(vfe->camss, vfe->id, port_id, false);
+}
+
+static inline void vfe_reg_update_clear(struct vfe_device *vfe,
+ enum vfe_line_id line_id)
+{
+ int port_id = line_id;
+
+ camss_reg_update(vfe->camss, vfe->id, port_id, true);
+}
+
+static const struct camss_video_ops vfe_video_ops_780 = {
+ .queue_buffer = vfe_queue_buffer_v2,
+ .flush_buffers = vfe_flush_buffers,
+};
+
+static void vfe_subdev_init(struct device *dev, struct vfe_device *vfe)
+{
+ vfe->video_ops = vfe_video_ops_780;
+}
+
+static void vfe_global_reset(struct vfe_device *vfe)
+{
+ vfe_isr_reset_ack(vfe);
+}
+
+static irqreturn_t vfe_isr(int irq, void *dev)
+{
+ /* nop */
+ return IRQ_HANDLED;
+}
+
+static int vfe_halt(struct vfe_device *vfe)
+{
+ /* rely on vfe_disable_output() to stop the VFE */
+ return 0;
+}
+
+const struct vfe_hw_ops vfe_ops_780 = {
+ .global_reset = vfe_global_reset,
+ .hw_version = vfe_hw_version,
+ .isr = vfe_isr,
+ .pm_domain_off = vfe_pm_domain_off,
+ .pm_domain_on = vfe_pm_domain_on,
+ .reg_update = vfe_reg_update,
+ .reg_update_clear = vfe_reg_update_clear,
+ .subdev_init = vfe_subdev_init,
+ .vfe_disable = vfe_disable,
+ .vfe_enable = vfe_enable_v2,
+ .vfe_halt = vfe_halt,
+ .vfe_wm_start = vfe_wm_start,
+ .vfe_wm_stop = vfe_wm_stop,
+ .vfe_buf_done = vfe_buf_done,
+ .vfe_wm_update = vfe_wm_update,
+};
diff --git a/drivers/media/platform/qcom/camss/camss-vfe-gen1.c b/drivers/media/platform/qcom/camss/camss-vfe-gen1.c
index eb33c03df27e..d84a375e3318 100644
--- a/drivers/media/platform/qcom/camss/camss-vfe-gen1.c
+++ b/drivers/media/platform/qcom/camss/camss-vfe-gen1.c
@@ -170,7 +170,7 @@ static int vfe_enable_output(struct vfe_line *line)
struct vfe_device *vfe = to_vfe(line);
struct vfe_output *output = &line->output;
const struct vfe_hw_ops *ops = vfe->res->hw_ops;
- struct media_entity *sensor;
+ struct media_pad *sensor_pad;
unsigned long flags;
unsigned int frame_skip = 0;
unsigned int i;
@@ -180,9 +180,10 @@ static int vfe_enable_output(struct vfe_line *line)
if (!ub_size)
return -EINVAL;
- sensor = camss_find_sensor(&line->subdev.entity);
- if (sensor) {
- struct v4l2_subdev *subdev = media_entity_to_v4l2_subdev(sensor);
+ sensor_pad = camss_find_sensor_pad(&line->subdev.entity);
+ if (sensor_pad) {
+ struct v4l2_subdev *subdev =
+ media_entity_to_v4l2_subdev(sensor_pad->entity);
v4l2_subdev_call(subdev, sensor, g_skip_frames, &frame_skip);
/* Max frame skip is 29 frames */
diff --git a/drivers/media/platform/qcom/camss/camss-vfe.c b/drivers/media/platform/qcom/camss/camss-vfe.c
index 95f6a1ac7eaf..cf0e8f5c004a 100644
--- a/drivers/media/platform/qcom/camss/camss-vfe.c
+++ b/drivers/media/platform/qcom/camss/camss-vfe.c
@@ -32,6 +32,11 @@
#define SCALER_RATIO_MAX 16
+#define VFE_HW_VERSION 0x0
+#define HW_VERSION_STEPPING 0
+#define HW_VERSION_REVISION 16
+#define HW_VERSION_GENERATION 28
+
static const struct camss_format_info formats_rdi_8x16[] = {
{ MEDIA_BUS_FMT_UYVY8_1X16, 8, V4L2_PIX_FMT_UYVY, 1,
PER_PLANE_DATA(0, 1, 1, 1, 1, 16) },
@@ -340,6 +345,7 @@ static u32 vfe_src_pad_code(struct vfe_line *line, u32 sink_code,
case CAMSS_8250:
case CAMSS_8280XP:
case CAMSS_845:
+ case CAMSS_8550:
switch (sink_code) {
case MEDIA_BUS_FMT_YUYV8_1X16:
{
@@ -400,10 +406,278 @@ static u32 vfe_src_pad_code(struct vfe_line *line, u32 sink_code,
return sink_code;
}
break;
+ default:
+ WARN(1, "Unsupported HW version: %x\n",
+ vfe->camss->res->version);
+ break;
}
return 0;
}
+/*
+ * vfe_hw_version - Read and log the VFE hardware version
+ * @vfe: VFE Device
+ *
+ * Return vfe hw version
+ */
+u32 vfe_hw_version(struct vfe_device *vfe)
+{
+ u32 hw_version = readl_relaxed(vfe->base + VFE_HW_VERSION);
+
+ u32 gen = (hw_version >> HW_VERSION_GENERATION) & 0xF;
+ u32 rev = (hw_version >> HW_VERSION_REVISION) & 0xFFF;
+ u32 step = (hw_version >> HW_VERSION_STEPPING) & 0xFFFF;
+
+ dev_info(vfe->camss->dev, "VFE:%d HW Version = %u.%u.%u\n",
+ vfe->id, gen, rev, step);
+
+ return hw_version;
+}
+
+/*
+ * vfe_buf_done - Process write master done interrupt
+ * @vfe: VFE Device
+ * @wm: Write master id
+ */
+void vfe_buf_done(struct vfe_device *vfe, int wm)
+{
+ struct vfe_line *line = &vfe->line[vfe->wm_output_map[wm]];
+ const struct vfe_hw_ops *ops = vfe->res->hw_ops;
+ struct camss_buffer *ready_buf;
+ struct vfe_output *output;
+ unsigned long flags;
+ u32 index;
+ u64 ts = ktime_get_ns();
+
+ spin_lock_irqsave(&vfe->output_lock, flags);
+
+ if (vfe->wm_output_map[wm] == VFE_LINE_NONE) {
+ dev_err_ratelimited(vfe->camss->dev,
+ "Received wm done for unmapped index\n");
+ goto out_unlock;
+ }
+ output = &vfe->line[vfe->wm_output_map[wm]].output;
+
+ ready_buf = output->buf[0];
+ if (!ready_buf) {
+ dev_err_ratelimited(vfe->camss->dev,
+ "Missing ready buf %d!\n", output->state);
+ goto out_unlock;
+ }
+
+ ready_buf->vb.vb2_buf.timestamp = ts;
+ ready_buf->vb.sequence = output->sequence++;
+
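+ /* shift the remaining queued buffer forward and refill from the pending list */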
+ index = 0;
+ output->buf[0] = output->buf[1];
+ if (output->buf[0])
+ index = 1;
+
+ output->buf[index] = vfe_buf_get_pending(output);
+
+ if (output->buf[index]) {
+ ops->vfe_wm_update(vfe, output->wm_idx[0],
+ output->buf[index]->addr[0],
+ line);
+ ops->reg_update(vfe, line->id);
+ } else {
+ output->gen2.active_num--;
+ }
+
+ spin_unlock_irqrestore(&vfe->output_lock, flags);
+
+ vb2_buffer_done(&ready_buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
+
+ return;
+
+out_unlock:
+ spin_unlock_irqrestore(&vfe->output_lock, flags);
+}
+
+int vfe_enable_output_v2(struct vfe_line *line)
+{
+ struct vfe_device *vfe = to_vfe(line);
+ struct vfe_output *output = &line->output;
+ const struct vfe_hw_ops *ops = vfe->res->hw_ops;
+ struct media_pad *sensor_pad;
+ unsigned long flags;
+ unsigned int frame_skip = 0;
+ unsigned int i;
+
+ sensor_pad = camss_find_sensor_pad(&line->subdev.entity);
+ if (sensor_pad) {
+ struct v4l2_subdev *subdev =
+ media_entity_to_v4l2_subdev(sensor_pad->entity);
+
+ v4l2_subdev_call(subdev, sensor, g_skip_frames, &frame_skip);
+ /* Max frame skip is 29 frames */
+ if (frame_skip > VFE_FRAME_DROP_VAL - 1)
+ frame_skip = VFE_FRAME_DROP_VAL - 1;
+ }
+
+ spin_lock_irqsave(&vfe->output_lock, flags);
+
+ ops->reg_update_clear(vfe, line->id);
+
+ if (output->state > VFE_OUTPUT_RESERVED) {
+ dev_err(vfe->camss->dev,
+ "Output is not in reserved state %d\n",
+ output->state);
+ spin_unlock_irqrestore(&vfe->output_lock, flags);
+ return -EINVAL;
+ }
+
+ WARN_ON(output->gen2.active_num);
+
+ output->state = VFE_OUTPUT_ON;
+
+ output->sequence = 0;
+ output->wait_reg_update = 0;
+ reinit_completion(&output->reg_update);
+
+ ops->vfe_wm_start(vfe, output->wm_idx[0], line);
+
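+ /* prime up to two buffers so the write master has addresses queued at stream start */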
+ for (i = 0; i < 2; i++) {
+ output->buf[i] = vfe_buf_get_pending(output);
+ if (!output->buf[i])
+ break;
+ output->gen2.active_num++;
+ ops->vfe_wm_update(vfe, output->wm_idx[0],
+ output->buf[i]->addr[0], line);
+ ops->reg_update(vfe, line->id);
+ }
+
+ spin_unlock_irqrestore(&vfe->output_lock, flags);
+
+ return 0;
+}
+
+/*
+ * vfe_queue_buffer_v2 - Add empty buffer
+ * @vid: Video device structure
+ * @buf: Buffer to be enqueued
+ *
+ * Add an empty buffer - depending on the current number of buffers it will be
+ * put in pending buffer queue or directly given to the hardware to be filled.
+ *
+ * Return 0 on success or a negative error code otherwise
+ */
+int vfe_queue_buffer_v2(struct camss_video *vid,
+ struct camss_buffer *buf)
+{
+ struct vfe_line *line = container_of(vid, struct vfe_line, video_out);
+ struct vfe_device *vfe = to_vfe(line);
+ const struct vfe_hw_ops *ops = vfe->res->hw_ops;
+ struct vfe_output *output;
+ unsigned long flags;
+
+ output = &line->output;
+
+ spin_lock_irqsave(&vfe->output_lock, flags);
+
+ if (output->state == VFE_OUTPUT_ON &&
+ output->gen2.active_num < 2) {
+ output->buf[output->gen2.active_num++] = buf;
+ ops->vfe_wm_update(vfe, output->wm_idx[0],
+ buf->addr[0], line);
+ ops->reg_update(vfe, line->id);
+ } else {
+ vfe_buf_add_pending(output, buf);
+ }
+
+ spin_unlock_irqrestore(&vfe->output_lock, flags);
+
+ return 0;
+}
+
+/*
+ * vfe_enable_v2 - Enable streaming on VFE line
+ * @line: VFE line
+ *
+ * Return 0 on success or a negative error code otherwise
+ */
+int vfe_enable_v2(struct vfe_line *line)
+{
+ struct vfe_device *vfe = to_vfe(line);
+ const struct vfe_hw_ops *ops = vfe->res->hw_ops;
+ int ret;
+
+ mutex_lock(&vfe->stream_lock);
+
+ if (vfe->res->hw_ops->enable_irq)
+ ops->enable_irq(vfe);
+
+ vfe->stream_count++;
+
+ mutex_unlock(&vfe->stream_lock);
+
+ ret = vfe_get_output_v2(line);
+ if (ret < 0)
+ goto error_get_output;
+
+ ret = vfe_enable_output_v2(line);
+ if (ret < 0)
+ goto error_enable_output;
+
+ vfe->was_streaming = 1;
+
+ return 0;
+
+error_enable_output:
+ vfe_put_output(line);
+
+error_get_output:
+ mutex_lock(&vfe->stream_lock);
+
+ vfe->stream_count--;
+
+ mutex_unlock(&vfe->stream_lock);
+
+ return ret;
+}
+
+/*
+ * vfe_get_output_v2 - Get vfe output port for corresponding VFE line
+ * @line: VFE line
+ *
+ * Return 0 on success or a negative error code otherwise
+ */
+int vfe_get_output_v2(struct vfe_line *line)
+{
+ struct vfe_device *vfe = to_vfe(line);
+ struct vfe_output *output;
+ unsigned long flags;
+
+ spin_lock_irqsave(&vfe->output_lock, flags);
+
+ output = &line->output;
+ if (output->state > VFE_OUTPUT_RESERVED) {
+ dev_err(vfe->camss->dev, "Output is running\n");
+ goto error;
+ }
+
+ output->wm_num = 1;
+
+ /* Correspondence between VFE line number and WM number.
+ * line 0 -> RDI 0, line 1 -> RDI1, line 2 -> RDI2, line 3 -> PIX/RDI3
+ * Note this 1:1 mapping will not work for PIX streams.
+ */
+ output->wm_idx[0] = line->id;
+ vfe->wm_output_map[line->id] = line->id;
+
+ output->drop_update_idx = 0;
+
+ spin_unlock_irqrestore(&vfe->output_lock, flags);
+
+ return 0;
+
+error:
+ spin_unlock_irqrestore(&vfe->output_lock, flags);
+ output->state = VFE_OUTPUT_OFF;
+
+ return -EINVAL;
+}
+
int vfe_reset(struct vfe_device *vfe)
{
unsigned long time;
@@ -1698,6 +1972,7 @@ static int vfe_bpl_align(struct vfe_device *vfe)
case CAMSS_8250:
case CAMSS_8280XP:
case CAMSS_845:
+ case CAMSS_8550:
ret = 16;
break;
default:
diff --git a/drivers/media/platform/qcom/camss/camss-vfe.h b/drivers/media/platform/qcom/camss/camss-vfe.h
index 10e2cc3c0b83..9dec5bc0d1b1 100644
--- a/drivers/media/platform/qcom/camss/camss-vfe.h
+++ b/drivers/media/platform/qcom/camss/camss-vfe.h
@@ -99,7 +99,7 @@ struct vfe_line {
struct vfe_device;
struct vfe_hw_ops {
- void (*enable_irq_common)(struct vfe_device *vfe);
+ void (*enable_irq)(struct vfe_device *vfe);
void (*global_reset)(struct vfe_device *vfe);
u32 (*hw_version)(struct vfe_device *vfe);
irqreturn_t (*isr)(int irq, void *dev);
@@ -114,7 +114,12 @@ struct vfe_hw_ops {
int (*vfe_enable)(struct vfe_line *line);
int (*vfe_halt)(struct vfe_device *vfe);
void (*violation_read)(struct vfe_device *vfe);
+ void (*vfe_wm_start)(struct vfe_device *vfe, u8 wm,
+ struct vfe_line *line);
void (*vfe_wm_stop)(struct vfe_device *vfe, u8 wm);
+ void (*vfe_buf_done)(struct vfe_device *vfe, int port_id);
+ void (*vfe_wm_update)(struct vfe_device *vfe, u8 wm, u32 addr,
+ struct vfe_line *line);
};
struct vfe_isr_ops {
@@ -238,6 +243,7 @@ extern const struct vfe_hw_ops vfe_ops_4_7;
extern const struct vfe_hw_ops vfe_ops_4_8;
extern const struct vfe_hw_ops vfe_ops_170;
extern const struct vfe_hw_ops vfe_ops_480;
+extern const struct vfe_hw_ops vfe_ops_780;
int vfe_get(struct vfe_device *vfe);
void vfe_put(struct vfe_device *vfe);
@@ -252,4 +258,55 @@ void vfe_put(struct vfe_device *vfe);
*/
bool vfe_is_lite(struct vfe_device *vfe);
+/*
+ * vfe_hw_version - Read and log the VFE hardware version
+ * @vfe: VFE Device
+ *
+ * Return vfe hw version
+ */
+u32 vfe_hw_version(struct vfe_device *vfe);
+
+/*
+ * vfe_enable_v2 - Enable streaming on VFE line
+ * @line: VFE line
+ *
+ * Return 0 on success or a negative error code otherwise
+ */
+int vfe_enable_v2(struct vfe_line *line);
+
+/*
+ * vfe_buf_done - Process write master done interrupt
+ * @vfe: VFE Device
+ * @wm: Write master id
+ */
+void vfe_buf_done(struct vfe_device *vfe, int wm);
+
+/*
+ * vfe_get_output_v2 - Get vfe output port for corresponding VFE line
+ * @line: VFE line
+ *
+ * Return 0 on success or a negative error code otherwise
+ */
+int vfe_get_output_v2(struct vfe_line *line);
+
+/*
+ * vfe_enable_output_v2 - Enable output for the VFE line
+ * @line: VFE line
+ *
+ * Return 0 on success or a negative error code otherwise
+ */
+int vfe_enable_output_v2(struct vfe_line *line);
+
+/*
+ * vfe_queue_buffer_v2 - Add empty buffer
+ * @vid: Video device structure
+ * @buf: Buffer to be enqueued
+ *
+ * Add an empty buffer - depending on the current number of buffers it will be
+ * put in pending buffer queue or directly given to the hardware to be filled.
+ *
+ * Return 0 on success or a negative error code otherwise
+ */
+int vfe_queue_buffer_v2(struct camss_video *vid,
+ struct camss_buffer *buf);
+
#endif /* QC_MSM_CAMSS_VFE_H */
diff --git a/drivers/media/platform/qcom/camss/camss.c b/drivers/media/platform/qcom/camss/camss.c
index a85e9df0f301..6791dfea91b1 100644
--- a/drivers/media/platform/qcom/camss/camss.c
+++ b/drivers/media/platform/qcom/camss/camss.c
@@ -738,6 +738,185 @@ static const struct camss_subdev_resources vfe_res_660[] = {
}
};
+static const struct camss_subdev_resources csiphy_res_670[] = {
+ /* CSIPHY0 */
+ {
+ .regulators = { "vdda-phy", "vdda-pll" },
+ .clock = { "soc_ahb", "cpas_ahb",
+ "csiphy0", "csiphy0_timer" },
+ .clock_rate = { { 0 },
+ { 0 },
+ { 0 },
+ { 19200000, 240000000, 269333333 } },
+ .reg = { "csiphy0" },
+ .interrupt = { "csiphy0" },
+ .csiphy = {
+ .hw_ops = &csiphy_ops_3ph_1_0,
+ .formats = &csiphy_formats_sdm845
+ }
+ },
+
+ /* CSIPHY1 */
+ {
+ .regulators = { "vdda-phy", "vdda-pll" },
+ .clock = { "soc_ahb", "cpas_ahb",
+ "csiphy1", "csiphy1_timer" },
+ .clock_rate = { { 0 },
+ { 0 },
+ { 0 },
+ { 19200000, 240000000, 269333333 } },
+ .reg = { "csiphy1" },
+ .interrupt = { "csiphy1" },
+ .csiphy = {
+ .hw_ops = &csiphy_ops_3ph_1_0,
+ .formats = &csiphy_formats_sdm845
+ }
+ },
+
+ /* CSIPHY2 */
+ {
+ .regulators = { "vdda-phy", "vdda-pll" },
+ .clock = { "soc_ahb", "cpas_ahb",
+ "csiphy2", "csiphy2_timer" },
+ .clock_rate = { { 0 },
+ { 0 },
+ { 0 },
+ { 19200000, 240000000, 269333333 } },
+ .reg = { "csiphy2" },
+ .interrupt = { "csiphy2" },
+ .csiphy = {
+ .hw_ops = &csiphy_ops_3ph_1_0,
+ .formats = &csiphy_formats_sdm845
+ }
+ }
+};
+
+static const struct camss_subdev_resources csid_res_670[] = {
+ /* CSID0 */
+ {
+ .regulators = {},
+ .clock = { "cpas_ahb", "soc_ahb", "vfe0",
+ "vfe0_cphy_rx", "csi0" },
+ .clock_rate = { { 0 },
+ { 0 },
+ { 100000000, 320000000, 404000000, 480000000, 600000000 },
+ { 384000000 },
+ { 19200000, 75000000, 384000000, 538666667 } },
+ .reg = { "csid0" },
+ .interrupt = { "csid0" },
+ .csid = {
+ .hw_ops = &csid_ops_gen2,
+ .parent_dev_ops = &vfe_parent_dev_ops,
+ .formats = &csid_formats_gen2
+ }
+ },
+
+ /* CSID1 */
+ {
+ .regulators = {},
+ .clock = { "cpas_ahb", "soc_ahb", "vfe1",
+ "vfe1_cphy_rx", "csi1" },
+ .clock_rate = { { 0 },
+ { 0 },
+ { 100000000, 320000000, 404000000, 480000000, 600000000 },
+ { 384000000 },
+ { 19200000, 75000000, 384000000, 538666667 } },
+ .reg = { "csid1" },
+ .interrupt = { "csid1" },
+ .csid = {
+ .hw_ops = &csid_ops_gen2,
+ .parent_dev_ops = &vfe_parent_dev_ops,
+ .formats = &csid_formats_gen2
+ }
+ },
+
+ /* CSID2 */
+ {
+ .regulators = {},
+ .clock = { "cpas_ahb", "soc_ahb", "vfe_lite",
+ "vfe_lite_cphy_rx", "csi2" },
+ .clock_rate = { { 0 },
+ { 0 },
+ { 100000000, 320000000, 404000000, 480000000, 600000000 },
+ { 384000000 },
+ { 19200000, 75000000, 384000000, 538666667 } },
+ .reg = { "csid2" },
+ .interrupt = { "csid2" },
+ .csid = {
+ .is_lite = true,
+ .hw_ops = &csid_ops_gen2,
+ .parent_dev_ops = &vfe_parent_dev_ops,
+ .formats = &csid_formats_gen2
+ }
+ }
+};
+
+static const struct camss_subdev_resources vfe_res_670[] = {
+ /* VFE0 */
+ {
+ .regulators = {},
+ .clock = { "camnoc_axi", "cpas_ahb", "soc_ahb",
+ "vfe0", "vfe0_axi" },
+ .clock_rate = { { 0 },
+ { 0 },
+ { 0 },
+ { 100000000, 320000000, 404000000, 480000000, 600000000 },
+ { 0 } },
+ .reg = { "vfe0" },
+ .interrupt = { "vfe0" },
+ .vfe = {
+ .line_num = 4,
+ .has_pd = true,
+ .pd_name = "ife0",
+ .hw_ops = &vfe_ops_170,
+ .formats_rdi = &vfe_formats_rdi_845,
+ .formats_pix = &vfe_formats_pix_845
+ }
+ },
+
+ /* VFE1 */
+ {
+ .regulators = {},
+ .clock = { "camnoc_axi", "cpas_ahb", "soc_ahb",
+ "vfe1", "vfe1_axi" },
+ .clock_rate = { { 0 },
+ { 0 },
+ { 0 },
+ { 100000000, 320000000, 404000000, 480000000, 600000000 },
+ { 0 } },
+ .reg = { "vfe1" },
+ .interrupt = { "vfe1" },
+ .vfe = {
+ .line_num = 4,
+ .has_pd = true,
+ .pd_name = "ife1",
+ .hw_ops = &vfe_ops_170,
+ .formats_rdi = &vfe_formats_rdi_845,
+ .formats_pix = &vfe_formats_pix_845
+ }
+ },
+
+ /* VFE-lite */
+ {
+ .regulators = {},
+ .clock = { "camnoc_axi", "cpas_ahb", "soc_ahb",
+ "vfe_lite" },
+ .clock_rate = { { 0 },
+ { 0 },
+ { 0 },
+ { 100000000, 320000000, 404000000, 480000000, 600000000 } },
+ .reg = { "vfe_lite" },
+ .interrupt = { "vfe_lite" },
+ .vfe = {
+ .is_lite = true,
+ .line_num = 4,
+ .hw_ops = &vfe_ops_170,
+ .formats_rdi = &vfe_formats_rdi_845,
+ .formats_pix = &vfe_formats_pix_845
+ }
+ }
+};
+
static const struct camss_subdev_resources csiphy_res_845[] = {
/* CSIPHY0 */
{
@@ -927,6 +1106,7 @@ static const struct camss_subdev_resources vfe_res_845[] = {
.interrupt = { "vfe0" },
.vfe = {
.line_num = 4,
+ .pd_name = "ife0",
.has_pd = true,
.hw_ops = &vfe_ops_170,
.formats_rdi = &vfe_formats_rdi_845,
@@ -954,6 +1134,7 @@ static const struct camss_subdev_resources vfe_res_845[] = {
.interrupt = { "vfe1" },
.vfe = {
.line_num = 4,
+ .pd_name = "ife1",
.has_pd = true,
.hw_ops = &vfe_ops_170,
.formats_rdi = &vfe_formats_rdi_845,
@@ -1443,12 +1624,13 @@ static const struct camss_subdev_resources vfe_res_7280[] = {
.regulators = {},
.clock = { "camnoc_axi", "cpas_ahb", "icp_ahb", "vfe0",
- "vfe0_axi", "gcc_cam_hf_axi" },
+ "vfe0_axi", "gcc_axi_hf", "gcc_axi_sf" },
.clock_rate = { { 150000000, 240000000, 320000000, 400000000, 480000000 },
{ 80000000 },
{ 0 },
{ 380000000, 510000000, 637000000, 760000000 },
{ 0 },
+ { 0 },
{ 0 } },
.reg = { "vfe0" },
@@ -1468,12 +1650,13 @@ static const struct camss_subdev_resources vfe_res_7280[] = {
.regulators = {},
.clock = { "camnoc_axi", "cpas_ahb", "icp_ahb", "vfe1",
- "vfe1_axi", "gcc_cam_hf_axi" },
+ "vfe1_axi", "gcc_axi_hf", "gcc_axi_sf" },
.clock_rate = { { 150000000, 240000000, 320000000, 400000000, 480000000 },
{ 80000000 },
{ 0 },
{ 380000000, 510000000, 637000000, 760000000 },
{ 0 },
+ { 0 },
{ 0 } },
.reg = { "vfe1" },
@@ -1493,12 +1676,13 @@ static const struct camss_subdev_resources vfe_res_7280[] = {
.regulators = {},
.clock = { "camnoc_axi", "cpas_ahb", "icp_ahb", "vfe2",
- "vfe2_axi", "gcc_cam_hf_axi" },
+ "vfe2_axi", "gcc_axi_hf", "gcc_axi_sf" },
.clock_rate = { { 150000000, 240000000, 320000000, 400000000, 480000000 },
{ 80000000 },
{ 0 },
{ 380000000, 510000000, 637000000, 760000000 },
{ 0 },
+ { 0 },
{ 0 } },
.reg = { "vfe2" },
@@ -1516,11 +1700,12 @@ static const struct camss_subdev_resources vfe_res_7280[] = {
/* VFE3 (lite) */
{
.clock = { "camnoc_axi", "cpas_ahb", "icp_ahb",
- "vfe_lite0", "gcc_cam_hf_axi" },
+ "vfe_lite0", "gcc_axi_hf", "gcc_axi_sf" },
.clock_rate = { { 150000000, 240000000, 320000000, 400000000, 480000000 },
{ 80000000 },
{ 0 },
{ 320000000, 400000000, 480000000, 600000000 },
+ { 0 },
{ 0 } },
.regulators = {},
@@ -1537,11 +1722,12 @@ static const struct camss_subdev_resources vfe_res_7280[] = {
/* VFE4 (lite) */
{
.clock = { "camnoc_axi", "cpas_ahb", "icp_ahb",
- "vfe_lite1", "gcc_cam_hf_axi" },
+ "vfe_lite1", "gcc_axi_hf", "gcc_axi_sf" },
.clock_rate = { { 150000000, 240000000, 320000000, 400000000, 480000000 },
{ 80000000 },
{ 0 },
{ 320000000, 400000000, 480000000, 600000000 },
+ { 0 },
{ 0 } },
.regulators = {},
@@ -1938,6 +2124,327 @@ static const struct resources_icc icc_res_sc8280xp[] = {
},
};
+static const struct camss_subdev_resources csiphy_res_8550[] = {
+ /* CSIPHY0 */
+ {
+ .regulators = { "vdda-phy", "vdda-pll" },
+ .clock = { "csiphy0", "csiphy0_timer" },
+ .clock_rate = { { 400000000, 480000000 },
+ { 400000000 } },
+ .reg = { "csiphy0" },
+ .interrupt = { "csiphy0" },
+ .csiphy = {
+ .hw_ops = &csiphy_ops_3ph_1_0,
+ .formats = &csiphy_formats_sdm845
+ }
+ },
+ /* CSIPHY1 */
+ {
+ .regulators = { "vdda-phy", "vdda-pll" },
+ .clock = { "csiphy1", "csiphy1_timer" },
+ .clock_rate = { { 400000000, 480000000 },
+ { 400000000 } },
+ .reg = { "csiphy1" },
+ .interrupt = { "csiphy1" },
+ .csiphy = {
+ .hw_ops = &csiphy_ops_3ph_1_0,
+ .formats = &csiphy_formats_sdm845
+ }
+ },
+ /* CSIPHY2 */
+ {
+ .regulators = { "vdda-phy", "vdda-pll" },
+ .clock = { "csiphy2", "csiphy2_timer" },
+ .clock_rate = { { 400000000, 480000000 },
+ { 400000000 } },
+ .reg = { "csiphy2" },
+ .interrupt = { "csiphy2" },
+ .csiphy = {
+ .hw_ops = &csiphy_ops_3ph_1_0,
+ .formats = &csiphy_formats_sdm845
+ }
+ },
+ /* CSIPHY3 */
+ {
+ .regulators = { "vdda-phy", "vdda-pll" },
+ .clock = { "csiphy3", "csiphy3_timer" },
+ .clock_rate = { { 400000000, 480000000 },
+ { 400000000 } },
+ .reg = { "csiphy3" },
+ .interrupt = { "csiphy3" },
+ .csiphy = {
+ .hw_ops = &csiphy_ops_3ph_1_0,
+ .formats = &csiphy_formats_sdm845
+ }
+ },
+ /* CSIPHY4 */
+ {
+ .regulators = { "vdda-phy", "vdda-pll" },
+ .clock = { "csiphy4", "csiphy4_timer" },
+ .clock_rate = { { 400000000, 480000000 },
+ { 400000000 } },
+ .reg = { "csiphy4" },
+ .interrupt = { "csiphy4" },
+ .csiphy = {
+ .hw_ops = &csiphy_ops_3ph_1_0,
+ .formats = &csiphy_formats_sdm845
+ }
+ },
+ /* CSIPHY5 */
+ {
+ .regulators = { "vdda-phy", "vdda-pll" },
+ .clock = { "csiphy5", "csiphy5_timer" },
+ .clock_rate = { { 400000000, 480000000 },
+ { 400000000 } },
+ .reg = { "csiphy5" },
+ .interrupt = { "csiphy5" },
+ .csiphy = {
+ .hw_ops = &csiphy_ops_3ph_1_0,
+ .formats = &csiphy_formats_sdm845
+ }
+ },
+ /* CSIPHY6 */
+ {
+ .regulators = { "vdda-phy", "vdda-pll" },
+ .clock = { "csiphy6", "csiphy6_timer" },
+ .clock_rate = { { 400000000, 480000000 },
+ { 400000000 } },
+ .reg = { "csiphy6" },
+ .interrupt = { "csiphy6" },
+ .csiphy = {
+ .hw_ops = &csiphy_ops_3ph_1_0,
+ .formats = &csiphy_formats_sdm845
+ }
+ },
+ /* CSIPHY7 */
+ {
+ .regulators = { "vdda-phy", "vdda-pll" },
+ .clock = { "csiphy7", "csiphy7_timer" },
+ .clock_rate = { { 400000000, 480000000 },
+ { 400000000 } },
+ .reg = { "csiphy7" },
+ .interrupt = { "csiphy7" },
+ .csiphy = {
+ .hw_ops = &csiphy_ops_3ph_1_0,
+ .formats = &csiphy_formats_sdm845
+ }
+ }
+};
+
+static const struct resources_wrapper csid_wrapper_res_sm8550 = {
+ .reg = "csid_wrapper",
+};
+
+static const struct camss_subdev_resources csid_res_8550[] = {
+ /* CSID0 */
+ {
+ .regulators = {},
+ .clock = { "csid", "csiphy_rx" },
+ .clock_rate = { { 400000000, 480000000 },
+ { 400000000, 480000000 } },
+ .reg = { "csid0" },
+ .interrupt = { "csid0" },
+ .csid = {
+ .is_lite = false,
+ .parent_dev_ops = &vfe_parent_dev_ops,
+ .hw_ops = &csid_ops_780,
+ .formats = &csid_formats_gen2
+ }
+ },
+ /* CSID1 */
+ {
+ .regulators = {},
+ .clock = { "csid", "csiphy_rx" },
+ .clock_rate = { { 400000000, 480000000 },
+ { 400000000, 480000000 } },
+ .reg = { "csid1" },
+ .interrupt = { "csid1" },
+ .csid = {
+ .is_lite = false,
+ .parent_dev_ops = &vfe_parent_dev_ops,
+ .hw_ops = &csid_ops_780,
+ .formats = &csid_formats_gen2
+ }
+ },
+ /* CSID2 */
+ {
+ .regulators = {},
+ .clock = { "csid", "csiphy_rx" },
+ .clock_rate = { { 400000000, 480000000 },
+ { 400000000, 480000000 } },
+ .reg = { "csid2" },
+ .interrupt = { "csid2" },
+ .csid = {
+ .is_lite = false,
+ .parent_dev_ops = &vfe_parent_dev_ops,
+ .hw_ops = &csid_ops_780,
+ .formats = &csid_formats_gen2
+ }
+ },
+ /* CSID3 */
+ {
+ .regulators = {},
+ .clock = { "vfe_lite_csid", "vfe_lite_cphy_rx" },
+ .clock_rate = { { 400000000, 480000000 },
+ { 400000000, 480000000 } },
+ .reg = { "csid_lite0" },
+ .interrupt = { "csid_lite0" },
+ .csid = {
+ .is_lite = true,
+ .parent_dev_ops = &vfe_parent_dev_ops,
+ .hw_ops = &csid_ops_780,
+ .formats = &csid_formats_gen2
+ }
+ },
+ /* CSID4 */
+ {
+ .regulators = {},
+ .clock = { "vfe_lite_csid", "vfe_lite_cphy_rx" },
+ .clock_rate = { { 400000000, 480000000 },
+ { 400000000, 480000000 } },
+ .reg = { "csid_lite1" },
+ .interrupt = { "csid_lite1" },
+ .csid = {
+ .is_lite = true,
+ .parent_dev_ops = &vfe_parent_dev_ops,
+ .hw_ops = &csid_ops_780,
+ .formats = &csid_formats_gen2
+ }
+ }
+};
+
+static const struct camss_subdev_resources vfe_res_8550[] = {
+ /* VFE0 */
+ {
+ .regulators = {},
+ .clock = { "gcc_axi_hf", "cpas_ahb", "cpas_fast_ahb_clk", "vfe0_fast_ahb",
+ "vfe0", "cpas_vfe0", "camnoc_axi" },
+ .clock_rate = { { 0 },
+ { 80000000 },
+ { 300000000, 400000000 },
+ { 300000000, 400000000 },
+ { 466000000, 594000000, 675000000, 785000000 },
+ { 300000000, 400000000 },
+ { 300000000, 400000000 } },
+ .reg = { "vfe0" },
+ .interrupt = { "vfe0" },
+ .vfe = {
+ .line_num = 3,
+ .is_lite = false,
+ .has_pd = true,
+ .pd_name = "ife0",
+ .hw_ops = &vfe_ops_780,
+ .formats_rdi = &vfe_formats_rdi_845,
+ .formats_pix = &vfe_formats_pix_845
+ }
+ },
+ /* VFE1 */
+ {
+ .regulators = {},
+ .clock = { "gcc_axi_hf", "cpas_ahb", "cpas_fast_ahb_clk", "vfe1_fast_ahb",
+ "vfe1", "cpas_vfe1", "camnoc_axi" },
+ .clock_rate = { { 0 },
+ { 80000000 },
+ { 300000000, 400000000 },
+ { 300000000, 400000000 },
+ { 466000000, 594000000, 675000000, 785000000 },
+ { 300000000, 400000000 },
+ { 300000000, 400000000 } },
+ .reg = { "vfe1" },
+ .interrupt = { "vfe1" },
+ .vfe = {
+ .line_num = 3,
+ .is_lite = false,
+ .has_pd = true,
+ .pd_name = "ife1",
+ .hw_ops = &vfe_ops_780,
+ .formats_rdi = &vfe_formats_rdi_845,
+ .formats_pix = &vfe_formats_pix_845
+ }
+ },
+ /* VFE2 */
+ {
+ .regulators = {},
+ .clock = { "gcc_axi_hf", "cpas_ahb", "cpas_fast_ahb_clk", "vfe2_fast_ahb",
+ "vfe2", "cpas_vfe2", "camnoc_axi" },
+ .clock_rate = { { 0 },
+ { 80000000 },
+ { 300000000, 400000000 },
+ { 300000000, 400000000 },
+ { 466000000, 594000000, 675000000, 785000000 },
+ { 300000000, 400000000 },
+ { 300000000, 400000000 } },
+ .reg = { "vfe2" },
+ .interrupt = { "vfe2" },
+ .vfe = {
+ .line_num = 3,
+ .is_lite = false,
+ .has_pd = true,
+ .pd_name = "ife2",
+ .hw_ops = &vfe_ops_780,
+ .formats_rdi = &vfe_formats_rdi_845,
+ .formats_pix = &vfe_formats_pix_845
+ }
+ },
+ /* VFE3 lite */
+ {
+ .regulators = {},
+ .clock = { "gcc_axi_hf", "cpas_ahb", "cpas_fast_ahb_clk", "vfe_lite_ahb",
+ "vfe_lite", "cpas_ife_lite", "camnoc_axi" },
+ .clock_rate = { { 0 },
+ { 80000000 },
+ { 300000000, 400000000 },
+ { 300000000, 400000000 },
+ { 400000000, 480000000 },
+ { 300000000, 400000000 },
+ { 300000000, 400000000 } },
+ .reg = { "vfe_lite0" },
+ .interrupt = { "vfe_lite0" },
+ .vfe = {
+ .line_num = 4,
+ .is_lite = true,
+ .hw_ops = &vfe_ops_780,
+ .formats_rdi = &vfe_formats_rdi_845,
+ .formats_pix = &vfe_formats_pix_845
+ }
+ },
+ /* VFE4 lite */
+ {
+ .regulators = {},
+ .clock = { "gcc_axi_hf", "cpas_ahb", "cpas_fast_ahb_clk", "vfe_lite_ahb",
+ "vfe_lite", "cpas_ife_lite", "camnoc_axi" },
+ .clock_rate = { { 0 },
+ { 80000000 },
+ { 300000000, 400000000 },
+ { 300000000, 400000000 },
+ { 400000000, 480000000 },
+ { 300000000, 400000000 },
+ { 300000000, 400000000 } },
+ .reg = { "vfe_lite1" },
+ .interrupt = { "vfe_lite1" },
+ .vfe = {
+ .line_num = 4,
+ .is_lite = true,
+ .hw_ops = &vfe_ops_780,
+ .formats_rdi = &vfe_formats_rdi_845,
+ .formats_pix = &vfe_formats_pix_845
+ }
+ },
+};
+
+static const struct resources_icc icc_res_sm8550[] = {
+ {
+ .name = "ahb",
+ .icc_bw_tbl.avg = 2097152,
+ .icc_bw_tbl.peak = 2097152,
+ },
+ {
+ .name = "hf_0_mnoc",
+ .icc_bw_tbl.avg = 2097152,
+ .icc_bw_tbl.peak = 2097152,
+ },
+};
+
/*
* camss_add_clock_margin - Add margin to clock frequency rate
* @rate: Clock frequency rate
@@ -1996,12 +2503,12 @@ void camss_disable_clocks(int nclocks, struct camss_clock *clock)
}
/*
- * camss_find_sensor - Find a linked media entity which represents a sensor
+ * camss_find_sensor_pad - Find the media pad via which the sensor is linked
* @entity: Media entity to start searching from
*
- * Return a pointer to sensor media entity or NULL if not found
+ * Return a pointer to sensor media pad or NULL if not found
*/
-struct media_entity *camss_find_sensor(struct media_entity *entity)
+struct media_pad *camss_find_sensor_pad(struct media_entity *entity)
{
struct media_pad *pad;
@@ -2017,7 +2524,7 @@ struct media_entity *camss_find_sensor(struct media_entity *entity)
entity = pad->entity;
if (entity->function == MEDIA_ENT_F_CAM_SENSOR)
- return entity;
+ return pad;
}
}
@@ -2032,16 +2539,13 @@ struct media_entity *camss_find_sensor(struct media_entity *entity)
s64 camss_get_link_freq(struct media_entity *entity, unsigned int bpp,
unsigned int lanes)
{
- struct media_entity *sensor;
- struct v4l2_subdev *subdev;
+ struct media_pad *sensor_pad;
- sensor = camss_find_sensor(entity);
- if (!sensor)
+ sensor_pad = camss_find_sensor_pad(entity);
+ if (!sensor_pad)
return -ENODEV;
- subdev = media_entity_to_v4l2_subdev(sensor);
-
- return v4l2_get_link_freq(subdev->ctrl_handler, bpp, 2 * lanes);
+ return v4l2_get_link_freq(sensor_pad, bpp, 2 * lanes);
}
/*
@@ -2053,15 +2557,15 @@ s64 camss_get_link_freq(struct media_entity *entity, unsigned int bpp,
*/
int camss_get_pixel_clock(struct media_entity *entity, u64 *pixel_clock)
{
- struct media_entity *sensor;
+ struct media_pad *sensor_pad;
struct v4l2_subdev *subdev;
struct v4l2_ctrl *ctrl;
- sensor = camss_find_sensor(entity);
- if (!sensor)
+ sensor_pad = camss_find_sensor_pad(entity);
+ if (!sensor_pad)
return -ENODEV;
- subdev = media_entity_to_v4l2_subdev(sensor);
+ subdev = media_entity_to_v4l2_subdev(sensor_pad->entity);
ctrl = v4l2_ctrl_find(subdev->ctrl_handler, V4L2_CID_PIXEL_RATE);
@@ -2403,6 +2907,28 @@ static int camss_link_entities(struct camss *camss)
return 0;
}
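+/*
+ * camss_reg_update - Submit a register update request to a CSID
+ * @camss: CAMSS device
+ * @hw_id: CSID hardware index
+ * @port_id: Port for which the register update is requested
+ * @is_clear: Whether the pending register update should be cleared
+ *
+ * The request is silently ignored if @hw_id does not address a valid CSID.
+ */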
+void camss_reg_update(struct camss *camss, int hw_id, int port_id, bool is_clear)
+{
+ struct csid_device *csid;
+
+ if (hw_id < camss->res->csid_num) {
+ csid = &camss->csid[hw_id];
+
+ csid->res->hw_ops->reg_update(csid, port_id, is_clear);
+ }
+}
+
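+/*
+ * camss_buf_done - Forward a buffer done event to a VFE
+ * @camss: CAMSS device
+ * @hw_id: VFE hardware index
+ * @port_id: Port on which the buffer completed
+ *
+ * The event is silently ignored if @hw_id does not address a valid VFE.
+ */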
+void camss_buf_done(struct camss *camss, int hw_id, int port_id)
+{
+ struct vfe_device *vfe;
+
+ if (hw_id < camss->res->vfe_num) {
+ vfe = &camss->vfe[hw_id];
+
+ vfe->res->hw_ops->vfe_buf_done(vfe, port_id);
+ }
+}
+
/*
* camss_register_entities - Register subdev nodes and create links
* @camss: CAMSS device
@@ -2898,8 +3424,20 @@ static const struct camss_resources sdm660_resources = {
.link_entities = camss_link_entities
};
+static const struct camss_resources sdm670_resources = {
+ .version = CAMSS_845,
+ .csiphy_res = csiphy_res_670,
+ .csid_res = csid_res_670,
+ .vfe_res = vfe_res_670,
+ .csiphy_num = ARRAY_SIZE(csiphy_res_670),
+ .csid_num = ARRAY_SIZE(csid_res_670),
+ .vfe_num = ARRAY_SIZE(vfe_res_670),
+ .link_entities = camss_link_entities
+};
+
static const struct camss_resources sdm845_resources = {
.version = CAMSS_845,
+ .pd_name = "top",
.csiphy_res = csiphy_res_845,
.csid_res = csid_res_845,
.vfe_res = vfe_res_845,
@@ -2952,6 +3490,21 @@ static const struct camss_resources sc7280_resources = {
.link_entities = camss_link_entities
};
+static const struct camss_resources sm8550_resources = {
+ .version = CAMSS_8550,
+ .pd_name = "top",
+ .csiphy_res = csiphy_res_8550,
+ .csid_res = csid_res_8550,
+ .vfe_res = vfe_res_8550,
+ .csid_wrapper_res = &csid_wrapper_res_sm8550,
+ .icc_res = icc_res_sm8550,
+ .icc_path_num = ARRAY_SIZE(icc_res_sm8550),
+ .csiphy_num = ARRAY_SIZE(csiphy_res_8550),
+ .csid_num = ARRAY_SIZE(csid_res_8550),
+ .vfe_num = ARRAY_SIZE(vfe_res_8550),
+ .link_entities = camss_link_entities
+};
+
static const struct of_device_id camss_dt_match[] = {
{ .compatible = "qcom,msm8916-camss", .data = &msm8916_resources },
{ .compatible = "qcom,msm8953-camss", .data = &msm8953_resources },
@@ -2959,8 +3512,10 @@ static const struct of_device_id camss_dt_match[] = {
{ .compatible = "qcom,sc7280-camss", .data = &sc7280_resources },
{ .compatible = "qcom,sc8280xp-camss", .data = &sc8280xp_resources },
{ .compatible = "qcom,sdm660-camss", .data = &sdm660_resources },
+ { .compatible = "qcom,sdm670-camss", .data = &sdm670_resources },
{ .compatible = "qcom,sdm845-camss", .data = &sdm845_resources },
{ .compatible = "qcom,sm8250-camss", .data = &sm8250_resources },
+ { .compatible = "qcom,sm8550-camss", .data = &sm8550_resources },
{ }
};
diff --git a/drivers/media/platform/qcom/camss/camss.h b/drivers/media/platform/qcom/camss/camss.h
index 9a046eea334f..b284b910ce42 100644
--- a/drivers/media/platform/qcom/camss/camss.h
+++ b/drivers/media/platform/qcom/camss/camss.h
@@ -85,6 +85,7 @@ enum camss_version {
CAMSS_8250,
CAMSS_8280XP,
CAMSS_845,
+ CAMSS_8550,
};
enum icc_count {
@@ -153,7 +154,7 @@ void camss_add_clock_margin(u64 *rate);
int camss_enable_clocks(int nclocks, struct camss_clock *clock,
struct device *dev);
void camss_disable_clocks(int nclocks, struct camss_clock *clock);
-struct media_entity *camss_find_sensor(struct media_entity *entity);
+struct media_pad *camss_find_sensor_pad(struct media_entity *entity);
s64 camss_get_link_freq(struct media_entity *entity, unsigned int bpp,
unsigned int lanes);
int camss_get_pixel_clock(struct media_entity *entity, u64 *pixel_clock);
@@ -162,5 +163,8 @@ void camss_pm_domain_off(struct camss *camss, int id);
int camss_vfe_get(struct camss *camss, int id);
void camss_vfe_put(struct camss *camss, int id);
void camss_delete(struct camss *camss);
+void camss_buf_done(struct camss *camss, int hw_id, int port_id);
+void camss_reg_update(struct camss *camss, int hw_id,
+ int port_id, bool is_clear);
#endif /* QC_MSM_CAMSS_H */
diff --git a/drivers/media/platform/qcom/iris/Kconfig b/drivers/media/platform/qcom/iris/Kconfig
new file mode 100644
index 000000000000..3c803a05305a
--- /dev/null
+++ b/drivers/media/platform/qcom/iris/Kconfig
@@ -0,0 +1,13 @@
+config VIDEO_QCOM_IRIS
+ tristate "Qualcomm iris V4L2 decoder driver"
+ depends on VIDEO_DEV
+ depends on ARCH_QCOM || COMPILE_TEST
+ select V4L2_MEM2MEM_DEV
+ select QCOM_MDT_LOADER if ARCH_QCOM
+ select QCOM_SCM
+ select VIDEOBUF2_DMA_CONTIG
+ help
+ This is a V4L2 driver for Qualcomm iris video accelerator
+ hardware. It accelerates decoding operations on various
+ Qualcomm SoCs.
+ To compile this driver as a module, choose M here.
diff --git a/drivers/media/platform/qcom/iris/Makefile b/drivers/media/platform/qcom/iris/Makefile
new file mode 100644
index 000000000000..35390534534e
--- /dev/null
+++ b/drivers/media/platform/qcom/iris/Makefile
@@ -0,0 +1,31 @@
+qcom-iris-objs += \
+ iris_buffer.o \
+ iris_core.o \
+ iris_ctrls.o \
+ iris_firmware.o \
+ iris_hfi_common.o \
+ iris_hfi_gen1_command.o \
+ iris_hfi_gen1_response.o \
+ iris_hfi_gen2_command.o \
+ iris_hfi_gen2_packet.o \
+ iris_hfi_gen2_response.o \
+ iris_hfi_queue.o \
+ iris_platform_sm8550.o \
+ iris_power.o \
+ iris_probe.o \
+ iris_resources.o \
+ iris_state.o \
+ iris_utils.o \
+ iris_vidc.o \
+ iris_vb2.o \
+ iris_vdec.o \
+ iris_vpu2.o \
+ iris_vpu3.o \
+ iris_vpu_buffer.o \
+ iris_vpu_common.o \
+
+ifeq ($(CONFIG_VIDEO_QCOM_VENUS),)
+qcom-iris-objs += iris_platform_sm8250.o
+endif
+
+obj-$(CONFIG_VIDEO_QCOM_IRIS) += qcom-iris.o
diff --git a/drivers/media/platform/qcom/iris/iris_buffer.c b/drivers/media/platform/qcom/iris/iris_buffer.c
new file mode 100644
index 000000000000..e5c5a564fcb8
--- /dev/null
+++ b/drivers/media/platform/qcom/iris/iris_buffer.c
@@ -0,0 +1,623 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <media/v4l2-event.h>
+#include <media/v4l2-mem2mem.h>
+
+#include "iris_buffer.h"
+#include "iris_instance.h"
+#include "iris_power.h"
+#include "iris_vpu_buffer.h"
+
+#define PIXELS_4K 4096
+#define MAX_WIDTH 4096
+#define MAX_HEIGHT 2304
+#define Y_STRIDE_ALIGN 128
+#define UV_STRIDE_ALIGN 128
+#define Y_SCANLINE_ALIGN 32
+#define UV_SCANLINE_ALIGN 16
+#define UV_SCANLINE_ALIGN_QC08C 32
+#define META_STRIDE_ALIGNED 64
+#define META_SCANLINE_ALIGNED 16
+#define NUM_MBS_4K (DIV_ROUND_UP(MAX_WIDTH, 16) * DIV_ROUND_UP(MAX_HEIGHT, 16))
+
+/*
+ * NV12:
+ * YUV 4:2:0 image with a plane of 8 bit Y samples followed
+ * by an interleaved U/V plane containing 8 bit 2x2 subsampled
+ * colour difference samples.
+ *
+ * <-Y/UV_Stride (aligned to 128)->
+ * <------- Width ------->
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . ^ ^
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . Height |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | y_scanlines (aligned to 32)
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . V |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . V
+ * U V U V U V U V U V U V . . . . ^
+ * U V U V U V U V U V U V . . . . |
+ * U V U V U V U V U V U V . . . . |
+ * U V U V U V U V U V U V . . . . uv_scanlines (aligned to 16)
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . V
+ * . . . . . . . . . . . . . . . . --> Buffer size aligned to 4K
+ *
+ * y_stride : Width aligned to 128
+ * uv_stride : Width aligned to 128
+ * y_scanlines: Height aligned to 32
+ * uv_scanlines: Height/2 aligned to 16
+ * Total size = align(y_stride * y_scanlines
+ *              + uv_stride * uv_scanlines, 4096)
+ *
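+ * e.g. for a 1920x1080 frame: y_stride = uv_stride = 1920,
+ * y_scanlines = 1088 and uv_scanlines = 544, giving
+ * 1920 * 1088 + 1920 * 544 = 3133440 bytes (already 4K aligned).
+ *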
+ * Note: All the alignments are hardware requirements.
+ */
+static u32 iris_yuv_buffer_size_nv12(struct iris_inst *inst)
+{
+ u32 y_plane, uv_plane, y_stride, uv_stride, y_scanlines, uv_scanlines;
+ struct v4l2_format *f = inst->fmt_dst;
+
+ y_stride = ALIGN(f->fmt.pix_mp.width, Y_STRIDE_ALIGN);
+ uv_stride = ALIGN(f->fmt.pix_mp.width, UV_STRIDE_ALIGN);
+ y_scanlines = ALIGN(f->fmt.pix_mp.height, Y_SCANLINE_ALIGN);
+ uv_scanlines = ALIGN((f->fmt.pix_mp.height + 1) >> 1, UV_SCANLINE_ALIGN);
+ y_plane = y_stride * y_scanlines;
+ uv_plane = uv_stride * uv_scanlines;
+
+ return ALIGN(y_plane + uv_plane, PIXELS_4K);
+}
+
+/*
+ * QC08C:
+ * Compressed Macro-tile format for NV12.
+ * Contains 4 planes in the following order -
+ * (A) Y_Meta_Plane
+ * (B) Y_UBWC_Plane
+ * (C) UV_Meta_Plane
+ * (D) UV_UBWC_Plane
+ *
+ * Y_Meta_Plane consists of meta information to decode compressed
+ * tile data in Y_UBWC_Plane.
+ * Y_UBWC_Plane consists of Y data in compressed macro-tile format.
+ * UBWC decoder block will use the Y_Meta_Plane data together with
+ * Y_UBWC_Plane data to produce loss-less uncompressed 8 bit Y samples.
+ *
+ * UV_Meta_Plane consists of meta information to decode compressed
+ * tile data in UV_UBWC_Plane.
+ * UV_UBWC_Plane consists of UV data in compressed macro-tile format.
+ * UBWC decoder block will use UV_Meta_Plane data together with
+ * UV_UBWC_Plane data to produce loss-less uncompressed 8 bit 2x2
+ * subsampled color difference samples.
+ *
+ * Each tile in Y_UBWC_Plane/UV_UBWC_Plane is independently decodable
+ * and randomly accessible. There is no dependency between tiles.
+ *
+ * <----- y_meta_stride ----> (aligned to 64)
+ * <-------- Width ------>
+ * M M M M M M M M M M M M . . ^ ^
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . Height |
+ * M M M M M M M M M M M M . . | y_meta_scanlines (aligned to 16)
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . V |
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * . . . . . . . . . . . . . . V
+ * <--Compressed tile y_stride---> (aligned to 128)
+ * <------- Width ------->
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . ^ ^
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . Height |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | Macro_tile y_scanlines (aligned to 32)
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . V |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * . . . . . . . . . . . . . . . . V
+ * <----- uv_meta_stride ----> (aligned to 64)
+ * M M M M M M M M M M M M . . ^
+ * M M M M M M M M M M M M . . |
+ * M M M M M M M M M M M M . . |
+ * M M M M M M M M M M M M . . uv_meta_scanlines (aligned to 16)
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . V
+ * . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * <--Compressed tile uv_stride---> (aligned to 128)
+ * U* V* U* V* U* V* U* V* . . . . ^
+ * U* V* U* V* U* V* U* V* . . . . |
+ * U* V* U* V* U* V* U* V* . . . . |
+ * U* V* U* V* U* V* U* V* . . . . uv_scanlines (aligned to 32)
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . V
+ * . . . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ *
+ * y_stride: width aligned to 128
+ * uv_stride: width aligned to 128
+ * y_scanlines: height aligned to 32
+ * uv_scanlines: height aligned to 32
+ * y_plane: buffer size aligned to 4096
+ * uv_plane: buffer size aligned to 4096
+ * y_meta_stride: width aligned to 64
+ * y_meta_scanlines: height aligned to 16
+ * y_meta_plane: buffer size aligned to 4096
+ * uv_meta_stride: width aligned to 64
+ * uv_meta_scanlines: height aligned to 16
+ * uv_meta_plane: buffer size aligned to 4096
+ *
+ * Total size = align( y_plane + uv_plane +
+ * y_meta_plane + uv_meta_plane, 4096)
+ *
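+ * e.g. for a 1920x1080 frame: y_meta_plane = 12288, y_plane = 2088960,
+ * uv_meta_plane = 8192 and uv_plane = 1044480, giving a total of
+ * 3153920 bytes.
+ *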
+ * Note: All the alignments are hardware requirements.
+ */
+static u32 iris_yuv_buffer_size_qc08c(struct iris_inst *inst)
+{
+ u32 y_plane, uv_plane, y_stride, uv_stride;
+ struct v4l2_format *f = inst->fmt_dst;
+ u32 uv_meta_stride, uv_meta_plane;
+ u32 y_meta_stride, y_meta_plane;
+
+ y_meta_stride = ALIGN(DIV_ROUND_UP(f->fmt.pix_mp.width, META_STRIDE_ALIGNED >> 1),
+ META_STRIDE_ALIGNED);
+ y_meta_plane = y_meta_stride * ALIGN(DIV_ROUND_UP(f->fmt.pix_mp.height,
+ META_SCANLINE_ALIGNED >> 1),
+ META_SCANLINE_ALIGNED);
+ y_meta_plane = ALIGN(y_meta_plane, PIXELS_4K);
+
+ y_stride = ALIGN(f->fmt.pix_mp.width, Y_STRIDE_ALIGN);
+ y_plane = ALIGN(y_stride * ALIGN(f->fmt.pix_mp.height, Y_SCANLINE_ALIGN), PIXELS_4K);
+
+ uv_meta_stride = ALIGN(DIV_ROUND_UP(f->fmt.pix_mp.width / 2, META_STRIDE_ALIGNED >> 2),
+ META_STRIDE_ALIGNED);
+ uv_meta_plane = uv_meta_stride * ALIGN(DIV_ROUND_UP(f->fmt.pix_mp.height / 2,
+ META_SCANLINE_ALIGNED >> 1),
+ META_SCANLINE_ALIGNED);
+ uv_meta_plane = ALIGN(uv_meta_plane, PIXELS_4K);
+
+ uv_stride = ALIGN(f->fmt.pix_mp.width, UV_STRIDE_ALIGN);
+ uv_plane = ALIGN(uv_stride * ALIGN(f->fmt.pix_mp.height / 2, UV_SCANLINE_ALIGN_QC08C),
+ PIXELS_4K);
+
+ return ALIGN(y_meta_plane + y_plane + uv_meta_plane + uv_plane, PIXELS_4K);
+}
+
+static u32 iris_bitstream_buffer_size(struct iris_inst *inst)
+{
+ struct platform_inst_caps *caps = inst->core->iris_platform_data->inst_caps;
+ u32 base_res_mbs = NUM_MBS_4K;
+ u32 frame_size, num_mbs;
+ u32 div_factor = 2;
+
+ num_mbs = iris_get_mbpf(inst);
+ if (num_mbs > NUM_MBS_4K) {
+ div_factor = 4;
+ base_res_mbs = caps->max_mbpf;
+ }
+
+ /*
+ * frame_size = YUVsize / div_factor
+ * where YUVsize = resolution_in_MBs * pixels_per_MB * 3 / 2
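+ *
+ * e.g. for streams up to 4K, base_res_mbs = NUM_MBS_4K = 36864 and
+ * div_factor = 2, so frame_size = 36864 * 256 * 3 / 2 / 2 = 7077888
+ * bytes (~6.75 MB).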
+ */
+ frame_size = base_res_mbs * (16 * 16) * 3 / 2 / div_factor;
+
+ return ALIGN(frame_size, PIXELS_4K);
+}
+
+int iris_get_buffer_size(struct iris_inst *inst,
+ enum iris_buffer_type buffer_type)
+{
+ switch (buffer_type) {
+ case BUF_INPUT:
+ return iris_bitstream_buffer_size(inst);
+ case BUF_OUTPUT:
+ return iris_yuv_buffer_size_nv12(inst);
+ case BUF_DPB:
+ return iris_yuv_buffer_size_qc08c(inst);
+ default:
+ return 0;
+ }
+}
+
+static void iris_fill_internal_buf_info(struct iris_inst *inst,
+ enum iris_buffer_type buffer_type)
+{
+ struct iris_buffers *buffers = &inst->buffers[buffer_type];
+
+ buffers->size = iris_vpu_buf_size(inst, buffer_type);
+ buffers->min_count = iris_vpu_buf_count(inst, buffer_type);
+}
+
+void iris_get_internal_buffers(struct iris_inst *inst, u32 plane)
+{
+ const struct iris_platform_data *platform_data = inst->core->iris_platform_data;
+ const u32 *internal_buf_type;
+ u32 internal_buffer_count, i;
+
+ if (V4L2_TYPE_IS_OUTPUT(plane)) {
+ internal_buf_type = platform_data->dec_ip_int_buf_tbl;
+ internal_buffer_count = platform_data->dec_ip_int_buf_tbl_size;
+ for (i = 0; i < internal_buffer_count; i++)
+ iris_fill_internal_buf_info(inst, internal_buf_type[i]);
+ } else {
+ internal_buf_type = platform_data->dec_op_int_buf_tbl;
+ internal_buffer_count = platform_data->dec_op_int_buf_tbl_size;
+ for (i = 0; i < internal_buffer_count; i++)
+ iris_fill_internal_buf_info(inst, internal_buf_type[i]);
+ }
+}
+
+static int iris_create_internal_buffer(struct iris_inst *inst,
+ enum iris_buffer_type buffer_type, u32 index)
+{
+ struct iris_buffers *buffers = &inst->buffers[buffer_type];
+ struct iris_core *core = inst->core;
+ struct iris_buffer *buffer;
+
+ if (!buffers->size)
+ return 0;
+
+ buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
+ if (!buffer)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&buffer->list);
+ buffer->type = buffer_type;
+ buffer->index = index;
+ buffer->buffer_size = buffers->size;
+ buffer->dma_attrs = DMA_ATTR_WRITE_COMBINE | DMA_ATTR_NO_KERNEL_MAPPING;
+ list_add_tail(&buffer->list, &buffers->list);
+
+ buffer->kvaddr = dma_alloc_attrs(core->dev, buffer->buffer_size,
+ &buffer->device_addr, GFP_KERNEL, buffer->dma_attrs);
+ if (!buffer->kvaddr)
+ return -ENOMEM;
+
+ return 0;
+}
+
+int iris_create_internal_buffers(struct iris_inst *inst, u32 plane)
+{
+ const struct iris_platform_data *platform_data = inst->core->iris_platform_data;
+ u32 internal_buffer_count, i, j;
+ struct iris_buffers *buffers;
+ const u32 *internal_buf_type;
+ int ret;
+
+ if (V4L2_TYPE_IS_OUTPUT(plane)) {
+ internal_buf_type = platform_data->dec_ip_int_buf_tbl;
+ internal_buffer_count = platform_data->dec_ip_int_buf_tbl_size;
+ } else {
+ internal_buf_type = platform_data->dec_op_int_buf_tbl;
+ internal_buffer_count = platform_data->dec_op_int_buf_tbl_size;
+ }
+
+ for (i = 0; i < internal_buffer_count; i++) {
+ buffers = &inst->buffers[internal_buf_type[i]];
+ for (j = 0; j < buffers->min_count; j++) {
+ ret = iris_create_internal_buffer(inst, internal_buf_type[i], j);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+int iris_queue_buffer(struct iris_inst *inst, struct iris_buffer *buf)
+{
+ const struct iris_hfi_command_ops *hfi_ops = inst->core->hfi_ops;
+ int ret;
+
+ ret = hfi_ops->session_queue_buf(inst, buf);
+ if (ret)
+ return ret;
+
+ buf->attr &= ~BUF_ATTR_DEFERRED;
+ buf->attr |= BUF_ATTR_QUEUED;
+
+ return 0;
+}
+
+int iris_queue_internal_buffers(struct iris_inst *inst, u32 plane)
+{
+ const struct iris_platform_data *platform_data = inst->core->iris_platform_data;
+ struct iris_buffer *buffer, *next;
+ struct iris_buffers *buffers;
+ const u32 *internal_buf_type;
+ u32 internal_buffer_count, i;
+ int ret;
+
+ if (V4L2_TYPE_IS_OUTPUT(plane)) {
+ internal_buf_type = platform_data->dec_ip_int_buf_tbl;
+ internal_buffer_count = platform_data->dec_ip_int_buf_tbl_size;
+ } else {
+ internal_buf_type = platform_data->dec_op_int_buf_tbl;
+ internal_buffer_count = platform_data->dec_op_int_buf_tbl_size;
+ }
+
+ for (i = 0; i < internal_buffer_count; i++) {
+ buffers = &inst->buffers[internal_buf_type[i]];
+ list_for_each_entry_safe(buffer, next, &buffers->list, list) {
+ if (buffer->attr & BUF_ATTR_PENDING_RELEASE)
+ continue;
+ if (buffer->attr & BUF_ATTR_QUEUED)
+ continue;
+ ret = iris_queue_buffer(inst, buffer);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+int iris_destroy_internal_buffer(struct iris_inst *inst, struct iris_buffer *buffer)
+{
+ struct iris_core *core = inst->core;
+
+ list_del(&buffer->list);
+ dma_free_attrs(core->dev, buffer->buffer_size, buffer->kvaddr,
+ buffer->device_addr, buffer->dma_attrs);
+ kfree(buffer);
+
+ return 0;
+}
+
+int iris_destroy_internal_buffers(struct iris_inst *inst, u32 plane)
+{
+ const struct iris_platform_data *platform_data = inst->core->iris_platform_data;
+ struct iris_buffer *buf, *next;
+ struct iris_buffers *buffers;
+ const u32 *internal_buf_type;
+ u32 i, len;
+ int ret;
+
+ if (V4L2_TYPE_IS_OUTPUT(plane)) {
+ internal_buf_type = platform_data->dec_ip_int_buf_tbl;
+ len = platform_data->dec_ip_int_buf_tbl_size;
+ } else {
+ internal_buf_type = platform_data->dec_op_int_buf_tbl;
+ len = platform_data->dec_op_int_buf_tbl_size;
+ }
+
+ for (i = 0; i < len; i++) {
+ buffers = &inst->buffers[internal_buf_type[i]];
+ list_for_each_entry_safe(buf, next, &buffers->list, list) {
+ ret = iris_destroy_internal_buffer(inst, buf);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int iris_release_internal_buffers(struct iris_inst *inst,
+ enum iris_buffer_type buffer_type)
+{
+ const struct iris_hfi_command_ops *hfi_ops = inst->core->hfi_ops;
+ struct iris_buffers *buffers = &inst->buffers[buffer_type];
+ struct iris_buffer *buffer, *next;
+ int ret;
+
+ list_for_each_entry_safe(buffer, next, &buffers->list, list) {
+ if (buffer->attr & BUF_ATTR_PENDING_RELEASE)
+ continue;
+ if (!(buffer->attr & BUF_ATTR_QUEUED))
+ continue;
+ ret = hfi_ops->session_release_buf(inst, buffer);
+ if (ret)
+ return ret;
+ buffer->attr |= BUF_ATTR_PENDING_RELEASE;
+ }
+
+ return 0;
+}
+
+static int iris_release_input_internal_buffers(struct iris_inst *inst)
+{
+ const struct iris_platform_data *platform_data = inst->core->iris_platform_data;
+ const u32 *internal_buf_type;
+ u32 internal_buffer_count, i;
+ int ret;
+
+ internal_buf_type = platform_data->dec_ip_int_buf_tbl;
+ internal_buffer_count = platform_data->dec_ip_int_buf_tbl_size;
+
+ for (i = 0; i < internal_buffer_count; i++) {
+ ret = iris_release_internal_buffers(inst, internal_buf_type[i]);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+int iris_alloc_and_queue_persist_bufs(struct iris_inst *inst)
+{
+ struct iris_buffers *buffers = &inst->buffers[BUF_PERSIST];
+ struct iris_buffer *buffer, *next;
+ int ret;
+ u32 i;
+
+ if (!list_empty(&buffers->list))
+ return 0;
+
+ iris_fill_internal_buf_info(inst, BUF_PERSIST);
+
+ for (i = 0; i < buffers->min_count; i++) {
+ ret = iris_create_internal_buffer(inst, BUF_PERSIST, i);
+ if (ret)
+ return ret;
+ }
+
+ list_for_each_entry_safe(buffer, next, &buffers->list, list) {
+ if (buffer->attr & BUF_ATTR_PENDING_RELEASE)
+ continue;
+ if (buffer->attr & BUF_ATTR_QUEUED)
+ continue;
+ ret = iris_queue_buffer(inst, buffer);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+int iris_alloc_and_queue_input_int_bufs(struct iris_inst *inst)
+{
+ int ret;
+
+ iris_get_internal_buffers(inst, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+
+ ret = iris_release_input_internal_buffers(inst);
+ if (ret)
+ return ret;
+
+ ret = iris_create_internal_buffers(inst, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+ if (ret)
+ return ret;
+
+ return iris_queue_internal_buffers(inst, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+}
+
+int iris_queue_deferred_buffers(struct iris_inst *inst, enum iris_buffer_type buf_type)
+{
+ struct v4l2_m2m_ctx *m2m_ctx = inst->m2m_ctx;
+ struct v4l2_m2m_buffer *buffer, *n;
+ struct iris_buffer *buf;
+ int ret;
+
+ iris_scale_power(inst);
+
+ if (buf_type == BUF_INPUT) {
+ v4l2_m2m_for_each_src_buf_safe(m2m_ctx, buffer, n) {
+ buf = to_iris_buffer(&buffer->vb);
+ if (!(buf->attr & BUF_ATTR_DEFERRED))
+ continue;
+ ret = iris_queue_buffer(inst, buf);
+ if (ret)
+ return ret;
+ }
+ } else {
+ v4l2_m2m_for_each_dst_buf_safe(m2m_ctx, buffer, n) {
+ buf = to_iris_buffer(&buffer->vb);
+ if (!(buf->attr & BUF_ATTR_DEFERRED))
+ continue;
+ ret = iris_queue_buffer(inst, buf);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+void iris_vb2_queue_error(struct iris_inst *inst)
+{
+ struct v4l2_m2m_ctx *m2m_ctx = inst->m2m_ctx;
+ struct vb2_queue *q;
+
+ q = v4l2_m2m_get_src_vq(m2m_ctx);
+ vb2_queue_error(q);
+ q = v4l2_m2m_get_dst_vq(m2m_ctx);
+ vb2_queue_error(q);
+}
+
+static struct vb2_v4l2_buffer *
+iris_helper_find_buf(struct iris_inst *inst, u32 type, u32 idx)
+{
+ struct v4l2_m2m_ctx *m2m_ctx = inst->m2m_ctx;
+
+ if (V4L2_TYPE_IS_OUTPUT(type))
+ return v4l2_m2m_src_buf_remove_by_idx(m2m_ctx, idx);
+ else
+ return v4l2_m2m_dst_buf_remove_by_idx(m2m_ctx, idx);
+}
+
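+/*
+ * Restore the buffer flags and timecode saved for the given timestamp on
+ * the capture buffer. If no stored entry matches the timestamp, fall back
+ * to the entry at the current metadata index.
+ */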
+static void iris_get_ts_metadata(struct iris_inst *inst, u64 timestamp_ns,
+ struct vb2_v4l2_buffer *vbuf)
+{
+ u32 mask = V4L2_BUF_FLAG_TIMECODE | V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
+ u32 i;
+
+ for (i = 0; i < ARRAY_SIZE(inst->tss); ++i) {
+ if (inst->tss[i].ts_ns != timestamp_ns)
+ continue;
+
+ vbuf->flags &= ~mask;
+ vbuf->flags |= inst->tss[i].flags;
+ vbuf->timecode = inst->tss[i].tc;
+ return;
+ }
+
+ vbuf->flags &= ~mask;
+ vbuf->flags |= inst->tss[inst->metadata_idx].flags;
+ vbuf->timecode = inst->tss[inst->metadata_idx].tc;
+}
+
+int iris_vb2_buffer_done(struct iris_inst *inst, struct iris_buffer *buf)
+{
+ struct v4l2_m2m_ctx *m2m_ctx = inst->m2m_ctx;
+ struct vb2_v4l2_buffer *vbuf;
+ struct vb2_buffer *vb2;
+ u32 type, state;
+
+ switch (buf->type) {
+ case BUF_INPUT:
+ type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+ break;
+ case BUF_OUTPUT:
+ type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ break;
+ default:
+ return 0; /* Internal DPB Buffers */
+ }
+
+ vbuf = iris_helper_find_buf(inst, type, buf->index);
+ if (!vbuf)
+ return -EINVAL;
+
+ vb2 = &vbuf->vb2_buf;
+
+ if (buf->flags & V4L2_BUF_FLAG_ERROR)
+ state = VB2_BUF_STATE_ERROR;
+ else
+ state = VB2_BUF_STATE_DONE;
+
+ vbuf->flags |= buf->flags;
+
+ if (V4L2_TYPE_IS_CAPTURE(type)) {
+ vb2_set_plane_payload(vb2, 0, buf->data_size);
+ vbuf->sequence = inst->sequence_cap++;
+ iris_get_ts_metadata(inst, buf->timestamp, vbuf);
+ } else {
+ vbuf->sequence = inst->sequence_out++;
+ }
+
+ if (vbuf->flags & V4L2_BUF_FLAG_LAST) {
+ if (!v4l2_m2m_has_stopped(m2m_ctx)) {
+ const struct v4l2_event ev = { .type = V4L2_EVENT_EOS };
+
+ v4l2_event_queue_fh(&inst->fh, &ev);
+ v4l2_m2m_mark_stopped(m2m_ctx);
+ }
+ }
+ vb2->timestamp = buf->timestamp;
+ v4l2_m2m_buf_done(vbuf, state);
+
+ return 0;
+}
diff --git a/drivers/media/platform/qcom/iris/iris_buffer.h b/drivers/media/platform/qcom/iris/iris_buffer.h
new file mode 100644
index 000000000000..c36b6347b077
--- /dev/null
+++ b/drivers/media/platform/qcom/iris/iris_buffer.h
@@ -0,0 +1,117 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef __IRIS_BUFFER_H__
+#define __IRIS_BUFFER_H__
+
+#include <media/videobuf2-v4l2.h>
+
+struct iris_inst;
+
+#define to_iris_buffer(ptr) container_of(ptr, struct iris_buffer, vb2)
+
+/**
+ * enum iris_buffer_type - buffer types used by the iris driver
+ *
+ * @BUF_INPUT: input buffer to the iris hardware
+ * @BUF_OUTPUT: output buffer from the iris hardware
+ * @BUF_BIN: buffer to store intermediate bin data
+ * @BUF_ARP: buffer for auto register programming
+ * @BUF_COMV: buffer to store colocated motion vectors
+ * @BUF_NON_COMV: buffer to hold config data for HW
+ * @BUF_LINE: buffer to store decoding/encoding context data for HW
+ * @BUF_DPB: buffer to store display picture buffers for reference
+ * @BUF_PERSIST: buffer to store session context data
+ * @BUF_SCRATCH_1: buffer to store decoding/encoding context data for HW
+ * @BUF_TYPE_MAX: max buffer types
+ */
+enum iris_buffer_type {
+ BUF_INPUT = 1,
+ BUF_OUTPUT,
+ BUF_BIN,
+ BUF_ARP,
+ BUF_COMV,
+ BUF_NON_COMV,
+ BUF_LINE,
+ BUF_DPB,
+ BUF_PERSIST,
+ BUF_SCRATCH_1,
+ BUF_TYPE_MAX,
+};
+
+/*
+ * enum iris_buffer_attributes
+ *
+ * BUF_ATTR_DEFERRED: buffer queued by client but not submitted to firmware.
+ * BUF_ATTR_PENDING_RELEASE: buffers requested to be released from firmware.
+ * BUF_ATTR_QUEUED: buffers submitted to firmware.
+ * BUF_ATTR_DEQUEUED: buffers received from firmware.
+ * BUF_ATTR_BUFFER_DONE: buffers sent back to vb2.
+ */
+enum iris_buffer_attributes {
+ BUF_ATTR_DEFERRED = BIT(0),
+ BUF_ATTR_PENDING_RELEASE = BIT(1),
+ BUF_ATTR_QUEUED = BIT(2),
+ BUF_ATTR_DEQUEUED = BIT(3),
+ BUF_ATTR_BUFFER_DONE = BIT(4),
+};
+
+/**
+ * struct iris_buffer - iris driver buffer representation
+ *
+ * @vb2: v4l2 vb2 buffer
+ * @list: list head for the iris_buffers structure
+ * @inst: iris instance structure
+ * @type: enum for type of iris buffer
+ * @index: identifier for the iris buffer
+ * @fd: file descriptor of the buffer
+ * @buffer_size: accessible buffer size in bytes starting from data_offset
+ * @data_offset: accessible buffer offset from base address
+ * @data_size: data size in bytes
+ * @device_addr: device address of the buffer
+ * @kvaddr: kernel virtual address of the buffer
+ * @dma_attrs: dma attributes
+ * @flags: buffer flags, a bitmask of V4L2_BUF_FLAG_* values
+ * @timestamp: timestamp of the buffer in nanoseconds (ns)
+ * @attr: enum for iris buffer attributes
+ */
+struct iris_buffer {
+ struct vb2_v4l2_buffer vb2;
+ struct list_head list;
+ struct iris_inst *inst;
+ enum iris_buffer_type type;
+ u32 index;
+ int fd;
+ size_t buffer_size;
+ u32 data_offset;
+ size_t data_size;
+ dma_addr_t device_addr;
+ void *kvaddr;
+ unsigned long dma_attrs;
+ u32 flags; /* V4L2_BUF_FLAG_* */
+ u64 timestamp;
+ enum iris_buffer_attributes attr;
+};
+
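+/*
+ * struct iris_buffers - bookkeeping for one buffer type: the list of
+ * allocated buffers of that type plus the per-buffer size and minimum
+ * count required for it.
+ */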
+struct iris_buffers {
+ struct list_head list;
+ u32 min_count;
+ u32 size;
+};
+
+int iris_get_buffer_size(struct iris_inst *inst, enum iris_buffer_type buffer_type);
+void iris_get_internal_buffers(struct iris_inst *inst, u32 plane);
+int iris_create_internal_buffers(struct iris_inst *inst, u32 plane);
+int iris_queue_internal_buffers(struct iris_inst *inst, u32 plane);
+int iris_destroy_internal_buffer(struct iris_inst *inst, struct iris_buffer *buffer);
+int iris_destroy_internal_buffers(struct iris_inst *inst, u32 plane);
+int iris_alloc_and_queue_persist_bufs(struct iris_inst *inst);
+int iris_alloc_and_queue_input_int_bufs(struct iris_inst *inst);
+int iris_queue_buffer(struct iris_inst *inst, struct iris_buffer *buf);
+int iris_queue_deferred_buffers(struct iris_inst *inst, enum iris_buffer_type buf_type);
+int iris_vb2_buffer_done(struct iris_inst *inst, struct iris_buffer *buf);
+void iris_vb2_queue_error(struct iris_inst *inst);
+
+#endif
diff --git a/drivers/media/platform/qcom/iris/iris_core.c b/drivers/media/platform/qcom/iris/iris_core.c
new file mode 100644
index 000000000000..0fa0a3b549a2
--- /dev/null
+++ b/drivers/media/platform/qcom/iris/iris_core.c
@@ -0,0 +1,96 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/pm_runtime.h>
+
+#include "iris_core.h"
+#include "iris_firmware.h"
+#include "iris_state.h"
+#include "iris_vpu_common.h"
+
+void iris_core_deinit(struct iris_core *core)
+{
+ pm_runtime_resume_and_get(core->dev);
+
+ mutex_lock(&core->lock);
+ iris_fw_unload(core);
+ iris_vpu_power_off(core);
+ iris_hfi_queues_deinit(core);
+ core->state = IRIS_CORE_DEINIT;
+ mutex_unlock(&core->lock);
+
+ pm_runtime_put_sync(core->dev);
+}
+
+static int iris_wait_for_system_response(struct iris_core *core)
+{
+ u32 hw_response_timeout_val = core->iris_platform_data->hw_response_timeout;
+ int ret;
+
+ if (core->state == IRIS_CORE_ERROR)
+ return -EIO;
+
+ ret = wait_for_completion_timeout(&core->core_init_done,
+ msecs_to_jiffies(hw_response_timeout_val));
+ if (!ret) {
+ core->state = IRIS_CORE_ERROR;
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+int iris_core_init(struct iris_core *core)
+{
+ int ret;
+
+ mutex_lock(&core->lock);
+ if (core->state == IRIS_CORE_INIT) {
+ ret = 0;
+ goto exit;
+ } else if (core->state == IRIS_CORE_ERROR) {
+ ret = -EINVAL;
+ goto error;
+ }
+
+ core->state = IRIS_CORE_INIT;
+
+ ret = iris_hfi_queues_init(core);
+ if (ret)
+ goto error;
+
+ ret = iris_vpu_power_on(core);
+ if (ret)
+ goto error_queue_deinit;
+
+ ret = iris_fw_load(core);
+ if (ret)
+ goto error_power_off;
+
+ ret = iris_vpu_boot_firmware(core);
+ if (ret)
+ goto error_unload_fw;
+
+ ret = iris_hfi_core_init(core);
+ if (ret)
+ goto error_unload_fw;
+
+ mutex_unlock(&core->lock);
+
+ return iris_wait_for_system_response(core);
+
+error_unload_fw:
+ iris_fw_unload(core);
+error_power_off:
+ iris_vpu_power_off(core);
+error_queue_deinit:
+ iris_hfi_queues_deinit(core);
+error:
+ core->state = IRIS_CORE_DEINIT;
+exit:
+ mutex_unlock(&core->lock);
+
+ return ret;
+}
diff --git a/drivers/media/platform/qcom/iris/iris_core.h b/drivers/media/platform/qcom/iris/iris_core.h
new file mode 100644
index 000000000000..37fb4919fecc
--- /dev/null
+++ b/drivers/media/platform/qcom/iris/iris_core.h
@@ -0,0 +1,111 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef __IRIS_CORE_H__
+#define __IRIS_CORE_H__
+
+#include <linux/types.h>
+#include <linux/pm_domain.h>
+#include <media/v4l2-device.h>
+
+#include "iris_hfi_common.h"
+#include "iris_hfi_queue.h"
+#include "iris_platform_common.h"
+#include "iris_resources.h"
+#include "iris_state.h"
+
+struct icc_info {
+ const char *name;
+ u32 bw_min_kbps;
+ u32 bw_max_kbps;
+};
+
+#define IRIS_FW_VERSION_LENGTH 128
+#define IFACEQ_CORE_PKT_SIZE (1024 * 4)
+
+/**
+ * struct iris_core - holds core parameters valid for all instances
+ *
+ * @dev: reference to device structure
+ * @reg_base: IO memory base address
+ * @irq: iris irq
+ * @v4l2_dev: a holder for v4l2 device structure
+ * @vdev_dec: iris video device structure for decoder
+ * @iris_v4l2_file_ops: iris v4l2 file ops
+ * @iris_v4l2_ioctl_ops: iris v4l2 ioctl ops
+ * @iris_vb2_ops: iris vb2 ops
+ * @icc_tbl: table of iris interconnects
+ * @icc_count: count of iris interconnects
+ * @pmdomain_tbl: table of iris power domains
+ * @opp_pmdomain_tbl: table of opp power domains
+ * @clock_tbl: table of iris clocks
+ * @clk_count: count of iris clocks
+ * @resets: table of iris reset clocks
+ * @iris_platform_data: a structure for platform data
+ * @state: current state of core
+ * @iface_q_table_daddr: device address for interface queue table memory
+ * @sfr_daddr: device address for SFR (Sub System Failure Reason) register memory
+ * @iface_q_table_vaddr: virtual address for interface queue table memory
+ * @sfr_vaddr: virtual address for SFR (Sub System Failure Reason) register memory
+ * @command_queue: shared interface queue to send commands to firmware
+ * @message_queue: shared interface queue to receive responses from firmware
+ * @debug_queue: shared interface queue to receive debug info from firmware
+ * @lock: a lock for this structure
+ * @response_packet: a pointer to response packet from fw to driver
+ * @header_id: id of packet header
+ * @packet_id: id of packet
+ * @power: a structure for clock and bw information
+ * @hfi_ops: iris hfi command ops
+ * @hfi_response_ops: iris hfi response ops
+ * @core_init_done: structure of signal completion for system response
+ * @intr_status: interrupt status
+ * @sys_error_handler: a delayed work for handling system fatal error
+ * @instances: a list_head of all instances
+ * @inst_fw_caps: an array of supported instance capabilities
+ */
+
+struct iris_core {
+ struct device *dev;
+ void __iomem *reg_base;
+ int irq;
+ struct v4l2_device v4l2_dev;
+ struct video_device *vdev_dec;
+ const struct v4l2_file_operations *iris_v4l2_file_ops;
+ const struct v4l2_ioctl_ops *iris_v4l2_ioctl_ops;
+ const struct vb2_ops *iris_vb2_ops;
+ struct icc_bulk_data *icc_tbl;
+ u32 icc_count;
+ struct dev_pm_domain_list *pmdomain_tbl;
+ struct dev_pm_domain_list *opp_pmdomain_tbl;
+ struct clk_bulk_data *clock_tbl;
+ u32 clk_count;
+ struct reset_control_bulk_data *resets;
+ const struct iris_platform_data *iris_platform_data;
+ enum iris_core_state state;
+ dma_addr_t iface_q_table_daddr;
+ dma_addr_t sfr_daddr;
+ void *iface_q_table_vaddr;
+ void *sfr_vaddr;
+ struct iris_iface_q_info command_queue;
+ struct iris_iface_q_info message_queue;
+ struct iris_iface_q_info debug_queue;
+ struct mutex lock; /* lock for core related operations */
+ u8 *response_packet;
+ u32 header_id;
+ u32 packet_id;
+ struct iris_core_power power;
+ const struct iris_hfi_command_ops *hfi_ops;
+ const struct iris_hfi_response_ops *hfi_response_ops;
+ struct completion core_init_done;
+ u32 intr_status;
+ struct delayed_work sys_error_handler;
+ struct list_head instances;
+ struct platform_inst_fw_cap inst_fw_caps[INST_FW_CAP_MAX];
+};
+
+int iris_core_init(struct iris_core *core);
+void iris_core_deinit(struct iris_core *core);
+
+#endif
diff --git a/drivers/media/platform/qcom/iris/iris_ctrls.c b/drivers/media/platform/qcom/iris/iris_ctrls.c
new file mode 100644
index 000000000000..b690578256d5
--- /dev/null
+++ b/drivers/media/platform/qcom/iris/iris_ctrls.c
@@ -0,0 +1,259 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/types.h>
+#include <media/v4l2-mem2mem.h>
+
+#include "iris_ctrls.h"
+#include "iris_instance.h"
+
+static inline bool iris_valid_cap_id(enum platform_inst_fw_cap_type cap_id)
+{
+ return cap_id >= 1 && cap_id < INST_FW_CAP_MAX;
+}
+
+static enum platform_inst_fw_cap_type iris_get_cap_id(u32 id)
+{
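+# SM8250 is otherwise handled by the venus driver, so its iris platform
+# data is built only when venus is disabled.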
+ switch (id) {
+ case V4L2_CID_MPEG_VIDEO_DECODER_MPEG4_DEBLOCK_FILTER:
+ return DEBLOCK;
+ case V4L2_CID_MPEG_VIDEO_H264_PROFILE:
+ return PROFILE;
+ case V4L2_CID_MPEG_VIDEO_H264_LEVEL:
+ return LEVEL;
+ default:
+ return INST_FW_CAP_MAX;
+ }
+}
+
+static u32 iris_get_v4l2_id(enum platform_inst_fw_cap_type cap_id)
+{
+ if (!iris_valid_cap_id(cap_id))
+ return 0;
+
+ switch (cap_id) {
+ case DEBLOCK:
+ return V4L2_CID_MPEG_VIDEO_DECODER_MPEG4_DEBLOCK_FILTER;
+ case PROFILE:
+ return V4L2_CID_MPEG_VIDEO_H264_PROFILE;
+ case LEVEL:
+ return V4L2_CID_MPEG_VIDEO_H264_LEVEL;
+ default:
+ return 0;
+ }
+}
+
+static int iris_vdec_op_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct iris_inst *inst = container_of(ctrl->handler, struct iris_inst, ctrl_handler);
+ enum platform_inst_fw_cap_type cap_id;
+ struct platform_inst_fw_cap *cap;
+ struct vb2_queue *q;
+
+ cap = &inst->fw_caps[0];
+ cap_id = iris_get_cap_id(ctrl->id);
+ if (!iris_valid_cap_id(cap_id))
+ return -EINVAL;
+
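+ /* While streaming, only controls flagged as dynamically allowed may change */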
+ q = v4l2_m2m_get_src_vq(inst->m2m_ctx);
+ if (vb2_is_streaming(q) &&
+ (!(inst->fw_caps[cap_id].flags & CAP_FLAG_DYNAMIC_ALLOWED)))
+ return -EINVAL;
+
+ cap[cap_id].flags |= CAP_FLAG_CLIENT_SET;
+
+ inst->fw_caps[cap_id].value = ctrl->val;
+
+ return 0;
+}
+
+static const struct v4l2_ctrl_ops iris_ctrl_ops = {
+ .s_ctrl = iris_vdec_op_s_ctrl,
+};
+
+int iris_ctrls_init(struct iris_inst *inst)
+{
+ struct platform_inst_fw_cap *cap = &inst->fw_caps[0];
+ u32 num_ctrls = 0, ctrl_idx = 0, idx = 0;
+ u32 v4l2_id;
+ int ret;
+
+ for (idx = 1; idx < INST_FW_CAP_MAX; idx++) {
+ if (iris_get_v4l2_id(cap[idx].cap_id))
+ num_ctrls++;
+ }
+ if (!num_ctrls)
+ return -EINVAL;
+
+ /* Adding 1 to num_ctrls to include V4L2_CID_MIN_BUFFERS_FOR_CAPTURE */
+ ret = v4l2_ctrl_handler_init(&inst->ctrl_handler, num_ctrls + 1);
+ if (ret)
+ return ret;
+
+ for (idx = 1; idx < INST_FW_CAP_MAX; idx++) {
+ struct v4l2_ctrl *ctrl;
+
+ v4l2_id = iris_get_v4l2_id(cap[idx].cap_id);
+ if (!v4l2_id)
+ continue;
+
+ if (ctrl_idx >= num_ctrls) {
+ ret = -EINVAL;
+ goto error;
+ }
+
+ if (cap[idx].flags & CAP_FLAG_MENU) {
+ ctrl = v4l2_ctrl_new_std_menu(&inst->ctrl_handler,
+ &iris_ctrl_ops,
+ v4l2_id,
+ cap[idx].max,
+ ~(cap[idx].step_or_mask),
+ cap[idx].value);
+ } else {
+ ctrl = v4l2_ctrl_new_std(&inst->ctrl_handler,
+ &iris_ctrl_ops,
+ v4l2_id,
+ cap[idx].min,
+ cap[idx].max,
+ cap[idx].step_or_mask,
+ cap[idx].value);
+ }
+ if (!ctrl) {
+ ret = -EINVAL;
+ goto error;
+ }
+
+ ctrl_idx++;
+ }
+
+ v4l2_ctrl_new_std(&inst->ctrl_handler, NULL,
+ V4L2_CID_MIN_BUFFERS_FOR_CAPTURE, 1, 32, 1, 4);
+
+ ret = inst->ctrl_handler.error;
+ if (ret)
+ goto error;
+
+ return 0;
+error:
+ v4l2_ctrl_handler_free(&inst->ctrl_handler);
+
+ return ret;
+}
+
+void iris_session_init_caps(struct iris_core *core)
+{
+ struct platform_inst_fw_cap *caps;
+ u32 i, num_cap, cap_id;
+
+ caps = core->iris_platform_data->inst_fw_caps;
+ num_cap = core->iris_platform_data->inst_fw_caps_size;
+
+ for (i = 0; i < num_cap; i++) {
+ cap_id = caps[i].cap_id;
+ if (!iris_valid_cap_id(cap_id))
+ continue;
+
+ core->inst_fw_caps[cap_id].cap_id = caps[i].cap_id;
+ core->inst_fw_caps[cap_id].min = caps[i].min;
+ core->inst_fw_caps[cap_id].max = caps[i].max;
+ core->inst_fw_caps[cap_id].step_or_mask = caps[i].step_or_mask;
+ core->inst_fw_caps[cap_id].value = caps[i].value;
+ core->inst_fw_caps[cap_id].flags = caps[i].flags;
+ core->inst_fw_caps[cap_id].hfi_id = caps[i].hfi_id;
+ }
+}
+
+static u32 iris_get_port_info(struct iris_inst *inst,
+ enum platform_inst_fw_cap_type cap_id)
+{
+ if (inst->fw_caps[cap_id].flags & CAP_FLAG_INPUT_PORT)
+ return HFI_PORT_BITSTREAM;
+ else if (inst->fw_caps[cap_id].flags & CAP_FLAG_OUTPUT_PORT)
+ return HFI_PORT_RAW;
+
+ return HFI_PORT_NONE;
+}
+
+int iris_set_u32_enum(struct iris_inst *inst, enum platform_inst_fw_cap_type cap_id)
+{
+ const struct iris_hfi_command_ops *hfi_ops = inst->core->hfi_ops;
+ u32 hfi_value = inst->fw_caps[cap_id].value;
+ u32 hfi_id = inst->fw_caps[cap_id].hfi_id;
+
+ return hfi_ops->session_set_property(inst, hfi_id,
+ HFI_HOST_FLAGS_NONE,
+ iris_get_port_info(inst, cap_id),
+ HFI_PAYLOAD_U32_ENUM,
+ &hfi_value, sizeof(u32));
+}
+
+int iris_set_u32(struct iris_inst *inst, enum platform_inst_fw_cap_type cap_id)
+{
+ const struct iris_hfi_command_ops *hfi_ops = inst->core->hfi_ops;
+ u32 hfi_value = inst->fw_caps[cap_id].value;
+ u32 hfi_id = inst->fw_caps[cap_id].hfi_id;
+
+ return hfi_ops->session_set_property(inst, hfi_id,
+ HFI_HOST_FLAGS_NONE,
+ iris_get_port_info(inst, cap_id),
+ HFI_PAYLOAD_U32,
+ &hfi_value, sizeof(u32));
+}
+
+int iris_set_stage(struct iris_inst *inst, enum platform_inst_fw_cap_type cap_id)
+{
+ const struct iris_hfi_command_ops *hfi_ops = inst->core->hfi_ops;
+ struct v4l2_format *inp_f = inst->fmt_src;
+ u32 hfi_id = inst->fw_caps[cap_id].hfi_id;
+ u32 height = inp_f->fmt.pix_mp.height;
+ u32 width = inp_f->fmt.pix_mp.width;
+ u32 work_mode = STAGE_2;
+
+ if (iris_res_is_less_than(width, height, 1280, 720))
+ work_mode = STAGE_1;
+
+ return hfi_ops->session_set_property(inst, hfi_id,
+ HFI_HOST_FLAGS_NONE,
+ iris_get_port_info(inst, cap_id),
+ HFI_PAYLOAD_U32,
+ &work_mode, sizeof(u32));
+}
+
+int iris_set_pipe(struct iris_inst *inst, enum platform_inst_fw_cap_type cap_id)
+{
+ const struct iris_hfi_command_ops *hfi_ops = inst->core->hfi_ops;
+ u32 work_route = inst->fw_caps[PIPE].value;
+ u32 hfi_id = inst->fw_caps[cap_id].hfi_id;
+
+ return hfi_ops->session_set_property(inst, hfi_id,
+ HFI_HOST_FLAGS_NONE,
+ iris_get_port_info(inst, cap_id),
+ HFI_PAYLOAD_U32,
+ &work_route, sizeof(u32));
+}
+
+int iris_set_properties(struct iris_inst *inst, u32 plane)
+{
+ const struct iris_hfi_command_ops *hfi_ops = inst->core->hfi_ops;
+ struct platform_inst_fw_cap *cap;
+ int ret;
+ u32 i;
+
+ ret = hfi_ops->session_set_config_params(inst, plane);
+ if (ret)
+ return ret;
+
+ for (i = 1; i < INST_FW_CAP_MAX; i++) {
+ cap = &inst->fw_caps[i];
+ if (!iris_valid_cap_id(cap->cap_id))
+ continue;
+
+ if (cap->cap_id && cap->set)
+ cap->set(inst, i);
+ }
+
+ return 0;
+}
diff --git a/drivers/media/platform/qcom/iris/iris_ctrls.h b/drivers/media/platform/qcom/iris/iris_ctrls.h
new file mode 100644
index 000000000000..9b5741868933
--- /dev/null
+++ b/drivers/media/platform/qcom/iris/iris_ctrls.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef __IRIS_CTRLS_H__
+#define __IRIS_CTRLS_H__
+
+#include "iris_platform_common.h"
+
+struct iris_core;
+struct iris_inst;
+
+int iris_ctrls_init(struct iris_inst *inst);
+void iris_session_init_caps(struct iris_core *core);
+int iris_set_u32_enum(struct iris_inst *inst, enum platform_inst_fw_cap_type cap_id);
+int iris_set_stage(struct iris_inst *inst, enum platform_inst_fw_cap_type cap_id);
+int iris_set_pipe(struct iris_inst *inst, enum platform_inst_fw_cap_type cap_id);
+int iris_set_u32(struct iris_inst *inst, enum platform_inst_fw_cap_type cap_id);
+int iris_set_properties(struct iris_inst *inst, u32 plane);
+
+#endif
diff --git a/drivers/media/platform/qcom/iris/iris_firmware.c b/drivers/media/platform/qcom/iris/iris_firmware.c
new file mode 100644
index 000000000000..7c493b4a75db
--- /dev/null
+++ b/drivers/media/platform/qcom/iris/iris_firmware.c
@@ -0,0 +1,116 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/firmware.h>
+#include <linux/firmware/qcom/qcom_scm.h>
+#include <linux/of_address.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/soc/qcom/mdt_loader.h>
+
+#include "iris_core.h"
+#include "iris_firmware.h"
+
+#define MAX_FIRMWARE_NAME_SIZE 128
+
+static int iris_load_fw_to_memory(struct iris_core *core, const char *fw_name)
+{
+ u32 pas_id = core->iris_platform_data->pas_id;
+ const struct firmware *firmware = NULL;
+ struct device *dev = core->dev;
+ struct reserved_mem *rmem;
+ struct device_node *node;
+ phys_addr_t mem_phys;
+ size_t res_size;
+ ssize_t fw_size;
+ void *mem_virt;
+ int ret;
+
+ if (strlen(fw_name) >= MAX_FIRMWARE_NAME_SIZE - 4)
+ return -EINVAL;
+
+ node = of_parse_phandle(dev->of_node, "memory-region", 0);
+ if (!node)
+ return -EINVAL;
+
+ rmem = of_reserved_mem_lookup(node);
+ of_node_put(node);
+ if (!rmem)
+ return -EINVAL;
+
+ mem_phys = rmem->base;
+ res_size = rmem->size;
+
+ ret = request_firmware(&firmware, fw_name, dev);
+ if (ret)
+ return ret;
+
+ fw_size = qcom_mdt_get_size(firmware);
+ if (fw_size < 0 || res_size < (size_t)fw_size) {
+ ret = -EINVAL;
+ goto err_release_fw;
+ }
+
+ mem_virt = memremap(mem_phys, res_size, MEMREMAP_WC);
+ if (!mem_virt) {
+ ret = -ENOMEM;
+ goto err_release_fw;
+ }
+
+ ret = qcom_mdt_load(dev, firmware, fw_name,
+ pas_id, mem_virt, mem_phys, res_size, NULL);
+ if (ret)
+ goto err_mem_unmap;
+
+ ret = qcom_scm_pas_auth_and_reset(pas_id);
+ if (ret)
+ goto err_mem_unmap;
+
+ return ret;
+
+err_mem_unmap:
+ memunmap(mem_virt);
+err_release_fw:
+ release_firmware(firmware);
+
+ return ret;
+}
+
+int iris_fw_load(struct iris_core *core)
+{
+ struct tz_cp_config *cp_config = core->iris_platform_data->tz_cp_config_data;
+ const char *fwpath = NULL;
+ int ret;
+
+ ret = of_property_read_string_index(core->dev->of_node, "firmware-name", 0,
+ &fwpath);
+ if (ret)
+ fwpath = core->iris_platform_data->fwname;
+
+ ret = iris_load_fw_to_memory(core, fwpath);
+ if (ret) {
+ dev_err(core->dev, "firmware download failed\n");
+ return -ENOMEM;
+ }
+
+ ret = qcom_scm_mem_protect_video_var(cp_config->cp_start,
+ cp_config->cp_size,
+ cp_config->cp_nonpixel_start,
+ cp_config->cp_nonpixel_size);
+ if (ret) {
+ dev_err(core->dev, "protect memory failed\n");
+ qcom_scm_pas_shutdown(core->iris_platform_data->pas_id);
+ return ret;
+ }
+
+ return ret;
+}
+
+int iris_fw_unload(struct iris_core *core)
+{
+ return qcom_scm_pas_shutdown(core->iris_platform_data->pas_id);
+}
+
+int iris_set_hw_state(struct iris_core *core, bool resume)
+{
+ return qcom_scm_set_remote_state(resume, 0);
+}
diff --git a/drivers/media/platform/qcom/iris/iris_firmware.h b/drivers/media/platform/qcom/iris/iris_firmware.h
new file mode 100644
index 000000000000..e833ecd34887
--- /dev/null
+++ b/drivers/media/platform/qcom/iris/iris_firmware.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef __IRIS_FIRMWARE_H__
+#define __IRIS_FIRMWARE_H__
+
+struct iris_core;
+
+int iris_fw_load(struct iris_core *core);
+int iris_fw_unload(struct iris_core *core);
+int iris_set_hw_state(struct iris_core *core, bool resume);
+
+#endif
diff --git a/drivers/media/platform/qcom/iris/iris_hfi_common.c b/drivers/media/platform/qcom/iris/iris_hfi_common.c
new file mode 100644
index 000000000000..92112eb16c11
--- /dev/null
+++ b/drivers/media/platform/qcom/iris/iris_hfi_common.c
@@ -0,0 +1,176 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/pm_runtime.h>
+
+#include "iris_firmware.h"
+#include "iris_core.h"
+#include "iris_hfi_common.h"
+#include "iris_vpu_common.h"
+
+u32 iris_hfi_get_v4l2_color_primaries(u32 hfi_primaries)
+{
+ switch (hfi_primaries) {
+ case HFI_PRIMARIES_RESERVED:
+ return V4L2_COLORSPACE_DEFAULT;
+ case HFI_PRIMARIES_BT709:
+ return V4L2_COLORSPACE_REC709;
+ case HFI_PRIMARIES_BT470_SYSTEM_M:
+ return V4L2_COLORSPACE_470_SYSTEM_M;
+ case HFI_PRIMARIES_BT470_SYSTEM_BG:
+ return V4L2_COLORSPACE_470_SYSTEM_BG;
+ case HFI_PRIMARIES_BT601_525:
+ return V4L2_COLORSPACE_SMPTE170M;
+ case HFI_PRIMARIES_SMPTE_ST240M:
+ return V4L2_COLORSPACE_SMPTE240M;
+ case HFI_PRIMARIES_BT2020:
+ return V4L2_COLORSPACE_BT2020;
+ case HFI_PRIMARIES_SMPTE_RP431_2:
+ return V4L2_COLORSPACE_DCI_P3;
+ default:
+ return V4L2_COLORSPACE_DEFAULT;
+ }
+}
+
+u32 iris_hfi_get_v4l2_transfer_char(u32 hfi_characteristics)
+{
+ switch (hfi_characteristics) {
+ case HFI_TRANSFER_RESERVED:
+ return V4L2_XFER_FUNC_DEFAULT;
+ case HFI_TRANSFER_BT709:
+ return V4L2_XFER_FUNC_709;
+ case HFI_TRANSFER_SMPTE_ST240M:
+ return V4L2_XFER_FUNC_SMPTE240M;
+ case HFI_TRANSFER_SRGB_SYCC:
+ return V4L2_XFER_FUNC_SRGB;
+ case HFI_TRANSFER_SMPTE_ST2084_PQ:
+ return V4L2_XFER_FUNC_SMPTE2084;
+ default:
+ return V4L2_XFER_FUNC_DEFAULT;
+ }
+}
+
+u32 iris_hfi_get_v4l2_matrix_coefficients(u32 hfi_coefficients)
+{
+ switch (hfi_coefficients) {
+ case HFI_MATRIX_COEFF_RESERVED:
+ return V4L2_YCBCR_ENC_DEFAULT;
+ case HFI_MATRIX_COEFF_BT709:
+ return V4L2_YCBCR_ENC_709;
+ case HFI_MATRIX_COEFF_BT470_SYS_BG_OR_BT601_625:
+ return V4L2_YCBCR_ENC_XV601;
+ case HFI_MATRIX_COEFF_BT601_525_BT1358_525_OR_625:
+ return V4L2_YCBCR_ENC_601;
+ case HFI_MATRIX_COEFF_SMPTE_ST240:
+ return V4L2_YCBCR_ENC_SMPTE240M;
+ case HFI_MATRIX_COEFF_BT2020_NON_CONSTANT:
+ return V4L2_YCBCR_ENC_BT2020;
+ case HFI_MATRIX_COEFF_BT2020_CONSTANT:
+ return V4L2_YCBCR_ENC_BT2020_CONST_LUM;
+ default:
+ return V4L2_YCBCR_ENC_DEFAULT;
+ }
+}
+
+int iris_hfi_core_init(struct iris_core *core)
+{
+ const struct iris_hfi_command_ops *hfi_ops = core->hfi_ops;
+ int ret;
+
+ ret = hfi_ops->sys_init(core);
+ if (ret)
+ return ret;
+
+ ret = hfi_ops->sys_image_version(core);
+ if (ret)
+ return ret;
+
+ return hfi_ops->sys_interframe_powercollapse(core);
+}
+
+irqreturn_t iris_hfi_isr(int irq, void *data)
+{
+ disable_irq_nosync(irq);
+
+ return IRQ_WAKE_THREAD;
+}
+
+irqreturn_t iris_hfi_isr_handler(int irq, void *data)
+{
+ struct iris_core *core = data;
+
+ if (!core)
+ return IRQ_NONE;
+
+ mutex_lock(&core->lock);
+ pm_runtime_mark_last_busy(core->dev);
+ iris_vpu_clear_interrupt(core);
+ mutex_unlock(&core->lock);
+
+ core->hfi_response_ops->hfi_response_handler(core);
+
+ if (!iris_vpu_watchdog(core, core->intr_status))
+ enable_irq(irq);
+
+ return IRQ_HANDLED;
+}
+
+int iris_hfi_pm_suspend(struct iris_core *core)
+{
+ int ret;
+
+ ret = iris_vpu_prepare_pc(core);
+ if (ret) {
+ pm_runtime_mark_last_busy(core->dev);
+ ret = -EAGAIN;
+ goto error;
+ }
+
+ ret = iris_set_hw_state(core, false);
+ if (ret)
+ goto error;
+
+ iris_vpu_power_off(core);
+
+ return 0;
+
+error:
+ dev_err(core->dev, "failed to suspend\n");
+
+ return ret;
+}
+
+int iris_hfi_pm_resume(struct iris_core *core)
+{
+ const struct iris_hfi_command_ops *ops = core->hfi_ops;
+ int ret;
+
+ ret = iris_vpu_power_on(core);
+ if (ret)
+ goto error;
+
+ ret = iris_set_hw_state(core, true);
+ if (ret)
+ goto err_power_off;
+
+ ret = iris_vpu_boot_firmware(core);
+ if (ret)
+ goto err_suspend_hw;
+
+ ret = ops->sys_interframe_powercollapse(core);
+ if (ret)
+ goto err_suspend_hw;
+
+ return 0;
+
+err_suspend_hw:
+ iris_set_hw_state(core, false);
+err_power_off:
+ iris_vpu_power_off(core);
+error:
+ dev_err(core->dev, "failed to resume\n");
+
+ return -EBUSY;
+}
diff --git a/drivers/media/platform/qcom/iris/iris_hfi_common.h b/drivers/media/platform/qcom/iris/iris_hfi_common.h
new file mode 100644
index 000000000000..b2c541367fc6
--- /dev/null
+++ b/drivers/media/platform/qcom/iris/iris_hfi_common.h
@@ -0,0 +1,155 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef __IRIS_HFI_COMMON_H__
+#define __IRIS_HFI_COMMON_H__
+
+#include <linux/types.h>
+#include <media/v4l2-device.h>
+
+#include "iris_buffer.h"
+
+struct iris_inst;
+struct iris_core;
+
+enum hfi_packet_port_type {
+ HFI_PORT_NONE = 0x00000000,
+ HFI_PORT_BITSTREAM = 0x00000001,
+ HFI_PORT_RAW = 0x00000002,
+};
+
+enum hfi_packet_payload_info {
+ HFI_PAYLOAD_NONE = 0x00000000,
+ HFI_PAYLOAD_U32 = 0x00000001,
+ HFI_PAYLOAD_S32 = 0x00000002,
+ HFI_PAYLOAD_U64 = 0x00000003,
+ HFI_PAYLOAD_S64 = 0x00000004,
+ HFI_PAYLOAD_STRUCTURE = 0x00000005,
+ HFI_PAYLOAD_BLOB = 0x00000006,
+ HFI_PAYLOAD_STRING = 0x00000007,
+ HFI_PAYLOAD_Q16 = 0x00000008,
+ HFI_PAYLOAD_U32_ENUM = 0x00000009,
+ HFI_PAYLOAD_32_PACKED = 0x0000000a,
+ HFI_PAYLOAD_U32_ARRAY = 0x0000000b,
+ HFI_PAYLOAD_S32_ARRAY = 0x0000000c,
+ HFI_PAYLOAD_64_PACKED = 0x0000000d,
+};
+
+enum hfi_packet_host_flags {
+ HFI_HOST_FLAGS_NONE = 0x00000000,
+ HFI_HOST_FLAGS_INTR_REQUIRED = 0x00000001,
+ HFI_HOST_FLAGS_RESPONSE_REQUIRED = 0x00000002,
+ HFI_HOST_FLAGS_NON_DISCARDABLE = 0x00000004,
+ HFI_HOST_FLAGS_GET_PROPERTY = 0x00000008,
+};
+
+enum hfi_color_primaries {
+ HFI_PRIMARIES_RESERVED = 0,
+ HFI_PRIMARIES_BT709 = 1,
+ HFI_PRIMARIES_UNSPECIFIED = 2,
+ HFI_PRIMARIES_BT470_SYSTEM_M = 4,
+ HFI_PRIMARIES_BT470_SYSTEM_BG = 5,
+ HFI_PRIMARIES_BT601_525 = 6,
+ HFI_PRIMARIES_SMPTE_ST240M = 7,
+ HFI_PRIMARIES_GENERIC_FILM = 8,
+ HFI_PRIMARIES_BT2020 = 9,
+ HFI_PRIMARIES_SMPTE_ST428_1 = 10,
+ HFI_PRIMARIES_SMPTE_RP431_2 = 11,
+ HFI_PRIMARIES_SMPTE_EG431_1 = 12,
+ HFI_PRIMARIES_SMPTE_EBU_TECH = 22,
+};
+
+enum hfi_transfer_characteristics {
+ HFI_TRANSFER_RESERVED = 0,
+ HFI_TRANSFER_BT709 = 1,
+ HFI_TRANSFER_UNSPECIFIED = 2,
+ HFI_TRANSFER_BT470_SYSTEM_M = 4,
+ HFI_TRANSFER_BT470_SYSTEM_BG = 5,
+ HFI_TRANSFER_BT601_525_OR_625 = 6,
+ HFI_TRANSFER_SMPTE_ST240M = 7,
+ HFI_TRANSFER_LINEAR = 8,
+ HFI_TRANSFER_LOG_100_1 = 9,
+ HFI_TRANSFER_LOG_SQRT = 10,
+ HFI_TRANSFER_XVYCC = 11,
+ HFI_TRANSFER_BT1361_0 = 12,
+ HFI_TRANSFER_SRGB_SYCC = 13,
+ HFI_TRANSFER_BT2020_14 = 14,
+ HFI_TRANSFER_BT2020_15 = 15,
+ HFI_TRANSFER_SMPTE_ST2084_PQ = 16,
+ HFI_TRANSFER_SMPTE_ST428_1 = 17,
+ HFI_TRANSFER_BT2100_2_HLG = 18,
+};
+
+enum hfi_matrix_coefficients {
+ HFI_MATRIX_COEFF_SRGB_SMPTE_ST428_1 = 0,
+ HFI_MATRIX_COEFF_BT709 = 1,
+ HFI_MATRIX_COEFF_UNSPECIFIED = 2,
+ HFI_MATRIX_COEFF_RESERVED = 3,
+ HFI_MATRIX_COEFF_FCC_TITLE_47 = 4,
+ HFI_MATRIX_COEFF_BT470_SYS_BG_OR_BT601_625 = 5,
+ HFI_MATRIX_COEFF_BT601_525_BT1358_525_OR_625 = 6,
+ HFI_MATRIX_COEFF_SMPTE_ST240 = 7,
+ HFI_MATRIX_COEFF_YCGCO = 8,
+ HFI_MATRIX_COEFF_BT2020_NON_CONSTANT = 9,
+ HFI_MATRIX_COEFF_BT2020_CONSTANT = 10,
+ HFI_MATRIX_COEFF_SMPTE_ST2085 = 11,
+ HFI_MATRIX_COEFF_SMPTE_CHROM_DERV_NON_CONSTANT = 12,
+ HFI_MATRIX_COEFF_SMPTE_CHROM_DERV_CONSTANT = 13,
+ HFI_MATRIX_COEFF_BT2100 = 14,
+};
+
+struct iris_hfi_prop_type_handle {
+ u32 type;
+ int (*handle)(struct iris_inst *inst);
+};
+
+struct iris_hfi_command_ops {
+ int (*sys_init)(struct iris_core *core);
+ int (*sys_image_version)(struct iris_core *core);
+ int (*sys_interframe_powercollapse)(struct iris_core *core);
+ int (*sys_pc_prep)(struct iris_core *core);
+ int (*session_set_config_params)(struct iris_inst *inst, u32 plane);
+ int (*session_set_property)(struct iris_inst *inst,
+ u32 packet_type, u32 flag, u32 plane, u32 payload_type,
+ void *payload, u32 payload_size);
+ int (*session_open)(struct iris_inst *inst);
+ int (*session_start)(struct iris_inst *inst, u32 plane);
+ int (*session_queue_buf)(struct iris_inst *inst, struct iris_buffer *buffer);
+ int (*session_release_buf)(struct iris_inst *inst, struct iris_buffer *buffer);
+ int (*session_pause)(struct iris_inst *inst, u32 plane);
+ int (*session_resume_drc)(struct iris_inst *inst, u32 plane);
+ int (*session_stop)(struct iris_inst *inst, u32 plane);
+ int (*session_drain)(struct iris_inst *inst, u32 plane);
+ int (*session_resume_drain)(struct iris_inst *inst, u32 plane);
+ int (*session_close)(struct iris_inst *inst);
+};
+
+struct iris_hfi_response_ops {
+ void (*hfi_response_handler)(struct iris_core *core);
+};
+
+struct hfi_subscription_params {
+ u32 bitstream_resolution;
+ u32 crop_offsets[2];
+ u32 bit_depth;
+ u32 coded_frames;
+ u32 fw_min_count;
+ u32 pic_order_cnt;
+ u32 color_info;
+ u32 profile;
+ u32 level;
+};
+
+u32 iris_hfi_get_v4l2_color_primaries(u32 hfi_primaries);
+u32 iris_hfi_get_v4l2_transfer_char(u32 hfi_characteristics);
+u32 iris_hfi_get_v4l2_matrix_coefficients(u32 hfi_coefficients);
+int iris_hfi_core_init(struct iris_core *core);
+int iris_hfi_pm_suspend(struct iris_core *core);
+int iris_hfi_pm_resume(struct iris_core *core);
+
+irqreturn_t iris_hfi_isr(int irq, void *data);
+irqreturn_t iris_hfi_isr_handler(int irq, void *data);
+
+#endif
diff --git a/drivers/media/platform/qcom/iris/iris_hfi_gen1.h b/drivers/media/platform/qcom/iris/iris_hfi_gen1.h
new file mode 100644
index 000000000000..19b8e9054a75
--- /dev/null
+++ b/drivers/media/platform/qcom/iris/iris_hfi_gen1.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef __IRIS_HFI_GEN1_H__
+#define __IRIS_HFI_GEN1_H__
+
+struct iris_core;
+struct iris_inst;
+
+void iris_hfi_gen1_command_ops_init(struct iris_core *core);
+void iris_hfi_gen1_response_ops_init(struct iris_core *core);
+struct iris_inst *iris_hfi_gen1_get_instance(void);
+
+#endif
diff --git a/drivers/media/platform/qcom/iris/iris_hfi_gen1_command.c b/drivers/media/platform/qcom/iris/iris_hfi_gen1_command.c
new file mode 100644
index 000000000000..64f887d9a17d
--- /dev/null
+++ b/drivers/media/platform/qcom/iris/iris_hfi_gen1_command.c
@@ -0,0 +1,826 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include "iris_hfi_gen1.h"
+#include "iris_hfi_gen1_defines.h"
+#include "iris_instance.h"
+#include "iris_vpu_buffer.h"
+
+static u32 iris_hfi_gen1_buf_type_from_driver(enum iris_buffer_type buffer_type)
+{
+ switch (buffer_type) {
+ case BUF_INPUT:
+ return HFI_BUFFER_INPUT;
+ case BUF_OUTPUT:
+ return HFI_BUFFER_OUTPUT;
+ case BUF_PERSIST:
+ return HFI_BUFFER_INTERNAL_PERSIST_1;
+ case BUF_BIN:
+ return HFI_BUFFER_INTERNAL_SCRATCH;
+ case BUF_SCRATCH_1:
+ return HFI_BUFFER_INTERNAL_SCRATCH_1;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int iris_hfi_gen1_sys_init(struct iris_core *core)
+{
+ struct hfi_sys_init_pkt sys_init_pkt;
+
+ sys_init_pkt.hdr.size = sizeof(sys_init_pkt);
+ sys_init_pkt.hdr.pkt_type = HFI_CMD_SYS_INIT;
+ sys_init_pkt.arch_type = HFI_VIDEO_ARCH_OX;
+
+ return iris_hfi_queue_cmd_write_locked(core, &sys_init_pkt, sys_init_pkt.hdr.size);
+}
+
+static int iris_hfi_gen1_sys_image_version(struct iris_core *core)
+{
+ struct hfi_sys_get_property_pkt packet;
+
+ packet.hdr.size = sizeof(packet);
+ packet.hdr.pkt_type = HFI_CMD_SYS_GET_PROPERTY;
+ packet.num_properties = 1;
+ packet.data = HFI_PROPERTY_SYS_IMAGE_VERSION;
+
+ return iris_hfi_queue_cmd_write_locked(core, &packet, packet.hdr.size);
+}
+
+static int iris_hfi_gen1_sys_interframe_powercollapse(struct iris_core *core)
+{
+ struct hfi_sys_set_property_pkt *pkt;
+ struct hfi_enable *hfi;
+ u32 packet_size;
+ int ret;
+
+ packet_size = struct_size(pkt, data, 1) + sizeof(*hfi);
+ pkt = kzalloc(packet_size, GFP_KERNEL);
+ if (!pkt)
+ return -ENOMEM;
+
+ hfi = (struct hfi_enable *)&pkt->data[1];
+
+ pkt->hdr.size = packet_size;
+ pkt->hdr.pkt_type = HFI_CMD_SYS_SET_PROPERTY;
+ pkt->num_properties = 1;
+ pkt->data[0] = HFI_PROPERTY_SYS_CODEC_POWER_PLANE_CTRL;
+ hfi->enable = true;
+
+ ret = iris_hfi_queue_cmd_write_locked(core, pkt, pkt->hdr.size);
+ kfree(pkt);
+
+ return ret;
+}
+
+static int iris_hfi_gen1_sys_pc_prep(struct iris_core *core)
+{
+ struct hfi_sys_pc_prep_pkt pkt;
+
+ pkt.hdr.size = sizeof(struct hfi_sys_pc_prep_pkt);
+ pkt.hdr.pkt_type = HFI_CMD_SYS_PC_PREP;
+
+ return iris_hfi_queue_cmd_write_locked(core, &pkt, pkt.hdr.size);
+}
+
+static int iris_hfi_gen1_session_open(struct iris_inst *inst)
+{
+ struct hfi_session_open_pkt packet;
+ int ret;
+
+ if (inst->state != IRIS_INST_DEINIT)
+ return -EALREADY;
+
+ packet.shdr.hdr.size = sizeof(struct hfi_session_open_pkt);
+ packet.shdr.hdr.pkt_type = HFI_CMD_SYS_SESSION_INIT;
+ packet.shdr.session_id = inst->session_id;
+ packet.session_domain = HFI_SESSION_TYPE_DEC;
+ packet.session_codec = HFI_VIDEO_CODEC_H264;
+
+ reinit_completion(&inst->completion);
+
+ ret = iris_hfi_queue_cmd_write(inst->core, &packet, packet.shdr.hdr.size);
+ if (ret)
+ return ret;
+
+ return iris_wait_for_session_response(inst, false);
+}
+
+static void iris_hfi_gen1_packet_session_cmd(struct iris_inst *inst,
+ struct hfi_session_pkt *packet,
+ u32 ptype)
+{
+ packet->shdr.hdr.size = sizeof(*packet);
+ packet->shdr.hdr.pkt_type = ptype;
+ packet->shdr.session_id = inst->session_id;
+}
+
+static int iris_hfi_gen1_session_close(struct iris_inst *inst)
+{
+ struct hfi_session_pkt packet;
+
+ iris_hfi_gen1_packet_session_cmd(inst, &packet, HFI_CMD_SYS_SESSION_END);
+
+ return iris_hfi_queue_cmd_write(inst->core, &packet, packet.shdr.hdr.size);
+}
+
+static int iris_hfi_gen1_session_start(struct iris_inst *inst, u32 plane)
+{
+ struct iris_core *core = inst->core;
+ struct hfi_session_pkt packet;
+ int ret;
+
+ if (!V4L2_TYPE_IS_OUTPUT(plane))
+ return 0;
+
+ if (inst->sub_state & IRIS_INST_SUB_LOAD_RESOURCES)
+ return 0;
+
+ reinit_completion(&inst->completion);
+ iris_hfi_gen1_packet_session_cmd(inst, &packet, HFI_CMD_SESSION_LOAD_RESOURCES);
+
+ ret = iris_hfi_queue_cmd_write(core, &packet, packet.shdr.hdr.size);
+ if (ret)
+ return ret;
+
+ ret = iris_wait_for_session_response(inst, false);
+ if (ret)
+ return ret;
+
+ reinit_completion(&inst->completion);
+ iris_hfi_gen1_packet_session_cmd(inst, &packet, HFI_CMD_SESSION_START);
+
+ ret = iris_hfi_queue_cmd_write(core, &packet, packet.shdr.hdr.size);
+ if (ret)
+ return ret;
+
+ ret = iris_wait_for_session_response(inst, false);
+ if (ret)
+ return ret;
+
+ return iris_inst_change_sub_state(inst, 0, IRIS_INST_SUB_LOAD_RESOURCES);
+}
+
+static int iris_hfi_gen1_session_stop(struct iris_inst *inst, u32 plane)
+{
+ struct hfi_session_flush_pkt flush_pkt;
+ struct iris_core *core = inst->core;
+ struct hfi_session_pkt pkt;
+ u32 flush_type = 0;
+ int ret = 0;
+
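+ /*
+ * When the other port has already stopped streaming (or the session
+ * is in error), tear the session down: send STOP and
+ * RELEASE_RESOURCES and return all queued buffers. While both ports
+ * are still streaming, only flush the port being stopped.
+ */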
+ if ((V4L2_TYPE_IS_OUTPUT(plane) &&
+ inst->state == IRIS_INST_INPUT_STREAMING) ||
+ (V4L2_TYPE_IS_CAPTURE(plane) &&
+ inst->state == IRIS_INST_OUTPUT_STREAMING) ||
+ inst->state == IRIS_INST_ERROR) {
+ reinit_completion(&inst->completion);
+ iris_hfi_gen1_packet_session_cmd(inst, &pkt, HFI_CMD_SESSION_STOP);
+ ret = iris_hfi_queue_cmd_write(core, &pkt, pkt.shdr.hdr.size);
+ if (!ret)
+ ret = iris_wait_for_session_response(inst, false);
+
+ reinit_completion(&inst->completion);
+ iris_hfi_gen1_packet_session_cmd(inst, &pkt, HFI_CMD_SESSION_RELEASE_RESOURCES);
+ ret = iris_hfi_queue_cmd_write(core, &pkt, pkt.shdr.hdr.size);
+ if (!ret)
+ ret = iris_wait_for_session_response(inst, false);
+
+ iris_inst_change_sub_state(inst, IRIS_INST_SUB_LOAD_RESOURCES, 0);
+
+ iris_helper_buffers_done(inst, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE,
+ VB2_BUF_STATE_ERROR);
+ iris_helper_buffers_done(inst, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE,
+ VB2_BUF_STATE_ERROR);
+ } else if (inst->state == IRIS_INST_STREAMING) {
+ if (V4L2_TYPE_IS_OUTPUT(plane))
+ flush_type = HFI_FLUSH_ALL;
+ else if (V4L2_TYPE_IS_CAPTURE(plane))
+ flush_type = HFI_FLUSH_OUTPUT;
+
+ reinit_completion(&inst->flush_completion);
+
+ flush_pkt.shdr.hdr.size = sizeof(struct hfi_session_flush_pkt);
+ flush_pkt.shdr.hdr.pkt_type = HFI_CMD_SESSION_FLUSH;
+ flush_pkt.shdr.session_id = inst->session_id;
+ flush_pkt.flush_type = flush_type;
+
+ ret = iris_hfi_queue_cmd_write(core, &flush_pkt, flush_pkt.shdr.hdr.size);
+ if (!ret)
+ ret = iris_wait_for_session_response(inst, true);
+ }
+
+ return ret;
+}
+
+static int iris_hfi_gen1_session_continue(struct iris_inst *inst, u32 plane)
+{
+ struct hfi_session_pkt packet;
+
+ iris_hfi_gen1_packet_session_cmd(inst, &packet, HFI_CMD_SESSION_CONTINUE);
+
+ return iris_hfi_queue_cmd_write(inst->core, &packet, packet.shdr.hdr.size);
+}
+
+static int iris_hfi_gen1_queue_input_buffer(struct iris_inst *inst, struct iris_buffer *buf)
+{
+ struct hfi_session_empty_buffer_compressed_pkt ip_pkt;
+
+ ip_pkt.shdr.hdr.size = sizeof(struct hfi_session_empty_buffer_compressed_pkt);
+ ip_pkt.shdr.hdr.pkt_type = HFI_CMD_SESSION_EMPTY_BUFFER;
+ ip_pkt.shdr.session_id = inst->session_id;
+ ip_pkt.time_stamp_hi = upper_32_bits(buf->timestamp);
+ ip_pkt.time_stamp_lo = lower_32_bits(buf->timestamp);
+ ip_pkt.flags = buf->flags;
+ ip_pkt.mark_target = 0;
+ ip_pkt.mark_data = 0;
+ ip_pkt.offset = buf->data_offset;
+ ip_pkt.alloc_len = buf->buffer_size;
+ ip_pkt.filled_len = buf->data_size;
+ ip_pkt.input_tag = buf->index;
+ ip_pkt.packet_buffer = buf->device_addr;
+
+ return iris_hfi_queue_cmd_write(inst->core, &ip_pkt, ip_pkt.shdr.hdr.size);
+}
+
+static int iris_hfi_gen1_queue_output_buffer(struct iris_inst *inst, struct iris_buffer *buf)
+{
+ struct hfi_session_fill_buffer_pkt op_pkt;
+
+ op_pkt.shdr.hdr.size = sizeof(struct hfi_session_fill_buffer_pkt);
+ op_pkt.shdr.hdr.pkt_type = HFI_CMD_SESSION_FILL_BUFFER;
+ op_pkt.shdr.session_id = inst->session_id;
+ op_pkt.output_tag = buf->index;
+ op_pkt.packet_buffer = buf->device_addr;
+ op_pkt.extradata_buffer = 0;
+ op_pkt.alloc_len = buf->buffer_size;
+ op_pkt.filled_len = buf->data_size;
+ op_pkt.offset = buf->data_offset;
+ op_pkt.data = 0;
+
+ if (buf->type == BUF_OUTPUT && iris_split_mode_enabled(inst))
+ op_pkt.stream_id = 1;
+ else
+ op_pkt.stream_id = 0;
+
+ return iris_hfi_queue_cmd_write(inst->core, &op_pkt, op_pkt.shdr.hdr.size);
+}
+
+static int iris_hfi_gen1_queue_internal_buffer(struct iris_inst *inst, struct iris_buffer *buf)
+{
+ struct hfi_session_set_buffers_pkt *int_pkt;
+ u32 buffer_type, i;
+ u32 packet_size;
+ int ret;
+
+ packet_size = struct_size(int_pkt, buffer_info, 1);
+ int_pkt = kzalloc(packet_size, GFP_KERNEL);
+ if (!int_pkt)
+ return -ENOMEM;
+
+ int_pkt->shdr.hdr.pkt_type = HFI_CMD_SESSION_SET_BUFFERS;
+ int_pkt->shdr.session_id = inst->session_id;
+ int_pkt->buffer_size = buf->buffer_size;
+ int_pkt->min_buffer_size = buf->buffer_size;
+ int_pkt->num_buffers = 1;
+ int_pkt->extradata_size = 0;
+ int_pkt->shdr.hdr.size = packet_size;
+ for (i = 0; i < int_pkt->num_buffers; i++)
+ int_pkt->buffer_info[i] = buf->device_addr;
+ buffer_type = iris_hfi_gen1_buf_type_from_driver(buf->type);
+ if (buffer_type == -EINVAL) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ int_pkt->buffer_type = buffer_type;
+ ret = iris_hfi_queue_cmd_write(inst->core, int_pkt, int_pkt->shdr.hdr.size);
+
+exit:
+ kfree(int_pkt);
+
+ return ret;
+}
+
+static int iris_hfi_gen1_session_queue_buffer(struct iris_inst *inst, struct iris_buffer *buf)
+{
+ switch (buf->type) {
+ case BUF_INPUT:
+ return iris_hfi_gen1_queue_input_buffer(inst, buf);
+ case BUF_OUTPUT:
+ case BUF_DPB:
+ return iris_hfi_gen1_queue_output_buffer(inst, buf);
+ case BUF_PERSIST:
+ case BUF_BIN:
+ case BUF_SCRATCH_1:
+ return iris_hfi_gen1_queue_internal_buffer(inst, buf);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int iris_hfi_gen1_session_unset_buffers(struct iris_inst *inst, struct iris_buffer *buf)
+{
+ struct hfi_session_release_buffer_pkt *pkt;
+ u32 packet_size, buffer_type, i;
+ int ret;
+
+ buffer_type = iris_hfi_gen1_buf_type_from_driver(buf->type);
+ if (buffer_type == -EINVAL)
+ return -EINVAL;
+
+ if (buffer_type == HFI_BUFFER_INPUT)
+ return 0;
+
+ packet_size = sizeof(*pkt) + sizeof(struct hfi_buffer_info);
+ pkt = kzalloc(packet_size, GFP_KERNEL);
+ if (!pkt)
+ return -ENOMEM;
+
+ pkt->shdr.hdr.pkt_type = HFI_CMD_SESSION_RELEASE_BUFFERS;
+ pkt->shdr.session_id = inst->session_id;
+ pkt->buffer_size = buf->buffer_size;
+ pkt->num_buffers = 1;
+
+ if (buffer_type == HFI_BUFFER_OUTPUT ||
+ buffer_type == HFI_BUFFER_OUTPUT2) {
+ struct hfi_buffer_info *bi;
+
+ bi = (struct hfi_buffer_info *)pkt->buffer_info;
+ for (i = 0; i < pkt->num_buffers; i++) {
+ bi->buffer_addr = buf->device_addr;
+ bi->extradata_addr = 0;
+ }
+ pkt->shdr.hdr.size = packet_size;
+ } else {
+ for (i = 0; i < pkt->num_buffers; i++)
+ pkt->buffer_info[i] = buf->device_addr;
+ pkt->extradata_size = 0;
+ pkt->shdr.hdr.size =
+ sizeof(struct hfi_session_set_buffers_pkt) +
+ ((pkt->num_buffers) * sizeof(u32));
+ }
+
+ pkt->response_req = true;
+ pkt->buffer_type = buffer_type;
+
+ ret = iris_hfi_queue_cmd_write(inst->core, pkt, pkt->shdr.hdr.size);
+ if (ret)
+ goto exit;
+
+ ret = iris_wait_for_session_response(inst, false);
+
+exit:
+ kfree(pkt);
+
+ return ret;
+}
+
+static int iris_hfi_gen1_session_drain(struct iris_inst *inst, u32 plane)
+{
+ struct hfi_session_empty_buffer_compressed_pkt ip_pkt = {0};
+
+ ip_pkt.shdr.hdr.size = sizeof(struct hfi_session_empty_buffer_compressed_pkt);
+ ip_pkt.shdr.hdr.pkt_type = HFI_CMD_SESSION_EMPTY_BUFFER;
+ ip_pkt.shdr.session_id = inst->session_id;
+ ip_pkt.flags = HFI_BUFFERFLAG_EOS;
+
+ return iris_hfi_queue_cmd_write(inst->core, &ip_pkt, ip_pkt.shdr.hdr.size);
+}
+
+static int
+iris_hfi_gen1_packet_session_set_property(struct hfi_session_set_property_pkt *packet,
+ struct iris_inst *inst, u32 ptype, void *pdata)
+{
+ void *prop_data = &packet->data[1];
+
+ packet->shdr.hdr.size = sizeof(*packet);
+ packet->shdr.hdr.pkt_type = HFI_CMD_SESSION_SET_PROPERTY;
+ packet->shdr.session_id = inst->session_id;
+ packet->num_properties = 1;
+ packet->data[0] = ptype;
+
+ switch (ptype) {
+ case HFI_PROPERTY_PARAM_FRAME_SIZE: {
+ struct hfi_framesize *in = pdata, *fsize = prop_data;
+
+ fsize->buffer_type = in->buffer_type;
+ fsize->height = in->height;
+ fsize->width = in->width;
+ packet->shdr.hdr.size += sizeof(u32) + sizeof(*fsize);
+ break;
+ }
+ case HFI_PROPERTY_CONFIG_VIDEOCORES_USAGE: {
+ struct hfi_videocores_usage_type *in = pdata, *cu = prop_data;
+
+ cu->video_core_enable_mask = in->video_core_enable_mask;
+ packet->shdr.hdr.size += sizeof(u32) + sizeof(*cu);
+ break;
+ }
+ case HFI_PROPERTY_PARAM_UNCOMPRESSED_FORMAT_SELECT: {
+ struct hfi_uncompressed_format_select *in = pdata;
+ struct hfi_uncompressed_format_select *hfi = prop_data;
+
+ hfi->buffer_type = in->buffer_type;
+ hfi->format = in->format;
+ packet->shdr.hdr.size += sizeof(u32) + sizeof(*hfi);
+ break;
+ }
+ case HFI_PROPERTY_PARAM_UNCOMPRESSED_PLANE_ACTUAL_CONSTRAINTS_INFO: {
+ struct hfi_uncompressed_plane_actual_constraints_info *info = prop_data;
+
+ info->buffer_type = HFI_BUFFER_OUTPUT2;
+ info->num_planes = 2;
+ info->plane_format[0].stride_multiples = 128;
+ info->plane_format[0].max_stride = 8192;
+ info->plane_format[0].min_plane_buffer_height_multiple = 32;
+ info->plane_format[0].buffer_alignment = 256;
+ if (info->num_planes > 1) {
+ info->plane_format[1].stride_multiples = 128;
+ info->plane_format[1].max_stride = 8192;
+ info->plane_format[1].min_plane_buffer_height_multiple = 16;
+ info->plane_format[1].buffer_alignment = 256;
+ }
+
+ packet->shdr.hdr.size += sizeof(u32) + sizeof(*info);
+ break;
+ }
+ case HFI_PROPERTY_PARAM_BUFFER_COUNT_ACTUAL: {
+ struct hfi_buffer_count_actual *in = pdata;
+ struct hfi_buffer_count_actual *count = prop_data;
+
+ count->type = in->type;
+ count->count_actual = in->count_actual;
+ count->count_min_host = in->count_min_host;
+ packet->shdr.hdr.size += sizeof(u32) + sizeof(*count);
+ break;
+ }
+ case HFI_PROPERTY_PARAM_VDEC_MULTI_STREAM: {
+ struct hfi_multi_stream *in = pdata;
+ struct hfi_multi_stream *multi = prop_data;
+
+ multi->buffer_type = in->buffer_type;
+ multi->enable = in->enable;
+ packet->shdr.hdr.size += sizeof(u32) + sizeof(*multi);
+ break;
+ }
+ case HFI_PROPERTY_PARAM_BUFFER_SIZE_ACTUAL: {
+ struct hfi_buffer_size_actual *in = pdata, *sz = prop_data;
+
+ sz->size = in->size;
+ sz->type = in->type;
+ packet->shdr.hdr.size += sizeof(u32) + sizeof(*sz);
+ break;
+ }
+ case HFI_PROPERTY_PARAM_WORK_ROUTE: {
+ struct hfi_video_work_route *wr = prop_data;
+ u32 *in = pdata;
+
+ wr->video_work_route = *in;
+ packet->shdr.hdr.size += sizeof(u32) + sizeof(*wr);
+ break;
+ }
+ case HFI_PROPERTY_PARAM_WORK_MODE: {
+ struct hfi_video_work_mode *wm = prop_data;
+ u32 *in = pdata;
+
+ wm->video_work_mode = *in;
+ packet->shdr.hdr.size += sizeof(u32) + sizeof(*wm);
+ break;
+ }
+ case HFI_PROPERTY_CONFIG_VDEC_POST_LOOP_DEBLOCKER: {
+ struct hfi_enable *en = prop_data;
+ u32 *in = pdata;
+
+ en->enable = *in;
+ packet->shdr.hdr.size += sizeof(u32) + sizeof(*en);
+ break;
+ }
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int hfi_gen1_set_property(struct iris_inst *inst, u32 packet_type,
+ void *payload, u32 payload_size)
+{
+ struct hfi_session_set_property_pkt *pkt;
+ u32 packet_size;
+ int ret;
+
+ packet_size = sizeof(*pkt) + sizeof(u32) + payload_size;
+ pkt = kzalloc(packet_size, GFP_KERNEL);
+ if (!pkt)
+ return -ENOMEM;
+
+ ret = iris_hfi_gen1_packet_session_set_property(pkt, inst, packet_type, payload);
+ if (ret == -EOPNOTSUPP) {
+ ret = 0;
+ goto exit;
+ }
+ if (ret)
+ goto exit;
+
+ ret = iris_hfi_queue_cmd_write(inst->core, pkt, pkt->shdr.hdr.size);
+
+exit:
+ kfree(pkt);
+
+ return ret;
+}
+
+static int iris_hfi_gen1_session_set_property(struct iris_inst *inst, u32 packet_type,
+ u32 flag, u32 plane, u32 payload_type,
+ void *payload, u32 payload_size)
+{
+ return hfi_gen1_set_property(inst, packet_type, payload, payload_size);
+}
+
+static int iris_hfi_gen1_set_resolution(struct iris_inst *inst)
+{
+ u32 ptype = HFI_PROPERTY_PARAM_FRAME_SIZE;
+ struct hfi_framesize fs;
+ int ret;
+
+ fs.buffer_type = HFI_BUFFER_INPUT;
+ fs.width = inst->fmt_src->fmt.pix_mp.width;
+ fs.height = inst->fmt_src->fmt.pix_mp.height;
+
+ ret = hfi_gen1_set_property(inst, ptype, &fs, sizeof(fs));
+ if (ret)
+ return ret;
+
+ fs.buffer_type = HFI_BUFFER_OUTPUT2;
+ fs.width = inst->fmt_dst->fmt.pix_mp.width;
+ fs.height = inst->fmt_dst->fmt.pix_mp.height;
+
+ return hfi_gen1_set_property(inst, ptype, &fs, sizeof(fs));
+}
+
+static int iris_hfi_gen1_decide_core(struct iris_inst *inst)
+{
+ const u32 ptype = HFI_PROPERTY_CONFIG_VIDEOCORES_USAGE;
+ struct hfi_videocores_usage_type cu;
+
+ cu.video_core_enable_mask = HFI_CORE_ID_1;
+
+ return hfi_gen1_set_property(inst, ptype, &cu, sizeof(cu));
+}
+
+static int iris_hfi_gen1_set_raw_format(struct iris_inst *inst)
+{
+ const u32 ptype = HFI_PROPERTY_PARAM_UNCOMPRESSED_FORMAT_SELECT;
+ u32 pixelformat = inst->fmt_dst->fmt.pix_mp.pixelformat;
+ struct hfi_uncompressed_format_select fmt;
+ int ret;
+
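+ /*
+ * In split mode the firmware keeps its reference (DPB) frames in UBWC
+ * format on HFI_BUFFER_OUTPUT and writes linear NV12 frames for the
+ * capture queue on HFI_BUFFER_OUTPUT2; otherwise a single linear NV12
+ * output is used.
+ */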
+ if (iris_split_mode_enabled(inst)) {
+ fmt.buffer_type = HFI_BUFFER_OUTPUT;
+ fmt.format = pixelformat == V4L2_PIX_FMT_NV12 ? HFI_COLOR_FORMAT_NV12_UBWC : 0;
+
+ ret = hfi_gen1_set_property(inst, ptype, &fmt, sizeof(fmt));
+ if (ret)
+ return ret;
+
+ fmt.buffer_type = HFI_BUFFER_OUTPUT2;
+ fmt.format = pixelformat == V4L2_PIX_FMT_NV12 ? HFI_COLOR_FORMAT_NV12 : 0;
+
+ ret = hfi_gen1_set_property(inst, ptype, &fmt, sizeof(fmt));
+ } else {
+ fmt.buffer_type = HFI_BUFFER_OUTPUT;
+ fmt.format = pixelformat == V4L2_PIX_FMT_NV12 ? HFI_COLOR_FORMAT_NV12 : 0;
+
+ ret = hfi_gen1_set_property(inst, ptype, &fmt, sizeof(fmt));
+ }
+
+ return ret;
+}
+
+static int iris_hfi_gen1_set_format_constraints(struct iris_inst *inst)
+{
+ const u32 ptype = HFI_PROPERTY_PARAM_UNCOMPRESSED_PLANE_ACTUAL_CONSTRAINTS_INFO;
+ struct hfi_uncompressed_plane_actual_constraints_info pconstraint;
+
+ pconstraint.buffer_type = HFI_BUFFER_OUTPUT2;
+ pconstraint.num_planes = 2;
+ pconstraint.plane_format[0].stride_multiples = 128;
+ pconstraint.plane_format[0].max_stride = 8192;
+ pconstraint.plane_format[0].min_plane_buffer_height_multiple = 32;
+ pconstraint.plane_format[0].buffer_alignment = 256;
+
+ pconstraint.plane_format[1].stride_multiples = 128;
+ pconstraint.plane_format[1].max_stride = 8192;
+ pconstraint.plane_format[1].min_plane_buffer_height_multiple = 16;
+ pconstraint.plane_format[1].buffer_alignment = 256;
+
+ return hfi_gen1_set_property(inst, ptype, &pconstraint, sizeof(pconstraint));
+}
+
+static int iris_hfi_gen1_set_num_bufs(struct iris_inst *inst)
+{
+ u32 ptype = HFI_PROPERTY_PARAM_BUFFER_COUNT_ACTUAL;
+ struct hfi_buffer_count_actual buf_count;
+ int ret;
+
+ buf_count.type = HFI_BUFFER_INPUT;
+ buf_count.count_actual = VIDEO_MAX_FRAME;
+ buf_count.count_min_host = VIDEO_MAX_FRAME;
+
+ ret = hfi_gen1_set_property(inst, ptype, &buf_count, sizeof(buf_count));
+ if (ret)
+ return ret;
+
+ if (iris_split_mode_enabled(inst)) {
+ buf_count.type = HFI_BUFFER_OUTPUT;
+ buf_count.count_actual = VIDEO_MAX_FRAME;
+ buf_count.count_min_host = VIDEO_MAX_FRAME;
+
+ ret = hfi_gen1_set_property(inst, ptype, &buf_count, sizeof(buf_count));
+ if (ret)
+ return ret;
+
+ buf_count.type = HFI_BUFFER_OUTPUT2;
+ buf_count.count_actual = iris_vpu_buf_count(inst, BUF_DPB);
+ buf_count.count_min_host = iris_vpu_buf_count(inst, BUF_DPB);
+
+ ret = hfi_gen1_set_property(inst, ptype, &buf_count, sizeof(buf_count));
+ } else {
+ buf_count.type = HFI_BUFFER_OUTPUT;
+ buf_count.count_actual = VIDEO_MAX_FRAME;
+ buf_count.count_min_host = VIDEO_MAX_FRAME;
+
+ ret = hfi_gen1_set_property(inst, ptype, &buf_count, sizeof(buf_count));
+ }
+
+ return ret;
+}
+
+static int iris_hfi_gen1_set_multistream(struct iris_inst *inst)
+{
+ u32 ptype = HFI_PROPERTY_PARAM_VDEC_MULTI_STREAM;
+ struct hfi_multi_stream multi = {0};
+ int ret;
+
+ if (iris_split_mode_enabled(inst)) {
+ multi.buffer_type = HFI_BUFFER_OUTPUT;
+ multi.enable = 0;
+
+ ret = hfi_gen1_set_property(inst, ptype, &multi, sizeof(multi));
+ if (ret)
+ return ret;
+
+ multi.buffer_type = HFI_BUFFER_OUTPUT2;
+ multi.enable = 1;
+
+ ret = hfi_gen1_set_property(inst, ptype, &multi, sizeof(multi));
+ } else {
+ multi.buffer_type = HFI_BUFFER_OUTPUT;
+ multi.enable = 1;
+
+ ret = hfi_gen1_set_property(inst, ptype, &multi, sizeof(multi));
+ if (ret)
+ return ret;
+
+ multi.buffer_type = HFI_BUFFER_OUTPUT2;
+ multi.enable = 0;
+
+ ret = hfi_gen1_set_property(inst, ptype, &multi, sizeof(multi));
+ }
+
+ return ret;
+}
+
+static int iris_hfi_gen1_set_bufsize(struct iris_inst *inst)
+{
+ const u32 ptype = HFI_PROPERTY_PARAM_BUFFER_SIZE_ACTUAL;
+ struct hfi_buffer_size_actual bufsz;
+ int ret;
+
+ if (iris_split_mode_enabled(inst)) {
+ bufsz.type = HFI_BUFFER_OUTPUT;
+ bufsz.size = iris_vpu_buf_size(inst, BUF_DPB);
+
+ ret = hfi_gen1_set_property(inst, ptype, &bufsz, sizeof(bufsz));
+ if (ret)
+ return ret;
+
+ bufsz.type = HFI_BUFFER_OUTPUT2;
+ bufsz.size = inst->buffers[BUF_OUTPUT].size;
+
+ ret = hfi_gen1_set_property(inst, ptype, &bufsz, sizeof(bufsz));
+ } else {
+ bufsz.type = HFI_BUFFER_OUTPUT;
+ bufsz.size = inst->buffers[BUF_OUTPUT].size;
+
+ ret = hfi_gen1_set_property(inst, ptype, &bufsz, sizeof(bufsz));
+ if (ret)
+ return ret;
+
+ bufsz.type = HFI_BUFFER_OUTPUT2;
+ bufsz.size = 0;
+
+ ret = hfi_gen1_set_property(inst, ptype, &bufsz, sizeof(bufsz));
+ }
+
+ return ret;
+}
+
+static int iris_hfi_gen1_session_set_config_params(struct iris_inst *inst, u32 plane)
+{
+ struct iris_core *core = inst->core;
+ u32 config_params_size, i, j;
+ const u32 *config_params;
+ int ret;
+
+ static const struct iris_hfi_prop_type_handle prop_type_handle_inp_arr[] = {
+ {HFI_PROPERTY_PARAM_FRAME_SIZE,
+ iris_hfi_gen1_set_resolution},
+ {HFI_PROPERTY_CONFIG_VIDEOCORES_USAGE,
+ iris_hfi_gen1_decide_core},
+ {HFI_PROPERTY_PARAM_UNCOMPRESSED_FORMAT_SELECT,
+ iris_hfi_gen1_set_raw_format},
+ {HFI_PROPERTY_PARAM_UNCOMPRESSED_PLANE_ACTUAL_CONSTRAINTS_INFO,
+ iris_hfi_gen1_set_format_constraints},
+ {HFI_PROPERTY_PARAM_BUFFER_COUNT_ACTUAL,
+ iris_hfi_gen1_set_num_bufs},
+ {HFI_PROPERTY_PARAM_VDEC_MULTI_STREAM,
+ iris_hfi_gen1_set_multistream},
+ {HFI_PROPERTY_PARAM_BUFFER_SIZE_ACTUAL,
+ iris_hfi_gen1_set_bufsize},
+ };
+
+ static const struct iris_hfi_prop_type_handle prop_type_handle_out_arr[] = {
+ {HFI_PROPERTY_PARAM_FRAME_SIZE,
+ iris_hfi_gen1_set_resolution},
+ {HFI_PROPERTY_PARAM_UNCOMPRESSED_FORMAT_SELECT,
+ iris_hfi_gen1_set_raw_format},
+ {HFI_PROPERTY_PARAM_UNCOMPRESSED_PLANE_ACTUAL_CONSTRAINTS_INFO,
+ iris_hfi_gen1_set_format_constraints},
+ {HFI_PROPERTY_PARAM_BUFFER_COUNT_ACTUAL,
+ iris_hfi_gen1_set_num_bufs},
+ {HFI_PROPERTY_PARAM_VDEC_MULTI_STREAM,
+ iris_hfi_gen1_set_multistream},
+ {HFI_PROPERTY_PARAM_BUFFER_SIZE_ACTUAL,
+ iris_hfi_gen1_set_bufsize},
+ };
+
+ config_params = core->iris_platform_data->input_config_params;
+ config_params_size = core->iris_platform_data->input_config_params_size;
+
+ if (V4L2_TYPE_IS_OUTPUT(plane)) {
+ for (i = 0; i < config_params_size; i++) {
+ for (j = 0; j < ARRAY_SIZE(prop_type_handle_inp_arr); j++) {
+ if (prop_type_handle_inp_arr[j].type == config_params[i]) {
+ ret = prop_type_handle_inp_arr[j].handle(inst);
+ if (ret)
+ return ret;
+ break;
+ }
+ }
+ }
+ } else if (V4L2_TYPE_IS_CAPTURE(plane)) {
+ for (i = 0; i < config_params_size; i++) {
+ for (j = 0; j < ARRAY_SIZE(prop_type_handle_out_arr); j++) {
+ if (prop_type_handle_out_arr[j].type == config_params[i]) {
+ ret = prop_type_handle_out_arr[j].handle(inst);
+ if (ret)
+ return ret;
+ break;
+ }
+ }
+ }
+ }
+
+ return 0;
+}
+
+static const struct iris_hfi_command_ops iris_hfi_gen1_command_ops = {
+ .sys_init = iris_hfi_gen1_sys_init,
+ .sys_image_version = iris_hfi_gen1_sys_image_version,
+ .sys_interframe_powercollapse = iris_hfi_gen1_sys_interframe_powercollapse,
+ .sys_pc_prep = iris_hfi_gen1_sys_pc_prep,
+ .session_open = iris_hfi_gen1_session_open,
+ .session_set_config_params = iris_hfi_gen1_session_set_config_params,
+ .session_set_property = iris_hfi_gen1_session_set_property,
+ .session_start = iris_hfi_gen1_session_start,
+ .session_queue_buf = iris_hfi_gen1_session_queue_buffer,
+ .session_release_buf = iris_hfi_gen1_session_unset_buffers,
+ .session_resume_drc = iris_hfi_gen1_session_continue,
+ .session_stop = iris_hfi_gen1_session_stop,
+ .session_drain = iris_hfi_gen1_session_drain,
+ .session_close = iris_hfi_gen1_session_close,
+};
+
+void iris_hfi_gen1_command_ops_init(struct iris_core *core)
+{
+ core->hfi_ops = &iris_hfi_gen1_command_ops;
+}
+
+struct iris_inst *iris_hfi_gen1_get_instance(void)
+{
+ return kzalloc(sizeof(struct iris_inst), GFP_KERNEL);
+}
diff --git a/drivers/media/platform/qcom/iris/iris_hfi_gen1_defines.h b/drivers/media/platform/qcom/iris/iris_hfi_gen1_defines.h
new file mode 100644
index 000000000000..9f246816a286
--- /dev/null
+++ b/drivers/media/platform/qcom/iris/iris_hfi_gen1_defines.h
@@ -0,0 +1,448 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef __IRIS_HFI_GEN1_DEFINES_H__
+#define __IRIS_HFI_GEN1_DEFINES_H__
+
+#include <linux/types.h>
+
+#define HFI_VIDEO_ARCH_OX 0x1
+
+#define HFI_SESSION_TYPE_DEC 2
+
+#define HFI_VIDEO_CODEC_H264 0x00000002
+
+#define HFI_ERR_NONE 0x0
+
+#define HFI_CMD_SYS_INIT 0x10001
+#define HFI_CMD_SYS_PC_PREP 0x10002
+#define HFI_CMD_SYS_SET_PROPERTY 0x10005
+#define HFI_CMD_SYS_GET_PROPERTY 0x10006
+#define HFI_CMD_SYS_SESSION_INIT 0x10007
+#define HFI_CMD_SYS_SESSION_END 0x10008
+
+#define HFI_CMD_SESSION_SET_PROPERTY 0x11001
+#define HFI_CMD_SESSION_SET_BUFFERS 0x11002
+
+#define HFI_CMD_SESSION_LOAD_RESOURCES 0x211001
+#define HFI_CMD_SESSION_START 0x211002
+#define HFI_CMD_SESSION_STOP 0x211003
+#define HFI_CMD_SESSION_EMPTY_BUFFER 0x211004
+#define HFI_CMD_SESSION_FILL_BUFFER 0x211005
+#define HFI_CMD_SESSION_FLUSH 0x211008
+#define HFI_CMD_SESSION_RELEASE_BUFFERS 0x21100b
+#define HFI_CMD_SESSION_RELEASE_RESOURCES 0x21100c
+#define HFI_CMD_SESSION_CONTINUE 0x21100d
+
+#define HFI_ERR_SESSION_UNSUPPORTED_SETTING 0x1008
+#define HFI_ERR_SESSION_UNSUPPORTED_STREAM 0x100d
+#define HFI_ERR_SESSION_UNSUPPORT_BUFFERTYPE 0x1010
+#define HFI_ERR_SESSION_INVALID_SCALE_FACTOR 0x1012
+#define HFI_ERR_SESSION_UPSCALE_NOT_SUPPORTED 0x1013
+
+#define HFI_EVENT_SYS_ERROR 0x1
+#define HFI_EVENT_SESSION_ERROR 0x2
+
+#define HFI_EVENT_DATA_SEQUENCE_CHANGED_SUFFICIENT_BUF_RESOURCES 0x1000001
+#define HFI_EVENT_DATA_SEQUENCE_CHANGED_INSUFFICIENT_BUF_RESOURCES 0x1000002
+#define HFI_EVENT_SESSION_SEQUENCE_CHANGED 0x1000003
+
+#define HFI_BUFFERFLAG_EOS 0x00000001
+#define HFI_BUFFERFLAG_TIMESTAMPINVALID 0x00000100
+
+#define HFI_FLUSH_OUTPUT 0x1000002
+#define HFI_FLUSH_OUTPUT2 0x1000003
+#define HFI_FLUSH_ALL 0x1000004
+
+#define HFI_INDEX_EXTRADATA_INPUT_CROP 0x0700000e
+
+#define HFI_PROPERTY_PARAM_BUFFER_COUNT_ACTUAL 0x201001
+#define HFI_PROPERTY_PARAM_UNCOMPRESSED_PLANE_ACTUAL_CONSTRAINTS_INFO 0x201002
+#define HFI_PROPERTY_PARAM_BUFFER_ALLOC_MODE 0x201008
+#define HFI_PROPERTY_PARAM_BUFFER_SIZE_ACTUAL 0x20100c
+
+#define HFI_PROPERTY_CONFIG_BUFFER_REQUIREMENTS 0x202001
+
+#define HFI_PROPERTY_CONFIG_VDEC_POST_LOOP_DEBLOCKER 0x1200001
+#define HFI_PROPERTY_PARAM_VDEC_DPB_COUNTS 0x120300e
+#define HFI_PROPERTY_CONFIG_VDEC_ENTROPY 0x1204004
+
+#define HFI_BUFFER_INPUT 0x1
+#define HFI_BUFFER_OUTPUT 0x2
+#define HFI_BUFFER_OUTPUT2 0x3
+#define HFI_BUFFER_INTERNAL_PERSIST_1 0x5
+#define HFI_BUFFER_INTERNAL_SCRATCH 0x6
+#define HFI_BUFFER_INTERNAL_SCRATCH_1 0x7
+
+#define HFI_PROPERTY_SYS_CODEC_POWER_PLANE_CTRL 0x5
+#define HFI_PROPERTY_SYS_IMAGE_VERSION 0x6
+
+#define HFI_PROPERTY_PARAM_FRAME_SIZE 0x1001
+#define HFI_PROPERTY_PARAM_UNCOMPRESSED_FORMAT_SELECT 0x1003
+#define HFI_PROPERTY_PARAM_PROFILE_LEVEL_CURRENT 0x1005
+#define HFI_PROPERTY_PARAM_WORK_MODE 0x1015
+#define HFI_PROPERTY_PARAM_WORK_ROUTE 0x1017
+#define HFI_PROPERTY_CONFIG_VIDEOCORES_USAGE 0x2002
+
+#define HFI_PROPERTY_PARAM_VDEC_MULTI_STREAM 0x1003001
+#define HFI_PROPERTY_PARAM_VDEC_PIXEL_BITDEPTH 0x1003007
+#define HFI_PROPERTY_PARAM_VDEC_PIC_STRUCT 0x1003009
+#define HFI_PROPERTY_PARAM_VDEC_COLOUR_SPACE 0x100300a
+#define HFI_CORE_ID_1 1
+#define HFI_COLOR_FORMAT_NV12 0x02
+#define HFI_COLOR_FORMAT_NV12_UBWC 0x8002
+
+#define HFI_MSG_SYS_INIT 0x20001
+#define HFI_MSG_SYS_SESSION_INIT 0x20006
+#define HFI_MSG_SYS_SESSION_END 0x20007
+#define HFI_MSG_SYS_COV 0x20009
+#define HFI_MSG_SYS_PROPERTY_INFO 0x2000a
+
+#define HFI_MSG_EVENT_NOTIFY 0x21001
+#define HFI_MSG_SESSION_LOAD_RESOURCES 0x221001
+#define HFI_MSG_SESSION_START 0x221002
+#define HFI_MSG_SESSION_STOP 0x221003
+#define HFI_MSG_SESSION_FLUSH 0x221006
+#define HFI_MSG_SESSION_EMPTY_BUFFER 0x221007
+#define HFI_MSG_SESSION_FILL_BUFFER 0x221008
+#define HFI_MSG_SESSION_RELEASE_RESOURCES 0x22100a
+#define HFI_MSG_SESSION_RELEASE_BUFFERS 0x22100c
+
+#define HFI_PICTURE_I 0x00000001
+#define HFI_PICTURE_P 0x00000002
+#define HFI_PICTURE_B 0x00000004
+#define HFI_PICTURE_IDR 0x00000008
+#define HFI_FRAME_NOTCODED 0x7f002000
+#define HFI_FRAME_YUV 0x7f004000
+#define HFI_UNUSED_PICT 0x10000000
+
+struct hfi_pkt_hdr {
+ u32 size;
+ u32 pkt_type;
+};
+
+struct hfi_session_hdr_pkt {
+ struct hfi_pkt_hdr hdr;
+ u32 session_id;
+};
+
+struct hfi_session_open_pkt {
+ struct hfi_session_hdr_pkt shdr;
+ u32 session_domain;
+ u32 session_codec;
+};
+
+struct hfi_session_pkt {
+ struct hfi_session_hdr_pkt shdr;
+};
+
+struct hfi_sys_init_pkt {
+ struct hfi_pkt_hdr hdr;
+ u32 arch_type;
+};
+
+struct hfi_sys_set_property_pkt {
+ struct hfi_pkt_hdr hdr;
+ u32 num_properties;
+ u32 data[];
+};
+
+struct hfi_sys_get_property_pkt {
+ struct hfi_pkt_hdr hdr;
+ u32 num_properties;
+ u32 data;
+};
+
+struct hfi_session_set_property_pkt {
+ struct hfi_session_hdr_pkt shdr;
+ u32 num_properties;
+ u32 data[];
+};
+
+struct hfi_sys_pc_prep_pkt {
+ struct hfi_pkt_hdr hdr;
+};
+
+struct hfi_session_set_buffers_pkt {
+ struct hfi_session_hdr_pkt shdr;
+ u32 buffer_type;
+ u32 buffer_size;
+ u32 extradata_size;
+ u32 min_buffer_size;
+ u32 num_buffers;
+ u32 buffer_info[];
+};
+
+struct hfi_session_empty_buffer_compressed_pkt {
+ struct hfi_session_hdr_pkt shdr;
+ u32 time_stamp_hi;
+ u32 time_stamp_lo;
+ u32 flags;
+ u32 mark_target;
+ u32 mark_data;
+ u32 offset;
+ u32 alloc_len;
+ u32 filled_len;
+ u32 input_tag;
+ u32 packet_buffer;
+ u32 extradata_buffer;
+ u32 data;
+};
+
+struct hfi_session_fill_buffer_pkt {
+ struct hfi_session_hdr_pkt shdr;
+ u32 stream_id;
+ u32 offset;
+ u32 alloc_len;
+ u32 filled_len;
+ u32 output_tag;
+ u32 packet_buffer;
+ u32 extradata_buffer;
+ u32 data;
+};
+
+struct hfi_session_flush_pkt {
+ struct hfi_session_hdr_pkt shdr;
+ u32 flush_type;
+};
+
+struct hfi_session_release_buffer_pkt {
+ struct hfi_session_hdr_pkt shdr;
+ u32 buffer_type;
+ u32 buffer_size;
+ u32 extradata_size;
+ u32 response_req;
+ u32 num_buffers;
+ u32 buffer_info[];
+};
+
+struct hfi_buffer_info {
+ u32 buffer_addr;
+ u32 extradata_addr;
+};
+
+struct hfi_msg_event_notify_pkt {
+ struct hfi_session_hdr_pkt shdr;
+ u32 event_id;
+ u32 event_data1;
+ u32 event_data2;
+ u32 ext_event_data[];
+};
+
+struct hfi_msg_sys_init_done_pkt {
+ struct hfi_pkt_hdr hdr;
+ u32 error_type;
+ u32 num_properties;
+ u32 data[];
+};
+
+struct hfi_msg_session_hdr_pkt {
+ struct hfi_session_hdr_pkt shdr;
+ u32 error_type;
+};
+
+struct hfi_msg_session_init_done_pkt {
+ struct hfi_msg_session_hdr_pkt shdr;
+ u32 num_properties;
+ u32 data[];
+};
+
+struct hfi_msg_sys_property_info_pkt {
+ struct hfi_pkt_hdr hdr;
+ u32 num_properties;
+ u32 property;
+ u8 data[];
+};
+
+struct hfi_msg_session_flush_done_pkt {
+ struct hfi_msg_session_hdr_pkt shdr;
+ u32 flush_type;
+};
+
+struct hfi_enable {
+ u32 enable;
+};
+
+struct hfi_profile_level {
+ u32 profile;
+ u32 level;
+};
+
+struct hfi_framesize {
+ u32 buffer_type;
+ u32 width;
+ u32 height;
+};
+
+struct hfi_videocores_usage_type {
+ u32 video_core_enable_mask;
+};
+
+struct hfi_video_work_mode {
+ u32 video_work_mode;
+};
+
+struct hfi_video_work_route {
+ u32 video_work_route;
+};
+
+struct hfi_bit_depth {
+ u32 buffer_type;
+ u32 bit_depth;
+};
+
+struct hfi_pic_struct {
+ u32 progressive_only;
+};
+
+struct hfi_colour_space {
+ u32 colour_space;
+};
+
+struct hfi_extradata_input_crop {
+ u32 size;
+ u32 version;
+ u32 port_index;
+ u32 left;
+ u32 top;
+ u32 width;
+ u32 height;
+};
+
+struct hfi_dpb_counts {
+ u32 max_dpb_count;
+ u32 max_ref_frames;
+ u32 max_dec_buffering;
+ u32 max_reorder_frames;
+ u32 fw_min_count;
+};
+
+struct hfi_uncompressed_format_select {
+ u32 buffer_type;
+ u32 format;
+};
+
+struct hfi_uncompressed_plane_constraints {
+ u32 stride_multiples;
+ u32 max_stride;
+ u32 min_plane_buffer_height_multiple;
+ u32 buffer_alignment;
+};
+
+struct hfi_uncompressed_plane_actual_constraints_info {
+ u32 buffer_type;
+ u32 num_planes;
+ struct hfi_uncompressed_plane_constraints plane_format[2];
+};
+
+struct hfi_buffer_count_actual {
+ u32 type;
+ u32 count_actual;
+ u32 count_min_host;
+};
+
+struct hfi_buffer_size_actual {
+ u32 type;
+ u32 size;
+};
+
+struct hfi_multi_stream {
+ u32 buffer_type;
+ u32 enable;
+};
+
+struct hfi_buffer_requirements {
+ u32 type;
+ u32 size;
+ u32 region_size;
+ u32 hold_count;
+ u32 count_min;
+ u32 count_actual;
+ u32 contiguous;
+ u32 alignment;
+};
+
+struct hfi_event_data {
+ u32 error;
+ u32 height;
+ u32 width;
+ u32 event_type;
+ u32 packet_buffer;
+ u32 extradata_buffer;
+ u32 tag;
+ u32 profile;
+ u32 level;
+ u32 bit_depth;
+ u32 pic_struct;
+ u32 colour_space;
+ u32 entropy_mode;
+ u32 buf_count;
+ struct {
+ u32 left, top;
+ u32 width, height;
+ } input_crop;
+};
+
+struct hfi_msg_session_empty_buffer_done_pkt {
+ struct hfi_msg_session_hdr_pkt shdr;
+ u32 offset;
+ u32 filled_len;
+ u32 input_tag;
+ u32 packet_buffer;
+ u32 extradata_buffer;
+ u32 data[];
+};
+
+struct hfi_msg_session_fbd_uncompressed_plane0_pkt {
+ struct hfi_session_hdr_pkt shdr;
+ u32 stream_id;
+ u32 view_id;
+ u32 error_type;
+ u32 time_stamp_hi;
+ u32 time_stamp_lo;
+ u32 flags;
+ u32 mark_target;
+ u32 mark_data;
+ u32 stats;
+ u32 alloc_len;
+ u32 filled_len;
+ u32 offset;
+ u32 frame_width;
+ u32 frame_height;
+ u32 start_x_coord;
+ u32 start_y_coord;
+ u32 input_tag;
+ u32 input_tag2;
+ u32 output_tag;
+ u32 picture_type;
+ u32 packet_buffer;
+ u32 extradata_buffer;
+ u32 data[];
+};
+
+struct hfi_msg_session_release_buffers_done_pkt {
+ struct hfi_msg_session_hdr_pkt shdr;
+ u32 num_buffers;
+ u32 buffer_info[];
+};
+
+struct hfi_msg_sys_debug_pkt {
+ struct hfi_pkt_hdr hdr;
+ u32 msg_type;
+ u32 msg_size;
+ u32 time_stamp_hi;
+ u32 time_stamp_lo;
+ u8 msg_data[];
+};
+
+struct hfi_msg_sys_coverage_pkt {
+ struct hfi_pkt_hdr hdr;
+ u32 msg_size;
+ u32 time_stamp_hi;
+ u32 time_stamp_lo;
+ u8 msg_data[];
+};
+
+#endif
diff --git a/drivers/media/platform/qcom/iris/iris_hfi_gen1_response.c b/drivers/media/platform/qcom/iris/iris_hfi_gen1_response.c
new file mode 100644
index 000000000000..b72d503dd740
--- /dev/null
+++ b/drivers/media/platform/qcom/iris/iris_hfi_gen1_response.c
@@ -0,0 +1,666 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/bitfield.h>
+#include <media/v4l2-mem2mem.h>
+
+#include "iris_hfi_gen1.h"
+#include "iris_hfi_gen1_defines.h"
+#include "iris_instance.h"
+#include "iris_vdec.h"
+#include "iris_vpu_buffer.h"
+
+static void iris_hfi_gen1_read_changed_params(struct iris_inst *inst,
+ struct hfi_msg_event_notify_pkt *pkt)
+{
+ struct v4l2_pix_format_mplane *pixmp_ip = &inst->fmt_src->fmt.pix_mp;
+ struct v4l2_pix_format_mplane *pixmp_op = &inst->fmt_dst->fmt.pix_mp;
+ u32 num_properties_changed = pkt->event_data2;
+ u8 *data_ptr = (u8 *)&pkt->ext_event_data[0];
+ u32 primaries, matrix_coeff, transfer_char;
+ struct hfi_dpb_counts *iris_vpu_dpb_count;
+ struct hfi_profile_level *profile_level;
+ struct hfi_buffer_requirements *bufreq;
+ struct hfi_extradata_input_crop *crop;
+ struct hfi_colour_space *colour_info;
+ struct iris_core *core = inst->core;
+ u32 colour_description_present_flag;
+ u32 video_signal_type_present_flag;
+ struct hfi_event_data event = {0};
+ struct hfi_bit_depth *pixel_depth;
+ struct hfi_pic_struct *pic_struct;
+ struct hfi_framesize *frame_sz;
+ struct vb2_queue *dst_q;
+ struct v4l2_ctrl *ctrl;
+ u32 full_range, ptype;
+
+ do {
+ ptype = *((u32 *)data_ptr);
+ switch (ptype) {
+ case HFI_PROPERTY_PARAM_FRAME_SIZE:
+ data_ptr += sizeof(u32);
+ frame_sz = (struct hfi_framesize *)data_ptr;
+ event.width = frame_sz->width;
+ event.height = frame_sz->height;
+ data_ptr += sizeof(*frame_sz);
+ break;
+ case HFI_PROPERTY_PARAM_PROFILE_LEVEL_CURRENT:
+ data_ptr += sizeof(u32);
+ profile_level = (struct hfi_profile_level *)data_ptr;
+ event.profile = profile_level->profile;
+ event.level = profile_level->level;
+ data_ptr += sizeof(*profile_level);
+ break;
+ case HFI_PROPERTY_PARAM_VDEC_PIXEL_BITDEPTH:
+ data_ptr += sizeof(u32);
+ pixel_depth = (struct hfi_bit_depth *)data_ptr;
+ event.bit_depth = pixel_depth->bit_depth;
+ data_ptr += sizeof(*pixel_depth);
+ break;
+ case HFI_PROPERTY_PARAM_VDEC_PIC_STRUCT:
+ data_ptr += sizeof(u32);
+ pic_struct = (struct hfi_pic_struct *)data_ptr;
+ event.pic_struct = pic_struct->progressive_only;
+ data_ptr += sizeof(*pic_struct);
+ break;
+ case HFI_PROPERTY_PARAM_VDEC_COLOUR_SPACE:
+ data_ptr += sizeof(u32);
+ colour_info = (struct hfi_colour_space *)data_ptr;
+ event.colour_space = colour_info->colour_space;
+ data_ptr += sizeof(*colour_info);
+ break;
+ case HFI_PROPERTY_CONFIG_VDEC_ENTROPY:
+ data_ptr += sizeof(u32);
+ event.entropy_mode = *(u32 *)data_ptr;
+ data_ptr += sizeof(u32);
+ break;
+ case HFI_PROPERTY_CONFIG_BUFFER_REQUIREMENTS:
+ data_ptr += sizeof(u32);
+ bufreq = (struct hfi_buffer_requirements *)data_ptr;
+ event.buf_count = bufreq->count_min;
+ data_ptr += sizeof(*bufreq);
+ break;
+ case HFI_INDEX_EXTRADATA_INPUT_CROP:
+ data_ptr += sizeof(u32);
+ crop = (struct hfi_extradata_input_crop *)data_ptr;
+ event.input_crop.left = crop->left;
+ event.input_crop.top = crop->top;
+ event.input_crop.width = crop->width;
+ event.input_crop.height = crop->height;
+ data_ptr += sizeof(*crop);
+ break;
+ case HFI_PROPERTY_PARAM_VDEC_DPB_COUNTS:
+ data_ptr += sizeof(u32);
+ iris_vpu_dpb_count = (struct hfi_dpb_counts *)data_ptr;
+ event.buf_count = iris_vpu_dpb_count->fw_min_count;
+ data_ptr += sizeof(*iris_vpu_dpb_count);
+ break;
+ default:
+ break;
+ }
+ num_properties_changed--;
+ } while (num_properties_changed > 0);
+
+ pixmp_ip->width = event.width;
+ pixmp_ip->height = event.height;
+
+ pixmp_op->width = ALIGN(event.width, 128);
+ pixmp_op->height = ALIGN(event.height, 32);
+ pixmp_op->plane_fmt[0].bytesperline = ALIGN(event.width, 128);
+ pixmp_op->plane_fmt[0].sizeimage = iris_get_buffer_size(inst, BUF_OUTPUT);
+
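+ /*
+ * The colour_space word packs the VUI signal information: matrix
+ * coefficients in bits [7:0], transfer characteristics in [15:8],
+ * colour primaries in [23:16], plus the colour-description-present,
+ * full-range and video-signal-type-present flags.
+ */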
+ matrix_coeff = FIELD_GET(GENMASK(7, 0), event.colour_space);
+ transfer_char = FIELD_GET(GENMASK(15, 8), event.colour_space);
+ primaries = FIELD_GET(GENMASK(23, 16), event.colour_space);
+ colour_description_present_flag = FIELD_GET(GENMASK(24, 24), event.colour_space);
+ full_range = FIELD_GET(GENMASK(25, 25), event.colour_space);
+ video_signal_type_present_flag = FIELD_GET(GENMASK(29, 29), event.colour_space);
+
+ pixmp_op->colorspace = V4L2_COLORSPACE_DEFAULT;
+ pixmp_op->xfer_func = V4L2_XFER_FUNC_DEFAULT;
+ pixmp_op->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
+ pixmp_op->quantization = V4L2_QUANTIZATION_DEFAULT;
+
+ if (video_signal_type_present_flag) {
+ pixmp_op->quantization =
+ full_range ?
+ V4L2_QUANTIZATION_FULL_RANGE :
+ V4L2_QUANTIZATION_LIM_RANGE;
+ if (colour_description_present_flag) {
+ pixmp_op->colorspace =
+ iris_hfi_get_v4l2_color_primaries(primaries);
+ pixmp_op->xfer_func =
+ iris_hfi_get_v4l2_transfer_char(transfer_char);
+ pixmp_op->ycbcr_enc =
+ iris_hfi_get_v4l2_matrix_coefficients(matrix_coeff);
+ }
+ }
+
+ pixmp_ip->colorspace = pixmp_op->colorspace;
+ pixmp_ip->xfer_func = pixmp_op->xfer_func;
+ pixmp_ip->ycbcr_enc = pixmp_op->ycbcr_enc;
+ pixmp_ip->quantization = pixmp_op->quantization;
+
+ if (event.input_crop.width > 0 && event.input_crop.height > 0) {
+ inst->crop.left = event.input_crop.left;
+ inst->crop.top = event.input_crop.top;
+ inst->crop.width = event.input_crop.width;
+ inst->crop.height = event.input_crop.height;
+ } else {
+ inst->crop.left = 0;
+ inst->crop.top = 0;
+ inst->crop.width = event.width;
+ inst->crop.height = event.height;
+ }
+
+ inst->fw_min_count = event.buf_count;
+ inst->buffers[BUF_OUTPUT].min_count = iris_vpu_buf_count(inst, BUF_OUTPUT);
+ inst->buffers[BUF_OUTPUT].size = pixmp_op->plane_fmt[0].sizeimage;
+ ctrl = v4l2_ctrl_find(&inst->ctrl_handler, V4L2_CID_MIN_BUFFERS_FOR_CAPTURE);
+ if (ctrl)
+ v4l2_ctrl_s_ctrl(ctrl, inst->buffers[BUF_OUTPUT].min_count);
+
+ dst_q = v4l2_m2m_get_dst_vq(inst->m2m_ctx);
+ dst_q->min_reqbufs_allocation = inst->buffers[BUF_OUTPUT].min_count;
+
+ if (event.bit_depth || !event.pic_struct) {
+ dev_err(core->dev, "unsupported content, bit depth: %x, pic_struct = %x\n",
+ event.bit_depth, event.pic_struct);
+ iris_inst_change_state(inst, IRIS_INST_ERROR);
+ }
+}
+
+static void iris_hfi_gen1_event_seq_changed(struct iris_inst *inst,
+ struct hfi_msg_event_notify_pkt *pkt)
+{
+ struct hfi_session_flush_pkt flush_pkt;
+ u32 num_properties_changed;
+ int ret;
+
+ ret = iris_inst_sub_state_change_drc(inst);
+ if (ret)
+ return;
+
+ switch (pkt->event_data1) {
+ case HFI_EVENT_DATA_SEQUENCE_CHANGED_SUFFICIENT_BUF_RESOURCES:
+ case HFI_EVENT_DATA_SEQUENCE_CHANGED_INSUFFICIENT_BUF_RESOURCES:
+ break;
+ default:
+ iris_inst_change_state(inst, IRIS_INST_ERROR);
+ return;
+ }
+
+ num_properties_changed = pkt->event_data2;
+ if (!num_properties_changed) {
+ iris_inst_change_state(inst, IRIS_INST_ERROR);
+ return;
+ }
+
+ iris_hfi_gen1_read_changed_params(inst, pkt);
+
+ if (inst->state != IRIS_INST_ERROR) {
+ reinit_completion(&inst->flush_completion);
+
+ flush_pkt.shdr.hdr.size = sizeof(struct hfi_session_flush_pkt);
+ flush_pkt.shdr.hdr.pkt_type = HFI_CMD_SESSION_FLUSH;
+ flush_pkt.shdr.session_id = inst->session_id;
+ flush_pkt.flush_type = HFI_FLUSH_OUTPUT;
+ iris_hfi_queue_cmd_write(inst->core, &flush_pkt, flush_pkt.shdr.hdr.size);
+ }
+
+ iris_vdec_src_change(inst);
+ iris_inst_sub_state_change_drc_last(inst);
+}
+
+static void
+iris_hfi_gen1_sys_event_notify(struct iris_core *core, void *packet)
+{
+ struct hfi_msg_event_notify_pkt *pkt = packet;
+ struct iris_inst *instance;
+
+ if (pkt->event_id == HFI_EVENT_SYS_ERROR)
+ dev_err(core->dev, "sys error (type: %x, session id:%x, data1:%x, data2:%x)\n",
+ pkt->event_id, pkt->shdr.session_id, pkt->event_data1,
+ pkt->event_data2);
+
+ core->state = IRIS_CORE_ERROR;
+
+ mutex_lock(&core->lock);
+ list_for_each_entry(instance, &core->instances, list)
+ iris_inst_change_state(instance, IRIS_INST_ERROR);
+ mutex_unlock(&core->lock);
+
+ schedule_delayed_work(&core->sys_error_handler, msecs_to_jiffies(10));
+}
+
+static void
+iris_hfi_gen1_event_session_error(struct iris_inst *inst, struct hfi_msg_event_notify_pkt *pkt)
+{
+ switch (pkt->event_data1) {
+ /* non-fatal session errors */
+ case HFI_ERR_SESSION_INVALID_SCALE_FACTOR:
+ case HFI_ERR_SESSION_UNSUPPORT_BUFFERTYPE:
+ case HFI_ERR_SESSION_UNSUPPORTED_SETTING:
+ case HFI_ERR_SESSION_UPSCALE_NOT_SUPPORTED:
+ dev_dbg(inst->core->dev, "session error: event id:%x, session id:%x\n",
+ pkt->event_data1, pkt->shdr.session_id);
+ break;
+ /* fatal session errors */
+ default:
+ /*
+ * The firmware fills event_data2 with additional information about
+ * the HFI command for which the session error occurred.
+ */
+ dev_err(inst->core->dev,
+ "session error for command: %x, event id:%x, session id:%x\n",
+ pkt->event_data2, pkt->event_data1,
+ pkt->shdr.session_id);
+ iris_vb2_queue_error(inst);
+ iris_inst_change_state(inst, IRIS_INST_ERROR);
+ break;
+ }
+}
+
+static void iris_hfi_gen1_session_event_notify(struct iris_inst *inst, void *packet)
+{
+ struct hfi_msg_event_notify_pkt *pkt = packet;
+
+ switch (pkt->event_id) {
+ case HFI_EVENT_SESSION_ERROR:
+ iris_hfi_gen1_event_session_error(inst, pkt);
+ break;
+ case HFI_EVENT_SESSION_SEQUENCE_CHANGED:
+ iris_hfi_gen1_event_seq_changed(inst, pkt);
+ break;
+ default:
+ break;
+ }
+}
+
+static void iris_hfi_gen1_sys_init_done(struct iris_core *core, void *packet)
+{
+ struct hfi_msg_sys_init_done_pkt *pkt = packet;
+
+ if (pkt->error_type != HFI_ERR_NONE) {
+ core->state = IRIS_CORE_ERROR;
+ return;
+ }
+
+ complete(&core->core_init_done);
+}
+
+static void
+iris_hfi_gen1_sys_get_prop_image_version(struct iris_core *core,
+ struct hfi_msg_sys_property_info_pkt *pkt)
+{
+ int req_bytes = pkt->hdr.size - sizeof(*pkt);
+ char fw_version[IRIS_FW_VERSION_LENGTH];
+ u8 *str_image_version;
+ u32 i;
+
+ if (req_bytes < IRIS_FW_VERSION_LENGTH - 1 || !pkt->data[0] || pkt->num_properties > 1) {
+ dev_err(core->dev, "bad packet\n");
+ return;
+ }
+
+ str_image_version = pkt->data;
+ if (!str_image_version) {
+ dev_err(core->dev, "firmware version not available\n");
+ return;
+ }
+
+ for (i = 0; i < IRIS_FW_VERSION_LENGTH - 1; i++) {
+ if (str_image_version[i] != '\0')
+ fw_version[i] = str_image_version[i];
+ else
+ fw_version[i] = ' ';
+ }
+ fw_version[i] = '\0';
+ dev_dbg(core->dev, "firmware version: %s\n", fw_version);
+}
+
+static void iris_hfi_gen1_sys_property_info(struct iris_core *core, void *packet)
+{
+ struct hfi_msg_sys_property_info_pkt *pkt = packet;
+
+ if (!pkt->num_properties) {
+ dev_dbg(core->dev, "no properties\n");
+ return;
+ }
+
+ switch (pkt->property) {
+ case HFI_PROPERTY_SYS_IMAGE_VERSION:
+ iris_hfi_gen1_sys_get_prop_image_version(core, pkt);
+ break;
+ default:
+ dev_dbg(core->dev, "unknown property data\n");
+ break;
+ }
+}
+
+static void iris_hfi_gen1_session_etb_done(struct iris_inst *inst, void *packet)
+{
+ struct hfi_msg_session_empty_buffer_done_pkt *pkt = packet;
+ struct v4l2_m2m_ctx *m2m_ctx = inst->m2m_ctx;
+ struct v4l2_m2m_buffer *m2m_buffer, *n;
+ struct iris_buffer *buf = NULL;
+ bool found = false;
+
+ v4l2_m2m_for_each_src_buf_safe(m2m_ctx, m2m_buffer, n) {
+ buf = to_iris_buffer(&m2m_buffer->vb);
+ if (buf->index == pkt->input_tag) {
+ found = true;
+ break;
+ }
+ }
+ if (!found)
+ goto error;
+
+ if (pkt->shdr.error_type == HFI_ERR_SESSION_UNSUPPORTED_STREAM) {
+ buf->flags = V4L2_BUF_FLAG_ERROR;
+ iris_vb2_queue_error(inst);
+ iris_inst_change_state(inst, IRIS_INST_ERROR);
+ }
+
+ if (!(buf->attr & BUF_ATTR_QUEUED))
+ return;
+
+ buf->attr &= ~BUF_ATTR_QUEUED;
+
+ if (!(buf->attr & BUF_ATTR_BUFFER_DONE)) {
+ buf->attr |= BUF_ATTR_BUFFER_DONE;
+ iris_vb2_buffer_done(inst, buf);
+ }
+
+ return;
+
+error:
+ iris_inst_change_state(inst, IRIS_INST_ERROR);
+ dev_err(inst->core->dev, "error in etb done\n");
+}
+
+static void iris_hfi_gen1_session_ftb_done(struct iris_inst *inst, void *packet)
+{
+ struct hfi_msg_session_fbd_uncompressed_plane0_pkt *pkt = packet;
+ struct v4l2_m2m_ctx *m2m_ctx = inst->m2m_ctx;
+ struct v4l2_m2m_buffer *m2m_buffer, *n;
+ struct hfi_session_flush_pkt flush_pkt;
+ u32 timestamp_hi = pkt->time_stamp_hi;
+ u32 timestamp_lo = pkt->time_stamp_lo;
+ struct iris_core *core = inst->core;
+ u32 filled_len = pkt->filled_len;
+ u32 pic_type = pkt->picture_type;
+ u32 output_tag = pkt->output_tag;
+ struct iris_buffer *buf, *iter;
+ struct iris_buffers *buffers;
+ u32 hfi_flags = pkt->flags;
+ u32 offset = pkt->offset;
+ u64 timestamp_us = 0;
+ bool found = false;
+ u32 flags = 0;
+
+ if ((hfi_flags & HFI_BUFFERFLAG_EOS) && !filled_len) {
+ reinit_completion(&inst->flush_completion);
+
+ flush_pkt.shdr.hdr.size = sizeof(struct hfi_session_flush_pkt);
+ flush_pkt.shdr.hdr.pkt_type = HFI_CMD_SESSION_FLUSH;
+ flush_pkt.shdr.session_id = inst->session_id;
+ flush_pkt.flush_type = HFI_FLUSH_OUTPUT;
+ iris_hfi_queue_cmd_write(core, &flush_pkt, flush_pkt.shdr.hdr.size);
+ iris_inst_sub_state_change_drain_last(inst);
+
+ return;
+ }
+
+ if (iris_split_mode_enabled(inst) && pkt->stream_id == 0) {
+ buffers = &inst->buffers[BUF_DPB];
+ if (!buffers)
+ goto error;
+
+ found = false;
+ list_for_each_entry(iter, &buffers->list, list) {
+ if (!(iter->attr & BUF_ATTR_QUEUED))
+ continue;
+
+ found = (iter->index == output_tag &&
+ iter->data_offset == offset);
+
+ if (found) {
+ buf = iter;
+ break;
+ }
+ }
+ } else {
+ v4l2_m2m_for_each_dst_buf_safe(m2m_ctx, m2m_buffer, n) {
+ buf = to_iris_buffer(&m2m_buffer->vb);
+ if (!(buf->attr & BUF_ATTR_QUEUED))
+ continue;
+
+ found = (buf->index == output_tag &&
+ buf->data_offset == offset);
+
+ if (found)
+ break;
+ }
+ }
+ if (!found)
+ goto error;
+
+ buf->data_offset = offset;
+ buf->data_size = filled_len;
+
+ if (filled_len) {
+ timestamp_us = timestamp_hi;
+ timestamp_us = (timestamp_us << 32) | timestamp_lo;
+ } else {
+ flags |= V4L2_BUF_FLAG_LAST;
+ }
+ buf->timestamp = timestamp_us;
+
+ switch (pic_type) {
+ case HFI_PICTURE_IDR:
+ case HFI_PICTURE_I:
+ flags |= V4L2_BUF_FLAG_KEYFRAME;
+ break;
+ case HFI_PICTURE_P:
+ flags |= V4L2_BUF_FLAG_PFRAME;
+ break;
+ case HFI_PICTURE_B:
+ flags |= V4L2_BUF_FLAG_BFRAME;
+ break;
+ case HFI_FRAME_NOTCODED:
+ case HFI_UNUSED_PICT:
+ case HFI_FRAME_YUV:
+ default:
+ break;
+ }
+
+ buf->attr &= ~BUF_ATTR_QUEUED;
+ buf->attr |= BUF_ATTR_DEQUEUED;
+ buf->attr |= BUF_ATTR_BUFFER_DONE;
+
+ buf->flags |= flags;
+
+ iris_vb2_buffer_done(inst, buf);
+
+ return;
+
+error:
+ iris_inst_change_state(inst, IRIS_INST_ERROR);
+ dev_err(core->dev, "error in ftb done\n");
+}
+
+struct iris_hfi_gen1_response_pkt_info {
+ u32 pkt;
+ u32 pkt_sz;
+};
+
+static const struct iris_hfi_gen1_response_pkt_info pkt_infos[] = {
+ {
+ .pkt = HFI_MSG_EVENT_NOTIFY,
+ .pkt_sz = sizeof(struct hfi_msg_event_notify_pkt),
+ },
+ {
+ .pkt = HFI_MSG_SYS_INIT,
+ .pkt_sz = sizeof(struct hfi_msg_sys_init_done_pkt),
+ },
+ {
+ .pkt = HFI_MSG_SYS_PROPERTY_INFO,
+ .pkt_sz = sizeof(struct hfi_msg_sys_property_info_pkt),
+ },
+ {
+ .pkt = HFI_MSG_SYS_SESSION_INIT,
+ .pkt_sz = sizeof(struct hfi_msg_session_init_done_pkt),
+ },
+ {
+ .pkt = HFI_MSG_SYS_SESSION_END,
+ .pkt_sz = sizeof(struct hfi_msg_session_hdr_pkt),
+ },
+ {
+ .pkt = HFI_MSG_SESSION_LOAD_RESOURCES,
+ .pkt_sz = sizeof(struct hfi_msg_session_hdr_pkt),
+ },
+ {
+ .pkt = HFI_MSG_SESSION_START,
+ .pkt_sz = sizeof(struct hfi_msg_session_hdr_pkt),
+ },
+ {
+ .pkt = HFI_MSG_SESSION_STOP,
+ .pkt_sz = sizeof(struct hfi_msg_session_hdr_pkt),
+ },
+ {
+ .pkt = HFI_MSG_SESSION_EMPTY_BUFFER,
+ .pkt_sz = sizeof(struct hfi_msg_session_empty_buffer_done_pkt),
+ },
+ {
+ .pkt = HFI_MSG_SESSION_FILL_BUFFER,
+ .pkt_sz = sizeof(struct hfi_msg_session_fbd_uncompressed_plane0_pkt),
+ },
+ {
+ .pkt = HFI_MSG_SESSION_FLUSH,
+ .pkt_sz = sizeof(struct hfi_msg_session_flush_done_pkt),
+ },
+ {
+ .pkt = HFI_MSG_SESSION_RELEASE_RESOURCES,
+ .pkt_sz = sizeof(struct hfi_msg_session_hdr_pkt),
+ },
+ {
+ .pkt = HFI_MSG_SESSION_RELEASE_BUFFERS,
+ .pkt_sz = sizeof(struct hfi_msg_session_release_buffers_done_pkt),
+ },
+};
+
+static void iris_hfi_gen1_handle_response(struct iris_core *core, void *response)
+{
+ struct hfi_pkt_hdr *hdr = (struct hfi_pkt_hdr *)response;
+ const struct iris_hfi_gen1_response_pkt_info *pkt_info;
+ struct device *dev = core->dev;
+ struct hfi_session_pkt *pkt;
+ struct completion *done;
+ struct iris_inst *inst;
+ bool found = false;
+ u32 i;
+
+ for (i = 0; i < ARRAY_SIZE(pkt_infos); i++) {
+ pkt_info = &pkt_infos[i];
+ if (pkt_info->pkt != hdr->pkt_type)
+ continue;
+ found = true;
+ break;
+ }
+
+ if (!found || hdr->size < pkt_info->pkt_sz) {
+ dev_err(dev, "bad packet size (%d should be %d, pkt type:%x, found %d)\n",
+ hdr->size, pkt_info->pkt_sz, hdr->pkt_type, found);
+
+ return;
+ }
+
+ switch (hdr->pkt_type) {
+ case HFI_MSG_SYS_INIT:
+ iris_hfi_gen1_sys_init_done(core, hdr);
+ break;
+ case HFI_MSG_SYS_PROPERTY_INFO:
+ iris_hfi_gen1_sys_property_info(core, hdr);
+ break;
+ case HFI_MSG_EVENT_NOTIFY:
+ pkt = (struct hfi_session_pkt *)hdr;
+ inst = iris_get_instance(core, pkt->shdr.session_id);
+ if (inst) {
+ mutex_lock(&inst->lock);
+ iris_hfi_gen1_session_event_notify(inst, hdr);
+ mutex_unlock(&inst->lock);
+ } else {
+ iris_hfi_gen1_sys_event_notify(core, hdr);
+ }
+
+ break;
+ default:
+ pkt = (struct hfi_session_pkt *)hdr;
+ inst = iris_get_instance(core, pkt->shdr.session_id);
+ if (!inst) {
+ dev_warn(dev, "no valid instance (pkt session_id:%x, pkt:%x)\n",
+ pkt->shdr.session_id,
+ pkt_info ? pkt_info->pkt : 0);
+ return;
+ }
+
+ mutex_lock(&inst->lock);
+ if (hdr->pkt_type == HFI_MSG_SESSION_EMPTY_BUFFER) {
+ iris_hfi_gen1_session_etb_done(inst, hdr);
+ } else if (hdr->pkt_type == HFI_MSG_SESSION_FILL_BUFFER) {
+ iris_hfi_gen1_session_ftb_done(inst, hdr);
+ } else {
+ struct hfi_msg_session_hdr_pkt *shdr;
+
+ shdr = (struct hfi_msg_session_hdr_pkt *)hdr;
+ if (shdr->error_type != HFI_ERR_NONE)
+ iris_inst_change_state(inst, IRIS_INST_ERROR);
+
+ done = pkt_info->pkt == HFI_MSG_SESSION_FLUSH ?
+ &inst->flush_completion : &inst->completion;
+ complete(done);
+ }
+ mutex_unlock(&inst->lock);
+
+ break;
+ }
+}
+
+static void iris_hfi_gen1_flush_debug_queue(struct iris_core *core, u8 *packet)
+{
+ struct hfi_msg_sys_coverage_pkt *pkt;
+
+ while (!iris_hfi_queue_dbg_read(core, packet)) {
+ pkt = (struct hfi_msg_sys_coverage_pkt *)packet;
+
+ if (pkt->hdr.pkt_type != HFI_MSG_SYS_COV) {
+ struct hfi_msg_sys_debug_pkt *pkt =
+ (struct hfi_msg_sys_debug_pkt *)packet;
+
+ dev_dbg(core->dev, "%s", pkt->msg_data);
+ }
+ }
+}
+
+static void iris_hfi_gen1_response_handler(struct iris_core *core)
+{
+ memset(core->response_packet, 0, sizeof(struct hfi_pkt_hdr));
+ while (!iris_hfi_queue_msg_read(core, core->response_packet)) {
+ iris_hfi_gen1_handle_response(core, core->response_packet);
+ memset(core->response_packet, 0, sizeof(struct hfi_pkt_hdr));
+ }
+
+ iris_hfi_gen1_flush_debug_queue(core, core->response_packet);
+}
+
+static const struct iris_hfi_response_ops iris_hfi_gen1_response_ops = {
+ .hfi_response_handler = iris_hfi_gen1_response_handler,
+};
+
+void iris_hfi_gen1_response_ops_init(struct iris_core *core)
+{
+ core->hfi_response_ops = &iris_hfi_gen1_response_ops;
+}
diff --git a/drivers/media/platform/qcom/iris/iris_hfi_gen2.h b/drivers/media/platform/qcom/iris/iris_hfi_gen2.h
new file mode 100644
index 000000000000..b9d3749a10ef
--- /dev/null
+++ b/drivers/media/platform/qcom/iris/iris_hfi_gen2.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef __IRIS_HFI_GEN2_H__
+#define __IRIS_HFI_GEN2_H__
+
+#include "iris_instance.h"
+
+struct iris_core;
+
+#define to_iris_inst_hfi_gen2(ptr) \
+ container_of(ptr, struct iris_inst_hfi_gen2, inst)
+
+/**
+ * struct iris_inst_hfi_gen2 - holds per video instance parameters for hfi_gen2
+ *
+ * @inst: embedded iris instance (must be the first member)
+ * @packet: command packet buffer, allocated at session open and reused for session commands
+ * @ipsc_properties_set: true once input port settings change properties are subscribed with firmware
+ * @opsc_properties_set: true once output port settings change properties are subscribed with firmware
+ * @hfi_frame_info: structure of frame info
+ * @src_subcr_params: subscription params to fw on input port
+ * @dst_subcr_params: subscription params to fw on output port
+ */
+struct iris_inst_hfi_gen2 {
+ struct iris_inst inst;
+ struct iris_hfi_header *packet;
+ bool ipsc_properties_set;
+ bool opsc_properties_set;
+ struct iris_hfi_frame_info hfi_frame_info;
+ struct hfi_subscription_params src_subcr_params;
+ struct hfi_subscription_params dst_subcr_params;
+};
+
+void iris_hfi_gen2_command_ops_init(struct iris_core *core);
+void iris_hfi_gen2_response_ops_init(struct iris_core *core);
+struct iris_inst *iris_hfi_gen2_get_instance(void);
+
+#endif
diff --git a/drivers/media/platform/qcom/iris/iris_hfi_gen2_command.c b/drivers/media/platform/qcom/iris/iris_hfi_gen2_command.c
new file mode 100644
index 000000000000..a908b41e2868
--- /dev/null
+++ b/drivers/media/platform/qcom/iris/iris_hfi_gen2_command.c
@@ -0,0 +1,957 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/bitfield.h>
+
+#include "iris_hfi_gen2.h"
+#include "iris_hfi_gen2_packet.h"
+
+#define UNSPECIFIED_COLOR_FORMAT 5
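+/* SYS_INIT carries HFI_CMD_INIT plus seven UBWC configuration properties, each with a u32 payload */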
+#define NUM_SYS_INIT_PACKETS 8
+
+#define SYS_INIT_PKT_SIZE (sizeof(struct iris_hfi_header) + \
+ NUM_SYS_INIT_PACKETS * (sizeof(struct iris_hfi_packet) + sizeof(u32)))
+
+#define SYS_IFPC_PKT_SIZE (sizeof(struct iris_hfi_header) + \
+ sizeof(struct iris_hfi_packet) + sizeof(u32))
+
+#define SYS_NO_PAYLOAD_PKT_SIZE (sizeof(struct iris_hfi_header) + \
+ sizeof(struct iris_hfi_packet))
+
+static int iris_hfi_gen2_sys_init(struct iris_core *core)
+{
+ struct iris_hfi_header *hdr;
+ int ret;
+
+ hdr = kzalloc(SYS_INIT_PKT_SIZE, GFP_KERNEL);
+ if (!hdr)
+ return -ENOMEM;
+
+ iris_hfi_gen2_packet_sys_init(core, hdr);
+ ret = iris_hfi_queue_cmd_write_locked(core, hdr, hdr->size);
+
+ kfree(hdr);
+
+ return ret;
+}
+
+static int iris_hfi_gen2_sys_image_version(struct iris_core *core)
+{
+ struct iris_hfi_header *hdr;
+ int ret;
+
+ hdr = kzalloc(SYS_NO_PAYLOAD_PKT_SIZE, GFP_KERNEL);
+ if (!hdr)
+ return -ENOMEM;
+
+ iris_hfi_gen2_packet_image_version(core, hdr);
+ ret = iris_hfi_queue_cmd_write_locked(core, hdr, hdr->size);
+
+ kfree(hdr);
+
+ return ret;
+}
+
+static int iris_hfi_gen2_sys_interframe_powercollapse(struct iris_core *core)
+{
+ struct iris_hfi_header *hdr;
+ int ret;
+
+ hdr = kzalloc(SYS_IFPC_PKT_SIZE, GFP_KERNEL);
+ if (!hdr)
+ return -ENOMEM;
+
+ iris_hfi_gen2_packet_sys_interframe_powercollapse(core, hdr);
+ ret = iris_hfi_queue_cmd_write_locked(core, hdr, hdr->size);
+
+ kfree(hdr);
+
+ return ret;
+}
+
+static int iris_hfi_gen2_sys_pc_prep(struct iris_core *core)
+{
+ struct iris_hfi_header *hdr;
+ int ret;
+
+ hdr = kzalloc(SYS_NO_PAYLOAD_PKT_SIZE, GFP_KERNEL);
+ if (!hdr)
+ return -ENOMEM;
+
+ iris_hfi_gen2_packet_sys_pc_prep(core, hdr);
+ ret = iris_hfi_queue_cmd_write_locked(core, hdr, hdr->size);
+
+ kfree(hdr);
+
+ return ret;
+}
+
+static u32 iris_hfi_gen2_get_port(u32 plane)
+{
+ switch (plane) {
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
+ return HFI_PORT_BITSTREAM;
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
+ return HFI_PORT_RAW;
+ default:
+ return HFI_PORT_NONE;
+ }
+}
+
+static u32 iris_hfi_gen2_get_port_from_buf_type(enum iris_buffer_type buffer_type)
+{
+ switch (buffer_type) {
+ case BUF_INPUT:
+ case BUF_BIN:
+ case BUF_COMV:
+ case BUF_NON_COMV:
+ case BUF_LINE:
+ return HFI_PORT_BITSTREAM;
+ case BUF_OUTPUT:
+ case BUF_DPB:
+ return HFI_PORT_RAW;
+ case BUF_PERSIST:
+ default:
+ return HFI_PORT_NONE;
+ }
+}
+
+static int iris_hfi_gen2_session_set_property(struct iris_inst *inst, u32 packet_type, u32 flag,
+ u32 plane, u32 payload_type, void *payload,
+ u32 payload_size)
+{
+ struct iris_inst_hfi_gen2 *inst_hfi_gen2 = to_iris_inst_hfi_gen2(inst);
+
+ iris_hfi_gen2_packet_session_property(inst,
+ packet_type,
+ flag,
+ plane,
+ payload_type,
+ payload,
+ payload_size);
+
+ return iris_hfi_queue_cmd_write(inst->core, inst_hfi_gen2->packet,
+ inst_hfi_gen2->packet->size);
+}
+
+static int iris_hfi_gen2_set_bitstream_resolution(struct iris_inst *inst)
+{
+ struct iris_inst_hfi_gen2 *inst_hfi_gen2 = to_iris_inst_hfi_gen2(inst);
+ u32 port = iris_hfi_gen2_get_port(V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+ u32 resolution = inst->fmt_src->fmt.pix_mp.width << 16 |
+ inst->fmt_src->fmt.pix_mp.height;
+
+ inst_hfi_gen2->src_subcr_params.bitstream_resolution = resolution;
+
+ return iris_hfi_gen2_session_set_property(inst,
+ HFI_PROP_BITSTREAM_RESOLUTION,
+ HFI_HOST_FLAGS_NONE,
+ port,
+ HFI_PAYLOAD_U32,
+ &resolution,
+ sizeof(u32));
+}
+
+static int iris_hfi_gen2_set_crop_offsets(struct iris_inst *inst)
+{
+ u32 bottom_offset = (inst->fmt_src->fmt.pix_mp.height - inst->crop.height);
+ u32 right_offset = (inst->fmt_src->fmt.pix_mp.width - inst->crop.width);
+ struct iris_inst_hfi_gen2 *inst_hfi_gen2 = to_iris_inst_hfi_gen2(inst);
+ u32 port = iris_hfi_gen2_get_port(V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+ u32 left_offset = inst->crop.left;
+ u32 top_offset = inst->crop.top;
+ u32 payload[2];
+
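+ /* crop offsets are sent as a packed u64: word0 = left << 16 | top, word1 = right << 16 | bottom */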
+ payload[0] = FIELD_PREP(GENMASK(31, 16), left_offset) | top_offset;
+ payload[1] = FIELD_PREP(GENMASK(31, 16), right_offset) | bottom_offset;
+ inst_hfi_gen2->src_subcr_params.crop_offsets[0] = payload[0];
+ inst_hfi_gen2->src_subcr_params.crop_offsets[1] = payload[1];
+
+ return iris_hfi_gen2_session_set_property(inst,
+ HFI_PROP_CROP_OFFSETS,
+ HFI_HOST_FLAGS_NONE,
+ port,
+ HFI_PAYLOAD_64_PACKED,
+ &payload,
+ sizeof(u64));
+}
+
+static int iris_hfi_gen2_set_bit_depth(struct iris_inst *inst)
+{
+ struct iris_inst_hfi_gen2 *inst_hfi_gen2 = to_iris_inst_hfi_gen2(inst);
+ u32 port = iris_hfi_gen2_get_port(V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+ u32 bitdepth = BIT_DEPTH_8;
+
+ inst_hfi_gen2->src_subcr_params.bit_depth = bitdepth;
+
+ return iris_hfi_gen2_session_set_property(inst,
+ HFI_PROP_LUMA_CHROMA_BIT_DEPTH,
+ HFI_HOST_FLAGS_NONE,
+ port,
+ HFI_PAYLOAD_U32,
+ &bitdepth,
+ sizeof(u32));
+}
+
+static int iris_hfi_gen2_set_coded_frames(struct iris_inst *inst)
+{
+ struct iris_inst_hfi_gen2 *inst_hfi_gen2 = to_iris_inst_hfi_gen2(inst);
+ u32 port = iris_hfi_gen2_get_port(V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+ u32 coded_frames = 0;
+
+ if (inst->fw_caps[CODED_FRAMES].value == CODED_FRAMES_PROGRESSIVE)
+ coded_frames = HFI_BITMASK_FRAME_MBS_ONLY_FLAG;
+ inst_hfi_gen2->src_subcr_params.coded_frames = coded_frames;
+
+ return iris_hfi_gen2_session_set_property(inst,
+ HFI_PROP_CODED_FRAMES,
+ HFI_HOST_FLAGS_NONE,
+ port,
+ HFI_PAYLOAD_U32,
+ &coded_frames,
+ sizeof(u32));
+}
+
+static int iris_hfi_gen2_set_min_output_count(struct iris_inst *inst)
+{
+ struct iris_inst_hfi_gen2 *inst_hfi_gen2 = to_iris_inst_hfi_gen2(inst);
+ u32 port = iris_hfi_gen2_get_port(V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+ u32 min_output = inst->buffers[BUF_OUTPUT].min_count;
+
+ inst_hfi_gen2->src_subcr_params.fw_min_count = min_output;
+
+ return iris_hfi_gen2_session_set_property(inst,
+ HFI_PROP_BUFFER_FW_MIN_OUTPUT_COUNT,
+ HFI_HOST_FLAGS_NONE,
+ port,
+ HFI_PAYLOAD_U32,
+ &min_output,
+ sizeof(u32));
+}
+
+static int iris_hfi_gen2_set_picture_order_count(struct iris_inst *inst)
+{
+ struct iris_inst_hfi_gen2 *inst_hfi_gen2 = to_iris_inst_hfi_gen2(inst);
+ u32 port = iris_hfi_gen2_get_port(V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+ u32 poc = 0;
+
+ inst_hfi_gen2->src_subcr_params.pic_order_cnt = poc;
+
+ return iris_hfi_gen2_session_set_property(inst,
+ HFI_PROP_PIC_ORDER_CNT_TYPE,
+ HFI_HOST_FLAGS_NONE,
+ port,
+ HFI_PAYLOAD_U32,
+ &poc,
+ sizeof(u32));
+}
+
+static int iris_hfi_gen2_set_colorspace(struct iris_inst *inst)
+{
+ struct iris_inst_hfi_gen2 *inst_hfi_gen2 = to_iris_inst_hfi_gen2(inst);
+ u32 port = iris_hfi_gen2_get_port(V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+ struct v4l2_pix_format_mplane *pixmp = &inst->fmt_src->fmt.pix_mp;
+ u32 video_signal_type_present_flag = 0, color_info;
+ u32 matrix_coeff = HFI_MATRIX_COEFF_RESERVED;
+ u32 video_format = UNSPECIFIED_COLOR_FORMAT;
+ u32 full_range = V4L2_QUANTIZATION_DEFAULT;
+ u32 transfer_char = HFI_TRANSFER_RESERVED;
+ u32 colour_description_present_flag = 0;
+ u32 primaries = HFI_PRIMARIES_RESERVED;
+
+ if (pixmp->colorspace != V4L2_COLORSPACE_DEFAULT ||
+ pixmp->ycbcr_enc != V4L2_YCBCR_ENC_DEFAULT ||
+ pixmp->xfer_func != V4L2_XFER_FUNC_DEFAULT) {
+ colour_description_present_flag = 1;
+ video_signal_type_present_flag = 1;
+ primaries = iris_hfi_gen2_get_color_primaries(pixmp->colorspace);
+ matrix_coeff = iris_hfi_gen2_get_matrix_coefficients(pixmp->ycbcr_enc);
+ transfer_char = iris_hfi_gen2_get_transfer_char(pixmp->xfer_func);
+ }
+
+ if (pixmp->quantization != V4L2_QUANTIZATION_DEFAULT) {
+ video_signal_type_present_flag = 1;
+ full_range = pixmp->quantization == V4L2_QUANTIZATION_FULL_RANGE ? 1 : 0;
+ }
+
+ color_info = iris_hfi_gen2_get_color_info(matrix_coeff, transfer_char, primaries,
+ colour_description_present_flag, full_range,
+ video_format, video_signal_type_present_flag);
+
+ inst_hfi_gen2->src_subcr_params.color_info = color_info;
+
+ return iris_hfi_gen2_session_set_property(inst,
+ HFI_PROP_SIGNAL_COLOR_INFO,
+ HFI_HOST_FLAGS_NONE,
+ port,
+ HFI_PAYLOAD_32_PACKED,
+ &color_info,
+ sizeof(u32));
+}
+
+static int iris_hfi_gen2_set_profile(struct iris_inst *inst)
+{
+ struct iris_inst_hfi_gen2 *inst_hfi_gen2 = to_iris_inst_hfi_gen2(inst);
+ u32 port = iris_hfi_gen2_get_port(V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+ u32 profile = inst->fw_caps[PROFILE].value;
+
+ inst_hfi_gen2->src_subcr_params.profile = profile;
+
+ return iris_hfi_gen2_session_set_property(inst,
+ HFI_PROP_PROFILE,
+ HFI_HOST_FLAGS_NONE,
+ port,
+ HFI_PAYLOAD_U32_ENUM,
+ &profile,
+ sizeof(u32));
+}
+
+static int iris_hfi_gen2_set_level(struct iris_inst *inst)
+{
+ struct iris_inst_hfi_gen2 *inst_hfi_gen2 = to_iris_inst_hfi_gen2(inst);
+ u32 port = iris_hfi_gen2_get_port(V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+ u32 level = inst->fw_caps[LEVEL].value;
+
+ inst_hfi_gen2->src_subcr_params.level = level;
+
+ return iris_hfi_gen2_session_set_property(inst,
+ HFI_PROP_LEVEL,
+ HFI_HOST_FLAGS_NONE,
+ port,
+ HFI_PAYLOAD_U32_ENUM,
+ &level,
+ sizeof(u32));
+}
+
+static int iris_hfi_gen2_set_colorformat(struct iris_inst *inst)
+{
+ u32 port = iris_hfi_gen2_get_port(V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
+ u32 hfi_colorformat, pixelformat;
+
+ pixelformat = inst->fmt_dst->fmt.pix_mp.pixelformat;
+ hfi_colorformat = pixelformat == V4L2_PIX_FMT_NV12 ? HFI_COLOR_FMT_NV12 : 0;
+
+ return iris_hfi_gen2_session_set_property(inst,
+ HFI_PROP_COLOR_FORMAT,
+ HFI_HOST_FLAGS_NONE,
+ port,
+ HFI_PAYLOAD_U32,
+ &hfi_colorformat,
+ sizeof(u32));
+}
+
+static int iris_hfi_gen2_set_linear_stride_scanline(struct iris_inst *inst)
+{
+ u32 port = iris_hfi_gen2_get_port(V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
+ u32 pixelformat = inst->fmt_dst->fmt.pix_mp.pixelformat;
+ u32 scanline_y = inst->fmt_dst->fmt.pix_mp.height;
+ u32 stride_y = inst->fmt_dst->fmt.pix_mp.width;
+ u32 scanline_uv = scanline_y / 2;
+ u32 stride_uv = stride_y;
+ u32 payload[2];
+
+ if (pixelformat != V4L2_PIX_FMT_NV12)
+ return 0;
+
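+ /* NV12 only: word0 packs the luma plane, word1 the chroma plane, each as stride << 16 | scanline */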
+ payload[0] = stride_y << 16 | scanline_y;
+ payload[1] = stride_uv << 16 | scanline_uv;
+
+ return iris_hfi_gen2_session_set_property(inst,
+ HFI_PROP_LINEAR_STRIDE_SCANLINE,
+ HFI_HOST_FLAGS_NONE,
+ port,
+ HFI_PAYLOAD_U64,
+ &payload,
+ sizeof(u64));
+}
+
+static int iris_hfi_gen2_session_set_config_params(struct iris_inst *inst, u32 plane)
+{
+ struct iris_core *core = inst->core;
+ u32 config_params_size, i, j;
+ const u32 *config_params;
+ int ret;
+
+ static const struct iris_hfi_prop_type_handle prop_type_handle_arr[] = {
+ {HFI_PROP_BITSTREAM_RESOLUTION, iris_hfi_gen2_set_bitstream_resolution },
+ {HFI_PROP_CROP_OFFSETS, iris_hfi_gen2_set_crop_offsets },
+ {HFI_PROP_CODED_FRAMES, iris_hfi_gen2_set_coded_frames },
+ {HFI_PROP_LUMA_CHROMA_BIT_DEPTH, iris_hfi_gen2_set_bit_depth },
+ {HFI_PROP_BUFFER_FW_MIN_OUTPUT_COUNT, iris_hfi_gen2_set_min_output_count },
+ {HFI_PROP_PIC_ORDER_CNT_TYPE, iris_hfi_gen2_set_picture_order_count },
+ {HFI_PROP_SIGNAL_COLOR_INFO, iris_hfi_gen2_set_colorspace },
+ {HFI_PROP_PROFILE, iris_hfi_gen2_set_profile },
+ {HFI_PROP_LEVEL, iris_hfi_gen2_set_level },
+ {HFI_PROP_COLOR_FORMAT, iris_hfi_gen2_set_colorformat },
+ {HFI_PROP_LINEAR_STRIDE_SCANLINE, iris_hfi_gen2_set_linear_stride_scanline },
+ };
+
+ if (V4L2_TYPE_IS_OUTPUT(plane)) {
+ config_params = core->iris_platform_data->input_config_params;
+ config_params_size = core->iris_platform_data->input_config_params_size;
+ } else {
+ config_params = core->iris_platform_data->output_config_params;
+ config_params_size = core->iris_platform_data->output_config_params_size;
+ }
+
+ if (!config_params || !config_params_size)
+ return -EINVAL;
+
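+ /* walk the platform's config list in order and invoke the matching property setter */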
+ for (i = 0; i < config_params_size; i++) {
+ for (j = 0; j < ARRAY_SIZE(prop_type_handle_arr); j++) {
+ if (prop_type_handle_arr[j].type == config_params[i]) {
+ ret = prop_type_handle_arr[j].handle(inst);
+ if (ret)
+ return ret;
+ break;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int iris_hfi_gen2_session_set_codec(struct iris_inst *inst)
+{
+ struct iris_inst_hfi_gen2 *inst_hfi_gen2 = to_iris_inst_hfi_gen2(inst);
+ u32 codec = HFI_CODEC_DECODE_AVC;
+
+ iris_hfi_gen2_packet_session_property(inst,
+ HFI_PROP_CODEC,
+ HFI_HOST_FLAGS_NONE,
+ HFI_PORT_NONE,
+ HFI_PAYLOAD_U32_ENUM,
+ &codec,
+ sizeof(u32));
+
+ return iris_hfi_queue_cmd_write(inst->core, inst_hfi_gen2->packet,
+ inst_hfi_gen2->packet->size);
+}
+
+static int iris_hfi_gen2_session_set_default_header(struct iris_inst *inst)
+{
+ struct iris_inst_hfi_gen2 *inst_hfi_gen2 = to_iris_inst_hfi_gen2(inst);
+ u32 default_header = false;
+
+ iris_hfi_gen2_packet_session_property(inst,
+ HFI_PROP_DEC_DEFAULT_HEADER,
+ HFI_HOST_FLAGS_NONE,
+ HFI_PORT_BITSTREAM,
+ HFI_PAYLOAD_U32,
+ &default_header,
+ sizeof(u32));
+
+ return iris_hfi_queue_cmd_write(inst->core, inst_hfi_gen2->packet,
+ inst_hfi_gen2->packet->size);
+}
+
+static int iris_hfi_gen2_session_open(struct iris_inst *inst)
+{
+ struct iris_inst_hfi_gen2 *inst_hfi_gen2 = to_iris_inst_hfi_gen2(inst);
+ int ret;
+
+ if (inst->state != IRIS_INST_DEINIT)
+ return -EALREADY;
+
+ inst_hfi_gen2->ipsc_properties_set = false;
+ inst_hfi_gen2->opsc_properties_set = false;
+
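+ /* single 4K command buffer, reused for every per-session HFI packet built by the gen2 packetizer */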
+ inst_hfi_gen2->packet = kzalloc(4096, GFP_KERNEL);
+ if (!inst_hfi_gen2->packet)
+ return -ENOMEM;
+
+ iris_hfi_gen2_packet_session_command(inst,
+ HFI_CMD_OPEN,
+ HFI_HOST_FLAGS_RESPONSE_REQUIRED |
+ HFI_HOST_FLAGS_INTR_REQUIRED,
+ HFI_PORT_NONE,
+ 0,
+ HFI_PAYLOAD_U32,
+ &inst->session_id,
+ sizeof(u32));
+
+ ret = iris_hfi_queue_cmd_write(inst->core, inst_hfi_gen2->packet,
+ inst_hfi_gen2->packet->size);
+ if (ret)
+ goto fail_free_packet;
+
+ ret = iris_hfi_gen2_session_set_codec(inst);
+ if (ret)
+ goto fail_free_packet;
+
+ ret = iris_hfi_gen2_session_set_default_header(inst);
+ if (ret)
+ goto fail_free_packet;
+
+ return 0;
+
+fail_free_packet:
+ kfree(inst_hfi_gen2->packet);
+ inst_hfi_gen2->packet = NULL;
+
+ return ret;
+}
+
+static int iris_hfi_gen2_session_close(struct iris_inst *inst)
+{
+ struct iris_inst_hfi_gen2 *inst_hfi_gen2 = to_iris_inst_hfi_gen2(inst);
+ int ret;
+
+ if (!inst_hfi_gen2->packet)
+ return -EINVAL;
+
+ iris_hfi_gen2_packet_session_command(inst,
+ HFI_CMD_CLOSE,
+ (HFI_HOST_FLAGS_RESPONSE_REQUIRED |
+ HFI_HOST_FLAGS_INTR_REQUIRED |
+ HFI_HOST_FLAGS_NON_DISCARDABLE),
+ HFI_PORT_NONE,
+ inst->session_id,
+ HFI_PAYLOAD_NONE,
+ NULL,
+ 0);
+
+ ret = iris_hfi_queue_cmd_write(inst->core, inst_hfi_gen2->packet,
+ inst_hfi_gen2->packet->size);
+
+ kfree(inst_hfi_gen2->packet);
+ inst_hfi_gen2->packet = NULL;
+
+ return ret;
+}
+
+static int iris_hfi_gen2_session_subscribe_mode(struct iris_inst *inst,
+ u32 cmd, u32 plane, u32 payload_type,
+ void *payload, u32 payload_size)
+{
+ struct iris_inst_hfi_gen2 *inst_hfi_gen2 = to_iris_inst_hfi_gen2(inst);
+
+ iris_hfi_gen2_packet_session_command(inst,
+ cmd,
+ (HFI_HOST_FLAGS_RESPONSE_REQUIRED |
+ HFI_HOST_FLAGS_INTR_REQUIRED),
+ iris_hfi_gen2_get_port(plane),
+ inst->session_id,
+ payload_type,
+ payload,
+ payload_size);
+
+ return iris_hfi_queue_cmd_write(inst->core, inst_hfi_gen2->packet,
+ inst_hfi_gen2->packet->size);
+}
+
+static int iris_hfi_gen2_subscribe_change_param(struct iris_inst *inst, u32 plane)
+{
+ struct iris_inst_hfi_gen2 *inst_hfi_gen2 = to_iris_inst_hfi_gen2(inst);
+ struct hfi_subscription_params subsc_params;
+ u32 prop_type, payload_size, payload_type;
+ struct iris_core *core = inst->core;
+ const u32 *change_param;
+ u32 change_param_size;
+ u32 payload[32] = {0};
+ u32 hfi_port = 0, i;
+ int ret;
+
+ if ((V4L2_TYPE_IS_OUTPUT(plane) && inst_hfi_gen2->ipsc_properties_set) ||
+ (V4L2_TYPE_IS_CAPTURE(plane) && inst_hfi_gen2->opsc_properties_set)) {
+ dev_err(core->dev, "invalid plane\n");
+ return 0;
+ }
+
+ change_param = core->iris_platform_data->input_config_params;
+ change_param_size = core->iris_platform_data->input_config_params_size;
+
+ payload[0] = HFI_MODE_PORT_SETTINGS_CHANGE;
+
+ for (i = 0; i < change_param_size; i++)
+ payload[i + 1] = change_param[i];
+
+ ret = iris_hfi_gen2_session_subscribe_mode(inst,
+ HFI_CMD_SUBSCRIBE_MODE,
+ plane,
+ HFI_PAYLOAD_U32_ARRAY,
+ &payload[0],
+ ((change_param_size + 1) * sizeof(u32)));
+ if (ret)
+ return ret;
+
+ if (V4L2_TYPE_IS_OUTPUT(plane)) {
+ inst_hfi_gen2->ipsc_properties_set = true;
+ } else {
+ hfi_port = iris_hfi_gen2_get_port(V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
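+ /* mirror the parameters recorded for the bitstream port and program them as properties on the raw port */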
+ memcpy(&inst_hfi_gen2->dst_subcr_params,
+ &inst_hfi_gen2->src_subcr_params,
+ sizeof(inst_hfi_gen2->src_subcr_params));
+ subsc_params = inst_hfi_gen2->dst_subcr_params;
+ for (i = 0; i < change_param_size; i++) {
+ payload[0] = 0;
+ payload[1] = 0;
+ payload_size = 0;
+ payload_type = 0;
+ prop_type = change_param[i];
+ switch (prop_type) {
+ case HFI_PROP_BITSTREAM_RESOLUTION:
+ payload[0] = subsc_params.bitstream_resolution;
+ payload_size = sizeof(u32);
+ payload_type = HFI_PAYLOAD_U32;
+ break;
+ case HFI_PROP_CROP_OFFSETS:
+ payload[0] = subsc_params.crop_offsets[0];
+ payload[1] = subsc_params.crop_offsets[1];
+ payload_size = sizeof(u64);
+ payload_type = HFI_PAYLOAD_64_PACKED;
+ break;
+ case HFI_PROP_CODED_FRAMES:
+ payload[0] = subsc_params.coded_frames;
+ payload_size = sizeof(u32);
+ payload_type = HFI_PAYLOAD_U32;
+ break;
+ case HFI_PROP_BUFFER_FW_MIN_OUTPUT_COUNT:
+ payload[0] = subsc_params.fw_min_count;
+ payload_size = sizeof(u32);
+ payload_type = HFI_PAYLOAD_U32;
+ break;
+ case HFI_PROP_PIC_ORDER_CNT_TYPE:
+ payload[0] = subsc_params.pic_order_cnt;
+ payload_size = sizeof(u32);
+ payload_type = HFI_PAYLOAD_U32;
+ break;
+ case HFI_PROP_SIGNAL_COLOR_INFO:
+ payload[0] = subsc_params.color_info;
+ payload_size = sizeof(u32);
+ payload_type = HFI_PAYLOAD_U32;
+ break;
+ case HFI_PROP_PROFILE:
+ payload[0] = subsc_params.profile;
+ payload_size = sizeof(u32);
+ payload_type = HFI_PAYLOAD_U32;
+ break;
+ case HFI_PROP_LEVEL:
+ payload[0] = subsc_params.level;
+ payload_size = sizeof(u32);
+ payload_type = HFI_PAYLOAD_U32;
+ break;
+ default:
+ prop_type = 0;
+ ret = -EINVAL;
+ break;
+ }
+ if (prop_type) {
+ ret = iris_hfi_gen2_session_set_property(inst,
+ prop_type,
+ HFI_HOST_FLAGS_NONE,
+ hfi_port,
+ payload_type,
+ &payload,
+ payload_size);
+ if (ret)
+ return ret;
+ }
+ }
+ inst_hfi_gen2->opsc_properties_set = true;
+ }
+
+ return 0;
+}
+
+static int iris_hfi_gen2_subscribe_property(struct iris_inst *inst, u32 plane)
+{
+ struct iris_core *core = inst->core;
+ u32 subscribe_prop_size, i;
+ const u32 *subscribe_prop;
+ u32 payload[32] = {0};
+
+ payload[0] = HFI_MODE_PROPERTY;
+
+ if (V4L2_TYPE_IS_OUTPUT(plane)) {
+ subscribe_prop_size = core->iris_platform_data->dec_input_prop_size;
+ subscribe_prop = core->iris_platform_data->dec_input_prop;
+ } else {
+ subscribe_prop_size = core->iris_platform_data->dec_output_prop_size;
+ subscribe_prop = core->iris_platform_data->dec_output_prop;
+ }
+
+ for (i = 0; i < subscribe_prop_size; i++)
+ payload[i + 1] = subscribe_prop[i];
+
+ return iris_hfi_gen2_session_subscribe_mode(inst,
+ HFI_CMD_SUBSCRIBE_MODE,
+ plane,
+ HFI_PAYLOAD_U32_ARRAY,
+ &payload[0],
+ (subscribe_prop_size + 1) * sizeof(u32));
+}
+
+static int iris_hfi_gen2_session_start(struct iris_inst *inst, u32 plane)
+{
+ struct iris_inst_hfi_gen2 *inst_hfi_gen2 = to_iris_inst_hfi_gen2(inst);
+ int ret = 0;
+
+ ret = iris_hfi_gen2_subscribe_change_param(inst, plane);
+ if (ret)
+ return ret;
+
+ ret = iris_hfi_gen2_subscribe_property(inst, plane);
+ if (ret)
+ return ret;
+
+ iris_hfi_gen2_packet_session_command(inst,
+ HFI_CMD_START,
+ (HFI_HOST_FLAGS_RESPONSE_REQUIRED |
+ HFI_HOST_FLAGS_INTR_REQUIRED),
+ iris_hfi_gen2_get_port(plane),
+ inst->session_id,
+ HFI_PAYLOAD_NONE,
+ NULL,
+ 0);
+
+ return iris_hfi_queue_cmd_write(inst->core, inst_hfi_gen2->packet,
+ inst_hfi_gen2->packet->size);
+}
+
+static int iris_hfi_gen2_session_stop(struct iris_inst *inst, u32 plane)
+{
+ struct iris_inst_hfi_gen2 *inst_hfi_gen2 = to_iris_inst_hfi_gen2(inst);
+ int ret = 0;
+
+ reinit_completion(&inst->completion);
+
+ iris_hfi_gen2_packet_session_command(inst,
+ HFI_CMD_STOP,
+ (HFI_HOST_FLAGS_RESPONSE_REQUIRED |
+ HFI_HOST_FLAGS_INTR_REQUIRED |
+ HFI_HOST_FLAGS_NON_DISCARDABLE),
+ iris_hfi_gen2_get_port(plane),
+ inst->session_id,
+ HFI_PAYLOAD_NONE,
+ NULL,
+ 0);
+
+ ret = iris_hfi_queue_cmd_write(inst->core, inst_hfi_gen2->packet,
+ inst_hfi_gen2->packet->size);
+ if (ret)
+ return ret;
+
+ return iris_wait_for_session_response(inst, false);
+}
+
+static int iris_hfi_gen2_session_pause(struct iris_inst *inst, u32 plane)
+{
+ struct iris_inst_hfi_gen2 *inst_hfi_gen2 = to_iris_inst_hfi_gen2(inst);
+
+ iris_hfi_gen2_packet_session_command(inst,
+ HFI_CMD_PAUSE,
+ (HFI_HOST_FLAGS_RESPONSE_REQUIRED |
+ HFI_HOST_FLAGS_INTR_REQUIRED),
+ iris_hfi_gen2_get_port(plane),
+ inst->session_id,
+ HFI_PAYLOAD_NONE,
+ NULL,
+ 0);
+
+ return iris_hfi_queue_cmd_write(inst->core, inst_hfi_gen2->packet,
+ inst_hfi_gen2->packet->size);
+}
+
+static int iris_hfi_gen2_session_resume_drc(struct iris_inst *inst, u32 plane)
+{
+ struct iris_inst_hfi_gen2 *inst_hfi_gen2 = to_iris_inst_hfi_gen2(inst);
+ u32 payload = HFI_CMD_SETTINGS_CHANGE;
+
+ iris_hfi_gen2_packet_session_command(inst,
+ HFI_CMD_RESUME,
+ (HFI_HOST_FLAGS_RESPONSE_REQUIRED |
+ HFI_HOST_FLAGS_INTR_REQUIRED),
+ iris_hfi_gen2_get_port(plane),
+ inst->session_id,
+ HFI_PAYLOAD_U32,
+ &payload,
+ sizeof(u32));
+
+ return iris_hfi_queue_cmd_write(inst->core, inst_hfi_gen2->packet,
+ inst_hfi_gen2->packet->size);
+}
+
+static int iris_hfi_gen2_session_resume_drain(struct iris_inst *inst, u32 plane)
+{
+ struct iris_inst_hfi_gen2 *inst_hfi_gen2 = to_iris_inst_hfi_gen2(inst);
+ u32 payload = HFI_CMD_DRAIN;
+
+ iris_hfi_gen2_packet_session_command(inst,
+ HFI_CMD_RESUME,
+ (HFI_HOST_FLAGS_RESPONSE_REQUIRED |
+ HFI_HOST_FLAGS_INTR_REQUIRED),
+ iris_hfi_gen2_get_port(plane),
+ inst->session_id,
+ HFI_PAYLOAD_U32,
+ &payload,
+ sizeof(u32));
+
+ return iris_hfi_queue_cmd_write(inst->core, inst_hfi_gen2->packet,
+ inst_hfi_gen2->packet->size);
+}
+
+static int iris_hfi_gen2_session_drain(struct iris_inst *inst, u32 plane)
+{
+ struct iris_inst_hfi_gen2 *inst_hfi_gen2 = to_iris_inst_hfi_gen2(inst);
+
+ if (!V4L2_TYPE_IS_OUTPUT(plane))
+ return 0;
+
+ iris_hfi_gen2_packet_session_command(inst,
+ HFI_CMD_DRAIN,
+ (HFI_HOST_FLAGS_RESPONSE_REQUIRED |
+ HFI_HOST_FLAGS_INTR_REQUIRED |
+ HFI_HOST_FLAGS_NON_DISCARDABLE),
+ iris_hfi_gen2_get_port(plane),
+ inst->session_id,
+ HFI_PAYLOAD_NONE,
+ NULL,
+ 0);
+
+ return iris_hfi_queue_cmd_write(inst->core, inst_hfi_gen2->packet,
+ inst_hfi_gen2->packet->size);
+}
+
+static u32 iris_hfi_gen2_buf_type_from_driver(enum iris_buffer_type buffer_type)
+{
+ switch (buffer_type) {
+ case BUF_INPUT:
+ return HFI_BUFFER_BITSTREAM;
+ case BUF_OUTPUT:
+ return HFI_BUFFER_RAW;
+ case BUF_BIN:
+ return HFI_BUFFER_BIN;
+ case BUF_COMV:
+ return HFI_BUFFER_COMV;
+ case BUF_NON_COMV:
+ return HFI_BUFFER_NON_COMV;
+ case BUF_LINE:
+ return HFI_BUFFER_LINE;
+ case BUF_DPB:
+ return HFI_BUFFER_DPB;
+ case BUF_PERSIST:
+ return HFI_BUFFER_PERSIST;
+ default:
+ return 0;
+ }
+}
+
+static int iris_set_num_comv(struct iris_inst *inst)
+{
+ struct platform_inst_caps *caps;
+ struct iris_core *core = inst->core;
+ u32 num_comv;
+
+ caps = core->iris_platform_data->inst_caps;
+ num_comv = caps->num_comv;
+
+ return core->hfi_ops->session_set_property(inst,
+ HFI_PROP_COMV_BUFFER_COUNT,
+ HFI_HOST_FLAGS_NONE,
+ HFI_PORT_BITSTREAM,
+ HFI_PAYLOAD_U32,
+ &num_comv, sizeof(u32));
+}
+
+static void iris_hfi_gen2_get_buffer(struct iris_buffer *buffer, struct iris_hfi_buffer *buf)
+{
+ memset(buf, 0, sizeof(*buf));
+ buf->type = iris_hfi_gen2_buf_type_from_driver(buffer->type);
+ buf->index = buffer->index;
+ buf->base_address = buffer->device_addr;
+ buf->addr_offset = 0;
+ buf->buffer_size = buffer->buffer_size;
+
+ if (buffer->type == BUF_INPUT)
+ buf->buffer_size = ALIGN(buffer->buffer_size, 256);
+ buf->data_offset = buffer->data_offset;
+ buf->data_size = buffer->data_size;
+ if (buffer->attr & BUF_ATTR_PENDING_RELEASE)
+ buf->flags |= HFI_BUF_HOST_FLAG_RELEASE;
+ buf->flags |= HFI_BUF_HOST_FLAGS_CB_NON_SECURE;
+ buf->timestamp = buffer->timestamp;
+}
+
+static int iris_hfi_gen2_session_queue_buffer(struct iris_inst *inst, struct iris_buffer *buffer)
+{
+ struct iris_inst_hfi_gen2 *inst_hfi_gen2 = to_iris_inst_hfi_gen2(inst);
+ struct iris_hfi_buffer hfi_buffer;
+ u32 port;
+ int ret;
+
+ iris_hfi_gen2_get_buffer(buffer, &hfi_buffer);
+ if (buffer->type == BUF_COMV) {
+ ret = iris_set_num_comv(inst);
+ if (ret)
+ return ret;
+ }
+
+ port = iris_hfi_gen2_get_port_from_buf_type(buffer->type);
+ iris_hfi_gen2_packet_session_command(inst,
+ HFI_CMD_BUFFER,
+ HFI_HOST_FLAGS_INTR_REQUIRED,
+ port,
+ inst->session_id,
+ HFI_PAYLOAD_STRUCTURE,
+ &hfi_buffer,
+ sizeof(hfi_buffer));
+
+ return iris_hfi_queue_cmd_write(inst->core, inst_hfi_gen2->packet,
+ inst_hfi_gen2->packet->size);
+}
+
+static int iris_hfi_gen2_session_release_buffer(struct iris_inst *inst, struct iris_buffer *buffer)
+{
+ struct iris_inst_hfi_gen2 *inst_hfi_gen2 = to_iris_inst_hfi_gen2(inst);
+ struct iris_hfi_buffer hfi_buffer;
+ u32 port;
+
+ iris_hfi_gen2_get_buffer(buffer, &hfi_buffer);
+ hfi_buffer.flags |= HFI_BUF_HOST_FLAG_RELEASE;
+ port = iris_hfi_gen2_get_port_from_buf_type(buffer->type);
+
+ iris_hfi_gen2_packet_session_command(inst,
+ HFI_CMD_BUFFER,
+ (HFI_HOST_FLAGS_RESPONSE_REQUIRED |
+ HFI_HOST_FLAGS_INTR_REQUIRED),
+ port,
+ inst->session_id,
+ HFI_PAYLOAD_STRUCTURE,
+ &hfi_buffer,
+ sizeof(hfi_buffer));
+
+ return iris_hfi_queue_cmd_write(inst->core, inst_hfi_gen2->packet,
+ inst_hfi_gen2->packet->size);
+}
+
+static const struct iris_hfi_command_ops iris_hfi_gen2_command_ops = {
+ .sys_init = iris_hfi_gen2_sys_init,
+ .sys_image_version = iris_hfi_gen2_sys_image_version,
+ .sys_interframe_powercollapse = iris_hfi_gen2_sys_interframe_powercollapse,
+ .sys_pc_prep = iris_hfi_gen2_sys_pc_prep,
+ .session_open = iris_hfi_gen2_session_open,
+ .session_set_config_params = iris_hfi_gen2_session_set_config_params,
+ .session_set_property = iris_hfi_gen2_session_set_property,
+ .session_start = iris_hfi_gen2_session_start,
+ .session_queue_buf = iris_hfi_gen2_session_queue_buffer,
+ .session_release_buf = iris_hfi_gen2_session_release_buffer,
+ .session_pause = iris_hfi_gen2_session_pause,
+ .session_resume_drc = iris_hfi_gen2_session_resume_drc,
+ .session_stop = iris_hfi_gen2_session_stop,
+ .session_drain = iris_hfi_gen2_session_drain,
+ .session_resume_drain = iris_hfi_gen2_session_resume_drain,
+ .session_close = iris_hfi_gen2_session_close,
+};
+
+void iris_hfi_gen2_command_ops_init(struct iris_core *core)
+{
+ core->hfi_ops = &iris_hfi_gen2_command_ops;
+}
+
+struct iris_inst *iris_hfi_gen2_get_instance(void)
+{
+ return kzalloc(sizeof(struct iris_inst_hfi_gen2), GFP_KERNEL);
+}
diff --git a/drivers/media/platform/qcom/iris/iris_hfi_gen2_defines.h b/drivers/media/platform/qcom/iris/iris_hfi_gen2_defines.h
new file mode 100644
index 000000000000..806f8bb7f505
--- /dev/null
+++ b/drivers/media/platform/qcom/iris/iris_hfi_gen2_defines.h
@@ -0,0 +1,161 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef __IRIS_HFI_GEN2_DEFINES_H__
+#define __IRIS_HFI_GEN2_DEFINES_H__
+
+#include <linux/types.h>
+
+#define HFI_VIDEO_ARCH_LX 0x1
+
+#define HFI_CMD_BEGIN 0x01000000
+#define HFI_CMD_INIT 0x01000001
+#define HFI_CMD_POWER_COLLAPSE 0x01000002
+#define HFI_CMD_OPEN 0x01000003
+#define HFI_CMD_CLOSE 0x01000004
+#define HFI_CMD_START 0x01000005
+#define HFI_CMD_STOP 0x01000006
+#define HFI_CMD_DRAIN 0x01000007
+#define HFI_CMD_RESUME 0x01000008
+#define HFI_CMD_BUFFER 0x01000009
+#define HFI_CMD_SUBSCRIBE_MODE 0x0100000B
+#define HFI_CMD_SETTINGS_CHANGE 0x0100000C
+#define HFI_CMD_PAUSE 0x01000011
+#define HFI_CMD_END 0x01FFFFFF
+
+#define HFI_BITMASK_BITSTREAM_WIDTH 0xffff0000
+#define HFI_BITMASK_BITSTREAM_HEIGHT 0x0000ffff
+#define HFI_BITMASK_FRAME_MBS_ONLY_FLAG 0x00000001
+
+#define HFI_PROP_BEGIN 0x03000000
+#define HFI_PROP_IMAGE_VERSION 0x03000001
+#define HFI_PROP_INTRA_FRAME_POWER_COLLAPSE 0x03000002
+#define HFI_PROP_UBWC_MAX_CHANNELS 0x03000003
+#define HFI_PROP_UBWC_MAL_LENGTH 0x03000004
+#define HFI_PROP_UBWC_HBB 0x03000005
+#define HFI_PROP_UBWC_BANK_SWZL_LEVEL1 0x03000006
+#define HFI_PROP_UBWC_BANK_SWZL_LEVEL2 0x03000007
+#define HFI_PROP_UBWC_BANK_SWZL_LEVEL3 0x03000008
+#define HFI_PROP_UBWC_BANK_SPREADING 0x03000009
+#define HFI_PROP_CODEC 0x03000100
+#define HFI_PROP_COLOR_FORMAT 0x03000101
+#define HFI_PROP_BITSTREAM_RESOLUTION 0x03000103
+#define HFI_PROP_LINEAR_STRIDE_SCANLINE 0x03000104
+#define HFI_PROP_CROP_OFFSETS 0x03000105
+#define HFI_PROP_PROFILE 0x03000107
+#define HFI_PROP_LEVEL 0x03000108
+#define HFI_PROP_STAGE 0x0300010a
+#define HFI_PROP_PIPE 0x0300010b
+#define HFI_PROP_LUMA_CHROMA_BIT_DEPTH 0x0300010f
+#define HFI_PROP_CODED_FRAMES 0x03000120
+#define HFI_PROP_CABAC_SESSION 0x03000121
+#define HFI_PROP_BUFFER_HOST_MAX_COUNT 0x03000123
+#define HFI_PROP_BUFFER_FW_MIN_OUTPUT_COUNT 0x03000124
+#define HFI_PROP_PIC_ORDER_CNT_TYPE 0x03000128
+#define HFI_PROP_QUALITY_MODE 0x03000148
+#define HFI_PROP_SIGNAL_COLOR_INFO 0x03000155
+#define HFI_PROP_PICTURE_TYPE 0x03000162
+#define HFI_PROP_DEC_DEFAULT_HEADER 0x03000168
+#define HFI_PROP_DEC_START_FROM_RAP_FRAME 0x03000169
+#define HFI_PROP_NO_OUTPUT 0x0300016a
+#define HFI_PROP_COMV_BUFFER_COUNT 0x03000193
+#define HFI_PROP_END 0x03FFFFFF
+
+#define HFI_SESSION_ERROR_BEGIN 0x04000000
+#define HFI_ERROR_UNKNOWN_SESSION 0x04000001
+#define HFI_ERROR_MAX_SESSIONS 0x04000002
+#define HFI_ERROR_FATAL 0x04000003
+#define HFI_ERROR_INVALID_STATE 0x04000004
+#define HFI_ERROR_INSUFFICIENT_RESOURCES 0x04000005
+#define HFI_ERROR_BUFFER_NOT_SET 0x04000006
+#define HFI_ERROR_STREAM_UNSUPPORTED 0x04000008
+#define HFI_SESSION_ERROR_END 0x04FFFFFF
+
+#define HFI_SYSTEM_ERROR_BEGIN 0x05000000
+#define HFI_SYS_ERROR_WD_TIMEOUT 0x05000001
+#define HFI_SYSTEM_ERROR_END 0x05FFFFFF
+
+#define HFI_INFORMATION_BEGIN 0x06000000
+#define HFI_INFO_UNSUPPORTED 0x06000001
+#define HFI_INFO_DATA_CORRUPT 0x06000002
+#define HFI_INFO_BUFFER_OVERFLOW 0x06000004
+#define HFI_INFO_HFI_FLAG_DRAIN_LAST 0x06000006
+#define HFI_INFO_HFI_FLAG_PSC_LAST 0x06000007
+#define HFI_INFORMATION_END 0x06FFFFFF
+
+enum hfi_property_mode_type {
+ HFI_MODE_PORT_SETTINGS_CHANGE = 0x00000001,
+ HFI_MODE_PROPERTY = 0x00000002,
+};
+
+enum hfi_color_format {
+ HFI_COLOR_FMT_OPAQUE = 0,
+ HFI_COLOR_FMT_NV12 = 1,
+ HFI_COLOR_FMT_NV12_UBWC = 2,
+ HFI_COLOR_FMT_P010 = 3,
+ HFI_COLOR_FMT_TP10_UBWC = 4,
+ HFI_COLOR_FMT_RGBA8888 = 5,
+ HFI_COLOR_FMT_RGBA8888_UBWC = 6,
+ HFI_COLOR_FMT_NV21 = 7,
+};
+
+enum hfi_codec_type {
+ HFI_CODEC_DECODE_AVC = 1,
+ HFI_CODEC_ENCODE_AVC = 2,
+};
+
+enum hfi_picture_type {
+ HFI_PICTURE_IDR = 0x00000001,
+ HFI_PICTURE_P = 0x00000002,
+ HFI_PICTURE_B = 0x00000004,
+ HFI_PICTURE_I = 0x00000008,
+ HFI_PICTURE_CRA = 0x00000010,
+ HFI_PICTURE_BLA = 0x00000020,
+};
+
+enum hfi_buffer_type {
+ HFI_BUFFER_BITSTREAM = 0x00000001,
+ HFI_BUFFER_RAW = 0x00000002,
+ HFI_BUFFER_METADATA = 0x00000003,
+ HFI_BUFFER_SUBCACHE = 0x00000004,
+ HFI_BUFFER_PARTIAL_DATA = 0x00000005,
+ HFI_BUFFER_DPB = 0x00000006,
+ HFI_BUFFER_BIN = 0x00000007,
+ HFI_BUFFER_LINE = 0x00000008,
+ HFI_BUFFER_ARP = 0x00000009,
+ HFI_BUFFER_COMV = 0x0000000A,
+ HFI_BUFFER_NON_COMV = 0x0000000B,
+ HFI_BUFFER_PERSIST = 0x0000000C,
+ HFI_BUFFER_VPSS = 0x0000000D,
+};
+
+enum hfi_buffer_host_flags {
+ HFI_BUF_HOST_FLAG_RELEASE = 0x00000001,
+ HFI_BUF_HOST_FLAG_READONLY = 0x00000010,
+ HFI_BUF_HOST_FLAG_CODEC_CONFIG = 0x00000100,
+ HFI_BUF_HOST_FLAGS_CB_NON_SECURE = 0x00000200,
+};
+
+enum hfi_buffer_firmware_flags {
+ HFI_BUF_FW_FLAG_RELEASE_DONE = 0x00000001,
+ HFI_BUF_FW_FLAG_READONLY = 0x00000010,
+ HFI_BUF_FW_FLAG_LAST = 0x10000000,
+ HFI_BUF_FW_FLAG_PSC_LAST = 0x20000000,
+};
+
+enum hfi_packet_firmware_flags {
+ HFI_FW_FLAGS_SUCCESS = 0x00000001,
+ HFI_FW_FLAGS_INFORMATION = 0x00000002,
+ HFI_FW_FLAGS_SESSION_ERROR = 0x00000004,
+ HFI_FW_FLAGS_SYSTEM_ERROR = 0x00000008,
+};
+
+struct hfi_debug_header {
+ u32 size;
+ u32 debug_level;
+ u32 reserved[2];
+};
+
+#endif
diff --git a/drivers/media/platform/qcom/iris/iris_hfi_gen2_packet.c b/drivers/media/platform/qcom/iris/iris_hfi_gen2_packet.c
new file mode 100644
index 000000000000..d77fa29f44fc
--- /dev/null
+++ b/drivers/media/platform/qcom/iris/iris_hfi_gen2_packet.c
@@ -0,0 +1,292 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include "iris_hfi_common.h"
+#include "iris_hfi_gen2.h"
+#include "iris_hfi_gen2_packet.h"
+
+u32 iris_hfi_gen2_get_color_primaries(u32 primaries)
+{
+ switch (primaries) {
+ case V4L2_COLORSPACE_DEFAULT:
+ return HFI_PRIMARIES_RESERVED;
+ case V4L2_COLORSPACE_REC709:
+ return HFI_PRIMARIES_BT709;
+ case V4L2_COLORSPACE_470_SYSTEM_M:
+ return HFI_PRIMARIES_BT470_SYSTEM_M;
+ case V4L2_COLORSPACE_470_SYSTEM_BG:
+ return HFI_PRIMARIES_BT470_SYSTEM_BG;
+ case V4L2_COLORSPACE_SMPTE170M:
+ return HFI_PRIMARIES_BT601_525;
+ case V4L2_COLORSPACE_SMPTE240M:
+ return HFI_PRIMARIES_SMPTE_ST240M;
+ case V4L2_COLORSPACE_BT2020:
+ return HFI_PRIMARIES_BT2020;
+ case V4L2_COLORSPACE_DCI_P3:
+ return HFI_PRIMARIES_SMPTE_RP431_2;
+ default:
+ return HFI_PRIMARIES_RESERVED;
+ }
+}
+
+u32 iris_hfi_gen2_get_transfer_char(u32 characteristics)
+{
+ switch (characteristics) {
+ case V4L2_XFER_FUNC_DEFAULT:
+ return HFI_TRANSFER_RESERVED;
+ case V4L2_XFER_FUNC_709:
+ return HFI_TRANSFER_BT709;
+ case V4L2_XFER_FUNC_SMPTE240M:
+ return HFI_TRANSFER_SMPTE_ST240M;
+ case V4L2_XFER_FUNC_SRGB:
+ return HFI_TRANSFER_SRGB_SYCC;
+ case V4L2_XFER_FUNC_SMPTE2084:
+ return HFI_TRANSFER_SMPTE_ST2084_PQ;
+ default:
+ return HFI_TRANSFER_RESERVED;
+ }
+}
+
+u32 iris_hfi_gen2_get_matrix_coefficients(u32 coefficients)
+{
+ switch (coefficients) {
+ case V4L2_YCBCR_ENC_DEFAULT:
+ return HFI_MATRIX_COEFF_RESERVED;
+ case V4L2_YCBCR_ENC_709:
+ return HFI_MATRIX_COEFF_BT709;
+ case V4L2_YCBCR_ENC_XV709:
+ return HFI_MATRIX_COEFF_BT709;
+ case V4L2_YCBCR_ENC_XV601:
+ return HFI_MATRIX_COEFF_BT470_SYS_BG_OR_BT601_625;
+ case V4L2_YCBCR_ENC_601:
+ return HFI_MATRIX_COEFF_BT601_525_BT1358_525_OR_625;
+ case V4L2_YCBCR_ENC_SMPTE240M:
+ return HFI_MATRIX_COEFF_SMPTE_ST240;
+ case V4L2_YCBCR_ENC_BT2020:
+ return HFI_MATRIX_COEFF_BT2020_NON_CONSTANT;
+ case V4L2_YCBCR_ENC_BT2020_CONST_LUM:
+ return HFI_MATRIX_COEFF_BT2020_CONSTANT;
+ default:
+ return HFI_MATRIX_COEFF_RESERVED;
+ }
+}
+
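+/*
+ * Pack the signal colour description into one 32-bit word:
+ * bits [7:0] matrix coefficients, [15:8] transfer characteristics,
+ * [23:16] colour primaries, bit 24 colour_description_present_flag,
+ * bit 25 full range flag, bits [28:26] video format and
+ * bit 29 video_signal_type_present_flag.
+ */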
+u32 iris_hfi_gen2_get_color_info(u32 matrix_coeff, u32 transfer_char, u32 primaries,
+ u32 colour_description_present_flag, u32 full_range,
+ u32 video_format, u32 video_signal_type_present_flag)
+{
+ return (matrix_coeff & 0xFF) |
+ ((transfer_char << 8) & 0xFF00) |
+ ((primaries << 16) & 0xFF0000) |
+ ((colour_description_present_flag << 24) & 0x1000000) |
+ ((full_range << 25) & 0x2000000) |
+ ((video_format << 26) & 0x1C000000) |
+ ((video_signal_type_present_flag << 29) & 0x20000000);
+}
+
+static void iris_hfi_gen2_create_header(struct iris_hfi_header *hdr,
+ u32 session_id, u32 header_id)
+{
+ memset(hdr, 0, sizeof(*hdr));
+
+ hdr->size = sizeof(*hdr);
+ hdr->session_id = session_id;
+ hdr->header_id = header_id;
+ hdr->num_packets = 0;
+}
+
+static void iris_hfi_gen2_create_packet(struct iris_hfi_header *hdr, u32 pkt_type,
+ u32 pkt_flags, u32 payload_type, u32 port,
+ u32 packet_id, void *payload, u32 payload_size)
+{
+ struct iris_hfi_packet *pkt = (struct iris_hfi_packet *)((u8 *)hdr + hdr->size);
+ u32 pkt_size = sizeof(*pkt) + payload_size;
+
+ memset(pkt, 0, pkt_size);
+ pkt->size = pkt_size;
+ pkt->type = pkt_type;
+ pkt->flags = pkt_flags;
+ pkt->payload_info = payload_type;
+ pkt->port = port;
+ pkt->packet_id = packet_id;
+ if (payload_size)
+ memcpy(&pkt->payload[0], payload, payload_size);
+
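+ /* grow the header size so the next packet is appended immediately after this one */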
+ hdr->num_packets++;
+ hdr->size += pkt->size;
+}
+
+void iris_hfi_gen2_packet_sys_init(struct iris_core *core, struct iris_hfi_header *hdr)
+{
+ u32 payload = 0;
+
+ iris_hfi_gen2_create_header(hdr, 0, core->header_id++);
+
+ payload = HFI_VIDEO_ARCH_LX;
+ iris_hfi_gen2_create_packet(hdr,
+ HFI_CMD_INIT,
+ (HFI_HOST_FLAGS_RESPONSE_REQUIRED |
+ HFI_HOST_FLAGS_INTR_REQUIRED |
+ HFI_HOST_FLAGS_NON_DISCARDABLE),
+ HFI_PAYLOAD_U32,
+ HFI_PORT_NONE,
+ core->packet_id++,
+ &payload,
+ sizeof(u32));
+
+ payload = core->iris_platform_data->ubwc_config->max_channels;
+ iris_hfi_gen2_create_packet(hdr,
+ HFI_PROP_UBWC_MAX_CHANNELS,
+ HFI_HOST_FLAGS_NONE,
+ HFI_PAYLOAD_U32,
+ HFI_PORT_NONE,
+ core->packet_id++,
+ &payload,
+ sizeof(u32));
+
+ payload = core->iris_platform_data->ubwc_config->mal_length;
+ iris_hfi_gen2_create_packet(hdr,
+ HFI_PROP_UBWC_MAL_LENGTH,
+ HFI_HOST_FLAGS_NONE,
+ HFI_PAYLOAD_U32,
+ HFI_PORT_NONE,
+ core->packet_id++,
+ &payload,
+ sizeof(u32));
+
+ payload = core->iris_platform_data->ubwc_config->highest_bank_bit;
+ iris_hfi_gen2_create_packet(hdr,
+ HFI_PROP_UBWC_HBB,
+ HFI_HOST_FLAGS_NONE,
+ HFI_PAYLOAD_U32,
+ HFI_PORT_NONE,
+ core->packet_id++,
+ &payload,
+ sizeof(u32));
+
+ payload = core->iris_platform_data->ubwc_config->bank_swzl_level;
+ iris_hfi_gen2_create_packet(hdr,
+ HFI_PROP_UBWC_BANK_SWZL_LEVEL1,
+ HFI_HOST_FLAGS_NONE,
+ HFI_PAYLOAD_U32,
+ HFI_PORT_NONE,
+ core->packet_id++,
+ &payload,
+ sizeof(u32));
+
+ payload = core->iris_platform_data->ubwc_config->bank_swz2_level;
+ iris_hfi_gen2_create_packet(hdr,
+ HFI_PROP_UBWC_BANK_SWZL_LEVEL2,
+ HFI_HOST_FLAGS_NONE,
+ HFI_PAYLOAD_U32,
+ HFI_PORT_NONE,
+ core->packet_id++,
+ &payload,
+ sizeof(u32));
+
+ payload = core->iris_platform_data->ubwc_config->bank_swz3_level;
+ iris_hfi_gen2_create_packet(hdr,
+ HFI_PROP_UBWC_BANK_SWZL_LEVEL3,
+ HFI_HOST_FLAGS_NONE,
+ HFI_PAYLOAD_U32,
+ HFI_PORT_NONE,
+ core->packet_id++,
+ &payload,
+ sizeof(u32));
+
+ payload = core->iris_platform_data->ubwc_config->bank_spreading;
+ iris_hfi_gen2_create_packet(hdr,
+ HFI_PROP_UBWC_BANK_SPREADING,
+ HFI_HOST_FLAGS_NONE,
+ HFI_PAYLOAD_U32,
+ HFI_PORT_NONE,
+ core->packet_id++,
+ &payload,
+ sizeof(u32));
+}
+
+void iris_hfi_gen2_packet_image_version(struct iris_core *core, struct iris_hfi_header *hdr)
+{
+ iris_hfi_gen2_create_header(hdr, 0, core->header_id++);
+
+ iris_hfi_gen2_create_packet(hdr,
+ HFI_PROP_IMAGE_VERSION,
+ (HFI_HOST_FLAGS_RESPONSE_REQUIRED |
+ HFI_HOST_FLAGS_INTR_REQUIRED |
+ HFI_HOST_FLAGS_GET_PROPERTY),
+ HFI_PAYLOAD_NONE,
+ HFI_PORT_NONE,
+ core->packet_id++,
+ NULL, 0);
+}
+
+void iris_hfi_gen2_packet_session_command(struct iris_inst *inst, u32 pkt_type,
+ u32 flags, u32 port, u32 session_id,
+ u32 payload_type, void *payload,
+ u32 payload_size)
+{
+ struct iris_inst_hfi_gen2 *inst_hfi_gen2 = to_iris_inst_hfi_gen2(inst);
+ struct iris_core *core = inst->core;
+
+ iris_hfi_gen2_create_header(inst_hfi_gen2->packet, session_id, core->header_id++);
+
+ iris_hfi_gen2_create_packet(inst_hfi_gen2->packet,
+ pkt_type,
+ flags,
+ payload_type,
+ port,
+ core->packet_id++,
+ payload,
+ payload_size);
+}
+
+void iris_hfi_gen2_packet_session_property(struct iris_inst *inst,
+ u32 pkt_type, u32 flags, u32 port,
+ u32 payload_type, void *payload, u32 payload_size)
+{
+ struct iris_inst_hfi_gen2 *inst_hfi_gen2 = to_iris_inst_hfi_gen2(inst);
+ struct iris_core *core = inst->core;
+
+ iris_hfi_gen2_create_header(inst_hfi_gen2->packet, inst->session_id, core->header_id++);
+
+ iris_hfi_gen2_create_packet(inst_hfi_gen2->packet,
+ pkt_type,
+ flags,
+ payload_type,
+ port,
+ core->packet_id++,
+ payload,
+ payload_size);
+}
+
+void iris_hfi_gen2_packet_sys_interframe_powercollapse(struct iris_core *core,
+ struct iris_hfi_header *hdr)
+{
+ u32 payload = 1; /* HFI_TRUE */
+
+ iris_hfi_gen2_create_header(hdr, 0 /*session_id*/, core->header_id++);
+
+ iris_hfi_gen2_create_packet(hdr,
+ HFI_PROP_INTRA_FRAME_POWER_COLLAPSE,
+ HFI_HOST_FLAGS_NONE,
+ HFI_PAYLOAD_U32,
+ HFI_PORT_NONE,
+ core->packet_id++,
+ &payload,
+ sizeof(u32));
+}
+
+void iris_hfi_gen2_packet_sys_pc_prep(struct iris_core *core, struct iris_hfi_header *hdr)
+{
+ iris_hfi_gen2_create_header(hdr, 0 /*session_id*/, core->header_id++);
+
+ iris_hfi_gen2_create_packet(hdr,
+ HFI_CMD_POWER_COLLAPSE,
+ HFI_HOST_FLAGS_NONE,
+ HFI_PAYLOAD_NONE,
+ HFI_PORT_NONE,
+ core->packet_id++,
+ NULL, 0);
+}
diff --git a/drivers/media/platform/qcom/iris/iris_hfi_gen2_packet.h b/drivers/media/platform/qcom/iris/iris_hfi_gen2_packet.h
new file mode 100644
index 000000000000..25b9582349ca
--- /dev/null
+++ b/drivers/media/platform/qcom/iris/iris_hfi_gen2_packet.h
@@ -0,0 +1,125 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef __IRIS_HFI_GEN2_PACKET_H__
+#define __IRIS_HFI_GEN2_PACKET_H__
+
+#include "iris_hfi_gen2_defines.h"
+
+struct iris_core;
+
+/**
+ * struct iris_hfi_header
+ *
+ * @size: size of the total packet in bytes including hfi_header
+ * @session_id: For session level hfi_header session_id is non-zero.
+ * For system level hfi_header session_id is zero.
+ * @header_id: unique header id for each hfi_header
+ * @reserved: reserved for future use
+ * @num_packets: number of hfi_packet that are included with the hfi_header
+ */
+struct iris_hfi_header {
+ u32 size;
+ u32 session_id;
+ u32 header_id;
+ u32 reserved[4];
+ u32 num_packets;
+};
+
+/**
+ * struct iris_hfi_packet
+ *
+ * @size: size of the hfi_packet in bytes including payload
+ * @type: one of the below hfi_packet types:
+ * HFI_CMD_*,
+ * HFI_PROP_*,
+ * HFI_ERROR_*,
+ * HFI_INFO_*,
+ * HFI_SYS_ERROR_*
+ * @flags: hfi_packet flags. It is represented as bit masks.
+ * host packet flags are "enum hfi_packet_host_flags"
+ * firmware packet flags are "enum hfi_packet_firmware_flags"
+ * @payload_info: payload information indicated by "enum hfi_packet_payload_info"
+ * @port: hfi_packet port type indicated by "enum hfi_packet_port_type"
+ * This is bitmask and may be applicable to multiple ports.
+ * @packet_id: host hfi_packet contains unique packet id.
+ * firmware returns host packet id in response packet
+ * wherever applicable. If not applicable firmware sets it to zero.
+ * @reserved: reserved for future use.
+ * @payload: flexible array of payload having additional packet information.
+ */
+struct iris_hfi_packet {
+ u32 size;
+ u32 type;
+ u32 flags;
+ u32 payload_info;
+ u32 port;
+ u32 packet_id;
+ u32 reserved[2];
+ u32 payload[];
+};
+
+/**
+ * struct iris_hfi_buffer
+ *
+ * @type: buffer type indicated by "enum hfi_buffer_type"
+ * FW needs to return proper type for any buffer command.
+ * @index: index of the buffer
+ * @base_address: base address of the buffer.
+ * This buffer address is always 4KBytes aligned.
+ * @addr_offset: accessible buffer offset from base address
+ * Decoder bitstream buffer: 256 Bytes aligned
+ * Firmware can uniquely identify a buffer based on
+ * base_address & addr_offset.
+ * HW can read memory only from base_address+addr_offset.
+ * @buffer_size: accessible buffer size in bytes starting from addr_offset
+ * @data_offset: data starts from "base_address + addr_offset + data_offset"
+ * RAW buffer: data_offset is 0. Restriction: 4KBytes aligned
+ * decoder bitstream buffer: no restriction (can be any value)
+ * @data_size: data size in bytes
+ * @flags: buffer flags. It is represented as bit masks.
+ * host buffer flags are "enum hfi_buffer_host_flags"
+ * firmware buffer flags are "enum hfi_buffer_firmware_flags"
+ * @timestamp: timestamp of the buffer in nanoseconds (ns).
+ * It is the presentation timestamp (PTS) for both encoder and decoder.
+ * Decoder: it is passed through from the bitstream to the raw buffer;
+ * firmware does not need to return it as part of the input buffer done.
+ * For internal buffers there is no timestamp; the host sets it to 0.
+ * @reserved: reserved for future use
+ */
+struct iris_hfi_buffer {
+ u32 type;
+ u32 index;
+ u64 base_address;
+ u32 addr_offset;
+ u32 buffer_size;
+ u32 data_offset;
+ u32 data_size;
+ u64 timestamp;
+ u32 flags;
+ u32 reserved[5];
+};
+
+u32 iris_hfi_gen2_get_color_primaries(u32 primaries);
+u32 iris_hfi_gen2_get_transfer_char(u32 characteristics);
+u32 iris_hfi_gen2_get_matrix_coefficients(u32 coefficients);
+u32 iris_hfi_gen2_get_color_info(u32 matrix_coeff, u32 transfer_char, u32 primaries,
+ u32 colour_description_present_flag, u32 full_range,
+ u32 video_format, u32 video_signal_type_present_flag);
+
+void iris_hfi_gen2_packet_sys_init(struct iris_core *core, struct iris_hfi_header *hdr);
+void iris_hfi_gen2_packet_image_version(struct iris_core *core, struct iris_hfi_header *hdr);
+void iris_hfi_gen2_packet_session_command(struct iris_inst *inst, u32 pkt_type,
+ u32 flags, u32 port, u32 session_id,
+ u32 payload_type, void *payload,
+ u32 payload_size);
+void iris_hfi_gen2_packet_session_property(struct iris_inst *inst,
+ u32 pkt_type, u32 flags, u32 port,
+ u32 payload_type, void *payload, u32 payload_size);
+void iris_hfi_gen2_packet_sys_interframe_powercollapse(struct iris_core *core,
+ struct iris_hfi_header *hdr);
+void iris_hfi_gen2_packet_sys_pc_prep(struct iris_core *core, struct iris_hfi_header *hdr);
+
+#endif
diff --git a/drivers/media/platform/qcom/iris/iris_hfi_gen2_response.c b/drivers/media/platform/qcom/iris/iris_hfi_gen2_response.c
new file mode 100644
index 000000000000..b75a01641d5d
--- /dev/null
+++ b/drivers/media/platform/qcom/iris/iris_hfi_gen2_response.c
@@ -0,0 +1,934 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <media/v4l2-mem2mem.h>
+
+#include "iris_hfi_gen2.h"
+#include "iris_hfi_gen2_defines.h"
+#include "iris_hfi_gen2_packet.h"
+#include "iris_vdec.h"
+#include "iris_vpu_buffer.h"
+#include "iris_vpu_common.h"
+
+struct iris_hfi_gen2_core_hfi_range {
+ u32 begin;
+ u32 end;
+ int (*handle)(struct iris_core *core, struct iris_hfi_packet *pkt);
+};
+
+struct iris_hfi_gen2_inst_hfi_range {
+ u32 begin;
+ u32 end;
+ int (*handle)(struct iris_inst *inst, struct iris_hfi_packet *pkt);
+};
+
+struct iris_hfi_gen2_packet_handle {
+ enum hfi_buffer_type type;
+ int (*handle)(struct iris_inst *inst, struct iris_hfi_packet *pkt);
+};
+
+static u32 iris_hfi_gen2_buf_type_to_driver(enum hfi_buffer_type buf_type)
+{
+ switch (buf_type) {
+ case HFI_BUFFER_BITSTREAM:
+ return BUF_INPUT;
+ case HFI_BUFFER_RAW:
+ return BUF_OUTPUT;
+ case HFI_BUFFER_BIN:
+ return BUF_BIN;
+ case HFI_BUFFER_ARP:
+ return BUF_ARP;
+ case HFI_BUFFER_COMV:
+ return BUF_COMV;
+ case HFI_BUFFER_NON_COMV:
+ return BUF_NON_COMV;
+ case HFI_BUFFER_LINE:
+ return BUF_LINE;
+ case HFI_BUFFER_DPB:
+ return BUF_DPB;
+ case HFI_BUFFER_PERSIST:
+ return BUF_PERSIST;
+ default:
+ return 0;
+ }
+}
+
+static bool iris_hfi_gen2_is_valid_hfi_buffer_type(u32 buffer_type)
+{
+ switch (buffer_type) {
+ case HFI_BUFFER_BITSTREAM:
+ case HFI_BUFFER_RAW:
+ case HFI_BUFFER_BIN:
+ case HFI_BUFFER_ARP:
+ case HFI_BUFFER_COMV:
+ case HFI_BUFFER_NON_COMV:
+ case HFI_BUFFER_LINE:
+ case HFI_BUFFER_DPB:
+ case HFI_BUFFER_PERSIST:
+ case HFI_BUFFER_VPSS:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool iris_hfi_gen2_is_valid_hfi_port(u32 port, u32 buffer_type)
+{
+ if (port == HFI_PORT_NONE && buffer_type != HFI_BUFFER_PERSIST)
+ return false;
+
+ if (port != HFI_PORT_BITSTREAM && port != HFI_PORT_RAW && port != HFI_PORT_NONE)
+ return false;
+
+ return true;
+}
+
+static int iris_hfi_gen2_get_driver_buffer_flags(struct iris_inst *inst, u32 hfi_flags)
+{
+ u32 keyframe = HFI_PICTURE_IDR | HFI_PICTURE_I | HFI_PICTURE_CRA | HFI_PICTURE_BLA;
+ struct iris_inst_hfi_gen2 *inst_hfi_gen2 = to_iris_inst_hfi_gen2(inst);
+ u32 driver_flags = 0;
+
+ if (inst_hfi_gen2->hfi_frame_info.picture_type & keyframe)
+ driver_flags |= V4L2_BUF_FLAG_KEYFRAME;
+ else if (inst_hfi_gen2->hfi_frame_info.picture_type & HFI_PICTURE_P)
+ driver_flags |= V4L2_BUF_FLAG_PFRAME;
+ else if (inst_hfi_gen2->hfi_frame_info.picture_type & HFI_PICTURE_B)
+ driver_flags |= V4L2_BUF_FLAG_BFRAME;
+
+ if (inst_hfi_gen2->hfi_frame_info.data_corrupt || inst_hfi_gen2->hfi_frame_info.overflow)
+ driver_flags |= V4L2_BUF_FLAG_ERROR;
+
+ if (hfi_flags & HFI_BUF_FW_FLAG_LAST ||
+ hfi_flags & HFI_BUF_FW_FLAG_PSC_LAST)
+ driver_flags |= V4L2_BUF_FLAG_LAST;
+
+ return driver_flags;
+}
+
+static bool iris_hfi_gen2_validate_packet_payload(struct iris_hfi_packet *pkt)
+{
+ u32 payload_size = 0;
+
+ switch (pkt->payload_info) {
+ case HFI_PAYLOAD_U32:
+ case HFI_PAYLOAD_S32:
+ case HFI_PAYLOAD_Q16:
+ case HFI_PAYLOAD_U32_ENUM:
+ case HFI_PAYLOAD_32_PACKED:
+ payload_size = 4;
+ break;
+ case HFI_PAYLOAD_U64:
+ case HFI_PAYLOAD_S64:
+ case HFI_PAYLOAD_64_PACKED:
+ payload_size = 8;
+ break;
+ case HFI_PAYLOAD_STRUCTURE:
+ if (pkt->type == HFI_CMD_BUFFER)
+ payload_size = sizeof(struct iris_hfi_buffer);
+ break;
+ default:
+ payload_size = 0;
+ break;
+ }
+
+ if (pkt->size < sizeof(struct iris_hfi_packet) + payload_size)
+ return false;
+
+ return true;
+}
+
+static int iris_hfi_gen2_validate_packet(u8 *response_pkt, u8 *core_resp_pkt)
+{
+ u8 *response_limit = core_resp_pkt + IFACEQ_CORE_PKT_SIZE;
+ u32 response_pkt_size = *(u32 *)response_pkt;
+
+ if (!response_pkt_size)
+ return -EINVAL;
+
+ if (response_pkt_size < sizeof(struct iris_hfi_packet))
+ return -EINVAL;
+
+ if (response_pkt + response_pkt_size > response_limit)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int iris_hfi_gen2_validate_hdr_packet(struct iris_core *core, struct iris_hfi_header *hdr)
+{
+ struct iris_hfi_packet *packet;
+ int ret;
+ u8 *pkt;
+ u32 i;
+
+ if (hdr->size < sizeof(*hdr) + sizeof(*packet))
+ return -EINVAL;
+
+ pkt = (u8 *)hdr + sizeof(*hdr);
+
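+ /* walk each packet declared in the header, bounds-checking it against the response buffer */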
+ for (i = 0; i < hdr->num_packets; i++) {
+ packet = (struct iris_hfi_packet *)pkt;
+ ret = iris_hfi_gen2_validate_packet(pkt, core->response_packet);
+ if (ret)
+ return ret;
+
+ pkt += packet->size;
+ }
+
+ return 0;
+}
+
+static int iris_hfi_gen2_handle_session_info(struct iris_inst *inst,
+ struct iris_hfi_packet *pkt)
+{
+ struct iris_inst_hfi_gen2 *inst_hfi_gen2 = to_iris_inst_hfi_gen2(inst);
+ struct iris_core *core = inst->core;
+ int ret = 0;
+ char *info;
+
+ switch (pkt->type) {
+ case HFI_INFO_UNSUPPORTED:
+ info = "unsupported";
+ break;
+ case HFI_INFO_DATA_CORRUPT:
+ info = "data corrupt";
+ inst_hfi_gen2->hfi_frame_info.data_corrupt = 1;
+ break;
+ case HFI_INFO_BUFFER_OVERFLOW:
+ info = "buffer overflow";
+ inst_hfi_gen2->hfi_frame_info.overflow = 1;
+ break;
+ case HFI_INFO_HFI_FLAG_DRAIN_LAST:
+ info = "drain last flag";
+ ret = iris_inst_sub_state_change_drain_last(inst);
+ break;
+ case HFI_INFO_HFI_FLAG_PSC_LAST:
+ info = "drc last flag";
+ ret = iris_inst_sub_state_change_drc_last(inst);
+ break;
+ default:
+ info = "unknown";
+ break;
+ }
+
+ dev_dbg(core->dev, "session info received %#x: %s\n",
+ pkt->type, info);
+
+ return ret;
+}
+
+static int iris_hfi_gen2_handle_session_error(struct iris_inst *inst,
+ struct iris_hfi_packet *pkt)
+{
+ struct iris_core *core = inst->core;
+ char *error;
+
+ switch (pkt->type) {
+ case HFI_ERROR_MAX_SESSIONS:
+ error = "exceeded max sessions";
+ break;
+ case HFI_ERROR_UNKNOWN_SESSION:
+ error = "unknown session id";
+ break;
+ case HFI_ERROR_INVALID_STATE:
+ error = "invalid operation for current state";
+ break;
+ case HFI_ERROR_INSUFFICIENT_RESOURCES:
+ error = "insufficient resources";
+ break;
+ case HFI_ERROR_BUFFER_NOT_SET:
+ error = "internal buffers not set";
+ break;
+ case HFI_ERROR_FATAL:
+ error = "fatal error";
+ break;
+ case HFI_ERROR_STREAM_UNSUPPORTED:
+ error = "unsupported stream";
+ break;
+ default:
+ error = "unknown";
+ break;
+ }
+
+ dev_err(core->dev, "session error received %#x: %s\n", pkt->type, error);
+ iris_vb2_queue_error(inst);
+ iris_inst_change_state(inst, IRIS_INST_ERROR);
+
+ return 0;
+}
+
+static int iris_hfi_gen2_handle_system_error(struct iris_core *core,
+ struct iris_hfi_packet *pkt)
+{
+ struct iris_inst *instance;
+
+ dev_err(core->dev, "received system error of type %#x\n", pkt->type);
+
+ core->state = IRIS_CORE_ERROR;
+
+ mutex_lock(&core->lock);
+ list_for_each_entry(instance, &core->instances, list)
+ iris_inst_change_state(instance, IRIS_INST_ERROR);
+ mutex_unlock(&core->lock);
+
+ schedule_delayed_work(&core->sys_error_handler, msecs_to_jiffies(10));
+
+ return 0;
+}
+
+static int iris_hfi_gen2_handle_system_init(struct iris_core *core,
+ struct iris_hfi_packet *pkt)
+{
+ if (!(pkt->flags & HFI_FW_FLAGS_SUCCESS)) {
+ core->state = IRIS_CORE_ERROR;
+ return 0;
+ }
+
+ complete(&core->core_init_done);
+
+ return 0;
+}
+
+static void iris_hfi_gen2_handle_session_close(struct iris_inst *inst,
+ struct iris_hfi_packet *pkt)
+{
+ if (!(pkt->flags & HFI_FW_FLAGS_SUCCESS)) {
+ iris_inst_change_state(inst, IRIS_INST_ERROR);
+ return;
+ }
+
+ complete(&inst->completion);
+}
+
+static int iris_hfi_gen2_handle_input_buffer(struct iris_inst *inst,
+ struct iris_hfi_buffer *buffer)
+{
+ struct v4l2_m2m_ctx *m2m_ctx = inst->m2m_ctx;
+ struct v4l2_m2m_buffer *m2m_buffer, *n;
+ struct iris_buffer *buf;
+ bool found = false;
+
+ v4l2_m2m_for_each_src_buf_safe(m2m_ctx, m2m_buffer, n) {
+ buf = to_iris_buffer(&m2m_buffer->vb);
+ if (buf->index == buffer->index) {
+ found = true;
+ break;
+ }
+ }
+ if (!found)
+ return -EINVAL;
+
+ if (!(buf->attr & BUF_ATTR_QUEUED))
+ return -EINVAL;
+
+ buf->attr &= ~BUF_ATTR_QUEUED;
+ buf->attr |= BUF_ATTR_DEQUEUED;
+
+ buf->flags = iris_hfi_gen2_get_driver_buffer_flags(inst, buffer->flags);
+
+ return 0;
+}
+
+static int iris_hfi_gen2_handle_output_buffer(struct iris_inst *inst,
+ struct iris_hfi_buffer *hfi_buffer)
+{
+ struct v4l2_m2m_ctx *m2m_ctx = inst->m2m_ctx;
+ struct v4l2_m2m_buffer *m2m_buffer, *n;
+ struct iris_buffer *buf;
+ bool found = false;
+ int ret;
+
+ if (hfi_buffer->flags & HFI_BUF_FW_FLAG_LAST) {
+ ret = iris_inst_sub_state_change_drain_last(inst);
+ if (ret)
+ return ret;
+ }
+
+ if (hfi_buffer->flags & HFI_BUF_FW_FLAG_PSC_LAST) {
+ ret = iris_inst_sub_state_change_drc_last(inst);
+ if (ret)
+ return ret;
+ }
+
+ v4l2_m2m_for_each_dst_buf_safe(m2m_ctx, m2m_buffer, n) {
+ buf = to_iris_buffer(&m2m_buffer->vb);
+ if (buf->index == hfi_buffer->index &&
+ buf->device_addr == hfi_buffer->base_address &&
+ buf->data_offset == hfi_buffer->data_offset) {
+ found = true;
+ break;
+ }
+ }
+ if (!found)
+ return -EINVAL;
+
+ if (!(buf->attr & BUF_ATTR_QUEUED))
+ return -EINVAL;
+
+ buf->data_offset = hfi_buffer->data_offset;
+ buf->data_size = hfi_buffer->data_size;
+ buf->timestamp = hfi_buffer->timestamp;
+
+ buf->attr &= ~BUF_ATTR_QUEUED;
+ buf->attr |= BUF_ATTR_DEQUEUED;
+
+ buf->flags = iris_hfi_gen2_get_driver_buffer_flags(inst, hfi_buffer->flags);
+
+ return 0;
+}
+
+static void iris_hfi_gen2_handle_dequeue_buffers(struct iris_inst *inst)
+{
+ struct v4l2_m2m_ctx *m2m_ctx = inst->m2m_ctx;
+ struct v4l2_m2m_buffer *buffer, *n;
+ struct iris_buffer *buf = NULL;
+
+ v4l2_m2m_for_each_src_buf_safe(m2m_ctx, buffer, n) {
+ buf = to_iris_buffer(&buffer->vb);
+ if (buf->attr & BUF_ATTR_DEQUEUED) {
+ buf->attr &= ~BUF_ATTR_DEQUEUED;
+ if (!(buf->attr & BUF_ATTR_BUFFER_DONE)) {
+ buf->attr |= BUF_ATTR_BUFFER_DONE;
+ iris_vb2_buffer_done(inst, buf);
+ }
+ }
+ }
+
+ v4l2_m2m_for_each_dst_buf_safe(m2m_ctx, buffer, n) {
+ buf = to_iris_buffer(&buffer->vb);
+ if (buf->attr & BUF_ATTR_DEQUEUED) {
+ buf->attr &= ~BUF_ATTR_DEQUEUED;
+ if (!(buf->attr & BUF_ATTR_BUFFER_DONE)) {
+ buf->attr |= BUF_ATTR_BUFFER_DONE;
+ iris_vb2_buffer_done(inst, buf);
+ }
+ }
+ }
+}
+
+static int iris_hfi_gen2_handle_release_internal_buffer(struct iris_inst *inst,
+ struct iris_hfi_buffer *buffer)
+{
+ u32 buf_type = iris_hfi_gen2_buf_type_to_driver(buffer->type);
+ struct iris_buffers *buffers = &inst->buffers[buf_type];
+ struct iris_buffer *buf, *iter;
+ bool found = false;
+ int ret = 0;
+
+ list_for_each_entry(iter, &buffers->list, list) {
+ if (iter->device_addr == buffer->base_address) {
+ found = true;
+ buf = iter;
+ break;
+ }
+ }
+ if (!found)
+ return -EINVAL;
+
+ buf->attr &= ~BUF_ATTR_QUEUED;
+ if (buf->attr & BUF_ATTR_PENDING_RELEASE)
+ ret = iris_destroy_internal_buffer(inst, buf);
+
+ return ret;
+}
+
+static int iris_hfi_gen2_handle_session_stop(struct iris_inst *inst,
+ struct iris_hfi_packet *pkt)
+{
+ int ret = 0;
+
+ if (pkt->port == HFI_PORT_RAW)
+ ret = iris_inst_sub_state_change_pause(inst, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
+ else if (pkt->port == HFI_PORT_BITSTREAM)
+ ret = iris_inst_sub_state_change_pause(inst, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+
+ complete(&inst->completion);
+
+ return ret;
+}
+
+static int iris_hfi_gen2_handle_session_buffer(struct iris_inst *inst,
+ struct iris_hfi_packet *pkt)
+{
+ struct iris_hfi_buffer *buffer;
+
+ if (pkt->payload_info == HFI_PAYLOAD_NONE)
+ return 0;
+
+ if (!iris_hfi_gen2_validate_packet_payload(pkt)) {
+ iris_inst_change_state(inst, IRIS_INST_ERROR);
+ return 0;
+ }
+
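+ /* The buffer descriptor payload immediately follows the packet header */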
+ buffer = (struct iris_hfi_buffer *)((u8 *)pkt + sizeof(*pkt));
+ if (!iris_hfi_gen2_is_valid_hfi_buffer_type(buffer->type))
+ return 0;
+
+ if (!iris_hfi_gen2_is_valid_hfi_port(pkt->port, buffer->type))
+ return 0;
+
+ if (buffer->type == HFI_BUFFER_BITSTREAM)
+ return iris_hfi_gen2_handle_input_buffer(inst, buffer);
+ else if (buffer->type == HFI_BUFFER_RAW)
+ return iris_hfi_gen2_handle_output_buffer(inst, buffer);
+ else
+ return iris_hfi_gen2_handle_release_internal_buffer(inst, buffer);
+}
+
+static int iris_hfi_gen2_handle_session_drain(struct iris_inst *inst,
+ struct iris_hfi_packet *pkt)
+{
+ int ret = 0;
+
+ if (!(pkt->flags & HFI_FW_FLAGS_SUCCESS)) {
+ iris_inst_change_state(inst, IRIS_INST_ERROR);
+ return 0;
+ }
+
+ if (inst->sub_state & IRIS_INST_SUB_DRAIN)
+ ret = iris_inst_change_sub_state(inst, 0, IRIS_INST_SUB_INPUT_PAUSE);
+
+ return ret;
+}
+
+static void iris_hfi_gen2_read_input_subcr_params(struct iris_inst *inst)
+{
+ struct iris_inst_hfi_gen2 *inst_hfi_gen2 = to_iris_inst_hfi_gen2(inst);
+ struct v4l2_pix_format_mplane *pixmp_ip = &inst->fmt_src->fmt.pix_mp;
+ struct v4l2_pix_format_mplane *pixmp_op = &inst->fmt_dst->fmt.pix_mp;
+ u32 primaries, matrix_coeff, transfer_char;
+ struct hfi_subscription_params subsc_params;
+ u32 colour_description_present_flag;
+ u32 video_signal_type_present_flag;
+ struct iris_core *core = inst->core;
+ u32 full_range, width, height;
+ struct vb2_queue *dst_q;
+ struct v4l2_ctrl *ctrl;
+
+ subsc_params = inst_hfi_gen2->src_subcr_params;
+ width = (subsc_params.bitstream_resolution &
+ HFI_BITMASK_BITSTREAM_WIDTH) >> 16;
+ height = subsc_params.bitstream_resolution &
+ HFI_BITMASK_BITSTREAM_HEIGHT;
+
+ pixmp_ip->width = width;
+ pixmp_ip->height = height;
+
+ pixmp_op->width = ALIGN(width, 128);
+ pixmp_op->height = ALIGN(height, 32);
+ pixmp_op->plane_fmt[0].bytesperline = ALIGN(width, 128);
+ pixmp_op->plane_fmt[0].sizeimage = iris_get_buffer_size(inst, BUF_OUTPUT);
+
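+ /*
+ * color_info packs the video signal description: matrix coefficients
+ * in bits [7:0], transfer characteristics in [15:8], colour primaries
+ * in [23:16], plus presence and full-range flags in the upper bits.
+ */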
+ matrix_coeff = subsc_params.color_info & 0xFF;
+ transfer_char = (subsc_params.color_info & 0xFF00) >> 8;
+ primaries = (subsc_params.color_info & 0xFF0000) >> 16;
+ colour_description_present_flag =
+ (subsc_params.color_info & 0x1000000) >> 24;
+ full_range = (subsc_params.color_info & 0x2000000) >> 25;
+ video_signal_type_present_flag =
+ (subsc_params.color_info & 0x20000000) >> 29;
+
+ pixmp_op->colorspace = V4L2_COLORSPACE_DEFAULT;
+ pixmp_op->xfer_func = V4L2_XFER_FUNC_DEFAULT;
+ pixmp_op->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
+ pixmp_op->quantization = V4L2_QUANTIZATION_DEFAULT;
+
+ if (video_signal_type_present_flag) {
+ pixmp_op->quantization =
+ full_range ?
+ V4L2_QUANTIZATION_FULL_RANGE :
+ V4L2_QUANTIZATION_LIM_RANGE;
+ if (colour_description_present_flag) {
+ pixmp_op->colorspace =
+ iris_hfi_get_v4l2_color_primaries(primaries);
+ pixmp_op->xfer_func =
+ iris_hfi_get_v4l2_transfer_char(transfer_char);
+ pixmp_op->ycbcr_enc =
+ iris_hfi_get_v4l2_matrix_coefficients(matrix_coeff);
+ }
+ }
+
+ pixmp_ip->colorspace = pixmp_op->colorspace;
+ pixmp_ip->xfer_func = pixmp_op->xfer_func;
+ pixmp_ip->ycbcr_enc = pixmp_op->ycbcr_enc;
+ pixmp_ip->quantization = pixmp_op->quantization;
+
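+ /*
+ * crop_offsets[0] packs left/top and crop_offsets[1] packs right/bottom
+ * offsets, each as (horizontal << 16 | vertical).
+ */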
+ inst->crop.top = subsc_params.crop_offsets[0] & 0xFFFF;
+ inst->crop.left = (subsc_params.crop_offsets[0] >> 16) & 0xFFFF;
+ inst->crop.height = pixmp_ip->height -
+ (subsc_params.crop_offsets[1] & 0xFFFF) - inst->crop.top;
+ inst->crop.width = pixmp_ip->width -
+ ((subsc_params.crop_offsets[1] >> 16) & 0xFFFF) - inst->crop.left;
+
+ inst->fw_caps[PROFILE].value = subsc_params.profile;
+ inst->fw_caps[LEVEL].value = subsc_params.level;
+ inst->fw_caps[POC].value = subsc_params.pic_order_cnt;
+
+ if (subsc_params.bit_depth != BIT_DEPTH_8 ||
+ !(subsc_params.coded_frames & HFI_BITMASK_FRAME_MBS_ONLY_FLAG)) {
+ dev_err(core->dev, "unsupported content, bit depth: %x, pic_struct = %x\n",
+ subsc_params.bit_depth, subsc_params.coded_frames);
+ iris_inst_change_state(inst, IRIS_INST_ERROR);
+ }
+
+ inst->fw_min_count = subsc_params.fw_min_count;
+ inst->buffers[BUF_OUTPUT].min_count = iris_vpu_buf_count(inst, BUF_OUTPUT);
+ inst->buffers[BUF_OUTPUT].size = pixmp_op->plane_fmt[0].sizeimage;
+ ctrl = v4l2_ctrl_find(&inst->ctrl_handler, V4L2_CID_MIN_BUFFERS_FOR_CAPTURE);
+ if (ctrl)
+ v4l2_ctrl_s_ctrl(ctrl, inst->buffers[BUF_OUTPUT].min_count);
+
+ dst_q = v4l2_m2m_get_dst_vq(inst->m2m_ctx);
+ dst_q->min_reqbufs_allocation = inst->buffers[BUF_OUTPUT].min_count;
+}
+
+static int iris_hfi_gen2_handle_src_change(struct iris_inst *inst,
+ struct iris_hfi_packet *pkt)
+{
+ int ret;
+
+ if (pkt->port != HFI_PORT_BITSTREAM)
+ return 0;
+
+ ret = iris_inst_sub_state_change_drc(inst);
+ if (ret)
+ return ret;
+
+ iris_hfi_gen2_read_input_subcr_params(inst);
+ iris_vdec_src_change(inst);
+
+ return 0;
+}
+
+static int iris_hfi_gen2_handle_session_command(struct iris_inst *inst,
+ struct iris_hfi_packet *pkt)
+{
+ int ret = 0;
+
+ switch (pkt->type) {
+ case HFI_CMD_CLOSE:
+ iris_hfi_gen2_handle_session_close(inst, pkt);
+ break;
+ case HFI_CMD_STOP:
+ iris_hfi_gen2_handle_session_stop(inst, pkt);
+ break;
+ case HFI_CMD_BUFFER:
+ ret = iris_hfi_gen2_handle_session_buffer(inst, pkt);
+ break;
+ case HFI_CMD_SETTINGS_CHANGE:
+ ret = iris_hfi_gen2_handle_src_change(inst, pkt);
+ break;
+ case HFI_CMD_DRAIN:
+ ret = iris_hfi_gen2_handle_session_drain(inst, pkt);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+static int iris_hfi_gen2_handle_session_property(struct iris_inst *inst,
+ struct iris_hfi_packet *pkt)
+{
+ struct iris_inst_hfi_gen2 *inst_hfi_gen2 = to_iris_inst_hfi_gen2(inst);
+
+ if (pkt->port != HFI_PORT_BITSTREAM)
+ return 0;
+
+ if (pkt->flags & HFI_FW_FLAGS_INFORMATION)
+ return 0;
+
+ switch (pkt->type) {
+ case HFI_PROP_BITSTREAM_RESOLUTION:
+ inst_hfi_gen2->src_subcr_params.bitstream_resolution = pkt->payload[0];
+ break;
+ case HFI_PROP_CROP_OFFSETS:
+ inst_hfi_gen2->src_subcr_params.crop_offsets[0] = pkt->payload[0];
+ inst_hfi_gen2->src_subcr_params.crop_offsets[1] = pkt->payload[1];
+ break;
+ case HFI_PROP_CODED_FRAMES:
+ inst_hfi_gen2->src_subcr_params.coded_frames = pkt->payload[0];
+ break;
+ case HFI_PROP_BUFFER_FW_MIN_OUTPUT_COUNT:
+ inst_hfi_gen2->src_subcr_params.fw_min_count = pkt->payload[0];
+ break;
+ case HFI_PROP_PIC_ORDER_CNT_TYPE:
+ inst_hfi_gen2->src_subcr_params.pic_order_cnt = pkt->payload[0];
+ break;
+ case HFI_PROP_SIGNAL_COLOR_INFO:
+ inst_hfi_gen2->src_subcr_params.color_info = pkt->payload[0];
+ break;
+ case HFI_PROP_PROFILE:
+ inst_hfi_gen2->src_subcr_params.profile = pkt->payload[0];
+ break;
+ case HFI_PROP_LEVEL:
+ inst_hfi_gen2->src_subcr_params.level = pkt->payload[0];
+ break;
+ case HFI_PROP_PICTURE_TYPE:
+ inst_hfi_gen2->hfi_frame_info.picture_type = pkt->payload[0];
+ break;
+ case HFI_PROP_NO_OUTPUT:
+ inst_hfi_gen2->hfi_frame_info.no_output = 1;
+ break;
+ case HFI_PROP_QUALITY_MODE:
+ case HFI_PROP_STAGE:
+ case HFI_PROP_PIPE:
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int iris_hfi_gen2_handle_image_version_property(struct iris_core *core,
+ struct iris_hfi_packet *pkt)
+{
+ u8 *str_image_version = (u8 *)pkt + sizeof(*pkt);
+ u32 req_bytes = pkt->size - sizeof(*pkt);
+ char fw_version[IRIS_FW_VERSION_LENGTH];
+ u32 i;
+
+ if (req_bytes < IRIS_FW_VERSION_LENGTH - 1)
+ return -EINVAL;
+
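+ /* Replace embedded NULs with spaces so the whole version string prints */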
+ for (i = 0; i < IRIS_FW_VERSION_LENGTH - 1; i++) {
+ if (str_image_version[i] != '\0')
+ fw_version[i] = str_image_version[i];
+ else
+ fw_version[i] = ' ';
+ }
+ fw_version[i] = '\0';
+ dev_dbg(core->dev, "firmware version: %s\n", fw_version);
+
+ return 0;
+}
+
+static int iris_hfi_gen2_handle_system_property(struct iris_core *core,
+ struct iris_hfi_packet *pkt)
+{
+ switch (pkt->type) {
+ case HFI_PROP_IMAGE_VERSION:
+ return iris_hfi_gen2_handle_image_version_property(core, pkt);
+ default:
+ return 0;
+ }
+}
+
+static int iris_hfi_gen2_handle_system_response(struct iris_core *core,
+ struct iris_hfi_header *hdr)
+{
+ u8 *start_pkt = (u8 *)((u8 *)hdr + sizeof(*hdr));
+ struct iris_hfi_packet *packet;
+ u32 i, j;
+ u8 *pkt;
+ int ret;
+ static const struct iris_hfi_gen2_core_hfi_range range[] = {
+ {HFI_SYSTEM_ERROR_BEGIN, HFI_SYSTEM_ERROR_END, iris_hfi_gen2_handle_system_error },
+ {HFI_PROP_BEGIN, HFI_PROP_END, iris_hfi_gen2_handle_system_property },
+ {HFI_CMD_BEGIN, HFI_CMD_END, iris_hfi_gen2_handle_system_init },
+ };
+
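+ /*
+ * Walk the packets once per handler range; a packet flagged with a
+ * system error terminates processing of the remaining packets.
+ */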
+ for (i = 0; i < ARRAY_SIZE(range); i++) {
+ pkt = start_pkt;
+ for (j = 0; j < hdr->num_packets; j++) {
+ packet = (struct iris_hfi_packet *)pkt;
+ if (packet->flags & HFI_FW_FLAGS_SYSTEM_ERROR) {
+ ret = iris_hfi_gen2_handle_system_error(core, packet);
+ return ret;
+ }
+
+ if (packet->type > range[i].begin && packet->type < range[i].end) {
+ ret = range[i].handle(core, packet);
+ if (ret)
+ return ret;
+
+ if (packet->type > HFI_SYSTEM_ERROR_BEGIN &&
+ packet->type < HFI_SYSTEM_ERROR_END)
+ return 0;
+ }
+ pkt += packet->size;
+ }
+ }
+
+ return 0;
+}
+
+static void iris_hfi_gen2_init_src_change_param(struct iris_inst *inst)
+{
+ struct iris_inst_hfi_gen2 *inst_hfi_gen2 = to_iris_inst_hfi_gen2(inst);
+ struct v4l2_pix_format_mplane *pixmp_ip = &inst->fmt_src->fmt.pix_mp;
+ struct v4l2_pix_format_mplane *pixmp_op = &inst->fmt_dst->fmt.pix_mp;
+ u32 bottom_offset = (pixmp_ip->height - inst->crop.height);
+ u32 right_offset = (pixmp_ip->width - inst->crop.width);
+ struct hfi_subscription_params *subsc_params;
+ u32 primaries, matrix_coeff, transfer_char;
+ u32 colour_description_present_flag = 0;
+ u32 video_signal_type_present_flag = 0;
+ u32 full_range, video_format = 0;
+ u32 left_offset = inst->crop.left;
+ u32 top_offset = inst->crop.top;
+
+ subsc_params = &inst_hfi_gen2->src_subcr_params;
+ subsc_params->bitstream_resolution =
+ pixmp_ip->width << 16 | pixmp_ip->height;
+ subsc_params->crop_offsets[0] =
+ left_offset << 16 | top_offset;
+ subsc_params->crop_offsets[1] =
+ right_offset << 16 | bottom_offset;
+ subsc_params->fw_min_count = inst->buffers[BUF_OUTPUT].min_count;
+
+ primaries = iris_hfi_gen2_get_color_primaries(pixmp_op->colorspace);
+ matrix_coeff = iris_hfi_gen2_get_matrix_coefficients(pixmp_op->ycbcr_enc);
+ transfer_char = iris_hfi_gen2_get_transfer_char(pixmp_op->xfer_func);
+ full_range = pixmp_op->quantization == V4L2_QUANTIZATION_FULL_RANGE ? 1 : 0;
+ subsc_params->color_info =
+ iris_hfi_gen2_get_color_info(matrix_coeff, transfer_char, primaries,
+ colour_description_present_flag,
+ full_range, video_format,
+ video_signal_type_present_flag);
+
+ subsc_params->profile = inst->fw_caps[PROFILE].value;
+ subsc_params->level = inst->fw_caps[LEVEL].value;
+ subsc_params->pic_order_cnt = inst->fw_caps[POC].value;
+ subsc_params->bit_depth = inst->fw_caps[BIT_DEPTH].value;
+ if (inst->fw_caps[CODED_FRAMES].value ==
+ CODED_FRAMES_PROGRESSIVE)
+ subsc_params->coded_frames = HFI_BITMASK_FRAME_MBS_ONLY_FLAG;
+ else
+ subsc_params->coded_frames = 0;
+}
+
+static int iris_hfi_gen2_handle_session_response(struct iris_core *core,
+ struct iris_hfi_header *hdr)
+{
+ u8 *pkt = (u8 *)((u8 *)hdr + sizeof(*hdr));
+ struct iris_inst_hfi_gen2 *inst_hfi_gen2;
+ struct iris_hfi_packet *packet;
+ struct iris_inst *inst;
+ bool dequeue = false;
+ int ret = 0;
+ u32 i, j;
+ static const struct iris_hfi_gen2_inst_hfi_range range[] = {
+ {HFI_SESSION_ERROR_BEGIN, HFI_SESSION_ERROR_END,
+ iris_hfi_gen2_handle_session_error},
+ {HFI_INFORMATION_BEGIN, HFI_INFORMATION_END,
+ iris_hfi_gen2_handle_session_info},
+ {HFI_PROP_BEGIN, HFI_PROP_END,
+ iris_hfi_gen2_handle_session_property},
+ {HFI_CMD_BEGIN, HFI_CMD_END,
+ iris_hfi_gen2_handle_session_command },
+ };
+
+ inst = iris_get_instance(core, hdr->session_id);
+ if (!inst)
+ return -EINVAL;
+
+ mutex_lock(&inst->lock);
+ inst_hfi_gen2 = to_iris_inst_hfi_gen2(inst);
+ memset(&inst_hfi_gen2->hfi_frame_info, 0, sizeof(struct iris_hfi_frame_info));
+
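+ /*
+ * If a settings change is pending on the bitstream port, seed the
+ * source subscription parameters from the current instance format
+ * before the property packets in this response update them.
+ */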
+ for (i = 0; i < hdr->num_packets; i++) {
+ packet = (struct iris_hfi_packet *)pkt;
+ if (packet->type == HFI_CMD_SETTINGS_CHANGE) {
+ if (packet->port == HFI_PORT_BITSTREAM) {
+ iris_hfi_gen2_init_src_change_param(inst);
+ break;
+ }
+ }
+ pkt += packet->size;
+ }
+
+ pkt = (u8 *)((u8 *)hdr + sizeof(*hdr));
+ for (i = 0; i < ARRAY_SIZE(range); i++) {
+ pkt = (u8 *)((u8 *)hdr + sizeof(*hdr));
+ for (j = 0; j < hdr->num_packets; j++) {
+ packet = (struct iris_hfi_packet *)pkt;
+ if (packet->flags & HFI_FW_FLAGS_SESSION_ERROR)
+ iris_hfi_gen2_handle_session_error(inst, packet);
+
+ if (packet->type > range[i].begin && packet->type < range[i].end) {
+ dequeue |= (packet->type == HFI_CMD_BUFFER);
+ ret = range[i].handle(inst, packet);
+ if (ret)
+ iris_inst_change_state(inst, IRIS_INST_ERROR);
+ }
+ pkt += packet->size;
+ }
+ }
+
+ if (dequeue)
+ iris_hfi_gen2_handle_dequeue_buffers(inst);
+
+ mutex_unlock(&inst->lock);
+
+ return ret;
+}
+
+static int iris_hfi_gen2_handle_response(struct iris_core *core, void *response)
+{
+ struct iris_hfi_header *hdr = (struct iris_hfi_header *)response;
+ int ret;
+
+ ret = iris_hfi_gen2_validate_hdr_packet(core, hdr);
+ if (ret)
+ return iris_hfi_gen2_handle_system_error(core, NULL);
+
+ if (!hdr->session_id)
+ return iris_hfi_gen2_handle_system_response(core, hdr);
+ else
+ return iris_hfi_gen2_handle_session_response(core, hdr);
+}
+
+static void iris_hfi_gen2_flush_debug_queue(struct iris_core *core, u8 *packet)
+{
+ struct hfi_debug_header *pkt;
+ u8 *log;
+
+ while (!iris_hfi_queue_dbg_read(core, packet)) {
+ pkt = (struct hfi_debug_header *)packet;
+
+ if (pkt->size < sizeof(*pkt))
+ continue;
+
+ if (pkt->size >= IFACEQ_CORE_PKT_SIZE)
+ continue;
+
+ packet[pkt->size] = '\0';
+ log = (u8 *)packet + sizeof(*pkt) + 1;
+ dev_dbg(core->dev, "%s", log);
+ }
+}
+
+static void iris_hfi_gen2_response_handler(struct iris_core *core)
+{
+ if (iris_vpu_watchdog(core, core->intr_status)) {
+ struct iris_hfi_packet pkt = {.type = HFI_SYS_ERROR_WD_TIMEOUT};
+
+ dev_err(core->dev, "cpu watchdog error received\n");
+ core->state = IRIS_CORE_ERROR;
+ iris_hfi_gen2_handle_system_error(core, &pkt);
+
+ return;
+ }
+
+ memset(core->response_packet, 0, sizeof(struct iris_hfi_header));
+ while (!iris_hfi_queue_msg_read(core, core->response_packet)) {
+ iris_hfi_gen2_handle_response(core, core->response_packet);
+ memset(core->response_packet, 0, sizeof(struct iris_hfi_header));
+ }
+
+ iris_hfi_gen2_flush_debug_queue(core, core->response_packet);
+}
+
+static const struct iris_hfi_response_ops iris_hfi_gen2_response_ops = {
+ .hfi_response_handler = iris_hfi_gen2_response_handler,
+};
+
+void iris_hfi_gen2_response_ops_init(struct iris_core *core)
+{
+ core->hfi_response_ops = &iris_hfi_gen2_response_ops;
+}
diff --git a/drivers/media/platform/qcom/iris/iris_hfi_queue.c b/drivers/media/platform/qcom/iris/iris_hfi_queue.c
new file mode 100644
index 000000000000..fac7df0c4d1a
--- /dev/null
+++ b/drivers/media/platform/qcom/iris/iris_hfi_queue.c
@@ -0,0 +1,318 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/pm_runtime.h>
+
+#include "iris_core.h"
+#include "iris_hfi_queue.h"
+#include "iris_vpu_common.h"
+
+static int iris_hfi_queue_write(struct iris_iface_q_info *qinfo, void *packet, u32 packet_size)
+{
+ struct iris_hfi_queue_header *queue = qinfo->qhdr;
+ u32 write_idx = queue->write_idx * sizeof(u32);
+ u32 read_idx = queue->read_idx * sizeof(u32);
+ u32 empty_space, new_write_idx, residue;
+ u32 *write_ptr;
+
+ if (write_idx < read_idx)
+ empty_space = read_idx - write_idx;
+ else
+ empty_space = IFACEQ_QUEUE_SIZE - (write_idx - read_idx);
+ if (empty_space < packet_size)
+ return -ENOSPC;
+
+ queue->tx_req = 0;
+
+ new_write_idx = write_idx + packet_size;
+ write_ptr = (u32 *)((u8 *)qinfo->kernel_vaddr + write_idx);
+
+ if (write_ptr < (u32 *)qinfo->kernel_vaddr ||
+ write_ptr > (u32 *)(qinfo->kernel_vaddr +
+ IFACEQ_QUEUE_SIZE))
+ return -EINVAL;
+
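+ /* Copy in one go, or split the copy where the ring buffer wraps around */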
+ if (new_write_idx < IFACEQ_QUEUE_SIZE) {
+ memcpy(write_ptr, packet, packet_size);
+ } else {
+ residue = new_write_idx - IFACEQ_QUEUE_SIZE;
+ memcpy(write_ptr, packet, (packet_size - residue));
+ memcpy(qinfo->kernel_vaddr,
+ packet + (packet_size - residue), residue);
+ new_write_idx = residue;
+ }
+
+ /* Make sure packet is written before updating the write index */
+ mb();
+ queue->write_idx = new_write_idx / sizeof(u32);
+
+ /* Make sure write index is updated before an interrupt is raised */
+ mb();
+
+ return 0;
+}
+
+static int iris_hfi_queue_read(struct iris_iface_q_info *qinfo, void *packet)
+{
+ struct iris_hfi_queue_header *queue = qinfo->qhdr;
+ u32 write_idx = queue->write_idx * sizeof(u32);
+ u32 read_idx = queue->read_idx * sizeof(u32);
+ u32 packet_size, receive_request = 0;
+ u32 new_read_idx, residue;
+ u32 *read_ptr;
+ int ret = 0;
+
+ if (queue->queue_type == IFACEQ_MSGQ_ID)
+ receive_request = 1;
+
+ if (read_idx == write_idx) {
+ queue->rx_req = receive_request;
+ /* Ensure qhdr is updated in main memory */
+ mb();
+ return -ENODATA;
+ }
+
+ read_ptr = qinfo->kernel_vaddr + read_idx;
+ if (read_ptr < (u32 *)qinfo->kernel_vaddr ||
+ read_ptr > (u32 *)(qinfo->kernel_vaddr +
+ IFACEQ_QUEUE_SIZE - sizeof(*read_ptr)))
+ return -ENODATA;
+
+ packet_size = *read_ptr;
+ if (!packet_size)
+ return -EINVAL;
+
+ new_read_idx = read_idx + packet_size;
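+ /*
+ * Copy the packet out, splitting the copy where the ring buffer wraps;
+ * an oversized packet makes the read index jump to the write index and
+ * is reported as -EBADMSG.
+ */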
+ if (packet_size <= IFACEQ_CORE_PKT_SIZE) {
+ if (new_read_idx < IFACEQ_QUEUE_SIZE) {
+ memcpy(packet, read_ptr, packet_size);
+ } else {
+ residue = new_read_idx - IFACEQ_QUEUE_SIZE;
+ memcpy(packet, read_ptr, (packet_size - residue));
+ memcpy((packet + (packet_size - residue)),
+ qinfo->kernel_vaddr, residue);
+ new_read_idx = residue;
+ }
+ } else {
+ new_read_idx = write_idx;
+ ret = -EBADMSG;
+ }
+
+ queue->rx_req = receive_request;
+
+ queue->read_idx = new_read_idx / sizeof(u32);
+ /* Ensure qhdr is updated in main memory */
+ mb();
+
+ return ret;
+}
+
+int iris_hfi_queue_cmd_write_locked(struct iris_core *core, void *pkt, u32 pkt_size)
+{
+ struct iris_iface_q_info *q_info = &core->command_queue;
+
+ if (core->state == IRIS_CORE_ERROR)
+ return -EINVAL;
+
+ if (!iris_hfi_queue_write(q_info, pkt, pkt_size)) {
+ iris_vpu_raise_interrupt(core);
+ } else {
+ dev_err(core->dev, "queue full\n");
+ return -ENODATA;
+ }
+
+ return 0;
+}
+
+int iris_hfi_queue_cmd_write(struct iris_core *core, void *pkt, u32 pkt_size)
+{
+ int ret;
+
+ ret = pm_runtime_resume_and_get(core->dev);
+ if (ret < 0)
+ goto exit;
+
+ mutex_lock(&core->lock);
+ ret = iris_hfi_queue_cmd_write_locked(core, pkt, pkt_size);
+ if (ret) {
+ mutex_unlock(&core->lock);
+ goto exit;
+ }
+ mutex_unlock(&core->lock);
+
+ pm_runtime_mark_last_busy(core->dev);
+ pm_runtime_put_autosuspend(core->dev);
+
+ return 0;
+
+exit:
+ pm_runtime_put_sync(core->dev);
+
+ return ret;
+}
+
+int iris_hfi_queue_msg_read(struct iris_core *core, void *pkt)
+{
+ struct iris_iface_q_info *q_info = &core->message_queue;
+ int ret = 0;
+
+ mutex_lock(&core->lock);
+ if (core->state != IRIS_CORE_INIT) {
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ if (iris_hfi_queue_read(q_info, pkt)) {
+ ret = -ENODATA;
+ goto unlock;
+ }
+
+unlock:
+ mutex_unlock(&core->lock);
+
+ return ret;
+}
+
+int iris_hfi_queue_dbg_read(struct iris_core *core, void *pkt)
+{
+ struct iris_iface_q_info *q_info = &core->debug_queue;
+ int ret = 0;
+
+ mutex_lock(&core->lock);
+ if (core->state != IRIS_CORE_INIT) {
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ if (iris_hfi_queue_read(q_info, pkt)) {
+ ret = -ENODATA;
+ goto unlock;
+ }
+
+unlock:
+ mutex_unlock(&core->lock);
+
+ return ret;
+}
+
+static void iris_hfi_queue_set_header(struct iris_core *core, u32 queue_id,
+ struct iris_iface_q_info *iface_q)
+{
+ iface_q->qhdr->status = 0x1;
+ iface_q->qhdr->start_addr = iface_q->device_addr;
+ iface_q->qhdr->header_type = IFACEQ_DFLT_QHDR;
+ iface_q->qhdr->queue_type = queue_id;
+ iface_q->qhdr->q_size = IFACEQ_QUEUE_SIZE / sizeof(u32);
+ iface_q->qhdr->pkt_size = 0; /* variable packet size */
+ iface_q->qhdr->rx_wm = 0x1;
+ iface_q->qhdr->tx_wm = 0x1;
+ iface_q->qhdr->rx_req = 0x1;
+ iface_q->qhdr->tx_req = 0x0;
+ iface_q->qhdr->rx_irq_status = 0x0;
+ iface_q->qhdr->tx_irq_status = 0x0;
+ iface_q->qhdr->read_idx = 0x0;
+ iface_q->qhdr->write_idx = 0x0;
+
+ /*
+ * Set the receive request to zero on the debug queue as there is no
+ * need for an interrupt from the video hardware for debug messages
+ */
+ if (queue_id == IFACEQ_DBGQ_ID)
+ iface_q->qhdr->rx_req = 0;
+}
+
+static void
+iris_hfi_queue_init(struct iris_core *core, u32 queue_id, struct iris_iface_q_info *iface_q)
+{
+ struct iris_hfi_queue_table_header *q_tbl_hdr = core->iface_q_table_vaddr;
+ u32 offset = sizeof(*q_tbl_hdr) + (queue_id * IFACEQ_QUEUE_SIZE);
+
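+ /* The three queues are laid out back to back after the queue table header */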
+ iface_q->device_addr = core->iface_q_table_daddr + offset;
+ iface_q->kernel_vaddr =
+ (void *)((char *)core->iface_q_table_vaddr + offset);
+ iface_q->qhdr = &q_tbl_hdr->q_hdr[queue_id];
+
+ iris_hfi_queue_set_header(core, queue_id, iface_q);
+}
+
+static void iris_hfi_queue_deinit(struct iris_iface_q_info *iface_q)
+{
+ iface_q->qhdr = NULL;
+ iface_q->kernel_vaddr = NULL;
+ iface_q->device_addr = 0;
+}
+
+int iris_hfi_queues_init(struct iris_core *core)
+{
+ struct iris_hfi_queue_table_header *q_tbl_hdr;
+ u32 queue_size;
+
+ /* Iris hardware requires 4K queue alignment */
+ queue_size = ALIGN((sizeof(*q_tbl_hdr) + (IFACEQ_QUEUE_SIZE * IFACEQ_NUMQ)), SZ_4K);
+ core->iface_q_table_vaddr = dma_alloc_attrs(core->dev, queue_size,
+ &core->iface_q_table_daddr,
+ GFP_KERNEL, DMA_ATTR_WRITE_COMBINE);
+ if (!core->iface_q_table_vaddr) {
+ dev_err(core->dev, "queues alloc and map failed\n");
+ return -ENOMEM;
+ }
+
+ core->sfr_vaddr = dma_alloc_attrs(core->dev, SFR_SIZE,
+ &core->sfr_daddr,
+ GFP_KERNEL, DMA_ATTR_WRITE_COMBINE);
+ if (!core->sfr_vaddr) {
+ dev_err(core->dev, "sfr alloc and map failed\n");
+ dma_free_attrs(core->dev, queue_size, core->iface_q_table_vaddr,
+ core->iface_q_table_daddr, DMA_ATTR_WRITE_COMBINE);
+ return -ENOMEM;
+ }
+
+ iris_hfi_queue_init(core, IFACEQ_CMDQ_ID, &core->command_queue);
+ iris_hfi_queue_init(core, IFACEQ_MSGQ_ID, &core->message_queue);
+ iris_hfi_queue_init(core, IFACEQ_DBGQ_ID, &core->debug_queue);
+
+ q_tbl_hdr = (struct iris_hfi_queue_table_header *)core->iface_q_table_vaddr;
+ q_tbl_hdr->version = 0;
+ q_tbl_hdr->device_addr = (void *)core;
+ strscpy(q_tbl_hdr->name, "iris-hfi-queues", sizeof(q_tbl_hdr->name));
+ q_tbl_hdr->size = sizeof(*q_tbl_hdr);
+ q_tbl_hdr->qhdr0_offset = sizeof(*q_tbl_hdr) -
+ (IFACEQ_NUMQ * sizeof(struct iris_hfi_queue_header));
+ q_tbl_hdr->qhdr_size = sizeof(q_tbl_hdr->q_hdr[0]);
+ q_tbl_hdr->num_q = IFACEQ_NUMQ;
+ q_tbl_hdr->num_active_q = IFACEQ_NUMQ;
+
+ /* Write sfr size in first word to be used by firmware */
+ *((u32 *)core->sfr_vaddr) = SFR_SIZE;
+
+ return 0;
+}
+
+void iris_hfi_queues_deinit(struct iris_core *core)
+{
+ u32 queue_size;
+
+ if (!core->iface_q_table_vaddr)
+ return;
+
+ iris_hfi_queue_deinit(&core->debug_queue);
+ iris_hfi_queue_deinit(&core->message_queue);
+ iris_hfi_queue_deinit(&core->command_queue);
+
+ dma_free_attrs(core->dev, SFR_SIZE, core->sfr_vaddr,
+ core->sfr_daddr, DMA_ATTR_WRITE_COMBINE);
+
+ core->sfr_vaddr = NULL;
+ core->sfr_daddr = 0;
+
+ queue_size = ALIGN(sizeof(struct iris_hfi_queue_table_header) +
+ (IFACEQ_QUEUE_SIZE * IFACEQ_NUMQ), SZ_4K);
+
+ dma_free_attrs(core->dev, queue_size, core->iface_q_table_vaddr,
+ core->iface_q_table_daddr, DMA_ATTR_WRITE_COMBINE);
+
+ core->iface_q_table_vaddr = NULL;
+ core->iface_q_table_daddr = 0;
+}
diff --git a/drivers/media/platform/qcom/iris/iris_hfi_queue.h b/drivers/media/platform/qcom/iris/iris_hfi_queue.h
new file mode 100644
index 000000000000..2174fc5ce618
--- /dev/null
+++ b/drivers/media/platform/qcom/iris/iris_hfi_queue.h
@@ -0,0 +1,182 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef __IRIS_HFI_QUEUE_H__
+#define __IRIS_HFI_QUEUE_H__
+
+struct iris_core;
+
+/*
+ * A maximum of 64 buffers (32 input buffers and 32 output buffers)
+ * can be queued by the V4L2 framework at any given time.
+ */
+#define IFACEQ_MAX_BUF_COUNT 64
+/*
+ * A maximum of 16 parallel sessions is supported.
+ * This value is used to calculate the size of
+ * each individual shared queue.
+ */
+#define IFACE_MAX_PARALLEL_SESSIONS 16
+#define IFACEQ_DFLT_QHDR 0x0101
+#define IFACEQ_MAX_PKT_SIZE 1024 /* Maximum size of a packet in the queue */
+
+/*
+ * SFR: Subsystem Failure Reason
+ * When the hardware goes into a bad state, the firmware fills this memory
+ * and the driver reads the actual failure reason from this SFR buffer.
+ */
+#define SFR_SIZE SZ_4K /* Iris hardware requires 4K queue alignment */
+
+#define IFACEQ_QUEUE_SIZE (IFACEQ_MAX_PKT_SIZE * \
+ IFACEQ_MAX_BUF_COUNT * IFACE_MAX_PARALLEL_SESSIONS)
+
+/*
+ * Memory layout of the shared queues:
+ *
+ * ||=================|| ^ ^ ^
+ * || || | | |
+ * || Queue Table || 288 Bytes | |
+ * || Header || | | |
+ * || || | | |
+ * ||-----------------|| V | |
+ * ||-----------------|| ^ | |
+ * || || | | |
+ * || Command Queue || 56 Bytes | |
+ * || Header || | | |
+ * || || | | |
+ * ||-----------------|| V 456 Bytes |
+ * ||-----------------|| ^ | |
+ * || || | | |
+ * || Message Queue || 56 Bytes | |
+ * || Header || | | |
+ * || || | | |
+ * ||-----------------|| V | Buffer size aligned to 4k
+ * ||-----------------|| ^ | Overall Queue Size = 2,404 KB
+ * || || | | |
+ * || Debug Queue || 56 Bytes | |
+ * || Header || | | |
+ * || || | | |
+ * ||=================|| V V |
+ * ||=================|| ^ |
+ * || || | |
+ * || Command || 800 KB |
+ * || Queue || | |
+ * || || | |
+ * ||=================|| V |
+ * ||=================|| ^ |
+ * || || | |
+ * || Message || 800 KB |
+ * || Queue || | |
+ * || || | |
+ * ||=================|| V |
+ * ||=================|| ^ |
+ * || || | |
+ * || Debug || 800 KB |
+ * || Queue || | |
+ * || || | |
+ * ||=================|| V |
+ * || || |
+ * ||=================|| V
+ */
+
+/*
+ * Shared queues are used for communication between the driver and firmware.
+ * There are three types of queues:
+ * Command queue - used by the driver to send commands to the firmware.
+ * Message queue - used by the firmware to send responses to the driver.
+ * Debug queue - used by the firmware to write debug messages.
+ */
+
+/* Host-firmware shared queue ids */
+enum iris_iface_queue {
+ IFACEQ_CMDQ_ID,
+ IFACEQ_MSGQ_ID,
+ IFACEQ_DBGQ_ID,
+ IFACEQ_NUMQ, /* not an index */
+};
+
+/**
+ * struct iris_hfi_queue_header
+ *
+ * @status: Queue status, bits (7:0), 0x1 - active, 0x0 - inactive
+ * @start_addr: Queue start address in non cached memory
+ * @queue_type: Queue ID
+ * @header_type: Default queue header
+ * @q_size: Queue size
+ * Number of queue packets if pkt_size is non-zero
+ * Queue size in bytes if pkt_size is zero
+ * @pkt_size: Size of queue packet entries
+ * 0x0: variable queue packet size
+ * non zero: size of queue packet entry, fixed
+ * @pkt_drop_cnt: Number of packets dropped by sender
+ * @rx_wm: Receiver watermark, applicable in event driven mode
+ * @tx_wm: Sender watermark, applicable in event driven mode
+ * @rx_req: Receiver sets this bit if queue is empty
+ * @tx_req: Sender sets this bit if queue is full
+ * @rx_irq_status: Receiver sets this bit and triggers an interrupt to
+ * the sender after packets are dequeued. Sender clears this bit
+ * @tx_irq_status: Sender sets this bit and triggers an interrupt to
+ * the receiver after packets are queued. Receiver clears this bit
+ * @read_idx: Index up to which the receiver has consumed packets from the queue.
+ * @write_idx: Index up to which the sender has written packets into the queue.
+ */
+struct iris_hfi_queue_header {
+ u32 status;
+ u32 start_addr;
+ u16 queue_type;
+ u16 header_type;
+ u32 q_size;
+ u32 pkt_size;
+ u32 pkt_drop_cnt;
+ u32 rx_wm;
+ u32 tx_wm;
+ u32 rx_req;
+ u32 tx_req;
+ u32 rx_irq_status;
+ u32 tx_irq_status;
+ u32 read_idx;
+ u32 write_idx;
+};
+
+/**
+ * struct iris_hfi_queue_table_header
+ *
+ * @version: Queue table version number
+ * @size: Queue table size from version to the last parameter in the qhdr entry
+ * @qhdr0_offset: Offset to the start of first qhdr
+ * @qhdr_size: Queue header size in bytes
+ * @num_q: Total number of queues in Queue table
+ * @num_active_q: Total number of active queues
+ * @device_addr: Device address of the queue
+ * @name: Queue name in characters
+ * @q_hdr: Array of queue headers
+ */
+struct iris_hfi_queue_table_header {
+ u32 version;
+ u32 size;
+ u32 qhdr0_offset;
+ u32 qhdr_size;
+ u32 num_q;
+ u32 num_active_q;
+ void *device_addr;
+ char name[256]; /* NUL-terminated array of characters */
+ struct iris_hfi_queue_header q_hdr[IFACEQ_NUMQ];
+};
+
+struct iris_iface_q_info {
+ struct iris_hfi_queue_header *qhdr;
+ dma_addr_t device_addr;
+ void *kernel_vaddr;
+};
+
+int iris_hfi_queues_init(struct iris_core *core);
+void iris_hfi_queues_deinit(struct iris_core *core);
+
+int iris_hfi_queue_cmd_write_locked(struct iris_core *core, void *pkt, u32 pkt_size);
+int iris_hfi_queue_cmd_write(struct iris_core *core, void *pkt, u32 pkt_size);
+int iris_hfi_queue_msg_read(struct iris_core *core, void *pkt);
+int iris_hfi_queue_dbg_read(struct iris_core *core, void *pkt);
+
+#endif
diff --git a/drivers/media/platform/qcom/iris/iris_instance.h b/drivers/media/platform/qcom/iris/iris_instance.h
new file mode 100644
index 000000000000..caa3c6507006
--- /dev/null
+++ b/drivers/media/platform/qcom/iris/iris_instance.h
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef __IRIS_INSTANCE_H__
+#define __IRIS_INSTANCE_H__
+
+#include <media/v4l2-ctrls.h>
+
+#include "iris_buffer.h"
+#include "iris_core.h"
+#include "iris_utils.h"
+
+/**
+ * struct iris_inst - holds per video instance parameters
+ *
+ * @list: used to attach an instance to the core
+ * @core: pointer to core structure
+ * @session_id: id of current video session
+ * @ctx_q_lock: lock to serialize queue-related ioctls
+ * @lock: lock to serialize forward and reverse threads
+ * @fh: reference of v4l2 file handler
+ * @fmt_src: structure of v4l2_format for source
+ * @fmt_dst: structure of v4l2_format for destination
+ * @ctrl_handler: reference of v4l2 ctrl handler
+ * @crop: structure of crop info
+ * @completion: completion to wait on for firmware responses to session commands
+ * @flush_completion: completion to wait on for the firmware response to the flush command
+ * @fw_caps: array of supported instance firmware capabilities
+ * @buffers: array of different iris buffers
+ * @fw_min_count: minimum count of buffers needed by firmware
+ * @state: instance state
+ * @sub_state: instance sub state
+ * @once_per_session_set: boolean to set once per session property
+ * @max_input_data_size: max size of input data
+ * @power: structure of power info
+ * @icc_data: structure of interconnect data
+ * @m2m_dev: a reference to m2m device structure
+ * @m2m_ctx: a reference to m2m context structure
+ * @sequence_cap: a sequence counter for capture queue
+ * @sequence_out: a sequence counter for output queue
+ * @tss: timestamp metadata
+ * @metadata_idx: index for metadata buffer
+ */
+
+struct iris_inst {
+ struct list_head list;
+ struct iris_core *core;
+ u32 session_id;
+ struct mutex ctx_q_lock; /* lock to serialize queue-related ioctls */
+ struct mutex lock; /* lock to serialize forward and reverse threads */
+ struct v4l2_fh fh;
+ struct v4l2_format *fmt_src;
+ struct v4l2_format *fmt_dst;
+ struct v4l2_ctrl_handler ctrl_handler;
+ struct iris_hfi_rect_desc crop;
+ struct completion completion;
+ struct completion flush_completion;
+ struct platform_inst_fw_cap fw_caps[INST_FW_CAP_MAX];
+ struct iris_buffers buffers[BUF_TYPE_MAX];
+ u32 fw_min_count;
+ enum iris_inst_state state;
+ enum iris_inst_sub_state sub_state;
+ bool once_per_session_set;
+ size_t max_input_data_size;
+ struct iris_inst_power power;
+ struct icc_vote_data icc_data;
+ struct v4l2_m2m_dev *m2m_dev;
+ struct v4l2_m2m_ctx *m2m_ctx;
+ u32 sequence_cap;
+ u32 sequence_out;
+ struct iris_ts_metadata tss[VIDEO_MAX_FRAME];
+ u32 metadata_idx;
+};
+
+#endif
diff --git a/drivers/media/platform/qcom/iris/iris_platform_common.h b/drivers/media/platform/qcom/iris/iris_platform_common.h
new file mode 100644
index 000000000000..f6b15d2805fb
--- /dev/null
+++ b/drivers/media/platform/qcom/iris/iris_platform_common.h
@@ -0,0 +1,186 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef __IRIS_PLATFORM_COMMON_H__
+#define __IRIS_PLATFORM_COMMON_H__
+
+#include <linux/bits.h>
+
+struct iris_core;
+struct iris_inst;
+
+#define IRIS_PAS_ID 9
+#define HW_RESPONSE_TIMEOUT_VALUE (1000) /* milliseconds */
+#define AUTOSUSPEND_DELAY_VALUE (HW_RESPONSE_TIMEOUT_VALUE + 500) /* milliseconds */
+
+#define REGISTER_BIT_DEPTH(luma, chroma) ((luma) << 16 | (chroma))
+#define BIT_DEPTH_8 REGISTER_BIT_DEPTH(8, 8)
+#define CODED_FRAMES_PROGRESSIVE 0x0
+#define DEFAULT_MAX_HOST_BUF_COUNT 64
+#define DEFAULT_MAX_HOST_BURST_BUF_COUNT 256
+#define DEFAULT_FPS 30
+
+enum stage_type {
+ STAGE_1 = 1,
+ STAGE_2 = 2,
+};
+
+enum pipe_type {
+ PIPE_1 = 1,
+ PIPE_2 = 2,
+ PIPE_4 = 4,
+};
+
+extern struct iris_platform_data sm8250_data;
+extern struct iris_platform_data sm8550_data;
+
+enum platform_clk_type {
+ IRIS_AXI_CLK,
+ IRIS_CTRL_CLK,
+ IRIS_HW_CLK,
+};
+
+struct platform_clk_data {
+ enum platform_clk_type clk_type;
+ const char *clk_name;
+};
+
+struct tz_cp_config {
+ u32 cp_start;
+ u32 cp_size;
+ u32 cp_nonpixel_start;
+ u32 cp_nonpixel_size;
+};
+
+struct ubwc_config_data {
+ u32 max_channels;
+ u32 mal_length;
+ u32 highest_bank_bit;
+ u32 bank_swzl_level;
+ u32 bank_swz2_level;
+ u32 bank_swz3_level;
+ u32 bank_spreading;
+};
+
+struct platform_inst_caps {
+ u32 min_frame_width;
+ u32 max_frame_width;
+ u32 min_frame_height;
+ u32 max_frame_height;
+ u32 max_mbpf;
+ u32 mb_cycles_vsp;
+ u32 mb_cycles_vpp;
+ u32 mb_cycles_fw;
+ u32 mb_cycles_fw_vpp;
+ u32 num_comv;
+};
+
+enum platform_inst_fw_cap_type {
+ PROFILE = 1,
+ LEVEL,
+ INPUT_BUF_HOST_MAX_COUNT,
+ STAGE,
+ PIPE,
+ POC,
+ CODED_FRAMES,
+ BIT_DEPTH,
+ RAP_FRAME,
+ DEBLOCK,
+ INST_FW_CAP_MAX,
+};
+
+enum platform_inst_fw_cap_flags {
+ CAP_FLAG_DYNAMIC_ALLOWED = BIT(0),
+ CAP_FLAG_MENU = BIT(1),
+ CAP_FLAG_INPUT_PORT = BIT(2),
+ CAP_FLAG_OUTPUT_PORT = BIT(3),
+ CAP_FLAG_CLIENT_SET = BIT(4),
+ CAP_FLAG_BITMASK = BIT(5),
+ CAP_FLAG_VOLATILE = BIT(6),
+};
+
+struct platform_inst_fw_cap {
+ enum platform_inst_fw_cap_type cap_id;
+ s64 min;
+ s64 max;
+ s64 step_or_mask;
+ s64 value;
+ u32 hfi_id;
+ enum platform_inst_fw_cap_flags flags;
+ int (*set)(struct iris_inst *inst,
+ enum platform_inst_fw_cap_type cap_id);
+};
+
+struct bw_info {
+ u32 mbs_per_sec;
+ u32 bw_ddr;
+};
+
+struct iris_core_power {
+ u64 clk_freq;
+ u64 icc_bw;
+};
+
+struct iris_inst_power {
+ u64 min_freq;
+ u32 icc_bw;
+};
+
+struct icc_vote_data {
+ u32 height, width;
+ u32 fps;
+};
+
+enum platform_pm_domain_type {
+ IRIS_CTRL_POWER_DOMAIN,
+ IRIS_HW_POWER_DOMAIN,
+};
+
+struct iris_platform_data {
+ void (*init_hfi_command_ops)(struct iris_core *core);
+ void (*init_hfi_response_ops)(struct iris_core *core);
+ struct iris_inst *(*get_instance)(void);
+ const struct vpu_ops *vpu_ops;
+ void (*set_preset_registers)(struct iris_core *core);
+ const struct icc_info *icc_tbl;
+ unsigned int icc_tbl_size;
+ const struct bw_info *bw_tbl_dec;
+ unsigned int bw_tbl_dec_size;
+ const char * const *pmdomain_tbl;
+ unsigned int pmdomain_tbl_size;
+ const char * const *opp_pd_tbl;
+ unsigned int opp_pd_tbl_size;
+ const struct platform_clk_data *clk_tbl;
+ unsigned int clk_tbl_size;
+ const char * const *clk_rst_tbl;
+ unsigned int clk_rst_tbl_size;
+ u64 dma_mask;
+ const char *fwname;
+ u32 pas_id;
+ struct platform_inst_caps *inst_caps;
+ struct platform_inst_fw_cap *inst_fw_caps;
+ u32 inst_fw_caps_size;
+ struct tz_cp_config *tz_cp_config_data;
+ u32 core_arch;
+ u32 hw_response_timeout;
+ struct ubwc_config_data *ubwc_config;
+ u32 num_vpp_pipe;
+ u32 max_session_count;
+ u32 max_core_mbpf;
+ const u32 *input_config_params;
+ unsigned int input_config_params_size;
+ const u32 *output_config_params;
+ unsigned int output_config_params_size;
+ const u32 *dec_input_prop;
+ unsigned int dec_input_prop_size;
+ const u32 *dec_output_prop;
+ unsigned int dec_output_prop_size;
+ const u32 *dec_ip_int_buf_tbl;
+ unsigned int dec_ip_int_buf_tbl_size;
+ const u32 *dec_op_int_buf_tbl;
+ unsigned int dec_op_int_buf_tbl_size;
+};
+
+#endif
diff --git a/drivers/media/platform/qcom/iris/iris_platform_sm8250.c b/drivers/media/platform/qcom/iris/iris_platform_sm8250.c
new file mode 100644
index 000000000000..5c86fd7b7b6f
--- /dev/null
+++ b/drivers/media/platform/qcom/iris/iris_platform_sm8250.c
@@ -0,0 +1,149 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include "iris_core.h"
+#include "iris_ctrls.h"
+#include "iris_platform_common.h"
+#include "iris_resources.h"
+#include "iris_hfi_gen1.h"
+#include "iris_hfi_gen1_defines.h"
+#include "iris_vpu_common.h"
+
+static struct platform_inst_fw_cap inst_fw_cap_sm8250[] = {
+ {
+ .cap_id = PIPE,
+ .min = PIPE_1,
+ .max = PIPE_4,
+ .step_or_mask = 1,
+ .value = PIPE_4,
+ .hfi_id = HFI_PROPERTY_PARAM_WORK_ROUTE,
+ .set = iris_set_pipe,
+ },
+ {
+ .cap_id = STAGE,
+ .min = STAGE_1,
+ .max = STAGE_2,
+ .step_or_mask = 1,
+ .value = STAGE_2,
+ .hfi_id = HFI_PROPERTY_PARAM_WORK_MODE,
+ .set = iris_set_stage,
+ },
+ {
+ .cap_id = DEBLOCK,
+ .min = 0,
+ .max = 1,
+ .step_or_mask = 1,
+ .value = 0,
+ .hfi_id = HFI_PROPERTY_CONFIG_VDEC_POST_LOOP_DEBLOCKER,
+ .set = iris_set_u32,
+ },
+};
+
+static struct platform_inst_caps platform_inst_cap_sm8250 = {
+ .min_frame_width = 128,
+ .max_frame_width = 8192,
+ .min_frame_height = 128,
+ .max_frame_height = 8192,
+ .max_mbpf = 138240,
+ .mb_cycles_vsp = 25,
+ .mb_cycles_vpp = 200,
+};
+
+static void iris_set_sm8250_preset_registers(struct iris_core *core)
+{
+ writel(0x0, core->reg_base + 0xB0088);
+}
+
+static const struct icc_info sm8250_icc_table[] = {
+ { "cpu-cfg", 1000, 1000 },
+ { "video-mem", 1000, 15000000 },
+};
+
+static const char * const sm8250_clk_reset_table[] = { "bus", "core" };
+
+static const struct bw_info sm8250_bw_table_dec[] = {
+ { ((4096 * 2160) / 256) * 60, 2403000 },
+ { ((4096 * 2160) / 256) * 30, 1224000 },
+ { ((1920 * 1080) / 256) * 60, 812000 },
+ { ((1920 * 1080) / 256) * 30, 416000 },
+};
+
+static const char * const sm8250_pmdomain_table[] = { "venus", "vcodec0" };
+
+static const char * const sm8250_opp_pd_table[] = { "mx" };
+
+static const struct platform_clk_data sm8250_clk_table[] = {
+ {IRIS_AXI_CLK, "iface" },
+ {IRIS_CTRL_CLK, "core" },
+ {IRIS_HW_CLK, "vcodec0_core" },
+};
+
+static struct tz_cp_config tz_cp_config_sm8250 = {
+ .cp_start = 0,
+ .cp_size = 0x25800000,
+ .cp_nonpixel_start = 0x01000000,
+ .cp_nonpixel_size = 0x24800000,
+};
+
+static const u32 sm8250_vdec_input_config_param_default[] = {
+ HFI_PROPERTY_CONFIG_VIDEOCORES_USAGE,
+ HFI_PROPERTY_PARAM_UNCOMPRESSED_FORMAT_SELECT,
+ HFI_PROPERTY_PARAM_UNCOMPRESSED_PLANE_ACTUAL_CONSTRAINTS_INFO,
+ HFI_PROPERTY_PARAM_BUFFER_COUNT_ACTUAL,
+ HFI_PROPERTY_PARAM_VDEC_MULTI_STREAM,
+ HFI_PROPERTY_PARAM_FRAME_SIZE,
+ HFI_PROPERTY_PARAM_BUFFER_SIZE_ACTUAL,
+ HFI_PROPERTY_PARAM_BUFFER_ALLOC_MODE,
+};
+
+static const u32 sm8250_dec_ip_int_buf_tbl[] = {
+ BUF_BIN,
+ BUF_SCRATCH_1,
+};
+
+static const u32 sm8250_dec_op_int_buf_tbl[] = {
+ BUF_DPB,
+};
+
+struct iris_platform_data sm8250_data = {
+ .get_instance = iris_hfi_gen1_get_instance,
+ .init_hfi_command_ops = iris_hfi_gen1_command_ops_init,
+ .init_hfi_response_ops = iris_hfi_gen1_response_ops_init,
+ .vpu_ops = &iris_vpu2_ops,
+ .set_preset_registers = iris_set_sm8250_preset_registers,
+ .icc_tbl = sm8250_icc_table,
+ .icc_tbl_size = ARRAY_SIZE(sm8250_icc_table),
+ .clk_rst_tbl = sm8250_clk_reset_table,
+ .clk_rst_tbl_size = ARRAY_SIZE(sm8250_clk_reset_table),
+ .bw_tbl_dec = sm8250_bw_table_dec,
+ .bw_tbl_dec_size = ARRAY_SIZE(sm8250_bw_table_dec),
+ .pmdomain_tbl = sm8250_pmdomain_table,
+ .pmdomain_tbl_size = ARRAY_SIZE(sm8250_pmdomain_table),
+ .opp_pd_tbl = sm8250_opp_pd_table,
+ .opp_pd_tbl_size = ARRAY_SIZE(sm8250_opp_pd_table),
+ .clk_tbl = sm8250_clk_table,
+ .clk_tbl_size = ARRAY_SIZE(sm8250_clk_table),
+ /* Upper bound of DMA address range */
+ .dma_mask = 0xe0000000 - 1,
+ .fwname = "qcom/vpu-1.0/venus.mbn",
+ .pas_id = IRIS_PAS_ID,
+ .inst_caps = &platform_inst_cap_sm8250,
+ .inst_fw_caps = inst_fw_cap_sm8250,
+ .inst_fw_caps_size = ARRAY_SIZE(inst_fw_cap_sm8250),
+ .tz_cp_config_data = &tz_cp_config_sm8250,
+ .hw_response_timeout = HW_RESPONSE_TIMEOUT_VALUE,
+ .num_vpp_pipe = 4,
+ .max_session_count = 16,
+ .max_core_mbpf = (8192 * 4352) / 256,
+ .input_config_params =
+ sm8250_vdec_input_config_param_default,
+ .input_config_params_size =
+ ARRAY_SIZE(sm8250_vdec_input_config_param_default),
+
+ .dec_ip_int_buf_tbl = sm8250_dec_ip_int_buf_tbl,
+ .dec_ip_int_buf_tbl_size = ARRAY_SIZE(sm8250_dec_ip_int_buf_tbl),
+ .dec_op_int_buf_tbl = sm8250_dec_op_int_buf_tbl,
+ .dec_op_int_buf_tbl_size = ARRAY_SIZE(sm8250_dec_op_int_buf_tbl),
+};
diff --git a/drivers/media/platform/qcom/iris/iris_platform_sm8550.c b/drivers/media/platform/qcom/iris/iris_platform_sm8550.c
new file mode 100644
index 000000000000..35d278996c43
--- /dev/null
+++ b/drivers/media/platform/qcom/iris/iris_platform_sm8550.c
@@ -0,0 +1,266 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include "iris_core.h"
+#include "iris_ctrls.h"
+#include "iris_hfi_gen2.h"
+#include "iris_hfi_gen2_defines.h"
+#include "iris_platform_common.h"
+#include "iris_vpu_common.h"
+
+#define VIDEO_ARCH_LX 1
+
+static struct platform_inst_fw_cap inst_fw_cap_sm8550[] = {
+ {
+ .cap_id = PROFILE,
+ .min = V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE,
+ .max = V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_HIGH,
+ .step_or_mask = BIT(V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE) |
+ BIT(V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_BASELINE) |
+ BIT(V4L2_MPEG_VIDEO_H264_PROFILE_MAIN) |
+ BIT(V4L2_MPEG_VIDEO_H264_PROFILE_HIGH) |
+ BIT(V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_HIGH),
+ .value = V4L2_MPEG_VIDEO_H264_PROFILE_HIGH,
+ .hfi_id = HFI_PROP_PROFILE,
+ .flags = CAP_FLAG_OUTPUT_PORT | CAP_FLAG_MENU,
+ .set = iris_set_u32_enum,
+ },
+ {
+ .cap_id = LEVEL,
+ .min = V4L2_MPEG_VIDEO_H264_LEVEL_1_0,
+ .max = V4L2_MPEG_VIDEO_H264_LEVEL_6_2,
+ .step_or_mask = BIT(V4L2_MPEG_VIDEO_H264_LEVEL_1_0) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_1B) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_1_1) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_1_2) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_1_3) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_2_0) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_2_1) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_2_2) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_3_0) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_3_1) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_3_2) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_4_0) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_4_1) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_4_2) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_5_0) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_5_1) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_5_2) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_6_0) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_6_1) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_6_2),
+ .value = V4L2_MPEG_VIDEO_H264_LEVEL_6_1,
+ .hfi_id = HFI_PROP_LEVEL,
+ .flags = CAP_FLAG_OUTPUT_PORT | CAP_FLAG_MENU,
+ .set = iris_set_u32_enum,
+ },
+ {
+ .cap_id = INPUT_BUF_HOST_MAX_COUNT,
+ .min = DEFAULT_MAX_HOST_BUF_COUNT,
+ .max = DEFAULT_MAX_HOST_BURST_BUF_COUNT,
+ .step_or_mask = 1,
+ .value = DEFAULT_MAX_HOST_BUF_COUNT,
+ .hfi_id = HFI_PROP_BUFFER_HOST_MAX_COUNT,
+ .flags = CAP_FLAG_INPUT_PORT,
+ .set = iris_set_u32,
+ },
+ {
+ .cap_id = STAGE,
+ .min = STAGE_1,
+ .max = STAGE_2,
+ .step_or_mask = 1,
+ .value = STAGE_2,
+ .hfi_id = HFI_PROP_STAGE,
+ .set = iris_set_stage,
+ },
+ {
+ .cap_id = PIPE,
+ .min = PIPE_1,
+ .max = PIPE_4,
+ .step_or_mask = 1,
+ .value = PIPE_4,
+ .hfi_id = HFI_PROP_PIPE,
+ .set = iris_set_pipe,
+ },
+ {
+ .cap_id = POC,
+ .min = 0,
+ .max = 2,
+ .step_or_mask = 1,
+ .value = 1,
+ .hfi_id = HFI_PROP_PIC_ORDER_CNT_TYPE,
+ },
+ {
+ .cap_id = CODED_FRAMES,
+ .min = CODED_FRAMES_PROGRESSIVE,
+ .max = CODED_FRAMES_PROGRESSIVE,
+ .step_or_mask = 0,
+ .value = CODED_FRAMES_PROGRESSIVE,
+ .hfi_id = HFI_PROP_CODED_FRAMES,
+ },
+ {
+ .cap_id = BIT_DEPTH,
+ .min = BIT_DEPTH_8,
+ .max = BIT_DEPTH_8,
+ .step_or_mask = 1,
+ .value = BIT_DEPTH_8,
+ .hfi_id = HFI_PROP_LUMA_CHROMA_BIT_DEPTH,
+ },
+ {
+ .cap_id = RAP_FRAME,
+ .min = 0,
+ .max = 1,
+ .step_or_mask = 1,
+ .value = 1,
+ .hfi_id = HFI_PROP_DEC_START_FROM_RAP_FRAME,
+ .flags = CAP_FLAG_INPUT_PORT,
+ .set = iris_set_u32,
+ },
+};
+
+static struct platform_inst_caps platform_inst_cap_sm8550 = {
+ .min_frame_width = 96,
+ .max_frame_width = 8192,
+ .min_frame_height = 96,
+ .max_frame_height = 8192,
+ .max_mbpf = (8192 * 4352) / 256,
+ .mb_cycles_vpp = 200,
+ .mb_cycles_fw = 489583,
+ .mb_cycles_fw_vpp = 66234,
+ .num_comv = 0,
+};
+
+static void iris_set_sm8550_preset_registers(struct iris_core *core)
+{
+ writel(0x0, core->reg_base + 0xB0088);
+}
+
+static const struct icc_info sm8550_icc_table[] = {
+ { "cpu-cfg", 1000, 1000 },
+ { "video-mem", 1000, 15000000 },
+};
+
+static const char * const sm8550_clk_reset_table[] = { "bus" };
+
+static const struct bw_info sm8550_bw_table_dec[] = {
+ { ((4096 * 2160) / 256) * 60, 1608000 },
+ { ((4096 * 2160) / 256) * 30, 826000 },
+ { ((1920 * 1080) / 256) * 60, 567000 },
+ { ((1920 * 1080) / 256) * 30, 294000 },
+};
+
+static const char * const sm8550_pmdomain_table[] = { "venus", "vcodec0" };
+
+static const char * const sm8550_opp_pd_table[] = { "mxc", "mmcx" };
+
+static const struct platform_clk_data sm8550_clk_table[] = {
+ {IRIS_AXI_CLK, "iface" },
+ {IRIS_CTRL_CLK, "core" },
+ {IRIS_HW_CLK, "vcodec0_core" },
+};
+
+static struct ubwc_config_data ubwc_config_sm8550 = {
+ .max_channels = 8,
+ .mal_length = 32,
+ .highest_bank_bit = 16,
+ .bank_swzl_level = 0,
+ .bank_swz2_level = 1,
+ .bank_swz3_level = 1,
+ .bank_spreading = 1,
+};
+
+static struct tz_cp_config tz_cp_config_sm8550 = {
+ .cp_start = 0,
+ .cp_size = 0x25800000,
+ .cp_nonpixel_start = 0x01000000,
+ .cp_nonpixel_size = 0x24800000,
+};
+
+static const u32 sm8550_vdec_input_config_params[] = {
+ HFI_PROP_BITSTREAM_RESOLUTION,
+ HFI_PROP_CROP_OFFSETS,
+ HFI_PROP_CODED_FRAMES,
+ HFI_PROP_BUFFER_FW_MIN_OUTPUT_COUNT,
+ HFI_PROP_PIC_ORDER_CNT_TYPE,
+ HFI_PROP_PROFILE,
+ HFI_PROP_LEVEL,
+ HFI_PROP_SIGNAL_COLOR_INFO,
+};
+
+static const u32 sm8550_vdec_output_config_params[] = {
+ HFI_PROP_COLOR_FORMAT,
+ HFI_PROP_LINEAR_STRIDE_SCANLINE,
+};
+
+static const u32 sm8550_vdec_subscribe_input_properties[] = {
+ HFI_PROP_NO_OUTPUT,
+};
+
+static const u32 sm8550_vdec_subscribe_output_properties[] = {
+ HFI_PROP_PICTURE_TYPE,
+ HFI_PROP_CABAC_SESSION,
+};
+
+static const u32 sm8550_dec_ip_int_buf_tbl[] = {
+ BUF_BIN,
+ BUF_COMV,
+ BUF_NON_COMV,
+ BUF_LINE,
+};
+
+static const u32 sm8550_dec_op_int_buf_tbl[] = {
+ BUF_DPB,
+};
+
+struct iris_platform_data sm8550_data = {
+ .get_instance = iris_hfi_gen2_get_instance,
+ .init_hfi_command_ops = iris_hfi_gen2_command_ops_init,
+ .init_hfi_response_ops = iris_hfi_gen2_response_ops_init,
+ .vpu_ops = &iris_vpu3_ops,
+ .set_preset_registers = iris_set_sm8550_preset_registers,
+ .icc_tbl = sm8550_icc_table,
+ .icc_tbl_size = ARRAY_SIZE(sm8550_icc_table),
+ .clk_rst_tbl = sm8550_clk_reset_table,
+ .clk_rst_tbl_size = ARRAY_SIZE(sm8550_clk_reset_table),
+ .bw_tbl_dec = sm8550_bw_table_dec,
+ .bw_tbl_dec_size = ARRAY_SIZE(sm8550_bw_table_dec),
+ .pmdomain_tbl = sm8550_pmdomain_table,
+ .pmdomain_tbl_size = ARRAY_SIZE(sm8550_pmdomain_table),
+ .opp_pd_tbl = sm8550_opp_pd_table,
+ .opp_pd_tbl_size = ARRAY_SIZE(sm8550_opp_pd_table),
+ .clk_tbl = sm8550_clk_table,
+ .clk_tbl_size = ARRAY_SIZE(sm8550_clk_table),
+ /* Upper bound of DMA address range */
+ .dma_mask = 0xe0000000 - 1,
+ .fwname = "qcom/vpu/vpu30_p4.mbn",
+ .pas_id = IRIS_PAS_ID,
+ .inst_caps = &platform_inst_cap_sm8550,
+ .inst_fw_caps = inst_fw_cap_sm8550,
+ .inst_fw_caps_size = ARRAY_SIZE(inst_fw_cap_sm8550),
+ .tz_cp_config_data = &tz_cp_config_sm8550,
+ .core_arch = VIDEO_ARCH_LX,
+ .hw_response_timeout = HW_RESPONSE_TIMEOUT_VALUE,
+ .ubwc_config = &ubwc_config_sm8550,
+ .num_vpp_pipe = 4,
+ .max_session_count = 16,
+ .max_core_mbpf = ((8192 * 4352) / 256) * 2,
+ .input_config_params =
+ sm8550_vdec_input_config_params,
+ .input_config_params_size =
+ ARRAY_SIZE(sm8550_vdec_input_config_params),
+ .output_config_params =
+ sm8550_vdec_output_config_params,
+ .output_config_params_size =
+ ARRAY_SIZE(sm8550_vdec_output_config_params),
+ .dec_input_prop = sm8550_vdec_subscribe_input_properties,
+ .dec_input_prop_size = ARRAY_SIZE(sm8550_vdec_subscribe_input_properties),
+ .dec_output_prop = sm8550_vdec_subscribe_output_properties,
+ .dec_output_prop_size = ARRAY_SIZE(sm8550_vdec_subscribe_output_properties),
+
+ .dec_ip_int_buf_tbl = sm8550_dec_ip_int_buf_tbl,
+ .dec_ip_int_buf_tbl_size = ARRAY_SIZE(sm8550_dec_ip_int_buf_tbl),
+ .dec_op_int_buf_tbl = sm8550_dec_op_int_buf_tbl,
+ .dec_op_int_buf_tbl_size = ARRAY_SIZE(sm8550_dec_op_int_buf_tbl),
+};
diff --git a/drivers/media/platform/qcom/iris/iris_power.c b/drivers/media/platform/qcom/iris/iris_power.c
new file mode 100644
index 000000000000..dbca42df0910
--- /dev/null
+++ b/drivers/media/platform/qcom/iris/iris_power.c
@@ -0,0 +1,140 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/pm_opp.h>
+#include <linux/pm_runtime.h>
+#include <media/v4l2-mem2mem.h>
+
+#include "iris_buffer.h"
+#include "iris_instance.h"
+#include "iris_power.h"
+#include "iris_resources.h"
+#include "iris_vpu_common.h"
+
+static u32 iris_calc_bw(struct iris_inst *inst, struct icc_vote_data *data)
+{
+ const struct bw_info *bw_tbl = NULL;
+ struct iris_core *core = inst->core;
+ u32 num_rows, i, mbs, mbps;
+ u32 icc_bw = 0;
+
+ mbs = DIV_ROUND_UP(data->height, 16) * DIV_ROUND_UP(data->width, 16);
+ mbps = mbs * data->fps;
+ if (mbps == 0)
+ goto exit;
+
+ bw_tbl = core->iris_platform_data->bw_tbl_dec;
+ num_rows = core->iris_platform_data->bw_tbl_dec_size;
+
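+ /*
+ * The bandwidth table is sorted by decreasing load; keep walking while
+ * a row still covers the requested macroblock rate so that the lowest
+ * sufficient bandwidth is selected.
+ */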
+ for (i = 0; i < num_rows; i++) {
+ if (i != 0 && mbps > bw_tbl[i].mbs_per_sec)
+ break;
+
+ icc_bw = bw_tbl[i].bw_ddr;
+ }
+
+exit:
+ return icc_bw;
+}
+
+static int iris_set_interconnects(struct iris_inst *inst)
+{
+ struct iris_core *core = inst->core;
+ struct iris_inst *instance;
+ u64 total_bw_ddr = 0;
+ int ret;
+
+ mutex_lock(&core->lock);
+ list_for_each_entry(instance, &core->instances, list) {
+ if (!instance->max_input_data_size)
+ continue;
+
+ total_bw_ddr += instance->power.icc_bw;
+ }
+
+ ret = iris_set_icc_bw(core, total_bw_ddr);
+
+ mutex_unlock(&core->lock);
+
+ return ret;
+}
+
+static int iris_vote_interconnects(struct iris_inst *inst)
+{
+ struct icc_vote_data *vote_data = &inst->icc_data;
+ struct v4l2_format *inp_f = inst->fmt_src;
+
+ vote_data->width = inp_f->fmt.pix_mp.width;
+ vote_data->height = inp_f->fmt.pix_mp.height;
+ vote_data->fps = DEFAULT_FPS;
+
+ inst->power.icc_bw = iris_calc_bw(inst, vote_data);
+
+ return iris_set_interconnects(inst);
+}
+
+static int iris_set_clocks(struct iris_inst *inst)
+{
+ struct iris_core *core = inst->core;
+ struct iris_inst *instance;
+ u64 freq = 0;
+ int ret;
+
+ mutex_lock(&core->lock);
+ list_for_each_entry(instance, &core->instances, list) {
+ if (!instance->max_input_data_size)
+ continue;
+
+ freq += instance->power.min_freq;
+ }
+
+ core->power.clk_freq = freq;
+ ret = dev_pm_opp_set_rate(core->dev, freq);
+ mutex_unlock(&core->lock);
+
+ return ret;
+}
+
+static int iris_scale_clocks(struct iris_inst *inst)
+{
+ const struct vpu_ops *vpu_ops = inst->core->iris_platform_data->vpu_ops;
+ struct v4l2_m2m_ctx *m2m_ctx = inst->m2m_ctx;
+ struct v4l2_m2m_buffer *buffer, *n;
+ struct iris_buffer *buf;
+ size_t data_size = 0;
+
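+ /* Derive the required frequency from the largest queued input buffer */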
+ v4l2_m2m_for_each_src_buf_safe(m2m_ctx, buffer, n) {
+ buf = to_iris_buffer(&buffer->vb);
+ data_size = max(data_size, buf->data_size);
+ }
+
+ inst->max_input_data_size = data_size;
+ if (!inst->max_input_data_size)
+ return 0;
+
+ inst->power.min_freq = vpu_ops->calc_freq(inst, inst->max_input_data_size);
+
+ return iris_set_clocks(inst);
+}
+
+int iris_scale_power(struct iris_inst *inst)
+{
+ struct iris_core *core = inst->core;
+ int ret;
+
+ if (pm_runtime_suspended(core->dev)) {
+ ret = pm_runtime_resume_and_get(core->dev);
+ if (ret < 0)
+ return ret;
+
+ pm_runtime_put_autosuspend(core->dev);
+ }
+
+ ret = iris_scale_clocks(inst);
+ if (ret)
+ return ret;
+
+ return iris_vote_interconnects(inst);
+}
diff --git a/drivers/media/platform/qcom/iris/iris_power.h b/drivers/media/platform/qcom/iris/iris_power.h
new file mode 100644
index 000000000000..55212660e72d
--- /dev/null
+++ b/drivers/media/platform/qcom/iris/iris_power.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef __IRIS_POWER_H__
+#define __IRIS_POWER_H__
+
+struct iris_inst;
+
+int iris_scale_power(struct iris_inst *inst);
+
+#endif
diff --git a/drivers/media/platform/qcom/iris/iris_probe.c b/drivers/media/platform/qcom/iris/iris_probe.c
new file mode 100644
index 000000000000..aca442dcc153
--- /dev/null
+++ b/drivers/media/platform/qcom/iris/iris_probe.c
@@ -0,0 +1,349 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/clk.h>
+#include <linux/interconnect.h>
+#include <linux/module.h>
+#include <linux/pm_domain.h>
+#include <linux/pm_opp.h>
+#include <linux/pm_runtime.h>
+#include <linux/reset.h>
+
+#include "iris_core.h"
+#include "iris_ctrls.h"
+#include "iris_vidc.h"
+
+static int iris_init_icc(struct iris_core *core)
+{
+ const struct icc_info *icc_tbl;
+ u32 i = 0;
+
+ icc_tbl = core->iris_platform_data->icc_tbl;
+
+ core->icc_count = core->iris_platform_data->icc_tbl_size;
+ core->icc_tbl = devm_kzalloc(core->dev,
+ sizeof(struct icc_bulk_data) * core->icc_count,
+ GFP_KERNEL);
+ if (!core->icc_tbl)
+ return -ENOMEM;
+
+ for (i = 0; i < core->icc_count; i++) {
+ core->icc_tbl[i].name = icc_tbl[i].name;
+ core->icc_tbl[i].avg_bw = icc_tbl[i].bw_min_kbps;
+ core->icc_tbl[i].peak_bw = 0;
+ }
+
+ return devm_of_icc_bulk_get(core->dev, core->icc_count, core->icc_tbl);
+}
+
+static int iris_init_power_domains(struct iris_core *core)
+{
+ const struct platform_clk_data *clk_tbl;
+ u32 clk_cnt, i;
+ int ret;
+
+ struct dev_pm_domain_attach_data iris_pd_data = {
+ .pd_names = core->iris_platform_data->pmdomain_tbl,
+ .num_pd_names = core->iris_platform_data->pmdomain_tbl_size,
+ .pd_flags = PD_FLAG_NO_DEV_LINK,
+ };
+
+ struct dev_pm_domain_attach_data iris_opp_pd_data = {
+ .pd_names = core->iris_platform_data->opp_pd_tbl,
+ .num_pd_names = core->iris_platform_data->opp_pd_tbl_size,
+ .pd_flags = PD_FLAG_DEV_LINK_ON,
+ };
+
+ ret = devm_pm_domain_attach_list(core->dev, &iris_pd_data, &core->pmdomain_tbl);
+ if (ret < 0)
+ return ret;
+
+ ret = devm_pm_domain_attach_list(core->dev, &iris_opp_pd_data, &core->opp_pmdomain_tbl);
+ if (ret < 0)
+ return ret;
+
+ clk_tbl = core->iris_platform_data->clk_tbl;
+ clk_cnt = core->iris_platform_data->clk_tbl_size;
+
+ for (i = 0; i < clk_cnt; i++) {
+ if (clk_tbl[i].clk_type == IRIS_HW_CLK) {
+ ret = devm_pm_opp_set_clkname(core->dev, clk_tbl[i].clk_name);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return devm_pm_opp_of_add_table(core->dev);
+}
+
+static int iris_init_clocks(struct iris_core *core)
+{
+ int ret;
+
+ ret = devm_clk_bulk_get_all(core->dev, &core->clock_tbl);
+ if (ret < 0)
+ return ret;
+
+ core->clk_count = ret;
+
+ return 0;
+}
+
+static int iris_init_resets(struct iris_core *core)
+{
+ const char * const *rst_tbl;
+ u32 rst_tbl_size;
+ u32 i = 0;
+
+ rst_tbl = core->iris_platform_data->clk_rst_tbl;
+ rst_tbl_size = core->iris_platform_data->clk_rst_tbl_size;
+
+ core->resets = devm_kzalloc(core->dev,
+ sizeof(*core->resets) * rst_tbl_size,
+ GFP_KERNEL);
+ if (!core->resets)
+ return -ENOMEM;
+
+ for (i = 0; i < rst_tbl_size; i++)
+ core->resets[i].id = rst_tbl[i];
+
+ return devm_reset_control_bulk_get_exclusive(core->dev, rst_tbl_size, core->resets);
+}
+
+static int iris_init_resources(struct iris_core *core)
+{
+ int ret;
+
+ ret = iris_init_icc(core);
+ if (ret)
+ return ret;
+
+ ret = iris_init_power_domains(core);
+ if (ret)
+ return ret;
+
+ ret = iris_init_clocks(core);
+ if (ret)
+ return ret;
+
+ return iris_init_resets(core);
+}
+
+static int iris_register_video_device(struct iris_core *core)
+{
+ struct video_device *vdev;
+ int ret;
+
+ vdev = video_device_alloc();
+ if (!vdev)
+ return -ENOMEM;
+
+ strscpy(vdev->name, "qcom-iris-decoder", sizeof(vdev->name));
+ vdev->release = video_device_release;
+ vdev->fops = core->iris_v4l2_file_ops;
+ vdev->ioctl_ops = core->iris_v4l2_ioctl_ops;
+ vdev->vfl_dir = VFL_DIR_M2M;
+ vdev->v4l2_dev = &core->v4l2_dev;
+ vdev->device_caps = V4L2_CAP_VIDEO_M2M_MPLANE | V4L2_CAP_STREAMING;
+
+ ret = video_register_device(vdev, VFL_TYPE_VIDEO, -1);
+ if (ret)
+ goto err_vdev_release;
+
+ core->vdev_dec = vdev;
+ video_set_drvdata(vdev, core);
+
+ return 0;
+
+err_vdev_release:
+ video_device_release(vdev);
+
+ return ret;
+}
+
+static void iris_remove(struct platform_device *pdev)
+{
+ struct iris_core *core;
+
+ core = platform_get_drvdata(pdev);
+ if (!core)
+ return;
+
+ iris_core_deinit(core);
+
+ video_unregister_device(core->vdev_dec);
+
+ v4l2_device_unregister(&core->v4l2_dev);
+
+ mutex_destroy(&core->lock);
+}
+
+static void iris_sys_error_handler(struct work_struct *work)
+{
+ struct iris_core *core =
+ container_of(work, struct iris_core, sys_error_handler.work);
+
+ iris_core_deinit(core);
+ iris_core_init(core);
+}
+
+static int iris_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct iris_core *core;
+ u64 dma_mask;
+ int ret;
+
+ core = devm_kzalloc(&pdev->dev, sizeof(*core), GFP_KERNEL);
+ if (!core)
+ return -ENOMEM;
+ core->dev = dev;
+
+ core->state = IRIS_CORE_DEINIT;
+ mutex_init(&core->lock);
+ init_completion(&core->core_init_done);
+
+ core->response_packet = devm_kzalloc(core->dev, IFACEQ_CORE_PKT_SIZE, GFP_KERNEL);
+ if (!core->response_packet)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&core->instances);
+ INIT_DELAYED_WORK(&core->sys_error_handler, iris_sys_error_handler);
+
+ core->reg_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(core->reg_base))
+ return PTR_ERR(core->reg_base);
+
+ core->irq = platform_get_irq(pdev, 0);
+ if (core->irq < 0)
+ return core->irq;
+
+ core->iris_platform_data = of_device_get_match_data(core->dev);
+
+ ret = devm_request_threaded_irq(core->dev, core->irq, iris_hfi_isr,
+ iris_hfi_isr_handler, IRQF_TRIGGER_HIGH, "iris", core);
+ if (ret)
+ return ret;
+
+ disable_irq_nosync(core->irq);
+
+ iris_init_ops(core);
+ core->iris_platform_data->init_hfi_command_ops(core);
+ core->iris_platform_data->init_hfi_response_ops(core);
+
+ ret = iris_init_resources(core);
+ if (ret)
+ return ret;
+
+ iris_session_init_caps(core);
+
+ ret = v4l2_device_register(dev, &core->v4l2_dev);
+ if (ret)
+ return ret;
+
+ ret = iris_register_video_device(core);
+ if (ret)
+ goto err_v4l2_unreg;
+
+ platform_set_drvdata(pdev, core);
+
+ dma_mask = core->iris_platform_data->dma_mask;
+
+ ret = dma_set_mask_and_coherent(dev, dma_mask);
+ if (ret)
+ goto err_vdev_unreg;
+
+ dma_set_max_seg_size(&pdev->dev, DMA_BIT_MASK(32));
+ dma_set_seg_boundary(&pdev->dev, DMA_BIT_MASK(32));
+
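+ /*
+ * Enable runtime PM with autosuspend so the hardware is powered down
+ * automatically after AUTOSUSPEND_DELAY_VALUE milliseconds of
+ * inactivity.
+ */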
+ pm_runtime_set_autosuspend_delay(core->dev, AUTOSUSPEND_DELAY_VALUE);
+ pm_runtime_use_autosuspend(core->dev);
+ ret = devm_pm_runtime_enable(core->dev);
+ if (ret)
+ goto err_vdev_unreg;
+
+ return 0;
+
+err_vdev_unreg:
+ video_unregister_device(core->vdev_dec);
+err_v4l2_unreg:
+ v4l2_device_unregister(&core->v4l2_dev);
+
+ return ret;
+}
+
+static int __maybe_unused iris_pm_suspend(struct device *dev)
+{
+ struct iris_core *core;
+ int ret = 0;
+
+ core = dev_get_drvdata(dev);
+
+ mutex_lock(&core->lock);
+ if (core->state != IRIS_CORE_INIT)
+ goto exit;
+
+ ret = iris_hfi_pm_suspend(core);
+
+exit:
+ mutex_unlock(&core->lock);
+
+ return ret;
+}
+
+static int __maybe_unused iris_pm_resume(struct device *dev)
+{
+ struct iris_core *core;
+ int ret = 0;
+
+ core = dev_get_drvdata(dev);
+
+ mutex_lock(&core->lock);
+ if (core->state != IRIS_CORE_INIT)
+ goto exit;
+
+ ret = iris_hfi_pm_resume(core);
+ pm_runtime_mark_last_busy(core->dev);
+
+exit:
+ mutex_unlock(&core->lock);
+
+ return ret;
+}
+
+static const struct dev_pm_ops iris_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+ SET_RUNTIME_PM_OPS(iris_pm_suspend, iris_pm_resume, NULL)
+};
+
+static const struct of_device_id iris_dt_match[] = {
+ {
+ .compatible = "qcom,sm8550-iris",
+ .data = &sm8550_data,
+ },
+#if (!IS_ENABLED(CONFIG_VIDEO_QCOM_VENUS))
+ {
+ .compatible = "qcom,sm8250-venus",
+ .data = &sm8250_data,
+ },
+#endif
+ { },
+};
+MODULE_DEVICE_TABLE(of, iris_dt_match);
+
+static struct platform_driver qcom_iris_driver = {
+ .probe = iris_probe,
+ .remove = iris_remove,
+ .driver = {
+ .name = "qcom-iris",
+ .of_match_table = iris_dt_match,
+ .pm = &iris_pm_ops,
+ },
+};
+
+module_platform_driver(qcom_iris_driver);
+MODULE_DESCRIPTION("Qualcomm iris video driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/platform/qcom/iris/iris_resources.c b/drivers/media/platform/qcom/iris/iris_resources.c
new file mode 100644
index 000000000000..cf32f268b703
--- /dev/null
+++ b/drivers/media/platform/qcom/iris/iris_resources.c
@@ -0,0 +1,131 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/clk.h>
+#include <linux/interconnect.h>
+#include <linux/pm_domain.h>
+#include <linux/pm_opp.h>
+#include <linux/pm_runtime.h>
+#include <linux/reset.h>
+
+#include "iris_core.h"
+#include "iris_resources.h"
+
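+/*
+ * Hysteresis window (in kbps, matching the bw_min_kbps/bw_max_kbps platform
+ * limits): a new interconnect vote is skipped when it differs from the
+ * previous vote by less than this amount.
+ */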
+#define BW_THRESHOLD 50000
+
+int iris_set_icc_bw(struct iris_core *core, unsigned long icc_bw)
+{
+ unsigned long bw_kbps = 0, bw_prev = 0;
+ const struct icc_info *icc_tbl;
+ int ret = 0, i;
+
+ icc_tbl = core->iris_platform_data->icc_tbl;
+
+ for (i = 0; i < core->icc_count; i++) {
+ if (!strcmp(core->icc_tbl[i].name, "video-mem")) {
+ bw_kbps = icc_bw;
+ bw_prev = core->power.icc_bw;
+
+ bw_kbps = clamp_t(typeof(bw_kbps), bw_kbps,
+ icc_tbl[i].bw_min_kbps, icc_tbl[i].bw_max_kbps);
+
+ if (abs(bw_kbps - bw_prev) < BW_THRESHOLD && bw_prev)
+ return ret;
+
+ core->icc_tbl[i].avg_bw = bw_kbps;
+
+ core->power.icc_bw = bw_kbps;
+ break;
+ }
+ }
+
+ return icc_bulk_set_bw(core->icc_count, core->icc_tbl);
+}
+
+int iris_unset_icc_bw(struct iris_core *core)
+{
+ u32 i;
+
+ core->power.icc_bw = 0;
+
+ for (i = 0; i < core->icc_count; i++) {
+ core->icc_tbl[i].avg_bw = 0;
+ core->icc_tbl[i].peak_bw = 0;
+ }
+
+ return icc_bulk_set_bw(core->icc_count, core->icc_tbl);
+}
+
+int iris_enable_power_domains(struct iris_core *core, struct device *pd_dev)
+{
+ int ret;
+
+ ret = dev_pm_opp_set_rate(core->dev, ULONG_MAX);
+ if (ret)
+ return ret;
+
+ ret = pm_runtime_get_sync(pd_dev);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+int iris_disable_power_domains(struct iris_core *core, struct device *pd_dev)
+{
+ int ret;
+
+ ret = dev_pm_opp_set_rate(core->dev, 0);
+ if (ret)
+ return ret;
+
+ pm_runtime_put_sync(pd_dev);
+
+ return 0;
+}
+
+static struct clk *iris_get_clk_by_type(struct iris_core *core, enum platform_clk_type clk_type)
+{
+ const struct platform_clk_data *clk_tbl;
+ u32 clk_cnt, i, j;
+
+ clk_tbl = core->iris_platform_data->clk_tbl;
+ clk_cnt = core->iris_platform_data->clk_tbl_size;
+
+ for (i = 0; i < clk_cnt; i++) {
+ if (clk_tbl[i].clk_type == clk_type) {
+ for (j = 0; core->clock_tbl && j < core->clk_count; j++) {
+ if (!strcmp(core->clock_tbl[j].id, clk_tbl[i].clk_name))
+ return core->clock_tbl[j].clk;
+ }
+ }
+ }
+
+ return NULL;
+}
+
+int iris_prepare_enable_clock(struct iris_core *core, enum platform_clk_type clk_type)
+{
+ struct clk *clock;
+
+ clock = iris_get_clk_by_type(core, clk_type);
+ if (!clock)
+ return -EINVAL;
+
+ return clk_prepare_enable(clock);
+}
+
+int iris_disable_unprepare_clock(struct iris_core *core, enum platform_clk_type clk_type)
+{
+ struct clk *clock;
+
+ clock = iris_get_clk_by_type(core, clk_type);
+ if (!clock)
+ return -EINVAL;
+
+ clk_disable_unprepare(clock);
+
+ return 0;
+}
diff --git a/drivers/media/platform/qcom/iris/iris_resources.h b/drivers/media/platform/qcom/iris/iris_resources.h
new file mode 100644
index 000000000000..f723dfe5bd81
--- /dev/null
+++ b/drivers/media/platform/qcom/iris/iris_resources.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef __IRIS_RESOURCES_H__
+#define __IRIS_RESOURCES_H__
+
+struct iris_core;
+
+int iris_enable_power_domains(struct iris_core *core, struct device *pd_dev);
+int iris_disable_power_domains(struct iris_core *core, struct device *pd_dev);
+int iris_unset_icc_bw(struct iris_core *core);
+int iris_set_icc_bw(struct iris_core *core, unsigned long icc_bw);
+int iris_disable_unprepare_clock(struct iris_core *core, enum platform_clk_type clk_type);
+int iris_prepare_enable_clock(struct iris_core *core, enum platform_clk_type clk_type);
+
+#endif
diff --git a/drivers/media/platform/qcom/iris/iris_state.c b/drivers/media/platform/qcom/iris/iris_state.c
new file mode 100644
index 000000000000..5976e926c83d
--- /dev/null
+++ b/drivers/media/platform/qcom/iris/iris_state.c
@@ -0,0 +1,276 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <media/v4l2-mem2mem.h>
+
+#include "iris_instance.h"
+
+static bool iris_allow_inst_state_change(struct iris_inst *inst,
+ enum iris_inst_state req_state)
+{
+ switch (inst->state) {
+ case IRIS_INST_INIT:
+ if (req_state == IRIS_INST_INPUT_STREAMING ||
+ req_state == IRIS_INST_OUTPUT_STREAMING ||
+ req_state == IRIS_INST_DEINIT)
+ return true;
+ return false;
+ case IRIS_INST_INPUT_STREAMING:
+ if (req_state == IRIS_INST_INIT ||
+ req_state == IRIS_INST_STREAMING ||
+ req_state == IRIS_INST_DEINIT)
+ return true;
+ return false;
+ case IRIS_INST_OUTPUT_STREAMING:
+ if (req_state == IRIS_INST_INIT ||
+ req_state == IRIS_INST_STREAMING ||
+ req_state == IRIS_INST_DEINIT)
+ return true;
+ return false;
+ case IRIS_INST_STREAMING:
+ if (req_state == IRIS_INST_INPUT_STREAMING ||
+ req_state == IRIS_INST_OUTPUT_STREAMING ||
+ req_state == IRIS_INST_DEINIT)
+ return true;
+ return false;
+ case IRIS_INST_DEINIT:
+ if (req_state == IRIS_INST_INIT)
+ return true;
+ return false;
+ default:
+ return false;
+ }
+}
+
+int iris_inst_change_state(struct iris_inst *inst,
+ enum iris_inst_state request_state)
+{
+ if (inst->state == IRIS_INST_ERROR)
+ return 0;
+
+ if (inst->state == request_state)
+ return 0;
+
+ if (request_state == IRIS_INST_ERROR)
+ goto change_state;
+
+ if (!iris_allow_inst_state_change(inst, request_state))
+ return -EINVAL;
+
+change_state:
+ dev_dbg(inst->core->dev, "state changed from %x to %x\n",
+ inst->state, request_state);
+ inst->state = request_state;
+
+ return 0;
+}
+
+int iris_inst_state_change_streamon(struct iris_inst *inst, u32 plane)
+{
+ enum iris_inst_state new_state = IRIS_INST_ERROR;
+
+ if (V4L2_TYPE_IS_OUTPUT(plane)) {
+ if (inst->state == IRIS_INST_INIT)
+ new_state = IRIS_INST_INPUT_STREAMING;
+ else if (inst->state == IRIS_INST_OUTPUT_STREAMING)
+ new_state = IRIS_INST_STREAMING;
+ } else if (V4L2_TYPE_IS_CAPTURE(plane)) {
+ if (inst->state == IRIS_INST_INIT)
+ new_state = IRIS_INST_OUTPUT_STREAMING;
+ else if (inst->state == IRIS_INST_INPUT_STREAMING)
+ new_state = IRIS_INST_STREAMING;
+ }
+
+ return iris_inst_change_state(inst, new_state);
+}
+
+int iris_inst_state_change_streamoff(struct iris_inst *inst, u32 plane)
+{
+ enum iris_inst_state new_state = IRIS_INST_ERROR;
+
+ if (V4L2_TYPE_IS_OUTPUT(plane)) {
+ if (inst->state == IRIS_INST_INPUT_STREAMING)
+ new_state = IRIS_INST_INIT;
+ else if (inst->state == IRIS_INST_STREAMING)
+ new_state = IRIS_INST_OUTPUT_STREAMING;
+ } else if (V4L2_TYPE_IS_CAPTURE(plane)) {
+ if (inst->state == IRIS_INST_OUTPUT_STREAMING)
+ new_state = IRIS_INST_INIT;
+ else if (inst->state == IRIS_INST_STREAMING)
+ new_state = IRIS_INST_INPUT_STREAMING;
+ }
+
+ return iris_inst_change_state(inst, new_state);
+}
+
+static bool iris_inst_allow_sub_state(struct iris_inst *inst, enum iris_inst_sub_state sub_state)
+{
+ if (!sub_state)
+ return true;
+
+ switch (inst->state) {
+ case IRIS_INST_INIT:
+ if (sub_state & IRIS_INST_SUB_LOAD_RESOURCES)
+ return true;
+ return false;
+ case IRIS_INST_INPUT_STREAMING:
+ if (sub_state & (IRIS_INST_SUB_FIRST_IPSC | IRIS_INST_SUB_DRC |
+ IRIS_INST_SUB_DRAIN | IRIS_INST_SUB_INPUT_PAUSE))
+ return true;
+ return false;
+ case IRIS_INST_OUTPUT_STREAMING:
+ if (sub_state & (IRIS_INST_SUB_DRC_LAST |
+ IRIS_INST_SUB_DRAIN_LAST | IRIS_INST_SUB_OUTPUT_PAUSE))
+ return true;
+ return false;
+ case IRIS_INST_STREAMING:
+ if (sub_state & (IRIS_INST_SUB_DRC | IRIS_INST_SUB_DRAIN |
+ IRIS_INST_SUB_DRC_LAST | IRIS_INST_SUB_DRAIN_LAST |
+ IRIS_INST_SUB_INPUT_PAUSE | IRIS_INST_SUB_OUTPUT_PAUSE))
+ return true;
+ return false;
+ case IRIS_INST_DEINIT:
+ if (sub_state & (IRIS_INST_SUB_DRC | IRIS_INST_SUB_DRAIN |
+ IRIS_INST_SUB_DRC_LAST | IRIS_INST_SUB_DRAIN_LAST |
+ IRIS_INST_SUB_INPUT_PAUSE | IRIS_INST_SUB_OUTPUT_PAUSE))
+ return true;
+ return false;
+ default:
+ return false;
+ }
+}
+
+int iris_inst_change_sub_state(struct iris_inst *inst,
+ enum iris_inst_sub_state clear_sub_state,
+ enum iris_inst_sub_state set_sub_state)
+{
+ enum iris_inst_sub_state prev_sub_state;
+
+ if (inst->state == IRIS_INST_ERROR)
+ return 0;
+
+ if (!clear_sub_state && !set_sub_state)
+ return 0;
+
+ if ((clear_sub_state & set_sub_state) ||
+ set_sub_state > IRIS_INST_MAX_SUB_STATE_VALUE ||
+ clear_sub_state > IRIS_INST_MAX_SUB_STATE_VALUE)
+ return -EINVAL;
+
+ prev_sub_state = inst->sub_state;
+
+ if (!iris_inst_allow_sub_state(inst, set_sub_state))
+ return -EINVAL;
+
+ inst->sub_state |= set_sub_state;
+ inst->sub_state &= ~clear_sub_state;
+
+ if (inst->sub_state != prev_sub_state)
+ dev_dbg(inst->core->dev, "sub_state changed from %x to %x\n",
+ prev_sub_state, inst->sub_state);
+
+ return 0;
+}
+
+int iris_inst_sub_state_change_drc(struct iris_inst *inst)
+{
+ enum iris_inst_sub_state set_sub_state = 0;
+
+ if (inst->sub_state & IRIS_INST_SUB_DRC)
+ return -EINVAL;
+
+ if (inst->state == IRIS_INST_INPUT_STREAMING ||
+ inst->state == IRIS_INST_INIT)
+ set_sub_state = IRIS_INST_SUB_FIRST_IPSC | IRIS_INST_SUB_INPUT_PAUSE;
+ else
+ set_sub_state = IRIS_INST_SUB_DRC | IRIS_INST_SUB_INPUT_PAUSE;
+
+ return iris_inst_change_sub_state(inst, 0, set_sub_state);
+}
+
+int iris_inst_sub_state_change_drain_last(struct iris_inst *inst)
+{
+ enum iris_inst_sub_state set_sub_state;
+
+ if (inst->sub_state & IRIS_INST_SUB_DRAIN_LAST)
+ return -EINVAL;
+
+ if (!(inst->sub_state & IRIS_INST_SUB_DRAIN))
+ return -EINVAL;
+
+ set_sub_state = IRIS_INST_SUB_DRAIN_LAST | IRIS_INST_SUB_OUTPUT_PAUSE;
+
+ return iris_inst_change_sub_state(inst, 0, set_sub_state);
+}
+
+int iris_inst_sub_state_change_drc_last(struct iris_inst *inst)
+{
+ enum iris_inst_sub_state set_sub_state;
+
+ if (inst->sub_state & IRIS_INST_SUB_DRC_LAST)
+ return -EINVAL;
+
+ if (!(inst->sub_state & IRIS_INST_SUB_DRC) ||
+ !(inst->sub_state & IRIS_INST_SUB_INPUT_PAUSE))
+ return -EINVAL;
+
+ if (inst->sub_state & IRIS_INST_SUB_FIRST_IPSC)
+ return 0;
+
+ set_sub_state = IRIS_INST_SUB_DRC_LAST | IRIS_INST_SUB_OUTPUT_PAUSE;
+
+ return iris_inst_change_sub_state(inst, 0, set_sub_state);
+}
+
+int iris_inst_sub_state_change_pause(struct iris_inst *inst, u32 plane)
+{
+ enum iris_inst_sub_state set_sub_state;
+
+ if (V4L2_TYPE_IS_OUTPUT(plane)) {
+ if (inst->sub_state & IRIS_INST_SUB_DRC &&
+ !(inst->sub_state & IRIS_INST_SUB_DRC_LAST))
+ return -EINVAL;
+
+ if (inst->sub_state & IRIS_INST_SUB_DRAIN &&
+ !(inst->sub_state & IRIS_INST_SUB_DRAIN_LAST))
+ return -EINVAL;
+
+ set_sub_state = IRIS_INST_SUB_INPUT_PAUSE;
+ } else {
+ set_sub_state = IRIS_INST_SUB_OUTPUT_PAUSE;
+ }
+
+ return iris_inst_change_sub_state(inst, 0, set_sub_state);
+}
+
+static inline bool iris_drc_pending(struct iris_inst *inst)
+{
+ return inst->sub_state & IRIS_INST_SUB_DRC &&
+ inst->sub_state & IRIS_INST_SUB_DRC_LAST;
+}
+
+static inline bool iris_drain_pending(struct iris_inst *inst)
+{
+ return inst->sub_state & IRIS_INST_SUB_DRAIN &&
+ inst->sub_state & IRIS_INST_SUB_DRAIN_LAST;
+}
+
+bool iris_allow_cmd(struct iris_inst *inst, u32 cmd)
+{
+ struct vb2_queue *src_q = v4l2_m2m_get_src_vq(inst->m2m_ctx);
+ struct vb2_queue *dst_q = v4l2_m2m_get_dst_vq(inst->m2m_ctx);
+
+ if (cmd == V4L2_DEC_CMD_START) {
+ if (vb2_is_streaming(src_q) || vb2_is_streaming(dst_q))
+ if (iris_drc_pending(inst) || iris_drain_pending(inst))
+ return true;
+ } else if (cmd == V4L2_DEC_CMD_STOP) {
+ if (vb2_is_streaming(src_q))
+ if (inst->sub_state != IRIS_INST_SUB_DRAIN)
+ return true;
+ }
+
+ return false;
+}
diff --git a/drivers/media/platform/qcom/iris/iris_state.h b/drivers/media/platform/qcom/iris/iris_state.h
new file mode 100644
index 000000000000..78c61aac5e7e
--- /dev/null
+++ b/drivers/media/platform/qcom/iris/iris_state.h
@@ -0,0 +1,144 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef __IRIS_STATE_H__
+#define __IRIS_STATE_H__
+
+struct iris_inst;
+
+/**
+ * enum iris_core_state
+ *
+ * @IRIS_CORE_DEINIT: default state.
+ * @IRIS_CORE_INIT: core is initialized: firmware is loaded, hardware is
+ * brought out of reset, and shared queues are established
+ * between the host driver and the firmware.
+ * @IRIS_CORE_ERROR: error state.
+ *
+ * -----------
+ * |
+ * V
+ * -----------
+ * +--->| DEINIT |<---+
+ * | ----------- |
+ * | | |
+ * | v |
+ * | ----------- |
+ * | / \ |
+ * | / \ |
+ * | / \ |
+ * | v v v
+ * ----------- -----------
+ * | INIT |--->| ERROR |
+ * ----------- -----------
+ */
+enum iris_core_state {
+ IRIS_CORE_DEINIT,
+ IRIS_CORE_INIT,
+ IRIS_CORE_ERROR,
+};
+
+/**
+ * enum iris_inst_state
+ *
+ * @IRIS_INST_INIT: video instance is opened.
+ * @IRIS_INST_INPUT_STREAMING: stream on is completed on output plane.
+ * @IRIS_INST_OUTPUT_STREAMING: stream on is completed on capture plane.
+ * @IRIS_INST_STREAMING: stream on is completed on both output and capture planes.
+ * @IRIS_INST_DEINIT: video instance is closed.
+ * @IRIS_INST_ERROR: error state.
+ * |
+ * V
+ * -------------
+ * +--------| INIT |----------+
+ * | ------------- |
+ * | ^ ^ |
+ * | / \ |
+ * | / \ |
+ * | v v |
+ * | ----------- ----------- |
+ * | | INPUT OUTPUT | |
+ * |---| STREAMING STREAMING |---|
+ * | ----------- ----------- |
+ * | ^ ^ |
+ * | \ / |
+ * | \ / |
+ * | v v |
+ * | ------------- |
+ * |--------| STREAMING |-----------|
+ * | ------------- |
+ * | | |
+ * | | |
+ * | v |
+ * | ----------- |
+ * +-------->| DEINIT |<----------+
+ * | ----------- |
+ * | | |
+ * | | |
+ * | v |
+ * | ---------- |
+ * +-------->| ERROR |<------------+
+ * ----------
+ */
+enum iris_inst_state {
+ IRIS_INST_DEINIT,
+ IRIS_INST_INIT,
+ IRIS_INST_INPUT_STREAMING,
+ IRIS_INST_OUTPUT_STREAMING,
+ IRIS_INST_STREAMING,
+ IRIS_INST_ERROR,
+};
+
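+/*
+ * Sub-states are bit flags: several of them can be active at the same time,
+ * subject to the per-state rules enforced when changing sub-states
+ * (see iris_inst_change_sub_state()).
+ */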
+#define IRIS_INST_SUB_STATES 8
+#define IRIS_INST_MAX_SUB_STATE_VALUE ((1 << IRIS_INST_SUB_STATES) - 1)
+
+/**
+ * enum iris_inst_sub_state
+ *
+ * @IRIS_INST_SUB_FIRST_IPSC: indicates source change is received from firmware
+ * when output port is not yet streaming.
+ * @IRIS_INST_SUB_DRC: indicates source change is received from firmware
+ * when output port is streaming and source change event is
+ * sent to client.
+ * @IRIS_INST_SUB_DRC_LAST: indicates last buffer is received from firmware
+ * as part of source change.
+ * @IRIS_INST_SUB_DRAIN: indicates drain is in progress.
+ * @IRIS_INST_SUB_DRAIN_LAST: indicates last buffer is received from firmware
+ * as part of drain sequence.
+ * @IRIS_INST_SUB_INPUT_PAUSE: source change is received from firmware. This
+ * indicates that firmware is paused and will not
+ * process any further input frames.
+ * @IRIS_INST_SUB_OUTPUT_PAUSE: last buffer is received from firmware as part
+ * of the DRC sequence. This indicates that firmware
+ * is paused and will not process any further output
+ * frames.
+ * @IRIS_INST_SUB_LOAD_RESOURCES: indicates all the resources have been loaded by the
+ * firmware and it is ready for processing.
+ */
+enum iris_inst_sub_state {
+ IRIS_INST_SUB_FIRST_IPSC = BIT(0),
+ IRIS_INST_SUB_DRC = BIT(1),
+ IRIS_INST_SUB_DRC_LAST = BIT(2),
+ IRIS_INST_SUB_DRAIN = BIT(3),
+ IRIS_INST_SUB_DRAIN_LAST = BIT(4),
+ IRIS_INST_SUB_INPUT_PAUSE = BIT(5),
+ IRIS_INST_SUB_OUTPUT_PAUSE = BIT(6),
+ IRIS_INST_SUB_LOAD_RESOURCES = BIT(7),
+};
+
+int iris_inst_change_state(struct iris_inst *inst,
+ enum iris_inst_state request_state);
+int iris_inst_change_sub_state(struct iris_inst *inst,
+ enum iris_inst_sub_state clear_sub_state,
+ enum iris_inst_sub_state set_sub_state);
+
+int iris_inst_state_change_streamon(struct iris_inst *inst, u32 plane);
+int iris_inst_state_change_streamoff(struct iris_inst *inst, u32 plane);
+int iris_inst_sub_state_change_drc(struct iris_inst *inst);
+int iris_inst_sub_state_change_drain_last(struct iris_inst *inst);
+int iris_inst_sub_state_change_drc_last(struct iris_inst *inst);
+int iris_inst_sub_state_change_pause(struct iris_inst *inst, u32 plane);
+bool iris_allow_cmd(struct iris_inst *inst, u32 cmd);
+
+#endif
diff --git a/drivers/media/platform/qcom/iris/iris_utils.c b/drivers/media/platform/qcom/iris/iris_utils.c
new file mode 100644
index 000000000000..83c70d6a2d90
--- /dev/null
+++ b/drivers/media/platform/qcom/iris/iris_utils.c
@@ -0,0 +1,90 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/pm_runtime.h>
+#include <media/v4l2-mem2mem.h>
+
+#include "iris_instance.h"
+#include "iris_utils.h"
+
+bool iris_res_is_less_than(u32 width, u32 height,
+ u32 ref_width, u32 ref_height)
+{
+ u32 num_mbs = NUM_MBS_PER_FRAME(height, width);
+ u32 max_side = max(ref_width, ref_height);
+
+ if (num_mbs < NUM_MBS_PER_FRAME(ref_height, ref_width) &&
+ width < max_side &&
+ height < max_side)
+ return true;
+
+ return false;
+}
+
+int iris_get_mbpf(struct iris_inst *inst)
+{
+ struct v4l2_format *inp_f = inst->fmt_src;
+ u32 height = max(inp_f->fmt.pix_mp.height, inst->crop.height);
+ u32 width = max(inp_f->fmt.pix_mp.width, inst->crop.width);
+
+ return NUM_MBS_PER_FRAME(height, width);
+}
+
+bool iris_split_mode_enabled(struct iris_inst *inst)
+{
+ return inst->fmt_dst->fmt.pix_mp.pixelformat == V4L2_PIX_FMT_NV12;
+}
+
+void iris_helper_buffers_done(struct iris_inst *inst, unsigned int type,
+ enum vb2_buffer_state state)
+{
+ struct v4l2_m2m_ctx *m2m_ctx = inst->m2m_ctx;
+ struct vb2_v4l2_buffer *buf;
+
+ if (V4L2_TYPE_IS_OUTPUT(type)) {
+ while ((buf = v4l2_m2m_src_buf_remove(m2m_ctx)))
+ v4l2_m2m_buf_done(buf, state);
+ } else if (V4L2_TYPE_IS_CAPTURE(type)) {
+ while ((buf = v4l2_m2m_dst_buf_remove(m2m_ctx)))
+ v4l2_m2m_buf_done(buf, state);
+ }
+}
+
+int iris_wait_for_session_response(struct iris_inst *inst, bool is_flush)
+{
+ struct iris_core *core = inst->core;
+ u32 hw_response_timeout_val;
+ struct completion *done;
+ int ret;
+
+ hw_response_timeout_val = core->iris_platform_data->hw_response_timeout;
+ done = is_flush ? &inst->flush_completion : &inst->completion;
+
+ mutex_unlock(&inst->lock);
+ ret = wait_for_completion_timeout(done, msecs_to_jiffies(hw_response_timeout_val));
+ mutex_lock(&inst->lock);
+ if (!ret) {
+ iris_inst_change_state(inst, IRIS_INST_ERROR);
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+struct iris_inst *iris_get_instance(struct iris_core *core, u32 session_id)
+{
+ struct iris_inst *inst;
+
+ mutex_lock(&core->lock);
+ list_for_each_entry(inst, &core->instances, list) {
+ if (inst->session_id == session_id) {
+ mutex_unlock(&core->lock);
+ return inst;
+ }
+ }
+
+ mutex_unlock(&core->lock);
+ return NULL;
+}
diff --git a/drivers/media/platform/qcom/iris/iris_utils.h b/drivers/media/platform/qcom/iris/iris_utils.h
new file mode 100644
index 000000000000..49869cf7a376
--- /dev/null
+++ b/drivers/media/platform/qcom/iris/iris_utils.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef __IRIS_UTILS_H__
+#define __IRIS_UTILS_H__
+
+struct iris_core;
+#include "iris_buffer.h"
+
+struct iris_hfi_rect_desc {
+ u32 left;
+ u32 top;
+ u32 width;
+ u32 height;
+};
+
+struct iris_hfi_frame_info {
+ u32 picture_type;
+ u32 no_output;
+ u32 data_corrupt;
+ u32 overflow;
+};
+
+struct iris_ts_metadata {
+ u64 ts_ns;
+ u64 ts_us;
+ u32 flags;
+ struct v4l2_timecode tc;
+};
+
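+/*
+ * Number of 16x16 macroblocks per frame, e.g. a 1920x1080 frame gives
+ * DIV_ROUND_UP(1080, 16) * DIV_ROUND_UP(1920, 16) = 68 * 120 = 8160.
+ */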
+#define NUM_MBS_PER_FRAME(height, width) \
+ (DIV_ROUND_UP(height, 16) * DIV_ROUND_UP(width, 16))
+
+static inline enum iris_buffer_type iris_v4l2_type_to_driver(u32 type)
+{
+ if (type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ return BUF_INPUT;
+ else
+ return BUF_OUTPUT;
+}
+
+bool iris_res_is_less_than(u32 width, u32 height,
+ u32 ref_width, u32 ref_height);
+int iris_get_mbpf(struct iris_inst *inst);
+bool iris_split_mode_enabled(struct iris_inst *inst);
+struct iris_inst *iris_get_instance(struct iris_core *core, u32 session_id);
+void iris_helper_buffers_done(struct iris_inst *inst, unsigned int type,
+ enum vb2_buffer_state state);
+int iris_wait_for_session_response(struct iris_inst *inst, bool is_flush);
+
+#endif
diff --git a/drivers/media/platform/qcom/iris/iris_vb2.c b/drivers/media/platform/qcom/iris/iris_vb2.c
new file mode 100644
index 000000000000..cdf11feb590b
--- /dev/null
+++ b/drivers/media/platform/qcom/iris/iris_vb2.c
@@ -0,0 +1,335 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <media/videobuf2-dma-contig.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-mem2mem.h>
+
+#include "iris_instance.h"
+#include "iris_vb2.h"
+#include "iris_vdec.h"
+#include "iris_power.h"
+
+static int iris_check_core_mbpf(struct iris_inst *inst)
+{
+ struct iris_core *core = inst->core;
+ struct iris_inst *instance;
+ u32 total_mbpf = 0;
+
+ mutex_lock(&core->lock);
+ list_for_each_entry(instance, &core->instances, list)
+ total_mbpf += iris_get_mbpf(instance);
+ mutex_unlock(&core->lock);
+
+ if (total_mbpf > core->iris_platform_data->max_core_mbpf)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static int iris_check_inst_mbpf(struct iris_inst *inst)
+{
+ struct platform_inst_caps *caps;
+ u32 mbpf, max_mbpf;
+
+ caps = inst->core->iris_platform_data->inst_caps;
+ max_mbpf = caps->max_mbpf;
+ mbpf = iris_get_mbpf(inst);
+ if (mbpf > max_mbpf)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static int iris_check_resolution_supported(struct iris_inst *inst)
+{
+ u32 width, height, min_width, min_height, max_width, max_height;
+ struct platform_inst_caps *caps;
+
+ caps = inst->core->iris_platform_data->inst_caps;
+ width = inst->fmt_src->fmt.pix_mp.width;
+ height = inst->fmt_src->fmt.pix_mp.height;
+
+ min_width = caps->min_frame_width;
+ max_width = caps->max_frame_width;
+ min_height = caps->min_frame_height;
+ max_height = caps->max_frame_height;
+
+ if (!(min_width <= width && width <= max_width) ||
+ !(min_height <= height && height <= max_height))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int iris_check_session_supported(struct iris_inst *inst)
+{
+ struct iris_core *core = inst->core;
+ struct iris_inst *instance = NULL;
+ bool found = false;
+ int ret;
+
+ list_for_each_entry(instance, &core->instances, list) {
+ if (instance == inst)
+ found = true;
+ }
+
+ if (!found) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ ret = iris_check_core_mbpf(inst);
+ if (ret)
+ goto exit;
+
+ ret = iris_check_inst_mbpf(inst);
+ if (ret)
+ goto exit;
+
+ ret = iris_check_resolution_supported(inst);
+ if (ret)
+ goto exit;
+
+ return 0;
+exit:
+ dev_err(inst->core->dev, "current session not supported (%d)\n", ret);
+
+ return ret;
+}
+
+int iris_vb2_buf_init(struct vb2_buffer *vb2)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb2);
+ struct iris_buffer *buf = to_iris_buffer(vbuf);
+
+ buf->device_addr = vb2_dma_contig_plane_dma_addr(vb2, 0);
+
+ return 0;
+}
+
+int iris_vb2_queue_setup(struct vb2_queue *q,
+ unsigned int *num_buffers, unsigned int *num_planes,
+ unsigned int sizes[], struct device *alloc_devs[])
+{
+ struct iris_inst *inst;
+ struct iris_core *core;
+ struct v4l2_format *f;
+ int ret = 0;
+
+ inst = vb2_get_drv_priv(q);
+
+ mutex_lock(&inst->lock);
+ if (inst->state == IRIS_INST_ERROR) {
+ ret = -EBUSY;
+ goto unlock;
+ }
+
+ core = inst->core;
+ f = V4L2_TYPE_IS_OUTPUT(q->type) ? inst->fmt_src : inst->fmt_dst;
+
+ if (*num_planes) {
+ if (*num_planes != f->fmt.pix_mp.num_planes ||
+ sizes[0] < f->fmt.pix_mp.plane_fmt[0].sizeimage)
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ ret = iris_check_session_supported(inst);
+ if (ret)
+ goto unlock;
+
+ if (!inst->once_per_session_set) {
+ inst->once_per_session_set = true;
+
+ ret = core->hfi_ops->session_open(inst);
+ if (ret) {
+ ret = -EINVAL;
+ dev_err(core->dev, "session open failed\n");
+ goto unlock;
+ }
+
+ ret = iris_inst_change_state(inst, IRIS_INST_INIT);
+ if (ret)
+ goto unlock;
+ }
+
+ *num_planes = 1;
+ sizes[0] = f->fmt.pix_mp.plane_fmt[0].sizeimage;
+
+unlock:
+ mutex_unlock(&inst->lock);
+
+ return ret;
+}
+
+int iris_vb2_start_streaming(struct vb2_queue *q, unsigned int count)
+{
+ enum iris_buffer_type buf_type;
+ struct iris_inst *inst;
+ int ret = 0;
+
+ inst = vb2_get_drv_priv(q);
+
+ if (V4L2_TYPE_IS_CAPTURE(q->type) && inst->state == IRIS_INST_INIT)
+ return 0;
+
+ mutex_lock(&inst->lock);
+ if (inst->state == IRIS_INST_ERROR) {
+ ret = -EBUSY;
+ goto error;
+ }
+
+ if (!V4L2_TYPE_IS_OUTPUT(q->type) &&
+ !V4L2_TYPE_IS_CAPTURE(q->type)) {
+ ret = -EINVAL;
+ goto error;
+ }
+
+ iris_scale_power(inst);
+
+ ret = iris_check_session_supported(inst);
+ if (ret)
+ goto error;
+
+ if (V4L2_TYPE_IS_OUTPUT(q->type))
+ ret = iris_vdec_streamon_input(inst);
+ else if (V4L2_TYPE_IS_CAPTURE(q->type))
+ ret = iris_vdec_streamon_output(inst);
+ if (ret)
+ goto error;
+
+ buf_type = iris_v4l2_type_to_driver(q->type);
+
+ ret = iris_queue_deferred_buffers(inst, buf_type);
+ if (ret)
+ goto error;
+
+ mutex_unlock(&inst->lock);
+
+ return ret;
+
+error:
+ iris_helper_buffers_done(inst, q->type, VB2_BUF_STATE_QUEUED);
+ iris_inst_change_state(inst, IRIS_INST_ERROR);
+ mutex_unlock(&inst->lock);
+
+ return ret;
+}
+
+void iris_vb2_stop_streaming(struct vb2_queue *q)
+{
+ struct iris_inst *inst;
+ int ret = 0;
+
+ inst = vb2_get_drv_priv(q);
+
+ if (V4L2_TYPE_IS_CAPTURE(q->type) && inst->state == IRIS_INST_INIT)
+ return;
+
+ mutex_lock(&inst->lock);
+
+ if (!V4L2_TYPE_IS_OUTPUT(q->type) &&
+ !V4L2_TYPE_IS_CAPTURE(q->type))
+ goto exit;
+
+ ret = iris_vdec_session_streamoff(inst, q->type);
+
+exit:
+ iris_helper_buffers_done(inst, q->type, VB2_BUF_STATE_ERROR);
+ if (ret)
+ iris_inst_change_state(inst, IRIS_INST_ERROR);
+
+ mutex_unlock(&inst->lock);
+}
+
+int iris_vb2_buf_prepare(struct vb2_buffer *vb)
+{
+ struct iris_inst *inst = vb2_get_drv_priv(vb->vb2_queue);
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+
+ if (V4L2_TYPE_IS_OUTPUT(vb->vb2_queue->type)) {
+ if (vbuf->field == V4L2_FIELD_ANY)
+ vbuf->field = V4L2_FIELD_NONE;
+ if (vbuf->field != V4L2_FIELD_NONE)
+ return -EINVAL;
+ }
+
+ if (vb->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE &&
+ vb2_plane_size(vb, 0) < iris_get_buffer_size(inst, BUF_OUTPUT))
+ return -EINVAL;
+ if (vb->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE &&
+ vb2_plane_size(vb, 0) < iris_get_buffer_size(inst, BUF_INPUT))
+ return -EINVAL;
+
+ return 0;
+}
+
+int iris_vb2_buf_out_validate(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *v4l2_buf = to_vb2_v4l2_buffer(vb);
+
+ v4l2_buf->field = V4L2_FIELD_NONE;
+
+ return 0;
+}
+
+void iris_vb2_buf_queue(struct vb2_buffer *vb2)
+{
+ static const struct v4l2_event eos = { .type = V4L2_EVENT_EOS };
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb2);
+ struct v4l2_m2m_ctx *m2m_ctx;
+ struct iris_inst *inst;
+ int ret = 0;
+
+ inst = vb2_get_drv_priv(vb2->vb2_queue);
+
+ mutex_lock(&inst->lock);
+ if (inst->state == IRIS_INST_ERROR) {
+ ret = -EBUSY;
+ goto exit;
+ }
+
+ if (vbuf->field == V4L2_FIELD_ANY)
+ vbuf->field = V4L2_FIELD_NONE;
+
+ m2m_ctx = inst->m2m_ctx;
+
+ if (!vb2->planes[0].bytesused && V4L2_TYPE_IS_OUTPUT(vb2->type)) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
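+ /*
+ * If the last buffer of a resolution change or drain sequence has
+ * already been received, complete this capture buffer immediately with
+ * the LAST flag and signal EOS instead of queueing it to firmware.
+ */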
+ if (V4L2_TYPE_IS_CAPTURE(vb2->vb2_queue->type)) {
+ if ((inst->sub_state & IRIS_INST_SUB_DRC &&
+ inst->sub_state & IRIS_INST_SUB_DRC_LAST) ||
+ (inst->sub_state & IRIS_INST_SUB_DRAIN &&
+ inst->sub_state & IRIS_INST_SUB_DRAIN_LAST)) {
+ vbuf->flags |= V4L2_BUF_FLAG_LAST;
+ vbuf->sequence = inst->sequence_cap++;
+ vbuf->field = V4L2_FIELD_NONE;
+ vb2_set_plane_payload(vb2, 0, 0);
+ v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_DONE);
+ if (!v4l2_m2m_has_stopped(m2m_ctx)) {
+ v4l2_event_queue_fh(&inst->fh, &eos);
+ v4l2_m2m_mark_stopped(m2m_ctx);
+ }
+ goto exit;
+ }
+ }
+
+ v4l2_m2m_buf_queue(m2m_ctx, vbuf);
+
+ ret = iris_vdec_qbuf(inst, vbuf);
+
+exit:
+ if (ret) {
+ iris_inst_change_state(inst, IRIS_INST_ERROR);
+ v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_ERROR);
+ }
+ mutex_unlock(&inst->lock);
+}
diff --git a/drivers/media/platform/qcom/iris/iris_vb2.h b/drivers/media/platform/qcom/iris/iris_vb2.h
new file mode 100644
index 000000000000..a88565fdd3e4
--- /dev/null
+++ b/drivers/media/platform/qcom/iris/iris_vb2.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef __IRIS_VB2_H__
+#define __IRIS_VB2_H__
+
+int iris_vb2_buf_init(struct vb2_buffer *vb2);
+int iris_vb2_queue_setup(struct vb2_queue *q,
+ unsigned int *num_buffers, unsigned int *num_planes,
+ unsigned int sizes[], struct device *alloc_devs[]);
+int iris_vb2_start_streaming(struct vb2_queue *q, unsigned int count);
+void iris_vb2_stop_streaming(struct vb2_queue *q);
+int iris_vb2_buf_prepare(struct vb2_buffer *vb);
+int iris_vb2_buf_out_validate(struct vb2_buffer *vb);
+void iris_vb2_buf_queue(struct vb2_buffer *vb2);
+
+#endif
diff --git a/drivers/media/platform/qcom/iris/iris_vdec.c b/drivers/media/platform/qcom/iris/iris_vdec.c
new file mode 100644
index 000000000000..4143acedfc57
--- /dev/null
+++ b/drivers/media/platform/qcom/iris/iris_vdec.c
@@ -0,0 +1,659 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <media/v4l2-event.h>
+#include <media/v4l2-mem2mem.h>
+
+#include "iris_buffer.h"
+#include "iris_ctrls.h"
+#include "iris_instance.h"
+#include "iris_power.h"
+#include "iris_vdec.h"
+#include "iris_vpu_buffer.h"
+
+#define DEFAULT_WIDTH 320
+#define DEFAULT_HEIGHT 240
+#define DEFAULT_CODEC_ALIGNMENT 16
+
+int iris_vdec_inst_init(struct iris_inst *inst)
+{
+ struct iris_core *core = inst->core;
+ struct v4l2_format *f;
+
+ inst->fmt_src = kzalloc(sizeof(*inst->fmt_src), GFP_KERNEL);
+ inst->fmt_dst = kzalloc(sizeof(*inst->fmt_dst), GFP_KERNEL);
+ if (!inst->fmt_src || !inst->fmt_dst) {
+ kfree(inst->fmt_src);
+ kfree(inst->fmt_dst);
+ inst->fmt_src = NULL;
+ inst->fmt_dst = NULL;
+ return -ENOMEM;
+ }
+
+ inst->fw_min_count = MIN_BUFFERS;
+
+ f = inst->fmt_src;
+ f->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+ f->fmt.pix_mp.width = DEFAULT_WIDTH;
+ f->fmt.pix_mp.height = DEFAULT_HEIGHT;
+ f->fmt.pix_mp.pixelformat = V4L2_PIX_FMT_H264;
+ f->fmt.pix_mp.num_planes = 1;
+ f->fmt.pix_mp.plane_fmt[0].bytesperline = 0;
+ f->fmt.pix_mp.plane_fmt[0].sizeimage = iris_get_buffer_size(inst, BUF_INPUT);
+ f->fmt.pix_mp.field = V4L2_FIELD_NONE;
+ inst->buffers[BUF_INPUT].min_count = iris_vpu_buf_count(inst, BUF_INPUT);
+ inst->buffers[BUF_INPUT].size = f->fmt.pix_mp.plane_fmt[0].sizeimage;
+
+ f = inst->fmt_dst;
+ f->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ f->fmt.pix_mp.pixelformat = V4L2_PIX_FMT_NV12;
+ f->fmt.pix_mp.width = ALIGN(DEFAULT_WIDTH, 128);
+ f->fmt.pix_mp.height = ALIGN(DEFAULT_HEIGHT, 32);
+ f->fmt.pix_mp.num_planes = 1;
+ f->fmt.pix_mp.plane_fmt[0].bytesperline = ALIGN(DEFAULT_WIDTH, 128);
+ f->fmt.pix_mp.plane_fmt[0].sizeimage = iris_get_buffer_size(inst, BUF_OUTPUT);
+ f->fmt.pix_mp.field = V4L2_FIELD_NONE;
+ f->fmt.pix_mp.colorspace = V4L2_COLORSPACE_DEFAULT;
+ f->fmt.pix_mp.xfer_func = V4L2_XFER_FUNC_DEFAULT;
+ f->fmt.pix_mp.ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
+ f->fmt.pix_mp.quantization = V4L2_QUANTIZATION_DEFAULT;
+ inst->buffers[BUF_OUTPUT].min_count = iris_vpu_buf_count(inst, BUF_OUTPUT);
+ inst->buffers[BUF_OUTPUT].size = f->fmt.pix_mp.plane_fmt[0].sizeimage;
+
+ memcpy(&inst->fw_caps[0], &core->inst_fw_caps[0],
+ INST_FW_CAP_MAX * sizeof(struct platform_inst_fw_cap));
+
+ return iris_ctrls_init(inst);
+}
+
+void iris_vdec_inst_deinit(struct iris_inst *inst)
+{
+ kfree(inst->fmt_dst);
+ kfree(inst->fmt_src);
+}
+
+int iris_vdec_enum_fmt(struct iris_inst *inst, struct v4l2_fmtdesc *f)
+{
+ switch (f->type) {
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
+ f->pixelformat = V4L2_PIX_FMT_H264;
+ f->flags = V4L2_FMT_FLAG_COMPRESSED | V4L2_FMT_FLAG_DYN_RESOLUTION;
+ break;
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
+ f->pixelformat = V4L2_PIX_FMT_NV12;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int iris_vdec_try_fmt(struct iris_inst *inst, struct v4l2_format *f)
+{
+ struct v4l2_pix_format_mplane *pixmp = &f->fmt.pix_mp;
+ struct v4l2_m2m_ctx *m2m_ctx = inst->m2m_ctx;
+ struct v4l2_format *f_inst;
+ struct vb2_queue *src_q;
+
+ memset(pixmp->reserved, 0, sizeof(pixmp->reserved));
+ switch (f->type) {
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
+ if (f->fmt.pix_mp.pixelformat != V4L2_PIX_FMT_H264) {
+ f_inst = inst->fmt_src;
+ f->fmt.pix_mp.width = f_inst->fmt.pix_mp.width;
+ f->fmt.pix_mp.height = f_inst->fmt.pix_mp.height;
+ f->fmt.pix_mp.pixelformat = f_inst->fmt.pix_mp.pixelformat;
+ }
+ break;
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
+ if (f->fmt.pix_mp.pixelformat != V4L2_PIX_FMT_NV12) {
+ f_inst = inst->fmt_dst;
+ f->fmt.pix_mp.pixelformat = f_inst->fmt.pix_mp.pixelformat;
+ f->fmt.pix_mp.width = f_inst->fmt.pix_mp.width;
+ f->fmt.pix_mp.height = f_inst->fmt.pix_mp.height;
+ }
+
+ src_q = v4l2_m2m_get_src_vq(m2m_ctx);
+ if (vb2_is_streaming(src_q)) {
+ f_inst = inst->fmt_src;
+ f->fmt.pix_mp.height = f_inst->fmt.pix_mp.height;
+ f->fmt.pix_mp.width = f_inst->fmt.pix_mp.width;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (pixmp->field == V4L2_FIELD_ANY)
+ pixmp->field = V4L2_FIELD_NONE;
+
+ pixmp->num_planes = 1;
+
+ return 0;
+}
+
+int iris_vdec_s_fmt(struct iris_inst *inst, struct v4l2_format *f)
+{
+ struct v4l2_format *fmt, *output_fmt;
+ struct vb2_queue *q;
+ u32 codec_align;
+
+ q = v4l2_m2m_get_vq(inst->m2m_ctx, f->type);
+ if (!q)
+ return -EINVAL;
+
+ if (vb2_is_busy(q))
+ return -EBUSY;
+
+ iris_vdec_try_fmt(inst, f);
+
+ switch (f->type) {
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
+ if (f->fmt.pix_mp.pixelformat != V4L2_PIX_FMT_H264)
+ return -EINVAL;
+
+ fmt = inst->fmt_src;
+ fmt->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+
+ codec_align = DEFAULT_CODEC_ALIGNMENT;
+ fmt->fmt.pix_mp.width = ALIGN(f->fmt.pix_mp.width, codec_align);
+ fmt->fmt.pix_mp.height = ALIGN(f->fmt.pix_mp.height, codec_align);
+ fmt->fmt.pix_mp.num_planes = 1;
+ fmt->fmt.pix_mp.plane_fmt[0].bytesperline = 0;
+ fmt->fmt.pix_mp.plane_fmt[0].sizeimage = iris_get_buffer_size(inst, BUF_INPUT);
+ inst->buffers[BUF_INPUT].min_count = iris_vpu_buf_count(inst, BUF_INPUT);
+ inst->buffers[BUF_INPUT].size = fmt->fmt.pix_mp.plane_fmt[0].sizeimage;
+
+ fmt->fmt.pix_mp.colorspace = f->fmt.pix_mp.colorspace;
+ fmt->fmt.pix_mp.xfer_func = f->fmt.pix_mp.xfer_func;
+ fmt->fmt.pix_mp.ycbcr_enc = f->fmt.pix_mp.ycbcr_enc;
+ fmt->fmt.pix_mp.quantization = f->fmt.pix_mp.quantization;
+
+ output_fmt = inst->fmt_dst;
+ output_fmt->fmt.pix_mp.colorspace = f->fmt.pix_mp.colorspace;
+ output_fmt->fmt.pix_mp.xfer_func = f->fmt.pix_mp.xfer_func;
+ output_fmt->fmt.pix_mp.ycbcr_enc = f->fmt.pix_mp.ycbcr_enc;
+ output_fmt->fmt.pix_mp.quantization = f->fmt.pix_mp.quantization;
+
+ inst->crop.left = 0;
+ inst->crop.top = 0;
+ inst->crop.width = f->fmt.pix_mp.width;
+ inst->crop.height = f->fmt.pix_mp.height;
+ break;
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
+ fmt = inst->fmt_dst;
+ fmt->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ if (fmt->fmt.pix_mp.pixelformat != V4L2_PIX_FMT_NV12)
+ return -EINVAL;
+ fmt->fmt.pix_mp.pixelformat = f->fmt.pix_mp.pixelformat;
+ fmt->fmt.pix_mp.width = ALIGN(f->fmt.pix_mp.width, 128);
+ fmt->fmt.pix_mp.height = ALIGN(f->fmt.pix_mp.height, 32);
+ fmt->fmt.pix_mp.num_planes = 1;
+ fmt->fmt.pix_mp.plane_fmt[0].bytesperline = ALIGN(f->fmt.pix_mp.width, 128);
+ fmt->fmt.pix_mp.plane_fmt[0].sizeimage = iris_get_buffer_size(inst, BUF_OUTPUT);
+ inst->buffers[BUF_OUTPUT].min_count = iris_vpu_buf_count(inst, BUF_OUTPUT);
+ inst->buffers[BUF_OUTPUT].size = fmt->fmt.pix_mp.plane_fmt[0].sizeimage;
+
+ inst->crop.top = 0;
+ inst->crop.left = 0;
+ inst->crop.width = f->fmt.pix_mp.width;
+ inst->crop.height = f->fmt.pix_mp.height;
+ break;
+ default:
+ return -EINVAL;
+ }
+ memcpy(f, fmt, sizeof(*fmt));
+
+ return 0;
+}
+
+int iris_vdec_subscribe_event(struct iris_inst *inst, const struct v4l2_event_subscription *sub)
+{
+ int ret = 0;
+
+ switch (sub->type) {
+ case V4L2_EVENT_EOS:
+ ret = v4l2_event_subscribe(&inst->fh, sub, 0, NULL);
+ break;
+ case V4L2_EVENT_SOURCE_CHANGE:
+ ret = v4l2_src_change_event_subscribe(&inst->fh, sub);
+ break;
+ case V4L2_EVENT_CTRL:
+ ret = v4l2_ctrl_subscribe_event(&inst->fh, sub);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+void iris_vdec_src_change(struct iris_inst *inst)
+{
+ struct v4l2_m2m_ctx *m2m_ctx = inst->m2m_ctx;
+ struct v4l2_event event = {0};
+ struct vb2_queue *src_q;
+
+ src_q = v4l2_m2m_get_src_vq(m2m_ctx);
+ if (!vb2_is_streaming(src_q))
+ return;
+
+ event.type = V4L2_EVENT_SOURCE_CHANGE;
+ event.u.src_change.changes = V4L2_EVENT_SRC_CH_RESOLUTION;
+ v4l2_event_queue_fh(&inst->fh, &event);
+}
+
+static int iris_vdec_get_num_queued_buffers(struct iris_inst *inst,
+ enum iris_buffer_type type)
+{
+ struct v4l2_m2m_ctx *m2m_ctx = inst->m2m_ctx;
+ struct v4l2_m2m_buffer *buffer, *n;
+ struct iris_buffer *buf;
+ u32 count = 0;
+
+ switch (type) {
+ case BUF_INPUT:
+ v4l2_m2m_for_each_src_buf_safe(m2m_ctx, buffer, n) {
+ buf = to_iris_buffer(&buffer->vb);
+ if (!(buf->attr & BUF_ATTR_QUEUED))
+ continue;
+ count++;
+ }
+ return count;
+ case BUF_OUTPUT:
+ v4l2_m2m_for_each_dst_buf_safe(m2m_ctx, buffer, n) {
+ buf = to_iris_buffer(&buffer->vb);
+ if (!(buf->attr & BUF_ATTR_QUEUED))
+ continue;
+ count++;
+ }
+ return count;
+ default:
+ return count;
+ }
+}
+
+static void iris_vdec_flush_deferred_buffers(struct iris_inst *inst,
+ enum iris_buffer_type type)
+{
+ struct v4l2_m2m_ctx *m2m_ctx = inst->m2m_ctx;
+ struct v4l2_m2m_buffer *buffer, *n;
+ struct iris_buffer *buf;
+
+ if (type == BUF_INPUT) {
+ v4l2_m2m_for_each_src_buf_safe(m2m_ctx, buffer, n) {
+ buf = to_iris_buffer(&buffer->vb);
+ if (buf->attr & BUF_ATTR_DEFERRED) {
+ if (!(buf->attr & BUF_ATTR_BUFFER_DONE)) {
+ buf->attr |= BUF_ATTR_BUFFER_DONE;
+ buf->data_size = 0;
+ iris_vb2_buffer_done(inst, buf);
+ }
+ }
+ }
+ } else {
+ v4l2_m2m_for_each_dst_buf_safe(m2m_ctx, buffer, n) {
+ buf = to_iris_buffer(&buffer->vb);
+ if (buf->attr & BUF_ATTR_DEFERRED) {
+ if (!(buf->attr & BUF_ATTR_BUFFER_DONE)) {
+ buf->attr |= BUF_ATTR_BUFFER_DONE;
+ buf->data_size = 0;
+ iris_vb2_buffer_done(inst, buf);
+ }
+ }
+ }
+ }
+}
+
+static void iris_vdec_kill_session(struct iris_inst *inst)
+{
+ const struct iris_hfi_command_ops *hfi_ops = inst->core->hfi_ops;
+
+ if (!inst->session_id)
+ return;
+
+ hfi_ops->session_close(inst);
+ iris_inst_change_state(inst, IRIS_INST_ERROR);
+}
+
+int iris_vdec_session_streamoff(struct iris_inst *inst, u32 plane)
+{
+ const struct iris_hfi_command_ops *hfi_ops = inst->core->hfi_ops;
+ enum iris_buffer_type buffer_type;
+ u32 count;
+ int ret;
+
+ switch (plane) {
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
+ buffer_type = BUF_INPUT;
+ break;
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
+ buffer_type = BUF_OUTPUT;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = hfi_ops->session_stop(inst, plane);
+ if (ret)
+ goto error;
+
+ count = iris_vdec_get_num_queued_buffers(inst, buffer_type);
+ if (count) {
+ ret = -EINVAL;
+ goto error;
+ }
+
+ ret = iris_inst_state_change_streamoff(inst, plane);
+ if (ret)
+ goto error;
+
+ iris_vdec_flush_deferred_buffers(inst, buffer_type);
+
+ return 0;
+
+error:
+ iris_vdec_kill_session(inst);
+ iris_vdec_flush_deferred_buffers(inst, buffer_type);
+
+ return ret;
+}
+
+static int iris_vdec_process_streamon_input(struct iris_inst *inst)
+{
+ const struct iris_hfi_command_ops *hfi_ops = inst->core->hfi_ops;
+ enum iris_inst_sub_state set_sub_state = 0;
+ int ret;
+
+ iris_scale_power(inst);
+
+ ret = hfi_ops->session_start(inst, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+ if (ret)
+ return ret;
+
+ if (inst->sub_state & IRIS_INST_SUB_INPUT_PAUSE) {
+ ret = iris_inst_change_sub_state(inst, IRIS_INST_SUB_INPUT_PAUSE, 0);
+ if (ret)
+ return ret;
+ }
+
+ if (inst->sub_state & IRIS_INST_SUB_DRC ||
+ inst->sub_state & IRIS_INST_SUB_DRAIN ||
+ inst->sub_state & IRIS_INST_SUB_FIRST_IPSC) {
+ if (!(inst->sub_state & IRIS_INST_SUB_INPUT_PAUSE)) {
+ if (hfi_ops->session_pause) {
+ ret = hfi_ops->session_pause(inst,
+ V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+ if (ret)
+ return ret;
+ }
+ set_sub_state = IRIS_INST_SUB_INPUT_PAUSE;
+ }
+ }
+
+ ret = iris_inst_state_change_streamon(inst, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+ if (ret)
+ return ret;
+
+ return iris_inst_change_sub_state(inst, 0, set_sub_state);
+}
+
+int iris_vdec_streamon_input(struct iris_inst *inst)
+{
+ int ret;
+
+ ret = iris_set_properties(inst, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+ if (ret)
+ return ret;
+
+ ret = iris_alloc_and_queue_persist_bufs(inst);
+ if (ret)
+ return ret;
+
+ iris_get_internal_buffers(inst, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+
+ ret = iris_destroy_internal_buffers(inst, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+ if (ret)
+ return ret;
+
+ ret = iris_create_internal_buffers(inst, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+ if (ret)
+ return ret;
+
+ ret = iris_queue_internal_buffers(inst, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+ if (ret)
+ return ret;
+
+ return iris_vdec_process_streamon_input(inst);
+}
+
+static int iris_vdec_process_streamon_output(struct iris_inst *inst)
+{
+ const struct iris_hfi_command_ops *hfi_ops = inst->core->hfi_ops;
+ bool drain_active = false, drc_active = false;
+ enum iris_inst_sub_state clear_sub_state = 0;
+ int ret = 0;
+
+ iris_scale_power(inst);
+
+ drain_active = inst->sub_state & IRIS_INST_SUB_DRAIN &&
+ inst->sub_state & IRIS_INST_SUB_DRAIN_LAST;
+
+ drc_active = inst->sub_state & IRIS_INST_SUB_DRC &&
+ inst->sub_state & IRIS_INST_SUB_DRC_LAST;
+
+ if (drc_active)
+ clear_sub_state = IRIS_INST_SUB_DRC | IRIS_INST_SUB_DRC_LAST;
+ else if (drain_active)
+ clear_sub_state = IRIS_INST_SUB_DRAIN | IRIS_INST_SUB_DRAIN_LAST;
+
+ if (inst->sub_state & IRIS_INST_SUB_INPUT_PAUSE) {
+ ret = iris_alloc_and_queue_input_int_bufs(inst);
+ if (ret)
+ return ret;
+ ret = iris_set_stage(inst, STAGE);
+ if (ret)
+ return ret;
+ ret = iris_set_pipe(inst, PIPE);
+ if (ret)
+ return ret;
+ }
+
+ if (inst->state == IRIS_INST_INPUT_STREAMING &&
+ inst->sub_state & IRIS_INST_SUB_INPUT_PAUSE) {
+ if (!drain_active)
+ ret = hfi_ops->session_resume_drc(inst,
+ V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+ else if (hfi_ops->session_resume_drain)
+ ret = hfi_ops->session_resume_drain(inst,
+ V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+ if (ret)
+ return ret;
+ clear_sub_state |= IRIS_INST_SUB_INPUT_PAUSE;
+ }
+
+ if (inst->sub_state & IRIS_INST_SUB_FIRST_IPSC)
+ clear_sub_state |= IRIS_INST_SUB_FIRST_IPSC;
+
+ ret = hfi_ops->session_start(inst, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
+ if (ret)
+ return ret;
+
+ if (inst->sub_state & IRIS_INST_SUB_OUTPUT_PAUSE)
+ clear_sub_state |= IRIS_INST_SUB_OUTPUT_PAUSE;
+
+ ret = iris_inst_state_change_streamon(inst, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
+ if (ret)
+ return ret;
+
+ return iris_inst_change_sub_state(inst, clear_sub_state, 0);
+}
+
+int iris_vdec_streamon_output(struct iris_inst *inst)
+{
+ const struct iris_hfi_command_ops *hfi_ops = inst->core->hfi_ops;
+ int ret;
+
+ ret = hfi_ops->session_set_config_params(inst, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
+ if (ret)
+ return ret;
+
+ iris_get_internal_buffers(inst, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
+
+ ret = iris_destroy_internal_buffers(inst, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
+ if (ret)
+ return ret;
+
+ ret = iris_create_internal_buffers(inst, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
+ if (ret)
+ return ret;
+
+ ret = iris_vdec_process_streamon_output(inst);
+ if (ret)
+ goto error;
+
+ ret = iris_queue_internal_buffers(inst, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
+ if (ret)
+ goto error;
+
+ return ret;
+
+error:
+ iris_vdec_session_streamoff(inst, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
+
+ return ret;
+}
+
+static int
+iris_vdec_vb2_buffer_to_driver(struct vb2_buffer *vb2, struct iris_buffer *buf)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb2);
+
+ buf->type = iris_v4l2_type_to_driver(vb2->type);
+ buf->index = vb2->index;
+ buf->fd = vb2->planes[0].m.fd;
+ buf->buffer_size = vb2->planes[0].length;
+ buf->data_offset = vb2->planes[0].data_offset;
+ buf->data_size = vb2->planes[0].bytesused - vb2->planes[0].data_offset;
+ buf->flags = vbuf->flags;
+ buf->timestamp = vb2->timestamp;
+ buf->attr = 0;
+
+ return 0;
+}
+
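+/*
+ * Save the queued buffer's timestamp, flags and timecode in a small ring
+ * buffer indexed by metadata_idx, wrapping around at ARRAY_SIZE(inst->tss).
+ */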
+static void
+iris_set_ts_metadata(struct iris_inst *inst, struct vb2_v4l2_buffer *vbuf)
+{
+ u32 mask = V4L2_BUF_FLAG_TIMECODE | V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
+ struct vb2_buffer *vb = &vbuf->vb2_buf;
+ u64 ts_us = vb->timestamp;
+
+ if (inst->metadata_idx >= ARRAY_SIZE(inst->tss))
+ inst->metadata_idx = 0;
+
+ do_div(ts_us, NSEC_PER_USEC);
+
+ inst->tss[inst->metadata_idx].flags = vbuf->flags & mask;
+ inst->tss[inst->metadata_idx].tc = vbuf->timecode;
+ inst->tss[inst->metadata_idx].ts_us = ts_us;
+ inst->tss[inst->metadata_idx].ts_ns = vb->timestamp;
+
+ inst->metadata_idx++;
+}
+
+int iris_vdec_qbuf(struct iris_inst *inst, struct vb2_v4l2_buffer *vbuf)
+{
+ struct iris_buffer *buf = to_iris_buffer(vbuf);
+ struct vb2_buffer *vb2 = &vbuf->vb2_buf;
+ struct vb2_queue *q;
+ int ret;
+
+ ret = iris_vdec_vb2_buffer_to_driver(vb2, buf);
+ if (ret)
+ return ret;
+
+ if (buf->type == BUF_INPUT)
+ iris_set_ts_metadata(inst, vbuf);
+
+ q = v4l2_m2m_get_vq(inst->m2m_ctx, vb2->type);
+ if (!vb2_is_streaming(q)) {
+ buf->attr |= BUF_ATTR_DEFERRED;
+ return 0;
+ }
+
+ iris_scale_power(inst);
+
+ return iris_queue_buffer(inst, buf);
+}
+
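+/*
+ * Handle V4L2_DEC_CMD_START: once the LAST buffer of a dynamic resolution
+ * change or drain has been dequeued, clear the corresponding sub-states and
+ * resume any firmware ports that were paused for the sequence.
+ */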
+int iris_vdec_start_cmd(struct iris_inst *inst)
+{
+ const struct iris_hfi_command_ops *hfi_ops = inst->core->hfi_ops;
+ enum iris_inst_sub_state clear_sub_state = 0;
+ struct vb2_queue *dst_vq;
+ int ret;
+
+ dst_vq = v4l2_m2m_get_dst_vq(inst->m2m_ctx);
+
+ if (inst->sub_state & IRIS_INST_SUB_DRC &&
+ inst->sub_state & IRIS_INST_SUB_DRC_LAST) {
+ vb2_clear_last_buffer_dequeued(dst_vq);
+ clear_sub_state = IRIS_INST_SUB_DRC | IRIS_INST_SUB_DRC_LAST;
+
+ if (inst->sub_state & IRIS_INST_SUB_INPUT_PAUSE) {
+ ret = hfi_ops->session_resume_drc(inst,
+ V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+ if (ret)
+ return ret;
+ clear_sub_state |= IRIS_INST_SUB_INPUT_PAUSE;
+ }
+ if (inst->sub_state & IRIS_INST_SUB_OUTPUT_PAUSE) {
+ ret = hfi_ops->session_resume_drc(inst,
+ V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
+ if (ret)
+ return ret;
+ clear_sub_state |= IRIS_INST_SUB_OUTPUT_PAUSE;
+ }
+ } else if (inst->sub_state & IRIS_INST_SUB_DRAIN &&
+ inst->sub_state & IRIS_INST_SUB_DRAIN_LAST) {
+ vb2_clear_last_buffer_dequeued(dst_vq);
+ clear_sub_state = IRIS_INST_SUB_DRAIN | IRIS_INST_SUB_DRAIN_LAST;
+ if (inst->sub_state & IRIS_INST_SUB_INPUT_PAUSE) {
+ if (hfi_ops->session_resume_drain) {
+ ret = hfi_ops->session_resume_drain(inst,
+ V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+ if (ret)
+ return ret;
+ }
+
+ clear_sub_state |= IRIS_INST_SUB_INPUT_PAUSE;
+ }
+ if (inst->sub_state & IRIS_INST_SUB_OUTPUT_PAUSE) {
+ if (hfi_ops->session_resume_drain) {
+ ret = hfi_ops->session_resume_drain(inst,
+ V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
+ if (ret)
+ return ret;
+ }
+
+ clear_sub_state |= IRIS_INST_SUB_OUTPUT_PAUSE;
+ }
+ } else {
+ dev_err(inst->core->dev, "start called before receiving last_flag\n");
+ iris_inst_change_state(inst, IRIS_INST_ERROR);
+ return -EBUSY;
+ }
+
+ return iris_inst_change_sub_state(inst, clear_sub_state, 0);
+}
+
+int iris_vdec_stop_cmd(struct iris_inst *inst)
+{
+ const struct iris_hfi_command_ops *hfi_ops = inst->core->hfi_ops;
+ int ret;
+
+ ret = hfi_ops->session_drain(inst, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+ if (ret)
+ return ret;
+
+ return iris_inst_change_sub_state(inst, 0, IRIS_INST_SUB_DRAIN);
+}
diff --git a/drivers/media/platform/qcom/iris/iris_vdec.h b/drivers/media/platform/qcom/iris/iris_vdec.h
new file mode 100644
index 000000000000..b24932dc511a
--- /dev/null
+++ b/drivers/media/platform/qcom/iris/iris_vdec.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef __IRIS_VDEC_H__
+#define __IRIS_VDEC_H__
+
+struct iris_inst;
+
+int iris_vdec_inst_init(struct iris_inst *inst);
+void iris_vdec_inst_deinit(struct iris_inst *inst);
+int iris_vdec_enum_fmt(struct iris_inst *inst, struct v4l2_fmtdesc *f);
+int iris_vdec_try_fmt(struct iris_inst *inst, struct v4l2_format *f);
+int iris_vdec_s_fmt(struct iris_inst *inst, struct v4l2_format *f);
+int iris_vdec_subscribe_event(struct iris_inst *inst, const struct v4l2_event_subscription *sub);
+void iris_vdec_src_change(struct iris_inst *inst);
+int iris_vdec_streamon_input(struct iris_inst *inst);
+int iris_vdec_streamon_output(struct iris_inst *inst);
+int iris_vdec_qbuf(struct iris_inst *inst, struct vb2_v4l2_buffer *vbuf);
+int iris_vdec_start_cmd(struct iris_inst *inst);
+int iris_vdec_stop_cmd(struct iris_inst *inst);
+int iris_vdec_session_streamoff(struct iris_inst *inst, u32 plane);
+
+#endif
diff --git a/drivers/media/platform/qcom/iris/iris_vidc.c b/drivers/media/platform/qcom/iris/iris_vidc.c
new file mode 100644
index 000000000000..ca0f4e310f77
--- /dev/null
+++ b/drivers/media/platform/qcom/iris/iris_vidc.c
@@ -0,0 +1,453 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/pm_runtime.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-mem2mem.h>
+#include <media/videobuf2-dma-contig.h>
+
+#include "iris_vidc.h"
+#include "iris_instance.h"
+#include "iris_vdec.h"
+#include "iris_vb2.h"
+#include "iris_vpu_buffer.h"
+#include "iris_platform_common.h"
+
+#define IRIS_DRV_NAME "iris_driver"
+#define IRIS_BUS_NAME "platform:iris_icc"
+#define STEP_WIDTH 1
+#define STEP_HEIGHT 1
+
+static void iris_v4l2_fh_init(struct iris_inst *inst)
+{
+ v4l2_fh_init(&inst->fh, inst->core->vdev_dec);
+ inst->fh.ctrl_handler = &inst->ctrl_handler;
+ v4l2_fh_add(&inst->fh);
+}
+
+static void iris_v4l2_fh_deinit(struct iris_inst *inst)
+{
+ v4l2_fh_del(&inst->fh);
+ inst->fh.ctrl_handler = NULL;
+ v4l2_fh_exit(&inst->fh);
+}
+
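+/*
+ * Register the new instance with the core. The instance list is only
+ * extended while the number of open sessions is below the platform's
+ * max_session_count; beyond that the instance is simply left untracked.
+ */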
+static void iris_add_session(struct iris_inst *inst)
+{
+ struct iris_core *core = inst->core;
+ struct iris_inst *iter;
+ u32 count = 0;
+
+ mutex_lock(&core->lock);
+
+ list_for_each_entry(iter, &core->instances, list)
+ count++;
+
+ if (count < core->iris_platform_data->max_session_count)
+ list_add_tail(&inst->list, &core->instances);
+
+ mutex_unlock(&core->lock);
+}
+
+static void iris_remove_session(struct iris_inst *inst)
+{
+ struct iris_core *core = inst->core;
+ struct iris_inst *iter, *temp;
+
+ mutex_lock(&core->lock);
+ list_for_each_entry_safe(iter, temp, &core->instances, list) {
+ if (iter->session_id == inst->session_id) {
+ list_del_init(&iter->list);
+ break;
+ }
+ }
+ mutex_unlock(&core->lock);
+}
+
+static inline struct iris_inst *iris_get_inst(struct file *filp, void *fh)
+{
+ return container_of(filp->private_data, struct iris_inst, fh);
+}
+
+static void iris_m2m_device_run(void *priv)
+{
+}
+
+static void iris_m2m_job_abort(void *priv)
+{
+ struct iris_inst *inst = priv;
+ struct v4l2_m2m_ctx *m2m_ctx = inst->m2m_ctx;
+
+ v4l2_m2m_job_finish(inst->m2m_dev, m2m_ctx);
+}
+
+static const struct v4l2_m2m_ops iris_m2m_ops = {
+ .device_run = iris_m2m_device_run,
+ .job_abort = iris_m2m_job_abort,
+};
+
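+/*
+ * Set up the OUTPUT (bitstream) and CAPTURE (decoded frames) vb2 queues
+ * for the mem2mem context: both use DMA-contiguous memory, copy
+ * timestamps from the source buffers and share the instance queue lock.
+ */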
+static int
+iris_m2m_queue_init(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq)
+{
+ struct iris_inst *inst = priv;
+ int ret;
+
+ src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+ src_vq->io_modes = VB2_MMAP | VB2_DMABUF;
+ src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ src_vq->ops = inst->core->iris_vb2_ops;
+ src_vq->mem_ops = &vb2_dma_contig_memops;
+ src_vq->drv_priv = inst;
+ src_vq->buf_struct_size = sizeof(struct iris_buffer);
+ src_vq->min_reqbufs_allocation = MIN_BUFFERS;
+ src_vq->dev = inst->core->dev;
+ src_vq->lock = &inst->ctx_q_lock;
+ ret = vb2_queue_init(src_vq);
+ if (ret)
+ return ret;
+
+ dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ dst_vq->io_modes = VB2_MMAP | VB2_DMABUF;
+ dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ dst_vq->ops = inst->core->iris_vb2_ops;
+ dst_vq->mem_ops = &vb2_dma_contig_memops;
+ dst_vq->drv_priv = inst;
+ dst_vq->buf_struct_size = sizeof(struct iris_buffer);
+ dst_vq->min_reqbufs_allocation = MIN_BUFFERS;
+ dst_vq->dev = inst->core->dev;
+ dst_vq->lock = &inst->ctx_q_lock;
+
+ return vb2_queue_init(dst_vq);
+}
+
+int iris_open(struct file *filp)
+{
+ struct iris_core *core = video_drvdata(filp);
+ struct iris_inst *inst;
+ int ret;
+
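+ /*
+ * Power up the core just long enough to run the one-time core
+ * initialization; the runtime PM reference is dropped again below,
+ * before the instance itself is created.
+ */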
+ ret = pm_runtime_resume_and_get(core->dev);
+ if (ret < 0)
+ return ret;
+
+ ret = iris_core_init(core);
+ if (ret) {
+ dev_err(core->dev, "core init failed\n");
+ pm_runtime_put_sync(core->dev);
+ return ret;
+ }
+
+ pm_runtime_put_sync(core->dev);
+
+ inst = core->iris_platform_data->get_instance();
+ if (!inst)
+ return -ENOMEM;
+
+ inst->core = core;
+ inst->session_id = hash32_ptr(inst);
+ inst->state = IRIS_INST_DEINIT;
+
+ mutex_init(&inst->lock);
+ mutex_init(&inst->ctx_q_lock);
+
+ INIT_LIST_HEAD(&inst->buffers[BUF_BIN].list);
+ INIT_LIST_HEAD(&inst->buffers[BUF_ARP].list);
+ INIT_LIST_HEAD(&inst->buffers[BUF_COMV].list);
+ INIT_LIST_HEAD(&inst->buffers[BUF_NON_COMV].list);
+ INIT_LIST_HEAD(&inst->buffers[BUF_LINE].list);
+ INIT_LIST_HEAD(&inst->buffers[BUF_DPB].list);
+ INIT_LIST_HEAD(&inst->buffers[BUF_PERSIST].list);
+ INIT_LIST_HEAD(&inst->buffers[BUF_SCRATCH_1].list);
+ init_completion(&inst->completion);
+ init_completion(&inst->flush_completion);
+
+ iris_v4l2_fh_init(inst);
+
+ inst->m2m_dev = v4l2_m2m_init(&iris_m2m_ops);
+ if (IS_ERR_OR_NULL(inst->m2m_dev)) {
+ ret = -EINVAL;
+ goto fail_v4l2_fh_deinit;
+ }
+
+ inst->m2m_ctx = v4l2_m2m_ctx_init(inst->m2m_dev, inst, iris_m2m_queue_init);
+ if (IS_ERR_OR_NULL(inst->m2m_ctx)) {
+ ret = -EINVAL;
+ goto fail_m2m_release;
+ }
+
+ ret = iris_vdec_inst_init(inst);
+ if (ret)
+ goto fail_m2m_ctx_release;
+
+ iris_add_session(inst);
+
+ inst->fh.m2m_ctx = inst->m2m_ctx;
+ filp->private_data = &inst->fh;
+
+ return 0;
+
+fail_m2m_ctx_release:
+ v4l2_m2m_ctx_release(inst->m2m_ctx);
+fail_m2m_release:
+ v4l2_m2m_release(inst->m2m_dev);
+fail_v4l2_fh_deinit:
+ iris_v4l2_fh_deinit(inst);
+ mutex_destroy(&inst->ctx_q_lock);
+ mutex_destroy(&inst->lock);
+ kfree(inst);
+
+ return ret;
+}
+
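+/*
+ * Issue the HFI session close command and wait for the firmware
+ * response, unless the session was never initialized or the close
+ * command itself could not be queued.
+ */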
+static void iris_session_close(struct iris_inst *inst)
+{
+ const struct iris_hfi_command_ops *hfi_ops = inst->core->hfi_ops;
+ bool wait_for_response = true;
+ int ret;
+
+ if (inst->state == IRIS_INST_DEINIT)
+ return;
+
+ reinit_completion(&inst->completion);
+
+ ret = hfi_ops->session_close(inst);
+ if (ret)
+ wait_for_response = false;
+
+ if (wait_for_response)
+ iris_wait_for_session_response(inst, false);
+}
+
+int iris_close(struct file *filp)
+{
+ struct iris_inst *inst = iris_get_inst(filp, NULL);
+
+ v4l2_ctrl_handler_free(&inst->ctrl_handler);
+ v4l2_m2m_ctx_release(inst->m2m_ctx);
+ v4l2_m2m_release(inst->m2m_dev);
+ mutex_lock(&inst->lock);
+ iris_vdec_inst_deinit(inst);
+ iris_session_close(inst);
+ iris_inst_change_state(inst, IRIS_INST_DEINIT);
+ iris_v4l2_fh_deinit(inst);
+ iris_destroy_internal_buffers(inst, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+ iris_destroy_internal_buffers(inst, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
+ iris_remove_session(inst);
+ mutex_unlock(&inst->lock);
+ mutex_destroy(&inst->ctx_q_lock);
+ mutex_destroy(&inst->lock);
+ kfree(inst);
+ filp->private_data = NULL;
+
+ return 0;
+}
+
+static int iris_enum_fmt(struct file *filp, void *fh, struct v4l2_fmtdesc *f)
+{
+ struct iris_inst *inst = iris_get_inst(filp, NULL);
+
+ if (f->index)
+ return -EINVAL;
+
+ return iris_vdec_enum_fmt(inst, f);
+}
+
+static int iris_try_fmt_vid_mplane(struct file *filp, void *fh, struct v4l2_format *f)
+{
+ struct iris_inst *inst = iris_get_inst(filp, NULL);
+ int ret;
+
+ mutex_lock(&inst->lock);
+ ret = iris_vdec_try_fmt(inst, f);
+ mutex_unlock(&inst->lock);
+
+ return ret;
+}
+
+static int iris_s_fmt_vid_mplane(struct file *filp, void *fh, struct v4l2_format *f)
+{
+ struct iris_inst *inst = iris_get_inst(filp, NULL);
+ int ret;
+
+ mutex_lock(&inst->lock);
+ ret = iris_vdec_s_fmt(inst, f);
+ mutex_unlock(&inst->lock);
+
+ return ret;
+}
+
+static int iris_g_fmt_vid_mplane(struct file *filp, void *fh, struct v4l2_format *f)
+{
+ struct iris_inst *inst = iris_get_inst(filp, NULL);
+ int ret = 0;
+
+ mutex_lock(&inst->lock);
+ if (V4L2_TYPE_IS_OUTPUT(f->type))
+ *f = *inst->fmt_src;
+ else if (V4L2_TYPE_IS_CAPTURE(f->type))
+ *f = *inst->fmt_dst;
+ else
+ ret = -EINVAL;
+
+ mutex_unlock(&inst->lock);
+
+ return ret;
+}
+
+static int iris_enum_framesizes(struct file *filp, void *fh,
+ struct v4l2_frmsizeenum *fsize)
+{
+ struct iris_inst *inst = iris_get_inst(filp, NULL);
+ struct platform_inst_caps *caps;
+
+ if (fsize->index)
+ return -EINVAL;
+
+ if (fsize->pixel_format != V4L2_PIX_FMT_H264 &&
+ fsize->pixel_format != V4L2_PIX_FMT_NV12)
+ return -EINVAL;
+
+ caps = inst->core->iris_platform_data->inst_caps;
+
+ fsize->type = V4L2_FRMSIZE_TYPE_STEPWISE;
+ fsize->stepwise.min_width = caps->min_frame_width;
+ fsize->stepwise.max_width = caps->max_frame_width;
+ fsize->stepwise.step_width = STEP_WIDTH;
+ fsize->stepwise.min_height = caps->min_frame_height;
+ fsize->stepwise.max_height = caps->max_frame_height;
+ fsize->stepwise.step_height = STEP_HEIGHT;
+
+ return 0;
+}
+
+static int iris_querycap(struct file *filp, void *fh, struct v4l2_capability *cap)
+{
+ strscpy(cap->driver, IRIS_DRV_NAME, sizeof(cap->driver));
+ strscpy(cap->card, "Iris Decoder", sizeof(cap->card));
+
+ return 0;
+}
+
+static int iris_g_selection(struct file *filp, void *fh, struct v4l2_selection *s)
+{
+ struct iris_inst *inst = iris_get_inst(filp, NULL);
+
+ if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+
+ switch (s->target) {
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ case V4L2_SEL_TGT_CROP_DEFAULT:
+ case V4L2_SEL_TGT_CROP:
+ case V4L2_SEL_TGT_COMPOSE_BOUNDS:
+ case V4L2_SEL_TGT_COMPOSE_PADDED:
+ case V4L2_SEL_TGT_COMPOSE_DEFAULT:
+ case V4L2_SEL_TGT_COMPOSE:
+ s->r.left = inst->crop.left;
+ s->r.top = inst->crop.top;
+ s->r.width = inst->crop.width;
+ s->r.height = inst->crop.height;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int iris_subscribe_event(struct v4l2_fh *fh, const struct v4l2_event_subscription *sub)
+{
+ struct iris_inst *inst = container_of(fh, struct iris_inst, fh);
+
+ return iris_vdec_subscribe_event(inst, sub);
+}
+
+static int iris_dec_cmd(struct file *filp, void *fh,
+ struct v4l2_decoder_cmd *dec)
+{
+ struct iris_inst *inst = iris_get_inst(filp, NULL);
+ int ret = 0;
+
+ mutex_lock(&inst->lock);
+
+ ret = v4l2_m2m_ioctl_decoder_cmd(filp, fh, dec);
+ if (ret)
+ goto unlock;
+
+ if (inst->state == IRIS_INST_DEINIT)
+ goto unlock;
+
+ if (!iris_allow_cmd(inst, dec->cmd)) {
+ ret = -EBUSY;
+ goto unlock;
+ }
+
+ if (dec->cmd == V4L2_DEC_CMD_START)
+ ret = iris_vdec_start_cmd(inst);
+ else if (dec->cmd == V4L2_DEC_CMD_STOP)
+ ret = iris_vdec_stop_cmd(inst);
+ else
+ ret = -EINVAL;
+
+unlock:
+ mutex_unlock(&inst->lock);
+
+ return ret;
+}
+
+static const struct v4l2_file_operations iris_v4l2_file_ops = {
+ .owner = THIS_MODULE,
+ .open = iris_open,
+ .release = iris_close,
+ .unlocked_ioctl = video_ioctl2,
+ .poll = v4l2_m2m_fop_poll,
+ .mmap = v4l2_m2m_fop_mmap,
+};
+
+static const struct vb2_ops iris_vb2_ops = {
+ .buf_init = iris_vb2_buf_init,
+ .queue_setup = iris_vb2_queue_setup,
+ .start_streaming = iris_vb2_start_streaming,
+ .stop_streaming = iris_vb2_stop_streaming,
+ .buf_prepare = iris_vb2_buf_prepare,
+ .buf_out_validate = iris_vb2_buf_out_validate,
+ .buf_queue = iris_vb2_buf_queue,
+};
+
+static const struct v4l2_ioctl_ops iris_v4l2_ioctl_ops = {
+ .vidioc_enum_fmt_vid_cap = iris_enum_fmt,
+ .vidioc_enum_fmt_vid_out = iris_enum_fmt,
+ .vidioc_try_fmt_vid_cap_mplane = iris_try_fmt_vid_mplane,
+ .vidioc_try_fmt_vid_out_mplane = iris_try_fmt_vid_mplane,
+ .vidioc_s_fmt_vid_cap_mplane = iris_s_fmt_vid_mplane,
+ .vidioc_s_fmt_vid_out_mplane = iris_s_fmt_vid_mplane,
+ .vidioc_g_fmt_vid_cap_mplane = iris_g_fmt_vid_mplane,
+ .vidioc_g_fmt_vid_out_mplane = iris_g_fmt_vid_mplane,
+ .vidioc_enum_framesizes = iris_enum_framesizes,
+ .vidioc_reqbufs = v4l2_m2m_ioctl_reqbufs,
+ .vidioc_querybuf = v4l2_m2m_ioctl_querybuf,
+ .vidioc_create_bufs = v4l2_m2m_ioctl_create_bufs,
+ .vidioc_prepare_buf = v4l2_m2m_ioctl_prepare_buf,
+ .vidioc_expbuf = v4l2_m2m_ioctl_expbuf,
+ .vidioc_qbuf = v4l2_m2m_ioctl_qbuf,
+ .vidioc_dqbuf = v4l2_m2m_ioctl_dqbuf,
+ .vidioc_remove_bufs = v4l2_m2m_ioctl_remove_bufs,
+ .vidioc_querycap = iris_querycap,
+ .vidioc_g_selection = iris_g_selection,
+ .vidioc_subscribe_event = iris_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+ .vidioc_streamon = v4l2_m2m_ioctl_streamon,
+ .vidioc_streamoff = v4l2_m2m_ioctl_streamoff,
+ .vidioc_try_decoder_cmd = v4l2_m2m_ioctl_try_decoder_cmd,
+ .vidioc_decoder_cmd = iris_dec_cmd,
+};
+
+void iris_init_ops(struct iris_core *core)
+{
+ core->iris_v4l2_file_ops = &iris_v4l2_file_ops;
+ core->iris_vb2_ops = &iris_vb2_ops;
+ core->iris_v4l2_ioctl_ops = &iris_v4l2_ioctl_ops;
+}
diff --git a/drivers/media/platform/qcom/iris/iris_vidc.h b/drivers/media/platform/qcom/iris/iris_vidc.h
new file mode 100644
index 000000000000..a26054ff55b5
--- /dev/null
+++ b/drivers/media/platform/qcom/iris/iris_vidc.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef __IRIS_VIDC_H__
+#define __IRIS_VIDC_H__
+
+struct iris_core;
+
+void iris_init_ops(struct iris_core *core);
+int iris_open(struct file *filp);
+int iris_close(struct file *filp);
+
+#endif
diff --git a/drivers/media/platform/qcom/iris/iris_vpu2.c b/drivers/media/platform/qcom/iris/iris_vpu2.c
new file mode 100644
index 000000000000..8f502aed43ce
--- /dev/null
+++ b/drivers/media/platform/qcom/iris/iris_vpu2.c
@@ -0,0 +1,38 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include "iris_instance.h"
+#include "iris_vpu_common.h"
+
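+/*
+ * Estimate the required VPU2 core clock from the macroblock rate: the
+ * pixel-processing (VPP) cycle count with a 21/20 overhead is compared
+ * against the stream-processing (VSP) cycle count plus a bitrate-based
+ * term, and the larger of the two is returned.
+ */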
+static u64 iris_vpu2_calc_freq(struct iris_inst *inst, size_t data_size)
+{
+ struct platform_inst_caps *caps = inst->core->iris_platform_data->inst_caps;
+ struct v4l2_format *inp_f = inst->fmt_src;
+ u32 mbs_per_second, mbpf, height, width;
+ unsigned long vpp_freq, vsp_freq;
+ u32 fps = DEFAULT_FPS;
+
+ width = max(inp_f->fmt.pix_mp.width, inst->crop.width);
+ height = max(inp_f->fmt.pix_mp.height, inst->crop.height);
+
+ mbpf = NUM_MBS_PER_FRAME(height, width);
+ mbs_per_second = mbpf * fps;
+
+ vpp_freq = mbs_per_second * caps->mb_cycles_vpp;
+
+ /* 21 / 20 is overhead factor */
+ vpp_freq += vpp_freq / 20;
+ vsp_freq = mbs_per_second * caps->mb_cycles_vsp;
+
+ /* bitstream (VSP) cycles, with a 10 / 7 overhead factor */
+ vsp_freq += ((fps * data_size * 8) * 10) / 7;
+
+ return max(vpp_freq, vsp_freq);
+}
+
+const struct vpu_ops iris_vpu2_ops = {
+ .power_off_hw = iris_vpu_power_off_hw,
+ .calc_freq = iris_vpu2_calc_freq,
+};
diff --git a/drivers/media/platform/qcom/iris/iris_vpu3.c b/drivers/media/platform/qcom/iris/iris_vpu3.c
new file mode 100644
index 000000000000..b484638e6105
--- /dev/null
+++ b/drivers/media/platform/qcom/iris/iris_vpu3.c
@@ -0,0 +1,122 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/iopoll.h>
+
+#include "iris_instance.h"
+#include "iris_vpu_common.h"
+#include "iris_vpu_register_defines.h"
+
+#define AON_MVP_NOC_RESET 0x0001F000
+
+#define WRAPPER_CORE_CLOCK_CONFIG (WRAPPER_BASE_OFFS + 0x88)
+#define CORE_CLK_RUN 0x0
+
+#define CPU_CS_AHB_BRIDGE_SYNC_RESET (CPU_CS_BASE_OFFS + 0x160)
+#define CORE_BRIDGE_SW_RESET BIT(0)
+#define CORE_BRIDGE_HW_RESET_DISABLE BIT(1)
+
+#define AON_WRAPPER_MVP_NOC_RESET_REQ (AON_MVP_NOC_RESET + 0x000)
+#define VIDEO_NOC_RESET_REQ (BIT(0) | BIT(1))
+
+#define AON_WRAPPER_MVP_NOC_RESET_ACK (AON_MVP_NOC_RESET + 0x004)
+
+#define VCODEC_SS_IDLE_STATUSN (VCODEC_BASE_OFFS + 0x70)
+
+static bool iris_vpu3_hw_power_collapsed(struct iris_core *core)
+{
+ u32 value, pwr_status;
+
+ value = readl(core->reg_base + WRAPPER_CORE_POWER_STATUS);
+ pwr_status = value & BIT(1);
+
+ return !pwr_status;
+}
+
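+/*
+ * If firmware left the video hardware powered on, bring it down in an
+ * orderly way: force the core clock to run, wait for all VPP pipes to
+ * go idle, pulse the video NoC reset and reset the AHB bridge. Any
+ * timeout falls through and the hardware power is cut regardless.
+ */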
+static void iris_vpu3_power_off_hardware(struct iris_core *core)
+{
+ u32 reg_val = 0, value, i;
+ int ret;
+
+ if (iris_vpu3_hw_power_collapsed(core))
+ goto disable_power;
+
+ dev_err(core->dev, "video hw is still powered on\n");
+
+ value = readl(core->reg_base + WRAPPER_CORE_CLOCK_CONFIG);
+ if (value)
+ writel(CORE_CLK_RUN, core->reg_base + WRAPPER_CORE_CLOCK_CONFIG);
+
+ for (i = 0; i < core->iris_platform_data->num_vpp_pipe; i++) {
+ ret = readl_poll_timeout(core->reg_base + VCODEC_SS_IDLE_STATUSN + 4 * i,
+ reg_val, reg_val & 0x400000, 2000, 20000);
+ if (ret)
+ goto disable_power;
+ }
+
+ writel(VIDEO_NOC_RESET_REQ, core->reg_base + AON_WRAPPER_MVP_NOC_RESET_REQ);
+
+ ret = readl_poll_timeout(core->reg_base + AON_WRAPPER_MVP_NOC_RESET_ACK,
+ reg_val, reg_val & 0x3, 200, 2000);
+ if (ret)
+ goto disable_power;
+
+ writel(0x0, core->reg_base + AON_WRAPPER_MVP_NOC_RESET_REQ);
+
+ ret = readl_poll_timeout(core->reg_base + AON_WRAPPER_MVP_NOC_RESET_ACK,
+ reg_val, !(reg_val & 0x3), 200, 2000);
+ if (ret)
+ goto disable_power;
+
+ writel(CORE_BRIDGE_SW_RESET | CORE_BRIDGE_HW_RESET_DISABLE,
+ core->reg_base + CPU_CS_AHB_BRIDGE_SYNC_RESET);
+ writel(CORE_BRIDGE_HW_RESET_DISABLE, core->reg_base + CPU_CS_AHB_BRIDGE_SYNC_RESET);
+ writel(0x0, core->reg_base + CPU_CS_AHB_BRIDGE_SYNC_RESET);
+
+disable_power:
+ iris_vpu_power_off_hw(core);
+}
+
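+/*
+ * VPU3 frequency model: VPP cycles are scaled by the number of hardware
+ * pipes (with a minimum firmware overhead and an extra ~5.9% for
+ * multi-pipe operation), VSP cycles are derived from the bitstream
+ * size, and the result is the maximum of VPP, VSP and firmware cycles.
+ */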
+static u64 iris_vpu3_calculate_frequency(struct iris_inst *inst, size_t data_size)
+{
+ struct platform_inst_caps *caps = inst->core->iris_platform_data->inst_caps;
+ struct v4l2_format *inp_f = inst->fmt_src;
+ u32 height, width, mbs_per_second, mbpf;
+ u64 fw_cycles, fw_vpp_cycles;
+ u64 vsp_cycles, vpp_cycles;
+ u32 fps = DEFAULT_FPS;
+
+ width = max(inp_f->fmt.pix_mp.width, inst->crop.width);
+ height = max(inp_f->fmt.pix_mp.height, inst->crop.height);
+
+ mbpf = NUM_MBS_PER_FRAME(height, width);
+ mbs_per_second = mbpf * fps;
+
+ fw_cycles = fps * caps->mb_cycles_fw;
+ fw_vpp_cycles = fps * caps->mb_cycles_fw_vpp;
+
+ vpp_cycles = mult_frac(mbs_per_second, caps->mb_cycles_vpp, (u32)inst->fw_caps[PIPE].value);
+ /* 21 / 20 is minimum overhead factor */
+ vpp_cycles += max(div_u64(vpp_cycles, 20), fw_vpp_cycles);
+
+ /* 1.059 is multi-pipe overhead */
+ if (inst->fw_caps[PIPE].value > 1)
+ vpp_cycles += div_u64(vpp_cycles * 59, 1000);
+
+ vsp_cycles = fps * data_size * 8;
+ vsp_cycles = div_u64(vsp_cycles, 2);
+ /* VSP FW overhead 1.05 */
+ vsp_cycles = div_u64(vsp_cycles * 21, 20);
+
+ if (inst->fw_caps[STAGE].value == STAGE_1)
+ vsp_cycles = vsp_cycles * 3;
+
+ return max3(vpp_cycles, vsp_cycles, fw_cycles);
+}
+
+const struct vpu_ops iris_vpu3_ops = {
+ .power_off_hw = iris_vpu3_power_off_hardware,
+ .calc_freq = iris_vpu3_calculate_frequency,
+};
diff --git a/drivers/media/platform/qcom/iris/iris_vpu_buffer.c b/drivers/media/platform/qcom/iris/iris_vpu_buffer.c
new file mode 100644
index 000000000000..dce25e410d80
--- /dev/null
+++ b/drivers/media/platform/qcom/iris/iris_vpu_buffer.c
@@ -0,0 +1,270 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include "iris_instance.h"
+#include "iris_vpu_buffer.h"
+
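+/*
+ * The H.264 bin buffer holds CABAC header and residual data. Its size
+ * is derived from the YUV frame size (clamped to a 1280x736 minimum)
+ * scaled by the header/residual ratios, with each part aligned per VPP
+ * pipe.
+ */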
+static u32 size_h264d_hw_bin_buffer(u32 frame_width, u32 frame_height, u32 num_vpp_pipes)
+{
+ u32 size_yuv, size_bin_hdr, size_bin_res;
+
+ size_yuv = ((frame_width * frame_height) <= BIN_BUFFER_THRESHOLD) ?
+ ((BIN_BUFFER_THRESHOLD * 3) >> 1) :
+ ((frame_width * frame_height * 3) >> 1);
+ size_bin_hdr = size_yuv * H264_CABAC_HDR_RATIO_HD_TOT;
+ size_bin_res = size_yuv * H264_CABAC_RES_RATIO_HD_TOT;
+ size_bin_hdr = ALIGN(size_bin_hdr / num_vpp_pipes,
+ DMA_ALIGNMENT) * num_vpp_pipes;
+ size_bin_res = ALIGN(size_bin_res / num_vpp_pipes,
+ DMA_ALIGNMENT) * num_vpp_pipes;
+
+ return size_bin_hdr + size_bin_res;
+}
+
+static u32 hfi_buffer_bin_h264d(u32 frame_width, u32 frame_height, u32 num_vpp_pipes)
+{
+ u32 n_aligned_h = ALIGN(frame_height, 16);
+ u32 n_aligned_w = ALIGN(frame_width, 16);
+
+ return size_h264d_hw_bin_buffer(n_aligned_w, n_aligned_h, num_vpp_pipes);
+}
+
+static u32 hfi_buffer_comv_h264d(u32 frame_width, u32 frame_height, u32 _comv_bufcount)
+{
+ u32 frame_height_in_mbs = DIV_ROUND_UP(frame_height, 16);
+ u32 frame_width_in_mbs = DIV_ROUND_UP(frame_width, 16);
+ u32 col_zero_aligned_width = (frame_width_in_mbs << 2);
+ u32 col_mv_aligned_width = (frame_width_in_mbs << 7);
+ u32 col_zero_size, size_colloc;
+
+ col_mv_aligned_width = ALIGN(col_mv_aligned_width, 16);
+ col_zero_aligned_width = ALIGN(col_zero_aligned_width, 16);
+ col_zero_size = col_zero_aligned_width *
+ ((frame_height_in_mbs + 1) >> 1);
+ col_zero_size = ALIGN(col_zero_size, 64);
+ col_zero_size <<= 1;
+ col_zero_size = ALIGN(col_zero_size, 512);
+ size_colloc = col_mv_aligned_width * ((frame_height_in_mbs + 1) >> 1);
+ size_colloc = ALIGN(size_colloc, 64);
+ size_colloc <<= 1;
+ size_colloc = ALIGN(size_colloc, 512);
+ size_colloc += (col_zero_size + SIZE_H264D_BUFTAB_T * 2);
+
+ return (size_colloc * (_comv_bufcount)) + 512;
+}
+
+static u32 size_h264d_bse_cmd_buf(u32 frame_height)
+{
+ u32 height = ALIGN(frame_height, 32);
+
+ return min_t(u32, (DIV_ROUND_UP(height, 16) * 48), H264D_MAX_SLICE) *
+ SIZE_H264D_BSE_CMD_PER_BUF;
+}
+
+static u32 size_h264d_vpp_cmd_buf(u32 frame_height)
+{
+ u32 size, height = ALIGN(frame_height, 32);
+
+ size = min_t(u32, (DIV_ROUND_UP(height, 16) * 48), H264D_MAX_SLICE) *
+ SIZE_H264D_VPP_CMD_PER_BUF;
+
+ return size > VPP_CMD_MAX_SIZE ? VPP_CMD_MAX_SIZE : size;
+}
+
+static u32 hfi_buffer_persist_h264d(void)
+{
+ return ALIGN(SIZE_SLIST_BUF_H264 * NUM_SLIST_BUF_H264 +
+ H264_DISPLAY_BUF_SIZE * H264_NUM_FRM_INFO +
+ NUM_HW_PIC_BUF * SIZE_SEI_USERDATA,
+ DMA_ALIGNMENT);
+}
+
+static u32 hfi_buffer_non_comv_h264d(u32 frame_width, u32 frame_height, u32 num_vpp_pipes)
+{
+ u32 size_bse, size_vpp, size;
+
+ size_bse = size_h264d_bse_cmd_buf(frame_height);
+ size_vpp = size_h264d_vpp_cmd_buf(frame_height);
+ size = ALIGN(size_bse, DMA_ALIGNMENT) +
+ ALIGN(size_vpp, DMA_ALIGNMENT) +
+ ALIGN(SIZE_HW_PIC(SIZE_H264D_HW_PIC_T), DMA_ALIGNMENT);
+
+ return ALIGN(size, DMA_ALIGNMENT);
+}
+
+static u32 size_vpss_lb(u32 frame_width, u32 frame_height)
+{
+ u32 opb_lb_wr_llb_y_buffer_size, opb_lb_wr_llb_uv_buffer_size;
+ u32 opb_wr_top_line_chroma_buffer_size;
+ u32 opb_wr_top_line_luma_buffer_size;
+ u32 macrotiling_size = 32;
+
+ opb_wr_top_line_luma_buffer_size =
+ ALIGN(frame_width, macrotiling_size) / macrotiling_size * 256;
+ opb_wr_top_line_luma_buffer_size =
+ ALIGN(opb_wr_top_line_luma_buffer_size, DMA_ALIGNMENT) +
+ (MAX_TILE_COLUMNS - 1) * 256;
+ opb_wr_top_line_luma_buffer_size =
+ max_t(u32, opb_wr_top_line_luma_buffer_size, (32 * ALIGN(frame_height, 8)));
+ opb_wr_top_line_chroma_buffer_size = opb_wr_top_line_luma_buffer_size;
+ opb_lb_wr_llb_uv_buffer_size =
+ ALIGN((ALIGN(frame_height, 8) / (4 / 2)) * 64, 32);
+ opb_lb_wr_llb_y_buffer_size =
+ ALIGN((ALIGN(frame_height, 8) / (4 / 2)) * 64, 32);
+ return opb_wr_top_line_luma_buffer_size +
+ opb_wr_top_line_chroma_buffer_size +
+ opb_lb_wr_llb_uv_buffer_size +
+ opb_lb_wr_llb_y_buffer_size;
+}
+
+static u32 hfi_buffer_line_h264d(u32 frame_width, u32 frame_height,
+ bool is_opb, u32 num_vpp_pipes)
+{
+ u32 vpss_lb_size = 0;
+ u32 size;
+
+ size = ALIGN(size_h264d_lb_fe_top_data(frame_width), DMA_ALIGNMENT) +
+ ALIGN(size_h264d_lb_fe_top_ctrl(frame_width), DMA_ALIGNMENT) +
+ ALIGN(size_h264d_lb_fe_left_ctrl(frame_height), DMA_ALIGNMENT) * num_vpp_pipes +
+ ALIGN(size_h264d_lb_se_top_ctrl(frame_width), DMA_ALIGNMENT) +
+ ALIGN(size_h264d_lb_se_left_ctrl(frame_height), DMA_ALIGNMENT) * num_vpp_pipes +
+ ALIGN(size_h264d_lb_pe_top_data(frame_width), DMA_ALIGNMENT) +
+ ALIGN(size_h264d_lb_vsp_top(frame_width), DMA_ALIGNMENT) +
+ ALIGN(size_h264d_lb_recon_dma_metadata_wr(frame_height), DMA_ALIGNMENT) * 2 +
+ ALIGN(size_h264d_qp(frame_width, frame_height), DMA_ALIGNMENT);
+ size = ALIGN(size, DMA_ALIGNMENT);
+ if (is_opb)
+ vpss_lb_size = size_vpss_lb(frame_width, frame_height);
+
+ return ALIGN((size + vpss_lb_size), DMA_ALIGNMENT);
+}
+
+static u32 iris_vpu_dec_bin_size(struct iris_inst *inst)
+{
+ u32 num_vpp_pipes = inst->core->iris_platform_data->num_vpp_pipe;
+ struct v4l2_format *f = inst->fmt_src;
+ u32 height = f->fmt.pix_mp.height;
+ u32 width = f->fmt.pix_mp.width;
+
+ return hfi_buffer_bin_h264d(width, height, num_vpp_pipes);
+}
+
+static u32 iris_vpu_dec_comv_size(struct iris_inst *inst)
+{
+ u32 num_comv = VIDEO_MAX_FRAME;
+ struct v4l2_format *f = inst->fmt_src;
+ u32 height = f->fmt.pix_mp.height;
+ u32 width = f->fmt.pix_mp.width;
+
+ return hfi_buffer_comv_h264d(width, height, num_comv);
+}
+
+static u32 iris_vpu_dec_persist_size(struct iris_inst *inst)
+{
+ return hfi_buffer_persist_h264d();
+}
+
+static u32 iris_vpu_dec_dpb_size(struct iris_inst *inst)
+{
+ if (iris_split_mode_enabled(inst))
+ return iris_get_buffer_size(inst, BUF_DPB);
+
+ return 0;
+}
+
+static u32 iris_vpu_dec_non_comv_size(struct iris_inst *inst)
+{
+ u32 num_vpp_pipes = inst->core->iris_platform_data->num_vpp_pipe;
+ struct v4l2_format *f = inst->fmt_src;
+ u32 height = f->fmt.pix_mp.height;
+ u32 width = f->fmt.pix_mp.width;
+
+ return hfi_buffer_non_comv_h264d(width, height, num_vpp_pipes);
+}
+
+static u32 iris_vpu_dec_line_size(struct iris_inst *inst)
+{
+ u32 num_vpp_pipes = inst->core->iris_platform_data->num_vpp_pipe;
+ struct v4l2_format *f = inst->fmt_src;
+ u32 height = f->fmt.pix_mp.height;
+ u32 width = f->fmt.pix_mp.width;
+ bool is_opb = false;
+
+ if (iris_split_mode_enabled(inst))
+ is_opb = true;
+
+ return hfi_buffer_line_h264d(width, height, is_opb, num_vpp_pipes);
+}
+
+static u32 iris_vpu_dec_scratch1_size(struct iris_inst *inst)
+{
+ return iris_vpu_dec_comv_size(inst) +
+ iris_vpu_dec_non_comv_size(inst) +
+ iris_vpu_dec_line_size(inst);
+}
+
+struct iris_vpu_buf_type_handle {
+ enum iris_buffer_type type;
+ u32 (*handle)(struct iris_inst *inst);
+};
+
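+/*
+ * Look up the size helper for the requested internal buffer type;
+ * unknown types report a size of zero.
+ */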
+int iris_vpu_buf_size(struct iris_inst *inst, enum iris_buffer_type buffer_type)
+{
+ const struct iris_vpu_buf_type_handle *buf_type_handle_arr;
+ u32 size = 0, buf_type_handle_size, i;
+
+ static const struct iris_vpu_buf_type_handle dec_internal_buf_type_handle[] = {
+ {BUF_BIN, iris_vpu_dec_bin_size },
+ {BUF_COMV, iris_vpu_dec_comv_size },
+ {BUF_NON_COMV, iris_vpu_dec_non_comv_size },
+ {BUF_LINE, iris_vpu_dec_line_size },
+ {BUF_PERSIST, iris_vpu_dec_persist_size },
+ {BUF_DPB, iris_vpu_dec_dpb_size },
+ {BUF_SCRATCH_1, iris_vpu_dec_scratch1_size },
+ };
+
+ buf_type_handle_size = ARRAY_SIZE(dec_internal_buf_type_handle);
+ buf_type_handle_arr = dec_internal_buf_type_handle;
+
+ for (i = 0; i < buf_type_handle_size; i++) {
+ if (buf_type_handle_arr[i].type == buffer_type) {
+ size = buf_type_handle_arr[i].handle(inst);
+ break;
+ }
+ }
+
+ return size;
+}
+
+static inline int iris_vpu_dpb_count(struct iris_inst *inst)
+{
+ if (iris_split_mode_enabled(inst)) {
+ return inst->fw_min_count ?
+ inst->fw_min_count : inst->buffers[BUF_OUTPUT].min_count;
+ }
+
+ return 0;
+}
+
+int iris_vpu_buf_count(struct iris_inst *inst, enum iris_buffer_type buffer_type)
+{
+ switch (buffer_type) {
+ case BUF_INPUT:
+ return MIN_BUFFERS;
+ case BUF_OUTPUT:
+ return inst->fw_min_count;
+ case BUF_BIN:
+ case BUF_COMV:
+ case BUF_NON_COMV:
+ case BUF_LINE:
+ case BUF_PERSIST:
+ case BUF_SCRATCH_1:
+ return 1; /* internal buffer count needed by firmware is 1 */
+ case BUF_DPB:
+ return iris_vpu_dpb_count(inst);
+ default:
+ return 0;
+ }
+}
diff --git a/drivers/media/platform/qcom/iris/iris_vpu_buffer.h b/drivers/media/platform/qcom/iris/iris_vpu_buffer.h
new file mode 100644
index 000000000000..62af6ea6ba1f
--- /dev/null
+++ b/drivers/media/platform/qcom/iris/iris_vpu_buffer.h
@@ -0,0 +1,91 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef __IRIS_VPU_BUFFER_H__
+#define __IRIS_VPU_BUFFER_H__
+
+struct iris_inst;
+
+#define MIN_BUFFERS 4
+
+#define DMA_ALIGNMENT 256
+
+#define NUM_HW_PIC_BUF 32
+#define SIZE_HW_PIC(size_per_buf) (NUM_HW_PIC_BUF * (size_per_buf))
+
+#define MAX_TILE_COLUMNS 32
+#define BIN_BUFFER_THRESHOLD (1280 * 736)
+#define VPP_CMD_MAX_SIZE (BIT(20))
+#define H264D_MAX_SLICE 1800
+
+#define SIZE_H264D_BUFTAB_T 256
+#define SIZE_H264D_BSE_CMD_PER_BUF (32 * 4)
+#define SIZE_H264D_VPP_CMD_PER_BUF 512
+
+#define NUM_SLIST_BUF_H264 (256 + 32)
+#define SIZE_SLIST_BUF_H264 512
+#define H264_DISPLAY_BUF_SIZE 3328
+#define H264_NUM_FRM_INFO 66
+
+#define SIZE_SEI_USERDATA 4096
+
+#define H264_CABAC_HDR_RATIO_HD_TOT 1
+#define H264_CABAC_RES_RATIO_HD_TOT 3
+#define SIZE_H264D_HW_PIC_T (BIT(11))
+
+#define MAX_FE_NBR_CTRL_LCU64_LINE_BUFFER_SIZE 64
+#define MAX_SE_NBR_CTRL_LCU64_LINE_BUFFER_SIZE 16
+#define MAX_PE_NBR_DATA_LCU64_LINE_BUFFER_SIZE 384
+#define MAX_FE_NBR_DATA_LUMA_LINE_BUFFER_SIZE 640
+
+static inline u32 size_h264d_lb_fe_top_data(u32 frame_width)
+{
+ return MAX_FE_NBR_DATA_LUMA_LINE_BUFFER_SIZE * ALIGN(frame_width, 16) * 3;
+}
+
+static inline u32 size_h264d_lb_fe_top_ctrl(u32 frame_width)
+{
+ return MAX_FE_NBR_CTRL_LCU64_LINE_BUFFER_SIZE * DIV_ROUND_UP(frame_width, 16);
+}
+
+static inline u32 size_h264d_lb_fe_left_ctrl(u32 frame_height)
+{
+ return MAX_FE_NBR_CTRL_LCU64_LINE_BUFFER_SIZE * DIV_ROUND_UP(frame_height, 16);
+}
+
+static inline u32 size_h264d_lb_se_top_ctrl(u32 frame_width)
+{
+ return MAX_SE_NBR_CTRL_LCU64_LINE_BUFFER_SIZE * DIV_ROUND_UP(frame_width, 16);
+}
+
+static inline u32 size_h264d_lb_se_left_ctrl(u32 frame_height)
+{
+ return MAX_SE_NBR_CTRL_LCU64_LINE_BUFFER_SIZE * DIV_ROUND_UP(frame_height, 16);
+}
+
+static inline u32 size_h264d_lb_pe_top_data(u32 frame_width)
+{
+ return MAX_PE_NBR_DATA_LCU64_LINE_BUFFER_SIZE * DIV_ROUND_UP(frame_width, 16);
+}
+
+static inline u32 size_h264d_lb_vsp_top(u32 frame_width)
+{
+ return (DIV_ROUND_UP(frame_width, 16) << 7);
+}
+
+static inline u32 size_h264d_lb_recon_dma_metadata_wr(u32 frame_height)
+{
+ return ALIGN(frame_height, 16) * 32;
+}
+
+static inline u32 size_h264d_qp(u32 frame_width, u32 frame_height)
+{
+ return DIV_ROUND_UP(frame_width, 64) * DIV_ROUND_UP(frame_height, 64) * 128;
+}
+
+int iris_vpu_buf_size(struct iris_inst *inst, enum iris_buffer_type buffer_type);
+int iris_vpu_buf_count(struct iris_inst *inst, enum iris_buffer_type buffer_type);
+
+#endif
diff --git a/drivers/media/platform/qcom/iris/iris_vpu_common.c b/drivers/media/platform/qcom/iris/iris_vpu_common.c
new file mode 100644
index 000000000000..fe9896d66848
--- /dev/null
+++ b/drivers/media/platform/qcom/iris/iris_vpu_common.c
@@ -0,0 +1,369 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/iopoll.h>
+#include <linux/pm_opp.h>
+#include <linux/reset.h>
+
+#include "iris_core.h"
+#include "iris_vpu_common.h"
+#include "iris_vpu_register_defines.h"
+
+#define WRAPPER_TZ_BASE_OFFS 0x000C0000
+#define AON_BASE_OFFS 0x000E0000
+
+#define CPU_IC_BASE_OFFS (CPU_BASE_OFFS)
+
+#define CPU_CS_A2HSOFTINTCLR (CPU_CS_BASE_OFFS + 0x1C)
+#define CLEAR_XTENSA2HOST_INTR BIT(0)
+
+#define CTRL_INIT (CPU_CS_BASE_OFFS + 0x48)
+#define CTRL_STATUS (CPU_CS_BASE_OFFS + 0x4C)
+
+#define CTRL_INIT_IDLE_MSG_BMSK 0x40000000
+#define CTRL_ERROR_STATUS__M 0xfe
+#define CTRL_STATUS_PC_READY 0x100
+
+#define QTBL_INFO (CPU_CS_BASE_OFFS + 0x50)
+#define QTBL_ENABLE BIT(0)
+
+#define QTBL_ADDR (CPU_CS_BASE_OFFS + 0x54)
+#define CPU_CS_SCIACMDARG3 (CPU_CS_BASE_OFFS + 0x58)
+#define SFR_ADDR (CPU_CS_BASE_OFFS + 0x5C)
+#define UC_REGION_ADDR (CPU_CS_BASE_OFFS + 0x64)
+#define UC_REGION_SIZE (CPU_CS_BASE_OFFS + 0x68)
+
+#define CPU_CS_H2XSOFTINTEN (CPU_CS_BASE_OFFS + 0x148)
+#define HOST2XTENSA_INTR_ENABLE BIT(0)
+
+#define CPU_CS_X2RPMH (CPU_CS_BASE_OFFS + 0x168)
+#define MSK_SIGNAL_FROM_TENSILICA BIT(0)
+#define MSK_CORE_POWER_ON BIT(1)
+
+#define CPU_IC_SOFTINT (CPU_IC_BASE_OFFS + 0x150)
+#define CPU_IC_SOFTINT_H2A_SHFT 0x0
+
+#define WRAPPER_INTR_STATUS (WRAPPER_BASE_OFFS + 0x0C)
+#define WRAPPER_INTR_STATUS_A2HWD_BMSK BIT(3)
+#define WRAPPER_INTR_STATUS_A2H_BMSK BIT(2)
+
+#define WRAPPER_INTR_MASK (WRAPPER_BASE_OFFS + 0x10)
+#define WRAPPER_INTR_MASK_A2HWD_BMSK BIT(3)
+#define WRAPPER_INTR_MASK_A2HCPU_BMSK BIT(2)
+
+#define WRAPPER_DEBUG_BRIDGE_LPI_CONTROL (WRAPPER_BASE_OFFS + 0x54)
+#define WRAPPER_DEBUG_BRIDGE_LPI_STATUS (WRAPPER_BASE_OFFS + 0x58)
+#define WRAPPER_IRIS_CPU_NOC_LPI_CONTROL (WRAPPER_BASE_OFFS + 0x5C)
+#define WRAPPER_IRIS_CPU_NOC_LPI_STATUS (WRAPPER_BASE_OFFS + 0x60)
+
+#define WRAPPER_TZ_CPU_STATUS (WRAPPER_TZ_BASE_OFFS + 0x10)
+#define WRAPPER_TZ_CTL_AXI_CLOCK_CONFIG (WRAPPER_TZ_BASE_OFFS + 0x14)
+#define CTL_AXI_CLK_HALT BIT(0)
+#define CTL_CLK_HALT BIT(1)
+
+#define WRAPPER_TZ_QNS4PDXFIFO_RESET (WRAPPER_TZ_BASE_OFFS + 0x18)
+#define RESET_HIGH BIT(0)
+
+#define AON_WRAPPER_MVP_NOC_LPI_CONTROL (AON_BASE_OFFS)
+#define REQ_POWER_DOWN_PREP BIT(0)
+
+#define AON_WRAPPER_MVP_NOC_LPI_STATUS (AON_BASE_OFFS + 0x4)
+
+static void iris_vpu_interrupt_init(struct iris_core *core)
+{
+ u32 mask_val;
+
+ mask_val = readl(core->reg_base + WRAPPER_INTR_MASK);
+ mask_val &= ~(WRAPPER_INTR_MASK_A2HWD_BMSK |
+ WRAPPER_INTR_MASK_A2HCPU_BMSK);
+ writel(mask_val, core->reg_base + WRAPPER_INTR_MASK);
+}
+
+static void iris_vpu_setup_ucregion_memory_map(struct iris_core *core)
+{
+ u32 queue_size, value;
+
+ /* Iris hardware requires 4K queue alignment */
+ queue_size = ALIGN(sizeof(struct iris_hfi_queue_table_header) +
+ (IFACEQ_QUEUE_SIZE * IFACEQ_NUMQ), SZ_4K);
+
+ value = (u32)core->iface_q_table_daddr;
+ writel(value, core->reg_base + UC_REGION_ADDR);
+
+ /* Iris hardware requires the shared region size to be 1 MB aligned */
+ value = ALIGN(SFR_SIZE + queue_size, SZ_1M);
+ writel(value, core->reg_base + UC_REGION_SIZE);
+
+ value = (u32)core->iface_q_table_daddr;
+ writel(value, core->reg_base + QTBL_ADDR);
+
+ writel(QTBL_ENABLE, core->reg_base + QTBL_INFO);
+
+ if (core->sfr_daddr) {
+ value = (u32)core->sfr_daddr + core->iris_platform_data->core_arch;
+ writel(value, core->reg_base + SFR_ADDR);
+ }
+}
+
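+/*
+ * Program the shared queue and SFR addresses into the VPU, start the
+ * boot sequence through CTRL_INIT and poll CTRL_STATUS until the
+ * firmware either comes up or reports an invalid uc_region setting.
+ */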
+int iris_vpu_boot_firmware(struct iris_core *core)
+{
+ u32 ctrl_init = BIT(0), ctrl_status = 0, count = 0, max_tries = 1000;
+
+ iris_vpu_setup_ucregion_memory_map(core);
+
+ writel(ctrl_init, core->reg_base + CTRL_INIT);
+ writel(0x1, core->reg_base + CPU_CS_SCIACMDARG3);
+
+ while (!ctrl_status && count < max_tries) {
+ ctrl_status = readl(core->reg_base + CTRL_STATUS);
+ if ((ctrl_status & CTRL_ERROR_STATUS__M) == 0x4) {
+ dev_err(core->dev, "invalid setting for uc_region\n");
+ break;
+ }
+
+ usleep_range(50, 100);
+ count++;
+ }
+
+ if (count >= max_tries) {
+ dev_err(core->dev, "error booting up iris firmware\n");
+ return -ETIME;
+ }
+
+ writel(HOST2XTENSA_INTR_ENABLE, core->reg_base + CPU_CS_H2XSOFTINTEN);
+ writel(0x0, core->reg_base + CPU_CS_X2RPMH);
+
+ return 0;
+}
+
+void iris_vpu_raise_interrupt(struct iris_core *core)
+{
+ writel(1 << CPU_IC_SOFTINT_H2A_SHFT, core->reg_base + CPU_IC_SOFTINT);
+}
+
+void iris_vpu_clear_interrupt(struct iris_core *core)
+{
+ u32 intr_status, mask;
+
+ intr_status = readl(core->reg_base + WRAPPER_INTR_STATUS);
+ mask = (WRAPPER_INTR_STATUS_A2H_BMSK |
+ WRAPPER_INTR_STATUS_A2HWD_BMSK |
+ CTRL_INIT_IDLE_MSG_BMSK);
+
+ if (intr_status & mask)
+ core->intr_status |= intr_status;
+
+ writel(CLEAR_XTENSA2HOST_INTR, core->reg_base + CPU_CS_A2HSOFTINTCLR);
+}
+
+int iris_vpu_watchdog(struct iris_core *core, u32 intr_status)
+{
+ if (intr_status & WRAPPER_INTR_STATUS_A2HWD_BMSK) {
+ dev_err(core->dev, "received watchdog interrupt\n");
+ return -ETIME;
+ }
+
+ return 0;
+}
+
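+/*
+ * Check whether the firmware can enter power collapse: it must already
+ * be idle and in WFI. If so, send the power-collapse prepare command
+ * and poll until the firmware signals PC_READY; otherwise return
+ * -EAGAIN so the caller retries later.
+ */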
+int iris_vpu_prepare_pc(struct iris_core *core)
+{
+ u32 wfi_status, idle_status, pc_ready;
+ u32 ctrl_status, val = 0;
+ int ret;
+
+ ctrl_status = readl(core->reg_base + CTRL_STATUS);
+ pc_ready = ctrl_status & CTRL_STATUS_PC_READY;
+ idle_status = ctrl_status & BIT(30);
+ if (pc_ready)
+ return 0;
+
+ wfi_status = readl(core->reg_base + WRAPPER_TZ_CPU_STATUS);
+ wfi_status &= BIT(0);
+ if (!wfi_status || !idle_status)
+ goto skip_power_off;
+
+ ret = core->hfi_ops->sys_pc_prep(core);
+ if (ret)
+ goto skip_power_off;
+
+ ret = readl_poll_timeout(core->reg_base + CTRL_STATUS, val,
+ val & CTRL_STATUS_PC_READY, 250, 2500);
+ if (ret)
+ goto skip_power_off;
+
+ ret = readl_poll_timeout(core->reg_base + WRAPPER_TZ_CPU_STATUS,
+ val, val & BIT(0), 250, 2500);
+ if (ret)
+ goto skip_power_off;
+
+ return 0;
+
+skip_power_off:
+ ctrl_status = readl(core->reg_base + CTRL_STATUS);
+ wfi_status = readl(core->reg_base + WRAPPER_TZ_CPU_STATUS);
+ wfi_status &= BIT(0);
+ dev_err(core->dev, "skip power collapse (wfi=%#x, idle=%#x, pcr=%#x, ctrl=%#x)\n",
+ wfi_status, idle_status, pc_ready, ctrl_status);
+
+ return -EAGAIN;
+}
+
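+/*
+ * Power down the controller: mask the Tensilica-to-RPMh signals,
+ * hand-shake the NoC low-power-interface (LPI) transitions, halt the
+ * AXI clocks and finally disable the controller clocks and power
+ * domain. LPI handshake timeouts are ignored and power is removed
+ * anyway.
+ */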
+static int iris_vpu_power_off_controller(struct iris_core *core)
+{
+ u32 val = 0;
+ int ret;
+
+ writel(MSK_SIGNAL_FROM_TENSILICA | MSK_CORE_POWER_ON, core->reg_base + CPU_CS_X2RPMH);
+
+ writel(REQ_POWER_DOWN_PREP, core->reg_base + AON_WRAPPER_MVP_NOC_LPI_CONTROL);
+
+ ret = readl_poll_timeout(core->reg_base + AON_WRAPPER_MVP_NOC_LPI_STATUS,
+ val, val & BIT(0), 200, 2000);
+ if (ret)
+ goto disable_power;
+
+ writel(REQ_POWER_DOWN_PREP, core->reg_base + WRAPPER_IRIS_CPU_NOC_LPI_CONTROL);
+
+ ret = readl_poll_timeout(core->reg_base + WRAPPER_IRIS_CPU_NOC_LPI_STATUS,
+ val, val & BIT(0), 200, 2000);
+ if (ret)
+ goto disable_power;
+
+ writel(0x0, core->reg_base + WRAPPER_DEBUG_BRIDGE_LPI_CONTROL);
+
+ ret = readl_poll_timeout(core->reg_base + WRAPPER_DEBUG_BRIDGE_LPI_STATUS,
+ val, val == 0, 200, 2000);
+ if (ret)
+ goto disable_power;
+
+ writel(CTL_AXI_CLK_HALT | CTL_CLK_HALT,
+ core->reg_base + WRAPPER_TZ_CTL_AXI_CLOCK_CONFIG);
+ writel(RESET_HIGH, core->reg_base + WRAPPER_TZ_QNS4PDXFIFO_RESET);
+ writel(0x0, core->reg_base + WRAPPER_TZ_QNS4PDXFIFO_RESET);
+ writel(0x0, core->reg_base + WRAPPER_TZ_CTL_AXI_CLOCK_CONFIG);
+
+disable_power:
+ iris_disable_unprepare_clock(core, IRIS_CTRL_CLK);
+ iris_disable_unprepare_clock(core, IRIS_AXI_CLK);
+ iris_disable_power_domains(core, core->pmdomain_tbl->pd_devs[IRIS_CTRL_POWER_DOMAIN]);
+
+ return 0;
+}
+
+void iris_vpu_power_off_hw(struct iris_core *core)
+{
+ dev_pm_genpd_set_hwmode(core->pmdomain_tbl->pd_devs[IRIS_HW_POWER_DOMAIN], false);
+ iris_disable_power_domains(core, core->pmdomain_tbl->pd_devs[IRIS_HW_POWER_DOMAIN]);
+ iris_disable_unprepare_clock(core, IRIS_HW_CLK);
+}
+
+void iris_vpu_power_off(struct iris_core *core)
+{
+ dev_pm_opp_set_rate(core->dev, 0);
+ core->iris_platform_data->vpu_ops->power_off_hw(core);
+ iris_vpu_power_off_controller(core);
+ iris_unset_icc_bw(core);
+
+ if (!iris_vpu_watchdog(core, core->intr_status))
+ disable_irq_nosync(core->irq);
+}
+
+static int iris_vpu_power_on_controller(struct iris_core *core)
+{
+ u32 rst_tbl_size = core->iris_platform_data->clk_rst_tbl_size;
+ int ret;
+
+ ret = iris_enable_power_domains(core, core->pmdomain_tbl->pd_devs[IRIS_CTRL_POWER_DOMAIN]);
+ if (ret)
+ return ret;
+
+ ret = reset_control_bulk_reset(rst_tbl_size, core->resets);
+ if (ret)
+ goto err_disable_power;
+
+ ret = iris_prepare_enable_clock(core, IRIS_AXI_CLK);
+ if (ret)
+ goto err_disable_power;
+
+ ret = iris_prepare_enable_clock(core, IRIS_CTRL_CLK);
+ if (ret)
+ goto err_disable_clock;
+
+ return 0;
+
+err_disable_clock:
+ iris_disable_unprepare_clock(core, IRIS_AXI_CLK);
+err_disable_power:
+ iris_disable_power_domains(core, core->pmdomain_tbl->pd_devs[IRIS_CTRL_POWER_DOMAIN]);
+
+ return ret;
+}
+
+static int iris_vpu_power_on_hw(struct iris_core *core)
+{
+ int ret;
+
+ ret = iris_enable_power_domains(core, core->pmdomain_tbl->pd_devs[IRIS_HW_POWER_DOMAIN]);
+ if (ret)
+ return ret;
+
+ ret = iris_prepare_enable_clock(core, IRIS_HW_CLK);
+ if (ret)
+ goto err_disable_power;
+
+ ret = dev_pm_genpd_set_hwmode(core->pmdomain_tbl->pd_devs[IRIS_HW_POWER_DOMAIN], true);
+ if (ret)
+ goto err_disable_clock;
+
+ return 0;
+
+err_disable_clock:
+ iris_disable_unprepare_clock(core, IRIS_HW_CLK);
+err_disable_power:
+ iris_disable_power_domains(core, core->pmdomain_tbl->pd_devs[IRIS_HW_POWER_DOMAIN]);
+
+ return ret;
+}
+
+int iris_vpu_power_on(struct iris_core *core)
+{
+ u32 freq;
+ int ret;
+
+ ret = iris_set_icc_bw(core, INT_MAX);
+ if (ret)
+ goto err;
+
+ ret = iris_vpu_power_on_controller(core);
+ if (ret)
+ goto err_unvote_icc;
+
+ ret = iris_vpu_power_on_hw(core);
+ if (ret)
+ goto err_power_off_ctrl;
+
+ freq = core->power.clk_freq ? core->power.clk_freq :
+ (u32)ULONG_MAX;
+
+ dev_pm_opp_set_rate(core->dev, freq);
+
+ core->iris_platform_data->set_preset_registers(core);
+
+ iris_vpu_interrupt_init(core);
+ core->intr_status = 0;
+ enable_irq(core->irq);
+
+ return 0;
+
+err_power_off_ctrl:
+ iris_vpu_power_off_controller(core);
+err_unvote_icc:
+ iris_unset_icc_bw(core);
+err:
+ dev_err(core->dev, "power on failed\n");
+
+ return ret;
+}
diff --git a/drivers/media/platform/qcom/iris/iris_vpu_common.h b/drivers/media/platform/qcom/iris/iris_vpu_common.h
new file mode 100644
index 000000000000..63fa1fa5a498
--- /dev/null
+++ b/drivers/media/platform/qcom/iris/iris_vpu_common.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef __IRIS_VPU_COMMON_H__
+#define __IRIS_VPU_COMMON_H__
+
+struct iris_core;
+
+extern const struct vpu_ops iris_vpu2_ops;
+extern const struct vpu_ops iris_vpu3_ops;
+
+struct vpu_ops {
+ void (*power_off_hw)(struct iris_core *core);
+ u64 (*calc_freq)(struct iris_inst *inst, size_t data_size);
+};
+
+int iris_vpu_boot_firmware(struct iris_core *core);
+void iris_vpu_raise_interrupt(struct iris_core *core);
+void iris_vpu_clear_interrupt(struct iris_core *core);
+int iris_vpu_watchdog(struct iris_core *core, u32 intr_status);
+int iris_vpu_prepare_pc(struct iris_core *core);
+int iris_vpu_power_on(struct iris_core *core);
+void iris_vpu_power_off_hw(struct iris_core *core);
+void iris_vpu_power_off(struct iris_core *core);
+
+#endif
diff --git a/drivers/media/platform/qcom/iris/iris_vpu_register_defines.h b/drivers/media/platform/qcom/iris/iris_vpu_register_defines.h
new file mode 100644
index 000000000000..fe8a39e5e5a3
--- /dev/null
+++ b/drivers/media/platform/qcom/iris/iris_vpu_register_defines.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef __IRIS_VPU_REGISTER_DEFINES_H__
+#define __IRIS_VPU_REGISTER_DEFINES_H__
+
+#define VCODEC_BASE_OFFS 0x00000000
+#define CPU_BASE_OFFS 0x000A0000
+#define WRAPPER_BASE_OFFS 0x000B0000
+
+#define CPU_CS_BASE_OFFS (CPU_BASE_OFFS)
+
+#define WRAPPER_CORE_POWER_STATUS (WRAPPER_BASE_OFFS + 0x80)
+
+#endif
diff --git a/drivers/media/platform/qcom/venus/Kconfig b/drivers/media/platform/qcom/venus/Kconfig
index bc2e410b29cb..ffb731ecd48c 100644
--- a/drivers/media/platform/qcom/venus/Kconfig
+++ b/drivers/media/platform/qcom/venus/Kconfig
@@ -2,7 +2,7 @@ config VIDEO_QCOM_VENUS
tristate "Qualcomm Venus V4L2 encoder/decoder driver"
depends on V4L_MEM2MEM_DRIVERS
depends on VIDEO_DEV && QCOM_SMEM
- depends on (ARCH_QCOM && IOMMU_DMA) || COMPILE_TEST
+ depends on (ARCH_QCOM && ARM64 && IOMMU_API) || COMPILE_TEST
select OF_DYNAMIC if ARCH_QCOM
select QCOM_MDT_LOADER if ARCH_QCOM
select QCOM_SCM
diff --git a/drivers/media/platform/qcom/venus/hfi_parser.c b/drivers/media/platform/qcom/venus/hfi_parser.c
index 3df241dc3a11..1b3db2caa99f 100644
--- a/drivers/media/platform/qcom/venus/hfi_parser.c
+++ b/drivers/media/platform/qcom/venus/hfi_parser.c
@@ -19,6 +19,8 @@ static void init_codecs(struct venus_core *core)
struct hfi_plat_caps *caps = core->caps, *cap;
unsigned long bit;
+ core->codecs_count = 0;
+
if (hweight_long(core->dec_codecs) + hweight_long(core->enc_codecs) > MAX_CODEC_NUM)
return;
@@ -62,7 +64,7 @@ fill_buf_mode(struct hfi_plat_caps *cap, const void *data, unsigned int num)
cap->cap_bufs_mode_dynamic = true;
}
-static void
+static int
parse_alloc_mode(struct venus_core *core, u32 codecs, u32 domain, void *data)
{
struct hfi_buffer_alloc_mode_supported *mode = data;
@@ -70,7 +72,7 @@ parse_alloc_mode(struct venus_core *core, u32 codecs, u32 domain, void *data)
u32 *type;
if (num_entries > MAX_ALLOC_MODE_ENTRIES)
- return;
+ return -EINVAL;
type = mode->data;
@@ -82,6 +84,8 @@ parse_alloc_mode(struct venus_core *core, u32 codecs, u32 domain, void *data)
type++;
}
+
+ return sizeof(*mode);
}
static void fill_profile_level(struct hfi_plat_caps *cap, const void *data,
@@ -96,7 +100,7 @@ static void fill_profile_level(struct hfi_plat_caps *cap, const void *data,
cap->num_pl += num;
}
-static void
+static int
parse_profile_level(struct venus_core *core, u32 codecs, u32 domain, void *data)
{
struct hfi_profile_level_supported *pl = data;
@@ -104,12 +108,14 @@ parse_profile_level(struct venus_core *core, u32 codecs, u32 domain, void *data)
struct hfi_profile_level pl_arr[HFI_MAX_PROFILE_COUNT] = {};
if (pl->profile_count > HFI_MAX_PROFILE_COUNT)
- return;
+ return -EINVAL;
memcpy(pl_arr, proflevel, pl->profile_count * sizeof(*proflevel));
for_each_codec(core->caps, ARRAY_SIZE(core->caps), codecs, domain,
fill_profile_level, pl_arr, pl->profile_count);
+
+ return pl->profile_count * sizeof(*proflevel) + sizeof(u32);
}
static void
@@ -124,7 +130,7 @@ fill_caps(struct hfi_plat_caps *cap, const void *data, unsigned int num)
cap->num_caps += num;
}
-static void
+static int
parse_caps(struct venus_core *core, u32 codecs, u32 domain, void *data)
{
struct hfi_capabilities *caps = data;
@@ -133,12 +139,14 @@ parse_caps(struct venus_core *core, u32 codecs, u32 domain, void *data)
struct hfi_capability caps_arr[MAX_CAP_ENTRIES] = {};
if (num_caps > MAX_CAP_ENTRIES)
- return;
+ return -EINVAL;
memcpy(caps_arr, cap, num_caps * sizeof(*cap));
for_each_codec(core->caps, ARRAY_SIZE(core->caps), codecs, domain,
fill_caps, caps_arr, num_caps);
+
+ return sizeof(*caps);
}
static void fill_raw_fmts(struct hfi_plat_caps *cap, const void *fmts,
@@ -153,7 +161,7 @@ static void fill_raw_fmts(struct hfi_plat_caps *cap, const void *fmts,
cap->num_fmts += num_fmts;
}
-static void
+static int
parse_raw_formats(struct venus_core *core, u32 codecs, u32 domain, void *data)
{
struct hfi_uncompressed_format_supported *fmt = data;
@@ -162,7 +170,8 @@ parse_raw_formats(struct venus_core *core, u32 codecs, u32 domain, void *data)
struct raw_formats rawfmts[MAX_FMT_ENTRIES] = {};
u32 entries = fmt->format_entries;
unsigned int i = 0;
- u32 num_planes;
+ u32 num_planes = 0;
+ u32 size;
while (entries) {
num_planes = pinfo->num_planes;
@@ -172,7 +181,7 @@ parse_raw_formats(struct venus_core *core, u32 codecs, u32 domain, void *data)
i++;
if (i >= MAX_FMT_ENTRIES)
- return;
+ return -EINVAL;
if (pinfo->num_planes > MAX_PLANES)
break;
@@ -184,9 +193,13 @@ parse_raw_formats(struct venus_core *core, u32 codecs, u32 domain, void *data)
for_each_codec(core->caps, ARRAY_SIZE(core->caps), codecs, domain,
fill_raw_fmts, rawfmts, i);
+ size = fmt->format_entries * (sizeof(*constr) * num_planes + 2 * sizeof(u32))
+ + 2 * sizeof(u32);
+
+ return size;
}
-static void parse_codecs(struct venus_core *core, void *data)
+static int parse_codecs(struct venus_core *core, void *data)
{
struct hfi_codec_supported *codecs = data;
@@ -198,21 +211,27 @@ static void parse_codecs(struct venus_core *core, void *data)
core->dec_codecs &= ~HFI_VIDEO_CODEC_SPARK;
core->enc_codecs &= ~HFI_VIDEO_CODEC_HEVC;
}
+
+ return sizeof(*codecs);
}
-static void parse_max_sessions(struct venus_core *core, const void *data)
+static int parse_max_sessions(struct venus_core *core, const void *data)
{
const struct hfi_max_sessions_supported *sessions = data;
core->max_sessions_supported = sessions->max_sessions;
+
+ return sizeof(*sessions);
}
-static void parse_codecs_mask(u32 *codecs, u32 *domain, void *data)
+static int parse_codecs_mask(u32 *codecs, u32 *domain, void *data)
{
struct hfi_codec_mask_supported *mask = data;
*codecs = mask->codecs;
*domain = mask->video_domains;
+
+ return sizeof(*mask);
}
static void parser_init(struct venus_inst *inst, u32 *codecs, u32 *domain)
@@ -281,8 +300,9 @@ static int hfi_platform_parser(struct venus_core *core, struct venus_inst *inst)
u32 hfi_parser(struct venus_core *core, struct venus_inst *inst, void *buf,
u32 size)
{
- unsigned int words_count = size >> 2;
- u32 *word = buf, *data, codecs = 0, domain = 0;
+ u32 *words = buf, *payload, codecs = 0, domain = 0;
+ u32 *frame_size = buf + size;
+ u32 rem_bytes = size;
int ret;
ret = hfi_platform_parser(core, inst);
@@ -299,38 +319,66 @@ u32 hfi_parser(struct venus_core *core, struct venus_inst *inst, void *buf,
memset(core->caps, 0, sizeof(core->caps));
}
- while (words_count) {
- data = word + 1;
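+	/*
+	 * Walk the packet word by word. Each parser now returns the number
+	 * of bytes it consumed (or a negative error), so the loop can
+	 * advance by the real property size and keep rem_bytes in sync.
+	 */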
+ while (words < frame_size) {
+ payload = words + 1;
- switch (*word) {
+ switch (*words) {
case HFI_PROPERTY_PARAM_CODEC_SUPPORTED:
- parse_codecs(core, data);
+ if (rem_bytes <= sizeof(struct hfi_codec_supported))
+ return HFI_ERR_SYS_INSUFFICIENT_RESOURCES;
+
+ ret = parse_codecs(core, payload);
+ if (ret < 0)
+ return HFI_ERR_SYS_INSUFFICIENT_RESOURCES;
+
init_codecs(core);
break;
case HFI_PROPERTY_PARAM_MAX_SESSIONS_SUPPORTED:
- parse_max_sessions(core, data);
+ if (rem_bytes <= sizeof(struct hfi_max_sessions_supported))
+ return HFI_ERR_SYS_INSUFFICIENT_RESOURCES;
+
+ ret = parse_max_sessions(core, payload);
break;
case HFI_PROPERTY_PARAM_CODEC_MASK_SUPPORTED:
- parse_codecs_mask(&codecs, &domain, data);
+ if (rem_bytes <= sizeof(struct hfi_codec_mask_supported))
+ return HFI_ERR_SYS_INSUFFICIENT_RESOURCES;
+
+ ret = parse_codecs_mask(&codecs, &domain, payload);
break;
case HFI_PROPERTY_PARAM_UNCOMPRESSED_FORMAT_SUPPORTED:
- parse_raw_formats(core, codecs, domain, data);
+ if (rem_bytes <= sizeof(struct hfi_uncompressed_format_supported))
+ return HFI_ERR_SYS_INSUFFICIENT_RESOURCES;
+
+ ret = parse_raw_formats(core, codecs, domain, payload);
break;
case HFI_PROPERTY_PARAM_CAPABILITY_SUPPORTED:
- parse_caps(core, codecs, domain, data);
+ if (rem_bytes <= sizeof(struct hfi_capabilities))
+ return HFI_ERR_SYS_INSUFFICIENT_RESOURCES;
+
+ ret = parse_caps(core, codecs, domain, payload);
break;
case HFI_PROPERTY_PARAM_PROFILE_LEVEL_SUPPORTED:
- parse_profile_level(core, codecs, domain, data);
+ if (rem_bytes <= sizeof(struct hfi_profile_level_supported))
+ return HFI_ERR_SYS_INSUFFICIENT_RESOURCES;
+
+ ret = parse_profile_level(core, codecs, domain, payload);
break;
case HFI_PROPERTY_PARAM_BUFFER_ALLOC_MODE_SUPPORTED:
- parse_alloc_mode(core, codecs, domain, data);
+ if (rem_bytes <= sizeof(struct hfi_buffer_alloc_mode_supported))
+ return HFI_ERR_SYS_INSUFFICIENT_RESOURCES;
+
+ ret = parse_alloc_mode(core, codecs, domain, payload);
break;
default:
+ ret = sizeof(u32);
break;
}
- word++;
- words_count--;
+ if (ret < 0)
+ return HFI_ERR_SYS_INSUFFICIENT_RESOURCES;
+
+ words += ret / sizeof(u32);
+ rem_bytes -= ret;
}
if (!core->max_sessions_supported)
diff --git a/drivers/media/platform/qcom/venus/hfi_venus.c b/drivers/media/platform/qcom/venus/hfi_venus.c
index a9167867063c..b5f2ea879950 100644
--- a/drivers/media/platform/qcom/venus/hfi_venus.c
+++ b/drivers/media/platform/qcom/venus/hfi_venus.c
@@ -187,6 +187,9 @@ static int venus_write_queue(struct venus_hfi_device *hdev,
/* ensure rd/wr indices's are read from memory */
rmb();
+ if (qsize > IFACEQ_QUEUE_SIZE / 4)
+ return -EINVAL;
+
if (wr_idx >= rd_idx)
empty_space = qsize - (wr_idx - rd_idx);
else
@@ -255,6 +258,9 @@ static int venus_read_queue(struct venus_hfi_device *hdev,
wr_idx = qhdr->write_idx;
qsize = qhdr->q_size;
+ if (qsize > IFACEQ_QUEUE_SIZE / 4)
+ return -EINVAL;
+
/* make sure data is valid before using it */
rmb();
@@ -1035,18 +1041,26 @@ static void venus_sfr_print(struct venus_hfi_device *hdev)
{
struct device *dev = hdev->core->dev;
struct hfi_sfr *sfr = hdev->sfr.kva;
+ u32 size;
void *p;
if (!sfr)
return;
- p = memchr(sfr->data, '\0', sfr->buf_size);
+ size = sfr->buf_size;
+ if (!size)
+ return;
+
+ if (size > ALIGNED_SFR_SIZE)
+ size = ALIGNED_SFR_SIZE;
+
+ p = memchr(sfr->data, '\0', size);
/*
* SFR isn't guaranteed to be NULL terminated since SYS_ERROR indicates
* that Venus is in the process of crashing.
*/
if (!p)
- sfr->data[sfr->buf_size - 1] = '\0';
+ sfr->data[size - 1] = '\0';
dev_err_ratelimited(dev, "SFR message from FW: %s\n", sfr->data);
}
diff --git a/drivers/media/platform/qcom/venus/venc_ctrls.c b/drivers/media/platform/qcom/venus/venc_ctrls.c
index 51801a962ed2..4d36c44f9d44 100644
--- a/drivers/media/platform/qcom/venus/venc_ctrls.c
+++ b/drivers/media/platform/qcom/venus/venc_ctrls.c
@@ -662,11 +662,16 @@ int venc_ctrl_init(struct venus_inst *inst)
v4l2_ctrl_new_std_compound(&inst->ctrl_handler, &venc_ctrl_ops,
V4L2_CID_COLORIMETRY_HDR10_CLL_INFO,
- v4l2_ctrl_ptr_create(&p_hdr10_cll));
+ v4l2_ctrl_ptr_create(&p_hdr10_cll),
+ v4l2_ctrl_ptr_create(NULL),
+ v4l2_ctrl_ptr_create(NULL));
v4l2_ctrl_new_std_compound(&inst->ctrl_handler, &venc_ctrl_ops,
V4L2_CID_COLORIMETRY_HDR10_MASTERING_DISPLAY,
- v4l2_ctrl_ptr_create((void *)&p_hdr10_mastering));
+ v4l2_ctrl_ptr_create((void *)&p_hdr10_mastering),
+ v4l2_ctrl_ptr_create(NULL),
+ v4l2_ctrl_ptr_create(NULL));
+
v4l2_ctrl_new_std_menu(&inst->ctrl_handler, &venc_ctrl_ops,
V4L2_CID_MPEG_VIDEO_INTRA_REFRESH_PERIOD_TYPE,
diff --git a/drivers/media/platform/raspberrypi/rp1-cfe/cfe.c b/drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
index 12660087b12f..69a5f23e7954 100644
--- a/drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
+++ b/drivers/media/platform/raspberrypi/rp1-cfe/cfe.c
@@ -1102,6 +1102,8 @@ static void cfe_buffer_queue(struct vb2_buffer *vb)
static s64 cfe_get_source_link_freq(struct cfe_device *cfe)
{
+ struct media_pad *src_pad =
+ &cfe->source_sd->entity.pads[cfe->source_pad];
struct v4l2_subdev_state *state;
s64 link_freq;
u32 bpp;
@@ -1136,7 +1138,7 @@ static s64 cfe_get_source_link_freq(struct cfe_device *cfe)
bpp = 0;
}
- link_freq = v4l2_get_link_freq(cfe->source_sd->ctrl_handler, bpp,
+ link_freq = v4l2_get_link_freq(src_pad, bpp,
2 * cfe->csi2.dphy.active_lanes);
if (link_freq < 0)
cfe_err(cfe, "failed to get link freq for subdev '%s'\n",
@@ -1315,8 +1317,6 @@ static void cfe_stop_streaming(struct vb2_queue *vq)
}
static const struct vb2_ops cfe_video_qops = {
- .wait_prepare = vb2_ops_wait_prepare,
- .wait_finish = vb2_ops_wait_finish,
.queue_setup = cfe_queue_setup,
.buf_prepare = cfe_buffer_prepare,
.buf_queue = cfe_buffer_queue,
diff --git a/drivers/media/platform/renesas/rcar-csi2.c b/drivers/media/platform/renesas/rcar-csi2.c
index 0a53dd47d7bf..38a3149f9724 100644
--- a/drivers/media/platform/renesas/rcar-csi2.c
+++ b/drivers/media/platform/renesas/rcar-csi2.c
@@ -8,6 +8,7 @@
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/io.h>
+#include <linux/math64.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_graph.h>
@@ -15,6 +16,7 @@
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <linux/sys_soc.h>
+#include <linux/units.h>
#include <media/mipi-csi2.h>
#include <media/v4l2-ctrls.h>
@@ -637,6 +639,10 @@ static const struct rcar_csi2_format rcar_csi2_formats[] = {
.datatype = MIPI_CSI2_DT_YUV422_8B,
.bpp = 20,
}, {
+ .code = MEDIA_BUS_FMT_Y8_1X8,
+ .datatype = MIPI_CSI2_DT_RAW8,
+ .bpp = 8,
+ }, {
.code = MEDIA_BUS_FMT_Y10_1X10,
.datatype = MIPI_CSI2_DT_RAW10,
.bpp = 10,
@@ -657,9 +663,37 @@ static const struct rcar_csi2_format rcar_csi2_formats[] = {
.datatype = MIPI_CSI2_DT_RAW8,
.bpp = 8,
}, {
- .code = MEDIA_BUS_FMT_Y8_1X8,
- .datatype = MIPI_CSI2_DT_RAW8,
- .bpp = 8,
+ .code = MEDIA_BUS_FMT_SBGGR10_1X10,
+ .datatype = MIPI_CSI2_DT_RAW10,
+ .bpp = 10,
+ }, {
+ .code = MEDIA_BUS_FMT_SGBRG10_1X10,
+ .datatype = MIPI_CSI2_DT_RAW10,
+ .bpp = 10,
+ }, {
+ .code = MEDIA_BUS_FMT_SGRBG10_1X10,
+ .datatype = MIPI_CSI2_DT_RAW10,
+ .bpp = 10,
+ }, {
+ .code = MEDIA_BUS_FMT_SRGGB10_1X10,
+ .datatype = MIPI_CSI2_DT_RAW10,
+ .bpp = 10,
+ }, {
+ .code = MEDIA_BUS_FMT_SBGGR12_1X12,
+ .datatype = MIPI_CSI2_DT_RAW12,
+ .bpp = 12,
+ }, {
+ .code = MEDIA_BUS_FMT_SGBRG12_1X12,
+ .datatype = MIPI_CSI2_DT_RAW12,
+ .bpp = 12,
+ }, {
+ .code = MEDIA_BUS_FMT_SGRBG12_1X12,
+ .datatype = MIPI_CSI2_DT_RAW12,
+ .bpp = 12,
+ }, {
+ .code = MEDIA_BUS_FMT_SRGGB12_1X12,
+ .datatype = MIPI_CSI2_DT_RAW12,
+ .bpp = 12,
},
};
@@ -921,7 +955,7 @@ static int rcsi2_calc_mbps(struct rcar_csi2 *priv, unsigned int bpp,
unsigned int lanes)
{
struct v4l2_subdev *source;
- struct v4l2_ctrl *ctrl;
+ s64 freq;
u64 mbps;
if (!priv->remote)
@@ -929,21 +963,17 @@ static int rcsi2_calc_mbps(struct rcar_csi2 *priv, unsigned int bpp,
source = priv->remote;
- /* Read the pixel rate control from remote. */
- ctrl = v4l2_ctrl_find(source->ctrl_handler, V4L2_CID_PIXEL_RATE);
- if (!ctrl) {
- dev_err(priv->dev, "no pixel rate control in subdev %s\n",
- source->name);
- return -EINVAL;
+ freq = v4l2_get_link_freq(source->ctrl_handler, bpp, 2 * lanes);
+ if (freq < 0) {
+ int ret = (int)freq;
+
+ dev_err(priv->dev, "failed to get link freq for %s: %d\n",
+ source->name, ret);
+
+ return ret;
}
- /*
- * Calculate the phypll in mbps.
- * link_freq = (pixel_rate * bits_per_sample) / (2 * nr_of_lanes)
- * bps = link_freq * 2
- */
- mbps = v4l2_ctrl_g_ctrl_int64(ctrl) * bpp;
- do_div(mbps, lanes * 1000000);
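+	/* The per-lane bit rate, in Mbps, is twice the link frequency. */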
+ mbps = div_u64(freq * 2, MEGA);
/* Adjust for C-PHY, divide by 2.8. */
if (priv->cphy)
@@ -1547,7 +1577,8 @@ static int rcsi2_start(struct rcar_csi2 *priv, struct v4l2_subdev_state *state)
return ret;
}
- ret = v4l2_subdev_call(priv->remote, video, s_stream, 1);
+ ret = v4l2_subdev_enable_streams(priv->remote, priv->remote_pad,
+ BIT_ULL(0));
if (ret) {
rcsi2_enter_standby(priv);
return ret;
@@ -1559,31 +1590,50 @@ static int rcsi2_start(struct rcar_csi2 *priv, struct v4l2_subdev_state *state)
static void rcsi2_stop(struct rcar_csi2 *priv)
{
rcsi2_enter_standby(priv);
- v4l2_subdev_call(priv->remote, video, s_stream, 0);
+ v4l2_subdev_disable_streams(priv->remote, priv->remote_pad, BIT_ULL(0));
}
-static int rcsi2_s_stream(struct v4l2_subdev *sd, int enable)
+static int rcsi2_enable_streams(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state, u32 source_pad,
+ u64 source_streams_mask)
{
struct rcar_csi2 *priv = sd_to_csi2(sd);
- struct v4l2_subdev_state *state;
int ret = 0;
+ if (source_streams_mask != 1)
+ return -EINVAL;
+
if (!priv->remote)
return -ENODEV;
- state = v4l2_subdev_lock_and_get_active_state(&priv->subdev);
-
- if (enable && priv->stream_count == 0) {
+ if (priv->stream_count == 0) {
ret = rcsi2_start(priv, state);
if (ret)
- goto out;
- } else if (!enable && priv->stream_count == 1) {
- rcsi2_stop(priv);
+ return ret;
}
- priv->stream_count += enable ? 1 : -1;
-out:
- v4l2_subdev_unlock_state(state);
+ priv->stream_count += 1;
+
+ return ret;
+}
+
+static int rcsi2_disable_streams(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ u32 source_pad, u64 source_streams_mask)
+{
+ struct rcar_csi2 *priv = sd_to_csi2(sd);
+ int ret = 0;
+
+ if (source_streams_mask != 1)
+ return -EINVAL;
+
+ if (!priv->remote)
+ return -ENODEV;
+
+ if (priv->stream_count == 1)
+ rcsi2_stop(priv);
+
+ priv->stream_count -= 1;
return ret;
}
@@ -1610,17 +1660,15 @@ static int rcsi2_set_pad_format(struct v4l2_subdev *sd,
return 0;
}
-static const struct v4l2_subdev_video_ops rcar_csi2_video_ops = {
- .s_stream = rcsi2_s_stream,
-};
-
static const struct v4l2_subdev_pad_ops rcar_csi2_pad_ops = {
+ .enable_streams = rcsi2_enable_streams,
+ .disable_streams = rcsi2_disable_streams,
+
.set_fmt = rcsi2_set_pad_format,
.get_fmt = v4l2_subdev_get_fmt,
};
static const struct v4l2_subdev_ops rcar_csi2_subdev_ops = {
- .video = &rcar_csi2_video_ops,
.pad = &rcar_csi2_pad_ops,
};
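
The rcsi2_calc_mbps() hunk above replaces the V4L2_CID_PIXEL_RATE lookup with v4l2_get_link_freq() and derives the per-lane D-PHY bit rate as div_u64(freq * 2, MEGA). A minimal standalone sketch of that arithmetic, using an assumed pixel rate, bits-per-pixel and lane count (illustrative values only, not driver code):

#include <stdio.h>
#include <stdint.h>

#define MEGA 1000000ULL

int main(void)
{
	/* Assumed example: RAW10 sensor at 48 Mpixel/s over 4 lanes. */
	uint64_t pixel_rate = 48000000;
	unsigned int bpp = 10;
	unsigned int lanes = 4;

	/* link_freq = (pixel_rate * bits_per_sample) / (2 * nr_of_lanes) */
	uint64_t link_freq = pixel_rate * bpp / (2 * lanes);

	/* D-PHY is double data rate, so the per-lane bit rate is 2 * link_freq. */
	uint64_t mbps = link_freq * 2 / MEGA;

	printf("link_freq = %llu Hz, per-lane rate = %llu Mbps\n",
	       (unsigned long long)link_freq, (unsigned long long)mbps);
	return 0;
}

With these numbers the sketch prints a 60 MHz link frequency and 120 Mbps per lane.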
diff --git a/drivers/media/platform/renesas/rcar-isp.c b/drivers/media/platform/renesas/rcar-isp.c
index c515278e3be5..4bc89d4757fa 100644
--- a/drivers/media/platform/renesas/rcar-isp.c
+++ b/drivers/media/platform/renesas/rcar-isp.c
@@ -76,6 +76,54 @@ static const struct rcar_isp_format rcar_isp_formats[] = {
.code = MEDIA_BUS_FMT_YUYV10_2X10,
.datatype = MIPI_CSI2_DT_YUV422_8B,
.procmode = 0x0c,
+ }, {
+ .code = MEDIA_BUS_FMT_SBGGR8_1X8,
+ .datatype = MIPI_CSI2_DT_RAW8,
+ .procmode = 0x00,
+ }, {
+ .code = MEDIA_BUS_FMT_SGBRG8_1X8,
+ .datatype = MIPI_CSI2_DT_RAW8,
+ .procmode = 0x00,
+ }, {
+ .code = MEDIA_BUS_FMT_SGRBG8_1X8,
+ .datatype = MIPI_CSI2_DT_RAW8,
+ .procmode = 0x00,
+ }, {
+ .code = MEDIA_BUS_FMT_SRGGB8_1X8,
+ .datatype = MIPI_CSI2_DT_RAW8,
+ .procmode = 0x00,
+ }, {
+ .code = MEDIA_BUS_FMT_SBGGR10_1X10,
+ .datatype = MIPI_CSI2_DT_RAW10,
+ .procmode = 0x01,
+ }, {
+ .code = MEDIA_BUS_FMT_SGBRG10_1X10,
+ .datatype = MIPI_CSI2_DT_RAW10,
+ .procmode = 0x01,
+ }, {
+ .code = MEDIA_BUS_FMT_SGRBG10_1X10,
+ .datatype = MIPI_CSI2_DT_RAW10,
+ .procmode = 0x01,
+ }, {
+ .code = MEDIA_BUS_FMT_SRGGB10_1X10,
+ .datatype = MIPI_CSI2_DT_RAW10,
+ .procmode = 0x01,
+ }, {
+ .code = MEDIA_BUS_FMT_SBGGR12_1X12,
+ .datatype = MIPI_CSI2_DT_RAW12,
+ .procmode = 0x02,
+ }, {
+ .code = MEDIA_BUS_FMT_SGBRG12_1X12,
+ .datatype = MIPI_CSI2_DT_RAW12,
+ .procmode = 0x02,
+ }, {
+ .code = MEDIA_BUS_FMT_SGRBG12_1X12,
+ .datatype = MIPI_CSI2_DT_RAW12,
+ .procmode = 0x02,
+ }, {
+ .code = MEDIA_BUS_FMT_SRGGB12_1X12,
+ .datatype = MIPI_CSI2_DT_RAW12,
+ .procmode = 0x02,
},
};
@@ -121,9 +169,8 @@ struct rcar_isp {
struct v4l2_async_notifier notifier;
struct v4l2_subdev *remote;
+ unsigned int remote_pad;
- struct mutex lock; /* Protects mf and stream_count. */
- struct v4l2_mbus_framefmt mf;
int stream_count;
};
@@ -170,14 +217,19 @@ static void risp_power_off(struct rcar_isp *isp)
pm_runtime_put(isp->dev);
}
-static int risp_start(struct rcar_isp *isp)
+static int risp_start(struct rcar_isp *isp, struct v4l2_subdev_state *state)
{
+ const struct v4l2_mbus_framefmt *fmt;
const struct rcar_isp_format *format;
unsigned int vc;
u32 sel_csi = 0;
int ret;
- format = risp_code_to_fmt(isp->mf.code);
+ fmt = v4l2_subdev_state_get_format(state, RCAR_ISP_SINK);
+ if (!fmt)
+ return -EINVAL;
+
+ format = risp_code_to_fmt(fmt->code);
if (!format) {
dev_err(isp->dev, "Unsupported bus format\n");
return -EINVAL;
@@ -219,7 +271,8 @@ static int risp_start(struct rcar_isp *isp)
/* Start ISP. */
risp_write(isp, ISPSTART_REG, ISPSTART_START);
- ret = v4l2_subdev_call(isp->remote, video, s_stream, 1);
+ ret = v4l2_subdev_enable_streams(isp->remote, isp->remote_pad,
+ BIT_ULL(0));
if (ret)
risp_power_off(isp);
@@ -228,7 +281,7 @@ static int risp_start(struct rcar_isp *isp)
static void risp_stop(struct rcar_isp *isp)
{
- v4l2_subdev_call(isp->remote, video, s_stream, 0);
+ v4l2_subdev_disable_streams(isp->remote, isp->remote_pad, BIT_ULL(0));
/* Stop ISP. */
risp_write(isp, ISPSTART_REG, ISPSTART_STOP);
@@ -236,87 +289,79 @@ static void risp_stop(struct rcar_isp *isp)
risp_power_off(isp);
}
-static int risp_s_stream(struct v4l2_subdev *sd, int enable)
+static int risp_enable_streams(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state, u32 source_pad,
+ u64 source_streams_mask)
{
struct rcar_isp *isp = sd_to_isp(sd);
int ret = 0;
- mutex_lock(&isp->lock);
+ if (source_streams_mask != 1)
+ return -EINVAL;
- if (!isp->remote) {
- ret = -ENODEV;
- goto out;
- }
+ if (!isp->remote)
+ return -ENODEV;
- if (enable && isp->stream_count == 0) {
- ret = risp_start(isp);
+ if (isp->stream_count == 0) {
+ ret = risp_start(isp, state);
if (ret)
- goto out;
- } else if (!enable && isp->stream_count == 1) {
- risp_stop(isp);
+ return ret;
}
- isp->stream_count += enable ? 1 : -1;
-out:
- mutex_unlock(&isp->lock);
+ isp->stream_count += 1;
return ret;
}
-static const struct v4l2_subdev_video_ops risp_video_ops = {
- .s_stream = risp_s_stream,
-};
-
-static int risp_set_pad_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *sd_state,
- struct v4l2_subdev_format *format)
+static int risp_disable_streams(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state, u32 source_pad,
+ u64 source_streams_mask)
{
struct rcar_isp *isp = sd_to_isp(sd);
- struct v4l2_mbus_framefmt *framefmt;
- mutex_lock(&isp->lock);
+ if (source_streams_mask != 1)
+ return -EINVAL;
- if (!risp_code_to_fmt(format->format.code))
- format->format.code = rcar_isp_formats[0].code;
+ if (!isp->remote)
+ return -ENODEV;
- if (format->which == V4L2_SUBDEV_FORMAT_ACTIVE) {
- isp->mf = format->format;
- } else {
- framefmt = v4l2_subdev_state_get_format(sd_state, 0);
- *framefmt = format->format;
- }
+ if (isp->stream_count == 1)
+ risp_stop(isp);
- mutex_unlock(&isp->lock);
+ isp->stream_count -= 1;
return 0;
}
-static int risp_get_pad_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *sd_state,
+static int risp_set_pad_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
struct v4l2_subdev_format *format)
{
- struct rcar_isp *isp = sd_to_isp(sd);
+ struct v4l2_mbus_framefmt *framefmt;
- mutex_lock(&isp->lock);
+ if (format->pad > RCAR_ISP_SINK)
+ return v4l2_subdev_get_fmt(sd, state, format);
- if (format->which == V4L2_SUBDEV_FORMAT_ACTIVE)
- format->format = isp->mf;
- else
- format->format = *v4l2_subdev_state_get_format(sd_state, 0);
+ if (!risp_code_to_fmt(format->format.code))
+ format->format.code = rcar_isp_formats[0].code;
- mutex_unlock(&isp->lock);
+ for (unsigned int i = 0; i < RCAR_ISP_NUM_PADS; i++) {
+ framefmt = v4l2_subdev_state_get_format(state, i);
+ *framefmt = format->format;
+ }
return 0;
}
static const struct v4l2_subdev_pad_ops risp_pad_ops = {
+ .enable_streams = risp_enable_streams,
+ .disable_streams = risp_disable_streams,
.set_fmt = risp_set_pad_format,
- .get_fmt = risp_get_pad_format,
+ .get_fmt = v4l2_subdev_get_fmt,
.link_validate = v4l2_subdev_link_validate_default,
};
static const struct v4l2_subdev_ops rcar_isp_subdev_ops = {
- .video = &risp_video_ops,
.pad = &risp_pad_ops,
};
@@ -339,6 +384,7 @@ static int risp_notify_bound(struct v4l2_async_notifier *notifier,
}
isp->remote = subdev;
+ isp->remote_pad = pad;
dev_dbg(isp->dev, "Bound %s pad: %d\n", subdev->name, pad);
@@ -449,12 +495,10 @@ static int risp_probe(struct platform_device *pdev)
isp->dev = &pdev->dev;
- mutex_init(&isp->lock);
-
ret = risp_probe_resources(isp, pdev);
if (ret) {
dev_err(isp->dev, "Failed to get resources\n");
- goto error_mutex;
+ return ret;
}
platform_set_drvdata(pdev, isp);
@@ -485,20 +529,25 @@ static int risp_probe(struct platform_device *pdev)
if (ret)
goto error_notifier;
+ ret = v4l2_subdev_init_finalize(&isp->subdev);
+ if (ret)
+ goto error_notifier;
+
ret = v4l2_async_register_subdev(&isp->subdev);
if (ret < 0)
- goto error_notifier;
+ goto error_subdev;
dev_info(isp->dev, "Using CSI-2 input: %u\n", isp->csi_input);
return 0;
+
+error_subdev:
+ v4l2_subdev_cleanup(&isp->subdev);
error_notifier:
v4l2_async_nf_unregister(&isp->notifier);
v4l2_async_nf_cleanup(&isp->notifier);
error_pm:
pm_runtime_disable(&pdev->dev);
-error_mutex:
- mutex_destroy(&isp->lock);
return ret;
}
@@ -511,10 +560,9 @@ static void risp_remove(struct platform_device *pdev)
v4l2_async_nf_cleanup(&isp->notifier);
v4l2_async_unregister_subdev(&isp->subdev);
+ v4l2_subdev_cleanup(&isp->subdev);
pm_runtime_disable(&pdev->dev);
-
- mutex_destroy(&isp->lock);
}
static struct platform_driver rcar_isp_driver = {
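
Both the rcar-csi2 and rcar-isp conversions above follow the same pattern: the .s_stream callback is replaced by .enable_streams/.disable_streams handlers that keep a stream_count and start the hardware only on the 0 -> 1 transition, stopping it on the 1 -> 0 transition. A minimal sketch of that counting pattern in isolation (hypothetical names, not the driver code):

#include <stdio.h>

static int stream_count;

static void hw_start(void) { printf("hw start\n"); }
static void hw_stop(void)  { printf("hw stop\n"); }

static void enable_stream(void)
{
	if (stream_count == 0)
		hw_start();
	stream_count++;
}

static void disable_stream(void)
{
	if (stream_count == 1)
		hw_stop();
	stream_count--;
}

int main(void)
{
	enable_stream();	/* first user: starts the hardware */
	enable_stream();	/* already running, only bumps the count */
	disable_stream();	/* still one user left */
	disable_stream();	/* last user gone: stops the hardware */
	return 0;
}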
diff --git a/drivers/media/platform/renesas/rcar-vin/rcar-dma.c b/drivers/media/platform/renesas/rcar-vin/rcar-dma.c
index 8773998101ff..8de871240440 100644
--- a/drivers/media/platform/renesas/rcar-vin/rcar-dma.c
+++ b/drivers/media/platform/renesas/rcar-vin/rcar-dma.c
@@ -1397,7 +1397,7 @@ static int rvin_set_stream(struct rvin_dev *vin, int on)
if (!on) {
video_device_pipeline_stop(&vin->vdev);
- return v4l2_subdev_call(sd, video, s_stream, 0);
+ return v4l2_subdev_disable_streams(sd, pad->index, BIT_ULL(0));
}
ret = rvin_mc_validate_format(vin, sd, pad);
@@ -1408,7 +1408,7 @@ static int rvin_set_stream(struct rvin_dev *vin, int on)
if (ret)
return ret;
- ret = v4l2_subdev_call(sd, video, s_stream, 1);
+ ret = v4l2_subdev_enable_streams(sd, pad->index, BIT_ULL(0));
if (ret == -ENOIOCTLCMD)
ret = 0;
if (ret)
diff --git a/drivers/media/platform/rockchip/rga/rga-buf.c b/drivers/media/platform/rockchip/rga/rga-buf.c
index 4396348811c8..730bdf98565a 100644
--- a/drivers/media/platform/rockchip/rga/rga-buf.c
+++ b/drivers/media/platform/rockchip/rga/rga-buf.c
@@ -119,6 +119,13 @@ static int rga_buf_prepare(struct vb2_buffer *vb)
if (IS_ERR(f))
return PTR_ERR(f);
+ if (V4L2_TYPE_IS_OUTPUT(vb->vb2_queue->type)) {
+ if (vbuf->field == V4L2_FIELD_ANY)
+ vbuf->field = V4L2_FIELD_NONE;
+ if (vbuf->field != V4L2_FIELD_NONE)
+ return -EINVAL;
+ }
+
for (i = 0; i < vb->num_planes; i++) {
vb2_set_plane_payload(vb, i, f->pix.plane_fmt[i].sizeimage);
diff --git a/drivers/media/platform/rockchip/rga/rga-hw.c b/drivers/media/platform/rockchip/rga/rga-hw.c
index bf55beec0fac..43ed742a1649 100644
--- a/drivers/media/platform/rockchip/rga/rga-hw.c
+++ b/drivers/media/platform/rockchip/rga/rga-hw.c
@@ -376,7 +376,7 @@ static void rga_cmd_set_dst_info(struct rga_ctx *ctx,
* Configure the dest framebuffer base address with pixel offset.
*/
offsets = rga_get_addr_offset(&ctx->out, offset, dst_x, dst_y, dst_w, dst_h);
- dst_offset = rga_lookup_draw_pos(&offsets, mir_mode, rot_mode);
+ dst_offset = rga_lookup_draw_pos(&offsets, rot_mode, mir_mode);
dest[(RGA_DST_Y_RGB_BASE_ADDR - RGA_MODE_BASE_REG) >> 2] =
dst_offset->y_off;
diff --git a/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_opr_v6.c b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_opr_v6.c
index 73f7af674c01..0c636090d723 100644
--- a/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_opr_v6.c
+++ b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_opr_v6.c
@@ -549,8 +549,9 @@ static void s5p_mfc_enc_calc_src_size_v6(struct s5p_mfc_ctx *ctx)
case V4L2_PIX_FMT_NV21M:
ctx->stride[0] = ALIGN(ctx->img_width, S5P_FIMV_NV12M_HALIGN_V6);
ctx->stride[1] = ALIGN(ctx->img_width, S5P_FIMV_NV12M_HALIGN_V6);
- ctx->luma_size = ctx->stride[0] * ALIGN(ctx->img_height, 16);
- ctx->chroma_size = ctx->stride[0] * ALIGN(ctx->img_height / 2, 16);
+ ctx->luma_size = ALIGN(ctx->stride[0] * ALIGN(ctx->img_height, 16), 256);
+ ctx->chroma_size = ALIGN(ctx->stride[0] * ALIGN(ctx->img_height / 2, 16),
+ 256);
break;
case V4L2_PIX_FMT_YUV420M:
case V4L2_PIX_FMT_YVU420M:
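
The s5p-mfc hunk above rounds the computed luma and chroma buffer sizes up to a multiple of 256 bytes with ALIGN(). A small sketch of what that macro does, using arbitrary example sizes rather than real MFC buffer values:

#include <stdio.h>

/* Same rounding as the kernel's ALIGN(): round x up to the next multiple
 * of a, where a is a power of two. */
#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	printf("%u\n", ALIGN(300000u, 256u));	/* prints 300032 */
	printf("%u\n", ALIGN(300032u, 256u));	/* already aligned, prints 300032 */
	return 0;
}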
diff --git a/drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-core.c b/drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-core.c
index 7b3a37957e3a..d151d2ed1f64 100644
--- a/drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-core.c
+++ b/drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-core.c
@@ -797,13 +797,12 @@ static int c8sectpfe_probe(struct platform_device *pdev)
}
tsin->i2c_adapter =
of_find_i2c_adapter_by_node(i2c_bus);
+ of_node_put(i2c_bus);
if (!tsin->i2c_adapter) {
dev_err(&pdev->dev, "No i2c adapter found\n");
- of_node_put(i2c_bus);
ret = -ENODEV;
goto err_node_put;
}
- of_node_put(i2c_bus);
/* Acquire reset GPIO and activate it */
tsin->rst_gpio = devm_fwnode_gpiod_get(dev,
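
The c8sectpfe hunk above moves of_node_put() to immediately after the adapter lookup, so the reference is dropped exactly once instead of being duplicated on the error path. A tiny sketch of that pattern with stand-in get/put helpers (purely illustrative, not the OF API):

#include <stdio.h>

static int refcount;

static void *node_get(void)           { refcount++; return &refcount; }
static void  node_put(void *node)     { (void)node; refcount--; }
static void *find_adapter(void *node) { (void)node; return NULL; /* pretend the lookup failed */ }

int main(void)
{
	void *node = node_get();
	void *adapter = find_adapter(node);

	/* Drop the node reference right after the lookup, whether or not
	 * it succeeded, so no error path has to remember to do it. */
	node_put(node);

	if (!adapter) {
		printf("no adapter found, node refcount back to %d\n", refcount);
		return 1;
	}

	return 0;
}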
diff --git a/drivers/media/platform/st/stm32/dma2d/dma2d.c b/drivers/media/platform/st/stm32/dma2d/dma2d.c
index b6c8400fb92d..48fa781aab06 100644
--- a/drivers/media/platform/st/stm32/dma2d/dma2d.c
+++ b/drivers/media/platform/st/stm32/dma2d/dma2d.c
@@ -490,7 +490,8 @@ static void device_run(void *prv)
dst->sequence = frm_cap->sequence++;
v4l2_m2m_buf_copy_metadata(src, dst, true);
- clk_enable(dev->gate);
+ if (clk_enable(dev->gate))
+ goto end;
dma2d_config_fg(dev, frm_out,
vb2_dma_contig_plane_dma_addr(&src->vb2_buf, 0));
diff --git a/drivers/media/platform/st/stm32/stm32-csi.c b/drivers/media/platform/st/stm32/stm32-csi.c
index 48941aae8c9b..b69048144cc1 100644
--- a/drivers/media/platform/st/stm32/stm32-csi.c
+++ b/drivers/media/platform/st/stm32/stm32-csi.c
@@ -325,7 +325,6 @@ static const struct stm32_csi_mbps_phy_reg snps_stm32mp25[] = {
{ .mbps = 2400, .hsfreqrange = 0x47, .osc_freq_target = 442 },
{ .mbps = 2450, .hsfreqrange = 0x48, .osc_freq_target = 451 },
{ .mbps = 2500, .hsfreqrange = 0x49, .osc_freq_target = 460 },
- { /* sentinel */ }
};
static const struct v4l2_mbus_framefmt fmt_default = {
@@ -358,7 +357,7 @@ static inline struct stm32_csi_dev *to_csidev(struct v4l2_subdev *sd)
static int stm32_csi_setup_lane_merger(struct stm32_csi_dev *csidev)
{
u32 lmcfgr = 0;
- int i;
+ unsigned int i;
for (i = 0; i < csidev->num_lanes; i++) {
if (!csidev->lanes[i] || csidev->lanes[i] > STM32_CSI_LANES_MAX) {
@@ -444,13 +443,15 @@ static void stm32_csi_phy_reg_write(struct stm32_csi_dev *csidev,
static int stm32_csi_start(struct stm32_csi_dev *csidev,
struct v4l2_subdev_state *state)
{
- const struct stm32_csi_mbps_phy_reg *phy_regs;
+ struct media_pad *src_pad =
+ &csidev->s_subdev->entity.pads[csidev->s_subdev_pad_nb];
+ const struct stm32_csi_mbps_phy_reg *phy_regs = NULL;
struct v4l2_mbus_framefmt *sink_fmt;
const struct stm32_csi_fmts *fmt;
unsigned long phy_clk_frate;
+ u32 lanes_ie, lanes_en;
unsigned int mbps;
- u32 lanes_ie = 0;
- u32 lanes_en = 0;
+ unsigned int i;
s64 link_freq;
int ret;
u32 ccfr;
@@ -465,7 +466,7 @@ static int stm32_csi_start(struct stm32_csi_dev *csidev,
if (!csidev->s_subdev)
return -EIO;
- link_freq = v4l2_get_link_freq(csidev->s_subdev->ctrl_handler,
+ link_freq = v4l2_get_link_freq(src_pad,
fmt->bpp, 2 * csidev->num_lanes);
if (link_freq < 0)
return link_freq;
@@ -474,11 +475,14 @@ static int stm32_csi_start(struct stm32_csi_dev *csidev,
mbps = div_s64(link_freq, 500000);
dev_dbg(csidev->dev, "Computed Mbps: %u\n", mbps);
- for (phy_regs = snps_stm32mp25; phy_regs->mbps != 0; phy_regs++)
- if (phy_regs->mbps >= mbps)
+ for (i = 0; i < ARRAY_SIZE(snps_stm32mp25); i++) {
+ if (snps_stm32mp25[i].mbps >= mbps) {
+ phy_regs = &snps_stm32mp25[i];
break;
+ }
+ }
- if (!phy_regs->mbps) {
+ if (!phy_regs) {
dev_err(csidev->dev, "Unsupported PHY speed (%u Mbps)", mbps);
return -ERANGE;
}
@@ -488,8 +492,8 @@ static int stm32_csi_start(struct stm32_csi_dev *csidev,
phy_regs->osc_freq_target);
/* Prepare lanes related configuration bits */
- lanes_ie |= STM32_CSI_SR1_DL0_ERRORS;
- lanes_en |= STM32_CSI_PCR_DL0EN;
+ lanes_ie = STM32_CSI_SR1_DL0_ERRORS;
+ lanes_en = STM32_CSI_PCR_DL0EN;
if (csidev->num_lanes == 2) {
lanes_ie |= STM32_CSI_SR1_DL1_ERRORS;
lanes_en |= STM32_CSI_PCR_DL1EN;
@@ -497,21 +501,19 @@ static int stm32_csi_start(struct stm32_csi_dev *csidev,
ret = pm_runtime_get_sync(csidev->dev);
if (ret < 0)
- return ret;
+ goto error_put;
/* Retrieve CSI2PHY clock rate to compute CCFR value */
phy_clk_frate = clk_get_rate(csidev->clks[STM32_CSI_CLK_CSI2PHY].clk);
if (!phy_clk_frate) {
- pm_runtime_put(csidev->dev);
dev_err(csidev->dev, "CSI2PHY clock rate invalid (0)\n");
- return ret;
+ ret = -EINVAL;
+ goto error_put;
}
ret = stm32_csi_setup_lane_merger(csidev);
- if (ret) {
- pm_runtime_put(csidev->dev);
- return ret;
- }
+ if (ret)
+ goto error_put;
/* Enable the CSI */
writel_relaxed(STM32_CSI_CR_CSIEN, csidev->base + STM32_CSI_CR);
@@ -567,6 +569,10 @@ static int stm32_csi_start(struct stm32_csi_dev *csidev,
writel_relaxed(0, csidev->base + STM32_CSI_PMCR);
return ret;
+
+error_put:
+ pm_runtime_put(csidev->dev);
+ return ret;
}
static void stm32_csi_stop(struct stm32_csi_dev *csidev)
@@ -591,20 +597,20 @@ static int stm32_csi_start_vc(struct stm32_csi_dev *csidev,
{
struct v4l2_mbus_framefmt *mbus_fmt;
const struct stm32_csi_fmts *fmt;
- u32 cfgr1 = 0;
- int ret = 0;
u32 status;
+ u32 cfgr1;
+ int ret;
mbus_fmt = v4l2_subdev_state_get_format(state, STM32_CSI_PAD_SOURCE);
fmt = stm32_csi_code_to_fmt(mbus_fmt->code);
/* If the mbus code is JPEG, don't enable filtering */
if (mbus_fmt->code == MEDIA_BUS_FMT_JPEG_1X8) {
- cfgr1 |= STM32_CSI_VCXCFGR1_ALLDT;
+ cfgr1 = STM32_CSI_VCXCFGR1_ALLDT;
cfgr1 |= fmt->input_fmt << STM32_CSI_VCXCFGR1_CDTFT_SHIFT;
dev_dbg(csidev->dev, "VC%d: enable AllDT mode\n", vc);
} else {
- cfgr1 |= fmt->datatype << STM32_CSI_VCXCFGR1_DT0_SHIFT;
+ cfgr1 = fmt->datatype << STM32_CSI_VCXCFGR1_DT0_SHIFT;
cfgr1 |= fmt->input_fmt << STM32_CSI_VCXCFGR1_DT0FT_SHIFT;
cfgr1 |= STM32_CSI_VCXCFGR1_DT0EN;
dev_dbg(csidev->dev, "VC%d: enable DT0(0x%x)/DT0FT(0x%x)\n",
@@ -630,8 +636,8 @@ static int stm32_csi_start_vc(struct stm32_csi_dev *csidev,
static int stm32_csi_stop_vc(struct stm32_csi_dev *csidev, u32 vc)
{
- int ret = 0;
u32 status;
+ int ret;
/* Stop the Virtual Channel */
writel_relaxed(STM32_CSI_CR_VCXSTOP(vc) | STM32_CSI_CR_CSIEN,
@@ -690,25 +696,27 @@ static int stm32_csi_enable_streams(struct v4l2_subdev *sd,
ret = stm32_csi_start_vc(csidev, state, 0);
if (ret) {
dev_err(csidev->dev, "Failed to start VC0\n");
- stm32_csi_stop(csidev);
- return ret;
+ goto failed_start_vc;
}
ret = v4l2_subdev_enable_streams(csidev->s_subdev,
csidev->s_subdev_pad_nb, BIT_ULL(0));
- if (ret) {
- stm32_csi_stop_vc(csidev, 0);
- stm32_csi_stop(csidev);
- return ret;
- }
+ if (ret)
+ goto failed_enable_streams;
return 0;
+
+failed_enable_streams:
+ stm32_csi_stop_vc(csidev, 0);
+failed_start_vc:
+ stm32_csi_stop(csidev);
+ return ret;
}
static int stm32_csi_init_state(struct v4l2_subdev *sd,
struct v4l2_subdev_state *state)
{
- int i;
+ unsigned int i;
for (i = 0; i < sd->entity.num_pads; i++)
*v4l2_subdev_state_get_format(state, i) = fmt_default;
@@ -873,7 +881,8 @@ static irqreturn_t stm32_csi_irq_thread(int irq, void *arg)
static int stm32_csi_get_resources(struct stm32_csi_dev *csidev,
struct platform_device *pdev)
{
- int irq, ret, i;
+ unsigned int i;
+ int irq, ret;
csidev->base = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
if (IS_ERR(csidev->base))
@@ -926,38 +935,32 @@ static int stm32_csi_parse_dt(struct stm32_csi_dev *csidev)
}
ret = v4l2_fwnode_endpoint_parse(ep, &v4l2_ep);
- fwnode_handle_put(ep);
if (ret) {
dev_err(csidev->dev, "Could not parse v4l2 endpoint\n");
- return ret;
+ goto out;
}
csidev->num_lanes = v4l2_ep.bus.mipi_csi2.num_data_lanes;
if (csidev->num_lanes > STM32_CSI_LANES_MAX) {
dev_err(csidev->dev, "Unsupported number of data-lanes: %d\n",
csidev->num_lanes);
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
memcpy(csidev->lanes, v4l2_ep.bus.mipi_csi2.data_lanes,
sizeof(csidev->lanes));
- ep = fwnode_graph_get_next_endpoint(dev_fwnode(csidev->dev), NULL);
- if (!ep) {
- dev_err(csidev->dev, "Failed to get next endpoint\n");
- return -EINVAL;
- }
-
v4l2_async_subdev_nf_init(&csidev->notifier, &csidev->sd);
asd = v4l2_async_nf_add_fwnode_remote(&csidev->notifier, ep,
struct v4l2_async_connection);
- fwnode_handle_put(ep);
if (IS_ERR(asd)) {
dev_err(csidev->dev, "Failed to add fwnode remote subdev\n");
- return PTR_ERR(asd);
+ ret = PTR_ERR(asd);
+ goto out;
}
csidev->notifier.ops = &stm32_csi_notifier_ops;
@@ -966,9 +969,11 @@ static int stm32_csi_parse_dt(struct stm32_csi_dev *csidev)
if (ret) {
dev_err(csidev->dev, "Failed to register notifier\n");
v4l2_async_nf_cleanup(&csidev->notifier);
- return ret;
+ goto out;
}
+out:
+ fwnode_handle_put(ep);
return ret;
}
@@ -989,11 +994,11 @@ static int stm32_csi_probe(struct platform_device *pdev)
ret = stm32_csi_get_resources(csidev, pdev);
if (ret)
- goto err_free_priv;
+ return ret;
ret = stm32_csi_parse_dt(csidev);
if (ret)
- goto err_free_priv;
+ return ret;
csidev->sd.owner = THIS_MODULE;
csidev->sd.dev = &pdev->dev;
@@ -1018,10 +1023,6 @@ static int stm32_csi_probe(struct platform_device *pdev)
if (ret < 0)
goto err_cleanup;
- ret = v4l2_async_register_subdev(&csidev->sd);
- if (ret < 0)
- goto err_cleanup;
-
/* Reset device */
rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL);
if (IS_ERR(rstc)) {
@@ -1048,6 +1049,10 @@ static int stm32_csi_probe(struct platform_device *pdev)
pm_runtime_enable(&pdev->dev);
+ ret = v4l2_async_register_subdev(&csidev->sd);
+ if (ret < 0)
+ goto err_cleanup;
+
dev_info(&pdev->dev,
"Probed CSI with %u lanes\n", csidev->num_lanes);
@@ -1055,7 +1060,6 @@ static int stm32_csi_probe(struct platform_device *pdev)
err_cleanup:
v4l2_async_nf_cleanup(&csidev->notifier);
-err_free_priv:
return ret;
}
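
The stm32-csi change above drops the sentinel entry from snps_stm32mp25[] and bounds the PHY speed lookup with ARRAY_SIZE(), leaving the result pointer NULL when no entry matches. A minimal standalone sketch of that lookup style (the table contents are illustrative, not the real PHY settings):

#include <stdio.h>
#include <stddef.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct mbps_reg {
	unsigned int mbps;
	unsigned int hsfreqrange;
};

static const struct mbps_reg table[] = {
	{ .mbps = 80,   .hsfreqrange = 0x00 },
	{ .mbps = 90,   .hsfreqrange = 0x10 },
	{ .mbps = 2500, .hsfreqrange = 0x49 },
};

static const struct mbps_reg *lookup(unsigned int mbps)
{
	const struct mbps_reg *found = NULL;
	size_t i;

	for (i = 0; i < ARRAY_SIZE(table); i++) {
		if (table[i].mbps >= mbps) {
			found = &table[i];
			break;
		}
	}

	return found;	/* NULL means the requested speed is out of range */
}

int main(void)
{
	const struct mbps_reg *r = lookup(85);

	if (r)
		printf("hsfreqrange=0x%02x\n", r->hsfreqrange);
	else
		printf("unsupported speed\n");
	return 0;
}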
diff --git a/drivers/media/platform/st/stm32/stm32-dcmipp/dcmipp-core.c b/drivers/media/platform/st/stm32/stm32-dcmipp/dcmipp-core.c
index 71acf539e1f3..1b7bae3266c8 100644
--- a/drivers/media/platform/st/stm32/stm32-dcmipp/dcmipp-core.c
+++ b/drivers/media/platform/st/stm32/stm32-dcmipp/dcmipp-core.c
@@ -89,6 +89,8 @@ struct dcmipp_pipeline_config {
const struct dcmipp_ent_link *links;
size_t num_links;
u32 hw_revision;
+ bool has_csi2;
+ bool needs_mclk;
};
/* --------------------------------------------------------------------------
@@ -164,7 +166,9 @@ static const struct dcmipp_pipeline_config stm32mp25_pipe_cfg = {
.num_ents = ARRAY_SIZE(stm32mp25_ent_config),
.links = stm32mp25_ent_links,
.num_links = ARRAY_SIZE(stm32mp25_ent_links),
- .hw_revision = DCMIPP_STM32MP25_VERR
+ .hw_revision = DCMIPP_STM32MP25_VERR,
+ .has_csi2 = true,
+ .needs_mclk = true
};
#define LINK_FLAG_TO_STR(f) ((f) == 0 ? "" :\
@@ -296,7 +300,7 @@ static int dcmipp_graph_notify_bound(struct v4l2_async_notifier *notifier,
struct v4l2_async_connection *asd)
{
struct dcmipp_device *dcmipp = notifier_to_dcmipp(notifier);
- unsigned int ret;
+ int ret = -EINVAL;
int src_pad, i;
struct dcmipp_ent_device *sink;
struct v4l2_fwnode_endpoint vep = { 0 };
@@ -304,15 +308,9 @@ static int dcmipp_graph_notify_bound(struct v4l2_async_notifier *notifier,
enum v4l2_mbus_type supported_types[] = {
V4L2_MBUS_PARALLEL, V4L2_MBUS_BT656, V4L2_MBUS_CSI2_DPHY
};
- int supported_types_nb = ARRAY_SIZE(supported_types);
dev_dbg(dcmipp->dev, "Subdev \"%s\" bound\n", subdev->name);
- /* Only MP25 supports CSI input */
- if (!of_device_is_compatible(dcmipp->dev->of_node,
- "st,stm32mp25-dcmipp"))
- supported_types_nb--;
-
/*
* Link this sub-device to DCMIPP, it could be
* a parallel camera sensor or a CSI-2 to parallel bridge
@@ -330,7 +328,12 @@ static int dcmipp_graph_notify_bound(struct v4l2_async_notifier *notifier,
}
/* Check for supported MBUS type */
- for (i = 0; i < supported_types_nb; i++) {
+ for (i = 0; i < ARRAY_SIZE(supported_types); i++) {
+ /* Only MP25 supports CSI input */
+ if (supported_types[i] == V4L2_MBUS_CSI2_DPHY &&
+ !dcmipp->pipe_cfg->has_csi2)
+ continue;
+
vep.bus_type = supported_types[i];
ret = v4l2_fwnode_endpoint_parse(ep, &vep);
if (!ret)
@@ -529,7 +532,7 @@ static int dcmipp_probe(struct platform_device *pdev)
"Unable to get kclk\n");
dcmipp->kclk = kclk;
- if (!of_device_is_compatible(pdev->dev.of_node, "st,stm32mp13-dcmipp")) {
+ if (dcmipp->pipe_cfg->needs_mclk) {
mclk = devm_clk_get(&pdev->dev, "mclk");
if (IS_ERR(mclk))
return dev_err_probe(&pdev->dev, PTR_ERR(mclk),
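
The dcmipp change above replaces repeated of_device_is_compatible() checks with per-SoC capability flags (has_csi2, needs_mclk) in the pipeline configuration. A short sketch of that pattern with made-up configuration structs:

#include <stdio.h>
#include <stdbool.h>

struct pipeline_config {
	const char *name;
	bool has_csi2;
	bool needs_mclk;
};

static const struct pipeline_config stm32mp13_cfg = {
	.name = "stm32mp13", .has_csi2 = false, .needs_mclk = false,
};

static const struct pipeline_config stm32mp25_cfg = {
	.name = "stm32mp25", .has_csi2 = true, .needs_mclk = true,
};

static void probe(const struct pipeline_config *cfg)
{
	printf("%s: csi2=%d mclk=%d\n", cfg->name, cfg->has_csi2, cfg->needs_mclk);
	if (cfg->needs_mclk)
		printf("  would request the mclk clock here\n");
}

int main(void)
{
	probe(&stm32mp13_cfg);
	probe(&stm32mp25_cfg);
	return 0;
}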
diff --git a/drivers/media/platform/synopsys/Kconfig b/drivers/media/platform/synopsys/Kconfig
new file mode 100644
index 000000000000..4fd521f78425
--- /dev/null
+++ b/drivers/media/platform/synopsys/Kconfig
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+source "drivers/media/platform/synopsys/hdmirx/Kconfig"
diff --git a/drivers/media/platform/synopsys/Makefile b/drivers/media/platform/synopsys/Makefile
new file mode 100644
index 000000000000..3b12c574dd67
--- /dev/null
+++ b/drivers/media/platform/synopsys/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-y += hdmirx/
diff --git a/drivers/media/platform/synopsys/hdmirx/Kconfig b/drivers/media/platform/synopsys/hdmirx/Kconfig
new file mode 100644
index 000000000000..27e6706f84a3
--- /dev/null
+++ b/drivers/media/platform/synopsys/hdmirx/Kconfig
@@ -0,0 +1,35 @@
+# SPDX-License-Identifier: GPL-2.0
+
+config VIDEO_SYNOPSYS_HDMIRX
+ tristate "Synopsys DesignWare HDMI Receiver driver"
+ depends on VIDEO_DEV
+ select MEDIA_CONTROLLER
+ select VIDEO_V4L2_SUBDEV_API
+ select VIDEOBUF2_DMA_CONTIG
+ select CEC_CORE
+ select HDMI
+ help
+ Support for the Synopsys DesignWare HDMI RX controller.
+ This driver supports HDMI 2.0.
+
+ To compile this driver as a module, choose M here. The module
+ will be called synopsys_hdmirx.
+
+config VIDEO_SYNOPSYS_HDMIRX_LOAD_DEFAULT_EDID
+ bool "Load default EDID"
+ depends on VIDEO_SYNOPSYS_HDMIRX
+ help
+ Preload a default EDID (Extended Display Identification Data)
+ branded by the Linux Foundation that exposes display modes up
+ to 4k@30Hz, which offer the best compatibility with HDMI transmitters.
+
+ Enabling this option is recommended for non-production use cases,
+ as it makes the driver usable out of the box.
+
+ For higher display modes you will need to load a customized EDID
+ from userspace, using the v4l2-ctl tool or by other means.
+
+ Without this option the driver will be practically non-functional
+ until an EDID is loaded from userspace, which is the desired
+ behavior when this driver is used in a commercial product that
+ ships its own branded EDID.
diff --git a/drivers/media/platform/synopsys/hdmirx/Makefile b/drivers/media/platform/synopsys/hdmirx/Makefile
new file mode 100644
index 000000000000..2fa2d9e25300
--- /dev/null
+++ b/drivers/media/platform/synopsys/hdmirx/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
+synopsys-hdmirx-objs := snps_hdmirx.o snps_hdmirx_cec.o
+
+obj-$(CONFIG_VIDEO_SYNOPSYS_HDMIRX) += synopsys-hdmirx.o
diff --git a/drivers/media/platform/synopsys/hdmirx/snps_hdmirx.c b/drivers/media/platform/synopsys/hdmirx/snps_hdmirx.c
new file mode 100644
index 000000000000..3d2913de9a86
--- /dev/null
+++ b/drivers/media/platform/synopsys/hdmirx/snps_hdmirx.c
@@ -0,0 +1,2746 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2025 Collabora, Ltd.
+ * Author: Shreeya Patel <shreeya.patel@collabora.com>
+ * Author: Dmitry Osipenko <dmitry.osipenko@collabora.com>
+ *
+ * Copyright (c) 2021 Rockchip Electronics Co. Ltd.
+ * Author: Dingxian Wen <shawn.wen@rock-chips.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/gpio/consumer.h>
+#include <linux/hdmi.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/mfd/syscon.h>
+#include <linux/math64.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+#include <linux/v4l2-dv-timings.h>
+#include <linux/workqueue.h>
+
+#include <media/cec.h>
+#include <media/v4l2-common.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-dv-timings.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-fh.h>
+#include <media/v4l2-ioctl.h>
+#include <media/videobuf2-dma-contig.h>
+#include <media/videobuf2-v4l2.h>
+
+#include "snps_hdmirx.h"
+#include "snps_hdmirx_cec.h"
+
+#define EDID_NUM_BLOCKS_MAX 4
+#define EDID_BLOCK_SIZE 128
+#define HDMIRX_PLANE_Y 0
+#define HDMIRX_PLANE_CBCR 1
+#define FILTER_FRAME_CNT 6
+
+static int debug;
+module_param(debug, int, 0644);
+MODULE_PARM_DESC(debug, "debug level (0-3)");
+
+enum hdmirx_pix_fmt {
+ HDMIRX_RGB888 = 0,
+ HDMIRX_YUV422 = 1,
+ HDMIRX_YUV444 = 2,
+ HDMIRX_YUV420 = 3,
+};
+
+enum ddr_store_fmt {
+ STORE_RGB888 = 0,
+ STORE_RGBA_ARGB,
+ STORE_YUV420_8BIT,
+ STORE_YUV420_10BIT,
+ STORE_YUV422_8BIT,
+ STORE_YUV422_10BIT,
+ STORE_YUV444_8BIT,
+ STORE_YUV420_16BIT = 8,
+ STORE_YUV422_16BIT = 9,
+};
+
+enum hdmirx_reg_attr {
+ HDMIRX_ATTR_RW = 0,
+ HDMIRX_ATTR_RO = 1,
+ HDMIRX_ATTR_WO = 2,
+ HDMIRX_ATTR_RE = 3,
+};
+
+enum {
+ HDMIRX_RST_A,
+ HDMIRX_RST_P,
+ HDMIRX_RST_REF,
+ HDMIRX_RST_BIU,
+ HDMIRX_NUM_RST,
+};
+
+static const char *const pix_fmt_str[] = {
+ "RGB888",
+ "YUV422",
+ "YUV444",
+ "YUV420",
+};
+
+struct hdmirx_buffer {
+ struct vb2_v4l2_buffer vb;
+ struct list_head queue;
+ u32 buff_addr[VIDEO_MAX_PLANES];
+};
+
+struct hdmirx_stream {
+ struct snps_hdmirx_dev *hdmirx_dev;
+ struct video_device vdev;
+ struct vb2_queue buf_queue;
+ struct list_head buf_head;
+ struct hdmirx_buffer *curr_buf;
+ struct hdmirx_buffer *next_buf;
+ struct v4l2_pix_format_mplane pixm;
+ const struct v4l2_format_info *out_finfo;
+ struct mutex vlock; /* to lock resources associated with video buffer and video device */
+ spinlock_t vbq_lock; /* to lock video buffer queue */
+ bool stopping;
+ wait_queue_head_t wq_stopped;
+ u32 frame_idx;
+ u32 line_flag_int_cnt;
+ u32 irq_stat;
+};
+
+struct snps_hdmirx_dev {
+ struct device *dev;
+ struct hdmirx_stream stream;
+ struct v4l2_device v4l2_dev;
+ struct v4l2_ctrl_handler hdl;
+ struct v4l2_ctrl *detect_tx_5v_ctrl;
+ struct v4l2_ctrl *rgb_range;
+ struct v4l2_ctrl *content_type;
+ struct v4l2_dv_timings timings;
+ struct gpio_desc *detect_5v_gpio;
+ struct delayed_work delayed_work_hotplug;
+ struct delayed_work delayed_work_res_change;
+ struct hdmirx_cec *cec;
+ struct mutex stream_lock; /* to lock video stream capture */
+ struct mutex work_lock; /* to lock the critical section of hotplug event */
+ struct reset_control_bulk_data resets[HDMIRX_NUM_RST];
+ struct clk_bulk_data *clks;
+ struct regmap *grf;
+ struct regmap *vo1_grf;
+ struct completion cr_write_done;
+ struct completion timer_base_lock;
+ struct completion avi_pkt_rcv;
+ struct dentry *debugfs_dir;
+ struct v4l2_debugfs_if *infoframes;
+ enum hdmirx_pix_fmt pix_fmt;
+ void __iomem *regs;
+ int hdmi_irq;
+ int dma_irq;
+ int det_irq;
+ bool hpd_trigger_level_high;
+ bool tmds_clk_ratio;
+ bool plugged;
+ int num_clks;
+ u32 edid_blocks_written;
+ u32 cur_fmt_fourcc;
+ u32 color_depth;
+ spinlock_t rst_lock; /* to lock register access */
+ u8 edid[EDID_NUM_BLOCKS_MAX * EDID_BLOCK_SIZE];
+};
+
+static const struct v4l2_dv_timings cea640x480 = V4L2_DV_BT_CEA_640X480P59_94;
+
+static const struct v4l2_dv_timings_cap hdmirx_timings_cap = {
+ .type = V4L2_DV_BT_656_1120,
+ .reserved = { 0 },
+ V4L2_INIT_BT_TIMINGS(640, 4096, /* min/max width */
+ 480, 2160, /* min/max height */
+ 20000000, 600000000, /* min/max pixelclock */
+ /* standards */
+ V4L2_DV_BT_STD_CEA861,
+ /* capabilities */
+ V4L2_DV_BT_CAP_PROGRESSIVE |
+ V4L2_DV_BT_CAP_INTERLACED)
+};
+
+static void hdmirx_writel(struct snps_hdmirx_dev *hdmirx_dev, int reg, u32 val)
+{
+ guard(spinlock_irqsave)(&hdmirx_dev->rst_lock);
+
+ writel(val, hdmirx_dev->regs + reg);
+}
+
+static u32 hdmirx_readl(struct snps_hdmirx_dev *hdmirx_dev, int reg)
+{
+ guard(spinlock_irqsave)(&hdmirx_dev->rst_lock);
+
+ return readl(hdmirx_dev->regs + reg);
+}
+
+static void hdmirx_reset_dma(struct snps_hdmirx_dev *hdmirx_dev)
+{
+ guard(spinlock_irqsave)(&hdmirx_dev->rst_lock);
+
+ reset_control_reset(hdmirx_dev->resets[0].rstc);
+}
+
+static void hdmirx_update_bits(struct snps_hdmirx_dev *hdmirx_dev, int reg,
+ u32 mask, u32 data)
+{
+ u32 val;
+
+ guard(spinlock_irqsave)(&hdmirx_dev->rst_lock);
+
+ val = readl(hdmirx_dev->regs + reg) & ~mask;
+ val |= (data & mask);
+ writel(val, hdmirx_dev->regs + reg);
+}
+
+static int hdmirx_subscribe_event(struct v4l2_fh *fh,
+ const struct v4l2_event_subscription *sub)
+{
+ switch (sub->type) {
+ case V4L2_EVENT_SOURCE_CHANGE:
+ return v4l2_src_change_event_subscribe(fh, sub);
+ case V4L2_EVENT_CTRL:
+ return v4l2_ctrl_subscribe_event(fh, sub);
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
+
+static bool tx_5v_power_present(struct snps_hdmirx_dev *hdmirx_dev)
+{
+ const unsigned int detection_threshold = 7;
+ int val, i, cnt = 0;
+ bool ret;
+
+ for (i = 0; i < 10; i++) {
+ usleep_range(1000, 1100);
+ val = gpiod_get_value(hdmirx_dev->detect_5v_gpio);
+ if (val > 0)
+ cnt++;
+ if (cnt >= detection_threshold)
+ break;
+ }
+
+ ret = cnt >= detection_threshold;
+ v4l2_dbg(3, debug, &hdmirx_dev->v4l2_dev, "%s: %d\n", __func__, ret);
+
+ return ret;
+}
+
+static bool signal_not_lock(struct snps_hdmirx_dev *hdmirx_dev)
+{
+ u32 mu_status, dma_st10, cmu_st;
+
+ mu_status = hdmirx_readl(hdmirx_dev, MAINUNIT_STATUS);
+ dma_st10 = hdmirx_readl(hdmirx_dev, DMA_STATUS10);
+ cmu_st = hdmirx_readl(hdmirx_dev, CMU_STATUS);
+
+ if ((mu_status & TMDSVALID_STABLE_ST) &&
+ (dma_st10 & HDMIRX_LOCK) &&
+ (cmu_st & TMDSQPCLK_LOCKED_ST))
+ return false;
+
+ return true;
+}
+
+static void hdmirx_get_timings(struct snps_hdmirx_dev *hdmirx_dev,
+ struct v4l2_bt_timings *bt)
+{
+ struct v4l2_device *v4l2_dev = &hdmirx_dev->v4l2_dev;
+ u32 hact, vact, htotal, vtotal, fps;
+ u32 hfp, hs, hbp, vfp, vs, vbp;
+ u32 val;
+
+ val = hdmirx_readl(hdmirx_dev, DMA_STATUS2);
+ hact = (val >> 16) & 0xffff;
+ vact = val & 0xffff;
+
+ val = hdmirx_readl(hdmirx_dev, DMA_STATUS3);
+ htotal = (val >> 16) & 0xffff;
+ vtotal = val & 0xffff;
+
+ val = hdmirx_readl(hdmirx_dev, DMA_STATUS4);
+ hs = (val >> 16) & 0xffff;
+ vs = val & 0xffff;
+
+ val = hdmirx_readl(hdmirx_dev, DMA_STATUS5);
+ hbp = (val >> 16) & 0xffff;
+ vbp = val & 0xffff;
+
+ if (hdmirx_dev->pix_fmt == HDMIRX_YUV420) {
+ htotal *= 2;
+ hbp *= 2;
+ hs *= 2;
+ }
+
+ hfp = htotal - hact - hs - hbp;
+ vfp = vtotal - vact - vs - vbp;
+
+ fps = div_u64(bt->pixelclock + (htotal * vtotal) / 2, htotal * vtotal);
+ bt->width = hact;
+ bt->height = vact;
+ bt->hfrontporch = hfp;
+ bt->hsync = hs;
+ bt->hbackporch = hbp;
+ bt->vfrontporch = vfp;
+ bt->vsync = vs;
+ bt->vbackporch = vbp;
+
+ v4l2_dbg(1, debug, v4l2_dev, "get timings from dma\n");
+ v4l2_dbg(1, debug, v4l2_dev,
+ "act:%ux%u%s, total:%ux%u, fps:%u, pixclk:%llu\n",
+ bt->width, bt->height, bt->interlaced ? "i" : "p",
+ htotal, vtotal, fps, bt->pixelclock);
+
+ v4l2_dbg(2, debug, v4l2_dev,
+ "hfp:%u, hact:%u, hs:%u, hbp:%u, vfp:%u, vact:%u, vs:%u, vbp:%u\n",
+ bt->hfrontporch, hact, bt->hsync, bt->hbackporch,
+ bt->vfrontporch, vact, bt->vsync, bt->vbackporch);
+
+ if (bt->interlaced == V4L2_DV_INTERLACED) {
+ bt->height *= 2;
+ bt->il_vfrontporch = bt->vfrontporch;
+ bt->il_vsync = bt->vsync + 1;
+ bt->il_vbackporch = bt->vbackporch;
+ }
+}
+
+static bool hdmirx_check_timing_valid(struct v4l2_bt_timings *bt)
+{
+ /*
+ * Sanity-check timing values. Some of the values will be outside
+ * of a valid range until the hardware becomes ready to perform capture.
+ */
+ if (bt->width < 100 || bt->width > 5000 ||
+ bt->height < 100 || bt->height > 5000)
+ return false;
+
+ if (!bt->hsync || bt->hsync > 200 ||
+ !bt->vsync || bt->vsync > 100)
+ return false;
+
+ /*
+ * According to the CEA-861, 1280x720p25 Hblank timing is up to 2680,
+ * and all standard video format timings are less than 3000.
+ */
+ if (!bt->hbackporch || bt->hbackporch > 3000 ||
+ !bt->vbackporch || bt->vbackporch > 3000)
+ return false;
+
+ if (!bt->hfrontporch || bt->hfrontporch > 3000 ||
+ !bt->vfrontporch || bt->vfrontporch > 3000)
+ return false;
+
+ return true;
+}
+
+static void hdmirx_toggle_polarity(struct snps_hdmirx_dev *hdmirx_dev)
+{
+ u32 val = hdmirx_readl(hdmirx_dev, DMA_CONFIG6);
+
+ if (!(val & (VSYNC_TOGGLE_EN | HSYNC_TOGGLE_EN))) {
+ hdmirx_update_bits(hdmirx_dev, DMA_CONFIG6,
+ VSYNC_TOGGLE_EN | HSYNC_TOGGLE_EN,
+ VSYNC_TOGGLE_EN | HSYNC_TOGGLE_EN);
+ hdmirx_update_bits(hdmirx_dev, VIDEO_CONFIG2,
+ VPROC_VSYNC_POL_OVR_VALUE |
+ VPROC_VSYNC_POL_OVR_EN |
+ VPROC_HSYNC_POL_OVR_VALUE |
+ VPROC_HSYNC_POL_OVR_EN,
+ VPROC_VSYNC_POL_OVR_EN |
+ VPROC_HSYNC_POL_OVR_EN);
+ return;
+ }
+
+ hdmirx_update_bits(hdmirx_dev, DMA_CONFIG6,
+ VSYNC_TOGGLE_EN | HSYNC_TOGGLE_EN, 0);
+
+ hdmirx_update_bits(hdmirx_dev, VIDEO_CONFIG2,
+ VPROC_VSYNC_POL_OVR_VALUE |
+ VPROC_VSYNC_POL_OVR_EN |
+ VPROC_HSYNC_POL_OVR_VALUE |
+ VPROC_HSYNC_POL_OVR_EN, 0);
+}
+
+/*
+ * When querying DV timings during preview, if the DMA's timing is stable,
+ * we retrieve the timings directly from the DMA. However, if the current
+ * resolution is negative, obtaining the timing from CTRL may require a
+ * change in the sync polarity, potentially leading to DMA errors.
+ */
+static int hdmirx_get_detected_timings(struct snps_hdmirx_dev *hdmirx_dev,
+ struct v4l2_dv_timings *timings)
+{
+ struct v4l2_device *v4l2_dev = &hdmirx_dev->v4l2_dev;
+ struct v4l2_bt_timings *bt = &timings->bt;
+ u32 val, tmdsqpclk_freq, pix_clk;
+ unsigned int num_retries = 0;
+ u32 field_type, deframer_st;
+ u64 tmp_data, tmds_clk;
+ bool is_dvi_mode;
+ int ret;
+
+ mutex_lock(&hdmirx_dev->work_lock);
+retry:
+ memset(timings, 0, sizeof(struct v4l2_dv_timings));
+ timings->type = V4L2_DV_BT_656_1120;
+
+ val = hdmirx_readl(hdmirx_dev, DMA_STATUS11);
+ field_type = (val & HDMIRX_TYPE_MASK) >> 7;
+
+ if (field_type & BIT(0))
+ bt->interlaced = V4L2_DV_INTERLACED;
+ else
+ bt->interlaced = V4L2_DV_PROGRESSIVE;
+
+ deframer_st = hdmirx_readl(hdmirx_dev, DEFRAMER_STATUS);
+ is_dvi_mode = !(deframer_st & OPMODE_STS_MASK);
+
+ tmdsqpclk_freq = hdmirx_readl(hdmirx_dev, CMU_TMDSQPCLK_FREQ);
+ tmds_clk = tmdsqpclk_freq * 4 * 1000;
+ tmp_data = tmds_clk * 24;
+ do_div(tmp_data, hdmirx_dev->color_depth);
+ pix_clk = tmp_data;
+ bt->pixelclock = pix_clk;
+
+ if (hdmirx_dev->pix_fmt == HDMIRX_YUV420)
+ bt->pixelclock *= 2;
+
+ hdmirx_get_timings(hdmirx_dev, bt);
+
+ v4l2_dbg(2, debug, v4l2_dev, "tmds_clk:%llu, pix_clk:%d\n", tmds_clk, pix_clk);
+ v4l2_dbg(1, debug, v4l2_dev, "interlace:%d, fmt:%d, color:%d, mode:%s\n",
+ bt->interlaced, hdmirx_dev->pix_fmt,
+ hdmirx_dev->color_depth,
+ is_dvi_mode ? "dvi" : "hdmi");
+ v4l2_dbg(2, debug, v4l2_dev, "deframer_st:%#x\n", deframer_st);
+
+ /*
+ * Timing will be invalid until it is latched by the hardware, or if
+ * the signal's polarity doesn't match.
+ */
+ if (!hdmirx_check_timing_valid(bt)) {
+ if (num_retries++ < 20) {
+ if (num_retries == 10)
+ hdmirx_toggle_polarity(hdmirx_dev);
+
+ usleep_range(10 * 1000, 10 * 1100);
+ goto retry;
+ }
+
+ ret = -ERANGE;
+ } else {
+ ret = 0;
+ }
+
+ mutex_unlock(&hdmirx_dev->work_lock);
+
+ return ret;
+}
+
+static bool port_no_link(struct snps_hdmirx_dev *hdmirx_dev)
+{
+ return !tx_5v_power_present(hdmirx_dev);
+}
+
+static int hdmirx_query_dv_timings(struct file *file, void *_fh,
+ struct v4l2_dv_timings *timings)
+{
+ struct hdmirx_stream *stream = video_drvdata(file);
+ struct snps_hdmirx_dev *hdmirx_dev = stream->hdmirx_dev;
+ struct v4l2_device *v4l2_dev = &hdmirx_dev->v4l2_dev;
+ int ret;
+
+ if (port_no_link(hdmirx_dev)) {
+ v4l2_err(v4l2_dev, "%s: port has no link\n", __func__);
+ return -ENOLINK;
+ }
+
+ if (signal_not_lock(hdmirx_dev)) {
+ v4l2_err(v4l2_dev, "%s: signal is not locked\n", __func__);
+ return -ENOLCK;
+ }
+
+ ret = hdmirx_get_detected_timings(hdmirx_dev, timings);
+ if (ret)
+ return ret;
+
+ if (debug)
+ v4l2_print_dv_timings(hdmirx_dev->v4l2_dev.name,
+ "query_dv_timings: ", timings, false);
+
+ if (!v4l2_valid_dv_timings(timings, &hdmirx_timings_cap, NULL, NULL)) {
+ v4l2_dbg(1, debug, v4l2_dev, "%s: timings out of range\n", __func__);
+ return -ERANGE;
+ }
+
+ return 0;
+}
+
+static void hdmirx_hpd_ctrl(struct snps_hdmirx_dev *hdmirx_dev, bool en)
+{
+ struct v4l2_device *v4l2_dev = &hdmirx_dev->v4l2_dev;
+
+ v4l2_dbg(1, debug, v4l2_dev, "%s: %sable, hpd_trigger_level_high:%d\n",
+ __func__, en ? "en" : "dis", hdmirx_dev->hpd_trigger_level_high);
+
+ hdmirx_update_bits(hdmirx_dev, SCDC_CONFIG, HPDLOW, en ? 0 : HPDLOW);
+ hdmirx_writel(hdmirx_dev, CORE_CONFIG,
+ hdmirx_dev->hpd_trigger_level_high ? en : !en);
+
+ /* 100ms delay as per HDMI spec */
+ if (!en)
+ msleep(100);
+}
+
+static void hdmirx_write_edid_data(struct snps_hdmirx_dev *hdmirx_dev,
+ u8 *edid, unsigned int num_blocks)
+{
+ static u8 data[EDID_NUM_BLOCKS_MAX * EDID_BLOCK_SIZE];
+ unsigned int edid_len = num_blocks * EDID_BLOCK_SIZE;
+ unsigned int i;
+
+ cec_s_phys_addr_from_edid(hdmirx_dev->cec->adap,
+ (const struct edid *)edid);
+
+ hdmirx_update_bits(hdmirx_dev, DMA_CONFIG11,
+ EDID_READ_EN_MASK |
+ EDID_WRITE_EN_MASK |
+ EDID_SLAVE_ADDR_MASK,
+ EDID_READ_EN(0) |
+ EDID_WRITE_EN(1) |
+ EDID_SLAVE_ADDR(0x50));
+ for (i = 0; i < edid_len; i++)
+ hdmirx_writel(hdmirx_dev, DMA_CONFIG10, edid[i]);
+
+ /* read out for debug */
+ if (debug >= 2) {
+ hdmirx_update_bits(hdmirx_dev, DMA_CONFIG11,
+ EDID_READ_EN_MASK |
+ EDID_WRITE_EN_MASK,
+ EDID_READ_EN(1) |
+ EDID_WRITE_EN(0));
+
+ for (i = 0; i < edid_len; i++)
+ data[i] = hdmirx_readl(hdmirx_dev, DMA_STATUS14);
+
+ print_hex_dump(KERN_INFO, "", DUMP_PREFIX_NONE, 16, 1, data,
+ edid_len, false);
+ }
+
+ /*
+ * The EDID_READ_EN and EDID_WRITE_EN bits must be set back to 0 when
+ * the EDID read/write operation is completed. Otherwise, reading and
+ * writing of other registers is affected.
+ */
+ hdmirx_update_bits(hdmirx_dev, DMA_CONFIG11,
+ EDID_READ_EN_MASK | EDID_WRITE_EN_MASK,
+ EDID_READ_EN(0) | EDID_WRITE_EN(0));
+}
+
+static void hdmirx_write_edid(struct snps_hdmirx_dev *hdmirx_dev,
+ struct v4l2_edid *edid)
+{
+ memset(edid->reserved, 0, sizeof(edid->reserved));
+ memset(hdmirx_dev->edid, 0, sizeof(hdmirx_dev->edid));
+
+ hdmirx_write_edid_data(hdmirx_dev, edid->edid, edid->blocks);
+
+ hdmirx_dev->edid_blocks_written = edid->blocks;
+ memcpy(hdmirx_dev->edid, edid->edid, edid->blocks * EDID_BLOCK_SIZE);
+}
+
+/*
+ * Before clearing interrupt, we need to read the interrupt status.
+ */
+static inline void hdmirx_clear_interrupt(struct snps_hdmirx_dev *hdmirx_dev,
+ u32 reg, u32 val)
+{
+ /* (interrupt status register) = (interrupt clear register) - 0x8 */
+ hdmirx_readl(hdmirx_dev, reg - 0x8);
+ hdmirx_writel(hdmirx_dev, reg, val);
+}
+
+static void hdmirx_interrupts_setup(struct snps_hdmirx_dev *hdmirx_dev, bool en)
+{
+ v4l2_dbg(1, debug, &hdmirx_dev->v4l2_dev, "%s: %sable\n",
+ __func__, en ? "en" : "dis");
+
+ disable_irq(hdmirx_dev->hdmi_irq);
+
+ /* Note: In DVI mode, it needs to be written twice to take effect. */
+ hdmirx_clear_interrupt(hdmirx_dev, MAINUNIT_0_INT_CLEAR, 0xffffffff);
+ hdmirx_clear_interrupt(hdmirx_dev, MAINUNIT_2_INT_CLEAR, 0xffffffff);
+ hdmirx_clear_interrupt(hdmirx_dev, MAINUNIT_0_INT_CLEAR, 0xffffffff);
+ hdmirx_clear_interrupt(hdmirx_dev, MAINUNIT_2_INT_CLEAR, 0xffffffff);
+ hdmirx_clear_interrupt(hdmirx_dev, AVPUNIT_0_INT_CLEAR, 0xffffffff);
+
+ if (en) {
+ hdmirx_update_bits(hdmirx_dev, MAINUNIT_0_INT_MASK_N,
+ TMDSQPCLK_OFF_CHG | TMDSQPCLK_LOCKED_CHG,
+ TMDSQPCLK_OFF_CHG | TMDSQPCLK_LOCKED_CHG);
+ hdmirx_update_bits(hdmirx_dev, MAINUNIT_2_INT_MASK_N,
+ TMDSVALID_STABLE_CHG, TMDSVALID_STABLE_CHG);
+ hdmirx_update_bits(hdmirx_dev, AVPUNIT_0_INT_MASK_N,
+ CED_DYN_CNT_CH2_IRQ |
+ CED_DYN_CNT_CH1_IRQ |
+ CED_DYN_CNT_CH0_IRQ,
+ CED_DYN_CNT_CH2_IRQ |
+ CED_DYN_CNT_CH1_IRQ |
+ CED_DYN_CNT_CH0_IRQ);
+ } else {
+ hdmirx_writel(hdmirx_dev, MAINUNIT_0_INT_MASK_N, 0);
+ hdmirx_writel(hdmirx_dev, MAINUNIT_2_INT_MASK_N, 0);
+ hdmirx_writel(hdmirx_dev, AVPUNIT_0_INT_MASK_N, 0);
+ }
+
+ enable_irq(hdmirx_dev->hdmi_irq);
+}
+
+static void hdmirx_plugout(struct snps_hdmirx_dev *hdmirx_dev)
+{
+ if (!hdmirx_dev->plugged)
+ return;
+
+ hdmirx_update_bits(hdmirx_dev, SCDC_CONFIG, POWERPROVIDED, 0);
+ hdmirx_interrupts_setup(hdmirx_dev, false);
+ hdmirx_update_bits(hdmirx_dev, DMA_CONFIG6, HDMIRX_DMA_EN, 0);
+ hdmirx_update_bits(hdmirx_dev, DMA_CONFIG4,
+ LINE_FLAG_INT_EN |
+ HDMIRX_DMA_IDLE_INT |
+ HDMIRX_LOCK_DISABLE_INT |
+ LAST_FRAME_AXI_UNFINISH_INT_EN |
+ FIFO_OVERFLOW_INT_EN |
+ FIFO_UNDERFLOW_INT_EN |
+ HDMIRX_AXI_ERROR_INT_EN, 0);
+ hdmirx_reset_dma(hdmirx_dev);
+ hdmirx_update_bits(hdmirx_dev, PHY_CONFIG, HDMI_DISABLE | PHY_RESET |
+ PHY_PDDQ, HDMI_DISABLE);
+ hdmirx_writel(hdmirx_dev, PHYCREG_CONFIG0, 0x0);
+ cancel_delayed_work(&hdmirx_dev->delayed_work_res_change);
+
+ /* will be NULL on driver removal */
+ if (hdmirx_dev->rgb_range)
+ v4l2_ctrl_s_ctrl(hdmirx_dev->rgb_range, V4L2_DV_RGB_RANGE_AUTO);
+
+ if (hdmirx_dev->content_type)
+ v4l2_ctrl_s_ctrl(hdmirx_dev->content_type,
+ V4L2_DV_IT_CONTENT_TYPE_NO_ITC);
+
+ hdmirx_dev->plugged = false;
+}
+
+static int hdmirx_set_edid(struct file *file, void *fh, struct v4l2_edid *edid)
+{
+ struct hdmirx_stream *stream = video_drvdata(file);
+ struct snps_hdmirx_dev *hdmirx_dev = stream->hdmirx_dev;
+ u16 phys_addr;
+ int err;
+
+ if (edid->pad)
+ return -EINVAL;
+
+ if (edid->start_block)
+ return -EINVAL;
+
+ if (edid->blocks > EDID_NUM_BLOCKS_MAX) {
+ edid->blocks = EDID_NUM_BLOCKS_MAX;
+ return -E2BIG;
+ }
+
+ if (edid->blocks) {
+ phys_addr = cec_get_edid_phys_addr(edid->edid,
+ edid->blocks * EDID_BLOCK_SIZE,
+ NULL);
+
+ err = v4l2_phys_addr_validate(phys_addr, &phys_addr, NULL);
+ if (err)
+ return err;
+ }
+
+ /*
+ * Touching HW registers in parallel with the plug-in/plug-out handlers
+ * will bring the hardware into a bad state.
+ */
+ mutex_lock(&hdmirx_dev->work_lock);
+
+ hdmirx_hpd_ctrl(hdmirx_dev, false);
+
+ if (edid->blocks) {
+ hdmirx_write_edid(hdmirx_dev, edid);
+ hdmirx_hpd_ctrl(hdmirx_dev, true);
+ } else {
+ cec_phys_addr_invalidate(hdmirx_dev->cec->adap);
+ hdmirx_dev->edid_blocks_written = 0;
+ }
+
+ mutex_unlock(&hdmirx_dev->work_lock);
+
+ return 0;
+}
+
+static int hdmirx_get_edid(struct file *file, void *fh, struct v4l2_edid *edid)
+{
+ struct hdmirx_stream *stream = video_drvdata(file);
+ struct snps_hdmirx_dev *hdmirx_dev = stream->hdmirx_dev;
+ struct v4l2_device *v4l2_dev = &hdmirx_dev->v4l2_dev;
+
+ memset(edid->reserved, 0, sizeof(edid->reserved));
+
+ if (edid->pad)
+ return -EINVAL;
+
+ if (!edid->start_block && !edid->blocks) {
+ edid->blocks = hdmirx_dev->edid_blocks_written;
+ return 0;
+ }
+
+ if (!hdmirx_dev->edid_blocks_written)
+ return -ENODATA;
+
+ if (edid->start_block >= hdmirx_dev->edid_blocks_written || !edid->blocks)
+ return -EINVAL;
+
+ if (edid->start_block + edid->blocks > hdmirx_dev->edid_blocks_written)
+ edid->blocks = hdmirx_dev->edid_blocks_written - edid->start_block;
+
+ memcpy(edid->edid, hdmirx_dev->edid, edid->blocks * EDID_BLOCK_SIZE);
+
+ v4l2_dbg(1, debug, v4l2_dev, "%s: read EDID:\n", __func__);
+ if (debug > 0)
+ print_hex_dump(KERN_INFO, "", DUMP_PREFIX_NONE, 16, 1,
+ edid->edid, edid->blocks * EDID_BLOCK_SIZE, false);
+
+ return 0;
+}
+
+static int hdmirx_g_parm(struct file *file, void *priv,
+ struct v4l2_streamparm *parm)
+{
+ struct hdmirx_stream *stream = video_drvdata(file);
+ struct snps_hdmirx_dev *hdmirx_dev = stream->hdmirx_dev;
+
+ if (parm->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+ return -EINVAL;
+
+ parm->parm.capture.timeperframe = v4l2_calc_timeperframe(&hdmirx_dev->timings);
+
+ return 0;
+}
+
+static int hdmirx_dv_timings_cap(struct file *file, void *fh,
+ struct v4l2_dv_timings_cap *cap)
+{
+ *cap = hdmirx_timings_cap;
+ return 0;
+}
+
+static int hdmirx_enum_dv_timings(struct file *file, void *_fh,
+ struct v4l2_enum_dv_timings *timings)
+{
+ return v4l2_enum_dv_timings_cap(timings, &hdmirx_timings_cap, NULL, NULL);
+}
+
+static void hdmirx_scdc_init(struct snps_hdmirx_dev *hdmirx_dev)
+{
+ hdmirx_update_bits(hdmirx_dev, I2C_SLAVE_CONFIG1,
+ I2C_SDA_OUT_HOLD_VALUE_QST_MASK |
+ I2C_SDA_IN_HOLD_VALUE_QST_MASK,
+ I2C_SDA_OUT_HOLD_VALUE_QST(0x80) |
+ I2C_SDA_IN_HOLD_VALUE_QST(0x15));
+ hdmirx_update_bits(hdmirx_dev, SCDC_REGBANK_CONFIG0,
+ SCDC_SINKVERSION_QST_MASK,
+ SCDC_SINKVERSION_QST(1));
+}
+
+static int wait_reg_bit_status(struct snps_hdmirx_dev *hdmirx_dev, u32 reg,
+ u32 bit_mask, u32 expect_val, bool is_grf,
+ u32 ms)
+{
+ struct v4l2_device *v4l2_dev = &hdmirx_dev->v4l2_dev;
+ u32 i, val;
+
+ for (i = 0; i < ms; i++) {
+ if (is_grf)
+ regmap_read(hdmirx_dev->grf, reg, &val);
+ else
+ val = hdmirx_readl(hdmirx_dev, reg);
+
+ if ((val & bit_mask) == expect_val) {
+ v4l2_dbg(2, debug, v4l2_dev,
+ "%s: i:%d, time: %dms\n", __func__, i, ms);
+ break;
+ }
+ usleep_range(1000, 1010);
+ }
+
+ if (i == ms)
+ return -1;
+
+ return 0;
+}
+
+static int hdmirx_phy_register_write(struct snps_hdmirx_dev *hdmirx_dev,
+ u32 phy_reg, u32 val)
+{
+ struct device *dev = hdmirx_dev->dev;
+
+ reinit_completion(&hdmirx_dev->cr_write_done);
+ /* clear irq status */
+ hdmirx_clear_interrupt(hdmirx_dev, MAINUNIT_2_INT_CLEAR, 0xffffffff);
+ /* en irq */
+ hdmirx_update_bits(hdmirx_dev, MAINUNIT_2_INT_MASK_N,
+ PHYCREG_CR_WRITE_DONE, PHYCREG_CR_WRITE_DONE);
+ /* write phy reg addr */
+ hdmirx_writel(hdmirx_dev, PHYCREG_CONFIG1, phy_reg);
+ /* write phy reg val */
+ hdmirx_writel(hdmirx_dev, PHYCREG_CONFIG2, val);
+ /* config write enable */
+ hdmirx_writel(hdmirx_dev, PHYCREG_CONTROL, PHYCREG_CR_PARA_WRITE_P);
+
+ if (!wait_for_completion_timeout(&hdmirx_dev->cr_write_done,
+ msecs_to_jiffies(20))) {
+ dev_err(dev, "%s wait cr write done failed\n", __func__);
+ return -1;
+ }
+
+ return 0;
+}
+
+static void hdmirx_tmds_clk_ratio_config(struct snps_hdmirx_dev *hdmirx_dev)
+{
+ struct v4l2_device *v4l2_dev = &hdmirx_dev->v4l2_dev;
+ u32 val;
+
+ val = hdmirx_readl(hdmirx_dev, SCDC_REGBANK_STATUS1);
+ v4l2_dbg(3, debug, v4l2_dev, "%s: scdc_regbank_st:%#x\n", __func__, val);
+ hdmirx_dev->tmds_clk_ratio = (val & SCDC_TMDSBITCLKRATIO) > 0;
+
+ if (hdmirx_dev->tmds_clk_ratio) {
+ v4l2_dbg(3, debug, v4l2_dev, "%s: HDMITX greater than 3.4Gbps\n", __func__);
+ hdmirx_update_bits(hdmirx_dev, PHY_CONFIG,
+ TMDS_CLOCK_RATIO, TMDS_CLOCK_RATIO);
+ } else {
+ v4l2_dbg(3, debug, v4l2_dev, "%s: HDMITX less than 3.4Gbps\n", __func__);
+ hdmirx_update_bits(hdmirx_dev, PHY_CONFIG,
+ TMDS_CLOCK_RATIO, 0);
+ }
+}
+
+static void hdmirx_phy_config(struct snps_hdmirx_dev *hdmirx_dev)
+{
+ struct device *dev = hdmirx_dev->dev;
+
+ hdmirx_clear_interrupt(hdmirx_dev, SCDC_INT_CLEAR, 0xffffffff);
+ hdmirx_update_bits(hdmirx_dev, SCDC_INT_MASK_N, SCDCTMDSCCFG_CHG,
+ SCDCTMDSCCFG_CHG);
+ /* cr_para_clk 24M */
+ hdmirx_update_bits(hdmirx_dev, PHY_CONFIG, REFFREQ_SEL_MASK, REFFREQ_SEL(0));
+ /* rx data width 40bit valid */
+ hdmirx_update_bits(hdmirx_dev, PHY_CONFIG, RXDATA_WIDTH, RXDATA_WIDTH);
+ hdmirx_update_bits(hdmirx_dev, PHY_CONFIG, PHY_RESET, PHY_RESET);
+ usleep_range(100, 110);
+ hdmirx_update_bits(hdmirx_dev, PHY_CONFIG, PHY_RESET, 0);
+ usleep_range(100, 110);
+ /* select cr para interface */
+ hdmirx_writel(hdmirx_dev, PHYCREG_CONFIG0, 0x3);
+
+ if (wait_reg_bit_status(hdmirx_dev, SYS_GRF_SOC_STATUS1,
+ HDMIRXPHY_SRAM_INIT_DONE,
+ HDMIRXPHY_SRAM_INIT_DONE, true, 10))
+ dev_err(dev, "%s: phy SRAM init failed\n", __func__);
+
+ regmap_write(hdmirx_dev->grf, SYS_GRF_SOC_CON1,
+ (HDMIRXPHY_SRAM_EXT_LD_DONE << 16) |
+ HDMIRXPHY_SRAM_EXT_LD_DONE);
+ hdmirx_phy_register_write(hdmirx_dev, SUP_DIG_ANA_CREGS_SUP_ANA_NC, 2);
+ hdmirx_phy_register_write(hdmirx_dev, SUP_DIG_ANA_CREGS_SUP_ANA_NC, 3);
+ hdmirx_phy_register_write(hdmirx_dev, SUP_DIG_ANA_CREGS_SUP_ANA_NC, 2);
+ hdmirx_phy_register_write(hdmirx_dev, SUP_DIG_ANA_CREGS_SUP_ANA_NC, 2);
+ hdmirx_phy_register_write(hdmirx_dev, SUP_DIG_ANA_CREGS_SUP_ANA_NC, 3);
+ hdmirx_phy_register_write(hdmirx_dev, SUP_DIG_ANA_CREGS_SUP_ANA_NC, 2);
+ hdmirx_phy_register_write(hdmirx_dev, SUP_DIG_ANA_CREGS_SUP_ANA_NC, 0);
+ hdmirx_phy_register_write(hdmirx_dev, SUP_DIG_ANA_CREGS_SUP_ANA_NC, 1);
+ hdmirx_phy_register_write(hdmirx_dev, SUP_DIG_ANA_CREGS_SUP_ANA_NC, 0);
+ hdmirx_phy_register_write(hdmirx_dev, SUP_DIG_ANA_CREGS_SUP_ANA_NC, 0);
+
+ hdmirx_phy_register_write(hdmirx_dev,
+ HDMIPCS_DIG_CTRL_PATH_MAIN_FSM_RATE_CALC_HDMI14_CDR_SETTING_3_REG,
+ CDR_SETTING_BOUNDARY_3_DEFAULT);
+ hdmirx_phy_register_write(hdmirx_dev,
+ HDMIPCS_DIG_CTRL_PATH_MAIN_FSM_RATE_CALC_HDMI14_CDR_SETTING_4_REG,
+ CDR_SETTING_BOUNDARY_4_DEFAULT);
+ hdmirx_phy_register_write(hdmirx_dev,
+ HDMIPCS_DIG_CTRL_PATH_MAIN_FSM_RATE_CALC_HDMI14_CDR_SETTING_5_REG,
+ CDR_SETTING_BOUNDARY_5_DEFAULT);
+ hdmirx_phy_register_write(hdmirx_dev,
+ HDMIPCS_DIG_CTRL_PATH_MAIN_FSM_RATE_CALC_HDMI14_CDR_SETTING_6_REG,
+ CDR_SETTING_BOUNDARY_6_DEFAULT);
+ hdmirx_phy_register_write(hdmirx_dev,
+ HDMIPCS_DIG_CTRL_PATH_MAIN_FSM_RATE_CALC_HDMI14_CDR_SETTING_7_REG,
+ CDR_SETTING_BOUNDARY_7_DEFAULT);
+
+ hdmirx_update_bits(hdmirx_dev, PHY_CONFIG, PHY_PDDQ, 0);
+ if (wait_reg_bit_status(hdmirx_dev, PHY_STATUS, PDDQ_ACK, 0, false, 10))
+ dev_err(dev, "%s: wait pddq ack failed\n", __func__);
+
+ hdmirx_update_bits(hdmirx_dev, PHY_CONFIG, HDMI_DISABLE, 0);
+ if (wait_reg_bit_status(hdmirx_dev, PHY_STATUS, HDMI_DISABLE_ACK, 0,
+ false, 50))
+ dev_err(dev, "%s: wait hdmi disable ack failed\n", __func__);
+
+ hdmirx_tmds_clk_ratio_config(hdmirx_dev);
+}
+
+static void hdmirx_controller_init(struct snps_hdmirx_dev *hdmirx_dev)
+{
+ const unsigned long iref_clk_freq_hz = 428571429;
+ struct device *dev = hdmirx_dev->dev;
+
+ reinit_completion(&hdmirx_dev->timer_base_lock);
+ hdmirx_clear_interrupt(hdmirx_dev, MAINUNIT_0_INT_CLEAR, 0xffffffff);
+ /* en irq */
+ hdmirx_update_bits(hdmirx_dev, MAINUNIT_0_INT_MASK_N,
+ TIMER_BASE_LOCKED_IRQ, TIMER_BASE_LOCKED_IRQ);
+ /* write irefclk freq */
+ hdmirx_writel(hdmirx_dev, GLOBAL_TIMER_REF_BASE, iref_clk_freq_hz);
+
+ if (!wait_for_completion_timeout(&hdmirx_dev->timer_base_lock,
+ msecs_to_jiffies(20)))
+ dev_err(dev, "%s wait timer base lock failed\n", __func__);
+
+ hdmirx_update_bits(hdmirx_dev, CMU_CONFIG0,
+ TMDSQPCLK_STABLE_FREQ_MARGIN_MASK |
+ AUDCLK_STABLE_FREQ_MARGIN_MASK,
+ TMDSQPCLK_STABLE_FREQ_MARGIN(2) |
+ AUDCLK_STABLE_FREQ_MARGIN(1));
+ hdmirx_update_bits(hdmirx_dev, DESCRAND_EN_CONTROL,
+ SCRAMB_EN_SEL_QST_MASK, SCRAMB_EN_SEL_QST(1));
+ hdmirx_update_bits(hdmirx_dev, CED_CONFIG,
+ CED_VIDDATACHECKEN_QST |
+ CED_DATAISCHECKEN_QST |
+ CED_GBCHECKEN_QST |
+ CED_CTRLCHECKEN_QST |
+ CED_CHLOCKMAXER_QST_MASK,
+ CED_VIDDATACHECKEN_QST |
+ CED_GBCHECKEN_QST |
+ CED_CTRLCHECKEN_QST |
+ CED_CHLOCKMAXER_QST(0x10));
+ hdmirx_update_bits(hdmirx_dev, DEFRAMER_CONFIG0,
+ VS_REMAPFILTER_EN_QST | VS_FILTER_ORDER_QST_MASK,
+ VS_REMAPFILTER_EN_QST | VS_FILTER_ORDER_QST(0x3));
+}
+
+static void hdmirx_get_colordepth(struct snps_hdmirx_dev *hdmirx_dev)
+{
+ struct v4l2_device *v4l2_dev = &hdmirx_dev->v4l2_dev;
+ u32 val, color_depth_reg;
+
+ val = hdmirx_readl(hdmirx_dev, DMA_STATUS11);
+ color_depth_reg = (val & HDMIRX_COLOR_DEPTH_MASK) >> 3;
+
+ switch (color_depth_reg) {
+ case 0x4:
+ hdmirx_dev->color_depth = 24;
+ break;
+ case 0x5:
+ hdmirx_dev->color_depth = 30;
+ break;
+ case 0x6:
+ hdmirx_dev->color_depth = 36;
+ break;
+ case 0x7:
+ hdmirx_dev->color_depth = 48;
+ break;
+ default:
+ hdmirx_dev->color_depth = 24;
+ break;
+ }
+
+ v4l2_dbg(1, debug, v4l2_dev, "%s: color_depth: %d, reg_val:%d\n",
+ __func__, hdmirx_dev->color_depth, color_depth_reg);
+}
+
+static void hdmirx_get_pix_fmt(struct snps_hdmirx_dev *hdmirx_dev)
+{
+ struct v4l2_device *v4l2_dev = &hdmirx_dev->v4l2_dev;
+ u32 val;
+
+ val = hdmirx_readl(hdmirx_dev, DMA_STATUS11);
+ hdmirx_dev->pix_fmt = val & HDMIRX_FORMAT_MASK;
+
+ switch (hdmirx_dev->pix_fmt) {
+ case HDMIRX_RGB888:
+ hdmirx_dev->cur_fmt_fourcc = V4L2_PIX_FMT_BGR24;
+ break;
+ case HDMIRX_YUV422:
+ hdmirx_dev->cur_fmt_fourcc = V4L2_PIX_FMT_NV16;
+ break;
+ case HDMIRX_YUV444:
+ hdmirx_dev->cur_fmt_fourcc = V4L2_PIX_FMT_NV24;
+ break;
+ case HDMIRX_YUV420:
+ hdmirx_dev->cur_fmt_fourcc = V4L2_PIX_FMT_NV12;
+ break;
+ default:
+ v4l2_err(v4l2_dev,
+ "%s: err pix_fmt: %d, set RGB888 as default\n",
+ __func__, hdmirx_dev->pix_fmt);
+ hdmirx_dev->pix_fmt = HDMIRX_RGB888;
+ hdmirx_dev->cur_fmt_fourcc = V4L2_PIX_FMT_BGR24;
+ break;
+ }
+
+ v4l2_dbg(1, debug, v4l2_dev, "%s: pix_fmt: %s\n", __func__,
+ pix_fmt_str[hdmirx_dev->pix_fmt]);
+}
+
+static void hdmirx_read_avi_infoframe(struct snps_hdmirx_dev *hdmirx_dev,
+ u8 *aviif)
+{
+ unsigned int i, b, itr = 0;
+ u32 val;
+
+ aviif[itr++] = HDMI_INFOFRAME_TYPE_AVI;
+ val = hdmirx_readl(hdmirx_dev, PKTDEC_AVIIF_PH2_1);
+ aviif[itr++] = val & 0xff;
+ aviif[itr++] = (val >> 8) & 0xff;
+
+ for (i = 0; i < 7; i++) {
+ val = hdmirx_readl(hdmirx_dev, PKTDEC_AVIIF_PB3_0 + 4 * i);
+
+ for (b = 0; b < 4; b++)
+ aviif[itr++] = (val >> (8 * b)) & 0xff;
+ }
+}
+
+static void hdmirx_get_avi_infoframe(struct snps_hdmirx_dev *hdmirx_dev)
+{
+ struct v4l2_device *v4l2_dev = &hdmirx_dev->v4l2_dev;
+ union hdmi_infoframe frame = {};
+ u8 aviif[3 + 7 * 4];
+ int err;
+
+ hdmirx_read_avi_infoframe(hdmirx_dev, aviif);
+
+ err = hdmi_infoframe_unpack(&frame, aviif, sizeof(aviif));
+ if (err) {
+ v4l2_err(v4l2_dev, "failed to unpack AVI infoframe\n");
+ return;
+ }
+
+ v4l2_ctrl_s_ctrl(hdmirx_dev->rgb_range, frame.avi.quantization_range);
+
+ if (frame.avi.itc)
+ v4l2_ctrl_s_ctrl(hdmirx_dev->content_type,
+ frame.avi.content_type);
+ else
+ v4l2_ctrl_s_ctrl(hdmirx_dev->content_type,
+ V4L2_DV_IT_CONTENT_TYPE_NO_ITC);
+}
+
+static ssize_t
+hdmirx_debugfs_if_read(u32 type, void *priv, struct file *filp,
+ char __user *ubuf, size_t count, loff_t *ppos)
+{
+ struct snps_hdmirx_dev *hdmirx_dev = priv;
+ u8 aviif[V4L2_DEBUGFS_IF_MAX_LEN] = {};
+ int len;
+
+ if (type != V4L2_DEBUGFS_IF_AVI)
+ return 0;
+
+ hdmirx_read_avi_infoframe(hdmirx_dev, aviif);
+
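+ /* total infoframe length: 3-byte header + 1 checksum byte + payload length from byte 2 */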
+ len = aviif[2] + 4;
+ if (len > V4L2_DEBUGFS_IF_MAX_LEN)
+ len = -ENOENT;
+ else
+ len = simple_read_from_buffer(ubuf, count, ppos, aviif, len);
+
+ return len < 0 ? 0 : len;
+}
+
+static void hdmirx_format_change(struct snps_hdmirx_dev *hdmirx_dev)
+{
+ struct hdmirx_stream *stream = &hdmirx_dev->stream;
+ struct v4l2_device *v4l2_dev = &hdmirx_dev->v4l2_dev;
+ static const struct v4l2_event ev_src_chg = {
+ .type = V4L2_EVENT_SOURCE_CHANGE,
+ .u.src_change.changes = V4L2_EVENT_SRC_CH_RESOLUTION,
+ };
+
+ hdmirx_get_pix_fmt(hdmirx_dev);
+ hdmirx_get_colordepth(hdmirx_dev);
+ hdmirx_get_avi_infoframe(hdmirx_dev);
+
+ v4l2_dbg(1, debug, v4l2_dev, "%s: queue res_chg_event\n", __func__);
+ v4l2_event_queue(&stream->vdev, &ev_src_chg);
+}
+
+static void hdmirx_set_ddr_store_fmt(struct snps_hdmirx_dev *hdmirx_dev)
+{
+ struct v4l2_device *v4l2_dev = &hdmirx_dev->v4l2_dev;
+ enum ddr_store_fmt store_fmt;
+ u32 dma_cfg1;
+
+ switch (hdmirx_dev->pix_fmt) {
+ case HDMIRX_RGB888:
+ store_fmt = STORE_RGB888;
+ break;
+ case HDMIRX_YUV444:
+ store_fmt = STORE_YUV444_8BIT;
+ break;
+ case HDMIRX_YUV422:
+ store_fmt = STORE_YUV422_8BIT;
+ break;
+ case HDMIRX_YUV420:
+ store_fmt = STORE_YUV420_8BIT;
+ break;
+ default:
+ store_fmt = STORE_RGB888;
+ break;
+ }
+
+ hdmirx_update_bits(hdmirx_dev, DMA_CONFIG1,
+ DDR_STORE_FORMAT_MASK, DDR_STORE_FORMAT(store_fmt));
+ dma_cfg1 = hdmirx_readl(hdmirx_dev, DMA_CONFIG1);
+ v4l2_dbg(1, debug, v4l2_dev, "%s: pix_fmt: %s, DMA_CONFIG1:%#x\n",
+ __func__, pix_fmt_str[hdmirx_dev->pix_fmt], dma_cfg1);
+}
+
+static void hdmirx_dma_config(struct snps_hdmirx_dev *hdmirx_dev)
+{
+ hdmirx_set_ddr_store_fmt(hdmirx_dev);
+
+ /* Note: this swap is a UV swap; R/B cannot be swapped (documentation error) */
+ if (hdmirx_dev->cur_fmt_fourcc != V4L2_PIX_FMT_NV16)
+ hdmirx_update_bits(hdmirx_dev, DMA_CONFIG6, RB_SWAP_EN, RB_SWAP_EN);
+ else
+ hdmirx_update_bits(hdmirx_dev, DMA_CONFIG6, RB_SWAP_EN, 0);
+
+ hdmirx_update_bits(hdmirx_dev, DMA_CONFIG7,
+ LOCK_FRAME_NUM_MASK,
+ LOCK_FRAME_NUM(2));
+ hdmirx_update_bits(hdmirx_dev, DMA_CONFIG1,
+ UV_WID_MASK | Y_WID_MASK | ABANDON_EN,
+ UV_WID(1) | Y_WID(2) | ABANDON_EN);
+}
+
+static void hdmirx_submodule_init(struct snps_hdmirx_dev *hdmirx_dev)
+{
+ /* Note: HDCP2_CONFIG must be configured, otherwise errors occur */
+ hdmirx_update_bits(hdmirx_dev, HDCP2_CONFIG,
+ HDCP2_SWITCH_OVR_VALUE |
+ HDCP2_SWITCH_OVR_EN,
+ HDCP2_SWITCH_OVR_EN);
+ hdmirx_scdc_init(hdmirx_dev);
+ hdmirx_controller_init(hdmirx_dev);
+}
+
+static int hdmirx_enum_input(struct file *file, void *priv,
+ struct v4l2_input *input)
+{
+ if (input->index > 0)
+ return -EINVAL;
+
+ input->type = V4L2_INPUT_TYPE_CAMERA;
+ input->std = 0;
+ strscpy(input->name, "HDMI IN", sizeof(input->name));
+ input->capabilities = V4L2_IN_CAP_DV_TIMINGS;
+
+ return 0;
+}
+
+static int hdmirx_get_input(struct file *file, void *priv, unsigned int *i)
+{
+ *i = 0;
+ return 0;
+}
+
+static int hdmirx_set_input(struct file *file, void *priv, unsigned int i)
+{
+ if (i)
+ return -EINVAL;
+ return 0;
+}
+
+static void hdmirx_set_fmt(struct hdmirx_stream *stream,
+ struct v4l2_pix_format_mplane *pixm, bool try)
+{
+ struct snps_hdmirx_dev *hdmirx_dev = stream->hdmirx_dev;
+ struct v4l2_device *v4l2_dev = &hdmirx_dev->v4l2_dev;
+ struct v4l2_bt_timings *bt = &hdmirx_dev->timings.bt;
+ const struct v4l2_format_info *finfo;
+ unsigned int imagesize = 0;
+ unsigned int i;
+
+ memset(&pixm->plane_fmt[0], 0, sizeof(struct v4l2_plane_pix_format));
+ finfo = v4l2_format_info(pixm->pixelformat);
+ if (!finfo) {
+ finfo = v4l2_format_info(V4L2_PIX_FMT_BGR24);
+ v4l2_dbg(1, debug, v4l2_dev,
+ "%s: set_fmt:%#x not supported, use def_fmt:%x\n",
+ __func__, pixm->pixelformat, finfo->format);
+ }
+
+ if (!bt->width || !bt->height)
+ v4l2_dbg(1, debug, v4l2_dev, "%s: invalid resolution:%#xx%#x\n",
+ __func__, bt->width, bt->height);
+
+ pixm->pixelformat = finfo->format;
+ pixm->width = bt->width;
+ pixm->height = bt->height;
+ pixm->num_planes = finfo->mem_planes;
+ pixm->quantization = V4L2_QUANTIZATION_DEFAULT;
+ pixm->colorspace = V4L2_COLORSPACE_SRGB;
+ pixm->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
+
+ if (bt->interlaced == V4L2_DV_INTERLACED)
+ pixm->field = V4L2_FIELD_INTERLACED_TB;
+ else
+ pixm->field = V4L2_FIELD_NONE;
+
+ memset(pixm->reserved, 0, sizeof(pixm->reserved));
+
+ v4l2_fill_pixfmt_mp(pixm, finfo->format, pixm->width, pixm->height);
+
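+ /* recompute bytesperline and sizeimage per colour plane with the stride aligned to hw_align (64 bytes) */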
+ for (i = 0; i < finfo->comp_planes; i++) {
+ struct v4l2_plane_pix_format *plane_fmt;
+ int width, height, bpl, size, bpp = 0;
+ const unsigned int hw_align = 64;
+
+ if (!i) {
+ width = pixm->width;
+ height = pixm->height;
+ } else {
+ width = pixm->width / finfo->hdiv;
+ height = pixm->height / finfo->vdiv;
+ }
+
+ switch (finfo->format) {
+ case V4L2_PIX_FMT_NV24:
+ case V4L2_PIX_FMT_NV16:
+ case V4L2_PIX_FMT_NV12:
+ case V4L2_PIX_FMT_BGR24:
+ bpp = finfo->bpp[i];
+ break;
+ default:
+ v4l2_dbg(1, debug, v4l2_dev,
+ "fourcc: %#x is not supported\n",
+ finfo->format);
+ break;
+ }
+
+ bpl = ALIGN(width * bpp, hw_align);
+ size = bpl * height;
+ imagesize += size;
+
+ if (finfo->mem_planes > i) {
+ /* Set bpl and size for each mplane */
+ plane_fmt = pixm->plane_fmt + i;
+ plane_fmt->bytesperline = bpl;
+ plane_fmt->sizeimage = size;
+ }
+
+ v4l2_dbg(1, debug, v4l2_dev,
+ "C-Plane %u size: %d, Total imagesize: %d\n",
+ i, size, imagesize);
+ }
+
+ /* Convert to non-MPLANE format as we want to unify non-MPLANE and MPLANE */
+ if (finfo->mem_planes == 1)
+ pixm->plane_fmt[0].sizeimage = imagesize;
+
+ if (!try) {
+ stream->out_finfo = finfo;
+ stream->pixm = *pixm;
+ v4l2_dbg(1, debug, v4l2_dev,
+ "%s: req(%d, %d), out(%d, %d), fmt:%#x\n", __func__,
+ pixm->width, pixm->height, stream->pixm.width,
+ stream->pixm.height, finfo->format);
+ }
+}
+
+static int hdmirx_enum_fmt_vid_cap_mplane(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ struct hdmirx_stream *stream = video_drvdata(file);
+ struct snps_hdmirx_dev *hdmirx_dev = stream->hdmirx_dev;
+
+ if (f->index >= 1)
+ return -EINVAL;
+
+ f->pixelformat = hdmirx_dev->cur_fmt_fourcc;
+
+ return 0;
+}
+
+static int hdmirx_s_fmt_vid_cap_mplane(struct file *file,
+ void *priv, struct v4l2_format *f)
+{
+ struct hdmirx_stream *stream = video_drvdata(file);
+ struct snps_hdmirx_dev *hdmirx_dev = stream->hdmirx_dev;
+ struct v4l2_device *v4l2_dev = &hdmirx_dev->v4l2_dev;
+
+ if (vb2_is_busy(&stream->buf_queue)) {
+ v4l2_err(v4l2_dev, "%s: queue busy\n", __func__);
+ return -EBUSY;
+ }
+
+ hdmirx_set_fmt(stream, &f->fmt.pix_mp, false);
+
+ return 0;
+}
+
+static int hdmirx_g_fmt_vid_cap_mplane(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct hdmirx_stream *stream = video_drvdata(file);
+ struct snps_hdmirx_dev *hdmirx_dev = stream->hdmirx_dev;
+ struct v4l2_pix_format_mplane pixm = {};
+
+ pixm.pixelformat = hdmirx_dev->cur_fmt_fourcc;
+ hdmirx_set_fmt(stream, &pixm, true);
+ f->fmt.pix_mp = pixm;
+
+ return 0;
+}
+
+static int hdmirx_g_dv_timings(struct file *file, void *_fh,
+ struct v4l2_dv_timings *timings)
+{
+ struct hdmirx_stream *stream = video_drvdata(file);
+ struct snps_hdmirx_dev *hdmirx_dev = stream->hdmirx_dev;
+ struct v4l2_device *v4l2_dev = &hdmirx_dev->v4l2_dev;
+ u32 dma_cfg1;
+
+ *timings = hdmirx_dev->timings;
+ dma_cfg1 = hdmirx_readl(hdmirx_dev, DMA_CONFIG1);
+ v4l2_dbg(1, debug, v4l2_dev, "%s: pix_fmt: %s, DMA_CONFIG1:%#x\n",
+ __func__, pix_fmt_str[hdmirx_dev->pix_fmt], dma_cfg1);
+
+ return 0;
+}
+
+static int hdmirx_s_dv_timings(struct file *file, void *_fh,
+ struct v4l2_dv_timings *timings)
+{
+ struct hdmirx_stream *stream = video_drvdata(file);
+ struct snps_hdmirx_dev *hdmirx_dev = stream->hdmirx_dev;
+ struct v4l2_device *v4l2_dev = &hdmirx_dev->v4l2_dev;
+
+ if (!timings)
+ return -EINVAL;
+
+ if (debug)
+ v4l2_print_dv_timings(hdmirx_dev->v4l2_dev.name,
+ "s_dv_timings: ", timings, false);
+
+ if (!v4l2_valid_dv_timings(timings, &hdmirx_timings_cap, NULL, NULL)) {
+ v4l2_dbg(1, debug, v4l2_dev,
+ "%s: timings out of range\n", __func__);
+ return -ERANGE;
+ }
+
+ /* Check if the timings are part of the CEA-861 timings. */
+ v4l2_find_dv_timings_cap(timings, &hdmirx_timings_cap, 0, NULL, NULL);
+
+ if (v4l2_match_dv_timings(&hdmirx_dev->timings, timings, 0, false)) {
+ v4l2_dbg(1, debug, v4l2_dev, "%s: no change\n", __func__);
+ return 0;
+ }
+
+ /*
+ * Changing the timings implies a format change, which is not allowed
+ * while buffers for use with streaming have already been allocated.
+ */
+ if (vb2_is_busy(&stream->buf_queue))
+ return -EBUSY;
+
+ hdmirx_dev->timings = *timings;
+ /* Update the internal format */
+ hdmirx_set_fmt(stream, &stream->pixm, false);
+
+ return 0;
+}
+
+static int hdmirx_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ struct hdmirx_stream *stream = video_drvdata(file);
+ struct device *dev = stream->hdmirx_dev->dev;
+
+ strscpy(cap->driver, dev->driver->name, sizeof(cap->driver));
+ strscpy(cap->card, dev->driver->name, sizeof(cap->card));
+
+ return 0;
+}
+
+static int hdmirx_queue_setup(struct vb2_queue *queue,
+ unsigned int *num_buffers,
+ unsigned int *num_planes,
+ unsigned int sizes[],
+ struct device *alloc_ctxs[])
+{
+ struct hdmirx_stream *stream = vb2_get_drv_priv(queue);
+ struct snps_hdmirx_dev *hdmirx_dev = stream->hdmirx_dev;
+ struct v4l2_device *v4l2_dev = &hdmirx_dev->v4l2_dev;
+ const struct v4l2_pix_format_mplane *pixm = NULL;
+ const struct v4l2_format_info *out_finfo;
+ u32 i;
+
+ pixm = &stream->pixm;
+ out_finfo = stream->out_finfo;
+
+ if (!out_finfo) {
+ v4l2_err(v4l2_dev, "%s: out_fmt not set\n", __func__);
+ return -EINVAL;
+ }
+
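+ /* a non-zero *num_planes means VIDIOC_CREATE_BUFS: validate the requested planes against the current format */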
+ if (*num_planes) {
+ if (*num_planes != pixm->num_planes)
+ return -EINVAL;
+
+ for (i = 0; i < *num_planes; i++)
+ if (sizes[i] < pixm->plane_fmt[i].sizeimage)
+ return -EINVAL;
+ return 0;
+ }
+
+ *num_planes = out_finfo->mem_planes;
+
+ for (i = 0; i < out_finfo->mem_planes; i++)
+ sizes[i] = pixm->plane_fmt[i].sizeimage;
+
+ v4l2_dbg(1, debug, v4l2_dev, "%s: count %d, size %d\n",
+ v4l2_type_names[queue->type], *num_buffers, sizes[0]);
+
+ return 0;
+}
+
+/*
+ * The vb2_buffer is stored in a hdmirx_buffer in order to unify
+ * mplane and non-mplane buffer handling.
+ */
+static void hdmirx_buf_queue(struct vb2_buffer *vb)
+{
+ const struct v4l2_pix_format_mplane *pixm;
+ const struct v4l2_format_info *out_finfo;
+ struct hdmirx_buffer *hdmirx_buf;
+ struct vb2_v4l2_buffer *vbuf;
+ struct hdmirx_stream *stream;
+ struct vb2_queue *queue;
+ unsigned long flags;
+ unsigned int i;
+
+ vbuf = to_vb2_v4l2_buffer(vb);
+ hdmirx_buf = container_of(vbuf, struct hdmirx_buffer, vb);
+ queue = vb->vb2_queue;
+ stream = vb2_get_drv_priv(queue);
+ pixm = &stream->pixm;
+ out_finfo = stream->out_finfo;
+
+ memset(hdmirx_buf->buff_addr, 0, sizeof(hdmirx_buf->buff_addr));
+
+ /*
+ * If mem_planes > 1, every colour plane has its own memory plane;
+ * otherwise, all colour planes share the same memory plane.
+ */
+ for (i = 0; i < out_finfo->mem_planes; i++)
+ hdmirx_buf->buff_addr[i] = vb2_dma_contig_plane_dma_addr(vb, i);
+
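+ /* single memory plane: derive the remaining colour-plane addresses within the same buffer */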
+ if (out_finfo->mem_planes == 1) {
+ if (out_finfo->comp_planes == 1) {
+ hdmirx_buf->buff_addr[HDMIRX_PLANE_CBCR] =
+ hdmirx_buf->buff_addr[HDMIRX_PLANE_Y];
+ } else {
+ for (i = 0; i < out_finfo->comp_planes - 1; i++)
+ hdmirx_buf->buff_addr[i + 1] =
+ hdmirx_buf->buff_addr[i] +
+ pixm->plane_fmt[i].bytesperline *
+ pixm->height;
+ }
+ }
+
+ spin_lock_irqsave(&stream->vbq_lock, flags);
+ list_add_tail(&hdmirx_buf->queue, &stream->buf_head);
+ spin_unlock_irqrestore(&stream->vbq_lock, flags);
+}
+
+static void return_all_buffers(struct hdmirx_stream *stream,
+ enum vb2_buffer_state state)
+{
+ struct hdmirx_buffer *buf, *tmp;
+ unsigned long flags;
+
+ spin_lock_irqsave(&stream->vbq_lock, flags);
+ if (stream->curr_buf)
+ list_add_tail(&stream->curr_buf->queue, &stream->buf_head);
+ if (stream->next_buf && stream->next_buf != stream->curr_buf)
+ list_add_tail(&stream->next_buf->queue, &stream->buf_head);
+ stream->curr_buf = NULL;
+ stream->next_buf = NULL;
+
+ list_for_each_entry_safe(buf, tmp, &stream->buf_head, queue) {
+ list_del(&buf->queue);
+ vb2_buffer_done(&buf->vb.vb2_buf, state);
+ }
+ spin_unlock_irqrestore(&stream->vbq_lock, flags);
+}
+
+static void hdmirx_stop_streaming(struct vb2_queue *queue)
+{
+ struct hdmirx_stream *stream = vb2_get_drv_priv(queue);
+ struct snps_hdmirx_dev *hdmirx_dev = stream->hdmirx_dev;
+ struct v4l2_device *v4l2_dev = &hdmirx_dev->v4l2_dev;
+ int ret;
+
+ v4l2_dbg(1, debug, v4l2_dev, "stream start stopping\n");
+ mutex_lock(&hdmirx_dev->stream_lock);
+ WRITE_ONCE(stream->stopping, true);
+
+ /* wait for the last irq to return the buffer */
+ ret = wait_event_timeout(stream->wq_stopped, !stream->stopping,
+ msecs_to_jiffies(500));
+ if (!ret)
+ v4l2_dbg(1, debug, v4l2_dev, "%s: timeout waiting last irq\n",
+ __func__);
+
+ hdmirx_update_bits(hdmirx_dev, DMA_CONFIG6, HDMIRX_DMA_EN, 0);
+ return_all_buffers(stream, VB2_BUF_STATE_ERROR);
+ mutex_unlock(&hdmirx_dev->stream_lock);
+ v4l2_dbg(1, debug, v4l2_dev, "stream stopping finished\n");
+}
+
+static int hdmirx_start_streaming(struct vb2_queue *queue, unsigned int count)
+{
+ struct hdmirx_stream *stream = vb2_get_drv_priv(queue);
+ struct snps_hdmirx_dev *hdmirx_dev = stream->hdmirx_dev;
+ struct v4l2_device *v4l2_dev = &hdmirx_dev->v4l2_dev;
+ struct v4l2_dv_timings timings = hdmirx_dev->timings;
+ struct v4l2_bt_timings *bt = &timings.bt;
+ unsigned long lock_flags = 0;
+ int line_flag;
+
+ mutex_lock(&hdmirx_dev->stream_lock);
+ stream->frame_idx = 0;
+ stream->line_flag_int_cnt = 0;
+ stream->curr_buf = NULL;
+ stream->next_buf = NULL;
+ stream->irq_stat = 0;
+
+ WRITE_ONCE(stream->stopping, false);
+
+ spin_lock_irqsave(&stream->vbq_lock, lock_flags);
+ if (!stream->curr_buf) {
+ if (!list_empty(&stream->buf_head)) {
+ stream->curr_buf = list_first_entry(&stream->buf_head,
+ struct hdmirx_buffer,
+ queue);
+ list_del(&stream->curr_buf->queue);
+ } else {
+ stream->curr_buf = NULL;
+ }
+ }
+ spin_unlock_irqrestore(&stream->vbq_lock, lock_flags);
+
+ if (!stream->curr_buf) {
+ mutex_unlock(&hdmirx_dev->stream_lock);
+ return -ENOMEM;
+ }
+
+ v4l2_dbg(2, debug, v4l2_dev,
+ "%s: start_stream cur_buf y_addr:%#x, uv_addr:%#x\n",
+ __func__, stream->curr_buf->buff_addr[HDMIRX_PLANE_Y],
+ stream->curr_buf->buff_addr[HDMIRX_PLANE_CBCR]);
+ hdmirx_writel(hdmirx_dev, DMA_CONFIG2,
+ stream->curr_buf->buff_addr[HDMIRX_PLANE_Y]);
+ hdmirx_writel(hdmirx_dev, DMA_CONFIG3,
+ stream->curr_buf->buff_addr[HDMIRX_PLANE_CBCR]);
+
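+ /* raise the line-flag interrupt mid-frame (mid-field when interlaced) so the next buffer can be programmed in time */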
+ if (bt->height) {
+ if (bt->interlaced == V4L2_DV_INTERLACED)
+ line_flag = bt->height / 4;
+ else
+ line_flag = bt->height / 2;
+ hdmirx_update_bits(hdmirx_dev, DMA_CONFIG7,
+ LINE_FLAG_NUM_MASK,
+ LINE_FLAG_NUM(line_flag));
+ } else {
+ v4l2_err(v4l2_dev, "invalid BT timing height=%d\n", bt->height);
+ }
+
+ hdmirx_writel(hdmirx_dev, DMA_CONFIG5, 0xffffffff);
+ hdmirx_writel(hdmirx_dev, CED_DYN_CONTROL, 0x1);
+ hdmirx_update_bits(hdmirx_dev, DMA_CONFIG4,
+ LINE_FLAG_INT_EN |
+ HDMIRX_DMA_IDLE_INT |
+ HDMIRX_LOCK_DISABLE_INT |
+ LAST_FRAME_AXI_UNFINISH_INT_EN |
+ FIFO_OVERFLOW_INT_EN |
+ FIFO_UNDERFLOW_INT_EN |
+ HDMIRX_AXI_ERROR_INT_EN,
+ LINE_FLAG_INT_EN |
+ HDMIRX_DMA_IDLE_INT |
+ HDMIRX_LOCK_DISABLE_INT |
+ LAST_FRAME_AXI_UNFINISH_INT_EN |
+ FIFO_OVERFLOW_INT_EN |
+ FIFO_UNDERFLOW_INT_EN |
+ HDMIRX_AXI_ERROR_INT_EN);
+ hdmirx_update_bits(hdmirx_dev, DMA_CONFIG6, HDMIRX_DMA_EN, HDMIRX_DMA_EN);
+ v4l2_dbg(1, debug, v4l2_dev, "%s: enable dma", __func__);
+ mutex_unlock(&hdmirx_dev->stream_lock);
+
+ return 0;
+}
+
+/* vb2 queue */
+static const struct vb2_ops hdmirx_vb2_ops = {
+ .queue_setup = hdmirx_queue_setup,
+ .buf_queue = hdmirx_buf_queue,
+ .stop_streaming = hdmirx_stop_streaming,
+ .start_streaming = hdmirx_start_streaming,
+};
+
+static int hdmirx_init_vb2_queue(struct vb2_queue *q,
+ struct hdmirx_stream *stream,
+ enum v4l2_buf_type buf_type)
+{
+ struct snps_hdmirx_dev *hdmirx_dev = stream->hdmirx_dev;
+
+ q->type = buf_type;
+ q->io_modes = VB2_MMAP | VB2_DMABUF;
+ q->drv_priv = stream;
+ q->ops = &hdmirx_vb2_ops;
+ q->mem_ops = &vb2_dma_contig_memops;
+ q->buf_struct_size = sizeof(struct hdmirx_buffer);
+ q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ q->lock = &stream->vlock;
+ q->dev = hdmirx_dev->dev;
+ q->min_queued_buffers = 1;
+
+ return vb2_queue_init(q);
+}
+
+/* video device */
+static const struct v4l2_ioctl_ops hdmirx_v4l2_ioctl_ops = {
+ .vidioc_querycap = hdmirx_querycap,
+ .vidioc_try_fmt_vid_cap_mplane = hdmirx_g_fmt_vid_cap_mplane,
+ .vidioc_s_fmt_vid_cap_mplane = hdmirx_s_fmt_vid_cap_mplane,
+ .vidioc_g_fmt_vid_cap_mplane = hdmirx_g_fmt_vid_cap_mplane,
+ .vidioc_enum_fmt_vid_cap = hdmirx_enum_fmt_vid_cap_mplane,
+
+ .vidioc_s_dv_timings = hdmirx_s_dv_timings,
+ .vidioc_g_dv_timings = hdmirx_g_dv_timings,
+ .vidioc_enum_dv_timings = hdmirx_enum_dv_timings,
+ .vidioc_query_dv_timings = hdmirx_query_dv_timings,
+ .vidioc_dv_timings_cap = hdmirx_dv_timings_cap,
+ .vidioc_enum_input = hdmirx_enum_input,
+ .vidioc_g_input = hdmirx_get_input,
+ .vidioc_s_input = hdmirx_set_input,
+ .vidioc_g_edid = hdmirx_get_edid,
+ .vidioc_s_edid = hdmirx_set_edid,
+ .vidioc_g_parm = hdmirx_g_parm,
+
+ .vidioc_reqbufs = vb2_ioctl_reqbufs,
+ .vidioc_querybuf = vb2_ioctl_querybuf,
+ .vidioc_create_bufs = vb2_ioctl_create_bufs,
+ .vidioc_qbuf = vb2_ioctl_qbuf,
+ .vidioc_expbuf = vb2_ioctl_expbuf,
+ .vidioc_dqbuf = vb2_ioctl_dqbuf,
+ .vidioc_prepare_buf = vb2_ioctl_prepare_buf,
+ .vidioc_streamon = vb2_ioctl_streamon,
+ .vidioc_streamoff = vb2_ioctl_streamoff,
+
+ .vidioc_log_status = v4l2_ctrl_log_status,
+ .vidioc_subscribe_event = hdmirx_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+};
+
+static const struct v4l2_file_operations hdmirx_fops = {
+ .owner = THIS_MODULE,
+ .open = v4l2_fh_open,
+ .release = vb2_fop_release,
+ .unlocked_ioctl = video_ioctl2,
+ .poll = vb2_fop_poll,
+ .mmap = vb2_fop_mmap,
+};
+
+static int hdmirx_register_stream_vdev(struct hdmirx_stream *stream)
+{
+ struct snps_hdmirx_dev *hdmirx_dev = stream->hdmirx_dev;
+ struct v4l2_device *v4l2_dev = &hdmirx_dev->v4l2_dev;
+ struct video_device *vdev = &stream->vdev;
+ int ret;
+
+ strscpy(vdev->name, "stream_hdmirx", sizeof(vdev->name));
+ INIT_LIST_HEAD(&stream->buf_head);
+ spin_lock_init(&stream->vbq_lock);
+ mutex_init(&stream->vlock);
+ init_waitqueue_head(&stream->wq_stopped);
+ stream->curr_buf = NULL;
+ stream->next_buf = NULL;
+
+ vdev->ioctl_ops = &hdmirx_v4l2_ioctl_ops;
+ vdev->release = video_device_release_empty;
+ vdev->fops = &hdmirx_fops;
+ vdev->minor = -1;
+ vdev->v4l2_dev = v4l2_dev;
+ vdev->lock = &stream->vlock;
+ vdev->device_caps = V4L2_CAP_VIDEO_CAPTURE_MPLANE |
+ V4L2_CAP_STREAMING;
+ vdev->vfl_dir = VFL_DIR_RX;
+
+ video_set_drvdata(vdev, stream);
+
+ hdmirx_init_vb2_queue(&stream->buf_queue, stream,
+ V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
+ vdev->queue = &stream->buf_queue;
+
+ ret = video_register_device(vdev, VFL_TYPE_VIDEO, -1);
+ if (ret < 0) {
+ v4l2_err(v4l2_dev, "video_register_device failed: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void process_signal_change(struct snps_hdmirx_dev *hdmirx_dev)
+{
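+ /* stop the DMA and its interrupts, then let the delayed worker re-detect the new timing */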
+ hdmirx_update_bits(hdmirx_dev, DMA_CONFIG6, HDMIRX_DMA_EN, 0);
+ hdmirx_update_bits(hdmirx_dev, DMA_CONFIG4,
+ LINE_FLAG_INT_EN |
+ HDMIRX_DMA_IDLE_INT |
+ HDMIRX_LOCK_DISABLE_INT |
+ LAST_FRAME_AXI_UNFINISH_INT_EN |
+ FIFO_OVERFLOW_INT_EN |
+ FIFO_UNDERFLOW_INT_EN |
+ HDMIRX_AXI_ERROR_INT_EN, 0);
+ hdmirx_reset_dma(hdmirx_dev);
+ queue_delayed_work(system_unbound_wq,
+ &hdmirx_dev->delayed_work_res_change,
+ msecs_to_jiffies(50));
+}
+
+static void avpunit_0_int_handler(struct snps_hdmirx_dev *hdmirx_dev,
+ int status, bool *handled)
+{
+ struct v4l2_device *v4l2_dev = &hdmirx_dev->v4l2_dev;
+
+ if (status & (CED_DYN_CNT_CH2_IRQ |
+ CED_DYN_CNT_CH1_IRQ |
+ CED_DYN_CNT_CH0_IRQ)) {
+ process_signal_change(hdmirx_dev);
+ v4l2_dbg(2, debug, v4l2_dev, "%s: avp0_st:%#x\n",
+ __func__, status);
+ *handled = true;
+ }
+
+ hdmirx_clear_interrupt(hdmirx_dev, AVPUNIT_0_INT_CLEAR, 0xffffffff);
+ hdmirx_writel(hdmirx_dev, AVPUNIT_0_INT_FORCE, 0x0);
+}
+
+static void avpunit_1_int_handler(struct snps_hdmirx_dev *hdmirx_dev,
+ int status, bool *handled)
+{
+ struct v4l2_device *v4l2_dev = &hdmirx_dev->v4l2_dev;
+
+ if (status & DEFRAMER_VSYNC_THR_REACHED_IRQ) {
+ v4l2_dbg(2, debug, v4l2_dev,
+ "Vertical Sync threshold reached interrupt %#x", status);
+ hdmirx_update_bits(hdmirx_dev, AVPUNIT_1_INT_MASK_N,
+ DEFRAMER_VSYNC_THR_REACHED_MASK_N, 0);
+ *handled = true;
+ }
+}
+
+static void mainunit_0_int_handler(struct snps_hdmirx_dev *hdmirx_dev,
+ int status, bool *handled)
+{
+ struct v4l2_device *v4l2_dev = &hdmirx_dev->v4l2_dev;
+
+ v4l2_dbg(2, debug, v4l2_dev, "mu0_st:%#x\n", status);
+ if (status & TIMER_BASE_LOCKED_IRQ) {
+ hdmirx_update_bits(hdmirx_dev, MAINUNIT_0_INT_MASK_N,
+ TIMER_BASE_LOCKED_IRQ, 0);
+ complete(&hdmirx_dev->timer_base_lock);
+ *handled = true;
+ }
+
+ if (status & TMDSQPCLK_OFF_CHG) {
+ process_signal_change(hdmirx_dev);
+ v4l2_dbg(2, debug, v4l2_dev, "%s: TMDSQPCLK_OFF_CHG\n", __func__);
+ *handled = true;
+ }
+
+ if (status & TMDSQPCLK_LOCKED_CHG) {
+ process_signal_change(hdmirx_dev);
+ v4l2_dbg(2, debug, v4l2_dev, "%s: TMDSQPCLK_LOCKED_CHG\n", __func__);
+ *handled = true;
+ }
+
+ hdmirx_clear_interrupt(hdmirx_dev, MAINUNIT_0_INT_CLEAR, 0xffffffff);
+ hdmirx_writel(hdmirx_dev, MAINUNIT_0_INT_FORCE, 0x0);
+}
+
+static void mainunit_2_int_handler(struct snps_hdmirx_dev *hdmirx_dev,
+ int status, bool *handled)
+{
+ struct v4l2_device *v4l2_dev = &hdmirx_dev->v4l2_dev;
+
+ v4l2_dbg(2, debug, v4l2_dev, "mu2_st:%#x\n", status);
+ if (status & PHYCREG_CR_WRITE_DONE) {
+ hdmirx_update_bits(hdmirx_dev, MAINUNIT_2_INT_MASK_N,
+ PHYCREG_CR_WRITE_DONE, 0);
+ complete(&hdmirx_dev->cr_write_done);
+ *handled = true;
+ }
+
+ if (status & TMDSVALID_STABLE_CHG) {
+ process_signal_change(hdmirx_dev);
+ v4l2_dbg(2, debug, v4l2_dev, "%s: TMDSVALID_STABLE_CHG\n", __func__);
+ *handled = true;
+ }
+
+ hdmirx_clear_interrupt(hdmirx_dev, MAINUNIT_2_INT_CLEAR, 0xffffffff);
+ hdmirx_writel(hdmirx_dev, MAINUNIT_2_INT_FORCE, 0x0);
+}
+
+static void pkt_2_int_handler(struct snps_hdmirx_dev *hdmirx_dev,
+ int status, bool *handled)
+{
+ struct v4l2_device *v4l2_dev = &hdmirx_dev->v4l2_dev;
+
+ v4l2_dbg(2, debug, v4l2_dev, "%s: pk2_st:%#x\n", __func__, status);
+ if (status & PKTDEC_AVIIF_RCV_IRQ) {
+ hdmirx_update_bits(hdmirx_dev, PKT_2_INT_MASK_N,
+ PKTDEC_AVIIF_RCV_IRQ, 0);
+ complete(&hdmirx_dev->avi_pkt_rcv);
+ v4l2_dbg(2, debug, v4l2_dev, "%s: AVIIF_RCV_IRQ\n", __func__);
+ *handled = true;
+ }
+
+ hdmirx_clear_interrupt(hdmirx_dev, PKT_2_INT_CLEAR, 0xffffffff);
+}
+
+static void scdc_int_handler(struct snps_hdmirx_dev *hdmirx_dev,
+ int status, bool *handled)
+{
+ struct v4l2_device *v4l2_dev = &hdmirx_dev->v4l2_dev;
+
+ v4l2_dbg(2, debug, v4l2_dev, "%s: scdc_st:%#x\n", __func__, status);
+ if (status & SCDCTMDSCCFG_CHG) {
+ hdmirx_tmds_clk_ratio_config(hdmirx_dev);
+ *handled = true;
+ }
+
+ hdmirx_clear_interrupt(hdmirx_dev, SCDC_INT_CLEAR, 0xffffffff);
+}
+
+static irqreturn_t hdmirx_hdmi_irq_handler(int irq, void *dev_id)
+{
+ struct snps_hdmirx_dev *hdmirx_dev = dev_id;
+ struct v4l2_device *v4l2_dev = &hdmirx_dev->v4l2_dev;
+ u32 mu0_st, mu2_st, pk2_st, scdc_st, avp1_st, avp0_st;
+ u32 mu0_mask, mu2_mask, pk2_mask, scdc_mask, avp1_msk, avp0_msk;
+ bool handled = false;
+
+ mu0_mask = hdmirx_readl(hdmirx_dev, MAINUNIT_0_INT_MASK_N);
+ mu2_mask = hdmirx_readl(hdmirx_dev, MAINUNIT_2_INT_MASK_N);
+ pk2_mask = hdmirx_readl(hdmirx_dev, PKT_2_INT_MASK_N);
+ scdc_mask = hdmirx_readl(hdmirx_dev, SCDC_INT_MASK_N);
+ mu0_st = hdmirx_readl(hdmirx_dev, MAINUNIT_0_INT_STATUS);
+ mu2_st = hdmirx_readl(hdmirx_dev, MAINUNIT_2_INT_STATUS);
+ pk2_st = hdmirx_readl(hdmirx_dev, PKT_2_INT_STATUS);
+ scdc_st = hdmirx_readl(hdmirx_dev, SCDC_INT_STATUS);
+ avp0_st = hdmirx_readl(hdmirx_dev, AVPUNIT_0_INT_STATUS);
+ avp1_st = hdmirx_readl(hdmirx_dev, AVPUNIT_1_INT_STATUS);
+ avp0_msk = hdmirx_readl(hdmirx_dev, AVPUNIT_0_INT_MASK_N);
+ avp1_msk = hdmirx_readl(hdmirx_dev, AVPUNIT_1_INT_MASK_N);
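+ /* only handle status bits whose interrupts are currently unmasked */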
+ mu0_st &= mu0_mask;
+ mu2_st &= mu2_mask;
+ pk2_st &= pk2_mask;
+ avp1_st &= avp1_msk;
+ avp0_st &= avp0_msk;
+ scdc_st &= scdc_mask;
+
+ if (avp0_st)
+ avpunit_0_int_handler(hdmirx_dev, avp0_st, &handled);
+ if (avp1_st)
+ avpunit_1_int_handler(hdmirx_dev, avp1_st, &handled);
+ if (mu0_st)
+ mainunit_0_int_handler(hdmirx_dev, mu0_st, &handled);
+ if (mu2_st)
+ mainunit_2_int_handler(hdmirx_dev, mu2_st, &handled);
+ if (pk2_st)
+ pkt_2_int_handler(hdmirx_dev, pk2_st, &handled);
+ if (scdc_st)
+ scdc_int_handler(hdmirx_dev, scdc_st, &handled);
+
+ if (!handled) {
+ v4l2_dbg(2, debug, v4l2_dev, "%s: hdmi irq not handled", __func__);
+ v4l2_dbg(2, debug, v4l2_dev,
+ "avp0:%#x, avp1:%#x, mu0:%#x, mu2:%#x, pk2:%#x, scdc:%#x\n",
+ avp0_st, avp1_st, mu0_st, mu2_st, pk2_st, scdc_st);
+ }
+
+ v4l2_dbg(2, debug, v4l2_dev, "%s: en_fiq", __func__);
+
+ return handled ? IRQ_HANDLED : IRQ_NONE;
+}
+
+static void hdmirx_vb_done(struct hdmirx_stream *stream,
+ struct vb2_v4l2_buffer *vb_done)
+{
+ const struct v4l2_format_info *finfo = stream->out_finfo;
+ u32 i;
+
+ /* Dequeue a filled buffer */
+ for (i = 0; i < finfo->mem_planes; i++) {
+ vb2_set_plane_payload(&vb_done->vb2_buf, i,
+ stream->pixm.plane_fmt[i].sizeimage);
+ }
+
+ vb_done->vb2_buf.timestamp = ktime_get_ns();
+ vb2_buffer_done(&vb_done->vb2_buf, VB2_BUF_STATE_DONE);
+}
+
+static void dma_idle_int_handler(struct snps_hdmirx_dev *hdmirx_dev,
+ bool *handled)
+{
+ struct hdmirx_stream *stream = &hdmirx_dev->stream;
+ struct v4l2_device *v4l2_dev = &hdmirx_dev->v4l2_dev;
+ struct v4l2_dv_timings timings = hdmirx_dev->timings;
+ struct v4l2_bt_timings *bt = &timings.bt;
+ struct vb2_v4l2_buffer *vb_done = NULL;
+
+ if (!(stream->irq_stat) && !(stream->irq_stat & LINE_FLAG_INT_EN))
+ v4l2_dbg(1, debug, v4l2_dev,
+ "%s: last time have no line_flag_irq\n", __func__);
+
+ /* skip first frames that are expected to come out zeroed from DMA */
+ if (stream->line_flag_int_cnt <= FILTER_FRAME_CNT)
+ goto DMA_IDLE_OUT;
+
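+ /* for interlaced input a frame completes only on every second field, hence the even line-flag count check */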
+ if (bt->interlaced != V4L2_DV_INTERLACED ||
+ !(stream->line_flag_int_cnt % 2)) {
+ if (stream->next_buf) {
+ if (stream->curr_buf)
+ vb_done = &stream->curr_buf->vb;
+
+ if (vb_done) {
+ vb_done->vb2_buf.timestamp = ktime_get_ns();
+ vb_done->sequence = stream->frame_idx;
+
+ if (bt->interlaced)
+ vb_done->field = V4L2_FIELD_INTERLACED_TB;
+ else
+ vb_done->field = V4L2_FIELD_NONE;
+
+ hdmirx_vb_done(stream, vb_done);
+ stream->frame_idx++;
+ if (stream->frame_idx == 30)
+ v4l2_dbg(1, debug, v4l2_dev,
+ "rcv frames\n");
+ }
+
+ stream->curr_buf = NULL;
+ if (stream->next_buf) {
+ stream->curr_buf = stream->next_buf;
+ stream->next_buf = NULL;
+ }
+ } else {
+ v4l2_dbg(3, debug, v4l2_dev,
+ "%s: next_buf NULL, skip vb_done\n", __func__);
+ }
+ }
+
+DMA_IDLE_OUT:
+ *handled = true;
+}
+
+static void line_flag_int_handler(struct snps_hdmirx_dev *hdmirx_dev,
+ bool *handled)
+{
+ struct hdmirx_stream *stream = &hdmirx_dev->stream;
+ struct v4l2_device *v4l2_dev = &hdmirx_dev->v4l2_dev;
+ struct v4l2_dv_timings timings = hdmirx_dev->timings;
+ struct v4l2_bt_timings *bt = &timings.bt;
+ u32 dma_cfg6;
+
+ stream->line_flag_int_cnt++;
+ if (!(stream->irq_stat) && !(stream->irq_stat & HDMIRX_DMA_IDLE_INT))
+ v4l2_dbg(1, debug, v4l2_dev,
+ "%s: last have no dma_idle_irq\n", __func__);
+ dma_cfg6 = hdmirx_readl(hdmirx_dev, DMA_CONFIG6);
+ if (!(dma_cfg6 & HDMIRX_DMA_EN)) {
+ v4l2_dbg(2, debug, v4l2_dev, "%s: dma not on\n", __func__);
+ goto LINE_FLAG_OUT;
+ }
+
+ if (stream->line_flag_int_cnt <= FILTER_FRAME_CNT)
+ goto LINE_FLAG_OUT;
+
+ if (bt->interlaced != V4L2_DV_INTERLACED ||
+ !(stream->line_flag_int_cnt % 2)) {
+ if (!stream->next_buf) {
+ spin_lock(&stream->vbq_lock);
+ if (!list_empty(&stream->buf_head)) {
+ stream->next_buf = list_first_entry(&stream->buf_head,
+ struct hdmirx_buffer,
+ queue);
+ list_del(&stream->next_buf->queue);
+ } else {
+ stream->next_buf = NULL;
+ }
+ spin_unlock(&stream->vbq_lock);
+
+ if (stream->next_buf) {
+ hdmirx_writel(hdmirx_dev, DMA_CONFIG2,
+ stream->next_buf->buff_addr[HDMIRX_PLANE_Y]);
+ hdmirx_writel(hdmirx_dev, DMA_CONFIG3,
+ stream->next_buf->buff_addr[HDMIRX_PLANE_CBCR]);
+ } else {
+ v4l2_dbg(3, debug, v4l2_dev,
+ "%s: no buffer is available\n", __func__);
+ }
+ }
+ } else {
+ v4l2_dbg(3, debug, v4l2_dev, "%s: interlace:%d, line_flag_int_cnt:%d\n",
+ __func__, bt->interlaced, stream->line_flag_int_cnt);
+ }
+
+LINE_FLAG_OUT:
+ *handled = true;
+}
+
+static irqreturn_t hdmirx_dma_irq_handler(int irq, void *dev_id)
+{
+ struct snps_hdmirx_dev *hdmirx_dev = dev_id;
+ struct hdmirx_stream *stream = &hdmirx_dev->stream;
+ struct v4l2_device *v4l2_dev = &hdmirx_dev->v4l2_dev;
+ u32 dma_stat1, dma_stat13;
+ bool handled = false;
+
+ dma_stat1 = hdmirx_readl(hdmirx_dev, DMA_STATUS1);
+ dma_stat13 = hdmirx_readl(hdmirx_dev, DMA_STATUS13);
+ v4l2_dbg(3, debug, v4l2_dev, "dma_irq st1:%#x, st13:%d\n",
+ dma_stat1, dma_stat13);
+
+ if (READ_ONCE(stream->stopping)) {
+ v4l2_dbg(1, debug, v4l2_dev, "%s: stop stream\n", __func__);
+ hdmirx_writel(hdmirx_dev, DMA_CONFIG5, 0xffffffff);
+ hdmirx_update_bits(hdmirx_dev, DMA_CONFIG4,
+ LINE_FLAG_INT_EN |
+ HDMIRX_DMA_IDLE_INT |
+ HDMIRX_LOCK_DISABLE_INT |
+ LAST_FRAME_AXI_UNFINISH_INT_EN |
+ FIFO_OVERFLOW_INT_EN |
+ FIFO_UNDERFLOW_INT_EN |
+ HDMIRX_AXI_ERROR_INT_EN, 0);
+ WRITE_ONCE(stream->stopping, false);
+ wake_up(&stream->wq_stopped);
+ return IRQ_HANDLED;
+ }
+
+ if (dma_stat1 & HDMIRX_DMA_IDLE_INT)
+ dma_idle_int_handler(hdmirx_dev, &handled);
+
+ if (dma_stat1 & LINE_FLAG_INT_EN)
+ line_flag_int_handler(hdmirx_dev, &handled);
+
+ if (!handled)
+ v4l2_dbg(3, debug, v4l2_dev,
+ "%s: dma irq not handled, dma_stat1:%#x\n",
+ __func__, dma_stat1);
+
+ stream->irq_stat = dma_stat1;
+ hdmirx_writel(hdmirx_dev, DMA_CONFIG5, 0xffffffff);
+
+ return IRQ_HANDLED;
+}
+
+static int hdmirx_wait_signal_lock(struct snps_hdmirx_dev *hdmirx_dev)
+{
+ struct v4l2_device *v4l2_dev = &hdmirx_dev->v4l2_dev;
+ u32 mu_status, scdc_status, dma_st10, cmu_st;
+ u32 i;
+
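+ /* poll until TMDS is valid, the DMA is locked and TMDSQPCLK is locked, updating the clock ratio on each iteration */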
+ for (i = 0; i < 300; i++) {
+ mu_status = hdmirx_readl(hdmirx_dev, MAINUNIT_STATUS);
+ scdc_status = hdmirx_readl(hdmirx_dev, SCDC_REGBANK_STATUS3);
+ dma_st10 = hdmirx_readl(hdmirx_dev, DMA_STATUS10);
+ cmu_st = hdmirx_readl(hdmirx_dev, CMU_STATUS);
+
+ if ((mu_status & TMDSVALID_STABLE_ST) &&
+ (dma_st10 & HDMIRX_LOCK) &&
+ (cmu_st & TMDSQPCLK_LOCKED_ST))
+ break;
+
+ if (!tx_5v_power_present(hdmirx_dev)) {
+ v4l2_dbg(1, debug, v4l2_dev,
+ "%s: HDMI pull out, return\n", __func__);
+ return -1;
+ }
+
+ hdmirx_tmds_clk_ratio_config(hdmirx_dev);
+ }
+
+ if (i == 300) {
+ v4l2_err(v4l2_dev, "%s: signal not lock, tmds_clk_ratio:%d\n",
+ __func__, hdmirx_dev->tmds_clk_ratio);
+ v4l2_err(v4l2_dev, "%s: mu_st:%#x, scdc_st:%#x, dma_st10:%#x\n",
+ __func__, mu_status, scdc_status, dma_st10);
+ return -1;
+ }
+
+ v4l2_dbg(1, debug, v4l2_dev, "%s: signal lock ok, i:%d\n", __func__, i);
+ hdmirx_writel(hdmirx_dev, GLOBAL_SWRESET_REQUEST, DATAPATH_SWRESETREQ);
+
+ reinit_completion(&hdmirx_dev->avi_pkt_rcv);
+ hdmirx_clear_interrupt(hdmirx_dev, PKT_2_INT_CLEAR, 0xffffffff);
+ hdmirx_update_bits(hdmirx_dev, PKT_2_INT_MASK_N,
+ PKTDEC_AVIIF_RCV_IRQ, PKTDEC_AVIIF_RCV_IRQ);
+
+ if (!wait_for_completion_timeout(&hdmirx_dev->avi_pkt_rcv,
+ msecs_to_jiffies(300))) {
+ v4l2_err(v4l2_dev, "%s wait avi_pkt_rcv failed\n", __func__);
+ hdmirx_update_bits(hdmirx_dev, PKT_2_INT_MASK_N,
+ PKTDEC_AVIIF_RCV_IRQ, 0);
+ }
+
+ msleep(50);
+ hdmirx_format_change(hdmirx_dev);
+
+ return 0;
+}
+
+static void hdmirx_plugin(struct snps_hdmirx_dev *hdmirx_dev)
+{
+ if (hdmirx_dev->plugged)
+ return;
+
+ hdmirx_submodule_init(hdmirx_dev);
+ hdmirx_update_bits(hdmirx_dev, SCDC_CONFIG, POWERPROVIDED,
+ POWERPROVIDED);
+ hdmirx_phy_config(hdmirx_dev);
+ hdmirx_interrupts_setup(hdmirx_dev, true);
+
+ hdmirx_dev->plugged = true;
+}
+
+static void hdmirx_delayed_work_hotplug(struct work_struct *work)
+{
+ struct snps_hdmirx_dev *hdmirx_dev;
+ bool plugin;
+
+ hdmirx_dev = container_of(work, struct snps_hdmirx_dev,
+ delayed_work_hotplug.work);
+
+ mutex_lock(&hdmirx_dev->work_lock);
+ plugin = tx_5v_power_present(hdmirx_dev);
+ v4l2_ctrl_s_ctrl(hdmirx_dev->detect_tx_5v_ctrl, plugin);
+ v4l2_dbg(1, debug, &hdmirx_dev->v4l2_dev, "%s: plugin:%d\n",
+ __func__, plugin);
+
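+ /* always tear down the previous connection state before handling a (re)plug */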
+ hdmirx_plugout(hdmirx_dev);
+
+ if (plugin)
+ hdmirx_plugin(hdmirx_dev);
+
+ mutex_unlock(&hdmirx_dev->work_lock);
+}
+
+static void hdmirx_delayed_work_res_change(struct work_struct *work)
+{
+ struct snps_hdmirx_dev *hdmirx_dev;
+ bool plugin;
+
+ hdmirx_dev = container_of(work, struct snps_hdmirx_dev,
+ delayed_work_res_change.work);
+
+ mutex_lock(&hdmirx_dev->work_lock);
+ plugin = tx_5v_power_present(hdmirx_dev);
+ v4l2_dbg(1, debug, &hdmirx_dev->v4l2_dev, "%s: plugin:%d\n",
+ __func__, plugin);
+ if (plugin) {
+ hdmirx_interrupts_setup(hdmirx_dev, false);
+ hdmirx_submodule_init(hdmirx_dev);
+ hdmirx_update_bits(hdmirx_dev, SCDC_CONFIG, POWERPROVIDED,
+ POWERPROVIDED);
+ hdmirx_phy_config(hdmirx_dev);
+
+ if (hdmirx_wait_signal_lock(hdmirx_dev)) {
+ hdmirx_plugout(hdmirx_dev);
+ queue_delayed_work(system_unbound_wq,
+ &hdmirx_dev->delayed_work_hotplug,
+ msecs_to_jiffies(200));
+ } else {
+ hdmirx_dma_config(hdmirx_dev);
+ hdmirx_interrupts_setup(hdmirx_dev, true);
+ }
+ }
+ mutex_unlock(&hdmirx_dev->work_lock);
+}
+
+static irqreturn_t hdmirx_5v_det_irq_handler(int irq, void *dev_id)
+{
+ struct snps_hdmirx_dev *hdmirx_dev = dev_id;
+ u32 val;
+
+ val = gpiod_get_value(hdmirx_dev->detect_5v_gpio);
+ v4l2_dbg(3, debug, &hdmirx_dev->v4l2_dev, "%s: 5v:%d\n", __func__, val);
+
+ queue_delayed_work(system_unbound_wq,
+ &hdmirx_dev->delayed_work_hotplug,
+ msecs_to_jiffies(10));
+
+ return IRQ_HANDLED;
+}
+
+static const struct hdmirx_cec_ops hdmirx_cec_ops = {
+ .write = hdmirx_writel,
+ .read = hdmirx_readl,
+};
+
+static void devm_hdmirx_of_reserved_mem_device_release(void *dev)
+{
+ of_reserved_mem_device_release(dev);
+}
+
+static int hdmirx_parse_dt(struct snps_hdmirx_dev *hdmirx_dev)
+{
+ struct device *dev = hdmirx_dev->dev;
+ int ret;
+
+ hdmirx_dev->num_clks = devm_clk_bulk_get_all(dev, &hdmirx_dev->clks);
+ if (hdmirx_dev->num_clks < 1)
+ return -ENODEV;
+
+ hdmirx_dev->resets[HDMIRX_RST_A].id = "axi";
+ hdmirx_dev->resets[HDMIRX_RST_P].id = "apb";
+ hdmirx_dev->resets[HDMIRX_RST_REF].id = "ref";
+ hdmirx_dev->resets[HDMIRX_RST_BIU].id = "biu";
+
+ ret = devm_reset_control_bulk_get_exclusive(dev, HDMIRX_NUM_RST,
+ hdmirx_dev->resets);
+ if (ret < 0) {
+ dev_err(dev, "failed to get reset controls\n");
+ return ret;
+ }
+
+ hdmirx_dev->detect_5v_gpio =
+ devm_gpiod_get_optional(dev, "hpd", GPIOD_IN);
+
+ if (IS_ERR(hdmirx_dev->detect_5v_gpio)) {
+ dev_err(dev, "failed to get hdmirx hot plug detection gpio\n");
+ return PTR_ERR(hdmirx_dev->detect_5v_gpio);
+ }
+
+ hdmirx_dev->grf = syscon_regmap_lookup_by_phandle(dev->of_node,
+ "rockchip,grf");
+ if (IS_ERR(hdmirx_dev->grf)) {
+ dev_err(dev, "failed to get rockchip,grf\n");
+ return PTR_ERR(hdmirx_dev->grf);
+ }
+
+ hdmirx_dev->vo1_grf = syscon_regmap_lookup_by_phandle(dev->of_node,
+ "rockchip,vo1-grf");
+ if (IS_ERR(hdmirx_dev->vo1_grf)) {
+ dev_err(dev, "failed to get rockchip,vo1-grf\n");
+ return PTR_ERR(hdmirx_dev->vo1_grf);
+ }
+
+ if (!device_property_read_bool(dev, "hpd-is-active-low"))
+ hdmirx_dev->hpd_trigger_level_high = true;
+
+ ret = of_reserved_mem_device_init(dev);
+ if (ret) {
+ dev_warn(dev, "no reserved memory for HDMIRX, use default CMA\n");
+ } else {
+ ret = devm_add_action_or_reset(dev,
+ devm_hdmirx_of_reserved_mem_device_release,
+ dev);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static void hdmirx_disable_all_interrupts(struct snps_hdmirx_dev *hdmirx_dev)
+{
+ hdmirx_writel(hdmirx_dev, MAINUNIT_0_INT_MASK_N, 0);
+ hdmirx_writel(hdmirx_dev, MAINUNIT_1_INT_MASK_N, 0);
+ hdmirx_writel(hdmirx_dev, MAINUNIT_2_INT_MASK_N, 0);
+ hdmirx_writel(hdmirx_dev, AVPUNIT_0_INT_MASK_N, 0);
+ hdmirx_writel(hdmirx_dev, AVPUNIT_1_INT_MASK_N, 0);
+ hdmirx_writel(hdmirx_dev, PKT_0_INT_MASK_N, 0);
+ hdmirx_writel(hdmirx_dev, PKT_1_INT_MASK_N, 0);
+ hdmirx_writel(hdmirx_dev, PKT_2_INT_MASK_N, 0);
+ hdmirx_writel(hdmirx_dev, SCDC_INT_MASK_N, 0);
+ hdmirx_writel(hdmirx_dev, CEC_INT_MASK_N, 0);
+
+ hdmirx_clear_interrupt(hdmirx_dev, MAINUNIT_0_INT_CLEAR, 0xffffffff);
+ hdmirx_clear_interrupt(hdmirx_dev, MAINUNIT_1_INT_CLEAR, 0xffffffff);
+ hdmirx_clear_interrupt(hdmirx_dev, MAINUNIT_2_INT_CLEAR, 0xffffffff);
+ hdmirx_clear_interrupt(hdmirx_dev, AVPUNIT_0_INT_CLEAR, 0xffffffff);
+ hdmirx_clear_interrupt(hdmirx_dev, AVPUNIT_1_INT_CLEAR, 0xffffffff);
+ hdmirx_clear_interrupt(hdmirx_dev, PKT_0_INT_CLEAR, 0xffffffff);
+ hdmirx_clear_interrupt(hdmirx_dev, PKT_1_INT_CLEAR, 0xffffffff);
+ hdmirx_clear_interrupt(hdmirx_dev, PKT_2_INT_CLEAR, 0xffffffff);
+ hdmirx_clear_interrupt(hdmirx_dev, SCDC_INT_CLEAR, 0xffffffff);
+ hdmirx_clear_interrupt(hdmirx_dev, HDCP_INT_CLEAR, 0xffffffff);
+ hdmirx_clear_interrupt(hdmirx_dev, HDCP_1_INT_CLEAR, 0xffffffff);
+ hdmirx_clear_interrupt(hdmirx_dev, CEC_INT_CLEAR, 0xffffffff);
+}
+
+static void hdmirx_init(struct snps_hdmirx_dev *hdmirx_dev)
+{
+ hdmirx_update_bits(hdmirx_dev, PHY_CONFIG, PHY_RESET | PHY_PDDQ, 0);
+
+ regmap_write(hdmirx_dev->vo1_grf, VO1_GRF_VO1_CON2,
+ (HDMIRX_SDAIN_MSK | HDMIRX_SCLIN_MSK) |
+ ((HDMIRX_SDAIN_MSK | HDMIRX_SCLIN_MSK) << 16));
+ /*
+ * Some interrupts are enabled by default, so we disable
+ * all interrupts and clear interrupts status first.
+ */
+ hdmirx_disable_all_interrupts(hdmirx_dev);
+}
+
+/* hdmi-4k-300mhz EDID produced by v4l2-ctl tool */
+static u8 __maybe_unused edid_default[] = {
+ 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
+ 0x31, 0xd8, 0x34, 0x12, 0x00, 0x00, 0x00, 0x00,
+ 0x22, 0x1a, 0x01, 0x03, 0x80, 0x60, 0x36, 0x78,
+ 0x0f, 0xee, 0x91, 0xa3, 0x54, 0x4c, 0x99, 0x26,
+ 0x0f, 0x50, 0x54, 0x2f, 0xcf, 0x00, 0x31, 0x59,
+ 0x45, 0x59, 0x81, 0x80, 0x81, 0x40, 0x90, 0x40,
+ 0x95, 0x00, 0xa9, 0x40, 0xb3, 0x00, 0x04, 0x74,
+ 0x00, 0x30, 0xf2, 0x70, 0x5a, 0x80, 0xb0, 0x58,
+ 0x8a, 0x00, 0xc0, 0x1c, 0x32, 0x00, 0x00, 0x1e,
+ 0x00, 0x00, 0x00, 0xfd, 0x00, 0x18, 0x55, 0x18,
+ 0x87, 0x1e, 0x00, 0x0a, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x00, 0x00, 0x00, 0xfc, 0x00, 0x68,
+ 0x64, 0x6d, 0x69, 0x2d, 0x34, 0x6b, 0x2d, 0x33,
+ 0x30, 0x30, 0x0a, 0x20, 0x00, 0x00, 0x00, 0x10,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0xc5,
+
+ 0x02, 0x03, 0x40, 0xf1, 0x4f, 0x5f, 0x5e, 0x5d,
+ 0x10, 0x1f, 0x04, 0x13, 0x22, 0x21, 0x20, 0x05,
+ 0x14, 0x02, 0x11, 0x01, 0x23, 0x09, 0x07, 0x07,
+ 0x83, 0x01, 0x00, 0x00, 0x6d, 0x03, 0x0c, 0x00,
+ 0x10, 0x00, 0x00, 0x3c, 0x21, 0x00, 0x60, 0x01,
+ 0x02, 0x03, 0x67, 0xd8, 0x5d, 0xc4, 0x01, 0x00,
+ 0x00, 0x00, 0xe2, 0x00, 0xca, 0xe3, 0x05, 0x00,
+ 0x00, 0xe3, 0x06, 0x01, 0x00, 0xe2, 0x0d, 0x5f,
+ 0xa3, 0x66, 0x00, 0xa0, 0xf0, 0x70, 0x1f, 0x80,
+ 0x30, 0x20, 0x35, 0x00, 0xc0, 0x1c, 0x32, 0x00,
+ 0x00, 0x1e, 0x1a, 0x36, 0x80, 0xa0, 0x70, 0x38,
+ 0x1f, 0x40, 0x30, 0x20, 0x35, 0x00, 0xc0, 0x1c,
+ 0x32, 0x00, 0x00, 0x1a, 0x1a, 0x1d, 0x00, 0x80,
+ 0x51, 0xd0, 0x1c, 0x20, 0x40, 0x80, 0x35, 0x00,
+ 0xc0, 0x1c, 0x32, 0x00, 0x00, 0x1c, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xa1,
+};
+
+static void hdmirx_load_default_edid(struct snps_hdmirx_dev *hdmirx_dev)
+{
+ struct v4l2_edid def_edid = {};
+
+ hdmirx_hpd_ctrl(hdmirx_dev, false);
+
+ if (!IS_ENABLED(CONFIG_VIDEO_SYNOPSYS_HDMIRX_LOAD_DEFAULT_EDID))
+ return;
+
+ /* write the default edid while hpd is de-asserted */
+ def_edid.blocks = sizeof(edid_default) / EDID_BLOCK_SIZE;
+ def_edid.edid = edid_default;
+
+ hdmirx_write_edid(hdmirx_dev, &def_edid);
+ hdmirx_hpd_ctrl(hdmirx_dev, true);
+}
+
+static int hdmirx_disable(struct device *dev)
+{
+ struct snps_hdmirx_dev *hdmirx_dev = dev_get_drvdata(dev);
+ struct v4l2_device *v4l2_dev = &hdmirx_dev->v4l2_dev;
+
+ hdmirx_plugout(hdmirx_dev);
+ hdmirx_hpd_ctrl(hdmirx_dev, false);
+
+ clk_bulk_disable_unprepare(hdmirx_dev->num_clks, hdmirx_dev->clks);
+
+ v4l2_dbg(2, debug, v4l2_dev, "%s: suspend\n", __func__);
+
+ return pinctrl_pm_select_sleep_state(dev);
+}
+
+static int hdmirx_enable(struct device *dev)
+{
+ struct snps_hdmirx_dev *hdmirx_dev = dev_get_drvdata(dev);
+ struct v4l2_device *v4l2_dev = &hdmirx_dev->v4l2_dev;
+ int ret;
+
+ v4l2_dbg(2, debug, v4l2_dev, "%s: resume\n", __func__);
+ ret = pinctrl_pm_select_default_state(dev);
+ if (ret < 0)
+ return ret;
+
+ ret = clk_bulk_prepare_enable(hdmirx_dev->num_clks, hdmirx_dev->clks);
+ if (ret) {
+ dev_err(dev, "failed to enable hdmirx bulk clks: %d\n", ret);
+ return ret;
+ }
+
+ reset_control_bulk_assert(HDMIRX_NUM_RST, hdmirx_dev->resets);
+ usleep_range(150, 160);
+ reset_control_bulk_deassert(HDMIRX_NUM_RST, hdmirx_dev->resets);
+ usleep_range(150, 160);
+
+ return 0;
+}
+
+static void hdmirx_disable_irq(struct device *dev)
+{
+ struct snps_hdmirx_dev *hdmirx_dev = dev_get_drvdata(dev);
+
+ disable_irq(hdmirx_dev->det_irq);
+ disable_irq(hdmirx_dev->dma_irq);
+ disable_irq(hdmirx_dev->hdmi_irq);
+
+ cancel_delayed_work_sync(&hdmirx_dev->delayed_work_hotplug);
+ cancel_delayed_work_sync(&hdmirx_dev->delayed_work_res_change);
+}
+
+static void hdmirx_enable_irq(struct device *dev)
+{
+ struct snps_hdmirx_dev *hdmirx_dev = dev_get_drvdata(dev);
+
+ enable_irq(hdmirx_dev->hdmi_irq);
+ enable_irq(hdmirx_dev->dma_irq);
+ enable_irq(hdmirx_dev->det_irq);
+
+ queue_delayed_work(system_unbound_wq,
+ &hdmirx_dev->delayed_work_hotplug,
+ msecs_to_jiffies(110));
+}
+
+static __maybe_unused int hdmirx_suspend(struct device *dev)
+{
+ struct snps_hdmirx_dev *hdmirx_dev = dev_get_drvdata(dev);
+
+ hdmirx_disable_irq(dev);
+
+ /* TODO store CEC HW state */
+ disable_irq(hdmirx_dev->cec->irq);
+
+ return hdmirx_disable(dev);
+}
+
+static __maybe_unused int hdmirx_resume(struct device *dev)
+{
+ struct snps_hdmirx_dev *hdmirx_dev = dev_get_drvdata(dev);
+ int ret = hdmirx_enable(dev);
+
+ if (ret)
+ return ret;
+
+ if (hdmirx_dev->edid_blocks_written) {
+ hdmirx_write_edid_data(hdmirx_dev, hdmirx_dev->edid,
+ hdmirx_dev->edid_blocks_written);
+ hdmirx_hpd_ctrl(hdmirx_dev, true);
+ }
+
+ /* TODO restore CEC HW state */
+ enable_irq(hdmirx_dev->cec->irq);
+
+ hdmirx_enable_irq(dev);
+
+ return 0;
+}
+
+static const struct dev_pm_ops snps_hdmirx_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(hdmirx_suspend, hdmirx_resume)
+};
+
+static int hdmirx_setup_irq(struct snps_hdmirx_dev *hdmirx_dev,
+ struct platform_device *pdev)
+{
+ struct device *dev = hdmirx_dev->dev;
+ int ret, irq;
+
+ irq = platform_get_irq_byname(pdev, "hdmi");
+ if (irq < 0) {
+ dev_err_probe(dev, irq, "failed to get hdmi irq\n");
+ return irq;
+ }
+
+ irq_set_status_flags(irq, IRQ_NOAUTOEN);
+
+ hdmirx_dev->hdmi_irq = irq;
+ ret = devm_request_irq(dev, irq, hdmirx_hdmi_irq_handler, 0,
+ "rk_hdmirx-hdmi", hdmirx_dev);
+ if (ret) {
+ dev_err_probe(dev, ret, "failed to request hdmi irq\n");
+ return ret;
+ }
+
+ irq = platform_get_irq_byname(pdev, "dma");
+ if (irq < 0) {
+ dev_err_probe(dev, irq, "failed to get dma irq\n");
+ return irq;
+ }
+
+ irq_set_status_flags(irq, IRQ_NOAUTOEN);
+
+ hdmirx_dev->dma_irq = irq;
+ ret = devm_request_threaded_irq(dev, irq, NULL, hdmirx_dma_irq_handler,
+ IRQF_ONESHOT, "rk_hdmirx-dma",
+ hdmirx_dev);
+ if (ret) {
+ dev_err_probe(dev, ret, "failed to request dma irq\n");
+ return ret;
+ }
+
+ irq = gpiod_to_irq(hdmirx_dev->detect_5v_gpio);
+ if (irq < 0) {
+ dev_err_probe(dev, irq, "failed to get hdmirx-5v irq\n");
+ return irq;
+ }
+
+ irq_set_status_flags(irq, IRQ_NOAUTOEN);
+
+ hdmirx_dev->det_irq = irq;
+ ret = devm_request_irq(dev, irq, hdmirx_5v_det_irq_handler,
+ IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
+ "rk_hdmirx-5v", hdmirx_dev);
+ if (ret) {
+ dev_err_probe(dev, ret, "failed to request hdmirx-5v irq\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int hdmirx_register_cec(struct snps_hdmirx_dev *hdmirx_dev,
+ struct platform_device *pdev)
+{
+ struct device *dev = hdmirx_dev->dev;
+ struct hdmirx_cec_data cec_data;
+ int irq;
+
+ irq = platform_get_irq_byname(pdev, "cec");
+ if (irq < 0) {
+ dev_err_probe(dev, irq, "failed to get cec irq\n");
+ return irq;
+ }
+
+ cec_data.hdmirx = hdmirx_dev;
+ cec_data.dev = hdmirx_dev->dev;
+ cec_data.ops = &hdmirx_cec_ops;
+ cec_data.irq = irq;
+
+ hdmirx_dev->cec = snps_hdmirx_cec_register(&cec_data);
+ if (IS_ERR(hdmirx_dev->cec))
+ return dev_err_probe(dev, PTR_ERR(hdmirx_dev->cec),
+ "failed to register cec\n");
+
+ return 0;
+}
+
+static int hdmirx_probe(struct platform_device *pdev)
+{
+ struct snps_hdmirx_dev *hdmirx_dev;
+ struct device *dev = &pdev->dev;
+ struct v4l2_ctrl_handler *hdl;
+ struct hdmirx_stream *stream;
+ struct v4l2_device *v4l2_dev;
+ int ret;
+
+ hdmirx_dev = devm_kzalloc(dev, sizeof(*hdmirx_dev), GFP_KERNEL);
+ if (!hdmirx_dev)
+ return -ENOMEM;
+
+ /*
+ * The RK3588 HDMIRX SoC integration doesn't use an IOMMU and can
+ * address only the first 32 bits of the physical address space.
+ */
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
+
+ hdmirx_dev->dev = dev;
+ dev_set_drvdata(dev, hdmirx_dev);
+
+ ret = hdmirx_parse_dt(hdmirx_dev);
+ if (ret)
+ return ret;
+
+ ret = hdmirx_setup_irq(hdmirx_dev, pdev);
+ if (ret)
+ return ret;
+
+ hdmirx_dev->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(hdmirx_dev->regs))
+ return dev_err_probe(dev, PTR_ERR(hdmirx_dev->regs),
+ "failed to remap regs resource\n");
+
+ mutex_init(&hdmirx_dev->stream_lock);
+ mutex_init(&hdmirx_dev->work_lock);
+ spin_lock_init(&hdmirx_dev->rst_lock);
+
+ init_completion(&hdmirx_dev->cr_write_done);
+ init_completion(&hdmirx_dev->timer_base_lock);
+ init_completion(&hdmirx_dev->avi_pkt_rcv);
+
+ INIT_DELAYED_WORK(&hdmirx_dev->delayed_work_hotplug,
+ hdmirx_delayed_work_hotplug);
+ INIT_DELAYED_WORK(&hdmirx_dev->delayed_work_res_change,
+ hdmirx_delayed_work_res_change);
+
+ hdmirx_dev->cur_fmt_fourcc = V4L2_PIX_FMT_BGR24;
+ hdmirx_dev->timings = cea640x480;
+
+ hdmirx_enable(dev);
+ hdmirx_init(hdmirx_dev);
+
+ v4l2_dev = &hdmirx_dev->v4l2_dev;
+ strscpy(v4l2_dev->name, dev_name(dev), sizeof(v4l2_dev->name));
+
+ hdl = &hdmirx_dev->hdl;
+ v4l2_ctrl_handler_init(hdl, 3);
+
+ hdmirx_dev->detect_tx_5v_ctrl = v4l2_ctrl_new_std(hdl, NULL,
+ V4L2_CID_DV_RX_POWER_PRESENT,
+ 0, 1, 0, 0);
+
+ hdmirx_dev->rgb_range = v4l2_ctrl_new_std_menu(hdl, NULL,
+ V4L2_CID_DV_RX_RGB_RANGE,
+ V4L2_DV_RGB_RANGE_FULL, 0,
+ V4L2_DV_RGB_RANGE_AUTO);
+
+ hdmirx_dev->rgb_range->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+
+ hdmirx_dev->content_type =
+ v4l2_ctrl_new_std_menu(hdl, NULL, V4L2_CID_DV_RX_IT_CONTENT_TYPE,
+ V4L2_DV_IT_CONTENT_TYPE_NO_ITC, 0,
+ V4L2_DV_IT_CONTENT_TYPE_NO_ITC);
+
+ if (hdl->error) {
+ ret = hdl->error;
+ dev_err_probe(dev, ret, "v4l2 ctrl handler init failed\n");
+ goto err_pm;
+ }
+ hdmirx_dev->v4l2_dev.ctrl_handler = hdl;
+
+ ret = v4l2_device_register(dev, &hdmirx_dev->v4l2_dev);
+ if (ret < 0) {
+ dev_err_probe(dev, ret, "v4l2 device registration failed\n");
+ goto err_hdl;
+ }
+
+ stream = &hdmirx_dev->stream;
+ stream->hdmirx_dev = hdmirx_dev;
+ ret = hdmirx_register_stream_vdev(stream);
+ if (ret < 0) {
+ dev_err_probe(dev, ret, "video device registration failed\n");
+ goto err_unreg_v4l2_dev;
+ }
+
+ ret = hdmirx_register_cec(hdmirx_dev, pdev);
+ if (ret)
+ goto err_unreg_video_dev;
+
+ hdmirx_load_default_edid(hdmirx_dev);
+
+ hdmirx_enable_irq(dev);
+
+ hdmirx_dev->debugfs_dir = debugfs_create_dir(hdmirx_dev->v4l2_dev.name,
+ v4l2_debugfs_root());
+
+ hdmirx_dev->infoframes = v4l2_debugfs_if_alloc(hdmirx_dev->debugfs_dir,
+ V4L2_DEBUGFS_IF_AVI, hdmirx_dev,
+ hdmirx_debugfs_if_read);
+
+ return 0;
+
+err_unreg_video_dev:
+ vb2_video_unregister_device(&hdmirx_dev->stream.vdev);
+err_unreg_v4l2_dev:
+ v4l2_device_unregister(&hdmirx_dev->v4l2_dev);
+err_hdl:
+ v4l2_ctrl_handler_free(&hdmirx_dev->hdl);
+err_pm:
+ hdmirx_disable(dev);
+
+ return ret;
+}
+
+static void hdmirx_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct snps_hdmirx_dev *hdmirx_dev = dev_get_drvdata(dev);
+
+ v4l2_debugfs_if_free(hdmirx_dev->infoframes);
+ debugfs_remove_recursive(hdmirx_dev->debugfs_dir);
+
+ snps_hdmirx_cec_unregister(hdmirx_dev->cec);
+
+ hdmirx_disable_irq(dev);
+
+ vb2_video_unregister_device(&hdmirx_dev->stream.vdev);
+ v4l2_ctrl_handler_free(&hdmirx_dev->hdl);
+ v4l2_device_unregister(&hdmirx_dev->v4l2_dev);
+
+ /* touched by hdmirx_disable()->hdmirx_plugout() */
+ hdmirx_dev->rgb_range = NULL;
+ hdmirx_dev->content_type = NULL;
+
+ hdmirx_disable(dev);
+
+ reset_control_bulk_assert(HDMIRX_NUM_RST, hdmirx_dev->resets);
+}
+
+static const struct of_device_id hdmirx_id[] = {
+ { .compatible = "rockchip,rk3588-hdmirx-ctrler" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, hdmirx_id);
+
+static struct platform_driver hdmirx_driver = {
+ .probe = hdmirx_probe,
+ .remove = hdmirx_remove,
+ .driver = {
+ .name = "snps_hdmirx",
+ .of_match_table = hdmirx_id,
+ .pm = &snps_hdmirx_pm_ops,
+ }
+};
+module_platform_driver(hdmirx_driver);
+
+MODULE_DESCRIPTION("Synopsys HDMI Receiver Driver");
+MODULE_AUTHOR("Dingxian Wen <shawn.wen@rock-chips.com>");
+MODULE_AUTHOR("Shreeya Patel <shreeya.patel@collabora.com>");
+MODULE_AUTHOR("Dmitry Osipenko <dmitry.osipenko@collabora.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/platform/synopsys/hdmirx/snps_hdmirx.h b/drivers/media/platform/synopsys/hdmirx/snps_hdmirx.h
new file mode 100644
index 000000000000..220ab99ca611
--- /dev/null
+++ b/drivers/media/platform/synopsys/hdmirx/snps_hdmirx.h
@@ -0,0 +1,394 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2021 Rockchip Electronics Co. Ltd.
+ *
+ * Author: Dingxian Wen <shawn.wen@rock-chips.com>
+ */
+
+#ifndef DW_HDMIRX_H
+#define DW_HDMIRX_H
+
+#include <linux/bitops.h>
+
+#define UPDATE(x, h, l) (((x) << (l)) & GENMASK((h), (l)))
+#define HIWORD_UPDATE(v, h, l) (((v) << (l)) | (GENMASK((h), (l)) << 16))
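+/* HIWORD_UPDATE() also sets the write-enable bits for the field in the upper 16 bits (GRF register convention) */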
+
+/* SYS_GRF */
+#define SYS_GRF_SOC_CON1 0x0304
+#define HDMIRXPHY_SRAM_EXT_LD_DONE BIT(1)
+#define HDMIRXPHY_SRAM_BYPASS BIT(0)
+#define SYS_GRF_SOC_STATUS1 0x0384
+#define HDMIRXPHY_SRAM_INIT_DONE BIT(10)
+#define SYS_GRF_CHIP_ID 0x0600
+
+/* VO1_GRF */
+#define VO1_GRF_VO1_CON2 0x0008
+#define HDMIRX_SDAIN_MSK BIT(2)
+#define HDMIRX_SCLIN_MSK BIT(1)
+
+/* HDMIRX PHY */
+#define SUP_DIG_ANA_CREGS_SUP_ANA_NC 0x004f
+
+#define LANE0_DIG_ASIC_RX_OVRD_OUT_0 0x100f
+#define LANE1_DIG_ASIC_RX_OVRD_OUT_0 0x110f
+#define LANE2_DIG_ASIC_RX_OVRD_OUT_0 0x120f
+#define LANE3_DIG_ASIC_RX_OVRD_OUT_0 0x130f
+#define ASIC_ACK_OVRD_EN BIT(1)
+#define ASIC_ACK BIT(0)
+
+#define LANE0_DIG_RX_VCOCAL_RX_VCO_CAL_CTRL_2 0x104a
+#define LANE1_DIG_RX_VCOCAL_RX_VCO_CAL_CTRL_2 0x114a
+#define LANE2_DIG_RX_VCOCAL_RX_VCO_CAL_CTRL_2 0x124a
+#define LANE3_DIG_RX_VCOCAL_RX_VCO_CAL_CTRL_2 0x134a
+#define FREQ_TUNE_START_VAL_MASK GENMASK(9, 0)
+#define FREQ_TUNE_START_VAL(x) UPDATE(x, 9, 0)
+
+#define HDMIPCS_DIG_CTRL_PATH_MAIN_FSM_FSM_CONFIG 0x20c4
+#define HDMIPCS_DIG_CTRL_PATH_MAIN_FSM_ADAPT_REF_FOM 0x20c7
+#define HDMIPCS_DIG_CTRL_PATH_MAIN_FSM_RATE_CALC_HDMI14_CDR_SETTING_3_REG 0x20e9
+#define CDR_SETTING_BOUNDARY_3_DEFAULT 0x52da
+#define HDMIPCS_DIG_CTRL_PATH_MAIN_FSM_RATE_CALC_HDMI14_CDR_SETTING_4_REG 0x20ea
+#define CDR_SETTING_BOUNDARY_4_DEFAULT 0x43cd
+#define HDMIPCS_DIG_CTRL_PATH_MAIN_FSM_RATE_CALC_HDMI14_CDR_SETTING_5_REG 0x20eb
+#define CDR_SETTING_BOUNDARY_5_DEFAULT 0x35b3
+#define HDMIPCS_DIG_CTRL_PATH_MAIN_FSM_RATE_CALC_HDMI14_CDR_SETTING_6_REG 0x20fb
+#define CDR_SETTING_BOUNDARY_6_DEFAULT 0x2799
+#define HDMIPCS_DIG_CTRL_PATH_MAIN_FSM_RATE_CALC_HDMI14_CDR_SETTING_7_REG 0x20fc
+#define CDR_SETTING_BOUNDARY_7_DEFAULT 0x1b65
+
+#define RAWLANE0_DIG_PCS_XF_RX_OVRD_OUT 0x300e
+#define RAWLANE1_DIG_PCS_XF_RX_OVRD_OUT 0x310e
+#define RAWLANE2_DIG_PCS_XF_RX_OVRD_OUT 0x320e
+#define RAWLANE3_DIG_PCS_XF_RX_OVRD_OUT 0x330e
+#define PCS_ACK_WRITE_SELECT BIT(14)
+#define PCS_EN_CTL BIT(1)
+#define PCS_ACK BIT(0)
+
+#define RAWLANE0_DIG_AON_FAST_FLAGS 0x305c
+#define RAWLANE1_DIG_AON_FAST_FLAGS 0x315c
+#define RAWLANE2_DIG_AON_FAST_FLAGS 0x325c
+#define RAWLANE3_DIG_AON_FAST_FLAGS 0x335c
+
+/* HDMIRX Ctrler */
+#define GLOBAL_SWRESET_REQUEST 0x0020
+#define DATAPATH_SWRESETREQ BIT(12)
+#define GLOBAL_SWENABLE 0x0024
+#define PHYCTRL_ENABLE BIT(21)
+#define CEC_ENABLE BIT(16)
+#define TMDS_ENABLE BIT(13)
+#define DATAPATH_ENABLE BIT(12)
+#define PKTFIFO_ENABLE BIT(11)
+#define AVPUNIT_ENABLE BIT(8)
+#define MAIN_ENABLE BIT(0)
+#define GLOBAL_TIMER_REF_BASE 0x0028
+#define CORE_CONFIG 0x0050
+#define CMU_CONFIG0 0x0060
+#define TMDSQPCLK_STABLE_FREQ_MARGIN_MASK GENMASK(30, 16)
+#define TMDSQPCLK_STABLE_FREQ_MARGIN(x) UPDATE(x, 30, 16)
+#define AUDCLK_STABLE_FREQ_MARGIN_MASK GENMASK(11, 9)
+#define AUDCLK_STABLE_FREQ_MARGIN(x) UPDATE(x, 11, 9)
+#define CMU_STATUS 0x007c
+#define TMDSQPCLK_LOCKED_ST BIT(4)
+#define CMU_TMDSQPCLK_FREQ 0x0084
+#define PHY_CONFIG 0x00c0
+#define LDO_AFE_PROG_MASK GENMASK(24, 23)
+#define LDO_AFE_PROG(x) UPDATE(x, 24, 23)
+#define LDO_PWRDN BIT(21)
+#define TMDS_CLOCK_RATIO BIT(16)
+#define RXDATA_WIDTH BIT(15)
+#define REFFREQ_SEL_MASK GENMASK(11, 9)
+#define REFFREQ_SEL(x) UPDATE(x, 11, 9)
+#define HDMI_DISABLE BIT(8)
+#define PHY_PDDQ BIT(1)
+#define PHY_RESET BIT(0)
+#define PHY_STATUS 0x00c8
+#define HDMI_DISABLE_ACK BIT(1)
+#define PDDQ_ACK BIT(0)
+#define PHYCREG_CONFIG0 0x00e0
+#define PHYCREG_CR_PARA_SELECTION_MODE_MASK GENMASK(1, 0)
+#define PHYCREG_CR_PARA_SELECTION_MODE(x) UPDATE(x, 1, 0)
+#define PHYCREG_CONFIG1 0x00e4
+#define PHYCREG_CONFIG2 0x00e8
+#define PHYCREG_CONFIG3 0x00ec
+#define PHYCREG_CONTROL 0x00f0
+#define PHYCREG_CR_PARA_WRITE_P BIT(1)
+#define PHYCREG_CR_PARA_READ_P BIT(0)
+#define PHYCREG_STATUS 0x00f4
+
+#define MAINUNIT_STATUS 0x0150
+#define TMDSVALID_STABLE_ST BIT(1)
+#define DESCRAND_EN_CONTROL 0x0210
+#define SCRAMB_EN_SEL_QST_MASK GENMASK(1, 0)
+#define SCRAMB_EN_SEL_QST(x) UPDATE(x, 1, 0)
+#define DESCRAND_SYNC_CONTROL 0x0214
+#define RECOVER_UNSYNC_STREAM_QST BIT(0)
+#define DESCRAND_SYNC_SEQ_CONFIG 0x022c
+#define DESCRAND_SYNC_SEQ_ERR_CNT_EN BIT(0)
+#define DESCRAND_SYNC_SEQ_STATUS 0x0234
+#define DEFRAMER_CONFIG0 0x0270
+#define VS_CNT_THR_QST_MASK GENMASK(27, 20)
+#define VS_CNT_THR_QST(x) UPDATE(x, 27, 20)
+#define HS_POL_QST_MASK GENMASK(19, 18)
+#define HS_POL_QST(x) UPDATE(x, 19, 18)
+#define VS_POL_QST_MASK GENMASK(17, 16)
+#define VS_POL_QST(x) UPDATE(x, 17, 16)
+#define VS_REMAPFILTER_EN_QST BIT(8)
+#define VS_FILTER_ORDER_QST_MASK GENMASK(1, 0)
+#define VS_FILTER_ORDER_QST(x) UPDATE(x, 1, 0)
+#define DEFRAMER_VSYNC_CNT_CLEAR 0x0278
+#define VSYNC_CNT_CLR_P BIT(0)
+#define DEFRAMER_STATUS 0x027c
+#define OPMODE_STS_MASK GENMASK(6, 4)
+#define I2C_SLAVE_CONFIG1 0x0164
+#define I2C_SDA_OUT_HOLD_VALUE_QST_MASK GENMASK(15, 8)
+#define I2C_SDA_OUT_HOLD_VALUE_QST(x) UPDATE(x, 15, 8)
+#define I2C_SDA_IN_HOLD_VALUE_QST_MASK GENMASK(7, 0)
+#define I2C_SDA_IN_HOLD_VALUE_QST(x) UPDATE(x, 7, 0)
+#define OPMODE_STS_MASK GENMASK(6, 4)
+#define REPEATER_QST BIT(28)
+#define FASTREAUTH_QST BIT(27)
+#define FEATURES_1DOT1_QST BIT(26)
+#define FASTI2C_QST BIT(25)
+#define EESS_CTL_THR_QST_MASK GENMASK(19, 16)
+#define EESS_CTL_THR_QST(x) UPDATE(x, 19, 16)
+#define OESS_CTL3_THR_QST_MASK GENMASK(11, 8)
+#define OESS_CTL3_THR_QST(x) UPDATE(x, 11, 8)
+#define EESS_OESS_SEL_QST_MASK GENMASK(5, 4)
+#define EESS_OESS_SEL_QST(x) UPDATE(x, 5, 4)
+#define KEY_DECRYPT_EN_QST BIT(0)
+#define KEY_DECRYPT_SEED_QST_MASK GENMASK(15, 0)
+#define KEY_DECRYPT_SEED_QST(x) UPDATE(x, 15, 0)
+#define HDCP_INT_CLEAR 0x50d8
+#define HDCP_1_INT_CLEAR 0x50e8
+#define HDCP2_CONFIG 0x02f0
+#define HDCP2_SWITCH_OVR_VALUE BIT(2)
+#define HDCP2_SWITCH_OVR_EN BIT(1)
+
+#define VIDEO_CONFIG2 0x042c
+#define VPROC_VSYNC_POL_OVR_VALUE BIT(19)
+#define VPROC_VSYNC_POL_OVR_EN BIT(18)
+#define VPROC_HSYNC_POL_OVR_VALUE BIT(17)
+#define VPROC_HSYNC_POL_OVR_EN BIT(16)
+#define VPROC_FMT_OVR_VALUE_MASK GENMASK(6, 4)
+#define VPROC_FMT_OVR_VALUE(x) UPDATE(x, 6, 4)
+#define VPROC_FMT_OVR_EN BIT(0)
+
+#define AFIFO_FILL_RESTART BIT(0)
+#define AFIFO_INIT_P BIT(0)
+#define AFIFO_THR_LOW_QST_MASK GENMASK(25, 16)
+#define AFIFO_THR_LOW_QST(x) UPDATE(x, 25, 16)
+#define AFIFO_THR_HIGH_QST_MASK GENMASK(9, 0)
+#define AFIFO_THR_HIGH_QST(x) UPDATE(x, 9, 0)
+#define AFIFO_THR_MUTE_LOW_QST_MASK GENMASK(25, 16)
+#define AFIFO_THR_MUTE_LOW_QST(x) UPDATE(x, 25, 16)
+#define AFIFO_THR_MUTE_HIGH_QST_MASK GENMASK(9, 0)
+#define AFIFO_THR_MUTE_HIGH_QST(x) UPDATE(x, 9, 0)
+
+#define AFIFO_UNDERFLOW_ST BIT(25)
+#define AFIFO_OVERFLOW_ST BIT(24)
+
+#define SPEAKER_ALLOC_OVR_EN BIT(16)
+#define I2S_BPCUV_EN BIT(4)
+#define SPDIF_EN BIT(2)
+#define I2S_EN BIT(1)
+#define AFIFO_THR_PASS_DEMUTEMASK_N BIT(24)
+#define AVMUTE_DEMUTEMASK_N BIT(16)
+#define AFIFO_THR_MUTE_LOW_MUTEMASK_N BIT(9)
+#define AFIFO_THR_MUTE_HIGH_MUTEMASK_N BIT(8)
+#define AVMUTE_MUTEMASK_N BIT(0)
+#define SCDC_CONFIG 0x0580
+#define HPDLOW BIT(1)
+#define POWERPROVIDED BIT(0)
+#define SCDC_REGBANK_STATUS1 0x058c
+#define SCDC_TMDSBITCLKRATIO BIT(1)
+#define SCDC_REGBANK_STATUS3 0x0594
+#define SCDC_REGBANK_CONFIG0 0x05c0
+#define SCDC_SINKVERSION_QST_MASK GENMASK(7, 0)
+#define SCDC_SINKVERSION_QST(x) UPDATE(x, 7, 0)
+#define AGEN_LAYOUT BIT(4)
+#define AGEN_SPEAKER_ALLOC GENMASK(15, 8)
+
+#define CED_CONFIG 0x0760
+#define CED_VIDDATACHECKEN_QST BIT(27)
+#define CED_DATAISCHECKEN_QST BIT(26)
+#define CED_GBCHECKEN_QST BIT(25)
+#define CED_CTRLCHECKEN_QST BIT(24)
+#define CED_CHLOCKMAXER_QST_MASK GENMASK(14, 0)
+#define CED_CHLOCKMAXER_QST(x) UPDATE(x, 14, 0)
+#define CED_DYN_CONFIG 0x0768
+#define CED_DYN_CONTROL 0x076c
+#define PKTEX_BCH_ERRFILT_CONFIG 0x07c4
+#define PKTEX_CHKSUM_ERRFILT_CONFIG 0x07c8
+
+#define PKTDEC_ACR_PH2_1 0x1100
+#define PKTDEC_ACR_PB3_0 0x1104
+#define PKTDEC_ACR_PB7_4 0x1108
+#define PKTDEC_AVIIF_PH2_1 0x1200
+#define PKTDEC_AVIIF_PB3_0 0x1204
+#define PKTDEC_AVIIF_PB7_4 0x1208
+#define VIC_VAL_MASK GENMASK(6, 0)
+#define PKTDEC_AVIIF_PB11_8 0x120c
+#define PKTDEC_AVIIF_PB15_12 0x1210
+#define PKTDEC_AVIIF_PB19_16 0x1214
+#define PKTDEC_AVIIF_PB23_20 0x1218
+#define PKTDEC_AVIIF_PB27_24 0x121c
+
+#define PKTFIFO_CONFIG 0x1500
+#define PKTFIFO_STORE_FILT_CONFIG 0x1504
+#define PKTFIFO_THR_CONFIG0 0x1508
+#define PKTFIFO_THR_CONFIG1 0x150c
+#define PKTFIFO_CONTROL 0x1510
+
+#define VMON_STATUS1 0x1580
+#define VMON_STATUS2 0x1584
+#define VMON_STATUS3 0x1588
+#define VMON_STATUS4 0x158c
+#define VMON_STATUS5 0x1590
+#define VMON_STATUS6 0x1594
+#define VMON_STATUS7 0x1598
+#define VMON_ILACE_DETECT BIT(4)
+
+#define CEC_TX_CONTROL 0x2000
+#define CEC_STATUS 0x2004
+#define CEC_CONFIG 0x2008
+#define RX_AUTO_DRIVE_ACKNOWLEDGE BIT(9)
+#define CEC_ADDR 0x200c
+#define CEC_TX_COUNT 0x2020
+#define CEC_TX_DATA3_0 0x2024
+#define CEC_RX_COUNT_STATUS 0x2040
+#define CEC_RX_DATA3_0 0x2044
+#define CEC_LOCK_CONTROL 0x2054
+#define CEC_RXQUAL_BITTIME_CONFIG 0x2060
+#define CEC_RX_BITTIME_CONFIG 0x2064
+#define CEC_TX_BITTIME_CONFIG 0x2068
+
+#define DMA_CONFIG1 0x4400
+#define UV_WID_MASK GENMASK(31, 28)
+#define UV_WID(x) UPDATE(x, 31, 28)
+#define Y_WID_MASK GENMASK(27, 24)
+#define Y_WID(x) UPDATE(x, 27, 24)
+#define DDR_STORE_FORMAT_MASK GENMASK(15, 12)
+#define DDR_STORE_FORMAT(x) UPDATE(x, 15, 12)
+#define ABANDON_EN BIT(0)
+#define DMA_CONFIG2 0x4404
+#define DMA_CONFIG3 0x4408
+#define DMA_CONFIG4 0x440c /* DMA interrupt enable */
+#define DMA_CONFIG5 0x4410 /* DMA interrupt clear status */
+#define LINE_FLAG_INT_EN BIT(8)
+#define HDMIRX_DMA_IDLE_INT BIT(7)
+#define HDMIRX_LOCK_DISABLE_INT BIT(6)
+#define LAST_FRAME_AXI_UNFINISH_INT_EN BIT(5)
+#define FIFO_OVERFLOW_INT_EN BIT(2)
+#define FIFO_UNDERFLOW_INT_EN BIT(1)
+#define HDMIRX_AXI_ERROR_INT_EN BIT(0)
+#define DMA_CONFIG6 0x4414
+#define RB_SWAP_EN BIT(9)
+#define HSYNC_TOGGLE_EN BIT(5)
+#define VSYNC_TOGGLE_EN BIT(4)
+#define HDMIRX_DMA_EN BIT(1)
+#define DMA_CONFIG7 0x4418
+#define LINE_FLAG_NUM_MASK GENMASK(31, 16)
+#define LINE_FLAG_NUM(x) UPDATE(x, 31, 16)
+#define LOCK_FRAME_NUM_MASK GENMASK(11, 0)
+#define LOCK_FRAME_NUM(x) UPDATE(x, 11, 0)
+#define DMA_CONFIG8 0x441c
+#define REG_MIRROR_EN BIT(0)
+#define DMA_CONFIG9 0x4420
+#define DMA_CONFIG10 0x4424
+#define DMA_CONFIG11 0x4428
+#define EDID_READ_EN_MASK BIT(8)
+#define EDID_READ_EN(x) UPDATE(x, 8, 8)
+#define EDID_WRITE_EN_MASK BIT(7)
+#define EDID_WRITE_EN(x) UPDATE(x, 7, 7)
+#define EDID_SLAVE_ADDR_MASK GENMASK(6, 0)
+#define EDID_SLAVE_ADDR(x) UPDATE(x, 6, 0)
+#define DMA_STATUS1 0x4430 /* DMA interrupt status */
+#define DMA_STATUS2 0x4434
+#define DMA_STATUS3 0x4438
+#define DMA_STATUS4 0x443c
+#define DMA_STATUS5 0x4440
+#define DMA_STATUS6 0x4444
+#define DMA_STATUS7 0x4448
+#define DMA_STATUS8 0x444c
+#define DMA_STATUS9 0x4450
+#define DMA_STATUS10 0x4454
+#define HDMIRX_LOCK BIT(3)
+#define DMA_STATUS11 0x4458
+#define HDMIRX_TYPE_MASK GENMASK(8, 7)
+#define HDMIRX_COLOR_DEPTH_MASK GENMASK(6, 3)
+#define HDMIRX_FORMAT_MASK GENMASK(2, 0)
+#define DMA_STATUS12 0x445c
+#define DMA_STATUS13 0x4460
+#define DMA_STATUS14 0x4464
+
+#define MAINUNIT_INTVEC_INDEX 0x5000
+#define MAINUNIT_0_INT_STATUS 0x5010
+#define CECRX_NOTIFY_ERR BIT(12)
+#define CECRX_EOM BIT(11)
+#define CECTX_DRIVE_ERR BIT(10)
+#define CECRX_BUSY BIT(9)
+#define CECTX_BUSY BIT(8)
+#define CECTX_FRAME_DISCARDED BIT(5)
+#define CECTX_NRETRANSMIT_FAIL BIT(4)
+#define CECTX_LINE_ERR BIT(3)
+#define CECTX_ARBLOST BIT(2)
+#define CECTX_NACK BIT(1)
+#define CECTX_DONE BIT(0)
+#define MAINUNIT_0_INT_MASK_N 0x5014
+#define MAINUNIT_0_INT_CLEAR 0x5018
+#define MAINUNIT_0_INT_FORCE 0x501c
+#define TIMER_BASE_LOCKED_IRQ BIT(26)
+#define TMDSQPCLK_OFF_CHG BIT(5)
+#define TMDSQPCLK_LOCKED_CHG BIT(4)
+#define MAINUNIT_1_INT_STATUS 0x5020
+#define MAINUNIT_1_INT_MASK_N 0x5024
+#define MAINUNIT_1_INT_CLEAR 0x5028
+#define MAINUNIT_1_INT_FORCE 0x502c
+#define MAINUNIT_2_INT_STATUS 0x5030
+#define MAINUNIT_2_INT_MASK_N 0x5034
+#define MAINUNIT_2_INT_CLEAR 0x5038
+#define MAINUNIT_2_INT_FORCE 0x503c
+#define PHYCREG_CR_READ_DONE BIT(11)
+#define PHYCREG_CR_WRITE_DONE BIT(10)
+#define TMDSVALID_STABLE_CHG BIT(1)
+
+#define AVPUNIT_0_INT_STATUS 0x5040
+#define AVPUNIT_0_INT_MASK_N 0x5044
+#define AVPUNIT_0_INT_CLEAR 0x5048
+#define AVPUNIT_0_INT_FORCE 0x504c
+#define CED_DYN_CNT_CH2_IRQ BIT(22)
+#define CED_DYN_CNT_CH1_IRQ BIT(21)
+#define CED_DYN_CNT_CH0_IRQ BIT(20)
+#define AVPUNIT_1_INT_STATUS 0x5050
+#define DEFRAMER_VSYNC_THR_REACHED_IRQ BIT(1)
+#define AVPUNIT_1_INT_MASK_N 0x5054
+#define DEFRAMER_VSYNC_THR_REACHED_MASK_N BIT(1)
+#define DEFRAMER_VSYNC_MASK_N BIT(0)
+#define AVPUNIT_1_INT_CLEAR 0x5058
+#define DEFRAMER_VSYNC_THR_REACHED_CLEAR BIT(1)
+#define PKT_0_INT_STATUS 0x5080
+#define PKTDEC_ACR_CHG_IRQ BIT(3)
+#define PKT_0_INT_MASK_N 0x5084
+#define PKTDEC_ACR_CHG_MASK_N BIT(3)
+#define PKT_0_INT_CLEAR 0x5088
+#define PKT_1_INT_STATUS 0x5090
+#define PKT_1_INT_MASK_N 0x5094
+#define PKT_1_INT_CLEAR 0x5098
+#define PKT_2_INT_STATUS 0x50a0
+#define PKTDEC_ACR_RCV_IRQ BIT(3)
+#define PKT_2_INT_MASK_N 0x50a4
+#define PKTDEC_AVIIF_RCV_IRQ BIT(11)
+#define PKTDEC_ACR_RCV_MASK_N BIT(3)
+#define PKT_2_INT_CLEAR 0x50a8
+#define PKTDEC_AVIIF_RCV_CLEAR BIT(11)
+#define PKTDEC_ACR_RCV_CLEAR BIT(3)
+#define SCDC_INT_STATUS 0x50c0
+#define SCDC_INT_MASK_N 0x50c4
+#define SCDC_INT_CLEAR 0x50c8
+#define SCDCTMDSCCFG_CHG BIT(2)
+
+#define CEC_INT_STATUS 0x5100
+#define CEC_INT_MASK_N 0x5104
+#define CEC_INT_CLEAR 0x5108
+
+#endif
diff --git a/drivers/media/platform/synopsys/hdmirx/snps_hdmirx_cec.c b/drivers/media/platform/synopsys/hdmirx/snps_hdmirx_cec.c
new file mode 100644
index 000000000000..8e470c0376d6
--- /dev/null
+++ b/drivers/media/platform/synopsys/hdmirx/snps_hdmirx_cec.c
@@ -0,0 +1,275 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2021 Rockchip Electronics Co. Ltd.
+ *
+ * Author: Shunqing Chen <csq@rock-chips.com>
+ */
+
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include <media/cec.h>
+
+#include "snps_hdmirx.h"
+#include "snps_hdmirx_cec.h"
+
+static void hdmirx_cec_write(struct hdmirx_cec *cec, int reg, u32 val)
+{
+ cec->ops->write(cec->hdmirx, reg, val);
+}
+
+static u32 hdmirx_cec_read(struct hdmirx_cec *cec, int reg)
+{
+ return cec->ops->read(cec->hdmirx, reg);
+}
+
+static void hdmirx_cec_update_bits(struct hdmirx_cec *cec, int reg, u32 mask,
+ u32 data)
+{
+ u32 val = hdmirx_cec_read(cec, reg) & ~mask;
+
+ val |= (data & mask);
+ hdmirx_cec_write(cec, reg, val);
+}
+
+static int hdmirx_cec_log_addr(struct cec_adapter *adap, u8 logical_addr)
+{
+ struct hdmirx_cec *cec = cec_get_drvdata(adap);
+
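+ /* bit 15 keeps the broadcast (unregistered) address enabled alongside our own */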
+ if (logical_addr == CEC_LOG_ADDR_INVALID)
+ cec->addresses = 0;
+ else
+ cec->addresses |= BIT(logical_addr) | BIT(15);
+
+ hdmirx_cec_write(cec, CEC_ADDR, cec->addresses);
+
+ return 0;
+}
+
+/*
+ * signal_free_time is handled by the Synopsys DesignWare
+ * HDMI RX controller hardware.
+ */
+static int hdmirx_cec_transmit(struct cec_adapter *adap, u8 attempts,
+ u32 signal_free_time, struct cec_msg *msg)
+{
+ struct hdmirx_cec *cec = cec_get_drvdata(adap);
+ u32 data[4] = {0};
+ int i, data_len, msg_len;
+
+ msg_len = msg->len;
+
+ hdmirx_cec_write(cec, CEC_TX_COUNT, msg_len - 1);
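+ /* pack the message bytes into 32-bit words, four bytes per TX data register */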
+ for (i = 0; i < msg_len; i++)
+ data[i / 4] |= msg->msg[i] << ((i % 4) * 8);
+
+ data_len = DIV_ROUND_UP(msg_len, 4);
+
+ for (i = 0; i < data_len; i++)
+ hdmirx_cec_write(cec, CEC_TX_DATA3_0 + i * 4, data[i]);
+
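+ /* writing 1 to CEC_TX_CONTROL starts the transmission */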
+ hdmirx_cec_write(cec, CEC_TX_CONTROL, 0x1);
+
+ return 0;
+}
+
+static irqreturn_t hdmirx_cec_hardirq(int irq, void *data)
+{
+ struct cec_adapter *adap = data;
+ struct hdmirx_cec *cec = cec_get_drvdata(adap);
+ u32 stat = hdmirx_cec_read(cec, CEC_INT_STATUS);
+ irqreturn_t ret = IRQ_HANDLED;
+ u32 val;
+
+ if (!stat)
+ return IRQ_NONE;
+
+ hdmirx_cec_write(cec, CEC_INT_CLEAR, stat);
+
+ if (stat & CECTX_LINE_ERR) {
+ cec->tx_status = CEC_TX_STATUS_ERROR;
+ cec->tx_done = true;
+ ret = IRQ_WAKE_THREAD;
+ } else if (stat & CECTX_DONE) {
+ cec->tx_status = CEC_TX_STATUS_OK;
+ cec->tx_done = true;
+ ret = IRQ_WAKE_THREAD;
+ } else if (stat & CECTX_NACK) {
+ cec->tx_status = CEC_TX_STATUS_NACK;
+ cec->tx_done = true;
+ ret = IRQ_WAKE_THREAD;
+ } else if (stat & CECTX_ARBLOST) {
+ cec->tx_status = CEC_TX_STATUS_ARB_LOST;
+ cec->tx_done = true;
+ ret = IRQ_WAKE_THREAD;
+ }
+
+ if (stat & CECRX_EOM) {
+ unsigned int len, i;
+
+ val = hdmirx_cec_read(cec, CEC_RX_COUNT_STATUS);
+ /* rxbuffer locked status */
+ if (val & 0x80)
+ return ret;
+
+ len = (val & 0xf) + 1;
+ if (len > sizeof(cec->rx_msg.msg))
+ len = sizeof(cec->rx_msg.msg);
+
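+ /* unpack the received bytes from the 32-bit RX data registers */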
+ for (i = 0; i < len; i++) {
+ if (!(i % 4))
+ val = hdmirx_cec_read(cec, CEC_RX_DATA3_0 + i / 4 * 4);
+ cec->rx_msg.msg[i] = (val >> ((i % 4) * 8)) & 0xff;
+ }
+
+ cec->rx_msg.len = len;
+ smp_wmb(); /* make rx_msg visible before setting rx_done */
+ cec->rx_done = true;
+ hdmirx_cec_write(cec, CEC_LOCK_CONTROL, 0x1);
+
+ ret = IRQ_WAKE_THREAD;
+ }
+
+ return ret;
+}
+
+static irqreturn_t hdmirx_cec_thread(int irq, void *data)
+{
+ struct cec_adapter *adap = data;
+ struct hdmirx_cec *cec = cec_get_drvdata(adap);
+
+ if (cec->tx_done) {
+ cec->tx_done = false;
+ cec_transmit_attempt_done(adap, cec->tx_status);
+ }
+ if (cec->rx_done) {
+ cec->rx_done = false;
+ smp_rmb(); /* pairs with smp_wmb(): read rx_done before rx_msg */
+ cec_received_msg(adap, &cec->rx_msg);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int hdmirx_cec_enable(struct cec_adapter *adap, bool enable)
+{
+ struct hdmirx_cec *cec = cec_get_drvdata(adap);
+
+ if (!enable) {
+ hdmirx_cec_write(cec, CEC_INT_MASK_N, 0);
+ hdmirx_cec_write(cec, CEC_INT_CLEAR, 0);
+ if (cec->ops->disable)
+ cec->ops->disable(cec->hdmirx);
+ } else {
+ unsigned int irqs;
+
+ hdmirx_cec_log_addr(cec->adap, CEC_LOG_ADDR_INVALID);
+ if (cec->ops->enable)
+ cec->ops->enable(cec->hdmirx);
+ hdmirx_cec_update_bits(cec, GLOBAL_SWENABLE, CEC_ENABLE, CEC_ENABLE);
+
+ irqs = CECTX_LINE_ERR | CECTX_NACK | CECRX_EOM | CECTX_DONE;
+ hdmirx_cec_write(cec, CEC_INT_MASK_N, irqs);
+ }
+
+ return 0;
+}
+
+static const struct cec_adap_ops hdmirx_cec_ops = {
+ .adap_enable = hdmirx_cec_enable,
+ .adap_log_addr = hdmirx_cec_log_addr,
+ .adap_transmit = hdmirx_cec_transmit,
+};
+
+static void hdmirx_cec_del(void *data)
+{
+ struct hdmirx_cec *cec = data;
+
+ cec_delete_adapter(cec->adap);
+}
+
+struct hdmirx_cec *snps_hdmirx_cec_register(struct hdmirx_cec_data *data)
+{
+ struct hdmirx_cec *cec;
+ unsigned int irqs;
+ int ret;
+
+ /*
+ * Our device is just a convenience - we want to link to the real
+ * hardware device here, so that userspace can see the association
+ * between the HDMI hardware and its associated CEC chardev.
+ */
+ cec = devm_kzalloc(data->dev, sizeof(*cec), GFP_KERNEL);
+ if (!cec)
+ return ERR_PTR(-ENOMEM);
+
+ cec->dev = data->dev;
+ cec->irq = data->irq;
+ cec->ops = data->ops;
+ cec->hdmirx = data->hdmirx;
+
+ hdmirx_cec_update_bits(cec, GLOBAL_SWENABLE, CEC_ENABLE, CEC_ENABLE);
+ hdmirx_cec_update_bits(cec, CEC_CONFIG, RX_AUTO_DRIVE_ACKNOWLEDGE,
+ RX_AUTO_DRIVE_ACKNOWLEDGE);
+
+ hdmirx_cec_write(cec, CEC_TX_COUNT, 0);
+ hdmirx_cec_write(cec, CEC_INT_MASK_N, 0);
+ hdmirx_cec_write(cec, CEC_INT_CLEAR, ~0);
+
+ cec->adap = cec_allocate_adapter(&hdmirx_cec_ops, cec, "snps-hdmirx",
+ CEC_CAP_DEFAULTS | CEC_CAP_MONITOR_ALL,
+ CEC_MAX_LOG_ADDRS);
+ if (IS_ERR(cec->adap)) {
+ dev_err(cec->dev, "cec adapter allocation failed\n");
+ return ERR_CAST(cec->adap);
+ }
+
+ /* override the module pointer */
+ cec->adap->owner = THIS_MODULE;
+
+ ret = devm_add_action(cec->dev, hdmirx_cec_del, cec);
+ if (ret) {
+ cec_delete_adapter(cec->adap);
+ return ERR_PTR(ret);
+ }
+
+ irq_set_status_flags(cec->irq, IRQ_NOAUTOEN);
+
+ ret = devm_request_threaded_irq(cec->dev, cec->irq,
+ hdmirx_cec_hardirq,
+ hdmirx_cec_thread, IRQF_ONESHOT,
+ "rk_hdmirx_cec", cec->adap);
+ if (ret) {
+ dev_err(cec->dev, "cec irq request failed\n");
+ return ERR_PTR(ret);
+ }
+
+ ret = cec_register_adapter(cec->adap, cec->dev);
+ if (ret < 0) {
+ dev_err(cec->dev, "cec adapter registration failed\n");
+ return ERR_PTR(ret);
+ }
+
+ irqs = CECTX_LINE_ERR | CECTX_NACK | CECRX_EOM | CECTX_DONE;
+ hdmirx_cec_write(cec, CEC_INT_MASK_N, irqs);
+
+ /*
+ * CEC documentation says we must not call cec_delete_adapter
+ * after a successful call to cec_register_adapter().
+ */
+ devm_remove_action(cec->dev, hdmirx_cec_del, cec);
+
+ enable_irq(cec->irq);
+
+ return cec;
+}
+
+void snps_hdmirx_cec_unregister(struct hdmirx_cec *cec)
+{
+ disable_irq(cec->irq);
+
+ cec_unregister_adapter(cec->adap);
+}
diff --git a/drivers/media/platform/synopsys/hdmirx/snps_hdmirx_cec.h b/drivers/media/platform/synopsys/hdmirx/snps_hdmirx_cec.h
new file mode 100644
index 000000000000..1b10da5b8fd4
--- /dev/null
+++ b/drivers/media/platform/synopsys/hdmirx/snps_hdmirx_cec.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2021 Rockchip Electronics Co. Ltd.
+ *
+ * Author: Shunqing Chen <csq@rock-chips.com>
+ */
+
+#ifndef DW_HDMI_RX_CEC_H
+#define DW_HDMI_RX_CEC_H
+
+struct snps_hdmirx_dev;
+
+struct hdmirx_cec_ops {
+ void (*write)(struct snps_hdmirx_dev *hdmirx_dev, int reg, u32 val);
+ u32 (*read)(struct snps_hdmirx_dev *hdmirx_dev, int reg);
+ void (*enable)(struct snps_hdmirx_dev *hdmirx);
+ void (*disable)(struct snps_hdmirx_dev *hdmirx);
+};
+
+struct hdmirx_cec_data {
+ struct snps_hdmirx_dev *hdmirx;
+ const struct hdmirx_cec_ops *ops;
+ struct device *dev;
+ int irq;
+};
+
+struct hdmirx_cec {
+ struct snps_hdmirx_dev *hdmirx;
+ struct device *dev;
+ const struct hdmirx_cec_ops *ops;
+ u32 addresses;
+ struct cec_adapter *adap;
+ struct cec_msg rx_msg;
+ unsigned int tx_status;
+ bool tx_done;
+ bool rx_done;
+ int irq;
+};
+
+struct hdmirx_cec *snps_hdmirx_cec_register(struct hdmirx_cec_data *data);
+void snps_hdmirx_cec_unregister(struct hdmirx_cec *cec);
+
+#endif /* DW_HDMI_RX_CEC_H */
diff --git a/drivers/media/platform/ti/cal/cal-camerarx.c b/drivers/media/platform/ti/cal/cal-camerarx.c
index 42dfe08b765f..9cc875665695 100644
--- a/drivers/media/platform/ti/cal/cal-camerarx.c
+++ b/drivers/media/platform/ti/cal/cal-camerarx.c
@@ -65,7 +65,8 @@ static s64 cal_camerarx_get_ext_link_freq(struct cal_camerarx *phy)
bpp = fmtinfo->bpp;
- freq = v4l2_get_link_freq(phy->source->ctrl_handler, bpp, 2 * num_lanes);
+ freq = v4l2_get_link_freq(&phy->source->entity.pads[phy->source_pad],
+ bpp, 2 * num_lanes);
if (freq < 0) {
phy_err(phy, "failed to get link freq for subdev '%s'\n",
phy->source->name);
diff --git a/drivers/media/platform/ti/cal/cal.c b/drivers/media/platform/ti/cal/cal.c
index 4bd2092e0255..6cb3e5f49686 100644
--- a/drivers/media/platform/ti/cal/cal.c
+++ b/drivers/media/platform/ti/cal/cal.c
@@ -798,7 +798,6 @@ static int cal_async_notifier_bound(struct v4l2_async_notifier *notifier,
return 0;
}
- phy->source = subdev;
phy_dbg(1, phy, "Using source %s for capture\n", subdev->name);
pad = media_entity_get_fwnode_pad(&subdev->entity,
@@ -820,6 +819,9 @@ static int cal_async_notifier_bound(struct v4l2_async_notifier *notifier,
return ret;
}
+ phy->source = subdev;
+ phy->source_pad = pad;
+
return 0;
}
diff --git a/drivers/media/platform/ti/cal/cal.h b/drivers/media/platform/ti/cal/cal.h
index 0856297adc0b..72a246a64d9e 100644
--- a/drivers/media/platform/ti/cal/cal.h
+++ b/drivers/media/platform/ti/cal/cal.h
@@ -174,6 +174,7 @@ struct cal_camerarx {
struct device_node *source_ep_node;
struct device_node *source_node;
struct v4l2_subdev *source;
+ unsigned int source_pad;
struct v4l2_subdev subdev;
struct media_pad pads[CAL_CAMERARX_NUM_PADS];
diff --git a/drivers/media/platform/ti/omap3isp/isp.c b/drivers/media/platform/ti/omap3isp/isp.c
index 405ca215179d..f51cf6119e97 100644
--- a/drivers/media/platform/ti/omap3isp/isp.c
+++ b/drivers/media/platform/ti/omap3isp/isp.c
@@ -1475,43 +1475,6 @@ void omap3isp_put(struct isp_device *isp)
* Platform device driver
*/
-/*
- * omap3isp_print_status - Prints the values of the ISP Control Module registers
- * @isp: OMAP3 ISP device
- */
-#define ISP_PRINT_REGISTER(isp, name)\
- dev_dbg(isp->dev, "###ISP " #name "=0x%08x\n", \
- isp_reg_readl(isp, OMAP3_ISP_IOMEM_MAIN, ISP_##name))
-#define SBL_PRINT_REGISTER(isp, name)\
- dev_dbg(isp->dev, "###SBL " #name "=0x%08x\n", \
- isp_reg_readl(isp, OMAP3_ISP_IOMEM_SBL, ISPSBL_##name))
-
-void omap3isp_print_status(struct isp_device *isp)
-{
- dev_dbg(isp->dev, "-------------ISP Register dump--------------\n");
-
- ISP_PRINT_REGISTER(isp, SYSCONFIG);
- ISP_PRINT_REGISTER(isp, SYSSTATUS);
- ISP_PRINT_REGISTER(isp, IRQ0ENABLE);
- ISP_PRINT_REGISTER(isp, IRQ0STATUS);
- ISP_PRINT_REGISTER(isp, TCTRL_GRESET_LENGTH);
- ISP_PRINT_REGISTER(isp, TCTRL_PSTRB_REPLAY);
- ISP_PRINT_REGISTER(isp, CTRL);
- ISP_PRINT_REGISTER(isp, TCTRL_CTRL);
- ISP_PRINT_REGISTER(isp, TCTRL_FRAME);
- ISP_PRINT_REGISTER(isp, TCTRL_PSTRB_DELAY);
- ISP_PRINT_REGISTER(isp, TCTRL_STRB_DELAY);
- ISP_PRINT_REGISTER(isp, TCTRL_SHUT_DELAY);
- ISP_PRINT_REGISTER(isp, TCTRL_PSTRB_LENGTH);
- ISP_PRINT_REGISTER(isp, TCTRL_STRB_LENGTH);
- ISP_PRINT_REGISTER(isp, TCTRL_SHUT_LENGTH);
-
- SBL_PRINT_REGISTER(isp, PCR);
- SBL_PRINT_REGISTER(isp, SDR_REQ_EXP);
-
- dev_dbg(isp->dev, "--------------------------------------------\n");
-}
-
#ifdef CONFIG_PM
/*
@@ -1961,6 +1924,13 @@ static int isp_attach_iommu(struct isp_device *isp)
struct dma_iommu_mapping *mapping;
int ret;
+ /* We always want to replace any default mapping from the arch code */
+ mapping = to_dma_iommu_mapping(isp->dev);
+ if (mapping) {
+ arm_iommu_detach_device(isp->dev);
+ arm_iommu_release_mapping(mapping);
+ }
+
/*
* Create the ARM mapping, used by the ARM DMA mapping core to allocate
* VAs. This will allocate a corresponding IOMMU domain.
@@ -2272,18 +2242,14 @@ static int isp_probe(struct platform_device *pdev)
if (ret)
goto error_release_isp;
- isp->syscon = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
- "syscon");
+ isp->syscon = syscon_regmap_lookup_by_phandle_args(pdev->dev.of_node,
+ "syscon", 1,
+ &isp->syscon_offset);
if (IS_ERR(isp->syscon)) {
ret = PTR_ERR(isp->syscon);
goto error_release_isp;
}
- ret = of_property_read_u32_index(pdev->dev.of_node,
- "syscon", 1, &isp->syscon_offset);
- if (ret)
- goto error_release_isp;
-
isp->autoidle = autoidle;
mutex_init(&isp->isp_mutex);
diff --git a/drivers/media/platform/ti/omap3isp/isp.h b/drivers/media/platform/ti/omap3isp/isp.h
index b4793631ad97..60acf3401ac9 100644
--- a/drivers/media/platform/ti/omap3isp/isp.h
+++ b/drivers/media/platform/ti/omap3isp/isp.h
@@ -260,8 +260,6 @@ void omap3isp_configure_bridge(struct isp_device *isp,
struct isp_device *omap3isp_get(struct isp_device *isp);
void omap3isp_put(struct isp_device *isp);
-void omap3isp_print_status(struct isp_device *isp);
-
void omap3isp_sbl_enable(struct isp_device *isp, enum isp_sbl_resource res);
void omap3isp_sbl_disable(struct isp_device *isp, enum isp_sbl_resource res);
diff --git a/drivers/media/platform/verisilicon/hantro_g2_hevc_dec.c b/drivers/media/platform/verisilicon/hantro_g2_hevc_dec.c
index 85a44143b378..0e212198dd65 100644
--- a/drivers/media/platform/verisilicon/hantro_g2_hevc_dec.c
+++ b/drivers/media/platform/verisilicon/hantro_g2_hevc_dec.c
@@ -518,6 +518,7 @@ static void set_buffers(struct hantro_ctx *ctx)
hantro_reg_write(vpu, &g2_stream_len, src_len);
hantro_reg_write(vpu, &g2_strm_buffer_len, src_buf_len);
hantro_reg_write(vpu, &g2_strm_start_offset, 0);
+ hantro_reg_write(vpu, &g2_start_bit, 0);
hantro_reg_write(vpu, &g2_write_mvs_e, 1);
hantro_write_addr(vpu, G2_TILE_SIZES_ADDR, ctx->hevc_dec.tile_sizes.dma);
diff --git a/drivers/media/platform/verisilicon/hantro_g2_vp9_dec.c b/drivers/media/platform/verisilicon/hantro_g2_vp9_dec.c
index 342e543dee4c..82a478ac645e 100644
--- a/drivers/media/platform/verisilicon/hantro_g2_vp9_dec.c
+++ b/drivers/media/platform/verisilicon/hantro_g2_vp9_dec.c
@@ -776,15 +776,15 @@ config_source(struct hantro_ctx *ctx, const struct v4l2_ctrl_vp9_frame *dec_para
struct vb2_v4l2_buffer *vb2_src)
{
dma_addr_t stream_base, tmp_addr;
- unsigned int headres_size;
+ unsigned int headers_size;
u32 src_len, start_bit, src_buf_len;
- headres_size = dec_params->uncompressed_header_size
+ headers_size = dec_params->uncompressed_header_size
+ dec_params->compressed_header_size;
stream_base = vb2_dma_contig_plane_dma_addr(&vb2_src->vb2_buf, 0);
- tmp_addr = stream_base + headres_size;
+ tmp_addr = stream_base + headers_size;
if (ctx->dev->variant->legacy_regs)
hantro_write_addr(ctx->dev, G2_STREAM_ADDR, (tmp_addr & ~0xf));
else
@@ -794,7 +794,7 @@ config_source(struct hantro_ctx *ctx, const struct v4l2_ctrl_vp9_frame *dec_para
hantro_reg_write(ctx->dev, &g2_start_bit, start_bit);
src_len = vb2_get_plane_payload(&vb2_src->vb2_buf, 0);
- src_len += start_bit / 8 - headres_size;
+ src_len += start_bit / 8 - headers_size;
hantro_reg_write(ctx->dev, &g2_stream_len, src_len);
if (!ctx->dev->variant->legacy_regs) {
diff --git a/drivers/media/platform/xilinx/xilinx-tpg.c b/drivers/media/platform/xilinx/xilinx-tpg.c
index cb93711ea3e3..7deec6e37edc 100644
--- a/drivers/media/platform/xilinx/xilinx-tpg.c
+++ b/drivers/media/platform/xilinx/xilinx-tpg.c
@@ -722,7 +722,6 @@ static int xtpg_parse_of(struct xtpg_device *xtpg)
format = xvip_of_get_format(port);
if (IS_ERR(format)) {
dev_err(dev, "invalid format in DT");
- of_node_put(port);
return PTR_ERR(format);
}
@@ -731,7 +730,6 @@ static int xtpg_parse_of(struct xtpg_device *xtpg)
xtpg->vip_format = format;
} else if (xtpg->vip_format != format) {
dev_err(dev, "in/out format mismatch in DT");
- of_node_put(port);
return -EINVAL;
}
diff --git a/drivers/media/radio/radio-aztech.c b/drivers/media/radio/radio-aztech.c
index 4909c337b027..d989c0b3966f 100644
--- a/drivers/media/radio/radio-aztech.c
+++ b/drivers/media/radio/radio-aztech.c
@@ -2,7 +2,7 @@
/*
* radio-aztech.c - Aztech radio card driver
*
- * Converted to the radio-isa framework by Hans Verkuil <hans.verkuil@xs4all.nl>
+ * Converted to the radio-isa framework by Hans Verkuil <hverkuil@xs4all.nl>
* Converted to V4L2 API by Mauro Carvalho Chehab <mchehab@kernel.org>
* Adapted to support the Video for Linux API by
* Russell Kroll <rkroll@exploits.org>. Based on original tuner code by:
diff --git a/drivers/media/radio/radio-wl1273.c b/drivers/media/radio/radio-wl1273.c
index 511a8ede05ec..f55217ccf2b8 100644
--- a/drivers/media/radio/radio-wl1273.c
+++ b/drivers/media/radio/radio-wl1273.c
@@ -1407,7 +1407,7 @@ static inline struct wl1273_device *to_radio(struct v4l2_ctrl *ctrl)
return container_of(ctrl->handler, struct wl1273_device, ctrl_handler);
}
-static int wl1273_fm_vidioc_s_ctrl(struct v4l2_ctrl *ctrl)
+static int wl1273_fm_s_ctrl(struct v4l2_ctrl *ctrl)
{
struct wl1273_device *radio = to_radio(ctrl);
struct wl1273_core *core = radio->core;
@@ -1945,7 +1945,7 @@ static void wl1273_vdev_release(struct video_device *dev)
}
static const struct v4l2_ctrl_ops wl1273_ctrl_ops = {
- .s_ctrl = wl1273_fm_vidioc_s_ctrl,
+ .s_ctrl = wl1273_fm_s_ctrl,
.g_volatile_ctrl = wl1273_fm_g_volatile_ctrl,
};
diff --git a/drivers/media/rc/keymaps/Makefile b/drivers/media/rc/keymaps/Makefile
index 7fdf0d9edbfd..d04572627cdd 100644
--- a/drivers/media/rc/keymaps/Makefile
+++ b/drivers/media/rc/keymaps/Makefile
@@ -106,6 +106,7 @@ obj-$(CONFIG_RC_MAP) += \
rc-rc6-mce.o \
rc-real-audio-220-32-keys.o \
rc-reddo.o \
+ rc-siemens-gigaset-rc20.o \
rc-snapstream-firefly.o \
rc-streamzap.o \
rc-su3000.o \
diff --git a/drivers/media/rc/keymaps/rc-siemens-gigaset-rc20.c b/drivers/media/rc/keymaps/rc-siemens-gigaset-rc20.c
new file mode 100644
index 000000000000..defc77932e10
--- /dev/null
+++ b/drivers/media/rc/keymaps/rc-siemens-gigaset-rc20.c
@@ -0,0 +1,71 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* rc-siemens-gigaset-rc20.c - Keytable for the Siemens Gigaset RC 20 remote
+ *
+ * Copyright (c) 2025 by Michael Klein
+ */
+
+#include <media/rc-map.h>
+#include <linux/module.h>
+
+static struct rc_map_table siemens_gigaset_rc20[] = {
+ { 0x1501, KEY_POWER },
+ { 0x1502, KEY_MUTE },
+ { 0x1503, KEY_NUMERIC_1 },
+ { 0x1504, KEY_NUMERIC_2 },
+ { 0x1505, KEY_NUMERIC_3 },
+ { 0x1506, KEY_NUMERIC_4 },
+ { 0x1507, KEY_NUMERIC_5 },
+ { 0x1508, KEY_NUMERIC_6 },
+ { 0x1509, KEY_NUMERIC_7 },
+ { 0x150a, KEY_NUMERIC_8 },
+ { 0x150b, KEY_NUMERIC_9 },
+ { 0x150c, KEY_NUMERIC_0 },
+ { 0x150d, KEY_UP },
+ { 0x150e, KEY_LEFT },
+ { 0x150f, KEY_OK },
+ { 0x1510, KEY_RIGHT },
+ { 0x1511, KEY_DOWN },
+ { 0x1512, KEY_SHUFFLE }, /* double-arrow */
+ { 0x1513, KEY_EXIT },
+ { 0x1514, KEY_RED },
+ { 0x1515, KEY_GREEN },
+ { 0x1516, KEY_YELLOW }, /* OPT */
+ { 0x1517, KEY_BLUE },
+ { 0x1518, KEY_MENU },
+ { 0x1519, KEY_TEXT },
+ { 0x151a, KEY_MODE }, /* TV/Radio */
+
+ { 0x1521, KEY_EPG },
+ { 0x1522, KEY_FAVORITES },
+ { 0x1523, KEY_CHANNELUP },
+ { 0x1524, KEY_CHANNELDOWN },
+ { 0x1525, KEY_VOLUMEUP },
+ { 0x1526, KEY_VOLUMEDOWN },
+ { 0x1527, KEY_INFO },
+};
+
+static struct rc_map_list siemens_gigaset_rc20_map = {
+ .map = {
+ .scan = siemens_gigaset_rc20,
+ .size = ARRAY_SIZE(siemens_gigaset_rc20),
+ .rc_proto = RC_PROTO_RC5,
+ .name = RC_MAP_SIEMENS_GIGASET_RC20,
+ }
+};
+
+static int __init init_rc_map_siemens_gigaset_rc20(void)
+{
+ return rc_map_register(&siemens_gigaset_rc20_map);
+}
+
+static void __exit exit_rc_map_siemens_gigaset_rc20(void)
+{
+ rc_map_unregister(&siemens_gigaset_rc20_map);
+}
+
+module_init(init_rc_map_siemens_gigaset_rc20)
+module_exit(exit_rc_map_siemens_gigaset_rc20)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Michael Klein");
+MODULE_DESCRIPTION("Siemens Gigaset RC20 remote keytable");
diff --git a/drivers/media/rc/pwm-ir-tx.c b/drivers/media/rc/pwm-ir-tx.c
index fe368aebbc13..84533fdd61aa 100644
--- a/drivers/media/rc/pwm-ir-tx.c
+++ b/drivers/media/rc/pwm-ir-tx.c
@@ -172,8 +172,7 @@ static int pwm_ir_probe(struct platform_device *pdev)
rcdev->tx_ir = pwm_ir_tx_sleep;
} else {
init_completion(&pwm_ir->tx_done);
- hrtimer_init(&pwm_ir->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- pwm_ir->timer.function = pwm_ir_timer;
+ hrtimer_setup(&pwm_ir->timer, pwm_ir_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
rcdev->tx_ir = pwm_ir_tx_atomic;
}
diff --git a/drivers/media/rc/rc-core-priv.h b/drivers/media/rc/rc-core-priv.h
index 7df949fc65e2..4967d87ec4b7 100644
--- a/drivers/media/rc/rc-core-priv.h
+++ b/drivers/media/rc/rc-core-priv.h
@@ -85,8 +85,8 @@ struct ir_raw_event_ctrl {
struct rc6_dec {
int state;
u8 header;
- u32 body;
bool toggle;
+ u32 body;
unsigned count;
unsigned wanted_bits;
} rc6;
@@ -127,8 +127,8 @@ struct ir_raw_event_ctrl {
struct mce_kbd_dec {
/* locks key up timer */
spinlock_t keylock;
- struct timer_list rx_timeout;
int state;
+ struct timer_list rx_timeout;
u8 header;
u32 body;
unsigned count;
diff --git a/drivers/media/rc/streamzap.c b/drivers/media/rc/streamzap.c
index 9b209e687f25..d3b48a0dd1f4 100644
--- a/drivers/media/rc/streamzap.c
+++ b/drivers/media/rc/streamzap.c
@@ -138,39 +138,10 @@ static void sz_push_half_space(struct streamzap_ir *sz,
sz_push_full_space(sz, value & SZ_SPACE_MASK);
}
-/*
- * streamzap_callback - usb IRQ handler callback
- *
- * This procedure is invoked on reception of data from
- * the usb remote.
- */
-static void streamzap_callback(struct urb *urb)
+static void sz_process_ir_data(struct streamzap_ir *sz, int len)
{
- struct streamzap_ir *sz;
unsigned int i;
- int len;
- if (!urb)
- return;
-
- sz = urb->context;
- len = urb->actual_length;
-
- switch (urb->status) {
- case -ECONNRESET:
- case -ENOENT:
- case -ESHUTDOWN:
- /*
- * this urb is terminated, clean up.
- * sz might already be invalid at this point
- */
- dev_err(sz->dev, "urb terminated, status: %d\n", urb->status);
- return;
- default:
- break;
- }
-
- dev_dbg(sz->dev, "%s: received urb, len %d\n", __func__, len);
for (i = 0; i < len; i++) {
dev_dbg(sz->dev, "sz->buf_in[%d]: %x\n",
i, (unsigned char)sz->buf_in[i]);
@@ -219,6 +190,43 @@ static void streamzap_callback(struct urb *urb)
}
ir_raw_event_handle(sz->rdev);
+}
+
+/*
+ * streamzap_callback - usb IRQ handler callback
+ *
+ * This procedure is invoked on reception of data from
+ * the usb remote.
+ */
+static void streamzap_callback(struct urb *urb)
+{
+ struct streamzap_ir *sz;
+ int len;
+
+ if (!urb)
+ return;
+
+ sz = urb->context;
+ len = urb->actual_length;
+
+ switch (urb->status) {
+ case 0:
+ dev_dbg(sz->dev, "%s: received urb, len %d\n", __func__, len);
+ sz_process_ir_data(sz, len);
+ break;
+ case -ECONNRESET:
+ case -ENOENT:
+ case -ESHUTDOWN:
+ /*
+ * this urb is terminated, clean up.
+ * sz might already be invalid at this point
+ */
+ dev_err(sz->dev, "urb terminated, status: %d\n", urb->status);
+ return;
+ default:
+ break;
+ }
+
usb_submit_urb(urb, GFP_ATOMIC);
}
@@ -385,8 +393,8 @@ static void streamzap_disconnect(struct usb_interface *interface)
if (!sz)
return;
- rc_unregister_device(sz->rdev);
usb_kill_urb(sz->urb_in);
+ rc_unregister_device(sz->rdev);
usb_free_urb(sz->urb_in);
usb_free_coherent(usbdev, sz->buf_in_len, sz->buf_in, sz->dma_in);
diff --git a/drivers/media/test-drivers/vim2m.c b/drivers/media/test-drivers/vim2m.c
index 6c24dcf27eb0..0fe97e208c02 100644
--- a/drivers/media/test-drivers/vim2m.c
+++ b/drivers/media/test-drivers/vim2m.c
@@ -1314,9 +1314,6 @@ static int vim2m_probe(struct platform_device *pdev)
vfd->v4l2_dev = &dev->v4l2_dev;
video_set_drvdata(vfd, dev);
- v4l2_info(&dev->v4l2_dev,
- "Device registered as /dev/video%d\n", vfd->num);
-
platform_set_drvdata(pdev, dev);
dev->m2m_dev = v4l2_m2m_init(&m2m_ops);
@@ -1343,6 +1340,9 @@ static int vim2m_probe(struct platform_device *pdev)
goto error_m2m;
}
+ v4l2_info(&dev->v4l2_dev,
+ "Device registered as /dev/video%d\n", vfd->num);
+
#ifdef CONFIG_MEDIA_CONTROLLER
ret = v4l2_m2m_register_media_controller(dev->m2m_dev, vfd,
MEDIA_ENT_F_PROC_VIDEO_SCALER);
diff --git a/drivers/media/test-drivers/vimc/vimc-streamer.c b/drivers/media/test-drivers/vimc/vimc-streamer.c
index 807551a5143b..15d863f97cbf 100644
--- a/drivers/media/test-drivers/vimc/vimc-streamer.c
+++ b/drivers/media/test-drivers/vimc/vimc-streamer.c
@@ -59,6 +59,12 @@ static void vimc_streamer_pipeline_terminate(struct vimc_stream *stream)
continue;
sd = media_entity_to_v4l2_subdev(ved->ent);
+ /*
+ * Do not call .s_stream() to stop an already
+ * stopped/unstarted subdev.
+ */
+ if (!v4l2_subdev_is_streaming(sd))
+ continue;
v4l2_subdev_call(sd, video, s_stream, 0);
}
}
diff --git a/drivers/media/test-drivers/visl/visl-core.c b/drivers/media/test-drivers/visl/visl-core.c
index 01c964ea6f76..5bf3136b36eb 100644
--- a/drivers/media/test-drivers/visl/visl-core.c
+++ b/drivers/media/test-drivers/visl/visl-core.c
@@ -161,9 +161,15 @@ static const struct visl_ctrl_desc visl_h264_ctrl_descs[] = {
},
{
.cfg.id = V4L2_CID_STATELESS_H264_DECODE_MODE,
+ .cfg.min = V4L2_STATELESS_H264_DECODE_MODE_SLICE_BASED,
+ .cfg.max = V4L2_STATELESS_H264_DECODE_MODE_FRAME_BASED,
+ .cfg.def = V4L2_STATELESS_H264_DECODE_MODE_SLICE_BASED,
},
{
.cfg.id = V4L2_CID_STATELESS_H264_START_CODE,
+ .cfg.min = V4L2_STATELESS_H264_START_CODE_NONE,
+ .cfg.max = V4L2_STATELESS_H264_START_CODE_ANNEX_B,
+ .cfg.def = V4L2_STATELESS_H264_START_CODE_NONE,
},
{
.cfg.id = V4L2_CID_STATELESS_H264_SLICE_PARAMS,
@@ -198,9 +204,15 @@ static const struct visl_ctrl_desc visl_hevc_ctrl_descs[] = {
},
{
.cfg.id = V4L2_CID_STATELESS_HEVC_DECODE_MODE,
+ .cfg.min = V4L2_STATELESS_HEVC_DECODE_MODE_SLICE_BASED,
+ .cfg.max = V4L2_STATELESS_HEVC_DECODE_MODE_FRAME_BASED,
+ .cfg.def = V4L2_STATELESS_HEVC_DECODE_MODE_SLICE_BASED,
},
{
.cfg.id = V4L2_CID_STATELESS_HEVC_START_CODE,
+ .cfg.min = V4L2_STATELESS_HEVC_START_CODE_NONE,
+ .cfg.max = V4L2_STATELESS_HEVC_START_CODE_ANNEX_B,
+ .cfg.def = V4L2_STATELESS_HEVC_START_CODE_NONE,
},
{
.cfg.id = V4L2_CID_STATELESS_HEVC_ENTRY_POINT_OFFSETS,
diff --git a/drivers/media/test-drivers/vivid/Kconfig b/drivers/media/test-drivers/vivid/Kconfig
index ec2e71d76965..e95edc0f22bf 100644
--- a/drivers/media/test-drivers/vivid/Kconfig
+++ b/drivers/media/test-drivers/vivid/Kconfig
@@ -1,9 +1,8 @@
# SPDX-License-Identifier: GPL-2.0-only
config VIDEO_VIVID
tristate "Virtual Video Test Driver"
- depends on VIDEO_DEV && !SPARC32 && !SPARC64 && FB
+ depends on VIDEO_DEV && !SPARC32 && !SPARC64
depends on HAS_DMA
- select FB_IOMEM_HELPERS
select FONT_SUPPORT
select FONT_8x16
select VIDEOBUF2_VMALLOC
@@ -31,6 +30,15 @@ config VIDEO_VIVID_CEC
When selected the vivid module will emulate the optional
HDMI CEC feature.
+config VIDEO_VIVID_OSD
+ bool "Enable Framebuffer for testing Output Overlay"
+ depends on VIDEO_VIVID && FB
+ default y
+ select FB_IOMEM_HELPERS
+ help
+ When selected the vivid module will emulate a framebuffer for
+ testing output overlays.
+
config VIDEO_VIVID_MAX_DEVS
int "Maximum number of devices"
depends on VIDEO_VIVID
diff --git a/drivers/media/test-drivers/vivid/Makefile b/drivers/media/test-drivers/vivid/Makefile
index b12ad0152a3e..284a59e97335 100644
--- a/drivers/media/test-drivers/vivid/Makefile
+++ b/drivers/media/test-drivers/vivid/Makefile
@@ -3,10 +3,13 @@ vivid-objs := vivid-core.o vivid-ctrls.o vivid-vid-common.o vivid-vbi-gen.o \
vivid-vid-cap.o vivid-vid-out.o vivid-kthread-cap.o vivid-kthread-out.o \
vivid-radio-rx.o vivid-radio-tx.o vivid-radio-common.o \
vivid-rds-gen.o vivid-sdr-cap.o vivid-vbi-cap.o vivid-vbi-out.o \
- vivid-osd.o vivid-meta-cap.o vivid-meta-out.o \
+ vivid-meta-cap.o vivid-meta-out.o \
vivid-kthread-touch.o vivid-touch-cap.o
ifeq ($(CONFIG_VIDEO_VIVID_CEC),y)
vivid-objs += vivid-cec.o
endif
+ifeq ($(CONFIG_VIDEO_VIVID_OSD),y)
+ vivid-objs += vivid-osd.o
+endif
obj-$(CONFIG_VIDEO_VIVID) += vivid.o
diff --git a/drivers/media/test-drivers/vivid/vivid-core.c b/drivers/media/test-drivers/vivid/vivid-core.c
index 7477ac8cb955..8d56168c72aa 100644
--- a/drivers/media/test-drivers/vivid/vivid-core.c
+++ b/drivers/media/test-drivers/vivid/vivid-core.c
@@ -125,7 +125,9 @@ MODULE_PARM_DESC(node_types, " node types, default is 0xe1d3d. Bitmask with the
"\t\t bit 8: Video Output node\n"
"\t\t bit 10-11: VBI Output node: 0 = none, 1 = raw vbi, 2 = sliced vbi, 3 = both\n"
"\t\t bit 12: Radio Transmitter node\n"
+#ifdef CONFIG_VIDEO_VIVID_OSD
"\t\t bit 16: Framebuffer for testing output overlays\n"
+#endif
"\t\t bit 17: Metadata Capture node\n"
"\t\t bit 18: Metadata Output node\n"
"\t\t bit 19: Touch Capture node\n");
@@ -1071,9 +1073,11 @@ static int vivid_detect_feature_set(struct vivid_dev *dev, int inst,
/* do we have a modulator? */
*has_modulator = dev->has_radio_tx;
+#ifdef CONFIG_VIDEO_VIVID_OSD
if (dev->has_vid_cap)
/* do we have a framebuffer for overlay testing? */
dev->has_fb = node_type & 0x10000;
+#endif
/* can we do crop/compose/scaling while capturing? */
if (no_error_inj && *ccs_cap == -1)
@@ -1410,8 +1414,6 @@ static int vivid_create_queues(struct vivid_dev *dev)
ret = vivid_fb_init(dev);
if (ret)
return ret;
- v4l2_info(&dev->v4l2_dev, "Framebuffer device registered as fb%d\n",
- dev->fb_info.node);
}
return 0;
}
@@ -2197,12 +2199,8 @@ static void vivid_remove(struct platform_device *pdev)
video_device_node_name(&dev->radio_tx_dev));
video_unregister_device(&dev->radio_tx_dev);
}
- if (dev->has_fb) {
- v4l2_info(&dev->v4l2_dev, "unregistering fb%d\n",
- dev->fb_info.node);
- unregister_framebuffer(&dev->fb_info);
- vivid_fb_release_buffers(dev);
- }
+ if (dev->has_fb)
+ vivid_fb_deinit(dev);
if (dev->has_meta_cap) {
v4l2_info(&dev->v4l2_dev, "unregistering %s\n",
video_device_node_name(&dev->meta_cap_dev));
diff --git a/drivers/media/test-drivers/vivid/vivid-core.h b/drivers/media/test-drivers/vivid/vivid-core.h
index d2d52763b119..571a6c222969 100644
--- a/drivers/media/test-drivers/vivid/vivid-core.h
+++ b/drivers/media/test-drivers/vivid/vivid-core.h
@@ -403,9 +403,11 @@ struct vivid_dev {
int display_byte_stride;
int bits_per_pixel;
int bytes_per_pixel;
+#ifdef CONFIG_VIDEO_VIVID_OSD
struct fb_info fb_info;
struct fb_var_screeninfo fb_defined;
struct fb_fix_screeninfo fb_fix;
+#endif
/* Error injection */
bool disconnect_error;
diff --git a/drivers/media/test-drivers/vivid/vivid-ctrls.c b/drivers/media/test-drivers/vivid/vivid-ctrls.c
index 2b5c8fbcd0a2..e340df0b6261 100644
--- a/drivers/media/test-drivers/vivid/vivid-ctrls.c
+++ b/drivers/media/test-drivers/vivid/vivid-ctrls.c
@@ -37,6 +37,7 @@
#define VIVID_CID_U8_PIXEL_ARRAY (VIVID_CID_CUSTOM_BASE + 14)
#define VIVID_CID_S32_ARRAY (VIVID_CID_CUSTOM_BASE + 15)
#define VIVID_CID_S64_ARRAY (VIVID_CID_CUSTOM_BASE + 16)
+#define VIVID_CID_RECT (VIVID_CID_CUSTOM_BASE + 17)
#define VIVID_CID_VIVID_BASE (0x00f00000 | 0xf000)
#define VIVID_CID_VIVID_CLASS (0x00f00000 | 1)
@@ -360,6 +361,38 @@ static const struct v4l2_ctrl_config vivid_ctrl_ro_int32 = {
.step = 1,
};
+static const struct v4l2_rect rect_def = {
+ .top = 100,
+ .left = 200,
+ .width = 300,
+ .height = 400,
+};
+
+static const struct v4l2_rect rect_min = {
+ .top = 0,
+ .left = 0,
+ .width = 1,
+ .height = 1,
+};
+
+static const struct v4l2_rect rect_max = {
+ .top = 0,
+ .left = 0,
+ .width = 1000,
+ .height = 2000,
+};
+
+static const struct v4l2_ctrl_config vivid_ctrl_rect = {
+ .ops = &vivid_user_gen_ctrl_ops,
+ .id = VIVID_CID_RECT,
+ .name = "Rect",
+ .type = V4L2_CTRL_TYPE_RECT,
+ .flags = V4L2_CTRL_FLAG_HAS_WHICH_MIN_MAX,
+ .p_def.p_const = &rect_def,
+ .p_min.p_const = &rect_min,
+ .p_max.p_const = &rect_max,
+};
+
/* Framebuffer Controls */
static int vivid_fb_s_ctrl(struct v4l2_ctrl *ctrl)
@@ -369,7 +402,7 @@ static int vivid_fb_s_ctrl(struct v4l2_ctrl *ctrl)
switch (ctrl->id) {
case VIVID_CID_CLEAR_FB:
- vivid_clear_fb(dev);
+ vivid_fb_clear(dev);
break;
}
return 0;
@@ -1685,6 +1718,7 @@ int vivid_create_controls(struct vivid_dev *dev, bool show_ccs_cap,
dev->int_menu = v4l2_ctrl_new_custom(hdl_user_gen, &vivid_ctrl_int_menu, NULL);
dev->ro_int32 = v4l2_ctrl_new_custom(hdl_user_gen, &vivid_ctrl_ro_int32, NULL);
v4l2_ctrl_new_custom(hdl_user_gen, &vivid_ctrl_area, NULL);
+ v4l2_ctrl_new_custom(hdl_user_gen, &vivid_ctrl_rect, NULL);
v4l2_ctrl_new_custom(hdl_user_gen, &vivid_ctrl_u32_array, NULL);
v4l2_ctrl_new_custom(hdl_user_gen, &vivid_ctrl_u32_dyn_array, NULL);
v4l2_ctrl_new_custom(hdl_user_gen, &vivid_ctrl_u16_matrix, NULL);
diff --git a/drivers/media/test-drivers/vivid/vivid-kthread-cap.c b/drivers/media/test-drivers/vivid/vivid-kthread-cap.c
index 669bd96da4c7..273e8ed8c2a9 100644
--- a/drivers/media/test-drivers/vivid/vivid-kthread-cap.c
+++ b/drivers/media/test-drivers/vivid/vivid-kthread-cap.c
@@ -789,9 +789,14 @@ static int vivid_thread_vid_cap(void *data)
next_jiffies_since_start = jiffies_since_start;
wait_jiffies = next_jiffies_since_start - jiffies_since_start;
- while (time_is_after_jiffies(cur_jiffies + wait_jiffies) &&
- !kthread_should_stop())
- schedule();
+ if (!time_is_after_jiffies(cur_jiffies + wait_jiffies))
+ continue;
+
+ wait_queue_head_t wait;
+
+ init_waitqueue_head(&wait);
+ wait_event_interruptible_timeout(wait, kthread_should_stop(),
+ cur_jiffies + wait_jiffies - jiffies);
}
dprintk(dev, 1, "Video Capture Thread End\n");
return 0;
diff --git a/drivers/media/test-drivers/vivid/vivid-kthread-out.c b/drivers/media/test-drivers/vivid/vivid-kthread-out.c
index fac6208b51da..015a7b166a1e 100644
--- a/drivers/media/test-drivers/vivid/vivid-kthread-out.c
+++ b/drivers/media/test-drivers/vivid/vivid-kthread-out.c
@@ -235,9 +235,14 @@ static int vivid_thread_vid_out(void *data)
next_jiffies_since_start = jiffies_since_start;
wait_jiffies = next_jiffies_since_start - jiffies_since_start;
- while (time_is_after_jiffies(cur_jiffies + wait_jiffies) &&
- !kthread_should_stop())
- schedule();
+ if (!time_is_after_jiffies(cur_jiffies + wait_jiffies))
+ continue;
+
+ wait_queue_head_t wait;
+
+ init_waitqueue_head(&wait);
+ wait_event_interruptible_timeout(wait, kthread_should_stop(),
+ cur_jiffies + wait_jiffies - jiffies);
}
dprintk(dev, 1, "Video Output Thread End\n");
return 0;
diff --git a/drivers/media/test-drivers/vivid/vivid-kthread-touch.c b/drivers/media/test-drivers/vivid/vivid-kthread-touch.c
index fa711ee36a3f..c862689786b6 100644
--- a/drivers/media/test-drivers/vivid/vivid-kthread-touch.c
+++ b/drivers/media/test-drivers/vivid/vivid-kthread-touch.c
@@ -135,9 +135,14 @@ static int vivid_thread_touch_cap(void *data)
next_jiffies_since_start = jiffies_since_start;
wait_jiffies = next_jiffies_since_start - jiffies_since_start;
- while (time_is_after_jiffies(cur_jiffies + wait_jiffies) &&
- !kthread_should_stop())
- schedule();
+ if (!time_is_after_jiffies(cur_jiffies + wait_jiffies))
+ continue;
+
+ wait_queue_head_t wait;
+
+ init_waitqueue_head(&wait);
+ wait_event_interruptible_timeout(wait, kthread_should_stop(),
+ cur_jiffies + wait_jiffies - jiffies);
}
dprintk(dev, 1, "Touch Capture Thread End\n");
return 0;
diff --git a/drivers/media/test-drivers/vivid/vivid-osd.c b/drivers/media/test-drivers/vivid/vivid-osd.c
index 5c931b94a7b5..91ad9b314f2e 100644
--- a/drivers/media/test-drivers/vivid/vivid-osd.c
+++ b/drivers/media/test-drivers/vivid/vivid-osd.c
@@ -45,13 +45,18 @@ static const u16 rgb565[16] = {
0xffff, 0xffe0, 0x07ff, 0x07e0, 0xf81f, 0xf800, 0x001f, 0x0000
};
-void vivid_clear_fb(struct vivid_dev *dev)
+unsigned int vivid_fb_green_bits(struct vivid_dev *dev)
+{
+ return dev->fb_defined.green.length;
+}
+
+void vivid_fb_clear(struct vivid_dev *dev)
{
void *p = dev->video_vbase;
const u16 *rgb = rgb555;
unsigned x, y;
- if (dev->fb_defined.green.length == 6)
+ if (vivid_fb_green_bits(dev) == 6)
rgb = rgb565;
for (y = 0; y < dev->display_height; y++) {
@@ -333,7 +338,7 @@ static int vivid_fb_init_vidmode(struct vivid_dev *dev)
}
/* Release any memory we've grabbed */
-void vivid_fb_release_buffers(struct vivid_dev *dev)
+static void vivid_fb_release_buffers(struct vivid_dev *dev)
{
if (dev->video_vbase == NULL)
return;
@@ -370,7 +375,7 @@ int vivid_fb_init(struct vivid_dev *dev)
return ret;
}
- vivid_clear_fb(dev);
+ vivid_fb_clear(dev);
/* Register the framebuffer */
if (register_framebuffer(&dev->fb_info) < 0) {
@@ -380,6 +385,17 @@ int vivid_fb_init(struct vivid_dev *dev)
/* Set the card to the requested mode */
vivid_fb_set_par(&dev->fb_info);
+
+ v4l2_info(&dev->v4l2_dev, "Framebuffer device registered as fb%d\n",
+ dev->fb_info.node);
+
return 0;
}
+
+void vivid_fb_deinit(struct vivid_dev *dev)
+{
+ v4l2_info(&dev->v4l2_dev, "unregistering fb%d\n", dev->fb_info.node);
+ unregister_framebuffer(&dev->fb_info);
+ vivid_fb_release_buffers(dev);
+}
diff --git a/drivers/media/test-drivers/vivid/vivid-osd.h b/drivers/media/test-drivers/vivid/vivid-osd.h
index f9ac1af25dd3..c52280ebcd03 100644
--- a/drivers/media/test-drivers/vivid/vivid-osd.h
+++ b/drivers/media/test-drivers/vivid/vivid-osd.h
@@ -8,8 +8,23 @@
#ifndef _VIVID_OSD_H_
#define _VIVID_OSD_H_
+#ifdef CONFIG_VIDEO_VIVID_OSD
int vivid_fb_init(struct vivid_dev *dev);
-void vivid_fb_release_buffers(struct vivid_dev *dev);
-void vivid_clear_fb(struct vivid_dev *dev);
+void vivid_fb_deinit(struct vivid_dev *dev);
+void vivid_fb_clear(struct vivid_dev *dev);
+unsigned int vivid_fb_green_bits(struct vivid_dev *dev);
+#else
+static inline int vivid_fb_init(struct vivid_dev *dev)
+{
+ return -ENODEV;
+}
+
+static inline void vivid_fb_deinit(struct vivid_dev *dev) {}
+static inline void vivid_fb_clear(struct vivid_dev *dev) {}
+static inline unsigned int vivid_fb_green_bits(struct vivid_dev *dev)
+{
+ return 5;
+}
+#endif
#endif
diff --git a/drivers/media/test-drivers/vivid/vivid-sdr-cap.c b/drivers/media/test-drivers/vivid/vivid-sdr-cap.c
index 74a91d28c8be..c633fc2ed664 100644
--- a/drivers/media/test-drivers/vivid/vivid-sdr-cap.c
+++ b/drivers/media/test-drivers/vivid/vivid-sdr-cap.c
@@ -206,9 +206,14 @@ static int vivid_thread_sdr_cap(void *data)
next_jiffies_since_start = jiffies_since_start;
wait_jiffies = next_jiffies_since_start - jiffies_since_start;
- while (time_is_after_jiffies(cur_jiffies + wait_jiffies) &&
- !kthread_should_stop())
- schedule();
+ if (!time_is_after_jiffies(cur_jiffies + wait_jiffies))
+ continue;
+
+ wait_queue_head_t wait;
+
+ init_waitqueue_head(&wait);
+ wait_event_interruptible_timeout(wait, kthread_should_stop(),
+ cur_jiffies + wait_jiffies - jiffies);
}
dprintk(dev, 1, "SDR Capture Thread End\n");
return 0;
diff --git a/drivers/media/test-drivers/vivid/vivid-vid-out.c b/drivers/media/test-drivers/vivid/vivid-vid-out.c
index 5ec84db934d6..c3398bce6c15 100644
--- a/drivers/media/test-drivers/vivid/vivid-vid-out.c
+++ b/drivers/media/test-drivers/vivid/vivid-vid-out.c
@@ -16,6 +16,7 @@
#include <media/v4l2-rect.h>
#include "vivid-core.h"
+#include "vivid-osd.h"
#include "vivid-vid-common.h"
#include "vivid-kthread-out.h"
#include "vivid-vid-out.h"
@@ -907,7 +908,7 @@ int vivid_vid_out_g_fbuf(struct file *file, void *fh,
a->base = (void *)dev->video_pbase;
a->fmt.width = dev->display_width;
a->fmt.height = dev->display_height;
- if (dev->fb_defined.green.length == 5)
+ if (vivid_fb_green_bits(dev) == 5)
a->fmt.pixelformat = V4L2_PIX_FMT_ARGB555;
else
a->fmt.pixelformat = V4L2_PIX_FMT_RGB565;
diff --git a/drivers/media/tuners/tuner-simple.c b/drivers/media/tuners/tuner-simple.c
index 8fb186b25d6a..b52cd8bd07dd 100644
--- a/drivers/media/tuners/tuner-simple.c
+++ b/drivers/media/tuners/tuner-simple.c
@@ -112,7 +112,7 @@ struct tuner_simple_priv {
struct list_head hybrid_tuner_instance_list;
unsigned int type;
- struct tunertype *tun;
+ const struct tunertype *tun;
u32 frequency;
u32 bandwidth;
@@ -232,11 +232,11 @@ static inline char *tuner_param_name(enum param_type type)
return name;
}
-static struct tuner_params *simple_tuner_params(struct dvb_frontend *fe,
- enum param_type desired_type)
+static const struct tuner_params *simple_tuner_params(struct dvb_frontend *fe,
+ enum param_type desired_type)
{
struct tuner_simple_priv *priv = fe->tuner_priv;
- struct tunertype *tun = priv->tun;
+ const struct tunertype *tun = priv->tun;
int i;
for (i = 0; i < tun->count; i++)
@@ -257,7 +257,7 @@ static struct tuner_params *simple_tuner_params(struct dvb_frontend *fe,
}
static int simple_config_lookup(struct dvb_frontend *fe,
- struct tuner_params *t_params,
+ const struct tuner_params *t_params,
unsigned *frequency, u8 *config, u8 *cb)
{
struct tuner_simple_priv *priv = fe->tuner_priv;
@@ -549,7 +549,7 @@ static int simple_set_tv_freq(struct dvb_frontend *fe,
u8 buffer[4];
int rc, IFPCoff, i;
enum param_type desired_type;
- struct tuner_params *t_params;
+ const struct tuner_params *t_params;
/* IFPCoff = Video Intermediate Frequency - Vif:
940 =16*58.75 NTSC/J (Japan)
@@ -664,12 +664,12 @@ static int simple_set_tv_freq(struct dvb_frontend *fe,
static int simple_set_radio_freq(struct dvb_frontend *fe,
struct analog_parameters *params)
{
- struct tunertype *tun;
+ const struct tunertype *tun;
struct tuner_simple_priv *priv = fe->tuner_priv;
u8 buffer[4];
u16 div;
int rc, j;
- struct tuner_params *t_params;
+ const struct tuner_params *t_params;
unsigned int freq = params->frequency;
bool mono = params->audmode == V4L2_TUNER_MODE_MONO;
@@ -848,8 +848,8 @@ static u32 simple_dvb_configure(struct dvb_frontend *fe, u8 *buf,
{
/* This function returns the tuned frequency on success, 0 on error */
struct tuner_simple_priv *priv = fe->tuner_priv;
- struct tunertype *tun = priv->tun;
- struct tuner_params *t_params;
+ const struct tunertype *tun = priv->tun;
+ const struct tuner_params *t_params;
u8 config, cb;
u32 div;
int ret;
diff --git a/drivers/media/tuners/tuner-types.c b/drivers/media/tuners/tuner-types.c
index c26f1296e18f..0716cc028212 100644
--- a/drivers/media/tuners/tuner-types.c
+++ b/drivers/media/tuners/tuner-types.c
@@ -61,13 +61,13 @@ static u8 tua603x_agc112[] = { 2, 0x80|0x40|0x18|0x04|0x01, 0x80|0x20 };
/* 0-9 */
/* ------------ TUNER_TEMIC_PAL - TEMIC PAL ------------ */
-static struct tuner_range tuner_temic_pal_ranges[] = {
+static const struct tuner_range tuner_temic_pal_ranges[] = {
{ 16 * 140.25 /*MHz*/, 0x8e, 0x02, },
{ 16 * 463.25 /*MHz*/, 0x8e, 0x04, },
{ 16 * 999.99 , 0x8e, 0x01, },
};
-static struct tuner_params tuner_temic_pal_params[] = {
+static const struct tuner_params tuner_temic_pal_params[] = {
{
.type = TUNER_PARAM_TYPE_PAL,
.ranges = tuner_temic_pal_ranges,
@@ -77,13 +77,13 @@ static struct tuner_params tuner_temic_pal_params[] = {
/* ------------ TUNER_PHILIPS_PAL_I - Philips PAL_I ------------ */
-static struct tuner_range tuner_philips_pal_i_ranges[] = {
+static const struct tuner_range tuner_philips_pal_i_ranges[] = {
{ 16 * 140.25 /*MHz*/, 0x8e, 0xa0, },
{ 16 * 463.25 /*MHz*/, 0x8e, 0x90, },
{ 16 * 999.99 , 0x8e, 0x30, },
};
-static struct tuner_params tuner_philips_pal_i_params[] = {
+static const struct tuner_params tuner_philips_pal_i_params[] = {
{
.type = TUNER_PARAM_TYPE_PAL,
.ranges = tuner_philips_pal_i_ranges,
@@ -93,13 +93,13 @@ static struct tuner_params tuner_philips_pal_i_params[] = {
/* ------------ TUNER_PHILIPS_NTSC - Philips NTSC ------------ */
-static struct tuner_range tuner_philips_ntsc_ranges[] = {
+static const struct tuner_range tuner_philips_ntsc_ranges[] = {
{ 16 * 157.25 /*MHz*/, 0x8e, 0xa0, },
{ 16 * 451.25 /*MHz*/, 0x8e, 0x90, },
{ 16 * 999.99 , 0x8e, 0x30, },
};
-static struct tuner_params tuner_philips_ntsc_params[] = {
+static const struct tuner_params tuner_philips_ntsc_params[] = {
{
.type = TUNER_PARAM_TYPE_NTSC,
.ranges = tuner_philips_ntsc_ranges,
@@ -110,13 +110,13 @@ static struct tuner_params tuner_philips_ntsc_params[] = {
/* ------------ TUNER_PHILIPS_SECAM - Philips SECAM ------------ */
-static struct tuner_range tuner_philips_secam_ranges[] = {
+static const struct tuner_range tuner_philips_secam_ranges[] = {
{ 16 * 168.25 /*MHz*/, 0x8e, 0xa7, },
{ 16 * 447.25 /*MHz*/, 0x8e, 0x97, },
{ 16 * 999.99 , 0x8e, 0x37, },
};
-static struct tuner_params tuner_philips_secam_params[] = {
+static const struct tuner_params tuner_philips_secam_params[] = {
{
.type = TUNER_PARAM_TYPE_SECAM,
.ranges = tuner_philips_secam_ranges,
@@ -127,13 +127,13 @@ static struct tuner_params tuner_philips_secam_params[] = {
/* ------------ TUNER_PHILIPS_PAL - Philips PAL ------------ */
-static struct tuner_range tuner_philips_pal_ranges[] = {
+static const struct tuner_range tuner_philips_pal_ranges[] = {
{ 16 * 168.25 /*MHz*/, 0x8e, 0xa0, },
{ 16 * 447.25 /*MHz*/, 0x8e, 0x90, },
{ 16 * 999.99 , 0x8e, 0x30, },
};
-static struct tuner_params tuner_philips_pal_params[] = {
+static const struct tuner_params tuner_philips_pal_params[] = {
{
.type = TUNER_PARAM_TYPE_PAL,
.ranges = tuner_philips_pal_ranges,
@@ -144,13 +144,13 @@ static struct tuner_params tuner_philips_pal_params[] = {
/* ------------ TUNER_TEMIC_NTSC - TEMIC NTSC ------------ */
-static struct tuner_range tuner_temic_ntsc_ranges[] = {
+static const struct tuner_range tuner_temic_ntsc_ranges[] = {
{ 16 * 157.25 /*MHz*/, 0x8e, 0x02, },
{ 16 * 463.25 /*MHz*/, 0x8e, 0x04, },
{ 16 * 999.99 , 0x8e, 0x01, },
};
-static struct tuner_params tuner_temic_ntsc_params[] = {
+static const struct tuner_params tuner_temic_ntsc_params[] = {
{
.type = TUNER_PARAM_TYPE_NTSC,
.ranges = tuner_temic_ntsc_ranges,
@@ -160,13 +160,13 @@ static struct tuner_params tuner_temic_ntsc_params[] = {
/* ------------ TUNER_TEMIC_PAL_I - TEMIC PAL_I ------------ */
-static struct tuner_range tuner_temic_pal_i_ranges[] = {
+static const struct tuner_range tuner_temic_pal_i_ranges[] = {
{ 16 * 170.00 /*MHz*/, 0x8e, 0x02, },
{ 16 * 450.00 /*MHz*/, 0x8e, 0x04, },
{ 16 * 999.99 , 0x8e, 0x01, },
};
-static struct tuner_params tuner_temic_pal_i_params[] = {
+static const struct tuner_params tuner_temic_pal_i_params[] = {
{
.type = TUNER_PARAM_TYPE_PAL,
.ranges = tuner_temic_pal_i_ranges,
@@ -176,13 +176,13 @@ static struct tuner_params tuner_temic_pal_i_params[] = {
/* ------------ TUNER_TEMIC_4036FY5_NTSC - TEMIC NTSC ------------ */
-static struct tuner_range tuner_temic_4036fy5_ntsc_ranges[] = {
+static const struct tuner_range tuner_temic_4036fy5_ntsc_ranges[] = {
{ 16 * 157.25 /*MHz*/, 0x8e, 0xa0, },
{ 16 * 463.25 /*MHz*/, 0x8e, 0x90, },
{ 16 * 999.99 , 0x8e, 0x30, },
};
-static struct tuner_params tuner_temic_4036fy5_ntsc_params[] = {
+static const struct tuner_params tuner_temic_4036fy5_ntsc_params[] = {
{
.type = TUNER_PARAM_TYPE_NTSC,
.ranges = tuner_temic_4036fy5_ntsc_ranges,
@@ -192,13 +192,13 @@ static struct tuner_params tuner_temic_4036fy5_ntsc_params[] = {
/* ------------ TUNER_ALPS_TSBH1_NTSC - TEMIC NTSC ------------ */
-static struct tuner_range tuner_alps_tsb_1_ranges[] = {
+static const struct tuner_range tuner_alps_tsb_1_ranges[] = {
{ 16 * 137.25 /*MHz*/, 0x8e, 0x01, },
{ 16 * 385.25 /*MHz*/, 0x8e, 0x02, },
{ 16 * 999.99 , 0x8e, 0x08, },
};
-static struct tuner_params tuner_alps_tsbh1_ntsc_params[] = {
+static const struct tuner_params tuner_alps_tsbh1_ntsc_params[] = {
{
.type = TUNER_PARAM_TYPE_NTSC,
.ranges = tuner_alps_tsb_1_ranges,
@@ -209,7 +209,7 @@ static struct tuner_params tuner_alps_tsbh1_ntsc_params[] = {
/* 10-19 */
/* ------------ TUNER_ALPS_TSBE1_PAL - TEMIC PAL ------------ */
-static struct tuner_params tuner_alps_tsb_1_params[] = {
+static const struct tuner_params tuner_alps_tsb_1_params[] = {
{
.type = TUNER_PARAM_TYPE_PAL,
.ranges = tuner_alps_tsb_1_ranges,
@@ -219,13 +219,13 @@ static struct tuner_params tuner_alps_tsb_1_params[] = {
/* ------------ TUNER_ALPS_TSBB5_PAL_I - Alps PAL_I ------------ */
-static struct tuner_range tuner_alps_tsb_5_pal_ranges[] = {
+static const struct tuner_range tuner_alps_tsb_5_pal_ranges[] = {
{ 16 * 133.25 /*MHz*/, 0x8e, 0x01, },
{ 16 * 351.25 /*MHz*/, 0x8e, 0x02, },
{ 16 * 999.99 , 0x8e, 0x08, },
};
-static struct tuner_params tuner_alps_tsbb5_params[] = {
+static const struct tuner_params tuner_alps_tsbb5_params[] = {
{
.type = TUNER_PARAM_TYPE_PAL,
.ranges = tuner_alps_tsb_5_pal_ranges,
@@ -235,7 +235,7 @@ static struct tuner_params tuner_alps_tsbb5_params[] = {
/* ------------ TUNER_ALPS_TSBE5_PAL - Alps PAL ------------ */
-static struct tuner_params tuner_alps_tsbe5_params[] = {
+static const struct tuner_params tuner_alps_tsbe5_params[] = {
{
.type = TUNER_PARAM_TYPE_PAL,
.ranges = tuner_alps_tsb_5_pal_ranges,
@@ -245,7 +245,7 @@ static struct tuner_params tuner_alps_tsbe5_params[] = {
/* ------------ TUNER_ALPS_TSBC5_PAL - Alps PAL ------------ */
-static struct tuner_params tuner_alps_tsbc5_params[] = {
+static const struct tuner_params tuner_alps_tsbc5_params[] = {
{
.type = TUNER_PARAM_TYPE_PAL,
.ranges = tuner_alps_tsb_5_pal_ranges,
@@ -255,13 +255,13 @@ static struct tuner_params tuner_alps_tsbc5_params[] = {
/* ------------ TUNER_TEMIC_4006FH5_PAL - TEMIC PAL ------------ */
-static struct tuner_range tuner_lg_pal_ranges[] = {
+static const struct tuner_range tuner_lg_pal_ranges[] = {
{ 16 * 170.00 /*MHz*/, 0x8e, 0xa0, },
{ 16 * 450.00 /*MHz*/, 0x8e, 0x90, },
{ 16 * 999.99 , 0x8e, 0x30, },
};
-static struct tuner_params tuner_temic_4006fh5_params[] = {
+static const struct tuner_params tuner_temic_4006fh5_params[] = {
{
.type = TUNER_PARAM_TYPE_PAL,
.ranges = tuner_lg_pal_ranges,
@@ -271,13 +271,13 @@ static struct tuner_params tuner_temic_4006fh5_params[] = {
/* ------------ TUNER_ALPS_TSHC6_NTSC - Alps NTSC ------------ */
-static struct tuner_range tuner_alps_tshc6_ntsc_ranges[] = {
+static const struct tuner_range tuner_alps_tshc6_ntsc_ranges[] = {
{ 16 * 137.25 /*MHz*/, 0x8e, 0x14, },
{ 16 * 385.25 /*MHz*/, 0x8e, 0x12, },
{ 16 * 999.99 , 0x8e, 0x11, },
};
-static struct tuner_params tuner_alps_tshc6_params[] = {
+static const struct tuner_params tuner_alps_tshc6_params[] = {
{
.type = TUNER_PARAM_TYPE_NTSC,
.ranges = tuner_alps_tshc6_ntsc_ranges,
@@ -287,13 +287,13 @@ static struct tuner_params tuner_alps_tshc6_params[] = {
/* ------------ TUNER_TEMIC_PAL_DK - TEMIC PAL ------------ */
-static struct tuner_range tuner_temic_pal_dk_ranges[] = {
+static const struct tuner_range tuner_temic_pal_dk_ranges[] = {
{ 16 * 168.25 /*MHz*/, 0x8e, 0xa0, },
{ 16 * 456.25 /*MHz*/, 0x8e, 0x90, },
{ 16 * 999.99 , 0x8e, 0x30, },
};
-static struct tuner_params tuner_temic_pal_dk_params[] = {
+static const struct tuner_params tuner_temic_pal_dk_params[] = {
{
.type = TUNER_PARAM_TYPE_PAL,
.ranges = tuner_temic_pal_dk_ranges,
@@ -303,13 +303,13 @@ static struct tuner_params tuner_temic_pal_dk_params[] = {
/* ------------ TUNER_PHILIPS_NTSC_M - Philips NTSC ------------ */
-static struct tuner_range tuner_philips_ntsc_m_ranges[] = {
+static const struct tuner_range tuner_philips_ntsc_m_ranges[] = {
{ 16 * 160.00 /*MHz*/, 0x8e, 0xa0, },
{ 16 * 454.00 /*MHz*/, 0x8e, 0x90, },
{ 16 * 999.99 , 0x8e, 0x30, },
};
-static struct tuner_params tuner_philips_ntsc_m_params[] = {
+static const struct tuner_params tuner_philips_ntsc_m_params[] = {
{
.type = TUNER_PARAM_TYPE_NTSC,
.ranges = tuner_philips_ntsc_m_ranges,
@@ -319,13 +319,13 @@ static struct tuner_params tuner_philips_ntsc_m_params[] = {
/* ------------ TUNER_TEMIC_4066FY5_PAL_I - TEMIC PAL_I ------------ */
-static struct tuner_range tuner_temic_40x6f_5_pal_ranges[] = {
+static const struct tuner_range tuner_temic_40x6f_5_pal_ranges[] = {
{ 16 * 169.00 /*MHz*/, 0x8e, 0xa0, },
{ 16 * 454.00 /*MHz*/, 0x8e, 0x90, },
{ 16 * 999.99 , 0x8e, 0x30, },
};
-static struct tuner_params tuner_temic_4066fy5_pal_i_params[] = {
+static const struct tuner_params tuner_temic_4066fy5_pal_i_params[] = {
{
.type = TUNER_PARAM_TYPE_PAL,
.ranges = tuner_temic_40x6f_5_pal_ranges,
@@ -335,7 +335,7 @@ static struct tuner_params tuner_temic_4066fy5_pal_i_params[] = {
/* ------------ TUNER_TEMIC_4006FN5_MULTI_PAL - TEMIC PAL ------------ */
-static struct tuner_params tuner_temic_4006fn5_multi_params[] = {
+static const struct tuner_params tuner_temic_4006fn5_multi_params[] = {
{
.type = TUNER_PARAM_TYPE_PAL,
.ranges = tuner_temic_40x6f_5_pal_ranges,
@@ -346,13 +346,13 @@ static struct tuner_params tuner_temic_4006fn5_multi_params[] = {
/* 20-29 */
/* ------------ TUNER_TEMIC_4009FR5_PAL - TEMIC PAL ------------ */
-static struct tuner_range tuner_temic_4009f_5_pal_ranges[] = {
+static const struct tuner_range tuner_temic_4009f_5_pal_ranges[] = {
{ 16 * 141.00 /*MHz*/, 0x8e, 0xa0, },
{ 16 * 464.00 /*MHz*/, 0x8e, 0x90, },
{ 16 * 999.99 , 0x8e, 0x30, },
};
-static struct tuner_params tuner_temic_4009f_5_params[] = {
+static const struct tuner_params tuner_temic_4009f_5_params[] = {
{
.type = TUNER_PARAM_TYPE_PAL,
.ranges = tuner_temic_4009f_5_pal_ranges,
@@ -362,13 +362,13 @@ static struct tuner_params tuner_temic_4009f_5_params[] = {
/* ------------ TUNER_TEMIC_4039FR5_NTSC - TEMIC NTSC ------------ */
-static struct tuner_range tuner_temic_4x3x_f_5_ntsc_ranges[] = {
+static const struct tuner_range tuner_temic_4x3x_f_5_ntsc_ranges[] = {
{ 16 * 158.00 /*MHz*/, 0x8e, 0xa0, },
{ 16 * 453.00 /*MHz*/, 0x8e, 0x90, },
{ 16 * 999.99 , 0x8e, 0x30, },
};
-static struct tuner_params tuner_temic_4039fr5_params[] = {
+static const struct tuner_params tuner_temic_4039fr5_params[] = {
{
.type = TUNER_PARAM_TYPE_NTSC,
.ranges = tuner_temic_4x3x_f_5_ntsc_ranges,
@@ -378,7 +378,7 @@ static struct tuner_params tuner_temic_4039fr5_params[] = {
/* ------------ TUNER_TEMIC_4046FM5 - TEMIC PAL ------------ */
-static struct tuner_params tuner_temic_4046fm5_params[] = {
+static const struct tuner_params tuner_temic_4046fm5_params[] = {
{
.type = TUNER_PARAM_TYPE_PAL,
.ranges = tuner_temic_40x6f_5_pal_ranges,
@@ -388,7 +388,7 @@ static struct tuner_params tuner_temic_4046fm5_params[] = {
/* ------------ TUNER_PHILIPS_PAL_DK - Philips PAL ------------ */
-static struct tuner_params tuner_philips_pal_dk_params[] = {
+static const struct tuner_params tuner_philips_pal_dk_params[] = {
{
.type = TUNER_PARAM_TYPE_PAL,
.ranges = tuner_lg_pal_ranges,
@@ -398,7 +398,7 @@ static struct tuner_params tuner_philips_pal_dk_params[] = {
/* ------------ TUNER_PHILIPS_FQ1216ME - Philips PAL ------------ */
-static struct tuner_params tuner_philips_fq1216me_params[] = {
+static const struct tuner_params tuner_philips_fq1216me_params[] = {
{
.type = TUNER_PARAM_TYPE_PAL,
.ranges = tuner_lg_pal_ranges,
@@ -412,7 +412,7 @@ static struct tuner_params tuner_philips_fq1216me_params[] = {
/* ------------ TUNER_LG_PAL_I_FM - LGINNOTEK PAL_I ------------ */
-static struct tuner_params tuner_lg_pal_i_fm_params[] = {
+static const struct tuner_params tuner_lg_pal_i_fm_params[] = {
{
.type = TUNER_PARAM_TYPE_PAL,
.ranges = tuner_lg_pal_ranges,
@@ -422,7 +422,7 @@ static struct tuner_params tuner_lg_pal_i_fm_params[] = {
/* ------------ TUNER_LG_PAL_I - LGINNOTEK PAL_I ------------ */
-static struct tuner_params tuner_lg_pal_i_params[] = {
+static const struct tuner_params tuner_lg_pal_i_params[] = {
{
.type = TUNER_PARAM_TYPE_PAL,
.ranges = tuner_lg_pal_ranges,
@@ -432,13 +432,13 @@ static struct tuner_params tuner_lg_pal_i_params[] = {
/* ------------ TUNER_LG_NTSC_FM - LGINNOTEK NTSC ------------ */
-static struct tuner_range tuner_lg_ntsc_fm_ranges[] = {
+static const struct tuner_range tuner_lg_ntsc_fm_ranges[] = {
{ 16 * 210.00 /*MHz*/, 0x8e, 0xa0, },
{ 16 * 497.00 /*MHz*/, 0x8e, 0x90, },
{ 16 * 999.99 , 0x8e, 0x30, },
};
-static struct tuner_params tuner_lg_ntsc_fm_params[] = {
+static const struct tuner_params tuner_lg_ntsc_fm_params[] = {
{
.type = TUNER_PARAM_TYPE_NTSC,
.ranges = tuner_lg_ntsc_fm_ranges,
@@ -448,7 +448,7 @@ static struct tuner_params tuner_lg_ntsc_fm_params[] = {
/* ------------ TUNER_LG_PAL_FM - LGINNOTEK PAL ------------ */
-static struct tuner_params tuner_lg_pal_fm_params[] = {
+static const struct tuner_params tuner_lg_pal_fm_params[] = {
{
.type = TUNER_PARAM_TYPE_PAL,
.ranges = tuner_lg_pal_ranges,
@@ -458,7 +458,7 @@ static struct tuner_params tuner_lg_pal_fm_params[] = {
/* ------------ TUNER_LG_PAL - LGINNOTEK PAL ------------ */
-static struct tuner_params tuner_lg_pal_params[] = {
+static const struct tuner_params tuner_lg_pal_params[] = {
{
.type = TUNER_PARAM_TYPE_PAL,
.ranges = tuner_lg_pal_ranges,
@@ -469,7 +469,7 @@ static struct tuner_params tuner_lg_pal_params[] = {
/* 30-39 */
/* ------------ TUNER_TEMIC_4009FN5_MULTI_PAL_FM - TEMIC PAL ------------ */
-static struct tuner_params tuner_temic_4009_fn5_multi_pal_fm_params[] = {
+static const struct tuner_params tuner_temic_4009_fn5_multi_pal_fm_params[] = {
{
.type = TUNER_PARAM_TYPE_PAL,
.ranges = tuner_temic_4009f_5_pal_ranges,
@@ -479,13 +479,13 @@ static struct tuner_params tuner_temic_4009_fn5_multi_pal_fm_params[] = {
/* ------------ TUNER_SHARP_2U5JF5540_NTSC - SHARP NTSC ------------ */
-static struct tuner_range tuner_sharp_2u5jf5540_ntsc_ranges[] = {
+static const struct tuner_range tuner_sharp_2u5jf5540_ntsc_ranges[] = {
{ 16 * 137.25 /*MHz*/, 0x8e, 0x01, },
{ 16 * 317.25 /*MHz*/, 0x8e, 0x02, },
{ 16 * 999.99 , 0x8e, 0x08, },
};
-static struct tuner_params tuner_sharp_2u5jf5540_params[] = {
+static const struct tuner_params tuner_sharp_2u5jf5540_params[] = {
{
.type = TUNER_PARAM_TYPE_NTSC,
.ranges = tuner_sharp_2u5jf5540_ntsc_ranges,
@@ -495,13 +495,13 @@ static struct tuner_params tuner_sharp_2u5jf5540_params[] = {
/* ------------ TUNER_Samsung_PAL_TCPM9091PD27 - Samsung PAL ------------ */
-static struct tuner_range tuner_samsung_pal_tcpm9091pd27_ranges[] = {
+static const struct tuner_range tuner_samsung_pal_tcpm9091pd27_ranges[] = {
{ 16 * 169 /*MHz*/, 0x8e, 0xa0, },
{ 16 * 464 /*MHz*/, 0x8e, 0x90, },
{ 16 * 999.99 , 0x8e, 0x30, },
};
-static struct tuner_params tuner_samsung_pal_tcpm9091pd27_params[] = {
+static const struct tuner_params tuner_samsung_pal_tcpm9091pd27_params[] = {
{
.type = TUNER_PARAM_TYPE_PAL,
.ranges = tuner_samsung_pal_tcpm9091pd27_ranges,
@@ -511,7 +511,7 @@ static struct tuner_params tuner_samsung_pal_tcpm9091pd27_params[] = {
/* ------------ TUNER_TEMIC_4106FH5 - TEMIC PAL ------------ */
-static struct tuner_params tuner_temic_4106fh5_params[] = {
+static const struct tuner_params tuner_temic_4106fh5_params[] = {
{
.type = TUNER_PARAM_TYPE_PAL,
.ranges = tuner_temic_4009f_5_pal_ranges,
@@ -521,7 +521,7 @@ static struct tuner_params tuner_temic_4106fh5_params[] = {
/* ------------ TUNER_TEMIC_4012FY5 - TEMIC PAL ------------ */
-static struct tuner_params tuner_temic_4012fy5_params[] = {
+static const struct tuner_params tuner_temic_4012fy5_params[] = {
{
.type = TUNER_PARAM_TYPE_PAL,
.ranges = tuner_temic_pal_ranges,
@@ -531,7 +531,7 @@ static struct tuner_params tuner_temic_4012fy5_params[] = {
/* ------------ TUNER_TEMIC_4136FY5 - TEMIC NTSC ------------ */
-static struct tuner_params tuner_temic_4136_fy5_params[] = {
+static const struct tuner_params tuner_temic_4136_fy5_params[] = {
{
.type = TUNER_PARAM_TYPE_NTSC,
.ranges = tuner_temic_4x3x_f_5_ntsc_ranges,
@@ -541,13 +541,13 @@ static struct tuner_params tuner_temic_4136_fy5_params[] = {
/* ------------ TUNER_LG_PAL_NEW_TAPC - LGINNOTEK PAL ------------ */
-static struct tuner_range tuner_lg_new_tapc_ranges[] = {
+static const struct tuner_range tuner_lg_new_tapc_ranges[] = {
{ 16 * 170.00 /*MHz*/, 0x8e, 0x01, },
{ 16 * 450.00 /*MHz*/, 0x8e, 0x02, },
{ 16 * 999.99 , 0x8e, 0x08, },
};
-static struct tuner_params tuner_lg_pal_new_tapc_params[] = {
+static const struct tuner_params tuner_lg_pal_new_tapc_params[] = {
{
.type = TUNER_PARAM_TYPE_PAL,
.ranges = tuner_lg_new_tapc_ranges,
@@ -557,13 +557,13 @@ static struct tuner_params tuner_lg_pal_new_tapc_params[] = {
/* ------------ TUNER_PHILIPS_FM1216ME_MK3 - Philips PAL ------------ */
-static struct tuner_range tuner_fm1216me_mk3_pal_ranges[] = {
+static const struct tuner_range tuner_fm1216me_mk3_pal_ranges[] = {
{ 16 * 158.00 /*MHz*/, 0x8e, 0x01, },
{ 16 * 442.00 /*MHz*/, 0x8e, 0x02, },
{ 16 * 999.99 , 0x8e, 0x04, },
};
-static struct tuner_params tuner_fm1216me_mk3_params[] = {
+static const struct tuner_params tuner_fm1216me_mk3_params[] = {
{
.type = TUNER_PARAM_TYPE_PAL,
.ranges = tuner_fm1216me_mk3_pal_ranges,
@@ -582,13 +582,13 @@ static struct tuner_params tuner_fm1216me_mk3_params[] = {
/* ------------ TUNER_PHILIPS_FM1216MK5 - Philips PAL ------------ */
-static struct tuner_range tuner_fm1216mk5_pal_ranges[] = {
+static const struct tuner_range tuner_fm1216mk5_pal_ranges[] = {
{ 16 * 158.00 /*MHz*/, 0xce, 0x01, },
{ 16 * 441.00 /*MHz*/, 0xce, 0x02, },
{ 16 * 864.00 , 0xce, 0x04, },
};
-static struct tuner_params tuner_fm1216mk5_params[] = {
+static const struct tuner_params tuner_fm1216mk5_params[] = {
{
.type = TUNER_PARAM_TYPE_PAL,
.ranges = tuner_fm1216mk5_pal_ranges,
@@ -607,7 +607,7 @@ static struct tuner_params tuner_fm1216mk5_params[] = {
/* ------------ TUNER_LG_NTSC_NEW_TAPC - LGINNOTEK NTSC ------------ */
-static struct tuner_params tuner_lg_ntsc_new_tapc_params[] = {
+static const struct tuner_params tuner_lg_ntsc_new_tapc_params[] = {
{
.type = TUNER_PARAM_TYPE_NTSC,
.ranges = tuner_lg_new_tapc_ranges,
@@ -618,7 +618,7 @@ static struct tuner_params tuner_lg_ntsc_new_tapc_params[] = {
/* 40-49 */
/* ------------ TUNER_HITACHI_NTSC - HITACHI NTSC ------------ */
-static struct tuner_params tuner_hitachi_ntsc_params[] = {
+static const struct tuner_params tuner_hitachi_ntsc_params[] = {
{
.type = TUNER_PARAM_TYPE_NTSC,
.ranges = tuner_lg_new_tapc_ranges,
@@ -628,13 +628,13 @@ static struct tuner_params tuner_hitachi_ntsc_params[] = {
/* ------------ TUNER_PHILIPS_PAL_MK - Philips PAL ------------ */
-static struct tuner_range tuner_philips_pal_mk_pal_ranges[] = {
+static const struct tuner_range tuner_philips_pal_mk_pal_ranges[] = {
{ 16 * 140.25 /*MHz*/, 0x8e, 0x01, },
{ 16 * 463.25 /*MHz*/, 0x8e, 0xc2, },
{ 16 * 999.99 , 0x8e, 0xcf, },
};
-static struct tuner_params tuner_philips_pal_mk_params[] = {
+static const struct tuner_params tuner_philips_pal_mk_params[] = {
{
.type = TUNER_PARAM_TYPE_PAL,
.ranges = tuner_philips_pal_mk_pal_ranges,
@@ -644,19 +644,19 @@ static struct tuner_params tuner_philips_pal_mk_params[] = {
/* ---- TUNER_PHILIPS_FCV1236D - Philips FCV1236D (ATSC/NTSC) ---- */
-static struct tuner_range tuner_philips_fcv1236d_ntsc_ranges[] = {
+static const struct tuner_range tuner_philips_fcv1236d_ntsc_ranges[] = {
{ 16 * 157.25 /*MHz*/, 0x8e, 0xa2, },
{ 16 * 451.25 /*MHz*/, 0x8e, 0x92, },
{ 16 * 999.99 , 0x8e, 0x32, },
};
-static struct tuner_range tuner_philips_fcv1236d_atsc_ranges[] = {
+static const struct tuner_range tuner_philips_fcv1236d_atsc_ranges[] = {
{ 16 * 159.00 /*MHz*/, 0x8e, 0xa0, },
{ 16 * 453.00 /*MHz*/, 0x8e, 0x90, },
{ 16 * 999.99 , 0x8e, 0x30, },
};
-static struct tuner_params tuner_philips_fcv1236d_params[] = {
+static const struct tuner_params tuner_philips_fcv1236d_params[] = {
{
.type = TUNER_PARAM_TYPE_NTSC,
.ranges = tuner_philips_fcv1236d_ntsc_ranges,
@@ -672,13 +672,13 @@ static struct tuner_params tuner_philips_fcv1236d_params[] = {
/* ------------ TUNER_PHILIPS_FM1236_MK3 - Philips NTSC ------------ */
-static struct tuner_range tuner_fm1236_mk3_ntsc_ranges[] = {
+static const struct tuner_range tuner_fm1236_mk3_ntsc_ranges[] = {
{ 16 * 160.00 /*MHz*/, 0x8e, 0x01, },
{ 16 * 442.00 /*MHz*/, 0x8e, 0x02, },
{ 16 * 999.99 , 0x8e, 0x04, },
};
-static struct tuner_params tuner_fm1236_mk3_params[] = {
+static const struct tuner_params tuner_fm1236_mk3_params[] = {
{
.type = TUNER_PARAM_TYPE_NTSC,
.ranges = tuner_fm1236_mk3_ntsc_ranges,
@@ -693,7 +693,7 @@ static struct tuner_params tuner_fm1236_mk3_params[] = {
/* ------------ TUNER_PHILIPS_4IN1 - Philips NTSC ------------ */
-static struct tuner_params tuner_philips_4in1_params[] = {
+static const struct tuner_params tuner_philips_4in1_params[] = {
{
.type = TUNER_PARAM_TYPE_NTSC,
.ranges = tuner_fm1236_mk3_ntsc_ranges,
@@ -703,7 +703,7 @@ static struct tuner_params tuner_philips_4in1_params[] = {
/* ------------ TUNER_MICROTUNE_4049FM5 - Microtune PAL ------------ */
-static struct tuner_params tuner_microtune_4049_fm5_params[] = {
+static const struct tuner_params tuner_microtune_4049_fm5_params[] = {
{
.type = TUNER_PARAM_TYPE_PAL,
.ranges = tuner_temic_4009f_5_pal_ranges,
@@ -718,13 +718,13 @@ static struct tuner_params tuner_microtune_4049_fm5_params[] = {
/* ------------ TUNER_PANASONIC_VP27 - Panasonic NTSC ------------ */
-static struct tuner_range tuner_panasonic_vp27_ntsc_ranges[] = {
+static const struct tuner_range tuner_panasonic_vp27_ntsc_ranges[] = {
{ 16 * 160.00 /*MHz*/, 0xce, 0x01, },
{ 16 * 454.00 /*MHz*/, 0xce, 0x02, },
{ 16 * 999.99 , 0xce, 0x08, },
};
-static struct tuner_params tuner_panasonic_vp27_params[] = {
+static const struct tuner_params tuner_panasonic_vp27_params[] = {
{
.type = TUNER_PARAM_TYPE_NTSC,
.ranges = tuner_panasonic_vp27_ntsc_ranges,
@@ -739,13 +739,13 @@ static struct tuner_params tuner_panasonic_vp27_params[] = {
/* ------------ TUNER_TNF_8831BGFF - Philips PAL ------------ */
-static struct tuner_range tuner_tnf_8831bgff_pal_ranges[] = {
+static const struct tuner_range tuner_tnf_8831bgff_pal_ranges[] = {
{ 16 * 161.25 /*MHz*/, 0x8e, 0xa0, },
{ 16 * 463.25 /*MHz*/, 0x8e, 0x90, },
{ 16 * 999.99 , 0x8e, 0x30, },
};
-static struct tuner_params tuner_tnf_8831bgff_params[] = {
+static const struct tuner_params tuner_tnf_8831bgff_params[] = {
{
.type = TUNER_PARAM_TYPE_PAL,
.ranges = tuner_tnf_8831bgff_pal_ranges,
@@ -755,19 +755,19 @@ static struct tuner_params tuner_tnf_8831bgff_params[] = {
/* ------------ TUNER_MICROTUNE_4042FI5 - Microtune NTSC ------------ */
-static struct tuner_range tuner_microtune_4042fi5_ntsc_ranges[] = {
+static const struct tuner_range tuner_microtune_4042fi5_ntsc_ranges[] = {
{ 16 * 162.00 /*MHz*/, 0x8e, 0xa2, },
{ 16 * 457.00 /*MHz*/, 0x8e, 0x94, },
{ 16 * 999.99 , 0x8e, 0x31, },
};
-static struct tuner_range tuner_microtune_4042fi5_atsc_ranges[] = {
+static const struct tuner_range tuner_microtune_4042fi5_atsc_ranges[] = {
{ 16 * 162.00 /*MHz*/, 0x8e, 0xa1, },
{ 16 * 457.00 /*MHz*/, 0x8e, 0x91, },
{ 16 * 999.99 , 0x8e, 0x31, },
};
-static struct tuner_params tuner_microtune_4042fi5_params[] = {
+static const struct tuner_params tuner_microtune_4042fi5_params[] = {
{
.type = TUNER_PARAM_TYPE_NTSC,
.ranges = tuner_microtune_4042fi5_ntsc_ranges,
@@ -784,13 +784,13 @@ static struct tuner_params tuner_microtune_4042fi5_params[] = {
/* 50-59 */
/* ------------ TUNER_TCL_2002N - TCL NTSC ------------ */
-static struct tuner_range tuner_tcl_2002n_ntsc_ranges[] = {
+static const struct tuner_range tuner_tcl_2002n_ntsc_ranges[] = {
{ 16 * 172.00 /*MHz*/, 0x8e, 0x01, },
{ 16 * 448.00 /*MHz*/, 0x8e, 0x02, },
{ 16 * 999.99 , 0x8e, 0x08, },
};
-static struct tuner_params tuner_tcl_2002n_params[] = {
+static const struct tuner_params tuner_tcl_2002n_params[] = {
{
.type = TUNER_PARAM_TYPE_NTSC,
.ranges = tuner_tcl_2002n_ntsc_ranges,
@@ -801,7 +801,7 @@ static struct tuner_params tuner_tcl_2002n_params[] = {
/* ------------ TUNER_PHILIPS_FM1256_IH3 - Philips PAL ------------ */
-static struct tuner_params tuner_philips_fm1256_ih3_params[] = {
+static const struct tuner_params tuner_philips_fm1256_ih3_params[] = {
{
.type = TUNER_PARAM_TYPE_PAL,
.ranges = tuner_fm1236_mk3_ntsc_ranges,
@@ -813,13 +813,13 @@ static struct tuner_params tuner_philips_fm1256_ih3_params[] = {
/* ------------ TUNER_THOMSON_DTT7610 - THOMSON ATSC ------------ */
/* single range used for both ntsc and atsc */
-static struct tuner_range tuner_thomson_dtt7610_ntsc_ranges[] = {
+static const struct tuner_range tuner_thomson_dtt7610_ntsc_ranges[] = {
{ 16 * 157.25 /*MHz*/, 0x8e, 0x39, },
{ 16 * 454.00 /*MHz*/, 0x8e, 0x3a, },
{ 16 * 999.99 , 0x8e, 0x3c, },
};
-static struct tuner_params tuner_thomson_dtt7610_params[] = {
+static const struct tuner_params tuner_thomson_dtt7610_params[] = {
{
.type = TUNER_PARAM_TYPE_NTSC,
.ranges = tuner_thomson_dtt7610_ntsc_ranges,
@@ -835,13 +835,13 @@ static struct tuner_params tuner_thomson_dtt7610_params[] = {
/* ------------ TUNER_PHILIPS_FQ1286 - Philips NTSC ------------ */
-static struct tuner_range tuner_philips_fq1286_ntsc_ranges[] = {
+static const struct tuner_range tuner_philips_fq1286_ntsc_ranges[] = {
{ 16 * 160.00 /*MHz*/, 0x8e, 0x41, },
{ 16 * 454.00 /*MHz*/, 0x8e, 0x42, },
{ 16 * 999.99 , 0x8e, 0x04, },
};
-static struct tuner_params tuner_philips_fq1286_params[] = {
+static const struct tuner_params tuner_philips_fq1286_params[] = {
{
.type = TUNER_PARAM_TYPE_NTSC,
.ranges = tuner_philips_fq1286_ntsc_ranges,
@@ -851,13 +851,13 @@ static struct tuner_params tuner_philips_fq1286_params[] = {
/* ------------ TUNER_TCL_2002MB - TCL PAL ------------ */
-static struct tuner_range tuner_tcl_2002mb_pal_ranges[] = {
+static const struct tuner_range tuner_tcl_2002mb_pal_ranges[] = {
{ 16 * 170.00 /*MHz*/, 0xce, 0x01, },
{ 16 * 450.00 /*MHz*/, 0xce, 0x02, },
{ 16 * 999.99 , 0xce, 0x08, },
};
-static struct tuner_params tuner_tcl_2002mb_params[] = {
+static const struct tuner_params tuner_tcl_2002mb_params[] = {
{
.type = TUNER_PARAM_TYPE_PAL,
.ranges = tuner_tcl_2002mb_pal_ranges,
@@ -867,13 +867,13 @@ static struct tuner_params tuner_tcl_2002mb_params[] = {
/* ------------ TUNER_PHILIPS_FQ1216AME_MK4 - Philips PAL ------------ */
-static struct tuner_range tuner_philips_fq12_6a___mk4_pal_ranges[] = {
+static const struct tuner_range tuner_philips_fq12_6a___mk4_pal_ranges[] = {
{ 16 * 160.00 /*MHz*/, 0xce, 0x01, },
{ 16 * 442.00 /*MHz*/, 0xce, 0x02, },
{ 16 * 999.99 , 0xce, 0x04, },
};
-static struct tuner_params tuner_philips_fq1216ame_mk4_params[] = {
+static const struct tuner_params tuner_philips_fq1216ame_mk4_params[] = {
{
.type = TUNER_PARAM_TYPE_PAL,
.ranges = tuner_philips_fq12_6a___mk4_pal_ranges,
@@ -890,7 +890,7 @@ static struct tuner_params tuner_philips_fq1216ame_mk4_params[] = {
/* ------------ TUNER_PHILIPS_FQ1236A_MK4 - Philips NTSC ------------ */
-static struct tuner_params tuner_philips_fq1236a_mk4_params[] = {
+static const struct tuner_params tuner_philips_fq1236a_mk4_params[] = {
{
.type = TUNER_PARAM_TYPE_NTSC,
.ranges = tuner_fm1236_mk3_ntsc_ranges,
@@ -900,7 +900,7 @@ static struct tuner_params tuner_philips_fq1236a_mk4_params[] = {
/* ------------ TUNER_YMEC_TVF_8531MF - Philips NTSC ------------ */
-static struct tuner_params tuner_ymec_tvf_8531mf_params[] = {
+static const struct tuner_params tuner_ymec_tvf_8531mf_params[] = {
{
.type = TUNER_PARAM_TYPE_NTSC,
.ranges = tuner_philips_ntsc_m_ranges,
@@ -910,13 +910,13 @@ static struct tuner_params tuner_ymec_tvf_8531mf_params[] = {
/* ------------ TUNER_YMEC_TVF_5533MF - Philips NTSC ------------ */
-static struct tuner_range tuner_ymec_tvf_5533mf_ntsc_ranges[] = {
+static const struct tuner_range tuner_ymec_tvf_5533mf_ntsc_ranges[] = {
{ 16 * 160.00 /*MHz*/, 0x8e, 0x01, },
{ 16 * 454.00 /*MHz*/, 0x8e, 0x02, },
{ 16 * 999.99 , 0x8e, 0x04, },
};
-static struct tuner_params tuner_ymec_tvf_5533mf_params[] = {
+static const struct tuner_params tuner_ymec_tvf_5533mf_params[] = {
{
.type = TUNER_PARAM_TYPE_NTSC,
.ranges = tuner_ymec_tvf_5533mf_ntsc_ranges,
@@ -928,19 +928,19 @@ static struct tuner_params tuner_ymec_tvf_5533mf_params[] = {
/* ------------ TUNER_THOMSON_DTT761X - THOMSON ATSC ------------ */
/* DTT 7611 7611A 7612 7613 7613A 7614 7615 7615A */
-static struct tuner_range tuner_thomson_dtt761x_ntsc_ranges[] = {
+static const struct tuner_range tuner_thomson_dtt761x_ntsc_ranges[] = {
{ 16 * 145.25 /*MHz*/, 0x8e, 0x39, },
{ 16 * 415.25 /*MHz*/, 0x8e, 0x3a, },
{ 16 * 999.99 , 0x8e, 0x3c, },
};
-static struct tuner_range tuner_thomson_dtt761x_atsc_ranges[] = {
+static const struct tuner_range tuner_thomson_dtt761x_atsc_ranges[] = {
{ 16 * 147.00 /*MHz*/, 0x8e, 0x39, },
{ 16 * 417.00 /*MHz*/, 0x8e, 0x3a, },
{ 16 * 999.99 , 0x8e, 0x3c, },
};
-static struct tuner_params tuner_thomson_dtt761x_params[] = {
+static const struct tuner_params tuner_thomson_dtt761x_params[] = {
{
.type = TUNER_PARAM_TYPE_NTSC,
.ranges = tuner_thomson_dtt761x_ntsc_ranges,
@@ -959,13 +959,13 @@ static struct tuner_params tuner_thomson_dtt761x_params[] = {
/* ------------ TUNER_TENA_9533_DI - Philips PAL ------------ */
-static struct tuner_range tuner_tena_9533_di_pal_ranges[] = {
+static const struct tuner_range tuner_tena_9533_di_pal_ranges[] = {
{ 16 * 160.25 /*MHz*/, 0x8e, 0x01, },
{ 16 * 464.25 /*MHz*/, 0x8e, 0x02, },
{ 16 * 999.99 , 0x8e, 0x04, },
};
-static struct tuner_params tuner_tena_9533_di_params[] = {
+static const struct tuner_params tuner_tena_9533_di_params[] = {
{
.type = TUNER_PARAM_TYPE_PAL,
.ranges = tuner_tena_9533_di_pal_ranges,
@@ -975,13 +975,13 @@ static struct tuner_params tuner_tena_9533_di_params[] = {
/* ------------ TUNER_TENA_TNF_5337 - Tena tnf5337MFD STD M/N ------------ */
-static struct tuner_range tuner_tena_tnf_5337_ntsc_ranges[] = {
+static const struct tuner_range tuner_tena_tnf_5337_ntsc_ranges[] = {
{ 16 * 166.25 /*MHz*/, 0x86, 0x01, },
{ 16 * 466.25 /*MHz*/, 0x86, 0x02, },
{ 16 * 999.99 , 0x86, 0x08, },
};
-static struct tuner_params tuner_tena_tnf_5337_params[] = {
+static const struct tuner_params tuner_tena_tnf_5337_params[] = {
{
.type = TUNER_PARAM_TYPE_NTSC,
.ranges = tuner_tena_tnf_5337_ntsc_ranges,
@@ -991,13 +991,13 @@ static struct tuner_params tuner_tena_tnf_5337_params[] = {
/* ------------ TUNER_PHILIPS_FMD1216ME(X)_MK3 - Philips PAL ------------ */
-static struct tuner_range tuner_philips_fmd1216me_mk3_pal_ranges[] = {
+static const struct tuner_range tuner_philips_fmd1216me_mk3_pal_ranges[] = {
{ 16 * 160.00 /*MHz*/, 0x86, 0x51, },
{ 16 * 442.00 /*MHz*/, 0x86, 0x52, },
{ 16 * 999.99 , 0x86, 0x54, },
};
-static struct tuner_range tuner_philips_fmd1216me_mk3_dvb_ranges[] = {
+static const struct tuner_range tuner_philips_fmd1216me_mk3_dvb_ranges[] = {
{ 16 * 143.87 /*MHz*/, 0xbc, 0x41 },
{ 16 * 158.87 /*MHz*/, 0xf4, 0x41 },
{ 16 * 329.87 /*MHz*/, 0xbc, 0x42 },
@@ -1007,7 +1007,7 @@ static struct tuner_range tuner_philips_fmd1216me_mk3_dvb_ranges[] = {
{ 16 * 999.99 , 0xfc, 0x44 },
};
-static struct tuner_params tuner_philips_fmd1216me_mk3_params[] = {
+static const struct tuner_params tuner_philips_fmd1216me_mk3_params[] = {
{
.type = TUNER_PARAM_TYPE_PAL,
.ranges = tuner_philips_fmd1216me_mk3_pal_ranges,
@@ -1027,7 +1027,7 @@ static struct tuner_params tuner_philips_fmd1216me_mk3_params[] = {
},
};
-static struct tuner_params tuner_philips_fmd1216mex_mk3_params[] = {
+static const struct tuner_params tuner_philips_fmd1216mex_mk3_params[] = {
{
.type = TUNER_PARAM_TYPE_PAL,
.ranges = tuner_philips_fmd1216me_mk3_pal_ranges,
@@ -1051,19 +1051,19 @@ static struct tuner_params tuner_philips_fmd1216mex_mk3_params[] = {
/* ------ TUNER_LG_TDVS_H06XF - LG INNOTEK / INFINEON ATSC ----- */
-static struct tuner_range tuner_tua6034_ntsc_ranges[] = {
+static const struct tuner_range tuner_tua6034_ntsc_ranges[] = {
{ 16 * 165.00 /*MHz*/, 0x8e, 0x01 },
{ 16 * 450.00 /*MHz*/, 0x8e, 0x02 },
{ 16 * 999.99 , 0x8e, 0x04 },
};
-static struct tuner_range tuner_tua6034_atsc_ranges[] = {
+static const struct tuner_range tuner_tua6034_atsc_ranges[] = {
{ 16 * 165.00 /*MHz*/, 0xce, 0x01 },
{ 16 * 450.00 /*MHz*/, 0xce, 0x02 },
{ 16 * 999.99 , 0xce, 0x04 },
};
-static struct tuner_params tuner_lg_tdvs_h06xf_params[] = {
+static const struct tuner_params tuner_lg_tdvs_h06xf_params[] = {
{
.type = TUNER_PARAM_TYPE_NTSC,
.ranges = tuner_tua6034_ntsc_ranges,
@@ -1079,13 +1079,13 @@ static struct tuner_params tuner_lg_tdvs_h06xf_params[] = {
/* ------------ TUNER_YMEC_TVF66T5_B_DFF - Philips PAL ------------ */
-static struct tuner_range tuner_ymec_tvf66t5_b_dff_pal_ranges[] = {
+static const struct tuner_range tuner_ymec_tvf66t5_b_dff_pal_ranges[] = {
{ 16 * 160.25 /*MHz*/, 0x8e, 0x01, },
{ 16 * 464.25 /*MHz*/, 0x8e, 0x02, },
{ 16 * 999.99 , 0x8e, 0x08, },
};
-static struct tuner_params tuner_ymec_tvf66t5_b_dff_params[] = {
+static const struct tuner_params tuner_ymec_tvf66t5_b_dff_params[] = {
{
.type = TUNER_PARAM_TYPE_PAL,
.ranges = tuner_ymec_tvf66t5_b_dff_pal_ranges,
@@ -1095,19 +1095,19 @@ static struct tuner_params tuner_ymec_tvf66t5_b_dff_params[] = {
/* ------------ TUNER_LG_NTSC_TALN_MINI - LGINNOTEK NTSC ------------ */
-static struct tuner_range tuner_lg_taln_ntsc_ranges[] = {
+static const struct tuner_range tuner_lg_taln_ntsc_ranges[] = {
{ 16 * 137.25 /*MHz*/, 0x8e, 0x01, },
{ 16 * 373.25 /*MHz*/, 0x8e, 0x02, },
{ 16 * 999.99 , 0x8e, 0x08, },
};
-static struct tuner_range tuner_lg_taln_pal_secam_ranges[] = {
+static const struct tuner_range tuner_lg_taln_pal_secam_ranges[] = {
{ 16 * 150.00 /*MHz*/, 0x8e, 0x01, },
{ 16 * 425.00 /*MHz*/, 0x8e, 0x02, },
{ 16 * 999.99 , 0x8e, 0x08, },
};
-static struct tuner_params tuner_lg_taln_params[] = {
+static const struct tuner_params tuner_lg_taln_params[] = {
{
.type = TUNER_PARAM_TYPE_NTSC,
.ranges = tuner_lg_taln_ntsc_ranges,
@@ -1121,13 +1121,13 @@ static struct tuner_params tuner_lg_taln_params[] = {
/* ------------ TUNER_PHILIPS_TD1316 - Philips PAL ------------ */
-static struct tuner_range tuner_philips_td1316_pal_ranges[] = {
+static const struct tuner_range tuner_philips_td1316_pal_ranges[] = {
{ 16 * 160.00 /*MHz*/, 0xc8, 0xa1, },
{ 16 * 442.00 /*MHz*/, 0xc8, 0xa2, },
{ 16 * 999.99 , 0xc8, 0xa4, },
};
-static struct tuner_range tuner_philips_td1316_dvb_ranges[] = {
+static const struct tuner_range tuner_philips_td1316_dvb_ranges[] = {
{ 16 * 93.834 /*MHz*/, 0xca, 0x60, },
{ 16 * 123.834 /*MHz*/, 0xca, 0xa0, },
{ 16 * 163.834 /*MHz*/, 0xca, 0xc0, },
@@ -1139,7 +1139,7 @@ static struct tuner_range tuner_philips_td1316_dvb_ranges[] = {
{ 16 * 999.999 , 0xca, 0xe0, },
};
-static struct tuner_params tuner_philips_td1316_params[] = {
+static const struct tuner_params tuner_philips_td1316_params[] = {
{
.type = TUNER_PARAM_TYPE_PAL,
.ranges = tuner_philips_td1316_pal_ranges,
@@ -1155,19 +1155,19 @@ static struct tuner_params tuner_philips_td1316_params[] = {
/* ------------ TUNER_PHILIPS_TUV1236D - Philips ATSC ------------ */
-static struct tuner_range tuner_tuv1236d_ntsc_ranges[] = {
+static const struct tuner_range tuner_tuv1236d_ntsc_ranges[] = {
{ 16 * 157.25 /*MHz*/, 0xce, 0x01, },
{ 16 * 454.00 /*MHz*/, 0xce, 0x02, },
{ 16 * 999.99 , 0xce, 0x04, },
};
-static struct tuner_range tuner_tuv1236d_atsc_ranges[] = {
+static const struct tuner_range tuner_tuv1236d_atsc_ranges[] = {
{ 16 * 157.25 /*MHz*/, 0xc6, 0x41, },
{ 16 * 454.00 /*MHz*/, 0xc6, 0x42, },
{ 16 * 999.99 , 0xc6, 0x44, },
};
-static struct tuner_params tuner_tuv1236d_params[] = {
+static const struct tuner_params tuner_tuv1236d_params[] = {
{
.type = TUNER_PARAM_TYPE_NTSC,
.ranges = tuner_tuv1236d_ntsc_ranges,
@@ -1187,19 +1187,19 @@ static struct tuner_params tuner_tuv1236d_params[] = {
* models based on TI SN 761677 chip on both PAL and NTSC
*/
-static struct tuner_range tuner_tnf_5335_d_if_pal_ranges[] = {
+static const struct tuner_range tuner_tnf_5335_d_if_pal_ranges[] = {
{ 16 * 168.25 /*MHz*/, 0x8e, 0x01, },
{ 16 * 471.25 /*MHz*/, 0x8e, 0x02, },
{ 16 * 999.99 , 0x8e, 0x08, },
};
-static struct tuner_range tuner_tnf_5335mf_ntsc_ranges[] = {
+static const struct tuner_range tuner_tnf_5335mf_ntsc_ranges[] = {
{ 16 * 169.25 /*MHz*/, 0x8e, 0x01, },
{ 16 * 469.25 /*MHz*/, 0x8e, 0x02, },
{ 16 * 999.99 , 0x8e, 0x08, },
};
-static struct tuner_params tuner_tnf_5335mf_params[] = {
+static const struct tuner_params tuner_tnf_5335mf_params[] = {
{
.type = TUNER_PARAM_TYPE_NTSC,
.ranges = tuner_tnf_5335mf_ntsc_ranges,
@@ -1216,13 +1216,13 @@ static struct tuner_params tuner_tnf_5335mf_params[] = {
/* ------------ TUNER_SAMSUNG_TCPN_2121P30A - Samsung NTSC ------------ */
/* '+ 4' turns on the Low Noise Amplifier */
-static struct tuner_range tuner_samsung_tcpn_2121p30a_ntsc_ranges[] = {
+static const struct tuner_range tuner_samsung_tcpn_2121p30a_ntsc_ranges[] = {
{ 16 * 130.00 /*MHz*/, 0xce, 0x01 + 4, },
{ 16 * 364.50 /*MHz*/, 0xce, 0x02 + 4, },
{ 16 * 999.99 , 0xce, 0x08 + 4, },
};
-static struct tuner_params tuner_samsung_tcpn_2121p30a_params[] = {
+static const struct tuner_params tuner_samsung_tcpn_2121p30a_params[] = {
{
.type = TUNER_PARAM_TYPE_NTSC,
.ranges = tuner_samsung_tcpn_2121p30a_ntsc_ranges,
@@ -1232,20 +1232,20 @@ static struct tuner_params tuner_samsung_tcpn_2121p30a_params[] = {
/* ------------ TUNER_THOMSON_FE6600 - DViCO Hybrid PAL ------------ */
-static struct tuner_range tuner_thomson_fe6600_pal_ranges[] = {
+static const struct tuner_range tuner_thomson_fe6600_pal_ranges[] = {
{ 16 * 160.00 /*MHz*/, 0xfe, 0x11, },
{ 16 * 442.00 /*MHz*/, 0xf6, 0x12, },
{ 16 * 999.99 , 0xf6, 0x18, },
};
-static struct tuner_range tuner_thomson_fe6600_dvb_ranges[] = {
+static const struct tuner_range tuner_thomson_fe6600_dvb_ranges[] = {
{ 16 * 250.00 /*MHz*/, 0xb4, 0x12, },
{ 16 * 455.00 /*MHz*/, 0xfe, 0x11, },
{ 16 * 775.50 /*MHz*/, 0xbc, 0x18, },
{ 16 * 999.99 , 0xf4, 0x18, },
};
-static struct tuner_params tuner_thomson_fe6600_params[] = {
+static const struct tuner_params tuner_thomson_fe6600_params[] = {
{
.type = TUNER_PARAM_TYPE_PAL,
.ranges = tuner_thomson_fe6600_pal_ranges,
@@ -1262,13 +1262,13 @@ static struct tuner_params tuner_thomson_fe6600_params[] = {
/* ------------ TUNER_SAMSUNG_TCPG_6121P30A - Samsung PAL ------------ */
/* '+ 4' turns on the Low Noise Amplifier */
-static struct tuner_range tuner_samsung_tcpg_6121p30a_pal_ranges[] = {
+static const struct tuner_range tuner_samsung_tcpg_6121p30a_pal_ranges[] = {
{ 16 * 146.25 /*MHz*/, 0xce, 0x01 + 4, },
{ 16 * 428.50 /*MHz*/, 0xce, 0x02 + 4, },
{ 16 * 999.99 , 0xce, 0x08 + 4, },
};
-static struct tuner_params tuner_samsung_tcpg_6121p30a_params[] = {
+static const struct tuner_params tuner_samsung_tcpg_6121p30a_params[] = {
{
.type = TUNER_PARAM_TYPE_PAL,
.ranges = tuner_samsung_tcpg_6121p30a_pal_ranges,
@@ -1282,13 +1282,13 @@ static struct tuner_params tuner_samsung_tcpg_6121p30a_params[] = {
/* ------------ TUNER_TCL_MF02GIP-5N-E - TCL MF02GIP-5N ------------ */
-static struct tuner_range tuner_tcl_mf02gip_5n_ntsc_ranges[] = {
+static const struct tuner_range tuner_tcl_mf02gip_5n_ntsc_ranges[] = {
{ 16 * 172.00 /*MHz*/, 0x8e, 0x01, },
{ 16 * 448.00 /*MHz*/, 0x8e, 0x02, },
{ 16 * 999.99 , 0x8e, 0x04, },
};
-static struct tuner_params tuner_tcl_mf02gip_5n_params[] = {
+static const struct tuner_params tuner_tcl_mf02gip_5n_params[] = {
{
.type = TUNER_PARAM_TYPE_NTSC,
.ranges = tuner_tcl_mf02gip_5n_ntsc_ranges,
@@ -1300,7 +1300,7 @@ static struct tuner_params tuner_tcl_mf02gip_5n_params[] = {
/* 80-89 */
/* --------- TUNER_PHILIPS_FQ1216LME_MK3 -- active loopthrough, no FM ------- */
-static struct tuner_params tuner_fq1216lme_mk3_params[] = {
+static const struct tuner_params tuner_fq1216lme_mk3_params[] = {
{
.type = TUNER_PARAM_TYPE_PAL,
.ranges = tuner_fm1216me_mk3_pal_ranges,
@@ -1321,7 +1321,7 @@ static struct tuner_params tuner_fq1216lme_mk3_params[] = {
/* ----- TUNER_PARTSNIC_PTI_5NF05 - Partsnic (Daewoo) PTI-5NF05 NTSC ----- */
-static struct tuner_range tuner_partsnic_pti_5nf05_ranges[] = {
+static const struct tuner_range tuner_partsnic_pti_5nf05_ranges[] = {
/* The datasheet specified channel ranges and the bandswitch byte */
/* The control byte value of 0x8e is just a guess */
{ 16 * 133.25 /*MHz*/, 0x8e, 0x01, }, /* Channels 2 - B */
@@ -1329,7 +1329,7 @@ static struct tuner_range tuner_partsnic_pti_5nf05_ranges[] = {
{ 16 * 999.99 , 0x8e, 0x08, }, /* Channels W+12 - 69 */
};
-static struct tuner_params tuner_partsnic_pti_5nf05_params[] = {
+static const struct tuner_params tuner_partsnic_pti_5nf05_params[] = {
{
.type = TUNER_PARAM_TYPE_NTSC,
.ranges = tuner_partsnic_pti_5nf05_ranges,
@@ -1340,13 +1340,13 @@ static struct tuner_params tuner_partsnic_pti_5nf05_params[] = {
/* --------- TUNER_PHILIPS_CU1216L - DVB-C NIM ------------------------- */
-static struct tuner_range tuner_cu1216l_ranges[] = {
+static const struct tuner_range tuner_cu1216l_ranges[] = {
{ 16 * 160.25 /*MHz*/, 0xce, 0x01 },
{ 16 * 444.25 /*MHz*/, 0xce, 0x02 },
{ 16 * 999.99 , 0xce, 0x04 },
};
-static struct tuner_params tuner_philips_cu1216l_params[] = {
+static const struct tuner_params tuner_philips_cu1216l_params[] = {
{
.type = TUNER_PARAM_TYPE_DIGITAL,
.ranges = tuner_cu1216l_ranges,
@@ -1357,13 +1357,13 @@ static struct tuner_params tuner_philips_cu1216l_params[] = {
/* ---------------------- TUNER_SONY_BTF_PXN01Z ------------------------ */
-static struct tuner_range tuner_sony_btf_pxn01z_ranges[] = {
+static const struct tuner_range tuner_sony_btf_pxn01z_ranges[] = {
{ 16 * 137.25 /*MHz*/, 0x8e, 0x01, },
{ 16 * 367.25 /*MHz*/, 0x8e, 0x02, },
{ 16 * 999.99 , 0x8e, 0x04, },
};
-static struct tuner_params tuner_sony_btf_pxn01z_params[] = {
+static const struct tuner_params tuner_sony_btf_pxn01z_params[] = {
{
.type = TUNER_PARAM_TYPE_NTSC,
.ranges = tuner_sony_btf_pxn01z_ranges,
@@ -1373,7 +1373,7 @@ static struct tuner_params tuner_sony_btf_pxn01z_params[] = {
/* ------------ TUNER_PHILIPS_FQ1236_MK5 - Philips NTSC ------------ */
-static struct tuner_params tuner_philips_fq1236_mk5_params[] = {
+static const struct tuner_params tuner_philips_fq1236_mk5_params[] = {
{
.type = TUNER_PARAM_TYPE_NTSC,
.ranges = tuner_fm1236_mk3_ntsc_ranges,
@@ -1384,13 +1384,13 @@ static struct tuner_params tuner_philips_fq1236_mk5_params[] = {
/* --------- Sony BTF-PG472Z PAL/SECAM ------- */
-static struct tuner_range tuner_sony_btf_pg472z_ranges[] = {
+static const struct tuner_range tuner_sony_btf_pg472z_ranges[] = {
{ 16 * 144.25 /*MHz*/, 0xc6, 0x01, },
{ 16 * 427.25 /*MHz*/, 0xc6, 0x02, },
{ 16 * 999.99 , 0xc6, 0x04, },
};
-static struct tuner_params tuner_sony_btf_pg472z_params[] = {
+static const struct tuner_params tuner_sony_btf_pg472z_params[] = {
{
.type = TUNER_PARAM_TYPE_PAL,
.ranges = tuner_sony_btf_pg472z_ranges,
@@ -1404,13 +1404,13 @@ static struct tuner_params tuner_sony_btf_pg472z_params[] = {
/* 90-99 */
/* --------- Sony BTF-PG467Z NTSC-M-JP ------- */
-static struct tuner_range tuner_sony_btf_pg467z_ranges[] = {
+static const struct tuner_range tuner_sony_btf_pg467z_ranges[] = {
{ 16 * 220.25 /*MHz*/, 0xc6, 0x01, },
{ 16 * 467.25 /*MHz*/, 0xc6, 0x02, },
{ 16 * 999.99 , 0xc6, 0x04, },
};
-static struct tuner_params tuner_sony_btf_pg467z_params[] = {
+static const struct tuner_params tuner_sony_btf_pg467z_params[] = {
{
.type = TUNER_PARAM_TYPE_NTSC,
.ranges = tuner_sony_btf_pg467z_ranges,
@@ -1420,13 +1420,13 @@ static struct tuner_params tuner_sony_btf_pg467z_params[] = {
/* --------- Sony BTF-PG463Z NTSC-M ------- */
-static struct tuner_range tuner_sony_btf_pg463z_ranges[] = {
+static const struct tuner_range tuner_sony_btf_pg463z_ranges[] = {
{ 16 * 130.25 /*MHz*/, 0xc6, 0x01, },
{ 16 * 364.25 /*MHz*/, 0xc6, 0x02, },
{ 16 * 999.99 , 0xc6, 0x04, },
};
-static struct tuner_params tuner_sony_btf_pg463z_params[] = {
+static const struct tuner_params tuner_sony_btf_pg463z_params[] = {
{
.type = TUNER_PARAM_TYPE_NTSC,
.ranges = tuner_sony_btf_pg463z_ranges,
@@ -1436,13 +1436,13 @@ static struct tuner_params tuner_sony_btf_pg463z_params[] = {
/* ------------- TUNER_TENA_TNF_931D_DFDR1 - NXP TDA6509A ------------- */
-static struct tuner_range tuner_tena_tnf_931d_dfdr1_ranges[] = {
+static const struct tuner_range tuner_tena_tnf_931d_dfdr1_ranges[] = {
{ 16 * 161.15 /*MHz*/, 0x8e, 0x01, },
{ 16 * 463.15 /*MHz*/, 0x8e, 0x02, },
{ 16 * 999.99 , 0x8e, 0x08, },
};
-static struct tuner_params tuner_tena_tnf_931d_dfdr1_params[] = {
+static const struct tuner_params tuner_tena_tnf_931d_dfdr1_params[] = {
{
.type = TUNER_PARAM_TYPE_PAL,
.ranges = tuner_tena_tnf_931d_dfdr1_ranges,
@@ -1452,7 +1452,7 @@ static struct tuner_params tuner_tena_tnf_931d_dfdr1_params[] = {
/* --------------------------------------------------------------------- */
-struct tunertype tuners[] = {
+const struct tunertype tuners[] = {
/* 0-9 */
[TUNER_TEMIC_PAL] = { /* TEMIC PAL */
.name = "Temic PAL (4002 FH5)",
diff --git a/drivers/media/usb/cx231xx/cx231xx-417.c b/drivers/media/usb/cx231xx/cx231xx-417.c
index a4a9781328c5..e585c8f6e4c5 100644
--- a/drivers/media/usb/cx231xx/cx231xx-417.c
+++ b/drivers/media/usb/cx231xx/cx231xx-417.c
@@ -1538,20 +1538,6 @@ static int vidioc_s_std(struct file *file, void *priv, v4l2_std_id id)
return 0;
}
-static int vidioc_s_ctrl(struct file *file, void *priv,
- struct v4l2_control *ctl)
-{
- struct cx231xx *dev = video_drvdata(file);
- struct v4l2_subdev *sd;
-
- dprintk(3, "enter vidioc_s_ctrl()\n");
- /* Update the A/V core */
- v4l2_device_for_each_subdev(sd, &dev->v4l2_dev)
- v4l2_s_ctrl(NULL, sd->ctrl_handler, ctl);
- dprintk(3, "exit vidioc_s_ctrl()\n");
- return 0;
-}
-
static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_fmtdesc *f)
{
@@ -1627,7 +1613,6 @@ static const struct v4l2_ioctl_ops mpeg_ioctl_ops = {
.vidioc_enum_input = cx231xx_enum_input,
.vidioc_g_input = cx231xx_g_input,
.vidioc_s_input = cx231xx_s_input,
- .vidioc_s_ctrl = vidioc_s_ctrl,
.vidioc_g_pixelaspect = vidioc_g_pixelaspect,
.vidioc_g_selection = vidioc_g_selection,
.vidioc_querycap = cx231xx_querycap,
@@ -1720,6 +1705,8 @@ static void cx231xx_video_dev_init(
vfd->lock = &dev->lock;
vfd->release = video_device_release_empty;
vfd->ctrl_handler = &dev->mpeg_ctrl_handler.hdl;
+ vfd->device_caps = V4L2_CAP_READWRITE | V4L2_CAP_STREAMING |
+ V4L2_CAP_VIDEO_CAPTURE;
video_set_drvdata(vfd, dev);
if (dev->tuner_type == TUNER_ABSENT) {
v4l2_disable_ioctl(vfd, VIDIOC_G_FREQUENCY);
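Two things change in cx231xx-417.c: the driver's private VIDIOC_S_CTRL handler is dropped, since the control handler attached via vfd->ctrl_handler already routes control writes to the sub-devices, and the video_device now declares its device_caps before registration, which the V4L2 core expects. A hedged sketch of that registration step; the helper name and VFL_TYPE_VIDEO argument are assumptions, simplified from the real probe path:

#include <linux/videodev2.h>
#include <media/v4l2-dev.h>

/* Hedged sketch: "vfd" is a prepared video_device; the cap flags mirror
 * what the driver now sets before registering the node. */
static int demo_register(struct video_device *vfd)
{
	vfd->device_caps = V4L2_CAP_READWRITE | V4L2_CAP_STREAMING |
			   V4L2_CAP_VIDEO_CAPTURE;
	return video_register_device(vfd, VFL_TYPE_VIDEO, -1);
}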
diff --git a/drivers/media/usb/cx231xx/cx231xx-pcb-cfg.h b/drivers/media/usb/cx231xx/cx231xx-pcb-cfg.h
index 5bc44f194d0a..62ffa16bb82c 100644
--- a/drivers/media/usb/cx231xx/cx231xx-pcb-cfg.h
+++ b/drivers/media/usb/cx231xx/cx231xx-pcb-cfg.h
@@ -57,19 +57,17 @@ enum USB_SPEED{
};
#define TS_MASK 0x6
-enum TS_PORT{
- NO_TS_PORT = 0x0, /* 2'b00: Neither port used. PCB not a Hybrid,
+#define NO_TS_PORT 0x0 /* 2'b00: Neither port used. PCB not a Hybrid,
only offers Analog TV or Video */
- TS1_PORT = 0x4, /* 2'b10: TS1 Input (Hybrid mode :
+#define TS1_PORT 0x4 /* 2'b10: TS1 Input (Hybrid mode :
Digital or External Analog/Compressed source) */
- TS1_TS2_PORT = 0x6, /* 2'b11: TS1 & TS2 Inputs
+#define TS1_TS2_PORT 0x6 /* 2'b11: TS1 & TS2 Inputs
(Dual inputs from Digital and/or
External Analog/Compressed sources) */
- TS1_EXT_CLOCK = 0x6, /* 2'b11: TS1 & TS2 as selector
+#define TS1_EXT_CLOCK 0x6 /* 2'b11: TS1 & TS2 as selector
to external clock */
- TS1VIP_TS2_PORT = 0x2 /* 2'b01: TS1 used as 656/VIP Output,
+#define TS1VIP_TS2_PORT 0x2 /* 2'b01: TS1 used as 656/VIP Output,
TS2 Input (from Compressor) */
-};
#define EAVP_MASK 0x8
enum EAV_PRESENT{
@@ -89,10 +87,8 @@ enum AT_MODE{
};
#define PWR_SEL_MASK 0x40
-enum POWE_TYPE{
- SELF_POWER = 0x0, /* 0: self power */
- BUS_POWER = 0x40 /* 1: bus power */
-};
+#define SELF_POWER 0x0 /* 0: self power */
+#define BUS_POWER 0x40 /* 1: bus power */
enum USB_POWE_TYPE{
USB_SELF_POWER = 0,
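In cx231xx-pcb-cfg.h the TS_PORT and POWE_TYPE enums become plain #defines. The values are bit patterns masked out of a configuration byte (TS_MASK, PWR_SEL_MASK), and two of the enumerators (TS1_TS2_PORT and TS1_EXT_CLOCK) share the value 0x6, so ordinary integer constants match the usage better and avoid enum/integer comparison warnings. A sketch of how such mask/value pairs are typically tested; the helper below is illustrative, not the driver's real code:

#define TS_MASK       0x6
#define TS1_PORT      0x4
#define PWR_SEL_MASK  0x40
#define BUS_POWER     0x40

/* Hypothetical helper: true if the config byte selects TS1 and bus power. */
static int is_bus_powered_ts1(unsigned char cfg)
{
	return (cfg & TS_MASK) == TS1_PORT && (cfg & PWR_SEL_MASK) == BUS_POWER;
}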
diff --git a/drivers/media/usb/dvb-usb-v2/af9015.c b/drivers/media/usb/dvb-usb-v2/af9015.c
index 4014f7d07330..3eddc40377bf 100644
--- a/drivers/media/usb/dvb-usb-v2/af9015.c
+++ b/drivers/media/usb/dvb-usb-v2/af9015.c
@@ -260,7 +260,7 @@ static u32 af9015_i2c_func(struct i2c_adapter *adapter)
return I2C_FUNC_I2C;
}
-static struct i2c_algorithm af9015_i2c_algo = {
+static const struct i2c_algorithm af9015_i2c_algo = {
.master_xfer = af9015_i2c_xfer,
.functionality = af9015_i2c_func,
};
diff --git a/drivers/media/usb/dvb-usb-v2/af9035.c b/drivers/media/usb/dvb-usb-v2/af9035.c
index 218f712f56b1..17062672ea06 100644
--- a/drivers/media/usb/dvb-usb-v2/af9035.c
+++ b/drivers/media/usb/dvb-usb-v2/af9035.c
@@ -483,7 +483,7 @@ static u32 af9035_i2c_functionality(struct i2c_adapter *adapter)
return I2C_FUNC_I2C;
}
-static struct i2c_algorithm af9035_i2c_algo = {
+static const struct i2c_algorithm af9035_i2c_algo = {
.master_xfer = af9035_i2c_master_xfer,
.functionality = af9035_i2c_functionality,
};
diff --git a/drivers/media/usb/dvb-usb-v2/anysee.c b/drivers/media/usb/dvb-usb-v2/anysee.c
index bea12cdc85e8..64bddca5303c 100644
--- a/drivers/media/usb/dvb-usb-v2/anysee.c
+++ b/drivers/media/usb/dvb-usb-v2/anysee.c
@@ -235,7 +235,7 @@ static u32 anysee_i2c_func(struct i2c_adapter *adapter)
return I2C_FUNC_I2C;
}
-static struct i2c_algorithm anysee_i2c_algo = {
+static const struct i2c_algorithm anysee_i2c_algo = {
.master_xfer = anysee_master_xfer,
.functionality = anysee_i2c_func,
};
diff --git a/drivers/media/usb/dvb-usb-v2/au6610.c b/drivers/media/usb/dvb-usb-v2/au6610.c
index be223fc8aa14..c20a9469f564 100644
--- a/drivers/media/usb/dvb-usb-v2/au6610.c
+++ b/drivers/media/usb/dvb-usb-v2/au6610.c
@@ -115,7 +115,7 @@ static u32 au6610_i2c_func(struct i2c_adapter *adapter)
return I2C_FUNC_I2C;
}
-static struct i2c_algorithm au6610_i2c_algo = {
+static const struct i2c_algorithm au6610_i2c_algo = {
.master_xfer = au6610_i2c_xfer,
.functionality = au6610_i2c_func,
};
diff --git a/drivers/media/usb/dvb-usb-v2/az6007.c b/drivers/media/usb/dvb-usb-v2/az6007.c
index 2410054ddb2c..65ef045b74ca 100644
--- a/drivers/media/usb/dvb-usb-v2/az6007.c
+++ b/drivers/media/usb/dvb-usb-v2/az6007.c
@@ -838,7 +838,7 @@ static u32 az6007_i2c_func(struct i2c_adapter *adapter)
return I2C_FUNC_I2C;
}
-static struct i2c_algorithm az6007_i2c_algo = {
+static const struct i2c_algorithm az6007_i2c_algo = {
.master_xfer = az6007_i2c_xfer,
.functionality = az6007_i2c_func,
};
diff --git a/drivers/media/usb/dvb-usb-v2/ce6230.c b/drivers/media/usb/dvb-usb-v2/ce6230.c
index d3b5cb4a24da..7ebaf3ee4491 100644
--- a/drivers/media/usb/dvb-usb-v2/ce6230.c
+++ b/drivers/media/usb/dvb-usb-v2/ce6230.c
@@ -154,7 +154,7 @@ static u32 ce6230_i2c_functionality(struct i2c_adapter *adapter)
return I2C_FUNC_I2C;
}
-static struct i2c_algorithm ce6230_i2c_algorithm = {
+static const struct i2c_algorithm ce6230_i2c_algorithm = {
.master_xfer = ce6230_i2c_master_xfer,
.functionality = ce6230_i2c_functionality,
};
diff --git a/drivers/media/usb/dvb-usb-v2/dvb_usb.h b/drivers/media/usb/dvb-usb-v2/dvb_usb.h
index 288c15a7d72b..ecdc20d45132 100644
--- a/drivers/media/usb/dvb-usb-v2/dvb_usb.h
+++ b/drivers/media/usb/dvb-usb-v2/dvb_usb.h
@@ -243,7 +243,7 @@ struct dvb_usb_device_properties {
int (*download_firmware) (struct dvb_usb_device *,
const struct firmware *);
- struct i2c_algorithm *i2c_algo;
+ const struct i2c_algorithm *i2c_algo;
unsigned int num_adapters;
int (*get_adapter_count) (struct dvb_usb_device *);
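Const-qualifying the per-driver i2c_algorithm tables (af9015, af9035, anysee, au6610, and the dvb-usb drivers further down) works only because the dvb_usb_device_properties field that stores the pointer is changed to const struct i2c_algorithm * as well; the i2c core already keeps a const pointer in struct i2c_adapter. A small sketch of that const-propagation pattern with made-up names:

/* Made-up ops table and properties struct; the point is that the pointer
 * field must be const-qualified for a const table to be assignable. */
struct demo_ops {
	int (*xfer)(void *ctx);
};

static int demo_xfer(void *ctx) { (void)ctx; return 0; }

static const struct demo_ops demo_ops_table = { .xfer = demo_xfer };

struct demo_properties {
	const struct demo_ops *ops;
};

static const struct demo_properties demo_props = { .ops = &demo_ops_table };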
diff --git a/drivers/media/usb/dvb-usb-v2/dvbsky.c b/drivers/media/usb/dvb-usb-v2/dvbsky.c
index 1221c924312a..ceac0ea21dab 100644
--- a/drivers/media/usb/dvb-usb-v2/dvbsky.c
+++ b/drivers/media/usb/dvb-usb-v2/dvbsky.c
@@ -169,7 +169,7 @@ static u32 dvbsky_i2c_func(struct i2c_adapter *adapter)
return I2C_FUNC_I2C;
}
-static struct i2c_algorithm dvbsky_i2c_algo = {
+static const struct i2c_algorithm dvbsky_i2c_algo = {
.master_xfer = dvbsky_i2c_xfer,
.functionality = dvbsky_i2c_func,
};
diff --git a/drivers/media/usb/dvb-usb-v2/ec168.c b/drivers/media/usb/dvb-usb-v2/ec168.c
index 0e4773fc025c..973b32356b17 100644
--- a/drivers/media/usb/dvb-usb-v2/ec168.c
+++ b/drivers/media/usb/dvb-usb-v2/ec168.c
@@ -176,7 +176,7 @@ static u32 ec168_i2c_func(struct i2c_adapter *adapter)
return I2C_FUNC_I2C;
}
-static struct i2c_algorithm ec168_i2c_algo = {
+static const struct i2c_algorithm ec168_i2c_algo = {
.master_xfer = ec168_i2c_xfer,
.functionality = ec168_i2c_func,
};
diff --git a/drivers/media/usb/dvb-usb-v2/gl861.c b/drivers/media/usb/dvb-usb-v2/gl861.c
index c71e7b93476d..0538170ccf29 100644
--- a/drivers/media/usb/dvb-usb-v2/gl861.c
+++ b/drivers/media/usb/dvb-usb-v2/gl861.c
@@ -162,7 +162,7 @@ static u32 gl861_i2c_functionality(struct i2c_adapter *adapter)
return I2C_FUNC_I2C;
}
-static struct i2c_algorithm gl861_i2c_algo = {
+static const struct i2c_algorithm gl861_i2c_algo = {
.master_xfer = gl861_i2c_master_xfer,
.functionality = gl861_i2c_functionality,
};
diff --git a/drivers/media/usb/dvb-usb-v2/lmedm04.c b/drivers/media/usb/dvb-usb-v2/lmedm04.c
index f0537b741d13..0c510035805b 100644
--- a/drivers/media/usb/dvb-usb-v2/lmedm04.c
+++ b/drivers/media/usb/dvb-usb-v2/lmedm04.c
@@ -559,7 +559,7 @@ static u32 lme2510_i2c_func(struct i2c_adapter *adapter)
return I2C_FUNC_I2C;
}
-static struct i2c_algorithm lme2510_i2c_algo = {
+static const struct i2c_algorithm lme2510_i2c_algo = {
.master_xfer = lme2510_i2c_xfer,
.functionality = lme2510_i2c_func,
};
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf.c b/drivers/media/usb/dvb-usb-v2/mxl111sf.c
index cd5861a30b6f..870ac3c8b085 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf.c
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf.c
@@ -911,7 +911,7 @@ static u32 mxl111sf_i2c_func(struct i2c_adapter *adapter)
return I2C_FUNC_I2C;
}
-static struct i2c_algorithm mxl111sf_i2c_algo = {
+static const struct i2c_algorithm mxl111sf_i2c_algo = {
.master_xfer = mxl111sf_i2c_xfer,
.functionality = mxl111sf_i2c_func,
#ifdef NEED_ALGO_CONTROL
diff --git a/drivers/media/usb/dvb-usb-v2/rtl28xxu.c b/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
index f7884bb56fcc..487c6ab784ab 100644
--- a/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
+++ b/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
@@ -290,7 +290,7 @@ static u32 rtl28xxu_i2c_func(struct i2c_adapter *adapter)
return I2C_FUNC_I2C;
}
-static struct i2c_algorithm rtl28xxu_i2c_algo = {
+static const struct i2c_algorithm rtl28xxu_i2c_algo = {
.master_xfer = rtl28xxu_i2c_xfer,
.functionality = rtl28xxu_i2c_func,
};
diff --git a/drivers/media/usb/dvb-usb/a800.c b/drivers/media/usb/dvb-usb/a800.c
index 5f294784923c..c5f95e48f1d5 100644
--- a/drivers/media/usb/dvb-usb/a800.c
+++ b/drivers/media/usb/dvb-usb/a800.c
@@ -77,7 +77,7 @@ enum {
AVERMEDIA_DVBT_USB2_WARM,
};
-static struct usb_device_id a800_table[] = {
+static const struct usb_device_id a800_table[] = {
DVB_USB_DEV(AVERMEDIA, AVERMEDIA_DVBT_USB2_COLD),
DVB_USB_DEV(AVERMEDIA, AVERMEDIA_DVBT_USB2_WARM),
{ }
diff --git a/drivers/media/usb/dvb-usb/af9005.c b/drivers/media/usb/dvb-usb/af9005.c
index 13604e6acdb8..a4bede7e8a1d 100644
--- a/drivers/media/usb/dvb-usb/af9005.c
+++ b/drivers/media/usb/dvb-usb/af9005.c
@@ -445,7 +445,7 @@ static u32 af9005_i2c_func(struct i2c_adapter *adapter)
return I2C_FUNC_I2C;
}
-static struct i2c_algorithm af9005_i2c_algo = {
+static const struct i2c_algorithm af9005_i2c_algo = {
.master_xfer = af9005_i2c_xfer,
.functionality = af9005_i2c_func,
};
@@ -1005,7 +1005,7 @@ enum {
ANSONIC_DVBT_USB,
};
-static struct usb_device_id af9005_usb_table[] = {
+static const struct usb_device_id af9005_usb_table[] = {
DVB_USB_DEV(AFATECH, AFATECH_AF9005),
DVB_USB_DEV(TERRATEC, TERRATEC_CINERGY_T_USB_XE),
DVB_USB_DEV(ANSONIC, ANSONIC_DVBT_USB),
diff --git a/drivers/media/usb/dvb-usb/az6027.c b/drivers/media/usb/dvb-usb/az6027.c
index 2bc27710427d..056935d3cbd6 100644
--- a/drivers/media/usb/dvb-usb/az6027.c
+++ b/drivers/media/usb/dvb-usb/az6027.c
@@ -1062,7 +1062,7 @@ static u32 az6027_i2c_func(struct i2c_adapter *adapter)
return I2C_FUNC_I2C;
}
-static struct i2c_algorithm az6027_i2c_algo = {
+static const struct i2c_algorithm az6027_i2c_algo = {
.master_xfer = az6027_i2c_xfer,
.functionality = az6027_i2c_func,
};
@@ -1107,7 +1107,7 @@ enum {
ELGATO_EYETV_SAT_V3,
};
-static struct usb_device_id az6027_usb_table[] = {
+static const struct usb_device_id az6027_usb_table[] = {
DVB_USB_DEV(AZUREWAVE, AZUREWAVE_AZ6027),
DVB_USB_DEV(TERRATEC, TERRATEC_DVBS2CI_V1),
DVB_USB_DEV(TERRATEC, TERRATEC_DVBS2CI_V2),
diff --git a/drivers/media/usb/dvb-usb/cinergyT2-core.c b/drivers/media/usb/dvb-usb/cinergyT2-core.c
index 4926c954e29a..d86c279e2dce 100644
--- a/drivers/media/usb/dvb-usb/cinergyT2-core.c
+++ b/drivers/media/usb/dvb-usb/cinergyT2-core.c
@@ -197,7 +197,7 @@ enum {
TERRATEC_CINERGY_T2,
};
-static struct usb_device_id cinergyt2_usb_table[] = {
+static const struct usb_device_id cinergyt2_usb_table[] = {
DVB_USB_DEV(TERRATEC, TERRATEC_CINERGY_T2),
{ }
};
diff --git a/drivers/media/usb/dvb-usb/cxusb-analog.c b/drivers/media/usb/dvb-usb/cxusb-analog.c
index 8253046cd6e6..3bbee1fcbc8d 100644
--- a/drivers/media/usb/dvb-usb/cxusb-analog.c
+++ b/drivers/media/usb/dvb-usb/cxusb-analog.c
@@ -817,8 +817,8 @@ static int cxusb_medion_v_start_streaming(struct vb2_queue *q,
* doing a large continuous allocation when (if)
* s-g isochronous USB transfers are supported
*/
- streambuf = kmalloc(npackets * CXUSB_VIDEO_PKT_SIZE,
- GFP_KERNEL);
+ streambuf = kmalloc_array(npackets, CXUSB_VIDEO_PKT_SIZE,
+ GFP_KERNEL);
if (!streambuf) {
if (i < 2) {
ret = -ENOMEM;
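The cxusb-analog change replaces an open-coded npackets * CXUSB_VIDEO_PKT_SIZE multiplication inside kmalloc() with kmalloc_array(), which returns NULL if the product would overflow instead of silently allocating a short buffer. A hedged one-function sketch of the same idea:

#include <linux/slab.h>

/* Allocate room for n fixed-size packets; overflow in n * pkt_size fails
 * cleanly with NULL rather than wrapping around. */
static void *alloc_packet_buf(size_t n, size_t pkt_size)
{
	return kmalloc_array(n, pkt_size, GFP_KERNEL);
}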
diff --git a/drivers/media/usb/dvb-usb/cxusb.c b/drivers/media/usb/dvb-usb/cxusb.c
index 1d98d3465e28..f44529b40989 100644
--- a/drivers/media/usb/dvb-usb/cxusb.c
+++ b/drivers/media/usb/dvb-usb/cxusb.c
@@ -78,7 +78,7 @@ enum cxusb_table_index {
NR__cxusb_table_index
};
-static struct usb_device_id cxusb_table[];
+static const struct usb_device_id cxusb_table[];
int cxusb_ctrl_msg(struct dvb_usb_device *d,
u8 cmd, const u8 *wbuf, int wlen, u8 *rbuf, int rlen)
@@ -287,7 +287,7 @@ static u32 cxusb_i2c_func(struct i2c_adapter *adapter)
return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}
-static struct i2c_algorithm cxusb_i2c_algo = {
+static const struct i2c_algorithm cxusb_i2c_algo = {
.master_xfer = cxusb_i2c_xfer,
.functionality = cxusb_i2c_func,
};
@@ -1692,7 +1692,7 @@ static void cxusb_disconnect(struct usb_interface *intf)
dvb_usb_device_exit(intf);
}
-static struct usb_device_id cxusb_table[] = {
+static const struct usb_device_id cxusb_table[] = {
DVB_USB_DEV(MEDION, MEDION_MD95700),
DVB_USB_DEV(DVICO, DVICO_BLUEBIRD_LG064F_COLD),
DVB_USB_DEV(DVICO, DVICO_BLUEBIRD_LG064F_WARM),
diff --git a/drivers/media/usb/dvb-usb/dibusb-mb.c b/drivers/media/usb/dvb-usb/dibusb-mb.c
index 2cd88cab4c98..431766f19931 100644
--- a/drivers/media/usb/dvb-usb/dibusb-mb.c
+++ b/drivers/media/usb/dvb-usb/dibusb-mb.c
@@ -155,7 +155,7 @@ enum {
ULTIMA_TVBOX_ANCHOR_COLD,
};
-static struct usb_device_id dibusb_dib3000mb_table[] = {
+static const struct usb_device_id dibusb_dib3000mb_table[] = {
DVB_USB_DEV(WIDEVIEW, WIDEVIEW_DVBT_USB_COLD),
DVB_USB_DEV(WIDEVIEW, WIDEVIEW_DVBT_USB_WARM),
DVB_USB_DEV(COMPRO, COMPRO_DVBU2000_COLD),
diff --git a/drivers/media/usb/dvb-usb/dibusb-mc.c b/drivers/media/usb/dvb-usb/dibusb-mc.c
index 00cb016f6266..01eece2687d6 100644
--- a/drivers/media/usb/dvb-usb/dibusb-mc.c
+++ b/drivers/media/usb/dvb-usb/dibusb-mc.c
@@ -43,7 +43,7 @@ enum {
HUMAX_DVB_T_STICK_HIGH_SPEED_WARM,
};
-static struct usb_device_id dibusb_dib3000mc_table[] = {
+static const struct usb_device_id dibusb_dib3000mc_table[] = {
DVB_USB_DEV(DIBCOM, DIBCOM_MOD3001_COLD),
DVB_USB_DEV(DIBCOM, DIBCOM_MOD3001_WARM),
DVB_USB_DEV(ULTIMA_ELECTRONIC, ULTIMA_TVBOX_USB2_COLD),
diff --git a/drivers/media/usb/dvb-usb/digitv.c b/drivers/media/usb/dvb-usb/digitv.c
index 32134be16914..ab229ab1a858 100644
--- a/drivers/media/usb/dvb-usb/digitv.c
+++ b/drivers/media/usb/dvb-usb/digitv.c
@@ -88,7 +88,7 @@ static u32 digitv_i2c_func(struct i2c_adapter *adapter)
return I2C_FUNC_I2C;
}
-static struct i2c_algorithm digitv_i2c_algo = {
+static const struct i2c_algorithm digitv_i2c_algo = {
.master_xfer = digitv_i2c_xfer,
.functionality = digitv_i2c_func,
};
@@ -299,7 +299,7 @@ enum {
ANCHOR_NEBULA_DIGITV,
};
-static struct usb_device_id digitv_table[] = {
+static const struct usb_device_id digitv_table[] = {
DVB_USB_DEV(ANCHOR, ANCHOR_NEBULA_DIGITV),
{ }
};
diff --git a/drivers/media/usb/dvb-usb/dtt200u.c b/drivers/media/usb/dvb-usb/dtt200u.c
index e6ee56b3a9dd..83a69df384f2 100644
--- a/drivers/media/usb/dvb-usb/dtt200u.c
+++ b/drivers/media/usb/dvb-usb/dtt200u.c
@@ -171,7 +171,7 @@ enum {
MIGLIA_WT220U_ZAP250_COLD,
};
-static struct usb_device_id dtt200u_usb_table[] = {
+static const struct usb_device_id dtt200u_usb_table[] = {
DVB_USB_DEV(WIDEVIEW, WIDEVIEW_DTT200U_COLD),
DVB_USB_DEV(WIDEVIEW, WIDEVIEW_DTT200U_WARM),
DVB_USB_DEV(WIDEVIEW, WIDEVIEW_WT220U_COLD),
diff --git a/drivers/media/usb/dvb-usb/dtv5100.c b/drivers/media/usb/dvb-usb/dtv5100.c
index 56c9d521a34a..3d85c6f7f6ec 100644
--- a/drivers/media/usb/dvb-usb/dtv5100.c
+++ b/drivers/media/usb/dvb-usb/dtv5100.c
@@ -97,7 +97,7 @@ static u32 dtv5100_i2c_func(struct i2c_adapter *adapter)
return I2C_FUNC_I2C;
}
-static struct i2c_algorithm dtv5100_i2c_algo = {
+static const struct i2c_algorithm dtv5100_i2c_algo = {
.master_xfer = dtv5100_i2c_xfer,
.functionality = dtv5100_i2c_func,
};
@@ -166,7 +166,7 @@ enum {
AME_DTV5100,
};
-static struct usb_device_id dtv5100_table[] = {
+static const struct usb_device_id dtv5100_table[] = {
DVB_USB_DEV(AME, AME_DTV5100),
{ }
};
diff --git a/drivers/media/usb/dvb-usb/dvb-usb.h b/drivers/media/usb/dvb-usb/dvb-usb.h
index cbb0541d4dc1..550006a8d86f 100644
--- a/drivers/media/usb/dvb-usb/dvb-usb.h
+++ b/drivers/media/usb/dvb-usb/dvb-usb.h
@@ -73,8 +73,8 @@ struct dvb_usb_device_description {
const char *name;
#define DVB_USB_ID_MAX_NUM 15
- struct usb_device_id *cold_ids[DVB_USB_ID_MAX_NUM];
- struct usb_device_id *warm_ids[DVB_USB_ID_MAX_NUM];
+ const struct usb_device_id *cold_ids[DVB_USB_ID_MAX_NUM];
+ const struct usb_device_id *warm_ids[DVB_USB_ID_MAX_NUM];
};
static inline u8 rc5_custom(struct rc_map_table *key)
@@ -309,7 +309,7 @@ struct dvb_usb_device_properties {
struct dvb_rc core;
} rc;
- struct i2c_algorithm *i2c_algo;
+ const struct i2c_algorithm *i2c_algo;
int generic_bulk_ctrl_endpoint;
int generic_bulk_ctrl_endpoint_response;
diff --git a/drivers/media/usb/dvb-usb/dw2102.c b/drivers/media/usb/dvb-usb/dw2102.c
index 79e2ccf974c9..4fecf2f965e9 100644
--- a/drivers/media/usb/dvb-usb/dw2102.c
+++ b/drivers/media/usb/dvb-usb/dw2102.c
@@ -839,37 +839,37 @@ static u32 dw210x_i2c_func(struct i2c_adapter *adapter)
return I2C_FUNC_I2C;
}
-static struct i2c_algorithm dw2102_i2c_algo = {
+static const struct i2c_algorithm dw2102_i2c_algo = {
.master_xfer = dw2102_i2c_transfer,
.functionality = dw210x_i2c_func,
};
-static struct i2c_algorithm dw2102_serit_i2c_algo = {
+static const struct i2c_algorithm dw2102_serit_i2c_algo = {
.master_xfer = dw2102_serit_i2c_transfer,
.functionality = dw210x_i2c_func,
};
-static struct i2c_algorithm dw2102_earda_i2c_algo = {
+static const struct i2c_algorithm dw2102_earda_i2c_algo = {
.master_xfer = dw2102_earda_i2c_transfer,
.functionality = dw210x_i2c_func,
};
-static struct i2c_algorithm dw2104_i2c_algo = {
+static const struct i2c_algorithm dw2104_i2c_algo = {
.master_xfer = dw2104_i2c_transfer,
.functionality = dw210x_i2c_func,
};
-static struct i2c_algorithm dw3101_i2c_algo = {
+static const struct i2c_algorithm dw3101_i2c_algo = {
.master_xfer = dw3101_i2c_transfer,
.functionality = dw210x_i2c_func,
};
-static struct i2c_algorithm s6x0_i2c_algo = {
+static const struct i2c_algorithm s6x0_i2c_algo = {
.master_xfer = s6x0_i2c_transfer,
.functionality = dw210x_i2c_func,
};
-static struct i2c_algorithm su3000_i2c_algo = {
+static const struct i2c_algorithm su3000_i2c_algo = {
.master_xfer = su3000_i2c_transfer,
.functionality = dw210x_i2c_func,
};
@@ -1836,7 +1836,7 @@ enum dw2102_table_entry {
TEVII_S662
};
-static struct usb_device_id dw2102_table[] = {
+static const struct usb_device_id dw2102_table[] = {
DVB_USB_DEV(CYPRESS, CYPRESS_DW2102),
DVB_USB_DEV(CYPRESS, CYPRESS_DW2101),
DVB_USB_DEV(CYPRESS, CYPRESS_DW2104),
diff --git a/drivers/media/usb/dvb-usb/gp8psk.c b/drivers/media/usb/dvb-usb/gp8psk.c
index 4cd21bb8805e..96a255500b38 100644
--- a/drivers/media/usb/dvb-usb/gp8psk.c
+++ b/drivers/media/usb/dvb-usb/gp8psk.c
@@ -319,7 +319,7 @@ enum {
GENPIX_SKYWALKER_CW3K,
};
-static struct usb_device_id gp8psk_usb_table[] = {
+static const struct usb_device_id gp8psk_usb_table[] = {
DVB_USB_DEV(GENPIX, GENPIX_8PSK_REV_1_COLD),
DVB_USB_DEV(GENPIX, GENPIX_8PSK_REV_1_WARM),
DVB_USB_DEV(GENPIX, GENPIX_8PSK_REV_2),
diff --git a/drivers/media/usb/dvb-usb/m920x.c b/drivers/media/usb/dvb-usb/m920x.c
index a2054b1b100f..45337ba0a0a3 100644
--- a/drivers/media/usb/dvb-usb/m920x.c
+++ b/drivers/media/usb/dvb-usb/m920x.c
@@ -319,7 +319,7 @@ static u32 m920x_i2c_func(struct i2c_adapter *adapter)
return I2C_FUNC_I2C;
}
-static struct i2c_algorithm m920x_i2c_algo = {
+static const struct i2c_algorithm m920x_i2c_algo = {
.master_xfer = m920x_i2c_xfer,
.functionality = m920x_i2c_func,
};
@@ -909,7 +909,7 @@ enum {
AZUREWAVE_TWINHAN_VP7049,
};
-static struct usb_device_id m920x_table[] = {
+static const struct usb_device_id m920x_table[] = {
DVB_USB_DEV(MSI, MSI_MEGASKY580),
DVB_USB_DEV(ANUBIS_ELECTRONIC, ANUBIS_MSI_DIGI_VOX_MINI_II),
DVB_USB_DEV(ANUBIS_ELECTRONIC, ANUBIS_LIFEVIEW_TV_WALKER_TWIN_COLD),
diff --git a/drivers/media/usb/dvb-usb/nova-t-usb2.c b/drivers/media/usb/dvb-usb/nova-t-usb2.c
index 4782d0780913..2e5cbfacbeed 100644
--- a/drivers/media/usb/dvb-usb/nova-t-usb2.c
+++ b/drivers/media/usb/dvb-usb/nova-t-usb2.c
@@ -165,7 +165,7 @@ enum {
HAUPPAUGE_WINTV_NOVA_T_USB2_WARM,
};
-static struct usb_device_id nova_t_table[] = {
+static const struct usb_device_id nova_t_table[] = {
DVB_USB_DEV(HAUPPAUGE, HAUPPAUGE_WINTV_NOVA_T_USB2_COLD),
DVB_USB_DEV(HAUPPAUGE, HAUPPAUGE_WINTV_NOVA_T_USB2_WARM),
{ }
diff --git a/drivers/media/usb/dvb-usb/opera1.c b/drivers/media/usb/dvb-usb/opera1.c
index 268f05fc8691..3c79cc6848b4 100644
--- a/drivers/media/usb/dvb-usb/opera1.c
+++ b/drivers/media/usb/dvb-usb/opera1.c
@@ -155,7 +155,7 @@ static u32 opera1_i2c_func(struct i2c_adapter *adapter)
return I2C_FUNC_I2C;
}
-static struct i2c_algorithm opera1_i2c_algo = {
+static const struct i2c_algorithm opera1_i2c_algo = {
.master_xfer = opera1_i2c_xfer,
.functionality = opera1_i2c_func,
};
@@ -425,7 +425,7 @@ enum {
OPERA1_WARM,
};
-static struct usb_device_id opera1_table[] = {
+static const struct usb_device_id opera1_table[] = {
DVB_USB_DEV(CYPRESS, CYPRESS_OPERA1_COLD),
DVB_USB_DEV(OPERA1, OPERA1_WARM),
{ }
diff --git a/drivers/media/usb/dvb-usb/pctv452e.c b/drivers/media/usb/dvb-usb/pctv452e.c
index 2aab49003493..5094de9a312e 100644
--- a/drivers/media/usb/dvb-usb/pctv452e.c
+++ b/drivers/media/usb/dvb-usb/pctv452e.c
@@ -906,14 +906,14 @@ static struct stb6100_config stb6100_config = {
};
-static struct i2c_algorithm pctv452e_i2c_algo = {
+static const struct i2c_algorithm pctv452e_i2c_algo = {
.master_xfer = pctv452e_i2c_xfer,
.functionality = pctv452e_i2c_func
};
static int pctv452e_frontend_attach(struct dvb_usb_adapter *a)
{
- struct usb_device_id *id;
+ const struct usb_device_id *id;
a->fe_adap[0].fe = dvb_attach(stb0899_attach, &stb0899_config,
&a->dev->i2c_adap);
@@ -959,7 +959,7 @@ enum {
TECHNOTREND_CONNECT_S2_3650_CI,
};
-static struct usb_device_id pctv452e_usb_table[] = {
+static const struct usb_device_id pctv452e_usb_table[] = {
DVB_USB_DEV(PINNACLE, PINNACLE_PCTV_452E),
DVB_USB_DEV(TECHNOTREND, TECHNOTREND_CONNECT_S2_3600),
DVB_USB_DEV(TECHNOTREND, TECHNOTREND_CONNECT_S2_3650_CI),
diff --git a/drivers/media/usb/dvb-usb/technisat-usb2.c b/drivers/media/usb/dvb-usb/technisat-usb2.c
index df90c6c5f3b9..1e43aab2bc27 100644
--- a/drivers/media/usb/dvb-usb/technisat-usb2.c
+++ b/drivers/media/usb/dvb-usb/technisat-usb2.c
@@ -199,7 +199,7 @@ static u32 technisat_usb2_i2c_func(struct i2c_adapter *adapter)
return I2C_FUNC_I2C;
}
-static struct i2c_algorithm technisat_usb2_i2c_algo = {
+static const struct i2c_algorithm technisat_usb2_i2c_algo = {
.master_xfer = technisat_usb2_i2c_xfer,
.functionality = technisat_usb2_i2c_func,
};
@@ -693,7 +693,7 @@ enum {
TECHNISAT_USB2_DVB_S2,
};
-static struct usb_device_id technisat_usb2_id_table[] = {
+static const struct usb_device_id technisat_usb2_id_table[] = {
DVB_USB_DEV(TECHNISAT, TECHNISAT_USB2_DVB_S2),
{ }
};
diff --git a/drivers/media/usb/dvb-usb/ttusb2.c b/drivers/media/usb/dvb-usb/ttusb2.c
index 373ffa7f641e..acde6149d278 100644
--- a/drivers/media/usb/dvb-usb/ttusb2.c
+++ b/drivers/media/usb/dvb-usb/ttusb2.c
@@ -434,7 +434,7 @@ static u32 ttusb2_i2c_func(struct i2c_adapter *adapter)
return I2C_FUNC_I2C;
}
-static struct i2c_algorithm ttusb2_i2c_algo = {
+static const struct i2c_algorithm ttusb2_i2c_algo = {
.master_xfer = ttusb2_i2c_xfer,
.functionality = ttusb2_i2c_func,
};
@@ -638,7 +638,7 @@ enum {
TECHNOTREND_CONNECT_S2400_8KEEPROM,
};
-static struct usb_device_id ttusb2_table[] = {
+static const struct usb_device_id ttusb2_table[] = {
DVB_USB_DEV(PINNACLE, PINNACLE_PCTV_400E),
DVB_USB_DEV(PINNACLE, PINNACLE_PCTV_450E),
DVB_USB_DEV(TECHNOTREND, TECHNOTREND_CONNECT_S2400),
diff --git a/drivers/media/usb/dvb-usb/umt-010.c b/drivers/media/usb/dvb-usb/umt-010.c
index 464699b0b75b..8f23f92946d4 100644
--- a/drivers/media/usb/dvb-usb/umt-010.c
+++ b/drivers/media/usb/dvb-usb/umt-010.c
@@ -86,7 +86,7 @@ enum {
HANFTEK_UMT_010_WARM,
};
-static struct usb_device_id umt_table[] = {
+static const struct usb_device_id umt_table[] = {
DVB_USB_DEV(HANFTEK, HANFTEK_UMT_010_COLD),
DVB_USB_DEV(HANFTEK, HANFTEK_UMT_010_WARM),
{ }
diff --git a/drivers/media/usb/dvb-usb/vp702x.c b/drivers/media/usb/dvb-usb/vp702x.c
index 5b6740cbd1d1..034b0652b9a1 100644
--- a/drivers/media/usb/dvb-usb/vp702x.c
+++ b/drivers/media/usb/dvb-usb/vp702x.c
@@ -375,7 +375,7 @@ enum {
VISIONPLUS_VP7020_WARM,
};
-static struct usb_device_id vp702x_usb_table[] = {
+static const struct usb_device_id vp702x_usb_table[] = {
DVB_USB_DEV(VISIONPLUS, VISIONPLUS_VP7021_COLD),
// DVB_USB_DEV(VISIONPLUS, VISIONPLUS_VP7020_COLD),
// DVB_USB_DEV(VISIONPLUS, VISIONPLUS_VP7020_WARM),
diff --git a/drivers/media/usb/dvb-usb/vp7045.c b/drivers/media/usb/dvb-usb/vp7045.c
index 1dc2b18d44d8..5224c3233f8c 100644
--- a/drivers/media/usb/dvb-usb/vp7045.c
+++ b/drivers/media/usb/dvb-usb/vp7045.c
@@ -179,7 +179,7 @@ enum {
VISIONPLUS_TINYUSB2_WARM,
};
-static struct usb_device_id vp7045_usb_table[] = {
+static const struct usb_device_id vp7045_usb_table[] = {
DVB_USB_DEV(VISIONPLUS, VISIONPLUS_VP7045_COLD),
DVB_USB_DEV(VISIONPLUS, VISIONPLUS_VP7045_WARM),
DVB_USB_DEV(VISIONPLUS, VISIONPLUS_TINYUSB2_COLD),
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c b/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c
index d608b793fa84..ad38e1240541 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c
@@ -497,8 +497,8 @@ static int pvr2_streamoff(struct file *file, void *priv, enum v4l2_buf_type i)
return pvr2_hdw_set_streaming(hdw, 0);
}
-static int pvr2_queryctrl(struct file *file, void *priv,
- struct v4l2_queryctrl *vc)
+static int pvr2_query_ext_ctrl(struct file *file, void *priv,
+ struct v4l2_query_ext_ctrl *vc)
{
struct pvr2_v4l2_fh *fh = file->private_data;
struct pvr2_hdw *hdw = fh->channel.mc_head->hdw;
@@ -521,13 +521,16 @@ static int pvr2_queryctrl(struct file *file, void *priv,
}
pvr2_trace(PVR2_TRACE_V4LIOCTL,
- "QUERYCTRL id=0x%x mapping name=%s (%s)",
+ "QUERYEXTCTRL id=0x%x mapping name=%s (%s)",
vc->id, pvr2_ctrl_get_name(cptr),
pvr2_ctrl_get_desc(cptr));
strscpy(vc->name, pvr2_ctrl_get_desc(cptr), sizeof(vc->name));
vc->flags = pvr2_ctrl_get_v4lflags(cptr);
pvr2_ctrl_get_def(cptr, &val);
vc->default_value = val;
+ vc->nr_of_dims = 0;
+ vc->elems = 1;
+ vc->elem_size = 4;
switch (pvr2_ctrl_get_type(cptr)) {
case pvr2_ctl_enum:
vc->type = V4L2_CTRL_TYPE_MENU;
@@ -549,7 +552,7 @@ static int pvr2_queryctrl(struct file *file, void *priv,
break;
default:
pvr2_trace(PVR2_TRACE_V4LIOCTL,
- "QUERYCTRL id=0x%x name=%s not mappable",
+ "QUERYEXTCTRL id=0x%x name=%s not mappable",
vc->id, pvr2_ctrl_get_name(cptr));
return -EINVAL;
}
@@ -571,31 +574,6 @@ static int pvr2_querymenu(struct file *file, void *priv, struct v4l2_querymenu *
return ret;
}
-static int pvr2_g_ctrl(struct file *file, void *priv, struct v4l2_control *vc)
-{
- struct pvr2_v4l2_fh *fh = file->private_data;
- struct pvr2_hdw *hdw = fh->channel.mc_head->hdw;
- int val = 0;
- int ret;
-
- ret = pvr2_ctrl_get_value(pvr2_hdw_get_ctrl_v4l(hdw, vc->id),
- &val);
- vc->value = val;
- return ret;
-}
-
-static int pvr2_s_ctrl(struct file *file, void *priv, struct v4l2_control *vc)
-{
- struct pvr2_v4l2_fh *fh = file->private_data;
- struct pvr2_hdw *hdw = fh->channel.mc_head->hdw;
- int ret;
-
- ret = pvr2_ctrl_set_value(pvr2_hdw_get_ctrl_v4l(hdw, vc->id),
- vc->value);
- pvr2_hdw_commit_ctl(hdw);
- return ret;
-}
-
static int pvr2_g_ext_ctrls(struct file *file, void *priv,
struct v4l2_ext_controls *ctls)
{
@@ -812,10 +790,8 @@ static const struct v4l2_ioctl_ops pvr2_ioctl_ops = {
.vidioc_try_fmt_vid_cap = pvr2_try_fmt_vid_cap,
.vidioc_streamon = pvr2_streamon,
.vidioc_streamoff = pvr2_streamoff,
- .vidioc_queryctrl = pvr2_queryctrl,
+ .vidioc_query_ext_ctrl = pvr2_query_ext_ctrl,
.vidioc_querymenu = pvr2_querymenu,
- .vidioc_g_ctrl = pvr2_g_ctrl,
- .vidioc_s_ctrl = pvr2_s_ctrl,
.vidioc_g_ext_ctrls = pvr2_g_ext_ctrls,
.vidioc_s_ext_ctrls = pvr2_s_ext_ctrls,
.vidioc_try_ext_ctrls = pvr2_try_ext_ctrls,
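With the legacy queryctrl/g_ctrl/s_ctrl handlers removed, pvrusb2 relies on the v4l2 ioctl core to emulate those ioctls: VIDIOC_G_CTRL and VIDIOC_S_CTRL have long been serviced through the ext-ctrl handlers, and VIDIOC_QUERYCTRL is converted from the query_ext_ctrl path (see the v4l2_query_ext_ctrl_to_v4l2_queryctrl() helper added to v4l2-ctrls-api.c further down). The extra fields a scalar control has to report are small; a sketch, assuming the usual <linux/videodev2.h> definitions:

#include <linux/string.h>
#include <linux/videodev2.h>

/*
 * Sketch only: what a plain 32-bit integer control reports via
 * VIDIOC_QUERY_EXT_CTRL on top of the old v4l2_queryctrl fields.
 */
static void demo_fill_ext_ctrl(struct v4l2_query_ext_ctrl *qec)
{
        qec->type       = V4L2_CTRL_TYPE_INTEGER;
        qec->elems      = 1;            /* one element ...        */
        qec->elem_size  = sizeof(s32);  /* ... of four bytes      */
        qec->nr_of_dims = 0;            /* scalar, no dimensions  */
        memset(qec->dims, 0, sizeof(qec->dims));
}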
diff --git a/drivers/media/usb/pwc/pwc-if.c b/drivers/media/usb/pwc/pwc-if.c
index 3ec9eb5956ed..c6e5d031f068 100644
--- a/drivers/media/usb/pwc/pwc-if.c
+++ b/drivers/media/usb/pwc/pwc-if.c
@@ -56,7 +56,6 @@
#endif
#include <linux/vmalloc.h>
#include <asm/io.h>
-#include <linux/kernel.h> /* simple_strtol() */
#include "pwc.h"
#include "pwc-kiara.h"
diff --git a/drivers/media/usb/uvc/uvc_ctrl.c b/drivers/media/usb/uvc/uvc_ctrl.c
index 4e58476d305e..cbf19aa1d823 100644
--- a/drivers/media/usb/uvc/uvc_ctrl.c
+++ b/drivers/media/usb/uvc/uvc_ctrl.c
@@ -358,6 +358,24 @@ static const struct uvc_control_info uvc_ctrls[] = {
.flags = UVC_CTRL_FLAG_GET_CUR
| UVC_CTRL_FLAG_AUTO_UPDATE,
},
+ /*
+ * UVC_CTRL_FLAG_AUTO_UPDATE is needed because the RoI may get updated
+ * by sensors.
+ * "This RoI should be the same as specified in most recent SET_CUR
+ * except in the case where the ‘Auto Detect and Track’ and/or
+ * ‘Image Stabilization’ bit have been set."
+ * 4.2.2.1.20 Digital Region of Interest (ROI) Control
+ */
+ {
+ .entity = UVC_GUID_UVC_CAMERA,
+ .selector = UVC_CT_REGION_OF_INTEREST_CONTROL,
+ .index = 21,
+ .size = 10,
+ .flags = UVC_CTRL_FLAG_SET_CUR | UVC_CTRL_FLAG_GET_CUR
+ | UVC_CTRL_FLAG_GET_MIN | UVC_CTRL_FLAG_GET_MAX
+ | UVC_CTRL_FLAG_GET_DEF
+ | UVC_CTRL_FLAG_AUTO_UPDATE,
+ },
};
static const u32 uvc_control_classes[] = {
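The 10-byte size declared above matches the Digital Region of Interest control layout from UVC 1.5 section 4.2.2.1.20: four 16-bit corner coordinates followed by a 16-bit auto-controls bitmap. That is also why the mappings added further down split the control into an 8-byte rectangle at offset 0 and a 16-bit bitmask at bit offset 64. A sketch of the wire layout (field names taken from the specification, the struct name is hypothetical):

struct demo_uvc_roi_payload {
        __le16 top;             /* wROI_Top */
        __le16 left;            /* wROI_Left */
        __le16 bottom;          /* wROI_Bottom */
        __le16 right;           /* wROI_Right */
        __le16 auto_controls;   /* bmAutoControls */
} __packed;                     /* 10 bytes, matching .size = 10 above */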
@@ -367,6 +385,27 @@ static const u32 uvc_control_classes[] = {
static const int exposure_auto_mapping[] = { 2, 1, 4, 8 };
+static bool uvc_ctrl_mapping_is_compound(struct uvc_control_mapping *mapping)
+{
+ return mapping->v4l2_type >= V4L2_CTRL_COMPOUND_TYPES;
+}
+
+static s32 uvc_mapping_get_s32(struct uvc_control_mapping *mapping,
+ u8 query, const void *data_in)
+{
+ s32 data_out = 0;
+
+ mapping->get(mapping, query, data_in, sizeof(data_out), &data_out);
+
+ return data_out;
+}
+
+static void uvc_mapping_set_s32(struct uvc_control_mapping *mapping,
+ s32 data_in, void *data_out)
+{
+ mapping->set(mapping, sizeof(data_in), &data_in, data_out);
+}
+
/*
* This function translates the V4L2 menu index @idx, as exposed to userspace as
* the V4L2 control value, to the corresponding UVC control value used by the
@@ -405,58 +444,93 @@ uvc_mapping_get_menu_name(const struct uvc_control_mapping *mapping, u32 idx)
return v4l2_ctrl_get_menu(mapping->id)[idx];
}
-static s32 uvc_ctrl_get_zoom(struct uvc_control_mapping *mapping,
- u8 query, const u8 *data)
+static int uvc_ctrl_get_zoom(struct uvc_control_mapping *mapping, u8 query,
+ const void *uvc_in, size_t v4l2_size,
+ void *v4l2_out)
{
- s8 zoom = (s8)data[0];
+ u8 value = ((u8 *)uvc_in)[2];
+ s8 sign = ((s8 *)uvc_in)[0];
+ s32 *out = v4l2_out;
+
+ if (WARN_ON(v4l2_size != sizeof(s32)))
+ return -EINVAL;
switch (query) {
case UVC_GET_CUR:
- return (zoom == 0) ? 0 : (zoom > 0 ? data[2] : -data[2]);
+ *out = (sign == 0) ? 0 : (sign > 0 ? value : -value);
+ return 0;
case UVC_GET_MIN:
case UVC_GET_MAX:
case UVC_GET_RES:
case UVC_GET_DEF:
default:
- return data[2];
+ *out = value;
+ return 0;
}
}
-static void uvc_ctrl_set_zoom(struct uvc_control_mapping *mapping,
- s32 value, u8 *data)
+static int uvc_ctrl_set_zoom(struct uvc_control_mapping *mapping,
+ size_t v4l2_size, const void *v4l2_in,
+ void *uvc_out)
{
- data[0] = value == 0 ? 0 : (value > 0) ? 1 : 0xff;
- data[2] = min((int)abs(value), 0xff);
+ u8 *out = uvc_out;
+ s32 value;
+
+ if (WARN_ON(v4l2_size != sizeof(s32)))
+ return -EINVAL;
+
+ value = *(u32 *)v4l2_in;
+ out[0] = value == 0 ? 0 : (value > 0) ? 1 : 0xff;
+ out[2] = min_t(int, abs(value), 0xff);
+
+ return 0;
}
-static s32 uvc_ctrl_get_rel_speed(struct uvc_control_mapping *mapping,
- u8 query, const u8 *data)
+static int uvc_ctrl_get_rel_speed(struct uvc_control_mapping *mapping,
+ u8 query, const void *uvc_in,
+ size_t v4l2_size, void *v4l2_out)
{
unsigned int first = mapping->offset / 8;
- s8 rel = (s8)data[first];
+ u8 value = ((u8 *)uvc_in)[first + 1];
+ s8 sign = ((s8 *)uvc_in)[first];
+ s32 *out = v4l2_out;
+
+ if (WARN_ON(v4l2_size != sizeof(s32)))
+ return -EINVAL;
switch (query) {
case UVC_GET_CUR:
- return (rel == 0) ? 0 : (rel > 0 ? data[first+1]
- : -data[first+1]);
+ *out = (sign == 0) ? 0 : (sign > 0 ? value : -value);
+ return 0;
case UVC_GET_MIN:
- return -data[first+1];
+ *out = -value;
+ return 0;
case UVC_GET_MAX:
case UVC_GET_RES:
case UVC_GET_DEF:
default:
- return data[first+1];
+ *out = value;
+ return 0;
}
}
-static void uvc_ctrl_set_rel_speed(struct uvc_control_mapping *mapping,
- s32 value, u8 *data)
+static int uvc_ctrl_set_rel_speed(struct uvc_control_mapping *mapping,
+ size_t v4l2_size, const void *v4l2_in,
+ void *uvc_out)
{
unsigned int first = mapping->offset / 8;
+ u8 *out = uvc_out;
+ s32 value;
+
+ if (WARN_ON(v4l2_size != sizeof(s32)))
+ return -EINVAL;
- data[first] = value == 0 ? 0 : (value > 0) ? 1 : 0xff;
- data[first+1] = min_t(int, abs(value), 0xff);
+ value = *(u32 *)v4l2_in;
+ out[first] = value == 0 ? 0 : (value > 0) ? 1 : 0xff;
+ out[first + 1] = min_t(int, abs(value), 0xff);
+
+ return 0;
}
static const struct uvc_control_mapping uvc_ctrl_power_line_mapping_limited = {
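The zoom and relative-speed conversions above show the new mapping callback contract: get() decodes the raw UVC payload (uvc_in) into a caller-sized V4L2 buffer (v4l2_out) and returns an error code, set() encodes the other way, and for classic controls the V4L2 side is always a single s32. A minimal conforming pair, with hypothetical names and an imaginary one-byte control at payload offset 0:

static int demo_get(struct uvc_control_mapping *mapping, u8 query,
                    const void *uvc_in, size_t v4l2_size, void *v4l2_out)
{
        if (WARN_ON(v4l2_size != sizeof(s32)))
                return -EINVAL;

        *(s32 *)v4l2_out = ((const u8 *)uvc_in)[0];
        return 0;
}

static int demo_set(struct uvc_control_mapping *mapping, size_t v4l2_size,
                    const void *v4l2_in, void *uvc_out)
{
        if (WARN_ON(v4l2_size != sizeof(s32)))
                return -EINVAL;

        ((u8 *)uvc_out)[0] = clamp_t(s32, *(const s32 *)v4l2_in, 0, 0xff);
        return 0;
}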
@@ -547,6 +621,44 @@ end:
return out_mapping;
}
+static int uvc_get_rect(struct uvc_control_mapping *mapping, u8 query,
+ const void *uvc_in, size_t v4l2_size, void *v4l2_out)
+{
+ const struct uvc_rect *uvc_rect = uvc_in;
+ struct v4l2_rect *v4l2_rect = v4l2_out;
+
+ if (WARN_ON(v4l2_size != sizeof(struct v4l2_rect)))
+ return -EINVAL;
+
+ if (uvc_rect->left > uvc_rect->right ||
+ uvc_rect->top > uvc_rect->bottom)
+ return -EIO;
+
+ v4l2_rect->top = uvc_rect->top;
+ v4l2_rect->left = uvc_rect->left;
+ v4l2_rect->height = uvc_rect->bottom - uvc_rect->top + 1;
+ v4l2_rect->width = uvc_rect->right - uvc_rect->left + 1;
+
+ return 0;
+}
+
+static int uvc_set_rect(struct uvc_control_mapping *mapping, size_t v4l2_size,
+ const void *v4l2_in, void *uvc_out)
+{
+ struct uvc_rect *uvc_rect = uvc_out;
+ const struct v4l2_rect *v4l2_rect = v4l2_in;
+
+ if (WARN_ON(v4l2_size != sizeof(struct v4l2_rect)))
+ return -EINVAL;
+
+ uvc_rect->top = min(0xffff, v4l2_rect->top);
+ uvc_rect->left = min(0xffff, v4l2_rect->left);
+ uvc_rect->bottom = min(0xffff, v4l2_rect->top + v4l2_rect->height - 1);
+ uvc_rect->right = min(0xffff, v4l2_rect->left + v4l2_rect->width - 1);
+
+ return 0;
+}
+
static const struct uvc_control_mapping uvc_ctrl_mappings[] = {
{
.id = V4L2_CID_BRIGHTNESS,
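uvc_get_rect() and uvc_set_rect() above also translate between coordinate conventions: UVC describes a region by its inclusive corners while v4l2_rect uses top/left plus width/height, hence the +1/-1 adjustments. A worked example (illustration only, not driver code):

/* The inclusive UVC corners (0,0)-(639,479) become a 640x480 rectangle. */
static void demo_roi_conversion(void)
{
        struct uvc_rect wire = { .top = 0, .left = 0,
                                 .bottom = 479, .right = 639 };
        struct v4l2_rect rect = {
                .top    = wire.top,
                .left   = wire.left,
                .height = wire.bottom - wire.top + 1,   /* 480 */
                .width  = wire.right - wire.left + 1,   /* 640 */
        };
}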
@@ -841,6 +953,28 @@ static const struct uvc_control_mapping uvc_ctrl_mappings[] = {
.selector = UVC_PU_POWER_LINE_FREQUENCY_CONTROL,
.filter_mapping = uvc_ctrl_filter_plf_mapping,
},
+ {
+ .id = V4L2_CID_UVC_REGION_OF_INTEREST_RECT,
+ .entity = UVC_GUID_UVC_CAMERA,
+ .selector = UVC_CT_REGION_OF_INTEREST_CONTROL,
+ .size = sizeof(struct uvc_rect) * 8,
+ .offset = 0,
+ .v4l2_type = V4L2_CTRL_TYPE_RECT,
+ .data_type = UVC_CTRL_DATA_TYPE_RECT,
+ .get = uvc_get_rect,
+ .set = uvc_set_rect,
+ .name = "Region of Interest Rectangle",
+ },
+ {
+ .id = V4L2_CID_UVC_REGION_OF_INTEREST_AUTO,
+ .entity = UVC_GUID_UVC_CAMERA,
+ .selector = UVC_CT_REGION_OF_INTEREST_CONTROL,
+ .size = 16,
+ .offset = 64,
+ .v4l2_type = V4L2_CTRL_TYPE_BITMASK,
+ .data_type = UVC_CTRL_DATA_TYPE_BITMASK,
+ .name = "Region of Interest Auto Ctrls",
+ },
};
/* ------------------------------------------------------------------------
@@ -862,20 +996,45 @@ static inline void uvc_clear_bit(u8 *data, int bit)
data[bit >> 3] &= ~(1 << (bit & 7));
}
+static s32 uvc_menu_to_v4l2_menu(struct uvc_control_mapping *mapping, s32 val)
+{
+ unsigned int i;
+
+ for (i = 0; BIT(i) <= mapping->menu_mask; ++i) {
+ u32 menu_value;
+
+ if (!test_bit(i, &mapping->menu_mask))
+ continue;
+
+ menu_value = uvc_mapping_get_menu_value(mapping, i);
+
+ if (menu_value == val)
+ return i;
+ }
+
+ return val;
+}
+
/*
* Extract the bit string specified by mapping->offset and mapping->size
* from the little-endian data stored at 'data' and return the result as
* a signed 32bit integer. Sign extension will be performed if the mapping
* references a signed data type.
*/
-static s32 uvc_get_le_value(struct uvc_control_mapping *mapping,
- u8 query, const u8 *data)
+static int uvc_get_le_value(struct uvc_control_mapping *mapping,
+ u8 query, const void *uvc_in, size_t v4l2_size,
+ void *v4l2_out)
{
- int bits = mapping->size;
int offset = mapping->offset;
+ int bits = mapping->size;
+ const u8 *data = uvc_in;
+ s32 *out = v4l2_out;
s32 value = 0;
u8 mask;
+ if (WARN_ON(v4l2_size != sizeof(s32)))
+ return -EINVAL;
+
data += offset / 8;
offset &= 7;
mask = ((1LL << bits) - 1) << offset;
@@ -896,28 +1055,58 @@ static s32 uvc_get_le_value(struct uvc_control_mapping *mapping,
if (mapping->data_type == UVC_CTRL_DATA_TYPE_SIGNED)
value |= -(value & (1 << (mapping->size - 1)));
- return value;
+ /* If it is a menu, convert from uvc to v4l2. */
+ if (mapping->v4l2_type != V4L2_CTRL_TYPE_MENU) {
+ *out = value;
+ return 0;
+ }
+
+ switch (query) {
+ case UVC_GET_CUR:
+ case UVC_GET_DEF:
+ *out = uvc_menu_to_v4l2_menu(mapping, value);
+ return 0;
+ }
+
+ *out = value;
+ return 0;
}
/*
* Set the bit string specified by mapping->offset and mapping->size
* in the little-endian data stored at 'data' to the value 'value'.
*/
-static void uvc_set_le_value(struct uvc_control_mapping *mapping,
- s32 value, u8 *data)
+static int uvc_set_le_value(struct uvc_control_mapping *mapping,
+ size_t v4l2_size, const void *v4l2_in,
+ void *uvc_out)
{
- int bits = mapping->size;
int offset = mapping->offset;
+ int bits = mapping->size;
+ u8 *data = uvc_out;
+ s32 value;
u8 mask;
- /*
- * According to the v4l2 spec, writing any value to a button control
- * should result in the action belonging to the button control being
- * triggered. UVC devices however want to see a 1 written -> override
- * value.
- */
- if (mapping->v4l2_type == V4L2_CTRL_TYPE_BUTTON)
+ if (WARN_ON(v4l2_size != sizeof(s32)))
+ return -EINVAL;
+
+ value = *(s32 *)v4l2_in;
+
+ switch (mapping->v4l2_type) {
+ case V4L2_CTRL_TYPE_MENU:
+ value = uvc_mapping_get_menu_value(mapping, value);
+ break;
+ case V4L2_CTRL_TYPE_BUTTON:
+ /*
+ * According to the v4l2 spec, writing any value to a button
+ * control should result in the action belonging to the button
+ * control being triggered. UVC devices however want to see a 1
+ * written -> override value.
+ */
value = -1;
+ break;
+ default:
+ break;
+ }
data += offset / 8;
offset &= 7;
@@ -929,6 +1118,8 @@ static void uvc_set_le_value(struct uvc_control_mapping *mapping,
bits -= 8 - offset;
offset = 0;
}
+
+ return 0;
}
/* ------------------------------------------------------------------------
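The comment above summarises the extraction performed by uvc_get_le_value(); a concrete case may help: with .offset = 8 and .size = 16, the little-endian payload {0x00, 0x34, 0x12} yields 0x1234, and UVC_CTRL_DATA_TYPE_SIGNED would additionally sign-extend bit 15 of the extracted field. As a standalone illustration (not driver code):

static s32 demo_extract(void)
{
        /* Wire payload; .offset = 8 and .size = 16 select bytes 1-2. */
        const u8 data[] = { 0x00, 0x34, 0x12 };

        return data[1] | (data[2] << 8);        /* 0x1234 */
}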
@@ -947,7 +1138,7 @@ static int uvc_entity_match_guid(const struct uvc_entity *entity,
static void __uvc_find_control(struct uvc_entity *entity, u32 v4l2_id,
struct uvc_control_mapping **mapping, struct uvc_control **control,
- int next)
+ int next, int next_compound)
{
struct uvc_control *ctrl;
struct uvc_control_mapping *map;
@@ -962,14 +1153,16 @@ static void __uvc_find_control(struct uvc_entity *entity, u32 v4l2_id,
continue;
list_for_each_entry(map, &ctrl->info.mappings, list) {
- if ((map->id == v4l2_id) && !next) {
+ if (map->id == v4l2_id && !next && !next_compound) {
*control = ctrl;
*mapping = map;
return;
}
if ((*mapping == NULL || (*mapping)->id > map->id) &&
- (map->id > v4l2_id) && next) {
+ (map->id > v4l2_id) &&
+ (uvc_ctrl_mapping_is_compound(map) ?
+ next_compound : next)) {
*control = ctrl;
*mapping = map;
}
@@ -983,6 +1176,7 @@ static struct uvc_control *uvc_find_control(struct uvc_video_chain *chain,
struct uvc_control *ctrl = NULL;
struct uvc_entity *entity;
int next = v4l2_id & V4L2_CTRL_FLAG_NEXT_CTRL;
+ int next_compound = v4l2_id & V4L2_CTRL_FLAG_NEXT_COMPOUND;
*mapping = NULL;
@@ -991,12 +1185,13 @@ static struct uvc_control *uvc_find_control(struct uvc_video_chain *chain,
/* Find the control. */
list_for_each_entry(entity, &chain->entities, chain) {
- __uvc_find_control(entity, v4l2_id, mapping, &ctrl, next);
- if (ctrl && !next)
+ __uvc_find_control(entity, v4l2_id, mapping, &ctrl, next,
+ next_compound);
+ if (ctrl && !next && !next_compound)
return ctrl;
}
- if (ctrl == NULL && !next)
+ if (!ctrl && !next && !next_compound)
uvc_dbg(chain->dev, CONTROL, "Control 0x%08x not found\n",
v4l2_id);
@@ -1060,32 +1255,6 @@ static int uvc_ctrl_populate_cache(struct uvc_video_chain *chain,
return 0;
}
-static s32 __uvc_ctrl_get_value(struct uvc_control_mapping *mapping,
- const u8 *data)
-{
- s32 value = mapping->get(mapping, UVC_GET_CUR, data);
-
- if (mapping->v4l2_type == V4L2_CTRL_TYPE_MENU) {
- unsigned int i;
-
- for (i = 0; BIT(i) <= mapping->menu_mask; ++i) {
- u32 menu_value;
-
- if (!test_bit(i, &mapping->menu_mask))
- continue;
-
- menu_value = uvc_mapping_get_menu_value(mapping, i);
-
- if (menu_value == value) {
- value = i;
- break;
- }
- }
- }
-
- return value;
-}
-
static int __uvc_ctrl_load_cur(struct uvc_video_chain *chain,
struct uvc_control *ctrl)
{
@@ -1136,8 +1305,8 @@ static int __uvc_ctrl_get(struct uvc_video_chain *chain,
if (ret < 0)
return ret;
- *value = __uvc_ctrl_get_value(mapping,
- uvc_ctrl_data(ctrl, UVC_CTRL_DATA_CURRENT));
+ *value = uvc_mapping_get_s32(mapping, UVC_GET_CUR,
+ uvc_ctrl_data(ctrl, UVC_CTRL_DATA_CURRENT));
return 0;
}
@@ -1145,7 +1314,8 @@ static int __uvc_ctrl_get(struct uvc_video_chain *chain,
static int __uvc_query_v4l2_class(struct uvc_video_chain *chain, u32 req_id,
u32 found_id)
{
- bool find_next = req_id & V4L2_CTRL_FLAG_NEXT_CTRL;
+ bool find_next = req_id &
+ (V4L2_CTRL_FLAG_NEXT_CTRL | V4L2_CTRL_FLAG_NEXT_COMPOUND);
unsigned int i;
req_id &= V4L2_CTRL_ID_MASK;
@@ -1167,7 +1337,8 @@ static int __uvc_query_v4l2_class(struct uvc_video_chain *chain, u32 req_id,
}
static int uvc_query_v4l2_class(struct uvc_video_chain *chain, u32 req_id,
- u32 found_id, struct v4l2_queryctrl *v4l2_ctrl)
+ u32 found_id,
+ struct v4l2_query_ext_ctrl *v4l2_ctrl)
{
int idx;
@@ -1185,6 +1356,37 @@ static int uvc_query_v4l2_class(struct uvc_video_chain *chain, u32 req_id,
return 0;
}
+static bool uvc_ctrl_is_readable(u32 which, struct uvc_control *ctrl,
+ struct uvc_control_mapping *mapping)
+{
+ if (which == V4L2_CTRL_WHICH_CUR_VAL)
+ return !!(ctrl->info.flags & UVC_CTRL_FLAG_GET_CUR);
+
+ if (which == V4L2_CTRL_WHICH_DEF_VAL)
+ return !!(ctrl->info.flags & UVC_CTRL_FLAG_GET_DEF);
+
+ /* Types with implicit boundaries. */
+ switch (mapping->v4l2_type) {
+ case V4L2_CTRL_TYPE_MENU:
+ case V4L2_CTRL_TYPE_BOOLEAN:
+ case V4L2_CTRL_TYPE_BUTTON:
+ return true;
+ case V4L2_CTRL_TYPE_BITMASK:
+ return (ctrl->info.flags & UVC_CTRL_FLAG_GET_RES) ||
+ (ctrl->info.flags & UVC_CTRL_FLAG_GET_MAX);
+ default:
+ break;
+ }
+
+ if (which == V4L2_CTRL_WHICH_MIN_VAL)
+ return !!(ctrl->info.flags & UVC_CTRL_FLAG_GET_MIN);
+
+ if (which == V4L2_CTRL_WHICH_MAX_VAL)
+ return !!(ctrl->info.flags & UVC_CTRL_FLAG_GET_MAX);
+
+ return false;
+}
+
/*
* Check if control @v4l2_id can be accessed by the given control @ioctl
* (VIDIOC_G_EXT_CTRLS, VIDIOC_TRY_EXT_CTRLS or VIDIOC_S_EXT_CTRLS).
@@ -1203,7 +1405,6 @@ int uvc_ctrl_is_accessible(struct uvc_video_chain *chain, u32 v4l2_id,
struct uvc_control *master_ctrl = NULL;
struct uvc_control_mapping *mapping;
struct uvc_control *ctrl;
- bool read = ioctl == VIDIOC_G_EXT_CTRLS;
s32 val;
int ret;
int i;
@@ -1215,10 +1416,10 @@ int uvc_ctrl_is_accessible(struct uvc_video_chain *chain, u32 v4l2_id,
if (!ctrl)
return -EINVAL;
- if (!(ctrl->info.flags & UVC_CTRL_FLAG_GET_CUR) && read)
- return -EACCES;
+ if (ioctl == VIDIOC_G_EXT_CTRLS)
+ return uvc_ctrl_is_readable(ctrls->which, ctrl, mapping);
- if (!(ctrl->info.flags & UVC_CTRL_FLAG_SET_CUR) && !read)
+ if (!(ctrl->info.flags & UVC_CTRL_FLAG_SET_CUR))
return -EACCES;
if (ioctl != VIDIOC_S_EXT_CTRLS || !mapping->master_id)
@@ -1235,10 +1436,12 @@ int uvc_ctrl_is_accessible(struct uvc_video_chain *chain, u32 v4l2_id,
}
__uvc_find_control(ctrl->entity, mapping->master_id, &master_map,
- &master_ctrl, 0);
+ &master_ctrl, 0, 0);
if (!master_ctrl || !(master_ctrl->info.flags & UVC_CTRL_FLAG_GET_CUR))
return 0;
+ if (WARN_ON(uvc_ctrl_mapping_is_compound(master_map)))
+ return -EIO;
ret = __uvc_ctrl_get(chain, master_ctrl, master_map, &val);
if (ret >= 0 && val != mapping->master_manual)
@@ -1270,50 +1473,21 @@ static u32 uvc_get_ctrl_bitmap(struct uvc_control *ctrl,
* as supported.
*/
if (ctrl->info.flags & UVC_CTRL_FLAG_GET_RES)
- return mapping->get(mapping, UVC_GET_RES,
- uvc_ctrl_data(ctrl, UVC_CTRL_DATA_RES));
+ return uvc_mapping_get_s32(mapping, UVC_GET_RES,
+ uvc_ctrl_data(ctrl, UVC_CTRL_DATA_RES));
if (ctrl->info.flags & UVC_CTRL_FLAG_GET_MAX)
- return mapping->get(mapping, UVC_GET_MAX,
- uvc_ctrl_data(ctrl, UVC_CTRL_DATA_MAX));
+ return uvc_mapping_get_s32(mapping, UVC_GET_MAX,
+ uvc_ctrl_data(ctrl, UVC_CTRL_DATA_MAX));
return ~0;
}
-static int __uvc_query_v4l2_ctrl(struct uvc_video_chain *chain,
- struct uvc_control *ctrl,
- struct uvc_control_mapping *mapping,
- struct v4l2_queryctrl *v4l2_ctrl)
+static int __uvc_queryctrl_boundaries(struct uvc_video_chain *chain,
+ struct uvc_control *ctrl,
+ struct uvc_control_mapping *mapping,
+ struct v4l2_query_ext_ctrl *v4l2_ctrl)
{
- struct uvc_control_mapping *master_map = NULL;
- struct uvc_control *master_ctrl = NULL;
- unsigned int i;
-
- memset(v4l2_ctrl, 0, sizeof(*v4l2_ctrl));
- v4l2_ctrl->id = mapping->id;
- v4l2_ctrl->type = mapping->v4l2_type;
- strscpy(v4l2_ctrl->name, uvc_map_get_name(mapping),
- sizeof(v4l2_ctrl->name));
- v4l2_ctrl->flags = 0;
-
- if (!(ctrl->info.flags & UVC_CTRL_FLAG_GET_CUR))
- v4l2_ctrl->flags |= V4L2_CTRL_FLAG_WRITE_ONLY;
- if (!(ctrl->info.flags & UVC_CTRL_FLAG_SET_CUR))
- v4l2_ctrl->flags |= V4L2_CTRL_FLAG_READ_ONLY;
-
- if (mapping->master_id)
- __uvc_find_control(ctrl->entity, mapping->master_id,
- &master_map, &master_ctrl, 0);
- if (master_ctrl && (master_ctrl->info.flags & UVC_CTRL_FLAG_GET_CUR)) {
- s32 val;
- int ret = __uvc_ctrl_get(chain, master_ctrl, master_map, &val);
- if (ret < 0)
- return ret;
-
- if (val != mapping->master_manual)
- v4l2_ctrl->flags |= V4L2_CTRL_FLAG_INACTIVE;
- }
-
if (!ctrl->cached) {
int ret = uvc_ctrl_populate_cache(chain, ctrl);
if (ret < 0)
@@ -1321,8 +1495,8 @@ static int __uvc_query_v4l2_ctrl(struct uvc_video_chain *chain,
}
if (ctrl->info.flags & UVC_CTRL_FLAG_GET_DEF) {
- v4l2_ctrl->default_value = mapping->get(mapping, UVC_GET_DEF,
- uvc_ctrl_data(ctrl, UVC_CTRL_DATA_DEF));
+ v4l2_ctrl->default_value = uvc_mapping_get_s32(mapping,
+ UVC_GET_DEF, uvc_ctrl_data(ctrl, UVC_CTRL_DATA_DEF));
}
switch (mapping->v4l2_type) {
@@ -1330,21 +1504,6 @@ static int __uvc_query_v4l2_ctrl(struct uvc_video_chain *chain,
v4l2_ctrl->minimum = ffs(mapping->menu_mask) - 1;
v4l2_ctrl->maximum = fls(mapping->menu_mask) - 1;
v4l2_ctrl->step = 1;
-
- for (i = 0; BIT(i) <= mapping->menu_mask; ++i) {
- u32 menu_value;
-
- if (!test_bit(i, &mapping->menu_mask))
- continue;
-
- menu_value = uvc_mapping_get_menu_value(mapping, i);
-
- if (menu_value == v4l2_ctrl->default_value) {
- v4l2_ctrl->default_value = i;
- break;
- }
- }
-
return 0;
case V4L2_CTRL_TYPE_BOOLEAN:
@@ -1370,22 +1529,95 @@ static int __uvc_query_v4l2_ctrl(struct uvc_video_chain *chain,
}
if (ctrl->info.flags & UVC_CTRL_FLAG_GET_MIN)
- v4l2_ctrl->minimum = mapping->get(mapping, UVC_GET_MIN,
- uvc_ctrl_data(ctrl, UVC_CTRL_DATA_MIN));
+ v4l2_ctrl->minimum = uvc_mapping_get_s32(mapping, UVC_GET_MIN,
+ uvc_ctrl_data(ctrl, UVC_CTRL_DATA_MIN));
+ else
+ v4l2_ctrl->minimum = 0;
if (ctrl->info.flags & UVC_CTRL_FLAG_GET_MAX)
- v4l2_ctrl->maximum = mapping->get(mapping, UVC_GET_MAX,
- uvc_ctrl_data(ctrl, UVC_CTRL_DATA_MAX));
+ v4l2_ctrl->maximum = uvc_mapping_get_s32(mapping, UVC_GET_MAX,
+ uvc_ctrl_data(ctrl, UVC_CTRL_DATA_MAX));
+ else
+ v4l2_ctrl->maximum = 0;
if (ctrl->info.flags & UVC_CTRL_FLAG_GET_RES)
- v4l2_ctrl->step = mapping->get(mapping, UVC_GET_RES,
- uvc_ctrl_data(ctrl, UVC_CTRL_DATA_RES));
+ v4l2_ctrl->step = uvc_mapping_get_s32(mapping, UVC_GET_RES,
+ uvc_ctrl_data(ctrl, UVC_CTRL_DATA_RES));
+ else
+ v4l2_ctrl->step = 0;
return 0;
}
+static size_t uvc_mapping_v4l2_size(struct uvc_control_mapping *mapping)
+{
+ if (mapping->v4l2_type == V4L2_CTRL_TYPE_RECT)
+ return sizeof(struct v4l2_rect);
+
+ if (uvc_ctrl_mapping_is_compound(mapping))
+ return DIV_ROUND_UP(mapping->size, 8);
+
+ return sizeof(s32);
+}
+
+static int __uvc_query_v4l2_ctrl(struct uvc_video_chain *chain,
+ struct uvc_control *ctrl,
+ struct uvc_control_mapping *mapping,
+ struct v4l2_query_ext_ctrl *v4l2_ctrl)
+{
+ struct uvc_control_mapping *master_map = NULL;
+ struct uvc_control *master_ctrl = NULL;
+
+ memset(v4l2_ctrl, 0, sizeof(*v4l2_ctrl));
+ v4l2_ctrl->id = mapping->id;
+ v4l2_ctrl->type = mapping->v4l2_type;
+ strscpy(v4l2_ctrl->name, uvc_map_get_name(mapping),
+ sizeof(v4l2_ctrl->name));
+ v4l2_ctrl->flags = 0;
+
+ if (!(ctrl->info.flags & UVC_CTRL_FLAG_GET_CUR))
+ v4l2_ctrl->flags |= V4L2_CTRL_FLAG_WRITE_ONLY;
+ if (!(ctrl->info.flags & UVC_CTRL_FLAG_SET_CUR))
+ v4l2_ctrl->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+ if ((ctrl->info.flags & UVC_CTRL_FLAG_GET_MAX) &&
+ (ctrl->info.flags & UVC_CTRL_FLAG_GET_MIN))
+ v4l2_ctrl->flags |= V4L2_CTRL_FLAG_HAS_WHICH_MIN_MAX;
+
+ if (mapping->master_id)
+ __uvc_find_control(ctrl->entity, mapping->master_id,
+ &master_map, &master_ctrl, 0, 0);
+ if (master_ctrl && (master_ctrl->info.flags & UVC_CTRL_FLAG_GET_CUR)) {
+ s32 val;
+ int ret;
+
+ if (WARN_ON(uvc_ctrl_mapping_is_compound(master_map)))
+ return -EIO;
+
+ ret = __uvc_ctrl_get(chain, master_ctrl, master_map, &val);
+ if (ret < 0)
+ return ret;
+
+ if (val != mapping->master_manual)
+ v4l2_ctrl->flags |= V4L2_CTRL_FLAG_INACTIVE;
+ }
+
+ v4l2_ctrl->elem_size = uvc_mapping_v4l2_size(mapping);
+ v4l2_ctrl->elems = 1;
+
+ if (v4l2_ctrl->type >= V4L2_CTRL_COMPOUND_TYPES) {
+ v4l2_ctrl->flags |= V4L2_CTRL_FLAG_HAS_PAYLOAD;
+ v4l2_ctrl->default_value = 0;
+ v4l2_ctrl->minimum = 0;
+ v4l2_ctrl->maximum = 0;
+ v4l2_ctrl->step = 0;
+ return 0;
+ }
+
+ return __uvc_queryctrl_boundaries(chain, ctrl, mapping, v4l2_ctrl);
+}
+
int uvc_query_v4l2_ctrl(struct uvc_video_chain *chain,
- struct v4l2_queryctrl *v4l2_ctrl)
+ struct v4l2_query_ext_ctrl *v4l2_ctrl)
{
struct uvc_control *ctrl;
struct uvc_control_mapping *mapping;
@@ -1511,7 +1743,7 @@ static void uvc_ctrl_fill_event(struct uvc_video_chain *chain,
struct uvc_control_mapping *mapping,
s32 value, u32 changes)
{
- struct v4l2_queryctrl v4l2_ctrl;
+ struct v4l2_query_ext_ctrl v4l2_ctrl;
__uvc_query_v4l2_ctrl(chain, ctrl, mapping, &v4l2_ctrl);
@@ -1569,11 +1801,12 @@ static void uvc_ctrl_send_slave_event(struct uvc_video_chain *chain,
u32 changes = V4L2_EVENT_CTRL_CH_FLAGS;
s32 val = 0;
- __uvc_find_control(master->entity, slave_id, &mapping, &ctrl, 0);
+ __uvc_find_control(master->entity, slave_id, &mapping, &ctrl, 0, 0);
if (ctrl == NULL)
return;
- if (__uvc_ctrl_get(chain, ctrl, mapping, &val) == 0)
+ if (uvc_ctrl_mapping_is_compound(mapping) ||
+ __uvc_ctrl_get(chain, ctrl, mapping, &val) == 0)
changes |= V4L2_EVENT_CTRL_CH_VALUE;
uvc_ctrl_send_event(chain, handle, ctrl, mapping, val, changes);
@@ -1630,7 +1863,12 @@ void uvc_ctrl_status_event(struct uvc_video_chain *chain,
uvc_ctrl_set_handle(handle, ctrl, NULL);
list_for_each_entry(mapping, &ctrl->info.mappings, list) {
- s32 value = __uvc_ctrl_get_value(mapping, data);
+ s32 value;
+
+ if (uvc_ctrl_mapping_is_compound(mapping))
+ value = 0;
+ else
+ value = uvc_mapping_get_s32(mapping, UVC_GET_CUR, data);
/*
* handle may be NULL here if the device sends auto-update
@@ -1714,6 +1952,7 @@ static void uvc_ctrl_send_events(struct uvc_fh *handle,
for (i = 0; i < xctrls_count; ++i) {
u32 changes = V4L2_EVENT_CTRL_CH_VALUE;
+ s32 value;
ctrl = uvc_find_control(handle->chain, xctrls[i].id, &mapping);
if (ctrl->info.flags & UVC_CTRL_FLAG_ASYNCHRONOUS)
@@ -1738,6 +1977,10 @@ static void uvc_ctrl_send_events(struct uvc_fh *handle,
slave_id);
}
+ if (uvc_ctrl_mapping_is_compound(mapping))
+ value = 0;
+ else
+ value = xctrls[i].value;
/*
* If the master is being modified in the same transaction
* flags may change too.
@@ -1748,7 +1991,7 @@ static void uvc_ctrl_send_events(struct uvc_fh *handle,
changes |= V4L2_EVENT_CTRL_CH_FLAGS;
uvc_ctrl_send_event(handle->chain, handle, ctrl, mapping,
- xctrls[i].value, changes);
+ value, changes);
}
}
@@ -1780,7 +2023,8 @@ static int uvc_ctrl_add_event(struct v4l2_subscribed_event *sev, unsigned elems)
u32 changes = V4L2_EVENT_CTRL_CH_FLAGS;
s32 val = 0;
- if (__uvc_ctrl_get(handle->chain, ctrl, mapping, &val) == 0)
+ if (uvc_ctrl_mapping_is_compound(mapping) ||
+ __uvc_ctrl_get(handle->chain, ctrl, mapping, &val) == 0)
changes |= V4L2_EVENT_CTRL_CH_VALUE;
uvc_ctrl_fill_event(handle->chain, &ev, ctrl, mapping, val,
@@ -1920,7 +2164,7 @@ static int uvc_ctrl_find_ctrl_idx(struct uvc_entity *entity,
for (i = 0; i < ctrls->count; i++) {
__uvc_find_control(entity, ctrls->controls[i].id, &mapping,
- &ctrl_found, 0);
+ &ctrl_found, 0, 0);
if (uvc_control == ctrl_found)
return i;
}
@@ -1956,8 +2200,120 @@ done:
return ret;
}
-int uvc_ctrl_get(struct uvc_video_chain *chain,
- struct v4l2_ext_control *xctrl)
+static int uvc_mapping_get_xctrl_compound(struct uvc_video_chain *chain,
+ struct uvc_control *ctrl,
+ struct uvc_control_mapping *mapping,
+ u32 which,
+ struct v4l2_ext_control *xctrl)
+{
+ u8 *data __free(kfree) = NULL;
+ size_t size;
+ u8 query;
+ int ret;
+ int id;
+
+ switch (which) {
+ case V4L2_CTRL_WHICH_CUR_VAL:
+ id = UVC_CTRL_DATA_CURRENT;
+ query = UVC_GET_CUR;
+ break;
+ case V4L2_CTRL_WHICH_MIN_VAL:
+ id = UVC_CTRL_DATA_MIN;
+ query = UVC_GET_MIN;
+ break;
+ case V4L2_CTRL_WHICH_MAX_VAL:
+ id = UVC_CTRL_DATA_MAX;
+ query = UVC_GET_MAX;
+ break;
+ case V4L2_CTRL_WHICH_DEF_VAL:
+ id = UVC_CTRL_DATA_DEF;
+ query = UVC_GET_DEF;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ size = uvc_mapping_v4l2_size(mapping);
+ if (xctrl->size < size) {
+ xctrl->size = size;
+ return -ENOSPC;
+ }
+
+ data = kmalloc(size, GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ if (which == V4L2_CTRL_WHICH_CUR_VAL)
+ ret = __uvc_ctrl_load_cur(chain, ctrl);
+ else
+ ret = uvc_ctrl_populate_cache(chain, ctrl);
+
+ if (ret < 0)
+ return ret;
+
+ ret = mapping->get(mapping, query, uvc_ctrl_data(ctrl, id), size, data);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * v4l2_ext_control does not have enough room to fit a compound control.
+ * Instead, the value is in the user memory at xctrl->ptr. The v4l2
+ * ioctl helper does not copy it for us.
+ */
+ return copy_to_user(xctrl->ptr, data, size) ? -EFAULT : 0;
+}
+
+static int uvc_mapping_get_xctrl_std(struct uvc_video_chain *chain,
+ struct uvc_control *ctrl,
+ struct uvc_control_mapping *mapping,
+ u32 which, struct v4l2_ext_control *xctrl)
+{
+ struct v4l2_query_ext_ctrl qec;
+ int ret;
+
+ switch (which) {
+ case V4L2_CTRL_WHICH_CUR_VAL:
+ return __uvc_ctrl_get(chain, ctrl, mapping, &xctrl->value);
+ case V4L2_CTRL_WHICH_DEF_VAL:
+ case V4L2_CTRL_WHICH_MIN_VAL:
+ case V4L2_CTRL_WHICH_MAX_VAL:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = __uvc_queryctrl_boundaries(chain, ctrl, mapping, &qec);
+ if (ret < 0)
+ return ret;
+
+ switch (which) {
+ case V4L2_CTRL_WHICH_DEF_VAL:
+ xctrl->value = qec.default_value;
+ break;
+ case V4L2_CTRL_WHICH_MIN_VAL:
+ xctrl->value = qec.minimum;
+ break;
+ case V4L2_CTRL_WHICH_MAX_VAL:
+ xctrl->value = qec.maximum;
+ break;
+ }
+
+ return 0;
+}
+
+static int uvc_mapping_get_xctrl(struct uvc_video_chain *chain,
+ struct uvc_control *ctrl,
+ struct uvc_control_mapping *mapping,
+ u32 which, struct v4l2_ext_control *xctrl)
+{
+ if (uvc_ctrl_mapping_is_compound(mapping))
+ return uvc_mapping_get_xctrl_compound(chain, ctrl, mapping,
+ which, xctrl);
+ return uvc_mapping_get_xctrl_std(chain, ctrl, mapping, which, xctrl);
+}
+
+int uvc_ctrl_get(struct uvc_video_chain *chain, u32 which,
+ struct v4l2_ext_control *xctrl)
{
struct uvc_control *ctrl;
struct uvc_control_mapping *mapping;
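These helpers back the extended VIDIOC_G_EXT_CTRLS behaviour added by this series: scalar controls can now be read with which set to V4L2_CTRL_WHICH_MIN_VAL, V4L2_CTRL_WHICH_MAX_VAL or V4L2_CTRL_WHICH_DEF_VAL, and compound controls such as the ROI rectangle return their payload through the user pointer, while the scoped __free(kfree) allocation keeps the kernel-side bounce buffer from leaking on any exit path. A hedged userspace sketch (assumes uapi headers new enough to carry the new which selectors and the UVC ROI control IDs):

#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>
#include <linux/uvcvideo.h>     /* assumed location of the ROI control IDs */

static void demo_read_min_roi(int fd)
{
        struct v4l2_rect rect = {0};
        struct v4l2_ext_control ctrl = {
                .id   = V4L2_CID_UVC_REGION_OF_INTEREST_RECT,
                .size = sizeof(rect),   /* smaller sizes get -ENOSPC back */
                .ptr  = &rect,
        };
        struct v4l2_ext_controls ctrls = {
                .which    = V4L2_CTRL_WHICH_MIN_VAL,
                .count    = 1,
                .controls = &ctrl,
        };

        if (ioctl(fd, VIDIOC_G_EXT_CTRLS, &ctrls) == 0)
                printf("min ROI: %ux%u\n", rect.width, rect.height);
}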
@@ -1966,36 +2322,23 @@ int uvc_ctrl_get(struct uvc_video_chain *chain,
return -EACCES;
ctrl = uvc_find_control(chain, xctrl->id, &mapping);
- if (ctrl == NULL)
+ if (!ctrl)
return -EINVAL;
- return __uvc_ctrl_get(chain, ctrl, mapping, &xctrl->value);
+ return uvc_mapping_get_xctrl(chain, ctrl, mapping, which, xctrl);
}
-int uvc_ctrl_set(struct uvc_fh *handle,
- struct v4l2_ext_control *xctrl)
+static int uvc_ctrl_clamp(struct uvc_video_chain *chain,
+ struct uvc_control *ctrl,
+ struct uvc_control_mapping *mapping,
+ s32 *value_in_out)
{
- struct uvc_video_chain *chain = handle->chain;
- struct uvc_control *ctrl;
- struct uvc_control_mapping *mapping;
- s32 value;
+ s32 value = *value_in_out;
u32 step;
s32 min;
s32 max;
int ret;
- lockdep_assert_held(&chain->ctrl_mutex);
-
- if (__uvc_query_v4l2_class(chain, xctrl->id, 0) >= 0)
- return -EACCES;
-
- ctrl = uvc_find_control(chain, xctrl->id, &mapping);
- if (ctrl == NULL)
- return -EINVAL;
- if (!(ctrl->info.flags & UVC_CTRL_FLAG_SET_CUR))
- return -EACCES;
-
- /* Clamp out of range values. */
switch (mapping->v4l2_type) {
case V4L2_CTRL_TYPE_INTEGER:
if (!ctrl->cached) {
@@ -2004,23 +2347,22 @@ int uvc_ctrl_set(struct uvc_fh *handle,
return ret;
}
- min = mapping->get(mapping, UVC_GET_MIN,
- uvc_ctrl_data(ctrl, UVC_CTRL_DATA_MIN));
- max = mapping->get(mapping, UVC_GET_MAX,
- uvc_ctrl_data(ctrl, UVC_CTRL_DATA_MAX));
- step = mapping->get(mapping, UVC_GET_RES,
- uvc_ctrl_data(ctrl, UVC_CTRL_DATA_RES));
+ min = uvc_mapping_get_s32(mapping, UVC_GET_MIN,
+ uvc_ctrl_data(ctrl, UVC_CTRL_DATA_MIN));
+ max = uvc_mapping_get_s32(mapping, UVC_GET_MAX,
+ uvc_ctrl_data(ctrl, UVC_CTRL_DATA_MAX));
+ step = uvc_mapping_get_s32(mapping, UVC_GET_RES,
+ uvc_ctrl_data(ctrl, UVC_CTRL_DATA_RES));
if (step == 0)
step = 1;
- xctrl->value = min + DIV_ROUND_CLOSEST((u32)(xctrl->value - min),
- step) * step;
+ value = min + DIV_ROUND_CLOSEST((u32)(value - min), step) * step;
if (mapping->data_type == UVC_CTRL_DATA_TYPE_SIGNED)
- xctrl->value = clamp(xctrl->value, min, max);
+ value = clamp(value, min, max);
else
- xctrl->value = clamp_t(u32, xctrl->value, min, max);
- value = xctrl->value;
- break;
+ value = clamp_t(u32, value, min, max);
+ *value_in_out = value;
+ return 0;
case V4L2_CTRL_TYPE_BITMASK:
if (!ctrl->cached) {
@@ -2029,47 +2371,102 @@ int uvc_ctrl_set(struct uvc_fh *handle,
return ret;
}
- xctrl->value &= uvc_get_ctrl_bitmap(ctrl, mapping);
- value = xctrl->value;
- break;
+ value &= uvc_get_ctrl_bitmap(ctrl, mapping);
+ *value_in_out = value;
+ return 0;
case V4L2_CTRL_TYPE_BOOLEAN:
- xctrl->value = clamp(xctrl->value, 0, 1);
- value = xctrl->value;
- break;
+ *value_in_out = clamp(value, 0, 1);
+ return 0;
case V4L2_CTRL_TYPE_MENU:
- if (xctrl->value < (ffs(mapping->menu_mask) - 1) ||
- xctrl->value > (fls(mapping->menu_mask) - 1))
+ if (value < (ffs(mapping->menu_mask) - 1) ||
+ value > (fls(mapping->menu_mask) - 1))
return -ERANGE;
- if (!test_bit(xctrl->value, &mapping->menu_mask))
+ if (!test_bit(value, &mapping->menu_mask))
return -EINVAL;
- value = uvc_mapping_get_menu_value(mapping, xctrl->value);
-
/*
* Valid menu indices are reported by the GET_RES request for
* UVC controls that support it.
*/
if (mapping->data_type == UVC_CTRL_DATA_TYPE_BITMASK) {
+ int val = uvc_mapping_get_menu_value(mapping, value);
if (!ctrl->cached) {
ret = uvc_ctrl_populate_cache(chain, ctrl);
if (ret < 0)
return ret;
}
- if (!(uvc_get_ctrl_bitmap(ctrl, mapping) & value))
+ if (!(uvc_get_ctrl_bitmap(ctrl, mapping) & val))
return -EINVAL;
}
-
- break;
+ return 0;
default:
- value = xctrl->value;
- break;
+ return 0;
}
+ return 0;
+}
+
+static int uvc_mapping_set_xctrl_compound(struct uvc_control *ctrl,
+ struct uvc_control_mapping *mapping,
+ struct v4l2_ext_control *xctrl)
+{
+ u8 *data __free(kfree) = NULL;
+ size_t size = uvc_mapping_v4l2_size(mapping);
+
+ if (xctrl->size != size)
+ return -EINVAL;
+
+ /*
+ * v4l2_ext_control does not have enough room to fit a compound control.
+ * Instead, the value is in the user memory at xctrl->ptr. The v4l2
+ * ioctl helper does not copy it for us.
+ */
+ data = memdup_user(xctrl->ptr, size);
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ return mapping->set(mapping, size, data,
+ uvc_ctrl_data(ctrl, UVC_CTRL_DATA_CURRENT));
+}
+
+static int uvc_mapping_set_xctrl(struct uvc_control *ctrl,
+ struct uvc_control_mapping *mapping,
+ struct v4l2_ext_control *xctrl)
+{
+ if (uvc_ctrl_mapping_is_compound(mapping))
+ return uvc_mapping_set_xctrl_compound(ctrl, mapping, xctrl);
+
+ uvc_mapping_set_s32(mapping, xctrl->value,
+ uvc_ctrl_data(ctrl, UVC_CTRL_DATA_CURRENT));
+ return 0;
+}
+
+int uvc_ctrl_set(struct uvc_fh *handle, struct v4l2_ext_control *xctrl)
+{
+ struct uvc_video_chain *chain = handle->chain;
+ struct uvc_control_mapping *mapping;
+ struct uvc_control *ctrl;
+ int ret;
+
+ lockdep_assert_held(&chain->ctrl_mutex);
+
+ if (__uvc_query_v4l2_class(chain, xctrl->id, 0) >= 0)
+ return -EACCES;
+
+ ctrl = uvc_find_control(chain, xctrl->id, &mapping);
+ if (!ctrl)
+ return -EINVAL;
+ if (!(ctrl->info.flags & UVC_CTRL_FLAG_SET_CUR))
+ return -EACCES;
+
+ ret = uvc_ctrl_clamp(chain, ctrl, mapping, &xctrl->value);
+ if (ret)
+ return ret;
/*
* If the mapping doesn't span the whole UVC control, the current value
* needs to be loaded from the device to perform the read-modify-write
@@ -2088,8 +2485,9 @@ int uvc_ctrl_set(struct uvc_fh *handle,
ctrl->info.size);
}
- mapping->set(mapping, value,
- uvc_ctrl_data(ctrl, UVC_CTRL_DATA_CURRENT));
+ ret = uvc_mapping_set_xctrl(ctrl, mapping, xctrl);
+ if (ret)
+ return ret;
ctrl->dirty = 1;
ctrl->modified = 1;
@@ -2464,6 +2862,7 @@ static int __uvc_ctrl_add_mapping(struct uvc_video_chain *chain,
struct uvc_control_mapping *map;
unsigned int size;
unsigned int i;
+ int ret;
/*
* Most mappings come from static kernel data, and need to be duplicated.
@@ -2504,6 +2903,12 @@ static int __uvc_ctrl_add_mapping(struct uvc_video_chain *chain,
goto err_nomem;
}
+ if (uvc_ctrl_mapping_is_compound(map))
+ if (WARN_ON(!map->set || !map->get)) {
+ ret = -EIO;
+ goto free_mem;
+ }
+
if (map->get == NULL)
map->get = uvc_get_le_value;
if (map->set == NULL)
@@ -2525,11 +2930,13 @@ static int __uvc_ctrl_add_mapping(struct uvc_video_chain *chain,
return 0;
err_nomem:
+ ret = -ENOMEM;
+free_mem:
kfree(map->menu_names);
kfree(map->menu_mapping);
kfree(map->name);
kfree(map);
- return -ENOMEM;
+ return ret;
}
int uvc_ctrl_add_mapping(struct uvc_video_chain *chain,
diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c
index deadbcea5e22..107e0fafd80f 100644
--- a/drivers/media/usb/uvc/uvc_driver.c
+++ b/drivers/media/usb/uvc/uvc_driver.c
@@ -37,6 +37,8 @@ static unsigned int uvc_quirks_param = -1;
unsigned int uvc_dbg_param;
unsigned int uvc_timeout_param = UVC_CTRL_STREAMING_TIMEOUT;
+static struct usb_driver uvc_driver;
+
/* ------------------------------------------------------------------------
* Utility functions
*/
@@ -546,7 +548,7 @@ static int uvc_parse_streaming(struct uvc_device *dev,
return -EINVAL;
}
- if (usb_driver_claim_interface(&uvc_driver.driver, intf, dev)) {
+ if (usb_driver_claim_interface(&uvc_driver, intf, dev)) {
uvc_dbg(dev, DESCR,
"device %d interface %d is already claimed\n",
dev->udev->devnum,
@@ -556,7 +558,7 @@ static int uvc_parse_streaming(struct uvc_device *dev,
streaming = uvc_stream_new(dev, intf);
if (streaming == NULL) {
- usb_driver_release_interface(&uvc_driver.driver, intf);
+ usb_driver_release_interface(&uvc_driver, intf);
return -ENOMEM;
}
@@ -779,7 +781,7 @@ static int uvc_parse_streaming(struct uvc_device *dev,
return 0;
error:
- usb_driver_release_interface(&uvc_driver.driver, intf);
+ usb_driver_release_interface(&uvc_driver, intf);
uvc_stream_delete(streaming);
return ret;
}
@@ -1922,8 +1924,7 @@ static void uvc_delete(struct kref *kref)
struct uvc_streaming *streaming;
streaming = list_entry(p, struct uvc_streaming, list);
- usb_driver_release_interface(&uvc_driver.driver,
- streaming->intf);
+ usb_driver_release_interface(&uvc_driver, streaming->intf);
uvc_stream_delete(streaming);
}
@@ -3062,6 +3063,15 @@ static const struct usb_device_id uvc_ids[] = {
.bInterfaceProtocol = 0,
.driver_info = UVC_INFO_QUIRK(UVC_QUIRK_PROBE_MINMAX
| UVC_QUIRK_IGNORE_SELECTOR_UNIT) },
+ /* Actions Microelectronics Co. Display capture-UVC05 */
+ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE
+ | USB_DEVICE_ID_MATCH_INT_INFO,
+ .idVendor = 0x1de1,
+ .idProduct = 0xf105,
+ .bInterfaceClass = USB_CLASS_VIDEO,
+ .bInterfaceSubClass = 1,
+ .bInterfaceProtocol = 0,
+ .driver_info = UVC_INFO_QUIRK(UVC_QUIRK_DISABLE_AUTOSUSPEND) },
/* NXP Semiconductors IR VIDEO */
{ .match_flags = USB_DEVICE_ID_MATCH_DEVICE
| USB_DEVICE_ID_MATCH_INT_INFO,
@@ -3196,17 +3206,15 @@ static const struct usb_device_id uvc_ids[] = {
MODULE_DEVICE_TABLE(usb, uvc_ids);
-struct uvc_driver uvc_driver = {
- .driver = {
- .name = "uvcvideo",
- .probe = uvc_probe,
- .disconnect = uvc_disconnect,
- .suspend = uvc_suspend,
- .resume = uvc_resume,
- .reset_resume = uvc_reset_resume,
- .id_table = uvc_ids,
- .supports_autosuspend = 1,
- },
+static struct usb_driver uvc_driver = {
+ .name = "uvcvideo",
+ .probe = uvc_probe,
+ .disconnect = uvc_disconnect,
+ .suspend = uvc_suspend,
+ .resume = uvc_resume,
+ .reset_resume = uvc_reset_resume,
+ .id_table = uvc_ids,
+ .supports_autosuspend = 1,
};
static int __init uvc_init(void)
@@ -3215,7 +3223,7 @@ static int __init uvc_init(void)
uvc_debugfs_init();
- ret = usb_register(&uvc_driver.driver);
+ ret = usb_register(&uvc_driver);
if (ret < 0) {
uvc_debugfs_cleanup();
return ret;
@@ -3226,7 +3234,7 @@ static int __init uvc_init(void)
static void __exit uvc_cleanup(void)
{
- usb_deregister(&uvc_driver.driver);
+ usb_deregister(&uvc_driver);
uvc_debugfs_cleanup();
}
diff --git a/drivers/media/usb/uvc/uvc_v4l2.c b/drivers/media/usb/uvc/uvc_v4l2.c
index 93c6cdb23881..39065db44e86 100644
--- a/drivers/media/usb/uvc/uvc_v4l2.c
+++ b/drivers/media/usb/uvc/uvc_v4l2.c
@@ -108,6 +108,12 @@ static int uvc_ioctl_xu_ctrl_map(struct uvc_video_chain *chain,
struct uvc_control_mapping *map;
int ret;
+ if (xmap->data_type > UVC_CTRL_DATA_TYPE_BITMASK) {
+ uvc_dbg(chain->dev, CONTROL,
+ "Unsupported UVC data type %u\n", xmap->data_type);
+ return -EINVAL;
+ }
+
map = kzalloc(sizeof(*map), GFP_KERNEL);
if (map == NULL)
return -ENOMEM;
@@ -963,42 +969,13 @@ static int uvc_ioctl_s_input(struct file *file, void *fh, unsigned int input)
return ret;
}
-static int uvc_ioctl_queryctrl(struct file *file, void *fh,
- struct v4l2_queryctrl *qc)
-{
- struct uvc_fh *handle = fh;
- struct uvc_video_chain *chain = handle->chain;
-
- return uvc_query_v4l2_ctrl(chain, qc);
-}
-
static int uvc_ioctl_query_ext_ctrl(struct file *file, void *fh,
struct v4l2_query_ext_ctrl *qec)
{
struct uvc_fh *handle = fh;
struct uvc_video_chain *chain = handle->chain;
- struct v4l2_queryctrl qc = { qec->id };
- int ret;
-
- ret = uvc_query_v4l2_ctrl(chain, &qc);
- if (ret)
- return ret;
-
- qec->id = qc.id;
- qec->type = qc.type;
- strscpy(qec->name, qc.name, sizeof(qec->name));
- qec->minimum = qc.minimum;
- qec->maximum = qc.maximum;
- qec->step = qc.step;
- qec->default_value = qc.default_value;
- qec->flags = qc.flags;
- qec->elem_size = 4;
- qec->elems = 1;
- qec->nr_of_dims = 0;
- memset(qec->dims, 0, sizeof(qec->dims));
- memset(qec->reserved, 0, sizeof(qec->reserved));
- return 0;
+ return uvc_query_v4l2_ctrl(chain, qec);
}
static int uvc_ctrl_check_access(struct uvc_video_chain *chain,
@@ -1027,34 +1004,33 @@ static int uvc_ioctl_g_ext_ctrls(struct file *file, void *fh,
struct uvc_video_chain *chain = handle->chain;
struct v4l2_ext_control *ctrl = ctrls->controls;
unsigned int i;
+ u32 which;
int ret;
+ if (!ctrls->count)
+ return 0;
+
+ switch (ctrls->which) {
+ case V4L2_CTRL_WHICH_DEF_VAL:
+ case V4L2_CTRL_WHICH_CUR_VAL:
+ case V4L2_CTRL_WHICH_MAX_VAL:
+ case V4L2_CTRL_WHICH_MIN_VAL:
+ which = ctrls->which;
+ break;
+ default:
+ which = V4L2_CTRL_WHICH_CUR_VAL;
+ }
+
ret = uvc_ctrl_check_access(chain, ctrls, VIDIOC_G_EXT_CTRLS);
if (ret < 0)
return ret;
- if (ctrls->which == V4L2_CTRL_WHICH_DEF_VAL) {
- for (i = 0; i < ctrls->count; ++ctrl, ++i) {
- struct v4l2_queryctrl qc = { .id = ctrl->id };
-
- ret = uvc_query_v4l2_ctrl(chain, &qc);
- if (ret < 0) {
- ctrls->error_idx = i;
- return ret;
- }
-
- ctrl->value = qc.default_value;
- }
-
- return 0;
- }
-
ret = uvc_ctrl_begin(chain);
if (ret < 0)
return ret;
for (i = 0; i < ctrls->count; ++ctrl, ++i) {
- ret = uvc_ctrl_get(chain, ctrl);
+ ret = uvc_ctrl_get(chain, which, ctrl);
if (ret < 0) {
uvc_ctrl_rollback(handle);
ctrls->error_idx = i;
@@ -1076,6 +1052,9 @@ static int uvc_ioctl_s_try_ext_ctrls(struct uvc_fh *handle,
unsigned int i;
int ret;
+ if (!ctrls->count)
+ return 0;
+
ret = uvc_ctrl_check_access(chain, ctrls, ioctl);
if (ret < 0)
return ret;
@@ -1491,7 +1470,6 @@ const struct v4l2_ioctl_ops uvc_ioctl_ops = {
.vidioc_enum_input = uvc_ioctl_enum_input,
.vidioc_g_input = uvc_ioctl_g_input,
.vidioc_s_input = uvc_ioctl_s_input,
- .vidioc_queryctrl = uvc_ioctl_queryctrl,
.vidioc_query_ext_ctrl = uvc_ioctl_query_ext_ctrl,
.vidioc_g_ext_ctrls = uvc_ioctl_g_ext_ctrls,
.vidioc_s_ext_ctrls = uvc_ioctl_s_ext_ctrls,
diff --git a/drivers/media/usb/uvc/uvcvideo.h b/drivers/media/usb/uvc/uvcvideo.h
index 5e388f05f3fc..b4ee701835fc 100644
--- a/drivers/media/usb/uvc/uvcvideo.h
+++ b/drivers/media/usb/uvc/uvcvideo.h
@@ -116,7 +116,12 @@ struct uvc_control_mapping {
u8 entity[16];
u8 selector;
+ /*
+ * Size of the control data in the payload of the UVC control GET and
+ * SET requests, expressed in bits.
+ */
u8 size;
+
u8 offset;
enum v4l2_ctrl_type v4l2_type;
u32 data_type;
@@ -132,10 +137,10 @@ struct uvc_control_mapping {
const struct uvc_control_mapping *(*filter_mapping)
(struct uvc_video_chain *chain,
struct uvc_control *ctrl);
- s32 (*get)(struct uvc_control_mapping *mapping, u8 query,
- const u8 *data);
- void (*set)(struct uvc_control_mapping *mapping, s32 value,
- u8 *data);
+ int (*get)(struct uvc_control_mapping *mapping, u8 query,
+ const void *uvc_in, size_t v4l2_size, void *v4l2_out);
+ int (*set)(struct uvc_control_mapping *mapping, size_t v4l2_size,
+ const void *v4l2_in, void *uvc_out);
};
struct uvc_control {
@@ -538,6 +543,13 @@ struct uvc_device_info {
u16 uvc_version;
};
+struct uvc_rect {
+ u16 top;
+ u16 left;
+ u16 bottom;
+ u16 right;
+} __packed;
+
struct uvc_status_streaming {
u8 button;
} __packed;
@@ -620,10 +632,6 @@ struct uvc_fh {
unsigned int pending_async_ctrls;
};
-struct uvc_driver {
- struct usb_driver driver;
-};
-
/* ------------------------------------------------------------------------
* Debugging, printing and logging
*/
@@ -674,9 +682,6 @@ do { \
* Internal functions.
*/
-/* Core driver */
-extern struct uvc_driver uvc_driver;
-
struct uvc_entity *uvc_entity_by_id(struct uvc_device *dev, int id);
/* Video buffers queue management. */
@@ -766,7 +771,7 @@ void uvc_status_put(struct uvc_device *dev);
extern const struct v4l2_subscribed_event_ops uvc_ctrl_sub_ev_ops;
int uvc_query_v4l2_ctrl(struct uvc_video_chain *chain,
- struct v4l2_queryctrl *v4l2_ctrl);
+ struct v4l2_query_ext_ctrl *v4l2_ctrl);
int uvc_query_v4l2_menu(struct uvc_video_chain *chain,
struct v4l2_querymenu *query_menu);
@@ -793,7 +798,8 @@ static inline int uvc_ctrl_rollback(struct uvc_fh *handle)
return __uvc_ctrl_commit(handle, 1, NULL);
}
-int uvc_ctrl_get(struct uvc_video_chain *chain, struct v4l2_ext_control *xctrl);
+int uvc_ctrl_get(struct uvc_video_chain *chain, u32 which,
+ struct v4l2_ext_control *xctrl);
int uvc_ctrl_set(struct uvc_fh *handle, struct v4l2_ext_control *xctrl);
int uvc_ctrl_is_accessible(struct uvc_video_chain *chain, u32 v4l2_id,
const struct v4l2_ext_controls *ctrls,
diff --git a/drivers/media/v4l2-core/v4l2-common.c b/drivers/media/v4l2-core/v4l2-common.c
index 0a2f4f0d0a07..e4b2de3833ee 100644
--- a/drivers/media/v4l2-core/v4l2-common.c
+++ b/drivers/media/v4l2-core/v4l2-common.c
@@ -466,8 +466,8 @@ int v4l2_fill_pixfmt(struct v4l2_pix_format *pixfmt, u32 pixelformat,
}
EXPORT_SYMBOL_GPL(v4l2_fill_pixfmt);
-s64 v4l2_get_link_freq(struct v4l2_ctrl_handler *handler, unsigned int mul,
- unsigned int div)
+s64 __v4l2_get_link_freq_ctrl(struct v4l2_ctrl_handler *handler,
+ unsigned int mul, unsigned int div)
{
struct v4l2_ctrl *ctrl;
s64 freq;
@@ -502,7 +502,33 @@ s64 v4l2_get_link_freq(struct v4l2_ctrl_handler *handler, unsigned int mul,
return freq > 0 ? freq : -EINVAL;
}
-EXPORT_SYMBOL_GPL(v4l2_get_link_freq);
+EXPORT_SYMBOL_GPL(__v4l2_get_link_freq_ctrl);
+
+#ifdef CONFIG_MEDIA_CONTROLLER
+s64 __v4l2_get_link_freq_pad(struct media_pad *pad, unsigned int mul,
+ unsigned int div)
+{
+ struct v4l2_mbus_config mbus_config = {};
+ struct v4l2_subdev *sd;
+ int ret;
+
+ sd = media_entity_to_v4l2_subdev(pad->entity);
+ ret = v4l2_subdev_call(sd, pad, get_mbus_config, pad->index,
+ &mbus_config);
+ if (ret < 0 && ret != -ENOIOCTLCMD)
+ return ret;
+
+ if (mbus_config.link_freq)
+ return mbus_config.link_freq;
+
+ /*
+ * Fall back to using the link frequency control if the media bus config
+ * doesn't provide a link frequency.
+ */
+ return __v4l2_get_link_freq_ctrl(sd->ctrl_handler, mul, div);
+}
+EXPORT_SYMBOL_GPL(__v4l2_get_link_freq_pad);
+#endif /* CONFIG_MEDIA_CONTROLLER */
/*
* Simplify a fraction using a simple continued fraction decomposition. The
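The control-based helper keeps its previous behaviour under a new name; the pad-based variant added above first asks the remote subdev for its media bus config, which may report the link frequency directly, and only falls back to the LINK_FREQ/PIXEL_RATE control lookup when it does not. The public v4l2_get_link_freq() entry point presumably dispatches between the two based on its first argument; that wrapper lives in the headers, outside this hunk. A caller sketch, where the pad pointer, 16 bits per pixel and 2 data lanes are assumptions made for the example:

static int demo_csi2_link_freq(struct media_pad *remote_pad)
{
        /* mul = bits per pixel, div = 2 * number of data lanes (DDR clock). */
        s64 freq = __v4l2_get_link_freq_pad(remote_pad, 16, 2 * 2);

        if (freq < 0)
                return freq;    /* propagated errno from the helper */

        pr_info("CSI-2 link frequency: %lld Hz\n", freq);
        return 0;
}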
diff --git a/drivers/media/v4l2-core/v4l2-ctrls-api.c b/drivers/media/v4l2-core/v4l2-ctrls-api.c
index 95a2202879d8..d49a68b36c28 100644
--- a/drivers/media/v4l2-core/v4l2-ctrls-api.c
+++ b/drivers/media/v4l2-core/v4l2-ctrls-api.c
@@ -94,6 +94,22 @@ static int def_to_user(struct v4l2_ext_control *c, struct v4l2_ctrl *ctrl)
return ptr_to_user(c, ctrl, ctrl->p_new);
}
+/* Helper function: copy the minimum control value back to the caller */
+static int min_to_user(struct v4l2_ext_control *c, struct v4l2_ctrl *ctrl)
+{
+ ctrl->type_ops->minimum(ctrl, 0, ctrl->p_new);
+
+ return ptr_to_user(c, ctrl, ctrl->p_new);
+}
+
+/* Helper function: copy the maximum control value back to the caller */
+static int max_to_user(struct v4l2_ext_control *c, struct v4l2_ctrl *ctrl)
+{
+ ctrl->type_ops->maximum(ctrl, 0, ctrl->p_new);
+
+ return ptr_to_user(c, ctrl, ctrl->p_new);
+}
+
/* Helper function: copy the caller-provided value as the new control value */
static int user_to_new(struct v4l2_ext_control *c, struct v4l2_ctrl *ctrl)
{
@@ -229,8 +245,8 @@ static int prepare_ext_ctrls(struct v4l2_ctrl_handler *hdl,
cs->error_idx = i;
if (cs->which &&
- cs->which != V4L2_CTRL_WHICH_DEF_VAL &&
- cs->which != V4L2_CTRL_WHICH_REQUEST_VAL &&
+ (cs->which < V4L2_CTRL_WHICH_DEF_VAL ||
+ cs->which > V4L2_CTRL_WHICH_MAX_VAL) &&
V4L2_CTRL_ID2WHICH(id) != cs->which) {
dprintk(vdev,
"invalid which 0x%x or control id 0x%x\n",
@@ -259,6 +275,15 @@ static int prepare_ext_ctrls(struct v4l2_ctrl_handler *hdl,
return -EINVAL;
}
+ if (!(ctrl->flags & V4L2_CTRL_FLAG_HAS_WHICH_MIN_MAX) &&
+ (cs->which == V4L2_CTRL_WHICH_MIN_VAL ||
+ cs->which == V4L2_CTRL_WHICH_MAX_VAL)) {
+ dprintk(vdev,
+ "invalid which 0x%x or control id 0x%x\n",
+ cs->which, id);
+ return -EINVAL;
+ }
+
if (ctrl->cluster[0]->ncontrols > 1)
have_clusters = true;
if (ctrl->cluster[0] != ctrl)
@@ -368,8 +393,8 @@ static int prepare_ext_ctrls(struct v4l2_ctrl_handler *hdl,
*/
static int class_check(struct v4l2_ctrl_handler *hdl, u32 which)
{
- if (which == 0 || which == V4L2_CTRL_WHICH_DEF_VAL ||
- which == V4L2_CTRL_WHICH_REQUEST_VAL)
+ if (which == 0 || (which >= V4L2_CTRL_WHICH_DEF_VAL &&
+ which <= V4L2_CTRL_WHICH_MAX_VAL))
return 0;
return find_ref_lock(hdl, which | 1) ? 0 : -EINVAL;
}
@@ -389,10 +414,12 @@ int v4l2_g_ext_ctrls_common(struct v4l2_ctrl_handler *hdl,
struct v4l2_ctrl_helper *helpers = helper;
int ret;
int i, j;
- bool is_default, is_request;
+ bool is_default, is_request, is_min, is_max;
is_default = (cs->which == V4L2_CTRL_WHICH_DEF_VAL);
is_request = (cs->which == V4L2_CTRL_WHICH_REQUEST_VAL);
+ is_min = (cs->which == V4L2_CTRL_WHICH_MIN_VAL);
+ is_max = (cs->which == V4L2_CTRL_WHICH_MAX_VAL);
cs->error_idx = cs->count;
cs->which = V4L2_CTRL_ID2WHICH(cs->which);
@@ -432,13 +459,14 @@ int v4l2_g_ext_ctrls_common(struct v4l2_ctrl_handler *hdl,
/*
* g_volatile_ctrl will update the new control values.
- * This makes no sense for V4L2_CTRL_WHICH_DEF_VAL and
+ * This makes no sense for V4L2_CTRL_WHICH_DEF_VAL,
+ * V4L2_CTRL_WHICH_MIN_VAL, V4L2_CTRL_WHICH_MAX_VAL and
* V4L2_CTRL_WHICH_REQUEST_VAL. In the case of requests
* it is v4l2_ctrl_request_complete() that copies the
* volatile controls at the time of request completion
* to the request, so you don't want to do that again.
*/
- if (!is_default && !is_request &&
+ if (!is_default && !is_request && !is_min && !is_max &&
((master->flags & V4L2_CTRL_FLAG_VOLATILE) ||
(master->has_volatiles && !is_cur_manual(master)))) {
for (j = 0; j < master->ncontrols; j++)
@@ -467,6 +495,10 @@ int v4l2_g_ext_ctrls_common(struct v4l2_ctrl_handler *hdl,
ret = -ENOMEM;
else if (is_request && ref->p_req_valid)
ret = req_to_user(cs->controls + idx, ref);
+ else if (is_min)
+ ret = min_to_user(cs->controls + idx, ref->ctrl);
+ else if (is_max)
+ ret = max_to_user(cs->controls + idx, ref->ctrl);
else if (is_volatile)
ret = new_to_user(cs->controls + idx, ref->ctrl);
else
@@ -564,9 +596,11 @@ int try_set_ext_ctrls_common(struct v4l2_fh *fh,
cs->error_idx = cs->count;
- /* Default value cannot be changed */
- if (cs->which == V4L2_CTRL_WHICH_DEF_VAL) {
- dprintk(vdev, "%s: cannot change default value\n",
+ /* Default/minimum/maximum values cannot be changed */
+ if (cs->which == V4L2_CTRL_WHICH_DEF_VAL ||
+ cs->which == V4L2_CTRL_WHICH_MIN_VAL ||
+ cs->which == V4L2_CTRL_WHICH_MAX_VAL) {
+ dprintk(vdev, "%s: cannot change default/min/max value\n",
video_device_node_name(vdev));
return -EINVAL;
}
@@ -1123,39 +1157,48 @@ int v4l2_query_ext_ctrl(struct v4l2_ctrl_handler *hdl, struct v4l2_query_ext_ctr
}
EXPORT_SYMBOL(v4l2_query_ext_ctrl);
-/* Implement VIDIOC_QUERYCTRL */
-int v4l2_queryctrl(struct v4l2_ctrl_handler *hdl, struct v4l2_queryctrl *qc)
+void v4l2_query_ext_ctrl_to_v4l2_queryctrl(struct v4l2_queryctrl *to,
+ const struct v4l2_query_ext_ctrl *from)
{
- struct v4l2_query_ext_ctrl qec = { qc->id };
- int rc;
-
- rc = v4l2_query_ext_ctrl(hdl, &qec);
- if (rc)
- return rc;
+ to->id = from->id;
+ to->type = from->type;
+ to->flags = from->flags;
+ strscpy(to->name, from->name, sizeof(to->name));
- qc->id = qec.id;
- qc->type = qec.type;
- qc->flags = qec.flags;
- strscpy(qc->name, qec.name, sizeof(qc->name));
- switch (qc->type) {
+ switch (from->type) {
case V4L2_CTRL_TYPE_INTEGER:
case V4L2_CTRL_TYPE_BOOLEAN:
case V4L2_CTRL_TYPE_MENU:
case V4L2_CTRL_TYPE_INTEGER_MENU:
case V4L2_CTRL_TYPE_STRING:
case V4L2_CTRL_TYPE_BITMASK:
- qc->minimum = qec.minimum;
- qc->maximum = qec.maximum;
- qc->step = qec.step;
- qc->default_value = qec.default_value;
+ to->minimum = from->minimum;
+ to->maximum = from->maximum;
+ to->step = from->step;
+ to->default_value = from->default_value;
break;
default:
- qc->minimum = 0;
- qc->maximum = 0;
- qc->step = 0;
- qc->default_value = 0;
+ to->minimum = 0;
+ to->maximum = 0;
+ to->step = 0;
+ to->default_value = 0;
break;
}
+}
+EXPORT_SYMBOL(v4l2_query_ext_ctrl_to_v4l2_queryctrl);
+
+/* Implement VIDIOC_QUERYCTRL */
+int v4l2_queryctrl(struct v4l2_ctrl_handler *hdl, struct v4l2_queryctrl *qc)
+{
+ struct v4l2_query_ext_ctrl qec = { qc->id };
+ int rc;
+
+ rc = v4l2_query_ext_ctrl(hdl, &qec);
+ if (rc)
+ return rc;
+
+ v4l2_query_ext_ctrl_to_v4l2_queryctrl(qc, &qec);
+
return 0;
}
EXPORT_SYMBOL(v4l2_queryctrl);
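With min_to_user()/max_to_user() wired into v4l2_g_ext_ctrls_common() above, userspace can read a control's minimum or maximum payload through VIDIOC_G_EXT_CTRLS. A hedged userspace sketch follows; the control ID and the choice of a rectangle payload are placeholders, and it assumes the V4L2_CTRL_WHICH_MIN_VAL selector introduced by this series.

#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

/* Illustrative: fetch the smallest rectangle a compound control accepts. */
static int example_get_min_rect(int fd, unsigned int ctrl_id,
				struct v4l2_rect *min)
{
	struct v4l2_ext_control c;
	struct v4l2_ext_controls cs;

	memset(&c, 0, sizeof(c));
	memset(&cs, 0, sizeof(cs));

	c.id = ctrl_id;				/* placeholder control ID */
	c.size = sizeof(*min);
	c.ptr = min;

	cs.which = V4L2_CTRL_WHICH_MIN_VAL;	/* added by this series */
	cs.count = 1;
	cs.controls = &c;

	return ioctl(fd, VIDIOC_G_EXT_CTRLS, &cs);
}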
diff --git a/drivers/media/v4l2-core/v4l2-ctrls-core.c b/drivers/media/v4l2-core/v4l2-ctrls-core.c
index eeab6a5eb7ba..90d25329661e 100644
--- a/drivers/media/v4l2-core/v4l2-ctrls-core.c
+++ b/drivers/media/v4l2-core/v4l2-ctrls-core.c
@@ -182,29 +182,66 @@ static void std_init_compound(const struct v4l2_ctrl *ctrl, u32 idx,
}
}
-void v4l2_ctrl_type_op_init(const struct v4l2_ctrl *ctrl, u32 from_idx,
- union v4l2_ctrl_ptr ptr)
+static void std_min_compound(const struct v4l2_ctrl *ctrl, u32 idx,
+ union v4l2_ctrl_ptr ptr)
+{
+ void *p = ptr.p + idx * ctrl->elem_size;
+
+ if (ctrl->p_min.p_const)
+ memcpy(p, ctrl->p_min.p_const, ctrl->elem_size);
+ else
+ memset(p, 0, ctrl->elem_size);
+}
+
+static void std_max_compound(const struct v4l2_ctrl *ctrl, u32 idx,
+ union v4l2_ctrl_ptr ptr)
+{
+ void *p = ptr.p + idx * ctrl->elem_size;
+
+ if (ctrl->p_max.p_const)
+ memcpy(p, ctrl->p_max.p_const, ctrl->elem_size);
+ else
+ memset(p, 0, ctrl->elem_size);
+}
+
+static void __v4l2_ctrl_type_op_init(const struct v4l2_ctrl *ctrl, u32 from_idx,
+ u32 which, union v4l2_ctrl_ptr ptr)
{
unsigned int i;
u32 tot_elems = ctrl->elems;
u32 elems = tot_elems - from_idx;
+ s64 value;
- if (from_idx >= tot_elems)
+ switch (which) {
+ case V4L2_CTRL_WHICH_DEF_VAL:
+ value = ctrl->default_value;
+ break;
+ case V4L2_CTRL_WHICH_MAX_VAL:
+ value = ctrl->maximum;
+ break;
+ case V4L2_CTRL_WHICH_MIN_VAL:
+ value = ctrl->minimum;
+ break;
+ default:
return;
+ }
switch (ctrl->type) {
case V4L2_CTRL_TYPE_STRING:
+ if (which == V4L2_CTRL_WHICH_DEF_VAL)
+ value = ctrl->minimum;
+
for (i = from_idx; i < tot_elems; i++) {
unsigned int offset = i * ctrl->elem_size;
- memset(ptr.p_char + offset, ' ', ctrl->minimum);
- ptr.p_char[offset + ctrl->minimum] = '\0';
+ memset(ptr.p_char + offset, ' ', value);
+ ptr.p_char[offset + value] = '\0';
}
break;
case V4L2_CTRL_TYPE_INTEGER64:
- if (ctrl->default_value) {
+ if (value) {
for (i = from_idx; i < tot_elems; i++)
- ptr.p_s64[i] = ctrl->default_value;
+ ptr.p_s64[i] = value;
} else {
memset(ptr.p_s64 + from_idx, 0, elems * sizeof(s64));
}
@@ -214,9 +251,9 @@ void v4l2_ctrl_type_op_init(const struct v4l2_ctrl *ctrl, u32 from_idx,
case V4L2_CTRL_TYPE_MENU:
case V4L2_CTRL_TYPE_BITMASK:
case V4L2_CTRL_TYPE_BOOLEAN:
- if (ctrl->default_value) {
+ if (value) {
for (i = from_idx; i < tot_elems; i++)
- ptr.p_s32[i] = ctrl->default_value;
+ ptr.p_s32[i] = value;
} else {
memset(ptr.p_s32 + from_idx, 0, elems * sizeof(s32));
}
@@ -226,32 +263,61 @@ void v4l2_ctrl_type_op_init(const struct v4l2_ctrl *ctrl, u32 from_idx,
memset(ptr.p_s32 + from_idx, 0, elems * sizeof(s32));
break;
case V4L2_CTRL_TYPE_U8:
- memset(ptr.p_u8 + from_idx, ctrl->default_value, elems);
+ memset(ptr.p_u8 + from_idx, value, elems);
break;
case V4L2_CTRL_TYPE_U16:
- if (ctrl->default_value) {
+ if (value) {
for (i = from_idx; i < tot_elems; i++)
- ptr.p_u16[i] = ctrl->default_value;
+ ptr.p_u16[i] = value;
} else {
memset(ptr.p_u16 + from_idx, 0, elems * sizeof(u16));
}
break;
case V4L2_CTRL_TYPE_U32:
- if (ctrl->default_value) {
+ if (value) {
for (i = from_idx; i < tot_elems; i++)
- ptr.p_u32[i] = ctrl->default_value;
+ ptr.p_u32[i] = value;
} else {
memset(ptr.p_u32 + from_idx, 0, elems * sizeof(u32));
}
break;
default:
- for (i = from_idx; i < tot_elems; i++)
- std_init_compound(ctrl, i, ptr);
+ for (i = from_idx; i < tot_elems; i++) {
+ switch (which) {
+ case V4L2_CTRL_WHICH_DEF_VAL:
+ std_init_compound(ctrl, i, ptr);
+ break;
+ case V4L2_CTRL_WHICH_MAX_VAL:
+ std_max_compound(ctrl, i, ptr);
+ break;
+ case V4L2_CTRL_WHICH_MIN_VAL:
+ std_min_compound(ctrl, i, ptr);
+ break;
+ }
+ }
break;
}
}
+
+void v4l2_ctrl_type_op_init(const struct v4l2_ctrl *ctrl, u32 from_idx,
+ union v4l2_ctrl_ptr ptr)
+{
+ __v4l2_ctrl_type_op_init(ctrl, from_idx, V4L2_CTRL_WHICH_DEF_VAL, ptr);
+}
EXPORT_SYMBOL(v4l2_ctrl_type_op_init);
+static void v4l2_ctrl_type_op_minimum(const struct v4l2_ctrl *ctrl,
+ u32 from_idx, union v4l2_ctrl_ptr ptr)
+{
+ __v4l2_ctrl_type_op_init(ctrl, from_idx, V4L2_CTRL_WHICH_MIN_VAL, ptr);
+}
+
+static void v4l2_ctrl_type_op_maximum(const struct v4l2_ctrl *ctrl,
+ u32 from_idx, union v4l2_ctrl_ptr ptr)
+{
+ __v4l2_ctrl_type_op_init(ctrl, from_idx, V4L2_CTRL_WHICH_MAX_VAL, ptr);
+}
+
void v4l2_ctrl_type_op_log(const struct v4l2_ctrl *ctrl)
{
union v4l2_ctrl_ptr ptr = ctrl->p_cur;
@@ -370,7 +436,11 @@ void v4l2_ctrl_type_op_log(const struct v4l2_ctrl *ctrl)
case V4L2_CTRL_TYPE_AV1_FILM_GRAIN:
pr_cont("AV1_FILM_GRAIN");
break;
-
+ case V4L2_CTRL_TYPE_RECT:
+ pr_cont("(%d,%d)/%ux%u",
+ ptr.p_rect->left, ptr.p_rect->top,
+ ptr.p_rect->width, ptr.p_rect->height);
+ break;
default:
pr_cont("unknown type %d", ctrl->type);
break;
@@ -815,6 +885,7 @@ static int std_validate_compound(const struct v4l2_ctrl *ctrl, u32 idx,
struct v4l2_ctrl_hdr10_mastering_display *p_hdr10_mastering;
struct v4l2_ctrl_hevc_decode_params *p_hevc_decode_params;
struct v4l2_area *area;
+ struct v4l2_rect *rect;
void *p = ptr.p + idx * ctrl->elem_size;
unsigned int i;
@@ -1172,6 +1243,12 @@ static int std_validate_compound(const struct v4l2_ctrl *ctrl, u32 idx,
return -EINVAL;
break;
+ case V4L2_CTRL_TYPE_RECT:
+ rect = p;
+ if (!rect->width || !rect->height)
+ return -EINVAL;
+ break;
+
default:
return -EINVAL;
}
@@ -1285,6 +1362,8 @@ EXPORT_SYMBOL(v4l2_ctrl_type_op_validate);
static const struct v4l2_ctrl_type_ops std_type_ops = {
.equal = v4l2_ctrl_type_op_equal,
.init = v4l2_ctrl_type_op_init,
+ .minimum = v4l2_ctrl_type_op_minimum,
+ .maximum = v4l2_ctrl_type_op_maximum,
.log = v4l2_ctrl_type_op_log,
.validate = v4l2_ctrl_type_op_validate,
};
@@ -1757,7 +1836,10 @@ static struct v4l2_ctrl *v4l2_ctrl_new(struct v4l2_ctrl_handler *hdl,
s64 min, s64 max, u64 step, s64 def,
const u32 dims[V4L2_CTRL_MAX_DIMS], u32 elem_size,
u32 flags, const char * const *qmenu,
- const s64 *qmenu_int, const union v4l2_ctrl_ptr p_def,
+ const s64 *qmenu_int,
+ const union v4l2_ctrl_ptr p_def,
+ const union v4l2_ctrl_ptr p_min,
+ const union v4l2_ctrl_ptr p_max,
void *priv)
{
struct v4l2_ctrl *ctrl;
@@ -1872,12 +1954,21 @@ static struct v4l2_ctrl *v4l2_ctrl_new(struct v4l2_ctrl_handler *hdl,
case V4L2_CTRL_TYPE_AREA:
elem_size = sizeof(struct v4l2_area);
break;
+ case V4L2_CTRL_TYPE_RECT:
+ elem_size = sizeof(struct v4l2_rect);
+ break;
default:
if (type < V4L2_CTRL_COMPOUND_TYPES)
elem_size = sizeof(s32);
break;
}
+ if (type < V4L2_CTRL_COMPOUND_TYPES &&
+ type != V4L2_CTRL_TYPE_BUTTON &&
+ type != V4L2_CTRL_TYPE_CTRL_CLASS &&
+ type != V4L2_CTRL_TYPE_STRING)
+ flags |= V4L2_CTRL_FLAG_HAS_WHICH_MIN_MAX;
+
/* Sanity checks */
if (id == 0 || name == NULL || !elem_size ||
id >= V4L2_CID_PRIVATE_BASE ||
@@ -1886,6 +1977,7 @@ static struct v4l2_ctrl *v4l2_ctrl_new(struct v4l2_ctrl_handler *hdl,
handler_set_err(hdl, -ERANGE);
return NULL;
}
+
err = check_range(type, min, max, step, def);
if (err) {
handler_set_err(hdl, err);
@@ -1927,6 +2019,10 @@ static struct v4l2_ctrl *v4l2_ctrl_new(struct v4l2_ctrl_handler *hdl,
if (type >= V4L2_CTRL_COMPOUND_TYPES && p_def.p_const)
sz_extra += elem_size;
+ if (type >= V4L2_CTRL_COMPOUND_TYPES && p_min.p_const)
+ sz_extra += elem_size;
+ if (type >= V4L2_CTRL_COMPOUND_TYPES && p_max.p_const)
+ sz_extra += elem_size;
ctrl = kvzalloc(sizeof(*ctrl) + sz_extra, GFP_KERNEL);
if (ctrl == NULL) {
@@ -1992,6 +2088,22 @@ static struct v4l2_ctrl *v4l2_ctrl_new(struct v4l2_ctrl_handler *hdl,
memcpy(ctrl->p_def.p, p_def.p_const, elem_size);
}
+ if (flags & V4L2_CTRL_FLAG_HAS_WHICH_MIN_MAX) {
+ void *ptr = ctrl->p_def.p;
+
+ if (p_min.p_const) {
+ ptr += elem_size;
+ ctrl->p_min.p = ptr;
+ memcpy(ctrl->p_min.p, p_min.p_const, elem_size);
+ }
+
+ if (p_max.p_const) {
+ ptr += elem_size;
+ ctrl->p_max.p = ptr;
+ memcpy(ctrl->p_max.p, p_max.p_const, elem_size);
+ }
+ }
+
ctrl->type_ops->init(ctrl, 0, ctrl->p_cur);
cur_to_new(ctrl);
@@ -2042,7 +2154,8 @@ struct v4l2_ctrl *v4l2_ctrl_new_custom(struct v4l2_ctrl_handler *hdl,
type, min, max,
is_menu ? cfg->menu_skip_mask : step, def,
cfg->dims, cfg->elem_size,
- flags, qmenu, qmenu_int, cfg->p_def, priv);
+ flags, qmenu, qmenu_int, cfg->p_def, cfg->p_min,
+ cfg->p_max, priv);
if (ctrl)
ctrl->is_private = cfg->is_private;
return ctrl;
@@ -2067,7 +2180,8 @@ struct v4l2_ctrl *v4l2_ctrl_new_std(struct v4l2_ctrl_handler *hdl,
}
return v4l2_ctrl_new(hdl, ops, NULL, id, name, type,
min, max, step, def, NULL, 0,
- flags, NULL, NULL, ptr_null, NULL);
+ flags, NULL, NULL, ptr_null, ptr_null,
+ ptr_null, NULL);
}
EXPORT_SYMBOL(v4l2_ctrl_new_std);
@@ -2100,7 +2214,8 @@ struct v4l2_ctrl *v4l2_ctrl_new_std_menu(struct v4l2_ctrl_handler *hdl,
}
return v4l2_ctrl_new(hdl, ops, NULL, id, name, type,
0, max, mask, def, NULL, 0,
- flags, qmenu, qmenu_int, ptr_null, NULL);
+ flags, qmenu, qmenu_int, ptr_null, ptr_null,
+ ptr_null, NULL);
}
EXPORT_SYMBOL(v4l2_ctrl_new_std_menu);
@@ -2132,7 +2247,8 @@ struct v4l2_ctrl *v4l2_ctrl_new_std_menu_items(struct v4l2_ctrl_handler *hdl,
}
return v4l2_ctrl_new(hdl, ops, NULL, id, name, type,
0, max, mask, def, NULL, 0,
- flags, qmenu, NULL, ptr_null, NULL);
+ flags, qmenu, NULL, ptr_null, ptr_null,
+ ptr_null, NULL);
}
EXPORT_SYMBOL(v4l2_ctrl_new_std_menu_items);
@@ -2140,7 +2256,9 @@ EXPORT_SYMBOL(v4l2_ctrl_new_std_menu_items);
/* Helper function for standard compound controls */
struct v4l2_ctrl *v4l2_ctrl_new_std_compound(struct v4l2_ctrl_handler *hdl,
const struct v4l2_ctrl_ops *ops, u32 id,
- const union v4l2_ctrl_ptr p_def)
+ const union v4l2_ctrl_ptr p_def,
+ const union v4l2_ctrl_ptr p_min,
+ const union v4l2_ctrl_ptr p_max)
{
const char *name;
enum v4l2_ctrl_type type;
@@ -2154,7 +2272,7 @@ struct v4l2_ctrl *v4l2_ctrl_new_std_compound(struct v4l2_ctrl_handler *hdl,
}
return v4l2_ctrl_new(hdl, ops, NULL, id, name, type,
min, max, step, def, NULL, 0,
- flags, NULL, NULL, p_def, NULL);
+ flags, NULL, NULL, p_def, p_min, p_max, NULL);
}
EXPORT_SYMBOL(v4l2_ctrl_new_std_compound);
@@ -2178,7 +2296,8 @@ struct v4l2_ctrl *v4l2_ctrl_new_int_menu(struct v4l2_ctrl_handler *hdl,
}
return v4l2_ctrl_new(hdl, ops, NULL, id, name, type,
0, max, 0, def, NULL, 0,
- flags, NULL, qmenu_int, ptr_null, NULL);
+ flags, NULL, qmenu_int, ptr_null, ptr_null,
+ ptr_null, NULL);
}
EXPORT_SYMBOL(v4l2_ctrl_new_int_menu);
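The new p_min/p_max plumbing above lets compound controls carry explicit minimum and maximum payloads. A minimal sketch of a driver registering a custom V4L2_CTRL_TYPE_RECT control this way follows; the CID, name, limits, and the assumption that the driver itself sets V4L2_CTRL_FLAG_HAS_WHICH_MIN_MAX for a compound type are all illustrative.

#include <media/v4l2-ctrls.h>

static const struct v4l2_rect example_rect_def = { .width = 640, .height = 480 };
static const struct v4l2_rect example_rect_min = { .width = 16, .height = 16 };
static const struct v4l2_rect example_rect_max = { .width = 4096, .height = 3072 };

/* Illustrative: register a rectangle control with explicit min/max payloads. */
static struct v4l2_ctrl *example_new_rect_ctrl(struct v4l2_ctrl_handler *hdl,
					       const struct v4l2_ctrl_ops *ops)
{
	const struct v4l2_ctrl_config cfg = {
		.ops = ops,
		.id = V4L2_CID_USER_BASE | 0x1001,	/* placeholder CID */
		.name = "Example Rectangle",
		.type = V4L2_CTRL_TYPE_RECT,
		.flags = V4L2_CTRL_FLAG_HAS_WHICH_MIN_MAX,
		.p_def = v4l2_ctrl_ptr_create((void *)&example_rect_def),
		.p_min = v4l2_ctrl_ptr_create((void *)&example_rect_min),
		.p_max = v4l2_ctrl_ptr_create((void *)&example_rect_max),
	};

	return v4l2_ctrl_new_custom(hdl, &cfg, NULL);
}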
diff --git a/drivers/media/v4l2-core/v4l2-dev.c b/drivers/media/v4l2-core/v4l2-dev.c
index 5bcaeeba4d09..b40c08ce909d 100644
--- a/drivers/media/v4l2-core/v4l2-dev.c
+++ b/drivers/media/v4l2-core/v4l2-dev.c
@@ -572,13 +572,13 @@ static void determine_valid_ioctls(struct video_device *vdev)
and that can't be tested here. If the bit for these control ioctls
is set, then the ioctl is valid. But if it is 0, then it can still
be valid if the filehandle passed the control handler. */
- if (vdev->ctrl_handler || ops->vidioc_queryctrl)
+ if (vdev->ctrl_handler || ops->vidioc_query_ext_ctrl)
__set_bit(_IOC_NR(VIDIOC_QUERYCTRL), valid_ioctls);
if (vdev->ctrl_handler || ops->vidioc_query_ext_ctrl)
__set_bit(_IOC_NR(VIDIOC_QUERY_EXT_CTRL), valid_ioctls);
- if (vdev->ctrl_handler || ops->vidioc_g_ctrl || ops->vidioc_g_ext_ctrls)
+ if (vdev->ctrl_handler || ops->vidioc_g_ext_ctrls)
__set_bit(_IOC_NR(VIDIOC_G_CTRL), valid_ioctls);
- if (vdev->ctrl_handler || ops->vidioc_s_ctrl || ops->vidioc_s_ext_ctrls)
+ if (vdev->ctrl_handler || ops->vidioc_s_ext_ctrls)
__set_bit(_IOC_NR(VIDIOC_S_CTRL), valid_ioctls);
if (vdev->ctrl_handler || ops->vidioc_g_ext_ctrls)
__set_bit(_IOC_NR(VIDIOC_G_EXT_CTRLS), valid_ioctls);
diff --git a/drivers/media/v4l2-core/v4l2-dv-timings.c b/drivers/media/v4l2-core/v4l2-dv-timings.c
index d26edf157e64..7710cb26bea0 100644
--- a/drivers/media/v4l2-core/v4l2-dv-timings.c
+++ b/drivers/media/v4l2-core/v4l2-dv-timings.c
@@ -764,7 +764,7 @@ bool v4l2_detect_gtf(unsigned int frame_height,
u64 num;
u32 den;
- num = ((image_width * GTF_D_C_PRIME * (u64)hfreq) -
+ num = (((u64)image_width * GTF_D_C_PRIME * hfreq) -
((u64)image_width * GTF_D_M_PRIME * 1000));
den = (hfreq * (100 - GTF_D_C_PRIME) + GTF_D_M_PRIME * 1000) *
(2 * GTF_CELL_GRAN);
@@ -774,7 +774,7 @@ bool v4l2_detect_gtf(unsigned int frame_height,
u64 num;
u32 den;
- num = ((image_width * GTF_S_C_PRIME * (u64)hfreq) -
+ num = (((u64)image_width * GTF_S_C_PRIME * hfreq) -
((u64)image_width * GTF_S_M_PRIME * 1000));
den = (hfreq * (100 - GTF_S_C_PRIME) + GTF_S_M_PRIME * 1000) *
(2 * GTF_CELL_GRAN);
@@ -1018,6 +1018,42 @@ v4l2_hdmi_rx_colorimetry(const struct hdmi_avi_infoframe *avi,
EXPORT_SYMBOL_GPL(v4l2_hdmi_rx_colorimetry);
/**
+ * v4l2_num_edid_blocks() - return the number of EDID blocks
+ *
+ * @edid: pointer to the EDID data
+ * @max_blocks: maximum number of supported EDID blocks
+ *
+ * Return: the number of EDID blocks based on the contents of the EDID.
+ * This supports the HDMI Forum EDID Extension Override Data Block.
+ */
+unsigned int v4l2_num_edid_blocks(const u8 *edid, unsigned int max_blocks)
+{
+ unsigned int blocks;
+
+ if (!edid || !max_blocks)
+ return 0;
+
+ // The number of extension blocks is recorded at byte 126 of the
+ // first 128-byte block in the EDID.
+ //
+ // If there is an HDMI Forum EDID Extension Override Data Block
+ // present, then it is in bytes 4-6 of the first CTA-861 extension
+ // block of the EDID.
+ blocks = edid[126] + 1;
+ // Check for HDMI Forum EDID Extension Override Data Block
+ if (blocks >= 2 && // The EDID must be at least 2 blocks
+ max_blocks >= 3 && // The caller supports at least 3 blocks
+ edid[128] == 2 && // The first extension block is type CTA-861
+ edid[133] == 0x78 && // Identifier for the EEODB
+ (edid[132] & 0xe0) == 0xe0 && // Tag Code == 7
+ (edid[132] & 0x1f) >= 2 && // Length >= 2
+ edid[134] > 1) // Number of extension blocks is sane
+ blocks = edid[134] + 1;
+ return blocks > max_blocks ? max_blocks : blocks;
+}
+EXPORT_SYMBOL_GPL(v4l2_num_edid_blocks);
+
+/**
* v4l2_get_edid_phys_addr() - find and return the physical address
*
* @edid: pointer to the EDID data
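v4l2_num_edid_blocks() lets receivers honour an HDMI Forum EEODB block count that exceeds what byte 126 alone advertises. The sketch below shows one way a subdev's set_edid handler might clamp the caller's block count with it; the block limit, the static buffer, and the overall flow are illustrative assumptions rather than code from this patch.

#include <linux/string.h>
#include <linux/videodev2.h>
#include <media/v4l2-dv-timings.h>
#include <media/v4l2-subdev.h>

#define EXAMPLE_MAX_EDID_BLOCKS	4	/* placeholder hardware limit */

static u8 example_edid[EXAMPLE_MAX_EDID_BLOCKS * 128];

/* Illustrative: store at most as many blocks as the EDID itself declares. */
static int example_set_edid(struct v4l2_subdev *sd, struct v4l2_edid *edid)
{
	unsigned int blocks;

	if (edid->start_block)
		return -EINVAL;
	if (!edid->blocks)
		return 0;

	blocks = v4l2_num_edid_blocks(edid->edid, EXAMPLE_MAX_EDID_BLOCKS);
	if (edid->blocks > blocks)
		edid->blocks = blocks;

	memcpy(example_edid, edid->edid, edid->blocks * 128);
	return 0;
}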
diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
index 0304daa8471d..a16fb44c7246 100644
--- a/drivers/media/v4l2-core/v4l2-ioctl.c
+++ b/drivers/media/v4l2-core/v4l2-ioctl.c
@@ -310,8 +310,8 @@ static void v4l_print_format(const void *arg, bool write_only)
case V4L2_BUF_TYPE_VIDEO_OVERLAY:
case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY:
win = &p->fmt.win;
- pr_cont(", wxh=%dx%d, x,y=%d,%d, field=%s, chromakey=0x%08x, global_alpha=0x%02x\n",
- win->w.width, win->w.height, win->w.left, win->w.top,
+ pr_cont(", (%d,%d)/%ux%u, field=%s, chromakey=0x%08x, global_alpha=0x%02x\n",
+ win->w.left, win->w.top, win->w.width, win->w.height,
prt_names(win->field, v4l2_field_names),
win->chromakey, win->global_alpha);
break;
@@ -589,12 +589,12 @@ static void v4l_print_cropcap(const void *arg, bool write_only)
{
const struct v4l2_cropcap *p = arg;
- pr_cont("type=%s, bounds wxh=%dx%d, x,y=%d,%d, defrect wxh=%dx%d, x,y=%d,%d, pixelaspect %d/%d\n",
+ pr_cont("type=%s, bounds (%d,%d)/%ux%u, defrect (%d,%d)/%ux%u, pixelaspect %d/%d\n",
prt_names(p->type, v4l2_type_names),
- p->bounds.width, p->bounds.height,
p->bounds.left, p->bounds.top,
- p->defrect.width, p->defrect.height,
+ p->bounds.width, p->bounds.height,
p->defrect.left, p->defrect.top,
+ p->defrect.width, p->defrect.height,
p->pixelaspect.numerator, p->pixelaspect.denominator);
}
@@ -602,20 +602,20 @@ static void v4l_print_crop(const void *arg, bool write_only)
{
const struct v4l2_crop *p = arg;
- pr_cont("type=%s, wxh=%dx%d, x,y=%d,%d\n",
+ pr_cont("type=%s, crop=(%d,%d)/%ux%u\n",
prt_names(p->type, v4l2_type_names),
- p->c.width, p->c.height,
- p->c.left, p->c.top);
+ p->c.left, p->c.top,
+ p->c.width, p->c.height);
}
static void v4l_print_selection(const void *arg, bool write_only)
{
const struct v4l2_selection *p = arg;
- pr_cont("type=%s, target=%d, flags=0x%x, wxh=%dx%d, x,y=%d,%d\n",
+ pr_cont("type=%s, target=%d, flags=0x%x, rect=(%d,%d)/%ux%u\n",
prt_names(p->type, v4l2_type_names),
p->target, p->flags,
- p->r.width, p->r.height, p->r.left, p->r.top);
+ p->r.left, p->r.top, p->r.width, p->r.height);
}
static void v4l_print_jpegcompression(const void *arg, bool write_only)
@@ -893,7 +893,9 @@ static bool check_ext_ctrls(struct v4l2_ext_controls *c, unsigned long ioctl)
return false;
break;
case V4L2_CTRL_WHICH_DEF_VAL:
- /* Default value cannot be changed */
+ case V4L2_CTRL_WHICH_MIN_VAL:
+ case V4L2_CTRL_WHICH_MAX_VAL:
+ /* Default, minimum or maximum value cannot be changed */
if (ioctl == VIDIOC_S_EXT_CTRLS ||
ioctl == VIDIOC_TRY_EXT_CTRLS) {
c->error_idx = c->count;
@@ -2284,17 +2286,26 @@ static int v4l_queryctrl(const struct v4l2_ioctl_ops *ops,
struct file *file, void *fh, void *arg)
{
struct video_device *vfd = video_devdata(file);
+ struct v4l2_query_ext_ctrl qec = {};
struct v4l2_queryctrl *p = arg;
struct v4l2_fh *vfh =
test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags) ? fh : NULL;
+ int ret;
if (vfh && vfh->ctrl_handler)
return v4l2_queryctrl(vfh->ctrl_handler, p);
if (vfd->ctrl_handler)
return v4l2_queryctrl(vfd->ctrl_handler, p);
- if (ops->vidioc_queryctrl)
- return ops->vidioc_queryctrl(file, fh, p);
- return -ENOTTY;
+ if (!ops->vidioc_query_ext_ctrl)
+ return -ENOTTY;
+
+ /* Simulate VIDIOC_QUERYCTRL using vidioc_query_ext_ctrl. */
+ qec.id = p->id;
+ ret = ops->vidioc_query_ext_ctrl(file, fh, &qec);
+ if (ret)
+ return ret;
+ v4l2_query_ext_ctrl_to_v4l2_queryctrl(p, &qec);
+ return ret;
}
static int v4l_query_ext_ctrl(const struct v4l2_ioctl_ops *ops,
@@ -2345,8 +2356,6 @@ static int v4l_g_ctrl(const struct v4l2_ioctl_ops *ops,
return v4l2_g_ctrl(vfh->ctrl_handler, p);
if (vfd->ctrl_handler)
return v4l2_g_ctrl(vfd->ctrl_handler, p);
- if (ops->vidioc_g_ctrl)
- return ops->vidioc_g_ctrl(file, fh, p);
if (ops->vidioc_g_ext_ctrls == NULL)
return -ENOTTY;
@@ -2380,8 +2389,6 @@ static int v4l_s_ctrl(const struct v4l2_ioctl_ops *ops,
return v4l2_s_ctrl(vfh, vfh->ctrl_handler, p);
if (vfd->ctrl_handler)
return v4l2_s_ctrl(NULL, vfd->ctrl_handler, p);
- if (ops->vidioc_s_ctrl)
- return ops->vidioc_s_ctrl(file, fh, p);
if (ops->vidioc_s_ext_ctrls == NULL)
return -ENOTTY;
diff --git a/drivers/media/v4l2-core/v4l2-subdev.c b/drivers/media/v4l2-core/v4l2-subdev.c
index cde1774c9098..a3074f469b15 100644
--- a/drivers/media/v4l2-core/v4l2-subdev.c
+++ b/drivers/media/v4l2-core/v4l2-subdev.c
@@ -444,6 +444,8 @@ static int call_enum_dv_timings(struct v4l2_subdev *sd,
static int call_get_mbus_config(struct v4l2_subdev *sd, unsigned int pad,
struct v4l2_mbus_config *config)
{
+ memset(config, 0, sizeof(*config));
+
return check_pad(sd, pad) ? :
sd->ops->pad->get_mbus_config(sd, pad, config);
}
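Because call_get_mbus_config() now zeroes the structure before calling into the subdev, implementations only need to fill in the fields they actually know, including the link_freq field consumed by the pad-based helper earlier in this series. A minimal sketch with placeholder values, assuming a 2-lane D-PHY bus:

#include <media/v4l2-mediabus.h>
#include <media/v4l2-subdev.h>

/* Illustrative only: report a 2-lane CSI-2 D-PHY bus and its link frequency. */
static int example_get_mbus_config(struct v4l2_subdev *sd, unsigned int pad,
				   struct v4l2_mbus_config *config)
{
	config->type = V4L2_MBUS_CSI2_DPHY;
	config->bus.mipi_csi2.num_data_lanes = 2;	/* placeholder */
	config->link_freq = 456000000;			/* placeholder, in Hz */

	return 0;
}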
diff --git a/drivers/memory/mtk-smi.c b/drivers/memory/mtk-smi.c
index 5710348f72f6..a8f5467d6b31 100644
--- a/drivers/memory/mtk-smi.c
+++ b/drivers/memory/mtk-smi.c
@@ -332,6 +332,38 @@ static const u8 mtk_smi_larb_mt8188_ostd[][SMI_LARB_PORT_NR_MAX] = {
[25] = {0x01},
};
+static const u8 mtk_smi_larb_mt8192_ostd[][SMI_LARB_PORT_NR_MAX] = {
+ [0] = {0x2, 0x2, 0x28, 0xa, 0xc, 0x28,},
+ [1] = {0x2, 0x2, 0x18, 0x18, 0x18, 0xa, 0xc, 0x28,},
+ [2] = {0x5, 0x5, 0x5, 0x5, 0x1,},
+ [3] = {},
+ [4] = {0x28, 0x19, 0xb, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x4, 0x1,},
+ [5] = {0x1, 0x1, 0x4, 0x1, 0x1, 0x1, 0x1, 0x16,},
+ [6] = {},
+ [7] = {0x1, 0x3, 0x2, 0x1, 0x1, 0x5, 0x2, 0x12, 0x13, 0x4, 0x4, 0x1,
+ 0x4, 0x2, 0x1,},
+ [8] = {},
+ [9] = {0xa, 0x7, 0xf, 0x8, 0x1, 0x8, 0x9, 0x3, 0x3, 0x6, 0x7, 0x4,
+ 0xa, 0x3, 0x4, 0xe, 0x1, 0x7, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1,
+ 0x1, 0x1, 0x1, 0x1, 0x1,},
+ [10] = {},
+ [11] = {0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1,
+ 0x1, 0x1, 0x1, 0xe, 0x1, 0x7, 0x8, 0x7, 0x7, 0x1, 0x6, 0x2,
+ 0xf, 0x8, 0x1, 0x1, 0x1,},
+ [12] = {},
+ [13] = {0x2, 0xc, 0xc, 0xe, 0x6, 0x6, 0x6, 0x6, 0x6, 0x12, 0x6, 0x28,
+ 0x2, 0xc, 0xc, 0x28, 0x12, 0x6,},
+ [14] = {},
+ [15] = {0x28, 0x14, 0x2, 0xc, 0x18, 0x4, 0x28, 0x14, 0x4, 0x4, 0x4, 0x2,
+ 0x4, 0x2, 0x8, 0x4, 0x4,},
+ [16] = {0x28, 0x14, 0x2, 0xc, 0x18, 0x4, 0x28, 0x14, 0x4, 0x4, 0x4, 0x2,
+ 0x4, 0x2, 0x8, 0x4, 0x4,},
+ [17] = {0x28, 0x14, 0x2, 0xc, 0x18, 0x4, 0x28, 0x14, 0x4, 0x4, 0x4, 0x2,
+ 0x4, 0x2, 0x8, 0x4, 0x4,},
+ [18] = {0x2, 0x2, 0x4, 0x2,},
+ [19] = {0x9, 0x9, 0x5, 0x5, 0x1, 0x1,},
+};
+
static const u8 mtk_smi_larb_mt8195_ostd[][SMI_LARB_PORT_NR_MAX] = {
[0] = {0x0a, 0xc, 0x22, 0x22, 0x01, 0x0a,}, /* larb0 */
[1] = {0x0a, 0xc, 0x22, 0x22, 0x01, 0x0a,}, /* larb1 */
@@ -427,6 +459,7 @@ static const struct mtk_smi_larb_gen mtk_smi_larb_mt8188 = {
static const struct mtk_smi_larb_gen mtk_smi_larb_mt8192 = {
.config_port = mtk_smi_larb_config_port_gen2_general,
+ .ostd = mtk_smi_larb_mt8192_ostd,
};
static const struct mtk_smi_larb_gen mtk_smi_larb_mt8195 = {
diff --git a/drivers/memory/omap-gpmc.c b/drivers/memory/omap-gpmc.c
index e2a75a52563f..53f1888cc84f 100644
--- a/drivers/memory/omap-gpmc.c
+++ b/drivers/memory/omap-gpmc.c
@@ -2226,26 +2226,6 @@ static int gpmc_probe_generic_child(struct platform_device *pdev,
goto err;
}
- if (of_node_name_eq(child, "nand")) {
- /* Warn about older DT blobs with no compatible property */
- if (!of_property_read_bool(child, "compatible")) {
- dev_warn(&pdev->dev,
- "Incompatible NAND node: missing compatible");
- ret = -EINVAL;
- goto err;
- }
- }
-
- if (of_node_name_eq(child, "onenand")) {
- /* Warn about older DT blobs with no compatible property */
- if (!of_property_read_bool(child, "compatible")) {
- dev_warn(&pdev->dev,
- "Incompatible OneNAND node: missing compatible");
- ret = -EINVAL;
- goto err;
- }
- }
-
if (of_match_node(omap_nand_ids, child)) {
/* NAND specific setup */
val = 8;
diff --git a/drivers/memory/tegra/tegra20-emc.c b/drivers/memory/tegra/tegra20-emc.c
index 9b7d30a21a5b..44ac55feacd3 100644
--- a/drivers/memory/tegra/tegra20-emc.c
+++ b/drivers/memory/tegra/tegra20-emc.c
@@ -1191,10 +1191,8 @@ static int tegra_emc_probe(struct platform_device *pdev)
int irq, err;
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(&pdev->dev, "please update your device tree\n");
+ if (irq < 0)
return irq;
- }
emc = devm_kzalloc(&pdev->dev, sizeof(*emc), GFP_KERNEL);
if (!emc)
diff --git a/drivers/memstick/core/ms_block.c b/drivers/memstick/core/ms_block.c
index 5b617c1f6789..f4398383ae06 100644
--- a/drivers/memstick/core/ms_block.c
+++ b/drivers/memstick/core/ms_block.c
@@ -1904,7 +1904,7 @@ static void msb_io_work(struct work_struct *work)
/* process the request */
dbg_verbose("IO: processing new request");
- blk_rq_map_sg(msb->queue, req, sg);
+ blk_rq_map_sg(req, sg);
lba = blk_rq_pos(req);
diff --git a/drivers/memstick/core/mspro_block.c b/drivers/memstick/core/mspro_block.c
index 634d343b6bdb..c9853d887d28 100644
--- a/drivers/memstick/core/mspro_block.c
+++ b/drivers/memstick/core/mspro_block.c
@@ -627,9 +627,7 @@ static int mspro_block_issue_req(struct memstick_dev *card)
while (true) {
msb->current_page = 0;
msb->current_seg = 0;
- msb->seg_count = blk_rq_map_sg(msb->block_req->q,
- msb->block_req,
- msb->req_sg);
+ msb->seg_count = blk_rq_map_sg(msb->block_req, msb->req_sg);
if (!msb->seg_count) {
unsigned int bytes = blk_rq_cur_bytes(msb->block_req);
diff --git a/drivers/memstick/host/rtsx_usb_ms.c b/drivers/memstick/host/rtsx_usb_ms.c
index 6eb892fd4d34..3878136227e4 100644
--- a/drivers/memstick/host/rtsx_usb_ms.c
+++ b/drivers/memstick/host/rtsx_usb_ms.c
@@ -813,6 +813,7 @@ static void rtsx_usb_ms_drv_remove(struct platform_device *pdev)
host->eject = true;
cancel_work_sync(&host->handle_req);
+ cancel_delayed_work_sync(&host->poll_card);
mutex_lock(&host->host_mutex);
if (host->req) {
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
index 7e79da9684ed..185c08eab4ca 100644
--- a/drivers/message/fusion/mptsas.c
+++ b/drivers/message/fusion/mptsas.c
@@ -2834,10 +2834,10 @@ struct rep_manu_reply{
u8 sas_format:1;
u8 reserved1:7;
u8 reserved2[3];
- u8 vendor_id[SAS_EXPANDER_VENDOR_ID_LEN];
- u8 product_id[SAS_EXPANDER_PRODUCT_ID_LEN];
- u8 product_rev[SAS_EXPANDER_PRODUCT_REV_LEN];
- u8 component_vendor_id[SAS_EXPANDER_COMPONENT_VENDOR_ID_LEN];
+ u8 vendor_id[SAS_EXPANDER_VENDOR_ID_LEN] __nonstring;
+ u8 product_id[SAS_EXPANDER_PRODUCT_ID_LEN] __nonstring;
+ u8 product_rev[SAS_EXPANDER_PRODUCT_REV_LEN] __nonstring;
+ u8 component_vendor_id[SAS_EXPANDER_COMPONENT_VENDOR_ID_LEN] __nonstring;
u16 component_id;
u8 component_revision_id;
u8 reserved3;
diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
index a9604ba3c805..3a64dc7a7e27 100644
--- a/drivers/message/fusion/mptscsih.c
+++ b/drivers/message/fusion/mptscsih.c
@@ -1843,65 +1843,6 @@ mptscsih_dev_reset(struct scsi_cmnd * SCpnt)
return FAILED;
}
-/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/**
- * mptscsih_target_reset - Perform a SCSI TARGET_RESET!
- * @SCpnt: Pointer to scsi_cmnd structure, IO which reset is due to
- *
- * (linux scsi_host_template.eh_target_reset_handler routine)
- *
- * Returns SUCCESS or FAILED.
- **/
-int
-mptscsih_target_reset(struct scsi_cmnd * SCpnt)
-{
- MPT_SCSI_HOST *hd;
- int retval;
- VirtDevice *vdevice;
- MPT_ADAPTER *ioc;
-
- /* If we can't locate our host adapter structure, return FAILED status.
- */
- if ((hd = shost_priv(SCpnt->device->host)) == NULL){
- printk(KERN_ERR MYNAM ": target reset: "
- "Can't locate host! (sc=%p)\n", SCpnt);
- return FAILED;
- }
-
- ioc = hd->ioc;
- printk(MYIOC_s_INFO_FMT "attempting target reset! (sc=%p)\n",
- ioc->name, SCpnt);
- scsi_print_command(SCpnt);
-
- vdevice = SCpnt->device->hostdata;
- if (!vdevice || !vdevice->vtarget) {
- retval = 0;
- goto out;
- }
-
- /* Target reset to hidden raid component is not supported
- */
- if (vdevice->vtarget->tflags & MPT_TARGET_FLAGS_RAID_COMPONENT) {
- retval = FAILED;
- goto out;
- }
-
- retval = mptscsih_IssueTaskMgmt(hd,
- MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET,
- vdevice->vtarget->channel,
- vdevice->vtarget->id, 0, 0,
- mptscsih_get_tm_timeout(ioc));
-
- out:
- printk (MYIOC_s_INFO_FMT "target reset: %s (sc=%p)\n",
- ioc->name, ((retval == 0) ? "SUCCESS" : "FAILED" ), SCpnt);
-
- if (retval == 0)
- return SUCCESS;
- else
- return FAILED;
-}
-
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
@@ -2915,14 +2856,14 @@ mptscsih_do_cmd(MPT_SCSI_HOST *hd, INTERNAL_CMD *io)
timeout = 10;
break;
- case RESERVE:
+ case RESERVE_6:
cmdLen = 6;
dir = MPI_SCSIIO_CONTROL_READ;
CDB[0] = cmd;
timeout = 10;
break;
- case RELEASE:
+ case RELEASE_6:
cmdLen = 6;
dir = MPI_SCSIIO_CONTROL_READ;
CDB[0] = cmd;
@@ -3306,7 +3247,6 @@ EXPORT_SYMBOL(mptscsih_sdev_destroy);
EXPORT_SYMBOL(mptscsih_sdev_configure);
EXPORT_SYMBOL(mptscsih_abort);
EXPORT_SYMBOL(mptscsih_dev_reset);
-EXPORT_SYMBOL(mptscsih_target_reset);
EXPORT_SYMBOL(mptscsih_bus_reset);
EXPORT_SYMBOL(mptscsih_host_reset);
EXPORT_SYMBOL(mptscsih_bios_param);
diff --git a/drivers/message/fusion/mptscsih.h b/drivers/message/fusion/mptscsih.h
index ece451c575e1..8c2bb2331fc1 100644
--- a/drivers/message/fusion/mptscsih.h
+++ b/drivers/message/fusion/mptscsih.h
@@ -121,7 +121,6 @@ extern int mptscsih_sdev_configure(struct scsi_device *device,
struct queue_limits *lim);
extern int mptscsih_abort(struct scsi_cmnd * SCpnt);
extern int mptscsih_dev_reset(struct scsi_cmnd * SCpnt);
-extern int mptscsih_target_reset(struct scsi_cmnd * SCpnt);
extern int mptscsih_bus_reset(struct scsi_cmnd * SCpnt);
extern int mptscsih_host_reset(struct scsi_cmnd *SCpnt);
extern int mptscsih_bios_param(struct scsi_device * sdev, struct block_device *bdev, sector_t capacity, int geom[]);
diff --git a/drivers/mfd/intel-lpss.c b/drivers/mfd/intel-lpss.c
index 3ba05ebb9035..63d6694f7145 100644
--- a/drivers/mfd/intel-lpss.c
+++ b/drivers/mfd/intel-lpss.c
@@ -480,7 +480,7 @@ EXPORT_SYMBOL_NS_GPL(intel_lpss_remove, "INTEL_LPSS");
static int resume_lpss_device(struct device *dev, void *data)
{
- if (!dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND))
+ if (!dev_pm_smart_suspend(dev))
pm_runtime_resume(dev);
return 0;
diff --git a/drivers/misc/cardreader/rtsx_usb.c b/drivers/misc/cardreader/rtsx_usb.c
index e0174da5e9fc..77b0490a1b38 100644
--- a/drivers/misc/cardreader/rtsx_usb.c
+++ b/drivers/misc/cardreader/rtsx_usb.c
@@ -286,7 +286,6 @@ static int rtsx_usb_get_status_with_bulk(struct rtsx_ucr *ucr, u16 *status)
int rtsx_usb_get_card_status(struct rtsx_ucr *ucr, u16 *status)
{
int ret;
- u8 interrupt_val = 0;
u16 *buf;
if (!status)
@@ -309,20 +308,6 @@ int rtsx_usb_get_card_status(struct rtsx_ucr *ucr, u16 *status)
ret = rtsx_usb_get_status_with_bulk(ucr, status);
}
- rtsx_usb_read_register(ucr, CARD_INT_PEND, &interrupt_val);
- /* Cross check presence with interrupts */
- if (*status & XD_CD)
- if (!(interrupt_val & XD_INT))
- *status &= ~XD_CD;
-
- if (*status & SD_CD)
- if (!(interrupt_val & SD_INT))
- *status &= ~SD_CD;
-
- if (*status & MS_CD)
- if (!(interrupt_val & MS_INT))
- *status &= ~MS_CD;
-
/* usb_control_msg may return positive when success */
if (ret < 0)
return ret;
diff --git a/drivers/misc/eeprom/digsy_mtc_eeprom.c b/drivers/misc/eeprom/digsy_mtc_eeprom.c
index 88888485e6f8..ee58f7ce5bfa 100644
--- a/drivers/misc/eeprom/digsy_mtc_eeprom.c
+++ b/drivers/misc/eeprom/digsy_mtc_eeprom.c
@@ -50,7 +50,7 @@ static struct platform_device digsy_mtc_eeprom = {
};
static struct gpiod_lookup_table eeprom_spi_gpiod_table = {
- .dev_id = "spi_gpio",
+ .dev_id = "spi_gpio.1",
.table = {
GPIO_LOOKUP("gpio@b00", GPIO_EEPROM_CLK,
"sck", GPIO_ACTIVE_HIGH),
diff --git a/drivers/misc/mei/Kconfig b/drivers/misc/mei/Kconfig
index 67d9391f1855..7575fee96cc6 100644
--- a/drivers/misc/mei/Kconfig
+++ b/drivers/misc/mei/Kconfig
@@ -3,7 +3,7 @@
config INTEL_MEI
tristate "Intel Management Engine Interface"
depends on X86 && PCI
- default GENERIC_CPU || MCORE2 || MATOM || X86_GENERIC
+ default X86_64 || MATOM
help
The Intel Management Engine (Intel ME) provides Manageability,
Security and Media services for systems containing Intel chipsets.
diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h
index c3a6657dcd4a..a5f88ec97df7 100644
--- a/drivers/misc/mei/hw-me-regs.h
+++ b/drivers/misc/mei/hw-me-regs.h
@@ -117,6 +117,8 @@
#define MEI_DEV_ID_LNL_M 0xA870 /* Lunar Lake Point M */
+#define MEI_DEV_ID_PTL_P 0xE470 /* Panther Lake P */
+
/*
* MEI HW Section
*/
diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
index 6589635f8ba3..d6ff9d82ae94 100644
--- a/drivers/misc/mei/pci-me.c
+++ b/drivers/misc/mei/pci-me.c
@@ -124,6 +124,8 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
{MEI_PCI_DEVICE(MEI_DEV_ID_LNL_M, MEI_ME_PCH15_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_PTL_P, MEI_ME_PCH15_CFG)},
+
/* required last entry */
{0, }
};
diff --git a/drivers/misc/mei/vsc-tp.c b/drivers/misc/mei/vsc-tp.c
index 35d349fee769..7be1649b1972 100644
--- a/drivers/misc/mei/vsc-tp.c
+++ b/drivers/misc/mei/vsc-tp.c
@@ -502,7 +502,7 @@ static int vsc_tp_probe(struct spi_device *spi)
if (ret)
return ret;
- tp->wakeuphost = devm_gpiod_get(dev, "wakeuphost", GPIOD_IN);
+ tp->wakeuphost = devm_gpiod_get(dev, "wakeuphostint", GPIOD_IN);
if (IS_ERR(tp->wakeuphost))
return PTR_ERR(tp->wakeuphost);
diff --git a/drivers/misc/ntsync.c b/drivers/misc/ntsync.c
index 055395cde42b..999026a1ae04 100644
--- a/drivers/misc/ntsync.c
+++ b/drivers/misc/ntsync.c
@@ -873,6 +873,7 @@ static int setup_wait(struct ntsync_device *dev,
{
int fds[NTSYNC_MAX_WAIT_COUNT + 1];
const __u32 count = args->count;
+ size_t size = array_size(count, sizeof(fds[0]));
struct ntsync_q *q;
__u32 total_count;
__u32 i, j;
@@ -880,15 +881,14 @@ static int setup_wait(struct ntsync_device *dev,
if (args->pad || (args->flags & ~NTSYNC_WAIT_REALTIME))
return -EINVAL;
- if (args->count > NTSYNC_MAX_WAIT_COUNT)
+ if (size >= sizeof(fds))
return -EINVAL;
total_count = count;
if (args->alert)
total_count++;
- if (copy_from_user(fds, u64_to_user_ptr(args->objs),
- array_size(count, sizeof(*fds))))
+ if (copy_from_user(fds, u64_to_user_ptr(args->objs), size))
return -EFAULT;
if (args->alert)
fds[count] = args->alert;
@@ -1208,6 +1208,7 @@ static struct miscdevice ntsync_misc = {
.minor = MISC_DYNAMIC_MINOR,
.name = NTSYNC_NAME,
.fops = &ntsync_fops,
+ .mode = 0666,
};
module_misc_device(ntsync_misc);
diff --git a/drivers/misc/vcpu_stall_detector.c b/drivers/misc/vcpu_stall_detector.c
index f0b1fc87490e..26166357b255 100644
--- a/drivers/misc/vcpu_stall_detector.c
+++ b/drivers/misc/vcpu_stall_detector.c
@@ -111,8 +111,7 @@ static int start_stall_detector_cpu(unsigned int cpu)
ping_timeout_ms = vcpu_stall_config.stall_timeout_sec *
MSEC_PER_SEC / 2;
- hrtimer_init(vcpu_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- vcpu_hrtimer->function = vcpu_stall_detect_timer_fn;
+ hrtimer_setup(vcpu_hrtimer, vcpu_stall_detect_timer_fn, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
vcpu_stall_detector->is_initialized = true;
hrtimer_start(vcpu_hrtimer, ms_to_ktime(ping_timeout_ms),
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 5241528f8b90..ce08e0ea7fc1 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -335,7 +335,7 @@ int mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
{
int err;
- if (mrq->cmd && mrq->cmd->has_ext_addr)
+ if (mrq->cmd->has_ext_addr)
mmc_send_ext_addr(host, mrq->cmd->ext_addr);
init_completion(&mrq->cmd_completion);
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index 6a23be214543..1522fd2b517d 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -11,6 +11,7 @@
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/stat.h>
+#include <linux/string.h>
#include <linux/pm_runtime.h>
#include <linux/random.h>
#include <linux/sysfs.h>
@@ -66,7 +67,7 @@ static int mmc_decode_cid(struct mmc_card *card)
/*
* The selection of the format here is based upon published
- * specs from sandisk and from what people have reported.
+ * specs from SanDisk and from what people have reported.
*/
switch (card->csd.mmca_vsn) {
case 0: /* MMC v1.0 - v1.2 */
@@ -109,6 +110,9 @@ static int mmc_decode_cid(struct mmc_card *card)
return -EINVAL;
}
+ /* some product names include trailing whitespace */
+ strim(card->cid.prod_name);
+
return 0;
}
diff --git a/drivers/mmc/core/pwrseq_simple.c b/drivers/mmc/core/pwrseq_simple.c
index 37cd858df0f4..4b47e6c3b04b 100644
--- a/drivers/mmc/core/pwrseq_simple.c
+++ b/drivers/mmc/core/pwrseq_simple.c
@@ -54,8 +54,7 @@ static void mmc_pwrseq_simple_set_gpios_value(struct mmc_pwrseq_simple *pwrseq,
else
bitmap_zero(values, nvalues);
- gpiod_set_array_value_cansleep(nvalues, reset_gpios->desc,
- reset_gpios->info, values);
+ gpiod_multi_set_value_cansleep(reset_gpios, values);
bitmap_free(values);
}
diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c
index ab662f502fe7..3ba62f825b84 100644
--- a/drivers/mmc/core/queue.c
+++ b/drivers/mmc/core/queue.c
@@ -523,5 +523,5 @@ unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq)
{
struct request *req = mmc_queue_req_to_req(mqrq);
- return blk_rq_map_sg(mq->queue, req, mqrq->sg);
+ return blk_rq_map_sg(req, mqrq->sg);
}
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index cc757b850e79..8eba697d3d86 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -11,6 +11,7 @@
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/stat.h>
+#include <linux/string.h>
#include <linux/pm_runtime.h>
#include <linux/random.h>
#include <linux/scatterlist.h>
@@ -95,6 +96,9 @@ void mmc_decode_cid(struct mmc_card *card)
card->cid.month = unstuff_bits(resp, 8, 4);
card->cid.year += 2000; /* SD cards year offset */
+
+ /* some product names may include trailing whitespace */
+ strim(card->cid.prod_name);
}
/*
diff --git a/drivers/mmc/core/sdio_uart.c b/drivers/mmc/core/sdio_uart.c
index 6b7471dba3bf..7423a601e1e5 100644
--- a/drivers/mmc/core/sdio_uart.c
+++ b/drivers/mmc/core/sdio_uart.c
@@ -471,7 +471,7 @@ static void sdio_uart_check_modem_status(struct sdio_uart_port *port)
port->icount.cts++;
tty = tty_port_tty_get(&port->port);
if (tty && C_CRTSCTS(tty)) {
- int cts = (status & UART_MSR_CTS);
+ bool cts = status & UART_MSR_CTS;
if (tty->hw_stopped) {
if (cts) {
tty->hw_stopped = false;
diff --git a/drivers/mmc/core/slot-gpio.c b/drivers/mmc/core/slot-gpio.c
index 12247219e1c2..5fd455816393 100644
--- a/drivers/mmc/core/slot-gpio.c
+++ b/drivers/mmc/core/slot-gpio.c
@@ -159,18 +159,6 @@ int mmc_gpio_set_cd_wake(struct mmc_host *host, bool on)
}
EXPORT_SYMBOL(mmc_gpio_set_cd_wake);
-/* Register an alternate interrupt service routine for
- * the card-detect GPIO.
- */
-void mmc_gpio_set_cd_isr(struct mmc_host *host, irq_handler_t isr)
-{
- struct mmc_gpio *ctx = host->slot.handler_priv;
-
- WARN_ON(ctx->cd_gpio_isr);
- ctx->cd_gpio_isr = isr;
-}
-EXPORT_SYMBOL(mmc_gpio_set_cd_isr);
-
/**
* mmc_gpiod_request_cd - request a gpio descriptor for card-detection
* @host: mmc host
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
index fc360902729d..24fffc702a94 100644
--- a/drivers/mmc/host/atmel-mci.c
+++ b/drivers/mmc/host/atmel-mci.c
@@ -2499,8 +2499,10 @@ static int atmci_probe(struct platform_device *pdev)
/* Get MCI capabilities and set operations according to it */
atmci_get_cap(host);
ret = atmci_configure_dma(host);
- if (ret == -EPROBE_DEFER)
+ if (ret == -EPROBE_DEFER) {
+ clk_disable_unprepare(host->mck);
goto err_dma_probe_defer;
+ }
if (ret == 0) {
host->prepare_data = &atmci_prepare_data_dma;
host->submit_data = &atmci_submit_data_dma;
diff --git a/drivers/mmc/host/cqhci-crypto.c b/drivers/mmc/host/cqhci-crypto.c
index cb8044093402..5a467098a0d6 100644
--- a/drivers/mmc/host/cqhci-crypto.c
+++ b/drivers/mmc/host/cqhci-crypto.c
@@ -84,11 +84,11 @@ static int cqhci_crypto_keyslot_program(struct blk_crypto_profile *profile,
if (ccap_array[cap_idx].algorithm_id == CQHCI_CRYPTO_ALG_AES_XTS) {
/* In XTS mode, the blk_crypto_key's size is already doubled */
- memcpy(cfg.crypto_key, key->raw, key->size/2);
+ memcpy(cfg.crypto_key, key->bytes, key->size/2);
memcpy(cfg.crypto_key + CQHCI_CRYPTO_KEY_MAX_SIZE/2,
- key->raw + key->size/2, key->size/2);
+ key->bytes + key->size/2, key->size/2);
} else {
- memcpy(cfg.crypto_key, key->raw, key->size);
+ memcpy(cfg.crypto_key, key->bytes, key->size);
}
cqhci_crypto_program_key(cq_host, &cfg, slot);
@@ -204,6 +204,8 @@ int cqhci_crypto_init(struct cqhci_host *cq_host)
/* Unfortunately, CQHCI crypto only supports 32 DUN bits. */
profile->max_dun_bytes_supported = 4;
+ profile->key_types_supported = BLK_CRYPTO_KEY_TYPE_RAW;
+
/*
* Cache all the crypto capabilities and advertise the supported crypto
* modes and data unit sizes to the block layer.
diff --git a/drivers/mmc/host/dw_mmc-exynos.c b/drivers/mmc/host/dw_mmc-exynos.c
index 53d32d0f2709..e3548408ca39 100644
--- a/drivers/mmc/host/dw_mmc-exynos.c
+++ b/drivers/mmc/host/dw_mmc-exynos.c
@@ -27,6 +27,8 @@ enum dw_mci_exynos_type {
DW_MCI_TYPE_EXYNOS5420_SMU,
DW_MCI_TYPE_EXYNOS7,
DW_MCI_TYPE_EXYNOS7_SMU,
+ DW_MCI_TYPE_EXYNOS7870,
+ DW_MCI_TYPE_EXYNOS7870_SMU,
DW_MCI_TYPE_ARTPEC8,
};
@@ -70,6 +72,12 @@ static struct dw_mci_exynos_compatible {
.compatible = "samsung,exynos7-dw-mshc-smu",
.ctrl_type = DW_MCI_TYPE_EXYNOS7_SMU,
}, {
+ .compatible = "samsung,exynos7870-dw-mshc",
+ .ctrl_type = DW_MCI_TYPE_EXYNOS7870,
+ }, {
+ .compatible = "samsung,exynos7870-dw-mshc-smu",
+ .ctrl_type = DW_MCI_TYPE_EXYNOS7870_SMU,
+ }, {
.compatible = "axis,artpec8-dw-mshc",
.ctrl_type = DW_MCI_TYPE_ARTPEC8,
},
@@ -85,6 +93,8 @@ static inline u8 dw_mci_exynos_get_ciu_div(struct dw_mci *host)
return EXYNOS4210_FIXED_CIU_CLK_DIV;
else if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS7 ||
priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU ||
+ priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870 ||
+ priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870_SMU ||
priv->ctrl_type == DW_MCI_TYPE_ARTPEC8)
return SDMMC_CLKSEL_GET_DIV(mci_readl(host, CLKSEL64)) + 1;
else
@@ -100,7 +110,8 @@ static void dw_mci_exynos_config_smu(struct dw_mci *host)
* set for non-encryption mode at this time.
*/
if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS5420_SMU ||
- priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU) {
+ priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU ||
+ priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870_SMU) {
mci_writel(host, MPSBEGIN0, 0);
mci_writel(host, MPSEND0, SDMMC_ENDING_SEC_NR_MAX);
mci_writel(host, MPSCTRL0, SDMMC_MPSCTRL_SECURE_WRITE_BIT |
@@ -126,6 +137,12 @@ static int dw_mci_exynos_priv_init(struct dw_mci *host)
DQS_CTRL_GET_RD_DELAY(priv->saved_strobe_ctrl);
}
+ if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870 ||
+ priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870_SMU) {
+ /* Quirk needed for the Exynos7870 SoC */
+ host->quirks |= DW_MMC_QUIRK_FIFO64_32;
+ }
+
if (priv->ctrl_type == DW_MCI_TYPE_ARTPEC8) {
/* Quirk needed for the ARTPEC-8 SoC */
host->quirks |= DW_MMC_QUIRK_EXTENDED_TMOUT;
@@ -143,6 +160,8 @@ static void dw_mci_exynos_set_clksel_timing(struct dw_mci *host, u32 timing)
if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS7 ||
priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU ||
+ priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870 ||
+ priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870_SMU ||
priv->ctrl_type == DW_MCI_TYPE_ARTPEC8)
clksel = mci_readl(host, CLKSEL64);
else
@@ -152,6 +171,8 @@ static void dw_mci_exynos_set_clksel_timing(struct dw_mci *host, u32 timing)
if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS7 ||
priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU ||
+ priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870 ||
+ priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870_SMU ||
priv->ctrl_type == DW_MCI_TYPE_ARTPEC8)
mci_writel(host, CLKSEL64, clksel);
else
@@ -222,6 +243,8 @@ static int dw_mci_exynos_resume_noirq(struct device *dev)
if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS7 ||
priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU ||
+ priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870 ||
+ priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870_SMU ||
priv->ctrl_type == DW_MCI_TYPE_ARTPEC8)
clksel = mci_readl(host, CLKSEL64);
else
@@ -230,6 +253,8 @@ static int dw_mci_exynos_resume_noirq(struct device *dev)
if (clksel & SDMMC_CLKSEL_WAKEUP_INT) {
if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS7 ||
priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU ||
+ priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870 ||
+ priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870_SMU ||
priv->ctrl_type == DW_MCI_TYPE_ARTPEC8)
mci_writel(host, CLKSEL64, clksel);
else
@@ -409,6 +434,8 @@ static inline u8 dw_mci_exynos_get_clksmpl(struct dw_mci *host)
if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS7 ||
priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU ||
+ priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870 ||
+ priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870_SMU ||
priv->ctrl_type == DW_MCI_TYPE_ARTPEC8)
return SDMMC_CLKSEL_CCLK_SAMPLE(mci_readl(host, CLKSEL64));
else
@@ -422,6 +449,8 @@ static inline void dw_mci_exynos_set_clksmpl(struct dw_mci *host, u8 sample)
if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS7 ||
priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU ||
+ priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870 ||
+ priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870_SMU ||
priv->ctrl_type == DW_MCI_TYPE_ARTPEC8)
clksel = mci_readl(host, CLKSEL64);
else
@@ -429,6 +458,8 @@ static inline void dw_mci_exynos_set_clksmpl(struct dw_mci *host, u8 sample)
clksel = SDMMC_CLKSEL_UP_SAMPLE(clksel, sample);
if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS7 ||
priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU ||
+ priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870 ||
+ priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870_SMU ||
priv->ctrl_type == DW_MCI_TYPE_ARTPEC8)
mci_writel(host, CLKSEL64, clksel);
else
@@ -443,6 +474,8 @@ static inline u8 dw_mci_exynos_move_next_clksmpl(struct dw_mci *host)
if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS7 ||
priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU ||
+ priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870 ||
+ priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870_SMU ||
priv->ctrl_type == DW_MCI_TYPE_ARTPEC8)
clksel = mci_readl(host, CLKSEL64);
else
@@ -453,6 +486,8 @@ static inline u8 dw_mci_exynos_move_next_clksmpl(struct dw_mci *host)
if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS7 ||
priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU ||
+ priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870 ||
+ priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870_SMU ||
priv->ctrl_type == DW_MCI_TYPE_ARTPEC8)
mci_writel(host, CLKSEL64, clksel);
else
@@ -632,6 +667,10 @@ static const struct of_device_id dw_mci_exynos_match[] = {
.data = &exynos_drv_data, },
{ .compatible = "samsung,exynos7-dw-mshc-smu",
.data = &exynos_drv_data, },
+ { .compatible = "samsung,exynos7870-dw-mshc",
+ .data = &exynos_drv_data, },
+ { .compatible = "samsung,exynos7870-dw-mshc-smu",
+ .data = &exynos_drv_data, },
{ .compatible = "axis,artpec8-dw-mshc",
.data = &artpec_drv_data, },
{},
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index 3cbda98d08d2..bb596d169420 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -1875,8 +1875,7 @@ static void dw_mci_init_fault(struct dw_mci *host)
{
host->fail_data_crc = (struct fault_attr) FAULT_ATTR_INITIALIZER;
- hrtimer_init(&host->fault_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- host->fault_timer.function = dw_mci_fault_timer;
+ hrtimer_setup(&host->fault_timer, dw_mci_fault_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
}
#else
static void dw_mci_init_fault(struct dw_mci *host)
@@ -2579,6 +2578,91 @@ static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt)
}
}
+static void dw_mci_push_data64_32(struct dw_mci *host, void *buf, int cnt)
+{
+ struct mmc_data *data = host->data;
+ int init_cnt = cnt;
+
+ /* try and push anything in the part_buf */
+ if (unlikely(host->part_buf_count)) {
+ int len = dw_mci_push_part_bytes(host, buf, cnt);
+
+ buf += len;
+ cnt -= len;
+
+ if (host->part_buf_count == 8) {
+ mci_fifo_l_writeq(host->fifo_reg, host->part_buf);
+ host->part_buf_count = 0;
+ }
+ }
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+ if (unlikely((unsigned long)buf & 0x7)) {
+ while (cnt >= 8) {
+ u64 aligned_buf[16];
+ int len = min(cnt & -8, (int)sizeof(aligned_buf));
+ int items = len >> 3;
+ int i;
+ /* memcpy from input buffer into aligned buffer */
+ memcpy(aligned_buf, buf, len);
+ buf += len;
+ cnt -= len;
+ /* push data from aligned buffer into fifo */
+ for (i = 0; i < items; ++i)
+ mci_fifo_l_writeq(host->fifo_reg, aligned_buf[i]);
+ }
+ } else
+#endif
+ {
+ u64 *pdata = buf;
+
+ for (; cnt >= 8; cnt -= 8)
+ mci_fifo_l_writeq(host->fifo_reg, *pdata++);
+ buf = pdata;
+ }
+ /* put anything remaining in the part_buf */
+ if (cnt) {
+ dw_mci_set_part_bytes(host, buf, cnt);
+ /* Push data if we have reached the expected data length */
+ if ((data->bytes_xfered + init_cnt) ==
+ (data->blksz * data->blocks))
+ mci_fifo_l_writeq(host->fifo_reg, host->part_buf);
+ }
+}
+
+static void dw_mci_pull_data64_32(struct dw_mci *host, void *buf, int cnt)
+{
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+ if (unlikely((unsigned long)buf & 0x7)) {
+ while (cnt >= 8) {
+ /* pull data from fifo into aligned buffer */
+ u64 aligned_buf[16];
+ int len = min(cnt & -8, (int)sizeof(aligned_buf));
+ int items = len >> 3;
+ int i;
+
+ for (i = 0; i < items; ++i)
+ aligned_buf[i] = mci_fifo_l_readq(host->fifo_reg);
+
+ /* memcpy from aligned buffer into output buffer */
+ memcpy(buf, aligned_buf, len);
+ buf += len;
+ cnt -= len;
+ }
+ } else
+#endif
+ {
+ u64 *pdata = buf;
+
+ for (; cnt >= 8; cnt -= 8)
+ *pdata++ = mci_fifo_l_readq(host->fifo_reg);
+ buf = pdata;
+ }
+ if (cnt) {
+ host->part_buf = mci_fifo_l_readq(host->fifo_reg);
+ dw_mci_pull_final_bytes(host, buf, cnt);
+ }
+}
+
static void dw_mci_pull_data(struct dw_mci *host, void *buf, int cnt)
{
int len;
@@ -3379,8 +3463,13 @@ int dw_mci_probe(struct dw_mci *host)
width = 16;
host->data_shift = 1;
} else if (i == 2) {
- host->push_data = dw_mci_push_data64;
- host->pull_data = dw_mci_pull_data64;
+ if ((host->quirks & DW_MMC_QUIRK_FIFO64_32)) {
+ host->push_data = dw_mci_push_data64_32;
+ host->pull_data = dw_mci_pull_data64_32;
+ } else {
+ host->push_data = dw_mci_push_data64;
+ host->pull_data = dw_mci_pull_data64;
+ }
width = 64;
host->data_shift = 3;
} else {
diff --git a/drivers/mmc/host/dw_mmc.h b/drivers/mmc/host/dw_mmc.h
index 6447b916990d..5463392dc811 100644
--- a/drivers/mmc/host/dw_mmc.h
+++ b/drivers/mmc/host/dw_mmc.h
@@ -281,6 +281,8 @@ struct dw_mci_board {
/* Support for longer data read timeout */
#define DW_MMC_QUIRK_EXTENDED_TMOUT BIT(0)
+/* Force 32-bit access to the FIFO */
+#define DW_MMC_QUIRK_FIFO64_32 BIT(1)
#define DW_MMC_240A 0x240a
#define DW_MMC_280A 0x280a
@@ -472,6 +474,31 @@ struct dw_mci_board {
#define mci_fifo_writel(__value, __reg) __raw_writel(__reg, __value)
#define mci_fifo_writeq(__value, __reg) __raw_writeq(__reg, __value)
+/*
+ * Some dw_mmc devices have 64-bit FIFOs, but expect them to be
+ * accessed using two 32-bit accesses. If such a controller is used
+ * with a 64-bit kernel, this has to be done explicitly.
+ */
+static inline u64 mci_fifo_l_readq(void __iomem *addr)
+{
+ u64 ans;
+ u32 proxy[2];
+
+ proxy[0] = mci_fifo_readl(addr);
+ proxy[1] = mci_fifo_readl(addr + 4);
+ memcpy(&ans, proxy, 8);
+ return ans;
+}
+
+static inline void mci_fifo_l_writeq(void __iomem *addr, u64 value)
+{
+ u32 proxy[2];
+
+ memcpy(proxy, &value, 8);
+ mci_fifo_writel(addr, proxy[0]);
+ mci_fifo_writel(addr + 4, proxy[1]);
+}
+
/* Register access macros */
#define mci_readl(dev, reg) \
readl_relaxed((dev)->regs + SDMMC_##reg)
diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c
index 62252ad4e20d..3cdb2fc44965 100644
--- a/drivers/mmc/host/omap.c
+++ b/drivers/mmc/host/omap.c
@@ -1272,19 +1272,25 @@ static int mmc_omap_new_slot(struct mmc_omap_host *host, int id)
/* Check for some optional GPIO controls */
slot->vsd = devm_gpiod_get_index_optional(host->dev, "vsd",
id, GPIOD_OUT_LOW);
- if (IS_ERR(slot->vsd))
- return dev_err_probe(host->dev, PTR_ERR(slot->vsd),
+ if (IS_ERR(slot->vsd)) {
+ r = dev_err_probe(host->dev, PTR_ERR(slot->vsd),
"error looking up VSD GPIO\n");
+ goto err_free_host;
+ }
slot->vio = devm_gpiod_get_index_optional(host->dev, "vio",
id, GPIOD_OUT_LOW);
- if (IS_ERR(slot->vio))
- return dev_err_probe(host->dev, PTR_ERR(slot->vio),
+ if (IS_ERR(slot->vio)) {
+ r = dev_err_probe(host->dev, PTR_ERR(slot->vio),
"error looking up VIO GPIO\n");
+ goto err_free_host;
+ }
slot->cover = devm_gpiod_get_index_optional(host->dev, "cover",
id, GPIOD_IN);
- if (IS_ERR(slot->cover))
- return dev_err_probe(host->dev, PTR_ERR(slot->cover),
+ if (IS_ERR(slot->cover)) {
+ r = dev_err_probe(host->dev, PTR_ERR(slot->cover),
"error looking up cover switch GPIO\n");
+ goto err_free_host;
+ }
host->slots[id] = slot;
@@ -1344,6 +1350,7 @@ err_remove_slot_name:
device_remove_file(&mmc->class_dev, &dev_attr_slot_name);
err_remove_host:
mmc_remove_host(mmc);
+err_free_host:
mmc_free_host(mmc);
return r;
}
diff --git a/drivers/mmc/host/renesas_sdhi.h b/drivers/mmc/host/renesas_sdhi.h
index f12a87442338..291ddb4ad9be 100644
--- a/drivers/mmc/host/renesas_sdhi.h
+++ b/drivers/mmc/host/renesas_sdhi.h
@@ -95,6 +95,7 @@ struct renesas_sdhi {
struct reset_control *rstc;
struct tmio_mmc_host *host;
+ struct regulator_dev *rdev;
};
#define host_to_priv(host) \
diff --git a/drivers/mmc/host/renesas_sdhi_core.c b/drivers/mmc/host/renesas_sdhi_core.c
index f73b84bae0c4..fa6526be3638 100644
--- a/drivers/mmc/host/renesas_sdhi_core.c
+++ b/drivers/mmc/host/renesas_sdhi_core.c
@@ -32,6 +32,8 @@
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/regulator/consumer.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/of_regulator.h>
#include <linux/reset.h>
#include <linux/sh_dma.h>
#include <linux/slab.h>
@@ -581,12 +583,24 @@ static void renesas_sdhi_reset(struct tmio_mmc_host *host, bool preserve)
if (!preserve) {
if (priv->rstc) {
+ u32 sd_status;
+ /*
+ * A HW reset might toggle the regulator state in HW, which the
+ * regulator core is unaware of, so save and restore the regulator
+ * state across the HW reset.
+ */
+ if (priv->rdev)
+ sd_status = sd_ctrl_read32(host, CTL_SD_STATUS);
+
reset_control_reset(priv->rstc);
/* Unknown why but without polling reset status, it will hang */
read_poll_timeout(reset_control_status, ret, ret == 0, 1, 100,
false, priv->rstc);
/* At least SDHI_VER_GEN2_SDR50 needs manual release of reset */
sd_ctrl_write16(host, CTL_RESET_SD, 0x0001);
+ if (priv->rdev)
+ sd_ctrl_write32(host, CTL_SD_STATUS, sd_status);
+
priv->needs_adjust_hs400 = false;
renesas_sdhi_set_clock(host, host->clk_cache);
@@ -904,6 +918,102 @@ static void renesas_sdhi_enable_dma(struct tmio_mmc_host *host, bool enable)
renesas_sdhi_sdbuf_width(host, enable ? width : 16);
}
+static const unsigned int renesas_sdhi_vqmmc_voltages[] = {
+ 3300000, 1800000
+};
+
+static int renesas_sdhi_regulator_disable(struct regulator_dev *rdev)
+{
+ struct tmio_mmc_host *host = rdev_get_drvdata(rdev);
+ u32 sd_status;
+
+ sd_status = sd_ctrl_read32(host, CTL_SD_STATUS);
+ sd_status &= ~SD_STATUS_PWEN;
+ sd_ctrl_write32(host, CTL_SD_STATUS, sd_status);
+
+ return 0;
+}
+
+static int renesas_sdhi_regulator_enable(struct regulator_dev *rdev)
+{
+ struct tmio_mmc_host *host = rdev_get_drvdata(rdev);
+ u32 sd_status;
+
+ sd_status = sd_ctrl_read32(host, CTL_SD_STATUS);
+ sd_status |= SD_STATUS_PWEN;
+ sd_ctrl_write32(host, CTL_SD_STATUS, sd_status);
+
+ return 0;
+}
+
+static int renesas_sdhi_regulator_is_enabled(struct regulator_dev *rdev)
+{
+ struct tmio_mmc_host *host = rdev_get_drvdata(rdev);
+ u32 sd_status;
+
+ sd_status = sd_ctrl_read32(host, CTL_SD_STATUS);
+
+ return (sd_status & SD_STATUS_PWEN) ? 1 : 0;
+}
+
+static int renesas_sdhi_regulator_get_voltage(struct regulator_dev *rdev)
+{
+ struct tmio_mmc_host *host = rdev_get_drvdata(rdev);
+ u32 sd_status;
+
+ sd_status = sd_ctrl_read32(host, CTL_SD_STATUS);
+
+ return (sd_status & SD_STATUS_IOVS) ? 1800000 : 3300000;
+}
+
+static int renesas_sdhi_regulator_set_voltage(struct regulator_dev *rdev,
+ int min_uV, int max_uV,
+ unsigned int *selector)
+{
+ struct tmio_mmc_host *host = rdev_get_drvdata(rdev);
+ u32 sd_status;
+
+ sd_status = sd_ctrl_read32(host, CTL_SD_STATUS);
+ if (min_uV >= 1700000 && max_uV <= 1950000) {
+ sd_status |= SD_STATUS_IOVS;
+ *selector = 1;
+ } else {
+ sd_status &= ~SD_STATUS_IOVS;
+ *selector = 0;
+ }
+ sd_ctrl_write32(host, CTL_SD_STATUS, sd_status);
+
+ return 0;
+}
+
+static int renesas_sdhi_regulator_list_voltage(struct regulator_dev *rdev,
+ unsigned int selector)
+{
+ if (selector >= ARRAY_SIZE(renesas_sdhi_vqmmc_voltages))
+ return -EINVAL;
+
+ return renesas_sdhi_vqmmc_voltages[selector];
+}
+
+static const struct regulator_ops renesas_sdhi_regulator_voltage_ops = {
+ .enable = renesas_sdhi_regulator_enable,
+ .disable = renesas_sdhi_regulator_disable,
+ .is_enabled = renesas_sdhi_regulator_is_enabled,
+ .list_voltage = renesas_sdhi_regulator_list_voltage,
+ .get_voltage = renesas_sdhi_regulator_get_voltage,
+ .set_voltage = renesas_sdhi_regulator_set_voltage,
+};
+
+static const struct regulator_desc renesas_sdhi_vqmmc_regulator = {
+ .name = "sdhi-vqmmc-regulator",
+ .of_match = of_match_ptr("vqmmc-regulator"),
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .ops = &renesas_sdhi_regulator_voltage_ops,
+ .volt_table = renesas_sdhi_vqmmc_voltages,
+ .n_voltages = ARRAY_SIZE(renesas_sdhi_vqmmc_voltages),
+};
+
int renesas_sdhi_probe(struct platform_device *pdev,
const struct tmio_mmc_dma_ops *dma_ops,
const struct renesas_sdhi_of_data *of_data,
@@ -911,7 +1021,10 @@ int renesas_sdhi_probe(struct platform_device *pdev,
{
struct tmio_mmc_data *mmd = pdev->dev.platform_data;
struct tmio_mmc_data *mmc_data;
+ struct regulator_config rcfg = { .dev = &pdev->dev, };
+ struct regulator_dev *rdev;
struct renesas_sdhi_dma *dma_priv;
+ struct device *dev = &pdev->dev;
struct tmio_mmc_host *host;
struct renesas_sdhi *priv;
int num_irqs, irq, ret, i;
@@ -1053,6 +1166,24 @@ int renesas_sdhi_probe(struct platform_device *pdev,
if (ret)
goto efree;
+ rcfg.of_node = of_get_child_by_name(dev->of_node, "vqmmc-regulator");
+ if (!of_device_is_available(rcfg.of_node)) {
+ of_node_put(rcfg.of_node);
+ rcfg.of_node = NULL;
+ }
+
+ if (rcfg.of_node) {
+ rcfg.driver_data = priv->host;
+ rdev = devm_regulator_register(dev, &renesas_sdhi_vqmmc_regulator, &rcfg);
+ of_node_put(rcfg.of_node);
+ if (IS_ERR(rdev)) {
+ dev_err(dev, "regulator register failed err=%ld", PTR_ERR(rdev));
+ ret = PTR_ERR(rdev);
+ goto efree;
+ }
+ priv->rdev = rdev;
+ }
+
ver = sd_ctrl_read16(host, CTL_VERSION);
/* GEN2_SDR104 is first known SDHI to use 32bit block count */
if (ver < SDHI_VER_GEN2_SDR104 && mmc_data->max_blk_count > U16_MAX)
diff --git a/drivers/mmc/host/sdhci-brcmstb.c b/drivers/mmc/host/sdhci-brcmstb.c
index 0ef4d578ade8..48cdcba0f39c 100644
--- a/drivers/mmc/host/sdhci-brcmstb.c
+++ b/drivers/mmc/host/sdhci-brcmstb.c
@@ -503,8 +503,15 @@ static int sdhci_brcmstb_suspend(struct device *dev)
struct sdhci_host *host = dev_get_drvdata(dev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_brcmstb_priv *priv = sdhci_pltfm_priv(pltfm_host);
+ int ret;
clk_disable_unprepare(priv->base_clk);
+ if (host->mmc->caps2 & MMC_CAP2_CQE) {
+ ret = cqhci_suspend(host->mmc);
+ if (ret)
+ return ret;
+ }
+
return sdhci_pltfm_suspend(dev);
}
@@ -529,6 +536,9 @@ static int sdhci_brcmstb_resume(struct device *dev)
ret = clk_set_rate(priv->base_clk, priv->base_freq_hz);
}
+ if (host->mmc->caps2 & MMC_CAP2_CQE)
+ ret = cqhci_resume(host->mmc);
+
return ret;
}
#endif
diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
index e3d39311fdc7..57bd49eea777 100644
--- a/drivers/mmc/host/sdhci-msm.c
+++ b/drivers/mmc/host/sdhci-msm.c
@@ -1873,7 +1873,7 @@ static int sdhci_msm_ice_init(struct sdhci_msm_host *msm_host,
if (!(cqhci_readl(cq_host, CQHCI_CAP) & CQHCI_CAP_CS))
return 0;
- ice = of_qcom_ice_get(dev);
+ ice = devm_of_qcom_ice_get(dev);
if (ice == ERR_PTR(-EOPNOTSUPP)) {
dev_warn(dev, "Disabling inline encryption support\n");
ice = NULL;
@@ -1895,6 +1895,7 @@ static int sdhci_msm_ice_init(struct sdhci_msm_host *msm_host,
profile->ll_ops = sdhci_msm_crypto_ops;
profile->max_dun_bytes_supported = 4;
+ profile->key_types_supported = BLK_CRYPTO_KEY_TYPE_RAW;
profile->dev = dev;
/*
@@ -1968,7 +1969,7 @@ static int sdhci_msm_ice_keyslot_program(struct blk_crypto_profile *profile,
return qcom_ice_program_key(msm_host->ice,
QCOM_ICE_CRYPTO_ALG_AES_XTS,
QCOM_ICE_CRYPTO_KEY_SIZE_256,
- key->raw,
+ key->bytes,
key->crypto_cfg.data_unit_size / 512,
slot);
}
diff --git a/drivers/mmc/host/sdhci-of-dwcmshc.c b/drivers/mmc/host/sdhci-of-dwcmshc.c
index 7ea3da45db32..09b9ab15e499 100644
--- a/drivers/mmc/host/sdhci-of-dwcmshc.c
+++ b/drivers/mmc/host/sdhci-of-dwcmshc.c
@@ -328,12 +328,17 @@ static void dwcmshc_request(struct mmc_host *mmc, struct mmc_request *mrq)
sdhci_request(mmc, mrq);
}
-static void dwcmshc_phy_1_8v_init(struct sdhci_host *host)
+static void dwcmshc_phy_init(struct sdhci_host *host)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct dwcmshc_priv *priv = sdhci_pltfm_priv(pltfm_host);
+ u32 rxsel = PHY_PAD_RXSEL_3V3;
u32 val;
+ if (priv->flags & FLAG_IO_FIXED_1V8 ||
+ host->mmc->ios.timing & MMC_SIGNAL_VOLTAGE_180)
+ rxsel = PHY_PAD_RXSEL_1V8;
+
/* deassert phy reset & set tx drive strength */
val = PHY_CNFG_RSTN_DEASSERT;
val |= FIELD_PREP(PHY_CNFG_PAD_SP_MASK, PHY_CNFG_PAD_SP);
@@ -353,7 +358,7 @@ static void dwcmshc_phy_1_8v_init(struct sdhci_host *host)
sdhci_writeb(host, val, PHY_SDCLKDL_CNFG_R);
/* configure phy pads */
- val = PHY_PAD_RXSEL_1V8;
+ val = rxsel;
val |= FIELD_PREP(PHY_PAD_WEAKPULL_MASK, PHY_PAD_WEAKPULL_PULLUP);
val |= FIELD_PREP(PHY_PAD_TXSLEW_CTRL_P_MASK, PHY_PAD_TXSLEW_CTRL_P);
val |= FIELD_PREP(PHY_PAD_TXSLEW_CTRL_N_MASK, PHY_PAD_TXSLEW_CTRL_N);
@@ -365,65 +370,22 @@ static void dwcmshc_phy_1_8v_init(struct sdhci_host *host)
val |= FIELD_PREP(PHY_PAD_TXSLEW_CTRL_N_MASK, PHY_PAD_TXSLEW_CTRL_N);
sdhci_writew(host, val, PHY_CLKPAD_CNFG_R);
- val = PHY_PAD_RXSEL_1V8;
+ val = rxsel;
val |= FIELD_PREP(PHY_PAD_WEAKPULL_MASK, PHY_PAD_WEAKPULL_PULLDOWN);
val |= FIELD_PREP(PHY_PAD_TXSLEW_CTRL_P_MASK, PHY_PAD_TXSLEW_CTRL_P);
val |= FIELD_PREP(PHY_PAD_TXSLEW_CTRL_N_MASK, PHY_PAD_TXSLEW_CTRL_N);
sdhci_writew(host, val, PHY_STBPAD_CNFG_R);
/* enable data strobe mode */
- sdhci_writeb(host, FIELD_PREP(PHY_DLLDL_CNFG_SLV_INPSEL_MASK, PHY_DLLDL_CNFG_SLV_INPSEL),
- PHY_DLLDL_CNFG_R);
-
- /* enable phy dll */
- sdhci_writeb(host, PHY_DLL_CTRL_ENABLE, PHY_DLL_CTRL_R);
-}
-
-static void dwcmshc_phy_3_3v_init(struct sdhci_host *host)
-{
- struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
- struct dwcmshc_priv *priv = sdhci_pltfm_priv(pltfm_host);
- u32 val;
-
- /* deassert phy reset & set tx drive strength */
- val = PHY_CNFG_RSTN_DEASSERT;
- val |= FIELD_PREP(PHY_CNFG_PAD_SP_MASK, PHY_CNFG_PAD_SP);
- val |= FIELD_PREP(PHY_CNFG_PAD_SN_MASK, PHY_CNFG_PAD_SN);
- sdhci_writel(host, val, PHY_CNFG_R);
-
- /* disable delay line */
- sdhci_writeb(host, PHY_SDCLKDL_CNFG_UPDATE, PHY_SDCLKDL_CNFG_R);
-
- /* set delay line */
- sdhci_writeb(host, priv->delay_line, PHY_SDCLKDL_DC_R);
- sdhci_writeb(host, PHY_DLL_CNFG2_JUMPSTEP, PHY_DLL_CNFG2_R);
-
- /* enable delay lane */
- val = sdhci_readb(host, PHY_SDCLKDL_CNFG_R);
- val &= ~(PHY_SDCLKDL_CNFG_UPDATE);
- sdhci_writeb(host, val, PHY_SDCLKDL_CNFG_R);
+ if (rxsel == PHY_PAD_RXSEL_1V8) {
+ u8 sel = FIELD_PREP(PHY_DLLDL_CNFG_SLV_INPSEL_MASK, PHY_DLLDL_CNFG_SLV_INPSEL);
- /* configure phy pads */
- val = PHY_PAD_RXSEL_3V3;
- val |= FIELD_PREP(PHY_PAD_WEAKPULL_MASK, PHY_PAD_WEAKPULL_PULLUP);
- val |= FIELD_PREP(PHY_PAD_TXSLEW_CTRL_P_MASK, PHY_PAD_TXSLEW_CTRL_P);
- val |= FIELD_PREP(PHY_PAD_TXSLEW_CTRL_N_MASK, PHY_PAD_TXSLEW_CTRL_N);
- sdhci_writew(host, val, PHY_CMDPAD_CNFG_R);
- sdhci_writew(host, val, PHY_DATAPAD_CNFG_R);
- sdhci_writew(host, val, PHY_RSTNPAD_CNFG_R);
-
- val = FIELD_PREP(PHY_PAD_TXSLEW_CTRL_P_MASK, PHY_PAD_TXSLEW_CTRL_P);
- val |= FIELD_PREP(PHY_PAD_TXSLEW_CTRL_N_MASK, PHY_PAD_TXSLEW_CTRL_N);
- sdhci_writew(host, val, PHY_CLKPAD_CNFG_R);
-
- val = PHY_PAD_RXSEL_3V3;
- val |= FIELD_PREP(PHY_PAD_WEAKPULL_MASK, PHY_PAD_WEAKPULL_PULLDOWN);
- val |= FIELD_PREP(PHY_PAD_TXSLEW_CTRL_P_MASK, PHY_PAD_TXSLEW_CTRL_P);
- val |= FIELD_PREP(PHY_PAD_TXSLEW_CTRL_N_MASK, PHY_PAD_TXSLEW_CTRL_N);
- sdhci_writew(host, val, PHY_STBPAD_CNFG_R);
+ sdhci_writeb(host, sel, PHY_DLLDL_CNFG_R);
+ }
/* enable phy dll */
sdhci_writeb(host, PHY_DLL_CTRL_ENABLE, PHY_DLL_CTRL_R);
+
}
static void th1520_sdhci_set_phy(struct sdhci_host *host)
@@ -433,11 +395,7 @@ static void th1520_sdhci_set_phy(struct sdhci_host *host)
u32 emmc_caps = MMC_CAP2_NO_SD | MMC_CAP2_NO_SDIO;
u16 emmc_ctrl;
- /* Before power on, set PHY configs */
- if (priv->flags & FLAG_IO_FIXED_1V8)
- dwcmshc_phy_1_8v_init(host);
- else
- dwcmshc_phy_3_3v_init(host);
+ dwcmshc_phy_init(host);
if ((host->mmc->caps2 & emmc_caps) == emmc_caps) {
emmc_ctrl = sdhci_readw(host, priv->vendor_specific_area1 + DWCMSHC_EMMC_CONTROL);
@@ -1163,7 +1121,7 @@ static const struct sdhci_ops sdhci_dwcmshc_th1520_ops = {
.get_max_clock = dwcmshc_get_max_clock,
.reset = th1520_sdhci_reset,
.adma_write_desc = dwcmshc_adma_write_desc,
- .voltage_switch = dwcmshc_phy_1_8v_init,
+ .voltage_switch = dwcmshc_phy_init,
.platform_execute_tuning = th1520_execute_tuning,
};
diff --git a/drivers/mmc/host/sdhci-omap.c b/drivers/mmc/host/sdhci-omap.c
index 54d795205fb4..26a9a8b5682a 100644
--- a/drivers/mmc/host/sdhci-omap.c
+++ b/drivers/mmc/host/sdhci-omap.c
@@ -1339,8 +1339,8 @@ static int sdhci_omap_probe(struct platform_device *pdev)
/* R1B responses is required to properly manage HW busy detection. */
mmc->caps |= MMC_CAP_NEED_RSP_BUSY;
- /* Allow card power off and runtime PM for eMMC/SD card devices */
- mmc->caps |= MMC_CAP_POWER_OFF_CARD | MMC_CAP_AGGRESSIVE_PM;
+ /* Enable SDIO card power off. */
+ mmc->caps |= MMC_CAP_POWER_OFF_CARD;
ret = sdhci_setup_host(host);
if (ret)
diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
index 1f0bd723f011..13a84b9309e0 100644
--- a/drivers/mmc/host/sdhci-pci-core.c
+++ b/drivers/mmc/host/sdhci-pci-core.c
@@ -610,8 +610,12 @@ static void sdhci_intel_set_power(struct sdhci_host *host, unsigned char mode,
sdhci_set_power(host, mode, vdd);
- if (mode == MMC_POWER_OFF)
+ if (mode == MMC_POWER_OFF) {
+ if (slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_APL_SD ||
+ slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_BYT_SD)
+ usleep_range(15000, 17500);
return;
+ }
/*
* Bus power might not enable after D3 -> D0 transition due to the
diff --git a/drivers/mmc/host/sdhci-pxav3.c b/drivers/mmc/host/sdhci-pxav3.c
index 990723a008ae..3fb56face3d8 100644
--- a/drivers/mmc/host/sdhci-pxav3.c
+++ b/drivers/mmc/host/sdhci-pxav3.c
@@ -399,6 +399,7 @@ static int sdhci_pxav3_probe(struct platform_device *pdev)
if (!IS_ERR(pxa->clk_core))
clk_prepare_enable(pxa->clk_core);
+ host->mmc->caps |= MMC_CAP_NEED_RSP_BUSY;
/* enable 1/8V DDR capable */
host->mmc->caps |= MMC_CAP_1_8V_DDR;
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index f4a7733a8ad2..5f91b44891f9 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -2065,10 +2065,15 @@ void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
host->mmc->actual_clock = 0;
- sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
+ clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
+ if (clk & SDHCI_CLOCK_CARD_EN)
+ sdhci_writew(host, clk & ~SDHCI_CLOCK_CARD_EN,
+ SDHCI_CLOCK_CONTROL);
- if (clock == 0)
+ if (clock == 0) {
+ sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
return;
+ }
clk = sdhci_calc_clk(host, clock, &host->mmc->actual_clock);
sdhci_enable_clk(host, clk);
diff --git a/drivers/mmc/host/tmio_mmc.h b/drivers/mmc/host/tmio_mmc.h
index a75755f31d31..41787ea77a13 100644
--- a/drivers/mmc/host/tmio_mmc.h
+++ b/drivers/mmc/host/tmio_mmc.h
@@ -44,6 +44,7 @@
#define CTL_RESET_SD 0xe0
#define CTL_VERSION 0xe2
#define CTL_SDIF_MODE 0xe6 /* only known on R-Car 2+ */
+#define CTL_SD_STATUS 0xf2 /* only known on RZ/{G2L,G3E,V2H} */
/* Definitions for values the CTL_STOP_INTERNAL_ACTION register can take */
#define TMIO_STOP_STP BIT(0)
@@ -103,6 +104,10 @@
/* Definitions for values the CTL_SDIF_MODE register can take */
#define SDIF_MODE_HS400 BIT(0) /* only known on R-Car 2+ */
+/* Definitions for values the CTL_SD_STATUS register can take */
+#define SD_STATUS_PWEN BIT(0) /* only known on RZ/{G3E,V2H} */
+#define SD_STATUS_IOVS BIT(16) /* only known on RZ/{G3E,V2H} */
+
/* Define some IRQ masks */
/* This is the mask used at reset by the chip */
#define TMIO_MASK_ALL 0x837f031d
@@ -226,6 +231,11 @@ static inline u32 sd_ctrl_read16_and_16_as_32(struct tmio_mmc_host *host,
ioread16(host->ctl + ((addr + 2) << host->bus_shift)) << 16;
}
+static inline u32 sd_ctrl_read32(struct tmio_mmc_host *host, int addr)
+{
+ return ioread32(host->ctl + (addr << host->bus_shift));
+}
+
static inline void sd_ctrl_read32_rep(struct tmio_mmc_host *host, int addr,
u32 *buf, int count)
{
diff --git a/drivers/mtd/devices/mchp48l640.c b/drivers/mtd/devices/mchp48l640.c
index 7584d0ba9396..4af9208f9690 100644
--- a/drivers/mtd/devices/mchp48l640.c
+++ b/drivers/mtd/devices/mchp48l640.c
@@ -23,6 +23,7 @@
#include <linux/spi/flash.h>
#include <linux/spi/spi.h>
#include <linux/of.h>
+#include <linux/string_choices.h>
struct mchp48_caps {
unsigned int size;
@@ -128,11 +129,11 @@ static int mchp48l640_write_prepare(struct mchp48l640_flash *flash, bool enable)
mutex_unlock(&flash->lock);
if (ret)
- dev_err(&flash->spi->dev, "write %sable failed ret: %d",
- (enable ? "en" : "dis"), ret);
+ dev_err(&flash->spi->dev, "write %s failed ret: %d",
+ str_enable_disable(enable), ret);
- dev_dbg(&flash->spi->dev, "write %sable success ret: %d",
- (enable ? "en" : "dis"), ret);
+ dev_dbg(&flash->spi->dev, "write %s success ret: %d",
+ str_enable_disable(enable), ret);
if (enable)
return mchp48l640_waitforbit(flash, MCHP48L640_STATUS_WEL, true);
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index 724f917f91ba..5ba9a741f5ac 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -741,7 +741,9 @@ int add_mtd_device(struct mtd_info *mtd)
mtd->dev.type = &mtd_devtype;
mtd->dev.class = &mtd_class;
mtd->dev.devt = MTD_DEVT(i);
- dev_set_name(&mtd->dev, "mtd%d", i);
+ error = dev_set_name(&mtd->dev, "mtd%d", i);
+ if (error)
+ goto fail_devname;
dev_set_drvdata(&mtd->dev, mtd);
mtd_check_of_node(mtd);
of_node_get(mtd_get_of_node(mtd));
@@ -790,6 +792,7 @@ fail_nvmem_add:
device_unregister(&mtd->dev);
fail_added:
of_node_put(mtd_get_of_node(mtd));
+fail_devname:
idr_remove(&mtd_idr, i);
fail_locked:
mutex_unlock(&mtd_table_mutex);
@@ -1053,7 +1056,7 @@ int mtd_device_parse_register(struct mtd_info *mtd, const char * const *types,
const struct mtd_partition *parts,
int nr_parts)
{
- int ret;
+ int ret, err;
mtd_set_dev_defaults(mtd);
@@ -1105,8 +1108,11 @@ out:
nvmem_unregister(mtd->otp_factory_nvmem);
}
- if (ret && device_is_registered(&mtd->dev))
- del_mtd_device(mtd);
+ if (ret && device_is_registered(&mtd->dev)) {
+ err = del_mtd_device(mtd);
+ if (err)
+ pr_err("Error when deleting MTD device (%d)\n", err);
+ }
return ret;
}
diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
index 6811a714349d..994e8c51e674 100644
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -690,10 +690,9 @@ int parse_mtd_partitions(struct mtd_info *master, const char *const *types,
parser = mtd_part_parser_get(*types);
if (!parser && !request_module("%s", *types))
parser = mtd_part_parser_get(*types);
- pr_debug("%s: got parser %s\n", master->name,
- parser ? parser->name : NULL);
if (!parser)
continue;
+ pr_debug("%s: got parser %s\n", master->name, parser->name);
ret = mtd_part_do_parse(parser, master, &pparts, data);
if (ret <= 0)
mtd_part_parser_put(parser);
diff --git a/drivers/mtd/mtdpstore.c b/drivers/mtd/mtdpstore.c
index 7ac8ac901306..9cf3872e37ae 100644
--- a/drivers/mtd/mtdpstore.c
+++ b/drivers/mtd/mtdpstore.c
@@ -417,11 +417,14 @@ static void mtdpstore_notify_add(struct mtd_info *mtd)
}
longcnt = BITS_TO_LONGS(div_u64(mtd->size, info->kmsg_size));
- cxt->rmmap = kcalloc(longcnt, sizeof(long), GFP_KERNEL);
- cxt->usedmap = kcalloc(longcnt, sizeof(long), GFP_KERNEL);
+ cxt->rmmap = devm_kcalloc(&mtd->dev, longcnt, sizeof(long), GFP_KERNEL);
+ cxt->usedmap = devm_kcalloc(&mtd->dev, longcnt, sizeof(long), GFP_KERNEL);
longcnt = BITS_TO_LONGS(div_u64(mtd->size, mtd->erasesize));
- cxt->badmap = kcalloc(longcnt, sizeof(long), GFP_KERNEL);
+ cxt->badmap = devm_kcalloc(&mtd->dev, longcnt, sizeof(long), GFP_KERNEL);
+
+ if (!cxt->rmmap || !cxt->usedmap || !cxt->badmap)
+ return;
/* just support dmesg right now */
cxt->dev.flags = PSTORE_FLAGS_DMESG;
@@ -527,9 +530,6 @@ static void mtdpstore_notify_remove(struct mtd_info *mtd)
mtdpstore_flush_removed(cxt);
unregister_pstore_device(&cxt->dev);
- kfree(cxt->badmap);
- kfree(cxt->usedmap);
- kfree(cxt->rmmap);
cxt->mtd = NULL;
cxt->index = -1;
}
diff --git a/drivers/mtd/nand/Makefile b/drivers/mtd/nand/Makefile
index da1586a36574..db516a45f0c5 100644
--- a/drivers/mtd/nand/Makefile
+++ b/drivers/mtd/nand/Makefile
@@ -3,7 +3,11 @@
nandcore-objs := core.o bbt.o
obj-$(CONFIG_MTD_NAND_CORE) += nandcore.o
obj-$(CONFIG_MTD_NAND_ECC_MEDIATEK) += ecc-mtk.o
+ifeq ($(CONFIG_SPI_QPIC_SNAND),y)
+obj-$(CONFIG_SPI_QPIC_SNAND) += qpic_common.o
+else
obj-$(CONFIG_MTD_NAND_QCOM) += qpic_common.o
+endif
obj-y += onenand/
obj-y += raw/
obj-y += spi/
diff --git a/drivers/mtd/nand/raw/brcmnand/brcmnand.c b/drivers/mtd/nand/raw/brcmnand/brcmnand.c
index fea5b6119956..17f6d9723df9 100644
--- a/drivers/mtd/nand/raw/brcmnand/brcmnand.c
+++ b/drivers/mtd/nand/raw/brcmnand/brcmnand.c
@@ -3008,7 +3008,7 @@ static int brcmnand_resume(struct device *dev)
brcmnand_save_restore_cs_config(host, 1);
/* Reset the chip, required by some chips after power-up */
- nand_reset_op(chip);
+ nand_reset(chip, 0);
}
return 0;
diff --git a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
index d76802944453..f4e68008ea03 100644
--- a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
+++ b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
@@ -17,6 +17,7 @@
#include <linux/pm_runtime.h>
#include <linux/pinctrl/consumer.h>
#include <linux/dma/mxs-dma.h>
+#include <linux/string_choices.h>
#include "gpmi-nand.h"
#include "gpmi-regs.h"
#include "bch-regs.h"
@@ -2319,8 +2320,8 @@ static int gpmi_nand_attach_chip(struct nand_chip *chip)
"fsl,no-blockmark-swap"))
this->swap_block_mark = false;
}
- dev_dbg(this->dev, "Blockmark swapping %sabled\n",
- this->swap_block_mark ? "en" : "dis");
+ dev_dbg(this->dev, "Blockmark swapping %s\n",
+ str_enabled_disabled(this->swap_block_mark));
ret = gpmi_init_last(this);
if (ret)
diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c
index 53e16d39af4b..13e4060bd1b6 100644
--- a/drivers/mtd/nand/raw/nand_base.c
+++ b/drivers/mtd/nand/raw/nand_base.c
@@ -1833,7 +1833,7 @@ int nand_readid_op(struct nand_chip *chip, u8 addr, void *buf,
/* READ_ID data bytes are received twice in NV-DDR mode */
if (len && nand_interface_is_nvddr(conf)) {
- ddrbuf = kzalloc(len * 2, GFP_KERNEL);
+ ddrbuf = kcalloc(2, len, GFP_KERNEL);
if (!ddrbuf)
return -ENOMEM;
@@ -2203,7 +2203,7 @@ int nand_read_data_op(struct nand_chip *chip, void *buf, unsigned int len,
* twice.
*/
if (force_8bit && nand_interface_is_nvddr(conf)) {
- ddrbuf = kzalloc(len * 2, GFP_KERNEL);
+ ddrbuf = kcalloc(2, len, GFP_KERNEL);
if (!ddrbuf)
return -ENOMEM;
diff --git a/drivers/mtd/nand/raw/qcom_nandc.c b/drivers/mtd/nand/raw/qcom_nandc.c
index 6720b547892b..5eaa0be367cd 100644
--- a/drivers/mtd/nand/raw/qcom_nandc.c
+++ b/drivers/mtd/nand/raw/qcom_nandc.c
@@ -165,9 +165,9 @@ static void nandc_set_read_loc_first(struct nand_chip *chip,
{
struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
__le32 locreg_val;
- u32 val = (((cw_offset) << READ_LOCATION_OFFSET) |
- ((read_size) << READ_LOCATION_SIZE) |
- ((is_last_read_loc) << READ_LOCATION_LAST));
+ u32 val = FIELD_PREP(READ_LOCATION_OFFSET_MASK, cw_offset) |
+ FIELD_PREP(READ_LOCATION_SIZE_MASK, read_size) |
+ FIELD_PREP(READ_LOCATION_LAST_MASK, is_last_read_loc);
locreg_val = cpu_to_le32(val);
@@ -197,9 +197,9 @@ static void nandc_set_read_loc_last(struct nand_chip *chip,
{
struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
__le32 locreg_val;
- u32 val = (((cw_offset) << READ_LOCATION_OFFSET) |
- ((read_size) << READ_LOCATION_SIZE) |
- ((is_last_read_loc) << READ_LOCATION_LAST));
+ u32 val = FIELD_PREP(READ_LOCATION_OFFSET_MASK, cw_offset) |
+ FIELD_PREP(READ_LOCATION_SIZE_MASK, read_size) |
+ FIELD_PREP(READ_LOCATION_LAST_MASK, is_last_read_loc);
locreg_val = cpu_to_le32(val);
@@ -271,14 +271,14 @@ static void update_rw_regs(struct qcom_nand_host *host, int num_cw, bool read, i
}
if (host->use_ecc) {
- cfg0 = cpu_to_le32((host->cfg0 & ~(7U << CW_PER_PAGE)) |
- (num_cw - 1) << CW_PER_PAGE);
+ cfg0 = cpu_to_le32((host->cfg0 & ~CW_PER_PAGE_MASK) |
+ FIELD_PREP(CW_PER_PAGE_MASK, (num_cw - 1)));
cfg1 = cpu_to_le32(host->cfg1);
ecc_bch_cfg = cpu_to_le32(host->ecc_bch_cfg);
} else {
- cfg0 = cpu_to_le32((host->cfg0_raw & ~(7U << CW_PER_PAGE)) |
- (num_cw - 1) << CW_PER_PAGE);
+ cfg0 = cpu_to_le32((host->cfg0_raw & ~CW_PER_PAGE_MASK) |
+ FIELD_PREP(CW_PER_PAGE_MASK, (num_cw - 1)));
cfg1 = cpu_to_le32(host->cfg1_raw);
ecc_bch_cfg = cpu_to_le32(ECC_CFG_ECC_DISABLE);
@@ -882,12 +882,12 @@ static void qcom_nandc_codeword_fixup(struct qcom_nand_host *host, int page)
host->bbm_size - host->cw_data;
host->cfg0 &= ~(SPARE_SIZE_BYTES_MASK | UD_SIZE_BYTES_MASK);
- host->cfg0 |= host->spare_bytes << SPARE_SIZE_BYTES |
- host->cw_data << UD_SIZE_BYTES;
+ host->cfg0 |= FIELD_PREP(SPARE_SIZE_BYTES_MASK, host->spare_bytes) |
+ FIELD_PREP(UD_SIZE_BYTES_MASK, host->cw_data);
host->ecc_bch_cfg &= ~ECC_NUM_DATA_BYTES_MASK;
- host->ecc_bch_cfg |= host->cw_data << ECC_NUM_DATA_BYTES;
- host->ecc_buf_cfg = (host->cw_data - 1) << NUM_STEPS;
+ host->ecc_bch_cfg |= FIELD_PREP(ECC_NUM_DATA_BYTES_MASK, host->cw_data);
+ host->ecc_buf_cfg = FIELD_PREP(NUM_STEPS_MASK, host->cw_data - 1);
}
/* implements ecc->read_page() */
@@ -1531,7 +1531,7 @@ static int qcom_nand_attach_chip(struct nand_chip *chip)
FIELD_PREP(ECC_PARITY_SIZE_BYTES_BCH_MASK, host->ecc_bytes_hw);
if (!nandc->props->qpic_version2)
- host->ecc_buf_cfg = 0x203 << NUM_STEPS;
+ host->ecc_buf_cfg = FIELD_PREP(NUM_STEPS_MASK, 0x203);
host->clrflashstatus = FS_READY_BSY_N;
host->clrreadstatus = 0xc0;
@@ -1817,7 +1817,7 @@ static int qcom_misc_cmd_type_exec(struct nand_chip *chip, const struct nand_sub
q_op.cmd_reg |= cpu_to_le32(PAGE_ACC | LAST_PAGE);
nandc->regs->addr0 = q_op.addr1_reg;
nandc->regs->addr1 = q_op.addr2_reg;
- nandc->regs->cfg0 = cpu_to_le32(host->cfg0_raw & ~(7 << CW_PER_PAGE));
+ nandc->regs->cfg0 = cpu_to_le32(host->cfg0_raw & ~CW_PER_PAGE_MASK);
nandc->regs->cfg1 = cpu_to_le32(host->cfg1_raw);
instrs = 3;
} else if (q_op.cmd_reg != cpu_to_le32(OP_RESET_DEVICE)) {
@@ -1900,8 +1900,8 @@ static int qcom_param_page_type_exec(struct nand_chip *chip, const struct nand_
/* configure CMD1 and VLD for ONFI param probing in QPIC v1 */
if (!nandc->props->qpic_version2) {
nandc->regs->vld = cpu_to_le32((nandc->vld & ~READ_START_VLD));
- nandc->regs->cmd1 = cpu_to_le32((nandc->cmd1 & ~(0xFF << READ_ADDR))
- | NAND_CMD_PARAM << READ_ADDR);
+ nandc->regs->cmd1 = cpu_to_le32((nandc->cmd1 & ~READ_ADDR_MASK) |
+ FIELD_PREP(READ_ADDR_MASK, NAND_CMD_PARAM));
}
nandc->regs->exec = cpu_to_le32(1);
diff --git a/drivers/mtd/nand/spi/Makefile b/drivers/mtd/nand/spi/Makefile
index 1e61ab21893a..258da42451a4 100644
--- a/drivers/mtd/nand/spi/Makefile
+++ b/drivers/mtd/nand/spi/Makefile
@@ -1,4 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
-spinand-objs := core.o alliancememory.o ato.o esmt.o foresee.o gigadevice.o macronix.o
+spinand-objs := core.o otp.o
+spinand-objs += alliancememory.o ato.o esmt.o foresee.o gigadevice.o macronix.o
spinand-objs += micron.o paragon.o skyhigh.o toshiba.o winbond.o xtx.o
obj-$(CONFIG_MTD_SPI_NAND) += spinand.o
diff --git a/drivers/mtd/nand/spi/core.c b/drivers/mtd/nand/spi/core.c
index da4713692674..d16e42cf8fae 100644
--- a/drivers/mtd/nand/spi/core.c
+++ b/drivers/mtd/nand/spi/core.c
@@ -534,10 +534,20 @@ static int spinand_erase_op(struct spinand_device *spinand,
return spi_mem_exec_op(spinand->spimem, &op);
}
-static int spinand_wait(struct spinand_device *spinand,
- unsigned long initial_delay_us,
- unsigned long poll_delay_us,
- u8 *s)
+/**
+ * spinand_wait() - Poll memory device status
+ * @spinand: the spinand device
+ * @initial_delay_us: delay in us before starting to poll
+ * @poll_delay_us: time to sleep between reads in us
+ * @s: the pointer to variable to store the value of REG_STATUS
+ *
+ * This function polls a status register (REG_STATUS) and returns when
+ * the STATUS_READY bit is 0 or when the timeout has expired.
+ *
+ * Return: 0 on success, a negative error code otherwise.
+ */
+int spinand_wait(struct spinand_device *spinand, unsigned long initial_delay_us,
+ unsigned long poll_delay_us, u8 *s)
{
struct spi_mem_op op = SPINAND_GET_FEATURE_OP(REG_STATUS,
spinand->scratchbuf);
@@ -604,8 +614,16 @@ static int spinand_lock_block(struct spinand_device *spinand, u8 lock)
return spinand_write_reg_op(spinand, REG_BLOCK_LOCK, lock);
}
-static int spinand_read_page(struct spinand_device *spinand,
- const struct nand_page_io_req *req)
+/**
+ * spinand_read_page() - Read a page
+ * @spinand: the spinand device
+ * @req: the I/O request
+ *
+ * Return: 0 or a positive number of bitflips corrected on success.
+ * A negative error code otherwise.
+ */
+int spinand_read_page(struct spinand_device *spinand,
+ const struct nand_page_io_req *req)
{
struct nand_device *nand = spinand_to_nand(spinand);
u8 status;
@@ -635,8 +653,16 @@ static int spinand_read_page(struct spinand_device *spinand,
return nand_ecc_finish_io_req(nand, (struct nand_page_io_req *)req);
}
-static int spinand_write_page(struct spinand_device *spinand,
- const struct nand_page_io_req *req)
+/**
+ * spinand_write_page() - Write a page
+ * @spinand: the spinand device
+ * @req: the I/O request
+ *
+ * Return: 0 or a positive number of bitflips corrected on success.
+ * A negative error code otherwise.
+ */
+int spinand_write_page(struct spinand_device *spinand,
+ const struct nand_page_io_req *req)
{
struct nand_device *nand = spinand_to_nand(spinand);
u8 status;
@@ -674,11 +700,15 @@ static int spinand_mtd_regular_page_read(struct mtd_info *mtd, loff_t from,
{
struct spinand_device *spinand = mtd_to_spinand(mtd);
struct nand_device *nand = mtd_to_nanddev(mtd);
+ struct mtd_ecc_stats old_stats;
struct nand_io_iter iter;
bool disable_ecc = false;
bool ecc_failed = false;
+ unsigned int retry_mode = 0;
int ret;
+ old_stats = mtd->ecc_stats;
+
if (ops->mode == MTD_OPS_RAW || !mtd->ooblayout)
disable_ecc = true;
@@ -690,18 +720,43 @@ static int spinand_mtd_regular_page_read(struct mtd_info *mtd, loff_t from,
if (ret)
break;
+read_retry:
ret = spinand_read_page(spinand, &iter.req);
if (ret < 0 && ret != -EBADMSG)
break;
- if (ret == -EBADMSG)
+ if (ret == -EBADMSG && spinand->set_read_retry) {
+ if (spinand->read_retries && (++retry_mode <= spinand->read_retries)) {
+ ret = spinand->set_read_retry(spinand, retry_mode);
+ if (ret < 0) {
+ spinand->set_read_retry(spinand, 0);
+ return ret;
+ }
+
+ /* Reset ecc_stats; retry */
+ mtd->ecc_stats = old_stats;
+ goto read_retry;
+ } else {
+ /* No more retry modes; real failure */
+ ecc_failed = true;
+ }
+ } else if (ret == -EBADMSG) {
ecc_failed = true;
- else
+ } else {
*max_bitflips = max_t(unsigned int, *max_bitflips, ret);
+ }
ret = 0;
ops->retlen += iter.req.datalen;
ops->oobretlen += iter.req.ooblen;
+
+ /* Reset to retry mode 0 */
+ if (retry_mode) {
+ retry_mode = 0;
+ ret = spinand->set_read_retry(spinand, retry_mode);
+ if (ret < 0)
+ return ret;
+ }
}
if (ecc_failed && !ret)
@@ -1292,6 +1347,10 @@ int spinand_match_and_init(struct spinand_device *spinand,
spinand->id.len = 1 + table[i].devid.len;
spinand->select_target = table[i].select_target;
spinand->set_cont_read = table[i].set_cont_read;
+ spinand->fact_otp = &table[i].fact_otp;
+ spinand->user_otp = &table[i].user_otp;
+ spinand->read_retries = table[i].read_retries;
+ spinand->set_read_retry = table[i].set_read_retry;
op = spinand_select_op_variant(spinand,
info->op_variants.read_cache);
@@ -1478,6 +1537,12 @@ static int spinand_init(struct spinand_device *spinand)
mtd->_max_bad_blocks = nanddev_mtd_max_bad_blocks;
mtd->_resume = spinand_mtd_resume;
+ if (spinand_user_otp_size(spinand) || spinand_fact_otp_size(spinand)) {
+ ret = spinand_set_mtd_otp_ops(spinand);
+ if (ret)
+ goto err_cleanup_ecc_engine;
+ }
+
if (nand->ecc.engine) {
ret = mtd_ooblayout_count_freebytes(mtd);
if (ret < 0)
diff --git a/drivers/mtd/nand/spi/esmt.c b/drivers/mtd/nand/spi/esmt.c
index 323a20901fc9..a164d821464d 100644
--- a/drivers/mtd/nand/spi/esmt.c
+++ b/drivers/mtd/nand/spi/esmt.c
@@ -8,10 +8,15 @@
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/mtd/spinand.h>
+#include <linux/spi/spi-mem.h>
/* ESMT uses GigaDevice 0xc8 JECDEC ID on some SPI NANDs */
#define SPINAND_MFR_ESMT_C8 0xc8
+#define ESMT_F50L1G41LB_CFG_OTP_PROTECT BIT(7)
+#define ESMT_F50L1G41LB_CFG_OTP_LOCK \
+ (CFG_OTP_ENABLE | ESMT_F50L1G41LB_CFG_OTP_PROTECT)
+
static SPINAND_OP_VARIANTS(read_cache_variants,
SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
@@ -102,6 +107,83 @@ static const struct mtd_ooblayout_ops f50l1g41lb_ooblayout = {
.free = f50l1g41lb_ooblayout_free,
};
+static int f50l1g41lb_otp_info(struct spinand_device *spinand, size_t len,
+ struct otp_info *buf, size_t *retlen, bool user)
+{
+ if (len < sizeof(*buf))
+ return -EINVAL;
+
+ buf->locked = 0;
+ buf->start = 0;
+ buf->length = user ? spinand_user_otp_size(spinand) :
+ spinand_fact_otp_size(spinand);
+
+ *retlen = sizeof(*buf);
+ return 0;
+}
+
+static int f50l1g41lb_fact_otp_info(struct spinand_device *spinand, size_t len,
+ struct otp_info *buf, size_t *retlen)
+{
+ return f50l1g41lb_otp_info(spinand, len, buf, retlen, false);
+}
+
+static int f50l1g41lb_user_otp_info(struct spinand_device *spinand, size_t len,
+ struct otp_info *buf, size_t *retlen)
+{
+ return f50l1g41lb_otp_info(spinand, len, buf, retlen, true);
+}
+
+static int f50l1g41lb_otp_lock(struct spinand_device *spinand, loff_t from,
+ size_t len)
+{
+ struct spi_mem_op write_op = SPINAND_WR_EN_DIS_OP(true);
+ struct spi_mem_op exec_op = SPINAND_PROG_EXEC_OP(0);
+ u8 status;
+ int ret;
+
+ ret = spinand_upd_cfg(spinand, ESMT_F50L1G41LB_CFG_OTP_LOCK,
+ ESMT_F50L1G41LB_CFG_OTP_LOCK);
+ if (!ret)
+ return ret;
+
+ ret = spi_mem_exec_op(spinand->spimem, &write_op);
+ if (!ret)
+ goto out;
+
+ ret = spi_mem_exec_op(spinand->spimem, &exec_op);
+ if (!ret)
+ goto out;
+
+ ret = spinand_wait(spinand,
+ SPINAND_WRITE_INITIAL_DELAY_US,
+ SPINAND_WRITE_POLL_DELAY_US,
+ &status);
+ if (!ret && (status & STATUS_PROG_FAILED))
+ ret = -EIO;
+
+out:
+ if (spinand_upd_cfg(spinand, ESMT_F50L1G41LB_CFG_OTP_LOCK, 0)) {
+ dev_warn(&spinand_to_mtd(spinand)->dev,
+ "Can not disable OTP mode\n");
+ ret = -EIO;
+ }
+
+ return ret;
+}
+
+static const struct spinand_user_otp_ops f50l1g41lb_user_otp_ops = {
+ .info = f50l1g41lb_user_otp_info,
+ .lock = f50l1g41lb_otp_lock,
+ .read = spinand_user_otp_read,
+ .write = spinand_user_otp_write,
+};
+
+static const struct spinand_fact_otp_ops f50l1g41lb_fact_otp_ops = {
+ .info = f50l1g41lb_fact_otp_info,
+ .read = spinand_fact_otp_read,
+};
+
static const struct spinand_info esmt_c8_spinand_table[] = {
SPINAND_INFO("F50L1G41LB",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0x01, 0x7f,
@@ -112,7 +194,9 @@ static const struct spinand_info esmt_c8_spinand_table[] = {
&write_cache_variants,
&update_cache_variants),
0,
- SPINAND_ECCINFO(&f50l1g41lb_ooblayout, NULL)),
+ SPINAND_ECCINFO(&f50l1g41lb_ooblayout, NULL),
+ SPINAND_USER_OTP_INFO(28, 2, &f50l1g41lb_user_otp_ops),
+ SPINAND_FACT_OTP_INFO(2, 0, &f50l1g41lb_fact_otp_ops)),
SPINAND_INFO("F50D1G41LB",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0x11, 0x7f,
0x7f, 0x7f),
@@ -122,7 +206,9 @@ static const struct spinand_info esmt_c8_spinand_table[] = {
&write_cache_variants,
&update_cache_variants),
0,
- SPINAND_ECCINFO(&f50l1g41lb_ooblayout, NULL)),
+ SPINAND_ECCINFO(&f50l1g41lb_ooblayout, NULL),
+ SPINAND_USER_OTP_INFO(28, 2, &f50l1g41lb_user_otp_ops),
+ SPINAND_FACT_OTP_INFO(2, 0, &f50l1g41lb_fact_otp_ops)),
SPINAND_INFO("F50D2G41KA",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0x51, 0x7f,
0x7f, 0x7f),
diff --git a/drivers/mtd/nand/spi/macronix.c b/drivers/mtd/nand/spi/macronix.c
index 3dc4d63d6832..1ef08ad850a2 100644
--- a/drivers/mtd/nand/spi/macronix.c
+++ b/drivers/mtd/nand/spi/macronix.c
@@ -14,6 +14,8 @@
#define MACRONIX_ECCSR_BF_LAST_PAGE(eccsr) FIELD_GET(GENMASK(3, 0), eccsr)
#define MACRONIX_ECCSR_BF_ACCUMULATED_PAGES(eccsr) FIELD_GET(GENMASK(7, 4), eccsr)
#define MACRONIX_CFG_CONT_READ BIT(2)
+#define MACRONIX_FEATURE_ADDR_READ_RETRY 0x70
+#define MACRONIX_NUM_READ_RETRY_MODES 5
#define STATUS_ECC_HAS_BITFLIPS_THRESHOLD (3 << 4)
@@ -136,6 +138,23 @@ static int macronix_set_cont_read(struct spinand_device *spinand, bool enable)
return 0;
}
+/**
+ * macronix_set_read_retry - Set the retry mode
+ * @spinand: SPI NAND device
+ * @retry_mode: Specify which retry mode to set
+ *
+ * Return: 0 on success, a negative error code otherwise.
+ */
+static int macronix_set_read_retry(struct spinand_device *spinand,
+ unsigned int retry_mode)
+{
+ struct spi_mem_op op = SPINAND_SET_FEATURE_OP(MACRONIX_FEATURE_ADDR_READ_RETRY,
+ spinand->scratchbuf);
+
+ *spinand->scratchbuf = retry_mode;
+ return spi_mem_exec_op(spinand->spimem, &op);
+}
+
static const struct spinand_info macronix_spinand_table[] = {
SPINAND_INFO("MX35LF1GE4AB",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x12),
@@ -168,7 +187,9 @@ static const struct spinand_info macronix_spinand_table[] = {
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
macronix_ecc_get_status),
- SPINAND_CONT_READ(macronix_set_cont_read)),
+ SPINAND_CONT_READ(macronix_set_cont_read),
+ SPINAND_READ_RETRY(MACRONIX_NUM_READ_RETRY_MODES,
+ macronix_set_read_retry)),
SPINAND_INFO("MX35LF4GE4AD",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x37, 0x03),
NAND_MEMORG(1, 4096, 128, 64, 2048, 40, 1, 1, 1),
@@ -179,7 +200,9 @@ static const struct spinand_info macronix_spinand_table[] = {
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
macronix_ecc_get_status),
- SPINAND_CONT_READ(macronix_set_cont_read)),
+ SPINAND_CONT_READ(macronix_set_cont_read),
+ SPINAND_READ_RETRY(MACRONIX_NUM_READ_RETRY_MODES,
+ macronix_set_read_retry)),
SPINAND_INFO("MX35LF1G24AD",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x14, 0x03),
NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1),
@@ -188,7 +211,9 @@ static const struct spinand_info macronix_spinand_table[] = {
&write_cache_variants,
&update_cache_variants),
SPINAND_HAS_QE_BIT,
- SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, NULL)),
+ SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, NULL),
+ SPINAND_READ_RETRY(MACRONIX_NUM_READ_RETRY_MODES,
+ macronix_set_read_retry)),
SPINAND_INFO("MX35LF2G24AD",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x24, 0x03),
NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 2, 1, 1),
@@ -198,7 +223,9 @@ static const struct spinand_info macronix_spinand_table[] = {
&update_cache_variants),
SPINAND_HAS_QE_BIT |
SPINAND_HAS_PROG_PLANE_SELECT_BIT,
- SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, NULL)),
+ SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, NULL),
+ SPINAND_READ_RETRY(MACRONIX_NUM_READ_RETRY_MODES,
+ macronix_set_read_retry)),
SPINAND_INFO("MX35LF2G24AD-Z4I8",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x64, 0x03),
NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 1, 1),
@@ -207,7 +234,9 @@ static const struct spinand_info macronix_spinand_table[] = {
&write_cache_variants,
&update_cache_variants),
SPINAND_HAS_QE_BIT,
- SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, NULL)),
+ SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, NULL),
+ SPINAND_READ_RETRY(MACRONIX_NUM_READ_RETRY_MODES,
+ macronix_set_read_retry)),
SPINAND_INFO("MX35LF4G24AD",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x35, 0x03),
NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 2, 1, 1),
@@ -217,7 +246,9 @@ static const struct spinand_info macronix_spinand_table[] = {
&update_cache_variants),
SPINAND_HAS_QE_BIT |
SPINAND_HAS_PROG_PLANE_SELECT_BIT,
- SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, NULL)),
+ SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, NULL),
+ SPINAND_READ_RETRY(MACRONIX_NUM_READ_RETRY_MODES,
+ macronix_set_read_retry)),
SPINAND_INFO("MX35LF4G24AD-Z4I8",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x75, 0x03),
NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 1, 1, 1),
@@ -226,7 +257,9 @@ static const struct spinand_info macronix_spinand_table[] = {
&write_cache_variants,
&update_cache_variants),
SPINAND_HAS_QE_BIT,
- SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, NULL)),
+ SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, NULL),
+ SPINAND_READ_RETRY(MACRONIX_NUM_READ_RETRY_MODES,
+ macronix_set_read_retry)),
SPINAND_INFO("MX31LF1GE4BC",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x1e),
NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1),
@@ -270,7 +303,9 @@ static const struct spinand_info macronix_spinand_table[] = {
SPINAND_HAS_QE_BIT |
SPINAND_HAS_PROG_PLANE_SELECT_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
- macronix_ecc_get_status)),
+ macronix_ecc_get_status),
+ SPINAND_READ_RETRY(MACRONIX_NUM_READ_RETRY_MODES,
+ macronix_set_read_retry)),
SPINAND_INFO("MX35UF4G24AD-Z4I8",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xf5, 0x03),
NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 1, 1, 1),
@@ -280,7 +315,9 @@ static const struct spinand_info macronix_spinand_table[] = {
&update_cache_variants),
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
- macronix_ecc_get_status)),
+ macronix_ecc_get_status),
+ SPINAND_READ_RETRY(MACRONIX_NUM_READ_RETRY_MODES,
+ macronix_set_read_retry)),
SPINAND_INFO("MX35UF4GE4AD",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xb7, 0x03),
NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 1, 1, 1),
@@ -291,7 +328,9 @@ static const struct spinand_info macronix_spinand_table[] = {
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
macronix_ecc_get_status),
- SPINAND_CONT_READ(macronix_set_cont_read)),
+ SPINAND_CONT_READ(macronix_set_cont_read),
+ SPINAND_READ_RETRY(MACRONIX_NUM_READ_RETRY_MODES,
+ macronix_set_read_retry)),
SPINAND_INFO("MX35UF2G14AC",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xa0),
NAND_MEMORG(1, 2048, 64, 64, 2048, 40, 2, 1, 1),
@@ -314,7 +353,9 @@ static const struct spinand_info macronix_spinand_table[] = {
SPINAND_HAS_QE_BIT |
SPINAND_HAS_PROG_PLANE_SELECT_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
- macronix_ecc_get_status)),
+ macronix_ecc_get_status),
+ SPINAND_READ_RETRY(MACRONIX_NUM_READ_RETRY_MODES,
+ macronix_set_read_retry)),
SPINAND_INFO("MX35UF2G24AD-Z4I8",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xe4, 0x03),
NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 1, 1),
@@ -324,7 +365,9 @@ static const struct spinand_info macronix_spinand_table[] = {
&update_cache_variants),
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
- macronix_ecc_get_status)),
+ macronix_ecc_get_status),
+ SPINAND_READ_RETRY(MACRONIX_NUM_READ_RETRY_MODES,
+ macronix_set_read_retry)),
SPINAND_INFO("MX35UF2GE4AD",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xa6, 0x03),
NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 1, 1),
@@ -335,7 +378,9 @@ static const struct spinand_info macronix_spinand_table[] = {
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
macronix_ecc_get_status),
- SPINAND_CONT_READ(macronix_set_cont_read)),
+ SPINAND_CONT_READ(macronix_set_cont_read),
+ SPINAND_READ_RETRY(MACRONIX_NUM_READ_RETRY_MODES,
+ macronix_set_read_retry)),
SPINAND_INFO("MX35UF2GE4AC",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xa2, 0x01),
NAND_MEMORG(1, 2048, 64, 64, 2048, 40, 1, 1, 1),
@@ -366,7 +411,9 @@ static const struct spinand_info macronix_spinand_table[] = {
&update_cache_variants),
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
- macronix_ecc_get_status)),
+ macronix_ecc_get_status),
+ SPINAND_READ_RETRY(MACRONIX_NUM_READ_RETRY_MODES,
+ macronix_set_read_retry)),
SPINAND_INFO("MX35UF1GE4AD",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x96, 0x03),
NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1),
@@ -377,7 +424,9 @@ static const struct spinand_info macronix_spinand_table[] = {
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
macronix_ecc_get_status),
- SPINAND_CONT_READ(macronix_set_cont_read)),
+ SPINAND_CONT_READ(macronix_set_cont_read),
+ SPINAND_READ_RETRY(MACRONIX_NUM_READ_RETRY_MODES,
+ macronix_set_read_retry)),
SPINAND_INFO("MX35UF1GE4AC",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x92, 0x01),
NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1),
diff --git a/drivers/mtd/nand/spi/micron.c b/drivers/mtd/nand/spi/micron.c
index ad0bb9755a09..691f8a2e0791 100644
--- a/drivers/mtd/nand/spi/micron.c
+++ b/drivers/mtd/nand/spi/micron.c
@@ -9,6 +9,8 @@
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/mtd/spinand.h>
+#include <linux/spi/spi-mem.h>
+#include <linux/string.h>
#define SPINAND_MFR_MICRON 0x2c
@@ -28,6 +30,10 @@
#define MICRON_SELECT_DIE(x) ((x) << 6)
+#define MICRON_MT29F2G01ABAGD_CFG_OTP_STATE BIT(7)
+#define MICRON_MT29F2G01ABAGD_CFG_OTP_LOCK \
+ (CFG_OTP_ENABLE | MICRON_MT29F2G01ABAGD_CFG_OTP_STATE)
+
static SPINAND_OP_VARIANTS(quadio_read_cache_variants,
SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 2, NULL, 0),
SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
@@ -168,6 +174,131 @@ static int micron_8_ecc_get_status(struct spinand_device *spinand,
return -EINVAL;
}
+static int mt29f2g01abagd_otp_is_locked(struct spinand_device *spinand)
+{
+ size_t bufsize = spinand_otp_page_size(spinand);
+ size_t retlen;
+ u8 *buf;
+ int ret;
+
+ buf = kmalloc(bufsize, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ ret = spinand_upd_cfg(spinand,
+ MICRON_MT29F2G01ABAGD_CFG_OTP_LOCK,
+ MICRON_MT29F2G01ABAGD_CFG_OTP_STATE);
+ if (ret)
+ goto free_buf;
+
+ ret = spinand_user_otp_read(spinand, 0, bufsize, &retlen, buf);
+
+ if (spinand_upd_cfg(spinand, MICRON_MT29F2G01ABAGD_CFG_OTP_LOCK,
+ 0)) {
+ dev_warn(&spinand_to_mtd(spinand)->dev,
+ "Can not disable OTP mode\n");
+ ret = -EIO;
+ }
+
+ if (ret)
+ goto free_buf;
+
+ /* If all zeros, then the OTP area is locked. */
+ if (mem_is_zero(buf, bufsize))
+ ret = 1;
+
+free_buf:
+ kfree(buf);
+ return ret;
+}
+
+static int mt29f2g01abagd_otp_info(struct spinand_device *spinand, size_t len,
+ struct otp_info *buf, size_t *retlen,
+ bool user)
+{
+ int locked;
+
+ if (len < sizeof(*buf))
+ return -EINVAL;
+
+ locked = mt29f2g01abagd_otp_is_locked(spinand);
+ if (locked < 0)
+ return locked;
+
+ buf->locked = locked;
+ buf->start = 0;
+ buf->length = user ? spinand_user_otp_size(spinand) :
+ spinand_fact_otp_size(spinand);
+
+ *retlen = sizeof(*buf);
+ return 0;
+}
+
+static int mt29f2g01abagd_fact_otp_info(struct spinand_device *spinand,
+ size_t len, struct otp_info *buf,
+ size_t *retlen)
+{
+ return mt29f2g01abagd_otp_info(spinand, len, buf, retlen, false);
+}
+
+static int mt29f2g01abagd_user_otp_info(struct spinand_device *spinand,
+ size_t len, struct otp_info *buf,
+ size_t *retlen)
+{
+ return mt29f2g01abagd_otp_info(spinand, len, buf, retlen, true);
+}
+
+static int mt29f2g01abagd_otp_lock(struct spinand_device *spinand, loff_t from,
+ size_t len)
+{
+ struct spi_mem_op write_op = SPINAND_WR_EN_DIS_OP(true);
+ struct spi_mem_op exec_op = SPINAND_PROG_EXEC_OP(0);
+ u8 status;
+ int ret;
+
+ ret = spinand_upd_cfg(spinand,
+ MICRON_MT29F2G01ABAGD_CFG_OTP_LOCK,
+ MICRON_MT29F2G01ABAGD_CFG_OTP_LOCK);
+ if (!ret)
+ return ret;
+
+ ret = spi_mem_exec_op(spinand->spimem, &write_op);
+ if (!ret)
+ goto out;
+
+ ret = spi_mem_exec_op(spinand->spimem, &exec_op);
+ if (!ret)
+ goto out;
+
+ ret = spinand_wait(spinand,
+ SPINAND_WRITE_INITIAL_DELAY_US,
+ SPINAND_WRITE_POLL_DELAY_US,
+ &status);
+ if (!ret && (status & STATUS_PROG_FAILED))
+ ret = -EIO;
+
+out:
+ if (spinand_upd_cfg(spinand, MICRON_MT29F2G01ABAGD_CFG_OTP_LOCK, 0)) {
+ dev_warn(&spinand_to_mtd(spinand)->dev,
+ "Can not disable OTP mode\n");
+ ret = -EIO;
+ }
+
+ return ret;
+}
+
+static const struct spinand_user_otp_ops mt29f2g01abagd_user_otp_ops = {
+ .info = mt29f2g01abagd_user_otp_info,
+ .lock = mt29f2g01abagd_otp_lock,
+ .read = spinand_user_otp_read,
+ .write = spinand_user_otp_write,
+};
+
+static const struct spinand_fact_otp_ops mt29f2g01abagd_fact_otp_ops = {
+ .info = mt29f2g01abagd_fact_otp_info,
+ .read = spinand_fact_otp_read,
+};
+
static const struct spinand_info micron_spinand_table[] = {
/* M79A 2Gb 3.3V */
SPINAND_INFO("MT29F2G01ABAGD",
@@ -179,7 +310,9 @@ static const struct spinand_info micron_spinand_table[] = {
&x4_update_cache_variants),
0,
SPINAND_ECCINFO(&micron_8_ooblayout,
- micron_8_ecc_get_status)),
+ micron_8_ecc_get_status),
+ SPINAND_USER_OTP_INFO(12, 2, &mt29f2g01abagd_user_otp_ops),
+ SPINAND_FACT_OTP_INFO(2, 0, &mt29f2g01abagd_fact_otp_ops)),
/* M79A 2Gb 1.8V */
SPINAND_INFO("MT29F2G01ABBGD",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x25),
diff --git a/drivers/mtd/nand/spi/otp.c b/drivers/mtd/nand/spi/otp.c
new file mode 100644
index 000000000000..ce41bca86ea9
--- /dev/null
+++ b/drivers/mtd/nand/spi/otp.c
@@ -0,0 +1,362 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2025, SaluteDevices. All Rights Reserved.
+ *
+ * Author: Martin Kurbanov <mmkurbanov@salutedevices.com>
+ */
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/spinand.h>
+
+/**
+ * spinand_otp_page_size() - Get SPI-NAND OTP page size
+ * @spinand: the spinand device
+ *
+ * Return: the OTP page size.
+ */
+size_t spinand_otp_page_size(struct spinand_device *spinand)
+{
+ struct nand_device *nand = spinand_to_nand(spinand);
+
+ return nanddev_page_size(nand) + nanddev_per_page_oobsize(nand);
+}
+
+static size_t spinand_otp_size(struct spinand_device *spinand,
+ const struct spinand_otp_layout *layout)
+{
+ return layout->npages * spinand_otp_page_size(spinand);
+}
+
+/**
+ * spinand_fact_otp_size() - Get SPI-NAND factory OTP area size
+ * @spinand: the spinand device
+ *
+ * Return: the OTP size.
+ */
+size_t spinand_fact_otp_size(struct spinand_device *spinand)
+{
+ return spinand_otp_size(spinand, &spinand->fact_otp->layout);
+}
+
+/**
+ * spinand_user_otp_size() - Get SPI-NAND user OTP area size
+ * @spinand: the spinand device
+ *
+ * Return: the OTP size.
+ */
+size_t spinand_user_otp_size(struct spinand_device *spinand)
+{
+ return spinand_otp_size(spinand, &spinand->user_otp->layout);
+}
+
+static int spinand_otp_check_bounds(struct spinand_device *spinand, loff_t ofs,
+ size_t len,
+ const struct spinand_otp_layout *layout)
+{
+ if (ofs < 0 || ofs + len > spinand_otp_size(spinand, layout))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int spinand_user_otp_check_bounds(struct spinand_device *spinand,
+ loff_t ofs, size_t len)
+{
+ return spinand_otp_check_bounds(spinand, ofs, len,
+ &spinand->user_otp->layout);
+}
+
+static int spinand_otp_rw(struct spinand_device *spinand, loff_t ofs,
+ size_t len, size_t *retlen, u8 *buf, bool is_write,
+ const struct spinand_otp_layout *layout)
+{
+ struct nand_page_io_req req = {};
+ unsigned long long page;
+ size_t copied = 0;
+ size_t otp_pagesize = spinand_otp_page_size(spinand);
+ int ret;
+
+ if (!len)
+ return 0;
+
+ ret = spinand_otp_check_bounds(spinand, ofs, len, layout);
+ if (ret)
+ return ret;
+
+ ret = spinand_upd_cfg(spinand, CFG_OTP_ENABLE, CFG_OTP_ENABLE);
+ if (ret)
+ return ret;
+
+ page = ofs;
+ req.dataoffs = do_div(page, otp_pagesize);
+ req.pos.page = page + layout->start_page;
+ req.type = is_write ? NAND_PAGE_WRITE : NAND_PAGE_READ;
+ req.mode = MTD_OPS_RAW;
+ req.databuf.in = buf;
+
+ while (copied < len) {
+ req.datalen = min_t(unsigned int,
+ otp_pagesize - req.dataoffs,
+ len - copied);
+
+ if (is_write)
+ ret = spinand_write_page(spinand, &req);
+ else
+ ret = spinand_read_page(spinand, &req);
+
+ if (ret < 0)
+ break;
+
+ req.databuf.in += req.datalen;
+ req.pos.page++;
+ req.dataoffs = 0;
+ copied += req.datalen;
+ }
+
+ *retlen = copied;
+
+ if (spinand_upd_cfg(spinand, CFG_OTP_ENABLE, 0)) {
+ dev_warn(&spinand_to_mtd(spinand)->dev,
+ "Can not disable OTP mode\n");
+ ret = -EIO;
+ }
+
+ return ret;
+}
+
+/**
+ * spinand_fact_otp_read() - Read from OTP area
+ * @spinand: the spinand device
+ * @ofs: the offset to read
+ * @len: the number of data bytes to read
+ * @retlen: the pointer to variable to store the number of read bytes
+ * @buf: the buffer to store the read data
+ *
+ * Return: 0 on success, an error code otherwise.
+ */
+int spinand_fact_otp_read(struct spinand_device *spinand, loff_t ofs,
+ size_t len, size_t *retlen, u8 *buf)
+{
+ return spinand_otp_rw(spinand, ofs, len, retlen, buf, false,
+ &spinand->fact_otp->layout);
+}
+
+/**
+ * spinand_user_otp_read() - Read from OTP area
+ * @spinand: the spinand device
+ * @ofs: the offset to read
+ * @len: the number of data bytes to read
+ * @retlen: the pointer to variable to store the number of read bytes
+ * @buf: the buffer to store the read data
+ *
+ * Return: 0 on success, an error code otherwise.
+ */
+int spinand_user_otp_read(struct spinand_device *spinand, loff_t ofs,
+ size_t len, size_t *retlen, u8 *buf)
+{
+ return spinand_otp_rw(spinand, ofs, len, retlen, buf, false,
+ &spinand->user_otp->layout);
+}
+
+/**
+ * spinand_user_otp_write() - Write to OTP area
+ * @spinand: the spinand device
+ * @ofs: the offset to write to
+ * @len: the number of bytes to write
+ * @retlen: the pointer to variable to store the number of written bytes
+ * @buf: the buffer with data to write
+ *
+ * Return: 0 on success, an error code otherwise.
+ */
+int spinand_user_otp_write(struct spinand_device *spinand, loff_t ofs,
+ size_t len, size_t *retlen, const u8 *buf)
+{
+ return spinand_otp_rw(spinand, ofs, len, retlen, (u8 *)buf, true,
+ &spinand->user_otp->layout);
+}
+
+static int spinand_mtd_otp_info(struct mtd_info *mtd, size_t len,
+ size_t *retlen, struct otp_info *buf,
+ bool is_fact)
+{
+ struct spinand_device *spinand = mtd_to_spinand(mtd);
+ int ret;
+
+ *retlen = 0;
+
+ mutex_lock(&spinand->lock);
+
+ if (is_fact)
+ ret = spinand->fact_otp->ops->info(spinand, len, buf, retlen);
+ else
+ ret = spinand->user_otp->ops->info(spinand, len, buf, retlen);
+
+ mutex_unlock(&spinand->lock);
+
+ return ret;
+}
+
+static int spinand_mtd_fact_otp_info(struct mtd_info *mtd, size_t len,
+ size_t *retlen, struct otp_info *buf)
+{
+ return spinand_mtd_otp_info(mtd, len, retlen, buf, true);
+}
+
+static int spinand_mtd_user_otp_info(struct mtd_info *mtd, size_t len,
+ size_t *retlen, struct otp_info *buf)
+{
+ return spinand_mtd_otp_info(mtd, len, retlen, buf, false);
+}
+
+static int spinand_mtd_otp_read(struct mtd_info *mtd, loff_t ofs, size_t len,
+ size_t *retlen, u8 *buf, bool is_fact)
+{
+ struct spinand_device *spinand = mtd_to_spinand(mtd);
+ int ret;
+
+ *retlen = 0;
+
+ if (!len)
+ return 0;
+
+ ret = spinand_otp_check_bounds(spinand, ofs, len,
+ is_fact ? &spinand->fact_otp->layout :
+ &spinand->user_otp->layout);
+ if (ret)
+ return ret;
+
+ mutex_lock(&spinand->lock);
+
+ if (is_fact)
+ ret = spinand->fact_otp->ops->read(spinand, ofs, len, retlen,
+ buf);
+ else
+ ret = spinand->user_otp->ops->read(spinand, ofs, len, retlen,
+ buf);
+
+ mutex_unlock(&spinand->lock);
+
+ return ret;
+}
+
+static int spinand_mtd_fact_otp_read(struct mtd_info *mtd, loff_t ofs,
+ size_t len, size_t *retlen, u8 *buf)
+{
+ return spinand_mtd_otp_read(mtd, ofs, len, retlen, buf, true);
+}
+
+static int spinand_mtd_user_otp_read(struct mtd_info *mtd, loff_t ofs,
+ size_t len, size_t *retlen, u8 *buf)
+{
+ return spinand_mtd_otp_read(mtd, ofs, len, retlen, buf, false);
+}
+
+static int spinand_mtd_user_otp_write(struct mtd_info *mtd, loff_t ofs,
+ size_t len, size_t *retlen, const u8 *buf)
+{
+ struct spinand_device *spinand = mtd_to_spinand(mtd);
+ const struct spinand_user_otp_ops *ops = spinand->user_otp->ops;
+ int ret;
+
+ *retlen = 0;
+
+ if (!len)
+ return 0;
+
+ ret = spinand_user_otp_check_bounds(spinand, ofs, len);
+ if (ret)
+ return ret;
+
+ mutex_lock(&spinand->lock);
+ ret = ops->write(spinand, ofs, len, retlen, buf);
+ mutex_unlock(&spinand->lock);
+
+ return ret;
+}
+
+static int spinand_mtd_user_otp_erase(struct mtd_info *mtd, loff_t ofs,
+ size_t len)
+{
+ struct spinand_device *spinand = mtd_to_spinand(mtd);
+ const struct spinand_user_otp_ops *ops = spinand->user_otp->ops;
+ int ret;
+
+ if (!len)
+ return 0;
+
+ ret = spinand_user_otp_check_bounds(spinand, ofs, len);
+ if (ret)
+ return ret;
+
+ mutex_lock(&spinand->lock);
+ ret = ops->erase(spinand, ofs, len);
+ mutex_unlock(&spinand->lock);
+
+ return ret;
+}
+
+static int spinand_mtd_user_otp_lock(struct mtd_info *mtd, loff_t ofs,
+ size_t len)
+{
+ struct spinand_device *spinand = mtd_to_spinand(mtd);
+ const struct spinand_user_otp_ops *ops = spinand->user_otp->ops;
+ int ret;
+
+ if (!len)
+ return 0;
+
+ ret = spinand_user_otp_check_bounds(spinand, ofs, len);
+ if (ret)
+ return ret;
+
+ mutex_lock(&spinand->lock);
+ ret = ops->lock(spinand, ofs, len);
+ mutex_unlock(&spinand->lock);
+
+ return ret;
+}
+
+/**
+ * spinand_set_mtd_otp_ops() - Setup OTP methods
+ * @spinand: the spinand device
+ *
+ * Setup OTP methods.
+ *
+ * Return: 0 on success, a negative error code otherwise.
+ */
+int spinand_set_mtd_otp_ops(struct spinand_device *spinand)
+{
+ struct mtd_info *mtd = spinand_to_mtd(spinand);
+ const struct spinand_fact_otp_ops *fact_ops = spinand->fact_otp->ops;
+ const struct spinand_user_otp_ops *user_ops = spinand->user_otp->ops;
+
+ if (!user_ops && !fact_ops)
+ return -EINVAL;
+
+ if (user_ops) {
+ if (user_ops->info)
+ mtd->_get_user_prot_info = spinand_mtd_user_otp_info;
+
+ if (user_ops->read)
+ mtd->_read_user_prot_reg = spinand_mtd_user_otp_read;
+
+ if (user_ops->write)
+ mtd->_write_user_prot_reg = spinand_mtd_user_otp_write;
+
+ if (user_ops->lock)
+ mtd->_lock_user_prot_reg = spinand_mtd_user_otp_lock;
+
+ if (user_ops->erase)
+ mtd->_erase_user_prot_reg = spinand_mtd_user_otp_erase;
+ }
+
+ if (fact_ops) {
+ if (fact_ops->info)
+ mtd->_get_fact_prot_info = spinand_mtd_fact_otp_info;
+
+ if (fact_ops->read)
+ mtd->_read_fact_prot_reg = spinand_mtd_fact_otp_read;
+ }
+
+ return 0;
+}
diff --git a/drivers/mtd/spi-nor/core.c b/drivers/mtd/spi-nor/core.c
index 19eb98bd6821..ac4b960101cc 100644
--- a/drivers/mtd/spi-nor/core.c
+++ b/drivers/mtd/spi-nor/core.c
@@ -7,16 +7,17 @@
* Copyright (C) 2014, Freescale Semiconductor, Inc.
*/
-#include <linux/err.h>
-#include <linux/errno.h>
+#include <linux/cleanup.h>
#include <linux/delay.h>
#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/errno.h>
#include <linux/math64.h>
#include <linux/module.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/spi-nor.h>
#include <linux/mutex.h>
-#include <linux/of_platform.h>
+#include <linux/of.h>
#include <linux/regulator/consumer.h>
#include <linux/sched/task_stack.h>
#include <linux/sizes.h>
@@ -639,32 +640,26 @@ static bool spi_nor_use_parallel_locking(struct spi_nor *nor)
static int spi_nor_rww_start_rdst(struct spi_nor *nor)
{
struct spi_nor_rww *rww = &nor->rww;
- int ret = -EAGAIN;
- mutex_lock(&nor->lock);
+ guard(mutex)(&nor->lock);
if (rww->ongoing_io || rww->ongoing_rd)
- goto busy;
+ return -EAGAIN;
rww->ongoing_io = true;
rww->ongoing_rd = true;
- ret = 0;
-busy:
- mutex_unlock(&nor->lock);
- return ret;
+ return 0;
}
static void spi_nor_rww_end_rdst(struct spi_nor *nor)
{
struct spi_nor_rww *rww = &nor->rww;
- mutex_lock(&nor->lock);
+ guard(mutex)(&nor->lock);
rww->ongoing_io = false;
rww->ongoing_rd = false;
-
- mutex_unlock(&nor->lock);
}
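
The guard(mutex) conversions in this file rely on <linux/cleanup.h>: the lock is released automatically when the guard variable goes out of scope, so early returns no longer need the goto/unlock epilogue. A minimal standalone sketch of the pattern, using an assumed struct my_ctx that is not part of this patch:

	#include <linux/cleanup.h>
	#include <linux/mutex.h>

	static int try_claim(struct my_ctx *ctx)
	{
		guard(mutex)(&ctx->lock);	/* dropped on every return path */

		if (ctx->busy)
			return -EAGAIN;		/* no explicit unlock needed */

		ctx->busy = true;
		return 0;
	}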
static int spi_nor_lock_rdst(struct spi_nor *nor)
@@ -1212,26 +1207,21 @@ static void spi_nor_offset_to_banks(u64 bank_size, loff_t start, size_t len,
static bool spi_nor_rww_start_io(struct spi_nor *nor)
{
struct spi_nor_rww *rww = &nor->rww;
- bool start = false;
- mutex_lock(&nor->lock);
+ guard(mutex)(&nor->lock);
if (rww->ongoing_io)
- goto busy;
+ return false;
rww->ongoing_io = true;
- start = true;
-busy:
- mutex_unlock(&nor->lock);
- return start;
+ return true;
}
static void spi_nor_rww_end_io(struct spi_nor *nor)
{
- mutex_lock(&nor->lock);
+ guard(mutex)(&nor->lock);
nor->rww.ongoing_io = false;
- mutex_unlock(&nor->lock);
}
static int spi_nor_lock_device(struct spi_nor *nor)
@@ -1254,32 +1244,27 @@ static void spi_nor_unlock_device(struct spi_nor *nor)
static bool spi_nor_rww_start_exclusive(struct spi_nor *nor)
{
struct spi_nor_rww *rww = &nor->rww;
- bool start = false;
- mutex_lock(&nor->lock);
+ guard(mutex)(&nor->lock);
if (rww->ongoing_io || rww->ongoing_rd || rww->ongoing_pe)
- goto busy;
+ return false;
rww->ongoing_io = true;
rww->ongoing_rd = true;
rww->ongoing_pe = true;
- start = true;
-busy:
- mutex_unlock(&nor->lock);
- return start;
+ return true;
}
static void spi_nor_rww_end_exclusive(struct spi_nor *nor)
{
struct spi_nor_rww *rww = &nor->rww;
- mutex_lock(&nor->lock);
+ guard(mutex)(&nor->lock);
rww->ongoing_io = false;
rww->ongoing_rd = false;
rww->ongoing_pe = false;
- mutex_unlock(&nor->lock);
}
int spi_nor_prep_and_lock(struct spi_nor *nor)
@@ -1316,30 +1301,26 @@ static bool spi_nor_rww_start_pe(struct spi_nor *nor, loff_t start, size_t len)
{
struct spi_nor_rww *rww = &nor->rww;
unsigned int used_banks = 0;
- bool started = false;
u8 first, last;
int bank;
- mutex_lock(&nor->lock);
+ guard(mutex)(&nor->lock);
if (rww->ongoing_io || rww->ongoing_rd || rww->ongoing_pe)
- goto busy;
+ return false;
spi_nor_offset_to_banks(nor->params->bank_size, start, len, &first, &last);
for (bank = first; bank <= last; bank++) {
if (rww->used_banks & BIT(bank))
- goto busy;
+ return false;
used_banks |= BIT(bank);
}
rww->used_banks |= used_banks;
rww->ongoing_pe = true;
- started = true;
-busy:
- mutex_unlock(&nor->lock);
- return started;
+ return true;
}
static void spi_nor_rww_end_pe(struct spi_nor *nor, loff_t start, size_t len)
@@ -1348,15 +1329,13 @@ static void spi_nor_rww_end_pe(struct spi_nor *nor, loff_t start, size_t len)
u8 first, last;
int bank;
- mutex_lock(&nor->lock);
+ guard(mutex)(&nor->lock);
spi_nor_offset_to_banks(nor->params->bank_size, start, len, &first, &last);
for (bank = first; bank <= last; bank++)
rww->used_banks &= ~BIT(bank);
rww->ongoing_pe = false;
-
- mutex_unlock(&nor->lock);
}
static int spi_nor_prep_and_lock_pe(struct spi_nor *nor, loff_t start, size_t len)
@@ -1393,19 +1372,18 @@ static bool spi_nor_rww_start_rd(struct spi_nor *nor, loff_t start, size_t len)
{
struct spi_nor_rww *rww = &nor->rww;
unsigned int used_banks = 0;
- bool started = false;
u8 first, last;
int bank;
- mutex_lock(&nor->lock);
+ guard(mutex)(&nor->lock);
if (rww->ongoing_io || rww->ongoing_rd)
- goto busy;
+ return false;
spi_nor_offset_to_banks(nor->params->bank_size, start, len, &first, &last);
for (bank = first; bank <= last; bank++) {
if (rww->used_banks & BIT(bank))
- goto busy;
+ return false;
used_banks |= BIT(bank);
}
@@ -1413,11 +1391,8 @@ static bool spi_nor_rww_start_rd(struct spi_nor *nor, loff_t start, size_t len)
rww->used_banks |= used_banks;
rww->ongoing_io = true;
rww->ongoing_rd = true;
- started = true;
-busy:
- mutex_unlock(&nor->lock);
- return started;
+ return true;
}
static void spi_nor_rww_end_rd(struct spi_nor *nor, loff_t start, size_t len)
@@ -1426,7 +1401,7 @@ static void spi_nor_rww_end_rd(struct spi_nor *nor, loff_t start, size_t len)
u8 first, last;
int bank;
- mutex_lock(&nor->lock);
+ guard(mutex)(&nor->lock);
spi_nor_offset_to_banks(nor->params->bank_size, start, len, &first, &last);
for (bank = first; bank <= last; bank++)
@@ -1434,8 +1409,6 @@ static void spi_nor_rww_end_rd(struct spi_nor *nor, loff_t start, size_t len)
rww->ongoing_io = false;
rww->ongoing_rd = false;
-
- mutex_unlock(&nor->lock);
}
static int spi_nor_prep_and_lock_rd(struct spi_nor *nor, loff_t start, size_t len)
diff --git a/drivers/mtd/spi-nor/macronix.c b/drivers/mtd/spi-nor/macronix.c
index 99936fd25d43..55644a3cd88c 100644
--- a/drivers/mtd/spi-nor/macronix.c
+++ b/drivers/mtd/spi-nor/macronix.c
@@ -45,8 +45,26 @@ mx25l25635_post_bfpt_fixups(struct spi_nor *nor,
return 0;
}
+static int
+macronix_qpp4b_post_sfdp_fixups(struct spi_nor *nor)
+{
+ /* PP_1_1_4_4B is supported but missing in 4BAIT. */
+ struct spi_nor_flash_parameter *params = nor->params;
+
+ params->hwcaps.mask |= SNOR_HWCAPS_PP_1_1_4;
+ spi_nor_set_pp_settings(&params->page_programs[SNOR_CMD_PP_1_1_4],
+ SPINOR_OP_PP_1_1_4_4B, SNOR_PROTO_1_1_4);
+
+ return 0;
+}
+
static const struct spi_nor_fixups mx25l25635_fixups = {
.post_bfpt = mx25l25635_post_bfpt_fixups,
+ .post_sfdp = macronix_qpp4b_post_sfdp_fixups,
+};
+
+static const struct spi_nor_fixups macronix_qpp4b_fixups = {
+ .post_sfdp = macronix_qpp4b_post_sfdp_fixups,
};
static const struct flash_info macronix_nor_parts[] = {
@@ -102,11 +120,17 @@ static const struct flash_info macronix_nor_parts[] = {
.size = SZ_64M,
.no_sfdp_flags = SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
.fixup_flags = SPI_NOR_4B_OPCODES,
+ .fixups = &macronix_qpp4b_fixups,
}, {
.id = SNOR_ID(0xc2, 0x20, 0x1b),
.name = "mx66l1g45g",
.size = SZ_128M,
.no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+ .fixups = &macronix_qpp4b_fixups,
+ }, {
+ /* MX66L2G45G */
+ .id = SNOR_ID(0xc2, 0x20, 0x1c),
+ .fixups = &macronix_qpp4b_fixups,
}, {
.id = SNOR_ID(0xc2, 0x23, 0x14),
.name = "mx25v8035f",
@@ -148,18 +172,25 @@ static const struct flash_info macronix_nor_parts[] = {
.size = SZ_64M,
.no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
.fixup_flags = SPI_NOR_4B_OPCODES,
+ .fixups = &macronix_qpp4b_fixups,
}, {
.id = SNOR_ID(0xc2, 0x25, 0x3a),
.name = "mx66u51235f",
.size = SZ_64M,
.no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
.fixup_flags = SPI_NOR_4B_OPCODES,
+ .fixups = &macronix_qpp4b_fixups,
+ }, {
+ /* MX66U1G45G */
+ .id = SNOR_ID(0xc2, 0x25, 0x3b),
+ .fixups = &macronix_qpp4b_fixups,
}, {
.id = SNOR_ID(0xc2, 0x25, 0x3c),
.name = "mx66u2g45g",
.size = SZ_256M,
.no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
.fixup_flags = SPI_NOR_4B_OPCODES,
+ .fixups = &macronix_qpp4b_fixups,
}, {
.id = SNOR_ID(0xc2, 0x26, 0x18),
.name = "mx25l12855e",
diff --git a/drivers/mtd/spi-nor/otp.c b/drivers/mtd/spi-nor/otp.c
index 9a729aa3452d..7d0b145d78d8 100644
--- a/drivers/mtd/spi-nor/otp.c
+++ b/drivers/mtd/spi-nor/otp.c
@@ -6,6 +6,7 @@
*/
#include <linux/log2.h>
+#include <linux/math64.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/spi-nor.h>
diff --git a/drivers/mtd/spi-nor/swp.c b/drivers/mtd/spi-nor/swp.c
index e48c3cff247a..9c9328478d8a 100644
--- a/drivers/mtd/spi-nor/swp.c
+++ b/drivers/mtd/spi-nor/swp.c
@@ -5,6 +5,7 @@
* Copyright (C) 2005, Intec Automation Inc.
* Copyright (C) 2014, Freescale Semiconductor, Inc.
*/
+#include <linux/math64.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/spi-nor.h>
diff --git a/drivers/mtd/spi-nor/winbond.c b/drivers/mtd/spi-nor/winbond.c
index 8d0a00d69e12..63a93c9eb917 100644
--- a/drivers/mtd/spi-nor/winbond.c
+++ b/drivers/mtd/spi-nor/winbond.c
@@ -10,6 +10,7 @@
#define WINBOND_NOR_OP_RDEAR 0xc8 /* Read Extended Address Register */
#define WINBOND_NOR_OP_WREAR 0xc5 /* Write Extended Address Register */
+#define WINBOND_NOR_OP_SELDIE 0xc2 /* Select active die */
#define WINBOND_NOR_WREAR_OP(buf) \
SPI_MEM_OP(SPI_MEM_OP_CMD(WINBOND_NOR_OP_WREAR, 0), \
@@ -17,6 +18,12 @@
SPI_MEM_OP_NO_DUMMY, \
SPI_MEM_OP_DATA_OUT(1, buf, 0))
+#define WINBOND_NOR_SELDIE_OP(buf) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(WINBOND_NOR_OP_SELDIE, 0), \
+ SPI_MEM_OP_NO_ADDR, \
+ SPI_MEM_OP_NO_DUMMY, \
+ SPI_MEM_OP_DATA_OUT(1, buf, 0))
+
static int
w25q128_post_bfpt_fixups(struct spi_nor *nor,
const struct sfdp_parameter_header *bfpt_header,
@@ -66,6 +73,79 @@ static const struct spi_nor_fixups w25q256_fixups = {
.post_bfpt = w25q256_post_bfpt_fixups,
};
+/**
+ * winbond_nor_select_die() - Set the active die.
+ * @nor: pointer to 'struct spi_nor'.
+ * @die: die to set active.
+ *
+ * Certain Winbond chips feature more than one die. This is mostly hidden
+ * from the user, except that some chips may show timing deviations when the
+ * status bits are modified across dies, which in some corner cases can lead
+ * to problematic races. Being able to explicitly select a die and check its
+ * state is useful in such cases.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int winbond_nor_select_die(struct spi_nor *nor, u8 die)
+{
+ int ret;
+
+ nor->bouncebuf[0] = die;
+
+ if (nor->spimem) {
+ struct spi_mem_op op = WINBOND_NOR_SELDIE_OP(nor->bouncebuf);
+
+ spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
+
+ ret = spi_mem_exec_op(nor->spimem, &op);
+ } else {
+ ret = spi_nor_controller_ops_write_reg(nor,
+ WINBOND_NOR_OP_SELDIE,
+ nor->bouncebuf, 1);
+ }
+
+ if (ret)
+ dev_dbg(nor->dev, "error %d selecting die %d\n", ret, die);
+
+ return ret;
+}
+
+static int winbond_nor_multi_die_ready(struct spi_nor *nor)
+{
+ int ret, i;
+
+ for (i = 0; i < nor->params->n_dice; i++) {
+ ret = winbond_nor_select_die(nor, i);
+ if (ret)
+ return ret;
+
+ ret = spi_nor_sr_ready(nor);
+ if (ret <= 0)
+ return ret;
+ }
+
+ return 1;
+}
+
+static int
+winbond_nor_multi_die_post_sfdp_fixups(struct spi_nor *nor)
+{
+ /*
+ * SFDP can describe the number of dice, but only in optional additional
+ * tables which these chips do not provide. The die count still matters,
+ * because these devices need extra care when reading the busy bit.
+ */
+ nor->params->n_dice = nor->params->size / SZ_64M;
+ nor->params->ready = winbond_nor_multi_die_ready;
+
+ return 0;
+}
+
+static const struct spi_nor_fixups winbond_nor_multi_die_fixups = {
+ .post_sfdp = winbond_nor_multi_die_post_sfdp_fixups,
+};
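
As a worked example of the size/SZ_64M computation above (assuming SFDP reports the full array size): the 1 Gbit W25Q01JV has a 128 MiB array, so n_dice = 128M / 64M = 2; the 2 Gbit W25Q02JV has 256 MiB, giving n_dice = 4. The ready() helper then selects each die in turn and polls its status register before declaring the device ready.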
+
static const struct flash_info winbond_nor_parts[] = {
{
.id = SNOR_ID(0xef, 0x30, 0x10),
@@ -147,6 +227,10 @@ static const struct flash_info winbond_nor_parts[] = {
.size = SZ_64M,
.no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
}, {
+ /* W25Q01JV */
+ .id = SNOR_ID(0xef, 0x40, 0x21),
+ .fixups = &winbond_nor_multi_die_fixups,
+ }, {
.id = SNOR_ID(0xef, 0x50, 0x12),
.name = "w25q20bw",
.size = SZ_256K,
@@ -222,6 +306,10 @@ static const struct flash_info winbond_nor_parts[] = {
.id = SNOR_ID(0xef, 0x70, 0x19),
.name = "w25q256jvm",
}, {
+ /* W25Q02JV */
+ .id = SNOR_ID(0xef, 0x70, 0x22),
+ .fixups = &winbond_nor_multi_die_fixups,
+ }, {
.id = SNOR_ID(0xef, 0x71, 0x19),
.name = "w25m512jv",
.size = SZ_64M,
diff --git a/drivers/mtd/ubi/block.c b/drivers/mtd/ubi/block.c
index 2836905f0152..39cc0a6a4d37 100644
--- a/drivers/mtd/ubi/block.c
+++ b/drivers/mtd/ubi/block.c
@@ -199,7 +199,7 @@ static blk_status_t ubiblock_read(struct request *req)
* and ubi_read_sg() will check that limit.
*/
ubi_sgl_init(&pdu->usgl);
- blk_rq_map_sg(req->q, req, pdu->usgl.sg);
+ blk_rq_map_sg(req, pdu->usgl.sg);
while (bytes_left) {
/*
diff --git a/drivers/mux/gpio.c b/drivers/mux/gpio.c
index cc5f2c1861d4..5710879cd47f 100644
--- a/drivers/mux/gpio.c
+++ b/drivers/mux/gpio.c
@@ -28,9 +28,7 @@ static int mux_gpio_set(struct mux_control *mux, int state)
bitmap_from_arr32(values, &value, BITS_PER_TYPE(value));
- gpiod_set_array_value_cansleep(mux_gpio->gpios->ndescs,
- mux_gpio->gpios->desc,
- mux_gpio->gpios->info, values);
+ gpiod_multi_set_value_cansleep(mux_gpio->gpios, values);
return 0;
}
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 1fd5acdc73c6..271520510b5f 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -518,30 +518,6 @@ source "drivers/net/hippi/Kconfig"
source "drivers/net/ipa/Kconfig"
-config NET_SB1000
- tristate "General Instruments Surfboard 1000"
- depends on ISA && PNP
- help
- This is a driver for the General Instrument (also known as
- NextLevel) SURFboard 1000 internal
- cable modem. This is an ISA card which is used by a number of cable
- TV companies to provide cable modem access. It's a one-way
- downstream-only cable modem, meaning that your upstream net link is
- provided by your regular phone modem.
-
- At present this driver only compiles as a module, so say M here if
- you have this card. The module will be called sb1000. Then read
- <file:Documentation/networking/device_drivers/cable/sb1000.rst> for
- information on how to use this module, as it needs special ppp
- scripts for establishing a connection. Further documentation
- and the necessary scripts can be found at:
-
- <http://www.jacksonville.net/~fventuri/>
- <http://home.adelphia.net/~siglercm/sb1000.html>
- <http://linuxpower.cx/~cable/>
-
- If you don't have this card, of course say N.
-
source "drivers/net/phy/Kconfig"
source "drivers/net/pse-pd/Kconfig"
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 13743d0e83b5..75333251a01a 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -69,7 +69,6 @@ obj-$(CONFIG_PPPOL2TP) += ppp/
obj-$(CONFIG_PPTP) += ppp/
obj-$(CONFIG_SLIP) += slip/
obj-$(CONFIG_SLHC) += slip/
-obj-$(CONFIG_NET_SB1000) += sb1000.o
obj-$(CONFIG_SUNGEM_PHY) += sungem_phy.o
obj-$(CONFIG_WAN) += wan/
obj-$(CONFIG_WLAN) += wireless/
diff --git a/drivers/net/amt.c b/drivers/net/amt.c
index 98c6205ed19f..734a0b3242a9 100644
--- a/drivers/net/amt.c
+++ b/drivers/net/amt.c
@@ -3099,7 +3099,7 @@ static void amt_link_setup(struct net_device *dev)
dev->addr_len = 0;
dev->priv_flags |= IFF_NO_QUEUE;
dev->lltx = true;
- dev->netns_local = true;
+ dev->netns_immutable = true;
dev->features |= NETIF_F_GSO_SOFTWARE;
dev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM;
dev->hw_features |= NETIF_F_FRAGLIST | NETIF_F_RXCSUM;
@@ -3161,14 +3161,17 @@ static int amt_validate(struct nlattr *tb[], struct nlattr *data[],
return 0;
}
-static int amt_newlink(struct net *net, struct net_device *dev,
- struct nlattr *tb[], struct nlattr *data[],
+static int amt_newlink(struct net_device *dev,
+ struct rtnl_newlink_params *params,
struct netlink_ext_ack *extack)
{
+ struct net *link_net = rtnl_newlink_link_net(params);
struct amt_dev *amt = netdev_priv(dev);
+ struct nlattr **data = params->data;
+ struct nlattr **tb = params->tb;
int err = -EINVAL;
- amt->net = net;
+ amt->net = link_net;
amt->mode = nla_get_u32(data[IFLA_AMT_MODE]);
if (data[IFLA_AMT_MAX_TUNNELS] &&
@@ -3183,7 +3186,7 @@ static int amt_newlink(struct net *net, struct net_device *dev,
amt->hash_buckets = AMT_HSIZE;
amt->nr_tunnels = 0;
get_random_bytes(&amt->hash_seed, sizeof(amt->hash_seed));
- amt->stream_dev = dev_get_by_index(net,
+ amt->stream_dev = dev_get_by_index(link_net,
nla_get_u32(data[IFLA_AMT_LINK]));
if (!amt->stream_dev) {
NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_AMT_LINK],
diff --git a/drivers/net/bareudp.c b/drivers/net/bareudp.c
index 70814303aab8..d1473c5f8eef 100644
--- a/drivers/net/bareudp.c
+++ b/drivers/net/bareudp.c
@@ -698,10 +698,13 @@ static void bareudp_dellink(struct net_device *dev, struct list_head *head)
unregister_netdevice_queue(dev, head);
}
-static int bareudp_newlink(struct net *net, struct net_device *dev,
- struct nlattr *tb[], struct nlattr *data[],
+static int bareudp_newlink(struct net_device *dev,
+ struct rtnl_newlink_params *params,
struct netlink_ext_ack *extack)
{
+ struct net *link_net = rtnl_newlink_link_net(params);
+ struct nlattr **data = params->data;
+ struct nlattr **tb = params->tb;
struct bareudp_conf conf;
int err;
@@ -709,7 +712,7 @@ static int bareudp_newlink(struct net *net, struct net_device *dev,
if (err)
return err;
- err = bareudp_configure(net, dev, &conf, extack);
+ err = bareudp_configure(link_net, dev, &conf, extack);
if (err)
return err;
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index f6d0628a36d9..950d8e4d86f8 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -90,6 +90,7 @@
#include <net/tls.h>
#endif
#include <net/ip6_route.h>
+#include <net/netdev_lock.h>
#include <net/xdp.h>
#include "bonding_priv.h"
@@ -322,9 +323,9 @@ static bool bond_sk_check(struct bonding *bond)
}
}
-static bool bond_xdp_check(struct bonding *bond)
+bool bond_xdp_check(struct bonding *bond, int mode)
{
- switch (BOND_MODE(bond)) {
+ switch (mode) {
case BOND_MODE_ROUNDROBIN:
case BOND_MODE_ACTIVEBACKUP:
return true;
@@ -673,22 +674,16 @@ out:
static bool bond_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *xs)
{
struct net_device *real_dev;
- bool ok = false;
rcu_read_lock();
real_dev = bond_ipsec_dev(xs);
- if (!real_dev)
- goto out;
-
- if (!real_dev->xfrmdev_ops ||
- !real_dev->xfrmdev_ops->xdo_dev_offload_ok ||
- netif_is_bond_master(real_dev))
- goto out;
+ if (!real_dev || netif_is_bond_master(real_dev)) {
+ rcu_read_unlock();
+ return false;
+ }
- ok = real_dev->xfrmdev_ops->xdo_dev_offload_ok(skb, xs);
-out:
rcu_read_unlock();
- return ok;
+ return true;
}
/**
@@ -855,7 +850,6 @@ static int bond_check_dev_link(struct bonding *bond,
struct net_device *slave_dev, int reporting)
{
const struct net_device_ops *slave_ops = slave_dev->netdev_ops;
- int (*ioctl)(struct net_device *, struct ifreq *, int);
struct ifreq ifr;
struct mii_ioctl_data *mii;
@@ -871,8 +865,7 @@ static int bond_check_dev_link(struct bonding *bond,
BMSR_LSTATUS : 0;
/* Ethtool can't be used, fallback to MII ioctls. */
- ioctl = slave_ops->ndo_eth_ioctl;
- if (ioctl) {
+ if (slave_ops->ndo_eth_ioctl) {
/* TODO: set pointer to correct ioctl on a per team member
* bases to make this more efficient. that is, once
* we determine the correct ioctl, we will always
@@ -888,9 +881,10 @@ static int bond_check_dev_link(struct bonding *bond,
/* Yes, the mii is overlaid on the ifreq.ifr_ifru */
strscpy_pad(ifr.ifr_name, slave_dev->name, IFNAMSIZ);
mii = if_mii(&ifr);
- if (ioctl(slave_dev, &ifr, SIOCGMIIPHY) == 0) {
+
+ if (dev_eth_ioctl(slave_dev, &ifr, SIOCGMIIPHY) == 0) {
mii->reg_num = MII_BMSR;
- if (ioctl(slave_dev, &ifr, SIOCGMIIREG) == 0)
+ if (dev_eth_ioctl(slave_dev, &ifr, SIOCGMIIREG) == 0)
return mii->val_out & BMSR_LSTATUS;
}
}
@@ -1934,7 +1928,7 @@ void bond_xdp_set_features(struct net_device *bond_dev)
ASSERT_RTNL();
- if (!bond_xdp_check(bond) || !bond_has_slaves(bond)) {
+ if (!bond_xdp_check(bond, BOND_MODE(bond)) || !bond_has_slaves(bond)) {
xdp_clear_features_flag(bond_dev);
return;
}
@@ -2548,7 +2542,7 @@ static int __bond_release_one(struct net_device *bond_dev,
RCU_INIT_POINTER(bond->current_arp_slave, NULL);
- if (!all && (!bond->params.fail_over_mac ||
+ if (!all && (bond->params.fail_over_mac != BOND_FOM_ACTIVE ||
BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP)) {
if (ether_addr_equal_64bits(bond_dev->dev_addr, slave->perm_hwaddr) &&
bond_has_slaves(bond))
@@ -2645,10 +2639,13 @@ static int __bond_release_one(struct net_device *bond_dev,
dev_set_mac_address(slave_dev, (struct sockaddr *)&ss, NULL);
}
- if (unregister)
+ if (unregister) {
+ netdev_lock_ops(slave_dev);
__dev_set_mtu(slave_dev, slave->original_mtu);
- else
+ netdev_unlock_ops(slave_dev);
+ } else {
dev_set_mtu(slave_dev, slave->original_mtu);
+ }
if (!netif_is_bond_master(slave_dev))
slave_dev->priv_flags &= ~IFF_BONDING;
@@ -4214,7 +4211,7 @@ static bool bond_flow_ip(struct sk_buff *skb, struct flow_keys *fk, const void *
}
if (l34 && *ip_proto >= 0)
- fk->ports.ports = __skb_flow_get_ports(skb, *nhoff, *ip_proto, data, hlen);
+ fk->ports.ports = skb_flow_get_ports(skb, *nhoff, *ip_proto, data, hlen);
return true;
}
@@ -5696,7 +5693,7 @@ static int bond_xdp_set(struct net_device *dev, struct bpf_prog *prog,
ASSERT_RTNL();
- if (!bond_xdp_check(bond)) {
+ if (!bond_xdp_check(bond, BOND_MODE(bond))) {
BOND_NL_ERR(dev, extack,
"No native XDP support for the current bonding mode");
return -EOPNOTSUPP;
@@ -6025,7 +6022,7 @@ void bond_setup(struct net_device *bond_dev)
bond_dev->lltx = true;
/* Don't allow bond devices to change network namespaces. */
- bond_dev->netns_local = true;
+ bond_dev->netns_immutable = true;
/* By default, we declare the bond to be fully
* VLAN hardware accelerated capable. Special
diff --git a/drivers/net/bonding/bond_netlink.c b/drivers/net/bonding/bond_netlink.c
index 2a6a424806aa..ac5e402c34bc 100644
--- a/drivers/net/bonding/bond_netlink.c
+++ b/drivers/net/bonding/bond_netlink.c
@@ -564,10 +564,12 @@ static int bond_changelink(struct net_device *bond_dev, struct nlattr *tb[],
return 0;
}
-static int bond_newlink(struct net *src_net, struct net_device *bond_dev,
- struct nlattr *tb[], struct nlattr *data[],
+static int bond_newlink(struct net_device *bond_dev,
+ struct rtnl_newlink_params *params,
struct netlink_ext_ack *extack)
{
+ struct nlattr **data = params->data;
+ struct nlattr **tb = params->tb;
int err;
err = bond_changelink(bond_dev, tb, data, extack);
diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
index 327b6ecdc77e..91893c29b899 100644
--- a/drivers/net/bonding/bond_options.c
+++ b/drivers/net/bonding/bond_options.c
@@ -868,6 +868,9 @@ static bool bond_set_xfrm_features(struct bonding *bond)
static int bond_option_mode_set(struct bonding *bond,
const struct bond_opt_value *newval)
{
+ if (bond->xdp_prog && !bond_xdp_check(bond, newval->value))
+ return -EOPNOTSUPP;
+
if (!bond_mode_uses_arp(newval->value)) {
if (bond->params.arp_interval) {
netdev_dbg(bond->dev, "%s mode is incompatible with arp monitoring, start mii monitoring\n",
@@ -1242,10 +1245,28 @@ static bool slave_can_set_ns_maddr(const struct bonding *bond, struct slave *sla
slave->dev->flags & IFF_MULTICAST;
}
+/**
+ * slave_set_ns_maddrs - add/del all NS mac addresses for slave
+ * @bond: bond device
+ * @slave: slave device
+ * @add: add or remove all the NS mac addresses
+ *
+ * This function adds or deletes all the NS mac addresses on the slave.
+ *
+ * Note: the IPv6 NS target address is the unicast address in the Neighbor
+ * Solicitation (NS) message. The destination address of the NS message is
+ * the solicited-node multicast address of the target, and the destination
+ * mac of the NS message is derived from that solicited-node multicast
+ * address.
+ *
+ * This function is called when
+ * * arp_validate changes
+ * * slaves are enslaved or released
+ */
static void slave_set_ns_maddrs(struct bonding *bond, struct slave *slave, bool add)
{
struct in6_addr *targets = bond->params.ns_targets;
char slot_maddr[MAX_ADDR_LEN];
+ struct in6_addr mcaddr;
int i;
if (!slave_can_set_ns_maddr(bond, slave))
@@ -1255,7 +1276,8 @@ static void slave_set_ns_maddrs(struct bonding *bond, struct slave *slave, bool
if (ipv6_addr_any(&targets[i]))
break;
- if (!ndisc_mc_map(&targets[i], slot_maddr, slave->dev, 0)) {
+ addrconf_addr_solict_mult(&targets[i], &mcaddr);
+ if (!ndisc_mc_map(&mcaddr, slot_maddr, slave->dev, 0)) {
if (add)
dev_mc_add(slave->dev, slot_maddr);
else
@@ -1278,23 +1300,43 @@ void bond_slave_ns_maddrs_del(struct bonding *bond, struct slave *slave)
slave_set_ns_maddrs(bond, slave, false);
}
+/**
+ * slave_set_ns_maddr - set new NS mac address for slave
+ * @bond: bond device
+ * @slave: slave device
+ * @target: the new IPv6 target
+ * @slot: the old IPv6 target in the slot
+ *
+ * This function replaces the old mac address with the new one on the slave.
+ *
+ * Note: the target/slot IPv6 address is the unicast address in the Neighbor
+ * Solicitation (NS) message. The destination address of the NS message is
+ * the solicited-node multicast address of the target, and the destination
+ * mac of the NS message is derived from that solicited-node multicast
+ * address.
+ *
+ * This function is called when
+ * * an IPv6 NS target is added or removed.
+ */
static void slave_set_ns_maddr(struct bonding *bond, struct slave *slave,
struct in6_addr *target, struct in6_addr *slot)
{
- char target_maddr[MAX_ADDR_LEN], slot_maddr[MAX_ADDR_LEN];
+ char mac_addr[MAX_ADDR_LEN];
+ struct in6_addr mcast_addr;
if (!bond->params.arp_validate || !slave_can_set_ns_maddr(bond, slave))
return;
- /* remove the previous maddr from slave */
+ /* remove the previous mac addr from slave */
+ addrconf_addr_solict_mult(slot, &mcast_addr);
if (!ipv6_addr_any(slot) &&
- !ndisc_mc_map(slot, slot_maddr, slave->dev, 0))
- dev_mc_del(slave->dev, slot_maddr);
+ !ndisc_mc_map(&mcast_addr, mac_addr, slave->dev, 0))
+ dev_mc_del(slave->dev, mac_addr);
- /* add new maddr on slave if target is set */
+ /* add new mac addr on slave if target is set */
+ addrconf_addr_solict_mult(target, &mcast_addr);
if (!ipv6_addr_any(target) &&
- !ndisc_mc_map(target, target_maddr, slave->dev, 0))
- dev_mc_add(slave->dev, target_maddr);
+ !ndisc_mc_map(&mcast_addr, mac_addr, slave->dev, 0))
+ dev_mc_add(slave->dev, mac_addr);
}
static void _bond_options_ns_ip6_target_set(struct bonding *bond, int slot,
diff --git a/drivers/net/caif/caif_serial.c b/drivers/net/caif/caif_serial.c
index ed3a589def6b..90ea3dc0fb10 100644
--- a/drivers/net/caif/caif_serial.c
+++ b/drivers/net/caif/caif_serial.c
@@ -126,15 +126,6 @@ static inline void debugfs_rx(struct ser_device *ser, const u8 *data, int size)
ser->rx_blob.data = ser->rx_data;
ser->rx_blob.size = size;
}
-
-static inline void debugfs_tx(struct ser_device *ser, const u8 *data, int size)
-{
- if (size > sizeof(ser->tx_data))
- size = sizeof(ser->tx_data);
- memcpy(ser->tx_data, data, size);
- ser->tx_blob.data = ser->tx_data;
- ser->tx_blob.size = size;
-}
#else
static inline void debugfs_init(struct ser_device *ser, struct tty_struct *tty)
{
@@ -151,11 +142,6 @@ static inline void update_tty_status(struct ser_device *ser)
static inline void debugfs_rx(struct ser_device *ser, const u8 *data, int size)
{
}
-
-static inline void debugfs_tx(struct ser_device *ser, const u8 *data, int size)
-{
-}
-
#endif
static void ldisc_receive(struct tty_struct *tty, const u8 *data,
diff --git a/drivers/net/caif/caif_virtio.c b/drivers/net/caif/caif_virtio.c
index 7fea00c7ca8a..c60386bf2d1a 100644
--- a/drivers/net/caif/caif_virtio.c
+++ b/drivers/net/caif/caif_virtio.c
@@ -745,7 +745,7 @@ err:
if (cfv->vr_rx)
vdev->vringh_config->del_vrhs(cfv->vdev);
- if (cfv->vdev)
+ if (cfv->vq_tx)
vdev->config->del_vqs(cfv->vdev);
free_netdev(netdev);
return err;
diff --git a/drivers/net/can/c_can/c_can_platform.c b/drivers/net/can/c_can/c_can_platform.c
index 399844809bbe..19c86b94a40e 100644
--- a/drivers/net/can/c_can/c_can_platform.c
+++ b/drivers/net/can/c_can/c_can_platform.c
@@ -269,30 +269,22 @@ static int c_can_plat_probe(struct platform_device *pdev)
/* get the appropriate clk */
clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(clk)) {
- ret = PTR_ERR(clk);
- goto exit;
- }
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
/* get the platform data */
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- ret = irq;
- goto exit;
- }
+ if (irq < 0)
+ return irq;
addr = devm_platform_get_and_ioremap_resource(pdev, 0, &mem);
- if (IS_ERR(addr)) {
- ret = PTR_ERR(addr);
- goto exit;
- }
+ if (IS_ERR(addr))
+ return PTR_ERR(addr);
/* allocate the c_can device */
dev = alloc_c_can_dev(drvdata->msg_obj_num);
- if (!dev) {
- ret = -ENOMEM;
- goto exit;
- }
+ if (!dev)
+ return -ENOMEM;
priv = netdev_priv(dev);
switch (drvdata->id) {
@@ -324,33 +316,22 @@ static int c_can_plat_probe(struct platform_device *pdev)
/* Check if we need custom RAMINIT via syscon. Mostly for TI
* platforms. Only supported with DT boot.
*/
- if (np && of_property_read_bool(np, "syscon-raminit")) {
+ if (np && of_property_present(np, "syscon-raminit")) {
+ unsigned int args[2];
u32 id;
struct c_can_raminit *raminit = &priv->raminit_sys;
ret = -EINVAL;
- raminit->syscon = syscon_regmap_lookup_by_phandle(np,
- "syscon-raminit");
+ raminit->syscon = syscon_regmap_lookup_by_phandle_args(np,
+ "syscon-raminit",
+ 2, args);
if (IS_ERR(raminit->syscon)) {
- /* can fail with -EPROBE_DEFER */
ret = PTR_ERR(raminit->syscon);
- free_c_can_dev(dev);
- return ret;
- }
-
- if (of_property_read_u32_index(np, "syscon-raminit", 1,
- &raminit->reg)) {
- dev_err(&pdev->dev,
- "couldn't get the RAMINIT reg. offset!\n");
goto exit_free_device;
}
- if (of_property_read_u32_index(np, "syscon-raminit", 2,
- &id)) {
- dev_err(&pdev->dev,
- "couldn't get the CAN instance ID\n");
- goto exit_free_device;
- }
+ raminit->reg = args[0];
+ id = args[1];
if (id >= drvdata->raminit_num) {
dev_err(&pdev->dev,
@@ -396,8 +377,6 @@ exit_pm_runtime:
pm_runtime_disable(priv->device);
exit_free_device:
free_c_can_dev(dev);
-exit:
- dev_err(&pdev->dev, "probe failed\n");
return ret;
}
diff --git a/drivers/net/can/dev/netlink.c b/drivers/net/can/dev/netlink.c
index 01aacdcda260..f1db9b7ffd4d 100644
--- a/drivers/net/can/dev/netlink.c
+++ b/drivers/net/can/dev/netlink.c
@@ -624,8 +624,8 @@ nla_put_failure:
return -EMSGSIZE;
}
-static int can_newlink(struct net *src_net, struct net_device *dev,
- struct nlattr *tb[], struct nlattr *data[],
+static int can_newlink(struct net_device *dev,
+ struct rtnl_newlink_params *params,
struct netlink_ext_ack *extack)
{
return -EOPNOTSUPP;
diff --git a/drivers/net/can/flexcan/flexcan-core.c b/drivers/net/can/flexcan/flexcan-core.c
index ac1a860986df..6d80c341b26f 100644
--- a/drivers/net/can/flexcan/flexcan-core.c
+++ b/drivers/net/can/flexcan/flexcan-core.c
@@ -26,6 +26,7 @@
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/can/platform/flexcan.h>
+#include <linux/phy/phy.h>
#include <linux/pm_runtime.h>
#include <linux/property.h>
#include <linux/regmap.h>
@@ -386,6 +387,16 @@ static const struct flexcan_devtype_data fsl_lx2160a_r1_devtype_data = {
FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR,
};
+static const struct flexcan_devtype_data nxp_s32g2_devtype_data = {
+ .quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS |
+ FLEXCAN_QUIRK_DISABLE_MECR | FLEXCAN_QUIRK_BROKEN_PERR_STATE |
+ FLEXCAN_QUIRK_USE_RX_MAILBOX | FLEXCAN_QUIRK_SUPPORT_FD |
+ FLEXCAN_QUIRK_SUPPORT_ECC | FLEXCAN_QUIRK_NR_IRQ_3 |
+ FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX |
+ FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR |
+ FLEXCAN_QUIRK_SECONDARY_MB_IRQ,
+};
+
static const struct can_bittiming_const flexcan_bittiming_const = {
.name = DRV_NAME,
.tseg1_min = 4,
@@ -634,18 +645,22 @@ static void flexcan_clks_disable(const struct flexcan_priv *priv)
static inline int flexcan_transceiver_enable(const struct flexcan_priv *priv)
{
- if (!priv->reg_xceiver)
- return 0;
+ if (priv->reg_xceiver)
+ return regulator_enable(priv->reg_xceiver);
+ else if (priv->transceiver)
+ return phy_power_on(priv->transceiver);
- return regulator_enable(priv->reg_xceiver);
+ return 0;
}
static inline int flexcan_transceiver_disable(const struct flexcan_priv *priv)
{
- if (!priv->reg_xceiver)
- return 0;
+ if (priv->reg_xceiver)
+ return regulator_disable(priv->reg_xceiver);
+ else if (priv->transceiver)
+ return phy_power_off(priv->transceiver);
- return regulator_disable(priv->reg_xceiver);
+ return 0;
}
static int flexcan_chip_enable(struct flexcan_priv *priv)
@@ -1762,14 +1777,25 @@ static int flexcan_open(struct net_device *dev)
goto out_free_irq_boff;
}
+ if (priv->devtype_data.quirks & FLEXCAN_QUIRK_SECONDARY_MB_IRQ) {
+ err = request_irq(priv->irq_secondary_mb,
+ flexcan_irq, IRQF_SHARED, dev->name, dev);
+ if (err)
+ goto out_free_irq_err;
+ }
+
flexcan_chip_interrupts_enable(dev);
netif_start_queue(dev);
return 0;
+ out_free_irq_err:
+ if (priv->devtype_data.quirks & FLEXCAN_QUIRK_NR_IRQ_3)
+ free_irq(priv->irq_err, dev);
out_free_irq_boff:
- free_irq(priv->irq_boff, dev);
+ if (priv->devtype_data.quirks & FLEXCAN_QUIRK_NR_IRQ_3)
+ free_irq(priv->irq_boff, dev);
out_free_irq:
free_irq(dev->irq, dev);
out_can_rx_offload_disable:
@@ -1794,6 +1820,9 @@ static int flexcan_close(struct net_device *dev)
netif_stop_queue(dev);
flexcan_chip_interrupts_disable(dev);
+ if (priv->devtype_data.quirks & FLEXCAN_QUIRK_SECONDARY_MB_IRQ)
+ free_irq(priv->irq_secondary_mb, dev);
+
if (priv->devtype_data.quirks & FLEXCAN_QUIRK_NR_IRQ_3) {
free_irq(priv->irq_err, dev);
free_irq(priv->irq_boff, dev);
@@ -2041,6 +2070,7 @@ static const struct of_device_id flexcan_of_match[] = {
{ .compatible = "fsl,vf610-flexcan", .data = &fsl_vf610_devtype_data, },
{ .compatible = "fsl,ls1021ar2-flexcan", .data = &fsl_ls1021a_r2_devtype_data, },
{ .compatible = "fsl,lx2160ar1-flexcan", .data = &fsl_lx2160a_r1_devtype_data, },
+ { .compatible = "nxp,s32g2-flexcan", .data = &nxp_s32g2_devtype_data, },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, flexcan_of_match);
@@ -2061,6 +2091,7 @@ static int flexcan_probe(struct platform_device *pdev)
struct net_device *dev;
struct flexcan_priv *priv;
struct regulator *reg_xceiver;
+ struct phy *transceiver;
struct clk *clk_ipg = NULL, *clk_per = NULL;
struct flexcan_regs __iomem *regs;
struct flexcan_platform_data *pdata;
@@ -2076,6 +2107,11 @@ static int flexcan_probe(struct platform_device *pdev)
else if (IS_ERR(reg_xceiver))
return PTR_ERR(reg_xceiver);
+ transceiver = devm_phy_optional_get(&pdev->dev, NULL);
+ if (IS_ERR(transceiver))
+ return dev_err_probe(&pdev->dev, PTR_ERR(transceiver),
+ "failed to get phy\n");
+
if (pdev->dev.of_node) {
of_property_read_u32(pdev->dev.of_node,
"clock-frequency", &clock_freq);
@@ -2173,6 +2209,10 @@ static int flexcan_probe(struct platform_device *pdev)
priv->clk_per = clk_per;
priv->clk_src = clk_src;
priv->reg_xceiver = reg_xceiver;
+ priv->transceiver = transceiver;
+
+ if (transceiver)
+ priv->can.bitrate_max = transceiver->attrs.max_link_rate;
if (priv->devtype_data.quirks & FLEXCAN_QUIRK_NR_IRQ_3) {
priv->irq_boff = platform_get_irq(pdev, 1);
@@ -2187,6 +2227,14 @@ static int flexcan_probe(struct platform_device *pdev)
}
}
+ if (priv->devtype_data.quirks & FLEXCAN_QUIRK_SECONDARY_MB_IRQ) {
+ priv->irq_secondary_mb = platform_get_irq_byname(pdev, "mb-1");
+ if (priv->irq_secondary_mb < 0) {
+ err = priv->irq_secondary_mb;
+ goto failed_platform_get_irq;
+ }
+ }
+
if (priv->devtype_data.quirks & FLEXCAN_QUIRK_SUPPORT_FD) {
priv->can.ctrlmode_supported |= CAN_CTRLMODE_FD |
CAN_CTRLMODE_FD_NON_ISO;
@@ -2260,14 +2308,19 @@ static int __maybe_unused flexcan_suspend(struct device *device)
flexcan_chip_interrupts_disable(dev);
+ err = flexcan_transceiver_disable(priv);
+ if (err)
+ return err;
+
err = pinctrl_pm_select_sleep_state(device);
if (err)
return err;
}
netif_stop_queue(dev);
netif_device_detach(dev);
+
+ priv->can.state = CAN_STATE_SLEEPING;
}
- priv->can.state = CAN_STATE_SLEEPING;
return 0;
}
@@ -2278,7 +2331,6 @@ static int __maybe_unused flexcan_resume(struct device *device)
struct flexcan_priv *priv = netdev_priv(dev);
int err;
- priv->can.state = CAN_STATE_ERROR_ACTIVE;
if (netif_running(dev)) {
netif_device_attach(dev);
netif_start_queue(dev);
@@ -2292,12 +2344,20 @@ static int __maybe_unused flexcan_resume(struct device *device)
if (err)
return err;
- err = flexcan_chip_start(dev);
+ err = flexcan_transceiver_enable(priv);
if (err)
return err;
+ err = flexcan_chip_start(dev);
+ if (err) {
+ flexcan_transceiver_disable(priv);
+ return err;
+ }
+
flexcan_chip_interrupts_enable(dev);
}
+
+ priv->can.state = CAN_STATE_ERROR_ACTIVE;
}
return 0;
diff --git a/drivers/net/can/flexcan/flexcan.h b/drivers/net/can/flexcan/flexcan.h
index 4933d8c7439e..16692a2502eb 100644
--- a/drivers/net/can/flexcan/flexcan.h
+++ b/drivers/net/can/flexcan/flexcan.h
@@ -70,6 +70,10 @@
#define FLEXCAN_QUIRK_SUPPORT_RX_FIFO BIT(16)
/* Setup stop mode with ATF SCMI protocol to support wakeup */
#define FLEXCAN_QUIRK_SETUP_STOP_MODE_SCMI BIT(17)
+/* Device has two separate interrupt lines for two mailbox ranges, both of
+ * which need an interrupt handler registered.
+ */
+#define FLEXCAN_QUIRK_SECONDARY_MB_IRQ BIT(18)
struct flexcan_devtype_data {
u32 quirks; /* quirks needed for different IP cores */
@@ -103,10 +107,12 @@ struct flexcan_priv {
struct clk *clk_per;
struct flexcan_devtype_data devtype_data;
struct regulator *reg_xceiver;
+ struct phy *transceiver;
struct flexcan_stop_mode stm;
int irq_boff;
int irq_err;
+ int irq_secondary_mb;
/* IPC handle when setup stop mode by System Controller firmware(scfw) */
struct imx_sc_ipc *sc_ipc_handle;
diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
index d025d4163fd1..884a6352c42b 100644
--- a/drivers/net/can/m_can/m_can.c
+++ b/drivers/net/can/m_can/m_can.c
@@ -2420,12 +2420,11 @@ int m_can_class_register(struct m_can_classdev *cdev)
if (!cdev->net->irq) {
dev_dbg(cdev->dev, "Polling enabled, initialize hrtimer");
- hrtimer_init(&cdev->hrtimer, CLOCK_MONOTONIC,
- HRTIMER_MODE_REL_PINNED);
- cdev->hrtimer.function = &hrtimer_callback;
+ hrtimer_setup(&cdev->hrtimer, &hrtimer_callback, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL_PINNED);
} else {
- hrtimer_init(&cdev->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- cdev->hrtimer.function = m_can_coalescing_timer;
+ hrtimer_setup(&cdev->hrtimer, m_can_coalescing_timer, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
}
ret = m_can_dev_setup(cdev);
diff --git a/drivers/net/can/rcar/rcar_canfd.c b/drivers/net/can/rcar/rcar_canfd.c
index df1a5d0b37b2..aa3df0d05b85 100644
--- a/drivers/net/can/rcar/rcar_canfd.c
+++ b/drivers/net/can/rcar/rcar_canfd.c
@@ -787,22 +787,14 @@ static void rcar_canfd_configure_controller(struct rcar_canfd_global *gpriv)
}
static void rcar_canfd_configure_afl_rules(struct rcar_canfd_global *gpriv,
- u32 ch)
+ u32 ch, u32 rule_entry)
{
- u32 cfg;
- int offset, start, page, num_rules = RCANFD_CHANNEL_NUMRULES;
+ int offset, page, num_rules = RCANFD_CHANNEL_NUMRULES;
+ u32 rule_entry_index = rule_entry % 16;
u32 ridx = ch + RCANFD_RFFIFO_IDX;
- if (ch == 0) {
- start = 0; /* Channel 0 always starts from 0th rule */
- } else {
- /* Get number of Channel 0 rules and adjust */
- cfg = rcar_canfd_read(gpriv->base, RCANFD_GAFLCFG(ch));
- start = RCANFD_GAFLCFG_GETRNC(gpriv, 0, cfg);
- }
-
/* Enable write access to entry */
- page = RCANFD_GAFL_PAGENUM(start);
+ page = RCANFD_GAFL_PAGENUM(rule_entry);
rcar_canfd_set_bit(gpriv->base, RCANFD_GAFLECTR,
(RCANFD_GAFLECTR_AFLPN(gpriv, page) |
RCANFD_GAFLECTR_AFLDAE));
@@ -818,13 +810,13 @@ static void rcar_canfd_configure_afl_rules(struct rcar_canfd_global *gpriv,
offset = RCANFD_C_GAFL_OFFSET;
/* Accept all IDs */
- rcar_canfd_write(gpriv->base, RCANFD_GAFLID(offset, start), 0);
+ rcar_canfd_write(gpriv->base, RCANFD_GAFLID(offset, rule_entry_index), 0);
/* IDE or RTR is not considered for matching */
- rcar_canfd_write(gpriv->base, RCANFD_GAFLM(offset, start), 0);
+ rcar_canfd_write(gpriv->base, RCANFD_GAFLM(offset, rule_entry_index), 0);
/* Any data length accepted */
- rcar_canfd_write(gpriv->base, RCANFD_GAFLP0(offset, start), 0);
+ rcar_canfd_write(gpriv->base, RCANFD_GAFLP0(offset, rule_entry_index), 0);
/* Place the msg in corresponding Rx FIFO entry */
- rcar_canfd_set_bit(gpriv->base, RCANFD_GAFLP1(offset, start),
+ rcar_canfd_set_bit(gpriv->base, RCANFD_GAFLP1(offset, rule_entry_index),
RCANFD_GAFLP1_GAFLFDP(ridx));
/* Disable write access to page */
@@ -1851,6 +1843,7 @@ static int rcar_canfd_probe(struct platform_device *pdev)
unsigned long channels_mask = 0;
int err, ch_irq, g_irq;
int g_err_irq, g_recc_irq;
+ u32 rule_entry = 0;
bool fdmode = true; /* CAN FD only mode - default */
char name[9] = "channelX";
int i;
@@ -2023,7 +2016,8 @@ static int rcar_canfd_probe(struct platform_device *pdev)
rcar_canfd_configure_tx(gpriv, ch);
/* Configure receive rules */
- rcar_canfd_configure_afl_rules(gpriv, ch);
+ rcar_canfd_configure_afl_rules(gpriv, ch, rule_entry);
+ rule_entry += RCANFD_CHANNEL_NUMRULES;
}
/* Configure common interrupts */
diff --git a/drivers/net/can/rockchip/rockchip_canfd-core.c b/drivers/net/can/rockchip/rockchip_canfd-core.c
index d9a937ba126c..46201c126703 100644
--- a/drivers/net/can/rockchip/rockchip_canfd-core.c
+++ b/drivers/net/can/rockchip/rockchip_canfd-core.c
@@ -236,11 +236,6 @@ static void rkcanfd_chip_fifo_setup(struct rkcanfd_priv *priv)
{
u32 reg;
- /* TXE FIFO */
- reg = rkcanfd_read(priv, RKCANFD_REG_RX_FIFO_CTRL);
- reg |= RKCANFD_REG_RX_FIFO_CTRL_RX_FIFO_ENABLE;
- rkcanfd_write(priv, RKCANFD_REG_RX_FIFO_CTRL, reg);
-
/* RX FIFO */
reg = rkcanfd_read(priv, RKCANFD_REG_RX_FIFO_CTRL);
reg |= RKCANFD_REG_RX_FIFO_CTRL_RX_FIFO_ENABLE;
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c
index 7209a831f0f2..c34f2067a989 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c
@@ -541,11 +541,11 @@ int mcp251xfd_ring_alloc(struct mcp251xfd_priv *priv)
}
priv->rx_ring_num = i;
- hrtimer_init(&priv->rx_irq_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- priv->rx_irq_timer.function = mcp251xfd_rx_irq_timer;
+ hrtimer_setup(&priv->rx_irq_timer, mcp251xfd_rx_irq_timer, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
- hrtimer_init(&priv->tx_irq_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- priv->tx_irq_timer.function = mcp251xfd_tx_irq_timer;
+ hrtimer_setup(&priv->tx_irq_timer, mcp251xfd_tx_irq_timer, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
return 0;
}
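
Both hrtimer hunks above (m_can and mcp251xfd) follow the same tree-wide conversion: hrtimer_setup() takes the callback as a parameter instead of having callers assign .function after hrtimer_init(). A minimal sketch of the before/after, with priv->timer standing in for the driver-specific field (an assumption for illustration):

	#include <linux/hrtimer.h>

	static enum hrtimer_restart my_timer_fn(struct hrtimer *t)
	{
		/* handle expiry */
		return HRTIMER_NORESTART;
	}

	/* Old style: two steps, callback assigned separately. */
	hrtimer_init(&priv->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	priv->timer.function = my_timer_fn;

	/* New style: one call, callback passed up front. */
	hrtimer_setup(&priv->timer, my_timer_fn, CLOCK_MONOTONIC, HRTIMER_MODE_REL);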
diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c
index b6f4de375df7..3ccac6781b98 100644
--- a/drivers/net/can/usb/gs_usb.c
+++ b/drivers/net/can/usb/gs_usb.c
@@ -43,6 +43,9 @@
#define USB_XYLANTA_SAINT3_VENDOR_ID 0x16d0
#define USB_XYLANTA_SAINT3_PRODUCT_ID 0x0f30
+#define USB_CANNECTIVITY_VENDOR_ID 0x1209
+#define USB_CANNECTIVITY_PRODUCT_ID 0xca01
+
/* Timestamp 32 bit timer runs at 1 MHz (1 µs tick). Worker accounts
* for timer overflow (will be after ~71 minutes)
*/
@@ -1546,6 +1549,8 @@ static const struct usb_device_id gs_usb_table[] = {
USB_ABE_CANDEBUGGER_FD_PRODUCT_ID, 0) },
{ USB_DEVICE_INTERFACE_NUMBER(USB_XYLANTA_SAINT3_VENDOR_ID,
USB_XYLANTA_SAINT3_PRODUCT_ID, 0) },
+ { USB_DEVICE_INTERFACE_NUMBER(USB_CANNECTIVITY_VENDOR_ID,
+ USB_CANNECTIVITY_PRODUCT_ID, 0) },
{} /* Terminating entry */
};
diff --git a/drivers/net/can/usb/ucan.c b/drivers/net/can/usb/ucan.c
index 39a63b7313a4..07406daf7c88 100644
--- a/drivers/net/can/usb/ucan.c
+++ b/drivers/net/can/usb/ucan.c
@@ -186,7 +186,7 @@ union ucan_ctl_payload {
*/
struct ucan_ctl_cmd_get_protocol_version cmd_get_protocol_version;
- u8 raw[128];
+ u8 fw_str[128];
} __packed;
enum {
@@ -424,18 +424,20 @@ static int ucan_ctrl_command_out(struct ucan_priv *up,
UCAN_USB_CTL_PIPE_TIMEOUT);
}
-static int ucan_device_request_in(struct ucan_priv *up,
- u8 cmd, u16 subcmd, u16 datalen)
+static void ucan_get_fw_str(struct ucan_priv *up, char *fw_str, size_t size)
{
- return usb_control_msg(up->udev,
- usb_rcvctrlpipe(up->udev, 0),
- cmd,
- USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
- subcmd,
- 0,
- up->ctl_msg_buffer,
- datalen,
- UCAN_USB_CTL_PIPE_TIMEOUT);
+ int ret;
+
+ ret = usb_control_msg(up->udev, usb_rcvctrlpipe(up->udev, 0),
+ UCAN_DEVICE_GET_FW_STRING,
+ USB_DIR_IN | USB_TYPE_VENDOR |
+ USB_RECIP_DEVICE,
+ 0, 0, fw_str, size - 1,
+ UCAN_USB_CTL_PIPE_TIMEOUT);
+ if (ret > 0)
+ fw_str[ret] = '\0';
+ else
+ strscpy(fw_str, "unknown", size);
}
/* Parse the device information structure reported by the device and
@@ -1314,7 +1316,6 @@ static int ucan_probe(struct usb_interface *intf,
u8 in_ep_addr;
u8 out_ep_addr;
union ucan_ctl_payload *ctl_msg_buffer;
- char firmware_str[sizeof(union ucan_ctl_payload) + 1];
udev = interface_to_usbdev(intf);
@@ -1527,17 +1528,6 @@ static int ucan_probe(struct usb_interface *intf,
*/
ucan_parse_device_info(up, &ctl_msg_buffer->cmd_get_device_info);
- /* just print some device information - if available */
- ret = ucan_device_request_in(up, UCAN_DEVICE_GET_FW_STRING, 0,
- sizeof(union ucan_ctl_payload));
- if (ret > 0) {
- /* copy string while ensuring zero termination */
- strscpy(firmware_str, up->ctl_msg_buffer->raw,
- sizeof(union ucan_ctl_payload) + 1);
- } else {
- strcpy(firmware_str, "unknown");
- }
-
/* device is compatible, reset it */
ret = ucan_ctrl_command_out(up, UCAN_COMMAND_RESET, 0, 0);
if (ret < 0)
@@ -1555,7 +1545,10 @@ static int ucan_probe(struct usb_interface *intf,
/* initialisation complete, log device info */
netdev_info(up->netdev, "registered device\n");
- netdev_info(up->netdev, "firmware string: %s\n", firmware_str);
+ ucan_get_fw_str(up, up->ctl_msg_buffer->fw_str,
+ sizeof(up->ctl_msg_buffer->fw_str));
+ netdev_info(up->netdev, "firmware string: %s\n",
+ up->ctl_msg_buffer->fw_str);
/* success */
return 0;
diff --git a/drivers/net/can/vxcan.c b/drivers/net/can/vxcan.c
index ca8811941085..99a78a757167 100644
--- a/drivers/net/can/vxcan.c
+++ b/drivers/net/can/vxcan.c
@@ -172,10 +172,13 @@ static void vxcan_setup(struct net_device *dev)
/* forward declaration for rtnl_create_link() */
static struct rtnl_link_ops vxcan_link_ops;
-static int vxcan_newlink(struct net *peer_net, struct net_device *dev,
- struct nlattr *tb[], struct nlattr *data[],
+static int vxcan_newlink(struct net_device *dev,
+ struct rtnl_newlink_params *params,
struct netlink_ext_ack *extack)
{
+ struct net *peer_net = rtnl_newlink_peer_net(params);
+ struct nlattr **data = params->data;
+ struct nlattr **tb = params->tb;
struct vxcan_priv *priv;
struct net_device *peer;
diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig
index 2d10b4d6cfbb..bb9812b3b0e8 100644
--- a/drivers/net/dsa/Kconfig
+++ b/drivers/net/dsa/Kconfig
@@ -37,6 +37,7 @@ config NET_DSA_LANTIQ_GSWIP
config NET_DSA_MT7530
tristate "MediaTek MT7530 and MT7531 Ethernet switch support"
select NET_DSA_TAG_MTK
+ select REGMAP_IRQ
imply NET_DSA_MT7530_MDIO
imply NET_DSA_MT7530_MMIO
help
diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
index 79dc77835681..61d164ffb3ae 100644
--- a/drivers/net/dsa/b53/b53_common.c
+++ b/drivers/net/dsa/b53/b53_common.c
@@ -2410,6 +2410,19 @@ static const struct b53_chip_data b53_switch_chips[] = {
.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
},
{
+ .chip_id = BCM53101_DEVICE_ID,
+ .dev_name = "BCM53101",
+ .vlans = 4096,
+ .enabled_ports = 0x11f,
+ .arl_bins = 4,
+ .arl_buckets = 512,
+ .vta_regs = B53_VTA_REGS,
+ .imp_port = 8,
+ .duplex_reg = B53_DUPLEX_STAT_GE,
+ .jumbo_pm_reg = B53_JUMBO_PORT_MASK,
+ .jumbo_size_reg = B53_JUMBO_MAX_SIZE,
+ },
+ {
.chip_id = BCM53115_DEVICE_ID,
.dev_name = "BCM53115",
.vlans = 4096,
@@ -2789,6 +2802,7 @@ int b53_switch_detect(struct b53_device *dev)
return ret;
switch (id32) {
+ case BCM53101_DEVICE_ID:
case BCM53115_DEVICE_ID:
case BCM53125_DEVICE_ID:
case BCM53128_DEVICE_ID:
diff --git a/drivers/net/dsa/b53/b53_mdio.c b/drivers/net/dsa/b53/b53_mdio.c
index 31d070bf161a..43a3b37b731b 100644
--- a/drivers/net/dsa/b53/b53_mdio.c
+++ b/drivers/net/dsa/b53/b53_mdio.c
@@ -374,6 +374,7 @@ static void b53_mdio_shutdown(struct mdio_device *mdiodev)
static const struct of_device_id b53_of_match[] = {
{ .compatible = "brcm,bcm5325" },
+ { .compatible = "brcm,bcm53101" },
{ .compatible = "brcm,bcm53115" },
{ .compatible = "brcm,bcm53125" },
{ .compatible = "brcm,bcm53128" },
diff --git a/drivers/net/dsa/b53/b53_priv.h b/drivers/net/dsa/b53/b53_priv.h
index 9e9b5bc0c5d6..0166c37a13a7 100644
--- a/drivers/net/dsa/b53/b53_priv.h
+++ b/drivers/net/dsa/b53/b53_priv.h
@@ -66,6 +66,7 @@ enum {
BCM5395_DEVICE_ID = 0x95,
BCM5397_DEVICE_ID = 0x97,
BCM5398_DEVICE_ID = 0x98,
+ BCM53101_DEVICE_ID = 0x53101,
BCM53115_DEVICE_ID = 0x53115,
BCM53125_DEVICE_ID = 0x53125,
BCM53128_DEVICE_ID = 0x53128,
@@ -188,6 +189,7 @@ static inline int is531x5(struct b53_device *dev)
{
return dev->chip_id == BCM53115_DEVICE_ID ||
dev->chip_id == BCM53125_DEVICE_ID ||
+ dev->chip_id == BCM53101_DEVICE_ID ||
dev->chip_id == BCM53128_DEVICE_ID ||
dev->chip_id == BCM53134_DEVICE_ID;
}
diff --git a/drivers/net/dsa/b53/b53_serdes.c b/drivers/net/dsa/b53/b53_serdes.c
index 4730982b6840..7460122f6abc 100644
--- a/drivers/net/dsa/b53/b53_serdes.c
+++ b/drivers/net/dsa/b53/b53_serdes.c
@@ -239,7 +239,6 @@ int b53_serdes_init(struct b53_device *dev, int port)
pcs->dev = dev;
pcs->lane = lane;
pcs->pcs.ops = &b53_pcs_ops;
- pcs->pcs.neg_mode = true;
return 0;
}
diff --git a/drivers/net/dsa/microchip/ksz8.c b/drivers/net/dsa/microchip/ksz8.c
index da7110d67558..be433b4e2b1c 100644
--- a/drivers/net/dsa/microchip/ksz8.c
+++ b/drivers/net/dsa/microchip/ksz8.c
@@ -1625,7 +1625,6 @@ void ksz8_port_setup(struct ksz_device *dev, int port, bool cpu_port)
const u16 *regs = dev->info->regs;
struct dsa_switch *ds = dev->ds;
const u32 *masks;
- int queues;
u8 member;
masks = dev->info->masks;
@@ -1633,15 +1632,7 @@ void ksz8_port_setup(struct ksz_device *dev, int port, bool cpu_port)
/* enable broadcast storm limit */
ksz_port_cfg(dev, port, P_BCAST_STORM_CTRL, PORT_BROADCAST_STORM, true);
- /* For KSZ88x3 enable only one queue by default, otherwise we won't
- * be able to get rid of PCP prios on Port 2.
- */
- if (ksz_is_ksz88x3(dev))
- queues = 1;
- else
- queues = dev->info->num_tx_queues;
-
- ksz8_port_queue_split(dev, port, queues);
+ ksz8_port_queue_split(dev, port, dev->info->num_tx_queues);
/* replace priority */
ksz_port_cfg(dev, port, P_802_1P_CTRL,
diff --git a/drivers/net/dsa/microchip/ksz_dcb.c b/drivers/net/dsa/microchip/ksz_dcb.c
index 30b4a6186e38..c3b501997ac9 100644
--- a/drivers/net/dsa/microchip/ksz_dcb.c
+++ b/drivers/net/dsa/microchip/ksz_dcb.c
@@ -10,7 +10,12 @@
#include "ksz_dcb.h"
#include "ksz8.h"
-#define KSZ8_REG_PORT_1_CTRL_0 0x10
+/* Port X Control 0 register.
+ * The datasheet specifies: Port 1 - 0x10, Port 2 - 0x20, Port 3 - 0x30.
+ * However, the driver uses get_port_addr(), which maps Port 1 to offset 0.
+ * Therefore, we define the base offset as 0x00 here to align with that logic.
+ */
+#define KSZ8_REG_PORT_1_CTRL_0 0x00
#define KSZ8_PORT_DIFFSERV_ENABLE BIT(6)
#define KSZ8_PORT_802_1P_ENABLE BIT(5)
#define KSZ8_PORT_BASED_PRIO_M GENMASK(4, 3)
@@ -182,49 +187,6 @@ int ksz_port_get_default_prio(struct dsa_switch *ds, int port)
}
/**
- * ksz88x3_port_set_default_prio_quirks - Quirks for default priority
- * @dev: Pointer to the KSZ switch device structure
- * @port: Port number for which to set the default priority
- * @prio: Priority value to set
- *
- * This function implements quirks for setting the default priority on KSZ88x3
- * devices. On Port 2, no other priority providers are working
- * except of PCP. So, configuring default priority on Port 2 is not possible.
- * On Port 1, it is not possible to configure port priority if PCP
- * apptrust on Port 2 is disabled. Since we disable multiple queues on the
- * switch to disable PCP on Port 2, we need to ensure that the default priority
- * configuration on Port 1 is in agreement with the configuration on Port 2.
- *
- * Return: 0 on success, or a negative error code on failure
- */
-static int ksz88x3_port_set_default_prio_quirks(struct ksz_device *dev, int port,
- u8 prio)
-{
- if (!prio)
- return 0;
-
- if (port == KSZ_PORT_2) {
- dev_err(dev->dev, "Port priority configuration is not working on Port 2\n");
- return -EINVAL;
- } else if (port == KSZ_PORT_1) {
- u8 port2_data;
- int ret;
-
- ret = ksz_pread8(dev, KSZ_PORT_2, KSZ8_REG_PORT_1_CTRL_0,
- &port2_data);
- if (ret)
- return ret;
-
- if (!(port2_data & KSZ8_PORT_802_1P_ENABLE)) {
- dev_err(dev->dev, "Not possible to configure port priority on Port 1 if PCP apptrust on Port 2 is disabled\n");
- return -EINVAL;
- }
- }
-
- return 0;
-}
-
-/**
* ksz_port_set_default_prio - Sets the default priority for a port on a KSZ
* switch
* @ds: Pointer to the DSA switch structure
@@ -239,18 +201,12 @@ static int ksz88x3_port_set_default_prio_quirks(struct ksz_device *dev, int port
int ksz_port_set_default_prio(struct dsa_switch *ds, int port, u8 prio)
{
struct ksz_device *dev = ds->priv;
- int reg, shift, ret;
+ int reg, shift;
u8 mask;
if (prio >= dev->info->num_ipms)
return -EINVAL;
- if (ksz_is_ksz88x3(dev)) {
- ret = ksz88x3_port_set_default_prio_quirks(dev, port, prio);
- if (ret)
- return ret;
- }
-
ksz_get_default_port_prio_reg(dev, &reg, &mask, &shift);
return ksz_prmw8(dev, port, reg, mask, (prio << shift) & mask);
@@ -519,155 +475,6 @@ err_sel_not_vaild:
}
/**
- * ksz88x3_port1_apptrust_quirk - Quirk for apptrust configuration on Port 1
- * of KSZ88x3 devices
- * @dev: Pointer to the KSZ switch device structure
- * @port: Port number for which to set the apptrust selectors
- * @reg: Register address for the apptrust configuration
- * @port1_data: Data to set for the apptrust configuration
- *
- * This function implements a quirk for apptrust configuration on Port 1 of
- * KSZ88x3 devices. It ensures that apptrust configuration on Port 1 is not
- * possible if PCP apptrust on Port 2 is disabled. This is because the Port 2
- * seems to be permanently hardwired to PCP classification, so we need to
- * do Port 1 configuration always in agreement with Port 2 configuration.
- *
- * Return: 0 on success, or a negative error code on failure
- */
-static int ksz88x3_port1_apptrust_quirk(struct ksz_device *dev, int port,
- int reg, u8 port1_data)
-{
- u8 port2_data;
- int ret;
-
- /* If no apptrust is requested for Port 1, no need to care about Port 2
- * configuration.
- */
- if (!(port1_data & (KSZ8_PORT_802_1P_ENABLE | KSZ8_PORT_DIFFSERV_ENABLE)))
- return 0;
-
- /* We got request to enable any apptrust on Port 1. To make it possible,
- * we need to enable multiple queues on the switch. If we enable
- * multiqueue support, PCP classification on Port 2 will be
- * automatically activated by HW.
- */
- ret = ksz_pread8(dev, KSZ_PORT_2, reg, &port2_data);
- if (ret)
- return ret;
-
- /* If KSZ8_PORT_802_1P_ENABLE bit is set on Port 2, the driver showed
- * the interest in PCP classification on Port 2. In this case,
- * multiqueue support is enabled and we can enable any apptrust on
- * Port 1.
- * If KSZ8_PORT_802_1P_ENABLE bit is not set on Port 2, the PCP
- * classification on Port 2 is still active, but the driver disabled
- * multiqueue support and made frame prioritization inactive for
- * all ports. In this case, we can't enable any apptrust on Port 1.
- */
- if (!(port2_data & KSZ8_PORT_802_1P_ENABLE)) {
- dev_err(dev->dev, "Not possible to enable any apptrust on Port 1 if PCP apptrust on Port 2 is disabled\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-/**
- * ksz88x3_port2_apptrust_quirk - Quirk for apptrust configuration on Port 2
- * of KSZ88x3 devices
- * @dev: Pointer to the KSZ switch device structure
- * @port: Port number for which to set the apptrust selectors
- * @reg: Register address for the apptrust configuration
- * @port2_data: Data to set for the apptrust configuration
- *
- * This function implements a quirk for apptrust configuration on Port 2 of
- * KSZ88x3 devices. It ensures that DSCP apptrust is not working on Port 2 and
- * that it is not possible to disable PCP on Port 2. The only way to disable PCP
- * on Port 2 is to disable multiple queues on the switch.
- *
- * Return: 0 on success, or a negative error code on failure
- */
-static int ksz88x3_port2_apptrust_quirk(struct ksz_device *dev, int port,
- int reg, u8 port2_data)
-{
- struct dsa_switch *ds = dev->ds;
- u8 port1_data;
- int ret;
-
- /* First validate Port 2 configuration. DiffServ/DSCP is not working
- * on this port.
- */
- if (port2_data & KSZ8_PORT_DIFFSERV_ENABLE) {
- dev_err(dev->dev, "DSCP apptrust is not working on Port 2\n");
- return -EINVAL;
- }
-
- /* If PCP support is requested, we need to enable all queues on the
- * switch to make PCP priority working on Port 2.
- */
- if (port2_data & KSZ8_PORT_802_1P_ENABLE)
- return ksz8_all_queues_split(dev, dev->info->num_tx_queues);
-
- /* We got request to disable PCP priority on Port 2.
- * Now, we need to compare Port 2 configuration with Port 1
- * configuration.
- */
- ret = ksz_pread8(dev, KSZ_PORT_1, reg, &port1_data);
- if (ret)
- return ret;
-
- /* If Port 1 has any apptrust enabled, we can't disable multiple queues
- * on the switch, so we can't disable PCP on Port 2.
- */
- if (port1_data & (KSZ8_PORT_802_1P_ENABLE | KSZ8_PORT_DIFFSERV_ENABLE)) {
- dev_err(dev->dev, "Not possible to disable PCP on Port 2 if any apptrust is enabled on Port 1\n");
- return -EINVAL;
- }
-
- /* Now we need to ensure that default priority on Port 1 is set to 0
- * otherwise we can't disable multiqueue support on the switch.
- */
- ret = ksz_port_get_default_prio(ds, KSZ_PORT_1);
- if (ret < 0) {
- return ret;
- } else if (ret) {
- dev_err(dev->dev, "Not possible to disable PCP on Port 2 if non zero default priority is set on Port 1\n");
- return -EINVAL;
- }
-
- /* Port 1 has no apptrust or default priority set and we got request to
- * disable PCP on Port 2. We can disable multiqueue support to disable
- * PCP on Port 2.
- */
- return ksz8_all_queues_split(dev, 1);
-}
-
-/**
- * ksz88x3_port_apptrust_quirk - Quirk for apptrust configuration on KSZ88x3
- * devices
- * @dev: Pointer to the KSZ switch device structure
- * @port: Port number for which to set the apptrust selectors
- * @reg: Register address for the apptrust configuration
- * @data: Data to set for the apptrust configuration
- *
- * This function implements a quirk for apptrust configuration on KSZ88x3
- * devices. It ensures that apptrust configuration on Port 1 and
- * Port 2 is done in agreement with each other.
- *
- * Return: 0 on success, or a negative error code on failure
- */
-static int ksz88x3_port_apptrust_quirk(struct ksz_device *dev, int port,
- int reg, u8 data)
-{
- if (port == KSZ_PORT_1)
- return ksz88x3_port1_apptrust_quirk(dev, port, reg, data);
- else if (port == KSZ_PORT_2)
- return ksz88x3_port2_apptrust_quirk(dev, port, reg, data);
-
- return 0;
-}
-
-/**
* ksz_port_set_apptrust - Sets the apptrust selectors for a port on a KSZ
* switch
* @ds: Pointer to the DSA switch structure
@@ -707,12 +514,6 @@ int ksz_port_set_apptrust(struct dsa_switch *ds, int port,
}
}
- if (ksz_is_ksz88x3(dev)) {
- ret = ksz88x3_port_apptrust_quirk(dev, port, reg, data);
- if (ret)
- return ret;
- }
-
return ksz_prmw8(dev, port, reg, mask, data);
}
@@ -799,21 +600,5 @@ int ksz_dcb_init_port(struct ksz_device *dev, int port)
*/
int ksz_dcb_init(struct ksz_device *dev)
{
- int ret;
-
- ret = ksz_init_global_dscp_map(dev);
- if (ret)
- return ret;
-
- /* Enable 802.1p priority control on Port 2 during switch initialization.
- * This setup is critical for the apptrust functionality on Port 1, which
- * relies on the priority settings of Port 2. Note: Port 1 is naturally
- * configured before Port 2, necessitating this configuration order.
- */
- if (ksz_is_ksz88x3(dev))
- return ksz_prmw8(dev, KSZ_PORT_2, KSZ8_REG_PORT_1_CTRL_0,
- KSZ8_PORT_802_1P_ENABLE,
- KSZ8_PORT_802_1P_ENABLE);
-
- return 0;
+ return ksz_init_global_dscp_map(dev);
}
diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
index 1c83af805209..d70399bce5b9 100644
--- a/drivers/net/dsa/mt7530.c
+++ b/drivers/net/dsa/mt7530.c
@@ -2050,131 +2050,6 @@ mt7530_setup_gpio(struct mt7530_priv *priv)
}
#endif /* CONFIG_GPIOLIB */
-static irqreturn_t
-mt7530_irq_thread_fn(int irq, void *dev_id)
-{
- struct mt7530_priv *priv = dev_id;
- bool handled = false;
- u32 val;
- int p;
-
- mt7530_mutex_lock(priv);
- val = mt7530_mii_read(priv, MT7530_SYS_INT_STS);
- mt7530_mii_write(priv, MT7530_SYS_INT_STS, val);
- mt7530_mutex_unlock(priv);
-
- for (p = 0; p < MT7530_NUM_PHYS; p++) {
- if (BIT(p) & val) {
- unsigned int irq;
-
- irq = irq_find_mapping(priv->irq_domain, p);
- handle_nested_irq(irq);
- handled = true;
- }
- }
-
- return IRQ_RETVAL(handled);
-}
-
-static void
-mt7530_irq_mask(struct irq_data *d)
-{
- struct mt7530_priv *priv = irq_data_get_irq_chip_data(d);
-
- priv->irq_enable &= ~BIT(d->hwirq);
-}
-
-static void
-mt7530_irq_unmask(struct irq_data *d)
-{
- struct mt7530_priv *priv = irq_data_get_irq_chip_data(d);
-
- priv->irq_enable |= BIT(d->hwirq);
-}
-
-static void
-mt7530_irq_bus_lock(struct irq_data *d)
-{
- struct mt7530_priv *priv = irq_data_get_irq_chip_data(d);
-
- mt7530_mutex_lock(priv);
-}
-
-static void
-mt7530_irq_bus_sync_unlock(struct irq_data *d)
-{
- struct mt7530_priv *priv = irq_data_get_irq_chip_data(d);
-
- mt7530_mii_write(priv, MT7530_SYS_INT_EN, priv->irq_enable);
- mt7530_mutex_unlock(priv);
-}
-
-static struct irq_chip mt7530_irq_chip = {
- .name = KBUILD_MODNAME,
- .irq_mask = mt7530_irq_mask,
- .irq_unmask = mt7530_irq_unmask,
- .irq_bus_lock = mt7530_irq_bus_lock,
- .irq_bus_sync_unlock = mt7530_irq_bus_sync_unlock,
-};
-
-static int
-mt7530_irq_map(struct irq_domain *domain, unsigned int irq,
- irq_hw_number_t hwirq)
-{
- irq_set_chip_data(irq, domain->host_data);
- irq_set_chip_and_handler(irq, &mt7530_irq_chip, handle_simple_irq);
- irq_set_nested_thread(irq, true);
- irq_set_noprobe(irq);
-
- return 0;
-}
-
-static const struct irq_domain_ops mt7530_irq_domain_ops = {
- .map = mt7530_irq_map,
- .xlate = irq_domain_xlate_onecell,
-};
-
-static void
-mt7988_irq_mask(struct irq_data *d)
-{
- struct mt7530_priv *priv = irq_data_get_irq_chip_data(d);
-
- priv->irq_enable &= ~BIT(d->hwirq);
- mt7530_mii_write(priv, MT7530_SYS_INT_EN, priv->irq_enable);
-}
-
-static void
-mt7988_irq_unmask(struct irq_data *d)
-{
- struct mt7530_priv *priv = irq_data_get_irq_chip_data(d);
-
- priv->irq_enable |= BIT(d->hwirq);
- mt7530_mii_write(priv, MT7530_SYS_INT_EN, priv->irq_enable);
-}
-
-static struct irq_chip mt7988_irq_chip = {
- .name = KBUILD_MODNAME,
- .irq_mask = mt7988_irq_mask,
- .irq_unmask = mt7988_irq_unmask,
-};
-
-static int
-mt7988_irq_map(struct irq_domain *domain, unsigned int irq,
- irq_hw_number_t hwirq)
-{
- irq_set_chip_data(irq, domain->host_data);
- irq_set_chip_and_handler(irq, &mt7988_irq_chip, handle_simple_irq);
- irq_set_nested_thread(irq, true);
- irq_set_noprobe(irq);
-
- return 0;
-}
-
-static const struct irq_domain_ops mt7988_irq_domain_ops = {
- .map = mt7988_irq_map,
- .xlate = irq_domain_xlate_onecell,
-};
-
static void
mt7530_setup_mdio_irq(struct mt7530_priv *priv)
{
@@ -2191,49 +2066,72 @@ mt7530_setup_mdio_irq(struct mt7530_priv *priv)
}
}
+static const struct regmap_irq mt7530_irqs[] = {
+ REGMAP_IRQ_REG_LINE(0, 32), /* PHY0_LC */
+ REGMAP_IRQ_REG_LINE(1, 32), /* PHY1_LC */
+ REGMAP_IRQ_REG_LINE(2, 32), /* PHY2_LC */
+ REGMAP_IRQ_REG_LINE(3, 32), /* PHY3_LC */
+ REGMAP_IRQ_REG_LINE(4, 32), /* PHY4_LC */
+ REGMAP_IRQ_REG_LINE(5, 32), /* PHY5_LC */
+ REGMAP_IRQ_REG_LINE(6, 32), /* PHY6_LC */
+ REGMAP_IRQ_REG_LINE(16, 32), /* MAC_PC */
+ REGMAP_IRQ_REG_LINE(17, 32), /* BMU */
+ REGMAP_IRQ_REG_LINE(18, 32), /* MIB */
+ REGMAP_IRQ_REG_LINE(22, 32), /* ARL_COL_FULL_COL */
+ REGMAP_IRQ_REG_LINE(23, 32), /* ARL_COL_FULL */
+ REGMAP_IRQ_REG_LINE(24, 32), /* ARL_TBL_ERR */
+ REGMAP_IRQ_REG_LINE(25, 32), /* ARL_PKT_QERR */
+ REGMAP_IRQ_REG_LINE(26, 32), /* ARL_EQ_ERR */
+ REGMAP_IRQ_REG_LINE(27, 32), /* ARL_PKT_BC */
+ REGMAP_IRQ_REG_LINE(28, 32), /* ARL_SEC_IG1X */
+ REGMAP_IRQ_REG_LINE(29, 32), /* ARL_SEC_VLAN */
+ REGMAP_IRQ_REG_LINE(30, 32), /* ARL_SEC_TAG */
+ REGMAP_IRQ_REG_LINE(31, 32), /* ACL */
+};
+
+static const struct regmap_irq_chip mt7530_regmap_irq_chip = {
+ .name = KBUILD_MODNAME,
+ .status_base = MT7530_SYS_INT_STS,
+ .unmask_base = MT7530_SYS_INT_EN,
+ .ack_base = MT7530_SYS_INT_STS,
+ .init_ack_masked = true,
+ .irqs = mt7530_irqs,
+ .num_irqs = ARRAY_SIZE(mt7530_irqs),
+ .num_regs = 1,
+};
+
static int
mt7530_setup_irq(struct mt7530_priv *priv)
{
+ struct regmap_irq_chip_data *irq_data;
struct device *dev = priv->dev;
struct device_node *np = dev->of_node;
- int ret;
+ int irq, ret;
if (!of_property_read_bool(np, "interrupt-controller")) {
dev_info(dev, "no interrupt support\n");
return 0;
}
- priv->irq = of_irq_get(np, 0);
- if (priv->irq <= 0) {
- dev_err(dev, "failed to get parent IRQ: %d\n", priv->irq);
- return priv->irq ? : -EINVAL;
- }
-
- if (priv->id == ID_MT7988 || priv->id == ID_EN7581)
- priv->irq_domain = irq_domain_add_linear(np, MT7530_NUM_PHYS,
- &mt7988_irq_domain_ops,
- priv);
- else
- priv->irq_domain = irq_domain_add_linear(np, MT7530_NUM_PHYS,
- &mt7530_irq_domain_ops,
- priv);
-
- if (!priv->irq_domain) {
- dev_err(dev, "failed to create IRQ domain\n");
- return -ENOMEM;
+ irq = of_irq_get(np, 0);
+ if (irq <= 0) {
+ dev_err(dev, "failed to get parent IRQ: %d\n", irq);
+ return irq ? : -EINVAL;
}
/* This register must be set for MT7530 to properly fire interrupts */
if (priv->id == ID_MT7530 || priv->id == ID_MT7621)
mt7530_set(priv, MT7530_TOP_SIG_CTRL, TOP_SIG_CTRL_NORMAL);
- ret = request_threaded_irq(priv->irq, NULL, mt7530_irq_thread_fn,
- IRQF_ONESHOT, KBUILD_MODNAME, priv);
- if (ret) {
- irq_domain_remove(priv->irq_domain);
- dev_err(dev, "failed to request IRQ: %d\n", ret);
+ ret = devm_regmap_add_irq_chip_fwnode(dev, dev_fwnode(dev),
+ priv->regmap, irq,
+ IRQF_ONESHOT,
+ 0, &mt7530_regmap_irq_chip,
+ &irq_data);
+ if (ret)
return ret;
- }
+
+ priv->irq_domain = regmap_irq_get_domain(irq_data);
return 0;
}
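The hand-rolled irq_chip and irq_domain above are replaced by the generic regmap-irq helper: the chip description maps each status bit of MT7530_SYS_INT_STS/MT7530_SYS_INT_EN onto a hwirq, and devm_regmap_add_irq_chip_fwnode() supplies the threaded handler, ack handling and domain. Below is a minimal, self-contained sketch of the same registration pattern; the register offsets and "demo" names are illustrative and not taken from the driver.

#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/property.h>
#include <linux/regmap.h>

static const struct regmap_irq demo_irqs[] = {
	REGMAP_IRQ_REG_LINE(0, 32),	/* one hwirq per status bit */
	REGMAP_IRQ_REG_LINE(1, 32),
};

static const struct regmap_irq_chip demo_irq_chip = {
	.name		= "demo",
	.status_base	= 0x10,		/* status register offset (assumed) */
	.unmask_base	= 0x14,		/* enable register offset (assumed) */
	.ack_base	= 0x10,		/* status is write-1-to-clear */
	.init_ack_masked = true,
	.irqs		= demo_irqs,
	.num_irqs	= ARRAY_SIZE(demo_irqs),
	.num_regs	= 1,
};

static int demo_add_irqchip(struct device *dev, struct regmap *map, int irq,
			    struct irq_domain **domain)
{
	struct regmap_irq_chip_data *data;
	int ret;

	ret = devm_regmap_add_irq_chip_fwnode(dev, dev_fwnode(dev), map, irq,
					      IRQF_ONESHOT, 0, &demo_irq_chip,
					      &data);
	if (ret)
		return ret;

	/* consumers map hwirqs through this domain, e.g. for PHY IRQs */
	*domain = regmap_irq_get_domain(data);
	return 0;
}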
@@ -2253,26 +2151,6 @@ mt7530_free_mdio_irq(struct mt7530_priv *priv)
}
}
-static void
-mt7530_free_irq_common(struct mt7530_priv *priv)
-{
- free_irq(priv->irq, priv);
- irq_domain_remove(priv->irq_domain);
-}
-
-static void
-mt7530_free_irq(struct mt7530_priv *priv)
-{
- struct device_node *mnp, *np = priv->dev->of_node;
-
- mnp = of_get_child_by_name(np, "mdio");
- if (!mnp)
- mt7530_free_mdio_irq(priv);
- of_node_put(mnp);
-
- mt7530_free_irq_common(priv);
-}
-
static int
mt7530_setup_mdio(struct mt7530_priv *priv)
{
@@ -2307,13 +2185,13 @@ mt7530_setup_mdio(struct mt7530_priv *priv)
bus->parent = dev;
bus->phy_mask = ~ds->phys_mii_mask;
- if (priv->irq && !mnp)
+ if (priv->irq_domain && !mnp)
mt7530_setup_mdio_irq(priv);
ret = devm_of_mdiobus_register(dev, bus, mnp);
if (ret) {
dev_err(dev, "failed to register MDIO bus: %d\n", ret);
- if (priv->irq && !mnp)
+ if (priv->irq_domain && !mnp)
mt7530_free_mdio_irq(priv);
}
@@ -2586,12 +2464,18 @@ mt7531_setup_common(struct dsa_switch *ds)
/* Allow mirroring frames received on the local port (monitor port). */
mt7530_set(priv, MT753X_AGC, LOCAL_EN);
+ /* Enable Special Tag for rx frames */
+ if (priv->id == ID_EN7581)
+ mt7530_write(priv, MT753X_CPORT_SPTAG_CFG,
+ CPORT_SW2FE_STAG_EN | CPORT_FE2SW_STAG_EN);
+
/* Flush the FDB table */
ret = mt7530_fdb_cmd(priv, MT7530_FDB_FLUSH, NULL);
if (ret < 0)
return ret;
- return 0;
+ /* Setup VLAN ID 0 for VLAN-unaware bridges */
+ return mt7530_setup_vlan0(priv);
}
static int
@@ -2687,11 +2571,6 @@ mt7531_setup(struct dsa_switch *ds)
if (ret)
return ret;
- /* Setup VLAN ID 0 for VLAN-unaware bridges */
- ret = mt7530_setup_vlan0(priv);
- if (ret)
- return ret;
-
ds->assisted_learning_on_cpu_port = true;
ds->mtu_enforcement_ingress = true;
@@ -2957,28 +2836,61 @@ static void mt753x_phylink_mac_link_up(struct phylink_config *config,
mcr |= PMCR_FORCE_RX_FC_EN;
}
- if (mode == MLO_AN_PHY && phydev && phy_init_eee(phydev, false) >= 0) {
- switch (speed) {
- case SPEED_1000:
- case SPEED_2500:
- mcr |= PMCR_FORCE_EEE1G;
- break;
- case SPEED_100:
- mcr |= PMCR_FORCE_EEE100;
- break;
- }
- }
-
mt7530_set(priv, MT753X_PMCR_P(dp->index), mcr);
}
+static void mt753x_phylink_mac_disable_tx_lpi(struct phylink_config *config)
+{
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct mt7530_priv *priv = dp->ds->priv;
+
+ mt7530_clear(priv, MT753X_PMCR_P(dp->index),
+ PMCR_FORCE_EEE1G | PMCR_FORCE_EEE100);
+}
+
+static int mt753x_phylink_mac_enable_tx_lpi(struct phylink_config *config,
+ u32 timer, bool tx_clock_stop)
+{
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct mt7530_priv *priv = dp->ds->priv;
+ u32 val;
+
+ /* If the timer is zero, then set LPI_MODE_EN, which allows the
+ * system to enter LPI mode immediately rather than waiting for
+ * the LPI threshold.
+ */
+ if (!timer)
+ val = LPI_MODE_EN;
+ else if (FIELD_FIT(LPI_THRESH_MASK, timer))
+ val = FIELD_PREP(LPI_THRESH_MASK, timer);
+ else
+ val = LPI_THRESH_MASK;
+
+ mt7530_rmw(priv, MT753X_PMEEECR_P(dp->index),
+ LPI_THRESH_MASK | LPI_MODE_EN, val);
+
+ mt7530_set(priv, MT753X_PMCR_P(dp->index),
+ PMCR_FORCE_EEE1G | PMCR_FORCE_EEE100);
+
+ return 0;
+}
+
static void mt753x_phylink_get_caps(struct dsa_switch *ds, int port,
struct phylink_config *config)
{
struct mt7530_priv *priv = ds->priv;
+ u32 eeecr;
config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE;
+ config->lpi_capabilities = MAC_100FD | MAC_1000FD | MAC_2500FD;
+
+ eeecr = mt7530_read(priv, MT753X_PMEEECR_P(port));
+ /* tx_lpi_timer should be in microseconds. The time units for
+ * LPI threshold are unspecified.
+ */
+ config->lpi_timer_default = FIELD_GET(LPI_THRESH_MASK, eeecr);
+
priv->info->mac_port_get_caps(ds, port, config);
}
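mac_enable_tx_lpi() above clamps the requested tx_lpi_timer into the LPI_THRESH_MASK field: a zero timer selects immediate LPI entry through LPI_MODE_EN, a value that fits the field is programmed directly, and anything larger saturates at the field maximum. A small stand-alone sketch of that clamp pattern with the bitfield helpers; the register layout here is made up for illustration.

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define DEMO_LPI_MODE_EN	BIT(31)
#define DEMO_LPI_THRESH_MASK	GENMASK(11, 0)

/* Translate a requested timer value into the register field contents. */
static u32 demo_lpi_field(u32 timer)
{
	if (!timer)				/* enter LPI immediately */
		return DEMO_LPI_MODE_EN;
	if (FIELD_FIT(DEMO_LPI_THRESH_MASK, timer))
		return FIELD_PREP(DEMO_LPI_THRESH_MASK, timer);
	return DEMO_LPI_THRESH_MASK;		/* saturate at the maximum */
}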
@@ -3063,24 +2975,21 @@ mt753x_setup(struct dsa_switch *ds)
return ret;
ret = mt7530_setup_mdio(priv);
- if (ret && priv->irq)
- mt7530_free_irq_common(priv);
if (ret)
return ret;
/* Initialise the PCS devices */
for (i = 0; i < priv->ds->num_ports; i++) {
priv->pcs[i].pcs.ops = priv->info->pcs_ops;
- priv->pcs[i].pcs.neg_mode = true;
priv->pcs[i].priv = priv;
priv->pcs[i].port = i;
}
- if (priv->create_sgmii) {
+ if (priv->create_sgmii)
ret = priv->create_sgmii(priv);
- if (ret && priv->irq)
- mt7530_free_irq(priv);
- }
+
+ if (ret && priv->irq_domain)
+ mt7530_free_mdio_irq(priv);
return ret;
}
@@ -3088,18 +2997,9 @@ mt753x_setup(struct dsa_switch *ds)
static int mt753x_set_mac_eee(struct dsa_switch *ds, int port,
struct ethtool_keee *e)
{
- struct mt7530_priv *priv = ds->priv;
- u32 set, mask = LPI_THRESH_MASK | LPI_MODE_EN;
-
if (e->tx_lpi_timer > 0xFFF)
return -EINVAL;
- set = LPI_THRESH_SET(e->tx_lpi_timer);
- if (!e->tx_lpi_enabled)
- /* Force LPI Mode without a delay */
- set |= LPI_MODE_EN;
- mt7530_rmw(priv, MT753X_PMEEECR_P(port), mask, set);
-
return 0;
}
@@ -3238,6 +3138,8 @@ static const struct phylink_mac_ops mt753x_phylink_mac_ops = {
.mac_config = mt753x_phylink_mac_config,
.mac_link_down = mt753x_phylink_mac_link_down,
.mac_link_up = mt753x_phylink_mac_link_up,
+ .mac_disable_tx_lpi = mt753x_phylink_mac_disable_tx_lpi,
+ .mac_enable_tx_lpi = mt753x_phylink_mac_enable_tx_lpi,
};
const struct mt753x_info mt753x_table[] = {
@@ -3331,8 +3233,8 @@ EXPORT_SYMBOL_GPL(mt7530_probe_common);
void
mt7530_remove_common(struct mt7530_priv *priv)
{
- if (priv->irq)
- mt7530_free_irq(priv);
+ if (priv->irq_domain)
+ mt7530_free_mdio_irq(priv);
dsa_unregister_switch(priv->ds);
diff --git a/drivers/net/dsa/mt7530.h b/drivers/net/dsa/mt7530.h
index 448200689f49..c3ea403d7acf 100644
--- a/drivers/net/dsa/mt7530.h
+++ b/drivers/net/dsa/mt7530.h
@@ -627,6 +627,10 @@ enum mt7531_xtal_fsel {
#define MT7531_GPIO12_RG_RXD3_MASK GENMASK(19, 16)
#define MT7531_EXT_P_MDIO_12 (2 << 16)
+#define MT753X_CPORT_SPTAG_CFG 0x7c10
+#define CPORT_SW2FE_STAG_EN BIT(1)
+#define CPORT_FE2SW_STAG_EN BIT(0)
+
/* Registers for LED GPIO control (MT7530 only)
* All registers follow this pattern:
* [ 2: 0] port 0
@@ -815,9 +819,7 @@ struct mt753x_info {
* @p5_mode: Holding the current mode of port 5 of the MT7530 switch
* @p5_sgmii: Flag for distinguishing if port 5 of the MT7531 switch
* has got SGMII
- * @irq: IRQ number of the switch
* @irq_domain: IRQ domain of the switch irq_chip
- * @irq_enable: IRQ enable bits, synced to SYS_INT_EN
* @create_sgmii: Pointer to function creating SGMII PCS instance(s)
* @active_cpu_ports: Holding the active CPU ports
* @mdiodev: The pointer to the MDIO device structure
@@ -842,9 +844,7 @@ struct mt7530_priv {
struct mt753x_pcs pcs[MT7530_NUM_PORTS];
/* protect among processes for registers access*/
struct mutex reg_mutex;
- int irq;
struct irq_domain *irq_domain;
- u32 irq_enable;
int (*create_sgmii)(struct mt7530_priv *priv);
u8 active_cpu_ports;
struct mdio_device *mdiodev;
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index 68d1e891752b..901929f96b38 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -2208,13 +2208,11 @@ mv88e6xxx_port_vlan_prepare(struct dsa_switch *ds, int port,
return err;
}
-static int mv88e6xxx_port_db_load_purge(struct mv88e6xxx_chip *chip, int port,
- const unsigned char *addr, u16 vid,
- u8 state)
+static int mv88e6xxx_port_db_get(struct mv88e6xxx_chip *chip,
+ const unsigned char *addr, u16 vid,
+ u16 *fid, struct mv88e6xxx_atu_entry *entry)
{
- struct mv88e6xxx_atu_entry entry;
struct mv88e6xxx_vtu_entry vlan;
- u16 fid;
int err;
/* Ports have two private address databases: one for when the port is
@@ -2225,7 +2223,7 @@ static int mv88e6xxx_port_db_load_purge(struct mv88e6xxx_chip *chip, int port,
* VLAN ID into the port's database used for VLAN-unaware bridging.
*/
if (vid == 0) {
- fid = MV88E6XXX_FID_BRIDGED;
+ *fid = MV88E6XXX_FID_BRIDGED;
} else {
err = mv88e6xxx_vtu_get(chip, vid, &vlan);
if (err)
@@ -2235,14 +2233,39 @@ static int mv88e6xxx_port_db_load_purge(struct mv88e6xxx_chip *chip, int port,
if (!vlan.valid)
return -EOPNOTSUPP;
- fid = vlan.fid;
+ *fid = vlan.fid;
}
- entry.state = 0;
- ether_addr_copy(entry.mac, addr);
- eth_addr_dec(entry.mac);
+ entry->state = 0;
+ ether_addr_copy(entry->mac, addr);
+ eth_addr_dec(entry->mac);
+
+ return mv88e6xxx_g1_atu_getnext(chip, *fid, entry);
+}
+
+static bool mv88e6xxx_port_db_find(struct mv88e6xxx_chip *chip,
+ const unsigned char *addr, u16 vid)
+{
+ struct mv88e6xxx_atu_entry entry;
+ u16 fid;
+ int err;
+
+ err = mv88e6xxx_port_db_get(chip, addr, vid, &fid, &entry);
+ if (err)
+ return false;
+
+ return entry.state && ether_addr_equal(entry.mac, addr);
+}
+
+static int mv88e6xxx_port_db_load_purge(struct mv88e6xxx_chip *chip, int port,
+ const unsigned char *addr, u16 vid,
+ u8 state)
+{
+ struct mv88e6xxx_atu_entry entry;
+ u16 fid;
+ int err;
- err = mv88e6xxx_g1_atu_getnext(chip, fid, &entry);
+ err = mv88e6xxx_port_db_get(chip, addr, vid, &fid, &entry);
if (err)
return err;
@@ -2846,6 +2869,13 @@ static int mv88e6xxx_port_fdb_add(struct dsa_switch *ds, int port,
mv88e6xxx_reg_lock(chip);
err = mv88e6xxx_port_db_load_purge(chip, port, addr, vid,
MV88E6XXX_G1_ATU_DATA_STATE_UC_STATIC);
+ if (err)
+ goto out;
+
+ if (!mv88e6xxx_port_db_find(chip, addr, vid))
+ err = -ENOSPC;
+
+out:
mv88e6xxx_reg_unlock(chip);
return err;
@@ -3644,6 +3674,21 @@ static int mv88e6xxx_stats_setup(struct mv88e6xxx_chip *chip)
return mv88e6xxx_g1_stats_clear(chip);
}
+static int mv88e6320_setup_errata(struct mv88e6xxx_chip *chip)
+{
+ u16 dummy;
+ int err;
+
+ /* Workaround for erratum
+ * 3.3 RGMII timing may be out of spec when transmit delay is enabled
+ */
+ err = mv88e6xxx_port_hidden_write(chip, 0, 0xf, 0x7, 0xe000);
+ if (err)
+ return err;
+
+ return mv88e6xxx_port_hidden_read(chip, 0, 0xf, 0x7, &dummy);
+}
+
/* Check if the errata has already been applied. */
static bool mv88e6390_setup_errata_applied(struct mv88e6xxx_chip *chip)
{
@@ -5100,6 +5145,7 @@ static const struct mv88e6xxx_ops mv88e6290_ops = {
static const struct mv88e6xxx_ops mv88e6320_ops = {
/* MV88E6XXX_FAMILY_6320 */
+ .setup_errata = mv88e6320_setup_errata,
.ieee_pri_map = mv88e6085_g1_ieee_pri_map,
.ip_pri_map = mv88e6085_g1_ip_pri_map,
.irl_init_all = mv88e6352_g2_irl_init_all,
@@ -5115,6 +5161,7 @@ static const struct mv88e6xxx_ops mv88e6320_ops = {
.port_set_rgmii_delay = mv88e6320_port_set_rgmii_delay,
.port_set_speed_duplex = mv88e6185_port_set_speed_duplex,
.port_tag_remap = mv88e6095_port_tag_remap,
+ .port_set_policy = mv88e6352_port_set_policy,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
.port_set_ucast_flood = mv88e6352_port_set_ucast_flood,
.port_set_mcast_flood = mv88e6352_port_set_mcast_flood,
@@ -5139,8 +5186,10 @@ static const struct mv88e6xxx_ops mv88e6320_ops = {
.hardware_reset_pre = mv88e6xxx_g2_eeprom_wait,
.hardware_reset_post = mv88e6xxx_g2_eeprom_wait,
.reset = mv88e6352_g1_reset,
- .vtu_getnext = mv88e6185_g1_vtu_getnext,
- .vtu_loadpurge = mv88e6185_g1_vtu_loadpurge,
+ .vtu_getnext = mv88e6352_g1_vtu_getnext,
+ .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
+ .stu_getnext = mv88e6352_g1_stu_getnext,
+ .stu_loadpurge = mv88e6352_g1_stu_loadpurge,
.gpio_ops = &mv88e6352_gpio_ops,
.avb_ops = &mv88e6352_avb_ops,
.ptp_ops = &mv88e6352_ptp_ops,
@@ -5149,6 +5198,7 @@ static const struct mv88e6xxx_ops mv88e6320_ops = {
static const struct mv88e6xxx_ops mv88e6321_ops = {
/* MV88E6XXX_FAMILY_6320 */
+ .setup_errata = mv88e6320_setup_errata,
.ieee_pri_map = mv88e6085_g1_ieee_pri_map,
.ip_pri_map = mv88e6085_g1_ip_pri_map,
.irl_init_all = mv88e6352_g2_irl_init_all,
@@ -5164,6 +5214,7 @@ static const struct mv88e6xxx_ops mv88e6321_ops = {
.port_set_rgmii_delay = mv88e6320_port_set_rgmii_delay,
.port_set_speed_duplex = mv88e6185_port_set_speed_duplex,
.port_tag_remap = mv88e6095_port_tag_remap,
+ .port_set_policy = mv88e6352_port_set_policy,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
.port_set_ucast_flood = mv88e6352_port_set_ucast_flood,
.port_set_mcast_flood = mv88e6352_port_set_mcast_flood,
@@ -5187,8 +5238,10 @@ static const struct mv88e6xxx_ops mv88e6321_ops = {
.hardware_reset_pre = mv88e6xxx_g2_eeprom_wait,
.hardware_reset_post = mv88e6xxx_g2_eeprom_wait,
.reset = mv88e6352_g1_reset,
- .vtu_getnext = mv88e6185_g1_vtu_getnext,
- .vtu_loadpurge = mv88e6185_g1_vtu_loadpurge,
+ .vtu_getnext = mv88e6352_g1_vtu_getnext,
+ .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
+ .stu_getnext = mv88e6352_g1_stu_getnext,
+ .stu_loadpurge = mv88e6352_g1_stu_loadpurge,
.gpio_ops = &mv88e6352_gpio_ops,
.avb_ops = &mv88e6352_avb_ops,
.ptp_ops = &mv88e6352_ptp_ops,
@@ -5788,7 +5841,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.global1_addr = 0x1b,
.global2_addr = 0x1c,
.age_time_coeff = 3750,
- .atu_move_port_mask = 0x1f,
+ .atu_move_port_mask = 0xf,
.g1_irqs = 9,
.g2_irqs = 10,
.stats_type = STATS_TYPE_BANK0 | STATS_TYPE_BANK1,
@@ -6206,9 +6259,11 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_databases = 4096,
.num_macs = 8192,
.num_ports = 7,
- .num_internal_phys = 5,
+ .num_internal_phys = 2,
+ .internal_phys_offset = 3,
.num_gpio = 15,
.max_vid = 4095,
+ .max_sid = 63,
.port_base_addr = 0x10,
.phy_base_addr = 0x0,
.global1_addr = 0x1b,
@@ -6232,9 +6287,11 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_databases = 4096,
.num_macs = 8192,
.num_ports = 7,
- .num_internal_phys = 5,
+ .num_internal_phys = 2,
+ .internal_phys_offset = 3,
.num_gpio = 15,
.max_vid = 4095,
+ .max_sid = 63,
.port_base_addr = 0x10,
.phy_base_addr = 0x0,
.global1_addr = 0x1b,
@@ -6244,6 +6301,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.g2_irqs = 10,
.stats_type = STATS_TYPE_BANK0 | STATS_TYPE_BANK1,
.atu_move_port_mask = 0xf,
+ .pvt = true,
.multi_chip = true,
.edsa_support = MV88E6XXX_EDSA_SUPPORTED,
.ptp_support = true,
@@ -6266,7 +6324,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.global1_addr = 0x1b,
.global2_addr = 0x1c,
.age_time_coeff = 3750,
- .atu_move_port_mask = 0x1f,
+ .atu_move_port_mask = 0xf,
.g1_irqs = 9,
.g2_irqs = 10,
.stats_type = STATS_TYPE_BANK0 | STATS_TYPE_BANK1,
@@ -6614,6 +6672,13 @@ static int mv88e6xxx_port_mdb_add(struct dsa_switch *ds, int port,
mv88e6xxx_reg_lock(chip);
err = mv88e6xxx_port_db_load_purge(chip, port, mdb->addr, mdb->vid,
MV88E6XXX_G1_ATU_DATA_STATE_MC_STATIC);
+ if (err)
+ goto out;
+
+ if (!mv88e6xxx_port_db_find(chip, mdb->addr, mdb->vid))
+ err = -ENOSPC;
+
+out:
mv88e6xxx_reg_unlock(chip);
return err;
diff --git a/drivers/net/dsa/mv88e6xxx/pcs-6185.c b/drivers/net/dsa/mv88e6xxx/pcs-6185.c
index 75ed1fa500a5..af7e06d265f7 100644
--- a/drivers/net/dsa/mv88e6xxx/pcs-6185.c
+++ b/drivers/net/dsa/mv88e6xxx/pcs-6185.c
@@ -138,7 +138,6 @@ static int mv88e6185_pcs_init(struct mv88e6xxx_chip *chip, int port)
mpcs->chip = chip;
mpcs->port = port;
mpcs->phylink_pcs.ops = &mv88e6185_phylink_pcs_ops;
- mpcs->phylink_pcs.neg_mode = true;
irq = mv88e6xxx_serdes_irq_mapping(chip, port);
if (irq) {
diff --git a/drivers/net/dsa/mv88e6xxx/pcs-6352.c b/drivers/net/dsa/mv88e6xxx/pcs-6352.c
index 143fe21d1834..36993400837e 100644
--- a/drivers/net/dsa/mv88e6xxx/pcs-6352.c
+++ b/drivers/net/dsa/mv88e6xxx/pcs-6352.c
@@ -275,7 +275,6 @@ static struct marvell_c22_pcs *marvell_c22_pcs_alloc(struct device *dev,
mpcs->mdio.bus = bus;
mpcs->mdio.addr = addr;
mpcs->phylink_pcs.ops = &marvell_c22_pcs_ops;
- mpcs->phylink_pcs.neg_mode = true;
return mpcs;
}
diff --git a/drivers/net/dsa/mv88e6xxx/pcs-639x.c b/drivers/net/dsa/mv88e6xxx/pcs-639x.c
index 59f63d6beec8..5db17c0b77f5 100644
--- a/drivers/net/dsa/mv88e6xxx/pcs-639x.c
+++ b/drivers/net/dsa/mv88e6xxx/pcs-639x.c
@@ -565,9 +565,7 @@ static int mv88e6390_pcs_init(struct mv88e6xxx_chip *chip, int port)
return -ENOMEM;
mpcs->sgmii_pcs.ops = &mv88e639x_sgmii_pcs_ops;
- mpcs->sgmii_pcs.neg_mode = true;
mpcs->xg_pcs.ops = &mv88e6390_xg_pcs_ops;
- mpcs->xg_pcs.neg_mode = true;
if (chip->info->prod_num == MV88E6XXX_PORT_SWITCH_ID_PROD_6190X ||
chip->info->prod_num == MV88E6XXX_PORT_SWITCH_ID_PROD_6390X)
@@ -945,9 +943,7 @@ static int mv88e6393x_pcs_init(struct mv88e6xxx_chip *chip, int port)
return -ENOMEM;
mpcs->sgmii_pcs.ops = &mv88e6393x_sgmii_pcs_ops;
- mpcs->sgmii_pcs.neg_mode = true;
mpcs->xg_pcs.ops = &mv88e6393x_xg_pcs_ops;
- mpcs->xg_pcs.neg_mode = true;
mpcs->supports_5g = true;
err = mv88e6393x_erratum_4_6(mpcs);
diff --git a/drivers/net/dsa/qca/qca8k-8xxx.c b/drivers/net/dsa/qca/qca8k-8xxx.c
index e8cb4da15dbe..a36b8b07030e 100644
--- a/drivers/net/dsa/qca/qca8k-8xxx.c
+++ b/drivers/net/dsa/qca/qca8k-8xxx.c
@@ -1634,7 +1634,6 @@ static void qca8k_setup_pcs(struct qca8k_priv *priv, struct qca8k_pcs *qpcs,
int port)
{
qpcs->pcs.ops = &qca8k_pcs_ops;
- qpcs->pcs.neg_mode = true;
/* We don't have interrupts for link changes, so we need to poll */
qpcs->pcs.poll = true;
diff --git a/drivers/net/dsa/realtek/Kconfig b/drivers/net/dsa/realtek/Kconfig
index 10687722d14c..d6eb6713e5f6 100644
--- a/drivers/net/dsa/realtek/Kconfig
+++ b/drivers/net/dsa/realtek/Kconfig
@@ -44,7 +44,7 @@ config NET_DSA_REALTEK_RTL8366RB
Select to enable support for Realtek RTL8366RB.
config NET_DSA_REALTEK_RTL8366RB_LEDS
- bool "Support RTL8366RB LED control"
+ bool
depends on (LEDS_CLASS=y || LEDS_CLASS=NET_DSA_REALTEK_RTL8366RB)
depends on NET_DSA_REALTEK_RTL8366RB
default NET_DSA_REALTEK_RTL8366RB
diff --git a/drivers/net/dsa/rzn1_a5psw.c b/drivers/net/dsa/rzn1_a5psw.c
index 66974379334a..31ea8130a495 100644
--- a/drivers/net/dsa/rzn1_a5psw.c
+++ b/drivers/net/dsa/rzn1_a5psw.c
@@ -1248,18 +1248,16 @@ static int a5psw_probe(struct platform_device *pdev)
if (ret)
goto clk_disable;
- mdio = of_get_child_by_name(dev->of_node, "mdio");
- if (of_device_is_available(mdio)) {
+ mdio = of_get_available_child_by_name(dev->of_node, "mdio");
+ if (mdio) {
ret = a5psw_probe_mdio(a5psw, mdio);
+ of_node_put(mdio);
if (ret) {
- of_node_put(mdio);
dev_err(dev, "Failed to register MDIO: %d\n", ret);
goto hclk_disable;
}
}
- of_node_put(mdio);
-
ds = &a5psw->ds;
ds->dev = dev;
ds->num_ports = A5PSW_PORTS_NUM;
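of_get_available_child_by_name(), used here in place of of_get_child_by_name() followed by of_device_is_available(), only returns a child node that is both present and enabled, so the probe path takes a single reference and drops it with of_node_put() right after registering the MDIO bus. A short sketch of the consumer pattern, assuming only that the helper returns a refcounted node or NULL; the demo names are illustrative.

#include <linux/device.h>
#include <linux/of.h>

static int demo_register_mdio(struct device *dev,
			      int (*register_bus)(struct device_node *np))
{
	struct device_node *mdio;
	int ret = 0;

	mdio = of_get_available_child_by_name(dev->of_node, "mdio");
	if (mdio) {
		ret = register_bus(mdio);
		of_node_put(mdio);	/* reference held only across the call */
	}

	return ret;
}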
diff --git a/drivers/net/dsa/sja1105/sja1105_ethtool.c b/drivers/net/dsa/sja1105/sja1105_ethtool.c
index 2ea64b1d026d..84d7d3f66bd0 100644
--- a/drivers/net/dsa/sja1105/sja1105_ethtool.c
+++ b/drivers/net/dsa/sja1105/sja1105_ethtool.c
@@ -571,6 +571,9 @@ void sja1105_get_ethtool_stats(struct dsa_switch *ds, int port, u64 *data)
max_ctr = __MAX_SJA1105PQRS_PORT_COUNTER;
for (i = 0; i < max_ctr; i++) {
+ if (!strlen(sja1105_port_counters[i].name))
+ continue;
+
rc = sja1105_port_counter_read(priv, port, i, &data[k++]);
if (rc) {
dev_err(ds->dev,
@@ -596,8 +599,12 @@ void sja1105_get_strings(struct dsa_switch *ds, int port,
else
max_ctr = __MAX_SJA1105PQRS_PORT_COUNTER;
- for (i = 0; i < max_ctr; i++)
+ for (i = 0; i < max_ctr; i++) {
+ if (!strlen(sja1105_port_counters[i].name))
+ continue;
+
ethtool_puts(&data, sja1105_port_counters[i].name);
+ }
}
int sja1105_get_sset_count(struct dsa_switch *ds, int port, int sset)
diff --git a/drivers/net/dsa/sja1105/sja1105_mdio.c b/drivers/net/dsa/sja1105/sja1105_mdio.c
index 84b7169f2974..8d535c033cef 100644
--- a/drivers/net/dsa/sja1105/sja1105_mdio.c
+++ b/drivers/net/dsa/sja1105/sja1105_mdio.c
@@ -468,13 +468,10 @@ int sja1105_mdiobus_register(struct dsa_switch *ds)
if (rc)
return rc;
- mdio_node = of_get_child_by_name(switch_node, "mdios");
+ mdio_node = of_get_available_child_by_name(switch_node, "mdios");
if (!mdio_node)
return 0;
- if (!of_device_is_available(mdio_node))
- goto out_put_mdio_node;
-
if (regs->mdio_100base_tx != SJA1105_RSV_ADDR) {
rc = sja1105_mdiobus_base_tx_register(priv, mdio_node);
if (rc)
@@ -487,7 +484,6 @@ int sja1105_mdiobus_register(struct dsa_switch *ds)
goto err_free_base_tx_mdiobus;
}
-out_put_mdio_node:
of_node_put(mdio_node);
return 0;
diff --git a/drivers/net/dsa/sja1105/sja1105_ptp.c b/drivers/net/dsa/sja1105/sja1105_ptp.c
index a1f4ca6ad888..08b45fdd1d24 100644
--- a/drivers/net/dsa/sja1105/sja1105_ptp.c
+++ b/drivers/net/dsa/sja1105/sja1105_ptp.c
@@ -61,17 +61,21 @@ enum sja1105_ptp_clk_mode {
int sja1105_hwtstamp_set(struct dsa_switch *ds, int port, struct ifreq *ifr)
{
struct sja1105_private *priv = ds->priv;
+ unsigned long hwts_tx_en, hwts_rx_en;
struct hwtstamp_config config;
if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
return -EFAULT;
+ hwts_tx_en = priv->hwts_tx_en;
+ hwts_rx_en = priv->hwts_rx_en;
+
switch (config.tx_type) {
case HWTSTAMP_TX_OFF:
- priv->hwts_tx_en &= ~BIT(port);
+ hwts_tx_en &= ~BIT(port);
break;
case HWTSTAMP_TX_ON:
- priv->hwts_tx_en |= BIT(port);
+ hwts_tx_en |= BIT(port);
break;
default:
return -ERANGE;
@@ -79,15 +83,21 @@ int sja1105_hwtstamp_set(struct dsa_switch *ds, int port, struct ifreq *ifr)
switch (config.rx_filter) {
case HWTSTAMP_FILTER_NONE:
- priv->hwts_rx_en &= ~BIT(port);
+ hwts_rx_en &= ~BIT(port);
break;
- default:
- priv->hwts_rx_en |= BIT(port);
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ hwts_rx_en |= BIT(port);
break;
+ default:
+ return -ERANGE;
}
if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
return -EFAULT;
+
+ priv->hwts_tx_en = hwts_tx_en;
+ priv->hwts_rx_en = hwts_rx_en;
+
return 0;
}
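sja1105_hwtstamp_set() now stages the per-port enable masks in locals and writes them back to priv only after every check and the copy_to_user() have succeeded, so a rejected tx_type or rx_filter (or a faulting copy) no longer leaves the port half-configured. A minimal sketch of that validate-then-commit pattern; the state structure and names are illustrative.

#include <linux/bits.h>
#include <linux/errno.h>
#include <linux/types.h>

struct demo_ts_state {
	unsigned long tx_en;
	unsigned long rx_en;
};

/* Work on scratch copies; publish them only once everything has passed. */
static int demo_hwtstamp_set(struct demo_ts_state *state, int port,
			     bool tx_on, bool rx_on)
{
	unsigned long tx_en = state->tx_en;
	unsigned long rx_en = state->rx_en;

	if (port < 0 || port >= BITS_PER_LONG)
		return -ERANGE;

	if (tx_on)
		tx_en |= BIT(port);
	else
		tx_en &= ~BIT(port);

	if (rx_on)
		rx_en |= BIT(port);
	else
		rx_en &= ~BIT(port);

	/* nothing observable changes before this point */
	state->tx_en = tx_en;
	state->rx_en = rx_en;

	return 0;
}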
diff --git a/drivers/net/dsa/sja1105/sja1105_static_config.c b/drivers/net/dsa/sja1105/sja1105_static_config.c
index 3d790f8c6f4d..ffece8a400a6 100644
--- a/drivers/net/dsa/sja1105/sja1105_static_config.c
+++ b/drivers/net/dsa/sja1105/sja1105_static_config.c
@@ -1917,8 +1917,10 @@ int sja1105_table_delete_entry(struct sja1105_table *table, int i)
if (i > table->entry_count)
return -ERANGE;
- memmove(entries + i * entry_size, entries + (i + 1) * entry_size,
- (table->entry_count - i) * entry_size);
+ if (i + 1 < table->entry_count) {
+ memmove(entries + i * entry_size, entries + (i + 1) * entry_size,
+ (table->entry_count - i - 1) * entry_size);
+ }
table->entry_count--;
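The old memmove() shifted entry_count - i entries, which includes one element past the end of the table: for entry_count = 4 and i = 3 it copied one entry from the nonexistent index 4. The fix copies only the entry_count - i - 1 entries that actually follow index i and skips the move entirely when the last entry is removed. A stand-alone sketch of the corrected delete-by-shift, assuming a plain array of fixed-size entries:

#include <errno.h>
#include <string.h>

/* Remove entry i from an array of *count elements, each entry_size bytes. */
static int demo_table_delete(void *entries, size_t entry_size,
			     size_t *count, size_t i)
{
	unsigned char *base = entries;

	if (i >= *count)
		return -ERANGE;

	/* only the entries after index i need to move down by one slot */
	if (i + 1 < *count)
		memmove(base + i * entry_size, base + (i + 1) * entry_size,
			(*count - i - 1) * entry_size);

	(*count)--;

	return 0;
}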
diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c
index 005d79975f3b..a4938c6a5ebb 100644
--- a/drivers/net/dummy.c
+++ b/drivers/net/dummy.c
@@ -38,6 +38,7 @@
#include <linux/moduleparam.h>
#include <linux/rtnetlink.h>
#include <linux/net_tstamp.h>
+#include <net/netdev_lock.h>
#include <net/rtnetlink.h>
#include <linux/u64_stats_sync.h>
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index 977b42bc1e8c..f86d4557d8d7 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -20,6 +20,7 @@ source "drivers/net/ethernet/actions/Kconfig"
source "drivers/net/ethernet/adaptec/Kconfig"
source "drivers/net/ethernet/aeroflex/Kconfig"
source "drivers/net/ethernet/agere/Kconfig"
+source "drivers/net/ethernet/airoha/Kconfig"
source "drivers/net/ethernet/alacritech/Kconfig"
source "drivers/net/ethernet/allwinner/Kconfig"
source "drivers/net/ethernet/alteon/Kconfig"
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index 99fa180dedb8..67182339469a 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -10,6 +10,7 @@ obj-$(CONFIG_NET_VENDOR_ADAPTEC) += adaptec/
obj-$(CONFIG_GRETH) += aeroflex/
obj-$(CONFIG_NET_VENDOR_ADI) += adi/
obj-$(CONFIG_NET_VENDOR_AGERE) += agere/
+obj-$(CONFIG_NET_VENDOR_AIROHA) += airoha/
obj-$(CONFIG_NET_VENDOR_ALACRITECH) += alacritech/
obj-$(CONFIG_NET_VENDOR_ALLWINNER) += allwinner/
obj-$(CONFIG_NET_VENDOR_ALTEON) += alteon/
diff --git a/drivers/net/ethernet/actions/owl-emac.c b/drivers/net/ethernet/actions/owl-emac.c
index 115f48b3342c..0a08da799255 100644
--- a/drivers/net/ethernet/actions/owl-emac.c
+++ b/drivers/net/ethernet/actions/owl-emac.c
@@ -1325,15 +1325,10 @@ static int owl_emac_mdio_init(struct net_device *netdev)
struct device_node *mdio_node;
int ret;
- mdio_node = of_get_child_by_name(dev->of_node, "mdio");
+ mdio_node = of_get_available_child_by_name(dev->of_node, "mdio");
if (!mdio_node)
return -ENODEV;
- if (!of_device_is_available(mdio_node)) {
- ret = -ENODEV;
- goto err_put_node;
- }
-
priv->mii = devm_mdiobus_alloc(dev);
if (!priv->mii) {
ret = -ENOMEM;
diff --git a/drivers/net/ethernet/adi/adin1110.c b/drivers/net/ethernet/adi/adin1110.c
index 68fad5575fd4..30f9d271e595 100644
--- a/drivers/net/ethernet/adi/adin1110.c
+++ b/drivers/net/ethernet/adi/adin1110.c
@@ -1599,7 +1599,7 @@ static int adin1110_probe_netdevs(struct adin1110_priv *priv)
netdev->netdev_ops = &adin1110_netdev_ops;
netdev->ethtool_ops = &adin1110_ethtool_ops;
netdev->priv_flags |= IFF_UNICAST_FLT;
- netdev->netns_local = true;
+ netdev->netns_immutable = true;
port_priv->phydev = get_phy_device(priv->mii_bus, i + 1, false);
if (IS_ERR(port_priv->phydev)) {
diff --git a/drivers/net/ethernet/airoha/Kconfig b/drivers/net/ethernet/airoha/Kconfig
new file mode 100644
index 000000000000..1a4cf6a259f6
--- /dev/null
+++ b/drivers/net/ethernet/airoha/Kconfig
@@ -0,0 +1,27 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config NET_VENDOR_AIROHA
+ bool "Airoha devices"
+ depends on ARCH_AIROHA || COMPILE_TEST
+ help
+ If you have an Airoha SoC with Ethernet, say Y.
+
+if NET_VENDOR_AIROHA
+
+config NET_AIROHA_NPU
+ tristate "Airoha NPU support"
+ select WANT_DEV_COREDUMP
+ select REGMAP_MMIO
+ help
+ This driver supports the Airoha Network Processor (NPU) available
+ on the Airoha SoC family.
+
+config NET_AIROHA
+ tristate "Airoha SoC Gigabit Ethernet support"
+ depends on NET_DSA || !NET_DSA
+ select NET_AIROHA_NPU
+ select PAGE_POOL
+ help
+ This driver supports the gigabit ethernet MACs in the
+ Airoha SoC family.
+
+endif #NET_VENDOR_AIROHA
diff --git a/drivers/net/ethernet/airoha/Makefile b/drivers/net/ethernet/airoha/Makefile
new file mode 100644
index 000000000000..94468053e34b
--- /dev/null
+++ b/drivers/net/ethernet/airoha/Makefile
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Makefile for the Airoha SoC built-in Ethernet MACs
+#
+
+obj-$(CONFIG_NET_AIROHA) += airoha-eth.o
+airoha-eth-y := airoha_eth.o airoha_ppe.o
+airoha-eth-$(CONFIG_DEBUG_FS) += airoha_ppe_debugfs.o
+obj-$(CONFIG_NET_AIROHA_NPU) += airoha_npu.o
diff --git a/drivers/net/ethernet/mediatek/airoha_eth.c b/drivers/net/ethernet/airoha/airoha_eth.c
index 09f448f29124..c0a642568ac1 100644
--- a/drivers/net/ethernet/mediatek/airoha_eth.c
+++ b/drivers/net/ethernet/airoha/airoha_eth.c
@@ -3,925 +3,30 @@
* Copyright (c) 2024 AIROHA Inc
* Author: Lorenzo Bianconi <lorenzo@kernel.org>
*/
-#include <linux/etherdevice.h>
-#include <linux/iopoll.h>
-#include <linux/kernel.h>
-#include <linux/netdevice.h>
#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/platform_device.h>
-#include <linux/reset.h>
#include <linux/tcp.h>
#include <linux/u64_stats_sync.h>
-#include <net/dsa.h>
+#include <net/dst_metadata.h>
#include <net/page_pool/helpers.h>
#include <net/pkt_cls.h>
#include <uapi/linux/ppp_defs.h>
-#define AIROHA_MAX_NUM_GDM_PORTS 1
-#define AIROHA_MAX_NUM_QDMA 2
-#define AIROHA_MAX_NUM_RSTS 3
-#define AIROHA_MAX_NUM_XSI_RSTS 5
-#define AIROHA_MAX_MTU 2000
-#define AIROHA_MAX_PACKET_SIZE 2048
-#define AIROHA_NUM_QOS_CHANNELS 4
-#define AIROHA_NUM_QOS_QUEUES 8
-#define AIROHA_NUM_TX_RING 32
-#define AIROHA_NUM_RX_RING 32
-#define AIROHA_NUM_NETDEV_TX_RINGS (AIROHA_NUM_TX_RING + \
- AIROHA_NUM_QOS_CHANNELS)
-#define AIROHA_FE_MC_MAX_VLAN_TABLE 64
-#define AIROHA_FE_MC_MAX_VLAN_PORT 16
-#define AIROHA_NUM_TX_IRQ 2
-#define HW_DSCP_NUM 2048
-#define IRQ_QUEUE_LEN(_n) ((_n) ? 1024 : 2048)
-#define TX_DSCP_NUM 1024
-#define RX_DSCP_NUM(_n) \
- ((_n) == 2 ? 128 : \
- (_n) == 11 ? 128 : \
- (_n) == 15 ? 128 : \
- (_n) == 0 ? 1024 : 16)
-
-#define PSE_RSV_PAGES 128
-#define PSE_QUEUE_RSV_PAGES 64
-
-#define QDMA_METER_IDX(_n) ((_n) & 0xff)
-#define QDMA_METER_GROUP(_n) (((_n) >> 8) & 0x3)
-
-/* FE */
-#define PSE_BASE 0x0100
-#define CSR_IFC_BASE 0x0200
-#define CDM1_BASE 0x0400
-#define GDM1_BASE 0x0500
-#define PPE1_BASE 0x0c00
-
-#define CDM2_BASE 0x1400
-#define GDM2_BASE 0x1500
-
-#define GDM3_BASE 0x1100
-#define GDM4_BASE 0x2500
-
-#define GDM_BASE(_n) \
- ((_n) == 4 ? GDM4_BASE : \
- (_n) == 3 ? GDM3_BASE : \
- (_n) == 2 ? GDM2_BASE : GDM1_BASE)
-
-#define REG_FE_DMA_GLO_CFG 0x0000
-#define FE_DMA_GLO_L2_SPACE_MASK GENMASK(7, 4)
-#define FE_DMA_GLO_PG_SZ_MASK BIT(3)
-
-#define REG_FE_RST_GLO_CFG 0x0004
-#define FE_RST_GDM4_MBI_ARB_MASK BIT(3)
-#define FE_RST_GDM3_MBI_ARB_MASK BIT(2)
-#define FE_RST_CORE_MASK BIT(0)
-
-#define REG_FE_WAN_MAC_H 0x0030
-#define REG_FE_LAN_MAC_H 0x0040
-
-#define REG_FE_MAC_LMIN(_n) ((_n) + 0x04)
-#define REG_FE_MAC_LMAX(_n) ((_n) + 0x08)
-
-#define REG_FE_CDM1_OQ_MAP0 0x0050
-#define REG_FE_CDM1_OQ_MAP1 0x0054
-#define REG_FE_CDM1_OQ_MAP2 0x0058
-#define REG_FE_CDM1_OQ_MAP3 0x005c
-
-#define REG_FE_PCE_CFG 0x0070
-#define PCE_DPI_EN_MASK BIT(2)
-#define PCE_KA_EN_MASK BIT(1)
-#define PCE_MC_EN_MASK BIT(0)
-
-#define REG_FE_PSE_QUEUE_CFG_WR 0x0080
-#define PSE_CFG_PORT_ID_MASK GENMASK(27, 24)
-#define PSE_CFG_QUEUE_ID_MASK GENMASK(20, 16)
-#define PSE_CFG_WR_EN_MASK BIT(8)
-#define PSE_CFG_OQRSV_SEL_MASK BIT(0)
-
-#define REG_FE_PSE_QUEUE_CFG_VAL 0x0084
-#define PSE_CFG_OQ_RSV_MASK GENMASK(13, 0)
-
-#define PSE_FQ_CFG 0x008c
-#define PSE_FQ_LIMIT_MASK GENMASK(14, 0)
-
-#define REG_FE_PSE_BUF_SET 0x0090
-#define PSE_SHARE_USED_LTHD_MASK GENMASK(31, 16)
-#define PSE_ALLRSV_MASK GENMASK(14, 0)
-
-#define REG_PSE_SHARE_USED_THD 0x0094
-#define PSE_SHARE_USED_MTHD_MASK GENMASK(31, 16)
-#define PSE_SHARE_USED_HTHD_MASK GENMASK(15, 0)
-
-#define REG_GDM_MISC_CFG 0x0148
-#define GDM2_RDM_ACK_WAIT_PREF_MASK BIT(9)
-#define GDM2_CHN_VLD_MODE_MASK BIT(5)
-
-#define REG_FE_CSR_IFC_CFG CSR_IFC_BASE
-#define FE_IFC_EN_MASK BIT(0)
-
-#define REG_FE_VIP_PORT_EN 0x01f0
-#define REG_FE_IFC_PORT_EN 0x01f4
-
-#define REG_PSE_IQ_REV1 (PSE_BASE + 0x08)
-#define PSE_IQ_RES1_P2_MASK GENMASK(23, 16)
-
-#define REG_PSE_IQ_REV2 (PSE_BASE + 0x0c)
-#define PSE_IQ_RES2_P5_MASK GENMASK(15, 8)
-#define PSE_IQ_RES2_P4_MASK GENMASK(7, 0)
-
-#define REG_FE_VIP_EN(_n) (0x0300 + ((_n) << 3))
-#define PATN_FCPU_EN_MASK BIT(7)
-#define PATN_SWP_EN_MASK BIT(6)
-#define PATN_DP_EN_MASK BIT(5)
-#define PATN_SP_EN_MASK BIT(4)
-#define PATN_TYPE_MASK GENMASK(3, 1)
-#define PATN_EN_MASK BIT(0)
-
-#define REG_FE_VIP_PATN(_n) (0x0304 + ((_n) << 3))
-#define PATN_DP_MASK GENMASK(31, 16)
-#define PATN_SP_MASK GENMASK(15, 0)
-
-#define REG_CDM1_VLAN_CTRL CDM1_BASE
-#define CDM1_VLAN_MASK GENMASK(31, 16)
-
-#define REG_CDM1_FWD_CFG (CDM1_BASE + 0x08)
-#define CDM1_VIP_QSEL_MASK GENMASK(24, 20)
-
-#define REG_CDM1_CRSN_QSEL(_n) (CDM1_BASE + 0x10 + ((_n) << 2))
-#define CDM1_CRSN_QSEL_REASON_MASK(_n) \
- GENMASK(4 + (((_n) % 4) << 3), (((_n) % 4) << 3))
-
-#define REG_CDM2_FWD_CFG (CDM2_BASE + 0x08)
-#define CDM2_OAM_QSEL_MASK GENMASK(31, 27)
-#define CDM2_VIP_QSEL_MASK GENMASK(24, 20)
-
-#define REG_CDM2_CRSN_QSEL(_n) (CDM2_BASE + 0x10 + ((_n) << 2))
-#define CDM2_CRSN_QSEL_REASON_MASK(_n) \
- GENMASK(4 + (((_n) % 4) << 3), (((_n) % 4) << 3))
-
-#define REG_GDM_FWD_CFG(_n) GDM_BASE(_n)
-#define GDM_DROP_CRC_ERR BIT(23)
-#define GDM_IP4_CKSUM BIT(22)
-#define GDM_TCP_CKSUM BIT(21)
-#define GDM_UDP_CKSUM BIT(20)
-#define GDM_UCFQ_MASK GENMASK(15, 12)
-#define GDM_BCFQ_MASK GENMASK(11, 8)
-#define GDM_MCFQ_MASK GENMASK(7, 4)
-#define GDM_OCFQ_MASK GENMASK(3, 0)
-
-#define REG_GDM_INGRESS_CFG(_n) (GDM_BASE(_n) + 0x10)
-#define GDM_INGRESS_FC_EN_MASK BIT(1)
-#define GDM_STAG_EN_MASK BIT(0)
-
-#define REG_GDM_LEN_CFG(_n) (GDM_BASE(_n) + 0x14)
-#define GDM_SHORT_LEN_MASK GENMASK(13, 0)
-#define GDM_LONG_LEN_MASK GENMASK(29, 16)
-
-#define REG_FE_CPORT_CFG (GDM1_BASE + 0x40)
-#define FE_CPORT_PAD BIT(26)
-#define FE_CPORT_PORT_XFC_MASK BIT(25)
-#define FE_CPORT_QUEUE_XFC_MASK BIT(24)
-
-#define REG_FE_GDM_MIB_CLEAR(_n) (GDM_BASE(_n) + 0xf0)
-#define FE_GDM_MIB_RX_CLEAR_MASK BIT(1)
-#define FE_GDM_MIB_TX_CLEAR_MASK BIT(0)
-
-#define REG_FE_GDM1_MIB_CFG (GDM1_BASE + 0xf4)
-#define FE_STRICT_RFC2819_MODE_MASK BIT(31)
-#define FE_GDM1_TX_MIB_SPLIT_EN_MASK BIT(17)
-#define FE_GDM1_RX_MIB_SPLIT_EN_MASK BIT(16)
-#define FE_TX_MIB_ID_MASK GENMASK(15, 8)
-#define FE_RX_MIB_ID_MASK GENMASK(7, 0)
-
-#define REG_FE_GDM_TX_OK_PKT_CNT_L(_n) (GDM_BASE(_n) + 0x104)
-#define REG_FE_GDM_TX_OK_BYTE_CNT_L(_n) (GDM_BASE(_n) + 0x10c)
-#define REG_FE_GDM_TX_ETH_PKT_CNT_L(_n) (GDM_BASE(_n) + 0x110)
-#define REG_FE_GDM_TX_ETH_BYTE_CNT_L(_n) (GDM_BASE(_n) + 0x114)
-#define REG_FE_GDM_TX_ETH_DROP_CNT(_n) (GDM_BASE(_n) + 0x118)
-#define REG_FE_GDM_TX_ETH_BC_CNT(_n) (GDM_BASE(_n) + 0x11c)
-#define REG_FE_GDM_TX_ETH_MC_CNT(_n) (GDM_BASE(_n) + 0x120)
-#define REG_FE_GDM_TX_ETH_RUNT_CNT(_n) (GDM_BASE(_n) + 0x124)
-#define REG_FE_GDM_TX_ETH_LONG_CNT(_n) (GDM_BASE(_n) + 0x128)
-#define REG_FE_GDM_TX_ETH_E64_CNT_L(_n) (GDM_BASE(_n) + 0x12c)
-#define REG_FE_GDM_TX_ETH_L64_CNT_L(_n) (GDM_BASE(_n) + 0x130)
-#define REG_FE_GDM_TX_ETH_L127_CNT_L(_n) (GDM_BASE(_n) + 0x134)
-#define REG_FE_GDM_TX_ETH_L255_CNT_L(_n) (GDM_BASE(_n) + 0x138)
-#define REG_FE_GDM_TX_ETH_L511_CNT_L(_n) (GDM_BASE(_n) + 0x13c)
-#define REG_FE_GDM_TX_ETH_L1023_CNT_L(_n) (GDM_BASE(_n) + 0x140)
-
-#define REG_FE_GDM_RX_OK_PKT_CNT_L(_n) (GDM_BASE(_n) + 0x148)
-#define REG_FE_GDM_RX_FC_DROP_CNT(_n) (GDM_BASE(_n) + 0x14c)
-#define REG_FE_GDM_RX_RC_DROP_CNT(_n) (GDM_BASE(_n) + 0x150)
-#define REG_FE_GDM_RX_OVERFLOW_DROP_CNT(_n) (GDM_BASE(_n) + 0x154)
-#define REG_FE_GDM_RX_ERROR_DROP_CNT(_n) (GDM_BASE(_n) + 0x158)
-#define REG_FE_GDM_RX_OK_BYTE_CNT_L(_n) (GDM_BASE(_n) + 0x15c)
-#define REG_FE_GDM_RX_ETH_PKT_CNT_L(_n) (GDM_BASE(_n) + 0x160)
-#define REG_FE_GDM_RX_ETH_BYTE_CNT_L(_n) (GDM_BASE(_n) + 0x164)
-#define REG_FE_GDM_RX_ETH_DROP_CNT(_n) (GDM_BASE(_n) + 0x168)
-#define REG_FE_GDM_RX_ETH_BC_CNT(_n) (GDM_BASE(_n) + 0x16c)
-#define REG_FE_GDM_RX_ETH_MC_CNT(_n) (GDM_BASE(_n) + 0x170)
-#define REG_FE_GDM_RX_ETH_CRC_ERR_CNT(_n) (GDM_BASE(_n) + 0x174)
-#define REG_FE_GDM_RX_ETH_FRAG_CNT(_n) (GDM_BASE(_n) + 0x178)
-#define REG_FE_GDM_RX_ETH_JABBER_CNT(_n) (GDM_BASE(_n) + 0x17c)
-#define REG_FE_GDM_RX_ETH_RUNT_CNT(_n) (GDM_BASE(_n) + 0x180)
-#define REG_FE_GDM_RX_ETH_LONG_CNT(_n) (GDM_BASE(_n) + 0x184)
-#define REG_FE_GDM_RX_ETH_E64_CNT_L(_n) (GDM_BASE(_n) + 0x188)
-#define REG_FE_GDM_RX_ETH_L64_CNT_L(_n) (GDM_BASE(_n) + 0x18c)
-#define REG_FE_GDM_RX_ETH_L127_CNT_L(_n) (GDM_BASE(_n) + 0x190)
-#define REG_FE_GDM_RX_ETH_L255_CNT_L(_n) (GDM_BASE(_n) + 0x194)
-#define REG_FE_GDM_RX_ETH_L511_CNT_L(_n) (GDM_BASE(_n) + 0x198)
-#define REG_FE_GDM_RX_ETH_L1023_CNT_L(_n) (GDM_BASE(_n) + 0x19c)
-
-#define REG_PPE1_TB_HASH_CFG (PPE1_BASE + 0x250)
-#define PPE1_SRAM_TABLE_EN_MASK BIT(0)
-#define PPE1_SRAM_HASH1_EN_MASK BIT(8)
-#define PPE1_DRAM_TABLE_EN_MASK BIT(16)
-#define PPE1_DRAM_HASH1_EN_MASK BIT(24)
-
-#define REG_FE_GDM_TX_OK_PKT_CNT_H(_n) (GDM_BASE(_n) + 0x280)
-#define REG_FE_GDM_TX_OK_BYTE_CNT_H(_n) (GDM_BASE(_n) + 0x284)
-#define REG_FE_GDM_TX_ETH_PKT_CNT_H(_n) (GDM_BASE(_n) + 0x288)
-#define REG_FE_GDM_TX_ETH_BYTE_CNT_H(_n) (GDM_BASE(_n) + 0x28c)
-
-#define REG_FE_GDM_RX_OK_PKT_CNT_H(_n) (GDM_BASE(_n) + 0x290)
-#define REG_FE_GDM_RX_OK_BYTE_CNT_H(_n) (GDM_BASE(_n) + 0x294)
-#define REG_FE_GDM_RX_ETH_PKT_CNT_H(_n) (GDM_BASE(_n) + 0x298)
-#define REG_FE_GDM_RX_ETH_BYTE_CNT_H(_n) (GDM_BASE(_n) + 0x29c)
-#define REG_FE_GDM_TX_ETH_E64_CNT_H(_n) (GDM_BASE(_n) + 0x2b8)
-#define REG_FE_GDM_TX_ETH_L64_CNT_H(_n) (GDM_BASE(_n) + 0x2bc)
-#define REG_FE_GDM_TX_ETH_L127_CNT_H(_n) (GDM_BASE(_n) + 0x2c0)
-#define REG_FE_GDM_TX_ETH_L255_CNT_H(_n) (GDM_BASE(_n) + 0x2c4)
-#define REG_FE_GDM_TX_ETH_L511_CNT_H(_n) (GDM_BASE(_n) + 0x2c8)
-#define REG_FE_GDM_TX_ETH_L1023_CNT_H(_n) (GDM_BASE(_n) + 0x2cc)
-#define REG_FE_GDM_RX_ETH_E64_CNT_H(_n) (GDM_BASE(_n) + 0x2e8)
-#define REG_FE_GDM_RX_ETH_L64_CNT_H(_n) (GDM_BASE(_n) + 0x2ec)
-#define REG_FE_GDM_RX_ETH_L127_CNT_H(_n) (GDM_BASE(_n) + 0x2f0)
-#define REG_FE_GDM_RX_ETH_L255_CNT_H(_n) (GDM_BASE(_n) + 0x2f4)
-#define REG_FE_GDM_RX_ETH_L511_CNT_H(_n) (GDM_BASE(_n) + 0x2f8)
-#define REG_FE_GDM_RX_ETH_L1023_CNT_H(_n) (GDM_BASE(_n) + 0x2fc)
-
-#define REG_GDM2_CHN_RLS (GDM2_BASE + 0x20)
-#define MBI_RX_AGE_SEL_MASK GENMASK(26, 25)
-#define MBI_TX_AGE_SEL_MASK GENMASK(18, 17)
-
-#define REG_GDM3_FWD_CFG GDM3_BASE
-#define GDM3_PAD_EN_MASK BIT(28)
-
-#define REG_GDM4_FWD_CFG GDM4_BASE
-#define GDM4_PAD_EN_MASK BIT(28)
-#define GDM4_SPORT_OFFSET0_MASK GENMASK(11, 8)
-
-#define REG_GDM4_SRC_PORT_SET (GDM4_BASE + 0x23c)
-#define GDM4_SPORT_OFF2_MASK GENMASK(19, 16)
-#define GDM4_SPORT_OFF1_MASK GENMASK(15, 12)
-#define GDM4_SPORT_OFF0_MASK GENMASK(11, 8)
-
-#define REG_IP_FRAG_FP 0x2010
-#define IP_ASSEMBLE_PORT_MASK GENMASK(24, 21)
-#define IP_ASSEMBLE_NBQ_MASK GENMASK(20, 16)
-#define IP_FRAGMENT_PORT_MASK GENMASK(8, 5)
-#define IP_FRAGMENT_NBQ_MASK GENMASK(4, 0)
-
-#define REG_MC_VLAN_EN 0x2100
-#define MC_VLAN_EN_MASK BIT(0)
-
-#define REG_MC_VLAN_CFG 0x2104
-#define MC_VLAN_CFG_CMD_DONE_MASK BIT(31)
-#define MC_VLAN_CFG_TABLE_ID_MASK GENMASK(21, 16)
-#define MC_VLAN_CFG_PORT_ID_MASK GENMASK(11, 8)
-#define MC_VLAN_CFG_TABLE_SEL_MASK BIT(4)
-#define MC_VLAN_CFG_RW_MASK BIT(0)
-
-#define REG_MC_VLAN_DATA 0x2108
-
-#define REG_CDM5_RX_OQ1_DROP_CNT 0x29d4
-
-/* QDMA */
-#define REG_QDMA_GLOBAL_CFG 0x0004
-#define GLOBAL_CFG_RX_2B_OFFSET_MASK BIT(31)
-#define GLOBAL_CFG_DMA_PREFERENCE_MASK GENMASK(30, 29)
-#define GLOBAL_CFG_CPU_TXR_RR_MASK BIT(28)
-#define GLOBAL_CFG_DSCP_BYTE_SWAP_MASK BIT(27)
-#define GLOBAL_CFG_PAYLOAD_BYTE_SWAP_MASK BIT(26)
-#define GLOBAL_CFG_MULTICAST_MODIFY_FP_MASK BIT(25)
-#define GLOBAL_CFG_OAM_MODIFY_MASK BIT(24)
-#define GLOBAL_CFG_RESET_MASK BIT(23)
-#define GLOBAL_CFG_RESET_DONE_MASK BIT(22)
-#define GLOBAL_CFG_MULTICAST_EN_MASK BIT(21)
-#define GLOBAL_CFG_IRQ1_EN_MASK BIT(20)
-#define GLOBAL_CFG_IRQ0_EN_MASK BIT(19)
-#define GLOBAL_CFG_LOOPCNT_EN_MASK BIT(18)
-#define GLOBAL_CFG_RD_BYPASS_WR_MASK BIT(17)
-#define GLOBAL_CFG_QDMA_LOOPBACK_MASK BIT(16)
-#define GLOBAL_CFG_LPBK_RXQ_SEL_MASK GENMASK(13, 8)
-#define GLOBAL_CFG_CHECK_DONE_MASK BIT(7)
-#define GLOBAL_CFG_TX_WB_DONE_MASK BIT(6)
-#define GLOBAL_CFG_MAX_ISSUE_NUM_MASK GENMASK(5, 4)
-#define GLOBAL_CFG_RX_DMA_BUSY_MASK BIT(3)
-#define GLOBAL_CFG_RX_DMA_EN_MASK BIT(2)
-#define GLOBAL_CFG_TX_DMA_BUSY_MASK BIT(1)
-#define GLOBAL_CFG_TX_DMA_EN_MASK BIT(0)
-
-#define REG_FWD_DSCP_BASE 0x0010
-#define REG_FWD_BUF_BASE 0x0014
-
-#define REG_HW_FWD_DSCP_CFG 0x0018
-#define HW_FWD_DSCP_PAYLOAD_SIZE_MASK GENMASK(29, 28)
-#define HW_FWD_DSCP_SCATTER_LEN_MASK GENMASK(17, 16)
-#define HW_FWD_DSCP_MIN_SCATTER_LEN_MASK GENMASK(15, 0)
-
-#define REG_INT_STATUS(_n) \
- (((_n) == 4) ? 0x0730 : \
- ((_n) == 3) ? 0x0724 : \
- ((_n) == 2) ? 0x0720 : \
- ((_n) == 1) ? 0x0024 : 0x0020)
-
-#define REG_INT_ENABLE(_n) \
- (((_n) == 4) ? 0x0750 : \
- ((_n) == 3) ? 0x0744 : \
- ((_n) == 2) ? 0x0740 : \
- ((_n) == 1) ? 0x002c : 0x0028)
-
-/* QDMA_CSR_INT_ENABLE1 */
-#define RX15_COHERENT_INT_MASK BIT(31)
-#define RX14_COHERENT_INT_MASK BIT(30)
-#define RX13_COHERENT_INT_MASK BIT(29)
-#define RX12_COHERENT_INT_MASK BIT(28)
-#define RX11_COHERENT_INT_MASK BIT(27)
-#define RX10_COHERENT_INT_MASK BIT(26)
-#define RX9_COHERENT_INT_MASK BIT(25)
-#define RX8_COHERENT_INT_MASK BIT(24)
-#define RX7_COHERENT_INT_MASK BIT(23)
-#define RX6_COHERENT_INT_MASK BIT(22)
-#define RX5_COHERENT_INT_MASK BIT(21)
-#define RX4_COHERENT_INT_MASK BIT(20)
-#define RX3_COHERENT_INT_MASK BIT(19)
-#define RX2_COHERENT_INT_MASK BIT(18)
-#define RX1_COHERENT_INT_MASK BIT(17)
-#define RX0_COHERENT_INT_MASK BIT(16)
-#define TX7_COHERENT_INT_MASK BIT(15)
-#define TX6_COHERENT_INT_MASK BIT(14)
-#define TX5_COHERENT_INT_MASK BIT(13)
-#define TX4_COHERENT_INT_MASK BIT(12)
-#define TX3_COHERENT_INT_MASK BIT(11)
-#define TX2_COHERENT_INT_MASK BIT(10)
-#define TX1_COHERENT_INT_MASK BIT(9)
-#define TX0_COHERENT_INT_MASK BIT(8)
-#define CNT_OVER_FLOW_INT_MASK BIT(7)
-#define IRQ1_FULL_INT_MASK BIT(5)
-#define IRQ1_INT_MASK BIT(4)
-#define HWFWD_DSCP_LOW_INT_MASK BIT(3)
-#define HWFWD_DSCP_EMPTY_INT_MASK BIT(2)
-#define IRQ0_FULL_INT_MASK BIT(1)
-#define IRQ0_INT_MASK BIT(0)
-
-#define TX_DONE_INT_MASK(_n) \
- ((_n) ? IRQ1_INT_MASK | IRQ1_FULL_INT_MASK \
- : IRQ0_INT_MASK | IRQ0_FULL_INT_MASK)
-
-#define INT_TX_MASK \
- (IRQ1_INT_MASK | IRQ1_FULL_INT_MASK | \
- IRQ0_INT_MASK | IRQ0_FULL_INT_MASK)
-
-#define INT_IDX0_MASK \
- (TX0_COHERENT_INT_MASK | TX1_COHERENT_INT_MASK | \
- TX2_COHERENT_INT_MASK | TX3_COHERENT_INT_MASK | \
- TX4_COHERENT_INT_MASK | TX5_COHERENT_INT_MASK | \
- TX6_COHERENT_INT_MASK | TX7_COHERENT_INT_MASK | \
- RX0_COHERENT_INT_MASK | RX1_COHERENT_INT_MASK | \
- RX2_COHERENT_INT_MASK | RX3_COHERENT_INT_MASK | \
- RX4_COHERENT_INT_MASK | RX7_COHERENT_INT_MASK | \
- RX8_COHERENT_INT_MASK | RX9_COHERENT_INT_MASK | \
- RX15_COHERENT_INT_MASK | INT_TX_MASK)
-
-/* QDMA_CSR_INT_ENABLE2 */
-#define RX15_NO_CPU_DSCP_INT_MASK BIT(31)
-#define RX14_NO_CPU_DSCP_INT_MASK BIT(30)
-#define RX13_NO_CPU_DSCP_INT_MASK BIT(29)
-#define RX12_NO_CPU_DSCP_INT_MASK BIT(28)
-#define RX11_NO_CPU_DSCP_INT_MASK BIT(27)
-#define RX10_NO_CPU_DSCP_INT_MASK BIT(26)
-#define RX9_NO_CPU_DSCP_INT_MASK BIT(25)
-#define RX8_NO_CPU_DSCP_INT_MASK BIT(24)
-#define RX7_NO_CPU_DSCP_INT_MASK BIT(23)
-#define RX6_NO_CPU_DSCP_INT_MASK BIT(22)
-#define RX5_NO_CPU_DSCP_INT_MASK BIT(21)
-#define RX4_NO_CPU_DSCP_INT_MASK BIT(20)
-#define RX3_NO_CPU_DSCP_INT_MASK BIT(19)
-#define RX2_NO_CPU_DSCP_INT_MASK BIT(18)
-#define RX1_NO_CPU_DSCP_INT_MASK BIT(17)
-#define RX0_NO_CPU_DSCP_INT_MASK BIT(16)
-#define RX15_DONE_INT_MASK BIT(15)
-#define RX14_DONE_INT_MASK BIT(14)
-#define RX13_DONE_INT_MASK BIT(13)
-#define RX12_DONE_INT_MASK BIT(12)
-#define RX11_DONE_INT_MASK BIT(11)
-#define RX10_DONE_INT_MASK BIT(10)
-#define RX9_DONE_INT_MASK BIT(9)
-#define RX8_DONE_INT_MASK BIT(8)
-#define RX7_DONE_INT_MASK BIT(7)
-#define RX6_DONE_INT_MASK BIT(6)
-#define RX5_DONE_INT_MASK BIT(5)
-#define RX4_DONE_INT_MASK BIT(4)
-#define RX3_DONE_INT_MASK BIT(3)
-#define RX2_DONE_INT_MASK BIT(2)
-#define RX1_DONE_INT_MASK BIT(1)
-#define RX0_DONE_INT_MASK BIT(0)
-
-#define RX_DONE_INT_MASK \
- (RX0_DONE_INT_MASK | RX1_DONE_INT_MASK | \
- RX2_DONE_INT_MASK | RX3_DONE_INT_MASK | \
- RX4_DONE_INT_MASK | RX7_DONE_INT_MASK | \
- RX8_DONE_INT_MASK | RX9_DONE_INT_MASK | \
- RX15_DONE_INT_MASK)
-#define INT_IDX1_MASK \
- (RX_DONE_INT_MASK | \
- RX0_NO_CPU_DSCP_INT_MASK | RX1_NO_CPU_DSCP_INT_MASK | \
- RX2_NO_CPU_DSCP_INT_MASK | RX3_NO_CPU_DSCP_INT_MASK | \
- RX4_NO_CPU_DSCP_INT_MASK | RX7_NO_CPU_DSCP_INT_MASK | \
- RX8_NO_CPU_DSCP_INT_MASK | RX9_NO_CPU_DSCP_INT_MASK | \
- RX15_NO_CPU_DSCP_INT_MASK)
-
-/* QDMA_CSR_INT_ENABLE5 */
-#define TX31_COHERENT_INT_MASK BIT(31)
-#define TX30_COHERENT_INT_MASK BIT(30)
-#define TX29_COHERENT_INT_MASK BIT(29)
-#define TX28_COHERENT_INT_MASK BIT(28)
-#define TX27_COHERENT_INT_MASK BIT(27)
-#define TX26_COHERENT_INT_MASK BIT(26)
-#define TX25_COHERENT_INT_MASK BIT(25)
-#define TX24_COHERENT_INT_MASK BIT(24)
-#define TX23_COHERENT_INT_MASK BIT(23)
-#define TX22_COHERENT_INT_MASK BIT(22)
-#define TX21_COHERENT_INT_MASK BIT(21)
-#define TX20_COHERENT_INT_MASK BIT(20)
-#define TX19_COHERENT_INT_MASK BIT(19)
-#define TX18_COHERENT_INT_MASK BIT(18)
-#define TX17_COHERENT_INT_MASK BIT(17)
-#define TX16_COHERENT_INT_MASK BIT(16)
-#define TX15_COHERENT_INT_MASK BIT(15)
-#define TX14_COHERENT_INT_MASK BIT(14)
-#define TX13_COHERENT_INT_MASK BIT(13)
-#define TX12_COHERENT_INT_MASK BIT(12)
-#define TX11_COHERENT_INT_MASK BIT(11)
-#define TX10_COHERENT_INT_MASK BIT(10)
-#define TX9_COHERENT_INT_MASK BIT(9)
-#define TX8_COHERENT_INT_MASK BIT(8)
-
-#define INT_IDX4_MASK \
- (TX8_COHERENT_INT_MASK | TX9_COHERENT_INT_MASK | \
- TX10_COHERENT_INT_MASK | TX11_COHERENT_INT_MASK | \
- TX12_COHERENT_INT_MASK | TX13_COHERENT_INT_MASK | \
- TX14_COHERENT_INT_MASK | TX15_COHERENT_INT_MASK | \
- TX16_COHERENT_INT_MASK | TX17_COHERENT_INT_MASK | \
- TX18_COHERENT_INT_MASK | TX19_COHERENT_INT_MASK | \
- TX20_COHERENT_INT_MASK | TX21_COHERENT_INT_MASK | \
- TX22_COHERENT_INT_MASK | TX23_COHERENT_INT_MASK | \
- TX24_COHERENT_INT_MASK | TX25_COHERENT_INT_MASK | \
- TX26_COHERENT_INT_MASK | TX27_COHERENT_INT_MASK | \
- TX28_COHERENT_INT_MASK | TX29_COHERENT_INT_MASK | \
- TX30_COHERENT_INT_MASK | TX31_COHERENT_INT_MASK)
-
-#define REG_TX_IRQ_BASE(_n) ((_n) ? 0x0048 : 0x0050)
-
-#define REG_TX_IRQ_CFG(_n) ((_n) ? 0x004c : 0x0054)
-#define TX_IRQ_THR_MASK GENMASK(27, 16)
-#define TX_IRQ_DEPTH_MASK GENMASK(11, 0)
-
-#define REG_IRQ_CLEAR_LEN(_n) ((_n) ? 0x0064 : 0x0058)
-#define IRQ_CLEAR_LEN_MASK GENMASK(7, 0)
-
-#define REG_IRQ_STATUS(_n) ((_n) ? 0x0068 : 0x005c)
-#define IRQ_ENTRY_LEN_MASK GENMASK(27, 16)
-#define IRQ_HEAD_IDX_MASK GENMASK(11, 0)
-
-#define REG_TX_RING_BASE(_n) \
- (((_n) < 8) ? 0x0100 + ((_n) << 5) : 0x0b00 + (((_n) - 8) << 5))
-
-#define REG_TX_RING_BLOCKING(_n) \
- (((_n) < 8) ? 0x0104 + ((_n) << 5) : 0x0b04 + (((_n) - 8) << 5))
-
-#define TX_RING_IRQ_BLOCKING_MAP_MASK BIT(6)
-#define TX_RING_IRQ_BLOCKING_CFG_MASK BIT(4)
-#define TX_RING_IRQ_BLOCKING_TX_DROP_EN_MASK BIT(2)
-#define TX_RING_IRQ_BLOCKING_MAX_TH_TXRING_EN_MASK BIT(1)
-#define TX_RING_IRQ_BLOCKING_MIN_TH_TXRING_EN_MASK BIT(0)
-
-#define REG_TX_CPU_IDX(_n) \
- (((_n) < 8) ? 0x0108 + ((_n) << 5) : 0x0b08 + (((_n) - 8) << 5))
-
-#define TX_RING_CPU_IDX_MASK GENMASK(15, 0)
-
-#define REG_TX_DMA_IDX(_n) \
- (((_n) < 8) ? 0x010c + ((_n) << 5) : 0x0b0c + (((_n) - 8) << 5))
-
-#define TX_RING_DMA_IDX_MASK GENMASK(15, 0)
-
-#define IRQ_RING_IDX_MASK GENMASK(20, 16)
-#define IRQ_DESC_IDX_MASK GENMASK(15, 0)
-
-#define REG_RX_RING_BASE(_n) \
- (((_n) < 16) ? 0x0200 + ((_n) << 5) : 0x0e00 + (((_n) - 16) << 5))
-
-#define REG_RX_RING_SIZE(_n) \
- (((_n) < 16) ? 0x0204 + ((_n) << 5) : 0x0e04 + (((_n) - 16) << 5))
-
-#define RX_RING_THR_MASK GENMASK(31, 16)
-#define RX_RING_SIZE_MASK GENMASK(15, 0)
-
-#define REG_RX_CPU_IDX(_n) \
- (((_n) < 16) ? 0x0208 + ((_n) << 5) : 0x0e08 + (((_n) - 16) << 5))
-
-#define RX_RING_CPU_IDX_MASK GENMASK(15, 0)
-
-#define REG_RX_DMA_IDX(_n) \
- (((_n) < 16) ? 0x020c + ((_n) << 5) : 0x0e0c + (((_n) - 16) << 5))
-
-#define REG_RX_DELAY_INT_IDX(_n) \
- (((_n) < 16) ? 0x0210 + ((_n) << 5) : 0x0e10 + (((_n) - 16) << 5))
-
-#define RX_DELAY_INT_MASK GENMASK(15, 0)
-
-#define RX_RING_DMA_IDX_MASK GENMASK(15, 0)
-
-#define REG_INGRESS_TRTCM_CFG 0x0070
-#define INGRESS_TRTCM_EN_MASK BIT(31)
-#define INGRESS_TRTCM_MODE_MASK BIT(30)
-#define INGRESS_SLOW_TICK_RATIO_MASK GENMASK(29, 16)
-#define INGRESS_FAST_TICK_MASK GENMASK(15, 0)
-
-#define REG_QUEUE_CLOSE_CFG(_n) (0x00a0 + ((_n) & 0xfc))
-#define TXQ_DISABLE_CHAN_QUEUE_MASK(_n, _m) BIT((_m) + (((_n) & 0x3) << 3))
-
-#define REG_TXQ_DIS_CFG_BASE(_n) ((_n) ? 0x20a0 : 0x00a0)
-#define REG_TXQ_DIS_CFG(_n, _m) (REG_TXQ_DIS_CFG_BASE((_n)) + (_m) << 2)
-
-#define REG_CNTR_CFG(_n) (0x0400 + ((_n) << 3))
-#define CNTR_EN_MASK BIT(31)
-#define CNTR_ALL_CHAN_EN_MASK BIT(30)
-#define CNTR_ALL_QUEUE_EN_MASK BIT(29)
-#define CNTR_ALL_DSCP_RING_EN_MASK BIT(28)
-#define CNTR_SRC_MASK GENMASK(27, 24)
-#define CNTR_DSCP_RING_MASK GENMASK(20, 16)
-#define CNTR_CHAN_MASK GENMASK(7, 3)
-#define CNTR_QUEUE_MASK GENMASK(2, 0)
-
-#define REG_CNTR_VAL(_n) (0x0404 + ((_n) << 3))
-
-#define REG_LMGR_INIT_CFG 0x1000
-#define LMGR_INIT_START BIT(31)
-#define LMGR_SRAM_MODE_MASK BIT(30)
-#define HW_FWD_PKTSIZE_OVERHEAD_MASK GENMASK(27, 20)
-#define HW_FWD_DESC_NUM_MASK GENMASK(16, 0)
-
-#define REG_FWD_DSCP_LOW_THR 0x1004
-#define FWD_DSCP_LOW_THR_MASK GENMASK(17, 0)
-
-#define REG_EGRESS_RATE_METER_CFG 0x100c
-#define EGRESS_RATE_METER_EN_MASK BIT(31)
-#define EGRESS_RATE_METER_EQ_RATE_EN_MASK BIT(17)
-#define EGRESS_RATE_METER_WINDOW_SZ_MASK GENMASK(16, 12)
-#define EGRESS_RATE_METER_TIMESLICE_MASK GENMASK(10, 0)
-
-#define REG_EGRESS_TRTCM_CFG 0x1010
-#define EGRESS_TRTCM_EN_MASK BIT(31)
-#define EGRESS_TRTCM_MODE_MASK BIT(30)
-#define EGRESS_SLOW_TICK_RATIO_MASK GENMASK(29, 16)
-#define EGRESS_FAST_TICK_MASK GENMASK(15, 0)
-
-#define TRTCM_PARAM_RW_MASK BIT(31)
-#define TRTCM_PARAM_RW_DONE_MASK BIT(30)
-#define TRTCM_PARAM_TYPE_MASK GENMASK(29, 28)
-#define TRTCM_METER_GROUP_MASK GENMASK(27, 26)
-#define TRTCM_PARAM_INDEX_MASK GENMASK(23, 17)
-#define TRTCM_PARAM_RATE_TYPE_MASK BIT(16)
-
-#define REG_TRTCM_CFG_PARAM(_n) ((_n) + 0x4)
-#define REG_TRTCM_DATA_LOW(_n) ((_n) + 0x8)
-#define REG_TRTCM_DATA_HIGH(_n) ((_n) + 0xc)
-
-#define REG_TXWRR_MODE_CFG 0x1020
-#define TWRR_WEIGHT_SCALE_MASK BIT(31)
-#define TWRR_WEIGHT_BASE_MASK BIT(3)
-
-#define REG_TXWRR_WEIGHT_CFG 0x1024
-#define TWRR_RW_CMD_MASK BIT(31)
-#define TWRR_RW_CMD_DONE BIT(30)
-#define TWRR_CHAN_IDX_MASK GENMASK(23, 19)
-#define TWRR_QUEUE_IDX_MASK GENMASK(18, 16)
-#define TWRR_VALUE_MASK GENMASK(15, 0)
-
-#define REG_PSE_BUF_USAGE_CFG 0x1028
-#define PSE_BUF_ESTIMATE_EN_MASK BIT(29)
-
-#define REG_CHAN_QOS_MODE(_n) (0x1040 + ((_n) << 2))
-#define CHAN_QOS_MODE_MASK(_n) GENMASK(2 + ((_n) << 2), (_n) << 2)
-
-#define REG_GLB_TRTCM_CFG 0x1080
-#define GLB_TRTCM_EN_MASK BIT(31)
-#define GLB_TRTCM_MODE_MASK BIT(30)
-#define GLB_SLOW_TICK_RATIO_MASK GENMASK(29, 16)
-#define GLB_FAST_TICK_MASK GENMASK(15, 0)
-
-#define REG_TXQ_CNGST_CFG 0x10a0
-#define TXQ_CNGST_DROP_EN BIT(31)
-#define TXQ_CNGST_DEI_DROP_EN BIT(30)
-
-#define REG_SLA_TRTCM_CFG 0x1150
-#define SLA_TRTCM_EN_MASK BIT(31)
-#define SLA_TRTCM_MODE_MASK BIT(30)
-#define SLA_SLOW_TICK_RATIO_MASK GENMASK(29, 16)
-#define SLA_FAST_TICK_MASK GENMASK(15, 0)
-
-/* CTRL */
-#define QDMA_DESC_DONE_MASK BIT(31)
-#define QDMA_DESC_DROP_MASK BIT(30) /* tx: drop - rx: overflow */
-#define QDMA_DESC_MORE_MASK BIT(29) /* more SG elements */
-#define QDMA_DESC_DEI_MASK BIT(25)
-#define QDMA_DESC_NO_DROP_MASK BIT(24)
-#define QDMA_DESC_LEN_MASK GENMASK(15, 0)
-/* DATA */
-#define QDMA_DESC_NEXT_ID_MASK GENMASK(15, 0)
-/* TX MSG0 */
-#define QDMA_ETH_TXMSG_MIC_IDX_MASK BIT(30)
-#define QDMA_ETH_TXMSG_SP_TAG_MASK GENMASK(29, 14)
-#define QDMA_ETH_TXMSG_ICO_MASK BIT(13)
-#define QDMA_ETH_TXMSG_UCO_MASK BIT(12)
-#define QDMA_ETH_TXMSG_TCO_MASK BIT(11)
-#define QDMA_ETH_TXMSG_TSO_MASK BIT(10)
-#define QDMA_ETH_TXMSG_FAST_MASK BIT(9)
-#define QDMA_ETH_TXMSG_OAM_MASK BIT(8)
-#define QDMA_ETH_TXMSG_CHAN_MASK GENMASK(7, 3)
-#define QDMA_ETH_TXMSG_QUEUE_MASK GENMASK(2, 0)
-/* TX MSG1 */
-#define QDMA_ETH_TXMSG_NO_DROP BIT(31)
-#define QDMA_ETH_TXMSG_METER_MASK GENMASK(30, 24) /* 0x7f no meters */
-#define QDMA_ETH_TXMSG_FPORT_MASK GENMASK(23, 20)
-#define QDMA_ETH_TXMSG_NBOQ_MASK GENMASK(19, 15)
-#define QDMA_ETH_TXMSG_HWF_MASK BIT(14)
-#define QDMA_ETH_TXMSG_HOP_MASK BIT(13)
-#define QDMA_ETH_TXMSG_PTP_MASK BIT(12)
-#define QDMA_ETH_TXMSG_ACNT_G1_MASK GENMASK(10, 6) /* 0x1f do not count */
-#define QDMA_ETH_TXMSG_ACNT_G0_MASK GENMASK(5, 0) /* 0x3f do not count */
-
-/* RX MSG1 */
-#define QDMA_ETH_RXMSG_DEI_MASK BIT(31)
-#define QDMA_ETH_RXMSG_IP6_MASK BIT(30)
-#define QDMA_ETH_RXMSG_IP4_MASK BIT(29)
-#define QDMA_ETH_RXMSG_IP4F_MASK BIT(28)
-#define QDMA_ETH_RXMSG_L4_VALID_MASK BIT(27)
-#define QDMA_ETH_RXMSG_L4F_MASK BIT(26)
-#define QDMA_ETH_RXMSG_SPORT_MASK GENMASK(25, 21)
-#define QDMA_ETH_RXMSG_CRSN_MASK GENMASK(20, 16)
-#define QDMA_ETH_RXMSG_PPE_ENTRY_MASK GENMASK(15, 0)
-
-struct airoha_qdma_desc {
- __le32 rsv;
- __le32 ctrl;
- __le32 addr;
- __le32 data;
- __le32 msg0;
- __le32 msg1;
- __le32 msg2;
- __le32 msg3;
-};
-
-/* CTRL0 */
-#define QDMA_FWD_DESC_CTX_MASK BIT(31)
-#define QDMA_FWD_DESC_RING_MASK GENMASK(30, 28)
-#define QDMA_FWD_DESC_IDX_MASK GENMASK(27, 16)
-#define QDMA_FWD_DESC_LEN_MASK GENMASK(15, 0)
-/* CTRL1 */
-#define QDMA_FWD_DESC_FIRST_IDX_MASK GENMASK(15, 0)
-/* CTRL2 */
-#define QDMA_FWD_DESC_MORE_PKT_NUM_MASK GENMASK(2, 0)
-
-struct airoha_qdma_fwd_desc {
- __le32 addr;
- __le32 ctrl0;
- __le32 ctrl1;
- __le32 ctrl2;
- __le32 msg0;
- __le32 msg1;
- __le32 rsv0;
- __le32 rsv1;
-};
-
-enum {
- QDMA_INT_REG_IDX0,
- QDMA_INT_REG_IDX1,
- QDMA_INT_REG_IDX2,
- QDMA_INT_REG_IDX3,
- QDMA_INT_REG_IDX4,
- QDMA_INT_REG_MAX
-};
-
-enum {
- XSI_PCIE0_PORT,
- XSI_PCIE1_PORT,
- XSI_USB_PORT,
- XSI_AE_PORT,
- XSI_ETH_PORT,
-};
-
-enum {
- XSI_PCIE0_VIP_PORT_MASK = BIT(22),
- XSI_PCIE1_VIP_PORT_MASK = BIT(23),
- XSI_USB_VIP_PORT_MASK = BIT(25),
- XSI_ETH_VIP_PORT_MASK = BIT(24),
-};
-
-enum {
- DEV_STATE_INITIALIZED,
-};
-
-enum {
- CDM_CRSN_QSEL_Q1 = 1,
- CDM_CRSN_QSEL_Q5 = 5,
- CDM_CRSN_QSEL_Q6 = 6,
- CDM_CRSN_QSEL_Q15 = 15,
-};
-
-enum {
- CRSN_08 = 0x8,
- CRSN_21 = 0x15, /* KA */
- CRSN_22 = 0x16, /* hit bind and force route to CPU */
- CRSN_24 = 0x18,
- CRSN_25 = 0x19,
-};
-
-enum {
- FE_PSE_PORT_CDM1,
- FE_PSE_PORT_GDM1,
- FE_PSE_PORT_GDM2,
- FE_PSE_PORT_GDM3,
- FE_PSE_PORT_PPE1,
- FE_PSE_PORT_CDM2,
- FE_PSE_PORT_CDM3,
- FE_PSE_PORT_CDM4,
- FE_PSE_PORT_PPE2,
- FE_PSE_PORT_GDM4,
- FE_PSE_PORT_CDM5,
- FE_PSE_PORT_DROP = 0xf,
-};
-
-enum tx_sched_mode {
- TC_SCH_WRR8,
- TC_SCH_SP,
- TC_SCH_WRR7,
- TC_SCH_WRR6,
- TC_SCH_WRR5,
- TC_SCH_WRR4,
- TC_SCH_WRR3,
- TC_SCH_WRR2,
-};
-
-enum trtcm_param_type {
- TRTCM_MISC_MODE, /* meter_en, pps_mode, tick_sel */
- TRTCM_TOKEN_RATE_MODE,
- TRTCM_BUCKETSIZE_SHIFT_MODE,
- TRTCM_BUCKET_COUNTER_MODE,
-};
-
-enum trtcm_mode_type {
- TRTCM_COMMIT_MODE,
- TRTCM_PEAK_MODE,
-};
-
-enum trtcm_param {
- TRTCM_TICK_SEL = BIT(0),
- TRTCM_PKT_MODE = BIT(1),
- TRTCM_METER_MODE = BIT(2),
-};
-
-#define MIN_TOKEN_SIZE 4096
-#define MAX_TOKEN_SIZE_OFFSET 17
-#define TRTCM_TOKEN_RATE_MASK GENMASK(23, 6)
-#define TRTCM_TOKEN_RATE_FRACTION_MASK GENMASK(5, 0)
-
-struct airoha_queue_entry {
- union {
- void *buf;
- struct sk_buff *skb;
- };
- dma_addr_t dma_addr;
- u16 dma_len;
-};
-
-struct airoha_queue {
- struct airoha_qdma *qdma;
-
- /* protect concurrent queue accesses */
- spinlock_t lock;
- struct airoha_queue_entry *entry;
- struct airoha_qdma_desc *desc;
- u16 head;
- u16 tail;
-
- int queued;
- int ndesc;
- int free_thr;
- int buf_size;
-
- struct napi_struct napi;
- struct page_pool *page_pool;
-};
-
-struct airoha_tx_irq_queue {
- struct airoha_qdma *qdma;
-
- struct napi_struct napi;
-
- int size;
- u32 *q;
-};
-
-struct airoha_hw_stats {
- /* protect concurrent hw_stats accesses */
- spinlock_t lock;
- struct u64_stats_sync syncp;
-
- /* get_stats64 */
- u64 rx_ok_pkts;
- u64 tx_ok_pkts;
- u64 rx_ok_bytes;
- u64 tx_ok_bytes;
- u64 rx_multicast;
- u64 rx_errors;
- u64 rx_drops;
- u64 tx_drops;
- u64 rx_crc_error;
- u64 rx_over_errors;
- /* ethtool stats */
- u64 tx_broadcast;
- u64 tx_multicast;
- u64 tx_len[7];
- u64 rx_broadcast;
- u64 rx_fragment;
- u64 rx_jabber;
- u64 rx_len[7];
-};
-
-struct airoha_qdma {
- struct airoha_eth *eth;
- void __iomem *regs;
-
- /* protect concurrent irqmask accesses */
- spinlock_t irq_lock;
- u32 irqmask[QDMA_INT_REG_MAX];
- int irq;
+#include "airoha_regs.h"
+#include "airoha_eth.h"
- struct airoha_tx_irq_queue q_tx_irq[AIROHA_NUM_TX_IRQ];
-
- struct airoha_queue q_tx[AIROHA_NUM_TX_RING];
- struct airoha_queue q_rx[AIROHA_NUM_RX_RING];
-
- /* descriptor and packet buffers for qdma hw forward */
- struct {
- void *desc;
- void *q;
- } hfwd;
-};
-
-struct airoha_gdm_port {
- struct airoha_qdma *qdma;
- struct net_device *dev;
- int id;
-
- struct airoha_hw_stats stats;
-
- DECLARE_BITMAP(qos_sq_bmap, AIROHA_NUM_QOS_CHANNELS);
-
- /* qos stats counters */
- u64 cpu_tx_packets;
- u64 fwd_tx_packets;
-};
-
-struct airoha_eth {
- struct device *dev;
-
- unsigned long state;
- void __iomem *fe_regs;
-
- struct reset_control_bulk_data rsts[AIROHA_MAX_NUM_RSTS];
- struct reset_control_bulk_data xsi_rsts[AIROHA_MAX_NUM_XSI_RSTS];
-
- struct net_device *napi_dev;
-
- struct airoha_qdma qdma[AIROHA_MAX_NUM_QDMA];
- struct airoha_gdm_port *ports[AIROHA_MAX_NUM_GDM_PORTS];
-};
-
-static u32 airoha_rr(void __iomem *base, u32 offset)
+u32 airoha_rr(void __iomem *base, u32 offset)
{
return readl(base + offset);
}
-static void airoha_wr(void __iomem *base, u32 offset, u32 val)
+void airoha_wr(void __iomem *base, u32 offset, u32 val)
{
writel(val, base + offset);
}
-static u32 airoha_rmw(void __iomem *base, u32 offset, u32 mask, u32 val)
+u32 airoha_rmw(void __iomem *base, u32 offset, u32 mask, u32 val)
{
val |= (airoha_rr(base, offset) & ~mask);
airoha_wr(base, offset, val);
@@ -929,28 +34,6 @@ static u32 airoha_rmw(void __iomem *base, u32 offset, u32 mask, u32 val)
return val;
}
-#define airoha_fe_rr(eth, offset) \
- airoha_rr((eth)->fe_regs, (offset))
-#define airoha_fe_wr(eth, offset, val) \
- airoha_wr((eth)->fe_regs, (offset), (val))
-#define airoha_fe_rmw(eth, offset, mask, val) \
- airoha_rmw((eth)->fe_regs, (offset), (mask), (val))
-#define airoha_fe_set(eth, offset, val) \
- airoha_rmw((eth)->fe_regs, (offset), 0, (val))
-#define airoha_fe_clear(eth, offset, val) \
- airoha_rmw((eth)->fe_regs, (offset), (val), 0)
-
-#define airoha_qdma_rr(qdma, offset) \
- airoha_rr((qdma)->regs, (offset))
-#define airoha_qdma_wr(qdma, offset, val) \
- airoha_wr((qdma)->regs, (offset), (val))
-#define airoha_qdma_rmw(qdma, offset, mask, val) \
- airoha_rmw((qdma)->regs, (offset), (mask), (val))
-#define airoha_qdma_set(qdma, offset, val) \
- airoha_rmw((qdma)->regs, (offset), 0, (val))
-#define airoha_qdma_clear(qdma, offset, val) \
- airoha_rmw((qdma)->regs, (offset), (val), 0)
-
static void airoha_qdma_set_irqmask(struct airoha_qdma *qdma, int index,
u32 clear, u32 set)
{
@@ -1021,30 +104,23 @@ static void airoha_set_gdm_port_fwd_cfg(struct airoha_eth *eth, u32 addr,
FIELD_PREP(GDM_UCFQ_MASK, val));
}
-static int airoha_set_gdm_port(struct airoha_eth *eth, int port, bool enable)
+static int airoha_set_vip_for_gdm_port(struct airoha_gdm_port *port,
+ bool enable)
{
- u32 val = enable ? FE_PSE_PORT_PPE1 : FE_PSE_PORT_DROP;
- u32 vip_port, cfg_addr;
+ struct airoha_eth *eth = port->qdma->eth;
+ u32 vip_port;
- switch (port) {
- case XSI_PCIE0_PORT:
+ switch (port->id) {
+ case 3:
+ /* FIXME: handle XSI_PCIE1_PORT */
vip_port = XSI_PCIE0_VIP_PORT_MASK;
- cfg_addr = REG_GDM_FWD_CFG(3);
- break;
- case XSI_PCIE1_PORT:
- vip_port = XSI_PCIE1_VIP_PORT_MASK;
- cfg_addr = REG_GDM_FWD_CFG(3);
- break;
- case XSI_USB_PORT:
- vip_port = XSI_USB_VIP_PORT_MASK;
- cfg_addr = REG_GDM_FWD_CFG(4);
break;
- case XSI_ETH_PORT:
+ case 4:
+ /* FIXME: handle XSI_USB_PORT */
vip_port = XSI_ETH_VIP_PORT_MASK;
- cfg_addr = REG_GDM_FWD_CFG(4);
break;
default:
- return -EINVAL;
+ return 0;
}
if (enable) {
@@ -1055,51 +131,17 @@ static int airoha_set_gdm_port(struct airoha_eth *eth, int port, bool enable)
airoha_fe_clear(eth, REG_FE_IFC_PORT_EN, vip_port);
}
- airoha_set_gdm_port_fwd_cfg(eth, cfg_addr, val);
-
return 0;
}
-static int airoha_set_gdm_ports(struct airoha_eth *eth, bool enable)
-{
- const int port_list[] = {
- XSI_PCIE0_PORT,
- XSI_PCIE1_PORT,
- XSI_USB_PORT,
- XSI_ETH_PORT
- };
- int i, err;
-
- for (i = 0; i < ARRAY_SIZE(port_list); i++) {
- err = airoha_set_gdm_port(eth, port_list[i], enable);
- if (err)
- goto error;
- }
-
- return 0;
-
-error:
- for (i--; i >= 0; i--)
- airoha_set_gdm_port(eth, port_list[i], false);
-
- return err;
-}
-
static void airoha_fe_maccr_init(struct airoha_eth *eth)
{
int p;
- for (p = 1; p <= ARRAY_SIZE(eth->ports); p++) {
+ for (p = 1; p <= ARRAY_SIZE(eth->ports); p++)
airoha_fe_set(eth, REG_GDM_FWD_CFG(p),
GDM_TCP_CKSUM | GDM_UDP_CKSUM | GDM_IP4_CKSUM |
GDM_DROP_CRC_ERR);
- airoha_set_gdm_port_fwd_cfg(eth, REG_GDM_FWD_CFG(p),
- FE_PSE_PORT_CDM1);
- airoha_fe_rmw(eth, REG_GDM_LEN_CFG(p),
- GDM_SHORT_LEN_MASK | GDM_LONG_LEN_MASK,
- FIELD_PREP(GDM_SHORT_LEN_MASK, 60) |
- FIELD_PREP(GDM_LONG_LEN_MASK, 4004));
- }
airoha_fe_rmw(eth, REG_CDM1_VLAN_CTRL, CDM1_VLAN_MASK,
FIELD_PREP(CDM1_VLAN_MASK, 0x8100));
@@ -1547,7 +589,7 @@ static int airoha_qdma_get_gdm_port(struct airoha_eth *eth,
sport = FIELD_GET(QDMA_ETH_RXMSG_SPORT_MASK, msg1);
switch (sport) {
- case 0x10 ... 0x13:
+ case 0x10 ... 0x14:
port = 0;
break;
case 0x2 ... 0x4:
@@ -1571,10 +613,12 @@ static int airoha_qdma_rx_process(struct airoha_queue *q, int budget)
while (done < budget) {
struct airoha_queue_entry *e = &q->entry[q->tail];
struct airoha_qdma_desc *desc = &q->desc[q->tail];
+ u32 hash, reason, msg1 = le32_to_cpu(desc->msg1);
dma_addr_t dma_addr = le32_to_cpu(desc->addr);
+ struct page *page = virt_to_head_page(e->buf);
u32 desc_ctrl = le32_to_cpu(desc->ctrl);
- struct sk_buff *skb;
- int len, p;
+ struct airoha_gdm_port *port;
+ int data_len, len, p;
if (!(desc_ctrl & QDMA_DESC_DONE_MASK))
break;
@@ -1592,32 +636,74 @@ static int airoha_qdma_rx_process(struct airoha_queue *q, int budget)
dma_sync_single_for_cpu(eth->dev, dma_addr,
SKB_WITH_OVERHEAD(q->buf_size), dir);
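+ /* The head buffer must reserve room for struct skb_shared_info, so only
+  * SKB_WITH_OVERHEAD() bytes are usable there; scatter-gather fragments
+  * can use the full buffer.
+  */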
+ data_len = q->skb ? q->buf_size
+ : SKB_WITH_OVERHEAD(q->buf_size);
+ if (data_len < len)
+ goto free_frag;
+
p = airoha_qdma_get_gdm_port(eth, desc);
- if (p < 0 || !eth->ports[p]) {
- page_pool_put_full_page(q->page_pool,
- virt_to_head_page(e->buf),
- true);
- continue;
+ if (p < 0 || !eth->ports[p])
+ goto free_frag;
+
+ port = eth->ports[p];
+ if (!q->skb) { /* first buffer */
+ q->skb = napi_build_skb(e->buf, q->buf_size);
+ if (!q->skb)
+ goto free_frag;
+
+ __skb_put(q->skb, len);
+ skb_mark_for_recycle(q->skb);
+ q->skb->dev = port->dev;
+ q->skb->protocol = eth_type_trans(q->skb, port->dev);
+ q->skb->ip_summed = CHECKSUM_UNNECESSARY;
+ skb_record_rx_queue(q->skb, qid);
+ } else { /* scattered frame */
+ struct skb_shared_info *shinfo = skb_shinfo(q->skb);
+ int nr_frags = shinfo->nr_frags;
+
+ if (nr_frags >= ARRAY_SIZE(shinfo->frags))
+ goto free_frag;
+
+ skb_add_rx_frag(q->skb, nr_frags, page,
+ e->buf - page_address(page), len,
+ q->buf_size);
}
- skb = napi_build_skb(e->buf, q->buf_size);
- if (!skb) {
- page_pool_put_full_page(q->page_pool,
- virt_to_head_page(e->buf),
- true);
- break;
+ if (FIELD_GET(QDMA_DESC_MORE_MASK, desc_ctrl))
+ continue;
+
+ if (netdev_uses_dsa(port->dev)) {
+ /* The PPE module requires untagged packets to work properly and provides
+  * the DSA port index via the DMA descriptor. Report the DSA tag to the
+  * DSA stack via the skb dst info.
+  */
+ u32 sptag = FIELD_GET(QDMA_ETH_RXMSG_SPTAG,
+ le32_to_cpu(desc->msg0));
+
+ if (sptag < ARRAY_SIZE(port->dsa_meta) &&
+ port->dsa_meta[sptag])
+ skb_dst_set_noref(q->skb,
+ &port->dsa_meta[sptag]->dst);
}
- skb_reserve(skb, 2);
- __skb_put(skb, len);
- skb_mark_for_recycle(skb);
- skb->dev = eth->ports[p]->dev;
- skb->protocol = eth_type_trans(skb, skb->dev);
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- skb_record_rx_queue(skb, qid);
- napi_gro_receive(&q->napi, skb);
+ hash = FIELD_GET(AIROHA_RXD4_FOE_ENTRY, msg1);
+ if (hash != AIROHA_RXD4_FOE_ENTRY)
+ skb_set_hash(q->skb, jhash_1word(hash, 0),
+ PKT_HASH_TYPE_L4);
+
+ reason = FIELD_GET(AIROHA_RXD4_PPE_CPU_REASON, msg1);
+ if (reason == PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED)
+ airoha_ppe_check_skb(eth->ppe, hash);
done++;
+ napi_gro_receive(&q->napi, q->skb);
+ q->skb = NULL;
+ continue;
+free_frag:
+ page_pool_put_full_page(q->page_pool, page, true);
+ dev_kfree_skb(q->skb);
+ q->skb = NULL;
}
airoha_qdma_fill_rx_queue(q);
@@ -1692,6 +778,7 @@ static int airoha_qdma_init_rx_queue(struct airoha_queue *q,
FIELD_PREP(RX_RING_THR_MASK, thr));
airoha_qdma_rmw(qdma, REG_RX_DMA_IDX(qid), RX_RING_DMA_IDX_MASK,
FIELD_PREP(RX_RING_DMA_IDX_MASK, q->head));
+ airoha_qdma_set(qdma, REG_RX_SCATTER_CFG(qid), RX_RING_SG_EN_MASK);
airoha_qdma_fill_rx_queue(q);
@@ -2091,7 +1178,6 @@ static int airoha_qdma_hw_init(struct airoha_qdma *qdma)
}
airoha_qdma_wr(qdma, REG_QDMA_GLOBAL_CFG,
- GLOBAL_CFG_RX_2B_OFFSET_MASK |
FIELD_PREP(GLOBAL_CFG_DMA_PREFERENCE_MASK, 3) |
GLOBAL_CFG_CPU_TXR_RR_MASK |
GLOBAL_CFG_PAYLOAD_BYTE_SWAP_MASK |
@@ -2235,6 +1321,10 @@ static int airoha_hw_init(struct platform_device *pdev,
return err;
}
+ err = airoha_ppe_init(eth);
+ if (err)
+ return err;
+
set_bit(DEV_STATE_INITIALIZED, &eth->state);
return 0;
@@ -2441,12 +1531,12 @@ static void airoha_update_hw_stats(struct airoha_gdm_port *port)
static int airoha_dev_open(struct net_device *dev)
{
+ int err, len = ETH_HLEN + dev->mtu + ETH_FCS_LEN;
struct airoha_gdm_port *port = netdev_priv(dev);
struct airoha_qdma *qdma = port->qdma;
- int err;
netif_tx_start_all_queues(dev);
- err = airoha_set_gdm_ports(qdma->eth, true);
+ err = airoha_set_vip_for_gdm_port(port, true);
if (err)
return err;
@@ -2457,9 +1547,15 @@ static int airoha_dev_open(struct net_device *dev)
airoha_fe_clear(qdma->eth, REG_GDM_INGRESS_CFG(port->id),
GDM_STAG_EN_MASK);
+ airoha_fe_rmw(qdma->eth, REG_GDM_LEN_CFG(port->id),
+ GDM_SHORT_LEN_MASK | GDM_LONG_LEN_MASK,
+ FIELD_PREP(GDM_SHORT_LEN_MASK, 60) |
+ FIELD_PREP(GDM_LONG_LEN_MASK, len));
+
airoha_qdma_set(qdma, REG_QDMA_GLOBAL_CFG,
GLOBAL_CFG_TX_DMA_EN_MASK |
GLOBAL_CFG_RX_DMA_EN_MASK);
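+ /* The QDMA engine is shared by all the GDM ports attached to it: count
+  * the active users so DMA is disabled only when the last port is stopped.
+  */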
+ atomic_inc(&qdma->users);
return 0;
}
@@ -2471,20 +1567,24 @@ static int airoha_dev_stop(struct net_device *dev)
int i, err;
netif_tx_disable(dev);
- err = airoha_set_gdm_ports(qdma->eth, false);
+ err = airoha_set_vip_for_gdm_port(port, false);
if (err)
return err;
- airoha_qdma_clear(qdma, REG_QDMA_GLOBAL_CFG,
- GLOBAL_CFG_TX_DMA_EN_MASK |
- GLOBAL_CFG_RX_DMA_EN_MASK);
+ for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++)
+ netdev_tx_reset_subqueue(dev, i);
- for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) {
- if (!qdma->q_tx[i].ndesc)
- continue;
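+ /* Disable DMA and clean up the shared tx queues only when the last user
+  * of this QDMA engine goes down.
+  */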
+ if (atomic_dec_and_test(&qdma->users)) {
+ airoha_qdma_clear(qdma, REG_QDMA_GLOBAL_CFG,
+ GLOBAL_CFG_TX_DMA_EN_MASK |
+ GLOBAL_CFG_RX_DMA_EN_MASK);
- airoha_qdma_cleanup_tx_queue(&qdma->q_tx[i]);
- netdev_tx_reset_subqueue(dev, i);
+ for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) {
+ if (!qdma->q_tx[i].ndesc)
+ continue;
+
+ airoha_qdma_cleanup_tx_queue(&qdma->q_tx[i]);
+ }
}
return 0;
@@ -2504,12 +1604,82 @@ static int airoha_dev_set_macaddr(struct net_device *dev, void *p)
return 0;
}
+static void airoha_set_gdm2_loopback(struct airoha_gdm_port *port)
+{
+ u32 pse_port = port->id == 3 ? FE_PSE_PORT_GDM3 : FE_PSE_PORT_GDM4;
+ struct airoha_eth *eth = port->qdma->eth;
+ u32 chan = port->id == 3 ? 4 : 0;
+
+ /* Forward the traffic to the proper GDM port */
+ airoha_set_gdm_port_fwd_cfg(eth, REG_GDM_FWD_CFG(2), pse_port);
+ airoha_fe_clear(eth, REG_GDM_FWD_CFG(2), GDM_STRIP_CRC);
+
+ /* Enable GDM2 loopback */
+ airoha_fe_wr(eth, REG_GDM_TXCHN_EN(2), 0xffffffff);
+ airoha_fe_wr(eth, REG_GDM_RXCHN_EN(2), 0xffff);
+ airoha_fe_rmw(eth, REG_GDM_LPBK_CFG(2),
+ LPBK_CHAN_MASK | LPBK_MODE_MASK | LPBK_EN_MASK,
+ FIELD_PREP(LPBK_CHAN_MASK, chan) | LPBK_EN_MASK);
+ airoha_fe_rmw(eth, REG_GDM_LEN_CFG(2),
+ GDM_SHORT_LEN_MASK | GDM_LONG_LEN_MASK,
+ FIELD_PREP(GDM_SHORT_LEN_MASK, 60) |
+ FIELD_PREP(GDM_LONG_LEN_MASK, AIROHA_MAX_MTU));
+
+ /* Disable VIP and IFC for GDM2 */
+ airoha_fe_clear(eth, REG_FE_VIP_PORT_EN, BIT(2));
+ airoha_fe_clear(eth, REG_FE_IFC_PORT_EN, BIT(2));
+
+ if (port->id == 3) {
+ /* FIXME: handle XSI_PCIE1_PORT */
+ airoha_fe_wr(eth, REG_PPE_DFT_CPORT0(0), 0x5500);
+ airoha_fe_rmw(eth, REG_FE_WAN_PORT,
+ WAN1_EN_MASK | WAN1_MASK | WAN0_MASK,
+ FIELD_PREP(WAN0_MASK, HSGMII_LAN_PCIE0_SRCPORT));
+ airoha_fe_rmw(eth,
+ REG_SP_DFT_CPORT(HSGMII_LAN_PCIE0_SRCPORT >> 3),
+ SP_CPORT_PCIE0_MASK,
+ FIELD_PREP(SP_CPORT_PCIE0_MASK,
+ FE_PSE_PORT_CDM2));
+ } else {
+ /* FIXME: handle XSI_USB_PORT */
+ airoha_fe_rmw(eth, REG_SRC_PORT_FC_MAP6,
+ FC_ID_OF_SRC_PORT24_MASK,
+ FIELD_PREP(FC_ID_OF_SRC_PORT24_MASK, 2));
+ airoha_fe_rmw(eth, REG_FE_WAN_PORT,
+ WAN1_EN_MASK | WAN1_MASK | WAN0_MASK,
+ FIELD_PREP(WAN0_MASK, HSGMII_LAN_ETH_SRCPORT));
+ airoha_fe_rmw(eth,
+ REG_SP_DFT_CPORT(HSGMII_LAN_ETH_SRCPORT >> 3),
+ SP_CPORT_ETH_MASK,
+ FIELD_PREP(SP_CPORT_ETH_MASK, FE_PSE_PORT_CDM2));
+ }
+}
+
static int airoha_dev_init(struct net_device *dev)
{
struct airoha_gdm_port *port = netdev_priv(dev);
+ struct airoha_eth *eth = port->qdma->eth;
+ u32 pse_port;
airoha_set_macaddr(port, dev->dev_addr);
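+ /* GDM1 traffic goes through PPE1 while the remaining ports are steered
+  * to PPE2.
+  */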
+ switch (port->id) {
+ case 3:
+ case 4:
+ /* If GDM2 is active we can't enable loopback */
+ if (!eth->ports[1])
+ airoha_set_gdm2_loopback(port);
+ fallthrough;
+ case 2:
+ pse_port = FE_PSE_PORT_PPE2;
+ break;
+ default:
+ pse_port = FE_PSE_PORT_PPE1;
+ break;
+ }
+
+ airoha_set_gdm_port_fwd_cfg(eth, REG_GDM_FWD_CFG(port->id), pse_port);
+
return 0;
}
@@ -2535,6 +1705,20 @@ static void airoha_dev_get_stats64(struct net_device *dev,
} while (u64_stats_fetch_retry(&port->stats.syncp, start));
}
+static int airoha_dev_change_mtu(struct net_device *dev, int mtu)
+{
+ struct airoha_gdm_port *port = netdev_priv(dev);
+ struct airoha_eth *eth = port->qdma->eth;
+ u32 len = ETH_HLEN + mtu + ETH_FCS_LEN;
+
+ airoha_fe_rmw(eth, REG_GDM_LEN_CFG(port->id),
+ GDM_LONG_LEN_MASK,
+ FIELD_PREP(GDM_LONG_LEN_MASK, len));
+ WRITE_ONCE(dev->mtu, mtu);
+
+ return 0;
+}
+
static u16 airoha_dev_select_queue(struct net_device *dev, struct sk_buff *skb,
struct net_device *sb_dev)
{
@@ -2553,26 +1737,71 @@ static u16 airoha_dev_select_queue(struct net_device *dev, struct sk_buff *skb,
return queue < dev->num_tx_queues ? queue : 0;
}
+static u32 airoha_get_dsa_tag(struct sk_buff *skb, struct net_device *dev)
+{
+#if IS_ENABLED(CONFIG_NET_DSA)
+ struct ethhdr *ehdr;
+ u8 xmit_tpid;
+ u16 tag;
+
+ if (!netdev_uses_dsa(dev))
+ return 0;
+
+ if (dev->dsa_ptr->tag_ops->proto != DSA_TAG_PROTO_MTK)
+ return 0;
+
+ if (skb_cow_head(skb, 0))
+ return 0;
+
+ ehdr = (struct ethhdr *)skb->data;
+ tag = be16_to_cpu(ehdr->h_proto);
+ xmit_tpid = tag >> 8;
+
+ switch (xmit_tpid) {
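+ /* The top byte of the MTK DSA tag encodes the transmit TPID: restore the
+  * VLAN ethertype for tagged frames or strip the DSA header for untagged
+  * ones. The tag itself is reported to the hw via the DMA descriptor.
+  */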
+ case MTK_HDR_XMIT_TAGGED_TPID_8100:
+ ehdr->h_proto = cpu_to_be16(ETH_P_8021Q);
+ tag &= ~(MTK_HDR_XMIT_TAGGED_TPID_8100 << 8);
+ break;
+ case MTK_HDR_XMIT_TAGGED_TPID_88A8:
+ ehdr->h_proto = cpu_to_be16(ETH_P_8021AD);
+ tag &= ~(MTK_HDR_XMIT_TAGGED_TPID_88A8 << 8);
+ break;
+ default:
+ /* PPE module requires untagged DSA packets to work properly,
+ * so move DSA tag to DMA descriptor.
+ */
+ memmove(skb->data + MTK_HDR_LEN, skb->data, 2 * ETH_ALEN);
+ __skb_pull(skb, MTK_HDR_LEN);
+ break;
+ }
+
+ return tag;
+#else
+ return 0;
+#endif
+}
+
static netdev_tx_t airoha_dev_xmit(struct sk_buff *skb,
struct net_device *dev)
{
- struct skb_shared_info *sinfo = skb_shinfo(skb);
struct airoha_gdm_port *port = netdev_priv(dev);
- u32 msg0, msg1, len = skb_headlen(skb);
struct airoha_qdma *qdma = port->qdma;
- u32 nr_frags = 1 + sinfo->nr_frags;
+ u32 nr_frags, tag, msg0, msg1, len;
struct netdev_queue *txq;
struct airoha_queue *q;
- void *data = skb->data;
+ void *data;
int i, qid;
u16 index;
u8 fport;
qid = skb_get_queue_mapping(skb) % ARRAY_SIZE(qdma->q_tx);
+ tag = airoha_get_dsa_tag(skb, dev);
+
msg0 = FIELD_PREP(QDMA_ETH_TXMSG_CHAN_MASK,
qid / AIROHA_NUM_QOS_QUEUES) |
FIELD_PREP(QDMA_ETH_TXMSG_QUEUE_MASK,
- qid % AIROHA_NUM_QOS_QUEUES);
+ qid % AIROHA_NUM_QOS_QUEUES) |
+ FIELD_PREP(QDMA_ETH_TXMSG_SP_TAG_MASK, tag);
if (skb->ip_summed == CHECKSUM_PARTIAL)
msg0 |= FIELD_PREP(QDMA_ETH_TXMSG_TCO_MASK, 1) |
FIELD_PREP(QDMA_ETH_TXMSG_UCO_MASK, 1) |
@@ -2583,8 +1812,9 @@ static netdev_tx_t airoha_dev_xmit(struct sk_buff *skb,
if (skb_cow_head(skb, 0))
goto error;
- if (sinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
- __be16 csum = cpu_to_be16(sinfo->gso_size);
+ if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 |
+ SKB_GSO_TCPV6)) {
+ __be16 csum = cpu_to_be16(skb_shinfo(skb)->gso_size);
tcp_hdr(skb)->check = (__force __sum16)csum;
msg0 |= FIELD_PREP(QDMA_ETH_TXMSG_TSO_MASK, 1);
@@ -2602,6 +1832,8 @@ static netdev_tx_t airoha_dev_xmit(struct sk_buff *skb,
spin_lock_bh(&q->lock);
txq = netdev_get_tx_queue(dev, qid);
+ nr_frags = 1 + skb_shinfo(skb)->nr_frags;
+
if (q->queued + nr_frags > q->ndesc) {
/* not enough space in the queue */
netif_tx_stop_queue(txq);
@@ -2609,11 +1841,14 @@ static netdev_tx_t airoha_dev_xmit(struct sk_buff *skb,
return NETDEV_TX_BUSY;
}
+ len = skb_headlen(skb);
+ data = skb->data;
index = q->head;
+
for (i = 0; i < nr_frags; i++) {
struct airoha_qdma_desc *desc = &q->desc[index];
struct airoha_queue_entry *e = &q->entry[index];
- skb_frag_t *frag = &sinfo->frags[i];
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
dma_addr_t addr;
u32 val;
@@ -3038,6 +2273,47 @@ static int airoha_tc_htb_alloc_leaf_queue(struct airoha_gdm_port *port,
return 0;
}
+static int airoha_dev_setup_tc_block(struct airoha_gdm_port *port,
+ struct flow_block_offload *f)
+{
+ flow_setup_cb_t *cb = airoha_ppe_setup_tc_block_cb;
+ static LIST_HEAD(block_cb_list);
+ struct flow_block_cb *block_cb;
+
+ if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
+ return -EOPNOTSUPP;
+
+ f->driver_block_list = &block_cb_list;
+ switch (f->command) {
+ case FLOW_BLOCK_BIND:
+ block_cb = flow_block_cb_lookup(f->block, cb, port->dev);
+ if (block_cb) {
+ flow_block_cb_incref(block_cb);
+ return 0;
+ }
+ block_cb = flow_block_cb_alloc(cb, port->dev, port->dev, NULL);
+ if (IS_ERR(block_cb))
+ return PTR_ERR(block_cb);
+
+ flow_block_cb_incref(block_cb);
+ flow_block_cb_add(block_cb, f);
+ list_add_tail(&block_cb->driver_list, &block_cb_list);
+ return 0;
+ case FLOW_BLOCK_UNBIND:
+ block_cb = flow_block_cb_lookup(f->block, cb, port->dev);
+ if (!block_cb)
+ return -ENOENT;
+
+ if (!flow_block_cb_decref(block_cb)) {
+ flow_block_cb_remove(block_cb, f);
+ list_del(&block_cb->driver_list);
+ }
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
static void airoha_tc_remove_htb_queue(struct airoha_gdm_port *port, int queue)
{
struct net_device *dev = port->dev;
@@ -3121,6 +2397,9 @@ static int airoha_dev_tc_setup(struct net_device *dev, enum tc_setup_type type,
return airoha_tc_setup_qdisc_ets(port, type_data);
case TC_SETUP_QDISC_HTB:
return airoha_tc_setup_qdisc_htb(port, type_data);
+ case TC_SETUP_BLOCK:
+ case TC_SETUP_FT:
+ return airoha_dev_setup_tc_block(port, type_data);
default:
return -EOPNOTSUPP;
}
@@ -3130,6 +2409,7 @@ static const struct net_device_ops airoha_netdev_ops = {
.ndo_init = airoha_dev_init,
.ndo_open = airoha_dev_open,
.ndo_stop = airoha_dev_stop,
+ .ndo_change_mtu = airoha_dev_change_mtu,
.ndo_select_queue = airoha_dev_select_queue,
.ndo_start_xmit = airoha_dev_xmit,
.ndo_get_stats64 = airoha_dev_get_stats64,
@@ -3143,13 +2423,45 @@ static const struct ethtool_ops airoha_ethtool_ops = {
.get_rmon_stats = airoha_ethtool_get_rmon_stats,
};
-static int airoha_alloc_gdm_port(struct airoha_eth *eth, struct device_node *np)
+static int airoha_metadata_dst_alloc(struct airoha_gdm_port *port)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(port->dsa_meta); i++) {
+ struct metadata_dst *md_dst;
+
+ md_dst = metadata_dst_alloc(0, METADATA_HW_PORT_MUX,
+ GFP_KERNEL);
+ if (!md_dst)
+ return -ENOMEM;
+
+ md_dst->u.port_info.port_id = i;
+ port->dsa_meta[i] = md_dst;
+ }
+
+ return 0;
+}
+
+static void airoha_metadata_dst_free(struct airoha_gdm_port *port)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(port->dsa_meta); i++) {
+ if (!port->dsa_meta[i])
+ continue;
+
+ metadata_dst_free(port->dsa_meta[i]);
+ }
+}
+
+static int airoha_alloc_gdm_port(struct airoha_eth *eth,
+ struct device_node *np, int index)
{
const __be32 *id_ptr = of_get_property(np, "reg", NULL);
struct airoha_gdm_port *port;
struct airoha_qdma *qdma;
struct net_device *dev;
- int err, index;
+ int err, p;
u32 id;
if (!id_ptr) {
@@ -3158,14 +2470,14 @@ static int airoha_alloc_gdm_port(struct airoha_eth *eth, struct device_node *np)
}
id = be32_to_cpup(id_ptr);
- index = id - 1;
+ p = id - 1;
if (!id || id > ARRAY_SIZE(eth->ports)) {
dev_err(eth->dev, "invalid gdm port id: %d\n", id);
return -EINVAL;
}
- if (eth->ports[index]) {
+ if (eth->ports[p]) {
dev_err(eth->dev, "duplicate gdm port id: %d\n", id);
return -EINVAL;
}
@@ -3188,6 +2500,7 @@ static int airoha_alloc_gdm_port(struct airoha_eth *eth, struct device_node *np)
NETIF_F_SG | NETIF_F_TSO |
NETIF_F_HW_TC;
dev->features |= dev->hw_features;
+ dev->vlan_features = dev->hw_features;
dev->dev.of_node = np;
dev->irq = qdma->irq;
SET_NETDEV_DEV(dev, eth->dev);
@@ -3213,7 +2526,11 @@ static int airoha_alloc_gdm_port(struct airoha_eth *eth, struct device_node *np)
port->qdma = qdma;
port->dev = dev;
port->id = id;
- eth->ports[index] = port;
+ eth->ports[p] = port;
+
+ err = airoha_metadata_dst_alloc(port);
+ if (err)
+ return err;
return register_netdev(dev);
}
@@ -3281,6 +2598,7 @@ static int airoha_probe(struct platform_device *pdev)
for (i = 0; i < ARRAY_SIZE(eth->qdma); i++)
airoha_qdma_start_napi(&eth->qdma[i]);
+ i = 0;
for_each_child_of_node(pdev->dev.of_node, np) {
if (!of_device_is_compatible(np, "airoha,eth-mac"))
continue;
@@ -3288,7 +2606,7 @@ static int airoha_probe(struct platform_device *pdev)
if (!of_device_is_available(np))
continue;
- err = airoha_alloc_gdm_port(eth, np);
+ err = airoha_alloc_gdm_port(eth, np, i++);
if (err) {
of_node_put(np);
goto error_napi_stop;
@@ -3307,8 +2625,10 @@ error_hw_cleanup:
for (i = 0; i < ARRAY_SIZE(eth->ports); i++) {
struct airoha_gdm_port *port = eth->ports[i];
- if (port && port->dev->reg_state == NETREG_REGISTERED)
+ if (port && port->dev->reg_state == NETREG_REGISTERED) {
unregister_netdev(port->dev);
+ airoha_metadata_dst_free(port);
+ }
}
free_netdev(eth->napi_dev);
platform_set_drvdata(pdev, NULL);
@@ -3334,9 +2654,11 @@ static void airoha_remove(struct platform_device *pdev)
airoha_dev_stop(port->dev);
unregister_netdev(port->dev);
+ airoha_metadata_dst_free(port);
}
free_netdev(eth->napi_dev);
+ airoha_ppe_deinit(eth);
platform_set_drvdata(pdev, NULL);
}
diff --git a/drivers/net/ethernet/airoha/airoha_eth.h b/drivers/net/ethernet/airoha/airoha_eth.h
new file mode 100644
index 000000000000..60690b685710
--- /dev/null
+++ b/drivers/net/ethernet/airoha/airoha_eth.h
@@ -0,0 +1,552 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2024 AIROHA Inc
+ * Author: Lorenzo Bianconi <lorenzo@kernel.org>
+ */
+
+#ifndef AIROHA_ETH_H
+#define AIROHA_ETH_H
+
+#include <linux/debugfs.h>
+#include <linux/etherdevice.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/reset.h>
+#include <net/dsa.h>
+
+#define AIROHA_MAX_NUM_GDM_PORTS 4
+#define AIROHA_MAX_NUM_QDMA 2
+#define AIROHA_MAX_DSA_PORTS 7
+#define AIROHA_MAX_NUM_RSTS 3
+#define AIROHA_MAX_NUM_XSI_RSTS 5
+#define AIROHA_MAX_MTU 9216
+#define AIROHA_MAX_PACKET_SIZE 2048
+#define AIROHA_NUM_QOS_CHANNELS 4
+#define AIROHA_NUM_QOS_QUEUES 8
+#define AIROHA_NUM_TX_RING 32
+#define AIROHA_NUM_RX_RING 32
+#define AIROHA_NUM_NETDEV_TX_RINGS (AIROHA_NUM_TX_RING + \
+ AIROHA_NUM_QOS_CHANNELS)
+#define AIROHA_FE_MC_MAX_VLAN_TABLE 64
+#define AIROHA_FE_MC_MAX_VLAN_PORT 16
+#define AIROHA_NUM_TX_IRQ 2
+#define HW_DSCP_NUM 2048
+#define IRQ_QUEUE_LEN(_n) ((_n) ? 1024 : 2048)
+#define TX_DSCP_NUM 1024
+#define RX_DSCP_NUM(_n) \
+ ((_n) == 2 ? 128 : \
+ (_n) == 11 ? 128 : \
+ (_n) == 15 ? 128 : \
+ (_n) == 0 ? 1024 : 16)
+
+#define PSE_RSV_PAGES 128
+#define PSE_QUEUE_RSV_PAGES 64
+
+#define QDMA_METER_IDX(_n) ((_n) & 0xff)
+#define QDMA_METER_GROUP(_n) (((_n) >> 8) & 0x3)
+
+#define PPE_NUM 2
+#define PPE1_SRAM_NUM_ENTRIES (8 * 1024)
+#define PPE_SRAM_NUM_ENTRIES (2 * PPE1_SRAM_NUM_ENTRIES)
+#define PPE_DRAM_NUM_ENTRIES (16 * 1024)
+#define PPE_NUM_ENTRIES (PPE_SRAM_NUM_ENTRIES + PPE_DRAM_NUM_ENTRIES)
+#define PPE_HASH_MASK (PPE_NUM_ENTRIES - 1)
+#define PPE_ENTRY_SIZE 80
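+/* PPE table sizes are programmed as log2 of the number of 1024-entry blocks */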
+#define PPE_RAM_NUM_ENTRIES_SHIFT(_n) (__ffs((_n) >> 10))
+
+#define MTK_HDR_LEN 4
+#define MTK_HDR_XMIT_TAGGED_TPID_8100 1
+#define MTK_HDR_XMIT_TAGGED_TPID_88A8 2
+
+enum {
+ QDMA_INT_REG_IDX0,
+ QDMA_INT_REG_IDX1,
+ QDMA_INT_REG_IDX2,
+ QDMA_INT_REG_IDX3,
+ QDMA_INT_REG_IDX4,
+ QDMA_INT_REG_MAX
+};
+
+enum {
+ HSGMII_LAN_PCIE0_SRCPORT = 0x16,
+ HSGMII_LAN_PCIE1_SRCPORT,
+ HSGMII_LAN_ETH_SRCPORT,
+ HSGMII_LAN_USB_SRCPORT,
+};
+
+enum {
+ XSI_PCIE0_VIP_PORT_MASK = BIT(22),
+ XSI_PCIE1_VIP_PORT_MASK = BIT(23),
+ XSI_USB_VIP_PORT_MASK = BIT(25),
+ XSI_ETH_VIP_PORT_MASK = BIT(24),
+};
+
+enum {
+ DEV_STATE_INITIALIZED,
+};
+
+enum {
+ CDM_CRSN_QSEL_Q1 = 1,
+ CDM_CRSN_QSEL_Q5 = 5,
+ CDM_CRSN_QSEL_Q6 = 6,
+ CDM_CRSN_QSEL_Q15 = 15,
+};
+
+enum {
+ CRSN_08 = 0x8,
+ CRSN_21 = 0x15, /* KA */
+ CRSN_22 = 0x16, /* hit bind and force route to CPU */
+ CRSN_24 = 0x18,
+ CRSN_25 = 0x19,
+};
+
+enum {
+ FE_PSE_PORT_CDM1,
+ FE_PSE_PORT_GDM1,
+ FE_PSE_PORT_GDM2,
+ FE_PSE_PORT_GDM3,
+ FE_PSE_PORT_PPE1,
+ FE_PSE_PORT_CDM2,
+ FE_PSE_PORT_CDM3,
+ FE_PSE_PORT_CDM4,
+ FE_PSE_PORT_PPE2,
+ FE_PSE_PORT_GDM4,
+ FE_PSE_PORT_CDM5,
+ FE_PSE_PORT_DROP = 0xf,
+};
+
+enum tx_sched_mode {
+ TC_SCH_WRR8,
+ TC_SCH_SP,
+ TC_SCH_WRR7,
+ TC_SCH_WRR6,
+ TC_SCH_WRR5,
+ TC_SCH_WRR4,
+ TC_SCH_WRR3,
+ TC_SCH_WRR2,
+};
+
+enum trtcm_param_type {
+ TRTCM_MISC_MODE, /* meter_en, pps_mode, tick_sel */
+ TRTCM_TOKEN_RATE_MODE,
+ TRTCM_BUCKETSIZE_SHIFT_MODE,
+ TRTCM_BUCKET_COUNTER_MODE,
+};
+
+enum trtcm_mode_type {
+ TRTCM_COMMIT_MODE,
+ TRTCM_PEAK_MODE,
+};
+
+enum trtcm_param {
+ TRTCM_TICK_SEL = BIT(0),
+ TRTCM_PKT_MODE = BIT(1),
+ TRTCM_METER_MODE = BIT(2),
+};
+
+#define MIN_TOKEN_SIZE 4096
+#define MAX_TOKEN_SIZE_OFFSET 17
+#define TRTCM_TOKEN_RATE_MASK GENMASK(23, 6)
+#define TRTCM_TOKEN_RATE_FRACTION_MASK GENMASK(5, 0)
+
+struct airoha_queue_entry {
+ union {
+ void *buf;
+ struct sk_buff *skb;
+ };
+ dma_addr_t dma_addr;
+ u16 dma_len;
+};
+
+struct airoha_queue {
+ struct airoha_qdma *qdma;
+
+ /* protect concurrent queue accesses */
+ spinlock_t lock;
+ struct airoha_queue_entry *entry;
+ struct airoha_qdma_desc *desc;
+ u16 head;
+ u16 tail;
+
+ int queued;
+ int ndesc;
+ int free_thr;
+ int buf_size;
+
+ struct napi_struct napi;
+ struct page_pool *page_pool;
+ struct sk_buff *skb;
+};
+
+struct airoha_tx_irq_queue {
+ struct airoha_qdma *qdma;
+
+ struct napi_struct napi;
+
+ int size;
+ u32 *q;
+};
+
+struct airoha_hw_stats {
+ /* protect concurrent hw_stats accesses */
+ spinlock_t lock;
+ struct u64_stats_sync syncp;
+
+ /* get_stats64 */
+ u64 rx_ok_pkts;
+ u64 tx_ok_pkts;
+ u64 rx_ok_bytes;
+ u64 tx_ok_bytes;
+ u64 rx_multicast;
+ u64 rx_errors;
+ u64 rx_drops;
+ u64 tx_drops;
+ u64 rx_crc_error;
+ u64 rx_over_errors;
+ /* ethtool stats */
+ u64 tx_broadcast;
+ u64 tx_multicast;
+ u64 tx_len[7];
+ u64 rx_broadcast;
+ u64 rx_fragment;
+ u64 rx_jabber;
+ u64 rx_len[7];
+};
+
+enum {
+ PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED = 0x0f,
+};
+
+enum {
+ AIROHA_FOE_STATE_INVALID,
+ AIROHA_FOE_STATE_UNBIND,
+ AIROHA_FOE_STATE_BIND,
+ AIROHA_FOE_STATE_FIN
+};
+
+enum {
+ PPE_PKT_TYPE_IPV4_HNAPT = 0,
+ PPE_PKT_TYPE_IPV4_ROUTE = 1,
+ PPE_PKT_TYPE_BRIDGE = 2,
+ PPE_PKT_TYPE_IPV4_DSLITE = 3,
+ PPE_PKT_TYPE_IPV6_ROUTE_3T = 4,
+ PPE_PKT_TYPE_IPV6_ROUTE_5T = 5,
+ PPE_PKT_TYPE_IPV6_6RD = 7,
+};
+
+#define AIROHA_FOE_MAC_SMAC_ID GENMASK(20, 16)
+#define AIROHA_FOE_MAC_PPPOE_ID GENMASK(15, 0)
+
+struct airoha_foe_mac_info_common {
+ u16 vlan1;
+ u16 etype;
+
+ u32 dest_mac_hi;
+
+ u16 vlan2;
+ u16 dest_mac_lo;
+
+ u32 src_mac_hi;
+};
+
+struct airoha_foe_mac_info {
+ struct airoha_foe_mac_info_common common;
+
+ u16 pppoe_id;
+ u16 src_mac_lo;
+};
+
+#define AIROHA_FOE_IB1_UNBIND_PREBIND BIT(24)
+#define AIROHA_FOE_IB1_UNBIND_PACKETS GENMASK(23, 8)
+#define AIROHA_FOE_IB1_UNBIND_TIMESTAMP GENMASK(7, 0)
+
+#define AIROHA_FOE_IB1_BIND_STATIC BIT(31)
+#define AIROHA_FOE_IB1_BIND_UDP BIT(30)
+#define AIROHA_FOE_IB1_BIND_STATE GENMASK(29, 28)
+#define AIROHA_FOE_IB1_BIND_PACKET_TYPE GENMASK(27, 25)
+#define AIROHA_FOE_IB1_BIND_TTL BIT(24)
+#define AIROHA_FOE_IB1_BIND_TUNNEL_DECAP BIT(23)
+#define AIROHA_FOE_IB1_BIND_PPPOE BIT(22)
+#define AIROHA_FOE_IB1_BIND_VPM GENMASK(21, 20)
+#define AIROHA_FOE_IB1_BIND_VLAN_LAYER GENMASK(19, 16)
+#define AIROHA_FOE_IB1_BIND_KEEPALIVE BIT(15)
+#define AIROHA_FOE_IB1_BIND_TIMESTAMP GENMASK(14, 0)
+
+#define AIROHA_FOE_IB2_DSCP GENMASK(31, 24)
+#define AIROHA_FOE_IB2_PORT_AG GENMASK(23, 13)
+#define AIROHA_FOE_IB2_PCP BIT(12)
+#define AIROHA_FOE_IB2_MULTICAST BIT(11)
+#define AIROHA_FOE_IB2_FAST_PATH BIT(10)
+#define AIROHA_FOE_IB2_PSE_QOS BIT(9)
+#define AIROHA_FOE_IB2_PSE_PORT GENMASK(8, 5)
+#define AIROHA_FOE_IB2_NBQ GENMASK(4, 0)
+
+#define AIROHA_FOE_ACTDP GENMASK(31, 24)
+#define AIROHA_FOE_SHAPER_ID GENMASK(23, 16)
+#define AIROHA_FOE_CHANNEL GENMASK(15, 11)
+#define AIROHA_FOE_QID GENMASK(10, 8)
+#define AIROHA_FOE_DPI BIT(7)
+#define AIROHA_FOE_TUNNEL BIT(6)
+#define AIROHA_FOE_TUNNEL_ID GENMASK(5, 0)
+
+struct airoha_foe_bridge {
+ u32 dest_mac_hi;
+
+ u16 src_mac_hi;
+ u16 dest_mac_lo;
+
+ u32 src_mac_lo;
+
+ u32 ib2;
+
+ u32 rsv[5];
+
+ u32 data;
+
+ struct airoha_foe_mac_info l2;
+};
+
+struct airoha_foe_ipv4_tuple {
+ u32 src_ip;
+ u32 dest_ip;
+ union {
+ struct {
+ u16 dest_port;
+ u16 src_port;
+ };
+ struct {
+ u8 protocol;
+ u8 _pad[3]; /* fill with 0xa5a5a5 */
+ };
+ u32 ports;
+ };
+};
+
+struct airoha_foe_ipv4 {
+ struct airoha_foe_ipv4_tuple orig_tuple;
+
+ u32 ib2;
+
+ struct airoha_foe_ipv4_tuple new_tuple;
+
+ u32 rsv[2];
+
+ u32 data;
+
+ struct airoha_foe_mac_info l2;
+};
+
+struct airoha_foe_ipv4_dslite {
+ struct airoha_foe_ipv4_tuple ip4;
+
+ u32 ib2;
+
+ u8 flow_label[3];
+ u8 priority;
+
+ u32 rsv[4];
+
+ u32 data;
+
+ struct airoha_foe_mac_info l2;
+};
+
+struct airoha_foe_ipv6 {
+ u32 src_ip[4];
+ u32 dest_ip[4];
+
+ union {
+ struct {
+ u16 dest_port;
+ u16 src_port;
+ };
+ struct {
+ u8 protocol;
+ u8 pad[3];
+ };
+ u32 ports;
+ };
+
+ u32 data;
+
+ u32 ib2;
+
+ struct airoha_foe_mac_info_common l2;
+};
+
+struct airoha_foe_entry {
+ union {
+ struct {
+ u32 ib1;
+ union {
+ struct airoha_foe_bridge bridge;
+ struct airoha_foe_ipv4 ipv4;
+ struct airoha_foe_ipv4_dslite dslite;
+ struct airoha_foe_ipv6 ipv6;
+ DECLARE_FLEX_ARRAY(u32, d);
+ };
+ };
+ u8 data[PPE_ENTRY_SIZE];
+ };
+};
+
+struct airoha_flow_data {
+ struct ethhdr eth;
+
+ union {
+ struct {
+ __be32 src_addr;
+ __be32 dst_addr;
+ } v4;
+
+ struct {
+ struct in6_addr src_addr;
+ struct in6_addr dst_addr;
+ } v6;
+ };
+
+ __be16 src_port;
+ __be16 dst_port;
+
+ struct {
+ struct {
+ u16 id;
+ __be16 proto;
+ } hdr[2];
+ u8 num;
+ } vlan;
+ struct {
+ u16 sid;
+ u8 num;
+ } pppoe;
+};
+
+struct airoha_flow_table_entry {
+ struct hlist_node list;
+
+ struct airoha_foe_entry data;
+ u32 hash;
+
+ struct rhash_head node;
+ unsigned long cookie;
+};
+
+struct airoha_qdma {
+ struct airoha_eth *eth;
+ void __iomem *regs;
+
+ /* protect concurrent irqmask accesses */
+ spinlock_t irq_lock;
+ u32 irqmask[QDMA_INT_REG_MAX];
+ int irq;
+
+ atomic_t users;
+
+ struct airoha_tx_irq_queue q_tx_irq[AIROHA_NUM_TX_IRQ];
+
+ struct airoha_queue q_tx[AIROHA_NUM_TX_RING];
+ struct airoha_queue q_rx[AIROHA_NUM_RX_RING];
+
+ /* descriptor and packet buffers for qdma hw forward */
+ struct {
+ void *desc;
+ void *q;
+ } hfwd;
+};
+
+struct airoha_gdm_port {
+ struct airoha_qdma *qdma;
+ struct net_device *dev;
+ int id;
+
+ struct airoha_hw_stats stats;
+
+ DECLARE_BITMAP(qos_sq_bmap, AIROHA_NUM_QOS_CHANNELS);
+
+ /* qos stats counters */
+ u64 cpu_tx_packets;
+ u64 fwd_tx_packets;
+
+ struct metadata_dst *dsa_meta[AIROHA_MAX_DSA_PORTS];
+};
+
+#define AIROHA_RXD4_PPE_CPU_REASON GENMASK(20, 16)
+#define AIROHA_RXD4_FOE_ENTRY GENMASK(15, 0)
+
+struct airoha_ppe {
+ struct airoha_eth *eth;
+
+ void *foe;
+ dma_addr_t foe_dma;
+
+ struct hlist_head *foe_flow;
+ u16 foe_check_time[PPE_NUM_ENTRIES];
+
+ struct dentry *debugfs_dir;
+};
+
+struct airoha_eth {
+ struct device *dev;
+
+ unsigned long state;
+ void __iomem *fe_regs;
+
+ struct airoha_npu __rcu *npu;
+
+ struct airoha_ppe *ppe;
+ struct rhashtable flow_table;
+
+ struct reset_control_bulk_data rsts[AIROHA_MAX_NUM_RSTS];
+ struct reset_control_bulk_data xsi_rsts[AIROHA_MAX_NUM_XSI_RSTS];
+
+ struct net_device *napi_dev;
+
+ struct airoha_qdma qdma[AIROHA_MAX_NUM_QDMA];
+ struct airoha_gdm_port *ports[AIROHA_MAX_NUM_GDM_PORTS];
+};
+
+u32 airoha_rr(void __iomem *base, u32 offset);
+void airoha_wr(void __iomem *base, u32 offset, u32 val);
+u32 airoha_rmw(void __iomem *base, u32 offset, u32 mask, u32 val);
+
+#define airoha_fe_rr(eth, offset) \
+ airoha_rr((eth)->fe_regs, (offset))
+#define airoha_fe_wr(eth, offset, val) \
+ airoha_wr((eth)->fe_regs, (offset), (val))
+#define airoha_fe_rmw(eth, offset, mask, val) \
+ airoha_rmw((eth)->fe_regs, (offset), (mask), (val))
+#define airoha_fe_set(eth, offset, val) \
+ airoha_rmw((eth)->fe_regs, (offset), 0, (val))
+#define airoha_fe_clear(eth, offset, val) \
+ airoha_rmw((eth)->fe_regs, (offset), (val), 0)
+
+#define airoha_qdma_rr(qdma, offset) \
+ airoha_rr((qdma)->regs, (offset))
+#define airoha_qdma_wr(qdma, offset, val) \
+ airoha_wr((qdma)->regs, (offset), (val))
+#define airoha_qdma_rmw(qdma, offset, mask, val) \
+ airoha_rmw((qdma)->regs, (offset), (mask), (val))
+#define airoha_qdma_set(qdma, offset, val) \
+ airoha_rmw((qdma)->regs, (offset), 0, (val))
+#define airoha_qdma_clear(qdma, offset, val) \
+ airoha_rmw((qdma)->regs, (offset), (val), 0)
+
+void airoha_ppe_check_skb(struct airoha_ppe *ppe, u16 hash);
+int airoha_ppe_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
+ void *cb_priv);
+int airoha_ppe_init(struct airoha_eth *eth);
+void airoha_ppe_deinit(struct airoha_eth *eth);
+struct airoha_foe_entry *airoha_ppe_foe_get_entry(struct airoha_ppe *ppe,
+ u32 hash);
+
+#ifdef CONFIG_DEBUG_FS
+int airoha_ppe_debugfs_init(struct airoha_ppe *ppe);
+#else
+static inline int airoha_ppe_debugfs_init(struct airoha_ppe *ppe)
+{
+ return 0;
+}
+#endif
+
+#endif /* AIROHA_ETH_H */
diff --git a/drivers/net/ethernet/airoha/airoha_npu.c b/drivers/net/ethernet/airoha/airoha_npu.c
new file mode 100644
index 000000000000..7a5710f9ccf6
--- /dev/null
+++ b/drivers/net/ethernet/airoha/airoha_npu.c
@@ -0,0 +1,520 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025 AIROHA Inc
+ * Author: Lorenzo Bianconi <lorenzo@kernel.org>
+ */
+
+#include <linux/devcoredump.h>
+#include <linux/firmware.h>
+#include <linux/platform_device.h>
+#include <linux/of_net.h>
+#include <linux/of_platform.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/regmap.h>
+
+#include "airoha_npu.h"
+
+#define NPU_EN7581_FIRMWARE_DATA "airoha/en7581_npu_data.bin"
+#define NPU_EN7581_FIRMWARE_RV32 "airoha/en7581_npu_rv32.bin"
+#define NPU_EN7581_FIRMWARE_RV32_MAX_SIZE 0x200000
+#define NPU_EN7581_FIRMWARE_DATA_MAX_SIZE 0x10000
+#define NPU_DUMP_SIZE 512
+
+#define REG_NPU_LOCAL_SRAM 0x0
+
+#define NPU_PC_BASE_ADDR 0x305000
+#define REG_PC_DBG(_n) (NPU_PC_BASE_ADDR + ((_n) * 0x100))
+
+#define NPU_CLUSTER_BASE_ADDR 0x306000
+
+#define REG_CR_BOOT_TRIGGER (NPU_CLUSTER_BASE_ADDR + 0x000)
+#define REG_CR_BOOT_CONFIG (NPU_CLUSTER_BASE_ADDR + 0x004)
+#define REG_CR_BOOT_BASE(_n) (NPU_CLUSTER_BASE_ADDR + 0x020 + ((_n) << 2))
+
+#define NPU_MBOX_BASE_ADDR 0x30c000
+
+#define REG_CR_MBOX_INT_STATUS (NPU_MBOX_BASE_ADDR + 0x000)
+#define MBOX_INT_STATUS_MASK BIT(8)
+
+#define REG_CR_MBOX_INT_MASK(_n) (NPU_MBOX_BASE_ADDR + 0x004 + ((_n) << 2))
+#define REG_CR_MBQ0_CTRL(_n) (NPU_MBOX_BASE_ADDR + 0x030 + ((_n) << 2))
+#define REG_CR_MBQ8_CTRL(_n) (NPU_MBOX_BASE_ADDR + 0x0b0 + ((_n) << 2))
+#define REG_CR_NPU_MIB(_n) (NPU_MBOX_BASE_ADDR + 0x140 + ((_n) << 2))
+
+#define NPU_TIMER_BASE_ADDR 0x310100
+#define REG_WDT_TIMER_CTRL(_n) (NPU_TIMER_BASE_ADDR + ((_n) * 0x100))
+#define WDT_EN_MASK BIT(25)
+#define WDT_INTR_MASK BIT(21)
+
+enum {
+ NPU_OP_SET = 1,
+ NPU_OP_SET_NO_WAIT,
+ NPU_OP_GET,
+ NPU_OP_GET_NO_WAIT,
+};
+
+enum {
+ NPU_FUNC_WIFI,
+ NPU_FUNC_TUNNEL,
+ NPU_FUNC_NOTIFY,
+ NPU_FUNC_DBA,
+ NPU_FUNC_TR471,
+ NPU_FUNC_PPE,
+};
+
+enum {
+ NPU_MBOX_ERROR,
+ NPU_MBOX_SUCCESS,
+};
+
+enum {
+ PPE_FUNC_SET_WAIT,
+ PPE_FUNC_SET_WAIT_HWNAT_INIT,
+ PPE_FUNC_SET_WAIT_HWNAT_DEINIT,
+ PPE_FUNC_SET_WAIT_API,
+};
+
+enum {
+ PPE2_SRAM_SET_ENTRY,
+ PPE_SRAM_SET_ENTRY,
+ PPE_SRAM_SET_VAL,
+ PPE_SRAM_RESET_VAL,
+};
+
+enum {
+ QDMA_WAN_ETHER = 1,
+ QDMA_WAN_PON_XDSL,
+};
+
+#define MBOX_MSG_FUNC_ID GENMASK(14, 11)
+#define MBOX_MSG_STATIC_BUF BIT(5)
+#define MBOX_MSG_STATUS GENMASK(4, 2)
+#define MBOX_MSG_DONE BIT(1)
+#define MBOX_MSG_WAIT_RSP BIT(0)
+
+#define PPE_TYPE_L2B_IPV4 2
+#define PPE_TYPE_L2B_IPV4_IPV6 3
+
+struct ppe_mbox_data {
+ u32 func_type;
+ u32 func_id;
+ union {
+ struct {
+ u8 cds;
+ u8 xpon_hal_api;
+ u8 wan_xsi;
+ u8 ct_joyme4;
+ int ppe_type;
+ int wan_mode;
+ int wan_sel;
+ } init_info;
+ struct {
+ int func_id;
+ u32 size;
+ u32 data;
+ } set_info;
+ };
+};
+
+static int airoha_npu_send_msg(struct airoha_npu *npu, int func_id,
+ void *p, int size)
+{
+ u16 core = 0; /* FIXME */
+ u32 val, offset = core << 4;
+ dma_addr_t dma_addr;
+ void *addr;
+ int ret;
+
+ addr = kmemdup(p, size, GFP_ATOMIC);
+ if (!addr)
+ return -ENOMEM;
+
+ dma_addr = dma_map_single(npu->dev, addr, size, DMA_TO_DEVICE);
+ ret = dma_mapping_error(npu->dev, dma_addr);
+ if (ret)
+ goto out;
+
+ spin_lock_bh(&npu->cores[core].lock);
+
+ regmap_write(npu->regmap, REG_CR_MBQ0_CTRL(0) + offset, dma_addr);
+ regmap_write(npu->regmap, REG_CR_MBQ0_CTRL(1) + offset, size);
+ regmap_read(npu->regmap, REG_CR_MBQ0_CTRL(2) + offset, &val);
+ regmap_write(npu->regmap, REG_CR_MBQ0_CTRL(2) + offset, val + 1);
+ val = FIELD_PREP(MBOX_MSG_FUNC_ID, func_id) | MBOX_MSG_WAIT_RSP;
+ regmap_write(npu->regmap, REG_CR_MBQ0_CTRL(3) + offset, val);
+
+ ret = regmap_read_poll_timeout_atomic(npu->regmap,
+ REG_CR_MBQ0_CTRL(3) + offset,
+ val, (val & MBOX_MSG_DONE),
+ 100, 100 * MSEC_PER_SEC);
+ if (!ret && FIELD_GET(MBOX_MSG_STATUS, val) != NPU_MBOX_SUCCESS)
+ ret = -EINVAL;
+
+ spin_unlock_bh(&npu->cores[core].lock);
+
+ dma_unmap_single(npu->dev, dma_addr, size, DMA_TO_DEVICE);
+out:
+ kfree(addr);
+
+ return ret;
+}
+
+static int airoha_npu_run_firmware(struct device *dev, void __iomem *base,
+ struct reserved_mem *rmem)
+{
+ const struct firmware *fw;
+ void __iomem *addr;
+ int ret;
+
+ ret = request_firmware(&fw, NPU_EN7581_FIRMWARE_RV32, dev);
+ if (ret)
+ return ret == -ENOENT ? -EPROBE_DEFER : ret;
+
+ if (fw->size > NPU_EN7581_FIRMWARE_RV32_MAX_SIZE) {
+ dev_err(dev, "%s: fw size too overlimit (%zu)\n",
+ NPU_EN7581_FIRMWARE_RV32, fw->size);
+ ret = -E2BIG;
+ goto out;
+ }
+
+ addr = devm_ioremap(dev, rmem->base, rmem->size);
+ if (!addr) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ memcpy_toio(addr, fw->data, fw->size);
+ release_firmware(fw);
+
+ ret = request_firmware(&fw, NPU_EN7581_FIRMWARE_DATA, dev);
+ if (ret)
+ return ret == -ENOENT ? -EPROBE_DEFER : ret;
+
+ if (fw->size > NPU_EN7581_FIRMWARE_DATA_MAX_SIZE) {
+ dev_err(dev, "%s: fw size too overlimit (%zu)\n",
+ NPU_EN7581_FIRMWARE_DATA, fw->size);
+ ret = -E2BIG;
+ goto out;
+ }
+
+ memcpy_toio(base + REG_NPU_LOCAL_SRAM, fw->data, fw->size);
+out:
+ release_firmware(fw);
+
+ return ret;
+}
+
+static irqreturn_t airoha_npu_mbox_handler(int irq, void *npu_instance)
+{
+ struct airoha_npu *npu = npu_instance;
+
+ /* clear mbox interrupt status */
+ regmap_write(npu->regmap, REG_CR_MBOX_INT_STATUS,
+ MBOX_INT_STATUS_MASK);
+
+ /* acknowledge npu */
+ regmap_update_bits(npu->regmap, REG_CR_MBQ8_CTRL(3),
+ MBOX_MSG_STATUS | MBOX_MSG_DONE, MBOX_MSG_DONE);
+
+ return IRQ_HANDLED;
+}
+
+static void airoha_npu_wdt_work(struct work_struct *work)
+{
+ struct airoha_npu_core *core;
+ struct airoha_npu *npu;
+ void *dump;
+ u32 val[3];
+ int c;
+
+ core = container_of(work, struct airoha_npu_core, wdt_work);
+ npu = core->npu;
+
+ dump = vzalloc(NPU_DUMP_SIZE);
+ if (!dump)
+ return;
+
+ c = core - &npu->cores[0];
+ regmap_bulk_read(npu->regmap, REG_PC_DBG(c), val, ARRAY_SIZE(val));
+ snprintf(dump, NPU_DUMP_SIZE, "PC: %08x SP: %08x LR: %08x\n",
+ val[0], val[1], val[2]);
+
+ dev_coredumpv(npu->dev, dump, NPU_DUMP_SIZE, GFP_KERNEL);
+}
+
+static irqreturn_t airoha_npu_wdt_handler(int irq, void *core_instance)
+{
+ struct airoha_npu_core *core = core_instance;
+ struct airoha_npu *npu = core->npu;
+ int c = core - &npu->cores[0];
+ u32 val;
+
+ regmap_set_bits(npu->regmap, REG_WDT_TIMER_CTRL(c), WDT_INTR_MASK);
+ if (!regmap_read(npu->regmap, REG_WDT_TIMER_CTRL(c), &val) &&
+ FIELD_GET(WDT_EN_MASK, val))
+ schedule_work(&core->wdt_work);
+
+ return IRQ_HANDLED;
+}
+
+static int airoha_npu_ppe_init(struct airoha_npu *npu)
+{
+ struct ppe_mbox_data ppe_data = {
+ .func_type = NPU_OP_SET,
+ .func_id = PPE_FUNC_SET_WAIT_HWNAT_INIT,
+ .init_info = {
+ .ppe_type = PPE_TYPE_L2B_IPV4_IPV6,
+ .wan_mode = QDMA_WAN_ETHER,
+ },
+ };
+
+ return airoha_npu_send_msg(npu, NPU_FUNC_PPE, &ppe_data,
+ sizeof(struct ppe_mbox_data));
+}
+
+static int airoha_npu_ppe_deinit(struct airoha_npu *npu)
+{
+ struct ppe_mbox_data ppe_data = {
+ .func_type = NPU_OP_SET,
+ .func_id = PPE_FUNC_SET_WAIT_HWNAT_DEINIT,
+ };
+
+ return airoha_npu_send_msg(npu, NPU_FUNC_PPE, &ppe_data,
+ sizeof(struct ppe_mbox_data));
+}
+
+static int airoha_npu_ppe_flush_sram_entries(struct airoha_npu *npu,
+ dma_addr_t foe_addr,
+ int sram_num_entries)
+{
+ struct ppe_mbox_data ppe_data = {
+ .func_type = NPU_OP_SET,
+ .func_id = PPE_FUNC_SET_WAIT_API,
+ .set_info = {
+ .func_id = PPE_SRAM_RESET_VAL,
+ .data = foe_addr,
+ .size = sram_num_entries,
+ },
+ };
+
+ return airoha_npu_send_msg(npu, NPU_FUNC_PPE, &ppe_data,
+ sizeof(struct ppe_mbox_data));
+}
+
+static int airoha_npu_foe_commit_entry(struct airoha_npu *npu,
+ dma_addr_t foe_addr,
+ u32 entry_size, u32 hash, bool ppe2)
+{
+ struct ppe_mbox_data ppe_data = {
+ .func_type = NPU_OP_SET,
+ .func_id = PPE_FUNC_SET_WAIT_API,
+ .set_info = {
+ .data = foe_addr,
+ .size = entry_size,
+ },
+ };
+ int err;
+
+ ppe_data.set_info.func_id = ppe2 ? PPE2_SRAM_SET_ENTRY
+ : PPE_SRAM_SET_ENTRY;
+
+ err = airoha_npu_send_msg(npu, NPU_FUNC_PPE, &ppe_data,
+ sizeof(struct ppe_mbox_data));
+ if (err)
+ return err;
+
+ ppe_data.set_info.func_id = PPE_SRAM_SET_VAL;
+ ppe_data.set_info.data = hash;
+ ppe_data.set_info.size = sizeof(u32);
+
+ return airoha_npu_send_msg(npu, NPU_FUNC_PPE, &ppe_data,
+ sizeof(struct ppe_mbox_data));
+}
+
+struct airoha_npu *airoha_npu_get(struct device *dev)
+{
+ struct platform_device *pdev;
+ struct device_node *np;
+ struct airoha_npu *npu;
+
+ np = of_parse_phandle(dev->of_node, "airoha,npu", 0);
+ if (!np)
+ return ERR_PTR(-ENODEV);
+
+ pdev = of_find_device_by_node(np);
+ if (!pdev) {
+ dev_err(dev, "cannot find device node %s\n", np->name);
+ of_node_put(np);
+ return ERR_PTR(-ENODEV);
+ }
+ of_node_put(np);
+
+ if (!try_module_get(THIS_MODULE)) {
+ dev_err(dev, "failed to get the device driver module\n");
+ npu = ERR_PTR(-ENODEV);
+ goto error_pdev_put;
+ }
+
+ npu = platform_get_drvdata(pdev);
+ if (!npu) {
+ npu = ERR_PTR(-ENODEV);
+ goto error_module_put;
+ }
+
+ if (!device_link_add(dev, &pdev->dev, DL_FLAG_AUTOREMOVE_SUPPLIER)) {
+ dev_err(&pdev->dev,
+ "failed to create device link to consumer %s\n",
+ dev_name(dev));
+ npu = ERR_PTR(-EINVAL);
+ goto error_module_put;
+ }
+
+ return npu;
+
+error_module_put:
+ module_put(THIS_MODULE);
+error_pdev_put:
+ platform_device_put(pdev);
+
+ return npu;
+}
+EXPORT_SYMBOL_GPL(airoha_npu_get);
+
+void airoha_npu_put(struct airoha_npu *npu)
+{
+ module_put(THIS_MODULE);
+ put_device(npu->dev);
+}
+EXPORT_SYMBOL_GPL(airoha_npu_put);
+
+static const struct of_device_id of_airoha_npu_match[] = {
+ { .compatible = "airoha,en7581-npu" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_airoha_npu_match);
+
+static const struct regmap_config regmap_config = {
+ .name = "npu",
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .disable_locking = true,
+};
+
+static int airoha_npu_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct reserved_mem *rmem;
+ struct airoha_npu *npu;
+ struct device_node *np;
+ void __iomem *base;
+ int i, irq, err;
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ npu = devm_kzalloc(dev, sizeof(*npu), GFP_KERNEL);
+ if (!npu)
+ return -ENOMEM;
+
+ npu->dev = dev;
+ npu->ops.ppe_init = airoha_npu_ppe_init;
+ npu->ops.ppe_deinit = airoha_npu_ppe_deinit;
+ npu->ops.ppe_flush_sram_entries = airoha_npu_ppe_flush_sram_entries;
+ npu->ops.ppe_foe_commit_entry = airoha_npu_foe_commit_entry;
+
+ npu->regmap = devm_regmap_init_mmio(dev, base, &regmap_config);
+ if (IS_ERR(npu->regmap))
+ return PTR_ERR(npu->regmap);
+
+ np = of_parse_phandle(dev->of_node, "memory-region", 0);
+ if (!np)
+ return -ENODEV;
+
+ rmem = of_reserved_mem_lookup(np);
+ of_node_put(np);
+
+ if (!rmem)
+ return -ENODEV;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ err = devm_request_irq(dev, irq, airoha_npu_mbox_handler,
+ IRQF_SHARED, "airoha-npu-mbox", npu);
+ if (err)
+ return err;
+
+ for (i = 0; i < ARRAY_SIZE(npu->cores); i++) {
+ struct airoha_npu_core *core = &npu->cores[i];
+
+ spin_lock_init(&core->lock);
+ core->npu = npu;
+
+ irq = platform_get_irq(pdev, i + 1);
+ if (irq < 0)
+ return irq;
+
+ err = devm_request_irq(dev, irq, airoha_npu_wdt_handler,
+ IRQF_SHARED, "airoha-npu-wdt", core);
+ if (err)
+ return err;
+
+ INIT_WORK(&core->wdt_work, airoha_npu_wdt_work);
+ }
+
+ err = dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
+ if (err)
+ return err;
+
+ err = airoha_npu_run_firmware(dev, base, rmem);
+ if (err)
+ return dev_err_probe(dev, err, "failed to run npu firmware\n");
+
+ regmap_write(npu->regmap, REG_CR_NPU_MIB(10),
+ rmem->base + NPU_EN7581_FIRMWARE_RV32_MAX_SIZE);
+ regmap_write(npu->regmap, REG_CR_NPU_MIB(11), 0x40000); /* SRAM 256K */
+ regmap_write(npu->regmap, REG_CR_NPU_MIB(12), 0);
+ regmap_write(npu->regmap, REG_CR_NPU_MIB(21), 1);
+ msleep(100);
+
+ /* set the boot address for each NPU core */
+ for (i = 0; i < NPU_NUM_CORES; i++)
+ regmap_write(npu->regmap, REG_CR_BOOT_BASE(i), rmem->base);
+ usleep_range(1000, 2000);
+
+ /* enable the NPU cores; do not start core 3 since it is used for
+  * WiFi offloading
+  */
+ regmap_write(npu->regmap, REG_CR_BOOT_CONFIG, 0xf7);
+ regmap_write(npu->regmap, REG_CR_BOOT_TRIGGER, 0x1);
+ msleep(100);
+
+ platform_set_drvdata(pdev, npu);
+
+ return 0;
+}
+
+static void airoha_npu_remove(struct platform_device *pdev)
+{
+ struct airoha_npu *npu = platform_get_drvdata(pdev);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(npu->cores); i++)
+ cancel_work_sync(&npu->cores[i].wdt_work);
+}
+
+static struct platform_driver airoha_npu_driver = {
+ .probe = airoha_npu_probe,
+ .remove = airoha_npu_remove,
+ .driver = {
+ .name = "airoha-npu",
+ .of_match_table = of_airoha_npu_match,
+ },
+};
+module_platform_driver(airoha_npu_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>");
+MODULE_DESCRIPTION("Airoha Network Processor Unit driver");
diff --git a/drivers/net/ethernet/airoha/airoha_npu.h b/drivers/net/ethernet/airoha/airoha_npu.h
new file mode 100644
index 000000000000..a2b8ae4d9473
--- /dev/null
+++ b/drivers/net/ethernet/airoha/airoha_npu.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2025 AIROHA Inc
+ * Author: Lorenzo Bianconi <lorenzo@kernel.org>
+ */
+
+#define NPU_NUM_CORES 8
+
+struct airoha_npu {
+ struct device *dev;
+ struct regmap *regmap;
+
+ struct airoha_npu_core {
+ struct airoha_npu *npu;
+ /* protect concurrent npu memory accesses */
+ spinlock_t lock;
+ struct work_struct wdt_work;
+ } cores[NPU_NUM_CORES];
+
+ struct {
+ int (*ppe_init)(struct airoha_npu *npu);
+ int (*ppe_deinit)(struct airoha_npu *npu);
+ int (*ppe_flush_sram_entries)(struct airoha_npu *npu,
+ dma_addr_t foe_addr,
+ int sram_num_entries);
+ int (*ppe_foe_commit_entry)(struct airoha_npu *npu,
+ dma_addr_t foe_addr,
+ u32 entry_size, u32 hash,
+ bool ppe2);
+ } ops;
+};
+
+struct airoha_npu *airoha_npu_get(struct device *dev);
+void airoha_npu_put(struct airoha_npu *npu);
diff --git a/drivers/net/ethernet/airoha/airoha_ppe.c b/drivers/net/ethernet/airoha/airoha_ppe.c
new file mode 100644
index 000000000000..8b55e871352d
--- /dev/null
+++ b/drivers/net/ethernet/airoha/airoha_ppe.c
@@ -0,0 +1,910 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025 AIROHA Inc
+ * Author: Lorenzo Bianconi <lorenzo@kernel.org>
+ */
+
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/rhashtable.h>
+#include <net/ipv6.h>
+#include <net/pkt_cls.h>
+
+#include "airoha_npu.h"
+#include "airoha_regs.h"
+#include "airoha_eth.h"
+
+static DEFINE_MUTEX(flow_offload_mutex);
+static DEFINE_SPINLOCK(ppe_lock);
+
+static const struct rhashtable_params airoha_flow_table_params = {
+ .head_offset = offsetof(struct airoha_flow_table_entry, node),
+ .key_offset = offsetof(struct airoha_flow_table_entry, cookie),
+ .key_len = sizeof(unsigned long),
+ .automatic_shrinking = true,
+};
+
+static bool airoha_ppe2_is_enabled(struct airoha_eth *eth)
+{
+ return airoha_fe_rr(eth, REG_PPE_GLO_CFG(1)) & PPE_GLO_CFG_EN_MASK;
+}
+
+static u32 airoha_ppe_get_timestamp(struct airoha_ppe *ppe)
+{
+ u16 timestamp = airoha_fe_rr(ppe->eth, REG_FE_FOE_TS);
+
+ return FIELD_GET(AIROHA_FOE_IB1_BIND_TIMESTAMP, timestamp);
+}
+
+static void airoha_ppe_hw_init(struct airoha_ppe *ppe)
+{
+ u32 sram_tb_size, sram_num_entries, dram_num_entries;
+ struct airoha_eth *eth = ppe->eth;
+ int i;
+
+ sram_tb_size = PPE_SRAM_NUM_ENTRIES * sizeof(struct airoha_foe_entry);
+ dram_num_entries = PPE_RAM_NUM_ENTRIES_SHIFT(PPE_DRAM_NUM_ENTRIES);
+
+ for (i = 0; i < PPE_NUM; i++) {
+ int p;
+
+ airoha_fe_wr(eth, REG_PPE_TB_BASE(i),
+ ppe->foe_dma + sram_tb_size);
+
+ airoha_fe_rmw(eth, REG_PPE_BND_AGE0(i),
+ PPE_BIND_AGE0_DELTA_NON_L4 |
+ PPE_BIND_AGE0_DELTA_UDP,
+ FIELD_PREP(PPE_BIND_AGE0_DELTA_NON_L4, 1) |
+ FIELD_PREP(PPE_BIND_AGE0_DELTA_UDP, 12));
+ airoha_fe_rmw(eth, REG_PPE_BND_AGE1(i),
+ PPE_BIND_AGE1_DELTA_TCP_FIN |
+ PPE_BIND_AGE1_DELTA_TCP,
+ FIELD_PREP(PPE_BIND_AGE1_DELTA_TCP_FIN, 1) |
+ FIELD_PREP(PPE_BIND_AGE1_DELTA_TCP, 7));
+
+ airoha_fe_rmw(eth, REG_PPE_TB_HASH_CFG(i),
+ PPE_SRAM_TABLE_EN_MASK |
+ PPE_SRAM_HASH1_EN_MASK |
+ PPE_DRAM_TABLE_EN_MASK |
+ PPE_SRAM_HASH0_MODE_MASK |
+ PPE_SRAM_HASH1_MODE_MASK |
+ PPE_DRAM_HASH0_MODE_MASK |
+ PPE_DRAM_HASH1_MODE_MASK,
+ FIELD_PREP(PPE_SRAM_TABLE_EN_MASK, 1) |
+ FIELD_PREP(PPE_SRAM_HASH1_EN_MASK, 1) |
+ FIELD_PREP(PPE_SRAM_HASH1_MODE_MASK, 1) |
+ FIELD_PREP(PPE_DRAM_HASH1_MODE_MASK, 3));
+
+ airoha_fe_rmw(eth, REG_PPE_TB_CFG(i),
+ PPE_TB_CFG_SEARCH_MISS_MASK |
+ PPE_TB_ENTRY_SIZE_MASK,
+ FIELD_PREP(PPE_TB_CFG_SEARCH_MISS_MASK, 3) |
+ FIELD_PREP(PPE_TB_ENTRY_SIZE_MASK, 0));
+
+ airoha_fe_wr(eth, REG_PPE_HASH_SEED(i), PPE_HASH_SEED);
+
+ for (p = 0; p < ARRAY_SIZE(eth->ports); p++)
+ airoha_fe_rmw(eth, REG_PPE_MTU(i, p),
+ FP0_EGRESS_MTU_MASK |
+ FP1_EGRESS_MTU_MASK,
+ FIELD_PREP(FP0_EGRESS_MTU_MASK,
+ AIROHA_MAX_MTU) |
+ FIELD_PREP(FP1_EGRESS_MTU_MASK,
+ AIROHA_MAX_MTU));
+ }
+
+ if (airoha_ppe2_is_enabled(eth)) {
+ sram_num_entries =
+ PPE_RAM_NUM_ENTRIES_SHIFT(PPE1_SRAM_NUM_ENTRIES);
+ airoha_fe_rmw(eth, REG_PPE_TB_CFG(0),
+ PPE_SRAM_TB_NUM_ENTRY_MASK |
+ PPE_DRAM_TB_NUM_ENTRY_MASK,
+ FIELD_PREP(PPE_SRAM_TB_NUM_ENTRY_MASK,
+ sram_num_entries) |
+ FIELD_PREP(PPE_DRAM_TB_NUM_ENTRY_MASK,
+ dram_num_entries));
+ airoha_fe_rmw(eth, REG_PPE_TB_CFG(1),
+ PPE_SRAM_TB_NUM_ENTRY_MASK |
+ PPE_DRAM_TB_NUM_ENTRY_MASK,
+ FIELD_PREP(PPE_SRAM_TB_NUM_ENTRY_MASK,
+ sram_num_entries) |
+ FIELD_PREP(PPE_DRAM_TB_NUM_ENTRY_MASK,
+ dram_num_entries));
+ } else {
+ sram_num_entries =
+ PPE_RAM_NUM_ENTRIES_SHIFT(PPE_SRAM_NUM_ENTRIES);
+ airoha_fe_rmw(eth, REG_PPE_TB_CFG(0),
+ PPE_SRAM_TB_NUM_ENTRY_MASK |
+ PPE_DRAM_TB_NUM_ENTRY_MASK,
+ FIELD_PREP(PPE_SRAM_TB_NUM_ENTRY_MASK,
+ sram_num_entries) |
+ FIELD_PREP(PPE_DRAM_TB_NUM_ENTRY_MASK,
+ dram_num_entries));
+ }
+}
+
+static void airoha_ppe_flow_mangle_eth(const struct flow_action_entry *act, void *eth)
+{
+ void *dest = eth + act->mangle.offset;
+ const void *src = &act->mangle.val;
+
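+ /* Offsets beyond 8 would touch the ethertype; only MAC address
+ * rewrites are supported here.
+ */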
+ if (act->mangle.offset > 8)
+ return;
+
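+ /* A 0xffff mask selects a 16-bit rewrite: shift both pointers to
+ * the two bytes being updated and copy 2 bytes instead of 4.
+ */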
+ if (act->mangle.mask == 0xffff) {
+ src += 2;
+ dest += 2;
+ }
+
+ memcpy(dest, src, act->mangle.mask ? 2 : 4);
+}
+
+static int airoha_ppe_flow_mangle_ports(const struct flow_action_entry *act,
+ struct airoha_flow_data *data)
+{
+ u32 val = be32_to_cpu((__force __be32)act->mangle.val);
+
+ switch (act->mangle.offset) {
+ case 0:
+ if ((__force __be32)act->mangle.mask == ~cpu_to_be32(0xffff))
+ data->dst_port = cpu_to_be16(val);
+ else
+ data->src_port = cpu_to_be16(val >> 16);
+ break;
+ case 2:
+ data->dst_port = cpu_to_be16(val);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int airoha_ppe_flow_mangle_ipv4(const struct flow_action_entry *act,
+ struct airoha_flow_data *data)
+{
+ __be32 *dest;
+
+ switch (act->mangle.offset) {
+ case offsetof(struct iphdr, saddr):
+ dest = &data->v4.src_addr;
+ break;
+ case offsetof(struct iphdr, daddr):
+ dest = &data->v4.dst_addr;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ memcpy(dest, &act->mangle.val, sizeof(u32));
+
+ return 0;
+}
+
+static int airoha_get_dsa_port(struct net_device **dev)
+{
+#if IS_ENABLED(CONFIG_NET_DSA)
+ struct dsa_port *dp = dsa_port_from_netdev(*dev);
+
+ if (IS_ERR(dp))
+ return -ENODEV;
+
+ *dev = dsa_port_to_conduit(dp);
+ return dp->index;
+#else
+ return -ENODEV;
+#endif
+}
+
+static int airoha_ppe_foe_entry_prepare(struct airoha_foe_entry *hwe,
+ struct net_device *dev, int type,
+ struct airoha_flow_data *data,
+ int l4proto)
+{
+ int dsa_port = airoha_get_dsa_port(&dev);
+ struct airoha_foe_mac_info_common *l2;
+ u32 qdata, ports_pad, val;
+
+ memset(hwe, 0, sizeof(*hwe));
+
+ val = FIELD_PREP(AIROHA_FOE_IB1_BIND_STATE, AIROHA_FOE_STATE_BIND) |
+ FIELD_PREP(AIROHA_FOE_IB1_BIND_PACKET_TYPE, type) |
+ FIELD_PREP(AIROHA_FOE_IB1_BIND_UDP, l4proto == IPPROTO_UDP) |
+ FIELD_PREP(AIROHA_FOE_IB1_BIND_VLAN_LAYER, data->vlan.num) |
+ FIELD_PREP(AIROHA_FOE_IB1_BIND_VPM, data->vlan.num) |
+ AIROHA_FOE_IB1_BIND_TTL;
+ hwe->ib1 = val;
+
+ val = FIELD_PREP(AIROHA_FOE_IB2_PORT_AG, 0x1f) |
+ AIROHA_FOE_IB2_PSE_QOS;
+ if (dsa_port >= 0)
+ val |= FIELD_PREP(AIROHA_FOE_IB2_NBQ, dsa_port);
+
+ if (dev) {
+ struct airoha_gdm_port *port = netdev_priv(dev);
+ u8 pse_port;
+
+ if (dsa_port >= 0)
+ pse_port = port->id == 4 ? FE_PSE_PORT_GDM4 : port->id;
+ else
+ pse_port = 2; /* uplink relies on GDM2 loopback */
+ val |= FIELD_PREP(AIROHA_FOE_IB2_PSE_PORT, pse_port);
+ }
+
+ if (is_multicast_ether_addr(data->eth.h_dest))
+ val |= AIROHA_FOE_IB2_MULTICAST;
+
+ ports_pad = 0xa5a5a500 | (l4proto & 0xff);
+ if (type == PPE_PKT_TYPE_IPV4_ROUTE)
+ hwe->ipv4.orig_tuple.ports = ports_pad;
+ if (type == PPE_PKT_TYPE_IPV6_ROUTE_3T)
+ hwe->ipv6.ports = ports_pad;
+
+ qdata = FIELD_PREP(AIROHA_FOE_SHAPER_ID, 0x7f);
+ if (type == PPE_PKT_TYPE_BRIDGE) {
+ hwe->bridge.dest_mac_hi = get_unaligned_be32(data->eth.h_dest);
+ hwe->bridge.dest_mac_lo =
+ get_unaligned_be16(data->eth.h_dest + 4);
+ hwe->bridge.src_mac_hi =
+ get_unaligned_be16(data->eth.h_source);
+ hwe->bridge.src_mac_lo =
+ get_unaligned_be32(data->eth.h_source + 2);
+ hwe->bridge.data = qdata;
+ hwe->bridge.ib2 = val;
+ l2 = &hwe->bridge.l2.common;
+ } else if (type >= PPE_PKT_TYPE_IPV6_ROUTE_3T) {
+ hwe->ipv6.data = qdata;
+ hwe->ipv6.ib2 = val;
+ l2 = &hwe->ipv6.l2;
+ } else {
+ hwe->ipv4.data = qdata;
+ hwe->ipv4.ib2 = val;
+ l2 = &hwe->ipv4.l2.common;
+ }
+
+ l2->dest_mac_hi = get_unaligned_be32(data->eth.h_dest);
+ l2->dest_mac_lo = get_unaligned_be16(data->eth.h_dest + 4);
+ if (type <= PPE_PKT_TYPE_IPV4_DSLITE) {
+ l2->src_mac_hi = get_unaligned_be32(data->eth.h_source);
+ hwe->ipv4.l2.src_mac_lo =
+ get_unaligned_be16(data->eth.h_source + 4);
+ } else {
+ l2->src_mac_hi = FIELD_PREP(AIROHA_FOE_MAC_SMAC_ID, 0xf);
+ }
+
+ if (data->vlan.num) {
+ l2->etype = dsa_port >= 0 ? BIT(dsa_port) : 0;
+ l2->vlan1 = data->vlan.hdr[0].id;
+ if (data->vlan.num == 2)
+ l2->vlan2 = data->vlan.hdr[1].id;
+ } else if (dsa_port >= 0) {
+ l2->etype = BIT(15) | BIT(dsa_port);
+ } else if (type >= PPE_PKT_TYPE_IPV6_ROUTE_3T) {
+ l2->etype = ETH_P_IPV6;
+ } else {
+ l2->etype = ETH_P_IP;
+ }
+
+ return 0;
+}
+
+static int airoha_ppe_foe_entry_set_ipv4_tuple(struct airoha_foe_entry *hwe,
+ struct airoha_flow_data *data,
+ bool egress)
+{
+ int type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, hwe->ib1);
+ struct airoha_foe_ipv4_tuple *t;
+
+ switch (type) {
+ case PPE_PKT_TYPE_IPV4_HNAPT:
+ if (egress) {
+ t = &hwe->ipv4.new_tuple;
+ break;
+ }
+ fallthrough;
+ case PPE_PKT_TYPE_IPV4_DSLITE:
+ case PPE_PKT_TYPE_IPV4_ROUTE:
+ t = &hwe->ipv4.orig_tuple;
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ return -EINVAL;
+ }
+
+ t->src_ip = be32_to_cpu(data->v4.src_addr);
+ t->dest_ip = be32_to_cpu(data->v4.dst_addr);
+
+ if (type != PPE_PKT_TYPE_IPV4_ROUTE) {
+ t->src_port = be16_to_cpu(data->src_port);
+ t->dest_port = be16_to_cpu(data->dst_port);
+ }
+
+ return 0;
+}
+
+static int airoha_ppe_foe_entry_set_ipv6_tuple(struct airoha_foe_entry *hwe,
+ struct airoha_flow_data *data)
+{
+ int type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, hwe->ib1);
+ u32 *src, *dest;
+
+ switch (type) {
+ case PPE_PKT_TYPE_IPV6_ROUTE_5T:
+ case PPE_PKT_TYPE_IPV6_6RD:
+ hwe->ipv6.src_port = be16_to_cpu(data->src_port);
+ hwe->ipv6.dest_port = be16_to_cpu(data->dst_port);
+ fallthrough;
+ case PPE_PKT_TYPE_IPV6_ROUTE_3T:
+ src = hwe->ipv6.src_ip;
+ dest = hwe->ipv6.dest_ip;
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ return -EINVAL;
+ }
+
+ ipv6_addr_be32_to_cpu(src, data->v6.src_addr.s6_addr32);
+ ipv6_addr_be32_to_cpu(dest, data->v6.dst_addr.s6_addr32);
+
+ return 0;
+}
+
+static u32 airoha_ppe_foe_get_entry_hash(struct airoha_foe_entry *hwe)
+{
+ int type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, hwe->ib1);
+ u32 hash, hv1, hv2, hv3;
+
+ switch (type) {
+ case PPE_PKT_TYPE_IPV4_ROUTE:
+ case PPE_PKT_TYPE_IPV4_HNAPT:
+ hv1 = hwe->ipv4.orig_tuple.ports;
+ hv2 = hwe->ipv4.orig_tuple.dest_ip;
+ hv3 = hwe->ipv4.orig_tuple.src_ip;
+ break;
+ case PPE_PKT_TYPE_IPV6_ROUTE_3T:
+ case PPE_PKT_TYPE_IPV6_ROUTE_5T:
+ hv1 = hwe->ipv6.src_ip[3] ^ hwe->ipv6.dest_ip[3];
+ hv1 ^= hwe->ipv6.ports;
+
+ hv2 = hwe->ipv6.src_ip[2] ^ hwe->ipv6.dest_ip[2];
+ hv2 ^= hwe->ipv6.dest_ip[0];
+
+ hv3 = hwe->ipv6.src_ip[1] ^ hwe->ipv6.dest_ip[1];
+ hv3 ^= hwe->ipv6.src_ip[0];
+ break;
+ case PPE_PKT_TYPE_IPV4_DSLITE:
+ case PPE_PKT_TYPE_IPV6_6RD:
+ default:
+ WARN_ON_ONCE(1);
+ return PPE_HASH_MASK;
+ }
+
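+ /* Fold the three tuple words into a single FOE table index */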
+ hash = (hv1 & hv2) | ((~hv1) & hv3);
+ hash = (hash >> 24) | ((hash & 0xffffff) << 8);
+ hash ^= hv1 ^ hv2 ^ hv3;
+ hash ^= hash >> 16;
+ hash &= PPE_NUM_ENTRIES - 1;
+
+ return hash;
+}
+
+struct airoha_foe_entry *airoha_ppe_foe_get_entry(struct airoha_ppe *ppe,
+ u32 hash)
+{
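+ /* Entries below PPE_SRAM_NUM_ENTRIES live in PPE SRAM: refresh
+ * the coherent DMA shadow copy through the indirect RAM access
+ * registers before returning the entry to the caller.
+ */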
+ if (hash < PPE_SRAM_NUM_ENTRIES) {
+ u32 *hwe = ppe->foe + hash * sizeof(struct airoha_foe_entry);
+ struct airoha_eth *eth = ppe->eth;
+ bool ppe2;
+ u32 val;
+ int i;
+
+ ppe2 = airoha_ppe2_is_enabled(ppe->eth) &&
+ hash >= PPE1_SRAM_NUM_ENTRIES;
+ airoha_fe_wr(ppe->eth, REG_PPE_RAM_CTRL(ppe2),
+ FIELD_PREP(PPE_SRAM_CTRL_ENTRY_MASK, hash) |
+ PPE_SRAM_CTRL_REQ_MASK);
+ if (read_poll_timeout_atomic(airoha_fe_rr, val,
+ val & PPE_SRAM_CTRL_ACK_MASK,
+ 10, 100, false, eth,
+ REG_PPE_RAM_CTRL(ppe2)))
+ return NULL;
+
+ for (i = 0; i < sizeof(struct airoha_foe_entry) / 4; i++)
+ hwe[i] = airoha_fe_rr(eth,
+ REG_PPE_RAM_ENTRY(ppe2, i));
+ }
+
+ return ppe->foe + hash * sizeof(struct airoha_foe_entry);
+}
+
+static bool airoha_ppe_foe_compare_entry(struct airoha_flow_table_entry *e,
+ struct airoha_foe_entry *hwe)
+{
+ int type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, e->data.ib1);
+ int len;
+
+ if ((hwe->ib1 ^ e->data.ib1) & AIROHA_FOE_IB1_BIND_UDP)
+ return false;
+
+ if (type > PPE_PKT_TYPE_IPV4_DSLITE)
+ len = offsetof(struct airoha_foe_entry, ipv6.data);
+ else
+ len = offsetof(struct airoha_foe_entry, ipv4.ib2);
+
+ return !memcmp(&e->data.d, &hwe->d, len - sizeof(hwe->ib1));
+}
+
+static int airoha_ppe_foe_commit_entry(struct airoha_ppe *ppe,
+ struct airoha_foe_entry *e,
+ u32 hash)
+{
+ struct airoha_foe_entry *hwe = ppe->foe + hash * sizeof(*hwe);
+ u32 ts = airoha_ppe_get_timestamp(ppe);
+ struct airoha_eth *eth = ppe->eth;
+
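+ /* Write the entry body first; the wmb() ensures it is visible
+ * before the ib1 word is updated and the entry is marked as bound.
+ */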
+ memcpy(&hwe->d, &e->d, sizeof(*hwe) - sizeof(hwe->ib1));
+ wmb();
+
+ e->ib1 &= ~AIROHA_FOE_IB1_BIND_TIMESTAMP;
+ e->ib1 |= FIELD_PREP(AIROHA_FOE_IB1_BIND_TIMESTAMP, ts);
+ hwe->ib1 = e->ib1;
+
+ if (hash < PPE_SRAM_NUM_ENTRIES) {
+ dma_addr_t addr = ppe->foe_dma + hash * sizeof(*hwe);
+ bool ppe2 = airoha_ppe2_is_enabled(eth) &&
+ hash >= PPE1_SRAM_NUM_ENTRIES;
+ struct airoha_npu *npu;
+ int err = -ENODEV;
+
+ rcu_read_lock();
+ npu = rcu_dereference(eth->npu);
+ if (npu)
+ err = npu->ops.ppe_foe_commit_entry(npu, addr,
+ sizeof(*hwe), hash,
+ ppe2);
+ rcu_read_unlock();
+
+ return err;
+ }
+
+ return 0;
+}
+
+static void airoha_ppe_foe_insert_entry(struct airoha_ppe *ppe, u32 hash)
+{
+ struct airoha_flow_table_entry *e;
+ struct airoha_foe_entry *hwe;
+ struct hlist_node *n;
+ u32 index, state;
+
+ spin_lock_bh(&ppe_lock);
+
+ hwe = airoha_ppe_foe_get_entry(ppe, hash);
+ if (!hwe)
+ goto unlock;
+
+ state = FIELD_GET(AIROHA_FOE_IB1_BIND_STATE, hwe->ib1);
+ if (state == AIROHA_FOE_STATE_BIND)
+ goto unlock;
+
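+ /* Look for a software flow entry matching this unbound hardware
+ * entry and commit it to the FOE table.
+ */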
+ index = airoha_ppe_foe_get_entry_hash(hwe);
+ hlist_for_each_entry_safe(e, n, &ppe->foe_flow[index], list) {
+ if (airoha_ppe_foe_compare_entry(e, hwe)) {
+ airoha_ppe_foe_commit_entry(ppe, &e->data, hash);
+ e->hash = hash;
+ break;
+ }
+ }
+unlock:
+ spin_unlock_bh(&ppe_lock);
+}
+
+static int airoha_ppe_foe_flow_commit_entry(struct airoha_ppe *ppe,
+ struct airoha_flow_table_entry *e)
+{
+ u32 hash = airoha_ppe_foe_get_entry_hash(&e->data);
+
+ e->hash = 0xffff;
+
+ spin_lock_bh(&ppe_lock);
+ hlist_add_head(&e->list, &ppe->foe_flow[hash]);
+ spin_unlock_bh(&ppe_lock);
+
+ return 0;
+}
+
+static void airoha_ppe_foe_flow_remove_entry(struct airoha_ppe *ppe,
+ struct airoha_flow_table_entry *e)
+{
+ spin_lock_bh(&ppe_lock);
+
+ hlist_del_init(&e->list);
+ if (e->hash != 0xffff) {
+ e->data.ib1 &= ~AIROHA_FOE_IB1_BIND_STATE;
+ e->data.ib1 |= FIELD_PREP(AIROHA_FOE_IB1_BIND_STATE,
+ AIROHA_FOE_STATE_INVALID);
+ airoha_ppe_foe_commit_entry(ppe, &e->data, e->hash);
+ e->hash = 0xffff;
+ }
+
+ spin_unlock_bh(&ppe_lock);
+}
+
+static int airoha_ppe_flow_offload_replace(struct airoha_gdm_port *port,
+ struct flow_cls_offload *f)
+{
+ struct flow_rule *rule = flow_cls_offload_flow_rule(f);
+ struct airoha_eth *eth = port->qdma->eth;
+ struct airoha_flow_table_entry *e;
+ struct airoha_flow_data data = {};
+ struct net_device *odev = NULL;
+ struct flow_action_entry *act;
+ struct airoha_foe_entry hwe;
+ int err, i, offload_type;
+ u16 addr_type = 0;
+ u8 l4proto = 0;
+
+ if (rhashtable_lookup(&eth->flow_table, &f->cookie,
+ airoha_flow_table_params))
+ return -EEXIST;
+
+ if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_META))
+ return -EOPNOTSUPP;
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
+ struct flow_match_control match;
+
+ flow_rule_match_control(rule, &match);
+ addr_type = match.key->addr_type;
+ if (flow_rule_has_control_flags(match.mask->flags,
+ f->common.extack))
+ return -EOPNOTSUPP;
+ } else {
+ return -EOPNOTSUPP;
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
+ struct flow_match_basic match;
+
+ flow_rule_match_basic(rule, &match);
+ l4proto = match.key->ip_proto;
+ } else {
+ return -EOPNOTSUPP;
+ }
+
+ switch (addr_type) {
+ case 0:
+ offload_type = PPE_PKT_TYPE_BRIDGE;
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
+ struct flow_match_eth_addrs match;
+
+ flow_rule_match_eth_addrs(rule, &match);
+ memcpy(data.eth.h_dest, match.key->dst, ETH_ALEN);
+ memcpy(data.eth.h_source, match.key->src, ETH_ALEN);
+ } else {
+ return -EOPNOTSUPP;
+ }
+ break;
+ case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
+ offload_type = PPE_PKT_TYPE_IPV4_HNAPT;
+ break;
+ case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
+ offload_type = PPE_PKT_TYPE_IPV6_ROUTE_5T;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ flow_action_for_each(i, act, &rule->action) {
+ switch (act->id) {
+ case FLOW_ACTION_MANGLE:
+ if (offload_type == PPE_PKT_TYPE_BRIDGE)
+ return -EOPNOTSUPP;
+
+ if (act->mangle.htype == FLOW_ACT_MANGLE_HDR_TYPE_ETH)
+ airoha_ppe_flow_mangle_eth(act, &data.eth);
+ break;
+ case FLOW_ACTION_REDIRECT:
+ odev = act->dev;
+ break;
+ case FLOW_ACTION_CSUM:
+ break;
+ case FLOW_ACTION_VLAN_PUSH:
+ if (data.vlan.num == 2 ||
+ act->vlan.proto != htons(ETH_P_8021Q))
+ return -EOPNOTSUPP;
+
+ data.vlan.hdr[data.vlan.num].id = act->vlan.vid;
+ data.vlan.hdr[data.vlan.num].proto = act->vlan.proto;
+ data.vlan.num++;
+ break;
+ case FLOW_ACTION_VLAN_POP:
+ break;
+ case FLOW_ACTION_PPPOE_PUSH:
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ }
+
+ if (!is_valid_ether_addr(data.eth.h_source) ||
+ !is_valid_ether_addr(data.eth.h_dest))
+ return -EINVAL;
+
+ err = airoha_ppe_foe_entry_prepare(&hwe, odev, offload_type,
+ &data, l4proto);
+ if (err)
+ return err;
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
+ struct flow_match_ports ports;
+
+ if (offload_type == PPE_PKT_TYPE_BRIDGE)
+ return -EOPNOTSUPP;
+
+ flow_rule_match_ports(rule, &ports);
+ data.src_port = ports.key->src;
+ data.dst_port = ports.key->dst;
+ } else if (offload_type != PPE_PKT_TYPE_BRIDGE) {
+ return -EOPNOTSUPP;
+ }
+
+ if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
+ struct flow_match_ipv4_addrs addrs;
+
+ flow_rule_match_ipv4_addrs(rule, &addrs);
+ data.v4.src_addr = addrs.key->src;
+ data.v4.dst_addr = addrs.key->dst;
+ airoha_ppe_foe_entry_set_ipv4_tuple(&hwe, &data, false);
+ }
+
+ if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
+ struct flow_match_ipv6_addrs addrs;
+
+ flow_rule_match_ipv6_addrs(rule, &addrs);
+
+ data.v6.src_addr = addrs.key->src;
+ data.v6.dst_addr = addrs.key->dst;
+ airoha_ppe_foe_entry_set_ipv6_tuple(&hwe, &data);
+ }
+
+ flow_action_for_each(i, act, &rule->action) {
+ if (act->id != FLOW_ACTION_MANGLE)
+ continue;
+
+ if (offload_type == PPE_PKT_TYPE_BRIDGE)
+ return -EOPNOTSUPP;
+
+ switch (act->mangle.htype) {
+ case FLOW_ACT_MANGLE_HDR_TYPE_TCP:
+ case FLOW_ACT_MANGLE_HDR_TYPE_UDP:
+ err = airoha_ppe_flow_mangle_ports(act, &data);
+ break;
+ case FLOW_ACT_MANGLE_HDR_TYPE_IP4:
+ err = airoha_ppe_flow_mangle_ipv4(act, &data);
+ break;
+ case FLOW_ACT_MANGLE_HDR_TYPE_ETH:
+ /* handled earlier */
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ if (err)
+ return err;
+ }
+
+ if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
+ err = airoha_ppe_foe_entry_set_ipv4_tuple(&hwe, &data, true);
+ if (err)
+ return err;
+ }
+
+ e = kzalloc(sizeof(*e), GFP_KERNEL);
+ if (!e)
+ return -ENOMEM;
+
+ e->cookie = f->cookie;
+ memcpy(&e->data, &hwe, sizeof(e->data));
+
+ err = airoha_ppe_foe_flow_commit_entry(eth->ppe, e);
+ if (err)
+ goto free_entry;
+
+ err = rhashtable_insert_fast(&eth->flow_table, &e->node,
+ airoha_flow_table_params);
+ if (err < 0)
+ goto remove_foe_entry;
+
+ return 0;
+
+remove_foe_entry:
+ airoha_ppe_foe_flow_remove_entry(eth->ppe, e);
+free_entry:
+ kfree(e);
+
+ return err;
+}
+
+static int airoha_ppe_flow_offload_destroy(struct airoha_gdm_port *port,
+ struct flow_cls_offload *f)
+{
+ struct airoha_eth *eth = port->qdma->eth;
+ struct airoha_flow_table_entry *e;
+
+ e = rhashtable_lookup(&eth->flow_table, &f->cookie,
+ airoha_flow_table_params);
+ if (!e)
+ return -ENOENT;
+
+ airoha_ppe_foe_flow_remove_entry(eth->ppe, e);
+ rhashtable_remove_fast(&eth->flow_table, &e->node,
+ airoha_flow_table_params);
+ kfree(e);
+
+ return 0;
+}
+
+static int airoha_ppe_flow_offload_cmd(struct airoha_gdm_port *port,
+ struct flow_cls_offload *f)
+{
+ switch (f->command) {
+ case FLOW_CLS_REPLACE:
+ return airoha_ppe_flow_offload_replace(port, f);
+ case FLOW_CLS_DESTROY:
+ return airoha_ppe_flow_offload_destroy(port, f);
+ default:
+ break;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static int airoha_ppe_flush_sram_entries(struct airoha_ppe *ppe,
+ struct airoha_npu *npu)
+{
+ int i, sram_num_entries = PPE_SRAM_NUM_ENTRIES;
+ struct airoha_foe_entry *hwe = ppe->foe;
+
+ if (airoha_ppe2_is_enabled(ppe->eth))
+ sram_num_entries = sram_num_entries / 2;
+
+ for (i = 0; i < sram_num_entries; i++)
+ memset(&hwe[i], 0, sizeof(*hwe));
+
+ return npu->ops.ppe_flush_sram_entries(npu, ppe->foe_dma,
+ PPE_SRAM_NUM_ENTRIES);
+}
+
+static struct airoha_npu *airoha_ppe_npu_get(struct airoha_eth *eth)
+{
+ struct airoha_npu *npu = airoha_npu_get(eth->dev);
+
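+ /* The NPU driver may not be loaded yet: request the module and
+ * retry once.
+ */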
+ if (IS_ERR(npu)) {
+ request_module("airoha-npu");
+ npu = airoha_npu_get(eth->dev);
+ }
+
+ return npu;
+}
+
+static int airoha_ppe_offload_setup(struct airoha_eth *eth)
+{
+ struct airoha_npu *npu = airoha_ppe_npu_get(eth);
+ int err;
+
+ if (IS_ERR(npu))
+ return PTR_ERR(npu);
+
+ err = npu->ops.ppe_init(npu);
+ if (err)
+ goto error_npu_put;
+
+ airoha_ppe_hw_init(eth->ppe);
+ err = airoha_ppe_flush_sram_entries(eth->ppe, npu);
+ if (err)
+ goto error_npu_put;
+
+ rcu_assign_pointer(eth->npu, npu);
+ synchronize_rcu();
+
+ return 0;
+
+error_npu_put:
+ airoha_npu_put(npu);
+
+ return err;
+}
+
+int airoha_ppe_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
+ void *cb_priv)
+{
+ struct flow_cls_offload *cls = type_data;
+ struct net_device *dev = cb_priv;
+ struct airoha_gdm_port *port = netdev_priv(dev);
+ struct airoha_eth *eth = port->qdma->eth;
+ int err = 0;
+
+ if (!tc_can_offload(dev) || type != TC_SETUP_CLSFLOWER)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&flow_offload_mutex);
+
+ if (!eth->npu)
+ err = airoha_ppe_offload_setup(eth);
+ if (!err)
+ err = airoha_ppe_flow_offload_cmd(port, cls);
+
+ mutex_unlock(&flow_offload_mutex);
+
+ return err;
+}
+
+void airoha_ppe_check_skb(struct airoha_ppe *ppe, u16 hash)
+{
+ u16 now, diff;
+
+ if (hash > PPE_HASH_MASK)
+ return;
+
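+ /* Rate-limit bind attempts to one every HZ / 10 jiffies per hash
+ * entry.
+ */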
+ now = (u16)jiffies;
+ diff = now - ppe->foe_check_time[hash];
+ if (diff < HZ / 10)
+ return;
+
+ ppe->foe_check_time[hash] = now;
+ airoha_ppe_foe_insert_entry(ppe, hash);
+}
+
+int airoha_ppe_init(struct airoha_eth *eth)
+{
+ struct airoha_ppe *ppe;
+ int foe_size, err;
+
+ ppe = devm_kzalloc(eth->dev, sizeof(*ppe), GFP_KERNEL);
+ if (!ppe)
+ return -ENOMEM;
+
+ foe_size = PPE_NUM_ENTRIES * sizeof(struct airoha_foe_entry);
+ ppe->foe = dmam_alloc_coherent(eth->dev, foe_size, &ppe->foe_dma,
+ GFP_KERNEL);
+ if (!ppe->foe)
+ return -ENOMEM;
+
+ ppe->eth = eth;
+ eth->ppe = ppe;
+
+ ppe->foe_flow = devm_kzalloc(eth->dev,
+ PPE_NUM_ENTRIES * sizeof(*ppe->foe_flow),
+ GFP_KERNEL);
+ if (!ppe->foe_flow)
+ return -ENOMEM;
+
+ err = rhashtable_init(&eth->flow_table, &airoha_flow_table_params);
+ if (err)
+ return err;
+
+ err = airoha_ppe_debugfs_init(ppe);
+ if (err)
+ rhashtable_destroy(&eth->flow_table);
+
+ return err;
+}
+
+void airoha_ppe_deinit(struct airoha_eth *eth)
+{
+ struct airoha_npu *npu;
+
+ rcu_read_lock();
+ npu = rcu_dereference(eth->npu);
+ if (npu) {
+ npu->ops.ppe_deinit(npu);
+ airoha_npu_put(npu);
+ }
+ rcu_read_unlock();
+
+ rhashtable_destroy(&eth->flow_table);
+ debugfs_remove(eth->ppe->debugfs_dir);
+}
diff --git a/drivers/net/ethernet/airoha/airoha_ppe_debugfs.c b/drivers/net/ethernet/airoha/airoha_ppe_debugfs.c
new file mode 100644
index 000000000000..3cdc6fd53fc7
--- /dev/null
+++ b/drivers/net/ethernet/airoha/airoha_ppe_debugfs.c
@@ -0,0 +1,181 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025 AIROHA Inc
+ * Author: Lorenzo Bianconi <lorenzo@kernel.org>
+ */
+
+#include "airoha_eth.h"
+
+static void airoha_debugfs_ppe_print_tuple(struct seq_file *m,
+ void *src_addr, void *dest_addr,
+ u16 *src_port, u16 *dest_port,
+ bool ipv6)
+{
+ __be32 n_addr[IPV6_ADDR_WORDS];
+
+ if (ipv6) {
+ ipv6_addr_cpu_to_be32(n_addr, src_addr);
+ seq_printf(m, "%pI6", n_addr);
+ } else {
+ seq_printf(m, "%pI4h", src_addr);
+ }
+ if (src_port)
+ seq_printf(m, ":%d", *src_port);
+
+ seq_puts(m, "->");
+
+ if (ipv6) {
+ ipv6_addr_cpu_to_be32(n_addr, dest_addr);
+ seq_printf(m, "%pI6", n_addr);
+ } else {
+ seq_printf(m, "%pI4h", dest_addr);
+ }
+ if (dest_port)
+ seq_printf(m, ":%d", *dest_port);
+}
+
+static int airoha_ppe_debugfs_foe_show(struct seq_file *m, void *private,
+ bool bind)
+{
+ static const char *const ppe_type_str[] = {
+ [PPE_PKT_TYPE_IPV4_HNAPT] = "IPv4 5T",
+ [PPE_PKT_TYPE_IPV4_ROUTE] = "IPv4 3T",
+ [PPE_PKT_TYPE_BRIDGE] = "L2B",
+ [PPE_PKT_TYPE_IPV4_DSLITE] = "DS-LITE",
+ [PPE_PKT_TYPE_IPV6_ROUTE_3T] = "IPv6 3T",
+ [PPE_PKT_TYPE_IPV6_ROUTE_5T] = "IPv6 5T",
+ [PPE_PKT_TYPE_IPV6_6RD] = "6RD",
+ };
+ static const char *const ppe_state_str[] = {
+ [AIROHA_FOE_STATE_INVALID] = "INV",
+ [AIROHA_FOE_STATE_UNBIND] = "UNB",
+ [AIROHA_FOE_STATE_BIND] = "BND",
+ [AIROHA_FOE_STATE_FIN] = "FIN",
+ };
+ struct airoha_ppe *ppe = m->private;
+ int i;
+
+ for (i = 0; i < PPE_NUM_ENTRIES; i++) {
+ const char *state_str, *type_str = "UNKNOWN";
+ void *src_addr = NULL, *dest_addr = NULL;
+ u16 *src_port = NULL, *dest_port = NULL;
+ struct airoha_foe_mac_info_common *l2;
+ unsigned char h_source[ETH_ALEN] = {};
+ unsigned char h_dest[ETH_ALEN];
+ struct airoha_foe_entry *hwe;
+ u32 type, state, ib2, data;
+ bool ipv6 = false;
+
+ hwe = airoha_ppe_foe_get_entry(ppe, i);
+ if (!hwe)
+ continue;
+
+ state = FIELD_GET(AIROHA_FOE_IB1_BIND_STATE, hwe->ib1);
+ if (!state)
+ continue;
+
+ if (bind && state != AIROHA_FOE_STATE_BIND)
+ continue;
+
+ state_str = ppe_state_str[state % ARRAY_SIZE(ppe_state_str)];
+ type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, hwe->ib1);
+ if (type < ARRAY_SIZE(ppe_type_str) && ppe_type_str[type])
+ type_str = ppe_type_str[type];
+
+ seq_printf(m, "%05x %s %7s", i, state_str, type_str);
+
+ switch (type) {
+ case PPE_PKT_TYPE_IPV4_HNAPT:
+ case PPE_PKT_TYPE_IPV4_DSLITE:
+ src_port = &hwe->ipv4.orig_tuple.src_port;
+ dest_port = &hwe->ipv4.orig_tuple.dest_port;
+ fallthrough;
+ case PPE_PKT_TYPE_IPV4_ROUTE:
+ src_addr = &hwe->ipv4.orig_tuple.src_ip;
+ dest_addr = &hwe->ipv4.orig_tuple.dest_ip;
+ break;
+ case PPE_PKT_TYPE_IPV6_ROUTE_5T:
+ src_port = &hwe->ipv6.src_port;
+ dest_port = &hwe->ipv6.dest_port;
+ fallthrough;
+ case PPE_PKT_TYPE_IPV6_ROUTE_3T:
+ case PPE_PKT_TYPE_IPV6_6RD:
+ src_addr = &hwe->ipv6.src_ip;
+ dest_addr = &hwe->ipv6.dest_ip;
+ ipv6 = true;
+ break;
+ default:
+ break;
+ }
+
+ if (src_addr && dest_addr) {
+ seq_puts(m, " orig=");
+ airoha_debugfs_ppe_print_tuple(m, src_addr, dest_addr,
+ src_port, dest_port, ipv6);
+ }
+
+ switch (type) {
+ case PPE_PKT_TYPE_IPV4_HNAPT:
+ case PPE_PKT_TYPE_IPV4_DSLITE:
+ src_port = &hwe->ipv4.new_tuple.src_port;
+ dest_port = &hwe->ipv4.new_tuple.dest_port;
+ fallthrough;
+ case PPE_PKT_TYPE_IPV4_ROUTE:
+ src_addr = &hwe->ipv4.new_tuple.src_ip;
+ dest_addr = &hwe->ipv4.new_tuple.dest_ip;
+ seq_puts(m, " new=");
+ airoha_debugfs_ppe_print_tuple(m, src_addr, dest_addr,
+ src_port, dest_port,
+ ipv6);
+ break;
+ default:
+ break;
+ }
+
+ if (type >= PPE_PKT_TYPE_IPV6_ROUTE_3T) {
+ data = hwe->ipv6.data;
+ ib2 = hwe->ipv6.ib2;
+ l2 = &hwe->ipv6.l2;
+ } else {
+ data = hwe->ipv4.data;
+ ib2 = hwe->ipv4.ib2;
+ l2 = &hwe->ipv4.l2.common;
+ *((__be16 *)&h_source[4]) =
+ cpu_to_be16(hwe->ipv4.l2.src_mac_lo);
+ }
+
+ *((__be32 *)h_dest) = cpu_to_be32(l2->dest_mac_hi);
+ *((__be16 *)&h_dest[4]) = cpu_to_be16(l2->dest_mac_lo);
+ *((__be32 *)h_source) = cpu_to_be32(l2->src_mac_hi);
+
+ seq_printf(m, " eth=%pM->%pM etype=%04x data=%08x"
+ " vlan=%d,%d ib1=%08x ib2=%08x\n",
+ h_source, h_dest, l2->etype, data,
+ l2->vlan1, l2->vlan2, hwe->ib1, ib2);
+ }
+
+ return 0;
+}
+
+static int airoha_ppe_debugfs_foe_all_show(struct seq_file *m, void *private)
+{
+ return airoha_ppe_debugfs_foe_show(m, private, false);
+}
+DEFINE_SHOW_ATTRIBUTE(airoha_ppe_debugfs_foe_all);
+
+static int airoha_ppe_debugfs_foe_bind_show(struct seq_file *m, void *private)
+{
+ return airoha_ppe_debugfs_foe_show(m, private, true);
+}
+DEFINE_SHOW_ATTRIBUTE(airoha_ppe_debugfs_foe_bind);
+
+int airoha_ppe_debugfs_init(struct airoha_ppe *ppe)
+{
+ ppe->debugfs_dir = debugfs_create_dir("ppe", NULL);
+ debugfs_create_file("entries", 0444, ppe->debugfs_dir, ppe,
+ &airoha_ppe_debugfs_foe_all_fops);
+ debugfs_create_file("bind", 0444, ppe->debugfs_dir, ppe,
+ &airoha_ppe_debugfs_foe_bind_fops);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/airoha/airoha_regs.h b/drivers/net/ethernet/airoha/airoha_regs.h
new file mode 100644
index 000000000000..8146cde4e8ba
--- /dev/null
+++ b/drivers/net/ethernet/airoha/airoha_regs.h
@@ -0,0 +1,803 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2024 AIROHA Inc
+ * Author: Lorenzo Bianconi <lorenzo@kernel.org>
+ */
+
+#ifndef AIROHA_REGS_H
+#define AIROHA_REGS_H
+
+#include <linux/types.h>
+
+/* FE */
+#define PSE_BASE 0x0100
+#define CSR_IFC_BASE 0x0200
+#define CDM1_BASE 0x0400
+#define GDM1_BASE 0x0500
+#define PPE1_BASE 0x0c00
+#define PPE2_BASE 0x1c00
+
+#define CDM2_BASE 0x1400
+#define GDM2_BASE 0x1500
+
+#define GDM3_BASE 0x1100
+#define GDM4_BASE 0x2500
+
+#define GDM_BASE(_n) \
+ ((_n) == 4 ? GDM4_BASE : \
+ (_n) == 3 ? GDM3_BASE : \
+ (_n) == 2 ? GDM2_BASE : GDM1_BASE)
+
+#define REG_FE_DMA_GLO_CFG 0x0000
+#define FE_DMA_GLO_L2_SPACE_MASK GENMASK(7, 4)
+#define FE_DMA_GLO_PG_SZ_MASK BIT(3)
+
+#define REG_FE_RST_GLO_CFG 0x0004
+#define FE_RST_GDM4_MBI_ARB_MASK BIT(3)
+#define FE_RST_GDM3_MBI_ARB_MASK BIT(2)
+#define FE_RST_CORE_MASK BIT(0)
+
+#define REG_FE_FOE_TS 0x0010
+
+#define REG_FE_WAN_PORT 0x0024
+#define WAN1_EN_MASK BIT(16)
+#define WAN1_MASK GENMASK(12, 8)
+#define WAN0_MASK GENMASK(4, 0)
+
+#define REG_FE_WAN_MAC_H 0x0030
+#define REG_FE_LAN_MAC_H 0x0040
+
+#define REG_FE_MAC_LMIN(_n) ((_n) + 0x04)
+#define REG_FE_MAC_LMAX(_n) ((_n) + 0x08)
+
+#define REG_FE_CDM1_OQ_MAP0 0x0050
+#define REG_FE_CDM1_OQ_MAP1 0x0054
+#define REG_FE_CDM1_OQ_MAP2 0x0058
+#define REG_FE_CDM1_OQ_MAP3 0x005c
+
+#define REG_FE_PCE_CFG 0x0070
+#define PCE_DPI_EN_MASK BIT(2)
+#define PCE_KA_EN_MASK BIT(1)
+#define PCE_MC_EN_MASK BIT(0)
+
+#define REG_FE_PSE_QUEUE_CFG_WR 0x0080
+#define PSE_CFG_PORT_ID_MASK GENMASK(27, 24)
+#define PSE_CFG_QUEUE_ID_MASK GENMASK(20, 16)
+#define PSE_CFG_WR_EN_MASK BIT(8)
+#define PSE_CFG_OQRSV_SEL_MASK BIT(0)
+
+#define REG_FE_PSE_QUEUE_CFG_VAL 0x0084
+#define PSE_CFG_OQ_RSV_MASK GENMASK(13, 0)
+
+#define PSE_FQ_CFG 0x008c
+#define PSE_FQ_LIMIT_MASK GENMASK(14, 0)
+
+#define REG_FE_PSE_BUF_SET 0x0090
+#define PSE_SHARE_USED_LTHD_MASK GENMASK(31, 16)
+#define PSE_ALLRSV_MASK GENMASK(14, 0)
+
+#define REG_PSE_SHARE_USED_THD 0x0094
+#define PSE_SHARE_USED_MTHD_MASK GENMASK(31, 16)
+#define PSE_SHARE_USED_HTHD_MASK GENMASK(15, 0)
+
+#define REG_GDM_MISC_CFG 0x0148
+#define GDM2_RDM_ACK_WAIT_PREF_MASK BIT(9)
+#define GDM2_CHN_VLD_MODE_MASK BIT(5)
+
+#define REG_FE_CSR_IFC_CFG CSR_IFC_BASE
+#define FE_IFC_EN_MASK BIT(0)
+
+#define REG_FE_VIP_PORT_EN 0x01f0
+#define REG_FE_IFC_PORT_EN 0x01f4
+
+#define REG_PSE_IQ_REV1 (PSE_BASE + 0x08)
+#define PSE_IQ_RES1_P2_MASK GENMASK(23, 16)
+
+#define REG_PSE_IQ_REV2 (PSE_BASE + 0x0c)
+#define PSE_IQ_RES2_P5_MASK GENMASK(15, 8)
+#define PSE_IQ_RES2_P4_MASK GENMASK(7, 0)
+
+#define REG_FE_VIP_EN(_n) (0x0300 + ((_n) << 3))
+#define PATN_FCPU_EN_MASK BIT(7)
+#define PATN_SWP_EN_MASK BIT(6)
+#define PATN_DP_EN_MASK BIT(5)
+#define PATN_SP_EN_MASK BIT(4)
+#define PATN_TYPE_MASK GENMASK(3, 1)
+#define PATN_EN_MASK BIT(0)
+
+#define REG_FE_VIP_PATN(_n) (0x0304 + ((_n) << 3))
+#define PATN_DP_MASK GENMASK(31, 16)
+#define PATN_SP_MASK GENMASK(15, 0)
+
+#define REG_CDM1_VLAN_CTRL CDM1_BASE
+#define CDM1_VLAN_MASK GENMASK(31, 16)
+
+#define REG_CDM1_FWD_CFG (CDM1_BASE + 0x08)
+#define CDM1_VIP_QSEL_MASK GENMASK(24, 20)
+
+#define REG_CDM1_CRSN_QSEL(_n) (CDM1_BASE + 0x10 + ((_n) << 2))
+#define CDM1_CRSN_QSEL_REASON_MASK(_n) \
+ GENMASK(4 + (((_n) % 4) << 3), (((_n) % 4) << 3))
+
+#define REG_CDM2_FWD_CFG (CDM2_BASE + 0x08)
+#define CDM2_OAM_QSEL_MASK GENMASK(31, 27)
+#define CDM2_VIP_QSEL_MASK GENMASK(24, 20)
+
+#define REG_CDM2_CRSN_QSEL(_n) (CDM2_BASE + 0x10 + ((_n) << 2))
+#define CDM2_CRSN_QSEL_REASON_MASK(_n) \
+ GENMASK(4 + (((_n) % 4) << 3), (((_n) % 4) << 3))
+
+#define REG_GDM_FWD_CFG(_n) GDM_BASE(_n)
+#define GDM_DROP_CRC_ERR BIT(23)
+#define GDM_IP4_CKSUM BIT(22)
+#define GDM_TCP_CKSUM BIT(21)
+#define GDM_UDP_CKSUM BIT(20)
+#define GDM_STRIP_CRC BIT(16)
+#define GDM_UCFQ_MASK GENMASK(15, 12)
+#define GDM_BCFQ_MASK GENMASK(11, 8)
+#define GDM_MCFQ_MASK GENMASK(7, 4)
+#define GDM_OCFQ_MASK GENMASK(3, 0)
+
+#define REG_GDM_INGRESS_CFG(_n) (GDM_BASE(_n) + 0x10)
+#define GDM_INGRESS_FC_EN_MASK BIT(1)
+#define GDM_STAG_EN_MASK BIT(0)
+
+#define REG_GDM_LEN_CFG(_n) (GDM_BASE(_n) + 0x14)
+#define GDM_SHORT_LEN_MASK GENMASK(13, 0)
+#define GDM_LONG_LEN_MASK GENMASK(29, 16)
+
+#define REG_GDM_LPBK_CFG(_n) (GDM_BASE(_n) + 0x1c)
+#define LPBK_GAP_MASK GENMASK(31, 24)
+#define LPBK_LEN_MASK GENMASK(23, 10)
+#define LPBK_CHAN_MASK GENMASK(8, 4)
+#define LPBK_MODE_MASK GENMASK(3, 1)
+#define LPBK_EN_MASK BIT(0)
+
+#define REG_GDM_TXCHN_EN(_n) (GDM_BASE(_n) + 0x24)
+#define REG_GDM_RXCHN_EN(_n) (GDM_BASE(_n) + 0x28)
+
+#define REG_FE_CPORT_CFG (GDM1_BASE + 0x40)
+#define FE_CPORT_PAD BIT(26)
+#define FE_CPORT_PORT_XFC_MASK BIT(25)
+#define FE_CPORT_QUEUE_XFC_MASK BIT(24)
+
+#define REG_FE_GDM_MIB_CLEAR(_n) (GDM_BASE(_n) + 0xf0)
+#define FE_GDM_MIB_RX_CLEAR_MASK BIT(1)
+#define FE_GDM_MIB_TX_CLEAR_MASK BIT(0)
+
+#define REG_FE_GDM1_MIB_CFG (GDM1_BASE + 0xf4)
+#define FE_STRICT_RFC2819_MODE_MASK BIT(31)
+#define FE_GDM1_TX_MIB_SPLIT_EN_MASK BIT(17)
+#define FE_GDM1_RX_MIB_SPLIT_EN_MASK BIT(16)
+#define FE_TX_MIB_ID_MASK GENMASK(15, 8)
+#define FE_RX_MIB_ID_MASK GENMASK(7, 0)
+
+#define REG_FE_GDM_TX_OK_PKT_CNT_L(_n) (GDM_BASE(_n) + 0x104)
+#define REG_FE_GDM_TX_OK_BYTE_CNT_L(_n) (GDM_BASE(_n) + 0x10c)
+#define REG_FE_GDM_TX_ETH_PKT_CNT_L(_n) (GDM_BASE(_n) + 0x110)
+#define REG_FE_GDM_TX_ETH_BYTE_CNT_L(_n) (GDM_BASE(_n) + 0x114)
+#define REG_FE_GDM_TX_ETH_DROP_CNT(_n) (GDM_BASE(_n) + 0x118)
+#define REG_FE_GDM_TX_ETH_BC_CNT(_n) (GDM_BASE(_n) + 0x11c)
+#define REG_FE_GDM_TX_ETH_MC_CNT(_n) (GDM_BASE(_n) + 0x120)
+#define REG_FE_GDM_TX_ETH_RUNT_CNT(_n) (GDM_BASE(_n) + 0x124)
+#define REG_FE_GDM_TX_ETH_LONG_CNT(_n) (GDM_BASE(_n) + 0x128)
+#define REG_FE_GDM_TX_ETH_E64_CNT_L(_n) (GDM_BASE(_n) + 0x12c)
+#define REG_FE_GDM_TX_ETH_L64_CNT_L(_n) (GDM_BASE(_n) + 0x130)
+#define REG_FE_GDM_TX_ETH_L127_CNT_L(_n) (GDM_BASE(_n) + 0x134)
+#define REG_FE_GDM_TX_ETH_L255_CNT_L(_n) (GDM_BASE(_n) + 0x138)
+#define REG_FE_GDM_TX_ETH_L511_CNT_L(_n) (GDM_BASE(_n) + 0x13c)
+#define REG_FE_GDM_TX_ETH_L1023_CNT_L(_n) (GDM_BASE(_n) + 0x140)
+
+#define REG_FE_GDM_RX_OK_PKT_CNT_L(_n) (GDM_BASE(_n) + 0x148)
+#define REG_FE_GDM_RX_FC_DROP_CNT(_n) (GDM_BASE(_n) + 0x14c)
+#define REG_FE_GDM_RX_RC_DROP_CNT(_n) (GDM_BASE(_n) + 0x150)
+#define REG_FE_GDM_RX_OVERFLOW_DROP_CNT(_n) (GDM_BASE(_n) + 0x154)
+#define REG_FE_GDM_RX_ERROR_DROP_CNT(_n) (GDM_BASE(_n) + 0x158)
+#define REG_FE_GDM_RX_OK_BYTE_CNT_L(_n) (GDM_BASE(_n) + 0x15c)
+#define REG_FE_GDM_RX_ETH_PKT_CNT_L(_n) (GDM_BASE(_n) + 0x160)
+#define REG_FE_GDM_RX_ETH_BYTE_CNT_L(_n) (GDM_BASE(_n) + 0x164)
+#define REG_FE_GDM_RX_ETH_DROP_CNT(_n) (GDM_BASE(_n) + 0x168)
+#define REG_FE_GDM_RX_ETH_BC_CNT(_n) (GDM_BASE(_n) + 0x16c)
+#define REG_FE_GDM_RX_ETH_MC_CNT(_n) (GDM_BASE(_n) + 0x170)
+#define REG_FE_GDM_RX_ETH_CRC_ERR_CNT(_n) (GDM_BASE(_n) + 0x174)
+#define REG_FE_GDM_RX_ETH_FRAG_CNT(_n) (GDM_BASE(_n) + 0x178)
+#define REG_FE_GDM_RX_ETH_JABBER_CNT(_n) (GDM_BASE(_n) + 0x17c)
+#define REG_FE_GDM_RX_ETH_RUNT_CNT(_n) (GDM_BASE(_n) + 0x180)
+#define REG_FE_GDM_RX_ETH_LONG_CNT(_n) (GDM_BASE(_n) + 0x184)
+#define REG_FE_GDM_RX_ETH_E64_CNT_L(_n) (GDM_BASE(_n) + 0x188)
+#define REG_FE_GDM_RX_ETH_L64_CNT_L(_n) (GDM_BASE(_n) + 0x18c)
+#define REG_FE_GDM_RX_ETH_L127_CNT_L(_n) (GDM_BASE(_n) + 0x190)
+#define REG_FE_GDM_RX_ETH_L255_CNT_L(_n) (GDM_BASE(_n) + 0x194)
+#define REG_FE_GDM_RX_ETH_L511_CNT_L(_n) (GDM_BASE(_n) + 0x198)
+#define REG_FE_GDM_RX_ETH_L1023_CNT_L(_n) (GDM_BASE(_n) + 0x19c)
+
+#define REG_PPE_GLO_CFG(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x200)
+#define PPE_GLO_CFG_BUSY_MASK BIT(31)
+#define PPE_GLO_CFG_FLOW_DROP_UPDATE_MASK BIT(9)
+#define PPE_GLO_CFG_PSE_HASH_OFS_MASK BIT(6)
+#define PPE_GLO_CFG_PPE_BSWAP_MASK BIT(5)
+#define PPE_GLO_CFG_TTL_DROP_MASK BIT(4)
+#define PPE_GLO_CFG_IP4_CS_DROP_MASK BIT(3)
+#define PPE_GLO_CFG_IP4_L4_CS_DROP_MASK BIT(2)
+#define PPE_GLO_CFG_EN_MASK BIT(0)
+
+#define REG_PPE_PPE_FLOW_CFG(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x204)
+#define PPE_FLOW_CFG_IP6_HASH_GRE_KEY_MASK BIT(20)
+#define PPE_FLOW_CFG_IP4_HASH_GRE_KEY_MASK BIT(19)
+#define PPE_FLOW_CFG_IP4_HASH_FLOW_LABEL_MASK BIT(18)
+#define PPE_FLOW_CFG_IP4_NAT_FRAG_MASK BIT(17)
+#define PPE_FLOW_CFG_IP_PROTO_BLACKLIST_MASK BIT(16)
+#define PPE_FLOW_CFG_IP4_DSLITE_MASK BIT(14)
+#define PPE_FLOW_CFG_IP4_NAPT_MASK BIT(13)
+#define PPE_FLOW_CFG_IP4_NAT_MASK BIT(12)
+#define PPE_FLOW_CFG_IP6_6RD_MASK BIT(10)
+#define PPE_FLOW_CFG_IP6_5T_ROUTE_MASK BIT(9)
+#define PPE_FLOW_CFG_IP6_3T_ROUTE_MASK BIT(8)
+#define PPE_FLOW_CFG_IP4_UDP_FRAG_MASK BIT(7)
+#define PPE_FLOW_CFG_IP4_TCP_FRAG_MASK BIT(6)
+
+#define REG_PPE_IP_PROTO_CHK(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x208)
+#define PPE_IP_PROTO_CHK_IPV4_MASK GENMASK(15, 0)
+#define PPE_IP_PROTO_CHK_IPV6_MASK GENMASK(31, 16)
+
+#define REG_PPE_TB_CFG(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x21c)
+#define PPE_SRAM_TB_NUM_ENTRY_MASK GENMASK(26, 24)
+#define PPE_TB_CFG_KEEPALIVE_MASK GENMASK(13, 12)
+#define PPE_TB_CFG_AGE_TCP_FIN_MASK BIT(11)
+#define PPE_TB_CFG_AGE_UDP_MASK BIT(10)
+#define PPE_TB_CFG_AGE_TCP_MASK BIT(9)
+#define PPE_TB_CFG_AGE_UNBIND_MASK BIT(8)
+#define PPE_TB_CFG_AGE_NON_L4_MASK BIT(7)
+#define PPE_TB_CFG_AGE_PREBIND_MASK BIT(6)
+#define PPE_TB_CFG_SEARCH_MISS_MASK GENMASK(5, 4)
+#define PPE_TB_ENTRY_SIZE_MASK BIT(3)
+#define PPE_DRAM_TB_NUM_ENTRY_MASK GENMASK(2, 0)
+
+#define REG_PPE_TB_BASE(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x220)
+
+#define REG_PPE_BIND_RATE(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x228)
+#define PPE_BIND_RATE_L2B_BIND_MASK GENMASK(31, 16)
+#define PPE_BIND_RATE_BIND_MASK GENMASK(15, 0)
+
+#define REG_PPE_BIND_LIMIT0(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x22c)
+#define PPE_BIND_LIMIT0_HALF_MASK GENMASK(29, 16)
+#define PPE_BIND_LIMIT0_QUARTER_MASK GENMASK(13, 0)
+
+#define REG_PPE_BIND_LIMIT1(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x230)
+#define PPE_BIND_LIMIT1_NON_L4_MASK GENMASK(23, 16)
+#define PPE_BIND_LIMIT1_FULL_MASK GENMASK(13, 0)
+
+#define REG_PPE_BND_AGE0(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x23c)
+#define PPE_BIND_AGE0_DELTA_NON_L4 GENMASK(30, 16)
+#define PPE_BIND_AGE0_DELTA_UDP GENMASK(14, 0)
+
+#define REG_PPE_UNBIND_AGE(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x238)
+#define PPE_UNBIND_AGE_MIN_PACKETS_MASK GENMASK(31, 16)
+#define PPE_UNBIND_AGE_DELTA_MASK GENMASK(7, 0)
+
+#define REG_PPE_BND_AGE1(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x240)
+#define PPE_BIND_AGE1_DELTA_TCP_FIN GENMASK(30, 16)
+#define PPE_BIND_AGE1_DELTA_TCP GENMASK(14, 0)
+
+#define REG_PPE_HASH_SEED(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x244)
+#define PPE_HASH_SEED 0x12345678
+
+#define REG_PPE_DFT_CPORT0(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x248)
+
+#define REG_PPE_DFT_CPORT1(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x24c)
+
+#define REG_PPE_TB_HASH_CFG(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x250)
+#define PPE_DRAM_HASH1_MODE_MASK GENMASK(31, 28)
+#define PPE_DRAM_HASH1_EN_MASK BIT(24)
+#define PPE_DRAM_HASH0_MODE_MASK GENMASK(23, 20)
+#define PPE_DRAM_TABLE_EN_MASK BIT(16)
+#define PPE_SRAM_HASH1_MODE_MASK GENMASK(15, 12)
+#define PPE_SRAM_HASH1_EN_MASK BIT(8)
+#define PPE_SRAM_HASH0_MODE_MASK GENMASK(7, 4)
+#define PPE_SRAM_TABLE_EN_MASK BIT(0)
+
+#define REG_PPE_MTU_BASE(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x304)
+#define REG_PPE_MTU(_m, _n) (REG_PPE_MTU_BASE(_m) + ((_n) << 2))
+#define FP1_EGRESS_MTU_MASK GENMASK(29, 16)
+#define FP0_EGRESS_MTU_MASK GENMASK(13, 0)
+
+#define REG_PPE_RAM_CTRL(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x31c)
+#define PPE_SRAM_CTRL_ACK_MASK BIT(31)
+#define PPE_SRAM_CTRL_DUAL_SUCESS_MASK BIT(30)
+#define PPE_SRAM_CTRL_ENTRY_MASK GENMASK(23, 8)
+#define PPE_SRAM_WR_DUAL_DIRECTION_MASK BIT(2)
+#define PPE_SRAM_CTRL_WR_MASK BIT(1)
+#define PPE_SRAM_CTRL_REQ_MASK BIT(0)
+
+#define REG_PPE_RAM_BASE(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x320)
+#define REG_PPE_RAM_ENTRY(_m, _n) (REG_PPE_RAM_BASE(_m) + ((_n) << 2))
+
+#define REG_FE_GDM_TX_OK_PKT_CNT_H(_n) (GDM_BASE(_n) + 0x280)
+#define REG_FE_GDM_TX_OK_BYTE_CNT_H(_n) (GDM_BASE(_n) + 0x284)
+#define REG_FE_GDM_TX_ETH_PKT_CNT_H(_n) (GDM_BASE(_n) + 0x288)
+#define REG_FE_GDM_TX_ETH_BYTE_CNT_H(_n) (GDM_BASE(_n) + 0x28c)
+
+#define REG_FE_GDM_RX_OK_PKT_CNT_H(_n) (GDM_BASE(_n) + 0x290)
+#define REG_FE_GDM_RX_OK_BYTE_CNT_H(_n) (GDM_BASE(_n) + 0x294)
+#define REG_FE_GDM_RX_ETH_PKT_CNT_H(_n) (GDM_BASE(_n) + 0x298)
+#define REG_FE_GDM_RX_ETH_BYTE_CNT_H(_n) (GDM_BASE(_n) + 0x29c)
+#define REG_FE_GDM_TX_ETH_E64_CNT_H(_n) (GDM_BASE(_n) + 0x2b8)
+#define REG_FE_GDM_TX_ETH_L64_CNT_H(_n) (GDM_BASE(_n) + 0x2bc)
+#define REG_FE_GDM_TX_ETH_L127_CNT_H(_n) (GDM_BASE(_n) + 0x2c0)
+#define REG_FE_GDM_TX_ETH_L255_CNT_H(_n) (GDM_BASE(_n) + 0x2c4)
+#define REG_FE_GDM_TX_ETH_L511_CNT_H(_n) (GDM_BASE(_n) + 0x2c8)
+#define REG_FE_GDM_TX_ETH_L1023_CNT_H(_n) (GDM_BASE(_n) + 0x2cc)
+#define REG_FE_GDM_RX_ETH_E64_CNT_H(_n) (GDM_BASE(_n) + 0x2e8)
+#define REG_FE_GDM_RX_ETH_L64_CNT_H(_n) (GDM_BASE(_n) + 0x2ec)
+#define REG_FE_GDM_RX_ETH_L127_CNT_H(_n) (GDM_BASE(_n) + 0x2f0)
+#define REG_FE_GDM_RX_ETH_L255_CNT_H(_n) (GDM_BASE(_n) + 0x2f4)
+#define REG_FE_GDM_RX_ETH_L511_CNT_H(_n) (GDM_BASE(_n) + 0x2f8)
+#define REG_FE_GDM_RX_ETH_L1023_CNT_H(_n) (GDM_BASE(_n) + 0x2fc)
+
+#define REG_GDM2_CHN_RLS (GDM2_BASE + 0x20)
+#define MBI_RX_AGE_SEL_MASK GENMASK(26, 25)
+#define MBI_TX_AGE_SEL_MASK GENMASK(18, 17)
+
+#define REG_GDM3_FWD_CFG GDM3_BASE
+#define GDM3_PAD_EN_MASK BIT(28)
+
+#define REG_GDM4_FWD_CFG GDM4_BASE
+#define GDM4_PAD_EN_MASK BIT(28)
+#define GDM4_SPORT_OFFSET0_MASK GENMASK(11, 8)
+
+#define REG_GDM4_SRC_PORT_SET (GDM4_BASE + 0x23c)
+#define GDM4_SPORT_OFF2_MASK GENMASK(19, 16)
+#define GDM4_SPORT_OFF1_MASK GENMASK(15, 12)
+#define GDM4_SPORT_OFF0_MASK GENMASK(11, 8)
+
+#define REG_IP_FRAG_FP 0x2010
+#define IP_ASSEMBLE_PORT_MASK GENMASK(24, 21)
+#define IP_ASSEMBLE_NBQ_MASK GENMASK(20, 16)
+#define IP_FRAGMENT_PORT_MASK GENMASK(8, 5)
+#define IP_FRAGMENT_NBQ_MASK GENMASK(4, 0)
+
+#define REG_MC_VLAN_EN 0x2100
+#define MC_VLAN_EN_MASK BIT(0)
+
+#define REG_MC_VLAN_CFG 0x2104
+#define MC_VLAN_CFG_CMD_DONE_MASK BIT(31)
+#define MC_VLAN_CFG_TABLE_ID_MASK GENMASK(21, 16)
+#define MC_VLAN_CFG_PORT_ID_MASK GENMASK(11, 8)
+#define MC_VLAN_CFG_TABLE_SEL_MASK BIT(4)
+#define MC_VLAN_CFG_RW_MASK BIT(0)
+
+#define REG_MC_VLAN_DATA 0x2108
+
+#define REG_SP_DFT_CPORT(_n) (0x20e0 + ((_n) << 2))
+#define SP_CPORT_PCIE1_MASK GENMASK(31, 28)
+#define SP_CPORT_PCIE0_MASK GENMASK(27, 24)
+#define SP_CPORT_USB_MASK GENMASK(7, 4)
+#define SP_CPORT_ETH_MASK GENMASK(7, 4)
+
+#define REG_SRC_PORT_FC_MAP6 0x2298
+#define FC_ID_OF_SRC_PORT27_MASK GENMASK(28, 24)
+#define FC_ID_OF_SRC_PORT26_MASK GENMASK(20, 16)
+#define FC_ID_OF_SRC_PORT25_MASK GENMASK(12, 8)
+#define FC_ID_OF_SRC_PORT24_MASK GENMASK(4, 0)
+
+#define REG_CDM5_RX_OQ1_DROP_CNT 0x29d4
+
+/* QDMA */
+#define REG_QDMA_GLOBAL_CFG 0x0004
+#define GLOBAL_CFG_RX_2B_OFFSET_MASK BIT(31)
+#define GLOBAL_CFG_DMA_PREFERENCE_MASK GENMASK(30, 29)
+#define GLOBAL_CFG_CPU_TXR_RR_MASK BIT(28)
+#define GLOBAL_CFG_DSCP_BYTE_SWAP_MASK BIT(27)
+#define GLOBAL_CFG_PAYLOAD_BYTE_SWAP_MASK BIT(26)
+#define GLOBAL_CFG_MULTICAST_MODIFY_FP_MASK BIT(25)
+#define GLOBAL_CFG_OAM_MODIFY_MASK BIT(24)
+#define GLOBAL_CFG_RESET_MASK BIT(23)
+#define GLOBAL_CFG_RESET_DONE_MASK BIT(22)
+#define GLOBAL_CFG_MULTICAST_EN_MASK BIT(21)
+#define GLOBAL_CFG_IRQ1_EN_MASK BIT(20)
+#define GLOBAL_CFG_IRQ0_EN_MASK BIT(19)
+#define GLOBAL_CFG_LOOPCNT_EN_MASK BIT(18)
+#define GLOBAL_CFG_RD_BYPASS_WR_MASK BIT(17)
+#define GLOBAL_CFG_QDMA_LOOPBACK_MASK BIT(16)
+#define GLOBAL_CFG_LPBK_RXQ_SEL_MASK GENMASK(13, 8)
+#define GLOBAL_CFG_CHECK_DONE_MASK BIT(7)
+#define GLOBAL_CFG_TX_WB_DONE_MASK BIT(6)
+#define GLOBAL_CFG_MAX_ISSUE_NUM_MASK GENMASK(5, 4)
+#define GLOBAL_CFG_RX_DMA_BUSY_MASK BIT(3)
+#define GLOBAL_CFG_RX_DMA_EN_MASK BIT(2)
+#define GLOBAL_CFG_TX_DMA_BUSY_MASK BIT(1)
+#define GLOBAL_CFG_TX_DMA_EN_MASK BIT(0)
+
+#define REG_FWD_DSCP_BASE 0x0010
+#define REG_FWD_BUF_BASE 0x0014
+
+#define REG_HW_FWD_DSCP_CFG 0x0018
+#define HW_FWD_DSCP_PAYLOAD_SIZE_MASK GENMASK(29, 28)
+#define HW_FWD_DSCP_SCATTER_LEN_MASK GENMASK(17, 16)
+#define HW_FWD_DSCP_MIN_SCATTER_LEN_MASK GENMASK(15, 0)
+
+#define REG_INT_STATUS(_n) \
+ (((_n) == 4) ? 0x0730 : \
+ ((_n) == 3) ? 0x0724 : \
+ ((_n) == 2) ? 0x0720 : \
+ ((_n) == 1) ? 0x0024 : 0x0020)
+
+#define REG_INT_ENABLE(_n) \
+ (((_n) == 4) ? 0x0750 : \
+ ((_n) == 3) ? 0x0744 : \
+ ((_n) == 2) ? 0x0740 : \
+ ((_n) == 1) ? 0x002c : 0x0028)
+
+/* QDMA_CSR_INT_ENABLE1 */
+#define RX15_COHERENT_INT_MASK BIT(31)
+#define RX14_COHERENT_INT_MASK BIT(30)
+#define RX13_COHERENT_INT_MASK BIT(29)
+#define RX12_COHERENT_INT_MASK BIT(28)
+#define RX11_COHERENT_INT_MASK BIT(27)
+#define RX10_COHERENT_INT_MASK BIT(26)
+#define RX9_COHERENT_INT_MASK BIT(25)
+#define RX8_COHERENT_INT_MASK BIT(24)
+#define RX7_COHERENT_INT_MASK BIT(23)
+#define RX6_COHERENT_INT_MASK BIT(22)
+#define RX5_COHERENT_INT_MASK BIT(21)
+#define RX4_COHERENT_INT_MASK BIT(20)
+#define RX3_COHERENT_INT_MASK BIT(19)
+#define RX2_COHERENT_INT_MASK BIT(18)
+#define RX1_COHERENT_INT_MASK BIT(17)
+#define RX0_COHERENT_INT_MASK BIT(16)
+#define TX7_COHERENT_INT_MASK BIT(15)
+#define TX6_COHERENT_INT_MASK BIT(14)
+#define TX5_COHERENT_INT_MASK BIT(13)
+#define TX4_COHERENT_INT_MASK BIT(12)
+#define TX3_COHERENT_INT_MASK BIT(11)
+#define TX2_COHERENT_INT_MASK BIT(10)
+#define TX1_COHERENT_INT_MASK BIT(9)
+#define TX0_COHERENT_INT_MASK BIT(8)
+#define CNT_OVER_FLOW_INT_MASK BIT(7)
+#define IRQ1_FULL_INT_MASK BIT(5)
+#define IRQ1_INT_MASK BIT(4)
+#define HWFWD_DSCP_LOW_INT_MASK BIT(3)
+#define HWFWD_DSCP_EMPTY_INT_MASK BIT(2)
+#define IRQ0_FULL_INT_MASK BIT(1)
+#define IRQ0_INT_MASK BIT(0)
+
+#define TX_DONE_INT_MASK(_n) \
+ ((_n) ? IRQ1_INT_MASK | IRQ1_FULL_INT_MASK \
+ : IRQ0_INT_MASK | IRQ0_FULL_INT_MASK)
+
+#define INT_TX_MASK \
+ (IRQ1_INT_MASK | IRQ1_FULL_INT_MASK | \
+ IRQ0_INT_MASK | IRQ0_FULL_INT_MASK)
+
+#define INT_IDX0_MASK \
+ (TX0_COHERENT_INT_MASK | TX1_COHERENT_INT_MASK | \
+ TX2_COHERENT_INT_MASK | TX3_COHERENT_INT_MASK | \
+ TX4_COHERENT_INT_MASK | TX5_COHERENT_INT_MASK | \
+ TX6_COHERENT_INT_MASK | TX7_COHERENT_INT_MASK | \
+ RX0_COHERENT_INT_MASK | RX1_COHERENT_INT_MASK | \
+ RX2_COHERENT_INT_MASK | RX3_COHERENT_INT_MASK | \
+ RX4_COHERENT_INT_MASK | RX7_COHERENT_INT_MASK | \
+ RX8_COHERENT_INT_MASK | RX9_COHERENT_INT_MASK | \
+ RX15_COHERENT_INT_MASK | INT_TX_MASK)
+
+/* QDMA_CSR_INT_ENABLE2 */
+#define RX15_NO_CPU_DSCP_INT_MASK BIT(31)
+#define RX14_NO_CPU_DSCP_INT_MASK BIT(30)
+#define RX13_NO_CPU_DSCP_INT_MASK BIT(29)
+#define RX12_NO_CPU_DSCP_INT_MASK BIT(28)
+#define RX11_NO_CPU_DSCP_INT_MASK BIT(27)
+#define RX10_NO_CPU_DSCP_INT_MASK BIT(26)
+#define RX9_NO_CPU_DSCP_INT_MASK BIT(25)
+#define RX8_NO_CPU_DSCP_INT_MASK BIT(24)
+#define RX7_NO_CPU_DSCP_INT_MASK BIT(23)
+#define RX6_NO_CPU_DSCP_INT_MASK BIT(22)
+#define RX5_NO_CPU_DSCP_INT_MASK BIT(21)
+#define RX4_NO_CPU_DSCP_INT_MASK BIT(20)
+#define RX3_NO_CPU_DSCP_INT_MASK BIT(19)
+#define RX2_NO_CPU_DSCP_INT_MASK BIT(18)
+#define RX1_NO_CPU_DSCP_INT_MASK BIT(17)
+#define RX0_NO_CPU_DSCP_INT_MASK BIT(16)
+#define RX15_DONE_INT_MASK BIT(15)
+#define RX14_DONE_INT_MASK BIT(14)
+#define RX13_DONE_INT_MASK BIT(13)
+#define RX12_DONE_INT_MASK BIT(12)
+#define RX11_DONE_INT_MASK BIT(11)
+#define RX10_DONE_INT_MASK BIT(10)
+#define RX9_DONE_INT_MASK BIT(9)
+#define RX8_DONE_INT_MASK BIT(8)
+#define RX7_DONE_INT_MASK BIT(7)
+#define RX6_DONE_INT_MASK BIT(6)
+#define RX5_DONE_INT_MASK BIT(5)
+#define RX4_DONE_INT_MASK BIT(4)
+#define RX3_DONE_INT_MASK BIT(3)
+#define RX2_DONE_INT_MASK BIT(2)
+#define RX1_DONE_INT_MASK BIT(1)
+#define RX0_DONE_INT_MASK BIT(0)
+
+#define RX_DONE_INT_MASK \
+ (RX0_DONE_INT_MASK | RX1_DONE_INT_MASK | \
+ RX2_DONE_INT_MASK | RX3_DONE_INT_MASK | \
+ RX4_DONE_INT_MASK | RX7_DONE_INT_MASK | \
+ RX8_DONE_INT_MASK | RX9_DONE_INT_MASK | \
+ RX15_DONE_INT_MASK)
+#define INT_IDX1_MASK \
+ (RX_DONE_INT_MASK | \
+ RX0_NO_CPU_DSCP_INT_MASK | RX1_NO_CPU_DSCP_INT_MASK | \
+ RX2_NO_CPU_DSCP_INT_MASK | RX3_NO_CPU_DSCP_INT_MASK | \
+ RX4_NO_CPU_DSCP_INT_MASK | RX7_NO_CPU_DSCP_INT_MASK | \
+ RX8_NO_CPU_DSCP_INT_MASK | RX9_NO_CPU_DSCP_INT_MASK | \
+ RX15_NO_CPU_DSCP_INT_MASK)
+
+/* QDMA_CSR_INT_ENABLE5 */
+#define TX31_COHERENT_INT_MASK BIT(31)
+#define TX30_COHERENT_INT_MASK BIT(30)
+#define TX29_COHERENT_INT_MASK BIT(29)
+#define TX28_COHERENT_INT_MASK BIT(28)
+#define TX27_COHERENT_INT_MASK BIT(27)
+#define TX26_COHERENT_INT_MASK BIT(26)
+#define TX25_COHERENT_INT_MASK BIT(25)
+#define TX24_COHERENT_INT_MASK BIT(24)
+#define TX23_COHERENT_INT_MASK BIT(23)
+#define TX22_COHERENT_INT_MASK BIT(22)
+#define TX21_COHERENT_INT_MASK BIT(21)
+#define TX20_COHERENT_INT_MASK BIT(20)
+#define TX19_COHERENT_INT_MASK BIT(19)
+#define TX18_COHERENT_INT_MASK BIT(18)
+#define TX17_COHERENT_INT_MASK BIT(17)
+#define TX16_COHERENT_INT_MASK BIT(16)
+#define TX15_COHERENT_INT_MASK BIT(15)
+#define TX14_COHERENT_INT_MASK BIT(14)
+#define TX13_COHERENT_INT_MASK BIT(13)
+#define TX12_COHERENT_INT_MASK BIT(12)
+#define TX11_COHERENT_INT_MASK BIT(11)
+#define TX10_COHERENT_INT_MASK BIT(10)
+#define TX9_COHERENT_INT_MASK BIT(9)
+#define TX8_COHERENT_INT_MASK BIT(8)
+
+#define INT_IDX4_MASK \
+ (TX8_COHERENT_INT_MASK | TX9_COHERENT_INT_MASK | \
+ TX10_COHERENT_INT_MASK | TX11_COHERENT_INT_MASK | \
+ TX12_COHERENT_INT_MASK | TX13_COHERENT_INT_MASK | \
+ TX14_COHERENT_INT_MASK | TX15_COHERENT_INT_MASK | \
+ TX16_COHERENT_INT_MASK | TX17_COHERENT_INT_MASK | \
+ TX18_COHERENT_INT_MASK | TX19_COHERENT_INT_MASK | \
+ TX20_COHERENT_INT_MASK | TX21_COHERENT_INT_MASK | \
+ TX22_COHERENT_INT_MASK | TX23_COHERENT_INT_MASK | \
+ TX24_COHERENT_INT_MASK | TX25_COHERENT_INT_MASK | \
+ TX26_COHERENT_INT_MASK | TX27_COHERENT_INT_MASK | \
+ TX28_COHERENT_INT_MASK | TX29_COHERENT_INT_MASK | \
+ TX30_COHERENT_INT_MASK | TX31_COHERENT_INT_MASK)
+
+#define REG_TX_IRQ_BASE(_n) ((_n) ? 0x0048 : 0x0050)
+
+#define REG_TX_IRQ_CFG(_n) ((_n) ? 0x004c : 0x0054)
+#define TX_IRQ_THR_MASK GENMASK(27, 16)
+#define TX_IRQ_DEPTH_MASK GENMASK(11, 0)
+
+#define REG_IRQ_CLEAR_LEN(_n) ((_n) ? 0x0064 : 0x0058)
+#define IRQ_CLEAR_LEN_MASK GENMASK(7, 0)
+
+#define REG_IRQ_STATUS(_n) ((_n) ? 0x0068 : 0x005c)
+#define IRQ_ENTRY_LEN_MASK GENMASK(27, 16)
+#define IRQ_HEAD_IDX_MASK GENMASK(11, 0)
+
+#define REG_TX_RING_BASE(_n) \
+ (((_n) < 8) ? 0x0100 + ((_n) << 5) : 0x0b00 + (((_n) - 8) << 5))
+
+#define REG_TX_RING_BLOCKING(_n) \
+ (((_n) < 8) ? 0x0104 + ((_n) << 5) : 0x0b04 + (((_n) - 8) << 5))
+
+#define TX_RING_IRQ_BLOCKING_MAP_MASK BIT(6)
+#define TX_RING_IRQ_BLOCKING_CFG_MASK BIT(4)
+#define TX_RING_IRQ_BLOCKING_TX_DROP_EN_MASK BIT(2)
+#define TX_RING_IRQ_BLOCKING_MAX_TH_TXRING_EN_MASK BIT(1)
+#define TX_RING_IRQ_BLOCKING_MIN_TH_TXRING_EN_MASK BIT(0)
+
+#define REG_TX_CPU_IDX(_n) \
+ (((_n) < 8) ? 0x0108 + ((_n) << 5) : 0x0b08 + (((_n) - 8) << 5))
+
+#define TX_RING_CPU_IDX_MASK GENMASK(15, 0)
+
+#define REG_TX_DMA_IDX(_n) \
+ (((_n) < 8) ? 0x010c + ((_n) << 5) : 0x0b0c + (((_n) - 8) << 5))
+
+#define TX_RING_DMA_IDX_MASK GENMASK(15, 0)
+
+#define IRQ_RING_IDX_MASK GENMASK(20, 16)
+#define IRQ_DESC_IDX_MASK GENMASK(15, 0)
+
+#define REG_RX_RING_BASE(_n) \
+ (((_n) < 16) ? 0x0200 + ((_n) << 5) : 0x0e00 + (((_n) - 16) << 5))
+
+#define REG_RX_RING_SIZE(_n) \
+ (((_n) < 16) ? 0x0204 + ((_n) << 5) : 0x0e04 + (((_n) - 16) << 5))
+
+#define RX_RING_THR_MASK GENMASK(31, 16)
+#define RX_RING_SIZE_MASK GENMASK(15, 0)
+
+#define REG_RX_CPU_IDX(_n) \
+ (((_n) < 16) ? 0x0208 + ((_n) << 5) : 0x0e08 + (((_n) - 16) << 5))
+
+#define RX_RING_CPU_IDX_MASK GENMASK(15, 0)
+
+#define REG_RX_DMA_IDX(_n) \
+ (((_n) < 16) ? 0x020c + ((_n) << 5) : 0x0e0c + (((_n) - 16) << 5))
+
+#define REG_RX_DELAY_INT_IDX(_n) \
+ (((_n) < 16) ? 0x0210 + ((_n) << 5) : 0x0e10 + (((_n) - 16) << 5))
+
+#define REG_RX_SCATTER_CFG(_n) \
+ (((_n) < 16) ? 0x0214 + ((_n) << 5) : 0x0e14 + (((_n) - 16) << 5))
+
+#define RX_DELAY_INT_MASK GENMASK(15, 0)
+
+#define RX_RING_DMA_IDX_MASK GENMASK(15, 0)
+
+#define RX_RING_SG_EN_MASK BIT(0)
+
+#define REG_INGRESS_TRTCM_CFG 0x0070
+#define INGRESS_TRTCM_EN_MASK BIT(31)
+#define INGRESS_TRTCM_MODE_MASK BIT(30)
+#define INGRESS_SLOW_TICK_RATIO_MASK GENMASK(29, 16)
+#define INGRESS_FAST_TICK_MASK GENMASK(15, 0)
+
+#define REG_QUEUE_CLOSE_CFG(_n) (0x00a0 + ((_n) & 0xfc))
+#define TXQ_DISABLE_CHAN_QUEUE_MASK(_n, _m) BIT((_m) + (((_n) & 0x3) << 3))
+
+#define REG_TXQ_DIS_CFG_BASE(_n) ((_n) ? 0x20a0 : 0x00a0)
+#define REG_TXQ_DIS_CFG(_n, _m) (REG_TXQ_DIS_CFG_BASE((_n)) + ((_m) << 2))
+
+#define REG_CNTR_CFG(_n) (0x0400 + ((_n) << 3))
+#define CNTR_EN_MASK BIT(31)
+#define CNTR_ALL_CHAN_EN_MASK BIT(30)
+#define CNTR_ALL_QUEUE_EN_MASK BIT(29)
+#define CNTR_ALL_DSCP_RING_EN_MASK BIT(28)
+#define CNTR_SRC_MASK GENMASK(27, 24)
+#define CNTR_DSCP_RING_MASK GENMASK(20, 16)
+#define CNTR_CHAN_MASK GENMASK(7, 3)
+#define CNTR_QUEUE_MASK GENMASK(2, 0)
+
+#define REG_CNTR_VAL(_n) (0x0404 + ((_n) << 3))
+
+#define REG_LMGR_INIT_CFG 0x1000
+#define LMGR_INIT_START BIT(31)
+#define LMGR_SRAM_MODE_MASK BIT(30)
+#define HW_FWD_PKTSIZE_OVERHEAD_MASK GENMASK(27, 20)
+#define HW_FWD_DESC_NUM_MASK GENMASK(16, 0)
+
+#define REG_FWD_DSCP_LOW_THR 0x1004
+#define FWD_DSCP_LOW_THR_MASK GENMASK(17, 0)
+
+#define REG_EGRESS_RATE_METER_CFG 0x100c
+#define EGRESS_RATE_METER_EN_MASK BIT(31)
+#define EGRESS_RATE_METER_EQ_RATE_EN_MASK BIT(17)
+#define EGRESS_RATE_METER_WINDOW_SZ_MASK GENMASK(16, 12)
+#define EGRESS_RATE_METER_TIMESLICE_MASK GENMASK(10, 0)
+
+#define REG_EGRESS_TRTCM_CFG 0x1010
+#define EGRESS_TRTCM_EN_MASK BIT(31)
+#define EGRESS_TRTCM_MODE_MASK BIT(30)
+#define EGRESS_SLOW_TICK_RATIO_MASK GENMASK(29, 16)
+#define EGRESS_FAST_TICK_MASK GENMASK(15, 0)
+
+#define TRTCM_PARAM_RW_MASK BIT(31)
+#define TRTCM_PARAM_RW_DONE_MASK BIT(30)
+#define TRTCM_PARAM_TYPE_MASK GENMASK(29, 28)
+#define TRTCM_METER_GROUP_MASK GENMASK(27, 26)
+#define TRTCM_PARAM_INDEX_MASK GENMASK(23, 17)
+#define TRTCM_PARAM_RATE_TYPE_MASK BIT(16)
+
+#define REG_TRTCM_CFG_PARAM(_n) ((_n) + 0x4)
+#define REG_TRTCM_DATA_LOW(_n) ((_n) + 0x8)
+#define REG_TRTCM_DATA_HIGH(_n) ((_n) + 0xc)
+
+#define REG_TXWRR_MODE_CFG 0x1020
+#define TWRR_WEIGHT_SCALE_MASK BIT(31)
+#define TWRR_WEIGHT_BASE_MASK BIT(3)
+
+#define REG_TXWRR_WEIGHT_CFG 0x1024
+#define TWRR_RW_CMD_MASK BIT(31)
+#define TWRR_RW_CMD_DONE BIT(30)
+#define TWRR_CHAN_IDX_MASK GENMASK(23, 19)
+#define TWRR_QUEUE_IDX_MASK GENMASK(18, 16)
+#define TWRR_VALUE_MASK GENMASK(15, 0)
+
+#define REG_PSE_BUF_USAGE_CFG 0x1028
+#define PSE_BUF_ESTIMATE_EN_MASK BIT(29)
+
+#define REG_CHAN_QOS_MODE(_n) (0x1040 + ((_n) << 2))
+#define CHAN_QOS_MODE_MASK(_n) GENMASK(2 + ((_n) << 2), (_n) << 2)
+
+#define REG_GLB_TRTCM_CFG 0x1080
+#define GLB_TRTCM_EN_MASK BIT(31)
+#define GLB_TRTCM_MODE_MASK BIT(30)
+#define GLB_SLOW_TICK_RATIO_MASK GENMASK(29, 16)
+#define GLB_FAST_TICK_MASK GENMASK(15, 0)
+
+#define REG_TXQ_CNGST_CFG 0x10a0
+#define TXQ_CNGST_DROP_EN BIT(31)
+#define TXQ_CNGST_DEI_DROP_EN BIT(30)
+
+#define REG_SLA_TRTCM_CFG 0x1150
+#define SLA_TRTCM_EN_MASK BIT(31)
+#define SLA_TRTCM_MODE_MASK BIT(30)
+#define SLA_SLOW_TICK_RATIO_MASK GENMASK(29, 16)
+#define SLA_FAST_TICK_MASK GENMASK(15, 0)
+
+/* CTRL */
+#define QDMA_DESC_DONE_MASK BIT(31)
+#define QDMA_DESC_DROP_MASK BIT(30) /* tx: drop - rx: overflow */
+#define QDMA_DESC_MORE_MASK BIT(29) /* more SG elements */
+#define QDMA_DESC_DEI_MASK BIT(25)
+#define QDMA_DESC_NO_DROP_MASK BIT(24)
+#define QDMA_DESC_LEN_MASK GENMASK(15, 0)
+/* DATA */
+#define QDMA_DESC_NEXT_ID_MASK GENMASK(15, 0)
+/* TX MSG0 */
+#define QDMA_ETH_TXMSG_MIC_IDX_MASK BIT(30)
+#define QDMA_ETH_TXMSG_SP_TAG_MASK GENMASK(29, 14)
+#define QDMA_ETH_TXMSG_ICO_MASK BIT(13)
+#define QDMA_ETH_TXMSG_UCO_MASK BIT(12)
+#define QDMA_ETH_TXMSG_TCO_MASK BIT(11)
+#define QDMA_ETH_TXMSG_TSO_MASK BIT(10)
+#define QDMA_ETH_TXMSG_FAST_MASK BIT(9)
+#define QDMA_ETH_TXMSG_OAM_MASK BIT(8)
+#define QDMA_ETH_TXMSG_CHAN_MASK GENMASK(7, 3)
+#define QDMA_ETH_TXMSG_QUEUE_MASK GENMASK(2, 0)
+/* TX MSG1 */
+#define QDMA_ETH_TXMSG_NO_DROP BIT(31)
+#define QDMA_ETH_TXMSG_METER_MASK GENMASK(30, 24) /* 0x7f no meters */
+#define QDMA_ETH_TXMSG_FPORT_MASK GENMASK(23, 20)
+#define QDMA_ETH_TXMSG_NBOQ_MASK GENMASK(19, 15)
+#define QDMA_ETH_TXMSG_HWF_MASK BIT(14)
+#define QDMA_ETH_TXMSG_HOP_MASK BIT(13)
+#define QDMA_ETH_TXMSG_PTP_MASK BIT(12)
+#define QDMA_ETH_TXMSG_ACNT_G1_MASK GENMASK(10, 6) /* 0x1f do not count */
+#define QDMA_ETH_TXMSG_ACNT_G0_MASK GENMASK(5, 0) /* 0x3f do not count */
+
+/* RX MSG0 */
+#define QDMA_ETH_RXMSG_SPTAG GENMASK(21, 14)
+/* RX MSG1 */
+#define QDMA_ETH_RXMSG_DEI_MASK BIT(31)
+#define QDMA_ETH_RXMSG_IP6_MASK BIT(30)
+#define QDMA_ETH_RXMSG_IP4_MASK BIT(29)
+#define QDMA_ETH_RXMSG_IP4F_MASK BIT(28)
+#define QDMA_ETH_RXMSG_L4_VALID_MASK BIT(27)
+#define QDMA_ETH_RXMSG_L4F_MASK BIT(26)
+#define QDMA_ETH_RXMSG_SPORT_MASK GENMASK(25, 21)
+#define QDMA_ETH_RXMSG_CRSN_MASK GENMASK(20, 16)
+#define QDMA_ETH_RXMSG_PPE_ENTRY_MASK GENMASK(15, 0)
+
+struct airoha_qdma_desc {
+ __le32 rsv;
+ __le32 ctrl;
+ __le32 addr;
+ __le32 data;
+ __le32 msg0;
+ __le32 msg1;
+ __le32 msg2;
+ __le32 msg3;
+};
+
+/* CTRL0 */
+#define QDMA_FWD_DESC_CTX_MASK BIT(31)
+#define QDMA_FWD_DESC_RING_MASK GENMASK(30, 28)
+#define QDMA_FWD_DESC_IDX_MASK GENMASK(27, 16)
+#define QDMA_FWD_DESC_LEN_MASK GENMASK(15, 0)
+/* CTRL1 */
+#define QDMA_FWD_DESC_FIRST_IDX_MASK GENMASK(15, 0)
+/* CTRL2 */
+#define QDMA_FWD_DESC_MORE_PKT_NUM_MASK GENMASK(2, 0)
+
+struct airoha_qdma_fwd_desc {
+ __le32 addr;
+ __le32 ctrl0;
+ __le32 ctrl1;
+ __le32 ctrl2;
+ __le32 msg0;
+ __le32 msg1;
+ __le32 rsv0;
+ __le32 rsv1;
+};
+
+#endif /* AIROHA_REGS_H */
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index c1295dfad0d0..70fa3adb4934 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -5,9 +5,6 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#ifdef CONFIG_RFS_ACCEL
-#include <linux/cpu_rmap.h>
-#endif /* CONFIG_RFS_ACCEL */
#include <linux/ethtool.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -162,30 +159,6 @@ int ena_xmit_common(struct ena_adapter *adapter,
return 0;
}
-static int ena_init_rx_cpu_rmap(struct ena_adapter *adapter)
-{
-#ifdef CONFIG_RFS_ACCEL
- u32 i;
- int rc;
-
- adapter->netdev->rx_cpu_rmap = alloc_irq_cpu_rmap(adapter->num_io_queues);
- if (!adapter->netdev->rx_cpu_rmap)
- return -ENOMEM;
- for (i = 0; i < adapter->num_io_queues; i++) {
- int irq_idx = ENA_IO_IRQ_IDX(i);
-
- rc = irq_cpu_rmap_add(adapter->netdev->rx_cpu_rmap,
- pci_irq_vector(adapter->pdev, irq_idx));
- if (rc) {
- free_irq_cpu_rmap(adapter->netdev->rx_cpu_rmap);
- adapter->netdev->rx_cpu_rmap = NULL;
- return rc;
- }
- }
-#endif /* CONFIG_RFS_ACCEL */
- return 0;
-}
-
static void ena_init_io_rings_common(struct ena_adapter *adapter,
struct ena_ring *ring, u16 qid)
{
@@ -1596,7 +1569,7 @@ static int ena_enable_msix(struct ena_adapter *adapter)
adapter->num_io_queues = irq_cnt - ENA_ADMIN_MSIX_VEC;
}
- if (ena_init_rx_cpu_rmap(adapter))
+ if (netif_enable_cpu_rmap(adapter->netdev, adapter->num_io_queues))
netif_warn(adapter, probe, adapter->netdev,
"Failed to map IRQs to CPUs\n");
@@ -1742,16 +1715,13 @@ static void ena_free_io_irq(struct ena_adapter *adapter)
struct ena_irq *irq;
int i;
-#ifdef CONFIG_RFS_ACCEL
- if (adapter->msix_vecs >= 1) {
- free_irq_cpu_rmap(adapter->netdev->rx_cpu_rmap);
- adapter->netdev->rx_cpu_rmap = NULL;
- }
-#endif /* CONFIG_RFS_ACCEL */
-
for (i = ENA_IO_IRQ_FIRST_IDX; i < ENA_MAX_MSIX_VEC(io_queue_count); i++) {
+ struct ena_napi *ena_napi;
+
irq = &adapter->irq_tbl[i];
irq_set_affinity_hint(irq->vector, NULL);
+ ena_napi = irq->data;
+ netif_napi_set_irq(&ena_napi->napi, -1);
free_irq(irq->vector, irq->data);
}
}
@@ -4131,13 +4101,6 @@ static void __ena_shutoff(struct pci_dev *pdev, bool shutdown)
ena_dev = adapter->ena_dev;
netdev = adapter->netdev;
-#ifdef CONFIG_RFS_ACCEL
- if ((adapter->msix_vecs >= 1) && (netdev->rx_cpu_rmap)) {
- free_irq_cpu_rmap(netdev->rx_cpu_rmap);
- netdev->rx_cpu_rmap = NULL;
- }
-
-#endif /* CONFIG_RFS_ACCEL */
/* Make sure timer and reset routine won't be called after
* freeing device resources.
*/
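The hunk above replaces the driver's CONFIG_RFS_ACCEL-guarded rmap bookkeeping with the core helper visible at the new call site, and clears the per-NAPI IRQ on teardown instead of freeing the map by hand. A minimal sketch of the resulting pattern, using only the two calls shown in this diff (the wrapper function is illustrative):

/* Illustrative wrapper: the core now owns netdev->rx_cpu_rmap. */
static int example_enable_arfs(struct net_device *netdev, unsigned int nqueues)
{
	return netif_enable_cpu_rmap(netdev, nqueues);
}

On the release side, netif_napi_set_irq(&napi, -1) (as added to ena_free_io_irq()) is enough; there is no driver-side free_irq_cpu_rmap() anymore.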
diff --git a/drivers/net/ethernet/amd/au1000_eth.c b/drivers/net/ethernet/amd/au1000_eth.c
index 0671a066913b..9d35ac348ebe 100644
--- a/drivers/net/ethernet/amd/au1000_eth.c
+++ b/drivers/net/ethernet/amd/au1000_eth.c
@@ -571,7 +571,7 @@ static struct db_dest *au1000_GetFreeDB(struct au1000_private *aup)
return pDB;
}
-void au1000_ReleaseDB(struct au1000_private *aup, struct db_dest *pDB)
+static void au1000_ReleaseDB(struct au1000_private *aup, struct db_dest *pDB)
{
struct db_dest *pDBfree = aup->pDBfree;
if (pDBfree)
diff --git a/drivers/net/ethernet/apm/xgene-v2/main.c b/drivers/net/ethernet/apm/xgene-v2/main.c
index 2a91c84aebdb..d7ca847d44c7 100644
--- a/drivers/net/ethernet/apm/xgene-v2/main.c
+++ b/drivers/net/ethernet/apm/xgene-v2/main.c
@@ -9,8 +9,6 @@
#include "main.h"
-static const struct acpi_device_id xge_acpi_match[];
-
static int xge_get_resources(struct xge_pdata *pdata)
{
struct platform_device *pdev;
@@ -731,7 +729,7 @@ MODULE_DEVICE_TABLE(acpi, xge_acpi_match);
static struct platform_driver xge_driver = {
.driver = {
.name = "xgene-enet-v2",
- .acpi_match_table = ACPI_PTR(xge_acpi_match),
+ .acpi_match_table = xge_acpi_match,
},
.probe = xge_probe,
.remove = xge_remove,
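Dropping ACPI_PTR() is presumably the usual cleanup here: ACPI_PTR() becomes NULL when CONFIG_ACPI is off, which leaves the unconditionally built table looking unused, while a direct reference keeps it referenced, and with it gone the forward declaration has no remaining purpose. A generic sketch of the convention; the device ID and driver name are placeholders, not taken from this driver:

static const struct acpi_device_id example_acpi_match[] = {
	{ "TEST0001" },		/* placeholder ID for illustration */
	{ }
};
MODULE_DEVICE_TABLE(acpi, example_acpi_match);

static struct platform_driver example_driver = {
	.driver = {
		.name = "example",
		.acpi_match_table = example_acpi_match,
	},
};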
diff --git a/drivers/net/ethernet/apm/xgene-v2/mdio.c b/drivers/net/ethernet/apm/xgene-v2/mdio.c
index eba06831aec2..6a17045a5f62 100644
--- a/drivers/net/ethernet/apm/xgene-v2/mdio.c
+++ b/drivers/net/ethernet/apm/xgene-v2/mdio.c
@@ -97,7 +97,6 @@ void xge_mdio_remove(struct net_device *ndev)
int xge_mdio_config(struct net_device *ndev)
{
- __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
struct xge_pdata *pdata = netdev_priv(ndev);
struct device *dev = &pdata->pdev->dev;
struct mii_bus *mdio_bus;
@@ -137,17 +136,12 @@ int xge_mdio_config(struct net_device *ndev)
goto err;
}
- linkmode_set_bit_array(phy_10_100_features_array,
- ARRAY_SIZE(phy_10_100_features_array),
- mask);
- linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, mask);
- linkmode_set_bit(ETHTOOL_LINK_MODE_AUI_BIT, mask);
- linkmode_set_bit(ETHTOOL_LINK_MODE_MII_BIT, mask);
- linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mask);
- linkmode_set_bit(ETHTOOL_LINK_MODE_BNC_BIT, mask);
-
- linkmode_andnot(phydev->supported, phydev->supported, mask);
- linkmode_copy(phydev->advertising, phydev->supported);
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT);
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Full_BIT);
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Full_BIT);
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
+
pdata->phy_speed = SPEED_UNKNOWN;
return 0;
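phy_remove_link_mode() does in one call what the deleted bitmap juggling did: it clears a single mode from phydev->supported and recomputes the advertising mask. A minimal usage sketch; the wrapper function is illustrative:

static void example_drop_half_duplex(struct phy_device *phydev)
{
	/* Advertising is re-derived by the helper after each removal. */
	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
}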
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c
index 86607b79c09f..cc3b1631c905 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c
@@ -6,8 +6,14 @@
* Keyur Chudgar <kchudgar@apm.com>
*/
-#include <linux/of_gpio.h>
-#include <linux/gpio.h>
+#include <linux/acpi.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/gpio/consumer.h>
+#include <linux/io.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
#include "xgene_enet_main.h"
#include "xgene_enet_hw.h"
#include "xgene_enet_xgmac.h"
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_drvinfo.c b/drivers/net/ethernet/aquantia/atlantic/aq_drvinfo.c
index 414b2e448d59..787ea91802e7 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_drvinfo.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_drvinfo.c
@@ -113,19 +113,9 @@ static const struct hwmon_ops aq_hwmon_ops = {
.read_string = aq_hwmon_read_string,
};
-static u32 aq_hwmon_temp_config[] = {
- HWMON_T_INPUT | HWMON_T_LABEL,
- HWMON_T_INPUT | HWMON_T_LABEL,
- 0,
-};
-
-static const struct hwmon_channel_info aq_hwmon_temp = {
- .type = hwmon_temp,
- .config = aq_hwmon_temp_config,
-};
-
static const struct hwmon_channel_info * const aq_hwmon_info[] = {
- &aq_hwmon_temp,
+ HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL),
NULL,
};
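HWMON_CHANNEL_INFO(temp, ...) builds the hwmon_channel_info and its terminated config array in place, which is what makes the two deleted statics redundant. Such a table is normally consumed through the with-info registration path; a sketch under that assumption (the chip-info wrapper and hwmon name are illustrative, not shown in this hunk):

static const struct hwmon_chip_info example_chip_info = {
	.ops	= &aq_hwmon_ops,
	.info	= aq_hwmon_info,
};

static int example_register_hwmon(struct device *dev, void *drvdata)
{
	struct device *hwmon_dev;

	hwmon_dev = devm_hwmon_device_register_with_info(dev, "aquantia",
							  drvdata,
							  &example_chip_info,
							  NULL);
	return PTR_ERR_OR_ZERO(hwmon_dev);
}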
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
index 8e04552d2216..02c8213915a5 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
@@ -2593,7 +2593,7 @@ void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
/********************* Multicast verbs: SET, CLEAR ****************************/
static inline u8 bnx2x_mcast_bin_from_mac(u8 *mac)
{
- return (crc32c_le(0, mac, ETH_ALEN) >> 24) & 0xff;
+ return (crc32c(0, mac, ETH_ALEN) >> 24) & 0xff;
}
struct bnx2x_mcast_mac_elem {
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 7b8b5b39c7bb..934ba9425857 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -54,7 +54,10 @@
#include <net/pkt_cls.h>
#include <net/page_pool/helpers.h>
#include <linux/align.h>
+#include <net/netdev_lock.h>
#include <net/netdev_queues.h>
+#include <net/netdev_rx_queue.h>
+#include <linux/pci-tph.h>
#include "bnxt_hsi.h"
#include "bnxt.h"
@@ -76,6 +79,7 @@
#define BNXT_DEF_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_HW | \
NETIF_MSG_TX_ERR)
+MODULE_IMPORT_NS("NETDEV_INTERNAL");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Broadcom NetXtreme network driver");
@@ -485,6 +489,17 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
txr = &bp->tx_ring[bp->tx_ring_map[i]];
prod = txr->tx_prod;
+#if (MAX_SKB_FRAGS > TX_MAX_FRAGS)
+ if (skb_shinfo(skb)->nr_frags > TX_MAX_FRAGS) {
+ netdev_warn_once(dev, "SKB has too many (%d) fragments, max supported is %d. SKB will be linearized.\n",
+ skb_shinfo(skb)->nr_frags, TX_MAX_FRAGS);
+ if (skb_linearize(skb)) {
+ dev_kfree_skb_any(skb);
+ dev_core_stats_tx_dropped_inc(dev);
+ return NETDEV_TX_OK;
+ }
+ }
+#endif
free_size = bnxt_tx_avail(bp, txr);
if (unlikely(free_size < skb_shinfo(skb)->nr_frags + 2)) {
/* We must have raced with NAPI cleanup */
@@ -564,7 +579,7 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
TX_BD_FLAGS_LHINT_512_AND_SMALLER |
TX_BD_FLAGS_COAL_NOW |
TX_BD_FLAGS_PACKET_END |
- (2 << TX_BD_FLAGS_BD_CNT_SHIFT));
+ TX_BD_CNT(2));
if (skb->ip_summed == CHECKSUM_PARTIAL)
tx_push1->tx_bd_hsize_lflags =
@@ -639,7 +654,7 @@ normal_tx:
dma_unmap_addr_set(tx_buf, mapping, mapping);
flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD |
- ((last_frag + 2) << TX_BD_FLAGS_BD_CNT_SHIFT);
+ TX_BD_CNT(last_frag + 2);
txbd->tx_bd_haddr = cpu_to_le64(mapping);
txbd->tx_bd_opaque = SET_TX_OPAQUE(bp, txr, prod, 2 + last_frag);
@@ -2038,6 +2053,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
struct rx_cmp_ext *rxcmp1;
u32 tmp_raw_cons = *raw_cons;
u16 cons, prod, cp_cons = RING_CMP(tmp_raw_cons);
+ struct skb_shared_info *sinfo;
struct bnxt_sw_rx_bd *rx_buf;
unsigned int len;
u8 *data_ptr, agg_bufs, cmp_type;
@@ -2164,6 +2180,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
false);
if (!frag_len)
goto oom_next_rx;
+
}
xdp_active = true;
}
@@ -2173,6 +2190,12 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
rc = 1;
goto next_rx;
}
+ if (xdp_buff_has_frags(&xdp)) {
+ sinfo = xdp_get_shared_info_from_buff(&xdp);
+ agg_bufs = sinfo->nr_frags;
+ } else {
+ agg_bufs = 0;
+ }
}
if (len <= bp->rx_copybreak) {
@@ -2210,7 +2233,8 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
if (!skb)
goto oom_next_rx;
} else {
- skb = bnxt_xdp_build_skb(bp, skb, agg_bufs, rxr->page_pool, &xdp, rxcmp1);
+ skb = bnxt_xdp_build_skb(bp, skb, agg_bufs,
+ rxr->page_pool, &xdp);
if (!skb) {
/* we should be able to free the old skb here */
bnxt_xdp_buff_frags_free(rxr, &xdp);
@@ -3314,74 +3338,81 @@ poll_done:
return work_done;
}
-static void bnxt_free_tx_skbs(struct bnxt *bp)
+static void bnxt_free_one_tx_ring_skbs(struct bnxt *bp,
+ struct bnxt_tx_ring_info *txr, int idx)
{
int i, max_idx;
struct pci_dev *pdev = bp->pdev;
- if (!bp->tx_ring)
- return;
-
max_idx = bp->tx_nr_pages * TX_DESC_CNT;
- for (i = 0; i < bp->tx_nr_rings; i++) {
- struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
- int j;
- if (!txr->tx_buf_ring)
+ for (i = 0; i < max_idx;) {
+ struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[i];
+ struct sk_buff *skb;
+ int j, last;
+
+ if (idx < bp->tx_nr_rings_xdp &&
+ tx_buf->action == XDP_REDIRECT) {
+ dma_unmap_single(&pdev->dev,
+ dma_unmap_addr(tx_buf, mapping),
+ dma_unmap_len(tx_buf, len),
+ DMA_TO_DEVICE);
+ xdp_return_frame(tx_buf->xdpf);
+ tx_buf->action = 0;
+ tx_buf->xdpf = NULL;
+ i++;
continue;
+ }
- for (j = 0; j < max_idx;) {
- struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
- struct sk_buff *skb;
- int k, last;
-
- if (i < bp->tx_nr_rings_xdp &&
- tx_buf->action == XDP_REDIRECT) {
- dma_unmap_single(&pdev->dev,
- dma_unmap_addr(tx_buf, mapping),
- dma_unmap_len(tx_buf, len),
- DMA_TO_DEVICE);
- xdp_return_frame(tx_buf->xdpf);
- tx_buf->action = 0;
- tx_buf->xdpf = NULL;
- j++;
- continue;
- }
+ skb = tx_buf->skb;
+ if (!skb) {
+ i++;
+ continue;
+ }
- skb = tx_buf->skb;
- if (!skb) {
- j++;
- continue;
- }
+ tx_buf->skb = NULL;
- tx_buf->skb = NULL;
+ if (tx_buf->is_push) {
+ dev_kfree_skb(skb);
+ i += 2;
+ continue;
+ }
- if (tx_buf->is_push) {
- dev_kfree_skb(skb);
- j += 2;
- continue;
- }
+ dma_unmap_single(&pdev->dev,
+ dma_unmap_addr(tx_buf, mapping),
+ skb_headlen(skb),
+ DMA_TO_DEVICE);
- dma_unmap_single(&pdev->dev,
- dma_unmap_addr(tx_buf, mapping),
- skb_headlen(skb),
- DMA_TO_DEVICE);
+ last = tx_buf->nr_frags;
+ i += 2;
+ for (j = 0; j < last; j++, i++) {
+ int ring_idx = i & bp->tx_ring_mask;
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
- last = tx_buf->nr_frags;
- j += 2;
- for (k = 0; k < last; k++, j++) {
- int ring_idx = j & bp->tx_ring_mask;
- skb_frag_t *frag = &skb_shinfo(skb)->frags[k];
-
- tx_buf = &txr->tx_buf_ring[ring_idx];
- dma_unmap_page(
- &pdev->dev,
- dma_unmap_addr(tx_buf, mapping),
- skb_frag_size(frag), DMA_TO_DEVICE);
- }
- dev_kfree_skb(skb);
+ tx_buf = &txr->tx_buf_ring[ring_idx];
+ dma_unmap_page(&pdev->dev,
+ dma_unmap_addr(tx_buf, mapping),
+ skb_frag_size(frag), DMA_TO_DEVICE);
}
- netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
+ dev_kfree_skb(skb);
+ }
+ netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, idx));
+}
+
+static void bnxt_free_tx_skbs(struct bnxt *bp)
+{
+ int i;
+
+ if (!bp->tx_ring)
+ return;
+
+ for (i = 0; i < bp->tx_nr_rings; i++) {
+ struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
+
+ if (!txr->tx_buf_ring)
+ continue;
+
+ bnxt_free_one_tx_ring_skbs(bp, txr, i);
}
}
@@ -5236,8 +5267,10 @@ static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool all)
{
int i;
- /* Under rtnl_lock and all our NAPIs have been disabled. It's
- * safe to delete the hash table.
+ netdev_assert_locked(bp->dev);
+
+ /* Under netdev instance lock and all our NAPIs have been disabled.
+ * It's safe to delete the hash table.
*/
for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
struct hlist_head *head;
@@ -5565,6 +5598,8 @@ int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap, int bmap_size,
if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
flags |= FUNC_DRV_RGTR_REQ_FLAGS_ERROR_RECOVERY_SUPPORT |
FUNC_DRV_RGTR_REQ_FLAGS_MASTER_SUPPORT;
+ if (bp->fw_cap & BNXT_FW_CAP_NPAR_1_2)
+ flags |= FUNC_DRV_RGTR_REQ_FLAGS_NPAR_1_2_SUPPORT;
req->flags = cpu_to_le32(flags);
req->ver_maj_8b = DRV_VER_MAJ;
req->ver_min_8b = DRV_VER_MIN;
@@ -6935,6 +6970,30 @@ static void bnxt_hwrm_ring_grp_free(struct bnxt *bp)
hwrm_req_drop(bp, req);
}
+static void bnxt_set_rx_ring_params_p5(struct bnxt *bp, u32 ring_type,
+ struct hwrm_ring_alloc_input *req,
+ struct bnxt_ring_struct *ring)
+{
+ struct bnxt_ring_grp_info *grp_info = &bp->grp_info[ring->grp_idx];
+ u32 enables = RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID |
+ RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID;
+
+ if (ring_type == HWRM_RING_ALLOC_AGG) {
+ req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX_AGG;
+ req->rx_ring_id = cpu_to_le16(grp_info->rx_fw_ring_id);
+ req->rx_buf_size = cpu_to_le16(BNXT_RX_PAGE_SIZE);
+ enables |= RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID;
+ } else {
+ req->rx_buf_size = cpu_to_le16(bp->rx_buf_use_size);
+ if (NET_IP_ALIGN == 2)
+ req->flags =
+ cpu_to_le16(RING_ALLOC_REQ_FLAGS_RX_SOP_PAD);
+ }
+ req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
+ req->nq_ring_id = cpu_to_le16(grp_info->cp_fw_ring_id);
+ req->enables |= cpu_to_le32(enables);
+}
+
static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
struct bnxt_ring_struct *ring,
u32 ring_type, u32 map_index)
@@ -6986,37 +7045,13 @@ static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
break;
}
case HWRM_RING_ALLOC_RX:
- req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
- req->length = cpu_to_le32(bp->rx_ring_mask + 1);
- if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
- u16 flags = 0;
-
- /* Association of rx ring with stats context */
- grp_info = &bp->grp_info[ring->grp_idx];
- req->rx_buf_size = cpu_to_le16(bp->rx_buf_use_size);
- req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
- req->enables |= cpu_to_le32(
- RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID);
- if (NET_IP_ALIGN == 2)
- flags = RING_ALLOC_REQ_FLAGS_RX_SOP_PAD;
- req->flags = cpu_to_le16(flags);
- }
- break;
case HWRM_RING_ALLOC_AGG:
- if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
- req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX_AGG;
- /* Association of agg ring with rx ring */
- grp_info = &bp->grp_info[ring->grp_idx];
- req->rx_ring_id = cpu_to_le16(grp_info->rx_fw_ring_id);
- req->rx_buf_size = cpu_to_le16(BNXT_RX_PAGE_SIZE);
- req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
- req->enables |= cpu_to_le32(
- RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID |
- RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID);
- } else {
- req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
- }
- req->length = cpu_to_le32(bp->rx_agg_ring_mask + 1);
+ req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
+ req->length = (ring_type == HWRM_RING_ALLOC_RX) ?
+ cpu_to_le32(bp->rx_ring_mask + 1) :
+ cpu_to_le32(bp->rx_agg_ring_mask + 1);
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
+ bnxt_set_rx_ring_params_p5(bp, ring_type, req, ring);
break;
case HWRM_RING_ALLOC_CMPL:
req->ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL;
@@ -7197,6 +7232,39 @@ static int bnxt_hwrm_rx_agg_ring_alloc(struct bnxt *bp,
return 0;
}
+static int bnxt_hwrm_cp_ring_alloc_p5(struct bnxt *bp,
+ struct bnxt_cp_ring_info *cpr)
+{
+ const u32 type = HWRM_RING_ALLOC_CMPL;
+ struct bnxt_napi *bnapi = cpr->bnapi;
+ struct bnxt_ring_struct *ring;
+ u32 map_idx = bnapi->index;
+ int rc;
+
+ ring = &cpr->cp_ring_struct;
+ ring->handle = BNXT_SET_NQ_HDL(cpr);
+ rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
+ if (rc)
+ return rc;
+ bnxt_set_db(bp, &cpr->cp_db, type, map_idx, ring->fw_ring_id);
+ bnxt_db_cq(bp, &cpr->cp_db, cpr->cp_raw_cons);
+ return 0;
+}
+
+static int bnxt_hwrm_tx_ring_alloc(struct bnxt *bp,
+ struct bnxt_tx_ring_info *txr, u32 tx_idx)
+{
+ struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
+ const u32 type = HWRM_RING_ALLOC_TX;
+ int rc;
+
+ rc = hwrm_ring_alloc_send_msg(bp, ring, type, tx_idx);
+ if (rc)
+ return rc;
+ bnxt_set_db(bp, &txr->tx_db, type, tx_idx, ring->fw_ring_id);
+ return 0;
+}
+
static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
{
bool agg_rings = !!(bp->flags & BNXT_FLAG_AGG_RINGS);
@@ -7233,33 +7301,17 @@ static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
}
}
- type = HWRM_RING_ALLOC_TX;
for (i = 0; i < bp->tx_nr_rings; i++) {
struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
- struct bnxt_ring_struct *ring;
- u32 map_idx;
if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
- struct bnxt_cp_ring_info *cpr2 = txr->tx_cpr;
- struct bnxt_napi *bnapi = txr->bnapi;
- u32 type2 = HWRM_RING_ALLOC_CMPL;
-
- ring = &cpr2->cp_ring_struct;
- ring->handle = BNXT_SET_NQ_HDL(cpr2);
- map_idx = bnapi->index;
- rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx);
+ rc = bnxt_hwrm_cp_ring_alloc_p5(bp, txr->tx_cpr);
if (rc)
goto err_out;
- bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx,
- ring->fw_ring_id);
- bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons);
}
- ring = &txr->tx_ring_struct;
- map_idx = i;
- rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
+ rc = bnxt_hwrm_tx_ring_alloc(bp, txr, i);
if (rc)
goto err_out;
- bnxt_set_db(bp, &txr->tx_db, type, map_idx, ring->fw_ring_id);
}
for (i = 0; i < bp->rx_nr_rings; i++) {
@@ -7272,20 +7324,9 @@ static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
if (!agg_rings)
bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
- struct bnxt_cp_ring_info *cpr2 = rxr->rx_cpr;
- struct bnxt_napi *bnapi = rxr->bnapi;
- u32 type2 = HWRM_RING_ALLOC_CMPL;
- struct bnxt_ring_struct *ring;
- u32 map_idx = bnapi->index;
-
- ring = &cpr2->cp_ring_struct;
- ring->handle = BNXT_SET_NQ_HDL(cpr2);
- rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx);
+ rc = bnxt_hwrm_cp_ring_alloc_p5(bp, rxr->rx_cpr);
if (rc)
goto err_out;
- bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx,
- ring->fw_ring_id);
- bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons);
}
}
@@ -7353,6 +7394,23 @@ exit:
return 0;
}
+static void bnxt_hwrm_tx_ring_free(struct bnxt *bp,
+ struct bnxt_tx_ring_info *txr,
+ bool close_path)
+{
+ struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
+ u32 cmpl_ring_id;
+
+ if (ring->fw_ring_id == INVALID_HW_RING_ID)
+ return;
+
+ cmpl_ring_id = close_path ? bnxt_cp_ring_for_tx(bp, txr) :
+ INVALID_HW_RING_ID;
+ hwrm_ring_free_send_msg(bp, ring, RING_FREE_REQ_RING_TYPE_TX,
+ cmpl_ring_id);
+ ring->fw_ring_id = INVALID_HW_RING_ID;
+}
+
static void bnxt_hwrm_rx_ring_free(struct bnxt *bp,
struct bnxt_rx_ring_info *rxr,
bool close_path)
@@ -7397,6 +7455,33 @@ static void bnxt_hwrm_rx_agg_ring_free(struct bnxt *bp,
bp->grp_info[grp_idx].agg_fw_ring_id = INVALID_HW_RING_ID;
}
+static void bnxt_hwrm_cp_ring_free(struct bnxt *bp,
+ struct bnxt_cp_ring_info *cpr)
+{
+ struct bnxt_ring_struct *ring;
+
+ ring = &cpr->cp_ring_struct;
+ if (ring->fw_ring_id == INVALID_HW_RING_ID)
+ return;
+
+ hwrm_ring_free_send_msg(bp, ring, RING_FREE_REQ_RING_TYPE_L2_CMPL,
+ INVALID_HW_RING_ID);
+ ring->fw_ring_id = INVALID_HW_RING_ID;
+}
+
+static void bnxt_clear_one_cp_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
+{
+ struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
+ int i, size = ring->ring_mem.page_size;
+
+ cpr->cp_raw_cons = 0;
+ cpr->toggle = 0;
+
+ for (i = 0; i < bp->cp_nr_pages; i++)
+ if (cpr->cp_desc_ring[i])
+ memset(cpr->cp_desc_ring[i], 0, size);
+}
+
static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
{
u32 type;
@@ -7405,20 +7490,8 @@ static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
if (!bp->bnapi)
return;
- for (i = 0; i < bp->tx_nr_rings; i++) {
- struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
- struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
-
- if (ring->fw_ring_id != INVALID_HW_RING_ID) {
- u32 cmpl_ring_id = bnxt_cp_ring_for_tx(bp, txr);
-
- hwrm_ring_free_send_msg(bp, ring,
- RING_FREE_REQ_RING_TYPE_TX,
- close_path ? cmpl_ring_id :
- INVALID_HW_RING_ID);
- ring->fw_ring_id = INVALID_HW_RING_ID;
- }
- }
+ for (i = 0; i < bp->tx_nr_rings; i++)
+ bnxt_hwrm_tx_ring_free(bp, &bp->tx_ring[i], close_path);
bnxt_cancel_dim(bp);
for (i = 0; i < bp->rx_nr_rings; i++) {
@@ -7442,17 +7515,9 @@ static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
struct bnxt_ring_struct *ring;
int j;
- for (j = 0; j < cpr->cp_ring_count && cpr->cp_ring_arr; j++) {
- struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[j];
+ for (j = 0; j < cpr->cp_ring_count && cpr->cp_ring_arr; j++)
+ bnxt_hwrm_cp_ring_free(bp, &cpr->cp_ring_arr[j]);
- ring = &cpr2->cp_ring_struct;
- if (ring->fw_ring_id == INVALID_HW_RING_ID)
- continue;
- hwrm_ring_free_send_msg(bp, ring,
- RING_FREE_REQ_RING_TYPE_L2_CMPL,
- INVALID_HW_RING_ID);
- ring->fw_ring_id = INVALID_HW_RING_ID;
- }
ring = &cpr->cp_ring_struct;
if (ring->fw_ring_id != INVALID_HW_RING_ID) {
hwrm_ring_free_send_msg(bp, ring, type,
@@ -8365,6 +8430,7 @@ static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
switch (resp->port_partition_type) {
case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0:
+ case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_2:
case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5:
case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0:
bp->port_partition_type = resp->port_partition_type;
@@ -9529,6 +9595,8 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
bp->fw_cap |= BNXT_FW_CAP_HOT_RESET_IF;
if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_FW_LIVEPATCH_SUPPORTED))
bp->fw_cap |= BNXT_FW_CAP_LIVEPATCH;
+ if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_NPAR_1_2_SUPPORTED)
+ bp->fw_cap |= BNXT_FW_CAP_NPAR_1_2;
if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_DFLT_VLAN_TPID_PCP_SUPPORTED))
bp->fw_cap |= BNXT_FW_CAP_DFLT_VLAN_TPID_PCP;
if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_BS_V2_SUPPORTED)
@@ -11237,6 +11305,155 @@ int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init)
return 0;
}
+static void bnxt_tx_queue_stop(struct bnxt *bp, int idx)
+{
+ struct bnxt_tx_ring_info *txr;
+ struct netdev_queue *txq;
+ struct bnxt_napi *bnapi;
+ int i;
+
+ bnapi = bp->bnapi[idx];
+ bnxt_for_each_napi_tx(i, bnapi, txr) {
+ WRITE_ONCE(txr->dev_state, BNXT_DEV_STATE_CLOSING);
+ synchronize_net();
+
+ if (!(bnapi->flags & BNXT_NAPI_FLAG_XDP)) {
+ txq = netdev_get_tx_queue(bp->dev, txr->txq_index);
+ if (txq) {
+ __netif_tx_lock_bh(txq);
+ netif_tx_stop_queue(txq);
+ __netif_tx_unlock_bh(txq);
+ }
+ }
+
+ if (!bp->tph_mode)
+ continue;
+
+ bnxt_hwrm_tx_ring_free(bp, txr, true);
+ bnxt_hwrm_cp_ring_free(bp, txr->tx_cpr);
+ bnxt_free_one_tx_ring_skbs(bp, txr, txr->txq_index);
+ bnxt_clear_one_cp_ring(bp, txr->tx_cpr);
+ }
+}
+
+static int bnxt_tx_queue_start(struct bnxt *bp, int idx)
+{
+ struct bnxt_tx_ring_info *txr;
+ struct netdev_queue *txq;
+ struct bnxt_napi *bnapi;
+ int rc, i;
+
+ bnapi = bp->bnapi[idx];
+ /* All rings have been reserved and previously allocated.
+ * Reallocating with the same parameters should never fail.
+ */
+ bnxt_for_each_napi_tx(i, bnapi, txr) {
+ if (!bp->tph_mode)
+ goto start_tx;
+
+ rc = bnxt_hwrm_cp_ring_alloc_p5(bp, txr->tx_cpr);
+ if (rc)
+ return rc;
+
+ rc = bnxt_hwrm_tx_ring_alloc(bp, txr, false);
+ if (rc)
+ return rc;
+
+ txr->tx_prod = 0;
+ txr->tx_cons = 0;
+ txr->tx_hw_cons = 0;
+start_tx:
+ WRITE_ONCE(txr->dev_state, 0);
+ synchronize_net();
+
+ if (bnapi->flags & BNXT_NAPI_FLAG_XDP)
+ continue;
+
+ txq = netdev_get_tx_queue(bp->dev, txr->txq_index);
+ if (txq)
+ netif_tx_start_queue(txq);
+ }
+
+ return 0;
+}
+
+static void bnxt_irq_affinity_notify(struct irq_affinity_notify *notify,
+ const cpumask_t *mask)
+{
+ struct bnxt_irq *irq;
+ u16 tag;
+ int err;
+
+ irq = container_of(notify, struct bnxt_irq, affinity_notify);
+
+ if (!irq->bp->tph_mode)
+ return;
+
+ cpumask_copy(irq->cpu_mask, mask);
+
+ if (irq->ring_nr >= irq->bp->rx_nr_rings)
+ return;
+
+ if (pcie_tph_get_cpu_st(irq->bp->pdev, TPH_MEM_TYPE_VM,
+ cpumask_first(irq->cpu_mask), &tag))
+ return;
+
+ if (pcie_tph_set_st_entry(irq->bp->pdev, irq->msix_nr, tag))
+ return;
+
+ netdev_lock(irq->bp->dev);
+ if (netif_running(irq->bp->dev)) {
+ err = netdev_rx_queue_restart(irq->bp->dev, irq->ring_nr);
+ if (err)
+ netdev_err(irq->bp->dev,
+ "RX queue restart failed: err=%d\n", err);
+ }
+ netdev_unlock(irq->bp->dev);
+}
+
+static void bnxt_irq_affinity_release(struct kref *ref)
+{
+ struct irq_affinity_notify *notify =
+ container_of(ref, struct irq_affinity_notify, kref);
+ struct bnxt_irq *irq;
+
+ irq = container_of(notify, struct bnxt_irq, affinity_notify);
+
+ if (!irq->bp->tph_mode)
+ return;
+
+ if (pcie_tph_set_st_entry(irq->bp->pdev, irq->msix_nr, 0)) {
+ netdev_err(irq->bp->dev,
+ "Setting ST=0 for MSIX entry %d failed\n",
+ irq->msix_nr);
+ return;
+ }
+}
+
+static void bnxt_release_irq_notifier(struct bnxt_irq *irq)
+{
+ irq_set_affinity_notifier(irq->vector, NULL);
+}
+
+static void bnxt_register_irq_notifier(struct bnxt *bp, struct bnxt_irq *irq)
+{
+ struct irq_affinity_notify *notify;
+
+ irq->bp = bp;
+
+ /* Nothing to do if TPH is not enabled */
+ if (!bp->tph_mode)
+ return;
+
+ /* Register IRQ affinity notifier */
+ notify = &irq->affinity_notify;
+ notify->irq = irq->vector;
+ notify->notify = bnxt_irq_affinity_notify;
+ notify->release = bnxt_irq_affinity_release;
+
+ irq_set_affinity_notifier(irq->vector, notify);
+}
+
static void bnxt_free_irq(struct bnxt *bp)
{
struct bnxt_irq *irq;
@@ -11259,11 +11476,18 @@ static void bnxt_free_irq(struct bnxt *bp)
free_cpumask_var(irq->cpu_mask);
irq->have_cpumask = 0;
}
+
+ bnxt_release_irq_notifier(irq);
+
free_irq(irq->vector, bp->bnapi[i]);
}
irq->requested = 0;
}
+
+ /* Disable TPH support */
+ pcie_disable_tph(bp->pdev);
+ bp->tph_mode = 0;
}
static int bnxt_request_irq(struct bnxt *bp)
@@ -11283,6 +11507,12 @@ static int bnxt_request_irq(struct bnxt *bp)
#ifdef CONFIG_RFS_ACCEL
rmap = bp->dev->rx_cpu_rmap;
#endif
+
+ /* Enable TPH support as part of IRQ request */
+ rc = pcie_enable_tph(bp->pdev, PCI_TPH_ST_IV_MODE);
+ if (!rc)
+ bp->tph_mode = PCI_TPH_ST_IV_MODE;
+
for (i = 0, j = 0; i < bp->cp_nr_rings; i++) {
int map_idx = bnxt_cp_num_to_irq_num(bp, i);
struct bnxt_irq *irq = &bp->irq_tbl[map_idx];
@@ -11301,13 +11531,16 @@ static int bnxt_request_irq(struct bnxt *bp)
if (rc)
break;
- netif_napi_set_irq(&bp->bnapi[i]->napi, irq->vector);
+ netif_napi_set_irq_locked(&bp->bnapi[i]->napi, irq->vector);
irq->requested = 1;
if (zalloc_cpumask_var(&irq->cpu_mask, GFP_KERNEL)) {
int numa_node = dev_to_node(&bp->pdev->dev);
+ u16 tag;
irq->have_cpumask = 1;
+ irq->msix_nr = map_idx;
+ irq->ring_nr = i;
cpumask_set_cpu(cpumask_local_spread(i, numa_node),
irq->cpu_mask);
rc = irq_update_affinity_hint(irq->vector, irq->cpu_mask);
@@ -11317,6 +11550,16 @@ static int bnxt_request_irq(struct bnxt *bp)
irq->vector);
break;
}
+
+ bnxt_register_irq_notifier(bp, irq);
+
+ /* Init ST table entry */
+ if (pcie_tph_get_cpu_st(irq->bp->pdev, TPH_MEM_TYPE_VM,
+ cpumask_first(irq->cpu_mask),
+ &tag))
+ continue;
+
+ pcie_tph_set_st_entry(irq->bp->pdev, irq->msix_nr, tag);
}
}
return rc;
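/* Editor's sketch, not part of the patch: condensed shape of the per-vector
 * TPH programming that bnxt_request_irq() and bnxt_irq_affinity_notify()
 * perform above - look up a steering tag for the first CPU in the affinity
 * mask and write it into this vector's ST table entry.  The function name
 * is illustrative and error handling is elided.
 */
static void example_program_steering_tag(struct bnxt *bp, struct bnxt_irq *irq)
{
	u16 tag;

	if (!pcie_tph_get_cpu_st(bp->pdev, TPH_MEM_TYPE_VM,
				 cpumask_first(irq->cpu_mask), &tag))
		pcie_tph_set_st_entry(bp->pdev, irq->msix_nr, tag);
}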
@@ -11337,9 +11580,9 @@ static void bnxt_del_napi(struct bnxt *bp)
for (i = 0; i < bp->cp_nr_rings; i++) {
struct bnxt_napi *bnapi = bp->bnapi[i];
- __netif_napi_del(&bnapi->napi);
+ __netif_napi_del_locked(&bnapi->napi);
}
- /* We called __netif_napi_del(), we need
+ /* We called __netif_napi_del_locked(), we need
* to respect an RCU grace period before freeing napi structures.
*/
synchronize_net();
@@ -11358,12 +11601,12 @@ static void bnxt_init_napi(struct bnxt *bp)
cp_nr_rings--;
for (i = 0; i < cp_nr_rings; i++) {
bnapi = bp->bnapi[i];
- netif_napi_add_config(bp->dev, &bnapi->napi, poll_fn,
- bnapi->index);
+ netif_napi_add_config_locked(bp->dev, &bnapi->napi, poll_fn,
+ bnapi->index);
}
if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
bnapi = bp->bnapi[cp_nr_rings];
- netif_napi_add(bp->dev, &bnapi->napi, bnxt_poll_nitroa0);
+ netif_napi_add_locked(bp->dev, &bnapi->napi, bnxt_poll_nitroa0);
}
}
@@ -11384,7 +11627,7 @@ static void bnxt_disable_napi(struct bnxt *bp)
cpr->sw_stats->tx.tx_resets++;
if (bnapi->in_reset)
cpr->sw_stats->rx.rx_resets++;
- napi_disable(&bnapi->napi);
+ napi_disable_locked(&bnapi->napi);
}
}
@@ -11406,7 +11649,7 @@ static void bnxt_enable_napi(struct bnxt *bp)
INIT_WORK(&cpr->dim.work, bnxt_dim_work);
cpr->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
}
- napi_enable(&bnapi->napi);
+ napi_enable_locked(&bnapi->napi);
}
}
@@ -12077,6 +12320,7 @@ static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
struct hwrm_func_drv_if_change_input *req;
bool fw_reset = !bp->irq_tbl;
bool resc_reinit = false;
+ bool caps_change = false;
int rc, retry = 0;
u32 flags = 0;
@@ -12132,8 +12376,11 @@ static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
return -ENODEV;
}
- if (resc_reinit || fw_reset) {
- if (fw_reset) {
+ if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_CAPS_CHANGE)
+ caps_change = true;
+
+ if (resc_reinit || fw_reset || caps_change) {
+ if (fw_reset || caps_change) {
set_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
bnxt_ulp_irq_stop(bp);
@@ -12569,7 +12816,6 @@ open_err_free_mem:
return rc;
}
-/* rtnl_lock held */
int bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
{
int rc = 0;
@@ -12580,14 +12826,14 @@ int bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
rc = __bnxt_open_nic(bp, irq_re_init, link_re_init);
if (rc) {
netdev_err(bp->dev, "nic open fail (rc: %x)\n", rc);
- dev_close(bp->dev);
+ netif_close(bp->dev);
}
return rc;
}
-/* rtnl_lock held, open the NIC half way by allocating all resources, but
- * NAPI, IRQ, and TX are not enabled. This is mainly used for offline
- * self tests.
+/* netdev instance lock held, open the NIC half way by allocating all
+ * resources, but NAPI, IRQ, and TX are not enabled. This is mainly used
+ * for offline self tests.
*/
int bnxt_half_open_nic(struct bnxt *bp)
{
@@ -12618,12 +12864,12 @@ int bnxt_half_open_nic(struct bnxt *bp)
half_open_err:
bnxt_free_skbs(bp);
bnxt_free_mem(bp, true);
- dev_close(bp->dev);
+ netif_close(bp->dev);
return rc;
}
-/* rtnl_lock held, this call can only be made after a previous successful
- * call to bnxt_half_open_nic().
+/* netdev instance lock held, this call can only be made after a previous
+ * successful call to bnxt_half_open_nic().
*/
void bnxt_half_close_nic(struct bnxt *bp)
{
@@ -12732,10 +12978,11 @@ void bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
/* If we get here, it means firmware reset is in progress
* while we are trying to close. We can safely proceed with
- * the close because we are holding rtnl_lock(). Some firmware
- * messages may fail as we proceed to close. We set the
- * ABORT_ERR flag here so that the FW reset thread will later
- * abort when it gets the rtnl_lock() and sees the flag.
+ * the close because we are holding netdev instance lock.
+ * Some firmware messages may fail as we proceed to close.
+ * We set the ABORT_ERR flag here so that the FW reset thread
+ * will later abort when it gets the netdev instance lock
+ * and sees the flag.
*/
netdev_warn(bp->dev, "FW reset in progress during close, FW reset will be aborted\n");
set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
@@ -12826,7 +13073,7 @@ static int bnxt_hwrm_port_phy_write(struct bnxt *bp, u16 phy_addr, u16 reg,
return hwrm_req_send(bp, req);
}
-/* rtnl_lock held */
+/* netdev instance lock held */
static int bnxt_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
struct mii_ioctl_data *mdio = if_mii(ifr);
@@ -13745,30 +13992,31 @@ bnxt_restart_timer:
mod_timer(&bp->timer, jiffies + bp->current_interval);
}
-static void bnxt_rtnl_lock_sp(struct bnxt *bp)
+static void bnxt_lock_sp(struct bnxt *bp)
{
/* We are called from bnxt_sp_task which has BNXT_STATE_IN_SP_TASK
* set. If the device is being closed, bnxt_close() may be holding
- * rtnl() and waiting for BNXT_STATE_IN_SP_TASK to clear. So we
- * must clear BNXT_STATE_IN_SP_TASK before holding rtnl().
+ * netdev instance lock and waiting for BNXT_STATE_IN_SP_TASK to clear.
+ * So we must clear BNXT_STATE_IN_SP_TASK before holding netdev
+ * instance lock.
*/
clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
- rtnl_lock();
+ netdev_lock(bp->dev);
}
-static void bnxt_rtnl_unlock_sp(struct bnxt *bp)
+static void bnxt_unlock_sp(struct bnxt *bp)
{
set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
- rtnl_unlock();
+ netdev_unlock(bp->dev);
}
/* Only called from bnxt_sp_task() */
static void bnxt_reset(struct bnxt *bp, bool silent)
{
- bnxt_rtnl_lock_sp(bp);
+ bnxt_lock_sp(bp);
if (test_bit(BNXT_STATE_OPEN, &bp->state))
bnxt_reset_task(bp, silent);
- bnxt_rtnl_unlock_sp(bp);
+ bnxt_unlock_sp(bp);
}
/* Only called from bnxt_sp_task() */
@@ -13776,9 +14024,9 @@ static void bnxt_rx_ring_reset(struct bnxt *bp)
{
int i;
- bnxt_rtnl_lock_sp(bp);
+ bnxt_lock_sp(bp);
if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
- bnxt_rtnl_unlock_sp(bp);
+ bnxt_unlock_sp(bp);
return;
}
/* Disable and flush TPA before resetting the RX ring */
@@ -13817,7 +14065,7 @@ static void bnxt_rx_ring_reset(struct bnxt *bp)
}
if (bp->flags & BNXT_FLAG_TPA)
bnxt_set_tpa(bp, true);
- bnxt_rtnl_unlock_sp(bp);
+ bnxt_unlock_sp(bp);
}
static void bnxt_fw_fatal_close(struct bnxt *bp)
@@ -13873,7 +14121,7 @@ static bool is_bnxt_fw_ok(struct bnxt *bp)
return false;
}
-/* rtnl_lock is acquired before calling this function */
+/* netdev instance lock is acquired before calling this function */
static void bnxt_force_fw_reset(struct bnxt *bp)
{
struct bnxt_fw_health *fw_health = bp->fw_health;
@@ -13916,9 +14164,9 @@ void bnxt_fw_exception(struct bnxt *bp)
netdev_warn(bp->dev, "Detected firmware fatal condition, initiating reset\n");
set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
bnxt_ulp_stop(bp);
- bnxt_rtnl_lock_sp(bp);
+ bnxt_lock_sp(bp);
bnxt_force_fw_reset(bp);
- bnxt_rtnl_unlock_sp(bp);
+ bnxt_unlock_sp(bp);
}
/* Returns the number of registered VFs, or 1 if VF configuration is pending, or
@@ -13948,7 +14196,7 @@ static int bnxt_get_registered_vfs(struct bnxt *bp)
void bnxt_fw_reset(struct bnxt *bp)
{
bnxt_ulp_stop(bp);
- bnxt_rtnl_lock_sp(bp);
+ bnxt_lock_sp(bp);
if (test_bit(BNXT_STATE_OPEN, &bp->state) &&
!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
@@ -13971,7 +14219,7 @@ void bnxt_fw_reset(struct bnxt *bp)
netdev_err(bp->dev, "Firmware reset aborted, rc = %d\n",
n);
clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
- dev_close(bp->dev);
+ netif_close(bp->dev);
goto fw_reset_exit;
} else if (n > 0) {
u16 vf_tmo_dsecs = n * 10;
@@ -13994,7 +14242,7 @@ void bnxt_fw_reset(struct bnxt *bp)
bnxt_queue_fw_reset_work(bp, tmo);
}
fw_reset_exit:
- bnxt_rtnl_unlock_sp(bp);
+ bnxt_unlock_sp(bp);
}
static void bnxt_chk_missed_irq(struct bnxt *bp)
@@ -14193,7 +14441,7 @@ static void bnxt_sp_task(struct work_struct *work)
static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
int *max_cp);
-/* Under rtnl_lock */
+/* Under netdev instance lock */
int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
int tx_xdp)
{
@@ -14586,7 +14834,7 @@ static void bnxt_fw_reset_abort(struct bnxt *bp, int rc)
if (bp->fw_reset_state != BNXT_FW_RESET_STATE_POLL_VF)
bnxt_dl_health_fw_status_update(bp, false);
bp->fw_reset_state = 0;
- dev_close(bp->dev);
+ netif_close(bp->dev);
}
static void bnxt_fw_reset_task(struct work_struct *work)
@@ -14621,10 +14869,10 @@ static void bnxt_fw_reset_task(struct work_struct *work)
return;
}
bp->fw_reset_timestamp = jiffies;
- rtnl_lock();
+ netdev_lock(bp->dev);
if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
bnxt_fw_reset_abort(bp, rc);
- rtnl_unlock();
+ netdev_unlock(bp->dev);
goto ulp_start;
}
bnxt_fw_reset_close(bp);
@@ -14635,7 +14883,7 @@ static void bnxt_fw_reset_task(struct work_struct *work)
bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
tmo = bp->fw_reset_min_dsecs * HZ / 10;
}
- rtnl_unlock();
+ netdev_unlock(bp->dev);
bnxt_queue_fw_reset_work(bp, tmo);
return;
}
@@ -14709,7 +14957,7 @@ static void bnxt_fw_reset_task(struct work_struct *work)
bp->fw_reset_state = BNXT_FW_RESET_STATE_OPENING;
fallthrough;
case BNXT_FW_RESET_STATE_OPENING:
- while (!rtnl_trylock()) {
+ while (!netdev_trylock(bp->dev)) {
bnxt_queue_fw_reset_work(bp, HZ / 10);
return;
}
@@ -14717,7 +14965,7 @@ static void bnxt_fw_reset_task(struct work_struct *work)
if (rc) {
netdev_err(bp->dev, "bnxt_open() failed during FW reset\n");
bnxt_fw_reset_abort(bp, rc);
- rtnl_unlock();
+ netdev_unlock(bp->dev);
goto ulp_start;
}
@@ -14736,13 +14984,13 @@ static void bnxt_fw_reset_task(struct work_struct *work)
bnxt_dl_health_fw_recovery_done(bp);
bnxt_dl_health_fw_status_update(bp, true);
}
- rtnl_unlock();
+ netdev_unlock(bp->dev);
bnxt_ulp_start(bp, 0);
bnxt_reenable_sriov(bp);
- rtnl_lock();
+ netdev_lock(bp->dev);
bnxt_vf_reps_alloc(bp);
bnxt_vf_reps_open(bp);
- rtnl_unlock();
+ netdev_unlock(bp->dev);
break;
}
return;
@@ -14755,9 +15003,9 @@ fw_reset_abort_status:
netdev_err(bp->dev, "fw_health_status 0x%x\n", sts);
}
fw_reset_abort:
- rtnl_lock();
+ netdev_lock(bp->dev);
bnxt_fw_reset_abort(bp, rc);
- rtnl_unlock();
+ netdev_unlock(bp->dev);
ulp_start:
bnxt_ulp_start(bp, rc);
}
@@ -14849,13 +15097,14 @@ init_err:
return rc;
}
-/* rtnl_lock held */
static int bnxt_change_mac_addr(struct net_device *dev, void *p)
{
struct sockaddr *addr = p;
struct bnxt *bp = netdev_priv(dev);
int rc = 0;
+ netdev_assert_locked(dev);
+
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
@@ -14876,11 +15125,12 @@ static int bnxt_change_mac_addr(struct net_device *dev, void *p)
return rc;
}
-/* rtnl_lock held */
static int bnxt_change_mtu(struct net_device *dev, int new_mtu)
{
struct bnxt *bp = netdev_priv(dev);
+ netdev_assert_locked(dev);
+
if (netif_running(dev))
bnxt_close_nic(bp, true, false);
@@ -15375,6 +15625,9 @@ static void bnxt_get_queue_stats_rx(struct net_device *dev, int i,
struct bnxt_cp_ring_info *cpr;
u64 *sw;
+ if (!bp->bnapi)
+ return;
+
cpr = &bp->bnapi[i]->cp_ring;
sw = cpr->stats.sw_stats;
@@ -15398,6 +15651,9 @@ static void bnxt_get_queue_stats_tx(struct net_device *dev, int i,
struct bnxt_napi *bnapi;
u64 *sw;
+ if (!bp->tx_ring)
+ return;
+
bnapi = bp->tx_ring[bp->tx_ring_map[i]].bnapi;
sw = bnapi->cp_ring.stats.sw_stats;
@@ -15439,6 +15695,9 @@ static int bnxt_queue_mem_alloc(struct net_device *dev, void *qmem, int idx)
struct bnxt_ring_struct *ring;
int rc;
+ if (!bp->rx_ring)
+ return -ENETDOWN;
+
rxr = &bp->rx_ring[idx];
clone = qmem;
memcpy(clone, rxr, sizeof(*rxr));
@@ -15521,6 +15780,7 @@ static void bnxt_queue_mem_free(struct net_device *dev, void *qmem)
struct bnxt_ring_struct *ring;
bnxt_free_one_rx_ring_skbs(bp, rxr);
+ bnxt_free_one_tpa_info(bp, rxr);
xdp_rxq_info_unreg(&rxr->xdp_rxq);
@@ -15601,6 +15861,7 @@ static int bnxt_queue_start(struct net_device *dev, void *qmem, int idx)
struct bnxt_rx_ring_info *rxr, *clone;
struct bnxt_cp_ring_info *cpr;
struct bnxt_vnic_info *vnic;
+ struct bnxt_napi *bnapi;
int i, rc;
rxr = &bp->rx_ring[idx];
@@ -15618,21 +15879,40 @@ static int bnxt_queue_start(struct net_device *dev, void *qmem, int idx)
bnxt_copy_rx_ring(bp, rxr, clone);
+ bnapi = rxr->bnapi;
+ cpr = &bnapi->cp_ring;
+
+ /* All rings have been reserved and previously allocated.
+ * Reallocating with the same parameters should never fail.
+ */
rc = bnxt_hwrm_rx_ring_alloc(bp, rxr);
if (rc)
- return rc;
+ goto err_reset;
+
+ if (bp->tph_mode) {
+ rc = bnxt_hwrm_cp_ring_alloc_p5(bp, rxr->rx_cpr);
+ if (rc)
+ goto err_reset;
+ }
+
rc = bnxt_hwrm_rx_agg_ring_alloc(bp, rxr);
if (rc)
- goto err_free_hwrm_rx_ring;
+ goto err_reset;
bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
if (bp->flags & BNXT_FLAG_AGG_RINGS)
bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
- cpr = &rxr->bnapi->cp_ring;
- cpr->sw_stats->rx.rx_resets++;
+ if (bp->flags & BNXT_FLAG_SHARED_RINGS) {
+ rc = bnxt_tx_queue_start(bp, idx);
+ if (rc)
+ goto err_reset;
+ }
+
+ napi_enable(&bnapi->napi);
+ bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons);
- for (i = 0; i <= BNXT_VNIC_NTUPLE; i++) {
+ for (i = 0; i < bp->nr_vnics; i++) {
vnic = &bp->vnic_info[i];
rc = bnxt_hwrm_vnic_set_rss_p5(bp, vnic, true);
@@ -15648,8 +15928,12 @@ static int bnxt_queue_start(struct net_device *dev, void *qmem, int idx)
return 0;
-err_free_hwrm_rx_ring:
- bnxt_hwrm_rx_ring_free(bp, rxr, false);
+err_reset:
+ netdev_err(bp->dev, "Unexpected HWRM error during queue start rc: %d\n",
+ rc);
+ napi_enable(&bnapi->napi);
+ bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons);
+ bnxt_reset_task(bp, true);
return rc;
}
@@ -15657,10 +15941,12 @@ static int bnxt_queue_stop(struct net_device *dev, void *qmem, int idx)
{
struct bnxt *bp = netdev_priv(dev);
struct bnxt_rx_ring_info *rxr;
+ struct bnxt_cp_ring_info *cpr;
struct bnxt_vnic_info *vnic;
+ struct bnxt_napi *bnapi;
int i;
- for (i = 0; i <= BNXT_VNIC_NTUPLE; i++) {
+ for (i = 0; i < bp->nr_vnics; i++) {
vnic = &bp->vnic_info[i];
vnic->mru = 0;
bnxt_hwrm_vnic_update(bp, vnic,
@@ -15669,14 +15955,30 @@ static int bnxt_queue_stop(struct net_device *dev, void *qmem, int idx)
/* Make sure NAPI sees that the VNIC is disabled */
synchronize_net();
rxr = &bp->rx_ring[idx];
- cancel_work_sync(&rxr->bnapi->cp_ring.dim.work);
+ bnapi = rxr->bnapi;
+ cpr = &bnapi->cp_ring;
+ cancel_work_sync(&cpr->dim.work);
bnxt_hwrm_rx_ring_free(bp, rxr, false);
bnxt_hwrm_rx_agg_ring_free(bp, rxr, false);
- rxr->rx_next_cons = 0;
page_pool_disable_direct_recycling(rxr->page_pool);
if (bnxt_separate_head_pool())
page_pool_disable_direct_recycling(rxr->head_pool);
+ if (bp->flags & BNXT_FLAG_SHARED_RINGS)
+ bnxt_tx_queue_stop(bp, idx);
+
+ /* Disable NAPI now after freeing the rings because HWRM_RING_FREE
+ * completion is handled in NAPI to guarantee no more DMA on that ring
+ * after seeing the completion.
+ */
+ napi_disable(&bnapi->napi);
+
+ if (bp->tph_mode) {
+ bnxt_hwrm_cp_ring_free(bp, rxr->rx_cpr);
+ bnxt_clear_one_cp_ring(bp, rxr->rx_cpr);
+ }
+ bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
+
memcpy(qmem, rxr, sizeof(*rxr));
bnxt_init_rx_ring_struct(bp, qmem);
@@ -15995,7 +16297,7 @@ int bnxt_restore_pf_fw_resources(struct bnxt *bp)
{
int rc;
- ASSERT_RTNL();
+ netdev_ops_assert_locked(bp->dev);
bnxt_hwrm_func_qcaps(bp);
if (netif_running(bp->dev))
@@ -16008,7 +16310,7 @@ int bnxt_restore_pf_fw_resources(struct bnxt *bp)
if (netif_running(bp->dev)) {
if (rc)
- dev_close(bp->dev);
+ netif_close(bp->dev);
else
rc = bnxt_open_nic(bp, true, false);
}
@@ -16345,6 +16647,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
bp->rss_cap |= BNXT_RSS_CAP_MULTI_RSS_CTX;
if (BNXT_SUPPORTS_QUEUE_API(bp))
dev->queue_mgmt_ops = &bnxt_queue_mgmt_ops;
+ dev->request_ops_lock = true;
rc = register_netdev(dev);
if (rc)
@@ -16395,13 +16698,13 @@ static void bnxt_shutdown(struct pci_dev *pdev)
if (!dev)
return;
- rtnl_lock();
+ netdev_lock(dev);
bp = netdev_priv(dev);
if (!bp)
goto shutdown_exit;
if (netif_running(dev))
- dev_close(dev);
+ netif_close(dev);
bnxt_ptp_clear(bp);
bnxt_clear_int_mode(bp);
@@ -16413,7 +16716,7 @@ static void bnxt_shutdown(struct pci_dev *pdev)
}
shutdown_exit:
- rtnl_unlock();
+ netdev_unlock(dev);
}
#ifdef CONFIG_PM_SLEEP
@@ -16425,7 +16728,7 @@ static int bnxt_suspend(struct device *device)
bnxt_ulp_stop(bp);
- rtnl_lock();
+ netdev_lock(dev);
if (netif_running(dev)) {
netif_device_detach(dev);
rc = bnxt_close(dev);
@@ -16434,7 +16737,7 @@ static int bnxt_suspend(struct device *device)
bnxt_ptp_clear(bp);
pci_disable_device(bp->pdev);
bnxt_free_ctx_mem(bp, false);
- rtnl_unlock();
+ netdev_unlock(dev);
return rc;
}
@@ -16444,7 +16747,7 @@ static int bnxt_resume(struct device *device)
struct bnxt *bp = netdev_priv(dev);
int rc = 0;
- rtnl_lock();
+ netdev_lock(dev);
rc = pci_enable_device(bp->pdev);
if (rc) {
netdev_err(dev, "Cannot re-enable PCI device during resume, err = %d\n",
@@ -16487,7 +16790,7 @@ static int bnxt_resume(struct device *device)
}
resume_exit:
- rtnl_unlock();
+ netdev_unlock(bp->dev);
bnxt_ulp_start(bp, rc);
if (!rc)
bnxt_reenable_sriov(bp);
@@ -16522,7 +16825,7 @@ static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
bnxt_ulp_stop(bp);
- rtnl_lock();
+ netdev_lock(netdev);
netif_device_detach(netdev);
if (test_and_set_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
@@ -16531,7 +16834,7 @@ static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
}
if (abort || state == pci_channel_io_perm_failure) {
- rtnl_unlock();
+ netdev_unlock(netdev);
return PCI_ERS_RESULT_DISCONNECT;
}
@@ -16550,7 +16853,7 @@ static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
if (pci_is_enabled(pdev))
pci_disable_device(pdev);
bnxt_free_ctx_mem(bp, false);
- rtnl_unlock();
+ netdev_unlock(netdev);
/* Request a slot reset. */
return PCI_ERS_RESULT_NEED_RESET;
@@ -16580,7 +16883,7 @@ static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
test_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN, &bp->state))
msleep(900);
- rtnl_lock();
+ netdev_lock(netdev);
if (pci_enable_device(pdev)) {
dev_err(&pdev->dev,
@@ -16635,7 +16938,7 @@ static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
reset_exit:
clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
bnxt_clear_reservations(bp, true);
- rtnl_unlock();
+ netdev_unlock(netdev);
return result;
}
@@ -16654,7 +16957,7 @@ static void bnxt_io_resume(struct pci_dev *pdev)
int err;
netdev_info(bp->dev, "PCI Slot Resume\n");
- rtnl_lock();
+ netdev_lock(netdev);
err = bnxt_hwrm_func_qcaps(bp);
if (!err) {
@@ -16667,7 +16970,7 @@ static void bnxt_io_resume(struct pci_dev *pdev)
if (!err)
netif_device_attach(netdev);
- rtnl_unlock();
+ netdev_unlock(netdev);
bnxt_ulp_start(bp, err);
if (!err)
bnxt_reenable_sriov(bp);
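The bnxt.c conversion above trades rtnl_lock()/dev_close() for netdev_lock()/netif_close() and sets dev->request_ops_lock so the core grabs the per-device instance lock around ndo callbacks. A minimal sketch of what an op may then assume; the callback below is a generic illustration, not taken from this driver:

static int example_ndo_change_mtu(struct net_device *dev, int new_mtu)
{
	/* With request_ops_lock set, the core already holds the instance
	 * lock when it invokes the op; the assertion documents that.
	 */
	netdev_assert_locked(dev);

	WRITE_ONCE(dev->mtu, new_mtu);
	return 0;
}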
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 2373f423a523..21726cf56586 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -82,6 +82,12 @@ struct tx_bd {
#define TX_OPAQUE_PROD(bp, opq) ((TX_OPAQUE_IDX(opq) + TX_OPAQUE_BDS(opq)) &\
(bp)->tx_ring_mask)
+#define TX_BD_CNT(n) (((n) << TX_BD_FLAGS_BD_CNT_SHIFT) & TX_BD_FLAGS_BD_CNT)
+
+#define TX_MAX_BD_CNT 32
+
+#define TX_MAX_FRAGS (TX_MAX_BD_CNT - 2)
+
struct tx_bd_ext {
__le32 tx_bd_hsize_lflags;
#define TX_BD_FLAGS_TCP_UDP_CHKSUM (1 << 0)
@@ -1234,6 +1240,11 @@ struct bnxt_irq {
u8 have_cpumask:1;
char name[IFNAMSIZ + BNXT_IRQ_NAME_EXTRA];
cpumask_var_t cpu_mask;
+
+ struct bnxt *bp;
+ int msix_nr;
+ int ring_nr;
+ struct irq_affinity_notify affinity_notify;
};
#define HWRM_RING_ALLOC_TX 0x1
@@ -2410,6 +2421,8 @@ struct bnxt {
u8 max_q;
u8 num_tc;
+ u8 tph_mode;
+
unsigned int current_interval;
#define BNXT_TIMER_INTERVAL HZ
@@ -2492,6 +2505,7 @@ struct bnxt {
#define BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V3 BIT_ULL(39)
#define BNXT_FW_CAP_VNIC_RE_FLUSH BIT_ULL(40)
#define BNXT_FW_CAP_SW_MAX_RESOURCE_LIMITS BIT_ULL(41)
+ #define BNXT_FW_CAP_NPAR_1_2 BIT_ULL(42)
u32 fw_dbg_cap;
@@ -2689,6 +2703,7 @@ struct bnxt {
#define BNXT_DUMP_LIVE 0
#define BNXT_DUMP_CRASH 1
#define BNXT_DUMP_DRIVER 2
+#define BNXT_DUMP_LIVE_WITH_CTX_L1_CACHE 3
struct bpf_prog *xdp_prog;
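The new macros make the hardware BD budget explicit: every packet consumes one long TX BD plus one extension BD, leaving TX_MAX_FRAGS data fragments out of TX_MAX_BD_CNT, and bnxt_start_xmit() now linearizes skbs that would exceed that. A small illustrative check and usage, not present in the patch:

/* Compile-time restatement of the fragment budget (illustrative). */
static_assert(TX_MAX_FRAGS == TX_MAX_BD_CNT - 2);

/* The flags word is then built as, for example:
 *	flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD |
 *		TX_BD_CNT(last_frag + 2);
 */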
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c
index 7236d8e548ab..5576e7cf8463 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c
@@ -159,8 +159,8 @@ static int bnxt_hwrm_dbg_coredump_list(struct bnxt *bp,
return rc;
}
-static int bnxt_hwrm_dbg_coredump_initiate(struct bnxt *bp, u16 component_id,
- u16 segment_id)
+static int bnxt_hwrm_dbg_coredump_initiate(struct bnxt *bp, u16 dump_type,
+ u16 component_id, u16 segment_id)
{
struct hwrm_dbg_coredump_initiate_input *req;
int rc;
@@ -172,6 +172,8 @@ static int bnxt_hwrm_dbg_coredump_initiate(struct bnxt *bp, u16 component_id,
hwrm_req_timeout(bp, req, bp->hwrm_cmd_max_timeout);
req->component_id = cpu_to_le16(component_id);
req->segment_id = cpu_to_le16(segment_id);
+ if (dump_type == BNXT_DUMP_LIVE_WITH_CTX_L1_CACHE)
+ req->seg_flags = DBG_COREDUMP_INITIATE_REQ_SEG_FLAGS_COLLECT_CTX_L1_CACHE;
return hwrm_req_send(bp, req);
}
@@ -450,7 +452,8 @@ static int __bnxt_get_coredump(struct bnxt *bp, u16 dump_type, void *buf,
start = jiffies;
- rc = bnxt_hwrm_dbg_coredump_initiate(bp, comp_id, seg_id);
+ rc = bnxt_hwrm_dbg_coredump_initiate(bp, dump_type, comp_id,
+ seg_id);
if (rc) {
netdev_err(bp->dev,
"Failed to initiate coredump for seg = %d\n",
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
index ef8288fd68f4..777880594a04 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
@@ -11,6 +11,7 @@
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <net/devlink.h>
+#include <net/netdev_lock.h>
#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_hwrm.h"
@@ -439,14 +440,17 @@ static int bnxt_dl_reload_down(struct devlink *dl, bool netns_change,
case DEVLINK_RELOAD_ACTION_DRIVER_REINIT: {
bnxt_ulp_stop(bp);
rtnl_lock();
+ netdev_lock(bp->dev);
if (bnxt_sriov_cfg(bp)) {
NL_SET_ERR_MSG_MOD(extack,
"reload is unsupported while VFs are allocated or being configured");
+ netdev_unlock(bp->dev);
rtnl_unlock();
bnxt_ulp_start(bp, 0);
return -EOPNOTSUPP;
}
if (bp->dev->reg_state == NETREG_UNREGISTERED) {
+ netdev_unlock(bp->dev);
rtnl_unlock();
bnxt_ulp_start(bp, 0);
return -ENODEV;
@@ -458,7 +462,8 @@ static int bnxt_dl_reload_down(struct devlink *dl, bool netns_change,
if (rc) {
NL_SET_ERR_MSG_MOD(extack, "Failed to deregister");
if (netif_running(bp->dev))
- dev_close(bp->dev);
+ netif_close(bp->dev);
+ netdev_unlock(bp->dev);
rtnl_unlock();
break;
}
@@ -479,7 +484,9 @@ static int bnxt_dl_reload_down(struct devlink *dl, bool netns_change,
return -EPERM;
}
rtnl_lock();
+ netdev_lock(bp->dev);
if (bp->dev->reg_state == NETREG_UNREGISTERED) {
+ netdev_unlock(bp->dev);
rtnl_unlock();
return -ENODEV;
}
@@ -493,6 +500,7 @@ static int bnxt_dl_reload_down(struct devlink *dl, bool netns_change,
if (rc) {
NL_SET_ERR_MSG_MOD(extack, "Failed to activate firmware");
clear_bit(BNXT_STATE_FW_ACTIVATE, &bp->state);
+ netdev_unlock(bp->dev);
rtnl_unlock();
}
break;
@@ -511,6 +519,8 @@ static int bnxt_dl_reload_up(struct devlink *dl, enum devlink_reload_action acti
struct bnxt *bp = bnxt_get_bp_from_dl(dl);
int rc = 0;
+ netdev_assert_locked(bp->dev);
+
*actions_performed = 0;
switch (action) {
case DEVLINK_RELOAD_ACTION_DRIVER_REINIT: {
@@ -535,6 +545,7 @@ static int bnxt_dl_reload_up(struct devlink *dl, enum devlink_reload_action acti
if (!netif_running(bp->dev))
NL_SET_ERR_MSG_MOD(extack,
"Device is closed, not waiting for reset notice that will never come");
+ netdev_unlock(bp->dev);
rtnl_unlock();
while (test_bit(BNXT_STATE_FW_ACTIVATE, &bp->state)) {
if (time_after(jiffies, timeout)) {
@@ -550,6 +561,7 @@ static int bnxt_dl_reload_up(struct devlink *dl, enum devlink_reload_action acti
msleep(50);
}
rtnl_lock();
+ netdev_lock(bp->dev);
if (!rc)
*actions_performed |= BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT);
clear_bit(BNXT_STATE_FW_ACTIVATE, &bp->state);
@@ -568,8 +580,9 @@ static int bnxt_dl_reload_up(struct devlink *dl, enum devlink_reload_action acti
}
*actions_performed |= BIT(action);
} else if (netif_running(bp->dev)) {
- dev_close(bp->dev);
+ netif_close(bp->dev);
}
+ netdev_unlock(bp->dev);
rtnl_unlock();
if (action == DEVLINK_RELOAD_ACTION_DRIVER_REINIT)
bnxt_ulp_start(bp, rc);
@@ -666,6 +679,8 @@ static const struct bnxt_dl_nvm_param nvm_params[] = {
NVM_OFF_MSIX_VEC_PER_PF_MAX, BNXT_NVM_SHARED_CFG, 10, 4},
{DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MIN,
NVM_OFF_MSIX_VEC_PER_PF_MIN, BNXT_NVM_SHARED_CFG, 7, 4},
+ {DEVLINK_PARAM_GENERIC_ID_ENABLE_ROCE, NVM_OFF_SUPPORT_RDMA,
+ BNXT_NVM_FUNC_CFG, 1, 1},
{BNXT_DEVLINK_PARAM_ID_GRE_VER_CHECK, NVM_OFF_DIS_GRE_VER_CHECK,
BNXT_NVM_SHARED_CFG, 1, 1},
};
@@ -1010,37 +1025,19 @@ static int bnxt_dl_info_get(struct devlink *dl, struct devlink_info_req *req,
}
-static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg,
- union devlink_param_value *val)
+static int __bnxt_hwrm_nvm_req(struct bnxt *bp,
+ const struct bnxt_dl_nvm_param *nvm, void *msg,
+ union devlink_param_value *val)
{
struct hwrm_nvm_get_variable_input *req = msg;
- struct bnxt_dl_nvm_param nvm_param;
struct hwrm_err_output *resp;
union bnxt_nvm_data *data;
dma_addr_t data_dma_addr;
- int idx = 0, rc, i;
-
- /* Get/Set NVM CFG parameter is supported only on PFs */
- if (BNXT_VF(bp)) {
- hwrm_req_drop(bp, req);
- return -EPERM;
- }
-
- for (i = 0; i < ARRAY_SIZE(nvm_params); i++) {
- if (nvm_params[i].id == param_id) {
- nvm_param = nvm_params[i];
- break;
- }
- }
+ int idx = 0, rc;
- if (i == ARRAY_SIZE(nvm_params)) {
- hwrm_req_drop(bp, req);
- return -EOPNOTSUPP;
- }
-
- if (nvm_param.dir_type == BNXT_NVM_PORT_CFG)
+ if (nvm->dir_type == BNXT_NVM_PORT_CFG)
idx = bp->pf.port_id;
- else if (nvm_param.dir_type == BNXT_NVM_FUNC_CFG)
+ else if (nvm->dir_type == BNXT_NVM_FUNC_CFG)
idx = bp->pf.fw_fid - BNXT_FIRST_PF_FID;
data = hwrm_req_dma_slice(bp, req, sizeof(*data), &data_dma_addr);
@@ -1051,23 +1048,23 @@ static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg,
}
req->dest_data_addr = cpu_to_le64(data_dma_addr);
- req->data_len = cpu_to_le16(nvm_param.nvm_num_bits);
- req->option_num = cpu_to_le16(nvm_param.offset);
+ req->data_len = cpu_to_le16(nvm->nvm_num_bits);
+ req->option_num = cpu_to_le16(nvm->offset);
req->index_0 = cpu_to_le16(idx);
if (idx)
req->dimensions = cpu_to_le16(1);
resp = hwrm_req_hold(bp, req);
if (req->req_type == cpu_to_le16(HWRM_NVM_SET_VARIABLE)) {
- bnxt_copy_to_nvm_data(data, val, nvm_param.nvm_num_bits,
- nvm_param.dl_num_bytes);
+ bnxt_copy_to_nvm_data(data, val, nvm->nvm_num_bits,
+ nvm->dl_num_bytes);
rc = hwrm_req_send(bp, msg);
} else {
rc = hwrm_req_send_silent(bp, msg);
if (!rc) {
bnxt_copy_from_nvm_data(val, data,
- nvm_param.nvm_num_bits,
- nvm_param.dl_num_bytes);
+ nvm->nvm_num_bits,
+ nvm->dl_num_bytes);
} else {
if (resp->cmd_err ==
NVM_GET_VARIABLE_CMD_ERR_CODE_VAR_NOT_EXIST)
@@ -1080,6 +1077,27 @@ static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg,
return rc;
}
+static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg,
+ union devlink_param_value *val)
+{
+ struct hwrm_nvm_get_variable_input *req = msg;
+ const struct bnxt_dl_nvm_param *nvm_param;
+ int i;
+
+ /* Get/Set NVM CFG parameter is supported only on PFs */
+ if (BNXT_VF(bp)) {
+ hwrm_req_drop(bp, req);
+ return -EPERM;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(nvm_params); i++) {
+ nvm_param = &nvm_params[i];
+ if (nvm_param->id == param_id)
+ return __bnxt_hwrm_nvm_req(bp, nvm_param, msg, val);
+ }
+ return -EOPNOTSUPP;
+}
+
static int bnxt_dl_nvm_param_get(struct devlink *dl, u32 id,
struct devlink_param_gset_ctx *ctx)
{
@@ -1116,6 +1134,32 @@ static int bnxt_dl_nvm_param_set(struct devlink *dl, u32 id,
return bnxt_hwrm_nvm_req(bp, id, req, &ctx->val);
}
+static int bnxt_dl_roce_validate(struct devlink *dl, u32 id,
+ union devlink_param_value val,
+ struct netlink_ext_ack *extack)
+{
+ const struct bnxt_dl_nvm_param nvm_roce_cap = {0, NVM_OFF_RDMA_CAPABLE,
+ BNXT_NVM_SHARED_CFG, 1, 1};
+ struct bnxt *bp = bnxt_get_bp_from_dl(dl);
+ struct hwrm_nvm_get_variable_input *req;
+ union devlink_param_value roce_cap;
+ int rc;
+
+ rc = hwrm_req_init(bp, req, HWRM_NVM_GET_VARIABLE);
+ if (rc)
+ return rc;
+
+ if (__bnxt_hwrm_nvm_req(bp, &nvm_roce_cap, req, &roce_cap)) {
+ NL_SET_ERR_MSG_MOD(extack, "Unable to verify if device is RDMA Capable");
+ return -EINVAL;
+ }
+ if (!roce_cap.vbool) {
+ NL_SET_ERR_MSG_MOD(extack, "Device does not support RDMA");
+ return -EINVAL;
+ }
+ return 0;
+}
+
static int bnxt_dl_msix_validate(struct devlink *dl, u32 id,
union devlink_param_value val,
struct netlink_ext_ack *extack)
@@ -1180,6 +1224,10 @@ static const struct devlink_param bnxt_dl_params[] = {
BIT(DEVLINK_PARAM_CMODE_PERMANENT),
bnxt_dl_nvm_param_get, bnxt_dl_nvm_param_set,
bnxt_dl_msix_validate),
+ DEVLINK_PARAM_GENERIC(ENABLE_ROCE,
+ BIT(DEVLINK_PARAM_CMODE_PERMANENT),
+ bnxt_dl_nvm_param_get, bnxt_dl_nvm_param_set,
+ bnxt_dl_roce_validate),
DEVLINK_PARAM_DRIVER(BNXT_DEVLINK_PARAM_ID_GRE_VER_CHECK,
"gre_ver_check", DEVLINK_PARAM_TYPE_BOOL,
BIT(DEVLINK_PARAM_CMODE_PERMANENT),
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h
index b8105065367b..7f45dcd7b287 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h
@@ -41,8 +41,10 @@ static inline void bnxt_dl_set_remote_reset(struct devlink *dl, bool value)
#define NVM_OFF_MSIX_VEC_PER_PF_MAX 108
#define NVM_OFF_MSIX_VEC_PER_PF_MIN 114
#define NVM_OFF_IGNORE_ARI 164
+#define NVM_OFF_RDMA_CAPABLE 161
#define NVM_OFF_DIS_GRE_VER_CHECK 171
#define NVM_OFF_ENABLE_SRIOV 401
+#define NVM_OFF_SUPPORT_RDMA 506
#define NVM_OFF_NVM_CFG_VER 602
#define BNXT_NVM_CFG_VER_BITS 8
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index 9c5820839514..48dd5922e4dd 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -4541,16 +4541,16 @@ static int bnxt_get_module_status(struct bnxt *bp, struct netlink_ext_ack *extac
return -EINVAL;
}
-static int bnxt_get_module_eeprom_by_page(struct net_device *dev,
- const struct ethtool_module_eeprom *page_data,
- struct netlink_ext_ack *extack)
+static int
+bnxt_mod_eeprom_by_page_precheck(struct bnxt *bp,
+ const struct ethtool_module_eeprom *page_data,
+ struct netlink_ext_ack *extack)
{
- struct bnxt *bp = netdev_priv(dev);
int rc;
if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp)) {
NL_SET_ERR_MSG_MOD(extack,
- "Module read not permitted on untrusted VF");
+ "Module read/write not permitted on untrusted VF");
return -EPERM;
}
@@ -4567,6 +4567,19 @@ static int bnxt_get_module_eeprom_by_page(struct net_device *dev,
NL_SET_ERR_MSG_MOD(extack, "Firmware not capable for bank selection");
return -EINVAL;
}
+ return 0;
+}
+
+static int bnxt_get_module_eeprom_by_page(struct net_device *dev,
+ const struct ethtool_module_eeprom *page_data,
+ struct netlink_ext_ack *extack)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ int rc;
+
+ rc = bnxt_mod_eeprom_by_page_precheck(bp, page_data, extack);
+ if (rc)
+ return rc;
rc = bnxt_read_sfp_module_eeprom_info(bp, page_data->i2c_address << 1,
page_data->page, page_data->bank,
@@ -4580,6 +4593,62 @@ static int bnxt_get_module_eeprom_by_page(struct net_device *dev,
return page_data->length;
}
+static int bnxt_write_sfp_module_eeprom_info(struct bnxt *bp,
+ const struct ethtool_module_eeprom *page)
+{
+ struct hwrm_port_phy_i2c_write_input *req;
+ int bytes_written = 0;
+ int rc;
+
+ rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_I2C_WRITE);
+ if (rc)
+ return rc;
+
+ hwrm_req_hold(bp, req);
+ req->i2c_slave_addr = page->i2c_address << 1;
+ req->page_number = cpu_to_le16(page->page);
+ req->bank_number = page->bank;
+ req->port_id = cpu_to_le16(bp->pf.port_id);
+ req->enables = cpu_to_le32(PORT_PHY_I2C_WRITE_REQ_ENABLES_PAGE_OFFSET |
+ PORT_PHY_I2C_WRITE_REQ_ENABLES_BANK_NUMBER);
+
+ while (bytes_written < page->length) {
+ u16 xfer_size;
+
+ xfer_size = min_t(u16, page->length - bytes_written,
+ BNXT_MAX_PHY_I2C_RESP_SIZE);
+ req->page_offset = cpu_to_le16(page->offset + bytes_written);
+ req->data_length = xfer_size;
+ memcpy(req->data, page->data + bytes_written, xfer_size);
+ rc = hwrm_req_send(bp, req);
+ if (rc)
+ break;
+ bytes_written += xfer_size;
+ }
+
+ hwrm_req_drop(bp, req);
+ return rc;
+}
+
+static int bnxt_set_module_eeprom_by_page(struct net_device *dev,
+ const struct ethtool_module_eeprom *page_data,
+ struct netlink_ext_ack *extack)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ int rc;
+
+ rc = bnxt_mod_eeprom_by_page_precheck(bp, page_data, extack);
+ if (rc)
+ return rc;
+
+ rc = bnxt_write_sfp_module_eeprom_info(bp, page_data);
+ if (rc) {
+ NL_SET_ERR_MSG_MOD(extack, "Module's eeprom write failed");
+ return rc;
+ }
+ return page_data->length;
+}
+
static int bnxt_nway_reset(struct net_device *dev)
{
int rc = 0;
@@ -5077,8 +5146,9 @@ static int bnxt_set_dump(struct net_device *dev, struct ethtool_dump *dump)
{
struct bnxt *bp = netdev_priv(dev);
- if (dump->flag > BNXT_DUMP_DRIVER) {
- netdev_info(dev, "Supports only Live(0), Crash(1), Driver(2) dumps.\n");
+ if (dump->flag > BNXT_DUMP_LIVE_WITH_CTX_L1_CACHE) {
+ netdev_info(dev,
+ "Supports only Live(0), Crash(1), Driver(2), Live with cached context(3) dumps.\n");
return -EINVAL;
}
@@ -5441,6 +5511,7 @@ const struct ethtool_ops bnxt_ethtool_ops = {
.get_module_info = bnxt_get_module_info,
.get_module_eeprom = bnxt_get_module_eeprom,
.get_module_eeprom_by_page = bnxt_get_module_eeprom_by_page,
+ .set_module_eeprom_by_page = bnxt_set_module_eeprom_by_page,
.nway_reset = bnxt_nway_reset,
.set_phys_id = bnxt_set_phys_id,
.self_test = bnxt_self_test,
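bnxt_write_sfp_module_eeprom_info() above caps every HWRM request at BNXT_MAX_PHY_I2C_RESP_SIZE and advances the page offset by the bytes already written. A minimal sketch of that chunking loop follows; MAX_XFER and send_chunk() are stand-ins for the firmware limit and the per-request call, not real driver symbols.

/* Sketch: split a large write into bounded transfers, each carrying its own
 * offset, the way the EEPROM write loop above does.
 */
#include <stdio.h>
#include <string.h>

#define MAX_XFER 64     /* stands in for BNXT_MAX_PHY_I2C_RESP_SIZE */

static int send_chunk(unsigned int offset, const unsigned char *data,
                      unsigned int len)
{
        (void)data;     /* a real implementation would copy data into the request */
        printf("write %u bytes at offset %u\n", len, offset);
        return 0;
}

static int write_eeprom(unsigned int offset, const unsigned char *data,
                        unsigned int length)
{
        unsigned int written = 0;

        while (written < length) {
                unsigned int xfer = length - written;

                if (xfer > MAX_XFER)
                        xfer = MAX_XFER;
                if (send_chunk(offset + written, data + written, xfer))
                        return -1;      /* stop on the first failed request */
                written += xfer;
        }
        return 0;
}

int main(void)
{
        unsigned char buf[200];

        memset(buf, 0xff, sizeof(buf));
        return write_eeprom(128, buf, sizeof(buf));
}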
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
index 5f8de1634378..549231703bce 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
@@ -2,7 +2,7 @@
*
* Copyright (c) 2014-2016 Broadcom Corporation
* Copyright (c) 2014-2018 Broadcom Limited
- * Copyright (c) 2018-2024 Broadcom Inc.
+ * Copyright (c) 2018-2025 Broadcom Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -438,6 +438,7 @@ struct cmd_nums {
#define HWRM_MFG_PRVSN_EXPORT_CERT 0x219UL
#define HWRM_STAT_DB_ERROR_QSTATS 0x21aUL
#define HWRM_MFG_TESTS 0x21bUL
+ #define HWRM_MFG_WRITE_CERT_NVM 0x21cUL
#define HWRM_PORT_POE_CFG 0x230UL
#define HWRM_PORT_POE_QCFG 0x231UL
#define HWRM_UDCC_QCAPS 0x258UL
@@ -514,6 +515,8 @@ struct cmd_nums {
#define HWRM_TFC_TBL_SCOPE_CONFIG_GET 0x39aUL
#define HWRM_TFC_RESC_USAGE_QUERY 0x39bUL
#define HWRM_TFC_GLOBAL_ID_FREE 0x39cUL
+ #define HWRM_TFC_TCAM_PRI_UPDATE 0x39dUL
+ #define HWRM_TFC_HOT_UPGRADE_PROCESS 0x3a0UL
#define HWRM_SV 0x400UL
#define HWRM_DBG_SERDES_TEST 0xff0eUL
#define HWRM_DBG_LOG_BUFFER_FLUSH 0xff0fUL
@@ -629,8 +632,8 @@ struct hwrm_err_output {
#define HWRM_VERSION_MAJOR 1
#define HWRM_VERSION_MINOR 10
#define HWRM_VERSION_UPDATE 3
-#define HWRM_VERSION_RSVD 85
-#define HWRM_VERSION_STR "1.10.3.85"
+#define HWRM_VERSION_RSVD 97
+#define HWRM_VERSION_STR "1.10.3.97"
/* hwrm_ver_get_input (size:192b/24B) */
struct hwrm_ver_get_input {
@@ -1905,11 +1908,15 @@ struct hwrm_func_qcaps_output {
__le32 roce_vf_max_srq;
__le32 roce_vf_max_gid;
__le32 flags_ext3;
- #define FUNC_QCAPS_RESP_FLAGS_EXT3_RM_RSV_WHILE_ALLOC_CAP 0x1UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT3_REQUIRE_L2_FILTER 0x2UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT3_MAX_ROCE_VFS_SUPPORTED 0x4UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT3_RM_RSV_WHILE_ALLOC_CAP 0x1UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT3_REQUIRE_L2_FILTER 0x2UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT3_MAX_ROCE_VFS_SUPPORTED 0x4UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT3_RX_RATE_PROFILE_SEL_SUPPORTED 0x8UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT3_BIDI_OPT_SUPPORTED 0x10UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT3_MIRROR_ON_ROCE_SUPPORTED 0x20UL
__le16 max_roce_vfs;
- u8 unused_3[5];
+ __le16 max_crypto_rx_flow_filters;
+ u8 unused_3[3];
u8 valid;
};
@@ -1924,7 +1931,7 @@ struct hwrm_func_qcfg_input {
u8 unused_0[6];
};
-/* hwrm_func_qcfg_output (size:1280b/160B) */
+/* hwrm_func_qcfg_output (size:1344b/168B) */
struct hwrm_func_qcfg_output {
__le16 error_code;
__le16 req_type;
@@ -2087,14 +2094,18 @@ struct hwrm_func_qcfg_output {
__le16 host_mtu;
__le16 flags2;
#define FUNC_QCFG_RESP_FLAGS2_SRIOV_DSCP_INSERT_ENABLED 0x1UL
- u8 unused_4[2];
+ __le16 stag_vid;
u8 port_kdnet_mode;
#define FUNC_QCFG_RESP_PORT_KDNET_MODE_DISABLED 0x0UL
#define FUNC_QCFG_RESP_PORT_KDNET_MODE_ENABLED 0x1UL
#define FUNC_QCFG_RESP_PORT_KDNET_MODE_LAST FUNC_QCFG_RESP_PORT_KDNET_MODE_ENABLED
u8 kdnet_pcie_function;
__le16 port_kdnet_fid;
- u8 unused_5[2];
+ u8 unused_5;
+ u8 roce_bidi_opt_mode;
+ #define FUNC_QCFG_RESP_ROCE_BIDI_OPT_MODE_DISABLED 0x1UL
+ #define FUNC_QCFG_RESP_ROCE_BIDI_OPT_MODE_DEDICATED 0x2UL
+ #define FUNC_QCFG_RESP_ROCE_BIDI_OPT_MODE_SHARED 0x4UL
__le32 num_ktls_tx_key_ctxs;
__le32 num_ktls_rx_key_ctxs;
u8 lag_id;
@@ -2112,7 +2123,8 @@ struct hwrm_func_qcfg_output {
__le16 xid_partition_cfg;
#define FUNC_QCFG_RESP_XID_PARTITION_CFG_TX_CK 0x1UL
#define FUNC_QCFG_RESP_XID_PARTITION_CFG_RX_CK 0x2UL
- u8 unused_7;
+ __le16 mirror_vnic_id;
+ u8 unused_7[7];
u8 valid;
};
@@ -3965,7 +3977,7 @@ struct ts_split_entries {
__le32 region_num_entries;
u8 tsid;
u8 lkup_static_bkt_cnt_exp[2];
- u8 rsvd;
+ u8 locked;
__le32 rsvd2[2];
};
@@ -5483,6 +5495,37 @@ struct hwrm_port_phy_qcaps_output {
u8 valid;
};
+/* hwrm_port_phy_i2c_write_input (size:832b/104B) */
+struct hwrm_port_phy_i2c_write_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ __le32 enables;
+ #define PORT_PHY_I2C_WRITE_REQ_ENABLES_PAGE_OFFSET 0x1UL
+ #define PORT_PHY_I2C_WRITE_REQ_ENABLES_BANK_NUMBER 0x2UL
+ __le16 port_id;
+ u8 i2c_slave_addr;
+ u8 bank_number;
+ __le16 page_number;
+ __le16 page_offset;
+ u8 data_length;
+ u8 unused_1[7];
+ __le32 data[16];
+};
+
+/* hwrm_port_phy_i2c_write_output (size:128b/16B) */
+struct hwrm_port_phy_i2c_write_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
/* hwrm_port_phy_i2c_read_input (size:320b/40B) */
struct hwrm_port_phy_i2c_read_input {
__le16 req_type;
@@ -6610,8 +6653,9 @@ struct hwrm_vnic_alloc_input {
__le32 flags;
#define VNIC_ALLOC_REQ_FLAGS_DEFAULT 0x1UL
#define VNIC_ALLOC_REQ_FLAGS_VIRTIO_NET_FID_VALID 0x2UL
+ #define VNIC_ALLOC_REQ_FLAGS_VNIC_ID_VALID 0x4UL
__le16 virtio_net_fid;
- u8 unused_0[2];
+ __le16 vnic_id;
};
/* hwrm_vnic_alloc_output (size:128b/16B) */
@@ -6710,6 +6754,7 @@ struct hwrm_vnic_cfg_input {
#define VNIC_CFG_REQ_ENABLES_QUEUE_ID 0x80UL
#define VNIC_CFG_REQ_ENABLES_RX_CSUM_V2_MODE 0x100UL
#define VNIC_CFG_REQ_ENABLES_L2_CQE_MODE 0x200UL
+ #define VNIC_CFG_REQ_ENABLES_RAW_QP_ID 0x400UL
__le16 vnic_id;
__le16 dflt_ring_grp;
__le16 rss_rule;
@@ -6729,7 +6774,7 @@ struct hwrm_vnic_cfg_input {
#define VNIC_CFG_REQ_L2_CQE_MODE_COMPRESSED 0x1UL
#define VNIC_CFG_REQ_L2_CQE_MODE_MIXED 0x2UL
#define VNIC_CFG_REQ_L2_CQE_MODE_LAST VNIC_CFG_REQ_L2_CQE_MODE_MIXED
- u8 unused0[4];
+ __le32 raw_qp_id;
};
/* hwrm_vnic_cfg_output (size:128b/16B) */
@@ -7082,6 +7127,15 @@ struct hwrm_vnic_plcmodes_cfg_output {
u8 valid;
};
+/* hwrm_vnic_plcmodes_cfg_cmd_err (size:64b/8B) */
+struct hwrm_vnic_plcmodes_cfg_cmd_err {
+ u8 code;
+ #define VNIC_PLCMODES_CFG_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define VNIC_PLCMODES_CFG_CMD_ERR_CODE_INVALID_HDS_THRESHOLD 0x1UL
+ #define VNIC_PLCMODES_CFG_CMD_ERR_CODE_LAST VNIC_PLCMODES_CFG_CMD_ERR_CODE_INVALID_HDS_THRESHOLD
+ u8 unused_0[7];
+};
+
/* hwrm_vnic_rss_cos_lb_ctx_alloc_input (size:128b/16B) */
struct hwrm_vnic_rss_cos_lb_ctx_alloc_input {
__le16 req_type;
@@ -7131,15 +7185,16 @@ struct hwrm_ring_alloc_input {
__le16 target_id;
__le64 resp_addr;
__le32 enables;
- #define RING_ALLOC_REQ_ENABLES_RING_ARB_CFG 0x2UL
- #define RING_ALLOC_REQ_ENABLES_STAT_CTX_ID_VALID 0x8UL
- #define RING_ALLOC_REQ_ENABLES_MAX_BW_VALID 0x20UL
- #define RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID 0x40UL
- #define RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID 0x80UL
- #define RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID 0x100UL
- #define RING_ALLOC_REQ_ENABLES_SCHQ_ID 0x200UL
- #define RING_ALLOC_REQ_ENABLES_MPC_CHNLS_TYPE 0x400UL
- #define RING_ALLOC_REQ_ENABLES_STEERING_TAG_VALID 0x800UL
+ #define RING_ALLOC_REQ_ENABLES_RING_ARB_CFG 0x2UL
+ #define RING_ALLOC_REQ_ENABLES_STAT_CTX_ID_VALID 0x8UL
+ #define RING_ALLOC_REQ_ENABLES_MAX_BW_VALID 0x20UL
+ #define RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID 0x40UL
+ #define RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID 0x80UL
+ #define RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID 0x100UL
+ #define RING_ALLOC_REQ_ENABLES_SCHQ_ID 0x200UL
+ #define RING_ALLOC_REQ_ENABLES_MPC_CHNLS_TYPE 0x400UL
+ #define RING_ALLOC_REQ_ENABLES_STEERING_TAG_VALID 0x800UL
+ #define RING_ALLOC_REQ_ENABLES_RX_RATE_PROFILE_VALID 0x1000UL
u8 ring_type;
#define RING_ALLOC_REQ_RING_TYPE_L2_CMPL 0x0UL
#define RING_ALLOC_REQ_RING_TYPE_TX 0x1UL
@@ -7226,7 +7281,11 @@ struct hwrm_ring_alloc_input {
#define RING_ALLOC_REQ_MPC_CHNLS_TYPE_RE_CFA 0x3UL
#define RING_ALLOC_REQ_MPC_CHNLS_TYPE_PRIMATE 0x4UL
#define RING_ALLOC_REQ_MPC_CHNLS_TYPE_LAST RING_ALLOC_REQ_MPC_CHNLS_TYPE_PRIMATE
- u8 unused_4[2];
+ u8 rx_rate_profile_sel;
+ #define RING_ALLOC_REQ_RX_RATE_PROFILE_SEL_DEFAULT 0x0UL
+ #define RING_ALLOC_REQ_RX_RATE_PROFILE_SEL_POLL_MODE 0x1UL
+ #define RING_ALLOC_REQ_RX_RATE_PROFILE_SEL_LAST RING_ALLOC_REQ_RX_RATE_PROFILE_SEL_POLL_MODE
+ u8 unused_4;
__le64 cq_handle;
};
@@ -9122,6 +9181,39 @@ struct pcie_ctx_hw_stats {
__le64 pcie_recovery_histogram;
};
+/* pcie_ctx_hw_stats_v2 (size:4096b/512B) */
+struct pcie_ctx_hw_stats_v2 {
+ __le64 pcie_pl_signal_integrity;
+ __le64 pcie_dl_signal_integrity;
+ __le64 pcie_tl_signal_integrity;
+ __le64 pcie_link_integrity;
+ __le64 pcie_tx_traffic_rate;
+ __le64 pcie_rx_traffic_rate;
+ __le64 pcie_tx_dllp_statistics;
+ __le64 pcie_rx_dllp_statistics;
+ __le64 pcie_equalization_time;
+ __le32 pcie_ltssm_histogram[4];
+ __le64 pcie_recovery_histogram;
+ __le32 pcie_tl_credit_nph_histogram[8];
+ __le32 pcie_tl_credit_ph_histogram[8];
+ __le32 pcie_tl_credit_pd_histogram[8];
+ __le32 pcie_cmpl_latest_times[4];
+ __le32 pcie_cmpl_longest_time;
+ __le32 pcie_cmpl_shortest_time;
+ __le32 unused_0[2];
+ __le32 pcie_cmpl_latest_headers[4][4];
+ __le32 pcie_cmpl_longest_headers[4][4];
+ __le32 pcie_cmpl_shortest_headers[4][4];
+ __le32 pcie_wr_latency_histogram[12];
+ __le32 pcie_wr_latency_all_normal_count;
+ __le32 unused_1;
+ __le64 pcie_posted_packet_count;
+ __le64 pcie_non_posted_packet_count;
+ __le64 pcie_other_packet_count;
+ __le64 pcie_blocked_packet_count;
+ __le64 pcie_cmpl_packet_count;
+};
+
/* hwrm_stat_generic_qstats_input (size:256b/32B) */
struct hwrm_stat_generic_qstats_input {
__le16 req_type;
@@ -9317,6 +9409,9 @@ struct hwrm_struct_hdr {
#define STRUCT_HDR_STRUCT_ID_LAST STRUCT_HDR_STRUCT_ID_UDCC_RTT_BUCKET_BOUND
__le16 len;
u8 version;
+ #define STRUCT_HDR_VERSION_0 0x0UL
+ #define STRUCT_HDR_VERSION_1 0x1UL
+ #define STRUCT_HDR_VERSION_LAST STRUCT_HDR_VERSION_1
u8 count;
__le16 subtype;
__le16 next_offset;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
index 12b6ed51fd88..5ddddd89052f 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -946,7 +946,9 @@ void bnxt_sriov_disable(struct bnxt *bp)
/* Reclaim all resources for the PF. */
rtnl_lock();
+ netdev_lock(bp->dev);
bnxt_restore_pf_fw_resources(bp);
+ netdev_unlock(bp->dev);
rtnl_unlock();
}
@@ -956,17 +958,21 @@ int bnxt_sriov_configure(struct pci_dev *pdev, int num_vfs)
struct bnxt *bp = netdev_priv(dev);
rtnl_lock();
+ netdev_lock(dev);
if (!netif_running(dev)) {
netdev_warn(dev, "Reject SRIOV config request since if is down!\n");
+ netdev_unlock(dev);
rtnl_unlock();
return 0;
}
if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
netdev_warn(dev, "Reject SRIOV config request when FW reset is in progress\n");
+ netdev_unlock(dev);
rtnl_unlock();
return 0;
}
bp->sriov_cfg = true;
+ netdev_unlock(dev);
rtnl_unlock();
if (pci_vfs_assigned(bp->pdev)) {
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
index e4a7f37036ed..a8e930d5dbb0 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
@@ -112,7 +112,7 @@ int bnxt_register_dev(struct bnxt_en_dev *edev,
struct bnxt_ulp *ulp;
int rc = 0;
- rtnl_lock();
+ netdev_lock(dev);
mutex_lock(&edev->en_dev_lock);
if (!bp->irq_tbl) {
rc = -ENODEV;
@@ -138,7 +138,7 @@ int bnxt_register_dev(struct bnxt_en_dev *edev,
edev->flags |= BNXT_EN_FLAG_MSIX_REQUESTED;
exit:
mutex_unlock(&edev->en_dev_lock);
- rtnl_unlock();
+ netdev_unlock(dev);
return rc;
}
EXPORT_SYMBOL(bnxt_register_dev);
@@ -151,7 +151,7 @@ void bnxt_unregister_dev(struct bnxt_en_dev *edev)
int i = 0;
ulp = edev->ulp_tbl;
- rtnl_lock();
+ netdev_lock(dev);
mutex_lock(&edev->en_dev_lock);
if (ulp->msix_requested)
edev->flags &= ~BNXT_EN_FLAG_MSIX_REQUESTED;
@@ -169,7 +169,7 @@ void bnxt_unregister_dev(struct bnxt_en_dev *edev)
i++;
}
mutex_unlock(&edev->en_dev_lock);
- rtnl_unlock();
+ netdev_unlock(dev);
return;
}
EXPORT_SYMBOL(bnxt_unregister_dev);
@@ -309,12 +309,14 @@ void bnxt_ulp_irq_stop(struct bnxt *bp)
if (!ulp->msix_requested)
return;
- ops = rtnl_dereference(ulp->ulp_ops);
+ netdev_lock(bp->dev);
+ ops = rcu_dereference(ulp->ulp_ops);
if (!ops || !ops->ulp_irq_stop)
return;
if (test_bit(BNXT_STATE_FW_RESET_DET, &bp->state))
reset = true;
ops->ulp_irq_stop(ulp->handle, reset);
+ netdev_unlock(bp->dev);
}
}
@@ -333,7 +335,8 @@ void bnxt_ulp_irq_restart(struct bnxt *bp, int err)
if (!ulp->msix_requested)
return;
- ops = rtnl_dereference(ulp->ulp_ops);
+ netdev_lock(bp->dev);
+ ops = rcu_dereference(ulp->ulp_ops);
if (!ops || !ops->ulp_irq_restart)
return;
@@ -345,6 +348,7 @@ void bnxt_ulp_irq_restart(struct bnxt *bp, int err)
bnxt_fill_msix_vecs(bp, ent);
}
ops->ulp_irq_restart(ulp->handle, ent);
+ netdev_unlock(bp->dev);
kfree(ent);
}
}
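The bnxt_ulp.c hunks above replace rtnl_lock() with the per-netdev instance lock and only dereference ulp_ops while that lock is held, so bnxt_ulp_irq_stop()/restart() cannot race with bnxt_unregister_dev(). A minimal user-space sketch of that guarded-callback idea, using a pthread mutex as a stand-in for the netdev instance lock; the struct and function names mirror the driver loosely and are illustrative only.

#include <pthread.h>
#include <stdio.h>

struct ulp_ops {
        void (*irq_stop)(void *handle);
};

struct edev {
        pthread_mutex_t lock;           /* stands in for the netdev instance lock */
        const struct ulp_ops *ops;      /* cleared when the ULP unregisters */
        void *handle;
};

static void edev_irq_stop(struct edev *e)
{
        pthread_mutex_lock(&e->lock);
        if (e->ops && e->ops->irq_stop)
                e->ops->irq_stop(e->handle);    /* cannot race with unregister */
        pthread_mutex_unlock(&e->lock);
}

static void edev_unregister(struct edev *e)
{
        pthread_mutex_lock(&e->lock);
        e->ops = NULL;                  /* published under the same lock */
        pthread_mutex_unlock(&e->lock);
}

static void demo_irq_stop(void *handle)
{
        printf("irq_stop(%p)\n", handle);
}

int main(void)
{
        static const struct ulp_ops ops = { .irq_stop = demo_irq_stop };
        struct edev e = { .lock = PTHREAD_MUTEX_INITIALIZER, .ops = &ops };

        edev_irq_stop(&e);      /* callback runs */
        edev_unregister(&e);
        edev_irq_stop(&e);      /* callback skipped */
        return 0;
}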
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
index 1467b94a6427..619f0844e778 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
@@ -257,8 +257,7 @@ bool bnxt_dev_is_vf_rep(struct net_device *dev)
/* Called when the parent PF interface is closed:
* As the mode transition from SWITCHDEV to LEGACY
- * happens under the rtnl_lock() this routine is safe
- * under the rtnl_lock()
+ * happens under the netdev instance lock, this routine is safe
*/
void bnxt_vf_reps_close(struct bnxt *bp)
{
@@ -278,8 +277,7 @@ void bnxt_vf_reps_close(struct bnxt *bp)
/* Called when the parent PF interface is opened (re-opened):
* As the mode transition from SWITCHDEV to LEGACY
- * happen under the rtnl_lock() this routine is safe
- * under the rtnl_lock()
+ * happens under the netdev instance lock, this routine is safe
*/
void bnxt_vf_reps_open(struct bnxt *bp)
{
@@ -348,7 +346,7 @@ void bnxt_vf_reps_destroy(struct bnxt *bp)
/* Ensure that parent PF's and VF-reps' RX/TX has been quiesced
* before proceeding with VF-rep cleanup.
*/
- rtnl_lock();
+ netdev_lock(bp->dev);
if (netif_running(bp->dev)) {
bnxt_close_nic(bp, false, false);
closed = true;
@@ -365,10 +363,10 @@ void bnxt_vf_reps_destroy(struct bnxt *bp)
bnxt_open_nic(bp, false, false);
bp->eswitch_mode = DEVLINK_ESWITCH_MODE_SWITCHDEV;
}
- rtnl_unlock();
+ netdev_unlock(bp->dev);
- /* Need to call vf_reps_destroy() outside of rntl_lock
- * as unregister_netdev takes rtnl_lock
+ /* Need to call vf_reps_destroy() outside of the netdev instance lock
+ * as unregister_netdev takes it
*/
__bnxt_vf_reps_destroy(bp);
}
@@ -376,7 +374,7 @@ void bnxt_vf_reps_destroy(struct bnxt *bp)
/* Free the VF-Reps in firmware, during firmware hot-reset processing.
* Note that the VF-Rep netdevs are still active (not unregistered) during
* this process. As the mode transition from SWITCHDEV to LEGACY happens
- * under the rtnl_lock() this routine is safe under the rtnl_lock().
+ * under the netdev instance lock, this routine is safe.
*/
void bnxt_vf_reps_free(struct bnxt *bp)
{
@@ -413,7 +411,7 @@ static int bnxt_alloc_vf_rep(struct bnxt *bp, struct bnxt_vf_rep *vf_rep,
/* Allocate the VF-Reps in firmware, during firmware hot-reset processing.
* Note that the VF-Rep netdevs are still active (not unregistered) during
* this process. As the mode transition from SWITCHDEV to LEGACY happens
- * under the rtnl_lock() this routine is safe under the rtnl_lock().
+ * under the netdev instance lock, this routine is safe.
*/
int bnxt_vf_reps_alloc(struct bnxt *bp)
{
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
index e6c64e4bd66c..e675611777b5 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
@@ -15,6 +15,7 @@
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/filter.h>
+#include <net/netdev_lock.h>
#include <net/page_pool/helpers.h>
#include "bnxt_hsi.h"
#include "bnxt.h"
@@ -48,8 +49,7 @@ struct bnxt_sw_tx_bd *bnxt_xmit_bd(struct bnxt *bp,
tx_buf->page = virt_to_head_page(xdp->data);
txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)];
- flags = (len << TX_BD_LEN_SHIFT) |
- ((num_frags + 1) << TX_BD_FLAGS_BD_CNT_SHIFT) |
+ flags = (len << TX_BD_LEN_SHIFT) | TX_BD_CNT(num_frags + 1) |
bnxt_lhint_arr[len >> 9];
txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
txbd->tx_bd_opaque = SET_TX_OPAQUE(bp, txr, prod, 1 + num_frags);
@@ -382,13 +382,14 @@ int bnxt_xdp_xmit(struct net_device *dev, int num_frames,
return nxmit;
}
-/* Under rtnl_lock */
static int bnxt_xdp_set(struct bnxt *bp, struct bpf_prog *prog)
{
struct net_device *dev = bp->dev;
int tx_xdp = 0, tx_cp, rc, tc;
struct bpf_prog *old;
+ netdev_assert_locked(dev);
+
if (prog && !prog->aux->xdp_has_frags &&
bp->dev->mtu > BNXT_MAX_PAGE_MODE_MTU) {
netdev_warn(dev, "MTU %d larger than %d without XDP frag support.\n",
@@ -460,23 +461,16 @@ int bnxt_xdp(struct net_device *dev, struct netdev_bpf *xdp)
struct sk_buff *
bnxt_xdp_build_skb(struct bnxt *bp, struct sk_buff *skb, u8 num_frags,
- struct page_pool *pool, struct xdp_buff *xdp,
- struct rx_cmp_ext *rxcmp1)
+ struct page_pool *pool, struct xdp_buff *xdp)
{
struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
if (!skb)
return NULL;
- skb_checksum_none_assert(skb);
- if (RX_CMP_L4_CS_OK(rxcmp1)) {
- if (bp->dev->features & NETIF_F_RXCSUM) {
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- skb->csum_level = RX_CMP_ENCAP(rxcmp1);
- }
- }
+
xdp_update_skb_shared_info(skb, num_frags,
sinfo->xdp_frags_size,
- BNXT_RX_PAGE_SIZE * sinfo->nr_frags,
+ BNXT_RX_PAGE_SIZE * num_frags,
xdp_buff_is_frag_pfmemalloc(xdp));
return skb;
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h
index 0122782400b8..220285e190fc 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h
@@ -33,6 +33,5 @@ void bnxt_xdp_buff_frags_free(struct bnxt_rx_ring_info *rxr,
struct xdp_buff *xdp);
struct sk_buff *bnxt_xdp_build_skb(struct bnxt *bp, struct sk_buff *skb,
u8 num_frags, struct page_pool *pool,
- struct xdp_buff *xdp,
- struct rx_cmp_ext *rxcmp1);
+ struct xdp_buff *xdp);
#endif
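In bnxt_xmit_bd() above, the open-coded ((num_frags + 1) << TX_BD_FLAGS_BD_CNT_SHIFT) is replaced by the TX_BD_CNT() helper. The sketch below shows the same flag-packing idea with made-up shift positions and macro names (LEN_SHIFT, BD_CNT); it is not the driver's actual descriptor layout.

#include <stdint.h>
#include <stdio.h>

#define LEN_SHIFT       16      /* made-up position of the length field */
#define BD_CNT_SHIFT    8       /* made-up position of the bd-count field */
#define BD_CNT(n)       (((uint32_t)(n) & 0x1f) << BD_CNT_SHIFT)

static uint32_t pack_flags(uint16_t len, unsigned int nr_frags)
{
        /* one descriptor for the head buffer plus one per fragment */
        return ((uint32_t)len << LEN_SHIFT) | BD_CNT(nr_frags + 1);
}

int main(void)
{
        printf("flags=0x%08x\n", pack_flags(1500, 2));
        return 0;
}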
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 3e93f957430b..73d78dcb774d 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -2,7 +2,7 @@
/*
* Broadcom GENET (Gigabit Ethernet) controller driver
*
- * Copyright (c) 2014-2024 Broadcom
+ * Copyright (c) 2014-2025 Broadcom
*/
#define pr_fmt(fmt) "bcmgenet: " fmt
@@ -41,15 +41,13 @@
#include "bcmgenet.h"
-/* Maximum number of hardware queues, downsized if needed */
-#define GENET_MAX_MQ_CNT 4
-
/* Default highest priority queue for multi queue support */
-#define GENET_Q0_PRIORITY 0
+#define GENET_Q1_PRIORITY 0
+#define GENET_Q0_PRIORITY 1
-#define GENET_Q16_RX_BD_CNT \
+#define GENET_Q0_RX_BD_CNT \
(TOTAL_DESC - priv->hw_params->rx_queues * priv->hw_params->rx_bds_per_q)
-#define GENET_Q16_TX_BD_CNT \
+#define GENET_Q0_TX_BD_CNT \
(TOTAL_DESC - priv->hw_params->tx_queues * priv->hw_params->tx_bds_per_q)
#define RX_BUF_LENGTH 2048
@@ -104,7 +102,7 @@ static inline void dmadesc_set_addr(struct bcmgenet_priv *priv,
* the platform is explicitly configured for 64-bits/LPAE.
*/
#ifdef CONFIG_PHYS_ADDR_T_64BIT
- if (priv->hw_params->flags & GENET_HAS_40BITS)
+ if (bcmgenet_has_40bits(priv))
bcmgenet_writel(upper_32_bits(addr), d + DMA_DESC_ADDRESS_HI);
#endif
}
@@ -446,33 +444,48 @@ static void bcmgenet_hfb_enable_filter(struct bcmgenet_priv *priv, u32 f_index)
u32 offset;
u32 reg;
- offset = HFB_FLT_ENABLE_V3PLUS + (f_index < 32) * sizeof(u32);
- reg = bcmgenet_hfb_reg_readl(priv, offset);
- reg |= (1 << (f_index % 32));
- bcmgenet_hfb_reg_writel(priv, reg, offset);
- reg = bcmgenet_hfb_reg_readl(priv, HFB_CTRL);
- reg |= RBUF_HFB_EN;
- bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL);
+ if (GENET_IS_V1(priv) || GENET_IS_V2(priv)) {
+ reg = bcmgenet_hfb_reg_readl(priv, HFB_CTRL);
+ reg |= (1 << ((f_index % 32) + RBUF_HFB_FILTER_EN_SHIFT)) |
+ RBUF_HFB_EN;
+ bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL);
+ } else {
+ offset = HFB_FLT_ENABLE_V3PLUS + (f_index < 32) * sizeof(u32);
+ reg = bcmgenet_hfb_reg_readl(priv, offset);
+ reg |= (1 << (f_index % 32));
+ bcmgenet_hfb_reg_writel(priv, reg, offset);
+ reg = bcmgenet_hfb_reg_readl(priv, HFB_CTRL);
+ reg |= RBUF_HFB_EN;
+ bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL);
+ }
}
static void bcmgenet_hfb_disable_filter(struct bcmgenet_priv *priv, u32 f_index)
{
u32 offset, reg, reg1;
- offset = HFB_FLT_ENABLE_V3PLUS;
- reg = bcmgenet_hfb_reg_readl(priv, offset);
- reg1 = bcmgenet_hfb_reg_readl(priv, offset + sizeof(u32));
- if (f_index < 32) {
- reg1 &= ~(1 << (f_index % 32));
- bcmgenet_hfb_reg_writel(priv, reg1, offset + sizeof(u32));
- } else {
- reg &= ~(1 << (f_index % 32));
- bcmgenet_hfb_reg_writel(priv, reg, offset);
- }
- if (!reg && !reg1) {
+ if (GENET_IS_V1(priv) || GENET_IS_V2(priv)) {
reg = bcmgenet_hfb_reg_readl(priv, HFB_CTRL);
- reg &= ~RBUF_HFB_EN;
+ reg &= ~(1 << ((f_index % 32) + RBUF_HFB_FILTER_EN_SHIFT));
+ if (!(reg & RBUF_HFB_FILTER_EN_MASK))
+ reg &= ~RBUF_HFB_EN;
bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL);
+ } else {
+ offset = HFB_FLT_ENABLE_V3PLUS;
+ reg = bcmgenet_hfb_reg_readl(priv, offset);
+ reg1 = bcmgenet_hfb_reg_readl(priv, offset + sizeof(u32));
+ if (f_index < 32) {
+ reg1 &= ~(1 << (f_index % 32));
+ bcmgenet_hfb_reg_writel(priv, reg1, offset + sizeof(u32));
+ } else {
+ reg &= ~(1 << (f_index % 32));
+ bcmgenet_hfb_reg_writel(priv, reg, offset);
+ }
+ if (!reg && !reg1) {
+ reg = bcmgenet_hfb_reg_readl(priv, HFB_CTRL);
+ reg &= ~RBUF_HFB_EN;
+ bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL);
+ }
}
}
@@ -482,6 +495,9 @@ static void bcmgenet_hfb_set_filter_rx_queue_mapping(struct bcmgenet_priv *priv,
u32 offset;
u32 reg;
+ if (GENET_IS_V1(priv) || GENET_IS_V2(priv))
+ return;
+
offset = f_index / 8;
reg = bcmgenet_rdma_readl(priv, DMA_INDEX2RING_0 + offset);
reg &= ~(0xF << (4 * (f_index % 8)));
@@ -495,9 +511,13 @@ static void bcmgenet_hfb_set_filter_length(struct bcmgenet_priv *priv,
u32 offset;
u32 reg;
- offset = HFB_FLT_LEN_V3PLUS +
- ((priv->hw_params->hfb_filter_cnt - 1 - f_index) / 4) *
- sizeof(u32);
+ if (GENET_IS_V1(priv) || GENET_IS_V2(priv))
+ offset = HFB_FLT_LEN_V2;
+ else
+ offset = HFB_FLT_LEN_V3PLUS;
+
+ offset += sizeof(u32) *
+ ((priv->hw_params->hfb_filter_cnt - 1 - f_index) / 4);
reg = bcmgenet_hfb_reg_readl(priv, offset);
reg &= ~(0xFF << (8 * (f_index % 4)));
reg |= ((f_length & 0xFF) << (8 * (f_index % 4)));
@@ -579,13 +599,13 @@ static void bcmgenet_hfb_create_rxnfc_filter(struct bcmgenet_priv *priv,
struct bcmgenet_rxnfc_rule *rule)
{
struct ethtool_rx_flow_spec *fs = &rule->fs;
- u32 offset = 0, f_length = 0, f;
+ u32 offset = 0, f_length = 0, f, q;
u8 val_8, mask_8;
__be16 val_16;
u16 mask_16;
size_t size;
- f = fs->location;
+ f = fs->location + 1;
if (fs->flow_type & FLOW_MAC_EXT) {
bcmgenet_hfb_insert_data(priv, f, 0,
&fs->h_ext.h_dest, &fs->m_ext.h_dest,
@@ -667,19 +687,16 @@ static void bcmgenet_hfb_create_rxnfc_filter(struct bcmgenet_priv *priv,
}
bcmgenet_hfb_set_filter_length(priv, f, 2 * f_length);
- if (!fs->ring_cookie || fs->ring_cookie == RX_CLS_FLOW_WAKE) {
- /* Ring 0 flows can be handled by the default Descriptor Ring
- * We'll map them to ring 0, but don't enable the filter
- */
- bcmgenet_hfb_set_filter_rx_queue_mapping(priv, f, 0);
- rule->state = BCMGENET_RXNFC_STATE_DISABLED;
- } else {
+ if (fs->ring_cookie == RX_CLS_FLOW_WAKE)
+ q = 0;
+ else if (fs->ring_cookie == RX_CLS_FLOW_DISC)
+ q = priv->hw_params->rx_queues + 1;
+ else
/* Other Rx rings are direct mapped here */
- bcmgenet_hfb_set_filter_rx_queue_mapping(priv, f,
- fs->ring_cookie);
- bcmgenet_hfb_enable_filter(priv, f);
- rule->state = BCMGENET_RXNFC_STATE_ENABLED;
- }
+ q = fs->ring_cookie;
+ bcmgenet_hfb_set_filter_rx_queue_mapping(priv, f, q);
+ bcmgenet_hfb_enable_filter(priv, f);
+ rule->state = BCMGENET_RXNFC_STATE_ENABLED;
}
/* bcmgenet_hfb_clear
@@ -690,6 +707,7 @@ static void bcmgenet_hfb_clear_filter(struct bcmgenet_priv *priv, u32 f_index)
{
u32 base, i;
+ bcmgenet_hfb_set_filter_length(priv, f_index, 0);
base = f_index * priv->hw_params->hfb_filter_size;
for (i = 0; i < priv->hw_params->hfb_filter_size; i++)
bcmgenet_hfb_writel(priv, 0x0, (base + i) * sizeof(u32));
@@ -699,22 +717,23 @@ static void bcmgenet_hfb_clear(struct bcmgenet_priv *priv)
{
u32 i;
- if (GENET_IS_V1(priv) || GENET_IS_V2(priv))
- return;
-
- bcmgenet_hfb_reg_writel(priv, 0x0, HFB_CTRL);
- bcmgenet_hfb_reg_writel(priv, 0x0, HFB_FLT_ENABLE_V3PLUS);
- bcmgenet_hfb_reg_writel(priv, 0x0, HFB_FLT_ENABLE_V3PLUS + 4);
-
- for (i = DMA_INDEX2RING_0; i <= DMA_INDEX2RING_7; i++)
- bcmgenet_rdma_writel(priv, 0x0, i);
+ bcmgenet_hfb_reg_writel(priv, 0, HFB_CTRL);
- for (i = 0; i < (priv->hw_params->hfb_filter_cnt / 4); i++)
- bcmgenet_hfb_reg_writel(priv, 0x0,
- HFB_FLT_LEN_V3PLUS + i * sizeof(u32));
+ if (!GENET_IS_V1(priv) && !GENET_IS_V2(priv)) {
+ bcmgenet_hfb_reg_writel(priv, 0,
+ HFB_FLT_ENABLE_V3PLUS);
+ bcmgenet_hfb_reg_writel(priv, 0,
+ HFB_FLT_ENABLE_V3PLUS + 4);
+ for (i = DMA_INDEX2RING_0; i <= DMA_INDEX2RING_7; i++)
+ bcmgenet_rdma_writel(priv, 0, i);
+ }
for (i = 0; i < priv->hw_params->hfb_filter_cnt; i++)
bcmgenet_hfb_clear_filter(priv, i);
+
+ /* Enable filter 0 to send default flow to ring 0 */
+ bcmgenet_hfb_set_filter_length(priv, 0, 4);
+ bcmgenet_hfb_enable_filter(priv, 0);
}
static void bcmgenet_hfb_init(struct bcmgenet_priv *priv)
@@ -722,9 +741,6 @@ static void bcmgenet_hfb_init(struct bcmgenet_priv *priv)
int i;
INIT_LIST_HEAD(&priv->rxnfc_list);
- if (GENET_IS_V1(priv) || GENET_IS_V2(priv))
- return;
-
for (i = 0; i < MAX_NUM_OF_FS_RULES; i++) {
INIT_LIST_HEAD(&priv->rxnfc_rules[i].list);
priv->rxnfc_rules[i].state = BCMGENET_RXNFC_STATE_UNUSED;
@@ -819,20 +835,16 @@ static int bcmgenet_get_coalesce(struct net_device *dev,
unsigned int i;
ec->tx_max_coalesced_frames =
- bcmgenet_tdma_ring_readl(priv, DESC_INDEX,
- DMA_MBUF_DONE_THRESH);
+ bcmgenet_tdma_ring_readl(priv, 0, DMA_MBUF_DONE_THRESH);
ec->rx_max_coalesced_frames =
- bcmgenet_rdma_ring_readl(priv, DESC_INDEX,
- DMA_MBUF_DONE_THRESH);
+ bcmgenet_rdma_ring_readl(priv, 0, DMA_MBUF_DONE_THRESH);
ec->rx_coalesce_usecs =
- bcmgenet_rdma_readl(priv, DMA_RING16_TIMEOUT) * 8192 / 1000;
+ bcmgenet_rdma_readl(priv, DMA_RING0_TIMEOUT) * 8192 / 1000;
- for (i = 0; i < priv->hw_params->rx_queues; i++) {
+ for (i = 0; i <= priv->hw_params->rx_queues; i++) {
ring = &priv->rx_rings[i];
ec->use_adaptive_rx_coalesce |= ring->dim.use_dim;
}
- ring = &priv->rx_rings[DESC_INDEX];
- ec->use_adaptive_rx_coalesce |= ring->dim.use_dim;
return 0;
}
@@ -902,17 +914,13 @@ static int bcmgenet_set_coalesce(struct net_device *dev,
/* Program all TX queues with the same values, as there is no
* ethtool knob to do coalescing on a per-queue basis
*/
- for (i = 0; i < priv->hw_params->tx_queues; i++)
+ for (i = 0; i <= priv->hw_params->tx_queues; i++)
bcmgenet_tdma_ring_writel(priv, i,
ec->tx_max_coalesced_frames,
DMA_MBUF_DONE_THRESH);
- bcmgenet_tdma_ring_writel(priv, DESC_INDEX,
- ec->tx_max_coalesced_frames,
- DMA_MBUF_DONE_THRESH);
- for (i = 0; i < priv->hw_params->rx_queues; i++)
+ for (i = 0; i <= priv->hw_params->rx_queues; i++)
bcmgenet_set_ring_rx_coalesce(&priv->rx_rings[i], ec);
- bcmgenet_set_ring_rx_coalesce(&priv->rx_rings[DESC_INDEX], ec);
return 0;
}
@@ -1120,7 +1128,7 @@ static const struct bcmgenet_stats bcmgenet_gstrings_stats[] = {
STAT_GENET_Q(1),
STAT_GENET_Q(2),
STAT_GENET_Q(3),
- STAT_GENET_Q(16),
+ STAT_GENET_Q(4),
};
#define BCMGENET_STATS_LEN ARRAY_SIZE(bcmgenet_gstrings_stats)
@@ -1438,7 +1446,8 @@ static int bcmgenet_insert_flow(struct net_device *dev,
}
if (cmd->fs.ring_cookie > priv->hw_params->rx_queues &&
- cmd->fs.ring_cookie != RX_CLS_FLOW_WAKE) {
+ cmd->fs.ring_cookie != RX_CLS_FLOW_WAKE &&
+ cmd->fs.ring_cookie != RX_CLS_FLOW_DISC) {
netdev_err(dev, "rxnfc: Unsupported action (%llu)\n",
cmd->fs.ring_cookie);
return -EINVAL;
@@ -1472,10 +1481,10 @@ static int bcmgenet_insert_flow(struct net_device *dev,
loc_rule = &priv->rxnfc_rules[cmd->fs.location];
}
if (loc_rule->state == BCMGENET_RXNFC_STATE_ENABLED)
- bcmgenet_hfb_disable_filter(priv, cmd->fs.location);
+ bcmgenet_hfb_disable_filter(priv, cmd->fs.location + 1);
if (loc_rule->state != BCMGENET_RXNFC_STATE_UNUSED) {
list_del(&loc_rule->list);
- bcmgenet_hfb_clear_filter(priv, cmd->fs.location);
+ bcmgenet_hfb_clear_filter(priv, cmd->fs.location + 1);
}
loc_rule->state = BCMGENET_RXNFC_STATE_UNUSED;
memcpy(&loc_rule->fs, &cmd->fs,
@@ -1505,10 +1514,10 @@ static int bcmgenet_delete_flow(struct net_device *dev,
}
if (rule->state == BCMGENET_RXNFC_STATE_ENABLED)
- bcmgenet_hfb_disable_filter(priv, cmd->fs.location);
+ bcmgenet_hfb_disable_filter(priv, cmd->fs.location + 1);
if (rule->state != BCMGENET_RXNFC_STATE_UNUSED) {
list_del(&rule->list);
- bcmgenet_hfb_clear_filter(priv, cmd->fs.location);
+ bcmgenet_hfb_clear_filter(priv, cmd->fs.location + 1);
}
rule->state = BCMGENET_RXNFC_STATE_UNUSED;
memset(&rule->fs, 0, sizeof(struct ethtool_rx_flow_spec));
@@ -1651,9 +1660,9 @@ static int bcmgenet_power_down(struct bcmgenet_priv *priv,
case GENET_POWER_PASSIVE:
/* Power down LED */
- if (priv->hw_params->flags & GENET_HAS_EXT) {
+ if (bcmgenet_has_ext(priv)) {
reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
- if (GENET_IS_V5(priv) && !priv->ephy_16nm)
+ if (GENET_IS_V5(priv) && !bcmgenet_has_ephy_16nm(priv))
reg |= EXT_PWR_DOWN_PHY_EN |
EXT_PWR_DOWN_PHY_RD |
EXT_PWR_DOWN_PHY_SD |
@@ -1676,13 +1685,14 @@ static int bcmgenet_power_down(struct bcmgenet_priv *priv,
return ret;
}
-static void bcmgenet_power_up(struct bcmgenet_priv *priv,
- enum bcmgenet_power_mode mode)
+static int bcmgenet_power_up(struct bcmgenet_priv *priv,
+ enum bcmgenet_power_mode mode)
{
+ int ret = 0;
u32 reg;
- if (!(priv->hw_params->flags & GENET_HAS_EXT))
- return;
+ if (!bcmgenet_has_ext(priv))
+ return ret;
reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
@@ -1690,7 +1700,7 @@ static void bcmgenet_power_up(struct bcmgenet_priv *priv,
case GENET_POWER_PASSIVE:
reg &= ~(EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_BIAS |
EXT_ENERGY_DET_MASK);
- if (GENET_IS_V5(priv) && !priv->ephy_16nm) {
+ if (GENET_IS_V5(priv) && !bcmgenet_has_ephy_16nm(priv)) {
reg &= ~(EXT_PWR_DOWN_PHY_EN |
EXT_PWR_DOWN_PHY_RD |
EXT_PWR_DOWN_PHY_SD |
@@ -1718,11 +1728,13 @@ static void bcmgenet_power_up(struct bcmgenet_priv *priv,
}
break;
case GENET_POWER_WOL_MAGIC:
- bcmgenet_wol_power_up_cfg(priv, mode);
- return;
+ ret = bcmgenet_wol_power_up_cfg(priv, mode);
+ break;
default:
break;
}
+
+ return ret;
}
static struct enet_cb *bcmgenet_get_txcb(struct bcmgenet_priv *priv,
@@ -1759,18 +1771,6 @@ static struct enet_cb *bcmgenet_put_txcb(struct bcmgenet_priv *priv,
return tx_cb_ptr;
}
-static inline void bcmgenet_rx_ring16_int_disable(struct bcmgenet_rx_ring *ring)
-{
- bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_RXDMA_DONE,
- INTRL2_CPU_MASK_SET);
-}
-
-static inline void bcmgenet_rx_ring16_int_enable(struct bcmgenet_rx_ring *ring)
-{
- bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_RXDMA_DONE,
- INTRL2_CPU_MASK_CLEAR);
-}
-
static inline void bcmgenet_rx_ring_int_disable(struct bcmgenet_rx_ring *ring)
{
bcmgenet_intrl2_1_writel(ring->priv,
@@ -1785,18 +1785,6 @@ static inline void bcmgenet_rx_ring_int_enable(struct bcmgenet_rx_ring *ring)
INTRL2_CPU_MASK_CLEAR);
}
-static inline void bcmgenet_tx_ring16_int_disable(struct bcmgenet_tx_ring *ring)
-{
- bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_TXDMA_DONE,
- INTRL2_CPU_MASK_SET);
-}
-
-static inline void bcmgenet_tx_ring16_int_enable(struct bcmgenet_tx_ring *ring)
-{
- bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_TXDMA_DONE,
- INTRL2_CPU_MASK_CLEAR);
-}
-
static inline void bcmgenet_tx_ring_int_enable(struct bcmgenet_tx_ring *ring)
{
bcmgenet_intrl2_1_writel(ring->priv, 1 << ring->index,
@@ -1877,12 +1865,7 @@ static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
struct sk_buff *skb;
/* Clear status before servicing to reduce spurious interrupts */
- if (ring->index == DESC_INDEX)
- bcmgenet_intrl2_0_writel(priv, UMAC_IRQ_TXDMA_DONE,
- INTRL2_CPU_CLEAR);
- else
- bcmgenet_intrl2_1_writel(priv, (1 << ring->index),
- INTRL2_CPU_CLEAR);
+ bcmgenet_intrl2_1_writel(priv, (1 << ring->index), INTRL2_CPU_CLEAR);
/* Compute how many buffers are transmitted since last xmit call */
c_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_CONS_INDEX)
@@ -1916,19 +1899,46 @@ static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
ring->packets += pkts_compl;
ring->bytes += bytes_compl;
- netdev_tx_completed_queue(netdev_get_tx_queue(dev, ring->queue),
+ netdev_tx_completed_queue(netdev_get_tx_queue(dev, ring->index),
pkts_compl, bytes_compl);
return txbds_processed;
}
static unsigned int bcmgenet_tx_reclaim(struct net_device *dev,
- struct bcmgenet_tx_ring *ring)
+ struct bcmgenet_tx_ring *ring,
+ bool all)
{
- unsigned int released;
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+ struct device *kdev = &priv->pdev->dev;
+ unsigned int released, drop, wr_ptr;
+ struct enet_cb *cb_ptr;
+ struct sk_buff *skb;
spin_lock_bh(&ring->lock);
released = __bcmgenet_tx_reclaim(dev, ring);
+ if (all) {
+ skb = NULL;
+ drop = (ring->prod_index - ring->c_index) & DMA_C_INDEX_MASK;
+ released += drop;
+ ring->prod_index = ring->c_index & DMA_C_INDEX_MASK;
+ while (drop--) {
+ cb_ptr = bcmgenet_put_txcb(priv, ring);
+ skb = cb_ptr->skb;
+ bcmgenet_free_tx_cb(kdev, cb_ptr);
+ if (skb && cb_ptr == GENET_CB(skb)->first_cb) {
+ dev_consume_skb_any(skb);
+ skb = NULL;
+ }
+ }
+ if (skb)
+ dev_consume_skb_any(skb);
+ bcmgenet_tdma_ring_writel(priv, ring->index,
+ ring->prod_index, TDMA_PROD_INDEX);
+ wr_ptr = ring->write_ptr * WORDS_PER_BD(priv);
+ bcmgenet_tdma_ring_writel(priv, ring->index, wr_ptr,
+ TDMA_WRITE_PTR);
+ }
spin_unlock_bh(&ring->lock);
return released;
@@ -1944,14 +1954,14 @@ static int bcmgenet_tx_poll(struct napi_struct *napi, int budget)
spin_lock(&ring->lock);
work_done = __bcmgenet_tx_reclaim(ring->priv->dev, ring);
if (ring->free_bds > (MAX_SKB_FRAGS + 1)) {
- txq = netdev_get_tx_queue(ring->priv->dev, ring->queue);
+ txq = netdev_get_tx_queue(ring->priv->dev, ring->index);
netif_tx_wake_queue(txq);
}
spin_unlock(&ring->lock);
if (work_done == 0) {
napi_complete(napi);
- ring->int_enable(ring);
+ bcmgenet_tx_ring_int_enable(ring);
return 0;
}
@@ -1962,14 +1972,11 @@ static int bcmgenet_tx_poll(struct napi_struct *napi, int budget)
static void bcmgenet_tx_reclaim_all(struct net_device *dev)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
- int i;
-
- if (netif_is_multiqueue(dev)) {
- for (i = 0; i < priv->hw_params->tx_queues; i++)
- bcmgenet_tx_reclaim(dev, &priv->tx_rings[i]);
- }
+ int i = 0;
- bcmgenet_tx_reclaim(dev, &priv->tx_rings[DESC_INDEX]);
+ do {
+ bcmgenet_tx_reclaim(dev, &priv->tx_rings[i++], true);
+ } while (i <= priv->hw_params->tx_queues && netif_is_multiqueue(dev));
}
/* Reallocate the SKB to put enough headroom in front of it and insert
@@ -2057,19 +2064,14 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
index = skb_get_queue_mapping(skb);
/* Mapping strategy:
- * queue_mapping = 0, unclassified, packet xmited through ring16
- * queue_mapping = 1, goes to ring 0. (highest priority queue
- * queue_mapping = 2, goes to ring 1.
- * queue_mapping = 3, goes to ring 2.
- * queue_mapping = 4, goes to ring 3.
+ * queue_mapping = 0, unclassified, packet xmited through ring 0
+ * queue_mapping = 1, goes to ring 1. (highest priority queue)
+ * queue_mapping = 2, goes to ring 2.
+ * queue_mapping = 3, goes to ring 3.
+ * queue_mapping = 4, goes to ring 4.
*/
- if (index == 0)
- index = DESC_INDEX;
- else
- index -= 1;
-
ring = &priv->tx_rings[index];
- txq = netdev_get_tx_queue(dev, ring->queue);
+ txq = netdev_get_tx_queue(dev, index);
nr_frags = skb_shinfo(skb)->nr_frags;
@@ -2242,15 +2244,8 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
unsigned int discards;
/* Clear status before servicing to reduce spurious interrupts */
- if (ring->index == DESC_INDEX) {
- bcmgenet_intrl2_0_writel(priv, UMAC_IRQ_RXDMA_DONE,
- INTRL2_CPU_CLEAR);
- } else {
- mask = 1 << (UMAC_IRQ1_RX_INTR_SHIFT + ring->index);
- bcmgenet_intrl2_1_writel(priv,
- mask,
- INTRL2_CPU_CLEAR);
- }
+ mask = 1 << (UMAC_IRQ1_RX_INTR_SHIFT + ring->index);
+ bcmgenet_intrl2_1_writel(priv, mask, INTRL2_CPU_CLEAR);
p_index = bcmgenet_rdma_ring_readl(priv, ring->index, RDMA_PROD_INDEX);
@@ -2399,7 +2394,7 @@ static int bcmgenet_rx_poll(struct napi_struct *napi, int budget)
if (work_done < budget) {
napi_complete_done(napi, work_done);
- ring->int_enable(ring);
+ bcmgenet_rx_ring_int_enable(ring);
}
if (ring->dim.use_dim) {
@@ -2523,7 +2518,7 @@ static void bcmgenet_link_intr_enable(struct bcmgenet_priv *priv)
} else if (priv->ext_phy) {
int0_enable |= UMAC_IRQ_LINK_EVENT;
} else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) {
- if (priv->hw_params->flags & GENET_HAS_MOCA_LINK_DET)
+ if (bcmgenet_has_moca_link_det(priv))
int0_enable |= UMAC_IRQ_LINK_EVENT;
}
bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR);
@@ -2588,8 +2583,8 @@ static void init_umac(struct bcmgenet_priv *priv)
}
/* Enable MDIO interrupts on GENET v3+ */
- if (priv->hw_params->flags & GENET_HAS_MDIO_INTR)
- int0_enable |= (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR);
+ if (bcmgenet_has_mdio_intr(priv))
+ int0_enable |= UMAC_IRQ_MDIO_EVENT;
bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR);
@@ -2639,15 +2634,6 @@ static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv,
spin_lock_init(&ring->lock);
ring->priv = priv;
ring->index = index;
- if (index == DESC_INDEX) {
- ring->queue = 0;
- ring->int_enable = bcmgenet_tx_ring16_int_enable;
- ring->int_disable = bcmgenet_tx_ring16_int_disable;
- } else {
- ring->queue = index + 1;
- ring->int_enable = bcmgenet_tx_ring_int_enable;
- ring->int_disable = bcmgenet_tx_ring_int_disable;
- }
ring->cbs = priv->tx_cbs + start_ptr;
ring->size = size;
ring->clean_ptr = start_ptr;
@@ -2658,8 +2644,8 @@ static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv,
ring->end_ptr = end_ptr - 1;
ring->prod_index = 0;
- /* Set flow period for ring != 16 */
- if (index != DESC_INDEX)
+ /* Set flow period for ring != 0 */
+ if (index)
flow_period_val = ENET_MAX_MTU_SIZE << 16;
bcmgenet_tdma_ring_writel(priv, index, 0, TDMA_PROD_INDEX);
@@ -2697,13 +2683,6 @@ static int bcmgenet_init_rx_ring(struct bcmgenet_priv *priv,
ring->priv = priv;
ring->index = index;
- if (index == DESC_INDEX) {
- ring->int_enable = bcmgenet_rx_ring16_int_enable;
- ring->int_disable = bcmgenet_rx_ring16_int_disable;
- } else {
- ring->int_enable = bcmgenet_rx_ring_int_enable;
- ring->int_disable = bcmgenet_rx_ring_int_disable;
- }
ring->cbs = priv->rx_cbs + start_ptr;
ring->size = size;
ring->c_index = 0;
@@ -2749,15 +2728,11 @@ static void bcmgenet_enable_tx_napi(struct bcmgenet_priv *priv)
unsigned int i;
struct bcmgenet_tx_ring *ring;
- for (i = 0; i < priv->hw_params->tx_queues; ++i) {
+ for (i = 0; i <= priv->hw_params->tx_queues; ++i) {
ring = &priv->tx_rings[i];
napi_enable(&ring->napi);
- ring->int_enable(ring);
+ bcmgenet_tx_ring_int_enable(ring);
}
-
- ring = &priv->tx_rings[DESC_INDEX];
- napi_enable(&ring->napi);
- ring->int_enable(ring);
}
static void bcmgenet_disable_tx_napi(struct bcmgenet_priv *priv)
@@ -2765,13 +2740,10 @@ static void bcmgenet_disable_tx_napi(struct bcmgenet_priv *priv)
unsigned int i;
struct bcmgenet_tx_ring *ring;
- for (i = 0; i < priv->hw_params->tx_queues; ++i) {
+ for (i = 0; i <= priv->hw_params->tx_queues; ++i) {
ring = &priv->tx_rings[i];
napi_disable(&ring->napi);
}
-
- ring = &priv->tx_rings[DESC_INDEX];
- napi_disable(&ring->napi);
}
static void bcmgenet_fini_tx_napi(struct bcmgenet_priv *priv)
@@ -2779,82 +2751,104 @@ static void bcmgenet_fini_tx_napi(struct bcmgenet_priv *priv)
unsigned int i;
struct bcmgenet_tx_ring *ring;
- for (i = 0; i < priv->hw_params->tx_queues; ++i) {
+ for (i = 0; i <= priv->hw_params->tx_queues; ++i) {
ring = &priv->tx_rings[i];
netif_napi_del(&ring->napi);
}
+}
- ring = &priv->tx_rings[DESC_INDEX];
- netif_napi_del(&ring->napi);
+static int bcmgenet_tdma_disable(struct bcmgenet_priv *priv)
+{
+ int timeout = 0;
+ u32 reg, mask;
+
+ reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
+ mask = (1 << (priv->hw_params->tx_queues + 1)) - 1;
+ mask = (mask << DMA_RING_BUF_EN_SHIFT) | DMA_EN;
+ reg &= ~mask;
+ bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
+
+ /* Check DMA status register to confirm DMA is disabled */
+ while (timeout++ < DMA_TIMEOUT_VAL) {
+ reg = bcmgenet_tdma_readl(priv, DMA_STATUS);
+ if ((reg & mask) == mask)
+ return 0;
+
+ udelay(1);
+ }
+
+ return -ETIMEDOUT;
+}
+
+static int bcmgenet_rdma_disable(struct bcmgenet_priv *priv)
+{
+ int timeout = 0;
+ u32 reg, mask;
+
+ reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
+ mask = (1 << (priv->hw_params->rx_queues + 1)) - 1;
+ mask = (mask << DMA_RING_BUF_EN_SHIFT) | DMA_EN;
+ reg &= ~mask;
+ bcmgenet_rdma_writel(priv, reg, DMA_CTRL);
+
+ /* Check DMA status register to confirm DMA is disabled */
+ while (timeout++ < DMA_TIMEOUT_VAL) {
+ reg = bcmgenet_rdma_readl(priv, DMA_STATUS);
+ if ((reg & mask) == mask)
+ return 0;
+
+ udelay(1);
+ }
+
+ return -ETIMEDOUT;
}
/* Initialize Tx queues
*
- * Queues 0-3 are priority-based, each one has 32 descriptors,
- * with queue 0 being the highest priority queue.
+ * Queues 1-4 are priority-based, each one has 32 descriptors,
+ * with queue 1 being the highest priority queue.
*
- * Queue 16 is the default Tx queue with
- * GENET_Q16_TX_BD_CNT = 256 - 4 * 32 = 128 descriptors.
+ * Queue 0 is the default Tx queue with
+ * GENET_Q0_TX_BD_CNT = 256 - 4 * 32 = 128 descriptors.
*
* The transmit control block pool is then partitioned as follows:
- * - Tx queue 0 uses tx_cbs[0..31]
- * - Tx queue 1 uses tx_cbs[32..63]
- * - Tx queue 2 uses tx_cbs[64..95]
- * - Tx queue 3 uses tx_cbs[96..127]
- * - Tx queue 16 uses tx_cbs[128..255]
+ * - Tx queue 0 uses tx_cbs[0..127]
+ * - Tx queue 1 uses tx_cbs[128..159]
+ * - Tx queue 2 uses tx_cbs[160..191]
+ * - Tx queue 3 uses tx_cbs[192..223]
+ * - Tx queue 4 uses tx_cbs[224..255]
*/
static void bcmgenet_init_tx_queues(struct net_device *dev)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
- u32 i, dma_enable;
- u32 dma_ctrl, ring_cfg;
- u32 dma_priority[3] = {0, 0, 0};
-
- dma_ctrl = bcmgenet_tdma_readl(priv, DMA_CTRL);
- dma_enable = dma_ctrl & DMA_EN;
- dma_ctrl &= ~DMA_EN;
- bcmgenet_tdma_writel(priv, dma_ctrl, DMA_CTRL);
-
- dma_ctrl = 0;
- ring_cfg = 0;
+ unsigned int start = 0, end = GENET_Q0_TX_BD_CNT;
+ u32 i, ring_mask, dma_priority[3] = {0, 0, 0};
/* Enable strict priority arbiter mode */
bcmgenet_tdma_writel(priv, DMA_ARBITER_SP, DMA_ARB_CTRL);
/* Initialize Tx priority queues */
- for (i = 0; i < priv->hw_params->tx_queues; i++) {
- bcmgenet_init_tx_ring(priv, i, priv->hw_params->tx_bds_per_q,
- i * priv->hw_params->tx_bds_per_q,
- (i + 1) * priv->hw_params->tx_bds_per_q);
- ring_cfg |= (1 << i);
- dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
+ for (i = 0; i <= priv->hw_params->tx_queues; i++) {
+ bcmgenet_init_tx_ring(priv, i, end - start, start, end);
+ start = end;
+ end += priv->hw_params->tx_bds_per_q;
dma_priority[DMA_PRIO_REG_INDEX(i)] |=
- ((GENET_Q0_PRIORITY + i) << DMA_PRIO_REG_SHIFT(i));
+ (i ? GENET_Q1_PRIORITY : GENET_Q0_PRIORITY)
+ << DMA_PRIO_REG_SHIFT(i);
}
- /* Initialize Tx default queue 16 */
- bcmgenet_init_tx_ring(priv, DESC_INDEX, GENET_Q16_TX_BD_CNT,
- priv->hw_params->tx_queues *
- priv->hw_params->tx_bds_per_q,
- TOTAL_DESC);
- ring_cfg |= (1 << DESC_INDEX);
- dma_ctrl |= (1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT));
- dma_priority[DMA_PRIO_REG_INDEX(DESC_INDEX)] |=
- ((GENET_Q0_PRIORITY + priv->hw_params->tx_queues) <<
- DMA_PRIO_REG_SHIFT(DESC_INDEX));
-
/* Set Tx queue priorities */
bcmgenet_tdma_writel(priv, dma_priority[0], DMA_PRIORITY_0);
bcmgenet_tdma_writel(priv, dma_priority[1], DMA_PRIORITY_1);
bcmgenet_tdma_writel(priv, dma_priority[2], DMA_PRIORITY_2);
- /* Enable Tx queues */
- bcmgenet_tdma_writel(priv, ring_cfg, DMA_RING_CFG);
+ /* Configure Tx queues as descriptor rings */
+ ring_mask = (1 << (priv->hw_params->tx_queues + 1)) - 1;
+ bcmgenet_tdma_writel(priv, ring_mask, DMA_RING_CFG);
- /* Enable Tx DMA */
- if (dma_enable)
- dma_ctrl |= DMA_EN;
- bcmgenet_tdma_writel(priv, dma_ctrl, DMA_CTRL);
+ /* Enable Tx rings */
+ ring_mask <<= DMA_RING_BUF_EN_SHIFT;
+ bcmgenet_tdma_writel(priv, ring_mask, DMA_CTRL);
}
static void bcmgenet_enable_rx_napi(struct bcmgenet_priv *priv)
@@ -2862,15 +2856,11 @@ static void bcmgenet_enable_rx_napi(struct bcmgenet_priv *priv)
unsigned int i;
struct bcmgenet_rx_ring *ring;
- for (i = 0; i < priv->hw_params->rx_queues; ++i) {
+ for (i = 0; i <= priv->hw_params->rx_queues; ++i) {
ring = &priv->rx_rings[i];
napi_enable(&ring->napi);
- ring->int_enable(ring);
+ bcmgenet_rx_ring_int_enable(ring);
}
-
- ring = &priv->rx_rings[DESC_INDEX];
- napi_enable(&ring->napi);
- ring->int_enable(ring);
}
static void bcmgenet_disable_rx_napi(struct bcmgenet_priv *priv)
@@ -2878,15 +2868,11 @@ static void bcmgenet_disable_rx_napi(struct bcmgenet_priv *priv)
unsigned int i;
struct bcmgenet_rx_ring *ring;
- for (i = 0; i < priv->hw_params->rx_queues; ++i) {
+ for (i = 0; i <= priv->hw_params->rx_queues; ++i) {
ring = &priv->rx_rings[i];
napi_disable(&ring->napi);
cancel_work_sync(&ring->dim.dim.work);
}
-
- ring = &priv->rx_rings[DESC_INDEX];
- napi_disable(&ring->napi);
- cancel_work_sync(&ring->dim.dim.work);
}
static void bcmgenet_fini_rx_napi(struct bcmgenet_priv *priv)
@@ -2894,13 +2880,10 @@ static void bcmgenet_fini_rx_napi(struct bcmgenet_priv *priv)
unsigned int i;
struct bcmgenet_rx_ring *ring;
- for (i = 0; i < priv->hw_params->rx_queues; ++i) {
+ for (i = 0; i <= priv->hw_params->rx_queues; ++i) {
ring = &priv->rx_rings[i];
netif_napi_del(&ring->napi);
}
-
- ring = &priv->rx_rings[DESC_INDEX];
- netif_napi_del(&ring->napi);
}
/* Initialize Rx queues
@@ -2908,57 +2891,32 @@ static void bcmgenet_fini_rx_napi(struct bcmgenet_priv *priv)
* Queues 0-15 are priority queues. Hardware Filtering Block (HFB) can be
* used to direct traffic to these queues.
*
- * Queue 16 is the default Rx queue with GENET_Q16_RX_BD_CNT descriptors.
+ * Queue 0 is also the default Rx queue with GENET_Q0_RX_BD_CNT descriptors.
*/
static int bcmgenet_init_rx_queues(struct net_device *dev)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
- u32 i;
- u32 dma_enable;
- u32 dma_ctrl;
- u32 ring_cfg;
+ unsigned int start = 0, end = GENET_Q0_RX_BD_CNT;
+ u32 i, ring_mask;
int ret;
- dma_ctrl = bcmgenet_rdma_readl(priv, DMA_CTRL);
- dma_enable = dma_ctrl & DMA_EN;
- dma_ctrl &= ~DMA_EN;
- bcmgenet_rdma_writel(priv, dma_ctrl, DMA_CTRL);
-
- dma_ctrl = 0;
- ring_cfg = 0;
-
/* Initialize Rx priority queues */
- for (i = 0; i < priv->hw_params->rx_queues; i++) {
- ret = bcmgenet_init_rx_ring(priv, i,
- priv->hw_params->rx_bds_per_q,
- i * priv->hw_params->rx_bds_per_q,
- (i + 1) *
- priv->hw_params->rx_bds_per_q);
+ for (i = 0; i <= priv->hw_params->rx_queues; i++) {
+ ret = bcmgenet_init_rx_ring(priv, i, end - start, start, end);
if (ret)
return ret;
- ring_cfg |= (1 << i);
- dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
+ start = end;
+ end += priv->hw_params->rx_bds_per_q;
}
- /* Initialize Rx default queue 16 */
- ret = bcmgenet_init_rx_ring(priv, DESC_INDEX, GENET_Q16_RX_BD_CNT,
- priv->hw_params->rx_queues *
- priv->hw_params->rx_bds_per_q,
- TOTAL_DESC);
- if (ret)
- return ret;
-
- ring_cfg |= (1 << DESC_INDEX);
- dma_ctrl |= (1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT));
-
- /* Enable rings */
- bcmgenet_rdma_writel(priv, ring_cfg, DMA_RING_CFG);
+ /* Configure Rx queues as descriptor rings */
+ ring_mask = (1 << (priv->hw_params->rx_queues + 1)) - 1;
+ bcmgenet_rdma_writel(priv, ring_mask, DMA_RING_CFG);
- /* Configure ring as descriptor ring and re-enable DMA if enabled */
- if (dma_enable)
- dma_ctrl |= DMA_EN;
- bcmgenet_rdma_writel(priv, dma_ctrl, DMA_CTRL);
+ /* Enable Rx rings */
+ ring_mask <<= DMA_RING_BUF_EN_SHIFT;
+ bcmgenet_rdma_writel(priv, ring_mask, DMA_CTRL);
return 0;
}
@@ -2966,26 +2924,9 @@ static int bcmgenet_init_rx_queues(struct net_device *dev)
static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv)
{
int ret = 0;
- int timeout = 0;
- u32 reg;
- u32 dma_ctrl;
- int i;
/* Disable TDMA to stop add more frames in TX DMA */
- reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
- reg &= ~DMA_EN;
- bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
-
- /* Check TDMA status register to confirm TDMA is disabled */
- while (timeout++ < DMA_TIMEOUT_VAL) {
- reg = bcmgenet_tdma_readl(priv, DMA_STATUS);
- if (reg & DMA_DISABLED)
- break;
-
- udelay(1);
- }
-
- if (timeout == DMA_TIMEOUT_VAL) {
+ if (-ETIMEDOUT == bcmgenet_tdma_disable(priv)) {
netdev_warn(priv->dev, "Timed out while disabling TX DMA\n");
ret = -ETIMEDOUT;
}
@@ -2994,39 +2935,11 @@ static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv)
usleep_range(10000, 20000);
/* Disable RDMA */
- reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
- reg &= ~DMA_EN;
- bcmgenet_rdma_writel(priv, reg, DMA_CTRL);
-
- timeout = 0;
- /* Check RDMA status register to confirm RDMA is disabled */
- while (timeout++ < DMA_TIMEOUT_VAL) {
- reg = bcmgenet_rdma_readl(priv, DMA_STATUS);
- if (reg & DMA_DISABLED)
- break;
-
- udelay(1);
- }
-
- if (timeout == DMA_TIMEOUT_VAL) {
+ if (-ETIMEDOUT == bcmgenet_rdma_disable(priv)) {
netdev_warn(priv->dev, "Timed out while disabling RX DMA\n");
ret = -ETIMEDOUT;
}
- dma_ctrl = 0;
- for (i = 0; i < priv->hw_params->rx_queues; i++)
- dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
- reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
- reg &= ~dma_ctrl;
- bcmgenet_rdma_writel(priv, reg, DMA_CTRL);
-
- dma_ctrl = 0;
- for (i = 0; i < priv->hw_params->tx_queues; i++)
- dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
- reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
- reg &= ~dma_ctrl;
- bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
-
return ret;
}
@@ -3038,32 +2951,53 @@ static void bcmgenet_fini_dma(struct bcmgenet_priv *priv)
bcmgenet_fini_rx_napi(priv);
bcmgenet_fini_tx_napi(priv);
- for (i = 0; i < priv->num_tx_bds; i++)
- dev_kfree_skb(bcmgenet_free_tx_cb(&priv->pdev->dev,
- priv->tx_cbs + i));
-
- for (i = 0; i < priv->hw_params->tx_queues; i++) {
- txq = netdev_get_tx_queue(priv->dev, priv->tx_rings[i].queue);
+ for (i = 0; i <= priv->hw_params->tx_queues; i++) {
+ txq = netdev_get_tx_queue(priv->dev, i);
netdev_tx_reset_queue(txq);
}
- txq = netdev_get_tx_queue(priv->dev, priv->tx_rings[DESC_INDEX].queue);
- netdev_tx_reset_queue(txq);
-
bcmgenet_free_rx_buffers(priv);
kfree(priv->rx_cbs);
kfree(priv->tx_cbs);
}
/* init_edma: Initialize DMA control register */
-static int bcmgenet_init_dma(struct bcmgenet_priv *priv)
+static int bcmgenet_init_dma(struct bcmgenet_priv *priv, bool flush_rx)
{
- int ret;
- unsigned int i;
struct enet_cb *cb;
+ unsigned int i;
+ int ret;
+ u32 reg;
netif_dbg(priv, hw, priv->dev, "%s\n", __func__);
+ /* Disable TX DMA */
+ ret = bcmgenet_tdma_disable(priv);
+ if (ret) {
+ netdev_err(priv->dev, "failed to halt Tx DMA\n");
+ return ret;
+ }
+
+ /* Disable RX DMA */
+ ret = bcmgenet_rdma_disable(priv);
+ if (ret) {
+ netdev_err(priv->dev, "failed to halt Rx DMA\n");
+ return ret;
+ }
+
+ /* Flush TX queues */
+ bcmgenet_umac_writel(priv, 1, UMAC_TX_FLUSH);
+ udelay(10);
+ bcmgenet_umac_writel(priv, 0, UMAC_TX_FLUSH);
+
+ if (flush_rx) {
+ reg = bcmgenet_rbuf_ctrl_get(priv);
+ bcmgenet_rbuf_ctrl_set(priv, reg | BIT(0));
+ udelay(10);
+ bcmgenet_rbuf_ctrl_set(priv, reg);
+ udelay(10);
+ }
+
/* Initialize common Rx ring structures */
priv->rx_bds = priv->base + priv->hw_params->rdma_offset;
priv->num_rx_bds = TOTAL_DESC;
@@ -3113,6 +3047,15 @@ static int bcmgenet_init_dma(struct bcmgenet_priv *priv)
/* Initialize Tx queues */
bcmgenet_init_tx_queues(priv->dev);
+ /* Enable RX/TX DMA */
+ reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
+ reg |= DMA_EN;
+ bcmgenet_rdma_writel(priv, reg, DMA_CTRL);
+
+ reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
+ reg |= DMA_EN;
+ bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
+
return 0;
}
@@ -3142,7 +3085,7 @@ static void bcmgenet_irq_task(struct work_struct *work)
}
-/* bcmgenet_isr1: handle Rx and Tx priority queues */
+/* bcmgenet_isr1: handle Rx and Tx queues */
static irqreturn_t bcmgenet_isr1(int irq, void *dev_id)
{
struct bcmgenet_priv *priv = dev_id;
@@ -3161,7 +3104,7 @@ static irqreturn_t bcmgenet_isr1(int irq, void *dev_id)
"%s: IRQ=0x%x\n", __func__, status);
/* Check Rx priority queue interrupts */
- for (index = 0; index < priv->hw_params->rx_queues; index++) {
+ for (index = 0; index <= priv->hw_params->rx_queues; index++) {
if (!(status & BIT(UMAC_IRQ1_RX_INTR_SHIFT + index)))
continue;
@@ -3169,20 +3112,20 @@ static irqreturn_t bcmgenet_isr1(int irq, void *dev_id)
rx_ring->dim.event_ctr++;
if (likely(napi_schedule_prep(&rx_ring->napi))) {
- rx_ring->int_disable(rx_ring);
+ bcmgenet_rx_ring_int_disable(rx_ring);
__napi_schedule_irqoff(&rx_ring->napi);
}
}
/* Check Tx priority queue interrupts */
- for (index = 0; index < priv->hw_params->tx_queues; index++) {
+ for (index = 0; index <= priv->hw_params->tx_queues; index++) {
if (!(status & BIT(index)))
continue;
tx_ring = &priv->tx_rings[index];
if (likely(napi_schedule_prep(&tx_ring->napi))) {
- tx_ring->int_disable(tx_ring);
+ bcmgenet_tx_ring_int_disable(tx_ring);
__napi_schedule_irqoff(&tx_ring->napi);
}
}
@@ -3190,12 +3133,10 @@ static irqreturn_t bcmgenet_isr1(int irq, void *dev_id)
return IRQ_HANDLED;
}
-/* bcmgenet_isr0: handle Rx and Tx default queues + other stuff */
+/* bcmgenet_isr0: handle other stuff */
static irqreturn_t bcmgenet_isr0(int irq, void *dev_id)
{
struct bcmgenet_priv *priv = dev_id;
- struct bcmgenet_rx_ring *rx_ring;
- struct bcmgenet_tx_ring *tx_ring;
unsigned int status;
unsigned long flags;
@@ -3209,29 +3150,8 @@ static irqreturn_t bcmgenet_isr0(int irq, void *dev_id)
netif_dbg(priv, intr, priv->dev,
"IRQ=0x%x\n", status);
- if (status & UMAC_IRQ_RXDMA_DONE) {
- rx_ring = &priv->rx_rings[DESC_INDEX];
- rx_ring->dim.event_ctr++;
-
- if (likely(napi_schedule_prep(&rx_ring->napi))) {
- rx_ring->int_disable(rx_ring);
- __napi_schedule_irqoff(&rx_ring->napi);
- }
- }
-
- if (status & UMAC_IRQ_TXDMA_DONE) {
- tx_ring = &priv->tx_rings[DESC_INDEX];
-
- if (likely(napi_schedule_prep(&tx_ring->napi))) {
- tx_ring->int_disable(tx_ring);
- __napi_schedule_irqoff(&tx_ring->napi);
- }
- }
-
- if ((priv->hw_params->flags & GENET_HAS_MDIO_INTR) &&
- status & (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR)) {
+ if (bcmgenet_has_mdio_intr(priv) && status & UMAC_IRQ_MDIO_EVENT)
wake_up(&priv->wq);
- }
/* all other interested interrupts handled in bottom half */
status &= (UMAC_IRQ_LINK_EVENT | UMAC_IRQ_PHY_DET_R);
@@ -3285,56 +3205,6 @@ static void bcmgenet_get_hw_addr(struct bcmgenet_priv *priv,
put_unaligned_be16(addr_tmp, &addr[4]);
}
-/* Returns a reusable dma control register value */
-static u32 bcmgenet_dma_disable(struct bcmgenet_priv *priv, bool flush_rx)
-{
- unsigned int i;
- u32 reg;
- u32 dma_ctrl;
-
- /* disable DMA */
- dma_ctrl = 1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT) | DMA_EN;
- for (i = 0; i < priv->hw_params->tx_queues; i++)
- dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
- reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
- reg &= ~dma_ctrl;
- bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
-
- dma_ctrl = 1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT) | DMA_EN;
- for (i = 0; i < priv->hw_params->rx_queues; i++)
- dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
- reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
- reg &= ~dma_ctrl;
- bcmgenet_rdma_writel(priv, reg, DMA_CTRL);
-
- bcmgenet_umac_writel(priv, 1, UMAC_TX_FLUSH);
- udelay(10);
- bcmgenet_umac_writel(priv, 0, UMAC_TX_FLUSH);
-
- if (flush_rx) {
- reg = bcmgenet_rbuf_ctrl_get(priv);
- bcmgenet_rbuf_ctrl_set(priv, reg | BIT(0));
- udelay(10);
- bcmgenet_rbuf_ctrl_set(priv, reg);
- udelay(10);
- }
-
- return dma_ctrl;
-}
-
-static void bcmgenet_enable_dma(struct bcmgenet_priv *priv, u32 dma_ctrl)
-{
- u32 reg;
-
- reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
- reg |= dma_ctrl;
- bcmgenet_rdma_writel(priv, reg, DMA_CTRL);
-
- reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
- reg |= dma_ctrl;
- bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
-}
-
static void bcmgenet_netif_start(struct net_device *dev)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
@@ -3358,7 +3228,6 @@ static void bcmgenet_netif_start(struct net_device *dev)
static int bcmgenet_open(struct net_device *dev)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
- unsigned long dma_ctrl;
int ret;
netif_dbg(priv, ifup, dev, "bcmgenet_open\n");
@@ -3384,22 +3253,16 @@ static int bcmgenet_open(struct net_device *dev)
bcmgenet_set_hw_addr(priv, dev->dev_addr);
- /* Disable RX/TX DMA and flush TX and RX queues */
- dma_ctrl = bcmgenet_dma_disable(priv, true);
+ /* HFB init */
+ bcmgenet_hfb_init(priv);
/* Reinitialize TDMA and RDMA and SW housekeeping */
- ret = bcmgenet_init_dma(priv);
+ ret = bcmgenet_init_dma(priv, true);
if (ret) {
netdev_err(dev, "failed to initialize DMA\n");
goto err_clk_disable;
}
- /* Always enable ring 16 - descriptor ring */
- bcmgenet_enable_dma(priv, dma_ctrl);
-
- /* HFB init */
- bcmgenet_hfb_init(priv);
-
ret = request_irq(priv->irq0, bcmgenet_isr0, IRQF_SHARED,
dev->name, priv);
if (ret < 0) {
@@ -3446,19 +3309,21 @@ static void bcmgenet_netif_stop(struct net_device *dev, bool stop_phy)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
- bcmgenet_disable_tx_napi(priv);
netif_tx_disable(dev);
/* Disable MAC receive */
+ bcmgenet_hfb_reg_writel(priv, 0, HFB_CTRL);
umac_enable_set(priv, CMD_RX_EN, false);
+ if (stop_phy)
+ phy_stop(dev->phydev);
+
bcmgenet_dma_teardown(priv);
	/* Disable MAC transmit. TX DMA must be disabled before this */
umac_enable_set(priv, CMD_TX_EN, false);
- if (stop_phy)
- phy_stop(dev->phydev);
+ bcmgenet_disable_tx_napi(priv);
bcmgenet_disable_rx_napi(priv);
bcmgenet_intr_disable(priv);
@@ -3506,16 +3371,11 @@ static void bcmgenet_dump_tx_queue(struct bcmgenet_tx_ring *ring)
if (!netif_msg_tx_err(priv))
return;
- txq = netdev_get_tx_queue(priv->dev, ring->queue);
+ txq = netdev_get_tx_queue(priv->dev, ring->index);
spin_lock(&ring->lock);
- if (ring->index == DESC_INDEX) {
- intsts = ~bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);
- intmsk = UMAC_IRQ_TXDMA_DONE | UMAC_IRQ_TXDMA_MBDONE;
- } else {
- intsts = ~bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS);
- intmsk = 1 << ring->index;
- }
+ intsts = ~bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS);
+ intmsk = 1 << ring->index;
c_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_CONS_INDEX);
p_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_PROD_INDEX);
txq_stopped = netif_tx_queue_stopped(txq);
@@ -3529,7 +3389,7 @@ static void bcmgenet_dump_tx_queue(struct bcmgenet_tx_ring *ring)
"(sw)c_index: %d (hw)c_index: %d\n"
"(sw)clean_p: %d (sw)write_p: %d\n"
"(sw)cb_ptr: %d (sw)end_ptr: %d\n",
- ring->index, ring->queue,
+ ring->index, ring->index,
txq_stopped ? "stopped" : "active",
intsts & intmsk ? "enabled" : "disabled",
free_bds, ring->size,
@@ -3542,25 +3402,20 @@ static void bcmgenet_dump_tx_queue(struct bcmgenet_tx_ring *ring)
static void bcmgenet_timeout(struct net_device *dev, unsigned int txqueue)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
- u32 int0_enable = 0;
u32 int1_enable = 0;
unsigned int q;
netif_dbg(priv, tx_err, dev, "bcmgenet_timeout\n");
- for (q = 0; q < priv->hw_params->tx_queues; q++)
+ for (q = 0; q <= priv->hw_params->tx_queues; q++)
bcmgenet_dump_tx_queue(&priv->tx_rings[q]);
- bcmgenet_dump_tx_queue(&priv->tx_rings[DESC_INDEX]);
bcmgenet_tx_reclaim_all(dev);
- for (q = 0; q < priv->hw_params->tx_queues; q++)
+ for (q = 0; q <= priv->hw_params->tx_queues; q++)
int1_enable |= (1 << q);
- int0_enable = UMAC_IRQ_TXDMA_DONE;
-
/* Re-enable TX interrupts if disabled */
- bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR);
bcmgenet_intrl2_1_writel(priv, int1_enable, INTRL2_CPU_MASK_CLEAR);
netif_trans_update(dev);
@@ -3664,16 +3519,13 @@ static struct net_device_stats *bcmgenet_get_stats(struct net_device *dev)
struct bcmgenet_rx_ring *rx_ring;
unsigned int q;
- for (q = 0; q < priv->hw_params->tx_queues; q++) {
+ for (q = 0; q <= priv->hw_params->tx_queues; q++) {
tx_ring = &priv->tx_rings[q];
tx_bytes += tx_ring->bytes;
tx_packets += tx_ring->packets;
}
- tx_ring = &priv->tx_rings[DESC_INDEX];
- tx_bytes += tx_ring->bytes;
- tx_packets += tx_ring->packets;
- for (q = 0; q < priv->hw_params->rx_queues; q++) {
+ for (q = 0; q <= priv->hw_params->rx_queues; q++) {
rx_ring = &priv->rx_rings[q];
rx_bytes += rx_ring->bytes;
@@ -3681,11 +3533,6 @@ static struct net_device_stats *bcmgenet_get_stats(struct net_device *dev)
rx_errors += rx_ring->errors;
rx_dropped += rx_ring->dropped;
}
- rx_ring = &priv->rx_rings[DESC_INDEX];
- rx_bytes += rx_ring->bytes;
- rx_packets += rx_ring->packets;
- rx_errors += rx_ring->errors;
- rx_dropped += rx_ring->dropped;
dev->stats.tx_bytes = tx_bytes;
dev->stats.tx_packets = tx_packets;
@@ -3726,128 +3573,109 @@ static const struct net_device_ops bcmgenet_netdev_ops = {
.ndo_change_carrier = bcmgenet_change_carrier,
};
-/* Array of GENET hardware parameters/characteristics */
-static struct bcmgenet_hw_params bcmgenet_hw_params[] = {
- [GENET_V1] = {
- .tx_queues = 0,
- .tx_bds_per_q = 0,
- .rx_queues = 0,
- .rx_bds_per_q = 0,
- .bp_in_en_shift = 16,
- .bp_in_mask = 0xffff,
- .hfb_filter_cnt = 16,
- .qtag_mask = 0x1F,
- .hfb_offset = 0x1000,
- .rdma_offset = 0x2000,
- .tdma_offset = 0x3000,
- .words_per_bd = 2,
- },
- [GENET_V2] = {
- .tx_queues = 4,
- .tx_bds_per_q = 32,
- .rx_queues = 0,
- .rx_bds_per_q = 0,
- .bp_in_en_shift = 16,
- .bp_in_mask = 0xffff,
- .hfb_filter_cnt = 16,
- .qtag_mask = 0x1F,
- .tbuf_offset = 0x0600,
- .hfb_offset = 0x1000,
- .hfb_reg_offset = 0x2000,
- .rdma_offset = 0x3000,
- .tdma_offset = 0x4000,
- .words_per_bd = 2,
- .flags = GENET_HAS_EXT,
- },
- [GENET_V3] = {
- .tx_queues = 4,
- .tx_bds_per_q = 32,
- .rx_queues = 0,
- .rx_bds_per_q = 0,
- .bp_in_en_shift = 17,
- .bp_in_mask = 0x1ffff,
- .hfb_filter_cnt = 48,
- .hfb_filter_size = 128,
- .qtag_mask = 0x3F,
- .tbuf_offset = 0x0600,
- .hfb_offset = 0x8000,
- .hfb_reg_offset = 0xfc00,
- .rdma_offset = 0x10000,
- .tdma_offset = 0x11000,
- .words_per_bd = 2,
- .flags = GENET_HAS_EXT | GENET_HAS_MDIO_INTR |
- GENET_HAS_MOCA_LINK_DET,
- },
- [GENET_V4] = {
- .tx_queues = 4,
- .tx_bds_per_q = 32,
- .rx_queues = 0,
- .rx_bds_per_q = 0,
- .bp_in_en_shift = 17,
- .bp_in_mask = 0x1ffff,
- .hfb_filter_cnt = 48,
- .hfb_filter_size = 128,
- .qtag_mask = 0x3F,
- .tbuf_offset = 0x0600,
- .hfb_offset = 0x8000,
- .hfb_reg_offset = 0xfc00,
- .rdma_offset = 0x2000,
- .tdma_offset = 0x4000,
- .words_per_bd = 3,
- .flags = GENET_HAS_40BITS | GENET_HAS_EXT |
- GENET_HAS_MDIO_INTR | GENET_HAS_MOCA_LINK_DET,
- },
- [GENET_V5] = {
- .tx_queues = 4,
- .tx_bds_per_q = 32,
- .rx_queues = 0,
- .rx_bds_per_q = 0,
- .bp_in_en_shift = 17,
- .bp_in_mask = 0x1ffff,
- .hfb_filter_cnt = 48,
- .hfb_filter_size = 128,
- .qtag_mask = 0x3F,
- .tbuf_offset = 0x0600,
- .hfb_offset = 0x8000,
- .hfb_reg_offset = 0xfc00,
- .rdma_offset = 0x2000,
- .tdma_offset = 0x4000,
- .words_per_bd = 3,
- .flags = GENET_HAS_40BITS | GENET_HAS_EXT |
- GENET_HAS_MDIO_INTR | GENET_HAS_MOCA_LINK_DET,
- },
+/* GENET hardware parameters/characteristics */
+static const struct bcmgenet_hw_params bcmgenet_hw_params_v1 = {
+ .tx_queues = 0,
+ .tx_bds_per_q = 0,
+ .rx_queues = 0,
+ .rx_bds_per_q = 0,
+ .bp_in_en_shift = 16,
+ .bp_in_mask = 0xffff,
+ .hfb_filter_cnt = 16,
+ .hfb_filter_size = 64,
+ .qtag_mask = 0x1F,
+ .hfb_offset = 0x1000,
+ .hfb_reg_offset = GENET_RBUF_OFF + RBUF_HFB_CTRL_V1,
+ .rdma_offset = 0x2000,
+ .tdma_offset = 0x3000,
+ .words_per_bd = 2,
+};
+
+static const struct bcmgenet_hw_params bcmgenet_hw_params_v2 = {
+ .tx_queues = 4,
+ .tx_bds_per_q = 32,
+ .rx_queues = 0,
+ .rx_bds_per_q = 0,
+ .bp_in_en_shift = 16,
+ .bp_in_mask = 0xffff,
+ .hfb_filter_cnt = 16,
+ .hfb_filter_size = 64,
+ .qtag_mask = 0x1F,
+ .tbuf_offset = 0x0600,
+ .hfb_offset = 0x1000,
+ .hfb_reg_offset = 0x2000,
+ .rdma_offset = 0x3000,
+ .tdma_offset = 0x4000,
+ .words_per_bd = 2,
+};
+
+static const struct bcmgenet_hw_params bcmgenet_hw_params_v3 = {
+ .tx_queues = 4,
+ .tx_bds_per_q = 32,
+ .rx_queues = 0,
+ .rx_bds_per_q = 0,
+ .bp_in_en_shift = 17,
+ .bp_in_mask = 0x1ffff,
+ .hfb_filter_cnt = 48,
+ .hfb_filter_size = 128,
+ .qtag_mask = 0x3F,
+ .tbuf_offset = 0x0600,
+ .hfb_offset = 0x8000,
+ .hfb_reg_offset = 0xfc00,
+ .rdma_offset = 0x10000,
+ .tdma_offset = 0x11000,
+ .words_per_bd = 2,
+};
+
+static const struct bcmgenet_hw_params bcmgenet_hw_params_v4 = {
+ .tx_queues = 4,
+ .tx_bds_per_q = 32,
+ .rx_queues = 0,
+ .rx_bds_per_q = 0,
+ .bp_in_en_shift = 17,
+ .bp_in_mask = 0x1ffff,
+ .hfb_filter_cnt = 48,
+ .hfb_filter_size = 128,
+ .qtag_mask = 0x3F,
+ .tbuf_offset = 0x0600,
+ .hfb_offset = 0x8000,
+ .hfb_reg_offset = 0xfc00,
+ .rdma_offset = 0x2000,
+ .tdma_offset = 0x4000,
+ .words_per_bd = 3,
};
/* Infer hardware parameters from the detected GENET version */
static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv)
{
- struct bcmgenet_hw_params *params;
+ const struct bcmgenet_hw_params *params;
u32 reg;
u8 major;
u16 gphy_rev;
- if (GENET_IS_V5(priv) || GENET_IS_V4(priv)) {
- bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus;
- genet_dma_ring_regs = genet_dma_ring_regs_v4;
- } else if (GENET_IS_V3(priv)) {
+ /* default to latest values */
+ params = &bcmgenet_hw_params_v4;
+ bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus;
+ genet_dma_ring_regs = genet_dma_ring_regs_v4;
+ if (GENET_IS_V3(priv)) {
+ params = &bcmgenet_hw_params_v3;
bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus;
genet_dma_ring_regs = genet_dma_ring_regs_v123;
} else if (GENET_IS_V2(priv)) {
+ params = &bcmgenet_hw_params_v2;
bcmgenet_dma_regs = bcmgenet_dma_regs_v2;
genet_dma_ring_regs = genet_dma_ring_regs_v123;
} else if (GENET_IS_V1(priv)) {
+ params = &bcmgenet_hw_params_v1;
bcmgenet_dma_regs = bcmgenet_dma_regs_v1;
genet_dma_ring_regs = genet_dma_ring_regs_v123;
}
-
- /* enum genet_version starts at 1 */
- priv->hw_params = &bcmgenet_hw_params[priv->version];
- params = priv->hw_params;
+ priv->hw_params = params;
/* Read GENET HW version */
reg = bcmgenet_sys_readl(priv, SYS_REV_CTRL);
major = (reg >> 24 & 0x0f);
- if (major == 6)
+ if (major == 6 || major == 7)
major = 5;
else if (major == 5)
major = 4;
@@ -3898,7 +3726,7 @@ static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv)
}
#ifdef CONFIG_PHYS_ADDR_T_64BIT
- if (!(params->flags & GENET_HAS_40BITS))
+ if (!bcmgenet_has_40bits(priv))
pr_warn("GENET does not support 40-bits PA\n");
#endif
@@ -3923,7 +3751,7 @@ static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv)
struct bcmgenet_plat_data {
enum bcmgenet_version version;
u32 dma_max_burst_length;
- bool ephy_16nm;
+ u32 flags;
};
static const struct bcmgenet_plat_data v1_plat_data = {
@@ -3934,32 +3762,43 @@ static const struct bcmgenet_plat_data v1_plat_data = {
static const struct bcmgenet_plat_data v2_plat_data = {
.version = GENET_V2,
.dma_max_burst_length = DMA_MAX_BURST_LENGTH,
+ .flags = GENET_HAS_EXT,
};
static const struct bcmgenet_plat_data v3_plat_data = {
.version = GENET_V3,
.dma_max_burst_length = DMA_MAX_BURST_LENGTH,
+ .flags = GENET_HAS_EXT | GENET_HAS_MDIO_INTR |
+ GENET_HAS_MOCA_LINK_DET,
};
static const struct bcmgenet_plat_data v4_plat_data = {
.version = GENET_V4,
.dma_max_burst_length = DMA_MAX_BURST_LENGTH,
+ .flags = GENET_HAS_40BITS | GENET_HAS_EXT |
+ GENET_HAS_MDIO_INTR | GENET_HAS_MOCA_LINK_DET,
};
static const struct bcmgenet_plat_data v5_plat_data = {
.version = GENET_V5,
.dma_max_burst_length = DMA_MAX_BURST_LENGTH,
+ .flags = GENET_HAS_40BITS | GENET_HAS_EXT |
+ GENET_HAS_MDIO_INTR | GENET_HAS_MOCA_LINK_DET,
};
static const struct bcmgenet_plat_data bcm2711_plat_data = {
.version = GENET_V5,
.dma_max_burst_length = 0x08,
+ .flags = GENET_HAS_40BITS | GENET_HAS_EXT |
+ GENET_HAS_MDIO_INTR | GENET_HAS_MOCA_LINK_DET,
};
static const struct bcmgenet_plat_data bcm7712_plat_data = {
.version = GENET_V5,
.dma_max_burst_length = DMA_MAX_BURST_LENGTH,
- .ephy_16nm = true,
+ .flags = GENET_HAS_40BITS | GENET_HAS_EXT |
+ GENET_HAS_MDIO_INTR | GENET_HAS_MOCA_LINK_DET |
+ GENET_HAS_EPHY_16NM,
};
static const struct of_device_id bcmgenet_match[] = {
@@ -4057,7 +3896,7 @@ static int bcmgenet_probe(struct platform_device *pdev)
if (pdata) {
priv->version = pdata->version;
priv->dma_max_burst_length = pdata->dma_max_burst_length;
- priv->ephy_16nm = pdata->ephy_16nm;
+ priv->flags = pdata->flags;
} else {
priv->version = pd->genet_version;
priv->dma_max_burst_length = DMA_MAX_BURST_LENGTH;
@@ -4077,7 +3916,7 @@ static int bcmgenet_probe(struct platform_device *pdev)
bcmgenet_set_hw_params(priv);
err = -EIO;
- if (priv->hw_params->flags & GENET_HAS_40BITS)
+ if (bcmgenet_has_40bits(priv))
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40));
if (err)
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
@@ -4132,16 +3971,13 @@ static int bcmgenet_probe(struct platform_device *pdev)
if (err)
goto err_clk_disable;
- /* setup number of real queues + 1 (GENET_V1 has 0 hardware queues
- * just the ring 16 descriptor based TX
- */
+ /* setup number of real queues + 1 */
netif_set_real_num_tx_queues(priv->dev, priv->hw_params->tx_queues + 1);
netif_set_real_num_rx_queues(priv->dev, priv->hw_params->rx_queues + 1);
/* Set default coalescing parameters */
- for (i = 0; i < priv->hw_params->rx_queues; i++)
+ for (i = 0; i <= priv->hw_params->rx_queues; i++)
priv->rx_rings[i].rx_max_coalesced_frames = 1;
- priv->rx_rings[DESC_INDEX].rx_max_coalesced_frames = 1;
/* libphy will determine the link state */
netif_carrier_off(dev);
@@ -4205,9 +4041,22 @@ static int bcmgenet_resume_noirq(struct device *d)
reg = bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_STAT);
if (reg & UMAC_IRQ_WAKE_EVENT)
pm_wakeup_event(&priv->pdev->dev, 0);
+
+ /* From WOL-enabled suspend, switch to regular clock */
+ if (!bcmgenet_power_up(priv, GENET_POWER_WOL_MAGIC))
+ return 0;
+
+ /* Failed so fall through to reset MAC */
}
- bcmgenet_intrl2_0_writel(priv, UMAC_IRQ_WAKE_EVENT, INTRL2_CPU_CLEAR);
+ /* If this is an internal GPHY, power it back on now, before UniMAC is
+ * brought out of reset as absolutely no UniMAC activity is allowed
+ */
+ if (priv->internal_phy)
+ bcmgenet_power_up(priv, GENET_POWER_PASSIVE);
+
+ /* take MAC out of reset */
+ bcmgenet_umac_reset(priv);
return 0;
}
@@ -4217,23 +4066,46 @@ static int bcmgenet_resume(struct device *d)
struct net_device *dev = dev_get_drvdata(d);
struct bcmgenet_priv *priv = netdev_priv(dev);
struct bcmgenet_rxnfc_rule *rule;
- unsigned long dma_ctrl;
int ret;
+ u32 reg;
if (!netif_running(dev))
return 0;
- /* From WOL-enabled suspend, switch to regular clock */
- if (device_may_wakeup(d) && priv->wolopts)
- bcmgenet_power_up(priv, GENET_POWER_WOL_MAGIC);
-
- /* If this is an internal GPHY, power it back on now, before UniMAC is
- * brought out of reset as absolutely no UniMAC activity is allowed
- */
- if (priv->internal_phy)
- bcmgenet_power_up(priv, GENET_POWER_PASSIVE);
-
- bcmgenet_umac_reset(priv);
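+ /* If we suspended with Wake-on-LAN, check whether the MAC kept running */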
+ if (device_may_wakeup(d) && priv->wolopts) {
+ reg = bcmgenet_umac_readl(priv, UMAC_CMD);
+ if (reg & CMD_RX_EN) {
+ /* Successfully exited WoL, just resume data flows */
+ list_for_each_entry(rule, &priv->rxnfc_list, list)
+ if (rule->state == BCMGENET_RXNFC_STATE_ENABLED)
+ bcmgenet_hfb_enable_filter(priv,
+ rule->fs.location + 1);
+ bcmgenet_hfb_enable_filter(priv, 0);
+ bcmgenet_set_rx_mode(dev);
+ bcmgenet_enable_rx_napi(priv);
+
+ /* Reinitialize Tx flows */
+ bcmgenet_tdma_disable(priv);
+ bcmgenet_init_tx_queues(priv->dev);
+ reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
+ reg |= DMA_EN;
+ bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
+ bcmgenet_enable_tx_napi(priv);
+
+ bcmgenet_link_intr_enable(priv);
+ phy_start_machine(dev->phydev);
+
+ netif_device_attach(dev);
+ enable_irq(priv->irq1);
+ return 0;
+ }
+ /* MAC was reset so complete bcmgenet_netif_stop() */
+ umac_enable_set(priv, CMD_RX_EN | CMD_TX_EN, false);
+ bcmgenet_rdma_disable(priv);
+ bcmgenet_intr_disable(priv);
+ bcmgenet_fini_dma(priv);
+ enable_irq(priv->irq1);
+ }
init_umac(priv);
@@ -4254,19 +4126,13 @@ static int bcmgenet_resume(struct device *d)
if (rule->state != BCMGENET_RXNFC_STATE_UNUSED)
bcmgenet_hfb_create_rxnfc_filter(priv, rule);
- /* Disable RX/TX DMA and flush TX queues */
- dma_ctrl = bcmgenet_dma_disable(priv, false);
-
/* Reinitialize TDMA and RDMA and SW housekeeping */
- ret = bcmgenet_init_dma(priv);
+ ret = bcmgenet_init_dma(priv, false);
if (ret) {
netdev_err(dev, "failed to initialize DMA\n");
goto out_clk_disable;
}
- /* Always enable ring 16 - descriptor ring */
- bcmgenet_enable_dma(priv, dma_ctrl);
-
if (!device_may_wakeup(d))
phy_resume(dev->phydev);
@@ -4287,19 +4153,52 @@ static int bcmgenet_suspend(struct device *d)
{
struct net_device *dev = dev_get_drvdata(d);
struct bcmgenet_priv *priv = netdev_priv(dev);
+ struct bcmgenet_rxnfc_rule *rule;
+ u32 reg, hfb_enable = 0;
if (!netif_running(dev))
return 0;
netif_device_detach(dev);
- bcmgenet_netif_stop(dev, true);
+ if (device_may_wakeup(d) && priv->wolopts) {
+ netif_tx_disable(dev);
+
+ /* Suspend non-wake Rx data flows */
+ if (priv->wolopts & WAKE_FILTER)
+ list_for_each_entry(rule, &priv->rxnfc_list, list)
+ if (rule->fs.ring_cookie == RX_CLS_FLOW_WAKE &&
+ rule->state == BCMGENET_RXNFC_STATE_ENABLED)
+ hfb_enable |= 1 << rule->fs.location;
+ reg = bcmgenet_hfb_reg_readl(priv, HFB_CTRL);
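+ /* V1/V2 keep the filter enables in HFB_CTRL; V3+ use a separate enable register */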
+ if (GENET_IS_V1(priv) || GENET_IS_V2(priv)) {
+ reg &= ~RBUF_HFB_FILTER_EN_MASK;
+ reg |= hfb_enable << (RBUF_HFB_FILTER_EN_SHIFT + 1);
+ } else {
+ bcmgenet_hfb_reg_writel(priv, hfb_enable << 1,
+ HFB_FLT_ENABLE_V3PLUS + 4);
+ }
+ if (!hfb_enable)
+ reg &= ~RBUF_HFB_EN;
+ bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL);
- if (!device_may_wakeup(d))
- phy_suspend(dev->phydev);
+ /* Clear any old filter matches so only new matches wake */
+ bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_MASK_SET);
+ bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_CLEAR);
- /* Disable filtering */
- bcmgenet_hfb_reg_writel(priv, 0, HFB_CTRL);
+ if (-ETIMEDOUT == bcmgenet_tdma_disable(priv))
+ netdev_warn(priv->dev,
+ "Timed out while disabling TX DMA\n");
+
+ bcmgenet_disable_tx_napi(priv);
+ bcmgenet_disable_rx_napi(priv);
+ disable_irq(priv->irq1);
+ bcmgenet_tx_reclaim_all(dev);
+ bcmgenet_fini_tx_napi(priv);
+ } else {
+ /* Teardown the interface */
+ bcmgenet_netif_stop(dev, true);
+ }
return 0;
}
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
index 43b923c48b14..10c631bbe964 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (c) 2014-2024 Broadcom
+ * Copyright (c) 2014-2025 Broadcom
*/
#ifndef __BCMGENET_H__
@@ -18,6 +18,9 @@
#include "../unimac.h"
+/* Maximum number of hardware queues, downsized if needed */
+#define GENET_MAX_MQ_CNT 4
+
/* total number of Buffer Descriptors, same for Rx/Tx */
#define TOTAL_DESC 256
@@ -271,6 +274,8 @@ struct bcmgenet_mib_counters {
/* Only valid for GENETv3+ */
#define UMAC_IRQ_MDIO_DONE (1 << 23)
#define UMAC_IRQ_MDIO_ERROR (1 << 24)
+#define UMAC_IRQ_MDIO_EVENT (UMAC_IRQ_MDIO_DONE | \
+ UMAC_IRQ_MDIO_ERROR)
/* INTRL2 instance 1 definitions */
#define UMAC_IRQ1_TX_INTR_MASK 0xFFFF
@@ -476,6 +481,7 @@ enum bcmgenet_version {
#define GENET_HAS_EXT (1 << 1)
#define GENET_HAS_MDIO_INTR (1 << 2)
#define GENET_HAS_MOCA_LINK_DET (1 << 3)
+#define GENET_HAS_EPHY_16NM (1 << 4)
/* BCMGENET hardware parameters, keep this structure nicely aligned
* since it is going to be used in hot paths
@@ -496,7 +502,6 @@ struct bcmgenet_hw_params {
u32 rdma_offset;
u32 tdma_offset;
u32 words_per_bd;
- u32 flags;
};
struct bcmgenet_skb_cb {
@@ -513,7 +518,6 @@ struct bcmgenet_tx_ring {
unsigned long packets;
unsigned long bytes;
unsigned int index; /* ring index */
- unsigned int queue; /* queue index */
struct enet_cb *cbs; /* tx ring buffer control block*/
unsigned int size; /* size of each tx ring */
unsigned int clean_ptr; /* Tx ring clean pointer */
@@ -523,8 +527,6 @@ struct bcmgenet_tx_ring {
unsigned int prod_index; /* Tx ring producer index SW copy */
unsigned int cb_ptr; /* Tx ring initial CB ptr */
unsigned int end_ptr; /* Tx ring end CB ptr */
- void (*int_enable)(struct bcmgenet_tx_ring *);
- void (*int_disable)(struct bcmgenet_tx_ring *);
struct bcmgenet_priv *priv;
};
@@ -553,8 +555,6 @@ struct bcmgenet_rx_ring {
struct bcmgenet_net_dim dim;
u32 rx_max_coalesced_frames;
u32 rx_coalesce_usecs;
- void (*int_enable)(struct bcmgenet_rx_ring *);
- void (*int_disable)(struct bcmgenet_rx_ring *);
struct bcmgenet_priv *priv;
};
@@ -583,7 +583,7 @@ struct bcmgenet_priv {
struct enet_cb *tx_cbs;
unsigned int num_tx_bds;
- struct bcmgenet_tx_ring tx_rings[DESC_INDEX + 1];
+ struct bcmgenet_tx_ring tx_rings[GENET_MAX_MQ_CNT + 1];
/* receive variables */
void __iomem *rx_bds;
@@ -593,10 +593,11 @@ struct bcmgenet_priv {
struct bcmgenet_rxnfc_rule rxnfc_rules[MAX_NUM_OF_FS_RULES];
struct list_head rxnfc_list;
- struct bcmgenet_rx_ring rx_rings[DESC_INDEX + 1];
+ struct bcmgenet_rx_ring rx_rings[GENET_MAX_MQ_CNT + 1];
/* other misc variables */
- struct bcmgenet_hw_params *hw_params;
+ const struct bcmgenet_hw_params *hw_params;
+ u32 flags;
unsigned autoneg_pause:1;
unsigned tx_pause:1;
unsigned rx_pause:1;
@@ -615,7 +616,6 @@ struct bcmgenet_priv {
phy_interface_t phy_interface;
int phy_addr;
int ext_phy;
- bool ephy_16nm;
/* Interrupt variables */
struct work_struct bcmgenet_irq_work;
@@ -643,13 +643,37 @@ struct bcmgenet_priv {
struct clk *clk_wol;
u32 wolopts;
u8 sopass[SOPASS_MAX];
- bool wol_active;
struct bcmgenet_mib_counters mib;
struct ethtool_keee eee;
};
+static inline bool bcmgenet_has_40bits(struct bcmgenet_priv *priv)
+{
+ return !!(priv->flags & GENET_HAS_40BITS);
+}
+
+static inline bool bcmgenet_has_ext(struct bcmgenet_priv *priv)
+{
+ return !!(priv->flags & GENET_HAS_EXT);
+}
+
+static inline bool bcmgenet_has_mdio_intr(struct bcmgenet_priv *priv)
+{
+ return !!(priv->flags & GENET_HAS_MDIO_INTR);
+}
+
+static inline bool bcmgenet_has_moca_link_det(struct bcmgenet_priv *priv)
+{
+ return !!(priv->flags & GENET_HAS_MOCA_LINK_DET);
+}
+
+static inline bool bcmgenet_has_ephy_16nm(struct bcmgenet_priv *priv)
+{
+ return !!(priv->flags & GENET_HAS_EPHY_16NM);
+}
+
#define GENET_IO_MACRO(name, offset) \
static inline u32 bcmgenet_##name##_readl(struct bcmgenet_priv *priv, \
u32 off) \
@@ -702,8 +726,8 @@ void bcmgenet_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol);
int bcmgenet_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol);
int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv,
enum bcmgenet_power_mode mode);
-void bcmgenet_wol_power_up_cfg(struct bcmgenet_priv *priv,
- enum bcmgenet_power_mode mode);
+int bcmgenet_wol_power_up_cfg(struct bcmgenet_priv *priv,
+ enum bcmgenet_power_mode mode);
void bcmgenet_eee_enable_set(struct net_device *dev, bool enable,
bool tx_lpi_enabled);
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
index 3b082114f2e5..8fb551288298 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
@@ -2,7 +2,7 @@
/*
* Broadcom GENET (Gigabit Ethernet) Wake-on-LAN support
*
- * Copyright (c) 2014-2024 Broadcom
+ * Copyright (c) 2014-2025 Broadcom
*/
#define pr_fmt(fmt) "bcmgenet_wol: " fmt
@@ -145,8 +145,7 @@ int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv,
enum bcmgenet_power_mode mode)
{
struct net_device *dev = priv->dev;
- struct bcmgenet_rxnfc_rule *rule;
- u32 reg, hfb_ctrl_reg, hfb_enable = 0;
+ u32 reg, hfb_ctrl_reg;
int retries = 0;
if (mode != GENET_POWER_WOL_MAGIC) {
@@ -154,18 +153,6 @@ int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv,
return -EINVAL;
}
- /* Can't suspend with WoL if MAC is still in reset */
- spin_lock_bh(&priv->reg_lock);
- reg = bcmgenet_umac_readl(priv, UMAC_CMD);
- if (reg & CMD_SW_RESET)
- reg &= ~CMD_SW_RESET;
-
- /* disable RX */
- reg &= ~CMD_RX_EN;
- bcmgenet_umac_writel(priv, reg, UMAC_CMD);
- spin_unlock_bh(&priv->reg_lock);
- mdelay(10);
-
if (priv->wolopts & (WAKE_MAGIC | WAKE_MAGICSECURE)) {
reg = bcmgenet_umac_readl(priv, UMAC_MPD_CTRL);
reg |= MPD_EN;
@@ -177,13 +164,8 @@ int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv,
}
hfb_ctrl_reg = bcmgenet_hfb_reg_readl(priv, HFB_CTRL);
- if (priv->wolopts & WAKE_FILTER) {
- list_for_each_entry(rule, &priv->rxnfc_list, list)
- if (rule->fs.ring_cookie == RX_CLS_FLOW_WAKE)
- hfb_enable |= (1 << rule->fs.location);
- reg = (hfb_ctrl_reg & ~RBUF_HFB_EN) | RBUF_ACPI_EN;
- bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL);
- }
+ reg = hfb_ctrl_reg | RBUF_ACPI_EN;
+ bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL);
/* Do not leave UniMAC in MPD mode only */
retries = bcmgenet_poll_wol_status(priv);
@@ -198,15 +180,12 @@ int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv,
netif_dbg(priv, wol, dev, "MPD WOL-ready status set after %d msec\n",
retries);
- clk_prepare_enable(priv->clk_wol);
- priv->wol_active = 1;
+ /* Disable phy status updates while suspending */
+ mutex_lock(&dev->phydev->lock);
+ dev->phydev->state = PHY_READY;
+ mutex_unlock(&dev->phydev->lock);
- if (hfb_enable) {
- bcmgenet_hfb_reg_writel(priv, hfb_enable,
- HFB_FLT_ENABLE_V3PLUS + 4);
- hfb_ctrl_reg = RBUF_HFB_EN | RBUF_ACPI_EN;
- bcmgenet_hfb_reg_writel(priv, hfb_ctrl_reg, HFB_CTRL);
- }
+ clk_prepare_enable(priv->clk_wol);
/* Enable CRC forward */
spin_lock_bh(&priv->reg_lock);
@@ -214,13 +193,17 @@ int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv,
priv->crc_fwd_en = 1;
reg |= CMD_CRC_FWD;
+ /* Can't suspend with WoL if MAC is still in reset */
+ if (reg & CMD_SW_RESET)
+ reg &= ~CMD_SW_RESET;
+
/* Receiver must be enabled for WOL MP detection */
reg |= CMD_RX_EN;
bcmgenet_umac_writel(priv, reg, UMAC_CMD);
spin_unlock_bh(&priv->reg_lock);
reg = UMAC_IRQ_MPD_R;
- if (hfb_enable)
+ if (hfb_ctrl_reg & RBUF_HFB_EN)
reg |= UMAC_IRQ_HFB_SM | UMAC_IRQ_HFB_MM;
bcmgenet_intrl2_0_writel(priv, reg, INTRL2_CPU_MASK_CLEAR);
@@ -228,40 +211,42 @@ int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv,
return 0;
}
-void bcmgenet_wol_power_up_cfg(struct bcmgenet_priv *priv,
- enum bcmgenet_power_mode mode)
+int bcmgenet_wol_power_up_cfg(struct bcmgenet_priv *priv,
+ enum bcmgenet_power_mode mode)
{
+ struct net_device *dev = priv->dev;
u32 reg;
if (mode != GENET_POWER_WOL_MAGIC) {
netif_err(priv, wol, priv->dev, "invalid mode: %d\n", mode);
- return;
+ return -EINVAL;
}
- if (!priv->wol_active)
- return; /* failed to suspend so skip the rest */
-
- priv->wol_active = 0;
clk_disable_unprepare(priv->clk_wol);
priv->crc_fwd_en = 0;
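+ /* Mask further wake-up interrupts and re-enable MDIO interrupts */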
+ bcmgenet_intrl2_0_writel(priv, UMAC_IRQ_WAKE_EVENT,
+ INTRL2_CPU_MASK_SET);
+ if (bcmgenet_has_mdio_intr(priv))
+ bcmgenet_intrl2_0_writel(priv,
+ UMAC_IRQ_MDIO_EVENT,
+ INTRL2_CPU_MASK_CLEAR);
+
/* Disable Magic Packet Detection */
if (priv->wolopts & (WAKE_MAGIC | WAKE_MAGICSECURE)) {
reg = bcmgenet_umac_readl(priv, UMAC_MPD_CTRL);
if (!(reg & MPD_EN))
- return; /* already reset so skip the rest */
+ return -EPERM; /* already reset so skip the rest */
reg &= ~(MPD_EN | MPD_PW_EN);
bcmgenet_umac_writel(priv, reg, UMAC_MPD_CTRL);
}
- /* Disable WAKE_FILTER Detection */
- if (priv->wolopts & WAKE_FILTER) {
- reg = bcmgenet_hfb_reg_readl(priv, HFB_CTRL);
- if (!(reg & RBUF_ACPI_EN))
- return; /* already reset so skip the rest */
- reg &= ~(RBUF_HFB_EN | RBUF_ACPI_EN);
- bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL);
- }
+ /* Disable ACPI mode */
+ reg = bcmgenet_hfb_reg_readl(priv, HFB_CTRL);
+ if (!(reg & RBUF_ACPI_EN))
+ return -EPERM; /* already reset so skip the rest */
+ reg &= ~RBUF_ACPI_EN;
+ bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL);
/* Disable CRC Forward */
spin_lock_bh(&priv->reg_lock);
@@ -269,4 +254,14 @@ void bcmgenet_wol_power_up_cfg(struct bcmgenet_priv *priv,
reg &= ~CMD_CRC_FWD;
bcmgenet_umac_writel(priv, reg, UMAC_CMD);
spin_unlock_bh(&priv->reg_lock);
+
+ /* Resume link status tracking */
+ mutex_lock(&dev->phydev->lock);
+ if (dev->phydev->link)
+ dev->phydev->state = PHY_RUNNING;
+ else
+ dev->phydev->state = PHY_NOLINK;
+ mutex_unlock(&dev->phydev->lock);
+
+ return 0;
}
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
index c4a3698cef66..71c619d2bea5 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
@@ -2,7 +2,7 @@
/*
* Broadcom GENET MDIO routines
*
- * Copyright (c) 2014-2024 Broadcom
+ * Copyright (c) 2014-2025 Broadcom
*/
#include <linux/acpi.h>
@@ -154,7 +154,7 @@ void bcmgenet_phy_power_set(struct net_device *dev, bool enable)
u32 reg = 0;
/* EXT_GPHY_CTRL is only valid for GENETv4 and onward */
- if (GENET_IS_V4(priv) || priv->ephy_16nm) {
+ if (GENET_IS_V4(priv) || bcmgenet_has_ephy_16nm(priv)) {
reg = bcmgenet_ext_readl(priv, EXT_GPHY_CTRL);
if (enable) {
reg &= ~EXT_CK25_DIS;
@@ -184,7 +184,7 @@ void bcmgenet_phy_power_set(struct net_device *dev, bool enable)
static void bcmgenet_moca_phy_setup(struct bcmgenet_priv *priv)
{
- if (priv->hw_params->flags & GENET_HAS_MOCA_LINK_DET)
+ if (bcmgenet_has_moca_link_det(priv))
fixed_phy_set_link_update(priv->dev->phydev,
bcmgenet_fixed_phy_link_update);
}
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index ece6f3b48327..3b9107003b00 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -19,6 +19,7 @@
#include <linux/ip.h>
#include <linux/prefetch.h>
#include <linux/module.h>
+#include <net/gro.h>
#include "bnad.h"
#include "bna.h"
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index 2847278d9cd4..c9a5c8beb2fa 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -951,75 +951,73 @@ struct macb_tx_skb {
* device stats by a periodic timer.
*/
struct macb_stats {
- u32 rx_pause_frames;
- u32 tx_ok;
- u32 tx_single_cols;
- u32 tx_multiple_cols;
- u32 rx_ok;
- u32 rx_fcs_errors;
- u32 rx_align_errors;
- u32 tx_deferred;
- u32 tx_late_cols;
- u32 tx_excessive_cols;
- u32 tx_underruns;
- u32 tx_carrier_errors;
- u32 rx_resource_errors;
- u32 rx_overruns;
- u32 rx_symbol_errors;
- u32 rx_oversize_pkts;
- u32 rx_jabbers;
- u32 rx_undersize_pkts;
- u32 sqe_test_errors;
- u32 rx_length_mismatch;
- u32 tx_pause_frames;
+ u64 rx_pause_frames;
+ u64 tx_ok;
+ u64 tx_single_cols;
+ u64 tx_multiple_cols;
+ u64 rx_ok;
+ u64 rx_fcs_errors;
+ u64 rx_align_errors;
+ u64 tx_deferred;
+ u64 tx_late_cols;
+ u64 tx_excessive_cols;
+ u64 tx_underruns;
+ u64 tx_carrier_errors;
+ u64 rx_resource_errors;
+ u64 rx_overruns;
+ u64 rx_symbol_errors;
+ u64 rx_oversize_pkts;
+ u64 rx_jabbers;
+ u64 rx_undersize_pkts;
+ u64 sqe_test_errors;
+ u64 rx_length_mismatch;
+ u64 tx_pause_frames;
};
struct gem_stats {
- u32 tx_octets_31_0;
- u32 tx_octets_47_32;
- u32 tx_frames;
- u32 tx_broadcast_frames;
- u32 tx_multicast_frames;
- u32 tx_pause_frames;
- u32 tx_64_byte_frames;
- u32 tx_65_127_byte_frames;
- u32 tx_128_255_byte_frames;
- u32 tx_256_511_byte_frames;
- u32 tx_512_1023_byte_frames;
- u32 tx_1024_1518_byte_frames;
- u32 tx_greater_than_1518_byte_frames;
- u32 tx_underrun;
- u32 tx_single_collision_frames;
- u32 tx_multiple_collision_frames;
- u32 tx_excessive_collisions;
- u32 tx_late_collisions;
- u32 tx_deferred_frames;
- u32 tx_carrier_sense_errors;
- u32 rx_octets_31_0;
- u32 rx_octets_47_32;
- u32 rx_frames;
- u32 rx_broadcast_frames;
- u32 rx_multicast_frames;
- u32 rx_pause_frames;
- u32 rx_64_byte_frames;
- u32 rx_65_127_byte_frames;
- u32 rx_128_255_byte_frames;
- u32 rx_256_511_byte_frames;
- u32 rx_512_1023_byte_frames;
- u32 rx_1024_1518_byte_frames;
- u32 rx_greater_than_1518_byte_frames;
- u32 rx_undersized_frames;
- u32 rx_oversize_frames;
- u32 rx_jabbers;
- u32 rx_frame_check_sequence_errors;
- u32 rx_length_field_frame_errors;
- u32 rx_symbol_errors;
- u32 rx_alignment_errors;
- u32 rx_resource_errors;
- u32 rx_overruns;
- u32 rx_ip_header_checksum_errors;
- u32 rx_tcp_checksum_errors;
- u32 rx_udp_checksum_errors;
+ u64 tx_octets;
+ u64 tx_frames;
+ u64 tx_broadcast_frames;
+ u64 tx_multicast_frames;
+ u64 tx_pause_frames;
+ u64 tx_64_byte_frames;
+ u64 tx_65_127_byte_frames;
+ u64 tx_128_255_byte_frames;
+ u64 tx_256_511_byte_frames;
+ u64 tx_512_1023_byte_frames;
+ u64 tx_1024_1518_byte_frames;
+ u64 tx_greater_than_1518_byte_frames;
+ u64 tx_underrun;
+ u64 tx_single_collision_frames;
+ u64 tx_multiple_collision_frames;
+ u64 tx_excessive_collisions;
+ u64 tx_late_collisions;
+ u64 tx_deferred_frames;
+ u64 tx_carrier_sense_errors;
+ u64 rx_octets;
+ u64 rx_frames;
+ u64 rx_broadcast_frames;
+ u64 rx_multicast_frames;
+ u64 rx_pause_frames;
+ u64 rx_64_byte_frames;
+ u64 rx_65_127_byte_frames;
+ u64 rx_128_255_byte_frames;
+ u64 rx_256_511_byte_frames;
+ u64 rx_512_1023_byte_frames;
+ u64 rx_1024_1518_byte_frames;
+ u64 rx_greater_than_1518_byte_frames;
+ u64 rx_undersized_frames;
+ u64 rx_oversize_frames;
+ u64 rx_jabbers;
+ u64 rx_frame_check_sequence_errors;
+ u64 rx_length_field_frame_errors;
+ u64 rx_symbol_errors;
+ u64 rx_alignment_errors;
+ u64 rx_resource_errors;
+ u64 rx_overruns;
+ u64 rx_ip_header_checksum_errors;
+ u64 rx_tcp_checksum_errors;
+ u64 rx_udp_checksum_errors;
};
/* Describes the name and offset of an individual statistic register, as
@@ -1027,7 +1025,7 @@ struct gem_stats {
* this register should contribute to.
*/
struct gem_statistic {
- char stat_string[ETH_GSTRING_LEN];
+ char stat_string[ETH_GSTRING_LEN] __nonstring;
int offset;
u32 stat_bits;
};
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index c1f57d96e63f..1fe8ec37491b 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -17,8 +17,6 @@
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/io.h>
-#include <linux/gpio.h>
-#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
@@ -26,7 +24,6 @@
#include <linux/platform_device.h>
#include <linux/phylink.h>
#include <linux/of.h>
-#include <linux/of_gpio.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/ip.h>
@@ -853,9 +850,7 @@ static int macb_mii_probe(struct net_device *dev)
struct macb *bp = netdev_priv(dev);
bp->phylink_sgmii_pcs.ops = &macb_phylink_pcs_ops;
- bp->phylink_sgmii_pcs.neg_mode = true;
bp->phylink_usx_pcs.ops = &macb_phylink_usx_pcs_ops;
- bp->phylink_usx_pcs.neg_mode = true;
bp->phylink_config.dev = &dev->dev;
bp->phylink_config.type = PHYLINK_NETDEV;
@@ -990,8 +985,8 @@ err_out:
static void macb_update_stats(struct macb *bp)
{
- u32 *p = &bp->hw_stats.macb.rx_pause_frames;
- u32 *end = &bp->hw_stats.macb.tx_pause_frames + 1;
+ u64 *p = &bp->hw_stats.macb.rx_pause_frames;
+ u64 *end = &bp->hw_stats.macb.tx_pause_frames + 1;
int offset = MACB_PFR;
WARN_ON((unsigned long)(end - p - 1) != (MACB_TPF - MACB_PFR) / 4);
@@ -1081,15 +1076,18 @@ static void macb_tx_error_task(struct work_struct *work)
tx_error_task);
bool halt_timeout = false;
struct macb *bp = queue->bp;
+ u32 queue_index;
+ u32 packets = 0;
+ u32 bytes = 0;
struct macb_tx_skb *tx_skb;
struct macb_dma_desc *desc;
struct sk_buff *skb;
unsigned int tail;
unsigned long flags;
+ queue_index = queue - bp->queues;
netdev_vdbg(bp->dev, "macb_tx_error_task: q = %u, t = %u, h = %u\n",
- (unsigned int)(queue - bp->queues),
- queue->tx_tail, queue->tx_head);
+ queue_index, queue->tx_tail, queue->tx_head);
/* Prevent the queue NAPI TX poll from running, as it calls
* macb_tx_complete(), which in turn may call netif_wake_subqueue().
@@ -1142,8 +1140,10 @@ static void macb_tx_error_task(struct work_struct *work)
skb->data);
bp->dev->stats.tx_packets++;
queue->stats.tx_packets++;
+ packets++;
bp->dev->stats.tx_bytes += skb->len;
queue->stats.tx_bytes += skb->len;
+ bytes += skb->len;
}
} else {
/* "Buffers exhausted mid-frame" errors may only happen
@@ -1160,6 +1160,9 @@ static void macb_tx_error_task(struct work_struct *work)
macb_tx_unmap(bp, tx_skb, 0);
}
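+ /* Report the reclaimed frames and bytes to BQL for this TX queue */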
+ netdev_tx_completed_queue(netdev_get_tx_queue(bp->dev, queue_index),
+ packets, bytes);
+
/* Set end of TX queue */
desc = macb_tx_desc(queue, 0);
macb_set_addr(bp, desc, 0);
@@ -1230,6 +1233,7 @@ static int macb_tx_complete(struct macb_queue *queue, int budget)
unsigned int tail;
unsigned int head;
int packets = 0;
+ u32 bytes = 0;
spin_lock(&queue->tx_ptr_lock);
head = queue->tx_head;
@@ -1271,6 +1275,7 @@ static int macb_tx_complete(struct macb_queue *queue, int budget)
bp->dev->stats.tx_bytes += skb->len;
queue->stats.tx_bytes += skb->len;
packets++;
+ bytes += skb->len;
}
/* Now we can safely release resources */
@@ -1285,6 +1290,9 @@ static int macb_tx_complete(struct macb_queue *queue, int budget)
}
}
+ netdev_tx_completed_queue(netdev_get_tx_queue(bp->dev, queue_index),
+ packets, bytes);
+
queue->tx_tail = tail;
if (__netif_subqueue_stopped(bp->dev, queue_index) &&
CIRC_CNT(queue->tx_head, queue->tx_tail,
@@ -2388,6 +2396,8 @@ static netdev_tx_t macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* Make newly initialized descriptor visible to hardware */
wmb();
skb_tx_timestamp(skb);
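+ /* Let BQL account for the queued bytes before starting transmission */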
+ netdev_tx_sent_queue(netdev_get_tx_queue(bp->dev, queue_index),
+ skb->len);
spin_lock_irq(&bp->lock);
macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
@@ -3023,6 +3033,7 @@ static int macb_close(struct net_device *dev)
for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
napi_disable(&queue->napi_rx);
napi_disable(&queue->napi_tx);
+ netdev_tx_reset_queue(netdev_get_tx_queue(dev, q));
}
phylink_stop(bp->phylink);
@@ -3073,7 +3084,7 @@ static void gem_update_stats(struct macb *bp)
unsigned int i, q, idx;
unsigned long *stat;
- u32 *p = &bp->hw_stats.gem.tx_octets_31_0;
+ u64 *p = &bp->hw_stats.gem.tx_octets;
for (i = 0; i < GEM_STATS_LEN; ++i, ++p) {
u32 offset = gem_statistics[i].offset;
@@ -3086,7 +3097,7 @@ static void gem_update_stats(struct macb *bp)
/* Add GEM_OCTTXH, GEM_OCTRXH */
val = bp->macb_reg_readl(bp, offset + 4);
bp->ethtool_stats[i] += ((u64)val) << 32;
- *(++p) += val;
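+ /* Fold the upper 32 bits of the hardware counter into the u64 stat */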
+ *(p++) += ((u64)val) << 32;
}
}
@@ -3096,16 +3107,13 @@ static void gem_update_stats(struct macb *bp)
bp->ethtool_stats[idx++] = *stat;
}
-static struct net_device_stats *gem_get_stats(struct macb *bp)
+static void gem_get_stats(struct macb *bp, struct rtnl_link_stats64 *nstat)
{
struct gem_stats *hwstat = &bp->hw_stats.gem;
- struct net_device_stats *nstat = &bp->dev->stats;
-
- if (!netif_running(bp->dev))
- return nstat;
spin_lock_irq(&bp->stats_lock);
- gem_update_stats(bp);
+ if (netif_running(bp->dev))
+ gem_update_stats(bp);
nstat->rx_errors = (hwstat->rx_frame_check_sequence_errors +
hwstat->rx_alignment_errors +
@@ -3135,8 +3143,6 @@ static struct net_device_stats *gem_get_stats(struct macb *bp)
nstat->tx_carrier_errors = hwstat->tx_carrier_sense_errors;
nstat->tx_fifo_errors = hwstat->tx_underrun;
spin_unlock_irq(&bp->stats_lock);
-
- return nstat;
}
static void gem_get_ethtool_stats(struct net_device *dev,
@@ -3188,14 +3194,17 @@ static void gem_get_ethtool_strings(struct net_device *dev, u32 sset, u8 *p)
}
}
-static struct net_device_stats *macb_get_stats(struct net_device *dev)
+static void macb_get_stats(struct net_device *dev,
+ struct rtnl_link_stats64 *nstat)
{
struct macb *bp = netdev_priv(dev);
- struct net_device_stats *nstat = &bp->dev->stats;
struct macb_stats *hwstat = &bp->hw_stats.macb;
- if (macb_is_gem(bp))
- return gem_get_stats(bp);
+ netdev_stats_to_stats64(nstat, &bp->dev->stats);
+ if (macb_is_gem(bp)) {
+ gem_get_stats(bp, nstat);
+ return;
+ }
/* read stats from hardware */
spin_lock_irq(&bp->stats_lock);
@@ -3233,8 +3242,170 @@ static struct net_device_stats *macb_get_stats(struct net_device *dev)
nstat->tx_fifo_errors = hwstat->tx_underruns;
/* Don't know about heartbeat or window errors... */
spin_unlock_irq(&bp->stats_lock);
+}
+
+static void macb_get_pause_stats(struct net_device *dev,
+ struct ethtool_pause_stats *pause_stats)
+{
+ struct macb *bp = netdev_priv(dev);
+ struct macb_stats *hwstat = &bp->hw_stats.macb;
+
+ spin_lock_irq(&bp->stats_lock);
+ macb_update_stats(bp);
+ pause_stats->tx_pause_frames = hwstat->tx_pause_frames;
+ pause_stats->rx_pause_frames = hwstat->rx_pause_frames;
+ spin_unlock_irq(&bp->stats_lock);
+}
+
+static void gem_get_pause_stats(struct net_device *dev,
+ struct ethtool_pause_stats *pause_stats)
+{
+ struct macb *bp = netdev_priv(dev);
+ struct gem_stats *hwstat = &bp->hw_stats.gem;
+
+ spin_lock_irq(&bp->stats_lock);
+ gem_update_stats(bp);
+ pause_stats->tx_pause_frames = hwstat->tx_pause_frames;
+ pause_stats->rx_pause_frames = hwstat->rx_pause_frames;
+ spin_unlock_irq(&bp->stats_lock);
+}
+
+static void macb_get_eth_mac_stats(struct net_device *dev,
+ struct ethtool_eth_mac_stats *mac_stats)
+{
+ struct macb *bp = netdev_priv(dev);
+ struct macb_stats *hwstat = &bp->hw_stats.macb;
+
+ spin_lock_irq(&bp->stats_lock);
+ macb_update_stats(bp);
+ mac_stats->FramesTransmittedOK = hwstat->tx_ok;
+ mac_stats->SingleCollisionFrames = hwstat->tx_single_cols;
+ mac_stats->MultipleCollisionFrames = hwstat->tx_multiple_cols;
+ mac_stats->FramesReceivedOK = hwstat->rx_ok;
+ mac_stats->FrameCheckSequenceErrors = hwstat->rx_fcs_errors;
+ mac_stats->AlignmentErrors = hwstat->rx_align_errors;
+ mac_stats->FramesWithDeferredXmissions = hwstat->tx_deferred;
+ mac_stats->LateCollisions = hwstat->tx_late_cols;
+ mac_stats->FramesAbortedDueToXSColls = hwstat->tx_excessive_cols;
+ mac_stats->FramesLostDueToIntMACXmitError = hwstat->tx_underruns;
+ mac_stats->CarrierSenseErrors = hwstat->tx_carrier_errors;
+ mac_stats->FramesLostDueToIntMACRcvError = hwstat->rx_overruns;
+ mac_stats->InRangeLengthErrors = hwstat->rx_length_mismatch;
+ mac_stats->FrameTooLongErrors = hwstat->rx_oversize_pkts;
+ spin_unlock_irq(&bp->stats_lock);
+}
+
+static void gem_get_eth_mac_stats(struct net_device *dev,
+ struct ethtool_eth_mac_stats *mac_stats)
+{
+ struct macb *bp = netdev_priv(dev);
+ struct gem_stats *hwstat = &bp->hw_stats.gem;
- return nstat;
+ spin_lock_irq(&bp->stats_lock);
+ gem_update_stats(bp);
+ mac_stats->FramesTransmittedOK = hwstat->tx_frames;
+ mac_stats->SingleCollisionFrames = hwstat->tx_single_collision_frames;
+ mac_stats->MultipleCollisionFrames =
+ hwstat->tx_multiple_collision_frames;
+ mac_stats->FramesReceivedOK = hwstat->rx_frames;
+ mac_stats->FrameCheckSequenceErrors =
+ hwstat->rx_frame_check_sequence_errors;
+ mac_stats->AlignmentErrors = hwstat->rx_alignment_errors;
+ mac_stats->OctetsTransmittedOK = hwstat->tx_octets;
+ mac_stats->FramesWithDeferredXmissions = hwstat->tx_deferred_frames;
+ mac_stats->LateCollisions = hwstat->tx_late_collisions;
+ mac_stats->FramesAbortedDueToXSColls = hwstat->tx_excessive_collisions;
+ mac_stats->FramesLostDueToIntMACXmitError = hwstat->tx_underrun;
+ mac_stats->CarrierSenseErrors = hwstat->tx_carrier_sense_errors;
+ mac_stats->OctetsReceivedOK = hwstat->rx_octets;
+ mac_stats->MulticastFramesXmittedOK = hwstat->tx_multicast_frames;
+ mac_stats->BroadcastFramesXmittedOK = hwstat->tx_broadcast_frames;
+ mac_stats->MulticastFramesReceivedOK = hwstat->rx_multicast_frames;
+ mac_stats->BroadcastFramesReceivedOK = hwstat->rx_broadcast_frames;
+ mac_stats->InRangeLengthErrors = hwstat->rx_length_field_frame_errors;
+ mac_stats->FrameTooLongErrors = hwstat->rx_oversize_frames;
+ spin_unlock_irq(&bp->stats_lock);
+}
+
+/* TODO: Report SQE test errors when added to phy_stats */
+static void macb_get_eth_phy_stats(struct net_device *dev,
+ struct ethtool_eth_phy_stats *phy_stats)
+{
+ struct macb *bp = netdev_priv(dev);
+ struct macb_stats *hwstat = &bp->hw_stats.macb;
+
+ spin_lock_irq(&bp->stats_lock);
+ macb_update_stats(bp);
+ phy_stats->SymbolErrorDuringCarrier = hwstat->rx_symbol_errors;
+ spin_unlock_irq(&bp->stats_lock);
+}
+
+static void gem_get_eth_phy_stats(struct net_device *dev,
+ struct ethtool_eth_phy_stats *phy_stats)
+{
+ struct macb *bp = netdev_priv(dev);
+ struct gem_stats *hwstat = &bp->hw_stats.gem;
+
+ spin_lock_irq(&bp->stats_lock);
+ gem_update_stats(bp);
+ phy_stats->SymbolErrorDuringCarrier = hwstat->rx_symbol_errors;
+ spin_unlock_irq(&bp->stats_lock);
+}
+
+static void macb_get_rmon_stats(struct net_device *dev,
+ struct ethtool_rmon_stats *rmon_stats,
+ const struct ethtool_rmon_hist_range **ranges)
+{
+ struct macb *bp = netdev_priv(dev);
+ struct macb_stats *hwstat = &bp->hw_stats.macb;
+
+ spin_lock_irq(&bp->stats_lock);
+ macb_update_stats(bp);
+ rmon_stats->undersize_pkts = hwstat->rx_undersize_pkts;
+ rmon_stats->oversize_pkts = hwstat->rx_oversize_pkts;
+ rmon_stats->jabbers = hwstat->rx_jabbers;
+ spin_unlock_irq(&bp->stats_lock);
+}
+
+static const struct ethtool_rmon_hist_range gem_rmon_ranges[] = {
+ { 64, 64 },
+ { 65, 127 },
+ { 128, 255 },
+ { 256, 511 },
+ { 512, 1023 },
+ { 1024, 1518 },
+ { 1519, 16384 },
+ { },
+};
+
+static void gem_get_rmon_stats(struct net_device *dev,
+ struct ethtool_rmon_stats *rmon_stats,
+ const struct ethtool_rmon_hist_range **ranges)
+{
+ struct macb *bp = netdev_priv(dev);
+ struct gem_stats *hwstat = &bp->hw_stats.gem;
+
+ spin_lock_irq(&bp->stats_lock);
+ gem_update_stats(bp);
+ rmon_stats->undersize_pkts = hwstat->rx_undersized_frames;
+ rmon_stats->oversize_pkts = hwstat->rx_oversize_frames;
+ rmon_stats->jabbers = hwstat->rx_jabbers;
+ rmon_stats->hist[0] = hwstat->rx_64_byte_frames;
+ rmon_stats->hist[1] = hwstat->rx_65_127_byte_frames;
+ rmon_stats->hist[2] = hwstat->rx_128_255_byte_frames;
+ rmon_stats->hist[3] = hwstat->rx_256_511_byte_frames;
+ rmon_stats->hist[4] = hwstat->rx_512_1023_byte_frames;
+ rmon_stats->hist[5] = hwstat->rx_1024_1518_byte_frames;
+ rmon_stats->hist[6] = hwstat->rx_greater_than_1518_byte_frames;
+ rmon_stats->hist_tx[0] = hwstat->tx_64_byte_frames;
+ rmon_stats->hist_tx[1] = hwstat->tx_65_127_byte_frames;
+ rmon_stats->hist_tx[2] = hwstat->tx_128_255_byte_frames;
+ rmon_stats->hist_tx[3] = hwstat->tx_256_511_byte_frames;
+ rmon_stats->hist_tx[4] = hwstat->tx_512_1023_byte_frames;
+ rmon_stats->hist_tx[5] = hwstat->tx_1024_1518_byte_frames;
+ rmon_stats->hist_tx[6] = hwstat->tx_greater_than_1518_byte_frames;
+ spin_unlock_irq(&bp->stats_lock);
+ *ranges = gem_rmon_ranges;
}
static int macb_get_regs_len(struct net_device *netdev)
@@ -3763,6 +3934,10 @@ static const struct ethtool_ops macb_ethtool_ops = {
.get_regs = macb_get_regs,
.get_link = ethtool_op_get_link,
.get_ts_info = ethtool_op_get_ts_info,
+ .get_pause_stats = macb_get_pause_stats,
+ .get_eth_mac_stats = macb_get_eth_mac_stats,
+ .get_eth_phy_stats = macb_get_eth_phy_stats,
+ .get_rmon_stats = macb_get_rmon_stats,
.get_wol = macb_get_wol,
.set_wol = macb_set_wol,
.get_link_ksettings = macb_get_link_ksettings,
@@ -3781,6 +3956,10 @@ static const struct ethtool_ops gem_ethtool_ops = {
.get_ethtool_stats = gem_get_ethtool_stats,
.get_strings = gem_get_ethtool_strings,
.get_sset_count = gem_get_sset_count,
+ .get_pause_stats = gem_get_pause_stats,
+ .get_eth_mac_stats = gem_get_eth_mac_stats,
+ .get_eth_phy_stats = gem_get_eth_phy_stats,
+ .get_rmon_stats = gem_get_rmon_stats,
.get_link_ksettings = macb_get_link_ksettings,
.set_link_ksettings = macb_set_link_ksettings,
.get_ringparam = macb_get_ringparam,
@@ -3917,7 +4096,7 @@ static const struct net_device_ops macb_netdev_ops = {
.ndo_stop = macb_close,
.ndo_start_xmit = macb_start_xmit,
.ndo_set_rx_mode = macb_set_rx_mode,
- .ndo_get_stats = macb_get_stats,
+ .ndo_get_stats64 = macb_get_stats,
.ndo_eth_ioctl = macb_ioctl,
.ndo_validate_addr = eth_validate_addr,
.ndo_change_mtu = macb_change_mtu,
@@ -4578,7 +4757,7 @@ static const struct net_device_ops at91ether_netdev_ops = {
.ndo_open = at91ether_open,
.ndo_stop = at91ether_close,
.ndo_start_xmit = at91ether_start_xmit,
- .ndo_get_stats = macb_get_stats,
+ .ndo_get_stats64 = macb_get_stats,
.ndo_set_rx_mode = macb_set_rx_mode,
.ndo_set_mac_address = eth_mac_addr,
.ndo_eth_ioctl = macb_ioctl,
diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c
index 9ad49aea2673..ff8f2f9f9cae 100644
--- a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c
+++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c
@@ -49,7 +49,7 @@ static int cn23xx_pf_soft_reset(struct octeon_device *oct)
lio_pci_readq(oct, CN23XX_RST_SOFT_RST);
lio_pci_writeq(oct, 1, CN23XX_RST_SOFT_RST);
- /* Wait for 100ms as Octeon resets. */
+ /* Wait for 100ms as Octeon resets */
mdelay(100);
if (octeon_read_csr64(oct, CN23XX_SLI_SCRATCH1)) {
@@ -61,7 +61,7 @@ static int cn23xx_pf_soft_reset(struct octeon_device *oct)
dev_dbg(&oct->pci_dev->dev, "OCTEON[%d]: Reset completed\n",
oct->octeon_id);
- /* restore the reset value*/
+ /* Restore the reset value */
octeon_write_csr64(oct, CN23XX_WIN_WR_MASK_REG, 0xFF);
return 0;
@@ -121,7 +121,7 @@ u32 cn23xx_pf_get_oq_ticks(struct octeon_device *oct, u32 time_intr_in_us)
oqticks_per_us /= 1024;
	/* time_intr is in microseconds. The next 2 steps give the oq ticks
- * corressponding to time_intr.
+ * corresponding to time_intr.
*/
oqticks_per_us *= time_intr_in_us;
oqticks_per_us /= 1000;
@@ -136,11 +136,11 @@ static void cn23xx_setup_global_mac_regs(struct octeon_device *oct)
u64 reg_val;
u64 temp;
- /* programming SRN and TRS for each MAC(0..3) */
+ /* Programming SRN and TRS for each MAC(0..3) */
dev_dbg(&oct->pci_dev->dev, "%s:Using pcie port %d\n",
__func__, mac_no);
- /* By default, mapping all 64 IOQs to a single MACs */
+ /* By default, map all 64 IOQs to a single MAC */
reg_val =
octeon_read_csr64(oct, CN23XX_SLI_PKT_MAC_RINFO64(mac_no, pf_num));
@@ -164,7 +164,7 @@ static void cn23xx_setup_global_mac_regs(struct octeon_device *oct)
temp = oct->sriov_info.max_vfs & 0xff;
reg_val |= (temp << CN23XX_PKT_MAC_CTL_RINFO_NVFS_BIT_POS);
- /* write these settings to MAC register */
+ /* Write these settings to MAC register */
octeon_write_csr64(oct, CN23XX_SLI_PKT_MAC_RINFO64(mac_no, pf_num),
reg_val);
@@ -183,10 +183,10 @@ static int cn23xx_reset_io_queues(struct octeon_device *oct)
srn = oct->sriov_info.pf_srn;
ern = srn + oct->sriov_info.num_pf_rings;
- /*As per HRM reg description, s/w cant write 0 to ENB. */
- /*to make the queue off, need to set the RST bit. */
+ /* As per HRM reg description, s/w can't write 0 to ENB. */
+ /* We need to set the RST bit, to turn the queue off. */
- /* Reset the Enable bit for all the 64 IQs. */
+ /* Reset the enable bit for all the 64 IQs. */
for (q_no = srn; q_no < ern; q_no++) {
/* set RST bit to 1. This bit applies to both IQ and OQ */
d64 = octeon_read_csr64(oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
@@ -194,7 +194,7 @@ static int cn23xx_reset_io_queues(struct octeon_device *oct)
octeon_write_csr64(oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no), d64);
}
- /*wait until the RST bit is clear or the RST and quite bits are set*/
+ /* Wait until the RST bit is clear or the RST and quiet bits are set */
for (q_no = srn; q_no < ern; q_no++) {
u64 reg_val = octeon_read_csr64(oct,
CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
@@ -245,15 +245,15 @@ static int cn23xx_pf_setup_global_input_regs(struct octeon_device *oct)
if (cn23xx_reset_io_queues(oct))
return -1;
- /** Set the MAC_NUM and PVF_NUM in IQ_PKT_CONTROL reg
- * for all queues.Only PF can set these bits.
+ /* Set the MAC_NUM and PVF_NUM in IQ_PKT_CONTROL reg
+ * for all queues. Only PF can set these bits.
* bits 29:30 indicate the MAC num.
* bits 32:47 indicate the PVF num.
*/
for (q_no = 0; q_no < ern; q_no++) {
reg_val = (u64)oct->pcie_port << CN23XX_PKT_INPUT_CTL_MAC_NUM_POS;
- /* for VF assigned queues. */
+ /* For VF assigned queues. */
if (q_no < oct->sriov_info.pf_srn) {
vf_num = q_no / oct->sriov_info.rings_per_vf;
vf_num += 1; /* VF1, VF2,........ */
@@ -268,7 +268,7 @@ static int cn23xx_pf_setup_global_input_regs(struct octeon_device *oct)
reg_val);
}
- /* Select ES, RO, NS, RDSIZE,DPTR Fomat#0 for
+ /* Select ES, RO, NS, RDSIZE,DPTR Format#0 for
* pf queues
*/
for (q_no = srn; q_no < ern; q_no++) {
@@ -289,7 +289,7 @@ static int cn23xx_pf_setup_global_input_regs(struct octeon_device *oct)
octeon_write_csr64(oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no),
reg_val);
- /* Set WMARK level for triggering PI_INT */
+ /* Set WMARK level to trigger PI_INT */
/* intr_threshold = CN23XX_DEF_IQ_INTR_THRESHOLD & */
intr_threshold = CFG_GET_IQ_INTR_PKT(cn23xx->conf) &
CN23XX_PKT_IN_DONE_WMARK_MASK;
@@ -354,7 +354,7 @@ static void cn23xx_pf_setup_global_output_regs(struct octeon_device *oct)
/* set the ES bit */
reg_val |= (CN23XX_PKT_OUTPUT_CTL_ES);
- /* write all the selected settings */
+ /* Write all the selected settings */
octeon_write_csr(oct, CN23XX_SLI_OQ_PKT_CONTROL(q_no), reg_val);
/* Enabling these interrupt in oct->fn_list.enable_interrupt()
@@ -373,7 +373,7 @@ static void cn23xx_pf_setup_global_output_regs(struct octeon_device *oct)
/** Setting the water mark level for pko back pressure **/
writeq(0x40, (u8 *)oct->mmio[0].hw_addr + CN23XX_SLI_OQ_WMARK);
- /** Disabling setting OQs in reset when ring has no dorebells
+ /* Disabling setting OQs in reset when ring has no doorbells
	 * enabling this will cause head of line blocking
*/
/* Do it only for pass1.1. and pass1.2 */
@@ -383,7 +383,7 @@ static void cn23xx_pf_setup_global_output_regs(struct octeon_device *oct)
CN23XX_SLI_GBL_CONTROL) | 0x2,
(u8 *)oct->mmio[0].hw_addr + CN23XX_SLI_GBL_CONTROL);
- /** Enable channel-level backpressure */
+ /** Enable channel-level backpressure **/
if (oct->pf_num)
writeq(0xffffffffffffffffULL,
(u8 *)oct->mmio[0].hw_addr + CN23XX_SLI_OUT_BP_EN2_W1S);
@@ -396,7 +396,7 @@ static int cn23xx_setup_pf_device_regs(struct octeon_device *oct)
{
cn23xx_enable_error_reporting(oct);
- /* program the MAC(0..3)_RINFO before setting up input/output regs */
+ /* Program the MAC(0..3)_RINFO before setting up input/output regs */
cn23xx_setup_global_mac_regs(oct);
if (cn23xx_pf_setup_global_input_regs(oct))
@@ -410,7 +410,7 @@ static int cn23xx_setup_pf_device_regs(struct octeon_device *oct)
octeon_write_csr64(oct, CN23XX_SLI_WINDOW_CTL,
CN23XX_SLI_WINDOW_CTL_DEFAULT);
- /* set SLI_PKT_IN_JABBER to handle large VXLAN packets */
+ /* Set SLI_PKT_IN_JABBER to handle large VXLAN packets */
octeon_write_csr64(oct, CN23XX_SLI_PKT_IN_JABBER, CN23XX_INPUT_JABBER);
return 0;
}
@@ -574,7 +574,7 @@ static int cn23xx_setup_pf_mbox(struct octeon_device *oct)
mbox->mbox_read_reg = (u8 *)oct->mmio[0].hw_addr +
CN23XX_SLI_PKT_PF_VF_MBOX_SIG(q_no, 1);
- /*Mail Box Thread creation*/
+ /* Mail Box Thread creation */
INIT_DELAYED_WORK(&mbox->mbox_poll_wk.work,
cn23xx_pf_mbox_thread);
mbox->mbox_poll_wk.ctxptr = (void *)mbox;
@@ -626,7 +626,7 @@ static int cn23xx_enable_io_queues(struct octeon_device *oct)
ern = srn + oct->num_iqs;
for (q_no = srn; q_no < ern; q_no++) {
- /* set the corresponding IQ IS_64B bit */
+ /* Set the corresponding IQ IS_64B bit */
if (oct->io_qmask.iq64B & BIT_ULL(q_no - srn)) {
reg_val = octeon_read_csr64(
oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
@@ -635,7 +635,7 @@ static int cn23xx_enable_io_queues(struct octeon_device *oct)
oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no), reg_val);
}
- /* set the corresponding IQ ENB bit */
+ /* Set the corresponding IQ ENB bit */
if (oct->io_qmask.iq & BIT_ULL(q_no - srn)) {
/* IOQs are in reset by default in PEM2 mode,
* clearing reset bit
@@ -681,7 +681,7 @@ static int cn23xx_enable_io_queues(struct octeon_device *oct)
}
for (q_no = srn; q_no < ern; q_no++) {
u32 reg_val;
- /* set the corresponding OQ ENB bit */
+ /* Set the corresponding OQ ENB bit */
if (oct->io_qmask.oq & BIT_ULL(q_no - srn)) {
reg_val = octeon_read_csr(
oct, CN23XX_SLI_OQ_PKT_CONTROL(q_no));
@@ -707,7 +707,7 @@ static void cn23xx_disable_io_queues(struct octeon_device *oct)
for (q_no = srn; q_no < ern; q_no++) {
loop = HZ;
- /* start the Reset for a particular ring */
+ /* Start the Reset for a particular ring */
WRITE_ONCE(d64, octeon_read_csr64(
oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no)));
WRITE_ONCE(d64, READ_ONCE(d64) &
@@ -740,7 +740,7 @@ static void cn23xx_disable_io_queues(struct octeon_device *oct)
loop = HZ;
/* Wait until hardware indicates that the particular IQ
- * is out of reset.It given that SLI_PKT_RING_RST is
+ * is out of reset. Given that SLI_PKT_RING_RST is
* common for both IQs and OQs
*/
WRITE_ONCE(d64, octeon_read_csr64(
@@ -760,7 +760,7 @@ static void cn23xx_disable_io_queues(struct octeon_device *oct)
schedule_timeout_uninterruptible(1);
}
- /* clear the SLI_PKT(0..63)_CNTS[CNT] reg value */
+ /* Clear the SLI_PKT(0..63)_CNTS[CNT] reg value */
WRITE_ONCE(d32, octeon_read_csr(
oct, CN23XX_SLI_OQ_PKTS_SENT(q_no)));
octeon_write_csr(oct, CN23XX_SLI_OQ_PKTS_SENT(q_no),
@@ -793,7 +793,7 @@ static u64 cn23xx_pf_msix_interrupt_handler(void *dev)
if (!pkts_sent || (pkts_sent == 0xFFFFFFFFFFFFFFFFULL))
return ret;
- /* Write count reg in sli_pkt_cnts to clear these int.*/
+ /* Write count reg in sli_pkt_cnts to clear these int. */
if ((pkts_sent & CN23XX_INTR_PO_INT) ||
(pkts_sent & CN23XX_INTR_PI_INT)) {
if (pkts_sent & CN23XX_INTR_PO_INT)
@@ -908,7 +908,7 @@ static u32 cn23xx_bar1_idx_read(struct octeon_device *oct, u32 idx)
oct, CN23XX_PEM_BAR1_INDEX_REG(oct->pcie_port, idx));
}
-/* always call with lock held */
+/* Always call with lock held */
static u32 cn23xx_update_read_index(struct octeon_instr_queue *iq)
{
u32 new_idx;
@@ -919,7 +919,7 @@ static u32 cn23xx_update_read_index(struct octeon_instr_queue *iq)
iq->pkt_in_done = pkt_in_done;
/* Modulo of the new index with the IQ size will give us
- * the new index. The iq->reset_instr_cnt is always zero for
+ * the new index. The iq->reset_instr_cnt is always zero for
* cn23xx, so no extra adjustments are needed.
*/
new_idx = (iq->octeon_read_index +
@@ -934,8 +934,8 @@ static void cn23xx_enable_pf_interrupt(struct octeon_device *oct, u8 intr_flag)
struct octeon_cn23xx_pf *cn23xx = (struct octeon_cn23xx_pf *)oct->chip;
u64 intr_val = 0;
- /* Divide the single write to multiple writes based on the flag. */
- /* Enable Interrupt */
+ /* Divide the single write into multiple writes based on the flag. */
+ /* Enable Interrupts */
if (intr_flag == OCTEON_ALL_INTR) {
writeq(cn23xx->intr_mask64, cn23xx->intr_enb_reg64);
} else if (intr_flag & OCTEON_OUTPUT_INTR) {
@@ -990,7 +990,7 @@ static int cn23xx_get_pf_num(struct octeon_device *oct)
ret = 0;
- /** Read Function Dependency Link reg to get the function number */
+ /* Read Function Dependency Link reg to get the function number */
if (pci_read_config_dword(oct->pci_dev, CN23XX_PCIE_SRIOV_FDL,
&fdl_bit) == 0) {
oct->pf_num = ((fdl_bit >> CN23XX_PCIE_SRIOV_FDL_BIT_POS) &
@@ -1003,13 +1003,13 @@ static int cn23xx_get_pf_num(struct octeon_device *oct)
* In this case, read the PF number from the
* SLI_PKT0_INPUT_CONTROL reg (written by f/w)
*/
- pkt0_in_ctl = octeon_read_csr64(oct,
- CN23XX_SLI_IQ_PKT_CONTROL64(0));
+ pkt0_in_ctl =
+ octeon_read_csr64(oct, CN23XX_SLI_IQ_PKT_CONTROL64(0));
pfnum = (pkt0_in_ctl >> CN23XX_PKT_INPUT_CTL_PF_NUM_POS) &
CN23XX_PKT_INPUT_CTL_PF_NUM_MASK;
mac = (octeon_read_csr(oct, CN23XX_SLI_MAC_NUMBER)) & 0xff;
- /* validate PF num by reading RINFO; f/w writes RINFO.trs == 1*/
+ /* Validate PF num by reading RINFO; f/w writes RINFO.trs == 1 */
d64 = octeon_read_csr64(oct,
CN23XX_SLI_PKT_MAC_RINFO64(mac, pfnum));
trs = (int)(d64 >> CN23XX_PKT_MAC_CTL_RINFO_TRS_BIT_POS) & 0xff;
@@ -1252,9 +1252,9 @@ int cn23xx_fw_loaded(struct octeon_device *oct)
u64 val;
/* If there's more than one active PF on this NIC, then that
- * implies that the NIC firmware is loaded and running. This check
+ * implies that the NIC firmware is loaded and running. This check
* prevents a rare false negative that might occur if we only relied
- * on checking the SCR2_BIT_FW_LOADED flag. The false negative would
+ * on checking the SCR2_BIT_FW_LOADED flag. The false negative would
* happen if the PF driver sees SCR2_BIT_FW_LOADED as cleared even
* though the firmware was already loaded but still booting and has yet
* to set SCR2_BIT_FW_LOADED.
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 2f0b3e389e62..551c279dc14b 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -6538,26 +6538,6 @@ out_unlock:
mutex_unlock(&uld_mutex);
}
-static bool cxgb4_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
-{
- struct adapter *adap = netdev2adap(x->xso.dev);
- bool ret = false;
-
- if (!mutex_trylock(&uld_mutex)) {
- dev_dbg(adap->pdev_dev,
- "crypto uld critical resource is under use\n");
- return ret;
- }
- if (chcr_offload_state(adap, CXGB4_XFRMDEV_OPS))
- goto out_unlock;
-
- ret = adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops->xdo_dev_offload_ok(skb, x);
-
-out_unlock:
- mutex_unlock(&uld_mutex);
- return ret;
-}
-
static void cxgb4_advance_esn_state(struct xfrm_state *x)
{
struct adapter *adap = netdev2adap(x->xso.dev);
@@ -6583,7 +6563,6 @@ static const struct xfrmdev_ops cxgb4_xfrmdev_ops = {
.xdo_dev_state_add = cxgb4_xfrm_add_state,
.xdo_dev_state_delete = cxgb4_xfrm_del_state,
.xdo_dev_state_free = cxgb4_xfrm_free_state,
- .xdo_dev_offload_ok = cxgb4_ipsec_offload_ok,
.xdo_dev_state_advance_esn = cxgb4_advance_esn_state,
};
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c b/drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c
index c7338ac6a5bb..baba96883f48 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c
+++ b/drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c
@@ -71,7 +71,6 @@
static LIST_HEAD(uld_ctx_list);
static DEFINE_MUTEX(dev_mutex);
-static bool ch_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x);
static int ch_ipsec_uld_state_change(void *handle, enum cxgb4_state new_state);
static int ch_ipsec_xmit(struct sk_buff *skb, struct net_device *dev);
static void *ch_ipsec_uld_add(const struct cxgb4_lld_info *infop);
@@ -85,7 +84,6 @@ static const struct xfrmdev_ops ch_ipsec_xfrmdev_ops = {
.xdo_dev_state_add = ch_ipsec_xfrm_add_state,
.xdo_dev_state_delete = ch_ipsec_xfrm_del_state,
.xdo_dev_state_free = ch_ipsec_xfrm_free_state,
- .xdo_dev_offload_ok = ch_ipsec_offload_ok,
.xdo_dev_state_advance_esn = ch_ipsec_advance_esn_state,
};
@@ -323,20 +321,6 @@ static void ch_ipsec_xfrm_free_state(struct xfrm_state *x)
module_put(THIS_MODULE);
}
-static bool ch_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
-{
- if (x->props.family == AF_INET) {
- /* Offload with IP options is not supported yet */
- if (ip_hdr(skb)->ihl > 5)
- return false;
- } else {
- /* Offload with IPv6 extension headers is not support yet */
- if (ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr))
- return false;
- }
- return true;
-}
-
static void ch_ipsec_advance_esn_state(struct xfrm_state *x)
{
/* do nothing */
diff --git a/drivers/net/ethernet/cisco/enic/Kconfig b/drivers/net/ethernet/cisco/enic/Kconfig
index ad80c0fa96a6..96709875fe4f 100644
--- a/drivers/net/ethernet/cisco/enic/Kconfig
+++ b/drivers/net/ethernet/cisco/enic/Kconfig
@@ -6,5 +6,6 @@
config ENIC
tristate "Cisco VIC Ethernet NIC Support"
depends on PCI
+ select PAGE_POOL
help
This enables the support for the Cisco VIC Ethernet card.
diff --git a/drivers/net/ethernet/cisco/enic/Makefile b/drivers/net/ethernet/cisco/enic/Makefile
index c3b6febfdbe4..a96b8332e6e2 100644
--- a/drivers/net/ethernet/cisco/enic/Makefile
+++ b/drivers/net/ethernet/cisco/enic/Makefile
@@ -3,5 +3,5 @@ obj-$(CONFIG_ENIC) := enic.o
enic-y := enic_main.o vnic_cq.o vnic_intr.o vnic_wq.o \
enic_res.o enic_dev.o enic_pp.o vnic_dev.o vnic_rq.o vnic_vic.o \
- enic_ethtool.o enic_api.o enic_clsf.o
+ enic_ethtool.o enic_api.o enic_clsf.o enic_rq.o enic_wq.o
diff --git a/drivers/net/ethernet/cisco/enic/cq_desc.h b/drivers/net/ethernet/cisco/enic/cq_desc.h
index 462c5435a206..bfb3f14e89f5 100644
--- a/drivers/net/ethernet/cisco/enic/cq_desc.h
+++ b/drivers/net/ethernet/cisco/enic/cq_desc.h
@@ -40,28 +40,7 @@ struct cq_desc {
#define CQ_DESC_COMP_NDX_BITS 12
#define CQ_DESC_COMP_NDX_MASK ((1 << CQ_DESC_COMP_NDX_BITS) - 1)
-static inline void cq_desc_dec(const struct cq_desc *desc_arg,
- u8 *type, u8 *color, u16 *q_number, u16 *completed_index)
-{
- const struct cq_desc *desc = desc_arg;
- const u8 type_color = desc->type_color;
-
- *color = (type_color >> CQ_DESC_COLOR_SHIFT) & CQ_DESC_COLOR_MASK;
-
- /*
- * Make sure color bit is read from desc *before* other fields
- * are read from desc. Hardware guarantees color bit is last
- * bit (byte) written. Adding the rmb() prevents the compiler
- * and/or CPU from reordering the reads which would potentially
- * result in reading stale values.
- */
-
- rmb();
-
- *type = type_color & CQ_DESC_TYPE_MASK;
- *q_number = le16_to_cpu(desc->q_number) & CQ_DESC_Q_NUM_MASK;
- *completed_index = le16_to_cpu(desc->completed_index) &
- CQ_DESC_COMP_NDX_MASK;
-}
+#define CQ_DESC_32_FI_MASK (BIT(0) | BIT(1))
+#define CQ_DESC_64_FI_MASK (BIT(0) | BIT(1))
#endif /* _CQ_DESC_H_ */
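
The two FI masks added above carry the extra completion-index bits used by the 32B and 64B CQ entry formats. Below is a minimal userspace sketch of how enic_rq_cq_desc_dec() (added in enic_rq.c later in this series) combines them with the legacy 12-bit field; the mask values are copied from the patch, while the helper and main() are illustrative only:

#include <stdint.h>
#include <stdio.h>

#define CQ_DESC_COMP_NDX_BITS	12
#define CQ_DESC_COMP_NDX_MASK	((1 << CQ_DESC_COMP_NDX_BITS) - 1)
#define CQ_DESC_32_FI_MASK	0x3	/* BIT(0) | BIT(1) */

/* Widen the 12-bit completed index with the two fetch_index_flags bits,
 * mirroring the 32B/64B decode paths in enic_rq_cq_desc_dec(). */
static uint16_t widen_completed_index(uint16_t completed_index_flags,
				      uint8_t fetch_index_flags)
{
	uint16_t idx = completed_index_flags & CQ_DESC_COMP_NDX_MASK;

	idx |= (uint16_t)(fetch_index_flags & CQ_DESC_32_FI_MASK)
		<< CQ_DESC_COMP_NDX_BITS;
	return idx;
}

int main(void)
{
	/* base index 0xfff plus FI bits 0b11 -> 0x3fff */
	printf("0x%04x\n", widen_completed_index(0x0fff, 0x03));
	return 0;
}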
diff --git a/drivers/net/ethernet/cisco/enic/cq_enet_desc.h b/drivers/net/ethernet/cisco/enic/cq_enet_desc.h
index d25426470a29..50787cff29db 100644
--- a/drivers/net/ethernet/cisco/enic/cq_enet_desc.h
+++ b/drivers/net/ethernet/cisco/enic/cq_enet_desc.h
@@ -17,12 +17,22 @@ struct cq_enet_wq_desc {
u8 type_color;
};
-static inline void cq_enet_wq_desc_dec(struct cq_enet_wq_desc *desc,
- u8 *type, u8 *color, u16 *q_number, u16 *completed_index)
-{
- cq_desc_dec((struct cq_desc *)desc, type,
- color, q_number, completed_index);
-}
+/*
+ * Defines and Capabilities for CMD_CQ_ENTRY_SIZE_SET
+ */
+#define VNIC_RQ_ALL (~0ULL)
+
+#define VNIC_RQ_CQ_ENTRY_SIZE_16 0
+#define VNIC_RQ_CQ_ENTRY_SIZE_32 1
+#define VNIC_RQ_CQ_ENTRY_SIZE_64 2
+
+#define VNIC_RQ_CQ_ENTRY_SIZE_16_CAPABLE BIT(VNIC_RQ_CQ_ENTRY_SIZE_16)
+#define VNIC_RQ_CQ_ENTRY_SIZE_32_CAPABLE BIT(VNIC_RQ_CQ_ENTRY_SIZE_32)
+#define VNIC_RQ_CQ_ENTRY_SIZE_64_CAPABLE BIT(VNIC_RQ_CQ_ENTRY_SIZE_64)
+
+#define VNIC_RQ_CQ_ENTRY_SIZE_ALL_BIT (VNIC_RQ_CQ_ENTRY_SIZE_16_CAPABLE | \
+ VNIC_RQ_CQ_ENTRY_SIZE_32_CAPABLE | \
+ VNIC_RQ_CQ_ENTRY_SIZE_64_CAPABLE)
/* Completion queue descriptor: Ethernet receive queue, 16B */
struct cq_enet_rq_desc {
@@ -36,6 +46,45 @@ struct cq_enet_rq_desc {
u8 type_color;
};
+/* Completion queue descriptor: Ethernet receive queue, 32B */
+struct cq_enet_rq_desc_32 {
+ __le16 completed_index_flags;
+ __le16 q_number_rss_type_flags;
+ __le32 rss_hash;
+ __le16 bytes_written_flags;
+ __le16 vlan;
+ __le16 checksum_fcoe;
+ u8 flags;
+ u8 fetch_index_flags;
+ __le32 time_stamp;
+ __le16 time_stamp2;
+ __le16 pie_info;
+ __le32 pie_info2;
+ __le16 pie_info3;
+ u8 pie_info4;
+ u8 type_color;
+};
+
+/* Completion queue descriptor: Ethernet receive queue, 64B */
+struct cq_enet_rq_desc_64 {
+ __le16 completed_index_flags;
+ __le16 q_number_rss_type_flags;
+ __le32 rss_hash;
+ __le16 bytes_written_flags;
+ __le16 vlan;
+ __le16 checksum_fcoe;
+ u8 flags;
+ u8 fetch_index_flags;
+ __le32 time_stamp;
+ __le16 time_stamp2;
+ __le16 pie_info;
+ __le32 pie_info2;
+ __le16 pie_info3;
+ u8 pie_info4;
+ u8 reserved[32];
+ u8 type_color;
+};
+
#define CQ_ENET_RQ_DESC_FLAGS_INGRESS_PORT (0x1 << 12)
#define CQ_ENET_RQ_DESC_FLAGS_FCOE (0x1 << 13)
#define CQ_ENET_RQ_DESC_FLAGS_EOP (0x1 << 14)
@@ -88,85 +137,4 @@ struct cq_enet_rq_desc {
#define CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT (0x1 << 6)
#define CQ_ENET_RQ_DESC_FLAGS_FCS_OK (0x1 << 7)
-static inline void cq_enet_rq_desc_dec(struct cq_enet_rq_desc *desc,
- u8 *type, u8 *color, u16 *q_number, u16 *completed_index,
- u8 *ingress_port, u8 *fcoe, u8 *eop, u8 *sop, u8 *rss_type,
- u8 *csum_not_calc, u32 *rss_hash, u16 *bytes_written, u8 *packet_error,
- u8 *vlan_stripped, u16 *vlan_tci, u16 *checksum, u8 *fcoe_sof,
- u8 *fcoe_fc_crc_ok, u8 *fcoe_enc_error, u8 *fcoe_eof,
- u8 *tcp_udp_csum_ok, u8 *udp, u8 *tcp, u8 *ipv4_csum_ok,
- u8 *ipv6, u8 *ipv4, u8 *ipv4_fragment, u8 *fcs_ok)
-{
- u16 completed_index_flags;
- u16 q_number_rss_type_flags;
- u16 bytes_written_flags;
-
- cq_desc_dec((struct cq_desc *)desc, type,
- color, q_number, completed_index);
-
- completed_index_flags = le16_to_cpu(desc->completed_index_flags);
- q_number_rss_type_flags =
- le16_to_cpu(desc->q_number_rss_type_flags);
- bytes_written_flags = le16_to_cpu(desc->bytes_written_flags);
-
- *ingress_port = (completed_index_flags &
- CQ_ENET_RQ_DESC_FLAGS_INGRESS_PORT) ? 1 : 0;
- *fcoe = (completed_index_flags & CQ_ENET_RQ_DESC_FLAGS_FCOE) ?
- 1 : 0;
- *eop = (completed_index_flags & CQ_ENET_RQ_DESC_FLAGS_EOP) ?
- 1 : 0;
- *sop = (completed_index_flags & CQ_ENET_RQ_DESC_FLAGS_SOP) ?
- 1 : 0;
-
- *rss_type = (u8)((q_number_rss_type_flags >> CQ_DESC_Q_NUM_BITS) &
- CQ_ENET_RQ_DESC_RSS_TYPE_MASK);
- *csum_not_calc = (q_number_rss_type_flags &
- CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC) ? 1 : 0;
-
- *rss_hash = le32_to_cpu(desc->rss_hash);
-
- *bytes_written = bytes_written_flags &
- CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK;
- *packet_error = (bytes_written_flags &
- CQ_ENET_RQ_DESC_FLAGS_TRUNCATED) ? 1 : 0;
- *vlan_stripped = (bytes_written_flags &
- CQ_ENET_RQ_DESC_FLAGS_VLAN_STRIPPED) ? 1 : 0;
-
- /*
- * Tag Control Information(16) = user_priority(3) + cfi(1) + vlan(12)
- */
- *vlan_tci = le16_to_cpu(desc->vlan);
-
- if (*fcoe) {
- *fcoe_sof = (u8)(le16_to_cpu(desc->checksum_fcoe) &
- CQ_ENET_RQ_DESC_FCOE_SOF_MASK);
- *fcoe_fc_crc_ok = (desc->flags &
- CQ_ENET_RQ_DESC_FCOE_FC_CRC_OK) ? 1 : 0;
- *fcoe_enc_error = (desc->flags &
- CQ_ENET_RQ_DESC_FCOE_ENC_ERROR) ? 1 : 0;
- *fcoe_eof = (u8)((le16_to_cpu(desc->checksum_fcoe) >>
- CQ_ENET_RQ_DESC_FCOE_EOF_SHIFT) &
- CQ_ENET_RQ_DESC_FCOE_EOF_MASK);
- *checksum = 0;
- } else {
- *fcoe_sof = 0;
- *fcoe_fc_crc_ok = 0;
- *fcoe_enc_error = 0;
- *fcoe_eof = 0;
- *checksum = le16_to_cpu(desc->checksum_fcoe);
- }
-
- *tcp_udp_csum_ok =
- (desc->flags & CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK) ? 1 : 0;
- *udp = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_UDP) ? 1 : 0;
- *tcp = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_TCP) ? 1 : 0;
- *ipv4_csum_ok =
- (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK) ? 1 : 0;
- *ipv6 = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV6) ? 1 : 0;
- *ipv4 = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4) ? 1 : 0;
- *ipv4_fragment =
- (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT) ? 1 : 0;
- *fcs_ok = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_FCS_OK) ? 1 : 0;
-}
-
#endif /* _CQ_ENET_DESC_H_ */
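
Both new receive completion layouts keep the original 16-byte fields as a prefix and keep type_color as the very last byte, which is what lets a single decode path read the color bit first for any entry size. A standalone sketch checking those two invariants for the 32-byte form, with plain uint*_t types standing in for the kernel's __le16/__le32/u8 (assumes a padding-free layout, which holds here since every field sits on its natural alignment):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

/* Userspace mirror of struct cq_enet_rq_desc_32 from the patch. */
struct cq_enet_rq_desc_32_sketch {
	uint16_t completed_index_flags;
	uint16_t q_number_rss_type_flags;
	uint32_t rss_hash;
	uint16_t bytes_written_flags;
	uint16_t vlan;
	uint16_t checksum_fcoe;
	uint8_t  flags;
	uint8_t  fetch_index_flags;
	uint32_t time_stamp;
	uint16_t time_stamp2;
	uint16_t pie_info;
	uint32_t pie_info2;
	uint16_t pie_info3;
	uint8_t  pie_info4;
	uint8_t  type_color;
};

/* 32 bytes total, color byte last: the properties the decode relies on.
 * The 64B form simply adds 32 reserved bytes before type_color. */
static_assert(sizeof(struct cq_enet_rq_desc_32_sketch) == 32,
	      "32B CQ entry layout");
static_assert(offsetof(struct cq_enet_rq_desc_32_sketch, type_color) == 31,
	      "type_color must be the last byte");

int main(void) { return 0; }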
diff --git a/drivers/net/ethernet/cisco/enic/enic.h b/drivers/net/ethernet/cisco/enic/enic.h
index 10b7e02ba4d0..9c12e967e9f1 100644
--- a/drivers/net/ethernet/cisco/enic/enic.h
+++ b/drivers/net/ethernet/cisco/enic/enic.h
@@ -17,6 +17,7 @@
#include "vnic_nic.h"
#include "vnic_rss.h"
#include <linux/irq.h>
+#include <net/page_pool/helpers.h>
#define DRV_NAME "enic"
#define DRV_DESCRIPTION "Cisco VIC Ethernet NIC Driver"
@@ -30,6 +31,13 @@
#define ENIC_AIC_LARGE_PKT_DIFF 3
+enum ext_cq {
+ ENIC_RQ_CQ_ENTRY_SIZE_16,
+ ENIC_RQ_CQ_ENTRY_SIZE_32,
+ ENIC_RQ_CQ_ENTRY_SIZE_64,
+ ENIC_RQ_CQ_ENTRY_SIZE_MAX,
+};
+
struct enic_msix_entry {
int requested;
char devname[IFNAMSIZ + 8];
@@ -75,6 +83,10 @@ struct enic_rx_coal {
#define ENIC_SET_INSTANCE (1 << 3)
#define ENIC_SET_HOST (1 << 4)
+#define MAX_TSO BIT(16)
+#define WQ_ENET_MAX_DESC_LEN BIT(WQ_ENET_LEN_BITS)
+#define ENIC_DESC_MAX_SPLITS (MAX_TSO / WQ_ENET_MAX_DESC_LEN + 1)
+
struct enic_port_profile {
u32 set;
u8 request;
@@ -158,6 +170,7 @@ struct enic_rq_stats {
u64 pkt_truncated; /* truncated pkts */
u64 no_skb; /* out of skbs */
u64 desc_skip; /* Rx pkt went into later buffer */
+ u64 pp_alloc_fail; /* page pool alloc failure */
};
struct enic_wq {
@@ -169,6 +182,7 @@ struct enic_wq {
struct enic_rq {
struct vnic_rq vrq;
struct enic_rq_stats stats;
+ struct page_pool *pool;
} ____cacheline_aligned;
/* Per-instance private data structure */
@@ -223,9 +237,9 @@ struct enic {
unsigned int cq_avail;
unsigned int cq_count;
struct enic_rfs_flw_tbl rfs_h;
- u32 rx_copybreak;
u8 rss_key[ENIC_RSS_LEN];
struct vnic_gen_stats gen_stats;
+ enum ext_cq ext_cq;
};
static inline struct net_device *vnic_get_netdev(struct vnic_dev *vdev)
@@ -347,5 +361,6 @@ int enic_is_valid_vf(struct enic *enic, int vf);
int enic_is_dynamic(struct enic *enic);
void enic_set_ethtool_ops(struct net_device *netdev);
int __enic_set_rsskey(struct enic *enic);
+void enic_ext_cq(struct enic *enic);
#endif /* _ENIC_H_ */
diff --git a/drivers/net/ethernet/cisco/enic/enic_ethtool.c b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
index d607b4f0542c..529160926a96 100644
--- a/drivers/net/ethernet/cisco/enic/enic_ethtool.c
+++ b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
@@ -222,9 +222,9 @@ static void enic_get_ringparam(struct net_device *netdev,
struct enic *enic = netdev_priv(netdev);
struct vnic_enet_config *c = &enic->config;
- ring->rx_max_pending = ENIC_MAX_RQ_DESCS;
+ ring->rx_max_pending = c->max_rq_ring;
ring->rx_pending = c->rq_desc_count;
- ring->tx_max_pending = ENIC_MAX_WQ_DESCS;
+ ring->tx_max_pending = c->max_wq_ring;
ring->tx_pending = c->wq_desc_count;
}
@@ -252,18 +252,18 @@ static int enic_set_ringparam(struct net_device *netdev,
}
rx_pending = c->rq_desc_count;
tx_pending = c->wq_desc_count;
- if (ring->rx_pending > ENIC_MAX_RQ_DESCS ||
+ if (ring->rx_pending > c->max_rq_ring ||
ring->rx_pending < ENIC_MIN_RQ_DESCS) {
netdev_info(netdev, "rx pending (%u) not in range [%u,%u]",
ring->rx_pending, ENIC_MIN_RQ_DESCS,
- ENIC_MAX_RQ_DESCS);
+ c->max_rq_ring);
return -EINVAL;
}
- if (ring->tx_pending > ENIC_MAX_WQ_DESCS ||
+ if (ring->tx_pending > c->max_wq_ring ||
ring->tx_pending < ENIC_MIN_WQ_DESCS) {
netdev_info(netdev, "tx pending (%u) not in range [%u,%u]",
ring->tx_pending, ENIC_MIN_WQ_DESCS,
- ENIC_MAX_WQ_DESCS);
+ c->max_wq_ring);
return -EINVAL;
}
if (running)
@@ -608,43 +608,6 @@ static int enic_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
return ret;
}
-static int enic_get_tunable(struct net_device *dev,
- const struct ethtool_tunable *tuna, void *data)
-{
- struct enic *enic = netdev_priv(dev);
- int ret = 0;
-
- switch (tuna->id) {
- case ETHTOOL_RX_COPYBREAK:
- *(u32 *)data = enic->rx_copybreak;
- break;
- default:
- ret = -EINVAL;
- break;
- }
-
- return ret;
-}
-
-static int enic_set_tunable(struct net_device *dev,
- const struct ethtool_tunable *tuna,
- const void *data)
-{
- struct enic *enic = netdev_priv(dev);
- int ret = 0;
-
- switch (tuna->id) {
- case ETHTOOL_RX_COPYBREAK:
- enic->rx_copybreak = *(u32 *)data;
- break;
- default:
- ret = -EINVAL;
- break;
- }
-
- return ret;
-}
-
static u32 enic_get_rxfh_key_size(struct net_device *netdev)
{
return ENIC_RSS_LEN;
@@ -727,8 +690,6 @@ static const struct ethtool_ops enic_ethtool_ops = {
.get_coalesce = enic_get_coalesce,
.set_coalesce = enic_set_coalesce,
.get_rxnfc = enic_get_rxnfc,
- .get_tunable = enic_get_tunable,
- .set_tunable = enic_set_tunable,
.get_rxfh_key_size = enic_get_rxfh_key_size,
.get_rxfh = enic_get_rxfh,
.set_rxfh = enic_set_rxfh,
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index 49f6cab01ed5..54aa3953bf7b 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -58,18 +58,15 @@
#include "enic_dev.h"
#include "enic_pp.h"
#include "enic_clsf.h"
+#include "enic_rq.h"
+#include "enic_wq.h"
#define ENIC_NOTIFY_TIMER_PERIOD (2 * HZ)
-#define WQ_ENET_MAX_DESC_LEN (1 << WQ_ENET_LEN_BITS)
-#define MAX_TSO (1 << 16)
-#define ENIC_DESC_MAX_SPLITS (MAX_TSO / WQ_ENET_MAX_DESC_LEN + 1)
#define PCI_DEVICE_ID_CISCO_VIC_ENET 0x0043 /* ethernet vnic */
#define PCI_DEVICE_ID_CISCO_VIC_ENET_DYN 0x0044 /* enet dynamic vnic */
#define PCI_DEVICE_ID_CISCO_VIC_ENET_VF 0x0071 /* enet SRIOV VF */
-#define RX_COPYBREAK_DEFAULT 256
-
/* Supported devices */
static const struct pci_device_id enic_id_table[] = {
{ PCI_VDEVICE(CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET) },
@@ -322,54 +319,6 @@ int enic_is_valid_vf(struct enic *enic, int vf)
#endif
}
-static void enic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
-{
- struct enic *enic = vnic_dev_priv(wq->vdev);
-
- if (buf->sop)
- dma_unmap_single(&enic->pdev->dev, buf->dma_addr, buf->len,
- DMA_TO_DEVICE);
- else
- dma_unmap_page(&enic->pdev->dev, buf->dma_addr, buf->len,
- DMA_TO_DEVICE);
-
- if (buf->os_buf)
- dev_kfree_skb_any(buf->os_buf);
-}
-
-static void enic_wq_free_buf(struct vnic_wq *wq,
- struct cq_desc *cq_desc, struct vnic_wq_buf *buf, void *opaque)
-{
- struct enic *enic = vnic_dev_priv(wq->vdev);
-
- enic->wq[wq->index].stats.cq_work++;
- enic->wq[wq->index].stats.cq_bytes += buf->len;
- enic_free_wq_buf(wq, buf);
-}
-
-static int enic_wq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
- u8 type, u16 q_number, u16 completed_index, void *opaque)
-{
- struct enic *enic = vnic_dev_priv(vdev);
-
- spin_lock(&enic->wq[q_number].lock);
-
- vnic_wq_service(&enic->wq[q_number].vwq, cq_desc,
- completed_index, enic_wq_free_buf,
- opaque);
-
- if (netif_tx_queue_stopped(netdev_get_tx_queue(enic->netdev, q_number)) &&
- vnic_wq_desc_avail(&enic->wq[q_number].vwq) >=
- (MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS)) {
- netif_wake_subqueue(enic->netdev, q_number);
- enic->wq[q_number].stats.wake++;
- }
-
- spin_unlock(&enic->wq[q_number].lock);
-
- return 0;
-}
-
static bool enic_log_q_error(struct enic *enic)
{
unsigned int i;
@@ -1313,243 +1262,6 @@ nla_put_failure:
return -EMSGSIZE;
}
-static void enic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf)
-{
- struct enic *enic = vnic_dev_priv(rq->vdev);
-
- if (!buf->os_buf)
- return;
-
- dma_unmap_single(&enic->pdev->dev, buf->dma_addr, buf->len,
- DMA_FROM_DEVICE);
- dev_kfree_skb_any(buf->os_buf);
- buf->os_buf = NULL;
-}
-
-static int enic_rq_alloc_buf(struct vnic_rq *rq)
-{
- struct enic *enic = vnic_dev_priv(rq->vdev);
- struct net_device *netdev = enic->netdev;
- struct sk_buff *skb;
- unsigned int len = netdev->mtu + VLAN_ETH_HLEN;
- unsigned int os_buf_index = 0;
- dma_addr_t dma_addr;
- struct vnic_rq_buf *buf = rq->to_use;
-
- if (buf->os_buf) {
- enic_queue_rq_desc(rq, buf->os_buf, os_buf_index, buf->dma_addr,
- buf->len);
-
- return 0;
- }
- skb = netdev_alloc_skb_ip_align(netdev, len);
- if (!skb) {
- enic->rq[rq->index].stats.no_skb++;
- return -ENOMEM;
- }
-
- dma_addr = dma_map_single(&enic->pdev->dev, skb->data, len,
- DMA_FROM_DEVICE);
- if (unlikely(enic_dma_map_check(enic, dma_addr))) {
- dev_kfree_skb(skb);
- return -ENOMEM;
- }
-
- enic_queue_rq_desc(rq, skb, os_buf_index,
- dma_addr, len);
-
- return 0;
-}
-
-static void enic_intr_update_pkt_size(struct vnic_rx_bytes_counter *pkt_size,
- u32 pkt_len)
-{
- if (ENIC_LARGE_PKT_THRESHOLD <= pkt_len)
- pkt_size->large_pkt_bytes_cnt += pkt_len;
- else
- pkt_size->small_pkt_bytes_cnt += pkt_len;
-}
-
-static bool enic_rxcopybreak(struct net_device *netdev, struct sk_buff **skb,
- struct vnic_rq_buf *buf, u16 len)
-{
- struct enic *enic = netdev_priv(netdev);
- struct sk_buff *new_skb;
-
- if (len > enic->rx_copybreak)
- return false;
- new_skb = netdev_alloc_skb_ip_align(netdev, len);
- if (!new_skb)
- return false;
- dma_sync_single_for_cpu(&enic->pdev->dev, buf->dma_addr, len,
- DMA_FROM_DEVICE);
- memcpy(new_skb->data, (*skb)->data, len);
- *skb = new_skb;
-
- return true;
-}
-
-static void enic_rq_indicate_buf(struct vnic_rq *rq,
- struct cq_desc *cq_desc, struct vnic_rq_buf *buf,
- int skipped, void *opaque)
-{
- struct enic *enic = vnic_dev_priv(rq->vdev);
- struct net_device *netdev = enic->netdev;
- struct sk_buff *skb;
- struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)];
- struct enic_rq_stats *rqstats = &enic->rq[rq->index].stats;
-
- u8 type, color, eop, sop, ingress_port, vlan_stripped;
- u8 fcoe, fcoe_sof, fcoe_fc_crc_ok, fcoe_enc_error, fcoe_eof;
- u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok;
- u8 ipv6, ipv4, ipv4_fragment, fcs_ok, rss_type, csum_not_calc;
- u8 packet_error;
- u16 q_number, completed_index, bytes_written, vlan_tci, checksum;
- u32 rss_hash;
- bool outer_csum_ok = true, encap = false;
-
- rqstats->packets++;
- if (skipped) {
- rqstats->desc_skip++;
- return;
- }
-
- skb = buf->os_buf;
-
- cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc,
- &type, &color, &q_number, &completed_index,
- &ingress_port, &fcoe, &eop, &sop, &rss_type,
- &csum_not_calc, &rss_hash, &bytes_written,
- &packet_error, &vlan_stripped, &vlan_tci, &checksum,
- &fcoe_sof, &fcoe_fc_crc_ok, &fcoe_enc_error,
- &fcoe_eof, &tcp_udp_csum_ok, &udp, &tcp,
- &ipv4_csum_ok, &ipv6, &ipv4, &ipv4_fragment,
- &fcs_ok);
-
- if (packet_error) {
-
- if (!fcs_ok) {
- if (bytes_written > 0)
- rqstats->bad_fcs++;
- else if (bytes_written == 0)
- rqstats->pkt_truncated++;
- }
-
- dma_unmap_single(&enic->pdev->dev, buf->dma_addr, buf->len,
- DMA_FROM_DEVICE);
- dev_kfree_skb_any(skb);
- buf->os_buf = NULL;
-
- return;
- }
-
- if (eop && bytes_written > 0) {
-
- /* Good receive
- */
- rqstats->bytes += bytes_written;
- if (!enic_rxcopybreak(netdev, &skb, buf, bytes_written)) {
- buf->os_buf = NULL;
- dma_unmap_single(&enic->pdev->dev, buf->dma_addr,
- buf->len, DMA_FROM_DEVICE);
- }
- prefetch(skb->data - NET_IP_ALIGN);
-
- skb_put(skb, bytes_written);
- skb->protocol = eth_type_trans(skb, netdev);
- skb_record_rx_queue(skb, q_number);
- if ((netdev->features & NETIF_F_RXHASH) && rss_hash &&
- (type == 3)) {
- switch (rss_type) {
- case CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv4:
- case CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv6:
- case CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv6_EX:
- skb_set_hash(skb, rss_hash, PKT_HASH_TYPE_L4);
- rqstats->l4_rss_hash++;
- break;
- case CQ_ENET_RQ_DESC_RSS_TYPE_IPv4:
- case CQ_ENET_RQ_DESC_RSS_TYPE_IPv6:
- case CQ_ENET_RQ_DESC_RSS_TYPE_IPv6_EX:
- skb_set_hash(skb, rss_hash, PKT_HASH_TYPE_L3);
- rqstats->l3_rss_hash++;
- break;
- }
- }
- if (enic->vxlan.vxlan_udp_port_number) {
- switch (enic->vxlan.patch_level) {
- case 0:
- if (fcoe) {
- encap = true;
- outer_csum_ok = fcoe_fc_crc_ok;
- }
- break;
- case 2:
- if ((type == 7) &&
- (rss_hash & BIT(0))) {
- encap = true;
- outer_csum_ok = (rss_hash & BIT(1)) &&
- (rss_hash & BIT(2));
- }
- break;
- }
- }
-
- /* Hardware does not provide whole packet checksum. It only
- * provides pseudo checksum. Since hw validates the packet
- * checksum but not provide us the checksum value. use
- * CHECSUM_UNNECESSARY.
- *
- * In case of encap pkt tcp_udp_csum_ok/tcp_udp_csum_ok is
- * inner csum_ok. outer_csum_ok is set by hw when outer udp
- * csum is correct or is zero.
- */
- if ((netdev->features & NETIF_F_RXCSUM) && !csum_not_calc &&
- tcp_udp_csum_ok && outer_csum_ok &&
- (ipv4_csum_ok || ipv6)) {
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- skb->csum_level = encap;
- if (encap)
- rqstats->csum_unnecessary_encap++;
- else
- rqstats->csum_unnecessary++;
- }
-
- if (vlan_stripped) {
- __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci);
- rqstats->vlan_stripped++;
- }
- skb_mark_napi_id(skb, &enic->napi[rq->index]);
- if (!(netdev->features & NETIF_F_GRO))
- netif_receive_skb(skb);
- else
- napi_gro_receive(&enic->napi[q_number], skb);
- if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
- enic_intr_update_pkt_size(&cq->pkt_size_counter,
- bytes_written);
- } else {
-
- /* Buffer overflow
- */
- rqstats->pkt_truncated++;
- dma_unmap_single(&enic->pdev->dev, buf->dma_addr, buf->len,
- DMA_FROM_DEVICE);
- dev_kfree_skb_any(skb);
- buf->os_buf = NULL;
- }
-}
-
-static int enic_rq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
- u8 type, u16 q_number, u16 completed_index, void *opaque)
-{
- struct enic *enic = vnic_dev_priv(vdev);
-
- vnic_rq_service(&enic->rq[q_number].vrq, cq_desc,
- completed_index, VNIC_RQ_RETURN_DESC,
- enic_rq_indicate_buf, opaque);
-
- return 0;
-}
-
static void enic_set_int_moderation(struct enic *enic, struct vnic_rq *rq)
{
unsigned int intr = enic_msix_rq_intr(enic, rq->index);
@@ -1620,12 +1332,10 @@ static int enic_poll(struct napi_struct *napi, int budget)
unsigned int work_done, rq_work_done = 0, wq_work_done;
int err;
- wq_work_done = vnic_cq_service(&enic->cq[cq_wq], wq_work_to_do,
- enic_wq_service, NULL);
+ wq_work_done = enic_wq_cq_service(enic, cq_wq, wq_work_to_do);
if (budget > 0)
- rq_work_done = vnic_cq_service(&enic->cq[cq_rq],
- rq_work_to_do, enic_rq_service, NULL);
+ rq_work_done = enic_rq_cq_service(enic, cq_rq, rq_work_to_do);
/* Accumulate intr event credits for this polling
* cycle. An intr event is the completion of a
@@ -1724,8 +1434,8 @@ static int enic_poll_msix_wq(struct napi_struct *napi, int budget)
wq_irq = wq->index;
cq = enic_cq_wq(enic, wq_irq);
intr = enic_msix_wq_intr(enic, wq_irq);
- wq_work_done = vnic_cq_service(&enic->cq[cq], wq_work_to_do,
- enic_wq_service, NULL);
+
+ wq_work_done = enic_wq_cq_service(enic, cq, wq_work_to_do);
vnic_intr_return_credits(&enic->intr[intr], wq_work_done,
0 /* don't unmask intr */,
@@ -1754,8 +1464,7 @@ static int enic_poll_msix_rq(struct napi_struct *napi, int budget)
*/
if (budget > 0)
- work_done = vnic_cq_service(&enic->cq[cq],
- work_to_do, enic_rq_service, NULL);
+ work_done = enic_rq_cq_service(enic, cq, work_to_do);
/* Return intr event credits for this polling
* cycle. An intr event is the completion of a
@@ -1972,6 +1681,17 @@ static int enic_open(struct net_device *netdev)
struct enic *enic = netdev_priv(netdev);
unsigned int i;
int err, ret;
+ unsigned int max_pkt_len = netdev->mtu + VLAN_ETH_HLEN;
+ struct page_pool_params pp_params = {
+ .order = get_order(max_pkt_len),
+ .pool_size = enic->config.rq_desc_count,
+ .nid = dev_to_node(&enic->pdev->dev),
+ .dev = &enic->pdev->dev,
+ .dma_dir = DMA_FROM_DEVICE,
+ .max_len = (max_pkt_len > PAGE_SIZE) ? max_pkt_len : PAGE_SIZE,
+ .netdev = netdev,
+ .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
+ };
err = enic_request_intr(enic);
if (err) {
@@ -1989,6 +1709,16 @@ static int enic_open(struct net_device *netdev)
}
for (i = 0; i < enic->rq_count; i++) {
+ /* create a page pool for each RQ */
+ pp_params.napi = &enic->napi[i];
+ pp_params.queue_idx = i;
+ enic->rq[i].pool = page_pool_create(&pp_params);
+ if (IS_ERR(enic->rq[i].pool)) {
+ err = PTR_ERR(enic->rq[i].pool);
+ enic->rq[i].pool = NULL;
+ goto err_out_free_rq;
+ }
+
/* enable rq before updating rq desc */
vnic_rq_enable(&enic->rq[i].vrq);
vnic_rq_fill(&enic->rq[i].vrq, enic_rq_alloc_buf);
@@ -2029,8 +1759,11 @@ static int enic_open(struct net_device *netdev)
err_out_free_rq:
for (i = 0; i < enic->rq_count; i++) {
ret = vnic_rq_disable(&enic->rq[i].vrq);
- if (!ret)
+ if (!ret) {
vnic_rq_clean(&enic->rq[i].vrq, enic_free_rq_buf);
+ page_pool_destroy(enic->rq[i].pool);
+ enic->rq[i].pool = NULL;
+ }
}
enic_dev_notify_unset(enic);
err_out_free_intr:
@@ -2088,8 +1821,11 @@ static int enic_stop(struct net_device *netdev)
for (i = 0; i < enic->wq_count; i++)
vnic_wq_clean(&enic->wq[i].vwq, enic_free_wq_buf);
- for (i = 0; i < enic->rq_count; i++)
+ for (i = 0; i < enic->rq_count; i++) {
vnic_rq_clean(&enic->rq[i].vrq, enic_free_rq_buf);
+ page_pool_destroy(enic->rq[i].pool);
+ enic->rq[i].pool = NULL;
+ }
for (i = 0; i < enic->cq_count; i++)
vnic_cq_clean(&enic->cq[i]);
for (i = 0; i < enic->intr_count; i++)
@@ -2405,6 +2141,7 @@ static void enic_reset(struct work_struct *work)
enic_init_vnic_resources(enic);
enic_set_rss_nic_cfg(enic);
enic_dev_set_ig_vlan_rewrite_mode(enic);
+ enic_ext_cq(enic);
enic_open(enic->netdev);
/* Allow infiniband to fiddle with the device again */
@@ -2431,6 +2168,7 @@ static void enic_tx_hang_reset(struct work_struct *work)
enic_init_vnic_resources(enic);
enic_set_rss_nic_cfg(enic);
enic_dev_set_ig_vlan_rewrite_mode(enic);
+ enic_ext_cq(enic);
enic_open(enic->netdev);
/* Allow infiniband to fiddle with the device again */
@@ -2599,6 +2337,7 @@ static void enic_get_queue_stats_rx(struct net_device *dev, int idx,
rxs->hw_drop_overruns = rqstats->pkt_truncated;
rxs->csum_unnecessary = rqstats->csum_unnecessary +
rqstats->csum_unnecessary_encap;
+ rxs->alloc_fail = rqstats->pp_alloc_fail;
}
static void enic_get_queue_stats_tx(struct net_device *dev, int idx,
@@ -2626,6 +2365,7 @@ static void enic_get_base_stats(struct net_device *dev,
rxs->hw_drops = 0;
rxs->hw_drop_overruns = 0;
rxs->csum_unnecessary = 0;
+ rxs->alloc_fail = 0;
txs->bytes = 0;
txs->packets = 0;
txs->csum_none = 0;
@@ -2803,6 +2543,8 @@ static int enic_dev_init(struct enic *enic)
enic_get_res_counts(enic);
+ enic_ext_cq(enic);
+
err = enic_alloc_enic_resources(enic);
if (err) {
dev_err(dev, "Failed to allocate enic resources\n");
@@ -3179,7 +2921,6 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
dev_err(dev, "Cannot register net device, aborting\n");
goto err_out_dev_deinit;
}
- enic->rx_copybreak = RX_COPYBREAK_DEFAULT;
return 0;
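
The per-RQ page pools created in enic_open() above are sized by the RQ descriptor count, and the page order and sync length follow the MTU. A userspace sketch of that arithmetic; get_order_sketch() is a stand-in for the kernel's get_order(), and PAGE_SHIFT 12 (4 KiB pages) is an assumption made only for this example:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* Userspace stand-in for get_order(): smallest page order whose
 * allocation covers 'size' bytes. */
static unsigned int get_order_sketch(unsigned long size)
{
	unsigned int order = 0;

	size = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;	/* pages needed */
	while ((1UL << order) < size)
		order++;
	return order;
}

int main(void)
{
	unsigned long mtu = 9000, vlan_eth_hlen = 18;
	unsigned long max_pkt_len = mtu + vlan_eth_hlen;

	/* At a 9000-byte MTU each pool entry is an order-2 (16 KiB) page,
	 * synced for at most max_pkt_len bytes; at 1500 it stays order 0. */
	printf("order=%u max_len=%lu\n", get_order_sketch(max_pkt_len),
	       max_pkt_len > PAGE_SIZE ? max_pkt_len : PAGE_SIZE);
	return 0;
}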
diff --git a/drivers/net/ethernet/cisco/enic/enic_res.c b/drivers/net/ethernet/cisco/enic/enic_res.c
index 126125199833..bbd3143ed73e 100644
--- a/drivers/net/ethernet/cisco/enic/enic_res.c
+++ b/drivers/net/ethernet/cisco/enic/enic_res.c
@@ -59,31 +59,38 @@ int enic_get_vnic_config(struct enic *enic)
GET_CONFIG(intr_timer_usec);
GET_CONFIG(loop_tag);
GET_CONFIG(num_arfs);
+ GET_CONFIG(max_rq_ring);
+ GET_CONFIG(max_wq_ring);
+ GET_CONFIG(max_cq_ring);
+
+ if (!c->max_wq_ring)
+ c->max_wq_ring = ENIC_MAX_WQ_DESCS_DEFAULT;
+ if (!c->max_rq_ring)
+ c->max_rq_ring = ENIC_MAX_RQ_DESCS_DEFAULT;
+ if (!c->max_cq_ring)
+ c->max_cq_ring = ENIC_MAX_CQ_DESCS_DEFAULT;
c->wq_desc_count =
- min_t(u32, ENIC_MAX_WQ_DESCS,
- max_t(u32, ENIC_MIN_WQ_DESCS,
- c->wq_desc_count));
+ min_t(u32, c->max_wq_ring,
+ max_t(u32, ENIC_MIN_WQ_DESCS, c->wq_desc_count));
c->wq_desc_count &= 0xffffffe0; /* must be aligned to groups of 32 */
c->rq_desc_count =
- min_t(u32, ENIC_MAX_RQ_DESCS,
- max_t(u32, ENIC_MIN_RQ_DESCS,
- c->rq_desc_count));
+ min_t(u32, c->max_rq_ring,
+ max_t(u32, ENIC_MIN_RQ_DESCS, c->rq_desc_count));
c->rq_desc_count &= 0xffffffe0; /* must be aligned to groups of 32 */
if (c->mtu == 0)
c->mtu = 1500;
- c->mtu = min_t(u16, ENIC_MAX_MTU,
- max_t(u16, ENIC_MIN_MTU,
- c->mtu));
+ c->mtu = min_t(u16, ENIC_MAX_MTU, max_t(u16, ENIC_MIN_MTU, c->mtu));
c->intr_timer_usec = min_t(u32, c->intr_timer_usec,
vnic_dev_get_intr_coal_timer_max(enic->vdev));
dev_info(enic_get_dev(enic),
- "vNIC MAC addr %pM wq/rq %d/%d mtu %d\n",
- enic->mac_addr, c->wq_desc_count, c->rq_desc_count, c->mtu);
+ "vNIC MAC addr %pM wq/rq %d/%d max wq/rq/cq %d/%d/%d mtu %d\n",
+ enic->mac_addr, c->wq_desc_count, c->rq_desc_count,
+ c->max_wq_ring, c->max_rq_ring, c->max_cq_ring, c->mtu);
dev_info(enic_get_dev(enic), "vNIC csum tx/rx %s/%s "
"tso/lro %s/%s rss %s intr mode %s type %s timer %d usec "
@@ -312,6 +319,7 @@ void enic_init_vnic_resources(struct enic *enic)
int enic_alloc_vnic_resources(struct enic *enic)
{
enum vnic_dev_intr_mode intr_mode;
+ int rq_cq_desc_size;
unsigned int i;
int err;
@@ -326,6 +334,24 @@ int enic_alloc_vnic_resources(struct enic *enic)
intr_mode == VNIC_DEV_INTR_MODE_MSIX ? "MSI-X" :
"unknown");
+ switch (enic->ext_cq) {
+ case ENIC_RQ_CQ_ENTRY_SIZE_16:
+ rq_cq_desc_size = 16;
+ break;
+ case ENIC_RQ_CQ_ENTRY_SIZE_32:
+ rq_cq_desc_size = 32;
+ break;
+ case ENIC_RQ_CQ_ENTRY_SIZE_64:
+ rq_cq_desc_size = 64;
+ break;
+ default:
+ dev_err(enic_get_dev(enic),
+ "Unable to determine rq cq desc size: %d",
+ enic->ext_cq);
+ err = -ENODEV;
+ goto err_out;
+ }
+
/* Allocate queue resources
*/
@@ -348,8 +374,8 @@ int enic_alloc_vnic_resources(struct enic *enic)
for (i = 0; i < enic->cq_count; i++) {
if (i < enic->rq_count)
err = vnic_cq_alloc(enic->vdev, &enic->cq[i], i,
- enic->config.rq_desc_count,
- sizeof(struct cq_enet_rq_desc));
+ enic->config.rq_desc_count,
+ rq_cq_desc_size);
else
err = vnic_cq_alloc(enic->vdev, &enic->cq[i], i,
enic->config.wq_desc_count,
@@ -380,6 +406,39 @@ int enic_alloc_vnic_resources(struct enic *enic)
err_out_cleanup:
enic_free_vnic_resources(enic);
-
+err_out:
return err;
}
+
+/*
+ * CMD_CQ_ENTRY_SIZE_SET can fail on older hw generations that don't support
+ * that command
+ */
+void enic_ext_cq(struct enic *enic)
+{
+ u64 a0 = CMD_CQ_ENTRY_SIZE_SET, a1 = 0;
+ int wait = 1000;
+ int ret;
+
+ spin_lock_bh(&enic->devcmd_lock);
+ ret = vnic_dev_cmd(enic->vdev, CMD_CAPABILITY, &a0, &a1, wait);
+ if (ret || a0) {
+ dev_info(&enic->pdev->dev,
+ "CMD_CQ_ENTRY_SIZE_SET not supported.");
+ enic->ext_cq = ENIC_RQ_CQ_ENTRY_SIZE_16;
+ goto out;
+ }
+ a1 &= VNIC_RQ_CQ_ENTRY_SIZE_ALL_BIT;
+ enic->ext_cq = fls(a1) - 1;
+ a0 = VNIC_RQ_ALL;
+ a1 = enic->ext_cq;
+ ret = vnic_dev_cmd(enic->vdev, CMD_CQ_ENTRY_SIZE_SET, &a0, &a1, wait);
+ if (ret) {
+ dev_info(&enic->pdev->dev, "CMD_CQ_ENTRY_SIZE_SET failed.");
+ enic->ext_cq = ENIC_RQ_CQ_ENTRY_SIZE_16;
+ }
+out:
+ spin_unlock_bh(&enic->devcmd_lock);
+ dev_info(&enic->pdev->dev, "CQ entry size set to %d bytes",
+ 16 << enic->ext_cq);
+}
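
enic_ext_cq() first asks firmware, via CMD_CAPABILITY with CMD_CQ_ENTRY_SIZE_SET in a0, which entry sizes are supported (a bitmask returned in a1), then picks the largest with fls(a1) - 1, so the resulting ext_cq index maps to an entry size of 16 << ext_cq bytes. A userspace sketch of that selection; fls_sketch() stands in for the kernel's fls():

#include <stdio.h>

#define VNIC_RQ_CQ_ENTRY_SIZE_16_CAPABLE	(1 << 0)
#define VNIC_RQ_CQ_ENTRY_SIZE_32_CAPABLE	(1 << 1)
#define VNIC_RQ_CQ_ENTRY_SIZE_64_CAPABLE	(1 << 2)

/* Stand-in for the kernel's fls(): index of the highest set bit,
 * counting from 1; 0 when no bit is set. */
static int fls_sketch(unsigned int x)
{
	int i = 0;

	while (x) {
		x >>= 1;
		i++;
	}
	return i;
}

int main(void)
{
	/* Firmware reports all three entry sizes as supported. */
	unsigned int a1 = VNIC_RQ_CQ_ENTRY_SIZE_16_CAPABLE |
			  VNIC_RQ_CQ_ENTRY_SIZE_32_CAPABLE |
			  VNIC_RQ_CQ_ENTRY_SIZE_64_CAPABLE;
	int ext_cq = fls_sketch(a1) - 1;	/* largest supported size wins */

	printf("ext_cq=%d -> %d-byte CQ entries\n", ext_cq, 16 << ext_cq);
	return 0;
}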
diff --git a/drivers/net/ethernet/cisco/enic/enic_res.h b/drivers/net/ethernet/cisco/enic/enic_res.h
index b8ee42d297aa..02dca1ae4a22 100644
--- a/drivers/net/ethernet/cisco/enic/enic_res.h
+++ b/drivers/net/ethernet/cisco/enic/enic_res.h
@@ -12,10 +12,13 @@
#include "vnic_wq.h"
#include "vnic_rq.h"
-#define ENIC_MIN_WQ_DESCS 64
-#define ENIC_MAX_WQ_DESCS 4096
-#define ENIC_MIN_RQ_DESCS 64
-#define ENIC_MAX_RQ_DESCS 4096
+#define ENIC_MIN_WQ_DESCS 64
+#define ENIC_MAX_WQ_DESCS_DEFAULT 4096
+#define ENIC_MAX_WQ_DESCS 16384
+#define ENIC_MIN_RQ_DESCS 64
+#define ENIC_MAX_RQ_DESCS 16384
+#define ENIC_MAX_RQ_DESCS_DEFAULT 4096
+#define ENIC_MAX_CQ_DESCS_DEFAULT (64 * 1024)
#define ENIC_MIN_MTU ETH_MIN_MTU
#define ENIC_MAX_MTU 9000
diff --git a/drivers/net/ethernet/cisco/enic/enic_rq.c b/drivers/net/ethernet/cisco/enic/enic_rq.c
new file mode 100644
index 000000000000..ccbf5c9a21d0
--- /dev/null
+++ b/drivers/net/ethernet/cisco/enic/enic_rq.c
@@ -0,0 +1,436 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright 2024 Cisco Systems, Inc. All rights reserved.
+
+#include <linux/skbuff.h>
+#include <linux/if_vlan.h>
+#include <net/busy_poll.h>
+#include "enic.h"
+#include "enic_res.h"
+#include "enic_rq.h"
+#include "vnic_rq.h"
+#include "cq_enet_desc.h"
+
+#define ENIC_LARGE_PKT_THRESHOLD 1000
+
+static void enic_intr_update_pkt_size(struct vnic_rx_bytes_counter *pkt_size,
+ u32 pkt_len)
+{
+ if (pkt_len > ENIC_LARGE_PKT_THRESHOLD)
+ pkt_size->large_pkt_bytes_cnt += pkt_len;
+ else
+ pkt_size->small_pkt_bytes_cnt += pkt_len;
+}
+
+static void enic_rq_cq_desc_dec(void *cq_desc, u8 cq_desc_size, u8 *type,
+ u8 *color, u16 *q_number, u16 *completed_index)
+{
+ /* type_color is the last field for all cq structs */
+ u8 type_color;
+
+ switch (cq_desc_size) {
+ case VNIC_RQ_CQ_ENTRY_SIZE_16: {
+ struct cq_enet_rq_desc *desc =
+ (struct cq_enet_rq_desc *)cq_desc;
+ type_color = desc->type_color;
+
+ /* Make sure color bit is read from desc *before* other fields
+ * are read from desc. Hardware guarantees color bit is last
+ * bit (byte) written. Adding the rmb() prevents the compiler
+ * and/or CPU from reordering the reads which would potentially
+ * result in reading stale values.
+ */
+ rmb();
+
+ *q_number = le16_to_cpu(desc->q_number_rss_type_flags) &
+ CQ_DESC_Q_NUM_MASK;
+ *completed_index = le16_to_cpu(desc->completed_index_flags) &
+ CQ_DESC_COMP_NDX_MASK;
+ break;
+ }
+ case VNIC_RQ_CQ_ENTRY_SIZE_32: {
+ struct cq_enet_rq_desc_32 *desc =
+ (struct cq_enet_rq_desc_32 *)cq_desc;
+ type_color = desc->type_color;
+
+ /* Make sure color bit is read from desc *before* other fields
+ * are read from desc. Hardware guarantees color bit is last
+ * bit (byte) written. Adding the rmb() prevents the compiler
+ * and/or CPU from reordering the reads which would potentially
+ * result in reading stale values.
+ */
+ rmb();
+
+ *q_number = le16_to_cpu(desc->q_number_rss_type_flags) &
+ CQ_DESC_Q_NUM_MASK;
+ *completed_index = le16_to_cpu(desc->completed_index_flags) &
+ CQ_DESC_COMP_NDX_MASK;
+ *completed_index |= (desc->fetch_index_flags & CQ_DESC_32_FI_MASK) <<
+ CQ_DESC_COMP_NDX_BITS;
+ break;
+ }
+ case VNIC_RQ_CQ_ENTRY_SIZE_64: {
+ struct cq_enet_rq_desc_64 *desc =
+ (struct cq_enet_rq_desc_64 *)cq_desc;
+ type_color = desc->type_color;
+
+ /* Make sure color bit is read from desc *before* other fields
+ * are read from desc. Hardware guarantees color bit is last
+ * bit (byte) written. Adding the rmb() prevents the compiler
+ * and/or CPU from reordering the reads which would potentially
+ * result in reading stale values.
+ */
+ rmb();
+
+ *q_number = le16_to_cpu(desc->q_number_rss_type_flags) &
+ CQ_DESC_Q_NUM_MASK;
+ *completed_index = le16_to_cpu(desc->completed_index_flags) &
+ CQ_DESC_COMP_NDX_MASK;
+ *completed_index |= (desc->fetch_index_flags & CQ_DESC_64_FI_MASK) <<
+ CQ_DESC_COMP_NDX_BITS;
+ break;
+ }
+ }
+
+ *color = (type_color >> CQ_DESC_COLOR_SHIFT) & CQ_DESC_COLOR_MASK;
+ *type = type_color & CQ_DESC_TYPE_MASK;
+}
+
+static void enic_rq_set_skb_flags(struct vnic_rq *vrq, u8 type, u32 rss_hash,
+ u8 rss_type, u8 fcoe, u8 fcoe_fc_crc_ok,
+ u8 vlan_stripped, u8 csum_not_calc,
+ u8 tcp_udp_csum_ok, u8 ipv6, u8 ipv4_csum_ok,
+ u16 vlan_tci, struct sk_buff *skb)
+{
+ struct enic *enic = vnic_dev_priv(vrq->vdev);
+ struct net_device *netdev = enic->netdev;
+ struct enic_rq_stats *rqstats = &enic->rq[vrq->index].stats;
+ bool outer_csum_ok = true, encap = false;
+
+ if ((netdev->features & NETIF_F_RXHASH) && rss_hash && type == 3) {
+ switch (rss_type) {
+ case CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv4:
+ case CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv6:
+ case CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv6_EX:
+ skb_set_hash(skb, rss_hash, PKT_HASH_TYPE_L4);
+ rqstats->l4_rss_hash++;
+ break;
+ case CQ_ENET_RQ_DESC_RSS_TYPE_IPv4:
+ case CQ_ENET_RQ_DESC_RSS_TYPE_IPv6:
+ case CQ_ENET_RQ_DESC_RSS_TYPE_IPv6_EX:
+ skb_set_hash(skb, rss_hash, PKT_HASH_TYPE_L3);
+ rqstats->l3_rss_hash++;
+ break;
+ }
+ }
+ if (enic->vxlan.vxlan_udp_port_number) {
+ switch (enic->vxlan.patch_level) {
+ case 0:
+ if (fcoe) {
+ encap = true;
+ outer_csum_ok = fcoe_fc_crc_ok;
+ }
+ break;
+ case 2:
+ if (type == 7 && (rss_hash & BIT(0))) {
+ encap = true;
+ outer_csum_ok = (rss_hash & BIT(1)) &&
+ (rss_hash & BIT(2));
+ }
+ break;
+ }
+ }
+
+ /* Hardware does not provide whole packet checksum. It only
+ * provides pseudo checksum. Since hw validates the packet
+ * checksum but does not provide us the checksum value, use
+ * CHECKSUM_UNNECESSARY.
+ *
+ * In case of encap pkt tcp_udp_csum_ok/tcp_udp_csum_ok is
+ * inner csum_ok. outer_csum_ok is set by hw when outer udp
+ * csum is correct or is zero.
+ */
+ if ((netdev->features & NETIF_F_RXCSUM) && !csum_not_calc &&
+ tcp_udp_csum_ok && outer_csum_ok && (ipv4_csum_ok || ipv6)) {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ skb->csum_level = encap;
+ if (encap)
+ rqstats->csum_unnecessary_encap++;
+ else
+ rqstats->csum_unnecessary++;
+ }
+
+ if (vlan_stripped) {
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci);
+ rqstats->vlan_stripped++;
+ }
+}
+
+/*
+ * The cq_enet_rq_desc decode below uses only the first 15 bytes of the CQ
+ * entry, which are identical for all CQ entry sizes (16, 32 and 64 bytes).
+ */
+static void cq_enet_rq_desc_dec(struct cq_enet_rq_desc *desc, u8 *ingress_port,
+ u8 *fcoe, u8 *eop, u8 *sop, u8 *rss_type,
+ u8 *csum_not_calc, u32 *rss_hash,
+ u16 *bytes_written, u8 *packet_error,
+ u8 *vlan_stripped, u16 *vlan_tci,
+ u16 *checksum, u8 *fcoe_sof,
+ u8 *fcoe_fc_crc_ok, u8 *fcoe_enc_error,
+ u8 *fcoe_eof, u8 *tcp_udp_csum_ok, u8 *udp,
+ u8 *tcp, u8 *ipv4_csum_ok, u8 *ipv6, u8 *ipv4,
+ u8 *ipv4_fragment, u8 *fcs_ok)
+{
+ u16 completed_index_flags;
+ u16 q_number_rss_type_flags;
+ u16 bytes_written_flags;
+
+ completed_index_flags = le16_to_cpu(desc->completed_index_flags);
+ q_number_rss_type_flags =
+ le16_to_cpu(desc->q_number_rss_type_flags);
+ bytes_written_flags = le16_to_cpu(desc->bytes_written_flags);
+
+ *ingress_port = (completed_index_flags &
+ CQ_ENET_RQ_DESC_FLAGS_INGRESS_PORT) ? 1 : 0;
+ *fcoe = (completed_index_flags & CQ_ENET_RQ_DESC_FLAGS_FCOE) ?
+ 1 : 0;
+ *eop = (completed_index_flags & CQ_ENET_RQ_DESC_FLAGS_EOP) ?
+ 1 : 0;
+ *sop = (completed_index_flags & CQ_ENET_RQ_DESC_FLAGS_SOP) ?
+ 1 : 0;
+
+ *rss_type = (u8)((q_number_rss_type_flags >> CQ_DESC_Q_NUM_BITS) &
+ CQ_ENET_RQ_DESC_RSS_TYPE_MASK);
+ *csum_not_calc = (q_number_rss_type_flags &
+ CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC) ? 1 : 0;
+
+ *rss_hash = le32_to_cpu(desc->rss_hash);
+
+ *bytes_written = bytes_written_flags &
+ CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK;
+ *packet_error = (bytes_written_flags &
+ CQ_ENET_RQ_DESC_FLAGS_TRUNCATED) ? 1 : 0;
+ *vlan_stripped = (bytes_written_flags &
+ CQ_ENET_RQ_DESC_FLAGS_VLAN_STRIPPED) ? 1 : 0;
+
+ /*
+ * Tag Control Information(16) = user_priority(3) + cfi(1) + vlan(12)
+ */
+ *vlan_tci = le16_to_cpu(desc->vlan);
+
+ if (*fcoe) {
+ *fcoe_sof = (u8)(le16_to_cpu(desc->checksum_fcoe) &
+ CQ_ENET_RQ_DESC_FCOE_SOF_MASK);
+ *fcoe_fc_crc_ok = (desc->flags &
+ CQ_ENET_RQ_DESC_FCOE_FC_CRC_OK) ? 1 : 0;
+ *fcoe_enc_error = (desc->flags &
+ CQ_ENET_RQ_DESC_FCOE_ENC_ERROR) ? 1 : 0;
+ *fcoe_eof = (u8)((le16_to_cpu(desc->checksum_fcoe) >>
+ CQ_ENET_RQ_DESC_FCOE_EOF_SHIFT) &
+ CQ_ENET_RQ_DESC_FCOE_EOF_MASK);
+ *checksum = 0;
+ } else {
+ *fcoe_sof = 0;
+ *fcoe_fc_crc_ok = 0;
+ *fcoe_enc_error = 0;
+ *fcoe_eof = 0;
+ *checksum = le16_to_cpu(desc->checksum_fcoe);
+ }
+
+ *tcp_udp_csum_ok =
+ (desc->flags & CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK) ? 1 : 0;
+ *udp = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_UDP) ? 1 : 0;
+ *tcp = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_TCP) ? 1 : 0;
+ *ipv4_csum_ok =
+ (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK) ? 1 : 0;
+ *ipv6 = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV6) ? 1 : 0;
+ *ipv4 = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4) ? 1 : 0;
+ *ipv4_fragment =
+ (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT) ? 1 : 0;
+ *fcs_ok = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_FCS_OK) ? 1 : 0;
+}
+
+static bool enic_rq_pkt_error(struct vnic_rq *vrq, u8 packet_error, u8 fcs_ok,
+ u16 bytes_written)
+{
+ struct enic *enic = vnic_dev_priv(vrq->vdev);
+ struct enic_rq_stats *rqstats = &enic->rq[vrq->index].stats;
+
+ if (packet_error) {
+ if (!fcs_ok) {
+ if (bytes_written > 0)
+ rqstats->bad_fcs++;
+ else if (bytes_written == 0)
+ rqstats->pkt_truncated++;
+ }
+ return true;
+ }
+ return false;
+}
+
+int enic_rq_alloc_buf(struct vnic_rq *rq)
+{
+ struct enic *enic = vnic_dev_priv(rq->vdev);
+ struct net_device *netdev = enic->netdev;
+ struct enic_rq *erq = &enic->rq[rq->index];
+ struct enic_rq_stats *rqstats = &erq->stats;
+ unsigned int offset = 0;
+ unsigned int len = netdev->mtu + VLAN_ETH_HLEN;
+ unsigned int os_buf_index = 0;
+ dma_addr_t dma_addr;
+ struct vnic_rq_buf *buf = rq->to_use;
+ struct page *page;
+ unsigned int truesize = len;
+
+ if (buf->os_buf) {
+ enic_queue_rq_desc(rq, buf->os_buf, os_buf_index, buf->dma_addr,
+ buf->len);
+
+ return 0;
+ }
+
+ page = page_pool_dev_alloc(erq->pool, &offset, &truesize);
+ if (unlikely(!page)) {
+ rqstats->pp_alloc_fail++;
+ return -ENOMEM;
+ }
+ buf->offset = offset;
+ buf->truesize = truesize;
+ dma_addr = page_pool_get_dma_addr(page) + offset;
+ enic_queue_rq_desc(rq, (void *)page, os_buf_index, dma_addr, len);
+
+ return 0;
+}
+
+void enic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf)
+{
+ struct enic *enic = vnic_dev_priv(rq->vdev);
+ struct enic_rq *erq = &enic->rq[rq->index];
+
+ if (!buf->os_buf)
+ return;
+
+ page_pool_put_full_page(erq->pool, (struct page *)buf->os_buf, true);
+ buf->os_buf = NULL;
+}
+
+static void enic_rq_indicate_buf(struct enic *enic, struct vnic_rq *rq,
+ struct vnic_rq_buf *buf, void *cq_desc,
+ u8 type, u16 q_number, u16 completed_index)
+{
+ struct sk_buff *skb;
+ struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)];
+ struct enic_rq_stats *rqstats = &enic->rq[rq->index].stats;
+ struct napi_struct *napi;
+
+ u8 eop, sop, ingress_port, vlan_stripped;
+ u8 fcoe, fcoe_sof, fcoe_fc_crc_ok, fcoe_enc_error, fcoe_eof;
+ u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok;
+ u8 ipv6, ipv4, ipv4_fragment, fcs_ok, rss_type, csum_not_calc;
+ u8 packet_error;
+ u16 bytes_written, vlan_tci, checksum;
+ u32 rss_hash;
+
+ rqstats->packets++;
+
+ cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc, &ingress_port,
+ &fcoe, &eop, &sop, &rss_type, &csum_not_calc,
+ &rss_hash, &bytes_written, &packet_error,
+ &vlan_stripped, &vlan_tci, &checksum, &fcoe_sof,
+ &fcoe_fc_crc_ok, &fcoe_enc_error, &fcoe_eof,
+ &tcp_udp_csum_ok, &udp, &tcp, &ipv4_csum_ok, &ipv6,
+ &ipv4, &ipv4_fragment, &fcs_ok);
+
+ if (enic_rq_pkt_error(rq, packet_error, fcs_ok, bytes_written))
+ return;
+
+ if (eop && bytes_written > 0) {
+ /* Good receive
+ */
+ rqstats->bytes += bytes_written;
+ napi = &enic->napi[rq->index];
+ skb = napi_get_frags(napi);
+ if (unlikely(!skb)) {
+ net_warn_ratelimited("%s: skb alloc error rq[%d], desc[%d]\n",
+ enic->netdev->name, rq->index,
+ completed_index);
+ rqstats->no_skb++;
+ return;
+ }
+
+ prefetch(skb->data - NET_IP_ALIGN);
+
+ dma_sync_single_for_cpu(&enic->pdev->dev, buf->dma_addr,
+ bytes_written, DMA_FROM_DEVICE);
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+ (struct page *)buf->os_buf, buf->offset,
+ bytes_written, buf->truesize);
+ skb_record_rx_queue(skb, q_number);
+ enic_rq_set_skb_flags(rq, type, rss_hash, rss_type, fcoe,
+ fcoe_fc_crc_ok, vlan_stripped,
+ csum_not_calc, tcp_udp_csum_ok, ipv6,
+ ipv4_csum_ok, vlan_tci, skb);
+ skb_mark_for_recycle(skb);
+ napi_gro_frags(napi);
+ if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
+ enic_intr_update_pkt_size(&cq->pkt_size_counter,
+ bytes_written);
+ buf->os_buf = NULL;
+ buf->dma_addr = 0;
+ buf = buf->next;
+ } else {
+ /* Buffer overflow
+ */
+ rqstats->pkt_truncated++;
+ }
+}
+
+static void enic_rq_service(struct enic *enic, void *cq_desc, u8 type,
+ u16 q_number, u16 completed_index)
+{
+ struct enic_rq_stats *rqstats = &enic->rq[q_number].stats;
+ struct vnic_rq *vrq = &enic->rq[q_number].vrq;
+ struct vnic_rq_buf *vrq_buf = vrq->to_clean;
+ int skipped;
+
+ while (1) {
+ skipped = (vrq_buf->index != completed_index);
+ if (!skipped)
+ enic_rq_indicate_buf(enic, vrq, vrq_buf, cq_desc, type,
+ q_number, completed_index);
+ else
+ rqstats->desc_skip++;
+
+ vrq->ring.desc_avail++;
+ vrq->to_clean = vrq_buf->next;
+ vrq_buf = vrq_buf->next;
+ if (!skipped)
+ break;
+ }
+}
+
+unsigned int enic_rq_cq_service(struct enic *enic, unsigned int cq_index,
+ unsigned int work_to_do)
+{
+ struct vnic_cq *cq = &enic->cq[cq_index];
+ void *cq_desc = vnic_cq_to_clean(cq);
+ u16 q_number, completed_index;
+ unsigned int work_done = 0;
+ u8 type, color;
+
+ enic_rq_cq_desc_dec(cq_desc, enic->ext_cq, &type, &color, &q_number,
+ &completed_index);
+
+ while (color != cq->last_color) {
+ enic_rq_service(enic, cq_desc, type, q_number, completed_index);
+ vnic_cq_inc_to_clean(cq);
+
+ if (++work_done >= work_to_do)
+ break;
+
+ cq_desc = vnic_cq_to_clean(cq);
+ enic_rq_cq_desc_dec(cq_desc, enic->ext_cq, &type, &color,
+ &q_number, &completed_index);
+ }
+
+ return work_done;
+}
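
enic_rq_cq_service() polls the CQ with the usual color-bit ownership protocol: an entry is fresh while its color differs from cq->last_color, and last_color flips each time to_clean wraps the ring. A small userspace model of that protocol; the ring size, color values and posting pattern are illustrative only:

#include <stdint.h>
#include <stdio.h>

#define RING_SIZE 4

/* Minimal model of the CQ color protocol: hardware toggles the color it
 * writes on every pass over the ring, so "color != last_color" marks a
 * fresh entry and the driver flips last_color whenever to_clean wraps. */
struct cq_model {
	uint8_t color[RING_SIZE];	/* color byte of each entry */
	unsigned int to_clean;
	uint8_t last_color;
};

static int entry_is_new(const struct cq_model *cq)
{
	return cq->color[cq->to_clean] != cq->last_color;
}

static void inc_to_clean(struct cq_model *cq)
{
	if (++cq->to_clean == RING_SIZE) {
		cq->to_clean = 0;
		cq->last_color = !cq->last_color;
	}
}

int main(void)
{
	struct cq_model cq = { .last_color = 0 };
	unsigned int i, serviced = 0;

	/* First pass: hardware fills all four entries with color 1. */
	for (i = 0; i < RING_SIZE; i++)
		cq.color[i] = 1;
	while (entry_is_new(&cq)) {
		serviced++;
		inc_to_clean(&cq);	/* wraps and flips last_color to 1 */
	}

	/* Second pass: two more completions, now written with color 0. */
	cq.color[0] = 0;
	cq.color[1] = 0;
	while (entry_is_new(&cq)) {
		serviced++;
		inc_to_clean(&cq);
	}

	printf("serviced %u completions\n", serviced);	/* prints 6 */
	return 0;
}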
diff --git a/drivers/net/ethernet/cisco/enic/enic_rq.h b/drivers/net/ethernet/cisco/enic/enic_rq.h
new file mode 100644
index 000000000000..98476a7297af
--- /dev/null
+++ b/drivers/net/ethernet/cisco/enic/enic_rq.h
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ * Copyright 2024 Cisco Systems, Inc. All rights reserved.
+ */
+
+unsigned int enic_rq_cq_service(struct enic *enic, unsigned int cq_index,
+ unsigned int work_to_do);
+int enic_rq_alloc_buf(struct vnic_rq *rq);
+void enic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf);
diff --git a/drivers/net/ethernet/cisco/enic/enic_wq.c b/drivers/net/ethernet/cisco/enic/enic_wq.c
new file mode 100644
index 000000000000..07936f8b4231
--- /dev/null
+++ b/drivers/net/ethernet/cisco/enic/enic_wq.c
@@ -0,0 +1,117 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright 2025 Cisco Systems, Inc. All rights reserved.
+
+#include <net/netdev_queues.h>
+#include "enic_res.h"
+#include "enic.h"
+#include "enic_wq.h"
+
+#define ENET_CQ_DESC_COMP_NDX_BITS 14
+#define ENET_CQ_DESC_COMP_NDX_MASK GENMASK(ENET_CQ_DESC_COMP_NDX_BITS - 1, 0)
+
+static void enic_wq_cq_desc_dec(const struct cq_desc *desc_arg, bool ext_wq,
+ u8 *type, u8 *color, u16 *q_number,
+ u16 *completed_index)
+{
+ const struct cq_desc *desc = desc_arg;
+ const u8 type_color = desc->type_color;
+
+ *color = (type_color >> CQ_DESC_COLOR_SHIFT) & CQ_DESC_COLOR_MASK;
+
+ /*
+ * Make sure color bit is read from desc *before* other fields
+ * are read from desc. Hardware guarantees color bit is last
+ * bit (byte) written. Adding the rmb() prevents the compiler
+ * and/or CPU from reordering the reads which would potentially
+ * result in reading stale values.
+ */
+ rmb();
+
+ *type = type_color & CQ_DESC_TYPE_MASK;
+ *q_number = le16_to_cpu(desc->q_number) & CQ_DESC_Q_NUM_MASK;
+
+ if (ext_wq)
+ *completed_index = le16_to_cpu(desc->completed_index) &
+ ENET_CQ_DESC_COMP_NDX_MASK;
+ else
+ *completed_index = le16_to_cpu(desc->completed_index) &
+ CQ_DESC_COMP_NDX_MASK;
+}
+
+void enic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
+{
+ struct enic *enic = vnic_dev_priv(wq->vdev);
+
+ if (buf->sop)
+ dma_unmap_single(&enic->pdev->dev, buf->dma_addr, buf->len,
+ DMA_TO_DEVICE);
+ else
+ dma_unmap_page(&enic->pdev->dev, buf->dma_addr, buf->len,
+ DMA_TO_DEVICE);
+
+ if (buf->os_buf)
+ dev_kfree_skb_any(buf->os_buf);
+}
+
+static void enic_wq_free_buf(struct vnic_wq *wq, struct cq_desc *cq_desc,
+ struct vnic_wq_buf *buf, void *opaque)
+{
+ struct enic *enic = vnic_dev_priv(wq->vdev);
+
+ enic->wq[wq->index].stats.cq_work++;
+ enic->wq[wq->index].stats.cq_bytes += buf->len;
+ enic_free_wq_buf(wq, buf);
+}
+
+static void enic_wq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
+ u8 type, u16 q_number, u16 completed_index)
+{
+ struct enic *enic = vnic_dev_priv(vdev);
+
+ spin_lock(&enic->wq[q_number].lock);
+
+ vnic_wq_service(&enic->wq[q_number].vwq, cq_desc,
+ completed_index, enic_wq_free_buf, NULL);
+
+ if (netif_tx_queue_stopped(netdev_get_tx_queue(enic->netdev, q_number))
+ && vnic_wq_desc_avail(&enic->wq[q_number].vwq) >=
+ (MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS)) {
+ netif_wake_subqueue(enic->netdev, q_number);
+ enic->wq[q_number].stats.wake++;
+ }
+
+ spin_unlock(&enic->wq[q_number].lock);
+}
+
+unsigned int enic_wq_cq_service(struct enic *enic, unsigned int cq_index,
+ unsigned int work_to_do)
+{
+ struct vnic_cq *cq = &enic->cq[cq_index];
+ u16 q_number, completed_index;
+ unsigned int work_done = 0;
+ struct cq_desc *cq_desc;
+ u8 type, color;
+ bool ext_wq;
+
+ ext_wq = cq->ring.size > ENIC_MAX_WQ_DESCS_DEFAULT;
+
+ cq_desc = (struct cq_desc *)vnic_cq_to_clean(cq);
+ enic_wq_cq_desc_dec(cq_desc, ext_wq, &type, &color,
+ &q_number, &completed_index);
+
+ while (color != cq->last_color) {
+ enic_wq_service(cq->vdev, cq_desc, type, q_number,
+ completed_index);
+
+ vnic_cq_inc_to_clean(cq);
+
+ if (++work_done >= work_to_do)
+ break;
+
+ cq_desc = (struct cq_desc *)vnic_cq_to_clean(cq);
+ enic_wq_cq_desc_dec(cq_desc, ext_wq, &type, &color,
+ &q_number, &completed_index);
+ }
+
+ return work_done;
+}
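
The ext_wq branch in enic_wq_cq_desc_dec() above widens completed_index to ENET_CQ_DESC_COMP_NDX_BITS (14) bits. As a quick check (illustrative only, not part of the patch), GENMASK(13, 0) works out to 0x3fff, enough to index the 16384-entry rings that VNIC_WQ_BUF_BLKS_MAX is raised to later in this series, whereas the narrower legacy CQ_DESC_COMP_NDX_MASK only has to cover the default-sized rings:

	/* Illustrative sanity check, not part of the patch. */
	#include <linux/bits.h>
	#include <linux/build_bug.h>

	static_assert(GENMASK(ENET_CQ_DESC_COMP_NDX_BITS - 1, 0) == 0x3fff);	/* indexes 0..16383 */
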
diff --git a/drivers/net/ethernet/cisco/enic/enic_wq.h b/drivers/net/ethernet/cisco/enic/enic_wq.h
new file mode 100644
index 000000000000..12acb3f2fbc9
--- /dev/null
+++ b/drivers/net/ethernet/cisco/enic/enic_wq.h
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ * Copyright 2025 Cisco Systems, Inc. All rights reserved.
+ */
+
+void enic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf);
+unsigned int enic_wq_cq_service(struct enic *enic, unsigned int cq_index,
+ unsigned int work_to_do);
diff --git a/drivers/net/ethernet/cisco/enic/vnic_cq.h b/drivers/net/ethernet/cisco/enic/vnic_cq.h
index eed5bf59e5d2..0e37f5d5e527 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_cq.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_cq.h
@@ -56,45 +56,18 @@ struct vnic_cq {
ktime_t prev_ts;
};
-static inline unsigned int vnic_cq_service(struct vnic_cq *cq,
- unsigned int work_to_do,
- int (*q_service)(struct vnic_dev *vdev, struct cq_desc *cq_desc,
- u8 type, u16 q_number, u16 completed_index, void *opaque),
- void *opaque)
+static inline void *vnic_cq_to_clean(struct vnic_cq *cq)
{
- struct cq_desc *cq_desc;
- unsigned int work_done = 0;
- u16 q_number, completed_index;
- u8 type, color;
-
- cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs +
- cq->ring.desc_size * cq->to_clean);
- cq_desc_dec(cq_desc, &type, &color,
- &q_number, &completed_index);
-
- while (color != cq->last_color) {
-
- if ((*q_service)(cq->vdev, cq_desc, type,
- q_number, completed_index, opaque))
- break;
-
- cq->to_clean++;
- if (cq->to_clean == cq->ring.desc_count) {
- cq->to_clean = 0;
- cq->last_color = cq->last_color ? 0 : 1;
- }
-
- cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs +
- cq->ring.desc_size * cq->to_clean);
- cq_desc_dec(cq_desc, &type, &color,
- &q_number, &completed_index);
+ return ((u8 *)cq->ring.descs + cq->ring.desc_size * cq->to_clean);
+}
- work_done++;
- if (work_done >= work_to_do)
- break;
+static inline void vnic_cq_inc_to_clean(struct vnic_cq *cq)
+{
+ cq->to_clean++;
+ if (cq->to_clean == cq->ring.desc_count) {
+ cq->to_clean = 0;
+ cq->last_color = cq->last_color ? 0 : 1;
}
-
- return work_done;
}
void vnic_cq_free(struct vnic_cq *cq);
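
With the monolithic vnic_cq_service() removed, each consumer now builds its own loop from vnic_cq_to_clean() and vnic_cq_inc_to_clean(), which is what lets the WQ and RQ paths decode their descriptors differently. A minimal sketch of the shared pattern, where handle_one() stands in for a hypothetical per-queue handler:

	struct cq_desc *desc = vnic_cq_to_clean(cq);
	u8 color = (desc->type_color >> CQ_DESC_COLOR_SHIFT) & CQ_DESC_COLOR_MASK;
	unsigned int done = 0;

	while (color != cq->last_color && done < work_to_do) {
		rmb();				/* color must be read before the payload */
		handle_one(desc);		/* hypothetical per-queue handler */
		vnic_cq_inc_to_clean(cq);	/* flips last_color when the ring wraps */
		done++;
		desc = vnic_cq_to_clean(cq);
		color = (desc->type_color >> CQ_DESC_COLOR_SHIFT) & CQ_DESC_COLOR_MASK;
	}
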
diff --git a/drivers/net/ethernet/cisco/enic/vnic_devcmd.h b/drivers/net/ethernet/cisco/enic/vnic_devcmd.h
index db56d778877a..605ef17f967e 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_devcmd.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_devcmd.h
@@ -436,6 +436,25 @@ enum vnic_devcmd_cmd {
* in: (u16) a2 = unsigned short int port information
*/
CMD_OVERLAY_OFFLOAD_CFG = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 73),
+
+ /*
+ * Set extended CQ field in MREGS of RQ (or all RQs)
+ * for given vNIC
+ * in: (u64) a0 = RQ selection (VNIC_RQ_ALL for all RQs)
+ * (u32) a1 = CQ entry size
+ * VNIC_RQ_CQ_ENTRY_SIZE_16 --> 16 bytes
+ * VNIC_RQ_CQ_ENTRY_SIZE_32 --> 32 bytes
+ * VNIC_RQ_CQ_ENTRY_SIZE_64 --> 64 bytes
+ *
+ * Capability query:
+ * out: (u32) a0 = errno, 0:valid cmd
+ * (u32) a1 = value consisting of supported entries
+ * bit 0: 16 bytes
+ * bit 1: 32 bytes
+ * bit 2: 64 bytes
+ */
+ CMD_CQ_ENTRY_SIZE_SET = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 90),
+
};
/* CMD_ENABLE2 flags */
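
CMD_CQ_ENTRY_SIZE_SET is a write-direction command with a capability-query form, so a caller would presumably go through the normal devcmd path. A hedged sketch, assuming vnic_dev_cmd() keeps its existing (vdev, cmd, &a0, &a1, wait) signature and that VNIC_RQ_ALL and the VNIC_RQ_CQ_ENTRY_SIZE_* constants referenced in the comment are defined elsewhere in the series:

	/* Hypothetical helper, not part of the patch. */
	static int enic_set_cq_entry_size(struct vnic_dev *vdev, u64 rq_sel, u32 size)
	{
		u64 a0 = rq_sel;	/* one RQ index, or VNIC_RQ_ALL */
		u64 a1 = size;		/* VNIC_RQ_CQ_ENTRY_SIZE_16/32/64 */
		int wait = 1000;

		return vnic_dev_cmd(vdev, CMD_CQ_ENTRY_SIZE_SET, &a0, &a1, wait);
	}
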
diff --git a/drivers/net/ethernet/cisco/enic/vnic_enet.h b/drivers/net/ethernet/cisco/enic/vnic_enet.h
index 5acc236069de..9e8e86262a3f 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_enet.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_enet.h
@@ -21,6 +21,11 @@ struct vnic_enet_config {
u16 loop_tag;
u16 vf_rq_count;
u16 num_arfs;
+ u8 reserved[66];
+ u32 max_rq_ring; // MAX RQ ring size
+ u32 max_wq_ring; // MAX WQ ring size
+ u32 max_cq_ring; // MAX CQ ring size
+ u32 rdma_rsvd_lkey; // Reserved (privileged) LKey
};
#define VENETF_TSO 0x1 /* TSO enabled */
diff --git a/drivers/net/ethernet/cisco/enic/vnic_rq.h b/drivers/net/ethernet/cisco/enic/vnic_rq.h
index 0bc595abc03b..a1cdd729caec 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_rq.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_rq.h
@@ -50,7 +50,7 @@ struct vnic_rq_ctrl {
(VNIC_RQ_BUF_BLK_ENTRIES(entries) * sizeof(struct vnic_rq_buf))
#define VNIC_RQ_BUF_BLKS_NEEDED(entries) \
DIV_ROUND_UP(entries, VNIC_RQ_BUF_BLK_ENTRIES(entries))
-#define VNIC_RQ_BUF_BLKS_MAX VNIC_RQ_BUF_BLKS_NEEDED(4096)
+#define VNIC_RQ_BUF_BLKS_MAX VNIC_RQ_BUF_BLKS_NEEDED(16384)
struct vnic_rq_buf {
struct vnic_rq_buf *next;
@@ -61,6 +61,8 @@ struct vnic_rq_buf {
unsigned int index;
void *desc;
uint64_t wr_id;
+ unsigned int offset;
+ unsigned int truesize;
};
enum enic_poll_state {
diff --git a/drivers/net/ethernet/cisco/enic/vnic_wq.h b/drivers/net/ethernet/cisco/enic/vnic_wq.h
index 75c526911074..3bb4758100ba 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_wq.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_wq.h
@@ -62,7 +62,7 @@ struct vnic_wq_buf {
(VNIC_WQ_BUF_BLK_ENTRIES(entries) * sizeof(struct vnic_wq_buf))
#define VNIC_WQ_BUF_BLKS_NEEDED(entries) \
DIV_ROUND_UP(entries, VNIC_WQ_BUF_BLK_ENTRIES(entries))
-#define VNIC_WQ_BUF_BLKS_MAX VNIC_WQ_BUF_BLKS_NEEDED(4096)
+#define VNIC_WQ_BUF_BLKS_MAX VNIC_WQ_BUF_BLKS_NEEDED(16384)
struct vnic_wq {
unsigned int index;
diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c
index 991e3839858b..517a15904fb0 100644
--- a/drivers/net/ethernet/cortina/gemini.c
+++ b/drivers/net/ethernet/cortina/gemini.c
@@ -40,6 +40,7 @@
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
+#include <net/gro.h>
#include "gemini.h"
@@ -1833,9 +1834,8 @@ static int gmac_open(struct net_device *netdev)
gmac_enable_tx_rx(netdev);
netif_tx_start_all_queues(netdev);
- hrtimer_init(&port->rx_coalesce_timer, CLOCK_MONOTONIC,
- HRTIMER_MODE_REL);
- port->rx_coalesce_timer.function = &gmac_coalesce_delay_expired;
+ hrtimer_setup(&port->rx_coalesce_timer, &gmac_coalesce_delay_expired, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
netdev_dbg(netdev, "opened\n");
diff --git a/drivers/net/ethernet/dec/tulip/tulip_core.c b/drivers/net/ethernet/dec/tulip/tulip_core.c
index 27e01d780cd0..75eac18ff246 100644
--- a/drivers/net/ethernet/dec/tulip/tulip_core.c
+++ b/drivers/net/ethernet/dec/tulip/tulip_core.c
@@ -1177,7 +1177,6 @@ static void set_rx_mode(struct net_device *dev)
iowrite32(csr6, ioaddr + CSR6);
}
-#ifdef CONFIG_TULIP_MWI
static void tulip_mwi_config(struct pci_dev *pdev, struct net_device *dev)
{
struct tulip_private *tp = netdev_priv(dev);
@@ -1251,7 +1250,6 @@ out:
netdev_dbg(dev, "MWI config cacheline=%d, csr0=%08x\n",
cache, csr0);
}
-#endif
/*
* Chips that have the MRM/reserved bit quirk and the burst quirk. That
@@ -1463,10 +1461,9 @@ static int tulip_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
INIT_WORK(&tp->media_work, tulip_tbl[tp->chip_id].media_task);
-#ifdef CONFIG_TULIP_MWI
- if (!force_csr0 && (tp->flags & HAS_PCI_MWI))
+ if (IS_ENABLED(CONFIG_TULIP_MWI) && !force_csr0 &&
+ (tp->flags & HAS_PCI_MWI))
tulip_mwi_config (pdev, dev);
-#endif
/* Stop the chip's Tx and Rx processes. */
tulip_stop_rxtx(tp);
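
The gain from dropping the #ifdef pair is build coverage: tulip_mwi_config() is now compiled in every configuration, and because IS_ENABLED(CONFIG_TULIP_MWI) folds to a constant 0 or 1, the call site is discarded by dead-code elimination when the option is off:

	/* With CONFIG_TULIP_MWI unset the condition folds to 0 and the call disappears: */
	if (IS_ENABLED(CONFIG_TULIP_MWI) && !force_csr0 && (tp->flags & HAS_PCI_MWI))
		tulip_mwi_config(pdev, dev);
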
diff --git a/drivers/net/ethernet/ec_bhf.c b/drivers/net/ethernet/ec_bhf.c
index 44af1d13d931..67275aa4f65b 100644
--- a/drivers/net/ethernet/ec_bhf.c
+++ b/drivers/net/ethernet/ec_bhf.c
@@ -416,8 +416,7 @@ static int ec_bhf_open(struct net_device *net_dev)
netif_start_queue(net_dev);
- hrtimer_init(&priv->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- priv->hrtimer.function = ec_bhf_timer_fun;
+ hrtimer_setup(&priv->hrtimer, ec_bhf_timer_fun, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
hrtimer_start(&priv->hrtimer, polling_frequency, HRTIMER_MODE_REL);
return 0;
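
The hrtimer changes in this series (gemini, ec_bhf, and fec_ptp further down) are the same mechanical conversion: the two-step hrtimer_init() plus open-coded .function assignment becomes a single hrtimer_setup() call that takes the callback up front. Sketched with a placeholder callback my_timer_fn:

	/* before */
	hrtimer_init(&priv->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	priv->timer.function = my_timer_fn;

	/* after */
	hrtimer_setup(&priv->timer, my_timer_fn, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
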
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index e48b861e4ce1..270ff9aab335 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -562,7 +562,7 @@ struct be_adapter {
struct be_dma_mem mbox_mem_alloced;
struct be_mcc_obj mcc_obj;
- struct mutex mcc_lock; /* For serializing mcc cmds to BE card */
+ spinlock_t mcc_lock; /* For serializing mcc cmds to BE card */
spinlock_t mcc_cq_lock;
u16 cfg_num_rx_irqs; /* configured via set-channels */
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index 61adcebeef01..51b8377edd1d 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -575,7 +575,7 @@ int be_process_mcc(struct be_adapter *adapter)
/* Wait till no more pending mcc requests are present */
static int be_mcc_wait_compl(struct be_adapter *adapter)
{
-#define mcc_timeout 12000 /* 12s timeout */
+#define mcc_timeout 120000 /* 12s timeout */
int i, status = 0;
struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
@@ -589,7 +589,7 @@ static int be_mcc_wait_compl(struct be_adapter *adapter)
if (atomic_read(&mcc_obj->q.used) == 0)
break;
- usleep_range(500, 1000);
+ udelay(100);
}
if (i == mcc_timeout) {
dev_err(&adapter->pdev->dev, "FW not responding\n");
@@ -866,7 +866,7 @@ static bool use_mcc(struct be_adapter *adapter)
static int be_cmd_lock(struct be_adapter *adapter)
{
if (use_mcc(adapter)) {
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
return 0;
} else {
return mutex_lock_interruptible(&adapter->mbox_lock);
@@ -877,7 +877,7 @@ static int be_cmd_lock(struct be_adapter *adapter)
static void be_cmd_unlock(struct be_adapter *adapter)
{
if (use_mcc(adapter))
- return mutex_unlock(&adapter->mcc_lock);
+ return spin_unlock_bh(&adapter->mcc_lock);
else
return mutex_unlock(&adapter->mbox_lock);
}
@@ -1047,7 +1047,7 @@ int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
struct be_cmd_req_mac_query *req;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -1076,7 +1076,7 @@ int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
}
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -1088,7 +1088,7 @@ int be_cmd_pmac_add(struct be_adapter *adapter, const u8 *mac_addr,
struct be_cmd_req_pmac_add *req;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -1113,7 +1113,7 @@ int be_cmd_pmac_add(struct be_adapter *adapter, const u8 *mac_addr,
}
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
if (base_status(status) == MCC_STATUS_UNAUTHORIZED_REQUEST)
status = -EPERM;
@@ -1131,7 +1131,7 @@ int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, int pmac_id, u32 dom)
if (pmac_id == -1)
return 0;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -1151,7 +1151,7 @@ int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, int pmac_id, u32 dom)
status = be_mcc_notify_wait(adapter);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -1414,7 +1414,7 @@ int be_cmd_rxq_create(struct be_adapter *adapter,
struct be_dma_mem *q_mem = &rxq->dma_mem;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -1444,7 +1444,7 @@ int be_cmd_rxq_create(struct be_adapter *adapter,
}
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -1508,7 +1508,7 @@ int be_cmd_rxq_destroy(struct be_adapter *adapter, struct be_queue_info *q)
struct be_cmd_req_q_destroy *req;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -1525,7 +1525,7 @@ int be_cmd_rxq_destroy(struct be_adapter *adapter, struct be_queue_info *q)
q->created = false;
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -1593,7 +1593,7 @@ int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
struct be_cmd_req_hdr *hdr;
int status = 0;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -1621,7 +1621,7 @@ int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
adapter->stats_cmd_sent = true;
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -1637,7 +1637,7 @@ int lancer_cmd_get_pport_stats(struct be_adapter *adapter,
CMD_SUBSYSTEM_ETH))
return -EPERM;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -1660,7 +1660,7 @@ int lancer_cmd_get_pport_stats(struct be_adapter *adapter,
adapter->stats_cmd_sent = true;
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -1697,7 +1697,7 @@ int be_cmd_link_status_query(struct be_adapter *adapter, u16 *link_speed,
struct be_cmd_req_link_status *req;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
if (link_status)
*link_status = LINK_DOWN;
@@ -1736,7 +1736,7 @@ int be_cmd_link_status_query(struct be_adapter *adapter, u16 *link_speed,
}
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -1747,7 +1747,7 @@ int be_cmd_get_die_temperature(struct be_adapter *adapter)
struct be_cmd_req_get_cntl_addnl_attribs *req;
int status = 0;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -1762,7 +1762,7 @@ int be_cmd_get_die_temperature(struct be_adapter *adapter)
status = be_mcc_notify(adapter);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -1811,7 +1811,7 @@ int be_cmd_get_fat_dump(struct be_adapter *adapter, u32 buf_len, void *buf)
if (!get_fat_cmd.va)
return -ENOMEM;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
while (total_size) {
buf_size = min(total_size, (u32)60 * 1024);
@@ -1849,9 +1849,9 @@ int be_cmd_get_fat_dump(struct be_adapter *adapter, u32 buf_len, void *buf)
log_offset += buf_size;
}
err:
+ spin_unlock_bh(&adapter->mcc_lock);
dma_free_coherent(&adapter->pdev->dev, get_fat_cmd.size,
get_fat_cmd.va, get_fat_cmd.dma);
- mutex_unlock(&adapter->mcc_lock);
return status;
}
@@ -1862,7 +1862,7 @@ int be_cmd_get_fw_ver(struct be_adapter *adapter)
struct be_cmd_req_get_fw_version *req;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -1885,7 +1885,7 @@ int be_cmd_get_fw_ver(struct be_adapter *adapter)
sizeof(adapter->fw_on_flash));
}
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -1899,7 +1899,7 @@ static int __be_cmd_modify_eqd(struct be_adapter *adapter,
struct be_cmd_req_modify_eq_delay *req;
int status = 0, i;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -1922,7 +1922,7 @@ static int __be_cmd_modify_eqd(struct be_adapter *adapter,
status = be_mcc_notify(adapter);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -1949,7 +1949,7 @@ int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
struct be_cmd_req_vlan_config *req;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -1971,7 +1971,7 @@ int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
status = be_mcc_notify_wait(adapter);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -1982,7 +1982,7 @@ static int __be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
struct be_cmd_req_rx_filter *req = mem->va;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -2015,7 +2015,7 @@ static int __be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
status = be_mcc_notify_wait(adapter);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -2046,7 +2046,7 @@ int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc)
CMD_SUBSYSTEM_COMMON))
return -EPERM;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -2066,7 +2066,7 @@ int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc)
status = be_mcc_notify_wait(adapter);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
if (base_status(status) == MCC_STATUS_FEATURE_NOT_SUPPORTED)
return -EOPNOTSUPP;
@@ -2085,7 +2085,7 @@ int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc)
CMD_SUBSYSTEM_COMMON))
return -EPERM;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -2108,7 +2108,7 @@ int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc)
}
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -2189,7 +2189,7 @@ int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable,
if (!(be_if_cap_flags(adapter) & BE_IF_FLAGS_RSS))
return 0;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -2214,7 +2214,7 @@ int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable,
status = be_mcc_notify_wait(adapter);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -2226,7 +2226,7 @@ int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num,
struct be_cmd_req_enable_disable_beacon *req;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -2247,7 +2247,7 @@ int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num,
status = be_mcc_notify_wait(adapter);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -2258,7 +2258,7 @@ int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num, u32 *state)
struct be_cmd_req_get_beacon_state *req;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -2282,7 +2282,7 @@ int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num, u32 *state)
}
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -2306,7 +2306,7 @@ int be_cmd_read_port_transceiver_data(struct be_adapter *adapter,
return -ENOMEM;
}
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -2328,7 +2328,7 @@ int be_cmd_read_port_transceiver_data(struct be_adapter *adapter,
memcpy(data, resp->page_data + off, len);
}
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
return status;
}
@@ -2345,7 +2345,7 @@ static int lancer_cmd_write_object(struct be_adapter *adapter,
void *ctxt = NULL;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
adapter->flash_status = 0;
wrb = wrb_from_mccq(adapter);
@@ -2387,7 +2387,7 @@ static int lancer_cmd_write_object(struct be_adapter *adapter,
if (status)
goto err_unlock;
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
if (!wait_for_completion_timeout(&adapter->et_cmd_compl,
msecs_to_jiffies(60000)))
@@ -2406,7 +2406,7 @@ static int lancer_cmd_write_object(struct be_adapter *adapter,
return status;
err_unlock:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -2460,7 +2460,7 @@ static int lancer_cmd_delete_object(struct be_adapter *adapter,
struct be_mcc_wrb *wrb;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -2478,7 +2478,7 @@ static int lancer_cmd_delete_object(struct be_adapter *adapter,
status = be_mcc_notify_wait(adapter);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -2491,7 +2491,7 @@ int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
struct lancer_cmd_resp_read_object *resp;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -2525,7 +2525,7 @@ int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
}
err_unlock:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -2537,7 +2537,7 @@ static int be_cmd_write_flashrom(struct be_adapter *adapter,
struct be_cmd_write_flashrom *req;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
adapter->flash_status = 0;
wrb = wrb_from_mccq(adapter);
@@ -2562,7 +2562,7 @@ static int be_cmd_write_flashrom(struct be_adapter *adapter,
if (status)
goto err_unlock;
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
if (!wait_for_completion_timeout(&adapter->et_cmd_compl,
msecs_to_jiffies(40000)))
@@ -2573,7 +2573,7 @@ static int be_cmd_write_flashrom(struct be_adapter *adapter,
return status;
err_unlock:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -2584,7 +2584,7 @@ static int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
struct be_mcc_wrb *wrb;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -2611,7 +2611,7 @@ static int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
memcpy(flashed_crc, req->crc, 4);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -3217,7 +3217,7 @@ int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
struct be_cmd_req_acpi_wol_magic_config *req;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -3234,7 +3234,7 @@ int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
status = be_mcc_notify_wait(adapter);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -3249,7 +3249,7 @@ int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
CMD_SUBSYSTEM_LOWLEVEL))
return -EPERM;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -3272,7 +3272,7 @@ int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
if (status)
goto err_unlock;
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
if (!wait_for_completion_timeout(&adapter->et_cmd_compl,
msecs_to_jiffies(SET_LB_MODE_TIMEOUT)))
@@ -3281,7 +3281,7 @@ int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
return status;
err_unlock:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -3298,7 +3298,7 @@ int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
CMD_SUBSYSTEM_LOWLEVEL))
return -EPERM;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -3324,7 +3324,7 @@ int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
if (status)
goto err;
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
wait_for_completion(&adapter->et_cmd_compl);
resp = embedded_payload(wrb);
@@ -3332,7 +3332,7 @@ int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
return status;
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -3348,7 +3348,7 @@ int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern,
CMD_SUBSYSTEM_LOWLEVEL))
return -EPERM;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -3382,7 +3382,7 @@ int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern,
}
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -3393,7 +3393,7 @@ int be_cmd_get_seeprom_data(struct be_adapter *adapter,
struct be_cmd_req_seeprom_read *req;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -3409,7 +3409,7 @@ int be_cmd_get_seeprom_data(struct be_adapter *adapter,
status = be_mcc_notify_wait(adapter);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -3424,7 +3424,7 @@ int be_cmd_get_phy_info(struct be_adapter *adapter)
CMD_SUBSYSTEM_COMMON))
return -EPERM;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -3469,7 +3469,7 @@ int be_cmd_get_phy_info(struct be_adapter *adapter)
}
dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -3479,7 +3479,7 @@ static int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain)
struct be_cmd_req_set_qos *req;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -3499,7 +3499,7 @@ static int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain)
status = be_mcc_notify_wait(adapter);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -3611,7 +3611,7 @@ int be_cmd_get_fn_privileges(struct be_adapter *adapter, u32 *privilege,
struct be_cmd_req_get_fn_privileges *req;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -3643,7 +3643,7 @@ int be_cmd_get_fn_privileges(struct be_adapter *adapter, u32 *privilege,
}
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -3655,7 +3655,7 @@ int be_cmd_set_fn_privileges(struct be_adapter *adapter, u32 privileges,
struct be_cmd_req_set_fn_privileges *req;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -3675,7 +3675,7 @@ int be_cmd_set_fn_privileges(struct be_adapter *adapter, u32 privileges,
status = be_mcc_notify_wait(adapter);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -3707,7 +3707,7 @@ int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
return -ENOMEM;
}
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -3771,7 +3771,7 @@ int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
}
out:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
dma_free_coherent(&adapter->pdev->dev, get_mac_list_cmd.size,
get_mac_list_cmd.va, get_mac_list_cmd.dma);
return status;
@@ -3831,7 +3831,7 @@ int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
if (!cmd.va)
return -ENOMEM;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -3853,7 +3853,7 @@ int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
err:
dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -3889,7 +3889,7 @@ int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid,
CMD_SUBSYSTEM_COMMON))
return -EPERM;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -3930,7 +3930,7 @@ int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid,
status = be_mcc_notify_wait(adapter);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -3944,7 +3944,7 @@ int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid,
int status;
u16 vid;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -3991,7 +3991,7 @@ int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid,
}
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -4190,7 +4190,7 @@ int be_cmd_set_ext_fat_capabilites(struct be_adapter *adapter,
struct be_cmd_req_set_ext_fat_caps *req;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -4206,7 +4206,7 @@ int be_cmd_set_ext_fat_capabilites(struct be_adapter *adapter,
status = be_mcc_notify_wait(adapter);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -4684,7 +4684,7 @@ int be_cmd_manage_iface(struct be_adapter *adapter, u32 iface, u8 op)
if (iface == 0xFFFFFFFF)
return -1;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -4701,7 +4701,7 @@ int be_cmd_manage_iface(struct be_adapter *adapter, u32 iface, u8 op)
status = be_mcc_notify_wait(adapter);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -4735,7 +4735,7 @@ int be_cmd_get_if_id(struct be_adapter *adapter, struct be_vf_cfg *vf_cfg,
struct be_cmd_resp_get_iface_list *resp;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -4756,7 +4756,7 @@ int be_cmd_get_if_id(struct be_adapter *adapter, struct be_vf_cfg *vf_cfg,
}
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -4850,7 +4850,7 @@ int be_cmd_enable_vf(struct be_adapter *adapter, u8 domain)
if (BEx_chip(adapter))
return 0;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -4868,7 +4868,7 @@ int be_cmd_enable_vf(struct be_adapter *adapter, u8 domain)
req->enable = 1;
status = be_mcc_notify_wait(adapter);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -4941,7 +4941,7 @@ __be_cmd_set_logical_link_config(struct be_adapter *adapter,
u32 link_config = 0;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -4969,7 +4969,7 @@ __be_cmd_set_logical_link_config(struct be_adapter *adapter,
status = be_mcc_notify_wait(adapter);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -5000,8 +5000,7 @@ int be_cmd_set_features(struct be_adapter *adapter)
struct be_mcc_wrb *wrb;
int status;
- if (mutex_lock_interruptible(&adapter->mcc_lock))
- return -1;
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -5039,7 +5038,7 @@ err:
dev_info(&adapter->pdev->dev,
"Adapter does not support HW error recovery\n");
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -5053,7 +5052,7 @@ int be_roce_mcc_cmd(void *netdev_handle, void *wrb_payload,
struct be_cmd_resp_hdr *resp;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -5076,7 +5075,7 @@ int be_roce_mcc_cmd(void *netdev_handle, void *wrb_payload,
memcpy(wrb_payload, resp, sizeof(*resp) + resp->response_length);
be_dws_le_to_cpu(wrb_payload, sizeof(*resp) + resp->response_length);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
EXPORT_SYMBOL(be_roce_mcc_cmd);
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 875fe379eea2..3d2e21592119 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -5667,8 +5667,8 @@ static int be_drv_init(struct be_adapter *adapter)
}
mutex_init(&adapter->mbox_lock);
- mutex_init(&adapter->mcc_lock);
mutex_init(&adapter->rx_filter_lock);
+ spin_lock_init(&adapter->mcc_lock);
spin_lock_init(&adapter->mcc_cq_lock);
init_completion(&adapter->et_cmd_compl);
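
The be2net mcc_lock conversion above is largely mechanical: every mutex_lock()/mutex_unlock() pair around MCC WRB submission becomes spin_lock_bh()/spin_unlock_bh(), and the lock is now initialised with spin_lock_init() in be_drv_init(). Two details stand out. First, be_mcc_wait_compl() switches from usleep_range(500, 1000) to udelay(100), since sleeping is no longer allowed with the spinlock held; the iteration count grows from 12000 to 120000 so the overall budget stays at roughly 12 s (120000 * 100 us). Second, be_cmd_get_fat_dump() now drops the lock before dma_free_coherent(), presumably because that call should not run inside the BH-disabled atomic section. The shared shape of the converted command paths, sketched:

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	/* ... fill in the request, be_mcc_notify_wait(adapter) ... */
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
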
diff --git a/drivers/net/ethernet/engleder/tsnep_main.c b/drivers/net/ethernet/engleder/tsnep_main.c
index 0d030cb0b21c..625245b0845c 100644
--- a/drivers/net/ethernet/engleder/tsnep_main.c
+++ b/drivers/net/ethernet/engleder/tsnep_main.c
@@ -221,20 +221,19 @@ static void tsnep_phy_link_status_change(struct net_device *netdev)
static int tsnep_phy_loopback(struct tsnep_adapter *adapter, bool enable)
{
- int retval;
-
- retval = phy_loopback(adapter->phydev, enable);
+ int speed;
- /* PHY link state change is not signaled if loopback is enabled, it
- * would delay a working loopback anyway, let's ensure that loopback
- * is working immediately by setting link mode directly
- */
- if (!retval && enable) {
- netif_carrier_on(adapter->netdev);
- tsnep_set_link_mode(adapter);
+ if (enable) {
+ if (adapter->phydev->autoneg == AUTONEG_DISABLE &&
+ adapter->phydev->speed == SPEED_100)
+ speed = SPEED_100;
+ else
+ speed = SPEED_1000;
+ } else {
+ speed = 0;
}
- return retval;
+ return phy_loopback(adapter->phydev, enable, speed);
}
static int tsnep_phy_open(struct tsnep_adapter *adapter)
@@ -852,8 +851,8 @@ static bool tsnep_tx_poll(struct tsnep_tx *tx, int napi_budget)
struct skb_shared_hwtstamps hwtstamps;
u64 timestamp;
- if (skb_shinfo(entry->skb)->tx_flags &
- SKBTX_HW_TSTAMP_USE_CYCLES)
+ if (entry->skb->sk &&
+ READ_ONCE(entry->skb->sk->sk_tsflags) & SOF_TIMESTAMPING_BIND_PHC)
timestamp =
__le64_to_cpu(entry->desc_wb->counter);
else
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index f7c4ce8e9a26..a86cfebedaa8 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -1093,6 +1093,29 @@ static void fec_enet_enable_ring(struct net_device *ndev)
}
}
+/* Whack a reset. We should wait for this.
+ * For the i.MX6SX SoC, enet uses the AXI bus, so we disable the MAC
+ * instead of resetting the MAC itself.
+ */
+static void fec_ctrl_reset(struct fec_enet_private *fep, bool allow_wol)
+{
+ u32 val;
+
+ if (!allow_wol || !(fep->wol_flag & FEC_WOL_FLAG_SLEEP_ON)) {
+ if (fep->quirks & FEC_QUIRK_HAS_MULTI_QUEUES ||
+ ((fep->quirks & FEC_QUIRK_NO_HARD_RESET) && fep->link)) {
+ writel(0, fep->hwp + FEC_ECNTRL);
+ } else {
+ writel(FEC_ECR_RESET, fep->hwp + FEC_ECNTRL);
+ udelay(10);
+ }
+ } else {
+ val = readl(fep->hwp + FEC_ECNTRL);
+ val |= (FEC_ECR_MAGICEN | FEC_ECR_SLEEP);
+ writel(val, fep->hwp + FEC_ECNTRL);
+ }
+}
+
/*
* This function is called to start or restart the FEC during a link
* change, transmit timeout, or to reconfigure the FEC. The network
@@ -1109,17 +1132,7 @@ fec_restart(struct net_device *ndev)
if (fep->bufdesc_ex)
fec_ptp_save_state(fep);
- /* Whack a reset. We should wait for this.
- * For i.MX6SX SOC, enet use AXI bus, we use disable MAC
- * instead of reset MAC itself.
- */
- if (fep->quirks & FEC_QUIRK_HAS_MULTI_QUEUES ||
- ((fep->quirks & FEC_QUIRK_NO_HARD_RESET) && fep->link)) {
- writel(0, fep->hwp + FEC_ECNTRL);
- } else {
- writel(1, fep->hwp + FEC_ECNTRL);
- udelay(10);
- }
+ fec_ctrl_reset(fep, false);
/*
* enet-mac reset will reset mac address registers too,
@@ -1373,22 +1386,7 @@ fec_stop(struct net_device *ndev)
if (fep->bufdesc_ex)
fec_ptp_save_state(fep);
- /* Whack a reset. We should wait for this.
- * For i.MX6SX SOC, enet use AXI bus, we use disable MAC
- * instead of reset MAC itself.
- */
- if (!(fep->wol_flag & FEC_WOL_FLAG_SLEEP_ON)) {
- if (fep->quirks & FEC_QUIRK_HAS_MULTI_QUEUES) {
- writel(0, fep->hwp + FEC_ECNTRL);
- } else {
- writel(FEC_ECR_RESET, fep->hwp + FEC_ECNTRL);
- udelay(10);
- }
- } else {
- val = readl(fep->hwp + FEC_ECNTRL);
- val |= (FEC_ECR_MAGICEN | FEC_ECR_SLEEP);
- writel(val, fep->hwp + FEC_ECNTRL);
- }
+ fec_ctrl_reset(fep, true);
writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
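
Both reset paths now funnel through the new helper; only fec_stop() passes allow_wol = true, so the magic-packet/sleep programming is preserved there, while fec_restart() always takes the plain disable-or-reset branch. The two call sites, condensed:

	fec_ctrl_reset(fep, false);	/* fec_restart(): always disable or reset the MAC */
	fec_ctrl_reset(fep, true);	/* fec_stop(): keep FEC_ECR_MAGICEN | FEC_ECR_SLEEP if WoL is armed */
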
diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c
index 7f6b57432071..876d90832596 100644
--- a/drivers/net/ethernet/freescale/fec_ptp.c
+++ b/drivers/net/ethernet/freescale/fec_ptp.c
@@ -30,7 +30,6 @@
#include <linux/phy.h>
#include <linux/fec.h>
#include <linux/of.h>
-#include <linux/of_gpio.h>
#include <linux/of_net.h>
#include "fec.h"
@@ -739,8 +738,8 @@ void fec_ptp_init(struct platform_device *pdev, int irq_idx)
INIT_DELAYED_WORK(&fep->time_keep, fec_time_keep);
- hrtimer_init(&fep->perout_timer, CLOCK_REALTIME, HRTIMER_MODE_REL);
- fep->perout_timer.function = fec_ptp_pps_perout_handler;
+ hrtimer_setup(&fep->perout_timer, fec_ptp_pps_perout_handler, CLOCK_REALTIME,
+ HRTIMER_MODE_REL);
irq = platform_get_irq_byname_optional(pdev, "pps");
if (irq < 0)
diff --git a/drivers/net/ethernet/freescale/fman/fman_dtsec.c b/drivers/net/ethernet/freescale/fman/fman_dtsec.c
index b3e2a596ad2c..51402dff72c5 100644
--- a/drivers/net/ethernet/freescale/fman/fman_dtsec.c
+++ b/drivers/net/ethernet/freescale/fman/fman_dtsec.c
@@ -1446,7 +1446,6 @@ int dtsec_initialization(struct mac_device *mac_dev,
goto _return_fm_mac_free;
}
dtsec->pcs.ops = &dtsec_pcs_ops;
- dtsec->pcs.neg_mode = true;
dtsec->pcs.poll = true;
supported = mac_dev->phylink_config.supported_interfaces;
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 435138f4699d..deb35b38c976 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -1647,20 +1647,11 @@ static void gfar_configure_serdes(struct net_device *dev)
*/
static int init_phy(struct net_device *dev)
{
- __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
struct gfar_private *priv = netdev_priv(dev);
phy_interface_t interface = priv->interface;
struct phy_device *phydev;
struct ethtool_keee edata;
- linkmode_set_bit_array(phy_10_100_features_array,
- ARRAY_SIZE(phy_10_100_features_array),
- mask);
- linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mask);
- linkmode_set_bit(ETHTOOL_LINK_MODE_MII_BIT, mask);
- if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT)
- linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, mask);
-
priv->oldlink = 0;
priv->oldspeed = 0;
priv->oldduplex = -1;
@@ -1675,9 +1666,8 @@ static int init_phy(struct net_device *dev)
if (interface == PHY_INTERFACE_MODE_SGMII)
gfar_configure_serdes(dev);
- /* Remove any features not supported by the controller */
- linkmode_and(phydev->supported, phydev->supported, mask);
- linkmode_copy(phydev->advertising, phydev->supported);
+ if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT))
+ phy_set_max_speed(phydev, SPEED_100);
/* Add support for flow control */
phy_support_asym_pause(phydev);
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index 88510f822759..affd5a6c44e7 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -3408,7 +3408,7 @@ static int ucc_geth_parse_clock(struct device_node *np, const char *which,
return 0;
}
-struct phylink_mac_ops ugeth_mac_ops = {
+static const struct phylink_mac_ops ugeth_mac_ops = {
.mac_link_up = ugeth_mac_link_up,
.mac_link_down = ugeth_mac_link_down,
.mac_config = ugeth_mac_config,
diff --git a/drivers/net/ethernet/freescale/ucc_geth.h b/drivers/net/ethernet/freescale/ucc_geth.h
index 38789faae706..84f92f6384e7 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.h
+++ b/drivers/net/ethernet/freescale/ucc_geth.h
@@ -890,8 +890,6 @@ struct ucc_geth_hardware_statistics {
addresses */
#define TX_TIMEOUT (1*HZ)
-#define PHY_INIT_TIMEOUT 100000
-#define PHY_CHANGE_TIME 2
/* Fast Ethernet (10/100 Mbps) */
#define UCC_GETH_URFS_INIT 512 /* Rx virtual FIFO size
diff --git a/drivers/net/ethernet/google/gve/gve.h b/drivers/net/ethernet/google/gve/gve.h
index 78d2a19593d1..2fab38c8ee78 100644
--- a/drivers/net/ethernet/google/gve/gve.h
+++ b/drivers/net/ethernet/google/gve/gve.h
@@ -59,6 +59,8 @@
#define GVE_MAX_RX_BUFFER_SIZE 4096
+#define GVE_XDP_RX_BUFFER_SIZE_DQO 4096
+
#define GVE_DEFAULT_RX_BUFFER_OFFSET 2048
#define GVE_PAGE_POOL_SIZE_MULTIPLIER 4
@@ -68,6 +70,9 @@
#define GVE_FLOW_RULE_IDS_CACHE_SIZE \
(GVE_ADMINQ_BUFFER_SIZE / sizeof(((struct gve_adminq_queried_flow_rule *)0)->location))
+#define GVE_RSS_KEY_SIZE 40
+#define GVE_RSS_INDIR_SIZE 128
+
#define GVE_XDP_ACTIONS 5
#define GVE_GQ_TX_MIN_PKT_DESC_BYTES 182
@@ -102,7 +107,13 @@ struct gve_rx_desc_queue {
/* The page info for a single slot in the RX data queue */
struct gve_rx_slot_page_info {
- struct page *page;
+ /* netmem is used for DQO RDA mode
+ * page is used in all other modes
+ */
+ union {
+ struct page *page;
+ netmem_ref netmem;
+ };
void *page_address;
u32 page_offset; /* offset to write to in page */
unsigned int buf_size;
@@ -218,6 +229,11 @@ struct gve_rx_cnts {
/* Contains datapath state used to represent an RX queue. */
struct gve_rx_ring {
struct gve_priv *gve;
+
+ u16 packet_buffer_size; /* Size of buffer posted to NIC */
+ u16 packet_buffer_truesize; /* Total size of RX buffer */
+ u16 rx_headroom;
+
union {
/* GQI fields */
struct {
@@ -226,7 +242,6 @@ struct gve_rx_ring {
/* threshold for posting new buffs and descs */
u32 db_threshold;
- u16 packet_buffer_size;
u32 qpl_copy_pool_mask;
u32 qpl_copy_pool_head;
@@ -604,8 +619,6 @@ struct gve_tx_ring {
dma_addr_t complq_bus_dqo; /* dma address of the dqo.compl_ring */
struct u64_stats_sync statss; /* sync stats for 32bit archs */
struct xsk_buff_pool *xsk_pool;
- u32 xdp_xsk_wakeup;
- u32 xdp_xsk_done;
u64 xdp_xsk_sent;
u64 xdp_xmit;
u64 xdp_xmit_errors;
@@ -624,10 +637,18 @@ struct gve_notify_block {
u32 irq;
};
-/* Tracks allowed and current queue settings */
-struct gve_queue_config {
+/* Tracks allowed and current rx queue settings */
+struct gve_rx_queue_config {
+ u16 max_queues;
+ u16 num_queues;
+ u16 packet_buffer_size;
+};
+
+/* Tracks allowed and current tx queue settings */
+struct gve_tx_queue_config {
u16 max_queues;
- u16 num_queues; /* current */
+ u16 num_queues; /* number of TX queues, excluding XDP queues */
+ u16 num_xdp_queues;
};
/* Tracks the available and used qpl IDs */
@@ -651,11 +672,11 @@ struct gve_ptype_lut {
/* Parameters for allocating resources for tx queues */
struct gve_tx_alloc_rings_cfg {
- struct gve_queue_config *qcfg;
+ struct gve_tx_queue_config *qcfg;
+
+ u16 num_xdp_rings;
u16 ring_size;
- u16 start_idx;
- u16 num_rings;
bool raw_addressing;
/* Allocated resources are returned here */
@@ -665,13 +686,15 @@ struct gve_tx_alloc_rings_cfg {
/* Parameters for allocating resources for rx queues */
struct gve_rx_alloc_rings_cfg {
/* tx config is also needed to determine QPL ids */
- struct gve_queue_config *qcfg;
- struct gve_queue_config *qcfg_tx;
+ struct gve_rx_queue_config *qcfg_rx;
+ struct gve_tx_queue_config *qcfg_tx;
u16 ring_size;
u16 packet_buffer_size;
bool raw_addressing;
bool enable_header_split;
+ bool reset_rss;
+ bool xdp;
/* Allocated resources are returned here */
struct gve_rx_ring *rx;
@@ -722,6 +745,11 @@ struct gve_flow_rules_cache {
u32 rule_ids_cache_num;
};
+struct gve_rss_config {
+ u8 *hash_key;
+ u32 *hash_lut;
+};
+
struct gve_priv {
struct net_device *dev;
struct gve_tx_ring *tx; /* array of tx_cfg.num_queues */
@@ -751,9 +779,8 @@ struct gve_priv {
u32 rx_copybreak; /* copy packets smaller than this */
u16 default_num_queues; /* default num queues to set up */
- u16 num_xdp_queues;
- struct gve_queue_config tx_cfg;
- struct gve_queue_config rx_cfg;
+ struct gve_tx_queue_config tx_cfg;
+ struct gve_rx_queue_config rx_cfg;
u32 num_ntfy_blks; /* spilt between TX and RX so must be even */
struct gve_registers __iomem *reg_bar0; /* see gve_register.h */
@@ -823,7 +850,6 @@ struct gve_priv {
struct gve_ptype_lut *ptype_lut_dqo;
/* Must be a power of two. */
- u16 data_buffer_size_dqo;
u16 max_rx_buffer_size; /* device limit */
enum gve_queue_format queue_format;
@@ -842,6 +868,8 @@ struct gve_priv {
u16 rss_key_size;
u16 rss_lut_size;
+ bool cache_rss_config;
+ struct gve_rss_config rss_config;
};
enum gve_service_task_flags_bit {
@@ -1024,27 +1052,16 @@ static inline bool gve_is_qpl(struct gve_priv *priv)
}
/* Returns the number of tx queue page lists */
-static inline u32 gve_num_tx_qpls(const struct gve_queue_config *tx_cfg,
- int num_xdp_queues,
+static inline u32 gve_num_tx_qpls(const struct gve_tx_queue_config *tx_cfg,
bool is_qpl)
{
if (!is_qpl)
return 0;
- return tx_cfg->num_queues + num_xdp_queues;
-}
-
-/* Returns the number of XDP tx queue page lists
- */
-static inline u32 gve_num_xdp_qpls(struct gve_priv *priv)
-{
- if (priv->queue_format != GVE_GQI_QPL_FORMAT)
- return 0;
-
- return priv->num_xdp_queues;
+ return tx_cfg->num_queues + tx_cfg->num_xdp_queues;
}
/* Returns the number of rx queue page lists */
-static inline u32 gve_num_rx_qpls(const struct gve_queue_config *rx_cfg,
+static inline u32 gve_num_rx_qpls(const struct gve_rx_queue_config *rx_cfg,
bool is_qpl)
{
if (!is_qpl)
@@ -1062,7 +1079,8 @@ static inline u32 gve_rx_qpl_id(struct gve_priv *priv, int rx_qid)
return priv->tx_cfg.max_queues + rx_qid;
}
-static inline u32 gve_get_rx_qpl_id(const struct gve_queue_config *tx_cfg, int rx_qid)
+static inline u32 gve_get_rx_qpl_id(const struct gve_tx_queue_config *tx_cfg,
+ int rx_qid)
{
return tx_cfg->max_queues + rx_qid;
}
@@ -1072,7 +1090,7 @@ static inline u32 gve_tx_start_qpl_id(struct gve_priv *priv)
return gve_tx_qpl_id(priv, 0);
}
-static inline u32 gve_rx_start_qpl_id(const struct gve_queue_config *tx_cfg)
+static inline u32 gve_rx_start_qpl_id(const struct gve_tx_queue_config *tx_cfg)
{
return gve_get_rx_qpl_id(tx_cfg, 0);
}
@@ -1103,7 +1121,7 @@ static inline bool gve_is_gqi(struct gve_priv *priv)
static inline u32 gve_num_tx_queues(struct gve_priv *priv)
{
- return priv->tx_cfg.num_queues + priv->num_xdp_queues;
+ return priv->tx_cfg.num_queues + priv->tx_cfg.num_xdp_queues;
}
static inline u32 gve_xdp_tx_queue_id(struct gve_priv *priv, u32 queue_id)
@@ -1207,7 +1225,8 @@ void gve_free_buffer(struct gve_rx_ring *rx,
struct gve_rx_buf_state_dqo *buf_state);
int gve_alloc_buffer(struct gve_rx_ring *rx, struct gve_rx_desc_dqo *desc);
struct page_pool *gve_rx_create_page_pool(struct gve_priv *priv,
- struct gve_rx_ring *rx);
+ struct gve_rx_ring *rx,
+ bool xdp);
/* Reset */
void gve_schedule_reset(struct gve_priv *priv);
@@ -1219,14 +1238,17 @@ int gve_adjust_config(struct gve_priv *priv,
struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
struct gve_rx_alloc_rings_cfg *rx_alloc_cfg);
int gve_adjust_queues(struct gve_priv *priv,
- struct gve_queue_config new_rx_config,
- struct gve_queue_config new_tx_config);
+ struct gve_rx_queue_config new_rx_config,
+ struct gve_tx_queue_config new_tx_config,
+ bool reset_rss);
/* flow steering rule */
int gve_get_flow_rule_entry(struct gve_priv *priv, struct ethtool_rxnfc *cmd);
int gve_get_flow_rule_ids(struct gve_priv *priv, struct ethtool_rxnfc *cmd, u32 *rule_locs);
int gve_add_flow_rule(struct gve_priv *priv, struct ethtool_rxnfc *cmd);
int gve_del_flow_rule(struct gve_priv *priv, struct ethtool_rxnfc *cmd);
int gve_flow_rules_reset(struct gve_priv *priv);
+/* RSS config */
+int gve_init_rss_config(struct gve_priv *priv, u16 num_queues);
/* report stats handling */
void gve_handle_report_stats(struct gve_priv *priv);
/* exported by ethtool.c */
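
With the gve_queue_config split, XDP ring accounting lives entirely inside gve_tx_queue_config, so helpers such as gve_num_tx_queues() and gve_num_tx_qpls() no longer need a separate num_xdp_queues passed in from struct gve_priv. The arithmetic they encode, restated as a small illustrative helper:

	/* Illustrative only: XDP TX rings are counted on top of the normal TX rings. */
	static u32 example_total_tx_rings(const struct gve_tx_queue_config *cfg)
	{
		return cfg->num_queues + cfg->num_xdp_queues;
	}
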
diff --git a/drivers/net/ethernet/google/gve/gve_adminq.c b/drivers/net/ethernet/google/gve/gve_adminq.c
index aa7d723011d0..3e8fc33cc11f 100644
--- a/drivers/net/ethernet/google/gve/gve_adminq.c
+++ b/drivers/net/ethernet/google/gve/gve_adminq.c
@@ -731,6 +731,7 @@ static void gve_adminq_get_create_rx_queue_cmd(struct gve_priv *priv,
.ntfy_id = cpu_to_be32(rx->ntfy_id),
.queue_resources_addr = cpu_to_be64(rx->q_resources_bus),
.rx_ring_size = cpu_to_be16(priv->rx_desc_cnt),
+ .packet_buffer_size = cpu_to_be16(rx->packet_buffer_size),
};
if (gve_is_gqi(priv)) {
@@ -743,7 +744,6 @@ static void gve_adminq_get_create_rx_queue_cmd(struct gve_priv *priv,
cpu_to_be64(rx->data.data_bus);
cmd->create_rx_queue.index = cpu_to_be32(queue_index);
cmd->create_rx_queue.queue_page_list_id = cpu_to_be32(qpl_id);
- cmd->create_rx_queue.packet_buffer_size = cpu_to_be16(rx->packet_buffer_size);
} else {
u32 qpl_id = 0;
@@ -756,8 +756,6 @@ static void gve_adminq_get_create_rx_queue_cmd(struct gve_priv *priv,
cpu_to_be64(rx->dqo.complq.bus);
cmd->create_rx_queue.rx_data_ring_addr =
cpu_to_be64(rx->dqo.bufq.bus);
- cmd->create_rx_queue.packet_buffer_size =
- cpu_to_be16(priv->data_buffer_size_dqo);
cmd->create_rx_queue.rx_buff_ring_size =
cpu_to_be16(priv->rx_desc_cnt);
cmd->create_rx_queue.enable_rsc =
@@ -885,6 +883,15 @@ static void gve_set_default_desc_cnt(struct gve_priv *priv,
priv->min_rx_desc_cnt = priv->rx_desc_cnt;
}
+static void gve_set_default_rss_sizes(struct gve_priv *priv)
+{
+ if (!gve_is_gqi(priv)) {
+ priv->rss_key_size = GVE_RSS_KEY_SIZE;
+ priv->rss_lut_size = GVE_RSS_INDIR_SIZE;
+ priv->cache_rss_config = true;
+ }
+}
+
static void gve_enable_supported_features(struct gve_priv *priv,
u32 supported_features_mask,
const struct gve_device_option_jumbo_frames
@@ -968,6 +975,10 @@ static void gve_enable_supported_features(struct gve_priv *priv,
be16_to_cpu(dev_op_rss_config->hash_key_size);
priv->rss_lut_size =
be16_to_cpu(dev_op_rss_config->hash_lut_size);
+ priv->cache_rss_config = false;
+ dev_dbg(&priv->pdev->dev,
+ "RSS device option enabled with key size of %u, lut size of %u.\n",
+ priv->rss_key_size, priv->rss_lut_size);
}
}
@@ -1052,6 +1063,8 @@ int gve_adminq_describe_device(struct gve_priv *priv)
/* set default descriptor counts */
gve_set_default_desc_cnt(priv, descriptor);
+ gve_set_default_rss_sizes(priv);
+
/* DQO supports LRO. */
if (!gve_is_gqi(priv))
priv->dev->hw_features |= NETIF_F_LRO;
@@ -1276,8 +1289,9 @@ int gve_adminq_reset_flow_rules(struct gve_priv *priv)
int gve_adminq_configure_rss(struct gve_priv *priv, struct ethtool_rxfh_param *rxfh)
{
+ const u32 *hash_lut_to_config = NULL;
+ const u8 *hash_key_to_config = NULL;
dma_addr_t lut_bus = 0, key_bus = 0;
- u16 key_size = 0, lut_size = 0;
union gve_adminq_command cmd;
__be32 *lut = NULL;
u8 hash_alg = 0;
@@ -1287,7 +1301,7 @@ int gve_adminq_configure_rss(struct gve_priv *priv, struct ethtool_rxfh_param *r
switch (rxfh->hfunc) {
case ETH_RSS_HASH_NO_CHANGE:
- break;
+ fallthrough;
case ETH_RSS_HASH_TOP:
hash_alg = ETH_RSS_HASH_TOP;
break;
@@ -1296,27 +1310,46 @@ int gve_adminq_configure_rss(struct gve_priv *priv, struct ethtool_rxfh_param *r
}
if (rxfh->indir) {
- lut_size = priv->rss_lut_size;
+ if (rxfh->indir_size != priv->rss_lut_size)
+ return -EINVAL;
+
+ hash_lut_to_config = rxfh->indir;
+ } else if (priv->cache_rss_config) {
+ hash_lut_to_config = priv->rss_config.hash_lut;
+ }
+
+ if (hash_lut_to_config) {
lut = dma_alloc_coherent(&priv->pdev->dev,
- lut_size * sizeof(*lut),
+ priv->rss_lut_size * sizeof(*lut),
&lut_bus, GFP_KERNEL);
if (!lut)
return -ENOMEM;
for (i = 0; i < priv->rss_lut_size; i++)
- lut[i] = cpu_to_be32(rxfh->indir[i]);
+ lut[i] = cpu_to_be32(hash_lut_to_config[i]);
}
if (rxfh->key) {
- key_size = priv->rss_key_size;
+ if (rxfh->key_size != priv->rss_key_size) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ hash_key_to_config = rxfh->key;
+ } else if (priv->cache_rss_config) {
+ hash_key_to_config = priv->rss_config.hash_key;
+ }
+
+ if (hash_key_to_config) {
key = dma_alloc_coherent(&priv->pdev->dev,
- key_size, &key_bus, GFP_KERNEL);
+ priv->rss_key_size,
+ &key_bus, GFP_KERNEL);
if (!key) {
err = -ENOMEM;
goto out;
}
- memcpy(key, rxfh->key, key_size);
+ memcpy(key, hash_key_to_config, priv->rss_key_size);
}
/* Zero-valued fields in the cmd.configure_rss instruct the device to
@@ -1330,8 +1363,10 @@ int gve_adminq_configure_rss(struct gve_priv *priv, struct ethtool_rxfh_param *r
BIT(GVE_RSS_HASH_TCPV6) |
BIT(GVE_RSS_HASH_UDPV6)),
.hash_alg = hash_alg,
- .hash_key_size = cpu_to_be16(key_size),
- .hash_lut_size = cpu_to_be16(lut_size),
+ .hash_key_size =
+ cpu_to_be16((key_bus) ? priv->rss_key_size : 0),
+ .hash_lut_size =
+ cpu_to_be16((lut_bus) ? priv->rss_lut_size : 0),
.hash_key_addr = cpu_to_be64(key_bus),
.hash_lut_addr = cpu_to_be64(lut_bus),
};
@@ -1341,11 +1376,11 @@ int gve_adminq_configure_rss(struct gve_priv *priv, struct ethtool_rxfh_param *r
out:
if (lut)
dma_free_coherent(&priv->pdev->dev,
- lut_size * sizeof(*lut),
+ priv->rss_lut_size * sizeof(*lut),
lut, lut_bus);
if (key)
dma_free_coherent(&priv->pdev->dev,
- key_size, key, key_bus);
+ priv->rss_key_size, key, key_bus);
return err;
}
@@ -1449,12 +1484,15 @@ static int gve_adminq_process_rss_query(struct gve_priv *priv,
rxfh->hfunc = descriptor->hash_alg;
rss_info_addr = (void *)(descriptor + 1);
- if (rxfh->key)
+ if (rxfh->key) {
+ rxfh->key_size = priv->rss_key_size;
memcpy(rxfh->key, rss_info_addr, priv->rss_key_size);
+ }
rss_info_addr += priv->rss_key_size;
lut = (__be32 *)rss_info_addr;
if (rxfh->indir) {
+ rxfh->indir_size = priv->rss_lut_size;
for (i = 0; i < priv->rss_lut_size; i++)
rxfh->indir[i] = be32_to_cpu(lut[i]);
}
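
The net effect of the gve_adminq_configure_rss() rework is that a NULL indir or key from ethtool no longer means "program nothing": when cache_rss_config is set, the driver falls back to its cached table and key, which lets a queue-count change reprogram a sane default spread. A condensed restatement of the selection logic, using the names from the patch:

	const u32 *lut = rxfh->indir ? rxfh->indir :
			 (priv->cache_rss_config ? priv->rss_config.hash_lut : NULL);
	const u8 *key = rxfh->key ? rxfh->key :
			(priv->cache_rss_config ? priv->rss_config.hash_key : NULL);

	/* Sizes supplied by ethtool must match the device-reported sizes, and a
	 * zero hash_key_size/hash_lut_size in the admin command leaves that half
	 * of the device state untouched.
	 */
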
diff --git a/drivers/net/ethernet/google/gve/gve_buffer_mgmt_dqo.c b/drivers/net/ethernet/google/gve/gve_buffer_mgmt_dqo.c
index 403f0f335ba6..a71883e1d920 100644
--- a/drivers/net/ethernet/google/gve/gve_buffer_mgmt_dqo.c
+++ b/drivers/net/ethernet/google/gve/gve_buffer_mgmt_dqo.c
@@ -139,7 +139,8 @@ int gve_alloc_qpl_page_dqo(struct gve_rx_ring *rx,
buf_state->page_info.page_offset = 0;
buf_state->page_info.page_address =
page_address(buf_state->page_info.page);
- buf_state->page_info.buf_size = priv->data_buffer_size_dqo;
+ buf_state->page_info.buf_size = rx->packet_buffer_truesize;
+ buf_state->page_info.pad = rx->rx_headroom;
buf_state->last_single_ref_offset = 0;
/* The page already has 1 ref. */
@@ -162,7 +163,7 @@ void gve_free_qpl_page_dqo(struct gve_rx_buf_state_dqo *buf_state)
void gve_try_recycle_buf(struct gve_priv *priv, struct gve_rx_ring *rx,
struct gve_rx_buf_state_dqo *buf_state)
{
- const u16 data_buffer_size = priv->data_buffer_size_dqo;
+ const u16 data_buffer_size = rx->packet_buffer_truesize;
int pagecount;
/* Can't reuse if we only fit one buffer per page */
@@ -205,38 +206,40 @@ void gve_free_to_page_pool(struct gve_rx_ring *rx,
struct gve_rx_buf_state_dqo *buf_state,
bool allow_direct)
{
- struct page *page = buf_state->page_info.page;
+ netmem_ref netmem = buf_state->page_info.netmem;
- if (!page)
+ if (!netmem)
return;
- page_pool_put_full_page(page->pp, page, allow_direct);
- buf_state->page_info.page = NULL;
+ page_pool_put_full_netmem(netmem_get_pp(netmem), netmem, allow_direct);
+ buf_state->page_info.netmem = 0;
}
static int gve_alloc_from_page_pool(struct gve_rx_ring *rx,
struct gve_rx_buf_state_dqo *buf_state)
{
- struct gve_priv *priv = rx->gve;
- struct page *page;
+ netmem_ref netmem;
- buf_state->page_info.buf_size = priv->data_buffer_size_dqo;
- page = page_pool_alloc(rx->dqo.page_pool,
- &buf_state->page_info.page_offset,
- &buf_state->page_info.buf_size, GFP_ATOMIC);
+ buf_state->page_info.buf_size = rx->packet_buffer_truesize;
+ netmem = page_pool_alloc_netmem(rx->dqo.page_pool,
+ &buf_state->page_info.page_offset,
+ &buf_state->page_info.buf_size,
+ GFP_ATOMIC);
- if (!page)
+ if (!netmem)
return -ENOMEM;
- buf_state->page_info.page = page;
- buf_state->page_info.page_address = page_address(page);
- buf_state->addr = page_pool_get_dma_addr(page);
+ buf_state->page_info.netmem = netmem;
+ buf_state->page_info.page_address = netmem_address(netmem);
+ buf_state->addr = page_pool_get_dma_addr_netmem(netmem);
+ buf_state->page_info.pad = rx->dqo.page_pool->p.offset;
return 0;
}
struct page_pool *gve_rx_create_page_pool(struct gve_priv *priv,
- struct gve_rx_ring *rx)
+ struct gve_rx_ring *rx,
+ bool xdp)
{
u32 ntfy_id = gve_rx_idx_to_ntfy(priv, rx->q_num);
struct page_pool_params pp = {
@@ -247,7 +250,8 @@ struct page_pool *gve_rx_create_page_pool(struct gve_priv *priv,
.netdev = priv->dev,
.napi = &priv->ntfy_blocks[ntfy_id].napi,
.max_len = PAGE_SIZE,
- .dma_dir = DMA_FROM_DEVICE,
+ .dma_dir = xdp ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE,
+ .offset = xdp ? XDP_PACKET_HEADROOM : 0,
};
return page_pool_create(&pp);
@@ -269,7 +273,7 @@ void gve_reuse_buffer(struct gve_rx_ring *rx,
struct gve_rx_buf_state_dqo *buf_state)
{
if (rx->dqo.page_pool) {
- buf_state->page_info.page = NULL;
+ buf_state->page_info.netmem = 0;
gve_free_buf_state(rx, buf_state);
} else {
gve_dec_pagecnt_bias(&buf_state->page_info);
@@ -301,7 +305,8 @@ int gve_alloc_buffer(struct gve_rx_ring *rx, struct gve_rx_desc_dqo *desc)
}
desc->buf_id = cpu_to_le16(buf_state - rx->dqo.buf_states);
desc->buf_addr = cpu_to_le64(buf_state->addr +
- buf_state->page_info.page_offset);
+ buf_state->page_info.page_offset +
+ buf_state->page_info.pad);
return 0;
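
For illustration only (not part of the patch above, names are made up): the buffer-management hunks post the descriptor address as the buffer's DMA base plus its offset within the page plus the new pad, so any XDP headroom sits in front of the packet data. A minimal standalone C sketch of that arithmetic:

#include <stdint.h>
#include <stdio.h>

/* Illustrative only: the descriptor address handed to the NIC is the
 * page's DMA base plus the buffer's offset in the page plus the pad
 * (headroom) reserved in front of the packet.
 */
struct buf_info {
	uint64_t dma_base;     /* DMA address of the page */
	uint32_t page_offset;  /* where this buffer starts in the page */
	uint32_t pad;          /* headroom reserved before the packet */
};

static uint64_t rx_desc_addr(const struct buf_info *b)
{
	return b->dma_base + b->page_offset + b->pad;
}

int main(void)
{
	struct buf_info b = { .dma_base = 0x100000, .page_offset = 2048, .pad = 256 };

	printf("descriptor address: 0x%llx\n",
	       (unsigned long long)rx_desc_addr(&b));
	return 0;
}
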
diff --git a/drivers/net/ethernet/google/gve/gve_ethtool.c b/drivers/net/ethernet/google/gve/gve_ethtool.c
index bdfc6e77b2af..31a21ccf4863 100644
--- a/drivers/net/ethernet/google/gve/gve_ethtool.c
+++ b/drivers/net/ethernet/google/gve/gve_ethtool.c
@@ -63,8 +63,8 @@ static const char gve_gstrings_rx_stats[][ETH_GSTRING_LEN] = {
static const char gve_gstrings_tx_stats[][ETH_GSTRING_LEN] = {
"tx_posted_desc[%u]", "tx_completed_desc[%u]", "tx_consumed_desc[%u]", "tx_bytes[%u]",
"tx_wake[%u]", "tx_stop[%u]", "tx_event_counter[%u]",
- "tx_dma_mapping_error[%u]", "tx_xsk_wakeup[%u]",
- "tx_xsk_done[%u]", "tx_xsk_sent[%u]", "tx_xdp_xmit[%u]", "tx_xdp_xmit_errors[%u]"
+ "tx_dma_mapping_error[%u]",
+ "tx_xsk_sent[%u]", "tx_xdp_xmit[%u]", "tx_xdp_xmit_errors[%u]"
};
static const char gve_gstrings_adminq_stats[][ETH_GSTRING_LEN] = {
@@ -417,9 +417,7 @@ gve_get_ethtool_stats(struct net_device *netdev,
data[i++] = value;
}
}
- /* XDP xsk counters */
- data[i++] = tx->xdp_xsk_wakeup;
- data[i++] = tx->xdp_xsk_done;
+ /* XDP counters */
do {
start = u64_stats_fetch_begin(&priv->tx[ring].statss);
data[i] = tx->xdp_xsk_sent;
@@ -477,11 +475,12 @@ static int gve_set_channels(struct net_device *netdev,
struct ethtool_channels *cmd)
{
struct gve_priv *priv = netdev_priv(netdev);
- struct gve_queue_config new_tx_cfg = priv->tx_cfg;
- struct gve_queue_config new_rx_cfg = priv->rx_cfg;
+ struct gve_tx_queue_config new_tx_cfg = priv->tx_cfg;
+ struct gve_rx_queue_config new_rx_cfg = priv->rx_cfg;
struct ethtool_channels old_settings;
int new_tx = cmd->tx_count;
int new_rx = cmd->rx_count;
+ bool reset_rss = false;
gve_get_channels(netdev, &old_settings);
@@ -492,22 +491,27 @@ static int gve_set_channels(struct net_device *netdev,
if (!new_rx || !new_tx)
return -EINVAL;
- if (priv->num_xdp_queues &&
- (new_tx != new_rx || (2 * new_tx > priv->tx_cfg.max_queues))) {
- dev_err(&priv->pdev->dev, "XDP load failed: The number of configured RX queues should be equal to the number of configured TX queues and the number of configured RX/TX queues should be less than or equal to half the maximum number of RX/TX queues");
- return -EINVAL;
- }
+ if (priv->xdp_prog) {
+ if (new_tx != new_rx ||
+ (2 * new_tx > priv->tx_cfg.max_queues)) {
+ dev_err(&priv->pdev->dev, "The number of configured RX queues should be equal to the number of configured TX queues and the number of configured RX/TX queues should be less than or equal to half the maximum number of RX/TX queues when XDP program is installed");
+ return -EINVAL;
+ }
- if (!netif_running(netdev)) {
- priv->tx_cfg.num_queues = new_tx;
- priv->rx_cfg.num_queues = new_rx;
- return 0;
+ /* One XDP TX queue per RX queue. */
+ new_tx_cfg.num_xdp_queues = new_rx;
+ } else {
+ new_tx_cfg.num_xdp_queues = 0;
}
+ if (new_rx != priv->rx_cfg.num_queues &&
+ priv->cache_rss_config && !netif_is_rxfh_configured(netdev))
+ reset_rss = true;
+
new_tx_cfg.num_queues = new_tx;
new_rx_cfg.num_queues = new_rx;
- return gve_adjust_queues(priv, new_rx_cfg, new_tx_cfg);
+ return gve_adjust_queues(priv, new_rx_cfg, new_tx_cfg, reset_rss);
}
static void gve_get_ringparam(struct net_device *netdev,
@@ -643,8 +647,7 @@ static int gve_set_tunable(struct net_device *netdev,
switch (etuna->id) {
case ETHTOOL_RX_COPYBREAK:
{
- u32 max_copybreak = gve_is_gqi(priv) ?
- GVE_DEFAULT_RX_BUFFER_SIZE : priv->data_buffer_size_dqo;
+ u32 max_copybreak = priv->rx_cfg.packet_buffer_size;
len = *(u32 *)value;
if (len > max_copybreak)
@@ -855,6 +858,25 @@ static u32 gve_get_rxfh_indir_size(struct net_device *netdev)
return priv->rss_lut_size;
}
+static void gve_get_rss_config_cache(struct gve_priv *priv,
+ struct ethtool_rxfh_param *rxfh)
+{
+ struct gve_rss_config *rss_config = &priv->rss_config;
+
+ rxfh->hfunc = ETH_RSS_HASH_TOP;
+
+ if (rxfh->key) {
+ rxfh->key_size = priv->rss_key_size;
+ memcpy(rxfh->key, rss_config->hash_key, priv->rss_key_size);
+ }
+
+ if (rxfh->indir) {
+ rxfh->indir_size = priv->rss_lut_size;
+ memcpy(rxfh->indir, rss_config->hash_lut,
+ priv->rss_lut_size * sizeof(*rxfh->indir));
+ }
+}
+
static int gve_get_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *rxfh)
{
struct gve_priv *priv = netdev_priv(netdev);
@@ -862,18 +884,46 @@ static int gve_get_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *rx
if (!priv->rss_key_size || !priv->rss_lut_size)
return -EOPNOTSUPP;
+ if (priv->cache_rss_config) {
+ gve_get_rss_config_cache(priv, rxfh);
+ return 0;
+ }
+
return gve_adminq_query_rss_config(priv, rxfh);
}
+static void gve_set_rss_config_cache(struct gve_priv *priv,
+ struct ethtool_rxfh_param *rxfh)
+{
+ struct gve_rss_config *rss_config = &priv->rss_config;
+
+ if (rxfh->key)
+ memcpy(rss_config->hash_key, rxfh->key, priv->rss_key_size);
+
+ if (rxfh->indir)
+ memcpy(rss_config->hash_lut, rxfh->indir,
+ priv->rss_lut_size * sizeof(*rxfh->indir));
+}
+
static int gve_set_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *rxfh,
struct netlink_ext_ack *extack)
{
struct gve_priv *priv = netdev_priv(netdev);
+ int err;
if (!priv->rss_key_size || !priv->rss_lut_size)
return -EOPNOTSUPP;
- return gve_adminq_configure_rss(priv, rxfh);
+ err = gve_adminq_configure_rss(priv, rxfh);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Fail to configure RSS config");
+ return err;
+ }
+
+ if (priv->cache_rss_config)
+ gve_set_rss_config_cache(priv, rxfh);
+
+ return 0;
}
const struct ethtool_ops gve_ethtool_ops = {
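
As a hedged aside (a sketch, not the driver's code; names are illustrative): the gve_set_channels() hunk above enforces that, with an XDP program installed, the RX and TX counts match and that reserving one XDP TX queue per RX queue still fits within the hardware TX queue limit. A self-contained C version of that check:

#include <errno.h>
#include <stdbool.h>

/* Sketch of the constraint enforced when an XDP program is loaded:
 * each RX queue gets a dedicated XDP TX queue, so TX and RX counts
 * must match and twice the TX count may not exceed the TX queue limit.
 */
static int check_xdp_channels(bool xdp, int new_tx, int new_rx, int max_tx)
{
	if (!xdp)
		return 0;
	if (new_tx != new_rx || 2 * new_tx > max_tx)
		return -EINVAL;
	return 0;
}

int main(void)
{
	/* 4/4 queues with a 16-queue TX limit is accepted; 5/4 is rejected */
	return check_xdp_channels(true, 4, 4, 16) ||
	       !check_xdp_channels(true, 5, 4, 16);
}
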
diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c
index 92237fb0b60c..cb2f9978f45e 100644
--- a/drivers/net/ethernet/google/gve/gve_main.c
+++ b/drivers/net/ethernet/google/gve/gve_main.c
@@ -184,6 +184,43 @@ static void gve_free_flow_rule_caches(struct gve_priv *priv)
flow_rules_cache->rules_cache = NULL;
}
+static int gve_alloc_rss_config_cache(struct gve_priv *priv)
+{
+ struct gve_rss_config *rss_config = &priv->rss_config;
+
+ if (!priv->cache_rss_config)
+ return 0;
+
+ rss_config->hash_key = kcalloc(priv->rss_key_size,
+ sizeof(rss_config->hash_key[0]),
+ GFP_KERNEL);
+ if (!rss_config->hash_key)
+ return -ENOMEM;
+
+ rss_config->hash_lut = kcalloc(priv->rss_lut_size,
+ sizeof(rss_config->hash_lut[0]),
+ GFP_KERNEL);
+ if (!rss_config->hash_lut)
+ goto free_rss_key_cache;
+
+ return 0;
+
+free_rss_key_cache:
+ kfree(rss_config->hash_key);
+ rss_config->hash_key = NULL;
+ return -ENOMEM;
+}
+
+static void gve_free_rss_config_cache(struct gve_priv *priv)
+{
+ struct gve_rss_config *rss_config = &priv->rss_config;
+
+ kfree(rss_config->hash_key);
+ kfree(rss_config->hash_lut);
+
+ memset(rss_config, 0, sizeof(*rss_config));
+}
+
static int gve_alloc_counter_array(struct gve_priv *priv)
{
priv->counter_array =
@@ -575,9 +612,12 @@ static int gve_setup_device_resources(struct gve_priv *priv)
err = gve_alloc_flow_rule_caches(priv);
if (err)
return err;
- err = gve_alloc_counter_array(priv);
+ err = gve_alloc_rss_config_cache(priv);
if (err)
goto abort_with_flow_rule_caches;
+ err = gve_alloc_counter_array(priv);
+ if (err)
+ goto abort_with_rss_config_cache;
err = gve_alloc_notify_blocks(priv);
if (err)
goto abort_with_counter;
@@ -611,6 +651,12 @@ static int gve_setup_device_resources(struct gve_priv *priv)
}
}
+ err = gve_init_rss_config(priv, priv->rx_cfg.num_queues);
+ if (err) {
+ dev_err(&priv->pdev->dev, "Failed to init RSS config");
+ goto abort_with_ptype_lut;
+ }
+
err = gve_adminq_report_stats(priv, priv->stats_report_len,
priv->stats_report_bus,
GVE_STATS_REPORT_TIMER_PERIOD);
@@ -629,6 +675,8 @@ abort_with_ntfy_blocks:
gve_free_notify_blocks(priv);
abort_with_counter:
gve_free_counter_array(priv);
+abort_with_rss_config_cache:
+ gve_free_rss_config_cache(priv);
abort_with_flow_rule_caches:
gve_free_flow_rule_caches(priv);
@@ -669,6 +717,7 @@ static void gve_teardown_device_resources(struct gve_priv *priv)
priv->ptype_lut_dqo = NULL;
gve_free_flow_rule_caches(priv);
+ gve_free_rss_config_cache(priv);
gve_free_counter_array(priv);
gve_free_notify_blocks(priv);
gve_free_stats_report(priv);
@@ -746,30 +795,13 @@ static struct gve_queue_page_list *gve_rx_get_qpl(struct gve_priv *priv, int idx
return rx->dqo.qpl;
}
-static int gve_register_xdp_qpls(struct gve_priv *priv)
-{
- int start_id;
- int err;
- int i;
-
- start_id = gve_xdp_tx_start_queue_id(priv);
- for (i = start_id; i < start_id + gve_num_xdp_qpls(priv); i++) {
- err = gve_register_qpl(priv, gve_tx_get_qpl(priv, i));
- /* This failure will trigger a reset - no need to clean up */
- if (err)
- return err;
- }
- return 0;
-}
-
static int gve_register_qpls(struct gve_priv *priv)
{
int num_tx_qpls, num_rx_qpls;
int err;
int i;
- num_tx_qpls = gve_num_tx_qpls(&priv->tx_cfg, gve_num_xdp_qpls(priv),
- gve_is_qpl(priv));
+ num_tx_qpls = gve_num_tx_qpls(&priv->tx_cfg, gve_is_qpl(priv));
num_rx_qpls = gve_num_rx_qpls(&priv->rx_cfg, gve_is_qpl(priv));
for (i = 0; i < num_tx_qpls; i++) {
@@ -787,30 +819,13 @@ static int gve_register_qpls(struct gve_priv *priv)
return 0;
}
-static int gve_unregister_xdp_qpls(struct gve_priv *priv)
-{
- int start_id;
- int err;
- int i;
-
- start_id = gve_xdp_tx_start_queue_id(priv);
- for (i = start_id; i < start_id + gve_num_xdp_qpls(priv); i++) {
- err = gve_unregister_qpl(priv, gve_tx_get_qpl(priv, i));
- /* This failure will trigger a reset - no need to clean */
- if (err)
- return err;
- }
- return 0;
-}
-
static int gve_unregister_qpls(struct gve_priv *priv)
{
int num_tx_qpls, num_rx_qpls;
int err;
int i;
- num_tx_qpls = gve_num_tx_qpls(&priv->tx_cfg, gve_num_xdp_qpls(priv),
- gve_is_qpl(priv));
+ num_tx_qpls = gve_num_tx_qpls(&priv->tx_cfg, gve_is_qpl(priv));
num_rx_qpls = gve_num_rx_qpls(&priv->rx_cfg, gve_is_qpl(priv));
for (i = 0; i < num_tx_qpls; i++) {
@@ -829,27 +844,6 @@ static int gve_unregister_qpls(struct gve_priv *priv)
return 0;
}
-static int gve_create_xdp_rings(struct gve_priv *priv)
-{
- int err;
-
- err = gve_adminq_create_tx_queues(priv,
- gve_xdp_tx_start_queue_id(priv),
- priv->num_xdp_queues);
- if (err) {
- netif_err(priv, drv, priv->dev, "failed to create %d XDP tx queues\n",
- priv->num_xdp_queues);
- /* This failure will trigger a reset - no need to clean
- * up
- */
- return err;
- }
- netif_dbg(priv, drv, priv->dev, "created %d XDP tx queues\n",
- priv->num_xdp_queues);
-
- return 0;
-}
-
static int gve_create_rings(struct gve_priv *priv)
{
int num_tx_queues = gve_num_tx_queues(priv);
@@ -905,7 +899,7 @@ static void init_xdp_sync_stats(struct gve_priv *priv)
int i;
/* Init stats */
- for (i = start_id; i < start_id + priv->num_xdp_queues; i++) {
+ for (i = start_id; i < start_id + priv->tx_cfg.num_xdp_queues; i++) {
int ntfy_idx = gve_tx_idx_to_ntfy(priv, i);
u64_stats_init(&priv->tx[i].statss);
@@ -930,24 +924,21 @@ static void gve_init_sync_stats(struct gve_priv *priv)
static void gve_tx_get_curr_alloc_cfg(struct gve_priv *priv,
struct gve_tx_alloc_rings_cfg *cfg)
{
- int num_xdp_queues = priv->xdp_prog ? priv->rx_cfg.num_queues : 0;
-
cfg->qcfg = &priv->tx_cfg;
cfg->raw_addressing = !gve_is_qpl(priv);
cfg->ring_size = priv->tx_desc_cnt;
- cfg->start_idx = 0;
- cfg->num_rings = priv->tx_cfg.num_queues + num_xdp_queues;
+ cfg->num_xdp_rings = cfg->qcfg->num_xdp_queues;
cfg->tx = priv->tx;
}
-static void gve_tx_stop_rings(struct gve_priv *priv, int start_id, int num_rings)
+static void gve_tx_stop_rings(struct gve_priv *priv, int num_rings)
{
int i;
if (!priv->tx)
return;
- for (i = start_id; i < start_id + num_rings; i++) {
+ for (i = 0; i < num_rings; i++) {
if (gve_is_gqi(priv))
gve_tx_stop_ring_gqi(priv, i);
else
@@ -955,12 +946,11 @@ static void gve_tx_stop_rings(struct gve_priv *priv, int start_id, int num_rings
}
}
-static void gve_tx_start_rings(struct gve_priv *priv, int start_id,
- int num_rings)
+static void gve_tx_start_rings(struct gve_priv *priv, int num_rings)
{
int i;
- for (i = start_id; i < start_id + num_rings; i++) {
+ for (i = 0; i < num_rings; i++) {
if (gve_is_gqi(priv))
gve_tx_start_ring_gqi(priv, i);
else
@@ -968,28 +958,6 @@ static void gve_tx_start_rings(struct gve_priv *priv, int start_id,
}
}
-static int gve_alloc_xdp_rings(struct gve_priv *priv)
-{
- struct gve_tx_alloc_rings_cfg cfg = {0};
- int err = 0;
-
- if (!priv->num_xdp_queues)
- return 0;
-
- gve_tx_get_curr_alloc_cfg(priv, &cfg);
- cfg.start_idx = gve_xdp_tx_start_queue_id(priv);
- cfg.num_rings = priv->num_xdp_queues;
-
- err = gve_tx_alloc_rings_gqi(priv, &cfg);
- if (err)
- return err;
-
- gve_tx_start_rings(priv, cfg.start_idx, cfg.num_rings);
- init_xdp_sync_stats(priv);
-
- return 0;
-}
-
static int gve_queues_mem_alloc(struct gve_priv *priv,
struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
struct gve_rx_alloc_rings_cfg *rx_alloc_cfg)
@@ -1020,26 +988,6 @@ free_tx:
return err;
}
-static int gve_destroy_xdp_rings(struct gve_priv *priv)
-{
- int start_id;
- int err;
-
- start_id = gve_xdp_tx_start_queue_id(priv);
- err = gve_adminq_destroy_tx_queues(priv,
- start_id,
- priv->num_xdp_queues);
- if (err) {
- netif_err(priv, drv, priv->dev,
- "failed to destroy XDP queues\n");
- /* This failure will trigger a reset - no need to clean up */
- return err;
- }
- netif_dbg(priv, drv, priv->dev, "destroyed XDP queues\n");
-
- return 0;
-}
-
static int gve_destroy_rings(struct gve_priv *priv)
{
int num_tx_queues = gve_num_tx_queues(priv);
@@ -1064,20 +1012,6 @@ static int gve_destroy_rings(struct gve_priv *priv)
return 0;
}
-static void gve_free_xdp_rings(struct gve_priv *priv)
-{
- struct gve_tx_alloc_rings_cfg cfg = {0};
-
- gve_tx_get_curr_alloc_cfg(priv, &cfg);
- cfg.start_idx = gve_xdp_tx_start_queue_id(priv);
- cfg.num_rings = priv->num_xdp_queues;
-
- if (priv->tx) {
- gve_tx_stop_rings(priv, cfg.start_idx, cfg.num_rings);
- gve_tx_free_rings_gqi(priv, &cfg);
- }
-}
-
static void gve_queues_mem_free(struct gve_priv *priv,
struct gve_tx_alloc_rings_cfg *tx_cfg,
struct gve_rx_alloc_rings_cfg *rx_cfg)
@@ -1204,7 +1138,7 @@ static int gve_reg_xdp_info(struct gve_priv *priv, struct net_device *dev)
int i, j;
u32 tx_qid;
- if (!priv->num_xdp_queues)
+ if (!priv->tx_cfg.num_xdp_queues)
return 0;
for (i = 0; i < priv->rx_cfg.num_queues; i++) {
@@ -1215,8 +1149,14 @@ static int gve_reg_xdp_info(struct gve_priv *priv, struct net_device *dev)
napi->napi_id);
if (err)
goto err;
- err = xdp_rxq_info_reg_mem_model(&rx->xdp_rxq,
- MEM_TYPE_PAGE_SHARED, NULL);
+ if (gve_is_qpl(priv))
+ err = xdp_rxq_info_reg_mem_model(&rx->xdp_rxq,
+ MEM_TYPE_PAGE_SHARED,
+ NULL);
+ else
+ err = xdp_rxq_info_reg_mem_model(&rx->xdp_rxq,
+ MEM_TYPE_PAGE_POOL,
+ rx->dqo.page_pool);
if (err)
goto err;
rx->xsk_pool = xsk_get_pool_from_qid(dev, i);
@@ -1234,7 +1174,7 @@ static int gve_reg_xdp_info(struct gve_priv *priv, struct net_device *dev)
}
}
- for (i = 0; i < priv->num_xdp_queues; i++) {
+ for (i = 0; i < priv->tx_cfg.num_xdp_queues; i++) {
tx_qid = gve_xdp_tx_queue_id(priv, i);
priv->tx[tx_qid].xsk_pool = xsk_get_pool_from_qid(dev, i);
}
@@ -1255,7 +1195,7 @@ static void gve_unreg_xdp_info(struct gve_priv *priv)
{
int i, tx_qid;
- if (!priv->num_xdp_queues)
+ if (!priv->tx_cfg.num_xdp_queues || !priv->rx || !priv->tx)
return;
for (i = 0; i < priv->rx_cfg.num_queues; i++) {
@@ -1268,7 +1208,7 @@ static void gve_unreg_xdp_info(struct gve_priv *priv)
}
}
- for (i = 0; i < priv->num_xdp_queues; i++) {
+ for (i = 0; i < priv->tx_cfg.num_xdp_queues; i++) {
tx_qid = gve_xdp_tx_queue_id(priv, i);
priv->tx[tx_qid].xsk_pool = NULL;
}
@@ -1285,15 +1225,14 @@ static void gve_drain_page_cache(struct gve_priv *priv)
static void gve_rx_get_curr_alloc_cfg(struct gve_priv *priv,
struct gve_rx_alloc_rings_cfg *cfg)
{
- cfg->qcfg = &priv->rx_cfg;
+ cfg->qcfg_rx = &priv->rx_cfg;
cfg->qcfg_tx = &priv->tx_cfg;
cfg->raw_addressing = !gve_is_qpl(priv);
cfg->enable_header_split = priv->header_split_enabled;
cfg->ring_size = priv->rx_desc_cnt;
- cfg->packet_buffer_size = gve_is_gqi(priv) ?
- GVE_DEFAULT_RX_BUFFER_SIZE :
- priv->data_buffer_size_dqo;
+ cfg->packet_buffer_size = priv->rx_cfg.packet_buffer_size;
cfg->rx = priv->rx;
+ cfg->xdp = !!cfg->qcfg_tx->num_xdp_queues;
}
void gve_get_curr_alloc_cfgs(struct gve_priv *priv,
@@ -1366,17 +1305,13 @@ static int gve_queues_start(struct gve_priv *priv,
/* Record new configs into priv */
priv->tx_cfg = *tx_alloc_cfg->qcfg;
- priv->rx_cfg = *rx_alloc_cfg->qcfg;
+ priv->tx_cfg.num_xdp_queues = tx_alloc_cfg->num_xdp_rings;
+ priv->rx_cfg = *rx_alloc_cfg->qcfg_rx;
priv->tx_desc_cnt = tx_alloc_cfg->ring_size;
priv->rx_desc_cnt = rx_alloc_cfg->ring_size;
- if (priv->xdp_prog)
- priv->num_xdp_queues = priv->rx_cfg.num_queues;
- else
- priv->num_xdp_queues = 0;
-
- gve_tx_start_rings(priv, 0, tx_alloc_cfg->num_rings);
- gve_rx_start_rings(priv, rx_alloc_cfg->qcfg->num_queues);
+ gve_tx_start_rings(priv, gve_num_tx_queues(priv));
+ gve_rx_start_rings(priv, rx_alloc_cfg->qcfg_rx->num_queues);
gve_init_sync_stats(priv);
err = netif_set_real_num_tx_queues(dev, priv->tx_cfg.num_queues);
@@ -1390,12 +1325,18 @@ static int gve_queues_start(struct gve_priv *priv,
if (err)
goto stop_and_free_rings;
+ if (rx_alloc_cfg->reset_rss) {
+ err = gve_init_rss_config(priv, priv->rx_cfg.num_queues);
+ if (err)
+ goto reset;
+ }
+
err = gve_register_qpls(priv);
if (err)
goto reset;
priv->header_split_enabled = rx_alloc_cfg->enable_header_split;
- priv->data_buffer_size_dqo = rx_alloc_cfg->packet_buffer_size;
+ priv->rx_cfg.packet_buffer_size = rx_alloc_cfg->packet_buffer_size;
err = gve_create_rings(priv);
if (err)
@@ -1422,7 +1363,7 @@ reset:
/* return the original error */
return err;
stop_and_free_rings:
- gve_tx_stop_rings(priv, 0, gve_num_tx_queues(priv));
+ gve_tx_stop_rings(priv, gve_num_tx_queues(priv));
gve_rx_stop_rings(priv, priv->rx_cfg.num_queues);
gve_queues_mem_remove(priv);
return err;
@@ -1471,7 +1412,7 @@ static int gve_queues_stop(struct gve_priv *priv)
gve_unreg_xdp_info(priv);
- gve_tx_stop_rings(priv, 0, gve_num_tx_queues(priv));
+ gve_tx_stop_rings(priv, gve_num_tx_queues(priv));
gve_rx_stop_rings(priv, priv->rx_cfg.num_queues);
priv->interface_down_cnt++;
@@ -1501,56 +1442,6 @@ static int gve_close(struct net_device *dev)
return 0;
}
-static int gve_remove_xdp_queues(struct gve_priv *priv)
-{
- int err;
-
- err = gve_destroy_xdp_rings(priv);
- if (err)
- return err;
-
- err = gve_unregister_xdp_qpls(priv);
- if (err)
- return err;
-
- gve_unreg_xdp_info(priv);
- gve_free_xdp_rings(priv);
-
- priv->num_xdp_queues = 0;
- return 0;
-}
-
-static int gve_add_xdp_queues(struct gve_priv *priv)
-{
- int err;
-
- priv->num_xdp_queues = priv->rx_cfg.num_queues;
-
- err = gve_alloc_xdp_rings(priv);
- if (err)
- goto err;
-
- err = gve_reg_xdp_info(priv, priv->dev);
- if (err)
- goto free_xdp_rings;
-
- err = gve_register_xdp_qpls(priv);
- if (err)
- goto free_xdp_rings;
-
- err = gve_create_xdp_rings(priv);
- if (err)
- goto free_xdp_rings;
-
- return 0;
-
-free_xdp_rings:
- gve_free_xdp_rings(priv);
-err:
- priv->num_xdp_queues = 0;
- return err;
-}
-
static void gve_handle_link_status(struct gve_priv *priv, bool link_status)
{
if (!gve_get_napi_enabled(priv))
@@ -1568,6 +1459,19 @@ static void gve_handle_link_status(struct gve_priv *priv, bool link_status)
}
}
+static int gve_configure_rings_xdp(struct gve_priv *priv,
+ u16 num_xdp_rings)
+{
+ struct gve_tx_alloc_rings_cfg tx_alloc_cfg = {0};
+ struct gve_rx_alloc_rings_cfg rx_alloc_cfg = {0};
+
+ gve_get_curr_alloc_cfgs(priv, &tx_alloc_cfg, &rx_alloc_cfg);
+ tx_alloc_cfg.num_xdp_rings = num_xdp_rings;
+
+ rx_alloc_cfg.xdp = !!num_xdp_rings;
+ return gve_adjust_config(priv, &tx_alloc_cfg, &rx_alloc_cfg);
+}
+
static int gve_set_xdp(struct gve_priv *priv, struct bpf_prog *prog,
struct netlink_ext_ack *extack)
{
@@ -1580,29 +1484,26 @@ static int gve_set_xdp(struct gve_priv *priv, struct bpf_prog *prog,
WRITE_ONCE(priv->xdp_prog, prog);
if (old_prog)
bpf_prog_put(old_prog);
+
+ /* Update priv XDP queue configuration */
+ priv->tx_cfg.num_xdp_queues = priv->xdp_prog ?
+ priv->rx_cfg.num_queues : 0;
return 0;
}
- gve_turndown(priv);
- if (!old_prog && prog) {
- // Allocate XDP TX queues if an XDP program is
- // being installed
- err = gve_add_xdp_queues(priv);
- if (err)
- goto out;
- } else if (old_prog && !prog) {
- // Remove XDP TX queues if an XDP program is
- // being uninstalled
- err = gve_remove_xdp_queues(priv);
- if (err)
- goto out;
- }
+ if (!old_prog && prog)
+ err = gve_configure_rings_xdp(priv, priv->rx_cfg.num_queues);
+ else if (old_prog && !prog)
+ err = gve_configure_rings_xdp(priv, 0);
+
+ if (err)
+ goto out;
+
WRITE_ONCE(priv->xdp_prog, prog);
if (old_prog)
bpf_prog_put(old_prog);
out:
- gve_turnup(priv);
status = ioread32be(&priv->reg_bar0->device_status);
gve_handle_link_status(priv, GVE_DEVICE_STATUS_LINK_STATUS_MASK & status);
return err;
@@ -1736,6 +1637,7 @@ static int gve_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags)
static int verify_xdp_configuration(struct net_device *dev)
{
struct gve_priv *priv = netdev_priv(dev);
+ u16 max_xdp_mtu;
if (dev->features & NETIF_F_LRO) {
netdev_warn(dev, "XDP is not supported when LRO is on.\n");
@@ -1748,7 +1650,11 @@ static int verify_xdp_configuration(struct net_device *dev)
return -EOPNOTSUPP;
}
- if (dev->mtu > GVE_DEFAULT_RX_BUFFER_SIZE - sizeof(struct ethhdr) - GVE_RX_PAD) {
+ max_xdp_mtu = priv->rx_cfg.packet_buffer_size - sizeof(struct ethhdr);
+ if (priv->queue_format == GVE_GQI_QPL_FORMAT)
+ max_xdp_mtu -= GVE_RX_PAD;
+
+ if (dev->mtu > max_xdp_mtu) {
netdev_warn(dev, "XDP is not supported for mtu %d.\n",
dev->mtu);
return -EOPNOTSUPP;
@@ -1786,6 +1692,26 @@ static int gve_xdp(struct net_device *dev, struct netdev_bpf *xdp)
}
}
+int gve_init_rss_config(struct gve_priv *priv, u16 num_queues)
+{
+ struct gve_rss_config *rss_config = &priv->rss_config;
+ struct ethtool_rxfh_param rxfh = {0};
+ u16 i;
+
+ if (!priv->cache_rss_config)
+ return 0;
+
+ for (i = 0; i < priv->rss_lut_size; i++)
+ rss_config->hash_lut[i] =
+ ethtool_rxfh_indir_default(i, num_queues);
+
+ netdev_rss_key_fill(rss_config->hash_key, priv->rss_key_size);
+
+ rxfh.hfunc = ETH_RSS_HASH_TOP;
+
+ return gve_adminq_configure_rss(priv, &rxfh);
+}
+
int gve_flow_rules_reset(struct gve_priv *priv)
{
if (!priv->max_flow_rules)
@@ -1833,12 +1759,12 @@ int gve_adjust_config(struct gve_priv *priv,
}
int gve_adjust_queues(struct gve_priv *priv,
- struct gve_queue_config new_rx_config,
- struct gve_queue_config new_tx_config)
+ struct gve_rx_queue_config new_rx_config,
+ struct gve_tx_queue_config new_tx_config,
+ bool reset_rss)
{
struct gve_tx_alloc_rings_cfg tx_alloc_cfg = {0};
struct gve_rx_alloc_rings_cfg rx_alloc_cfg = {0};
- int num_xdp_queues;
int err;
gve_get_curr_alloc_cfgs(priv, &tx_alloc_cfg, &rx_alloc_cfg);
@@ -1846,18 +1772,19 @@ int gve_adjust_queues(struct gve_priv *priv,
/* Relay the new config from ethtool */
tx_alloc_cfg.qcfg = &new_tx_config;
rx_alloc_cfg.qcfg_tx = &new_tx_config;
- rx_alloc_cfg.qcfg = &new_rx_config;
- tx_alloc_cfg.num_rings = new_tx_config.num_queues;
-
- /* Add dedicated XDP TX queues if enabled. */
- num_xdp_queues = priv->xdp_prog ? new_rx_config.num_queues : 0;
- tx_alloc_cfg.num_rings += num_xdp_queues;
+ rx_alloc_cfg.qcfg_rx = &new_rx_config;
+ rx_alloc_cfg.reset_rss = reset_rss;
if (netif_running(priv->dev)) {
err = gve_adjust_config(priv, &tx_alloc_cfg, &rx_alloc_cfg);
return err;
}
/* Set the config for the next up. */
+ if (reset_rss) {
+ err = gve_init_rss_config(priv, new_rx_config.num_queues);
+ if (err)
+ return err;
+ }
priv->tx_cfg = new_tx_config;
priv->rx_cfg = new_rx_config;
@@ -1886,7 +1813,7 @@ static void gve_turndown(struct gve_priv *priv)
netif_queue_set_napi(priv->dev, idx,
NETDEV_QUEUE_TYPE_TX, NULL);
- napi_disable(&block->napi);
+ napi_disable_locked(&block->napi);
}
for (idx = 0; idx < priv->rx_cfg.num_queues; idx++) {
int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx);
@@ -1897,7 +1824,7 @@ static void gve_turndown(struct gve_priv *priv)
netif_queue_set_napi(priv->dev, idx, NETDEV_QUEUE_TYPE_RX,
NULL);
- napi_disable(&block->napi);
+ napi_disable_locked(&block->napi);
}
/* Stop tx queues */
@@ -1927,7 +1854,7 @@ static void gve_turnup(struct gve_priv *priv)
if (!gve_tx_was_added_to_block(priv, idx))
continue;
- napi_enable(&block->napi);
+ napi_enable_locked(&block->napi);
if (idx < priv->tx_cfg.num_queues)
netif_queue_set_napi(priv->dev, idx,
@@ -1955,7 +1882,7 @@ static void gve_turnup(struct gve_priv *priv)
if (!gve_rx_was_added_to_block(priv, idx))
continue;
- napi_enable(&block->napi);
+ napi_enable_locked(&block->napi);
netif_queue_set_napi(priv->dev, idx, NETDEV_QUEUE_TYPE_RX,
&block->napi);
@@ -1974,7 +1901,7 @@ static void gve_turnup(struct gve_priv *priv)
napi_schedule(&block->napi);
}
- if (priv->num_xdp_queues && gve_supports_xdp_xmit(priv))
+ if (priv->tx_cfg.num_xdp_queues && gve_supports_xdp_xmit(priv))
xdp_features_set_redirect_target(priv->dev, false);
gve_set_napi_enabled(priv);
@@ -2330,6 +2257,7 @@ static int gve_init_priv(struct gve_priv *priv, bool skip_describe_device)
priv->rx_cfg.num_queues = min_t(int, priv->default_num_queues,
priv->rx_cfg.num_queues);
}
+ priv->tx_cfg.num_xdp_queues = 0;
dev_info(&priv->pdev->dev, "TX queues %d, RX queues %d\n",
priv->tx_cfg.num_queues, priv->rx_cfg.num_queues);
@@ -2710,7 +2638,7 @@ static int gve_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
priv->service_task_flags = 0x0;
priv->state_flags = 0x0;
priv->ethtool_flags = 0x0;
- priv->data_buffer_size_dqo = GVE_DEFAULT_RX_BUFFER_SIZE;
+ priv->rx_cfg.packet_buffer_size = GVE_DEFAULT_RX_BUFFER_SIZE;
priv->max_rx_buffer_size = GVE_DEFAULT_RX_BUFFER_SIZE;
gve_set_probe_in_progress(priv);
@@ -2805,6 +2733,7 @@ static int gve_suspend(struct pci_dev *pdev, pm_message_t state)
priv->suspend_cnt++;
rtnl_lock();
+ netdev_lock(netdev);
if (was_up && gve_close(priv->dev)) {
/* If the dev was up, attempt to close, if close fails, reset */
gve_reset_and_teardown(priv, was_up);
@@ -2813,6 +2742,7 @@ static int gve_suspend(struct pci_dev *pdev, pm_message_t state)
gve_teardown_priv_resources(priv);
}
priv->up_before_suspend = was_up;
+ netdev_unlock(netdev);
rtnl_unlock();
return 0;
}
@@ -2825,7 +2755,9 @@ static int gve_resume(struct pci_dev *pdev)
priv->resume_cnt++;
rtnl_lock();
+ netdev_lock(netdev);
err = gve_reset_recovery(priv, priv->up_before_suspend);
+ netdev_unlock(netdev);
rtnl_unlock();
return err;
}
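
For illustration (a standalone sketch, not part of the patch; sizes are arbitrary): gve_init_rss_config() above rebuilds a default indirection table whenever the queue count changes by spreading LUT entries round-robin over the active RX queues, which is exactly what ethtool_rxfh_indir_default() computes in the kernel:

#include <stdint.h>
#include <stdio.h>

/* Sketch of a default RSS LUT rebuild after a queue-count change:
 * entry i points at queue (i % num_queues), the same formula as
 * ethtool_rxfh_indir_default(i, num_queues).
 */
int main(void)
{
	uint32_t lut[128];
	uint32_t num_queues = 6;
	uint32_t i;

	for (i = 0; i < 128; i++)
		lut[i] = i % num_queues;

	printf("lut[0]=%u lut[7]=%u lut[127]=%u\n", lut[0], lut[7], lut[127]);
	return 0;
}
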
diff --git a/drivers/net/ethernet/google/gve/gve_rx.c b/drivers/net/ethernet/google/gve/gve_rx.c
index acb73d4d0de6..90e875c1832f 100644
--- a/drivers/net/ethernet/google/gve/gve_rx.c
+++ b/drivers/net/ethernet/google/gve/gve_rx.c
@@ -141,12 +141,15 @@ void gve_rx_free_ring_gqi(struct gve_priv *priv, struct gve_rx_ring *rx,
netif_dbg(priv, drv, priv->dev, "freed rx ring %d\n", idx);
}
-static void gve_setup_rx_buffer(struct gve_rx_slot_page_info *page_info,
- dma_addr_t addr, struct page *page, __be64 *slot_addr)
+static void gve_setup_rx_buffer(struct gve_rx_ring *rx,
+ struct gve_rx_slot_page_info *page_info,
+ dma_addr_t addr, struct page *page,
+ __be64 *slot_addr)
{
page_info->page = page;
page_info->page_offset = 0;
page_info->page_address = page_address(page);
+ page_info->buf_size = rx->packet_buffer_size;
*slot_addr = cpu_to_be64(addr);
/* The page already has 1 ref */
page_ref_add(page, INT_MAX - 1);
@@ -171,7 +174,7 @@ static int gve_rx_alloc_buffer(struct gve_priv *priv, struct device *dev,
return err;
}
- gve_setup_rx_buffer(page_info, dma, page, &data_slot->addr);
+ gve_setup_rx_buffer(rx, page_info, dma, page, &data_slot->addr);
return 0;
}
@@ -199,7 +202,8 @@ static int gve_rx_prefill_pages(struct gve_rx_ring *rx,
struct page *page = rx->data.qpl->pages[i];
dma_addr_t addr = i * PAGE_SIZE;
- gve_setup_rx_buffer(&rx->data.page_info[i], addr, page,
+ gve_setup_rx_buffer(rx, &rx->data.page_info[i], addr,
+ page,
&rx->data.data_ring[i].qpl_offset);
continue;
}
@@ -222,6 +226,7 @@ static int gve_rx_prefill_pages(struct gve_rx_ring *rx,
rx->qpl_copy_pool[j].page = page;
rx->qpl_copy_pool[j].page_offset = 0;
rx->qpl_copy_pool[j].page_address = page_address(page);
+ rx->qpl_copy_pool[j].buf_size = rx->packet_buffer_size;
/* The page already has 1 ref. */
page_ref_add(page, INT_MAX - 1);
@@ -283,6 +288,7 @@ int gve_rx_alloc_ring_gqi(struct gve_priv *priv,
rx->gve = priv;
rx->q_num = idx;
+ rx->packet_buffer_size = cfg->packet_buffer_size;
rx->mask = slots - 1;
rx->data.raw_addressing = cfg->raw_addressing;
@@ -351,7 +357,6 @@ int gve_rx_alloc_ring_gqi(struct gve_priv *priv,
rx->db_threshold = slots / 2;
gve_rx_init_ring_state_gqi(rx);
- rx->packet_buffer_size = GVE_DEFAULT_RX_BUFFER_SIZE;
gve_rx_ctx_clear(&rx->ctx);
return 0;
@@ -385,12 +390,12 @@ int gve_rx_alloc_rings_gqi(struct gve_priv *priv,
int err = 0;
int i, j;
- rx = kvcalloc(cfg->qcfg->max_queues, sizeof(struct gve_rx_ring),
+ rx = kvcalloc(cfg->qcfg_rx->max_queues, sizeof(struct gve_rx_ring),
GFP_KERNEL);
if (!rx)
return -ENOMEM;
- for (i = 0; i < cfg->qcfg->num_queues; i++) {
+ for (i = 0; i < cfg->qcfg_rx->num_queues; i++) {
err = gve_rx_alloc_ring_gqi(priv, cfg, &rx[i], i);
if (err) {
netif_err(priv, drv, priv->dev,
@@ -419,7 +424,7 @@ void gve_rx_free_rings_gqi(struct gve_priv *priv,
if (!rx)
return;
- for (i = 0; i < cfg->qcfg->num_queues; i++)
+ for (i = 0; i < cfg->qcfg_rx->num_queues; i++)
gve_rx_free_ring_gqi(priv, &rx[i], cfg);
kvfree(rx);
@@ -590,7 +595,7 @@ static struct sk_buff *gve_rx_copy_to_pool(struct gve_rx_ring *rx,
copy_page_info->pad = page_info->pad;
skb = gve_rx_add_frags(napi, copy_page_info,
- rx->packet_buffer_size, len, ctx);
+ copy_page_info->buf_size, len, ctx);
if (unlikely(!skb))
return NULL;
@@ -630,7 +635,8 @@ gve_rx_qpl(struct device *dev, struct net_device *netdev,
* device.
*/
if (page_info->can_flip) {
- skb = gve_rx_add_frags(napi, page_info, rx->packet_buffer_size, len, ctx);
+ skb = gve_rx_add_frags(napi, page_info, page_info->buf_size,
+ len, ctx);
/* No point in recycling if we didn't get the skb */
if (skb) {
/* Make sure that the page isn't freed. */
@@ -680,7 +686,7 @@ static struct sk_buff *gve_rx_skb(struct gve_priv *priv, struct gve_rx_ring *rx,
skb = gve_rx_raw_addressing(&priv->pdev->dev, netdev,
page_info, len, napi,
data_slot,
- rx->packet_buffer_size, ctx);
+ page_info->buf_size, ctx);
} else {
skb = gve_rx_qpl(&priv->pdev->dev, netdev, rx,
page_info, len, napi, data_slot);
@@ -855,7 +861,7 @@ static void gve_rx(struct gve_rx_ring *rx, netdev_features_t feat,
void *old_data;
int xdp_act;
- xdp_init_buff(&xdp, rx->packet_buffer_size, &rx->xdp_rxq);
+ xdp_init_buff(&xdp, page_info->buf_size, &rx->xdp_rxq);
xdp_prepare_buff(&xdp, page_info->page_address +
page_info->page_offset, GVE_RX_PAD,
len, false);
diff --git a/drivers/net/ethernet/google/gve/gve_rx_dqo.c b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
index f0674a443567..dcb0545baa50 100644
--- a/drivers/net/ethernet/google/gve/gve_rx_dqo.c
+++ b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
@@ -114,7 +114,8 @@ void gve_rx_stop_ring_dqo(struct gve_priv *priv, int idx)
if (!gve_rx_was_added_to_block(priv, idx))
return;
- page_pool_disable_direct_recycling(rx->dqo.page_pool);
+ if (rx->dqo.page_pool)
+ page_pool_disable_direct_recycling(rx->dqo.page_pool);
gve_remove_napi(priv, ntfy_idx);
gve_rx_remove_from_block(priv, idx);
gve_rx_reset_ring_dqo(priv, idx);
@@ -223,6 +224,15 @@ int gve_rx_alloc_ring_dqo(struct gve_priv *priv,
memset(rx, 0, sizeof(*rx));
rx->gve = priv;
rx->q_num = idx;
+ rx->packet_buffer_size = cfg->packet_buffer_size;
+
+ if (cfg->xdp) {
+ rx->packet_buffer_truesize = GVE_XDP_RX_BUFFER_SIZE_DQO;
+ rx->rx_headroom = XDP_PACKET_HEADROOM;
+ } else {
+ rx->packet_buffer_truesize = rx->packet_buffer_size;
+ rx->rx_headroom = 0;
+ }
rx->dqo.num_buf_states = cfg->raw_addressing ? buffer_queue_slots :
gve_get_rx_pages_per_qpl_dqo(cfg->ring_size);
@@ -253,7 +263,7 @@ int gve_rx_alloc_ring_dqo(struct gve_priv *priv,
goto err;
if (cfg->raw_addressing) {
- pool = gve_rx_create_page_pool(priv, rx);
+ pool = gve_rx_create_page_pool(priv, rx, cfg->xdp);
if (IS_ERR(pool))
goto err;
@@ -299,12 +309,12 @@ int gve_rx_alloc_rings_dqo(struct gve_priv *priv,
int err;
int i;
- rx = kvcalloc(cfg->qcfg->max_queues, sizeof(struct gve_rx_ring),
+ rx = kvcalloc(cfg->qcfg_rx->max_queues, sizeof(struct gve_rx_ring),
GFP_KERNEL);
if (!rx)
return -ENOMEM;
- for (i = 0; i < cfg->qcfg->num_queues; i++) {
+ for (i = 0; i < cfg->qcfg_rx->num_queues; i++) {
err = gve_rx_alloc_ring_dqo(priv, cfg, &rx[i], i);
if (err) {
netif_err(priv, drv, priv->dev,
@@ -333,7 +343,7 @@ void gve_rx_free_rings_dqo(struct gve_priv *priv,
if (!rx)
return;
- for (i = 0; i < cfg->qcfg->num_queues; i++)
+ for (i = 0; i < cfg->qcfg_rx->num_queues; i++)
gve_rx_free_ring_dqo(priv, &rx[i], cfg);
kvfree(rx);
@@ -476,6 +486,25 @@ static int gve_rx_copy_ondemand(struct gve_rx_ring *rx,
return 0;
}
+static void gve_skb_add_rx_frag(struct gve_rx_ring *rx,
+ struct gve_rx_buf_state_dqo *buf_state,
+ int num_frags, u16 buf_len)
+{
+ if (rx->dqo.page_pool) {
+ skb_add_rx_frag_netmem(rx->ctx.skb_tail, num_frags,
+ buf_state->page_info.netmem,
+ buf_state->page_info.page_offset +
+ buf_state->page_info.pad, buf_len,
+ buf_state->page_info.buf_size);
+ } else {
+ skb_add_rx_frag(rx->ctx.skb_tail, num_frags,
+ buf_state->page_info.page,
+ buf_state->page_info.page_offset +
+ buf_state->page_info.pad, buf_len,
+ buf_state->page_info.buf_size);
+ }
+}
+
/* Chains multi skbs for single rx packet.
* Returns 0 if buffer is appended, -1 otherwise.
*/
@@ -513,14 +542,34 @@ static int gve_rx_append_frags(struct napi_struct *napi,
if (gve_rx_should_trigger_copy_ondemand(rx))
return gve_rx_copy_ondemand(rx, buf_state, buf_len);
- skb_add_rx_frag(rx->ctx.skb_tail, num_frags,
- buf_state->page_info.page,
- buf_state->page_info.page_offset,
- buf_len, buf_state->page_info.buf_size);
+ gve_skb_add_rx_frag(rx, buf_state, num_frags, buf_len);
gve_reuse_buffer(rx, buf_state);
return 0;
}
+static void gve_xdp_done_dqo(struct gve_priv *priv, struct gve_rx_ring *rx,
+ struct xdp_buff *xdp, struct bpf_prog *xprog,
+ int xdp_act,
+ struct gve_rx_buf_state_dqo *buf_state)
+{
+ u64_stats_update_begin(&rx->statss);
+ switch (xdp_act) {
+ case XDP_ABORTED:
+ case XDP_DROP:
+ default:
+ rx->xdp_actions[xdp_act]++;
+ break;
+ case XDP_TX:
+ rx->xdp_tx_errors++;
+ break;
+ case XDP_REDIRECT:
+ rx->xdp_redirect_errors++;
+ break;
+ }
+ u64_stats_update_end(&rx->statss);
+ gve_free_buffer(rx, buf_state);
+}
+
/* Returns 0 if descriptor is completed successfully.
* Returns -EINVAL if descriptor is invalid.
* Returns -ENOMEM if data cannot be copied to skb.
@@ -535,6 +584,7 @@ static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx,
const bool hsplit = compl_desc->split_header;
struct gve_rx_buf_state_dqo *buf_state;
struct gve_priv *priv = rx->gve;
+ struct bpf_prog *xprog;
u16 buf_len;
u16 hdr_len;
@@ -561,7 +611,12 @@ static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx,
/* Page might have not been used for awhile and was likely last written
* by a different thread.
*/
- prefetch(buf_state->page_info.page);
+ if (rx->dqo.page_pool) {
+ if (!netmem_is_net_iov(buf_state->page_info.netmem))
+ prefetch(netmem_to_page(buf_state->page_info.netmem));
+ } else {
+ prefetch(buf_state->page_info.page);
+ }
/* Copy the header into the skb in the case of header split */
if (hsplit) {
@@ -590,7 +645,8 @@ static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx,
/* Sync the portion of dma buffer for CPU to read. */
dma_sync_single_range_for_cpu(&priv->pdev->dev, buf_state->addr,
- buf_state->page_info.page_offset,
+ buf_state->page_info.page_offset +
+ buf_state->page_info.pad,
buf_len, DMA_FROM_DEVICE);
/* Append to current skb if one exists. */
@@ -602,6 +658,34 @@ static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx,
return 0;
}
+ xprog = READ_ONCE(priv->xdp_prog);
+ if (xprog) {
+ struct xdp_buff xdp;
+ void *old_data;
+ int xdp_act;
+
+ xdp_init_buff(&xdp, buf_state->page_info.buf_size,
+ &rx->xdp_rxq);
+ xdp_prepare_buff(&xdp,
+ buf_state->page_info.page_address +
+ buf_state->page_info.page_offset,
+ buf_state->page_info.pad,
+ buf_len, false);
+ old_data = xdp.data;
+ xdp_act = bpf_prog_run_xdp(xprog, &xdp);
+ buf_state->page_info.pad += xdp.data - old_data;
+ buf_len = xdp.data_end - xdp.data;
+ if (xdp_act != XDP_PASS) {
+ gve_xdp_done_dqo(priv, rx, &xdp, xprog, xdp_act,
+ buf_state);
+ return 0;
+ }
+
+ u64_stats_update_begin(&rx->statss);
+ rx->xdp_actions[XDP_PASS]++;
+ u64_stats_update_end(&rx->statss);
+ }
+
if (eop && buf_len <= priv->rx_copybreak) {
rx->ctx.skb_head = gve_rx_copy(priv->dev, napi,
&buf_state->page_info, buf_len);
@@ -632,9 +716,7 @@ static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx,
if (rx->dqo.page_pool)
skb_mark_for_recycle(rx->ctx.skb_head);
- skb_add_rx_frag(rx->ctx.skb_head, 0, buf_state->page_info.page,
- buf_state->page_info.page_offset, buf_len,
- buf_state->page_info.buf_size);
+ gve_skb_add_rx_frag(rx, buf_state, 0, buf_len);
gve_reuse_buffer(rx, buf_state);
return 0;
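
As an aside (illustrative userspace types, not the driver's structures): after bpf_prog_run_xdp() in the DQO RX path above, the program may have moved xdp.data, so the driver grows the pad by the forward shift and recomputes the payload length from the new data..data_end window. A minimal sketch of that accounting:

#include <stdint.h>

/* Sketch of the pointer arithmetic done after running the XDP program:
 * the pad absorbs however far data moved, and the length is whatever
 * remains between data and data_end.
 */
struct fake_xdp {
	uint8_t *data;
	uint8_t *data_end;
};

static void account_xdp_adjust(struct fake_xdp *xdp, uint8_t *old_data,
			       uint16_t *pad, uint16_t *len)
{
	*pad += xdp->data - old_data;     /* headroom consumed by the program */
	*len = xdp->data_end - xdp->data; /* remaining payload */
}

int main(void)
{
	uint8_t buf[256];
	struct fake_xdp xdp = { .data = buf + 16, .data_end = buf + 128 };
	uint16_t pad = 16, len = 0;
	uint8_t *old_data = xdp.data;

	/* pretend the program stripped a 14-byte header */
	xdp.data += 14;
	account_xdp_adjust(&xdp, old_data, &pad, &len);
	return !(pad == 30 && len == 98);
}
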
diff --git a/drivers/net/ethernet/google/gve/gve_tx.c b/drivers/net/ethernet/google/gve/gve_tx.c
index 4350ebd9c2bd..1b40bf0c811a 100644
--- a/drivers/net/ethernet/google/gve/gve_tx.c
+++ b/drivers/net/ethernet/google/gve/gve_tx.c
@@ -334,27 +334,23 @@ int gve_tx_alloc_rings_gqi(struct gve_priv *priv,
struct gve_tx_alloc_rings_cfg *cfg)
{
struct gve_tx_ring *tx = cfg->tx;
+ int total_queues;
int err = 0;
int i, j;
- if (cfg->start_idx + cfg->num_rings > cfg->qcfg->max_queues) {
+ total_queues = cfg->qcfg->num_queues + cfg->num_xdp_rings;
+ if (total_queues > cfg->qcfg->max_queues) {
netif_err(priv, drv, priv->dev,
"Cannot alloc more than the max num of Tx rings\n");
return -EINVAL;
}
- if (cfg->start_idx == 0) {
- tx = kvcalloc(cfg->qcfg->max_queues, sizeof(struct gve_tx_ring),
- GFP_KERNEL);
- if (!tx)
- return -ENOMEM;
- } else if (!tx) {
- netif_err(priv, drv, priv->dev,
- "Cannot alloc tx rings from a nonzero start idx without tx array\n");
- return -EINVAL;
- }
+ tx = kvcalloc(cfg->qcfg->max_queues, sizeof(struct gve_tx_ring),
+ GFP_KERNEL);
+ if (!tx)
+ return -ENOMEM;
- for (i = cfg->start_idx; i < cfg->start_idx + cfg->num_rings; i++) {
+ for (i = 0; i < total_queues; i++) {
err = gve_tx_alloc_ring_gqi(priv, cfg, &tx[i], i);
if (err) {
netif_err(priv, drv, priv->dev,
@@ -370,8 +366,7 @@ int gve_tx_alloc_rings_gqi(struct gve_priv *priv,
cleanup:
for (j = 0; j < i; j++)
gve_tx_free_ring_gqi(priv, &tx[j], cfg);
- if (cfg->start_idx == 0)
- kvfree(tx);
+ kvfree(tx);
return err;
}
@@ -384,13 +379,11 @@ void gve_tx_free_rings_gqi(struct gve_priv *priv,
if (!tx)
return;
- for (i = cfg->start_idx; i < cfg->start_idx + cfg->num_rings; i++)
+ for (i = 0; i < cfg->qcfg->num_queues + cfg->qcfg->num_xdp_queues; i++)
gve_tx_free_ring_gqi(priv, &tx[i], cfg);
- if (cfg->start_idx == 0) {
- kvfree(tx);
- cfg->tx = NULL;
- }
+ kvfree(tx);
+ cfg->tx = NULL;
}
/* gve_tx_avail - Calculates the number of slots available in the ring
@@ -844,7 +837,7 @@ int gve_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
return -ENETDOWN;
qid = gve_xdp_tx_queue_id(priv,
- smp_processor_id() % priv->num_xdp_queues);
+ smp_processor_id() % priv->tx_cfg.num_xdp_queues);
tx = &priv->tx[qid];
@@ -959,13 +952,9 @@ static int gve_xsk_tx(struct gve_priv *priv, struct gve_tx_ring *tx,
spin_lock(&tx->xdp_lock);
while (sent < budget) {
- if (!gve_can_tx(tx, GVE_TX_START_THRESH))
- goto out;
-
- if (!xsk_tx_peek_desc(tx->xsk_pool, &desc)) {
- tx->xdp_xsk_done = tx->xdp_xsk_wakeup;
+ if (!gve_can_tx(tx, GVE_TX_START_THRESH) ||
+ !xsk_tx_peek_desc(tx->xsk_pool, &desc))
goto out;
- }
data = xsk_buff_raw_get_data(tx->xsk_pool, desc.addr);
nsegs = gve_tx_fill_xdp(priv, tx, data, desc.len, NULL, true);
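
For illustration (names are made up, not the driver's): gve_xdp_xmit() above spreads redirected frames across the dedicated XDP TX queues keyed on the transmitting CPU's id, a simple modulo mapping:

#include <stdio.h>

/* Sketch of the XDP TX queue selection: the sending CPU id is folded
 * onto the set of dedicated XDP queues.
 */
static unsigned int pick_xdp_txq(unsigned int cpu_id, unsigned int num_xdp_queues)
{
	return cpu_id % num_xdp_queues;
}

int main(void)
{
	printf("cpu 5 with 4 xdp queues -> txq %u\n", pick_xdp_txq(5, 4));
	return 0;
}
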
diff --git a/drivers/net/ethernet/google/gve/gve_tx_dqo.c b/drivers/net/ethernet/google/gve/gve_tx_dqo.c
index 394debc62268..2eba868d8037 100644
--- a/drivers/net/ethernet/google/gve/gve_tx_dqo.c
+++ b/drivers/net/ethernet/google/gve/gve_tx_dqo.c
@@ -379,27 +379,23 @@ int gve_tx_alloc_rings_dqo(struct gve_priv *priv,
struct gve_tx_alloc_rings_cfg *cfg)
{
struct gve_tx_ring *tx = cfg->tx;
+ int total_queues;
int err = 0;
int i, j;
- if (cfg->start_idx + cfg->num_rings > cfg->qcfg->max_queues) {
+ total_queues = cfg->qcfg->num_queues + cfg->num_xdp_rings;
+ if (total_queues > cfg->qcfg->max_queues) {
netif_err(priv, drv, priv->dev,
"Cannot alloc more than the max num of Tx rings\n");
return -EINVAL;
}
- if (cfg->start_idx == 0) {
- tx = kvcalloc(cfg->qcfg->max_queues, sizeof(struct gve_tx_ring),
- GFP_KERNEL);
- if (!tx)
- return -ENOMEM;
- } else if (!tx) {
- netif_err(priv, drv, priv->dev,
- "Cannot alloc tx rings from a nonzero start idx without tx array\n");
- return -EINVAL;
- }
+ tx = kvcalloc(cfg->qcfg->max_queues, sizeof(struct gve_tx_ring),
+ GFP_KERNEL);
+ if (!tx)
+ return -ENOMEM;
- for (i = cfg->start_idx; i < cfg->start_idx + cfg->num_rings; i++) {
+ for (i = 0; i < total_queues; i++) {
err = gve_tx_alloc_ring_dqo(priv, cfg, &tx[i], i);
if (err) {
netif_err(priv, drv, priv->dev,
@@ -415,8 +411,7 @@ int gve_tx_alloc_rings_dqo(struct gve_priv *priv,
err:
for (j = 0; j < i; j++)
gve_tx_free_ring_dqo(priv, &tx[j], cfg);
- if (cfg->start_idx == 0)
- kvfree(tx);
+ kvfree(tx);
return err;
}
@@ -429,13 +424,11 @@ void gve_tx_free_rings_dqo(struct gve_priv *priv,
if (!tx)
return;
- for (i = cfg->start_idx; i < cfg->start_idx + cfg->num_rings; i++)
+ for (i = 0; i < cfg->qcfg->num_queues + cfg->qcfg->num_xdp_queues; i++)
gve_tx_free_ring_dqo(priv, &tx[i], cfg);
- if (cfg->start_idx == 0) {
- kvfree(tx);
- cfg->tx = NULL;
- }
+ kvfree(tx);
+ cfg->tx = NULL;
}
/* Returns the number of slots available in the ring */
diff --git a/drivers/net/ethernet/google/gve/gve_utils.c b/drivers/net/ethernet/google/gve/gve_utils.c
index 30fef100257e..ace9b8698021 100644
--- a/drivers/net/ethernet/google/gve/gve_utils.c
+++ b/drivers/net/ethernet/google/gve/gve_utils.c
@@ -110,13 +110,13 @@ void gve_add_napi(struct gve_priv *priv, int ntfy_idx,
{
struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
- netif_napi_add(priv->dev, &block->napi, gve_poll);
- netif_napi_set_irq(&block->napi, block->irq);
+ netif_napi_add_locked(priv->dev, &block->napi, gve_poll);
+ netif_napi_set_irq_locked(&block->napi, block->irq);
}
void gve_remove_napi(struct gve_priv *priv, int ntfy_idx)
{
struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
- netif_napi_del(&block->napi);
+ netif_napi_del_locked(&block->napi);
}
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/Makefile b/drivers/net/ethernet/hisilicon/hibmcge/Makefile
index 7ea15f9ef849..1a9da564b306 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/Makefile
+++ b/drivers/net/ethernet/hisilicon/hibmcge/Makefile
@@ -6,4 +6,4 @@
obj-$(CONFIG_HIBMCGE) += hibmcge.o
hibmcge-objs = hbg_main.o hbg_hw.o hbg_mdio.o hbg_irq.o hbg_txrx.o hbg_ethtool.o \
- hbg_debugfs.o hbg_err.o
+ hbg_debugfs.o hbg_err.o hbg_diagnose.o
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_common.h b/drivers/net/ethernet/hisilicon/hibmcge/hbg_common.h
index b4300d8ea4ad..f8cdab62bf85 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_common.h
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_common.h
@@ -36,6 +36,8 @@ enum hbg_nic_state {
HBG_NIC_STATE_EVENT_HANDLING = 0,
HBG_NIC_STATE_RESETTING,
HBG_NIC_STATE_RESET_FAIL,
+ HBG_NIC_STATE_NEED_RESET, /* trigger a reset in scheduled task */
+ HBG_NIC_STATE_NP_LINK_FAIL,
};
enum hbg_reset_type {
@@ -81,6 +83,7 @@ enum hbg_hw_event_type {
HBG_HW_EVENT_NONE = 0,
HBG_HW_EVENT_INIT, /* driver is loading */
HBG_HW_EVENT_RESET,
+ HBG_HW_EVENT_CORE_RESET,
};
struct hbg_dev_specs {
@@ -104,6 +107,7 @@ struct hbg_irq_info {
u32 mask;
bool re_enable;
bool need_print;
+ bool need_reset;
u64 count;
void (*irq_handle)(struct hbg_priv *priv, struct hbg_irq_info *info);
@@ -142,6 +146,118 @@ struct hbg_user_def {
struct ethtool_pauseparam pause_param;
};
+struct hbg_stats {
+ u64 rx_desc_drop;
+ u64 rx_desc_l2_err_cnt;
+ u64 rx_desc_pkt_len_err_cnt;
+ u64 rx_desc_l3l4_err_cnt;
+ u64 rx_desc_l3_wrong_head_cnt;
+ u64 rx_desc_l3_csum_err_cnt;
+ u64 rx_desc_l3_len_err_cnt;
+ u64 rx_desc_l3_zero_ttl_cnt;
+ u64 rx_desc_l3_other_cnt;
+ u64 rx_desc_l4_err_cnt;
+ u64 rx_desc_l4_wrong_head_cnt;
+ u64 rx_desc_l4_len_err_cnt;
+ u64 rx_desc_l4_csum_err_cnt;
+ u64 rx_desc_l4_zero_port_num_cnt;
+ u64 rx_desc_l4_other_cnt;
+ u64 rx_desc_frag_cnt;
+ u64 rx_desc_ip_ver_err_cnt;
+ u64 rx_desc_ipv4_pkt_cnt;
+ u64 rx_desc_ipv6_pkt_cnt;
+ u64 rx_desc_no_ip_pkt_cnt;
+ u64 rx_desc_ip_pkt_cnt;
+ u64 rx_desc_tcp_pkt_cnt;
+ u64 rx_desc_udp_pkt_cnt;
+ u64 rx_desc_vlan_pkt_cnt;
+ u64 rx_desc_icmp_pkt_cnt;
+ u64 rx_desc_arp_pkt_cnt;
+ u64 rx_desc_rarp_pkt_cnt;
+ u64 rx_desc_multicast_pkt_cnt;
+ u64 rx_desc_broadcast_pkt_cnt;
+ u64 rx_desc_ipsec_pkt_cnt;
+ u64 rx_desc_ip_opt_pkt_cnt;
+ u64 rx_desc_key_not_match_cnt;
+
+ u64 rx_octets_total_ok_cnt;
+ u64 rx_uc_pkt_cnt;
+ u64 rx_mc_pkt_cnt;
+ u64 rx_bc_pkt_cnt;
+ u64 rx_vlan_pkt_cnt;
+ u64 rx_octets_bad_cnt;
+ u64 rx_octets_total_filt_cnt;
+ u64 rx_filt_pkt_cnt;
+ u64 rx_trans_pkt_cnt;
+ u64 rx_framesize_64;
+ u64 rx_framesize_65_127;
+ u64 rx_framesize_128_255;
+ u64 rx_framesize_256_511;
+ u64 rx_framesize_512_1023;
+ u64 rx_framesize_1024_1518;
+ u64 rx_framesize_bt_1518;
+ u64 rx_fcs_error_cnt;
+ u64 rx_data_error_cnt;
+ u64 rx_align_error_cnt;
+ u64 rx_pause_macctl_frame_cnt;
+ u64 rx_unknown_macctl_frame_cnt;
+ /* crc ok, > max_frm_size, < 2max_frm_size */
+ u64 rx_frame_long_err_cnt;
+ /* crc fail, > max_frm_size, < 2max_frm_size */
+ u64 rx_jabber_err_cnt;
+ /* > 2max_frm_size */
+ u64 rx_frame_very_long_err_cnt;
+ /* < 64byte, >= short_runts_thr */
+ u64 rx_frame_runt_err_cnt;
+ /* < short_runts_thr */
+ u64 rx_frame_short_err_cnt;
+ /* PCU: dropped when the RX FIFO is full.*/
+ u64 rx_overflow_cnt;
+ /* GMAC: the count of overflows of the RX FIFO */
+ u64 rx_overrun_cnt;
+ /* PCU: the count of buffer alloc errors in RX */
+ u64 rx_bufrq_err_cnt;
+ /* PCU: the count of write descriptor errors in RX */
+ u64 rx_we_err_cnt;
+ /* GMAC: the count of pkts that contain PAD but length is not 64 */
+ u64 rx_lengthfield_err_cnt;
+ u64 rx_fail_comma_cnt;
+
+ u64 rx_dma_err_cnt;
+ u64 rx_fifo_less_empty_thrsld_cnt;
+
+ u64 tx_octets_total_ok_cnt;
+ u64 tx_uc_pkt_cnt;
+ u64 tx_mc_pkt_cnt;
+ u64 tx_bc_pkt_cnt;
+ u64 tx_vlan_pkt_cnt;
+ u64 tx_octets_bad_cnt;
+ u64 tx_trans_pkt_cnt;
+ u64 tx_pause_frame_cnt;
+ u64 tx_framesize_64;
+ u64 tx_framesize_65_127;
+ u64 tx_framesize_128_255;
+ u64 tx_framesize_256_511;
+ u64 tx_framesize_512_1023;
+ u64 tx_framesize_1024_1518;
+ u64 tx_framesize_bt_1518;
+ /* GMAC: the count of times that frames fail to be transmitted
+ * due to internal errors.
+ */
+ u64 tx_underrun_err_cnt;
+ u64 tx_add_cs_fail_cnt;
+ /* PCU: the count of buffer free errors in TX */
+ u64 tx_bufrl_err_cnt;
+ u64 tx_crc_err_cnt;
+ u64 tx_drop_cnt;
+ u64 tx_excessive_length_drop_cnt;
+
+ u64 tx_timeout_cnt;
+ u64 tx_dma_err_cnt;
+
+ u64 np_link_fail_cnt;
+};
+
struct hbg_priv {
struct net_device *netdev;
struct pci_dev *pdev;
@@ -155,6 +271,12 @@ struct hbg_priv {
struct hbg_mac_filter filter;
enum hbg_reset_type reset_type;
struct hbg_user_def user_def;
+ struct hbg_stats stats;
+ unsigned long last_update_stats_time;
+ struct delayed_work service_task;
};
+void hbg_err_reset_task_schedule(struct hbg_priv *priv);
+void hbg_np_link_fail_task_schedule(struct hbg_priv *priv);
+
#endif
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_debugfs.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_debugfs.c
index 8473c43d171a..5e0ba4d5b08d 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_debugfs.c
@@ -67,10 +67,11 @@ static int hbg_dbg_irq_info(struct seq_file *s, void *unused)
for (i = 0; i < priv->vectors.info_array_len; i++) {
info = &priv->vectors.info_array[i];
seq_printf(s,
- "%-20s: enabled: %-5s, logged: %-5s, count: %llu\n",
+ "%-20s: enabled: %-5s, reset: %-5s, logged: %-5s, count: %llu\n",
info->name,
str_true_false(hbg_hw_irq_is_enabled(priv,
info->mask)),
+ str_true_false(info->need_reset),
str_true_false(info->need_print),
info->count);
}
@@ -114,6 +115,10 @@ static int hbg_dbg_nic_state(struct seq_file *s, void *unused)
state_str_true_false(priv, HBG_NIC_STATE_RESET_FAIL));
seq_printf(s, "last reset type: %s\n",
reset_type_str[priv->reset_type]);
+ seq_printf(s, "need reset state: %s\n",
+ state_str_true_false(priv, HBG_NIC_STATE_NEED_RESET));
+ seq_printf(s, "np_link fail state: %s\n",
+ state_str_true_false(priv, HBG_NIC_STATE_NP_LINK_FAIL));
return 0;
}
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_diagnose.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_diagnose.c
new file mode 100644
index 000000000000..d61c03f34ff0
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_diagnose.c
@@ -0,0 +1,348 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2025 Hisilicon Limited.
+
+#include <linux/iopoll.h>
+#include <linux/phy.h>
+#include "hbg_common.h"
+#include "hbg_ethtool.h"
+#include "hbg_hw.h"
+#include "hbg_diagnose.h"
+
+#define HBG_MSG_DATA_MAX_NUM 64
+
+struct hbg_diagnose_message {
+ u32 opcode;
+ u32 status;
+ u32 data_num;
+ struct hbg_priv *priv;
+
+ u32 data[HBG_MSG_DATA_MAX_NUM];
+};
+
+#define HBG_HW_PUSH_WAIT_TIMEOUT_US (2 * 1000 * 1000)
+#define HBG_HW_PUSH_WAIT_INTERVAL_US (1 * 1000)
+
+enum hbg_push_cmd {
+ HBG_PUSH_CMD_IRQ = 0,
+ HBG_PUSH_CMD_STATS,
+ HBG_PUSH_CMD_LINK,
+};
+
+struct hbg_push_stats_info {
+ /* id is used to match the name of the current stats item.
+ * and is used for pretty print on BMC
+ */
+ u32 id;
+ u64 offset;
+};
+
+struct hbg_push_irq_info {
+ /* id is used to match the name of the current irq.
+ * and is used for pretty print on BMC
+ */
+ u32 id;
+ u32 mask;
+};
+
+#define HBG_PUSH_IRQ_I(name, id) {id, HBG_INT_MSK_##name##_B}
+static const struct hbg_push_irq_info hbg_push_irq_list[] = {
+ HBG_PUSH_IRQ_I(RX, 0),
+ HBG_PUSH_IRQ_I(TX, 1),
+ HBG_PUSH_IRQ_I(TX_PKT_CPL, 2),
+ HBG_PUSH_IRQ_I(MAC_MII_FIFO_ERR, 3),
+ HBG_PUSH_IRQ_I(MAC_PCS_RX_FIFO_ERR, 4),
+ HBG_PUSH_IRQ_I(MAC_PCS_TX_FIFO_ERR, 5),
+ HBG_PUSH_IRQ_I(MAC_APP_RX_FIFO_ERR, 6),
+ HBG_PUSH_IRQ_I(MAC_APP_TX_FIFO_ERR, 7),
+ HBG_PUSH_IRQ_I(SRAM_PARITY_ERR, 8),
+ HBG_PUSH_IRQ_I(TX_AHB_ERR, 9),
+ HBG_PUSH_IRQ_I(RX_BUF_AVL, 10),
+ HBG_PUSH_IRQ_I(REL_BUF_ERR, 11),
+ HBG_PUSH_IRQ_I(TXCFG_AVL, 12),
+ HBG_PUSH_IRQ_I(TX_DROP, 13),
+ HBG_PUSH_IRQ_I(RX_DROP, 14),
+ HBG_PUSH_IRQ_I(RX_AHB_ERR, 15),
+ HBG_PUSH_IRQ_I(MAC_FIFO_ERR, 16),
+ HBG_PUSH_IRQ_I(RBREQ_ERR, 17),
+ HBG_PUSH_IRQ_I(WE_ERR, 18),
+};
+
+#define HBG_PUSH_STATS_I(name, id) {id, HBG_STATS_FIELD_OFF(name)}
+static const struct hbg_push_stats_info hbg_push_stats_list[] = {
+ HBG_PUSH_STATS_I(rx_desc_drop, 0),
+ HBG_PUSH_STATS_I(rx_desc_l2_err_cnt, 1),
+ HBG_PUSH_STATS_I(rx_desc_pkt_len_err_cnt, 2),
+ HBG_PUSH_STATS_I(rx_desc_l3_wrong_head_cnt, 3),
+ HBG_PUSH_STATS_I(rx_desc_l3_csum_err_cnt, 4),
+ HBG_PUSH_STATS_I(rx_desc_l3_len_err_cnt, 5),
+ HBG_PUSH_STATS_I(rx_desc_l3_zero_ttl_cnt, 6),
+ HBG_PUSH_STATS_I(rx_desc_l3_other_cnt, 7),
+ HBG_PUSH_STATS_I(rx_desc_l4_err_cnt, 8),
+ HBG_PUSH_STATS_I(rx_desc_l4_wrong_head_cnt, 9),
+ HBG_PUSH_STATS_I(rx_desc_l4_len_err_cnt, 10),
+ HBG_PUSH_STATS_I(rx_desc_l4_csum_err_cnt, 11),
+ HBG_PUSH_STATS_I(rx_desc_l4_zero_port_num_cnt, 12),
+ HBG_PUSH_STATS_I(rx_desc_l4_other_cnt, 13),
+ HBG_PUSH_STATS_I(rx_desc_frag_cnt, 14),
+ HBG_PUSH_STATS_I(rx_desc_ip_ver_err_cnt, 15),
+ HBG_PUSH_STATS_I(rx_desc_ipv4_pkt_cnt, 16),
+ HBG_PUSH_STATS_I(rx_desc_ipv6_pkt_cnt, 17),
+ HBG_PUSH_STATS_I(rx_desc_no_ip_pkt_cnt, 18),
+ HBG_PUSH_STATS_I(rx_desc_ip_pkt_cnt, 19),
+ HBG_PUSH_STATS_I(rx_desc_tcp_pkt_cnt, 20),
+ HBG_PUSH_STATS_I(rx_desc_udp_pkt_cnt, 21),
+ HBG_PUSH_STATS_I(rx_desc_vlan_pkt_cnt, 22),
+ HBG_PUSH_STATS_I(rx_desc_icmp_pkt_cnt, 23),
+ HBG_PUSH_STATS_I(rx_desc_arp_pkt_cnt, 24),
+ HBG_PUSH_STATS_I(rx_desc_rarp_pkt_cnt, 25),
+ HBG_PUSH_STATS_I(rx_desc_multicast_pkt_cnt, 26),
+ HBG_PUSH_STATS_I(rx_desc_broadcast_pkt_cnt, 27),
+ HBG_PUSH_STATS_I(rx_desc_ipsec_pkt_cnt, 28),
+ HBG_PUSH_STATS_I(rx_desc_ip_opt_pkt_cnt, 29),
+ HBG_PUSH_STATS_I(rx_desc_key_not_match_cnt, 30),
+ HBG_PUSH_STATS_I(rx_octets_total_ok_cnt, 31),
+ HBG_PUSH_STATS_I(rx_uc_pkt_cnt, 32),
+ HBG_PUSH_STATS_I(rx_mc_pkt_cnt, 33),
+ HBG_PUSH_STATS_I(rx_bc_pkt_cnt, 34),
+ HBG_PUSH_STATS_I(rx_vlan_pkt_cnt, 35),
+ HBG_PUSH_STATS_I(rx_octets_bad_cnt, 36),
+ HBG_PUSH_STATS_I(rx_octets_total_filt_cnt, 37),
+ HBG_PUSH_STATS_I(rx_filt_pkt_cnt, 38),
+ HBG_PUSH_STATS_I(rx_trans_pkt_cnt, 39),
+ HBG_PUSH_STATS_I(rx_framesize_64, 40),
+ HBG_PUSH_STATS_I(rx_framesize_65_127, 41),
+ HBG_PUSH_STATS_I(rx_framesize_128_255, 42),
+ HBG_PUSH_STATS_I(rx_framesize_256_511, 43),
+ HBG_PUSH_STATS_I(rx_framesize_512_1023, 44),
+ HBG_PUSH_STATS_I(rx_framesize_1024_1518, 45),
+ HBG_PUSH_STATS_I(rx_framesize_bt_1518, 46),
+ HBG_PUSH_STATS_I(rx_fcs_error_cnt, 47),
+ HBG_PUSH_STATS_I(rx_data_error_cnt, 48),
+ HBG_PUSH_STATS_I(rx_align_error_cnt, 49),
+ HBG_PUSH_STATS_I(rx_frame_long_err_cnt, 50),
+ HBG_PUSH_STATS_I(rx_jabber_err_cnt, 51),
+ HBG_PUSH_STATS_I(rx_pause_macctl_frame_cnt, 52),
+ HBG_PUSH_STATS_I(rx_unknown_macctl_frame_cnt, 53),
+ HBG_PUSH_STATS_I(rx_frame_very_long_err_cnt, 54),
+ HBG_PUSH_STATS_I(rx_frame_runt_err_cnt, 55),
+ HBG_PUSH_STATS_I(rx_frame_short_err_cnt, 56),
+ HBG_PUSH_STATS_I(rx_overflow_cnt, 57),
+ HBG_PUSH_STATS_I(rx_bufrq_err_cnt, 58),
+ HBG_PUSH_STATS_I(rx_we_err_cnt, 59),
+ HBG_PUSH_STATS_I(rx_overrun_cnt, 60),
+ HBG_PUSH_STATS_I(rx_lengthfield_err_cnt, 61),
+ HBG_PUSH_STATS_I(rx_fail_comma_cnt, 62),
+ HBG_PUSH_STATS_I(rx_dma_err_cnt, 63),
+ HBG_PUSH_STATS_I(rx_fifo_less_empty_thrsld_cnt, 64),
+ HBG_PUSH_STATS_I(tx_octets_total_ok_cnt, 65),
+ HBG_PUSH_STATS_I(tx_uc_pkt_cnt, 66),
+ HBG_PUSH_STATS_I(tx_mc_pkt_cnt, 67),
+ HBG_PUSH_STATS_I(tx_bc_pkt_cnt, 68),
+ HBG_PUSH_STATS_I(tx_vlan_pkt_cnt, 69),
+ HBG_PUSH_STATS_I(tx_octets_bad_cnt, 70),
+ HBG_PUSH_STATS_I(tx_trans_pkt_cnt, 71),
+ HBG_PUSH_STATS_I(tx_pause_frame_cnt, 72),
+ HBG_PUSH_STATS_I(tx_framesize_64, 73),
+ HBG_PUSH_STATS_I(tx_framesize_65_127, 74),
+ HBG_PUSH_STATS_I(tx_framesize_128_255, 75),
+ HBG_PUSH_STATS_I(tx_framesize_256_511, 76),
+ HBG_PUSH_STATS_I(tx_framesize_512_1023, 77),
+ HBG_PUSH_STATS_I(tx_framesize_1024_1518, 78),
+ HBG_PUSH_STATS_I(tx_framesize_bt_1518, 79),
+ HBG_PUSH_STATS_I(tx_underrun_err_cnt, 80),
+ HBG_PUSH_STATS_I(tx_add_cs_fail_cnt, 81),
+ HBG_PUSH_STATS_I(tx_bufrl_err_cnt, 82),
+ HBG_PUSH_STATS_I(tx_crc_err_cnt, 83),
+ HBG_PUSH_STATS_I(tx_drop_cnt, 84),
+ HBG_PUSH_STATS_I(tx_excessive_length_drop_cnt, 85),
+ HBG_PUSH_STATS_I(tx_dma_err_cnt, 86),
+};
+
+static int hbg_push_msg_send(struct hbg_priv *priv,
+ struct hbg_diagnose_message *msg)
+{
+ u32 header = 0;
+ u32 i;
+
+ if (msg->data_num == 0)
+ return 0;
+
+ for (i = 0; i < msg->data_num && i < HBG_MSG_DATA_MAX_NUM; i++)
+ hbg_reg_write(priv,
+ HBG_REG_MSG_DATA_BASE_ADDR + i * sizeof(u32),
+ msg->data[i]);
+
+ hbg_field_modify(header, HBG_REG_MSG_HEADER_OPCODE_M, msg->opcode);
+ hbg_field_modify(header, HBG_REG_MSG_HEADER_DATA_NUM_M, msg->data_num);
+ hbg_field_modify(header, HBG_REG_MSG_HEADER_RESP_CODE_M, ETIMEDOUT);
+
+ /* start status */
+ hbg_field_modify(header, HBG_REG_MSG_HEADER_STATUS_M, 1);
+
+ /* write header msg to start push */
+ hbg_reg_write(priv, HBG_REG_MSG_HEADER_ADDR, header);
+
+ /* wait done */
+ readl_poll_timeout(priv->io_base + HBG_REG_MSG_HEADER_ADDR, header,
+ !FIELD_GET(HBG_REG_MSG_HEADER_STATUS_M, header),
+ HBG_HW_PUSH_WAIT_INTERVAL_US,
+ HBG_HW_PUSH_WAIT_TIMEOUT_US);
+
+ msg->status = FIELD_GET(HBG_REG_MSG_HEADER_STATUS_M, header);
+ return -(int)FIELD_GET(HBG_REG_MSG_HEADER_RESP_CODE_M, header);
+}
+
+static int hbg_push_data(struct hbg_priv *priv,
+ u32 opcode, u32 *data, u32 data_num)
+{
+ struct hbg_diagnose_message msg = {0};
+ u32 data_left_num;
+ u32 i, j;
+ int ret;
+
+ msg.priv = priv;
+ msg.opcode = opcode;
+ for (i = 0; i < data_num / HBG_MSG_DATA_MAX_NUM + 1; i++) {
+ if (i * HBG_MSG_DATA_MAX_NUM >= data_num)
+ break;
+
+ data_left_num = data_num - i * HBG_MSG_DATA_MAX_NUM;
+ for (j = 0; j < data_left_num && j < HBG_MSG_DATA_MAX_NUM; j++)
+ msg.data[j] = data[i * HBG_MSG_DATA_MAX_NUM + j];
+
+ msg.data_num = j;
+ ret = hbg_push_msg_send(priv, &msg);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int hbg_push_data_u64(struct hbg_priv *priv, u32 opcode,
+ u64 *data, u32 data_num)
+{
+ /* The length of u64 is twice that of u32,
+ * the data_num must be multiplied by 2.
+ */
+ return hbg_push_data(priv, opcode, (u32 *)data, data_num * 2);
+}
+
+static u64 hbg_get_irq_stats(struct hbg_vector *vectors, u32 mask)
+{
+ u32 i = 0;
+
+ for (i = 0; i < vectors->info_array_len; i++)
+ if (vectors->info_array[i].mask == mask)
+ return vectors->info_array[i].count;
+
+ return 0;
+}
+
+static int hbg_push_irq_cnt(struct hbg_priv *priv)
+{
+	/* Each data item is paired with its id,
+	 * so the data_num must be doubled.
+	 */
+ u32 data_num = ARRAY_SIZE(hbg_push_irq_list) * 2;
+ struct hbg_vector *vectors = &priv->vectors;
+ const struct hbg_push_irq_info *info;
+ u32 i, j = 0;
+ u64 *data;
+ int ret;
+
+ data = kcalloc(data_num, sizeof(u64), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+	/* Each entry is an (id, value) pair,
+	 * so i advances by 2 on each iteration.
+	 */
+ for (i = 0; i < data_num; i += 2) {
+ info = &hbg_push_irq_list[j++];
+ data[i] = info->id;
+ data[i + 1] = hbg_get_irq_stats(vectors, info->mask);
+ }
+
+ ret = hbg_push_data_u64(priv, HBG_PUSH_CMD_IRQ, data, data_num);
+ kfree(data);
+ return ret;
+}
+
+static int hbg_push_link_status(struct hbg_priv *priv)
+{
+ u32 link_status[2];
+
+ /* phy link status */
+ link_status[0] = priv->mac.phydev->link;
+ /* mac link status */
+ link_status[1] = hbg_reg_read_field(priv, HBG_REG_AN_NEG_STATE_ADDR,
+ HBG_REG_AN_NEG_STATE_NP_LINK_OK_B);
+
+ return hbg_push_data(priv, HBG_PUSH_CMD_LINK,
+ link_status, ARRAY_SIZE(link_status));
+}
+
+static int hbg_push_stats(struct hbg_priv *priv)
+{
+	/* Each data item is paired with its id,
+	 * so the data_num must be doubled.
+	 */
+ u64 data_num = ARRAY_SIZE(hbg_push_stats_list) * 2;
+ struct hbg_stats *stats = &priv->stats;
+ const struct hbg_push_stats_info *info;
+ u32 i, j = 0;
+ u64 *data;
+ int ret;
+
+ data = kcalloc(data_num, sizeof(u64), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+	/* Each entry is an (id, value) pair,
+	 * so i advances by 2 on each iteration.
+	 */
+ for (i = 0; i < data_num; i += 2) {
+ info = &hbg_push_stats_list[j++];
+ data[i] = info->id;
+ data[i + 1] = HBG_STATS_R(stats, info->offset);
+ }
+
+ ret = hbg_push_data_u64(priv, HBG_PUSH_CMD_STATS, data, data_num);
+ kfree(data);
+ return ret;
+}
+
+void hbg_diagnose_message_push(struct hbg_priv *priv)
+{
+ int ret;
+
+ if (test_bit(HBG_NIC_STATE_RESETTING, &priv->state))
+ return;
+
+	/* a push is requested only when the register reads 1 */
+ if (hbg_reg_read(priv, HBG_REG_PUSH_REQ_ADDR) != 1)
+ return;
+
+ ret = hbg_push_irq_cnt(priv);
+ if (ret) {
+ dev_err(&priv->pdev->dev,
+ "failed to push irq cnt, ret = %d\n", ret);
+ goto push_done;
+ }
+
+ ret = hbg_push_link_status(priv);
+ if (ret) {
+ dev_err(&priv->pdev->dev,
+ "failed to push link status, ret = %d\n", ret);
+ goto push_done;
+ }
+
+ ret = hbg_push_stats(priv);
+ if (ret)
+ dev_err(&priv->pdev->dev,
+ "failed to push stats, ret = %d\n", ret);
+
+push_done:
+ hbg_reg_write(priv, HBG_REG_PUSH_REQ_ADDR, 0);
+}
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_diagnose.h b/drivers/net/ethernet/hisilicon/hibmcge/hbg_diagnose.h
new file mode 100644
index 000000000000..ba04c6d8c03d
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_diagnose.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) 2025 Hisilicon Limited. */
+
+#ifndef __HBG_DIAGNOSE_H
+#define __HBG_DIAGNOSE_H
+
+#include "hbg_common.h"
+
+void hbg_diagnose_message_push(struct hbg_priv *priv);
+
+#endif
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_err.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_err.c
index 4d1f4a33391a..4e8cb66f601c 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_err.c
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_err.c
@@ -105,6 +105,62 @@ int hbg_reset(struct hbg_priv *priv)
return hbg_reset_done(priv, HBG_RESET_TYPE_FUNCTION);
}
+void hbg_err_reset(struct hbg_priv *priv)
+{
+ bool running;
+
+ rtnl_lock();
+ running = netif_running(priv->netdev);
+ if (running)
+ dev_close(priv->netdev);
+
+ hbg_reset(priv);
+
+	/* the device is detached in hbg_pci_err_detected(),
+	 * so re-attach it before opening
+	 */
+ if (!netif_device_present(priv->netdev))
+ netif_device_attach(priv->netdev);
+
+ if (running)
+ dev_open(priv->netdev, NULL);
+ rtnl_unlock();
+}
+
+static pci_ers_result_t hbg_pci_err_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+
+ netif_device_detach(netdev);
+
+ if (state == pci_channel_io_perm_failure)
+ return PCI_ERS_RESULT_DISCONNECT;
+
+ pci_disable_device(pdev);
+ return PCI_ERS_RESULT_NEED_RESET;
+}
+
+static pci_ers_result_t hbg_pci_err_slot_reset(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct hbg_priv *priv = netdev_priv(netdev);
+
+ if (pci_enable_device(pdev)) {
+ dev_err(&pdev->dev,
+ "failed to re-enable PCI device after reset\n");
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+
+ pci_set_master(pdev);
+ pci_restore_state(pdev);
+ pci_save_state(pdev);
+
+ hbg_err_reset(priv);
+ netif_device_attach(netdev);
+ return PCI_ERS_RESULT_RECOVERED;
+}
+
static void hbg_pci_err_reset_prepare(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
@@ -124,6 +180,8 @@ static void hbg_pci_err_reset_done(struct pci_dev *pdev)
}
static const struct pci_error_handlers hbg_pci_err_handler = {
+ .error_detected = hbg_pci_err_detected,
+ .slot_reset = hbg_pci_err_slot_reset,
.reset_prepare = hbg_pci_err_reset_prepare,
.reset_done = hbg_pci_err_reset_done,
};
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_err.h b/drivers/net/ethernet/hisilicon/hibmcge/hbg_err.h
index d7828e446308..fb9fbe7004e8 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_err.h
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_err.h
@@ -9,5 +9,6 @@
void hbg_set_pci_err_handler(struct pci_driver *pdrv);
int hbg_reset(struct hbg_priv *priv);
int hbg_rebuild(struct hbg_priv *priv);
+void hbg_err_reset(struct hbg_priv *priv);
#endif
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.c
index 00364a438ec2..8f1107b85fbb 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.c
@@ -9,6 +9,136 @@
#include "hbg_ethtool.h"
#include "hbg_hw.h"
+struct hbg_ethtool_stats {
+ char name[ETH_GSTRING_LEN];
+ unsigned long offset;
+	u32 reg; /* set to 0 if the stat is not updated by reading a register */
+};
+
+#define HBG_STATS_I(stats) { #stats, HBG_STATS_FIELD_OFF(stats), 0}
+#define HBG_STATS_REG_I(stats, reg) { #stats, HBG_STATS_FIELD_OFF(stats), reg}
+
+static const struct hbg_ethtool_stats hbg_ethtool_stats_info[] = {
+ HBG_STATS_I(rx_desc_l2_err_cnt),
+ HBG_STATS_I(rx_desc_pkt_len_err_cnt),
+ HBG_STATS_I(rx_desc_l3_wrong_head_cnt),
+ HBG_STATS_I(rx_desc_l3_csum_err_cnt),
+ HBG_STATS_I(rx_desc_l3_len_err_cnt),
+ HBG_STATS_I(rx_desc_l3_zero_ttl_cnt),
+ HBG_STATS_I(rx_desc_l3_other_cnt),
+ HBG_STATS_I(rx_desc_l4_wrong_head_cnt),
+ HBG_STATS_I(rx_desc_l4_len_err_cnt),
+ HBG_STATS_I(rx_desc_l4_csum_err_cnt),
+ HBG_STATS_I(rx_desc_l4_zero_port_num_cnt),
+ HBG_STATS_I(rx_desc_l4_other_cnt),
+ HBG_STATS_I(rx_desc_ip_ver_err_cnt),
+ HBG_STATS_I(rx_desc_ipv4_pkt_cnt),
+ HBG_STATS_I(rx_desc_ipv6_pkt_cnt),
+ HBG_STATS_I(rx_desc_no_ip_pkt_cnt),
+ HBG_STATS_I(rx_desc_ip_pkt_cnt),
+ HBG_STATS_I(rx_desc_tcp_pkt_cnt),
+ HBG_STATS_I(rx_desc_udp_pkt_cnt),
+ HBG_STATS_I(rx_desc_vlan_pkt_cnt),
+ HBG_STATS_I(rx_desc_icmp_pkt_cnt),
+ HBG_STATS_I(rx_desc_arp_pkt_cnt),
+ HBG_STATS_I(rx_desc_rarp_pkt_cnt),
+ HBG_STATS_I(rx_desc_multicast_pkt_cnt),
+ HBG_STATS_I(rx_desc_broadcast_pkt_cnt),
+ HBG_STATS_I(rx_desc_ipsec_pkt_cnt),
+ HBG_STATS_I(rx_desc_ip_opt_pkt_cnt),
+ HBG_STATS_I(rx_desc_key_not_match_cnt),
+
+ HBG_STATS_REG_I(rx_octets_bad_cnt, HBG_REG_RX_OCTETS_BAD_ADDR),
+ HBG_STATS_REG_I(rx_octets_total_filt_cnt,
+ HBG_REG_RX_OCTETS_TOTAL_FILT_ADDR),
+ HBG_STATS_REG_I(rx_uc_pkt_cnt, HBG_REG_RX_UC_PKTS_ADDR),
+ HBG_STATS_REG_I(rx_vlan_pkt_cnt, HBG_REG_RX_TAGGED_ADDR),
+ HBG_STATS_REG_I(rx_filt_pkt_cnt, HBG_REG_RX_FILT_PKT_CNT_ADDR),
+ HBG_STATS_REG_I(rx_data_error_cnt, HBG_REG_RX_DATA_ERR_ADDR),
+ HBG_STATS_REG_I(rx_frame_long_err_cnt, HBG_REG_RX_LONG_ERRORS_ADDR),
+ HBG_STATS_REG_I(rx_jabber_err_cnt, HBG_REG_RX_JABBER_ERRORS_ADDR),
+ HBG_STATS_REG_I(rx_frame_very_long_err_cnt,
+ HBG_REG_RX_VERY_LONG_ERR_CNT_ADDR),
+ HBG_STATS_REG_I(rx_frame_runt_err_cnt, HBG_REG_RX_RUNT_ERR_CNT_ADDR),
+ HBG_STATS_REG_I(rx_frame_short_err_cnt, HBG_REG_RX_SHORT_ERR_CNT_ADDR),
+ HBG_STATS_REG_I(rx_overflow_cnt, HBG_REG_RX_OVER_FLOW_CNT_ADDR),
+ HBG_STATS_REG_I(rx_bufrq_err_cnt, HBG_REG_RX_BUFRQ_ERR_CNT_ADDR),
+ HBG_STATS_REG_I(rx_we_err_cnt, HBG_REG_RX_WE_ERR_CNT_ADDR),
+ HBG_STATS_REG_I(rx_overrun_cnt, HBG_REG_RX_OVERRUN_CNT_ADDR),
+ HBG_STATS_REG_I(rx_lengthfield_err_cnt,
+ HBG_REG_RX_LENGTHFIELD_ERR_CNT_ADDR),
+ HBG_STATS_REG_I(rx_fail_comma_cnt, HBG_REG_RX_FAIL_COMMA_CNT_ADDR),
+ HBG_STATS_I(rx_dma_err_cnt),
+ HBG_STATS_I(rx_fifo_less_empty_thrsld_cnt),
+
+ HBG_STATS_REG_I(tx_uc_pkt_cnt, HBG_REG_TX_UC_PKTS_ADDR),
+ HBG_STATS_REG_I(tx_vlan_pkt_cnt, HBG_REG_TX_TAGGED_ADDR),
+ HBG_STATS_REG_I(tx_octets_bad_cnt, HBG_REG_OCTETS_TRANSMITTED_BAD_ADDR),
+
+ HBG_STATS_REG_I(tx_underrun_err_cnt, HBG_REG_TX_UNDERRUN_ADDR),
+ HBG_STATS_REG_I(tx_add_cs_fail_cnt, HBG_REG_TX_CS_FAIL_CNT_ADDR),
+ HBG_STATS_REG_I(tx_bufrl_err_cnt, HBG_REG_TX_BUFRL_ERR_CNT_ADDR),
+ HBG_STATS_REG_I(tx_crc_err_cnt, HBG_REG_TX_CRC_ERROR_ADDR),
+ HBG_STATS_REG_I(tx_drop_cnt, HBG_REG_TX_DROP_CNT_ADDR),
+ HBG_STATS_REG_I(tx_excessive_length_drop_cnt,
+ HBG_REG_TX_EXCESSIVE_LENGTH_DROP_ADDR),
+ HBG_STATS_I(tx_dma_err_cnt),
+ HBG_STATS_I(tx_timeout_cnt),
+};
+
+static const struct hbg_ethtool_stats hbg_ethtool_rmon_stats_info[] = {
+ HBG_STATS_I(rx_desc_frag_cnt),
+ HBG_STATS_REG_I(rx_framesize_64, HBG_REG_RX_PKTS_64OCTETS_ADDR),
+ HBG_STATS_REG_I(rx_framesize_65_127,
+ HBG_REG_RX_PKTS_65TO127OCTETS_ADDR),
+ HBG_STATS_REG_I(rx_framesize_128_255,
+ HBG_REG_RX_PKTS_128TO255OCTETS_ADDR),
+ HBG_STATS_REG_I(rx_framesize_256_511,
+ HBG_REG_RX_PKTS_256TO511OCTETS_ADDR),
+ HBG_STATS_REG_I(rx_framesize_512_1023,
+ HBG_REG_RX_PKTS_512TO1023OCTETS_ADDR),
+ HBG_STATS_REG_I(rx_framesize_1024_1518,
+ HBG_REG_RX_PKTS_1024TO1518OCTETS_ADDR),
+ HBG_STATS_REG_I(rx_framesize_bt_1518,
+ HBG_REG_RX_PKTS_1519TOMAXOCTETS_ADDR),
+ HBG_STATS_REG_I(tx_framesize_64, HBG_REG_TX_PKTS_64OCTETS_ADDR),
+ HBG_STATS_REG_I(tx_framesize_65_127,
+ HBG_REG_TX_PKTS_65TO127OCTETS_ADDR),
+ HBG_STATS_REG_I(tx_framesize_128_255,
+ HBG_REG_TX_PKTS_128TO255OCTETS_ADDR),
+ HBG_STATS_REG_I(tx_framesize_256_511,
+ HBG_REG_TX_PKTS_256TO511OCTETS_ADDR),
+ HBG_STATS_REG_I(tx_framesize_512_1023,
+ HBG_REG_TX_PKTS_512TO1023OCTETS_ADDR),
+ HBG_STATS_REG_I(tx_framesize_1024_1518,
+ HBG_REG_TX_PKTS_1024TO1518OCTETS_ADDR),
+ HBG_STATS_REG_I(tx_framesize_bt_1518,
+ HBG_REG_TX_PKTS_1519TOMAXOCTETS_ADDR),
+};
+
+static const struct hbg_ethtool_stats hbg_ethtool_mac_stats_info[] = {
+ HBG_STATS_REG_I(rx_mc_pkt_cnt, HBG_REG_RX_MC_PKTS_ADDR),
+ HBG_STATS_REG_I(rx_bc_pkt_cnt, HBG_REG_RX_BC_PKTS_ADDR),
+ HBG_STATS_REG_I(rx_align_error_cnt, HBG_REG_RX_ALIGN_ERRORS_ADDR),
+ HBG_STATS_REG_I(rx_octets_total_ok_cnt,
+ HBG_REG_RX_OCTETS_TOTAL_OK_ADDR),
+ HBG_STATS_REG_I(rx_trans_pkt_cnt, HBG_REG_RX_TRANS_PKG_CNT_ADDR),
+ HBG_STATS_REG_I(rx_fcs_error_cnt, HBG_REG_RX_FCS_ERRORS_ADDR),
+ HBG_STATS_REG_I(tx_mc_pkt_cnt, HBG_REG_TX_MC_PKTS_ADDR),
+ HBG_STATS_REG_I(tx_bc_pkt_cnt, HBG_REG_TX_BC_PKTS_ADDR),
+ HBG_STATS_REG_I(tx_octets_total_ok_cnt,
+ HBG_REG_OCTETS_TRANSMITTED_OK_ADDR),
+ HBG_STATS_REG_I(tx_trans_pkt_cnt, HBG_REG_TX_TRANS_PKG_CNT_ADDR),
+};
+
+static const struct hbg_ethtool_stats hbg_ethtool_ctrl_stats_info[] = {
+ HBG_STATS_REG_I(rx_pause_macctl_frame_cnt,
+ HBG_REG_RX_PAUSE_MACCTL_FRAMCOUNTER_ADDR),
+ HBG_STATS_REG_I(tx_pause_frame_cnt, HBG_REG_TX_PAUSE_FRAMES_ADDR),
+ HBG_STATS_REG_I(rx_unknown_macctl_frame_cnt,
+ HBG_REG_RX_UNKNOWN_MACCTL_FRAMCOUNTER_ADDR),
+};
+
enum hbg_reg_dump_type {
HBG_DUMP_REG_TYPE_SPEC = 0,
HBG_DUMP_REG_TYPE_MDIO,
@@ -180,6 +310,167 @@ static int hbg_ethtool_reset(struct net_device *netdev, u32 *flags)
return hbg_reset(priv);
}
+static void hbg_update_stats_by_info(struct hbg_priv *priv,
+ const struct hbg_ethtool_stats *info,
+ u32 info_len)
+{
+ const struct hbg_ethtool_stats *stats;
+ u32 i;
+
+ for (i = 0; i < info_len; i++) {
+ stats = &info[i];
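+		/* counters without a register are maintained by the driver itself */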
+ if (!stats->reg)
+ continue;
+
+ HBG_STATS_U(&priv->stats, stats->offset,
+ hbg_reg_read(priv, stats->reg));
+ }
+}
+
+void hbg_update_stats(struct hbg_priv *priv)
+{
+ hbg_update_stats_by_info(priv, hbg_ethtool_stats_info,
+ ARRAY_SIZE(hbg_ethtool_stats_info));
+ hbg_update_stats_by_info(priv, hbg_ethtool_rmon_stats_info,
+ ARRAY_SIZE(hbg_ethtool_rmon_stats_info));
+ hbg_update_stats_by_info(priv, hbg_ethtool_mac_stats_info,
+ ARRAY_SIZE(hbg_ethtool_mac_stats_info));
+ hbg_update_stats_by_info(priv, hbg_ethtool_ctrl_stats_info,
+ ARRAY_SIZE(hbg_ethtool_ctrl_stats_info));
+}
+
+static int hbg_ethtool_get_sset_count(struct net_device *netdev, int stringset)
+{
+ if (stringset != ETH_SS_STATS)
+ return -EOPNOTSUPP;
+
+ return ARRAY_SIZE(hbg_ethtool_stats_info);
+}
+
+static void hbg_ethtool_get_strings(struct net_device *netdev,
+ u32 stringset, u8 *data)
+{
+ u32 i;
+
+ if (stringset != ETH_SS_STATS)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(hbg_ethtool_stats_info); i++)
+ ethtool_puts(&data, hbg_ethtool_stats_info[i].name);
+}
+
+static void hbg_ethtool_get_stats(struct net_device *netdev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct hbg_priv *priv = netdev_priv(netdev);
+ u32 i;
+
+ hbg_update_stats(priv);
+ for (i = 0; i < ARRAY_SIZE(hbg_ethtool_stats_info); i++)
+ *data++ = HBG_STATS_R(&priv->stats,
+ hbg_ethtool_stats_info[i].offset);
+}
+
+static void hbg_ethtool_get_pause_stats(struct net_device *netdev,
+ struct ethtool_pause_stats *epstats)
+{
+ struct hbg_priv *priv = netdev_priv(netdev);
+ struct hbg_stats *stats = &priv->stats;
+
+ hbg_update_stats(priv);
+ epstats->rx_pause_frames = stats->rx_pause_macctl_frame_cnt;
+ epstats->tx_pause_frames = stats->tx_pause_frame_cnt;
+}
+
+static void hbg_ethtool_get_eth_mac_stats(struct net_device *netdev,
+ struct ethtool_eth_mac_stats *emstats)
+{
+ struct hbg_priv *priv = netdev_priv(netdev);
+ struct hbg_stats *stats = &priv->stats;
+
+ hbg_update_stats(priv);
+ emstats->FramesTransmittedOK = stats->tx_trans_pkt_cnt;
+ emstats->FramesReceivedOK = stats->rx_trans_pkt_cnt;
+ emstats->FrameCheckSequenceErrors = stats->rx_fcs_error_cnt;
+ emstats->AlignmentErrors = stats->rx_align_error_cnt;
+ emstats->OctetsTransmittedOK = stats->tx_octets_total_ok_cnt;
+ emstats->OctetsReceivedOK = stats->rx_octets_total_ok_cnt;
+
+ emstats->MulticastFramesXmittedOK = stats->tx_mc_pkt_cnt;
+ emstats->BroadcastFramesXmittedOK = stats->tx_bc_pkt_cnt;
+ emstats->MulticastFramesReceivedOK = stats->rx_mc_pkt_cnt;
+ emstats->BroadcastFramesReceivedOK = stats->rx_bc_pkt_cnt;
+ emstats->InRangeLengthErrors = stats->rx_fcs_error_cnt +
+ stats->rx_jabber_err_cnt +
+ stats->rx_unknown_macctl_frame_cnt +
+ stats->rx_bufrq_err_cnt +
+ stats->rx_we_err_cnt;
+ emstats->OutOfRangeLengthField = stats->rx_frame_short_err_cnt +
+ stats->rx_frame_runt_err_cnt +
+ stats->rx_lengthfield_err_cnt +
+ stats->rx_frame_long_err_cnt +
+ stats->rx_frame_very_long_err_cnt;
+ emstats->FrameTooLongErrors = stats->rx_frame_long_err_cnt +
+ stats->rx_frame_very_long_err_cnt;
+}
+
+static void
+hbg_ethtool_get_eth_ctrl_stats(struct net_device *netdev,
+ struct ethtool_eth_ctrl_stats *ecstats)
+{
+ struct hbg_priv *priv = netdev_priv(netdev);
+ struct hbg_stats *s = &priv->stats;
+
+ hbg_update_stats(priv);
+ ecstats->MACControlFramesTransmitted = s->tx_pause_frame_cnt;
+ ecstats->MACControlFramesReceived = s->rx_pause_macctl_frame_cnt;
+ ecstats->UnsupportedOpcodesReceived = s->rx_unknown_macctl_frame_cnt;
+}
+
+static const struct ethtool_rmon_hist_range hbg_rmon_ranges[] = {
+ { 0, 64 },
+ { 65, 127 },
+ { 128, 255 },
+ { 256, 511 },
+ { 512, 1023 },
+ { 1024, 1518 },
+ { 1519, 4095 },
+};
+
+static void
+hbg_ethtool_get_rmon_stats(struct net_device *netdev,
+ struct ethtool_rmon_stats *rmon_stats,
+ const struct ethtool_rmon_hist_range **ranges)
+{
+ struct hbg_priv *priv = netdev_priv(netdev);
+ struct hbg_stats *stats = &priv->stats;
+
+ hbg_update_stats(priv);
+ rmon_stats->undersize_pkts = stats->rx_frame_short_err_cnt +
+ stats->rx_frame_runt_err_cnt +
+ stats->rx_lengthfield_err_cnt;
+ rmon_stats->oversize_pkts = stats->rx_frame_long_err_cnt +
+ stats->rx_frame_very_long_err_cnt;
+ rmon_stats->fragments = stats->rx_desc_frag_cnt;
+ rmon_stats->hist[0] = stats->rx_framesize_64;
+ rmon_stats->hist[1] = stats->rx_framesize_65_127;
+ rmon_stats->hist[2] = stats->rx_framesize_128_255;
+ rmon_stats->hist[3] = stats->rx_framesize_256_511;
+ rmon_stats->hist[4] = stats->rx_framesize_512_1023;
+ rmon_stats->hist[5] = stats->rx_framesize_1024_1518;
+ rmon_stats->hist[6] = stats->rx_framesize_bt_1518;
+
+ rmon_stats->hist_tx[0] = stats->tx_framesize_64;
+ rmon_stats->hist_tx[1] = stats->tx_framesize_65_127;
+ rmon_stats->hist_tx[2] = stats->tx_framesize_128_255;
+ rmon_stats->hist_tx[3] = stats->tx_framesize_256_511;
+ rmon_stats->hist_tx[4] = stats->tx_framesize_512_1023;
+ rmon_stats->hist_tx[5] = stats->tx_framesize_1024_1518;
+ rmon_stats->hist_tx[6] = stats->tx_framesize_bt_1518;
+
+ *ranges = hbg_rmon_ranges;
+}
+
static const struct ethtool_ops hbg_ethtool_ops = {
.get_link = ethtool_op_get_link,
.get_link_ksettings = phy_ethtool_get_link_ksettings,
@@ -190,6 +481,13 @@ static const struct ethtool_ops hbg_ethtool_ops = {
.set_pauseparam = hbg_ethtool_set_pauseparam,
.reset = hbg_ethtool_reset,
.nway_reset = phy_ethtool_nway_reset,
+ .get_sset_count = hbg_ethtool_get_sset_count,
+ .get_strings = hbg_ethtool_get_strings,
+ .get_ethtool_stats = hbg_ethtool_get_stats,
+ .get_pause_stats = hbg_ethtool_get_pause_stats,
+ .get_eth_mac_stats = hbg_ethtool_get_eth_mac_stats,
+ .get_eth_ctrl_stats = hbg_ethtool_get_eth_ctrl_stats,
+ .get_rmon_stats = hbg_ethtool_get_rmon_stats,
};
void hbg_ethtool_set_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.h b/drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.h
index 628707ec2686..e173155b146a 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.h
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.h
@@ -6,6 +6,11 @@
#include <linux/netdevice.h>
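+/* Helpers to access a u64 counter in struct hbg_stats by byte offset:
+ * HBG_STATS_R() reads it, HBG_STATS_U() adds a value to it.
+ */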
+#define HBG_STATS_FIELD_OFF(f) (offsetof(struct hbg_stats, f))
+#define HBG_STATS_R(p, offset) (*(u64 *)((u8 *)(p) + (offset)))
+#define HBG_STATS_U(p, offset, val) (HBG_STATS_R(p, offset) += (val))
+
void hbg_ethtool_set_ops(struct net_device *netdev);
+void hbg_update_stats(struct hbg_priv *priv);
#endif
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.c
index e7798f213645..74a18033b444 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.c
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.c
@@ -213,10 +213,20 @@ void hbg_hw_fill_buffer(struct hbg_priv *priv, u32 buffer_dma_addr)
void hbg_hw_adjust_link(struct hbg_priv *priv, u32 speed, u32 duplex)
{
+ hbg_hw_mac_enable(priv, HBG_STATUS_DISABLE);
+
hbg_reg_write_field(priv, HBG_REG_PORT_MODE_ADDR,
HBG_REG_PORT_MODE_M, speed);
hbg_reg_write_field(priv, HBG_REG_DUPLEX_TYPE_ADDR,
HBG_REG_DUPLEX_B, duplex);
+
+ hbg_hw_event_notify(priv, HBG_HW_EVENT_CORE_RESET);
+
+ hbg_hw_mac_enable(priv, HBG_STATUS_ENABLE);
+
+ if (!hbg_reg_read_field(priv, HBG_REG_AN_NEG_STATE_ADDR,
+ HBG_REG_AN_NEG_STATE_NP_LINK_OK_B))
+ hbg_np_link_fail_task_schedule(priv);
}
/* only support uc filter */
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_irq.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_irq.c
index 25dd25f096fe..e79e9ab3e530 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_irq.c
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_irq.c
@@ -11,6 +11,9 @@ static void hbg_irq_handle_err(struct hbg_priv *priv,
if (irq_info->need_print)
dev_err(&priv->pdev->dev,
"receive error interrupt: %s\n", irq_info->name);
+
+ if (irq_info->need_reset)
+ hbg_err_reset_task_schedule(priv);
}
static void hbg_irq_handle_tx(struct hbg_priv *priv,
@@ -25,30 +28,38 @@ static void hbg_irq_handle_rx(struct hbg_priv *priv,
napi_schedule(&priv->rx_ring.napi);
}
-#define HBG_TXRX_IRQ_I(name, handle) \
- {#name, HBG_INT_MSK_##name##_B, false, false, 0, handle}
-#define HBG_ERR_IRQ_I(name, need_print) \
- {#name, HBG_INT_MSK_##name##_B, true, need_print, 0, hbg_irq_handle_err}
+static void hbg_irq_handle_rx_buf_val(struct hbg_priv *priv,
+ struct hbg_irq_info *irq_info)
+{
+ priv->stats.rx_fifo_less_empty_thrsld_cnt++;
+}
+
+#define HBG_IRQ_I(name, handle) \
+ {#name, HBG_INT_MSK_##name##_B, false, false, false, 0, handle}
+#define HBG_ERR_IRQ_I(name, need_print, need_reset) \
+	{#name, HBG_INT_MSK_##name##_B, true, need_print, \
+	 need_reset, 0, hbg_irq_handle_err}
static struct hbg_irq_info hbg_irqs[] = {
- HBG_TXRX_IRQ_I(RX, hbg_irq_handle_rx),
- HBG_TXRX_IRQ_I(TX, hbg_irq_handle_tx),
- HBG_ERR_IRQ_I(MAC_MII_FIFO_ERR, true),
- HBG_ERR_IRQ_I(MAC_PCS_RX_FIFO_ERR, true),
- HBG_ERR_IRQ_I(MAC_PCS_TX_FIFO_ERR, true),
- HBG_ERR_IRQ_I(MAC_APP_RX_FIFO_ERR, true),
- HBG_ERR_IRQ_I(MAC_APP_TX_FIFO_ERR, true),
- HBG_ERR_IRQ_I(SRAM_PARITY_ERR, true),
- HBG_ERR_IRQ_I(TX_AHB_ERR, true),
- HBG_ERR_IRQ_I(RX_BUF_AVL, false),
- HBG_ERR_IRQ_I(REL_BUF_ERR, true),
- HBG_ERR_IRQ_I(TXCFG_AVL, false),
- HBG_ERR_IRQ_I(TX_DROP, false),
- HBG_ERR_IRQ_I(RX_DROP, false),
- HBG_ERR_IRQ_I(RX_AHB_ERR, true),
- HBG_ERR_IRQ_I(MAC_FIFO_ERR, false),
- HBG_ERR_IRQ_I(RBREQ_ERR, false),
- HBG_ERR_IRQ_I(WE_ERR, false),
+ HBG_IRQ_I(RX, hbg_irq_handle_rx),
+ HBG_IRQ_I(TX, hbg_irq_handle_tx),
+ HBG_ERR_IRQ_I(TX_PKT_CPL, true, true),
+ HBG_ERR_IRQ_I(MAC_MII_FIFO_ERR, true, true),
+ HBG_ERR_IRQ_I(MAC_PCS_RX_FIFO_ERR, true, true),
+ HBG_ERR_IRQ_I(MAC_PCS_TX_FIFO_ERR, true, true),
+ HBG_ERR_IRQ_I(MAC_APP_RX_FIFO_ERR, true, true),
+ HBG_ERR_IRQ_I(MAC_APP_TX_FIFO_ERR, true, true),
+ HBG_ERR_IRQ_I(SRAM_PARITY_ERR, true, false),
+ HBG_ERR_IRQ_I(TX_AHB_ERR, true, true),
+ HBG_IRQ_I(RX_BUF_AVL, hbg_irq_handle_rx_buf_val),
+ HBG_ERR_IRQ_I(REL_BUF_ERR, true, false),
+ HBG_ERR_IRQ_I(TXCFG_AVL, false, false),
+ HBG_ERR_IRQ_I(TX_DROP, false, false),
+ HBG_ERR_IRQ_I(RX_DROP, false, false),
+ HBG_ERR_IRQ_I(RX_AHB_ERR, true, false),
+ HBG_ERR_IRQ_I(MAC_FIFO_ERR, true, true),
+ HBG_ERR_IRQ_I(RBREQ_ERR, true, true),
+ HBG_ERR_IRQ_I(WE_ERR, true, true),
};
static irqreturn_t hbg_irq_handle(int irq_num, void *p)
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_main.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_main.c
index bb0f25ac9760..2ac5454338e4 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_main.c
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_main.c
@@ -5,7 +5,9 @@
#include <linux/if_vlan.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
+#include <linux/phy.h>
#include "hbg_common.h"
+#include "hbg_diagnose.h"
#include "hbg_err.h"
#include "hbg_ethtool.h"
#include "hbg_hw.h"
@@ -14,6 +16,9 @@
#include "hbg_txrx.h"
#include "hbg_debugfs.h"
+#define HBG_SUPPORT_FEATURES (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | \
+ NETIF_F_RXCSUM)
+
static void hbg_all_irq_enable(struct hbg_priv *priv, bool enabled)
{
struct hbg_irq_info *info;
@@ -214,6 +219,10 @@ static void hbg_net_tx_timeout(struct net_device *netdev, unsigned int txqueue)
char *buf = ring->tout_log_buf;
u32 pos = 0;
+ priv->stats.tx_timeout_cnt++;
+
+ pos += scnprintf(buf + pos, HBG_TX_TIMEOUT_BUF_LEN - pos,
+ "tx_timeout cnt: %llu\n", priv->stats.tx_timeout_cnt);
pos += scnprintf(buf + pos, HBG_TX_TIMEOUT_BUF_LEN - pos,
"ring used num: %u, fifo used num: %u\n",
hbg_get_queue_used_num(ring),
@@ -226,6 +235,39 @@ static void hbg_net_tx_timeout(struct net_device *netdev, unsigned int txqueue)
netdev_info(netdev, "%s", buf);
}
+static void hbg_net_get_stats(struct net_device *netdev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct hbg_priv *priv = netdev_priv(netdev);
+ struct hbg_stats *h_stats = &priv->stats;
+
+ hbg_update_stats(priv);
+ dev_get_tstats64(netdev, stats);
+
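+	/* fold the hardware error counters on top of the per-CPU totals */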
+ /* fifo empty */
+ stats->tx_fifo_errors += h_stats->tx_drop_cnt;
+
+ stats->tx_dropped += h_stats->tx_excessive_length_drop_cnt +
+ h_stats->tx_drop_cnt;
+ stats->tx_errors += h_stats->tx_add_cs_fail_cnt +
+ h_stats->tx_bufrl_err_cnt +
+ h_stats->tx_underrun_err_cnt +
+ h_stats->tx_crc_err_cnt;
+ stats->rx_errors += h_stats->rx_data_error_cnt;
+ stats->multicast += h_stats->rx_mc_pkt_cnt;
+ stats->rx_dropped += h_stats->rx_desc_drop;
+ stats->rx_length_errors += h_stats->rx_frame_very_long_err_cnt +
+ h_stats->rx_frame_long_err_cnt +
+ h_stats->rx_frame_runt_err_cnt +
+ h_stats->rx_frame_short_err_cnt +
+ h_stats->rx_lengthfield_err_cnt;
+ stats->rx_frame_errors += h_stats->rx_desc_l2_err_cnt +
+ h_stats->rx_desc_l3l4_err_cnt;
+ stats->rx_fifo_errors += h_stats->rx_overflow_cnt +
+ h_stats->rx_overrun_cnt;
+ stats->rx_crc_errors += h_stats->rx_fcs_error_cnt;
+}
+
static const struct net_device_ops hbg_netdev_ops = {
.ndo_open = hbg_net_open,
.ndo_stop = hbg_net_stop,
@@ -235,8 +277,62 @@ static const struct net_device_ops hbg_netdev_ops = {
.ndo_change_mtu = hbg_net_change_mtu,
.ndo_tx_timeout = hbg_net_tx_timeout,
.ndo_set_rx_mode = hbg_net_set_rx_mode,
+ .ndo_get_stats64 = hbg_net_get_stats,
+ .ndo_eth_ioctl = phy_do_ioctl_running,
};
+static void hbg_service_task(struct work_struct *work)
+{
+ struct hbg_priv *priv = container_of(work, struct hbg_priv,
+ service_task.work);
+
+ if (test_and_clear_bit(HBG_NIC_STATE_NEED_RESET, &priv->state))
+ hbg_err_reset(priv);
+
+ if (test_and_clear_bit(HBG_NIC_STATE_NP_LINK_FAIL, &priv->state))
+ hbg_fix_np_link_fail(priv);
+
+ hbg_diagnose_message_push(priv);
+
+	/* The statistics registers are u32. To prevent them
+	 * from overflowing, the driver dumps the statistics
+	 * every 30 seconds.
+	 */
+ if (time_after(jiffies, priv->last_update_stats_time + 30 * HZ)) {
+ hbg_update_stats(priv);
+ priv->last_update_stats_time = jiffies;
+ }
+
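+	/* re-arm the service task to run again in one second */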
+ schedule_delayed_work(&priv->service_task,
+ msecs_to_jiffies(MSEC_PER_SEC));
+}
+
+void hbg_err_reset_task_schedule(struct hbg_priv *priv)
+{
+ set_bit(HBG_NIC_STATE_NEED_RESET, &priv->state);
+ schedule_delayed_work(&priv->service_task, 0);
+}
+
+void hbg_np_link_fail_task_schedule(struct hbg_priv *priv)
+{
+ set_bit(HBG_NIC_STATE_NP_LINK_FAIL, &priv->state);
+ schedule_delayed_work(&priv->service_task, 0);
+}
+
+static void hbg_cancel_delayed_work_sync(void *data)
+{
+ cancel_delayed_work_sync(data);
+}
+
+static int hbg_delaywork_init(struct hbg_priv *priv)
+{
+ INIT_DELAYED_WORK(&priv->service_task, hbg_service_task);
+ schedule_delayed_work(&priv->service_task, 0);
+ return devm_add_action_or_reset(&priv->pdev->dev,
+ hbg_cancel_delayed_work_sync,
+ &priv->service_task);
+}
+
static int hbg_mac_filter_init(struct hbg_priv *priv)
{
struct hbg_dev_specs *dev_specs = &priv->dev_specs;
@@ -291,6 +387,10 @@ static int hbg_init(struct hbg_priv *priv)
if (ret)
return ret;
+ ret = hbg_delaywork_init(priv);
+ if (ret)
+ return ret;
+
hbg_debugfs_init(priv);
hbg_init_user_def(priv);
return 0;
@@ -349,6 +449,9 @@ static int hbg_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ret)
return ret;
+ /* set default features */
+ netdev->features |= HBG_SUPPORT_FEATURES;
+ netdev->hw_features |= HBG_SUPPORT_FEATURES;
netdev->priv_flags |= IFF_UNICAST_FLT;
netdev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.c
index db6bc4cfb971..f29a937ad087 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.c
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.c
@@ -17,6 +17,8 @@
#define HBG_MDIO_OP_TIMEOUT_US (1 * 1000 * 1000)
#define HBG_MDIO_OP_INTERVAL_US (5 * 1000)
+#define HBG_NP_LINK_FAIL_RETRY_TIMES 5
+
static void hbg_mdio_set_command(struct hbg_mac *mac, u32 cmd)
{
hbg_reg_write(HBG_MAC_GET_PRIV(mac), HBG_REG_MDIO_COMMAND_ADDR, cmd);
@@ -127,6 +129,26 @@ static void hbg_flowctrl_cfg(struct hbg_priv *priv)
hbg_hw_set_pause_enable(priv, tx_pause, rx_pause);
}
+void hbg_fix_np_link_fail(struct hbg_priv *priv)
+{
+ struct device *dev = &priv->pdev->dev;
+
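+	/* give up after HBG_NP_LINK_FAIL_RETRY_TIMES consecutive attempts */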
+ if (priv->stats.np_link_fail_cnt >= HBG_NP_LINK_FAIL_RETRY_TIMES) {
+ dev_err(dev, "failed to fix the MAC link status\n");
+ priv->stats.np_link_fail_cnt = 0;
+ return;
+ }
+
+ priv->stats.np_link_fail_cnt++;
+ dev_err(dev, "failed to link between MAC and PHY, try to fix...\n");
+
+ /* Replace phy_reset() with phy_stop() and phy_start(),
+ * as suggested by Andrew.
+ */
+ hbg_phy_stop(priv);
+ hbg_phy_start(priv);
+}
+
static void hbg_phy_adjust_link(struct net_device *netdev)
{
struct hbg_priv *priv = netdev_priv(netdev);
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.h b/drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.h
index febd02a309c7..f3771c1bbd34 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.h
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.h
@@ -9,4 +9,6 @@
int hbg_mdio_init(struct hbg_priv *priv);
void hbg_phy_start(struct hbg_priv *priv);
void hbg_phy_stop(struct hbg_priv *priv);
+void hbg_fix_np_link_fail(struct hbg_priv *priv);
+
#endif
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_reg.h b/drivers/net/ethernet/hisilicon/hibmcge/hbg_reg.h
index f12efc12f3c5..cc2cc612770d 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_reg.h
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_reg.h
@@ -18,6 +18,13 @@
#define HBG_REG_TX_FIFO_NUM_ADDR 0x0030
#define HBG_REG_RX_FIFO_NUM_ADDR 0x0034
#define HBG_REG_VLAN_LAYERS_ADDR 0x0038
+#define HBG_REG_PUSH_REQ_ADDR 0x00F0
+#define HBG_REG_MSG_HEADER_ADDR 0x00F4
+#define HBG_REG_MSG_HEADER_OPCODE_M GENMASK(7, 0)
+#define HBG_REG_MSG_HEADER_STATUS_M GENMASK(11, 8)
+#define HBG_REG_MSG_HEADER_DATA_NUM_M GENMASK(19, 12)
+#define HBG_REG_MSG_HEADER_RESP_CODE_M GENMASK(27, 20)
+#define HBG_REG_MSG_DATA_BASE_ADDR 0x0100
/* MDIO */
#define HBG_REG_MDIO_BASE 0x8000
@@ -54,12 +61,55 @@
#define HBG_REG_PAUSE_ENABLE_RX_B BIT(0)
#define HBG_REG_PAUSE_ENABLE_TX_B BIT(1)
#define HBG_REG_AN_NEG_STATE_ADDR (HBG_REG_SGMII_BASE + 0x0058)
+#define HBG_REG_AN_NEG_STATE_NP_LINK_OK_B BIT(15)
#define HBG_REG_TRANSMIT_CTRL_ADDR (HBG_REG_SGMII_BASE + 0x0060)
#define HBG_REG_TRANSMIT_CTRL_PAD_EN_B BIT(7)
#define HBG_REG_TRANSMIT_CTRL_CRC_ADD_B BIT(6)
#define HBG_REG_TRANSMIT_CTRL_AN_EN_B BIT(5)
#define HBG_REG_REC_FILT_CTRL_ADDR (HBG_REG_SGMII_BASE + 0x0064)
#define HBG_REG_REC_FILT_CTRL_UC_MATCH_EN_B BIT(0)
+#define HBG_REG_RX_OCTETS_TOTAL_OK_ADDR (HBG_REG_SGMII_BASE + 0x0080)
+#define HBG_REG_RX_OCTETS_BAD_ADDR (HBG_REG_SGMII_BASE + 0x0084)
+#define HBG_REG_RX_UC_PKTS_ADDR (HBG_REG_SGMII_BASE + 0x0088)
+#define HBG_REG_RX_MC_PKTS_ADDR (HBG_REG_SGMII_BASE + 0x008C)
+#define HBG_REG_RX_BC_PKTS_ADDR (HBG_REG_SGMII_BASE + 0x0090)
+#define HBG_REG_RX_PKTS_64OCTETS_ADDR (HBG_REG_SGMII_BASE + 0x0094)
+#define HBG_REG_RX_PKTS_65TO127OCTETS_ADDR (HBG_REG_SGMII_BASE + 0x0098)
+#define HBG_REG_RX_PKTS_128TO255OCTETS_ADDR (HBG_REG_SGMII_BASE + 0x009C)
+#define HBG_REG_RX_PKTS_256TO511OCTETS_ADDR (HBG_REG_SGMII_BASE + 0x00A0)
+#define HBG_REG_RX_PKTS_512TO1023OCTETS_ADDR (HBG_REG_SGMII_BASE + 0x00A4)
+#define HBG_REG_RX_PKTS_1024TO1518OCTETS_ADDR (HBG_REG_SGMII_BASE + 0x00A8)
+#define HBG_REG_RX_PKTS_1519TOMAXOCTETS_ADDR (HBG_REG_SGMII_BASE + 0x00AC)
+#define HBG_REG_RX_FCS_ERRORS_ADDR (HBG_REG_SGMII_BASE + 0x00B0)
+#define HBG_REG_RX_TAGGED_ADDR (HBG_REG_SGMII_BASE + 0x00B4)
+#define HBG_REG_RX_DATA_ERR_ADDR (HBG_REG_SGMII_BASE + 0x00B8)
+#define HBG_REG_RX_ALIGN_ERRORS_ADDR (HBG_REG_SGMII_BASE + 0x00BC)
+#define HBG_REG_RX_LONG_ERRORS_ADDR (HBG_REG_SGMII_BASE + 0x00C0)
+#define HBG_REG_RX_JABBER_ERRORS_ADDR (HBG_REG_SGMII_BASE + 0x00C4)
+#define HBG_REG_RX_PAUSE_MACCTL_FRAMCOUNTER_ADDR (HBG_REG_SGMII_BASE + 0x00C8)
+#define HBG_REG_RX_UNKNOWN_MACCTL_FRAMCOUNTER_ADDR (HBG_REG_SGMII_BASE + 0x00CC)
+#define HBG_REG_RX_VERY_LONG_ERR_CNT_ADDR (HBG_REG_SGMII_BASE + 0x00D0)
+#define HBG_REG_RX_RUNT_ERR_CNT_ADDR (HBG_REG_SGMII_BASE + 0x00D4)
+#define HBG_REG_RX_SHORT_ERR_CNT_ADDR (HBG_REG_SGMII_BASE + 0x00D8)
+#define HBG_REG_RX_FILT_PKT_CNT_ADDR (HBG_REG_SGMII_BASE + 0x00E8)
+#define HBG_REG_RX_OCTETS_TOTAL_FILT_ADDR (HBG_REG_SGMII_BASE + 0x00EC)
+#define HBG_REG_OCTETS_TRANSMITTED_OK_ADDR (HBG_REG_SGMII_BASE + 0x0100)
+#define HBG_REG_OCTETS_TRANSMITTED_BAD_ADDR (HBG_REG_SGMII_BASE + 0x0104)
+#define HBG_REG_TX_UC_PKTS_ADDR (HBG_REG_SGMII_BASE + 0x0108)
+#define HBG_REG_TX_MC_PKTS_ADDR (HBG_REG_SGMII_BASE + 0x010C)
+#define HBG_REG_TX_BC_PKTS_ADDR (HBG_REG_SGMII_BASE + 0x0110)
+#define HBG_REG_TX_PKTS_64OCTETS_ADDR (HBG_REG_SGMII_BASE + 0x0114)
+#define HBG_REG_TX_PKTS_65TO127OCTETS_ADDR (HBG_REG_SGMII_BASE + 0x0118)
+#define HBG_REG_TX_PKTS_128TO255OCTETS_ADDR (HBG_REG_SGMII_BASE + 0x011C)
+#define HBG_REG_TX_PKTS_256TO511OCTETS_ADDR (HBG_REG_SGMII_BASE + 0x0120)
+#define HBG_REG_TX_PKTS_512TO1023OCTETS_ADDR (HBG_REG_SGMII_BASE + 0x0124)
+#define HBG_REG_TX_PKTS_1024TO1518OCTETS_ADDR (HBG_REG_SGMII_BASE + 0x0128)
+#define HBG_REG_TX_PKTS_1519TOMAXOCTETS_ADDR (HBG_REG_SGMII_BASE + 0x012C)
+#define HBG_REG_TX_EXCESSIVE_LENGTH_DROP_ADDR (HBG_REG_SGMII_BASE + 0x014C)
+#define HBG_REG_TX_UNDERRUN_ADDR (HBG_REG_SGMII_BASE + 0x0150)
+#define HBG_REG_TX_TAGGED_ADDR (HBG_REG_SGMII_BASE + 0x0154)
+#define HBG_REG_TX_CRC_ERROR_ADDR (HBG_REG_SGMII_BASE + 0x0158)
+#define HBG_REG_TX_PAUSE_FRAMES_ADDR (HBG_REG_SGMII_BASE + 0x015C)
#define HBG_REG_LINE_LOOP_BACK_ADDR (HBG_REG_SGMII_BASE + 0x01A8)
#define HBG_REG_CF_CRC_STRIP_ADDR (HBG_REG_SGMII_BASE + 0x01B0)
#define HBG_REG_CF_CRC_STRIP_B BIT(0)
@@ -69,6 +119,9 @@
#define HBG_REG_RECV_CTRL_ADDR (HBG_REG_SGMII_BASE + 0x01E0)
#define HBG_REG_RECV_CTRL_STRIP_PAD_EN_B BIT(3)
#define HBG_REG_VLAN_CODE_ADDR (HBG_REG_SGMII_BASE + 0x01E8)
+#define HBG_REG_RX_OVERRUN_CNT_ADDR (HBG_REG_SGMII_BASE + 0x01EC)
+#define HBG_REG_RX_LENGTHFIELD_ERR_CNT_ADDR (HBG_REG_SGMII_BASE + 0x01F4)
+#define HBG_REG_RX_FAIL_COMMA_CNT_ADDR (HBG_REG_SGMII_BASE + 0x01F8)
#define HBG_REG_STATION_ADDR_LOW_0_ADDR (HBG_REG_SGMII_BASE + 0x0200)
#define HBG_REG_STATION_ADDR_HIGH_0_ADDR (HBG_REG_SGMII_BASE + 0x0204)
#define HBG_REG_STATION_ADDR_LOW_1_ADDR (HBG_REG_SGMII_BASE + 0x0208)
@@ -103,6 +156,7 @@
#define HBG_INT_MSK_MAC_PCS_TX_FIFO_ERR_B BIT(17)
#define HBG_INT_MSK_MAC_PCS_RX_FIFO_ERR_B BIT(16)
#define HBG_INT_MSK_MAC_MII_FIFO_ERR_B BIT(15)
+#define HBG_INT_MSK_TX_PKT_CPL_B BIT(14)
#define HBG_INT_MSK_TX_B BIT(1) /* just used in driver */
#define HBG_INT_MSK_RX_B BIT(0) /* just used in driver */
#define HBG_REG_CF_INTRPT_STAT_ADDR (HBG_REG_SGMII_BASE + 0x0434)
@@ -111,12 +165,17 @@
#define HBG_REG_RX_BUS_ERR_ADDR_ADDR (HBG_REG_SGMII_BASE + 0x0440)
#define HBG_REG_MAX_FRAME_LEN_ADDR (HBG_REG_SGMII_BASE + 0x0444)
#define HBG_REG_MAX_FRAME_LEN_M GENMASK(15, 0)
+#define HBG_REG_TX_DROP_CNT_ADDR (HBG_REG_SGMII_BASE + 0x0448)
+#define HBG_REG_RX_OVER_FLOW_CNT_ADDR (HBG_REG_SGMII_BASE + 0x044C)
#define HBG_REG_DEBUG_ST_MCH_ADDR (HBG_REG_SGMII_BASE + 0x0450)
#define HBG_REG_FIFO_CURR_STATUS_ADDR (HBG_REG_SGMII_BASE + 0x0454)
#define HBG_REG_FIFO_HIST_STATUS_ADDR (HBG_REG_SGMII_BASE + 0x0458)
#define HBG_REG_CF_CFF_DATA_NUM_ADDR (HBG_REG_SGMII_BASE + 0x045C)
#define HBG_REG_CF_CFF_DATA_NUM_ADDR_TX_M GENMASK(8, 0)
#define HBG_REG_CF_CFF_DATA_NUM_ADDR_RX_M GENMASK(24, 16)
+#define HBG_REG_TX_CS_FAIL_CNT_ADDR (HBG_REG_SGMII_BASE + 0x0460)
+#define HBG_REG_RX_TRANS_PKG_CNT_ADDR (HBG_REG_SGMII_BASE + 0x0464)
+#define HBG_REG_TX_TRANS_PKG_CNT_ADDR (HBG_REG_SGMII_BASE + 0x0468)
#define HBG_REG_CF_TX_PAUSE_ADDR (HBG_REG_SGMII_BASE + 0x0470)
#define HBG_REG_TX_CFF_ADDR_0_ADDR (HBG_REG_SGMII_BASE + 0x0488)
#define HBG_REG_TX_CFF_ADDR_1_ADDR (HBG_REG_SGMII_BASE + 0x048C)
@@ -136,6 +195,9 @@
#define HBG_REG_RX_CTRL_RXBUF_1ST_SKIP_SIZE2_M GENMASK(3, 0)
#define HBG_REG_RX_PKT_MODE_ADDR (HBG_REG_SGMII_BASE + 0x04F4)
#define HBG_REG_RX_PKT_MODE_PARSE_MODE_M GENMASK(22, 21)
+#define HBG_REG_RX_BUFRQ_ERR_CNT_ADDR (HBG_REG_SGMII_BASE + 0x058C)
+#define HBG_REG_TX_BUFRL_ERR_CNT_ADDR (HBG_REG_SGMII_BASE + 0x0590)
+#define HBG_REG_RX_WE_ERR_CNT_ADDR (HBG_REG_SGMII_BASE + 0x0594)
#define HBG_REG_DBG_ST0_ADDR (HBG_REG_SGMII_BASE + 0x05E4)
#define HBG_REG_DBG_ST1_ADDR (HBG_REG_SGMII_BASE + 0x05E8)
#define HBG_REG_DBG_ST2_ADDR (HBG_REG_SGMII_BASE + 0x05EC)
@@ -178,5 +240,48 @@ struct hbg_rx_desc {
};
#define HBG_RX_DESC_W2_PKT_LEN_M GENMASK(31, 16)
+#define HBG_RX_DESC_W2_PORT_NUM_M GENMASK(15, 12)
+#define HBG_RX_DESC_W4_IP_TCP_UDP_M GENMASK(31, 30)
+#define HBG_RX_DESC_W4_IPSEC_B BIT(29)
+#define HBG_RX_DESC_W4_IP_VERSION_B BIT(28)
+#define HBG_RX_DESC_W4_L4_ERR_CODE_M GENMASK(26, 23)
+#define HBG_RX_DESC_W4_FRAG_B BIT(22)
+#define HBG_RX_DESC_W4_OPT_B BIT(21)
+#define HBG_RX_DESC_W4_IP_VERSION_ERR_B BIT(20)
+#define HBG_RX_DESC_W4_BRD_CST_B BIT(19)
+#define HBG_RX_DESC_W4_MUL_CST_B BIT(18)
+#define HBG_RX_DESC_W4_ARP_B BIT(17)
+#define HBG_RX_DESC_W4_RARP_B BIT(16)
+#define HBG_RX_DESC_W4_ICMP_B BIT(15)
+#define HBG_RX_DESC_W4_VLAN_FLAG_B BIT(14)
+#define HBG_RX_DESC_W4_DROP_B BIT(13)
+#define HBG_RX_DESC_W4_L3_ERR_CODE_M GENMASK(12, 9)
+#define HBG_RX_DESC_W4_L2_ERR_B BIT(8)
+#define HBG_RX_DESC_W4_IDX_MATCH_B BIT(7)
+
+enum hbg_l3_err_code {
+ HBG_L3_OK = 0,
+ HBG_L3_WRONG_HEAD,
+ HBG_L3_CSUM_ERR,
+ HBG_L3_LEN_ERR,
+ HBG_L3_ZERO_TTL,
+ HBG_L3_RSVD,
+};
+
+enum hbg_l4_err_code {
+ HBG_L4_OK = 0,
+ HBG_L4_WRONG_HEAD,
+ HBG_L4_LEN_ERR,
+ HBG_L4_CSUM_ERR,
+ HBG_L4_ZERO_PORT_NUM,
+ HBG_L4_RSVD,
+};
+
+enum hbg_pkt_type_code {
+ HBG_NO_IP_PKT = 0,
+ HBG_IP_PKT,
+ HBG_TCP_PKT,
+ HBG_UDP_PKT,
+};
#endif
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_txrx.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_txrx.c
index f4f256a0dfea..8d814c8f19ea 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_txrx.c
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_txrx.c
@@ -38,8 +38,14 @@ static int hbg_dma_map(struct hbg_buffer *buffer)
buffer->skb_dma = dma_map_single(&priv->pdev->dev,
buffer->skb->data, buffer->skb_len,
buffer_to_dma_dir(buffer));
- if (unlikely(dma_mapping_error(&priv->pdev->dev, buffer->skb_dma)))
+ if (unlikely(dma_mapping_error(&priv->pdev->dev, buffer->skb_dma))) {
+ if (buffer->dir == HBG_DIR_RX)
+ priv->stats.rx_dma_err_cnt++;
+ else
+ priv->stats.tx_dma_err_cnt++;
+
return -ENOMEM;
+ }
return 0;
}
@@ -195,6 +201,173 @@ static int hbg_napi_tx_recycle(struct napi_struct *napi, int budget)
return packet_done;
}
+static bool hbg_rx_check_l3l4_error(struct hbg_priv *priv,
+ struct hbg_rx_desc *desc,
+ struct sk_buff *skb)
+{
+ bool rx_checksum_offload = !!(priv->netdev->features & NETIF_F_RXCSUM);
+
+ skb->ip_summed = rx_checksum_offload ?
+ CHECKSUM_UNNECESSARY : CHECKSUM_NONE;
+
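+	/* fast path: the descriptor reports no L3/L4 error */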
+ if (likely(!FIELD_GET(HBG_RX_DESC_W4_L3_ERR_CODE_M, desc->word4) &&
+ !FIELD_GET(HBG_RX_DESC_W4_L4_ERR_CODE_M, desc->word4)))
+ return true;
+
+ switch (FIELD_GET(HBG_RX_DESC_W4_L3_ERR_CODE_M, desc->word4)) {
+ case HBG_L3_OK:
+ break;
+ case HBG_L3_WRONG_HEAD:
+ priv->stats.rx_desc_l3_wrong_head_cnt++;
+ return false;
+ case HBG_L3_CSUM_ERR:
+ skb->ip_summed = CHECKSUM_NONE;
+ priv->stats.rx_desc_l3_csum_err_cnt++;
+
+		/* Don't drop packets on csum validation failure,
+		 * as suggested by Jakub
+		 */
+ break;
+ case HBG_L3_LEN_ERR:
+ priv->stats.rx_desc_l3_len_err_cnt++;
+ return false;
+ case HBG_L3_ZERO_TTL:
+ priv->stats.rx_desc_l3_zero_ttl_cnt++;
+ return false;
+ default:
+ priv->stats.rx_desc_l3_other_cnt++;
+ return false;
+ }
+
+ switch (FIELD_GET(HBG_RX_DESC_W4_L4_ERR_CODE_M, desc->word4)) {
+ case HBG_L4_OK:
+ break;
+ case HBG_L4_WRONG_HEAD:
+ priv->stats.rx_desc_l4_wrong_head_cnt++;
+ return false;
+ case HBG_L4_LEN_ERR:
+ priv->stats.rx_desc_l4_len_err_cnt++;
+ return false;
+ case HBG_L4_CSUM_ERR:
+ skb->ip_summed = CHECKSUM_NONE;
+ priv->stats.rx_desc_l4_csum_err_cnt++;
+
+		/* Don't drop packets on csum validation failure,
+		 * as suggested by Jakub
+		 */
+ break;
+ case HBG_L4_ZERO_PORT_NUM:
+ priv->stats.rx_desc_l4_zero_port_num_cnt++;
+ return false;
+ default:
+ priv->stats.rx_desc_l4_other_cnt++;
+ return false;
+ }
+
+ return true;
+}
+
+static void hbg_update_rx_ip_protocol_stats(struct hbg_priv *priv,
+ struct hbg_rx_desc *desc)
+{
+ if (unlikely(!FIELD_GET(HBG_RX_DESC_W4_IP_TCP_UDP_M, desc->word4))) {
+ priv->stats.rx_desc_no_ip_pkt_cnt++;
+ return;
+ }
+
+ if (unlikely(FIELD_GET(HBG_RX_DESC_W4_IP_VERSION_ERR_B, desc->word4))) {
+ priv->stats.rx_desc_ip_ver_err_cnt++;
+ return;
+ }
+
+ /* 0:ipv4, 1:ipv6 */
+ if (FIELD_GET(HBG_RX_DESC_W4_IP_VERSION_B, desc->word4))
+ priv->stats.rx_desc_ipv6_pkt_cnt++;
+ else
+ priv->stats.rx_desc_ipv4_pkt_cnt++;
+
+ switch (FIELD_GET(HBG_RX_DESC_W4_IP_TCP_UDP_M, desc->word4)) {
+ case HBG_IP_PKT:
+ priv->stats.rx_desc_ip_pkt_cnt++;
+ if (FIELD_GET(HBG_RX_DESC_W4_OPT_B, desc->word4))
+ priv->stats.rx_desc_ip_opt_pkt_cnt++;
+ if (FIELD_GET(HBG_RX_DESC_W4_FRAG_B, desc->word4))
+ priv->stats.rx_desc_frag_cnt++;
+
+ if (FIELD_GET(HBG_RX_DESC_W4_ICMP_B, desc->word4))
+ priv->stats.rx_desc_icmp_pkt_cnt++;
+ else if (FIELD_GET(HBG_RX_DESC_W4_IPSEC_B, desc->word4))
+ priv->stats.rx_desc_ipsec_pkt_cnt++;
+ break;
+ case HBG_TCP_PKT:
+ priv->stats.rx_desc_tcp_pkt_cnt++;
+ break;
+ case HBG_UDP_PKT:
+ priv->stats.rx_desc_udp_pkt_cnt++;
+ break;
+ default:
+ priv->stats.rx_desc_no_ip_pkt_cnt++;
+ break;
+ }
+}
+
+static void hbg_update_rx_protocol_stats(struct hbg_priv *priv,
+ struct hbg_rx_desc *desc)
+{
+ if (unlikely(!FIELD_GET(HBG_RX_DESC_W4_IDX_MATCH_B, desc->word4))) {
+ priv->stats.rx_desc_key_not_match_cnt++;
+ return;
+ }
+
+ if (FIELD_GET(HBG_RX_DESC_W4_BRD_CST_B, desc->word4))
+ priv->stats.rx_desc_broadcast_pkt_cnt++;
+ else if (FIELD_GET(HBG_RX_DESC_W4_MUL_CST_B, desc->word4))
+ priv->stats.rx_desc_multicast_pkt_cnt++;
+
+ if (FIELD_GET(HBG_RX_DESC_W4_VLAN_FLAG_B, desc->word4))
+ priv->stats.rx_desc_vlan_pkt_cnt++;
+
+ if (FIELD_GET(HBG_RX_DESC_W4_ARP_B, desc->word4)) {
+ priv->stats.rx_desc_arp_pkt_cnt++;
+ return;
+ } else if (FIELD_GET(HBG_RX_DESC_W4_RARP_B, desc->word4)) {
+ priv->stats.rx_desc_rarp_pkt_cnt++;
+ return;
+ }
+
+ hbg_update_rx_ip_protocol_stats(priv, desc);
+}
+
+static bool hbg_rx_pkt_check(struct hbg_priv *priv, struct hbg_rx_desc *desc,
+ struct sk_buff *skb)
+{
+ if (unlikely(FIELD_GET(HBG_RX_DESC_W2_PKT_LEN_M, desc->word2) >
+ priv->dev_specs.max_frame_len)) {
+ priv->stats.rx_desc_pkt_len_err_cnt++;
+ return false;
+ }
+
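+	/* drop frames whose descriptor targets another port or has the
+	 * drop flag set
+	 */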
+ if (unlikely(FIELD_GET(HBG_RX_DESC_W2_PORT_NUM_M, desc->word2) !=
+ priv->dev_specs.mac_id ||
+ FIELD_GET(HBG_RX_DESC_W4_DROP_B, desc->word4))) {
+ priv->stats.rx_desc_drop++;
+ return false;
+ }
+
+ if (unlikely(FIELD_GET(HBG_RX_DESC_W4_L2_ERR_B, desc->word4))) {
+ priv->stats.rx_desc_l2_err_cnt++;
+ return false;
+ }
+
+ if (unlikely(!hbg_rx_check_l3l4_error(priv, desc, skb))) {
+ priv->stats.rx_desc_l3l4_err_cnt++;
+ return false;
+ }
+
+ hbg_update_rx_protocol_stats(priv, desc);
+ return true;
+}
+
static int hbg_rx_fill_one_buffer(struct hbg_priv *priv)
{
struct hbg_ring *ring = &priv->rx_ring;
@@ -257,8 +430,12 @@ static int hbg_napi_rx_poll(struct napi_struct *napi, int budget)
rx_desc = (struct hbg_rx_desc *)buffer->skb->data;
pkt_len = FIELD_GET(HBG_RX_DESC_W2_PKT_LEN_M, rx_desc->word2);
- hbg_dma_unmap(buffer);
+ if (unlikely(!hbg_rx_pkt_check(priv, rx_desc, buffer->skb))) {
+ hbg_buffer_free(buffer);
+ goto next_buffer;
+ }
+ hbg_dma_unmap(buffer);
skb_reserve(buffer->skb, HBG_PACKET_HEAD_SIZE + NET_IP_ALIGN);
skb_put(buffer->skb, pkt_len);
buffer->skb->protocol = eth_type_trans(buffer->skb,
diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c
index a376d4bdf281..18376bcc718a 100644
--- a/drivers/net/ethernet/hisilicon/hip04_eth.c
+++ b/drivers/net/ethernet/hisilicon/hip04_eth.c
@@ -934,8 +934,6 @@ static int hip04_mac_probe(struct platform_device *pdev)
priv->chan = arg.args[1] * RX_DESC_NUM;
priv->group = arg.args[2];
- hrtimer_init(&priv->tx_coalesce_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
-
/* BQL will try to keep the TX queue as short as possible, but it can't
* be faster than tx_coalesce_usecs, so we need a fast timeout here,
* but also long enough to gather up enough frames to ensure we don't
@@ -944,7 +942,7 @@ static int hip04_mac_probe(struct platform_device *pdev)
*/
priv->tx_coalesce_frames = TX_DESC_NUM * 3 / 4;
priv->tx_coalesce_usecs = 200;
- priv->tx_coalesce_timer.function = tx_done;
+ hrtimer_setup(&priv->tx_coalesce_timer, tx_done, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
priv->map = syscon_node_to_regmap(arg.np);
of_node_put(arg.np);
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
index 6c458f037262..60a586a951a0 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
@@ -266,9 +266,9 @@ static int hns_nic_config_phy_loopback(struct phy_device *phy_dev, u8 en)
if (err)
goto out;
- err = phy_loopback(phy_dev, true);
+ err = phy_loopback(phy_dev, true, 0);
} else {
- err = phy_loopback(phy_dev, false);
+ err = phy_loopback(phy_dev, false, 0);
if (err)
goto out;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
index 9bbece25552b..09749e9f7398 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
@@ -3,6 +3,7 @@
#include <linux/debugfs.h>
#include <linux/device.h>
+#include <linux/string_choices.h>
#include "hnae3.h"
#include "hns3_debugfs.h"
@@ -661,12 +662,14 @@ static void hns3_dump_rx_queue_info(struct hns3_enet_ring *ring,
HNS3_RING_RX_RING_PKTNUM_RECORD_REG));
sprintf(result[j++], "%u", ring->rx_copybreak);
- sprintf(result[j++], "%s", readl_relaxed(ring->tqp->io_base +
- HNS3_RING_EN_REG) ? "on" : "off");
+ sprintf(result[j++], "%s",
+ str_on_off(readl_relaxed(ring->tqp->io_base +
+ HNS3_RING_EN_REG)));
if (hnae3_ae_dev_tqp_txrx_indep_supported(ae_dev))
- sprintf(result[j++], "%s", readl_relaxed(ring->tqp->io_base +
- HNS3_RING_RX_EN_REG) ? "on" : "off");
+ sprintf(result[j++], "%s",
+ str_on_off(readl_relaxed(ring->tqp->io_base +
+ HNS3_RING_RX_EN_REG)));
else
sprintf(result[j++], "%s", "NA");
@@ -764,12 +767,14 @@ static void hns3_dump_tx_queue_info(struct hns3_enet_ring *ring,
sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
HNS3_RING_TX_RING_PKTNUM_RECORD_REG));
- sprintf(result[j++], "%s", readl_relaxed(ring->tqp->io_base +
- HNS3_RING_EN_REG) ? "on" : "off");
+ sprintf(result[j++], "%s",
+ str_on_off(readl_relaxed(ring->tqp->io_base +
+ HNS3_RING_EN_REG)));
if (hnae3_ae_dev_tqp_txrx_indep_supported(ae_dev))
- sprintf(result[j++], "%s", readl_relaxed(ring->tqp->io_base +
- HNS3_RING_TX_EN_REG) ? "on" : "off");
+ sprintf(result[j++], "%s",
+ str_on_off(readl_relaxed(ring->tqp->io_base +
+ HNS3_RING_TX_EN_REG)));
else
sprintf(result[j++], "%s", "NA");
@@ -1030,7 +1035,6 @@ static void
hns3_dbg_dev_caps(struct hnae3_handle *h, char *buf, int len, int *pos)
{
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev);
- const char * const str[] = {"no", "yes"};
unsigned long *caps = ae_dev->caps;
u32 i, state;
@@ -1039,7 +1043,7 @@ hns3_dbg_dev_caps(struct hnae3_handle *h, char *buf, int len, int *pos)
for (i = 0; i < ARRAY_SIZE(hns3_dbg_cap); i++) {
state = test_bit(hns3_dbg_cap[i].cap_bit, caps);
*pos += scnprintf(buf + *pos, len - *pos, "%s: %s\n",
- hns3_dbg_cap[i].name, str[state]);
+ hns3_dbg_cap[i].name, str_yes_no(state));
}
*pos += scnprintf(buf + *pos, len - *pos, "\n");
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
index b771a2daba43..6715222aeb66 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
@@ -3,6 +3,7 @@
#include <linux/etherdevice.h>
#include <linux/string.h>
+#include <linux/string_choices.h>
#include <linux/phy.h>
#include <linux/sfp.h>
@@ -1198,7 +1199,7 @@ static int hns3_set_tx_push(struct net_device *netdev, u32 tx_push)
return 0;
netdev_dbg(netdev, "Changing tx push from %s to %s\n",
- old_state ? "on" : "off", tx_push ? "on" : "off");
+ str_on_off(old_state), str_on_off(tx_push));
if (tx_push)
set_bit(HNS3_NIC_STATE_TX_PUSH_ENABLE, &priv->state);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
index debf143e9940..c46490693594 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
@@ -3,6 +3,7 @@
#include <linux/device.h>
#include <linux/sched/clock.h>
+#include <linux/string_choices.h>
#include "hclge_debugfs.h"
#include "hclge_err.h"
@@ -11,7 +12,6 @@
#include "hclge_tm.h"
#include "hnae3.h"
-static const char * const state_str[] = { "off", "on" };
static const char * const hclge_mac_state_str[] = {
"TO_ADD", "TO_DEL", "ACTIVE"
};
@@ -2573,7 +2573,7 @@ static int hclge_dbg_dump_loopback(struct hclge_dev *hdev, char *buf, int len)
loopback_en = hnae3_get_bit(le32_to_cpu(req_app->txrx_pad_fcs_loop_en),
HCLGE_MAC_APP_LP_B);
pos += scnprintf(buf + pos, len - pos, "app loopback: %s\n",
- state_str[loopback_en]);
+ str_on_off(loopback_en));
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_COMMON_LOOPBACK, true);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
@@ -2586,22 +2586,22 @@ static int hclge_dbg_dump_loopback(struct hclge_dev *hdev, char *buf, int len)
loopback_en = req_common->enable & HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
pos += scnprintf(buf + pos, len - pos, "serdes serial loopback: %s\n",
- state_str[loopback_en]);
+ str_on_off(loopback_en));
loopback_en = req_common->enable &
HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B ? 1 : 0;
pos += scnprintf(buf + pos, len - pos, "serdes parallel loopback: %s\n",
- state_str[loopback_en]);
+ str_on_off(loopback_en));
if (phydev) {
loopback_en = phydev->loopback_enabled;
pos += scnprintf(buf + pos, len - pos, "phy loopback: %s\n",
- state_str[loopback_en]);
+ str_on_off(loopback_en));
} else if (hnae3_dev_phy_imp_supported(hdev)) {
loopback_en = req_common->enable &
HCLGE_CMD_GE_PHY_INNER_LOOP_B;
pos += scnprintf(buf + pos, len - pos, "phy loopback: %s\n",
- state_str[loopback_en]);
+ str_on_off(loopback_en));
}
return 0;
@@ -2894,9 +2894,9 @@ static int hclge_dbg_dump_vlan_filter_config(struct hclge_dev *hdev, char *buf,
egress = vlan_fe & HCLGE_FILTER_FE_NIC_EGRESS_B ? 1 : 0;
*pos += scnprintf(buf, len, "I_PORT_VLAN_FILTER: %s\n",
- state_str[ingress]);
+ str_on_off(ingress));
*pos += scnprintf(buf + *pos, len - *pos, "E_PORT_VLAN_FILTER: %s\n",
- state_str[egress]);
+ str_on_off(egress));
hclge_dbg_fill_content(content, sizeof(content), vlan_filter_items,
NULL, ARRAY_SIZE(vlan_filter_items));
@@ -2915,11 +2915,11 @@ static int hclge_dbg_dump_vlan_filter_config(struct hclge_dev *hdev, char *buf,
return ret;
j = 0;
result[j++] = hclge_dbg_get_func_id_str(str_id, i);
- result[j++] = state_str[ingress];
- result[j++] = state_str[egress];
- result[j++] =
- test_bit(HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B,
- hdev->ae_dev->caps) ? state_str[bypass] : "NA";
+ result[j++] = str_on_off(ingress);
+ result[j++] = str_on_off(egress);
+ result[j++] = test_bit(HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B,
+ hdev->ae_dev->caps) ?
+ str_on_off(bypass) : "NA";
hclge_dbg_fill_content(content, sizeof(content),
vlan_filter_items, result,
ARRAY_SIZE(vlan_filter_items));
@@ -2958,19 +2958,19 @@ static int hclge_dbg_dump_vlan_offload_config(struct hclge_dev *hdev, char *buf,
j = 0;
result[j++] = hclge_dbg_get_func_id_str(str_id, i);
result[j++] = str_pvid;
- result[j++] = state_str[vlan_cfg.accept_tag1];
- result[j++] = state_str[vlan_cfg.accept_tag2];
- result[j++] = state_str[vlan_cfg.accept_untag1];
- result[j++] = state_str[vlan_cfg.accept_untag2];
- result[j++] = state_str[vlan_cfg.insert_tag1];
- result[j++] = state_str[vlan_cfg.insert_tag2];
- result[j++] = state_str[vlan_cfg.shift_tag];
- result[j++] = state_str[vlan_cfg.strip_tag1];
- result[j++] = state_str[vlan_cfg.strip_tag2];
- result[j++] = state_str[vlan_cfg.drop_tag1];
- result[j++] = state_str[vlan_cfg.drop_tag2];
- result[j++] = state_str[vlan_cfg.pri_only1];
- result[j++] = state_str[vlan_cfg.pri_only2];
+ result[j++] = str_on_off(vlan_cfg.accept_tag1);
+ result[j++] = str_on_off(vlan_cfg.accept_tag2);
+ result[j++] = str_on_off(vlan_cfg.accept_untag1);
+ result[j++] = str_on_off(vlan_cfg.accept_untag2);
+ result[j++] = str_on_off(vlan_cfg.insert_tag1);
+ result[j++] = str_on_off(vlan_cfg.insert_tag2);
+ result[j++] = str_on_off(vlan_cfg.shift_tag);
+ result[j++] = str_on_off(vlan_cfg.strip_tag1);
+ result[j++] = str_on_off(vlan_cfg.strip_tag2);
+ result[j++] = str_on_off(vlan_cfg.drop_tag1);
+ result[j++] = str_on_off(vlan_cfg.drop_tag2);
+ result[j++] = str_on_off(vlan_cfg.pri_only1);
+ result[j++] = str_on_off(vlan_cfg.pri_only2);
hclge_dbg_fill_content(content, sizeof(content),
vlan_offload_items, result,
@@ -3007,14 +3007,13 @@ static int hclge_dbg_dump_ptp_info(struct hclge_dev *hdev, char *buf, int len)
pos += scnprintf(buf + pos, len - pos, "phc %s's debug info:\n",
ptp->info.name);
pos += scnprintf(buf + pos, len - pos, "ptp enable: %s\n",
- test_bit(HCLGE_PTP_FLAG_EN, &ptp->flags) ?
- "yes" : "no");
+ str_yes_no(test_bit(HCLGE_PTP_FLAG_EN, &ptp->flags)));
pos += scnprintf(buf + pos, len - pos, "ptp tx enable: %s\n",
- test_bit(HCLGE_PTP_FLAG_TX_EN, &ptp->flags) ?
- "yes" : "no");
+ str_yes_no(test_bit(HCLGE_PTP_FLAG_TX_EN,
+ &ptp->flags)));
pos += scnprintf(buf + pos, len - pos, "ptp rx enable: %s\n",
- test_bit(HCLGE_PTP_FLAG_RX_EN, &ptp->flags) ?
- "yes" : "no");
+ str_yes_no(test_bit(HCLGE_PTP_FLAG_RX_EN,
+ &ptp->flags)));
last_rx = jiffies_to_msecs(ptp->last_rx);
pos += scnprintf(buf + pos, len - pos, "last rx time: %lu.%lu\n",
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index 3f17b3073e50..92f9b8ec76d9 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -7875,7 +7875,7 @@ static int hclge_enable_phy_loopback(struct hclge_dev *hdev,
if (ret)
return ret;
- return phy_loopback(phydev, true);
+ return phy_loopback(phydev, true, 0);
}
static int hclge_disable_phy_loopback(struct hclge_dev *hdev,
@@ -7883,7 +7883,7 @@ static int hclge_disable_phy_loopback(struct hclge_dev *hdev,
{
int ret;
- ret = phy_loopback(phydev, false);
+ ret = phy_loopback(phydev, false, 0);
if (ret)
return ret;
@@ -8000,7 +8000,7 @@ static int hclge_set_loopback(struct hnae3_handle *handle,
ret = hclge_tqp_enable(handle, en);
if (ret)
dev_err(&hdev->pdev->dev, "failed to %s tqp in loopback, ret = %d\n",
- en ? "enable" : "disable", ret);
+ str_enable_disable(en), ret);
return ret;
}
@@ -11200,9 +11200,9 @@ static void hclge_info_show(struct hclge_dev *hdev)
dev_info(dev, "This is %s PF\n",
hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
dev_info(dev, "DCB %s\n",
- handle->kinfo.tc_info.dcb_ets_active ? "enable" : "disable");
+ str_enable_disable(handle->kinfo.tc_info.dcb_ets_active));
dev_info(dev, "MQPRIO %s\n",
- handle->kinfo.tc_info.mqprio_active ? "enable" : "disable");
+ str_enable_disable(handle->kinfo.tc_info.mqprio_active));
dev_info(dev, "Default tx spare buffer size: %u\n",
hdev->tx_spare_buf_size);
@@ -11976,7 +11976,7 @@ static int hclge_set_vf_spoofchk_hw(struct hclge_dev *hdev, int vf, bool enable)
if (ret) {
dev_err(&hdev->pdev->dev,
"Set vf %d mac spoof check %s failed, ret=%d\n",
- vf, enable ? "on" : "off", ret);
+ vf, str_on_off(enable), ret);
return ret;
}
@@ -11984,7 +11984,7 @@ static int hclge_set_vf_spoofchk_hw(struct hclge_dev *hdev, int vf, bool enable)
if (ret)
dev_err(&hdev->pdev->dev,
"Set vf %d vlan spoof check %s failed, ret=%d\n",
- vf, enable ? "on" : "off", ret);
+ vf, str_on_off(enable), ret);
return ret;
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
index 80079657afeb..9a456ebf9b7c 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
@@ -258,7 +258,7 @@ void hclge_mac_start_phy(struct hclge_dev *hdev)
if (!phydev)
return;
- phy_loopback(phydev, false);
+ phy_loopback(phydev, false, 0);
phy_start(phydev);
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c
index bab16c2191b2..59cc9221185f 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c
@@ -2,6 +2,7 @@
// Copyright (c) 2021 Hisilicon Limited.
#include <linux/skbuff.h>
+#include <linux/string_choices.h>
#include "hclge_main.h"
#include "hnae3.h"
@@ -226,7 +227,7 @@ static int hclge_ptp_int_en(struct hclge_dev *hdev, bool en)
if (ret)
dev_err(&hdev->pdev->dev,
"failed to %s ptp interrupt, ret = %d\n",
- en ? "enable" : "disable", ret);
+ str_enable_disable(en), ret);
return ret;
}
@@ -483,7 +484,7 @@ int hclge_ptp_init(struct hclge_dev *hdev)
ret = hclge_ptp_get_cycle(hdev);
if (ret)
- return ret;
+ goto out;
}
ret = hclge_ptp_int_en(hdev, true);
diff --git a/drivers/net/ethernet/hisilicon/hns_mdio.c b/drivers/net/ethernet/hisilicon/hns_mdio.c
index a1aa6c1f966e..6812be8dc64f 100644
--- a/drivers/net/ethernet/hisilicon/hns_mdio.c
+++ b/drivers/net/ethernet/hisilicon/hns_mdio.c
@@ -640,7 +640,7 @@ static struct platform_driver hns_mdio_driver = {
.driver = {
.name = MDIO_DRV_NAME,
.of_match_table = hns_mdio_match,
- .acpi_match_table = ACPI_PTR(hns_mdio_acpi_match),
+ .acpi_match_table = hns_mdio_acpi_match,
},
};
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c
index 25b8a3556004..417dfa18daae 100644
--- a/drivers/net/ethernet/ibm/emac/core.c
+++ b/drivers/net/ethernet/ibm/emac/core.c
@@ -2554,17 +2554,12 @@ static int emac_dt_mdio_probe(struct emac_instance *dev)
struct mii_bus *bus;
int res;
- mii_np = of_get_child_by_name(dev->ofdev->dev.of_node, "mdio");
+ mii_np = of_get_available_child_by_name(dev->ofdev->dev.of_node, "mdio");
if (!mii_np) {
dev_err(&dev->ofdev->dev, "no mdio definition found.");
return -ENODEV;
}
- if (!of_device_is_available(mii_np)) {
- res = -ENODEV;
- goto put_node;
- }
-
bus = devm_mdiobus_alloc(&dev->ofdev->dev);
if (!bus) {
res = -ENOMEM;
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 0676fc547b6f..92647e137cf8 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -234,11 +234,17 @@ static int ibmvnic_set_queue_affinity(struct ibmvnic_sub_crq_queue *queue,
(*stragglers)--;
}
/* atomic write is safer than writing bit by bit directly */
- for (i = 0; i < stride; i++) {
- cpumask_set_cpu(*cpu, mask);
- *cpu = cpumask_next_wrap(*cpu, cpu_online_mask,
- nr_cpu_ids, false);
+ for_each_online_cpu_wrap(i, *cpu) {
+ if (!stride--) {
+ /* For the next queue we start from the first
+ * unused CPU in this queue
+ */
+ *cpu = i;
+ break;
+ }
+ cpumask_set_cpu(i, mask);
}
+
/* set queue affinity mask */
cpumask_copy(queue->affinity_mask, mask);
rc = irq_set_affinity_and_hint(queue->irq, queue->affinity_mask);
@@ -256,7 +262,7 @@ static void ibmvnic_set_affinity(struct ibmvnic_adapter *adapter)
int num_rxqs = adapter->num_active_rx_scrqs, i_rxqs = 0;
int num_txqs = adapter->num_active_tx_scrqs, i_txqs = 0;
int total_queues, stride, stragglers, i;
- unsigned int num_cpu, cpu;
+ unsigned int num_cpu, cpu = 0;
bool is_rx_queue;
int rc = 0;
@@ -274,8 +280,6 @@ static void ibmvnic_set_affinity(struct ibmvnic_adapter *adapter)
stride = max_t(int, num_cpu / total_queues, 1);
/* number of leftover cpu's */
stragglers = num_cpu >= total_queues ? num_cpu % total_queues : 0;
- /* next available cpu to assign irq to */
- cpu = cpumask_next(-1, cpu_online_mask);
for (i = 0; i < total_queues; i++) {
is_rx_queue = false;
@@ -4829,6 +4833,18 @@ static void vnic_add_client_data(struct ibmvnic_adapter *adapter,
strscpy(vlcd->name, adapter->netdev->name, len);
}
+static void ibmvnic_print_hex_dump(struct net_device *dev, void *buf,
+ size_t len)
+{
+ unsigned char hex_str[16 * 3];
+
+ for (size_t i = 0; i < len; i += 16) {
+ hex_dump_to_buffer((unsigned char *)buf + i, len - i, 16, 8,
+ hex_str, sizeof(hex_str), false);
+ netdev_dbg(dev, "%s\n", hex_str);
+ }
+}
+
static int send_login(struct ibmvnic_adapter *adapter)
{
struct ibmvnic_login_rsp_buffer *login_rsp_buffer;
@@ -4939,10 +4955,8 @@ static int send_login(struct ibmvnic_adapter *adapter)
vnic_add_client_data(adapter, vlcd);
netdev_dbg(adapter->netdev, "Login Buffer:\n");
- for (i = 0; i < (adapter->login_buf_sz - 1) / 8 + 1; i++) {
- netdev_dbg(adapter->netdev, "%016lx\n",
- ((unsigned long *)(adapter->login_buf))[i]);
- }
+ ibmvnic_print_hex_dump(adapter->netdev, adapter->login_buf,
+ adapter->login_buf_sz);
memset(&crq, 0, sizeof(crq));
crq.login.first = IBMVNIC_CRQ_CMD;
@@ -5319,15 +5333,13 @@ static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter)
{
struct device *dev = &adapter->vdev->dev;
struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf;
- int i;
dma_unmap_single(dev, adapter->ip_offload_tok,
sizeof(adapter->ip_offload_buf), DMA_FROM_DEVICE);
netdev_dbg(adapter->netdev, "Query IP Offload Buffer:\n");
- for (i = 0; i < (sizeof(adapter->ip_offload_buf) - 1) / 8 + 1; i++)
- netdev_dbg(adapter->netdev, "%016lx\n",
- ((unsigned long *)(buf))[i]);
+ ibmvnic_print_hex_dump(adapter->netdev, buf,
+ sizeof(adapter->ip_offload_buf));
netdev_dbg(adapter->netdev, "ipv4_chksum = %d\n", buf->ipv4_chksum);
netdev_dbg(adapter->netdev, "ipv6_chksum = %d\n", buf->ipv6_chksum);
@@ -5558,10 +5570,8 @@ static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
netdev->mtu = adapter->req_mtu - ETH_HLEN;
netdev_dbg(adapter->netdev, "Login Response Buffer:\n");
- for (i = 0; i < (adapter->login_rsp_buf_sz - 1) / 8 + 1; i++) {
- netdev_dbg(adapter->netdev, "%016lx\n",
- ((unsigned long *)(adapter->login_rsp_buf))[i]);
- }
+ ibmvnic_print_hex_dump(netdev, adapter->login_rsp_buf,
+ adapter->login_rsp_buf_sz);
/* Sanity checks */
if (login->num_txcomp_subcrqs != login_rsp->num_txsubm_subcrqs ||
diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig
index 24ec9a4f1ffa..1640d2f27833 100644
--- a/drivers/net/ethernet/intel/Kconfig
+++ b/drivers/net/ethernet/intel/Kconfig
@@ -264,6 +264,7 @@ config I40EVF
tristate "Intel(R) Ethernet Adaptive Virtual Function support"
select IAVF
depends on PCI_MSI
+ depends on PTP_1588_CLOCK_OPTIONAL
help
This driver supports virtual functions for Intel XL710,
X710, X722, XXV710, and all devices advertising support for
@@ -336,7 +337,7 @@ config ICE_SWITCHDEV
config ICE_HWTS
bool "Support HW cross-timestamp on platforms with PTM support"
default y
- depends on ICE && X86
+ depends on ICE && X86 && PCIE_PTM
help
Say Y to enable hardware supported cross-timestamping on platforms
with PCIe PTM support. The cross-timestamp is available through
diff --git a/drivers/net/ethernet/intel/e1000e/mac.c b/drivers/net/ethernet/intel/e1000e/mac.c
index d7df2a0ed629..44249dd91bd6 100644
--- a/drivers/net/ethernet/intel/e1000e/mac.c
+++ b/drivers/net/ethernet/intel/e1000e/mac.c
@@ -331,8 +331,21 @@ void e1000e_update_mc_addr_list_generic(struct e1000_hw *hw,
}
/* replace the entire MTA table */
- for (i = hw->mac.mta_reg_count - 1; i >= 0; i--)
+ for (i = hw->mac.mta_reg_count - 1; i >= 0; i--) {
E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, hw->mac.mta_shadow[i]);
+
+ if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
+ /*
+ * Do not queue up too many posted writes to prevent
+ * increased latency for other devices on the
+ * interconnect. Flush after each 8th posted write,
+ * to keep additional execution time low while still
+ * preventing increased latency.
+ */
+ if (!(i % 8) && i)
+ e1e_flush();
+ }
+ }
e1e_flush();
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
index e28f1905a4a0..9f47388eaba5 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
@@ -2,6 +2,7 @@
/* Copyright(c) 2018 Intel Corporation. */
#include <linux/bpf_trace.h>
+#include <linux/unroll.h>
#include <net/xdp_sock_drv.h>
#include "i40e_txrx_common.h"
#include "i40e_xsk.h"
@@ -529,7 +530,8 @@ static void i40e_xmit_pkt_batch(struct i40e_ring *xdp_ring, struct xdp_desc *des
dma_addr_t dma;
u32 i;
- loop_unrolled_for(i = 0; i < PKTS_PER_BATCH; i++) {
+ unrolled_count(PKTS_PER_BATCH)
+ for (i = 0; i < PKTS_PER_BATCH; i++) {
u32 cmd = I40E_TX_DESC_CMD_ICRC | xsk_is_eop_desc(&desc[i]);
dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool, desc[i].addr);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.h b/drivers/net/ethernet/intel/i40e/i40e_xsk.h
index ef156fad52f2..dd16351a7af8 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.h
@@ -6,7 +6,7 @@
#include <linux/types.h>
-/* This value should match the pragma in the loop_unrolled_for
+/* This value should match the pragma in the unrolled_count()
* macro. Why 4? It is strictly empirical. It seems to be a good
* compromise between the advantage of having simultaneous outstanding
* reads to the DMA array that can hide each others latency and the
@@ -14,14 +14,6 @@
*/
#define PKTS_PER_BATCH 4
-#ifdef __clang__
-#define loop_unrolled_for _Pragma("clang loop unroll_count(4)") for
-#elif __GNUC__ >= 8
-#define loop_unrolled_for _Pragma("GCC unroll 4") for
-#else
-#define loop_unrolled_for for
-#endif
-
struct i40e_ring;
struct i40e_vsi;
struct net_device;
diff --git a/drivers/net/ethernet/intel/iavf/Makefile b/drivers/net/ethernet/intel/iavf/Makefile
index 356ac9faa5bf..e13720a728ff 100644
--- a/drivers/net/ethernet/intel/iavf/Makefile
+++ b/drivers/net/ethernet/intel/iavf/Makefile
@@ -13,3 +13,5 @@ obj-$(CONFIG_IAVF) += iavf.o
iavf-y := iavf_main.o iavf_ethtool.o iavf_virtchnl.o iavf_fdir.o \
iavf_adv_rss.o iavf_txrx.o iavf_common.o iavf_adminq.o
+
+iavf-$(CONFIG_PTP_1588_CLOCK) += iavf_ptp.o
diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h
index 532a0a595fe8..9de3e0ba3731 100644
--- a/drivers/net/ethernet/intel/iavf/iavf.h
+++ b/drivers/net/ethernet/intel/iavf/iavf.h
@@ -41,6 +41,7 @@
#include "iavf_txrx.h"
#include "iavf_fdir.h"
#include "iavf_adv_rss.h"
+#include "iavf_types.h"
#include <linux/bitmap.h>
#define DEFAULT_DEBUG_LEVEL_SHIFT 3
@@ -82,7 +83,7 @@ struct iavf_vsi {
#define MAXIMUM_ETHERNET_VLAN_SIZE (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN)
-#define IAVF_RX_DESC(R, i) (&(((union iavf_32byte_rx_desc *)((R)->desc))[i]))
+#define IAVF_RX_DESC(R, i) (&(((struct iavf_rx_desc *)((R)->desc))[i]))
#define IAVF_TX_DESC(R, i) (&(((struct iavf_tx_desc *)((R)->desc))[i]))
#define IAVF_TX_CTXTDESC(R, i) \
(&(((struct iavf_tx_context_desc *)((R)->desc))[i]))
@@ -271,6 +272,7 @@ struct iavf_adapter {
/* Lock to protect accesses to MAC and VLAN lists */
spinlock_t mac_vlan_list_lock;
char misc_vector_name[IFNAMSIZ + 9];
+ u8 rxdid;
int num_active_queues;
int num_req_queues;
@@ -343,6 +345,17 @@ struct iavf_adapter {
#define IAVF_FLAG_AQ_CONFIGURE_QUEUES_BW BIT_ULL(39)
#define IAVF_FLAG_AQ_CFG_QUEUES_QUANTA_SIZE BIT_ULL(40)
#define IAVF_FLAG_AQ_GET_QOS_CAPS BIT_ULL(41)
+#define IAVF_FLAG_AQ_GET_SUPPORTED_RXDIDS BIT_ULL(42)
+#define IAVF_FLAG_AQ_GET_PTP_CAPS BIT_ULL(43)
+#define IAVF_FLAG_AQ_SEND_PTP_CMD BIT_ULL(44)
+
+ /* AQ messages that must be sent after IAVF_FLAG_AQ_GET_CONFIG, in
+	 * order to negotiate extended capabilities.
+ */
+#define IAVF_FLAG_AQ_EXTENDED_CAPS \
+ (IAVF_FLAG_AQ_GET_OFFLOAD_VLAN_V2_CAPS | \
+ IAVF_FLAG_AQ_GET_SUPPORTED_RXDIDS | \
+ IAVF_FLAG_AQ_GET_PTP_CAPS)
/* flags for processing extended capability messages during
* __IAVF_INIT_EXTENDED_CAPS. Each capability exchange requires
@@ -354,10 +367,18 @@ struct iavf_adapter {
u64 extended_caps;
#define IAVF_EXTENDED_CAP_SEND_VLAN_V2 BIT_ULL(0)
#define IAVF_EXTENDED_CAP_RECV_VLAN_V2 BIT_ULL(1)
+#define IAVF_EXTENDED_CAP_SEND_RXDID BIT_ULL(2)
+#define IAVF_EXTENDED_CAP_RECV_RXDID BIT_ULL(3)
+#define IAVF_EXTENDED_CAP_SEND_PTP BIT_ULL(4)
+#define IAVF_EXTENDED_CAP_RECV_PTP BIT_ULL(5)
#define IAVF_EXTENDED_CAPS \
(IAVF_EXTENDED_CAP_SEND_VLAN_V2 | \
- IAVF_EXTENDED_CAP_RECV_VLAN_V2)
+ IAVF_EXTENDED_CAP_RECV_VLAN_V2 | \
+ IAVF_EXTENDED_CAP_SEND_RXDID | \
+ IAVF_EXTENDED_CAP_RECV_RXDID | \
+ IAVF_EXTENDED_CAP_SEND_PTP | \
+ IAVF_EXTENDED_CAP_RECV_PTP)
/* Lock to prevent possible clobbering of
* current_netdev_promisc_flags
@@ -417,12 +438,18 @@ struct iavf_adapter {
VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF)
#define QOS_ALLOWED(_a) ((_a)->vf_res->vf_cap_flags & \
VIRTCHNL_VF_OFFLOAD_QOS)
+#define IAVF_RXDID_ALLOWED(a) \
+ ((a)->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC)
+#define IAVF_PTP_ALLOWED(a) \
+ ((a)->vf_res->vf_cap_flags & VIRTCHNL_VF_CAP_PTP)
struct virtchnl_vf_resource *vf_res; /* incl. all VSIs */
struct virtchnl_vsi_resource *vsi_res; /* our LAN VSI */
struct virtchnl_version_info pf_version;
#define PF_IS_V11(_a) (((_a)->pf_version.major == 1) && \
((_a)->pf_version.minor == 1))
struct virtchnl_vlan_caps vlan_v2_caps;
+ u64 supp_rxdids;
+ struct iavf_ptp ptp;
u16 msg_enable;
struct iavf_eth_stats current_stats;
struct virtchnl_qos_cap_list *qos_caps;
@@ -555,6 +582,10 @@ int iavf_send_vf_config_msg(struct iavf_adapter *adapter);
int iavf_get_vf_config(struct iavf_adapter *adapter);
int iavf_get_vf_vlan_v2_caps(struct iavf_adapter *adapter);
int iavf_send_vf_offload_vlan_v2_msg(struct iavf_adapter *adapter);
+int iavf_send_vf_supported_rxdids_msg(struct iavf_adapter *adapter);
+int iavf_get_vf_supported_rxdids(struct iavf_adapter *adapter);
+int iavf_send_vf_ptp_caps_msg(struct iavf_adapter *adapter);
+int iavf_get_vf_ptp_caps(struct iavf_adapter *adapter);
void iavf_set_queue_vlan_tag_loc(struct iavf_adapter *adapter);
u16 iavf_get_num_vlans_added(struct iavf_adapter *adapter);
void iavf_irq_enable(struct iavf_adapter *adapter, bool flush);
diff --git a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
index 74a1e9fe1821..288bb5b2e72e 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
@@ -1808,7 +1808,7 @@ static int iavf_set_rxfh(struct net_device *netdev,
static const struct ethtool_ops iavf_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_USE_ADAPTIVE,
- .cap_rss_sym_xor_supported = true,
+ .supported_input_xfrm = RXH_XFRM_SYM_XOR,
.get_drvinfo = iavf_get_drvinfo,
.get_link = ethtool_op_get_link,
.get_ringparam = iavf_get_ringparam,
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index 6faa62bced3a..6d7ba4d67a19 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -2,8 +2,10 @@
/* Copyright(c) 2013 - 2018 Intel Corporation. */
#include <linux/net/intel/libie/rx.h>
+#include <net/netdev_lock.h>
#include "iavf.h"
+#include "iavf_ptp.h"
#include "iavf_prototype.h"
/* All iavf tracepoints are defined by the include below, which must
* be included exactly once across the whole kernel with
@@ -710,6 +712,47 @@ static void iavf_configure_tx(struct iavf_adapter *adapter)
}
/**
+ * iavf_select_rx_desc_format - Select Rx descriptor format
+ * @adapter: adapter private structure
+ *
+ * Select which Rx descriptor format to use based on availability and enabled
+ * features.
+ *
+ * Return: the desired RXDID to select for a given Rx queue, as defined by
+ * enum virtchnl_rxdid_format.
+ */
+static u8 iavf_select_rx_desc_format(const struct iavf_adapter *adapter)
+{
+ u64 rxdids = adapter->supp_rxdids;
+
+ /* If we did not negotiate VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC, we must
+ * stick with the default value of the legacy 32 byte format.
+ */
+ if (!IAVF_RXDID_ALLOWED(adapter))
+ return VIRTCHNL_RXDID_1_32B_BASE;
+
+ /* Rx timestamping requires the use of flexible NIC descriptors */
+ if (iavf_ptp_cap_supported(adapter, VIRTCHNL_1588_PTP_CAP_RX_TSTAMP)) {
+ if (rxdids & BIT(VIRTCHNL_RXDID_2_FLEX_SQ_NIC))
+ return VIRTCHNL_RXDID_2_FLEX_SQ_NIC;
+
+ pci_warn(adapter->pdev,
+ "Unable to negotiate flexible descriptor format\n");
+ }
+
+ /* Warn if the PF does not list support for the default legacy
+ * descriptor format. This shouldn't happen, as this is the format
+ * used if VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC is not supported. It is
+ * likely caused by a bug in the PF implementation failing to indicate
+ * support for the format.
+ */
+ if (!(rxdids & VIRTCHNL_RXDID_1_32B_BASE_M))
+ netdev_warn(adapter->netdev, "PF does not list support for default Rx descriptor format\n");
+
+ return VIRTCHNL_RXDID_1_32B_BASE;
+}
+
+/**
* iavf_configure_rx - Configure Receive Unit after Reset
* @adapter: board private structure
*
@@ -719,8 +762,12 @@ static void iavf_configure_rx(struct iavf_adapter *adapter)
{
struct iavf_hw *hw = &adapter->hw;
- for (u32 i = 0; i < adapter->num_active_queues; i++)
+ adapter->rxdid = iavf_select_rx_desc_format(adapter);
+
+ for (u32 i = 0; i < adapter->num_active_queues; i++) {
adapter->rx_rings[i].tail = hw->hw_addr + IAVF_QRX_TAIL1(i);
+ adapter->rx_rings[i].rxdid = adapter->rxdid;
+ }
}
/**
@@ -2075,6 +2122,10 @@ static int iavf_process_aq_command(struct iavf_adapter *adapter)
return iavf_send_vf_config_msg(adapter);
if (adapter->aq_required & IAVF_FLAG_AQ_GET_OFFLOAD_VLAN_V2_CAPS)
return iavf_send_vf_offload_vlan_v2_msg(adapter);
+ if (adapter->aq_required & IAVF_FLAG_AQ_GET_SUPPORTED_RXDIDS)
+ return iavf_send_vf_supported_rxdids_msg(adapter);
+ if (adapter->aq_required & IAVF_FLAG_AQ_GET_PTP_CAPS)
+ return iavf_send_vf_ptp_caps_msg(adapter);
if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_QUEUES) {
iavf_disable_queues(adapter);
return 0;
@@ -2239,7 +2290,10 @@ static int iavf_process_aq_command(struct iavf_adapter *adapter)
iavf_enable_vlan_insertion_v2(adapter, ETH_P_8021AD);
return 0;
}
-
+ if (adapter->aq_required & IAVF_FLAG_AQ_SEND_PTP_CMD) {
+ iavf_virtchnl_send_ptp_cmd(adapter);
+ return IAVF_SUCCESS;
+ }
if (adapter->aq_required & IAVF_FLAG_AQ_REQUEST_STATS) {
iavf_request_stats(adapter);
return 0;
@@ -2604,6 +2658,112 @@ err:
}
/**
+ * iavf_init_send_supported_rxdids - part of querying for supported RXDID
+ * formats
+ * @adapter: board private structure
+ *
+ * Function processes send of the request for supported RXDIDs to the PF.
+ * Must clear IAVF_EXTENDED_CAP_RECV_RXDID if the message is not sent, e.g.
+ * due to the PF not negotiating VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC.
+ */
+static void iavf_init_send_supported_rxdids(struct iavf_adapter *adapter)
+{
+ int ret;
+
+ ret = iavf_send_vf_supported_rxdids_msg(adapter);
+ if (ret == -EOPNOTSUPP) {
+ /* PF does not support VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC. In this
+ * case, we did not send the capability exchange message and
+ * do not expect a response.
+ */
+ adapter->extended_caps &= ~IAVF_EXTENDED_CAP_RECV_RXDID;
+ }
+
+ /* We sent the message, so move on to the next step */
+ adapter->extended_caps &= ~IAVF_EXTENDED_CAP_SEND_RXDID;
+}
+
+/**
+ * iavf_init_recv_supported_rxdids - part of querying for supported RXDID
+ * formats
+ * @adapter: board private structure
+ *
+ * Function processes receipt of the supported RXDIDs message from the PF.
+ **/
+static void iavf_init_recv_supported_rxdids(struct iavf_adapter *adapter)
+{
+ int ret;
+
+ memset(&adapter->supp_rxdids, 0, sizeof(adapter->supp_rxdids));
+
+ ret = iavf_get_vf_supported_rxdids(adapter);
+ if (ret)
+ goto err;
+
+ /* We've processed the PF response to the
+ * VIRTCHNL_OP_GET_SUPPORTED_RXDIDS message we sent previously.
+ */
+ adapter->extended_caps &= ~IAVF_EXTENDED_CAP_RECV_RXDID;
+ return;
+
+err:
+ /* We didn't receive a reply. Make sure we try sending again when
+ * __IAVF_INIT_FAILED attempts to recover.
+ */
+ adapter->extended_caps |= IAVF_EXTENDED_CAP_SEND_RXDID;
+ iavf_change_state(adapter, __IAVF_INIT_FAILED);
+}
+
+/**
+ * iavf_init_send_ptp_caps - part of querying for extended PTP capabilities
+ * @adapter: board private structure
+ *
+ * Function processes send of the request for 1588 PTP capabilities to the PF.
+ * Must clear IAVF_EXTENDED_CAP_SEND_PTP if the message is not sent, e.g.
+ * due to the PF not negotiating VIRTCHNL_VF_CAP_PTP.
+ */
+static void iavf_init_send_ptp_caps(struct iavf_adapter *adapter)
+{
+ if (iavf_send_vf_ptp_caps_msg(adapter) == -EOPNOTSUPP) {
+		/* PF does not support VIRTCHNL_VF_CAP_PTP. In this case, we
+ * did not send the capability exchange message and do not
+ * expect a response.
+ */
+ adapter->extended_caps &= ~IAVF_EXTENDED_CAP_RECV_PTP;
+ }
+
+ /* We sent the message, so move on to the next step */
+ adapter->extended_caps &= ~IAVF_EXTENDED_CAP_SEND_PTP;
+}
+
+/**
+ * iavf_init_recv_ptp_caps - part of querying for supported PTP capabilities
+ * @adapter: board private structure
+ *
+ * Function processes receipt of the PTP capabilities supported on this VF.
+ **/
+static void iavf_init_recv_ptp_caps(struct iavf_adapter *adapter)
+{
+ memset(&adapter->ptp.hw_caps, 0, sizeof(adapter->ptp.hw_caps));
+
+ if (iavf_get_vf_ptp_caps(adapter))
+ goto err;
+
+ /* We've processed the PF response to the VIRTCHNL_OP_1588_PTP_GET_CAPS
+ * message we sent previously.
+ */
+ adapter->extended_caps &= ~IAVF_EXTENDED_CAP_RECV_PTP;
+ return;
+
+err:
+ /* We didn't receive a reply. Make sure we try sending again when
+ * __IAVF_INIT_FAILED attempts to recover.
+ */
+ adapter->extended_caps |= IAVF_EXTENDED_CAP_SEND_PTP;
+ iavf_change_state(adapter, __IAVF_INIT_FAILED);
+}
+
+/**
* iavf_init_process_extended_caps - Part of driver startup
* @adapter: board private structure
*
@@ -2627,6 +2787,24 @@ static void iavf_init_process_extended_caps(struct iavf_adapter *adapter)
return;
}
+ /* Process capability exchange for RXDID formats */
+ if (adapter->extended_caps & IAVF_EXTENDED_CAP_SEND_RXDID) {
+ iavf_init_send_supported_rxdids(adapter);
+ return;
+ } else if (adapter->extended_caps & IAVF_EXTENDED_CAP_RECV_RXDID) {
+ iavf_init_recv_supported_rxdids(adapter);
+ return;
+ }
+
+ /* Process capability exchange for PTP features */
+ if (adapter->extended_caps & IAVF_EXTENDED_CAP_SEND_PTP) {
+ iavf_init_send_ptp_caps(adapter);
+ return;
+ } else if (adapter->extended_caps & IAVF_EXTENDED_CAP_RECV_PTP) {
+ iavf_init_recv_ptp_caps(adapter);
+ return;
+ }
+
/* When we reach here, no further extended capabilities exchanges are
* necessary, so we finally transition into __IAVF_INIT_CONFIG_ADAPTER
*/
@@ -2718,6 +2896,9 @@ static void iavf_init_config_adapter(struct iavf_adapter *adapter)
if (QOS_ALLOWED(adapter))
adapter->aq_required |= IAVF_FLAG_AQ_GET_QOS_CAPS;
+ /* Setup initial PTP configuration */
+ iavf_ptp_init(adapter);
+
iavf_schedule_finish_config(adapter);
return;
@@ -3143,15 +3324,18 @@ continue_reset:
}
adapter->aq_required |= IAVF_FLAG_AQ_GET_CONFIG;
- /* always set since VIRTCHNL_OP_GET_VF_RESOURCES has not been
- * sent/received yet, so VLAN_V2_ALLOWED() cannot is not reliable here,
- * however the VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS won't be sent until
- * VIRTCHNL_OP_GET_VF_RESOURCES and VIRTCHNL_VF_OFFLOAD_VLAN_V2 have
- * been successfully sent and negotiated
- */
- adapter->aq_required |= IAVF_FLAG_AQ_GET_OFFLOAD_VLAN_V2_CAPS;
adapter->aq_required |= IAVF_FLAG_AQ_MAP_VECTORS;
+ /* Certain capabilities require an extended negotiation process using
+ * extra messages that must be processed after getting the VF
+ * configuration. The related checks such as VLAN_V2_ALLOWED() are not
+ * reliable here, since the configuration has not yet been negotiated.
+ *
+	 * Always set these flags, since the related VIRTCHNL messages won't
+ * be sent until after VIRTCHNL_OP_GET_VF_RESOURCES.
+ */
+ adapter->aq_required |= IAVF_FLAG_AQ_EXTENDED_CAPS;
+
spin_lock_bh(&adapter->mac_vlan_list_lock);
/* Delete filter for the current MAC address, it could have
@@ -3711,10 +3895,8 @@ exit:
if (test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section))
return 0;
- netdev_lock(netdev);
netif_set_real_num_rx_queues(netdev, total_qps);
netif_set_real_num_tx_queues(netdev, total_qps);
- netdev_unlock(netdev);
return ret;
}
@@ -4379,22 +4561,21 @@ static int iavf_open(struct net_device *netdev)
struct iavf_adapter *adapter = netdev_priv(netdev);
int err;
+ netdev_assert_locked(netdev);
+
if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) {
dev_err(&adapter->pdev->dev, "Unable to open device due to PF driver failure.\n");
return -EIO;
}
- netdev_lock(netdev);
while (!mutex_trylock(&adapter->crit_lock)) {
/* If we are in __IAVF_INIT_CONFIG_ADAPTER state the crit_lock
* is already taken and iavf_open is called from an upper
* device's notifier reacting on NETDEV_REGISTER event.
* We have to leave here to avoid dead lock.
*/
- if (adapter->state == __IAVF_INIT_CONFIG_ADAPTER) {
- netdev_unlock(netdev);
+ if (adapter->state == __IAVF_INIT_CONFIG_ADAPTER)
return -EBUSY;
- }
usleep_range(500, 1000);
}
@@ -4443,7 +4624,6 @@ static int iavf_open(struct net_device *netdev)
iavf_irq_enable(adapter, true);
mutex_unlock(&adapter->crit_lock);
- netdev_unlock(netdev);
return 0;
@@ -4456,7 +4636,6 @@ err_setup_tx:
iavf_free_all_tx_resources(adapter);
err_unlock:
mutex_unlock(&adapter->crit_lock);
- netdev_unlock(netdev);
return err;
}
@@ -4478,12 +4657,12 @@ static int iavf_close(struct net_device *netdev)
u64 aq_to_restore;
int status;
- netdev_lock(netdev);
+ netdev_assert_locked(netdev);
+
mutex_lock(&adapter->crit_lock);
if (adapter->state <= __IAVF_DOWN_PENDING) {
mutex_unlock(&adapter->crit_lock);
- netdev_unlock(netdev);
return 0;
}
@@ -4536,6 +4715,7 @@ static int iavf_close(struct net_device *netdev)
if (!status)
netdev_warn(netdev, "Device resources not yet released\n");
+ netdev_lock(netdev);
mutex_lock(&adapter->crit_lock);
adapter->aq_required |= aq_to_restore;
mutex_unlock(&adapter->crit_lock);
@@ -5000,6 +5180,25 @@ static netdev_features_t iavf_fix_features(struct net_device *netdev,
return iavf_fix_strip_features(adapter, features);
}
+static int iavf_hwstamp_get(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config)
+{
+ struct iavf_adapter *adapter = netdev_priv(netdev);
+
+ *config = adapter->ptp.hwtstamp_config;
+
+ return 0;
+}
+
+static int iavf_hwstamp_set(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
+{
+ struct iavf_adapter *adapter = netdev_priv(netdev);
+
+ return iavf_ptp_set_ts_config(adapter, config, extack);
+}
+
static int
iavf_verify_shaper(struct net_shaper_binding *binding,
const struct net_shaper *shaper,
@@ -5108,6 +5307,8 @@ static const struct net_device_ops iavf_netdev_ops = {
.ndo_set_features = iavf_set_features,
.ndo_setup_tc = iavf_setup_tc,
.net_shaper_ops = &iavf_shaper_ops,
+ .ndo_hwtstamp_get = iavf_hwstamp_get,
+ .ndo_hwtstamp_set = iavf_hwstamp_set,
};
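As a usage illustration (not part of the patch): with .ndo_hwtstamp_get/.ndo_hwtstamp_set wired up above, user space reaches iavf_ptp_set_ts_config() through the standard SIOCSHWTSTAMP ioctl. A minimal sketch, assuming a hypothetical VF netdev named "ens1f0v0"; note that any Rx filter request is reported back as HWTSTAMP_FILTER_ALL, as described for iavf_ptp_set_timestamp_mode() below:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/sockios.h>
#include <linux/net_tstamp.h>

int main(void)
{
	struct hwtstamp_config cfg = {
		.tx_type = HWTSTAMP_TX_OFF,	/* Tx timestamping is not supported by the VF */
		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
	};
	struct ifreq ifr = { 0 };
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	strncpy(ifr.ifr_name, "ens1f0v0", IFNAMSIZ - 1);	/* hypothetical interface */
	ifr.ifr_data = (void *)&cfg;

	if (ioctl(fd, SIOCSHWTSTAMP, &ifr))
		perror("SIOCSHWTSTAMP");
	else
		/* the driver writes back the effective filter (HWTSTAMP_FILTER_ALL) */
		printf("rx_filter: %d\n", cfg.rx_filter);

	close(fd);
	return 0;
}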
/**
@@ -5362,6 +5563,10 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Setup the wait queue for indicating virtchannel events */
init_waitqueue_head(&adapter->vc_waitqueue);
+ INIT_LIST_HEAD(&adapter->ptp.aq_cmds);
+ init_waitqueue_head(&adapter->ptp.phc_time_waitqueue);
+ mutex_init(&adapter->ptp.aq_cmd_lock);
+
queue_delayed_work(adapter->wq, &adapter->watchdog_task,
msecs_to_jiffies(5 * (pdev->devfn & 0x07)));
/* Initialization goes on in the work. Do not add more of it below. */
@@ -5518,6 +5723,8 @@ static void iavf_remove(struct pci_dev *pdev)
msleep(50);
}
+ iavf_ptp_release(adapter);
+
iavf_misc_irq_disable(adapter);
/* Shut down all the garbage mashers on the detention level */
cancel_work_sync(&adapter->reset_task);
diff --git a/drivers/net/ethernet/intel/iavf/iavf_ptp.c b/drivers/net/ethernet/intel/iavf/iavf_ptp.c
new file mode 100644
index 000000000000..b4d5eda2e84f
--- /dev/null
+++ b/drivers/net/ethernet/intel/iavf/iavf_ptp.c
@@ -0,0 +1,485 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2024 Intel Corporation. */
+
+#include "iavf.h"
+#include "iavf_ptp.h"
+
+#define iavf_clock_to_adapter(info) \
+ container_of_const(info, struct iavf_adapter, ptp.info)
+
+/**
+ * iavf_ptp_disable_rx_tstamp - Disable timestamping in Rx rings
+ * @adapter: private adapter structure
+ *
+ * Disable timestamp reporting for all Rx rings.
+ */
+static void iavf_ptp_disable_rx_tstamp(struct iavf_adapter *adapter)
+{
+ for (u32 i = 0; i < adapter->num_active_queues; i++)
+ adapter->rx_rings[i].flags &= ~IAVF_TXRX_FLAGS_HW_TSTAMP;
+}
+
+/**
+ * iavf_ptp_enable_rx_tstamp - Enable timestamping in Rx rings
+ * @adapter: private adapter structure
+ *
+ * Enable timestamp reporting for all Rx rings.
+ */
+static void iavf_ptp_enable_rx_tstamp(struct iavf_adapter *adapter)
+{
+ for (u32 i = 0; i < adapter->num_active_queues; i++)
+ adapter->rx_rings[i].flags |= IAVF_TXRX_FLAGS_HW_TSTAMP;
+}
+
+/**
+ * iavf_ptp_set_timestamp_mode - Set device timestamping mode
+ * @adapter: private adapter structure
+ * @config: pointer to kernel_hwtstamp_config
+ *
+ * Set the timestamping mode requested from the userspace.
+ * Set the timestamping mode requested from userspace.
+ * Note: this function always translates Rx timestamp requests for any packet
+ * category into HWTSTAMP_FILTER_ALL.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+static int iavf_ptp_set_timestamp_mode(struct iavf_adapter *adapter,
+ struct kernel_hwtstamp_config *config)
+{
+ /* Reserved for future extensions. */
+ if (config->flags)
+ return -EINVAL;
+
+ switch (config->tx_type) {
+ case HWTSTAMP_TX_OFF:
+ break;
+ case HWTSTAMP_TX_ON:
+ return -EOPNOTSUPP;
+ default:
+ return -ERANGE;
+ }
+
+ if (config->rx_filter == HWTSTAMP_FILTER_NONE) {
+ iavf_ptp_disable_rx_tstamp(adapter);
+ return 0;
+ } else if (config->rx_filter > HWTSTAMP_FILTER_NTP_ALL) {
+ return -ERANGE;
+ } else if (!(iavf_ptp_cap_supported(adapter,
+ VIRTCHNL_1588_PTP_CAP_RX_TSTAMP))) {
+ return -EOPNOTSUPP;
+ }
+
+ config->rx_filter = HWTSTAMP_FILTER_ALL;
+ iavf_ptp_enable_rx_tstamp(adapter);
+
+ return 0;
+}
+
+/**
+ * iavf_ptp_set_ts_config - Set timestamping configuration
+ * @adapter: private adapter structure
+ * @config: pointer to kernel_hwtstamp_config structure
+ * @extack: pointer to netlink_ext_ack structure
+ *
+ * Program the requested timestamping configuration to the device.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+int iavf_ptp_set_ts_config(struct iavf_adapter *adapter,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
+{
+ int err;
+
+ err = iavf_ptp_set_timestamp_mode(adapter, config);
+ if (err)
+ return err;
+
+ /* Save successful settings for future reference */
+ adapter->ptp.hwtstamp_config = *config;
+
+ return 0;
+}
+
+/**
+ * iavf_ptp_cap_supported - Check if a PTP capability is supported
+ * @adapter: private adapter structure
+ * @cap: the capability bitmask to check
+ *
+ * Return: true if every capability set in cap is also set in the enabled
+ * capabilities reported by the PF, false otherwise.
+ */
+bool iavf_ptp_cap_supported(const struct iavf_adapter *adapter, u32 cap)
+{
+ if (!IAVF_PTP_ALLOWED(adapter))
+ return false;
+
+ /* Only return true if every bit in cap is set in hw_caps.caps */
+ return (adapter->ptp.hw_caps.caps & cap) == cap;
+}
+
+/**
+ * iavf_allocate_ptp_cmd - Allocate a PTP command message structure
+ * @v_opcode: the virtchnl opcode
+ * @msglen: length in bytes of the associated virtchnl structure
+ *
+ * Allocates a PTP command message and pre-fills it with the provided message
+ * length and opcode.
+ *
+ * Return: allocated PTP command, or NULL on allocation failure.
+ */
+static struct iavf_ptp_aq_cmd *iavf_allocate_ptp_cmd(enum virtchnl_ops v_opcode,
+ u16 msglen)
+{
+ struct iavf_ptp_aq_cmd *cmd;
+
+ cmd = kzalloc(struct_size(cmd, msg, msglen), GFP_KERNEL);
+ if (!cmd)
+ return NULL;
+
+ cmd->v_opcode = v_opcode;
+ cmd->msglen = msglen;
+
+ return cmd;
+}
+
+/**
+ * iavf_queue_ptp_cmd - Queue PTP command for sending over virtchnl
+ * @adapter: private adapter structure
+ * @cmd: the command structure to send
+ *
+ * Queue the given command structure into the PTP virtchnl command queue to
+ * send to the PF.
+ */
+static void iavf_queue_ptp_cmd(struct iavf_adapter *adapter,
+ struct iavf_ptp_aq_cmd *cmd)
+{
+ mutex_lock(&adapter->ptp.aq_cmd_lock);
+ list_add_tail(&cmd->list, &adapter->ptp.aq_cmds);
+ mutex_unlock(&adapter->ptp.aq_cmd_lock);
+
+ adapter->aq_required |= IAVF_FLAG_AQ_SEND_PTP_CMD;
+ mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
+}
+
+/**
+ * iavf_send_phc_read - Send request to read PHC time
+ * @adapter: private adapter structure
+ *
+ * Send a request to obtain the PTP hardware clock time. This allocates the
+ * VIRTCHNL_OP_1588_PTP_GET_TIME message and queues it up to send to
+ * indirectly read the PHC time.
+ *
+ * This function does not wait for the reply from the PF.
+ *
+ * Return: 0 if success, error code otherwise.
+ */
+static int iavf_send_phc_read(struct iavf_adapter *adapter)
+{
+ struct iavf_ptp_aq_cmd *cmd;
+
+ if (!adapter->ptp.clock)
+ return -EOPNOTSUPP;
+
+ cmd = iavf_allocate_ptp_cmd(VIRTCHNL_OP_1588_PTP_GET_TIME,
+ sizeof(struct virtchnl_phc_time));
+ if (!cmd)
+ return -ENOMEM;
+
+ iavf_queue_ptp_cmd(adapter, cmd);
+
+ return 0;
+}
+
+/**
+ * iavf_read_phc_indirect - Indirectly read the PHC time via virtchnl
+ * @adapter: private adapter structure
+ * @ts: storage for the timestamp value
+ * @sts: system timestamp values before and after the read
+ *
+ * Used when the device does not have direct register access to the PHC time.
+ * Indirectly reads the time via the VIRTCHNL_OP_1588_PTP_GET_TIME, and waits
+ * for the reply from the PF.
+ *
+ * Based on some simple measurements using ftrace and phc2sys, this clock
+ * access method has a latency of roughly 110 usec even when the system is not
+ * under load. In order to achieve acceptable results when using phc2sys with
+ * the indirect clock access method, it is recommended to use more
+ * conservative proportional and integration constants with the P/I servo.
+ *
+ * Return: 0 if success, error code otherwise.
+ */
+static int iavf_read_phc_indirect(struct iavf_adapter *adapter,
+ struct timespec64 *ts,
+ struct ptp_system_timestamp *sts)
+{
+ long ret;
+ int err;
+
+ adapter->ptp.phc_time_ready = false;
+
+ ptp_read_system_prets(sts);
+
+ err = iavf_send_phc_read(adapter);
+ if (err)
+ return err;
+
+ ret = wait_event_interruptible_timeout(adapter->ptp.phc_time_waitqueue,
+ adapter->ptp.phc_time_ready,
+ HZ);
+
+ ptp_read_system_postts(sts);
+
+ if (ret < 0)
+ return ret;
+ else if (!ret)
+ return -EBUSY;
+
+ *ts = ns_to_timespec64(adapter->ptp.cached_phc_time);
+
+ return 0;
+}
+
+static int iavf_ptp_gettimex64(struct ptp_clock_info *info,
+ struct timespec64 *ts,
+ struct ptp_system_timestamp *sts)
+{
+ struct iavf_adapter *adapter = iavf_clock_to_adapter(info);
+
+ if (!adapter->ptp.clock)
+ return -EOPNOTSUPP;
+
+ return iavf_read_phc_indirect(adapter, ts, sts);
+}
+
+/**
+ * iavf_ptp_cache_phc_time - Cache PHC time for performing timestamp extension
+ * @adapter: private adapter structure
+ *
+ * Periodically cache the PHC time in order to allow for timestamp extension.
+ * This is required because the Tx and Rx timestamps only contain 32bits of
+ * nanoseconds. Timestamp extension allows calculating the corrected 64bit
+ * timestamp. This algorithm relies on the cached time being within ~1 second
+ * of the timestamp.
+ */
+static void iavf_ptp_cache_phc_time(struct iavf_adapter *adapter)
+{
+ if (!time_is_before_jiffies(adapter->ptp.cached_phc_updated + HZ))
+ return;
+
+ /* The response from virtchnl will store the time into
+ * cached_phc_time.
+ */
+ iavf_send_phc_read(adapter);
+}
+
+/**
+ * iavf_ptp_do_aux_work - Perform periodic work required for PTP support
+ * @info: PTP clock info structure
+ *
+ * Handler to take care of periodic work required for PTP operation. This
+ * includes the following tasks:
+ *
+ * 1) updating cached_phc_time
+ *
+ * cached_phc_time is used by the Tx and Rx timestamp flows in order to
+ * perform timestamp extension, by carefully comparing the timestamp
+ * 32bit nanosecond timestamps and determining the corrected 64bit
+ * timestamp value to report to userspace. This algorithm only works if
+ * the cached_phc_time is within ~1 second of the Tx or Rx timestamp
+ * event. This task periodically reads the PHC time and stores it, to
+ * ensure that timestamp extension operates correctly.
+ *
+ * Returns: time in jiffies until the periodic task should be re-scheduled.
+ */
+static long iavf_ptp_do_aux_work(struct ptp_clock_info *info)
+{
+ struct iavf_adapter *adapter = iavf_clock_to_adapter(info);
+
+ iavf_ptp_cache_phc_time(adapter);
+
+ /* Check work about twice a second */
+ return msecs_to_jiffies(500);
+}
+
+/**
+ * iavf_ptp_register_clock - Register a new PTP for userspace
+ * @adapter: private adapter structure
+ *
+ * Allocate and register a new PTP clock device if necessary.
+ *
+ * Return: 0 if success, error otherwise.
+ */
+static int iavf_ptp_register_clock(struct iavf_adapter *adapter)
+{
+ struct ptp_clock_info *ptp_info = &adapter->ptp.info;
+ struct device *dev = &adapter->pdev->dev;
+ struct ptp_clock *clock;
+
+ snprintf(ptp_info->name, sizeof(ptp_info->name), "%s-%s-clk",
+ KBUILD_MODNAME, dev_name(dev));
+ ptp_info->owner = THIS_MODULE;
+ ptp_info->gettimex64 = iavf_ptp_gettimex64;
+ ptp_info->do_aux_work = iavf_ptp_do_aux_work;
+
+ clock = ptp_clock_register(ptp_info, dev);
+ if (IS_ERR(clock))
+ return PTR_ERR(clock);
+
+ adapter->ptp.clock = clock;
+
+ dev_dbg(&adapter->pdev->dev, "PTP clock %s registered\n",
+ adapter->ptp.info.name);
+
+ return 0;
+}
+
+/**
+ * iavf_ptp_init - Initialize PTP support if capability was negotiated
+ * @adapter: private adapter structure
+ *
+ * Initialize PTP functionality, based on the capabilities that the PF has
+ * enabled for this VF.
+ */
+void iavf_ptp_init(struct iavf_adapter *adapter)
+{
+ int err;
+
+ if (!iavf_ptp_cap_supported(adapter, VIRTCHNL_1588_PTP_CAP_READ_PHC)) {
+ pci_notice(adapter->pdev,
+ "Device does not have PTP clock support\n");
+ return;
+ }
+
+ err = iavf_ptp_register_clock(adapter);
+ if (err) {
+ pci_err(adapter->pdev,
+ "Failed to register PTP clock device (%p)\n",
+ ERR_PTR(err));
+ return;
+ }
+
+ for (int i = 0; i < adapter->num_active_queues; i++) {
+ struct iavf_ring *rx_ring = &adapter->rx_rings[i];
+
+ rx_ring->ptp = &adapter->ptp;
+ }
+
+ ptp_schedule_worker(adapter->ptp.clock, 0);
+}
+
+/**
+ * iavf_ptp_release - Disable PTP support
+ * @adapter: private adapter structure
+ *
+ * Release all PTP resources that were previously initialized.
+ */
+void iavf_ptp_release(struct iavf_adapter *adapter)
+{
+ struct iavf_ptp_aq_cmd *cmd, *tmp;
+
+ if (!adapter->ptp.clock)
+ return;
+
+ pci_dbg(adapter->pdev, "removing PTP clock %s\n",
+ adapter->ptp.info.name);
+ ptp_clock_unregister(adapter->ptp.clock);
+ adapter->ptp.clock = NULL;
+
+ /* Cancel any remaining uncompleted PTP clock commands */
+ mutex_lock(&adapter->ptp.aq_cmd_lock);
+ list_for_each_entry_safe(cmd, tmp, &adapter->ptp.aq_cmds, list) {
+ list_del(&cmd->list);
+ kfree(cmd);
+ }
+ adapter->aq_required &= ~IAVF_FLAG_AQ_SEND_PTP_CMD;
+ mutex_unlock(&adapter->ptp.aq_cmd_lock);
+
+ adapter->ptp.hwtstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
+ iavf_ptp_disable_rx_tstamp(adapter);
+}
+
+/**
+ * iavf_ptp_process_caps - Handle change in PTP capabilities
+ * @adapter: private adapter structure
+ *
+ * Handle any state changes necessary due to change in PTP capabilities, such
+ * as after a device reset or change in configuration from the PF.
+ */
+void iavf_ptp_process_caps(struct iavf_adapter *adapter)
+{
+ bool phc = iavf_ptp_cap_supported(adapter, VIRTCHNL_1588_PTP_CAP_READ_PHC);
+
+ /* Check if the device gained or lost necessary access to support the
+ * PTP hardware clock. If so, driver must respond appropriately by
+ * creating or destroying the PTP clock device.
+ */
+ if (adapter->ptp.clock && !phc)
+ iavf_ptp_release(adapter);
+ else if (!adapter->ptp.clock && phc)
+ iavf_ptp_init(adapter);
+
+ /* Check if the device lost access to Rx timestamp incoming packets */
+ if (!iavf_ptp_cap_supported(adapter, VIRTCHNL_1588_PTP_CAP_RX_TSTAMP)) {
+ adapter->ptp.hwtstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
+ iavf_ptp_disable_rx_tstamp(adapter);
+ }
+}
+
+/**
+ * iavf_ptp_extend_32b_timestamp - Convert a 32b nanoseconds timestamp to 64b
+ * nanoseconds
+ * @cached_phc_time: recently cached copy of PHC time
+ * @in_tstamp: Ingress/egress 32b nanoseconds timestamp value
+ *
+ * Hardware captures timestamps which contain only 32 bits of nominal
+ * nanoseconds, as opposed to the 64bit timestamps that the stack expects.
+ *
+ * Extend the 32bit nanosecond timestamp using the following algorithm and
+ * assumptions:
+ *
+ * 1) have a recently cached copy of the PHC time
+ * 2) assume that the in_tstamp was captured 2^31 nanoseconds (~2.1
+ * seconds) before or after the PHC time was captured.
+ * 3) calculate the delta between the cached time and the timestamp
+ * 4) if the delta is smaller than 2^31 nanoseconds, then the timestamp was
+ * captured after the PHC time. In this case, the full timestamp is just
+ * the cached PHC time plus the delta.
+ * 5) otherwise, if the delta is larger than 2^31 nanoseconds, then the
+ * timestamp was captured *before* the PHC time, i.e. because the PHC
+ * cache was updated after the timestamp was captured by hardware. In this
+ * case, the full timestamp is the cached time minus the inverse delta.
+ *
+ * This algorithm works even if the PHC time was updated after a Tx timestamp
+ * was requested, but before the Tx timestamp event was reported from
+ * hardware.
+ *
+ * This calculation primarily relies on keeping the cached PHC time up to
+ * date. If the timestamp was captured more than 2^31 nanoseconds after the
+ * PHC time, it is possible that the lower 32bits of PHC time have
+ * overflowed more than once, and we might generate an incorrect timestamp.
+ *
+ * This is prevented by (a) periodically updating the cached PHC time once
+ * a second, and (b) discarding any Tx timestamp packet if it has waited for
+ * a timestamp for more than one second.
+ *
+ * Return: extended timestamp (to 64b).
+ */
+u64 iavf_ptp_extend_32b_timestamp(u64 cached_phc_time, u32 in_tstamp)
+{
+ u32 low = lower_32_bits(cached_phc_time);
+ u32 delta = in_tstamp - low;
+ u64 ns;
+
+ /* Do not assume that the in_tstamp is always more recent than the
+ * cached PHC time. If the delta is large, it indicates that the
+ * in_tstamp was taken in the past, and should be converted
+ * forward.
+ */
+ if (delta > S32_MAX)
+ ns = cached_phc_time - (low - in_tstamp);
+ else
+ ns = cached_phc_time + delta;
+
+ return ns;
+}
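To make the timestamp extension arithmetic above concrete, a small stand-alone sketch (hypothetical example values, not part of the patch) exercising both the "captured after the cached PHC time" and "captured before" cases:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* same arithmetic as iavf_ptp_extend_32b_timestamp(), duplicated for illustration */
static uint64_t extend_32b(uint64_t cached_phc_time, uint32_t in_tstamp)
{
	uint32_t low = (uint32_t)cached_phc_time;
	uint32_t delta = in_tstamp - low;

	if (delta > (uint32_t)INT32_MAX)
		return cached_phc_time - (low - in_tstamp);

	return cached_phc_time + delta;
}

int main(void)
{
	uint64_t phc = 0x0000000500000010ULL;	/* hypothetical cached PHC time */

	/* timestamp taken shortly after the cached time: prints 0x500000030 */
	printf("after:  %#" PRIx64 "\n", extend_32b(phc, 0x00000030));
	/* timestamp taken just before the low 32 bits wrapped: prints 0x4fffffff0 */
	printf("before: %#" PRIx64 "\n", extend_32b(phc, 0xfffffff0));

	return 0;
}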
diff --git a/drivers/net/ethernet/intel/iavf/iavf_ptp.h b/drivers/net/ethernet/intel/iavf/iavf_ptp.h
new file mode 100644
index 000000000000..783b8f287cd9
--- /dev/null
+++ b/drivers/net/ethernet/intel/iavf/iavf_ptp.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2024 Intel Corporation. */
+
+#ifndef _IAVF_PTP_H_
+#define _IAVF_PTP_H_
+
+#include "iavf_types.h"
+
+/* bit indicating whether a 40bit timestamp is valid */
+#define IAVF_PTP_40B_TSTAMP_VALID BIT(24)
+
+#if IS_ENABLED(CONFIG_PTP_1588_CLOCK)
+void iavf_ptp_init(struct iavf_adapter *adapter);
+void iavf_ptp_release(struct iavf_adapter *adapter);
+void iavf_ptp_process_caps(struct iavf_adapter *adapter);
+bool iavf_ptp_cap_supported(const struct iavf_adapter *adapter, u32 cap);
+void iavf_virtchnl_send_ptp_cmd(struct iavf_adapter *adapter);
+int iavf_ptp_set_ts_config(struct iavf_adapter *adapter,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack);
+u64 iavf_ptp_extend_32b_timestamp(u64 cached_phc_time, u32 in_tstamp);
+#else /* IS_ENABLED(CONFIG_PTP_1588_CLOCK) */
+static inline void iavf_ptp_init(struct iavf_adapter *adapter) { }
+static inline void iavf_ptp_release(struct iavf_adapter *adapter) { }
+static inline void iavf_ptp_process_caps(struct iavf_adapter *adapter) { }
+static inline bool iavf_ptp_cap_supported(const struct iavf_adapter *adapter,
+ u32 cap)
+{
+ return false;
+}
+
+static inline void iavf_virtchnl_send_ptp_cmd(struct iavf_adapter *adapter) { }
+static inline int iavf_ptp_set_ts_config(struct iavf_adapter *adapter,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
+{
+ return -1;
+}
+
+static inline u64 iavf_ptp_extend_32b_timestamp(u64 cached_phc_time,
+ u32 in_tstamp)
+{
+ return 0;
+}
+
+#endif /* IS_ENABLED(CONFIG_PTP_1588_CLOCK) */
+#endif /* _IAVF_PTP_H_ */
diff --git a/drivers/net/ethernet/intel/iavf/iavf_trace.h b/drivers/net/ethernet/intel/iavf/iavf_trace.h
index 62212011c807..c5e4d1823886 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_trace.h
+++ b/drivers/net/ethernet/intel/iavf/iavf_trace.h
@@ -112,7 +112,7 @@ DECLARE_EVENT_CLASS(
iavf_rx_template,
TP_PROTO(struct iavf_ring *ring,
- union iavf_32byte_rx_desc *desc,
+ struct iavf_rx_desc *desc,
struct sk_buff *skb),
TP_ARGS(ring, desc, skb),
@@ -140,7 +140,7 @@ DECLARE_EVENT_CLASS(
DEFINE_EVENT(
iavf_rx_template, iavf_clean_rx_irq,
TP_PROTO(struct iavf_ring *ring,
- union iavf_32byte_rx_desc *desc,
+ struct iavf_rx_desc *desc,
struct sk_buff *skb),
TP_ARGS(ring, desc, skb));
@@ -148,7 +148,7 @@ DEFINE_EVENT(
DEFINE_EVENT(
iavf_rx_template, iavf_clean_rx_irq_rx,
TP_PROTO(struct iavf_ring *ring,
- union iavf_32byte_rx_desc *desc,
+ struct iavf_rx_desc *desc,
struct sk_buff *skb),
TP_ARGS(ring, desc, skb));
diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.c b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
index 26b424fd6718..422312b8b54a 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_txrx.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
@@ -8,6 +8,26 @@
#include "iavf.h"
#include "iavf_trace.h"
#include "iavf_prototype.h"
+#include "iavf_ptp.h"
+
+/**
+ * iavf_is_descriptor_done - tests DD bit in Rx descriptor
+ * @qw1: quad word 1 from descriptor to get Descriptor Done field from
+ * @flex: is the descriptor flex or legacy
+ *
+ * This function tests the descriptor done bit in the specified descriptor.
+ * Because there are two types of descriptors (legacy and flex), the flex
+ * parameter is used to distinguish between them.
+ *
+ * Return: true or false based on the state of DD bit in Rx descriptor.
+ */
+static bool iavf_is_descriptor_done(u64 qw1, bool flex)
+{
+ if (flex)
+ return FIELD_GET(IAVF_RXD_FLEX_DD_M, qw1);
+ else
+ return FIELD_GET(IAVF_RXD_LEGACY_DD_M, qw1);
+}
static __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size,
u32 td_tag)
@@ -766,7 +786,7 @@ int iavf_setup_rx_descriptors(struct iavf_ring *rx_ring)
u64_stats_init(&rx_ring->syncp);
/* Round up to nearest 4K */
- rx_ring->size = rx_ring->count * sizeof(union iavf_32byte_rx_desc);
+ rx_ring->size = rx_ring->count * sizeof(struct iavf_rx_desc);
rx_ring->size = ALIGN(rx_ring->size, 4096);
rx_ring->desc = dma_alloc_coherent(fq.pp->p.dev, rx_ring->size,
&rx_ring->dma, GFP_KERNEL);
@@ -845,7 +865,7 @@ bool iavf_alloc_rx_buffers(struct iavf_ring *rx_ring, u16 cleaned_count)
.count = rx_ring->count,
};
u16 ntu = rx_ring->next_to_use;
- union iavf_rx_desc *rx_desc;
+ struct iavf_rx_desc *rx_desc;
/* do nothing if no valid netdev defined */
if (!rx_ring->netdev || !cleaned_count)
@@ -863,7 +883,7 @@ bool iavf_alloc_rx_buffers(struct iavf_ring *rx_ring, u16 cleaned_count)
/* Refresh the desc even if buffer_addrs didn't change
* because each write-back erases this info.
*/
- rx_desc->read.pkt_addr = cpu_to_le64(addr);
+ rx_desc->qw0 = cpu_to_le64(addr);
rx_desc++;
ntu++;
@@ -873,7 +893,7 @@ bool iavf_alloc_rx_buffers(struct iavf_ring *rx_ring, u16 cleaned_count)
}
/* clear the status bits for the next_to_use descriptor */
- rx_desc->wb.qword1.status_error_len = 0;
+ rx_desc->qw1 = 0;
cleaned_count--;
} while (cleaned_count);
@@ -896,60 +916,43 @@ no_buffers:
}
/**
- * iavf_rx_checksum - Indicate in skb if hw indicated a good cksum
+ * iavf_rx_csum - Indicate in skb if hw indicated a good checksum
* @vsi: the VSI we care about
* @skb: skb currently being received and modified
- * @rx_desc: the receive descriptor
+ * @decoded_pt: decoded ptype information
+ * @csum_bits: decoded Rx descriptor information
**/
-static void iavf_rx_checksum(struct iavf_vsi *vsi,
- struct sk_buff *skb,
- union iavf_rx_desc *rx_desc)
+static void iavf_rx_csum(const struct iavf_vsi *vsi, struct sk_buff *skb,
+ struct libeth_rx_pt decoded_pt,
+ struct libeth_rx_csum csum_bits)
{
- struct libeth_rx_pt decoded;
- u32 rx_error, rx_status;
bool ipv4, ipv6;
- u8 ptype;
- u64 qword;
skb->ip_summed = CHECKSUM_NONE;
- qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
- ptype = FIELD_GET(IAVF_RXD_QW1_PTYPE_MASK, qword);
-
- decoded = libie_rx_pt_parse(ptype);
- if (!libeth_rx_pt_has_checksum(vsi->netdev, decoded))
- return;
-
- rx_error = FIELD_GET(IAVF_RXD_QW1_ERROR_MASK, qword);
- rx_status = FIELD_GET(IAVF_RXD_QW1_STATUS_MASK, qword);
-
/* did the hardware decode the packet and checksum? */
- if (!(rx_status & BIT(IAVF_RX_DESC_STATUS_L3L4P_SHIFT)))
+ if (unlikely(!csum_bits.l3l4p))
return;
- ipv4 = libeth_rx_pt_get_ip_ver(decoded) == LIBETH_RX_PT_OUTER_IPV4;
- ipv6 = libeth_rx_pt_get_ip_ver(decoded) == LIBETH_RX_PT_OUTER_IPV6;
+ ipv4 = libeth_rx_pt_get_ip_ver(decoded_pt) == LIBETH_RX_PT_OUTER_IPV4;
+ ipv6 = libeth_rx_pt_get_ip_ver(decoded_pt) == LIBETH_RX_PT_OUTER_IPV6;
- if (ipv4 &&
- (rx_error & (BIT(IAVF_RX_DESC_ERROR_IPE_SHIFT) |
- BIT(IAVF_RX_DESC_ERROR_EIPE_SHIFT))))
+ if (unlikely(ipv4 && (csum_bits.ipe || csum_bits.eipe)))
goto checksum_fail;
/* likely incorrect csum if alternate IP extension headers found */
- if (ipv6 &&
- rx_status & BIT(IAVF_RX_DESC_STATUS_IPV6EXADD_SHIFT))
- /* don't increment checksum err here, non-fatal err */
+ if (unlikely(ipv6 && csum_bits.ipv6exadd))
return;
/* there was some L4 error, count error and punt packet to the stack */
- if (rx_error & BIT(IAVF_RX_DESC_ERROR_L4E_SHIFT))
+ if (unlikely(csum_bits.l4e))
goto checksum_fail;
/* handle packets that were not able to be checksummed due
* to arrival speed, in this case the stack can compute
* the csum.
*/
- if (rx_error & BIT(IAVF_RX_DESC_ERROR_PPRS_SHIFT))
+ if (unlikely(csum_bits.pprs))
return;
skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -960,52 +963,196 @@ checksum_fail:
}
/**
- * iavf_rx_hash - set the hash value in the skb
+ * iavf_legacy_rx_csum - Indicate in skb if hw indicated a good checksum
+ * @vsi: the VSI we care about
+ * @qw1: quad word 1
+ * @decoded_pt: decoded packet type
+ *
+ * This function only operates on the VIRTCHNL_RXDID_1_32B_BASE legacy 32byte
+ * descriptor writeback format.
+ *
+ * Return: decoded checksum bits.
+ **/
+static struct libeth_rx_csum
+iavf_legacy_rx_csum(const struct iavf_vsi *vsi, u64 qw1,
+ const struct libeth_rx_pt decoded_pt)
+{
+ struct libeth_rx_csum csum_bits = {};
+
+ if (!libeth_rx_pt_has_checksum(vsi->netdev, decoded_pt))
+ return csum_bits;
+
+ csum_bits.ipe = FIELD_GET(IAVF_RXD_LEGACY_IPE_M, qw1);
+ csum_bits.eipe = FIELD_GET(IAVF_RXD_LEGACY_EIPE_M, qw1);
+ csum_bits.l4e = FIELD_GET(IAVF_RXD_LEGACY_L4E_M, qw1);
+ csum_bits.pprs = FIELD_GET(IAVF_RXD_LEGACY_PPRS_M, qw1);
+ csum_bits.l3l4p = FIELD_GET(IAVF_RXD_LEGACY_L3L4P_M, qw1);
+ csum_bits.ipv6exadd = FIELD_GET(IAVF_RXD_LEGACY_IPV6EXADD_M, qw1);
+
+ return csum_bits;
+}
+
+/**
+ * iavf_flex_rx_csum - Indicate in skb if hw indicated a good checksum
+ * @vsi: the VSI we care about
+ * @qw1: quad word 1
+ * @decoded_pt: decoded packet type
+ *
+ * This function only operates on the VIRTCHNL_RXDID_2_FLEX_SQ_NIC flexible
+ * descriptor writeback format.
+ *
+ * Return: decoded checksum bits.
+ **/
+static struct libeth_rx_csum
+iavf_flex_rx_csum(const struct iavf_vsi *vsi, u64 qw1,
+ const struct libeth_rx_pt decoded_pt)
+{
+ struct libeth_rx_csum csum_bits = {};
+
+ if (!libeth_rx_pt_has_checksum(vsi->netdev, decoded_pt))
+ return csum_bits;
+
+ csum_bits.ipe = FIELD_GET(IAVF_RXD_FLEX_XSUM_IPE_M, qw1);
+ csum_bits.eipe = FIELD_GET(IAVF_RXD_FLEX_XSUM_EIPE_M, qw1);
+ csum_bits.l4e = FIELD_GET(IAVF_RXD_FLEX_XSUM_L4E_M, qw1);
+ csum_bits.eudpe = FIELD_GET(IAVF_RXD_FLEX_XSUM_EUDPE_M, qw1);
+ csum_bits.l3l4p = FIELD_GET(IAVF_RXD_FLEX_L3L4P_M, qw1);
+ csum_bits.ipv6exadd = FIELD_GET(IAVF_RXD_FLEX_IPV6EXADD_M, qw1);
+ csum_bits.nat = FIELD_GET(IAVF_RXD_FLEX_NAT_M, qw1);
+
+ return csum_bits;
+}
+
+/**
+ * iavf_legacy_rx_hash - set the hash value in the skb
+ * @ring: descriptor ring
+ * @qw0: quad word 0
+ * @qw1: quad word 1
+ * @skb: skb currently being received and modified
+ * @decoded_pt: decoded packet type
+ *
+ * This function only operates on the VIRTCHNL_RXDID_1_32B_BASE legacy 32byte
+ * descriptor writeback format.
+ **/
+static void iavf_legacy_rx_hash(const struct iavf_ring *ring, __le64 qw0,
+ __le64 qw1, struct sk_buff *skb,
+ const struct libeth_rx_pt decoded_pt)
+{
+ const __le64 rss_mask = cpu_to_le64(IAVF_RXD_LEGACY_FLTSTAT_M);
+ u32 hash;
+
+ if (!libeth_rx_pt_has_hash(ring->netdev, decoded_pt))
+ return;
+
+ if ((qw1 & rss_mask) == rss_mask) {
+ hash = le64_get_bits(qw0, IAVF_RXD_LEGACY_RSS_M);
+ libeth_rx_pt_set_hash(skb, hash, decoded_pt);
+ }
+}
+
+/**
+ * iavf_flex_rx_hash - set the hash value in the skb
* @ring: descriptor ring
- * @rx_desc: specific descriptor
+ * @qw1: quad word 1
* @skb: skb currently being received and modified
- * @rx_ptype: Rx packet type
+ * @decoded_pt: decoded packet type
+ *
+ * This function only operates on the VIRTCHNL_RXDID_2_FLEX_SQ_NIC flexible
+ * descriptor writeback format.
**/
-static void iavf_rx_hash(struct iavf_ring *ring,
- union iavf_rx_desc *rx_desc,
- struct sk_buff *skb,
- u8 rx_ptype)
+static void iavf_flex_rx_hash(const struct iavf_ring *ring, __le64 qw1,
+ struct sk_buff *skb,
+ const struct libeth_rx_pt decoded_pt)
{
- struct libeth_rx_pt decoded;
+ bool rss_valid;
u32 hash;
- const __le64 rss_mask =
- cpu_to_le64((u64)IAVF_RX_DESC_FLTSTAT_RSS_HASH <<
- IAVF_RX_DESC_STATUS_FLTSTAT_SHIFT);
- decoded = libie_rx_pt_parse(rx_ptype);
- if (!libeth_rx_pt_has_hash(ring->netdev, decoded))
+ if (!libeth_rx_pt_has_hash(ring->netdev, decoded_pt))
return;
- if ((rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask) {
- hash = le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
- libeth_rx_pt_set_hash(skb, hash, decoded);
+ rss_valid = le64_get_bits(qw1, IAVF_RXD_FLEX_RSS_VALID_M);
+ if (rss_valid) {
+ hash = le64_get_bits(qw1, IAVF_RXD_FLEX_RSS_HASH_M);
+ libeth_rx_pt_set_hash(skb, hash, decoded_pt);
}
}
/**
+ * iavf_flex_rx_tstamp - Capture Rx timestamp from the descriptor
+ * @rx_ring: descriptor ring
+ * @qw2: quad word 2 of descriptor
+ * @qw3: quad word 3 of descriptor
+ * @skb: skb currently being received
+ *
+ * Read the Rx timestamp value from the descriptor and pass it to the stack.
+ *
+ * This function only operates on the VIRTCHNL_RXDID_2_FLEX_SQ_NIC flexible
+ * descriptor writeback format.
+ */
+static void iavf_flex_rx_tstamp(const struct iavf_ring *rx_ring, __le64 qw2,
+ __le64 qw3, struct sk_buff *skb)
+{
+ u32 tstamp;
+ u64 ns;
+
+ /* Skip processing if timestamps aren't enabled */
+ if (!(rx_ring->flags & IAVF_TXRX_FLAGS_HW_TSTAMP))
+ return;
+
+ /* Check if this Rx descriptor has a valid timestamp */
+ if (!le64_get_bits(qw2, IAVF_PTP_40B_TSTAMP_VALID))
+ return;
+
+ /* the ts_low field only contains the valid bit and sub-nanosecond
+ * precision, so we don't need to extract it.
+ */
+ tstamp = le64_get_bits(qw3, IAVF_RXD_FLEX_QW3_TSTAMP_HIGH_M);
+
+ ns = iavf_ptp_extend_32b_timestamp(rx_ring->ptp->cached_phc_time,
+ tstamp);
+
+ *skb_hwtstamps(skb) = (struct skb_shared_hwtstamps) {
+ .hwtstamp = ns_to_ktime(ns),
+ };
+}
+
+/**
* iavf_process_skb_fields - Populate skb header fields from Rx descriptor
* @rx_ring: rx descriptor ring packet is being transacted on
* @rx_desc: pointer to the EOP Rx descriptor
* @skb: pointer to current skb being populated
- * @rx_ptype: the packet type decoded by hardware
+ * @ptype: the packet type decoded by hardware
+ * @flex: is the descriptor flex or legacy
*
* This function checks the ring, descriptor, and packet information in
* order to populate the hash, checksum, VLAN, protocol, and
* other fields within the skb.
**/
-static void
-iavf_process_skb_fields(struct iavf_ring *rx_ring,
- union iavf_rx_desc *rx_desc, struct sk_buff *skb,
- u8 rx_ptype)
+static void iavf_process_skb_fields(const struct iavf_ring *rx_ring,
+ const struct iavf_rx_desc *rx_desc,
+ struct sk_buff *skb, u32 ptype,
+ bool flex)
{
- iavf_rx_hash(rx_ring, rx_desc, skb, rx_ptype);
-
- iavf_rx_checksum(rx_ring->vsi, skb, rx_desc);
+ struct libeth_rx_csum csum_bits;
+ struct libeth_rx_pt decoded_pt;
+ __le64 qw0 = rx_desc->qw0;
+ __le64 qw1 = rx_desc->qw1;
+ __le64 qw2 = rx_desc->qw2;
+ __le64 qw3 = rx_desc->qw3;
+
+ decoded_pt = libie_rx_pt_parse(ptype);
+
+ if (flex) {
+ iavf_flex_rx_hash(rx_ring, qw1, skb, decoded_pt);
+ iavf_flex_rx_tstamp(rx_ring, qw2, qw3, skb);
+ csum_bits = iavf_flex_rx_csum(rx_ring->vsi, le64_to_cpu(qw1),
+ decoded_pt);
+ } else {
+ iavf_legacy_rx_hash(rx_ring, qw0, qw1, skb, decoded_pt);
+ csum_bits = iavf_legacy_rx_csum(rx_ring->vsi, le64_to_cpu(qw1),
+ decoded_pt);
+ }
+ iavf_rx_csum(rx_ring->vsi, skb, decoded_pt, csum_bits);
skb_record_rx_queue(skb, rx_ring->queue_index);
@@ -1092,8 +1239,7 @@ static struct sk_buff *iavf_build_skb(const struct libeth_fqe *rx_buffer,
/**
* iavf_is_non_eop - process handling of non-EOP buffers
* @rx_ring: Rx ring being processed
- * @rx_desc: Rx descriptor for current buffer
- * @skb: Current socket buffer containing buffer in progress
+ * @fields: Rx descriptor extracted fields
*
* This function updates next to clean. If the buffer is an EOP buffer
* this function exits returning false, otherwise it will place the
@@ -1101,8 +1247,7 @@ static struct sk_buff *iavf_build_skb(const struct libeth_fqe *rx_buffer,
* that this is in fact a non-EOP buffer.
**/
static bool iavf_is_non_eop(struct iavf_ring *rx_ring,
- union iavf_rx_desc *rx_desc,
- struct sk_buff *skb)
+ struct libeth_rqe_info fields)
{
u32 ntc = rx_ring->next_to_clean + 1;
@@ -1113,8 +1258,7 @@ static bool iavf_is_non_eop(struct iavf_ring *rx_ring,
prefetch(IAVF_RX_DESC(rx_ring, ntc));
/* if we are the last buffer then there is nothing else to do */
-#define IAVF_RXD_EOF BIT(IAVF_RX_DESC_STATUS_EOF_SHIFT)
- if (likely(iavf_test_staterr(rx_desc, IAVF_RXD_EOF)))
+ if (likely(fields.eop))
return false;
rx_ring->rx_stats.non_eop_descs++;
@@ -1123,6 +1267,109 @@ static bool iavf_is_non_eop(struct iavf_ring *rx_ring,
}
/**
+ * iavf_extract_legacy_rx_fields - Extract fields from the Rx descriptor
+ * @rx_ring: rx descriptor ring
+ * @rx_desc: the descriptor to process
+ *
+ * Decode the Rx descriptor and extract relevant information including the
+ * size, VLAN tag, Rx packet type, end of packet field and RXE field value.
+ *
+ * This function only operates on the VIRTCHNL_RXDID_1_32B_BASE legacy 32-byte
+ * descriptor writeback format.
+ *
+ * Return: fields extracted from the Rx descriptor.
+ */
+static struct libeth_rqe_info
+iavf_extract_legacy_rx_fields(const struct iavf_ring *rx_ring,
+ const struct iavf_rx_desc *rx_desc)
+{
+ u64 qw0 = le64_to_cpu(rx_desc->qw0);
+ u64 qw1 = le64_to_cpu(rx_desc->qw1);
+ u64 qw2 = le64_to_cpu(rx_desc->qw2);
+ struct libeth_rqe_info fields;
+ bool l2tag1p, l2tag2p;
+
+ fields.eop = FIELD_GET(IAVF_RXD_LEGACY_EOP_M, qw1);
+ fields.len = FIELD_GET(IAVF_RXD_LEGACY_LENGTH_M, qw1);
+
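+ /* Only the length and EOP flag are needed for non-EOP buffers;
+ * the remaining fields are valid on the descriptor that ends the
+ * packet.
+ */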
+ if (!fields.eop)
+ return fields;
+
+ fields.rxe = FIELD_GET(IAVF_RXD_LEGACY_RXE_M, qw1);
+ fields.ptype = FIELD_GET(IAVF_RXD_LEGACY_PTYPE_M, qw1);
+ fields.vlan = 0;
+
+ if (rx_ring->flags & IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1) {
+ l2tag1p = FIELD_GET(IAVF_RXD_LEGACY_L2TAG1P_M, qw1);
+ if (l2tag1p)
+ fields.vlan = FIELD_GET(IAVF_RXD_LEGACY_L2TAG1_M, qw0);
+ } else if (rx_ring->flags & IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2) {
+ l2tag2p = FIELD_GET(IAVF_RXD_LEGACY_L2TAG2P_M, qw2);
+ if (l2tag2p)
+ fields.vlan = FIELD_GET(IAVF_RXD_LEGACY_L2TAG2_M, qw2);
+ }
+
+ return fields;
+}
+
+/**
+ * iavf_extract_flex_rx_fields - Extract fields from the Rx descriptor
+ * @rx_ring: rx descriptor ring
+ * @rx_desc: the descriptor to process
+ *
+ * Decode the Rx descriptor and extract relevant information including the
+ * size, VLAN tag, Rx packet type, end of packet field and RXE field value.
+ *
+ * This function only operates on the VIRTCHNL_RXDID_2_FLEX_SQ_NIC flexible
+ * descriptor writeback format.
+ *
+ * Return: fields extracted from the Rx descriptor.
+ */
+static struct libeth_rqe_info
+iavf_extract_flex_rx_fields(const struct iavf_ring *rx_ring,
+ const struct iavf_rx_desc *rx_desc)
+{
+ struct libeth_rqe_info fields = {};
+ u64 qw0 = le64_to_cpu(rx_desc->qw0);
+ u64 qw1 = le64_to_cpu(rx_desc->qw1);
+ u64 qw2 = le64_to_cpu(rx_desc->qw2);
+ bool l2tag1p, l2tag2p;
+
+ fields.eop = FIELD_GET(IAVF_RXD_FLEX_EOP_M, qw1);
+ fields.len = FIELD_GET(IAVF_RXD_FLEX_PKT_LEN_M, qw0);
+
+ if (!fields.eop)
+ return fields;
+
+ fields.rxe = FIELD_GET(IAVF_RXD_FLEX_RXE_M, qw1);
+ fields.ptype = FIELD_GET(IAVF_RXD_FLEX_PTYPE_M, qw0);
+ fields.vlan = 0;
+
+ if (rx_ring->flags & IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1) {
+ l2tag1p = FIELD_GET(IAVF_RXD_FLEX_L2TAG1P_M, qw1);
+ if (l2tag1p)
+ fields.vlan = FIELD_GET(IAVF_RXD_FLEX_L2TAG1_M, qw1);
+ } else if (rx_ring->flags & IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2) {
+ l2tag2p = FIELD_GET(IAVF_RXD_FLEX_L2TAG2P_M, qw2);
+ if (l2tag2p)
+ fields.vlan = FIELD_GET(IAVF_RXD_FLEX_L2TAG2_2_M, qw2);
+ }
+
+ return fields;
+}
+
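+/**
+ * iavf_extract_rx_fields - Extract fields from the Rx descriptor
+ * @rx_ring: rx descriptor ring
+ * @rx_desc: the descriptor to process
+ * @flex: is the descriptor flex or legacy
+ *
+ * Return: fields extracted from the Rx descriptor.
+ */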
+static struct libeth_rqe_info
+iavf_extract_rx_fields(const struct iavf_ring *rx_ring,
+ const struct iavf_rx_desc *rx_desc,
+ bool flex)
+{
+ if (flex)
+ return iavf_extract_flex_rx_fields(rx_ring, rx_desc);
+ else
+ return iavf_extract_legacy_rx_fields(rx_ring, rx_desc);
+}
+
+/**
* iavf_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
* @rx_ring: rx descriptor ring to transact packets on
* @budget: Total limit on number of packets to process
@@ -1136,18 +1383,17 @@ static bool iavf_is_non_eop(struct iavf_ring *rx_ring,
**/
static int iavf_clean_rx_irq(struct iavf_ring *rx_ring, int budget)
{
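+ /* The descriptor format is fixed per ring, so resolve it once
+ * instead of re-checking rxdid for every descriptor.
+ */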
+ bool flex = rx_ring->rxdid == VIRTCHNL_RXDID_2_FLEX_SQ_NIC;
unsigned int total_rx_bytes = 0, total_rx_packets = 0;
struct sk_buff *skb = rx_ring->skb;
u16 cleaned_count = IAVF_DESC_UNUSED(rx_ring);
bool failure = false;
while (likely(total_rx_packets < (unsigned int)budget)) {
+ struct libeth_rqe_info fields;
struct libeth_fqe *rx_buffer;
- union iavf_rx_desc *rx_desc;
- unsigned int size;
- u16 vlan_tag = 0;
- u8 rx_ptype;
- u64 qword;
+ struct iavf_rx_desc *rx_desc;
+ u64 qw1;
/* return some buffers to hardware, one at a time is too slow */
if (cleaned_count >= IAVF_RX_BUFFER_WRITE) {
@@ -1158,35 +1404,32 @@ static int iavf_clean_rx_irq(struct iavf_ring *rx_ring, int budget)
rx_desc = IAVF_RX_DESC(rx_ring, rx_ring->next_to_clean);
- /* status_error_len will always be zero for unused descriptors
- * because it's cleared in cleanup, and overlaps with hdr_addr
- * which is always zero because packet split isn't used, if the
- * hardware wrote DD then the length will be non-zero
- */
- qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
-
/* This memory barrier is needed to keep us from reading
* any other fields out of the rx_desc until we have
* verified the descriptor has been written back.
*/
dma_rmb();
-#define IAVF_RXD_DD BIT(IAVF_RX_DESC_STATUS_DD_SHIFT)
- if (!iavf_test_staterr(rx_desc, IAVF_RXD_DD))
+
+ qw1 = le64_to_cpu(rx_desc->qw1);
+ /* If the DD (descriptor done) field is unset, the other fields
+ * are not valid.
+ */
+ if (!iavf_is_descriptor_done(qw1, flex))
break;
- size = FIELD_GET(IAVF_RXD_QW1_LENGTH_PBUF_MASK, qword);
+ fields = iavf_extract_rx_fields(rx_ring, rx_desc, flex);
iavf_trace(clean_rx_irq, rx_ring, rx_desc, skb);
rx_buffer = &rx_ring->rx_fqes[rx_ring->next_to_clean];
- if (!libeth_rx_sync_for_cpu(rx_buffer, size))
+ if (!libeth_rx_sync_for_cpu(rx_buffer, fields.len))
goto skip_data;
/* retrieve a buffer from the ring */
if (skb)
- iavf_add_rx_frag(skb, rx_buffer, size);
+ iavf_add_rx_frag(skb, rx_buffer, fields.len);
else
- skb = iavf_build_skb(rx_buffer, size);
+ skb = iavf_build_skb(rx_buffer, fields.len);
/* exit if we failed to retrieve a buffer */
if (!skb) {
@@ -1197,15 +1440,14 @@ static int iavf_clean_rx_irq(struct iavf_ring *rx_ring, int budget)
skip_data:
cleaned_count++;
- if (iavf_is_non_eop(rx_ring, rx_desc, skb) || unlikely(!skb))
+ if (iavf_is_non_eop(rx_ring, fields) || unlikely(!skb))
continue;
- /* ERR_MASK will only have valid bits if EOP set, and
- * what we are doing here is actually checking
- * IAVF_RX_DESC_ERROR_RXE_SHIFT, since it is the zeroth bit in
- * the error field
+ /* The RXE field in the descriptor indicates MAC errors (CRC,
+ * alignment, oversize, undersize or length errors). If it is set,
+ * drop the packet.
*/
- if (unlikely(iavf_test_staterr(rx_desc, BIT(IAVF_RXD_QW1_ERROR_SHIFT)))) {
+ if (unlikely(fields.rxe)) {
dev_kfree_skb_any(skb);
skb = NULL;
continue;
@@ -1219,22 +1461,11 @@ skip_data:
/* probably a little skewed due to removing CRC */
total_rx_bytes += skb->len;
- qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
- rx_ptype = FIELD_GET(IAVF_RXD_QW1_PTYPE_MASK, qword);
-
/* populate checksum, VLAN, and protocol */
- iavf_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);
-
- if (qword & BIT(IAVF_RX_DESC_STATUS_L2TAG1P_SHIFT) &&
- rx_ring->flags & IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1)
- vlan_tag = le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1);
- if (rx_desc->wb.qword2.ext_status &
- cpu_to_le16(BIT(IAVF_RX_DESC_EXT_STATUS_L2TAG2P_SHIFT)) &&
- rx_ring->flags & IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2)
- vlan_tag = le16_to_cpu(rx_desc->wb.qword2.l2tag2_2);
+ iavf_process_skb_fields(rx_ring, rx_desc, skb, fields.ptype, flex);
iavf_trace(clean_rx_irq_rx, rx_ring, rx_desc, skb);
- iavf_receive_skb(rx_ring, skb, vlan_tag);
+ iavf_receive_skb(rx_ring, skb, fields.vlan);
skb = NULL;
/* update budget accounting */
diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.h b/drivers/net/ethernet/intel/iavf/iavf_txrx.h
index f97c702c0802..79ad554f2d53 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_txrx.h
+++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.h
@@ -80,25 +80,6 @@ enum iavf_dyn_idx_t {
BIT_ULL(IAVF_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \
BIT_ULL(IAVF_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP))
-#define iavf_rx_desc iavf_32byte_rx_desc
-
-/**
- * iavf_test_staterr - tests bits in Rx descriptor status and error fields
- * @rx_desc: pointer to receive descriptor (in le64 format)
- * @stat_err_bits: value to mask
- *
- * This function does some fast chicanery in order to return the
- * value of the mask which is really only used for boolean tests.
- * The status_error_len doesn't need to be shifted because it begins
- * at offset zero.
- */
-static inline bool iavf_test_staterr(union iavf_rx_desc *rx_desc,
- const u64 stat_err_bits)
-{
- return !!(rx_desc->wb.qword1.status_error_len &
- cpu_to_le64(stat_err_bits));
-}
-
/* How many Rx Buffers do we bundle into one write to the hardware ? */
#define IAVF_RX_INCREMENT(r, i) \
do { \
@@ -262,6 +243,8 @@ struct iavf_ring {
u16 next_to_use;
u16 next_to_clean;
+ u16 rxdid; /* Rx descriptor format */
+
u16 flags;
#define IAVF_TXR_FLAGS_WB_ON_ITR BIT(0)
#define IAVF_TXR_FLAGS_ARM_WB BIT(1)
@@ -269,6 +252,7 @@ struct iavf_ring {
#define IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1 BIT(3)
#define IAVF_TXR_FLAGS_VLAN_TAG_LOC_L2TAG2 BIT(4)
#define IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2 BIT(5)
+#define IAVF_TXRX_FLAGS_HW_TSTAMP BIT(6)
/* stats structs */
struct iavf_queue_stats stats;
@@ -295,6 +279,8 @@ struct iavf_ring {
* for this ring.
*/
+ struct iavf_ptp *ptp;
+
u32 rx_buf_len;
struct net_shaper q_shaper;
bool q_shaper_update;
diff --git a/drivers/net/ethernet/intel/iavf/iavf_type.h b/drivers/net/ethernet/intel/iavf/iavf_type.h
index f6b09e57abce..f9e1319620f4 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_type.h
+++ b/drivers/net/ethernet/intel/iavf/iavf_type.h
@@ -178,110 +178,116 @@ struct iavf_hw {
char err_str[16];
};
-/* RX Descriptors */
-union iavf_16byte_rx_desc {
- struct {
- __le64 pkt_addr; /* Packet buffer address */
- __le64 hdr_addr; /* Header buffer address */
- } read;
- struct {
- struct {
- struct {
- union {
- __le16 mirroring_status;
- __le16 fcoe_ctx_id;
- } mirr_fcoe;
- __le16 l2tag1;
- } lo_dword;
- union {
- __le32 rss; /* RSS Hash */
- __le32 fd_id; /* Flow director filter id */
- __le32 fcoe_param; /* FCoE DDP Context id */
- } hi_dword;
- } qword0;
- struct {
- /* ext status/error/pktype/length */
- __le64 status_error_len;
- } qword1;
- } wb; /* writeback */
-};
-
-union iavf_32byte_rx_desc {
- struct {
- __le64 pkt_addr; /* Packet buffer address */
- __le64 hdr_addr; /* Header buffer address */
- /* bit 0 of hdr_buffer_addr is DD bit */
- __le64 rsvd1;
- __le64 rsvd2;
- } read;
- struct {
- struct {
- struct {
- union {
- __le16 mirroring_status;
- __le16 fcoe_ctx_id;
- } mirr_fcoe;
- __le16 l2tag1;
- } lo_dword;
- union {
- __le32 rss; /* RSS Hash */
- __le32 fcoe_param; /* FCoE DDP Context id */
- /* Flow director filter id in case of
- * Programming status desc WB
- */
- __le32 fd_id;
- } hi_dword;
- } qword0;
- struct {
- /* status/error/pktype/length */
- __le64 status_error_len;
- } qword1;
- struct {
- __le16 ext_status; /* extended status */
- __le16 rsvd;
- __le16 l2tag2_1;
- __le16 l2tag2_2;
- } qword2;
- struct {
- union {
- __le32 flex_bytes_lo;
- __le32 pe_status;
- } lo_dword;
- union {
- __le32 flex_bytes_hi;
- __le32 fd_id;
- } hi_dword;
- } qword3;
- } wb; /* writeback */
-};
-
-enum iavf_rx_desc_status_bits {
- /* Note: These are predefined bit offsets */
- IAVF_RX_DESC_STATUS_DD_SHIFT = 0,
- IAVF_RX_DESC_STATUS_EOF_SHIFT = 1,
- IAVF_RX_DESC_STATUS_L2TAG1P_SHIFT = 2,
- IAVF_RX_DESC_STATUS_L3L4P_SHIFT = 3,
- IAVF_RX_DESC_STATUS_CRCP_SHIFT = 4,
- IAVF_RX_DESC_STATUS_TSYNINDX_SHIFT = 5, /* 2 BITS */
- IAVF_RX_DESC_STATUS_TSYNVALID_SHIFT = 7,
- /* Note: Bit 8 is reserved in X710 and XL710 */
- IAVF_RX_DESC_STATUS_EXT_UDP_0_SHIFT = 8,
- IAVF_RX_DESC_STATUS_UMBCAST_SHIFT = 9, /* 2 BITS */
- IAVF_RX_DESC_STATUS_FLM_SHIFT = 11,
- IAVF_RX_DESC_STATUS_FLTSTAT_SHIFT = 12, /* 2 BITS */
- IAVF_RX_DESC_STATUS_LPBK_SHIFT = 14,
- IAVF_RX_DESC_STATUS_IPV6EXADD_SHIFT = 15,
- IAVF_RX_DESC_STATUS_RESERVED_SHIFT = 16, /* 2 BITS */
- /* Note: For non-tunnel packets INT_UDP_0 is the right status for
- * UDP header
- */
- IAVF_RX_DESC_STATUS_INT_UDP_0_SHIFT = 18,
- IAVF_RX_DESC_STATUS_LAST /* this entry must be last!!! */
-};
-
-#define IAVF_RXD_QW1_STATUS_SHIFT 0
-#define IAVF_RXD_QW1_STATUS_MASK ((BIT(IAVF_RX_DESC_STATUS_LAST) - 1) \
- << IAVF_RXD_QW1_STATUS_SHIFT)
+/**
+ * struct iavf_rx_desc - Receive descriptor (both legacy and flexible)
+ * @qw0: quad word 0 fields:
+ * Legacy: Descriptor Type; Mirror ID; L2TAG1P (S-TAG); Filter Status
+ * Flex: Descriptor Type; Mirror ID; UMBCAST; Packet Type; Flexible Flags
+ * Section 0; Packet Length; Header Length; Split Header Flag;
+ * Flexible Flags section 1 / Extended Status
+ * @qw1: quad word 1 fields:
+ * Legacy: Status Field; Error Field; Packet Type; Packet Length (packet,
+ * header, Split Header Flag)
+ * Flex: Status / Error 0 Field; L2TAG1P (S-TAG); Flexible Metadata
+ * Container #0; Flexible Metadata Container #1
+ * @qw2: quad word 2 fields:
+ * Legacy: Extended Status; 1st L2TAG2P (C-TAG); 2nd L2TAG2P (C-TAG)
+ * Flex: Status / Error 1 Field; Flexible Flags section 2; Timestamp Low;
+ * 1st L2TAG2 (C-TAG); 2nd L2TAG2 (C-TAG)
+ * @qw3: quad word 3 fields:
+ * Legacy: FD Filter ID / Flexible Bytes
+ * Flex: Flexible Metadata Container #2; Flexible Metadata Container #3;
+ * Flexible Metadata Container #4 / Timestamp High 0; Flexible
+ * Metadata Container #5 / Timestamp High 1;
+ */
+struct iavf_rx_desc {
+ aligned_le64 qw0;
+/* The hash signature (RSS) */
+#define IAVF_RXD_LEGACY_RSS_M GENMASK_ULL(63, 32)
+/* Stripped C-TAG VLAN from the receive packet */
+#define IAVF_RXD_LEGACY_L2TAG1_M GENMASK_ULL(33, 16)
+/* Packet type */
+#define IAVF_RXD_FLEX_PTYPE_M GENMASK_ULL(25, 16)
+/* Packet length */
+#define IAVF_RXD_FLEX_PKT_LEN_M GENMASK_ULL(45, 32)
+
+ aligned_le64 qw1;
+/* Descriptor done indication flag. */
+#define IAVF_RXD_LEGACY_DD_M BIT(0)
+/* End of packet. Set to 1 if this descriptor is the last one of the packet */
+#define IAVF_RXD_LEGACY_EOP_M BIT(1)
+/* L2 TAG 1 presence indication */
+#define IAVF_RXD_LEGACY_L2TAG1P_M BIT(2)
+/* Detectable L3 and L4 integrity check is processed by the HW */
+#define IAVF_RXD_LEGACY_L3L4P_M BIT(3)
+/* Set when an IPv6 packet contains a Destination Options Header or a Routing
+ * Header.
+ */
+#define IAVF_RXD_LEGACY_IPV6EXADD_M BIT(15)
+/* Receive MAC Errors: CRC; Alignment; Oversize; Undersizes; Length error */
+#define IAVF_RXD_LEGACY_RXE_M BIT(19)
+/* Checksum reports:
+ * - IPE: IP checksum error
+ * - L4E: L4 integrity error
+ * - EIPE: External IP header (tunneled packets)
+ */
+#define IAVF_RXD_LEGACY_IPE_M BIT(22)
+#define IAVF_RXD_LEGACY_L4E_M BIT(23)
+#define IAVF_RXD_LEGACY_EIPE_M BIT(24)
+/* Set for packets that skip checksum calculation in pre-parser */
+#define IAVF_RXD_LEGACY_PPRS_M BIT(26)
+/* Indicates the content in the Filter Status field */
+#define IAVF_RXD_LEGACY_FLTSTAT_M GENMASK_ULL(13, 12)
+/* Packet type */
+#define IAVF_RXD_LEGACY_PTYPE_M GENMASK_ULL(37, 30)
+/* Packet length */
+#define IAVF_RXD_LEGACY_LENGTH_M GENMASK_ULL(51, 38)
+/* Descriptor done indication flag */
+#define IAVF_RXD_FLEX_DD_M BIT(0)
+/* End of packet. Set to 1 if this descriptor is the last one of the packet */
+#define IAVF_RXD_FLEX_EOP_M BIT(1)
+/* Detectable L3 and L4 integrity check is processed by the HW */
+#define IAVF_RXD_FLEX_L3L4P_M BIT(3)
+/* Checksum reports:
+ * - IPE: IP checksum error
+ * - L4E: L4 integrity error
+ * - EIPE: External IP header (tunneled packets)
+ * - EUDPE: External UDP checksum error (tunneled packets)
+ */
+#define IAVF_RXD_FLEX_XSUM_IPE_M BIT(4)
+#define IAVF_RXD_FLEX_XSUM_L4E_M BIT(5)
+#define IAVF_RXD_FLEX_XSUM_EIPE_M BIT(6)
+#define IAVF_RXD_FLEX_XSUM_EUDPE_M BIT(7)
+/* Set when an IPv6 packet contains a Destination Options Header or a Routing
+ * Header.
+ */
+#define IAVF_RXD_FLEX_IPV6EXADD_M BIT(9)
+/* Receive MAC Errors: CRC; Alignment; Oversize; Undersizes; Length error */
+#define IAVF_RXD_FLEX_RXE_M BIT(10)
+/* Indicates that the RSS/HASH result is valid */
+#define IAVF_RXD_FLEX_RSS_VALID_M BIT(12)
+/* L2 TAG 1 presence indication */
+#define IAVF_RXD_FLEX_L2TAG1P_M BIT(13)
+/* Stripped L2 Tag from the receive packet */
+#define IAVF_RXD_FLEX_L2TAG1_M GENMASK_ULL(31, 16)
+/* The hash signature (RSS) */
+#define IAVF_RXD_FLEX_RSS_HASH_M GENMASK_ULL(63, 32)
+
+ aligned_le64 qw2;
+/* L2 Tag 2 Presence */
+#define IAVF_RXD_LEGACY_L2TAG2P_M BIT(0)
+/* Stripped S-TAG VLAN from the receive packet */
+#define IAVF_RXD_LEGACY_L2TAG2_M GENMASK_ULL(63, 32)
+/* Stripped S-TAG VLAN from the receive packet */
+#define IAVF_RXD_FLEX_L2TAG2_2_M GENMASK_ULL(63, 48)
+/* The packet is a UDP tunneled packet */
+#define IAVF_RXD_FLEX_NAT_M BIT(4)
+/* L2 Tag 2 Presence */
+#define IAVF_RXD_FLEX_L2TAG2P_M BIT(11)
+ aligned_le64 qw3;
+#define IAVF_RXD_FLEX_QW3_TSTAMP_HIGH_M GENMASK_ULL(63, 32)
+} __aligned(4 * sizeof(__le64));
+static_assert(sizeof(struct iavf_rx_desc) == 32);
#define IAVF_RXD_QW1_STATUS_TSYNINDX_SHIFT IAVF_RX_DESC_STATUS_TSYNINDX_SHIFT
#define IAVF_RXD_QW1_STATUS_TSYNINDX_MASK (0x3UL << \
@@ -298,22 +304,6 @@ enum iavf_rx_desc_fltstat_values {
IAVF_RX_DESC_FLTSTAT_RSS_HASH = 3,
};
-#define IAVF_RXD_QW1_ERROR_SHIFT 19
-#define IAVF_RXD_QW1_ERROR_MASK (0xFFUL << IAVF_RXD_QW1_ERROR_SHIFT)
-
-enum iavf_rx_desc_error_bits {
- /* Note: These are predefined bit offsets */
- IAVF_RX_DESC_ERROR_RXE_SHIFT = 0,
- IAVF_RX_DESC_ERROR_RECIPE_SHIFT = 1,
- IAVF_RX_DESC_ERROR_HBO_SHIFT = 2,
- IAVF_RX_DESC_ERROR_L3L4E_SHIFT = 3, /* 3 BITS */
- IAVF_RX_DESC_ERROR_IPE_SHIFT = 3,
- IAVF_RX_DESC_ERROR_L4E_SHIFT = 4,
- IAVF_RX_DESC_ERROR_EIPE_SHIFT = 5,
- IAVF_RX_DESC_ERROR_OVERSIZE_SHIFT = 6,
- IAVF_RX_DESC_ERROR_PPRS_SHIFT = 7
-};
-
enum iavf_rx_desc_error_l3l4e_fcoe_masks {
IAVF_RX_DESC_ERROR_L3L4E_NONE = 0,
IAVF_RX_DESC_ERROR_L3L4E_PROT = 1,
@@ -322,13 +312,6 @@ enum iavf_rx_desc_error_l3l4e_fcoe_masks {
IAVF_RX_DESC_ERROR_L3L4E_DMAC_WARN = 4
};
-#define IAVF_RXD_QW1_PTYPE_SHIFT 30
-#define IAVF_RXD_QW1_PTYPE_MASK (0xFFULL << IAVF_RXD_QW1_PTYPE_SHIFT)
-
-#define IAVF_RXD_QW1_LENGTH_PBUF_SHIFT 38
-#define IAVF_RXD_QW1_LENGTH_PBUF_MASK (0x3FFFULL << \
- IAVF_RXD_QW1_LENGTH_PBUF_SHIFT)
-
#define IAVF_RXD_QW1_LENGTH_HBUF_SHIFT 52
#define IAVF_RXD_QW1_LENGTH_HBUF_MASK (0x7FFULL << \
IAVF_RXD_QW1_LENGTH_HBUF_SHIFT)
@@ -347,6 +330,8 @@ enum iavf_rx_desc_ext_status_bits {
IAVF_RX_DESC_EXT_STATUS_PELONGB_SHIFT = 11,
};
+#define IAVF_RX_DESC_EXT_STATUS_L2TAG2P_M BIT(IAVF_RX_DESC_EXT_STATUS_L2TAG2P_SHIFT)
+
enum iavf_rx_desc_pe_status_bits {
/* Note: These are predefined bit offsets */
IAVF_RX_DESC_PE_STATUS_QPID_SHIFT = 0, /* 18 BITS */
diff --git a/drivers/net/ethernet/intel/iavf/iavf_types.h b/drivers/net/ethernet/intel/iavf/iavf_types.h
new file mode 100644
index 000000000000..a095855122bf
--- /dev/null
+++ b/drivers/net/ethernet/intel/iavf/iavf_types.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2024 Intel Corporation. */
+
+#ifndef _IAVF_TYPES_H_
+#define _IAVF_TYPES_H_
+
+#include "iavf_types.h"
+
+#include <linux/avf/virtchnl.h>
+#include <linux/ptp_clock_kernel.h>
+
+/* structure used to queue PTP commands for processing */
+struct iavf_ptp_aq_cmd {
+ struct list_head list;
+ enum virtchnl_ops v_opcode:16;
+ u16 msglen;
+ u8 msg[] __counted_by(msglen);
+};
+
+struct iavf_ptp {
+ wait_queue_head_t phc_time_waitqueue;
+ struct virtchnl_ptp_caps hw_caps;
+ struct ptp_clock_info info;
+ struct ptp_clock *clock;
+ struct list_head aq_cmds;
+ u64 cached_phc_time;
+ unsigned long cached_phc_updated;
+ /* Lock protecting access to the AQ command list */
+ struct mutex aq_cmd_lock;
+ struct kernel_hwtstamp_config hwtstamp_config;
+ bool phc_time_ready:1;
+};
+
+#endif /* _IAVF_TYPES_H_ */
diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
index 15d388b431c5..a6f0e5990be2 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
@@ -4,6 +4,7 @@
#include <linux/net/intel/libie/rx.h>
#include "iavf.h"
+#include "iavf_ptp.h"
#include "iavf_prototype.h"
/**
@@ -144,9 +145,11 @@ int iavf_send_vf_config_msg(struct iavf_adapter *adapter)
VIRTCHNL_VF_OFFLOAD_ENCAP |
VIRTCHNL_VF_OFFLOAD_TC_U32 |
VIRTCHNL_VF_OFFLOAD_VLAN_V2 |
+ VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC |
VIRTCHNL_VF_OFFLOAD_CRC |
VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM |
VIRTCHNL_VF_OFFLOAD_REQ_QUEUES |
+ VIRTCHNL_VF_CAP_PTP |
VIRTCHNL_VF_OFFLOAD_ADQ |
VIRTCHNL_VF_OFFLOAD_USO |
VIRTCHNL_VF_OFFLOAD_FDIR_PF |
@@ -177,6 +180,54 @@ int iavf_send_vf_offload_vlan_v2_msg(struct iavf_adapter *adapter)
NULL, 0);
}
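+/**
+ * iavf_send_vf_supported_rxdids_msg - Request supported Rx descriptor IDs
+ * @adapter: private adapter structure
+ *
+ * Send the VIRTCHNL_OP_GET_SUPPORTED_RXDIDS request to the PF to learn which
+ * Rx descriptor formats the device may use.
+ *
+ * Return: 0 on success, -EOPNOTSUPP if the capability was not negotiated,
+ * error code otherwise.
+ */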
+int iavf_send_vf_supported_rxdids_msg(struct iavf_adapter *adapter)
+{
+ adapter->aq_required &= ~IAVF_FLAG_AQ_GET_SUPPORTED_RXDIDS;
+
+ if (!IAVF_RXDID_ALLOWED(adapter))
+ return -EOPNOTSUPP;
+
+ adapter->current_op = VIRTCHNL_OP_GET_SUPPORTED_RXDIDS;
+
+ return iavf_send_pf_msg(adapter, VIRTCHNL_OP_GET_SUPPORTED_RXDIDS,
+ NULL, 0);
+}
+
+/**
+ * iavf_send_vf_ptp_caps_msg - Send request for PTP capabilities
+ * @adapter: private adapter structure
+ *
+ * Send the VIRTCHNL_OP_1588_PTP_GET_CAPS command to the PF to request the PTP
+ * capabilities available to this device. This includes the following
+ * potential access:
+ *
+ * * READ_PHC - access to read the PTP hardware clock time
+ * * RX_TSTAMP - access to request Rx timestamps on all received packets
+ *
+ * The PF will reply with the same opcode and a filled-out copy of the
+ * virtchnl_ptp_caps structure, which defines which of these features are
+ * accessible to this device.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int iavf_send_vf_ptp_caps_msg(struct iavf_adapter *adapter)
+{
+ struct virtchnl_ptp_caps hw_caps = {
+ .caps = VIRTCHNL_1588_PTP_CAP_READ_PHC |
+ VIRTCHNL_1588_PTP_CAP_RX_TSTAMP
+ };
+
+ adapter->aq_required &= ~IAVF_FLAG_AQ_GET_PTP_CAPS;
+
+ if (!IAVF_PTP_ALLOWED(adapter))
+ return -EOPNOTSUPP;
+
+ adapter->current_op = VIRTCHNL_OP_1588_PTP_GET_CAPS;
+
+ return iavf_send_pf_msg(adapter, VIRTCHNL_OP_1588_PTP_GET_CAPS,
+ (u8 *)&hw_caps, sizeof(hw_caps));
+}
+
/**
* iavf_validate_num_queues
* @adapter: adapter structure
@@ -263,6 +314,40 @@ int iavf_get_vf_vlan_v2_caps(struct iavf_adapter *adapter)
return err;
}
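+/**
+ * iavf_get_vf_supported_rxdids - Poll for the supported RXDIDs reply
+ * @adapter: private adapter structure
+ *
+ * Poll the virtchnl queue for the VIRTCHNL_OP_GET_SUPPORTED_RXDIDS reply from
+ * the PF and cache the supported Rx descriptor ID bitmap.
+ *
+ * Return: 0 on success, error code otherwise.
+ */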
+int iavf_get_vf_supported_rxdids(struct iavf_adapter *adapter)
+{
+ struct iavf_arq_event_info event;
+ u64 rxdids;
+ int err;
+
+ event.msg_buf = (u8 *)&rxdids;
+ event.buf_len = sizeof(rxdids);
+
+ err = iavf_poll_virtchnl_msg(&adapter->hw, &event,
+ VIRTCHNL_OP_GET_SUPPORTED_RXDIDS);
+ if (!err)
+ adapter->supp_rxdids = rxdids;
+
+ return err;
+}
+
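+/**
+ * iavf_get_vf_ptp_caps - Poll for the PTP capabilities reply
+ * @adapter: private adapter structure
+ *
+ * Poll the virtchnl queue for the VIRTCHNL_OP_1588_PTP_GET_CAPS reply from
+ * the PF and store the reported capabilities.
+ *
+ * Return: 0 on success, error code otherwise.
+ */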
+int iavf_get_vf_ptp_caps(struct iavf_adapter *adapter)
+{
+ struct virtchnl_ptp_caps caps = {};
+ struct iavf_arq_event_info event;
+ int err;
+
+ event.msg_buf = (u8 *)&caps;
+ event.buf_len = sizeof(caps);
+
+ err = iavf_poll_virtchnl_msg(&adapter->hw, &event,
+ VIRTCHNL_OP_1588_PTP_GET_CAPS);
+ if (!err)
+ adapter->ptp.hw_caps = caps;
+
+ return err;
+}
+
/**
* iavf_configure_queues
* @adapter: adapter structure
@@ -275,6 +360,7 @@ void iavf_configure_queues(struct iavf_adapter *adapter)
int pairs = adapter->num_active_queues;
struct virtchnl_queue_pair_info *vqpi;
u32 i, max_frame;
+ u8 rx_flags = 0;
size_t len;
max_frame = LIBIE_MAX_RX_FRM_LEN(adapter->rx_rings->pp->p.offset);
@@ -292,6 +378,9 @@ void iavf_configure_queues(struct iavf_adapter *adapter)
if (!vqci)
return;
+ if (iavf_ptp_cap_supported(adapter, VIRTCHNL_1588_PTP_CAP_RX_TSTAMP))
+ rx_flags |= VIRTCHNL_PTP_RX_TSTAMP;
+
vqci->vsi_id = adapter->vsi_res->vsi_id;
vqci->num_queue_pairs = pairs;
vqpi = vqci->qpair;
@@ -309,9 +398,12 @@ void iavf_configure_queues(struct iavf_adapter *adapter)
vqpi->rxq.dma_ring_addr = adapter->rx_rings[i].dma;
vqpi->rxq.max_pkt_size = max_frame;
vqpi->rxq.databuffer_size = adapter->rx_rings[i].rx_buf_len;
+ if (IAVF_RXDID_ALLOWED(adapter))
+ vqpi->rxq.rxdid = adapter->rxdid;
if (CRC_OFFLOAD_ALLOWED(adapter))
vqpi->rxq.crc_disable = !!(adapter->netdev->features &
NETIF_F_RXFCS);
+ vqpi->rxq.flags = rx_flags;
vqpi++;
}
@@ -1402,6 +1494,67 @@ void iavf_disable_vlan_insertion_v2(struct iavf_adapter *adapter, u16 tpid)
VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2);
}
+#if IS_ENABLED(CONFIG_PTP_1588_CLOCK)
+/**
+ * iavf_virtchnl_send_ptp_cmd - Send one queued PTP command
+ * @adapter: adapter private structure
+ *
+ * De-queue one PTP command request and send the command message to the PF.
+ * Clear IAVF_FLAG_AQ_SEND_PTP_CMD if no more messages are left to send.
+ */
+void iavf_virtchnl_send_ptp_cmd(struct iavf_adapter *adapter)
+{
+ struct iavf_ptp_aq_cmd *cmd;
+ int err;
+
+ if (!adapter->ptp.clock) {
+ /* This shouldn't be possible to hit, since no messages should
+ * be queued if PTP is not initialized.
+ */
+ pci_err(adapter->pdev, "PTP is not initialized\n");
+ adapter->aq_required &= ~IAVF_FLAG_AQ_SEND_PTP_CMD;
+ return;
+ }
+
+ mutex_lock(&adapter->ptp.aq_cmd_lock);
+ cmd = list_first_entry_or_null(&adapter->ptp.aq_cmds,
+ struct iavf_ptp_aq_cmd, list);
+ if (!cmd) {
+ /* no further PTP messages to send */
+ adapter->aq_required &= ~IAVF_FLAG_AQ_SEND_PTP_CMD;
+ goto out_unlock;
+ }
+
+ if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
+ /* bail because we already have a command pending */
+ pci_err(adapter->pdev,
+ "Cannot send PTP command %d, command %d pending\n",
+ cmd->v_opcode, adapter->current_op);
+ goto out_unlock;
+ }
+
+ err = iavf_send_pf_msg(adapter, cmd->v_opcode, cmd->msg, cmd->msglen);
+ if (!err) {
+ /* Command was sent without errors, so we can remove it from
+ * the list and discard it.
+ */
+ list_del(&cmd->list);
+ kfree(cmd);
+ } else {
+ /* We failed to send the command, try again next cycle */
+ pci_err(adapter->pdev, "Failed to send PTP command %d\n",
+ cmd->v_opcode);
+ }
+
+ if (list_empty(&adapter->ptp.aq_cmds))
+ /* no further PTP messages to send */
+ adapter->aq_required &= ~IAVF_FLAG_AQ_SEND_PTP_CMD;
+
+out_unlock:
+ mutex_unlock(&adapter->ptp.aq_cmd_lock);
+}
+#endif /* IS_ENABLED(CONFIG_PTP_1588_CLOCK) */
+
/**
* iavf_print_link_message - print link up or down
* @adapter: adapter structure
@@ -2098,6 +2251,37 @@ static void iavf_activate_fdir_filters(struct iavf_adapter *adapter)
}
/**
+ * iavf_virtchnl_ptp_get_time - Respond to VIRTCHNL_OP_1588_PTP_GET_TIME
+ * @adapter: private adapter structure
+ * @data: the message from the PF
+ * @len: length of the message from the PF
+ *
+ * Handle the VIRTCHNL_OP_1588_PTP_GET_TIME message from the PF. This message
+ * is sent by the PF in response to the same op as a request from the VF.
+ * Extract the 64bit nanoseconds time from the message and store it in
+ * cached_phc_time. Then, notify any thread that is waiting for the update via
+ * the wait queue.
+ */
+static void iavf_virtchnl_ptp_get_time(struct iavf_adapter *adapter,
+ void *data, u16 len)
+{
+ struct virtchnl_phc_time *msg = data;
+
+ if (len != sizeof(*msg)) {
+ dev_err_once(&adapter->pdev->dev,
+ "Invalid VIRTCHNL_OP_1588_PTP_GET_TIME from PF. Got size %u, expected %zu\n",
+ len, sizeof(*msg));
+ return;
+ }
+
+ adapter->ptp.cached_phc_time = msg->time;
+ adapter->ptp.cached_phc_updated = jiffies;
+ adapter->ptp.phc_time_ready = true;
+
+ wake_up(&adapter->ptp.phc_time_waitqueue);
+}
+
+/**
* iavf_virtchnl_completion
* @adapter: adapter structure
* @v_opcode: opcode sent by PF
@@ -2509,6 +2693,25 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
aq_required;
}
break;
+ case VIRTCHNL_OP_GET_SUPPORTED_RXDIDS:
+ if (msglen != sizeof(u64))
+ return;
+
+ adapter->supp_rxdids = *(u64 *)msg;
+
+ break;
+ case VIRTCHNL_OP_1588_PTP_GET_CAPS:
+ if (msglen != sizeof(adapter->ptp.hw_caps))
+ return;
+
+ adapter->ptp.hw_caps = *(struct virtchnl_ptp_caps *)msg;
+
+ /* process any state change needed due to new capabilities */
+ iavf_ptp_process_caps(adapter);
+ break;
+ case VIRTCHNL_OP_1588_PTP_GET_TIME:
+ iavf_virtchnl_ptp_get_time(adapter, msg, msglen);
+ break;
case VIRTCHNL_OP_ENABLE_QUEUES:
/* enable transmits */
iavf_irq_enable(adapter, true);
diff --git a/drivers/net/ethernet/intel/ice/devlink/devlink.c b/drivers/net/ethernet/intel/ice/devlink/devlink.c
index dbdb83567364..fcb199efbea5 100644
--- a/drivers/net/ethernet/intel/ice/devlink/devlink.c
+++ b/drivers/net/ethernet/intel/ice/devlink/devlink.c
@@ -1205,6 +1205,25 @@ static int ice_devlink_set_parent(struct devlink_rate *devlink_rate,
return status;
}
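+/**
+ * ice_set_min_max_msix - Load MSI-X limits from the devlink parameters
+ * @pf: pointer to the PF structure
+ *
+ * Read the driverinit values of the MSIX_VEC_PER_PF_MIN and
+ * MSIX_VEC_PER_PF_MAX devlink parameters and use them as the PF MSI-X range.
+ */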
+static void ice_set_min_max_msix(struct ice_pf *pf)
+{
+ struct devlink *devlink = priv_to_devlink(pf);
+ union devlink_param_value val;
+ int err;
+
+ err = devl_param_driverinit_value_get(devlink,
+ DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MIN,
+ &val);
+ if (!err)
+ pf->msix.min = val.vu32;
+
+ err = devl_param_driverinit_value_get(devlink,
+ DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MAX,
+ &val);
+ if (!err)
+ pf->msix.max = val.vu32;
+}
+
/**
* ice_devlink_reinit_up - do reinit of the given PF
* @pf: pointer to the PF struct
@@ -1220,6 +1239,9 @@ static int ice_devlink_reinit_up(struct ice_pf *pf)
return err;
}
+ /* load MSI-X values */
+ ice_set_min_max_msix(pf);
+
err = ice_init_dev(pf);
if (err)
goto unroll_hw_init;
@@ -1533,6 +1555,43 @@ static int ice_devlink_local_fwd_validate(struct devlink *devlink, u32 id,
return 0;
}
+static int
+ice_devlink_msix_max_pf_validate(struct devlink *devlink, u32 id,
+ union devlink_param_value val,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_pf *pf = devlink_priv(devlink);
+
+ if (val.vu32 > pf->hw.func_caps.common_cap.num_msix_vectors)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int
+ice_devlink_msix_min_pf_validate(struct devlink *devlink, u32 id,
+ union devlink_param_value val,
+ struct netlink_ext_ack *extack)
+{
+ if (val.vu32 < ICE_MIN_MSIX)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int ice_devlink_enable_rdma_validate(struct devlink *devlink, u32 id,
+ union devlink_param_value val,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_pf *pf = devlink_priv(devlink);
+ bool new_state = val.vbool;
+
+ if (new_state && !test_bit(ICE_FLAG_RDMA_ENA, pf->flags))
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+
enum ice_param_id {
ICE_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
ICE_DEVLINK_PARAM_ID_TX_SCHED_LAYERS,
@@ -1548,6 +1607,17 @@ static const struct devlink_param ice_dvl_rdma_params[] = {
ice_devlink_enable_iw_get,
ice_devlink_enable_iw_set,
ice_devlink_enable_iw_validate),
+ DEVLINK_PARAM_GENERIC(ENABLE_RDMA, BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
+ NULL, NULL, ice_devlink_enable_rdma_validate),
+};
+
+static const struct devlink_param ice_dvl_msix_params[] = {
+ DEVLINK_PARAM_GENERIC(MSIX_VEC_PER_PF_MAX,
+ BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
+ NULL, NULL, ice_devlink_msix_max_pf_validate),
+ DEVLINK_PARAM_GENERIC(MSIX_VEC_PER_PF_MIN,
+ BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
+ NULL, NULL, ice_devlink_msix_min_pf_validate),
};
static const struct devlink_param ice_dvl_sched_params[] = {
@@ -1651,6 +1721,7 @@ void ice_devlink_unregister(struct ice_pf *pf)
int ice_devlink_register_params(struct ice_pf *pf)
{
struct devlink *devlink = priv_to_devlink(pf);
+ union devlink_param_value value;
struct ice_hw *hw = &pf->hw;
int status;
@@ -1659,10 +1730,39 @@ int ice_devlink_register_params(struct ice_pf *pf)
if (status)
return status;
+ status = devl_params_register(devlink, ice_dvl_msix_params,
+ ARRAY_SIZE(ice_dvl_msix_params));
+ if (status)
+ goto unregister_rdma_params;
+
if (hw->func_caps.common_cap.tx_sched_topo_comp_mode_en)
status = devl_params_register(devlink, ice_dvl_sched_params,
ARRAY_SIZE(ice_dvl_sched_params));
+ if (status)
+ goto unregister_msix_params;
+
+ value.vu32 = pf->msix.max;
+ devl_param_driverinit_value_set(devlink,
+ DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MAX,
+ value);
+ value.vu32 = pf->msix.min;
+ devl_param_driverinit_value_set(devlink,
+ DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MIN,
+ value);
+
+ value.vbool = test_bit(ICE_FLAG_RDMA_ENA, pf->flags);
+ devl_param_driverinit_value_set(devlink,
+ DEVLINK_PARAM_GENERIC_ID_ENABLE_RDMA,
+ value);
+
+ return 0;
+unregister_msix_params:
+ devl_params_unregister(devlink, ice_dvl_msix_params,
+ ARRAY_SIZE(ice_dvl_msix_params));
+unregister_rdma_params:
+ devl_params_unregister(devlink, ice_dvl_rdma_params,
+ ARRAY_SIZE(ice_dvl_rdma_params));
return status;
}
@@ -1673,6 +1773,8 @@ void ice_devlink_unregister_params(struct ice_pf *pf)
devl_params_unregister(devlink, ice_dvl_rdma_params,
ARRAY_SIZE(ice_dvl_rdma_params));
+ devl_params_unregister(devlink, ice_dvl_msix_params,
+ ARRAY_SIZE(ice_dvl_msix_params));
if (hw->func_caps.common_cap.tx_sched_topo_comp_mode_en)
devl_params_unregister(devlink, ice_dvl_sched_params,
diff --git a/drivers/net/ethernet/intel/ice/devlink/health.c b/drivers/net/ethernet/intel/ice/devlink/health.c
index ea40f7941259..19c3d37aa768 100644
--- a/drivers/net/ethernet/intel/ice/devlink/health.c
+++ b/drivers/net/ethernet/intel/ice/devlink/health.c
@@ -25,10 +25,10 @@ struct ice_health_status {
* The below lookup requires to be sorted by code.
*/
-static const char *const ice_common_port_solutions =
+static const char ice_common_port_solutions[] =
"Check your cable connection. Change or replace the module or cable. Manually set speed and duplex.";
-static const char *const ice_port_number_label = "Port Number";
-static const char *const ice_update_nvm_solution = "Update to the latest NVM image.";
+static const char ice_port_number_label[] = "Port Number";
+static const char ice_update_nvm_solution[] = "Update to the latest NVM image.";
static const struct ice_health_status ice_health_status_lookup[] = {
{ICE_AQC_HEALTH_STATUS_ERR_UNKNOWN_MOD_STRICT, "An unsupported module was detected.",
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index 71e05d30f0fd..fd083647c14a 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -97,9 +97,6 @@
#define ICE_MIN_LAN_OICR_MSIX 1
#define ICE_MIN_MSIX (ICE_MIN_LAN_TXRX_MSIX + ICE_MIN_LAN_OICR_MSIX)
#define ICE_FDIR_MSIX 2
-#define ICE_RDMA_NUM_AEQ_MSIX 4
-#define ICE_MIN_RDMA_MSIX 2
-#define ICE_ESWITCH_MSIX 1
#define ICE_NO_VSI 0xffff
#define ICE_VSI_MAP_CONTIG 0
#define ICE_VSI_MAP_SCATTER 1
@@ -204,6 +201,7 @@ enum ice_feature {
ICE_F_SMA_CTRL,
ICE_F_CGU,
ICE_F_GNSS,
+ ICE_F_GCS,
ICE_F_ROCE_LAG,
ICE_F_SRIOV_LAG,
ICE_F_MBX_LIMIT,
@@ -478,9 +476,6 @@ struct ice_q_vector {
struct ice_ring_container rx;
struct ice_ring_container tx;
- cpumask_t affinity_mask;
- struct irq_affinity_notify affinity_notify;
-
struct ice_channel *ch;
char name[ICE_INT_NAME_STR_LEN];
@@ -542,6 +537,14 @@ struct ice_agg_node {
u8 valid;
};
+struct ice_pf_msix {
+ u32 cur;
+ u32 min;
+ u32 max;
+ u32 total;
+ u32 rest;
+};
+
struct ice_pf {
struct pci_dev *pdev;
struct ice_adapter *adapter;
@@ -556,13 +559,7 @@ struct ice_pf {
/* OS reserved IRQ details */
struct msix_entry *msix_entries;
struct ice_irq_tracker irq_tracker;
- /* First MSIX vector used by SR-IOV VFs. Calculated by subtracting the
- * number of MSIX vectors needed for all SR-IOV VFs from the number of
- * MSIX vectors allowed on this PF.
- */
- u16 sriov_base_vector;
- unsigned long *sriov_irq_bm; /* bitmap to track irq usage */
- u16 sriov_irq_size; /* size of the irq_bm bitmap */
+ struct ice_virt_irq_tracker virt_irq_tracker;
u16 ctrl_vsi_idx; /* control VSI index in pf->vsi array */
@@ -612,7 +609,7 @@ struct ice_pf {
struct msi_map ll_ts_irq; /* LL_TS interrupt MSIX vector */
u16 max_pf_txqs; /* Total Tx queues PF wide */
u16 max_pf_rxqs; /* Total Rx queues PF wide */
- u16 num_lan_msix; /* Total MSIX vectors for base driver */
+ struct ice_pf_msix msix;
u16 num_lan_tx; /* num LAN Tx queues setup */
u16 num_lan_rx; /* num LAN Rx queues setup */
u16 next_vsi; /* Next free slot in pf->vsi[] - 0-based! */
@@ -1047,10 +1044,5 @@ static inline void ice_clear_rdma_cap(struct ice_pf *pf)
clear_bit(ICE_FLAG_RDMA_ENA, pf->flags);
}
-static inline enum ice_phy_model ice_get_phy_model(const struct ice_hw *hw)
-{
- return hw->ptp.phy_model;
-}
-
extern const struct xdp_metadata_ops ice_xdp_md_ops;
#endif /* _ICE_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_arfs.c b/drivers/net/ethernet/intel/ice/ice_arfs.c
index 7cee365cc7d1..2bc5c7f59844 100644
--- a/drivers/net/ethernet/intel/ice/ice_arfs.c
+++ b/drivers/net/ethernet/intel/ice/ice_arfs.c
@@ -511,7 +511,7 @@ void ice_init_arfs(struct ice_vsi *vsi)
struct hlist_head *arfs_fltr_list;
unsigned int i;
- if (!vsi || vsi->type != ICE_VSI_PF)
+ if (!vsi || vsi->type != ICE_VSI_PF || ice_is_arfs_active(vsi))
return;
arfs_fltr_list = kcalloc(ICE_MAX_ARFS_LIST, sizeof(*arfs_fltr_list),
@@ -571,25 +571,6 @@ void ice_clear_arfs(struct ice_vsi *vsi)
}
/**
- * ice_free_cpu_rx_rmap - free setup CPU reverse map
- * @vsi: the VSI to be forwarded to
- */
-void ice_free_cpu_rx_rmap(struct ice_vsi *vsi)
-{
- struct net_device *netdev;
-
- if (!vsi || vsi->type != ICE_VSI_PF)
- return;
-
- netdev = vsi->netdev;
- if (!netdev || !netdev->rx_cpu_rmap)
- return;
-
- free_irq_cpu_rmap(netdev->rx_cpu_rmap);
- netdev->rx_cpu_rmap = NULL;
-}
-
-/**
* ice_set_cpu_rx_rmap - setup CPU reverse map for each queue
* @vsi: the VSI to be forwarded to
*/
@@ -597,7 +578,6 @@ int ice_set_cpu_rx_rmap(struct ice_vsi *vsi)
{
struct net_device *netdev;
struct ice_pf *pf;
- int i;
if (!vsi || vsi->type != ICE_VSI_PF)
return 0;
@@ -610,18 +590,7 @@ int ice_set_cpu_rx_rmap(struct ice_vsi *vsi)
netdev_dbg(netdev, "Setup CPU RMAP: vsi type 0x%x, ifname %s, q_vectors %d\n",
vsi->type, netdev->name, vsi->num_q_vectors);
- netdev->rx_cpu_rmap = alloc_irq_cpu_rmap(vsi->num_q_vectors);
- if (unlikely(!netdev->rx_cpu_rmap))
- return -EINVAL;
-
- ice_for_each_q_vector(vsi, i)
- if (irq_cpu_rmap_add(netdev->rx_cpu_rmap,
- vsi->q_vectors[i]->irq.virq)) {
- ice_free_cpu_rx_rmap(vsi);
- return -EINVAL;
- }
-
- return 0;
+ return netif_enable_cpu_rmap(netdev, vsi->num_q_vectors);
}
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_arfs.h b/drivers/net/ethernet/intel/ice/ice_arfs.h
index 9669ad9bf7b5..9706293128c3 100644
--- a/drivers/net/ethernet/intel/ice/ice_arfs.h
+++ b/drivers/net/ethernet/intel/ice/ice_arfs.h
@@ -45,7 +45,6 @@ int
ice_rx_flow_steer(struct net_device *netdev, const struct sk_buff *skb,
u16 rxq_idx, u32 flow_id);
void ice_clear_arfs(struct ice_vsi *vsi);
-void ice_free_cpu_rx_rmap(struct ice_vsi *vsi);
void ice_init_arfs(struct ice_vsi *vsi);
void ice_sync_arfs_fltrs(struct ice_pf *pf);
int ice_set_cpu_rx_rmap(struct ice_vsi *vsi);
@@ -56,7 +55,6 @@ ice_is_arfs_using_perfect_flow(struct ice_hw *hw,
enum ice_fltr_ptype flow_type);
#else
static inline void ice_clear_arfs(struct ice_vsi *vsi) { }
-static inline void ice_free_cpu_rx_rmap(struct ice_vsi *vsi) { }
static inline void ice_init_arfs(struct ice_vsi *vsi) { }
static inline void ice_sync_arfs_fltrs(struct ice_pf *pf) { }
static inline void ice_remove_arfs(struct ice_pf *pf) { }
diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c
index b2af8e3586f7..6db4ad8fc70b 100644
--- a/drivers/net/ethernet/intel/ice/ice_base.c
+++ b/drivers/net/ethernet/intel/ice/ice_base.c
@@ -147,10 +147,6 @@ skip_alloc:
q_vector->reg_idx = q_vector->irq.index;
q_vector->vf_reg_idx = q_vector->irq.index;
- /* only set affinity_mask if the CPU is online */
- if (cpu_online(v_idx))
- cpumask_set_cpu(v_idx, &q_vector->affinity_mask);
-
/* This will not be called in the driver load path because the netdev
* will not be created yet. All other cases with register the NAPI
* handler here (i.e. resume, reset/rebuild, etc.)
@@ -276,7 +272,8 @@ static void ice_cfg_xps_tx_ring(struct ice_tx_ring *ring)
if (test_and_set_bit(ICE_TX_XPS_INIT_DONE, ring->xps_state))
return;
- netif_set_xps_queue(ring->netdev, &ring->q_vector->affinity_mask,
+ netif_set_xps_queue(ring->netdev,
+ &ring->q_vector->napi.config->affinity_mask,
ring->q_index);
}
@@ -473,9 +470,6 @@ static int ice_setup_rx_ctx(struct ice_rx_ring *ring)
*/
if (vsi->type != ICE_VSI_VF)
ice_write_qrxflxp_cntxt(hw, pf_q, rxdid, 0x3, true);
- else
- ice_write_qrxflxp_cntxt(hw, pf_q, ICE_RXDID_LEGACY_1, 0x3,
- false);
/* Absolute queue number out of 2K needs to be passed */
err = ice_write_rxq_ctx(hw, &rlan_ctx, pf_q);
@@ -801,13 +795,11 @@ int ice_vsi_alloc_q_vectors(struct ice_vsi *vsi)
return 0;
err_out:
- while (v_idx--)
- ice_free_q_vector(vsi, v_idx);
- dev_err(dev, "Failed to allocate %d q_vector for VSI %d, ret=%d\n",
- vsi->num_q_vectors, vsi->vsi_num, err);
- vsi->num_q_vectors = 0;
- return err;
+ dev_info(dev, "Failed to allocate %d q_vectors for VSI %d, new value %d",
+ vsi->num_q_vectors, vsi->vsi_num, v_idx);
+ vsi->num_q_vectors = v_idx;
+ return v_idx ? 0 : err;
}
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index 7a2a2e8da8fa..59df31c2c83f 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -186,7 +186,7 @@ static int ice_set_mac_type(struct ice_hw *hw)
* ice_is_generic_mac - check if device's mac_type is generic
* @hw: pointer to the hardware structure
*
- * Return: true if mac_type is generic (with SBQ support), false if not
+ * Return: true if mac_type is ICE_MAC_GENERIC*, false otherwise.
*/
bool ice_is_generic_mac(struct ice_hw *hw)
{
@@ -195,120 +195,6 @@ bool ice_is_generic_mac(struct ice_hw *hw)
}
/**
- * ice_is_e810
- * @hw: pointer to the hardware structure
- *
- * returns true if the device is E810 based, false if not.
- */
-bool ice_is_e810(struct ice_hw *hw)
-{
- return hw->mac_type == ICE_MAC_E810;
-}
-
-/**
- * ice_is_e810t
- * @hw: pointer to the hardware structure
- *
- * returns true if the device is E810T based, false if not.
- */
-bool ice_is_e810t(struct ice_hw *hw)
-{
- switch (hw->device_id) {
- case ICE_DEV_ID_E810C_SFP:
- switch (hw->subsystem_device_id) {
- case ICE_SUBDEV_ID_E810T:
- case ICE_SUBDEV_ID_E810T2:
- case ICE_SUBDEV_ID_E810T3:
- case ICE_SUBDEV_ID_E810T4:
- case ICE_SUBDEV_ID_E810T6:
- case ICE_SUBDEV_ID_E810T7:
- return true;
- }
- break;
- case ICE_DEV_ID_E810C_QSFP:
- switch (hw->subsystem_device_id) {
- case ICE_SUBDEV_ID_E810T2:
- case ICE_SUBDEV_ID_E810T3:
- case ICE_SUBDEV_ID_E810T5:
- return true;
- }
- break;
- default:
- break;
- }
-
- return false;
-}
-
-/**
- * ice_is_e822 - Check if a device is E822 family device
- * @hw: pointer to the hardware structure
- *
- * Return: true if the device is E822 based, false if not.
- */
-bool ice_is_e822(struct ice_hw *hw)
-{
- switch (hw->device_id) {
- case ICE_DEV_ID_E822C_BACKPLANE:
- case ICE_DEV_ID_E822C_QSFP:
- case ICE_DEV_ID_E822C_SFP:
- case ICE_DEV_ID_E822C_10G_BASE_T:
- case ICE_DEV_ID_E822C_SGMII:
- case ICE_DEV_ID_E822L_BACKPLANE:
- case ICE_DEV_ID_E822L_SFP:
- case ICE_DEV_ID_E822L_10G_BASE_T:
- case ICE_DEV_ID_E822L_SGMII:
- return true;
- default:
- return false;
- }
-}
-
-/**
- * ice_is_e823
- * @hw: pointer to the hardware structure
- *
- * returns true if the device is E823-L or E823-C based, false if not.
- */
-bool ice_is_e823(struct ice_hw *hw)
-{
- switch (hw->device_id) {
- case ICE_DEV_ID_E823L_BACKPLANE:
- case ICE_DEV_ID_E823L_SFP:
- case ICE_DEV_ID_E823L_10G_BASE_T:
- case ICE_DEV_ID_E823L_1GBE:
- case ICE_DEV_ID_E823L_QSFP:
- case ICE_DEV_ID_E823C_BACKPLANE:
- case ICE_DEV_ID_E823C_QSFP:
- case ICE_DEV_ID_E823C_SFP:
- case ICE_DEV_ID_E823C_10G_BASE_T:
- case ICE_DEV_ID_E823C_SGMII:
- return true;
- default:
- return false;
- }
-}
-
-/**
- * ice_is_e825c - Check if a device is E825C family device
- * @hw: pointer to the hardware structure
- *
- * Return: true if the device is E825-C based, false if not.
- */
-bool ice_is_e825c(struct ice_hw *hw)
-{
- switch (hw->device_id) {
- case ICE_DEV_ID_E825C_BACKPLANE:
- case ICE_DEV_ID_E825C_QSFP:
- case ICE_DEV_ID_E825C_SFP:
- case ICE_DEV_ID_E825C_SGMII:
- return true;
- default:
- return false;
- }
-}
-
-/**
* ice_is_pf_c827 - check if pf contains c827 phy
* @hw: pointer to the hw struct
*
@@ -2271,7 +2157,8 @@ ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
caps->nvm_unified_update);
break;
case ICE_AQC_CAPS_RDMA:
- caps->rdma = (number == 1);
+ if (IS_ENABLED(CONFIG_INFINIBAND_IRDMA))
+ caps->rdma = (number == 1);
ice_debug(hw, ICE_DBG_INIT, "%s: rdma = %d\n", prefix, caps->rdma);
break;
case ICE_AQC_CAPS_MAX_MTU:
@@ -2408,7 +2295,7 @@ ice_parse_1588_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
info->tmr_index_owned = ((number & ICE_TS_TMR_IDX_OWND_M) != 0);
info->tmr_index_assoc = ((number & ICE_TS_TMR_IDX_ASSOC_M) != 0);
- if (!ice_is_e825c(hw)) {
+ if (hw->mac_type != ICE_MAC_GENERIC_3K_E825) {
info->clk_freq = FIELD_GET(ICE_TS_CLK_FREQ_M, number);
info->clk_src = ((number & ICE_TS_CLK_SRC_M) != 0);
} else {
@@ -5765,6 +5652,96 @@ ice_aq_write_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
}
/**
+ * ice_get_pca9575_handle - find and return the PCA9575 controller
+ * @hw: pointer to the hw struct
+ * @pca9575_handle: GPIO controller's handle
+ *
+ * Find and return the GPIO controller's handle in the netlist.
+ * When found - the value will be cached in the hw structure and following calls
+ * will return cached value.
+ *
+ * Return: 0 on success, -ENXIO when there's no PCA9575 present.
+ */
+int ice_get_pca9575_handle(struct ice_hw *hw, u16 *pca9575_handle)
+{
+ struct ice_aqc_get_link_topo *cmd;
+ struct ice_aq_desc desc;
+ int err;
+ u8 idx;
+
+ /* If handle was read previously return cached value */
+ if (hw->io_expander_handle) {
+ *pca9575_handle = hw->io_expander_handle;
+ return 0;
+ }
+
+#define SW_PCA9575_SFP_TOPO_IDX 2
+#define SW_PCA9575_QSFP_TOPO_IDX 1
+
+ /* Check if the SW IO expander controlling SMA exists in the netlist. */
+ if (hw->device_id == ICE_DEV_ID_E810C_SFP)
+ idx = SW_PCA9575_SFP_TOPO_IDX;
+ else if (hw->device_id == ICE_DEV_ID_E810C_QSFP)
+ idx = SW_PCA9575_QSFP_TOPO_IDX;
+ else
+ return -ENXIO;
+
+ /* If handle was not detected read it from the netlist */
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo);
+ cmd = &desc.params.get_link_topo;
+ cmd->addr.topo_params.node_type_ctx =
+ ICE_AQC_LINK_TOPO_NODE_TYPE_GPIO_CTRL;
+ cmd->addr.topo_params.index = idx;
+
+ err = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
+ if (err)
+ return -ENXIO;
+
+ /* Verify if we found the right IO expander type */
+ if (desc.params.get_link_topo.node_part_num !=
+ ICE_AQC_GET_LINK_TOPO_NODE_NR_PCA9575)
+ return -ENXIO;
+
+ /* If present save the handle and return it */
+ hw->io_expander_handle =
+ le16_to_cpu(desc.params.get_link_topo.addr.handle);
+ *pca9575_handle = hw->io_expander_handle;
+
+ return 0;
+}
+
+/**
+ * ice_read_pca9575_reg - read the register from the PCA9575 controller
+ * @hw: pointer to the hw struct
+ * @offset: GPIO controller register offset
+ * @data: pointer to data to be read from the GPIO controller
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+int ice_read_pca9575_reg(struct ice_hw *hw, u8 offset, u8 *data)
+{
+ struct ice_aqc_link_topo_addr link_topo;
+ __le16 addr;
+ u16 handle;
+ int err;
+
+ memset(&link_topo, 0, sizeof(link_topo));
+
+ err = ice_get_pca9575_handle(hw, &handle);
+ if (err)
+ return err;
+
+ link_topo.handle = cpu_to_le16(handle);
+ link_topo.topo_params.node_type_ctx =
+ FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_CTX_M,
+ ICE_AQC_LINK_TOPO_NODE_CTX_PROVIDED);
+
+ addr = cpu_to_le16((u16)offset);
+
+ return ice_aq_read_i2c(hw, link_topo, 0, addr, 1, data, NULL);
+}
+
+/**
* ice_aq_set_gpio
* @hw: pointer to the hw struct
* @gpio_ctrl_handle: GPIO controller node handle
diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h
index 15ba38543738..9b00aa0ddf10 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.h
+++ b/drivers/net/ethernet/intel/ice/ice_common.h
@@ -131,7 +131,6 @@ int
ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags,
struct ice_sq_cd *cd);
bool ice_is_generic_mac(struct ice_hw *hw);
-bool ice_is_e810(struct ice_hw *hw);
int ice_clear_pf_cfg(struct ice_hw *hw);
int
ice_aq_set_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi,
@@ -276,10 +275,6 @@ ice_stat_update40(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
void
ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
u64 *prev_stat, u64 *cur_stat);
-bool ice_is_e810t(struct ice_hw *hw);
-bool ice_is_e822(struct ice_hw *hw);
-bool ice_is_e823(struct ice_hw *hw);
-bool ice_is_e825c(struct ice_hw *hw);
int
ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
struct ice_aqc_txsched_elem_data *buf);
@@ -306,5 +301,7 @@ int
ice_aq_write_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
u16 bus_addr, __le16 addr, u8 params, const u8 *data,
struct ice_sq_cd *cd);
+int ice_get_pca9575_handle(struct ice_hw *hw, u16 *pca9575_handle);
+int ice_read_pca9575_reg(struct ice_hw *hw, u8 offset, u8 *data);
bool ice_fw_supports_report_dflt_cfg(struct ice_hw *hw);
#endif /* _ICE_COMMON_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_ddp.c b/drivers/net/ethernet/intel/ice/ice_ddp.c
index 03988be03729..69d5b1a28491 100644
--- a/drivers/net/ethernet/intel/ice/ice_ddp.c
+++ b/drivers/net/ethernet/intel/ice/ice_ddp.c
@@ -2345,14 +2345,14 @@ ice_get_set_tx_topo(struct ice_hw *hw, u8 *buf, u16 buf_size,
cmd->set_flags |= ICE_AQC_TX_TOPO_FLAGS_SRC_RAM |
ICE_AQC_TX_TOPO_FLAGS_LOAD_NEW;
- if (ice_is_e825c(hw))
+ if (hw->mac_type == ICE_MAC_GENERIC_3K_E825)
desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
} else {
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_tx_topo);
cmd->get_flags = ICE_AQC_TX_TOPO_GET_RAM;
}
- if (!ice_is_e825c(hw))
+ if (hw->mac_type != ICE_MAC_GENERIC_3K_E825)
desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
diff --git a/drivers/net/ethernet/intel/ice/ice_dpll.c b/drivers/net/ethernet/intel/ice/ice_dpll.c
index 8d806d8ad761..bce3ad6ca2a6 100644
--- a/drivers/net/ethernet/intel/ice/ice_dpll.c
+++ b/drivers/net/ethernet/intel/ice/ice_dpll.c
@@ -95,7 +95,7 @@ ice_dpll_pin_freq_set(struct ice_pf *pf, struct ice_dpll_pin *pin,
}
if (ret) {
NL_SET_ERR_MSG_FMT(extack,
- "err:%d %s failed to set pin freq:%u on pin:%u\n",
+ "err:%d %s failed to set pin freq:%u on pin:%u",
ret,
ice_aq_str(pf->hw.adminq.sq_last_status),
freq, pin->idx);
@@ -322,7 +322,7 @@ ice_dpll_pin_enable(struct ice_hw *hw, struct ice_dpll_pin *pin,
}
if (ret)
NL_SET_ERR_MSG_FMT(extack,
- "err:%d %s failed to enable %s pin:%u\n",
+ "err:%d %s failed to enable %s pin:%u",
ret, ice_aq_str(hw->adminq.sq_last_status),
pin_type_name[pin_type], pin->idx);
@@ -367,7 +367,7 @@ ice_dpll_pin_disable(struct ice_hw *hw, struct ice_dpll_pin *pin,
}
if (ret)
NL_SET_ERR_MSG_FMT(extack,
- "err:%d %s failed to disable %s pin:%u\n",
+ "err:%d %s failed to disable %s pin:%u",
ret, ice_aq_str(hw->adminq.sq_last_status),
pin_type_name[pin_type], pin->idx);
@@ -479,7 +479,7 @@ ice_dpll_pin_state_update(struct ice_pf *pf, struct ice_dpll_pin *pin,
err:
if (extack)
NL_SET_ERR_MSG_FMT(extack,
- "err:%d %s failed to update %s pin:%u\n",
+ "err:%d %s failed to update %s pin:%u",
ret,
ice_aq_str(pf->hw.adminq.sq_last_status),
pin_type_name[pin_type], pin->idx);
@@ -518,7 +518,7 @@ ice_dpll_hw_input_prio_set(struct ice_pf *pf, struct ice_dpll *dpll,
(u8)prio);
if (ret)
NL_SET_ERR_MSG_FMT(extack,
- "err:%d %s failed to set pin prio:%u on pin:%u\n",
+ "err:%d %s failed to set pin prio:%u on pin:%u",
ret,
ice_aq_str(pf->hw.adminq.sq_last_status),
prio, pin->idx);
@@ -1004,7 +1004,7 @@ ice_dpll_pin_phase_adjust_set(const struct dpll_pin *pin, void *pin_priv,
mutex_unlock(&pf->dplls.lock);
if (ret)
NL_SET_ERR_MSG_FMT(extack,
- "err:%d %s failed to set pin phase_adjust:%d for pin:%u on dpll:%u\n",
+ "err:%d %s failed to set pin phase_adjust:%d for pin:%u on dpll:%u",
ret,
ice_aq_str(pf->hw.adminq.sq_last_status),
phase_adjust, p->idx, d->dpll_idx);
@@ -1362,7 +1362,7 @@ ice_dpll_rclk_state_on_pin_set(const struct dpll_pin *pin, void *pin_priv,
&p->freq);
if (ret)
NL_SET_ERR_MSG_FMT(extack,
- "err:%d %s failed to set pin state:%u for pin:%u on parent:%u\n",
+ "err:%d %s failed to set pin state:%u for pin:%u on parent:%u",
ret,
ice_aq_str(pf->hw.adminq.sq_last_status),
state, p->idx, parent->idx);
diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch.c b/drivers/net/ethernet/intel/ice/ice_eswitch.c
index d649c197cf67..ed21d7f55ac1 100644
--- a/drivers/net/ethernet/intel/ice/ice_eswitch.c
+++ b/drivers/net/ethernet/intel/ice/ice_eswitch.c
@@ -49,9 +49,6 @@ static int ice_eswitch_setup_env(struct ice_pf *pf)
if (vlan_ops->dis_rx_filtering(uplink_vsi))
goto err_vlan_filtering;
- if (ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_set_allow_override))
- goto err_override_uplink;
-
if (ice_vsi_update_local_lb(uplink_vsi, true))
goto err_override_local_lb;
@@ -63,8 +60,6 @@ static int ice_eswitch_setup_env(struct ice_pf *pf)
err_up:
ice_vsi_update_local_lb(uplink_vsi, false);
err_override_local_lb:
- ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_clear_allow_override);
-err_override_uplink:
vlan_ops->ena_rx_filtering(uplink_vsi);
err_vlan_filtering:
ice_cfg_dflt_vsi(uplink_vsi->port_info, uplink_vsi->idx, false,
@@ -275,7 +270,6 @@ static void ice_eswitch_release_env(struct ice_pf *pf)
vlan_ops = ice_get_compat_vsi_vlan_ops(uplink_vsi);
ice_vsi_update_local_lb(uplink_vsi, false);
- ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_clear_allow_override);
vlan_ops->ena_rx_filtering(uplink_vsi);
ice_cfg_dflt_vsi(uplink_vsi->port_info, uplink_vsi->idx, false,
ICE_FLTR_TX);
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index f241493a6ac8..7c2dc347e4e5 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -3788,8 +3788,7 @@ ice_get_ts_info(struct net_device *dev, struct kernel_ethtool_ts_info *info)
*/
static int ice_get_max_txq(struct ice_pf *pf)
{
- return min3(pf->num_lan_msix, (u16)num_online_cpus(),
- (u16)pf->hw.func_caps.common_cap.num_txq);
+ return min(num_online_cpus(), pf->hw.func_caps.common_cap.num_txq);
}
/**
@@ -3798,8 +3797,7 @@ static int ice_get_max_txq(struct ice_pf *pf)
*/
static int ice_get_max_rxq(struct ice_pf *pf)
{
- return min3(pf->num_lan_msix, (u16)num_online_cpus(),
- (u16)pf->hw.func_caps.common_cap.num_rxq);
+ return min(num_online_cpus(), pf->hw.func_caps.common_cap.num_rxq);
}
/**
@@ -3817,8 +3815,7 @@ static u32 ice_get_combined_cnt(struct ice_vsi *vsi)
ice_for_each_q_vector(vsi, q_idx) {
struct ice_q_vector *q_vector = vsi->q_vectors[q_idx];
- if (q_vector->rx.rx_ring && q_vector->tx.tx_ring)
- combined++;
+ combined += min(q_vector->num_ring_tx, q_vector->num_ring_rx);
}
return combined;
@@ -4773,7 +4770,7 @@ static const struct ethtool_ops ice_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_USE_ADAPTIVE |
ETHTOOL_COALESCE_RX_USECS_HIGH,
- .cap_rss_sym_xor_supported = true,
+ .supported_input_xfrm = RXH_XFRM_SYM_XOR,
.rxfh_per_ctx_key = true,
.get_link_ksettings = ice_get_link_ksettings,
.set_link_ksettings = ice_set_link_ksettings,
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c b/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c
index ee9862ddfe15..1d118171de37 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c
@@ -1605,22 +1605,19 @@ void ice_fdir_replay_fltrs(struct ice_pf *pf)
*/
int ice_fdir_create_dflt_rules(struct ice_pf *pf)
{
+ const enum ice_fltr_ptype dflt_rules[] = {
+ ICE_FLTR_PTYPE_NONF_IPV4_TCP, ICE_FLTR_PTYPE_NONF_IPV4_UDP,
+ ICE_FLTR_PTYPE_NONF_IPV6_TCP, ICE_FLTR_PTYPE_NONF_IPV6_UDP,
+ };
int err;
/* Create perfect TCP and UDP rules in hardware. */
- err = ice_create_init_fdir_rule(pf, ICE_FLTR_PTYPE_NONF_IPV4_TCP);
- if (err)
- return err;
-
- err = ice_create_init_fdir_rule(pf, ICE_FLTR_PTYPE_NONF_IPV4_UDP);
- if (err)
- return err;
+ for (int i = 0; i < ARRAY_SIZE(dflt_rules); i++) {
+ err = ice_create_init_fdir_rule(pf, dflt_rules[i]);
- err = ice_create_init_fdir_rule(pf, ICE_FLTR_PTYPE_NONF_IPV6_TCP);
- if (err)
- return err;
-
- err = ice_create_init_fdir_rule(pf, ICE_FLTR_PTYPE_NONF_IPV6_UDP);
+ if (err)
+ break;
+ }
return err;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_gnss.c b/drivers/net/ethernet/intel/ice/ice_gnss.c
index b2148dbe49b2..6b26290452d4 100644
--- a/drivers/net/ethernet/intel/ice/ice_gnss.c
+++ b/drivers/net/ethernet/intel/ice/ice_gnss.c
@@ -381,32 +381,23 @@ void ice_gnss_exit(struct ice_pf *pf)
}
/**
- * ice_gnss_is_gps_present - Check if GPS HW is present
+ * ice_gnss_is_module_present - Check if GNSS HW is present
* @hw: pointer to HW struct
+ *
+ * Return: true when GNSS is present, false otherwise.
*/
-bool ice_gnss_is_gps_present(struct ice_hw *hw)
+bool ice_gnss_is_module_present(struct ice_hw *hw)
{
- if (!hw->func_caps.ts_func_info.src_tmr_owned)
- return false;
+ int err;
+ u8 data;
- if (!ice_is_gps_in_netlist(hw))
+ if (!hw->func_caps.ts_func_info.src_tmr_owned ||
+ !ice_is_gps_in_netlist(hw))
return false;
-#if IS_ENABLED(CONFIG_PTP_1588_CLOCK)
- if (ice_is_e810t(hw)) {
- int err;
- u8 data;
-
- err = ice_read_pca9575_reg(hw, ICE_PCA9575_P0_IN, &data);
- if (err || !!(data & ICE_P0_GNSS_PRSNT_N))
- return false;
- } else {
- return false;
- }
-#else
- if (!ice_is_e810t(hw))
+ err = ice_read_pca9575_reg(hw, ICE_PCA9575_P0_IN, &data);
+ if (err || !!(data & ICE_P0_GNSS_PRSNT_N))
return false;
-#endif /* IS_ENABLED(CONFIG_PTP_1588_CLOCK) */
return true;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_gnss.h b/drivers/net/ethernet/intel/ice/ice_gnss.h
index 75e567ad7059..15daf603ed7b 100644
--- a/drivers/net/ethernet/intel/ice/ice_gnss.h
+++ b/drivers/net/ethernet/intel/ice/ice_gnss.h
@@ -37,11 +37,11 @@ struct gnss_serial {
#if IS_ENABLED(CONFIG_GNSS)
void ice_gnss_init(struct ice_pf *pf);
void ice_gnss_exit(struct ice_pf *pf);
-bool ice_gnss_is_gps_present(struct ice_hw *hw);
+bool ice_gnss_is_module_present(struct ice_hw *hw);
#else
static inline void ice_gnss_init(struct ice_pf *pf) { }
static inline void ice_gnss_exit(struct ice_pf *pf) { }
-static inline bool ice_gnss_is_gps_present(struct ice_hw *hw)
+static inline bool ice_gnss_is_module_present(struct ice_hw *hw)
{
return false;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
index dc88aea9f473..aa4bfbcf85d2 100644
--- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
+++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
@@ -541,10 +541,22 @@
#define PFPM_WUS_MAG_M BIT(1)
#define PFPM_WUS_MNG_M BIT(3)
#define PFPM_WUS_FW_RST_WK_M BIT(31)
+#define E830_PRTMAC_TS_TX_MEM_VALID_H 0x001E2020
+#define E830_PRTMAC_TS_TX_MEM_VALID_L 0x001E2000
#define E830_PRTMAC_CL01_PS_QNT 0x001E32A0
#define E830_PRTMAC_CL01_PS_QNT_CL0_M GENMASK(15, 0)
#define E830_PRTMAC_CL01_QNT_THR 0x001E3320
#define E830_PRTMAC_CL01_QNT_THR_CL0_M GENMASK(15, 0)
+#define E830_PRTTSYN_TXTIME_H(_i) (0x001E5800 + ((_i) * 32))
+#define E830_PRTTSYN_TXTIME_L(_i) (0x001E5000 + ((_i) * 32))
+#define E830_GLPTM_ART_CTL 0x00088B50
+#define E830_GLPTM_ART_CTL_ACTIVE_M BIT(0)
+#define E830_GLPTM_ART_TIME_H 0x00088B54
+#define E830_GLPTM_ART_TIME_L 0x00088B58
+#define E830_GLTSYN_PTMTIME_H(_i) (0x00088B48 + ((_i) * 4))
+#define E830_GLTSYN_PTMTIME_L(_i) (0x00088B40 + ((_i) * 4))
+#define E830_PFPTM_SEM 0x00088B00
+#define E830_PFPTM_SEM_BUSY_M BIT(0)
#define VFINT_DYN_CTLN(_i) (0x00003800 + ((_i) * 4))
#define VFINT_DYN_CTLN_CLEARPBA_M BIT(1)
#define E830_MBX_PF_IN_FLIGHT_VF_MSGS_THRESH 0x00234000
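The new E830 register macros above encode per-index strides: 32 bytes per Tx timestamp slot and 4 bytes per timer index for the PTM time registers. A tiny standalone check of that arithmetic, using copies of two of the macros; the values follow directly from the definitions above:

#include <assert.h>

#define E830_PRTTSYN_TXTIME_L(_i)	(0x001E5000 + ((_i) * 32))
#define E830_GLTSYN_PTMTIME_L(_i)	(0x00088B40 + ((_i) * 4))

int main(void)
{
	/* Tx timestamp registers are spaced 32 bytes apart per index */
	assert(E830_PRTTSYN_TXTIME_L(1) == 0x001E5020);
	/* PTM time registers use a 4-byte stride per timer index */
	assert(E830_GLTSYN_PTMTIME_L(1) == 0x00088B44);
	return 0;
}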
diff --git a/drivers/net/ethernet/intel/ice/ice_idc.c b/drivers/net/ethernet/intel/ice/ice_idc.c
index 145b27f2a4ce..bab3e81cad5d 100644
--- a/drivers/net/ethernet/intel/ice/ice_idc.c
+++ b/drivers/net/ethernet/intel/ice/ice_idc.c
@@ -228,61 +228,34 @@ void ice_get_qos_params(struct ice_pf *pf, struct iidc_qos_params *qos)
}
EXPORT_SYMBOL_GPL(ice_get_qos_params);
-/**
- * ice_alloc_rdma_qvectors - Allocate vector resources for RDMA driver
- * @pf: board private structure to initialize
- */
-static int ice_alloc_rdma_qvectors(struct ice_pf *pf)
+int ice_alloc_rdma_qvector(struct ice_pf *pf, struct msix_entry *entry)
{
- if (ice_is_rdma_ena(pf)) {
- int i;
-
- pf->msix_entries = kcalloc(pf->num_rdma_msix,
- sizeof(*pf->msix_entries),
- GFP_KERNEL);
- if (!pf->msix_entries)
- return -ENOMEM;
+ struct msi_map map = ice_alloc_irq(pf, true);
- /* RDMA is the only user of pf->msix_entries array */
- pf->rdma_base_vector = 0;
-
- for (i = 0; i < pf->num_rdma_msix; i++) {
- struct msix_entry *entry = &pf->msix_entries[i];
- struct msi_map map;
+ if (map.index < 0)
+ return -ENOMEM;
- map = ice_alloc_irq(pf, false);
- if (map.index < 0)
- break;
+ entry->entry = map.index;
+ entry->vector = map.virq;
- entry->entry = map.index;
- entry->vector = map.virq;
- }
- }
return 0;
}
+EXPORT_SYMBOL_GPL(ice_alloc_rdma_qvector);
/**
* ice_free_rdma_qvector - free vector resources reserved for RDMA driver
* @pf: board private structure to initialize
+ * @entry: MSI-X entry to be removed
*/
-static void ice_free_rdma_qvector(struct ice_pf *pf)
+void ice_free_rdma_qvector(struct ice_pf *pf, struct msix_entry *entry)
{
- int i;
-
- if (!pf->msix_entries)
- return;
-
- for (i = 0; i < pf->num_rdma_msix; i++) {
- struct msi_map map;
+ struct msi_map map;
- map.index = pf->msix_entries[i].entry;
- map.virq = pf->msix_entries[i].vector;
- ice_free_irq(pf, map);
- }
-
- kfree(pf->msix_entries);
- pf->msix_entries = NULL;
+ map.index = entry->entry;
+ map.virq = entry->vector;
+ ice_free_irq(pf, map);
}
+EXPORT_SYMBOL_GPL(ice_free_rdma_qvector);
/**
* ice_adev_release - function to be mapped to AUX dev's release op
@@ -382,12 +355,6 @@ int ice_init_rdma(struct ice_pf *pf)
return -ENOMEM;
}
- /* Reserve vector resources */
- ret = ice_alloc_rdma_qvectors(pf);
- if (ret < 0) {
- dev_err(dev, "failed to reserve vectors for RDMA\n");
- goto err_reserve_rdma_qvector;
- }
pf->rdma_mode |= IIDC_RDMA_PROTOCOL_ROCEV2;
ret = ice_plug_aux_dev(pf);
if (ret)
@@ -395,8 +362,6 @@ int ice_init_rdma(struct ice_pf *pf)
return 0;
err_plug_aux_dev:
- ice_free_rdma_qvector(pf);
-err_reserve_rdma_qvector:
pf->adev = NULL;
xa_erase(&ice_aux_id, pf->aux_idx);
return ret;
@@ -412,6 +377,5 @@ void ice_deinit_rdma(struct ice_pf *pf)
return;
ice_unplug_aux_dev(pf);
- ice_free_rdma_qvector(pf);
xa_erase(&ice_aux_id, pf->aux_idx);
}
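With the ice_idc.c change above, RDMA MSI-X vectors are no longer reserved in bulk by the PF; the auxiliary driver is expected to request and release them one at a time through the newly exported helpers. A hedged sketch of that calling pattern — the surrounding function, entry array and count are hypothetical, only ice_alloc_rdma_qvector() and ice_free_rdma_qvector() come from this patch:

/* Hypothetical consumer of the per-vector API exported above. */
static int example_reserve_rdma_vectors(struct ice_pf *pf,
					struct msix_entry *entries, int count)
{
	int i, err;

	for (i = 0; i < count; i++) {
		err = ice_alloc_rdma_qvector(pf, &entries[i]);
		if (err)
			goto undo;
	}
	return 0;

undo:
	/* release the vectors allocated so far */
	while (i--)
		ice_free_rdma_qvector(pf, &entries[i]);
	return err;
}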
diff --git a/drivers/net/ethernet/intel/ice/ice_irq.c b/drivers/net/ethernet/intel/ice/ice_irq.c
index ad82ff7d1995..30801fd375f0 100644
--- a/drivers/net/ethernet/intel/ice/ice_irq.c
+++ b/drivers/net/ethernet/intel/ice/ice_irq.c
@@ -20,6 +20,19 @@ ice_init_irq_tracker(struct ice_pf *pf, unsigned int max_vectors,
xa_init_flags(&pf->irq_tracker.entries, XA_FLAGS_ALLOC);
}
+static int
+ice_init_virt_irq_tracker(struct ice_pf *pf, u32 base, u32 num_entries)
+{
+ pf->virt_irq_tracker.bm = bitmap_zalloc(num_entries, GFP_KERNEL);
+ if (!pf->virt_irq_tracker.bm)
+ return -ENOMEM;
+
+ pf->virt_irq_tracker.num_entries = num_entries;
+ pf->virt_irq_tracker.base = base;
+
+ return 0;
+}
+
/**
* ice_deinit_irq_tracker - free xarray tracker
* @pf: board private structure
@@ -29,6 +42,11 @@ static void ice_deinit_irq_tracker(struct ice_pf *pf)
xa_destroy(&pf->irq_tracker.entries);
}
+static void ice_deinit_virt_irq_tracker(struct ice_pf *pf)
+{
+ bitmap_free(pf->virt_irq_tracker.bm);
+}
+
/**
* ice_free_irq_res - free a block of resources
* @pf: board private structure
@@ -45,7 +63,7 @@ static void ice_free_irq_res(struct ice_pf *pf, u16 index)
/**
* ice_get_irq_res - get an interrupt resource
* @pf: board private structure
- * @dyn_only: force entry to be dynamically allocated
+ * @dyn_allowed: allow entry to be dynamically allocated
*
* Allocate new irq entry in the free slot of the tracker. Since xarray
* is used, always allocate new entry at the lowest possible index. Set
@@ -53,11 +71,12 @@ static void ice_free_irq_res(struct ice_pf *pf, u16 index)
*
* Returns allocated irq entry or NULL on failure.
*/
-static struct ice_irq_entry *ice_get_irq_res(struct ice_pf *pf, bool dyn_only)
+static struct ice_irq_entry *ice_get_irq_res(struct ice_pf *pf,
+ bool dyn_allowed)
{
- struct xa_limit limit = { .max = pf->irq_tracker.num_entries,
+ struct xa_limit limit = { .max = pf->irq_tracker.num_entries - 1,
.min = 0 };
- unsigned int num_static = pf->irq_tracker.num_static;
+ unsigned int num_static = pf->irq_tracker.num_static - 1;
struct ice_irq_entry *entry;
unsigned int index;
int ret;
@@ -66,9 +85,9 @@ static struct ice_irq_entry *ice_get_irq_res(struct ice_pf *pf, bool dyn_only)
if (!entry)
return NULL;
- /* skip preallocated entries if the caller says so */
- if (dyn_only)
- limit.min = num_static;
+	/* limit to preallocated entries if dynamic allocation is not allowed */
+ if (!dyn_allowed)
+ limit.max = num_static;
ret = xa_alloc(&pf->irq_tracker.entries, &index, entry, limit,
GFP_KERNEL);
@@ -78,161 +97,18 @@ static struct ice_irq_entry *ice_get_irq_res(struct ice_pf *pf, bool dyn_only)
entry = NULL;
} else {
entry->index = index;
- entry->dynamic = index >= num_static;
+ entry->dynamic = index > num_static;
}
return entry;
}
-/**
- * ice_reduce_msix_usage - Reduce usage of MSI-X vectors
- * @pf: board private structure
- * @v_remain: number of remaining MSI-X vectors to be distributed
- *
- * Reduce the usage of MSI-X vectors when entire request cannot be fulfilled.
- * pf->num_lan_msix and pf->num_rdma_msix values are set based on number of
- * remaining vectors.
- */
-static void ice_reduce_msix_usage(struct ice_pf *pf, int v_remain)
-{
- int v_rdma;
-
- if (!ice_is_rdma_ena(pf)) {
- pf->num_lan_msix = v_remain;
- return;
- }
-
- /* RDMA needs at least 1 interrupt in addition to AEQ MSIX */
- v_rdma = ICE_RDMA_NUM_AEQ_MSIX + 1;
-
- if (v_remain < ICE_MIN_LAN_TXRX_MSIX + ICE_MIN_RDMA_MSIX) {
- dev_warn(ice_pf_to_dev(pf), "Not enough MSI-X vectors to support RDMA.\n");
- clear_bit(ICE_FLAG_RDMA_ENA, pf->flags);
-
- pf->num_rdma_msix = 0;
- pf->num_lan_msix = ICE_MIN_LAN_TXRX_MSIX;
- } else if ((v_remain < ICE_MIN_LAN_TXRX_MSIX + v_rdma) ||
- (v_remain - v_rdma < v_rdma)) {
- /* Support minimum RDMA and give remaining vectors to LAN MSIX
- */
- pf->num_rdma_msix = ICE_MIN_RDMA_MSIX;
- pf->num_lan_msix = v_remain - ICE_MIN_RDMA_MSIX;
- } else {
- /* Split remaining MSIX with RDMA after accounting for AEQ MSIX
- */
- pf->num_rdma_msix = (v_remain - ICE_RDMA_NUM_AEQ_MSIX) / 2 +
- ICE_RDMA_NUM_AEQ_MSIX;
- pf->num_lan_msix = v_remain - pf->num_rdma_msix;
- }
-}
-
-/**
- * ice_ena_msix_range - Request a range of MSIX vectors from the OS
- * @pf: board private structure
- *
- * Compute the number of MSIX vectors wanted and request from the OS. Adjust
- * device usage if there are not enough vectors. Return the number of vectors
- * reserved or negative on failure.
- */
-static int ice_ena_msix_range(struct ice_pf *pf)
+#define ICE_RDMA_AEQ_MSIX 1
+static int ice_get_default_msix_amount(struct ice_pf *pf)
{
- int num_cpus, hw_num_msix, v_other, v_wanted, v_actual;
- struct device *dev = ice_pf_to_dev(pf);
- int err;
-
- hw_num_msix = pf->hw.func_caps.common_cap.num_msix_vectors;
- num_cpus = num_online_cpus();
-
- /* LAN miscellaneous handler */
- v_other = ICE_MIN_LAN_OICR_MSIX;
-
- /* Flow Director */
- if (test_bit(ICE_FLAG_FD_ENA, pf->flags))
- v_other += ICE_FDIR_MSIX;
-
- /* switchdev */
- v_other += ICE_ESWITCH_MSIX;
-
- v_wanted = v_other;
-
- /* LAN traffic */
- pf->num_lan_msix = num_cpus;
- v_wanted += pf->num_lan_msix;
-
- /* RDMA auxiliary driver */
- if (ice_is_rdma_ena(pf)) {
- pf->num_rdma_msix = num_cpus + ICE_RDMA_NUM_AEQ_MSIX;
- v_wanted += pf->num_rdma_msix;
- }
-
- if (v_wanted > hw_num_msix) {
- int v_remain;
-
- dev_warn(dev, "not enough device MSI-X vectors. wanted = %d, available = %d\n",
- v_wanted, hw_num_msix);
-
- if (hw_num_msix < ICE_MIN_MSIX) {
- err = -ERANGE;
- goto exit_err;
- }
-
- v_remain = hw_num_msix - v_other;
- if (v_remain < ICE_MIN_LAN_TXRX_MSIX) {
- v_other = ICE_MIN_MSIX - ICE_MIN_LAN_TXRX_MSIX;
- v_remain = ICE_MIN_LAN_TXRX_MSIX;
- }
-
- ice_reduce_msix_usage(pf, v_remain);
- v_wanted = pf->num_lan_msix + pf->num_rdma_msix + v_other;
-
- dev_notice(dev, "Reducing request to %d MSI-X vectors for LAN traffic.\n",
- pf->num_lan_msix);
- if (ice_is_rdma_ena(pf))
- dev_notice(dev, "Reducing request to %d MSI-X vectors for RDMA.\n",
- pf->num_rdma_msix);
- }
-
- /* actually reserve the vectors */
- v_actual = pci_alloc_irq_vectors(pf->pdev, ICE_MIN_MSIX, v_wanted,
- PCI_IRQ_MSIX);
- if (v_actual < 0) {
- dev_err(dev, "unable to reserve MSI-X vectors\n");
- err = v_actual;
- goto exit_err;
- }
-
- if (v_actual < v_wanted) {
- dev_warn(dev, "not enough OS MSI-X vectors. requested = %d, obtained = %d\n",
- v_wanted, v_actual);
-
- if (v_actual < ICE_MIN_MSIX) {
- /* error if we can't get minimum vectors */
- pci_free_irq_vectors(pf->pdev);
- err = -ERANGE;
- goto exit_err;
- } else {
- int v_remain = v_actual - v_other;
-
- if (v_remain < ICE_MIN_LAN_TXRX_MSIX)
- v_remain = ICE_MIN_LAN_TXRX_MSIX;
-
- ice_reduce_msix_usage(pf, v_remain);
-
- dev_notice(dev, "Enabled %d MSI-X vectors for LAN traffic.\n",
- pf->num_lan_msix);
-
- if (ice_is_rdma_ena(pf))
- dev_notice(dev, "Enabled %d MSI-X vectors for RDMA.\n",
- pf->num_rdma_msix);
- }
- }
-
- return v_actual;
-
-exit_err:
- pf->num_rdma_msix = 0;
- pf->num_lan_msix = 0;
- return err;
+ return ICE_MIN_LAN_OICR_MSIX + num_online_cpus() +
+ (test_bit(ICE_FLAG_FD_ENA, pf->flags) ? ICE_FDIR_MSIX : 0) +
+ (ice_is_rdma_ena(pf) ? num_online_cpus() + ICE_RDMA_AEQ_MSIX : 0);
}
/**
@@ -243,6 +119,7 @@ void ice_clear_interrupt_scheme(struct ice_pf *pf)
{
pci_free_irq_vectors(pf->pdev);
ice_deinit_irq_tracker(pf);
+ ice_deinit_virt_irq_tracker(pf);
}
/**
@@ -252,27 +129,38 @@ void ice_clear_interrupt_scheme(struct ice_pf *pf)
int ice_init_interrupt_scheme(struct ice_pf *pf)
{
int total_vectors = pf->hw.func_caps.common_cap.num_msix_vectors;
- int vectors, max_vectors;
+ int vectors;
- vectors = ice_ena_msix_range(pf);
+ /* load default PF MSI-X range */
+ if (!pf->msix.min)
+ pf->msix.min = ICE_MIN_MSIX;
- if (vectors < 0)
- return -ENOMEM;
+ if (!pf->msix.max)
+ pf->msix.max = min(total_vectors,
+ ice_get_default_msix_amount(pf));
+
+ pf->msix.total = total_vectors;
+ pf->msix.rest = total_vectors - pf->msix.max;
if (pci_msix_can_alloc_dyn(pf->pdev))
- max_vectors = total_vectors;
+ vectors = pf->msix.min;
else
- max_vectors = vectors;
+ vectors = pf->msix.max;
+
+ vectors = pci_alloc_irq_vectors(pf->pdev, pf->msix.min, vectors,
+ PCI_IRQ_MSIX);
+ if (vectors < 0)
+ return vectors;
- ice_init_irq_tracker(pf, max_vectors, vectors);
+ ice_init_irq_tracker(pf, pf->msix.max, vectors);
- return 0;
+ return ice_init_virt_irq_tracker(pf, pf->msix.max, pf->msix.rest);
}
/**
* ice_alloc_irq - Allocate new interrupt vector
* @pf: board private structure
- * @dyn_only: force dynamic allocation of the interrupt
+ * @dyn_allowed: allow dynamic allocation of the interrupt
*
* Allocate new interrupt vector for a given owner id.
* return struct msi_map with interrupt details and track
@@ -285,27 +173,22 @@ int ice_init_interrupt_scheme(struct ice_pf *pf)
* interrupt will be allocated with pci_msix_alloc_irq_at.
*
* Some callers may only support dynamically allocated interrupts.
- * This is indicated with dyn_only flag.
+ * This is indicated with dyn_allowed flag.
*
* On failure, return map with negative .index. The caller
* is expected to check returned map index.
*
*/
-struct msi_map ice_alloc_irq(struct ice_pf *pf, bool dyn_only)
+struct msi_map ice_alloc_irq(struct ice_pf *pf, bool dyn_allowed)
{
- int sriov_base_vector = pf->sriov_base_vector;
struct msi_map map = { .index = -ENOENT };
struct device *dev = ice_pf_to_dev(pf);
struct ice_irq_entry *entry;
- entry = ice_get_irq_res(pf, dyn_only);
+ entry = ice_get_irq_res(pf, dyn_allowed);
if (!entry)
return map;
- /* fail if we're about to violate SRIOV vectors space */
- if (sriov_base_vector && entry->index >= sriov_base_vector)
- goto exit_free_res;
-
if (pci_msix_can_alloc_dyn(pf->pdev) && entry->dynamic) {
map = pci_msix_alloc_irq_at(pf->pdev, entry->index, NULL);
if (map.index < 0)
@@ -353,26 +236,40 @@ void ice_free_irq(struct ice_pf *pf, struct msi_map map)
}
/**
- * ice_get_max_used_msix_vector - Get the max used interrupt vector
- * @pf: board private structure
+ * ice_virt_get_irqs - get irqs for SR-IOV use case
+ * @pf: pointer to PF structure
+ * @needed: number of irqs to get
*
- * Return index of maximum used interrupt vectors with respect to the
- * beginning of the MSIX table. Take into account that some interrupts
- * may have been dynamically allocated after MSIX was initially enabled.
+ * Find a contiguous block of @needed free entries in the SR-IOV interrupt
+ * tracker, mark them as used and return the first index translated to the
+ * global MSI-X vector space. Callers use that index when accessing
+ * PF-relative registers such as GLINT_VECT2FUNC and GLINT_DYN_CTL.
+ *
+ * Return: first global MSI-X vector index on success, -ENOENT when no block
+ * of @needed contiguous vectors is available.
*/
-int ice_get_max_used_msix_vector(struct ice_pf *pf)
+int ice_virt_get_irqs(struct ice_pf *pf, u32 needed)
{
- unsigned long start, index, max_idx;
- void *entry;
+ int res = bitmap_find_next_zero_area(pf->virt_irq_tracker.bm,
+ pf->virt_irq_tracker.num_entries,
+ 0, needed, 0);
- /* Treat all preallocated interrupts as used */
- start = pf->irq_tracker.num_static;
- max_idx = start - 1;
+ if (res >= pf->virt_irq_tracker.num_entries)
+ return -ENOENT;
- xa_for_each_start(&pf->irq_tracker.entries, index, entry, start) {
- if (index > max_idx)
- max_idx = index;
- }
+ bitmap_set(pf->virt_irq_tracker.bm, res, needed);
+
+ /* conversion from number in bitmap to global irq index */
+ return res + pf->virt_irq_tracker.base;
+}
- return max_idx;
+/**
+ * ice_virt_free_irqs - free irqs used by the VF
+ * @pf: pointer to PF structure
+ * @index: first index to be freed
+ * @irqs: number of irqs to free
+ */
+void ice_virt_free_irqs(struct ice_pf *pf, u32 index, u32 irqs)
+{
+ bitmap_clear(pf->virt_irq_tracker.bm, index - pf->virt_irq_tracker.base,
+ irqs);
}
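ice_get_default_msix_amount() above sizes the PF's default MSI-X budget as one miscellaneous (OICR) vector, one per online CPU for LAN traffic, the flow-director vectors when enabled, and one per CPU plus one AEQ vector when RDMA is enabled. A small standalone illustration of that arithmetic; the CPU count and the ICE_MIN_LAN_OICR_MSIX/ICE_FDIR_MSIX values are assumptions, only the formula is taken from the patch:

#include <stdio.h>

int main(void)
{
	unsigned int cpus = 16;		/* num_online_cpus(), assumed */
	unsigned int oicr = 1;		/* ICE_MIN_LAN_OICR_MSIX, assumed value */
	unsigned int fdir = 2;		/* ICE_FDIR_MSIX, assumed value */
	unsigned int rdma_aeq = 1;	/* ICE_RDMA_AEQ_MSIX, defined as 1 above */
	unsigned int total;

	/* OICR + LAN (one per CPU) + FDIR + RDMA (one per CPU + AEQ) */
	total = oicr + cpus + fdir + (cpus + rdma_aeq);

	printf("default MSI-X budget: %u\n", total);	/* 36 with these inputs */
	return 0;
}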
diff --git a/drivers/net/ethernet/intel/ice/ice_irq.h b/drivers/net/ethernet/intel/ice/ice_irq.h
index f35efc08575e..b2f9dbafd57e 100644
--- a/drivers/net/ethernet/intel/ice/ice_irq.h
+++ b/drivers/net/ethernet/intel/ice/ice_irq.h
@@ -15,11 +15,22 @@ struct ice_irq_tracker {
u16 num_static; /* preallocated entries */
};
+struct ice_virt_irq_tracker {
+ unsigned long *bm; /* bitmap to track irq usage */
+ u32 num_entries;
+	/* Global index of the first MSI-X vector available to SR-IOV VFs;
+	 * vectors below this index are reserved for the PF itself.
+	 */
+ u32 base;
+};
+
int ice_init_interrupt_scheme(struct ice_pf *pf);
void ice_clear_interrupt_scheme(struct ice_pf *pf);
struct msi_map ice_alloc_irq(struct ice_pf *pf, bool dyn_only);
void ice_free_irq(struct ice_pf *pf, struct msi_map map);
-int ice_get_max_used_msix_vector(struct ice_pf *pf);
+int ice_virt_get_irqs(struct ice_pf *pf, u32 needed);
+void ice_virt_free_irqs(struct ice_pf *pf, u32 index, u32 irqs);
#endif
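The new tracker hands out contiguous blocks of vectors to SR-IOV VFs and translates between bitmap offsets and global MSI-X indices: ice_virt_get_irqs() adds the base on the way out, ice_virt_free_irqs() subtracts it on the way back. A hedged sketch of the expected calling pattern; the wrapper functions are hypothetical, only the two helpers are from this patch:

/* Hypothetical SR-IOV caller of the tracker helpers declared above. */
static int example_assign_vf_vectors(struct ice_pf *pf, u32 num_msix,
				     int *first_idx)
{
	int base = ice_virt_get_irqs(pf, num_msix);

	if (base < 0)
		return base;	/* -ENOENT: no contiguous block available */

	*first_idx = base;	/* already a global MSI-X index */
	return 0;
}

static void example_release_vf_vectors(struct ice_pf *pf, int first_idx,
				       u32 num_msix)
{
	ice_virt_free_irqs(pf, first_idx, num_msix);
}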
diff --git a/drivers/net/ethernet/intel/ice/ice_lag.c b/drivers/net/ethernet/intel/ice/ice_lag.c
index 1ccb572ce285..22371011c249 100644
--- a/drivers/net/ethernet/intel/ice/ice_lag.c
+++ b/drivers/net/ethernet/intel/ice/ice_lag.c
@@ -1001,6 +1001,28 @@ static void ice_lag_link(struct ice_lag *lag)
}
/**
+ * ice_lag_config_eswitch - configure eswitch to work with LAG
+ * @lag: lag info struct
+ * @netdev: active network interface device struct
+ *
+ * Updates all port representors in eswitch to use @netdev for Tx.
+ *
+ * Configures the netdev to keep dst metadata (also used in representor Tx).
+ * This is required for an uplink without switchdev mode configured.
+ */
+static void ice_lag_config_eswitch(struct ice_lag *lag,
+ struct net_device *netdev)
+{
+ struct ice_repr *repr;
+ unsigned long id;
+
+ xa_for_each(&lag->pf->eswitch.reprs, id, repr)
+ repr->dst->u.port_info.lower_dev = netdev;
+
+ netif_keep_dst(netdev);
+}
+
+/**
* ice_lag_unlink - handle unlink event
* @lag: LAG info struct
*/
@@ -1021,6 +1043,9 @@ static void ice_lag_unlink(struct ice_lag *lag)
ice_lag_move_vf_nodes(lag, act_port, pri_port);
lag->primary = false;
lag->active_port = ICE_LAG_INVALID_PORT;
+
+ /* Config primary's eswitch back to normal operation. */
+ ice_lag_config_eswitch(lag, lag->netdev);
} else {
struct ice_lag *primary_lag;
@@ -1419,6 +1444,7 @@ static void ice_lag_monitor_active(struct ice_lag *lag, void *ptr)
ice_lag_move_vf_nodes(lag, prim_port,
event_port);
lag->active_port = event_port;
+ ice_lag_config_eswitch(lag, event_netdev);
return;
}
@@ -1428,6 +1454,7 @@ static void ice_lag_monitor_active(struct ice_lag *lag, void *ptr)
/* new active port */
ice_lag_move_vf_nodes(lag, lag->active_port, event_port);
lag->active_port = event_port;
+ ice_lag_config_eswitch(lag, event_netdev);
} else {
/* port not set as currently active (e.g. new active port
* has already claimed the nodes and filters
diff --git a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
index 1479b45738af..77ba26538b07 100644
--- a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
+++ b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
@@ -229,7 +229,7 @@ struct ice_32b_rx_flex_desc_nic {
__le16 status_error1;
u8 flexi_flags2;
u8 ts_low;
- __le16 l2tag2_1st;
+ __le16 raw_csum;
__le16 l2tag2_2nd;
/* Qword 3 */
@@ -478,10 +478,15 @@ enum ice_tx_desc_len_fields {
struct ice_tx_ctx_desc {
__le32 tunneling_params;
__le16 l2tag2;
- __le16 rsvd;
+ __le16 gcs;
__le64 qw1;
};
+#define ICE_TX_GCS_DESC_START_M GENMASK(7, 0)
+#define ICE_TX_GCS_DESC_OFFSET_M GENMASK(11, 8)
+#define ICE_TX_GCS_DESC_TYPE_M GENMASK(14, 12)
+#define ICE_TX_GCS_DESC_CSUM_PSH 1
+
#define ICE_TXD_CTX_QW1_CMD_S 4
#define ICE_TXD_CTX_QW1_CMD_M (0x7FUL << ICE_TXD_CTX_QW1_CMD_S)
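The repurposed gcs word in the Tx context descriptor carries a checksum start, an offset and a type, packed with the masks defined above. A hedged sketch of how such a word might be assembled — csum_start and csum_offset are hypothetical values, FIELD_PREP() is from <linux/bitfield.h>, and the actual Tx-path code is not part of this hunk:

/* Illustrative packing of the new Tx context descriptor gcs word. */
static void example_fill_gcs(struct ice_tx_ctx_desc *ctx_desc,
			     u16 csum_start, u16 csum_offset)
{
	u16 gcs = FIELD_PREP(ICE_TX_GCS_DESC_START_M, csum_start) |
		  FIELD_PREP(ICE_TX_GCS_DESC_OFFSET_M, csum_offset) |
		  FIELD_PREP(ICE_TX_GCS_DESC_TYPE_M, ICE_TX_GCS_DESC_CSUM_PSH);

	ctx_desc->gcs = cpu_to_le16(gcs);
}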
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index 38a1c8372180..0bcf9d127ac9 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -157,6 +157,16 @@ static void ice_vsi_set_num_desc(struct ice_vsi *vsi)
}
}
+static u16 ice_get_rxq_count(struct ice_pf *pf)
+{
+ return min(ice_get_avail_rxq_count(pf), num_online_cpus());
+}
+
+static u16 ice_get_txq_count(struct ice_pf *pf)
+{
+ return min(ice_get_avail_txq_count(pf), num_online_cpus());
+}
+
/**
* ice_vsi_set_num_qs - Set number of queues, descriptors and vectors for a VSI
* @vsi: the VSI being configured
@@ -178,9 +188,7 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi)
vsi->alloc_txq = vsi->req_txq;
vsi->num_txq = vsi->req_txq;
} else {
- vsi->alloc_txq = min3(pf->num_lan_msix,
- ice_get_avail_txq_count(pf),
- (u16)num_online_cpus());
+ vsi->alloc_txq = ice_get_txq_count(pf);
}
pf->num_lan_tx = vsi->alloc_txq;
@@ -193,17 +201,13 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi)
vsi->alloc_rxq = vsi->req_rxq;
vsi->num_rxq = vsi->req_rxq;
} else {
- vsi->alloc_rxq = min3(pf->num_lan_msix,
- ice_get_avail_rxq_count(pf),
- (u16)num_online_cpus());
+ vsi->alloc_rxq = ice_get_rxq_count(pf);
}
}
pf->num_lan_rx = vsi->alloc_rxq;
- vsi->num_q_vectors = min_t(int, pf->num_lan_msix,
- max_t(int, vsi->alloc_rxq,
- vsi->alloc_txq));
+ vsi->num_q_vectors = max(vsi->alloc_rxq, vsi->alloc_txq);
break;
case ICE_VSI_SF:
vsi->alloc_txq = 1;
@@ -567,6 +571,8 @@ ice_vsi_alloc_def(struct ice_vsi *vsi, struct ice_channel *ch)
return -ENOMEM;
}
+ vsi->irq_dyn_alloc = pci_msix_can_alloc_dyn(vsi->back->pdev);
+
switch (vsi->type) {
case ICE_VSI_PF:
case ICE_VSI_SF:
@@ -827,7 +833,13 @@ bool ice_is_safe_mode(struct ice_pf *pf)
*/
bool ice_is_rdma_ena(struct ice_pf *pf)
{
- return test_bit(ICE_FLAG_RDMA_ENA, pf->flags);
+ union devlink_param_value value;
+ int err;
+
+ err = devl_param_driverinit_value_get(priv_to_devlink(pf),
+ DEVLINK_PARAM_GENERIC_ID_ENABLE_RDMA,
+ &value);
+ return err ? test_bit(ICE_FLAG_RDMA_ENA, pf->flags) : value.vbool;
}
/**
@@ -1173,12 +1185,11 @@ static void ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi)
static void
ice_chnl_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
{
- struct ice_pf *pf = vsi->back;
u16 qcount, qmap;
u8 offset = 0;
int pow;
- qcount = min_t(int, vsi->num_rxq, pf->num_lan_msix);
+ qcount = vsi->num_rxq;
pow = order_base_2(qcount);
qmap = FIELD_PREP(ICE_AQ_VSI_TC_Q_OFFSET_M, offset);
@@ -1420,6 +1431,10 @@ static int ice_vsi_alloc_rings(struct ice_vsi *vsi)
ring->dev = dev;
ring->count = vsi->num_rx_desc;
ring->cached_phctime = pf->ptp.cached_phc_time;
+
+ if (ice_is_feature_supported(pf, ICE_F_GCS))
+ ring->flags |= ICE_RX_FLAGS_RING_GCS;
+
WRITE_ONCE(vsi->rx_rings[i], ring);
}
@@ -1764,9 +1779,8 @@ void ice_update_eth_stats(struct ice_vsi *vsi)
* @prio: priority for the RXDID for this queue
* @ena_ts: true to enable timestamp and false to disable timestamp
*/
-void
-ice_write_qrxflxp_cntxt(struct ice_hw *hw, u16 pf_q, u32 rxdid, u32 prio,
- bool ena_ts)
+void ice_write_qrxflxp_cntxt(struct ice_hw *hw, u16 pf_q, u32 rxdid, u32 prio,
+ bool ena_ts)
{
int regval = rd32(hw, QRXFLXP_CNTXT(pf_q));
@@ -2582,7 +2596,6 @@ void ice_vsi_free_irq(struct ice_vsi *vsi)
return;
vsi->irqs_ready = false;
- ice_free_cpu_rx_rmap(vsi);
ice_for_each_q_vector(vsi, i) {
int irq_num;
@@ -2595,12 +2608,6 @@ void ice_vsi_free_irq(struct ice_vsi *vsi)
vsi->q_vectors[i]->num_ring_rx))
continue;
- /* clear the affinity notifier in the IRQ descriptor */
- if (!IS_ENABLED(CONFIG_RFS_ACCEL))
- irq_set_affinity_notifier(irq_num, NULL);
-
- /* clear the affinity_hint in the IRQ descriptor */
- irq_update_affinity_hint(irq_num, NULL);
synchronize_irq(irq_num);
devm_free_irq(ice_pf_to_dev(pf), irq_num, vsi->q_vectors[i]);
}
@@ -2755,11 +2762,18 @@ void ice_vsi_set_napi_queues(struct ice_vsi *vsi)
void ice_vsi_clear_napi_queues(struct ice_vsi *vsi)
{
struct net_device *netdev = vsi->netdev;
- int q_idx;
+ int q_idx, v_idx;
if (!netdev)
return;
+ /* Clear the NAPI's interrupt number */
+ ice_for_each_q_vector(vsi, v_idx) {
+ struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
+
+ netif_napi_set_irq(&q_vector->napi, -1);
+ }
+
ice_for_each_txq(vsi, q_idx)
netif_queue_set_napi(netdev, q_idx, NETDEV_QUEUE_TYPE_TX, NULL);
@@ -3882,15 +3896,17 @@ void ice_init_feature_support(struct ice_pf *pf)
ice_set_feature_support(pf, ICE_F_CGU);
if (ice_is_clock_mux_in_netlist(&pf->hw))
ice_set_feature_support(pf, ICE_F_SMA_CTRL);
- if (ice_gnss_is_gps_present(&pf->hw))
+ if (ice_gnss_is_module_present(&pf->hw))
ice_set_feature_support(pf, ICE_F_GNSS);
break;
default:
break;
}
- if (pf->hw.mac_type == ICE_MAC_E830)
+ if (pf->hw.mac_type == ICE_MAC_E830) {
ice_set_feature_support(pf, ICE_F_MBX_LIMIT);
+ ice_set_feature_support(pf, ICE_F_GCS);
+ }
}
/**
@@ -3937,24 +3953,6 @@ void ice_vsi_ctx_clear_antispoof(struct ice_vsi_ctx *ctx)
}
/**
- * ice_vsi_ctx_set_allow_override - allow destination override on VSI
- * @ctx: pointer to VSI ctx structure
- */
-void ice_vsi_ctx_set_allow_override(struct ice_vsi_ctx *ctx)
-{
- ctx->info.sec_flags |= ICE_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD;
-}
-
-/**
- * ice_vsi_ctx_clear_allow_override - turn off destination override on VSI
- * @ctx: pointer to VSI ctx structure
- */
-void ice_vsi_ctx_clear_allow_override(struct ice_vsi_ctx *ctx)
-{
- ctx->info.sec_flags &= ~ICE_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD;
-}
-
-/**
* ice_vsi_update_local_lb - update sw block in VSI with local loopback bit
* @vsi: pointer to VSI structure
* @set: set or unset the bit
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h
index eabb35834a24..b4c9cb28a016 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_lib.h
@@ -105,10 +105,6 @@ ice_vsi_update_security(struct ice_vsi *vsi, void (*fill)(struct ice_vsi_ctx *))
void ice_vsi_ctx_set_antispoof(struct ice_vsi_ctx *ctx);
void ice_vsi_ctx_clear_antispoof(struct ice_vsi_ctx *ctx);
-
-void ice_vsi_ctx_set_allow_override(struct ice_vsi_ctx *ctx);
-
-void ice_vsi_ctx_clear_allow_override(struct ice_vsi_ctx *ctx);
int ice_vsi_update_local_lb(struct ice_vsi *vsi, bool set);
int ice_vsi_add_vlan_zero(struct ice_vsi *vsi);
int ice_vsi_del_vlan_zero(struct ice_vsi *vsi);
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index c3a0fb97c5ee..049edeb60104 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -2528,34 +2528,6 @@ int ice_schedule_reset(struct ice_pf *pf, enum ice_reset_req reset)
}
/**
- * ice_irq_affinity_notify - Callback for affinity changes
- * @notify: context as to what irq was changed
- * @mask: the new affinity mask
- *
- * This is a callback function used by the irq_set_affinity_notifier function
- * so that we may register to receive changes to the irq affinity masks.
- */
-static void
-ice_irq_affinity_notify(struct irq_affinity_notify *notify,
- const cpumask_t *mask)
-{
- struct ice_q_vector *q_vector =
- container_of(notify, struct ice_q_vector, affinity_notify);
-
- cpumask_copy(&q_vector->affinity_mask, mask);
-}
-
-/**
- * ice_irq_affinity_release - Callback for affinity notifier release
- * @ref: internal core kernel usage
- *
- * This is a callback function used by the irq_set_affinity_notifier function
- * to inform the current notification subscriber that they will no longer
- * receive notifications.
- */
-static void ice_irq_affinity_release(struct kref __always_unused *ref) {}
-
-/**
* ice_vsi_ena_irq - Enable IRQ for the given VSI
* @vsi: the VSI being configured
*/
@@ -2618,19 +2590,6 @@ static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename)
err);
goto free_q_irqs;
}
-
- /* register for affinity change notifications */
- if (!IS_ENABLED(CONFIG_RFS_ACCEL)) {
- struct irq_affinity_notify *affinity_notify;
-
- affinity_notify = &q_vector->affinity_notify;
- affinity_notify->notify = ice_irq_affinity_notify;
- affinity_notify->release = ice_irq_affinity_release;
- irq_set_affinity_notifier(irq_num, affinity_notify);
- }
-
- /* assign the mask for this irq */
- irq_update_affinity_hint(irq_num, &q_vector->affinity_mask);
}
err = ice_set_cpu_rx_rmap(vsi);
@@ -2646,9 +2605,6 @@ static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename)
free_q_irqs:
while (vector--) {
irq_num = vsi->q_vectors[vector]->irq.virq;
- if (!IS_ENABLED(CONFIG_RFS_ACCEL))
- irq_set_affinity_notifier(irq_num, NULL);
- irq_update_affinity_hint(irq_num, NULL);
devm_free_irq(dev, irq_num, &vsi->q_vectors[vector]);
}
return err;
@@ -3304,22 +3260,8 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
if (oicr & PFINT_OICR_TSYN_TX_M) {
ena_mask &= ~PFINT_OICR_TSYN_TX_M;
- if (ice_pf_state_is_nominal(pf) &&
- pf->hw.dev_caps.ts_dev_info.ts_ll_int_read) {
- struct ice_ptp_tx *tx = &pf->ptp.port.tx;
- unsigned long flags;
- u8 idx;
-
- spin_lock_irqsave(&tx->lock, flags);
- idx = find_next_bit_wrap(tx->in_use, tx->len,
- tx->last_ll_ts_idx_read + 1);
- if (idx != tx->len)
- ice_ptp_req_tx_single_tstamp(tx, idx);
- spin_unlock_irqrestore(&tx->lock, flags);
- } else if (ice_ptp_pf_handles_tx_interrupt(pf)) {
- set_bit(ICE_MISC_THREAD_TX_TSTAMP, pf->misc_thread);
- ret = IRQ_WAKE_THREAD;
- }
+
+ ret = ice_ptp_ts_irq(pf);
}
if (oicr & PFINT_OICR_TSYN_EVNT_M) {
@@ -3689,6 +3631,15 @@ void ice_set_netdev_features(struct net_device *netdev)
*/
netdev->hw_features |= NETIF_F_RXFCS;
+ /* Allow core to manage IRQs affinity */
+ netif_set_affinity_auto(netdev);
+
+ /* Mutual exclusivity for TSO and GCS is enforced by the set features
+ * ndo callback.
+ */
+ if (ice_is_feature_supported(pf, ICE_F_GCS))
+ netdev->hw_features |= NETIF_F_HW_CSUM;
+
netif_set_tso_max_size(netdev, ICE_MAX_TSO_SIZE);
}
@@ -4066,8 +4017,7 @@ static void ice_set_pf_caps(struct ice_pf *pf)
}
clear_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags);
- if (func_caps->common_cap.ieee_1588 &&
- !(pf->hw.mac_type == ICE_MAC_E830))
+ if (func_caps->common_cap.ieee_1588)
set_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags);
pf->max_pf_txqs = func_caps->common_cap.num_txq;
@@ -5065,16 +5015,16 @@ static int ice_init_devlink(struct ice_pf *pf)
return err;
ice_devlink_init_regions(pf);
- ice_health_init(pf);
ice_devlink_register(pf);
+ ice_health_init(pf);
return 0;
}
static void ice_deinit_devlink(struct ice_pf *pf)
{
- ice_devlink_unregister(pf);
ice_health_deinit(pf);
+ ice_devlink_unregister(pf);
ice_devlink_destroy_regions(pf);
ice_devlink_unregister_params(pf);
}
@@ -5087,6 +5037,12 @@ static int ice_init(struct ice_pf *pf)
if (err)
return err;
+ if (pf->hw.mac_type == ICE_MAC_E830) {
+ err = pci_enable_ptm(pf->pdev, NULL);
+ if (err)
+ dev_dbg(ice_pf_to_dev(pf), "PCIe PTM not supported by PCIe bus/controller\n");
+ }
+
err = ice_alloc_vsis(pf);
if (err)
goto err_alloc_vsis;
@@ -5186,11 +5142,12 @@ int ice_load(struct ice_pf *pf)
ice_napi_add(vsi);
+ ice_init_features(pf);
+
err = ice_init_rdma(pf);
if (err)
goto err_init_rdma;
- ice_init_features(pf);
ice_service_task_restart(pf);
clear_bit(ICE_DOWN, pf->state);
@@ -5198,6 +5155,7 @@ int ice_load(struct ice_pf *pf)
return 0;
err_init_rdma:
+ ice_deinit_features(pf);
ice_tc_indir_block_unregister(vsi);
err_tc_indir_block_register:
ice_unregister_netdev(vsi);
@@ -5221,8 +5179,8 @@ void ice_unload(struct ice_pf *pf)
devl_assert_locked(priv_to_devlink(pf));
- ice_deinit_features(pf);
ice_deinit_rdma(pf);
+ ice_deinit_features(pf);
ice_tc_indir_block_unregister(vsi);
ice_unregister_netdev(vsi);
ice_devlink_destroy_pf_port(pf);
@@ -6597,6 +6555,18 @@ ice_set_features(struct net_device *netdev, netdev_features_t features)
if (changed & NETIF_F_LOOPBACK)
ret = ice_set_loopback(vsi, !!(features & NETIF_F_LOOPBACK));
+ /* Due to E830 hardware limitations, TSO (NETIF_F_ALL_TSO) with GCS
+ * (NETIF_F_HW_CSUM) is not supported.
+ */
+ if (ice_is_feature_supported(pf, ICE_F_GCS) &&
+ ((features & NETIF_F_HW_CSUM) && (features & NETIF_F_ALL_TSO))) {
+ if (netdev->features & NETIF_F_HW_CSUM)
+ dev_err(ice_pf_to_dev(pf), "To enable TSO, you must first disable HW checksum.\n");
+ else
+ dev_err(ice_pf_to_dev(pf), "To enable HW checksum, you must first disable TSO.\n");
+ return -EIO;
+ }
+
return ret;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c
index e26320ce52ca..1fd1ae03eb90 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp.c
+++ b/drivers/net/ethernet/intel/ice/ice_ptp.c
@@ -298,8 +298,8 @@ void ice_ptp_restore_timestamp_mode(struct ice_pf *pf)
* @sts: Optional parameter for holding a pair of system timestamps from
* the system clock. Will be ignored if NULL is given.
*/
-static u64
-ice_ptp_read_src_clk_reg(struct ice_pf *pf, struct ptp_system_timestamp *sts)
+u64 ice_ptp_read_src_clk_reg(struct ice_pf *pf,
+ struct ptp_system_timestamp *sts)
{
struct ice_hw *hw = &pf->hw;
u32 hi, lo, lo2;
@@ -310,6 +310,15 @@ ice_ptp_read_src_clk_reg(struct ice_pf *pf, struct ptp_system_timestamp *sts)
/* Read the system timestamp pre PHC read */
ptp_read_system_prets(sts);
+ if (hw->mac_type == ICE_MAC_E830) {
+ u64 clk_time = rd64(hw, E830_GLTSYN_TIME_L(tmr_idx));
+
+ /* Read the system timestamp post PHC read */
+ ptp_read_system_postts(sts);
+
+ return clk_time;
+ }
+
lo = rd32(hw, GLTSYN_TIME_L(tmr_idx));
/* Read the system timestamp post PHC read */
@@ -972,28 +981,6 @@ ice_ptp_release_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx)
}
/**
- * ice_ptp_init_tx_eth56g - Initialize tracking for Tx timestamps
- * @pf: Board private structure
- * @tx: the Tx tracking structure to initialize
- * @port: the port this structure tracks
- *
- * Initialize the Tx timestamp tracker for this port. ETH56G PHYs
- * have independent memory blocks for all ports.
- *
- * Return: 0 for success, -ENOMEM when failed to allocate Tx tracker
- */
-static int ice_ptp_init_tx_eth56g(struct ice_pf *pf, struct ice_ptp_tx *tx,
- u8 port)
-{
- tx->block = port;
- tx->offset = 0;
- tx->len = INDEX_PER_PORT_ETH56G;
- tx->has_ready_bitmap = 1;
-
- return ice_ptp_alloc_tx_tracker(tx);
-}
-
-/**
* ice_ptp_init_tx_e82x - Initialize tracking for Tx timestamps
* @pf: Board private structure
* @tx: the Tx tracking structure to initialize
@@ -1003,9 +990,11 @@ static int ice_ptp_init_tx_eth56g(struct ice_pf *pf, struct ice_ptp_tx *tx,
* the timestamp block is shared for all ports in the same quad. To avoid
* ports using the same timestamp index, logically break the block of
* registers into chunks based on the port number.
+ *
+ * Return: 0 on success, -ENOMEM when out of memory
*/
-static int
-ice_ptp_init_tx_e82x(struct ice_pf *pf, struct ice_ptp_tx *tx, u8 port)
+static int ice_ptp_init_tx_e82x(struct ice_pf *pf, struct ice_ptp_tx *tx,
+ u8 port)
{
tx->block = ICE_GET_QUAD_NUM(port);
tx->offset = (port % ICE_PORTS_PER_QUAD) * INDEX_PER_PORT_E82X;
@@ -1016,24 +1005,27 @@ ice_ptp_init_tx_e82x(struct ice_pf *pf, struct ice_ptp_tx *tx, u8 port)
}
/**
- * ice_ptp_init_tx_e810 - Initialize tracking for Tx timestamps
+ * ice_ptp_init_tx - Initialize tracking for Tx timestamps
* @pf: Board private structure
* @tx: the Tx tracking structure to initialize
+ * @port: the port this structure tracks
+ *
+ * Initialize the Tx timestamp tracker for this PF. For all PHYs except E82X,
+ * each port has its own block of timestamps, independent of the other ports.
*
- * Initialize the Tx timestamp tracker for this PF. For E810 devices, each
- * port has its own block of timestamps, independent of the other ports.
+ * Return: 0 on success, -ENOMEM when out of memory
*/
-static int
-ice_ptp_init_tx_e810(struct ice_pf *pf, struct ice_ptp_tx *tx)
+static int ice_ptp_init_tx(struct ice_pf *pf, struct ice_ptp_tx *tx, u8 port)
{
- tx->block = pf->hw.port_info->lport;
+ tx->block = port;
tx->offset = 0;
- tx->len = INDEX_PER_PORT_E810;
+ tx->len = INDEX_PER_PORT;
+
/* The E810 PHY does not provide a timestamp ready bitmap. Instead,
* verify new timestamps against cached copy of the last read
* timestamp.
*/
- tx->has_ready_bitmap = 0;
+ tx->has_ready_bitmap = pf->hw.mac_type != ICE_MAC_E810;
return ice_ptp_alloc_tx_tracker(tx);
}
@@ -1318,20 +1310,21 @@ ice_ptp_port_phy_stop(struct ice_ptp_port *ptp_port)
struct ice_hw *hw = &pf->hw;
int err;
- if (ice_is_e810(hw))
- return 0;
-
mutex_lock(&ptp_port->ps_lock);
- switch (ice_get_phy_model(hw)) {
- case ICE_PHY_ETH56G:
- err = ice_stop_phy_timer_eth56g(hw, port, true);
+ switch (hw->mac_type) {
+ case ICE_MAC_E810:
+ case ICE_MAC_E830:
+ err = 0;
break;
- case ICE_PHY_E82X:
+ case ICE_MAC_GENERIC:
kthread_cancel_delayed_work_sync(&ptp_port->ov_work);
err = ice_stop_phy_timer_e82x(hw, port, true);
break;
+ case ICE_MAC_GENERIC_3K_E825:
+ err = ice_stop_phy_timer_eth56g(hw, port, true);
+ break;
default:
err = -ENODEV;
}
@@ -1361,19 +1354,17 @@ ice_ptp_port_phy_restart(struct ice_ptp_port *ptp_port)
unsigned long flags;
int err;
- if (ice_is_e810(hw))
- return 0;
-
if (!ptp_port->link_up)
return ice_ptp_port_phy_stop(ptp_port);
mutex_lock(&ptp_port->ps_lock);
- switch (ice_get_phy_model(hw)) {
- case ICE_PHY_ETH56G:
- err = ice_start_phy_timer_eth56g(hw, port);
+ switch (hw->mac_type) {
+ case ICE_MAC_E810:
+ case ICE_MAC_E830:
+ err = 0;
break;
- case ICE_PHY_E82X:
+ case ICE_MAC_GENERIC:
/* Start the PHY timer in Vernier mode */
kthread_cancel_delayed_work_sync(&ptp_port->ov_work);
@@ -1398,6 +1389,9 @@ ice_ptp_port_phy_restart(struct ice_ptp_port *ptp_port)
kthread_queue_delayed_work(pf->ptp.kworker, &ptp_port->ov_work,
0);
break;
+ case ICE_MAC_GENERIC_3K_E825:
+ err = ice_start_phy_timer_eth56g(hw, port);
+ break;
default:
err = -ENODEV;
}
@@ -1432,12 +1426,14 @@ void ice_ptp_link_change(struct ice_pf *pf, bool linkup)
/* Skip HW writes if reset is in progress */
if (pf->hw.reset_ongoing)
return;
- switch (ice_get_phy_model(hw)) {
- case ICE_PHY_E810:
- /* Do not reconfigure E810 PHY */
+
+ switch (hw->mac_type) {
+ case ICE_MAC_E810:
+ case ICE_MAC_E830:
+ /* Do not reconfigure E810 or E830 PHY */
return;
- case ICE_PHY_ETH56G:
- case ICE_PHY_E82X:
+ case ICE_MAC_GENERIC:
+ case ICE_MAC_GENERIC_3K_E825:
ice_ptp_port_phy_restart(ptp_port);
return;
default:
@@ -1465,46 +1461,45 @@ static int ice_ptp_cfg_phy_interrupt(struct ice_pf *pf, bool ena, u32 threshold)
ice_ptp_reset_ts_memory(hw);
- switch (ice_get_phy_model(hw)) {
- case ICE_PHY_ETH56G: {
- int port;
+ switch (hw->mac_type) {
+ case ICE_MAC_E810:
+ case ICE_MAC_E830:
+ return 0;
+ case ICE_MAC_GENERIC: {
+ int quad;
- for (port = 0; port < hw->ptp.num_lports; port++) {
+ for (quad = 0; quad < ICE_GET_QUAD_NUM(hw->ptp.num_lports);
+ quad++) {
int err;
- err = ice_phy_cfg_intr_eth56g(hw, port, ena, threshold);
+ err = ice_phy_cfg_intr_e82x(hw, quad, ena, threshold);
if (err) {
- dev_err(dev, "Failed to configure PHY interrupt for port %d, err %d\n",
- port, err);
+ dev_err(dev, "Failed to configure PHY interrupt for quad %d, err %d\n",
+ quad, err);
return err;
}
}
return 0;
}
- case ICE_PHY_E82X: {
- int quad;
+ case ICE_MAC_GENERIC_3K_E825: {
+ int port;
- for (quad = 0; quad < ICE_GET_QUAD_NUM(hw->ptp.num_lports);
- quad++) {
+ for (port = 0; port < hw->ptp.num_lports; port++) {
int err;
- err = ice_phy_cfg_intr_e82x(hw, quad, ena, threshold);
+ err = ice_phy_cfg_intr_eth56g(hw, port, ena, threshold);
if (err) {
- dev_err(dev, "Failed to configure PHY interrupt for quad %d, err %d\n",
- quad, err);
+ dev_err(dev, "Failed to configure PHY interrupt for port %d, err %d\n",
+ port, err);
return err;
}
}
return 0;
}
- case ICE_PHY_E810:
- return 0;
- case ICE_PHY_UNSUP:
+ case ICE_MAC_UNKNOWN:
default:
- dev_warn(dev, "%s: Unexpected PHY model %d\n", __func__,
- ice_get_phy_model(hw));
return -EOPNOTSUPP;
}
}
@@ -1740,7 +1735,7 @@ static int ice_ptp_write_perout(struct ice_hw *hw, unsigned int chan,
/* 0. Reset mode & out_en in AUX_OUT */
wr32(hw, GLTSYN_AUX_OUT(chan, tmr_idx), 0);
- if (ice_is_e825c(hw)) {
+ if (hw->mac_type == ICE_MAC_GENERIC_3K_E825) {
int err;
/* Enable/disable CGU 1PPS output for E825C */
@@ -1783,6 +1778,7 @@ static int ice_ptp_write_perout(struct ice_hw *hw, unsigned int chan,
8 + chan + (tmr_idx * 4));
wr32(hw, GLGEN_GPIO_CTL(gpio_pin), val);
+ ice_flush(hw);
return 0;
}
@@ -1824,7 +1820,7 @@ static int ice_ptp_cfg_perout(struct ice_pf *pf, struct ptp_perout_request *rq,
return ice_ptp_write_perout(hw, rq->index, gpio_pin, 0, 0);
if (strncmp(pf->ptp.pin_desc[pin_desc_idx].name, "1PPS", 64) == 0 &&
- period != NSEC_PER_SEC && hw->ptp.phy_model == ICE_PHY_E82X) {
+ period != NSEC_PER_SEC && hw->mac_type == ICE_MAC_GENERIC) {
dev_err(ice_pf_to_dev(pf), "1PPS pin supports only 1 s period\n");
return -EOPNOTSUPP;
}
@@ -1843,9 +1839,10 @@ static int ice_ptp_cfg_perout(struct ice_pf *pf, struct ptp_perout_request *rq,
div64_u64_rem(start, period, &phase);
/* If we have only phase or start time is in the past, start the timer
- * at the next multiple of period, maintaining phase.
+	 * at the next multiple of period, maintaining phase, and at least 0.5
+	 * second from now so that there is time to write it to HW.
*/
- clk = ice_ptp_read_src_clk_reg(pf, NULL);
+ clk = ice_ptp_read_src_clk_reg(pf, NULL) + NSEC_PER_MSEC * 500;
if (rq->flags & PTP_PEROUT_PHASE || start <= clk - prop_delay_ns)
start = div64_u64(clk + period - 1, period) * period + phase;
@@ -2078,7 +2075,7 @@ ice_ptp_settime64(struct ptp_clock_info *info, const struct timespec64 *ts)
/* For Vernier mode on E82X, we need to recalibrate after new settime.
* Start with marking timestamps as invalid.
*/
- if (ice_get_phy_model(hw) == ICE_PHY_E82X) {
+ if (hw->mac_type == ICE_MAC_GENERIC) {
err = ice_ptp_clear_phy_offset_ready_e82x(hw);
if (err)
dev_warn(ice_pf_to_dev(pf), "Failed to mark timestamps as invalid before settime\n");
@@ -2102,7 +2099,7 @@ ice_ptp_settime64(struct ptp_clock_info *info, const struct timespec64 *ts)
ice_ptp_enable_all_perout(pf);
/* Recalibrate and re-enable timestamp blocks for E822/E823 */
- if (ice_get_phy_model(hw) == ICE_PHY_E82X)
+ if (hw->mac_type == ICE_MAC_GENERIC)
ice_ptp_restart_all_phy(pf);
exit:
if (err) {
@@ -2180,93 +2177,157 @@ static int ice_ptp_adjtime(struct ptp_clock_info *info, s64 delta)
return 0;
}
+/**
+ * struct ice_crosststamp_cfg - Device cross timestamp configuration
+ * @lock_reg: The hardware semaphore lock to use
+ * @lock_busy: Bit in the semaphore lock indicating the lock is busy
+ * @ctl_reg: The hardware register to request cross timestamp
+ * @ctl_active: Bit in the control register to request cross timestamp
+ * @art_time_l: Lower 32-bits of ART system time
+ * @art_time_h: Upper 32-bits of ART system time
+ * @dev_time_l: Lower 32-bits of device time (per timer index)
+ * @dev_time_h: Upper 32-bits of device time (per timer index)
+ */
+struct ice_crosststamp_cfg {
+ /* HW semaphore lock register */
+ u32 lock_reg;
+ u32 lock_busy;
+
+ /* Capture control register */
+ u32 ctl_reg;
+ u32 ctl_active;
+
+ /* Time storage */
+ u32 art_time_l;
+ u32 art_time_h;
+ u32 dev_time_l[2];
+ u32 dev_time_h[2];
+};
+
+static const struct ice_crosststamp_cfg ice_crosststamp_cfg_e82x = {
+ .lock_reg = PFHH_SEM,
+ .lock_busy = PFHH_SEM_BUSY_M,
+ .ctl_reg = GLHH_ART_CTL,
+ .ctl_active = GLHH_ART_CTL_ACTIVE_M,
+ .art_time_l = GLHH_ART_TIME_L,
+ .art_time_h = GLHH_ART_TIME_H,
+ .dev_time_l[0] = GLTSYN_HHTIME_L(0),
+ .dev_time_h[0] = GLTSYN_HHTIME_H(0),
+ .dev_time_l[1] = GLTSYN_HHTIME_L(1),
+ .dev_time_h[1] = GLTSYN_HHTIME_H(1),
+};
+
#ifdef CONFIG_ICE_HWTS
+static const struct ice_crosststamp_cfg ice_crosststamp_cfg_e830 = {
+ .lock_reg = E830_PFPTM_SEM,
+ .lock_busy = E830_PFPTM_SEM_BUSY_M,
+ .ctl_reg = E830_GLPTM_ART_CTL,
+ .ctl_active = E830_GLPTM_ART_CTL_ACTIVE_M,
+ .art_time_l = E830_GLPTM_ART_TIME_L,
+ .art_time_h = E830_GLPTM_ART_TIME_H,
+ .dev_time_l[0] = E830_GLTSYN_PTMTIME_L(0),
+ .dev_time_h[0] = E830_GLTSYN_PTMTIME_H(0),
+ .dev_time_l[1] = E830_GLTSYN_PTMTIME_L(1),
+ .dev_time_h[1] = E830_GLTSYN_PTMTIME_H(1),
+};
+
+#endif /* CONFIG_ICE_HWTS */
+/**
+ * struct ice_crosststamp_ctx - Device cross timestamp context
+ * @snapshot: snapshot of system clocks for historic interpolation
+ * @pf: pointer to the PF private structure
+ * @cfg: pointer to hardware configuration for cross timestamp
+ */
+struct ice_crosststamp_ctx {
+ struct system_time_snapshot snapshot;
+ struct ice_pf *pf;
+ const struct ice_crosststamp_cfg *cfg;
+};
+
/**
- * ice_ptp_get_syncdevicetime - Get the cross time stamp info
+ * ice_capture_crosststamp - Capture a device/system cross timestamp
* @device: Current device time
* @system: System counter value read synchronously with device time
- * @ctx: Context provided by timekeeping code
+ * @__ctx: Context passed from ice_ptp_getcrosststamp
*
* Read device and system (ART) clock simultaneously and return the corrected
* clock values in ns.
+ *
+ * Return: zero on success, or a negative error code on failure.
*/
-static int
-ice_ptp_get_syncdevicetime(ktime_t *device,
- struct system_counterval_t *system,
- void *ctx)
+static int ice_capture_crosststamp(ktime_t *device,
+ struct system_counterval_t *system,
+ void *__ctx)
{
- struct ice_pf *pf = (struct ice_pf *)ctx;
- struct ice_hw *hw = &pf->hw;
- u32 hh_lock, hh_art_ctl;
- int i;
+ struct ice_crosststamp_ctx *ctx = __ctx;
+ const struct ice_crosststamp_cfg *cfg;
+ u32 lock, ctl, ts_lo, ts_hi, tmr_idx;
+ struct ice_pf *pf;
+ struct ice_hw *hw;
+ int err;
+ u64 ts;
-#define MAX_HH_HW_LOCK_TRIES 5
-#define MAX_HH_CTL_LOCK_TRIES 100
+ cfg = ctx->cfg;
+ pf = ctx->pf;
+ hw = &pf->hw;
- for (i = 0; i < MAX_HH_HW_LOCK_TRIES; i++) {
- /* Get the HW lock */
- hh_lock = rd32(hw, PFHH_SEM + (PFTSYN_SEM_BYTES * hw->pf_id));
- if (hh_lock & PFHH_SEM_BUSY_M) {
- usleep_range(10000, 15000);
- continue;
- }
- break;
- }
- if (hh_lock & PFHH_SEM_BUSY_M) {
- dev_err(ice_pf_to_dev(pf), "PTP failed to get hh lock\n");
+ tmr_idx = hw->func_caps.ts_func_info.tmr_index_assoc;
+ if (tmr_idx > 1)
+ return -EINVAL;
+
+ /* Poll until we obtain the cross-timestamp hardware semaphore */
+ err = rd32_poll_timeout(hw, cfg->lock_reg, lock,
+ !(lock & cfg->lock_busy),
+ 10 * USEC_PER_MSEC, 50 * USEC_PER_MSEC);
+ if (err) {
+ dev_err(ice_pf_to_dev(pf), "PTP failed to get cross timestamp lock\n");
return -EBUSY;
}
+ /* Snapshot system time for historic interpolation */
+ ktime_get_snapshot(&ctx->snapshot);
+
/* Program cmd to master timer */
ice_ptp_src_cmd(hw, ICE_PTP_READ_TIME);
/* Start the ART and device clock sync sequence */
- hh_art_ctl = rd32(hw, GLHH_ART_CTL);
- hh_art_ctl = hh_art_ctl | GLHH_ART_CTL_ACTIVE_M;
- wr32(hw, GLHH_ART_CTL, hh_art_ctl);
-
- for (i = 0; i < MAX_HH_CTL_LOCK_TRIES; i++) {
- /* Wait for sync to complete */
- hh_art_ctl = rd32(hw, GLHH_ART_CTL);
- if (hh_art_ctl & GLHH_ART_CTL_ACTIVE_M) {
- udelay(1);
- continue;
- } else {
- u32 hh_ts_lo, hh_ts_hi, tmr_idx;
- u64 hh_ts;
-
- tmr_idx = hw->func_caps.ts_func_info.tmr_index_assoc;
- /* Read ART time */
- hh_ts_lo = rd32(hw, GLHH_ART_TIME_L);
- hh_ts_hi = rd32(hw, GLHH_ART_TIME_H);
- hh_ts = ((u64)hh_ts_hi << 32) | hh_ts_lo;
- system->cycles = hh_ts;
- system->cs_id = CSID_X86_ART;
- /* Read Device source clock time */
- hh_ts_lo = rd32(hw, GLTSYN_HHTIME_L(tmr_idx));
- hh_ts_hi = rd32(hw, GLTSYN_HHTIME_H(tmr_idx));
- hh_ts = ((u64)hh_ts_hi << 32) | hh_ts_lo;
- *device = ns_to_ktime(hh_ts);
- break;
- }
- }
+ ctl = rd32(hw, cfg->ctl_reg);
+ ctl |= cfg->ctl_active;
+ wr32(hw, cfg->ctl_reg, ctl);
+ /* Poll until hardware completes the capture */
+ err = rd32_poll_timeout(hw, cfg->ctl_reg, ctl, !(ctl & cfg->ctl_active),
+ 5, 20 * USEC_PER_MSEC);
+ if (err)
+ goto err_timeout;
+
+ /* Read ART system time */
+ ts_lo = rd32(hw, cfg->art_time_l);
+ ts_hi = rd32(hw, cfg->art_time_h);
+ ts = ((u64)ts_hi << 32) | ts_lo;
+ system->cycles = ts;
+ system->cs_id = CSID_X86_ART;
+
+ /* Read Device source clock time */
+ ts_lo = rd32(hw, cfg->dev_time_l[tmr_idx]);
+ ts_hi = rd32(hw, cfg->dev_time_h[tmr_idx]);
+ ts = ((u64)ts_hi << 32) | ts_lo;
+ *device = ns_to_ktime(ts);
+
+err_timeout:
/* Clear the master timer */
ice_ptp_src_cmd(hw, ICE_PTP_NOP);
/* Release HW lock */
- hh_lock = rd32(hw, PFHH_SEM + (PFTSYN_SEM_BYTES * hw->pf_id));
- hh_lock = hh_lock & ~PFHH_SEM_BUSY_M;
- wr32(hw, PFHH_SEM + (PFTSYN_SEM_BYTES * hw->pf_id), hh_lock);
-
- if (i == MAX_HH_CTL_LOCK_TRIES)
- return -ETIMEDOUT;
+ lock = rd32(hw, cfg->lock_reg);
+ lock &= ~cfg->lock_busy;
+ wr32(hw, cfg->lock_reg, lock);
- return 0;
+ return err;
}
/**
- * ice_ptp_getcrosststamp_e82x - Capture a device cross timestamp
+ * ice_ptp_getcrosststamp - Capture a device cross timestamp
* @info: the driver's PTP info structure
* @cts: The memory to fill the cross timestamp info
*
@@ -2274,22 +2335,36 @@ ice_ptp_get_syncdevicetime(ktime_t *device,
* clock. Fill the cross timestamp information and report it back to the
* caller.
*
- * This is only valid for E822 and E823 devices which have support for
- * generating the cross timestamp via PCIe PTM.
- *
* In order to correctly correlate the ART timestamp back to the TSC time, the
* CPU must have X86_FEATURE_TSC_KNOWN_FREQ.
+ *
+ * Return: zero on success, or a negative error code on failure.
*/
-static int
-ice_ptp_getcrosststamp_e82x(struct ptp_clock_info *info,
- struct system_device_crosststamp *cts)
+static int ice_ptp_getcrosststamp(struct ptp_clock_info *info,
+ struct system_device_crosststamp *cts)
{
struct ice_pf *pf = ptp_info_to_pf(info);
+ struct ice_crosststamp_ctx ctx = {
+ .pf = pf,
+ };
+
+ switch (pf->hw.mac_type) {
+ case ICE_MAC_GENERIC:
+ case ICE_MAC_GENERIC_3K_E825:
+ ctx.cfg = &ice_crosststamp_cfg_e82x;
+ break;
+#ifdef CONFIG_ICE_HWTS
+ case ICE_MAC_E830:
+ ctx.cfg = &ice_crosststamp_cfg_e830;
+ break;
+#endif /* CONFIG_ICE_HWTS */
+ default:
+ return -EOPNOTSUPP;
+ }
- return get_device_system_crosststamp(ice_ptp_get_syncdevicetime,
- pf, NULL, cts);
+ return get_device_system_crosststamp(ice_capture_crosststamp, &ctx,
+ &ctx.snapshot, cts);
}
-#endif /* CONFIG_ICE_HWTS */
/**
* ice_ptp_get_ts_config - ioctl interface to read the timestamping config
@@ -2550,13 +2625,9 @@ static int ice_ptp_parse_sdp_entries(struct ice_pf *pf, __le16 *entries,
*/
static void ice_ptp_set_funcs_e82x(struct ice_pf *pf)
{
-#ifdef CONFIG_ICE_HWTS
- if (boot_cpu_has(X86_FEATURE_ART) &&
- boot_cpu_has(X86_FEATURE_TSC_KNOWN_FREQ))
- pf->ptp.info.getcrosststamp = ice_ptp_getcrosststamp_e82x;
+ pf->ptp.info.getcrosststamp = ice_ptp_getcrosststamp;
-#endif /* CONFIG_ICE_HWTS */
- if (ice_is_e825c(&pf->hw)) {
+ if (pf->hw.mac_type == ICE_MAC_GENERIC_3K_E825) {
pf->ptp.ice_pin_desc = ice_pin_desc_e825c;
pf->ptp.info.n_pins = ICE_PIN_DESC_ARR_LEN(ice_pin_desc_e825c);
} else {
@@ -2623,6 +2694,28 @@ err:
}
/**
+ * ice_ptp_set_funcs_e830 - Set specialized functions for E830 support
+ * @pf: Board private structure
+ *
+ * Assign functions to the PTP capabilities structure for E830 devices.
+ * Functions which operate across all device families should be set directly
+ * in ice_ptp_set_caps. Only add functions here which are distinct for E830
+ * devices.
+ */
+static void ice_ptp_set_funcs_e830(struct ice_pf *pf)
+{
+#ifdef CONFIG_ICE_HWTS
+ if (pcie_ptm_enabled(pf->pdev) && boot_cpu_has(X86_FEATURE_ART))
+ pf->ptp.info.getcrosststamp = ice_ptp_getcrosststamp;
+
+#endif /* CONFIG_ICE_HWTS */
+ /* Rest of the config is the same as base E810 */
+ pf->ptp.ice_pin_desc = ice_pin_desc_e810;
+ pf->ptp.info.n_pins = ICE_PIN_DESC_ARR_LEN(ice_pin_desc_e810);
+ ice_ptp_setup_pin_cfg(pf);
+}
+
+/**
* ice_ptp_set_caps - Set PTP capabilities
* @pf: Board private structure
*/
@@ -2644,10 +2737,20 @@ static void ice_ptp_set_caps(struct ice_pf *pf)
info->enable = ice_ptp_gpio_enable;
info->verify = ice_verify_pin;
- if (ice_is_e810(&pf->hw))
+ switch (pf->hw.mac_type) {
+ case ICE_MAC_E810:
ice_ptp_set_funcs_e810(pf);
- else
+ return;
+ case ICE_MAC_E830:
+ ice_ptp_set_funcs_e830(pf);
+ return;
+ case ICE_MAC_GENERIC:
+ case ICE_MAC_GENERIC_3K_E825:
ice_ptp_set_funcs_e82x(pf);
+ return;
+ default:
+ return;
+ }
}
/**
@@ -2758,6 +2861,65 @@ enum ice_tx_tstamp_work ice_ptp_process_ts(struct ice_pf *pf)
}
/**
+ * ice_ptp_ts_irq - Process the PTP Tx timestamps in IRQ context
+ * @pf: Board private structure
+ *
+ * Return: IRQ_WAKE_THREAD if Tx timestamp read has to be handled in the bottom
+ * half of the interrupt and IRQ_HANDLED otherwise.
+ */
+irqreturn_t ice_ptp_ts_irq(struct ice_pf *pf)
+{
+ struct ice_hw *hw = &pf->hw;
+
+ switch (hw->mac_type) {
+ case ICE_MAC_E810:
+ /* E810 capable of low latency timestamping with interrupt can
+ * request a single timestamp in the top half and wait for
+ * a second LL TS interrupt from the FW when it's ready.
+ */
+ if (hw->dev_caps.ts_dev_info.ts_ll_int_read) {
+ struct ice_ptp_tx *tx = &pf->ptp.port.tx;
+ u8 idx;
+
+ if (!ice_pf_state_is_nominal(pf))
+ return IRQ_HANDLED;
+
+ spin_lock(&tx->lock);
+ idx = find_next_bit_wrap(tx->in_use, tx->len,
+ tx->last_ll_ts_idx_read + 1);
+ if (idx != tx->len)
+ ice_ptp_req_tx_single_tstamp(tx, idx);
+ spin_unlock(&tx->lock);
+
+ return IRQ_HANDLED;
+ }
+ fallthrough; /* non-LL_TS E810 */
+ case ICE_MAC_GENERIC:
+ case ICE_MAC_GENERIC_3K_E825:
+ /* All other devices process timestamps in the bottom half due
+ * to sleeping or polling.
+ */
+ if (!ice_ptp_pf_handles_tx_interrupt(pf))
+ return IRQ_HANDLED;
+
+ set_bit(ICE_MISC_THREAD_TX_TSTAMP, pf->misc_thread);
+ return IRQ_WAKE_THREAD;
+ case ICE_MAC_E830:
+ /* E830 can read timestamps in the top half using rd32() */
+ if (ice_ptp_process_ts(pf) == ICE_TX_TSTAMP_WORK_PENDING) {
+ /* Process outstanding Tx timestamps. If there
+ * is more work, re-arm the interrupt to trigger again.
+ */
+ wr32(hw, PFINT_OICR, PFINT_OICR_TSYN_TX_M);
+ ice_flush(hw);
+ }
+ return IRQ_HANDLED;
+ default:
+ return IRQ_HANDLED;
+ }
+}
+
+/**
 * ice_ptp_maybe_trigger_tx_interrupt - Trigger Tx timestamp interrupt
* @pf: Board private structure
*
@@ -2777,7 +2939,7 @@ static void ice_ptp_maybe_trigger_tx_interrupt(struct ice_pf *pf)
bool trigger_oicr = false;
unsigned int i;
- if (ice_is_e810(hw))
+ if (!pf->ptp.port.tx.has_ready_bitmap)
return;
if (!ice_pf_src_tmr_owned(pf))
@@ -2912,14 +3074,12 @@ static int ice_ptp_rebuild_owner(struct ice_pf *pf)
*/
ice_ptp_flush_all_tx_tracker(pf);
- if (!ice_is_e810(hw)) {
- /* Enable quad interrupts */
- err = ice_ptp_cfg_phy_interrupt(pf, true, 1);
- if (err)
- return err;
+ /* Enable quad interrupts */
+ err = ice_ptp_cfg_phy_interrupt(pf, true, 1);
+ if (err)
+ return err;
- ice_ptp_restart_all_phy(pf);
- }
+ ice_ptp_restart_all_phy(pf);
/* Re-enable all periodic outputs and external timestamp events */
ice_ptp_enable_all_perout(pf);
@@ -2971,8 +3131,9 @@ err:
static bool ice_is_primary(struct ice_hw *hw)
{
- return ice_is_e825c(hw) && ice_is_dual(hw) ?
- !!(hw->dev_caps.nac_topo.mode & ICE_NAC_TOPO_PRIMARY_M) : true;
+ return hw->mac_type == ICE_MAC_GENERIC_3K_E825 && ice_is_dual(hw) ?
+ !!(hw->dev_caps.nac_topo.mode & ICE_NAC_TOPO_PRIMARY_M) :
+ true;
}
static int ice_ptp_setup_adapter(struct ice_pf *pf)
@@ -2990,7 +3151,7 @@ static int ice_ptp_setup_pf(struct ice_pf *pf)
struct ice_ptp *ctrl_ptp = ice_get_ctrl_ptp(pf);
struct ice_ptp *ptp = &pf->ptp;
- if (WARN_ON(!ctrl_ptp) || ice_get_phy_model(&pf->hw) == ICE_PHY_UNSUP)
+ if (WARN_ON(!ctrl_ptp) || pf->hw.mac_type == ICE_MAC_UNKNOWN)
return -ENODEV;
INIT_LIST_HEAD(&ptp->port.list_node);
@@ -3007,7 +3168,7 @@ static void ice_ptp_cleanup_pf(struct ice_pf *pf)
{
struct ice_ptp *ptp = &pf->ptp;
- if (ice_get_phy_model(&pf->hw) != ICE_PHY_UNSUP) {
+ if (pf->hw.mac_type != ICE_MAC_UNKNOWN) {
mutex_lock(&pf->adapter->ports.lock);
list_del(&ptp->port.list_node);
mutex_unlock(&pf->adapter->ports.lock);
@@ -3127,6 +3288,8 @@ static int ice_ptp_init_work(struct ice_pf *pf, struct ice_ptp *ptp)
* ice_ptp_init_port - Initialize PTP port structure
* @pf: Board private structure
* @ptp_port: PTP port structure
+ *
+ * Return: 0 on success, -ENODEV on invalid MAC type, -ENOMEM on failed alloc.
*/
static int ice_ptp_init_port(struct ice_pf *pf, struct ice_ptp_port *ptp_port)
{
@@ -3134,16 +3297,14 @@ static int ice_ptp_init_port(struct ice_pf *pf, struct ice_ptp_port *ptp_port)
mutex_init(&ptp_port->ps_lock);
- switch (ice_get_phy_model(hw)) {
- case ICE_PHY_ETH56G:
- return ice_ptp_init_tx_eth56g(pf, &ptp_port->tx,
- ptp_port->port_num);
- case ICE_PHY_E810:
- return ice_ptp_init_tx_e810(pf, &ptp_port->tx);
- case ICE_PHY_E82X:
+ switch (hw->mac_type) {
+ case ICE_MAC_E810:
+ case ICE_MAC_E830:
+ case ICE_MAC_GENERIC_3K_E825:
+ return ice_ptp_init_tx(pf, &ptp_port->tx, ptp_port->port_num);
+ case ICE_MAC_GENERIC:
kthread_init_delayed_work(&ptp_port->ov_work,
ice_ptp_wait_for_offsets);
-
return ice_ptp_init_tx_e82x(pf, &ptp_port->tx,
ptp_port->port_num);
default:
@@ -3162,8 +3323,8 @@ static int ice_ptp_init_port(struct ice_pf *pf, struct ice_ptp_port *ptp_port)
*/
static void ice_ptp_init_tx_interrupt_mode(struct ice_pf *pf)
{
- switch (ice_get_phy_model(&pf->hw)) {
- case ICE_PHY_E82X:
+ switch (pf->hw.mac_type) {
+ case ICE_MAC_GENERIC:
/* E822 based PHY has the clock owner process the interrupt
* for all ports.
*/
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.h b/drivers/net/ethernet/intel/ice/ice_ptp.h
index a1d0e988c084..3b769a0cad00 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp.h
+++ b/drivers/net/ethernet/intel/ice/ice_ptp.h
@@ -128,8 +128,7 @@ struct ice_ptp_tx {
/* Quad and port information for initializing timestamp blocks */
#define INDEX_PER_QUAD 64
#define INDEX_PER_PORT_E82X 16
-#define INDEX_PER_PORT_E810 64
-#define INDEX_PER_PORT_ETH56G 64
+#define INDEX_PER_PORT 64
/**
* struct ice_ptp_port - data used to initialize an external port for PTP
@@ -304,6 +303,9 @@ s8 ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb);
void ice_ptp_req_tx_single_tstamp(struct ice_ptp_tx *tx, u8 idx);
void ice_ptp_complete_tx_single_tstamp(struct ice_ptp_tx *tx);
enum ice_tx_tstamp_work ice_ptp_process_ts(struct ice_pf *pf);
+irqreturn_t ice_ptp_ts_irq(struct ice_pf *pf);
+u64 ice_ptp_read_src_clk_reg(struct ice_pf *pf,
+ struct ptp_system_timestamp *sts);
u64 ice_ptp_get_rx_hwts(const union ice_32b_rx_flex_desc *rx_desc,
const struct ice_pkt_ctx *pkt_ctx);
@@ -342,6 +344,17 @@ static inline bool ice_ptp_process_ts(struct ice_pf *pf)
return true;
}
+static inline irqreturn_t ice_ptp_ts_irq(struct ice_pf *pf)
+{
+ return IRQ_HANDLED;
+}
+
+static inline u64 ice_ptp_read_src_clk_reg(struct ice_pf *pf,
+ struct ptp_system_timestamp *sts)
+{
+ return 0;
+}
+
static inline u64
ice_ptp_get_rx_hwts(const union ice_32b_rx_flex_desc *rx_desc,
const struct ice_pkt_ctx *pkt_ctx)
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_consts.h b/drivers/net/ethernet/intel/ice/ice_ptp_consts.h
index ac46d1183300..003cdfada3ca 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp_consts.h
+++ b/drivers/net/ethernet/intel/ice/ice_ptp_consts.h
@@ -10,70 +10,25 @@
/* Constants defined for the PTP 1588 clock hardware. */
const struct ice_phy_reg_info_eth56g eth56g_phy_res[NUM_ETH56G_PHY_RES] = {
- /* ETH56G_PHY_REG_PTP */
- {
- /* base_addr */
- {
- 0x092000,
- 0x126000,
- 0x1BA000,
- 0x24E000,
- 0x2E2000,
- },
- /* step */
- 0x98,
+ [ETH56G_PHY_REG_PTP] = {
+ .base_addr = 0x092000,
+ .step = 0x98,
},
- /* ETH56G_PHY_MEM_PTP */
- {
- /* base_addr */
- {
- 0x093000,
- 0x127000,
- 0x1BB000,
- 0x24F000,
- 0x2E3000,
- },
- /* step */
- 0x200,
+ [ETH56G_PHY_MEM_PTP] = {
+ .base_addr = 0x093000,
+ .step = 0x200,
},
- /* ETH56G_PHY_REG_XPCS */
- {
- /* base_addr */
- {
- 0x000000,
- 0x009400,
- 0x128000,
- 0x1BC000,
- 0x250000,
- },
- /* step */
- 0x21000,
+ [ETH56G_PHY_REG_XPCS] = {
+ .base_addr = 0x000000,
+ .step = 0x21000,
},
- /* ETH56G_PHY_REG_MAC */
- {
- /* base_addr */
- {
- 0x085000,
- 0x119000,
- 0x1AD000,
- 0x241000,
- 0x2D5000,
- },
- /* step */
- 0x1000,
+ [ETH56G_PHY_REG_MAC] = {
+ .base_addr = 0x085000,
+ .step = 0x1000,
},
- /* ETH56G_PHY_REG_GPCS */
- {
- /* base_addr */
- {
- 0x084000,
- 0x118000,
- 0x1AC000,
- 0x240000,
- 0x2D4000,
- },
- /* step */
- 0x400,
+ [ETH56G_PHY_REG_GPCS] = {
+ .base_addr = 0x084000,
+ .step = 0x400,
},
};
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
index ec91822e9280..89bb8461284a 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
+++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
@@ -746,7 +746,7 @@ static int ice_init_cgu_e82x(struct ice_hw *hw)
int err;
/* Disable sticky lock detection so lock err reported is accurate */
- if (ice_is_e825c(hw))
+ if (hw->mac_type == ICE_MAC_GENERIC_3K_E825)
err = ice_cfg_cgu_pll_dis_sticky_bits_e825c(hw);
else
err = ice_cfg_cgu_pll_dis_sticky_bits_e82x(hw);
@@ -756,7 +756,7 @@ static int ice_init_cgu_e82x(struct ice_hw *hw)
/* Configure the CGU PLL using the parameters from the function
* capabilities.
*/
- if (ice_is_e825c(hw))
+ if (hw->mac_type == ICE_MAC_GENERIC_3K_E825)
err = ice_cfg_cgu_pll_e825c(hw, ts_info->time_ref,
(enum ice_clk_src)ts_info->clk_src);
else
@@ -827,8 +827,9 @@ static u32 ice_ptp_tmr_cmd_to_port_reg(struct ice_hw *hw,
/* Certain hardware families share the same register values for the
* port register and source timer register.
*/
- switch (ice_get_phy_model(hw)) {
- case ICE_PHY_E810:
+ switch (hw->mac_type) {
+ case ICE_MAC_E810:
+ case ICE_MAC_E830:
return ice_ptp_tmr_cmd_to_src_reg(hw, cmd) & TS_CMD_MASK_E810;
default:
break;
@@ -895,6 +896,17 @@ static void ice_ptp_exec_tmr_cmd(struct ice_hw *hw)
ice_flush(hw);
}
+/**
+ * ice_ptp_cfg_sync_delay - Configure PHC to PHY synchronization delay
+ * @hw: pointer to HW struct
+ * @delay: delay between PHC and PHY SYNC command execution in nanoseconds
+ */
+static void ice_ptp_cfg_sync_delay(const struct ice_hw *hw, u32 delay)
+{
+ wr32(hw, GLTSYN_SYNC_DLAY, delay);
+ ice_flush(hw);
+}
+
/* 56G PHY device functions
*
* The following functions operate on devices with the ETH 56G PHY.
@@ -998,7 +1010,7 @@ static int ice_phy_res_address_eth56g(struct ice_hw *hw, u8 lane,
/* Lanes 4..7 are in fact 0..3 on a second PHY */
lane %= hw->ptp.ports_per_phy;
- *addr = eth56g_phy_res[res_type].base[0] +
+ *addr = eth56g_phy_res[res_type].base_addr +
lane * eth56g_phy_res[res_type].step + offset;
return 0;
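With the table above collapsed to a single base address per resource, the
resolved address is simply base_addr + lane * step + offset. A quick worked
example using the values from the ice_ptp_consts.h hunk in this patch:

/* ETH56G_PHY_REG_PTP: base_addr = 0x092000, step = 0x98.
 * Lane 2, register offset 0x10:
 *   0x092000 + 2 * 0x98 + 0x10 = 0x092140
 * (lanes 4..7 first wrap back to 0..3 via the modulo above)
 */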
@@ -1228,7 +1240,7 @@ static int ice_write_quad_ptp_reg_eth56g(struct ice_hw *hw, u8 port,
if (port >= hw->ptp.num_lports)
return -EIO;
- addr = eth56g_phy_res[ETH56G_PHY_REG_PTP].base[0] + offset;
+ addr = eth56g_phy_res[ETH56G_PHY_REG_PTP].base_addr + offset;
return ice_write_phy_eth56g(hw, port, addr, val);
}
@@ -1253,7 +1265,7 @@ static int ice_read_quad_ptp_reg_eth56g(struct ice_hw *hw, u8 port,
if (port >= hw->ptp.num_lports)
return -EIO;
- addr = eth56g_phy_res[ETH56G_PHY_REG_PTP].base[0] + offset;
+ addr = eth56g_phy_res[ETH56G_PHY_REG_PTP].base_addr + offset;
return ice_read_phy_eth56g(hw, port, addr, val);
}
@@ -1576,9 +1588,8 @@ static int ice_read_ptp_tstamp_eth56g(struct ice_hw *hw, u8 port, u8 idx,
* lower 8 bits in the low register, and the upper 32 bits in the high
* register.
*/
- *tstamp = FIELD_PREP(TS_PHY_HIGH_M, hi) |
- FIELD_PREP(TS_PHY_LOW_M, lo);
-
+ *tstamp = FIELD_PREP(PHY_40B_HIGH_M, hi) |
+ FIELD_PREP(PHY_40B_LOW_M, lo);
return 0;
}
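The renamed masks make the two 40-bit timestamp layouts explicit: internal
PHYs (E82X/ETH56G) keep the low 8 bits in the low register and bits 39:8 in
the high register, while the external PHYs on E810/E830 keep bits 31:0 in the
low register and bits 39:32 in the high register. A hedged sketch of the
reassembly using only the masks this patch defines (the helper itself is an
illustration, not part of the driver):

static inline u64 example_assemble_40b_tstamp(u32 hi, u32 lo, bool ext_phy)
{
	if (ext_phy)	/* E810/E830 layout */
		return FIELD_PREP(PHY_EXT_40B_HIGH_M, hi) |
		       FIELD_PREP(PHY_EXT_40B_LOW_M, lo);

	/* E82X/ETH56G layout */
	return FIELD_PREP(PHY_40B_HIGH_M, hi) |
	       FIELD_PREP(PHY_40B_LOW_M, lo);
}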
@@ -2639,18 +2650,17 @@ static void ice_sb_access_ena_eth56g(struct ice_hw *hw, bool enable)
}
/**
- * ice_ptp_init_phc_eth56g - Perform E82X specific PHC initialization
+ * ice_ptp_init_phc_e825 - Perform E825 specific PHC initialization
* @hw: pointer to HW struct
*
- * Perform PHC initialization steps specific to E82X devices.
+ * Perform E825-specific PTP hardware clock initialization steps.
*
- * Return:
- * * %0 - success
- * * %other - failed to initialize CGU
+ * Return: 0 on success, negative error code otherwise.
*/
-static int ice_ptp_init_phc_eth56g(struct ice_hw *hw)
+static int ice_ptp_init_phc_e825(struct ice_hw *hw)
{
ice_sb_access_ena_eth56g(hw, true);
+
/* Initialize the Clock Generation Unit */
return ice_init_cgu_e82x(hw);
}
@@ -2729,10 +2739,7 @@ static void ice_ptp_init_phy_e825(struct ice_hw *hw)
{
struct ice_ptp_hw *ptp = &hw->ptp;
struct ice_eth56g_params *params;
- u32 phy_rev;
- int err;
- ptp->phy_model = ICE_PHY_ETH56G;
params = &ptp->phy.eth56g;
params->onestep_ena = false;
params->peer_delay = 0;
@@ -2742,9 +2749,6 @@ static void ice_ptp_init_phy_e825(struct ice_hw *hw)
ptp->num_lports = params->num_phys * ptp->ports_per_phy;
ice_sb_access_ena_eth56g(hw, true);
- err = ice_read_phy_eth56g(hw, hw->pf_id, PHY_REG_REVISION, &phy_rev);
- if (err || phy_rev != PHY_REVISION_ETH56G)
- ptp->phy_model = ICE_PHY_UNSUP;
}
/* E822 family functions
@@ -3219,7 +3223,8 @@ ice_read_phy_tstamp_e82x(struct ice_hw *hw, u8 quad, u8 idx, u64 *tstamp)
* lower 8 bits in the low register, and the upper 32 bits in the high
* register.
*/
- *tstamp = FIELD_PREP(TS_PHY_HIGH_M, hi) | FIELD_PREP(TS_PHY_LOW_M, lo);
+ *tstamp = FIELD_PREP(PHY_40B_HIGH_M, hi) |
+ FIELD_PREP(PHY_40B_LOW_M, lo);
return 0;
}
@@ -4792,7 +4797,6 @@ int ice_phy_cfg_intr_e82x(struct ice_hw *hw, u8 quad, bool ena, u8 threshold)
*/
static void ice_ptp_init_phy_e82x(struct ice_ptp_hw *ptp)
{
- ptp->phy_model = ICE_PHY_E82X;
ptp->num_lports = 8;
ptp->ports_per_phy = 8;
}
@@ -4986,7 +4990,8 @@ ice_read_phy_tstamp_e810(struct ice_hw *hw, u8 lport, u8 idx, u64 *tstamp)
/* For E810 devices, the timestamp is reported with the lower 32 bits
* in the low register, and the upper 8 bits in the high register.
*/
- *tstamp = ((u64)hi) << TS_HIGH_S | ((u64)lo & TS_LOW_M);
+ *tstamp = FIELD_PREP(PHY_EXT_40B_HIGH_M, hi) |
+ FIELD_PREP(PHY_EXT_40B_LOW_M, lo);
return 0;
}
@@ -5049,8 +5054,7 @@ static int ice_ptp_init_phc_e810(struct ice_hw *hw)
u8 tmr_idx;
int err;
- /* Ensure synchronization delay is zero */
- wr32(hw, GLTSYN_SYNC_DLAY, 0);
+ ice_ptp_cfg_sync_delay(hw, ICE_E810_E830_SYNC_DELAY);
tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
err = ice_write_phy_reg_e810(hw, ETH_GLTSYN_ENA(tmr_idx),
@@ -5316,68 +5320,6 @@ ice_get_phy_tx_tstamp_ready_e810(struct ice_hw *hw, u8 port, u64 *tstamp_ready)
*/
/**
- * ice_get_pca9575_handle
- * @hw: pointer to the hw struct
- * @pca9575_handle: GPIO controller's handle
- *
- * Find and return the GPIO controller's handle in the netlist.
- * When found - the value will be cached in the hw structure and following calls
- * will return cached value
- */
-static int
-ice_get_pca9575_handle(struct ice_hw *hw, u16 *pca9575_handle)
-{
- struct ice_aqc_get_link_topo *cmd;
- struct ice_aq_desc desc;
- int status;
- u8 idx;
-
- /* If handle was read previously return cached value */
- if (hw->io_expander_handle) {
- *pca9575_handle = hw->io_expander_handle;
- return 0;
- }
-
- /* If handle was not detected read it from the netlist */
- cmd = &desc.params.get_link_topo;
- ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo);
-
- /* Set node type to GPIO controller */
- cmd->addr.topo_params.node_type_ctx =
- (ICE_AQC_LINK_TOPO_NODE_TYPE_M &
- ICE_AQC_LINK_TOPO_NODE_TYPE_GPIO_CTRL);
-
-#define SW_PCA9575_SFP_TOPO_IDX 2
-#define SW_PCA9575_QSFP_TOPO_IDX 1
-
- /* Check if the SW IO expander controlling SMA exists in the netlist. */
- if (hw->device_id == ICE_DEV_ID_E810C_SFP)
- idx = SW_PCA9575_SFP_TOPO_IDX;
- else if (hw->device_id == ICE_DEV_ID_E810C_QSFP)
- idx = SW_PCA9575_QSFP_TOPO_IDX;
- else
- return -EOPNOTSUPP;
-
- cmd->addr.topo_params.index = idx;
-
- status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
- if (status)
- return -EOPNOTSUPP;
-
- /* Verify if we found the right IO expander type */
- if (desc.params.get_link_topo.node_part_num !=
- ICE_AQC_GET_LINK_TOPO_NODE_NR_PCA9575)
- return -EOPNOTSUPP;
-
- /* If present save the handle and return it */
- hw->io_expander_handle =
- le16_to_cpu(desc.params.get_link_topo.addr.handle);
- *pca9575_handle = hw->io_expander_handle;
-
- return 0;
-}
-
-/**
* ice_read_sma_ctrl
* @hw: pointer to the hw struct
* @data: pointer to data to be read from the GPIO controller
@@ -5442,37 +5384,6 @@ int ice_write_sma_ctrl(struct ice_hw *hw, u8 data)
}
/**
- * ice_read_pca9575_reg
- * @hw: pointer to the hw struct
- * @offset: GPIO controller register offset
- * @data: pointer to data to be read from the GPIO controller
- *
- * Read the register from the GPIO controller
- */
-int ice_read_pca9575_reg(struct ice_hw *hw, u8 offset, u8 *data)
-{
- struct ice_aqc_link_topo_addr link_topo;
- __le16 addr;
- u16 handle;
- int err;
-
- memset(&link_topo, 0, sizeof(link_topo));
-
- err = ice_get_pca9575_handle(hw, &handle);
- if (err)
- return err;
-
- link_topo.handle = cpu_to_le16(handle);
- link_topo.topo_params.node_type_ctx =
- FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_CTX_M,
- ICE_AQC_LINK_TOPO_NODE_CTX_PROVIDED);
-
- addr = cpu_to_le16((u16)offset);
-
- return ice_aq_read_i2c(hw, link_topo, 0, addr, 1, data, NULL);
-}
-
-/**
* ice_ptp_read_sdp_ac - read SDP available connections section from NVM
* @hw: pointer to the HW struct
* @entries: returns the SDP available connections section from NVM
@@ -5538,18 +5449,138 @@ exit:
*/
static void ice_ptp_init_phy_e810(struct ice_ptp_hw *ptp)
{
- ptp->phy_model = ICE_PHY_E810;
ptp->num_lports = 8;
ptp->ports_per_phy = 4;
init_waitqueue_head(&ptp->phy.e810.atqbal_wq);
}
+/* E830 functions
+ *
+ * The following functions operate on the E830 series devices.
+ *
+ */
+
+/**
+ * ice_ptp_init_phc_e830 - Perform E830 specific PHC initialization
+ * @hw: pointer to HW struct
+ *
+ * Perform E830-specific PTP hardware clock initialization steps.
+ */
+static void ice_ptp_init_phc_e830(const struct ice_hw *hw)
+{
+ ice_ptp_cfg_sync_delay(hw, ICE_E810_E830_SYNC_DELAY);
+}
+
+/**
+ * ice_ptp_write_direct_incval_e830 - Prep PHY port increment value change
+ * @hw: pointer to HW struct
+ * @incval: The new 40bit increment value to prepare
+ *
+ * Prepare the PHY port for a new increment value by programming the PHC
+ * GLTSYN_INCVAL_L and GLTSYN_INCVAL_H registers. The actual change is
+ * completed by FW automatically.
+ */
+static void ice_ptp_write_direct_incval_e830(const struct ice_hw *hw,
+ u64 incval)
+{
+ u8 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
+
+ wr32(hw, GLTSYN_INCVAL_L(tmr_idx), lower_32_bits(incval));
+ wr32(hw, GLTSYN_INCVAL_H(tmr_idx), upper_32_bits(incval));
+}
+
+/**
+ * ice_ptp_write_direct_phc_time_e830 - Prepare PHY port with initial time
+ * @hw: Board private structure
+ * @time: Time to initialize the PHY port clock to
+ *
+ * Program the PHC GLTSYN_TIME registers directly with the initial clock time.
+ * On E830, hardware propagates the new time to the PHY automatically, so the
+ * driver does not need to issue an ICE_PTP_INIT_TIME command afterwards.
+ *
+ * The time value is the full 64-bit clock time in units of nominal
+ * nanoseconds.
+ */
+static void ice_ptp_write_direct_phc_time_e830(const struct ice_hw *hw,
+ u64 time)
+{
+ u8 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
+
+ wr32(hw, GLTSYN_TIME_0(tmr_idx), 0);
+ wr32(hw, GLTSYN_TIME_L(tmr_idx), lower_32_bits(time));
+ wr32(hw, GLTSYN_TIME_H(tmr_idx), upper_32_bits(time));
+}
+
+/**
+ * ice_ptp_port_cmd_e830 - Prepare all external PHYs for a timer command
+ * @hw: pointer to HW struct
+ * @cmd: Command to be sent to the port
+ *
+ * Prepare the external PHYs connected to this device for a timer sync
+ * command.
+ *
+ * Return: 0 on success, negative error code when PHY write failed
+ */
+static int ice_ptp_port_cmd_e830(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
+{
+ u32 val = ice_ptp_tmr_cmd_to_port_reg(hw, cmd);
+
+ return ice_write_phy_reg_e810(hw, E830_ETH_GLTSYN_CMD, val);
+}
+
+/**
+ * ice_read_phy_tstamp_e830 - Read a PHY timestamp out of the external PHY
+ * @hw: pointer to the HW struct
+ * @idx: the timestamp index to read
+ * @tstamp: on return, the 40bit timestamp value
+ *
+ * Read a 40bit timestamp value out of the timestamp block of the external PHY
+ * on the E830 device.
+ */
+static void ice_read_phy_tstamp_e830(const struct ice_hw *hw, u8 idx,
+ u64 *tstamp)
+{
+ u32 hi, lo;
+
+ hi = rd32(hw, E830_PRTTSYN_TXTIME_H(idx));
+ lo = rd32(hw, E830_PRTTSYN_TXTIME_L(idx));
+
+ /* For E830 devices, the timestamp is reported with the lower 32 bits
+ * in the low register, and the upper 8 bits in the high register.
+ */
+ *tstamp = FIELD_PREP(PHY_EXT_40B_HIGH_M, hi) |
+ FIELD_PREP(PHY_EXT_40B_LOW_M, lo);
+}
+
+/**
+ * ice_get_phy_tx_tstamp_ready_e830 - Read Tx memory status register
+ * @hw: pointer to the HW struct
+ * @port: the PHY port to read
+ * @tstamp_ready: contents of the Tx memory status register
+ */
+static void ice_get_phy_tx_tstamp_ready_e830(const struct ice_hw *hw, u8 port,
+ u64 *tstamp_ready)
+{
+ *tstamp_ready = rd32(hw, E830_PRTMAC_TS_TX_MEM_VALID_H);
+ *tstamp_ready <<= 32;
+ *tstamp_ready |= rd32(hw, E830_PRTMAC_TS_TX_MEM_VALID_L);
+}
+
+/**
+ * ice_ptp_init_phy_e830 - initialize PHY parameters
+ * @ptp: pointer to the PTP HW struct
+ */
+static void ice_ptp_init_phy_e830(struct ice_ptp_hw *ptp)
+{
+ ptp->num_lports = 8;
+ ptp->ports_per_phy = 4;
+}
+
/* Device agnostic functions
*
- * The following functions implement shared behavior common to both E822 and
- * E810 devices, possibly calling a device specific implementation where
- * necessary.
+ * The following functions implement shared behavior common to all devices,
+ * possibly calling a device specific implementation where necessary.
*/
/**
@@ -5612,14 +5643,22 @@ void ice_ptp_init_hw(struct ice_hw *hw)
{
struct ice_ptp_hw *ptp = &hw->ptp;
- if (ice_is_e822(hw) || ice_is_e823(hw))
- ice_ptp_init_phy_e82x(ptp);
- else if (ice_is_e810(hw))
+ switch (hw->mac_type) {
+ case ICE_MAC_E810:
ice_ptp_init_phy_e810(ptp);
- else if (ice_is_e825c(hw))
+ break;
+ case ICE_MAC_E830:
+ ice_ptp_init_phy_e830(ptp);
+ break;
+ case ICE_MAC_GENERIC:
+ ice_ptp_init_phy_e82x(ptp);
+ break;
+ case ICE_MAC_GENERIC_3K_E825:
ice_ptp_init_phy_e825(hw);
- else
- ptp->phy_model = ICE_PHY_UNSUP;
+ break;
+ default:
+ return;
+ }
}
/**
@@ -5640,11 +5679,11 @@ void ice_ptp_init_hw(struct ice_hw *hw)
static int ice_ptp_write_port_cmd(struct ice_hw *hw, u8 port,
enum ice_ptp_tmr_cmd cmd)
{
- switch (ice_get_phy_model(hw)) {
- case ICE_PHY_ETH56G:
- return ice_ptp_write_port_cmd_eth56g(hw, port, cmd);
- case ICE_PHY_E82X:
+ switch (hw->mac_type) {
+ case ICE_MAC_GENERIC:
return ice_ptp_write_port_cmd_e82x(hw, port, cmd);
+ case ICE_MAC_GENERIC_3K_E825:
+ return ice_ptp_write_port_cmd_eth56g(hw, port, cmd);
default:
return -EOPNOTSUPP;
}
@@ -5705,9 +5744,11 @@ static int ice_ptp_port_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
u32 port;
/* PHY models which can program all ports simultaneously */
- switch (ice_get_phy_model(hw)) {
- case ICE_PHY_E810:
+ switch (hw->mac_type) {
+ case ICE_MAC_E810:
return ice_ptp_port_cmd_e810(hw, cmd);
+ case ICE_MAC_E830:
+ return ice_ptp_port_cmd_e830(hw, cmd);
default:
break;
}
@@ -5778,23 +5819,29 @@ int ice_ptp_init_time(struct ice_hw *hw, u64 time)
tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
/* Source timers */
+ /* For E830 we don't need to use shadow registers, it's automatic */
+ if (hw->mac_type == ICE_MAC_E830) {
+ ice_ptp_write_direct_phc_time_e830(hw, time);
+ return 0;
+ }
+
wr32(hw, GLTSYN_SHTIME_L(tmr_idx), lower_32_bits(time));
wr32(hw, GLTSYN_SHTIME_H(tmr_idx), upper_32_bits(time));
wr32(hw, GLTSYN_SHTIME_0(tmr_idx), 0);
/* PHY timers */
/* Fill Rx and Tx ports and send msg to PHY */
- switch (ice_get_phy_model(hw)) {
- case ICE_PHY_ETH56G:
- err = ice_ptp_prep_phy_time_eth56g(hw,
- (u32)(time & 0xFFFFFFFF));
- break;
- case ICE_PHY_E810:
+ switch (hw->mac_type) {
+ case ICE_MAC_E810:
err = ice_ptp_prep_phy_time_e810(hw, time & 0xFFFFFFFF);
break;
- case ICE_PHY_E82X:
+ case ICE_MAC_GENERIC:
err = ice_ptp_prep_phy_time_e82x(hw, time & 0xFFFFFFFF);
break;
+ case ICE_MAC_GENERIC_3K_E825:
+ err = ice_ptp_prep_phy_time_eth56g(hw,
+ (u32)(time & 0xFFFFFFFF));
+ break;
default:
err = -EOPNOTSUPP;
}
@@ -5826,20 +5873,26 @@ int ice_ptp_write_incval(struct ice_hw *hw, u64 incval)
tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
+ /* For E830 we don't need to use shadow registers, it's automatic */
+ if (hw->mac_type == ICE_MAC_E830) {
+ ice_ptp_write_direct_incval_e830(hw, incval);
+ return 0;
+ }
+
/* Shadow Adjust */
wr32(hw, GLTSYN_SHADJ_L(tmr_idx), lower_32_bits(incval));
wr32(hw, GLTSYN_SHADJ_H(tmr_idx), upper_32_bits(incval));
- switch (ice_get_phy_model(hw)) {
- case ICE_PHY_ETH56G:
- err = ice_ptp_prep_phy_incval_eth56g(hw, incval);
- break;
- case ICE_PHY_E810:
+ switch (hw->mac_type) {
+ case ICE_MAC_E810:
err = ice_ptp_prep_phy_incval_e810(hw, incval);
break;
- case ICE_PHY_E82X:
+ case ICE_MAC_GENERIC:
err = ice_ptp_prep_phy_incval_e82x(hw, incval);
break;
+ case ICE_MAC_GENERIC_3K_E825:
+ err = ice_ptp_prep_phy_incval_eth56g(hw, incval);
+ break;
default:
err = -EOPNOTSUPP;
}
@@ -5899,16 +5952,19 @@ int ice_ptp_adj_clock(struct ice_hw *hw, s32 adj)
wr32(hw, GLTSYN_SHADJ_L(tmr_idx), 0);
wr32(hw, GLTSYN_SHADJ_H(tmr_idx), adj);
- switch (ice_get_phy_model(hw)) {
- case ICE_PHY_ETH56G:
- err = ice_ptp_prep_phy_adj_eth56g(hw, adj);
- break;
- case ICE_PHY_E810:
+ switch (hw->mac_type) {
+ case ICE_MAC_E810:
err = ice_ptp_prep_phy_adj_e810(hw, adj);
break;
- case ICE_PHY_E82X:
+ case ICE_MAC_E830:
+ /* E830 syncs PHYs automatically after setting GLTSYN_SHADJ */
+ return 0;
+ case ICE_MAC_GENERIC:
err = ice_ptp_prep_phy_adj_e82x(hw, adj);
break;
+ case ICE_MAC_GENERIC_3K_E825:
+ err = ice_ptp_prep_phy_adj_eth56g(hw, adj);
+ break;
default:
err = -EOPNOTSUPP;
}
@@ -5932,13 +5988,16 @@ int ice_ptp_adj_clock(struct ice_hw *hw, s32 adj)
*/
int ice_read_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx, u64 *tstamp)
{
- switch (ice_get_phy_model(hw)) {
- case ICE_PHY_ETH56G:
- return ice_read_ptp_tstamp_eth56g(hw, block, idx, tstamp);
- case ICE_PHY_E810:
+ switch (hw->mac_type) {
+ case ICE_MAC_E810:
return ice_read_phy_tstamp_e810(hw, block, idx, tstamp);
- case ICE_PHY_E82X:
+ case ICE_MAC_E830:
+ ice_read_phy_tstamp_e830(hw, idx, tstamp);
+ return 0;
+ case ICE_MAC_GENERIC:
return ice_read_phy_tstamp_e82x(hw, block, idx, tstamp);
+ case ICE_MAC_GENERIC_3K_E825:
+ return ice_read_ptp_tstamp_eth56g(hw, block, idx, tstamp);
default:
return -EOPNOTSUPP;
}
@@ -5962,13 +6021,13 @@ int ice_read_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx, u64 *tstamp)
*/
int ice_clear_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx)
{
- switch (ice_get_phy_model(hw)) {
- case ICE_PHY_ETH56G:
- return ice_clear_ptp_tstamp_eth56g(hw, block, idx);
- case ICE_PHY_E810:
+ switch (hw->mac_type) {
+ case ICE_MAC_E810:
return ice_clear_phy_tstamp_e810(hw, block, idx);
- case ICE_PHY_E82X:
+ case ICE_MAC_GENERIC:
return ice_clear_phy_tstamp_e82x(hw, block, idx);
+ case ICE_MAC_GENERIC_3K_E825:
+ return ice_clear_ptp_tstamp_eth56g(hw, block, idx);
default:
return -EOPNOTSUPP;
}
@@ -6025,14 +6084,14 @@ static int ice_get_pf_c827_idx(struct ice_hw *hw, u8 *idx)
*/
void ice_ptp_reset_ts_memory(struct ice_hw *hw)
{
- switch (ice_get_phy_model(hw)) {
- case ICE_PHY_ETH56G:
- ice_ptp_reset_ts_memory_eth56g(hw);
- break;
- case ICE_PHY_E82X:
+ switch (hw->mac_type) {
+ case ICE_MAC_GENERIC:
ice_ptp_reset_ts_memory_e82x(hw);
break;
- case ICE_PHY_E810:
+ case ICE_MAC_GENERIC_3K_E825:
+ ice_ptp_reset_ts_memory_eth56g(hw);
+ break;
+ case ICE_MAC_E810:
default:
return;
}
@@ -6054,13 +6113,16 @@ int ice_ptp_init_phc(struct ice_hw *hw)
/* Clear event err indications for auxiliary pins */
(void)rd32(hw, GLTSYN_STAT(src_idx));
- switch (ice_get_phy_model(hw)) {
- case ICE_PHY_ETH56G:
- return ice_ptp_init_phc_eth56g(hw);
- case ICE_PHY_E810:
+ switch (hw->mac_type) {
+ case ICE_MAC_E810:
return ice_ptp_init_phc_e810(hw);
- case ICE_PHY_E82X:
+ case ICE_MAC_E830:
+ ice_ptp_init_phc_e830(hw);
+ return 0;
+ case ICE_MAC_GENERIC:
return ice_ptp_init_phc_e82x(hw);
+ case ICE_MAC_GENERIC_3K_E825:
+ return ice_ptp_init_phc_e825(hw);
default:
return -EOPNOTSUPP;
}
@@ -6079,17 +6141,19 @@ int ice_ptp_init_phc(struct ice_hw *hw)
*/
int ice_get_phy_tx_tstamp_ready(struct ice_hw *hw, u8 block, u64 *tstamp_ready)
{
- switch (ice_get_phy_model(hw)) {
- case ICE_PHY_ETH56G:
- return ice_get_phy_tx_tstamp_ready_eth56g(hw, block,
- tstamp_ready);
- case ICE_PHY_E810:
+ switch (hw->mac_type) {
+ case ICE_MAC_E810:
return ice_get_phy_tx_tstamp_ready_e810(hw, block,
tstamp_ready);
- case ICE_PHY_E82X:
+ case ICE_MAC_E830:
+ ice_get_phy_tx_tstamp_ready_e830(hw, block, tstamp_ready);
+ return 0;
+ case ICE_MAC_GENERIC:
return ice_get_phy_tx_tstamp_ready_e82x(hw, block,
tstamp_ready);
- break;
+ case ICE_MAC_GENERIC_3K_E825:
+ return ice_get_phy_tx_tstamp_ready_eth56g(hw, block,
+ tstamp_ready);
default:
return -EOPNOTSUPP;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.h b/drivers/net/ethernet/intel/ice/ice_ptp_hw.h
index 6779ce120515..e5925ccc2613 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp_hw.h
+++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.h
@@ -65,14 +65,14 @@ enum ice_eth56g_link_spd {
/**
* struct ice_phy_reg_info_eth56g - ETH56G PHY register parameters
- * @base: base address for each PHY block
+ * @base_addr: base address for each PHY block
* @step: step between PHY lanes
*
* Characteristic information for the various PHY register parameters in the
* ETH56G devices
*/
struct ice_phy_reg_info_eth56g {
- u32 base[NUM_ETH56G_PHY_RES];
+ u32 base_addr;
u32 step;
};
@@ -324,6 +324,7 @@ extern const struct ice_vernier_info_e82x e822_vernier[NUM_ICE_PTP_LNK_SPD];
*/
#define ICE_E810_PLL_FREQ 812500000
#define ICE_PTP_NOMINAL_INCVAL_E810 0x13b13b13bULL
+#define ICE_E810_E830_SYNC_DELAY 0
/* Device agnostic functions */
u8 ice_get_ptp_src_clock_index(struct ice_hw *hw);
@@ -395,7 +396,6 @@ int ice_phy_cfg_intr_e82x(struct ice_hw *hw, u8 quad, bool ena, u8 threshold);
/* E810 family functions */
int ice_read_sma_ctrl(struct ice_hw *hw, u8 *data);
int ice_write_sma_ctrl(struct ice_hw *hw, u8 data);
-int ice_read_pca9575_reg(struct ice_hw *hw, u8 offset, u8 *data);
int ice_ptp_read_sdp_ac(struct ice_hw *hw, __le16 *entries, uint *num_entries);
int ice_cgu_get_num_pins(struct ice_hw *hw, bool input);
enum dpll_pin_type ice_cgu_get_pin_type(struct ice_hw *hw, u8 pin, bool input);
@@ -431,13 +431,14 @@ int ice_phy_cfg_ptp_1step_eth56g(struct ice_hw *hw, u8 port);
*/
static inline u64 ice_get_base_incval(struct ice_hw *hw)
{
- switch (hw->ptp.phy_model) {
- case ICE_PHY_ETH56G:
- return ICE_ETH56G_NOMINAL_INCVAL;
- case ICE_PHY_E810:
+ switch (hw->mac_type) {
+ case ICE_MAC_E810:
+ case ICE_MAC_E830:
return ICE_PTP_NOMINAL_INCVAL_E810;
- case ICE_PHY_E82X:
+ case ICE_MAC_GENERIC:
return ice_e82x_nominal_incval(ice_e82x_time_ref(hw));
+ case ICE_MAC_GENERIC_3K_E825:
+ return ICE_ETH56G_NOMINAL_INCVAL;
default:
return 0;
}
@@ -650,18 +651,25 @@ static inline bool ice_is_dual(struct ice_hw *hw)
/* E810 timer command register */
#define E810_ETH_GLTSYN_CMD 0x03000344
+/* E830 timer command register */
+#define E830_ETH_GLTSYN_CMD 0x00088814
+
+/* E830 PHC time register */
+#define E830_GLTSYN_TIME_L(_tmr_idx) (0x0008A000 + 0x1000 * (_tmr_idx))
+
/* Source timer incval macros */
#define INCVAL_HIGH_M 0xFF
-/* Timestamp block macros */
+/* PHY 40b register macros */
+#define PHY_EXT_40B_LOW_M GENMASK(31, 0)
+#define PHY_EXT_40B_HIGH_M GENMASK_ULL(39, 32)
+#define PHY_40B_LOW_M GENMASK(7, 0)
+#define PHY_40B_HIGH_M GENMASK_ULL(39, 8)
#define TS_VALID BIT(0)
#define TS_LOW_M 0xFFFFFFFF
#define TS_HIGH_M 0xFF
#define TS_HIGH_S 32
-#define TS_PHY_LOW_M GENMASK(7, 0)
-#define TS_PHY_HIGH_M GENMASK_ULL(39, 8)
-
#define BYTES_PER_IDX_ADDR_L_U 8
#define BYTES_PER_IDX_ADDR_L 4
@@ -772,36 +780,19 @@ static inline bool ice_is_dual(struct ice_hw *hw)
#define PHY_MAC_XIF_TS_SFD_ENA_M ICE_M(0x1, 20)
#define PHY_MAC_XIF_GMII_TS_SEL_M ICE_M(0x1, 21)
-/* GPCS config register */
-#define PHY_GPCS_CONFIG_REG0 0x268
-#define PHY_GPCS_CONFIG_REG0_TX_THR_M ICE_M(0xF, 24)
-#define PHY_GPCS_BITSLIP 0x5C
-
#define PHY_TS_INT_CONFIG_THRESHOLD_M ICE_M(0x3F, 0)
#define PHY_TS_INT_CONFIG_ENA_M BIT(6)
-/* 1-step PTP config */
-#define PHY_PTP_1STEP_CONFIG 0x270
-#define PHY_PTP_1STEP_T1S_UP64_M ICE_M(0xF, 4)
-#define PHY_PTP_1STEP_T1S_DELTA_M ICE_M(0xF, 8)
-#define PHY_PTP_1STEP_PEER_DELAY(_port) (0x274 + 4 * (_port))
-#define PHY_PTP_1STEP_PD_ADD_PD_M ICE_M(0x1, 0)
-#define PHY_PTP_1STEP_PD_DELAY_M ICE_M(0x3fffffff, 1)
-#define PHY_PTP_1STEP_PD_DLY_V_M ICE_M(0x1, 31)
-
/* Macros to derive offsets for TimeStampLow and TimeStampHigh */
#define PHY_TSTAMP_L(x) (((x) * 8) + 0)
#define PHY_TSTAMP_U(x) (((x) * 8) + 4)
-#define PHY_REG_REVISION 0x85000
-
#define PHY_REG_DESKEW_0 0x94
#define PHY_REG_DESKEW_0_RLEVEL GENMASK(6, 0)
#define PHY_REG_DESKEW_0_RLEVEL_FRAC GENMASK(9, 7)
#define PHY_REG_DESKEW_0_RLEVEL_FRAC_W 3
#define PHY_REG_DESKEW_0_VALID GENMASK(10, 10)
-#define PHY_REG_GPCS_BITSLIP 0x5C
#define PHY_REG_SD_BIT_SLIP(_port_offset) (0x29C + 4 * (_port_offset))
#define PHY_REVISION_ETH56G 0x10200
#define PHY_VENDOR_TXLANE_THRESH 0x2000C
@@ -821,7 +812,21 @@ static inline bool ice_is_dual(struct ice_hw *hw)
#define PHY_MAC_BLOCKTIME 0x50
#define PHY_MAC_MARKERTIME 0x54
#define PHY_MAC_TX_OFFSET 0x58
+#define PHY_GPCS_BITSLIP 0x5C
#define PHY_PTP_INT_STATUS 0x7FD140
+/* ETH56G registers shared per quad */
+/* GPCS config register */
+#define PHY_GPCS_CONFIG_REG0 0x268
+#define PHY_GPCS_CONFIG_REG0_TX_THR_M GENMASK(27, 24)
+/* 1-step PTP config */
+#define PHY_PTP_1STEP_CONFIG 0x270
+#define PHY_PTP_1STEP_T1S_UP64_M GENMASK(7, 4)
+#define PHY_PTP_1STEP_T1S_DELTA_M GENMASK(11, 8)
+#define PHY_PTP_1STEP_PEER_DELAY(_quad_lane) (0x274 + 4 * (_quad_lane))
+#define PHY_PTP_1STEP_PD_ADD_PD_M BIT(0)
+#define PHY_PTP_1STEP_PD_DELAY_M GENMASK(30, 1)
+#define PHY_PTP_1STEP_PD_DLY_V_M BIT(31)
+
#endif /* _ICE_PTP_HW_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.c b/drivers/net/ethernet/intel/ice/ice_sriov.c
index 8aabf7749aa5..f1648cf103b7 100644
--- a/drivers/net/ethernet/intel/ice/ice_sriov.c
+++ b/drivers/net/ethernet/intel/ice/ice_sriov.c
@@ -124,27 +124,6 @@ static void ice_dis_vf_mappings(struct ice_vf *vf)
}
/**
- * ice_sriov_free_msix_res - Reset/free any used MSIX resources
- * @pf: pointer to the PF structure
- *
- * Since no MSIX entries are taken from the pf->irq_tracker then just clear
- * the pf->sriov_base_vector.
- *
- * Returns 0 on success, and -EINVAL on error.
- */
-static int ice_sriov_free_msix_res(struct ice_pf *pf)
-{
- if (!pf)
- return -EINVAL;
-
- bitmap_free(pf->sriov_irq_bm);
- pf->sriov_irq_size = 0;
- pf->sriov_base_vector = 0;
-
- return 0;
-}
-
-/**
* ice_free_vfs - Free all VFs
* @pf: pointer to the PF structure
*/
@@ -178,6 +157,7 @@ void ice_free_vfs(struct ice_pf *pf)
ice_eswitch_detach_vf(pf, vf);
ice_dis_vf_qs(vf);
+ ice_virt_free_irqs(pf, vf->first_vector_idx, vf->num_msix);
if (test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
/* disable VF qp mappings and set VF disable state */
@@ -197,9 +177,6 @@ void ice_free_vfs(struct ice_pf *pf)
mutex_unlock(&vf->cfg_lock);
}
- if (ice_sriov_free_msix_res(pf))
- dev_err(dev, "Failed to free MSIX resources used by SR-IOV\n");
-
vfs->num_qps_per = 0;
ice_free_vf_entries(pf);
@@ -369,40 +346,6 @@ void ice_calc_vf_reg_idx(struct ice_vf *vf, struct ice_q_vector *q_vector)
}
/**
- * ice_sriov_set_msix_res - Set any used MSIX resources
- * @pf: pointer to PF structure
- * @num_msix_needed: number of MSIX vectors needed for all SR-IOV VFs
- *
- * This function allows SR-IOV resources to be taken from the end of the PF's
- * allowed HW MSIX vectors so that the irq_tracker will not be affected. We
- * just set the pf->sriov_base_vector and return success.
- *
- * If there are not enough resources available, return an error. This should
- * always be caught by ice_set_per_vf_res().
- *
- * Return 0 on success, and -EINVAL when there are not enough MSIX vectors
- * in the PF's space available for SR-IOV.
- */
-static int ice_sriov_set_msix_res(struct ice_pf *pf, u16 num_msix_needed)
-{
- u16 total_vectors = pf->hw.func_caps.common_cap.num_msix_vectors;
- int vectors_used = ice_get_max_used_msix_vector(pf);
- int sriov_base_vector;
-
- sriov_base_vector = total_vectors - num_msix_needed;
-
- /* make sure we only grab irq_tracker entries from the list end and
- * that we have enough available MSIX vectors
- */
- if (sriov_base_vector < vectors_used)
- return -EINVAL;
-
- pf->sriov_base_vector = sriov_base_vector;
-
- return 0;
-}
-
-/**
* ice_set_per_vf_res - check if vectors and queues are available
* @pf: pointer to the PF structure
* @num_vfs: the number of SR-IOV VFs being configured
@@ -426,11 +369,9 @@ static int ice_sriov_set_msix_res(struct ice_pf *pf, u16 num_msix_needed)
*/
static int ice_set_per_vf_res(struct ice_pf *pf, u16 num_vfs)
{
- int vectors_used = ice_get_max_used_msix_vector(pf);
u16 num_msix_per_vf, num_txq, num_rxq, avail_qs;
int msix_avail_per_vf, msix_avail_for_sriov;
struct device *dev = ice_pf_to_dev(pf);
- int err;
lockdep_assert_held(&pf->vfs.table_lock);
@@ -438,8 +379,7 @@ static int ice_set_per_vf_res(struct ice_pf *pf, u16 num_vfs)
return -EINVAL;
/* determine MSI-X resources per VF */
- msix_avail_for_sriov = pf->hw.func_caps.common_cap.num_msix_vectors -
- vectors_used;
+ msix_avail_for_sriov = pf->virt_irq_tracker.num_entries;
msix_avail_per_vf = msix_avail_for_sriov / num_vfs;
if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_MED) {
num_msix_per_vf = ICE_NUM_VF_MSIX_MED;
@@ -478,13 +418,6 @@ static int ice_set_per_vf_res(struct ice_pf *pf, u16 num_vfs)
return -ENOSPC;
}
- err = ice_sriov_set_msix_res(pf, num_msix_per_vf * num_vfs);
- if (err) {
- dev_err(dev, "Unable to set MSI-X resources for %d VFs, err %d\n",
- num_vfs, err);
- return err;
- }
-
/* only allow equal Tx/Rx queue count (i.e. queue pairs) */
pf->vfs.num_qps_per = min_t(int, num_txq, num_rxq);
pf->vfs.num_msix_per = num_msix_per_vf;
@@ -495,52 +428,6 @@ static int ice_set_per_vf_res(struct ice_pf *pf, u16 num_vfs)
}
/**
- * ice_sriov_get_irqs - get irqs for SR-IOV usacase
- * @pf: pointer to PF structure
- * @needed: number of irqs to get
- *
- * This returns the first MSI-X vector index in PF space that is used by this
- * VF. This index is used when accessing PF relative registers such as
- * GLINT_VECT2FUNC and GLINT_DYN_CTL.
- * This will always be the OICR index in the AVF driver so any functionality
- * using vf->first_vector_idx for queue configuration_id: id of VF which will
- * use this irqs
- *
- * Only SRIOV specific vectors are tracked in sriov_irq_bm. SRIOV vectors are
- * allocated from the end of global irq index. First bit in sriov_irq_bm means
- * last irq index etc. It simplifies extension of SRIOV vectors.
- * They will be always located from sriov_base_vector to the last irq
- * index. While increasing/decreasing sriov_base_vector can be moved.
- */
-static int ice_sriov_get_irqs(struct ice_pf *pf, u16 needed)
-{
- int res = bitmap_find_next_zero_area(pf->sriov_irq_bm,
- pf->sriov_irq_size, 0, needed, 0);
- /* conversion from number in bitmap to global irq index */
- int index = pf->sriov_irq_size - res - needed;
-
- if (res >= pf->sriov_irq_size || index < pf->sriov_base_vector)
- return -ENOENT;
-
- bitmap_set(pf->sriov_irq_bm, res, needed);
- return index;
-}
-
-/**
- * ice_sriov_free_irqs - free irqs used by the VF
- * @pf: pointer to PF structure
- * @vf: pointer to VF structure
- */
-static void ice_sriov_free_irqs(struct ice_pf *pf, struct ice_vf *vf)
-{
- /* Move back from first vector index to first index in bitmap */
- int bm_i = pf->sriov_irq_size - vf->first_vector_idx - vf->num_msix;
-
- bitmap_clear(pf->sriov_irq_bm, bm_i, vf->num_msix);
- vf->first_vector_idx = 0;
-}
-
-/**
* ice_init_vf_vsi_res - initialize/setup VF VSI resources
* @vf: VF to initialize/setup the VSI for
*
@@ -553,7 +440,7 @@ static int ice_init_vf_vsi_res(struct ice_vf *vf)
struct ice_vsi *vsi;
int err;
- vf->first_vector_idx = ice_sriov_get_irqs(pf, vf->num_msix);
+ vf->first_vector_idx = ice_virt_get_irqs(pf, vf->num_msix);
if (vf->first_vector_idx < 0)
return -ENOMEM;
@@ -853,16 +740,10 @@ err_free_entries:
*/
static int ice_ena_vfs(struct ice_pf *pf, u16 num_vfs)
{
- int total_vectors = pf->hw.func_caps.common_cap.num_msix_vectors;
struct device *dev = ice_pf_to_dev(pf);
struct ice_hw *hw = &pf->hw;
int ret;
- pf->sriov_irq_bm = bitmap_zalloc(total_vectors, GFP_KERNEL);
- if (!pf->sriov_irq_bm)
- return -ENOMEM;
- pf->sriov_irq_size = total_vectors;
-
/* Disable global interrupt 0 so we don't try to handle the VFLR. */
wr32(hw, GLINT_DYN_CTL(pf->oicr_irq.index),
ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S);
@@ -915,7 +796,6 @@ err_unroll_intr:
/* rearm interrupts here */
ice_irq_dynamic_ena(hw, NULL, NULL);
clear_bit(ICE_OICR_INTR_DIS, pf->state);
- bitmap_free(pf->sriov_irq_bm);
return ret;
}
@@ -989,16 +869,7 @@ u32 ice_sriov_get_vf_total_msix(struct pci_dev *pdev)
{
struct ice_pf *pf = pci_get_drvdata(pdev);
- return pf->sriov_irq_size - ice_get_max_used_msix_vector(pf);
-}
-
-static int ice_sriov_move_base_vector(struct ice_pf *pf, int move)
-{
- if (pf->sriov_base_vector - move < ice_get_max_used_msix_vector(pf))
- return -ENOMEM;
-
- pf->sriov_base_vector -= move;
- return 0;
+ return pf->virt_irq_tracker.num_entries;
}
static void ice_sriov_remap_vectors(struct ice_pf *pf, u16 restricted_id)
@@ -1017,7 +888,8 @@ static void ice_sriov_remap_vectors(struct ice_pf *pf, u16 restricted_id)
continue;
ice_dis_vf_mappings(tmp_vf);
- ice_sriov_free_irqs(pf, tmp_vf);
+ ice_virt_free_irqs(pf, tmp_vf->first_vector_idx,
+ tmp_vf->num_msix);
vf_ids[to_remap] = tmp_vf->vf_id;
to_remap += 1;
@@ -1029,7 +901,7 @@ static void ice_sriov_remap_vectors(struct ice_pf *pf, u16 restricted_id)
continue;
tmp_vf->first_vector_idx =
- ice_sriov_get_irqs(pf, tmp_vf->num_msix);
+ ice_virt_get_irqs(pf, tmp_vf->num_msix);
/* there is no need to rebuild VSI as we are only changing the
* vector indexes not amount of MSI-X or queues
*/
@@ -1102,20 +974,15 @@ int ice_sriov_set_msix_vec_count(struct pci_dev *vf_dev, int msix_vec_count)
prev_msix = vf->num_msix;
prev_queues = vf->num_vf_qs;
- if (ice_sriov_move_base_vector(pf, msix_vec_count - prev_msix)) {
- ice_put_vf(vf);
- return -ENOSPC;
- }
-
ice_dis_vf_mappings(vf);
- ice_sriov_free_irqs(pf, vf);
+ ice_virt_free_irqs(pf, vf->first_vector_idx, vf->num_msix);
/* Remap all VFs beside the one is now configured */
ice_sriov_remap_vectors(pf, vf->vf_id);
vf->num_msix = msix_vec_count;
vf->num_vf_qs = queues;
- vf->first_vector_idx = ice_sriov_get_irqs(pf, vf->num_msix);
+ vf->first_vector_idx = ice_virt_get_irqs(pf, vf->num_msix);
if (vf->first_vector_idx < 0)
goto unroll;
@@ -1144,7 +1011,8 @@ unroll:
vf->num_msix = prev_msix;
vf->num_vf_qs = prev_queues;
- vf->first_vector_idx = ice_sriov_get_irqs(pf, vf->num_msix);
+
+ vf->first_vector_idx = ice_virt_get_irqs(pf, vf->num_msix);
if (vf->first_vector_idx < 0) {
ice_put_vf(vf);
return -EINVAL;
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index 9c9ea4c1b93b..1e4f6f6ee449 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -1809,6 +1809,7 @@ dma_error:
static
int ice_tx_csum(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
{
+ const struct ice_tx_ring *tx_ring = off->tx_ring;
u32 l4_len = 0, l3_len = 0, l2_len = 0;
struct sk_buff *skb = first->skb;
union {
@@ -1958,6 +1959,30 @@ int ice_tx_csum(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
l3_len = l4.hdr - ip.hdr;
offset |= (l3_len / 4) << ICE_TX_DESC_LEN_IPLEN_S;
+ if ((tx_ring->netdev->features & NETIF_F_HW_CSUM) &&
+ !(first->tx_flags & ICE_TX_FLAGS_TSO) &&
+ !skb_csum_is_sctp(skb)) {
+ /* Set GCS */
+ u16 csum_start = (skb->csum_start - skb->mac_header) / 2;
+ u16 csum_offset = skb->csum_offset / 2;
+ u16 gcs_params;
+
+ gcs_params = FIELD_PREP(ICE_TX_GCS_DESC_START_M, csum_start) |
+ FIELD_PREP(ICE_TX_GCS_DESC_OFFSET_M, csum_offset) |
+ FIELD_PREP(ICE_TX_GCS_DESC_TYPE_M,
+ ICE_TX_GCS_DESC_CSUM_PSH);
+
+ /* Unlike legacy HW checksums, GCS requires a context
+ * descriptor.
+ */
+ off->cd_qw1 |= ICE_TX_DESC_DTYPE_CTX;
+ off->cd_gcs_params = gcs_params;
+ /* Fill out CSO info in data descriptors */
+ off->td_offset |= offset;
+ off->td_cmd |= cmd;
+ return 1;
+ }
+
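The GCS start and offset fields are expressed in 2-byte words relative to the
start of the MAC header. A worked example with an assumed packet layout
(illustrative only, not taken from this patch):

/* Plain TCP over IPv4, 14-byte Ethernet header, 20-byte IPv4 header:
 *   skb->csum_start - skb->mac_header = 14 + 20 = 34  ->  csum_start = 17
 *   skb->csum_offset = offsetof(struct tcphdr, check) = 16 -> csum_offset = 8
 * gcs_params then packs start = 17, offset = 8 and the CSUM_PSH type into
 * the context descriptor via the ICE_TX_GCS_DESC_* fields.
 */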
/* Enable L4 checksum offloads */
switch (l4_proto) {
case IPPROTO_TCP:
@@ -2424,7 +2449,9 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_tx_ring *tx_ring)
ICE_TXD_CTX_QW1_CMD_S);
ice_tstamp(tx_ring, skb, first, &offload);
- if (ice_is_switchdev_running(vsi->back) && vsi->type != ICE_VSI_SF)
+ if ((ice_is_switchdev_running(vsi->back) ||
+ ice_lag_is_switchdev_running(vsi->back)) &&
+ vsi->type != ICE_VSI_SF)
ice_eswitch_set_target_vsi(skb, &offload);
if (offload.cd_qw1 & ICE_TX_DESC_DTYPE_CTX) {
@@ -2439,7 +2466,7 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_tx_ring *tx_ring)
/* setup context descriptor */
cdesc->tunneling_params = cpu_to_le32(offload.cd_tunnel_params);
cdesc->l2tag2 = cpu_to_le16(offload.cd_l2tag2);
- cdesc->rsvd = cpu_to_le16(0);
+ cdesc->gcs = cpu_to_le16(offload.cd_gcs_params);
cdesc->qw1 = cpu_to_le64(offload.cd_qw1);
}
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h
index 806bce701df3..a4b1e9514632 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.h
@@ -193,6 +193,7 @@ struct ice_tx_offload_params {
u32 td_l2tag1;
u32 cd_tunnel_params;
u16 cd_l2tag2;
+ u16 cd_gcs_params;
u8 header_len;
};
@@ -366,6 +367,7 @@ struct ice_rx_ring {
#define ICE_RX_FLAGS_RING_BUILD_SKB BIT(1)
#define ICE_RX_FLAGS_CRC_STRIP_DIS BIT(2)
#define ICE_RX_FLAGS_MULTIDEV BIT(3)
+#define ICE_RX_FLAGS_RING_GCS BIT(4)
u8 flags;
/* CL5 - 5th cacheline starts here */
struct xdp_rxq_info xdp_rxq;
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
index 2719f0e20933..45cfaabc41cb 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
@@ -81,6 +81,23 @@ ice_rx_hash_to_skb(const struct ice_rx_ring *rx_ring,
}
/**
+ * ice_rx_gcs - Set generic checksum in skb
+ * @skb: skb currently being received and modified
+ * @rx_desc: receive descriptor
+ */
+static void ice_rx_gcs(struct sk_buff *skb,
+ const union ice_32b_rx_flex_desc *rx_desc)
+{
+ const struct ice_32b_rx_flex_desc_nic *desc;
+ u16 csum;
+
+ desc = (struct ice_32b_rx_flex_desc_nic *)rx_desc;
+ skb->ip_summed = CHECKSUM_COMPLETE;
+ csum = (__force u16)desc->raw_csum;
+ skb->csum = csum_unfold((__force __sum16)swab16(csum));
+}
+
+/**
* ice_rx_csum - Indicate in skb if checksum is good
* @ring: the ring we care about
* @skb: skb currently being received and modified
@@ -107,6 +124,15 @@ ice_rx_csum(struct ice_rx_ring *ring, struct sk_buff *skb,
rx_status0 = le16_to_cpu(rx_desc->wb.status_error0);
rx_status1 = le16_to_cpu(rx_desc->wb.status_error1);
+ if ((ring->flags & ICE_RX_FLAGS_RING_GCS) &&
+ rx_desc->wb.rxdid == ICE_RXDID_FLEX_NIC &&
+ (decoded.inner_prot == LIBETH_RX_PT_INNER_TCP ||
+ decoded.inner_prot == LIBETH_RX_PT_INNER_UDP ||
+ decoded.inner_prot == LIBETH_RX_PT_INNER_ICMP)) {
+ ice_rx_gcs(skb, rx_desc);
+ return;
+ }
+
/* check if HW has decoded the packet and checksum */
if (!(rx_status0 & BIT(ICE_RX_FLEX_DESC_STATUS0_L3L4P_S)))
return;
diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h
index 33a1a5934c0d..0aab21113cc4 100644
--- a/drivers/net/ethernet/intel/ice/ice_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_type.h
@@ -871,14 +871,6 @@ union ice_phy_params {
struct ice_eth56g_params eth56g;
};
-/* PHY model */
-enum ice_phy_model {
- ICE_PHY_UNSUP = -1,
- ICE_PHY_E810 = 1,
- ICE_PHY_E82X,
- ICE_PHY_ETH56G,
-};
-
/* Global Link Topology */
enum ice_global_link_topo {
ICE_LINK_TOPO_UP_TO_2_LINKS,
@@ -888,7 +880,6 @@ enum ice_global_link_topo {
};
struct ice_ptp_hw {
- enum ice_phy_model phy_model;
union ice_phy_params phy;
u8 num_lports;
u8 ports_per_phy;
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.h b/drivers/net/ethernet/intel/ice/ice_vf_lib.h
index 4261fe1c2bcd..799b2c1f1184 100644
--- a/drivers/net/ethernet/intel/ice/ice_vf_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.h
@@ -124,6 +124,9 @@ struct ice_vf {
u8 spoofchk:1;
u8 link_forced:1;
u8 link_up:1; /* only valid if VF link is forced */
+
+ u32 ptp_caps;
+
unsigned int min_tx_rate; /* Minimum Tx bandwidth limit in Mbps */
unsigned int max_tx_rate; /* Maximum Tx bandwidth limit in Mbps */
DECLARE_BITMAP(vf_states, ICE_VF_STATES_NBITS); /* VF runtime states */
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.c b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
index ff4ad788d96a..7c3006eb68dd 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
@@ -498,6 +498,9 @@ static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg)
if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_QOS)
vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_QOS;
+ if (vf->driver_caps & VIRTCHNL_VF_CAP_PTP)
+ vfres->vf_cap_flags |= VIRTCHNL_VF_CAP_PTP;
+
vfres->num_vsis = 1;
/* Tx and Rx queue are equal for VF */
vfres->num_queue_pairs = vsi->num_txq;
@@ -562,7 +565,7 @@ bool ice_vc_isvalid_vsi_id(struct ice_vf *vf, u16 vsi_id)
*
* check for the valid queue ID
*/
-static bool ice_vc_isvalid_q_id(struct ice_vsi *vsi, u8 qid)
+static bool ice_vc_isvalid_q_id(struct ice_vsi *vsi, u16 qid)
{
/* allocated Tx and Rx queues should be always equal for VF VSI */
return qid < vsi->alloc_txq;
@@ -1862,15 +1865,33 @@ static int ice_vc_cfg_q_bw(struct ice_vf *vf, u8 *msg)
for (i = 0; i < qbw->num_queues; i++) {
if (qbw->cfg[i].shaper.peak != 0 && vf->max_tx_rate != 0 &&
- qbw->cfg[i].shaper.peak > vf->max_tx_rate)
+ qbw->cfg[i].shaper.peak > vf->max_tx_rate) {
dev_warn(ice_pf_to_dev(vf->pf), "The maximum queue %d rate limit configuration may not take effect because the maximum TX rate for VF-%d is %d\n",
qbw->cfg[i].queue_id, vf->vf_id,
vf->max_tx_rate);
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto err;
+ }
if (qbw->cfg[i].shaper.committed != 0 && vf->min_tx_rate != 0 &&
- qbw->cfg[i].shaper.committed < vf->min_tx_rate)
+ qbw->cfg[i].shaper.committed < vf->min_tx_rate) {
dev_warn(ice_pf_to_dev(vf->pf), "The minimum queue %d rate limit configuration may not take effect because the minimum TX rate for VF-%d is %d\n",
qbw->cfg[i].queue_id, vf->vf_id,
- vf->max_tx_rate);
+ vf->min_tx_rate);
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto err;
+ }
+ if (qbw->cfg[i].queue_id > vf->num_vf_qs) {
+ dev_warn(ice_pf_to_dev(vf->pf), "VF-%d trying to configure invalid queue_id\n",
+ vf->vf_id);
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto err;
+ }
+ if (qbw->cfg[i].tc >= ICE_MAX_TRAFFIC_CLASS) {
+ dev_warn(ice_pf_to_dev(vf->pf), "VF-%d trying to configure a traffic class higher than allowed\n",
+ vf->vf_id);
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto err;
+ }
}
for (i = 0; i < qbw->num_queues; i++) {
@@ -1900,13 +1921,21 @@ err:
*/
static int ice_vc_cfg_q_quanta(struct ice_vf *vf, u8 *msg)
{
+ u16 quanta_prof_id, quanta_size, start_qid, num_queues, end_qid, i;
enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
- u16 quanta_prof_id, quanta_size, start_qid, end_qid, i;
struct virtchnl_quanta_cfg *qquanta =
(struct virtchnl_quanta_cfg *)msg;
struct ice_vsi *vsi;
int ret;
+ start_qid = qquanta->queue_select.start_queue_id;
+ num_queues = qquanta->queue_select.num_queues;
+
+ if (check_add_overflow(start_qid, num_queues, &end_qid)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto err;
+ }
+
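Computing end_qid up front with check_add_overflow() closes a u16 wraparound
that the old open-coded addition allowed. An illustrative case (the values are
assumptions for the example):

/* start_qid = 0xFFF0, num_queues = 0x20:
 *   plain u16 addition wraps to end_qid = 0x0010, a small value that could
 *   slip past the later range checks; check_add_overflow() flags the
 *   overflow instead and the request is rejected with
 *   VIRTCHNL_STATUS_ERR_PARAM.
 */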
if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto err;
@@ -1918,8 +1947,6 @@ static int ice_vc_cfg_q_quanta(struct ice_vf *vf, u8 *msg)
goto err;
}
- end_qid = qquanta->queue_select.start_queue_id +
- qquanta->queue_select.num_queues;
if (end_qid > ICE_MAX_RSS_QS_PER_VF ||
end_qid > min_t(u16, vsi->alloc_txq, vsi->alloc_rxq)) {
dev_err(ice_pf_to_dev(vf->pf), "VF-%d trying to configure more than allocated number of queues: %d\n",
@@ -1948,7 +1975,6 @@ static int ice_vc_cfg_q_quanta(struct ice_vf *vf, u8 *msg)
goto err;
}
- start_qid = qquanta->queue_select.start_queue_id;
for (i = start_qid; i < end_qid; i++)
vsi->tx_rings[i]->quanta_prof_id = quanta_prof_id;
@@ -1975,6 +2001,7 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
struct ice_vsi *vsi;
u8 act_prt, pri_prt;
int i = -1, q_idx;
+ bool ena_ts;
lag = pf->lag;
mutex_lock(&pf->lag_mutex);
@@ -2104,9 +2131,14 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
rxdid = ICE_RXDID_LEGACY_1;
}
+ ena_ts = ((vf->driver_caps &
+ VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC) &&
+ (vf->driver_caps & VIRTCHNL_VF_CAP_PTP) &&
+ (qpi->rxq.flags & VIRTCHNL_PTP_RX_TSTAMP));
+
ice_write_qrxflxp_cntxt(&vsi->back->hw,
- vsi->rxq_map[q_idx],
- rxdid, 0x03, false);
+ vsi->rxq_map[q_idx], rxdid,
+ ICE_RXDID_PRIO, ena_ts);
}
}
@@ -3031,8 +3063,8 @@ err:
static int ice_vc_query_rxdid(struct ice_vf *vf)
{
enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
- struct virtchnl_supported_rxdids rxdid = {};
struct ice_pf *pf = vf->pf;
+ u64 rxdid;
if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
@@ -3044,7 +3076,7 @@ static int ice_vc_query_rxdid(struct ice_vf *vf)
goto err;
}
- rxdid.supported_rxdids = pf->supported_rxdids;
+ rxdid = pf->supported_rxdids;
err:
return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_SUPPORTED_RXDIDS,
@@ -4092,6 +4124,59 @@ out:
v_ret, NULL, 0);
}
+static int ice_vc_get_ptp_cap(struct ice_vf *vf,
+ const struct virtchnl_ptp_caps *msg)
+{
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ u32 caps = VIRTCHNL_1588_PTP_CAP_RX_TSTAMP |
+ VIRTCHNL_1588_PTP_CAP_READ_PHC;
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
+ goto err;
+
+ v_ret = VIRTCHNL_STATUS_SUCCESS;
+
+ if (msg->caps & caps)
+ vf->ptp_caps = caps;
+
+err:
+ /* send the response back to the VF */
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_1588_PTP_GET_CAPS, v_ret,
+ (u8 *)&vf->ptp_caps,
+ sizeof(struct virtchnl_ptp_caps));
+}
+
+static int ice_vc_get_phc_time(struct ice_vf *vf)
+{
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ struct virtchnl_phc_time *phc_time = NULL;
+ struct ice_pf *pf = vf->pf;
+ u32 len = 0;
+ int ret;
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
+ goto err;
+
+ v_ret = VIRTCHNL_STATUS_SUCCESS;
+
+ phc_time = kzalloc(sizeof(*phc_time), GFP_KERNEL);
+ if (!phc_time) {
+ v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
+ goto err;
+ }
+
+ len = sizeof(*phc_time);
+
+ phc_time->time = ice_ptp_read_src_clk_reg(pf, NULL);
+
+err:
+ /* send the response back to the VF */
+ ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_1588_PTP_GET_TIME, v_ret,
+ (u8 *)phc_time, len);
+ kfree(phc_time);
+ return ret;
+}
+
static const struct ice_virtchnl_ops ice_virtchnl_dflt_ops = {
.get_ver_msg = ice_vc_get_ver_msg,
.get_vf_res_msg = ice_vc_get_vf_res_msg,
@@ -4128,6 +4213,8 @@ static const struct ice_virtchnl_ops ice_virtchnl_dflt_ops = {
.get_qos_caps = ice_vc_get_qos_caps,
.cfg_q_bw = ice_vc_cfg_q_bw,
.cfg_q_quanta = ice_vc_cfg_q_quanta,
+ .get_ptp_cap = ice_vc_get_ptp_cap,
+ .get_phc_time = ice_vc_get_phc_time,
/* If you add a new op here please make sure to add it to
* ice_virtchnl_repr_ops as well.
*/
@@ -4264,6 +4351,8 @@ static const struct ice_virtchnl_ops ice_virtchnl_repr_ops = {
.get_qos_caps = ice_vc_get_qos_caps,
.cfg_q_bw = ice_vc_cfg_q_bw,
.cfg_q_quanta = ice_vc_cfg_q_quanta,
+ .get_ptp_cap = ice_vc_get_ptp_cap,
+ .get_phc_time = ice_vc_get_phc_time,
};
/**
@@ -4501,6 +4590,12 @@ error_handler:
case VIRTCHNL_OP_CONFIG_QUANTA:
err = ops->cfg_q_quanta(vf, msg);
break;
+ case VIRTCHNL_OP_1588_PTP_GET_CAPS:
+ err = ops->get_ptp_cap(vf, (const void *)msg);
+ break;
+ case VIRTCHNL_OP_1588_PTP_GET_TIME:
+ err = ops->get_phc_time(vf);
+ break;
case VIRTCHNL_OP_UNKNOWN:
default:
dev_err(dev, "Unsupported opcode %d from VF %d\n", v_opcode,
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.h b/drivers/net/ethernet/intel/ice/ice_virtchnl.h
index 0c629aef9baf..222990f229d5 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl.h
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.h
@@ -26,6 +26,9 @@
#define ICE_MAX_MACADDR_PER_VF 18
#define ICE_FLEX_DESC_RXDID_MAX_NUM 64
+/* Priority to be compared against previous priority from the pipe */
+#define ICE_RXDID_PRIO 0x03
+
/* VFs only get a single VSI. For ice hardware, the VF does not need to know
* its VSI index. However, the virtchnl interface requires a VSI number,
* mainly due to legacy hardware.
@@ -72,6 +75,9 @@ struct ice_virtchnl_ops {
int (*cfg_q_tc_map)(struct ice_vf *vf, u8 *msg);
int (*cfg_q_bw)(struct ice_vf *vf, u8 *msg);
int (*cfg_q_quanta)(struct ice_vf *vf, u8 *msg);
+ int (*get_ptp_cap)(struct ice_vf *vf,
+ const struct virtchnl_ptp_caps *msg);
+ int (*get_phc_time)(struct ice_vf *vf);
};
#ifdef CONFIG_PCI_IOV
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c
index c105a82ee136..a3d1579a619a 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c
@@ -84,6 +84,12 @@ static const u32 fdir_pf_allowlist_opcodes[] = {
VIRTCHNL_OP_ADD_FDIR_FILTER, VIRTCHNL_OP_DEL_FDIR_FILTER,
};
+/* VIRTCHNL_VF_CAP_PTP */
+static const u32 ptp_allowlist_opcodes[] = {
+ VIRTCHNL_OP_1588_PTP_GET_CAPS,
+ VIRTCHNL_OP_1588_PTP_GET_TIME,
+};
+
static const u32 tc_allowlist_opcodes[] = {
VIRTCHNL_OP_GET_QOS_CAPS, VIRTCHNL_OP_CONFIG_QUEUE_BW,
VIRTCHNL_OP_CONFIG_QUANTA,
@@ -110,6 +116,7 @@ static const struct allowlist_opcode_info allowlist_opcodes[] = {
ALLOW_ITEM(VIRTCHNL_VF_OFFLOAD_FDIR_PF, fdir_pf_allowlist_opcodes),
ALLOW_ITEM(VIRTCHNL_VF_OFFLOAD_VLAN_V2, vlan_v2_allowlist_opcodes),
ALLOW_ITEM(VIRTCHNL_VF_OFFLOAD_QOS, tc_allowlist_opcodes),
+ ALLOW_ITEM(VIRTCHNL_VF_CAP_PTP, ptp_allowlist_opcodes),
};
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
index 14e3f0f89c78..9be4bd717512 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
@@ -832,21 +832,27 @@ ice_vc_fdir_parse_raw(struct ice_vf *vf,
struct virtchnl_proto_hdrs *proto,
struct virtchnl_fdir_fltr_conf *conf)
{
- u8 *pkt_buf, *msk_buf __free(kfree);
+ u8 *pkt_buf, *msk_buf __free(kfree) = NULL;
struct ice_parser_result rslt;
struct ice_pf *pf = vf->pf;
+ u16 pkt_len, udp_port = 0;
struct ice_parser *psr;
int status = -ENOMEM;
struct ice_hw *hw;
- u16 udp_port = 0;
- pkt_buf = kzalloc(proto->raw.pkt_len, GFP_KERNEL);
- msk_buf = kzalloc(proto->raw.pkt_len, GFP_KERNEL);
+ pkt_len = proto->raw.pkt_len;
+
+ if (!pkt_len || pkt_len > VIRTCHNL_MAX_SIZE_RAW_PACKET)
+ return -EINVAL;
+
+ pkt_buf = kzalloc(pkt_len, GFP_KERNEL);
+ msk_buf = kzalloc(pkt_len, GFP_KERNEL);
+
if (!pkt_buf || !msk_buf)
goto err_mem_alloc;
- memcpy(pkt_buf, proto->raw.spec, proto->raw.pkt_len);
- memcpy(msk_buf, proto->raw.mask, proto->raw.pkt_len);
+ memcpy(pkt_buf, proto->raw.spec, pkt_len);
+ memcpy(msk_buf, proto->raw.mask, pkt_len);
hw = &pf->hw;
@@ -862,7 +868,7 @@ ice_vc_fdir_parse_raw(struct ice_vf *vf,
if (ice_get_open_tunnel_port(hw, &udp_port, TNL_VXLAN))
ice_parser_vxlan_tunnel_set(psr, udp_port, true);
- status = ice_parser_run(psr, pkt_buf, proto->raw.pkt_len, &rslt);
+ status = ice_parser_run(psr, pkt_buf, pkt_len, &rslt);
if (status)
goto err_parser_destroy;
@@ -876,7 +882,7 @@ ice_vc_fdir_parse_raw(struct ice_vf *vf,
}
status = ice_parser_profile_init(&rslt, pkt_buf, msk_buf,
- proto->raw.pkt_len, ICE_BLK_FD,
+ pkt_len, ICE_BLK_FD,
conf->prof);
if (status)
goto err_parser_profile_init;
@@ -885,7 +891,7 @@ ice_vc_fdir_parse_raw(struct ice_vf *vf,
ice_parser_profile_dump(hw, conf->prof);
/* Store raw flow info into @conf */
- conf->pkt_len = proto->raw.pkt_len;
+ conf->pkt_len = pkt_len;
conf->pkt_buf = pkt_buf;
conf->parser_ena = true;
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
index 8975d2971bc3..a3a4eaa17739 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
@@ -2,6 +2,7 @@
/* Copyright (c) 2019, Intel Corporation. */
#include <linux/bpf_trace.h>
+#include <linux/unroll.h>
#include <net/xdp_sock_drv.h>
#include <net/xdp.h>
#include "ice.h"
@@ -989,7 +990,8 @@ static void ice_xmit_pkt_batch(struct ice_tx_ring *xdp_ring,
struct ice_tx_desc *tx_desc;
u32 i;
- loop_unrolled_for(i = 0; i < PKTS_PER_BATCH; i++) {
+ unrolled_count(PKTS_PER_BATCH)
+ for (i = 0; i < PKTS_PER_BATCH; i++) {
dma_addr_t dma;
dma = xsk_buff_raw_get_dma(xsk_pool, descs[i].addr);
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.h b/drivers/net/ethernet/intel/ice/ice_xsk.h
index 45adeb513253..8dc5d55e26c5 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.h
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.h
@@ -7,14 +7,6 @@
#define PKTS_PER_BATCH 8
-#ifdef __clang__
-#define loop_unrolled_for _Pragma("clang loop unroll_count(8)") for
-#elif __GNUC__ >= 8
-#define loop_unrolled_for _Pragma("GCC unroll 8") for
-#else
-#define loop_unrolled_for for
-#endif
-
struct ice_vsi;
#ifdef CONFIG_XDP_SOCKETS
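The ice_xsk.h hunk above removes the driver-local loop_unrolled_for macro; ice_xsk.c now places the generic unrolled_count() helper from <linux/unroll.h> in front of the batch loop instead. The removed lines show the underlying mechanism, a compiler-specific unroll pragma with a plain for loop as fallback; a self-contained sketch along the same lines (unroll8_for and fill_descriptor() are illustrative names):

/* Roughly what the removed macro did; the generic <linux/unroll.h>
 * helper follows the same idea with a configurable count.
 */
#ifdef __clang__
#define unroll8_for	_Pragma("clang loop unroll_count(8)") for
#elif __GNUC__ >= 8
#define unroll8_for	_Pragma("GCC unroll 8") for
#else
#define unroll8_for	for	/* no unroll hint on older compilers */
#endif

	unroll8_for (i = 0; i < 8; i++)
		fill_descriptor(i);	/* hypothetical per-iteration work */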
diff --git a/drivers/net/ethernet/intel/idpf/idpf_lib.c b/drivers/net/ethernet/intel/idpf/idpf_lib.c
index a3d6b8f198a8..aa755dedb41d 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_lib.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_lib.c
@@ -814,6 +814,7 @@ static int idpf_cfg_netdev(struct idpf_vport *vport)
netdev->hw_features |= dflt_features | offloads;
netdev->hw_enc_features |= dflt_features | offloads;
idpf_set_ethtool_ops(netdev);
+ netif_set_affinity_auto(netdev);
SET_NETDEV_DEV(netdev, &adapter->pdev->dev);
/* carrier off on init to avoid Tx hangs */
@@ -927,15 +928,19 @@ static int idpf_stop(struct net_device *netdev)
static void idpf_decfg_netdev(struct idpf_vport *vport)
{
struct idpf_adapter *adapter = vport->adapter;
+ u16 idx = vport->idx;
kfree(vport->rx_ptype_lkup);
vport->rx_ptype_lkup = NULL;
- unregister_netdev(vport->netdev);
- free_netdev(vport->netdev);
+ if (test_and_clear_bit(IDPF_VPORT_REG_NETDEV,
+ adapter->vport_config[idx]->flags)) {
+ unregister_netdev(vport->netdev);
+ free_netdev(vport->netdev);
+ }
vport->netdev = NULL;
- adapter->netdevs[vport->idx] = NULL;
+ adapter->netdevs[idx] = NULL;
}
/**
@@ -1536,13 +1541,22 @@ void idpf_init_task(struct work_struct *work)
}
for (index = 0; index < adapter->max_vports; index++) {
- if (adapter->netdevs[index] &&
- !test_bit(IDPF_VPORT_REG_NETDEV,
- adapter->vport_config[index]->flags)) {
- register_netdev(adapter->netdevs[index]);
- set_bit(IDPF_VPORT_REG_NETDEV,
- adapter->vport_config[index]->flags);
+ struct net_device *netdev = adapter->netdevs[index];
+ struct idpf_vport_config *vport_config;
+
+ vport_config = adapter->vport_config[index];
+
+ if (!netdev ||
+ test_bit(IDPF_VPORT_REG_NETDEV, vport_config->flags))
+ continue;
+
+ err = register_netdev(netdev);
+ if (err) {
+ dev_err(&pdev->dev, "failed to register netdev for vport %d: %pe\n",
+ index, ERR_PTR(err));
+ continue;
}
+ set_bit(IDPF_VPORT_REG_NETDEV, vport_config->flags);
}
/* As all the required vports are created, clear the reset flag
diff --git a/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c
index dfd7cf1d9aa0..eae1b6f474e6 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c
@@ -595,7 +595,7 @@ static bool idpf_rx_singleq_is_non_eop(const union virtchnl2_rx_desc *rx_desc)
*/
static void idpf_rx_singleq_csum(struct idpf_rx_queue *rxq,
struct sk_buff *skb,
- struct idpf_rx_csum_decoded csum_bits,
+ struct libeth_rx_csum csum_bits,
struct libeth_rx_pt decoded)
{
bool ipv4, ipv6;
@@ -661,10 +661,10 @@ checksum_fail:
*
* Return: parsed checksum status.
**/
-static struct idpf_rx_csum_decoded
+static struct libeth_rx_csum
idpf_rx_singleq_base_csum(const union virtchnl2_rx_desc *rx_desc)
{
- struct idpf_rx_csum_decoded csum_bits = { };
+ struct libeth_rx_csum csum_bits = { };
u32 rx_error, rx_status;
u64 qword;
@@ -696,10 +696,10 @@ idpf_rx_singleq_base_csum(const union virtchnl2_rx_desc *rx_desc)
*
* Return: parsed checksum status.
**/
-static struct idpf_rx_csum_decoded
+static struct libeth_rx_csum
idpf_rx_singleq_flex_csum(const union virtchnl2_rx_desc *rx_desc)
{
- struct idpf_rx_csum_decoded csum_bits = { };
+ struct libeth_rx_csum csum_bits = { };
u16 rx_status0, rx_status1;
rx_status0 = le16_to_cpu(rx_desc->flex_nic_wb.status_error0);
@@ -798,7 +798,7 @@ idpf_rx_singleq_process_skb_fields(struct idpf_rx_queue *rx_q,
u16 ptype)
{
struct libeth_rx_pt decoded = rx_q->rx_ptype_lkup[ptype];
- struct idpf_rx_csum_decoded csum_bits;
+ struct libeth_rx_csum csum_bits;
/* modifies the skb - consumes the enet header */
skb->protocol = eth_type_trans(skb, rx_q->netdev);
@@ -891,6 +891,7 @@ bool idpf_rx_singleq_buf_hw_alloc_all(struct idpf_rx_queue *rx_q,
* idpf_rx_singleq_extract_base_fields - Extract fields from the Rx descriptor
* @rx_desc: the descriptor to process
* @fields: storage for extracted values
+ * @ptype: pointer to store the packet type
*
* Decode the Rx descriptor and extract relevant information including the
* size and Rx packet type.
@@ -900,20 +901,21 @@ bool idpf_rx_singleq_buf_hw_alloc_all(struct idpf_rx_queue *rx_q,
*/
static void
idpf_rx_singleq_extract_base_fields(const union virtchnl2_rx_desc *rx_desc,
- struct idpf_rx_extracted *fields)
+ struct libeth_rqe_info *fields, u32 *ptype)
{
u64 qword;
qword = le64_to_cpu(rx_desc->base_wb.qword1.status_error_ptype_len);
- fields->size = FIELD_GET(VIRTCHNL2_RX_BASE_DESC_QW1_LEN_PBUF_M, qword);
- fields->rx_ptype = FIELD_GET(VIRTCHNL2_RX_BASE_DESC_QW1_PTYPE_M, qword);
+ fields->len = FIELD_GET(VIRTCHNL2_RX_BASE_DESC_QW1_LEN_PBUF_M, qword);
+ *ptype = FIELD_GET(VIRTCHNL2_RX_BASE_DESC_QW1_PTYPE_M, qword);
}
/**
* idpf_rx_singleq_extract_flex_fields - Extract fields from the Rx descriptor
* @rx_desc: the descriptor to process
* @fields: storage for extracted values
+ * @ptype: pointer to store the packet type
*
* Decode the Rx descriptor and extract relevant information including the
* size and Rx packet type.
@@ -923,12 +925,12 @@ idpf_rx_singleq_extract_base_fields(const union virtchnl2_rx_desc *rx_desc,
*/
static void
idpf_rx_singleq_extract_flex_fields(const union virtchnl2_rx_desc *rx_desc,
- struct idpf_rx_extracted *fields)
+ struct libeth_rqe_info *fields, u32 *ptype)
{
- fields->size = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_PKT_LEN_M,
- le16_to_cpu(rx_desc->flex_nic_wb.pkt_len));
- fields->rx_ptype = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_PTYPE_M,
- le16_to_cpu(rx_desc->flex_nic_wb.ptype_flex_flags0));
+ fields->len = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_PKT_LEN_M,
+ le16_to_cpu(rx_desc->flex_nic_wb.pkt_len));
+ *ptype = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_PTYPE_M,
+ le16_to_cpu(rx_desc->flex_nic_wb.ptype_flex_flags0));
}
/**
@@ -936,17 +938,18 @@ idpf_rx_singleq_extract_flex_fields(const union virtchnl2_rx_desc *rx_desc,
* @rx_q: Rx descriptor queue
* @rx_desc: the descriptor to process
* @fields: storage for extracted values
+ * @ptype: pointer to store the packet type
*
*/
static void
idpf_rx_singleq_extract_fields(const struct idpf_rx_queue *rx_q,
const union virtchnl2_rx_desc *rx_desc,
- struct idpf_rx_extracted *fields)
+ struct libeth_rqe_info *fields, u32 *ptype)
{
if (rx_q->rxdids == VIRTCHNL2_RXDID_1_32B_BASE_M)
- idpf_rx_singleq_extract_base_fields(rx_desc, fields);
+ idpf_rx_singleq_extract_base_fields(rx_desc, fields, ptype);
else
- idpf_rx_singleq_extract_flex_fields(rx_desc, fields);
+ idpf_rx_singleq_extract_flex_fields(rx_desc, fields, ptype);
}
/**
@@ -966,9 +969,10 @@ static int idpf_rx_singleq_clean(struct idpf_rx_queue *rx_q, int budget)
/* Process Rx packets bounded by budget */
while (likely(total_rx_pkts < (unsigned int)budget)) {
- struct idpf_rx_extracted fields = { };
+ struct libeth_rqe_info fields = { };
union virtchnl2_rx_desc *rx_desc;
struct idpf_rx_buf *rx_buf;
+ u32 ptype;
/* get the Rx desc from Rx queue based on 'next_to_clean' */
rx_desc = &rx_q->rx[ntc];
@@ -989,16 +993,16 @@ static int idpf_rx_singleq_clean(struct idpf_rx_queue *rx_q, int budget)
*/
dma_rmb();
- idpf_rx_singleq_extract_fields(rx_q, rx_desc, &fields);
+ idpf_rx_singleq_extract_fields(rx_q, rx_desc, &fields, &ptype);
rx_buf = &rx_q->rx_buf[ntc];
- if (!libeth_rx_sync_for_cpu(rx_buf, fields.size))
+ if (!libeth_rx_sync_for_cpu(rx_buf, fields.len))
goto skip_data;
if (skb)
- idpf_rx_add_frag(rx_buf, skb, fields.size);
+ idpf_rx_add_frag(rx_buf, skb, fields.len);
else
- skb = idpf_rx_build_skb(rx_buf, fields.size);
+ skb = idpf_rx_build_skb(rx_buf, fields.len);
/* exit if we failed to retrieve a buffer */
if (!skb)
@@ -1033,8 +1037,7 @@ skip_data:
total_rx_bytes += skb->len;
/* protocol */
- idpf_rx_singleq_process_skb_fields(rx_q, skb,
- rx_desc, fields.rx_ptype);
+ idpf_rx_singleq_process_skb_fields(rx_q, skb, rx_desc, ptype);
/* send completed skb up the stack */
napi_gro_receive(rx_q->pp->p.napi, skb);
diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
index 977741c41498..bdf52cef3891 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
@@ -2895,7 +2895,7 @@ idpf_rx_hash(const struct idpf_rx_queue *rxq, struct sk_buff *skb,
* skb->protocol must be set before this function is called
*/
static void idpf_rx_csum(struct idpf_rx_queue *rxq, struct sk_buff *skb,
- struct idpf_rx_csum_decoded csum_bits,
+ struct libeth_rx_csum csum_bits,
struct libeth_rx_pt decoded)
{
bool ipv4, ipv6;
@@ -2923,7 +2923,7 @@ static void idpf_rx_csum(struct idpf_rx_queue *rxq, struct sk_buff *skb,
if (unlikely(csum_bits.l4e))
goto checksum_fail;
- if (csum_bits.raw_csum_inv ||
+ if (!csum_bits.raw_csum_valid ||
decoded.inner_prot == LIBETH_RX_PT_INNER_SCTP) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
return;
@@ -2946,10 +2946,10 @@ checksum_fail:
*
* Return: parsed checksum status.
**/
-static struct idpf_rx_csum_decoded
+static struct libeth_rx_csum
idpf_rx_splitq_extract_csum_bits(const struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc)
{
- struct idpf_rx_csum_decoded csum = { };
+ struct libeth_rx_csum csum = { };
u8 qword0, qword1;
qword0 = rx_desc->status_err0_qw0;
@@ -2965,9 +2965,9 @@ idpf_rx_splitq_extract_csum_bits(const struct virtchnl2_rx_flex_desc_adv_nic_3 *
qword1);
csum.ipv6exadd = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_IPV6EXADD_M,
qword0);
- csum.raw_csum_inv =
- le16_get_bits(rx_desc->ptype_err_fflags0,
- VIRTCHNL2_RX_FLEX_DESC_ADV_RAW_CSUM_INV_M);
+ csum.raw_csum_valid =
+ !le16_get_bits(rx_desc->ptype_err_fflags0,
+ VIRTCHNL2_RX_FLEX_DESC_ADV_RAW_CSUM_INV_M);
csum.raw_csum = le16_to_cpu(rx_desc->misc.raw_cs);
return csum;
@@ -3059,7 +3059,7 @@ static int
idpf_rx_process_skb_fields(struct idpf_rx_queue *rxq, struct sk_buff *skb,
const struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc)
{
- struct idpf_rx_csum_decoded csum_bits;
+ struct libeth_rx_csum csum_bits;
struct libeth_rx_pt decoded;
u16 rx_ptype;
@@ -3552,8 +3552,6 @@ void idpf_vport_intr_rel(struct idpf_vport *vport)
q_vector->tx = NULL;
kfree(q_vector->rx);
q_vector->rx = NULL;
-
- free_cpumask_var(q_vector->affinity_mask);
}
kfree(vport->q_vectors);
@@ -3580,8 +3578,6 @@ static void idpf_vport_intr_rel_irq(struct idpf_vport *vport)
vidx = vport->q_vector_idxs[vector];
irq_num = adapter->msix_entries[vidx].vector;
- /* clear the affinity_mask in the IRQ descriptor */
- irq_set_affinity_hint(irq_num, NULL);
kfree(free_irq(irq_num, q_vector));
}
}
@@ -3769,8 +3765,6 @@ static int idpf_vport_intr_req_irq(struct idpf_vport *vport)
"Request_irq failed, error: %d\n", err);
goto free_q_irqs;
}
- /* assign the mask for this irq */
- irq_set_affinity_hint(irq_num, q_vector->affinity_mask);
}
return 0;
@@ -4182,7 +4176,8 @@ static int idpf_vport_intr_init_vec_idx(struct idpf_vport *vport)
static void idpf_vport_intr_napi_add_all(struct idpf_vport *vport)
{
int (*napi_poll)(struct napi_struct *napi, int budget);
- u16 v_idx;
+ u16 v_idx, qv_idx;
+ int irq_num;
if (idpf_is_queue_model_split(vport->txq_model))
napi_poll = idpf_vport_splitq_napi_poll;
@@ -4191,12 +4186,12 @@ static void idpf_vport_intr_napi_add_all(struct idpf_vport *vport)
for (v_idx = 0; v_idx < vport->num_q_vectors; v_idx++) {
struct idpf_q_vector *q_vector = &vport->q_vectors[v_idx];
+ qv_idx = vport->q_vector_idxs[v_idx];
+ irq_num = vport->adapter->msix_entries[qv_idx].vector;
- netif_napi_add(vport->netdev, &q_vector->napi, napi_poll);
-
- /* only set affinity_mask if the CPU is online */
- if (cpu_online(v_idx))
- cpumask_set_cpu(v_idx, q_vector->affinity_mask);
+ netif_napi_add_config(vport->netdev, &q_vector->napi,
+ napi_poll, v_idx);
+ netif_napi_set_irq(&q_vector->napi, irq_num);
}
}
@@ -4240,9 +4235,6 @@ int idpf_vport_intr_alloc(struct idpf_vport *vport)
q_vector->rx_intr_mode = IDPF_ITR_DYNAMIC;
q_vector->rx_itr_idx = VIRTCHNL2_ITR_IDX_0;
- if (!zalloc_cpumask_var(&q_vector->affinity_mask, GFP_KERNEL))
- goto error;
-
q_vector->tx = kcalloc(txqs_per_vector, sizeof(*q_vector->tx),
GFP_KERNEL);
if (!q_vector->tx)
diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.h b/drivers/net/ethernet/intel/idpf/idpf_txrx.h
index 0f71a6f5557b..b029f566e57c 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.h
+++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.h
@@ -213,25 +213,6 @@ enum idpf_tx_ctx_desc_eipt_offload {
IDPF_TX_CTX_EXT_IP_IPV4 = 0x3
};
-/* Checksum offload bits decoded from the receive descriptor. */
-struct idpf_rx_csum_decoded {
- u32 l3l4p : 1;
- u32 ipe : 1;
- u32 eipe : 1;
- u32 eudpe : 1;
- u32 ipv6exadd : 1;
- u32 l4e : 1;
- u32 pprs : 1;
- u32 nat : 1;
- u32 raw_csum_inv : 1;
- u32 raw_csum : 16;
-};
-
-struct idpf_rx_extracted {
- unsigned int size;
- u16 rx_ptype;
-};
-
#define IDPF_TX_COMPLQ_CLEAN_BUDGET 256
#define IDPF_TX_MIN_PKT_LEN 17
#define IDPF_TX_DESCS_FOR_SKB_DATA_PTR 1
@@ -401,7 +382,6 @@ struct idpf_intr_reg {
* @rx_intr_mode: Dynamic ITR or not
* @rx_itr_idx: RX ITR index
* @v_idx: Vector index
- * @affinity_mask: CPU affinity mask
*/
struct idpf_q_vector {
__cacheline_group_begin_aligned(read_mostly);
@@ -438,13 +418,12 @@ struct idpf_q_vector {
__cacheline_group_begin_aligned(cold);
u16 v_idx;
- cpumask_var_t affinity_mask;
__cacheline_group_end_aligned(cold);
};
libeth_cacheline_set_assert(struct idpf_q_vector, 120,
24 + sizeof(struct napi_struct) +
2 * sizeof(struct dim),
- 8 + sizeof(cpumask_var_t));
+ 8);
struct idpf_rx_queue_stats {
u64_stats_t packets;
@@ -940,7 +919,7 @@ static inline int idpf_q_vector_to_mem(const struct idpf_q_vector *q_vector)
if (!q_vector)
return NUMA_NO_NODE;
- cpu = cpumask_first(q_vector->affinity_mask);
+ cpu = cpumask_first(&q_vector->napi.config->affinity_mask);
return cpu < nr_cpu_ids ? cpu_to_mem(cpu) : NUMA_NO_NODE;
}
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
index f94570556120..f323e1c1989f 100644
--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
@@ -509,6 +509,12 @@ static int igb_ptp_feature_enable_82580(struct ptp_clock_info *ptp,
PTP_STRICT_FLAGS))
return -EOPNOTSUPP;
+ /* Both the rising and falling edge are timestamped */
+ if ((rq->extts.flags & PTP_STRICT_FLAGS) &&
+ (rq->extts.flags & PTP_ENABLE_FEATURE) &&
+ (rq->extts.flags & PTP_EXTTS_EDGES) != PTP_EXTTS_EDGES)
+ return -EOPNOTSUPP;
+
if (on) {
pin = ptp_find_pin(igb->ptp_clock, PTP_PF_EXTTS,
rq->extts.index);
diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h
index b8111ad9a9a8..cd1d7b6c1782 100644
--- a/drivers/net/ethernet/intel/igc/igc.h
+++ b/drivers/net/ethernet/intel/igc/igc.h
@@ -579,6 +579,7 @@ struct igc_metadata_request {
struct xsk_tx_metadata *meta;
struct igc_ring *tx_ring;
u32 cmd_type;
+ u16 used_desc;
};
struct igc_q_vector {
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index 84307bb7313e..491d942cefca 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -1092,7 +1092,8 @@ static int igc_init_empty_frame(struct igc_ring *ring,
dma = dma_map_single(ring->dev, skb->data, size, DMA_TO_DEVICE);
if (dma_mapping_error(ring->dev, dma)) {
- netdev_err_once(ring->netdev, "Failed to map DMA for TX\n");
+ net_err_ratelimited("%s: DMA mapping error for empty frame\n",
+ netdev_name(ring->netdev));
return -ENOMEM;
}
@@ -1108,20 +1109,12 @@ static int igc_init_empty_frame(struct igc_ring *ring,
return 0;
}
-static int igc_init_tx_empty_descriptor(struct igc_ring *ring,
- struct sk_buff *skb,
- struct igc_tx_buffer *first)
+static void igc_init_tx_empty_descriptor(struct igc_ring *ring,
+ struct sk_buff *skb,
+ struct igc_tx_buffer *first)
{
union igc_adv_tx_desc *desc;
u32 cmd_type, olinfo_status;
- int err;
-
- if (!igc_desc_unused(ring))
- return -EBUSY;
-
- err = igc_init_empty_frame(ring, first, skb);
- if (err)
- return err;
cmd_type = IGC_ADVTXD_DTYP_DATA | IGC_ADVTXD_DCMD_DEXT |
IGC_ADVTXD_DCMD_IFCS | IGC_TXD_DCMD |
@@ -1140,8 +1133,6 @@ static int igc_init_tx_empty_descriptor(struct igc_ring *ring,
ring->next_to_use++;
if (ring->next_to_use == ring->count)
ring->next_to_use = 0;
-
- return 0;
}
#define IGC_EMPTY_FRAME_SIZE 60
@@ -1567,6 +1558,40 @@ static bool igc_request_tx_tstamp(struct igc_adapter *adapter, struct sk_buff *s
return false;
}
+static int igc_insert_empty_frame(struct igc_ring *tx_ring)
+{
+ struct igc_tx_buffer *empty_info;
+ struct sk_buff *empty_skb;
+ void *data;
+ int ret;
+
+ empty_info = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
+ empty_skb = alloc_skb(IGC_EMPTY_FRAME_SIZE, GFP_ATOMIC);
+ if (unlikely(!empty_skb)) {
+ net_err_ratelimited("%s: skb alloc error for empty frame\n",
+ netdev_name(tx_ring->netdev));
+ return -ENOMEM;
+ }
+
+ data = skb_put(empty_skb, IGC_EMPTY_FRAME_SIZE);
+ memset(data, 0, IGC_EMPTY_FRAME_SIZE);
+
+ /* Prepare DMA mapping and Tx buffer information */
+ ret = igc_init_empty_frame(tx_ring, empty_info, empty_skb);
+ if (unlikely(ret)) {
+ dev_kfree_skb_any(empty_skb);
+ return ret;
+ }
+
+ /* Prepare advanced context descriptor for empty packet */
+ igc_tx_ctxtdesc(tx_ring, 0, false, 0, 0, 0);
+
+ /* Prepare advanced data descriptor for empty packet */
+ igc_init_tx_empty_descriptor(tx_ring, empty_skb, empty_info);
+
+ return 0;
+}
+
static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb,
struct igc_ring *tx_ring)
{
@@ -1586,6 +1611,7 @@ static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb,
* + 1 desc for skb_headlen/IGC_MAX_DATA_PER_TXD,
* + 2 desc gap to keep tail from touching head,
* + 1 desc for context descriptor,
+ * + 2 desc for inserting an empty packet for launch time,
* otherwise try next time
*/
for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
@@ -1605,24 +1631,16 @@ static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb,
launch_time = igc_tx_launchtime(tx_ring, txtime, &first_flag, &insert_empty);
if (insert_empty) {
- struct igc_tx_buffer *empty_info;
- struct sk_buff *empty;
- void *data;
-
- empty_info = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
- empty = alloc_skb(IGC_EMPTY_FRAME_SIZE, GFP_ATOMIC);
- if (!empty)
- goto done;
-
- data = skb_put(empty, IGC_EMPTY_FRAME_SIZE);
- memset(data, 0, IGC_EMPTY_FRAME_SIZE);
-
- igc_tx_ctxtdesc(tx_ring, 0, false, 0, 0, 0);
-
- if (igc_init_tx_empty_descriptor(tx_ring,
- empty,
- empty_info) < 0)
- dev_kfree_skb_any(empty);
+ /* Reset the launch time if the required empty frame fails to
+ * be inserted. However, this packet is not dropped, so it
+ * "dirties" the current Qbv cycle. This ensures that the
+ * upcoming packet, which is scheduled in the next Qbv cycle,
+ * does not require an empty frame. This way, the launch time
+ * continues to function correctly despite the current failure
+ * to insert the empty frame.
+ */
+ if (igc_insert_empty_frame(tx_ring))
+ launch_time = 0;
}
done:
@@ -1650,7 +1668,8 @@ done:
if (igc_request_tx_tstamp(adapter, skb, &tstamp_flags)) {
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
tx_flags |= IGC_TX_FLAGS_TSTAMP | tstamp_flags;
- if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP_USE_CYCLES)
+ if (skb->sk &&
+ READ_ONCE(skb->sk->sk_tsflags) & SOF_TIMESTAMPING_BIND_PHC)
tx_flags |= IGC_TX_FLAGS_TSTAMP_TIMER_1;
} else {
adapter->tx_hwtstamp_skipped++;
@@ -2953,9 +2972,48 @@ static u64 igc_xsk_fill_timestamp(void *_priv)
return *(u64 *)_priv;
}
+static void igc_xsk_request_launch_time(u64 launch_time, void *_priv)
+{
+ struct igc_metadata_request *meta_req = _priv;
+ struct igc_ring *tx_ring = meta_req->tx_ring;
+ __le32 launch_time_offset;
+ bool insert_empty = false;
+ bool first_flag = false;
+ u16 used_desc = 0;
+
+ if (!tx_ring->launchtime_enable)
+ return;
+
+ launch_time_offset = igc_tx_launchtime(tx_ring,
+ ns_to_ktime(launch_time),
+ &first_flag, &insert_empty);
+ if (insert_empty) {
+ /* Disregard the launch time request if the required empty frame
+ * fails to be inserted.
+ */
+ if (igc_insert_empty_frame(tx_ring))
+ return;
+
+ meta_req->tx_buffer =
+ &tx_ring->tx_buffer_info[tx_ring->next_to_use];
+ /* Inserting an empty packet requires two descriptors:
+ * one data descriptor and one context descriptor.
+ */
+ used_desc += 2;
+ }
+
+ /* Use one context descriptor to specify launch time and first flag. */
+ igc_tx_ctxtdesc(tx_ring, launch_time_offset, first_flag, 0, 0, 0);
+ used_desc += 1;
+
+ /* Update the number of used descriptors in this request */
+ meta_req->used_desc += used_desc;
+}
+
const struct xsk_tx_metadata_ops igc_xsk_tx_metadata_ops = {
.tmo_request_timestamp = igc_xsk_request_timestamp,
.tmo_fill_timestamp = igc_xsk_fill_timestamp,
+ .tmo_request_launch_time = igc_xsk_request_launch_time,
};
static void igc_xdp_xmit_zc(struct igc_ring *ring)
@@ -2978,7 +3036,13 @@ static void igc_xdp_xmit_zc(struct igc_ring *ring)
ntu = ring->next_to_use;
budget = igc_desc_unused(ring);
- while (xsk_tx_peek_desc(pool, &xdp_desc) && budget--) {
+ /* Packets with launch time require one data descriptor and one context
+ * descriptor. When the launch time falls into the next Qbv cycle, we
+ * may need to insert an empty packet, which requires two more
+ * descriptors. Therefore, to be safe, we always ensure we have at least
+ * 4 descriptors available.
+ */
+ while (xsk_tx_peek_desc(pool, &xdp_desc) && budget >= 4) {
struct igc_metadata_request meta_req;
struct xsk_tx_metadata *meta = NULL;
struct igc_tx_buffer *bi;
@@ -2999,9 +3063,19 @@ static void igc_xdp_xmit_zc(struct igc_ring *ring)
meta_req.tx_ring = ring;
meta_req.tx_buffer = bi;
meta_req.meta = meta;
+ meta_req.used_desc = 0;
xsk_tx_metadata_request(meta, &igc_xsk_tx_metadata_ops,
&meta_req);
+ /* xsk_tx_metadata_request() may have updated next_to_use */
+ ntu = ring->next_to_use;
+
+ /* xsk_tx_metadata_request() may have updated Tx buffer info */
+ bi = meta_req.tx_buffer;
+
+ /* xsk_tx_metadata_request() may use a few descriptors */
+ budget -= meta_req.used_desc;
+
tx_desc = IGC_TX_DESC(ring, ntu);
tx_desc->read.cmd_type_len = cpu_to_le32(meta_req.cmd_type);
tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
@@ -3019,9 +3093,11 @@ static void igc_xdp_xmit_zc(struct igc_ring *ring)
ntu++;
if (ntu == ring->count)
ntu = 0;
+
+ ring->next_to_use = ntu;
+ budget--;
}
- ring->next_to_use = ntu;
if (tx_desc) {
igc_flush_tx_descriptors(ring);
xsk_tx_release(pool);
@@ -7090,8 +7166,8 @@ static int igc_probe(struct pci_dev *pdev,
INIT_WORK(&adapter->reset_task, igc_reset_task);
INIT_WORK(&adapter->watchdog_task, igc_watchdog_task);
- hrtimer_init(&adapter->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- adapter->hrtimer.function = &igc_qbv_scheduling_timer;
+ hrtimer_setup(&adapter->hrtimer, &igc_qbv_scheduling_timer, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
/* Initialize link properties that are user-changeable */
adapter->fc_autoneg = true;
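The launch-time support added to igc_main.c budgets Tx descriptors up front: an AF_XDP frame with a launch time needs one data descriptor plus one context descriptor, and when the launch time slips into the next Qbv cycle an empty packet (one more context and one more data descriptor) must be inserted first, which is why igc_xdp_xmit_zc() now only proceeds while at least four descriptors remain. The worst case, spelled out with illustrative names:

/* Worst-case descriptor use for one AF_XDP frame with launch time,
 * matching the comment in igc_xdp_xmit_zc(); names are illustrative.
 */
enum {
	DESC_DATA	= 1,	/* the frame itself */
	DESC_LT_CTX	= 1,	/* context descriptor carrying the launch time */
	DESC_EMPTY_CTX	= 1,	/* context descriptor for the empty packet */
	DESC_EMPTY_DATA	= 1,	/* data descriptor for the empty packet */
};

#define ZC_DESC_WORST_CASE \
	(DESC_DATA + DESC_LT_CTX + DESC_EMPTY_CTX + DESC_EMPTY_DATA)	/* == 4 */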
diff --git a/drivers/net/ethernet/intel/igc/igc_xdp.c b/drivers/net/ethernet/intel/igc/igc_xdp.c
index 13bbd3346e01..c538e6b18aad 100644
--- a/drivers/net/ethernet/intel/igc/igc_xdp.c
+++ b/drivers/net/ethernet/intel/igc/igc_xdp.c
@@ -14,6 +14,7 @@ int igc_xdp_set_prog(struct igc_adapter *adapter, struct bpf_prog *prog,
bool if_running = netif_running(dev);
struct bpf_prog *old_prog;
bool need_update;
+ unsigned int i;
if (dev->mtu > ETH_DATA_LEN) {
/* For now, the driver doesn't support XDP functionality with
@@ -24,8 +25,13 @@ int igc_xdp_set_prog(struct igc_adapter *adapter, struct bpf_prog *prog,
}
need_update = !!adapter->xdp_prog != !!prog;
- if (if_running && need_update)
- igc_close(dev);
+ if (if_running && need_update) {
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ igc_disable_rx_ring(adapter->rx_ring[i]);
+ igc_disable_tx_ring(adapter->tx_ring[i]);
+ napi_disable(&adapter->rx_ring[i]->q_vector->napi);
+ }
+ }
old_prog = xchg(&adapter->xdp_prog, prog);
if (old_prog)
@@ -36,8 +42,13 @@ int igc_xdp_set_prog(struct igc_adapter *adapter, struct bpf_prog *prog,
else
xdp_features_clear_redirect_target(dev);
- if (if_running && need_update)
- igc_open(dev);
+ if (if_running && need_update) {
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ napi_enable(&adapter->rx_ring[i]->q_vector->napi);
+ igc_enable_tx_ring(adapter->tx_ring[i]);
+ igc_enable_rx_ring(adapter->rx_ring[i]);
+ }
+ }
return 0;
}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index da91c582d439..f03925c1f521 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -3185,6 +3185,7 @@ static int ixgbe_get_ts_info(struct net_device *dev,
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
info->rx_filters |= BIT(HWTSTAMP_FILTER_ALL);
break;
case ixgbe_mac_X540:
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
index 866024f2b9ee..07ea1954a276 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
@@ -817,30 +817,9 @@ static void ixgbe_ipsec_del_sa(struct xfrm_state *xs)
}
}
-/**
- * ixgbe_ipsec_offload_ok - can this packet use the xfrm hw offload
- * @skb: current data packet
- * @xs: pointer to transformer state struct
- **/
-static bool ixgbe_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *xs)
-{
- if (xs->props.family == AF_INET) {
- /* Offload with IPv4 options is not supported yet */
- if (ip_hdr(skb)->ihl != 5)
- return false;
- } else {
- /* Offload with IPv6 extension headers is not support yet */
- if (ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr))
- return false;
- }
-
- return true;
-}
-
static const struct xfrmdev_ops ixgbe_xfrmdev_ops = {
.xdo_dev_state_add = ixgbe_ipsec_add_sa,
.xdo_dev_state_delete = ixgbe_ipsec_del_sa,
- .xdo_dev_offload_ok = ixgbe_ipsec_offload_ok,
};
/**
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 467f81239e12..481f917f7ed2 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -3185,6 +3185,10 @@ static void ixgbe_handle_fw_event(struct ixgbe_adapter *adapter)
case ixgbe_aci_opc_get_link_status:
ixgbe_handle_link_status_event(adapter, &event);
break;
+ case ixgbe_aci_opc_temp_tca_event:
+ e_crit(drv, "%s\n", ixgbe_overheat_msg);
+ ixgbe_down(adapter);
+ break;
default:
e_warn(hw, "unknown FW async event captured\n");
break;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
index 9339edbd9082..eef25e11d938 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
@@ -140,6 +140,7 @@
* proper mult and shift to convert the cycles into nanoseconds of time.
*/
#define IXGBE_X550_BASE_PERIOD 0xC80000000ULL
+#define IXGBE_E610_BASE_PERIOD 0x333333333ULL
#define INCVALUE_MASK 0x7FFFFFFF
#define ISGN 0x80000000
@@ -415,6 +416,7 @@ static void ixgbe_ptp_convert_to_hwtstamp(struct ixgbe_adapter *adapter,
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
/* Upper 32 bits represent billions of cycles, lower 32 bits
* represent cycles. However, we use timespec64_to_ns for the
* correct math even though the units haven't been corrected
@@ -492,11 +494,13 @@ static int ixgbe_ptp_adjfine_X550(struct ptp_clock_info *ptp, long scaled_ppm)
struct ixgbe_adapter *adapter =
container_of(ptp, struct ixgbe_adapter, ptp_caps);
struct ixgbe_hw *hw = &adapter->hw;
+ u64 rate, base;
bool neg_adj;
- u64 rate;
u32 inca;
- neg_adj = diff_by_scaled_ppm(IXGBE_X550_BASE_PERIOD, scaled_ppm, &rate);
+ base = hw->mac.type == ixgbe_mac_e610 ? IXGBE_E610_BASE_PERIOD :
+ IXGBE_X550_BASE_PERIOD;
+ neg_adj = diff_by_scaled_ppm(base, scaled_ppm, &rate);
/* warn if rate is too large */
if (rate >= INCVALUE_MASK)
@@ -559,6 +563,7 @@ static int ixgbe_ptp_gettimex(struct ptp_clock_info *ptp,
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
/* Upper 32 bits represent billions of cycles, lower 32 bits
* represent cycles. However, we use timespec64_to_ns for the
* correct math even though the units haven't been corrected
@@ -1067,6 +1072,7 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter,
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
/* enable timestamping all packets only if at least some
* packets were requested. Otherwise, play nice and disable
* timestamping
@@ -1233,6 +1239,7 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
fallthrough;
case ixgbe_mac_x550em_a:
case ixgbe_mac_X550:
+ case ixgbe_mac_e610:
cc.read = ixgbe_ptp_read_X550;
break;
case ixgbe_mac_X540:
@@ -1280,6 +1287,7 @@ static void ixgbe_ptp_init_systime(struct ixgbe_adapter *adapter)
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
case ixgbe_mac_X550:
+ case ixgbe_mac_e610:
tsauxc = IXGBE_READ_REG(hw, IXGBE_TSAUXC);
/* Reset SYSTIME registers to 0 */
@@ -1407,6 +1415,7 @@ static long ixgbe_ptp_create_clock(struct ixgbe_adapter *adapter)
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_x550em_a:
+ case ixgbe_mac_e610:
snprintf(adapter->ptp_caps.name, 16, "%s", netdev->name);
adapter->ptp_caps.owner = THIS_MODULE;
adapter->ptp_caps.max_adj = 30000000;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type_e610.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type_e610.h
index 8d06ade3c7cd..617e07878e4f 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type_e610.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type_e610.h
@@ -171,6 +171,9 @@ enum ixgbe_aci_opc {
ixgbe_aci_opc_done_alt_write = 0x0904,
ixgbe_aci_opc_clear_port_alt_write = 0x0906,
+ /* TCA Events */
+ ixgbe_aci_opc_temp_tca_event = 0x0C94,
+
/* debug commands */
ixgbe_aci_opc_debug_dump_internals = 0xFF08,
diff --git a/drivers/net/ethernet/intel/ixgbevf/ipsec.c b/drivers/net/ethernet/intel/ixgbevf/ipsec.c
index f804b35d79c7..8ba037e3d9c2 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ipsec.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ipsec.c
@@ -428,30 +428,9 @@ static void ixgbevf_ipsec_del_sa(struct xfrm_state *xs)
}
}
-/**
- * ixgbevf_ipsec_offload_ok - can this packet use the xfrm hw offload
- * @skb: current data packet
- * @xs: pointer to transformer state struct
- **/
-static bool ixgbevf_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *xs)
-{
- if (xs->props.family == AF_INET) {
- /* Offload with IPv4 options is not supported yet */
- if (ip_hdr(skb)->ihl != 5)
- return false;
- } else {
- /* Offload with IPv6 extension headers is not support yet */
- if (ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr))
- return false;
- }
-
- return true;
-}
-
static const struct xfrmdev_ops ixgbevf_xfrmdev_ops = {
.xdo_dev_state_add = ixgbevf_ipsec_add_sa,
.xdo_dev_state_delete = ixgbevf_ipsec_del_sa,
- .xdo_dev_offload_ok = ixgbevf_ipsec_offload_ok,
};
/**
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 4fe121b9f94b..147571fdada3 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -2342,7 +2342,7 @@ mvneta_swbm_rx_frame(struct mvneta_port *pp,
prefetch(data);
xdp_buff_clear_frags_flag(xdp);
xdp_prepare_buff(xdp, data, pp->rx_offset_correction + MVNETA_MH_SIZE,
- data_len, false);
+ data_len, true);
}
static void
@@ -2396,6 +2396,7 @@ mvneta_swbm_build_skb(struct mvneta_port *pp, struct page_pool *pool,
struct xdp_buff *xdp, u32 desc_status)
{
struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
+ u32 metasize = xdp->data - xdp->data_meta;
struct sk_buff *skb;
u8 num_frags;
@@ -2410,6 +2411,8 @@ mvneta_swbm_build_skb(struct mvneta_port *pp, struct page_pool *pool,
skb_reserve(skb, xdp->data - xdp->data_hard_start);
skb_put(skb, xdp->data_end - xdp->data);
+ if (metasize)
+ skb_metadata_set(skb, metasize);
skb->ip_summed = mvneta_rx_csum(pp, desc_status);
if (unlikely(xdp_buff_has_frags(xdp)))
@@ -5557,7 +5560,6 @@ static int mvneta_probe(struct platform_device *pdev)
clk_prepare_enable(pp->clk_bus);
pp->phylink_pcs.ops = &mvneta_phylink_pcs_ops;
- pp->phylink_pcs.neg_mode = true;
pp->phylink_config.dev = &dev->dev;
pp->phylink_config.type = PHYLINK_NETDEV;
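The mvneta hunks above (and the mvpp2 hunks that follow) switch xdp_prepare_buff() to meta_valid = true and propagate any metadata an XDP program reserved with bpf_xdp_adjust_meta() into the skb. A hedged sketch of that propagation step; build_skb_from_xdp() is a made-up wrapper, while the field accesses and helpers are the ones used in the hunks:

/* Copy the XDP metadata length into the skb so TC/BPF can see it. */
static struct sk_buff *build_skb_from_xdp(struct xdp_buff *xdp,
					  struct sk_buff *skb)
{
	u32 metasize = xdp->data - xdp->data_meta;	/* bytes reserved by the XDP prog */

	skb_reserve(skb, xdp->data - xdp->data_hard_start);
	skb_put(skb, xdp->data_end - xdp->data);
	if (metasize)
		skb_metadata_set(skb, metasize);

	return skb;
}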
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index dd76c1b7ed3a..566c12c89520 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -3915,13 +3915,13 @@ static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi,
while (rx_done < rx_todo) {
struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
+ u32 rx_status, timestamp, metasize = 0;
struct mvpp2_bm_pool *bm_pool;
struct page_pool *pp = NULL;
struct sk_buff *skb;
unsigned int frag_size;
dma_addr_t dma_addr;
phys_addr_t phys_addr;
- u32 rx_status, timestamp;
int pool, rx_bytes, err, ret;
struct page *page;
void *data;
@@ -3983,7 +3983,7 @@ static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi,
xdp_init_buff(&xdp, PAGE_SIZE, xdp_rxq);
xdp_prepare_buff(&xdp, data,
MVPP2_MH_SIZE + MVPP2_SKB_HEADROOM,
- rx_bytes, false);
+ rx_bytes, true);
ret = mvpp2_run_xdp(port, xdp_prog, &xdp, pp, &ps);
@@ -3999,6 +3999,8 @@ static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi,
ps.rx_bytes += rx_bytes;
continue;
}
+
+ metasize = xdp.data - xdp.data_meta;
}
if (frag_size)
@@ -4038,6 +4040,8 @@ static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi,
skb_reserve(skb, MVPP2_MH_SIZE + MVPP2_SKB_HEADROOM);
skb_put(skb, rx_bytes);
+ if (metasize)
+ skb_metadata_set(skb, metasize);
skb->ip_summed = mvpp2_rx_csum(port, rx_status);
skb->protocol = eth_type_trans(skb, dev);
@@ -6985,9 +6989,8 @@ static int mvpp2_port_probe(struct platform_device *pdev,
for (thread = 0; thread < priv->nthreads; thread++) {
port_pcpu = per_cpu_ptr(port->pcpu, thread);
- hrtimer_init(&port_pcpu->tx_done_timer, CLOCK_MONOTONIC,
- HRTIMER_MODE_REL_PINNED_SOFT);
- port_pcpu->tx_done_timer.function = mvpp2_hr_timer_cb;
+ hrtimer_setup(&port_pcpu->tx_done_timer, mvpp2_hr_timer_cb, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL_PINNED_SOFT);
port_pcpu->timer_scheduled = false;
port_pcpu->dev = dev;
}
@@ -7024,9 +7027,7 @@ static int mvpp2_port_probe(struct platform_device *pdev,
dev->dev_port = port->id;
port->pcs_gmac.ops = &mvpp2_phylink_gmac_pcs_ops;
- port->pcs_gmac.neg_mode = true;
port->pcs_xlg.ops = &mvpp2_phylink_xlg_pcs_ops;
- port->pcs_xlg.neg_mode = true;
if (!mvpp2_use_acpi_compat_mode(port_fwnode)) {
port->phylink_config.dev = &dev->dev;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
index 8216f843a7cd..0b27a695008b 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
@@ -66,8 +66,18 @@ static int cgx_fwi_link_change(struct cgx *cgx, int lmac_id, bool en);
/* Supported devices */
static const struct pci_device_id cgx_id_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_CGX) },
- { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10K_RPM) },
- { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10KB_RPM) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10K_RPM,
+ PCI_ANY_ID, PCI_SUBSYS_DEVID_CN10K_A) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10K_RPM,
+ PCI_ANY_ID, PCI_SUBSYS_DEVID_CNF10K_A) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10K_RPM,
+ PCI_ANY_ID, PCI_SUBSYS_DEVID_CNF10K_B) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10KB_RPM,
+ PCI_ANY_ID, PCI_SUBSYS_DEVID_CN10K_B) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10KB_RPM,
+ PCI_ANY_ID, PCI_SUBSYS_DEVID_CN20KA) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10KB_RPM,
+ PCI_ANY_ID, PCI_SUBSYS_DEVID_CNF20KA) },
{ 0, } /* end of table */
};
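Rather than binding to every CN10K/CN10KB RPM device, the table above now matches on the PCI subsystem device ID as well, so the cgx driver only attaches to the listed board variants. An illustrative entry using the same PCI_DEVICE_SUB() macro, with made-up IDs:

/* Match device 0xA060 from Cavium only when the subsystem device ID is
 * 0xB900, from any subsystem vendor (IDs are invented for the example).
 */
static const struct pci_device_id example_id_table[] = {
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, 0xA060,
			 PCI_ANY_ID, 0xB900) },
	{ 0, }	/* end of table */
};
MODULE_DEVICE_TABLE(pci, example_id_table);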
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c b/drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c
index d39d86e694cc..655dd4726d36 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c
@@ -925,7 +925,6 @@ void rvu_mcs_exit(struct rvu *rvu)
if (!rvu->mcs_intr_wq)
return;
- flush_workqueue(rvu->mcs_intr_wq);
destroy_workqueue(rvu->mcs_intr_wq);
rvu->mcs_intr_wq = NULL;
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/ptp.c b/drivers/net/ethernet/marvell/octeontx2/af/ptp.c
index bcc96eed2481..66749b3649c1 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/ptp.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/ptp.c
@@ -545,8 +545,7 @@ static int ptp_probe(struct pci_dev *pdev,
spin_lock_init(&ptp->ptp_lock);
if (cn10k_ptp_errata(ptp)) {
ptp->read_ptp_tstmp = &read_ptp_tstmp_sec_nsec;
- hrtimer_init(&ptp->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- ptp->hrtimer.function = ptp_reset_thresh;
+ hrtimer_setup(&ptp->hrtimer, ptp_reset_thresh, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
} else {
ptp->read_ptp_tstmp = &read_ptp_tstmp_nsec;
}
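This is the same mechanical conversion applied earlier to igc and mvpp2: hrtimer_init() followed by a manual assignment of .function becomes a single hrtimer_setup() call that takes the callback directly, as the hunks themselves show. A minimal sketch of the pattern (my_timer_cb and example_timer are placeholders):

static struct hrtimer example_timer;

static enum hrtimer_restart my_timer_cb(struct hrtimer *timer)
{
	return HRTIMER_NORESTART;	/* placeholder callback body */
}

static void example_timer_init(void)
{
	/* old two-step form:
	 *	hrtimer_init(&example_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	 *	example_timer.function = my_timer_cb;
	 * single-call form used in the hunks above:
	 */
	hrtimer_setup(&example_timer, my_timer_cb, CLOCK_MONOTONIC,
		      HRTIMER_MODE_REL);
}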
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
index a383b5ef5b2d..60f085b00a8c 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
@@ -30,6 +30,8 @@
#define PCI_SUBSYS_DEVID_CNF10K_A 0xBA00
#define PCI_SUBSYS_DEVID_CNF10K_B 0xBC00
#define PCI_SUBSYS_DEVID_CN10K_B 0xBD00
+#define PCI_SUBSYS_DEVID_CN20KA 0xC220
+#define PCI_SUBSYS_DEVID_CNF20KA 0xC320
/* PCI BAR nos */
#define PCI_AF_REG_BAR_NUM 0
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
index cb6513ab35e7..69e0778f9ac1 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
@@ -9,7 +9,7 @@ obj-$(CONFIG_RVU_ESWITCH) += rvu_rep.o
rvu_nicpf-y := otx2_pf.o otx2_common.o otx2_txrx.o otx2_ethtool.o \
otx2_flows.o otx2_tc.o cn10k.o otx2_dmac_flt.o \
- otx2_devlink.o qos_sq.o qos.o
+ otx2_devlink.o qos_sq.o qos.o otx2_xsk.o
rvu_nicvf-y := otx2_vf.o
rvu_rep-y := rep.o
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
index a15cc86635d6..c3b6e0f60a79 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
@@ -112,9 +112,12 @@ int cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq)
struct otx2_nic *pfvf = dev;
int cnt = cq->pool_ptrs;
u64 ptrs[NPA_MAX_BURST];
+ struct otx2_pool *pool;
dma_addr_t bufptr;
int num_ptrs = 1;
+ pool = &pfvf->qset.pool[cq->cq_idx];
+
/* Refill pool with new buffers */
while (cq->pool_ptrs) {
if (otx2_alloc_buffer(pfvf, cq, &bufptr)) {
@@ -124,7 +127,9 @@ int cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq)
break;
}
cq->pool_ptrs--;
- ptrs[num_ptrs] = (u64)bufptr + OTX2_HEAD_ROOM;
+ ptrs[num_ptrs] = pool->xsk_pool ?
+ (u64)bufptr : (u64)bufptr + OTX2_HEAD_ROOM;
+
num_ptrs++;
if (num_ptrs == NPA_MAX_BURST || cq->pool_ptrs == 0) {
__cn10k_aura_freeptr(pfvf, cq->cq_idx, ptrs,
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c
index 09a5b5268205..fc59e50bafce 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c
@@ -744,24 +744,9 @@ static void cn10k_ipsec_del_state(struct xfrm_state *x)
queue_work(pf->ipsec.sa_workq, &pf->ipsec.sa_work);
}
-static bool cn10k_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
-{
- if (x->props.family == AF_INET) {
- /* Offload with IPv4 options is not supported yet */
- if (ip_hdr(skb)->ihl > 5)
- return false;
- } else {
- /* Offload with IPv6 extension headers is not support yet */
- if (ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr))
- return false;
- }
- return true;
-}
-
static const struct xfrmdev_ops cn10k_ipsec_xfrmdev_ops = {
.xdo_dev_state_add = cn10k_ipsec_add_state,
.xdo_dev_state_delete = cn10k_ipsec_del_state,
- .xdo_dev_offload_ok = cn10k_ipsec_offload_ok,
};
static void cn10k_ipsec_sa_wq_handler(struct work_struct *work)
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
index 2b49bfec7869..84cd029a85aa 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
@@ -17,6 +17,7 @@
#include "otx2_common.h"
#include "otx2_struct.h"
#include "cn10k.h"
+#include "otx2_xsk.h"
static bool otx2_is_pfc_enabled(struct otx2_nic *pfvf)
{
@@ -330,6 +331,10 @@ int otx2_set_rss_table(struct otx2_nic *pfvf, int ctx_id)
rss_ctx = rss->rss_ctx[ctx_id];
/* Get memory to put this msg */
for (idx = 0; idx < rss->rss_size; idx++) {
+ /* Ignore the queue if AF_XDP zero copy is enabled */
+ if (test_bit(rss_ctx->ind_tbl[idx], pfvf->af_xdp_zc_qidx))
+ continue;
+
aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
if (!aq) {
/* The shared memory buffer can be full.
@@ -549,10 +554,13 @@ static int otx2_alloc_pool_buf(struct otx2_nic *pfvf, struct otx2_pool *pool,
}
static int __otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
- dma_addr_t *dma)
+ dma_addr_t *dma, int qidx, int idx)
{
u8 *buf;
+ if (pool->xsk_pool)
+ return otx2_xsk_pool_alloc_buf(pfvf, pool, dma, idx);
+
if (pool->page_pool)
return otx2_alloc_pool_buf(pfvf, pool, dma);
@@ -571,12 +579,12 @@ static int __otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
}
int otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
- dma_addr_t *dma)
+ dma_addr_t *dma, int qidx, int idx)
{
int ret;
local_bh_disable();
- ret = __otx2_alloc_rbuf(pfvf, pool, dma);
+ ret = __otx2_alloc_rbuf(pfvf, pool, dma, qidx, idx);
local_bh_enable();
return ret;
}
@@ -584,7 +592,8 @@ int otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
int otx2_alloc_buffer(struct otx2_nic *pfvf, struct otx2_cq_queue *cq,
dma_addr_t *dma)
{
- if (unlikely(__otx2_alloc_rbuf(pfvf, cq->rbpool, dma)))
+ if (unlikely(__otx2_alloc_rbuf(pfvf, cq->rbpool, dma,
+ cq->cq_idx, cq->pool_ptrs - 1)))
return -ENOMEM;
return 0;
}
@@ -884,7 +893,7 @@ void otx2_sqb_flush(struct otx2_nic *pfvf)
#define RQ_PASS_LVL_AURA (255 - ((95 * 256) / 100)) /* RED when 95% is full */
#define RQ_DROP_LVL_AURA (255 - ((99 * 256) / 100)) /* Drop when 99% is full */
-static int otx2_rq_init(struct otx2_nic *pfvf, u16 qidx, u16 lpb_aura)
+int otx2_rq_init(struct otx2_nic *pfvf, u16 qidx, u16 lpb_aura)
{
struct otx2_qset *qset = &pfvf->qset;
struct nix_aq_enq_req *aq;
@@ -1028,6 +1037,10 @@ int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura)
sq->stats.bytes = 0;
sq->stats.pkts = 0;
+ /* Attach XSK_BUFF_POOL to XDP queue */
+ if (qidx > pfvf->hw.xdp_queues)
+ otx2_attach_xsk_buff(pfvf, sq, (qidx - pfvf->hw.xdp_queues));
+
chan_offset = qidx % pfvf->hw.tx_chan_cnt;
err = pfvf->hw_ops->sq_aq_init(pfvf, qidx, chan_offset, sqb_aura);
@@ -1041,12 +1054,13 @@ int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura)
}
-static int otx2_cq_init(struct otx2_nic *pfvf, u16 qidx)
+int otx2_cq_init(struct otx2_nic *pfvf, u16 qidx)
{
struct otx2_qset *qset = &pfvf->qset;
int err, pool_id, non_xdp_queues;
struct nix_aq_enq_req *aq;
struct otx2_cq_queue *cq;
+ struct otx2_pool *pool;
cq = &qset->cq[qidx];
cq->cq_idx = qidx;
@@ -1055,8 +1069,20 @@ static int otx2_cq_init(struct otx2_nic *pfvf, u16 qidx)
cq->cq_type = CQ_RX;
cq->cint_idx = qidx;
cq->cqe_cnt = qset->rqe_cnt;
- if (pfvf->xdp_prog)
+ if (pfvf->xdp_prog) {
xdp_rxq_info_reg(&cq->xdp_rxq, pfvf->netdev, qidx, 0);
+ pool = &qset->pool[qidx];
+ if (pool->xsk_pool) {
+ xdp_rxq_info_reg_mem_model(&cq->xdp_rxq,
+ MEM_TYPE_XSK_BUFF_POOL,
+ NULL);
+ xsk_pool_set_rxq_info(pool->xsk_pool, &cq->xdp_rxq);
+ } else if (pool->page_pool) {
+ xdp_rxq_info_reg_mem_model(&cq->xdp_rxq,
+ MEM_TYPE_PAGE_POOL,
+ pool->page_pool);
+ }
+ }
} else if (qidx < non_xdp_queues) {
cq->cq_type = CQ_TX;
cq->cint_idx = qidx - pfvf->hw.rx_queues;
@@ -1275,9 +1301,10 @@ void otx2_free_bufs(struct otx2_nic *pfvf, struct otx2_pool *pool,
pa = otx2_iova_to_phys(pfvf->iommu_domain, iova);
page = virt_to_head_page(phys_to_virt(pa));
-
if (pool->page_pool) {
page_pool_put_full_page(pool->page_pool, page, true);
+ } else if (pool->xsk_pool) {
+ /* Note: No way of identifying xdp_buff */
} else {
dma_unmap_page_attrs(pfvf->dev, iova, size,
DMA_FROM_DEVICE,
@@ -1292,6 +1319,7 @@ void otx2_free_aura_ptr(struct otx2_nic *pfvf, int type)
int pool_id, pool_start = 0, pool_end = 0, size = 0;
struct otx2_pool *pool;
u64 iova;
+ int idx;
if (type == AURA_NIX_SQ) {
pool_start = otx2_get_pool_idx(pfvf, type, 0);
@@ -1306,16 +1334,21 @@ void otx2_free_aura_ptr(struct otx2_nic *pfvf, int type)
/* Free SQB and RQB pointers from the aura pool */
for (pool_id = pool_start; pool_id < pool_end; pool_id++) {
- iova = otx2_aura_allocptr(pfvf, pool_id);
pool = &pfvf->qset.pool[pool_id];
+ iova = otx2_aura_allocptr(pfvf, pool_id);
while (iova) {
if (type == AURA_NIX_RQ)
iova -= OTX2_HEAD_ROOM;
-
otx2_free_bufs(pfvf, pool, iova, size);
-
iova = otx2_aura_allocptr(pfvf, pool_id);
}
+
+ for (idx = 0; idx < pool->xdp_cnt; idx++) {
+ if (!pool->xdp[idx])
+ continue;
+
+ xsk_buff_free(pool->xdp[idx]);
+ }
}
}
@@ -1332,7 +1365,8 @@ void otx2_aura_pool_free(struct otx2_nic *pfvf)
qmem_free(pfvf->dev, pool->stack);
qmem_free(pfvf->dev, pool->fc_addr);
page_pool_destroy(pool->page_pool);
- pool->page_pool = NULL;
+ devm_kfree(pfvf->dev, pool->xdp);
+ pool->xsk_pool = NULL;
}
devm_kfree(pfvf->dev, pfvf->qset.pool);
pfvf->qset.pool = NULL;
@@ -1419,6 +1453,7 @@ int otx2_pool_init(struct otx2_nic *pfvf, u16 pool_id,
int stack_pages, int numptrs, int buf_size, int type)
{
struct page_pool_params pp_params = { 0 };
+ struct xsk_buff_pool *xsk_pool;
struct npa_aq_enq_req *aq;
struct otx2_pool *pool;
int err;
@@ -1462,21 +1497,35 @@ int otx2_pool_init(struct otx2_nic *pfvf, u16 pool_id,
aq->ctype = NPA_AQ_CTYPE_POOL;
aq->op = NPA_AQ_INSTOP_INIT;
- if (type != AURA_NIX_RQ) {
- pool->page_pool = NULL;
+ if (type != AURA_NIX_RQ)
+ return 0;
+
+ if (!test_bit(pool_id, pfvf->af_xdp_zc_qidx)) {
+ pp_params.order = get_order(buf_size);
+ pp_params.flags = PP_FLAG_DMA_MAP;
+ pp_params.pool_size = min(OTX2_PAGE_POOL_SZ, numptrs);
+ pp_params.nid = NUMA_NO_NODE;
+ pp_params.dev = pfvf->dev;
+ pp_params.dma_dir = DMA_FROM_DEVICE;
+ pool->page_pool = page_pool_create(&pp_params);
+ if (IS_ERR(pool->page_pool)) {
+ netdev_err(pfvf->netdev, "Creation of page pool failed\n");
+ return PTR_ERR(pool->page_pool);
+ }
return 0;
}
- pp_params.order = get_order(buf_size);
- pp_params.flags = PP_FLAG_DMA_MAP;
- pp_params.pool_size = min(OTX2_PAGE_POOL_SZ, numptrs);
- pp_params.nid = NUMA_NO_NODE;
- pp_params.dev = pfvf->dev;
- pp_params.dma_dir = DMA_FROM_DEVICE;
- pool->page_pool = page_pool_create(&pp_params);
- if (IS_ERR(pool->page_pool)) {
- netdev_err(pfvf->netdev, "Creation of page pool failed\n");
- return PTR_ERR(pool->page_pool);
+ /* Set XSK pool to support AF_XDP zero-copy */
+ xsk_pool = xsk_get_pool_from_qid(pfvf->netdev, pool_id);
+ if (xsk_pool) {
+ pool->xsk_pool = xsk_pool;
+ pool->xdp_cnt = numptrs;
+ pool->xdp = devm_kcalloc(pfvf->dev,
+ numptrs, sizeof(struct xdp_buff *), GFP_KERNEL);
+ if (!pool->xdp) {
+ netdev_err(pfvf->netdev, "Allocation of xdp buffer array failed\n");
+ return -ENOMEM;
+ }
}
return 0;
@@ -1537,9 +1586,18 @@ int otx2_sq_aura_pool_init(struct otx2_nic *pfvf)
}
for (ptr = 0; ptr < num_sqbs; ptr++) {
- err = otx2_alloc_rbuf(pfvf, pool, &bufptr);
- if (err)
+ err = otx2_alloc_rbuf(pfvf, pool, &bufptr, pool_id, ptr);
+ if (err) {
+ if (pool->xsk_pool) {
+ ptr--;
+ while (ptr >= 0) {
+ xsk_buff_free(pool->xdp[ptr]);
+ ptr--;
+ }
+ }
goto err_mem;
+ }
+
pfvf->hw_ops->aura_freeptr(pfvf, pool_id, bufptr);
sq->sqb_ptrs[sq->sqb_count++] = (u64)bufptr;
}
@@ -1589,11 +1647,19 @@ int otx2_rq_aura_pool_init(struct otx2_nic *pfvf)
/* Allocate pointers and free them to aura/pool */
for (pool_id = 0; pool_id < hw->rqpool_cnt; pool_id++) {
pool = &pfvf->qset.pool[pool_id];
+
for (ptr = 0; ptr < num_ptrs; ptr++) {
- err = otx2_alloc_rbuf(pfvf, pool, &bufptr);
- if (err)
+ err = otx2_alloc_rbuf(pfvf, pool, &bufptr, pool_id, ptr);
+ if (err) {
+ if (pool->xsk_pool) {
+ while (ptr)
+ xsk_buff_free(pool->xdp[--ptr]);
+ }
return -ENOMEM;
+ }
+
pfvf->hw_ops->aura_freeptr(pfvf, pool_id,
+ pool->xsk_pool ? bufptr :
bufptr + OTX2_HEAD_ROOM);
}
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
index 65814e3dc93f..1e88422825be 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
@@ -21,6 +21,7 @@
#include <linux/time64.h>
#include <linux/dim.h>
#include <uapi/linux/if_macsec.h>
+#include <net/page_pool/helpers.h>
#include <mbox.h>
#include <npc.h>
@@ -128,6 +129,12 @@ enum otx2_errcodes_re {
ERRCODE_IL4_CSUM = 0x22,
};
+enum otx2_xdp_action {
+ OTX2_XDP_TX = BIT(0),
+ OTX2_XDP_REDIRECT = BIT(1),
+ OTX2_AF_XDP_FRAME = BIT(2),
+};
+
struct otx2_dev_stats {
u64 rx_bytes;
u64 rx_frames;
@@ -531,6 +538,8 @@ struct otx2_nic {
/* Inline ipsec */
struct cn10k_ipsec ipsec;
+ /* af_xdp zero-copy */
+ unsigned long *af_xdp_zc_qidx;
};
static inline bool is_otx2_lbkvf(struct pci_dev *pdev)
@@ -1002,7 +1011,7 @@ void otx2_txschq_free_one(struct otx2_nic *pfvf, u16 lvl, u16 schq);
void otx2_free_pending_sqe(struct otx2_nic *pfvf);
void otx2_sqb_flush(struct otx2_nic *pfvf);
int otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
- dma_addr_t *dma);
+ dma_addr_t *dma, int qidx, int idx);
int otx2_rxtx_enable(struct otx2_nic *pfvf, bool enable);
void otx2_ctx_disable(struct mbox *mbox, int type, bool npa);
int otx2_nix_config_bp(struct otx2_nic *pfvf, bool enable);
@@ -1032,6 +1041,8 @@ void otx2_pfaf_mbox_destroy(struct otx2_nic *pf);
void otx2_disable_mbox_intr(struct otx2_nic *pf);
void otx2_disable_napi(struct otx2_nic *pf);
irqreturn_t otx2_cq_intr_handler(int irq, void *cq_irq);
+int otx2_rq_init(struct otx2_nic *pfvf, u16 qidx, u16 lpb_aura);
+int otx2_cq_init(struct otx2_nic *pfvf, u16 qidx);
/* RSS configuration APIs*/
int otx2_rss_init(struct otx2_nic *pfvf);
@@ -1094,7 +1105,8 @@ int otx2_del_macfilter(struct net_device *netdev, const u8 *mac);
int otx2_add_macfilter(struct net_device *netdev, const u8 *mac);
int otx2_enable_rxvlan(struct otx2_nic *pf, bool enable);
int otx2_install_rxvlan_offload_flow(struct otx2_nic *pfvf);
-bool otx2_xdp_sq_append_pkt(struct otx2_nic *pfvf, u64 iova, int len, u16 qidx);
+bool otx2_xdp_sq_append_pkt(struct otx2_nic *pfvf, struct xdp_frame *xdpf,
+ u64 iova, int len, u16 qidx, u16 flags);
u16 otx2_get_max_mtu(struct otx2_nic *pfvf);
int otx2_handle_ntuple_tc_features(struct net_device *netdev,
netdev_features_t features);
@@ -1175,4 +1187,5 @@ static inline int mcam_entry_cmp(const void *a, const void *b)
dma_addr_t otx2_dma_map_skb_frag(struct otx2_nic *pfvf,
struct sk_buff *skb, int seg, int *len);
void otx2_dma_unmap_skb_frags(struct otx2_nic *pfvf, struct sg_list *sg);
+int otx2_read_free_sqe(struct otx2_nic *pfvf, u16 qidx);
#endif /* OTX2_COMMON_H */
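struct otx2_nic now carries an af_xdp_zc_qidx bitmap, one bit per receive queue that runs in AF_XDP zero-copy mode; pool initialisation, RSS table programming and the ethtool path in the following hunks all test it. A rough sketch of the bookkeeping using the standard bitmap helpers (fragments only, error handling and locking omitted):

	/* probe: one bit per RX queue */
	pf->af_xdp_zc_qidx = bitmap_zalloc(qcount, GFP_KERNEL);

	/* XSK pool attach/detach on queue qidx */
	if (zero_copy_enabled)
		set_bit(qidx, pf->af_xdp_zc_qidx);
	else
		clear_bit(qidx, pf->af_xdp_zc_qidx);

	/* RSS programming: do not steer generic traffic to a ZC queue */
	if (test_bit(rss_ctx->ind_tbl[idx], pf->af_xdp_zc_qidx))
		continue;

	/* teardown */
	bitmap_free(pf->af_xdp_zc_qidx);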
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
index 2d53dc77ef1e..010385b29988 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
@@ -910,8 +910,12 @@ static int otx2_get_rxfh(struct net_device *dev,
return -ENOENT;
if (indir) {
- for (idx = 0; idx < rss->rss_size; idx++)
+ for (idx = 0; idx < rss->rss_size; idx++) {
+ /* Ignore if the rx queue is AF_XDP zero copy enabled */
+ if (test_bit(rss_ctx->ind_tbl[idx], pfvf->af_xdp_zc_qidx))
+ continue;
indir[idx] = rss_ctx->ind_tbl[idx];
+ }
}
if (rxfh->key)
memcpy(rxfh->key, rss->key, sizeof(rss->key));
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
index e1dde93e8af8..cfed9ec5b157 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
@@ -27,6 +27,7 @@
#include "qos.h"
#include <rvu_trace.h>
#include "cn10k_ipsec.h"
+#include "otx2_xsk.h"
#define DRV_NAME "rvu_nicpf"
#define DRV_STRING "Marvell RVU NIC Physical Function Driver"
@@ -1662,9 +1663,7 @@ void otx2_free_hw_resources(struct otx2_nic *pf)
struct nix_lf_free_req *free_req;
struct mbox *mbox = &pf->mbox;
struct otx2_cq_queue *cq;
- struct otx2_pool *pool;
struct msg_req *req;
- int pool_id;
int qidx;
/* Ensure all SQE are processed */
@@ -1705,13 +1704,6 @@ void otx2_free_hw_resources(struct otx2_nic *pf)
/* Free RQ buffer pointers*/
otx2_free_aura_ptr(pf, AURA_NIX_RQ);
- for (qidx = 0; qidx < pf->hw.rx_queues; qidx++) {
- pool_id = otx2_get_pool_idx(pf, AURA_NIX_RQ, qidx);
- pool = &pf->qset.pool[pool_id];
- page_pool_destroy(pool->page_pool);
- pool->page_pool = NULL;
- }
-
otx2_free_cq_res(pf);
/* Free all ingress bandwidth profiles allocated */
@@ -2691,7 +2683,6 @@ static int otx2_get_vf_config(struct net_device *netdev, int vf,
static int otx2_xdp_xmit_tx(struct otx2_nic *pf, struct xdp_frame *xdpf,
int qidx)
{
- struct page *page;
u64 dma_addr;
int err = 0;
@@ -2701,11 +2692,11 @@ static int otx2_xdp_xmit_tx(struct otx2_nic *pf, struct xdp_frame *xdpf,
if (dma_mapping_error(pf->dev, dma_addr))
return -ENOMEM;
- err = otx2_xdp_sq_append_pkt(pf, dma_addr, xdpf->len, qidx);
+ err = otx2_xdp_sq_append_pkt(pf, xdpf, dma_addr, xdpf->len,
+ qidx, OTX2_XDP_REDIRECT);
if (!err) {
otx2_dma_unmap_page(pf, dma_addr, xdpf->len, DMA_TO_DEVICE);
- page = virt_to_page(xdpf->data);
- put_page(page);
+ xdp_return_frame(xdpf);
return -ENOMEM;
}
return 0;
@@ -2789,6 +2780,8 @@ static int otx2_xdp(struct net_device *netdev, struct netdev_bpf *xdp)
switch (xdp->command) {
case XDP_SETUP_PROG:
return otx2_xdp_setup(pf, xdp->prog);
+ case XDP_SETUP_XSK_POOL:
+ return otx2_xsk_pool_setup(pf, xdp->xsk.pool, xdp->xsk.queue_id);
default:
return -EINVAL;
}
@@ -2866,6 +2859,7 @@ static const struct net_device_ops otx2_netdev_ops = {
.ndo_set_vf_vlan = otx2_set_vf_vlan,
.ndo_get_vf_config = otx2_get_vf_config,
.ndo_bpf = otx2_xdp,
+ .ndo_xsk_wakeup = otx2_xsk_wakeup,
.ndo_xdp_xmit = otx2_xdp_xmit,
.ndo_setup_tc = otx2_setup_tc,
.ndo_set_vf_trust = otx2_ndo_set_vf_trust,
@@ -3204,16 +3198,28 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
/* Enable link notifications */
otx2_cgx_config_linkevents(pf, true);
+ pf->af_xdp_zc_qidx = bitmap_zalloc(qcount, GFP_KERNEL);
+ if (!pf->af_xdp_zc_qidx) {
+ err = -ENOMEM;
+ goto err_sriov_cleanup;
+ }
+
#ifdef CONFIG_DCB
err = otx2_dcbnl_set_ops(netdev);
if (err)
- goto err_pf_sriov_init;
+ goto err_free_zc_bmap;
#endif
otx2_qos_init(pf, qos_txqs);
return 0;
+#ifdef CONFIG_DCB
+err_free_zc_bmap:
+ bitmap_free(pf->af_xdp_zc_qidx);
+#endif
+err_sriov_cleanup:
+ otx2_sriov_vfcfg_cleanup(pf);
err_pf_sriov_init:
otx2_shutdown_tc(pf);
err_mcam_flow_del:
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
index 224cef938927..af8cabe828d0 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
@@ -12,6 +12,7 @@
#include <linux/bpf_trace.h>
#include <net/ip6_checksum.h>
#include <net/xfrm.h>
+#include <net/xdp.h>
#include "otx2_reg.h"
#include "otx2_common.h"
@@ -19,6 +20,7 @@
#include "otx2_txrx.h"
#include "otx2_ptp.h"
#include "cn10k.h"
+#include "otx2_xsk.h"
#define CQE_ADDR(CQ, idx) ((CQ)->cqe_base + ((CQ)->cqe_size * (idx)))
#define PTP_PORT 0x13F
@@ -29,11 +31,17 @@
DEFINE_STATIC_KEY_FALSE(cn10k_ipsec_sa_enabled);
+static int otx2_get_free_sqe(struct otx2_snd_queue *sq)
+{
+ return (sq->cons_head - sq->head - 1 + sq->sqe_cnt)
+ & (sq->sqe_cnt - 1);
+}
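/* Worked example for the computation above (illustrative values): with
 * sqe_cnt = 256, head = 10 (producer) and cons_head = 5 (consumer),
 * (5 - 10 - 1 + 256) & 255 = 250 free entries; one slot is deliberately
 * kept unused so that head == cons_head always means "ring empty". The
 * mask form assumes sqe_cnt is a power of two. Generic sketch:
 */
static inline int ring_free_slots(u32 cons, u32 prod, u32 size)
{
	return (cons - prod - 1 + size) & (size - 1);
}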
+
static bool otx2_xdp_rcv_pkt_handler(struct otx2_nic *pfvf,
struct bpf_prog *prog,
struct nix_cqe_rx_s *cqe,
struct otx2_cq_queue *cq,
- bool *need_xdp_flush);
+ u32 *metasize, bool *need_xdp_flush);
static void otx2_sq_set_sqe_base(struct otx2_snd_queue *sq,
struct sk_buff *skb)
@@ -96,20 +104,22 @@ static unsigned int frag_num(unsigned int i)
static void otx2_xdp_snd_pkt_handler(struct otx2_nic *pfvf,
struct otx2_snd_queue *sq,
- struct nix_cqe_tx_s *cqe)
+ struct nix_cqe_tx_s *cqe,
+ int *xsk_frames)
{
struct nix_send_comp_s *snd_comp = &cqe->comp;
struct sg_list *sg;
- struct page *page;
- u64 pa;
sg = &sq->sg[snd_comp->sqe_id];
+ if (sg->flags & OTX2_AF_XDP_FRAME) {
+ (*xsk_frames)++;
+ return;
+ }
- pa = otx2_iova_to_phys(pfvf->iommu_domain, sg->dma_addr[0]);
- otx2_dma_unmap_page(pfvf, sg->dma_addr[0],
- sg->size[0], DMA_TO_DEVICE);
- page = virt_to_page(phys_to_virt(pa));
- put_page(page);
+ if (sg->flags & OTX2_XDP_REDIRECT)
+ otx2_dma_unmap_page(pfvf, sg->dma_addr[0], sg->size[0], DMA_TO_DEVICE);
+ xdp_return_frame((struct xdp_frame *)sg->skb);
+ sg->skb = (u64)NULL;
}
static void otx2_snd_pkt_handler(struct otx2_nic *pfvf,
@@ -326,6 +336,7 @@ static void otx2_rcv_pkt_handler(struct otx2_nic *pfvf,
struct nix_rx_sg_s *sg = &cqe->sg;
struct sk_buff *skb = NULL;
void *end, *start;
+ u32 metasize = 0;
u64 *seg_addr;
u16 *seg_size;
int seg;
@@ -336,7 +347,8 @@ static void otx2_rcv_pkt_handler(struct otx2_nic *pfvf,
}
if (pfvf->xdp_prog)
- if (otx2_xdp_rcv_pkt_handler(pfvf, pfvf->xdp_prog, cqe, cq, need_xdp_flush))
+ if (otx2_xdp_rcv_pkt_handler(pfvf, pfvf->xdp_prog, cqe, cq,
+ &metasize, need_xdp_flush))
return;
skb = napi_get_frags(napi);
@@ -368,6 +380,8 @@ static void otx2_rcv_pkt_handler(struct otx2_nic *pfvf,
skb->mark = parse->match_id;
skb_mark_for_recycle(skb);
+ if (metasize)
+ skb_metadata_set(skb, metasize);
napi_gro_frags(napi);
}
@@ -431,6 +445,18 @@ int otx2_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq)
return cnt - cq->pool_ptrs;
}
+static void otx2_zc_submit_pkts(struct otx2_nic *pfvf, struct xsk_buff_pool *xsk_pool,
+ int *xsk_frames, int qidx, int budget)
+{
+ if (*xsk_frames)
+ xsk_tx_completed(xsk_pool, *xsk_frames);
+
+ if (xsk_uses_need_wakeup(xsk_pool))
+ xsk_set_tx_need_wakeup(xsk_pool);
+
+ otx2_zc_napi_handler(pfvf, xsk_pool, qidx, budget);
+}
+
static int otx2_tx_napi_handler(struct otx2_nic *pfvf,
struct otx2_cq_queue *cq, int budget)
{
@@ -439,16 +465,22 @@ static int otx2_tx_napi_handler(struct otx2_nic *pfvf,
struct nix_cqe_tx_s *cqe;
struct net_device *ndev;
int processed_cqe = 0;
+ int xsk_frames = 0;
+
+ qidx = cq->cq_idx - pfvf->hw.rx_queues;
+ sq = &pfvf->qset.sq[qidx];
if (cq->pend_cqe >= budget)
goto process_cqe;
- if (otx2_nix_cq_op_status(pfvf, cq) || !cq->pend_cqe)
+ if (otx2_nix_cq_op_status(pfvf, cq) || !cq->pend_cqe) {
+ if (sq->xsk_pool)
+ otx2_zc_submit_pkts(pfvf, sq->xsk_pool, &xsk_frames,
+ qidx, budget);
return 0;
+ }
process_cqe:
- qidx = cq->cq_idx - pfvf->hw.rx_queues;
- sq = &pfvf->qset.sq[qidx];
while (likely(processed_cqe < budget) && cq->pend_cqe) {
cqe = (struct nix_cqe_tx_s *)otx2_get_next_cqe(cq);
@@ -458,10 +490,8 @@ process_cqe:
break;
}
- qidx = cq->cq_idx - pfvf->hw.rx_queues;
-
if (cq->cq_type == CQ_XDP)
- otx2_xdp_snd_pkt_handler(pfvf, sq, cqe);
+ otx2_xdp_snd_pkt_handler(pfvf, sq, cqe, &xsk_frames);
else
otx2_snd_pkt_handler(pfvf, cq, &pfvf->qset.sq[qidx],
cqe, budget, &tx_pkts, &tx_bytes);
@@ -502,6 +532,10 @@ process_cqe:
netif_carrier_ok(ndev))
netif_tx_wake_queue(txq);
}
+
+ if (sq->xsk_pool)
+ otx2_zc_submit_pkts(pfvf, sq->xsk_pool, &xsk_frames, qidx, budget);
+
return 0;
}
@@ -527,9 +561,10 @@ static void otx2_adjust_adaptive_coalese(struct otx2_nic *pfvf, struct otx2_cq_p
int otx2_napi_handler(struct napi_struct *napi, int budget)
{
struct otx2_cq_queue *rx_cq = NULL;
+ struct otx2_cq_queue *cq = NULL;
+ struct otx2_pool *pool = NULL;
struct otx2_cq_poll *cq_poll;
int workdone = 0, cq_idx, i;
- struct otx2_cq_queue *cq;
struct otx2_qset *qset;
struct otx2_nic *pfvf;
int filled_cnt = -1;
@@ -554,6 +589,7 @@ int otx2_napi_handler(struct napi_struct *napi, int budget)
if (rx_cq && rx_cq->pool_ptrs)
filled_cnt = pfvf->hw_ops->refill_pool_ptrs(pfvf, rx_cq);
+
/* Clear the IRQ */
otx2_write64(pfvf, NIX_LF_CINTX_INT(cq_poll->cint_idx), BIT_ULL(0));
@@ -566,20 +602,31 @@ int otx2_napi_handler(struct napi_struct *napi, int budget)
if (pfvf->flags & OTX2_FLAG_ADPTV_INT_COAL_ENABLED)
otx2_adjust_adaptive_coalese(pfvf, cq_poll);
+ if (likely(cq))
+ pool = &pfvf->qset.pool[cq->cq_idx];
+
if (unlikely(!filled_cnt)) {
struct refill_work *work;
struct delayed_work *dwork;
- work = &pfvf->refill_wrk[cq->cq_idx];
- dwork = &work->pool_refill_work;
- /* Schedule a task if no other task is running */
- if (!cq->refill_task_sched) {
- work->napi = napi;
- cq->refill_task_sched = true;
- schedule_delayed_work(dwork,
- msecs_to_jiffies(100));
+ if (likely(cq)) {
+ work = &pfvf->refill_wrk[cq->cq_idx];
+ dwork = &work->pool_refill_work;
+ /* Schedule a task if no other task is running */
+ if (!cq->refill_task_sched) {
+ work->napi = napi;
+ cq->refill_task_sched = true;
+ schedule_delayed_work(dwork,
+ msecs_to_jiffies(100));
+ }
+ /* Set the need_wakeup flag since buffers could not be refilled */
+ if (pool->xsk_pool)
+ xsk_set_rx_need_wakeup(pool->xsk_pool);
}
} else {
+ /* Clear the need_wakeup flag since buffers were refilled successfully */
+ if (pool && pool->xsk_pool)
+ xsk_clear_rx_need_wakeup(pool->xsk_pool);
/* Re-enable interrupts */
otx2_write64(pfvf,
NIX_LF_CINTX_ENA_W1S(cq_poll->cint_idx),
@@ -1147,7 +1194,7 @@ bool otx2_sq_append_skb(void *dev, struct netdev_queue *txq,
/* Check if there is enough room between producer
* and consumer index.
*/
- free_desc = (sq->cons_head - sq->head - 1 + sq->sqe_cnt) & (sq->sqe_cnt - 1);
+ free_desc = otx2_get_free_sqe(sq);
if (free_desc < sq->sqe_thresh)
return false;
@@ -1230,15 +1277,19 @@ void otx2_cleanup_rx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq, int q
u16 pool_id;
u64 iova;
- if (pfvf->xdp_prog)
+ pool_id = otx2_get_pool_idx(pfvf, AURA_NIX_RQ, qidx);
+ pool = &pfvf->qset.pool[pool_id];
+
+ if (pfvf->xdp_prog) {
+ if (pool->page_pool)
+ xdp_rxq_info_unreg_mem_model(&cq->xdp_rxq);
+
xdp_rxq_info_unreg(&cq->xdp_rxq);
+ }
if (otx2_nix_cq_op_status(pfvf, cq) || !cq->pend_cqe)
return;
- pool_id = otx2_get_pool_idx(pfvf, AURA_NIX_RQ, qidx);
- pool = &pfvf->qset.pool[pool_id];
-
while (cq->pend_cqe) {
cqe = (struct nix_cqe_rx_s *)otx2_get_next_cqe(cq);
processed_cqe++;
@@ -1359,8 +1410,9 @@ void otx2_free_pending_sqe(struct otx2_nic *pfvf)
}
}
-static void otx2_xdp_sqe_add_sg(struct otx2_snd_queue *sq, u64 dma_addr,
- int len, int *offset)
+static void otx2_xdp_sqe_add_sg(struct otx2_snd_queue *sq,
+ struct xdp_frame *xdpf,
+ u64 dma_addr, int len, int *offset, u16 flags)
{
struct nix_sqe_sg_s *sg = NULL;
u64 *iova = NULL;
@@ -1377,16 +1429,34 @@ static void otx2_xdp_sqe_add_sg(struct otx2_snd_queue *sq, u64 dma_addr,
sq->sg[sq->head].dma_addr[0] = dma_addr;
sq->sg[sq->head].size[0] = len;
sq->sg[sq->head].num_segs = 1;
+ sq->sg[sq->head].flags = flags;
+ sq->sg[sq->head].skb = (u64)xdpf;
}
-bool otx2_xdp_sq_append_pkt(struct otx2_nic *pfvf, u64 iova, int len, u16 qidx)
+int otx2_read_free_sqe(struct otx2_nic *pfvf, u16 qidx)
+{
+ struct otx2_snd_queue *sq;
+ int free_sqe;
+
+ sq = &pfvf->qset.sq[qidx];
+ free_sqe = otx2_get_free_sqe(sq);
+ if (free_sqe < sq->sqe_thresh) {
+ netdev_warn(pfvf->netdev, "No free sqe for Send queue%d\n", qidx);
+ return 0;
+ }
+
+ return free_sqe - sq->sqe_thresh;
+}
+
+bool otx2_xdp_sq_append_pkt(struct otx2_nic *pfvf, struct xdp_frame *xdpf,
+ u64 iova, int len, u16 qidx, u16 flags)
{
struct nix_sqe_hdr_s *sqe_hdr;
struct otx2_snd_queue *sq;
int offset, free_sqe;
sq = &pfvf->qset.sq[qidx];
- free_sqe = (sq->num_sqbs - *sq->aura_fc_addr) * sq->sqe_per_sqb;
+ free_sqe = otx2_get_free_sqe(sq);
if (free_sqe < sq->sqe_thresh)
return false;
@@ -1405,7 +1475,7 @@ bool otx2_xdp_sq_append_pkt(struct otx2_nic *pfvf, u64 iova, int len, u16 qidx)
offset = sizeof(*sqe_hdr);
- otx2_xdp_sqe_add_sg(sq, iova, len, &offset);
+ otx2_xdp_sqe_add_sg(sq, xdpf, iova, len, &offset, flags);
sqe_hdr->sizem1 = (offset / 16) - 1;
pfvf->hw_ops->sqe_flush(pfvf, sq, offset, qidx);
@@ -1416,16 +1486,30 @@ static bool otx2_xdp_rcv_pkt_handler(struct otx2_nic *pfvf,
struct bpf_prog *prog,
struct nix_cqe_rx_s *cqe,
struct otx2_cq_queue *cq,
- bool *need_xdp_flush)
+ u32 *metasize, bool *need_xdp_flush)
{
+ struct xdp_buff xdp, *xsk_buff = NULL;
unsigned char *hard_start;
+ struct otx2_pool *pool;
+ struct xdp_frame *xdpf;
int qidx = cq->cq_idx;
- struct xdp_buff xdp;
struct page *page;
u64 iova, pa;
u32 act;
int err;
+ pool = &pfvf->qset.pool[qidx];
+
+ if (pool->xsk_pool) {
+ xsk_buff = pool->xdp[--cq->rbpool->xdp_top];
+ if (!xsk_buff)
+ return false;
+
+ xsk_buff->data_end = xsk_buff->data + cqe->sg.seg_size;
+ act = bpf_prog_run_xdp(prog, xsk_buff);
+ goto handle_xdp_verdict;
+ }
+
iova = cqe->sg.seg_addr - OTX2_HEAD_ROOM;
pa = otx2_iova_to_phys(pfvf->iommu_domain, iova);
page = virt_to_page(phys_to_virt(pa));
@@ -1434,41 +1518,64 @@ static bool otx2_xdp_rcv_pkt_handler(struct otx2_nic *pfvf,
hard_start = (unsigned char *)phys_to_virt(pa);
xdp_prepare_buff(&xdp, hard_start, OTX2_HEAD_ROOM,
- cqe->sg.seg_size, false);
+ cqe->sg.seg_size, true);
act = bpf_prog_run_xdp(prog, &xdp);
+handle_xdp_verdict:
switch (act) {
case XDP_PASS:
+ *metasize = xdp.data - xdp.data_meta;
break;
case XDP_TX:
qidx += pfvf->hw.tx_queues;
cq->pool_ptrs++;
- return otx2_xdp_sq_append_pkt(pfvf, iova,
- cqe->sg.seg_size, qidx);
+ xdpf = xdp_convert_buff_to_frame(&xdp);
+ return otx2_xdp_sq_append_pkt(pfvf, xdpf,
+ cqe->sg.seg_addr,
+ cqe->sg.seg_size,
+ qidx, OTX2_XDP_TX);
case XDP_REDIRECT:
cq->pool_ptrs++;
- err = xdp_do_redirect(pfvf->netdev, &xdp, prog);
+ if (xsk_buff) {
+ err = xdp_do_redirect(pfvf->netdev, xsk_buff, prog);
+ if (!err) {
+ *need_xdp_flush = true;
+ return true;
+ }
+ return false;
+ }
- otx2_dma_unmap_page(pfvf, iova, pfvf->rbsize,
- DMA_FROM_DEVICE);
+ err = xdp_do_redirect(pfvf->netdev, &xdp, prog);
if (!err) {
*need_xdp_flush = true;
return true;
}
- put_page(page);
+
+ otx2_dma_unmap_page(pfvf, iova, pfvf->rbsize,
+ DMA_FROM_DEVICE);
+ xdpf = xdp_convert_buff_to_frame(&xdp);
+ xdp_return_frame(xdpf);
break;
default:
bpf_warn_invalid_xdp_action(pfvf->netdev, prog, act);
break;
case XDP_ABORTED:
+ if (xsk_buff)
+ xsk_buff_free(xsk_buff);
trace_xdp_exception(pfvf->netdev, prog, act);
break;
case XDP_DROP:
- otx2_dma_unmap_page(pfvf, iova, pfvf->rbsize,
- DMA_FROM_DEVICE);
- put_page(page);
cq->pool_ptrs++;
+ if (xsk_buff) {
+ xsk_buff_free(xsk_buff);
+ } else if (page->pp) {
+ page_pool_recycle_direct(pool->page_pool, page);
+ } else {
+ otx2_dma_unmap_page(pfvf, iova, pfvf->rbsize,
+ DMA_FROM_DEVICE);
+ put_page(page);
+ }
return true;
}
return false;
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
index d23810963fdb..acf259d72008 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
@@ -12,6 +12,7 @@
#include <linux/iommu.h>
#include <linux/if_vlan.h>
#include <net/xdp.h>
+#include <net/xdp_sock_drv.h>
#define LBK_CHAN_BASE 0x000
#define SDP_CHAN_BASE 0x700
@@ -76,6 +77,7 @@ struct otx2_rcv_queue {
struct sg_list {
u16 num_segs;
+ u16 flags;
u64 skb;
u64 size[OTX2_MAX_FRAGS_IN_SQE];
u64 dma_addr[OTX2_MAX_FRAGS_IN_SQE];
@@ -104,6 +106,8 @@ struct otx2_snd_queue {
/* SQE ring and CPT response queue for Inline IPSEC */
struct qmem *sqe_ring;
struct qmem *cpt_resp;
+ /* Buffer pool for AF_XDP zero-copy */
+ struct xsk_buff_pool *xsk_pool;
} ____cacheline_aligned_in_smp;
enum cq_type {
@@ -127,7 +131,11 @@ struct otx2_pool {
struct qmem *stack;
struct qmem *fc_addr;
struct page_pool *page_pool;
+ struct xsk_buff_pool *xsk_pool;
+ struct xdp_buff **xdp;
+ u16 xdp_cnt;
u16 rbsize;
+ u16 xdp_top;
};
struct otx2_cq_queue {
@@ -144,6 +152,7 @@ struct otx2_cq_queue {
void *cqe_base;
struct qmem *cqe;
struct otx2_pool *rbpool;
+ bool xsk_zc_en;
struct xdp_rxq_info xdp_rxq;
} ____cacheline_aligned_in_smp;
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
index e926c6ce96cf..7ef3ba477d49 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
@@ -722,15 +722,27 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (err)
goto err_shutdown_tc;
+ vf->af_xdp_zc_qidx = bitmap_zalloc(qcount, GFP_KERNEL);
+ if (!vf->af_xdp_zc_qidx) {
+ err = -ENOMEM;
+ goto err_unreg_devlink;
+ }
+
#ifdef CONFIG_DCB
err = otx2_dcbnl_set_ops(netdev);
if (err)
- goto err_shutdown_tc;
+ goto err_free_zc_bmap;
#endif
otx2_qos_init(vf, qos_txqs);
return 0;
+#ifdef CONFIG_DCB
+err_free_zc_bmap:
+ bitmap_free(vf->af_xdp_zc_qidx);
+#endif
+err_unreg_devlink:
+ otx2_unregister_dl(vf);
err_shutdown_tc:
otx2_shutdown_tc(vf);
err_unreg_netdev:
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_xsk.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_xsk.c
new file mode 100644
index 000000000000..ce10caea8511
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_xsk.c
@@ -0,0 +1,225 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell RVU Ethernet driver
+ *
+ * Copyright (C) 2024 Marvell.
+ *
+ */
+
+#include <linux/bpf_trace.h>
+#include <linux/stringify.h>
+#include <net/xdp_sock_drv.h>
+#include <net/xdp.h>
+
+#include "otx2_common.h"
+#include "otx2_xsk.h"
+
+int otx2_xsk_pool_alloc_buf(struct otx2_nic *pfvf, struct otx2_pool *pool,
+ dma_addr_t *dma, int idx)
+{
+ struct xdp_buff *xdp;
+ int delta;
+
+ xdp = xsk_buff_alloc(pool->xsk_pool);
+ if (!xdp)
+ return -ENOMEM;
+
+ pool->xdp[pool->xdp_top++] = xdp;
+ *dma = OTX2_DATA_ALIGN(xsk_buff_xdp_get_dma(xdp));
+ /* Adjust xdp->data for unaligned addresses */
+ delta = *dma - xsk_buff_xdp_get_dma(xdp);
+ xdp->data += delta;
+
+ return 0;
+}
+
+static int otx2_xsk_ctx_disable(struct otx2_nic *pfvf, u16 qidx, int aura_id)
+{
+ struct nix_cn10k_aq_enq_req *cn10k_rq_aq;
+ struct npa_aq_enq_req *aura_aq;
+ struct npa_aq_enq_req *pool_aq;
+ struct nix_aq_enq_req *rq_aq;
+
+ if (test_bit(CN10K_LMTST, &pfvf->hw.cap_flag)) {
+ cn10k_rq_aq = otx2_mbox_alloc_msg_nix_cn10k_aq_enq(&pfvf->mbox);
+ if (!cn10k_rq_aq)
+ return -ENOMEM;
+ cn10k_rq_aq->qidx = qidx;
+ cn10k_rq_aq->rq.ena = 0;
+ cn10k_rq_aq->rq_mask.ena = 1;
+ cn10k_rq_aq->ctype = NIX_AQ_CTYPE_RQ;
+ cn10k_rq_aq->op = NIX_AQ_INSTOP_WRITE;
+ } else {
+ rq_aq = otx2_mbox_alloc_msg_nix_aq_enq(&pfvf->mbox);
+ if (!rq_aq)
+ return -ENOMEM;
+ rq_aq->qidx = qidx;
+ rq_aq->sq.ena = 0;
+ rq_aq->sq_mask.ena = 1;
+ rq_aq->ctype = NIX_AQ_CTYPE_RQ;
+ rq_aq->op = NIX_AQ_INSTOP_WRITE;
+ }
+
+ aura_aq = otx2_mbox_alloc_msg_npa_aq_enq(&pfvf->mbox);
+ if (!aura_aq)
+ goto fail;
+
+ aura_aq->aura_id = aura_id;
+ aura_aq->aura.ena = 0;
+ aura_aq->aura_mask.ena = 1;
+ aura_aq->ctype = NPA_AQ_CTYPE_AURA;
+ aura_aq->op = NPA_AQ_INSTOP_WRITE;
+
+ pool_aq = otx2_mbox_alloc_msg_npa_aq_enq(&pfvf->mbox);
+ if (!pool_aq)
+ goto fail;
+
+ pool_aq->aura_id = aura_id;
+ pool_aq->pool.ena = 0;
+ pool_aq->pool_mask.ena = 1;
+
+ pool_aq->ctype = NPA_AQ_CTYPE_POOL;
+ pool_aq->op = NPA_AQ_INSTOP_WRITE;
+
+ return otx2_sync_mbox_msg(&pfvf->mbox);
+
+fail:
+ otx2_mbox_reset(&pfvf->mbox.mbox, 0);
+ return -ENOMEM;
+}
+
+static void otx2_clean_up_rq(struct otx2_nic *pfvf, int qidx)
+{
+ struct otx2_qset *qset = &pfvf->qset;
+ struct otx2_cq_queue *cq;
+ struct otx2_pool *pool;
+ u64 iova;
+
+ /* If the DOWN flag is set, the queues are already freed */
+ if (pfvf->flags & OTX2_FLAG_INTF_DOWN)
+ return;
+
+ cq = &qset->cq[qidx];
+ if (cq)
+ otx2_cleanup_rx_cqes(pfvf, cq, qidx);
+
+ pool = &pfvf->qset.pool[qidx];
+ iova = otx2_aura_allocptr(pfvf, qidx);
+ while (iova) {
+ iova -= OTX2_HEAD_ROOM;
+ otx2_free_bufs(pfvf, pool, iova, pfvf->rbsize);
+ iova = otx2_aura_allocptr(pfvf, qidx);
+ }
+
+ mutex_lock(&pfvf->mbox.lock);
+ otx2_xsk_ctx_disable(pfvf, qidx, qidx);
+ mutex_unlock(&pfvf->mbox.lock);
+}
+
+int otx2_xsk_pool_enable(struct otx2_nic *pf, struct xsk_buff_pool *pool, u16 qidx)
+{
+ u16 rx_queues = pf->hw.rx_queues;
+ u16 tx_queues = pf->hw.tx_queues;
+ int err;
+
+ if (qidx >= rx_queues || qidx >= tx_queues)
+ return -EINVAL;
+
+ err = xsk_pool_dma_map(pool, pf->dev, DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING);
+ if (err)
+ return err;
+
+ set_bit(qidx, pf->af_xdp_zc_qidx);
+ otx2_clean_up_rq(pf, qidx);
+ /* Reconfigure RSS table as 'qidx' cannot be part of RSS now */
+ otx2_set_rss_table(pf, DEFAULT_RSS_CONTEXT_GROUP);
+ /* Kick start the NAPI context so that receiving will start */
+ return otx2_xsk_wakeup(pf->netdev, qidx, XDP_WAKEUP_RX);
+}
+
+int otx2_xsk_pool_disable(struct otx2_nic *pf, u16 qidx)
+{
+ struct net_device *netdev = pf->netdev;
+ struct xsk_buff_pool *pool;
+ struct otx2_snd_queue *sq;
+
+ pool = xsk_get_pool_from_qid(netdev, qidx);
+ if (!pool)
+ return -EINVAL;
+
+ sq = &pf->qset.sq[qidx + pf->hw.tx_queues];
+ sq->xsk_pool = NULL;
+ otx2_clean_up_rq(pf, qidx);
+ clear_bit(qidx, pf->af_xdp_zc_qidx);
+ xsk_pool_dma_unmap(pool, DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING);
+ /* Reconfigure RSS table as 'qidx' needs to be part of RSS again */
+ otx2_set_rss_table(pf, DEFAULT_RSS_CONTEXT_GROUP);
+
+ return 0;
+}
+
+int otx2_xsk_pool_setup(struct otx2_nic *pf, struct xsk_buff_pool *pool, u16 qidx)
+{
+ if (pool)
+ return otx2_xsk_pool_enable(pf, pool, qidx);
+
+ return otx2_xsk_pool_disable(pf, qidx);
+}
+
+int otx2_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags)
+{
+ struct otx2_nic *pf = netdev_priv(dev);
+ struct otx2_cq_poll *cq_poll = NULL;
+ struct otx2_qset *qset = &pf->qset;
+
+ if (pf->flags & OTX2_FLAG_INTF_DOWN)
+ return -ENETDOWN;
+
+ if (queue_id >= pf->hw.rx_queues || queue_id >= pf->hw.tx_queues)
+ return -EINVAL;
+
+ cq_poll = &qset->napi[queue_id];
+ if (!cq_poll)
+ return -EINVAL;
+
+ /* Trigger interrupt */
+ if (!napi_if_scheduled_mark_missed(&cq_poll->napi)) {
+ otx2_write64(pf, NIX_LF_CINTX_ENA_W1S(cq_poll->cint_idx), BIT_ULL(0));
+ otx2_write64(pf, NIX_LF_CINTX_INT_W1S(cq_poll->cint_idx), BIT_ULL(0));
+ }
+
+ return 0;
+}
+
+void otx2_attach_xsk_buff(struct otx2_nic *pfvf, struct otx2_snd_queue *sq, int qidx)
+{
+ if (test_bit(qidx, pfvf->af_xdp_zc_qidx))
+ sq->xsk_pool = xsk_get_pool_from_qid(pfvf->netdev, qidx);
+}
+
+void otx2_zc_napi_handler(struct otx2_nic *pfvf, struct xsk_buff_pool *pool,
+ int queue, int budget)
+{
+ struct xdp_desc *xdp_desc = pool->tx_descs;
+ int err, i, work_done = 0, batch;
+
+ budget = min(budget, otx2_read_free_sqe(pfvf, queue));
+ batch = xsk_tx_peek_release_desc_batch(pool, budget);
+ if (!batch)
+ return;
+
+ for (i = 0; i < batch; i++) {
+ dma_addr_t dma_addr;
+
+ dma_addr = xsk_buff_raw_get_dma(pool, xdp_desc[i].addr);
+ err = otx2_xdp_sq_append_pkt(pfvf, NULL, dma_addr, xdp_desc[i].len,
+ queue, OTX2_AF_XDP_FRAME);
+ if (!err) {
+ netdev_err(pfvf->netdev, "AF_XDP: Unable to transfer packet, err %d\n", err);
+ break;
+ }
+ work_done++;
+ }
+
+ if (work_done)
+ xsk_tx_release(pool);
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_xsk.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_xsk.h
new file mode 100644
index 000000000000..8047fafee8fe
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_xsk.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell RVU Ethernet driver
+ *
+ * Copyright (C) 2024 Marvell.
+ *
+ */
+
+#ifndef OTX2_XSK_H
+#define OTX2_XSK_H
+
+struct otx2_nic;
+struct xsk_buff_pool;
+
+int otx2_xsk_pool_setup(struct otx2_nic *pf, struct xsk_buff_pool *pool, u16 qid);
+int otx2_xsk_pool_enable(struct otx2_nic *pf, struct xsk_buff_pool *pool, u16 qid);
+int otx2_xsk_pool_disable(struct otx2_nic *pf, u16 qid);
+int otx2_xsk_pool_alloc_buf(struct otx2_nic *pfvf, struct otx2_pool *pool,
+ dma_addr_t *dma, int idx);
+int otx2_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags);
+void otx2_zc_napi_handler(struct otx2_nic *pfvf, struct xsk_buff_pool *pool,
+ int queue, int budget);
+void otx2_attach_xsk_buff(struct otx2_nic *pfvf, struct otx2_snd_queue *sq, int qidx);
+
+#endif /* OTX2_XSK_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/qos_sq.c b/drivers/net/ethernet/marvell/octeontx2/nic/qos_sq.c
index 9d887bfc3108..c5dbae0e513b 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/qos_sq.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/qos_sq.c
@@ -82,7 +82,7 @@ static int otx2_qos_sq_aura_pool_init(struct otx2_nic *pfvf, int qidx)
}
for (ptr = 0; ptr < num_sqbs; ptr++) {
- err = otx2_alloc_rbuf(pfvf, pool, &bufptr);
+ err = otx2_alloc_rbuf(pfvf, pool, &bufptr, pool_id, ptr);
if (err)
goto sqb_free;
pfvf->hw_ops->aura_freeptr(pfvf, pool_id, bufptr);
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_main.c b/drivers/net/ethernet/marvell/prestera/prestera_main.c
index 440a4c42b405..71ffb55d1fc4 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_main.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_main.c
@@ -396,7 +396,6 @@ static int prestera_port_sfp_bind(struct prestera_port *port)
continue;
port->phylink_pcs.ops = &prestera_pcs_ops;
- port->phylink_pcs.neg_mode = true;
port->phy_config.dev = &port->dev->dev;
port->phy_config.type = PHYLINK_NETDEV;
@@ -635,7 +634,7 @@ static int prestera_port_create(struct prestera_switch *sw, u32 id)
goto err_dl_port_register;
dev->features |= NETIF_F_HW_TC;
- dev->netns_local = true;
+ dev->netns_immutable = true;
dev->netdev_ops = &prestera_netdev_ops;
dev->ethtool_ops = &prestera_ethtool_ops;
SET_NETDEV_DEV(dev, sw->dev->dev);
diff --git a/drivers/net/ethernet/mediatek/Kconfig b/drivers/net/ethernet/mediatek/Kconfig
index 95c4405b7d7b..7bfd3f230ff5 100644
--- a/drivers/net/ethernet/mediatek/Kconfig
+++ b/drivers/net/ethernet/mediatek/Kconfig
@@ -7,14 +7,6 @@ config NET_VENDOR_MEDIATEK
if NET_VENDOR_MEDIATEK
-config NET_AIROHA
- tristate "Airoha SoC Gigabit Ethernet support"
- depends on NET_DSA || !NET_DSA
- select PAGE_POOL
- help
- This driver supports the gigabit ethernet MACs in the
- Airoha SoC family.
-
config NET_MEDIATEK_SOC_WED
depends on ARCH_MEDIATEK || COMPILE_TEST
def_bool NET_MEDIATEK_SOC != n
diff --git a/drivers/net/ethernet/mediatek/Makefile b/drivers/net/ethernet/mediatek/Makefile
index ddbb7f4a516c..03e008fbc859 100644
--- a/drivers/net/ethernet/mediatek/Makefile
+++ b/drivers/net/ethernet/mediatek/Makefile
@@ -11,4 +11,3 @@ mtk_eth-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed_debugfs.o
endif
obj-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed_ops.o
obj-$(CONFIG_NET_MEDIATEK_STAR_EMAC) += mtk_star_emac.o
-obj-$(CONFIG_NET_AIROHA) += airoha_eth.o
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index 53485142938c..43197b28b3e7 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -815,12 +815,60 @@ static void mtk_mac_link_up(struct phylink_config *config,
mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
}
+static void mtk_mac_disable_tx_lpi(struct phylink_config *config)
+{
+ struct mtk_mac *mac = container_of(config, struct mtk_mac,
+ phylink_config);
+ struct mtk_eth *eth = mac->hw;
+
+ mtk_m32(eth, MAC_MCR_EEE100M | MAC_MCR_EEE1G, 0, MTK_MAC_MCR(mac->id));
+}
+
+static int mtk_mac_enable_tx_lpi(struct phylink_config *config, u32 timer,
+ bool tx_clk_stop)
+{
+ struct mtk_mac *mac = container_of(config, struct mtk_mac,
+ phylink_config);
+ struct mtk_eth *eth = mac->hw;
+ u32 val;
+
+ /* Tx idle timer in ms */
+ timer = DIV_ROUND_UP(timer, 1000);
+
+ /* If the timer is zero, then set LPI_MODE, which allows the
+ * system to enter LPI mode immediately rather than waiting for
+ * the LPI threshold.
+ */
+ if (!timer)
+ val = MAC_EEE_LPI_MODE;
+ else if (FIELD_FIT(MAC_EEE_LPI_TXIDLE_THD, timer))
+ val = FIELD_PREP(MAC_EEE_LPI_TXIDLE_THD, timer);
+ else
+ val = MAC_EEE_LPI_TXIDLE_THD;
+
+ if (tx_clk_stop)
+ val |= MAC_EEE_CKG_TXIDLE;
+
+ /* PHY wake-up time. This field does not have a reset value, so use the
+ * reset value from MT7531 (36us for 100M and 17us for 1000M).
+ */
+ val |= FIELD_PREP(MAC_EEE_WAKEUP_TIME_1000, 17) |
+ FIELD_PREP(MAC_EEE_WAKEUP_TIME_100, 36);
+
+ mtk_w32(eth, val, MTK_MAC_EEECR(mac->id));
+ mtk_m32(eth, 0, MAC_MCR_EEE100M | MAC_MCR_EEE1G, MTK_MAC_MCR(mac->id));
+
+ return 0;
+}
+
static const struct phylink_mac_ops mtk_phylink_ops = {
.mac_select_pcs = mtk_mac_select_pcs,
.mac_config = mtk_mac_config,
.mac_finish = mtk_mac_finish,
.mac_link_down = mtk_mac_link_down,
.mac_link_up = mtk_mac_link_up,
+ .mac_disable_tx_lpi = mtk_mac_disable_tx_lpi,
+ .mac_enable_tx_lpi = mtk_mac_enable_tx_lpi,
};
static int mtk_mdio_init(struct mtk_eth *eth)
@@ -830,17 +878,12 @@ static int mtk_mdio_init(struct mtk_eth *eth)
int ret;
u32 val;
- mii_np = of_get_child_by_name(eth->dev->of_node, "mdio-bus");
+ mii_np = of_get_available_child_by_name(eth->dev->of_node, "mdio-bus");
if (!mii_np) {
dev_err(eth->dev, "no %s child node found", "mdio-bus");
return -ENODEV;
}
- if (!of_device_is_available(mii_np)) {
- ret = -ENODEV;
- goto err_put_node;
- }
-
eth->mii_bus = devm_mdiobus_alloc(eth->dev);
if (!eth->mii_bus) {
ret = -ENOMEM;
@@ -2079,7 +2122,7 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
if (ring->page_pool) {
struct page *page = virt_to_head_page(data);
struct xdp_buff xdp;
- u32 ret;
+ u32 ret, metasize;
new_data = mtk_page_pool_get_buff(ring->page_pool,
&dma_addr,
@@ -2095,7 +2138,7 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
xdp_init_buff(&xdp, PAGE_SIZE, &ring->xdp_q);
xdp_prepare_buff(&xdp, data, MTK_PP_HEADROOM, pktlen,
- false);
+ true);
xdp_buff_clear_frags_flag(&xdp);
ret = mtk_xdp_run(eth, ring, &xdp, netdev);
@@ -2115,6 +2158,9 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
skb_reserve(skb, xdp.data - xdp.data_hard_start);
skb_put(skb, xdp.data_end - xdp.data);
+ metasize = xdp.data - xdp.data_meta;
+ if (metasize)
+ skb_metadata_set(skb, metasize);
skb_mark_for_recycle(skb);
} else {
if (ring->frag_size <= PAGE_SIZE)
@@ -4474,6 +4520,20 @@ static int mtk_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam
return phylink_ethtool_set_pauseparam(mac->phylink, pause);
}
+static int mtk_get_eee(struct net_device *dev, struct ethtool_keee *eee)
+{
+ struct mtk_mac *mac = netdev_priv(dev);
+
+ return phylink_ethtool_get_eee(mac->phylink, eee);
+}
+
+static int mtk_set_eee(struct net_device *dev, struct ethtool_keee *eee)
+{
+ struct mtk_mac *mac = netdev_priv(dev);
+
+ return phylink_ethtool_set_eee(mac->phylink, eee);
+}
+
static u16 mtk_select_queue(struct net_device *dev, struct sk_buff *skb,
struct net_device *sb_dev)
{
@@ -4506,6 +4566,8 @@ static const struct ethtool_ops mtk_ethtool_ops = {
.set_pauseparam = mtk_set_pauseparam,
.get_rxnfc = mtk_get_rxnfc,
.set_rxnfc = mtk_set_rxnfc,
+ .get_eee = mtk_get_eee,
+ .set_eee = mtk_set_eee,
};
static const struct net_device_ops mtk_netdev_ops = {
@@ -4615,6 +4677,9 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
mac->phylink_config.type = PHYLINK_NETDEV;
mac->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
MAC_10 | MAC_100 | MAC_1000 | MAC_2500FD;
+ mac->phylink_config.lpi_capabilities = MAC_100FD | MAC_1000FD |
+ MAC_2500FD;
+ mac->phylink_config.lpi_timer_default = 1000;
/* MT7623 gmac0 is now missing its speed-specific PLL configuration
* in its .mac_config method (since state->speed is not valid there.
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
index 0d5225f1d3ee..90a377ab4359 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -453,6 +453,8 @@
#define MAC_MCR_RX_FIFO_CLR_DIS BIT(12)
#define MAC_MCR_BACKOFF_EN BIT(9)
#define MAC_MCR_BACKPR_EN BIT(8)
+#define MAC_MCR_EEE1G BIT(7)
+#define MAC_MCR_EEE100M BIT(6)
#define MAC_MCR_FORCE_RX_FC BIT(5)
#define MAC_MCR_FORCE_TX_FC BIT(4)
#define MAC_MCR_SPEED_1000 BIT(3)
@@ -461,6 +463,15 @@
#define MAC_MCR_FORCE_LINK BIT(0)
#define MAC_MCR_FORCE_LINK_DOWN (MAC_MCR_FORCE_MODE)
+/* Mac EEE control registers */
+#define MTK_MAC_EEECR(x) (0x10104 + (x * 0x100))
+#define MAC_EEE_WAKEUP_TIME_1000 GENMASK(31, 24)
+#define MAC_EEE_WAKEUP_TIME_100 GENMASK(23, 16)
+#define MAC_EEE_LPI_TXIDLE_THD GENMASK(15, 8)
+#define MAC_EEE_CKG_TXIDLE BIT(3)
+#define MAC_EEE_CKG_RXLPI BIT(2)
+#define MAC_EEE_LPI_MODE BIT(0)
+
/* Mac status registers */
#define MTK_MAC_MSR(x) (0x10108 + (x * 0x100))
#define MAC_MSR_EEE1G BIT(7)
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
index f20bb390df3a..c855fb799ce1 100644
--- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
+++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
@@ -34,8 +34,10 @@ struct mtk_flow_data {
u16 vlan_in;
struct {
- u16 id;
- __be16 proto;
+ struct {
+ u16 id;
+ __be16 proto;
+ } vlans[2];
u8 num;
} vlan;
struct {
@@ -349,18 +351,19 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f,
case FLOW_ACTION_CSUM:
break;
case FLOW_ACTION_VLAN_PUSH:
- if (data.vlan.num == 1 ||
+ if (data.vlan.num + data.pppoe.num == 2 ||
act->vlan.proto != htons(ETH_P_8021Q))
return -EOPNOTSUPP;
- data.vlan.id = act->vlan.vid;
- data.vlan.proto = act->vlan.proto;
+ data.vlan.vlans[data.vlan.num].id = act->vlan.vid;
+ data.vlan.vlans[data.vlan.num].proto = act->vlan.proto;
data.vlan.num++;
break;
case FLOW_ACTION_VLAN_POP:
break;
case FLOW_ACTION_PPPOE_PUSH:
- if (data.pppoe.num == 1)
+ if (data.pppoe.num == 1 ||
+ data.vlan.num == 2)
return -EOPNOTSUPP;
data.pppoe.sid = act->pppoe.sid;
@@ -450,12 +453,9 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f,
if (offload_type == MTK_PPE_PKT_TYPE_BRIDGE)
foe.bridge.vlan = data.vlan_in;
- if (data.vlan.num == 1) {
- if (data.vlan.proto != htons(ETH_P_8021Q))
- return -EOPNOTSUPP;
+ for (i = 0; i < data.vlan.num; i++)
+ mtk_foe_entry_set_vlan(eth, &foe, data.vlan.vlans[i].id);
- mtk_foe_entry_set_vlan(eth, &foe, data.vlan.id);
- }
if (data.pppoe.num == 1)
mtk_foe_entry_set_pppoe(eth, &foe, data.pppoe.sid);
diff --git a/drivers/net/ethernet/mediatek/mtk_star_emac.c b/drivers/net/ethernet/mediatek/mtk_star_emac.c
index 25989c79c92e..76f202d7f055 100644
--- a/drivers/net/ethernet/mediatek/mtk_star_emac.c
+++ b/drivers/net/ethernet/mediatek/mtk_star_emac.c
@@ -1427,15 +1427,10 @@ static int mtk_star_mdio_init(struct net_device *ndev)
of_node = dev->of_node;
- mdio_node = of_get_child_by_name(of_node, "mdio");
+ mdio_node = of_get_available_child_by_name(of_node, "mdio");
if (!mdio_node)
return -ENODEV;
- if (!of_device_is_available(mdio_node)) {
- ret = -ENODEV;
- goto out_put_node;
- }
-
priv->mii = devm_mdiobus_alloc(dev);
if (!priv->mii) {
ret = -ENOMEM;
diff --git a/drivers/net/ethernet/mellanox/mlx4/alloc.c b/drivers/net/ethernet/mellanox/mlx4/alloc.c
index 598df63518c5..07b061a97a6e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/alloc.c
+++ b/drivers/net/ethernet/mellanox/mlx4/alloc.c
@@ -660,9 +660,9 @@ static struct mlx4_db_pgdir *mlx4_alloc_db_pgdir(struct device *dma_device)
}
static int mlx4_alloc_db_from_pgdir(struct mlx4_db_pgdir *pgdir,
- struct mlx4_db *db, int order)
+ struct mlx4_db *db, unsigned int order)
{
- int o;
+ unsigned int o;
int i;
for (o = order; o <= 1; ++o) {
@@ -690,7 +690,7 @@ found:
return 0;
}
-int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, int order)
+int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, unsigned int order)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_db_pgdir *pgdir;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 15c57e9517e9..b33285d755b9 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -48,60 +48,43 @@
#if IS_ENABLED(CONFIG_IPV6)
#include <net/ip6_checksum.h>
#endif
+#include <net/page_pool/helpers.h>
#include "mlx4_en.h"
-static int mlx4_alloc_page(struct mlx4_en_priv *priv,
- struct mlx4_en_rx_alloc *frag,
- gfp_t gfp)
-{
- struct page *page;
- dma_addr_t dma;
-
- page = alloc_page(gfp);
- if (unlikely(!page))
- return -ENOMEM;
- dma = dma_map_page(priv->ddev, page, 0, PAGE_SIZE, priv->dma_dir);
- if (unlikely(dma_mapping_error(priv->ddev, dma))) {
- __free_page(page);
- return -ENOMEM;
- }
- frag->page = page;
- frag->dma = dma;
- frag->page_offset = priv->rx_headroom;
- return 0;
-}
-
static int mlx4_en_alloc_frags(struct mlx4_en_priv *priv,
struct mlx4_en_rx_ring *ring,
struct mlx4_en_rx_desc *rx_desc,
struct mlx4_en_rx_alloc *frags,
gfp_t gfp)
{
+ dma_addr_t dma;
int i;
for (i = 0; i < priv->num_frags; i++, frags++) {
if (!frags->page) {
- if (mlx4_alloc_page(priv, frags, gfp)) {
+ frags->page = page_pool_alloc_pages(ring->pp, gfp);
+ if (!frags->page) {
ring->alloc_fail++;
return -ENOMEM;
}
+ page_pool_fragment_page(frags->page, 1);
+ frags->page_offset = priv->rx_headroom;
+
ring->rx_alloc_pages++;
}
- rx_desc->data[i].addr = cpu_to_be64(frags->dma +
- frags->page_offset);
+ dma = page_pool_get_dma_addr(frags->page);
+ rx_desc->data[i].addr = cpu_to_be64(dma + frags->page_offset);
}
return 0;
}
static void mlx4_en_free_frag(const struct mlx4_en_priv *priv,
+ struct mlx4_en_rx_ring *ring,
struct mlx4_en_rx_alloc *frag)
{
- if (frag->page) {
- dma_unmap_page(priv->ddev, frag->dma,
- PAGE_SIZE, priv->dma_dir);
- __free_page(frag->page);
- }
+ if (frag->page)
+ page_pool_put_full_page(ring->pp, frag->page, false);
/* We need to clear all fields, otherwise a change of priv->log_rx_info
* could lead to see garbage later in frag->page.
*/
@@ -141,18 +124,6 @@ static int mlx4_en_prepare_rx_desc(struct mlx4_en_priv *priv,
(index << ring->log_stride);
struct mlx4_en_rx_alloc *frags = ring->rx_info +
(index << priv->log_rx_info);
- if (likely(ring->page_cache.index > 0)) {
- /* XDP uses a single page per frame */
- if (!frags->page) {
- ring->page_cache.index--;
- frags->page = ring->page_cache.buf[ring->page_cache.index].page;
- frags->dma = ring->page_cache.buf[ring->page_cache.index].dma;
- }
- frags->page_offset = XDP_PACKET_HEADROOM;
- rx_desc->data[0].addr = cpu_to_be64(frags->dma +
- XDP_PACKET_HEADROOM);
- return 0;
- }
return mlx4_en_alloc_frags(priv, ring, rx_desc, frags, gfp);
}
@@ -178,7 +149,7 @@ static void mlx4_en_free_rx_desc(const struct mlx4_en_priv *priv,
frags = ring->rx_info + (index << priv->log_rx_info);
for (nr = 0; nr < priv->num_frags; nr++) {
en_dbg(DRV, priv, "Freeing fragment:%d\n", nr);
- mlx4_en_free_frag(priv, frags + nr);
+ mlx4_en_free_frag(priv, ring, frags + nr);
}
}
@@ -268,6 +239,7 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
u32 size, u16 stride, int node, int queue_index)
{
struct mlx4_en_dev *mdev = priv->mdev;
+ struct page_pool_params pp = {};
struct mlx4_en_rx_ring *ring;
int err = -ENOMEM;
int tmp;
@@ -286,9 +258,26 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
ring->log_stride = ffs(ring->stride) - 1;
ring->buf_size = ring->size * ring->stride + TXBB_SIZE;
- if (xdp_rxq_info_reg(&ring->xdp_rxq, priv->dev, queue_index, 0) < 0)
+ pp.flags = PP_FLAG_DMA_MAP;
+ pp.pool_size = size * DIV_ROUND_UP(priv->rx_skb_size, PAGE_SIZE);
+ pp.nid = node;
+ pp.napi = &priv->rx_cq[queue_index]->napi;
+ pp.netdev = priv->dev;
+ pp.dev = &mdev->dev->persist->pdev->dev;
+ pp.dma_dir = priv->dma_dir;
+
+ ring->pp = page_pool_create(&pp);
+ if (!ring->pp)
goto err_ring;
+ if (xdp_rxq_info_reg(&ring->xdp_rxq, priv->dev, queue_index, 0) < 0)
+ goto err_pp;
+
+ err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, MEM_TYPE_PAGE_POOL,
+ ring->pp);
+ if (err)
+ goto err_xdp_info;
+
tmp = size * roundup_pow_of_two(MLX4_EN_MAX_RX_FRAGS *
sizeof(struct mlx4_en_rx_alloc));
ring->rx_info = kvzalloc_node(tmp, GFP_KERNEL, node);
@@ -319,6 +308,8 @@ err_info:
ring->rx_info = NULL;
err_xdp_info:
xdp_rxq_info_unreg(&ring->xdp_rxq);
+err_pp:
+ page_pool_destroy(ring->pp);
err_ring:
kfree(ring);
*pring = NULL;
@@ -409,26 +400,6 @@ void mlx4_en_recover_from_oom(struct mlx4_en_priv *priv)
}
}
-/* When the rx ring is running in page-per-packet mode, a released frame can go
- * directly into a small cache, to avoid unmapping or touching the page
- * allocator. In bpf prog performance scenarios, buffers are either forwarded
- * or dropped, never converted to skbs, so every page can come directly from
- * this cache when it is sized to be a multiple of the napi budget.
- */
-bool mlx4_en_rx_recycle(struct mlx4_en_rx_ring *ring,
- struct mlx4_en_rx_alloc *frame)
-{
- struct mlx4_en_page_cache *cache = &ring->page_cache;
-
- if (cache->index >= MLX4_EN_CACHE_SIZE)
- return false;
-
- cache->buf[cache->index].page = frame->page;
- cache->buf[cache->index].dma = frame->dma;
- cache->index++;
- return true;
-}
-
void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
struct mlx4_en_rx_ring **pring,
u32 size, u16 stride)
@@ -445,6 +416,7 @@ void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
xdp_rxq_info_unreg(&ring->xdp_rxq);
mlx4_free_hwq_res(mdev->dev, &ring->wqres, size * stride + TXBB_SIZE);
kvfree(ring->rx_info);
+ page_pool_destroy(ring->pp);
ring->rx_info = NULL;
kfree(ring);
*pring = NULL;
@@ -453,14 +425,6 @@ void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,
struct mlx4_en_rx_ring *ring)
{
- int i;
-
- for (i = 0; i < ring->page_cache.index; i++) {
- dma_unmap_page(priv->ddev, ring->page_cache.buf[i].dma,
- PAGE_SIZE, priv->dma_dir);
- put_page(ring->page_cache.buf[i].page);
- }
- ring->page_cache.index = 0;
mlx4_en_free_rx_buf(priv, ring);
if (ring->stride <= TXBB_SIZE)
ring->buf -= TXBB_SIZE;
@@ -487,7 +451,7 @@ static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
if (unlikely(!page))
goto fail;
- dma = frags->dma;
+ dma = page_pool_get_dma_addr(page);
dma_sync_single_range_for_cpu(priv->ddev, dma, frags->page_offset,
frag_size, priv->dma_dir);
@@ -498,6 +462,7 @@ static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
if (frag_info->frag_stride == PAGE_SIZE / 2) {
frags->page_offset ^= PAGE_SIZE / 2;
release = page_count(page) != 1 ||
+ atomic_long_read(&page->pp_ref_count) != 1 ||
page_is_pfmemalloc(page) ||
page_to_nid(page) != numa_mem_id();
} else if (!priv->rx_headroom) {
@@ -511,10 +476,9 @@ static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
release = frags->page_offset + frag_info->frag_size > PAGE_SIZE;
}
if (release) {
- dma_unmap_page(priv->ddev, dma, PAGE_SIZE, priv->dma_dir);
frags->page = NULL;
} else {
- page_ref_inc(page);
+ page_pool_ref_page(page);
}
nr++;
@@ -784,7 +748,8 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
/* Get pointer to first fragment since we haven't
* skb yet and cast it to ethhdr struct
*/
- dma = frags[0].dma + frags[0].page_offset;
+ dma = page_pool_get_dma_addr(frags[0].page);
+ dma += frags[0].page_offset;
dma_sync_single_for_cpu(priv->ddev, dma, sizeof(*ethh),
DMA_FROM_DEVICE);
@@ -823,7 +788,8 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
void *orig_data;
u32 act;
- dma = frags[0].dma + frags[0].page_offset;
+ dma = page_pool_get_dma_addr(frags[0].page);
+ dma += frags[0].page_offset;
dma_sync_single_for_cpu(priv->ddev, dma,
priv->frag_info[0].frag_size,
DMA_FROM_DEVICE);
@@ -886,6 +852,7 @@ xdp_drop_no_cnt:
skb = napi_get_frags(&cq->napi);
if (unlikely(!skb))
goto next;
+ skb_mark_for_recycle(skb);
if (unlikely(ring->hwtstamp_rx_filter == HWTSTAMP_FILTER_ALL)) {
u64 timestamp = mlx4_en_get_cqe_ts(cqe);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 1ddb11cb25f9..87f35bcbeff8 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -44,6 +44,7 @@
#include <linux/ipv6.h>
#include <linux/indirect_call_wrapper.h>
#include <net/ipv6.h>
+#include <net/page_pool/helpers.h>
#include "mlx4_en.h"
@@ -350,16 +351,10 @@ u32 mlx4_en_recycle_tx_desc(struct mlx4_en_priv *priv,
int napi_mode)
{
struct mlx4_en_tx_info *tx_info = &ring->tx_info[index];
- struct mlx4_en_rx_alloc frame = {
- .page = tx_info->page,
- .dma = tx_info->map0_dma,
- };
-
- if (!napi_mode || !mlx4_en_rx_recycle(ring->recycle_ring, &frame)) {
- dma_unmap_page(priv->ddev, tx_info->map0_dma,
- PAGE_SIZE, priv->dma_dir);
- put_page(tx_info->page);
- }
+ struct page_pool *pool = ring->recycle_ring->pp;
+
+ /* Note that napi_mode = 0 means ndo_close() path, not budget = 0 */
+ page_pool_put_full_page(pool, tx_info->page, !!napi_mode);
return tx_info->nr_txbb;
}
@@ -450,6 +445,8 @@ int mlx4_en_process_tx_cq(struct net_device *dev,
if (unlikely(!priv->port_up))
return 0;
+ if (unlikely(!napi_budget) && cq->type == TX_XDP)
+ return 0;
netdev_txq_bql_complete_prefetchw(ring->tx_queue);
@@ -1194,7 +1191,7 @@ netdev_tx_t mlx4_en_xmit_frame(struct mlx4_en_rx_ring *rx_ring,
tx_desc = ring->buf + (index << LOG_TXBB_SIZE);
data = &tx_desc->data;
- dma = frame->dma;
+ dma = page_pool_get_dma_addr(frame->page);
tx_info->page = frame->page;
frame->page = NULL;
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 28b70dcc652e..ad0d91a75184 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -247,20 +247,11 @@ struct mlx4_en_tx_desc {
struct mlx4_en_rx_alloc {
struct page *page;
- dma_addr_t dma;
u32 page_offset;
};
#define MLX4_EN_CACHE_SIZE (2 * NAPI_POLL_WEIGHT)
-struct mlx4_en_page_cache {
- u32 index;
- struct {
- struct page *page;
- dma_addr_t dma;
- } buf[MLX4_EN_CACHE_SIZE];
-};
-
enum {
MLX4_EN_TX_RING_STATE_RECOVERING,
};
@@ -335,14 +326,14 @@ struct mlx4_en_rx_ring {
u16 stride;
u16 log_stride;
u16 cqn; /* index of port CQ associated with this ring */
+ u8 fcs_del;
u32 prod;
u32 cons;
u32 buf_size;
- u8 fcs_del;
+ struct page_pool *pp;
void *buf;
void *rx_info;
struct bpf_prog __rcu *xdp_prog;
- struct mlx4_en_page_cache page_cache;
unsigned long bytes;
unsigned long packets;
unsigned long csum_ok;
@@ -707,8 +698,6 @@ netdev_tx_t mlx4_en_xmit_frame(struct mlx4_en_rx_ring *rx_ring,
struct mlx4_en_priv *priv, unsigned int length,
int tx_ind, bool *doorbell_pending);
void mlx4_en_xmit_doorbell(struct mlx4_en_tx_ring *ring);
-bool mlx4_en_rx_recycle(struct mlx4_en_rx_ring *ring,
- struct mlx4_en_rx_alloc *frame);
int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
struct mlx4_en_tx_ring **pring,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
index ea6070180c96..6ec7d6e0181d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
@@ -31,6 +31,7 @@ config MLX5_CORE_EN
bool "Mellanox 5th generation network adapters (ConnectX series) Ethernet support"
depends on NETDEVICES && ETHERNET && INET && PCI && MLX5_CORE
select PAGE_POOL
+ select PAGE_POOL_STATS
select DIMLIB
help
Ethernet support in Mellanox Technologies ConnectX-4 NIC.
@@ -80,8 +81,8 @@ config MLX5_BRIDGE
default y
help
mlx5 ConnectX offloads support for Ethernet Bridging (BRIDGE).
- Enable adding representors of mlx5 uplink and VF ports to Bridge and
- offloading rules for traffic between such ports. Supports VLANs (trunk and
+ Enable offloading FDB rules from a bridge device containing
+ representors of mlx5 uplink and VF ports. Supports VLANs (trunk and
access modes).
config MLX5_CLS_ACT
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index e733b81e18a2..e53dbdc0a7a1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -94,6 +94,11 @@ static u16 in_to_opcode(void *in)
return MLX5_GET(mbox_in, in, opcode);
}
+static u16 in_to_uid(void *in)
+{
+ return MLX5_GET(mbox_in, in, uid);
+}
+
/* Returns true for opcodes that might be triggered very frequently and throttle
* the command interface. Limit their command slots usage.
*/
@@ -823,7 +828,7 @@ static void cmd_status_print(struct mlx5_core_dev *dev, void *in, void *out)
opcode = in_to_opcode(in);
op_mod = MLX5_GET(mbox_in, in, op_mod);
- uid = MLX5_GET(mbox_in, in, uid);
+ uid = in_to_uid(in);
status = MLX5_GET(mbox_out, out, status);
if (!uid && opcode != MLX5_CMD_OP_DESTROY_MKEY &&
@@ -1871,6 +1876,17 @@ static int is_manage_pages(void *in)
return in_to_opcode(in) == MLX5_CMD_OP_MANAGE_PAGES;
}
+static bool mlx5_has_privileged_uid(struct mlx5_core_dev *dev)
+{
+ return !xa_empty(&dev->cmd.vars.privileged_uids);
+}
+
+static bool mlx5_cmd_is_privileged_uid(struct mlx5_core_dev *dev,
+ u16 uid)
+{
+ return !!xa_load(&dev->cmd.vars.privileged_uids, uid);
+}
+
/* Notes:
* 1. Callback functions may not sleep
* 2. Page queue commands do not support asynchrous completion
@@ -1881,7 +1897,9 @@ static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
{
struct mlx5_cmd_msg *inb, *outb;
u16 opcode = in_to_opcode(in);
- bool throttle_op;
+ bool throttle_locked = false;
+ bool unpriv_locked = false;
+ u16 uid = in_to_uid(in);
int pages_queue;
gfp_t gfp;
u8 token;
@@ -1890,12 +1908,17 @@ static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
if (mlx5_cmd_is_down(dev) || !opcode_allowed(&dev->cmd, opcode))
return -ENXIO;
- throttle_op = mlx5_cmd_is_throttle_opcode(opcode);
- if (throttle_op) {
- if (callback) {
- if (down_trylock(&dev->cmd.vars.throttle_sem))
- return -EBUSY;
- } else {
+ if (!callback) {
+ /* The semaphore is already held for callback commands. It was
+ * acquired in mlx5_cmd_exec_cb()
+ */
+ if (uid && mlx5_has_privileged_uid(dev)) {
+ if (!mlx5_cmd_is_privileged_uid(dev, uid)) {
+ unpriv_locked = true;
+ down(&dev->cmd.vars.unprivileged_sem);
+ }
+ } else if (mlx5_cmd_is_throttle_opcode(opcode)) {
+ throttle_locked = true;
down(&dev->cmd.vars.throttle_sem);
}
}
@@ -1941,8 +1964,11 @@ out_out:
out_in:
free_msg(dev, inb);
out_up:
- if (throttle_op)
+ if (throttle_locked)
up(&dev->cmd.vars.throttle_sem);
+ if (unpriv_locked)
+ up(&dev->cmd.vars.unprivileged_sem);
+
return err;
}
@@ -2104,18 +2130,22 @@ static void mlx5_cmd_exec_cb_handler(int status, void *_work)
struct mlx5_async_work *work = _work;
struct mlx5_async_ctx *ctx;
struct mlx5_core_dev *dev;
- u16 opcode;
+ bool throttle_locked;
+ bool unpriv_locked;
ctx = work->ctx;
dev = ctx->dev;
- opcode = work->opcode;
+ throttle_locked = work->throttle_locked;
+ unpriv_locked = work->unpriv_locked;
status = cmd_status_err(dev, status, work->opcode, work->op_mod, work->out);
work->user_callback(status, work);
/* Can't access "work" from this point on. It could have been freed in
* the callback.
*/
- if (mlx5_cmd_is_throttle_opcode(opcode))
+ if (throttle_locked)
up(&dev->cmd.vars.throttle_sem);
+ if (unpriv_locked)
+ up(&dev->cmd.vars.unprivileged_sem);
if (atomic_dec_and_test(&ctx->num_inflight))
complete(&ctx->inflight_done);
}
@@ -2124,6 +2154,8 @@ int mlx5_cmd_exec_cb(struct mlx5_async_ctx *ctx, void *in, int in_size,
void *out, int out_size, mlx5_async_cbk_t callback,
struct mlx5_async_work *work)
{
+ struct mlx5_core_dev *dev = ctx->dev;
+ u16 uid;
int ret;
work->ctx = ctx;
@@ -2131,11 +2163,43 @@ int mlx5_cmd_exec_cb(struct mlx5_async_ctx *ctx, void *in, int in_size,
work->opcode = in_to_opcode(in);
work->op_mod = MLX5_GET(mbox_in, in, op_mod);
work->out = out;
+ work->throttle_locked = false;
+ work->unpriv_locked = false;
+ uid = in_to_uid(in);
+
if (WARN_ON(!atomic_inc_not_zero(&ctx->num_inflight)))
return -EIO;
- ret = cmd_exec(ctx->dev, in, in_size, out, out_size,
+
+ if (uid && mlx5_has_privileged_uid(dev)) {
+ if (!mlx5_cmd_is_privileged_uid(dev, uid)) {
+ if (down_trylock(&dev->cmd.vars.unprivileged_sem)) {
+ ret = -EBUSY;
+ goto dec_num_inflight;
+ }
+ work->unpriv_locked = true;
+ }
+ } else if (mlx5_cmd_is_throttle_opcode(in_to_opcode(in))) {
+ if (down_trylock(&dev->cmd.vars.throttle_sem)) {
+ ret = -EBUSY;
+ goto dec_num_inflight;
+ }
+ work->throttle_locked = true;
+ }
+
+ ret = cmd_exec(dev, in, in_size, out, out_size,
mlx5_cmd_exec_cb_handler, work, false);
- if (ret && atomic_dec_and_test(&ctx->num_inflight))
+ if (ret)
+ goto sem_up;
+
+ return 0;
+
+sem_up:
+ if (work->throttle_locked)
+ up(&dev->cmd.vars.throttle_sem);
+ if (work->unpriv_locked)
+ up(&dev->cmd.vars.unprivileged_sem);
+dec_num_inflight:
+ if (atomic_dec_and_test(&ctx->num_inflight))
complete(&ctx->inflight_done);
return ret;
@@ -2371,10 +2435,16 @@ int mlx5_cmd_enable(struct mlx5_core_dev *dev)
sema_init(&cmd->vars.sem, cmd->vars.max_reg_cmds);
sema_init(&cmd->vars.pages_sem, 1);
sema_init(&cmd->vars.throttle_sem, DIV_ROUND_UP(cmd->vars.max_reg_cmds, 2));
+ sema_init(&cmd->vars.unprivileged_sem,
+ DIV_ROUND_UP(cmd->vars.max_reg_cmds, 2));
+
+ xa_init(&cmd->vars.privileged_uids);
cmd->pool = dma_pool_create("mlx5_cmd", mlx5_core_dma_dev(dev), size, align, 0);
- if (!cmd->pool)
- return -ENOMEM;
+ if (!cmd->pool) {
+ err = -ENOMEM;
+ goto err_destroy_xa;
+ }
err = alloc_cmd_page(dev, cmd);
if (err)
@@ -2408,6 +2478,8 @@ err_cmd_page:
free_cmd_page(dev, cmd);
err_free_pool:
dma_pool_destroy(cmd->pool);
+err_destroy_xa:
+ xa_destroy(&dev->cmd.vars.privileged_uids);
return err;
}
@@ -2420,6 +2492,7 @@ void mlx5_cmd_disable(struct mlx5_core_dev *dev)
destroy_msg_cache(dev);
free_cmd_page(dev, cmd);
dma_pool_destroy(cmd->pool);
+ xa_destroy(&dev->cmd.vars.privileged_uids);
}
void mlx5_cmd_set_state(struct mlx5_core_dev *dev,
@@ -2427,3 +2500,18 @@ void mlx5_cmd_set_state(struct mlx5_core_dev *dev,
{
dev->cmd.state = cmdif_state;
}
+
+int mlx5_cmd_add_privileged_uid(struct mlx5_core_dev *dev, u16 uid)
+{
+ return xa_insert(&dev->cmd.vars.privileged_uids, uid,
+ xa_mk_value(uid), GFP_KERNEL);
+}
+EXPORT_SYMBOL(mlx5_cmd_add_privileged_uid);
+
+void mlx5_cmd_remove_privileged_uid(struct mlx5_core_dev *dev, u16 uid)
+{
+ void *data = xa_erase(&dev->cmd.vars.privileged_uids, uid);
+
+ WARN(!data, "Privileged UID %u does not exist\n", uid);
+}
+EXPORT_SYMBOL(mlx5_cmd_remove_privileged_uid);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
index 98d4306929f3..73cd74644378 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
@@ -46,6 +46,9 @@ mlx5_devlink_info_get(struct devlink *devlink, struct devlink_info_req *req,
u32 running_fw, stored_fw;
int err;
+ if (!mlx5_core_is_pf(dev))
+ return 0;
+
err = devlink_info_version_fixed_put(req, "fw.psid", dev->board_id);
if (err)
return err;
@@ -324,7 +327,8 @@ static const struct devlink_ops mlx5_devlink_ops = {
.rate_node_tx_max_set = mlx5_esw_devlink_rate_node_tx_max_set,
.rate_node_new = mlx5_esw_devlink_rate_node_new,
.rate_node_del = mlx5_esw_devlink_rate_node_del,
- .rate_leaf_parent_set = mlx5_esw_devlink_rate_parent_set,
+ .rate_leaf_parent_set = mlx5_esw_devlink_rate_leaf_parent_set,
+ .rate_node_parent_set = mlx5_esw_devlink_rate_node_parent_set,
#endif
#ifdef CONFIG_MLX5_SF_MANAGER
.port_new = mlx5_devlink_sf_port_new,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/reporter_vnic.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/reporter_vnic.c
index c7216e84ef8c..86253a89c24c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/reporter_vnic.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/reporter_vnic.c
@@ -13,6 +13,50 @@ struct mlx5_vnic_diag_stats {
__be64 query_vnic_env_out[MLX5_ST_SZ_QW(query_vnic_env_out)];
};
+static void mlx5_reporter_vnic_diagnose_counter_icm(struct mlx5_core_dev *dev,
+ struct devlink_fmsg *fmsg,
+ u16 vport_num, bool other_vport)
+{
+ u32 out_icm_reg[MLX5_ST_SZ_DW(vhca_icm_ctrl_reg)] = {};
+ u32 in_icm_reg[MLX5_ST_SZ_DW(vhca_icm_ctrl_reg)] = {};
+ u32 out_reg[MLX5_ST_SZ_DW(nic_cap_reg)] = {};
+ u32 in_reg[MLX5_ST_SZ_DW(nic_cap_reg)] = {};
+ u32 cur_alloc_icm;
+ int vhca_icm_ctrl;
+ u16 vhca_id;
+ int err;
+
+ err = mlx5_core_access_reg(dev, in_reg, sizeof(in_reg), out_reg,
+ sizeof(out_reg), MLX5_REG_NIC_CAP, 0, 0);
+ if (err) {
+ mlx5_core_warn(dev, "Reading nic_cap_reg failed. err = %d\n", err);
+ return;
+ }
+ vhca_icm_ctrl = MLX5_GET(nic_cap_reg, out_reg, vhca_icm_ctrl);
+ if (!vhca_icm_ctrl)
+ return;
+
+ MLX5_SET(vhca_icm_ctrl_reg, in_icm_reg, vhca_id_valid, other_vport);
+ if (other_vport) {
+ err = mlx5_vport_get_vhca_id(dev, vport_num, &vhca_id);
+ if (err) {
+ mlx5_core_warn(dev, "vport to vhca_id failed. vport_num = %d, err = %d\n",
+ vport_num, err);
+ return;
+ }
+ MLX5_SET(vhca_icm_ctrl_reg, in_icm_reg, vhca_id, vhca_id);
+ }
+ err = mlx5_core_access_reg(dev, in_icm_reg, sizeof(in_icm_reg),
+ out_icm_reg, sizeof(out_icm_reg),
+ MLX5_REG_VHCA_ICM_CTRL, 0, 0);
+ if (err) {
+ mlx5_core_warn(dev, "Reading vhca_icm_ctrl failed. err = %d\n", err);
+ return;
+ }
+ cur_alloc_icm = MLX5_GET(vhca_icm_ctrl_reg, out_icm_reg, cur_alloc_icm);
+ devlink_fmsg_u32_pair_put(fmsg, "icm_consumption", cur_alloc_icm);
+}
+
void mlx5_reporter_vnic_diagnose_counters(struct mlx5_core_dev *dev,
struct devlink_fmsg *fmsg,
u16 vport_num, bool other_vport)
@@ -59,6 +103,8 @@ void mlx5_reporter_vnic_diagnose_counters(struct mlx5_core_dev *dev,
devlink_fmsg_u64_pair_put(fmsg, "handled_pkt_steering_fail",
VNIC_ENV_GET64(&vnic, handled_pkt_steering_fail));
}
+ if (MLX5_CAP_GEN(dev, nic_cap_reg))
+ mlx5_reporter_vnic_diagnose_counter_icm(dev, fmsg, vport_num, other_vport);
devlink_fmsg_obj_nest_end(fmsg);
devlink_fmsg_pair_nest_end(fmsg);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dpll.c b/drivers/net/ethernet/mellanox/mlx5/core/dpll.c
index 31142f6cc372..1e5522a19483 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/dpll.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/dpll.c
@@ -242,7 +242,7 @@ static int mlx5_dpll_clock_quality_level_get(const struct dpll_device *dpll,
return 0;
}
errout:
- NL_SET_ERR_MSG_MOD(extack, "Invalid clock quality level obtained from firmware\n");
+ NL_SET_ERR_MSG_MOD(extack, "Invalid clock quality level obtained from firmware");
return -EINVAL;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 979fc56205e1..32ed4963b8ad 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -95,8 +95,6 @@ struct page_pool;
#define MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(mdev) \
MLX5_MPWRQ_LOG_STRIDE_SZ(mdev, order_base_2(MLX5E_RX_MAX_HEAD))
-#define MLX5_MPWRQ_MAX_LOG_WQE_SZ 18
-
/* Keep in sync with mlx5e_mpwrq_log_wqe_sz.
* These are theoretical maximums, which can be further restricted by
* capabilities. These values are used for static resource allocations and
@@ -232,16 +230,22 @@ struct mlx5e_rx_wqe_cyc {
DECLARE_FLEX_ARRAY(struct mlx5_wqe_data_seg, data);
};
-struct mlx5e_umr_wqe {
+struct mlx5e_umr_wqe_hdr {
struct mlx5_wqe_ctrl_seg ctrl;
struct mlx5_wqe_umr_ctrl_seg uctrl;
struct mlx5_mkey_seg mkc;
+};
+
+struct mlx5e_umr_wqe {
+ struct mlx5e_umr_wqe_hdr hdr;
union {
DECLARE_FLEX_ARRAY(struct mlx5_mtt, inline_mtts);
DECLARE_FLEX_ARRAY(struct mlx5_klm, inline_klms);
DECLARE_FLEX_ARRAY(struct mlx5_ksm, inline_ksms);
};
};
+static_assert(offsetof(struct mlx5e_umr_wqe, inline_mtts) == sizeof(struct mlx5e_umr_wqe_hdr),
+ "struct members should be included in struct mlx5e_umr_wqe_hdr, not in struct mlx5e_umr_wqe");
enum mlx5e_priv_flag {
MLX5E_PFLAG_RX_CQE_BASED_MODER,
@@ -386,7 +390,6 @@ enum {
MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE,
MLX5E_SQ_STATE_PENDING_XSK_TX,
MLX5E_SQ_STATE_PENDING_TLS_RX_RESYNC,
- MLX5E_SQ_STATE_XDP_MULTIBUF,
MLX5E_NUM_SQ_STATES, /* Must be kept last */
};
@@ -395,6 +398,7 @@ struct mlx5e_tx_mpwqe {
struct mlx5e_tx_wqe *wqe;
u32 bytes_count;
u8 ds_count;
+ u8 ds_count_max;
u8 pkt_count;
u8 inline_on;
};
@@ -660,7 +664,7 @@ struct mlx5e_rq {
} wqe;
struct {
struct mlx5_wq_ll wq;
- struct mlx5e_umr_wqe umr_wqe;
+ struct mlx5e_umr_wqe_hdr umr_wqe;
struct mlx5e_mpw_info *info;
mlx5e_fp_skb_from_cqe_mpwrq skb_from_cqe_mpwrq;
__be32 umr_mkey_be;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
index 1e8b7d330701..b5c3a2a9d2a5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
@@ -84,9 +84,9 @@ enum {
MLX5E_ARFS_FT_LEVEL = MLX5E_INNER_TTC_FT_LEVEL + 1,
#endif
#ifdef CONFIG_MLX5_EN_IPSEC
- MLX5E_ACCEL_FS_POL_FT_LEVEL = MLX5E_INNER_TTC_FT_LEVEL + 1,
- MLX5E_ACCEL_FS_ESP_FT_LEVEL,
+ MLX5E_ACCEL_FS_ESP_FT_LEVEL = MLX5E_INNER_TTC_FT_LEVEL + 1,
MLX5E_ACCEL_FS_ESP_FT_ERR_LEVEL,
+ MLX5E_ACCEL_FS_POL_FT_LEVEL,
MLX5E_ACCEL_FS_ESP_FT_ROCE_LEVEL,
#endif
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
index 64b62ed17b07..aa36670d9a36 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
@@ -10,6 +10,9 @@
#include <net/page_pool/types.h>
#include <net/xdp_sock_drv.h>
+#define MLX5_MPWRQ_MAX_LOG_WQE_SZ 18
+#define MLX5_REP_MPWRQ_MAX_LOG_WQE_SZ 17
+
static u8 mlx5e_mpwrq_min_page_shift(struct mlx5_core_dev *mdev)
{
u8 min_page_shift = MLX5_CAP_GEN_2(mdev, log_min_mkey_entity_size);
@@ -103,18 +106,22 @@ u8 mlx5e_mpwrq_log_wqe_sz(struct mlx5_core_dev *mdev, u8 page_shift,
enum mlx5e_mpwrq_umr_mode umr_mode)
{
u8 umr_entry_size = mlx5e_mpwrq_umr_entry_size(umr_mode);
- u8 max_pages_per_wqe, max_log_mpwqe_size;
+ u8 max_pages_per_wqe, max_log_wqe_size_calc;
+ u8 max_log_wqe_size_cap;
u16 max_wqe_size;
/* Keep in sync with MLX5_MPWRQ_MAX_PAGES_PER_WQE. */
max_wqe_size = mlx5e_get_max_sq_aligned_wqebbs(mdev) * MLX5_SEND_WQE_BB;
max_pages_per_wqe = ALIGN_DOWN(max_wqe_size - sizeof(struct mlx5e_umr_wqe),
MLX5_UMR_FLEX_ALIGNMENT) / umr_entry_size;
- max_log_mpwqe_size = ilog2(max_pages_per_wqe) + page_shift;
+ max_log_wqe_size_calc = ilog2(max_pages_per_wqe) + page_shift;
+
+ WARN_ON_ONCE(max_log_wqe_size_calc < MLX5E_ORDER2_MAX_PACKET_MTU);
- WARN_ON_ONCE(max_log_mpwqe_size < MLX5E_ORDER2_MAX_PACKET_MTU);
+ max_log_wqe_size_cap = mlx5_core_is_ecpf(mdev) ?
+ MLX5_REP_MPWRQ_MAX_LOG_WQE_SZ : MLX5_MPWRQ_MAX_LOG_WQE_SZ;
- return min_t(u8, max_log_mpwqe_size, MLX5_MPWRQ_MAX_LOG_WQE_SZ);
+ return min_t(u8, max_log_wqe_size_calc, max_log_wqe_size_cap);
}
u8 mlx5e_mpwrq_pages_per_wqe(struct mlx5_core_dev *mdev, u8 page_shift,
@@ -1240,7 +1247,6 @@ void mlx5e_build_xdpsq_param(struct mlx5_core_dev *mdev,
mlx5e_build_sq_param_common(mdev, param);
MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
param->is_mpw = MLX5E_GET_PFLAG(params, MLX5E_PFLAG_XDP_TX_MPWQE);
- param->is_xdp_mb = !mlx5e_rx_is_linear_skb(mdev, params, xsk);
mlx5e_build_tx_cq_param(mdev, params, &param->cqp);
}
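(Annotation, not part of the patch.) The params.c hunk above computes a log WQE size from the page count and then clamps it to a smaller cap on ECPF devices. A rough standalone walk-through of the same arithmetic follows; the input values are made-up examples, not hardware capabilities.

/* Standalone arithmetic sketch of the log-WQE-size calculation above.
 * All inputs are example values, not hardware numbers. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint8_t ilog2_u32(uint32_t v)    /* floor(log2(v)), v > 0 */
{
	uint8_t r = 0;

	while (v >>= 1)
		r++;
	return r;
}

static uint8_t log_wqe_sz(uint32_t max_pages_per_wqe, uint8_t page_shift,
			  bool is_ecpf)
{
	/* 18 and 17 mirror MLX5_MPWRQ_MAX_LOG_WQE_SZ and
	 * MLX5_REP_MPWRQ_MAX_LOG_WQE_SZ from the hunk above. */
	uint8_t cap = is_ecpf ? 17 : 18;
	uint8_t calc = ilog2_u32(max_pages_per_wqe) + page_shift;

	return calc < cap ? calc : cap;
}

int main(void)
{
	/* e.g. 64 pages of 4 KiB: log2(64) + 12 = 18, clamped to 17 on ECPF */
	printf("PF: %u, ECPF: %u\n",
	       log_wqe_sz(64, 12, false), log_wqe_sz(64, 12, true));
	return 0;
}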
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.h b/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
index 3f8986f9d862..bd5877acc5b1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
@@ -33,7 +33,6 @@ struct mlx5e_sq_param {
struct mlx5_wq_param wq;
bool is_mpw;
bool is_tls;
- bool is_xdp_mb;
u16 stop_room;
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c
index f62fbfb67a1b..6049ccf475bc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c
@@ -80,6 +80,7 @@ int mlx5_port_set_eth_ptys(struct mlx5_core_dev *dev, bool an_disable,
int mlx5e_port_linkspeed(struct mlx5_core_dev *mdev, u32 *speed)
{
struct mlx5_port_eth_proto eproto;
+ const struct mlx5_link_info *info;
bool force_legacy = false;
bool ext;
int err;
@@ -94,9 +95,13 @@ int mlx5e_port_linkspeed(struct mlx5_core_dev *mdev, u32 *speed)
if (err)
goto out;
}
- *speed = mlx5_port_ptys2speed(mdev, eproto.oper, force_legacy);
- if (!(*speed))
+ info = mlx5_port_ptys2info(mdev, eproto.oper, force_legacy);
+ if (!info) {
+ *speed = SPEED_UNKNOWN;
err = -EINVAL;
+ goto out;
+ }
+ *speed = info->speed;
out:
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
index 5d128c5b4529..0f5d7ea8956f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
@@ -48,15 +48,10 @@ mlx5_esw_bridge_lag_rep_get(struct net_device *dev, struct mlx5_eswitch *esw)
struct list_head *iter;
netdev_for_each_lower_dev(dev, lower, iter) {
- struct mlx5_core_dev *mdev;
- struct mlx5e_priv *priv;
-
if (!mlx5e_eswitch_rep(lower))
continue;
- priv = netdev_priv(lower);
- mdev = priv->mdev;
- if (mlx5_lag_is_shared_fdb(mdev) && mlx5_esw_bridge_dev_same_esw(lower, esw))
+ if (mlx5_esw_bridge_dev_same_esw(lower, esw))
return lower;
}
@@ -125,7 +120,7 @@ static bool mlx5_esw_bridge_is_local(struct net_device *dev, struct net_device *
priv = netdev_priv(rep);
mdev = priv->mdev;
if (netif_is_lag_master(dev))
- return mlx5_lag_is_shared_fdb(mdev) && mlx5_lag_is_master(mdev);
+ return mlx5_lag_is_master(mdev);
return true;
}
@@ -455,6 +450,9 @@ static int mlx5_esw_bridge_switchdev_event(struct notifier_block *nb,
if (!rep)
return NOTIFY_DONE;
+ if (netif_is_lag_master(dev) && !mlx5_lag_is_shared_fdb(esw->dev))
+ return NOTIFY_DONE;
+
switch (event) {
case SWITCHDEV_FDB_ADD_TO_BRIDGE:
fdb_info = container_of(info,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
index 25d751eba99b..e75759533ae0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
@@ -317,10 +317,8 @@ mlx5e_rx_reporter_diagnose_common_ptp_config(struct mlx5e_priv *priv, struct mlx
}
static void
-mlx5e_rx_reporter_diagnose_common_config(struct devlink_health_reporter *reporter,
- struct devlink_fmsg *fmsg)
+mlx5e_rx_reporter_diagnose_common_config(struct mlx5e_priv *priv, struct devlink_fmsg *fmsg)
{
- struct mlx5e_priv *priv = devlink_health_reporter_priv(reporter);
struct mlx5e_rq *generic_rq = &priv->channels.c[0]->rq;
struct mlx5e_ptp *ptp_ch = priv->channels.ptp;
@@ -340,20 +338,100 @@ static void mlx5e_rx_reporter_build_diagnose_output_ptp_rq(struct mlx5e_rq *rq,
devlink_fmsg_obj_nest_end(fmsg);
}
-static int mlx5e_rx_reporter_diagnose(struct devlink_health_reporter *reporter,
- struct devlink_fmsg *fmsg,
- struct netlink_ext_ack *extack)
+static void mlx5e_rx_reporter_diagnose_rx_res_dir_tirns(struct mlx5e_rx_res *rx_res,
+ struct devlink_fmsg *fmsg)
{
- struct mlx5e_priv *priv = devlink_health_reporter_priv(reporter);
- struct mlx5e_ptp *ptp_ch = priv->channels.ptp;
+ unsigned int max_nch = mlx5e_rx_res_get_max_nch(rx_res);
int i;
- mutex_lock(&priv->state_lock);
+ devlink_fmsg_arr_pair_nest_start(fmsg, "Direct TIRs");
- if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
- goto unlock;
+ for (i = 0; i < max_nch; i++) {
+ devlink_fmsg_obj_nest_start(fmsg);
+
+ devlink_fmsg_u32_pair_put(fmsg, "ix", i);
+ devlink_fmsg_u32_pair_put(fmsg, "tirn", mlx5e_rx_res_get_tirn_direct(rx_res, i));
+ devlink_fmsg_u32_pair_put(fmsg, "rqtn", mlx5e_rx_res_get_rqtn_direct(rx_res, i));
+
+ devlink_fmsg_obj_nest_end(fmsg);
+ }
+
+ devlink_fmsg_arr_pair_nest_end(fmsg);
+}
+
+static void mlx5e_rx_reporter_diagnose_rx_res_rss_tirn(struct mlx5e_rss *rss, bool inner,
+ struct devlink_fmsg *fmsg)
+{
+ bool found_valid_tir = false;
+ int tt;
+
+ for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
+ if (!mlx5e_rss_valid_tir(rss, tt, inner))
+ continue;
+
+ if (!found_valid_tir) {
+ char *tir_msg = inner ? "Inner TIRs Numbers" : "TIRs Numbers";
+
+ found_valid_tir = true;
+ devlink_fmsg_arr_pair_nest_start(fmsg, tir_msg);
+ }
+
+ devlink_fmsg_obj_nest_start(fmsg);
+ devlink_fmsg_string_pair_put(fmsg, "tt", mlx5_ttc_get_name(tt));
+ devlink_fmsg_u32_pair_put(fmsg, "tirn", mlx5e_rss_get_tirn(rss, tt, inner));
+ devlink_fmsg_obj_nest_end(fmsg);
+ }
+
+ if (found_valid_tir)
+ devlink_fmsg_arr_pair_nest_end(fmsg);
+}
+
+static void mlx5e_rx_reporter_diagnose_rx_res_rss_ix(struct mlx5e_rx_res *rx_res, u32 rss_idx,
+ struct devlink_fmsg *fmsg)
+{
+ struct mlx5e_rss *rss = mlx5e_rx_res_rss_get(rx_res, rss_idx);
+
+ if (!rss)
+ return;
+
+ devlink_fmsg_obj_nest_start(fmsg);
+
+ devlink_fmsg_u32_pair_put(fmsg, "Index", rss_idx);
+ devlink_fmsg_u32_pair_put(fmsg, "rqtn", mlx5e_rss_get_rqtn(rss));
+ mlx5e_rx_reporter_diagnose_rx_res_rss_tirn(rss, false, fmsg);
+ if (mlx5e_rss_get_inner_ft_support(rss))
+ mlx5e_rx_reporter_diagnose_rx_res_rss_tirn(rss, true, fmsg);
+
+ devlink_fmsg_obj_nest_end(fmsg);
+}
+
+static void mlx5e_rx_reporter_diagnose_rx_res_rss(struct mlx5e_rx_res *rx_res,
+ struct devlink_fmsg *fmsg)
+{
+ int rss_ix;
+
+ devlink_fmsg_arr_pair_nest_start(fmsg, "RSS");
+ for (rss_ix = 0; rss_ix < MLX5E_MAX_NUM_RSS; rss_ix++)
+ mlx5e_rx_reporter_diagnose_rx_res_rss_ix(rx_res, rss_ix, fmsg);
+ devlink_fmsg_arr_pair_nest_end(fmsg);
+}
+
+static void mlx5e_rx_reporter_diagnose_rx_res(struct mlx5e_priv *priv,
+ struct devlink_fmsg *fmsg)
+{
+ struct mlx5e_rx_res *rx_res = priv->rx_res;
+
+ mlx5e_health_fmsg_named_obj_nest_start(fmsg, "RX resources");
+ mlx5e_rx_reporter_diagnose_rx_res_dir_tirns(rx_res, fmsg);
+ mlx5e_rx_reporter_diagnose_rx_res_rss(rx_res, fmsg);
+ mlx5e_health_fmsg_named_obj_nest_end(fmsg);
+}
+
+static void mlx5e_rx_reporter_diagnose_rqs(struct mlx5e_priv *priv, struct devlink_fmsg *fmsg)
+{
+ struct mlx5e_ptp *ptp_ch = priv->channels.ptp;
+ int i;
- mlx5e_rx_reporter_diagnose_common_config(reporter, fmsg);
devlink_fmsg_arr_pair_nest_start(fmsg, "RQs");
for (i = 0; i < priv->channels.num; i++) {
@@ -367,7 +445,24 @@ static int mlx5e_rx_reporter_diagnose(struct devlink_health_reporter *reporter,
}
if (ptp_ch && test_bit(MLX5E_PTP_STATE_RX, ptp_ch->state))
mlx5e_rx_reporter_build_diagnose_output_ptp_rq(&ptp_ch->rq, fmsg);
+
devlink_fmsg_arr_pair_nest_end(fmsg);
+}
+
+static int mlx5e_rx_reporter_diagnose(struct devlink_health_reporter *reporter,
+ struct devlink_fmsg *fmsg,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5e_priv *priv = devlink_health_reporter_priv(reporter);
+
+ mutex_lock(&priv->state_lock);
+
+ if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
+ goto unlock;
+
+ mlx5e_rx_reporter_diagnose_common_config(priv, fmsg);
+ mlx5e_rx_reporter_diagnose_rqs(priv, fmsg);
+ mlx5e_rx_reporter_diagnose_rx_res(priv, fmsg);
unlock:
mutex_unlock(&priv->state_lock);
return 0;
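(Annotation, not part of the patch.) The reporter_rx.c hunks above build the diagnose output as nested arrays of objects ("Direct TIRs", "RSS"), with every *_nest_start() paired with a *_nest_end(). The toy userspace sketch below only illustrates that output shape; its print helpers are stand-ins, not the devlink fmsg API, and the object numbers are placeholders.

/* Toy sketch of the nested diagnose output built above: an array of
 * objects, one per direct TIR.  The emitter is a stand-in, not devlink. */
#include <stdio.h>

static void arr_start(const char *name) { printf("\"%s\": [\n", name); }
static void arr_end(void)               { printf("]\n"); }
static void obj_start(void)             { printf("  {"); }
static void obj_end(void)               { printf(" },\n"); }
static void u32_pair(const char *k, unsigned int v) { printf(" %s=%u", k, v); }

int main(void)
{
	unsigned int max_nch = 4;       /* example channel count */

	arr_start("Direct TIRs");
	for (unsigned int ix = 0; ix < max_nch; ix++) {
		obj_start();
		u32_pair("ix", ix);
		u32_pair("tirn", 0x100 + ix);   /* placeholder object numbers */
		u32_pair("rqtn", 0x200 + ix);
		obj_end();
	}
	arr_end();
	return 0;
}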
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
index 09433b91be17..532c7fa94d17 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
@@ -16,7 +16,6 @@ static const char * const sq_sw_state_type_name[] = {
[MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE] = "vlan_need_l2_inline",
[MLX5E_SQ_STATE_PENDING_XSK_TX] = "pending_xsk_tx",
[MLX5E_SQ_STATE_PENDING_TLS_RX_RESYNC] = "pending_tls_rx_resync",
- [MLX5E_SQ_STATE_XDP_MULTIBUF] = "xdp_multibuf",
};
static int mlx5e_wait_for_sq_flush(struct mlx5e_txqsq *sq)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rss.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rss.c
index 5f742f896600..74cd111ee320 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rss.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rss.c
@@ -81,6 +81,11 @@ struct mlx5e_rss {
refcount_t refcnt;
};
+bool mlx5e_rss_get_inner_ft_support(struct mlx5e_rss *rss)
+{
+ return rss->inner_ft_support;
+}
+
void mlx5e_rss_params_indir_modify_actual_size(struct mlx5e_rss *rss, u32 num_channels)
{
rss->indir.actual_table_size = mlx5e_rqt_size(rss->mdev, num_channels);
@@ -156,6 +161,7 @@ static void mlx5e_rss_params_init(struct mlx5e_rss *rss)
{
enum mlx5_traffic_types tt;
+ rss->hash.symmetric = true;
rss->hash.hfunc = ETH_RSS_HASH_TOP;
netdev_rss_key_fill(rss->hash.toeplitz_hash_key,
sizeof(rss->hash.toeplitz_hash_key));
@@ -449,6 +455,16 @@ u32 mlx5e_rss_get_tirn(struct mlx5e_rss *rss, enum mlx5_traffic_types tt,
return mlx5e_tir_get_tirn(tir);
}
+u32 mlx5e_rss_get_rqtn(struct mlx5e_rss *rss)
+{
+ return mlx5e_rqt_get_rqtn(&rss->rqt);
+}
+
+bool mlx5e_rss_valid_tir(struct mlx5e_rss *rss, enum mlx5_traffic_types tt, bool inner)
+{
+ return !!rss_get_tir(rss, tt, inner);
+}
+
/* Fill the "tirn" output parameter.
* Create the requested TIR if it's its first usage.
*/
@@ -551,7 +567,7 @@ inner_tir:
return final_err;
}
-int mlx5e_rss_get_rxfh(struct mlx5e_rss *rss, u32 *indir, u8 *key, u8 *hfunc)
+int mlx5e_rss_get_rxfh(struct mlx5e_rss *rss, u32 *indir, u8 *key, u8 *hfunc, bool *symmetric)
{
if (indir)
memcpy(indir, rss->indir.table,
@@ -564,11 +580,14 @@ int mlx5e_rss_get_rxfh(struct mlx5e_rss *rss, u32 *indir, u8 *key, u8 *hfunc)
if (hfunc)
*hfunc = rss->hash.hfunc;
+ if (symmetric)
+ *symmetric = rss->hash.symmetric;
+
return 0;
}
int mlx5e_rss_set_rxfh(struct mlx5e_rss *rss, const u32 *indir,
- const u8 *key, const u8 *hfunc,
+ const u8 *key, const u8 *hfunc, const bool *symmetric,
u32 *rqns, u32 *vhca_ids, unsigned int num_rqns)
{
bool changed_indir = false;
@@ -608,6 +627,11 @@ int mlx5e_rss_set_rxfh(struct mlx5e_rss *rss, const u32 *indir,
rss->indir.actual_table_size * sizeof(*rss->indir.table));
}
+ if (symmetric) {
+ rss->hash.symmetric = *symmetric;
+ changed_hash = true;
+ }
+
if (changed_indir && rss->enabled) {
err = mlx5e_rss_apply(rss, rqns, vhca_ids, num_rqns);
if (err) {
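(Annotation, not part of the patch.) In the rss.c hunk above, mlx5e_rss_get_rxfh() and mlx5e_rss_set_rxfh() gain a symmetric argument passed by pointer, where NULL means "not requested / leave unchanged". A small standalone sketch of that convention, with illustrative names:

/* Standalone sketch of the "NULL pointer means leave this field alone"
 * convention used for the new symmetric argument above. */
#include <stdbool.h>
#include <stdio.h>

struct hash_cfg {
	bool symmetric;
	unsigned char key[40];
};

static void cfg_get(const struct hash_cfg *cfg, bool *symmetric)
{
	if (symmetric)                  /* only fill what the caller asked for */
		*symmetric = cfg->symmetric;
}

static void cfg_set(struct hash_cfg *cfg, const bool *symmetric)
{
	if (symmetric)                  /* NULL: keep the current setting */
		cfg->symmetric = *symmetric;
}

int main(void)
{
	struct hash_cfg cfg = { .symmetric = true };
	bool on = false, out;

	cfg_set(&cfg, NULL);            /* no change requested */
	cfg_set(&cfg, &on);             /* explicitly disable */
	cfg_get(&cfg, &out);
	printf("symmetric=%d\n", out);
	return 0;
}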
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rss.h b/drivers/net/ethernet/mellanox/mlx5/core/en/rss.h
index d0df98963c8d..8ac902190010 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rss.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rss.h
@@ -32,8 +32,11 @@ void mlx5e_rss_refcnt_inc(struct mlx5e_rss *rss);
void mlx5e_rss_refcnt_dec(struct mlx5e_rss *rss);
unsigned int mlx5e_rss_refcnt_read(struct mlx5e_rss *rss);
+bool mlx5e_rss_get_inner_ft_support(struct mlx5e_rss *rss);
u32 mlx5e_rss_get_tirn(struct mlx5e_rss *rss, enum mlx5_traffic_types tt,
bool inner);
+bool mlx5e_rss_valid_tir(struct mlx5e_rss *rss, enum mlx5_traffic_types tt, bool inner);
+u32 mlx5e_rss_get_rqtn(struct mlx5e_rss *rss);
int mlx5e_rss_obtain_tirn(struct mlx5e_rss *rss,
enum mlx5_traffic_types tt,
const struct mlx5e_packet_merge_param *init_pkt_merge_param,
@@ -44,9 +47,9 @@ void mlx5e_rss_disable(struct mlx5e_rss *rss);
int mlx5e_rss_packet_merge_set_param(struct mlx5e_rss *rss,
struct mlx5e_packet_merge_param *pkt_merge_param);
-int mlx5e_rss_get_rxfh(struct mlx5e_rss *rss, u32 *indir, u8 *key, u8 *hfunc);
+int mlx5e_rss_get_rxfh(struct mlx5e_rss *rss, u32 *indir, u8 *key, u8 *hfunc, bool *symmetric);
int mlx5e_rss_set_rxfh(struct mlx5e_rss *rss, const u32 *indir,
- const u8 *key, const u8 *hfunc,
+ const u8 *key, const u8 *hfunc, const bool *symmetric,
u32 *rqns, u32 *vhca_ids, unsigned int num_rqns);
struct mlx5e_rss_params_hash mlx5e_rss_get_hash(struct mlx5e_rss *rss);
u8 mlx5e_rss_get_hash_fields(struct mlx5e_rss *rss, enum mlx5_traffic_types tt);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c
index a86eade9a9e0..5fcbe47337b0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c
@@ -5,8 +5,6 @@
#include "channels.h"
#include "params.h"
-#define MLX5E_MAX_NUM_RSS 16
-
struct mlx5e_rx_res {
struct mlx5_core_dev *mdev; /* primary */
enum mlx5e_rx_res_features features;
@@ -196,7 +194,7 @@ void mlx5e_rx_res_rss_set_indir_uniform(struct mlx5e_rx_res *res, unsigned int n
}
int mlx5e_rx_res_rss_get_rxfh(struct mlx5e_rx_res *res, u32 rss_idx,
- u32 *indir, u8 *key, u8 *hfunc)
+ u32 *indir, u8 *key, u8 *hfunc, bool *symmetric)
{
struct mlx5e_rss *rss;
@@ -207,11 +205,12 @@ int mlx5e_rx_res_rss_get_rxfh(struct mlx5e_rx_res *res, u32 rss_idx,
if (!rss)
return -ENOENT;
- return mlx5e_rss_get_rxfh(rss, indir, key, hfunc);
+ return mlx5e_rss_get_rxfh(rss, indir, key, hfunc, symmetric);
}
int mlx5e_rx_res_rss_set_rxfh(struct mlx5e_rx_res *res, u32 rss_idx,
- const u32 *indir, const u8 *key, const u8 *hfunc)
+ const u32 *indir, const u8 *key, const u8 *hfunc,
+ const bool *symmetric)
{
u32 *vhca_ids = get_vhca_ids(res, 0);
struct mlx5e_rss *rss;
@@ -223,8 +222,8 @@ int mlx5e_rx_res_rss_set_rxfh(struct mlx5e_rx_res *res, u32 rss_idx,
if (!rss)
return -ENOENT;
- return mlx5e_rss_set_rxfh(rss, indir, key, hfunc, res->rss_rqns, vhca_ids,
- res->rss_nch);
+ return mlx5e_rss_set_rxfh(rss, indir, key, hfunc, symmetric,
+ res->rss_rqns, vhca_ids, res->rss_nch);
}
int mlx5e_rx_res_rss_get_hash_fields(struct mlx5e_rx_res *res, u32 rss_idx,
@@ -497,6 +496,11 @@ void mlx5e_rx_res_destroy(struct mlx5e_rx_res *res)
mlx5e_rx_res_free(res);
}
+unsigned int mlx5e_rx_res_get_max_nch(struct mlx5e_rx_res *res)
+{
+ return res->max_nch;
+}
+
u32 mlx5e_rx_res_get_tirn_direct(struct mlx5e_rx_res *res, unsigned int ix)
{
return mlx5e_tir_get_tirn(&res->channels[ix].direct_tir);
@@ -522,7 +526,7 @@ u32 mlx5e_rx_res_get_tirn_ptp(struct mlx5e_rx_res *res)
return mlx5e_tir_get_tirn(&res->ptp.tir);
}
-static u32 mlx5e_rx_res_get_rqtn_direct(struct mlx5e_rx_res *res, unsigned int ix)
+u32 mlx5e_rx_res_get_rqtn_direct(struct mlx5e_rx_res *res, unsigned int ix)
{
return mlx5e_rqt_get_rqtn(&res->channels[ix].direct_rqt);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h
index 7b1a9f0f1874..3e09d91281af 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h
@@ -10,6 +10,8 @@
#include "fs.h"
#include "rss.h"
+#define MLX5E_MAX_NUM_RSS 16
+
struct mlx5e_rx_res;
struct mlx5e_channels;
@@ -34,6 +36,9 @@ u32 mlx5e_rx_res_get_tirn_direct(struct mlx5e_rx_res *res, unsigned int ix);
u32 mlx5e_rx_res_get_tirn_rss(struct mlx5e_rx_res *res, enum mlx5_traffic_types tt);
u32 mlx5e_rx_res_get_tirn_rss_inner(struct mlx5e_rx_res *res, enum mlx5_traffic_types tt);
u32 mlx5e_rx_res_get_tirn_ptp(struct mlx5e_rx_res *res);
+u32 mlx5e_rx_res_get_rqtn_direct(struct mlx5e_rx_res *res, unsigned int ix);
+unsigned int mlx5e_rx_res_get_max_nch(struct mlx5e_rx_res *res);
+bool mlx5_rx_res_rss_inner_ft_support(struct mlx5e_rx_res *res);
/* Activate/deactivate API */
void mlx5e_rx_res_channels_activate(struct mlx5e_rx_res *res, struct mlx5e_channels *chs);
@@ -44,9 +49,10 @@ void mlx5e_rx_res_xsk_update(struct mlx5e_rx_res *res, struct mlx5e_channels *ch
/* Configuration API */
void mlx5e_rx_res_rss_set_indir_uniform(struct mlx5e_rx_res *res, unsigned int nch);
int mlx5e_rx_res_rss_get_rxfh(struct mlx5e_rx_res *res, u32 rss_idx,
- u32 *indir, u8 *key, u8 *hfunc);
+ u32 *indir, u8 *key, u8 *hfunc, bool *symmetric);
int mlx5e_rx_res_rss_set_rxfh(struct mlx5e_rx_res *res, u32 rss_idx,
- const u32 *indir, const u8 *key, const u8 *hfunc);
+ const u32 *indir, const u8 *key, const u8 *hfunc,
+ const bool *symmetric);
int mlx5e_rx_res_rss_get_hash_fields(struct mlx5e_rx_res *res, u32 rss_idx,
enum mlx5_traffic_types tt);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ct.c
index feeb41693c17..b6cabe829f19 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ct.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ct.c
@@ -5,6 +5,16 @@
#include "en/tc_priv.h"
#include "en/tc_ct.h"
+static bool
+tc_act_can_offload_ct(struct mlx5e_tc_act_parse_state *parse_state,
+ const struct flow_action_entry *act,
+ int act_index,
+ struct mlx5_flow_attr *attr)
+{
+ return !((act->ct.action & TCA_CT_ACT_COMMIT) &&
+ flow_action_is_last_entry(parse_state->flow_action, act));
+}
+
static int
tc_act_parse_ct(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
@@ -56,6 +66,7 @@ tc_act_is_missable_ct(const struct flow_action_entry *act)
}
struct mlx5e_tc_act mlx5e_tc_act_ct = {
+ .can_offload = tc_act_can_offload_ct,
.parse_action = tc_act_parse_ct,
.post_parse = tc_act_post_parse_ct,
.is_multi_table_act = tc_act_is_multi_table_act_ct,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.c
index 8218c892b161..7819fb297280 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.c
@@ -593,3 +593,8 @@ mlx5e_tc_meter_get_stats(struct mlx5e_flow_meter_handle *meter,
*drops = packets2;
*lastuse = max_t(u64, lastuse1, lastuse2);
}
+
+int mlx5e_flow_meter_get_base_id(struct mlx5e_flow_meter_handle *meter)
+{
+ return meter->meters_obj->base_id;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.h
index 9b795cd106bb..d6afb6556875 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.h
@@ -72,4 +72,17 @@ void
mlx5e_tc_meter_get_stats(struct mlx5e_flow_meter_handle *meter,
u64 *bytes, u64 *packets, u64 *drops, u64 *lastuse);
+#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
+
+int mlx5e_flow_meter_get_base_id(struct mlx5e_flow_meter_handle *meter);
+
+#else /* CONFIG_MLX5_CLS_ACT */
+
+static inline int
+mlx5e_flow_meter_get_base_id(struct mlx5e_flow_meter_handle *meter)
+{
+ return 0;
+}
+#endif /* CONFIG_MLX5_CLS_ACT */
+
#endif /* __MLX5_EN_FLOW_METER_H__ */
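(Annotation, not part of the patch.) meter.h above declares mlx5e_flow_meter_get_base_id() only when CONFIG_MLX5_CLS_ACT is enabled and supplies a static inline stub otherwise, so callers compile either way without #ifdefs. A minimal standalone illustration of that pattern; FEATURE_ENABLED is an arbitrary stand-in for a kernel CONFIG_* option.

/* Standalone illustration of the "real declaration vs. static inline
 * stub" pattern used in the meter.h hunk above. */
#include <stdio.h>

#define FEATURE_ENABLED 0       /* flip to 1 for the "real" implementation */

#if FEATURE_ENABLED

static int feature_get_id(void)
{
	return 42;              /* pretend this queries real state */
}

#else /* !FEATURE_ENABLED */

static inline int feature_get_id(void)
{
	return 0;               /* harmless default when compiled out */
}

#endif

int main(void)
{
	printf("id=%d\n", feature_get_id());    /* callers never need #ifdefs */
	return 0;
}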
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
index a065e8fafb1d..81332cd4a582 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
@@ -1349,6 +1349,32 @@ mlx5_tc_ct_block_flow_offload_stats(struct mlx5_ct_ft *ft,
return 0;
}
+static bool
+mlx5_tc_ct_filter_legacy_non_nic_flows(struct mlx5_ct_ft *ft,
+ struct flow_cls_offload *flow)
+{
+ struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
+ struct mlx5_tc_ct_priv *ct_priv = ft->ct_priv;
+ struct flow_match_meta match;
+ struct net_device *netdev;
+ bool same_dev = false;
+
+ if (!is_mdev_legacy_mode(ct_priv->dev) ||
+ !flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_META))
+ return true;
+
+ flow_rule_match_meta(rule, &match);
+
+ if (!(match.key->ingress_ifindex & match.mask->ingress_ifindex))
+ return true;
+
+ netdev = dev_get_by_index(&init_net, match.key->ingress_ifindex);
+ same_dev = ct_priv->netdev == netdev;
+ dev_put(netdev);
+
+ return same_dev;
+}
+
static int
mlx5_tc_ct_block_flow_offload(enum tc_setup_type type, void *type_data,
void *cb_priv)
@@ -1361,6 +1387,9 @@ mlx5_tc_ct_block_flow_offload(enum tc_setup_type type, void *type_data,
switch (f->command) {
case FLOW_CLS_REPLACE:
+ if (!mlx5_tc_ct_filter_legacy_non_nic_flows(ft, f))
+ return -EOPNOTSUPP;
+
return mlx5_tc_ct_block_flow_offload_add(ft, f);
case FLOW_CLS_DESTROY:
return mlx5_tc_ct_block_flow_offload_del(ft, f);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
index 721f35e59757..2162d776fe35 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
@@ -31,8 +31,7 @@ static void mlx5e_tc_tun_route_attr_cleanup(struct mlx5e_tc_tun_route_attr *attr
{
if (attr->n)
neigh_release(attr->n);
- if (attr->route_dev)
- dev_put(attr->route_dev);
+ dev_put(attr->route_dev);
}
struct mlx5e_tc_tunnel *mlx5e_get_tc_tun(struct net_device *tunnel_dev)
@@ -68,16 +67,14 @@ static int get_route_and_out_devs(struct mlx5e_priv *priv,
* while holding rcu read lock. Take the net_device for correctness
* sake.
*/
- if (uplink_upper)
- dev_hold(uplink_upper);
+ dev_hold(uplink_upper);
rcu_read_unlock();
dst_is_lag_dev = (uplink_upper &&
netif_is_lag_master(uplink_upper) &&
real_dev == uplink_upper &&
mlx5_lag_is_sriov(priv->mdev));
- if (uplink_upper)
- dev_put(uplink_upper);
+ dev_put(uplink_upper);
/* if the egress device isn't on the same HW e-switch or
* it's a LAG device, use the uplink
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
index e7e01f3298ef..a0fc76a1bc08 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
@@ -42,8 +42,7 @@ static int mlx5e_set_int_port_tunnel(struct mlx5e_priv *priv,
&attr->action, out_index);
out:
- if (route_dev)
- dev_put(route_dev);
+ dev_put(route_dev);
return err;
}
@@ -753,8 +752,7 @@ static int mlx5e_set_vf_tunnel(struct mlx5_eswitch *esw,
}
out:
- if (route_dev)
- dev_put(route_dev);
+ dev_put(route_dev);
return err;
}
@@ -788,8 +786,7 @@ static int mlx5e_update_vf_tunnel(struct mlx5_eswitch *esw,
mlx5e_tc_match_to_reg_mod_hdr_change(esw->dev, mod_hdr_acts, VPORT_TO_REG, act_id, data);
out:
- if (route_dev)
- dev_put(route_dev);
+ dev_put(route_dev);
return err;
}
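(Annotation, not part of the patch.) The tc_tun.c and tc_tun_encap.c hunks above drop the "if (ptr)" guards because dev_put() and dev_hold() accept NULL. The generic standalone sketch below shows why NULL-tolerant release helpers simplify shared cleanup paths; put_buf() is a made-up helper for illustration, not a kernel API.

/* Standalone sketch: a release helper that tolerates NULL lets callers
 * drop "if (ptr)" guards, as the tunnel hunks above do for dev_put(). */
#include <stdio.h>
#include <stdlib.h>

static void put_buf(char *buf)
{
	if (!buf)
		return;         /* NULL is a no-op, like dev_put(NULL) */
	free(buf);
}

static int do_work(int want_scratch)
{
	char *scratch = NULL;

	if (want_scratch) {
		scratch = malloc(64);
		if (!scratch)
			return -1;
	}

	/* ... work that may or may not have allocated scratch ... */

	put_buf(scratch);       /* single unconditional cleanup path */
	return 0;
}

int main(void)
{
	printf("%d %d\n", do_work(0), do_work(1));
	return 0;
}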
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c
index e4e487c8431b..5c762a71818d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c
@@ -140,7 +140,7 @@ static int mlx5e_tc_tun_parse_vxlan_gbp_option(struct mlx5e_priv *priv,
gbp_mask = (u32 *)&enc_opts.mask->data[0];
if (*gbp_mask & ~VXLAN_GBP_MASK) {
- NL_SET_ERR_MSG_FMT_MOD(extack, "Wrong VxLAN GBP mask(0x%08X)\n", *gbp_mask);
+ NL_SET_ERR_MSG_FMT_MOD(extack, "Wrong VxLAN GBP mask(0x%08X)", *gbp_mask);
return -EINVAL;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tir.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tir.c
index 11f724ad90db..19499072f67f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tir.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tir.c
@@ -124,7 +124,7 @@ void mlx5e_tir_builder_build_rss(struct mlx5e_tir_builder *builder,
const size_t len = MLX5_FLD_SZ_BYTES(tirc, rx_hash_toeplitz_key);
void *rss_key = MLX5_ADDR_OF(tirc, tirc, rx_hash_toeplitz_key);
- MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
+ MLX5_SET(tirc, tirc, rx_hash_symmetric, rss_hash->symmetric);
memcpy(rss_key, rss_hash->toeplitz_hash_key, len);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tir.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tir.h
index 857a84bcd53a..e8df3aaf6562 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tir.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tir.h
@@ -9,6 +9,7 @@
struct mlx5e_rss_params_hash {
u8 hfunc;
u8 toeplitz_hash_key[40];
+ bool symmetric;
};
struct mlx5e_rss_params_traffic_type {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
index 5ec468268d1a..e837c21d3d21 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
@@ -214,6 +214,19 @@ static inline u16 mlx5e_txqsq_get_next_pi(struct mlx5e_txqsq *sq, u16 size)
return pi;
}
+static inline u16 mlx5e_txqsq_get_next_pi_anysize(struct mlx5e_txqsq *sq,
+ u16 *size)
+{
+ struct mlx5_wq_cyc *wq = &sq->wq;
+ u16 pi, contig_wqebbs;
+
+ pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
+ contig_wqebbs = mlx5_wq_cyc_get_contig_wqebbs(wq, pi);
+ *size = min_t(u16, contig_wqebbs, sq->max_sq_mpw_wqebbs);
+
+ return pi;
+}
+
void mlx5e_txqsq_wake(struct mlx5e_txqsq *sq);
static inline u16 mlx5e_shampo_get_cqe_header_index(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
@@ -358,9 +371,9 @@ mlx5e_tx_dma_unmap(struct device *pdev, struct mlx5e_sq_dma *dma)
void mlx5e_tx_mpwqe_ensure_complete(struct mlx5e_txqsq *sq);
-static inline bool mlx5e_tx_mpwqe_is_full(struct mlx5e_tx_mpwqe *session, u8 max_sq_mpw_wqebbs)
+static inline bool mlx5e_tx_mpwqe_is_full(struct mlx5e_tx_mpwqe *session)
{
- return session->ds_count == max_sq_mpw_wqebbs * MLX5_SEND_WQEBB_NUM_DS;
+ return session->ds_count == session->ds_count_max;
}
static inline void mlx5e_rqwq_reset(struct mlx5e_rq *rq)
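(Annotation, not part of the patch.) The txrx.h hunk above stops recomputing "max WQEBBs * descriptors per WQEBB" at every fullness check and instead stores ds_count_max in the session when it opens. A standalone sketch of that bookkeeping follows; the constants are illustrative, not hardware limits.

/* Standalone sketch of the MPWQE-session bookkeeping above: the session
 * caches ds_count_max at start, so "is full" is a plain comparison. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DS_PER_WQEBB 4          /* stands in for MLX5_SEND_WQEBB_NUM_DS */

struct tx_session {
	uint8_t ds_count;       /* descriptors consumed so far */
	uint8_t ds_count_max;   /* capacity, fixed at session start */
};

static void session_start(struct tx_session *s, uint8_t max_wqebbs)
{
	s->ds_count = 1;                        /* e.g. the control segment */
	s->ds_count_max = max_wqebbs * DS_PER_WQEBB;
}

static bool session_is_full(const struct tx_session *s)
{
	return s->ds_count == s->ds_count_max;
}

int main(void)
{
	struct tx_session s;

	session_start(&s, 2);                   /* 2 WQEBBs -> 8 descriptors */
	while (!session_is_full(&s))
		s.ds_count++;                   /* one data segment per packet */
	printf("session closed after %u descriptors\n", s.ds_count);
	return 0;
}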
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
index 3cc4d55613bf..f803e1c93590 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
@@ -390,6 +390,7 @@ static void mlx5e_xdp_mpwqe_session_start(struct mlx5e_xdpsq *sq)
.wqe = wqe,
.bytes_count = 0,
.ds_count = MLX5E_TX_WQE_EMPTY_DS_COUNT,
+ .ds_count_max = sq->max_sq_mpw_wqebbs * MLX5_SEND_WQEBB_NUM_DS,
.pkt_count = 0,
.inline_on = mlx5e_xdp_get_inline_state(sq, session->inline_on),
};
@@ -501,7 +502,7 @@ mlx5e_xmit_xdp_frame_mpwqe(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptx
mlx5e_xdp_mpwqe_add_dseg(sq, p, stats);
- if (unlikely(mlx5e_xdp_mpwqe_is_full(session, sq->max_sq_mpw_wqebbs)))
+ if (unlikely(mlx5e_xdp_mpwqe_is_full(session)))
mlx5e_xdp_mpwqe_complete(sq);
stats->xmit++;
@@ -546,6 +547,7 @@ mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptxd,
bool inline_ok;
bool linear;
u16 pi;
+ int i;
struct mlx5e_xdpsq_stats *stats = sq->stats;
@@ -612,41 +614,33 @@ mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptxd,
cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_SEND);
- if (test_bit(MLX5E_SQ_STATE_XDP_MULTIBUF, &sq->state)) {
- int i;
-
- memset(&cseg->trailer, 0, sizeof(cseg->trailer));
- memset(eseg, 0, sizeof(*eseg) - sizeof(eseg->trailer));
-
- eseg->inline_hdr.sz = cpu_to_be16(inline_hdr_sz);
+ memset(&cseg->trailer, 0, sizeof(cseg->trailer));
+ memset(eseg, 0, sizeof(*eseg) - sizeof(eseg->trailer));
- for (i = 0; i < num_frags; i++) {
- skb_frag_t *frag = &xdptxdf->sinfo->frags[i];
- dma_addr_t addr;
+ eseg->inline_hdr.sz = cpu_to_be16(inline_hdr_sz);
- addr = xdptxdf->dma_arr ? xdptxdf->dma_arr[i] :
- page_pool_get_dma_addr(skb_frag_page(frag)) +
- skb_frag_off(frag);
+ for (i = 0; i < num_frags; i++) {
+ skb_frag_t *frag = &xdptxdf->sinfo->frags[i];
+ dma_addr_t addr;
- dseg->addr = cpu_to_be64(addr);
- dseg->byte_count = cpu_to_be32(skb_frag_size(frag));
- dseg->lkey = sq->mkey_be;
- dseg++;
- }
+ addr = xdptxdf->dma_arr ? xdptxdf->dma_arr[i] :
+ page_pool_get_dma_addr(skb_frag_page(frag)) +
+ skb_frag_off(frag);
- cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
+ dseg->addr = cpu_to_be64(addr);
+ dseg->byte_count = cpu_to_be32(skb_frag_size(frag));
+ dseg->lkey = sq->mkey_be;
+ dseg++;
+ }
- sq->db.wqe_info[pi] = (struct mlx5e_xdp_wqe_info) {
- .num_wqebbs = num_wqebbs,
- .num_pkts = 1,
- };
+ cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
- sq->pc += num_wqebbs;
- } else {
- cseg->fm_ce_se = 0;
+ sq->db.wqe_info[pi] = (struct mlx5e_xdp_wqe_info) {
+ .num_wqebbs = num_wqebbs,
+ .num_pkts = 1,
+ };
- sq->pc++;
- }
+ sq->pc += num_wqebbs;
xsk_tx_metadata_request(meta, &mlx5e_xsk_tx_metadata_ops, eseg);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
index e054db1e10f8..446e492c6bb8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
@@ -182,13 +182,13 @@ static inline bool mlx5e_xdp_get_inline_state(struct mlx5e_xdpsq *sq, bool cur)
return cur;
}
-static inline bool mlx5e_xdp_mpwqe_is_full(struct mlx5e_tx_mpwqe *session, u8 max_sq_mpw_wqebbs)
+static inline bool mlx5e_xdp_mpwqe_is_full(struct mlx5e_tx_mpwqe *session)
{
if (session->inline_on)
return session->ds_count + MLX5E_XDP_INLINE_WQE_MAX_DS_CNT >
- max_sq_mpw_wqebbs * MLX5_SEND_WQEBB_NUM_DS;
+ session->ds_count_max;
- return mlx5e_tx_mpwqe_is_full(session, max_sq_mpw_wqebbs);
+ return mlx5e_tx_mpwqe_is_full(session);
}
struct mlx5e_xdp_wqe_info {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
index 1b7132fa70de..2b05536d564a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
@@ -123,7 +123,7 @@ int mlx5e_xsk_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
bitmap_zero(wi->skip_release_bitmap, rq->mpwqe.pages_per_wqe);
wi->consumed_strides = 0;
- umr_wqe->ctrl.opmod_idx_opcode =
+ umr_wqe->hdr.ctrl.opmod_idx_opcode =
cpu_to_be32((icosq->pc << MLX5_WQE_CTRL_WQE_INDEX_SHIFT) | MLX5_OPCODE_UMR);
/* Optimized for speed: keep in sync with mlx5e_mpwrq_umr_entry_size. */
@@ -134,7 +134,7 @@ int mlx5e_xsk_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
offset = offset * sizeof(struct mlx5_klm) * 2 / MLX5_OCTWORD;
else if (unlikely(rq->mpwqe.umr_mode == MLX5E_MPWRQ_UMR_MODE_TRIPLE))
offset = offset * sizeof(struct mlx5_ksm) * 4 / MLX5_OCTWORD;
- umr_wqe->uctrl.xlt_offset = cpu_to_be16(offset);
+ umr_wqe->hdr.uctrl.xlt_offset = cpu_to_be16(offset);
icosq->db.wqe_info[pi] = (struct mlx5e_icosq_wqe_info) {
.wqe_type = MLX5E_ICOSQ_WQE_UMR_RX,
@@ -144,7 +144,7 @@ int mlx5e_xsk_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
icosq->pc += rq->mpwqe.umr_wqebbs;
- icosq->doorbell_cseg = &umr_wqe->ctrl;
+ icosq->doorbell_cseg = &umr_wqe->hdr.ctrl;
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
index 501709ac310f..2dd842aac6fc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
@@ -277,12 +277,12 @@ static void mlx5e_ipsec_init_macs(struct mlx5e_ipsec_sa_entry *sa_entry,
case XFRM_DEV_OFFLOAD_IN:
src = attrs->dmac;
dst = attrs->smac;
- pkey = &attrs->saddr.a4;
+ pkey = &attrs->addrs.saddr.a4;
break;
case XFRM_DEV_OFFLOAD_OUT:
src = attrs->smac;
dst = attrs->dmac;
- pkey = &attrs->daddr.a4;
+ pkey = &attrs->addrs.daddr.a4;
break;
default:
return;
@@ -303,6 +303,16 @@ static void mlx5e_ipsec_init_macs(struct mlx5e_ipsec_sa_entry *sa_entry,
neigh_release(n);
}
+static void mlx5e_ipsec_state_mask(struct mlx5e_ipsec_addr *addrs)
+{
+ /*
+ * State doesn't have subnet prefixes in outer headers.
+	 * The match is performed for exact source/destination addresses.
+ */
+ memset(addrs->smask.m6, 0xFF, sizeof(__be32) * 4);
+ memset(addrs->dmask.m6, 0xFF, sizeof(__be32) * 4);
+}
+
void mlx5e_ipsec_build_accel_xfrm_attrs(struct mlx5e_ipsec_sa_entry *sa_entry,
struct mlx5_accel_esp_xfrm_attrs *attrs)
{
@@ -374,9 +384,11 @@ skip_replay_window:
attrs->spi = be32_to_cpu(x->id.spi);
/* source , destination ips */
- memcpy(&attrs->saddr, x->props.saddr.a6, sizeof(attrs->saddr));
- memcpy(&attrs->daddr, x->id.daddr.a6, sizeof(attrs->daddr));
- attrs->family = x->props.family;
+ memcpy(&attrs->addrs.saddr, x->props.saddr.a6,
+ sizeof(attrs->addrs.saddr));
+ memcpy(&attrs->addrs.daddr, x->id.daddr.a6, sizeof(attrs->addrs.daddr));
+ attrs->addrs.family = x->props.family;
+ mlx5e_ipsec_state_mask(&attrs->addrs);
attrs->type = x->xso.type;
attrs->reqid = x->props.reqid;
attrs->upspec.dport = ntohs(x->sel.dport);
@@ -428,7 +440,8 @@ static int mlx5e_xfrm_validate_state(struct mlx5_core_dev *mdev,
}
if (x->encap) {
if (!(mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_ESPINUDP)) {
- NL_SET_ERR_MSG_MOD(extack, "Encapsulation is not supported");
+ NL_SET_ERR_MSG_MOD(extack,
+ "Encapsulation is not supported");
return -EINVAL;
}
@@ -853,13 +866,13 @@ static int mlx5e_ipsec_netevent_event(struct notifier_block *nb,
xa_for_each_marked(&ipsec->sadb, idx, sa_entry, MLX5E_IPSEC_TUNNEL_SA) {
attrs = &sa_entry->attrs;
- if (attrs->family == AF_INET) {
- if (!neigh_key_eq32(n, &attrs->saddr.a4) &&
- !neigh_key_eq32(n, &attrs->daddr.a4))
+ if (attrs->addrs.family == AF_INET) {
+ if (!neigh_key_eq32(n, &attrs->addrs.saddr.a4) &&
+ !neigh_key_eq32(n, &attrs->addrs.daddr.a4))
continue;
} else {
- if (!neigh_key_eq128(n, &attrs->saddr.a4) &&
- !neigh_key_eq128(n, &attrs->daddr.a4))
+ if (!neigh_key_eq128(n, &attrs->addrs.saddr.a4) &&
+ !neigh_key_eq128(n, &attrs->addrs.daddr.a4))
continue;
}
@@ -953,21 +966,6 @@ void mlx5e_ipsec_cleanup(struct mlx5e_priv *priv)
priv->ipsec = NULL;
}
-static bool mlx5e_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
-{
- if (x->props.family == AF_INET) {
- /* Offload with IPv4 options is not supported yet */
- if (ip_hdr(skb)->ihl > 5)
- return false;
- } else {
- /* Offload with IPv6 extension headers is not support yet */
- if (ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr))
- return false;
- }
-
- return true;
-}
-
static void mlx5e_xfrm_advance_esn_state(struct xfrm_state *x)
{
struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x);
@@ -1035,7 +1033,7 @@ static void mlx5e_xfrm_update_stats(struct xfrm_state *x)
* by removing always available headers.
*/
headers = sizeof(struct ethhdr);
- if (sa_entry->attrs.family == AF_INET)
+ if (sa_entry->attrs.addrs.family == AF_INET)
headers += sizeof(struct iphdr);
else
headers += sizeof(struct ipv6hdr);
@@ -1044,6 +1042,43 @@ static void mlx5e_xfrm_update_stats(struct xfrm_state *x)
x->curlft.bytes += success_bytes - headers * success_packets;
}
+static __be32 word_to_mask(int prefix)
+{
+ if (prefix < 0)
+ return 0;
+
+ if (!prefix || prefix > 31)
+ return cpu_to_be32(0xFFFFFFFF);
+
+ return cpu_to_be32(((1U << prefix) - 1) << (32 - prefix));
+}
+
+static void mlx5e_ipsec_policy_mask(struct mlx5e_ipsec_addr *addrs,
+ struct xfrm_selector *sel)
+{
+ int i;
+
+ if (addrs->family == AF_INET) {
+ addrs->smask.m4 = word_to_mask(sel->prefixlen_s);
+ addrs->saddr.a4 &= addrs->smask.m4;
+ addrs->dmask.m4 = word_to_mask(sel->prefixlen_d);
+ addrs->daddr.a4 &= addrs->dmask.m4;
+ return;
+ }
+
+ for (i = 0; i < 4; i++) {
+ if (sel->prefixlen_s != 32 * i)
+ addrs->smask.m6[i] =
+ word_to_mask(sel->prefixlen_s - 32 * i);
+ addrs->saddr.a6[i] &= addrs->smask.m6[i];
+
+ if (sel->prefixlen_d != 32 * i)
+ addrs->dmask.m6[i] =
+ word_to_mask(sel->prefixlen_d - 32 * i);
+ addrs->daddr.a6[i] &= addrs->dmask.m6[i];
+ }
+}
+
static int mlx5e_xfrm_validate_policy(struct mlx5_core_dev *mdev,
struct xfrm_policy *x,
struct netlink_ext_ack *extack)
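(Annotation, not part of the patch.) word_to_mask() and mlx5e_ipsec_policy_mask() above turn an xfrm selector prefix length into per-word masks. The standalone worked example below mirrors that arithmetic outside the kernel, printed host-endian for readability.

/* Standalone walk-through of the prefix-length to mask arithmetic used
 * by word_to_mask() above. */
#include <stdint.h>
#include <stdio.h>

static uint32_t prefix_to_mask(int prefix)
{
	if (prefix < 0)
		return 0;
	if (prefix == 0 || prefix > 31)
		return 0xFFFFFFFFu;
	return ((1u << prefix) - 1) << (32 - prefix);
}

int main(void)
{
	/* /24 keeps the top 24 bits: 0xFFFFFF00 */
	printf("/24 -> 0x%08X\n", prefix_to_mask(24));
	/* /8 keeps the top 8 bits: 0xFF000000 */
	printf("/8  -> 0x%08X\n", prefix_to_mask(8));
	/* an IPv6 /40 spans words: word 0 is fully masked, word 1 gets /8 */
	printf("v6 /40: word0 0x%08X word1 0x%08X\n",
	       prefix_to_mask(40), prefix_to_mask(40 - 32));
	return 0;
}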
@@ -1116,9 +1151,10 @@ mlx5e_ipsec_build_accel_pol_attrs(struct mlx5e_ipsec_pol_entry *pol_entry,
sel = &x->selector;
memset(attrs, 0, sizeof(*attrs));
- memcpy(&attrs->saddr, sel->saddr.a6, sizeof(attrs->saddr));
- memcpy(&attrs->daddr, sel->daddr.a6, sizeof(attrs->daddr));
- attrs->family = sel->family;
+ memcpy(&attrs->addrs.saddr, sel->saddr.a6, sizeof(attrs->addrs.saddr));
+ memcpy(&attrs->addrs.daddr, sel->daddr.a6, sizeof(attrs->addrs.daddr));
+ attrs->addrs.family = sel->family;
+ mlx5e_ipsec_policy_mask(&attrs->addrs, sel);
attrs->dir = x->xdo.dir;
attrs->action = x->action;
attrs->type = XFRM_DEV_OFFLOAD_PACKET;
@@ -1196,7 +1232,6 @@ static const struct xfrmdev_ops mlx5e_ipsec_xfrmdev_ops = {
.xdo_dev_state_add = mlx5e_xfrm_add_state,
.xdo_dev_state_delete = mlx5e_xfrm_del_state,
.xdo_dev_state_free = mlx5e_xfrm_free_state,
- .xdo_dev_offload_ok = mlx5e_ipsec_offload_ok,
.xdo_dev_state_advance_esn = mlx5e_xfrm_advance_esn_state,
.xdo_dev_state_update_stats = mlx5e_xfrm_update_stats,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
index 7d943e93cf6d..a63c2289f8af 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
@@ -76,27 +76,36 @@ struct mlx5_replay_esn {
u8 trigger : 1;
};
-struct mlx5_accel_esp_xfrm_attrs {
- u32 spi;
- u32 mode;
- struct aes_gcm_keymat aes_gcm;
-
+struct mlx5e_ipsec_addr {
union {
__be32 a4;
__be32 a6[4];
} saddr;
-
+ union {
+ __be32 m4;
+ __be32 m6[4];
+ } smask;
union {
__be32 a4;
__be32 a6[4];
} daddr;
+ union {
+ __be32 m4;
+ __be32 m6[4];
+ } dmask;
+ u8 family;
+};
+struct mlx5_accel_esp_xfrm_attrs {
+ u32 spi;
+ u32 mode;
+ struct aes_gcm_keymat aes_gcm;
+ struct mlx5e_ipsec_addr addrs;
struct upspec upspec;
u8 dir : 2;
u8 type : 2;
u8 drop : 1;
u8 encap : 1;
- u8 family;
struct mlx5_replay_esn replay_esn;
u32 authsize;
u32 reqid;
@@ -128,6 +137,7 @@ struct mlx5e_ipsec_hw_stats {
u64 ipsec_rx_bytes;
u64 ipsec_rx_drop_pkts;
u64 ipsec_rx_drop_bytes;
+ u64 ipsec_rx_drop_mismatch_sa_sel;
u64 ipsec_tx_pkts;
u64 ipsec_tx_bytes;
u64 ipsec_tx_drop_pkts;
@@ -184,6 +194,7 @@ struct mlx5e_ipsec_ft {
struct mutex mutex; /* Protect changes to this struct */
struct mlx5_flow_table *pol;
struct mlx5_flow_table *sa;
+ struct mlx5_flow_table *sa_sel;
struct mlx5_flow_table *status;
u32 refcnt;
};
@@ -195,6 +206,8 @@ struct mlx5e_ipsec_drop {
struct mlx5e_ipsec_rule {
struct mlx5_flow_handle *rule;
+ struct mlx5_flow_handle *status_pass;
+ struct mlx5_flow_handle *sa_sel;
struct mlx5_modify_hdr *modify_hdr;
struct mlx5_pkt_reformat *pkt_reformat;
struct mlx5_fc *fc;
@@ -206,6 +219,7 @@ struct mlx5e_ipsec_rule {
struct mlx5e_ipsec_miss {
struct mlx5_flow_group *group;
struct mlx5_flow_handle *rule;
+ struct mlx5_fc *fc;
};
struct mlx5e_ipsec_tx_create_attr {
@@ -274,18 +288,8 @@ struct mlx5e_ipsec_sa_entry {
};
struct mlx5_accel_pol_xfrm_attrs {
- union {
- __be32 a4;
- __be32 a6[4];
- } saddr;
-
- union {
- __be32 a4;
- __be32 a6[4];
- } daddr;
-
+ struct mlx5e_ipsec_addr addrs;
struct upspec upspec;
- u8 family;
u8 action;
u8 type : 2;
u8 dir : 2;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
index e7b64679f121..98b6a3a623f9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
@@ -16,6 +16,16 @@
#define MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_SIZE 16
#define IPSEC_TUNNEL_DEFAULT_TTL 0x40
+#define MLX5_IPSEC_FS_SA_SELECTOR_MAX_NUM_GROUPS 16
+
+enum {
+ MLX5_IPSEC_ASO_OK,
+ MLX5_IPSEC_ASO_BAD_REPLY,
+
+ /* For crypto offload, set by driver */
+ MLX5_IPSEC_ASO_SW_CRYPTO_OFFLOAD = 0xAA,
+};
+
struct mlx5e_ipsec_fc {
struct mlx5_fc *cnt;
struct mlx5_fc *drop;
@@ -33,6 +43,9 @@ struct mlx5e_ipsec_tx {
};
struct mlx5e_ipsec_status_checks {
+ struct mlx5_flow_group *pass_group;
+ struct mlx5_flow_handle *packet_offload_pass_rule;
+ struct mlx5_flow_handle *crypto_offload_pass_rule;
struct mlx5_flow_group *drop_all_group;
struct mlx5e_ipsec_drop all;
};
@@ -41,10 +54,12 @@ struct mlx5e_ipsec_rx {
struct mlx5e_ipsec_ft ft;
struct mlx5e_ipsec_miss pol;
struct mlx5e_ipsec_miss sa;
- struct mlx5e_ipsec_rule status;
- struct mlx5e_ipsec_status_checks status_drops;
+ struct mlx5e_ipsec_miss sa_sel;
+ struct mlx5e_ipsec_status_checks status_checks;
struct mlx5e_ipsec_fc *fc;
struct mlx5_fs_chains *chains;
+ struct mlx5_flow_table *pol_miss_ft;
+ struct mlx5_flow_handle *pol_miss_rule;
u8 allow_tunnel_mode : 1;
};
@@ -130,11 +145,12 @@ static void ipsec_chains_put_table(struct mlx5_fs_chains *chains, u32 prio)
static struct mlx5_flow_table *ipsec_ft_create(struct mlx5_flow_namespace *ns,
int level, int prio,
+ int num_reserved_entries,
int max_num_groups, u32 flags)
{
struct mlx5_flow_table_attr ft_attr = {};
- ft_attr.autogroup.num_reserved_entries = 1;
+ ft_attr.autogroup.num_reserved_entries = num_reserved_entries;
ft_attr.autogroup.max_num_groups = max_num_groups;
ft_attr.max_fte = NUM_IPSEC_FTE;
ft_attr.level = level;
@@ -147,22 +163,35 @@ static struct mlx5_flow_table *ipsec_ft_create(struct mlx5_flow_namespace *ns,
static void ipsec_rx_status_drop_destroy(struct mlx5e_ipsec *ipsec,
struct mlx5e_ipsec_rx *rx)
{
- mlx5_del_flow_rules(rx->status_drops.all.rule);
- mlx5_fc_destroy(ipsec->mdev, rx->status_drops.all.fc);
- mlx5_destroy_flow_group(rx->status_drops.drop_all_group);
+ mlx5_del_flow_rules(rx->status_checks.all.rule);
+ mlx5_fc_destroy(ipsec->mdev, rx->status_checks.all.fc);
+ mlx5_destroy_flow_group(rx->status_checks.drop_all_group);
}
static void ipsec_rx_status_pass_destroy(struct mlx5e_ipsec *ipsec,
struct mlx5e_ipsec_rx *rx)
{
- mlx5_del_flow_rules(rx->status.rule);
+ mlx5_del_flow_rules(rx->status_checks.packet_offload_pass_rule);
+ mlx5_del_flow_rules(rx->status_checks.crypto_offload_pass_rule);
+}
- if (rx != ipsec->rx_esw)
- return;
+static void ipsec_rx_rule_add_match_obj(struct mlx5e_ipsec_sa_entry *sa_entry,
+ struct mlx5e_ipsec_rx *rx,
+ struct mlx5_flow_spec *spec)
+{
+ struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
-#ifdef CONFIG_MLX5_ESWITCH
- mlx5_chains_put_table(esw_chains(ipsec->mdev->priv.eswitch), 0, 1, 0);
-#endif
+ if (rx == ipsec->rx_esw) {
+ mlx5_esw_ipsec_rx_rule_add_match_obj(sa_entry, spec);
+ } else {
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+ misc_parameters_2.metadata_reg_c_2);
+ MLX5_SET(fte_match_param, spec->match_value,
+ misc_parameters_2.metadata_reg_c_2,
+ sa_entry->ipsec_obj_id | BIT(31));
+
+ spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
+ }
}
static int rx_add_rule_drop_auth_trailer(struct mlx5e_ipsec_sa_entry *sa_entry,
@@ -200,11 +229,8 @@ static int rx_add_rule_drop_auth_trailer(struct mlx5e_ipsec_sa_entry *sa_entry,
MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters_2.ipsec_syndrome);
MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.ipsec_syndrome, 1);
- MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters_2.metadata_reg_c_2);
- MLX5_SET(fte_match_param, spec->match_value,
- misc_parameters_2.metadata_reg_c_2,
- sa_entry->ipsec_obj_id | BIT(31));
spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
+ ipsec_rx_rule_add_match_obj(sa_entry, rx, spec);
rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
@@ -281,10 +307,8 @@ static int rx_add_rule_drop_replay(struct mlx5e_ipsec_sa_entry *sa_entry, struct
MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters_2.metadata_reg_c_4);
MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_c_4, 1);
- MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters_2.metadata_reg_c_2);
- MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_c_2,
- sa_entry->ipsec_obj_id | BIT(31));
spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
+ ipsec_rx_rule_add_match_obj(sa_entry, rx, spec);
rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
@@ -359,9 +383,9 @@ static int ipsec_rx_status_drop_all_create(struct mlx5e_ipsec *ipsec,
goto err_rule;
}
- rx->status_drops.drop_all_group = g;
- rx->status_drops.all.rule = rule;
- rx->status_drops.all.fc = flow_counter;
+ rx->status_checks.drop_all_group = g;
+ rx->status_checks.all.rule = rule;
+ rx->status_checks.all.fc = flow_counter;
kvfree(flow_group_in);
kvfree(spec);
@@ -377,9 +401,52 @@ err_out:
return err;
}
-static int ipsec_rx_status_pass_create(struct mlx5e_ipsec *ipsec,
- struct mlx5e_ipsec_rx *rx,
- struct mlx5_flow_destination *dest)
+static int ipsec_rx_status_pass_group_create(struct mlx5e_ipsec *ipsec,
+ struct mlx5e_ipsec_rx *rx)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ struct mlx5_flow_table *ft = rx->ft.status;
+ struct mlx5_flow_group *fg;
+ void *match_criteria;
+ u32 *flow_group_in;
+ int err = 0;
+
+ flow_group_in = kvzalloc(inlen, GFP_KERNEL);
+ if (!flow_group_in)
+ return -ENOMEM;
+
+ MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
+ MLX5_MATCH_MISC_PARAMETERS_2);
+ match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in,
+ match_criteria);
+ MLX5_SET_TO_ONES(fte_match_param, match_criteria,
+ misc_parameters_2.ipsec_syndrome);
+ MLX5_SET_TO_ONES(fte_match_param, match_criteria,
+ misc_parameters_2.metadata_reg_c_4);
+
+ MLX5_SET(create_flow_group_in, flow_group_in,
+ start_flow_index, ft->max_fte - 3);
+ MLX5_SET(create_flow_group_in, flow_group_in,
+ end_flow_index, ft->max_fte - 2);
+
+ fg = mlx5_create_flow_group(ft, flow_group_in);
+ if (IS_ERR(fg)) {
+ err = PTR_ERR(fg);
+ mlx5_core_warn(ipsec->mdev,
+ "Failed to create rx status pass flow group, err=%d\n",
+ err);
+ }
+ rx->status_checks.pass_group = fg;
+
+ kvfree(flow_group_in);
+ return err;
+}
+
+static struct mlx5_flow_handle *
+ipsec_rx_status_pass_create(struct mlx5e_ipsec *ipsec,
+ struct mlx5e_ipsec_rx *rx,
+ struct mlx5_flow_destination *dest,
+ u8 aso_ok)
{
struct mlx5_flow_act flow_act = {};
struct mlx5_flow_handle *rule;
@@ -388,7 +455,7 @@ static int ipsec_rx_status_pass_create(struct mlx5e_ipsec *ipsec,
spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
if (!spec)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
misc_parameters_2.ipsec_syndrome);
@@ -397,11 +464,11 @@ static int ipsec_rx_status_pass_create(struct mlx5e_ipsec *ipsec,
MLX5_SET(fte_match_param, spec->match_value,
misc_parameters_2.ipsec_syndrome, 0);
MLX5_SET(fte_match_param, spec->match_value,
- misc_parameters_2.metadata_reg_c_4, 0);
+ misc_parameters_2.metadata_reg_c_4, aso_ok);
if (rx == ipsec->rx_esw)
spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
- flow_act.flags = FLOW_ACT_NO_APPEND;
+ flow_act.flags = FLOW_ACT_NO_APPEND | FLOW_ACT_IGNORE_FLOW_LEVEL;
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
MLX5_FLOW_CONTEXT_ACTION_COUNT;
rule = mlx5_add_flow_rules(rx->ft.status, spec, &flow_act, dest, 2);
@@ -412,19 +479,19 @@ static int ipsec_rx_status_pass_create(struct mlx5e_ipsec *ipsec,
goto err_rule;
}
- rx->status.rule = rule;
kvfree(spec);
- return 0;
+ return rule;
err_rule:
kvfree(spec);
- return err;
+ return ERR_PTR(err);
}
static void mlx5_ipsec_rx_status_destroy(struct mlx5e_ipsec *ipsec,
struct mlx5e_ipsec_rx *rx)
{
ipsec_rx_status_pass_destroy(ipsec, rx);
+ mlx5_destroy_flow_group(rx->status_checks.pass_group);
ipsec_rx_status_drop_destroy(ipsec, rx);
}
@@ -432,19 +499,44 @@ static int mlx5_ipsec_rx_status_create(struct mlx5e_ipsec *ipsec,
struct mlx5e_ipsec_rx *rx,
struct mlx5_flow_destination *dest)
{
+ struct mlx5_flow_destination pol_dest[2];
+ struct mlx5_flow_handle *rule;
int err;
err = ipsec_rx_status_drop_all_create(ipsec, rx);
if (err)
return err;
- err = ipsec_rx_status_pass_create(ipsec, rx, dest);
+ err = ipsec_rx_status_pass_group_create(ipsec, rx);
if (err)
- goto err_pass_create;
+ goto err_pass_group_create;
+
+ rule = ipsec_rx_status_pass_create(ipsec, rx, dest,
+ MLX5_IPSEC_ASO_SW_CRYPTO_OFFLOAD);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ goto err_crypto_offload_pass_create;
+ }
+ rx->status_checks.crypto_offload_pass_rule = rule;
+
+ pol_dest[0].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ pol_dest[0].ft = rx->ft.pol;
+ pol_dest[1] = dest[1];
+ rule = ipsec_rx_status_pass_create(ipsec, rx, pol_dest,
+ MLX5_IPSEC_ASO_OK);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ goto err_packet_offload_pass_create;
+ }
+ rx->status_checks.packet_offload_pass_rule = rule;
return 0;
-err_pass_create:
+err_packet_offload_pass_create:
+ mlx5_del_flow_rules(rx->status_checks.crypto_offload_pass_rule);
+err_crypto_offload_pass_create:
+ mlx5_destroy_flow_group(rx->status_checks.pass_group);
+err_pass_group_create:
ipsec_rx_status_drop_destroy(ipsec, rx);
return err;
}
@@ -493,6 +585,15 @@ out:
return err;
}
+static void ipsec_rx_update_default_dest(struct mlx5e_ipsec_rx *rx,
+ struct mlx5_flow_destination *old_dest,
+ struct mlx5_flow_destination *new_dest)
+{
+ mlx5_modify_rule_destination(rx->pol_miss_rule, new_dest, old_dest);
+ mlx5_modify_rule_destination(rx->status_checks.crypto_offload_pass_rule,
+ new_dest, old_dest);
+}
+
static void handle_ipsec_rx_bringup(struct mlx5e_ipsec *ipsec, u32 family)
{
struct mlx5e_ipsec_rx *rx = ipsec_rx(ipsec, family, XFRM_DEV_OFFLOAD_PACKET);
@@ -507,8 +608,7 @@ static void handle_ipsec_rx_bringup(struct mlx5e_ipsec *ipsec, u32 family)
new_dest.ft = mlx5_ipsec_fs_roce_ft_get(ipsec->roce, family);
new_dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
- mlx5_modify_rule_destination(rx->status.rule, &new_dest, &old_dest);
- mlx5_modify_rule_destination(rx->sa.rule, &new_dest, &old_dest);
+ ipsec_rx_update_default_dest(rx, &old_dest, &new_dest);
}
static void handle_ipsec_rx_cleanup(struct mlx5e_ipsec *ipsec, u32 family)
@@ -520,8 +620,7 @@ static void handle_ipsec_rx_cleanup(struct mlx5e_ipsec *ipsec, u32 family)
old_dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
new_dest = mlx5_ttc_get_default_dest(mlx5e_fs_get_ttc(ipsec->fs, false),
family2tt(family));
- mlx5_modify_rule_destination(rx->sa.rule, &new_dest, &old_dest);
- mlx5_modify_rule_destination(rx->status.rule, &new_dest, &old_dest);
+ ipsec_rx_update_default_dest(rx, &old_dest, &new_dest);
mlx5_ipsec_fs_roce_rx_destroy(ipsec->roce, family, ipsec->mdev);
}
@@ -577,13 +676,8 @@ static void ipsec_rx_ft_disconnect(struct mlx5e_ipsec *ipsec, u32 family)
mlx5_ttc_fwd_default_dest(ttc, family2tt(family));
}
-static void rx_destroy(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
- struct mlx5e_ipsec_rx *rx, u32 family)
+static void ipsec_rx_policy_destroy(struct mlx5e_ipsec_rx *rx)
{
- /* disconnect */
- if (rx != ipsec->rx_esw)
- ipsec_rx_ft_disconnect(ipsec, family);
-
if (rx->chains) {
ipsec_chains_destroy(rx->chains);
} else {
@@ -592,6 +686,29 @@ static void rx_destroy(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
mlx5_destroy_flow_table(rx->ft.pol);
}
+ if (rx->pol_miss_rule) {
+ mlx5_del_flow_rules(rx->pol_miss_rule);
+ mlx5_destroy_flow_table(rx->pol_miss_ft);
+ }
+}
+
+static void ipsec_rx_sa_selector_destroy(struct mlx5_core_dev *mdev,
+ struct mlx5e_ipsec_rx *rx)
+{
+ mlx5_del_flow_rules(rx->sa_sel.rule);
+ mlx5_fc_destroy(mdev, rx->sa_sel.fc);
+ rx->sa_sel.fc = NULL;
+ mlx5_destroy_flow_group(rx->sa_sel.group);
+ mlx5_destroy_flow_table(rx->ft.sa_sel);
+}
+
+static void rx_destroy(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
+ struct mlx5e_ipsec_rx *rx, u32 family)
+{
+ /* disconnect */
+ if (rx != ipsec->rx_esw)
+ ipsec_rx_ft_disconnect(ipsec, family);
+
mlx5_del_flow_rules(rx->sa.rule);
mlx5_destroy_flow_group(rx->sa.group);
mlx5_destroy_flow_table(rx->ft.sa);
@@ -600,7 +717,17 @@ static void rx_destroy(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
mlx5_ipsec_rx_status_destroy(ipsec, rx);
mlx5_destroy_flow_table(rx->ft.status);
+ ipsec_rx_sa_selector_destroy(mdev, rx);
+
+ ipsec_rx_policy_destroy(rx);
+
mlx5_ipsec_fs_roce_rx_destroy(ipsec->roce, family, mdev);
+
+#ifdef CONFIG_MLX5_ESWITCH
+ if (rx == ipsec->rx_esw)
+ mlx5_chains_put_table(esw_chains(ipsec->mdev->priv.eswitch),
+ 0, 1, 0);
+#endif
}
static void ipsec_rx_create_attr_set(struct mlx5e_ipsec *ipsec,
@@ -652,6 +779,28 @@ static int ipsec_rx_status_pass_dest_get(struct mlx5e_ipsec *ipsec,
return 0;
}
+static void ipsec_rx_sa_miss_dest_get(struct mlx5e_ipsec *ipsec,
+ struct mlx5e_ipsec_rx *rx,
+ struct mlx5e_ipsec_rx_create_attr *attr,
+ struct mlx5_flow_destination *dest,
+ struct mlx5_flow_destination *miss_dest)
+{
+ if (rx == ipsec->rx_esw)
+ *miss_dest = *dest;
+ else
+ *miss_dest =
+ mlx5_ttc_get_default_dest(attr->ttc,
+ family2tt(attr->family));
+}
+
+static void ipsec_rx_default_dest_get(struct mlx5e_ipsec *ipsec,
+ struct mlx5e_ipsec_rx *rx,
+ struct mlx5_flow_destination *dest)
+{
+ dest->type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest->ft = rx->pol_miss_ft;
+}
+
static void ipsec_rx_ft_connect(struct mlx5e_ipsec *ipsec,
struct mlx5e_ipsec_rx *rx,
struct mlx5e_ipsec_rx_create_attr *attr)
@@ -659,15 +808,219 @@ static void ipsec_rx_ft_connect(struct mlx5e_ipsec *ipsec,
struct mlx5_flow_destination dest = {};
dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
- dest.ft = rx->ft.pol;
+ dest.ft = rx->ft.sa;
mlx5_ttc_fwd_dest(attr->ttc, family2tt(attr->family), &dest);
}
+static int ipsec_rx_chains_create_miss(struct mlx5e_ipsec *ipsec,
+ struct mlx5e_ipsec_rx *rx,
+ struct mlx5e_ipsec_rx_create_attr *attr,
+ struct mlx5_flow_destination *dest)
+{
+ struct mlx5_flow_table_attr ft_attr = {};
+ MLX5_DECLARE_FLOW_ACT(flow_act);
+ struct mlx5_flow_handle *rule;
+ struct mlx5_flow_table *ft;
+ int err;
+
+ if (rx == ipsec->rx_esw) {
+ /* No need to create miss table for switchdev mode,
+ * just set it to the root chain table.
+ */
+ rx->pol_miss_ft = dest->ft;
+ return 0;
+ }
+
+ ft_attr.max_fte = 1;
+ ft_attr.autogroup.max_num_groups = 1;
+ ft_attr.level = attr->pol_level;
+ ft_attr.prio = attr->prio;
+
+ ft = mlx5_create_auto_grouped_flow_table(attr->ns, &ft_attr);
+ if (IS_ERR(ft))
+ return PTR_ERR(ft);
+
+ rule = mlx5_add_flow_rules(ft, NULL, &flow_act, dest, 1);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ goto err_rule;
+ }
+
+ rx->pol_miss_ft = ft;
+ rx->pol_miss_rule = rule;
+
+ return 0;
+
+err_rule:
+ mlx5_destroy_flow_table(ft);
+ return err;
+}
+
+static int ipsec_rx_policy_create(struct mlx5e_ipsec *ipsec,
+ struct mlx5e_ipsec_rx *rx,
+ struct mlx5e_ipsec_rx_create_attr *attr,
+ struct mlx5_flow_destination *dest)
+{
+ struct mlx5_flow_destination default_dest;
+ struct mlx5_core_dev *mdev = ipsec->mdev;
+ struct mlx5_flow_table *ft;
+ int err;
+
+ err = ipsec_rx_chains_create_miss(ipsec, rx, attr, dest);
+ if (err)
+ return err;
+
+ ipsec_rx_default_dest_get(ipsec, rx, &default_dest);
+
+ if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_PRIO) {
+ rx->chains = ipsec_chains_create(mdev,
+ default_dest.ft,
+ attr->chains_ns,
+ attr->prio,
+ attr->sa_level,
+ &rx->ft.pol);
+ if (IS_ERR(rx->chains))
+ err = PTR_ERR(rx->chains);
+ } else {
+ ft = ipsec_ft_create(attr->ns, attr->pol_level,
+ attr->prio, 1, 2, 0);
+ if (IS_ERR(ft)) {
+ err = PTR_ERR(ft);
+ goto err_out;
+ }
+ rx->ft.pol = ft;
+
+ err = ipsec_miss_create(mdev, rx->ft.pol, &rx->pol,
+ &default_dest);
+ if (err)
+ mlx5_destroy_flow_table(rx->ft.pol);
+ }
+
+ if (!err)
+ return 0;
+
+err_out:
+ if (rx->pol_miss_rule) {
+ mlx5_del_flow_rules(rx->pol_miss_rule);
+ mlx5_destroy_flow_table(rx->pol_miss_ft);
+ }
+ return err;
+}
+
+static int ipsec_rx_sa_selector_create(struct mlx5e_ipsec *ipsec,
+ struct mlx5e_ipsec_rx *rx,
+ struct mlx5e_ipsec_rx_create_attr *attr)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ struct mlx5_core_dev *mdev = ipsec->mdev;
+ struct mlx5_flow_act flow_act = {};
+ struct mlx5_flow_destination dest;
+ struct mlx5_flow_handle *rule;
+ struct mlx5_flow_table *ft;
+ struct mlx5_flow_group *fg;
+ u32 *flow_group_in;
+ struct mlx5_fc *fc;
+ int err;
+
+ flow_group_in = kvzalloc(inlen, GFP_KERNEL);
+ if (!flow_group_in)
+ return -ENOMEM;
+
+ ft = ipsec_ft_create(attr->ns, attr->status_level, attr->prio, 1,
+ MLX5_IPSEC_FS_SA_SELECTOR_MAX_NUM_GROUPS, 0);
+ if (IS_ERR(ft)) {
+ err = PTR_ERR(ft);
+ mlx5_core_err(mdev, "Failed to create RX SA selector flow table, err=%d\n",
+ err);
+ goto err_ft;
+ }
+
+ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index,
+ ft->max_fte - 1);
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
+ ft->max_fte - 1);
+ fg = mlx5_create_flow_group(ft, flow_group_in);
+ if (IS_ERR(fg)) {
+ err = PTR_ERR(fg);
+ mlx5_core_err(mdev, "Failed to create RX SA selector miss group, err=%d\n",
+ err);
+ goto err_fg;
+ }
+
+ fc = mlx5_fc_create(mdev, false);
+ if (IS_ERR(fc)) {
+ err = PTR_ERR(fc);
+ mlx5_core_err(mdev,
+ "Failed to create ipsec RX SA selector miss rule counter, err=%d\n",
+ err);
+ goto err_cnt;
+ }
+
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+ dest.counter = fc;
+ flow_act.action =
+ MLX5_FLOW_CONTEXT_ACTION_COUNT | MLX5_FLOW_CONTEXT_ACTION_DROP;
+
+ rule = mlx5_add_flow_rules(ft, NULL, &flow_act, &dest, 1);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ mlx5_core_err(mdev, "Failed to create RX SA selector miss drop rule, err=%d\n",
+ err);
+ goto err_rule;
+ }
+
+ rx->ft.sa_sel = ft;
+ rx->sa_sel.group = fg;
+ rx->sa_sel.fc = fc;
+ rx->sa_sel.rule = rule;
+
+ kvfree(flow_group_in);
+
+ return 0;
+
+err_rule:
+ mlx5_fc_destroy(mdev, fc);
+err_cnt:
+ mlx5_destroy_flow_group(fg);
+err_fg:
+ mlx5_destroy_flow_table(ft);
+err_ft:
+ kvfree(flow_group_in);
+ return err;
+}
+
+/* The decryption processing is as follows:
+ *
+ * - SA (decrypt) table: a miss is forwarded to the kernel (the default
+ *   TTC destination); a hit is decrypted and sent on to the status table.
+ * - status table: if decryption failed, the packet is dropped. Otherwise,
+ *   crypto-offload traffic goes straight to the kernel; packet-offload
+ *   traffic from an SA without an upper protocol selector (no UPSPEC)
+ *   goes to the policy table, while traffic from an SA with an UPSPEC
+ *   goes to the SA selector table first.
+ * - SA selector table: a match on the upper protocol forwards to the
+ *   policy table; a miss is counted and dropped.
+ * - policy table: allow/default verdicts and policy misses are forwarded
+ *   to the policy-miss table and from there to the kernel; block drops
+ *   the packet.
+ */
+
static int rx_create(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
struct mlx5e_ipsec_rx *rx, u32 family)
{
+ struct mlx5_flow_destination dest[2], miss_dest;
struct mlx5e_ipsec_rx_create_attr attr;
- struct mlx5_flow_destination dest[2];
struct mlx5_flow_table *ft;
u32 flags = 0;
int err;
@@ -678,80 +1031,61 @@ static int rx_create(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
if (err)
return err;
- ft = ipsec_ft_create(attr.ns, attr.status_level, attr.prio, 3, 0);
+ ft = ipsec_ft_create(attr.ns, attr.status_level, attr.prio, 3, 4, 0);
if (IS_ERR(ft)) {
err = PTR_ERR(ft);
goto err_fs_ft_status;
}
rx->ft.status = ft;
- dest[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- dest[1].counter = rx->fc->cnt;
- err = mlx5_ipsec_rx_status_create(ipsec, rx, dest);
+ err = ipsec_rx_sa_selector_create(ipsec, rx, &attr);
if (err)
- goto err_add;
+ goto err_fs_ft_sa_sel;
/* Create FT */
if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_TUNNEL)
rx->allow_tunnel_mode = mlx5_eswitch_block_encap(mdev);
if (rx->allow_tunnel_mode)
flags = MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
- ft = ipsec_ft_create(attr.ns, attr.sa_level, attr.prio, 2, flags);
+ ft = ipsec_ft_create(attr.ns, attr.sa_level, attr.prio, 1, 2, flags);
if (IS_ERR(ft)) {
err = PTR_ERR(ft);
goto err_fs_ft;
}
rx->ft.sa = ft;
- err = ipsec_miss_create(mdev, rx->ft.sa, &rx->sa, dest);
+ ipsec_rx_sa_miss_dest_get(ipsec, rx, &attr, &dest[0], &miss_dest);
+ err = ipsec_miss_create(mdev, rx->ft.sa, &rx->sa, &miss_dest);
if (err)
goto err_fs;
- if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_PRIO) {
- rx->chains = ipsec_chains_create(mdev, rx->ft.sa,
- attr.chains_ns,
- attr.prio,
- attr.pol_level,
- &rx->ft.pol);
- if (IS_ERR(rx->chains)) {
- err = PTR_ERR(rx->chains);
- goto err_pol_ft;
- }
-
- goto connect;
- }
+ err = ipsec_rx_policy_create(ipsec, rx, &attr, &dest[0]);
+ if (err)
+ goto err_policy;
- ft = ipsec_ft_create(attr.ns, attr.pol_level, attr.prio, 2, 0);
- if (IS_ERR(ft)) {
- err = PTR_ERR(ft);
- goto err_pol_ft;
- }
- rx->ft.pol = ft;
- memset(dest, 0x00, 2 * sizeof(*dest));
- dest[0].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
- dest[0].ft = rx->ft.sa;
- err = ipsec_miss_create(mdev, rx->ft.pol, &rx->pol, dest);
+ dest[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+ dest[1].counter = rx->fc->cnt;
+ err = mlx5_ipsec_rx_status_create(ipsec, rx, dest);
if (err)
- goto err_pol_miss;
+ goto err_add;
-connect:
/* connect */
if (rx != ipsec->rx_esw)
ipsec_rx_ft_connect(ipsec, rx, &attr);
return 0;
-err_pol_miss:
- mlx5_destroy_flow_table(rx->ft.pol);
-err_pol_ft:
+err_add:
+ ipsec_rx_policy_destroy(rx);
+err_policy:
mlx5_del_flow_rules(rx->sa.rule);
mlx5_destroy_flow_group(rx->sa.group);
err_fs:
mlx5_destroy_flow_table(rx->ft.sa);
-err_fs_ft:
if (rx->allow_tunnel_mode)
mlx5_eswitch_unblock_encap(mdev);
- mlx5_ipsec_rx_status_destroy(ipsec, rx);
-err_add:
+err_fs_ft:
+ ipsec_rx_sa_selector_destroy(mdev, rx);
+err_fs_ft_sa_sel:
mlx5_destroy_flow_table(rx->ft.status);
err_fs_ft_status:
mlx5_ipsec_fs_roce_rx_destroy(ipsec->roce, family, mdev);
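
To make the flow in the decryption description above concrete, here is a minimal, illustrative-only sketch of the verdict order the RX tables implement; the enum, function, and parameter names are hypothetical and are not part of the driver:

#include <stdbool.h>

/* Illustrative-only sketch of the RX decrypt verdict order; all names
 * here are hypothetical and do not exist in the mlx5 driver.
 */
enum rx_verdict { RX_TO_KERNEL, RX_DROP };

static enum rx_verdict rx_decrypt_verdict(bool sa_hit, bool decrypt_ok,
                                          bool crypto_mode, bool has_upspec,
                                          bool upspec_match, bool policy_hit,
                                          bool policy_block)
{
        if (!sa_hit)                    /* SA table miss: default destination */
                return RX_TO_KERNEL;
        if (!decrypt_ok)                /* status table: offload failed */
                return RX_DROP;
        if (crypto_mode)                /* crypto offload: kernel finishes it */
                return RX_TO_KERNEL;
        if (has_upspec && !upspec_match)
                return RX_DROP;         /* SA selector miss: count and drop */
        if (policy_hit && policy_block)
                return RX_DROP;
        return RX_TO_KERNEL;            /* allow/default or policy miss */
}
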
@@ -941,7 +1275,7 @@ static int tx_create(struct mlx5e_ipsec *ipsec, struct mlx5e_ipsec_tx *tx,
int err;
ipsec_tx_create_attr_set(ipsec, tx, &attr);
- ft = ipsec_ft_create(tx->ns, attr.cnt_level, attr.prio, 1, 0);
+ ft = ipsec_ft_create(tx->ns, attr.cnt_level, attr.prio, 1, 1, 0);
if (IS_ERR(ft))
return PTR_ERR(ft);
tx->ft.status = ft;
@@ -954,7 +1288,7 @@ static int tx_create(struct mlx5e_ipsec *ipsec, struct mlx5e_ipsec_tx *tx,
tx->allow_tunnel_mode = mlx5_eswitch_block_encap(mdev);
if (tx->allow_tunnel_mode)
flags = MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
- ft = ipsec_ft_create(tx->ns, attr.sa_level, attr.prio, 4, flags);
+ ft = ipsec_ft_create(tx->ns, attr.sa_level, attr.prio, 1, 4, flags);
if (IS_ERR(ft)) {
err = PTR_ERR(ft);
goto err_sa_ft;
@@ -982,7 +1316,7 @@ static int tx_create(struct mlx5e_ipsec *ipsec, struct mlx5e_ipsec_tx *tx,
goto connect_roce;
}
- ft = ipsec_ft_create(tx->ns, attr.pol_level, attr.prio, 2, 0);
+ ft = ipsec_ft_create(tx->ns, attr.pol_level, attr.prio, 1, 2, 0);
if (IS_ERR(ft)) {
err = PTR_ERR(ft);
goto err_pol_ft;
@@ -1150,9 +1484,14 @@ static void tx_ft_put_policy(struct mlx5e_ipsec *ipsec, u32 prio, int type)
mutex_unlock(&tx->ft.mutex);
}
-static void setup_fte_addr4(struct mlx5_flow_spec *spec, __be32 *saddr,
- __be32 *daddr)
+static void setup_fte_addr4(struct mlx5_flow_spec *spec,
+ struct mlx5e_ipsec_addr *addrs)
{
+ __be32 *saddr = &addrs->saddr.a4;
+ __be32 *smask = &addrs->smask.m4;
+ __be32 *daddr = &addrs->daddr.a4;
+ __be32 *dmask = &addrs->dmask.m4;
+
if (!*saddr && !*daddr)
return;
@@ -1164,21 +1503,26 @@ static void setup_fte_addr4(struct mlx5_flow_spec *spec, __be32 *saddr,
if (*saddr) {
memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4), saddr, 4);
- MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
- outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4);
+ memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
+ outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4), smask, 4);
}
if (*daddr) {
memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4), daddr, 4);
- MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
- outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
+ memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
+ outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4), dmask, 4);
}
}
-static void setup_fte_addr6(struct mlx5_flow_spec *spec, __be32 *saddr,
- __be32 *daddr)
+static void setup_fte_addr6(struct mlx5_flow_spec *spec,
+ struct mlx5e_ipsec_addr *addrs)
{
+ __be32 *saddr = addrs->saddr.a6;
+ __be32 *smask = addrs->smask.m6;
+ __be32 *daddr = addrs->daddr.a6;
+ __be32 *dmask = addrs->dmask.m6;
+
if (addr6_all_zero(saddr) && addr6_all_zero(daddr))
return;
@@ -1190,15 +1534,15 @@ static void setup_fte_addr6(struct mlx5_flow_spec *spec, __be32 *saddr,
if (!addr6_all_zero(saddr)) {
memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6), saddr, 16);
- memset(MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
- outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6), 0xff, 16);
+ memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
+ outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6), smask, 16);
}
if (!addr6_all_zero(daddr)) {
memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6), daddr, 16);
- memset(MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
- outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6), 0xff, 16);
+ memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
+ outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6), dmask, 16);
}
}
@@ -1340,7 +1684,8 @@ static int setup_modify_header(struct mlx5e_ipsec *ipsec, int type, u32 val, u8
MLX5_ACTION_TYPE_SET);
MLX5_SET(set_action_in, action[2], field,
MLX5_ACTION_IN_FIELD_METADATA_REG_C_4);
- MLX5_SET(set_action_in, action[2], data, 0);
+ MLX5_SET(set_action_in, action[2], data,
+ MLX5_IPSEC_ASO_SW_CRYPTO_OFFLOAD);
MLX5_SET(set_action_in, action[2], offset, 0);
MLX5_SET(set_action_in, action[2], length, 32);
}
@@ -1387,7 +1732,7 @@ setup_pkt_tunnel_reformat(struct mlx5_core_dev *mdev,
if (attrs->dir == XFRM_DEV_OFFLOAD_OUT) {
bfflen += sizeof(*esp_hdr) + 8;
- switch (attrs->family) {
+ switch (attrs->addrs.family) {
case AF_INET:
bfflen += sizeof(*iphdr);
break;
@@ -1404,7 +1749,7 @@ setup_pkt_tunnel_reformat(struct mlx5_core_dev *mdev,
return -ENOMEM;
eth_hdr = (struct ethhdr *)reformatbf;
- switch (attrs->family) {
+ switch (attrs->addrs.family) {
case AF_INET:
eth_hdr->h_proto = htons(ETH_P_IP);
break;
@@ -1427,11 +1772,11 @@ setup_pkt_tunnel_reformat(struct mlx5_core_dev *mdev,
reformat_params->param_0 = attrs->authsize;
hdr = reformatbf + sizeof(*eth_hdr);
- switch (attrs->family) {
+ switch (attrs->addrs.family) {
case AF_INET:
iphdr = (struct iphdr *)hdr;
- memcpy(&iphdr->saddr, &attrs->saddr.a4, 4);
- memcpy(&iphdr->daddr, &attrs->daddr.a4, 4);
+ memcpy(&iphdr->saddr, &attrs->addrs.saddr.a4, 4);
+ memcpy(&iphdr->daddr, &attrs->addrs.daddr.a4, 4);
iphdr->version = 4;
iphdr->ihl = 5;
iphdr->ttl = IPSEC_TUNNEL_DEFAULT_TTL;
@@ -1440,8 +1785,8 @@ setup_pkt_tunnel_reformat(struct mlx5_core_dev *mdev,
break;
case AF_INET6:
ipv6hdr = (struct ipv6hdr *)hdr;
- memcpy(&ipv6hdr->saddr, &attrs->saddr.a6, 16);
- memcpy(&ipv6hdr->daddr, &attrs->daddr.a6, 16);
+ memcpy(&ipv6hdr->saddr, &attrs->addrs.saddr.a6, 16);
+ memcpy(&ipv6hdr->daddr, &attrs->addrs.daddr.a6, 16);
ipv6hdr->nexthdr = IPPROTO_ESP;
ipv6hdr->version = 6;
ipv6hdr->hop_limit = IPSEC_TUNNEL_DEFAULT_TTL;
@@ -1475,7 +1820,7 @@ static int get_reformat_type(struct mlx5_accel_esp_xfrm_attrs *attrs)
return MLX5_REFORMAT_TYPE_DEL_ESP_TRANSPORT_OVER_UDP;
return MLX5_REFORMAT_TYPE_DEL_ESP_TRANSPORT;
case XFRM_DEV_OFFLOAD_OUT:
- if (attrs->family == AF_INET) {
+ if (attrs->addrs.family == AF_INET) {
if (attrs->encap)
return MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_UDPV4;
return MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_IPV4;
@@ -1576,6 +1921,85 @@ static int setup_pkt_reformat(struct mlx5e_ipsec *ipsec,
return 0;
}
+static int rx_add_rule_sa_selector(struct mlx5e_ipsec_sa_entry *sa_entry,
+ struct mlx5e_ipsec_rx *rx,
+ struct upspec *upspec)
+{
+ struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
+ struct mlx5_core_dev *mdev = ipsec->mdev;
+ struct mlx5_flow_destination dest[2];
+ struct mlx5_flow_act flow_act = {};
+ struct mlx5_flow_handle *rule;
+ struct mlx5_flow_spec *spec;
+ int err = 0;
+
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec)
+ return -ENOMEM;
+
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+ misc_parameters_2.ipsec_syndrome);
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+ misc_parameters_2.metadata_reg_c_4);
+ MLX5_SET(fte_match_param, spec->match_value,
+ misc_parameters_2.ipsec_syndrome, 0);
+ MLX5_SET(fte_match_param, spec->match_value,
+ misc_parameters_2.metadata_reg_c_4, 0);
+ spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
+
+ ipsec_rx_rule_add_match_obj(sa_entry, rx, spec);
+
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
+ MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ flow_act.flags = FLOW_ACT_IGNORE_FLOW_LEVEL;
+ dest[0].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest[0].ft = rx->ft.sa_sel;
+ dest[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+ dest[1].counter = rx->fc->cnt;
+
+ rule = mlx5_add_flow_rules(rx->ft.status, spec, &flow_act, dest, 2);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ mlx5_core_err(mdev,
+ "Failed to add ipsec rx pass rule, err=%d\n",
+ err);
+ goto err_add_status_pass_rule;
+ }
+
+ sa_entry->ipsec_rule.status_pass = rule;
+
+ MLX5_SET(fte_match_param, spec->match_criteria,
+ misc_parameters_2.ipsec_syndrome, 0);
+ MLX5_SET(fte_match_param, spec->match_criteria,
+ misc_parameters_2.metadata_reg_c_4, 0);
+
+ setup_fte_upper_proto_match(spec, upspec);
+
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ dest[0].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest[0].ft = rx->ft.pol;
+
+ rule = mlx5_add_flow_rules(rx->ft.sa_sel, spec, &flow_act, &dest[0], 1);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ mlx5_core_err(mdev,
+ "Failed to add ipsec rx sa selector rule, err=%d\n",
+ err);
+ goto err_add_sa_sel_rule;
+ }
+
+ sa_entry->ipsec_rule.sa_sel = rule;
+
+ kvfree(spec);
+ return 0;
+
+err_add_sa_sel_rule:
+ mlx5_del_flow_rules(sa_entry->ipsec_rule.status_pass);
+err_add_status_pass_rule:
+ kvfree(spec);
+ return err;
+}
+
static int rx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
{
struct mlx5_accel_esp_xfrm_attrs *attrs = &sa_entry->attrs;
@@ -1589,7 +2013,7 @@ static int rx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
struct mlx5_fc *counter;
int err = 0;
- rx = rx_ft_get(mdev, ipsec, attrs->family, attrs->type);
+ rx = rx_ft_get(mdev, ipsec, attrs->addrs.family, attrs->type);
if (IS_ERR(rx))
return PTR_ERR(rx);
@@ -1599,16 +2023,15 @@ static int rx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
goto err_alloc;
}
- if (attrs->family == AF_INET)
- setup_fte_addr4(spec, &attrs->saddr.a4, &attrs->daddr.a4);
+ if (attrs->addrs.family == AF_INET)
+ setup_fte_addr4(spec, &attrs->addrs);
else
- setup_fte_addr6(spec, attrs->saddr.a6, attrs->daddr.a6);
+ setup_fte_addr6(spec, &attrs->addrs);
setup_fte_spi(spec, attrs->spi, attrs->encap);
if (!attrs->encap)
setup_fte_esp(spec);
setup_fte_no_frags(spec);
- setup_fte_upper_proto_match(spec, &attrs->upspec);
if (!attrs->drop) {
if (rx != ipsec->rx_esw)
@@ -1656,6 +2079,13 @@ static int rx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
mlx5_core_err(mdev, "fail to add RX ipsec rule err=%d\n", err);
goto err_add_flow;
}
+
+ if (attrs->upspec.proto && attrs->type == XFRM_DEV_OFFLOAD_PACKET) {
+ err = rx_add_rule_sa_selector(sa_entry, rx, &attrs->upspec);
+ if (err)
+ goto err_add_sa_sel;
+ }
+
if (attrs->type == XFRM_DEV_OFFLOAD_PACKET)
err = rx_add_rule_drop_replay(sa_entry, rx);
if (err)
@@ -1679,6 +2109,11 @@ err_drop_reason:
mlx5_fc_destroy(mdev, sa_entry->ipsec_rule.replay.fc);
}
err_add_replay:
+ if (sa_entry->ipsec_rule.sa_sel) {
+ mlx5_del_flow_rules(sa_entry->ipsec_rule.sa_sel);
+ mlx5_del_flow_rules(sa_entry->ipsec_rule.status_pass);
+ }
+err_add_sa_sel:
mlx5_del_flow_rules(rule);
err_add_flow:
mlx5_fc_destroy(mdev, counter);
@@ -1691,7 +2126,7 @@ err_pkt_reformat:
err_mod_header:
kvfree(spec);
err_alloc:
- rx_ft_put(ipsec, attrs->family, attrs->type);
+ rx_ft_put(ipsec, attrs->addrs.family, attrs->type);
return err;
}
@@ -1723,10 +2158,10 @@ static int tx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
switch (attrs->type) {
case XFRM_DEV_OFFLOAD_CRYPTO:
- if (attrs->family == AF_INET)
- setup_fte_addr4(spec, &attrs->saddr.a4, &attrs->daddr.a4);
+ if (attrs->addrs.family == AF_INET)
+ setup_fte_addr4(spec, &attrs->addrs);
else
- setup_fte_addr6(spec, attrs->saddr.a6, attrs->daddr.a6);
+ setup_fte_addr6(spec, &attrs->addrs);
setup_fte_spi(spec, attrs->spi, false);
setup_fte_esp(spec);
setup_fte_reg_a(spec);
@@ -1810,10 +2245,10 @@ static int tx_add_policy(struct mlx5e_ipsec_pol_entry *pol_entry)
}
tx = ipsec_tx(ipsec, attrs->type);
- if (attrs->family == AF_INET)
- setup_fte_addr4(spec, &attrs->saddr.a4, &attrs->daddr.a4);
+ if (attrs->addrs.family == AF_INET)
+ setup_fte_addr4(spec, &attrs->addrs);
else
- setup_fte_addr6(spec, attrs->saddr.a6, attrs->daddr.a6);
+ setup_fte_addr6(spec, &attrs->addrs);
setup_fte_no_frags(spec);
setup_fte_upper_proto_match(spec, &attrs->upspec);
@@ -1883,12 +2318,12 @@ static int rx_add_policy(struct mlx5e_ipsec_pol_entry *pol_entry)
struct mlx5e_ipsec_rx *rx;
int err, dstn = 0;
- ft = rx_ft_get_policy(mdev, pol_entry->ipsec, attrs->family, attrs->prio,
- attrs->type);
+ ft = rx_ft_get_policy(mdev, pol_entry->ipsec, attrs->addrs.family,
+ attrs->prio, attrs->type);
if (IS_ERR(ft))
return PTR_ERR(ft);
- rx = ipsec_rx(pol_entry->ipsec, attrs->family, attrs->type);
+ rx = ipsec_rx(pol_entry->ipsec, attrs->addrs.family, attrs->type);
spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
if (!spec) {
@@ -1896,10 +2331,10 @@ static int rx_add_policy(struct mlx5e_ipsec_pol_entry *pol_entry)
goto err_alloc;
}
- if (attrs->family == AF_INET)
- setup_fte_addr4(spec, &attrs->saddr.a4, &attrs->daddr.a4);
+ if (attrs->addrs.family == AF_INET)
+ setup_fte_addr4(spec, &attrs->addrs);
else
- setup_fte_addr6(spec, attrs->saddr.a6, attrs->daddr.a6);
+ setup_fte_addr6(spec, &attrs->addrs);
setup_fte_no_frags(spec);
setup_fte_upper_proto_match(spec, &attrs->upspec);
@@ -1923,8 +2358,7 @@ static int rx_add_policy(struct mlx5e_ipsec_pol_entry *pol_entry)
flow_act.flags |= FLOW_ACT_NO_APPEND;
if (rx == ipsec->rx_esw && rx->chains)
flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
- dest[dstn].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
- dest[dstn].ft = rx->ft.sa;
+ ipsec_rx_default_dest_get(ipsec, rx, &dest[dstn]);
dstn++;
rule = mlx5_add_flow_rules(ft, spec, &flow_act, dest, dstn);
if (IS_ERR(rule)) {
@@ -1940,7 +2374,8 @@ static int rx_add_policy(struct mlx5e_ipsec_pol_entry *pol_entry)
err_action:
kvfree(spec);
err_alloc:
- rx_ft_put_policy(pol_entry->ipsec, attrs->family, attrs->prio, attrs->type);
+ rx_ft_put_policy(pol_entry->ipsec, attrs->addrs.family, attrs->prio,
+ attrs->type);
return err;
}
@@ -2061,6 +2496,7 @@ void mlx5e_accel_ipsec_fs_read_stats(struct mlx5e_priv *priv, void *ipsec_stats)
stats->ipsec_rx_bytes = 0;
stats->ipsec_rx_drop_pkts = 0;
stats->ipsec_rx_drop_bytes = 0;
+ stats->ipsec_rx_drop_mismatch_sa_sel = 0;
stats->ipsec_tx_pkts = 0;
stats->ipsec_tx_bytes = 0;
stats->ipsec_tx_drop_pkts = 0;
@@ -2070,6 +2506,9 @@ void mlx5e_accel_ipsec_fs_read_stats(struct mlx5e_priv *priv, void *ipsec_stats)
mlx5_fc_query(mdev, fc->cnt, &stats->ipsec_rx_pkts, &stats->ipsec_rx_bytes);
mlx5_fc_query(mdev, fc->drop, &stats->ipsec_rx_drop_pkts,
&stats->ipsec_rx_drop_bytes);
+ if (ipsec->rx_ipv4->sa_sel.fc)
+ mlx5_fc_query(mdev, ipsec->rx_ipv4->sa_sel.fc,
+ &stats->ipsec_rx_drop_mismatch_sa_sel, &bytes);
fc = ipsec->tx->fc;
mlx5_fc_query(mdev, fc->cnt, &stats->ipsec_tx_pkts, &stats->ipsec_tx_bytes);
@@ -2098,6 +2537,11 @@ void mlx5e_accel_ipsec_fs_read_stats(struct mlx5e_priv *priv, void *ipsec_stats)
stats->ipsec_tx_drop_pkts += packets;
stats->ipsec_tx_drop_bytes += bytes;
}
+
+ if (ipsec->rx_esw->sa_sel.fc &&
+ !mlx5_fc_query(mdev, ipsec->rx_esw->sa_sel.fc,
+ &packets, &bytes))
+ stats->ipsec_rx_drop_mismatch_sa_sel += packets;
}
}
@@ -2195,12 +2639,18 @@ void mlx5e_accel_ipsec_fs_del_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
mlx5_del_flow_rules(ipsec_rule->auth.rule);
mlx5_fc_destroy(mdev, ipsec_rule->auth.fc);
+ if (ipsec_rule->sa_sel) {
+ mlx5_del_flow_rules(ipsec_rule->sa_sel);
+ mlx5_del_flow_rules(ipsec_rule->status_pass);
+ }
+
if (ipsec_rule->replay.rule) {
mlx5_del_flow_rules(ipsec_rule->replay.rule);
mlx5_fc_destroy(mdev, ipsec_rule->replay.fc);
}
mlx5_esw_ipsec_rx_id_mapping_remove(sa_entry);
- rx_ft_put(sa_entry->ipsec, sa_entry->attrs.family, sa_entry->attrs.type);
+ rx_ft_put(sa_entry->ipsec, sa_entry->attrs.addrs.family,
+ sa_entry->attrs.type);
}
int mlx5e_accel_ipsec_fs_add_pol(struct mlx5e_ipsec_pol_entry *pol_entry)
@@ -2236,7 +2686,8 @@ void mlx5e_accel_ipsec_fs_del_pol(struct mlx5e_ipsec_pol_entry *pol_entry)
mlx5e_ipsec_unblock_tc_offload(pol_entry->ipsec->mdev);
if (pol_entry->attrs.dir == XFRM_DEV_OFFLOAD_IN) {
- rx_ft_put_policy(pol_entry->ipsec, pol_entry->attrs.family,
+ rx_ft_put_policy(pol_entry->ipsec,
+ pol_entry->attrs.addrs.family,
pol_entry->attrs.prio, pol_entry->attrs.type);
return;
}
@@ -2376,7 +2827,7 @@ bool mlx5e_ipsec_fs_tunnel_enabled(struct mlx5e_ipsec_sa_entry *sa_entry)
struct mlx5e_ipsec_rx *rx;
struct mlx5e_ipsec_tx *tx;
- rx = ipsec_rx(sa_entry->ipsec, attrs->family, attrs->type);
+ rx = ipsec_rx(sa_entry->ipsec, attrs->addrs.family, attrs->type);
tx = ipsec_tx(sa_entry->ipsec, attrs->type);
if (sa_entry->attrs.dir == XFRM_DEV_OFFLOAD_OUT)
return tx->allow_tunnel_mode;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c
index 92bf3fa44a3b..93be388068f8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c
@@ -42,6 +42,7 @@ static const struct counter_desc mlx5e_ipsec_hw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_hw_stats, ipsec_rx_bytes) },
{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_hw_stats, ipsec_rx_drop_pkts) },
{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_hw_stats, ipsec_rx_drop_bytes) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_hw_stats, ipsec_rx_drop_mismatch_sa_sel) },
{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_hw_stats, ipsec_tx_pkts) },
{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_hw_stats, ipsec_tx_bytes) },
{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_hw_stats, ipsec_tx_drop_pkts) },
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index f9113cb13a0c..fdf9e9bb99ac 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -42,6 +42,9 @@
#include "lib/clock.h"
#include "en/fs_ethtool.h"
+#define LANES_UNKNOWN 0
+#define MAX_LANES 8
+
void mlx5e_ethtool_get_drvinfo(struct mlx5e_priv *priv,
struct ethtool_drvinfo *drvinfo)
{
@@ -260,12 +263,10 @@ void mlx5e_build_ptys2ethtool_map(void)
ETHTOOL_LINK_MODE_800000baseVR4_Full_BIT);
}
-static void mlx5e_ethtool_get_speed_arr(struct mlx5_core_dev *mdev,
+static void mlx5e_ethtool_get_speed_arr(bool ext,
struct ptys2ethtool_config **arr,
u32 *size)
{
- bool ext = mlx5_ptys_ext_supported(mdev);
-
*arr = ext ? ptys2ext_ethtool_table : ptys2legacy_ethtool_table;
*size = ext ? ARRAY_SIZE(ptys2ext_ethtool_table) :
ARRAY_SIZE(ptys2legacy_ethtool_table);
@@ -912,37 +913,19 @@ int mlx5e_set_per_queue_coalesce(struct net_device *dev, u32 queue,
return mlx5e_ethtool_set_per_queue_coalesce(priv, queue, coal);
}
-static void ptys2ethtool_supported_link(struct mlx5_core_dev *mdev,
- unsigned long *supported_modes,
- u32 eth_proto_cap)
+static void ptys2ethtool_process_link(u32 eth_eproto, bool ext, bool advertised,
+ unsigned long *modes)
{
- unsigned long proto_cap = eth_proto_cap;
+ unsigned long eproto = eth_eproto;
struct ptys2ethtool_config *table;
u32 max_size;
int proto;
- mlx5e_ethtool_get_speed_arr(mdev, &table, &max_size);
- for_each_set_bit(proto, &proto_cap, max_size)
- bitmap_or(supported_modes, supported_modes,
- table[proto].supported,
- __ETHTOOL_LINK_MODE_MASK_NBITS);
-}
-
-static void ptys2ethtool_adver_link(unsigned long *advertising_modes,
- u32 eth_proto_cap, bool ext)
-{
- unsigned long proto_cap = eth_proto_cap;
- struct ptys2ethtool_config *table;
- u32 max_size;
- int proto;
-
- table = ext ? ptys2ext_ethtool_table : ptys2legacy_ethtool_table;
- max_size = ext ? ARRAY_SIZE(ptys2ext_ethtool_table) :
- ARRAY_SIZE(ptys2legacy_ethtool_table);
-
- for_each_set_bit(proto, &proto_cap, max_size)
- bitmap_or(advertising_modes, advertising_modes,
- table[proto].advertised,
+ mlx5e_ethtool_get_speed_arr(ext, &table, &max_size);
+ for_each_set_bit(proto, &eproto, max_size)
+ bitmap_or(modes, modes,
+ advertised ?
+ table[proto].advertised : table[proto].supported,
__ETHTOOL_LINK_MODE_MASK_NBITS);
}
@@ -1096,50 +1079,53 @@ static void ptys2ethtool_supported_advertised_port(struct mlx5_core_dev *mdev,
}
}
-static void get_speed_duplex(struct net_device *netdev,
- u32 eth_proto_oper, bool force_legacy,
- u16 data_rate_oper,
- struct ethtool_link_ksettings *link_ksettings)
+static void get_link_properties(struct net_device *netdev,
+ u32 eth_proto_oper, bool force_legacy,
+ u16 data_rate_oper,
+ struct ethtool_link_ksettings *link_ksettings)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
- u32 speed = SPEED_UNKNOWN;
+ const struct mlx5_link_info *info;
u8 duplex = DUPLEX_UNKNOWN;
+ u32 speed = SPEED_UNKNOWN;
+ u32 lanes = LANES_UNKNOWN;
if (!netif_carrier_ok(netdev))
goto out;
- speed = mlx5_port_ptys2speed(priv->mdev, eth_proto_oper, force_legacy);
- if (!speed) {
- if (data_rate_oper)
- speed = 100 * data_rate_oper;
- else
- speed = SPEED_UNKNOWN;
- goto out;
+ info = mlx5_port_ptys2info(priv->mdev, eth_proto_oper, force_legacy);
+ if (info) {
+ speed = info->speed;
+ lanes = info->lanes;
+ duplex = DUPLEX_FULL;
+ } else if (data_rate_oper) {
+ speed = 100 * data_rate_oper;
+ lanes = MAX_LANES;
}
- duplex = DUPLEX_FULL;
-
out:
- link_ksettings->base.speed = speed;
link_ksettings->base.duplex = duplex;
+ link_ksettings->base.speed = speed;
+ link_ksettings->lanes = lanes;
}
static void get_supported(struct mlx5_core_dev *mdev, u32 eth_proto_cap,
struct ethtool_link_ksettings *link_ksettings)
{
unsigned long *supported = link_ksettings->link_modes.supported;
- ptys2ethtool_supported_link(mdev, supported, eth_proto_cap);
+ bool ext = mlx5_ptys_ext_supported(mdev);
+
+ ptys2ethtool_process_link(eth_proto_cap, ext, false, supported);
ethtool_link_ksettings_add_link_mode(link_ksettings, supported, Pause);
}
-static void get_advertising(u32 eth_proto_cap, u8 tx_pause, u8 rx_pause,
+static void get_advertising(u32 eth_proto_admin, u8 tx_pause, u8 rx_pause,
struct ethtool_link_ksettings *link_ksettings,
bool ext)
{
unsigned long *advertising = link_ksettings->link_modes.advertising;
- ptys2ethtool_adver_link(advertising, eth_proto_cap, ext);
-
+ ptys2ethtool_process_link(eth_proto_admin, ext, true, advertising);
if (rx_pause)
ethtool_link_ksettings_add_link_mode(link_ksettings, advertising, Pause);
if (tx_pause ^ rx_pause)
@@ -1195,7 +1181,7 @@ static void get_lp_advertising(struct mlx5_core_dev *mdev, u32 eth_proto_lp,
unsigned long *lp_advertising = link_ksettings->link_modes.lp_advertising;
bool ext = mlx5_ptys_ext_supported(mdev);
- ptys2ethtool_adver_link(lp_advertising, eth_proto_lp, ext);
+ ptys2ethtool_process_link(eth_proto_lp, ext, true, lp_advertising);
}
static int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
@@ -1257,8 +1243,8 @@ static int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
get_supported(mdev, eth_proto_cap, link_ksettings);
get_advertising(eth_proto_admin, tx_pause, rx_pause, link_ksettings,
admin_ext);
- get_speed_duplex(priv->netdev, eth_proto_oper, !admin_ext,
- data_rate_oper, link_ksettings);
+ get_link_properties(priv->netdev, eth_proto_oper, !admin_ext,
+ data_rate_oper, link_ksettings);
eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap;
connector_type = connector_type < MLX5E_CONNECTOR_TYPE_NUMBER ?
@@ -1363,28 +1349,22 @@ static bool ext_link_mode_requested(const unsigned long *adver)
return bitmap_intersects(modes, adver, __ETHTOOL_LINK_MODE_MASK_NBITS);
}
-static bool ext_requested(u8 autoneg, const unsigned long *adver, bool ext_supported)
-{
- bool ext_link_mode = ext_link_mode_requested(adver);
-
- return autoneg == AUTONEG_ENABLE ? ext_link_mode : ext_supported;
-}
-
static int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv,
const struct ethtool_link_ksettings *link_ksettings)
{
struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5_port_eth_proto eproto;
+ struct mlx5_link_info info = {};
const unsigned long *adver;
bool an_changes = false;
u8 an_disable_admin;
bool ext_supported;
+ bool ext_requested;
u8 an_disable_cap;
bool an_disable;
u32 link_modes;
u8 an_status;
u8 autoneg;
- u32 speed;
bool ext;
int err;
@@ -1392,13 +1372,15 @@ static int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv,
adver = link_ksettings->link_modes.advertising;
autoneg = link_ksettings->base.autoneg;
- speed = link_ksettings->base.speed;
+ info.speed = link_ksettings->base.speed;
+ info.lanes = link_ksettings->lanes;
ext_supported = mlx5_ptys_ext_supported(mdev);
- ext = ext_requested(autoneg, adver, ext_supported);
- if (!ext_supported && ext)
+ ext_requested = ext_link_mode_requested(adver);
+ if (!ext_supported && ext_requested)
return -EOPNOTSUPP;
+ ext = autoneg == AUTONEG_ENABLE ? ext_requested : ext_supported;
ethtool2ptys_adver_func = ext ? mlx5e_ethtool2ptys_ext_adver_link :
mlx5e_ethtool2ptys_adver_link;
err = mlx5_port_query_eth_proto(mdev, 1, ext, &eproto);
@@ -1408,7 +1390,7 @@ static int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv,
goto out;
}
link_modes = autoneg == AUTONEG_ENABLE ? ethtool2ptys_adver_func(adver) :
- mlx5_port_speed2linkmodes(mdev, speed, !ext);
+ mlx5_port_info2linkmodes(mdev, &info, !ext);
err = mlx5e_speed_validate(priv->netdev, ext, link_modes, autoneg);
if (err)
@@ -1480,18 +1462,27 @@ static int mlx5e_get_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *
{
struct mlx5e_priv *priv = netdev_priv(netdev);
u32 rss_context = rxfh->rss_context;
+ bool symmetric;
int err;
mutex_lock(&priv->state_lock);
err = mlx5e_rx_res_rss_get_rxfh(priv->rx_res, rss_context,
- rxfh->indir, rxfh->key, &rxfh->hfunc);
+ rxfh->indir, rxfh->key, &rxfh->hfunc, &symmetric);
mutex_unlock(&priv->state_lock);
- return err;
+
+ if (err)
+ return err;
+
+ if (symmetric)
+ rxfh->input_xfrm = RXH_XFRM_SYM_OR_XOR;
+
+ return 0;
}
static int mlx5e_set_rxfh(struct net_device *dev, struct ethtool_rxfh_param *rxfh,
struct netlink_ext_ack *extack)
{
+ bool symmetric = rxfh->input_xfrm == RXH_XFRM_SYM_OR_XOR;
struct mlx5e_priv *priv = netdev_priv(dev);
u32 *rss_context = &rxfh->rss_context;
u8 hfunc = rxfh->hfunc;
@@ -1526,7 +1517,8 @@ static int mlx5e_set_rxfh(struct net_device *dev, struct ethtool_rxfh_param *rxf
err = mlx5e_rx_res_rss_set_rxfh(priv->rx_res, *rss_context,
rxfh->indir, rxfh->key,
- hfunc == ETH_RSS_HASH_NO_CHANGE ? NULL : &hfunc);
+ hfunc == ETH_RSS_HASH_NO_CHANGE ? NULL : &hfunc,
+ rxfh->input_xfrm == RXH_XFRM_NO_CHANGE ? NULL : &symmetric);
unlock:
mutex_unlock(&priv->state_lock);
@@ -2040,7 +2032,7 @@ static int mlx5e_get_module_eeprom_by_page(struct net_device *netdev,
if (size_read < 0) {
NL_SET_ERR_MSG_FMT_MOD(
extack,
- "Query module eeprom by page failed, read %u bytes, err %d\n",
+ "Query module eeprom by page failed, read %u bytes, err %d",
i, size_read);
return i;
}
@@ -2629,12 +2621,14 @@ static void mlx5e_get_ts_stats(struct net_device *netdev,
}
const struct ethtool_ops mlx5e_ethtool_ops = {
+ .cap_link_lanes_supported = true,
.cap_rss_ctx_supported = true,
.rxfh_per_ctx_key = true,
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_MAX_FRAMES |
ETHTOOL_COALESCE_USE_ADAPTIVE |
ETHTOOL_COALESCE_USE_CQE,
+ .supported_input_xfrm = RXH_XFRM_SYM_OR_XOR,
.get_drvinfo = mlx5e_get_drvinfo,
.get_link = ethtool_op_get_link,
.get_link_ext_state = mlx5e_get_link_ext_state,
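
As an aside, the two new ethtool_ops capability fields above are opt-in flags consumed by the ethtool core; a minimal sketch of the pattern, with a hypothetical driver and only these fields shown:

#include <linux/ethtool.h>

/* Minimal illustrative sketch (hypothetical driver): with these caps set,
 * the core reports the lane count the driver fills into
 * link_ksettings->lanes and accepts RXH_XFRM_SYM_OR_XOR via set_rxfh.
 */
static const struct ethtool_ops example_ethtool_ops = {
        .cap_link_lanes_supported = true,
        .supported_input_xfrm     = RXH_XFRM_SYM_OR_XOR,
        /* ...remaining callbacks omitted... */
};
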
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
index 773624bb2c5d..d68230a7b9f4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
@@ -884,8 +884,10 @@ static int flow_type_to_traffic_type(u32 flow_type)
case ESP_V6_FLOW:
return MLX5_TT_IPV6_IPSEC_ESP;
case IPV4_FLOW:
+ case IP_USER_FLOW:
return MLX5_TT_IPV4;
case IPV6_FLOW:
+ case IPV6_USER_FLOW:
return MLX5_TT_IPV6;
default:
return -EINVAL;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 2fdc86432ac0..3506024c2453 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -311,8 +311,8 @@ static inline void mlx5e_build_umr_wqe(struct mlx5e_rq *rq,
struct mlx5e_icosq *sq,
struct mlx5e_umr_wqe *wqe)
{
- struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
- struct mlx5_wqe_umr_ctrl_seg *ucseg = &wqe->uctrl;
+ struct mlx5_wqe_ctrl_seg *cseg = &wqe->hdr.ctrl;
+ struct mlx5_wqe_umr_ctrl_seg *ucseg = &wqe->hdr.uctrl;
u16 octowords;
u8 ds_cnt;
@@ -359,7 +359,7 @@ static int mlx5e_rq_shampo_hd_info_alloc(struct mlx5e_rq *rq, int node)
return 0;
err_nomem:
- kvfree(shampo->bitmap);
+ bitmap_free(shampo->bitmap);
kvfree(shampo->pages);
return -ENOMEM;
@@ -367,7 +367,7 @@ err_nomem:
static void mlx5e_rq_shampo_hd_info_free(struct mlx5e_rq *rq)
{
- kvfree(rq->mpwqe.shampo->bitmap);
+ bitmap_free(rq->mpwqe.shampo->bitmap);
kvfree(rq->mpwqe.shampo->pages);
}
@@ -393,7 +393,9 @@ static int mlx5e_rq_alloc_mpwqe_info(struct mlx5e_rq *rq, int node)
bitmap_fill(wi->skip_release_bitmap, rq->mpwqe.pages_per_wqe);
}
- mlx5e_build_umr_wqe(rq, rq->icosq, &rq->mpwqe.umr_wqe);
+ mlx5e_build_umr_wqe(rq, rq->icosq,
+ container_of(&rq->mpwqe.umr_wqe,
+ struct mlx5e_umr_wqe, hdr));
return 0;
}
@@ -2023,41 +2025,12 @@ int mlx5e_open_xdpsq(struct mlx5e_channel *c, struct mlx5e_params *params,
csp.min_inline_mode = sq->min_inline_mode;
set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
- if (param->is_xdp_mb)
- set_bit(MLX5E_SQ_STATE_XDP_MULTIBUF, &sq->state);
-
err = mlx5e_create_sq_rdy(c->mdev, param, &csp, 0, &sq->sqn);
if (err)
goto err_free_xdpsq;
mlx5e_set_xmit_fp(sq, param->is_mpw);
- if (!param->is_mpw && !test_bit(MLX5E_SQ_STATE_XDP_MULTIBUF, &sq->state)) {
- unsigned int ds_cnt = MLX5E_TX_WQE_EMPTY_DS_COUNT + 1;
- unsigned int inline_hdr_sz = 0;
- int i;
-
- if (sq->min_inline_mode != MLX5_INLINE_MODE_NONE) {
- inline_hdr_sz = MLX5E_XDP_MIN_INLINE;
- ds_cnt++;
- }
-
- /* Pre initialize fixed WQE fields */
- for (i = 0; i < mlx5_wq_cyc_get_size(&sq->wq); i++) {
- struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(&sq->wq, i);
- struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
- struct mlx5_wqe_eth_seg *eseg = &wqe->eth;
-
- sq->db.wqe_info[i] = (struct mlx5e_xdp_wqe_info) {
- .num_wqebbs = 1,
- .num_pkts = 1,
- };
-
- cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
- eseg->inline_hdr.sz = cpu_to_be16(inline_hdr_sz);
- }
- }
-
return 0;
err_free_xdpsq:
@@ -4450,9 +4423,9 @@ static netdev_features_t mlx5e_fix_features(struct net_device *netdev,
if (mlx5e_is_uplink_rep(priv)) {
features = mlx5e_fix_uplink_rep_features(netdev, features);
- netdev->netns_local = true;
+ netdev->netns_immutable = true;
} else {
- netdev->netns_local = false;
+ netdev->netns_immutable = false;
}
mutex_unlock(&priv->state_lock);
@@ -5135,11 +5108,9 @@ static int mlx5e_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
struct mlx5e_priv *priv = netdev_priv(dev);
struct mlx5_core_dev *mdev = priv->mdev;
u8 mode, setting;
- int err;
- err = mlx5_eswitch_get_vepa(mdev->priv.eswitch, &setting);
- if (err)
- return err;
+ if (mlx5_eswitch_get_vepa(mdev->priv.eswitch, &setting))
+ return -EOPNOTSUPP;
mode = setting ? BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB;
return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
mode,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index fdff9fd8a89e..2abab241f03b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -65,6 +65,7 @@
#define MLX5E_REP_PARAMS_DEF_LOG_SQ_SIZE \
max(0x7, MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE)
#define MLX5E_REP_PARAMS_DEF_NUM_CHANNELS 1
+#define MLX5E_REP_PARAMS_DEF_LOG_RQ_SIZE 0x8
static const char mlx5e_rep_driver_name[] = "mlx5e_rep";
@@ -855,6 +856,8 @@ static void mlx5e_build_rep_params(struct net_device *netdev)
/* RQ */
mlx5e_build_rq_params(mdev, params);
+ if (!mlx5e_is_uplink_rep(priv) && mlx5_core_is_ecpf(mdev))
+ params->log_rq_mtu_frames = MLX5E_REP_PARAMS_DEF_LOG_RQ_SIZE;
/* If netdev is already registered (e.g. move from nic profile to uplink,
* RTNL lock must be held before triggering netdev notifiers.
@@ -886,6 +889,8 @@ static void mlx5e_build_rep_netdev(struct net_device *netdev,
netdev->ethtool_ops = &mlx5e_rep_ethtool_ops;
netdev->watchdog_timeo = 15 * HZ;
+ if (mlx5_core_is_ecpf(mdev))
+ netdev->tx_queue_len = 1 << MLX5E_REP_PARAMS_DEF_LOG_SQ_SIZE;
#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
netdev->hw_features |= NETIF_F_HW_TC;
@@ -900,7 +905,7 @@ static void mlx5e_build_rep_netdev(struct net_device *netdev,
netdev->features |= netdev->hw_features;
- netdev->netns_local = true;
+ netdev->netns_immutable = true;
}
static int mlx5e_init_rep(struct mlx5_core_dev *mdev,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 1963bc5adb18..5fd70b4d55be 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -631,16 +631,16 @@ static void build_ksm_umr(struct mlx5e_icosq *sq, struct mlx5e_umr_wqe *umr_wqe,
__be32 key, u16 offset, u16 ksm_len)
{
memset(umr_wqe, 0, offsetof(struct mlx5e_umr_wqe, inline_ksms));
- umr_wqe->ctrl.opmod_idx_opcode =
+ umr_wqe->hdr.ctrl.opmod_idx_opcode =
cpu_to_be32((sq->pc << MLX5_WQE_CTRL_WQE_INDEX_SHIFT) |
MLX5_OPCODE_UMR);
- umr_wqe->ctrl.umr_mkey = key;
- umr_wqe->ctrl.qpn_ds = cpu_to_be32((sq->sqn << MLX5_WQE_CTRL_QPN_SHIFT)
+ umr_wqe->hdr.ctrl.umr_mkey = key;
+ umr_wqe->hdr.ctrl.qpn_ds = cpu_to_be32((sq->sqn << MLX5_WQE_CTRL_QPN_SHIFT)
| MLX5E_KSM_UMR_DS_CNT(ksm_len));
- umr_wqe->uctrl.flags = MLX5_UMR_TRANSLATION_OFFSET_EN | MLX5_UMR_INLINE;
- umr_wqe->uctrl.xlt_offset = cpu_to_be16(offset);
- umr_wqe->uctrl.xlt_octowords = cpu_to_be16(ksm_len);
- umr_wqe->uctrl.mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE);
+ umr_wqe->hdr.uctrl.flags = MLX5_UMR_TRANSLATION_OFFSET_EN | MLX5_UMR_INLINE;
+ umr_wqe->hdr.uctrl.xlt_offset = cpu_to_be16(offset);
+ umr_wqe->hdr.uctrl.xlt_octowords = cpu_to_be16(ksm_len);
+ umr_wqe->hdr.uctrl.mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE);
}
static struct mlx5e_frag_page *mlx5e_shampo_hd_to_frag_page(struct mlx5e_rq *rq, int header_index)
@@ -704,7 +704,7 @@ static int mlx5e_build_shampo_hd_umr(struct mlx5e_rq *rq,
shampo->pi = (shampo->pi + ksm_entries) & (shampo->hd_per_wq - 1);
sq->pc += wqe_bbs;
- sq->doorbell_cseg = &umr_wqe->ctrl;
+ sq->doorbell_cseg = &umr_wqe->hdr.ctrl;
return 0;
@@ -814,12 +814,12 @@ static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
bitmap_zero(wi->skip_release_bitmap, rq->mpwqe.pages_per_wqe);
wi->consumed_strides = 0;
- umr_wqe->ctrl.opmod_idx_opcode =
+ umr_wqe->hdr.ctrl.opmod_idx_opcode =
cpu_to_be32((sq->pc << MLX5_WQE_CTRL_WQE_INDEX_SHIFT) |
MLX5_OPCODE_UMR);
offset = (ix * rq->mpwqe.mtts_per_wqe) * sizeof(struct mlx5_mtt) / MLX5_OCTWORD;
- umr_wqe->uctrl.xlt_offset = cpu_to_be16(offset);
+ umr_wqe->hdr.uctrl.xlt_offset = cpu_to_be16(offset);
sq->db.wqe_info[pi] = (struct mlx5e_icosq_wqe_info) {
.wqe_type = MLX5E_ICOSQ_WQE_UMR_RX,
@@ -829,7 +829,7 @@ static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
sq->pc += rq->mpwqe.umr_wqebbs;
- sq->doorbell_cseg = &umr_wqe->ctrl;
+ sq->doorbell_cseg = &umr_wqe->hdr.ctrl;
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
index 1d60465cc2ca..2f7a543feca6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
@@ -166,6 +166,9 @@ mlx5e_test_loopback_validate(struct sk_buff *skb,
struct udphdr *udph;
struct iphdr *iph;
+ if (skb_linearize(skb))
+ goto out;
+
/* We are only going to peek, no need to clone the SKB */
if (MLX5E_TEST_PKT_SIZE - ETH_HLEN > skb_headlen(skb))
goto out;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
index 611ec4b6f370..1c121b435016 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
@@ -37,9 +37,7 @@
#include "en/ptp.h"
#include "en/port.h"
-#ifdef CONFIG_PAGE_POOL_STATS
#include <net/page_pool/helpers.h>
-#endif
void mlx5e_ethtool_put_stat(u64 **data, u64 val)
{
@@ -196,7 +194,6 @@ static const struct counter_desc sw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_arfs_err) },
#endif
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_recover) },
-#ifdef CONFIG_PAGE_POOL_STATS
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_alloc_fast) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_alloc_slow) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_alloc_slow_high_order) },
@@ -208,7 +205,6 @@ static const struct counter_desc sw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_recycle_ring) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_recycle_ring_full) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_recycle_released_ref) },
-#endif
#ifdef CONFIG_MLX5_EN_TLS
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_decrypted_packets) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_decrypted_bytes) },
@@ -377,7 +373,6 @@ static void mlx5e_stats_grp_sw_update_stats_rq_stats(struct mlx5e_sw_stats *s,
s->rx_arfs_err += rq_stats->arfs_err;
#endif
s->rx_recover += rq_stats->recover;
-#ifdef CONFIG_PAGE_POOL_STATS
s->rx_pp_alloc_fast += rq_stats->pp_alloc_fast;
s->rx_pp_alloc_slow += rq_stats->pp_alloc_slow;
s->rx_pp_alloc_empty += rq_stats->pp_alloc_empty;
@@ -389,7 +384,6 @@ static void mlx5e_stats_grp_sw_update_stats_rq_stats(struct mlx5e_sw_stats *s,
s->rx_pp_recycle_ring += rq_stats->pp_recycle_ring;
s->rx_pp_recycle_ring_full += rq_stats->pp_recycle_ring_full;
s->rx_pp_recycle_released_ref += rq_stats->pp_recycle_released_ref;
-#endif
#ifdef CONFIG_MLX5_EN_TLS
s->rx_tls_decrypted_packets += rq_stats->tls_decrypted_packets;
s->rx_tls_decrypted_bytes += rq_stats->tls_decrypted_bytes;
@@ -496,7 +490,6 @@ static void mlx5e_stats_grp_sw_update_stats_qos(struct mlx5e_priv *priv,
}
}
-#ifdef CONFIG_PAGE_POOL_STATS
static void mlx5e_stats_update_stats_rq_page_pool(struct mlx5e_channel *c)
{
struct mlx5e_rq_stats *rq_stats = c->rq.stats;
@@ -519,11 +512,6 @@ static void mlx5e_stats_update_stats_rq_page_pool(struct mlx5e_channel *c)
rq_stats->pp_recycle_ring_full = stats.recycle_stats.ring_full;
rq_stats->pp_recycle_released_ref = stats.recycle_stats.released_refcnt;
}
-#else
-static void mlx5e_stats_update_stats_rq_page_pool(struct mlx5e_channel *c)
-{
-}
-#endif
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(sw)
{
@@ -1227,6 +1215,13 @@ out:
mutex_unlock(&priv->state_lock);
}
+#define PPORT_PHY_LAYER_OFF(c) \
+ MLX5_BYTE_OFF(ppcnt_reg, \
+ counter_set.phys_layer_cntrs.c)
+static const struct counter_desc pport_phy_layer_cntrs_stats_desc[] = {
+ { "link_down_events_phy", PPORT_PHY_LAYER_OFF(link_down_events) }
+};
+
#define PPORT_PHY_STATISTICAL_OFF(c) \
MLX5_BYTE_OFF(ppcnt_reg, \
counter_set.phys_layer_statistical_cntrs.c##_high)
@@ -1243,25 +1238,45 @@ pport_phy_statistical_err_lanes_stats_desc[] = {
{ "rx_err_lane_3_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits_lane3) },
};
+#define PPORT_PHY_RECOVERY_OFF(c) \
+ MLX5_BYTE_OFF(ppcnt_reg, counter_set.phys_layer_recovery_cntrs.c)
+static const struct counter_desc
+pport_phy_recovery_cntrs_stats_desc[] = {
+ { "total_success_recovery_phy",
+ PPORT_PHY_RECOVERY_OFF(total_successful_recovery_events) }
+};
+
+#define NUM_PPORT_PHY_LAYER_COUNTERS \
+ ARRAY_SIZE(pport_phy_layer_cntrs_stats_desc)
#define NUM_PPORT_PHY_STATISTICAL_COUNTERS \
ARRAY_SIZE(pport_phy_statistical_stats_desc)
#define NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS \
ARRAY_SIZE(pport_phy_statistical_err_lanes_stats_desc)
+#define NUM_PPORT_PHY_RECOVERY_COUNTERS \
+ ARRAY_SIZE(pport_phy_recovery_cntrs_stats_desc)
+
+#define NUM_PPORT_PHY_STATISTICAL_LOOPBACK_COUNTERS(dev) \
+ (MLX5_CAP_PCAM_FEATURE(dev, ppcnt_statistical_group) ? \
+ NUM_PPORT_PHY_STATISTICAL_COUNTERS : 0)
+#define NUM_PPORT_PHY_STATISTICAL_PER_LANE_LOOPBACK_COUNTERS(dev) \
+ (MLX5_CAP_PCAM_FEATURE(dev, per_lane_error_counters) ? \
+ NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS : 0)
+#define NUM_PPORT_PHY_RECOVERY_LOOPBACK_COUNTERS(dev) \
+ (MLX5_CAP_PCAM_FEATURE(dev, ppcnt_recovery_counters) ? \
+ NUM_PPORT_PHY_RECOVERY_COUNTERS : 0)
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(phy)
{
struct mlx5_core_dev *mdev = priv->mdev;
int num_stats;
- /* "1" for link_down_events special counter */
- num_stats = 1;
+ num_stats = NUM_PPORT_PHY_LAYER_COUNTERS;
- num_stats += MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group) ?
- NUM_PPORT_PHY_STATISTICAL_COUNTERS : 0;
+ num_stats += NUM_PPORT_PHY_STATISTICAL_LOOPBACK_COUNTERS(mdev);
- num_stats += MLX5_CAP_PCAM_FEATURE(mdev, per_lane_error_counters) ?
- NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS : 0;
+ num_stats += NUM_PPORT_PHY_STATISTICAL_PER_LANE_LOOPBACK_COUNTERS(mdev);
+ num_stats += NUM_PPORT_PHY_RECOVERY_LOOPBACK_COUNTERS(mdev);
return num_stats;
}
@@ -1270,18 +1285,22 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(phy)
struct mlx5_core_dev *mdev = priv->mdev;
int i;
- ethtool_puts(data, "link_down_events_phy");
-
- if (!MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group))
- return;
+ for (i = 0; i < NUM_PPORT_PHY_LAYER_COUNTERS; i++)
+ ethtool_puts(data, pport_phy_layer_cntrs_stats_desc[i].format);
- for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_COUNTERS; i++)
+ for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_LOOPBACK_COUNTERS(mdev); i++)
ethtool_puts(data, pport_phy_statistical_stats_desc[i].format);
- if (MLX5_CAP_PCAM_FEATURE(mdev, per_lane_error_counters))
- for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS; i++)
- ethtool_puts(data,
- pport_phy_statistical_err_lanes_stats_desc[i].format);
+ for (i = 0;
+ i < NUM_PPORT_PHY_STATISTICAL_PER_LANE_LOOPBACK_COUNTERS(mdev);
+ i++)
+ ethtool_puts(data,
+ pport_phy_statistical_err_lanes_stats_desc[i]
+ .format);
+
+ for (i = 0; i < NUM_PPORT_PHY_RECOVERY_LOOPBACK_COUNTERS(mdev); i++)
+ ethtool_puts(data,
+ pport_phy_recovery_cntrs_stats_desc[i].format);
}
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(phy)
@@ -1289,30 +1308,35 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(phy)
struct mlx5_core_dev *mdev = priv->mdev;
int i;
- /* link_down_events_phy has special handling since it is not stored in __be64 format */
- mlx5e_ethtool_put_stat(
- data, MLX5_GET(ppcnt_reg, priv->stats.pport.phy_counters,
- counter_set.phys_layer_cntrs.link_down_events));
-
- if (!MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group))
- return;
+ for (i = 0; i < NUM_PPORT_PHY_LAYER_COUNTERS; i++)
+ mlx5e_ethtool_put_stat(
+ data,
+ MLX5E_READ_CTR32_BE(&priv->stats.pport
+ .phy_counters,
+ pport_phy_layer_cntrs_stats_desc, i));
- for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_COUNTERS; i++)
+ for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_LOOPBACK_COUNTERS(mdev); i++)
mlx5e_ethtool_put_stat(
data,
MLX5E_READ_CTR64_BE(
&priv->stats.pport.phy_statistical_counters,
pport_phy_statistical_stats_desc, i));
- if (MLX5_CAP_PCAM_FEATURE(mdev, per_lane_error_counters))
- for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS; i++)
- mlx5e_ethtool_put_stat(
- data,
- MLX5E_READ_CTR64_BE(
- &priv->stats.pport
- .phy_statistical_counters,
- pport_phy_statistical_err_lanes_stats_desc,
- i));
+ for (i = 0;
+ i < NUM_PPORT_PHY_STATISTICAL_PER_LANE_LOOPBACK_COUNTERS(mdev);
+ i++)
+ mlx5e_ethtool_put_stat(
+ data,
+ MLX5E_READ_CTR64_BE(
+ &priv->stats.pport.phy_statistical_counters,
+ pport_phy_statistical_err_lanes_stats_desc, i));
+
+ for (i = 0; i < NUM_PPORT_PHY_RECOVERY_LOOPBACK_COUNTERS(mdev); i++)
+ mlx5e_ethtool_put_stat(
+ data,
+ MLX5E_READ_CTR32_BE(
+ &priv->stats.pport.phy_recovery_counters,
+ pport_phy_recovery_cntrs_stats_desc, i));
}
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(phy)
@@ -1328,12 +1352,21 @@ static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(phy)
MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_COUNTERS_GROUP);
mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
- if (!MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group))
- return;
+ if (MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group)) {
+ out = pstats->phy_statistical_counters;
+ MLX5_SET(ppcnt_reg, in, grp,
+ MLX5_PHYSICAL_LAYER_STATISTICAL_GROUP);
+ mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0,
+ 0);
+ }
- out = pstats->phy_statistical_counters;
- MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_STATISTICAL_GROUP);
- mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
+ if (MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_recovery_counters)) {
+ out = pstats->phy_recovery_counters;
+ MLX5_SET(ppcnt_reg, in, grp,
+ MLX5_PHYSICAL_LAYER_RECOVERY_GROUP);
+ mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0,
+ 0);
+ }
}
void mlx5e_get_link_ext_stats(struct net_device *dev,
@@ -2086,7 +2119,6 @@ static const struct counter_desc rq_stats_desc[] = {
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, arfs_err) },
#endif
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, recover) },
-#ifdef CONFIG_PAGE_POOL_STATS
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_alloc_fast) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_alloc_slow) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_alloc_slow_high_order) },
@@ -2098,7 +2130,6 @@ static const struct counter_desc rq_stats_desc[] = {
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_recycle_ring) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_recycle_ring_full) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_recycle_released_ref) },
-#endif
#ifdef CONFIG_MLX5_EN_TLS
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_decrypted_packets) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_decrypted_bytes) },
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
index 5961c569cfe0..8de6fcbd3a03 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
@@ -215,7 +215,6 @@ struct mlx5e_sw_stats {
u64 ch_aff_change;
u64 ch_force_irq;
u64 ch_eq_rearm;
-#ifdef CONFIG_PAGE_POOL_STATS
u64 rx_pp_alloc_fast;
u64 rx_pp_alloc_slow;
u64 rx_pp_alloc_slow_high_order;
@@ -227,7 +226,6 @@ struct mlx5e_sw_stats {
u64 rx_pp_recycle_ring;
u64 rx_pp_recycle_ring_full;
u64 rx_pp_recycle_released_ref;
-#endif
#ifdef CONFIG_MLX5_EN_TLS
u64 tx_tls_encrypted_packets;
u64 tx_tls_encrypted_bytes;
@@ -309,6 +307,9 @@ struct mlx5e_vport_stats {
#define PPORT_PHY_STATISTICAL_GET(pstats, c) \
MLX5_GET64(ppcnt_reg, (pstats)->phy_statistical_counters, \
counter_set.phys_layer_statistical_cntrs.c##_high)
+#define PPORT_PHY_RECOVERY_GET(pstats, c) \
+ MLX5_GET64(ppcnt_reg, (pstats)->phy_recovery_counters, \
+ counter_set.phys_layer_recovery_cntrs.c)
#define PPORT_PER_PRIO_GET(pstats, prio, c) \
MLX5_GET64(ppcnt_reg, pstats->per_prio_counters[prio], \
counter_set.eth_per_prio_grp_data_layout.c##_high)
@@ -324,6 +325,7 @@ struct mlx5e_pport_stats {
__be64 per_prio_counters[NUM_PPORT_PRIO][MLX5_ST_SZ_QW(ppcnt_reg)];
__be64 phy_counters[MLX5_ST_SZ_QW(ppcnt_reg)];
__be64 phy_statistical_counters[MLX5_ST_SZ_QW(ppcnt_reg)];
+ __be64 phy_recovery_counters[MLX5_ST_SZ_QW(ppcnt_reg)];
__be64 eth_ext_counters[MLX5_ST_SZ_QW(ppcnt_reg)];
__be64 per_tc_prio_counters[NUM_PPORT_PRIO][MLX5_ST_SZ_QW(ppcnt_reg)];
__be64 per_tc_congest_prio_counters[NUM_PPORT_PRIO][MLX5_ST_SZ_QW(ppcnt_reg)];
@@ -381,7 +383,6 @@ struct mlx5e_rq_stats {
u64 arfs_err;
#endif
u64 recover;
-#ifdef CONFIG_PAGE_POOL_STATS
u64 pp_alloc_fast;
u64 pp_alloc_slow;
u64 pp_alloc_slow_high_order;
@@ -393,7 +394,6 @@ struct mlx5e_rq_stats {
u64 pp_recycle_ring;
u64 pp_recycle_ring_full;
u64 pp_recycle_released_ref;
-#endif
#ifdef CONFIG_MLX5_EN_TLS
u64 tls_decrypted_packets;
u64 tls_decrypted_bytes;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index f8c7912abe0e..4fd853d19e31 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -525,9 +525,9 @@ static void mlx5e_tx_mpwqe_session_start(struct mlx5e_txqsq *sq,
{
struct mlx5e_tx_mpwqe *session = &sq->mpwqe;
struct mlx5e_tx_wqe *wqe;
- u16 pi;
+ u16 pi, num_wqebbs;
- pi = mlx5e_txqsq_get_next_pi(sq, sq->max_sq_mpw_wqebbs);
+ pi = mlx5e_txqsq_get_next_pi_anysize(sq, &num_wqebbs);
wqe = MLX5E_TX_FETCH_WQE(sq, pi);
net_prefetchw(wqe->data);
@@ -535,6 +535,7 @@ static void mlx5e_tx_mpwqe_session_start(struct mlx5e_txqsq *sq,
.wqe = wqe,
.bytes_count = 0,
.ds_count = MLX5E_TX_WQE_EMPTY_DS_COUNT,
+ .ds_count_max = num_wqebbs * MLX5_SEND_WQEBB_NUM_DS,
.pkt_count = 0,
.inline_on = 0,
};
@@ -626,7 +627,7 @@ mlx5e_sq_xmit_mpwqe(struct mlx5e_txqsq *sq, struct sk_buff *skb,
mlx5e_tx_mpwqe_add_dseg(sq, &txd);
mlx5e_tx_skb_update_hwts_flags(skb);
- if (unlikely(mlx5e_tx_mpwqe_is_full(&sq->mpwqe, sq->max_sq_mpw_wqebbs))) {
+ if (unlikely(mlx5e_tx_mpwqe_is_full(&sq->mpwqe))) {
/* Might stop the queue and affect the retval of __netdev_tx_sent_queue. */
cseg = mlx5e_tx_mpwqe_session_complete(sq);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index 2b229b6226c6..dfb079e59d85 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -871,8 +871,8 @@ static void comp_irq_release_sf(struct mlx5_core_dev *dev, u16 vecidx)
static int comp_irq_request_sf(struct mlx5_core_dev *dev, u16 vecidx)
{
+ struct mlx5_irq_pool *pool = mlx5_irq_table_get_comp_irq_pool(dev);
struct mlx5_eq_table *table = dev->priv.eq_table;
- struct mlx5_irq_pool *pool = mlx5_irq_pool_get(dev);
struct irq_affinity_desc af_desc = {};
struct mlx5_irq *irq;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/helper.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/helper.c
index d599e50af346..3ce455c2535c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/helper.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/helper.c
@@ -27,7 +27,7 @@ esw_acl_table_create(struct mlx5_eswitch *esw, struct mlx5_vport *vport, int ns,
esw_debug(dev, "Create vport[%d] %s ACL table\n", vport_num,
ns == MLX5_FLOW_NAMESPACE_ESW_INGRESS ? "ingress" : "egress");
- root_ns = mlx5_get_flow_vport_acl_namespace(dev, ns, vport->index);
+ root_ns = mlx5_get_flow_vport_namespace(dev, ns, vport->index);
if (!root_ns) {
esw_warn(dev, "Failed to get E-Switch root namespace for vport (%d)\n",
vport_num);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c
index 5f647358a05c..76e35c827da0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c
@@ -1863,7 +1863,7 @@ int mlx5_esw_bridge_port_mdb_add(struct net_device *dev, u16 vport_num, u16 esw_
"Failed to lookup bridge port to add MDB (MAC=%pM,vport=%u)\n",
addr, vport_num);
NL_SET_ERR_MSG_FMT_MOD(extack,
- "Failed to lookup bridge port to add MDB (MAC=%pM,vport=%u)\n",
+ "Failed to lookup bridge port to add MDB (MAC=%pM,vport=%u)",
addr, vport_num);
return -EINVAL;
}
@@ -1876,7 +1876,7 @@ int mlx5_esw_bridge_port_mdb_add(struct net_device *dev, u16 vport_num, u16 esw_
"Failed to lookup bridge port vlan metadata to create MDB (MAC=%pM,vid=%u,vport=%u)\n",
addr, vid, vport_num);
NL_SET_ERR_MSG_FMT_MOD(extack,
- "Failed to lookup vlan metadata for MDB (MAC=%pM,vid=%u,vport=%u)\n",
+ "Failed to lookup vlan metadata for MDB (MAC=%pM,vid=%u,vport=%u)",
addr, vid, vport_num);
return -EINVAL;
}
@@ -1884,7 +1884,7 @@ int mlx5_esw_bridge_port_mdb_add(struct net_device *dev, u16 vport_num, u16 esw_
err = mlx5_esw_bridge_port_mdb_attach(dev, port, addr, vid);
if (err) {
- NL_SET_ERR_MSG_FMT_MOD(extack, "Failed to add MDB (MAC=%pM,vid=%u,vport=%u)\n",
+ NL_SET_ERR_MSG_FMT_MOD(extack, "Failed to add MDB (MAC=%pM,vid=%u,vport=%u)",
addr, vid, vport_num);
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c
index 982fe3714683..b7102e14d23d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c
@@ -32,7 +32,7 @@ static void mlx5_esw_offloads_pf_vf_devlink_port_attrs_set(struct mlx5_eswitch *
u16 pfnum;
mlx5_esw_get_port_parent_id(dev, &ppid);
- pfnum = mlx5_get_dev_index(dev);
+ pfnum = PCI_FUNC(dev->pdev->devfn);
external = mlx5_core_is_ecpf_esw_manager(dev);
if (external)
controller_num = dev->priv.eswitch->offloads.host_number + 1;
@@ -110,7 +110,7 @@ static void mlx5_esw_offloads_sf_devlink_port_attrs_set(struct mlx5_eswitch *esw
struct netdev_phys_item_id ppid = {};
u16 pfnum;
- pfnum = mlx5_get_dev_index(dev);
+ pfnum = PCI_FUNC(dev->pdev->devfn);
mlx5_esw_get_port_parent_id(dev, &ppid);
memcpy(dl_port->attrs.switch_id.id, &ppid.id[0], ppid.id_len);
dl_port->attrs.switch_id.id_len = ppid.id_len;
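The two hunks above swap mlx5_get_dev_index() for PCI_FUNC(dev->pdev->devfn) when deriving pfnum. As a reminder of what that macro extracts, here is a self-contained user-space sketch; the macro bodies are the standard PCI devfn encoding, everything else is illustrative:

#include <stdio.h>

/* devfn packs the device (slot) number in bits 7:3 and the function number
 * in bits 2:0; these are the standard definitions from <linux/pci.h>.
 */
#define PCI_SLOT(devfn) (((devfn) >> 3) & 0x1f)
#define PCI_FUNC(devfn) ((devfn) & 0x07)

int main(void)
{
        unsigned int devfn = (2 << 3) | 1;      /* slot 2, function 1 */

        printf("slot=%u func=%u\n", PCI_SLOT(devfn), PCI_FUNC(devfn));
        return 0;
}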
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c
index ed977ae75fab..3cfe743610d3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c
@@ -10,9 +10,9 @@
#endif
enum {
- MLX5_ESW_IPSEC_RX_POL_FT_LEVEL,
MLX5_ESW_IPSEC_RX_ESP_FT_LEVEL,
MLX5_ESW_IPSEC_RX_ESP_FT_CHK_LEVEL,
+ MLX5_ESW_IPSEC_RX_POL_FT_LEVEL,
};
enum {
@@ -85,6 +85,19 @@ err_header_alloc:
return err;
}
+void mlx5_esw_ipsec_rx_rule_add_match_obj(struct mlx5e_ipsec_sa_entry *sa_entry,
+ struct mlx5_flow_spec *spec)
+{
+ MLX5_SET(fte_match_param, spec->match_criteria,
+ misc_parameters_2.metadata_reg_c_1,
+ ESW_IPSEC_RX_MAPPED_ID_MATCH_MASK);
+ MLX5_SET(fte_match_param, spec->match_value,
+ misc_parameters_2.metadata_reg_c_1,
+ sa_entry->rx_mapped_id << ESW_ZONE_ID_BITS);
+
+ spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
+}
+
void mlx5_esw_ipsec_rx_id_mapping_remove(struct mlx5e_ipsec_sa_entry *sa_entry)
{
struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.h b/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.h
index ac9c65b89166..514c15258b1d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.h
@@ -20,6 +20,8 @@ int mlx5_esw_ipsec_rx_ipsec_obj_id_search(struct mlx5e_priv *priv, u32 id,
void mlx5_esw_ipsec_tx_create_attr_set(struct mlx5e_ipsec *ipsec,
struct mlx5e_ipsec_tx_create_attr *attr);
void mlx5_esw_ipsec_restore_dest_uplink(struct mlx5_core_dev *mdev);
+void mlx5_esw_ipsec_rx_rule_add_match_obj(struct mlx5e_ipsec_sa_entry *sa_entry,
+ struct mlx5_flow_spec *spec);
#else
static inline void mlx5_esw_ipsec_rx_create_attr_set(struct mlx5e_ipsec *ipsec,
struct mlx5e_ipsec_rx_create_attr *attr) {}
@@ -48,5 +50,8 @@ static inline void mlx5_esw_ipsec_tx_create_attr_set(struct mlx5e_ipsec *ipsec,
struct mlx5e_ipsec_tx_create_attr *attr) {}
static inline void mlx5_esw_ipsec_restore_dest_uplink(struct mlx5_core_dev *mdev) {}
+static inline void
+mlx5_esw_ipsec_rx_rule_add_match_obj(struct mlx5e_ipsec_sa_entry *sa_entry,
+ struct mlx5_flow_spec *spec) {}
#endif /* CONFIG_MLX5_ESWITCH */
#endif /* __MLX5_ESW_IPSEC_FS_H__ */
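mlx5_esw_ipsec_rx_rule_add_match_obj(), added above, matches the mapped SA id that was stored in metadata register C1, shifted above the zone-id bits. A minimal stand-alone sketch of that mask/value relationship follows; the 8-bit zone width and the mask are assumptions for illustration, not the driver's definitions:

#include <stdint.h>
#include <stdio.h>

#define ZONE_ID_BITS            8                               /* assumed width of the low zone-id field */
#define MAPPED_ID_MATCH_MASK    (0xffffffffu << ZONE_ID_BITS)   /* match everything above it */

/* A rule installed with (mask, value) = (MAPPED_ID_MATCH_MASK, id << ZONE_ID_BITS)
 * matches a packet when the masked register equals the shifted id.
 */
static int rule_matches(uint32_t reg_c_1, uint32_t mapped_id)
{
        return (reg_c_1 & MAPPED_ID_MATCH_MASK) == (mapped_id << ZONE_ID_BITS);
}

int main(void)
{
        uint32_t id = 0x42;
        uint32_t reg = (id << ZONE_ID_BITS) | 0x07;     /* low zone bits do not affect the match */

        printf("match: %d\n", rule_matches(reg, id));
        return 0;
}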
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c
index 45183de424f3..76382626ad41 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c
@@ -96,7 +96,7 @@ static int esw_create_legacy_fdb_table(struct mlx5_eswitch *esw)
if (!flow_group_in)
return -ENOMEM;
- ft_attr.max_fte = POOL_NEXT_SIZE;
+ ft_attr.max_fte = MLX5_FS_MAX_POOL_SIZE;
ft_attr.prio = LEGACY_FDB_PRIO;
fdb = mlx5_create_flow_table(root_ns, &ft_attr);
if (IS_ERR(fdb)) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
index 823c1ba456cd..b6ae384396b3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
@@ -90,15 +90,30 @@ struct mlx5_esw_sched_node {
struct list_head children;
/* Valid only if this node is associated with a vport. */
struct mlx5_vport *vport;
+ /* Level in the hierarchy. The root node level is 1. */
+ u8 level;
};
+static void esw_qos_node_attach_to_parent(struct mlx5_esw_sched_node *node)
+{
+ if (!node->parent) {
+ /* Root children are assigned a depth level of 2. */
+ node->level = 2;
+ list_add_tail(&node->entry, &node->esw->qos.domain->nodes);
+ } else {
+ node->level = node->parent->level + 1;
+ list_add_tail(&node->entry, &node->parent->children);
+ }
+}
+
static void
esw_qos_node_set_parent(struct mlx5_esw_sched_node *node, struct mlx5_esw_sched_node *parent)
{
list_del_init(&node->entry);
node->parent = parent;
- list_add_tail(&node->entry, &parent->children);
- node->esw = parent->esw;
+ if (parent)
+ node->esw = parent->esw;
+ esw_qos_node_attach_to_parent(node);
}
void mlx5_esw_qos_vport_qos_free(struct mlx5_vport *vport)
@@ -305,8 +320,9 @@ static int esw_qos_set_node_min_rate(struct mlx5_esw_sched_node *node,
return 0;
}
-static int esw_qos_create_node_sched_elem(struct mlx5_core_dev *dev, u32 parent_element_id,
- u32 *tsar_ix)
+static int
+esw_qos_create_node_sched_elem(struct mlx5_core_dev *dev, u32 parent_element_id,
+ u32 max_rate, u32 bw_share, u32 *tsar_ix)
{
u32 tsar_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {};
void *attr;
@@ -323,6 +339,8 @@ static int esw_qos_create_node_sched_elem(struct mlx5_core_dev *dev, u32 parent_
SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR);
MLX5_SET(scheduling_context, tsar_ctx, parent_element_id,
parent_element_id);
+ MLX5_SET(scheduling_context, tsar_ctx, max_average_bw, max_rate);
+ MLX5_SET(scheduling_context, tsar_ctx, bw_share, bw_share);
attr = MLX5_ADDR_OF(scheduling_context, tsar_ctx, element_attributes);
MLX5_SET(tsar_element, attr, tsar_type, TSAR_ELEMENT_TSAR_TYPE_DWRR);
@@ -358,7 +376,6 @@ static struct mlx5_esw_sched_node *
__esw_qos_alloc_node(struct mlx5_eswitch *esw, u32 tsar_ix, enum sched_node_type type,
struct mlx5_esw_sched_node *parent)
{
- struct list_head *parent_children;
struct mlx5_esw_sched_node *node;
node = kzalloc(sizeof(*node), GFP_KERNEL);
@@ -370,8 +387,7 @@ __esw_qos_alloc_node(struct mlx5_eswitch *esw, u32 tsar_ix, enum sched_node_type
node->type = type;
node->parent = parent;
INIT_LIST_HEAD(&node->children);
- parent_children = parent ? &parent->children : &esw->qos.domain->nodes;
- list_add_tail(&node->entry, parent_children);
+ esw_qos_node_attach_to_parent(node);
return node;
}
@@ -396,7 +412,8 @@ __esw_qos_create_vports_sched_node(struct mlx5_eswitch *esw, struct mlx5_esw_sch
u32 tsar_ix;
int err;
- err = esw_qos_create_node_sched_elem(esw->dev, esw->qos.root_tsar_ix, &tsar_ix);
+ err = esw_qos_create_node_sched_elem(esw->dev, esw->qos.root_tsar_ix, 0,
+ 0, &tsar_ix);
if (err) {
NL_SET_ERR_MSG_MOD(extack, "E-Switch create TSAR for node failed");
return ERR_PTR(err);
@@ -463,7 +480,8 @@ static int esw_qos_create(struct mlx5_eswitch *esw, struct netlink_ext_ack *exta
if (!MLX5_CAP_GEN(dev, qos) || !MLX5_CAP_QOS(dev, esw_scheduling))
return -EOPNOTSUPP;
- err = esw_qos_create_node_sched_elem(esw->dev, 0, &esw->qos.root_tsar_ix);
+ err = esw_qos_create_node_sched_elem(esw->dev, 0, 0, 0,
+ &esw->qos.root_tsar_ix);
if (err) {
esw_warn(dev, "E-Switch create root TSAR failed (%d)\n", err);
return err;
@@ -986,10 +1004,10 @@ int mlx5_esw_qos_vport_update_parent(struct mlx5_vport *vport, struct mlx5_esw_s
return err;
}
-int mlx5_esw_devlink_rate_parent_set(struct devlink_rate *devlink_rate,
- struct devlink_rate *parent,
- void *priv, void *parent_priv,
- struct netlink_ext_ack *extack)
+int mlx5_esw_devlink_rate_leaf_parent_set(struct devlink_rate *devlink_rate,
+ struct devlink_rate *parent,
+ void *priv, void *parent_priv,
+ struct netlink_ext_ack *extack)
{
struct mlx5_esw_sched_node *node;
struct mlx5_vport *vport = priv;
@@ -1000,3 +1018,105 @@ int mlx5_esw_devlink_rate_parent_set(struct devlink_rate *devlink_rate,
node = parent_priv;
return mlx5_esw_qos_vport_update_parent(vport, node, extack);
}
+
+static int
+mlx5_esw_qos_node_validate_set_parent(struct mlx5_esw_sched_node *node,
+ struct mlx5_esw_sched_node *parent,
+ struct netlink_ext_ack *extack)
+{
+ u8 new_level, max_level;
+
+ if (parent && parent->esw != node->esw) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot assign node to another E-Switch");
+ return -EOPNOTSUPP;
+ }
+
+ if (!list_empty(&node->children)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot reassign a node that contains rate objects");
+ return -EOPNOTSUPP;
+ }
+
+ new_level = parent ? parent->level + 1 : 2;
+ max_level = 1 << MLX5_CAP_QOS(node->esw->dev, log_esw_max_sched_depth);
+ if (new_level > max_level) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Node hierarchy depth exceeds the maximum supported level");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int esw_qos_vports_node_update_parent(struct mlx5_esw_sched_node *node,
+ struct mlx5_esw_sched_node *parent,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_esw_sched_node *curr_parent = node->parent;
+ struct mlx5_eswitch *esw = node->esw;
+ u32 parent_ix;
+ int err;
+
+ parent_ix = parent ? parent->ix : node->esw->qos.root_tsar_ix;
+ mlx5_destroy_scheduling_element_cmd(esw->dev,
+ SCHEDULING_HIERARCHY_E_SWITCH,
+ node->ix);
+ err = esw_qos_create_node_sched_elem(esw->dev, parent_ix,
+ node->max_rate, 0, &node->ix);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed to create a node under the new hierarchy.");
+ if (esw_qos_create_node_sched_elem(esw->dev, curr_parent->ix,
+ node->max_rate,
+ node->bw_share,
+ &node->ix))
+ esw_warn(esw->dev, "Node restore QoS failed\n");
+
+ return err;
+ }
+ esw_qos_node_set_parent(node, parent);
+
+ return 0;
+}
+
+static int mlx5_esw_qos_node_update_parent(struct mlx5_esw_sched_node *node,
+ struct mlx5_esw_sched_node *parent,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_esw_sched_node *curr_parent;
+ struct mlx5_eswitch *esw = node->esw;
+ int err;
+
+ err = mlx5_esw_qos_node_validate_set_parent(node, parent, extack);
+ if (err)
+ return err;
+
+ esw_qos_lock(esw);
+ curr_parent = node->parent;
+ err = esw_qos_vports_node_update_parent(node, parent, extack);
+ if (err)
+ goto out;
+
+ esw_qos_normalize_min_rate(esw, curr_parent, extack);
+ esw_qos_normalize_min_rate(esw, parent, extack);
+
+out:
+ esw_qos_unlock(esw);
+
+ return err;
+}
+
+int mlx5_esw_devlink_rate_node_parent_set(struct devlink_rate *devlink_rate,
+ struct devlink_rate *parent,
+ void *priv, void *parent_priv,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_esw_sched_node *node = priv, *parent_node;
+
+ if (!parent)
+ return mlx5_esw_qos_node_update_parent(node, NULL, extack);
+
+ parent_node = parent_priv;
+ return mlx5_esw_qos_node_update_parent(node, parent_node, extack);
+}
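The new level bookkeeping above feeds mlx5_esw_qos_node_validate_set_parent(): a node attached directly under the root sits at level 2, a child is one level deeper than its parent, and the result must not exceed 2^log_esw_max_sched_depth. A tiny stand-alone sketch of that rule; names, and the use of level 0 for "no parent", are illustrative only:

#include <stdio.h>

/* Returns 1 when a node may be attached under a parent at parent_level
 * (0 meaning "directly under the root"), given the device's
 * log_esw_max_sched_depth capability.
 */
static int node_level_ok(unsigned int parent_level, unsigned int log_max_depth)
{
        unsigned int new_level = parent_level ? parent_level + 1 : 2;
        unsigned int max_level = 1u << log_max_depth;

        return new_level <= max_level;
}

int main(void)
{
        /* With log_esw_max_sched_depth = 2 the deepest allowed level is 4:
         * a child of a level-3 node fits, a child of a level-4 node does not.
         */
        printf("%d %d\n", node_level_ok(3, 2), node_level_ok(4, 2));
        return 0;
}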
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.h b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.h
index 6eb8f6a648c8..ed40ec8f027e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.h
@@ -29,10 +29,14 @@ int mlx5_esw_devlink_rate_node_new(struct devlink_rate *rate_node, void **priv,
struct netlink_ext_ack *extack);
int mlx5_esw_devlink_rate_node_del(struct devlink_rate *rate_node, void *priv,
struct netlink_ext_ack *extack);
-int mlx5_esw_devlink_rate_parent_set(struct devlink_rate *devlink_rate,
- struct devlink_rate *parent,
- void *priv, void *parent_priv,
- struct netlink_ext_ack *extack);
+int mlx5_esw_devlink_rate_leaf_parent_set(struct devlink_rate *devlink_rate,
+ struct devlink_rate *parent,
+ void *priv, void *parent_priv,
+ struct netlink_ext_ack *extack);
+int mlx5_esw_devlink_rate_node_parent_set(struct devlink_rate *devlink_rate,
+ struct devlink_rate *parent,
+ void *priv, void *parent_priv,
+ struct netlink_ext_ack *extack);
#endif
#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 20cc01ceee8a..a6a8eea5980c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -648,6 +648,7 @@ esw_setup_meter(struct mlx5_flow_attr *attr, struct mlx5_flow_act *flow_act)
meter = attr->meter_attr.meter;
flow_act->exe_aso.type = attr->exe_aso_type;
flow_act->exe_aso.object_id = meter->obj_id;
+ flow_act->exe_aso.base_id = mlx5e_flow_meter_get_base_id(meter);
flow_act->exe_aso.flow_meter.meter_idx = meter->idx;
flow_act->exe_aso.flow_meter.init_color = MLX5_FLOW_METER_COLOR_GREEN;
/* use metadata reg 5 for packet color */
@@ -2828,9 +2829,9 @@ static int esw_set_master_egress_rule(struct mlx5_core_dev *master,
if (IS_ERR(vport))
return PTR_ERR(vport);
- egress_ns = mlx5_get_flow_vport_acl_namespace(master,
- MLX5_FLOW_NAMESPACE_ESW_EGRESS,
- vport->index);
+ egress_ns = mlx5_get_flow_vport_namespace(master,
+ MLX5_FLOW_NAMESPACE_ESW_EGRESS,
+ vport->index);
if (!egress_ns)
return -EINVAL;
@@ -4157,37 +4158,12 @@ u32 mlx5_eswitch_get_vport_metadata_for_match(struct mlx5_eswitch *esw,
}
EXPORT_SYMBOL(mlx5_eswitch_get_vport_metadata_for_match);
-static int mlx5_esw_query_vport_vhca_id(struct mlx5_eswitch *esw, u16 vport_num, u16 *vhca_id)
-{
- int query_out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
- void *query_ctx;
- void *hca_caps;
- int err;
-
- *vhca_id = 0;
-
- query_ctx = kzalloc(query_out_sz, GFP_KERNEL);
- if (!query_ctx)
- return -ENOMEM;
-
- err = mlx5_vport_get_other_func_general_cap(esw->dev, vport_num, query_ctx);
- if (err)
- goto out_free;
-
- hca_caps = MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability);
- *vhca_id = MLX5_GET(cmd_hca_cap, hca_caps, vhca_id);
-
-out_free:
- kfree(query_ctx);
- return err;
-}
-
int mlx5_esw_vport_vhca_id_set(struct mlx5_eswitch *esw, u16 vport_num)
{
u16 *old_entry, *vhca_map_entry, vhca_id;
int err;
- err = mlx5_esw_query_vport_vhca_id(esw, vport_num, &vhca_id);
+ err = mlx5_vport_get_vhca_id(esw->dev, vport_num, &vhca_id);
if (err) {
esw_warn(esw->dev, "Getting vhca_id for vport failed (vport=%u,err=%d)\n",
vport_num, err);
@@ -4213,7 +4189,7 @@ void mlx5_esw_vport_vhca_id_clear(struct mlx5_eswitch *esw, u16 vport_num)
u16 *vhca_map_entry, vhca_id;
int err;
- err = mlx5_esw_query_vport_vhca_id(esw, vport_num, &vhca_id);
+ err = mlx5_vport_get_vhca_id(esw->dev, vport_num, &vhca_id);
if (err)
esw_warn(esw->dev, "Getting vhca_id for vport failed (vport=%hu,err=%d)\n",
vport_num, err);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/events.c b/drivers/net/ethernet/mellanox/mlx5/core/events.c
index d91ea53eb394..01c5f5990f9a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/events.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/events.c
@@ -6,6 +6,7 @@
#include "mlx5_core.h"
#include "lib/eq.h"
#include "lib/events.h"
+#include "hwmon.h"
struct mlx5_event_nb {
struct mlx5_nb nb;
@@ -153,21 +154,50 @@ static int any_notifier(struct notifier_block *nb,
return NOTIFY_OK;
}
+#if IS_ENABLED(CONFIG_HWMON)
+static void print_sensor_names_in_bit_set(struct mlx5_core_dev *dev, struct mlx5_hwmon *hwmon,
+ u64 bit_set, int bit_set_offset)
+{
+ unsigned long *bit_set_ptr = (unsigned long *)&bit_set;
+ int num_bits = sizeof(bit_set) * BITS_PER_BYTE;
+ int i;
+
+ for_each_set_bit(i, bit_set_ptr, num_bits) {
+ const char *sensor_name = hwmon_get_sensor_name(hwmon, i + bit_set_offset);
+
+ mlx5_core_warn(dev, "Sensor name[%d]: %s\n", i + bit_set_offset, sensor_name);
+ }
+}
+#endif /* CONFIG_HWMON */
+
/* type == MLX5_EVENT_TYPE_TEMP_WARN_EVENT */
static int temp_warn(struct notifier_block *nb, unsigned long type, void *data)
{
struct mlx5_event_nb *event_nb = mlx5_nb_cof(nb, struct mlx5_event_nb, nb);
struct mlx5_events *events = event_nb->ctx;
+ struct mlx5_core_dev *dev = events->dev;
struct mlx5_eqe *eqe = data;
u64 value_lsb;
u64 value_msb;
value_lsb = be64_to_cpu(eqe->data.temp_warning.sensor_warning_lsb);
+ /* bits 1-63 are not supported for NICs,
+ * hence read only bit 0 (ASIC) from lsb.

+ */
+ value_lsb &= 0x1;
value_msb = be64_to_cpu(eqe->data.temp_warning.sensor_warning_msb);
- mlx5_core_warn(events->dev,
- "High temperature on sensors with bit set %llx %llx",
- value_msb, value_lsb);
+ if (net_ratelimit()) {
+ mlx5_core_warn(dev, "High temperature on sensors with bit set %#llx %#llx.\n",
+ value_msb, value_lsb);
+#if IS_ENABLED(CONFIG_HWMON)
+ if (dev->hwmon) {
+ print_sensor_names_in_bit_set(dev, dev->hwmon, value_lsb, 0);
+ print_sensor_names_in_bit_set(dev, dev->hwmon, value_msb,
+ sizeof(value_lsb) * BITS_PER_BYTE);
+ }
+#endif
+ }
return NOTIFY_OK;
}
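print_sensor_names_in_bit_set() above walks a 64-bit warning word and offsets the bit index so the LSB word covers sensors 0-63 and the MSB word sensors 64-127. A user-space sketch of the same walk, with for_each_set_bit() replaced by a plain loop; purely didactic, not driver code:

#include <stdint.h>
#include <stdio.h>

static void print_set_sensors(uint64_t bits, int offset)
{
        for (int i = 0; i < 64; i++)
                if (bits & (1ULL << i))
                        printf("sensor index %d is over temperature\n", i + offset);
}

int main(void)
{
        uint64_t lsb = 0x1;     /* only bit 0 (the ASIC sensor) is honoured for NICs */
        uint64_t msb = 0x0;

        print_set_sensors(lsb, 0);
        print_set_sensors(msb, 64);     /* MSB word starts 64 bits in */
        return 0;
}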
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
index ae20c061e0fb..a47c29571f64 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
@@ -1142,6 +1142,8 @@ const struct mlx5_flow_cmds *mlx5_fs_cmd_get_default(enum fs_flow_table_type typ
case FS_FT_RDMA_RX:
case FS_FT_RDMA_TX:
case FS_FT_PORT_SEL:
+ case FS_FT_RDMA_TRANSPORT_RX:
+ case FS_FT_RDMA_TRANSPORT_TX:
return mlx5_fs_cmd_get_fw_cmds();
default:
return mlx5_fs_cmd_get_stub_cmds();
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 22dc23d991d2..6163bc98d94a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -1456,7 +1456,7 @@ mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns,
struct mlx5_flow_table *ft;
int autogroups_max_fte;
- ft = mlx5_create_flow_table(ns, ft_attr);
+ ft = mlx5_create_vport_flow_table(ns, ft_attr, ft_attr->vport);
if (IS_ERR(ft))
return ft;
@@ -2764,9 +2764,9 @@ struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
}
EXPORT_SYMBOL(mlx5_get_flow_namespace);
-struct mlx5_flow_namespace *mlx5_get_flow_vport_acl_namespace(struct mlx5_core_dev *dev,
- enum mlx5_flow_namespace_type type,
- int vport)
+struct mlx5_flow_namespace *
+mlx5_get_flow_vport_namespace(struct mlx5_core_dev *dev,
+ enum mlx5_flow_namespace_type type, int vport_idx)
{
struct mlx5_flow_steering *steering = dev->priv.steering;
@@ -2775,25 +2775,43 @@ struct mlx5_flow_namespace *mlx5_get_flow_vport_acl_namespace(struct mlx5_core_d
switch (type) {
case MLX5_FLOW_NAMESPACE_ESW_EGRESS:
- if (vport >= steering->esw_egress_acl_vports)
+ if (vport_idx >= steering->esw_egress_acl_vports)
return NULL;
if (steering->esw_egress_root_ns &&
- steering->esw_egress_root_ns[vport])
- return &steering->esw_egress_root_ns[vport]->ns;
+ steering->esw_egress_root_ns[vport_idx])
+ return &steering->esw_egress_root_ns[vport_idx]->ns;
else
return NULL;
case MLX5_FLOW_NAMESPACE_ESW_INGRESS:
- if (vport >= steering->esw_ingress_acl_vports)
+ if (vport_idx >= steering->esw_ingress_acl_vports)
return NULL;
if (steering->esw_ingress_root_ns &&
- steering->esw_ingress_root_ns[vport])
- return &steering->esw_ingress_root_ns[vport]->ns;
+ steering->esw_ingress_root_ns[vport_idx])
+ return &steering->esw_ingress_root_ns[vport_idx]->ns;
+ else
+ return NULL;
+ case MLX5_FLOW_NAMESPACE_RDMA_TRANSPORT_RX:
+ if (vport_idx >= steering->rdma_transport_rx_vports)
+ return NULL;
+ if (steering->rdma_transport_rx_root_ns &&
+ steering->rdma_transport_rx_root_ns[vport_idx])
+ return &steering->rdma_transport_rx_root_ns[vport_idx]->ns;
+ else
+ return NULL;
+ case MLX5_FLOW_NAMESPACE_RDMA_TRANSPORT_TX:
+ if (vport_idx >= steering->rdma_transport_tx_vports)
+ return NULL;
+
+ if (steering->rdma_transport_tx_root_ns &&
+ steering->rdma_transport_tx_root_ns[vport_idx])
+ return &steering->rdma_transport_tx_root_ns[vport_idx]->ns;
else
return NULL;
default:
return NULL;
}
}
+EXPORT_SYMBOL(mlx5_get_flow_vport_namespace);
static struct fs_prio *_fs_create_prio(struct mlx5_flow_namespace *ns,
unsigned int prio,
@@ -3199,6 +3217,127 @@ out_err:
return err;
}
+static int
+init_rdma_transport_rx_root_ns_one(struct mlx5_flow_steering *steering,
+ int vport_idx)
+{
+ struct fs_prio *prio;
+
+ steering->rdma_transport_rx_root_ns[vport_idx] =
+ create_root_ns(steering, FS_FT_RDMA_TRANSPORT_RX);
+ if (!steering->rdma_transport_rx_root_ns[vport_idx])
+ return -ENOMEM;
+
+ /* create 1 prio */
+ prio = fs_create_prio(&steering->rdma_transport_rx_root_ns[vport_idx]->ns,
+ MLX5_RDMA_TRANSPORT_BYPASS_PRIO, 1);
+ return PTR_ERR_OR_ZERO(prio);
+}
+
+static int
+init_rdma_transport_tx_root_ns_one(struct mlx5_flow_steering *steering,
+ int vport_idx)
+{
+ struct fs_prio *prio;
+
+ steering->rdma_transport_tx_root_ns[vport_idx] =
+ create_root_ns(steering, FS_FT_RDMA_TRANSPORT_TX);
+ if (!steering->rdma_transport_tx_root_ns[vport_idx])
+ return -ENOMEM;
+
+ /* create 1 prio */
+ prio = fs_create_prio(&steering->rdma_transport_tx_root_ns[vport_idx]->ns,
+ MLX5_RDMA_TRANSPORT_BYPASS_PRIO, 1);
+ return PTR_ERR_OR_ZERO(prio);
+}
+
+static int init_rdma_transport_rx_root_ns(struct mlx5_flow_steering *steering)
+{
+ struct mlx5_core_dev *dev = steering->dev;
+ int total_vports;
+ int err;
+ int i;
+
+ /* In case the eswitch is not supported and we are working in legacy mode */
+ total_vports = mlx5_eswitch_get_total_vports(dev) ?: 1;
+
+ steering->rdma_transport_rx_root_ns =
+ kcalloc(total_vports,
+ sizeof(*steering->rdma_transport_rx_root_ns),
+ GFP_KERNEL);
+ if (!steering->rdma_transport_rx_root_ns)
+ return -ENOMEM;
+
+ for (i = 0; i < total_vports; i++) {
+ err = init_rdma_transport_rx_root_ns_one(steering, i);
+ if (err)
+ goto cleanup_root_ns;
+ }
+ steering->rdma_transport_rx_vports = total_vports;
+ return 0;
+
+cleanup_root_ns:
+ while (i--)
+ cleanup_root_ns(steering->rdma_transport_rx_root_ns[i]);
+ kfree(steering->rdma_transport_rx_root_ns);
+ steering->rdma_transport_rx_root_ns = NULL;
+ return err;
+}
+
+static int init_rdma_transport_tx_root_ns(struct mlx5_flow_steering *steering)
+{
+ struct mlx5_core_dev *dev = steering->dev;
+ int total_vports;
+ int err;
+ int i;
+
+ /* In case the eswitch is not supported and we are working in legacy mode */
+ total_vports = mlx5_eswitch_get_total_vports(dev) ?: 1;
+
+ steering->rdma_transport_tx_root_ns =
+ kcalloc(total_vports,
+ sizeof(*steering->rdma_transport_tx_root_ns),
+ GFP_KERNEL);
+ if (!steering->rdma_transport_tx_root_ns)
+ return -ENOMEM;
+
+ for (i = 0; i < total_vports; i++) {
+ err = init_rdma_transport_tx_root_ns_one(steering, i);
+ if (err)
+ goto cleanup_root_ns;
+ }
+ steering->rdma_transport_tx_vports = total_vports;
+ return 0;
+
+cleanup_root_ns:
+ while (i--)
+ cleanup_root_ns(steering->rdma_transport_tx_root_ns[i]);
+ kfree(steering->rdma_transport_tx_root_ns);
+ steering->rdma_transport_tx_root_ns = NULL;
+ return err;
+}
+
+static void cleanup_rdma_transport_roots_ns(struct mlx5_flow_steering *steering)
+{
+ int i;
+
+ if (steering->rdma_transport_rx_root_ns) {
+ for (i = 0; i < steering->rdma_transport_rx_vports; i++)
+ cleanup_root_ns(steering->rdma_transport_rx_root_ns[i]);
+
+ kfree(steering->rdma_transport_rx_root_ns);
+ steering->rdma_transport_rx_root_ns = NULL;
+ }
+
+ if (steering->rdma_transport_tx_root_ns) {
+ for (i = 0; i < steering->rdma_transport_tx_vports; i++)
+ cleanup_root_ns(steering->rdma_transport_tx_root_ns[i]);
+
+ kfree(steering->rdma_transport_tx_root_ns);
+ steering->rdma_transport_tx_root_ns = NULL;
+ }
+}
+
/* FT and tc chains are stored in the same array so we can re-use the
* mlx5_get_fdb_sub_ns() and tc api for FT chains.
* When creating a new ns for each chain store it in the first available slot.
@@ -3631,6 +3770,7 @@ void mlx5_fs_core_cleanup(struct mlx5_core_dev *dev)
cleanup_root_ns(steering->rdma_rx_root_ns);
cleanup_root_ns(steering->rdma_tx_root_ns);
cleanup_root_ns(steering->egress_root_ns);
+ cleanup_rdma_transport_roots_ns(steering);
devl_params_unregister(priv_to_devlink(dev), mlx5_fs_params,
ARRAY_SIZE(mlx5_fs_params));
@@ -3700,6 +3840,18 @@ int mlx5_fs_core_init(struct mlx5_core_dev *dev)
goto err;
}
+ if (MLX5_CAP_FLOWTABLE_RDMA_TRANSPORT_RX(dev, ft_support)) {
+ err = init_rdma_transport_rx_root_ns(steering);
+ if (err)
+ goto err;
+ }
+
+ if (MLX5_CAP_FLOWTABLE_RDMA_TRANSPORT_TX(dev, ft_support)) {
+ err = init_rdma_transport_tx_root_ns(steering);
+ if (err)
+ goto err;
+ }
+
return 0;
err:
@@ -3850,8 +4002,10 @@ mlx5_get_root_namespace(struct mlx5_core_dev *dev, enum mlx5_flow_namespace_type
struct mlx5_flow_namespace *ns;
if (ns_type == MLX5_FLOW_NAMESPACE_ESW_EGRESS ||
- ns_type == MLX5_FLOW_NAMESPACE_ESW_INGRESS)
- ns = mlx5_get_flow_vport_acl_namespace(dev, ns_type, 0);
+ ns_type == MLX5_FLOW_NAMESPACE_ESW_INGRESS ||
+ ns_type == MLX5_FLOW_NAMESPACE_RDMA_TRANSPORT_TX ||
+ ns_type == MLX5_FLOW_NAMESPACE_RDMA_TRANSPORT_RX)
+ ns = mlx5_get_flow_vport_namespace(dev, ns_type, 0);
else
ns = mlx5_get_flow_namespace(dev, ns_type);
if (!ns)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
index 20837e526679..0767239f651c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
@@ -115,7 +115,9 @@ enum fs_flow_table_type {
FS_FT_PORT_SEL = 0X9,
FS_FT_FDB_RX = 0xa,
FS_FT_FDB_TX = 0xb,
- FS_FT_MAX_TYPE = FS_FT_FDB_TX,
+ FS_FT_RDMA_TRANSPORT_RX = 0xd,
+ FS_FT_RDMA_TRANSPORT_TX = 0xe,
+ FS_FT_MAX_TYPE = FS_FT_RDMA_TRANSPORT_TX,
};
enum fs_flow_table_op_mod {
@@ -158,6 +160,10 @@ struct mlx5_flow_steering {
struct mlx5_flow_root_namespace *port_sel_root_ns;
int esw_egress_acl_vports;
int esw_ingress_acl_vports;
+ struct mlx5_flow_root_namespace **rdma_transport_rx_root_ns;
+ struct mlx5_flow_root_namespace **rdma_transport_tx_root_ns;
+ int rdma_transport_rx_vports;
+ int rdma_transport_tx_vports;
};
struct fs_node {
@@ -341,16 +347,10 @@ struct mlx5_fc {
u64 lastbytes;
};
-struct mlx5_fc_bulk_hws_data {
- struct mlx5hws_action *hws_action;
- struct mutex lock; /* protects hws_action */
- refcount_t hws_action_refcount;
-};
-
struct mlx5_fc_bulk {
struct mlx5_fs_bulk fs_bulk;
u32 base_id;
- struct mlx5_fc_bulk_hws_data hws_data;
+ struct mlx5_fs_hws_data hws_data;
struct mlx5_fc fcs[];
};
@@ -434,7 +434,9 @@ struct mlx5_flow_root_namespace *find_root(struct fs_node *node);
(type == FS_FT_PORT_SEL) ? MLX5_CAP_FLOWTABLE_PORT_SELECTION(mdev, cap) : \
(type == FS_FT_FDB_RX) ? MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, cap) : \
(type == FS_FT_FDB_TX) ? MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, cap) : \
- (BUILD_BUG_ON_ZERO(FS_FT_FDB_TX != FS_FT_MAX_TYPE))\
+ (type == FS_FT_RDMA_TRANSPORT_RX) ? MLX5_CAP_FLOWTABLE_RDMA_TRANSPORT_RX(mdev, cap) : \
+ (type == FS_FT_RDMA_TRANSPORT_TX) ? MLX5_CAP_FLOWTABLE_RDMA_TRANSPORT_TX(mdev, cap) : \
+ (BUILD_BUG_ON_ZERO(FS_FT_RDMA_TRANSPORT_TX != FS_FT_MAX_TYPE))\
)
#endif
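The last arm of MLX5_CAP_FLOWTABLE_TYPE above is bumped to BUILD_BUG_ON_ZERO(FS_FT_RDMA_TRANSPORT_TX != FS_FT_MAX_TYPE), so adding a table type without extending the capability chain breaks the build instead of silently returning 0. A stripped-down illustration of the same guard, with generic names; the macro body is the classic kernel idiom and relies on a GCC/Clang extension:

#include <stdio.h>

/* Negative bit-field width => compile error when the condition is non-zero. */
#define BUILD_BUG_ON_ZERO(e) ((int)(sizeof(struct { int:(-!!(e)); })))

enum ft_type { FT_RX, FT_TX, FT_MAX_TYPE = FT_TX };

/* If FT_MAX_TYPE grows past FT_TX without a new arm being added here,
 * the last branch no longer evaluates to zero and compilation fails.
 */
#define FT_CAP(type)                            \
        ((type) == FT_RX ? 1 :                  \
         (type) == FT_TX ? 2 :                  \
         BUILD_BUG_ON_ZERO(FT_TX != FT_MAX_TYPE))

int main(void)
{
        printf("%d %d\n", FT_CAP(FT_RX), FT_CAP(FT_TX));
        return 0;
}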
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_ft_pool.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_ft_pool.c
index c14590acc772..f6abfd00d7e6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_ft_pool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_ft_pool.c
@@ -50,10 +50,12 @@ mlx5_ft_pool_get_avail_sz(struct mlx5_core_dev *dev, enum fs_flow_table_type tab
int i, found_i = -1;
for (i = ARRAY_SIZE(FT_POOLS) - 1; i >= 0; i--) {
- if (dev->priv.ft_pool->ft_left[i] && FT_POOLS[i] >= desired_size &&
+ if (dev->priv.ft_pool->ft_left[i] &&
+ (FT_POOLS[i] >= desired_size ||
+ desired_size == MLX5_FS_MAX_POOL_SIZE) &&
FT_POOLS[i] <= max_ft_size) {
found_i = i;
- if (desired_size != POOL_NEXT_SIZE)
+ if (desired_size != MLX5_FS_MAX_POOL_SIZE)
break;
}
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_ft_pool.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_ft_pool.h
index 25f4274b372b..173e312db720 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_ft_pool.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_ft_pool.h
@@ -7,8 +7,6 @@
#include <linux/mlx5/driver.h>
#include "fs_core.h"
-#define POOL_NEXT_SIZE 0
-
int mlx5_ft_pool_init(struct mlx5_core_dev *dev);
void mlx5_ft_pool_destroy(struct mlx5_core_dev *dev);
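With POOL_NEXT_SIZE gone, MLX5_FS_MAX_POOL_SIZE doubles as the "give me the largest bucket available" request in mlx5_ft_pool_get_avail_sz(). The selection walk starts at the smallest bucket and moves up, so a sized request gets the tightest fit while a MAX request keeps the last (largest) hit. A self-contained sketch of that rule, with illustrative bucket sizes and sentinel:

#include <stdio.h>

#define FS_MAX_POOL_SIZE (~0u)  /* illustrative sentinel: "as large as possible" */

/* Bucket sizes sorted from largest to smallest, as in FT_POOLS[]. */
static const unsigned int pools[] = { 1u << 23, 1u << 16, 1u << 11, 1u << 7, 1u << 1 };

static int pick_bucket(const int *left, unsigned int desired, unsigned int max_ft)
{
        int found = -1;

        /* Walk from the smallest bucket upward. */
        for (int i = (int)(sizeof(pools) / sizeof(pools[0])) - 1; i >= 0; i--) {
                if (left[i] &&
                    (pools[i] >= desired || desired == FS_MAX_POOL_SIZE) &&
                    pools[i] <= max_ft) {
                        found = i;
                        if (desired != FS_MAX_POOL_SIZE)
                                break;  /* tightest fit for a sized request */
                }
        }
        return found;   /* a MAX request falls through and keeps the largest hit */
}

int main(void)
{
        int left[] = { 1, 1, 1, 1, 1 }; /* every bucket still has capacity */

        printf("bucket for 4000 entries: %d\n", pick_bucket(left, 4000, 1u << 23));
        printf("bucket for a MAX request: %d\n", pick_bucket(left, FS_MAX_POOL_SIZE, 1u << 23));
        return 0;
}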
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
index b253d1673398..57476487e31f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
@@ -287,6 +287,13 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev)
return err;
}
+ if (MLX5_CAP_GEN(dev, adv_rdma)) {
+ err = mlx5_core_get_caps_mode(dev, MLX5_CAP_ADV_RDMA,
+ HCA_CAP_OPMOD_GET_CUR);
+ if (err)
+ return err;
+ }
+
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
index 566710d34a7b..6830a49fe682 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
@@ -345,15 +345,12 @@ static void mlx5_fw_live_patch_event(struct work_struct *work)
}
#if IS_ENABLED(CONFIG_HOTPLUG_PCI_PCIE)
-static int mlx5_check_hotplug_interrupt(struct mlx5_core_dev *dev)
+static int mlx5_check_hotplug_interrupt(struct mlx5_core_dev *dev,
+ struct pci_dev *bridge)
{
- struct pci_dev *bridge = dev->pdev->bus->self;
u16 reg16;
int err;
- if (!bridge)
- return -EOPNOTSUPP;
-
err = pcie_capability_read_word(bridge, PCI_EXP_SLTCTL, &reg16);
if (err)
return err;
@@ -416,9 +413,15 @@ static int mlx5_check_dev_ids(struct mlx5_core_dev *dev, u16 dev_id)
static bool mlx5_is_reset_now_capable(struct mlx5_core_dev *dev,
u8 reset_method)
{
+ struct pci_dev *bridge = dev->pdev->bus->self;
u16 dev_id;
int err;
+ if (!bridge) {
+ mlx5_core_warn(dev, "PCI bus bridge is not accessible\n");
+ return false;
+ }
+
if (!MLX5_CAP_GEN(dev, fast_teardown)) {
mlx5_core_warn(dev, "fast teardown is not supported by firmware\n");
return false;
@@ -426,7 +429,7 @@ static bool mlx5_is_reset_now_capable(struct mlx5_core_dev *dev,
#if IS_ENABLED(CONFIG_HOTPLUG_PCI_PCIE)
if (reset_method != MLX5_MFRL_REG_PCI_RESET_METHOD_HOT_RESET) {
- err = mlx5_check_hotplug_interrupt(dev);
+ err = mlx5_check_hotplug_interrupt(dev, bridge);
if (err)
return false;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index a6329ca2d9bf..91613d5a36cd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -96,6 +96,11 @@ static int mlx5_health_get_rfr(u8 rfr_severity)
return rfr_severity >> MLX5_RFR_BIT_OFFSET;
}
+static int mlx5_health_get_crr(u8 rfr_severity)
+{
+ return (rfr_severity >> MLX5_CRR_BIT_OFFSET) & 0x01;
+}
+
static bool sensor_fw_synd_rfr(struct mlx5_core_dev *dev)
{
struct mlx5_core_health *health = &dev->priv.health;
@@ -375,6 +380,8 @@ static const char *hsynd_str(u8 synd)
return "High temperature";
case MLX5_INITIAL_SEG_HEALTH_SYNDROME_ICM_PCI_POISONED_ERR:
return "ICM fetch PCI data poisoned error";
+ case MLX5_INITIAL_SEG_HEALTH_SYNDROME_TRUST_LOCKDOWN_ERR:
+ return "Trust lockdown error";
default:
return "unrecognized error";
}
@@ -442,12 +449,15 @@ static void print_health_info(struct mlx5_core_dev *dev)
mlx5_log(dev, severity, "time %u\n", ioread32be(&h->time));
mlx5_log(dev, severity, "hw_id 0x%08x\n", ioread32be(&h->hw_id));
mlx5_log(dev, severity, "rfr %d\n", mlx5_health_get_rfr(rfr_severity));
+ mlx5_log(dev, severity, "crr %d\n", mlx5_health_get_crr(rfr_severity));
mlx5_log(dev, severity, "severity %d (%s)\n", severity, mlx5_loglevel_str(severity));
mlx5_log(dev, severity, "irisc_index %d\n", ioread8(&h->irisc_index));
mlx5_log(dev, severity, "synd 0x%x: %s\n", ioread8(&h->synd),
hsynd_str(ioread8(&h->synd)));
mlx5_log(dev, severity, "ext_synd 0x%04x\n", ioread16be(&h->ext_synd));
mlx5_log(dev, severity, "raw fw_ver 0x%08x\n", ioread32be(&h->fw_ver));
+ if (mlx5_health_get_crr(rfr_severity))
+ mlx5_core_warn(dev, "Cold reset is required\n");
}
static int
@@ -799,14 +809,17 @@ static void poll_health(struct timer_list *t)
health->prev = count;
if (health->miss_counter == MAX_MISSES) {
mlx5_core_err(dev, "device's health compromised - reached miss count\n");
+ health->synd = ioread8(&h->synd);
print_health_info(dev);
queue_work(health->wq, &health->report_work);
}
prev_synd = health->synd;
health->synd = ioread8(&h->synd);
- if (health->synd && health->synd != prev_synd)
+ if (health->synd && health->synd != prev_synd) {
+ print_health_info(dev);
queue_work(health->wq, &health->report_work);
+ }
out:
mod_timer(&health->timer, get_next_poll_jiffies(dev));
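mlx5_health_get_crr() above pulls a single cold-reset-required flag out of the packed rfr_severity byte, next to the existing rfr flag and the log-severity field. A stand-alone sketch of that decoding; the bit offsets and severity mask below are assumptions for illustration, not the driver's constants:

#include <stdint.h>
#include <stdio.h>

#define RFR_BIT_OFFSET  7       /* assumed position of the recover-flow-required flag */
#define CRR_BIT_OFFSET  6       /* assumed position of the cold-reset-required flag */

int main(void)
{
        uint8_t rfr_severity = 0xc2;    /* example register value: rfr=1, crr=1, severity=2 */

        int rfr = rfr_severity >> RFR_BIT_OFFSET;
        int crr = (rfr_severity >> CRR_BIT_OFFSET) & 0x01;
        int severity = rfr_severity & 0x07;     /* assumed low-bits severity field */

        printf("rfr=%d crr=%d severity=%d\n", rfr, crr, severity);
        return 0;
}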
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/hwmon.c b/drivers/net/ethernet/mellanox/mlx5/core/hwmon.c
index 353f81dccd1c..4ba2636d7fb6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/hwmon.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/hwmon.c
@@ -416,3 +416,8 @@ void mlx5_hwmon_dev_unregister(struct mlx5_core_dev *mdev)
mlx5_hwmon_free(hwmon);
mdev->hwmon = NULL;
}
+
+const char *hwmon_get_sensor_name(struct mlx5_hwmon *hwmon, int channel)
+{
+ return hwmon->temp_channel_desc[channel].sensor_name;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/hwmon.h b/drivers/net/ethernet/mellanox/mlx5/core/hwmon.h
index 999654a9b9da..f38271c22c10 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/hwmon.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/hwmon.h
@@ -10,6 +10,7 @@
int mlx5_hwmon_dev_register(struct mlx5_core_dev *mdev);
void mlx5_hwmon_dev_unregister(struct mlx5_core_dev *mdev);
+const char *hwmon_get_sensor_name(struct mlx5_hwmon *hwmon, int channel);
#else
static inline int mlx5_hwmon_dev_register(struct mlx5_core_dev *mdev)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c b/drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c
index 1477db7f5307..2691d88cdee1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c
@@ -175,7 +175,7 @@ unlock:
void mlx5_irq_affinity_irq_release(struct mlx5_core_dev *dev, struct mlx5_irq *irq)
{
- struct mlx5_irq_pool *pool = mlx5_irq_pool_get(dev);
+ struct mlx5_irq_pool *pool = mlx5_irq_get_pool(irq);
int cpu;
cpu = cpumask_first(mlx5_irq_get_affinity_mask(irq));
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
index cea5aa314f6c..d058cbb4a00c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
@@ -523,8 +523,7 @@ static struct net_device *mlx5_lag_active_backup_get_netdev(struct mlx5_core_dev
ndev = ldev->pf[last_idx].netdev;
}
- if (ndev)
- dev_hold(ndev);
+ dev_hold(ndev);
unlock:
spin_unlock_irqrestore(&lag_lock, flags);
@@ -584,8 +583,9 @@ void mlx5_modify_lag(struct mlx5_lag *ldev,
}
}
-static int mlx5_lag_set_port_sel_mode_roce(struct mlx5_lag *ldev,
- unsigned long *flags)
+static int mlx5_lag_set_port_sel_mode(struct mlx5_lag *ldev,
+ enum mlx5_lag_mode mode,
+ unsigned long *flags)
{
int first_idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
struct mlx5_core_dev *dev0;
@@ -593,7 +593,12 @@ static int mlx5_lag_set_port_sel_mode_roce(struct mlx5_lag *ldev,
if (first_idx < 0)
return -EINVAL;
+ if (mode == MLX5_LAG_MODE_MPESW ||
+ mode == MLX5_LAG_MODE_MULTIPATH)
+ return 0;
+
dev0 = ldev->pf[first_idx].dev;
+
if (!MLX5_CAP_PORT_SELECTION(dev0, port_select_flow_table)) {
if (ldev->ports > 2)
return -EINVAL;
@@ -608,32 +613,10 @@ static int mlx5_lag_set_port_sel_mode_roce(struct mlx5_lag *ldev,
return 0;
}
-static void mlx5_lag_set_port_sel_mode_offloads(struct mlx5_lag *ldev,
- struct lag_tracker *tracker,
- enum mlx5_lag_mode mode,
- unsigned long *flags)
-{
- int first_idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
- struct lag_func *dev0;
-
- if (first_idx < 0 || mode == MLX5_LAG_MODE_MPESW)
- return;
-
- dev0 = &ldev->pf[first_idx];
- if (MLX5_CAP_PORT_SELECTION(dev0->dev, port_select_flow_table) &&
- tracker->tx_type == NETDEV_LAG_TX_TYPE_HASH) {
- if (ldev->ports > 2)
- ldev->buckets = MLX5_LAG_MAX_HASH_BUCKETS;
- set_bit(MLX5_LAG_MODE_FLAG_HASH_BASED, flags);
- }
-}
-
static int mlx5_lag_set_flags(struct mlx5_lag *ldev, enum mlx5_lag_mode mode,
struct lag_tracker *tracker, bool shared_fdb,
unsigned long *flags)
{
- bool roce_lag = mode == MLX5_LAG_MODE_ROCE;
-
*flags = 0;
if (shared_fdb) {
set_bit(MLX5_LAG_MODE_FLAG_SHARED_FDB, flags);
@@ -643,11 +626,7 @@ static int mlx5_lag_set_flags(struct mlx5_lag *ldev, enum mlx5_lag_mode mode,
if (mode == MLX5_LAG_MODE_MPESW)
set_bit(MLX5_LAG_MODE_FLAG_FDB_SEL_MODE_NATIVE, flags);
- if (roce_lag)
- return mlx5_lag_set_port_sel_mode_roce(ldev, flags);
-
- mlx5_lag_set_port_sel_mode_offloads(ldev, tracker, mode, flags);
- return 0;
+ return mlx5_lag_set_port_sel_mode(ldev, mode, flags);
}
char *mlx5_get_str_port_sel_mode(enum mlx5_lag_mode mode, unsigned long flags)
@@ -951,7 +930,7 @@ void mlx5_disable_lag(struct mlx5_lag *ldev)
mlx5_eswitch_reload_ib_reps(ldev->pf[i].dev->priv.eswitch);
}
-static bool mlx5_shared_fdb_supported(struct mlx5_lag *ldev)
+bool mlx5_lag_shared_fdb_supported(struct mlx5_lag *ldev)
{
int idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
struct mlx5_core_dev *dev;
@@ -1038,7 +1017,7 @@ static void mlx5_do_bond(struct mlx5_lag *ldev)
}
if (do_bond && !__mlx5_lag_is_active(ldev)) {
- bool shared_fdb = mlx5_shared_fdb_supported(ldev);
+ bool shared_fdb = mlx5_lag_shared_fdb_supported(ldev);
roce_lag = mlx5_lag_is_roce_lag(ldev);
@@ -1052,6 +1031,10 @@ static void mlx5_do_bond(struct mlx5_lag *ldev)
if (err) {
if (shared_fdb || roce_lag)
mlx5_lag_add_devices(ldev);
+ if (shared_fdb) {
+ mlx5_ldev_for_each(i, 0, ldev)
+ mlx5_eswitch_reload_ib_reps(ldev->pf[i].dev->priv.eswitch);
+ }
return;
} else if (roce_lag) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h
index 01cf72366947..c2f256bb2bc2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h
@@ -92,6 +92,7 @@ mlx5_lag_is_ready(struct mlx5_lag *ldev)
return test_bit(MLX5_LAG_FLAG_NDEVS_READY, &ldev->state_flags);
}
+bool mlx5_lag_shared_fdb_supported(struct mlx5_lag *ldev);
bool mlx5_lag_check_prereq(struct mlx5_lag *ldev);
void mlx5_modify_lag(struct mlx5_lag *ldev,
struct lag_tracker *tracker);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c
index ffac0bd6c895..aad52d3a90e6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c
@@ -65,7 +65,6 @@ err_metadata:
return err;
}
-#define MLX5_LAG_MPESW_OFFLOADS_SUPPORTED_PORTS 4
static int enable_mpesw(struct mlx5_lag *ldev)
{
int idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1);
@@ -77,13 +76,11 @@ static int enable_mpesw(struct mlx5_lag *ldev)
return -EINVAL;
dev0 = ldev->pf[idx].dev;
- if (ldev->ports > MLX5_LAG_MPESW_OFFLOADS_SUPPORTED_PORTS)
- return -EOPNOTSUPP;
-
if (mlx5_eswitch_mode(dev0) != MLX5_ESWITCH_OFFLOADS ||
!MLX5_CAP_PORT_SELECTION(dev0, port_select_flow_table) ||
!MLX5_CAP_GEN(dev0, create_lag_when_not_master_up) ||
- !mlx5_lag_check_prereq(ldev))
+ !mlx5_lag_check_prereq(ldev) ||
+ !mlx5_lag_shared_fdb_supported(ldev))
return -EOPNOTSUPP;
err = mlx5_mpesw_metadata_set(ldev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c
index a80ecb672f33..0a3c260af377 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c
@@ -161,7 +161,8 @@ mlx5_chains_create_table(struct mlx5_fs_chains *chains,
ft_attr.flags |= (MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT |
MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);
- sz = (chain == mlx5_chains_get_nf_ft_chain(chains)) ? FT_TBL_SZ : POOL_NEXT_SIZE;
+ sz = (chain == mlx5_chains_get_nf_ft_chain(chains)) ?
+ FT_TBL_SZ : MLX5_FS_MAX_POOL_SIZE;
ft_attr.max_fte = sz;
/* We use chains_default_ft(chains) as the table's next_ft till
@@ -196,6 +197,11 @@ mlx5_chains_create_table(struct mlx5_fs_chains *chains,
ns = mlx5_get_flow_namespace(chains->dev, chains->ns);
}
+ if (!ns) {
+ mlx5_core_warn(chains->dev, "Failed to get flow namespace\n");
+ return ERR_PTR(-EOPNOTSUPP);
+ }
+
ft_attr.autogroup.num_reserved_entries = 2;
ft_attr.autogroup.max_num_groups = chains->group_num;
ft = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
@@ -699,7 +705,7 @@ mlx5_chains_create_global_table(struct mlx5_fs_chains *chains)
goto err_ignore;
}
- chain = mlx5_chains_get_chain_range(chains),
+ chain = mlx5_chains_get_chain_range(chains);
prio = mlx5_chains_get_prio_range(chains);
level = mlx5_chains_get_level_range(chains);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.c
index 9f13cea16446..eb3bd9c7f66e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.c
@@ -61,6 +61,25 @@ static void mlx5_cleanup_ttc_rules(struct mlx5_ttc_table *ttc)
}
}
+static const char *mlx5_traffic_types_names[MLX5_NUM_TT] = {
+ [MLX5_TT_IPV4_TCP] = "TT_IPV4_TCP",
+ [MLX5_TT_IPV6_TCP] = "TT_IPV6_TCP",
+ [MLX5_TT_IPV4_UDP] = "TT_IPV4_UDP",
+ [MLX5_TT_IPV6_UDP] = "TT_IPV6_UDP",
+ [MLX5_TT_IPV4_IPSEC_AH] = "TT_IPV4_IPSEC_AH",
+ [MLX5_TT_IPV6_IPSEC_AH] = "TT_IPV6_IPSEC_AH",
+ [MLX5_TT_IPV4_IPSEC_ESP] = "TT_IPV4_IPSEC_ESP",
+ [MLX5_TT_IPV6_IPSEC_ESP] = "TT_IPV6_IPSEC_ESP",
+ [MLX5_TT_IPV4] = "TT_IPV4",
+ [MLX5_TT_IPV6] = "TT_IPV6",
+ [MLX5_TT_ANY] = "TT_ANY"
+};
+
+const char *mlx5_ttc_get_name(enum mlx5_traffic_types tt)
+{
+ return mlx5_traffic_types_names[tt];
+}
+
struct mlx5_etype_proto {
u16 etype;
u8 proto;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.h
index 92eea6bea310..ab9434fe3ae6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.h
@@ -49,6 +49,7 @@ struct ttc_params {
struct mlx5_flow_destination tunnel_dests[MLX5_NUM_TUNNEL_TT];
};
+const char *mlx5_ttc_get_name(enum mlx5_traffic_types tt);
struct mlx5_flow_table *mlx5_get_ttc_flow_table(struct mlx5_ttc_table *ttc);
struct mlx5_ttc_table *mlx5_create_ttc_table(struct mlx5_core_dev *dev,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 710633d5fdbe..41e8660c819c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -1210,24 +1210,24 @@ static int mlx5_function_enable(struct mlx5_core_dev *dev, bool boot, u64 timeou
dev->caps.embedded_cpu = mlx5_read_embedded_cpu(dev);
mlx5_cmd_set_state(dev, MLX5_CMDIF_STATE_UP);
- mlx5_start_health_poll(dev);
-
err = mlx5_core_enable_hca(dev, 0);
if (err) {
mlx5_core_err(dev, "enable hca failed\n");
- goto stop_health_poll;
+ goto err_cmd_cleanup;
}
+ mlx5_start_health_poll(dev);
+
err = mlx5_core_set_issi(dev);
if (err) {
mlx5_core_err(dev, "failed to set issi\n");
- goto err_disable_hca;
+ goto stop_health_poll;
}
err = mlx5_satisfy_startup_pages(dev, 1);
if (err) {
mlx5_core_err(dev, "failed to allocate boot pages\n");
- goto err_disable_hca;
+ goto stop_health_poll;
}
err = mlx5_tout_query_dtor(dev);
@@ -1240,10 +1240,9 @@ static int mlx5_function_enable(struct mlx5_core_dev *dev, bool boot, u64 timeou
reclaim_boot_pages:
mlx5_reclaim_startup_pages(dev);
-err_disable_hca:
- mlx5_core_disable_hca(dev, 0);
stop_health_poll:
mlx5_stop_health_poll(dev, boot);
+ mlx5_core_disable_hca(dev, 0);
err_cmd_cleanup:
mlx5_cmd_set_state(dev, MLX5_CMDIF_STATE_DOWN);
mlx5_cmd_disable(dev);
@@ -1254,8 +1253,8 @@ err_cmd_cleanup:
static void mlx5_function_disable(struct mlx5_core_dev *dev, bool boot)
{
mlx5_reclaim_startup_pages(dev);
- mlx5_core_disable_hca(dev, 0);
mlx5_stop_health_poll(dev, boot);
+ mlx5_core_disable_hca(dev, 0);
mlx5_cmd_set_state(dev, MLX5_CMDIF_STATE_DOWN);
mlx5_cmd_disable(dev);
}
@@ -1804,6 +1803,7 @@ static const int types[] = {
MLX5_CAP_ADV_VIRTUALIZATION,
MLX5_CAP_CRYPTO,
MLX5_CAP_SHAMPO,
+ MLX5_CAP_ADV_RDMA,
};
static void mlx5_hca_caps_free(struct mlx5_core_dev *dev)
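The reordering in mlx5_function_enable()/mlx5_function_disable() above keeps the health poll strictly inside the window where the HCA is enabled, and the error labels unwind in the mirrored order. A generic sketch of that goto-unwind pattern with illustrative step names (set_issi deliberately fails to show the path):

#include <stdio.h>

static int enable_hca(void)             { puts("enable hca"); return 0; }
static void disable_hca(void)           { puts("disable hca"); }
static void start_health_poll(void)     { puts("start health poll"); }
static void stop_health_poll(void)      { puts("stop health poll"); }
static int set_issi(void)               { puts("set issi"); return -1; /* force the unwind */ }

int main(void)
{
        int err;

        err = enable_hca();
        if (err)
                return err;

        start_health_poll();    /* only after the HCA is up */

        err = set_issi();
        if (err)
                goto err_stop_health_poll;

        return 0;

err_stop_health_poll:
        stop_health_poll();     /* undo in reverse order of setup */
        disable_hca();
        return err;
}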
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index 99de67c3aa74..2e02bdea8361 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -114,6 +114,26 @@ struct mlx5_cmd_alias_obj_create_attr {
u8 access_key[ACCESS_KEY_LEN];
};
+struct mlx5_port_eth_proto {
+ u32 cap;
+ u32 admin;
+ u32 oper;
+};
+
+struct mlx5_module_eeprom_query_params {
+ u16 size;
+ u16 offset;
+ u16 i2c_address;
+ u32 page;
+ u32 bank;
+ u32 module_number;
+};
+
+struct mlx5_link_info {
+ u32 speed;
+ u32 lanes;
+};
+
static inline void mlx5_printk(struct mlx5_core_dev *dev, int level, const char *format, ...)
{
struct device *device = dev->device;
@@ -280,6 +300,78 @@ int mlx5_set_mtppse(struct mlx5_core_dev *mdev, u8 pin, u8 arm, u8 mode);
struct mlx5_dm *mlx5_dm_create(struct mlx5_core_dev *dev);
void mlx5_dm_cleanup(struct mlx5_core_dev *dev);
+void mlx5_toggle_port_link(struct mlx5_core_dev *dev);
+int mlx5_set_port_admin_status(struct mlx5_core_dev *dev,
+ enum mlx5_port_status status);
+int mlx5_query_port_admin_status(struct mlx5_core_dev *dev,
+ enum mlx5_port_status *status);
+int mlx5_set_port_beacon(struct mlx5_core_dev *dev, u16 beacon_duration);
+
+int mlx5_set_port_mtu(struct mlx5_core_dev *dev, u16 mtu, u8 port);
+int mlx5_set_port_pause(struct mlx5_core_dev *dev, u32 rx_pause, u32 tx_pause);
+int mlx5_query_port_pause(struct mlx5_core_dev *dev,
+ u32 *rx_pause, u32 *tx_pause);
+
+int mlx5_set_port_pfc(struct mlx5_core_dev *dev, u8 pfc_en_tx, u8 pfc_en_rx);
+int mlx5_query_port_pfc(struct mlx5_core_dev *dev, u8 *pfc_en_tx,
+ u8 *pfc_en_rx);
+
+int mlx5_set_port_stall_watermark(struct mlx5_core_dev *dev,
+ u16 stall_critical_watermark,
+ u16 stall_minor_watermark);
+int mlx5_query_port_stall_watermark(struct mlx5_core_dev *dev,
+ u16 *stall_critical_watermark,
+ u16 *stall_minor_watermark);
+
+int mlx5_max_tc(struct mlx5_core_dev *mdev);
+int mlx5_set_port_prio_tc(struct mlx5_core_dev *mdev, u8 *prio_tc);
+int mlx5_query_port_prio_tc(struct mlx5_core_dev *mdev,
+ u8 prio, u8 *tc);
+int mlx5_set_port_tc_group(struct mlx5_core_dev *mdev, u8 *tc_group);
+int mlx5_query_port_tc_group(struct mlx5_core_dev *mdev,
+ u8 tc, u8 *tc_group);
+int mlx5_set_port_tc_bw_alloc(struct mlx5_core_dev *mdev, u8 *tc_bw);
+int mlx5_query_port_tc_bw_alloc(struct mlx5_core_dev *mdev,
+ u8 tc, u8 *bw_pct);
+int mlx5_modify_port_ets_rate_limit(struct mlx5_core_dev *mdev,
+ u8 *max_bw_value,
+ u8 *max_bw_unit);
+int mlx5_query_port_ets_rate_limit(struct mlx5_core_dev *mdev,
+ u8 *max_bw_value,
+ u8 *max_bw_unit);
+int mlx5_set_port_wol(struct mlx5_core_dev *mdev, u8 wol_mode);
+int mlx5_query_port_wol(struct mlx5_core_dev *mdev, u8 *wol_mode);
+
+int mlx5_query_ports_check(struct mlx5_core_dev *mdev, u32 *out, int outlen);
+int mlx5_set_ports_check(struct mlx5_core_dev *mdev, u32 *in, int inlen);
+int mlx5_set_port_fcs(struct mlx5_core_dev *mdev, u8 enable);
+void mlx5_query_port_fcs(struct mlx5_core_dev *mdev, bool *supported,
+ bool *enabled);
+int mlx5_query_module_eeprom(struct mlx5_core_dev *dev,
+ u16 offset, u16 size, u8 *data);
+int
+mlx5_query_module_eeprom_by_page(struct mlx5_core_dev *dev,
+ struct mlx5_module_eeprom_query_params *params,
+ u8 *data);
+
+int mlx5_query_port_dcbx_param(struct mlx5_core_dev *mdev, u32 *out);
+int mlx5_set_port_dcbx_param(struct mlx5_core_dev *mdev, u32 *in);
+int mlx5_set_trust_state(struct mlx5_core_dev *mdev, u8 trust_state);
+int mlx5_query_trust_state(struct mlx5_core_dev *mdev, u8 *trust_state);
+int mlx5_set_dscp2prio(struct mlx5_core_dev *mdev, u8 dscp, u8 prio);
+int mlx5_query_dscp2prio(struct mlx5_core_dev *mdev, u8 *dscp2prio);
+
+int mlx5_port_query_eth_proto(struct mlx5_core_dev *dev, u8 port, bool ext,
+ struct mlx5_port_eth_proto *eproto);
+bool mlx5_ptys_ext_supported(struct mlx5_core_dev *mdev);
+const struct mlx5_link_info *mlx5_port_ptys2info(struct mlx5_core_dev *mdev,
+ u32 eth_proto_oper,
+ bool force_legacy);
+u32 mlx5_port_info2linkmodes(struct mlx5_core_dev *mdev,
+ struct mlx5_link_info *info,
+ bool force_legacy);
+int mlx5_port_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed);
+
#define MLX5_PPS_CAP(mdev) (MLX5_CAP_GEN((mdev), pps) && \
MLX5_CAP_GEN((mdev), pps_modify) && \
MLX5_CAP_MCAM_FEATURE((mdev), mtpps_fs) && \
@@ -346,6 +438,8 @@ int mlx5_vport_set_other_func_cap(struct mlx5_core_dev *dev, const void *hca_cap
#define mlx5_vport_get_other_func_general_cap(dev, vport, out) \
mlx5_vport_get_other_func_cap(dev, vport, out, MLX5_CAP_GENERAL)
+int mlx5_vport_get_vhca_id(struct mlx5_core_dev *dev, u16 vport, u16 *vhca_id);
+
static inline u32 mlx5_sriov_get_vf_total_msix(struct pci_dev *pdev)
{
struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h
index 0881e961d8b1..586688da9940 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h
@@ -10,12 +10,15 @@
struct mlx5_irq;
struct cpu_rmap;
+struct mlx5_irq_pool;
int mlx5_irq_table_init(struct mlx5_core_dev *dev);
void mlx5_irq_table_cleanup(struct mlx5_core_dev *dev);
int mlx5_irq_table_create(struct mlx5_core_dev *dev);
void mlx5_irq_table_destroy(struct mlx5_core_dev *dev);
void mlx5_irq_table_free_irqs(struct mlx5_core_dev *dev);
+struct mlx5_irq_pool *
+mlx5_irq_table_get_comp_irq_pool(struct mlx5_core_dev *dev);
int mlx5_irq_table_get_num_comp(struct mlx5_irq_table *table);
int mlx5_irq_table_get_sfs_vec(struct mlx5_irq_table *table);
struct mlx5_irq_table *mlx5_irq_table_get(struct mlx5_core_dev *dev);
@@ -38,7 +41,6 @@ struct cpumask *mlx5_irq_get_affinity_mask(struct mlx5_irq *irq);
int mlx5_irq_get_index(struct mlx5_irq *irq);
int mlx5_irq_get_irq(const struct mlx5_irq *irq);
-struct mlx5_irq_pool;
#ifdef CONFIG_MLX5_SF
struct mlx5_irq *mlx5_irq_affinity_irq_request_auto(struct mlx5_core_dev *dev,
struct cpumask *used_cpus, u16 vecidx);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
index d9362eabc6a1..2c5f850c31f6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
@@ -378,6 +378,11 @@ int mlx5_irq_get_index(struct mlx5_irq *irq)
return irq->map.index;
}
+struct mlx5_irq_pool *mlx5_irq_get_pool(struct mlx5_irq *irq)
+{
+ return irq->pool;
+}
+
/* irq_pool API */
/* requesting an irq from a given pool according to given index */
@@ -405,18 +410,20 @@ static struct mlx5_irq_pool *sf_ctrl_irq_pool_get(struct mlx5_irq_table *irq_tab
return irq_table->sf_ctrl_pool;
}
-static struct mlx5_irq_pool *sf_irq_pool_get(struct mlx5_irq_table *irq_table)
+static struct mlx5_irq_pool *
+sf_comp_irq_pool_get(struct mlx5_irq_table *irq_table)
{
return irq_table->sf_comp_pool;
}
-struct mlx5_irq_pool *mlx5_irq_pool_get(struct mlx5_core_dev *dev)
+struct mlx5_irq_pool *
+mlx5_irq_table_get_comp_irq_pool(struct mlx5_core_dev *dev)
{
struct mlx5_irq_table *irq_table = mlx5_irq_table_get(dev);
struct mlx5_irq_pool *pool = NULL;
if (mlx5_core_is_sf(dev))
- pool = sf_irq_pool_get(irq_table);
+ pool = sf_comp_irq_pool_get(irq_table);
/* In some configs, there won't be a pool of SFs IRQs. Hence, returning
* the PF IRQs pool in case the SF pool doesn't exist.
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.h b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.h
index c4d377f8df30..cc064425fe16 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.h
@@ -28,7 +28,6 @@ struct mlx5_irq_pool {
struct mlx5_core_dev *dev;
};
-struct mlx5_irq_pool *mlx5_irq_pool_get(struct mlx5_core_dev *dev);
static inline bool mlx5_irq_pool_is_sf_pool(struct mlx5_irq_pool *pool)
{
return !strncmp("mlx5_sf", pool->name, strlen("mlx5_sf"));
@@ -40,5 +39,6 @@ struct mlx5_irq *mlx5_irq_alloc(struct mlx5_irq_pool *pool, int i,
int mlx5_irq_get_locked(struct mlx5_irq *irq);
int mlx5_irq_read_locked(struct mlx5_irq *irq);
int mlx5_irq_put(struct mlx5_irq *irq);
+struct mlx5_irq_pool *mlx5_irq_get_pool(struct mlx5_irq *irq);
#endif /* __PCI_IRQ_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c
index 3995df064101..549f1066d2a5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c
@@ -196,7 +196,6 @@ void mlx5_toggle_port_link(struct mlx5_core_dev *dev)
if (ps == MLX5_PORT_UP)
mlx5_set_port_admin_status(dev, MLX5_PORT_UP);
}
-EXPORT_SYMBOL_GPL(mlx5_toggle_port_link);
int mlx5_set_port_admin_status(struct mlx5_core_dev *dev,
enum mlx5_port_status status)
@@ -210,7 +209,6 @@ int mlx5_set_port_admin_status(struct mlx5_core_dev *dev,
return mlx5_core_access_reg(dev, in, sizeof(in), out,
sizeof(out), MLX5_REG_PAOS, 0, 1);
}
-EXPORT_SYMBOL_GPL(mlx5_set_port_admin_status);
int mlx5_query_port_admin_status(struct mlx5_core_dev *dev,
enum mlx5_port_status *status)
@@ -227,7 +225,6 @@ int mlx5_query_port_admin_status(struct mlx5_core_dev *dev,
*status = MLX5_GET(paos_reg, out, admin_status);
return 0;
}
-EXPORT_SYMBOL_GPL(mlx5_query_port_admin_status);
static void mlx5_query_port_mtu(struct mlx5_core_dev *dev, u16 *admin_mtu,
u16 *max_mtu, u16 *oper_mtu, u8 port)
@@ -257,7 +254,6 @@ int mlx5_set_port_mtu(struct mlx5_core_dev *dev, u16 mtu, u8 port)
return mlx5_core_access_reg(dev, in, sizeof(in), out,
sizeof(out), MLX5_REG_PMTU, 0, 1);
}
-EXPORT_SYMBOL_GPL(mlx5_set_port_mtu);
void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, u16 *max_mtu,
u8 port)
@@ -447,7 +443,6 @@ int mlx5_query_module_eeprom(struct mlx5_core_dev *dev,
return mlx5_query_mcia(dev, &query, data);
}
-EXPORT_SYMBOL_GPL(mlx5_query_module_eeprom);
int mlx5_query_module_eeprom_by_page(struct mlx5_core_dev *dev,
struct mlx5_module_eeprom_query_params *params,
@@ -467,7 +462,6 @@ int mlx5_query_module_eeprom_by_page(struct mlx5_core_dev *dev,
return mlx5_query_mcia(dev, params, data);
}
-EXPORT_SYMBOL_GPL(mlx5_query_module_eeprom_by_page);
static int mlx5_query_port_pvlc(struct mlx5_core_dev *dev, u32 *pvlc,
int pvlc_size, u8 local_port)
@@ -518,7 +512,6 @@ int mlx5_set_port_pause(struct mlx5_core_dev *dev, u32 rx_pause, u32 tx_pause)
return mlx5_core_access_reg(dev, in, sizeof(in), out,
sizeof(out), MLX5_REG_PFCC, 0, 1);
}
-EXPORT_SYMBOL_GPL(mlx5_set_port_pause);
int mlx5_query_port_pause(struct mlx5_core_dev *dev,
u32 *rx_pause, u32 *tx_pause)
@@ -538,7 +531,6 @@ int mlx5_query_port_pause(struct mlx5_core_dev *dev,
return 0;
}
-EXPORT_SYMBOL_GPL(mlx5_query_port_pause);
int mlx5_set_port_stall_watermark(struct mlx5_core_dev *dev,
u16 stall_critical_watermark,
@@ -597,7 +589,6 @@ int mlx5_set_port_pfc(struct mlx5_core_dev *dev, u8 pfc_en_tx, u8 pfc_en_rx)
return mlx5_core_access_reg(dev, in, sizeof(in), out,
sizeof(out), MLX5_REG_PFCC, 0, 1);
}
-EXPORT_SYMBOL_GPL(mlx5_set_port_pfc);
int mlx5_query_port_pfc(struct mlx5_core_dev *dev, u8 *pfc_en_tx, u8 *pfc_en_rx)
{
@@ -616,7 +607,6 @@ int mlx5_query_port_pfc(struct mlx5_core_dev *dev, u8 *pfc_en_tx, u8 *pfc_en_rx)
return 0;
}
-EXPORT_SYMBOL_GPL(mlx5_query_port_pfc);
int mlx5_max_tc(struct mlx5_core_dev *mdev)
{
@@ -667,7 +657,6 @@ int mlx5_set_port_prio_tc(struct mlx5_core_dev *mdev, u8 *prio_tc)
return 0;
}
-EXPORT_SYMBOL_GPL(mlx5_set_port_prio_tc);
int mlx5_query_port_prio_tc(struct mlx5_core_dev *mdev,
u8 prio, u8 *tc)
@@ -689,7 +678,6 @@ int mlx5_query_port_prio_tc(struct mlx5_core_dev *mdev,
return err;
}
-EXPORT_SYMBOL_GPL(mlx5_query_port_prio_tc);
static int mlx5_set_port_qetcr_reg(struct mlx5_core_dev *mdev, u32 *in,
int inlen)
@@ -728,7 +716,6 @@ int mlx5_set_port_tc_group(struct mlx5_core_dev *mdev, u8 *tc_group)
return mlx5_set_port_qetcr_reg(mdev, in, sizeof(in));
}
-EXPORT_SYMBOL_GPL(mlx5_set_port_tc_group);
int mlx5_query_port_tc_group(struct mlx5_core_dev *mdev,
u8 tc, u8 *tc_group)
@@ -749,7 +736,6 @@ int mlx5_query_port_tc_group(struct mlx5_core_dev *mdev,
return 0;
}
-EXPORT_SYMBOL_GPL(mlx5_query_port_tc_group);
int mlx5_set_port_tc_bw_alloc(struct mlx5_core_dev *mdev, u8 *tc_bw)
{
@@ -763,7 +749,6 @@ int mlx5_set_port_tc_bw_alloc(struct mlx5_core_dev *mdev, u8 *tc_bw)
return mlx5_set_port_qetcr_reg(mdev, in, sizeof(in));
}
-EXPORT_SYMBOL_GPL(mlx5_set_port_tc_bw_alloc);
int mlx5_query_port_tc_bw_alloc(struct mlx5_core_dev *mdev,
u8 tc, u8 *bw_pct)
@@ -784,7 +769,6 @@ int mlx5_query_port_tc_bw_alloc(struct mlx5_core_dev *mdev,
return 0;
}
-EXPORT_SYMBOL_GPL(mlx5_query_port_tc_bw_alloc);
int mlx5_modify_port_ets_rate_limit(struct mlx5_core_dev *mdev,
u8 *max_bw_value,
@@ -808,7 +792,6 @@ int mlx5_modify_port_ets_rate_limit(struct mlx5_core_dev *mdev,
return mlx5_set_port_qetcr_reg(mdev, in, sizeof(in));
}
-EXPORT_SYMBOL_GPL(mlx5_modify_port_ets_rate_limit);
int mlx5_query_port_ets_rate_limit(struct mlx5_core_dev *mdev,
u8 *max_bw_value,
@@ -834,7 +817,6 @@ int mlx5_query_port_ets_rate_limit(struct mlx5_core_dev *mdev,
return 0;
}
-EXPORT_SYMBOL_GPL(mlx5_query_port_ets_rate_limit);
int mlx5_set_port_wol(struct mlx5_core_dev *mdev, u8 wol_mode)
{
@@ -845,7 +827,6 @@ int mlx5_set_port_wol(struct mlx5_core_dev *mdev, u8 wol_mode)
MLX5_SET(set_wol_rol_in, in, wol_mode, wol_mode);
return mlx5_cmd_exec_in(mdev, set_wol_rol, in);
}
-EXPORT_SYMBOL_GPL(mlx5_set_port_wol);
int mlx5_query_port_wol(struct mlx5_core_dev *mdev, u8 *wol_mode)
{
@@ -860,7 +841,6 @@ int mlx5_query_port_wol(struct mlx5_core_dev *mdev, u8 *wol_mode)
return err;
}
-EXPORT_SYMBOL_GPL(mlx5_query_port_wol);
int mlx5_query_ports_check(struct mlx5_core_dev *mdev, u32 *out, int outlen)
{
@@ -1058,56 +1038,57 @@ out:
}
/* speed in units of 1Mb */
-static const u32 mlx5e_link_speed[MLX5E_LINK_MODES_NUMBER] = {
- [MLX5E_1000BASE_CX_SGMII] = 1000,
- [MLX5E_1000BASE_KX] = 1000,
- [MLX5E_10GBASE_CX4] = 10000,
- [MLX5E_10GBASE_KX4] = 10000,
- [MLX5E_10GBASE_KR] = 10000,
- [MLX5E_20GBASE_KR2] = 20000,
- [MLX5E_40GBASE_CR4] = 40000,
- [MLX5E_40GBASE_KR4] = 40000,
- [MLX5E_56GBASE_R4] = 56000,
- [MLX5E_10GBASE_CR] = 10000,
- [MLX5E_10GBASE_SR] = 10000,
- [MLX5E_10GBASE_ER] = 10000,
- [MLX5E_40GBASE_SR4] = 40000,
- [MLX5E_40GBASE_LR4] = 40000,
- [MLX5E_50GBASE_SR2] = 50000,
- [MLX5E_100GBASE_CR4] = 100000,
- [MLX5E_100GBASE_SR4] = 100000,
- [MLX5E_100GBASE_KR4] = 100000,
- [MLX5E_100GBASE_LR4] = 100000,
- [MLX5E_100BASE_TX] = 100,
- [MLX5E_1000BASE_T] = 1000,
- [MLX5E_10GBASE_T] = 10000,
- [MLX5E_25GBASE_CR] = 25000,
- [MLX5E_25GBASE_KR] = 25000,
- [MLX5E_25GBASE_SR] = 25000,
- [MLX5E_50GBASE_CR2] = 50000,
- [MLX5E_50GBASE_KR2] = 50000,
+static const struct mlx5_link_info mlx5e_link_info[MLX5E_LINK_MODES_NUMBER] = {
+ [MLX5E_1000BASE_CX_SGMII] = {.speed = 1000, .lanes = 1},
+ [MLX5E_1000BASE_KX] = {.speed = 1000, .lanes = 1},
+ [MLX5E_10GBASE_CX4] = {.speed = 10000, .lanes = 4},
+ [MLX5E_10GBASE_KX4] = {.speed = 10000, .lanes = 4},
+ [MLX5E_10GBASE_KR] = {.speed = 10000, .lanes = 1},
+ [MLX5E_20GBASE_KR2] = {.speed = 20000, .lanes = 2},
+ [MLX5E_40GBASE_CR4] = {.speed = 40000, .lanes = 4},
+ [MLX5E_40GBASE_KR4] = {.speed = 40000, .lanes = 4},
+ [MLX5E_56GBASE_R4] = {.speed = 56000, .lanes = 4},
+ [MLX5E_10GBASE_CR] = {.speed = 10000, .lanes = 1},
+ [MLX5E_10GBASE_SR] = {.speed = 10000, .lanes = 1},
+ [MLX5E_10GBASE_ER] = {.speed = 10000, .lanes = 1},
+ [MLX5E_40GBASE_SR4] = {.speed = 40000, .lanes = 4},
+ [MLX5E_40GBASE_LR4] = {.speed = 40000, .lanes = 4},
+ [MLX5E_50GBASE_SR2] = {.speed = 50000, .lanes = 2},
+ [MLX5E_100GBASE_CR4] = {.speed = 100000, .lanes = 4},
+ [MLX5E_100GBASE_SR4] = {.speed = 100000, .lanes = 4},
+ [MLX5E_100GBASE_KR4] = {.speed = 100000, .lanes = 4},
+ [MLX5E_100GBASE_LR4] = {.speed = 100000, .lanes = 4},
+ [MLX5E_100BASE_TX] = {.speed = 100, .lanes = 1},
+ [MLX5E_1000BASE_T] = {.speed = 1000, .lanes = 1},
+ [MLX5E_10GBASE_T] = {.speed = 10000, .lanes = 1},
+ [MLX5E_25GBASE_CR] = {.speed = 25000, .lanes = 1},
+ [MLX5E_25GBASE_KR] = {.speed = 25000, .lanes = 1},
+ [MLX5E_25GBASE_SR] = {.speed = 25000, .lanes = 1},
+ [MLX5E_50GBASE_CR2] = {.speed = 50000, .lanes = 2},
+ [MLX5E_50GBASE_KR2] = {.speed = 50000, .lanes = 2},
};
-static const u32 mlx5e_ext_link_speed[MLX5E_EXT_LINK_MODES_NUMBER] = {
- [MLX5E_SGMII_100M] = 100,
- [MLX5E_1000BASE_X_SGMII] = 1000,
- [MLX5E_5GBASE_R] = 5000,
- [MLX5E_10GBASE_XFI_XAUI_1] = 10000,
- [MLX5E_40GBASE_XLAUI_4_XLPPI_4] = 40000,
- [MLX5E_25GAUI_1_25GBASE_CR_KR] = 25000,
- [MLX5E_50GAUI_2_LAUI_2_50GBASE_CR2_KR2] = 50000,
- [MLX5E_50GAUI_1_LAUI_1_50GBASE_CR_KR] = 50000,
- [MLX5E_CAUI_4_100GBASE_CR4_KR4] = 100000,
- [MLX5E_100GAUI_2_100GBASE_CR2_KR2] = 100000,
- [MLX5E_200GAUI_4_200GBASE_CR4_KR4] = 200000,
- [MLX5E_400GAUI_8_400GBASE_CR8] = 400000,
- [MLX5E_100GAUI_1_100GBASE_CR_KR] = 100000,
- [MLX5E_200GAUI_2_200GBASE_CR2_KR2] = 200000,
- [MLX5E_400GAUI_4_400GBASE_CR4_KR4] = 400000,
- [MLX5E_800GAUI_8_800GBASE_CR8_KR8] = 800000,
- [MLX5E_200GAUI_1_200GBASE_CR1_KR1] = 200000,
- [MLX5E_400GAUI_2_400GBASE_CR2_KR2] = 400000,
- [MLX5E_800GAUI_4_800GBASE_CR4_KR4] = 800000,
+static const struct mlx5_link_info
+mlx5e_ext_link_info[MLX5E_EXT_LINK_MODES_NUMBER] = {
+ [MLX5E_SGMII_100M] = {.speed = 100, .lanes = 1},
+ [MLX5E_1000BASE_X_SGMII] = {.speed = 1000, .lanes = 1},
+ [MLX5E_5GBASE_R] = {.speed = 5000, .lanes = 1},
+ [MLX5E_10GBASE_XFI_XAUI_1] = {.speed = 10000, .lanes = 1},
+ [MLX5E_40GBASE_XLAUI_4_XLPPI_4] = {.speed = 40000, .lanes = 4},
+ [MLX5E_25GAUI_1_25GBASE_CR_KR] = {.speed = 25000, .lanes = 1},
+ [MLX5E_50GAUI_2_LAUI_2_50GBASE_CR2_KR2] = {.speed = 50000, .lanes = 2},
+ [MLX5E_50GAUI_1_LAUI_1_50GBASE_CR_KR] = {.speed = 50000, .lanes = 1},
+ [MLX5E_CAUI_4_100GBASE_CR4_KR4] = {.speed = 100000, .lanes = 4},
+ [MLX5E_100GAUI_2_100GBASE_CR2_KR2] = {.speed = 100000, .lanes = 2},
+ [MLX5E_200GAUI_4_200GBASE_CR4_KR4] = {.speed = 200000, .lanes = 4},
+ [MLX5E_400GAUI_8_400GBASE_CR8] = {.speed = 400000, .lanes = 8},
+ [MLX5E_100GAUI_1_100GBASE_CR_KR] = {.speed = 100000, .lanes = 1},
+ [MLX5E_200GAUI_2_200GBASE_CR2_KR2] = {.speed = 200000, .lanes = 2},
+ [MLX5E_400GAUI_4_400GBASE_CR4_KR4] = {.speed = 400000, .lanes = 4},
+ [MLX5E_800GAUI_8_800GBASE_CR8_KR8] = {.speed = 800000, .lanes = 8},
+ [MLX5E_200GAUI_1_200GBASE_CR1_KR1] = {.speed = 200000, .lanes = 1},
+ [MLX5E_400GAUI_2_400GBASE_CR2_KR2] = {.speed = 400000, .lanes = 2},
+ [MLX5E_800GAUI_4_800GBASE_CR4_KR4] = {.speed = 800000, .lanes = 4},
};
int mlx5_port_query_eth_proto(struct mlx5_core_dev *dev, u8 port, bool ext,
@@ -1145,54 +1126,61 @@ bool mlx5_ptys_ext_supported(struct mlx5_core_dev *mdev)
return !!eproto.cap;
}
-static void mlx5e_port_get_speed_arr(struct mlx5_core_dev *mdev,
- const u32 **arr, u32 *size,
- bool force_legacy)
+static void mlx5e_port_get_link_mode_info_arr(struct mlx5_core_dev *mdev,
+ const struct mlx5_link_info **arr,
+ u32 *size,
+ bool force_legacy)
{
bool ext = force_legacy ? false : mlx5_ptys_ext_supported(mdev);
- *size = ext ? ARRAY_SIZE(mlx5e_ext_link_speed) :
- ARRAY_SIZE(mlx5e_link_speed);
- *arr = ext ? mlx5e_ext_link_speed : mlx5e_link_speed;
+ *size = ext ? ARRAY_SIZE(mlx5e_ext_link_info) :
+ ARRAY_SIZE(mlx5e_link_info);
+ *arr = ext ? mlx5e_ext_link_info : mlx5e_link_info;
}
-u32 mlx5_port_ptys2speed(struct mlx5_core_dev *mdev, u32 eth_proto_oper,
- bool force_legacy)
+const struct mlx5_link_info *mlx5_port_ptys2info(struct mlx5_core_dev *mdev,
+ u32 eth_proto_oper,
+ bool force_legacy)
{
unsigned long temp = eth_proto_oper;
- const u32 *table;
- u32 speed = 0;
+ const struct mlx5_link_info *table;
u32 max_size;
int i;
- mlx5e_port_get_speed_arr(mdev, &table, &max_size, force_legacy);
+ mlx5e_port_get_link_mode_info_arr(mdev, &table, &max_size,
+ force_legacy);
i = find_first_bit(&temp, max_size);
if (i < max_size)
- speed = table[i];
- return speed;
+ return &table[i];
+
+ return NULL;
}
-u32 mlx5_port_speed2linkmodes(struct mlx5_core_dev *mdev, u32 speed,
- bool force_legacy)
+u32 mlx5_port_info2linkmodes(struct mlx5_core_dev *mdev,
+ struct mlx5_link_info *info,
+ bool force_legacy)
{
+ const struct mlx5_link_info *table;
u32 link_modes = 0;
- const u32 *table;
u32 max_size;
int i;
- mlx5e_port_get_speed_arr(mdev, &table, &max_size, force_legacy);
+ mlx5e_port_get_link_mode_info_arr(mdev, &table, &max_size,
+ force_legacy);
for (i = 0; i < max_size; ++i) {
- if (table[i] == speed)
- link_modes |= MLX5E_PROT_MASK(i);
+ if (table[i].speed == info->speed) {
+ if (!info->lanes || table[i].lanes == info->lanes)
+ link_modes |= MLX5E_PROT_MASK(i);
+ }
}
return link_modes;
}
int mlx5_port_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed)
{
+ const struct mlx5_link_info *table;
struct mlx5_port_eth_proto eproto;
u32 max_speed = 0;
- const u32 *table;
u32 max_size;
bool ext;
int err;
@@ -1203,10 +1191,10 @@ int mlx5_port_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed)
if (err)
return err;
- mlx5e_port_get_speed_arr(mdev, &table, &max_size, false);
+ mlx5e_port_get_link_mode_info_arr(mdev, &table, &max_size, false);
for (i = 0; i < max_size; ++i)
if (eproto.cap & MLX5E_PROT_MASK(i))
- max_speed = max(max_speed, table[i]);
+ max_speed = max(max_speed, table[i].speed);
*speed = max_speed;
return 0;
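
The hunks above replace the flat u32 speed tables with struct mlx5_link_info entries carrying both speed and lane count, so a link mode can be matched on speed alone or on speed plus lanes. A minimal standalone sketch of that matching logic follows; the table contents and names are placeholders, not the driver's full PTYS mapping:

    /* Standalone sketch (not driver code): match link modes by speed and,
     * optionally, lane count, mirroring the table conversion above. */
    #include <stdint.h>
    #include <stdio.h>

    struct link_info { uint32_t speed; uint32_t lanes; };

    static const struct link_info table[] = {
        { 50000, 1 },   /* e.g. 50G over one lane  */
        { 50000, 2 },   /* e.g. 50G over two lanes */
        { 100000, 2 },
        { 100000, 4 },
    };

    /* Return a bitmask of matching table indices; lanes == 0 means
     * "any lane count", as in the reworked mlx5_port_info2linkmodes(). */
    static unsigned int info_to_linkmodes(uint32_t speed, uint32_t lanes)
    {
        unsigned int modes = 0;

        for (unsigned int i = 0; i < sizeof(table) / sizeof(table[0]); i++)
            if (table[i].speed == speed &&
                (!lanes || table[i].lanes == lanes))
                modes |= 1u << i;
        return modes;
    }

    int main(void)
    {
        printf("50G/any -> 0x%x\n", info_to_linkmodes(50000, 0)); /* 0x3 */
        printf("50G/2ln -> 0x%x\n", info_to_linkmodes(50000, 2)); /* 0x2 */
        return 0;
    }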
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
index b96909fbeb12..0864ba625c07 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
@@ -285,7 +285,7 @@ mlx5_sf_new_check_attr(struct mlx5_core_dev *dev, const struct devlink_port_new_
NL_SET_ERR_MSG_MOD(extack, "External controller is unsupported");
return -EOPNOTSUPP;
}
- if (new_attr->pfnum != mlx5_get_dev_index(dev)) {
+ if (new_attr->pfnum != PCI_FUNC(dev->pdev->devfn)) {
NL_SET_ERR_MSG_MOD(extack, "Invalid pfnum supplied");
return -EOPNOTSUPP;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.c
index 3dbd4efa21a2..19dce1ba512d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.c
@@ -220,7 +220,7 @@ static int hws_bwc_queue_poll(struct mlx5hws_context *ctx,
bool drain)
{
unsigned long timeout = jiffies +
- msecs_to_jiffies(MLX5HWS_BWC_POLLING_TIMEOUT * MSEC_PER_SEC);
+ secs_to_jiffies(MLX5HWS_BWC_POLLING_TIMEOUT);
struct mlx5hws_flow_op_result comp[MLX5HWS_BWC_MATCHER_REHASH_BURST_TH];
u16 burst_th = hws_bwc_get_burst_th(ctx, queue_id);
bool got_comp = *pending_rules >= burst_th;
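
The timeout above now goes through secs_to_jiffies() instead of scaling the seconds value into milliseconds first; both forms compute the same deadline. A toy standalone comparison, where HZ and the timeout are placeholders and the simplified conversions ignore the kernel's rounding behaviour:

    #include <stdio.h>

    #define HZ 250                    /* example tick rate */
    #define POLL_TIMEOUT_SECS 60      /* placeholder timeout */

    static unsigned long msecs_to_jiffies(unsigned long ms)   { return ms * HZ / 1000; }
    static unsigned long secs_to_jiffies(unsigned long secs)  { return secs * HZ; }

    int main(void)
    {
        printf("%lu == %lu\n",
               msecs_to_jiffies(POLL_TIMEOUT_SECS * 1000),
               secs_to_jiffies(POLL_TIMEOUT_SECS));
        return 0;
    }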
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.h
index f9f569131dde..47f7ed141553 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.h
@@ -24,8 +24,8 @@ struct mlx5hws_bwc_matcher {
struct mlx5hws_matcher *matcher;
struct mlx5hws_match_template *mt;
struct mlx5hws_action_template *at[MLX5HWS_BWC_MATCHER_ATTACH_AT_NUM];
+ u32 priority;
u8 num_of_at;
- u16 priority;
u8 size_log;
atomic_t num_of_rules;
struct list_head *rules;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.c
index 487e75476b0a..e8f98c109b99 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.c
@@ -130,12 +130,6 @@ int mlx5hws_cmd_flow_table_destroy(struct mlx5_core_dev *mdev,
return mlx5_cmd_exec_in(mdev, destroy_flow_table, in);
}
-void mlx5hws_cmd_alias_flow_table_destroy(struct mlx5_core_dev *mdev,
- u32 table_id)
-{
- hws_cmd_general_obj_destroy(mdev, MLX5_OBJ_TYPE_FT_ALIAS, table_id);
-}
-
static int hws_cmd_flow_group_create(struct mlx5_core_dev *mdev,
struct mlx5hws_cmd_fg_attr *fg_attr,
u32 *group_id)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.h
index 610c63d81ad9..51d9e0291ac1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.h
@@ -258,9 +258,6 @@ int mlx5hws_cmd_flow_table_query(struct mlx5_core_dev *mdev,
int mlx5hws_cmd_flow_table_destroy(struct mlx5_core_dev *mdev,
u8 fw_ft_type, u32 table_id);
-void mlx5hws_cmd_alias_flow_table_destroy(struct mlx5_core_dev *mdev,
- u32 table_id);
-
int mlx5hws_cmd_rtc_create(struct mlx5_core_dev *mdev,
struct mlx5hws_cmd_rtc_create_attr *rtc_attr,
u32 *rtc_id);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.c
index 10ece7df1cfa..c8cc0c8115f5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.c
@@ -500,7 +500,8 @@ hws_definer_check_match_flags(struct mlx5hws_definer_conv_data *cd)
return 0;
err_conflict:
- mlx5hws_err(cd->ctx, "Invalid definer fields combination\n");
+ mlx5hws_err(cd->ctx, "Invalid definer fields combination: match_flags = 0x%08x\n",
+ cd->match_flags);
return -EINVAL;
}
@@ -1985,8 +1986,7 @@ int mlx5hws_definer_get_obj(struct mlx5hws_context *ctx,
continue;
/* Reuse definer and set LRU (move to be first in the list) */
- list_del_init(&cached_definer->list_node);
- list_add(&cached_definer->list_node, &cache->list_head);
+ list_move(&cached_definer->list_node, &cache->list_head);
cached_definer->refcount++;
return cached_definer->definer.obj_id;
}
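
Both LRU caches touched here (the definer cache above and the pattern cache later in pat_arg.c) promote a hit to the front of the list; list_move() folds the previous list_del_init() + list_add() pair into a single call with the same result. A standalone sketch of the move-to-front operation, using a minimal stand-in for struct list_head:

    #include <stdio.h>

    struct node { struct node *prev, *next; int id; };

    static void list_init(struct node *head) { head->prev = head->next = head; }

    static void list_del(struct node *n)
    {
        n->prev->next = n->next;
        n->next->prev = n->prev;
    }

    static void list_add(struct node *n, struct node *head)
    {
        n->next = head->next;
        n->prev = head;
        head->next->prev = n;
        head->next = n;
    }

    /* Equivalent of list_move(): unlink and reinsert right after head. */
    static void list_move(struct node *n, struct node *head)
    {
        list_del(n);
        list_add(n, head);
    }

    int main(void)
    {
        struct node head, a = { .id = 1 }, b = { .id = 2 };

        list_init(&head);
        list_add(&a, &head);            /* list: a */
        list_add(&b, &head);            /* list: b, a */
        list_move(&a, &head);           /* LRU hit on a: a, b */
        printf("front is %d\n", head.next->id);  /* 1 */
        return 0;
    }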
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.c
index f34bbbbba1c2..1b787cd66e6f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.c
@@ -66,6 +66,8 @@ static int mlx5_fs_init_hws_actions_pool(struct mlx5_core_dev *dev,
xa_init(&hws_pool->table_dests);
xa_init(&hws_pool->vport_dests);
xa_init(&hws_pool->vport_vhca_dests);
+ xa_init(&hws_pool->aso_meters);
+ xa_init(&hws_pool->sample_dests);
return 0;
cleanup_insert_hdr:
@@ -88,10 +90,17 @@ destroy_tag:
static void mlx5_fs_cleanup_hws_actions_pool(struct mlx5_fs_hws_context *fs_ctx)
{
struct mlx5_fs_hws_actions_pool *hws_pool = &fs_ctx->hws_pool;
+ struct mlx5_fs_hws_data *fs_hws_data;
struct mlx5hws_action *action;
struct mlx5_fs_pool *pool;
unsigned long i;
+ xa_for_each(&hws_pool->sample_dests, i, fs_hws_data)
+ kfree(fs_hws_data);
+ xa_destroy(&hws_pool->sample_dests);
+ xa_for_each(&hws_pool->aso_meters, i, fs_hws_data)
+ kfree(fs_hws_data);
+ xa_destroy(&hws_pool->aso_meters);
xa_for_each(&hws_pool->vport_vhca_dests, i, action)
mlx5hws_action_destroy(action);
xa_destroy(&hws_pool->vport_vhca_dests);
@@ -459,6 +468,106 @@ mlx5_fs_create_dest_action_range(struct mlx5hws_context *ctx,
flags);
}
+static struct mlx5_fs_hws_data *
+mlx5_fs_get_cached_hws_data(struct xarray *cache_xa, unsigned long index)
+{
+ struct mlx5_fs_hws_data *fs_hws_data;
+ int err;
+
+ xa_lock(cache_xa);
+ fs_hws_data = xa_load(cache_xa, index);
+ if (!fs_hws_data) {
+ fs_hws_data = kzalloc(sizeof(*fs_hws_data), GFP_ATOMIC);
+ if (!fs_hws_data) {
+ xa_unlock(cache_xa);
+ return NULL;
+ }
+ refcount_set(&fs_hws_data->hws_action_refcount, 0);
+ mutex_init(&fs_hws_data->lock);
+ err = __xa_insert(cache_xa, index, fs_hws_data, GFP_ATOMIC);
+ if (err) {
+ kfree(fs_hws_data);
+ xa_unlock(cache_xa);
+ return NULL;
+ }
+ }
+ xa_unlock(cache_xa);
+
+ return fs_hws_data;
+}
+
+static struct mlx5hws_action *
+mlx5_fs_get_action_aso_meter(struct mlx5_fs_hws_context *fs_ctx,
+ struct mlx5_exe_aso *exe_aso)
+{
+ struct mlx5_fs_hws_create_action_ctx create_ctx;
+ struct mlx5hws_context *ctx = fs_ctx->hws_ctx;
+ struct mlx5_fs_hws_data *meter_hws_data;
+ u32 id = exe_aso->base_id;
+ struct xarray *meters_xa;
+
+ meters_xa = &fs_ctx->hws_pool.aso_meters;
+ meter_hws_data = mlx5_fs_get_cached_hws_data(meters_xa, id);
+ if (!meter_hws_data)
+ return NULL;
+
+ create_ctx.hws_ctx = ctx;
+ create_ctx.actions_type = MLX5HWS_ACTION_TYP_ASO_METER;
+ create_ctx.id = id;
+ create_ctx.return_reg_id = exe_aso->return_reg_id;
+
+ return mlx5_fs_get_hws_action(meter_hws_data, &create_ctx);
+}
+
+static void mlx5_fs_put_action_aso_meter(struct mlx5_fs_hws_context *fs_ctx,
+ struct mlx5_exe_aso *exe_aso)
+{
+ struct mlx5_fs_hws_data *meter_hws_data;
+ struct xarray *meters_xa;
+
+ meters_xa = &fs_ctx->hws_pool.aso_meters;
+ meter_hws_data = xa_load(meters_xa, exe_aso->base_id);
+ if (!meter_hws_data)
+ return;
+ return mlx5_fs_put_hws_action(meter_hws_data);
+}
+
+static struct mlx5hws_action *
+mlx5_fs_get_dest_action_sampler(struct mlx5_fs_hws_context *fs_ctx,
+ struct mlx5_flow_rule *dst)
+{
+ struct mlx5_fs_hws_create_action_ctx create_ctx;
+ struct mlx5hws_context *ctx = fs_ctx->hws_ctx;
+ struct mlx5_fs_hws_data *sampler_hws_data;
+ u32 id = dst->dest_attr.sampler_id;
+ struct xarray *sampler_xa;
+
+ sampler_xa = &fs_ctx->hws_pool.sample_dests;
+ sampler_hws_data = mlx5_fs_get_cached_hws_data(sampler_xa, id);
+ if (!sampler_hws_data)
+ return NULL;
+
+ create_ctx.hws_ctx = ctx;
+ create_ctx.actions_type = MLX5HWS_ACTION_TYP_SAMPLER;
+ create_ctx.id = id;
+
+ return mlx5_fs_get_hws_action(sampler_hws_data, &create_ctx);
+}
+
+static void mlx5_fs_put_dest_action_sampler(struct mlx5_fs_hws_context *fs_ctx,
+ u32 sampler_id)
+{
+ struct mlx5_fs_hws_data *sampler_hws_data;
+ struct xarray *sampler_xa;
+
+ sampler_xa = &fs_ctx->hws_pool.sample_dests;
+ sampler_hws_data = xa_load(sampler_xa, sampler_id);
+ if (!sampler_hws_data)
+ return;
+
+ mlx5_fs_put_hws_action(sampler_hws_data);
+}
+
static struct mlx5hws_action *
mlx5_fs_create_action_dest_array(struct mlx5hws_context *ctx,
struct mlx5hws_action_dest_attr *dests,
@@ -519,26 +628,101 @@ mlx5_fs_create_action_last(struct mlx5hws_context *ctx)
return mlx5hws_action_create_last(ctx, flags);
}
-static void mlx5_fs_destroy_fs_action(struct mlx5_fs_hws_rule_action *fs_action)
+static struct mlx5hws_action *
+mlx5_fs_create_hws_action(struct mlx5_fs_hws_create_action_ctx *create_ctx)
+{
+ u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB | MLX5HWS_ACTION_FLAG_SHARED;
+
+ switch (create_ctx->actions_type) {
+ case MLX5HWS_ACTION_TYP_CTR:
+ return mlx5hws_action_create_counter(create_ctx->hws_ctx,
+ create_ctx->id, flags);
+ case MLX5HWS_ACTION_TYP_ASO_METER:
+ return mlx5hws_action_create_aso_meter(create_ctx->hws_ctx,
+ create_ctx->id,
+ create_ctx->return_reg_id,
+ flags);
+ case MLX5HWS_ACTION_TYP_SAMPLER:
+ return mlx5hws_action_create_flow_sampler(create_ctx->hws_ctx,
+ create_ctx->id, flags);
+ default:
+ return NULL;
+ }
+}
+
+struct mlx5hws_action *
+mlx5_fs_get_hws_action(struct mlx5_fs_hws_data *fs_hws_data,
+ struct mlx5_fs_hws_create_action_ctx *create_ctx)
+{
+	/* try to avoid locking if not necessary */
+ if (refcount_inc_not_zero(&fs_hws_data->hws_action_refcount))
+ return fs_hws_data->hws_action;
+
+ mutex_lock(&fs_hws_data->lock);
+ if (refcount_inc_not_zero(&fs_hws_data->hws_action_refcount)) {
+ mutex_unlock(&fs_hws_data->lock);
+ return fs_hws_data->hws_action;
+ }
+ fs_hws_data->hws_action = mlx5_fs_create_hws_action(create_ctx);
+ if (!fs_hws_data->hws_action) {
+ mutex_unlock(&fs_hws_data->lock);
+ return NULL;
+ }
+ refcount_set(&fs_hws_data->hws_action_refcount, 1);
+ mutex_unlock(&fs_hws_data->lock);
+
+ return fs_hws_data->hws_action;
+}
+
+void mlx5_fs_put_hws_action(struct mlx5_fs_hws_data *fs_hws_data)
+{
+ if (!fs_hws_data)
+ return;
+
+	/* try to avoid locking if not necessary */
+ if (refcount_dec_not_one(&fs_hws_data->hws_action_refcount))
+ return;
+
+ mutex_lock(&fs_hws_data->lock);
+ if (!refcount_dec_and_test(&fs_hws_data->hws_action_refcount)) {
+ mutex_unlock(&fs_hws_data->lock);
+ return;
+ }
+ mlx5hws_action_destroy(fs_hws_data->hws_action);
+ fs_hws_data->hws_action = NULL;
+ mutex_unlock(&fs_hws_data->lock);
+}
+
+static void mlx5_fs_destroy_fs_action(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_fs_hws_rule_action *fs_action)
{
+ struct mlx5_fs_hws_context *fs_ctx = &ns->fs_hws_context;
+
switch (mlx5hws_action_get_type(fs_action->action)) {
case MLX5HWS_ACTION_TYP_CTR:
mlx5_fc_put_hws_action(fs_action->counter);
break;
+ case MLX5HWS_ACTION_TYP_ASO_METER:
+ mlx5_fs_put_action_aso_meter(fs_ctx, fs_action->exe_aso);
+ break;
+ case MLX5HWS_ACTION_TYP_SAMPLER:
+ mlx5_fs_put_dest_action_sampler(fs_ctx, fs_action->sampler_id);
+ break;
default:
mlx5hws_action_destroy(fs_action->action);
}
}
static void
-mlx5_fs_destroy_fs_actions(struct mlx5_fs_hws_rule_action **fs_actions,
+mlx5_fs_destroy_fs_actions(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_fs_hws_rule_action **fs_actions,
int *num_fs_actions)
{
int i;
/* Free in reverse order to handle action dependencies */
for (i = *num_fs_actions - 1; i >= 0; i--)
- mlx5_fs_destroy_fs_action(*fs_actions + i);
+ mlx5_fs_destroy_fs_action(ns, *fs_actions + i);
*num_fs_actions = 0;
kfree(*fs_actions);
*fs_actions = NULL;
@@ -735,8 +919,25 @@ static int mlx5_fs_fte_get_hws_actions(struct mlx5_flow_root_namespace *ns,
}
if (fte_action->action & MLX5_FLOW_CONTEXT_ACTION_EXECUTE_ASO) {
- err = -EOPNOTSUPP;
- goto free_actions;
+ if (fte_action->exe_aso.type != MLX5_EXE_ASO_FLOW_METER ||
+ num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
+ err = -EOPNOTSUPP;
+ goto free_actions;
+ }
+
+ tmp_action = mlx5_fs_get_action_aso_meter(fs_ctx,
+ &fte_action->exe_aso);
+ if (!tmp_action) {
+ err = -ENOMEM;
+ goto free_actions;
+ }
+ (*ractions)[num_actions].aso_meter.offset =
+ fte_action->exe_aso.flow_meter.meter_idx;
+ (*ractions)[num_actions].aso_meter.init_color =
+ fte_action->exe_aso.flow_meter.init_color;
+ (*ractions)[num_actions++].action = tmp_action;
+ fs_actions[num_fs_actions].action = tmp_action;
+ fs_actions[num_fs_actions++].exe_aso = &fte_action->exe_aso;
}
if (fte_action->action & MLX5_FLOW_CONTEXT_ACTION_DROP) {
@@ -784,6 +985,14 @@ static int mlx5_fs_fte_get_hws_actions(struct mlx5_flow_root_namespace *ns,
dest_action = mlx5_fs_get_dest_action_vport(fs_ctx, dst,
type_uplink);
break;
+ case MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER:
+ dest_action =
+ mlx5_fs_get_dest_action_sampler(fs_ctx,
+ dst);
+ fs_actions[num_fs_actions].action = dest_action;
+ fs_actions[num_fs_actions++].sampler_id =
+ dst->dest_attr.sampler_id;
+ break;
default:
err = -EOPNOTSUPP;
goto free_actions;
@@ -850,7 +1059,7 @@ static int mlx5_fs_fte_get_hws_actions(struct mlx5_flow_root_namespace *ns,
return 0;
free_actions:
- mlx5_fs_destroy_fs_actions(&fs_actions, &num_fs_actions);
+ mlx5_fs_destroy_fs_actions(ns, &fs_actions, &num_fs_actions);
free_dest_actions_alloc:
kfree(dest_actions);
free_fs_actions_alloc:
@@ -900,7 +1109,7 @@ static int mlx5_cmd_hws_create_fte(struct mlx5_flow_root_namespace *ns,
return 0;
free_actions:
- mlx5_fs_destroy_fs_actions(&fte->fs_hws_rule.hws_fs_actions,
+ mlx5_fs_destroy_fs_actions(ns, &fte->fs_hws_rule.hws_fs_actions,
&fte->fs_hws_rule.num_fs_actions);
out_err:
mlx5_core_err(ns->dev, "Failed to create hws rule err(%d)\n", err);
@@ -920,7 +1129,8 @@ static int mlx5_cmd_hws_delete_fte(struct mlx5_flow_root_namespace *ns,
err = mlx5hws_bwc_rule_destroy(rule->bwc_rule);
rule->bwc_rule = NULL;
- mlx5_fs_destroy_fs_actions(&rule->hws_fs_actions, &rule->num_fs_actions);
+ mlx5_fs_destroy_fs_actions(ns, &rule->hws_fs_actions,
+ &rule->num_fs_actions);
return err;
}
@@ -958,11 +1168,12 @@ static int mlx5_cmd_hws_update_fte(struct mlx5_flow_root_namespace *ns,
if (ret)
goto restore_actions;
- mlx5_fs_destroy_fs_actions(&saved_hws_fs_actions, &saved_num_fs_actions);
+ mlx5_fs_destroy_fs_actions(ns, &saved_hws_fs_actions,
+ &saved_num_fs_actions);
return ret;
restore_actions:
- mlx5_fs_destroy_fs_actions(&fte->fs_hws_rule.hws_fs_actions,
+ mlx5_fs_destroy_fs_actions(ns, &fte->fs_hws_rule.hws_fs_actions,
&fte->fs_hws_rule.num_fs_actions);
fte->fs_hws_rule.hws_fs_actions = saved_hws_fs_actions;
fte->fs_hws_rule.num_fs_actions = saved_num_fs_actions;
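
The new mlx5_fs_get_hws_action()/mlx5_fs_put_hws_action() pair above shares one HWS action per object id and takes the mutex only when the reference count may cross zero. A standalone sketch of that get/put scheme using C11 atomics and a pthread mutex; the action type and the create/destroy hooks are stand-ins, and the put side simply always locks rather than mirroring refcount_dec_not_one():

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct shared_action {
        void *action;                 /* stand-in for struct mlx5hws_action * */
        atomic_int refcount;
        pthread_mutex_t lock;
    };

    static void *create_action(void)   { return malloc(1); }
    static void destroy_action(void *a) { free(a); }

    /* Try a lockless increment first; fall back to the mutex to create. */
    static void *get_action(struct shared_action *s)
    {
        int old = atomic_load(&s->refcount);

        while (old > 0)
            if (atomic_compare_exchange_weak(&s->refcount, &old, old + 1))
                return s->action;

        pthread_mutex_lock(&s->lock);
        if (atomic_load(&s->refcount) > 0) {
            atomic_fetch_add(&s->refcount, 1);
            pthread_mutex_unlock(&s->lock);
            return s->action;
        }
        s->action = create_action();
        if (s->action)
            atomic_store(&s->refcount, 1);
        pthread_mutex_unlock(&s->lock);
        return s->action;
    }

    static void put_action(struct shared_action *s)
    {
        pthread_mutex_lock(&s->lock);
        if (atomic_fetch_sub(&s->refcount, 1) == 1) {
            destroy_action(s->action);
            s->action = NULL;
        }
        pthread_mutex_unlock(&s->lock);
    }

    int main(void)
    {
        struct shared_action s = { .lock = PTHREAD_MUTEX_INITIALIZER };

        void *a = get_action(&s);
        void *b = get_action(&s);      /* lockless: refcount 1 -> 2 */
        printf("shared: %d\n", a == b); /* 1 */
        put_action(&s);
        put_action(&s);                /* last put destroys the action */
        return 0;
    }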
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.h
index cbddb72d4362..8b56298288da 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.h
@@ -22,6 +22,8 @@ struct mlx5_fs_hws_actions_pool {
struct xarray table_dests;
struct xarray vport_vhca_dests;
struct xarray vport_dests;
+ struct xarray aso_meters;
+ struct xarray sample_dests;
};
struct mlx5_fs_hws_context {
@@ -49,6 +51,8 @@ struct mlx5_fs_hws_rule_action {
struct mlx5hws_action *action;
union {
struct mlx5_fc *counter;
+ struct mlx5_exe_aso *exe_aso;
+ u32 sampler_id;
};
};
@@ -58,6 +62,26 @@ struct mlx5_fs_hws_rule {
int num_fs_actions;
};
+struct mlx5_fs_hws_data {
+ struct mlx5hws_action *hws_action;
+ struct mutex lock; /* protects hws_action */
+ refcount_t hws_action_refcount;
+};
+
+struct mlx5_fs_hws_create_action_ctx {
+ enum mlx5hws_action_type actions_type;
+ struct mlx5hws_context *hws_ctx;
+ u32 id;
+ union {
+ u8 return_reg_id;
+ };
+};
+
+struct mlx5hws_action *
+mlx5_fs_get_hws_action(struct mlx5_fs_hws_data *fs_hws_data,
+ struct mlx5_fs_hws_create_action_ctx *create_ctx);
+void mlx5_fs_put_hws_action(struct mlx5_fs_hws_data *fs_hws_data);
+
#ifdef CONFIG_MLX5_HW_STEERING
bool mlx5_fs_hws_is_supported(struct mlx5_core_dev *dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws_pools.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws_pools.c
index 2ae4ac62b0e2..f1ecdba74e1f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws_pools.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws_pools.c
@@ -405,46 +405,17 @@ bool mlx5_fs_hws_mh_pool_match(struct mlx5_fs_pool *mh_pool,
struct mlx5hws_action *mlx5_fc_get_hws_action(struct mlx5hws_context *ctx,
struct mlx5_fc *counter)
{
- u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB | MLX5HWS_ACTION_FLAG_SHARED;
+ struct mlx5_fs_hws_create_action_ctx create_ctx;
struct mlx5_fc_bulk *fc_bulk = counter->bulk;
- struct mlx5_fc_bulk_hws_data *fc_bulk_hws;
- fc_bulk_hws = &fc_bulk->hws_data;
- /* try avoid locking if not necessary */
- if (refcount_inc_not_zero(&fc_bulk_hws->hws_action_refcount))
- return fc_bulk_hws->hws_action;
+ create_ctx.hws_ctx = ctx;
+ create_ctx.id = fc_bulk->base_id;
+ create_ctx.actions_type = MLX5HWS_ACTION_TYP_CTR;
- mutex_lock(&fc_bulk_hws->lock);
- if (refcount_inc_not_zero(&fc_bulk_hws->hws_action_refcount)) {
- mutex_unlock(&fc_bulk_hws->lock);
- return fc_bulk_hws->hws_action;
- }
- fc_bulk_hws->hws_action =
- mlx5hws_action_create_counter(ctx, fc_bulk->base_id, flags);
- if (!fc_bulk_hws->hws_action) {
- mutex_unlock(&fc_bulk_hws->lock);
- return NULL;
- }
- refcount_set(&fc_bulk_hws->hws_action_refcount, 1);
- mutex_unlock(&fc_bulk_hws->lock);
-
- return fc_bulk_hws->hws_action;
+ return mlx5_fs_get_hws_action(&fc_bulk->hws_data, &create_ctx);
}
void mlx5_fc_put_hws_action(struct mlx5_fc *counter)
{
- struct mlx5_fc_bulk_hws_data *fc_bulk_hws = &counter->bulk->hws_data;
-
- /* try avoid locking if not necessary */
- if (refcount_dec_not_one(&fc_bulk_hws->hws_action_refcount))
- return;
-
- mutex_lock(&fc_bulk_hws->lock);
- if (!refcount_dec_and_test(&fc_bulk_hws->hws_action_refcount)) {
- mutex_unlock(&fc_bulk_hws->lock);
- return;
- }
- mlx5hws_action_destroy(fc_bulk_hws->hws_action);
- fc_bulk_hws->hws_action = NULL;
- mutex_unlock(&fc_bulk_hws->lock);
+ mlx5_fs_put_hws_action(&counter->bulk->hws_data);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pat_arg.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pat_arg.c
index d9dc4f2d0dc6..f51ed24526b9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pat_arg.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pat_arg.c
@@ -153,8 +153,7 @@ mlx5hws_pat_get_existing_cached_pattern(struct mlx5hws_pattern_cache *cache,
cached_pattern = mlx5hws_pat_find_cached_pattern(cache, num_of_actions, actions);
if (cached_pattern) {
/* LRU: move it to be first in the list */
- list_del_init(&cached_pattern->ptrn_list_node);
- list_add(&cached_pattern->ptrn_list_node, &cache->ptrn_list);
+ list_move(&cached_pattern->ptrn_list_node, &cache->ptrn_list);
cached_pattern->refcount++;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste.h
index 5f409dc30aca..3d5afc832fa5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste.h
@@ -210,6 +210,10 @@ struct mlx5dr_ste_ctx {
void (*set_encap_l3)(u8 *hw_ste_p, u8 *frst_s_action,
u8 *scnd_d_action, u32 reformat_id,
int size);
+ void (*set_insert_hdr)(u8 *hw_ste_p, u8 *d_action, u32 reformat_id,
+ u8 anchor, u8 offset, int size);
+ void (*set_remove_hdr)(u8 *hw_ste_p, u8 *s_action, u8 anchor,
+ u8 offset, int size);
/* Send */
void (*prepare_for_postsend)(u8 *hw_ste_p, u32 ste_size);
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v1.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v1.c
index 7f83d77c43ef..6447efbae00d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v1.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v1.c
@@ -266,10 +266,10 @@ void dr_ste_v1_set_encap(u8 *hw_ste_p, u8 *d_action, u32 reformat_id, int size)
dr_ste_v1_set_reparse(hw_ste_p);
}
-static void dr_ste_v1_set_insert_hdr(u8 *hw_ste_p, u8 *d_action,
- u32 reformat_id,
- u8 anchor, u8 offset,
- int size)
+void dr_ste_v1_set_insert_hdr(u8 *hw_ste_p, u8 *d_action,
+ u32 reformat_id,
+ u8 anchor, u8 offset,
+ int size)
{
MLX5_SET(ste_double_action_insert_with_ptr_v1, d_action,
action_id, DR_STE_V1_ACTION_ID_INSERT_POINTER);
@@ -286,9 +286,9 @@ static void dr_ste_v1_set_insert_hdr(u8 *hw_ste_p, u8 *d_action,
dr_ste_v1_set_reparse(hw_ste_p);
}
-static void dr_ste_v1_set_remove_hdr(u8 *hw_ste_p, u8 *s_action,
- u8 anchor, u8 offset,
- int size)
+void dr_ste_v1_set_remove_hdr(u8 *hw_ste_p, u8 *s_action,
+ u8 anchor, u8 offset,
+ int size)
{
MLX5_SET(ste_single_action_remove_header_size_v1, s_action,
action_id, DR_STE_V1_ACTION_ID_REMOVE_BY_SIZE);
@@ -584,11 +584,11 @@ void dr_ste_v1_set_actions_tx(struct mlx5dr_ste_ctx *ste_ctx,
action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
action_sz = DR_STE_ACTION_TRIPLE_SZ;
}
- dr_ste_v1_set_insert_hdr(last_ste, action,
- attr->reformat.id,
- attr->reformat.param_0,
- attr->reformat.param_1,
- attr->reformat.size);
+ ste_ctx->set_insert_hdr(last_ste, action,
+ attr->reformat.id,
+ attr->reformat.param_0,
+ attr->reformat.param_1,
+ attr->reformat.size);
action_sz -= DR_STE_ACTION_DOUBLE_SZ;
action += DR_STE_ACTION_DOUBLE_SZ;
} else if (action_type_set[DR_ACTION_TYP_REMOVE_HDR]) {
@@ -597,10 +597,10 @@ void dr_ste_v1_set_actions_tx(struct mlx5dr_ste_ctx *ste_ctx,
action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
action_sz = DR_STE_ACTION_TRIPLE_SZ;
}
- dr_ste_v1_set_remove_hdr(last_ste, action,
- attr->reformat.param_0,
- attr->reformat.param_1,
- attr->reformat.size);
+ ste_ctx->set_remove_hdr(last_ste, action,
+ attr->reformat.param_0,
+ attr->reformat.param_1,
+ attr->reformat.size);
action_sz -= DR_STE_ACTION_SINGLE_SZ;
action += DR_STE_ACTION_SINGLE_SZ;
}
@@ -792,11 +792,11 @@ void dr_ste_v1_set_actions_rx(struct mlx5dr_ste_ctx *ste_ctx,
action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
action_sz = DR_STE_ACTION_TRIPLE_SZ;
}
- dr_ste_v1_set_insert_hdr(last_ste, action,
- attr->reformat.id,
- attr->reformat.param_0,
- attr->reformat.param_1,
- attr->reformat.size);
+ ste_ctx->set_insert_hdr(last_ste, action,
+ attr->reformat.id,
+ attr->reformat.param_0,
+ attr->reformat.param_1,
+ attr->reformat.size);
action_sz -= DR_STE_ACTION_DOUBLE_SZ;
action += DR_STE_ACTION_DOUBLE_SZ;
allow_modify_hdr = false;
@@ -808,10 +808,10 @@ void dr_ste_v1_set_actions_rx(struct mlx5dr_ste_ctx *ste_ctx,
allow_modify_hdr = true;
allow_ctr = true;
}
- dr_ste_v1_set_remove_hdr(last_ste, action,
- attr->reformat.param_0,
- attr->reformat.param_1,
- attr->reformat.size);
+ ste_ctx->set_remove_hdr(last_ste, action,
+ attr->reformat.param_0,
+ attr->reformat.param_1,
+ attr->reformat.size);
action_sz -= DR_STE_ACTION_SINGLE_SZ;
action += DR_STE_ACTION_SINGLE_SZ;
}
@@ -2200,6 +2200,8 @@ static struct mlx5dr_ste_ctx ste_ctx_v1 = {
.set_pop_vlan = &dr_ste_v1_set_pop_vlan,
.set_rx_decap = &dr_ste_v1_set_rx_decap,
.set_encap_l3 = &dr_ste_v1_set_encap_l3,
+ .set_insert_hdr = &dr_ste_v1_set_insert_hdr,
+ .set_remove_hdr = &dr_ste_v1_set_remove_hdr,
/* Send */
.prepare_for_postsend = &dr_ste_v1_prepare_for_postsend,
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v1.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v1.h
index a8d9e308d339..591c20c95a6a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v1.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v1.h
@@ -156,6 +156,10 @@ void dr_ste_v1_set_pop_vlan(u8 *hw_ste_p, u8 *s_action, u8 vlans_num);
void dr_ste_v1_set_encap_l3(u8 *hw_ste_p, u8 *frst_s_action, u8 *scnd_d_action,
u32 reformat_id, int size);
void dr_ste_v1_set_rx_decap(u8 *hw_ste_p, u8 *s_action);
+void dr_ste_v1_set_insert_hdr(u8 *hw_ste_p, u8 *d_action, u32 reformat_id,
+ u8 anchor, u8 offset, int size);
+void dr_ste_v1_set_remove_hdr(u8 *hw_ste_p, u8 *s_action, u8 anchor,
+ u8 offset, int size);
void dr_ste_v1_set_actions_tx(struct mlx5dr_ste_ctx *ste_ctx, struct mlx5dr_domain *dmn,
u8 *action_type_set, u32 actions_caps, u8 *last_ste,
struct mlx5dr_ste_actions_attr *attr, u32 *added_stes);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v2.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v2.c
index 0882dba0f64b..d0ebaf820d42 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v2.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v2.c
@@ -69,6 +69,8 @@ static struct mlx5dr_ste_ctx ste_ctx_v2 = {
.set_pop_vlan = &dr_ste_v1_set_pop_vlan,
.set_rx_decap = &dr_ste_v1_set_rx_decap,
.set_encap_l3 = &dr_ste_v1_set_encap_l3,
+ .set_insert_hdr = &dr_ste_v1_set_insert_hdr,
+ .set_remove_hdr = &dr_ste_v1_set_remove_hdr,
/* Send */
.prepare_for_postsend = &dr_ste_v1_prepare_for_postsend,
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v3.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v3.c
index cc60ce1d274e..e468a9ae44e8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v3.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v3.c
@@ -79,6 +79,46 @@ static void dr_ste_v3_set_rx_decap(u8 *hw_ste_p, u8 *s_action)
dr_ste_v1_set_reparse(hw_ste_p);
}
+static void dr_ste_v3_set_insert_hdr(u8 *hw_ste_p, u8 *d_action,
+ u32 reformat_id, u8 anchor,
+ u8 offset, int size)
+{
+ MLX5_SET(ste_double_action_insert_with_ptr_v3, d_action,
+ action_id, DR_STE_V1_ACTION_ID_INSERT_POINTER);
+ MLX5_SET(ste_double_action_insert_with_ptr_v3, d_action,
+ start_anchor, anchor);
+
+	/* The hardware expects size and offset here in words (2 bytes) */
+ MLX5_SET(ste_double_action_insert_with_ptr_v3, d_action,
+ size, size / 2);
+ MLX5_SET(ste_double_action_insert_with_ptr_v3, d_action,
+ start_offset, offset / 2);
+
+ MLX5_SET(ste_double_action_insert_with_ptr_v3, d_action,
+ pointer, reformat_id);
+ MLX5_SET(ste_double_action_insert_with_ptr_v3, d_action,
+ attributes, DR_STE_V1_ACTION_INSERT_PTR_ATTR_NONE);
+
+ dr_ste_v1_set_reparse(hw_ste_p);
+}
+
+static void dr_ste_v3_set_remove_hdr(u8 *hw_ste_p, u8 *s_action,
+ u8 anchor, u8 offset, int size)
+{
+ MLX5_SET(ste_single_action_remove_header_size_v3, s_action,
+ action_id, DR_STE_V1_ACTION_ID_REMOVE_BY_SIZE);
+ MLX5_SET(ste_single_action_remove_header_size_v3, s_action,
+ start_anchor, anchor);
+
+	/* The hardware expects size and offset here in words (2 bytes) */
+ MLX5_SET(ste_single_action_remove_header_size_v3, s_action,
+ remove_size, size / 2);
+ MLX5_SET(ste_single_action_remove_header_size_v3, s_action,
+ start_offset, offset / 2);
+
+ dr_ste_v1_set_reparse(hw_ste_p);
+}
+
static int
dr_ste_v3_set_action_decap_l3_list(void *data, u32 data_sz,
u8 *hw_action, u32 hw_action_sz,
@@ -211,6 +251,8 @@ static struct mlx5dr_ste_ctx ste_ctx_v3 = {
.set_pop_vlan = &dr_ste_v3_set_pop_vlan,
.set_rx_decap = &dr_ste_v3_set_rx_decap,
.set_encap_l3 = &dr_ste_v3_set_encap_l3,
+ .set_insert_hdr = &dr_ste_v3_set_insert_hdr,
+ .set_remove_hdr = &dr_ste_v3_set_remove_hdr,
/* Send */
.prepare_for_postsend = &dr_ste_v1_prepare_for_postsend,
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
index 0d5f750faa45..d10d4c396040 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
@@ -1199,6 +1199,31 @@ int mlx5_vport_get_other_func_cap(struct mlx5_core_dev *dev, u16 vport, void *ou
}
EXPORT_SYMBOL_GPL(mlx5_vport_get_other_func_cap);
+int mlx5_vport_get_vhca_id(struct mlx5_core_dev *dev, u16 vport, u16 *vhca_id)
+{
+ int query_out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
+ void *query_ctx;
+ void *hca_caps;
+ int err;
+
+ *vhca_id = 0;
+
+ query_ctx = kzalloc(query_out_sz, GFP_KERNEL);
+ if (!query_ctx)
+ return -ENOMEM;
+
+ err = mlx5_vport_get_other_func_general_cap(dev, vport, query_ctx);
+ if (err)
+ goto out_free;
+
+ hca_caps = MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability);
+ *vhca_id = MLX5_GET(cmd_hca_cap, hca_caps, vhca_id);
+
+out_free:
+ kfree(query_ctx);
+ return err;
+}
+
int mlx5_vport_set_other_func_cap(struct mlx5_core_dev *dev, const void *hca_cap,
u16 vport, u16 opmod)
{
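
mlx5_vport_get_vhca_id() above follows the usual capability-query pattern: allocate an output buffer, run the other-function HCA cap query, pull one field out of it, and free the buffer on every exit path. A standalone sketch of that shape with stand-in sizes, offsets and a faked query; the real code uses MLX5_ST_SZ_BYTES()/MLX5_GET() accessors:

    #include <errno.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define QUERY_OUT_SZ 64           /* placeholder for the HCA cap size */

    static int run_query(uint16_t vport, void *out)
    {
        /* Pretend the device answered with vhca_id == vport + 100. */
        uint16_t vhca_id = vport + 100;

        memcpy((char *)out + 4, &vhca_id, sizeof(vhca_id));
        return 0;
    }

    static int get_vhca_id(uint16_t vport, uint16_t *vhca_id)
    {
        void *query_ctx;
        int err;

        *vhca_id = 0;

        query_ctx = calloc(1, QUERY_OUT_SZ);
        if (!query_ctx)
            return -ENOMEM;

        err = run_query(vport, query_ctx);
        if (err)
            goto out_free;

        memcpy(vhca_id, (char *)query_ctx + 4, sizeof(*vhca_id));

    out_free:
        free(query_ctx);
        return err;
    }

    int main(void)
    {
        uint16_t id;

        if (!get_vhca_id(3, &id))
            printf("vhca_id %u\n", id);   /* 103 */
        return 0;
    }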
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
index 5b44c931b660..058dcabfaa2e 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
@@ -2214,6 +2214,8 @@ static int mlxsw_pci_skb_transmit(void *bus_priv, struct sk_buff *skb,
for (i++; i < MLXSW_PCI_WQE_SG_ENTRIES; i++)
mlxsw_pci_wqe_byte_count_set(wqe, i, 0);
+ mlxsw_pci_wqe_ipcs_set(wqe, skb->ip_summed == CHECKSUM_PARTIAL);
+
/* Everything is set up, ring producer doorbell to get HW going */
q->producer_counter++;
mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
index 6bed495dcf0f..7fa94e5828de 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
@@ -90,6 +90,11 @@ MLXSW_ITEM32(pci, wqe, lp, 0x00, 30, 1);
*/
MLXSW_ITEM32(pci, wqe, type, 0x00, 23, 4);
+/* pci_wqe_ipcs
+ * Calculate IPv4 and TCP / UDP checksums.
+ */
+MLXSW_ITEM32(pci, wqe, ipcs, 0x00, 14, 1);
+
/* pci_wqe_byte_count
* Size of i-th scatter/gather entry, 0 if entry is unused.
*/
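
Taken together, the mlxsw hunks above advertise NETIF_F_IP_CSUM/NETIF_F_IPV6_CSUM and, on transmit, set the new per-WQE ipcs bit whenever the skb arrives with CHECKSUM_PARTIAL, so the ASIC finishes the IPv4/TCP/UDP checksum. A standalone sketch of that one-bit decision; the WQE word layout and bit position below are placeholders, not the real descriptor format:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define WQE_IPCS_BIT (1u << 14)   /* placeholder bit position */

    enum ip_summed { CHECKSUM_NONE, CHECKSUM_PARTIAL };

    static void wqe_ipcs_set(uint32_t *wqe_word0, bool ipcs)
    {
        if (ipcs)
            *wqe_word0 |= WQE_IPCS_BIT;
        else
            *wqe_word0 &= ~WQE_IPCS_BIT;
    }

    int main(void)
    {
        uint32_t wqe0 = 0;
        enum ip_summed ip_summed = CHECKSUM_PARTIAL;

        /* Mirrors: mlxsw_pci_wqe_ipcs_set(wqe, skb->ip_summed == CHECKSUM_PARTIAL); */
        wqe_ipcs_set(&wqe0, ip_summed == CHECKSUM_PARTIAL);
        printf("wqe0 = 0x%08x\n", wqe0);   /* bit 14 set */
        return 0;
    }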
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index d714311fd884..3080ea032e7f 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -1574,10 +1574,12 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u16 local_port,
netif_carrier_off(dev);
dev->features |= NETIF_F_SG | NETIF_F_HW_VLAN_CTAG_FILTER |
- NETIF_F_HW_TC;
- dev->hw_features |= NETIF_F_HW_TC | NETIF_F_LOOPBACK;
+ NETIF_F_HW_TC | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+ dev->hw_features |= NETIF_F_HW_TC | NETIF_F_LOOPBACK |
+ NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+ dev->vlan_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
dev->lltx = true;
- dev->netns_local = true;
+ dev->netns_immutable = true;
dev->min_mtu = ETH_MIN_MTU;
dev->max_mtu = MLXSW_PORT_MAX_MTU - MLXSW_PORT_ETH_FRAME_HDR;
@@ -2407,8 +2409,6 @@ static const struct mlxsw_listener mlxsw_sp_listener[] = {
/* Multicast Router Traps */
MLXSW_SP_RXL_MARK(ACL1, TRAP_TO_CPU, MULTICAST, false),
MLXSW_SP_RXL_L3_MARK(ACL2, TRAP_TO_CPU, MULTICAST, false),
- /* NVE traps */
- MLXSW_SP_RXL_MARK(NVE_ENCAP_ARP, TRAP_TO_CPU, NEIGH_DISCOVERY, false),
};
static const struct mlxsw_listener mlxsw_sp1_listener[] = {
@@ -5230,25 +5230,13 @@ static int mlxsw_sp_netdevice_vxlan_event(struct mlxsw_sp *mlxsw_sp,
return 0;
if (!mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack))
return -EOPNOTSUPP;
- if (cu_info->linking) {
- if (!netif_running(dev))
- return 0;
- /* When the bridge is VLAN-aware, the VNI of the VxLAN
- * device needs to be mapped to a VLAN, but at this
- * point no VLANs are configured on the VxLAN device
- */
- if (br_vlan_enabled(upper_dev))
- return 0;
+ if (!netif_running(dev))
+ return 0;
+ if (cu_info->linking)
return mlxsw_sp_bridge_vxlan_join(mlxsw_sp, upper_dev,
dev, 0, extack);
- } else {
- /* VLANs were already flushed, which triggered the
- * necessary cleanup
- */
- if (br_vlan_enabled(upper_dev))
- return 0;
+ else
mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, dev);
- }
break;
case NETDEV_PRE_UP:
upper_dev = netdev_master_upper_dev_get(dev);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index fa7082ee5183..37cd1d002b3b 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -661,10 +661,10 @@ bool mlxsw_sp_bridge_device_is_offloaded(const struct mlxsw_sp *mlxsw_sp,
const struct net_device *br_dev);
int mlxsw_sp_bridge_vxlan_join(struct mlxsw_sp *mlxsw_sp,
const struct net_device *br_dev,
- const struct net_device *vxlan_dev, u16 vid,
+ struct net_device *vxlan_dev, u16 vid,
struct netlink_ext_ack *extack);
void mlxsw_sp_bridge_vxlan_leave(struct mlxsw_sp *mlxsw_sp,
- const struct net_device *vxlan_dev);
+ struct net_device *vxlan_dev);
extern struct notifier_block mlxsw_sp_switchdev_notifier;
/* spectrum.c */
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c
index a54eedb69a3f..067f0055a55a 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c
@@ -212,7 +212,22 @@ static const u8 mlxsw_sp4_acl_bf_crc6_tab[256] = {
* This array defines key offsets for easy access when copying key blocks from
* entry key to Bloom filter chunk.
*/
-static const u8 chunk_key_offsets[MLXSW_BLOOM_KEY_CHUNKS] = {2, 20, 38};
+static char *
+mlxsw_sp_acl_bf_enc_key_get(struct mlxsw_sp_acl_atcam_entry *aentry,
+ u8 chunk_index)
+{
+ switch (chunk_index) {
+ case 0:
+ return &aentry->ht_key.enc_key[2];
+ case 1:
+ return &aentry->ht_key.enc_key[20];
+ case 2:
+ return &aentry->ht_key.enc_key[38];
+ default:
+ WARN_ON_ONCE(1);
+ return &aentry->ht_key.enc_key[0];
+ }
+}
static u16 mlxsw_sp2_acl_bf_crc16_byte(u16 crc, u8 c)
{
@@ -235,9 +250,10 @@ __mlxsw_sp_acl_bf_key_encode(struct mlxsw_sp_acl_atcam_region *aregion,
u8 key_offset, u8 chunk_key_len, u8 chunk_len)
{
struct mlxsw_afk_key_info *key_info = aregion->region->key_info;
- u8 chunk_index, chunk_count, block_count;
+ u8 chunk_index, chunk_count;
char *chunk = output;
__be16 erp_region_id;
+ u32 block_count;
block_count = mlxsw_afk_key_info_blocks_count_get(key_info);
chunk_count = 1 + ((block_count - 1) >> 2);
@@ -245,12 +261,13 @@ __mlxsw_sp_acl_bf_key_encode(struct mlxsw_sp_acl_atcam_region *aregion,
(aregion->region->id << 4));
for (chunk_index = max_chunks - chunk_count; chunk_index < max_chunks;
chunk_index++) {
+ char *enc_key;
+
memset(chunk, 0, pad_bytes);
memcpy(chunk + pad_bytes, &erp_region_id,
sizeof(erp_region_id));
- memcpy(chunk + key_offset,
- &aentry->ht_key.enc_key[chunk_key_offsets[chunk_index]],
- chunk_key_len);
+ enc_key = mlxsw_sp_acl_bf_enc_key_get(aentry, chunk_index);
+ memcpy(chunk + key_offset, enc_key, chunk_key_len);
chunk += chunk_len;
}
*len = chunk_count * chunk_len;
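
The Bloom filter change above keeps the chunk offsets at 2/20/38 but moves them behind a helper that warns on an unexpected chunk index instead of indexing a fixed array, and widens block_count to u32. A standalone sketch of the helper's shape with a placeholder key size, using assert() as a stand-in for WARN_ON_ONCE():

    #include <assert.h>
    #include <stdio.h>

    #define ENC_KEY_SIZE 40

    static char enc_key[ENC_KEY_SIZE];

    static char *bf_enc_key_get(unsigned int chunk_index)
    {
        switch (chunk_index) {
        case 0:
            return &enc_key[2];
        case 1:
            return &enc_key[20];
        case 2:
            return &enc_key[38];
        default:
            assert(!"unexpected chunk index");  /* WARN_ON_ONCE() stand-in */
            return &enc_key[0];
        }
    }

    int main(void)
    {
        printf("chunk 1 offset = %td\n", bf_enc_key_get(1) - enc_key);  /* 20 */
        return 0;
    }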
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index 6397ff0dc951..a48bf342084d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -2929,23 +2929,8 @@ void mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port,
mlxsw_sp_bridge_port_put(mlxsw_sp->bridge, bridge_port);
}
-int mlxsw_sp_bridge_vxlan_join(struct mlxsw_sp *mlxsw_sp,
- const struct net_device *br_dev,
- const struct net_device *vxlan_dev, u16 vid,
- struct netlink_ext_ack *extack)
-{
- struct mlxsw_sp_bridge_device *bridge_device;
-
- bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
- if (WARN_ON(!bridge_device))
- return -EINVAL;
-
- return bridge_device->ops->vxlan_join(bridge_device, vxlan_dev, vid,
- extack);
-}
-
-void mlxsw_sp_bridge_vxlan_leave(struct mlxsw_sp *mlxsw_sp,
- const struct net_device *vxlan_dev)
+static void __mlxsw_sp_bridge_vxlan_leave(struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *vxlan_dev)
{
struct vxlan_dev *vxlan = netdev_priv(vxlan_dev);
struct mlxsw_sp_fid *fid;
@@ -2963,6 +2948,47 @@ void mlxsw_sp_bridge_vxlan_leave(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_fid_put(fid);
}
+int mlxsw_sp_bridge_vxlan_join(struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *br_dev,
+ struct net_device *vxlan_dev, u16 vid,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_sp_bridge_device *bridge_device;
+ struct mlxsw_sp_port *mlxsw_sp_port;
+ int err;
+
+ bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
+ if (WARN_ON(!bridge_device))
+ return -EINVAL;
+
+ mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(bridge_device->dev);
+ if (!mlxsw_sp_port)
+ return -EINVAL;
+
+ err = bridge_device->ops->vxlan_join(bridge_device, vxlan_dev, vid,
+ extack);
+ if (err)
+ return err;
+
+ err = switchdev_bridge_port_offload(vxlan_dev, mlxsw_sp_port->dev,
+ NULL, NULL, NULL, false, extack);
+ if (err)
+ goto err_bridge_port_offload;
+
+ return 0;
+
+err_bridge_port_offload:
+ __mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, vxlan_dev);
+ return err;
+}
+
+void mlxsw_sp_bridge_vxlan_leave(struct mlxsw_sp *mlxsw_sp,
+ struct net_device *vxlan_dev)
+{
+ switchdev_bridge_port_unoffload(vxlan_dev, NULL, NULL, NULL);
+ __mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, vxlan_dev);
+}
+
static void
mlxsw_sp_switchdev_vxlan_addr_convert(const union vxlan_addr *vxlan_addr,
enum mlxsw_sp_l3proto *proto,
@@ -3867,7 +3893,7 @@ mlxsw_sp_switchdev_vxlan_vlan_add(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_fid_put(fid);
return -EINVAL;
}
- mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, vxlan_dev);
+ __mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, vxlan_dev);
mlxsw_sp_fid_put(fid);
return 0;
}
@@ -3883,7 +3909,7 @@ mlxsw_sp_switchdev_vxlan_vlan_add(struct mlxsw_sp *mlxsw_sp,
/* Fourth case: The new VLAN is PVID, which means the VLAN currently
* mapped to the VNI should be unmapped
*/
- mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, vxlan_dev);
+ __mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, vxlan_dev);
mlxsw_sp_fid_put(fid);
/* Fifth case: The new VLAN is also egress untagged, which means the
@@ -3923,7 +3949,7 @@ mlxsw_sp_switchdev_vxlan_vlan_del(struct mlxsw_sp *mlxsw_sp,
if (mlxsw_sp_fid_8021q_vid(fid) != vid)
goto out;
- mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, vxlan_dev);
+ __mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, vxlan_dev);
out:
mlxsw_sp_fid_put(fid);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c
index 1f9c1c86839f..b5c3f789c685 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c
@@ -959,18 +959,18 @@ static const struct mlxsw_sp_trap_item mlxsw_sp_trap_items_arr[] = {
},
{
.trap = MLXSW_SP_TRAP_CONTROL(ARP_REQUEST, NEIGH_DISCOVERY,
- MIRROR),
+ TRAP),
.listeners_arr = {
- MLXSW_SP_RXL_MARK(ROUTER_ARPBC, NEIGH_DISCOVERY,
- TRAP_TO_CPU, false),
+ MLXSW_SP_RXL_NO_MARK(ARPBC, NEIGH_DISCOVERY,
+ TRAP_TO_CPU, false),
},
},
{
.trap = MLXSW_SP_TRAP_CONTROL(ARP_RESPONSE, NEIGH_DISCOVERY,
- MIRROR),
+ TRAP),
.listeners_arr = {
- MLXSW_SP_RXL_MARK(ROUTER_ARPUC, NEIGH_DISCOVERY,
- TRAP_TO_CPU, false),
+ MLXSW_SP_RXL_NO_MARK(ARPUC, NEIGH_DISCOVERY,
+ TRAP_TO_CPU, false),
},
},
{
diff --git a/drivers/net/ethernet/mellanox/mlxsw/trap.h b/drivers/net/ethernet/mellanox/mlxsw/trap.h
index 83477c8e6971..80ee5c4825dc 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/trap.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/trap.h
@@ -29,6 +29,8 @@ enum {
MLXSW_TRAP_ID_FDB_MISMATCH = 0x3B,
MLXSW_TRAP_ID_FID_MISS = 0x3D,
MLXSW_TRAP_ID_DECAP_ECN0 = 0x40,
+ MLXSW_TRAP_ID_ARPBC = 0x50,
+ MLXSW_TRAP_ID_ARPUC = 0x51,
MLXSW_TRAP_ID_MTUERROR = 0x52,
MLXSW_TRAP_ID_TTLERROR = 0x53,
MLXSW_TRAP_ID_LBERROR = 0x54,
@@ -66,13 +68,10 @@ enum {
MLXSW_TRAP_ID_HOST_MISS_IPV6 = 0x92,
MLXSW_TRAP_ID_IPIP_DECAP_ERROR = 0xB1,
MLXSW_TRAP_ID_NVE_DECAP_ARP = 0xB8,
- MLXSW_TRAP_ID_NVE_ENCAP_ARP = 0xBD,
MLXSW_TRAP_ID_IPV4_BFD = 0xD0,
MLXSW_TRAP_ID_IPV6_BFD = 0xD1,
MLXSW_TRAP_ID_ROUTER_ALERT_IPV4 = 0xD6,
MLXSW_TRAP_ID_ROUTER_ALERT_IPV6 = 0xD7,
- MLXSW_TRAP_ID_ROUTER_ARPBC = 0xE0,
- MLXSW_TRAP_ID_ROUTER_ARPUC = 0xE1,
MLXSW_TRAP_ID_DISCARD_NON_ROUTABLE = 0x11A,
MLXSW_TRAP_ID_DISCARD_ROUTER2 = 0x130,
MLXSW_TRAP_ID_DISCARD_ROUTER3 = 0x131,
diff --git a/drivers/net/ethernet/meta/fbnic/Makefile b/drivers/net/ethernet/meta/fbnic/Makefile
index 239b2258ec65..0dbc634adb4b 100644
--- a/drivers/net/ethernet/meta/fbnic/Makefile
+++ b/drivers/net/ethernet/meta/fbnic/Makefile
@@ -20,6 +20,6 @@ fbnic-y := fbnic_csr.o \
fbnic_pci.o \
fbnic_phylink.o \
fbnic_rpc.o \
+ fbnic_time.o \
fbnic_tlv.o \
-	     fbnic_txrx.o \
-	     fbnic_time.o
+	     fbnic_txrx.o
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic.h b/drivers/net/ethernet/meta/fbnic/fbnic.h
index 14751f16e125..4ca7b99ef131 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic.h
+++ b/drivers/net/ethernet/meta/fbnic/fbnic.h
@@ -60,6 +60,12 @@ struct fbnic_dev {
u8 mac_addr_boundary;
u8 tce_tcam_last;
+ /* IP TCAM */
+ struct fbnic_ip_addr ip_src[FBNIC_RPC_TCAM_IP_ADDR_NUM_ENTRIES];
+ struct fbnic_ip_addr ip_dst[FBNIC_RPC_TCAM_IP_ADDR_NUM_ENTRIES];
+ struct fbnic_ip_addr ipo_src[FBNIC_RPC_TCAM_IP_ADDR_NUM_ENTRIES];
+ struct fbnic_ip_addr ipo_dst[FBNIC_RPC_TCAM_IP_ADDR_NUM_ENTRIES];
+
/* Number of TCQs/RCQs available on hardware */
u16 max_num_queues;
@@ -180,6 +186,9 @@ void fbnic_dbg_exit(void);
void fbnic_csr_get_regs(struct fbnic_dev *fbd, u32 *data, u32 *regs_version);
int fbnic_csr_regs_len(struct fbnic_dev *fbd);
+void fbnic_config_txrx_usecs(struct fbnic_napi_vector *nv, u32 arm);
+void fbnic_config_rx_frames(struct fbnic_napi_vector *nv);
+
enum fbnic_boards {
fbnic_board_asic
};
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_csr.c b/drivers/net/ethernet/meta/fbnic/fbnic_csr.c
index aeb9f333f4c7..d9c0dc1c2af9 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_csr.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_csr.c
@@ -30,6 +30,7 @@ static const struct fbnic_csr_bounds fbnic_csr_sects[] = {
FBNIC_BOUNDS(RSFEC),
FBNIC_BOUNDS(MAC_MAC),
FBNIC_BOUNDS(SIG),
+ FBNIC_BOUNDS(PCIE_SS_COMPHY),
FBNIC_BOUNDS(PUL_USER),
FBNIC_BOUNDS(QUEUE),
FBNIC_BOUNDS(RPC_RAM),
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_csr.h b/drivers/net/ethernet/meta/fbnic/fbnic_csr.h
index 02bb81b3c506..3b12a0ab5906 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_csr.h
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_csr.h
@@ -605,8 +605,11 @@ enum {
FBNIC_RPC_ACT_TBL0_DEST_EI = 4,
};
+#define FBNIC_RPC_ACT_TBL0_Q_SEL CSR_BIT(4)
+#define FBNIC_RPC_ACT_TBL0_Q_ID CSR_GENMASK(15, 8)
#define FBNIC_RPC_ACT_TBL0_DMA_HINT CSR_GENMASK(24, 16)
#define FBNIC_RPC_ACT_TBL0_TS_ENA CSR_BIT(28)
+#define FBNIC_RPC_ACT_TBL0_ACT_TBL_IDX CSR_BIT(29)
#define FBNIC_RPC_ACT_TBL0_RSS_CTXT_ID CSR_BIT(30)
#define FBNIC_RPC_ACT_TBL1_DEFAULT 0x0840b /* 0x2102c */
@@ -677,6 +680,9 @@ enum {
#define FBNIC_RPC_TCAM_OUTER_IPSRC(m, n)\
(0x08c00 + 0x08 * (n) + (m)) /* 0x023000 + 32*n + 4*m */
+#define FBNIC_RPC_TCAM_IP_ADDR_VALUE CSR_GENMASK(15, 0)
+#define FBNIC_RPC_TCAM_IP_ADDR_MASK CSR_GENMASK(31, 16)
+
#define FBNIC_RPC_TCAM_OUTER_IPDST(m, n)\
(0x08c48 + 0x08 * (n) + (m)) /* 0x023120 + 32*n + 4*m */
#define FBNIC_RPC_TCAM_IPSRC(m, n)\
@@ -782,13 +788,52 @@ enum {
#define FBNIC_MAC_STAT_TX_MULTICAST_H 0x11a4b /* 0x4692c */
#define FBNIC_MAC_STAT_TX_BROADCAST_L 0x11a4c /* 0x46930 */
#define FBNIC_MAC_STAT_TX_BROADCAST_H 0x11a4d /* 0x46934 */
+
+/* PCIE Comphy Registers */
+#define FBNIC_CSR_START_PCIE_SS_COMPHY 0x2442e /* CSR section delimiter */
+#define FBNIC_CSR_END_PCIE_SS_COMPHY 0x279d7 /* CSR section delimiter */
+
/* PUL User Registers */
#define FBNIC_CSR_START_PUL_USER 0x31000 /* CSR section delimiter */
#define FBNIC_PUL_OB_TLP_HDR_AW_CFG 0x3103d /* 0xc40f4 */
#define FBNIC_PUL_OB_TLP_HDR_AW_CFG_BME CSR_BIT(18)
#define FBNIC_PUL_OB_TLP_HDR_AR_CFG 0x3103e /* 0xc40f8 */
#define FBNIC_PUL_OB_TLP_HDR_AR_CFG_BME CSR_BIT(18)
-#define FBNIC_CSR_END_PUL_USER 0x31080 /* CSR section delimiter */
+#define FBNIC_PUL_USER_OB_RD_TLP_CNT_31_0 \
+ 0x3106e /* 0xc41b8 */
+#define FBNIC_PUL_USER_OB_RD_DWORD_CNT_31_0 \
+ 0x31070 /* 0xc41c0 */
+#define FBNIC_PUL_USER_OB_RD_DWORD_CNT_63_32 \
+ 0x31071 /* 0xc41c4 */
+#define FBNIC_PUL_USER_OB_WR_TLP_CNT_31_0 \
+ 0x31072 /* 0xc41c8 */
+#define FBNIC_PUL_USER_OB_WR_TLP_CNT_63_32 \
+ 0x31073 /* 0xc41cc */
+#define FBNIC_PUL_USER_OB_WR_DWORD_CNT_31_0 \
+ 0x31074 /* 0xc41d0 */
+#define FBNIC_PUL_USER_OB_WR_DWORD_CNT_63_32 \
+ 0x31075 /* 0xc41d4 */
+#define FBNIC_PUL_USER_OB_CPL_TLP_CNT_31_0 \
+ 0x31076 /* 0xc41d8 */
+#define FBNIC_PUL_USER_OB_CPL_TLP_CNT_63_32 \
+ 0x31077 /* 0xc41dc */
+#define FBNIC_PUL_USER_OB_CPL_DWORD_CNT_31_0 \
+ 0x31078 /* 0xc41e0 */
+#define FBNIC_PUL_USER_OB_CPL_DWORD_CNT_63_32 \
+ 0x31079 /* 0xc41e4 */
+#define FBNIC_PUL_USER_OB_RD_DBG_CNT_CPL_CRED_31_0 \
+ 0x3107a /* 0xc41e8 */
+#define FBNIC_PUL_USER_OB_RD_DBG_CNT_CPL_CRED_63_32 \
+ 0x3107b /* 0xc41ec */
+#define FBNIC_PUL_USER_OB_RD_DBG_CNT_TAG_31_0 \
+ 0x3107c /* 0xc41f0 */
+#define FBNIC_PUL_USER_OB_RD_DBG_CNT_TAG_63_32 \
+ 0x3107d /* 0xc41f4 */
+#define FBNIC_PUL_USER_OB_RD_DBG_CNT_NP_CRED_31_0 \
+ 0x3107e /* 0xc41f8 */
+#define FBNIC_PUL_USER_OB_RD_DBG_CNT_NP_CRED_63_32 \
+ 0x3107f /* 0xc41fc */
+#define FBNIC_CSR_END_PUL_USER 0x310ea /* CSR section delimiter */
/* Queue Registers
*
@@ -928,43 +973,6 @@ enum {
#define FBNIC_MAX_QUEUES 128
#define FBNIC_CSR_END_QUEUE (0x40000 + 0x400 * FBNIC_MAX_QUEUES - 1)
-/* PUL User Registers*/
-#define FBNIC_PUL_USER_OB_RD_TLP_CNT_31_0 \
- 0x3106e /* 0xc41b8 */
-#define FBNIC_PUL_USER_OB_RD_DWORD_CNT_31_0 \
- 0x31070 /* 0xc41c0 */
-#define FBNIC_PUL_USER_OB_RD_DWORD_CNT_63_32 \
- 0x31071 /* 0xc41c4 */
-#define FBNIC_PUL_USER_OB_WR_TLP_CNT_31_0 \
- 0x31072 /* 0xc41c8 */
-#define FBNIC_PUL_USER_OB_WR_TLP_CNT_63_32 \
- 0x31073 /* 0xc41cc */
-#define FBNIC_PUL_USER_OB_WR_DWORD_CNT_31_0 \
- 0x31074 /* 0xc41d0 */
-#define FBNIC_PUL_USER_OB_WR_DWORD_CNT_63_32 \
- 0x31075 /* 0xc41d4 */
-#define FBNIC_PUL_USER_OB_CPL_TLP_CNT_31_0 \
- 0x31076 /* 0xc41d8 */
-#define FBNIC_PUL_USER_OB_CPL_TLP_CNT_63_32 \
- 0x31077 /* 0xc41dc */
-#define FBNIC_PUL_USER_OB_CPL_DWORD_CNT_31_0 \
- 0x31078 /* 0xc41e0 */
-#define FBNIC_PUL_USER_OB_CPL_DWORD_CNT_63_32 \
- 0x31079 /* 0xc41e4 */
-#define FBNIC_PUL_USER_OB_RD_DBG_CNT_CPL_CRED_31_0 \
- 0x3107a /* 0xc41e8 */
-#define FBNIC_PUL_USER_OB_RD_DBG_CNT_CPL_CRED_63_32 \
- 0x3107b /* 0xc41ec */
-#define FBNIC_PUL_USER_OB_RD_DBG_CNT_TAG_31_0 \
- 0x3107c /* 0xc41f0 */
-#define FBNIC_PUL_USER_OB_RD_DBG_CNT_TAG_63_32 \
- 0x3107d /* 0xc41f4 */
-#define FBNIC_PUL_USER_OB_RD_DBG_CNT_NP_CRED_31_0 \
- 0x3107e /* 0xc41f8 */
-#define FBNIC_PUL_USER_OB_RD_DBG_CNT_NP_CRED_63_32 \
- 0x3107f /* 0xc41fc */
-#define FBNIC_CSR_END_PUL_USER 0x31080 /* CSR section delimiter */
-
/* BAR 4 CSRs */
/* The IPC mailbox consists of 32 mailboxes, with each mailbox consisting
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_debugfs.c b/drivers/net/ethernet/meta/fbnic/fbnic_debugfs.c
index ac80981f67c0..e8f2d7f2d962 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_debugfs.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_debugfs.c
@@ -44,6 +44,132 @@ static int fbnic_dbg_mac_addr_show(struct seq_file *s, void *v)
}
DEFINE_SHOW_ATTRIBUTE(fbnic_dbg_mac_addr);
+static int fbnic_dbg_tce_tcam_show(struct seq_file *s, void *v)
+{
+ struct fbnic_dev *fbd = s->private;
+ int i, tcam_idx = 0;
+ char hdr[80];
+
+ /* Generate Header */
+ snprintf(hdr, sizeof(hdr), "%3s %s %-17s %s\n",
+ "Idx", "S", "TCAM Bitmap", "Addr/Mask");
+ seq_puts(s, hdr);
+ fbnic_dbg_desc_break(s, strnlen(hdr, sizeof(hdr)));
+
+ for (i = 0; i < ARRAY_SIZE(fbd->mac_addr); i++) {
+ struct fbnic_mac_addr *mac_addr = &fbd->mac_addr[i];
+
+ /* Verify BMC bit is set */
+ if (!test_bit(FBNIC_MAC_ADDR_T_BMC, mac_addr->act_tcam))
+ continue;
+
+ if (tcam_idx == FBNIC_TCE_TCAM_NUM_ENTRIES)
+ break;
+
+ seq_printf(s, "%02d %d %64pb %pm\n",
+ tcam_idx, mac_addr->state, mac_addr->act_tcam,
+ mac_addr->value.addr8);
+ seq_printf(s, " %pm\n",
+ mac_addr->mask.addr8);
+ tcam_idx++;
+ }
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(fbnic_dbg_tce_tcam);
+
+static int fbnic_dbg_act_tcam_show(struct seq_file *s, void *v)
+{
+ struct fbnic_dev *fbd = s->private;
+ char hdr[80];
+ int i;
+
+ /* Generate Header */
+ snprintf(hdr, sizeof(hdr), "%3s %s %-55s %-4s %s\n",
+ "Idx", "S", "Value/Mask", "RSS", "Dest");
+ seq_puts(s, hdr);
+ fbnic_dbg_desc_break(s, strnlen(hdr, sizeof(hdr)));
+
+ for (i = 0; i < FBNIC_RPC_TCAM_ACT_NUM_ENTRIES; i++) {
+ struct fbnic_act_tcam *act_tcam = &fbd->act_tcam[i];
+
+ seq_printf(s, "%02d %d %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %08x\n",
+ i, act_tcam->state,
+ act_tcam->value.tcam[10], act_tcam->value.tcam[9],
+ act_tcam->value.tcam[8], act_tcam->value.tcam[7],
+ act_tcam->value.tcam[6], act_tcam->value.tcam[5],
+ act_tcam->value.tcam[4], act_tcam->value.tcam[3],
+ act_tcam->value.tcam[2], act_tcam->value.tcam[1],
+ act_tcam->value.tcam[0], act_tcam->rss_en_mask,
+ act_tcam->dest);
+ seq_printf(s, " %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x\n",
+ act_tcam->mask.tcam[10], act_tcam->mask.tcam[9],
+ act_tcam->mask.tcam[8], act_tcam->mask.tcam[7],
+ act_tcam->mask.tcam[6], act_tcam->mask.tcam[5],
+ act_tcam->mask.tcam[4], act_tcam->mask.tcam[3],
+ act_tcam->mask.tcam[2], act_tcam->mask.tcam[1],
+ act_tcam->mask.tcam[0]);
+ }
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(fbnic_dbg_act_tcam);
+
+static int fbnic_dbg_ip_addr_show(struct seq_file *s,
+ struct fbnic_ip_addr *ip_addr)
+{
+ char hdr[80];
+ int i;
+
+ /* Generate Header */
+ snprintf(hdr, sizeof(hdr), "%3s %s %-17s %s %s\n",
+ "Idx", "S", "TCAM Bitmap", "V", "Addr/Mask");
+ seq_puts(s, hdr);
+ fbnic_dbg_desc_break(s, strnlen(hdr, sizeof(hdr)));
+
+ for (i = 0; i < FBNIC_RPC_TCAM_IP_ADDR_NUM_ENTRIES; i++, ip_addr++) {
+ seq_printf(s, "%02d %d %64pb %d %pi6\n",
+ i, ip_addr->state, ip_addr->act_tcam,
+ ip_addr->version, &ip_addr->value);
+ seq_printf(s, " %pi6\n",
+ &ip_addr->mask);
+ }
+
+ return 0;
+}
+
+static int fbnic_dbg_ip_src_show(struct seq_file *s, void *v)
+{
+ struct fbnic_dev *fbd = s->private;
+
+ return fbnic_dbg_ip_addr_show(s, fbd->ip_src);
+}
+DEFINE_SHOW_ATTRIBUTE(fbnic_dbg_ip_src);
+
+static int fbnic_dbg_ip_dst_show(struct seq_file *s, void *v)
+{
+ struct fbnic_dev *fbd = s->private;
+
+ return fbnic_dbg_ip_addr_show(s, fbd->ip_dst);
+}
+DEFINE_SHOW_ATTRIBUTE(fbnic_dbg_ip_dst);
+
+static int fbnic_dbg_ipo_src_show(struct seq_file *s, void *v)
+{
+ struct fbnic_dev *fbd = s->private;
+
+ return fbnic_dbg_ip_addr_show(s, fbd->ipo_src);
+}
+DEFINE_SHOW_ATTRIBUTE(fbnic_dbg_ipo_src);
+
+static int fbnic_dbg_ipo_dst_show(struct seq_file *s, void *v)
+{
+ struct fbnic_dev *fbd = s->private;
+
+ return fbnic_dbg_ip_addr_show(s, fbd->ipo_dst);
+}
+DEFINE_SHOW_ATTRIBUTE(fbnic_dbg_ipo_dst);
+
static int fbnic_dbg_pcie_stats_show(struct seq_file *s, void *v)
{
struct fbnic_dev *fbd = s->private;
@@ -84,6 +210,18 @@ void fbnic_dbg_fbd_init(struct fbnic_dev *fbd)
&fbnic_dbg_pcie_stats_fops);
debugfs_create_file("mac_addr", 0400, fbd->dbg_fbd, fbd,
&fbnic_dbg_mac_addr_fops);
+ debugfs_create_file("tce_tcam", 0400, fbd->dbg_fbd, fbd,
+ &fbnic_dbg_tce_tcam_fops);
+ debugfs_create_file("act_tcam", 0400, fbd->dbg_fbd, fbd,
+ &fbnic_dbg_act_tcam_fops);
+ debugfs_create_file("ip_src", 0400, fbd->dbg_fbd, fbd,
+ &fbnic_dbg_ip_src_fops);
+ debugfs_create_file("ip_dst", 0400, fbd->dbg_fbd, fbd,
+ &fbnic_dbg_ip_dst_fops);
+ debugfs_create_file("ipo_src", 0400, fbd->dbg_fbd, fbd,
+ &fbnic_dbg_ipo_src_fops);
+ debugfs_create_file("ipo_dst", 0400, fbd->dbg_fbd, fbd,
+ &fbnic_dbg_ipo_dst_fops);
}
void fbnic_dbg_fbd_exit(struct fbnic_dev *fbd)
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c b/drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c
index 20cd9f5f89e2..0a751a2aaf73 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c
@@ -4,6 +4,7 @@
#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
+#include <net/ipv6.h>
#include "fbnic.h"
#include "fbnic_netdev.h"
@@ -135,6 +136,168 @@ static void fbnic_clone_free(struct fbnic_net *clone)
kfree(clone);
}
+static int fbnic_get_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ec,
+ struct kernel_ethtool_coalesce *kernel_coal,
+ struct netlink_ext_ack *extack)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+
+ ec->tx_coalesce_usecs = fbn->tx_usecs;
+ ec->rx_coalesce_usecs = fbn->rx_usecs;
+ ec->rx_max_coalesced_frames = fbn->rx_max_frames;
+
+ return 0;
+}
+
+static int fbnic_set_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ec,
+ struct kernel_ethtool_coalesce *kernel_coal,
+ struct netlink_ext_ack *extack)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+
+ /* Verify against hardware limits */
+ if (ec->rx_coalesce_usecs > FIELD_MAX(FBNIC_INTR_CQ_REARM_RCQ_TIMEOUT)) {
+ NL_SET_ERR_MSG_MOD(extack, "rx_usecs is above device max");
+ return -EINVAL;
+ }
+ if (ec->tx_coalesce_usecs > FIELD_MAX(FBNIC_INTR_CQ_REARM_TCQ_TIMEOUT)) {
+ NL_SET_ERR_MSG_MOD(extack, "tx_usecs is above device max");
+ return -EINVAL;
+ }
+ if (ec->rx_max_coalesced_frames >
+ FIELD_MAX(FBNIC_QUEUE_RIM_THRESHOLD_RCD_MASK) /
+ FBNIC_MIN_RXD_PER_FRAME) {
+ NL_SET_ERR_MSG_MOD(extack, "rx_frames is above device max");
+ return -EINVAL;
+ }
+
+ fbn->tx_usecs = ec->tx_coalesce_usecs;
+ fbn->rx_usecs = ec->rx_coalesce_usecs;
+ fbn->rx_max_frames = ec->rx_max_coalesced_frames;
+
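+ /* If the interface is up, apply the new moderation settings to each NAPI vector right away */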
+ if (netif_running(netdev)) {
+ int i;
+
+ for (i = 0; i < fbn->num_napi; i++) {
+ struct fbnic_napi_vector *nv = fbn->napi[i];
+
+ fbnic_config_txrx_usecs(nv, 0);
+ fbnic_config_rx_frames(nv);
+ }
+ }
+
+ return 0;
+}
+
+static void
+fbnic_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+
+ ring->rx_max_pending = FBNIC_QUEUE_SIZE_MAX;
+ ring->rx_mini_max_pending = FBNIC_QUEUE_SIZE_MAX;
+ ring->rx_jumbo_max_pending = FBNIC_QUEUE_SIZE_MAX;
+ ring->tx_max_pending = FBNIC_QUEUE_SIZE_MAX;
+
+ ring->rx_pending = fbn->rcq_size;
+ ring->rx_mini_pending = fbn->hpq_size;
+ ring->rx_jumbo_pending = fbn->ppq_size;
+ ring->tx_pending = fbn->txq_size;
+}
+
+static void fbnic_set_rings(struct fbnic_net *fbn,
+ struct ethtool_ringparam *ring)
+{
+ fbn->rcq_size = ring->rx_pending;
+ fbn->hpq_size = ring->rx_mini_pending;
+ fbn->ppq_size = ring->rx_jumbo_pending;
+ fbn->txq_size = ring->tx_pending;
+}
+
+static int
+fbnic_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
+
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+ struct fbnic_net *clone;
+ int err;
+
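+ /* Queue sizes are used as powers of two; round the requested values up */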
+ ring->rx_pending = roundup_pow_of_two(ring->rx_pending);
+ ring->rx_mini_pending = roundup_pow_of_two(ring->rx_mini_pending);
+ ring->rx_jumbo_pending = roundup_pow_of_two(ring->rx_jumbo_pending);
+ ring->tx_pending = roundup_pow_of_two(ring->tx_pending);
+
+ /* These are absolute minimums that allow the device and driver to operate
+ * but do not necessarily guarantee reasonable performance. Settings below
+ * an Rx queue size of 128 or BDQs smaller than 64 are likely suboptimal
+ * at best.
+ */
+ if (ring->rx_pending < max(FBNIC_QUEUE_SIZE_MIN, FBNIC_RX_DESC_MIN) ||
+ ring->rx_mini_pending < FBNIC_QUEUE_SIZE_MIN ||
+ ring->rx_jumbo_pending < FBNIC_QUEUE_SIZE_MIN ||
+ ring->tx_pending < max(FBNIC_QUEUE_SIZE_MIN, FBNIC_TX_DESC_MIN)) {
+ NL_SET_ERR_MSG_MOD(extack, "requested ring size too small");
+ return -EINVAL;
+ }
+
+ if (!netif_running(netdev)) {
+ fbnic_set_rings(fbn, ring);
+ return 0;
+ }
+
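+ /* Interface is running: stage the new ring sizes in a clone, swap it in, then free the old resources */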
+ clone = fbnic_clone_create(fbn);
+ if (!clone)
+ return -ENOMEM;
+
+ fbnic_set_rings(clone, ring);
+
+ err = fbnic_alloc_napi_vectors(clone);
+ if (err)
+ goto err_free_clone;
+
+ err = fbnic_alloc_resources(clone);
+ if (err)
+ goto err_free_napis;
+
+ fbnic_down_noidle(fbn);
+ err = fbnic_wait_all_queues_idle(fbn->fbd, true);
+ if (err)
+ goto err_start_stack;
+
+ err = fbnic_set_netif_queues(clone);
+ if (err)
+ goto err_start_stack;
+
+ /* Nothing can fail past this point */
+ fbnic_flush(fbn);
+
+ fbnic_clone_swap(fbn, clone);
+
+ fbnic_up(fbn);
+
+ fbnic_free_resources(clone);
+ fbnic_free_napi_vectors(clone);
+ fbnic_clone_free(clone);
+
+ return 0;
+
+err_start_stack:
+ fbnic_flush(fbn);
+ fbnic_up(fbn);
+ fbnic_free_resources(clone);
+err_free_napis:
+ fbnic_free_napi_vectors(clone);
+err_free_clone:
+ fbnic_clone_free(clone);
+ return err;
+}
+
static void fbnic_get_strings(struct net_device *dev, u32 sset, u8 *data)
{
int i;
@@ -218,11 +381,234 @@ fbnic_get_rss_hash_opts(struct fbnic_net *fbn, struct ethtool_rxnfc *cmd)
return 0;
}
+static int fbnic_get_cls_rule_all(struct fbnic_net *fbn,
+ struct ethtool_rxnfc *cmd,
+ u32 *rule_locs)
+{
+ struct fbnic_dev *fbd = fbn->fbd;
+ int i, cnt = 0;
+
+ /* Report maximum rule count */
+ cmd->data = FBNIC_RPC_ACT_TBL_NFC_ENTRIES;
+
+ for (i = 0; i < FBNIC_RPC_ACT_TBL_NFC_ENTRIES; i++) {
+ int idx = i + FBNIC_RPC_ACT_TBL_NFC_OFFSET;
+ struct fbnic_act_tcam *act_tcam;
+
+ act_tcam = &fbd->act_tcam[idx];
+ if (act_tcam->state != FBNIC_TCAM_S_VALID)
+ continue;
+
+ if (rule_locs) {
+ if (cnt == cmd->rule_cnt)
+ return -EMSGSIZE;
+
+ rule_locs[cnt] = i;
+ }
+
+ cnt++;
+ }
+
+ return cnt;
+}
+
+static int fbnic_get_cls_rule(struct fbnic_net *fbn, struct ethtool_rxnfc *cmd)
+{
+ struct ethtool_rx_flow_spec *fsp;
+ struct fbnic_dev *fbd = fbn->fbd;
+ struct fbnic_act_tcam *act_tcam;
+ int idx;
+
+ fsp = (struct ethtool_rx_flow_spec *)&cmd->fs;
+
+ if (fsp->location >= FBNIC_RPC_ACT_TBL_NFC_ENTRIES)
+ return -EINVAL;
+
+ idx = fsp->location + FBNIC_RPC_ACT_TBL_NFC_OFFSET;
+ act_tcam = &fbd->act_tcam[idx];
+
+ if (act_tcam->state != FBNIC_TCAM_S_VALID)
+ return -EINVAL;
+
+ /* Report maximum rule count */
+ cmd->data = FBNIC_RPC_ACT_TBL_NFC_ENTRIES;
+
+ /* Set flow type field */
+ if (!(act_tcam->value.tcam[1] & FBNIC_RPC_TCAM_ACT1_IP_VALID)) {
+ fsp->flow_type = ETHER_FLOW;
+ if (!FIELD_GET(FBNIC_RPC_TCAM_ACT1_L2_MACDA_IDX,
+ act_tcam->mask.tcam[1])) {
+ struct fbnic_mac_addr *mac_addr;
+
+ idx = FIELD_GET(FBNIC_RPC_TCAM_ACT1_L2_MACDA_IDX,
+ act_tcam->value.tcam[1]);
+ mac_addr = &fbd->mac_addr[idx];
+
+ ether_addr_copy(fsp->h_u.ether_spec.h_dest,
+ mac_addr->value.addr8);
+ eth_broadcast_addr(fsp->m_u.ether_spec.h_dest);
+ }
+ } else if (act_tcam->value.tcam[1] &
+ FBNIC_RPC_TCAM_ACT1_OUTER_IP_VALID) {
+ fsp->flow_type = IPV6_USER_FLOW;
+ fsp->h_u.usr_ip6_spec.l4_proto = IPPROTO_IPV6;
+ fsp->m_u.usr_ip6_spec.l4_proto = 0xff;
+
+ if (!FIELD_GET(FBNIC_RPC_TCAM_ACT0_OUTER_IPSRC_IDX,
+ act_tcam->mask.tcam[0])) {
+ struct fbnic_ip_addr *ip_addr;
+ int i;
+
+ idx = FIELD_GET(FBNIC_RPC_TCAM_ACT0_OUTER_IPSRC_IDX,
+ act_tcam->value.tcam[0]);
+ ip_addr = &fbd->ipo_src[idx];
+
+ for (i = 0; i < 4; i++) {
+ fsp->h_u.usr_ip6_spec.ip6src[i] =
+ ip_addr->value.s6_addr32[i];
+ fsp->m_u.usr_ip6_spec.ip6src[i] =
+ ~ip_addr->mask.s6_addr32[i];
+ }
+ }
+
+ if (!FIELD_GET(FBNIC_RPC_TCAM_ACT0_OUTER_IPDST_IDX,
+ act_tcam->mask.tcam[0])) {
+ struct fbnic_ip_addr *ip_addr;
+ int i;
+
+ idx = FIELD_GET(FBNIC_RPC_TCAM_ACT0_OUTER_IPDST_IDX,
+ act_tcam->value.tcam[0]);
+ ip_addr = &fbd->ipo_dst[idx];
+
+ for (i = 0; i < 4; i++) {
+ fsp->h_u.usr_ip6_spec.ip6dst[i] =
+ ip_addr->value.s6_addr32[i];
+ fsp->m_u.usr_ip6_spec.ip6dst[i] =
+ ~ip_addr->mask.s6_addr32[i];
+ }
+ }
+ } else if ((act_tcam->value.tcam[1] & FBNIC_RPC_TCAM_ACT1_IP_IS_V6)) {
+ if (act_tcam->value.tcam[1] & FBNIC_RPC_TCAM_ACT1_L4_VALID) {
+ if (act_tcam->value.tcam[1] &
+ FBNIC_RPC_TCAM_ACT1_L4_IS_UDP)
+ fsp->flow_type = UDP_V6_FLOW;
+ else
+ fsp->flow_type = TCP_V6_FLOW;
+ fsp->h_u.tcp_ip6_spec.psrc =
+ cpu_to_be16(act_tcam->value.tcam[3]);
+ fsp->m_u.tcp_ip6_spec.psrc =
+ cpu_to_be16(~act_tcam->mask.tcam[3]);
+ fsp->h_u.tcp_ip6_spec.pdst =
+ cpu_to_be16(act_tcam->value.tcam[4]);
+ fsp->m_u.tcp_ip6_spec.pdst =
+ cpu_to_be16(~act_tcam->mask.tcam[4]);
+ } else {
+ fsp->flow_type = IPV6_USER_FLOW;
+ }
+
+ if (!FIELD_GET(FBNIC_RPC_TCAM_ACT0_IPSRC_IDX,
+ act_tcam->mask.tcam[0])) {
+ struct fbnic_ip_addr *ip_addr;
+ int i;
+
+ idx = FIELD_GET(FBNIC_RPC_TCAM_ACT0_IPSRC_IDX,
+ act_tcam->value.tcam[0]);
+ ip_addr = &fbd->ip_src[idx];
+
+ for (i = 0; i < 4; i++) {
+ fsp->h_u.usr_ip6_spec.ip6src[i] =
+ ip_addr->value.s6_addr32[i];
+ fsp->m_u.usr_ip6_spec.ip6src[i] =
+ ~ip_addr->mask.s6_addr32[i];
+ }
+ }
+
+ if (!FIELD_GET(FBNIC_RPC_TCAM_ACT0_IPDST_IDX,
+ act_tcam->mask.tcam[0])) {
+ struct fbnic_ip_addr *ip_addr;
+ int i;
+
+ idx = FIELD_GET(FBNIC_RPC_TCAM_ACT0_IPDST_IDX,
+ act_tcam->value.tcam[0]);
+ ip_addr = &fbd->ip_dst[idx];
+
+ for (i = 0; i < 4; i++) {
+ fsp->h_u.usr_ip6_spec.ip6dst[i] =
+ ip_addr->value.s6_addr32[i];
+ fsp->m_u.usr_ip6_spec.ip6dst[i] =
+ ~ip_addr->mask.s6_addr32[i];
+ }
+ }
+ } else {
+ if (act_tcam->value.tcam[1] & FBNIC_RPC_TCAM_ACT1_L4_VALID) {
+ if (act_tcam->value.tcam[1] &
+ FBNIC_RPC_TCAM_ACT1_L4_IS_UDP)
+ fsp->flow_type = UDP_V4_FLOW;
+ else
+ fsp->flow_type = TCP_V4_FLOW;
+ fsp->h_u.tcp_ip4_spec.psrc =
+ cpu_to_be16(act_tcam->value.tcam[3]);
+ fsp->m_u.tcp_ip4_spec.psrc =
+ cpu_to_be16(~act_tcam->mask.tcam[3]);
+ fsp->h_u.tcp_ip4_spec.pdst =
+ cpu_to_be16(act_tcam->value.tcam[4]);
+ fsp->m_u.tcp_ip4_spec.pdst =
+ cpu_to_be16(~act_tcam->mask.tcam[4]);
+ } else {
+ fsp->flow_type = IPV4_USER_FLOW;
+ fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
+ }
+
+ if (!FIELD_GET(FBNIC_RPC_TCAM_ACT0_IPSRC_IDX,
+ act_tcam->mask.tcam[0])) {
+ struct fbnic_ip_addr *ip_addr;
+
+ idx = FIELD_GET(FBNIC_RPC_TCAM_ACT0_IPSRC_IDX,
+ act_tcam->value.tcam[0]);
+ ip_addr = &fbd->ip_src[idx];
+
+ fsp->h_u.usr_ip4_spec.ip4src =
+ ip_addr->value.s6_addr32[3];
+ fsp->m_u.usr_ip4_spec.ip4src =
+ ~ip_addr->mask.s6_addr32[3];
+ }
+
+ if (!FIELD_GET(FBNIC_RPC_TCAM_ACT0_IPDST_IDX,
+ act_tcam->mask.tcam[0])) {
+ struct fbnic_ip_addr *ip_addr;
+
+ idx = FIELD_GET(FBNIC_RPC_TCAM_ACT0_IPDST_IDX,
+ act_tcam->value.tcam[0]);
+ ip_addr = &fbd->ip_dst[idx];
+
+ fsp->h_u.usr_ip4_spec.ip4dst =
+ ip_addr->value.s6_addr32[3];
+ fsp->m_u.usr_ip4_spec.ip4dst =
+ ~ip_addr->mask.s6_addr32[3];
+ }
+ }
+
+ /* Record action */
+ if (act_tcam->dest & FBNIC_RPC_ACT_TBL0_DROP)
+ fsp->ring_cookie = RX_CLS_FLOW_DISC;
+ else if (act_tcam->dest & FBNIC_RPC_ACT_TBL0_Q_SEL)
+ fsp->ring_cookie = FIELD_GET(FBNIC_RPC_ACT_TBL0_Q_ID,
+ act_tcam->dest);
+ else
+ fsp->flow_type |= FLOW_RSS;
+
+ cmd->rss_context = FIELD_GET(FBNIC_RPC_ACT_TBL0_RSS_CTXT_ID,
+ act_tcam->dest);
+
+ return 0;
+}
+
static int fbnic_get_rxnfc(struct net_device *netdev,
struct ethtool_rxnfc *cmd, u32 *rule_locs)
{
struct fbnic_net *fbn = netdev_priv(netdev);
int ret = -EOPNOTSUPP;
+ u32 special = 0;
switch (cmd->cmd) {
case ETHTOOL_GRXRINGS:
@@ -232,6 +618,22 @@ static int fbnic_get_rxnfc(struct net_device *netdev,
case ETHTOOL_GRXFH:
ret = fbnic_get_rss_hash_opts(fbn, cmd);
break;
+ case ETHTOOL_GRXCLSRULE:
+ ret = fbnic_get_cls_rule(fbn, cmd);
+ break;
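+ /* The count query reuses the dump handler with rule_locs cleared and advertises support for special insert locations */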
+ case ETHTOOL_GRXCLSRLCNT:
+ rule_locs = NULL;
+ special = RX_CLS_LOC_SPECIAL;
+ fallthrough;
+ case ETHTOOL_GRXCLSRLALL:
+ ret = fbnic_get_cls_rule_all(fbn, cmd, rule_locs);
+ if (ret < 0)
+ break;
+
+ cmd->data |= special;
+ cmd->rule_cnt = ret;
+ ret = 0;
+ break;
}
return ret;
@@ -272,6 +674,406 @@ fbnic_set_rss_hash_opts(struct fbnic_net *fbn, const struct ethtool_rxnfc *cmd)
return 0;
}
+static int fbnic_cls_rule_any_loc(struct fbnic_dev *fbd)
+{
+ int i;
+
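+ /* Walk the NFC region from the top down and return the highest unused location */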
+ for (i = FBNIC_RPC_ACT_TBL_NFC_ENTRIES; i--;) {
+ int idx = i + FBNIC_RPC_ACT_TBL_NFC_OFFSET;
+
+ if (fbd->act_tcam[idx].state != FBNIC_TCAM_S_VALID)
+ return i;
+ }
+
+ return -ENOSPC;
+}
+
+static int fbnic_set_cls_rule_ins(struct fbnic_net *fbn,
+ const struct ethtool_rxnfc *cmd)
+{
+ u16 flow_value = 0, flow_mask = 0xffff, ip_value = 0, ip_mask = 0xffff;
+ u16 sport = 0, sport_mask = ~0, dport = 0, dport_mask = ~0;
+ u16 misc = 0, misc_mask = ~0;
+ u32 dest = FIELD_PREP(FBNIC_RPC_ACT_TBL0_DEST_MASK,
+ FBNIC_RPC_ACT_TBL0_DEST_HOST);
+ struct fbnic_ip_addr *ip_src = NULL, *ip_dst = NULL;
+ struct fbnic_mac_addr *mac_addr = NULL;
+ struct ethtool_rx_flow_spec *fsp;
+ struct fbnic_dev *fbd = fbn->fbd;
+ struct fbnic_act_tcam *act_tcam;
+ struct in6_addr *addr6, *mask6;
+ struct in_addr *addr4, *mask4;
+ int hash_idx, location;
+ u32 flow_type;
+ int idx, j;
+
+ fsp = (struct ethtool_rx_flow_spec *)&cmd->fs;
+
+ if (fsp->location != RX_CLS_LOC_ANY)
+ return -EINVAL;
+ location = fbnic_cls_rule_any_loc(fbd);
+ if (location < 0)
+ return location;
+
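+ /* Translate the ring cookie into a destination: drop, RSS context, or a specific Rx queue */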
+ if (fsp->ring_cookie == RX_CLS_FLOW_DISC) {
+ dest = FBNIC_RPC_ACT_TBL0_DROP;
+ } else if (fsp->flow_type & FLOW_RSS) {
+ if (cmd->rss_context == 1)
+ dest |= FBNIC_RPC_ACT_TBL0_RSS_CTXT_ID;
+ } else {
+ u32 ring_idx = ethtool_get_flow_spec_ring(fsp->ring_cookie);
+
+ if (ring_idx >= fbn->num_rx_queues)
+ return -EINVAL;
+
+ dest |= FBNIC_RPC_ACT_TBL0_Q_SEL |
+ FIELD_PREP(FBNIC_RPC_ACT_TBL0_Q_ID, ring_idx);
+ }
+
+ idx = location + FBNIC_RPC_ACT_TBL_NFC_OFFSET;
+ act_tcam = &fbd->act_tcam[idx];
+
+ /* Do not allow overwriting for now.
+ * To support overwriting rules we will need to add logic to free
+ * any IP or MACDA TCAMs that may be associated with the old rule.
+ */
+ if (act_tcam->state != FBNIC_TCAM_S_DISABLED)
+ return -EBUSY;
+
+ flow_type = fsp->flow_type & ~(FLOW_EXT | FLOW_RSS);
+ hash_idx = fbnic_get_rss_hash_idx(flow_type);
+
+ switch (flow_type) {
+ case UDP_V4_FLOW:
+udp4_flow:
+ flow_value |= FBNIC_RPC_TCAM_ACT1_L4_IS_UDP;
+ fallthrough;
+ case TCP_V4_FLOW:
+tcp4_flow:
+ flow_value |= FBNIC_RPC_TCAM_ACT1_L4_VALID;
+ flow_mask &= ~(FBNIC_RPC_TCAM_ACT1_L4_IS_UDP |
+ FBNIC_RPC_TCAM_ACT1_L4_VALID);
+
+ sport = be16_to_cpu(fsp->h_u.tcp_ip4_spec.psrc);
+ sport_mask = ~be16_to_cpu(fsp->m_u.tcp_ip4_spec.psrc);
+ dport = be16_to_cpu(fsp->h_u.tcp_ip4_spec.pdst);
+ dport_mask = ~be16_to_cpu(fsp->m_u.tcp_ip4_spec.pdst);
+ goto ip4_flow;
+ case IP_USER_FLOW:
+ if (!fsp->m_u.usr_ip4_spec.proto)
+ goto ip4_flow;
+ if (fsp->m_u.usr_ip4_spec.proto != 0xff)
+ return -EINVAL;
+ if (fsp->h_u.usr_ip4_spec.proto == IPPROTO_UDP)
+ goto udp4_flow;
+ if (fsp->h_u.usr_ip4_spec.proto == IPPROTO_TCP)
+ goto tcp4_flow;
+ return -EINVAL;
+ip4_flow:
+ addr4 = (struct in_addr *)&fsp->h_u.usr_ip4_spec.ip4src;
+ mask4 = (struct in_addr *)&fsp->m_u.usr_ip4_spec.ip4src;
+ if (mask4->s_addr) {
+ ip_src = __fbnic_ip4_sync(fbd, fbd->ip_src,
+ addr4, mask4);
+ if (!ip_src)
+ return -ENOSPC;
+
+ set_bit(idx, ip_src->act_tcam);
+ ip_value |= FBNIC_RPC_TCAM_ACT0_IPSRC_VALID |
+ FIELD_PREP(FBNIC_RPC_TCAM_ACT0_IPSRC_IDX,
+ ip_src - fbd->ip_src);
+ ip_mask &= ~(FBNIC_RPC_TCAM_ACT0_IPSRC_VALID |
+ FBNIC_RPC_TCAM_ACT0_IPSRC_IDX);
+ }
+
+ addr4 = (struct in_addr *)&fsp->h_u.usr_ip4_spec.ip4dst;
+ mask4 = (struct in_addr *)&fsp->m_u.usr_ip4_spec.ip4dst;
+ if (mask4->s_addr) {
+ ip_dst = __fbnic_ip4_sync(fbd, fbd->ip_dst,
+ addr4, mask4);
+ if (!ip_dst) {
+ if (ip_src && ip_src->state == FBNIC_TCAM_S_ADD)
+ memset(ip_src, 0, sizeof(*ip_src));
+ return -ENOSPC;
+ }
+
+ set_bit(idx, ip_dst->act_tcam);
+ ip_value |= FBNIC_RPC_TCAM_ACT0_IPDST_VALID |
+ FIELD_PREP(FBNIC_RPC_TCAM_ACT0_IPDST_IDX,
+ ip_dst - fbd->ip_dst);
+ ip_mask &= ~(FBNIC_RPC_TCAM_ACT0_IPDST_VALID |
+ FBNIC_RPC_TCAM_ACT0_IPDST_IDX);
+ }
+ flow_value |= FBNIC_RPC_TCAM_ACT1_IP_VALID |
+ FBNIC_RPC_TCAM_ACT1_L2_MACDA_VALID;
+ flow_mask &= ~(FBNIC_RPC_TCAM_ACT1_IP_IS_V6 |
+ FBNIC_RPC_TCAM_ACT1_IP_VALID |
+ FBNIC_RPC_TCAM_ACT1_L2_MACDA_VALID);
+ break;
+ case UDP_V6_FLOW:
+udp6_flow:
+ flow_value |= FBNIC_RPC_TCAM_ACT1_L4_IS_UDP;
+ fallthrough;
+ case TCP_V6_FLOW:
+tcp6_flow:
+ flow_value |= FBNIC_RPC_TCAM_ACT1_L4_VALID;
+ flow_mask &= ~(FBNIC_RPC_TCAM_ACT1_L4_IS_UDP |
+ FBNIC_RPC_TCAM_ACT1_L4_VALID);
+
+ sport = be16_to_cpu(fsp->h_u.tcp_ip6_spec.psrc);
+ sport_mask = ~be16_to_cpu(fsp->m_u.tcp_ip6_spec.psrc);
+ dport = be16_to_cpu(fsp->h_u.tcp_ip6_spec.pdst);
+ dport_mask = ~be16_to_cpu(fsp->m_u.tcp_ip6_spec.pdst);
+ goto ipv6_flow;
+ case IPV6_USER_FLOW:
+ if (!fsp->m_u.usr_ip6_spec.l4_proto)
+ goto ipv6_flow;
+
+ if (fsp->m_u.usr_ip6_spec.l4_proto != 0xff)
+ return -EINVAL;
+ if (fsp->h_u.usr_ip6_spec.l4_proto == IPPROTO_UDP)
+ goto udp6_flow;
+ if (fsp->h_u.usr_ip6_spec.l4_proto == IPPROTO_TCP)
+ goto tcp6_flow;
+ if (fsp->h_u.usr_ip6_spec.l4_proto != IPPROTO_IPV6)
+ return -EINVAL;
+
+ addr6 = (struct in6_addr *)fsp->h_u.usr_ip6_spec.ip6src;
+ mask6 = (struct in6_addr *)fsp->m_u.usr_ip6_spec.ip6src;
+ if (!ipv6_addr_any(mask6)) {
+ ip_src = __fbnic_ip6_sync(fbd, fbd->ipo_src,
+ addr6, mask6);
+ if (!ip_src)
+ return -ENOSPC;
+
+ set_bit(idx, ip_src->act_tcam);
+ ip_value |=
+ FBNIC_RPC_TCAM_ACT0_OUTER_IPSRC_VALID |
+ FIELD_PREP(FBNIC_RPC_TCAM_ACT0_OUTER_IPSRC_IDX,
+ ip_src - fbd->ipo_src);
+ ip_mask &=
+ ~(FBNIC_RPC_TCAM_ACT0_OUTER_IPSRC_VALID |
+ FBNIC_RPC_TCAM_ACT0_OUTER_IPSRC_IDX);
+ }
+
+ addr6 = (struct in6_addr *)fsp->h_u.usr_ip6_spec.ip6dst;
+ mask6 = (struct in6_addr *)fsp->m_u.usr_ip6_spec.ip6dst;
+ if (!ipv6_addr_any(mask6)) {
+ ip_dst = __fbnic_ip6_sync(fbd, fbd->ipo_dst,
+ addr6, mask6);
+ if (!ip_dst) {
+ if (ip_src && ip_src->state == FBNIC_TCAM_S_ADD)
+ memset(ip_src, 0, sizeof(*ip_src));
+ return -ENOSPC;
+ }
+
+ set_bit(idx, ip_dst->act_tcam);
+ ip_value |=
+ FBNIC_RPC_TCAM_ACT0_OUTER_IPDST_VALID |
+ FIELD_PREP(FBNIC_RPC_TCAM_ACT0_OUTER_IPDST_IDX,
+ ip_dst - fbd->ipo_dst);
+ ip_mask &= ~(FBNIC_RPC_TCAM_ACT0_OUTER_IPDST_VALID |
+ FBNIC_RPC_TCAM_ACT0_OUTER_IPDST_IDX);
+ }
+
+ flow_value |= FBNIC_RPC_TCAM_ACT1_OUTER_IP_VALID;
+ flow_mask &= FBNIC_RPC_TCAM_ACT1_OUTER_IP_VALID;
+ipv6_flow:
+ addr6 = (struct in6_addr *)fsp->h_u.usr_ip6_spec.ip6src;
+ mask6 = (struct in6_addr *)fsp->m_u.usr_ip6_spec.ip6src;
+ if (!ip_src && !ipv6_addr_any(mask6)) {
+ ip_src = __fbnic_ip6_sync(fbd, fbd->ip_src,
+ addr6, mask6);
+ if (!ip_src)
+ return -ENOSPC;
+
+ set_bit(idx, ip_src->act_tcam);
+ ip_value |= FBNIC_RPC_TCAM_ACT0_IPSRC_VALID |
+ FIELD_PREP(FBNIC_RPC_TCAM_ACT0_IPSRC_IDX,
+ ip_src - fbd->ip_src);
+ ip_mask &= ~(FBNIC_RPC_TCAM_ACT0_IPSRC_VALID |
+ FBNIC_RPC_TCAM_ACT0_IPSRC_IDX);
+ }
+
+ addr6 = (struct in6_addr *)fsp->h_u.usr_ip6_spec.ip6dst;
+ mask6 = (struct in6_addr *)fsp->m_u.usr_ip6_spec.ip6dst;
+ if (!ip_dst && !ipv6_addr_any(mask6)) {
+ ip_dst = __fbnic_ip6_sync(fbd, fbd->ip_dst,
+ addr6, mask6);
+ if (!ip_dst) {
+ if (ip_src && ip_src->state == FBNIC_TCAM_S_ADD)
+ memset(ip_src, 0, sizeof(*ip_src));
+ return -ENOSPC;
+ }
+
+ set_bit(idx, ip_dst->act_tcam);
+ ip_value |= FBNIC_RPC_TCAM_ACT0_IPDST_VALID |
+ FIELD_PREP(FBNIC_RPC_TCAM_ACT0_IPDST_IDX,
+ ip_dst - fbd->ip_dst);
+ ip_mask &= ~(FBNIC_RPC_TCAM_ACT0_IPDST_VALID |
+ FBNIC_RPC_TCAM_ACT0_IPDST_IDX);
+ }
+
+ flow_value |= FBNIC_RPC_TCAM_ACT1_IP_IS_V6 |
+ FBNIC_RPC_TCAM_ACT1_IP_VALID |
+ FBNIC_RPC_TCAM_ACT1_L2_MACDA_VALID;
+ flow_mask &= ~(FBNIC_RPC_TCAM_ACT1_IP_IS_V6 |
+ FBNIC_RPC_TCAM_ACT1_IP_VALID |
+ FBNIC_RPC_TCAM_ACT1_L2_MACDA_VALID);
+ break;
+ case ETHER_FLOW:
+ if (!is_zero_ether_addr(fsp->m_u.ether_spec.h_dest)) {
+ u8 *addr = fsp->h_u.ether_spec.h_dest;
+ u8 *mask = fsp->m_u.ether_spec.h_dest;
+
+ /* Do not allow MAC addr of 0 */
+ if (is_zero_ether_addr(addr))
+ return -EINVAL;
+
+ /* Only support full MAC address to avoid
+ * conflicts with other MAC addresses.
+ */
+ if (!is_broadcast_ether_addr(mask))
+ return -EINVAL;
+
+ if (is_multicast_ether_addr(addr))
+ mac_addr = __fbnic_mc_sync(fbd, addr);
+ else
+ mac_addr = __fbnic_uc_sync(fbd, addr);
+
+ if (!mac_addr)
+ return -ENOSPC;
+
+ set_bit(idx, mac_addr->act_tcam);
+ flow_value |=
+ FIELD_PREP(FBNIC_RPC_TCAM_ACT1_L2_MACDA_IDX,
+ mac_addr - fbd->mac_addr);
+ flow_mask &= ~FBNIC_RPC_TCAM_ACT1_L2_MACDA_IDX;
+ }
+
+ flow_value |= FBNIC_RPC_TCAM_ACT1_L2_MACDA_VALID;
+ flow_mask &= ~FBNIC_RPC_TCAM_ACT1_L2_MACDA_VALID;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Write action table values */
+ act_tcam->dest = dest;
+ act_tcam->rss_en_mask = fbnic_flow_hash_2_rss_en_mask(fbn, hash_idx);
+
+ /* Write IP Match value/mask to action_tcam[0] */
+ act_tcam->value.tcam[0] = ip_value;
+ act_tcam->mask.tcam[0] = ip_mask;
+
+ /* Write flow type value/mask to action_tcam[1] */
+ act_tcam->value.tcam[1] = flow_value;
+ act_tcam->mask.tcam[1] = flow_mask;
+
+ /* Write error, DSCP, extra L4 matches to action_tcam[2] */
+ act_tcam->value.tcam[2] = misc;
+ act_tcam->mask.tcam[2] = misc_mask;
+
+ /* Write source/destination port values */
+ act_tcam->value.tcam[3] = sport;
+ act_tcam->mask.tcam[3] = sport_mask;
+ act_tcam->value.tcam[4] = dport;
+ act_tcam->mask.tcam[4] = dport_mask;
+
+ for (j = 5; j < FBNIC_RPC_TCAM_ACT_WORD_LEN; j++)
+ act_tcam->mask.tcam[j] = 0xffff;
+
+ act_tcam->state = FBNIC_TCAM_S_UPDATE;
+ fsp->location = location;
+
+ if (netif_running(fbn->netdev)) {
+ fbnic_write_rules(fbd);
+ if (ip_src || ip_dst)
+ fbnic_write_ip_addr(fbd);
+ if (mac_addr)
+ fbnic_write_macda(fbd);
+ }
+
+ return 0;
+}
+
+static void fbnic_clear_nfc_macda(struct fbnic_net *fbn,
+ unsigned int tcam_idx)
+{
+ struct fbnic_dev *fbd = fbn->fbd;
+ int idx;
+
+ for (idx = ARRAY_SIZE(fbd->mac_addr); idx--;)
+ __fbnic_xc_unsync(&fbd->mac_addr[idx], tcam_idx);
+
+ /* Write updates to hardware */
+ if (netif_running(fbn->netdev))
+ fbnic_write_macda(fbd);
+}
+
+static void fbnic_clear_nfc_ip_addr(struct fbnic_net *fbn,
+ unsigned int tcam_idx)
+{
+ struct fbnic_dev *fbd = fbn->fbd;
+ int idx;
+
+ for (idx = ARRAY_SIZE(fbd->ip_src); idx--;)
+ __fbnic_ip_unsync(&fbd->ip_src[idx], tcam_idx);
+ for (idx = ARRAY_SIZE(fbd->ip_dst); idx--;)
+ __fbnic_ip_unsync(&fbd->ip_dst[idx], tcam_idx);
+ for (idx = ARRAY_SIZE(fbd->ipo_src); idx--;)
+ __fbnic_ip_unsync(&fbd->ipo_src[idx], tcam_idx);
+ for (idx = ARRAY_SIZE(fbd->ipo_dst); idx--;)
+ __fbnic_ip_unsync(&fbd->ipo_dst[idx], tcam_idx);
+
+ /* Write updates to hardware */
+ if (netif_running(fbn->netdev))
+ fbnic_write_ip_addr(fbd);
+}
+
+static int fbnic_set_cls_rule_del(struct fbnic_net *fbn,
+ const struct ethtool_rxnfc *cmd)
+{
+ struct ethtool_rx_flow_spec *fsp;
+ struct fbnic_dev *fbd = fbn->fbd;
+ struct fbnic_act_tcam *act_tcam;
+ int idx;
+
+ fsp = (struct ethtool_rx_flow_spec *)&cmd->fs;
+
+ if (fsp->location >= FBNIC_RPC_ACT_TBL_NFC_ENTRIES)
+ return -EINVAL;
+
+ idx = fsp->location + FBNIC_RPC_ACT_TBL_NFC_OFFSET;
+ act_tcam = &fbd->act_tcam[idx];
+
+ if (act_tcam->state != FBNIC_TCAM_S_VALID)
+ return -EINVAL;
+
+ act_tcam->state = FBNIC_TCAM_S_DELETE;
+
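+ /* Drop this rule's references to MACDA and IP TCAM entries so unused entries can be reclaimed */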
+ if ((act_tcam->value.tcam[1] & FBNIC_RPC_TCAM_ACT1_L2_MACDA_VALID) &&
+ (~act_tcam->mask.tcam[1] & FBNIC_RPC_TCAM_ACT1_L2_MACDA_IDX))
+ fbnic_clear_nfc_macda(fbn, idx);
+
+ if ((act_tcam->value.tcam[0] &
+ (FBNIC_RPC_TCAM_ACT0_IPSRC_VALID |
+ FBNIC_RPC_TCAM_ACT0_IPDST_VALID |
+ FBNIC_RPC_TCAM_ACT0_OUTER_IPSRC_VALID |
+ FBNIC_RPC_TCAM_ACT0_OUTER_IPDST_VALID)) &&
+ (~act_tcam->mask.tcam[0] &
+ (FBNIC_RPC_TCAM_ACT0_IPSRC_IDX |
+ FBNIC_RPC_TCAM_ACT0_IPDST_IDX |
+ FBNIC_RPC_TCAM_ACT0_OUTER_IPSRC_IDX |
+ FBNIC_RPC_TCAM_ACT0_OUTER_IPDST_IDX)))
+ fbnic_clear_nfc_ip_addr(fbn, idx);
+
+ if (netif_running(fbn->netdev))
+ fbnic_write_rules(fbd);
+
+ return 0;
+}
+
static int fbnic_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
{
struct fbnic_net *fbn = netdev_priv(netdev);
@@ -281,6 +1083,12 @@ static int fbnic_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
case ETHTOOL_SRXFH:
ret = fbnic_set_rss_hash_opts(fbn, cmd);
break;
+ case ETHTOOL_SRXCLSRLINS:
+ ret = fbnic_set_cls_rule_ins(fbn, cmd);
+ break;
+ case ETHTOOL_SRXCLSRLDEL:
+ ret = fbnic_set_cls_rule_del(fbn, cmd);
+ break;
}
return ret;
@@ -374,6 +1182,61 @@ fbnic_set_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *rxfh,
return 0;
}
+static int
+fbnic_modify_rxfh_context(struct net_device *netdev,
+ struct ethtool_rxfh_context *ctx,
+ const struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+ const u32 *indir = rxfh->indir;
+ unsigned int changes;
+
+ if (!indir)
+ indir = ethtool_rxfh_context_indir(ctx);
+
+ changes = fbnic_set_indir(fbn, rxfh->rss_context, indir);
+ if (changes && netif_running(netdev))
+ fbnic_rss_reinit_hw(fbn->fbd, fbn);
+
+ return 0;
+}
+
+static int
+fbnic_create_rxfh_context(struct net_device *netdev,
+ struct ethtool_rxfh_context *ctx,
+ const struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+
+ if (rxfh->hfunc && rxfh->hfunc != ETH_RSS_HASH_TOP) {
+ NL_SET_ERR_MSG_MOD(extack, "RSS hash function not supported");
+ return -EOPNOTSUPP;
+ }
+ ctx->hfunc = ETH_RSS_HASH_TOP;
+
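+ /* No indirection table supplied; initialize the context with the default spread across Rx queues */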
+ if (!rxfh->indir) {
+ u32 *indir = ethtool_rxfh_context_indir(ctx);
+ unsigned int num_rx = fbn->num_rx_queues;
+ unsigned int i;
+
+ for (i = 0; i < FBNIC_RPC_RSS_TBL_SIZE; i++)
+ indir[i] = ethtool_rxfh_indir_default(i, num_rx);
+ }
+
+ return fbnic_modify_rxfh_context(netdev, ctx, rxfh, extack);
+}
+
+static int
+fbnic_remove_rxfh_context(struct net_device *netdev,
+ struct ethtool_rxfh_context *ctx, u32 rss_context,
+ struct netlink_ext_ack *extack)
+{
+ /* Nothing to do, contexts are allocated statically */
+ return 0;
+}
+
static void fbnic_get_channels(struct net_device *netdev,
struct ethtool_channels *ch)
{
@@ -523,14 +1386,14 @@ static void fbnic_get_ts_stats(struct net_device *netdev,
unsigned int start;
int i;
- ts_stats->pkts = fbn->tx_stats.ts_packets;
- ts_stats->lost = fbn->tx_stats.ts_lost;
+ ts_stats->pkts = fbn->tx_stats.twq.ts_packets;
+ ts_stats->lost = fbn->tx_stats.twq.ts_lost;
for (i = 0; i < fbn->num_tx_queues; i++) {
ring = fbn->tx[i];
do {
start = u64_stats_fetch_begin(&ring->stats.syncp);
- ts_packets = ring->stats.ts_packets;
- ts_lost = ring->stats.ts_lost;
+ ts_packets = ring->stats.twq.ts_packets;
+ ts_lost = ring->stats.twq.ts_lost;
} while (u64_stats_fetch_retry(&ring->stats.syncp, start));
ts_stats->pkts += ts_packets;
ts_stats->lost += ts_lost;
@@ -586,9 +1449,17 @@ fbnic_get_eth_mac_stats(struct net_device *netdev,
}
static const struct ethtool_ops fbnic_ethtool_ops = {
+ .supported_coalesce_params =
+ ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_RX_MAX_FRAMES,
+ .rxfh_max_num_contexts = FBNIC_RPC_RSS_TBL_COUNT,
.get_drvinfo = fbnic_get_drvinfo,
.get_regs_len = fbnic_get_regs_len,
.get_regs = fbnic_get_regs,
+ .get_coalesce = fbnic_get_coalesce,
+ .set_coalesce = fbnic_set_coalesce,
+ .get_ringparam = fbnic_get_ringparam,
+ .set_ringparam = fbnic_set_ringparam,
.get_strings = fbnic_get_strings,
.get_ethtool_stats = fbnic_get_ethtool_stats,
.get_sset_count = fbnic_get_sset_count,
@@ -598,6 +1469,9 @@ static const struct ethtool_ops fbnic_ethtool_ops = {
.get_rxfh_indir_size = fbnic_get_rxfh_indir_size,
.get_rxfh = fbnic_get_rxfh,
.set_rxfh = fbnic_set_rxfh,
+ .create_rxfh_context = fbnic_create_rxfh_context,
+ .modify_rxfh_context = fbnic_modify_rxfh_context,
+ .remove_rxfh_context = fbnic_remove_rxfh_context,
.get_channels = fbnic_get_channels,
.set_channels = fbnic_set_channels,
.get_ts_info = fbnic_get_ts_info,
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_fw.c b/drivers/net/ethernet/meta/fbnic/fbnic_fw.c
index bbc7c1c0c37e..88db3dacb940 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_fw.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_fw.c
@@ -494,16 +494,13 @@ static int fbnic_fw_parse_bmc_addrs(u8 bmc_mac_addr[][ETH_ALEN],
static int fbnic_fw_parse_cap_resp(void *opaque, struct fbnic_tlv_msg **results)
{
- u32 active_slot = 0, all_multi = 0;
+ u32 all_multi = 0, version = 0;
struct fbnic_dev *fbd = opaque;
- u32 speed = 0, fec = 0;
- size_t commit_size = 0;
bool bmc_present;
int err;
- get_unsigned_result(FBNIC_FW_CAP_RESP_VERSION,
- fbd->fw_cap.running.mgmt.version);
-
+ version = fta_get_uint(results, FBNIC_FW_CAP_RESP_VERSION);
+ fbd->fw_cap.running.mgmt.version = version;
if (!fbd->fw_cap.running.mgmt.version)
return -EINVAL;
@@ -524,43 +521,41 @@ static int fbnic_fw_parse_cap_resp(void *opaque, struct fbnic_tlv_msg **results)
return -EINVAL;
}
- get_string_result(FBNIC_FW_CAP_RESP_VERSION_COMMIT_STR, commit_size,
- fbd->fw_cap.running.mgmt.commit,
- FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE);
- if (!commit_size)
+ if (fta_get_str(results, FBNIC_FW_CAP_RESP_VERSION_COMMIT_STR,
+ fbd->fw_cap.running.mgmt.commit,
+ FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE) <= 0)
dev_warn(fbd->dev, "Firmware did not send mgmt commit!\n");
- get_unsigned_result(FBNIC_FW_CAP_RESP_STORED_VERSION,
- fbd->fw_cap.stored.mgmt.version);
- get_string_result(FBNIC_FW_CAP_RESP_STORED_COMMIT_STR, commit_size,
- fbd->fw_cap.stored.mgmt.commit,
- FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE);
-
- get_unsigned_result(FBNIC_FW_CAP_RESP_CMRT_VERSION,
- fbd->fw_cap.running.bootloader.version);
- get_string_result(FBNIC_FW_CAP_RESP_CMRT_COMMIT_STR, commit_size,
- fbd->fw_cap.running.bootloader.commit,
- FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE);
-
- get_unsigned_result(FBNIC_FW_CAP_RESP_STORED_CMRT_VERSION,
- fbd->fw_cap.stored.bootloader.version);
- get_string_result(FBNIC_FW_CAP_RESP_STORED_CMRT_COMMIT_STR, commit_size,
- fbd->fw_cap.stored.bootloader.commit,
- FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE);
-
- get_unsigned_result(FBNIC_FW_CAP_RESP_UEFI_VERSION,
- fbd->fw_cap.stored.undi.version);
- get_string_result(FBNIC_FW_CAP_RESP_UEFI_COMMIT_STR, commit_size,
- fbd->fw_cap.stored.undi.commit,
- FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE);
-
- get_unsigned_result(FBNIC_FW_CAP_RESP_ACTIVE_FW_SLOT, active_slot);
- fbd->fw_cap.active_slot = active_slot;
-
- get_unsigned_result(FBNIC_FW_CAP_RESP_FW_LINK_SPEED, speed);
- get_unsigned_result(FBNIC_FW_CAP_RESP_FW_LINK_FEC, fec);
- fbd->fw_cap.link_speed = speed;
- fbd->fw_cap.link_fec = fec;
+ version = fta_get_uint(results, FBNIC_FW_CAP_RESP_STORED_VERSION);
+ fbd->fw_cap.stored.mgmt.version = version;
+ fta_get_str(results, FBNIC_FW_CAP_RESP_STORED_COMMIT_STR,
+ fbd->fw_cap.stored.mgmt.commit,
+ FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE);
+
+ version = fta_get_uint(results, FBNIC_FW_CAP_RESP_CMRT_VERSION);
+ fbd->fw_cap.running.bootloader.version = version;
+ fta_get_str(results, FBNIC_FW_CAP_RESP_CMRT_COMMIT_STR,
+ fbd->fw_cap.running.bootloader.commit,
+ FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE);
+
+ version = fta_get_uint(results, FBNIC_FW_CAP_RESP_STORED_CMRT_VERSION);
+ fbd->fw_cap.stored.bootloader.version = version;
+ fta_get_str(results, FBNIC_FW_CAP_RESP_STORED_CMRT_COMMIT_STR,
+ fbd->fw_cap.stored.bootloader.commit,
+ FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE);
+
+ version = fta_get_uint(results, FBNIC_FW_CAP_RESP_UEFI_VERSION);
+ fbd->fw_cap.stored.undi.version = version;
+ fta_get_str(results, FBNIC_FW_CAP_RESP_UEFI_COMMIT_STR,
+ fbd->fw_cap.stored.undi.commit,
+ FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE);
+
+ fbd->fw_cap.active_slot =
+ fta_get_uint(results, FBNIC_FW_CAP_RESP_ACTIVE_FW_SLOT);
+ fbd->fw_cap.link_speed =
+ fta_get_uint(results, FBNIC_FW_CAP_RESP_FW_LINK_SPEED);
+ fbd->fw_cap.link_fec =
+ fta_get_uint(results, FBNIC_FW_CAP_RESP_FW_LINK_FEC);
bmc_present = !!results[FBNIC_FW_CAP_RESP_BMC_PRESENT];
if (bmc_present) {
@@ -575,7 +570,8 @@ static int fbnic_fw_parse_cap_resp(void *opaque, struct fbnic_tlv_msg **results)
if (err)
return err;
- get_unsigned_result(FBNIC_FW_CAP_RESP_BMC_ALL_MULTI, all_multi);
+ all_multi =
+ fta_get_uint(results, FBNIC_FW_CAP_RESP_BMC_ALL_MULTI);
} else {
memset(fbd->fw_cap.bmc_mac_addr, 0,
sizeof(fbd->fw_cap.bmc_mac_addr));
@@ -743,9 +739,9 @@ free_message:
}
static const struct fbnic_tlv_index fbnic_tsene_read_resp_index[] = {
- FBNIC_TLV_ATTR_S32(FBNIC_TSENE_THERM),
- FBNIC_TLV_ATTR_S32(FBNIC_TSENE_VOLT),
- FBNIC_TLV_ATTR_S32(FBNIC_TSENE_ERROR),
+ FBNIC_TLV_ATTR_S32(FBNIC_FW_TSENE_THERM),
+ FBNIC_TLV_ATTR_S32(FBNIC_FW_TSENE_VOLT),
+ FBNIC_TLV_ATTR_S32(FBNIC_FW_TSENE_ERROR),
FBNIC_TLV_ATTR_LAST
};
@@ -754,32 +750,31 @@ static int fbnic_fw_parse_tsene_read_resp(void *opaque,
{
struct fbnic_fw_completion *cmpl_data;
struct fbnic_dev *fbd = opaque;
+ s32 err_resp;
int err = 0;
/* Verify we have a completion pointer to provide with data */
cmpl_data = fbnic_fw_get_cmpl_by_type(fbd,
FBNIC_TLV_MSG_ID_TSENE_READ_RESP);
if (!cmpl_data)
- return -EINVAL;
+ return -ENOSPC;
- if (results[FBNIC_TSENE_ERROR]) {
- err = fbnic_tlv_attr_get_unsigned(results[FBNIC_TSENE_ERROR]);
- if (err)
- goto exit_complete;
- }
+ err_resp = fta_get_sint(results, FBNIC_FW_TSENE_ERROR);
+ if (err_resp)
+ goto msg_err;
- if (!results[FBNIC_TSENE_THERM] || !results[FBNIC_TSENE_VOLT]) {
+ if (!results[FBNIC_FW_TSENE_THERM] || !results[FBNIC_FW_TSENE_VOLT]) {
err = -EINVAL;
- goto exit_complete;
+ goto msg_err;
}
cmpl_data->u.tsene.millidegrees =
- fbnic_tlv_attr_get_signed(results[FBNIC_TSENE_THERM]);
+ fta_get_sint(results, FBNIC_FW_TSENE_THERM);
cmpl_data->u.tsene.millivolts =
- fbnic_tlv_attr_get_signed(results[FBNIC_TSENE_VOLT]);
+ fta_get_sint(results, FBNIC_FW_TSENE_VOLT);
-exit_complete:
- cmpl_data->result = err;
+msg_err:
+ cmpl_data->result = err_resp ? : err;
complete(&cmpl_data->done);
fbnic_fw_put_cmpl(cmpl_data);
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_fw.h b/drivers/net/ethernet/meta/fbnic/fbnic_fw.h
index fe68333d51b1..a3618e7826c2 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_fw.h
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_fw.h
@@ -139,10 +139,10 @@ enum {
};
enum {
- FBNIC_TSENE_THERM = 0x0,
- FBNIC_TSENE_VOLT = 0x1,
- FBNIC_TSENE_ERROR = 0x2,
- FBNIC_TSENE_MSG_MAX
+ FBNIC_FW_TSENE_THERM = 0x0,
+ FBNIC_FW_TSENE_VOLT = 0x1,
+ FBNIC_FW_TSENE_ERROR = 0x2,
+ FBNIC_FW_TSENE_MSG_MAX
};
enum {
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c
index 1db57c42333e..79a01fdd1dd1 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c
@@ -487,8 +487,9 @@ static void fbnic_get_queue_stats_rx(struct net_device *dev, int idx,
struct fbnic_net *fbn = netdev_priv(dev);
struct fbnic_ring *rxr = fbn->rx[idx];
struct fbnic_queue_stats *stats;
+ u64 bytes, packets, alloc_fail;
+ u64 csum_complete, csum_none;
unsigned int start;
- u64 bytes, packets;
if (!rxr)
return;
@@ -498,10 +499,16 @@ static void fbnic_get_queue_stats_rx(struct net_device *dev, int idx,
start = u64_stats_fetch_begin(&stats->syncp);
bytes = stats->bytes;
packets = stats->packets;
+ alloc_fail = stats->rx.alloc_failed;
+ csum_complete = stats->rx.csum_complete;
+ csum_none = stats->rx.csum_none;
} while (u64_stats_fetch_retry(&stats->syncp, start));
rx->bytes = bytes;
rx->packets = packets;
+ rx->alloc_fail = alloc_fail;
+ rx->csum_complete = csum_complete;
+ rx->csum_none = csum_none;
}
static void fbnic_get_queue_stats_tx(struct net_device *dev, int idx,
@@ -510,6 +517,7 @@ static void fbnic_get_queue_stats_tx(struct net_device *dev, int idx,
struct fbnic_net *fbn = netdev_priv(dev);
struct fbnic_ring *txr = fbn->tx[idx];
struct fbnic_queue_stats *stats;
+ u64 stop, wake, csum, lso;
unsigned int start;
u64 bytes, packets;
@@ -521,10 +529,18 @@ static void fbnic_get_queue_stats_tx(struct net_device *dev, int idx,
start = u64_stats_fetch_begin(&stats->syncp);
bytes = stats->bytes;
packets = stats->packets;
+ csum = stats->twq.csum_partial;
+ lso = stats->twq.lso;
+ stop = stats->twq.stop;
+ wake = stats->twq.wake;
} while (u64_stats_fetch_retry(&stats->syncp, start));
tx->bytes = bytes;
tx->packets = packets;
+ tx->needs_csum = csum + lso;
+ tx->hw_gso_wire_packets = lso;
+ tx->stop = stop;
+ tx->wake = wake;
}
static void fbnic_get_base_stats(struct net_device *dev,
@@ -535,9 +551,16 @@ static void fbnic_get_base_stats(struct net_device *dev,
tx->bytes = fbn->tx_stats.bytes;
tx->packets = fbn->tx_stats.packets;
+ tx->needs_csum = fbn->tx_stats.twq.csum_partial + fbn->tx_stats.twq.lso;
+ tx->hw_gso_wire_packets = fbn->tx_stats.twq.lso;
+ tx->stop = fbn->tx_stats.twq.stop;
+ tx->wake = fbn->tx_stats.twq.wake;
rx->bytes = fbn->rx_stats.bytes;
rx->packets = fbn->rx_stats.packets;
+ rx->alloc_fail = fbn->rx_stats.rx.alloc_failed;
+ rx->csum_complete = fbn->rx_stats.rx.csum_complete;
+ rx->csum_none = fbn->rx_stats.rx.csum_none;
}
static const struct netdev_stat_ops fbnic_stat_ops = {
@@ -588,7 +611,7 @@ void fbnic_netdev_free(struct fbnic_dev *fbd)
* Allocate and initialize the netdev and netdev private structure. Bind
* together the hardware, netdev, and pci data structures.
*
- * Return: 0 on success, negative on failure
+ * Return: Pointer to net_device on success, NULL on failure
**/
struct net_device *fbnic_netdev_alloc(struct fbnic_dev *fbd)
{
@@ -618,6 +641,10 @@ struct net_device *fbnic_netdev_alloc(struct fbnic_dev *fbd)
fbn->ppq_size = FBNIC_PPQ_SIZE_DEFAULT;
fbn->rcq_size = FBNIC_RCQ_SIZE_DEFAULT;
+ fbn->tx_usecs = FBNIC_TX_USECS_DEFAULT;
+ fbn->rx_usecs = FBNIC_RX_USECS_DEFAULT;
+ fbn->rx_max_frames = FBNIC_RX_FRAMES_DEFAULT;
+
default_queues = netif_get_num_default_rss_queues();
if (default_queues > fbd->max_num_queues)
default_queues = fbd->max_num_queues;
@@ -630,15 +657,30 @@ struct net_device *fbnic_netdev_alloc(struct fbnic_dev *fbd)
netdev->priv_flags |= IFF_UNICAST_FLT;
+ netdev->gso_partial_features =
+ NETIF_F_GSO_GRE |
+ NETIF_F_GSO_GRE_CSUM |
+ NETIF_F_GSO_IPXIP4 |
+ NETIF_F_GSO_UDP_TUNNEL |
+ NETIF_F_GSO_UDP_TUNNEL_CSUM;
+
netdev->features |=
+ netdev->gso_partial_features |
+ FBNIC_TUN_GSO_FEATURES |
NETIF_F_RXHASH |
NETIF_F_SG |
NETIF_F_HW_CSUM |
- NETIF_F_RXCSUM;
+ NETIF_F_RXCSUM |
+ NETIF_F_TSO |
+ NETIF_F_TSO_ECN |
+ NETIF_F_TSO6 |
+ NETIF_F_GSO_PARTIAL |
+ NETIF_F_GSO_UDP_L4;
netdev->hw_features |= netdev->features;
netdev->vlan_features |= netdev->features;
netdev->hw_enc_features |= netdev->features;
+ netdev->features |= NETIF_F_NTUPLE;
netdev->min_mtu = IPV6_MIN_MTU;
netdev->max_mtu = FBNIC_MAX_JUMBO_FRAME_SIZE - ETH_HLEN;
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_netdev.h b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.h
index a392ac1cc4f2..561837e80ec8 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_netdev.h
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.h
@@ -12,6 +12,10 @@
#include "fbnic_txrx.h"
#define FBNIC_MAX_NAPI_VECTORS 128u
+#define FBNIC_MIN_RXD_PER_FRAME 2
+
+/* Natively supported tunnel GSO features (not thru GSO_PARTIAL) */
+#define FBNIC_TUN_GSO_FEATURES NETIF_F_GSO_IPXIP6
struct fbnic_net {
struct fbnic_ring *tx[FBNIC_MAX_TXQS];
@@ -27,6 +31,11 @@ struct fbnic_net {
u32 ppq_size;
u32 rcq_size;
+ u16 rx_usecs;
+ u16 tx_usecs;
+
+ u32 rx_max_frames;
+
u16 num_napi;
struct phylink *phylink;
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_phylink.c b/drivers/net/ethernet/meta/fbnic/fbnic_phylink.c
index bb11fc83367d..860b02b22c15 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_phylink.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_phylink.c
@@ -133,7 +133,6 @@ int fbnic_phylink_init(struct net_device *netdev)
struct fbnic_net *fbn = netdev_priv(netdev);
struct phylink *phylink;
- fbn->phylink_pcs.neg_mode = true;
fbn->phylink_pcs.ops = &fbnic_phylink_pcs_ops;
fbn->phylink_config.dev = &netdev->dev;
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_rpc.c b/drivers/net/ethernet/meta/fbnic/fbnic_rpc.c
index c25bd300b902..8ff07b5562e3 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_rpc.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_rpc.c
@@ -3,6 +3,7 @@
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
+#include <net/ipv6.h>
#include "fbnic.h"
#include "fbnic_netdev.h"
@@ -60,7 +61,7 @@ void fbnic_rss_disable_hw(struct fbnic_dev *fbd)
#define FBNIC_FH_2_RSSEM_BIT(_fh, _rssem, _val) \
FIELD_PREP(FBNIC_RPC_ACT_TBL1_RSS_ENA_##_rssem, \
FIELD_GET(RXH_##_fh, _val))
-static u16 fbnic_flow_hash_2_rss_en_mask(struct fbnic_net *fbn, int flow_type)
+u16 fbnic_flow_hash_2_rss_en_mask(struct fbnic_net *fbn, int flow_type)
{
u32 flow_hash = fbn->rss_flow_hash[flow_type];
u32 rss_en_mask = 0;
@@ -698,6 +699,359 @@ void fbnic_write_tce_tcam(struct fbnic_dev *fbd)
__fbnic_write_tce_tcam(fbd);
}
+struct fbnic_ip_addr *__fbnic_ip4_sync(struct fbnic_dev *fbd,
+ struct fbnic_ip_addr *ip_addr,
+ const struct in_addr *addr,
+ const struct in_addr *mask)
+{
+ struct fbnic_ip_addr *avail_addr = NULL;
+ unsigned int i;
+
+ /* Scan from top of list to bottom, filling bottom up. */
+ for (i = 0; i < FBNIC_RPC_TCAM_IP_ADDR_NUM_ENTRIES; i++, ip_addr++) {
+ struct in6_addr *m = &ip_addr->mask;
+
+ if (ip_addr->state == FBNIC_TCAM_S_DISABLED) {
+ avail_addr = ip_addr;
+ continue;
+ }
+
+ if (ip_addr->version != 4)
+ continue;
+
+ /* Drop avail_addr if mask is a subset of our current mask.
+ * This prevents us from inserting a longer prefix behind a
+ * shorter one.
+ *
+ * The mask is stored as an inverted value, so as an example:
+ * m ffff ffff ffff ffff ffff ffff ffff 0000 0000
+ * mask 0000 0000 0000 0000 0000 0000 0000 ffff ffff
+ *
+ * "m" and "mask" represent a typical IPv4 mask as stored in
+ * the TCAM and as provided by the stack. The code below
+ * should return a non-zero result if there is a 0 stored
+ * anywhere in "m" where "mask" has a 0.
+ */
+ if (~m->s6_addr32[3] & ~mask->s_addr) {
+ avail_addr = NULL;
+ continue;
+ }
+
+ /* Check to see if the mask actually contains fewer bits than
+ * our new mask "m". The XOR below should only result in 0 if
+ * "m" is masking a bit that we are looking for in our new
+ * "mask", we eliminated the 0^0 case with the check above.
+ *
+ * If it contains fewer bits we need to stop here, otherwise
+ * we might be adding an unreachable rule.
+ */
+ if (~(m->s6_addr32[3] ^ mask->s_addr))
+ break;
+
+ if (ip_addr->value.s6_addr32[3] == addr->s_addr) {
+ avail_addr = ip_addr;
+ break;
+ }
+ }
+
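+ /* Claim the free slot: store the address and the inverted mask, then mark the entry for addition */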
+ if (avail_addr && avail_addr->state == FBNIC_TCAM_S_DISABLED) {
+ ipv6_addr_set(&avail_addr->value, 0, 0, 0, addr->s_addr);
+ ipv6_addr_set(&avail_addr->mask, htonl(~0), htonl(~0),
+ htonl(~0), ~mask->s_addr);
+ avail_addr->version = 4;
+
+ avail_addr->state = FBNIC_TCAM_S_ADD;
+ }
+
+ return avail_addr;
+}
+
+struct fbnic_ip_addr *__fbnic_ip6_sync(struct fbnic_dev *fbd,
+ struct fbnic_ip_addr *ip_addr,
+ const struct in6_addr *addr,
+ const struct in6_addr *mask)
+{
+ struct fbnic_ip_addr *avail_addr = NULL;
+ unsigned int i;
+
+ ip_addr = &ip_addr[FBNIC_RPC_TCAM_IP_ADDR_NUM_ENTRIES - 1];
+
+ /* Scan from bottom of list to top, filling top down. */
+ for (i = FBNIC_RPC_TCAM_IP_ADDR_NUM_ENTRIES; i--; ip_addr--) {
+ struct in6_addr *m = &ip_addr->mask;
+
+ if (ip_addr->state == FBNIC_TCAM_S_DISABLED) {
+ avail_addr = ip_addr;
+ continue;
+ }
+
+ if (ip_addr->version != 6)
+ continue;
+
+ /* Drop avail_addr if mask is a superset of our current mask.
+ * This prevents us from inserting a longer prefix behind a
+ * shorter one.
+ *
+ * The mask is stored as an inverted value, so as an example:
+ * m 0000 0000 0000 0000 0000 0000 0000 0000 0000
+ * mask ffff ffff ffff ffff ffff ffff ffff ffff ffff
+ *
+ * "m" and "mask" represent a typical IPv6 mask as stored in
+ * the TCAM and as provided by the stack. The code below
+ * should return a non-zero result, which causes us to drop
+ * any cached avail_addr so that we do not place a v6
+ * address behind it.
+ */
+ if ((m->s6_addr32[0] & mask->s6_addr32[0]) |
+ (m->s6_addr32[1] & mask->s6_addr32[1]) |
+ (m->s6_addr32[2] & mask->s6_addr32[2]) |
+ (m->s6_addr32[3] & mask->s6_addr32[3])) {
+ avail_addr = NULL;
+ continue;
+ }
+
+ /* The previous test eliminated any overlap between the
+ * two values, so now we need to check for gaps.
+ *
+ * If the mask is equal to our current mask then m ^ mask
+ * should result in all ffff; if, however, the value stored
+ * in m is larger, we should see a 0 appear somewhere in
+ * the result.
+ */
+ if (~(m->s6_addr32[0] ^ mask->s6_addr32[0]) |
+ ~(m->s6_addr32[1] ^ mask->s6_addr32[1]) |
+ ~(m->s6_addr32[2] ^ mask->s6_addr32[2]) |
+ ~(m->s6_addr32[3] ^ mask->s6_addr32[3]))
+ break;
+
+ if (ipv6_addr_cmp(&ip_addr->value, addr))
+ continue;
+
+ avail_addr = ip_addr;
+ break;
+ }
+
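+ /* Claim the free slot with the v6 address and inverted mask, then flag it for addition */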
+ if (avail_addr && avail_addr->state == FBNIC_TCAM_S_DISABLED) {
+ memcpy(&avail_addr->value, addr, sizeof(*addr));
+ ipv6_addr_set(&avail_addr->mask,
+ ~mask->s6_addr32[0], ~mask->s6_addr32[1],
+ ~mask->s6_addr32[2], ~mask->s6_addr32[3]);
+ avail_addr->version = 6;
+
+ avail_addr->state = FBNIC_TCAM_S_ADD;
+ }
+
+ return avail_addr;
+}
+
+int __fbnic_ip_unsync(struct fbnic_ip_addr *ip_addr, unsigned int tcam_idx)
+{
+ if (!test_and_clear_bit(tcam_idx, ip_addr->act_tcam))
+ return -ENOENT;
+
+ if (bitmap_empty(ip_addr->act_tcam, FBNIC_RPC_TCAM_ACT_NUM_ENTRIES))
+ ip_addr->state = FBNIC_TCAM_S_DELETE;
+
+ return 0;
+}
+
+static void fbnic_clear_ip_src_entry(struct fbnic_dev *fbd, unsigned int idx)
+{
+ int i;
+
+ /* Invalidate entry and clear addr state info */
+ for (i = 0; i <= FBNIC_RPC_TCAM_IP_ADDR_WORD_LEN; i++)
+ wr32(fbd, FBNIC_RPC_TCAM_IPSRC(idx, i), 0);
+}
+
+static void fbnic_clear_ip_dst_entry(struct fbnic_dev *fbd, unsigned int idx)
+{
+ int i;
+
+ /* Invalidate entry and clear addr state info */
+ for (i = 0; i <= FBNIC_RPC_TCAM_IP_ADDR_WORD_LEN; i++)
+ wr32(fbd, FBNIC_RPC_TCAM_IPDST(idx, i), 0);
+}
+
+static void fbnic_clear_ip_outer_src_entry(struct fbnic_dev *fbd,
+ unsigned int idx)
+{
+ int i;
+
+ /* Invalidate entry and clear addr state info */
+ for (i = 0; i <= FBNIC_RPC_TCAM_IP_ADDR_WORD_LEN; i++)
+ wr32(fbd, FBNIC_RPC_TCAM_OUTER_IPSRC(idx, i), 0);
+}
+
+static void fbnic_clear_ip_outer_dst_entry(struct fbnic_dev *fbd,
+ unsigned int idx)
+{
+ int i;
+
+ /* Invalidate entry and clear addr state info */
+ for (i = 0; i <= FBNIC_RPC_TCAM_IP_ADDR_WORD_LEN; i++)
+ wr32(fbd, FBNIC_RPC_TCAM_OUTER_IPDST(idx, i), 0);
+}
+
+static void fbnic_write_ip_src_entry(struct fbnic_dev *fbd, unsigned int idx,
+ struct fbnic_ip_addr *ip_addr)
+{
+ __be16 *mask, *value;
+ int i;
+
+ mask = &ip_addr->mask.s6_addr16[FBNIC_RPC_TCAM_IP_ADDR_WORD_LEN - 1];
+ value = &ip_addr->value.s6_addr16[FBNIC_RPC_TCAM_IP_ADDR_WORD_LEN - 1];
+
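+ /* Registers are filled low index first, walking backward from the last 16-bit word of the address */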
+ for (i = 0; i < FBNIC_RPC_TCAM_IP_ADDR_WORD_LEN; i++)
+ wr32(fbd, FBNIC_RPC_TCAM_IPSRC(idx, i),
+ FIELD_PREP(FBNIC_RPC_TCAM_IP_ADDR_MASK, ntohs(*mask--)) |
+ FIELD_PREP(FBNIC_RPC_TCAM_IP_ADDR_VALUE, ntohs(*value--)));
+ wrfl(fbd);
+
+ /* Bit 129 is used to flag for v4/v6 */
+ wr32(fbd, FBNIC_RPC_TCAM_IPSRC(idx, i),
+ (ip_addr->version == 6) | FBNIC_RPC_TCAM_VALIDATE);
+}
+
+static void fbnic_write_ip_dst_entry(struct fbnic_dev *fbd, unsigned int idx,
+ struct fbnic_ip_addr *ip_addr)
+{
+ __be16 *mask, *value;
+ int i;
+
+ mask = &ip_addr->mask.s6_addr16[FBNIC_RPC_TCAM_IP_ADDR_WORD_LEN - 1];
+ value = &ip_addr->value.s6_addr16[FBNIC_RPC_TCAM_IP_ADDR_WORD_LEN - 1];
+
+ for (i = 0; i < FBNIC_RPC_TCAM_IP_ADDR_WORD_LEN; i++)
+ wr32(fbd, FBNIC_RPC_TCAM_IPDST(idx, i),
+ FIELD_PREP(FBNIC_RPC_TCAM_IP_ADDR_MASK, ntohs(*mask--)) |
+ FIELD_PREP(FBNIC_RPC_TCAM_IP_ADDR_VALUE, ntohs(*value--)));
+ wrfl(fbd);
+
+ /* Bit 129 is used to flag for v4/v6 */
+ wr32(fbd, FBNIC_RPC_TCAM_IPDST(idx, i),
+ (ip_addr->version == 6) | FBNIC_RPC_TCAM_VALIDATE);
+}
+
+static void fbnic_write_ip_outer_src_entry(struct fbnic_dev *fbd,
+ unsigned int idx,
+ struct fbnic_ip_addr *ip_addr)
+{
+ __be16 *mask, *value;
+ int i;
+
+ mask = &ip_addr->mask.s6_addr16[FBNIC_RPC_TCAM_IP_ADDR_WORD_LEN - 1];
+ value = &ip_addr->value.s6_addr16[FBNIC_RPC_TCAM_IP_ADDR_WORD_LEN - 1];
+
+ for (i = 0; i < FBNIC_RPC_TCAM_IP_ADDR_WORD_LEN; i++)
+ wr32(fbd, FBNIC_RPC_TCAM_OUTER_IPSRC(idx, i),
+ FIELD_PREP(FBNIC_RPC_TCAM_IP_ADDR_MASK, ntohs(*mask--)) |
+ FIELD_PREP(FBNIC_RPC_TCAM_IP_ADDR_VALUE, ntohs(*value--)));
+ wrfl(fbd);
+
+ wr32(fbd, FBNIC_RPC_TCAM_OUTER_IPSRC(idx, i), FBNIC_RPC_TCAM_VALIDATE);
+}
+
+static void fbnic_write_ip_outer_dst_entry(struct fbnic_dev *fbd,
+ unsigned int idx,
+ struct fbnic_ip_addr *ip_addr)
+{
+ __be16 *mask, *value;
+ int i;
+
+ mask = &ip_addr->mask.s6_addr16[FBNIC_RPC_TCAM_IP_ADDR_WORD_LEN - 1];
+ value = &ip_addr->value.s6_addr16[FBNIC_RPC_TCAM_IP_ADDR_WORD_LEN - 1];
+
+ for (i = 0; i < FBNIC_RPC_TCAM_IP_ADDR_WORD_LEN; i++)
+ wr32(fbd, FBNIC_RPC_TCAM_OUTER_IPDST(idx, i),
+ FIELD_PREP(FBNIC_RPC_TCAM_IP_ADDR_MASK, ntohs(*mask--)) |
+ FIELD_PREP(FBNIC_RPC_TCAM_IP_ADDR_VALUE, ntohs(*value--)));
+ wrfl(fbd);
+
+ wr32(fbd, FBNIC_RPC_TCAM_OUTER_IPDST(idx, i), FBNIC_RPC_TCAM_VALIDATE);
+}
+
+void fbnic_write_ip_addr(struct fbnic_dev *fbd)
+{
+ int idx;
+
+ for (idx = ARRAY_SIZE(fbd->ip_src); idx--;) {
+ struct fbnic_ip_addr *ip_addr = &fbd->ip_src[idx];
+
+ /* Skip entries that do not have the update flag set. */
+ if (!(ip_addr->state & FBNIC_TCAM_S_UPDATE))
+ continue;
+
+ /* Clear by writing 0s. */
+ if (ip_addr->state == FBNIC_TCAM_S_DELETE) {
+ /* Invalidate entry and clear addr state info */
+ fbnic_clear_ip_src_entry(fbd, idx);
+ memset(ip_addr, 0, sizeof(*ip_addr));
+
+ continue;
+ }
+
+ fbnic_write_ip_src_entry(fbd, idx, ip_addr);
+
+ ip_addr->state = FBNIC_TCAM_S_VALID;
+ }
+
+ /* Repeat process for other IP TCAMs */
+ for (idx = ARRAY_SIZE(fbd->ip_dst); idx--;) {
+ struct fbnic_ip_addr *ip_addr = &fbd->ip_dst[idx];
+
+ if (!(ip_addr->state & FBNIC_TCAM_S_UPDATE))
+ continue;
+
+ if (ip_addr->state == FBNIC_TCAM_S_DELETE) {
+ fbnic_clear_ip_dst_entry(fbd, idx);
+ memset(ip_addr, 0, sizeof(*ip_addr));
+
+ continue;
+ }
+
+ fbnic_write_ip_dst_entry(fbd, idx, ip_addr);
+
+ ip_addr->state = FBNIC_TCAM_S_VALID;
+ }
+
+ for (idx = ARRAY_SIZE(fbd->ipo_src); idx--;) {
+ struct fbnic_ip_addr *ip_addr = &fbd->ipo_src[idx];
+
+ if (!(ip_addr->state & FBNIC_TCAM_S_UPDATE))
+ continue;
+
+ if (ip_addr->state == FBNIC_TCAM_S_DELETE) {
+ fbnic_clear_ip_outer_src_entry(fbd, idx);
+ memset(ip_addr, 0, sizeof(*ip_addr));
+
+ continue;
+ }
+
+ fbnic_write_ip_outer_src_entry(fbd, idx, ip_addr);
+
+ ip_addr->state = FBNIC_TCAM_S_VALID;
+ }
+
+ for (idx = ARRAY_SIZE(fbd->ipo_dst); idx--;) {
+ struct fbnic_ip_addr *ip_addr = &fbd->ipo_dst[idx];
+
+ if (!(ip_addr->state & FBNIC_TCAM_S_UPDATE))
+ continue;
+
+ if (ip_addr->state == FBNIC_TCAM_S_DELETE) {
+ fbnic_clear_ip_outer_dst_entry(fbd, idx);
+ memset(ip_addr, 0, sizeof(*ip_addr));
+
+ continue;
+ }
+
+ fbnic_write_ip_outer_dst_entry(fbd, idx, ip_addr);
+
+ ip_addr->state = FBNIC_TCAM_S_VALID;
+ }
+}
+
void fbnic_clear_rules(struct fbnic_dev *fbd)
{
u32 dest = FIELD_PREP(FBNIC_RPC_ACT_TBL0_DEST_MASK,
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_rpc.h b/drivers/net/ethernet/meta/fbnic/fbnic_rpc.h
index 0d8285fa5b45..6892414195c3 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_rpc.h
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_rpc.h
@@ -7,6 +7,8 @@
#include <uapi/linux/in6.h>
#include <linux/bitfield.h>
+struct in_addr;
+
/* The TCAM state definitions follow an expected ordering.
* They start out disabled, then move through the following states:
* Disabled 0 -> Add 2
@@ -32,6 +34,12 @@ enum {
#define FBNIC_RPC_TCAM_MACDA_WORD_LEN 3
#define FBNIC_RPC_TCAM_MACDA_NUM_ENTRIES 32
+/* 8 IPSRC and 8 IPDST TCAM entries,
+ * each spanning 8 registers plus a validate register
+ */
+#define FBNIC_RPC_TCAM_IP_ADDR_WORD_LEN 8
+#define FBNIC_RPC_TCAM_IP_ADDR_NUM_ENTRIES 8
+
#define FBNIC_RPC_TCAM_ACT_WORD_LEN 11
#define FBNIC_RPC_TCAM_ACT_NUM_ENTRIES 64
@@ -47,6 +55,13 @@ struct fbnic_mac_addr {
DECLARE_BITMAP(act_tcam, FBNIC_RPC_TCAM_ACT_NUM_ENTRIES);
};
+struct fbnic_ip_addr {
+ struct in6_addr mask, value;
+ unsigned char version;
+ unsigned char state;
+ DECLARE_BITMAP(act_tcam, FBNIC_RPC_TCAM_ACT_NUM_ENTRIES);
+};
+
struct fbnic_act_tcam {
struct {
u16 tcam[FBNIC_RPC_TCAM_ACT_WORD_LEN];
@@ -81,6 +96,11 @@ enum {
#define FBNIC_RPC_ACT_TBL_BMC_OFFSET 0
#define FBNIC_RPC_ACT_TBL_BMC_ALL_MULTI_OFFSET 1
+/* This should leave us with 48 total entries in the TCAM that can be used
+ * for NFC after also deducting the 14 needed for RSS table programming.
+ */
+#define FBNIC_RPC_ACT_TBL_NFC_OFFSET 2
+
/* We reserve the last 14 entries for RSS rules on the host. The BMC
* unicast rule will need to be populated above these and is expected to
* use MACDA TCAM entry 23 to store the BMC MAC address.
@@ -88,6 +108,9 @@ enum {
#define FBNIC_RPC_ACT_TBL_RSS_OFFSET \
(FBNIC_RPC_ACT_TBL_NUM_ENTRIES - FBNIC_RSS_EN_NUM_ENTRIES)
+#define FBNIC_RPC_ACT_TBL_NFC_ENTRIES \
+ (FBNIC_RPC_ACT_TBL_RSS_OFFSET - FBNIC_RPC_ACT_TBL_NFC_OFFSET)
+
/* Flags used to identify the owner for this MAC filter. Note that any
* flags set for Broadcast thru Promisc indicate that the rule belongs
* to the RSS filters for the host.
@@ -168,6 +191,7 @@ void fbnic_rss_init_en_mask(struct fbnic_net *fbn);
void fbnic_rss_disable_hw(struct fbnic_dev *fbd);
void fbnic_rss_reinit_hw(struct fbnic_dev *fbd, struct fbnic_net *fbn);
void fbnic_rss_reinit(struct fbnic_dev *fbd, struct fbnic_net *fbn);
+u16 fbnic_flow_hash_2_rss_en_mask(struct fbnic_net *fbn, int flow_type);
int __fbnic_xc_unsync(struct fbnic_mac_addr *mac_addr, unsigned int tcam_idx);
struct fbnic_mac_addr *__fbnic_uc_sync(struct fbnic_dev *fbd,
@@ -177,6 +201,17 @@ struct fbnic_mac_addr *__fbnic_mc_sync(struct fbnic_dev *fbd,
void fbnic_sift_macda(struct fbnic_dev *fbd);
void fbnic_write_macda(struct fbnic_dev *fbd);
+struct fbnic_ip_addr *__fbnic_ip4_sync(struct fbnic_dev *fbd,
+ struct fbnic_ip_addr *ip_addr,
+ const struct in_addr *addr,
+ const struct in_addr *mask);
+struct fbnic_ip_addr *__fbnic_ip6_sync(struct fbnic_dev *fbd,
+ struct fbnic_ip_addr *ip_addr,
+ const struct in6_addr *addr,
+ const struct in6_addr *mask);
+int __fbnic_ip_unsync(struct fbnic_ip_addr *ip_addr, unsigned int tcam_idx);
+void fbnic_write_ip_addr(struct fbnic_dev *fbd);
+
static inline int __fbnic_uc_unsync(struct fbnic_mac_addr *mac_addr)
{
return __fbnic_xc_unsync(mac_addr, FBNIC_MAC_ADDR_T_UNICAST);
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_tlv.c b/drivers/net/ethernet/meta/fbnic/fbnic_tlv.c
index 2a174ab062a3..517ed8b2f1cb 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_tlv.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_tlv.c
@@ -196,13 +196,17 @@ int fbnic_tlv_attr_put_string(struct fbnic_tlv_msg *msg, u16 attr_id,
/**
* fbnic_tlv_attr_get_unsigned - Retrieve unsigned value from result
* @attr: Attribute to retrieve data from
+ * @def: The default value if attr is NULL
*
* Return: unsigned 64b value containing integer value
**/
-u64 fbnic_tlv_attr_get_unsigned(struct fbnic_tlv_msg *attr)
+u64 fbnic_tlv_attr_get_unsigned(struct fbnic_tlv_msg *attr, u64 def)
{
__le64 le64_value = 0;
+ if (!attr)
+ return def;
+
memcpy(&le64_value, &attr->value[0],
le16_to_cpu(attr->hdr.len) - sizeof(*attr));
@@ -212,15 +216,21 @@ u64 fbnic_tlv_attr_get_unsigned(struct fbnic_tlv_msg *attr)
/**
* fbnic_tlv_attr_get_signed - Retrieve signed value from result
* @attr: Attribute to retrieve data from
+ * @def: The default value if attr is NULL
*
* Return: signed 64b value containing integer value
**/
-s64 fbnic_tlv_attr_get_signed(struct fbnic_tlv_msg *attr)
+s64 fbnic_tlv_attr_get_signed(struct fbnic_tlv_msg *attr, s64 def)
{
- int shift = (8 + sizeof(*attr) - le16_to_cpu(attr->hdr.len)) * 8;
__le64 le64_value = 0;
+ int shift;
s64 value;
+ if (!attr)
+ return def;
+
+ shift = (8 + sizeof(*attr) - le16_to_cpu(attr->hdr.len)) * 8;
+
/* Copy the value and adjust for byte ordering */
memcpy(&le64_value, &attr->value[0],
le16_to_cpu(attr->hdr.len) - sizeof(*attr));
@@ -233,19 +243,40 @@ s64 fbnic_tlv_attr_get_signed(struct fbnic_tlv_msg *attr)
/**
* fbnic_tlv_attr_get_string - Retrieve string value from result
* @attr: Attribute to retrieve data from
- * @str: Pointer to an allocated string to store the data
- * @max_size: The maximum size which can be in str
+ * @dst: Pointer to an allocated string to store the data
+ * @dstsize: The maximum size which can be in dst
*
- * Return: the size of the string read from firmware
+ * Return: the size of the string read from firmware or negative error.
**/
-size_t fbnic_tlv_attr_get_string(struct fbnic_tlv_msg *attr, char *str,
- size_t max_size)
+ssize_t fbnic_tlv_attr_get_string(struct fbnic_tlv_msg *attr, char *dst,
+ size_t dstsize)
{
- max_size = min_t(size_t, max_size,
- (le16_to_cpu(attr->hdr.len) * 4) - sizeof(*attr));
- memcpy(str, &attr->value, max_size);
+ size_t srclen, len;
+ ssize_t ret;
+
+ if (!attr)
+ return -EINVAL;
+
+ if (dstsize == 0)
+ return -E2BIG;
+
+ srclen = le16_to_cpu(attr->hdr.len) - sizeof(*attr);
+ if (srclen > 0 && ((char *)attr->value)[srclen - 1] == '\0')
+ srclen--;
+
+ if (srclen >= dstsize) {
+ len = dstsize - 1;
+ ret = -E2BIG;
+ } else {
+ len = srclen;
+ ret = len;
+ }
+
+ memcpy(dst, &attr->value, len);
+ /* Zero pad end of dst. */
+ memset(dst + len, 0, dstsize - len);
- return max_size;
+ return ret;
}
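
A small userspace sketch (not driver code) of the copy semantics implemented above: a trailing NUL in the source is dropped from the length, the copy is truncated to dstsize - 1 with -E2BIG reported on overflow, and the rest of dst is zero-padded so the result is always NUL terminated.

	#include <errno.h>
	#include <stdio.h>
	#include <string.h>
	#include <sys/types.h>

	static ssize_t tlv_copy_string(char *dst, size_t dstsize,
				       const char *src, size_t srclen)
	{
		size_t len;
		ssize_t ret;

		if (!dstsize)
			return -E2BIG;

		/* Don't count a trailing NUL already present in the source */
		if (srclen > 0 && src[srclen - 1] == '\0')
			srclen--;

		if (srclen >= dstsize) {
			len = dstsize - 1;
			ret = -E2BIG;
		} else {
			len = srclen;
			ret = (ssize_t)len;
		}

		memcpy(dst, src, len);
		memset(dst + len, 0, dstsize - len);	/* always NUL terminated */
		return ret;
	}

	int main(void)
	{
		char buf[8];

		printf("%zd '%s'\n", tlv_copy_string(buf, sizeof(buf), "short", 5), buf);
		printf("%zd '%s'\n", tlv_copy_string(buf, sizeof(buf), "much-too-long", 13), buf);
		return 0;
	}
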
/**
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_tlv.h b/drivers/net/ethernet/meta/fbnic/fbnic_tlv.h
index 67300ab44353..c34bf87eeec9 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_tlv.h
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_tlv.h
@@ -114,34 +114,10 @@ static inline bool fbnic_tlv_attr_get_bool(struct fbnic_tlv_msg *attr)
return !!attr;
}
-u64 fbnic_tlv_attr_get_unsigned(struct fbnic_tlv_msg *attr);
-s64 fbnic_tlv_attr_get_signed(struct fbnic_tlv_msg *attr);
-size_t fbnic_tlv_attr_get_string(struct fbnic_tlv_msg *attr, char *str,
- size_t max_size);
-
-#define get_unsigned_result(id, location) \
-do { \
- struct fbnic_tlv_msg *result = results[id]; \
- if (result) \
- location = fbnic_tlv_attr_get_unsigned(result); \
-} while (0)
-
-#define get_signed_result(id, location) \
-do { \
- struct fbnic_tlv_msg *result = results[id]; \
- if (result) \
- location = fbnic_tlv_attr_get_signed(result); \
-} while (0)
-
-#define get_string_result(id, size, str, max_size) \
-do { \
- struct fbnic_tlv_msg *result = results[id]; \
- if (result) \
- size = fbnic_tlv_attr_get_string(result, str, max_size); \
-} while (0)
-
-#define get_bool(id) (!!(results[id]))
-
+u64 fbnic_tlv_attr_get_unsigned(struct fbnic_tlv_msg *attr, u64 def);
+s64 fbnic_tlv_attr_get_signed(struct fbnic_tlv_msg *attr, s64 def);
+ssize_t fbnic_tlv_attr_get_string(struct fbnic_tlv_msg *attr, char *dst,
+ size_t dstsize);
struct fbnic_tlv_msg *fbnic_tlv_msg_alloc(u16 msg_id);
int fbnic_tlv_attr_put_flag(struct fbnic_tlv_msg *msg, const u16 attr_id);
int fbnic_tlv_attr_put_value(struct fbnic_tlv_msg *msg, const u16 attr_id,
@@ -170,6 +146,13 @@ int fbnic_tlv_msg_parse(void *opaque, struct fbnic_tlv_msg *msg,
const struct fbnic_tlv_parser *parser);
int fbnic_tlv_parser_error(void *opaque, struct fbnic_tlv_msg **results);
+#define fta_get_uint(_results, _id) \
+ fbnic_tlv_attr_get_unsigned(_results[_id], 0)
+#define fta_get_sint(_results, _id) \
+ fbnic_tlv_attr_get_signed(_results[_id], 0)
+#define fta_get_str(_results, _id, _dst, _dstsize) \
+ fbnic_tlv_attr_get_string(_results[_id], _dst, _dstsize)
+
#define FBNIC_TLV_MSG_ERROR \
FBNIC_TLV_PARSER(UNKNOWN, NULL, fbnic_tlv_parser_error)
#endif /* _FBNIC_TLV_H_ */
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c
index d4d7027df9a0..ac11389a764c 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c
@@ -6,6 +6,7 @@
#include <linux/pci.h>
#include <net/netdev_queues.h>
#include <net/page_pool/helpers.h>
+#include <net/tcp.h>
#include "fbnic.h"
#include "fbnic_csr.h"
@@ -18,6 +19,7 @@ enum {
struct fbnic_xmit_cb {
u32 bytecount;
+ u16 gso_segs;
u8 desc_count;
u8 flags;
int hw_head;
@@ -113,6 +115,11 @@ static int fbnic_maybe_stop_tx(const struct net_device *dev,
res = netif_txq_maybe_stop(txq, fbnic_desc_unused(ring), size,
FBNIC_TX_DESC_WAKEUP);
+ if (!res) {
+ u64_stats_update_begin(&ring->stats.syncp);
+ ring->stats.twq.stop++;
+ u64_stats_update_end(&ring->stats.syncp);
+ }
return !res;
}
@@ -174,8 +181,72 @@ static bool fbnic_tx_tstamp(struct sk_buff *skb)
}
static bool
+fbnic_tx_lso(struct fbnic_ring *ring, struct sk_buff *skb,
+ struct skb_shared_info *shinfo, __le64 *meta,
+ unsigned int *l2len, unsigned int *i3len)
+{
+ unsigned int l3_type, l4_type, l4len, hdrlen;
+ unsigned char *l4hdr;
+ __be16 payload_len;
+
+ if (unlikely(skb_cow_head(skb, 0)))
+ return true;
+
+ if (shinfo->gso_type & SKB_GSO_PARTIAL) {
+ l3_type = FBNIC_TWD_L3_TYPE_OTHER;
+ } else if (!skb->encapsulation) {
+ if (ip_hdr(skb)->version == 4)
+ l3_type = FBNIC_TWD_L3_TYPE_IPV4;
+ else
+ l3_type = FBNIC_TWD_L3_TYPE_IPV6;
+ } else {
+ unsigned int o3len;
+
+ o3len = skb_inner_network_header(skb) - skb_network_header(skb);
+ *i3len -= o3len;
+ *meta |= cpu_to_le64(FIELD_PREP(FBNIC_TWD_L3_OHLEN_MASK,
+ o3len / 2));
+ l3_type = FBNIC_TWD_L3_TYPE_V6V6;
+ }
+
+ l4hdr = skb_checksum_start(skb);
+ payload_len = cpu_to_be16(skb->len - (l4hdr - skb->data));
+
+ if (shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
+ struct tcphdr *tcph = (struct tcphdr *)l4hdr;
+
+ l4_type = FBNIC_TWD_L4_TYPE_TCP;
+ l4len = __tcp_hdrlen((struct tcphdr *)l4hdr);
+ csum_replace_by_diff(&tcph->check, (__force __wsum)payload_len);
+ } else {
+ struct udphdr *udph = (struct udphdr *)l4hdr;
+
+ l4_type = FBNIC_TWD_L4_TYPE_UDP;
+ l4len = sizeof(struct udphdr);
+ csum_replace_by_diff(&udph->check, (__force __wsum)payload_len);
+ }
+
+ hdrlen = (l4hdr - skb->data) + l4len;
+ *meta |= cpu_to_le64(FIELD_PREP(FBNIC_TWD_L3_TYPE_MASK, l3_type) |
+ FIELD_PREP(FBNIC_TWD_L4_TYPE_MASK, l4_type) |
+ FIELD_PREP(FBNIC_TWD_L4_HLEN_MASK, l4len / 4) |
+ FIELD_PREP(FBNIC_TWD_MSS_MASK, shinfo->gso_size) |
+ FBNIC_TWD_FLAG_REQ_LSO);
+
+ FBNIC_XMIT_CB(skb)->bytecount += (shinfo->gso_segs - 1) * hdrlen;
+ FBNIC_XMIT_CB(skb)->gso_segs = shinfo->gso_segs;
+
+ u64_stats_update_begin(&ring->stats.syncp);
+ ring->stats.twq.lso += shinfo->gso_segs;
+ u64_stats_update_end(&ring->stats.syncp);
+
+ return false;
+}
+
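
The bytecount adjustment above exists because the device replicates the protocol headers in front of every LSO segment, so the completion byte count must grow by one header per extra segment to reflect what actually goes on the wire. A quick standalone sketch of that arithmetic (sizes are illustrative only):

	#include <stdio.h>

	static unsigned int lso_wire_bytes(unsigned int skb_len,
					   unsigned int hdrlen,
					   unsigned int gso_segs)
	{
		/* skb_len already counts one copy of the headers */
		return skb_len + (gso_segs - 1) * hdrlen;
	}

	int main(void)
	{
		/* e.g. 64kB of TCP payload, 66-byte headers, MSS 1448 */
		unsigned int segs = (65536 + 1447) / 1448;

		printf("segments=%u wire bytes=%u\n",
		       segs, lso_wire_bytes(65536 + 66, 66, segs));
		return 0;
	}
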
+static bool
fbnic_tx_offloads(struct fbnic_ring *ring, struct sk_buff *skb, __le64 *meta)
{
+ struct skb_shared_info *shinfo = skb_shinfo(skb);
unsigned int l2len, i3len;
if (fbnic_tx_tstamp(skb))
@@ -190,7 +261,15 @@ fbnic_tx_offloads(struct fbnic_ring *ring, struct sk_buff *skb, __le64 *meta)
*meta |= cpu_to_le64(FIELD_PREP(FBNIC_TWD_CSUM_OFFSET_MASK,
skb->csum_offset / 2));
- *meta |= cpu_to_le64(FBNIC_TWD_FLAG_REQ_CSO);
+ if (shinfo->gso_size) {
+ if (fbnic_tx_lso(ring, skb, shinfo, meta, &l2len, &i3len))
+ return true;
+ } else {
+ *meta |= cpu_to_le64(FBNIC_TWD_FLAG_REQ_CSO);
+ u64_stats_update_begin(&ring->stats.syncp);
+ ring->stats.twq.csum_partial++;
+ u64_stats_update_end(&ring->stats.syncp);
+ }
*meta |= cpu_to_le64(FIELD_PREP(FBNIC_TWD_L2_HLEN_MASK, l2len / 2) |
FIELD_PREP(FBNIC_TWD_L3_IHLEN_MASK, i3len / 2));
@@ -198,12 +277,15 @@ fbnic_tx_offloads(struct fbnic_ring *ring, struct sk_buff *skb, __le64 *meta)
}
static void
-fbnic_rx_csum(u64 rcd, struct sk_buff *skb, struct fbnic_ring *rcq)
+fbnic_rx_csum(u64 rcd, struct sk_buff *skb, struct fbnic_ring *rcq,
+ u64 *csum_cmpl, u64 *csum_none)
{
skb_checksum_none_assert(skb);
- if (unlikely(!(skb->dev->features & NETIF_F_RXCSUM)))
+ if (unlikely(!(skb->dev->features & NETIF_F_RXCSUM))) {
+ (*csum_none)++;
return;
+ }
if (FIELD_GET(FBNIC_RCD_META_L4_CSUM_UNNECESSARY, rcd)) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -212,6 +294,7 @@ fbnic_rx_csum(u64 rcd, struct sk_buff *skb, struct fbnic_ring *rcq)
skb->ip_summed = CHECKSUM_COMPLETE;
skb->csum = (__force __wsum)csum;
+ (*csum_cmpl)++;
}
}
@@ -329,7 +412,9 @@ fbnic_xmit_frame_ring(struct sk_buff *skb, struct fbnic_ring *ring)
/* Write all members within DWORD to condense this into 2 4B writes */
FBNIC_XMIT_CB(skb)->bytecount = skb->len;
+ FBNIC_XMIT_CB(skb)->gso_segs = 1;
FBNIC_XMIT_CB(skb)->desc_count = 0;
+ FBNIC_XMIT_CB(skb)->flags = 0;
if (fbnic_tx_offloads(ring, skb, meta))
goto err_free;
@@ -356,6 +441,59 @@ netdev_tx_t fbnic_xmit_frame(struct sk_buff *skb, struct net_device *dev)
return fbnic_xmit_frame_ring(skb, fbn->tx[q_map]);
}
+static netdev_features_t
+fbnic_features_check_encap_gso(struct sk_buff *skb, struct net_device *dev,
+ netdev_features_t features, unsigned int l3len)
+{
+ netdev_features_t skb_gso_features;
+ struct ipv6hdr *ip6_hdr;
+ unsigned char l4_hdr;
+ unsigned int start;
+ __be16 frag_off;
+
+ /* Require MANGLEID for GSO_PARTIAL of IPv4.
+ * In theory we could support TSO with single, innermost v4 header
+ * by pretending everything before it is L2, but that needs to be
+	 * parsed case by case, so leaving it for when the need arises.
+ */
+ if (!(features & NETIF_F_TSO_MANGLEID))
+ features &= ~NETIF_F_TSO;
+
+ skb_gso_features = skb_shinfo(skb)->gso_type;
+ skb_gso_features <<= NETIF_F_GSO_SHIFT;
+
+ /* We'd only clear the native GSO features, so don't bother validating
+ * if the match can only be on those supported thru GSO_PARTIAL.
+ */
+ if (!(skb_gso_features & FBNIC_TUN_GSO_FEATURES))
+ return features;
+
+ /* We can only do IPv6-in-IPv6, not v4-in-v6. It'd be nice
+ * to fall back to partial for this, or any failure below.
+ * This is just an optimization, UDPv4 will be caught later on.
+ */
+ if (skb_gso_features & NETIF_F_TSO)
+ return features & ~FBNIC_TUN_GSO_FEATURES;
+
+ /* Inner headers multiple of 2 */
+ if ((skb_inner_network_header(skb) - skb_network_header(skb)) % 2)
+ return features & ~FBNIC_TUN_GSO_FEATURES;
+
+ /* Encapsulated GSO packet, make 100% sure it's IPv6-in-IPv6. */
+ ip6_hdr = ipv6_hdr(skb);
+ if (ip6_hdr->version != 6)
+ return features & ~FBNIC_TUN_GSO_FEATURES;
+
+ l4_hdr = ip6_hdr->nexthdr;
+ start = (unsigned char *)ip6_hdr - skb->data + sizeof(struct ipv6hdr);
+ start = ipv6_skip_exthdr(skb, start, &l4_hdr, &frag_off);
+ if (frag_off || l4_hdr != IPPROTO_IPV6 ||
+ skb->data + start != skb_inner_network_header(skb))
+ return features & ~FBNIC_TUN_GSO_FEATURES;
+
+ return features;
+}
+
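
The helper above relies on the SKB_GSO_* flag bits lining up with the NETIF_F_GSO_* feature bits, so shifting gso_type left by NETIF_F_GSO_SHIFT turns it into a feature mask that can be tested and cleared directly. A standalone sketch of that trick follows; the constants here are illustrative stand-ins, not the kernel's real values.

	#include <stdint.h>
	#include <stdio.h>

	#define GSO_SHIFT	16
	#define SKB_GSO_TCPV4	(1u << 0)
	#define SKB_GSO_UDP_L4	(1u << 9)
	#define F_TSO		((uint64_t)SKB_GSO_TCPV4 << GSO_SHIFT)
	#define F_GSO_UDP_L4	((uint64_t)SKB_GSO_UDP_L4 << GSO_SHIFT)
	#define TUN_GSO_FEATURES F_GSO_UDP_L4	/* stand-in for FBNIC_TUN_GSO_FEATURES */

	int main(void)
	{
		uint64_t features = F_TSO | F_GSO_UDP_L4;
		uint32_t gso_type = SKB_GSO_TCPV4;	/* v4 TSO inside a tunnel */
		uint64_t skb_gso_features = (uint64_t)gso_type << GSO_SHIFT;

		if (skb_gso_features & F_TSO)		/* can't do v4-in-v6 */
			features &= ~TUN_GSO_FEATURES;

		printf("remaining features: %#llx\n", (unsigned long long)features);
		return 0;
	}
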
netdev_features_t
fbnic_features_check(struct sk_buff *skb, struct net_device *dev,
netdev_features_t features)
@@ -376,9 +514,12 @@ fbnic_features_check(struct sk_buff *skb, struct net_device *dev,
!FIELD_FIT(FBNIC_TWD_L2_HLEN_MASK, l2len / 2) ||
!FIELD_FIT(FBNIC_TWD_L3_IHLEN_MASK, l3len / 2) ||
!FIELD_FIT(FBNIC_TWD_CSUM_OFFSET_MASK, skb->csum_offset / 2))
- return features & ~NETIF_F_CSUM_MASK;
+ return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
- return features;
+ if (likely(!skb->encapsulation) || !skb_is_gso(skb))
+ return features;
+
+ return fbnic_features_check_encap_gso(skb, dev, features, l3len);
}
static void fbnic_clean_twq0(struct fbnic_napi_vector *nv, int napi_budget,
@@ -429,7 +570,7 @@ static void fbnic_clean_twq0(struct fbnic_napi_vector *nv, int napi_budget,
}
total_bytes += FBNIC_XMIT_CB(skb)->bytecount;
- total_packets += 1;
+ total_packets += FBNIC_XMIT_CB(skb)->gso_segs;
napi_consume_skb(skb, napi_budget);
}
@@ -444,7 +585,7 @@ static void fbnic_clean_twq0(struct fbnic_napi_vector *nv, int napi_budget,
if (unlikely(discard)) {
u64_stats_update_begin(&ring->stats.syncp);
ring->stats.dropped += total_packets;
- ring->stats.ts_lost += ts_lost;
+ ring->stats.twq.ts_lost += ts_lost;
u64_stats_update_end(&ring->stats.syncp);
netdev_tx_completed_queue(txq, total_packets, total_bytes);
@@ -456,9 +597,13 @@ static void fbnic_clean_twq0(struct fbnic_napi_vector *nv, int napi_budget,
ring->stats.packets += total_packets;
u64_stats_update_end(&ring->stats.syncp);
- netif_txq_completed_wake(txq, total_packets, total_bytes,
- fbnic_desc_unused(ring),
- FBNIC_TX_DESC_WAKEUP);
+ if (!netif_txq_completed_wake(txq, total_packets, total_bytes,
+ fbnic_desc_unused(ring),
+ FBNIC_TX_DESC_WAKEUP)) {
+ u64_stats_update_begin(&ring->stats.syncp);
+ ring->stats.twq.wake++;
+ u64_stats_update_end(&ring->stats.syncp);
+ }
}
static void fbnic_clean_tsq(struct fbnic_napi_vector *nv,
@@ -507,7 +652,7 @@ static void fbnic_clean_tsq(struct fbnic_napi_vector *nv,
skb_tstamp_tx(skb, &hwtstamp);
u64_stats_update_begin(&ring->stats.syncp);
- ring->stats.ts_packets++;
+ ring->stats.twq.ts_packets++;
u64_stats_update_end(&ring->stats.syncp);
}
@@ -661,8 +806,13 @@ static void fbnic_fill_bdq(struct fbnic_napi_vector *nv, struct fbnic_ring *bdq)
struct page *page;
page = page_pool_dev_alloc_pages(nv->page_pool);
- if (!page)
+ if (!page) {
+ u64_stats_update_begin(&bdq->stats.syncp);
+ bdq->stats.rx.alloc_failed++;
+ u64_stats_update_end(&bdq->stats.syncp);
+
break;
+ }
fbnic_page_pool_init(bdq, i, page);
fbnic_bd_prep(bdq, i, page);
@@ -875,12 +1025,13 @@ static void fbnic_rx_tstamp(struct fbnic_napi_vector *nv, u64 rcd,
static void fbnic_populate_skb_fields(struct fbnic_napi_vector *nv,
u64 rcd, struct sk_buff *skb,
- struct fbnic_q_triad *qt)
+ struct fbnic_q_triad *qt,
+ u64 *csum_cmpl, u64 *csum_none)
{
struct net_device *netdev = nv->napi.dev;
struct fbnic_ring *rcq = &qt->cmpl;
- fbnic_rx_csum(rcd, skb, rcq);
+ fbnic_rx_csum(rcd, skb, rcq, csum_cmpl, csum_none);
if (netdev->features & NETIF_F_RXHASH)
skb_set_hash(skb,
@@ -898,7 +1049,8 @@ static bool fbnic_rcd_metadata_err(u64 rcd)
static int fbnic_clean_rcq(struct fbnic_napi_vector *nv,
struct fbnic_q_triad *qt, int budget)
{
- unsigned int packets = 0, bytes = 0, dropped = 0;
+ unsigned int packets = 0, bytes = 0, dropped = 0, alloc_failed = 0;
+ u64 csum_complete = 0, csum_none = 0;
struct fbnic_ring *rcq = &qt->cmpl;
struct fbnic_pkt_buff *pkt;
s32 head0 = -1, head1 = -1;
@@ -947,14 +1099,22 @@ static int fbnic_clean_rcq(struct fbnic_napi_vector *nv,
/* Populate skb and invalidate XDP */
if (!IS_ERR_OR_NULL(skb)) {
- fbnic_populate_skb_fields(nv, rcd, skb, qt);
+ fbnic_populate_skb_fields(nv, rcd, skb, qt,
+ &csum_complete,
+ &csum_none);
packets++;
bytes += skb->len;
napi_gro_receive(&nv->napi, skb);
} else {
- dropped++;
+ if (!skb) {
+ alloc_failed++;
+ dropped++;
+ } else {
+ dropped++;
+ }
+
fbnic_put_pkt_buff(nv, pkt, 1);
}
@@ -977,6 +1137,9 @@ static int fbnic_clean_rcq(struct fbnic_napi_vector *nv,
/* Re-add ethernet header length (removed in fbnic_build_skb) */
rcq->stats.bytes += ETH_HLEN * packets;
rcq->stats.dropped += dropped;
+ rcq->stats.rx.alloc_failed += alloc_failed;
+ rcq->stats.rx.csum_complete += csum_complete;
+ rcq->stats.rx.csum_none += csum_none;
u64_stats_update_end(&rcq->stats.syncp);
/* Unmap and free processed buffers */
@@ -1054,6 +1217,11 @@ void fbnic_aggregate_ring_rx_counters(struct fbnic_net *fbn,
fbn->rx_stats.bytes += stats->bytes;
fbn->rx_stats.packets += stats->packets;
fbn->rx_stats.dropped += stats->dropped;
+ fbn->rx_stats.rx.alloc_failed += stats->rx.alloc_failed;
+ fbn->rx_stats.rx.csum_complete += stats->rx.csum_complete;
+ fbn->rx_stats.rx.csum_none += stats->rx.csum_none;
+ /* Remember to add new stats here */
+ BUILD_BUG_ON(sizeof(fbn->rx_stats.rx) / 8 != 3);
}
void fbnic_aggregate_ring_tx_counters(struct fbnic_net *fbn,
@@ -1065,8 +1233,14 @@ void fbnic_aggregate_ring_tx_counters(struct fbnic_net *fbn,
fbn->tx_stats.bytes += stats->bytes;
fbn->tx_stats.packets += stats->packets;
fbn->tx_stats.dropped += stats->dropped;
- fbn->tx_stats.ts_lost += stats->ts_lost;
- fbn->tx_stats.ts_packets += stats->ts_packets;
+ fbn->tx_stats.twq.csum_partial += stats->twq.csum_partial;
+ fbn->tx_stats.twq.lso += stats->twq.lso;
+ fbn->tx_stats.twq.ts_lost += stats->twq.ts_lost;
+ fbn->tx_stats.twq.ts_packets += stats->twq.ts_packets;
+ fbn->tx_stats.twq.stop += stats->twq.stop;
+ fbn->tx_stats.twq.wake += stats->twq.wake;
+ /* Remember to add new stats here */
+ BUILD_BUG_ON(sizeof(fbn->tx_stats.twq) / 8 != 6);
}
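
The BUILD_BUG_ON lines above are compile-time guards: the number of u64 counters in the per-ring struct is derived from sizeof()/8, so adding a counter without extending the aggregation code breaks the build. A standalone sketch of the same idea using C11's _Static_assert:

	#include <stdint.h>
	#include <stdio.h>

	struct twq_stats {
		uint64_t csum_partial;
		uint64_t lso;
		uint64_t ts_packets;
		uint64_t ts_lost;
		uint64_t stop;
		uint64_t wake;
	};

	/* Equivalent of BUILD_BUG_ON(sizeof(stats.twq) / 8 != 6) */
	_Static_assert(sizeof(struct twq_stats) / sizeof(uint64_t) == 6,
		       "aggregate every twq counter before bumping this count");

	int main(void)
	{
		printf("twq counters: %zu\n",
		       sizeof(struct twq_stats) / sizeof(uint64_t));
		return 0;
	}
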
static void fbnic_remove_tx_ring(struct fbnic_net *fbn,
@@ -1142,7 +1316,9 @@ static int fbnic_alloc_nv_page_pool(struct fbnic_net *fbn,
.dev = nv->dev,
.dma_dir = DMA_BIDIRECTIONAL,
.offset = 0,
- .max_len = PAGE_SIZE
+ .max_len = PAGE_SIZE,
+ .napi = &nv->napi,
+ .netdev = fbn->netdev,
};
struct page_pool *pp;
@@ -2010,9 +2186,51 @@ static void fbnic_config_drop_mode_rcq(struct fbnic_napi_vector *nv,
fbnic_ring_wr32(rcq, FBNIC_QUEUE_RDE_CTL0, rcq_ctl);
}
+static void fbnic_config_rim_threshold(struct fbnic_ring *rcq, u16 nv_idx, u32 rx_desc)
+{
+ u32 threshold;
+
+	/* Set the threshold to half the ring size if rx_frames
+ * is not configured
+ */
+ threshold = rx_desc ? : rcq->size_mask / 2;
+
+ fbnic_ring_wr32(rcq, FBNIC_QUEUE_RIM_CTL, nv_idx);
+ fbnic_ring_wr32(rcq, FBNIC_QUEUE_RIM_THRESHOLD, threshold);
+}
+
+void fbnic_config_txrx_usecs(struct fbnic_napi_vector *nv, u32 arm)
+{
+ struct fbnic_net *fbn = netdev_priv(nv->napi.dev);
+ struct fbnic_dev *fbd = nv->fbd;
+ u32 val = arm;
+
+ val |= FIELD_PREP(FBNIC_INTR_CQ_REARM_RCQ_TIMEOUT, fbn->rx_usecs) |
+ FBNIC_INTR_CQ_REARM_RCQ_TIMEOUT_UPD_EN;
+ val |= FIELD_PREP(FBNIC_INTR_CQ_REARM_TCQ_TIMEOUT, fbn->tx_usecs) |
+ FBNIC_INTR_CQ_REARM_TCQ_TIMEOUT_UPD_EN;
+
+ fbnic_wr32(fbd, FBNIC_INTR_CQ_REARM(nv->v_idx), val);
+}
+
+void fbnic_config_rx_frames(struct fbnic_napi_vector *nv)
+{
+ struct fbnic_net *fbn = netdev_priv(nv->napi.dev);
+ int i;
+
+ for (i = nv->txt_count; i < nv->rxt_count + nv->txt_count; i++) {
+ struct fbnic_q_triad *qt = &nv->qt[i];
+
+ fbnic_config_rim_threshold(&qt->cmpl, nv->v_idx,
+ fbn->rx_max_frames *
+ FBNIC_MIN_RXD_PER_FRAME);
+ }
+}
+
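
The threshold selection above uses the GNU "?:" shorthand: program rx_max_frames converted to descriptors when the user configured rx-frames, otherwise default to half the completion ring. A minimal sketch (values and the descriptors-per-frame constant are illustrative stand-ins):

	#include <stdio.h>

	#define MIN_RXD_PER_FRAME 2	/* stand-in for FBNIC_MIN_RXD_PER_FRAME */

	static unsigned int rim_threshold(unsigned int rx_max_frames,
					  unsigned int size_mask)
	{
		unsigned int rx_desc = rx_max_frames * MIN_RXD_PER_FRAME;

		return rx_desc ? rx_desc : size_mask / 2;	/* same as "rx_desc ?:" */
	}

	int main(void)
	{
		printf("default:      %u\n", rim_threshold(0, 1023));
		printf("rx-frames=64: %u\n", rim_threshold(64, 1023));
		return 0;
	}
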
static void fbnic_enable_rcq(struct fbnic_napi_vector *nv,
struct fbnic_ring *rcq)
{
+ struct fbnic_net *fbn = netdev_priv(nv->napi.dev);
u32 log_size = fls(rcq->size_mask);
u32 rcq_ctl;
@@ -2040,8 +2258,8 @@ static void fbnic_enable_rcq(struct fbnic_napi_vector *nv,
fbnic_ring_wr32(rcq, FBNIC_QUEUE_RCQ_SIZE, log_size & 0xf);
/* Store interrupt information for the completion queue */
- fbnic_ring_wr32(rcq, FBNIC_QUEUE_RIM_CTL, nv->v_idx);
- fbnic_ring_wr32(rcq, FBNIC_QUEUE_RIM_THRESHOLD, rcq->size_mask / 2);
+ fbnic_config_rim_threshold(rcq, nv->v_idx, fbn->rx_max_frames *
+ FBNIC_MIN_RXD_PER_FRAME);
fbnic_ring_wr32(rcq, FBNIC_QUEUE_RIM_MASK, 0);
/* Enable queue */
@@ -2080,12 +2298,7 @@ void fbnic_enable(struct fbnic_net *fbn)
static void fbnic_nv_irq_enable(struct fbnic_napi_vector *nv)
{
- struct fbnic_dev *fbd = nv->fbd;
- u32 val;
-
- val = FBNIC_INTR_CQ_REARM_INTR_UNMASK;
-
- fbnic_wr32(fbd, FBNIC_INTR_CQ_REARM(nv->v_idx), val);
+ fbnic_config_txrx_usecs(nv, FBNIC_INTR_CQ_REARM_INTR_UNMASK);
}
void fbnic_napi_enable(struct fbnic_net *fbn)
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_txrx.h b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.h
index c2a94f31f71b..f46616af41ea 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_txrx.h
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.h
@@ -24,13 +24,29 @@ struct fbnic_net;
#define FBNIC_TX_DESC_WAKEUP (FBNIC_MAX_SKB_DESC * 2)
#define FBNIC_TX_DESC_MIN roundup_pow_of_two(FBNIC_TX_DESC_WAKEUP)
+/* To receive the worst case packet we need:
+ * 1 descriptor for primary metadata
+ * + 1 descriptor for optional metadata
+ * + 1 descriptor for headers
+ * + 4 descriptors for payload
+ */
+#define FBNIC_MAX_RX_PKT_DESC 7
+#define FBNIC_RX_DESC_MIN roundup_pow_of_two(FBNIC_MAX_RX_PKT_DESC * 2)
+
#define FBNIC_MAX_TXQS 128u
#define FBNIC_MAX_RXQS 128u
+/* These apply to TWQs, TCQ, RCQ */
+#define FBNIC_QUEUE_SIZE_MIN 16u
+#define FBNIC_QUEUE_SIZE_MAX SZ_64K
+
#define FBNIC_TXQ_SIZE_DEFAULT 1024
#define FBNIC_HPQ_SIZE_DEFAULT 256
#define FBNIC_PPQ_SIZE_DEFAULT 256
#define FBNIC_RCQ_SIZE_DEFAULT 1024
+#define FBNIC_TX_USECS_DEFAULT 35
+#define FBNIC_RX_USECS_DEFAULT 30
+#define FBNIC_RX_FRAMES_DEFAULT 0
#define FBNIC_RX_TROOM \
SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
@@ -56,9 +72,22 @@ struct fbnic_pkt_buff {
struct fbnic_queue_stats {
u64 packets;
u64 bytes;
+ union {
+ struct {
+ u64 csum_partial;
+ u64 lso;
+ u64 ts_packets;
+ u64 ts_lost;
+ u64 stop;
+ u64 wake;
+ } twq;
+ struct {
+ u64 alloc_failed;
+ u64 csum_complete;
+ u64 csum_none;
+ } rx;
+ };
u64 dropped;
- u64 ts_packets;
- u64 ts_lost;
struct u64_stats_sync syncp;
};
diff --git a/drivers/net/ethernet/micrel/ks8851_spi.c b/drivers/net/ethernet/micrel/ks8851_spi.c
index 3062cc0f9199..c862b13b447a 100644
--- a/drivers/net/ethernet/micrel/ks8851_spi.c
+++ b/drivers/net/ethernet/micrel/ks8851_spi.c
@@ -20,8 +20,6 @@
#include <linux/regulator/consumer.h>
#include <linux/spi/spi.h>
-#include <linux/gpio.h>
-#include <linux/of_gpio.h>
#include <linux/of_net.h>
#include "ks8851.h"
diff --git a/drivers/net/ethernet/microchip/lan743x_ptp.c b/drivers/net/ethernet/microchip/lan743x_ptp.c
index 4a777b449ecd..0be44dcb3393 100644
--- a/drivers/net/ethernet/microchip/lan743x_ptp.c
+++ b/drivers/net/ethernet/microchip/lan743x_ptp.c
@@ -942,6 +942,12 @@ static int lan743x_ptp_io_extts(struct lan743x_adapter *adapter, int on,
extts = &ptp->extts[index];
+ if (extts_request->flags & ~(PTP_ENABLE_FEATURE |
+ PTP_RISING_EDGE |
+ PTP_FALLING_EDGE |
+ PTP_STRICT_FLAGS))
+ return -EOPNOTSUPP;
+
if (on) {
extts_pin = ptp_find_pin(ptp->ptp_clock, PTP_PF_EXTTS, index);
if (extts_pin < 0)
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
index 3234a960fcc3..0af143ec0f86 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
@@ -828,7 +828,6 @@ static int lan966x_probe_port(struct lan966x *lan966x, u32 p,
port->phylink_config.type = PHYLINK_NETDEV;
port->phylink_pcs.poll = true;
port->phylink_pcs.ops = &lan966x_phylink_pcs_ops;
- port->phylink_pcs.neg_mode = true;
port->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
MAC_10 | MAC_100 | MAC_1000FD | MAC_2500FD;
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main.c b/drivers/net/ethernet/microchip/sparx5/sparx5_main.c
index 6a0e5b83ecd0..74ad1d73b465 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_main.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_main.c
@@ -338,7 +338,6 @@ static int sparx5_create_port(struct sparx5 *sparx5,
spx5_port->custom_etype = 0x8880; /* Vitesse */
spx5_port->phylink_pcs.poll = true;
spx5_port->phylink_pcs.ops = &sparx5_phylink_pcs_ops;
- spx5_port->phylink_pcs.neg_mode = true;
spx5_port->is_mrouter = false;
INIT_LIST_HEAD(&spx5_port->tc_templates);
sparx5->ports[config->portno] = spx5_port;
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c b/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c
index 138ac58fae51..f713656f1fae 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c
@@ -375,6 +375,6 @@ irqreturn_t sparx5_xtr_handler(int irq, void *_sparx5)
void sparx5_port_inj_timer_setup(struct sparx5_port *port)
{
- hrtimer_init(&port->inj_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- port->inj_timer.function = sparx5_injection_timeout;
+ hrtimer_setup(&port->inj_timer, sparx5_injection_timeout, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
}
diff --git a/drivers/net/ethernet/microsoft/mana/gdma_main.c b/drivers/net/ethernet/microsoft/mana/gdma_main.c
index be95336ce089..62dfb6d1638c 100644
--- a/drivers/net/ethernet/microsoft/mana/gdma_main.c
+++ b/drivers/net/ethernet/microsoft/mana/gdma_main.c
@@ -134,9 +134,10 @@ static int mana_gd_detect_devices(struct pci_dev *pdev)
struct gdma_list_devices_resp resp = {};
struct gdma_general_req req = {};
struct gdma_dev_id dev;
- u32 i, max_num_devs;
+ int found_dev = 0;
u16 dev_type;
int err;
+ u32 i;
mana_gd_init_req_hdr(&req.hdr, GDMA_LIST_DEVICES, sizeof(req),
sizeof(resp));
@@ -148,12 +149,17 @@ static int mana_gd_detect_devices(struct pci_dev *pdev)
return err ? err : -EPROTO;
}
- max_num_devs = min_t(u32, MAX_NUM_GDMA_DEVICES, resp.num_of_devs);
-
- for (i = 0; i < max_num_devs; i++) {
+ for (i = 0; i < GDMA_DEV_LIST_SIZE &&
+ found_dev < resp.num_of_devs; i++) {
dev = resp.devs[i];
dev_type = dev.type;
+ /* Skip empty devices */
+ if (dev.as_uint32 == 0)
+ continue;
+
+ found_dev++;
+
/* HWC is already detected in mana_hwc_create_channel(). */
if (dev_type == GDMA_DEVICE_HWC)
continue;
@@ -666,8 +672,11 @@ int mana_gd_create_hwc_queue(struct gdma_dev *gd,
gmi = &queue->mem_info;
err = mana_gd_alloc_memory(gc, spec->queue_size, gmi);
- if (err)
+ if (err) {
+ dev_err(gc->dev, "GDMA queue type: %d, size: %u, gdma memory allocation err: %d\n",
+ spec->type, spec->queue_size, err);
goto free_q;
+ }
queue->head = 0;
queue->tail = 0;
@@ -688,6 +697,8 @@ int mana_gd_create_hwc_queue(struct gdma_dev *gd,
*queue_ptr = queue;
return 0;
out:
+ dev_err(gc->dev, "Failed to create queue type %d of size %u, err: %d\n",
+ spec->type, spec->queue_size, err);
mana_gd_free_memory(gmi);
free_q:
kfree(queue);
@@ -770,7 +781,13 @@ static int mana_gd_create_dma_region(struct gdma_dev *gd,
}
gmi->dma_region_handle = resp.dma_region_handle;
+ dev_dbg(gc->dev, "Created DMA region handle 0x%llx\n",
+ gmi->dma_region_handle);
out:
+ if (err)
+ dev_dbg(gc->dev,
+ "Failed to create DMA region of length: %u, page_type: %d, status: 0x%x, err: %d\n",
+ length, req->gdma_page_type, resp.hdr.status, err);
kfree(req);
return err;
}
@@ -793,8 +810,11 @@ int mana_gd_create_mana_eq(struct gdma_dev *gd,
gmi = &queue->mem_info;
err = mana_gd_alloc_memory(gc, spec->queue_size, gmi);
- if (err)
+ if (err) {
+ dev_err(gc->dev, "GDMA queue type: %d, size: %u, gdma memory allocation err: %d\n",
+ spec->type, spec->queue_size, err);
goto free_q;
+ }
err = mana_gd_create_dma_region(gd, gmi);
if (err)
@@ -815,6 +835,8 @@ int mana_gd_create_mana_eq(struct gdma_dev *gd,
*queue_ptr = queue;
return 0;
out:
+ dev_err(gc->dev, "Failed to create queue type %d of size: %u, err: %d\n",
+ spec->type, spec->queue_size, err);
mana_gd_free_memory(gmi);
free_q:
kfree(queue);
@@ -841,8 +863,11 @@ int mana_gd_create_mana_wq_cq(struct gdma_dev *gd,
gmi = &queue->mem_info;
err = mana_gd_alloc_memory(gc, spec->queue_size, gmi);
- if (err)
+ if (err) {
+ dev_err(gc->dev, "GDMA queue type: %d, size: %u, memory allocation err: %d\n",
+ spec->type, spec->queue_size, err);
goto free_q;
+ }
err = mana_gd_create_dma_region(gd, gmi);
if (err)
@@ -862,6 +887,8 @@ int mana_gd_create_mana_wq_cq(struct gdma_dev *gd,
*queue_ptr = queue;
return 0;
out:
+ dev_err(gc->dev, "Failed to create queue type %d of size: %u, err: %d\n",
+ spec->type, spec->queue_size, err);
mana_gd_free_memory(gmi);
free_q:
kfree(queue);
@@ -1157,8 +1184,11 @@ int mana_gd_post_and_ring(struct gdma_queue *queue,
int err;
err = mana_gd_post_work_request(queue, wqe_req, wqe_info);
- if (err)
+ if (err) {
+ dev_err(gc->dev, "Failed to post work req from queue type %d of size %u (err=%d)\n",
+ queue->type, queue->queue_size, err);
return err;
+ }
mana_gd_wq_ring_doorbell(gc, queue);
@@ -1435,8 +1465,10 @@ static int mana_gd_setup(struct pci_dev *pdev)
mana_smc_init(&gc->shm_channel, gc->dev, gc->shm_base);
err = mana_gd_setup_irqs(pdev);
- if (err)
+ if (err) {
+ dev_err(gc->dev, "Failed to setup IRQs: %d\n", err);
return err;
+ }
err = mana_hwc_create_channel(gc);
if (err)
@@ -1454,12 +1486,14 @@ static int mana_gd_setup(struct pci_dev *pdev)
if (err)
goto destroy_hwc;
+ dev_dbg(&pdev->dev, "mana gdma setup successful\n");
return 0;
destroy_hwc:
mana_hwc_destroy_channel(gc);
remove_irq:
mana_gd_remove_irqs(pdev);
+ dev_err(&pdev->dev, "%s failed (error %d)\n", __func__, err);
return err;
}
@@ -1470,6 +1504,7 @@ static void mana_gd_cleanup(struct pci_dev *pdev)
mana_hwc_destroy_channel(gc);
mana_gd_remove_irqs(pdev);
+ dev_dbg(&pdev->dev, "mana gdma cleanup successful\n");
}
static bool mana_is_pf(unsigned short dev_id)
@@ -1488,8 +1523,10 @@ static int mana_gd_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
BUILD_BUG_ON(2 * MAX_PORTS_IN_MANA_DEV * GDMA_EQE_SIZE > EQ_SIZE);
err = pci_enable_device(pdev);
- if (err)
+ if (err) {
+ dev_err(&pdev->dev, "Failed to enable pci device (err=%d)\n", err);
return -ENXIO;
+ }
pci_set_master(pdev);
@@ -1498,9 +1535,10 @@ static int mana_gd_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto disable_dev;
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
- if (err)
+ if (err) {
+ dev_err(&pdev->dev, "DMA set mask failed: %d\n", err);
goto release_region;
-
+ }
dma_set_max_seg_size(&pdev->dev, UINT_MAX);
err = -ENOMEM;
@@ -1547,6 +1585,7 @@ unmap_bar:
* adapter-MTU file and apc->mana_pci_debugfs folder.
*/
debugfs_remove_recursive(gc->mana_pci_debugfs);
+ gc->mana_pci_debugfs = NULL;
pci_iounmap(pdev, bar0_va);
free_gc:
pci_set_drvdata(pdev, NULL);
@@ -1569,12 +1608,16 @@ static void mana_gd_remove(struct pci_dev *pdev)
debugfs_remove_recursive(gc->mana_pci_debugfs);
+ gc->mana_pci_debugfs = NULL;
+
pci_iounmap(pdev, gc->bar0_va);
vfree(gc);
pci_release_regions(pdev);
pci_disable_device(pdev);
+
+ dev_dbg(&pdev->dev, "mana gdma remove successful\n");
}
/* The 'state' parameter is not used. */
@@ -1622,6 +1665,8 @@ static void mana_gd_shutdown(struct pci_dev *pdev)
debugfs_remove_recursive(gc->mana_pci_debugfs);
+ gc->mana_pci_debugfs = NULL;
+
pci_disable_device(pdev);
}
@@ -1648,8 +1693,10 @@ static int __init mana_driver_init(void)
mana_debugfs_root = debugfs_create_dir("mana", NULL);
err = pci_register_driver(&mana_driver);
- if (err)
+ if (err) {
debugfs_remove(mana_debugfs_root);
+ mana_debugfs_root = NULL;
+ }
return err;
}
@@ -1659,6 +1706,8 @@ static void __exit mana_driver_exit(void)
pci_unregister_driver(&mana_driver);
debugfs_remove(mana_debugfs_root);
+
+ mana_debugfs_root = NULL;
}
module_init(mana_driver_init);
diff --git a/drivers/net/ethernet/microsoft/mana/hw_channel.c b/drivers/net/ethernet/microsoft/mana/hw_channel.c
index a00f915c5188..1ba49602089b 100644
--- a/drivers/net/ethernet/microsoft/mana/hw_channel.c
+++ b/drivers/net/ethernet/microsoft/mana/hw_channel.c
@@ -440,7 +440,8 @@ static int mana_hwc_alloc_dma_buf(struct hw_channel_context *hwc, u16 q_depth,
gmi = &dma_buf->mem_info;
err = mana_gd_alloc_memory(gc, buf_size, gmi);
if (err) {
- dev_err(hwc->dev, "Failed to allocate DMA buffer: %d\n", err);
+ dev_err(hwc->dev, "Failed to allocate DMA buffer size: %u, err %d\n",
+ buf_size, err);
goto out;
}
@@ -529,6 +530,9 @@ static int mana_hwc_create_wq(struct hw_channel_context *hwc,
out:
if (err)
mana_hwc_destroy_wq(hwc, hwc_wq);
+
+ dev_err(hwc->dev, "Failed to create HWC queue size= %u type= %d err= %d\n",
+ queue_size, q_type, err);
return err;
}
diff --git a/drivers/net/ethernet/microsoft/mana/mana_bpf.c b/drivers/net/ethernet/microsoft/mana/mana_bpf.c
index 23b1521c0df9..d30721d4516f 100644
--- a/drivers/net/ethernet/microsoft/mana/mana_bpf.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_bpf.c
@@ -91,7 +91,7 @@ u32 mana_run_xdp(struct net_device *ndev, struct mana_rxq *rxq,
goto out;
xdp_init_buff(xdp, PAGE_SIZE, &rxq->xdp_rxq);
- xdp_prepare_buff(xdp, buf_va, XDP_PACKET_HEADROOM, pkt_len, false);
+ xdp_prepare_buff(xdp, buf_va, XDP_PACKET_HEADROOM, pkt_len, true);
act = bpf_prog_run_xdp(prog, xdp);
diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c
index aa1e47233fe5..e190d5ee5154 100644
--- a/drivers/net/ethernet/microsoft/mana/mana_en.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_en.c
@@ -13,6 +13,7 @@
#include <net/checksum.h>
#include <net/ip6_checksum.h>
+#include <net/netdev_lock.h>
#include <net/page_pool/helpers.h>
#include <net/xdp.h>
@@ -52,10 +53,12 @@ static int mana_open(struct net_device *ndev)
{
struct mana_port_context *apc = netdev_priv(ndev);
int err;
-
err = mana_alloc_queues(ndev);
- if (err)
+
+ if (err) {
+ netdev_err(ndev, "%s failed to allocate queues: %d\n", __func__, err);
return err;
+ }
apc->port_is_up = true;
@@ -64,7 +67,7 @@ static int mana_open(struct net_device *ndev)
netif_carrier_on(ndev);
netif_tx_wake_all_queues(ndev);
-
+ netdev_dbg(ndev, "%s successful\n", __func__);
return 0;
}
@@ -176,6 +179,9 @@ static int mana_map_skb(struct sk_buff *skb, struct mana_port_context *apc,
return 0;
frag_err:
+ if (net_ratelimit())
+ netdev_err(apc->ndev, "Failed to map skb of size %u to DMA\n",
+ skb->len);
for (i = sg_i - 1; i >= hsg; i--)
dma_unmap_page(dev, ash->dma_handle[i], ash->size[i],
DMA_TO_DEVICE);
@@ -256,6 +262,9 @@ netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev)
if (skb_cow_head(skb, MANA_HEADROOM))
goto tx_drop_count;
+ if (unlikely(ipv6_hopopt_jumbo_remove(skb)))
+ goto tx_drop_count;
+
txq = &apc->tx_qp[txq_idx].txq;
gdma_sq = txq->gdma_sq;
cq = &apc->tx_qp[txq_idx].tx_cq;
@@ -687,6 +696,7 @@ int mana_pre_alloc_rxbufs(struct mana_port_context *mpc, int new_mtu, int num_qu
return 0;
error:
+ netdev_err(mpc->ndev, "Failed to pre-allocate RX buffers for %d queues\n", num_queues);
mana_pre_dealloc_rxbufs(mpc);
return -ENOMEM;
}
@@ -738,12 +748,11 @@ static const struct net_device_ops mana_devops = {
static void mana_cleanup_port_context(struct mana_port_context *apc)
{
/*
- * at this point all dir/files under the vport directory
- * are already cleaned up.
- * We are sure the apc->mana_port_debugfs remove will not
- * cause any freed memory access issues
+	 * make sure subsequent cleanup attempts don't end up removing an
+	 * already cleaned dentry pointer
*/
debugfs_remove(apc->mana_port_debugfs);
+ apc->mana_port_debugfs = NULL;
kfree(apc->rxqs);
apc->rxqs = NULL;
}
@@ -1254,6 +1263,7 @@ static void mana_destroy_eq(struct mana_context *ac)
return;
debugfs_remove_recursive(ac->mana_eqs_debugfs);
+ ac->mana_eqs_debugfs = NULL;
for (i = 0; i < gc->max_num_queues; i++) {
eq = ac->eqs[i].eq;
@@ -1304,8 +1314,10 @@ static int mana_create_eq(struct mana_context *ac)
for (i = 0; i < gc->max_num_queues; i++) {
spec.eq.msix_index = (i + 1) % gc->num_msix_usable;
err = mana_gd_create_mana_eq(gd, &spec, &ac->eqs[i].eq);
- if (err)
+ if (err) {
+ dev_err(gc->dev, "Failed to create EQ %d : %d\n", i, err);
goto out;
+ }
mana_create_eq_debugfs(ac, i);
}
@@ -1547,8 +1559,12 @@ static struct sk_buff *mana_build_skb(struct mana_rxq *rxq, void *buf_va,
return NULL;
if (xdp->data_hard_start) {
+ u32 metasize = xdp->data - xdp->data_meta;
+
skb_reserve(skb, xdp->data - xdp->data_hard_start);
skb_put(skb, xdp->data_end - xdp->data);
+ if (metasize)
+ skb_metadata_set(skb, metasize);
return skb;
}
@@ -1914,6 +1930,7 @@ static void mana_destroy_txq(struct mana_port_context *apc)
for (i = 0; i < apc->num_queues; i++) {
debugfs_remove_recursive(apc->tx_qp[i].mana_tx_debugfs);
+ apc->tx_qp[i].mana_tx_debugfs = NULL;
napi = &apc->tx_qp[i].tx_cq.napi;
if (apc->tx_qp[i].txq.napi_initialized) {
@@ -2080,6 +2097,8 @@ static int mana_create_txq(struct mana_port_context *apc,
return 0;
out:
+ netdev_err(net, "Failed to create %d TX queues, %d\n",
+ apc->num_queues, err);
mana_destroy_txq(apc);
return err;
}
@@ -2099,6 +2118,7 @@ static void mana_destroy_rxq(struct mana_port_context *apc,
return;
debugfs_remove_recursive(rxq->mana_rx_debugfs);
+ rxq->mana_rx_debugfs = NULL;
napi = &rxq->rx_cq.napi;
@@ -2415,6 +2435,7 @@ static int mana_add_rx_queues(struct mana_port_context *apc,
rxq = mana_create_rxq(apc, i, &ac->eqs[i], ndev);
if (!rxq) {
err = -ENOMEM;
+ netdev_err(ndev, "Failed to create rxq %d : %d\n", i, err);
goto out;
}
@@ -2661,12 +2682,18 @@ int mana_alloc_queues(struct net_device *ndev)
int err;
err = mana_create_vport(apc, ndev);
- if (err)
+ if (err) {
+ netdev_err(ndev, "Failed to create vPort %u : %d\n", apc->port_idx, err);
return err;
+ }
err = netif_set_real_num_tx_queues(ndev, apc->num_queues);
- if (err)
+ if (err) {
+ netdev_err(ndev,
+ "netif_set_real_num_tx_queues () failed for ndev with num_queues %u : %d\n",
+ apc->num_queues, err);
goto destroy_vport;
+ }
err = mana_add_rx_queues(apc, ndev);
if (err)
@@ -2675,14 +2702,20 @@ int mana_alloc_queues(struct net_device *ndev)
apc->rss_state = apc->num_queues > 1 ? TRI_STATE_TRUE : TRI_STATE_FALSE;
err = netif_set_real_num_rx_queues(ndev, apc->num_queues);
- if (err)
+ if (err) {
+ netdev_err(ndev,
+ "netif_set_real_num_rx_queues () failed for ndev with num_queues %u : %d\n",
+ apc->num_queues, err);
goto destroy_vport;
+ }
mana_rss_table_init(apc);
err = mana_config_rss(apc, TRI_STATE_TRUE, true, true);
- if (err)
+ if (err) {
+ netdev_err(ndev, "Failed to configure RSS table: %d\n", err);
goto destroy_vport;
+ }
if (gd->gdma_context->is_pf) {
err = mana_pf_register_filter(apc);
@@ -2823,8 +2856,10 @@ int mana_detach(struct net_device *ndev, bool from_close)
if (apc->port_st_save) {
err = mana_dealloc_queues(ndev);
- if (err)
+ if (err) {
+ netdev_err(ndev, "%s failed to deallocate queues: %d\n", __func__, err);
return err;
+ }
}
if (!from_close) {
@@ -2873,6 +2908,8 @@ static int mana_probe_port(struct mana_context *ac, int port_idx,
ndev->dev_port = port_idx;
SET_NETDEV_DEV(ndev, gc->dev);
+ netif_set_tso_max_size(ndev, GSO_MAX_SIZE);
+
netif_carrier_off(ndev);
netdev_rss_key_fill(apc->hashkey, MANA_HASH_KEY_SIZE);
@@ -2968,6 +3005,8 @@ static int add_adev(struct gdma_dev *gd)
goto add_fail;
gd->adev = adev;
+ dev_dbg(gd->gdma_context->dev,
+ "Auxiliary device added successfully\n");
return 0;
add_fail:
@@ -3009,8 +3048,10 @@ int mana_probe(struct gdma_dev *gd, bool resuming)
}
err = mana_create_eq(ac);
- if (err)
+ if (err) {
+ dev_err(dev, "Failed to create EQs: %d\n", err);
goto out;
+ }
err = mana_query_device_cfg(ac, MANA_MAJOR_VERSION, MANA_MINOR_VERSION,
MANA_MICRO_VERSION, &num_ports);
@@ -3066,8 +3107,14 @@ int mana_probe(struct gdma_dev *gd, bool resuming)
err = add_adev(gd);
out:
- if (err)
+ if (err) {
mana_remove(gd, false);
+ } else {
+ dev_dbg(dev, "gd=%p, id=%u, num_ports=%d, type=%u, instance=%u\n",
+ gd, gd->dev_id.as_uint32, ac->num_ports,
+ gd->dev_id.type, gd->dev_id.instance);
+ dev_dbg(dev, "%s succeeded\n", __func__);
+ }
return err;
}
@@ -3129,6 +3176,7 @@ out:
gd->driver_data = NULL;
gd->gdma_context = NULL;
kfree(ac);
+ dev_dbg(dev, "%s succeeded\n", __func__);
}
struct net_device *mana_get_primary_netdev_rcu(struct mana_context *ac, u32 port_index)
diff --git a/drivers/net/ethernet/netronome/nfp/crypto/ipsec.c b/drivers/net/ethernet/netronome/nfp/crypto/ipsec.c
index 515069d5637b..671af5d4c5d2 100644
--- a/drivers/net/ethernet/netronome/nfp/crypto/ipsec.c
+++ b/drivers/net/ethernet/netronome/nfp/crypto/ipsec.c
@@ -565,20 +565,9 @@ static void nfp_net_xfrm_del_state(struct xfrm_state *x)
xa_erase(&nn->xa_ipsec, x->xso.offload_handle - 1);
}
-static bool nfp_net_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
-{
- if (x->props.family == AF_INET)
- /* Offload with IPv4 options is not supported yet */
- return ip_hdr(skb)->ihl == 5;
-
- /* Offload with IPv6 extension headers is not support yet */
- return !(ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr));
-}
-
static const struct xfrmdev_ops nfp_net_ipsec_xfrmdev_ops = {
.xdo_dev_state_add = nfp_net_xfrm_add_state,
.xdo_dev_state_delete = nfp_net_xfrm_del_state,
- .xdo_dev_offload_ok = nfp_net_ipsec_offload_ok,
};
void nfp_net_ipsec_init(struct nfp_net *nn)
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_hwmon.c b/drivers/net/ethernet/netronome/nfp/nfp_hwmon.c
index 0d6c59d6d4ae..ea6a288c0d5e 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_hwmon.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_hwmon.c
@@ -83,42 +83,12 @@ nfp_hwmon_is_visible(const void *data, enum hwmon_sensor_types type, u32 attr,
return 0;
}
-static u32 nfp_chip_config[] = {
- HWMON_C_REGISTER_TZ,
- 0
-};
-
-static const struct hwmon_channel_info nfp_chip = {
- .type = hwmon_chip,
- .config = nfp_chip_config,
-};
-
-static u32 nfp_temp_config[] = {
- HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_CRIT,
- 0
-};
-
-static const struct hwmon_channel_info nfp_temp = {
- .type = hwmon_temp,
- .config = nfp_temp_config,
-};
-
-static u32 nfp_power_config[] = {
- HWMON_P_INPUT | HWMON_P_MAX,
- HWMON_P_INPUT,
- HWMON_P_INPUT,
- 0
-};
-
-static const struct hwmon_channel_info nfp_power = {
- .type = hwmon_power,
- .config = nfp_power_config,
-};
-
static const struct hwmon_channel_info * const nfp_hwmon_info[] = {
- &nfp_chip,
- &nfp_temp,
- &nfp_power,
+ HWMON_CHANNEL_INFO(chip, HWMON_C_REGISTER_TZ),
+ HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_CRIT),
+ HWMON_CHANNEL_INFO(power, HWMON_P_INPUT | HWMON_P_MAX,
+ HWMON_P_INPUT,
+ HWMON_P_INPUT),
NULL
};
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index f915c423fe70..886061d7351a 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -454,7 +454,7 @@ int qed_fill_dev_info(struct qed_dev *cdev,
static void qed_free_cdev(struct qed_dev *cdev)
{
- kfree((void *)cdev);
+ kfree(cdev);
}
static struct qed_dev *qed_alloc_cdev(struct pci_dev *pdev)
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
index f9dd50152b1e..28d24d59efb8 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
@@ -454,8 +454,10 @@ static int qlcnic_sriov_set_guest_vlan_mode(struct qlcnic_adapter *adapter,
num_vlans = sriov->num_allowed_vlans;
sriov->allowed_vlans = kcalloc(num_vlans, sizeof(u16), GFP_KERNEL);
- if (!sriov->allowed_vlans)
+ if (!sriov->allowed_vlans) {
+ qlcnic_sriov_free_vlans(adapter);
return -ENOMEM;
+ }
vlans = (u16 *)&cmd->rsp.arg[3];
for (i = 0; i < num_vlans; i++)
@@ -2167,8 +2169,10 @@ int qlcnic_sriov_alloc_vlans(struct qlcnic_adapter *adapter)
vf = &sriov->vf_info[i];
vf->sriov_vlans = kcalloc(sriov->num_allowed_vlans,
sizeof(*vf->sriov_vlans), GFP_KERNEL);
- if (!vf->sriov_vlans)
+ if (!vf->sriov_vlans) {
+ qlcnic_sriov_free_vlans(adapter);
return -ENOMEM;
+ }
}
return 0;
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
index f3bea196a8f9..ba8763cac9d9 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
@@ -117,11 +117,14 @@ static void rmnet_unregister_bridge(struct rmnet_port *port)
rmnet_unregister_real_device(bridge_dev);
}
-static int rmnet_newlink(struct net *src_net, struct net_device *dev,
- struct nlattr *tb[], struct nlattr *data[],
+static int rmnet_newlink(struct net_device *dev,
+ struct rtnl_newlink_params *params,
struct netlink_ext_ack *extack)
{
+ struct net *link_net = rtnl_newlink_link_net(params);
u32 data_format = RMNET_FLAGS_INGRESS_DEAGGREGATION;
+ struct nlattr **data = params->data;
+ struct nlattr **tb = params->tb;
struct net_device *real_dev;
int mode = RMNET_EPMODE_VND;
struct rmnet_endpoint *ep;
@@ -134,7 +137,7 @@ static int rmnet_newlink(struct net *src_net, struct net_device *dev,
return -EINVAL;
}
- real_dev = __dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK]));
+ real_dev = __dev_get_by_index(link_net, nla_get_u32(tb[IFLA_LINK]));
if (!real_dev) {
NL_SET_ERR_MSG_MOD(extack, "link does not exist");
return -ENODEV;
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c
index a5e3d1a88305..8b4640c5d61e 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c
@@ -686,8 +686,8 @@ void rmnet_map_update_ul_agg_config(struct rmnet_port *port, u32 size,
void rmnet_map_tx_aggregate_init(struct rmnet_port *port)
{
- hrtimer_init(&port->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- port->hrtimer.function = rmnet_map_flush_tx_packet_queue;
+ hrtimer_setup(&port->hrtimer, rmnet_map_flush_tx_packet_queue, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
spin_lock_init(&port->agg_lock);
rmnet_map_update_ul_agg_config(port, 4096, 1, 800);
INIT_WORK(&port->agg_wq, rmnet_map_flush_tx_packet_work);
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index 7306c8e323d7..4eebd9cb40a3 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -89,6 +89,7 @@
#define JUMBO_6K (6 * SZ_1K - VLAN_ETH_HLEN - ETH_FCS_LEN)
#define JUMBO_7K (7 * SZ_1K - VLAN_ETH_HLEN - ETH_FCS_LEN)
#define JUMBO_9K (9 * SZ_1K - VLAN_ETH_HLEN - ETH_FCS_LEN)
+#define JUMBO_16K (SZ_16K - VLAN_ETH_HLEN - ETH_FCS_LEN)
static const struct {
const char *name;
@@ -169,6 +170,7 @@ static const struct pci_device_id rtl8169_pci_tbl[] = {
{ PCI_VDEVICE(REALTEK, 0x8125) },
{ PCI_VDEVICE(REALTEK, 0x8126) },
{ PCI_VDEVICE(REALTEK, 0x3000) },
+ { PCI_VDEVICE(REALTEK, 0x5000) },
{}
};
@@ -2850,6 +2852,32 @@ static u32 rtl_csi_read(struct rtl8169_private *tp, int addr)
RTL_R32(tp, CSIDR) : ~0;
}
+static void rtl_disable_zrxdc_timeout(struct rtl8169_private *tp)
+{
+ struct pci_dev *pdev = tp->pci_dev;
+ u32 csi;
+ int rc;
+ u8 val;
+
+#define RTL_GEN3_RELATED_OFF 0x0890
+#define RTL_GEN3_ZRXDC_NONCOMPL 0x1
+ if (pdev->cfg_size > RTL_GEN3_RELATED_OFF) {
+ rc = pci_read_config_byte(pdev, RTL_GEN3_RELATED_OFF, &val);
+ if (rc == PCIBIOS_SUCCESSFUL) {
+ val &= ~RTL_GEN3_ZRXDC_NONCOMPL;
+ rc = pci_write_config_byte(pdev, RTL_GEN3_RELATED_OFF,
+ val);
+ if (rc == PCIBIOS_SUCCESSFUL)
+ return;
+ }
+ }
+
+ netdev_notice_once(tp->dev,
+ "No native access to PCI extended config space, falling back to CSI\n");
+ csi = rtl_csi_read(tp, RTL_GEN3_RELATED_OFF);
+ rtl_csi_write(tp, RTL_GEN3_RELATED_OFF, csi & ~RTL_GEN3_ZRXDC_NONCOMPL);
+}
+
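
A minimal sketch (not driver code) of the read-modify-write pattern above: clear one bit in a device register via the primary access path, and fall back to a secondary path if the primary read or write fails. All helpers here are illustrative stubs.

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	#define ZRXDC_NONCOMPL	0x1	/* bit to clear, per the code above */

	static bool cfg_read(uint32_t off, uint8_t *val)
	{
		(void)off;
		*val = 0x03;		/* pretend the register reads back 0x3 */
		return true;
	}

	static bool cfg_write(uint32_t off, uint8_t val)
	{
		printf("config write [%#x] = %#x\n", off, val);
		return true;
	}

	static uint32_t csi_read(uint32_t off)  { (void)off; return 0x03; }
	static void csi_write(uint32_t off, uint32_t val)
	{
		printf("CSI write    [%#x] = %#x\n", off, val);
	}

	static void disable_zrxdc_timeout(uint32_t off)
	{
		uint8_t val;

		if (cfg_read(off, &val) && cfg_write(off, val & ~ZRXDC_NONCOMPL))
			return;		/* native config space access worked */

		/* otherwise fall back to the indirect access path */
		csi_write(off, csi_read(off) & ~ZRXDC_NONCOMPL);
	}

	int main(void)
	{
		disable_zrxdc_timeout(0x890);
		return 0;
	}
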
static void rtl_set_aspm_entry_latency(struct rtl8169_private *tp, u8 val)
{
struct pci_dev *pdev = tp->pci_dev;
@@ -3822,6 +3850,7 @@ static void rtl_hw_start_8125d(struct rtl8169_private *tp)
static void rtl_hw_start_8126a(struct rtl8169_private *tp)
{
+ rtl_disable_zrxdc_timeout(tp);
rtl_set_def_aspm_entry_latency(tp);
rtl_hw_start_8125_common(tp);
}
@@ -5199,6 +5228,33 @@ static int r8169_mdio_write_reg(struct mii_bus *mii_bus, int phyaddr,
return 0;
}
+static int r8169_mdio_read_reg_c45(struct mii_bus *mii_bus, int addr,
+ int devnum, int regnum)
+{
+ struct rtl8169_private *tp = mii_bus->priv;
+
+ if (addr > 0)
+ return -ENODEV;
+
+ if (devnum == MDIO_MMD_VEND2 && regnum > MDIO_STAT2)
+ return r8168_phy_ocp_read(tp, regnum);
+
+ return 0;
+}
+
+static int r8169_mdio_write_reg_c45(struct mii_bus *mii_bus, int addr,
+ int devnum, int regnum, u16 val)
+{
+ struct rtl8169_private *tp = mii_bus->priv;
+
+ if (addr > 0 || devnum != MDIO_MMD_VEND2 || regnum <= MDIO_STAT2)
+ return -ENODEV;
+
+ r8168_phy_ocp_write(tp, regnum, val);
+
+ return 0;
+}
+
static int r8169_mdio_register(struct rtl8169_private *tp)
{
struct pci_dev *pdev = tp->pci_dev;
@@ -5229,6 +5285,11 @@ static int r8169_mdio_register(struct rtl8169_private *tp)
new_bus->read = r8169_mdio_read_reg;
new_bus->write = r8169_mdio_write_reg;
+ if (tp->mac_version >= RTL_GIGA_MAC_VER_40) {
+ new_bus->read_c45 = r8169_mdio_read_reg_c45;
+ new_bus->write_c45 = r8169_mdio_write_reg_c45;
+ }
+
ret = devm_mdiobus_register(&pdev->dev, new_bus);
if (ret)
return ret;
@@ -5252,9 +5313,9 @@ static int r8169_mdio_register(struct rtl8169_private *tp)
/* mimic behavior of r8125/r8126 vendor drivers */
if (tp->mac_version == RTL_GIGA_MAC_VER_61)
- phy_set_eee_broken(tp->phydev,
- ETHTOOL_LINK_MODE_2500baseT_Full_BIT);
- phy_set_eee_broken(tp->phydev, ETHTOOL_LINK_MODE_5000baseT_Full_BIT);
+ phy_disable_eee_mode(tp->phydev,
+ ETHTOOL_LINK_MODE_2500baseT_Full_BIT);
+ phy_disable_eee_mode(tp->phydev, ETHTOOL_LINK_MODE_5000baseT_Full_BIT);
/* PHY will be woken up in rtl_open() */
phy_suspend(tp->phydev);
@@ -5327,6 +5388,9 @@ static int rtl_jumbo_max(struct rtl8169_private *tp)
/* RTL8168c */
case RTL_GIGA_MAC_VER_18 ... RTL_GIGA_MAC_VER_24:
return JUMBO_6K;
+ /* RTL8125/8126 */
+ case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_71:
+ return JUMBO_16K;
default:
return JUMBO_9K;
}
@@ -5361,7 +5425,7 @@ done:
/* register is set if system vendor successfully tested ASPM 1.2 */
static bool rtl_aspm_is_safe(struct rtl8169_private *tp)
{
- if (tp->mac_version >= RTL_GIGA_MAC_VER_61 &&
+ if (tp->mac_version >= RTL_GIGA_MAC_VER_46 &&
r8168_mac_ocp_read(tp, 0xc0b2) & 0xf)
return true;
@@ -5410,11 +5474,10 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (region < 0)
return dev_err_probe(&pdev->dev, -ENODEV, "no MMIO resource found\n");
- rc = pcim_iomap_regions(pdev, BIT(region), KBUILD_MODNAME);
- if (rc < 0)
- return dev_err_probe(&pdev->dev, rc, "cannot remap MMIO, aborting\n");
-
- tp->mmio_addr = pcim_iomap_table(pdev)[region];
+ tp->mmio_addr = pcim_iomap_region(pdev, region, KBUILD_MODNAME);
+ if (IS_ERR(tp->mmio_addr))
+ return dev_err_probe(&pdev->dev, PTR_ERR(tp->mmio_addr),
+ "cannot remap MMIO, aborting\n");
txconfig = RTL_R32(tp, TxConfig);
if (txconfig == ~0U)
diff --git a/drivers/net/ethernet/realtek/rtase/rtase_main.c b/drivers/net/ethernet/realtek/rtase/rtase_main.c
index 3bd11cb56294..2aacc1996796 100644
--- a/drivers/net/ethernet/realtek/rtase/rtase_main.c
+++ b/drivers/net/ethernet/realtek/rtase/rtase_main.c
@@ -1501,7 +1501,10 @@ static void rtase_wait_for_quiescence(const struct net_device *dev)
static void rtase_sw_reset(struct net_device *dev)
{
struct rtase_private *tp = netdev_priv(dev);
+ struct rtase_ring *ring, *tmp;
+ struct rtase_int_vector *ivec;
int ret;
+ u32 i;
netif_stop_queue(dev);
netif_carrier_off(dev);
@@ -1512,6 +1515,13 @@ static void rtase_sw_reset(struct net_device *dev)
rtase_tx_clear(tp);
rtase_rx_clear(tp);
+ for (i = 0; i < tp->int_nums; i++) {
+ ivec = &tp->int_vector[i];
+ list_for_each_entry_safe(ring, tmp, &ivec->ring_list,
+ ring_entry)
+ list_del(&ring->ring_entry);
+ }
+
ret = rtase_init_ring(dev);
if (ret) {
netdev_err(dev, "unable to init ring\n");
diff --git a/drivers/net/ethernet/renesas/ravb_ptp.c b/drivers/net/ethernet/renesas/ravb_ptp.c
index 6e4ef7af27bf..b4365906669f 100644
--- a/drivers/net/ethernet/renesas/ravb_ptp.c
+++ b/drivers/net/ethernet/renesas/ravb_ptp.c
@@ -179,8 +179,7 @@ static int ravb_ptp_extts(struct ptp_clock_info *ptp,
/* Reject requests with unsupported flags */
if (req->flags & ~(PTP_ENABLE_FEATURE |
PTP_RISING_EDGE |
- PTP_FALLING_EDGE |
- PTP_STRICT_FLAGS))
+ PTP_FALLING_EDGE))
return -EOPNOTSUPP;
if (req->index)
diff --git a/drivers/net/ethernet/renesas/rcar_gen4_ptp.c b/drivers/net/ethernet/renesas/rcar_gen4_ptp.c
index 72e7fcc56693..4c3e8cc5046f 100644
--- a/drivers/net/ethernet/renesas/rcar_gen4_ptp.c
+++ b/drivers/net/ethernet/renesas/rcar_gen4_ptp.c
@@ -29,8 +29,8 @@ static const struct rcar_gen4_ptp_reg_offset gen4_offs = {
static int rcar_gen4_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
struct rcar_gen4_ptp_private *ptp_priv = ptp_to_priv(ptp);
- bool neg_adj = scaled_ppm < 0 ? true : false;
s64 addend = ptp_priv->default_addend;
+ bool neg_adj = scaled_ppm < 0;
s64 diff;
if (neg_adj)
diff --git a/drivers/net/ethernet/renesas/rswitch.c b/drivers/net/ethernet/renesas/rswitch.c
index 84d09a8973b7..aba772e14555 100644
--- a/drivers/net/ethernet/renesas/rswitch.c
+++ b/drivers/net/ethernet/renesas/rswitch.c
@@ -1287,17 +1287,14 @@ static struct device_node *rswitch_get_port_node(struct rswitch_device *rdev)
if (!ports)
return NULL;
- for_each_child_of_node(ports, port) {
+ for_each_available_child_of_node(ports, port) {
err = of_property_read_u32(port, "reg", &index);
if (err < 0) {
port = NULL;
goto out;
}
- if (index == rdev->etha->index) {
- if (!of_device_is_available(port))
- port = NULL;
+ if (index == rdev->etha->index)
break;
- }
}
out:
diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c
index fe0bf1d3217a..36af94a2e062 100644
--- a/drivers/net/ethernet/rocker/rocker_main.c
+++ b/drivers/net/ethernet/rocker/rocker_main.c
@@ -2576,7 +2576,7 @@ static int rocker_probe_port(struct rocker *rocker, unsigned int port_number)
rocker_carrier_init(rocker_port);
dev->features |= NETIF_F_SG;
- dev->netns_local = true;
+ dev->netns_immutable = true;
/* MTU range: 68 - 9000 */
dev->min_mtu = ROCKER_PORT_MIN_MTU;
diff --git a/drivers/net/ethernet/sfc/Kconfig b/drivers/net/ethernet/sfc/Kconfig
index 3eb55dcfa8a6..c4c43434f314 100644
--- a/drivers/net/ethernet/sfc/Kconfig
+++ b/drivers/net/ethernet/sfc/Kconfig
@@ -38,8 +38,9 @@ config SFC_MTD
default y
help
This exposes the on-board flash and/or EEPROM as MTD devices
- (e.g. /dev/mtd1). This is required to update the firmware or
- the boot configuration under Linux.
+ (e.g. /dev/mtd1). This is required to update the boot
+	  configuration under Linux, or to use some older userland tools to
+ update the firmware.
config SFC_MCDI_MON
bool "Solarflare SFC9100-family hwmon support"
depends on SFC && HWMON && !(SFC=y && HWMON=m)
diff --git a/drivers/net/ethernet/sfc/Makefile b/drivers/net/ethernet/sfc/Makefile
index 8f446b9bd5ee..d99039ec468d 100644
--- a/drivers/net/ethernet/sfc/Makefile
+++ b/drivers/net/ethernet/sfc/Makefile
@@ -7,7 +7,7 @@ sfc-y += efx.o efx_common.o efx_channels.o nic.o \
mcdi_functions.o mcdi_filters.o mcdi_mon.o \
ef100.o ef100_nic.o ef100_netdev.o \
ef100_ethtool.o ef100_rx.o ef100_tx.o \
- efx_devlink.o
+ efx_devlink.o efx_reflash.o
sfc-$(CONFIG_SFC_MTD) += mtd.o
sfc-$(CONFIG_SFC_SRIOV) += sriov.o ef10_sriov.o ef100_sriov.o ef100_rep.o \
mae.o tc.o tc_bindings.o tc_counters.o \
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index 452009ed7a43..47349c148c0c 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -3501,7 +3501,7 @@ static int efx_ef10_mtd_probe_partition(struct efx_nic *efx,
MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_METADATA_IN_LEN);
MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_METADATA_OUT_LENMAX);
const struct efx_ef10_nvram_type_info *info;
- size_t size, erase_size, outlen;
+ size_t size, erase_size, write_size, outlen;
int type_idx = 0;
bool protected;
int rc;
@@ -3516,7 +3516,8 @@ static int efx_ef10_mtd_probe_partition(struct efx_nic *efx,
if (info->port != efx_port_num(efx))
return -ENODEV;
- rc = efx_mcdi_nvram_info(efx, type, &size, &erase_size, &protected);
+ rc = efx_mcdi_nvram_info(efx, type, &size, &erase_size, &write_size,
+ &protected);
if (rc)
return rc;
if (protected &&
@@ -3561,6 +3562,8 @@ static int efx_ef10_mtd_probe_partition(struct efx_nic *efx,
if (!erase_size)
part->common.mtd.flags |= MTD_NO_ERASE;
+ part->common.mtd.writesize = write_size;
+
return 0;
}
@@ -4416,6 +4419,7 @@ const struct efx_nic_type efx_x4_nic_type = {
.can_rx_scatter = true,
.always_rx_scatter = true,
.option_descriptors = true,
+ .flash_auto_partition = true,
.min_interrupt_mode = EFX_INT_MODE_MSIX,
.timer_period_max = 1 << ERF_DD_EVQ_IND_TIMER_VAL_WIDTH,
.offload_features = EF10_OFFLOAD_FEATURES,
diff --git a/drivers/net/ethernet/sfc/ef100_netdev.c b/drivers/net/ethernet/sfc/ef100_netdev.c
index 7f7d560cb2b4..d941f073f1eb 100644
--- a/drivers/net/ethernet/sfc/ef100_netdev.c
+++ b/drivers/net/ethernet/sfc/ef100_netdev.c
@@ -452,7 +452,6 @@ int ef100_probe_netdev(struct efx_probe_data *probe_data)
NETIF_F_HIGHDMA | NETIF_F_ALL_TSO;
netif_set_tso_max_segs(net_dev,
ESE_EF100_DP_GZ_TSO_MAX_HDR_NUM_SEGS_DEFAULT);
- efx->mdio.dev = net_dev;
rc = efx_ef100_init_datapath_caps(efx);
if (rc < 0)
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 650136dfc642..112e55b98ed3 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -476,28 +476,6 @@ void efx_get_irq_moderation(struct efx_nic *efx, unsigned int *tx_usecs,
/**************************************************************************
*
- * ioctls
- *
- *************************************************************************/
-
-/* Net device ioctl
- * Context: process, rtnl_lock() held.
- */
-static int efx_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
-{
- struct efx_nic *efx = efx_netdev_priv(net_dev);
- struct mii_ioctl_data *data = if_mii(ifr);
-
- /* Convert phy_id from older PRTAD/DEVAD format */
- if ((cmd == SIOCGMIIREG || cmd == SIOCSMIIREG) &&
- (data->phy_id & 0xfc00) == 0x0400)
- data->phy_id ^= MDIO_PHY_ID_C45 | 0x0400;
-
- return mdio_mii_ioctl(&efx->mdio, data, cmd);
-}
-
-/**************************************************************************
- *
* Kernel net device interface
*
*************************************************************************/
@@ -593,7 +571,6 @@ static const struct net_device_ops efx_netdev_ops = {
.ndo_tx_timeout = efx_watchdog,
.ndo_start_xmit = efx_hard_start_xmit,
.ndo_validate_addr = eth_validate_addr,
- .ndo_eth_ioctl = efx_ioctl,
.ndo_change_mtu = efx_change_mtu,
.ndo_set_mac_address = efx_set_mac_address,
.ndo_set_rx_mode = efx_set_rx_mode,
@@ -1201,7 +1178,6 @@ static int efx_pci_probe(struct pci_dev *pci_dev,
rc = efx_init_struct(efx, pci_dev);
if (rc)
goto fail1;
- efx->mdio.dev = net_dev;
pci_info(pci_dev, "Solarflare NIC detected\n");
diff --git a/drivers/net/ethernet/sfc/efx_common.c b/drivers/net/ethernet/sfc/efx_common.c
index c88ec3e24836..5a14d94163b1 100644
--- a/drivers/net/ethernet/sfc/efx_common.c
+++ b/drivers/net/ethernet/sfc/efx_common.c
@@ -1003,6 +1003,7 @@ int efx_init_struct(struct efx_nic *efx, struct pci_dev *pci_dev)
INIT_LIST_HEAD(&efx->vf_reps);
INIT_WORK(&efx->mac_work, efx_mac_work);
init_waitqueue_head(&efx->flush_wq);
+ mutex_init(&efx->reflash_mutex);
efx->tx_queues_per_channel = 1;
efx->rxq_entries = EFX_DEFAULT_DMAQ_SIZE;
diff --git a/drivers/net/ethernet/sfc/efx_devlink.c b/drivers/net/ethernet/sfc/efx_devlink.c
index 3cd750820fdd..d842c60dfc10 100644
--- a/drivers/net/ethernet/sfc/efx_devlink.c
+++ b/drivers/net/ethernet/sfc/efx_devlink.c
@@ -19,6 +19,7 @@
#include "mae.h"
#include "ef100_rep.h"
#endif
+#include "efx_reflash.h"
struct efx_devlink {
struct efx_nic *efx;
@@ -615,7 +616,19 @@ static int efx_devlink_info_get(struct devlink *devlink,
return 0;
}
+static int efx_devlink_flash_update(struct devlink *devlink,
+ struct devlink_flash_update_params *params,
+ struct netlink_ext_ack *extack)
+{
+ struct efx_devlink *devlink_private = devlink_priv(devlink);
+ struct efx_nic *efx = devlink_private->efx;
+
+ return efx_reflash_flash_firmware(efx, params->fw, extack);
+}
+
static const struct devlink_ops sfc_devlink_ops = {
+ .supported_flash_update_params = 0,
+ .flash_update = efx_devlink_flash_update,
.info_get = efx_devlink_info_get,
};
diff --git a/drivers/net/ethernet/sfc/efx_reflash.c b/drivers/net/ethernet/sfc/efx_reflash.c
new file mode 100644
index 000000000000..b12e95f1c80a
--- /dev/null
+++ b/drivers/net/ethernet/sfc/efx_reflash.c
@@ -0,0 +1,522 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/****************************************************************************
+ * Driver for AMD network controllers and boards
+ * Copyright (C) 2025, Advanced Micro Devices, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include <linux/crc32.h>
+#include <net/devlink.h>
+#include "efx_reflash.h"
+#include "net_driver.h"
+#include "fw_formats.h"
+#include "mcdi_pcol.h"
+#include "mcdi.h"
+
+/* Try to parse a Reflash header at the specified offset */
+static bool efx_reflash_parse_reflash_header(const struct firmware *fw,
+ size_t header_offset, u32 *type,
+ u32 *subtype, const u8 **data,
+ size_t *data_size)
+{
+ size_t header_end, trailer_offset, trailer_end;
+ u32 magic, version, payload_size, header_len;
+ const u8 *header, *trailer;
+ u32 expected_crc, crc;
+
+ if (check_add_overflow(header_offset, EFX_REFLASH_HEADER_LENGTH_OFST +
+ EFX_REFLASH_HEADER_LENGTH_LEN,
+ &header_end))
+ return false;
+ if (fw->size < header_end)
+ return false;
+
+ header = fw->data + header_offset;
+ magic = get_unaligned_le32(header + EFX_REFLASH_HEADER_MAGIC_OFST);
+ if (magic != EFX_REFLASH_HEADER_MAGIC_VALUE)
+ return false;
+
+ version = get_unaligned_le32(header + EFX_REFLASH_HEADER_VERSION_OFST);
+ if (version != EFX_REFLASH_HEADER_VERSION_VALUE)
+ return false;
+
+ payload_size = get_unaligned_le32(header + EFX_REFLASH_HEADER_PAYLOAD_SIZE_OFST);
+ header_len = get_unaligned_le32(header + EFX_REFLASH_HEADER_LENGTH_OFST);
+ if (check_add_overflow(header_offset, header_len, &trailer_offset) ||
+ check_add_overflow(trailer_offset, payload_size, &trailer_offset) ||
+ check_add_overflow(trailer_offset, EFX_REFLASH_TRAILER_LEN,
+ &trailer_end))
+ return false;
+ if (fw->size < trailer_end)
+ return false;
+
+ trailer = fw->data + trailer_offset;
+ expected_crc = get_unaligned_le32(trailer + EFX_REFLASH_TRAILER_CRC_OFST);
+ /* Addition could overflow u32, but not size_t since we already
+ * checked trailer_offset didn't overflow. So cast to size_t first.
+ */
+ crc = crc32_le(0, header, (size_t)header_len + payload_size);
+ if (crc != expected_crc)
+ return false;
+
+ *type = get_unaligned_le32(header + EFX_REFLASH_HEADER_FIRMWARE_TYPE_OFST);
+ *subtype = get_unaligned_le32(header + EFX_REFLASH_HEADER_FIRMWARE_SUBTYPE_OFST);
+ if (*type == EFX_REFLASH_FIRMWARE_TYPE_BUNDLE) {
+ /* All the bundle data is written verbatim to NVRAM */
+ *data = fw->data;
+ *data_size = fw->size;
+ } else {
+ /* Other payload types strip the reflash header and trailer
+ * from the data written to NVRAM
+ */
+ *data = header + header_len;
+ *data_size = payload_size;
+ }
+
+ return true;
+}
+
+/* Map from FIRMWARE_TYPE to NVRAM_PARTITION_TYPE */
+static int efx_reflash_partition_type(u32 type, u32 subtype,
+ u32 *partition_type,
+ u32 *partition_subtype)
+{
+ int rc = 0;
+
+ switch (type) {
+ case EFX_REFLASH_FIRMWARE_TYPE_BOOTROM:
+ *partition_type = NVRAM_PARTITION_TYPE_EXPANSION_ROM;
+ *partition_subtype = subtype;
+ break;
+ case EFX_REFLASH_FIRMWARE_TYPE_BUNDLE:
+ *partition_type = NVRAM_PARTITION_TYPE_BUNDLE;
+ *partition_subtype = subtype;
+ break;
+ default:
+ /* Not supported */
+ rc = -EINVAL;
+ }
+
+ return rc;
+}
+
+/* Try to parse a SmartNIC image header at the specified offset */
+static bool efx_reflash_parse_snic_header(const struct firmware *fw,
+ size_t header_offset,
+ u32 *partition_type,
+ u32 *partition_subtype,
+ const u8 **data, size_t *data_size)
+{
+ u32 magic, version, payload_size, header_len, expected_crc, crc;
+ size_t header_end, payload_end;
+ const u8 *header;
+
+ if (check_add_overflow(header_offset, EFX_SNICIMAGE_HEADER_MINLEN,
+ &header_end) ||
+ fw->size < header_end)
+ return false;
+
+ header = fw->data + header_offset;
+ magic = get_unaligned_le32(header + EFX_SNICIMAGE_HEADER_MAGIC_OFST);
+ if (magic != EFX_SNICIMAGE_HEADER_MAGIC_VALUE)
+ return false;
+
+ version = get_unaligned_le32(header + EFX_SNICIMAGE_HEADER_VERSION_OFST);
+ if (version != EFX_SNICIMAGE_HEADER_VERSION_VALUE)
+ return false;
+
+ header_len = get_unaligned_le32(header + EFX_SNICIMAGE_HEADER_LENGTH_OFST);
+ if (check_add_overflow(header_offset, header_len, &header_end))
+ return false;
+ payload_size = get_unaligned_le32(header + EFX_SNICIMAGE_HEADER_PAYLOAD_SIZE_OFST);
+ if (check_add_overflow(header_end, payload_size, &payload_end) ||
+ fw->size < payload_end)
+ return false;
+
+ expected_crc = get_unaligned_le32(header + EFX_SNICIMAGE_HEADER_CRC_OFST);
+
+ /* Calculate CRC omitting the expected CRC field itself */
+ crc = crc32_le(~0, header, EFX_SNICIMAGE_HEADER_CRC_OFST);
+ crc = ~crc32_le(crc,
+ header + EFX_SNICIMAGE_HEADER_CRC_OFST +
+ EFX_SNICIMAGE_HEADER_CRC_LEN,
+ header_len + payload_size - EFX_SNICIMAGE_HEADER_CRC_OFST -
+ EFX_SNICIMAGE_HEADER_CRC_LEN);
+ if (crc != expected_crc)
+ return false;
+
+ *partition_type =
+ get_unaligned_le32(header + EFX_SNICIMAGE_HEADER_PARTITION_TYPE_OFST);
+ *partition_subtype =
+ get_unaligned_le32(header + EFX_SNICIMAGE_HEADER_PARTITION_SUBTYPE_OFST);
+ *data = fw->data;
+ *data_size = fw->size;
+ return true;
+}
+
+/* Try to parse a SmartNIC bundle header at the specified offset */
+static bool efx_reflash_parse_snic_bundle_header(const struct firmware *fw,
+ size_t header_offset,
+ u32 *partition_type,
+ u32 *partition_subtype,
+ const u8 **data,
+ size_t *data_size)
+{
+ u32 magic, version, bundle_type, header_len, expected_crc, crc;
+ size_t header_end;
+ const u8 *header;
+
+ if (check_add_overflow(header_offset, EFX_SNICBUNDLE_HEADER_LEN,
+ &header_end))
+ return false;
+ if (fw->size < header_end)
+ return false;
+
+ header = fw->data + header_offset;
+ magic = get_unaligned_le32(header + EFX_SNICBUNDLE_HEADER_MAGIC_OFST);
+ if (magic != EFX_SNICBUNDLE_HEADER_MAGIC_VALUE)
+ return false;
+
+ version = get_unaligned_le32(header + EFX_SNICBUNDLE_HEADER_VERSION_OFST);
+ if (version != EFX_SNICBUNDLE_HEADER_VERSION_VALUE)
+ return false;
+
+ bundle_type = get_unaligned_le32(header + EFX_SNICBUNDLE_HEADER_BUNDLE_TYPE_OFST);
+ if (bundle_type != NVRAM_PARTITION_TYPE_BUNDLE)
+ return false;
+
+ header_len = get_unaligned_le32(header + EFX_SNICBUNDLE_HEADER_LENGTH_OFST);
+ if (header_len != EFX_SNICBUNDLE_HEADER_LEN)
+ return false;
+
+ expected_crc = get_unaligned_le32(header + EFX_SNICBUNDLE_HEADER_CRC_OFST);
+ crc = ~crc32_le(~0, header, EFX_SNICBUNDLE_HEADER_CRC_OFST);
+ if (crc != expected_crc)
+ return false;
+
+ *partition_type = NVRAM_PARTITION_TYPE_BUNDLE;
+ *partition_subtype = get_unaligned_le32(header + EFX_SNICBUNDLE_HEADER_BUNDLE_SUBTYPE_OFST);
+ *data = fw->data;
+ *data_size = fw->size;
+ return true;
+}
+
+/* Try to find a valid firmware payload in the firmware data.
+ * When we recognise a valid header, we parse it for the partition type
+ * (so we know where to ask the MC to write it to) and the location of
+ * the data blob to write.
+ */
+static int efx_reflash_parse_firmware_data(const struct firmware *fw,
+ u32 *partition_type,
+ u32 *partition_subtype,
+ const u8 **data, size_t *data_size)
+{
+ size_t header_offset;
+ u32 type, subtype;
+
+ /* Some packaging formats (such as CMS/PKCS#7 signed images)
+ * prepend a header for which finding the size is a non-trivial
+ * task, so step through the firmware data until we find a valid
+ * header.
+ *
+ * The checks are intended to reject firmware data that is clearly not
+ * in the expected format. They do not need to be exhaustive as the
+ * running firmware will perform its own comprehensive validity and
+ * compatibility checks during the update procedure.
+ *
+ * Firmware packages may contain multiple reflash images, e.g. a
+ * bundle containing one or more other images. Only check the
+ * outermost container by stopping after the first candidate image
+	 * found, even if it is for an unsupported partition type.
+ */
+ for (header_offset = 0; header_offset < fw->size; header_offset++) {
+ if (efx_reflash_parse_snic_bundle_header(fw, header_offset,
+ partition_type,
+ partition_subtype,
+ data, data_size))
+ return 0;
+
+ if (efx_reflash_parse_snic_header(fw, header_offset,
+ partition_type,
+ partition_subtype, data,
+ data_size))
+ return 0;
+
+ if (efx_reflash_parse_reflash_header(fw, header_offset, &type,
+ &subtype, data, data_size))
+ return efx_reflash_partition_type(type, subtype,
+ partition_type,
+ partition_subtype);
+ }
+
+ return -EINVAL;
+}
+
+/* Limit the number of status updates during the erase or write phases */
+#define EFX_DEVLINK_STATUS_UPDATE_COUNT 50
+
+/* Expected timeout in seconds for efx_mcdi_nvram_update_finish_polled() */
+#define EFX_DEVLINK_UPDATE_FINISH_TIMEOUT 900
+
+/* Ideal erase chunk size. This is a balance between minimising the number of
+ * MCDI requests to erase an entire partition whilst avoiding tripping the MCDI
+ * RPC timeout.
+ */
+#define EFX_NVRAM_ERASE_IDEAL_CHUNK_SIZE (64 * 1024)
+
+static int efx_reflash_erase_partition(struct efx_nic *efx,
+ struct netlink_ext_ack *extack,
+ struct devlink *devlink, u32 type,
+ size_t partition_size,
+ size_t align)
+{
+ size_t chunk, offset, next_update;
+ int rc;
+
+ /* Partitions that cannot be erased or do not require erase before
+	 * write are advertised with an erase alignment/sector size of zero.
+ */
+ if (align == 0)
+ /* Nothing to do */
+ return 0;
+
+ if (partition_size % align)
+ return -EINVAL;
+
+ /* Erase the entire NVRAM partition a chunk at a time to avoid
+ * potentially tripping the MCDI RPC timeout.
+ */
+ if (align >= EFX_NVRAM_ERASE_IDEAL_CHUNK_SIZE)
+ chunk = align;
+ else
+ chunk = rounddown(EFX_NVRAM_ERASE_IDEAL_CHUNK_SIZE, align);
+
+ for (offset = 0, next_update = 0; offset < partition_size; offset += chunk) {
+ if (offset >= next_update) {
+ devlink_flash_update_status_notify(devlink, "Erasing",
+ NULL, offset,
+ partition_size);
+ next_update += partition_size / EFX_DEVLINK_STATUS_UPDATE_COUNT;
+ }
+
+ chunk = min_t(size_t, partition_size - offset, chunk);
+ rc = efx_mcdi_nvram_erase(efx, type, offset, chunk);
+ if (rc) {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "Erase failed for NVRAM partition %#x at %#zx-%#zx",
+ type, offset, offset + chunk - 1);
+ return rc;
+ }
+ }
+
+ devlink_flash_update_status_notify(devlink, "Erasing", NULL,
+ partition_size, partition_size);
+
+ return 0;
+}
+
+static int efx_reflash_write_partition(struct efx_nic *efx,
+ struct netlink_ext_ack *extack,
+ struct devlink *devlink, u32 type,
+ const u8 *data, size_t data_size,
+ size_t align)
+{
+ size_t write_max, chunk, offset, next_update;
+ int rc;
+
+ if (align == 0)
+ return -EINVAL;
+
+ /* Write the NVRAM partition in chunks that are the largest multiple
+ * of the partition's required write alignment that will fit into the
+ * MCDI NVRAM_WRITE RPC payload.
+ */
+ if (efx->type->mcdi_max_ver < 2)
+ write_max = MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_LEN *
+ MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_MAXNUM;
+ else
+ write_max = MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_LEN *
+ MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_MAXNUM_MCDI2;
+ chunk = rounddown(write_max, align);
+
+ for (offset = 0, next_update = 0; offset + chunk <= data_size; offset += chunk) {
+ if (offset >= next_update) {
+ devlink_flash_update_status_notify(devlink, "Writing",
+ NULL, offset,
+ data_size);
+ next_update += data_size / EFX_DEVLINK_STATUS_UPDATE_COUNT;
+ }
+
+ rc = efx_mcdi_nvram_write(efx, type, offset, data + offset, chunk);
+ if (rc) {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "Write failed for NVRAM partition %#x at %#zx-%#zx",
+ type, offset, offset + chunk - 1);
+ return rc;
+ }
+ }
+
+	/* Round up leftover data to satisfy write alignment */
+ if (offset < data_size) {
+ size_t remaining = data_size - offset;
+ u8 *buf;
+
+ if (offset >= next_update)
+ devlink_flash_update_status_notify(devlink, "Writing",
+ NULL, offset,
+ data_size);
+
+ chunk = roundup(remaining, align);
+ buf = kmalloc(chunk, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ memcpy(buf, data + offset, remaining);
+ memset(buf + remaining, 0xFF, chunk - remaining);
+ rc = efx_mcdi_nvram_write(efx, type, offset, buf, chunk);
+ kfree(buf);
+ if (rc) {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "Write failed for NVRAM partition %#x at %#zx-%#zx",
+ type, offset, offset + chunk - 1);
+ return rc;
+ }
+ }
+
+ devlink_flash_update_status_notify(devlink, "Writing", NULL, data_size,
+ data_size);
+
+ return 0;
+}
+
+int efx_reflash_flash_firmware(struct efx_nic *efx, const struct firmware *fw,
+ struct netlink_ext_ack *extack)
+{
+ size_t data_size, size, erase_align, write_align;
+ struct devlink *devlink = efx->devlink;
+ u32 type, data_subtype, subtype;
+ const u8 *data;
+ bool protected;
+ int rc;
+
+ if (!efx_has_cap(efx, BUNDLE_UPDATE)) {
+ NL_SET_ERR_MSG_MOD(extack, "NVRAM bundle updates are not supported by the firmware");
+ return -EOPNOTSUPP;
+ }
+
+ mutex_lock(&efx->reflash_mutex);
+
+ devlink_flash_update_status_notify(devlink, "Checking update", NULL, 0, 0);
+
+ if (efx->type->flash_auto_partition) {
+ /* NIC wants entire FW file including headers;
+ * FW will validate 'subtype' if there is one
+ */
+ type = NVRAM_PARTITION_TYPE_AUTO;
+ data = fw->data;
+ data_size = fw->size;
+ } else {
+ rc = efx_reflash_parse_firmware_data(fw, &type, &data_subtype, &data,
+ &data_size);
+ if (rc) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Firmware image validation check failed");
+ goto out_unlock;
+ }
+
+ rc = efx_mcdi_nvram_metadata(efx, type, &subtype, NULL, NULL, 0);
+ if (rc) {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "Metadata query for NVRAM partition %#x failed",
+ type);
+ goto out_unlock;
+ }
+
+ if (subtype != data_subtype) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Firmware image is not appropriate for this adapter");
+ rc = -EINVAL;
+ goto out_unlock;
+ }
+ }
+
+ rc = efx_mcdi_nvram_info(efx, type, &size, &erase_align, &write_align,
+ &protected);
+ if (rc) {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "Info query for NVRAM partition %#x failed",
+ type);
+ goto out_unlock;
+ }
+
+ if (protected) {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "NVRAM partition %#x is protected",
+ type);
+ rc = -EPERM;
+ goto out_unlock;
+ }
+
+ if (write_align == 0) {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "NVRAM partition %#x is not writable",
+ type);
+ rc = -EACCES;
+ goto out_unlock;
+ }
+
+ if (erase_align != 0 && size % erase_align) {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "NVRAM partition %#x has a bad partition table entry, can't erase it",
+ type);
+ rc = -EACCES;
+ goto out_unlock;
+ }
+
+ if (data_size > size) {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "Firmware image is too big for NVRAM partition %#x",
+ type);
+ rc = -EFBIG;
+ goto out_unlock;
+ }
+
+ devlink_flash_update_status_notify(devlink, "Starting update", NULL, 0, 0);
+
+ rc = efx_mcdi_nvram_update_start(efx, type);
+ if (rc) {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "Update start request for NVRAM partition %#x failed",
+ type);
+ goto out_unlock;
+ }
+
+ rc = efx_reflash_erase_partition(efx, extack, devlink, type, size,
+ erase_align);
+ if (rc)
+ goto out_update_finish;
+
+ rc = efx_reflash_write_partition(efx, extack, devlink, type, data,
+ data_size, write_align);
+ if (rc)
+ goto out_update_finish;
+
+ devlink_flash_update_timeout_notify(devlink, "Finishing update", NULL,
+ EFX_DEVLINK_UPDATE_FINISH_TIMEOUT);
+
+out_update_finish:
+ if (rc)
+ /* Don't obscure the return code from an earlier failure */
+ efx_mcdi_nvram_update_finish(efx, type, EFX_UPDATE_FINISH_ABORT);
+ else
+ rc = efx_mcdi_nvram_update_finish_polled(efx, type);
+out_unlock:
+ mutex_unlock(&efx->reflash_mutex);
+ devlink_flash_update_status_notify(devlink, rc ? "Update failed" :
+ "Update complete",
+ NULL, 0, 0);
+ return rc;
+}
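/* Illustrative sketch, not part of the patch: the chunk sizes chosen by
 * efx_reflash_erase_partition() and efx_reflash_write_partition() above,
 * worked through for a few hypothetical geometries. The 64 KiB ideal erase
 * chunk and the rounddown()/roundup() behaviour mirror the driver; the
 * example sector sizes, payload limit and tail length are made up.
 */
#include <stdio.h>
#include <stddef.h>

#define IDEAL_ERASE_CHUNK (64 * 1024)	/* EFX_NVRAM_ERASE_IDEAL_CHUNK_SIZE */

/* Erase unit used per MCDI request; assumes align > 0 (the driver returns
 * early when the partition needs no erase).
 */
static size_t erase_chunk(size_t align)
{
	return align >= IDEAL_ERASE_CHUNK ? align
					  : IDEAL_ERASE_CHUNK / align * align;
}

int main(void)
{
	/* 4 KiB sectors -> 64 KiB erase chunks; 96 KiB sectors -> 96 KiB */
	printf("%zu %zu\n", erase_chunk(4096), erase_chunk(96 * 1024));

	/* Write side, with a hypothetical 7936-byte MCDI payload limit and a
	 * 128-byte write alignment: each NVRAM_WRITE carries
	 * rounddown(7936, 128) = 7936 bytes, and a 1000-byte tail would be
	 * padded with 0xFF up to roundup(1000, 128) = 1024 bytes.
	 */
	return 0;
}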
diff --git a/drivers/net/ethernet/sfc/efx_reflash.h b/drivers/net/ethernet/sfc/efx_reflash.h
new file mode 100644
index 000000000000..3dffac565161
--- /dev/null
+++ b/drivers/net/ethernet/sfc/efx_reflash.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/****************************************************************************
+ * Driver for AMD network controllers and boards
+ * Copyright (C) 2025, Advanced Micro Devices, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef _EFX_REFLASH_H
+#define _EFX_REFLASH_H
+
+#include "net_driver.h"
+#include <linux/firmware.h>
+
+int efx_reflash_flash_firmware(struct efx_nic *efx, const struct firmware *fw,
+ struct netlink_ext_ack *extack);
+
+#endif /* _EFX_REFLASH_H */
diff --git a/drivers/net/ethernet/sfc/fw_formats.h b/drivers/net/ethernet/sfc/fw_formats.h
new file mode 100644
index 000000000000..cbc350c96013
--- /dev/null
+++ b/drivers/net/ethernet/sfc/fw_formats.h
@@ -0,0 +1,114 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/****************************************************************************
+ * Driver for AMD network controllers and boards
+ * Copyright (C) 2025, Advanced Micro Devices, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef _EFX_FW_FORMATS_H
+#define _EFX_FW_FORMATS_H
+
+/* Header layouts of firmware update images recognised by Efx NICs.
+ * The sources-of-truth for these layouts are AMD internal documents
+ * and sfregistry headers, neither of which are available externally
+ * nor usable directly by the driver.
+ *
+ * While each format includes a 'magic number', these are at different
+ * offsets in the various formats, and a legal header for one format
+ * could have the right value in whichever field occupies that offset
+ * to match another format's magic.
+ * Besides, some packaging formats (such as CMS/PKCS#7 signed images)
+ * prepend a header for which finding the size is a non-trivial task;
+ * rather than trying to parse those headers, we search byte-by-byte
+ * through the provided firmware image looking for a valid header.
+ * Thus, format recognition has to include validation of the checksum
+ * field, even though the firmware will validate that itself before
+ * applying the image.
+ */
+
+/* EF10 (Medford2, X2) "reflash" header format. Defined in SF-121352-AN */
+#define EFX_REFLASH_HEADER_MAGIC_OFST 0
+#define EFX_REFLASH_HEADER_MAGIC_LEN 4
+#define EFX_REFLASH_HEADER_MAGIC_VALUE 0x106F1A5
+
+#define EFX_REFLASH_HEADER_VERSION_OFST 4
+#define EFX_REFLASH_HEADER_VERSION_LEN 4
+#define EFX_REFLASH_HEADER_VERSION_VALUE 4
+
+#define EFX_REFLASH_HEADER_FIRMWARE_TYPE_OFST 8
+#define EFX_REFLASH_HEADER_FIRMWARE_TYPE_LEN 4
+#define EFX_REFLASH_FIRMWARE_TYPE_BOOTROM 0x2
+#define EFX_REFLASH_FIRMWARE_TYPE_BUNDLE 0xd
+
+#define EFX_REFLASH_HEADER_FIRMWARE_SUBTYPE_OFST 12
+#define EFX_REFLASH_HEADER_FIRMWARE_SUBTYPE_LEN 4
+
+#define EFX_REFLASH_HEADER_PAYLOAD_SIZE_OFST 16
+#define EFX_REFLASH_HEADER_PAYLOAD_SIZE_LEN 4
+
+#define EFX_REFLASH_HEADER_LENGTH_OFST 20
+#define EFX_REFLASH_HEADER_LENGTH_LEN 4
+
+/* Reflash trailer */
+#define EFX_REFLASH_TRAILER_CRC_OFST 0
+#define EFX_REFLASH_TRAILER_CRC_LEN 4
+
+#define EFX_REFLASH_TRAILER_LEN \
+ (EFX_REFLASH_TRAILER_CRC_OFST + EFX_REFLASH_TRAILER_CRC_LEN)
+
+/* EF100 "SmartNIC image" header format.
+ * Defined in sfregistry "src/layout/snic_image_hdr.h".
+ */
+#define EFX_SNICIMAGE_HEADER_MAGIC_OFST 16
+#define EFX_SNICIMAGE_HEADER_MAGIC_LEN 4
+#define EFX_SNICIMAGE_HEADER_MAGIC_VALUE 0x541C057A
+
+#define EFX_SNICIMAGE_HEADER_VERSION_OFST 20
+#define EFX_SNICIMAGE_HEADER_VERSION_LEN 4
+#define EFX_SNICIMAGE_HEADER_VERSION_VALUE 1
+
+#define EFX_SNICIMAGE_HEADER_LENGTH_OFST 24
+#define EFX_SNICIMAGE_HEADER_LENGTH_LEN 4
+
+#define EFX_SNICIMAGE_HEADER_PARTITION_TYPE_OFST 36
+#define EFX_SNICIMAGE_HEADER_PARTITION_TYPE_LEN 4
+
+#define EFX_SNICIMAGE_HEADER_PARTITION_SUBTYPE_OFST 40
+#define EFX_SNICIMAGE_HEADER_PARTITION_SUBTYPE_LEN 4
+
+#define EFX_SNICIMAGE_HEADER_PAYLOAD_SIZE_OFST 60
+#define EFX_SNICIMAGE_HEADER_PAYLOAD_SIZE_LEN 4
+
+#define EFX_SNICIMAGE_HEADER_CRC_OFST 64
+#define EFX_SNICIMAGE_HEADER_CRC_LEN 4
+
+#define EFX_SNICIMAGE_HEADER_MINLEN 256
+
+/* EF100 "SmartNIC bundle" header format. Defined in SF-122606-TC */
+#define EFX_SNICBUNDLE_HEADER_MAGIC_OFST 0
+#define EFX_SNICBUNDLE_HEADER_MAGIC_LEN 4
+#define EFX_SNICBUNDLE_HEADER_MAGIC_VALUE 0xB1001001
+
+#define EFX_SNICBUNDLE_HEADER_VERSION_OFST 4
+#define EFX_SNICBUNDLE_HEADER_VERSION_LEN 4
+#define EFX_SNICBUNDLE_HEADER_VERSION_VALUE 1
+
+#define EFX_SNICBUNDLE_HEADER_BUNDLE_TYPE_OFST 8
+#define EFX_SNICBUNDLE_HEADER_BUNDLE_TYPE_LEN 4
+
+#define EFX_SNICBUNDLE_HEADER_BUNDLE_SUBTYPE_OFST 12
+#define EFX_SNICBUNDLE_HEADER_BUNDLE_SUBTYPE_LEN 4
+
+#define EFX_SNICBUNDLE_HEADER_LENGTH_OFST 20
+#define EFX_SNICBUNDLE_HEADER_LENGTH_LEN 4
+
+#define EFX_SNICBUNDLE_HEADER_CRC_OFST 224
+#define EFX_SNICBUNDLE_HEADER_CRC_LEN 4
+
+#define EFX_SNICBUNDLE_HEADER_LEN \
+ (EFX_SNICBUNDLE_HEADER_CRC_OFST + EFX_SNICBUNDLE_HEADER_CRC_LEN)
+
+#endif /* _EFX_FW_FORMATS_H */
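/* Illustrative sketch, not part of the patch: a minimal userspace scanner
 * mirroring the byte-by-byte "search for a plausible header" strategy
 * described in the comment at the top of this file, checking only the
 * reflash magic and version fields. The driver additionally validates the
 * lengths and the trailer CRC before trusting a match.
 */
#include <stddef.h>
#include <stdint.h>

static uint32_t rd_le32(const uint8_t *p)
{
	return (uint32_t)p[0] | (uint32_t)p[1] << 8 |
	       (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
}

/* Returns the offset of the first candidate reflash header, or -1. */
long find_reflash_header(const uint8_t *img, size_t len)
{
	size_t off;

	for (off = 0; off + 24 <= len; off++) {	/* 24 = LENGTH_OFST + LEN */
		if (rd_le32(img + off) == 0x106F1A5 &&	/* HEADER_MAGIC_VALUE */
		    rd_le32(img + off + 4) == 4)	/* HEADER_VERSION_VALUE */
			return (long)off;
	}
	return -1;
}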
diff --git a/drivers/net/ethernet/sfc/mae.c b/drivers/net/ethernet/sfc/mae.c
index 50f097487b14..6fd0c1e9a7d5 100644
--- a/drivers/net/ethernet/sfc/mae.c
+++ b/drivers/net/ethernet/sfc/mae.c
@@ -755,7 +755,7 @@ int efx_mae_match_check_caps_lhs(struct efx_nic *efx,
rc = efx_mae_match_check_cap_typ(supported_fields[MAE_FIELD_INGRESS_PORT],
ingress_port_mask_type);
if (rc) {
- NL_SET_ERR_MSG_FMT_MOD(extack, "No support for %s mask in field %s\n",
+ NL_SET_ERR_MSG_FMT_MOD(extack, "No support for %s mask in field %s",
mask_type_name(ingress_port_mask_type),
"ingress_port");
return rc;
diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c
index d461b1a6ce81..dbd2ee915838 100644
--- a/drivers/net/ethernet/sfc/mcdi.c
+++ b/drivers/net/ethernet/sfc/mcdi.c
@@ -1625,12 +1625,15 @@ fail:
return rc;
}
+#define EFX_MCDI_NVRAM_DEFAULT_WRITE_LEN 128
+
int efx_mcdi_nvram_info(struct efx_nic *efx, unsigned int type,
size_t *size_out, size_t *erase_size_out,
- bool *protected_out)
+ size_t *write_size_out, bool *protected_out)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_INFO_IN_LEN);
- MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_INFO_OUT_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_INFO_V2_OUT_LEN);
+ size_t write_size = 0;
size_t outlen;
int rc;
@@ -1645,6 +1648,12 @@ int efx_mcdi_nvram_info(struct efx_nic *efx, unsigned int type,
goto fail;
}
+ if (outlen >= MC_CMD_NVRAM_INFO_V2_OUT_LEN)
+ write_size = MCDI_DWORD(outbuf, NVRAM_INFO_V2_OUT_WRITESIZE);
+ else
+ write_size = EFX_MCDI_NVRAM_DEFAULT_WRITE_LEN;
+
+ *write_size_out = write_size;
*size_out = MCDI_DWORD(outbuf, NVRAM_INFO_OUT_SIZE);
*erase_size_out = MCDI_DWORD(outbuf, NVRAM_INFO_OUT_ERASESIZE);
*protected_out = !!(MCDI_DWORD(outbuf, NVRAM_INFO_OUT_FLAGS) &
@@ -2163,11 +2172,9 @@ out_free:
return rc;
}
-#ifdef CONFIG_SFC_MTD
-
#define EFX_MCDI_NVRAM_LEN_MAX 128
-static int efx_mcdi_nvram_update_start(struct efx_nic *efx, unsigned int type)
+int efx_mcdi_nvram_update_start(struct efx_nic *efx, unsigned int type)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_UPDATE_START_V2_IN_LEN);
int rc;
@@ -2185,6 +2192,8 @@ static int efx_mcdi_nvram_update_start(struct efx_nic *efx, unsigned int type)
return rc;
}
+#ifdef CONFIG_SFC_MTD
+
static int efx_mcdi_nvram_read(struct efx_nic *efx, unsigned int type,
loff_t offset, u8 *buffer, size_t length)
{
@@ -2209,13 +2218,20 @@ static int efx_mcdi_nvram_read(struct efx_nic *efx, unsigned int type,
return 0;
}
-static int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type,
- loff_t offset, const u8 *buffer, size_t length)
+#endif /* CONFIG_SFC_MTD */
+
+int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type,
+ loff_t offset, const u8 *buffer, size_t length)
{
- MCDI_DECLARE_BUF(inbuf,
- MC_CMD_NVRAM_WRITE_IN_LEN(EFX_MCDI_NVRAM_LEN_MAX));
+ efx_dword_t *inbuf;
+ size_t inlen;
int rc;
+ inlen = ALIGN(MC_CMD_NVRAM_WRITE_IN_LEN(length), 4);
+ inbuf = kzalloc(inlen, GFP_KERNEL);
+ if (!inbuf)
+ return -ENOMEM;
+
MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_TYPE, type);
MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_OFFSET, offset);
MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_LENGTH, length);
@@ -2223,14 +2239,14 @@ static int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type,
BUILD_BUG_ON(MC_CMD_NVRAM_WRITE_OUT_LEN != 0);
- rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_WRITE, inbuf,
- ALIGN(MC_CMD_NVRAM_WRITE_IN_LEN(length), 4),
- NULL, 0, NULL);
+ rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_WRITE, inbuf, inlen, NULL, 0, NULL);
+ kfree(inbuf);
+
return rc;
}
-static int efx_mcdi_nvram_erase(struct efx_nic *efx, unsigned int type,
- loff_t offset, size_t length)
+int efx_mcdi_nvram_erase(struct efx_nic *efx, unsigned int type, loff_t offset,
+ size_t length)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_ERASE_IN_LEN);
int rc;
@@ -2246,7 +2262,8 @@ static int efx_mcdi_nvram_erase(struct efx_nic *efx, unsigned int type,
return rc;
}
-static int efx_mcdi_nvram_update_finish(struct efx_nic *efx, unsigned int type)
+int efx_mcdi_nvram_update_finish(struct efx_nic *efx, unsigned int type,
+ enum efx_update_finish_mode mode)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_LEN);
MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT_LEN);
@@ -2254,22 +2271,41 @@ static int efx_mcdi_nvram_update_finish(struct efx_nic *efx, unsigned int type)
int rc, rc2;
MCDI_SET_DWORD(inbuf, NVRAM_UPDATE_FINISH_IN_TYPE, type);
- /* Always set this flag. Old firmware ignores it */
- MCDI_POPULATE_DWORD_1(inbuf, NVRAM_UPDATE_FINISH_V2_IN_FLAGS,
+
+ /* Old firmware doesn't support background update finish and abort
+	 * operations. Fall back to waiting if the requested mode is not
+ * supported.
+ */
+ if (!efx_has_cap(efx, NVRAM_UPDATE_POLL_VERIFY_RESULT) ||
+ (!efx_has_cap(efx, NVRAM_UPDATE_ABORT_SUPPORTED) &&
+ mode == EFX_UPDATE_FINISH_ABORT))
+ mode = EFX_UPDATE_FINISH_WAIT;
+
+ MCDI_POPULATE_DWORD_4(inbuf, NVRAM_UPDATE_FINISH_V2_IN_FLAGS,
NVRAM_UPDATE_FINISH_V2_IN_FLAG_REPORT_VERIFY_RESULT,
- 1);
+ (mode != EFX_UPDATE_FINISH_ABORT),
+ NVRAM_UPDATE_FINISH_V2_IN_FLAG_RUN_IN_BACKGROUND,
+ (mode == EFX_UPDATE_FINISH_BACKGROUND),
+ NVRAM_UPDATE_FINISH_V2_IN_FLAG_POLL_VERIFY_RESULT,
+ (mode == EFX_UPDATE_FINISH_POLL),
+ NVRAM_UPDATE_FINISH_V2_IN_FLAG_ABORT,
+ (mode == EFX_UPDATE_FINISH_ABORT));
rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_UPDATE_FINISH, inbuf, sizeof(inbuf),
outbuf, sizeof(outbuf), &outlen);
if (!rc && outlen >= MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT_LEN) {
rc2 = MCDI_DWORD(outbuf, NVRAM_UPDATE_FINISH_V2_OUT_RESULT_CODE);
- if (rc2 != MC_CMD_NVRAM_VERIFY_RC_SUCCESS)
+ if (rc2 != MC_CMD_NVRAM_VERIFY_RC_SUCCESS &&
+ rc2 != MC_CMD_NVRAM_VERIFY_RC_PENDING)
netif_err(efx, drv, efx->net_dev,
"NVRAM update failed verification with code 0x%x\n",
rc2);
switch (rc2) {
case MC_CMD_NVRAM_VERIFY_RC_SUCCESS:
break;
+ case MC_CMD_NVRAM_VERIFY_RC_PENDING:
+ rc = -EAGAIN;
+ break;
case MC_CMD_NVRAM_VERIFY_RC_CMS_CHECK_FAILED:
case MC_CMD_NVRAM_VERIFY_RC_MESSAGE_DIGEST_CHECK_FAILED:
case MC_CMD_NVRAM_VERIFY_RC_SIGNATURE_CHECK_FAILED:
@@ -2284,6 +2320,8 @@ static int efx_mcdi_nvram_update_finish(struct efx_nic *efx, unsigned int type)
case MC_CMD_NVRAM_VERIFY_RC_NO_VALID_SIGNATURES:
case MC_CMD_NVRAM_VERIFY_RC_NO_TRUSTED_APPROVERS:
case MC_CMD_NVRAM_VERIFY_RC_NO_SIGNATURE_MATCH:
+ case MC_CMD_NVRAM_VERIFY_RC_REJECT_TEST_SIGNED:
+ case MC_CMD_NVRAM_VERIFY_RC_SECURITY_LEVEL_DOWNGRADE:
rc = -EPERM;
break;
default:
@@ -2296,6 +2334,42 @@ static int efx_mcdi_nvram_update_finish(struct efx_nic *efx, unsigned int type)
return rc;
}
+#define EFX_MCDI_NVRAM_UPDATE_FINISH_INITIAL_POLL_DELAY_MS 5
+#define EFX_MCDI_NVRAM_UPDATE_FINISH_MAX_POLL_DELAY_MS 5000
+#define EFX_MCDI_NVRAM_UPDATE_FINISH_RETRIES 185
+
+int efx_mcdi_nvram_update_finish_polled(struct efx_nic *efx, unsigned int type)
+{
+ unsigned int delay = EFX_MCDI_NVRAM_UPDATE_FINISH_INITIAL_POLL_DELAY_MS;
+ unsigned int retry = 0;
+ int rc;
+
+ /* NVRAM updates can take a long time (e.g. up to 1 minute for bundle
+ * images). Polling for NVRAM update completion ensures that other MCDI
+ * commands can be issued before the background NVRAM update completes.
+ *
+ * The initial call either completes the update synchronously, or
+ * returns -EAGAIN to indicate processing is continuing. In the latter
+ * case, we poll for at least 900 seconds, at increasing intervals
+ * (5ms, 50ms, 500ms, 5s).
+ */
+ rc = efx_mcdi_nvram_update_finish(efx, type, EFX_UPDATE_FINISH_BACKGROUND);
+ while (rc == -EAGAIN) {
+ if (retry > EFX_MCDI_NVRAM_UPDATE_FINISH_RETRIES)
+ return -ETIMEDOUT;
+ retry++;
+
+ msleep(delay);
+ if (delay < EFX_MCDI_NVRAM_UPDATE_FINISH_MAX_POLL_DELAY_MS)
+ delay *= 10;
+
+ rc = efx_mcdi_nvram_update_finish(efx, type, EFX_UPDATE_FINISH_POLL);
+ }
+ return rc;
+}
+
+#ifdef CONFIG_SFC_MTD
+
int efx_mcdi_mtd_read(struct mtd_info *mtd, loff_t start,
size_t len, size_t *retlen, u8 *buffer)
{
@@ -2389,7 +2463,8 @@ int efx_mcdi_mtd_sync(struct mtd_info *mtd)
if (part->updating) {
part->updating = false;
- rc = efx_mcdi_nvram_update_finish(efx, part->nvram_type);
+ rc = efx_mcdi_nvram_update_finish(efx, part->nvram_type,
+ EFX_UPDATE_FINISH_WAIT);
}
return rc;
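/* Illustrative sketch, not part of the patch: roughly how long
 * efx_mcdi_nvram_update_finish_polled() (added above) can spend sleeping
 * between polls before giving up, given the 5 ms initial delay, the x10
 * back-off capped at 5000 ms and the 185-retry limit. The total comes to
 * a little over 915 seconds, which covers the 900 s
 * EFX_DEVLINK_UPDATE_FINISH_TIMEOUT advertised to devlink.
 */
#include <stdio.h>

int main(void)
{
	unsigned int delay = 5;			/* INITIAL_POLL_DELAY_MS */
	unsigned int retry = 0;
	unsigned long total_ms = 0;

	while (retry <= 185) {			/* UPDATE_FINISH_RETRIES */
		retry++;
		total_ms += delay;
		if (delay < 5000)		/* MAX_POLL_DELAY_MS */
			delay *= 10;
	}
	printf("worst-case sleep time: %lu ms\n", total_ms);
	return 0;
}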
diff --git a/drivers/net/ethernet/sfc/mcdi.h b/drivers/net/ethernet/sfc/mcdi.h
index cdb17d7c147f..3755cd3fe1e6 100644
--- a/drivers/net/ethernet/sfc/mcdi.h
+++ b/drivers/net/ethernet/sfc/mcdi.h
@@ -392,7 +392,7 @@ int efx_mcdi_log_ctrl(struct efx_nic *efx, bool evq, bool uart, u32 dest_evq);
int efx_mcdi_nvram_types(struct efx_nic *efx, u32 *nvram_types_out);
int efx_mcdi_nvram_info(struct efx_nic *efx, unsigned int type,
size_t *size_out, size_t *erase_size_out,
- bool *protected_out);
+ size_t *write_size_out, bool *protected_out);
int efx_new_mcdi_nvram_test_all(struct efx_nic *efx);
int efx_mcdi_nvram_metadata(struct efx_nic *efx, unsigned int type,
u32 *subtype, u16 version[4], char *desc,
@@ -424,6 +424,26 @@ static inline int efx_mcdi_mon_probe(struct efx_nic *efx) { return 0; }
static inline void efx_mcdi_mon_remove(struct efx_nic *efx) {}
#endif
+int efx_mcdi_nvram_update_start(struct efx_nic *efx, unsigned int type);
+int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type,
+ loff_t offset, const u8 *buffer, size_t length);
+int efx_mcdi_nvram_erase(struct efx_nic *efx, unsigned int type,
+ loff_t offset, size_t length);
+int efx_mcdi_nvram_metadata(struct efx_nic *efx, unsigned int type,
+ u32 *subtype, u16 version[4], char *desc,
+ size_t descsize);
+
+enum efx_update_finish_mode {
+ EFX_UPDATE_FINISH_WAIT,
+ EFX_UPDATE_FINISH_BACKGROUND,
+ EFX_UPDATE_FINISH_POLL,
+ EFX_UPDATE_FINISH_ABORT,
+};
+
+int efx_mcdi_nvram_update_finish(struct efx_nic *efx, unsigned int type,
+ enum efx_update_finish_mode mode);
+int efx_mcdi_nvram_update_finish_polled(struct efx_nic *efx, unsigned int type);
+
#ifdef CONFIG_SFC_MTD
int efx_mcdi_mtd_read(struct mtd_info *mtd, loff_t start, size_t len,
size_t *retlen, u8 *buffer);
diff --git a/drivers/net/ethernet/sfc/mcdi_pcol.h b/drivers/net/ethernet/sfc/mcdi_pcol.h
index cd297e19cddc..9cb339c461fb 100644
--- a/drivers/net/ethernet/sfc/mcdi_pcol.h
+++ b/drivers/net/ethernet/sfc/mcdi_pcol.h
@@ -72,19 +72,19 @@
* | \------- Error
* \------------------------------ Resync (always set)
*
- * The client writes it's request into MC shared memory, and rings the
- * doorbell. Each request is completed by either by the MC writing
+ * The client writes its request into MC shared memory, and rings the
+ * doorbell. Each request is completed either by the MC writing
* back into shared memory, or by writing out an event.
*
* All MCDI commands support completion by shared memory response. Each
* request may also contain additional data (accounted for by HEADER.LEN),
- * and some response's may also contain additional data (again, accounted
+ * and some responses may also contain additional data (again, accounted
* for by HEADER.LEN).
*
* Some MCDI commands support completion by event, in which any associated
* response data is included in the event.
*
- * The protocol requires one response to be delivered for every request, a
+ * The protocol requires one response to be delivered for every request; a
* request should not be sent unless the response for the previous request
* has been received (either by polling shared memory, or by receiving
* an event).
@@ -165,6 +165,7 @@
#define FSE_AZ_EV_CODE_MCDI_EVRESPONSE 0xc
+
#define MC_CMD_ERR_CODE_OFST 0
#define MC_CMD_ERR_PROXY_PENDING_HANDLE_OFST 4
@@ -321,7 +322,7 @@
/* enum: The requesting client is not a function */
#define MC_CMD_ERR_CLIENT_NOT_FN 0x100c
/* enum: The requested operation might require the command to be passed between
- * MCs, and thetransport doesn't support that. Should only ever been seen over
+ * MCs, and the transport doesn't support that. Should only ever be seen over
* the UART.
*/
#define MC_CMD_ERR_TRANSPORT_NOPROXY 0x100d
@@ -358,7 +359,7 @@
* sub-variant switching.
*/
#define MC_CMD_ERR_FILTERS_PRESENT 0x1014
-/* enum: The clock whose frequency you've attempted to set set doesn't exist on
+/* enum: The clock whose frequency you've attempted to set doesn't exist on
* this NIC
*/
#define MC_CMD_ERR_NO_CLOCK 0x1015
@@ -387,25 +388,6 @@
*/
#define MC_CMD_ERR_PIOBUFS_PRESENT 0x101b
-/* MC_CMD_RESOURCE_SPECIFIER enum */
-/* enum: Any */
-#define MC_CMD_RESOURCE_INSTANCE_ANY 0xffffffff
-#define MC_CMD_RESOURCE_INSTANCE_NONE 0xfffffffe /* enum */
-
-/* MC_CMD_FPGA_FLASH_INDEX enum */
-#define MC_CMD_FPGA_FLASH_PRIMARY 0x0 /* enum */
-#define MC_CMD_FPGA_FLASH_SECONDARY 0x1 /* enum */
-
-/* MC_CMD_EXTERNAL_MAE_LINK_MODE enum */
-/* enum: Legacy mode as described in XN-200039-TC. */
-#define MC_CMD_EXTERNAL_MAE_LINK_MODE_LEGACY 0x0
-/* enum: Switchdev mode as described in XN-200039-TC. */
-#define MC_CMD_EXTERNAL_MAE_LINK_MODE_SWITCHDEV 0x1
-/* enum: Bootstrap mode as described in XN-200039-TC. */
-#define MC_CMD_EXTERNAL_MAE_LINK_MODE_BOOTSTRAP 0x2
-/* enum: Link-mode change is in-progress as described in XN-200039-TC. */
-#define MC_CMD_EXTERNAL_MAE_LINK_MODE_PENDING 0xf
-
/* PCIE_INTERFACE enum: From EF100 onwards, SFC products can have multiple PCIe
* interfaces. There is a need to refer to interfaces explicitly from drivers
* (for example, a management driver on one interface administering a function
@@ -424,6 +406,14 @@
* an on-NIC ARM module is expected to be connected.
*/
#define PCIE_INTERFACE_NIC_EMBEDDED 0x1
+/* enum: The PCIe logical interface 0. It is an alias for HOST_PRIMARY. */
+#define PCIE_INTERFACE_PCIE_HOST_INTF_0 0x0
+/* enum: The PCIe logical interface 1. */
+#define PCIE_INTERFACE_PCIE_HOST_INTF_1 0x2
+/* enum: The PCIe logical interface 2. */
+#define PCIE_INTERFACE_PCIE_HOST_INTF_2 0x3
+/* enum: The PCIe logical interface 3. */
+#define PCIE_INTERFACE_PCIE_HOST_INTF_3 0x4
/* enum: For MCDI commands issued over a PCIe interface, this value is
* translated into the interface over which the command was issued. Not
* meaningful for other MCDI transports.
@@ -640,7 +630,11 @@
* be allocated by different counter blocks, so e.g. AR counter 42 is different
* from CT counter 42. Generation counts are also type-specific. This value is
* also present in the header of streaming counter packets, in the IDENTIFIER
- * field (see packetiser packet format definitions).
+ * field (see packetiser packet format definitions). Also note that LACP
+ * counter IDs are not allocated individually, instead the counter IDs are
+ * directly tied to the LACP balance table indices. These in turn are allocated
+ * in large contiguous blocks as a LAG config. Calling MAE_COUNTER_ALLOC/FREE
+ * with an LACP counter type will return EPERM.
*/
/* enum: Action Rule counters - can be referenced in AR response. */
#define MAE_COUNTER_TYPE_AR 0x0
@@ -648,6 +642,14 @@
#define MAE_COUNTER_TYPE_CT 0x1
/* enum: Outer Rule counters - can be referenced in OR response. */
#define MAE_COUNTER_TYPE_OR 0x2
+/* enum: LACP counters - linked to LACP balance table entries. */
+#define MAE_COUNTER_TYPE_LACP 0x3
+
+/* MAE_COUNTER_ID enum: ID of allocated counter or counter list. */
+/* enum: A counter ID that is guaranteed never to represent a real counter or
+ * counter list.
+ */
+#define MAE_COUNTER_ID_NULL 0xffffffff
/* TABLE_ID enum: Unique IDs for tables. The 32-bit ID values have been
* structured with bits [31:24] reserved (0), [23:16] indicating which major
@@ -656,7 +658,9 @@
* variations of the same table. (All of the tables currently defined within
* the streaming engines are listed here, but this does not imply that they are
* all supported - MC_CMD_TABLE_LIST returns the list of actually supported
- * tables.)
+ * tables.) The DPU offload engines' enumerators follow a deliberate pattern:
+ * 0x01010000 + is_dpu_net * 0x10000 + is_wr_or_tx * 0x8000 + is_lite_pipe *
+ * 0x1000 + oe_engine_type * 0x100 + oe_instance_within_pipe * 0x10
*/
/* enum: Outer_Rule_Table in the MAE - refer to SF-123102-TC. */
#define TABLE_ID_OUTER_RULE_TABLE 0x10000
@@ -694,45 +698,70 @@
#define TABLE_ID_RSS_CONTEXT_TABLE 0x20200
/* enum: Indirection_Table in VNIC Rx - refer to SF-123102-TC. */
#define TABLE_ID_INDIRECTION_TABLE 0x20300
-
-/* TABLE_COMPRESSED_VLAN enum: Compressed VLAN TPID as used by some field
- * types; can be calculated by (((ether_type_msb >> 2) & 0x4) ^ 0x4) |
- * (ether_type_msb & 0x3);
- */
-#define TABLE_COMPRESSED_VLAN_TPID_8100 0x5 /* enum */
-#define TABLE_COMPRESSED_VLAN_TPID_88A8 0x4 /* enum */
-#define TABLE_COMPRESSED_VLAN_TPID_9100 0x1 /* enum */
-#define TABLE_COMPRESSED_VLAN_TPID_9200 0x2 /* enum */
-#define TABLE_COMPRESSED_VLAN_TPID_9300 0x3 /* enum */
-
-/* TABLE_NAT_DIR enum: NAT direction. */
-#define TABLE_NAT_DIR_SOURCE 0x0 /* enum */
-#define TABLE_NAT_DIR_DEST 0x1 /* enum */
-
-/* TABLE_RSS_KEY_MODE enum: Defines how the value for Toeplitz hashing for RSS
- * is constructed as a concatenation (indicated here by "++") of packet header
- * fields.
- */
-/* enum: IP src addr ++ IP dst addr */
-#define TABLE_RSS_KEY_MODE_SA_DA 0x0
-/* enum: IP src addr ++ IP dst addr ++ TCP/UDP src port ++ TCP/UDP dst port */
-#define TABLE_RSS_KEY_MODE_SA_DA_SP_DP 0x1
-/* enum: IP src addr */
-#define TABLE_RSS_KEY_MODE_SA 0x2
-/* enum: IP dst addr */
-#define TABLE_RSS_KEY_MODE_DA 0x3
-/* enum: IP src addr ++ TCP/UDP src port */
-#define TABLE_RSS_KEY_MODE_SA_SP 0x4
-/* enum: IP dest addr ++ TCP dest port */
-#define TABLE_RSS_KEY_MODE_DA_DP 0x5
-/* enum: Nothing (produces input of 0, resulting in output hash of 0) */
-#define TABLE_RSS_KEY_MODE_NONE 0x7
-
-/* TABLE_RSS_SPREAD_MODE enum: RSS spreading mode. */
-/* enum: RSS uses Indirection_Table lookup. */
-#define TABLE_RSS_SPREAD_MODE_INDIRECTION 0x0
-/* enum: RSS uses even spreading calculation. */
-#define TABLE_RSS_SPREAD_MODE_EVEN 0x1
+/* enum: DPU.host read pipe first CRC offload engine profiles - refer to
+ * XN-200147-AN.
+ */
+#define TABLE_ID_DPU_HOST_RD_CRC0_OE_PROFILE 0x1010000
+/* enum: DPU.host read pipe second CRC offload engine profiles - refer to
+ * XN-200147-AN.
+ */
+#define TABLE_ID_DPU_HOST_RD_CRC1_OE_PROFILE 0x1010010
+/* enum: DPU.host write pipe first CRC offload engine profiles - refer to
+ * XN-200147-AN.
+ */
+#define TABLE_ID_DPU_HOST_WR_CRC0_OE_PROFILE 0x1018000
+/* enum: DPU.host write pipe second CRC offload engine profiles - refer to
+ * XN-200147-AN.
+ */
+#define TABLE_ID_DPU_HOST_WR_CRC1_OE_PROFILE 0x1018010
+/* enum: DPU.net 'full' receive pipe CRC offload engine profiles - refer to
+ * XN-200147-AN.
+ */
+#define TABLE_ID_DPU_NET_RX_CRC0_OE_PROFILE 0x1020000
+/* enum: DPU.net 'full' receive pipe first checksum offload engine profiles -
+ * refer to XN-200147-AN.
+ */
+#define TABLE_ID_DPU_NET_RX_CSUM0_OE_PROFILE 0x1020100
+/* enum: DPU.net 'full' receive pipe second checksum offload engine profiles -
+ * refer to XN-200147-AN.
+ */
+#define TABLE_ID_DPU_NET_RX_CSUM1_OE_PROFILE 0x1020110
+/* enum: DPU.net 'full' receive pipe AES-GCM offload engine profiles - refer to
+ * XN-200147-AN.
+ */
+#define TABLE_ID_DPU_NET_RX_AES_GCM0_OE_PROFILE 0x1020200
+/* enum: DPU.net 'lite' receive pipe CRC offload engine profiles - refer to
+ * XN-200147-AN.
+ */
+#define TABLE_ID_DPU_NET_RXLITE_CRC0_OE_PROFILE 0x1021000
+/* enum: DPU.net 'lite' receive pipe checksum offload engine profiles - refer
+ * to XN-200147-AN.
+ */
+#define TABLE_ID_DPU_NET_RXLITE_CSUM0_OE_PROFILE 0x1021100
+/* enum: DPU.net 'full' transmit pipe CRC offload engine profiles - refer to
+ * XN-200147-AN.
+ */
+#define TABLE_ID_DPU_NET_TX_CRC0_OE_PROFILE 0x1028000
+/* enum: DPU.net 'full' transmit pipe first checksum offload engine profiles -
+ * refer to XN-200147-AN.
+ */
+#define TABLE_ID_DPU_NET_TX_CSUM0_OE_PROFILE 0x1028100
+/* enum: DPU.net 'full' transmit pipe second checksum offload engine profiles -
+ * refer to XN-200147-AN.
+ */
+#define TABLE_ID_DPU_NET_TX_CSUM1_OE_PROFILE 0x1028110
+/* enum: DPU.net 'full' transmit pipe AES-GCM offload engine profiles - refer
+ * to XN-200147-AN.
+ */
+#define TABLE_ID_DPU_NET_TX_AES_GCM0_OE_PROFILE 0x1028200
+/* enum: DPU.net 'lite' transmit pipe CRC offload engine profiles - refer to
+ * XN-200147-AN.
+ */
+#define TABLE_ID_DPU_NET_TXLITE_CRC0_OE_PROFILE 0x1029000
+/* enum: DPU.net 'lite' transmit pipe checksum offload engine profiles - refer
+ * to XN-200147-AN.
+ */
+#define TABLE_ID_DPU_NET_TXLITE_CSUM0_OE_PROFILE 0x1029100
/* TABLE_FIELD_ID enum: Unique IDs for fields. Related concepts have been
* loosely grouped together into blocks with gaps for expansion, but the values
@@ -1026,6 +1055,16 @@
#define TABLE_FIELD_ID_BAL_TBL_BASE_DIV64 0xde
/* enum: Length of balance table region: 0=>64, 1=>128, 2=>256. */
#define TABLE_FIELD_ID_BAL_TBL_LEN_ID 0xdf
+/* enum: LACP LAG ID (i.e. the low 3 bits of LACP LAG mport ID), indexing
+ * LACP_LAG_Config_Table. Refer to SF-123102-TC.
+ */
+#define TABLE_FIELD_ID_LACP_LAG_ID 0xe0
+/* enum: Address in LACP_Balance_Table. The balance table is partitioned
+ * between LAGs according to the settings in LACP_LAG_Config_Table and then
+ * indexed by the LACP hash, providing the mapping to destination mports. Refer
+ * to SF-123102-TC.
+ */
+#define TABLE_FIELD_ID_BAL_TBL_ADDR 0xe1
/* enum: UDP port to match for UDP-based encapsulations; required to be 0 for
* other encapsulation types.
*/
@@ -1082,6 +1121,55 @@
#define TABLE_FIELD_ID_INDIR_TBL_LEN_ID 0x105
/* enum: An offset to be applied to the base destination queue ID. */
#define TABLE_FIELD_ID_INDIR_OFFSET 0x106
+/* enum: DPU offload engine profile ID to address. */
+#define TABLE_FIELD_ID_OE_PROFILE 0x3e8
+/* enum: Width of the CRC to calculate - see CRC_VARIANT enum. */
+#define TABLE_FIELD_ID_CRC_VARIANT 0x3f2
+/* enum: If set, reflect the bits of each input byte, bit 7 is LSB, bit 0 is
+ * MSB. If clear, bit 7 is MSB, bit 0 is LSB.
+ */
+#define TABLE_FIELD_ID_CRC_REFIN 0x3f3
+/* enum: If set, reflect the bits of each output byte, bit 7 is LSB, bit 0 is
+ * MSB. If clear, bit 7 is MSB, bit 0 is LSB.
+ */
+#define TABLE_FIELD_ID_CRC_REFOUT 0x3f4
+/* enum: If set, invert every bit of the output value. */
+#define TABLE_FIELD_ID_CRC_INVOUT 0x3f5
+/* enum: The CRC polynomial to use for checksumming, in normal form. */
+#define TABLE_FIELD_ID_CRC_POLY 0x3f6
+/* enum: Operation for the checksum engine to perform - see DPU_CSUM_OP enum.
+ */
+#define TABLE_FIELD_ID_CSUM_OP 0x410
+/* enum: Byte offset of checksum relative to region_start (for VALIDATE_*
+ * operations only).
+ */
+#define TABLE_FIELD_ID_CSUM_OFFSET 0x411
+/* enum: Indicates there is additional data on OPR bus that needs to be
+ * incorporated into the payload checksum.
+ */
+#define TABLE_FIELD_ID_CSUM_OPR_ADDITIONAL_DATA 0x412
+/* enum: Log2 data size of additional data on OPR bus. */
+#define TABLE_FIELD_ID_CSUM_OPR_DATA_SIZE_LOG2 0x413
+/* enum: 4 byte offset of where to find the additional data on the OPR bus. */
+#define TABLE_FIELD_ID_CSUM_OPR_4B_OFF 0x414
+/* enum: Operation type for the AES-GCM core - see GCM_OP_CODE enum. */
+#define TABLE_FIELD_ID_GCM_OP_CODE 0x41a
+/* enum: Key length - AES_KEY_LEN enum. */
+#define TABLE_FIELD_ID_GCM_KEY_LEN 0x41b
+/* enum: OPR 4 byte offset for ICV or GHASH output (only in BULK_* mode) or
+ * IPSEC descrypt output.
+ */
+#define TABLE_FIELD_ID_GCM_OPR_4B_OFFSET 0x41c
+/* enum: If OP_CODE is BULK_*, indicates Emit GHASH (Fragment mode). Else,
+ * indicates IPSEC-ESN mode.
+ */
+#define TABLE_FIELD_ID_GCM_EMIT_GHASH_ISESN 0x41d
+/* enum: Replay Protection Enable. */
+#define TABLE_FIELD_ID_GCM_REPLAY_PROTECT_EN 0x41e
+/* enum: IPSEC Encrypt ESP trailer NEXT_HEADER byte. */
+#define TABLE_FIELD_ID_GCM_NEXT_HDR 0x41f
+/* enum: Replay Window Size. */
+#define TABLE_FIELD_ID_GCM_REPLAY_WIN_SIZE 0x420
/* MCDI_EVENT structuredef: The structure of an MCDI_EVENT on Siena/EF10/EF100
* platforms
@@ -1138,6 +1226,24 @@
#define MCDI_EVENT_LINKCHANGE_LINK_FLAGS_OFST 0
#define MCDI_EVENT_LINKCHANGE_LINK_FLAGS_LBN 24
#define MCDI_EVENT_LINKCHANGE_LINK_FLAGS_WIDTH 8
+#define MCDI_EVENT_PORT_LINKCHANGE_PORT_HANDLE_OFST 0
+#define MCDI_EVENT_PORT_LINKCHANGE_PORT_HANDLE_LBN 0
+#define MCDI_EVENT_PORT_LINKCHANGE_PORT_HANDLE_WIDTH 24
+#define MCDI_EVENT_PORT_LINKCHANGE_SEQ_NUM_OFST 0
+#define MCDI_EVENT_PORT_LINKCHANGE_SEQ_NUM_LBN 24
+#define MCDI_EVENT_PORT_LINKCHANGE_SEQ_NUM_WIDTH 7
+#define MCDI_EVENT_PORT_LINKCHANGE_LINK_UP_OFST 0
+#define MCDI_EVENT_PORT_LINKCHANGE_LINK_UP_LBN 31
+#define MCDI_EVENT_PORT_LINKCHANGE_LINK_UP_WIDTH 1
+#define MCDI_EVENT_PORT_MODULECHANGE_PORT_HANDLE_OFST 0
+#define MCDI_EVENT_PORT_MODULECHANGE_PORT_HANDLE_LBN 0
+#define MCDI_EVENT_PORT_MODULECHANGE_PORT_HANDLE_WIDTH 24
+#define MCDI_EVENT_PORT_MODULECHANGE_SEQ_NUM_OFST 0
+#define MCDI_EVENT_PORT_MODULECHANGE_SEQ_NUM_LBN 24
+#define MCDI_EVENT_PORT_MODULECHANGE_SEQ_NUM_WIDTH 7
+#define MCDI_EVENT_PORT_MODULECHANGE_MDI_CONNECTED_OFST 0
+#define MCDI_EVENT_PORT_MODULECHANGE_MDI_CONNECTED_LBN 31
+#define MCDI_EVENT_PORT_MODULECHANGE_MDI_CONNECTED_WIDTH 1
#define MCDI_EVENT_SENSOREVT_MONITOR_OFST 0
#define MCDI_EVENT_SENSOREVT_MONITOR_LBN 0
#define MCDI_EVENT_SENSOREVT_MONITOR_WIDTH 8
@@ -1237,7 +1343,7 @@
#define MCDI_EVENT_AOE_FPGA_LOAD_FAILED 0xe
/* enum: Notify that invalid flash type detected */
#define MCDI_EVENT_AOE_INVALID_FPGA_FLASH_TYPE 0xf
-/* enum: Notify that the attempt to run FPGA Controller firmware timedout */
+/* enum: Notify that the attempt to run FPGA Controller firmware timed out */
#define MCDI_EVENT_AOE_FC_RUN_TIMEDOUT 0x10
/* enum: Failure to probe one or more FPGA boot flash chips */
#define MCDI_EVENT_AOE_FPGA_BOOT_FLASH_INVALID 0x11
@@ -1255,7 +1361,7 @@
#define MCDI_EVENT_AOE_ERR_FC_ASSERT_INFO_WIDTH 8
/* enum: FC Assert happened, but the register information is not available */
#define MCDI_EVENT_AOE_ERR_FC_ASSERT_SEEN 0x0
-/* enum: The register information for FC Assert is ready for readinng by driver
+/* enum: The register information for FC Assert is ready for reading by driver
*/
#define MCDI_EVENT_AOE_ERR_FC_ASSERT_DATA_READY 0x1
#define MCDI_EVENT_AOE_ERR_CODE_FPGA_HEADER_VERIFY_FAILED_OFST 0
@@ -1364,6 +1470,12 @@
#define MCDI_EVENT_MODULECHANGE_SEQ_OFST 0
#define MCDI_EVENT_MODULECHANGE_SEQ_LBN 30
#define MCDI_EVENT_MODULECHANGE_SEQ_WIDTH 2
+#define MCDI_EVENT_DESC_PROXY_VIRTQ_VI_ID_OFST 0
+#define MCDI_EVENT_DESC_PROXY_VIRTQ_VI_ID_LBN 0
+#define MCDI_EVENT_DESC_PROXY_VIRTQ_VI_ID_WIDTH 16
+#define MCDI_EVENT_DESC_PROXY_VIRTQ_ID_OFST 0
+#define MCDI_EVENT_DESC_PROXY_VIRTQ_ID_LBN 16
+#define MCDI_EVENT_DESC_PROXY_VIRTQ_ID_WIDTH 16
#define MCDI_EVENT_DATA_LBN 0
#define MCDI_EVENT_DATA_WIDTH 32
/* Alias for PTP_DATA. */
@@ -1500,6 +1612,31 @@
* change to the journal.
*/
#define MCDI_EVENT_CODE_MPORT_JOURNAL_CHANGE 0x27
+/* enum: Notification that a source queue is enabled and attached to its proxy
+ * sink queue. SRC field contains the handle of the affected descriptor proxy
+ * function. DATA field contains the relative source queue number and absolute
+ * VI ID.
+ */
+#define MCDI_EVENT_CODE_DESC_PROXY_FUNC_QUEUE_START 0x28
+/* enum: Notification of a change in link state and/or link speed of a network
+ * port link. This event applies to a network port identified by a handle,
+ * PORT_HANDLE, which is discovered by the driver using the MC_CMD_ENUM_PORTS
+ * command.
+ */
+#define MCDI_EVENT_CODE_PORT_LINKCHANGE 0x29
+/* enum: Notification of a change in the state of an MDI (external connector)
+ * of a network port. This typically corresponds to module plug/unplug for
+ * modular interfaces (e.g., SFP/QSFP and similar) or cable connect/disconnect.
+ * This event applies to a network port identified by a handle, PORT_HANDLE,
+ * which is discovered by the driver using the MC_CMD_ENUM_PORTS command.
+ */
+#define MCDI_EVENT_CODE_PORT_MODULECHANGE 0x2a
+/* enum: Notification that the port enumeration journal has changed since it
+ * was last read and updates can be read using the MC_CMD_ENUM_PORTS command.
+ * The firmware may moderate the events so that an event is not sent for every
+ * change to the journal.
+ */
+#define MCDI_EVENT_CODE_ENUM_PORTS_CHANGE 0x2b
/* enum: Artificial event generated by host and posted via MC for test
* purposes.
*/
@@ -1512,6 +1649,14 @@
#define MCDI_EVENT_LINKCHANGE_DATA_LEN 4
#define MCDI_EVENT_LINKCHANGE_DATA_LBN 0
#define MCDI_EVENT_LINKCHANGE_DATA_WIDTH 32
+#define MCDI_EVENT_PORT_LINKCHANGE_DATA_OFST 0
+#define MCDI_EVENT_PORT_LINKCHANGE_DATA_LEN 4
+#define MCDI_EVENT_PORT_LINKCHANGE_DATA_LBN 0
+#define MCDI_EVENT_PORT_LINKCHANGE_DATA_WIDTH 32
+#define MCDI_EVENT_PORT_MODULECHANGE_DATA_OFST 0
+#define MCDI_EVENT_PORT_MODULECHANGE_DATA_LEN 4
+#define MCDI_EVENT_PORT_MODULECHANGE_DATA_LBN 0
+#define MCDI_EVENT_PORT_MODULECHANGE_DATA_WIDTH 32
#define MCDI_EVENT_SENSOREVT_DATA_OFST 0
#define MCDI_EVENT_SENSOREVT_DATA_LEN 4
#define MCDI_EVENT_SENSOREVT_DATA_LBN 0
@@ -1668,247 +1813,6 @@
#define MCDI_EVENT_DESC_PROXY_VIRTIO_FEATURES_LBN 0
#define MCDI_EVENT_DESC_PROXY_VIRTIO_FEATURES_WIDTH 32
-/* FCDI_EVENT structuredef */
-#define FCDI_EVENT_LEN 8
-#define FCDI_EVENT_CONT_LBN 32
-#define FCDI_EVENT_CONT_WIDTH 1
-#define FCDI_EVENT_LEVEL_LBN 33
-#define FCDI_EVENT_LEVEL_WIDTH 3
-/* enum: Info. */
-#define FCDI_EVENT_LEVEL_INFO 0x0
-/* enum: Warning. */
-#define FCDI_EVENT_LEVEL_WARN 0x1
-/* enum: Error. */
-#define FCDI_EVENT_LEVEL_ERR 0x2
-/* enum: Fatal. */
-#define FCDI_EVENT_LEVEL_FATAL 0x3
-#define FCDI_EVENT_DATA_OFST 0
-#define FCDI_EVENT_DATA_LEN 4
-#define FCDI_EVENT_LINK_STATE_STATUS_OFST 0
-#define FCDI_EVENT_LINK_STATE_STATUS_LBN 0
-#define FCDI_EVENT_LINK_STATE_STATUS_WIDTH 1
-#define FCDI_EVENT_LINK_DOWN 0x0 /* enum */
-#define FCDI_EVENT_LINK_UP 0x1 /* enum */
-#define FCDI_EVENT_DATA_LBN 0
-#define FCDI_EVENT_DATA_WIDTH 32
-#define FCDI_EVENT_SRC_LBN 36
-#define FCDI_EVENT_SRC_WIDTH 8
-#define FCDI_EVENT_EV_CODE_LBN 60
-#define FCDI_EVENT_EV_CODE_WIDTH 4
-#define FCDI_EVENT_CODE_LBN 44
-#define FCDI_EVENT_CODE_WIDTH 8
-/* enum: The FC was rebooted. */
-#define FCDI_EVENT_CODE_REBOOT 0x1
-/* enum: Bad assert. */
-#define FCDI_EVENT_CODE_ASSERT 0x2
-/* enum: DDR3 test result. */
-#define FCDI_EVENT_CODE_DDR_TEST_RESULT 0x3
-/* enum: Link status. */
-#define FCDI_EVENT_CODE_LINK_STATE 0x4
-/* enum: A timed read is ready to be serviced. */
-#define FCDI_EVENT_CODE_TIMED_READ 0x5
-/* enum: One or more PPS IN events */
-#define FCDI_EVENT_CODE_PPS_IN 0x6
-/* enum: Tick event from PTP clock */
-#define FCDI_EVENT_CODE_PTP_TICK 0x7
-/* enum: ECC error counters */
-#define FCDI_EVENT_CODE_DDR_ECC_STATUS 0x8
-/* enum: Current status of PTP */
-#define FCDI_EVENT_CODE_PTP_STATUS 0x9
-/* enum: Port id config to map MC-FC port idx */
-#define FCDI_EVENT_CODE_PORT_CONFIG 0xa
-/* enum: Boot result or error code */
-#define FCDI_EVENT_CODE_BOOT_RESULT 0xb
-#define FCDI_EVENT_REBOOT_SRC_LBN 36
-#define FCDI_EVENT_REBOOT_SRC_WIDTH 8
-#define FCDI_EVENT_REBOOT_FC_FW 0x0 /* enum */
-#define FCDI_EVENT_REBOOT_FC_BOOTLOADER 0x1 /* enum */
-#define FCDI_EVENT_ASSERT_INSTR_ADDRESS_OFST 0
-#define FCDI_EVENT_ASSERT_INSTR_ADDRESS_LEN 4
-#define FCDI_EVENT_ASSERT_INSTR_ADDRESS_LBN 0
-#define FCDI_EVENT_ASSERT_INSTR_ADDRESS_WIDTH 32
-#define FCDI_EVENT_ASSERT_TYPE_LBN 36
-#define FCDI_EVENT_ASSERT_TYPE_WIDTH 8
-#define FCDI_EVENT_DDR_TEST_RESULT_STATUS_CODE_LBN 36
-#define FCDI_EVENT_DDR_TEST_RESULT_STATUS_CODE_WIDTH 8
-#define FCDI_EVENT_DDR_TEST_RESULT_RESULT_OFST 0
-#define FCDI_EVENT_DDR_TEST_RESULT_RESULT_LEN 4
-#define FCDI_EVENT_DDR_TEST_RESULT_RESULT_LBN 0
-#define FCDI_EVENT_DDR_TEST_RESULT_RESULT_WIDTH 32
-#define FCDI_EVENT_LINK_STATE_DATA_OFST 0
-#define FCDI_EVENT_LINK_STATE_DATA_LEN 4
-#define FCDI_EVENT_LINK_STATE_DATA_LBN 0
-#define FCDI_EVENT_LINK_STATE_DATA_WIDTH 32
-#define FCDI_EVENT_PTP_STATE_OFST 0
-#define FCDI_EVENT_PTP_STATE_LEN 4
-#define FCDI_EVENT_PTP_UNDEFINED 0x0 /* enum */
-#define FCDI_EVENT_PTP_SETUP_FAILED 0x1 /* enum */
-#define FCDI_EVENT_PTP_OPERATIONAL 0x2 /* enum */
-#define FCDI_EVENT_PTP_STATE_LBN 0
-#define FCDI_EVENT_PTP_STATE_WIDTH 32
-#define FCDI_EVENT_DDR_ECC_STATUS_BANK_ID_LBN 36
-#define FCDI_EVENT_DDR_ECC_STATUS_BANK_ID_WIDTH 8
-#define FCDI_EVENT_DDR_ECC_STATUS_STATUS_OFST 0
-#define FCDI_EVENT_DDR_ECC_STATUS_STATUS_LEN 4
-#define FCDI_EVENT_DDR_ECC_STATUS_STATUS_LBN 0
-#define FCDI_EVENT_DDR_ECC_STATUS_STATUS_WIDTH 32
-/* Index of MC port being referred to */
-#define FCDI_EVENT_PORT_CONFIG_SRC_LBN 36
-#define FCDI_EVENT_PORT_CONFIG_SRC_WIDTH 8
-/* FC Port index that matches the MC port index in SRC */
-#define FCDI_EVENT_PORT_CONFIG_DATA_OFST 0
-#define FCDI_EVENT_PORT_CONFIG_DATA_LEN 4
-#define FCDI_EVENT_PORT_CONFIG_DATA_LBN 0
-#define FCDI_EVENT_PORT_CONFIG_DATA_WIDTH 32
-#define FCDI_EVENT_BOOT_RESULT_OFST 0
-#define FCDI_EVENT_BOOT_RESULT_LEN 4
-/* Enum values, see field(s): */
-/* MC_CMD_AOE/MC_CMD_AOE_OUT_INFO/FC_BOOT_RESULT */
-#define FCDI_EVENT_BOOT_RESULT_LBN 0
-#define FCDI_EVENT_BOOT_RESULT_WIDTH 32
-
-/* FCDI_EXTENDED_EVENT_PPS structuredef: Extended FCDI event to send PPS events
- * to the MC. Note that this structure | is overlayed over a normal FCDI event
- * such that bits 32-63 containing | event code, level, source etc remain the
- * same. In this case the data | field of the header is defined to be the
- * number of timestamps
- */
-#define FCDI_EXTENDED_EVENT_PPS_LENMIN 16
-#define FCDI_EXTENDED_EVENT_PPS_LENMAX 248
-#define FCDI_EXTENDED_EVENT_PPS_LENMAX_MCDI2 1016
-#define FCDI_EXTENDED_EVENT_PPS_LEN(num) (8+8*(num))
-#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_NUM(len) (((len)-8)/8)
-/* Number of timestamps following */
-#define FCDI_EXTENDED_EVENT_PPS_COUNT_OFST 0
-#define FCDI_EXTENDED_EVENT_PPS_COUNT_LEN 4
-#define FCDI_EXTENDED_EVENT_PPS_COUNT_LBN 0
-#define FCDI_EXTENDED_EVENT_PPS_COUNT_WIDTH 32
-/* Seconds field of a timestamp record */
-#define FCDI_EXTENDED_EVENT_PPS_SECONDS_OFST 8
-#define FCDI_EXTENDED_EVENT_PPS_SECONDS_LEN 4
-#define FCDI_EXTENDED_EVENT_PPS_SECONDS_LBN 64
-#define FCDI_EXTENDED_EVENT_PPS_SECONDS_WIDTH 32
-/* Nanoseconds field of a timestamp record */
-#define FCDI_EXTENDED_EVENT_PPS_NANOSECONDS_OFST 12
-#define FCDI_EXTENDED_EVENT_PPS_NANOSECONDS_LEN 4
-#define FCDI_EXTENDED_EVENT_PPS_NANOSECONDS_LBN 96
-#define FCDI_EXTENDED_EVENT_PPS_NANOSECONDS_WIDTH 32
-/* Timestamp records comprising the event */
-#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_OFST 8
-#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_LEN 8
-#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_LO_OFST 8
-#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_LO_LEN 4
-#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_LO_LBN 64
-#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_LO_WIDTH 32
-#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_HI_OFST 12
-#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_HI_LEN 4
-#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_HI_LBN 96
-#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_HI_WIDTH 32
-#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_MINNUM 1
-#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_MAXNUM 30
-#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_MAXNUM_MCDI2 126
-#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_LBN 64
-#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_WIDTH 64
-
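The LENMIN/LENMAX/LEN(num)/_NUM(len) macros above follow the usual MCDI convention for variable-length arrays: a fixed header followed by fixed-size records, here an 8-byte header and 8-byte timestamp records. Below is a minimal sketch of that arithmetic in plain C, assuming the definitions above are in scope; the function and parameter names are illustrative only and are not part of this header or of any driver.

#include <stddef.h>

/* Sketch only: length bookkeeping for the variable-length TIMESTAMPS array,
 * assuming the definitions above are in scope.  'len' is the received event
 * length in bytes; names are illustrative.  Returns 0 (an impossible record
 * offset, since records start at byte 8) when 'n' is out of range.
 */
static size_t pps_timestamp_offset(size_t len, unsigned int n)
{
        unsigned int count = FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_NUM(len);

        /* e.g. FCDI_EXTENDED_EVENT_PPS_LEN(3) == 8 + 8*3 == 32 bytes, and
         * FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_NUM(32) == 3 records.
         */
        if (n >= count)
                return 0;

        /* SECONDS sits at byte 0 and NANOSECONDS at byte 4 of each record. */
        return FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_OFST +
               (size_t)n * FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_LEN;
}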
-/* MUM_EVENT structuredef */
-#define MUM_EVENT_LEN 8
-#define MUM_EVENT_CONT_LBN 32
-#define MUM_EVENT_CONT_WIDTH 1
-#define MUM_EVENT_LEVEL_LBN 33
-#define MUM_EVENT_LEVEL_WIDTH 3
-/* enum: Info. */
-#define MUM_EVENT_LEVEL_INFO 0x0
-/* enum: Warning. */
-#define MUM_EVENT_LEVEL_WARN 0x1
-/* enum: Error. */
-#define MUM_EVENT_LEVEL_ERR 0x2
-/* enum: Fatal. */
-#define MUM_EVENT_LEVEL_FATAL 0x3
-#define MUM_EVENT_DATA_OFST 0
-#define MUM_EVENT_DATA_LEN 4
-#define MUM_EVENT_SENSOR_ID_OFST 0
-#define MUM_EVENT_SENSOR_ID_LBN 0
-#define MUM_EVENT_SENSOR_ID_WIDTH 8
-/* Enum values, see field(s): */
-/* MC_CMD_SENSOR_INFO/MC_CMD_SENSOR_INFO_OUT/MASK */
-#define MUM_EVENT_SENSOR_STATE_OFST 0
-#define MUM_EVENT_SENSOR_STATE_LBN 8
-#define MUM_EVENT_SENSOR_STATE_WIDTH 8
-#define MUM_EVENT_PORT_PHY_READY_OFST 0
-#define MUM_EVENT_PORT_PHY_READY_LBN 0
-#define MUM_EVENT_PORT_PHY_READY_WIDTH 1
-#define MUM_EVENT_PORT_PHY_LINK_UP_OFST 0
-#define MUM_EVENT_PORT_PHY_LINK_UP_LBN 1
-#define MUM_EVENT_PORT_PHY_LINK_UP_WIDTH 1
-#define MUM_EVENT_PORT_PHY_TX_LOL_OFST 0
-#define MUM_EVENT_PORT_PHY_TX_LOL_LBN 2
-#define MUM_EVENT_PORT_PHY_TX_LOL_WIDTH 1
-#define MUM_EVENT_PORT_PHY_RX_LOL_OFST 0
-#define MUM_EVENT_PORT_PHY_RX_LOL_LBN 3
-#define MUM_EVENT_PORT_PHY_RX_LOL_WIDTH 1
-#define MUM_EVENT_PORT_PHY_TX_LOS_OFST 0
-#define MUM_EVENT_PORT_PHY_TX_LOS_LBN 4
-#define MUM_EVENT_PORT_PHY_TX_LOS_WIDTH 1
-#define MUM_EVENT_PORT_PHY_RX_LOS_OFST 0
-#define MUM_EVENT_PORT_PHY_RX_LOS_LBN 5
-#define MUM_EVENT_PORT_PHY_RX_LOS_WIDTH 1
-#define MUM_EVENT_PORT_PHY_TX_FAULT_OFST 0
-#define MUM_EVENT_PORT_PHY_TX_FAULT_LBN 6
-#define MUM_EVENT_PORT_PHY_TX_FAULT_WIDTH 1
-#define MUM_EVENT_DATA_LBN 0
-#define MUM_EVENT_DATA_WIDTH 32
-#define MUM_EVENT_SRC_LBN 36
-#define MUM_EVENT_SRC_WIDTH 8
-#define MUM_EVENT_EV_CODE_LBN 60
-#define MUM_EVENT_EV_CODE_WIDTH 4
-#define MUM_EVENT_CODE_LBN 44
-#define MUM_EVENT_CODE_WIDTH 8
-/* enum: The MUM was rebooted. */
-#define MUM_EVENT_CODE_REBOOT 0x1
-/* enum: Bad assert. */
-#define MUM_EVENT_CODE_ASSERT 0x2
-/* enum: Sensor failure. */
-#define MUM_EVENT_CODE_SENSOR 0x3
-/* enum: Link fault has been asserted, or has cleared. */
-#define MUM_EVENT_CODE_QSFP_LASI_INTERRUPT 0x4
-#define MUM_EVENT_SENSOR_DATA_OFST 0
-#define MUM_EVENT_SENSOR_DATA_LEN 4
-#define MUM_EVENT_SENSOR_DATA_LBN 0
-#define MUM_EVENT_SENSOR_DATA_WIDTH 32
-#define MUM_EVENT_PORT_PHY_FLAGS_OFST 0
-#define MUM_EVENT_PORT_PHY_FLAGS_LEN 4
-#define MUM_EVENT_PORT_PHY_FLAGS_LBN 0
-#define MUM_EVENT_PORT_PHY_FLAGS_WIDTH 32
-#define MUM_EVENT_PORT_PHY_COPPER_LEN_OFST 0
-#define MUM_EVENT_PORT_PHY_COPPER_LEN_LEN 4
-#define MUM_EVENT_PORT_PHY_COPPER_LEN_LBN 0
-#define MUM_EVENT_PORT_PHY_COPPER_LEN_WIDTH 32
-#define MUM_EVENT_PORT_PHY_CAPS_OFST 0
-#define MUM_EVENT_PORT_PHY_CAPS_LEN 4
-#define MUM_EVENT_PORT_PHY_CAPS_LBN 0
-#define MUM_EVENT_PORT_PHY_CAPS_WIDTH 32
-#define MUM_EVENT_PORT_PHY_TECH_OFST 0
-#define MUM_EVENT_PORT_PHY_TECH_LEN 4
-#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_UNKNOWN 0x0 /* enum */
-#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_OPTICAL 0x1 /* enum */
-#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_COPPER_PASSIVE 0x2 /* enum */
-#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_COPPER_PASSIVE_EQUALIZED 0x3 /* enum */
-#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_COPPER_ACTIVE_LIMITING 0x4 /* enum */
-#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_COPPER_ACTIVE_LINEAR 0x5 /* enum */
-#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_BASE_T 0x6 /* enum */
-#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_LOOPBACK_PASSIVE 0x7 /* enum */
-#define MUM_EVENT_PORT_PHY_TECH_LBN 0
-#define MUM_EVENT_PORT_PHY_TECH_WIDTH 32
-#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_LBN 36
-#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_WIDTH 4
-#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_FLAGS 0x0 /* enum */
-#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_COPPER_LEN 0x1 /* enum */
-#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_CAPS 0x2 /* enum */
-#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_TECH 0x3 /* enum */
-#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_MAX 0x4 /* enum */
-#define MUM_EVENT_PORT_PHY_SRC_PORT_NO_LBN 40
-#define MUM_EVENT_PORT_PHY_SRC_PORT_NO_WIDTH 4
-
/***********************************/
/* MC_CMD_READ32
@@ -1969,90 +1873,6 @@
/***********************************/
-/* MC_CMD_COPYCODE
- * Copy MC code between two locations and jump. Note - this command really
- * belongs to INSECURE category but is required by shmboot. The command handler
- * has additional checks to reject insecure calls.
- */
-#define MC_CMD_COPYCODE 0x3
-#undef MC_CMD_0x3_PRIVILEGE_CTG
-
-#define MC_CMD_0x3_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_COPYCODE_IN msgrequest */
-#define MC_CMD_COPYCODE_IN_LEN 16
-/* Source address
- *
- * The main image should be entered via a copy of a single word from and to a
- * magic address, which controls various aspects of the boot. The magic address
- * is a bitfield, with each bit as documented below.
- */
-#define MC_CMD_COPYCODE_IN_SRC_ADDR_OFST 0
-#define MC_CMD_COPYCODE_IN_SRC_ADDR_LEN 4
-/* enum: Deprecated; equivalent to setting BOOT_MAGIC_PRESENT (see below) */
-#define MC_CMD_COPYCODE_HUNT_NO_MAGIC_ADDR 0x10000
-/* enum: Deprecated; equivalent to setting BOOT_MAGIC_PRESENT and
- * BOOT_MAGIC_SATELLITE_CPUS_NOT_LOADED (see below)
- */
-#define MC_CMD_COPYCODE_HUNT_NO_DATAPATH_MAGIC_ADDR 0x1d0d0
-/* enum: Deprecated; equivalent to setting BOOT_MAGIC_PRESENT,
- * BOOT_MAGIC_SATELLITE_CPUS_NOT_LOADED and BOOT_MAGIC_IGNORE_CONFIG (see
- * below)
- */
-#define MC_CMD_COPYCODE_HUNT_IGNORE_CONFIG_MAGIC_ADDR 0x1badc
-#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_PRESENT_OFST 0
-#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_PRESENT_LBN 17
-#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_PRESENT_WIDTH 1
-#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_SATELLITE_CPUS_NOT_LOADED_OFST 0
-#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_SATELLITE_CPUS_NOT_LOADED_LBN 2
-#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_SATELLITE_CPUS_NOT_LOADED_WIDTH 1
-#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_IGNORE_CONFIG_OFST 0
-#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_IGNORE_CONFIG_LBN 3
-#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_IGNORE_CONFIG_WIDTH 1
-#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_SKIP_BOOT_ICORE_SYNC_OFST 0
-#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_SKIP_BOOT_ICORE_SYNC_LBN 4
-#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_SKIP_BOOT_ICORE_SYNC_WIDTH 1
-#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_FORCE_STANDALONE_OFST 0
-#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_FORCE_STANDALONE_LBN 5
-#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_FORCE_STANDALONE_WIDTH 1
-#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_DISABLE_XIP_OFST 0
-#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_DISABLE_XIP_LBN 6
-#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_DISABLE_XIP_WIDTH 1
-/* Destination address */
-#define MC_CMD_COPYCODE_IN_DEST_ADDR_OFST 4
-#define MC_CMD_COPYCODE_IN_DEST_ADDR_LEN 4
-#define MC_CMD_COPYCODE_IN_NUMWORDS_OFST 8
-#define MC_CMD_COPYCODE_IN_NUMWORDS_LEN 4
-/* Address of where to jump after copy. */
-#define MC_CMD_COPYCODE_IN_JUMP_OFST 12
-#define MC_CMD_COPYCODE_IN_JUMP_LEN 4
-/* enum: Control should return to the caller rather than jumping */
-#define MC_CMD_COPYCODE_JUMP_NONE 0x1
-
-/* MC_CMD_COPYCODE_OUT msgresponse */
-#define MC_CMD_COPYCODE_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_SET_FUNC
- * Select function for function-specific commands.
- */
-#define MC_CMD_SET_FUNC 0x4
-#undef MC_CMD_0x4_PRIVILEGE_CTG
-
-#define MC_CMD_0x4_PRIVILEGE_CTG SRIOV_CTG_INSECURE
-
-/* MC_CMD_SET_FUNC_IN msgrequest */
-#define MC_CMD_SET_FUNC_IN_LEN 4
-/* Set function */
-#define MC_CMD_SET_FUNC_IN_FUNC_OFST 0
-#define MC_CMD_SET_FUNC_IN_FUNC_LEN 4
-
-/* MC_CMD_SET_FUNC_OUT msgresponse */
-#define MC_CMD_SET_FUNC_OUT_LEN 0
-
-
-/***********************************/
/* MC_CMD_GET_BOOT_STATUS
* Get the instruction address from which the MC booted.
*/
@@ -2259,6 +2079,7 @@
/* Log destination */
#define MC_CMD_LOG_CTRL_IN_LOG_DEST_OFST 0
#define MC_CMD_LOG_CTRL_IN_LOG_DEST_LEN 4
+/* enum property: bitmask */
/* enum: UART. */
#define MC_CMD_LOG_CTRL_IN_LOG_DEST_UART 0x1
/* enum: Event queue. */
@@ -2304,6 +2125,9 @@
/* MC_CMD_GET_VERSION_OUT msgresponse */
#define MC_CMD_GET_VERSION_OUT_LEN 32
+/* This is normally the UTC build time in seconds since epoch or one of the
+ * special values listed
+ */
/* MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0 */
/* MC_CMD_GET_VERSION_OUT_FIRMWARE_LEN 4 */
/* Enum values, see field(s): */
@@ -2326,6 +2150,9 @@
/* MC_CMD_GET_VERSION_EXT_OUT msgresponse */
#define MC_CMD_GET_VERSION_EXT_OUT_LEN 48
+/* This is normally the UTC build time in seconds since epoch or one of the
+ * special values listed
+ */
/* MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0 */
/* MC_CMD_GET_VERSION_OUT_FIRMWARE_LEN 4 */
/* Enum values, see field(s): */
@@ -2356,6 +2183,9 @@
* (depending on which components exist on a particular adapter)
*/
#define MC_CMD_GET_VERSION_V2_OUT_LEN 304
+/* This is normally the UTC build time in seconds since epoch or one of the
+ * special values listed
+ */
/* MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0 */
/* MC_CMD_GET_VERSION_OUT_FIRMWARE_LEN 4 */
/* Enum values, see field(s): */
@@ -2495,6 +2325,9 @@
* (depending on which components exist on a particular adapter)
*/
#define MC_CMD_GET_VERSION_V3_OUT_LEN 328
+/* This is normally the UTC build time in seconds since epoch or one of the
+ * special values listed
+ */
/* MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0 */
/* MC_CMD_GET_VERSION_OUT_FIRMWARE_LEN 4 */
/* Enum values, see field(s): */
@@ -2641,6 +2474,9 @@
* version information
*/
#define MC_CMD_GET_VERSION_V4_OUT_LEN 392
+/* This is normally the UTC build time in seconds since epoch or one of the
+ * special values listed
+ */
/* MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0 */
/* MC_CMD_GET_VERSION_OUT_FIRMWARE_LEN 4 */
/* Enum values, see field(s): */
@@ -2803,6 +2639,9 @@
* and board version information
*/
#define MC_CMD_GET_VERSION_V5_OUT_LEN 424
+/* This is normally the UTC build time in seconds since epoch or one of the
+ * special values listed
+ */
/* MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0 */
/* MC_CMD_GET_VERSION_OUT_FIRMWARE_LEN 4 */
/* Enum values, see field(s): */
@@ -3065,8 +2904,18 @@
* subscribers.
*/
#define MC_CMD_PTP_OP_SET_SYNC_STATUS 0x1b
-/* enum: Above this for future use. */
-#define MC_CMD_PTP_OP_MAX 0x1c
+/* enum: X4 and later adapters should use this instead of
+ * PTP_OP_TIME_EVENT_SUBSCRIBE. Subscribe to receive periodic time events
+ * indicating the current NIC time
+ */
+#define MC_CMD_PTP_OP_TIME_EVENT_SUBSCRIBE_V2 0x1c
+/* enum: For X4 and later NICs. Packet timestamps and time sync events carry
+ * IS_SET and IN_SYNC flags that indicate whether time has been updated and
+ * whether it is in sync with the host. Once set, the IN_SYNC flag is cleared
+ * by hardware after a software-configurable timeout. The host driver can use
+ * this MCDI operation to query the currently configured timeout and the
+ * maximum supported interval.
+ */
+#define MC_CMD_PTP_OP_GET_SYNC_TIMEOUT 0x1d
/* MC_CMD_PTP_IN_ENABLE msgrequest */
#define MC_CMD_PTP_IN_ENABLE_LEN 16
@@ -3507,6 +3356,22 @@
#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_QUEUE_OFST 12
#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_QUEUE_LEN 4
+/* MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_V2 msgrequest */
+#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_V2_LEN 16
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
+/* Event queue ID */
+#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_V2_QUEUE_ID_OFST 8
+#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_V2_QUEUE_ID_LEN 4
+/* Space for flags. */
+#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_V2_FLAGS_OFST 12
+#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_V2_FLAGS_LEN 4
+#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_V2_REPORT_SYNC_STATUS_OFST 12
+#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_V2_REPORT_SYNC_STATUS_LBN 31
+#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_V2_REPORT_SYNC_STATUS_WIDTH 1
+
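A hedged sketch of how the new TIME_EVENT_SUBSCRIBE_V2 request lays out on the wire, using only the offsets and bit positions defined above. It assumes MCDI payloads are encoded as little-endian dwords and that the common MC_CMD_PTP_IN_CMD/PERIPH_ID fields referenced above are in scope; put_dword(), evq_id and want_sync_status are illustrative names, and an in-tree driver would normally use its own MCDI buffer helpers rather than open-coding this.

#include <stdint.h>
#include <string.h>

/* Sketch only: build the 16-byte TIME_EVENT_SUBSCRIBE_V2 payload.  Assumes
 * little-endian MCDI encoding; helper and variable names are illustrative.
 */
static void put_dword(uint8_t *buf, unsigned int ofst, uint32_t val)
{
        buf[ofst + 0] = val & 0xff;
        buf[ofst + 1] = (val >> 8) & 0xff;
        buf[ofst + 2] = (val >> 16) & 0xff;
        buf[ofst + 3] = (val >> 24) & 0xff;
}

static void build_time_event_subscribe_v2(uint8_t *buf, uint32_t evq_id,
                                          int want_sync_status)
{
        uint32_t flags = 0;

        memset(buf, 0, MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_V2_LEN);
        put_dword(buf, MC_CMD_PTP_IN_CMD_OFST,
                  MC_CMD_PTP_OP_TIME_EVENT_SUBSCRIBE_V2);
        put_dword(buf, MC_CMD_PTP_IN_PERIPH_ID_OFST, 0); /* illustrative */
        put_dword(buf, MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_V2_QUEUE_ID_OFST,
                  evq_id);
        if (want_sync_status)
                flags |= 1u << MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_V2_REPORT_SYNC_STATUS_LBN;
        put_dword(buf, MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_V2_FLAGS_OFST, flags);
}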
/* MC_CMD_PTP_IN_MANFTEST_PPS msgrequest */
#define MC_CMD_PTP_IN_MANFTEST_PPS_LEN 12
/* MC_CMD_PTP_IN_CMD_OFST 0 */
@@ -3540,6 +3405,13 @@
#define MC_CMD_PTP_IN_SET_SYNC_STATUS_RESERVED1_OFST 20
#define MC_CMD_PTP_IN_SET_SYNC_STATUS_RESERVED1_LEN 4
+/* MC_CMD_PTP_IN_GET_SYNC_TIMEOUT msgrequest */
+#define MC_CMD_PTP_IN_GET_SYNC_TIMEOUT_LEN 8
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
+
/* MC_CMD_PTP_OUT msgresponse */
#define MC_CMD_PTP_OUT_LEN 0
@@ -3939,416 +3811,14 @@
/* MC_CMD_PTP_OUT_SET_SYNC_STATUS msgresponse */
#define MC_CMD_PTP_OUT_SET_SYNC_STATUS_LEN 0
-
-/***********************************/
-/* MC_CMD_CSR_READ32
- * Read 32bit words from the indirect memory map.
- */
-#define MC_CMD_CSR_READ32 0xc
-#undef MC_CMD_0xc_PRIVILEGE_CTG
-
-#define MC_CMD_0xc_PRIVILEGE_CTG SRIOV_CTG_INSECURE
-
-/* MC_CMD_CSR_READ32_IN msgrequest */
-#define MC_CMD_CSR_READ32_IN_LEN 12
-/* Address */
-#define MC_CMD_CSR_READ32_IN_ADDR_OFST 0
-#define MC_CMD_CSR_READ32_IN_ADDR_LEN 4
-#define MC_CMD_CSR_READ32_IN_STEP_OFST 4
-#define MC_CMD_CSR_READ32_IN_STEP_LEN 4
-#define MC_CMD_CSR_READ32_IN_NUMWORDS_OFST 8
-#define MC_CMD_CSR_READ32_IN_NUMWORDS_LEN 4
-
-/* MC_CMD_CSR_READ32_OUT msgresponse */
-#define MC_CMD_CSR_READ32_OUT_LENMIN 4
-#define MC_CMD_CSR_READ32_OUT_LENMAX 252
-#define MC_CMD_CSR_READ32_OUT_LENMAX_MCDI2 1020
-#define MC_CMD_CSR_READ32_OUT_LEN(num) (0+4*(num))
-#define MC_CMD_CSR_READ32_OUT_BUFFER_NUM(len) (((len)-0)/4)
-/* The last dword is the status, not a value read */
-#define MC_CMD_CSR_READ32_OUT_BUFFER_OFST 0
-#define MC_CMD_CSR_READ32_OUT_BUFFER_LEN 4
-#define MC_CMD_CSR_READ32_OUT_BUFFER_MINNUM 1
-#define MC_CMD_CSR_READ32_OUT_BUFFER_MAXNUM 63
-#define MC_CMD_CSR_READ32_OUT_BUFFER_MAXNUM_MCDI2 255
-
-
-/***********************************/
-/* MC_CMD_CSR_WRITE32
- * Write 32bit dwords to the indirect memory map.
- */
-#define MC_CMD_CSR_WRITE32 0xd
-#undef MC_CMD_0xd_PRIVILEGE_CTG
-
-#define MC_CMD_0xd_PRIVILEGE_CTG SRIOV_CTG_INSECURE
-
-/* MC_CMD_CSR_WRITE32_IN msgrequest */
-#define MC_CMD_CSR_WRITE32_IN_LENMIN 12
-#define MC_CMD_CSR_WRITE32_IN_LENMAX 252
-#define MC_CMD_CSR_WRITE32_IN_LENMAX_MCDI2 1020
-#define MC_CMD_CSR_WRITE32_IN_LEN(num) (8+4*(num))
-#define MC_CMD_CSR_WRITE32_IN_BUFFER_NUM(len) (((len)-8)/4)
-/* Address */
-#define MC_CMD_CSR_WRITE32_IN_ADDR_OFST 0
-#define MC_CMD_CSR_WRITE32_IN_ADDR_LEN 4
-#define MC_CMD_CSR_WRITE32_IN_STEP_OFST 4
-#define MC_CMD_CSR_WRITE32_IN_STEP_LEN 4
-#define MC_CMD_CSR_WRITE32_IN_BUFFER_OFST 8
-#define MC_CMD_CSR_WRITE32_IN_BUFFER_LEN 4
-#define MC_CMD_CSR_WRITE32_IN_BUFFER_MINNUM 1
-#define MC_CMD_CSR_WRITE32_IN_BUFFER_MAXNUM 61
-#define MC_CMD_CSR_WRITE32_IN_BUFFER_MAXNUM_MCDI2 253
-
-/* MC_CMD_CSR_WRITE32_OUT msgresponse */
-#define MC_CMD_CSR_WRITE32_OUT_LEN 4
-#define MC_CMD_CSR_WRITE32_OUT_STATUS_OFST 0
-#define MC_CMD_CSR_WRITE32_OUT_STATUS_LEN 4
-
-
-/***********************************/
-/* MC_CMD_HP
- * These commands are used for HP related features. They are grouped under one
- * MCDI command to avoid creating too many MCDI commands.
- */
-#define MC_CMD_HP 0x54
-#undef MC_CMD_0x54_PRIVILEGE_CTG
-
-#define MC_CMD_0x54_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_HP_IN msgrequest */
-#define MC_CMD_HP_IN_LEN 16
-/* HP OCSD sub-command. When address is not NULL, request activation of OCSD at
- * the specified address with the specified interval. When address is NULL,
- * INTERVAL is interpreted as a command: 0: stop OCSD / 1: Report OCSD current
- * state / 2: (debug) Show temperature reported by one of the supported
- * sensors.
- */
-#define MC_CMD_HP_IN_SUBCMD_OFST 0
-#define MC_CMD_HP_IN_SUBCMD_LEN 4
-/* enum: OCSD (Option Card Sensor Data) sub-command. */
-#define MC_CMD_HP_IN_OCSD_SUBCMD 0x0
-/* enum: Last known valid HP sub-command. */
-#define MC_CMD_HP_IN_LAST_SUBCMD 0x0
-/* The address to the array of sensor fields. (Or NULL to use a sub-command.)
- */
-#define MC_CMD_HP_IN_OCSD_ADDR_OFST 4
-#define MC_CMD_HP_IN_OCSD_ADDR_LEN 8
-#define MC_CMD_HP_IN_OCSD_ADDR_LO_OFST 4
-#define MC_CMD_HP_IN_OCSD_ADDR_LO_LEN 4
-#define MC_CMD_HP_IN_OCSD_ADDR_LO_LBN 32
-#define MC_CMD_HP_IN_OCSD_ADDR_LO_WIDTH 32
-#define MC_CMD_HP_IN_OCSD_ADDR_HI_OFST 8
-#define MC_CMD_HP_IN_OCSD_ADDR_HI_LEN 4
-#define MC_CMD_HP_IN_OCSD_ADDR_HI_LBN 64
-#define MC_CMD_HP_IN_OCSD_ADDR_HI_WIDTH 32
-/* The requested update interval, in seconds. (Or the sub-command if ADDR is
- * NULL.)
- */
-#define MC_CMD_HP_IN_OCSD_INTERVAL_OFST 12
-#define MC_CMD_HP_IN_OCSD_INTERVAL_LEN 4
-
-/* MC_CMD_HP_OUT msgresponse */
-#define MC_CMD_HP_OUT_LEN 4
-#define MC_CMD_HP_OUT_OCSD_STATUS_OFST 0
-#define MC_CMD_HP_OUT_OCSD_STATUS_LEN 4
-/* enum: OCSD stopped for this card. */
-#define MC_CMD_HP_OUT_OCSD_STOPPED 0x1
-/* enum: OCSD was successfully started with the address provided. */
-#define MC_CMD_HP_OUT_OCSD_STARTED 0x2
-/* enum: OCSD was already started for this card. */
-#define MC_CMD_HP_OUT_OCSD_ALREADY_STARTED 0x3
-
-
-/***********************************/
-/* MC_CMD_STACKINFO
- * Get stack information.
- */
-#define MC_CMD_STACKINFO 0xf
-#undef MC_CMD_0xf_PRIVILEGE_CTG
-
-#define MC_CMD_0xf_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_STACKINFO_IN msgrequest */
-#define MC_CMD_STACKINFO_IN_LEN 0
-
-/* MC_CMD_STACKINFO_OUT msgresponse */
-#define MC_CMD_STACKINFO_OUT_LENMIN 12
-#define MC_CMD_STACKINFO_OUT_LENMAX 252
-#define MC_CMD_STACKINFO_OUT_LENMAX_MCDI2 1020
-#define MC_CMD_STACKINFO_OUT_LEN(num) (0+12*(num))
-#define MC_CMD_STACKINFO_OUT_THREAD_INFO_NUM(len) (((len)-0)/12)
-/* (thread ptr, stack size, free space) for each thread in system */
-#define MC_CMD_STACKINFO_OUT_THREAD_INFO_OFST 0
-#define MC_CMD_STACKINFO_OUT_THREAD_INFO_LEN 12
-#define MC_CMD_STACKINFO_OUT_THREAD_INFO_MINNUM 1
-#define MC_CMD_STACKINFO_OUT_THREAD_INFO_MAXNUM 21
-#define MC_CMD_STACKINFO_OUT_THREAD_INFO_MAXNUM_MCDI2 85
-
-
-/***********************************/
-/* MC_CMD_MDIO_READ
- * MDIO register read.
- */
-#define MC_CMD_MDIO_READ 0x10
-#undef MC_CMD_0x10_PRIVILEGE_CTG
-
-#define MC_CMD_0x10_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_MDIO_READ_IN msgrequest */
-#define MC_CMD_MDIO_READ_IN_LEN 16
-/* Bus number; there are two MDIO buses: one for the internal PHY, and one for
- * external devices.
- */
-#define MC_CMD_MDIO_READ_IN_BUS_OFST 0
-#define MC_CMD_MDIO_READ_IN_BUS_LEN 4
-/* enum: Internal. */
-#define MC_CMD_MDIO_BUS_INTERNAL 0x0
-/* enum: External. */
-#define MC_CMD_MDIO_BUS_EXTERNAL 0x1
-/* Port address */
-#define MC_CMD_MDIO_READ_IN_PRTAD_OFST 4
-#define MC_CMD_MDIO_READ_IN_PRTAD_LEN 4
-/* Device Address or clause 22. */
-#define MC_CMD_MDIO_READ_IN_DEVAD_OFST 8
-#define MC_CMD_MDIO_READ_IN_DEVAD_LEN 4
-/* enum: By default all the MCDI MDIO operations perform clause45 mode. If you
- * want to use clause22 then set DEVAD = MC_CMD_MDIO_CLAUSE22.
- */
-#define MC_CMD_MDIO_CLAUSE22 0x20
-/* Address */
-#define MC_CMD_MDIO_READ_IN_ADDR_OFST 12
-#define MC_CMD_MDIO_READ_IN_ADDR_LEN 4
-
-/* MC_CMD_MDIO_READ_OUT msgresponse */
-#define MC_CMD_MDIO_READ_OUT_LEN 8
-/* Value */
-#define MC_CMD_MDIO_READ_OUT_VALUE_OFST 0
-#define MC_CMD_MDIO_READ_OUT_VALUE_LEN 4
-/* Status; the MDIO commands return the raw status bits from the MDIO block. A
- * "good" transaction should have the DONE bit set and all other bits clear.
- */
-#define MC_CMD_MDIO_READ_OUT_STATUS_OFST 4
-#define MC_CMD_MDIO_READ_OUT_STATUS_LEN 4
-/* enum: Good. */
-#define MC_CMD_MDIO_STATUS_GOOD 0x8
-
-
-/***********************************/
-/* MC_CMD_MDIO_WRITE
- * MDIO register write.
- */
-#define MC_CMD_MDIO_WRITE 0x11
-#undef MC_CMD_0x11_PRIVILEGE_CTG
-
-#define MC_CMD_0x11_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_MDIO_WRITE_IN msgrequest */
-#define MC_CMD_MDIO_WRITE_IN_LEN 20
-/* Bus number; there are two MDIO buses: one for the internal PHY, and one for
- * external devices.
- */
-#define MC_CMD_MDIO_WRITE_IN_BUS_OFST 0
-#define MC_CMD_MDIO_WRITE_IN_BUS_LEN 4
-/* enum: Internal. */
-/* MC_CMD_MDIO_BUS_INTERNAL 0x0 */
-/* enum: External. */
-/* MC_CMD_MDIO_BUS_EXTERNAL 0x1 */
-/* Port address */
-#define MC_CMD_MDIO_WRITE_IN_PRTAD_OFST 4
-#define MC_CMD_MDIO_WRITE_IN_PRTAD_LEN 4
-/* Device Address or clause 22. */
-#define MC_CMD_MDIO_WRITE_IN_DEVAD_OFST 8
-#define MC_CMD_MDIO_WRITE_IN_DEVAD_LEN 4
-/* enum: By default all the MCDI MDIO operations perform clause45 mode. If you
- * want to use clause22 then set DEVAD = MC_CMD_MDIO_CLAUSE22.
- */
-/* MC_CMD_MDIO_CLAUSE22 0x20 */
-/* Address */
-#define MC_CMD_MDIO_WRITE_IN_ADDR_OFST 12
-#define MC_CMD_MDIO_WRITE_IN_ADDR_LEN 4
-/* Value */
-#define MC_CMD_MDIO_WRITE_IN_VALUE_OFST 16
-#define MC_CMD_MDIO_WRITE_IN_VALUE_LEN 4
-
-/* MC_CMD_MDIO_WRITE_OUT msgresponse */
-#define MC_CMD_MDIO_WRITE_OUT_LEN 4
-/* Status; the MDIO commands return the raw status bits from the MDIO block. A
- * "good" transaction should have the DONE bit set and all other bits clear.
- */
-#define MC_CMD_MDIO_WRITE_OUT_STATUS_OFST 0
-#define MC_CMD_MDIO_WRITE_OUT_STATUS_LEN 4
-/* enum: Good. */
-/* MC_CMD_MDIO_STATUS_GOOD 0x8 */
-
-
-/***********************************/
-/* MC_CMD_DBI_WRITE
- * Write DBI register(s).
- */
-#define MC_CMD_DBI_WRITE 0x12
-#undef MC_CMD_0x12_PRIVILEGE_CTG
-
-#define MC_CMD_0x12_PRIVILEGE_CTG SRIOV_CTG_INSECURE
-
-/* MC_CMD_DBI_WRITE_IN msgrequest */
-#define MC_CMD_DBI_WRITE_IN_LENMIN 12
-#define MC_CMD_DBI_WRITE_IN_LENMAX 252
-#define MC_CMD_DBI_WRITE_IN_LENMAX_MCDI2 1020
-#define MC_CMD_DBI_WRITE_IN_LEN(num) (0+12*(num))
-#define MC_CMD_DBI_WRITE_IN_DBIWROP_NUM(len) (((len)-0)/12)
-/* Each write op consists of an address (offset 0), byte enable/VF/CS2 (offset
- * 32) and value (offset 64). See MC_CMD_DBIWROP_TYPEDEF.
- */
-#define MC_CMD_DBI_WRITE_IN_DBIWROP_OFST 0
-#define MC_CMD_DBI_WRITE_IN_DBIWROP_LEN 12
-#define MC_CMD_DBI_WRITE_IN_DBIWROP_MINNUM 1
-#define MC_CMD_DBI_WRITE_IN_DBIWROP_MAXNUM 21
-#define MC_CMD_DBI_WRITE_IN_DBIWROP_MAXNUM_MCDI2 85
-
-/* MC_CMD_DBI_WRITE_OUT msgresponse */
-#define MC_CMD_DBI_WRITE_OUT_LEN 0
-
-/* MC_CMD_DBIWROP_TYPEDEF structuredef */
-#define MC_CMD_DBIWROP_TYPEDEF_LEN 12
-#define MC_CMD_DBIWROP_TYPEDEF_ADDRESS_OFST 0
-#define MC_CMD_DBIWROP_TYPEDEF_ADDRESS_LEN 4
-#define MC_CMD_DBIWROP_TYPEDEF_ADDRESS_LBN 0
-#define MC_CMD_DBIWROP_TYPEDEF_ADDRESS_WIDTH 32
-#define MC_CMD_DBIWROP_TYPEDEF_PARMS_OFST 4
-#define MC_CMD_DBIWROP_TYPEDEF_PARMS_LEN 4
-#define MC_CMD_DBIWROP_TYPEDEF_VF_NUM_OFST 4
-#define MC_CMD_DBIWROP_TYPEDEF_VF_NUM_LBN 16
-#define MC_CMD_DBIWROP_TYPEDEF_VF_NUM_WIDTH 16
-#define MC_CMD_DBIWROP_TYPEDEF_VF_ACTIVE_OFST 4
-#define MC_CMD_DBIWROP_TYPEDEF_VF_ACTIVE_LBN 15
-#define MC_CMD_DBIWROP_TYPEDEF_VF_ACTIVE_WIDTH 1
-#define MC_CMD_DBIWROP_TYPEDEF_CS2_OFST 4
-#define MC_CMD_DBIWROP_TYPEDEF_CS2_LBN 14
-#define MC_CMD_DBIWROP_TYPEDEF_CS2_WIDTH 1
-#define MC_CMD_DBIWROP_TYPEDEF_PARMS_LBN 32
-#define MC_CMD_DBIWROP_TYPEDEF_PARMS_WIDTH 32
-#define MC_CMD_DBIWROP_TYPEDEF_VALUE_OFST 8
-#define MC_CMD_DBIWROP_TYPEDEF_VALUE_LEN 4
-#define MC_CMD_DBIWROP_TYPEDEF_VALUE_LBN 64
-#define MC_CMD_DBIWROP_TYPEDEF_VALUE_WIDTH 32
-
-
-/***********************************/
-/* MC_CMD_PORT_READ32
- * Read a 32-bit register from the indirect port register map. The port to
- * access is implied by the Shared memory channel used.
- */
-#define MC_CMD_PORT_READ32 0x14
-
-/* MC_CMD_PORT_READ32_IN msgrequest */
-#define MC_CMD_PORT_READ32_IN_LEN 4
-/* Address */
-#define MC_CMD_PORT_READ32_IN_ADDR_OFST 0
-#define MC_CMD_PORT_READ32_IN_ADDR_LEN 4
-
-/* MC_CMD_PORT_READ32_OUT msgresponse */
-#define MC_CMD_PORT_READ32_OUT_LEN 8
-/* Value */
-#define MC_CMD_PORT_READ32_OUT_VALUE_OFST 0
-#define MC_CMD_PORT_READ32_OUT_VALUE_LEN 4
-/* Status */
-#define MC_CMD_PORT_READ32_OUT_STATUS_OFST 4
-#define MC_CMD_PORT_READ32_OUT_STATUS_LEN 4
-
-
-/***********************************/
-/* MC_CMD_PORT_WRITE32
- * Write a 32-bit register to the indirect port register map. The port to
- * access is implied by the Shared memory channel used.
- */
-#define MC_CMD_PORT_WRITE32 0x15
-
-/* MC_CMD_PORT_WRITE32_IN msgrequest */
-#define MC_CMD_PORT_WRITE32_IN_LEN 8
-/* Address */
-#define MC_CMD_PORT_WRITE32_IN_ADDR_OFST 0
-#define MC_CMD_PORT_WRITE32_IN_ADDR_LEN 4
-/* Value */
-#define MC_CMD_PORT_WRITE32_IN_VALUE_OFST 4
-#define MC_CMD_PORT_WRITE32_IN_VALUE_LEN 4
-
-/* MC_CMD_PORT_WRITE32_OUT msgresponse */
-#define MC_CMD_PORT_WRITE32_OUT_LEN 4
-/* Status */
-#define MC_CMD_PORT_WRITE32_OUT_STATUS_OFST 0
-#define MC_CMD_PORT_WRITE32_OUT_STATUS_LEN 4
-
-
-/***********************************/
-/* MC_CMD_PORT_READ128
- * Read a 128-bit register from the indirect port register map. The port to
- * access is implied by the Shared memory channel used.
- */
-#define MC_CMD_PORT_READ128 0x16
-
-/* MC_CMD_PORT_READ128_IN msgrequest */
-#define MC_CMD_PORT_READ128_IN_LEN 4
-/* Address */
-#define MC_CMD_PORT_READ128_IN_ADDR_OFST 0
-#define MC_CMD_PORT_READ128_IN_ADDR_LEN 4
-
-/* MC_CMD_PORT_READ128_OUT msgresponse */
-#define MC_CMD_PORT_READ128_OUT_LEN 20
-/* Value */
-#define MC_CMD_PORT_READ128_OUT_VALUE_OFST 0
-#define MC_CMD_PORT_READ128_OUT_VALUE_LEN 16
-/* Status */
-#define MC_CMD_PORT_READ128_OUT_STATUS_OFST 16
-#define MC_CMD_PORT_READ128_OUT_STATUS_LEN 4
-
-
-/***********************************/
-/* MC_CMD_PORT_WRITE128
- * Write a 128-bit register to the indirect port register map. The port to
- * access is implied by the Shared memory channel used.
- */
-#define MC_CMD_PORT_WRITE128 0x17
-
-/* MC_CMD_PORT_WRITE128_IN msgrequest */
-#define MC_CMD_PORT_WRITE128_IN_LEN 20
-/* Address */
-#define MC_CMD_PORT_WRITE128_IN_ADDR_OFST 0
-#define MC_CMD_PORT_WRITE128_IN_ADDR_LEN 4
-/* Value */
-#define MC_CMD_PORT_WRITE128_IN_VALUE_OFST 4
-#define MC_CMD_PORT_WRITE128_IN_VALUE_LEN 16
-
-/* MC_CMD_PORT_WRITE128_OUT msgresponse */
-#define MC_CMD_PORT_WRITE128_OUT_LEN 4
-/* Status */
-#define MC_CMD_PORT_WRITE128_OUT_STATUS_OFST 0
-#define MC_CMD_PORT_WRITE128_OUT_STATUS_LEN 4
-
-/* MC_CMD_CAPABILITIES structuredef */
-#define MC_CMD_CAPABILITIES_LEN 4
-/* Small buf table. */
-#define MC_CMD_CAPABILITIES_SMALL_BUF_TBL_LBN 0
-#define MC_CMD_CAPABILITIES_SMALL_BUF_TBL_WIDTH 1
-/* Turbo mode (for Maranello). */
-#define MC_CMD_CAPABILITIES_TURBO_LBN 1
-#define MC_CMD_CAPABILITIES_TURBO_WIDTH 1
-/* Turbo mode active (for Maranello). */
-#define MC_CMD_CAPABILITIES_TURBO_ACTIVE_LBN 2
-#define MC_CMD_CAPABILITIES_TURBO_ACTIVE_WIDTH 1
-/* PTP offload. */
-#define MC_CMD_CAPABILITIES_PTP_LBN 3
-#define MC_CMD_CAPABILITIES_PTP_WIDTH 1
-/* AOE mode. */
-#define MC_CMD_CAPABILITIES_AOE_LBN 4
-#define MC_CMD_CAPABILITIES_AOE_WIDTH 1
-/* AOE mode active. */
-#define MC_CMD_CAPABILITIES_AOE_ACTIVE_LBN 5
-#define MC_CMD_CAPABILITIES_AOE_ACTIVE_WIDTH 1
-/* AOE mode active. */
-#define MC_CMD_CAPABILITIES_FC_ACTIVE_LBN 6
-#define MC_CMD_CAPABILITIES_FC_ACTIVE_WIDTH 1
-#define MC_CMD_CAPABILITIES_RESERVED_LBN 7
-#define MC_CMD_CAPABILITIES_RESERVED_WIDTH 25
+/* MC_CMD_PTP_OUT_GET_SYNC_TIMEOUT msgresponse */
+#define MC_CMD_PTP_OUT_GET_SYNC_TIMEOUT_LEN 8
+/* Current value set in NIC, in seconds */
+#define MC_CMD_PTP_OUT_GET_SYNC_TIMEOUT_CURRENT_OFST 0
+#define MC_CMD_PTP_OUT_GET_SYNC_TIMEOUT_CURRENT_LEN 4
+/* Maximum supported by NIC, in seconds */
+#define MC_CMD_PTP_OUT_GET_SYNC_TIMEOUT_MAXIMUM_OFST 4
+#define MC_CMD_PTP_OUT_GET_SYNC_TIMEOUT_MAXIMUM_LEN 4
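A matching sketch for the GET_SYNC_TIMEOUT response: the two fields are plain dwords at offsets 0 and 4. Again this assumes little-endian MCDI encoding; get_dword() and the parameter names are illustrative only.

#include <stdint.h>

/* Sketch only: parse the 8-byte GET_SYNC_TIMEOUT response. */
static uint32_t get_dword(const uint8_t *buf, unsigned int ofst)
{
        return (uint32_t)buf[ofst] |
               ((uint32_t)buf[ofst + 1] << 8) |
               ((uint32_t)buf[ofst + 2] << 16) |
               ((uint32_t)buf[ofst + 3] << 24);
}

static void parse_sync_timeout(const uint8_t *resp,
                               uint32_t *current_s, uint32_t *maximum_s)
{
        *current_s = get_dword(resp, MC_CMD_PTP_OUT_GET_SYNC_TIMEOUT_CURRENT_OFST);
        *maximum_s = get_dword(resp, MC_CMD_PTP_OUT_GET_SYNC_TIMEOUT_MAXIMUM_OFST);
}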
/***********************************/
@@ -4427,112 +3897,6 @@
/***********************************/
-/* MC_CMD_DBI_READX
- * Read DBI register(s) -- extended functionality
- */
-#define MC_CMD_DBI_READX 0x19
-#undef MC_CMD_0x19_PRIVILEGE_CTG
-
-#define MC_CMD_0x19_PRIVILEGE_CTG SRIOV_CTG_INSECURE
-
-/* MC_CMD_DBI_READX_IN msgrequest */
-#define MC_CMD_DBI_READX_IN_LENMIN 8
-#define MC_CMD_DBI_READX_IN_LENMAX 248
-#define MC_CMD_DBI_READX_IN_LENMAX_MCDI2 1016
-#define MC_CMD_DBI_READX_IN_LEN(num) (0+8*(num))
-#define MC_CMD_DBI_READX_IN_DBIRDOP_NUM(len) (((len)-0)/8)
-/* Each Read op consists of an address (offset 0) and VF/CS2 (offset 32) */
-#define MC_CMD_DBI_READX_IN_DBIRDOP_OFST 0
-#define MC_CMD_DBI_READX_IN_DBIRDOP_LEN 8
-#define MC_CMD_DBI_READX_IN_DBIRDOP_LO_OFST 0
-#define MC_CMD_DBI_READX_IN_DBIRDOP_LO_LEN 4
-#define MC_CMD_DBI_READX_IN_DBIRDOP_LO_LBN 0
-#define MC_CMD_DBI_READX_IN_DBIRDOP_LO_WIDTH 32
-#define MC_CMD_DBI_READX_IN_DBIRDOP_HI_OFST 4
-#define MC_CMD_DBI_READX_IN_DBIRDOP_HI_LEN 4
-#define MC_CMD_DBI_READX_IN_DBIRDOP_HI_LBN 32
-#define MC_CMD_DBI_READX_IN_DBIRDOP_HI_WIDTH 32
-#define MC_CMD_DBI_READX_IN_DBIRDOP_MINNUM 1
-#define MC_CMD_DBI_READX_IN_DBIRDOP_MAXNUM 31
-#define MC_CMD_DBI_READX_IN_DBIRDOP_MAXNUM_MCDI2 127
-
-/* MC_CMD_DBI_READX_OUT msgresponse */
-#define MC_CMD_DBI_READX_OUT_LENMIN 4
-#define MC_CMD_DBI_READX_OUT_LENMAX 252
-#define MC_CMD_DBI_READX_OUT_LENMAX_MCDI2 1020
-#define MC_CMD_DBI_READX_OUT_LEN(num) (0+4*(num))
-#define MC_CMD_DBI_READX_OUT_VALUE_NUM(len) (((len)-0)/4)
-/* Value */
-#define MC_CMD_DBI_READX_OUT_VALUE_OFST 0
-#define MC_CMD_DBI_READX_OUT_VALUE_LEN 4
-#define MC_CMD_DBI_READX_OUT_VALUE_MINNUM 1
-#define MC_CMD_DBI_READX_OUT_VALUE_MAXNUM 63
-#define MC_CMD_DBI_READX_OUT_VALUE_MAXNUM_MCDI2 255
-
-/* MC_CMD_DBIRDOP_TYPEDEF structuredef */
-#define MC_CMD_DBIRDOP_TYPEDEF_LEN 8
-#define MC_CMD_DBIRDOP_TYPEDEF_ADDRESS_OFST 0
-#define MC_CMD_DBIRDOP_TYPEDEF_ADDRESS_LEN 4
-#define MC_CMD_DBIRDOP_TYPEDEF_ADDRESS_LBN 0
-#define MC_CMD_DBIRDOP_TYPEDEF_ADDRESS_WIDTH 32
-#define MC_CMD_DBIRDOP_TYPEDEF_PARMS_OFST 4
-#define MC_CMD_DBIRDOP_TYPEDEF_PARMS_LEN 4
-#define MC_CMD_DBIRDOP_TYPEDEF_VF_NUM_OFST 4
-#define MC_CMD_DBIRDOP_TYPEDEF_VF_NUM_LBN 16
-#define MC_CMD_DBIRDOP_TYPEDEF_VF_NUM_WIDTH 16
-#define MC_CMD_DBIRDOP_TYPEDEF_VF_ACTIVE_OFST 4
-#define MC_CMD_DBIRDOP_TYPEDEF_VF_ACTIVE_LBN 15
-#define MC_CMD_DBIRDOP_TYPEDEF_VF_ACTIVE_WIDTH 1
-#define MC_CMD_DBIRDOP_TYPEDEF_CS2_OFST 4
-#define MC_CMD_DBIRDOP_TYPEDEF_CS2_LBN 14
-#define MC_CMD_DBIRDOP_TYPEDEF_CS2_WIDTH 1
-#define MC_CMD_DBIRDOP_TYPEDEF_PARMS_LBN 32
-#define MC_CMD_DBIRDOP_TYPEDEF_PARMS_WIDTH 32
-
-
-/***********************************/
-/* MC_CMD_SET_RAND_SEED
- * Set the 16byte seed for the MC pseudo-random generator.
- */
-#define MC_CMD_SET_RAND_SEED 0x1a
-#undef MC_CMD_0x1a_PRIVILEGE_CTG
-
-#define MC_CMD_0x1a_PRIVILEGE_CTG SRIOV_CTG_INSECURE
-
-/* MC_CMD_SET_RAND_SEED_IN msgrequest */
-#define MC_CMD_SET_RAND_SEED_IN_LEN 16
-/* Seed value. */
-#define MC_CMD_SET_RAND_SEED_IN_SEED_OFST 0
-#define MC_CMD_SET_RAND_SEED_IN_SEED_LEN 16
-
-/* MC_CMD_SET_RAND_SEED_OUT msgresponse */
-#define MC_CMD_SET_RAND_SEED_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_LTSSM_HIST
- * Retrieve the history of the LTSSM, if the build supports it.
- */
-#define MC_CMD_LTSSM_HIST 0x1b
-
-/* MC_CMD_LTSSM_HIST_IN msgrequest */
-#define MC_CMD_LTSSM_HIST_IN_LEN 0
-
-/* MC_CMD_LTSSM_HIST_OUT msgresponse */
-#define MC_CMD_LTSSM_HIST_OUT_LENMIN 0
-#define MC_CMD_LTSSM_HIST_OUT_LENMAX 252
-#define MC_CMD_LTSSM_HIST_OUT_LENMAX_MCDI2 1020
-#define MC_CMD_LTSSM_HIST_OUT_LEN(num) (0+4*(num))
-#define MC_CMD_LTSSM_HIST_OUT_DATA_NUM(len) (((len)-0)/4)
-/* variable number of LTSSM values, as bytes. The history is read-to-clear. */
-#define MC_CMD_LTSSM_HIST_OUT_DATA_OFST 0
-#define MC_CMD_LTSSM_HIST_OUT_DATA_LEN 4
-#define MC_CMD_LTSSM_HIST_OUT_DATA_MINNUM 0
-#define MC_CMD_LTSSM_HIST_OUT_DATA_MAXNUM 63
-#define MC_CMD_LTSSM_HIST_OUT_DATA_MAXNUM_MCDI2 255
-
-
-/***********************************/
/* MC_CMD_DRV_ATTACH
* Inform MCPU that this port is managed on the host (i.e. driver active). For
* Huntington, also request the preferred datapath firmware to use if possible
@@ -4705,6 +4069,7 @@
/* Flags associated with this function */
#define MC_CMD_DRV_ATTACH_EXT_OUT_FUNC_FLAGS_OFST 4
#define MC_CMD_DRV_ATTACH_EXT_OUT_FUNC_FLAGS_LEN 4
+/* enum property: bitshift */
/* enum: Labels the lowest-numbered function visible to the OS */
#define MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_PRIMARY 0x0
/* enum: The function can control the link state of the physical port it is
@@ -4732,22 +4097,6 @@
/***********************************/
-/* MC_CMD_SHMUART
- * Route UART output to circular buffer in shared memory instead.
- */
-#define MC_CMD_SHMUART 0x1f
-
-/* MC_CMD_SHMUART_IN msgrequest */
-#define MC_CMD_SHMUART_IN_LEN 4
-/* ??? */
-#define MC_CMD_SHMUART_IN_FLAG_OFST 0
-#define MC_CMD_SHMUART_IN_FLAG_LEN 4
-
-/* MC_CMD_SHMUART_OUT msgresponse */
-#define MC_CMD_SHMUART_OUT_LEN 0
-
-
-/***********************************/
/* MC_CMD_PORT_RESET
* Generic per-port reset. There is no equivalent for per-board reset. Locks
* required: None; Return code: 0, ETIME. NOTE: This command is deprecated -
@@ -4790,100 +4139,6 @@
/***********************************/
-/* MC_CMD_PCIE_CREDITS
- * Read instantaneous and minimum flow control thresholds.
- */
-#define MC_CMD_PCIE_CREDITS 0x21
-
-/* MC_CMD_PCIE_CREDITS_IN msgrequest */
-#define MC_CMD_PCIE_CREDITS_IN_LEN 8
-/* poll period. 0 is disabled */
-#define MC_CMD_PCIE_CREDITS_IN_POLL_PERIOD_OFST 0
-#define MC_CMD_PCIE_CREDITS_IN_POLL_PERIOD_LEN 4
-/* wipe statistics */
-#define MC_CMD_PCIE_CREDITS_IN_WIPE_OFST 4
-#define MC_CMD_PCIE_CREDITS_IN_WIPE_LEN 4
-
-/* MC_CMD_PCIE_CREDITS_OUT msgresponse */
-#define MC_CMD_PCIE_CREDITS_OUT_LEN 16
-#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_P_HDR_OFST 0
-#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_P_HDR_LEN 2
-#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_P_DATA_OFST 2
-#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_P_DATA_LEN 2
-#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_NP_HDR_OFST 4
-#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_NP_HDR_LEN 2
-#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_NP_DATA_OFST 6
-#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_NP_DATA_LEN 2
-#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_P_HDR_OFST 8
-#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_P_HDR_LEN 2
-#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_P_DATA_OFST 10
-#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_P_DATA_LEN 2
-#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_NP_HDR_OFST 12
-#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_NP_HDR_LEN 2
-#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_NP_DATA_OFST 14
-#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_NP_DATA_LEN 2
-
-
-/***********************************/
-/* MC_CMD_RXD_MONITOR
- * Get histogram of RX queue fill level.
- */
-#define MC_CMD_RXD_MONITOR 0x22
-
-/* MC_CMD_RXD_MONITOR_IN msgrequest */
-#define MC_CMD_RXD_MONITOR_IN_LEN 12
-#define MC_CMD_RXD_MONITOR_IN_QID_OFST 0
-#define MC_CMD_RXD_MONITOR_IN_QID_LEN 4
-#define MC_CMD_RXD_MONITOR_IN_POLL_PERIOD_OFST 4
-#define MC_CMD_RXD_MONITOR_IN_POLL_PERIOD_LEN 4
-#define MC_CMD_RXD_MONITOR_IN_WIPE_OFST 8
-#define MC_CMD_RXD_MONITOR_IN_WIPE_LEN 4
-
-/* MC_CMD_RXD_MONITOR_OUT msgresponse */
-#define MC_CMD_RXD_MONITOR_OUT_LEN 80
-#define MC_CMD_RXD_MONITOR_OUT_QID_OFST 0
-#define MC_CMD_RXD_MONITOR_OUT_QID_LEN 4
-#define MC_CMD_RXD_MONITOR_OUT_RING_FILL_OFST 4
-#define MC_CMD_RXD_MONITOR_OUT_RING_FILL_LEN 4
-#define MC_CMD_RXD_MONITOR_OUT_CACHE_FILL_OFST 8
-#define MC_CMD_RXD_MONITOR_OUT_CACHE_FILL_LEN 4
-#define MC_CMD_RXD_MONITOR_OUT_RING_LT_1_OFST 12
-#define MC_CMD_RXD_MONITOR_OUT_RING_LT_1_LEN 4
-#define MC_CMD_RXD_MONITOR_OUT_RING_LT_2_OFST 16
-#define MC_CMD_RXD_MONITOR_OUT_RING_LT_2_LEN 4
-#define MC_CMD_RXD_MONITOR_OUT_RING_LT_4_OFST 20
-#define MC_CMD_RXD_MONITOR_OUT_RING_LT_4_LEN 4
-#define MC_CMD_RXD_MONITOR_OUT_RING_LT_8_OFST 24
-#define MC_CMD_RXD_MONITOR_OUT_RING_LT_8_LEN 4
-#define MC_CMD_RXD_MONITOR_OUT_RING_LT_16_OFST 28
-#define MC_CMD_RXD_MONITOR_OUT_RING_LT_16_LEN 4
-#define MC_CMD_RXD_MONITOR_OUT_RING_LT_32_OFST 32
-#define MC_CMD_RXD_MONITOR_OUT_RING_LT_32_LEN 4
-#define MC_CMD_RXD_MONITOR_OUT_RING_LT_64_OFST 36
-#define MC_CMD_RXD_MONITOR_OUT_RING_LT_64_LEN 4
-#define MC_CMD_RXD_MONITOR_OUT_RING_LT_128_OFST 40
-#define MC_CMD_RXD_MONITOR_OUT_RING_LT_128_LEN 4
-#define MC_CMD_RXD_MONITOR_OUT_RING_LT_256_OFST 44
-#define MC_CMD_RXD_MONITOR_OUT_RING_LT_256_LEN 4
-#define MC_CMD_RXD_MONITOR_OUT_RING_GE_256_OFST 48
-#define MC_CMD_RXD_MONITOR_OUT_RING_GE_256_LEN 4
-#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_1_OFST 52
-#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_1_LEN 4
-#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_2_OFST 56
-#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_2_LEN 4
-#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_4_OFST 60
-#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_4_LEN 4
-#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_8_OFST 64
-#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_8_LEN 4
-#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_16_OFST 68
-#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_16_LEN 4
-#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_32_OFST 72
-#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_32_LEN 4
-#define MC_CMD_RXD_MONITOR_OUT_CACHE_GE_32_OFST 76
-#define MC_CMD_RXD_MONITOR_OUT_CACHE_GE_32_LEN 4
-
-
-/***********************************/
/* MC_CMD_PUTS
* Copy the given ASCII string out onto UART and/or out of the network port.
*/
@@ -4931,6 +4186,54 @@
/* MC_CMD_GET_PHY_CFG_IN msgrequest */
#define MC_CMD_GET_PHY_CFG_IN_LEN 0
+/* MC_CMD_GET_PHY_CFG_IN_V2 msgrequest */
+#define MC_CMD_GET_PHY_CFG_IN_V2_LEN 8
+/* Target port to request PHY state for. Uses MAE_LINK_ENDPOINT_SELECTOR which
+ * identifies a real or virtual network port by MAE port and link end. See the
+ * structure definition for more details
+ */
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_OFST 0
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_LEN 8
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_LO_OFST 0
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_LO_LEN 4
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_LO_LBN 0
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_LO_WIDTH 32
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_HI_OFST 4
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_HI_LEN 4
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_HI_LBN 32
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_HI_WIDTH 32
+/* See structuredef: MAE_LINK_ENDPOINT_SELECTOR */
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_MPORT_SELECTOR_OFST 0
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_MPORT_SELECTOR_LEN 4
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_MPORT_SELECTOR_FLAT_OFST 0
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_MPORT_SELECTOR_FLAT_LEN 4
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_MPORT_SELECTOR_TYPE_OFST 3
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_MPORT_SELECTOR_TYPE_LEN 1
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_MPORT_SELECTOR_MPORT_ID_OFST 0
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_MPORT_SELECTOR_MPORT_ID_LEN 3
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_MPORT_SELECTOR_PPORT_ID_LBN 0
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_MPORT_SELECTOR_PPORT_ID_WIDTH 4
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_MPORT_SELECTOR_FUNC_INTF_ID_LBN 20
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_MPORT_SELECTOR_FUNC_INTF_ID_WIDTH 4
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_MPORT_SELECTOR_FUNC_MH_PF_ID_LBN 16
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_MPORT_SELECTOR_FUNC_MH_PF_ID_WIDTH 4
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_MPORT_SELECTOR_FUNC_PF_ID_OFST 2
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_MPORT_SELECTOR_FUNC_PF_ID_LEN 1
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_MPORT_SELECTOR_FUNC_VF_ID_OFST 0
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_MPORT_SELECTOR_FUNC_VF_ID_LEN 2
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_LINK_END_OFST 4
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_LINK_END_LEN 4
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_FLAT_OFST 0
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_FLAT_LEN 8
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_FLAT_LO_OFST 0
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_FLAT_LO_LEN 4
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_FLAT_LO_LBN 0
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_FLAT_LO_WIDTH 32
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_FLAT_HI_OFST 4
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_FLAT_HI_LEN 4
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_FLAT_HI_LBN 32
+#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_FLAT_HI_WIDTH 32
+
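A minimal sketch of filling the new 8-byte TARGET field: the MAE m-port selector occupies the low dword and the link end the high dword. The selector and link-end values come from MAE definitions elsewhere in this header and are treated as opaque inputs here; put_dword() is the hypothetical little-endian helper from the earlier sketch and the function name is illustrative.

#include <stdint.h>

/* Sketch only: fill TARGET for MC_CMD_GET_PHY_CFG_IN_V2. */
static void set_phy_cfg_v2_target(uint8_t *buf, uint32_t mport_selector,
                                  uint32_t link_end)
{
        put_dword(buf, MC_CMD_GET_PHY_CFG_IN_V2_TARGET_MPORT_SELECTOR_OFST,
                  mport_selector);
        put_dword(buf, MC_CMD_GET_PHY_CFG_IN_V2_TARGET_LINK_END_OFST, link_end);
}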
/* MC_CMD_GET_PHY_CFG_OUT msgresponse */
#define MC_CMD_GET_PHY_CFG_OUT_LEN 72
/* flags */
@@ -5026,6 +4329,9 @@
#define MC_CMD_PHY_CAP_25G_BASER_FEC_REQUESTED_OFST 8
#define MC_CMD_PHY_CAP_25G_BASER_FEC_REQUESTED_LBN 21
#define MC_CMD_PHY_CAP_25G_BASER_FEC_REQUESTED_WIDTH 1
+#define MC_CMD_PHY_CAP_200000FDX_OFST 8
+#define MC_CMD_PHY_CAP_200000FDX_LBN 22
+#define MC_CMD_PHY_CAP_200000FDX_WIDTH 1
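The new 200G capability follows the existing pattern of a single bit (LBN) within the capability dword at offset 8 of the GET_PHY_CFG response. A one-function sketch, assuming cap_dword already holds that dword; the function name is illustrative.

#include <stdbool.h>
#include <stdint.h>

/* Sketch only: test the new 200G full-duplex capability bit. */
static bool phy_supports_200g_fdx(uint32_t cap_dword)
{
        return cap_dword & (1u << MC_CMD_PHY_CAP_200000FDX_LBN);
}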
/* ?? */
#define MC_CMD_GET_PHY_CFG_OUT_CHANNEL_OFST 12
#define MC_CMD_GET_PHY_CFG_OUT_CHANNEL_LEN 4
@@ -5059,6 +4365,7 @@
#define MC_CMD_MEDIA_DSFP 0x8
#define MC_CMD_GET_PHY_CFG_OUT_MMD_MASK_OFST 48
#define MC_CMD_GET_PHY_CFG_OUT_MMD_MASK_LEN 4
+/* enum property: bitshift */
/* enum: Native clause 22 */
#define MC_CMD_MMD_CLAUSE22 0x0
#define MC_CMD_MMD_CLAUSE45_PMAPMD 0x1 /* enum */
@@ -5084,7 +4391,7 @@
#define MC_CMD_START_BIST 0x25
#undef MC_CMD_0x25_PRIVILEGE_CTG
-#define MC_CMD_0x25_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x25_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
/* MC_CMD_START_BIST_IN msgrequest */
#define MC_CMD_START_BIST_IN_LEN 4
@@ -5124,7 +4431,7 @@
#define MC_CMD_POLL_BIST 0x26
#undef MC_CMD_0x26_PRIVILEGE_CTG
-#define MC_CMD_0x26_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x26_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
/* MC_CMD_POLL_BIST_IN msgrequest */
#define MC_CMD_POLL_BIST_IN_LEN 0
@@ -5282,33 +4589,6 @@
/***********************************/
-/* MC_CMD_FLUSH_RX_QUEUES
- * Flush receive queue(s). If SRIOV is enabled (via MC_CMD_SRIOV), then RXQ
- * flushes should be initiated via this MCDI operation, rather than via
- * directly writing FLUSH_CMD.
- *
- * The flush is completed (either done/fail) asynchronously (after this command
- * returns). The driver must still wait for flush done/failure events as usual.
- */
-#define MC_CMD_FLUSH_RX_QUEUES 0x27
-
-/* MC_CMD_FLUSH_RX_QUEUES_IN msgrequest */
-#define MC_CMD_FLUSH_RX_QUEUES_IN_LENMIN 4
-#define MC_CMD_FLUSH_RX_QUEUES_IN_LENMAX 252
-#define MC_CMD_FLUSH_RX_QUEUES_IN_LENMAX_MCDI2 1020
-#define MC_CMD_FLUSH_RX_QUEUES_IN_LEN(num) (0+4*(num))
-#define MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_NUM(len) (((len)-0)/4)
-#define MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_OFST 0
-#define MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_LEN 4
-#define MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_MINNUM 1
-#define MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_MAXNUM 63
-#define MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_MAXNUM_MCDI2 255
-
-/* MC_CMD_FLUSH_RX_QUEUES_OUT msgresponse */
-#define MC_CMD_FLUSH_RX_QUEUES_OUT_LEN 0
-
-
-/***********************************/
/* MC_CMD_GET_LOOPBACK_MODES
* Returns a bitmask of loopback modes available at each speed.
*/
@@ -5320,6 +4600,54 @@
/* MC_CMD_GET_LOOPBACK_MODES_IN msgrequest */
#define MC_CMD_GET_LOOPBACK_MODES_IN_LEN 0
+/* MC_CMD_GET_LOOPBACK_MODES_IN_V2 msgrequest */
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_LEN 8
+/* Target port to request loopback modes for. Uses MAE_LINK_ENDPOINT_SELECTOR
+ * which identifies a real or virtual network port by MAE port and link end.
+ * See the structure definition for more details
+ */
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_OFST 0
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_LO_OFST 0
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_LO_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_LO_LBN 0
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_LO_WIDTH 32
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_HI_OFST 4
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_HI_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_HI_LBN 32
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_HI_WIDTH 32
+/* See structuredef: MAE_LINK_ENDPOINT_SELECTOR */
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_MPORT_SELECTOR_OFST 0
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_MPORT_SELECTOR_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_MPORT_SELECTOR_FLAT_OFST 0
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_MPORT_SELECTOR_FLAT_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_MPORT_SELECTOR_TYPE_OFST 3
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_MPORT_SELECTOR_TYPE_LEN 1
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_MPORT_SELECTOR_MPORT_ID_OFST 0
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_MPORT_SELECTOR_MPORT_ID_LEN 3
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_MPORT_SELECTOR_PPORT_ID_LBN 0
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_MPORT_SELECTOR_PPORT_ID_WIDTH 4
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_MPORT_SELECTOR_FUNC_INTF_ID_LBN 20
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_MPORT_SELECTOR_FUNC_INTF_ID_WIDTH 4
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_MPORT_SELECTOR_FUNC_MH_PF_ID_LBN 16
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_MPORT_SELECTOR_FUNC_MH_PF_ID_WIDTH 4
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_MPORT_SELECTOR_FUNC_PF_ID_OFST 2
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_MPORT_SELECTOR_FUNC_PF_ID_LEN 1
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_MPORT_SELECTOR_FUNC_VF_ID_OFST 0
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_MPORT_SELECTOR_FUNC_VF_ID_LEN 2
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_LINK_END_OFST 4
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_LINK_END_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_FLAT_OFST 0
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_FLAT_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_FLAT_LO_OFST 0
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_FLAT_LO_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_FLAT_LO_LBN 0
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_FLAT_LO_WIDTH 32
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_FLAT_HI_OFST 4
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_FLAT_HI_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_FLAT_HI_LBN 32
+#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_FLAT_HI_WIDTH 32
+
/* MC_CMD_GET_LOOPBACK_MODES_OUT msgresponse */
#define MC_CMD_GET_LOOPBACK_MODES_OUT_LEN 40
/* Supported loopbacks. */
@@ -5333,6 +4661,7 @@
#define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_HI_LEN 4
#define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_HI_LBN 32
#define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_HI_WIDTH 32
+/* enum property: bitshift */
/* enum: None. */
#define MC_CMD_LOOPBACK_NONE 0x0
/* enum: Data. */
@@ -5422,6 +4751,7 @@
#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_HI_LEN 4
#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_HI_LBN 96
#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_HI_WIDTH 32
+/* enum property: bitshift */
/* Enum values, see field(s): */
/* 100M */
/* Supported loopbacks. */
@@ -5435,6 +4765,7 @@
#define MC_CMD_GET_LOOPBACK_MODES_OUT_10G_HI_LEN 4
#define MC_CMD_GET_LOOPBACK_MODES_OUT_10G_HI_LBN 160
#define MC_CMD_GET_LOOPBACK_MODES_OUT_10G_HI_WIDTH 32
+/* enum property: bitshift */
/* Enum values, see field(s): */
/* 100M */
/* Supported loopbacks. */
@@ -5448,6 +4779,7 @@
#define MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_HI_LEN 4
#define MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_HI_LBN 224
#define MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_HI_WIDTH 32
+/* enum property: bitshift */
/* Enum values, see field(s): */
/* 100M */
/* Supported loopbacks. */
@@ -5461,6 +4793,7 @@
#define MC_CMD_GET_LOOPBACK_MODES_OUT_40G_HI_LEN 4
#define MC_CMD_GET_LOOPBACK_MODES_OUT_40G_HI_LBN 288
#define MC_CMD_GET_LOOPBACK_MODES_OUT_40G_HI_WIDTH 32
+/* enum property: bitshift */
/* Enum values, see field(s): */
/* 100M */
@@ -5479,6 +4812,7 @@
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100M_HI_LEN 4
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100M_HI_LBN 32
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100M_HI_WIDTH 32
+/* enum property: bitshift */
/* enum: None. */
/* MC_CMD_LOOPBACK_NONE 0x0 */
/* enum: Data. */
@@ -5568,6 +4902,7 @@
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_1G_HI_LEN 4
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_1G_HI_LBN 96
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_1G_HI_WIDTH 32
+/* enum property: bitshift */
/* Enum values, see field(s): */
/* 100M */
/* Supported loopbacks. */
@@ -5581,6 +4916,7 @@
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_10G_HI_LEN 4
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_10G_HI_LBN 160
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_10G_HI_WIDTH 32
+/* enum property: bitshift */
/* Enum values, see field(s): */
/* 100M */
/* Supported loopbacks. */
@@ -5594,6 +4930,7 @@
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_SUGGESTED_HI_LEN 4
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_SUGGESTED_HI_LBN 224
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_SUGGESTED_HI_WIDTH 32
+/* enum property: bitshift */
/* Enum values, see field(s): */
/* 100M */
/* Supported loopbacks. */
@@ -5607,6 +4944,7 @@
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_40G_HI_LEN 4
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_40G_HI_LBN 288
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_40G_HI_WIDTH 32
+/* enum property: bitshift */
/* Enum values, see field(s): */
/* 100M */
/* Supported 25G loopbacks. */
@@ -5620,6 +4958,7 @@
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_HI_LEN 4
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_HI_LBN 352
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_HI_WIDTH 32
+/* enum property: bitshift */
/* Enum values, see field(s): */
/* 100M */
/* Supported 50G loopbacks. */
@@ -5633,6 +4972,7 @@
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_50G_HI_LEN 4
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_50G_HI_LBN 416
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_50G_HI_WIDTH 32
+/* enum property: bitshift */
/* Enum values, see field(s): */
/* 100M */
/* Supported 100G loopbacks. */
@@ -5646,6 +4986,214 @@
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100G_HI_LEN 4
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100G_HI_LBN 480
#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100G_HI_WIDTH 32
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* 100M */
+
+/* MC_CMD_GET_LOOPBACK_MODES_OUT_V3 msgresponse: Supported loopback modes for
+ * newer NICs with 200G support
+ */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_LEN 72
+/* Supported loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_100M_OFST 0
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_100M_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_100M_LO_OFST 0
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_100M_LO_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_100M_LO_LBN 0
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_100M_LO_WIDTH 32
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_100M_HI_OFST 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_100M_HI_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_100M_HI_LBN 32
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_100M_HI_WIDTH 32
+/* enum property: bitshift */
+/* enum: None. */
+/* MC_CMD_LOOPBACK_NONE 0x0 */
+/* enum: Data. */
+/* MC_CMD_LOOPBACK_DATA 0x1 */
+/* enum: GMAC. */
+/* MC_CMD_LOOPBACK_GMAC 0x2 */
+/* enum: XGMII. */
+/* MC_CMD_LOOPBACK_XGMII 0x3 */
+/* enum: XGXS. */
+/* MC_CMD_LOOPBACK_XGXS 0x4 */
+/* enum: XAUI. */
+/* MC_CMD_LOOPBACK_XAUI 0x5 */
+/* enum: GMII. */
+/* MC_CMD_LOOPBACK_GMII 0x6 */
+/* enum: SGMII. */
+/* MC_CMD_LOOPBACK_SGMII 0x7 */
+/* enum: XGBR. */
+/* MC_CMD_LOOPBACK_XGBR 0x8 */
+/* enum: XFI. */
+/* MC_CMD_LOOPBACK_XFI 0x9 */
+/* enum: XAUI Far. */
+/* MC_CMD_LOOPBACK_XAUI_FAR 0xa */
+/* enum: GMII Far. */
+/* MC_CMD_LOOPBACK_GMII_FAR 0xb */
+/* enum: SGMII Far. */
+/* MC_CMD_LOOPBACK_SGMII_FAR 0xc */
+/* enum: XFI Far. */
+/* MC_CMD_LOOPBACK_XFI_FAR 0xd */
+/* enum: GPhy. */
+/* MC_CMD_LOOPBACK_GPHY 0xe */
+/* enum: PhyXS. */
+/* MC_CMD_LOOPBACK_PHYXS 0xf */
+/* enum: PCS. */
+/* MC_CMD_LOOPBACK_PCS 0x10 */
+/* enum: PMA-PMD. */
+/* MC_CMD_LOOPBACK_PMAPMD 0x11 */
+/* enum: Cross-Port. */
+/* MC_CMD_LOOPBACK_XPORT 0x12 */
+/* enum: XGMII-Wireside. */
+/* MC_CMD_LOOPBACK_XGMII_WS 0x13 */
+/* enum: XAUI Wireside. */
+/* MC_CMD_LOOPBACK_XAUI_WS 0x14 */
+/* enum: XAUI Wireside Far. */
+/* MC_CMD_LOOPBACK_XAUI_WS_FAR 0x15 */
+/* enum: XAUI Wireside near. */
+/* MC_CMD_LOOPBACK_XAUI_WS_NEAR 0x16 */
+/* enum: GMII Wireside. */
+/* MC_CMD_LOOPBACK_GMII_WS 0x17 */
+/* enum: XFI Wireside. */
+/* MC_CMD_LOOPBACK_XFI_WS 0x18 */
+/* enum: XFI Wireside Far. */
+/* MC_CMD_LOOPBACK_XFI_WS_FAR 0x19 */
+/* enum: PhyXS Wireside. */
+/* MC_CMD_LOOPBACK_PHYXS_WS 0x1a */
+/* enum: PMA lanes MAC-Serdes. */
+/* MC_CMD_LOOPBACK_PMA_INT 0x1b */
+/* enum: KR Serdes Parallel (Encoder). */
+/* MC_CMD_LOOPBACK_SD_NEAR 0x1c */
+/* enum: KR Serdes Serial. */
+/* MC_CMD_LOOPBACK_SD_FAR 0x1d */
+/* enum: PMA lanes MAC-Serdes Wireside. */
+/* MC_CMD_LOOPBACK_PMA_INT_WS 0x1e */
+/* enum: KR Serdes Parallel Wireside (Full PCS). */
+/* MC_CMD_LOOPBACK_SD_FEP2_WS 0x1f */
+/* enum: KR Serdes Parallel Wireside (Sym Aligner to TX). */
+/* MC_CMD_LOOPBACK_SD_FEP1_5_WS 0x20 */
+/* enum: KR Serdes Parallel Wireside (Deserializer to Serializer). */
+/* MC_CMD_LOOPBACK_SD_FEP_WS 0x21 */
+/* enum: KR Serdes Serial Wireside. */
+/* MC_CMD_LOOPBACK_SD_FES_WS 0x22 */
+/* enum: Near side of AOE Siena side port */
+/* MC_CMD_LOOPBACK_AOE_INT_NEAR 0x23 */
+/* enum: Medford Wireside datapath loopback */
+/* MC_CMD_LOOPBACK_DATA_WS 0x24 */
+/* enum: Force link up without setting up any physical loopback (snapper use
+ * only)
+ */
+/* MC_CMD_LOOPBACK_FORCE_EXT_LINK 0x25 */
+/* Supported loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_1G_OFST 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_1G_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_1G_LO_OFST 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_1G_LO_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_1G_LO_LBN 64
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_1G_LO_WIDTH 32
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_1G_HI_OFST 12
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_1G_HI_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_1G_HI_LBN 96
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_1G_HI_WIDTH 32
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* 100M */
+/* Supported loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_10G_OFST 16
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_10G_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_10G_LO_OFST 16
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_10G_LO_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_10G_LO_LBN 128
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_10G_LO_WIDTH 32
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_10G_HI_OFST 20
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_10G_HI_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_10G_HI_LBN 160
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_10G_HI_WIDTH 32
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* 100M */
+/* Supported loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_SUGGESTED_OFST 24
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_SUGGESTED_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_SUGGESTED_LO_OFST 24
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_SUGGESTED_LO_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_SUGGESTED_LO_LBN 192
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_SUGGESTED_LO_WIDTH 32
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_SUGGESTED_HI_OFST 28
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_SUGGESTED_HI_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_SUGGESTED_HI_LBN 224
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_SUGGESTED_HI_WIDTH 32
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* 100M */
+/* Supported loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_40G_OFST 32
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_40G_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_40G_LO_OFST 32
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_40G_LO_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_40G_LO_LBN 256
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_40G_LO_WIDTH 32
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_40G_HI_OFST 36
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_40G_HI_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_40G_HI_LBN 288
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_40G_HI_WIDTH 32
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* 100M */
+/* Supported 25G loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_25G_OFST 40
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_25G_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_25G_LO_OFST 40
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_25G_LO_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_25G_LO_LBN 320
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_25G_LO_WIDTH 32
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_25G_HI_OFST 44
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_25G_HI_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_25G_HI_LBN 352
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_25G_HI_WIDTH 32
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* 100M */
+/* Supported 50G loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_50G_OFST 48
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_50G_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_50G_LO_OFST 48
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_50G_LO_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_50G_LO_LBN 384
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_50G_LO_WIDTH 32
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_50G_HI_OFST 52
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_50G_HI_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_50G_HI_LBN 416
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_50G_HI_WIDTH 32
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* 100M */
+/* Supported 100G loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_100G_OFST 56
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_100G_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_100G_LO_OFST 56
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_100G_LO_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_100G_LO_LBN 448
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_100G_LO_WIDTH 32
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_100G_HI_OFST 60
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_100G_HI_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_100G_HI_LBN 480
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_100G_HI_WIDTH 32
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* 100M */
+/* Supported 200G loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_200G_OFST 64
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_200G_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_200G_LO_OFST 64
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_200G_LO_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_200G_LO_LBN 512
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_200G_LO_WIDTH 32
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_200G_HI_OFST 68
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_200G_HI_LEN 4
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_200G_HI_LBN 544
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_200G_HI_WIDTH 32
+/* enum property: bitshift */
/* Enum values, see field(s): */
/* 100M */
@@ -5673,13 +5221,835 @@
#define FEC_TYPE_TYPE_LEN 4
/* enum: No FEC */
#define MC_CMD_FEC_NONE 0x0
-/* enum: Clause 74 BASE-R FEC (a.k.a Firecode) */
+/* enum: IEEE 802.3, Clause 74 BASE-R FEC (a.k.a Firecode) */
#define MC_CMD_FEC_BASER 0x1
-/* enum: Clause 91/Clause 108 Reed-Solomon FEC */
+/* enum: IEEE 802.3, Clause 91/Clause 108 Reed-Solomon FEC */
#define MC_CMD_FEC_RS 0x2
+/* enum: IEEE 802.3, Clause 161, interleaved RS-FEC sublayer for 100GBASE-R
+ * PHYs
+ */
+#define MC_CMD_FEC_IEEE_RS_INT 0x3
+/* enum: Ethernet Consortium, Low Latency RS-FEC. RS(272, 258). Replaces FEC
+ * specified in Clause 119 for 100/200G PHY. Replaces FEC specified in Clause
+ * 134 for 50G PHY.
+ */
+#define MC_CMD_FEC_ETCS_RS_LL 0x4
+/* enum: FEC mode selected automatically */
+#define MC_CMD_FEC_AUTO 0x5
#define FEC_TYPE_TYPE_LBN 0
#define FEC_TYPE_TYPE_WIDTH 32
+/* MC_CMD_ETH_TECH structuredef: Ethernet technology as defined by IEEE802.3,
+ * Ethernet Technology Consortium, proprietary technologies. The driver must
+ * not use technologies labelled NONE and AUTO.
+ */
+#define MC_CMD_ETH_TECH_LEN 16
+/* The enums in this field can be used either as bitwise indices into a tech
+ * mask (e.g. see MC_CMD_ETH_AN_FIELDS/TECH_MASK for example) or as regular
+ * enums (e.g. see MC_CMD_LINK_CTRL_IN/ADVERTISED_TECH_ABILITIES_MASK). This
+ * structure must be updated to add new technologies when projects that need
+ * them arise. An incomplete list of possible future expansions includes:
+ * 100GBASE_KP4, 800GBASE_CR8, 800GBASE_KR8, 800GBASE_DR8, 800GBASE_SR8,
+ * 800GBASE_VR8
+ */
+#define MC_CMD_ETH_TECH_TECH_OFST 0
+#define MC_CMD_ETH_TECH_TECH_LEN 16
+/* enum: 1000BASE-KX - 1000BASE-X PCS/PMA over an electrical backplane PMD. See
+ * IEEE 802.3 Clause 70
+ */
+#define MC_CMD_ETH_TECH_1000BASEKX 0x0
+/* enum: 10GBASE-R - PCS/PMA over an electrical backplane PMD. Refer to IEEE
+ * 802.3 Clause 72
+ */
+#define MC_CMD_ETH_TECH_10GBASE_KR 0x1
+/* enum: 40GBASE-R PCS/PMA over an electrical backplane PMD. See IEEE 802.3
+ * Clause 84.
+ */
+#define MC_CMD_ETH_TECH_40GBASE_KR4 0x2
+/* enum: 40GBASE-R PCS/PMA over 4 lane shielded copper balanced cable PMD. See
+ * IEEE 802.3 Clause 85
+ */
+#define MC_CMD_ETH_TECH_40GBASE_CR4 0x3
+/* enum: 40GBASE-R PCS/PMA over 4 lane multimode fiber PMD as specified in
+ * Clause 86
+ */
+#define MC_CMD_ETH_TECH_40GBASE_SR4 0x4
+/* enum: 40GBASE-R PCS/PMA over 4 WDM lane single mode fiber PMD with long
+ * reach. See IEEE 802.3 Clause 87
+ */
+#define MC_CMD_ETH_TECH_40GBASE_LR4 0x5
+/* enum: 25GBASE-R PCS/PMA over shielded balanced copper cable PMD. See IEEE
+ * 802.3 Clause 110
+ */
+#define MC_CMD_ETH_TECH_25GBASE_CR 0x6
+/* enum: 25GBASE-R PCS/PMA over an electrical backplane PMD. See IEEE 802.3
+ * Clause 111
+ */
+#define MC_CMD_ETH_TECH_25GBASE_KR 0x7
+/* enum: 25GBASE-R PCS/PMA over multimode fiber PMD. Refer to IEEE 802.3 Clause
+ * 112
+ */
+#define MC_CMD_ETH_TECH_25GBASE_SR 0x8
+/* enum: An Ethernet Physical layer operating at 50 Gb/s on twin-axial copper
+ * cable. Refer to Ethernet Technology Consortium 25/50G Ethernet Spec.
+ */
+#define MC_CMD_ETH_TECH_50GBASE_CR2 0x9
+/* enum: An Ethernet Physical layer operating at 50 Gb/s on copper backplane.
+ * Refer to Ethernet Technology Consortium 25/50G Ethernet Spec.
+ */
+#define MC_CMD_ETH_TECH_50GBASE_KR2 0xa
+/* enum: 100GBASE-R PCS/PMA over an electrical backplane PMD. See IEEE 802.3
+ * Clause 93
+ */
+#define MC_CMD_ETH_TECH_100GBASE_KR4 0xb
+/* enum: 100GBASE-R PCS/PMA over 4 lane multimode fiber PMD. See IEEE 802.3
+ * Clause 95
+ */
+#define MC_CMD_ETH_TECH_100GBASE_SR4 0xc
+/* enum: 100GBASE-R PCS/PMA over 4 lane shielded copper balanced cable PMD. See
+ * IEEE 802.3 Clause 92
+ */
+#define MC_CMD_ETH_TECH_100GBASE_CR4 0xd
+/* enum: 100GBASE-R PCS/PMA over 4 WDM lane single mode fiber PMD, with
+ * long/extended reach. See IEEE 802.3 Clause 88
+ */
+#define MC_CMD_ETH_TECH_100GBASE_LR4_ER4 0xe
+/* enum: An Ethernet Physical layer operating at 50 Gb/s on short reach fiber.
+ * Refer to Ethernet Technology Consortium 25/50G Ethernet Spec.
+ */
+#define MC_CMD_ETH_TECH_50GBASE_SR2 0xf
+/* enum: 1000BASE-X PCS/PMA over undefined PMD, duplex mode unknown. See IEEE
+ * 802.3 Clause 36
+ */
+#define MC_CMD_ETH_TECH_1000BASEX 0x10
+/* enum: Non-standardised. 10G direct attach */
+#define MC_CMD_ETH_TECH_10GBASE_CR 0x11
+/* enum: 10GBASE-SR fiber over 850nm optics. See IEEE 802.3 Clause 52 */
+#define MC_CMD_ETH_TECH_10GBASE_SR 0x12
+/* enum: 10GBASE-LR fiber over 1310nm optics. See IEEE 802.3 Clause 52 */
+#define MC_CMD_ETH_TECH_10GBASE_LR 0x13
+/* enum: 10GBASE-LRM fiber over 1310 nm optics. See IEEE 802.3 Clause 68 */
+#define MC_CMD_ETH_TECH_10GBASE_LRM 0x14
+/* enum: 10GBASE-ER fiber over 1550nm optics. See IEEE 802.3 Clause 52 */
+#define MC_CMD_ETH_TECH_10GBASE_ER 0x15
+/* enum: 50GBASE-R PCS/PMA over an electrical backplane PMD. See IEEE 802.3
+ * Clause 137
+ */
+#define MC_CMD_ETH_TECH_50GBASE_KR 0x16
+/* enum: 50GBASE-SR PCS/PMA over multimode fiber PMD as specified in Clause 138
+ */
+#define MC_CMD_ETH_TECH_50GBASE_SR 0x17
+/* enum: 50GBASE-CR PCS/PMA over shielded copper balanced cable PMD. See IEEE
+ * 802.3 Clause 136
+ */
+#define MC_CMD_ETH_TECH_50GBASE_CR 0x18
+/* enum: 50GBASE-R PCS/PMA over single mode fiber PMD as specified in Clause
+ * 139.
+ */
+#define MC_CMD_ETH_TECH_50GBASE_LR_ER_FR 0x19
+/* enum: 100 Gb/s PHY using 100GBASE-R encoding over single-mode fiber with
+ * reach up to at least 500 m (see IEEE 802.3 Clause 140)
+ */
+#define MC_CMD_ETH_TECH_50GBASE_DR 0x1a
+/* enum: 100GBASE-R PCS/PMA over an electrical backplane PMD. See IEEE 802.3
+ * Clause 137
+ */
+#define MC_CMD_ETH_TECH_100GBASE_KR2 0x1b
+/* enum: 100GBASE-R PCS/PMA over 2 lane multimode fiber PMD. See IEEE 802.3
+ * Clause 138
+ */
+#define MC_CMD_ETH_TECH_100GBASE_SR2 0x1c
+/* enum: 100GBASE-R PCS/PMA over 2 lane shielded copper balanced cable PMD. See
+ * IEEE 802.3 Clause 136
+ */
+#define MC_CMD_ETH_TECH_100GBASE_CR2 0x1d
+/* enum: Unknown source */
+#define MC_CMD_ETH_TECH_100GBASE_LR2_ER2_FR2 0x1e
+/* enum: Unknown source */
+#define MC_CMD_ETH_TECH_100GBASE_DR2 0x1f
+/* enum: 200GBASE-R PCS/PMA over an electrical backplane PMD. See IEEE 802.3
+ * Clause 137
+ */
+#define MC_CMD_ETH_TECH_200GBASE_KR4 0x20
+/* enum: 200GBASE-R PCS/PMA over 4 lane multimode fiber PMD. See IEEE 802.3
+ * Clause 138
+ */
+#define MC_CMD_ETH_TECH_200GBASE_SR4 0x21
+/* enum: 200GBASE-R PCS/PMA over 4 WDM lane single-mode fiber PMD as specified
+ * in Clause 122
+ */
+#define MC_CMD_ETH_TECH_200GBASE_LR4_ER4_FR4 0x22
+/* enum: 200GBASE-R PCS/PMA over 4-lane single-mode fiber PMD. See IEEE 802.3
+ * Clause 121
+ */
+#define MC_CMD_ETH_TECH_200GBASE_DR4 0x23
+/* enum: 200GBASE-R PCS/PMA over 4 lane shielded copper balanced cable PMD as
+ * specified in Clause 136
+ */
+#define MC_CMD_ETH_TECH_200GBASE_CR4 0x24
+/* enum: Ethernet Technology Consortium 400G AN Spec. 400GBASE-KR8 PMD uses
+ * 802.3 Clause 137, but the number of PMD lanes is 8.
+ */
+#define MC_CMD_ETH_TECH_400GBASE_KR8 0x25
+/* enum: 400GBASE-R PCS/PMA over 8-lane multimode fiber PMD. See IEEE 802.3
+ * Clause 138
+ */
+#define MC_CMD_ETH_TECH_400GBASE_SR8 0x26
+/* enum: 400GBASE-R PCS/PMA over 8 WDM lane single-mode fiber PMD. See IEEE
+ * 802.3 Clause 122
+ */
+#define MC_CMD_ETH_TECH_400GBASE_LR8_ER8_FR8 0x27
+/* enum: Unknown source */
+#define MC_CMD_ETH_TECH_400GBASE_DR8 0x28
+/* enum: Ethernet Technology Consortium 400G AN Spec. 400GBASE-CR8 PMD uses
+ * IEEE 802.3 Clause 136, but the number of PMD lanes is 8.
+ */
+#define MC_CMD_ETH_TECH_400GBASE_CR8 0x29
+/* enum: 100GBASE-R PCS/PMA over an electrical backplane PMD. See IEEE 802.3ck
+ * Clause 163.
+ */
+#define MC_CMD_ETH_TECH_100GBASE_KR 0x2a
+/* enum: IEEE 802.3ck. 100G PHY with PMD as specified in Clause 167 over short
+ * reach fiber
+ */
+#define MC_CMD_ETH_TECH_100GBASE_SR 0x2b
+/* enum: 100G PMD together with single-mode fiber medium. See IEEE 802.3 Clause
+ * 140
+ */
+#define MC_CMD_ETH_TECH_100GBASE_LR_ER_FR 0x2c
+/* enum: 100GBASE-R PCS/PMA over shielded balanced copper cable PMD. See IEEE
+ * 802.3ck Clause 162.
+ */
+#define MC_CMD_ETH_TECH_100GBASE_CR 0x2d
+/* enum: 100G PMD together with single-mode fiber medium. See IEEE 802.3 Clause
+ * 140
+ */
+#define MC_CMD_ETH_TECH_100GBASE_DR 0x2e
+/* enum: 200GBASE-R PCS/PMA over an electrical backplane PMD as specified in
+ * Clause 163 IEEE 802.3ck
+ */
+#define MC_CMD_ETH_TECH_200GBASE_KR2 0x2f
+/* enum: 200G PHY with PMD as specified in Clause 167 over short reach fiber
+ * IEEE 802.3ck
+ */
+#define MC_CMD_ETH_TECH_200GBASE_SR2 0x30
+/* enum: Unknown source */
+#define MC_CMD_ETH_TECH_200GBASE_LR2_ER2_FR2 0x31
+/* enum: Unknown source */
+#define MC_CMD_ETH_TECH_200GBASE_DR2 0x32
+/* enum: 200GBASE-R PCS/PMA over 2 lane shielded balanced copper cable PMD as
+ * specified in Clause 162 IEEE 802.3ck.
+ */
+#define MC_CMD_ETH_TECH_200GBASE_CR2 0x33
+/* enum: 400GBASE-R PCS/PMA over an electrical backplane PMD. See IEEE 802.3
+ * Clause 163 IEEE 802.3ck.
+ */
+#define MC_CMD_ETH_TECH_400GBASE_KR4 0x34
+/* enum: 400G PHY with PMD over short reach fiber. See Clause 167 of IEEE
+ * 802.3ck.
+ */
+#define MC_CMD_ETH_TECH_400GBASE_SR4 0x35
+/* enum: 400GBASE-R PCS/PMA over 4 WDM lane single-mode fiber PMD. See IEEE
+ * 802.3 Clause 151
+ */
+#define MC_CMD_ETH_TECH_400GBASE_LR4_ER4_FR4 0x36
+/* enum: 400GBASE-R PCS/PMA over 4-lane single-mode fiber PMD as specified in
+ * Clause 124
+ */
+#define MC_CMD_ETH_TECH_400GBASE_DR4 0x37
+/* enum: 400GBASE-R PCS/PMA over 4 lane shielded balanced copper cable PMD as
+ * specified in Clause 162 of IEEE 802.3ck.
+ */
+#define MC_CMD_ETH_TECH_400GBASE_CR4 0x38
+/* enum: Automatic tech mode. The driver must not use this. */
+#define MC_CMD_ETH_TECH_AUTO 0x39
+/* enum: See IEEE 802.3cc-2017 Clause 114 */
+#define MC_CMD_ETH_TECH_25GBASE_LR_ER 0x3a
+/* enum: Up to 7 m over twinaxial copper cable assembly (10 lanes, 10 Gbit/s
+ * each). See IEEE 802.3ba-2010 Clause 85
+ */
+#define MC_CMD_ETH_TECH_100GBASE_CR10 0x3b
+/* enum: Invalid tech mode. The driver must not use this. */
+#define MC_CMD_ETH_TECH_NONE 0x7f
+#define MC_CMD_ETH_TECH_TECH_LBN 0
+#define MC_CMD_ETH_TECH_TECH_WIDTH 128
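+
+/* Illustrative sketch, not part of the generated MCDI definitions: the TECH
+ * enums above are documented as usable either as plain values or as bit
+ * indices into a 128-bit technology mask (e.g. MC_CMD_ETH_AN_FIELDS/TECH_MASK).
+ * The hypothetical helper below shows the bit-index usage on a 16-byte,
+ * little-endian mask buffer.
+ */
+static inline void mc_cmd_eth_tech_mask_set(unsigned char *mask,
+					    unsigned int tech)
+{
+	/* e.g. tech == MC_CMD_ETH_TECH_100GBASE_CR4 sets bit 0xd of the mask */
+	mask[tech / 8] |= 1u << (tech % 8);
+}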
+
+/* MC_CMD_LINK_STATUS_FLAGS structuredef */
+#define MC_CMD_LINK_STATUS_FLAGS_LEN 8
+/* Flags used to report the current configuration/state of the link. */
+#define MC_CMD_LINK_STATUS_FLAGS_STATUS_FLAGS_OFST 0
+#define MC_CMD_LINK_STATUS_FLAGS_STATUS_FLAGS_LEN 8
+#define MC_CMD_LINK_STATUS_FLAGS_STATUS_FLAGS_LO_OFST 0
+#define MC_CMD_LINK_STATUS_FLAGS_STATUS_FLAGS_LO_LEN 4
+#define MC_CMD_LINK_STATUS_FLAGS_STATUS_FLAGS_LO_LBN 0
+#define MC_CMD_LINK_STATUS_FLAGS_STATUS_FLAGS_LO_WIDTH 32
+#define MC_CMD_LINK_STATUS_FLAGS_STATUS_FLAGS_HI_OFST 4
+#define MC_CMD_LINK_STATUS_FLAGS_STATUS_FLAGS_HI_LEN 4
+#define MC_CMD_LINK_STATUS_FLAGS_STATUS_FLAGS_HI_LBN 32
+#define MC_CMD_LINK_STATUS_FLAGS_STATUS_FLAGS_HI_WIDTH 32
+/* enum property: bitshift */
+/* enum: Whether we have overall link up */
+#define MC_CMD_LINK_STATUS_FLAGS_LINK_UP 0x0
+/* enum: If set, the PHY has no external RX link synchronisation */
+#define MC_CMD_LINK_STATUS_FLAGS_NO_PHY_LINK 0x1
+/* enum: If set, PMD/MDI is not connected (e.g. cable disconnected, module cage
+ * empty)
+ */
+#define MC_CMD_LINK_STATUS_FLAGS_PMD_MDI_DISCONNECTED 0x2
+/* enum: Set on error while decoding module data (e.g. module EEPROM does not
+ * contain valid values, has checksum errors, etc.)
+ */
+#define MC_CMD_LINK_STATUS_FLAGS_PMD_BAD 0x3
+/* enum: Set when module unsupported (e.g. unsupported link rate or link
+ * technology)
+ */
+#define MC_CMD_LINK_STATUS_FLAGS_PMD_UNSUPPORTED 0x4
+/* enum: Set on error while communicating with the module (e.g. I2C errors
+ * while reading EEPROM)
+ */
+#define MC_CMD_LINK_STATUS_FLAGS_PMD_COMMS_FAULT 0x5
+/* enum: Set on module overcurrent/overvoltage condition */
+#define MC_CMD_LINK_STATUS_FLAGS_PMD_POWER_FAULT 0x6
+/* enum: Set on module overtemperature condition */
+#define MC_CMD_LINK_STATUS_FLAGS_PMD_THERMAL_FAULT 0x7
+/* enum: If set, the module is indicating Loss of Signal */
+#define MC_CMD_LINK_STATUS_FLAGS_PMD_LOS 0x8
+/* enum: If set, PMA is indicating loss of CDR lock (clock sync) */
+#define MC_CMD_LINK_STATUS_FLAGS_PMA_NO_CDR_LOCK 0x9
+/* enum: If set, PMA is indicating loss of analog signal */
+#define MC_CMD_LINK_STATUS_FLAGS_PMA_LOS 0xa
+/* enum: If set, PCS is indicating loss of block lock */
+#define MC_CMD_LINK_STATUS_FLAGS_PCS_NO_BLOCK_LOCK 0xb
+/* enum: If set, PCS is indicating loss of alignment marker lock on one or more
+ * lanes
+ */
+#define MC_CMD_LINK_STATUS_FLAGS_PCS_NO_AM_LOCK 0xc
+/* enum: If set, PCS is indicating loss of overall alignment lock */
+#define MC_CMD_LINK_STATUS_FLAGS_PCS_NO_ALIGN_LOCK 0xd
+/* enum: If set, PCS is indicating high bit error rate condition. */
+#define MC_CMD_LINK_STATUS_FLAGS_PCS_HI_BER 0xe
+/* enum: If set, FEC is indicating loss of FEC lock */
+#define MC_CMD_LINK_STATUS_FLAGS_FEC_NO_LOCK 0xf
+/* enum: If set, indicates that the number of symbol errors in a 8192-codeword
+ * window has exceeded the threshold K (417).
+ */
+#define MC_CMD_LINK_STATUS_FLAGS_FEC_HI_SER 0x10
+/* enum: If set, the receiver has detected the local FEC has degraded. */
+#define MC_CMD_LINK_STATUS_FLAGS_FEC_LOCAL_DEGRADED 0x11
+/* enum: If set, the receiver has detected the remote FEC has degraded. */
+#define MC_CMD_LINK_STATUS_FLAGS_FEC_RM_DEGRADED 0x12
+/* enum: If set, the number of symbol errors is over an internal threshold. */
+#define MC_CMD_LINK_STATUS_FLAGS_FEC_DEGRADED_SER 0x13
+/* enum: If set, autonegotiation has detected an auto-negotiation capable link
+ * partner
+ */
+#define MC_CMD_LINK_STATUS_FLAGS_AN_ABLE 0x14
+/* enum: If set, autonegotiation base page exchange has failed */
+#define MC_CMD_LINK_STATUS_FLAGS_AN_BP_FAILED 0x15
+/* enum: If set, autonegotiation next page exchange has failed */
+#define MC_CMD_LINK_STATUS_FLAGS_AN_NP_FAILED 0x16
+/* enum: If set, autonegotiation has failed to negotiate a common set of
+ * capabilities
+ */
+#define MC_CMD_LINK_STATUS_FLAGS_AN_NO_HCD 0x17
+/* enum: If set, local end link training has failed to establish link training
+ * frame lock on one or more lanes
+ */
+#define MC_CMD_LINK_STATUS_FLAGS_LT_NO_LOCAL_FRAME_LOCK 0x18
+/* enum: If set, remote end link training has failed to establish link training
+ * frame lock on one or more lanes
+ */
+#define MC_CMD_LINK_STATUS_FLAGS_LT_NO_RM_FRAME_LOCK 0x19
+/* enum: If set, remote end has failed to assert Receiver Ready (link training
+ * success) within the designated timeout
+ */
+#define MC_CMD_LINK_STATUS_FLAGS_LT_NO_RX_READY 0x1a
+#define MC_CMD_LINK_STATUS_FLAGS_STATUS_FLAGS_LBN 0
+#define MC_CMD_LINK_STATUS_FLAGS_STATUS_FLAGS_WIDTH 64
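+
+/* Illustrative sketch, not part of the generated MCDI definitions: the
+ * STATUS_FLAGS enums above are bit positions within a 64-bit flags word
+ * ("enum property: bitshift"), so a driver would typically test them as in
+ * this hypothetical helper.
+ */
+static inline int mc_cmd_link_status_flag_test(unsigned long long flags,
+						unsigned int flag)
+{
+	/* e.g. flag == MC_CMD_LINK_STATUS_FLAGS_LINK_UP */
+	return (flags >> flag) & 1;
+}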
+
+/* MC_CMD_PAUSE_MODE structuredef */
+#define MC_CMD_PAUSE_MODE_LEN 1
+#define MC_CMD_PAUSE_MODE_TYPE_OFST 0
+#define MC_CMD_PAUSE_MODE_TYPE_LEN 1
+/* enum: See IEEE 802.3 Clause 73.6.6 */
+#define MC_CMD_PAUSE_MODE_AN_PAUSE 0x0
+/* enum: See IEEE 802.3 Clause 73.6.6 */
+#define MC_CMD_PAUSE_MODE_AN_ASYM_DIR 0x1
+#define MC_CMD_PAUSE_MODE_TYPE_LBN 0
+#define MC_CMD_PAUSE_MODE_TYPE_WIDTH 8
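+
+/* Illustrative sketch, not part of the generated MCDI definitions: the pause
+ * mode enums above are bit positions when used in the PAUSE_MASK fields of
+ * MC_CMD_ETH_AN_FIELDS and the LINK_CTRL/LINK_STATE messages. Advertising
+ * both symmetric and asymmetric pause would look like this hypothetical
+ * helper.
+ */
+static inline unsigned char mc_cmd_pause_mask_both(void)
+{
+	return (1u << MC_CMD_PAUSE_MODE_AN_PAUSE) |
+	       (1u << MC_CMD_PAUSE_MODE_AN_ASYM_DIR);
+}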
+
+/* MC_CMD_ETH_AN_FIELDS structuredef: Fields used for IEEE 802.3 Clause 73
+ * Auto-Negotiation. Warning - This is fixed size and cannot be extended. This
+ * structure is used to define autonegotiable abilities (advertised, link
+ * partner and supported abilities).
+ */
+#define MC_CMD_ETH_AN_FIELDS_LEN 25
+/* Mask of Ethernet technologies. The bit indices in this mask are taken from
+ * the TECH field in the MC_CMD_ETH_TECH structure.
+ */
+#define MC_CMD_ETH_AN_FIELDS_TECH_MASK_OFST 0
+#define MC_CMD_ETH_AN_FIELDS_TECH_MASK_LEN 16
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* MC_CMD_ETH_TECH/TECH */
+#define MC_CMD_ETH_AN_FIELDS_TECH_MASK_LBN 0
+#define MC_CMD_ETH_AN_FIELDS_TECH_MASK_WIDTH 128
+/* Mask of supported FEC modes */
+#define MC_CMD_ETH_AN_FIELDS_FEC_MASK_OFST 16
+#define MC_CMD_ETH_AN_FIELDS_FEC_MASK_LEN 4
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* FEC_TYPE/TYPE */
+#define MC_CMD_ETH_AN_FIELDS_FEC_MASK_LBN 128
+#define MC_CMD_ETH_AN_FIELDS_FEC_MASK_WIDTH 32
+/* Mask of requested FEC modes */
+#define MC_CMD_ETH_AN_FIELDS_FEC_REQ_OFST 20
+#define MC_CMD_ETH_AN_FIELDS_FEC_REQ_LEN 4
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* FEC_TYPE/TYPE */
+#define MC_CMD_ETH_AN_FIELDS_FEC_REQ_LBN 160
+#define MC_CMD_ETH_AN_FIELDS_FEC_REQ_WIDTH 32
+/* Bitmask of negotiated pause modes */
+#define MC_CMD_ETH_AN_FIELDS_PAUSE_MASK_OFST 24
+#define MC_CMD_ETH_AN_FIELDS_PAUSE_MASK_LEN 1
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* MC_CMD_PAUSE_MODE/TYPE */
+#define MC_CMD_ETH_AN_FIELDS_PAUSE_MASK_LBN 192
+#define MC_CMD_ETH_AN_FIELDS_PAUSE_MASK_WIDTH 8
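+
+/* Illustrative sketch, not part of the generated MCDI definitions: unpacking
+ * a 25-byte MC_CMD_ETH_AN_FIELDS blob with the offsets defined above. The
+ * struct, helper names and open-coded little-endian load are hypothetical; a
+ * real driver would use its own MCDI accessors. Assumes memcpy() is
+ * available.
+ */
+struct mc_cmd_eth_an_fields_sketch {
+	unsigned char tech_mask[MC_CMD_ETH_AN_FIELDS_TECH_MASK_LEN];
+	unsigned int fec_mask;		/* bitshifts of FEC_TYPE/TYPE */
+	unsigned int fec_req;		/* bitshifts of FEC_TYPE/TYPE */
+	unsigned char pause_mask;	/* bitshifts of MC_CMD_PAUSE_MODE/TYPE */
+};
+
+static inline unsigned int mc_cmd_an_fields_le32(const unsigned char *p)
+{
+	return p[0] | (p[1] << 8) | (p[2] << 16) | ((unsigned int)p[3] << 24);
+}
+
+static inline void
+mc_cmd_eth_an_fields_parse(const unsigned char *buf,
+			   struct mc_cmd_eth_an_fields_sketch *out)
+{
+	memcpy(out->tech_mask, buf + MC_CMD_ETH_AN_FIELDS_TECH_MASK_OFST,
+	       MC_CMD_ETH_AN_FIELDS_TECH_MASK_LEN);
+	out->fec_mask = mc_cmd_an_fields_le32(buf + MC_CMD_ETH_AN_FIELDS_FEC_MASK_OFST);
+	out->fec_req = mc_cmd_an_fields_le32(buf + MC_CMD_ETH_AN_FIELDS_FEC_REQ_OFST);
+	out->pause_mask = buf[MC_CMD_ETH_AN_FIELDS_PAUSE_MASK_OFST];
+}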
+
+/* MC_CMD_LOOPBACK_V2 structuredef: Loopback modes for use with the new
+ * MC_CMD_LINK_CTRL and MC_CMD_LINK_STATE. These loopback modes are not
+ * supported in other getlink/setlink commands.
+ */
+#define MC_CMD_LOOPBACK_V2_LEN 4
+#define MC_CMD_LOOPBACK_V2_MODE_OFST 0
+#define MC_CMD_LOOPBACK_V2_MODE_LEN 4
+/* enum: No loopback */
+#define MC_CMD_LOOPBACK_V2_NONE 0x0
+/* enum: Let firmware choose a supported loopback mode */
+#define MC_CMD_LOOPBACK_V2_AUTO 0x1
+/* enum: Loopback after the MAC */
+#define MC_CMD_LOOPBACK_V2_POST_MAC 0x2
+/* enum: Loopback after the PCS */
+#define MC_CMD_LOOPBACK_V2_POST_PCS 0x3
+/* enum: Loopback after the PMA */
+#define MC_CMD_LOOPBACK_V2_POST_PMA 0x4
+/* enum: Loopback after the MDI Wireside */
+#define MC_CMD_LOOPBACK_V2_POST_MDI_WS 0x5
+/* enum: Loopback after the PMA Wireside */
+#define MC_CMD_LOOPBACK_V2_POST_PMA_WS 0x6
+/* enum: Loopback after the PCS Wireside */
+#define MC_CMD_LOOPBACK_V2_POST_PCS_WS 0x7
+/* enum: Loopback after the MAC Wireside */
+#define MC_CMD_LOOPBACK_V2_POST_MAC_WS 0x8
+/* enum: Loopback after the MAC FIFOs (before the MAC) */
+#define MC_CMD_LOOPBACK_V2_PRE_MAC 0x9
+#define MC_CMD_LOOPBACK_V2_MODE_LBN 0
+#define MC_CMD_LOOPBACK_V2_MODE_WIDTH 32
+
+/* MC_CMD_FCNTL structuredef */
+#define MC_CMD_FCNTL_LEN 4
+#define MC_CMD_FCNTL_MASK_OFST 0
+#define MC_CMD_FCNTL_MASK_LEN 4
+/* enum: Flow control is off. */
+#define MC_CMD_FCNTL_OFF 0x0
+/* enum: Respond to flow control. */
+#define MC_CMD_FCNTL_RESPOND 0x1
+/* enum: Respond to and Issue flow control. */
+#define MC_CMD_FCNTL_BIDIR 0x2
+/* enum: Auto negotiate flow control. */
+#define MC_CMD_FCNTL_AUTO 0x3
+/* enum: Priority flow control. This is only supported on KSB. */
+#define MC_CMD_FCNTL_QBB 0x4
+/* enum: Issue flow control. */
+#define MC_CMD_FCNTL_GENERATE 0x5
+#define MC_CMD_FCNTL_MASK_LBN 0
+#define MC_CMD_FCNTL_MASK_WIDTH 32
+
+/* MC_CMD_LINK_FLAGS structuredef */
+#define MC_CMD_LINK_FLAGS_LEN 4
+/* The enums defined in this field are used as indices into the
+ * MC_CMD_LINK_FLAGS bitmask.
+ */
+#define MC_CMD_LINK_FLAGS_MASK_OFST 0
+#define MC_CMD_LINK_FLAGS_MASK_LEN 4
+/* enum property: bitshift */
+/* enum: Enable auto-negotiation. If AN is enabled, link technology and FEC
+ * mode are determined by advertised capabilities and requested FEC modes,
+ * combined with link partner capabilities. If AN is disabled, link technology
+ * is forced to LINK_TECHNOLOGY and FEC mode is forced to FEC_MODE. Not valid
+ * if loopback is enabled
+ */
+#define MC_CMD_LINK_FLAGS_AUTONEG_EN 0x0
+/* enum: Enable parallel detect. In addition to AN, try to sense partner forced
+ * speed/FEC mode (when partner AN disabled). Only valid if AN is enabled.
+ */
+#define MC_CMD_LINK_FLAGS_PARALLEL_DETECT_EN 0x1
+/* enum: Force link down, in electrical idle. */
+#define MC_CMD_LINK_FLAGS_LINK_DISABLE 0x2
+/* enum: Ignore the sequence number and always apply. */
+#define MC_CMD_LINK_FLAGS_IGNORE_MODULE_SEQ 0x3
+#define MC_CMD_LINK_FLAGS_MASK_LBN 0
+#define MC_CMD_LINK_FLAGS_MASK_WIDTH 32
+
+
+/***********************************/
+/* MC_CMD_LINK_CTRL
+ * Write the unified MAC/PHY link configuration. Locks required: None. Return
+ * code: 0, EINVAL, ETIME, EAGAIN
+ */
+#define MC_CMD_LINK_CTRL 0x6b
+#undef MC_CMD_0x6b_PRIVILEGE_CTG
+
+#define MC_CMD_0x6b_PRIVILEGE_CTG SRIOV_CTG_LINK
+
+/* MC_CMD_LINK_CTRL_IN msgrequest */
+#define MC_CMD_LINK_CTRL_IN_LEN 40
+/* Handle to the port to set link state for. */
+#define MC_CMD_LINK_CTRL_IN_PORT_HANDLE_OFST 0
+#define MC_CMD_LINK_CTRL_IN_PORT_HANDLE_LEN 4
+/* Control flags */
+#define MC_CMD_LINK_CTRL_IN_CONTROL_FLAGS_OFST 4
+#define MC_CMD_LINK_CTRL_IN_CONTROL_FLAGS_LEN 4
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* MC_CMD_LINK_FLAGS/MASK */
+/* Reserved for future expansion, and included to provide padding for alignment
+ * purposes.
+ */
+#define MC_CMD_LINK_CTRL_IN_RESERVED_OFST 8
+#define MC_CMD_LINK_CTRL_IN_RESERVED_LEN 8
+#define MC_CMD_LINK_CTRL_IN_RESERVED_LO_OFST 8
+#define MC_CMD_LINK_CTRL_IN_RESERVED_LO_LEN 4
+#define MC_CMD_LINK_CTRL_IN_RESERVED_LO_LBN 64
+#define MC_CMD_LINK_CTRL_IN_RESERVED_LO_WIDTH 32
+#define MC_CMD_LINK_CTRL_IN_RESERVED_HI_OFST 12
+#define MC_CMD_LINK_CTRL_IN_RESERVED_HI_LEN 4
+#define MC_CMD_LINK_CTRL_IN_RESERVED_HI_LBN 96
+#define MC_CMD_LINK_CTRL_IN_RESERVED_HI_WIDTH 32
+/* Technology abilities to advertise during auto-negotiation */
+#define MC_CMD_LINK_CTRL_IN_ADVERTISED_TECH_ABILITIES_MASK_OFST 16
+#define MC_CMD_LINK_CTRL_IN_ADVERTISED_TECH_ABILITIES_MASK_LEN 16
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* MC_CMD_ETH_TECH/TECH */
+/* Pause abilities to advertise during auto-negotiation. Valid when auto-
+ * negotiation is enabled and MC_CMD_SET_MAC_IN/FCNTL is set to
+ * MC_CMD_FCNTL_AUTO. If auto-negotiation is disabled the driver must
+ * explicitly configure pause mode with MC_CMD_SET_MAC.
+ */
+#define MC_CMD_LINK_CTRL_IN_ADVERTISED_PAUSE_ABILITIES_MASK_OFST 32
+#define MC_CMD_LINK_CTRL_IN_ADVERTISED_PAUSE_ABILITIES_MASK_LEN 1
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* MC_CMD_PAUSE_MODE/TYPE */
+/* When auto-negotiation is enabled, this is the FEC mode to request. Note that
+ * a weaker FEC mode may get negotiated, depending on what the link partner
+ * supports. The driver should subsequently use MC_CMD_GET_LINK to check the
+ * actual negotiated FEC mode. When auto-negotiation is disabled, this is the
+ * forced FEC mode.
+ */
+#define MC_CMD_LINK_CTRL_IN_FEC_MODE_OFST 33
+#define MC_CMD_LINK_CTRL_IN_FEC_MODE_LEN 1
+/* enum property: value */
+/* Enum values, see field(s): */
+/* FEC_TYPE/TYPE */
+/* This is only to be used when auto-negotiation is disabled (forced speed or
+ * loopback mode). If the specified value does not align with the values
+ * defined in the enum MC_CMD_ETH_TECH/TECH, it is considered invalid.
+ */
+#define MC_CMD_LINK_CTRL_IN_LINK_TECHNOLOGY_OFST 36
+#define MC_CMD_LINK_CTRL_IN_LINK_TECHNOLOGY_LEN 2
+/* enum property: value */
+/* Enum values, see field(s): */
+/* MC_CMD_ETH_TECH/TECH */
+/* The sequence number of the last MODULECHANGE event. If this doesn't match,
+ * fail with EAGAIN.
+ */
+#define MC_CMD_LINK_CTRL_IN_MODULE_SEQ_OFST 38
+#define MC_CMD_LINK_CTRL_IN_MODULE_SEQ_LEN 1
+/* Loopback Mode. Only valid when auto-negotiation is disabled. */
+#define MC_CMD_LINK_CTRL_IN_LOOPBACK_OFST 39
+#define MC_CMD_LINK_CTRL_IN_LOOPBACK_LEN 1
+/* enum property: value */
+/* Enum values, see field(s): */
+/* MC_CMD_LOOPBACK_V2/MODE */
+
+/* MC_CMD_LINK_CTRL_OUT msgresponse */
+#define MC_CMD_LINK_CTRL_OUT_LEN 0
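+
+/* Illustrative sketch, not part of the generated MCDI definitions: building a
+ * minimal MC_CMD_LINK_CTRL_IN request that enables auto-negotiation,
+ * advertises one technology and requests RS-FEC, using the offsets defined
+ * above. Function and parameter names are hypothetical and the little-endian
+ * store is open-coded; a real driver would use its MCDI helpers.
+ */
+static inline void mc_cmd_link_ctrl_put_le32(unsigned char *p, unsigned int v)
+{
+	p[0] = v; p[1] = v >> 8; p[2] = v >> 16; p[3] = v >> 24;
+}
+
+static inline void
+mc_cmd_link_ctrl_in_build(unsigned char *buf, unsigned int port_handle)
+{
+	/* buf must be MC_CMD_LINK_CTRL_IN_LEN (40) bytes, zero-initialised */
+	mc_cmd_link_ctrl_put_le32(buf + MC_CMD_LINK_CTRL_IN_PORT_HANDLE_OFST,
+				  port_handle);
+	/* CONTROL_FLAGS is a bitmask indexed by MC_CMD_LINK_FLAGS/MASK; skip
+	 * the MODULECHANGE sequence check for this sketch
+	 */
+	mc_cmd_link_ctrl_put_le32(buf + MC_CMD_LINK_CTRL_IN_CONTROL_FLAGS_OFST,
+				  (1u << MC_CMD_LINK_FLAGS_AUTONEG_EN) |
+				  (1u << MC_CMD_LINK_FLAGS_IGNORE_MODULE_SEQ));
+	/* advertise 100GBASE-CR4: set its bit in the 128-bit tech mask */
+	buf[MC_CMD_LINK_CTRL_IN_ADVERTISED_TECH_ABILITIES_MASK_OFST +
+	    MC_CMD_ETH_TECH_100GBASE_CR4 / 8] |=
+		1u << (MC_CMD_ETH_TECH_100GBASE_CR4 % 8);
+	/* request RS-FEC; a weaker mode may still be negotiated */
+	buf[MC_CMD_LINK_CTRL_IN_FEC_MODE_OFST] = MC_CMD_FEC_RS;
+}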
+
+
+/***********************************/
+/* MC_CMD_LINK_STATE
+ */
+#define MC_CMD_LINK_STATE 0x6c
+#undef MC_CMD_0x6c_PRIVILEGE_CTG
+
+#define MC_CMD_0x6c_PRIVILEGE_CTG SRIOV_CTG_LINK
+
+/* MC_CMD_LINK_STATE_IN msgrequest */
+#define MC_CMD_LINK_STATE_IN_LEN 4
+/* Handle to the port to get link state for. */
+#define MC_CMD_LINK_STATE_IN_PORT_HANDLE_OFST 0
+#define MC_CMD_LINK_STATE_IN_PORT_HANDLE_LEN 4
+
+/* MC_CMD_LINK_STATE_OUT msgresponse */
+#define MC_CMD_LINK_STATE_OUT_LEN 114
+/* Flags used to report the current configuration/state of the link. */
+#define MC_CMD_LINK_STATE_OUT_STATUS_FLAGS_OFST 0
+#define MC_CMD_LINK_STATE_OUT_STATUS_FLAGS_LEN 8
+#define MC_CMD_LINK_STATE_OUT_STATUS_FLAGS_LO_OFST 0
+#define MC_CMD_LINK_STATE_OUT_STATUS_FLAGS_LO_LEN 4
+#define MC_CMD_LINK_STATE_OUT_STATUS_FLAGS_LO_LBN 0
+#define MC_CMD_LINK_STATE_OUT_STATUS_FLAGS_LO_WIDTH 32
+#define MC_CMD_LINK_STATE_OUT_STATUS_FLAGS_HI_OFST 4
+#define MC_CMD_LINK_STATE_OUT_STATUS_FLAGS_HI_LEN 4
+#define MC_CMD_LINK_STATE_OUT_STATUS_FLAGS_HI_LBN 32
+#define MC_CMD_LINK_STATE_OUT_STATUS_FLAGS_HI_WIDTH 32
+/* enum property: value */
+/* Enum values, see field(s): */
+/* MC_CMD_LINK_STATUS_FLAGS/STATUS_FLAGS */
+/* Configured technology. If the specified value does not align with the values
+ * defined in the enum MC_CMD_ETH_TECH/TECH, it is considered invalid.
+ */
+#define MC_CMD_LINK_STATE_OUT_LINK_TECHNOLOGY_OFST 8
+#define MC_CMD_LINK_STATE_OUT_LINK_TECHNOLOGY_LEN 2
+/* enum property: value */
+/* Enum values, see field(s): */
+/* MC_CMD_ETH_TECH/TECH */
+/* Configured FEC mode */
+#define MC_CMD_LINK_STATE_OUT_FEC_MODE_OFST 10
+#define MC_CMD_LINK_STATE_OUT_FEC_MODE_LEN 1
+/* enum property: value */
+/* Enum values, see field(s): */
+/* FEC_TYPE/TYPE */
+/* Bitmask of auto-negotiated pause modes */
+#define MC_CMD_LINK_STATE_OUT_PAUSE_MASK_OFST 11
+#define MC_CMD_LINK_STATE_OUT_PAUSE_MASK_LEN 1
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* MC_CMD_PAUSE_MODE/TYPE */
+/* Configured loopback mode */
+#define MC_CMD_LINK_STATE_OUT_LOOPBACK_OFST 12
+#define MC_CMD_LINK_STATE_OUT_LOOPBACK_LEN 1
+/* enum property: value */
+/* Enum values, see field(s): */
+/* MC_CMD_LOOPBACK_V2/MODE */
+/* Abilities requested by the driver to advertise during auto-negotiation */
+#define MC_CMD_LINK_STATE_OUT_ADVERTISED_ABILITIES_OFST 16
+#define MC_CMD_LINK_STATE_OUT_ADVERTISED_ABILITIES_LEN 32
+/* See structuredef: MC_CMD_ETH_AN_FIELDS */
+#define MC_CMD_LINK_STATE_OUT_ADVERTISED_ABILITIES_TECH_MASK_OFST 16
+#define MC_CMD_LINK_STATE_OUT_ADVERTISED_ABILITIES_TECH_MASK_LEN 16
+#define MC_CMD_LINK_STATE_OUT_ADVERTISED_ABILITIES_FEC_MASK_OFST 32
+#define MC_CMD_LINK_STATE_OUT_ADVERTISED_ABILITIES_FEC_MASK_LEN 4
+#define MC_CMD_LINK_STATE_OUT_ADVERTISED_ABILITIES_FEC_REQ_OFST 36
+#define MC_CMD_LINK_STATE_OUT_ADVERTISED_ABILITIES_FEC_REQ_LEN 4
+#define MC_CMD_LINK_STATE_OUT_ADVERTISED_ABILITIES_PAUSE_MASK_OFST 40
+#define MC_CMD_LINK_STATE_OUT_ADVERTISED_ABILITIES_PAUSE_MASK_LEN 1
+/* Abilities advertised by the link partner during auto-negotiation */
+#define MC_CMD_LINK_STATE_OUT_LINK_PARTNER_ABILITIES_OFST 48
+#define MC_CMD_LINK_STATE_OUT_LINK_PARTNER_ABILITIES_LEN 32
+/* See structuredef: MC_CMD_ETH_AN_FIELDS */
+#define MC_CMD_LINK_STATE_OUT_LINK_PARTNER_ABILITIES_TECH_MASK_OFST 48
+#define MC_CMD_LINK_STATE_OUT_LINK_PARTNER_ABILITIES_TECH_MASK_LEN 16
+#define MC_CMD_LINK_STATE_OUT_LINK_PARTNER_ABILITIES_FEC_MASK_OFST 64
+#define MC_CMD_LINK_STATE_OUT_LINK_PARTNER_ABILITIES_FEC_MASK_LEN 4
+#define MC_CMD_LINK_STATE_OUT_LINK_PARTNER_ABILITIES_FEC_REQ_OFST 68
+#define MC_CMD_LINK_STATE_OUT_LINK_PARTNER_ABILITIES_FEC_REQ_LEN 4
+#define MC_CMD_LINK_STATE_OUT_LINK_PARTNER_ABILITIES_PAUSE_MASK_OFST 72
+#define MC_CMD_LINK_STATE_OUT_LINK_PARTNER_ABILITIES_PAUSE_MASK_LEN 1
+/* Abilities supported by the local device (including cable abilities). For
+ * fixed local device capabilities see MC_CMD_GET_LOCAL_DEVICE_INFO
+ */
+#define MC_CMD_LINK_STATE_OUT_SUPPORTED_ABILITIES_OFST 80
+#define MC_CMD_LINK_STATE_OUT_SUPPORTED_ABILITIES_LEN 28
+/* See structuredef: MC_CMD_ETH_AN_FIELDS */
+#define MC_CMD_LINK_STATE_OUT_SUPPORTED_ABILITIES_TECH_MASK_OFST 80
+#define MC_CMD_LINK_STATE_OUT_SUPPORTED_ABILITIES_TECH_MASK_LEN 16
+#define MC_CMD_LINK_STATE_OUT_SUPPORTED_ABILITIES_FEC_MASK_OFST 96
+#define MC_CMD_LINK_STATE_OUT_SUPPORTED_ABILITIES_FEC_MASK_LEN 4
+#define MC_CMD_LINK_STATE_OUT_SUPPORTED_ABILITIES_FEC_REQ_OFST 100
+#define MC_CMD_LINK_STATE_OUT_SUPPORTED_ABILITIES_FEC_REQ_LEN 4
+#define MC_CMD_LINK_STATE_OUT_SUPPORTED_ABILITIES_PAUSE_MASK_OFST 104
+#define MC_CMD_LINK_STATE_OUT_SUPPORTED_ABILITIES_PAUSE_MASK_LEN 1
+/* Control flags */
+#define MC_CMD_LINK_STATE_OUT_CONTROL_FLAGS_OFST 108
+#define MC_CMD_LINK_STATE_OUT_CONTROL_FLAGS_LEN 4
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* MC_CMD_LINK_FLAGS/MASK */
+/* Sequence number to synchronize link change events */
+#define MC_CMD_LINK_STATE_OUT_PORT_LINKCHANGE_SEQ_NUM_OFST 112
+#define MC_CMD_LINK_STATE_OUT_PORT_LINKCHANGE_SEQ_NUM_LEN 1
+/* Sequence number to synchronize module change events */
+#define MC_CMD_LINK_STATE_OUT_PORT_MODULECHANGE_SEQ_NUM_OFST 113
+#define MC_CMD_LINK_STATE_OUT_PORT_MODULECHANGE_SEQ_NUM_LEN 1
+
+/* MC_CMD_LINK_STATE_OUT_V2 msgresponse: Updated LINK_STATE_OUT with
+ * LOCAL_AN_SUPPORT
+ */
+#define MC_CMD_LINK_STATE_OUT_V2_LEN 120
+/* Flags used to report the current configuration/state of the link. */
+#define MC_CMD_LINK_STATE_OUT_V2_STATUS_FLAGS_OFST 0
+#define MC_CMD_LINK_STATE_OUT_V2_STATUS_FLAGS_LEN 8
+#define MC_CMD_LINK_STATE_OUT_V2_STATUS_FLAGS_LO_OFST 0
+#define MC_CMD_LINK_STATE_OUT_V2_STATUS_FLAGS_LO_LEN 4
+#define MC_CMD_LINK_STATE_OUT_V2_STATUS_FLAGS_LO_LBN 0
+#define MC_CMD_LINK_STATE_OUT_V2_STATUS_FLAGS_LO_WIDTH 32
+#define MC_CMD_LINK_STATE_OUT_V2_STATUS_FLAGS_HI_OFST 4
+#define MC_CMD_LINK_STATE_OUT_V2_STATUS_FLAGS_HI_LEN 4
+#define MC_CMD_LINK_STATE_OUT_V2_STATUS_FLAGS_HI_LBN 32
+#define MC_CMD_LINK_STATE_OUT_V2_STATUS_FLAGS_HI_WIDTH 32
+/* enum property: value */
+/* Enum values, see field(s): */
+/* MC_CMD_LINK_STATUS_FLAGS/STATUS_FLAGS */
+/* Configured technology. If the specified value does not align with the values
+ * defined in the enum MC_CMD_ETH_TECH/TECH, it is considered invalid.
+ */
+#define MC_CMD_LINK_STATE_OUT_V2_LINK_TECHNOLOGY_OFST 8
+#define MC_CMD_LINK_STATE_OUT_V2_LINK_TECHNOLOGY_LEN 2
+/* enum property: value */
+/* Enum values, see field(s): */
+/* MC_CMD_ETH_TECH/TECH */
+/* Configured FEC mode */
+#define MC_CMD_LINK_STATE_OUT_V2_FEC_MODE_OFST 10
+#define MC_CMD_LINK_STATE_OUT_V2_FEC_MODE_LEN 1
+/* enum property: value */
+/* Enum values, see field(s): */
+/* FEC_TYPE/TYPE */
+/* Bitmask of auto-negotiated pause modes */
+#define MC_CMD_LINK_STATE_OUT_V2_PAUSE_MASK_OFST 11
+#define MC_CMD_LINK_STATE_OUT_V2_PAUSE_MASK_LEN 1
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* MC_CMD_PAUSE_MODE/TYPE */
+/* Configured loopback mode */
+#define MC_CMD_LINK_STATE_OUT_V2_LOOPBACK_OFST 12
+#define MC_CMD_LINK_STATE_OUT_V2_LOOPBACK_LEN 1
+/* enum property: value */
+/* Enum values, see field(s): */
+/* MC_CMD_LOOPBACK_V2/MODE */
+/* Abilities requested by the driver to advertise during auto-negotiation */
+#define MC_CMD_LINK_STATE_OUT_V2_ADVERTISED_ABILITIES_OFST 16
+#define MC_CMD_LINK_STATE_OUT_V2_ADVERTISED_ABILITIES_LEN 32
+/* Abilities advertised by the link partner during auto-negotiation */
+#define MC_CMD_LINK_STATE_OUT_V2_LINK_PARTNER_ABILITIES_OFST 48
+#define MC_CMD_LINK_STATE_OUT_V2_LINK_PARTNER_ABILITIES_LEN 32
+/* Abilities supported by the local device (including cable abilities). For
+ * fixed local device capabilities see MC_CMD_GET_LOCAL_DEVICE_INFO
+ */
+#define MC_CMD_LINK_STATE_OUT_V2_SUPPORTED_ABILITIES_OFST 80
+#define MC_CMD_LINK_STATE_OUT_V2_SUPPORTED_ABILITIES_LEN 28
+/* Control flags */
+#define MC_CMD_LINK_STATE_OUT_V2_CONTROL_FLAGS_OFST 108
+#define MC_CMD_LINK_STATE_OUT_V2_CONTROL_FLAGS_LEN 4
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* MC_CMD_LINK_FLAGS/MASK */
+/* Sequence number to synchronize link change events */
+#define MC_CMD_LINK_STATE_OUT_V2_PORT_LINKCHANGE_SEQ_NUM_OFST 112
+#define MC_CMD_LINK_STATE_OUT_V2_PORT_LINKCHANGE_SEQ_NUM_LEN 1
+/* Sequence number to synchronize module change events */
+#define MC_CMD_LINK_STATE_OUT_V2_PORT_MODULECHANGE_SEQ_NUM_OFST 113
+#define MC_CMD_LINK_STATE_OUT_V2_PORT_MODULECHANGE_SEQ_NUM_LEN 1
+/* Reports the auto-negotiation supported by the local device. This depends on
+ * the port and module properties.
+ */
+#define MC_CMD_LINK_STATE_OUT_V2_LOCAL_AN_SUPPORT_OFST 116
+#define MC_CMD_LINK_STATE_OUT_V2_LOCAL_AN_SUPPORT_LEN 4
+/* Enum values, see field(s): */
+/* AN_TYPE/TYPE */
+
+/* MC_CMD_LINK_STATE_OUT_V3 msgresponse: Updated LINK_STATE_OUT_V2 for explicit
+ * reporting of the link speed and duplex mode.
+ */
+#define MC_CMD_LINK_STATE_OUT_V3_LEN 128
+/* Flags used to report the current configuration/state of the link. */
+#define MC_CMD_LINK_STATE_OUT_V3_STATUS_FLAGS_OFST 0
+#define MC_CMD_LINK_STATE_OUT_V3_STATUS_FLAGS_LEN 8
+#define MC_CMD_LINK_STATE_OUT_V3_STATUS_FLAGS_LO_OFST 0
+#define MC_CMD_LINK_STATE_OUT_V3_STATUS_FLAGS_LO_LEN 4
+#define MC_CMD_LINK_STATE_OUT_V3_STATUS_FLAGS_LO_LBN 0
+#define MC_CMD_LINK_STATE_OUT_V3_STATUS_FLAGS_LO_WIDTH 32
+#define MC_CMD_LINK_STATE_OUT_V3_STATUS_FLAGS_HI_OFST 4
+#define MC_CMD_LINK_STATE_OUT_V3_STATUS_FLAGS_HI_LEN 4
+#define MC_CMD_LINK_STATE_OUT_V3_STATUS_FLAGS_HI_LBN 32
+#define MC_CMD_LINK_STATE_OUT_V3_STATUS_FLAGS_HI_WIDTH 32
+/* enum property: value */
+/* Enum values, see field(s): */
+/* MC_CMD_LINK_STATUS_FLAGS/STATUS_FLAGS */
+/* Configured technology. If the specified value does not align with the values
+ * defined in the enum MC_CMD_ETH_TECH/TECH, it is considered invalid.
+ */
+#define MC_CMD_LINK_STATE_OUT_V3_LINK_TECHNOLOGY_OFST 8
+#define MC_CMD_LINK_STATE_OUT_V3_LINK_TECHNOLOGY_LEN 2
+/* enum property: value */
+/* Enum values, see field(s): */
+/* MC_CMD_ETH_TECH/TECH */
+/* Configured FEC mode */
+#define MC_CMD_LINK_STATE_OUT_V3_FEC_MODE_OFST 10
+#define MC_CMD_LINK_STATE_OUT_V3_FEC_MODE_LEN 1
+/* enum property: value */
+/* Enum values, see field(s): */
+/* FEC_TYPE/TYPE */
+/* Bitmask of auto-negotiated pause modes */
+#define MC_CMD_LINK_STATE_OUT_V3_PAUSE_MASK_OFST 11
+#define MC_CMD_LINK_STATE_OUT_V3_PAUSE_MASK_LEN 1
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* MC_CMD_PAUSE_MODE/TYPE */
+/* Configured loopback mode */
+#define MC_CMD_LINK_STATE_OUT_V3_LOOPBACK_OFST 12
+#define MC_CMD_LINK_STATE_OUT_V3_LOOPBACK_LEN 1
+/* enum property: value */
+/* Enum values, see field(s): */
+/* MC_CMD_LOOPBACK_V2/MODE */
+/* Abilities requested by the driver to advertise during auto-negotiation */
+#define MC_CMD_LINK_STATE_OUT_V3_ADVERTISED_ABILITIES_OFST 16
+#define MC_CMD_LINK_STATE_OUT_V3_ADVERTISED_ABILITIES_LEN 32
+/* Abilities advertised by the link partner during auto-negotiation */
+#define MC_CMD_LINK_STATE_OUT_V3_LINK_PARTNER_ABILITIES_OFST 48
+#define MC_CMD_LINK_STATE_OUT_V3_LINK_PARTNER_ABILITIES_LEN 32
+/* Abilities supported by the local device (including cable abilities). For
+ * fixed local device capabilities see MC_CMD_GET_LOCAL_DEVICE_INFO
+ */
+#define MC_CMD_LINK_STATE_OUT_V3_SUPPORTED_ABILITIES_OFST 80
+#define MC_CMD_LINK_STATE_OUT_V3_SUPPORTED_ABILITIES_LEN 28
+/* Control flags */
+#define MC_CMD_LINK_STATE_OUT_V3_CONTROL_FLAGS_OFST 108
+#define MC_CMD_LINK_STATE_OUT_V3_CONTROL_FLAGS_LEN 4
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* MC_CMD_LINK_FLAGS/MASK */
+/* Sequence number to synchronize link change events */
+#define MC_CMD_LINK_STATE_OUT_V3_PORT_LINKCHANGE_SEQ_NUM_OFST 112
+#define MC_CMD_LINK_STATE_OUT_V3_PORT_LINKCHANGE_SEQ_NUM_LEN 1
+/* Sequence number to synchronize module change events */
+#define MC_CMD_LINK_STATE_OUT_V3_PORT_MODULECHANGE_SEQ_NUM_OFST 113
+#define MC_CMD_LINK_STATE_OUT_V3_PORT_MODULECHANGE_SEQ_NUM_LEN 1
+/* Reports the auto-negotiation supported by the local device. This depends on
+ * the port and module properties.
+ */
+#define MC_CMD_LINK_STATE_OUT_V3_LOCAL_AN_SUPPORT_OFST 116
+#define MC_CMD_LINK_STATE_OUT_V3_LOCAL_AN_SUPPORT_LEN 4
+/* Enum values, see field(s): */
+/* AN_TYPE/TYPE */
+/* Autonegotiated speed in Mbit/s. The link may still be down even if this
+ * reads non-zero. The LINK_SPEED field is intended for drivers without the
+ * most up-to-date MCDI definitions, which cannot deduce the link speed from
+ * the reported LINK_TECHNOLOGY field.
+ */
+#define MC_CMD_LINK_STATE_OUT_V3_LINK_SPEED_OFST 120
+#define MC_CMD_LINK_STATE_OUT_V3_LINK_SPEED_LEN 4
+#define MC_CMD_LINK_STATE_OUT_V3_FLAGS_OFST 124
+#define MC_CMD_LINK_STATE_OUT_V3_FLAGS_LEN 4
+#define MC_CMD_LINK_STATE_OUT_V3_FULL_DUPLEX_OFST 124
+#define MC_CMD_LINK_STATE_OUT_V3_FULL_DUPLEX_LBN 0
+#define MC_CMD_LINK_STATE_OUT_V3_FULL_DUPLEX_WIDTH 1
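+
+/* Illustrative sketch, not part of the generated MCDI definitions: decoding
+ * the V3-only LINK_SPEED and FULL_DUPLEX fields from a response buffer of at
+ * least MC_CMD_LINK_STATE_OUT_V3_LEN bytes. Names are hypothetical and the
+ * little-endian load is open-coded.
+ */
+static inline void
+mc_cmd_link_state_v3_speed(const unsigned char *buf, unsigned int *speed_mbps,
+			   int *full_duplex)
+{
+	const unsigned char *p = buf + MC_CMD_LINK_STATE_OUT_V3_LINK_SPEED_OFST;
+
+	*speed_mbps = p[0] | (p[1] << 8) | (p[2] << 16) |
+		      ((unsigned int)p[3] << 24);
+	*full_duplex = (buf[MC_CMD_LINK_STATE_OUT_V3_FLAGS_OFST] >>
+			MC_CMD_LINK_STATE_OUT_V3_FULL_DUPLEX_LBN) & 1;
+}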
+
/***********************************/
/* MC_CMD_GET_LINK
@@ -5694,6 +6064,54 @@
/* MC_CMD_GET_LINK_IN msgrequest */
#define MC_CMD_GET_LINK_IN_LEN 0
+/* MC_CMD_GET_LINK_IN_V2 msgrequest */
+#define MC_CMD_GET_LINK_IN_V2_LEN 8
+/* Target port to request link state for. Uses MAE_LINK_ENDPOINT_SELECTOR which
+ * identifies a real or virtual network port by MAE port and link end. See the
+ * structure definition for more details.
+ */
+#define MC_CMD_GET_LINK_IN_V2_TARGET_OFST 0
+#define MC_CMD_GET_LINK_IN_V2_TARGET_LEN 8
+#define MC_CMD_GET_LINK_IN_V2_TARGET_LO_OFST 0
+#define MC_CMD_GET_LINK_IN_V2_TARGET_LO_LEN 4
+#define MC_CMD_GET_LINK_IN_V2_TARGET_LO_LBN 0
+#define MC_CMD_GET_LINK_IN_V2_TARGET_LO_WIDTH 32
+#define MC_CMD_GET_LINK_IN_V2_TARGET_HI_OFST 4
+#define MC_CMD_GET_LINK_IN_V2_TARGET_HI_LEN 4
+#define MC_CMD_GET_LINK_IN_V2_TARGET_HI_LBN 32
+#define MC_CMD_GET_LINK_IN_V2_TARGET_HI_WIDTH 32
+/* See structuredef: MAE_LINK_ENDPOINT_SELECTOR */
+#define MC_CMD_GET_LINK_IN_V2_TARGET_MPORT_SELECTOR_OFST 0
+#define MC_CMD_GET_LINK_IN_V2_TARGET_MPORT_SELECTOR_LEN 4
+#define MC_CMD_GET_LINK_IN_V2_TARGET_MPORT_SELECTOR_FLAT_OFST 0
+#define MC_CMD_GET_LINK_IN_V2_TARGET_MPORT_SELECTOR_FLAT_LEN 4
+#define MC_CMD_GET_LINK_IN_V2_TARGET_MPORT_SELECTOR_TYPE_OFST 3
+#define MC_CMD_GET_LINK_IN_V2_TARGET_MPORT_SELECTOR_TYPE_LEN 1
+#define MC_CMD_GET_LINK_IN_V2_TARGET_MPORT_SELECTOR_MPORT_ID_OFST 0
+#define MC_CMD_GET_LINK_IN_V2_TARGET_MPORT_SELECTOR_MPORT_ID_LEN 3
+#define MC_CMD_GET_LINK_IN_V2_TARGET_MPORT_SELECTOR_PPORT_ID_LBN 0
+#define MC_CMD_GET_LINK_IN_V2_TARGET_MPORT_SELECTOR_PPORT_ID_WIDTH 4
+#define MC_CMD_GET_LINK_IN_V2_TARGET_MPORT_SELECTOR_FUNC_INTF_ID_LBN 20
+#define MC_CMD_GET_LINK_IN_V2_TARGET_MPORT_SELECTOR_FUNC_INTF_ID_WIDTH 4
+#define MC_CMD_GET_LINK_IN_V2_TARGET_MPORT_SELECTOR_FUNC_MH_PF_ID_LBN 16
+#define MC_CMD_GET_LINK_IN_V2_TARGET_MPORT_SELECTOR_FUNC_MH_PF_ID_WIDTH 4
+#define MC_CMD_GET_LINK_IN_V2_TARGET_MPORT_SELECTOR_FUNC_PF_ID_OFST 2
+#define MC_CMD_GET_LINK_IN_V2_TARGET_MPORT_SELECTOR_FUNC_PF_ID_LEN 1
+#define MC_CMD_GET_LINK_IN_V2_TARGET_MPORT_SELECTOR_FUNC_VF_ID_OFST 0
+#define MC_CMD_GET_LINK_IN_V2_TARGET_MPORT_SELECTOR_FUNC_VF_ID_LEN 2
+#define MC_CMD_GET_LINK_IN_V2_TARGET_LINK_END_OFST 4
+#define MC_CMD_GET_LINK_IN_V2_TARGET_LINK_END_LEN 4
+#define MC_CMD_GET_LINK_IN_V2_TARGET_FLAT_OFST 0
+#define MC_CMD_GET_LINK_IN_V2_TARGET_FLAT_LEN 8
+#define MC_CMD_GET_LINK_IN_V2_TARGET_FLAT_LO_OFST 0
+#define MC_CMD_GET_LINK_IN_V2_TARGET_FLAT_LO_LEN 4
+#define MC_CMD_GET_LINK_IN_V2_TARGET_FLAT_LO_LBN 0
+#define MC_CMD_GET_LINK_IN_V2_TARGET_FLAT_LO_WIDTH 32
+#define MC_CMD_GET_LINK_IN_V2_TARGET_FLAT_HI_OFST 4
+#define MC_CMD_GET_LINK_IN_V2_TARGET_FLAT_HI_LEN 4
+#define MC_CMD_GET_LINK_IN_V2_TARGET_FLAT_HI_LBN 32
+#define MC_CMD_GET_LINK_IN_V2_TARGET_FLAT_HI_WIDTH 32
+
/* MC_CMD_GET_LINK_OUT msgresponse */
#define MC_CMD_GET_LINK_OUT_LEN 28
/* Near-side advertised capabilities. Refer to
@@ -5745,6 +6163,7 @@
/* This returns the negotiated flow control value. */
#define MC_CMD_GET_LINK_OUT_FCNTL_OFST 20
#define MC_CMD_GET_LINK_OUT_FCNTL_LEN 4
+/* enum property: value */
/* Enum values, see field(s): */
/* MC_CMD_SET_MAC/MC_CMD_SET_MAC_IN/FCNTL */
#define MC_CMD_GET_LINK_OUT_MAC_FAULT_OFST 24
@@ -5813,6 +6232,7 @@
/* This returns the negotiated flow control value. */
#define MC_CMD_GET_LINK_OUT_V2_FCNTL_OFST 20
#define MC_CMD_GET_LINK_OUT_V2_FCNTL_LEN 4
+/* enum property: value */
/* Enum values, see field(s): */
/* MC_CMD_SET_MAC/MC_CMD_SET_MAC_IN/FCNTL */
#define MC_CMD_GET_LINK_OUT_V2_MAC_FAULT_OFST 24
@@ -5969,6 +6389,95 @@
#define MC_CMD_SET_LINK_IN_V2_MODULE_SEQ_IGNORE_LBN 7
#define MC_CMD_SET_LINK_IN_V2_MODULE_SEQ_IGNORE_WIDTH 1
+/* MC_CMD_SET_LINK_IN_V3 msgrequest */
+#define MC_CMD_SET_LINK_IN_V3_LEN 28
+/* Near-side advertised capabilities. Refer to
+ * MC_CMD_GET_PHY_CFG_OUT/SUPPORTED_CAP for bit definitions.
+ */
+#define MC_CMD_SET_LINK_IN_V3_CAP_OFST 0
+#define MC_CMD_SET_LINK_IN_V3_CAP_LEN 4
+/* Flags */
+#define MC_CMD_SET_LINK_IN_V3_FLAGS_OFST 4
+#define MC_CMD_SET_LINK_IN_V3_FLAGS_LEN 4
+#define MC_CMD_SET_LINK_IN_V3_LOWPOWER_OFST 4
+#define MC_CMD_SET_LINK_IN_V3_LOWPOWER_LBN 0
+#define MC_CMD_SET_LINK_IN_V3_LOWPOWER_WIDTH 1
+#define MC_CMD_SET_LINK_IN_V3_POWEROFF_OFST 4
+#define MC_CMD_SET_LINK_IN_V3_POWEROFF_LBN 1
+#define MC_CMD_SET_LINK_IN_V3_POWEROFF_WIDTH 1
+#define MC_CMD_SET_LINK_IN_V3_TXDIS_OFST 4
+#define MC_CMD_SET_LINK_IN_V3_TXDIS_LBN 2
+#define MC_CMD_SET_LINK_IN_V3_TXDIS_WIDTH 1
+#define MC_CMD_SET_LINK_IN_V3_LINKDOWN_OFST 4
+#define MC_CMD_SET_LINK_IN_V3_LINKDOWN_LBN 3
+#define MC_CMD_SET_LINK_IN_V3_LINKDOWN_WIDTH 1
+/* Loopback mode. */
+#define MC_CMD_SET_LINK_IN_V3_LOOPBACK_MODE_OFST 8
+#define MC_CMD_SET_LINK_IN_V3_LOOPBACK_MODE_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_GET_LOOPBACK_MODES/MC_CMD_GET_LOOPBACK_MODES_OUT/100M */
+/* A loopback speed of "0" is supported, and means (choose any available
+ * speed).
+ */
+#define MC_CMD_SET_LINK_IN_V3_LOOPBACK_SPEED_OFST 12
+#define MC_CMD_SET_LINK_IN_V3_LOOPBACK_SPEED_LEN 4
+#define MC_CMD_SET_LINK_IN_V3_MODULE_SEQ_OFST 16
+#define MC_CMD_SET_LINK_IN_V3_MODULE_SEQ_LEN 1
+#define MC_CMD_SET_LINK_IN_V3_MODULE_SEQ_NUMBER_OFST 16
+#define MC_CMD_SET_LINK_IN_V3_MODULE_SEQ_NUMBER_LBN 0
+#define MC_CMD_SET_LINK_IN_V3_MODULE_SEQ_NUMBER_WIDTH 7
+#define MC_CMD_SET_LINK_IN_V3_MODULE_SEQ_IGNORE_OFST 16
+#define MC_CMD_SET_LINK_IN_V3_MODULE_SEQ_IGNORE_LBN 7
+#define MC_CMD_SET_LINK_IN_V3_MODULE_SEQ_IGNORE_WIDTH 1
+/* Padding */
+#define MC_CMD_SET_LINK_IN_V3_RESERVED_OFST 17
+#define MC_CMD_SET_LINK_IN_V3_RESERVED_LEN 3
+/* Target port to set link state for. Uses MAE_LINK_ENDPOINT_SELECTOR which
+ * identifies a real or virtual network port by MAE port and link end. See the
+ * structure definition for more details
+ */
+#define MC_CMD_SET_LINK_IN_V3_TARGET_OFST 20
+#define MC_CMD_SET_LINK_IN_V3_TARGET_LEN 8
+#define MC_CMD_SET_LINK_IN_V3_TARGET_LO_OFST 20
+#define MC_CMD_SET_LINK_IN_V3_TARGET_LO_LEN 4
+#define MC_CMD_SET_LINK_IN_V3_TARGET_LO_LBN 160
+#define MC_CMD_SET_LINK_IN_V3_TARGET_LO_WIDTH 32
+#define MC_CMD_SET_LINK_IN_V3_TARGET_HI_OFST 24
+#define MC_CMD_SET_LINK_IN_V3_TARGET_HI_LEN 4
+#define MC_CMD_SET_LINK_IN_V3_TARGET_HI_LBN 192
+#define MC_CMD_SET_LINK_IN_V3_TARGET_HI_WIDTH 32
+/* See structuredef: MAE_LINK_ENDPOINT_SELECTOR */
+#define MC_CMD_SET_LINK_IN_V3_TARGET_MPORT_SELECTOR_OFST 20
+#define MC_CMD_SET_LINK_IN_V3_TARGET_MPORT_SELECTOR_LEN 4
+#define MC_CMD_SET_LINK_IN_V3_TARGET_MPORT_SELECTOR_FLAT_OFST 20
+#define MC_CMD_SET_LINK_IN_V3_TARGET_MPORT_SELECTOR_FLAT_LEN 4
+#define MC_CMD_SET_LINK_IN_V3_TARGET_MPORT_SELECTOR_TYPE_OFST 23
+#define MC_CMD_SET_LINK_IN_V3_TARGET_MPORT_SELECTOR_TYPE_LEN 1
+#define MC_CMD_SET_LINK_IN_V3_TARGET_MPORT_SELECTOR_MPORT_ID_OFST 20
+#define MC_CMD_SET_LINK_IN_V3_TARGET_MPORT_SELECTOR_MPORT_ID_LEN 3
+#define MC_CMD_SET_LINK_IN_V3_TARGET_MPORT_SELECTOR_PPORT_ID_LBN 160
+#define MC_CMD_SET_LINK_IN_V3_TARGET_MPORT_SELECTOR_PPORT_ID_WIDTH 4
+#define MC_CMD_SET_LINK_IN_V3_TARGET_MPORT_SELECTOR_FUNC_INTF_ID_LBN 180
+#define MC_CMD_SET_LINK_IN_V3_TARGET_MPORT_SELECTOR_FUNC_INTF_ID_WIDTH 4
+#define MC_CMD_SET_LINK_IN_V3_TARGET_MPORT_SELECTOR_FUNC_MH_PF_ID_LBN 176
+#define MC_CMD_SET_LINK_IN_V3_TARGET_MPORT_SELECTOR_FUNC_MH_PF_ID_WIDTH 4
+#define MC_CMD_SET_LINK_IN_V3_TARGET_MPORT_SELECTOR_FUNC_PF_ID_OFST 22
+#define MC_CMD_SET_LINK_IN_V3_TARGET_MPORT_SELECTOR_FUNC_PF_ID_LEN 1
+#define MC_CMD_SET_LINK_IN_V3_TARGET_MPORT_SELECTOR_FUNC_VF_ID_OFST 20
+#define MC_CMD_SET_LINK_IN_V3_TARGET_MPORT_SELECTOR_FUNC_VF_ID_LEN 2
+#define MC_CMD_SET_LINK_IN_V3_TARGET_LINK_END_OFST 24
+#define MC_CMD_SET_LINK_IN_V3_TARGET_LINK_END_LEN 4
+#define MC_CMD_SET_LINK_IN_V3_TARGET_FLAT_OFST 20
+#define MC_CMD_SET_LINK_IN_V3_TARGET_FLAT_LEN 8
+#define MC_CMD_SET_LINK_IN_V3_TARGET_FLAT_LO_OFST 20
+#define MC_CMD_SET_LINK_IN_V3_TARGET_FLAT_LO_LEN 4
+#define MC_CMD_SET_LINK_IN_V3_TARGET_FLAT_LO_LBN 160
+#define MC_CMD_SET_LINK_IN_V3_TARGET_FLAT_LO_WIDTH 32
+#define MC_CMD_SET_LINK_IN_V3_TARGET_FLAT_HI_OFST 24
+#define MC_CMD_SET_LINK_IN_V3_TARGET_FLAT_HI_LEN 4
+#define MC_CMD_SET_LINK_IN_V3_TARGET_FLAT_HI_LBN 192
+#define MC_CMD_SET_LINK_IN_V3_TARGET_FLAT_HI_WIDTH 32
+
/* MC_CMD_SET_LINK_OUT msgresponse */
#define MC_CMD_SET_LINK_OUT_LEN 0
@@ -6034,17 +6543,17 @@
#define MC_CMD_SET_MAC_IN_FCNTL_OFST 20
#define MC_CMD_SET_MAC_IN_FCNTL_LEN 4
/* enum: Flow control is off. */
-#define MC_CMD_FCNTL_OFF 0x0
+/* MC_CMD_FCNTL_OFF 0x0 */
/* enum: Respond to flow control. */
-#define MC_CMD_FCNTL_RESPOND 0x1
+/* MC_CMD_FCNTL_RESPOND 0x1 */
/* enum: Respond to and Issue flow control. */
-#define MC_CMD_FCNTL_BIDIR 0x2
-/* enum: Auto neg flow control. */
-#define MC_CMD_FCNTL_AUTO 0x3
-/* enum: Priority flow control (eftest builds only). */
-#define MC_CMD_FCNTL_QBB 0x4
+/* MC_CMD_FCNTL_BIDIR 0x2 */
+/* enum: Auto negotiate flow control. */
+/* MC_CMD_FCNTL_AUTO 0x3 */
+/* enum: Priority flow control. This is only supported on KSB. */
+/* MC_CMD_FCNTL_QBB 0x4 */
/* enum: Issue flow control. */
-#define MC_CMD_FCNTL_GENERATE 0x5
+/* MC_CMD_FCNTL_GENERATE 0x5 */
#define MC_CMD_SET_MAC_IN_FLAGS_OFST 24
#define MC_CMD_SET_MAC_IN_FLAGS_LEN 4
#define MC_CMD_SET_MAC_IN_FLAG_INCLUDE_FCS_OFST 24
@@ -6086,9 +6595,9 @@
/* MC_CMD_FCNTL_RESPOND 0x1 */
/* enum: Respond to and Issue flow control. */
/* MC_CMD_FCNTL_BIDIR 0x2 */
-/* enum: Auto neg flow control. */
+/* enum: Auto negotiate flow control. */
/* MC_CMD_FCNTL_AUTO 0x3 */
-/* enum: Priority flow control (eftest builds only). */
+/* enum: Priority flow control. This is only supported on KSB. */
/* MC_CMD_FCNTL_QBB 0x4 */
/* enum: Issue flow control. */
/* MC_CMD_FCNTL_GENERATE 0x5 */
@@ -6155,9 +6664,9 @@
/* MC_CMD_FCNTL_RESPOND 0x1 */
/* enum: Respond to and Issue flow control. */
/* MC_CMD_FCNTL_BIDIR 0x2 */
-/* enum: Auto neg flow control. */
+/* enum: Auto negotiate flow control. */
/* MC_CMD_FCNTL_AUTO 0x3 */
-/* enum: Priority flow control (eftest builds only). */
+/* enum: Priority flow control. This is only supported on KSB. */
/* MC_CMD_FCNTL_QBB 0x4 */
/* enum: Issue flow control. */
/* MC_CMD_FCNTL_GENERATE 0x5 */
@@ -6188,19 +6697,9 @@
#define MC_CMD_SET_MAC_V3_IN_CFG_FCS_OFST 28
#define MC_CMD_SET_MAC_V3_IN_CFG_FCS_LBN 4
#define MC_CMD_SET_MAC_V3_IN_CFG_FCS_WIDTH 1
-/* Identifies the MAC to update by the specifying the end of a logical MAE
- * link. Setting TARGET to MAE_LINK_ENDPOINT_COMPAT is equivalent to using the
- * previous version of the command (MC_CMD_SET_MAC_EXT). Not all possible
- * combinations of MPORT_END and MPORT_SELECTOR in TARGET will work in all
- * circumstances. 1. Some will always work (e.g. a VF can always address its
- * logical MAC using MPORT_SELECTOR=ASSIGNED,LINK_END=VNIC), 2. Some are not
- * meaningful and will always fail with EINVAL (e.g. attempting to address the
- * VNIC end of a link to a physical port), 3. Some are meaningful but require
- * the MCDI client to have the required permission and fail with EPERM
- * otherwise (e.g. trying to set the MAC on a VF the caller cannot administer),
- * and 4. Some could be implementation-specific and fail with ENOTSUP if not
- * available (no examples exist right now). See SF-123581-TC section 4.3 for
- * more details.
+/* Target port to set MAC state for. Uses MAE_LINK_ENDPOINT_SELECTOR which
+ * identifies a real or virtual network port by MAE port and link end. See the
+ * structure definition for more details
*/
#define MC_CMD_SET_MAC_V3_IN_TARGET_OFST 32
#define MC_CMD_SET_MAC_V3_IN_TARGET_LEN 8
@@ -6212,6 +6711,7 @@
#define MC_CMD_SET_MAC_V3_IN_TARGET_HI_LEN 4
#define MC_CMD_SET_MAC_V3_IN_TARGET_HI_LBN 288
#define MC_CMD_SET_MAC_V3_IN_TARGET_HI_WIDTH 32
+/* See structuredef: MAE_LINK_ENDPOINT_SELECTOR */
#define MC_CMD_SET_MAC_V3_IN_TARGET_MPORT_SELECTOR_OFST 32
#define MC_CMD_SET_MAC_V3_IN_TARGET_MPORT_SELECTOR_LEN 4
#define MC_CMD_SET_MAC_V3_IN_TARGET_MPORT_SELECTOR_FLAT_OFST 32
@@ -6405,6 +6905,98 @@
#define MC_CMD_MAC_STATS_IN_PORT_ID_OFST 16
#define MC_CMD_MAC_STATS_IN_PORT_ID_LEN 4
+/* MC_CMD_MAC_STATS_V2_IN msgrequest */
+#define MC_CMD_MAC_STATS_V2_IN_LEN 28
+/* ??? */
+#define MC_CMD_MAC_STATS_V2_IN_DMA_ADDR_OFST 0
+#define MC_CMD_MAC_STATS_V2_IN_DMA_ADDR_LEN 8
+#define MC_CMD_MAC_STATS_V2_IN_DMA_ADDR_LO_OFST 0
+#define MC_CMD_MAC_STATS_V2_IN_DMA_ADDR_LO_LEN 4
+#define MC_CMD_MAC_STATS_V2_IN_DMA_ADDR_LO_LBN 0
+#define MC_CMD_MAC_STATS_V2_IN_DMA_ADDR_LO_WIDTH 32
+#define MC_CMD_MAC_STATS_V2_IN_DMA_ADDR_HI_OFST 4
+#define MC_CMD_MAC_STATS_V2_IN_DMA_ADDR_HI_LEN 4
+#define MC_CMD_MAC_STATS_V2_IN_DMA_ADDR_HI_LBN 32
+#define MC_CMD_MAC_STATS_V2_IN_DMA_ADDR_HI_WIDTH 32
+#define MC_CMD_MAC_STATS_V2_IN_CMD_OFST 8
+#define MC_CMD_MAC_STATS_V2_IN_CMD_LEN 4
+#define MC_CMD_MAC_STATS_V2_IN_DMA_OFST 8
+#define MC_CMD_MAC_STATS_V2_IN_DMA_LBN 0
+#define MC_CMD_MAC_STATS_V2_IN_DMA_WIDTH 1
+#define MC_CMD_MAC_STATS_V2_IN_CLEAR_OFST 8
+#define MC_CMD_MAC_STATS_V2_IN_CLEAR_LBN 1
+#define MC_CMD_MAC_STATS_V2_IN_CLEAR_WIDTH 1
+#define MC_CMD_MAC_STATS_V2_IN_PERIODIC_CHANGE_OFST 8
+#define MC_CMD_MAC_STATS_V2_IN_PERIODIC_CHANGE_LBN 2
+#define MC_CMD_MAC_STATS_V2_IN_PERIODIC_CHANGE_WIDTH 1
+#define MC_CMD_MAC_STATS_V2_IN_PERIODIC_ENABLE_OFST 8
+#define MC_CMD_MAC_STATS_V2_IN_PERIODIC_ENABLE_LBN 3
+#define MC_CMD_MAC_STATS_V2_IN_PERIODIC_ENABLE_WIDTH 1
+#define MC_CMD_MAC_STATS_V2_IN_PERIODIC_CLEAR_OFST 8
+#define MC_CMD_MAC_STATS_V2_IN_PERIODIC_CLEAR_LBN 4
+#define MC_CMD_MAC_STATS_V2_IN_PERIODIC_CLEAR_WIDTH 1
+#define MC_CMD_MAC_STATS_V2_IN_PERIODIC_NOEVENT_OFST 8
+#define MC_CMD_MAC_STATS_V2_IN_PERIODIC_NOEVENT_LBN 5
+#define MC_CMD_MAC_STATS_V2_IN_PERIODIC_NOEVENT_WIDTH 1
+#define MC_CMD_MAC_STATS_V2_IN_PERIOD_MS_OFST 8
+#define MC_CMD_MAC_STATS_V2_IN_PERIOD_MS_LBN 16
+#define MC_CMD_MAC_STATS_V2_IN_PERIOD_MS_WIDTH 16
+/* DMA length. Should be set to MAC_STATS_NUM_STATS * sizeof(uint64_t), as
+ * returned by MC_CMD_GET_CAPABILITIES_V4_OUT. For legacy firmware not
+ * supporting MC_CMD_GET_CAPABILITIES_V4_OUT, DMA_LEN should be set to
+ * MC_CMD_MAC_NSTATS * sizeof(uint64_t)
+ */
+#define MC_CMD_MAC_STATS_V2_IN_DMA_LEN_OFST 12
+#define MC_CMD_MAC_STATS_V2_IN_DMA_LEN_LEN 4
+/* port id so vadapter stats can be provided */
+#define MC_CMD_MAC_STATS_V2_IN_PORT_ID_OFST 16
+#define MC_CMD_MAC_STATS_V2_IN_PORT_ID_LEN 4
+/* Target port to request statistics for. Uses MAE_LINK_ENDPOINT_SELECTOR which
+ * identifies a real or virtual network port by MAE port and link end. See the
+ * structure definition for more details
+ */
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_OFST 20
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_LEN 8
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_LO_OFST 20
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_LO_LEN 4
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_LO_LBN 160
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_LO_WIDTH 32
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_HI_OFST 24
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_HI_LEN 4
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_HI_LBN 192
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_HI_WIDTH 32
+/* See structuredef: MAE_LINK_ENDPOINT_SELECTOR */
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_MPORT_SELECTOR_OFST 20
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_MPORT_SELECTOR_LEN 4
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_MPORT_SELECTOR_FLAT_OFST 20
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_MPORT_SELECTOR_FLAT_LEN 4
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_MPORT_SELECTOR_TYPE_OFST 23
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_MPORT_SELECTOR_TYPE_LEN 1
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_MPORT_SELECTOR_MPORT_ID_OFST 20
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_MPORT_SELECTOR_MPORT_ID_LEN 3
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_MPORT_SELECTOR_PPORT_ID_LBN 160
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_MPORT_SELECTOR_PPORT_ID_WIDTH 4
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_MPORT_SELECTOR_FUNC_INTF_ID_LBN 180
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_MPORT_SELECTOR_FUNC_INTF_ID_WIDTH 4
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_MPORT_SELECTOR_FUNC_MH_PF_ID_LBN 176
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_MPORT_SELECTOR_FUNC_MH_PF_ID_WIDTH 4
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_MPORT_SELECTOR_FUNC_PF_ID_OFST 22
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_MPORT_SELECTOR_FUNC_PF_ID_LEN 1
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_MPORT_SELECTOR_FUNC_VF_ID_OFST 20
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_MPORT_SELECTOR_FUNC_VF_ID_LEN 2
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_LINK_END_OFST 24
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_LINK_END_LEN 4
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_FLAT_OFST 20
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_FLAT_LEN 8
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_FLAT_LO_OFST 20
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_FLAT_LO_LEN 4
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_FLAT_LO_LBN 160
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_FLAT_LO_WIDTH 32
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_FLAT_HI_OFST 24
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_FLAT_HI_LEN 4
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_FLAT_HI_LBN 192
+#define MC_CMD_MAC_STATS_V2_IN_TARGET_FLAT_HI_WIDTH 32
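+
+/* Illustrative sketch, not part of the generated MCDI definitions: the DMA_LEN
+ * rule described above, i.e. the statistics count reported by
+ * MC_CMD_GET_CAPABILITIES_V4_OUT times the size of one 64-bit counter, with a
+ * fall-back to MC_CMD_MAC_NSTATS for legacy firmware that does not report a
+ * count. The function name and num_stats parameter are hypothetical.
+ */
+static inline unsigned int mc_cmd_mac_stats_dma_len(unsigned int num_stats)
+{
+	if (num_stats == 0)	/* legacy firmware: no MAC_STATS_NUM_STATS */
+		num_stats = MC_CMD_MAC_NSTATS;
+	return num_stats * 8;	/* each statistic is a 64-bit counter */
+}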
+
/* MC_CMD_MAC_STATS_OUT_DMA msgresponse */
#define MC_CMD_MAC_STATS_OUT_DMA_LEN 0
@@ -6421,6 +7013,7 @@
#define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_HI_LBN 32
#define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_HI_WIDTH 32
#define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_NUM MC_CMD_MAC_NSTATS
+/* enum property: index */
#define MC_CMD_MAC_GENERATION_START 0x0 /* enum */
#define MC_CMD_MAC_DMABUF_START 0x1 /* enum */
#define MC_CMD_MAC_TX_PKTS 0x1 /* enum */
@@ -6583,6 +7176,7 @@
#define MC_CMD_MAC_STATS_V2_OUT_NO_DMA_STATISTICS_HI_LBN 32
#define MC_CMD_MAC_STATS_V2_OUT_NO_DMA_STATISTICS_HI_WIDTH 32
#define MC_CMD_MAC_STATS_V2_OUT_NO_DMA_STATISTICS_NUM MC_CMD_MAC_NSTATS_V2
+/* enum property: index */
/* enum: Start of FEC stats buffer space, Medford2 and up */
#define MC_CMD_MAC_FEC_DMABUF_START 0x61
/* enum: Number of uncorrected FEC codewords on link (RS-FEC only for Medford2)
@@ -6622,6 +7216,7 @@
#define MC_CMD_MAC_STATS_V3_OUT_NO_DMA_STATISTICS_HI_LBN 32
#define MC_CMD_MAC_STATS_V3_OUT_NO_DMA_STATISTICS_HI_WIDTH 32
#define MC_CMD_MAC_STATS_V3_OUT_NO_DMA_STATISTICS_NUM MC_CMD_MAC_NSTATS_V3
+/* enum property: index */
/* enum: Start of CTPIO stats buffer space, Medford2 and up */
#define MC_CMD_MAC_CTPIO_DMABUF_START 0x68
/* enum: Number of CTPIO fallbacks because a DMA packet was in progress on the
@@ -6702,6 +7297,7 @@
#define MC_CMD_MAC_STATS_V4_OUT_NO_DMA_STATISTICS_HI_LBN 32
#define MC_CMD_MAC_STATS_V4_OUT_NO_DMA_STATISTICS_HI_WIDTH 32
#define MC_CMD_MAC_STATS_V4_OUT_NO_DMA_STATISTICS_NUM MC_CMD_MAC_NSTATS_V4
+/* enum property: index */
/* enum: Start of V4 stats buffer space */
#define MC_CMD_MAC_V4_DMABUF_START 0x79
/* enum: RXDP counter: Number of packets truncated because scattering was
@@ -6723,112 +7319,35 @@
/* Other enum values, see field(s): */
/* MC_CMD_MAC_STATS_V3_OUT_NO_DMA/STATISTICS */
-
-/***********************************/
-/* MC_CMD_SRIOV
- * to be documented
- */
-#define MC_CMD_SRIOV 0x30
-
-/* MC_CMD_SRIOV_IN msgrequest */
-#define MC_CMD_SRIOV_IN_LEN 12
-#define MC_CMD_SRIOV_IN_ENABLE_OFST 0
-#define MC_CMD_SRIOV_IN_ENABLE_LEN 4
-#define MC_CMD_SRIOV_IN_VI_BASE_OFST 4
-#define MC_CMD_SRIOV_IN_VI_BASE_LEN 4
-#define MC_CMD_SRIOV_IN_VF_COUNT_OFST 8
-#define MC_CMD_SRIOV_IN_VF_COUNT_LEN 4
-
-/* MC_CMD_SRIOV_OUT msgresponse */
-#define MC_CMD_SRIOV_OUT_LEN 8
-#define MC_CMD_SRIOV_OUT_VI_SCALE_OFST 0
-#define MC_CMD_SRIOV_OUT_VI_SCALE_LEN 4
-#define MC_CMD_SRIOV_OUT_VF_TOTAL_OFST 4
-#define MC_CMD_SRIOV_OUT_VF_TOTAL_LEN 4
-
-/* MC_CMD_MEMCPY_RECORD_TYPEDEF structuredef */
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_LEN 32
-/* this is only used for the first record */
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_OFST 0
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_LEN 4
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_LBN 0
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_WIDTH 32
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_RID_OFST 4
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_RID_LEN 4
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_RID_LBN 32
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_RID_WIDTH 32
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_OFST 8
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_LEN 8
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_LO_OFST 8
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_LO_LEN 4
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_LO_LBN 64
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_LO_WIDTH 32
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_HI_OFST 12
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_HI_LEN 4
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_HI_LBN 96
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_HI_WIDTH 32
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_LBN 64
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_WIDTH 64
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_RID_OFST 16
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_RID_LEN 4
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_RID_INLINE 0x100 /* enum */
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_RID_LBN 128
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_RID_WIDTH 32
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_OFST 20
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_LEN 8
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_LO_OFST 20
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_LO_LEN 4
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_LO_LBN 160
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_LO_WIDTH 32
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_HI_OFST 24
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_HI_LEN 4
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_HI_LBN 192
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_HI_WIDTH 32
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_LBN 160
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_WIDTH 64
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_LENGTH_OFST 28
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_LENGTH_LEN 4
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_LENGTH_LBN 224
-#define MC_CMD_MEMCPY_RECORD_TYPEDEF_LENGTH_WIDTH 32
-
-
-/***********************************/
-/* MC_CMD_MEMCPY
- * DMA write data into (Rid,Addr), either by dma reading (Rid,Addr), or by data
- * embedded directly in the command.
- *
- * A common pattern is for a client to use generation counts to signal a dma
- * update of a datastructure. To facilitate this, this MCDI operation can
- * contain multiple requests which are executed in strict order. Requests take
- * the form of duplicating the entire MCDI request continuously (including the
- * requests record, which is ignored in all but the first structure)
- *
- * The source data can either come from a DMA from the host, or it can be
- * embedded within the request directly, thereby eliminating a DMA read. To
- * indicate this, the client sets FROM_RID=%RID_INLINE, ADDR_HI=0, and
- * ADDR_LO=offset, and inserts the data at %offset from the start of the
- * payload. It's the callers responsibility to ensure that the embedded data
- * doesn't overlap the records.
- *
- * Returns: 0, EINVAL (invalid RID)
- */
-#define MC_CMD_MEMCPY 0x31
-
-/* MC_CMD_MEMCPY_IN msgrequest */
-#define MC_CMD_MEMCPY_IN_LENMIN 32
-#define MC_CMD_MEMCPY_IN_LENMAX 224
-#define MC_CMD_MEMCPY_IN_LENMAX_MCDI2 992
-#define MC_CMD_MEMCPY_IN_LEN(num) (0+32*(num))
-#define MC_CMD_MEMCPY_IN_RECORD_NUM(len) (((len)-0)/32)
-/* see MC_CMD_MEMCPY_RECORD_TYPEDEF */
-#define MC_CMD_MEMCPY_IN_RECORD_OFST 0
-#define MC_CMD_MEMCPY_IN_RECORD_LEN 32
-#define MC_CMD_MEMCPY_IN_RECORD_MINNUM 1
-#define MC_CMD_MEMCPY_IN_RECORD_MAXNUM 7
-#define MC_CMD_MEMCPY_IN_RECORD_MAXNUM_MCDI2 31
-
-/* MC_CMD_MEMCPY_OUT msgresponse */
-#define MC_CMD_MEMCPY_OUT_LEN 0
+/* MC_CMD_MAC_STATS_V5_OUT_DMA msgresponse */
+#define MC_CMD_MAC_STATS_V5_OUT_DMA_LEN 0
+
+/* MC_CMD_MAC_STATS_V5_OUT_NO_DMA msgresponse */
+#define MC_CMD_MAC_STATS_V5_OUT_NO_DMA_LEN (((MC_CMD_MAC_NSTATS_V5*64))>>3)
+#define MC_CMD_MAC_STATS_V5_OUT_NO_DMA_STATISTICS_OFST 0
+#define MC_CMD_MAC_STATS_V5_OUT_NO_DMA_STATISTICS_LEN 8
+#define MC_CMD_MAC_STATS_V5_OUT_NO_DMA_STATISTICS_LO_OFST 0
+#define MC_CMD_MAC_STATS_V5_OUT_NO_DMA_STATISTICS_LO_LEN 4
+#define MC_CMD_MAC_STATS_V5_OUT_NO_DMA_STATISTICS_LO_LBN 0
+#define MC_CMD_MAC_STATS_V5_OUT_NO_DMA_STATISTICS_LO_WIDTH 32
+#define MC_CMD_MAC_STATS_V5_OUT_NO_DMA_STATISTICS_HI_OFST 4
+#define MC_CMD_MAC_STATS_V5_OUT_NO_DMA_STATISTICS_HI_LEN 4
+#define MC_CMD_MAC_STATS_V5_OUT_NO_DMA_STATISTICS_HI_LBN 32
+#define MC_CMD_MAC_STATS_V5_OUT_NO_DMA_STATISTICS_HI_WIDTH 32
+#define MC_CMD_MAC_STATS_V5_OUT_NO_DMA_STATISTICS_NUM MC_CMD_MAC_NSTATS_V5
+/* enum property: index */
+/* enum: Start of V5 stats buffer space */
+#define MC_CMD_MAC_V5_DMABUF_START 0x7c
+/* enum: Link toggle counter: Number of times the link has toggled between
+ * up/down and down/up
+ */
+#define MC_CMD_MAC_LINK_TOGGLES 0x7c
+/* enum: This includes the space at offset 125 which is the final
+ * GENERATION_END in a MAC_STATS_V5 response and otherwise unused.
+ */
+#define MC_CMD_MAC_NSTATS_V5 0x7e
+/* Other enum values, see field(s): */
+/* MC_CMD_MAC_STATS_V4_OUT_NO_DMA/STATISTICS */
/***********************************/
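The V5 NO_DMA response size above follows directly from the statistic count: MC_CMD_MAC_NSTATS_V5 is 0x7e (126) 64-bit counters, so ((126*64))>>3 = 1008 bytes, with the final GENERATION_END word included in that space as the comment notes. A minimal C sketch (not part of the patch) of sizing and reading such a response; the function name and the assumption that the payload has already been copied into a local byte buffer are illustrative only:

#include <stdint.h>
#include <stddef.h>

#define MAC_NSTATS_V5 0x7e	/* mirrors MC_CMD_MAC_NSTATS_V5 */

/* Each statistic is a 64-bit little-endian counter, laid out back to back. */
static uint64_t mac_stats_v5_read(const uint8_t *resp, size_t resp_len,
				  unsigned int index)
{
	uint64_t v = 0;
	int i;

	if (resp_len < ((MAC_NSTATS_V5 * 64) >> 3) || index >= MAC_NSTATS_V5)
		return 0;	/* short buffer or out-of-range index */
	for (i = 7; i >= 0; i--)
		v = (v << 8) | resp[index * 8 + i];
	return v;
}

/* Usage (illustrative): the link-toggle counter sits at enum index 0x7c. */
/* uint64_t toggles = mac_stats_v5_read(buf, buf_len, 0x7c); */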
@@ -6984,6 +7503,7 @@
#define MC_CMD_WOL_FILTER_RESET_IN_LEN 4
#define MC_CMD_WOL_FILTER_RESET_IN_MASK_OFST 0
#define MC_CMD_WOL_FILTER_RESET_IN_MASK_LEN 4
+/* enum property: bitmask */
#define MC_CMD_WOL_FILTER_RESET_IN_WAKE_FILTERS 0x1 /* enum */
#define MC_CMD_WOL_FILTER_RESET_IN_LIGHTSOUT_OFFLOADS 0x2 /* enum */
@@ -6992,23 +7512,6 @@
/***********************************/
-/* MC_CMD_SET_MCAST_HASH
- * Set the MCAST hash value without otherwise reconfiguring the MAC
- */
-#define MC_CMD_SET_MCAST_HASH 0x35
-
-/* MC_CMD_SET_MCAST_HASH_IN msgrequest */
-#define MC_CMD_SET_MCAST_HASH_IN_LEN 32
-#define MC_CMD_SET_MCAST_HASH_IN_HASH0_OFST 0
-#define MC_CMD_SET_MCAST_HASH_IN_HASH0_LEN 16
-#define MC_CMD_SET_MCAST_HASH_IN_HASH1_OFST 16
-#define MC_CMD_SET_MCAST_HASH_IN_HASH1_LEN 16
-
-/* MC_CMD_SET_MCAST_HASH_OUT msgresponse */
-#define MC_CMD_SET_MCAST_HASH_OUT_LEN 0
-
-
-/***********************************/
/* MC_CMD_NVRAM_TYPES
* Return bitfield indicating available types of virtual NVRAM partitions.
* Locks required: none. Returns: 0
@@ -7026,6 +7529,7 @@
/* Bit mask of supported types. */
#define MC_CMD_NVRAM_TYPES_OUT_TYPES_OFST 0
#define MC_CMD_NVRAM_TYPES_OUT_TYPES_LEN 4
+/* enum property: bitshift */
/* enum: Disabled callisto. */
#define MC_CMD_NVRAM_TYPE_DISABLED_CALLISTO 0x0
/* enum: MC firmware. */
@@ -7152,6 +7656,12 @@
#define MC_CMD_NVRAM_INFO_V2_OUT_A_B_OFST 12
#define MC_CMD_NVRAM_INFO_V2_OUT_A_B_LBN 7
#define MC_CMD_NVRAM_INFO_V2_OUT_A_B_WIDTH 1
+#define MC_CMD_NVRAM_INFO_V2_OUT_WRITE_ONLY_OFST 12
+#define MC_CMD_NVRAM_INFO_V2_OUT_WRITE_ONLY_LBN 8
+#define MC_CMD_NVRAM_INFO_V2_OUT_WRITE_ONLY_WIDTH 1
+#define MC_CMD_NVRAM_INFO_V2_OUT_SEQUENTIAL_WRITE_OFST 12
+#define MC_CMD_NVRAM_INFO_V2_OUT_SEQUENTIAL_WRITE_LBN 9
+#define MC_CMD_NVRAM_INFO_V2_OUT_SEQUENTIAL_WRITE_WIDTH 1
#define MC_CMD_NVRAM_INFO_V2_OUT_PHYSDEV_OFST 16
#define MC_CMD_NVRAM_INFO_V2_OUT_PHYSDEV_LEN 4
#define MC_CMD_NVRAM_INFO_V2_OUT_PHYSADDR_OFST 20
@@ -7499,6 +8009,128 @@
#define MC_CMD_NVRAM_VERIFY_RC_BUNDLE_COMPONENT_COPY_FAILED 0x19
/* enum: The update operation is in-progress. */
#define MC_CMD_NVRAM_VERIFY_RC_PENDING 0x1a
+/* enum: The update was an invalid user configuration file. */
+#define MC_CMD_NVRAM_VERIFY_RC_BAD_CONFIG 0x1b
+/* enum: The write was to the AUTO partition but the data was not recognised as
+ * a valid partition.
+ */
+#define MC_CMD_NVRAM_VERIFY_RC_UNKNOWN_TYPE 0x1c
+
+/* MC_CMD_NVRAM_UPDATE_FINISH_V3_OUT msgresponse */
+#define MC_CMD_NVRAM_UPDATE_FINISH_V3_OUT_LEN 88
+/* Result of nvram update completion processing. Result codes that indicate an
+ * internal build failure, and are therefore not expected to be seen by
+ * customers in the field, are marked with the prefix 'Internal-error'.
+ */
+#define MC_CMD_NVRAM_UPDATE_FINISH_V3_OUT_RESULT_CODE_OFST 0
+#define MC_CMD_NVRAM_UPDATE_FINISH_V3_OUT_RESULT_CODE_LEN 4
+/* enum: Invalid return code; only non-zero values are defined. Defined as
+ * unknown for backwards compatibility with NVRAM_UPDATE_FINISH_OUT.
+ */
+/* MC_CMD_NVRAM_VERIFY_RC_UNKNOWN 0x0 */
+/* enum: Verify succeeded without any errors. */
+/* MC_CMD_NVRAM_VERIFY_RC_SUCCESS 0x1 */
+/* enum: CMS format verification failed due to an internal error. */
+/* MC_CMD_NVRAM_VERIFY_RC_CMS_CHECK_FAILED 0x2 */
+/* enum: Invalid CMS format in image metadata. */
+/* MC_CMD_NVRAM_VERIFY_RC_INVALID_CMS_FORMAT 0x3 */
+/* enum: Message digest verification failed due to an internal error. */
+/* MC_CMD_NVRAM_VERIFY_RC_MESSAGE_DIGEST_CHECK_FAILED 0x4 */
+/* enum: Error in message digest calculated over the reflash-header, payload
+ * and reflash-trailer.
+ */
+/* MC_CMD_NVRAM_VERIFY_RC_BAD_MESSAGE_DIGEST 0x5 */
+/* enum: Signature verification failed due to an internal error. */
+/* MC_CMD_NVRAM_VERIFY_RC_SIGNATURE_CHECK_FAILED 0x6 */
+/* enum: There are no valid signatures in the image. */
+/* MC_CMD_NVRAM_VERIFY_RC_NO_VALID_SIGNATURES 0x7 */
+/* enum: Trusted approvers verification failed due to an internal error. */
+/* MC_CMD_NVRAM_VERIFY_RC_TRUSTED_APPROVERS_CHECK_FAILED 0x8 */
+/* enum: The Trusted approver's list is empty. */
+/* MC_CMD_NVRAM_VERIFY_RC_NO_TRUSTED_APPROVERS 0x9 */
+/* enum: Signature chain verification failed due to an internal error. */
+/* MC_CMD_NVRAM_VERIFY_RC_SIGNATURE_CHAIN_CHECK_FAILED 0xa */
+/* enum: The signers of the signatures in the image are not listed in the
+ * Trusted approver's list.
+ */
+/* MC_CMD_NVRAM_VERIFY_RC_NO_SIGNATURE_MATCH 0xb */
+/* enum: The image contains a test-signed certificate, but the adapter accepts
+ * only production signed images.
+ */
+/* MC_CMD_NVRAM_VERIFY_RC_REJECT_TEST_SIGNED 0xc */
+/* enum: The image has a lower security level than the current firmware. */
+/* MC_CMD_NVRAM_VERIFY_RC_SECURITY_LEVEL_DOWNGRADE 0xd */
+/* enum: Internal-error. The signed image is missing the 'contents' section,
+ * where the 'contents' section holds the actual image payload to be applied.
+ */
+/* MC_CMD_NVRAM_VERIFY_RC_CONTENT_NOT_FOUND 0xe */
+/* enum: Internal-error. The bundle header is invalid. */
+/* MC_CMD_NVRAM_VERIFY_RC_BUNDLE_CONTENT_HEADER_INVALID 0xf */
+/* enum: Internal-error. The bundle does not have a valid reflash image layout.
+ */
+/* MC_CMD_NVRAM_VERIFY_RC_BUNDLE_REFLASH_IMAGE_INVALID 0x10 */
+/* enum: Internal-error. The bundle has an inconsistent layout of components or
+ * incorrect checksum.
+ */
+/* MC_CMD_NVRAM_VERIFY_RC_BUNDLE_IMAGE_LAYOUT_INVALID 0x11 */
+/* enum: Internal-error. The bundle manifest is inconsistent with components in
+ * the bundle.
+ */
+/* MC_CMD_NVRAM_VERIFY_RC_BUNDLE_MANIFEST_INVALID 0x12 */
+/* enum: Internal-error. The number of components in a bundle do not match the
+ * number of components advertised by the bundle manifest.
+ */
+/* MC_CMD_NVRAM_VERIFY_RC_BUNDLE_MANIFEST_NUM_COMPONENTS_MISMATCH 0x13 */
+/* enum: Internal-error. The bundle contains too many components for the MC
+ * firmware to process
+ */
+/* MC_CMD_NVRAM_VERIFY_RC_BUNDLE_MANIFEST_TOO_MANY_COMPONENTS 0x14 */
+/* enum: Internal-error. The bundle manifest has an invalid/inconsistent
+ * component.
+ */
+/* MC_CMD_NVRAM_VERIFY_RC_BUNDLE_MANIFEST_COMPONENT_INVALID 0x15 */
+/* enum: Internal-error. The hash of a component does not match the hash stored
+ * in the bundle manifest.
+ */
+/* MC_CMD_NVRAM_VERIFY_RC_BUNDLE_MANIFEST_COMPONENT_HASH_MISMATCH 0x16 */
+/* enum: Internal-error. Component hash calculation failed. */
+/* MC_CMD_NVRAM_VERIFY_RC_BUNDLE_MANIFEST_COMPONENT_HASH_FAILED 0x17 */
+/* enum: Internal-error. The component does not have a valid reflash image
+ * layout.
+ */
+/* MC_CMD_NVRAM_VERIFY_RC_BUNDLE_COMPONENT_REFLASH_IMAGE_INVALID 0x18 */
+/* enum: The bundle processing code failed to copy a component to its target
+ * partition.
+ */
+/* MC_CMD_NVRAM_VERIFY_RC_BUNDLE_COMPONENT_COPY_FAILED 0x19 */
+/* enum: The update operation is in-progress. */
+/* MC_CMD_NVRAM_VERIFY_RC_PENDING 0x1a */
+/* enum: The update was an invalid user configuration file. */
+/* MC_CMD_NVRAM_VERIFY_RC_BAD_CONFIG 0x1b */
+/* enum: The write was to the AUTO partition but the data was not recognised as
+ * a valid partition.
+ */
+/* MC_CMD_NVRAM_VERIFY_RC_UNKNOWN_TYPE 0x1c */
+/* If the update was a user configuration, what action(s) the user must take to
+ * apply the new configuration.
+ */
+#define MC_CMD_NVRAM_UPDATE_FINISH_V3_OUT_ACTIONS_REQUIRED_OFST 4
+#define MC_CMD_NVRAM_UPDATE_FINISH_V3_OUT_ACTIONS_REQUIRED_LEN 4
+/* enum: No action required. */
+#define MC_CMD_NVRAM_UPDATE_FINISH_V3_OUT_NONE 0x0
+/* enum: The MC firmware must be rebooted (eg with MC_CMD_REBOOT). */
+#define MC_CMD_NVRAM_UPDATE_FINISH_V3_OUT_FIRMWARE_REBOOT 0x1
+/* enum: The host must be rebooted. */
+#define MC_CMD_NVRAM_UPDATE_FINISH_V3_OUT_HOST_REBOOT 0x2
+/* enum: The firmware and host must be rebooted (in either order). */
+#define MC_CMD_NVRAM_UPDATE_FINISH_V3_OUT_FIRMWARE_AND_HOST_REBOOT 0x3
+/* enum: The host must be fully powered off. */
+#define MC_CMD_NVRAM_UPDATE_FINISH_V3_OUT_HOST_POWERCYCLE 0x4
+/* If the update failed with MC_CMD_NVRAM_VERIFY_RC_BAD_CONFIG, a null-
+ * terminated US-ASCII string suitable for showing to the user.
+ */
+#define MC_CMD_NVRAM_UPDATE_FINISH_V3_OUT_ERROR_STRING_OFST 8
+#define MC_CMD_NVRAM_UPDATE_FINISH_V3_OUT_ERROR_STRING_LEN 80
/***********************************/
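Taken together, RESULT_CODE, ACTIONS_REQUIRED and ERROR_STRING give a caller everything needed to report a failed or pending update. A hedged C sketch (not part of the patch) of decoding the 88-byte V3 response, assuming the raw little-endian payload has been received into `resp`; the constants restate the _OFST/_LEN values above and the reporting helper is illustrative only:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define RESULT_CODE_OFST	0	/* ..._V3_OUT_RESULT_CODE_OFST */
#define ACTIONS_REQUIRED_OFST	4	/* ..._V3_OUT_ACTIONS_REQUIRED_OFST */
#define ERROR_STRING_OFST	8	/* ..._V3_OUT_ERROR_STRING_OFST */
#define ERROR_STRING_LEN	80	/* ..._V3_OUT_ERROR_STRING_LEN */
#define RC_BAD_CONFIG		0x1b	/* MC_CMD_NVRAM_VERIFY_RC_BAD_CONFIG */

static uint32_t le32(const uint8_t *p)
{
	return p[0] | p[1] << 8 | (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
}

static void report_update_finish_v3(const uint8_t *resp)
{
	uint32_t rc = le32(resp + RESULT_CODE_OFST);
	uint32_t action = le32(resp + ACTIONS_REQUIRED_OFST);
	char msg[ERROR_STRING_LEN + 1];

	if (rc == RC_BAD_CONFIG) {
		/* String is NUL-terminated US-ASCII, but copy defensively. */
		memcpy(msg, resp + ERROR_STRING_OFST, ERROR_STRING_LEN);
		msg[ERROR_STRING_LEN] = '\0';
		printf("bad user configuration: %s\n", msg);
	}
	printf("result code %u, action required %u\n", rc, action);
}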
@@ -7522,7 +8154,7 @@
#define MC_CMD_REBOOT 0x3d
#undef MC_CMD_0x3d_PRIVILEGE_CTG
-#define MC_CMD_0x3d_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x3d_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
/* MC_CMD_REBOOT_IN msgrequest */
#define MC_CMD_REBOOT_IN_LEN 4
@@ -7535,65 +8167,6 @@
/***********************************/
-/* MC_CMD_SCHEDINFO
- * Request scheduler info. Locks required: NONE. Returns: An array of
- * (timeslice,maximum overrun), one for each thread, in ascending order of
- * thread address.
- */
-#define MC_CMD_SCHEDINFO 0x3e
-#undef MC_CMD_0x3e_PRIVILEGE_CTG
-
-#define MC_CMD_0x3e_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_SCHEDINFO_IN msgrequest */
-#define MC_CMD_SCHEDINFO_IN_LEN 0
-
-/* MC_CMD_SCHEDINFO_OUT msgresponse */
-#define MC_CMD_SCHEDINFO_OUT_LENMIN 4
-#define MC_CMD_SCHEDINFO_OUT_LENMAX 252
-#define MC_CMD_SCHEDINFO_OUT_LENMAX_MCDI2 1020
-#define MC_CMD_SCHEDINFO_OUT_LEN(num) (0+4*(num))
-#define MC_CMD_SCHEDINFO_OUT_DATA_NUM(len) (((len)-0)/4)
-#define MC_CMD_SCHEDINFO_OUT_DATA_OFST 0
-#define MC_CMD_SCHEDINFO_OUT_DATA_LEN 4
-#define MC_CMD_SCHEDINFO_OUT_DATA_MINNUM 1
-#define MC_CMD_SCHEDINFO_OUT_DATA_MAXNUM 63
-#define MC_CMD_SCHEDINFO_OUT_DATA_MAXNUM_MCDI2 255
-
-
-/***********************************/
-/* MC_CMD_REBOOT_MODE
- * Set the mode for the next MC reboot. Locks required: NONE. Sets the reboot
- * mode to the specified value. Returns the old mode.
- */
-#define MC_CMD_REBOOT_MODE 0x3f
-#undef MC_CMD_0x3f_PRIVILEGE_CTG
-
-#define MC_CMD_0x3f_PRIVILEGE_CTG SRIOV_CTG_INSECURE
-
-/* MC_CMD_REBOOT_MODE_IN msgrequest */
-#define MC_CMD_REBOOT_MODE_IN_LEN 4
-#define MC_CMD_REBOOT_MODE_IN_VALUE_OFST 0
-#define MC_CMD_REBOOT_MODE_IN_VALUE_LEN 4
-/* enum: Normal. */
-#define MC_CMD_REBOOT_MODE_NORMAL 0x0
-/* enum: Power-on Reset. */
-#define MC_CMD_REBOOT_MODE_POR 0x2
-/* enum: Snapper. */
-#define MC_CMD_REBOOT_MODE_SNAPPER 0x3
-/* enum: snapper fake POR */
-#define MC_CMD_REBOOT_MODE_SNAPPER_POR 0x4
-#define MC_CMD_REBOOT_MODE_IN_FAKE_OFST 0
-#define MC_CMD_REBOOT_MODE_IN_FAKE_LBN 7
-#define MC_CMD_REBOOT_MODE_IN_FAKE_WIDTH 1
-
-/* MC_CMD_REBOOT_MODE_OUT msgresponse */
-#define MC_CMD_REBOOT_MODE_OUT_LEN 4
-#define MC_CMD_REBOOT_MODE_OUT_VALUE_OFST 0
-#define MC_CMD_REBOOT_MODE_OUT_VALUE_LEN 4
-
-
-/***********************************/
/* MC_CMD_SENSOR_INFO
* Returns information about every available sensor.
*
@@ -8061,6 +8634,54 @@
/* MC_CMD_GET_PHY_STATE_IN msgrequest */
#define MC_CMD_GET_PHY_STATE_IN_LEN 0
+/* MC_CMD_GET_PHY_STATE_IN_V2 msgrequest */
+#define MC_CMD_GET_PHY_STATE_IN_V2_LEN 8
+/* Target port to request PHY state for. Uses MAE_LINK_ENDPOINT_SELECTOR which
+ * identifies a real or virtual network port by MAE port and link end. See the
+ * structure definition for more details.
+ */
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_OFST 0
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_LEN 8
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_LO_OFST 0
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_LO_LEN 4
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_LO_LBN 0
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_LO_WIDTH 32
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_HI_OFST 4
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_HI_LEN 4
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_HI_LBN 32
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_HI_WIDTH 32
+/* See structuredef: MAE_LINK_ENDPOINT_SELECTOR */
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_MPORT_SELECTOR_OFST 0
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_MPORT_SELECTOR_LEN 4
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_MPORT_SELECTOR_FLAT_OFST 0
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_MPORT_SELECTOR_FLAT_LEN 4
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_MPORT_SELECTOR_TYPE_OFST 3
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_MPORT_SELECTOR_TYPE_LEN 1
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_MPORT_SELECTOR_MPORT_ID_OFST 0
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_MPORT_SELECTOR_MPORT_ID_LEN 3
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_MPORT_SELECTOR_PPORT_ID_LBN 0
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_MPORT_SELECTOR_PPORT_ID_WIDTH 4
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_MPORT_SELECTOR_FUNC_INTF_ID_LBN 20
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_MPORT_SELECTOR_FUNC_INTF_ID_WIDTH 4
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_MPORT_SELECTOR_FUNC_MH_PF_ID_LBN 16
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_MPORT_SELECTOR_FUNC_MH_PF_ID_WIDTH 4
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_MPORT_SELECTOR_FUNC_PF_ID_OFST 2
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_MPORT_SELECTOR_FUNC_PF_ID_LEN 1
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_MPORT_SELECTOR_FUNC_VF_ID_OFST 0
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_MPORT_SELECTOR_FUNC_VF_ID_LEN 2
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_LINK_END_OFST 4
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_LINK_END_LEN 4
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_FLAT_OFST 0
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_FLAT_LEN 8
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_FLAT_LO_OFST 0
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_FLAT_LO_LEN 4
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_FLAT_LO_LBN 0
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_FLAT_LO_WIDTH 32
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_FLAT_HI_OFST 4
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_FLAT_HI_LEN 4
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_FLAT_HI_LBN 32
+#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_FLAT_HI_WIDTH 32
+
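The V2 request extends the legacy zero-length GET_PHY_STATE request with an 8-byte MAE_LINK_ENDPOINT_SELECTOR: a 32-bit m-port selector (TYPE in the top byte, per the TYPE_OFST 3 / MPORT_ID_OFST 0 split above) followed by a 32-bit link-end word. A hedged C sketch (not part of the patch) of packing the TARGET field; the helper names are illustrative, and only the offsets mirror the definitions above:

#include <stdint.h>
#include <string.h>

#define GET_PHY_STATE_IN_V2_LEN	8	/* MC_CMD_GET_PHY_STATE_IN_V2_LEN */
#define TARGET_MPORT_SELECTOR_OFST	0	/* 32-bit selector, TYPE in byte 3 */
#define TARGET_LINK_END_OFST		4	/* 32-bit link end */

static void le32_put(uint8_t *p, uint32_t v)
{
	p[0] = v; p[1] = v >> 8; p[2] = v >> 16; p[3] = v >> 24;
}

/* Build a GET_PHY_STATE_IN_V2 payload. The selector and link-end values are
 * taken from the MAE_LINK_ENDPOINT_SELECTOR structuredef elsewhere in this
 * header; their encoding is not reproduced here.
 */
static void build_get_phy_state_v2(uint8_t buf[GET_PHY_STATE_IN_V2_LEN],
				   uint32_t mport_selector, uint32_t link_end)
{
	memset(buf, 0, GET_PHY_STATE_IN_V2_LEN);
	le32_put(buf + TARGET_MPORT_SELECTOR_OFST, mport_selector);
	le32_put(buf + TARGET_LINK_END_OFST, link_end);
}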
/* MC_CMD_GET_PHY_STATE_OUT msgresponse */
#define MC_CMD_GET_PHY_STATE_OUT_LEN 4
#define MC_CMD_GET_PHY_STATE_OUT_STATE_OFST 0
@@ -8072,22 +8693,6 @@
/***********************************/
-/* MC_CMD_SETUP_8021QBB
- * 802.1Qbb control. 8 Tx queues that map to priorities 0 - 7. Use all 1s to
- * disable 802.Qbb for a given priority.
- */
-#define MC_CMD_SETUP_8021QBB 0x44
-
-/* MC_CMD_SETUP_8021QBB_IN msgrequest */
-#define MC_CMD_SETUP_8021QBB_IN_LEN 32
-#define MC_CMD_SETUP_8021QBB_IN_TXQS_OFST 0
-#define MC_CMD_SETUP_8021QBB_IN_TXQS_LEN 32
-
-/* MC_CMD_SETUP_8021QBB_OUT msgresponse */
-#define MC_CMD_SETUP_8021QBB_OUT_LEN 0
-
-
-/***********************************/
/* MC_CMD_WOL_FILTER_GET
* Retrieve ID of any WoL filters. Locks required: None. Returns: 0, ENOSYS
*/
@@ -8106,133 +8711,6 @@
/***********************************/
-/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD
- * Add a protocol offload to NIC for lights-out state. Locks required: None.
- * Returns: 0, ENOSYS
- */
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD 0x46
-#undef MC_CMD_0x46_PRIVILEGE_CTG
-
-#define MC_CMD_0x46_PRIVILEGE_CTG SRIOV_CTG_LINK
-
-/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN msgrequest */
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_LENMIN 8
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_LENMAX 252
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_LENMAX_MCDI2 1020
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_LEN(num) (4+4*(num))
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_NUM(len) (((len)-4)/4)
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_LEN 4
-#define MC_CMD_LIGHTSOUT_OFFLOAD_PROTOCOL_ARP 0x1 /* enum */
-#define MC_CMD_LIGHTSOUT_OFFLOAD_PROTOCOL_NS 0x2 /* enum */
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_OFST 4
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_LEN 4
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_MINNUM 1
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_MAXNUM 62
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_MAXNUM_MCDI2 254
-
-/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP msgrequest */
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_LEN 14
-/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0 */
-/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_LEN 4 */
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_MAC_OFST 4
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_MAC_LEN 6
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_IP_OFST 10
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_IP_LEN 4
-
-/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS msgrequest */
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_LEN 42
-/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0 */
-/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_LEN 4 */
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_MAC_OFST 4
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_MAC_LEN 6
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_SNIPV6_OFST 10
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_SNIPV6_LEN 16
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_IPV6_OFST 26
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_IPV6_LEN 16
-
-/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT msgresponse */
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT_LEN 4
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT_FILTER_ID_OFST 0
-#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT_FILTER_ID_LEN 4
-
-
-/***********************************/
-/* MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD
- * Remove a protocol offload from NIC for lights-out state. Locks required:
- * None. Returns: 0, ENOSYS
- */
-#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD 0x47
-#undef MC_CMD_0x47_PRIVILEGE_CTG
-
-#define MC_CMD_0x47_PRIVILEGE_CTG SRIOV_CTG_LINK
-
-/* MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN msgrequest */
-#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_LEN 8
-#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0
-#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_LEN 4
-#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_FILTER_ID_OFST 4
-#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_FILTER_ID_LEN 4
-
-/* MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_OUT msgresponse */
-#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_MAC_RESET_RESTORE
- * Restore MAC after block reset. Locks required: None. Returns: 0.
- */
-#define MC_CMD_MAC_RESET_RESTORE 0x48
-
-/* MC_CMD_MAC_RESET_RESTORE_IN msgrequest */
-#define MC_CMD_MAC_RESET_RESTORE_IN_LEN 0
-
-/* MC_CMD_MAC_RESET_RESTORE_OUT msgresponse */
-#define MC_CMD_MAC_RESET_RESTORE_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_TESTASSERT
- * Deliberately trigger an assert-detonation in the firmware for testing
- * purposes (i.e. to allow tests that the driver copes gracefully). Locks
- * required: None Returns: 0
- */
-#define MC_CMD_TESTASSERT 0x49
-#undef MC_CMD_0x49_PRIVILEGE_CTG
-
-#define MC_CMD_0x49_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_TESTASSERT_IN msgrequest */
-#define MC_CMD_TESTASSERT_IN_LEN 0
-
-/* MC_CMD_TESTASSERT_OUT msgresponse */
-#define MC_CMD_TESTASSERT_OUT_LEN 0
-
-/* MC_CMD_TESTASSERT_V2_IN msgrequest */
-#define MC_CMD_TESTASSERT_V2_IN_LEN 4
-/* How to provoke the assertion */
-#define MC_CMD_TESTASSERT_V2_IN_TYPE_OFST 0
-#define MC_CMD_TESTASSERT_V2_IN_TYPE_LEN 4
-/* enum: Assert using the FAIL_ASSERTION_WITH_USEFUL_VALUES macro. Unless
- * you're testing firmware, this is what you want.
- */
-#define MC_CMD_TESTASSERT_V2_IN_FAIL_ASSERTION_WITH_USEFUL_VALUES 0x0
-/* enum: Assert using assert(0); */
-#define MC_CMD_TESTASSERT_V2_IN_ASSERT_FALSE 0x1
-/* enum: Deliberately trigger a watchdog */
-#define MC_CMD_TESTASSERT_V2_IN_WATCHDOG 0x2
-/* enum: Deliberately trigger a trap by loading from an invalid address */
-#define MC_CMD_TESTASSERT_V2_IN_LOAD_TRAP 0x3
-/* enum: Deliberately trigger a trap by storing to an invalid address */
-#define MC_CMD_TESTASSERT_V2_IN_STORE_TRAP 0x4
-/* enum: Jump to an invalid address */
-#define MC_CMD_TESTASSERT_V2_IN_JUMP_TRAP 0x5
-
-/* MC_CMD_TESTASSERT_V2_OUT msgresponse */
-#define MC_CMD_TESTASSERT_V2_OUT_LEN 0
-
-
-/***********************************/
/* MC_CMD_WORKAROUND
* Enable/Disable a given workaround. The mcfw will return EINVAL if it doesn't
* understand the given workaround number - which should not be treated as a
@@ -8324,6 +8802,62 @@
#define MC_CMD_GET_PHY_MEDIA_INFO_IN_DSFP_BANK_LBN 16
#define MC_CMD_GET_PHY_MEDIA_INFO_IN_DSFP_BANK_WIDTH 16
+/* MC_CMD_GET_PHY_MEDIA_INFO_IN_V2 msgrequest */
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_LEN 12
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_PAGE_OFST 0
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_PAGE_LEN 4
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_DSFP_PAGE_OFST 0
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_DSFP_PAGE_LBN 0
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_DSFP_PAGE_WIDTH 16
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_DSFP_BANK_OFST 0
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_DSFP_BANK_LBN 16
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_DSFP_BANK_WIDTH 16
+/* Target port to request PHY media information for. Uses
+ * MAE_LINK_ENDPOINT_SELECTOR which identifies a real or virtual network port
+ * by MAE port and link end. See the structure definition for more details.
+ */
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_OFST 4
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_LEN 8
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_LO_OFST 4
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_LO_LEN 4
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_LO_LBN 32
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_LO_WIDTH 32
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_HI_OFST 8
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_HI_LEN 4
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_HI_LBN 64
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_HI_WIDTH 32
+/* See structuredef: MAE_LINK_ENDPOINT_SELECTOR */
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_MPORT_SELECTOR_OFST 4
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_MPORT_SELECTOR_LEN 4
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_MPORT_SELECTOR_FLAT_OFST 4
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_MPORT_SELECTOR_FLAT_LEN 4
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_MPORT_SELECTOR_TYPE_OFST 7
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_MPORT_SELECTOR_TYPE_LEN 1
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_MPORT_SELECTOR_MPORT_ID_OFST 4
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_MPORT_SELECTOR_MPORT_ID_LEN 3
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_MPORT_SELECTOR_PPORT_ID_LBN 32
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_MPORT_SELECTOR_PPORT_ID_WIDTH 4
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_MPORT_SELECTOR_FUNC_INTF_ID_LBN 52
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_MPORT_SELECTOR_FUNC_INTF_ID_WIDTH 4
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_MPORT_SELECTOR_FUNC_MH_PF_ID_LBN 48
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_MPORT_SELECTOR_FUNC_MH_PF_ID_WIDTH 4
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_MPORT_SELECTOR_FUNC_PF_ID_OFST 6
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_MPORT_SELECTOR_FUNC_PF_ID_LEN 1
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_MPORT_SELECTOR_FUNC_VF_ID_OFST 4
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_MPORT_SELECTOR_FUNC_VF_ID_LEN 2
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_LINK_END_OFST 8
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_LINK_END_LEN 4
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_FLAT_OFST 4
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_FLAT_LEN 8
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_FLAT_LO_OFST 4
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_FLAT_LO_LEN 4
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_FLAT_LO_LBN 32
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_FLAT_LO_WIDTH 32
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_FLAT_HI_OFST 8
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_FLAT_HI_LEN 4
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_FLAT_HI_LBN 64
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_FLAT_HI_WIDTH 32
+
/* MC_CMD_GET_PHY_MEDIA_INFO_OUT msgresponse */
#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_LENMIN 5
#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_LENMAX 252
@@ -8348,7 +8882,7 @@
#define MC_CMD_NVRAM_TEST 0x4c
#undef MC_CMD_0x4c_PRIVILEGE_CTG
-#define MC_CMD_0x4c_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x4c_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
/* MC_CMD_NVRAM_TEST_IN msgrequest */
#define MC_CMD_NVRAM_TEST_IN_LEN 4
@@ -8370,103 +8904,6 @@
/***********************************/
-/* MC_CMD_MRSFP_TWEAK
- * Read status and/or set parameters for the 'mrsfp' driver in mr_rusty builds.
- * I2C I/O expander bits are always read; if equaliser parameters are supplied,
- * they are configured first. Locks required: None. Return code: 0, EINVAL.
- */
-#define MC_CMD_MRSFP_TWEAK 0x4d
-
-/* MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG msgrequest */
-#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_LEN 16
-/* 0-6 low->high de-emph. */
-#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_TXEQ_LEVEL_OFST 0
-#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_TXEQ_LEVEL_LEN 4
-/* 0-8 low->high ref.V */
-#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_TXEQ_DT_CFG_OFST 4
-#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_TXEQ_DT_CFG_LEN 4
-/* 0-8 0-8 low->high boost */
-#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_RXEQ_BOOST_OFST 8
-#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_RXEQ_BOOST_LEN 4
-/* 0-8 low->high ref.V */
-#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_RXEQ_DT_CFG_OFST 12
-#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_RXEQ_DT_CFG_LEN 4
-
-/* MC_CMD_MRSFP_TWEAK_IN_READ_ONLY msgrequest */
-#define MC_CMD_MRSFP_TWEAK_IN_READ_ONLY_LEN 0
-
-/* MC_CMD_MRSFP_TWEAK_OUT msgresponse */
-#define MC_CMD_MRSFP_TWEAK_OUT_LEN 12
-/* input bits */
-#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_INPUTS_OFST 0
-#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_INPUTS_LEN 4
-/* output bits */
-#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_OUTPUTS_OFST 4
-#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_OUTPUTS_LEN 4
-/* direction */
-#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_OFST 8
-#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_LEN 4
-/* enum: Out. */
-#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_OUT 0x0
-/* enum: In. */
-#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_IN 0x1
-
-
-/***********************************/
-/* MC_CMD_SENSOR_SET_LIMS
- * Adjusts the sensor limits. This is a warranty-voiding operation. Returns:
- * ENOENT if the sensor specified does not exist, EINVAL if the limits are out
- * of range.
- */
-#define MC_CMD_SENSOR_SET_LIMS 0x4e
-#undef MC_CMD_0x4e_PRIVILEGE_CTG
-
-#define MC_CMD_0x4e_PRIVILEGE_CTG SRIOV_CTG_INSECURE
-
-/* MC_CMD_SENSOR_SET_LIMS_IN msgrequest */
-#define MC_CMD_SENSOR_SET_LIMS_IN_LEN 20
-#define MC_CMD_SENSOR_SET_LIMS_IN_SENSOR_OFST 0
-#define MC_CMD_SENSOR_SET_LIMS_IN_SENSOR_LEN 4
-/* Enum values, see field(s): */
-/* MC_CMD_SENSOR_INFO/MC_CMD_SENSOR_INFO_OUT/MASK */
-/* interpretation is is sensor-specific. */
-#define MC_CMD_SENSOR_SET_LIMS_IN_LOW0_OFST 4
-#define MC_CMD_SENSOR_SET_LIMS_IN_LOW0_LEN 4
-/* interpretation is is sensor-specific. */
-#define MC_CMD_SENSOR_SET_LIMS_IN_HI0_OFST 8
-#define MC_CMD_SENSOR_SET_LIMS_IN_HI0_LEN 4
-/* interpretation is is sensor-specific. */
-#define MC_CMD_SENSOR_SET_LIMS_IN_LOW1_OFST 12
-#define MC_CMD_SENSOR_SET_LIMS_IN_LOW1_LEN 4
-/* interpretation is is sensor-specific. */
-#define MC_CMD_SENSOR_SET_LIMS_IN_HI1_OFST 16
-#define MC_CMD_SENSOR_SET_LIMS_IN_HI1_LEN 4
-
-/* MC_CMD_SENSOR_SET_LIMS_OUT msgresponse */
-#define MC_CMD_SENSOR_SET_LIMS_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_GET_RESOURCE_LIMITS
- */
-#define MC_CMD_GET_RESOURCE_LIMITS 0x4f
-
-/* MC_CMD_GET_RESOURCE_LIMITS_IN msgrequest */
-#define MC_CMD_GET_RESOURCE_LIMITS_IN_LEN 0
-
-/* MC_CMD_GET_RESOURCE_LIMITS_OUT msgresponse */
-#define MC_CMD_GET_RESOURCE_LIMITS_OUT_LEN 16
-#define MC_CMD_GET_RESOURCE_LIMITS_OUT_BUFTBL_OFST 0
-#define MC_CMD_GET_RESOURCE_LIMITS_OUT_BUFTBL_LEN 4
-#define MC_CMD_GET_RESOURCE_LIMITS_OUT_EVQ_OFST 4
-#define MC_CMD_GET_RESOURCE_LIMITS_OUT_EVQ_LEN 4
-#define MC_CMD_GET_RESOURCE_LIMITS_OUT_RXQ_OFST 8
-#define MC_CMD_GET_RESOURCE_LIMITS_OUT_RXQ_LEN 4
-#define MC_CMD_GET_RESOURCE_LIMITS_OUT_TXQ_OFST 12
-#define MC_CMD_GET_RESOURCE_LIMITS_OUT_TXQ_LEN 4
-
-
-/***********************************/
/* MC_CMD_NVRAM_PARTITIONS
* Reads the list of available virtual NVRAM partition types. Locks required:
* none. Returns: 0, EINVAL (bad type).
@@ -8582,806 +9019,6 @@
#define MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_STRIDE_OFST 12
#define MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_STRIDE_LEN 4
-
-/***********************************/
-/* MC_CMD_CLP
- * Perform a CLP related operation, see SF-110495-PS for details of CLP
- * processing. This command has been extended to accomodate the requirements of
- * different manufacturers which are to be found in SF-119187-TC, SF-119186-TC,
- * SF-120509-TC and SF-117282-PS.
- */
-#define MC_CMD_CLP 0x56
-#undef MC_CMD_0x56_PRIVILEGE_CTG
-
-#define MC_CMD_0x56_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_CLP_IN msgrequest */
-#define MC_CMD_CLP_IN_LEN 4
-/* Sub operation */
-#define MC_CMD_CLP_IN_OP_OFST 0
-#define MC_CMD_CLP_IN_OP_LEN 4
-/* enum: Return to factory default settings */
-#define MC_CMD_CLP_OP_DEFAULT 0x1
-/* enum: Set MAC address */
-#define MC_CMD_CLP_OP_SET_MAC 0x2
-/* enum: Get MAC address */
-#define MC_CMD_CLP_OP_GET_MAC 0x3
-/* enum: Set UEFI/GPXE boot mode */
-#define MC_CMD_CLP_OP_SET_BOOT 0x4
-/* enum: Get UEFI/GPXE boot mode */
-#define MC_CMD_CLP_OP_GET_BOOT 0x5
-
-/* MC_CMD_CLP_OUT msgresponse */
-#define MC_CMD_CLP_OUT_LEN 0
-
-/* MC_CMD_CLP_IN_DEFAULT msgrequest */
-#define MC_CMD_CLP_IN_DEFAULT_LEN 4
-/* MC_CMD_CLP_IN_OP_OFST 0 */
-/* MC_CMD_CLP_IN_OP_LEN 4 */
-
-/* MC_CMD_CLP_OUT_DEFAULT msgresponse */
-#define MC_CMD_CLP_OUT_DEFAULT_LEN 0
-
-/* MC_CMD_CLP_IN_SET_MAC msgrequest */
-#define MC_CMD_CLP_IN_SET_MAC_LEN 12
-/* MC_CMD_CLP_IN_OP_OFST 0 */
-/* MC_CMD_CLP_IN_OP_LEN 4 */
-/* The MAC address assigned to port. A zero MAC address of 00:00:00:00:00:00
- * restores the permanent (factory-programmed) MAC address associated with the
- * port. A non-zero MAC address persists until a PCIe reset or a power cycle.
- */
-#define MC_CMD_CLP_IN_SET_MAC_ADDR_OFST 4
-#define MC_CMD_CLP_IN_SET_MAC_ADDR_LEN 6
-/* Padding */
-#define MC_CMD_CLP_IN_SET_MAC_RESERVED_OFST 10
-#define MC_CMD_CLP_IN_SET_MAC_RESERVED_LEN 2
-
-/* MC_CMD_CLP_OUT_SET_MAC msgresponse */
-#define MC_CMD_CLP_OUT_SET_MAC_LEN 0
-
-/* MC_CMD_CLP_IN_SET_MAC_V2 msgrequest */
-#define MC_CMD_CLP_IN_SET_MAC_V2_LEN 16
-/* MC_CMD_CLP_IN_OP_OFST 0 */
-/* MC_CMD_CLP_IN_OP_LEN 4 */
-/* The MAC address assigned to port. A zero MAC address of 00:00:00:00:00:00
- * restores the permanent (factory-programmed) MAC address associated with the
- * port. A non-zero MAC address persists until a PCIe reset or a power cycle.
- */
-#define MC_CMD_CLP_IN_SET_MAC_V2_ADDR_OFST 4
-#define MC_CMD_CLP_IN_SET_MAC_V2_ADDR_LEN 6
-/* Padding */
-#define MC_CMD_CLP_IN_SET_MAC_V2_RESERVED_OFST 10
-#define MC_CMD_CLP_IN_SET_MAC_V2_RESERVED_LEN 2
-#define MC_CMD_CLP_IN_SET_MAC_V2_FLAGS_OFST 12
-#define MC_CMD_CLP_IN_SET_MAC_V2_FLAGS_LEN 4
-#define MC_CMD_CLP_IN_SET_MAC_V2_VIRTUAL_OFST 12
-#define MC_CMD_CLP_IN_SET_MAC_V2_VIRTUAL_LBN 0
-#define MC_CMD_CLP_IN_SET_MAC_V2_VIRTUAL_WIDTH 1
-
-/* MC_CMD_CLP_IN_GET_MAC msgrequest */
-#define MC_CMD_CLP_IN_GET_MAC_LEN 4
-/* MC_CMD_CLP_IN_OP_OFST 0 */
-/* MC_CMD_CLP_IN_OP_LEN 4 */
-
-/* MC_CMD_CLP_IN_GET_MAC_V2 msgrequest */
-#define MC_CMD_CLP_IN_GET_MAC_V2_LEN 8
-/* MC_CMD_CLP_IN_OP_OFST 0 */
-/* MC_CMD_CLP_IN_OP_LEN 4 */
-#define MC_CMD_CLP_IN_GET_MAC_V2_FLAGS_OFST 4
-#define MC_CMD_CLP_IN_GET_MAC_V2_FLAGS_LEN 4
-#define MC_CMD_CLP_IN_GET_MAC_V2_PERMANENT_OFST 4
-#define MC_CMD_CLP_IN_GET_MAC_V2_PERMANENT_LBN 0
-#define MC_CMD_CLP_IN_GET_MAC_V2_PERMANENT_WIDTH 1
-
-/* MC_CMD_CLP_OUT_GET_MAC msgresponse */
-#define MC_CMD_CLP_OUT_GET_MAC_LEN 8
-/* MAC address assigned to port */
-#define MC_CMD_CLP_OUT_GET_MAC_ADDR_OFST 0
-#define MC_CMD_CLP_OUT_GET_MAC_ADDR_LEN 6
-/* Padding */
-#define MC_CMD_CLP_OUT_GET_MAC_RESERVED_OFST 6
-#define MC_CMD_CLP_OUT_GET_MAC_RESERVED_LEN 2
-
-/* MC_CMD_CLP_IN_SET_BOOT msgrequest */
-#define MC_CMD_CLP_IN_SET_BOOT_LEN 5
-/* MC_CMD_CLP_IN_OP_OFST 0 */
-/* MC_CMD_CLP_IN_OP_LEN 4 */
-/* Boot flag */
-#define MC_CMD_CLP_IN_SET_BOOT_FLAG_OFST 4
-#define MC_CMD_CLP_IN_SET_BOOT_FLAG_LEN 1
-
-/* MC_CMD_CLP_OUT_SET_BOOT msgresponse */
-#define MC_CMD_CLP_OUT_SET_BOOT_LEN 0
-
-/* MC_CMD_CLP_IN_GET_BOOT msgrequest */
-#define MC_CMD_CLP_IN_GET_BOOT_LEN 4
-/* MC_CMD_CLP_IN_OP_OFST 0 */
-/* MC_CMD_CLP_IN_OP_LEN 4 */
-
-/* MC_CMD_CLP_OUT_GET_BOOT msgresponse */
-#define MC_CMD_CLP_OUT_GET_BOOT_LEN 4
-/* Boot flag */
-#define MC_CMD_CLP_OUT_GET_BOOT_FLAG_OFST 0
-#define MC_CMD_CLP_OUT_GET_BOOT_FLAG_LEN 1
-/* Padding */
-#define MC_CMD_CLP_OUT_GET_BOOT_RESERVED_OFST 1
-#define MC_CMD_CLP_OUT_GET_BOOT_RESERVED_LEN 3
-
-
-/***********************************/
-/* MC_CMD_MUM
- * Perform a MUM operation
- */
-#define MC_CMD_MUM 0x57
-#undef MC_CMD_0x57_PRIVILEGE_CTG
-
-#define MC_CMD_0x57_PRIVILEGE_CTG SRIOV_CTG_INSECURE
-
-/* MC_CMD_MUM_IN msgrequest */
-#define MC_CMD_MUM_IN_LEN 4
-#define MC_CMD_MUM_IN_OP_HDR_OFST 0
-#define MC_CMD_MUM_IN_OP_HDR_LEN 4
-#define MC_CMD_MUM_IN_OP_OFST 0
-#define MC_CMD_MUM_IN_OP_LBN 0
-#define MC_CMD_MUM_IN_OP_WIDTH 8
-/* enum: NULL MCDI command to MUM */
-#define MC_CMD_MUM_OP_NULL 0x1
-/* enum: Get MUM version */
-#define MC_CMD_MUM_OP_GET_VERSION 0x2
-/* enum: Issue raw I2C command to MUM */
-#define MC_CMD_MUM_OP_RAW_CMD 0x3
-/* enum: Read from registers on devices connected to MUM. */
-#define MC_CMD_MUM_OP_READ 0x4
-/* enum: Write to registers on devices connected to MUM. */
-#define MC_CMD_MUM_OP_WRITE 0x5
-/* enum: Control UART logging. */
-#define MC_CMD_MUM_OP_LOG 0x6
-/* enum: Operations on MUM GPIO lines */
-#define MC_CMD_MUM_OP_GPIO 0x7
-/* enum: Get sensor readings from MUM */
-#define MC_CMD_MUM_OP_READ_SENSORS 0x8
-/* enum: Initiate clock programming on the MUM */
-#define MC_CMD_MUM_OP_PROGRAM_CLOCKS 0x9
-/* enum: Initiate FPGA load from flash on the MUM */
-#define MC_CMD_MUM_OP_FPGA_LOAD 0xa
-/* enum: Request sensor reading from MUM ADC resulting from earlier request via
- * MUM ATB
- */
-#define MC_CMD_MUM_OP_READ_ATB_SENSOR 0xb
-/* enum: Send commands relating to the QSFP ports via the MUM for PHY
- * operations
- */
-#define MC_CMD_MUM_OP_QSFP 0xc
-/* enum: Request discrete and SODIMM DDR info (type, size, speed grade, voltage
- * level) from MUM
- */
-#define MC_CMD_MUM_OP_READ_DDR_INFO 0xd
-
-/* MC_CMD_MUM_IN_NULL msgrequest */
-#define MC_CMD_MUM_IN_NULL_LEN 4
-/* MUM cmd header */
-#define MC_CMD_MUM_IN_CMD_OFST 0
-#define MC_CMD_MUM_IN_CMD_LEN 4
-
-/* MC_CMD_MUM_IN_GET_VERSION msgrequest */
-#define MC_CMD_MUM_IN_GET_VERSION_LEN 4
-/* MUM cmd header */
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-
-/* MC_CMD_MUM_IN_READ msgrequest */
-#define MC_CMD_MUM_IN_READ_LEN 16
-/* MUM cmd header */
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-/* ID of (device connected to MUM) to read from registers of */
-#define MC_CMD_MUM_IN_READ_DEVICE_OFST 4
-#define MC_CMD_MUM_IN_READ_DEVICE_LEN 4
-/* enum: Hittite HMC1035 clock generator on Sorrento board */
-#define MC_CMD_MUM_DEV_HITTITE 0x1
-/* enum: Hittite HMC1035 clock generator for NIC-side on Sorrento board */
-#define MC_CMD_MUM_DEV_HITTITE_NIC 0x2
-/* 32-bit address to read from */
-#define MC_CMD_MUM_IN_READ_ADDR_OFST 8
-#define MC_CMD_MUM_IN_READ_ADDR_LEN 4
-/* Number of words to read. */
-#define MC_CMD_MUM_IN_READ_NUMWORDS_OFST 12
-#define MC_CMD_MUM_IN_READ_NUMWORDS_LEN 4
-
-/* MC_CMD_MUM_IN_WRITE msgrequest */
-#define MC_CMD_MUM_IN_WRITE_LENMIN 16
-#define MC_CMD_MUM_IN_WRITE_LENMAX 252
-#define MC_CMD_MUM_IN_WRITE_LENMAX_MCDI2 1020
-#define MC_CMD_MUM_IN_WRITE_LEN(num) (12+4*(num))
-#define MC_CMD_MUM_IN_WRITE_BUFFER_NUM(len) (((len)-12)/4)
-/* MUM cmd header */
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-/* ID of (device connected to MUM) to write to registers of */
-#define MC_CMD_MUM_IN_WRITE_DEVICE_OFST 4
-#define MC_CMD_MUM_IN_WRITE_DEVICE_LEN 4
-/* enum: Hittite HMC1035 clock generator on Sorrento board */
-/* MC_CMD_MUM_DEV_HITTITE 0x1 */
-/* 32-bit address to write to */
-#define MC_CMD_MUM_IN_WRITE_ADDR_OFST 8
-#define MC_CMD_MUM_IN_WRITE_ADDR_LEN 4
-/* Words to write */
-#define MC_CMD_MUM_IN_WRITE_BUFFER_OFST 12
-#define MC_CMD_MUM_IN_WRITE_BUFFER_LEN 4
-#define MC_CMD_MUM_IN_WRITE_BUFFER_MINNUM 1
-#define MC_CMD_MUM_IN_WRITE_BUFFER_MAXNUM 60
-#define MC_CMD_MUM_IN_WRITE_BUFFER_MAXNUM_MCDI2 252
-
-/* MC_CMD_MUM_IN_RAW_CMD msgrequest */
-#define MC_CMD_MUM_IN_RAW_CMD_LENMIN 17
-#define MC_CMD_MUM_IN_RAW_CMD_LENMAX 252
-#define MC_CMD_MUM_IN_RAW_CMD_LENMAX_MCDI2 1020
-#define MC_CMD_MUM_IN_RAW_CMD_LEN(num) (16+1*(num))
-#define MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_NUM(len) (((len)-16)/1)
-/* MUM cmd header */
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-/* MUM I2C cmd code */
-#define MC_CMD_MUM_IN_RAW_CMD_CMD_CODE_OFST 4
-#define MC_CMD_MUM_IN_RAW_CMD_CMD_CODE_LEN 4
-/* Number of bytes to write */
-#define MC_CMD_MUM_IN_RAW_CMD_NUM_WRITE_OFST 8
-#define MC_CMD_MUM_IN_RAW_CMD_NUM_WRITE_LEN 4
-/* Number of bytes to read */
-#define MC_CMD_MUM_IN_RAW_CMD_NUM_READ_OFST 12
-#define MC_CMD_MUM_IN_RAW_CMD_NUM_READ_LEN 4
-/* Bytes to write */
-#define MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_OFST 16
-#define MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_LEN 1
-#define MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_MINNUM 1
-#define MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_MAXNUM 236
-#define MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_MAXNUM_MCDI2 1004
-
-/* MC_CMD_MUM_IN_LOG msgrequest */
-#define MC_CMD_MUM_IN_LOG_LEN 8
-/* MUM cmd header */
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-#define MC_CMD_MUM_IN_LOG_OP_OFST 4
-#define MC_CMD_MUM_IN_LOG_OP_LEN 4
-#define MC_CMD_MUM_IN_LOG_OP_UART 0x1 /* enum */
-
-/* MC_CMD_MUM_IN_LOG_OP_UART msgrequest */
-#define MC_CMD_MUM_IN_LOG_OP_UART_LEN 12
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-/* MC_CMD_MUM_IN_LOG_OP_OFST 4 */
-/* MC_CMD_MUM_IN_LOG_OP_LEN 4 */
-/* Enable/disable debug output to UART */
-#define MC_CMD_MUM_IN_LOG_OP_UART_ENABLE_OFST 8
-#define MC_CMD_MUM_IN_LOG_OP_UART_ENABLE_LEN 4
-
-/* MC_CMD_MUM_IN_GPIO msgrequest */
-#define MC_CMD_MUM_IN_GPIO_LEN 8
-/* MUM cmd header */
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-#define MC_CMD_MUM_IN_GPIO_HDR_OFST 4
-#define MC_CMD_MUM_IN_GPIO_HDR_LEN 4
-#define MC_CMD_MUM_IN_GPIO_OPCODE_OFST 4
-#define MC_CMD_MUM_IN_GPIO_OPCODE_LBN 0
-#define MC_CMD_MUM_IN_GPIO_OPCODE_WIDTH 8
-#define MC_CMD_MUM_IN_GPIO_IN_READ 0x0 /* enum */
-#define MC_CMD_MUM_IN_GPIO_OUT_WRITE 0x1 /* enum */
-#define MC_CMD_MUM_IN_GPIO_OUT_READ 0x2 /* enum */
-#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE 0x3 /* enum */
-#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ 0x4 /* enum */
-#define MC_CMD_MUM_IN_GPIO_OP 0x5 /* enum */
-
-/* MC_CMD_MUM_IN_GPIO_IN_READ msgrequest */
-#define MC_CMD_MUM_IN_GPIO_IN_READ_LEN 8
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-#define MC_CMD_MUM_IN_GPIO_IN_READ_HDR_OFST 4
-#define MC_CMD_MUM_IN_GPIO_IN_READ_HDR_LEN 4
-
-/* MC_CMD_MUM_IN_GPIO_OUT_WRITE msgrequest */
-#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_LEN 16
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_HDR_OFST 4
-#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_HDR_LEN 4
-/* The first 32-bit word to be written to the GPIO OUT register. */
-#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_GPIOMASK1_OFST 8
-#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_GPIOMASK1_LEN 4
-/* The second 32-bit word to be written to the GPIO OUT register. */
-#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_GPIOMASK2_OFST 12
-#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_GPIOMASK2_LEN 4
-
-/* MC_CMD_MUM_IN_GPIO_OUT_READ msgrequest */
-#define MC_CMD_MUM_IN_GPIO_OUT_READ_LEN 8
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-#define MC_CMD_MUM_IN_GPIO_OUT_READ_HDR_OFST 4
-#define MC_CMD_MUM_IN_GPIO_OUT_READ_HDR_LEN 4
-
-/* MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE msgrequest */
-#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_LEN 16
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_HDR_OFST 4
-#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_HDR_LEN 4
-/* The first 32-bit word to be written to the GPIO OUT ENABLE register. */
-#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_GPIOMASK1_OFST 8
-#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_GPIOMASK1_LEN 4
-/* The second 32-bit word to be written to the GPIO OUT ENABLE register. */
-#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_GPIOMASK2_OFST 12
-#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_GPIOMASK2_LEN 4
-
-/* MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ msgrequest */
-#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ_LEN 8
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ_HDR_OFST 4
-#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ_HDR_LEN 4
-
-/* MC_CMD_MUM_IN_GPIO_OP msgrequest */
-#define MC_CMD_MUM_IN_GPIO_OP_LEN 8
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-#define MC_CMD_MUM_IN_GPIO_OP_HDR_OFST 4
-#define MC_CMD_MUM_IN_GPIO_OP_HDR_LEN 4
-#define MC_CMD_MUM_IN_GPIO_OP_BITWISE_OP_OFST 4
-#define MC_CMD_MUM_IN_GPIO_OP_BITWISE_OP_LBN 8
-#define MC_CMD_MUM_IN_GPIO_OP_BITWISE_OP_WIDTH 8
-#define MC_CMD_MUM_IN_GPIO_OP_OUT_READ 0x0 /* enum */
-#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE 0x1 /* enum */
-#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG 0x2 /* enum */
-#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE 0x3 /* enum */
-#define MC_CMD_MUM_IN_GPIO_OP_GPIO_NUMBER_OFST 4
-#define MC_CMD_MUM_IN_GPIO_OP_GPIO_NUMBER_LBN 16
-#define MC_CMD_MUM_IN_GPIO_OP_GPIO_NUMBER_WIDTH 8
-
-/* MC_CMD_MUM_IN_GPIO_OP_OUT_READ msgrequest */
-#define MC_CMD_MUM_IN_GPIO_OP_OUT_READ_LEN 8
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-#define MC_CMD_MUM_IN_GPIO_OP_OUT_READ_HDR_OFST 4
-#define MC_CMD_MUM_IN_GPIO_OP_OUT_READ_HDR_LEN 4
-
-/* MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE msgrequest */
-#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_LEN 8
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_HDR_OFST 4
-#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_HDR_LEN 4
-#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_WRITEBIT_OFST 4
-#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_WRITEBIT_LBN 24
-#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_WRITEBIT_WIDTH 8
-
-/* MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG msgrequest */
-#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_LEN 8
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_HDR_OFST 4
-#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_HDR_LEN 4
-#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_CFG_OFST 4
-#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_CFG_LBN 24
-#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_CFG_WIDTH 8
-
-/* MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE msgrequest */
-#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_LEN 8
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_HDR_OFST 4
-#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_HDR_LEN 4
-#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_ENABLEBIT_OFST 4
-#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_ENABLEBIT_LBN 24
-#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_ENABLEBIT_WIDTH 8
-
-/* MC_CMD_MUM_IN_READ_SENSORS msgrequest */
-#define MC_CMD_MUM_IN_READ_SENSORS_LEN 8
-/* MUM cmd header */
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-#define MC_CMD_MUM_IN_READ_SENSORS_PARAMS_OFST 4
-#define MC_CMD_MUM_IN_READ_SENSORS_PARAMS_LEN 4
-#define MC_CMD_MUM_IN_READ_SENSORS_SENSOR_ID_OFST 4
-#define MC_CMD_MUM_IN_READ_SENSORS_SENSOR_ID_LBN 0
-#define MC_CMD_MUM_IN_READ_SENSORS_SENSOR_ID_WIDTH 8
-#define MC_CMD_MUM_IN_READ_SENSORS_NUM_SENSORS_OFST 4
-#define MC_CMD_MUM_IN_READ_SENSORS_NUM_SENSORS_LBN 8
-#define MC_CMD_MUM_IN_READ_SENSORS_NUM_SENSORS_WIDTH 8
-
-/* MC_CMD_MUM_IN_PROGRAM_CLOCKS msgrequest */
-#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_LEN 12
-/* MUM cmd header */
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-/* Bit-mask of clocks to be programmed */
-#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_MASK_OFST 4
-#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_MASK_LEN 4
-#define MC_CMD_MUM_CLOCK_ID_FPGA 0x0 /* enum */
-#define MC_CMD_MUM_CLOCK_ID_DDR 0x1 /* enum */
-#define MC_CMD_MUM_CLOCK_ID_NIC 0x2 /* enum */
-/* Control flags for clock programming */
-#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_FLAGS_OFST 8
-#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_FLAGS_LEN 4
-#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_OVERCLOCK_110_OFST 8
-#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_OVERCLOCK_110_LBN 0
-#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_OVERCLOCK_110_WIDTH 1
-#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_CLOCK_NIC_FROM_FPGA_OFST 8
-#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_CLOCK_NIC_FROM_FPGA_LBN 1
-#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_CLOCK_NIC_FROM_FPGA_WIDTH 1
-#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_CLOCK_REF_FROM_XO_OFST 8
-#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_CLOCK_REF_FROM_XO_LBN 2
-#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_CLOCK_REF_FROM_XO_WIDTH 1
-
-/* MC_CMD_MUM_IN_FPGA_LOAD msgrequest */
-#define MC_CMD_MUM_IN_FPGA_LOAD_LEN 8
-/* MUM cmd header */
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-/* Enable/Disable FPGA config from flash */
-#define MC_CMD_MUM_IN_FPGA_LOAD_ENABLE_OFST 4
-#define MC_CMD_MUM_IN_FPGA_LOAD_ENABLE_LEN 4
-
-/* MC_CMD_MUM_IN_READ_ATB_SENSOR msgrequest */
-#define MC_CMD_MUM_IN_READ_ATB_SENSOR_LEN 4
-/* MUM cmd header */
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-
-/* MC_CMD_MUM_IN_QSFP msgrequest */
-#define MC_CMD_MUM_IN_QSFP_LEN 12
-/* MUM cmd header */
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-#define MC_CMD_MUM_IN_QSFP_HDR_OFST 4
-#define MC_CMD_MUM_IN_QSFP_HDR_LEN 4
-#define MC_CMD_MUM_IN_QSFP_OPCODE_OFST 4
-#define MC_CMD_MUM_IN_QSFP_OPCODE_LBN 0
-#define MC_CMD_MUM_IN_QSFP_OPCODE_WIDTH 4
-#define MC_CMD_MUM_IN_QSFP_INIT 0x0 /* enum */
-#define MC_CMD_MUM_IN_QSFP_RECONFIGURE 0x1 /* enum */
-#define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP 0x2 /* enum */
-#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO 0x3 /* enum */
-#define MC_CMD_MUM_IN_QSFP_FILL_STATS 0x4 /* enum */
-#define MC_CMD_MUM_IN_QSFP_POLL_BIST 0x5 /* enum */
-#define MC_CMD_MUM_IN_QSFP_IDX_OFST 8
-#define MC_CMD_MUM_IN_QSFP_IDX_LEN 4
-
-/* MC_CMD_MUM_IN_QSFP_INIT msgrequest */
-#define MC_CMD_MUM_IN_QSFP_INIT_LEN 16
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-#define MC_CMD_MUM_IN_QSFP_INIT_HDR_OFST 4
-#define MC_CMD_MUM_IN_QSFP_INIT_HDR_LEN 4
-#define MC_CMD_MUM_IN_QSFP_INIT_IDX_OFST 8
-#define MC_CMD_MUM_IN_QSFP_INIT_IDX_LEN 4
-#define MC_CMD_MUM_IN_QSFP_INIT_CAGE_OFST 12
-#define MC_CMD_MUM_IN_QSFP_INIT_CAGE_LEN 4
-
-/* MC_CMD_MUM_IN_QSFP_RECONFIGURE msgrequest */
-#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_LEN 24
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_HDR_OFST 4
-#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_HDR_LEN 4
-#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_IDX_OFST 8
-#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_IDX_LEN 4
-#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_TX_DISABLE_OFST 12
-#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_TX_DISABLE_LEN 4
-#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_PORT_LANES_OFST 16
-#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_PORT_LANES_LEN 4
-#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_PORT_LINK_SPEED_OFST 20
-#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_PORT_LINK_SPEED_LEN 4
-
-/* MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP msgrequest */
-#define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_LEN 12
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-#define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_HDR_OFST 4
-#define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_HDR_LEN 4
-#define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_IDX_OFST 8
-#define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_IDX_LEN 4
-
-/* MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO msgrequest */
-#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_LEN 16
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_HDR_OFST 4
-#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_HDR_LEN 4
-#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_IDX_OFST 8
-#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_IDX_LEN 4
-#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_PAGE_OFST 12
-#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_PAGE_LEN 4
-
-/* MC_CMD_MUM_IN_QSFP_FILL_STATS msgrequest */
-#define MC_CMD_MUM_IN_QSFP_FILL_STATS_LEN 12
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-#define MC_CMD_MUM_IN_QSFP_FILL_STATS_HDR_OFST 4
-#define MC_CMD_MUM_IN_QSFP_FILL_STATS_HDR_LEN 4
-#define MC_CMD_MUM_IN_QSFP_FILL_STATS_IDX_OFST 8
-#define MC_CMD_MUM_IN_QSFP_FILL_STATS_IDX_LEN 4
-
-/* MC_CMD_MUM_IN_QSFP_POLL_BIST msgrequest */
-#define MC_CMD_MUM_IN_QSFP_POLL_BIST_LEN 12
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-#define MC_CMD_MUM_IN_QSFP_POLL_BIST_HDR_OFST 4
-#define MC_CMD_MUM_IN_QSFP_POLL_BIST_HDR_LEN 4
-#define MC_CMD_MUM_IN_QSFP_POLL_BIST_IDX_OFST 8
-#define MC_CMD_MUM_IN_QSFP_POLL_BIST_IDX_LEN 4
-
-/* MC_CMD_MUM_IN_READ_DDR_INFO msgrequest */
-#define MC_CMD_MUM_IN_READ_DDR_INFO_LEN 4
-/* MUM cmd header */
-/* MC_CMD_MUM_IN_CMD_OFST 0 */
-/* MC_CMD_MUM_IN_CMD_LEN 4 */
-
-/* MC_CMD_MUM_OUT msgresponse */
-#define MC_CMD_MUM_OUT_LEN 0
-
-/* MC_CMD_MUM_OUT_NULL msgresponse */
-#define MC_CMD_MUM_OUT_NULL_LEN 0
-
-/* MC_CMD_MUM_OUT_GET_VERSION msgresponse */
-#define MC_CMD_MUM_OUT_GET_VERSION_LEN 12
-#define MC_CMD_MUM_OUT_GET_VERSION_FIRMWARE_OFST 0
-#define MC_CMD_MUM_OUT_GET_VERSION_FIRMWARE_LEN 4
-#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_OFST 4
-#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_LEN 8
-#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_LO_OFST 4
-#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_LO_LEN 4
-#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_LO_LBN 32
-#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_LO_WIDTH 32
-#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_HI_OFST 8
-#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_HI_LEN 4
-#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_HI_LBN 64
-#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_HI_WIDTH 32
-
-/* MC_CMD_MUM_OUT_RAW_CMD msgresponse */
-#define MC_CMD_MUM_OUT_RAW_CMD_LENMIN 1
-#define MC_CMD_MUM_OUT_RAW_CMD_LENMAX 252
-#define MC_CMD_MUM_OUT_RAW_CMD_LENMAX_MCDI2 1020
-#define MC_CMD_MUM_OUT_RAW_CMD_LEN(num) (0+1*(num))
-#define MC_CMD_MUM_OUT_RAW_CMD_DATA_NUM(len) (((len)-0)/1)
-/* returned data */
-#define MC_CMD_MUM_OUT_RAW_CMD_DATA_OFST 0
-#define MC_CMD_MUM_OUT_RAW_CMD_DATA_LEN 1
-#define MC_CMD_MUM_OUT_RAW_CMD_DATA_MINNUM 1
-#define MC_CMD_MUM_OUT_RAW_CMD_DATA_MAXNUM 252
-#define MC_CMD_MUM_OUT_RAW_CMD_DATA_MAXNUM_MCDI2 1020
-
-/* MC_CMD_MUM_OUT_READ msgresponse */
-#define MC_CMD_MUM_OUT_READ_LENMIN 4
-#define MC_CMD_MUM_OUT_READ_LENMAX 252
-#define MC_CMD_MUM_OUT_READ_LENMAX_MCDI2 1020
-#define MC_CMD_MUM_OUT_READ_LEN(num) (0+4*(num))
-#define MC_CMD_MUM_OUT_READ_BUFFER_NUM(len) (((len)-0)/4)
-#define MC_CMD_MUM_OUT_READ_BUFFER_OFST 0
-#define MC_CMD_MUM_OUT_READ_BUFFER_LEN 4
-#define MC_CMD_MUM_OUT_READ_BUFFER_MINNUM 1
-#define MC_CMD_MUM_OUT_READ_BUFFER_MAXNUM 63
-#define MC_CMD_MUM_OUT_READ_BUFFER_MAXNUM_MCDI2 255
-
-/* MC_CMD_MUM_OUT_WRITE msgresponse */
-#define MC_CMD_MUM_OUT_WRITE_LEN 0
-
-/* MC_CMD_MUM_OUT_LOG msgresponse */
-#define MC_CMD_MUM_OUT_LOG_LEN 0
-
-/* MC_CMD_MUM_OUT_LOG_OP_UART msgresponse */
-#define MC_CMD_MUM_OUT_LOG_OP_UART_LEN 0
-
-/* MC_CMD_MUM_OUT_GPIO_IN_READ msgresponse */
-#define MC_CMD_MUM_OUT_GPIO_IN_READ_LEN 8
-/* The first 32-bit word read from the GPIO IN register. */
-#define MC_CMD_MUM_OUT_GPIO_IN_READ_GPIOMASK1_OFST 0
-#define MC_CMD_MUM_OUT_GPIO_IN_READ_GPIOMASK1_LEN 4
-/* The second 32-bit word read from the GPIO IN register. */
-#define MC_CMD_MUM_OUT_GPIO_IN_READ_GPIOMASK2_OFST 4
-#define MC_CMD_MUM_OUT_GPIO_IN_READ_GPIOMASK2_LEN 4
-
-/* MC_CMD_MUM_OUT_GPIO_OUT_WRITE msgresponse */
-#define MC_CMD_MUM_OUT_GPIO_OUT_WRITE_LEN 0
-
-/* MC_CMD_MUM_OUT_GPIO_OUT_READ msgresponse */
-#define MC_CMD_MUM_OUT_GPIO_OUT_READ_LEN 8
-/* The first 32-bit word read from the GPIO OUT register. */
-#define MC_CMD_MUM_OUT_GPIO_OUT_READ_GPIOMASK1_OFST 0
-#define MC_CMD_MUM_OUT_GPIO_OUT_READ_GPIOMASK1_LEN 4
-/* The second 32-bit word read from the GPIO OUT register. */
-#define MC_CMD_MUM_OUT_GPIO_OUT_READ_GPIOMASK2_OFST 4
-#define MC_CMD_MUM_OUT_GPIO_OUT_READ_GPIOMASK2_LEN 4
-
-/* MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_WRITE msgresponse */
-#define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_WRITE_LEN 0
-
-/* MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ msgresponse */
-#define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_LEN 8
-#define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_GPIOMASK1_OFST 0
-#define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_GPIOMASK1_LEN 4
-#define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_GPIOMASK2_OFST 4
-#define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_GPIOMASK2_LEN 4
-
-/* MC_CMD_MUM_OUT_GPIO_OP_OUT_READ msgresponse */
-#define MC_CMD_MUM_OUT_GPIO_OP_OUT_READ_LEN 4
-#define MC_CMD_MUM_OUT_GPIO_OP_OUT_READ_BIT_READ_OFST 0
-#define MC_CMD_MUM_OUT_GPIO_OP_OUT_READ_BIT_READ_LEN 4
-
-/* MC_CMD_MUM_OUT_GPIO_OP_OUT_WRITE msgresponse */
-#define MC_CMD_MUM_OUT_GPIO_OP_OUT_WRITE_LEN 0
-
-/* MC_CMD_MUM_OUT_GPIO_OP_OUT_CONFIG msgresponse */
-#define MC_CMD_MUM_OUT_GPIO_OP_OUT_CONFIG_LEN 0
-
-/* MC_CMD_MUM_OUT_GPIO_OP_OUT_ENABLE msgresponse */
-#define MC_CMD_MUM_OUT_GPIO_OP_OUT_ENABLE_LEN 0
-
-/* MC_CMD_MUM_OUT_READ_SENSORS msgresponse */
-#define MC_CMD_MUM_OUT_READ_SENSORS_LENMIN 4
-#define MC_CMD_MUM_OUT_READ_SENSORS_LENMAX 252
-#define MC_CMD_MUM_OUT_READ_SENSORS_LENMAX_MCDI2 1020
-#define MC_CMD_MUM_OUT_READ_SENSORS_LEN(num) (0+4*(num))
-#define MC_CMD_MUM_OUT_READ_SENSORS_DATA_NUM(len) (((len)-0)/4)
-#define MC_CMD_MUM_OUT_READ_SENSORS_DATA_OFST 0
-#define MC_CMD_MUM_OUT_READ_SENSORS_DATA_LEN 4
-#define MC_CMD_MUM_OUT_READ_SENSORS_DATA_MINNUM 1
-#define MC_CMD_MUM_OUT_READ_SENSORS_DATA_MAXNUM 63
-#define MC_CMD_MUM_OUT_READ_SENSORS_DATA_MAXNUM_MCDI2 255
-#define MC_CMD_MUM_OUT_READ_SENSORS_READING_OFST 0
-#define MC_CMD_MUM_OUT_READ_SENSORS_READING_LBN 0
-#define MC_CMD_MUM_OUT_READ_SENSORS_READING_WIDTH 16
-#define MC_CMD_MUM_OUT_READ_SENSORS_STATE_OFST 0
-#define MC_CMD_MUM_OUT_READ_SENSORS_STATE_LBN 16
-#define MC_CMD_MUM_OUT_READ_SENSORS_STATE_WIDTH 8
-#define MC_CMD_MUM_OUT_READ_SENSORS_TYPE_OFST 0
-#define MC_CMD_MUM_OUT_READ_SENSORS_TYPE_LBN 24
-#define MC_CMD_MUM_OUT_READ_SENSORS_TYPE_WIDTH 8
-
-/* MC_CMD_MUM_OUT_PROGRAM_CLOCKS msgresponse */
-#define MC_CMD_MUM_OUT_PROGRAM_CLOCKS_LEN 4
-#define MC_CMD_MUM_OUT_PROGRAM_CLOCKS_OK_MASK_OFST 0
-#define MC_CMD_MUM_OUT_PROGRAM_CLOCKS_OK_MASK_LEN 4
-
-/* MC_CMD_MUM_OUT_FPGA_LOAD msgresponse */
-#define MC_CMD_MUM_OUT_FPGA_LOAD_LEN 0
-
-/* MC_CMD_MUM_OUT_READ_ATB_SENSOR msgresponse */
-#define MC_CMD_MUM_OUT_READ_ATB_SENSOR_LEN 4
-#define MC_CMD_MUM_OUT_READ_ATB_SENSOR_RESULT_OFST 0
-#define MC_CMD_MUM_OUT_READ_ATB_SENSOR_RESULT_LEN 4
-
-/* MC_CMD_MUM_OUT_QSFP_INIT msgresponse */
-#define MC_CMD_MUM_OUT_QSFP_INIT_LEN 0
-
-/* MC_CMD_MUM_OUT_QSFP_RECONFIGURE msgresponse */
-#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_LEN 8
-#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_LP_CAP_OFST 0
-#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_LP_CAP_LEN 4
-#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_FLAGS_OFST 4
-#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_FLAGS_LEN 4
-#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_READY_OFST 4
-#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_READY_LBN 0
-#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_READY_WIDTH 1
-#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_LINK_UP_OFST 4
-#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_LINK_UP_LBN 1
-#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_LINK_UP_WIDTH 1
-
-/* MC_CMD_MUM_OUT_QSFP_GET_SUPPORTED_CAP msgresponse */
-#define MC_CMD_MUM_OUT_QSFP_GET_SUPPORTED_CAP_LEN 4
-#define MC_CMD_MUM_OUT_QSFP_GET_SUPPORTED_CAP_PORT_PHY_LP_CAP_OFST 0
-#define MC_CMD_MUM_OUT_QSFP_GET_SUPPORTED_CAP_PORT_PHY_LP_CAP_LEN 4
-
-/* MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO msgresponse */
-#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_LENMIN 5
-#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_LENMAX 252
-#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_LENMAX_MCDI2 1020
-#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_LEN(num) (4+1*(num))
-#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_NUM(len) (((len)-4)/1)
-/* in bytes */
-#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATALEN_OFST 0
-#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATALEN_LEN 4
-#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_OFST 4
-#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_LEN 1
-#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_MINNUM 1
-#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_MAXNUM 248
-#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_MAXNUM_MCDI2 1016
-
-/* MC_CMD_MUM_OUT_QSFP_FILL_STATS msgresponse */
-#define MC_CMD_MUM_OUT_QSFP_FILL_STATS_LEN 8
-#define MC_CMD_MUM_OUT_QSFP_FILL_STATS_PORT_PHY_STATS_PMA_PMD_LINK_UP_OFST 0
-#define MC_CMD_MUM_OUT_QSFP_FILL_STATS_PORT_PHY_STATS_PMA_PMD_LINK_UP_LEN 4
-#define MC_CMD_MUM_OUT_QSFP_FILL_STATS_PORT_PHY_STATS_PCS_LINK_UP_OFST 4
-#define MC_CMD_MUM_OUT_QSFP_FILL_STATS_PORT_PHY_STATS_PCS_LINK_UP_LEN 4
-
-/* MC_CMD_MUM_OUT_QSFP_POLL_BIST msgresponse */
-#define MC_CMD_MUM_OUT_QSFP_POLL_BIST_LEN 4
-#define MC_CMD_MUM_OUT_QSFP_POLL_BIST_TEST_OFST 0
-#define MC_CMD_MUM_OUT_QSFP_POLL_BIST_TEST_LEN 4
-
-/* MC_CMD_MUM_OUT_READ_DDR_INFO msgresponse */
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_LENMIN 24
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_LENMAX 248
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_LENMAX_MCDI2 1016
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_LEN(num) (8+8*(num))
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_NUM(len) (((len)-8)/8)
-/* Discrete (soldered) DDR resistor strap info */
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_DISCRETE_DDR_INFO_OFST 0
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_DISCRETE_DDR_INFO_LEN 4
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_VRATIO_OFST 0
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_VRATIO_LBN 0
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_VRATIO_WIDTH 16
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_RESERVED1_OFST 0
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_RESERVED1_LBN 16
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_RESERVED1_WIDTH 16
-/* Number of SODIMM info records */
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_NUM_RECORDS_OFST 4
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_NUM_RECORDS_LEN 4
-/* Array of SODIMM info records */
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_OFST 8
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_LEN 8
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_LO_OFST 8
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_LO_LEN 4
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_LO_LBN 64
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_LO_WIDTH 32
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_HI_OFST 12
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_HI_LEN 4
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_HI_LBN 96
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_HI_WIDTH 32
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_MINNUM 2
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_MAXNUM 30
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_MAXNUM_MCDI2 126
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_BANK_ID_OFST 8
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_BANK_ID_LBN 0
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_BANK_ID_WIDTH 8
-/* enum: SODIMM bank 1 (Top SODIMM for Sorrento) */
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_BANK1 0x0
-/* enum: SODIMM bank 2 (Bottom SODDIMM for Sorrento) */
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_BANK2 0x1
-/* enum: Total number of SODIMM banks */
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_NUM_BANKS 0x2
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_TYPE_OFST 8
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_TYPE_LBN 8
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_TYPE_WIDTH 8
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_RANK_OFST 8
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_RANK_LBN 16
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_RANK_WIDTH 4
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_VOLTAGE_OFST 8
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_VOLTAGE_LBN 20
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_VOLTAGE_WIDTH 4
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_NOT_POWERED 0x0 /* enum */
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_1V25 0x1 /* enum */
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_1V35 0x2 /* enum */
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_1V5 0x3 /* enum */
-/* enum: Values 5-15 are reserved for future usage */
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_1V8 0x4
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_SIZE_OFST 8
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_SIZE_LBN 24
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_SIZE_WIDTH 8
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_SPEED_OFST 8
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_SPEED_LBN 32
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_SPEED_WIDTH 16
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_STATE_OFST 8
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_STATE_LBN 48
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_STATE_WIDTH 4
-/* enum: No module present */
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_ABSENT 0x0
-/* enum: Module present supported and powered on */
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_PRESENT_POWERED 0x1
-/* enum: Module present but bad type */
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_PRESENT_BAD_TYPE 0x2
-/* enum: Module present but incompatible voltage */
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_PRESENT_BAD_VOLTAGE 0x3
-/* enum: Module present but unknown SPD */
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_PRESENT_BAD_SPD 0x4
-/* enum: Module present but slot cannot support it */
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_PRESENT_BAD_SLOT 0x5
-/* enum: Modules may or may not be present, but cannot establish contact by I2C
- */
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_NOT_REACHABLE 0x6
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_RESERVED2_OFST 8
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_RESERVED2_LBN 52
-#define MC_CMD_MUM_OUT_READ_DDR_INFO_RESERVED2_WIDTH 12
-
/* MC_CMD_DYNAMIC_SENSORS_LIMITS structuredef: Set of sensor limits. This
* should match the equivalent structure in the sensor_query SPHINX service.
*/
@@ -9500,27 +9137,22 @@
* and a generation count for this version of the sensor table. On systems
* advertising the DYNAMIC_SENSORS capability bit, this replaces the
* MC_CMD_READ_SENSORS command. On multi-MC systems this may include sensors
- * added by the NMC.
- *
- * Sensor handles are persistent for the lifetime of the sensor and are used to
- * identify sensors in MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS and
- * MC_CMD_DYNAMIC_SENSORS_GET_VALUES.
- *
- * The generation count is maintained by the MC, is persistent across reboots
- * and will be incremented each time the sensor table is modified. When the
- * table is modified, a CODE_DYNAMIC_SENSORS_CHANGE event will be generated
- * containing the new generation count. The driver should compare this against
- * the current generation count, and if it is different, call
- * MC_CMD_DYNAMIC_SENSORS_LIST again to update it's copy of the sensor table.
- *
- * The sensor count is provided to allow a future path to supporting more than
+ * added by the NMC. Sensor handles are persistent for the lifetime of the
+ * sensor and are used to identify sensors in
+ * MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS and
+ * MC_CMD_DYNAMIC_SENSORS_GET_VALUES. The generation count is maintained by the
+ * MC, is persistent across reboots and will be incremented each time the
+ * sensor table is modified. When the table is modified, a
+ * CODE_DYNAMIC_SENSORS_CHANGE event will be generated containing the new
+ * generation count. The driver should compare this against the current
+ * generation count, and if it is different, call MC_CMD_DYNAMIC_SENSORS_LIST
+ * again to update its copy of the sensor table. The sensor count is provided
+ * to allow a future path to supporting more than
* MC_CMD_DYNAMIC_SENSORS_GET_READINGS_IN_HANDLES_MAXNUM_MCDI2 sensors, i.e.
* the maximum number that will fit in a single response. As this is a fairly
* large number (253) it is not anticipated that this will be needed in the
- * near future, so can currently be ignored.
- *
- * On Riverhead this command is implemented as a wrapper for `list` in the
- * sensor_query SPHINX service.
+ * near future, so can currently be ignored. On Riverhead this command is
+ * implemented as a wrapper for `list` in the sensor_query SPHINX service.
*/
#define MC_CMD_DYNAMIC_SENSORS_LIST 0x66
#undef MC_CMD_0x66_PRIVILEGE_CTG
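/* Illustrative sketch, not part of the generated MCDI definitions: one way a
 * driver might act on the generation count described above. The refresh
 * callback is a hypothetical helper standing in for a wrapper that reissues
 * MC_CMD_DYNAMIC_SENSORS_LIST and returns the generation from its response.
 */
#include <stdbool.h>
#include <stdint.h>

struct dyn_sensors_state {
	uint32_t generation;	/* generation last returned by the MC */
	bool     table_valid;
};

/* Called when a CODE_DYNAMIC_SENSORS_CHANGE event reports a new generation. */
static void dyn_sensors_on_change(struct dyn_sensors_state *st,
				  uint32_t event_generation,
				  uint32_t (*refresh_table)(void))
{
	if (!st->table_valid || event_generation != st->generation) {
		/* Hypothetical callback: reissue MC_CMD_DYNAMIC_SENSORS_LIST
		 * and return the generation count from that response.
		 */
		st->generation = refresh_table();
		st->table_valid = true;
	}
}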
@@ -9557,15 +9189,13 @@
/***********************************/
/* MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS
* Get descriptions for a set of sensors, specified as an array of sensor
- * handles as returned by MC_CMD_DYNAMIC_SENSORS_LIST
- *
- * Any handles which do not correspond to a sensor currently managed by the MC
- * will be dropped from from the response. This may happen when a sensor table
- * update is in progress, and effectively means the set of usable sensors is
- * the intersection between the sets of sensors known to the driver and the MC.
- *
- * On Riverhead this command is implemented as a wrapper for
- * `get_descriptions` in the sensor_query SPHINX service.
+ * handles as returned by MC_CMD_DYNAMIC_SENSORS_LIST. Any handles which do not
+ * correspond to a sensor currently managed by the MC will be dropped from
+ * the response. This may happen when a sensor table update is in progress, and
+ * effectively means the set of usable sensors is the intersection between the
+ * sets of sensors known to the driver and the MC. On Riverhead this command is
+ * implemented as a wrapper for `get_descriptions` in the sensor_query SPHINX
+ * service.
*/
#define MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS 0x67
#undef MC_CMD_0x67_PRIVILEGE_CTG
@@ -9602,19 +9232,15 @@
/***********************************/
/* MC_CMD_DYNAMIC_SENSORS_GET_READINGS
* Read the state and value for a set of sensors, specified as an array of
- * sensor handles as returned by MC_CMD_DYNAMIC_SENSORS_LIST.
- *
- * In the case of a broken sensor, then the state of the response's
- * MC_CMD_DYNAMIC_SENSORS_VALUE entry will be set to BROKEN, and any value
- * provided should be treated as erroneous.
- *
- * Any handles which do not correspond to a sensor currently managed by the MC
- * will be dropped from from the response. This may happen when a sensor table
- * update is in progress, and effectively means the set of usable sensors is
- * the intersection between the sets of sensors known to the driver and the MC.
- *
- * On Riverhead this command is implemented as a wrapper for `get_readings`
- * in the sensor_query SPHINX service.
+ * sensor handles as returned by MC_CMD_DYNAMIC_SENSORS_LIST. In the case of a
+ * broken sensor, then the state of the response's MC_CMD_DYNAMIC_SENSORS_VALUE
+ * entry will be set to BROKEN, and any value provided should be treated as
+ * erroneous. Any handles which do not correspond to a sensor currently managed
+ * by the MC will be dropped from the response. This may happen when a
+ * sensor table update is in progress, and effectively means the set of usable
+ * sensors is the intersection between the sets of sensors known to the driver
+ * and the MC. On Riverhead this command is implemented as a wrapper for
+ * `get_readings` in the sensor_query SPHINX service.
*/
#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS 0x68
#undef MC_CMD_0x68_PRIVILEGE_CTG
@@ -9647,45 +9273,1286 @@
#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS_OUT_VALUES_MAXNUM 21
#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS_OUT_VALUES_MAXNUM_MCDI2 85
+/* MC_CMD_MAC_FLAGS structuredef */
+#define MC_CMD_MAC_FLAGS_LEN 4
+/* The enums defined in this field are used as indices into the
+ * MC_CMD_MAC_FLAGS bitmask.
+ */
+#define MC_CMD_MAC_FLAGS_MASK_OFST 0
+#define MC_CMD_MAC_FLAGS_MASK_LEN 4
+/* enum property: bitshift */
+/* enum: Include the FCS in the packet data delivered to the host. Ignored if
+ * RX_INCLUDE_FCS not set in capabilities.
+ */
+#define MC_CMD_MAC_FLAGS_FLAG_INCLUDE_FCS 0x0
+#define MC_CMD_MAC_FLAGS_MASK_LBN 0
+#define MC_CMD_MAC_FLAGS_MASK_WIDTH 32
+
+/* MC_CMD_TRANSMISSION_MODE structuredef */
+#define MC_CMD_TRANSMISSION_MODE_LEN 4
+#define MC_CMD_TRANSMISSION_MODE_MASK_OFST 0
+#define MC_CMD_TRANSMISSION_MODE_MASK_LEN 4
+/* enum property: value */
+#define MC_CMD_TRANSMISSION_MODE_PROMSC_MODE 0x0 /* enum */
+#define MC_CMD_TRANSMISSION_MODE_UNCST_MODE 0x1 /* enum */
+#define MC_CMD_TRANSMISSION_MODE_BRDCST_MODE 0x2 /* enum */
+#define MC_CMD_TRANSMISSION_MODE_MASK_LBN 0
+#define MC_CMD_TRANSMISSION_MODE_MASK_WIDTH 32
+
+/* MC_CMD_MAC_CONFIG_OPTIONS structuredef */
+#define MC_CMD_MAC_CONFIG_OPTIONS_LEN 4
+#define MC_CMD_MAC_CONFIG_OPTIONS_MASK_OFST 0
+#define MC_CMD_MAC_CONFIG_OPTIONS_MASK_LEN 4
+/* enum property: bitmask */
+/* enum: Configure the MAC address. */
+#define MC_CMD_MAC_CONFIG_OPTIONS_CFG_ADDR 0x0
+/* enum: Configure the maximum frame length. */
+#define MC_CMD_MAC_CONFIG_OPTIONS_CFG_MAX_FRAME_LEN 0x1
+/* enum: Configure flow control. */
+#define MC_CMD_MAC_CONFIG_OPTIONS_CFG_FCNTL 0x2
+/* enum: Configure the transmission mode. */
+#define MC_CMD_MAC_CONFIG_OPTIONS_CFG_TRANSMISSION_MODE 0x3
+/* enum: Configure FCS. */
+#define MC_CMD_MAC_CONFIG_OPTIONS_CFG_INCLUDE_FCS 0x4
+#define MC_CMD_MAC_CONFIG_OPTIONS_MASK_LBN 0
+#define MC_CMD_MAC_CONFIG_OPTIONS_MASK_WIDTH 32
+
+
+/***********************************/
+/* MC_CMD_MAC_CTRL
+ * Set MAC configuration. Return code: 0, EINVAL, ENOTSUP
+ */
+#define MC_CMD_MAC_CTRL 0x1df
+#undef MC_CMD_0x1df_PRIVILEGE_CTG
+
+#define MC_CMD_0x1df_PRIVILEGE_CTG SRIOV_CTG_LINK
+
+/* MC_CMD_MAC_CTRL_IN msgrequest */
+#define MC_CMD_MAC_CTRL_IN_LEN 32
+/* Handle for selected network port. */
+#define MC_CMD_MAC_CTRL_IN_PORT_HANDLE_OFST 0
+#define MC_CMD_MAC_CTRL_IN_PORT_HANDLE_LEN 4
+/* Select which parameters to configure. A parameter will only be modified if
+ * the corresponding control flag is set.
+ */
+#define MC_CMD_MAC_CTRL_IN_CONTROL_FLAGS_OFST 4
+#define MC_CMD_MAC_CTRL_IN_CONTROL_FLAGS_LEN 4
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* MC_CMD_MAC_CONFIG_OPTIONS/MASK */
+/* MAC address of the device. */
+#define MC_CMD_MAC_CTRL_IN_ADDR_OFST 8
+#define MC_CMD_MAC_CTRL_IN_ADDR_LEN 8
+#define MC_CMD_MAC_CTRL_IN_ADDR_LO_OFST 8
+#define MC_CMD_MAC_CTRL_IN_ADDR_LO_LEN 4
+#define MC_CMD_MAC_CTRL_IN_ADDR_LO_LBN 64
+#define MC_CMD_MAC_CTRL_IN_ADDR_LO_WIDTH 32
+#define MC_CMD_MAC_CTRL_IN_ADDR_HI_OFST 12
+#define MC_CMD_MAC_CTRL_IN_ADDR_HI_LEN 4
+#define MC_CMD_MAC_CTRL_IN_ADDR_HI_LBN 96
+#define MC_CMD_MAC_CTRL_IN_ADDR_HI_WIDTH 32
+/* Includes the ethernet header, optional VLAN tags, payload and FCS. */
+#define MC_CMD_MAC_CTRL_IN_MAX_FRAME_LEN_OFST 16
+#define MC_CMD_MAC_CTRL_IN_MAX_FRAME_LEN_LEN 4
+/* Settings for flow control. */
+#define MC_CMD_MAC_CTRL_IN_FCNTL_OFST 20
+#define MC_CMD_MAC_CTRL_IN_FCNTL_LEN 4
+/* enum property: value */
+/* Enum values, see field(s): */
+/* MC_CMD_FCNTL/MASK */
+/* Configure the MAC to transmit in one of promiscuous, unicast or broadcast
+ * mode.
+ */
+#define MC_CMD_MAC_CTRL_IN_TRANSMISSION_MODE_OFST 24
+#define MC_CMD_MAC_CTRL_IN_TRANSMISSION_MODE_LEN 4
+/* enum property: value */
+/* Enum values, see field(s): */
+/* MC_CMD_TRANSMISSION_MODE/MASK */
+/* Flags to control and expand the configuration of the MAC. */
+#define MC_CMD_MAC_CTRL_IN_FLAGS_OFST 28
+#define MC_CMD_MAC_CTRL_IN_FLAGS_LEN 4
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* MC_CMD_MAC_FLAGS/MASK */
+
+/* MC_CMD_MAC_CTRL_IN_V2 msgrequest: Updated MAC_CTRL with QBB mask */
+#define MC_CMD_MAC_CTRL_IN_V2_LEN 33
+/* Handle for selected network port. */
+#define MC_CMD_MAC_CTRL_IN_V2_PORT_HANDLE_OFST 0
+#define MC_CMD_MAC_CTRL_IN_V2_PORT_HANDLE_LEN 4
+/* Select which parameters to configure. A parameter will only be modified if
+ * the corresponding control flag is set.
+ */
+#define MC_CMD_MAC_CTRL_IN_V2_CONTROL_FLAGS_OFST 4
+#define MC_CMD_MAC_CTRL_IN_V2_CONTROL_FLAGS_LEN 4
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* MC_CMD_MAC_CONFIG_OPTIONS/MASK */
+/* MAC address of the device. */
+#define MC_CMD_MAC_CTRL_IN_V2_ADDR_OFST 8
+#define MC_CMD_MAC_CTRL_IN_V2_ADDR_LEN 8
+#define MC_CMD_MAC_CTRL_IN_V2_ADDR_LO_OFST 8
+#define MC_CMD_MAC_CTRL_IN_V2_ADDR_LO_LEN 4
+#define MC_CMD_MAC_CTRL_IN_V2_ADDR_LO_LBN 64
+#define MC_CMD_MAC_CTRL_IN_V2_ADDR_LO_WIDTH 32
+#define MC_CMD_MAC_CTRL_IN_V2_ADDR_HI_OFST 12
+#define MC_CMD_MAC_CTRL_IN_V2_ADDR_HI_LEN 4
+#define MC_CMD_MAC_CTRL_IN_V2_ADDR_HI_LBN 96
+#define MC_CMD_MAC_CTRL_IN_V2_ADDR_HI_WIDTH 32
+/* Includes the ethernet header, optional VLAN tags, payload and FCS. */
+#define MC_CMD_MAC_CTRL_IN_V2_MAX_FRAME_LEN_OFST 16
+#define MC_CMD_MAC_CTRL_IN_V2_MAX_FRAME_LEN_LEN 4
+/* Settings for flow control. */
+#define MC_CMD_MAC_CTRL_IN_V2_FCNTL_OFST 20
+#define MC_CMD_MAC_CTRL_IN_V2_FCNTL_LEN 4
+/* enum property: value */
+/* Enum values, see field(s): */
+/* MC_CMD_FCNTL/MASK */
+/* Configure the MAC to transmit in one of promiscuous, unicast or broadcast
+ * mode.
+ */
+#define MC_CMD_MAC_CTRL_IN_V2_TRANSMISSION_MODE_OFST 24
+#define MC_CMD_MAC_CTRL_IN_V2_TRANSMISSION_MODE_LEN 4
+/* enum property: value */
+/* Enum values, see field(s): */
+/* MC_CMD_TRANSMISSION_MODE/MASK */
+/* Flags to control and expand the configuration of the MAC. */
+#define MC_CMD_MAC_CTRL_IN_V2_FLAGS_OFST 28
+#define MC_CMD_MAC_CTRL_IN_V2_FLAGS_LEN 4
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* MC_CMD_MAC_FLAGS/MASK */
+/* Priority-based flow control mask (QBB). PRIO7 corresponds to the highest
+ * priority, and PRIO0 to the lowest. This field is only used when CFG_FCNTL is
+ * set and FCNTL is QBB
+ */
+#define MC_CMD_MAC_CTRL_IN_V2_PRIO_FCNTL_MASK_OFST 32
+#define MC_CMD_MAC_CTRL_IN_V2_PRIO_FCNTL_MASK_LEN 1
+/* enum property: bitmask */
+#define MC_CMD_MAC_CTRL_IN_V2_QBB_PRIO0 0x0 /* enum */
+#define MC_CMD_MAC_CTRL_IN_V2_QBB_PRIO1 0x1 /* enum */
+#define MC_CMD_MAC_CTRL_IN_V2_QBB_PRIO2 0x2 /* enum */
+#define MC_CMD_MAC_CTRL_IN_V2_QBB_PRIO3 0x3 /* enum */
+#define MC_CMD_MAC_CTRL_IN_V2_QBB_PRIO4 0x4 /* enum */
+#define MC_CMD_MAC_CTRL_IN_V2_QBB_PRIO5 0x5 /* enum */
+#define MC_CMD_MAC_CTRL_IN_V2_QBB_PRIO6 0x6 /* enum */
+#define MC_CMD_MAC_CTRL_IN_V2_QBB_PRIO7 0x7 /* enum */
+
+/* MC_CMD_MAC_CTRL_OUT msgresponse */
+#define MC_CMD_MAC_CTRL_OUT_LEN 0
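/* Illustrative sketch, not part of the generated MCDI definitions: building an
 * MC_CMD_MAC_CTRL_IN request using the offsets above. CONTROL_FLAGS is a
 * "bitshift" field, so each MC_CMD_MAC_CONFIG_OPTIONS enum value is a bit
 * position and only the parameters whose bits are set are modified. put_le32()
 * is a local helper; MCDI fields are little-endian.
 */
#include <stdint.h>
#include <string.h>

static void put_le32(uint8_t *buf, unsigned int ofst, uint32_t val)
{
	buf[ofst + 0] = val & 0xff;
	buf[ofst + 1] = (val >> 8) & 0xff;
	buf[ofst + 2] = (val >> 16) & 0xff;
	buf[ofst + 3] = (val >> 24) & 0xff;
}

/* Reconfigure only the maximum frame length on the given port. */
static void build_mac_ctrl_set_mtu(uint8_t buf[MC_CMD_MAC_CTRL_IN_LEN],
				   uint32_t port_handle, uint32_t max_frame_len)
{
	memset(buf, 0, MC_CMD_MAC_CTRL_IN_LEN);
	put_le32(buf, MC_CMD_MAC_CTRL_IN_PORT_HANDLE_OFST, port_handle);
	put_le32(buf, MC_CMD_MAC_CTRL_IN_CONTROL_FLAGS_OFST,
		 1u << MC_CMD_MAC_CONFIG_OPTIONS_CFG_MAX_FRAME_LEN);
	put_le32(buf, MC_CMD_MAC_CTRL_IN_MAX_FRAME_LEN_OFST, max_frame_len);
}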
+
+
+/***********************************/
+/* MC_CMD_MAC_STATE
+ * Read the MAC state. Return code: 0, ETIME.
+ */
+#define MC_CMD_MAC_STATE 0x1e0
+#undef MC_CMD_0x1e0_PRIVILEGE_CTG
+
+#define MC_CMD_0x1e0_PRIVILEGE_CTG SRIOV_CTG_LINK
+
+/* MC_CMD_MAC_STATE_IN msgrequest */
+#define MC_CMD_MAC_STATE_IN_LEN 4
+/* Handle for selected network port. */
+#define MC_CMD_MAC_STATE_IN_PORT_HANDLE_OFST 0
+#define MC_CMD_MAC_STATE_IN_PORT_HANDLE_LEN 4
+
+/* MC_CMD_MAC_STATE_OUT msgresponse */
+#define MC_CMD_MAC_STATE_OUT_LEN 32
+/* The configured maximum frame length of the MAC. */
+#define MC_CMD_MAC_STATE_OUT_MAX_FRAME_LEN_OFST 0
+#define MC_CMD_MAC_STATE_OUT_MAX_FRAME_LEN_LEN 4
+/* This returns the negotiated flow control value. */
+#define MC_CMD_MAC_STATE_OUT_FCNTL_OFST 4
+#define MC_CMD_MAC_STATE_OUT_FCNTL_LEN 4
+/* enum property: value */
+/* Enum values, see field(s): */
+/* MC_CMD_FCNTL/MASK */
+/* MAC address of the device. */
+#define MC_CMD_MAC_STATE_OUT_ADDR_OFST 8
+#define MC_CMD_MAC_STATE_OUT_ADDR_LEN 8
+#define MC_CMD_MAC_STATE_OUT_ADDR_LO_OFST 8
+#define MC_CMD_MAC_STATE_OUT_ADDR_LO_LEN 4
+#define MC_CMD_MAC_STATE_OUT_ADDR_LO_LBN 64
+#define MC_CMD_MAC_STATE_OUT_ADDR_LO_WIDTH 32
+#define MC_CMD_MAC_STATE_OUT_ADDR_HI_OFST 12
+#define MC_CMD_MAC_STATE_OUT_ADDR_HI_LEN 4
+#define MC_CMD_MAC_STATE_OUT_ADDR_HI_LBN 96
+#define MC_CMD_MAC_STATE_OUT_ADDR_HI_WIDTH 32
+/* Flags indicating MAC faults. */
+#define MC_CMD_MAC_STATE_OUT_MAC_FAULT_FLAGS_OFST 16
+#define MC_CMD_MAC_STATE_OUT_MAC_FAULT_FLAGS_LEN 4
+/* enum property: bitshift */
+/* enum: Indicates a local MAC fault. */
+#define MC_CMD_MAC_STATE_OUT_LOCAL 0x0
+/* enum: Indicates a remote MAC fault. */
+#define MC_CMD_MAC_STATE_OUT_REMOTE 0x1
+/* enum: Indicates a pending reconfiguration of the MAC. */
+#define MC_CMD_MAC_STATE_OUT_PENDING_RECONFIG 0x2
+/* The flags that were used to configure the MAC. This is a copy of the FLAGS
+ * field in the MC_CMD_MAC_CTRL_IN command.
+ */
+#define MC_CMD_MAC_STATE_OUT_FLAGS_OFST 20
+#define MC_CMD_MAC_STATE_OUT_FLAGS_LEN 4
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* MC_CMD_MAC_FLAGS/MASK */
+/* The transmission mode that was used to configure the MAC. This is a copy of
+ * the TRANSMISSION_MODE field in the MC_CMD_MAC_CTRL_IN command.
+ */
+#define MC_CMD_MAC_STATE_OUT_TRANSMISSION_MODE_OFST 24
+#define MC_CMD_MAC_STATE_OUT_TRANSMISSION_MODE_LEN 4
+/* enum property: value */
+/* Enum values, see field(s): */
+/* MC_CMD_TRANSMISSION_MODE/MASK */
+/* The control flags that were used to configure the MAC. This is a copy of the
+ * CONTROL field in the MC_CMD_MAC_CTRL_IN command.
+ */
+#define MC_CMD_MAC_STATE_OUT_CONTROL_FLAGS_OFST 28
+#define MC_CMD_MAC_STATE_OUT_CONTROL_FLAGS_LEN 4
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* MC_CMD_MAC_CONFIG_OPTIONS/MASK */
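/* Illustrative sketch, not part of the generated MCDI definitions: decoding
 * the MAC_FAULT_FLAGS word of an MC_CMD_MAC_STATE response. As the field is
 * marked "bitshift", LOCAL/REMOTE/PENDING_RECONFIG are bit positions within
 * the 32-bit word at MAC_FAULT_FLAGS_OFST. get_le32() is a local helper.
 */
#include <stdbool.h>
#include <stdint.h>

static uint32_t get_le32(const uint8_t *p)
{
	return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
	       ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

static bool mac_state_has_local_fault(const uint8_t *resp)
{
	uint32_t faults = get_le32(resp + MC_CMD_MAC_STATE_OUT_MAC_FAULT_FLAGS_OFST);

	return faults & (1u << MC_CMD_MAC_STATE_OUT_LOCAL);
}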
+
+
+/***********************************/
+/* MC_CMD_GET_ASSIGNED_PORT_HANDLE
+ * Obtain a handle that can be operated on to configure and query the status of
+ * the link. ENOENT is returned when no port is assigned to the client. Return
+ * code: 0, ENOENT
+ */
+#define MC_CMD_GET_ASSIGNED_PORT_HANDLE 0x1e2
+#undef MC_CMD_0x1e2_PRIVILEGE_CTG
+
+#define MC_CMD_0x1e2_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_ASSIGNED_PORT_HANDLE_IN msgrequest */
+#define MC_CMD_GET_ASSIGNED_PORT_HANDLE_IN_LEN 0
+
+/* MC_CMD_GET_ASSIGNED_PORT_HANDLE_OUT msgresponse */
+#define MC_CMD_GET_ASSIGNED_PORT_HANDLE_OUT_LEN 4
+/* Handle for assigned port. */
+#define MC_CMD_GET_ASSIGNED_PORT_HANDLE_OUT_PORT_HANDLE_OFST 0
+#define MC_CMD_GET_ASSIGNED_PORT_HANDLE_OUT_PORT_HANDLE_LEN 4
+
+/* MC_CMD_STAT_ID structuredef */
+#define MC_CMD_STAT_ID_LEN 4
+#define MC_CMD_STAT_ID_SOURCE_ID_OFST 0
+#define MC_CMD_STAT_ID_SOURCE_ID_LEN 2
+/* enum property: index */
+/* enum: Internal markers (generation start and end markers) */
+#define MC_CMD_STAT_ID_MARKER 0x1
+/* enum: Network port MAC statistics. */
+#define MC_CMD_STAT_ID_MAC 0x2
+/* enum: Network port PHY statistics. */
+#define MC_CMD_STAT_ID_PHY 0x3
+#define MC_CMD_STAT_ID_SOURCE_ID_LBN 0
+#define MC_CMD_STAT_ID_SOURCE_ID_WIDTH 16
+#define MC_CMD_STAT_ID_MARKER_STAT_ID_OFST 2
+#define MC_CMD_STAT_ID_MARKER_STAT_ID_LEN 2
+/* enum property: index */
+/* enum: This value is used to mark the start of a generation of statistics for
+ * DMA synchronization. It is incremented whenever a new set of statistics is
+ * transferred. Always the first entry in the DMA buffer.
+ */
+#define MC_CMD_STAT_ID_GENERATION_START 0x1
+/* enum: This value is used to mark the end of a generation of statistics for
+ * DMA synchronization. Always the last entry in the DMA buffer and set to the
+ * same value as GENERATION_START. The host driver must compare the
+ * GENERATION_START and GENERATION_END values to verify that the DMA buffer is
+ * consistent after the copy. If they do not match, it means that a new DMA
+ * transfer has started while the host driver was copying the DMA
+ * buffer. In this case, the host driver must repeat the copy operation.
+ */
+#define MC_CMD_STAT_ID_GENERATION_END 0x2
+#define MC_CMD_STAT_ID_MARKER_STAT_ID_LBN 16
+#define MC_CMD_STAT_ID_MARKER_STAT_ID_WIDTH 16
+#define MC_CMD_STAT_ID_MAC_STAT_ID_OFST 2
+#define MC_CMD_STAT_ID_MAC_STAT_ID_LEN 2
+/* enum property: index */
+/* enum: Total number of packets transmitted (includes pause frames). */
+#define MC_CMD_STAT_ID_TX_PKTS 0x1
+/* enum: Pause frames transmitted. */
+#define MC_CMD_STAT_ID_TX_PAUSE_PKTS 0x2
+/* enum: Control frames transmitted. */
+#define MC_CMD_STAT_ID_TX_CONTROL_PKTS 0x3
+/* enum: Unicast packets transmitted (includes pause frames). */
+#define MC_CMD_STAT_ID_TX_UNICAST_PKTS 0x4
+/* enum: Multicast packets transmitted (includes pause frames). */
+#define MC_CMD_STAT_ID_TX_MULTICAST_PKTS 0x5
+/* enum: Broadcast packets transmitted (includes pause frames). */
+#define MC_CMD_STAT_ID_TX_BROADCAST_PKTS 0x6
+/* enum: Bytes transmitted (includes pause frames). */
+#define MC_CMD_STAT_ID_TX_BYTES 0x7
+/* enum: Bytes transmitted with bad CRC. */
+#define MC_CMD_STAT_ID_TX_BAD_BYTES 0x8
+/* enum: Bytes transmitted with good CRC. */
+#define MC_CMD_STAT_ID_TX_GOOD_BYTES 0x9
+/* enum: Packets transmitted with length less than 64 bytes. */
+#define MC_CMD_STAT_ID_TX_LT64_PKTS 0xa
+/* enum: Packets transmitted with length equal to 64 bytes. */
+#define MC_CMD_STAT_ID_TX_64_PKTS 0xb
+/* enum: Packets transmitted with length between 65 and 127 bytes. */
+#define MC_CMD_STAT_ID_TX_65_TO_127_PKTS 0xc
+/* enum: Packets transmitted with length between 128 and 255 bytes. */
+#define MC_CMD_STAT_ID_TX_128_TO_255_PKTS 0xd
+/* enum: Packets transmitted with length between 256 and 511 bytes. */
+#define MC_CMD_STAT_ID_TX_256_TO_511_PKTS 0xe
+/* enum: Packets transmitted with length between 512 and 1023 bytes. */
+#define MC_CMD_STAT_ID_TX_512_TO_1023_PKTS 0xf
+/* enum: Packets transmitted with length between 1024 and 1518 bytes. */
+#define MC_CMD_STAT_ID_TX_1024_TO_15XX_PKTS 0x10
+/* enum: Packets transmitted with length between 1519 and 9216 bytes. */
+#define MC_CMD_STAT_ID_TX_15XX_TO_JUMBO_PKTS 0x11
+/* enum: Packets transmitted with length greater than 9216 bytes. */
+#define MC_CMD_STAT_ID_TX_GTJUMBO_PKTS 0x12
+/* enum: Packets transmitted with bad FCS. */
+#define MC_CMD_STAT_ID_TX_BAD_FCS_PKTS 0x13
+/* enum: Packets transmitted with good FCS. */
+#define MC_CMD_STAT_ID_TX_GOOD_FCS_PKTS 0x14
+/* enum: Packets received. */
+#define MC_CMD_STAT_ID_RX_PKTS 0x15
+/* enum: Pause frames received. */
+#define MC_CMD_STAT_ID_RX_PAUSE_PKTS 0x16
+/* enum: Total number of good packets received. */
+#define MC_CMD_STAT_ID_RX_GOOD_PKTS 0x17
+/* enum: Total number of BAD packets received. */
+#define MC_CMD_STAT_ID_RX_BAD_PKTS 0x18
+/* enum: Total number of control frames received. */
+#define MC_CMD_STAT_ID_RX_CONTROL_PKTS 0x19
+/* enum: Total number of unicast packets received. */
+#define MC_CMD_STAT_ID_RX_UNICAST_PKTS 0x1a
+/* enum: Total number of multicast packets received. */
+#define MC_CMD_STAT_ID_RX_MULTICAST_PKTS 0x1b
+/* enum: Total number of broadcast packets received. */
+#define MC_CMD_STAT_ID_RX_BROADCAST_PKTS 0x1c
+/* enum: Total number of bytes received. */
+#define MC_CMD_STAT_ID_RX_BYTES 0x1d
+/* enum: Total number of bytes received with bad CRC. */
+#define MC_CMD_STAT_ID_RX_BAD_BYTES 0x1e
+/* enum: Total number of bytes received with GOOD CRC. */
+#define MC_CMD_STAT_ID_RX_GOOD_BYTES 0x1f
+/* enum: Packets received with length equal to 64 bytes. */
+#define MC_CMD_STAT_ID_RX_64_PKTS 0x20
+/* enum: Packets received with length between 65 and 127 bytes. */
+#define MC_CMD_STAT_ID_RX_65_TO_127_PKTS 0x21
+/* enum: Packets received with length between 128 and 255 bytes. */
+#define MC_CMD_STAT_ID_RX_128_TO_255_PKTS 0x22
+/* enum: Packets received with length between 256 and 511 bytes. */
+#define MC_CMD_STAT_ID_RX_256_TO_511_PKTS 0x23
+/* enum: Packets received with length between 512 and 1023 bytes. */
+#define MC_CMD_STAT_ID_RX_512_TO_1023_PKTS 0x24
+/* enum: Packets received with length between 1024 and 1518 bytes. */
+#define MC_CMD_STAT_ID_RX_1024_TO_15XX_PKTS 0x25
+/* enum: Packets received with length between 1519 and 9216 bytes. */
+#define MC_CMD_STAT_ID_RX_15XX_TO_JUMBO_PKTS 0x26
+/* enum: Packets received with length greater than 9216 bytes. */
+#define MC_CMD_STAT_ID_RX_GTJUMBO_PKTS 0x27
+/* enum: Packets received with length less than 64 bytes. */
+#define MC_CMD_STAT_ID_RX_UNDERSIZE_PKTS 0x28
+/* enum: Packets received with bad FCS. */
+#define MC_CMD_STAT_ID_RX_BAD_FCS_PKTS 0x29
+/* enum: Packets received with GOOD FCS. */
+#define MC_CMD_STAT_ID_RX_GOOD_FCS_PKTS 0x2a
+/* enum: Packets received with overflow. */
+#define MC_CMD_STAT_ID_RX_OVERFLOW_PKTS 0x2b
+/* enum: Packets received with symbol error. */
+#define MC_CMD_STAT_ID_RX_SYMBOL_ERROR_PKTS 0x2c
+/* enum: Packets received with alignment error. */
+#define MC_CMD_STAT_ID_RX_ALIGN_ERROR_PKTS 0x2d
+/* enum: Packets received with length error. */
+#define MC_CMD_STAT_ID_RX_LENGTH_ERROR_PKTS 0x2e
+/* enum: Packets received with internal error. */
+#define MC_CMD_STAT_ID_RX_INTERNAL_ERROR_PKTS 0x2f
+/* enum: Packets received with jabber. These packets are larger than the
+ * allowed maximum receive unit (MRU). This indicates that a packet either has
+ * a bad CRC or has an RX error.
+ */
+#define MC_CMD_STAT_ID_RX_JABBER_PKTS 0x30
+/* enum: Packets dropped due to having no descriptor. This is a datapath stat
+ */
+#define MC_CMD_STAT_ID_RX_NODESC_DROPS 0x31
+/* enum: Packets received with lanes 0 and 1 character error. */
+#define MC_CMD_STAT_ID_RX_LANES01_CHAR_ERR 0x32
+/* enum: Packets received with lanes 2 and 3 character error. */
+#define MC_CMD_STAT_ID_RX_LANES23_CHAR_ERR 0x33
+/* enum: Packets received with lanes 0 and 1 disparity error. */
+#define MC_CMD_STAT_ID_RX_LANES01_DISP_ERR 0x34
+/* enum: Packets received with lanes 2 and 3 disparity error. */
+#define MC_CMD_STAT_ID_RX_LANES23_DISP_ERR 0x35
+/* enum: Packets received with match fault. */
+#define MC_CMD_STAT_ID_RX_MATCH_FAULT 0x36
+#define MC_CMD_STAT_ID_MAC_STAT_ID_LBN 16
+#define MC_CMD_STAT_ID_MAC_STAT_ID_WIDTH 16
+/* Include FEC stats. */
+#define MC_CMD_STAT_ID_PHY_STAT_ID_OFST 2
+#define MC_CMD_STAT_ID_PHY_STAT_ID_LEN 2
+/* enum property: index */
+/* enum: Number of uncorrected FEC codewords on link (RS-FEC only from Medford2
+ * onwards)
+ */
+#define MC_CMD_STAT_ID_FEC_UNCORRECTED_ERRORS 0x1
+/* enum: Number of corrected FEC codewords on link (RS-FEC only from Medford2
+ * onwards)
+ */
+#define MC_CMD_STAT_ID_FEC_CORRECTED_ERRORS 0x2
+/* enum: Number of corrected 10-bit symbol errors, lane 0 (RS-FEC only) */
+#define MC_CMD_STAT_ID_FEC_CORRECTED_SYMBOLS_LANE0 0x3
+/* enum: Number of corrected 10-bit symbol errors, lane 1 (RS-FEC only) */
+#define MC_CMD_STAT_ID_FEC_CORRECTED_SYMBOLS_LANE1 0x4
+/* enum: Number of corrected 10-bit symbol errors, lane 2 (RS-FEC only) */
+#define MC_CMD_STAT_ID_FEC_CORRECTED_SYMBOLS_LANE2 0x5
+/* enum: Number of corrected 10-bit symbol errors, lane 3 (RS-FEC only) */
+#define MC_CMD_STAT_ID_FEC_CORRECTED_SYMBOLS_LANE3 0x6
+#define MC_CMD_STAT_ID_PHY_STAT_ID_LBN 16
+#define MC_CMD_STAT_ID_PHY_STAT_ID_WIDTH 16
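/* Illustrative sketch, not part of the generated MCDI definitions: splitting a
 * 32-bit MC_CMD_STAT_ID value into its SOURCE_ID and per-source stat id using
 * the LBN/WIDTH values above (both sub-fields are 16 bits wide).
 */
#include <stdint.h>

static void decode_stat_id(uint32_t stat_id, uint16_t *source, uint16_t *id)
{
	*source = stat_id & 0xffff;	/* SOURCE_ID: MARKER, MAC or PHY */
	*id = (stat_id >> 16) & 0xffff;	/* e.g. MC_CMD_STAT_ID_TX_PKTS */
}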
+
+/* MC_CMD_STAT_DESC structuredef: Structure describing the layout and size of
+ * the stats DMA buffer descriptor.
+ */
+#define MC_CMD_STAT_DESC_LEN 8
+/* Unique identifier of the statistic. Formatted as MC_CMD_STAT_ID */
+#define MC_CMD_STAT_DESC_STAT_ID_OFST 0
+#define MC_CMD_STAT_DESC_STAT_ID_LEN 4
+#define MC_CMD_STAT_DESC_STAT_ID_LBN 0
+#define MC_CMD_STAT_DESC_STAT_ID_WIDTH 32
+/* See structuredef: MC_CMD_STAT_ID */
+#define MC_CMD_STAT_DESC_STAT_ID_SOURCE_ID_OFST 0
+#define MC_CMD_STAT_DESC_STAT_ID_SOURCE_ID_LEN 2
+#define MC_CMD_STAT_DESC_STAT_ID_SOURCE_ID_LBN 0
+#define MC_CMD_STAT_DESC_STAT_ID_SOURCE_ID_WIDTH 16
+#define MC_CMD_STAT_DESC_STAT_ID_MARKER_STAT_ID_OFST 2
+#define MC_CMD_STAT_DESC_STAT_ID_MARKER_STAT_ID_LEN 2
+#define MC_CMD_STAT_DESC_STAT_ID_MARKER_STAT_ID_LBN 16
+#define MC_CMD_STAT_DESC_STAT_ID_MARKER_STAT_ID_WIDTH 16
+#define MC_CMD_STAT_DESC_STAT_ID_MAC_STAT_ID_OFST 2
+#define MC_CMD_STAT_DESC_STAT_ID_MAC_STAT_ID_LEN 2
+#define MC_CMD_STAT_DESC_STAT_ID_MAC_STAT_ID_LBN 16
+#define MC_CMD_STAT_DESC_STAT_ID_MAC_STAT_ID_WIDTH 16
+#define MC_CMD_STAT_DESC_STAT_ID_PHY_STAT_ID_OFST 2
+#define MC_CMD_STAT_DESC_STAT_ID_PHY_STAT_ID_LEN 2
+#define MC_CMD_STAT_DESC_STAT_ID_PHY_STAT_ID_LBN 16
+#define MC_CMD_STAT_DESC_STAT_ID_PHY_STAT_ID_WIDTH 16
+/* Index of the statistic in the DMA buffer. */
+#define MC_CMD_STAT_DESC_STAT_INDEX_OFST 4
+#define MC_CMD_STAT_DESC_STAT_INDEX_LEN 2
+#define MC_CMD_STAT_DESC_STAT_INDEX_LBN 32
+#define MC_CMD_STAT_DESC_STAT_INDEX_WIDTH 16
+/* Reserved for future extension (e.g. flags field) - currently always 0. */
+#define MC_CMD_STAT_DESC_RESERVED_OFST 6
+#define MC_CMD_STAT_DESC_RESERVED_LEN 2
+#define MC_CMD_STAT_DESC_RESERVED_LBN 48
+#define MC_CMD_STAT_DESC_RESERVED_WIDTH 16
+
+
+/***********************************/
+/* MC_CMD_MAC_STATISTICS_DESCRIPTOR
+ * Get a list of descriptors that describe the layout and size of the stats
+ * buffer required for retrieving statistics for a given port. Each entry in
+ * the list is formatted as MC_CMD_STAT_DESC and provides the ID of each stat
+ * and its location and size in the buffer. It also gives the overall minimum
+ * size of the DMA buffer required when DMA mode is used. Note that the first
+ * and last entries in the list are reserved for the generation start
+ * (MC_CMD_MARKER_STAT_GENERATION_START) and end
+ * (MC_CMD_MARKER_STAT_GENERATION_END) markers respectively, to be used for DMA
+ * synchronisation as described in the documentation for the relevant enum
+ * entries. The entries are present in the buffer even if DMA mode is not used.
+ * Provisions are made (but currently unused) for extending the size of the
+ * descriptors, extending the size of the list beyond the maximum MCDI response
+ * size, as well as the dynamic runtime updates of the list. Returns: 0 on
+ * success, ENOENT on non-existent port handle
+ */
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR 0x1e3
+#undef MC_CMD_0x1e3_PRIVILEGE_CTG
+
+#define MC_CMD_0x1e3_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_MAC_STATISTICS_DESCRIPTOR_IN msgrequest */
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_IN_LEN 8
+/* Handle of port to get MAC statistics descriptors for. */
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_IN_PORT_HANDLE_OFST 0
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_IN_PORT_HANDLE_LEN 4
+/* Offset of the first entry to return, for cases where not all entries fit in
+ * the MCDI response. Should be set to 0 on initial request, and on subsequent
+ * requests updated by the number of entries already returned, as long as the
+ * MORE_ENTRIES flag is set.
+ */
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_IN_OFFSET_OFST 4
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_IN_OFFSET_LEN 4
+
+/* MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT msgresponse */
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_LENMIN 28
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_LENMAX 252
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_LENMAX_MCDI2 1020
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_LEN(num) (20+8*(num))
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_ENTRIES_NUM(len) (((len)-20)/8)
+/* Generation number of the stats buffer. This is incremented each time the
+ * buffer is updated, and is used to verify the consistency of the buffer
+ * contents. Reserved for future extension (dynamic list updates). Currently
+ * always set to 0.
+ */
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_GENERATION_OFST 0
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_GENERATION_LEN 4
+/* Minimum size, in bytes, of the DMA buffer required to retrieve all
+ * statistics for the port. This is the sum of the sizes of all the
+ * statistics, plus the size of the generation markers. Drivers will typically
+ * round up this value to the granularity of the host DMA allocation units.
+ */
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_DMA_BUFFER_SIZE_OFST 4
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_DMA_BUFFER_SIZE_LEN 4
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_FLAGS_OFST 8
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_FLAGS_LEN 4
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_MORE_ENTRIES_OFST 8
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_MORE_ENTRIES_LBN 0
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_MORE_ENTRIES_WIDTH 1
+/* Size of an individual descriptor entry in the list, which determines the
+ * entry stride. Currently always set to the size of MC_CMD_STAT_DESC; larger
+ * values may be used in the future to extend the descriptor by appending new
+ * data to the end of the existing structure.
+ */
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_ENTRY_SIZE_OFST 12
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_ENTRY_SIZE_LEN 4
+/* Number of entries returned in the descriptor list. */
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_ENTRY_COUNT_OFST 16
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_ENTRY_COUNT_LEN 4
+/* List of descriptors. Each entry is formatted as MC_CMD_STAT_DESC and
+ * provides the ID of each stat and its location and size in the buffer. The
+ * first and last entries are reserved for the generation start and end markers
+ * respectively.
+ */
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_ENTRIES_OFST 20
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_ENTRIES_LEN 8
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_ENTRIES_LO_OFST 20
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_ENTRIES_LO_LEN 4
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_ENTRIES_LO_LBN 160
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_ENTRIES_LO_WIDTH 32
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_ENTRIES_HI_OFST 24
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_ENTRIES_HI_LEN 4
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_ENTRIES_HI_LBN 192
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_ENTRIES_HI_WIDTH 32
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_ENTRIES_MINNUM 1
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_ENTRIES_MAXNUM 29
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_ENTRIES_MAXNUM_MCDI2 125
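/* Illustrative sketch, not part of the generated MCDI definitions: walking the
 * ENTRIES array of an MC_CMD_MAC_STATISTICS_DESCRIPTOR response. ENTRY_SIZE is
 * used as the stride, as required above; when MORE_ENTRIES is set the caller
 * would repeat the command with OFFSET advanced by the entries already read.
 * get_le16()/get_le32() are local little-endian helpers.
 */
#include <stddef.h>
#include <stdint.h>

static uint32_t get_le32(const uint8_t *p)
{
	return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
	       ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

static uint16_t get_le16(const uint8_t *p)
{
	return (uint16_t)(p[0] | (p[1] << 8));
}

struct stat_layout {
	uint32_t stat_id;	/* formatted as MC_CMD_STAT_ID */
	uint16_t dma_index;	/* index of the stat in the DMA buffer */
};

static unsigned int parse_stat_descriptors(const uint8_t *resp, size_t resp_len,
					   struct stat_layout *out,
					   unsigned int max_out)
{
	uint32_t count = get_le32(resp + MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_ENTRY_COUNT_OFST);
	uint32_t stride = get_le32(resp + MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_ENTRY_SIZE_OFST);
	unsigned int i, n = 0;

	for (i = 0; i < count && n < max_out; i++) {
		size_t ofst = MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_ENTRIES_OFST +
			      (size_t)i * stride;

		if (ofst + MC_CMD_STAT_DESC_LEN > resp_len)
			break;	/* truncated response */
		out[n].stat_id = get_le32(resp + ofst + MC_CMD_STAT_DESC_STAT_ID_OFST);
		out[n].dma_index = get_le16(resp + ofst + MC_CMD_STAT_DESC_STAT_INDEX_OFST);
		n++;
	}
	return n;
}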
+
+
+/***********************************/
+/* MC_CMD_MAC_STATISTICS
+ * Get generic MAC statistics. This call retrieves unified statistics managed
+ * by the MC. The MC will populate and provide all supported statistics in the
+ * format as returned by MC_CMD_MAC_STATISTICS_DESCRIPTOR. Refer to the
+ * aforementioned command for the format and contents of the stats DMA buffer.
+ * To ensure consistent and accurate results, it is essential for the driver to
+ * initialize the DMA buffer with zeros when DMA mode is used. Returns: 0 on
+ * success, ETIME if the DMA buffer is not ready, ENOENT on non-existent port
+ * handle, and EINVAL on invalid parameters (DMA buffer too small)
+ */
+#define MC_CMD_MAC_STATISTICS 0x1e4
+#undef MC_CMD_0x1e4_PRIVILEGE_CTG
+
+#define MC_CMD_0x1e4_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_MAC_STATISTICS_IN msgrequest */
+#define MC_CMD_MAC_STATISTICS_IN_LEN 20
+/* Handle of port to get MAC statistics for. */
+#define MC_CMD_MAC_STATISTICS_IN_PORT_HANDLE_OFST 0
+#define MC_CMD_MAC_STATISTICS_IN_PORT_HANDLE_LEN 4
+/* Contains options for querying the MAC statistics. */
+#define MC_CMD_MAC_STATISTICS_IN_CMD_OFST 4
+#define MC_CMD_MAC_STATISTICS_IN_CMD_LEN 4
+#define MC_CMD_MAC_STATISTICS_IN_DMA_OFST 4
+#define MC_CMD_MAC_STATISTICS_IN_DMA_LBN 0
+#define MC_CMD_MAC_STATISTICS_IN_DMA_WIDTH 1
+#define MC_CMD_MAC_STATISTICS_IN_CLEAR_OFST 4
+#define MC_CMD_MAC_STATISTICS_IN_CLEAR_LBN 1
+#define MC_CMD_MAC_STATISTICS_IN_CLEAR_WIDTH 1
+#define MC_CMD_MAC_STATISTICS_IN_PERIODIC_CHANGE_OFST 4
+#define MC_CMD_MAC_STATISTICS_IN_PERIODIC_CHANGE_LBN 2
+#define MC_CMD_MAC_STATISTICS_IN_PERIODIC_CHANGE_WIDTH 1
+#define MC_CMD_MAC_STATISTICS_IN_PERIODIC_ENABLE_OFST 4
+#define MC_CMD_MAC_STATISTICS_IN_PERIODIC_ENABLE_LBN 3
+#define MC_CMD_MAC_STATISTICS_IN_PERIODIC_ENABLE_WIDTH 1
+#define MC_CMD_MAC_STATISTICS_IN_PERIODIC_NOEVENT_OFST 4
+#define MC_CMD_MAC_STATISTICS_IN_PERIODIC_NOEVENT_LBN 4
+#define MC_CMD_MAC_STATISTICS_IN_PERIODIC_NOEVENT_WIDTH 1
+#define MC_CMD_MAC_STATISTICS_IN_PERIOD_MS_OFST 4
+#define MC_CMD_MAC_STATISTICS_IN_PERIOD_MS_LBN 16
+#define MC_CMD_MAC_STATISTICS_IN_PERIOD_MS_WIDTH 16
+/* This is the address of the DMA buffer to use for transfer of the statistics.
+ * Only valid if the DMA flag is set to 1.
+ */
+#define MC_CMD_MAC_STATISTICS_IN_DMA_ADDR_OFST 8
+#define MC_CMD_MAC_STATISTICS_IN_DMA_ADDR_LEN 8
+#define MC_CMD_MAC_STATISTICS_IN_DMA_ADDR_LO_OFST 8
+#define MC_CMD_MAC_STATISTICS_IN_DMA_ADDR_LO_LEN 4
+#define MC_CMD_MAC_STATISTICS_IN_DMA_ADDR_LO_LBN 64
+#define MC_CMD_MAC_STATISTICS_IN_DMA_ADDR_LO_WIDTH 32
+#define MC_CMD_MAC_STATISTICS_IN_DMA_ADDR_HI_OFST 12
+#define MC_CMD_MAC_STATISTICS_IN_DMA_ADDR_HI_LEN 4
+#define MC_CMD_MAC_STATISTICS_IN_DMA_ADDR_HI_LBN 96
+#define MC_CMD_MAC_STATISTICS_IN_DMA_ADDR_HI_WIDTH 32
+/* This is the length of the DMA buffer to use for the transfer of the
+ * statistics. The buffer should be at least DMA_BUFFER_SIZE long, as returned
+ * by MC_CMD_MAC_STATISTICS_DESCRIPTOR. If the supplied buffer is too small,
+ * the command will fail with EINVAL. Only valid if the DMA flag is set to 1.
+ */
+#define MC_CMD_MAC_STATISTICS_IN_DMA_LEN_OFST 16
+#define MC_CMD_MAC_STATISTICS_IN_DMA_LEN_LEN 4
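/* Illustrative sketch, not part of the generated MCDI definitions: packing the
 * CMD word of MC_CMD_MAC_STATISTICS_IN. Each sub-field's _LBN gives its bit
 * position within the dword and _WIDTH its width, so the word is built with
 * plain shift-and-mask; the exact semantics of the PERIODIC_* flags are not
 * spelled out here, this only shows the bit packing.
 */
#include <stdint.h>

static uint32_t mac_statistics_cmd_word(unsigned int period_ms)
{
	uint32_t cmd = 0;

	cmd |= 1u << MC_CMD_MAC_STATISTICS_IN_DMA_LBN;	/* use the DMA buffer */
	cmd |= (period_ms & ((1u << MC_CMD_MAC_STATISTICS_IN_PERIOD_MS_WIDTH) - 1))
	       << MC_CMD_MAC_STATISTICS_IN_PERIOD_MS_LBN;
	return cmd;	/* stored little-endian at MC_CMD_MAC_STATISTICS_IN_CMD_OFST */
}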
+
+/* MC_CMD_MAC_STATISTICS_OUT msgresponse */
+#define MC_CMD_MAC_STATISTICS_OUT_LENMIN 5
+#define MC_CMD_MAC_STATISTICS_OUT_LENMAX 252
+#define MC_CMD_MAC_STATISTICS_OUT_LENMAX_MCDI2 1020
+#define MC_CMD_MAC_STATISTICS_OUT_LEN(num) (4+1*(num))
+#define MC_CMD_MAC_STATISTICS_OUT_DATA_NUM(len) (((len)-4)/1)
+/* length of the data in bytes */
+#define MC_CMD_MAC_STATISTICS_OUT_DATALEN_OFST 0
+#define MC_CMD_MAC_STATISTICS_OUT_DATALEN_LEN 4
+#define MC_CMD_MAC_STATISTICS_OUT_DATA_OFST 4
+#define MC_CMD_MAC_STATISTICS_OUT_DATA_LEN 1
+#define MC_CMD_MAC_STATISTICS_OUT_DATA_MINNUM 1
+#define MC_CMD_MAC_STATISTICS_OUT_DATA_MAXNUM 248
+#define MC_CMD_MAC_STATISTICS_OUT_DATA_MAXNUM_MCDI2 1016
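/* Illustrative sketch, not part of the generated MCDI definitions: taking a
 * consistent snapshot of the stats DMA buffer using the GENERATION_START /
 * GENERATION_END markers described for MC_CMD_STAT_ID. The 64-bit-per-entry
 * layout is an assumption of this sketch; the header above only specifies
 * entry indices and the total DMA_BUFFER_SIZE.
 */
#include <stdint.h>
#include <string.h>

static int copy_stats_snapshot(const volatile uint64_t *dma_buf,
			       uint64_t *snapshot, unsigned int n_entries)
{
	unsigned int retries = 10;

	while (retries--) {
		memcpy(snapshot, (const void *)dma_buf,
		       (size_t)n_entries * sizeof(*snapshot));
		/* First and last entries are the generation markers; if they
		 * match, no new DMA transfer raced with the copy. (Real driver
		 * code would also need appropriate read memory barriers.)
		 */
		if (snapshot[0] == snapshot[n_entries - 1])
			return 0;
	}
	return -1;	/* buffer kept changing underneath us */
}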
+
+/* NET_PORT_HANDLE_DESC structuredef: Network port descriptor containing a port
+ * handle and attributes used, for example, in MC_CMD_ENUM_PORTS.
+ */
+#define NET_PORT_HANDLE_DESC_LEN 53
+/* The handle to identify the port */
+#define NET_PORT_HANDLE_DESC_PORT_HANDLE_OFST 0
+#define NET_PORT_HANDLE_DESC_PORT_HANDLE_LEN 4
+#define NET_PORT_HANDLE_DESC_PORT_HANDLE_LBN 0
+#define NET_PORT_HANDLE_DESC_PORT_HANDLE_WIDTH 32
+/* Includes the type of port e.g. physical, virtual or MAE MPORT and other
+ * properties relevant to the port.
+ */
+#define NET_PORT_HANDLE_DESC_PORT_PROPERTIES_OFST 4
+#define NET_PORT_HANDLE_DESC_PORT_PROPERTIES_LEN 8
+#define NET_PORT_HANDLE_DESC_PORT_PROPERTIES_LO_OFST 4
+#define NET_PORT_HANDLE_DESC_PORT_PROPERTIES_LO_LEN 4
+#define NET_PORT_HANDLE_DESC_PORT_PROPERTIES_LO_LBN 32
+#define NET_PORT_HANDLE_DESC_PORT_PROPERTIES_LO_WIDTH 32
+#define NET_PORT_HANDLE_DESC_PORT_PROPERTIES_HI_OFST 8
+#define NET_PORT_HANDLE_DESC_PORT_PROPERTIES_HI_LEN 4
+#define NET_PORT_HANDLE_DESC_PORT_PROPERTIES_HI_LBN 64
+#define NET_PORT_HANDLE_DESC_PORT_PROPERTIES_HI_WIDTH 32
+#define NET_PORT_HANDLE_DESC_PORT_TYPE_OFST 4
+#define NET_PORT_HANDLE_DESC_PORT_TYPE_LBN 0
+#define NET_PORT_HANDLE_DESC_PORT_TYPE_WIDTH 3
+#define NET_PORT_HANDLE_DESC_PHYSICAL 0x0 /* enum */
+#define NET_PORT_HANDLE_DESC_VIRTUAL 0x1 /* enum */
+#define NET_PORT_HANDLE_DESC_MPORT 0x2 /* enum */
+#define NET_PORT_HANDLE_DESC_IS_ZOMBIE_OFST 4
+#define NET_PORT_HANDLE_DESC_IS_ZOMBIE_LBN 8
+#define NET_PORT_HANDLE_DESC_IS_ZOMBIE_WIDTH 1
+#define NET_PORT_HANDLE_DESC_PORT_PROPERTIES_LBN 32
+#define NET_PORT_HANDLE_DESC_PORT_PROPERTIES_WIDTH 64
+/* The dynamic change that led to the port enumeration */
+#define NET_PORT_HANDLE_DESC_ENTRY_SRC_OFST 12
+#define NET_PORT_HANDLE_DESC_ENTRY_SRC_LEN 1
+/* enum: Indicates that the ENTRY_SRC field has not been initialized. */
+#define NET_PORT_HANDLE_DESC_UNKNOWN 0x0
+/* enum: The port was enumerated at start of day. */
+#define NET_PORT_HANDLE_DESC_PRESENT 0x1
+/* enum: The port was dynamically added. */
+#define NET_PORT_HANDLE_DESC_ADDED 0x2
+/* enum: The port was dynamically deleted. */
+#define NET_PORT_HANDLE_DESC_DELETED 0x3
+#define NET_PORT_HANDLE_DESC_ENTRY_SRC_LBN 96
+#define NET_PORT_HANDLE_DESC_ENTRY_SRC_WIDTH 8
+/* This is an opaque 40 byte label exposed to users as a unique identifier of
+ * the port. It is represented as a zero-terminated ASCII string and assigned
+ * by the port administrator which is typically either the firmware for a
+ * physical port or the host software responsible for creating the virtual
+ * port. The label is conveyed to the driver after assignment, which, unlike
+ * the port administrator, does not need to know how to interpret the label.
+ */
+#define NET_PORT_HANDLE_DESC_PORT_LABEL_OFST 13
+#define NET_PORT_HANDLE_DESC_PORT_LABEL_LEN 40
+#define NET_PORT_HANDLE_DESC_PORT_LABEL_LBN 104
+#define NET_PORT_HANDLE_DESC_PORT_LABEL_WIDTH 320
+
+
+/***********************************/
+/* MC_CMD_ENUM_PORTS
+ * This command returns handles for all ports present in the system. The PCIe
+ * function type of each port (either physical or virtual) is also reported.
+ * After a start-of-day port enumeration, firmware keeps track of all available
+ * ports upon creation or deletion and updates the ports if there is a change.
+ * This command is cleared after a control interface reset (e.g. FLR,
+ * ENTITY_RESET), in which case it must be called again to reenumerate the
+ * ports. The command is also clear-on-read and repeated calls will drain the
+ * buffer.
+ */
+#define MC_CMD_ENUM_PORTS 0x1e5
+#undef MC_CMD_0x1e5_PRIVILEGE_CTG
+
+#define MC_CMD_0x1e5_PRIVILEGE_CTG SRIOV_CTG_LINK
+
+/* MC_CMD_ENUM_PORTS_IN msgrequest */
+#define MC_CMD_ENUM_PORTS_IN_LEN 0
+
+/* MC_CMD_ENUM_PORTS_OUT msgresponse */
+#define MC_CMD_ENUM_PORTS_OUT_LENMIN 12
+#define MC_CMD_ENUM_PORTS_OUT_LENMAX 252
+#define MC_CMD_ENUM_PORTS_OUT_LENMAX_MCDI2 1020
+#define MC_CMD_ENUM_PORTS_OUT_LEN(num) (12+1*(num))
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_NUM(len) (((len)-12)/1)
+/* Any unused flags are reserved and must be ignored. */
+#define MC_CMD_ENUM_PORTS_OUT_FLAGS_OFST 0
+#define MC_CMD_ENUM_PORTS_OUT_FLAGS_LEN 4
+#define MC_CMD_ENUM_PORTS_OUT_MORE_OFST 0
+#define MC_CMD_ENUM_PORTS_OUT_MORE_LBN 0
+#define MC_CMD_ENUM_PORTS_OUT_MORE_WIDTH 1
+/* The number of NET_PORT_HANDLE_DESC structures in PORT_HANDLES. */
+#define MC_CMD_ENUM_PORTS_OUT_PORT_COUNT_OFST 4
+#define MC_CMD_ENUM_PORTS_OUT_PORT_COUNT_LEN 4
+#define MC_CMD_ENUM_PORTS_OUT_SIZEOF_NET_PORT_HANDLE_DESC_OFST 8
+#define MC_CMD_ENUM_PORTS_OUT_SIZEOF_NET_PORT_HANDLE_DESC_LEN 4
+/* Array of NET_PORT_HANDLE_DESC structures. Callers must use the
+ * SIZEOF_NET_PORT_HANDLE_DESC field as the array stride between entries.
+ * This may also allow for tail padding for alignment. Fields beyond
+ * SIZEOF_NET_PORT_HANDLE_DESC are not present.
+ */
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_OFST 12
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_LEN 1
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_MINNUM 0
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_MAXNUM 240
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_MAXNUM_MCDI2 1008
+/* See structuredef: NET_PORT_HANDLE_DESC */
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_PORT_HANDLE_OFST 12
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_PORT_HANDLE_LEN 4
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_PORT_PROPERTIES_OFST 16
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_PORT_PROPERTIES_LEN 8
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_PORT_PROPERTIES_LO_OFST 16
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_PORT_PROPERTIES_LO_LEN 4
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_PORT_PROPERTIES_LO_LBN 128
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_PORT_PROPERTIES_LO_WIDTH 32
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_PORT_PROPERTIES_HI_OFST 20
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_PORT_PROPERTIES_HI_LEN 4
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_PORT_PROPERTIES_HI_LBN 160
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_PORT_PROPERTIES_HI_WIDTH 32
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_PORT_TYPE_LBN 128
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_PORT_TYPE_WIDTH 3
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_IS_ZOMBIE_LBN 136
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_IS_ZOMBIE_WIDTH 1
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_ENTRY_SRC_OFST 24
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_ENTRY_SRC_LEN 1
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_PORT_LABEL_OFST 25
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_PORT_LABEL_LEN 40
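/* Illustrative sketch, not part of the generated MCDI definitions: iterating
 * the PORT_HANDLES array of an MC_CMD_ENUM_PORTS response using
 * SIZEOF_NET_PORT_HANDLE_DESC as the stride, as required above, and pulling
 * out the 32-bit port handle and the zero-terminated port label from each
 * NET_PORT_HANDLE_DESC entry. get_le32() is a local little-endian helper.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t get_le32(const uint8_t *p)
{
	return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
	       ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

static void dump_enum_ports(const uint8_t *resp, size_t resp_len)
{
	uint32_t count = get_le32(resp + MC_CMD_ENUM_PORTS_OUT_PORT_COUNT_OFST);
	uint32_t stride = get_le32(resp + MC_CMD_ENUM_PORTS_OUT_SIZEOF_NET_PORT_HANDLE_DESC_OFST);
	uint32_t i;

	for (i = 0; i < count; i++) {
		const uint8_t *desc = resp + MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_OFST +
				      (size_t)i * stride;
		char label[NET_PORT_HANDLE_DESC_PORT_LABEL_LEN + 1];

		if (desc + NET_PORT_HANDLE_DESC_LEN > resp + resp_len)
			break;	/* truncated response */
		memcpy(label, desc + NET_PORT_HANDLE_DESC_PORT_LABEL_OFST,
		       NET_PORT_HANDLE_DESC_PORT_LABEL_LEN);
		label[NET_PORT_HANDLE_DESC_PORT_LABEL_LEN] = '\0';
		printf("port handle %#x label \"%s\"\n",
		       get_le32(desc + NET_PORT_HANDLE_DESC_PORT_HANDLE_OFST), label);
	}
}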
+
+
+/***********************************/
+/* MC_CMD_GET_TRANSCEIVER_PROPERTIES
+ * Read properties of the transceiver associated with the port. Can be either
+ * for a fixed onboard transceiver or an inserted module. The returned data is
+ * interpreted from the transceiver hardware and may be fixed up by the
+ * firmware. Use MC_CMD_GET_MODULE_DATA to get raw undecoded data.
+ */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES 0x1e6
+#undef MC_CMD_0x1e6_PRIVILEGE_CTG
+
+#define MC_CMD_0x1e6_PRIVILEGE_CTG SRIOV_CTG_LINK
+
+/* MC_CMD_GET_TRANSCEIVER_PROPERTIES_IN msgrequest */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_IN_LEN 4
+/* Handle to port to get transceiver properties from. */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_IN_PORT_HANDLE_OFST 0
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_IN_PORT_HANDLE_LEN 4
+
+/* MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT msgresponse */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_LEN 89
+/* Supported technology abilities. */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_TECH_ABILITIES_MASK_OFST 0
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_TECH_ABILITIES_MASK_LEN 16
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* MC_CMD_ETH_TECH/TECH */
+/* Reserved for future expansion to accommodate future Ethernet technology
+ * expansion.
+ */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_RESERVED_OFST 16
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_RESERVED_LEN 16
+/* Preferred FEC modes. This is a function of the cable type and length. */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_PREFERRED_FEC_MASK_OFST 32
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_PREFERRED_FEC_MASK_LEN 4
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* FEC_TYPE/TYPE */
+/* SFF-8024 code reported by the module. */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_CODE_OFST 36
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_CODE_LEN 2
+/* Medium. */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_MEDIUM_OFST 38
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_MEDIUM_LEN 1
+/* enum property: value */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_UNKNOWN 0x0 /* enum */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_COPPER 0x1 /* enum */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_OPTICAL 0x2 /* enum */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_BACKPLANE 0x3 /* enum */
+/* Identifies the tech */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_MEDIA_SUBTYPE_OFST 39
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_MEDIA_SUBTYPE_LEN 1
+/* enum property: value */
+/* MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_UNKNOWN 0x0 */
+/* enum: Ethernet over twisted-pair copper cables for distances up to 100
+ * meters.
+ */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_BASET 0x1
+/* enum: Ethernet over twin-axial, balanced copper cable. */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_CR 0x2
+/* enum: Ethernet over backplane for connections on the same board. */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_KX 0x3
+/* enum: Ethernet over a single backplane lane for connections between
+ * different boards.
+ */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_KR 0x4
+/* enum: Ethernet over copper backplane. */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_KP 0x5
+/* enum: Ethernet over optical fiber. */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_BASEX 0x6
+/* enum: Short range Ethernet over multimode optical fiber (see IEEE 802.3
+ * Clause 49 and 52).
+ */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_SR 0x7
+/* enum: Long range, extended range or far reach Ethernet used with single-mode
+ * optical fiber.
+ */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_LR_ER_FR 0x8
+/* enum: Long reach multimode Ethernet over multimode optical fiber. */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_LRM 0x9
+/* enum: Very short reach PAM4 Ethernet over multimode optical fiber (see IEEE
+ * 802.3db).
+ */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_VR 0xa
+/* enum: BASE-R encoding and PAM4 over single-mode fiber with reach up to at
+ * least 500 meters (IEEE 802.3 Clause 121 and 124).
+ */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_DR 0xb
+/* String of the vendor name as interpreted by NMC firmware. NMC firmware
+ * applies workarounds for known buggy transceivers. The vendor name is
+ * presented as 16 bytes of ASCII characters padded with spaces. It can also be
+ * represented as 16 bytes of zeros if the field is unspecified for the
+ * connected module. See SFF-8472/CMIS specifications for details.
+ */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_VENDOR_NAME_OFST 40
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_VENDOR_NAME_LEN 1
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_VENDOR_NAME_NUM 16
+/* The vendor part number as interpreted by NMC firmware. The field is presented
+ * as 16 bytes of ASCII chars padded with spaces. It can also be 16 bytes of
+ * zeros if the field is unspecified for the connected module.
+ */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_VENDOR_PN_OFST 56
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_VENDOR_PN_LEN 1
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_VENDOR_PN_NUM 16
+/* Serial number of the module presented as 16 bytes of ASCII characters padded
+ * with spaces. It can also be 16 bytes of zeros if the field is unspecified
+ * for the connected module. See SFF-8472/CMIS specifications for details.
+ */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_SERIAL_NUMBER_OFST 72
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_SERIAL_NUMBER_LEN 1
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_SERIAL_NUMBER_NUM 16
+/* This reports the number of module changes detected by the NMC firmware. */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_PORT_MODULECHANGE_SEQ_NUM_OFST 88
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_PORT_MODULECHANGE_SEQ_NUM_LEN 1
+
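
A minimal sketch of consuming this response (not part of the generated header): it assumes the caller has already issued MC_CMD_GET_TRANSCEIVER_PROPERTIES through the driver's MCDI transport and holds the raw little-endian response buffer; the helper name and signature are illustrative only.

#include <stddef.h>
#include <stdint.h>
#include <string.h>

static int decode_transceiver_props(const uint8_t *outbuf, size_t outlen,
                                    uint8_t *medium, char vendor[17])
{
        if (outlen < MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_LEN)
                return -1;      /* short response */

        *medium = outbuf[MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_MEDIUM_OFST];

        /* VENDOR_NAME is 16 ASCII bytes, space padded, never NUL terminated. */
        memcpy(vendor,
               outbuf + MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_VENDOR_NAME_OFST,
               MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_VENDOR_NAME_NUM);
        vendor[16] = '\0';
        return 0;
}

The MEDIUM byte can then be compared against the UNKNOWN/COPPER/OPTICAL/BACKPLANE enum values defined above.
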
+
+/***********************************/
+/* MC_CMD_GET_FIXED_PORT_PROPERTIES
+ */
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES 0x1e7
+#undef MC_CMD_0x1e7_PRIVILEGE_CTG
+
+#define MC_CMD_0x1e7_PRIVILEGE_CTG SRIOV_CTG_LINK
+
+/* MC_CMD_GET_FIXED_PORT_PROPERTIES_IN msgrequest: In this context, the port
+ * consists of the MAC and the PHY, and excludes any modules inserted into the
+ * cage. This information is fixed for a given board but not for a given ASIC.
+ * This command reports properties for the port as it is currently configured,
+ * and not its hardware capabilities, which can be better than the current
+ * configuration.
+ */
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_IN_LEN 4
+/* Handle of the port from which to retrieve properties. */
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_IN_PORT_HANDLE_OFST 0
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_IN_PORT_HANDLE_LEN 4
+
+/* MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT msgresponse */
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_LEN 36
+/* Supported capabilities of the port in its current configuration. */
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_ABILITIES_OFST 0
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_ABILITIES_LEN 25
+/* See structuredef: MC_CMD_ETH_AN_FIELDS */
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_ABILITIES_TECH_MASK_OFST 0
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_ABILITIES_TECH_MASK_LEN 16
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_ABILITIES_FEC_MASK_OFST 16
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_ABILITIES_FEC_MASK_LEN 4
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_ABILITIES_FEC_REQ_OFST 20
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_ABILITIES_FEC_REQ_LEN 4
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_ABILITIES_PAUSE_MASK_OFST 24
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_ABILITIES_PAUSE_MASK_LEN 1
+/* Number of lanes supported by the port in its current configuration. */
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_NUM_LANES_OFST 25
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_NUM_LANES_LEN 1
+/* Bitmask of supported loopback modes. Where the response to this command
+ * includes the LOOPBACK_MODES_MASK_V2 field, that field should be used in
+ * preference to ensure that all available loopback modes are seen.
+ */
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_LOOPBACK_MODES_MASK_OFST 26
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_LOOPBACK_MODES_MASK_LEN 1
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* MC_CMD_LOOPBACK_V2/MODE */
+/* This field serves as a cage index that uniquely identifies the cage to which
+ * the module is connected. This is useful when splitter cables are used,
+ * where a single cage carries multiple ports.
+ */
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_MDI_INDEX_OFST 27
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_MDI_INDEX_LEN 1
+/* This bitmask is used to specify the lanes within the cage identified by
+ * MDI_INDEX that are allocated to the port.
+ */
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_MDI_LANE_MASK_OFST 28
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_MDI_LANE_MASK_LEN 1
+/* Maximum frame length supported by the port in its current configuration. */
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_MAX_FRAME_LEN_OFST 32
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_MAX_FRAME_LEN_LEN 4
+
+/* MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2 msgresponse */
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_LEN 48
+/* Supported capabilities of the port in its current configuration. */
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_ABILITIES_OFST 0
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_ABILITIES_LEN 25
+/* Number of lanes supported by the port in its current configuration. */
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_NUM_LANES_OFST 25
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_NUM_LANES_LEN 1
+/* Bitmask of supported loopback modes. Where the response to this command
+ * includes the LOOPBACK_MODES_MASK_V2 field, that field should be used in
+ * preference to ensure that all available loopback modes are seen.
+ */
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_LOOPBACK_MODES_MASK_OFST 26
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_LOOPBACK_MODES_MASK_LEN 1
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* MC_CMD_LOOPBACK_V2/MODE */
+/* This field serves as a cage index that uniquely identifies the cage to which
+ * the module is connected. This is useful when splitter cables are used,
+ * where a single cage carries multiple ports.
+ */
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_MDI_INDEX_OFST 27
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_MDI_INDEX_LEN 1
+/* This bitmask is used to specify the lanes within the cage identified by
+ * MDI_INDEX that are allocated to the port.
+ */
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_MDI_LANE_MASK_OFST 28
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_MDI_LANE_MASK_LEN 1
+/* Maximum frame length supported by the port in its current configuration. */
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_MAX_FRAME_LEN_OFST 32
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_MAX_FRAME_LEN_LEN 4
+/* Bitmask of supported loopback modes. This field replaces the
+ * LOOPBACK_MODES_MASK field which is defined under version 1 of this command.
+ */
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_LOOPBACK_MODES_MASK_V2_OFST 40
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_LOOPBACK_MODES_MASK_V2_LEN 8
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_LOOPBACK_MODES_MASK_V2_LO_OFST 40
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_LOOPBACK_MODES_MASK_V2_LO_LEN 4
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_LOOPBACK_MODES_MASK_V2_LO_LBN 320
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_LOOPBACK_MODES_MASK_V2_LO_WIDTH 32
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_LOOPBACK_MODES_MASK_V2_HI_OFST 44
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_LOOPBACK_MODES_MASK_V2_HI_LEN 4
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_LOOPBACK_MODES_MASK_V2_HI_LBN 352
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_LOOPBACK_MODES_MASK_V2_HI_WIDTH 32
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* MC_CMD_LOOPBACK_V2/MODE */
+
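
As a rough illustration of the V1/V2 fallback described above (a sketch under the assumption of a little-endian host, not driver code): a caller can key off the response length to decide whether the 64-bit LOOPBACK_MODES_MASK_V2 field is present, and otherwise fall back to the 8-bit V1 mask.

#include <stddef.h>
#include <stdint.h>
#include <string.h>

static uint64_t fixed_port_loopback_modes(const uint8_t *outbuf, size_t outlen)
{
        if (outlen >= MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_LEN) {
                uint32_t lo, hi;

                memcpy(&lo, outbuf +
                       MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_LOOPBACK_MODES_MASK_V2_LO_OFST,
                       sizeof(lo));
                memcpy(&hi, outbuf +
                       MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_LOOPBACK_MODES_MASK_V2_HI_OFST,
                       sizeof(hi));
                return ((uint64_t)hi << 32) | lo;       /* little-endian host assumed */
        }
        /* Older firmware: only the 8-bit V1 mask is present. */
        return outbuf[MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_LOOPBACK_MODES_MASK_OFST];
}
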
+
+/***********************************/
+/* MC_CMD_GET_MODULE_DATA
+ * Read media-specific data from the PHY (e.g. SFP/SFP+ module ID information
+ * for SFP+ PHYs). This command returns raw data from the module's EEPROM and
+ * it is not interpreted by the MC. Use MC_CMD_GET_TRANSCEIVER_PROPERTIES to
+ * get interpreted data. Return code: 0, ENOENT
+ */
+#define MC_CMD_GET_MODULE_DATA 0x1e8
+#undef MC_CMD_0x1e8_PRIVILEGE_CTG
+
+#define MC_CMD_0x1e8_PRIVILEGE_CTG SRIOV_CTG_LINK
+
+/* MC_CMD_GET_MODULE_DATA_IN msgrequest */
+#define MC_CMD_GET_MODULE_DATA_IN_LEN 16
+/* Handle to identify the port from which to request module properties. */
+#define MC_CMD_GET_MODULE_DATA_IN_PORT_HANDLE_OFST 0
+#define MC_CMD_GET_MODULE_DATA_IN_PORT_HANDLE_LEN 4
+/* 7 bit I2C address of the device. DEPRECATED: This field is replaced by
+ * MODULE_ADDR in V2. Use V2 of this command for proper alignment and easier
+ * access.
+ */
+#define MC_CMD_GET_MODULE_DATA_IN_DEVADDR_LBN 32
+#define MC_CMD_GET_MODULE_DATA_IN_DEVADDR_WIDTH 7
+/* 0 if the page does not support banked access, non-zero otherwise. Non-zero
+ * BANK is valid if OFFSET is in the range 80h - ffh, i.e. in the Upper Memory
+ * region.
+ */
+#define MC_CMD_GET_MODULE_DATA_IN_BANK_OFST 6
+#define MC_CMD_GET_MODULE_DATA_IN_BANK_LEN 2
+/* 0 if paged access is not supported, non-zero otherwise. Non-zero PAGE is
+ * valid if OFFSET is in the range 80h - ffh.
+ */
+#define MC_CMD_GET_MODULE_DATA_IN_PAGE_OFST 8
+#define MC_CMD_GET_MODULE_DATA_IN_PAGE_LEN 2
+/* Offset in the range 00h - 7fh to access lower memory. Offset in the range
+ * 80h - ffh to access upper memory.
+ */
+#define MC_CMD_GET_MODULE_DATA_IN_OFFSET_OFST 10
+#define MC_CMD_GET_MODULE_DATA_IN_OFFSET_LEN 1
+#define MC_CMD_GET_MODULE_DATA_IN_LENGTH_OFST 12
+#define MC_CMD_GET_MODULE_DATA_IN_LENGTH_LEN 4
+
+/* MC_CMD_GET_MODULE_DATA_IN_V2 msgrequest: Updated MC_CMD_GET_MODULE_DATA with
+ * an 8-bit wide ADDRESSING field. This new field provides a correctly
+ * aligned container for the 7-bit DEVADDR field from V1, now renamed
+ * MODULE_ADDR.
+ */
+#define MC_CMD_GET_MODULE_DATA_IN_V2_LEN 16
+/* Handle to identify the port from which to request module properties. */
+#define MC_CMD_GET_MODULE_DATA_IN_V2_PORT_HANDLE_OFST 0
+#define MC_CMD_GET_MODULE_DATA_IN_V2_PORT_HANDLE_LEN 4
+/* 7 bit I2C address of the device. DEPRECATED: This field is replaced by
+ * MODULE_ADDR in V2. Use V2 of this command for proper alignment and easier
+ * access.
+ */
+#define MC_CMD_GET_MODULE_DATA_IN_V2_DEVADDR_LBN 32
+#define MC_CMD_GET_MODULE_DATA_IN_V2_DEVADDR_WIDTH 7
+/* 0 if the page does not support banked access, non-zero otherwise. Non-zero
+ * BANK is valid if OFFSET is in the range 80h - ffh, i.e. in the Upper Memory
+ * region.
+ */
+#define MC_CMD_GET_MODULE_DATA_IN_V2_BANK_OFST 6
+#define MC_CMD_GET_MODULE_DATA_IN_V2_BANK_LEN 2
+/* 0 if paged access is not supported, non-zero otherwise. Non-zero PAGE is
+ * valid if OFFSET is in the range 80h - ffh.
+ */
+#define MC_CMD_GET_MODULE_DATA_IN_V2_PAGE_OFST 8
+#define MC_CMD_GET_MODULE_DATA_IN_V2_PAGE_LEN 2
+/* Offset in the range 00h - 7fh to access lower memory. Offset in the range
+ * 80h - ffh to access upper memory.
+ */
+#define MC_CMD_GET_MODULE_DATA_IN_V2_OFFSET_OFST 10
+#define MC_CMD_GET_MODULE_DATA_IN_V2_OFFSET_LEN 1
+#define MC_CMD_GET_MODULE_DATA_IN_V2_LENGTH_OFST 12
+#define MC_CMD_GET_MODULE_DATA_IN_V2_LENGTH_LEN 4
+/* Container for 7 bit I2C addresses. */
+#define MC_CMD_GET_MODULE_DATA_IN_V2_ADDRESSING_OFST 4
+#define MC_CMD_GET_MODULE_DATA_IN_V2_ADDRESSING_LEN 1
+#define MC_CMD_GET_MODULE_DATA_IN_V2_MODULE_ADDR_OFST 4
+#define MC_CMD_GET_MODULE_DATA_IN_V2_MODULE_ADDR_LBN 0
+#define MC_CMD_GET_MODULE_DATA_IN_V2_MODULE_ADDR_WIDTH 7
+
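
A hedged sketch of building the V2 request for a paged module read. The 0x50 module address, the little-endian host assumption, and the helper name are illustrative only and not part of the MCDI definition; a real driver would marshal these fields with its own MCDI buffer helpers.

#include <stdint.h>
#include <string.h>

static void build_module_data_req_v2(uint8_t inbuf[MC_CMD_GET_MODULE_DATA_IN_V2_LEN],
                                     uint32_t port_handle, uint16_t page,
                                     uint8_t offset, uint32_t len)
{
        memset(inbuf, 0, MC_CMD_GET_MODULE_DATA_IN_V2_LEN);    /* BANK left at 0 */
        memcpy(inbuf + MC_CMD_GET_MODULE_DATA_IN_V2_PORT_HANDLE_OFST, &port_handle,
               MC_CMD_GET_MODULE_DATA_IN_V2_PORT_HANDLE_LEN);
        /* MODULE_ADDR occupies bits 6:0 of the ADDRESSING byte; 0x50 is the
         * usual SFP/QSFP management address (illustrative only).
         */
        inbuf[MC_CMD_GET_MODULE_DATA_IN_V2_ADDRESSING_OFST] = 0x50;
        memcpy(inbuf + MC_CMD_GET_MODULE_DATA_IN_V2_PAGE_OFST, &page,
               MC_CMD_GET_MODULE_DATA_IN_V2_PAGE_LEN);
        /* Offsets 00h-7fh select lower memory; 80h-ffh select the
         * PAGE/BANK-qualified upper memory region.
         */
        inbuf[MC_CMD_GET_MODULE_DATA_IN_V2_OFFSET_OFST] = offset;
        memcpy(inbuf + MC_CMD_GET_MODULE_DATA_IN_V2_LENGTH_OFST, &len,
               MC_CMD_GET_MODULE_DATA_IN_V2_LENGTH_LEN);
}

On the response side, MC_CMD_GET_MODULE_DATA_OUT_DATA_NUM(outlen) (defined just below) gives the number of payload bytes returned, starting at MC_CMD_GET_MODULE_DATA_OUT_DATA_OFST.
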
+/* MC_CMD_GET_MODULE_DATA_OUT msgresponse */
+#define MC_CMD_GET_MODULE_DATA_OUT_LENMIN 5
+#define MC_CMD_GET_MODULE_DATA_OUT_LENMAX 252
+#define MC_CMD_GET_MODULE_DATA_OUT_LENMAX_MCDI2 1020
+#define MC_CMD_GET_MODULE_DATA_OUT_LEN(num) (4+1*(num))
+#define MC_CMD_GET_MODULE_DATA_OUT_DATA_NUM(len) (((len)-4)/1)
+/* length of the data in bytes */
+#define MC_CMD_GET_MODULE_DATA_OUT_DATALEN_OFST 0
+#define MC_CMD_GET_MODULE_DATA_OUT_DATALEN_LEN 4
+#define MC_CMD_GET_MODULE_DATA_OUT_DATA_OFST 4
+#define MC_CMD_GET_MODULE_DATA_OUT_DATA_LEN 1
+#define MC_CMD_GET_MODULE_DATA_OUT_DATA_MINNUM 1
+#define MC_CMD_GET_MODULE_DATA_OUT_DATA_MAXNUM 248
+#define MC_CMD_GET_MODULE_DATA_OUT_DATA_MAXNUM_MCDI2 1016
+
+/* EVENT_MASK structuredef */
+#define EVENT_MASK_LEN 4
+#define EVENT_MASK_TYPE_OFST 0
+#define EVENT_MASK_TYPE_LEN 4
+/* enum: PORT_LINKCHANGE event is enabled */
+#define EVENT_MASK_PORT_LINKCHANGE 0x0
+/* enum: PORT_MODULECHANGE event is enabled */
+#define EVENT_MASK_PORT_MODULECHANGE 0x1
+#define EVENT_MASK_TYPE_LBN 0
+#define EVENT_MASK_TYPE_WIDTH 32
+
+
+/***********************************/
+/* MC_CMD_SET_NETPORT_EVENTS_MASK
+ */
+#define MC_CMD_SET_NETPORT_EVENTS_MASK 0x1e9
+#undef MC_CMD_0x1e9_PRIVILEGE_CTG
+
+#define MC_CMD_0x1e9_PRIVILEGE_CTG SRIOV_CTG_LINK
+
+/* MC_CMD_SET_NETPORT_EVENTS_MASK_IN msgrequest: Enable or disable delivery of
+ * specified network port events for a given port identified by PORT_HANDLE. At
+ * start of day, or after any control interface reset (FLR, ENTITY_RESET,
+ * etc.), all event delivery is disabled for all ports associated with the
+ * control interface.
+ */
+#define MC_CMD_SET_NETPORT_EVENTS_MASK_IN_LEN 8
+/* Handle of the port for which to set the event delivery mask. */
+#define MC_CMD_SET_NETPORT_EVENTS_MASK_IN_PORT_HANDLE_OFST 0
+#define MC_CMD_SET_NETPORT_EVENTS_MASK_IN_PORT_HANDLE_LEN 4
+/* Bitmask of events to enable. Event delivery is enabled when corresponding
+ * bit is 1, disabled when 0.
+ */
+#define MC_CMD_SET_NETPORT_EVENTS_MASK_IN_EVENT_MASK_OFST 4
+#define MC_CMD_SET_NETPORT_EVENTS_MASK_IN_EVENT_MASK_LEN 4
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* EVENT_MASK/TYPE */
+
+/* MC_CMD_SET_NETPORT_EVENTS_MASK_OUT msgresponse */
+#define MC_CMD_SET_NETPORT_EVENTS_MASK_OUT_LEN 0
+
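
A minimal sketch of enabling both event types for one port, using only the definitions above; the helper name and the assumption of a little-endian host are illustrative, not part of the protocol definition.

#include <stdint.h>
#include <string.h>

static void build_set_netport_events_req(uint8_t inbuf[MC_CMD_SET_NETPORT_EVENTS_MASK_IN_LEN],
                                         uint32_t port_handle)
{
        /* EVENT_MASK/TYPE is a "bitshift" enum: each value is a bit position. */
        uint32_t mask = (1u << EVENT_MASK_PORT_LINKCHANGE) |
                        (1u << EVENT_MASK_PORT_MODULECHANGE);

        memset(inbuf, 0, MC_CMD_SET_NETPORT_EVENTS_MASK_IN_LEN);
        memcpy(inbuf + MC_CMD_SET_NETPORT_EVENTS_MASK_IN_PORT_HANDLE_OFST, &port_handle,
               MC_CMD_SET_NETPORT_EVENTS_MASK_IN_PORT_HANDLE_LEN);
        memcpy(inbuf + MC_CMD_SET_NETPORT_EVENTS_MASK_IN_EVENT_MASK_OFST, &mask,
               MC_CMD_SET_NETPORT_EVENTS_MASK_IN_EVENT_MASK_LEN);
}
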
/***********************************/
-/* MC_CMD_EVENT_CTRL
- * Configure which categories of unsolicited events the driver expects to
- * receive (Riverhead).
- */
-#define MC_CMD_EVENT_CTRL 0x69
-#undef MC_CMD_0x69_PRIVILEGE_CTG
-
-#define MC_CMD_0x69_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_EVENT_CTRL_IN msgrequest */
-#define MC_CMD_EVENT_CTRL_IN_LENMIN 0
-#define MC_CMD_EVENT_CTRL_IN_LENMAX 252
-#define MC_CMD_EVENT_CTRL_IN_LENMAX_MCDI2 1020
-#define MC_CMD_EVENT_CTRL_IN_LEN(num) (0+4*(num))
-#define MC_CMD_EVENT_CTRL_IN_EVENT_TYPE_NUM(len) (((len)-0)/4)
-/* Array of event categories for which the driver wishes to receive events. */
-#define MC_CMD_EVENT_CTRL_IN_EVENT_TYPE_OFST 0
-#define MC_CMD_EVENT_CTRL_IN_EVENT_TYPE_LEN 4
-#define MC_CMD_EVENT_CTRL_IN_EVENT_TYPE_MINNUM 0
-#define MC_CMD_EVENT_CTRL_IN_EVENT_TYPE_MAXNUM 63
-#define MC_CMD_EVENT_CTRL_IN_EVENT_TYPE_MAXNUM_MCDI2 255
-/* enum: Driver wishes to receive LINKCHANGE events. */
-#define MC_CMD_EVENT_CTRL_IN_MCDI_EVENT_CODE_LINKCHANGE 0x0
-/* enum: Driver wishes to receive SENSOR_CHANGE and SENSOR_STATE_CHANGE events.
- */
-#define MC_CMD_EVENT_CTRL_IN_MCDI_EVENT_CODE_SENSOREVT 0x1
-/* enum: Driver wishes to receive receive errors. */
-#define MC_CMD_EVENT_CTRL_IN_MCDI_EVENT_CODE_RX_ERR 0x2
-/* enum: Driver wishes to receive transmit errors. */
-#define MC_CMD_EVENT_CTRL_IN_MCDI_EVENT_CODE_TX_ERR 0x3
-/* enum: Driver wishes to receive firmware alerts. */
-#define MC_CMD_EVENT_CTRL_IN_MCDI_EVENT_CODE_FWALERT 0x4
-/* enum: Driver wishes to receive reboot events. */
-#define MC_CMD_EVENT_CTRL_IN_MCDI_EVENT_CODE_MC_REBOOT 0x5
-
-/* MC_CMD_EVENT_CTRL_OUT msgrequest */
-#define MC_CMD_EVENT_CTRL_OUT_LEN 0
+/* MC_CMD_GET_NETPORT_EVENTS_MASK
+ */
+#define MC_CMD_GET_NETPORT_EVENTS_MASK 0x1ea
+#undef MC_CMD_0x1ea_PRIVILEGE_CTG
+
+#define MC_CMD_0x1ea_PRIVILEGE_CTG SRIOV_CTG_LINK
+
+/* MC_CMD_GET_NETPORT_EVENTS_MASK_IN msgrequest: Get the event delivery mask
+ * for a given port identified by PORT_HANDLE.
+ */
+#define MC_CMD_GET_NETPORT_EVENTS_MASK_IN_LEN 4
+/* Handle of the port to get the event delivery mask for. */
+#define MC_CMD_GET_NETPORT_EVENTS_MASK_IN_PORT_HANDLE_OFST 0
+#define MC_CMD_GET_NETPORT_EVENTS_MASK_IN_PORT_HANDLE_LEN 4
+
+/* MC_CMD_GET_NETPORT_EVENTS_MASK_OUT msgresponse */
+#define MC_CMD_GET_NETPORT_EVENTS_MASK_OUT_LEN 4
+/* Bitmask of events enabled. Event delivery is enabled when corresponding bit
+ * is 1, disabled when 0.
+ */
+#define MC_CMD_GET_NETPORT_EVENTS_MASK_OUT_EVENT_MASK_OFST 0
+#define MC_CMD_GET_NETPORT_EVENTS_MASK_OUT_EVENT_MASK_LEN 4
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* EVENT_MASK/TYPE */
+
+
+/***********************************/
+/* MC_CMD_GET_SUPPORTED_NETPORT_EVENTS
+ */
+#define MC_CMD_GET_SUPPORTED_NETPORT_EVENTS 0x1eb
+#undef MC_CMD_0x1eb_PRIVILEGE_CTG
+
+#define MC_CMD_0x1eb_PRIVILEGE_CTG SRIOV_CTG_LINK
+
+/* MC_CMD_GET_SUPPORTED_NETPORT_EVENTS_IN msgrequest: Get network port events
+ * supported by the platform. Information returned is fixed for a given NIC
+ * platform.
+ */
+#define MC_CMD_GET_SUPPORTED_NETPORT_EVENTS_IN_LEN 0
+
+/* MC_CMD_GET_SUPPORTED_NETPORT_EVENTS_OUT msgresponse */
+#define MC_CMD_GET_SUPPORTED_NETPORT_EVENTS_OUT_LEN 4
+/* Bitmask of events supported by the platform. A bit set to 1 indicates that
+ * the corresponding event type is supported.
+ */
+#define MC_CMD_GET_SUPPORTED_NETPORT_EVENTS_OUT_EVENT_MASK_OFST 0
+#define MC_CMD_GET_SUPPORTED_NETPORT_EVENTS_OUT_EVENT_MASK_LEN 4
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* EVENT_MASK/TYPE */
+
+
+/***********************************/
+/* MC_CMD_GET_NETPORT_STATISTICS
+ * Get generic MAC statistics. This call retrieves unified statistics managed
+ * by the MC. The MC will populate and provide all supported statistics in the
+ * format as returned by MC_CMD_MAC_STATISTICS_DESCRIPTOR. Refer to the
+ * aforementioned command for the format and contents of the stats DMA buffer.
+ * To ensure consistent and accurate results, it is essential for the driver to
+ * initialize the DMA buffer with zeros when DMA mode is used. Returns: 0 on
+ * success, ETIME if the DMA buffer is not ready, ENOENT on non-existent port
+ * handle, and EINVAL on invalid parameters (DMA buffer too small).
+ */
+#define MC_CMD_GET_NETPORT_STATISTICS 0x1fa
+#undef MC_CMD_0x1fa_PRIVILEGE_CTG
+
+#define MC_CMD_0x1fa_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_NETPORT_STATISTICS_IN msgrequest */
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_LEN 20
+/* Handle of port to get MAC statistics for. */
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_PORT_HANDLE_OFST 0
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_PORT_HANDLE_LEN 4
+/* Contains options for querying the MAC statistics. */
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_CMD_OFST 4
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_CMD_LEN 4
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_DMA_OFST 4
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_DMA_LBN 0
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_DMA_WIDTH 1
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_CLEAR_OFST 4
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_CLEAR_LBN 1
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_CLEAR_WIDTH 1
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_PERIODIC_CHANGE_OFST 4
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_PERIODIC_CHANGE_LBN 2
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_PERIODIC_CHANGE_WIDTH 1
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_PERIODIC_ENABLE_OFST 4
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_PERIODIC_ENABLE_LBN 3
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_PERIODIC_ENABLE_WIDTH 1
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_PERIODIC_NOEVENT_OFST 4
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_PERIODIC_NOEVENT_LBN 4
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_PERIODIC_NOEVENT_WIDTH 1
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_PERIOD_MS_OFST 4
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_PERIOD_MS_LBN 15
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_PERIOD_MS_WIDTH 17
+/* Specifies the physical address of the DMA buffer to use for statistics
+ * transfer. This field must contain a valid address under either of these
+ * conditions: 1. DMA flag is set (immediate DMA requested) 2. Both
+ * PERIODIC_CHANGE and PERIODIC_ENABLE are set (periodic DMA configured)
+ */
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_DMA_ADDR_OFST 8
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_DMA_ADDR_LEN 8
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_DMA_ADDR_LO_OFST 8
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_DMA_ADDR_LO_LEN 4
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_DMA_ADDR_LO_LBN 64
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_DMA_ADDR_LO_WIDTH 32
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_DMA_ADDR_HI_OFST 12
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_DMA_ADDR_HI_LEN 4
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_DMA_ADDR_HI_LBN 96
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_DMA_ADDR_HI_WIDTH 32
+/* Specifies the length of the DMA buffer in bytes for statistics transfer. The
+ * buffer size must be at least DMA_BUFFER_SIZE bytes (as returned by
+ * MC_CMD_MAC_STATISTICS_DESCRIPTOR). Providing an insufficient buffer size
+ * will result in an EINVAL error. This field must contain a valid length under
+ * either of these conditions: 1. DMA flag is set (immediate DMA requested) 2.
+ * Both PERIODIC_CHANGE and PERIODIC_ENABLE are set (periodic DMA configured)
+ */
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_DMA_LEN_OFST 16
+#define MC_CMD_GET_NETPORT_STATISTICS_IN_DMA_LEN_LEN 4
+
+/* MC_CMD_GET_NETPORT_STATISTICS_OUT msgresponse */
+#define MC_CMD_GET_NETPORT_STATISTICS_OUT_LENMIN 0
+#define MC_CMD_GET_NETPORT_STATISTICS_OUT_LENMAX 248
+#define MC_CMD_GET_NETPORT_STATISTICS_OUT_LENMAX_MCDI2 1016
+#define MC_CMD_GET_NETPORT_STATISTICS_OUT_LEN(num) (0+8*(num))
+#define MC_CMD_GET_NETPORT_STATISTICS_OUT_STATS_NUM(len) (((len)-0)/8)
+/* Statistics buffer. Zero-length if DMA mode is used. The statistics buffer is
+ * an array of 8-byte counter values, containing the generation start marker,
+ * stats counters, and generation end marker. The index of each counter in the
+ * array is reported by the MAC_STATISTICS_DESCRIPTOR command. The same layout
+ * is used for the DMA buffer for DMA mode stats.
+ */
+#define MC_CMD_GET_NETPORT_STATISTICS_OUT_STATS_OFST 0
+#define MC_CMD_GET_NETPORT_STATISTICS_OUT_STATS_LEN 8
+#define MC_CMD_GET_NETPORT_STATISTICS_OUT_STATS_LO_OFST 0
+#define MC_CMD_GET_NETPORT_STATISTICS_OUT_STATS_LO_LEN 4
+#define MC_CMD_GET_NETPORT_STATISTICS_OUT_STATS_LO_LBN 0
+#define MC_CMD_GET_NETPORT_STATISTICS_OUT_STATS_LO_WIDTH 32
+#define MC_CMD_GET_NETPORT_STATISTICS_OUT_STATS_HI_OFST 4
+#define MC_CMD_GET_NETPORT_STATISTICS_OUT_STATS_HI_LEN 4
+#define MC_CMD_GET_NETPORT_STATISTICS_OUT_STATS_HI_LBN 32
+#define MC_CMD_GET_NETPORT_STATISTICS_OUT_STATS_HI_WIDTH 32
+#define MC_CMD_GET_NETPORT_STATISTICS_OUT_STATS_MINNUM 0
+#define MC_CMD_GET_NETPORT_STATISTICS_OUT_STATS_MAXNUM 31
+#define MC_CMD_GET_NETPORT_STATISTICS_OUT_STATS_MAXNUM_MCDI2 127
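
A hedged sketch of how the CMD dword and DMA fields above fit together when requesting an immediate DMA transfer and (re)programming the periodic interval. The helper name and the open-coded shifts are illustrative; a little-endian host is assumed, and a real driver would use its MCDI buffer helpers instead.

#include <stdint.h>
#include <string.h>

static void build_netport_stats_req(uint8_t inbuf[MC_CMD_GET_NETPORT_STATISTICS_IN_LEN],
                                    uint32_t port_handle, uint64_t dma_addr,
                                    uint32_t dma_len, uint32_t period_ms)
{
        uint32_t cmd = 0;

        /* Immediate DMA plus a change to the periodic DMA configuration. */
        cmd |= 1u << MC_CMD_GET_NETPORT_STATISTICS_IN_DMA_LBN;
        cmd |= 1u << MC_CMD_GET_NETPORT_STATISTICS_IN_PERIODIC_CHANGE_LBN;
        cmd |= 1u << MC_CMD_GET_NETPORT_STATISTICS_IN_PERIODIC_ENABLE_LBN;
        /* PERIOD_MS is a 17-bit field at bits 31:15 of the CMD dword. */
        cmd |= (period_ms & 0x1ffffu) << MC_CMD_GET_NETPORT_STATISTICS_IN_PERIOD_MS_LBN;

        memset(inbuf, 0, MC_CMD_GET_NETPORT_STATISTICS_IN_LEN);
        memcpy(inbuf + MC_CMD_GET_NETPORT_STATISTICS_IN_PORT_HANDLE_OFST, &port_handle,
               MC_CMD_GET_NETPORT_STATISTICS_IN_PORT_HANDLE_LEN);
        memcpy(inbuf + MC_CMD_GET_NETPORT_STATISTICS_IN_CMD_OFST, &cmd,
               MC_CMD_GET_NETPORT_STATISTICS_IN_CMD_LEN);
        memcpy(inbuf + MC_CMD_GET_NETPORT_STATISTICS_IN_DMA_ADDR_OFST, &dma_addr,
               MC_CMD_GET_NETPORT_STATISTICS_IN_DMA_ADDR_LEN);
        memcpy(inbuf + MC_CMD_GET_NETPORT_STATISTICS_IN_DMA_LEN_OFST, &dma_len,
               MC_CMD_GET_NETPORT_STATISTICS_IN_DMA_LEN_LEN);
}

When the DMA flag is clear, the same 8-byte counters are instead returned inline in the STATS array of the response; as the command description notes, the DMA buffer must be zero-initialized by the driver before DMA mode is used.
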
/* EVB_PORT_ID structuredef */
#define EVB_PORT_ID_LEN 4
@@ -9706,44 +10573,6 @@
#define EVB_PORT_ID_PORT_ID_LBN 0
#define EVB_PORT_ID_PORT_ID_WIDTH 32
-/* EVB_VLAN_TAG structuredef */
-#define EVB_VLAN_TAG_LEN 2
-/* The VLAN tag value */
-#define EVB_VLAN_TAG_VLAN_ID_LBN 0
-#define EVB_VLAN_TAG_VLAN_ID_WIDTH 12
-#define EVB_VLAN_TAG_MODE_LBN 12
-#define EVB_VLAN_TAG_MODE_WIDTH 4
-/* enum: Insert the VLAN. */
-#define EVB_VLAN_TAG_INSERT 0x0
-/* enum: Replace the VLAN if already present. */
-#define EVB_VLAN_TAG_REPLACE 0x1
-
-/* BUFTBL_ENTRY structuredef */
-#define BUFTBL_ENTRY_LEN 12
-/* the owner ID */
-#define BUFTBL_ENTRY_OID_OFST 0
-#define BUFTBL_ENTRY_OID_LEN 2
-#define BUFTBL_ENTRY_OID_LBN 0
-#define BUFTBL_ENTRY_OID_WIDTH 16
-/* the page parameter as one of ESE_DZ_SMC_PAGE_SIZE_ */
-#define BUFTBL_ENTRY_PGSZ_OFST 2
-#define BUFTBL_ENTRY_PGSZ_LEN 2
-#define BUFTBL_ENTRY_PGSZ_LBN 16
-#define BUFTBL_ENTRY_PGSZ_WIDTH 16
-/* the raw 64-bit address field from the SMC, not adjusted for page size */
-#define BUFTBL_ENTRY_RAWADDR_OFST 4
-#define BUFTBL_ENTRY_RAWADDR_LEN 8
-#define BUFTBL_ENTRY_RAWADDR_LO_OFST 4
-#define BUFTBL_ENTRY_RAWADDR_LO_LEN 4
-#define BUFTBL_ENTRY_RAWADDR_LO_LBN 32
-#define BUFTBL_ENTRY_RAWADDR_LO_WIDTH 32
-#define BUFTBL_ENTRY_RAWADDR_HI_OFST 8
-#define BUFTBL_ENTRY_RAWADDR_HI_LEN 4
-#define BUFTBL_ENTRY_RAWADDR_HI_LBN 64
-#define BUFTBL_ENTRY_RAWADDR_HI_WIDTH 32
-#define BUFTBL_ENTRY_RAWADDR_LBN 32
-#define BUFTBL_ENTRY_RAWADDR_WIDTH 64
-
/* NVRAM_PARTITION_TYPE structuredef */
#define NVRAM_PARTITION_TYPE_LEN 2
#define NVRAM_PARTITION_TYPE_ID_OFST 0
@@ -9787,6 +10616,8 @@
#define NVRAM_PARTITION_TYPE_NMC_LOG 0x700
/* enum: Non-volatile log output of second core on dual-core device */
#define NVRAM_PARTITION_TYPE_LOG_SLAVE 0x701
+/* enum: RAM (volatile) log output partition */
+#define NVRAM_PARTITION_TYPE_RAM_LOG 0x702
/* enum: Device state dump output partition */
#define NVRAM_PARTITION_TYPE_DUMP 0x800
/* enum: Crash log partition for NMC firmware */
@@ -9923,6 +10754,16 @@
#define NVRAM_PARTITION_TYPE_SUC_SOC_CONFIG 0x1f07
/* enum: System-on-Chip update information. */
#define NVRAM_PARTITION_TYPE_SOC_UPDATE 0x2003
+/* enum: Virtual partition. Write-only. Writes will actually be sent to an
+ * appropriate partition (for instance BUNDLE if the data starts with the magic
+ * number for a bundle update), or discarded with an error if not recognised as
+ * a supported type.
+ */
+#define NVRAM_PARTITION_TYPE_AUTO 0x2100
+/* enum: MC/NMC (first stage) bootloader firmware. (For X4, see XN-202072-PS
+ * and XN-202084-SW section 3.1).
+ */
+#define NVRAM_PARTITION_TYPE_BOOTLOADER 0x2200
/* enum: Start of reserved value range (firmware may use for any purpose) */
#define NVRAM_PARTITION_TYPE_RESERVED_VALUES_MIN 0xff00
/* enum: End of reserved value range (firmware may use for any purpose) */
@@ -9981,116 +10822,6 @@
#define LICENSED_APP_ID_ID_LBN 0
#define LICENSED_APP_ID_ID_WIDTH 32
-/* LICENSED_FEATURES structuredef */
-#define LICENSED_FEATURES_LEN 8
-/* Bitmask of licensed firmware features */
-#define LICENSED_FEATURES_MASK_OFST 0
-#define LICENSED_FEATURES_MASK_LEN 8
-#define LICENSED_FEATURES_MASK_LO_OFST 0
-#define LICENSED_FEATURES_MASK_LO_LEN 4
-#define LICENSED_FEATURES_MASK_LO_LBN 0
-#define LICENSED_FEATURES_MASK_LO_WIDTH 32
-#define LICENSED_FEATURES_MASK_HI_OFST 4
-#define LICENSED_FEATURES_MASK_HI_LEN 4
-#define LICENSED_FEATURES_MASK_HI_LBN 32
-#define LICENSED_FEATURES_MASK_HI_WIDTH 32
-#define LICENSED_FEATURES_RX_CUT_THROUGH_OFST 0
-#define LICENSED_FEATURES_RX_CUT_THROUGH_LBN 0
-#define LICENSED_FEATURES_RX_CUT_THROUGH_WIDTH 1
-#define LICENSED_FEATURES_PIO_OFST 0
-#define LICENSED_FEATURES_PIO_LBN 1
-#define LICENSED_FEATURES_PIO_WIDTH 1
-#define LICENSED_FEATURES_EVQ_TIMER_OFST 0
-#define LICENSED_FEATURES_EVQ_TIMER_LBN 2
-#define LICENSED_FEATURES_EVQ_TIMER_WIDTH 1
-#define LICENSED_FEATURES_CLOCK_OFST 0
-#define LICENSED_FEATURES_CLOCK_LBN 3
-#define LICENSED_FEATURES_CLOCK_WIDTH 1
-#define LICENSED_FEATURES_RX_TIMESTAMPS_OFST 0
-#define LICENSED_FEATURES_RX_TIMESTAMPS_LBN 4
-#define LICENSED_FEATURES_RX_TIMESTAMPS_WIDTH 1
-#define LICENSED_FEATURES_TX_TIMESTAMPS_OFST 0
-#define LICENSED_FEATURES_TX_TIMESTAMPS_LBN 5
-#define LICENSED_FEATURES_TX_TIMESTAMPS_WIDTH 1
-#define LICENSED_FEATURES_RX_SNIFF_OFST 0
-#define LICENSED_FEATURES_RX_SNIFF_LBN 6
-#define LICENSED_FEATURES_RX_SNIFF_WIDTH 1
-#define LICENSED_FEATURES_TX_SNIFF_OFST 0
-#define LICENSED_FEATURES_TX_SNIFF_LBN 7
-#define LICENSED_FEATURES_TX_SNIFF_WIDTH 1
-#define LICENSED_FEATURES_PROXY_FILTER_OPS_OFST 0
-#define LICENSED_FEATURES_PROXY_FILTER_OPS_LBN 8
-#define LICENSED_FEATURES_PROXY_FILTER_OPS_WIDTH 1
-#define LICENSED_FEATURES_EVENT_CUT_THROUGH_OFST 0
-#define LICENSED_FEATURES_EVENT_CUT_THROUGH_LBN 9
-#define LICENSED_FEATURES_EVENT_CUT_THROUGH_WIDTH 1
-#define LICENSED_FEATURES_MASK_LBN 0
-#define LICENSED_FEATURES_MASK_WIDTH 64
-
-/* LICENSED_V3_APPS structuredef */
-#define LICENSED_V3_APPS_LEN 8
-/* Bitmask of licensed applications */
-#define LICENSED_V3_APPS_MASK_OFST 0
-#define LICENSED_V3_APPS_MASK_LEN 8
-#define LICENSED_V3_APPS_MASK_LO_OFST 0
-#define LICENSED_V3_APPS_MASK_LO_LEN 4
-#define LICENSED_V3_APPS_MASK_LO_LBN 0
-#define LICENSED_V3_APPS_MASK_LO_WIDTH 32
-#define LICENSED_V3_APPS_MASK_HI_OFST 4
-#define LICENSED_V3_APPS_MASK_HI_LEN 4
-#define LICENSED_V3_APPS_MASK_HI_LBN 32
-#define LICENSED_V3_APPS_MASK_HI_WIDTH 32
-#define LICENSED_V3_APPS_ONLOAD_OFST 0
-#define LICENSED_V3_APPS_ONLOAD_LBN 0
-#define LICENSED_V3_APPS_ONLOAD_WIDTH 1
-#define LICENSED_V3_APPS_PTP_OFST 0
-#define LICENSED_V3_APPS_PTP_LBN 1
-#define LICENSED_V3_APPS_PTP_WIDTH 1
-#define LICENSED_V3_APPS_SOLARCAPTURE_PRO_OFST 0
-#define LICENSED_V3_APPS_SOLARCAPTURE_PRO_LBN 2
-#define LICENSED_V3_APPS_SOLARCAPTURE_PRO_WIDTH 1
-#define LICENSED_V3_APPS_SOLARSECURE_OFST 0
-#define LICENSED_V3_APPS_SOLARSECURE_LBN 3
-#define LICENSED_V3_APPS_SOLARSECURE_WIDTH 1
-#define LICENSED_V3_APPS_PERF_MONITOR_OFST 0
-#define LICENSED_V3_APPS_PERF_MONITOR_LBN 4
-#define LICENSED_V3_APPS_PERF_MONITOR_WIDTH 1
-#define LICENSED_V3_APPS_SOLARCAPTURE_LIVE_OFST 0
-#define LICENSED_V3_APPS_SOLARCAPTURE_LIVE_LBN 5
-#define LICENSED_V3_APPS_SOLARCAPTURE_LIVE_WIDTH 1
-#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_OFST 0
-#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_LBN 6
-#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_WIDTH 1
-#define LICENSED_V3_APPS_NETWORK_ACCESS_CONTROL_OFST 0
-#define LICENSED_V3_APPS_NETWORK_ACCESS_CONTROL_LBN 7
-#define LICENSED_V3_APPS_NETWORK_ACCESS_CONTROL_WIDTH 1
-#define LICENSED_V3_APPS_TCP_DIRECT_OFST 0
-#define LICENSED_V3_APPS_TCP_DIRECT_LBN 8
-#define LICENSED_V3_APPS_TCP_DIRECT_WIDTH 1
-#define LICENSED_V3_APPS_LOW_LATENCY_OFST 0
-#define LICENSED_V3_APPS_LOW_LATENCY_LBN 9
-#define LICENSED_V3_APPS_LOW_LATENCY_WIDTH 1
-#define LICENSED_V3_APPS_SOLARCAPTURE_TAP_OFST 0
-#define LICENSED_V3_APPS_SOLARCAPTURE_TAP_LBN 10
-#define LICENSED_V3_APPS_SOLARCAPTURE_TAP_WIDTH 1
-#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_40G_OFST 0
-#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_40G_LBN 11
-#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_40G_WIDTH 1
-#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_1G_OFST 0
-#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_1G_LBN 12
-#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_1G_WIDTH 1
-#define LICENSED_V3_APPS_SCALEOUT_ONLOAD_OFST 0
-#define LICENSED_V3_APPS_SCALEOUT_ONLOAD_LBN 13
-#define LICENSED_V3_APPS_SCALEOUT_ONLOAD_WIDTH 1
-#define LICENSED_V3_APPS_DSHBRD_OFST 0
-#define LICENSED_V3_APPS_DSHBRD_LBN 14
-#define LICENSED_V3_APPS_DSHBRD_WIDTH 1
-#define LICENSED_V3_APPS_SCATRD_OFST 0
-#define LICENSED_V3_APPS_SCATRD_LBN 15
-#define LICENSED_V3_APPS_SCATRD_WIDTH 1
-#define LICENSED_V3_APPS_MASK_LBN 0
-#define LICENSED_V3_APPS_MASK_WIDTH 64
-
/* LICENSED_V3_FEATURES structuredef */
#define LICENSED_V3_FEATURES_LEN 8
/* Bitmask of licensed firmware features */
@@ -10199,44 +10930,6 @@
#define RSS_MODE_HASH_SELECTOR_LBN 0
#define RSS_MODE_HASH_SELECTOR_WIDTH 8
-/* CTPIO_STATS_MAP structuredef */
-#define CTPIO_STATS_MAP_LEN 4
-/* The (function relative) VI number */
-#define CTPIO_STATS_MAP_VI_OFST 0
-#define CTPIO_STATS_MAP_VI_LEN 2
-#define CTPIO_STATS_MAP_VI_LBN 0
-#define CTPIO_STATS_MAP_VI_WIDTH 16
-/* The target bucket for the VI */
-#define CTPIO_STATS_MAP_BUCKET_OFST 2
-#define CTPIO_STATS_MAP_BUCKET_LEN 2
-#define CTPIO_STATS_MAP_BUCKET_LBN 16
-#define CTPIO_STATS_MAP_BUCKET_WIDTH 16
-
-
-/***********************************/
-/* MC_CMD_READ_REGS
- * Get a dump of the MCPU registers
- */
-#define MC_CMD_READ_REGS 0x50
-#undef MC_CMD_0x50_PRIVILEGE_CTG
-
-#define MC_CMD_0x50_PRIVILEGE_CTG SRIOV_CTG_INSECURE
-
-/* MC_CMD_READ_REGS_IN msgrequest */
-#define MC_CMD_READ_REGS_IN_LEN 0
-
-/* MC_CMD_READ_REGS_OUT msgresponse */
-#define MC_CMD_READ_REGS_OUT_LEN 308
-/* Whether the corresponding register entry contains a valid value */
-#define MC_CMD_READ_REGS_OUT_MASK_OFST 0
-#define MC_CMD_READ_REGS_OUT_MASK_LEN 16
-/* Same order as MIPS GDB (r0-r31, sr, lo, hi, bad, cause, 32 x float, fsr,
- * fir, fp)
- */
-#define MC_CMD_READ_REGS_OUT_REGS_OFST 16
-#define MC_CMD_READ_REGS_OUT_REGS_LEN 4
-#define MC_CMD_READ_REGS_OUT_REGS_NUM 73
-
/***********************************/
/* MC_CMD_INIT_EVQ
@@ -10640,25 +11333,6 @@
#define MC_CMD_INIT_EVQ_V3_OUT_FLAG_RXQ_FORCE_EV_MERGING_LBN 3
#define MC_CMD_INIT_EVQ_V3_OUT_FLAG_RXQ_FORCE_EV_MERGING_WIDTH 1
-/* QUEUE_CRC_MODE structuredef */
-#define QUEUE_CRC_MODE_LEN 1
-#define QUEUE_CRC_MODE_MODE_LBN 0
-#define QUEUE_CRC_MODE_MODE_WIDTH 4
-/* enum: No CRC. */
-#define QUEUE_CRC_MODE_NONE 0x0
-/* enum: CRC Fiber channel over ethernet. */
-#define QUEUE_CRC_MODE_FCOE 0x1
-/* enum: CRC (digest) iSCSI header only. */
-#define QUEUE_CRC_MODE_ISCSI_HDR 0x2
-/* enum: CRC (digest) iSCSI header and payload. */
-#define QUEUE_CRC_MODE_ISCSI 0x3
-/* enum: CRC Fiber channel over IP over ethernet. */
-#define QUEUE_CRC_MODE_FCOIPOE 0x4
-/* enum: CRC MPA. */
-#define QUEUE_CRC_MODE_MPA 0x5
-#define QUEUE_CRC_MODE_SPARE_LBN 4
-#define QUEUE_CRC_MODE_SPARE_WIDTH 4
-
/***********************************/
/* MC_CMD_INIT_RXQ
@@ -10827,6 +11501,9 @@
#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_NO_CONT_EV_OFST 16
#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_NO_CONT_EV_LBN 20
#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_NO_CONT_EV_WIDTH 1
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_SUPPRESS_RX_EVENTS_OFST 16
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_SUPPRESS_RX_EVENTS_LBN 21
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_SUPPRESS_RX_EVENTS_WIDTH 1
/* Owner ID to use if in buffer mode (zero if physical) */
#define MC_CMD_INIT_RXQ_EXT_IN_OWNER_ID_OFST 20
#define MC_CMD_INIT_RXQ_EXT_IN_OWNER_ID_LEN 4
@@ -10933,6 +11610,9 @@
#define MC_CMD_INIT_RXQ_V3_IN_FLAG_NO_CONT_EV_OFST 16
#define MC_CMD_INIT_RXQ_V3_IN_FLAG_NO_CONT_EV_LBN 20
#define MC_CMD_INIT_RXQ_V3_IN_FLAG_NO_CONT_EV_WIDTH 1
+#define MC_CMD_INIT_RXQ_V3_IN_FLAG_SUPPRESS_RX_EVENTS_OFST 16
+#define MC_CMD_INIT_RXQ_V3_IN_FLAG_SUPPRESS_RX_EVENTS_LBN 21
+#define MC_CMD_INIT_RXQ_V3_IN_FLAG_SUPPRESS_RX_EVENTS_WIDTH 1
/* Owner ID to use if in buffer mode (zero if physical) */
#define MC_CMD_INIT_RXQ_V3_IN_OWNER_ID_OFST 20
#define MC_CMD_INIT_RXQ_V3_IN_OWNER_ID_LEN 4
@@ -11068,6 +11748,9 @@
#define MC_CMD_INIT_RXQ_V4_IN_FLAG_NO_CONT_EV_OFST 16
#define MC_CMD_INIT_RXQ_V4_IN_FLAG_NO_CONT_EV_LBN 20
#define MC_CMD_INIT_RXQ_V4_IN_FLAG_NO_CONT_EV_WIDTH 1
+#define MC_CMD_INIT_RXQ_V4_IN_FLAG_SUPPRESS_RX_EVENTS_OFST 16
+#define MC_CMD_INIT_RXQ_V4_IN_FLAG_SUPPRESS_RX_EVENTS_LBN 21
+#define MC_CMD_INIT_RXQ_V4_IN_FLAG_SUPPRESS_RX_EVENTS_WIDTH 1
/* Owner ID to use if in buffer mode (zero if physical) */
#define MC_CMD_INIT_RXQ_V4_IN_OWNER_ID_OFST 20
#define MC_CMD_INIT_RXQ_V4_IN_OWNER_ID_LEN 4
@@ -11216,6 +11899,9 @@
#define MC_CMD_INIT_RXQ_V5_IN_FLAG_NO_CONT_EV_OFST 16
#define MC_CMD_INIT_RXQ_V5_IN_FLAG_NO_CONT_EV_LBN 20
#define MC_CMD_INIT_RXQ_V5_IN_FLAG_NO_CONT_EV_WIDTH 1
+#define MC_CMD_INIT_RXQ_V5_IN_FLAG_SUPPRESS_RX_EVENTS_OFST 16
+#define MC_CMD_INIT_RXQ_V5_IN_FLAG_SUPPRESS_RX_EVENTS_LBN 21
+#define MC_CMD_INIT_RXQ_V5_IN_FLAG_SUPPRESS_RX_EVENTS_WIDTH 1
/* Owner ID to use if in buffer mode (zero if physical) */
#define MC_CMD_INIT_RXQ_V5_IN_OWNER_ID_OFST 20
#define MC_CMD_INIT_RXQ_V5_IN_OWNER_ID_LEN 4
@@ -11610,320 +12296,6 @@
/* MC_CMD_PROXY_CMD_OUT msgresponse */
#define MC_CMD_PROXY_CMD_OUT_LEN 0
-/* MC_PROXY_STATUS_BUFFER structuredef: Host memory status buffer used to
- * manage proxied requests
- */
-#define MC_PROXY_STATUS_BUFFER_LEN 16
-/* Handle allocated by the firmware for this proxy transaction */
-#define MC_PROXY_STATUS_BUFFER_HANDLE_OFST 0
-#define MC_PROXY_STATUS_BUFFER_HANDLE_LEN 4
-/* enum: An invalid handle. */
-#define MC_PROXY_STATUS_BUFFER_HANDLE_INVALID 0x0
-#define MC_PROXY_STATUS_BUFFER_HANDLE_LBN 0
-#define MC_PROXY_STATUS_BUFFER_HANDLE_WIDTH 32
-/* The requesting physical function number */
-#define MC_PROXY_STATUS_BUFFER_PF_OFST 4
-#define MC_PROXY_STATUS_BUFFER_PF_LEN 2
-#define MC_PROXY_STATUS_BUFFER_PF_LBN 32
-#define MC_PROXY_STATUS_BUFFER_PF_WIDTH 16
-/* The requesting virtual function number. Set to VF_NULL if the target is a
- * PF.
- */
-#define MC_PROXY_STATUS_BUFFER_VF_OFST 6
-#define MC_PROXY_STATUS_BUFFER_VF_LEN 2
-#define MC_PROXY_STATUS_BUFFER_VF_LBN 48
-#define MC_PROXY_STATUS_BUFFER_VF_WIDTH 16
-/* The target function RID. */
-#define MC_PROXY_STATUS_BUFFER_RID_OFST 8
-#define MC_PROXY_STATUS_BUFFER_RID_LEN 2
-#define MC_PROXY_STATUS_BUFFER_RID_LBN 64
-#define MC_PROXY_STATUS_BUFFER_RID_WIDTH 16
-/* The status of the proxy as described in MC_CMD_PROXY_COMPLETE. */
-#define MC_PROXY_STATUS_BUFFER_STATUS_OFST 10
-#define MC_PROXY_STATUS_BUFFER_STATUS_LEN 2
-#define MC_PROXY_STATUS_BUFFER_STATUS_LBN 80
-#define MC_PROXY_STATUS_BUFFER_STATUS_WIDTH 16
-/* If a request is authorized rather than carried out by the host, this is the
- * elevated privilege mask granted to the requesting function.
- */
-#define MC_PROXY_STATUS_BUFFER_GRANTED_PRIVILEGES_OFST 12
-#define MC_PROXY_STATUS_BUFFER_GRANTED_PRIVILEGES_LEN 4
-#define MC_PROXY_STATUS_BUFFER_GRANTED_PRIVILEGES_LBN 96
-#define MC_PROXY_STATUS_BUFFER_GRANTED_PRIVILEGES_WIDTH 32
-
-
-/***********************************/
-/* MC_CMD_PROXY_CONFIGURE
- * Enable/disable authorization of MCDI requests from unprivileged functions by
- * a designated admin function
- */
-#define MC_CMD_PROXY_CONFIGURE 0x58
-#undef MC_CMD_0x58_PRIVILEGE_CTG
-
-#define MC_CMD_0x58_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_PROXY_CONFIGURE_IN msgrequest */
-#define MC_CMD_PROXY_CONFIGURE_IN_LEN 108
-#define MC_CMD_PROXY_CONFIGURE_IN_FLAGS_OFST 0
-#define MC_CMD_PROXY_CONFIGURE_IN_FLAGS_LEN 4
-#define MC_CMD_PROXY_CONFIGURE_IN_ENABLE_OFST 0
-#define MC_CMD_PROXY_CONFIGURE_IN_ENABLE_LBN 0
-#define MC_CMD_PROXY_CONFIGURE_IN_ENABLE_WIDTH 1
-/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
- * of blocks, each of the size REQUEST_BLOCK_SIZE.
- */
-#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_OFST 4
-#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_LEN 8
-#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_LO_OFST 4
-#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_LO_LEN 4
-#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_LO_LBN 32
-#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_LO_WIDTH 32
-#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_HI_OFST 8
-#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_HI_LEN 4
-#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_HI_LBN 64
-#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_HI_WIDTH 32
-/* Must be a power of 2 */
-#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BLOCK_SIZE_OFST 12
-#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BLOCK_SIZE_LEN 4
-/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
- * of blocks, each of the size REPLY_BLOCK_SIZE.
- */
-#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_OFST 16
-#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_LEN 8
-#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_LO_OFST 16
-#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_LO_LEN 4
-#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_LO_LBN 128
-#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_LO_WIDTH 32
-#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_HI_OFST 20
-#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_HI_LEN 4
-#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_HI_LBN 160
-#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_HI_WIDTH 32
-/* Must be a power of 2 */
-#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BLOCK_SIZE_OFST 24
-#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BLOCK_SIZE_LEN 4
-/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
- * of blocks, each of the size STATUS_BLOCK_SIZE. This buffer is only needed if
- * host intends to complete proxied operations by using MC_CMD_PROXY_CMD.
- */
-#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_OFST 28
-#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_LEN 8
-#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_LO_OFST 28
-#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_LO_LEN 4
-#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_LO_LBN 224
-#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_LO_WIDTH 32
-#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_HI_OFST 32
-#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_HI_LEN 4
-#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_HI_LBN 256
-#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_HI_WIDTH 32
-/* Must be a power of 2, or zero if this buffer is not provided */
-#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BLOCK_SIZE_OFST 36
-#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BLOCK_SIZE_LEN 4
-/* Applies to all three buffers */
-#define MC_CMD_PROXY_CONFIGURE_IN_NUM_BLOCKS_OFST 40
-#define MC_CMD_PROXY_CONFIGURE_IN_NUM_BLOCKS_LEN 4
-/* A bit mask defining which MCDI operations may be proxied */
-#define MC_CMD_PROXY_CONFIGURE_IN_ALLOWED_MCDI_MASK_OFST 44
-#define MC_CMD_PROXY_CONFIGURE_IN_ALLOWED_MCDI_MASK_LEN 64
-
-/* MC_CMD_PROXY_CONFIGURE_EXT_IN msgrequest */
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_LEN 112
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_FLAGS_OFST 0
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_FLAGS_LEN 4
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_ENABLE_OFST 0
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_ENABLE_LBN 0
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_ENABLE_WIDTH 1
-/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
- * of blocks, each of the size REQUEST_BLOCK_SIZE.
- */
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_OFST 4
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_LEN 8
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_LO_OFST 4
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_LO_LEN 4
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_LO_LBN 32
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_LO_WIDTH 32
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_HI_OFST 8
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_HI_LEN 4
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_HI_LBN 64
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_HI_WIDTH 32
-/* Must be a power of 2 */
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BLOCK_SIZE_OFST 12
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BLOCK_SIZE_LEN 4
-/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
- * of blocks, each of the size REPLY_BLOCK_SIZE.
- */
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_OFST 16
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_LEN 8
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_LO_OFST 16
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_LO_LEN 4
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_LO_LBN 128
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_LO_WIDTH 32
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_HI_OFST 20
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_HI_LEN 4
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_HI_LBN 160
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_HI_WIDTH 32
-/* Must be a power of 2 */
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BLOCK_SIZE_OFST 24
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BLOCK_SIZE_LEN 4
-/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
- * of blocks, each of the size STATUS_BLOCK_SIZE. This buffer is only needed if
- * host intends to complete proxied operations by using MC_CMD_PROXY_CMD.
- */
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_OFST 28
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_LEN 8
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_LO_OFST 28
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_LO_LEN 4
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_LO_LBN 224
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_LO_WIDTH 32
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_HI_OFST 32
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_HI_LEN 4
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_HI_LBN 256
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_HI_WIDTH 32
-/* Must be a power of 2, or zero if this buffer is not provided */
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BLOCK_SIZE_OFST 36
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BLOCK_SIZE_LEN 4
-/* Applies to all three buffers */
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_NUM_BLOCKS_OFST 40
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_NUM_BLOCKS_LEN 4
-/* A bit mask defining which MCDI operations may be proxied */
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_ALLOWED_MCDI_MASK_OFST 44
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_ALLOWED_MCDI_MASK_LEN 64
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_RESERVED_OFST 108
-#define MC_CMD_PROXY_CONFIGURE_EXT_IN_RESERVED_LEN 4
-
-/* MC_CMD_PROXY_CONFIGURE_OUT msgresponse */
-#define MC_CMD_PROXY_CONFIGURE_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_PROXY_COMPLETE
- * Tells FW that a requested proxy operation has either been completed (by
- * using MC_CMD_PROXY_CMD) or authorized/declined. May only be sent by the
- * function that enabled proxying/authorization (by using
- * MC_CMD_PROXY_CONFIGURE).
- */
-#define MC_CMD_PROXY_COMPLETE 0x5f
-#undef MC_CMD_0x5f_PRIVILEGE_CTG
-
-#define MC_CMD_0x5f_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_PROXY_COMPLETE_IN msgrequest */
-#define MC_CMD_PROXY_COMPLETE_IN_LEN 12
-#define MC_CMD_PROXY_COMPLETE_IN_BLOCK_INDEX_OFST 0
-#define MC_CMD_PROXY_COMPLETE_IN_BLOCK_INDEX_LEN 4
-#define MC_CMD_PROXY_COMPLETE_IN_STATUS_OFST 4
-#define MC_CMD_PROXY_COMPLETE_IN_STATUS_LEN 4
-/* enum: The operation has been completed by using MC_CMD_PROXY_CMD, the reply
- * is stored in the REPLY_BUFF.
- */
-#define MC_CMD_PROXY_COMPLETE_IN_COMPLETE 0x0
-/* enum: The operation has been authorized. The originating function may now
- * try again.
- */
-#define MC_CMD_PROXY_COMPLETE_IN_AUTHORIZED 0x1
-/* enum: The operation has been declined. */
-#define MC_CMD_PROXY_COMPLETE_IN_DECLINED 0x2
-/* enum: The authorization failed because the relevant application did not
- * respond in time.
- */
-#define MC_CMD_PROXY_COMPLETE_IN_TIMEDOUT 0x3
-#define MC_CMD_PROXY_COMPLETE_IN_HANDLE_OFST 8
-#define MC_CMD_PROXY_COMPLETE_IN_HANDLE_LEN 4
-
-/* MC_CMD_PROXY_COMPLETE_OUT msgresponse */
-#define MC_CMD_PROXY_COMPLETE_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_ALLOC_BUFTBL_CHUNK
- * Allocate a set of buffer table entries using the specified owner ID. This
- * operation allocates the required buffer table entries (and fails if it
- * cannot do so). The buffer table entries will initially be zeroed.
- */
-#define MC_CMD_ALLOC_BUFTBL_CHUNK 0x87
-#undef MC_CMD_0x87_PRIVILEGE_CTG
-
-#define MC_CMD_0x87_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
-
-/* MC_CMD_ALLOC_BUFTBL_CHUNK_IN msgrequest */
-#define MC_CMD_ALLOC_BUFTBL_CHUNK_IN_LEN 8
-/* Owner ID to use */
-#define MC_CMD_ALLOC_BUFTBL_CHUNK_IN_OWNER_OFST 0
-#define MC_CMD_ALLOC_BUFTBL_CHUNK_IN_OWNER_LEN 4
-/* Size of buffer table pages to use, in bytes (note that only a few values are
- * legal on any specific hardware).
- */
-#define MC_CMD_ALLOC_BUFTBL_CHUNK_IN_PAGE_SIZE_OFST 4
-#define MC_CMD_ALLOC_BUFTBL_CHUNK_IN_PAGE_SIZE_LEN 4
-
-/* MC_CMD_ALLOC_BUFTBL_CHUNK_OUT msgresponse */
-#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_LEN 12
-#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_HANDLE_OFST 0
-#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_HANDLE_LEN 4
-#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_NUMENTRIES_OFST 4
-#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_NUMENTRIES_LEN 4
-/* Buffer table IDs for use in DMA descriptors. */
-#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_ID_OFST 8
-#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_ID_LEN 4
-
-
-/***********************************/
-/* MC_CMD_PROGRAM_BUFTBL_ENTRIES
- * Reprogram a set of buffer table entries in the specified chunk.
- */
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES 0x88
-#undef MC_CMD_0x88_PRIVILEGE_CTG
-
-#define MC_CMD_0x88_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
-
-/* MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN msgrequest */
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LENMIN 20
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LENMAX 268
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LENMAX_MCDI2 268
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LEN(num) (12+8*(num))
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_NUM(len) (((len)-12)/8)
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_HANDLE_OFST 0
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_HANDLE_LEN 4
-/* ID */
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_FIRSTID_OFST 4
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_FIRSTID_LEN 4
-/* Num entries */
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_NUMENTRIES_OFST 8
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_NUMENTRIES_LEN 4
-/* Buffer table entry address */
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_OFST 12
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_LEN 8
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_LO_OFST 12
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_LO_LEN 4
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_LO_LBN 96
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_LO_WIDTH 32
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_HI_OFST 16
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_HI_LEN 4
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_HI_LBN 128
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_HI_WIDTH 32
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_MINNUM 1
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_MAXNUM 32
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_MAXNUM_MCDI2 32
-
-/* MC_CMD_PROGRAM_BUFTBL_ENTRIES_OUT msgresponse */
-#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_FREE_BUFTBL_CHUNK
- */
-#define MC_CMD_FREE_BUFTBL_CHUNK 0x89
-#undef MC_CMD_0x89_PRIVILEGE_CTG
-
-#define MC_CMD_0x89_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
-
-/* MC_CMD_FREE_BUFTBL_CHUNK_IN msgrequest */
-#define MC_CMD_FREE_BUFTBL_CHUNK_IN_LEN 4
-#define MC_CMD_FREE_BUFTBL_CHUNK_IN_HANDLE_OFST 0
-#define MC_CMD_FREE_BUFTBL_CHUNK_IN_HANDLE_LEN 4
-
-/* MC_CMD_FREE_BUFTBL_CHUNK_OUT msgresponse */
-#define MC_CMD_FREE_BUFTBL_CHUNK_OUT_LEN 0
-
/***********************************/
/* MC_CMD_FILTER_OP
@@ -12822,6 +13194,10 @@
#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_VNIC_ENCAP_MATCHES 0x5
/* enum: read the supported encapsulation types for the VNIC */
#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_VNIC_ENCAP_TYPES 0x6
+/* enum: read the supported RX filter matches for low-latency queues (as
+ * allocated by MC_CMD_ALLOC_LL_QUEUES)
+ */
+#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_LL_RX_MATCHES 0x7
/* MC_CMD_GET_PARSER_DISP_INFO_OUT msgresponse */
#define MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMIN 8
@@ -12860,6 +13236,48 @@
#define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_DST_IP_MCAST_ONLY_LBN 0
#define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_DST_IP_MCAST_ONLY_WIDTH 1
+/* MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT msgresponse:
+ * GET_PARSER_DISP_INFO response format for OP_GET_SECURITY_RULE_INFO.
+ * (Medford-only; for use by SolarSecure apps, not directly by drivers. See
+ * SF-114946-SW.) NOTE - this message definition is provisional. It has not yet
+ * been used in any released code and may change during development. This note
+ * will be removed once it is regarded as stable.
+ */
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_LEN 36
+/* identifies the type of operation requested */
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_OP_OFST 0
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_OP_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_GET_PARSER_DISP_INFO_IN/OP */
+/* a version number representing the set of rule lookups that are implemented
+ * by the currently running firmware
+ */
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_RULES_VERSION_OFST 4
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_RULES_VERSION_LEN 4
+/* enum: implements lookup sequences described in SF-114946-SW draft C */
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_RULES_VERSION_SF_114946_SW_C 0x0
+/* the number of nodes in the subnet map */
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_SUBNET_MAP_NUM_NODES_OFST 8
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_SUBNET_MAP_NUM_NODES_LEN 4
+/* the number of entries in one subnet map node */
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_SUBNET_MAP_NUM_ENTRIES_PER_NODE_OFST 12
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_SUBNET_MAP_NUM_ENTRIES_PER_NODE_LEN 4
+/* minimum valid value for a subnet ID in a subnet map leaf */
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_SUBNET_ID_MIN_OFST 16
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_SUBNET_ID_MIN_LEN 4
+/* maximum valid value for a subnet ID in a subnet map leaf */
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_SUBNET_ID_MAX_OFST 20
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_SUBNET_ID_MAX_LEN 4
+/* the number of entries in the local and remote port range maps */
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_PORTRANGE_TREE_NUM_ENTRIES_OFST 24
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_PORTRANGE_TREE_NUM_ENTRIES_LEN 4
+/* minimum valid value for a portrange ID in a port range map leaf */
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_PORTRANGE_ID_MIN_OFST 28
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_PORTRANGE_ID_MIN_LEN 4
+/* maximum valid value for a portrange ID in a port range map leaf */
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_PORTRANGE_ID_MAX_OFST 32
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_PORTRANGE_ID_MAX_LEN 4
+
/* MC_CMD_GET_PARSER_DISP_VNIC_ENCAP_MATCHES_OUT msgresponse: This response is
* returned if a MC_CMD_GET_PARSER_DISP_INFO_IN request is sent with OP value
* OP_GET_SUPPORTED_VNIC_ENCAP_MATCHES. It contains information about the
@@ -12914,136 +13332,6 @@
/***********************************/
-/* MC_CMD_PARSER_DISP_RW
- * Direct read/write of parser-dispatcher state (DICPUs and LUE) for debugging.
- * Please note that this interface is only of use to debug tools which have
- * knowledge of firmware and hardware data structures; nothing here is intended
- * for use by normal driver code. Note that although this command is in the
- * Admin privilege group, in tamperproof adapters, only read operations are
- * permitted.
- */
-#define MC_CMD_PARSER_DISP_RW 0xe5
-#undef MC_CMD_0xe5_PRIVILEGE_CTG
-
-#define MC_CMD_0xe5_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_PARSER_DISP_RW_IN msgrequest */
-#define MC_CMD_PARSER_DISP_RW_IN_LEN 32
-/* identifies the target of the operation */
-#define MC_CMD_PARSER_DISP_RW_IN_TARGET_OFST 0
-#define MC_CMD_PARSER_DISP_RW_IN_TARGET_LEN 4
-/* enum: RX dispatcher CPU */
-#define MC_CMD_PARSER_DISP_RW_IN_RX_DICPU 0x0
-/* enum: TX dispatcher CPU */
-#define MC_CMD_PARSER_DISP_RW_IN_TX_DICPU 0x1
-/* enum: Lookup engine (with original metadata format). Deprecated; used only
- * by cmdclient as a fallback for very old Huntington firmware, and not
- * supported in firmware beyond v6.4.0.1005. Use LUE_VERSIONED_METADATA
- * instead.
- */
-#define MC_CMD_PARSER_DISP_RW_IN_LUE 0x2
-/* enum: Lookup engine (with requested metadata format) */
-#define MC_CMD_PARSER_DISP_RW_IN_LUE_VERSIONED_METADATA 0x3
-/* enum: RX0 dispatcher CPU (alias for RX_DICPU; Medford has 2 RX DICPUs) */
-#define MC_CMD_PARSER_DISP_RW_IN_RX0_DICPU 0x0
-/* enum: RX1 dispatcher CPU (only valid for Medford) */
-#define MC_CMD_PARSER_DISP_RW_IN_RX1_DICPU 0x4
-/* enum: Miscellaneous other state (only valid for Medford) */
-#define MC_CMD_PARSER_DISP_RW_IN_MISC_STATE 0x5
-/* identifies the type of operation requested */
-#define MC_CMD_PARSER_DISP_RW_IN_OP_OFST 4
-#define MC_CMD_PARSER_DISP_RW_IN_OP_LEN 4
-/* enum: Read a word of DICPU DMEM or a LUE entry */
-#define MC_CMD_PARSER_DISP_RW_IN_READ 0x0
-/* enum: Write a word of DICPU DMEM or a LUE entry. Not permitted on
- * tamperproof adapters.
- */
-#define MC_CMD_PARSER_DISP_RW_IN_WRITE 0x1
-/* enum: Read-modify-write a word of DICPU DMEM (not valid for LUE). Not
- * permitted on tamperproof adapters.
- */
-#define MC_CMD_PARSER_DISP_RW_IN_RMW 0x2
-/* data memory address (DICPU targets) or LUE index (LUE targets) */
-#define MC_CMD_PARSER_DISP_RW_IN_ADDRESS_OFST 8
-#define MC_CMD_PARSER_DISP_RW_IN_ADDRESS_LEN 4
-/* selector (for MISC_STATE target) */
-#define MC_CMD_PARSER_DISP_RW_IN_SELECTOR_OFST 8
-#define MC_CMD_PARSER_DISP_RW_IN_SELECTOR_LEN 4
-/* enum: Port to datapath mapping */
-#define MC_CMD_PARSER_DISP_RW_IN_PORT_DP_MAPPING 0x1
-/* value to write (for DMEM writes) */
-#define MC_CMD_PARSER_DISP_RW_IN_DMEM_WRITE_VALUE_OFST 12
-#define MC_CMD_PARSER_DISP_RW_IN_DMEM_WRITE_VALUE_LEN 4
-/* XOR value (for DMEM read-modify-writes: new = (old & mask) ^ value) */
-#define MC_CMD_PARSER_DISP_RW_IN_DMEM_RMW_XOR_VALUE_OFST 12
-#define MC_CMD_PARSER_DISP_RW_IN_DMEM_RMW_XOR_VALUE_LEN 4
-/* AND mask (for DMEM read-modify-writes: new = (old & mask) ^ value) */
-#define MC_CMD_PARSER_DISP_RW_IN_DMEM_RMW_AND_MASK_OFST 16
-#define MC_CMD_PARSER_DISP_RW_IN_DMEM_RMW_AND_MASK_LEN 4
-/* metadata format (for LUE reads using LUE_VERSIONED_METADATA) */
-#define MC_CMD_PARSER_DISP_RW_IN_LUE_READ_METADATA_VERSION_OFST 12
-#define MC_CMD_PARSER_DISP_RW_IN_LUE_READ_METADATA_VERSION_LEN 4
-/* value to write (for LUE writes) */
-#define MC_CMD_PARSER_DISP_RW_IN_LUE_WRITE_VALUE_OFST 12
-#define MC_CMD_PARSER_DISP_RW_IN_LUE_WRITE_VALUE_LEN 20
-
-/* MC_CMD_PARSER_DISP_RW_OUT msgresponse */
-#define MC_CMD_PARSER_DISP_RW_OUT_LEN 52
-/* value read (for DMEM reads) */
-#define MC_CMD_PARSER_DISP_RW_OUT_DMEM_READ_VALUE_OFST 0
-#define MC_CMD_PARSER_DISP_RW_OUT_DMEM_READ_VALUE_LEN 4
-/* value read (for LUE reads) */
-#define MC_CMD_PARSER_DISP_RW_OUT_LUE_READ_VALUE_OFST 0
-#define MC_CMD_PARSER_DISP_RW_OUT_LUE_READ_VALUE_LEN 20
-/* up to 8 32-bit words of additional soft state from the LUE manager (the
- * exact content is firmware-dependent and intended only for debug use)
- */
-#define MC_CMD_PARSER_DISP_RW_OUT_LUE_MGR_STATE_OFST 20
-#define MC_CMD_PARSER_DISP_RW_OUT_LUE_MGR_STATE_LEN 32
-/* datapath(s) used for each port (for MISC_STATE PORT_DP_MAPPING selector) */
-#define MC_CMD_PARSER_DISP_RW_OUT_PORT_DP_MAPPING_OFST 0
-#define MC_CMD_PARSER_DISP_RW_OUT_PORT_DP_MAPPING_LEN 4
-#define MC_CMD_PARSER_DISP_RW_OUT_PORT_DP_MAPPING_NUM 4
-#define MC_CMD_PARSER_DISP_RW_OUT_DP0 0x1 /* enum */
-#define MC_CMD_PARSER_DISP_RW_OUT_DP1 0x2 /* enum */
-
-
-/***********************************/
-/* MC_CMD_GET_PF_COUNT
- * Get number of PFs on the device.
- */
-#define MC_CMD_GET_PF_COUNT 0xb6
-#undef MC_CMD_0xb6_PRIVILEGE_CTG
-
-#define MC_CMD_0xb6_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_GET_PF_COUNT_IN msgrequest */
-#define MC_CMD_GET_PF_COUNT_IN_LEN 0
-
-/* MC_CMD_GET_PF_COUNT_OUT msgresponse */
-#define MC_CMD_GET_PF_COUNT_OUT_LEN 1
-/* Identifies the number of PFs on the device. */
-#define MC_CMD_GET_PF_COUNT_OUT_PF_COUNT_OFST 0
-#define MC_CMD_GET_PF_COUNT_OUT_PF_COUNT_LEN 1
-
-
-/***********************************/
-/* MC_CMD_SET_PF_COUNT
- * Set number of PFs on the device.
- */
-#define MC_CMD_SET_PF_COUNT 0xb7
-
-/* MC_CMD_SET_PF_COUNT_IN msgrequest */
-#define MC_CMD_SET_PF_COUNT_IN_LEN 4
-/* New number of PFs on the device. */
-#define MC_CMD_SET_PF_COUNT_IN_PF_COUNT_OFST 0
-#define MC_CMD_SET_PF_COUNT_IN_PF_COUNT_LEN 4
-
-/* MC_CMD_SET_PF_COUNT_OUT msgresponse */
-#define MC_CMD_SET_PF_COUNT_OUT_LEN 0
-
-
-/***********************************/
/* MC_CMD_GET_PORT_ASSIGNMENT
* Get port assignment for current PCI function.
*/
@@ -13069,25 +13357,6 @@
/***********************************/
-/* MC_CMD_SET_PORT_ASSIGNMENT
- * Set port assignment for current PCI function.
- */
-#define MC_CMD_SET_PORT_ASSIGNMENT 0xb9
-#undef MC_CMD_0xb9_PRIVILEGE_CTG
-
-#define MC_CMD_0xb9_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_SET_PORT_ASSIGNMENT_IN msgrequest */
-#define MC_CMD_SET_PORT_ASSIGNMENT_IN_LEN 4
-/* Identifies the port assignment for this function. */
-#define MC_CMD_SET_PORT_ASSIGNMENT_IN_PORT_OFST 0
-#define MC_CMD_SET_PORT_ASSIGNMENT_IN_PORT_LEN 4
-
-/* MC_CMD_SET_PORT_ASSIGNMENT_OUT msgresponse */
-#define MC_CMD_SET_PORT_ASSIGNMENT_OUT_LEN 0
-
-
-/***********************************/
/* MC_CMD_ALLOC_VIS
* Allocate VIs for current PCI function.
*/
@@ -13184,263 +13453,6 @@
/***********************************/
-/* MC_CMD_SET_SRIOV_CFG
- * Set SRIOV config for this PF.
- */
-#define MC_CMD_SET_SRIOV_CFG 0xbb
-#undef MC_CMD_0xbb_PRIVILEGE_CTG
-
-#define MC_CMD_0xbb_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_SET_SRIOV_CFG_IN msgrequest */
-#define MC_CMD_SET_SRIOV_CFG_IN_LEN 20
-/* Number of VFs currently enabled. */
-#define MC_CMD_SET_SRIOV_CFG_IN_VF_CURRENT_OFST 0
-#define MC_CMD_SET_SRIOV_CFG_IN_VF_CURRENT_LEN 4
-/* Max number of VFs before sriov stride and offset may need to be changed. */
-#define MC_CMD_SET_SRIOV_CFG_IN_VF_MAX_OFST 4
-#define MC_CMD_SET_SRIOV_CFG_IN_VF_MAX_LEN 4
-#define MC_CMD_SET_SRIOV_CFG_IN_FLAGS_OFST 8
-#define MC_CMD_SET_SRIOV_CFG_IN_FLAGS_LEN 4
-#define MC_CMD_SET_SRIOV_CFG_IN_VF_ENABLED_OFST 8
-#define MC_CMD_SET_SRIOV_CFG_IN_VF_ENABLED_LBN 0
-#define MC_CMD_SET_SRIOV_CFG_IN_VF_ENABLED_WIDTH 1
-/* RID offset of first VF from PF, or 0 for no change, or
- * MC_CMD_RESOURCE_INSTANCE_ANY to allow the system to allocate an offset.
- */
-#define MC_CMD_SET_SRIOV_CFG_IN_VF_OFFSET_OFST 12
-#define MC_CMD_SET_SRIOV_CFG_IN_VF_OFFSET_LEN 4
-/* RID offset of each subsequent VF from the previous, 0 for no change, or
- * MC_CMD_RESOURCE_INSTANCE_ANY to allow the system to allocate a stride.
- */
-#define MC_CMD_SET_SRIOV_CFG_IN_VF_STRIDE_OFST 16
-#define MC_CMD_SET_SRIOV_CFG_IN_VF_STRIDE_LEN 4
-
-/* MC_CMD_SET_SRIOV_CFG_OUT msgresponse */
-#define MC_CMD_SET_SRIOV_CFG_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_GET_VI_ALLOC_INFO
- * Get information about number of VI's and base VI number allocated to this
- * function. This message is not available to dynamic clients created by
- * MC_CMD_CLIENT_ALLOC.
- */
-#define MC_CMD_GET_VI_ALLOC_INFO 0x8d
-#undef MC_CMD_0x8d_PRIVILEGE_CTG
-
-#define MC_CMD_0x8d_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_GET_VI_ALLOC_INFO_IN msgrequest */
-#define MC_CMD_GET_VI_ALLOC_INFO_IN_LEN 0
-
-/* MC_CMD_GET_VI_ALLOC_INFO_OUT msgresponse */
-#define MC_CMD_GET_VI_ALLOC_INFO_OUT_LEN 12
-/* The number of VIs allocated on this function */
-#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_COUNT_OFST 0
-#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_COUNT_LEN 4
-/* The base absolute VI number allocated to this function. Required to
- * correctly interpret wakeup events.
- */
-#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_BASE_OFST 4
-#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_BASE_LEN 4
-/* Function's port vi_shift value (always 0 on Huntington) */
-#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_SHIFT_OFST 8
-#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_SHIFT_LEN 4
-
-
-/***********************************/
-/* MC_CMD_DUMP_VI_STATE
- * For CmdClient use. Dump pertinent information on a specific absolute VI. The
- * VI must be owned by the calling client or one of its ancestors; usership of
- * the VI (as set by MC_CMD_SET_VI_USER) is not sufficient.
- */
-#define MC_CMD_DUMP_VI_STATE 0x8e
-#undef MC_CMD_0x8e_PRIVILEGE_CTG
-
-#define MC_CMD_0x8e_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_DUMP_VI_STATE_IN msgrequest */
-#define MC_CMD_DUMP_VI_STATE_IN_LEN 4
-/* The VI number to query. */
-#define MC_CMD_DUMP_VI_STATE_IN_VI_NUMBER_OFST 0
-#define MC_CMD_DUMP_VI_STATE_IN_VI_NUMBER_LEN 4
-
-/* MC_CMD_DUMP_VI_STATE_OUT msgresponse */
-#define MC_CMD_DUMP_VI_STATE_OUT_LEN 100
-/* The PF part of the function owning this VI. */
-#define MC_CMD_DUMP_VI_STATE_OUT_OWNER_PF_OFST 0
-#define MC_CMD_DUMP_VI_STATE_OUT_OWNER_PF_LEN 2
-/* The VF part of the function owning this VI. */
-#define MC_CMD_DUMP_VI_STATE_OUT_OWNER_VF_OFST 2
-#define MC_CMD_DUMP_VI_STATE_OUT_OWNER_VF_LEN 2
-/* Base of VIs allocated to this function. */
-#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VI_BASE_OFST 4
-#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VI_BASE_LEN 2
-/* Count of VIs allocated to the owner function. */
-#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VI_COUNT_OFST 6
-#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VI_COUNT_LEN 2
-/* Base interrupt vector allocated to this function. */
-#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VECTOR_BASE_OFST 8
-#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VECTOR_BASE_LEN 2
-/* Number of interrupt vectors allocated to this function. */
-#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VECTOR_COUNT_OFST 10
-#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VECTOR_COUNT_LEN 2
-/* Raw evq ptr table data. */
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_OFST 12
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_LEN 8
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_LO_OFST 12
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_LO_LEN 4
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_LO_LBN 96
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_LO_WIDTH 32
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_HI_OFST 16
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_HI_LEN 4
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_HI_LBN 128
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_HI_WIDTH 32
-/* Raw evq timer table data. */
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_OFST 20
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_LEN 8
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_LO_OFST 20
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_LO_LEN 4
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_LO_LBN 160
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_LO_WIDTH 32
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_HI_OFST 24
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_HI_LEN 4
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_HI_LBN 192
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_HI_WIDTH 32
-/* Combined metadata field. */
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_OFST 28
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_LEN 4
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_BASE_OFST 28
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_BASE_LBN 0
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_BASE_WIDTH 16
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_NPAGES_OFST 28
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_NPAGES_LBN 16
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_NPAGES_WIDTH 8
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_WKUP_REF_OFST 28
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_WKUP_REF_LBN 24
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_WKUP_REF_WIDTH 8
-/* TXDPCPU raw table data for queue. */
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_OFST 32
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_LEN 8
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_LO_OFST 32
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_LO_LEN 4
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_LO_LBN 256
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_LO_WIDTH 32
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_HI_OFST 36
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_HI_LEN 4
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_HI_LBN 288
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_HI_WIDTH 32
-/* TXDPCPU raw table data for queue. */
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_OFST 40
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_LEN 8
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_LO_OFST 40
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_LO_LEN 4
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_LO_LBN 320
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_LO_WIDTH 32
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_HI_OFST 44
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_HI_LEN 4
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_HI_LBN 352
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_HI_WIDTH 32
-/* TXDPCPU raw table data for queue. */
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_OFST 48
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_LEN 8
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_LO_OFST 48
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_LO_LEN 4
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_LO_LBN 384
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_LO_WIDTH 32
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_HI_OFST 52
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_HI_LEN 4
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_HI_LBN 416
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_HI_WIDTH 32
-/* Combined metadata field. */
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_OFST 56
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_LEN 8
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_LO_OFST 56
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_LO_LEN 4
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_LO_LBN 448
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_LO_WIDTH 32
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_HI_OFST 60
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_HI_LEN 4
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_HI_LBN 480
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_HI_WIDTH 32
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_BUFS_BASE_OFST 56
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_BUFS_BASE_LBN 0
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_BUFS_BASE_WIDTH 16
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_BUFS_NPAGES_OFST 56
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_BUFS_NPAGES_LBN 16
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_BUFS_NPAGES_WIDTH 8
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_QSTATE_OFST 56
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_QSTATE_LBN 24
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_QSTATE_WIDTH 8
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_WAITCOUNT_OFST 56
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_WAITCOUNT_LBN 32
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_WAITCOUNT_WIDTH 8
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_PADDING_OFST 56
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_PADDING_LBN 40
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_PADDING_WIDTH 24
-/* RXDPCPU raw table data for queue. */
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_OFST 64
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_LEN 8
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_LO_OFST 64
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_LO_LEN 4
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_LO_LBN 512
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_LO_WIDTH 32
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_HI_OFST 68
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_HI_LEN 4
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_HI_LBN 544
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_HI_WIDTH 32
-/* RXDPCPU raw table data for queue. */
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_OFST 72
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_LEN 8
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_LO_OFST 72
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_LO_LEN 4
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_LO_LBN 576
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_LO_WIDTH 32
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_HI_OFST 76
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_HI_LEN 4
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_HI_LBN 608
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_HI_WIDTH 32
-/* Reserved, currently 0. */
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_OFST 80
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_LEN 8
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_LO_OFST 80
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_LO_LEN 4
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_LO_LBN 640
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_LO_WIDTH 32
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_HI_OFST 84
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_HI_LEN 4
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_HI_LBN 672
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_HI_WIDTH 32
-/* Combined metadata field. */
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_OFST 88
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_LEN 8
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_LO_OFST 88
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_LO_LEN 4
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_LO_LBN 704
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_LO_WIDTH 32
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_HI_OFST 92
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_HI_LEN 4
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_HI_LBN 736
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_HI_WIDTH 32
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_BUFS_BASE_OFST 88
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_BUFS_BASE_LBN 0
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_BUFS_BASE_WIDTH 16
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_BUFS_NPAGES_OFST 88
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_BUFS_NPAGES_LBN 16
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_BUFS_NPAGES_WIDTH 8
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_QSTATE_OFST 88
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_QSTATE_LBN 24
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_QSTATE_WIDTH 8
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_WAITCOUNT_OFST 88
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_WAITCOUNT_LBN 32
-#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_WAITCOUNT_WIDTH 8
-/* Current user, as assigned by MC_CMD_SET_VI_USER. */
-#define MC_CMD_DUMP_VI_STATE_OUT_USER_CLIENT_ID_OFST 96
-#define MC_CMD_DUMP_VI_STATE_OUT_USER_CLIENT_ID_LEN 4
-
-
-/***********************************/
/* MC_CMD_ALLOC_PIOBUF
* Allocate a push I/O buffer for later use with a tx queue.
*/
@@ -13491,354 +13503,102 @@
/* MC_CMD_GET_VI_TLP_PROCESSING_IN msgrequest */
#define MC_CMD_GET_VI_TLP_PROCESSING_IN_LEN 4
-/* VI number to get information for. */
+/* Queue handle, encodes queue type and VI number to get information for. */
#define MC_CMD_GET_VI_TLP_PROCESSING_IN_INSTANCE_OFST 0
#define MC_CMD_GET_VI_TLP_PROCESSING_IN_INSTANCE_LEN 4
-/* MC_CMD_GET_VI_TLP_PROCESSING_OUT msgresponse */
-#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_LEN 4
-/* Transaction processing steering hint 1 for use with the Rx Queue. */
-#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_TAG1_RX_OFST 0
-#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_TAG1_RX_LEN 1
-/* Transaction processing steering hint 2 for use with the Ev Queue. */
-#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_TAG2_EV_OFST 1
-#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_TAG2_EV_LEN 1
-/* Use Relaxed ordering model for TLPs on this VI. */
-#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_RELAXED_ORDERING_LBN 16
-#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_RELAXED_ORDERING_WIDTH 1
-/* Use ID based ordering for TLPs on this VI. */
-#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_ID_BASED_ORDERING_LBN 17
-#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_ID_BASED_ORDERING_WIDTH 1
-/* Set no snoop bit for TLPs on this VI. */
-#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_NO_SNOOP_LBN 18
-#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_NO_SNOOP_WIDTH 1
-/* Enable TPH for TLPs on this VI. */
-#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_ON_LBN 19
-#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_ON_WIDTH 1
-#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_DATA_OFST 0
-#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_DATA_LEN 4
+/* MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT msgresponse: This message has the same
+ * layout as GET_VI_TLP_PROCESSING_OUT, but with corrected field ordering to
+ * simplify use in drivers. (A decoding sketch follows these definitions.)
+ */
+#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_LEN 4
+#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_DATA_OFST 0
+#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_DATA_LEN 4
+#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_TPH_TAG1_RX_OFST 0
+#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_TPH_TAG1_RX_LBN 0
+#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_TPH_TAG1_RX_WIDTH 8
+#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_TPH_TAG2_EV_OFST 0
+#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_TPH_TAG2_EV_LBN 8
+#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_TPH_TAG2_EV_WIDTH 8
+#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_RELAXED_ORDERING_OFST 0
+#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_RELAXED_ORDERING_LBN 16
+#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_RELAXED_ORDERING_WIDTH 1
+#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_RELAXED_ORDERING_PACKET_DATA_OFST 0
+#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_RELAXED_ORDERING_PACKET_DATA_LBN 16
+#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_RELAXED_ORDERING_PACKET_DATA_WIDTH 1
+#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_ID_BASED_ORDERING_OFST 0
+#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_ID_BASED_ORDERING_LBN 17
+#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_ID_BASED_ORDERING_WIDTH 1
+#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_NO_SNOOP_OFST 0
+#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_NO_SNOOP_LBN 18
+#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_NO_SNOOP_WIDTH 1
+#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_TPH_ON_OFST 0
+#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_TPH_ON_LBN 19
+#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_TPH_ON_WIDTH 1
+#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_RELAXED_ORDERING_SYNC_DATA_OFST 0
+#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_RELAXED_ORDERING_SYNC_DATA_LBN 20
+#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_RELAXED_ORDERING_SYNC_DATA_WIDTH 1
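+
+/* Editorial illustration (not part of the MCDI definitions): a minimal,
+ * self-contained sketch of decoding the V2_OUT DATA dword using the
+ * LBN/WIDTH values above. The helper name and the sample value are
+ * hypothetical; drivers would normally use their MCDI accessor helpers
+ * rather than open-coding the shifts.
+ */
+#if 0 /* example only */
+#include <stdint.h>
+#include <stdio.h>
+
+static unsigned int tlp_field(uint32_t data, unsigned int lbn, unsigned int width)
+{
+	return (data >> lbn) & ((1u << width) - 1);	/* bits [lbn, lbn+width) */
+}
+
+int main(void)
+{
+	uint32_t data = 0x000a1234;			/* sample DATA dword */
+
+	printf("TPH_TAG1_RX=%u TPH_TAG2_EV=%u RELAXED_ORDERING=%u NO_SNOOP=%u TPH_ON=%u\n",
+	       tlp_field(data, 0, 8),			/* TPH_TAG1_RX */
+	       tlp_field(data, 8, 8),			/* TPH_TAG2_EV */
+	       tlp_field(data, 16, 1),			/* RELAXED_ORDERING */
+	       tlp_field(data, 18, 1),			/* NO_SNOOP */
+	       tlp_field(data, 19, 1));			/* TPH_ON */
+	return 0;
+}
+#endif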
/***********************************/
/* MC_CMD_SET_VI_TLP_PROCESSING
* Set TLP steering and ordering information for a VI. The caller must have the
* GRP_FUNC_DMA privilege and must be the currently-assigned user of this VI or
- * an ancestor of the current user (see MC_CMD_SET_VI_USER).
+ * an ancestor of the current user (see MC_CMD_SET_VI_USER). Note that LL
+ * queues require this to be called after allocation but before initialisation
+ * of the queue. The TLP options of a queue are fixed once the queue has been
+ * initialised; they default to the current global values, but may be
+ * overridden beforehand using this command. At LL queue allocation, all
+ * overrides are cleared. (A request-packing sketch follows these
+ * definitions.)
*/
#define MC_CMD_SET_VI_TLP_PROCESSING 0xb1
#undef MC_CMD_0xb1_PRIVILEGE_CTG
#define MC_CMD_0xb1_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-/* MC_CMD_SET_VI_TLP_PROCESSING_IN msgrequest */
-#define MC_CMD_SET_VI_TLP_PROCESSING_IN_LEN 8
-/* VI number to set information for. */
-#define MC_CMD_SET_VI_TLP_PROCESSING_IN_INSTANCE_OFST 0
-#define MC_CMD_SET_VI_TLP_PROCESSING_IN_INSTANCE_LEN 4
-/* Transaction processing steering hint 1 for use with the Rx Queue. */
-#define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_TAG1_RX_OFST 4
-#define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_TAG1_RX_LEN 1
-/* Transaction processing steering hint 2 for use with the Ev Queue. */
-#define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_TAG2_EV_OFST 5
-#define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_TAG2_EV_LEN 1
-/* Use Relaxed ordering model for TLPs on this VI. */
-#define MC_CMD_SET_VI_TLP_PROCESSING_IN_RELAXED_ORDERING_LBN 48
-#define MC_CMD_SET_VI_TLP_PROCESSING_IN_RELAXED_ORDERING_WIDTH 1
-/* Use ID based ordering for TLPs on this VI. */
-#define MC_CMD_SET_VI_TLP_PROCESSING_IN_ID_BASED_ORDERING_LBN 49
-#define MC_CMD_SET_VI_TLP_PROCESSING_IN_ID_BASED_ORDERING_WIDTH 1
-/* Set the no snoop bit for TLPs on this VI. */
-#define MC_CMD_SET_VI_TLP_PROCESSING_IN_NO_SNOOP_LBN 50
-#define MC_CMD_SET_VI_TLP_PROCESSING_IN_NO_SNOOP_WIDTH 1
-/* Enable TPH for TLPs on this VI. */
-#define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_ON_LBN 51
-#define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_ON_WIDTH 1
-#define MC_CMD_SET_VI_TLP_PROCESSING_IN_DATA_OFST 4
-#define MC_CMD_SET_VI_TLP_PROCESSING_IN_DATA_LEN 4
+/* MC_CMD_SET_VI_TLP_PROCESSING_V2_IN msgrequest: This message has the same
+ * layout as SET_VI_TLP_PROCESSING_IN, but with corrected field ordering to
+ * simplify use in drivers.
+ */
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_LEN 8
+/* Queue handle, encodes queue type and VI number to set information for. */
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_INSTANCE_OFST 0
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_INSTANCE_LEN 4
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_DATA_OFST 4
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_DATA_LEN 4
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_TPH_TAG1_RX_OFST 4
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_TPH_TAG1_RX_LBN 0
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_TPH_TAG1_RX_WIDTH 8
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_TPH_TAG2_EV_OFST 4
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_TPH_TAG2_EV_LBN 8
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_TPH_TAG2_EV_WIDTH 8
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_RELAXED_ORDERING_OFST 4
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_RELAXED_ORDERING_LBN 16
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_RELAXED_ORDERING_WIDTH 1
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_RELAXED_ORDERING_PACKET_DATA_OFST 4
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_RELAXED_ORDERING_PACKET_DATA_LBN 16
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_RELAXED_ORDERING_PACKET_DATA_WIDTH 1
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_ID_BASED_ORDERING_OFST 4
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_ID_BASED_ORDERING_LBN 17
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_ID_BASED_ORDERING_WIDTH 1
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_NO_SNOOP_OFST 4
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_NO_SNOOP_LBN 18
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_NO_SNOOP_WIDTH 1
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_TPH_ON_OFST 4
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_TPH_ON_LBN 19
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_TPH_ON_WIDTH 1
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_RELAXED_ORDERING_SYNC_DATA_OFST 4
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_RELAXED_ORDERING_SYNC_DATA_LBN 20
+#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_RELAXED_ORDERING_SYNC_DATA_WIDTH 1
/* MC_CMD_SET_VI_TLP_PROCESSING_OUT msgresponse */
#define MC_CMD_SET_VI_TLP_PROCESSING_OUT_LEN 0
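+
+/* Editorial illustration (not part of the MCDI definitions): a minimal,
+ * self-contained sketch of packing the 8-byte SET_VI_TLP_PROCESSING_V2_IN
+ * request from the offsets and LBN/WIDTH values above. The function name,
+ * queue handle and tag values are hypothetical; a real driver would build
+ * the buffer with its MCDI helpers and issue the command between LL queue
+ * allocation and queue initialisation, as noted in the comment above.
+ */
+#if 0 /* example only */
+#include <stdint.h>
+
+static void put_le32(uint8_t *p, uint32_t v)
+{
+	p[0] = v & 0xff;			/* MCDI buffers are little-endian */
+	p[1] = (v >> 8) & 0xff;
+	p[2] = (v >> 16) & 0xff;
+	p[3] = (v >> 24) & 0xff;
+}
+
+static void set_vi_tlp_v2_in(uint8_t buf[8], uint32_t instance,
+			     uint8_t tag1_rx, uint8_t tag2_ev, int tph_on)
+{
+	uint32_t data = 0;
+
+	data |= (uint32_t)tag1_rx << 0;		/* TPH_TAG1_RX: LBN 0, WIDTH 8 */
+	data |= (uint32_t)tag2_ev << 8;		/* TPH_TAG2_EV: LBN 8, WIDTH 8 */
+	data |= (uint32_t)(tph_on & 1) << 19;	/* TPH_ON: LBN 19, WIDTH 1 */
+
+	put_le32(buf + 0, instance);		/* INSTANCE at offset 0: queue handle */
+	put_le32(buf + 4, data);		/* DATA at offset 4 */
+}
+#endif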
/***********************************/
-/* MC_CMD_GET_TLP_PROCESSING_GLOBALS
- * Get global PCIe steering and transaction processing configuration.
- */
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS 0xbc
-#undef MC_CMD_0xbc_PRIVILEGE_CTG
-
-#define MC_CMD_0xbc_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN msgrequest */
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_LEN 4
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_OFST 0
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_LEN 4
-/* enum: MISC. */
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_MISC 0x0
-/* enum: IDO. */
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_IDO 0x1
-/* enum: RO. */
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_RO 0x2
-/* enum: TPH Type. */
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_TPH_TYPE 0x3
-
-/* MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT msgresponse */
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_LEN 8
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_GLOBAL_CATEGORY_OFST 0
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_GLOBAL_CATEGORY_LEN 4
-/* Enum values, see field(s): */
-/* MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN/TLP_GLOBAL_CATEGORY */
-/* Amalgamated TLP info word. */
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_WORD_OFST 4
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_WORD_LEN 4
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_MISC_WTAG_EN_OFST 4
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_MISC_WTAG_EN_LBN 0
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_MISC_WTAG_EN_WIDTH 1
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_MISC_SPARE_OFST 4
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_MISC_SPARE_LBN 1
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_MISC_SPARE_WIDTH 31
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_DL_EN_OFST 4
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_DL_EN_LBN 0
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_DL_EN_WIDTH 1
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_TX_EN_OFST 4
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_TX_EN_LBN 1
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_TX_EN_WIDTH 1
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_EV_EN_OFST 4
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_EV_EN_LBN 2
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_EV_EN_WIDTH 1
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_RX_EN_OFST 4
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_RX_EN_LBN 3
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_RX_EN_WIDTH 1
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_SPARE_OFST 4
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_SPARE_LBN 4
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_SPARE_WIDTH 28
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_RXDMA_EN_OFST 4
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_RXDMA_EN_LBN 0
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_RXDMA_EN_WIDTH 1
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_TXDMA_EN_OFST 4
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_TXDMA_EN_LBN 1
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_TXDMA_EN_WIDTH 1
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_DL_EN_OFST 4
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_DL_EN_LBN 2
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_DL_EN_WIDTH 1
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_SPARE_OFST 4
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_SPARE_LBN 3
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_SPARE_WIDTH 29
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_MSIX_OFST 4
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_MSIX_LBN 0
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_MSIX_WIDTH 2
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_DL_OFST 4
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_DL_LBN 2
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_DL_WIDTH 2
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_TX_OFST 4
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_TX_LBN 4
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_TX_WIDTH 2
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_EV_OFST 4
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_EV_LBN 6
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_EV_WIDTH 2
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_RX_OFST 4
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_RX_LBN 8
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_RX_WIDTH 2
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TLP_TYPE_SPARE_OFST 4
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TLP_TYPE_SPARE_LBN 9
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TLP_TYPE_SPARE_WIDTH 23
-
-
-/***********************************/
-/* MC_CMD_SET_TLP_PROCESSING_GLOBALS
- * Set global PCIe steering and transaction processing configuration.
- */
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS 0xbd
-#undef MC_CMD_0xbd_PRIVILEGE_CTG
-
-#define MC_CMD_0xbd_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN msgrequest */
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_LEN 8
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_OFST 0
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_LEN 4
-/* Enum values, see field(s): */
-/* MC_CMD_GET_TLP_PROCESSING_GLOBALS/MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN/TLP_GLOBAL_CATEGORY */
-/* Amalgamated TLP info word. */
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_WORD_OFST 4
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_WORD_LEN 4
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_MISC_WTAG_EN_OFST 4
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_MISC_WTAG_EN_LBN 0
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_MISC_WTAG_EN_WIDTH 1
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_DL_EN_OFST 4
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_DL_EN_LBN 0
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_DL_EN_WIDTH 1
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_TX_EN_OFST 4
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_TX_EN_LBN 1
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_TX_EN_WIDTH 1
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_EV_EN_OFST 4
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_EV_EN_LBN 2
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_EV_EN_WIDTH 1
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_RX_EN_OFST 4
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_RX_EN_LBN 3
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_RX_EN_WIDTH 1
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_RXDMA_EN_OFST 4
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_RXDMA_EN_LBN 0
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_RXDMA_EN_WIDTH 1
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_TXDMA_EN_OFST 4
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_TXDMA_EN_LBN 1
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_TXDMA_EN_WIDTH 1
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_DL_EN_OFST 4
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_DL_EN_LBN 2
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_DL_EN_WIDTH 1
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_MSIX_OFST 4
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_MSIX_LBN 0
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_MSIX_WIDTH 2
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_DL_OFST 4
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_DL_LBN 2
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_DL_WIDTH 2
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_TX_OFST 4
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_TX_LBN 4
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_TX_WIDTH 2
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_EV_OFST 4
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_EV_LBN 6
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_EV_WIDTH 2
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_RX_OFST 4
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_RX_LBN 8
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_RX_WIDTH 2
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_SPARE_OFST 4
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_SPARE_LBN 10
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_SPARE_WIDTH 22
-
-/* MC_CMD_SET_TLP_PROCESSING_GLOBALS_OUT msgresponse */
-#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_SATELLITE_DOWNLOAD
- * Download a new set of images to the satellite CPUs from the host.
- */
-#define MC_CMD_SATELLITE_DOWNLOAD 0x91
-#undef MC_CMD_0x91_PRIVILEGE_CTG
-
-#define MC_CMD_0x91_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_SATELLITE_DOWNLOAD_IN msgrequest: The reset requirements for the CPUs
- * are subtle, and so downloads must proceed in a number of phases.
- *
- * 1) PHASE_RESET with a target of TARGET_ALL and chunk ID/length of 0.
- *
- * 2) PHASE_IMEMS for each of the IMEM targets (target IDs 0-11). Each download
- * may consist of multiple chunks. The final chunk (with CHUNK_ID_LAST) should
- * be a checksum (a simple 32-bit sum) of the transferred data. An individual
- * download may be aborted using CHUNK_ID_ABORT.
- *
- * 3) PHASE_VECTORS for each of the vector table targets (target IDs 12-15),
- * similar to PHASE_IMEMS.
- *
- * 4) PHASE_READY with a target of TARGET_ALL and chunk ID/length of 0.
- *
- * After any error (a requested abort is not considered to be an error) the
- * sequence must be restarted from PHASE_RESET.
- */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_LENMIN 20
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_LENMAX 252
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_LENMAX_MCDI2 1020
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_LEN(num) (16+4*(num))
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_DATA_NUM(len) (((len)-16)/4)
-/* Download phase. (Note: the IDLE phase is used internally and is never valid
- * in a command from the host.)
- */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_OFST 0
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_LEN 4
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_IDLE 0x0 /* enum */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_RESET 0x1 /* enum */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_IMEMS 0x2 /* enum */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_VECTORS 0x3 /* enum */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_READY 0x4 /* enum */
-/* Target for download. (These match the blob numbers defined in
- * mc_flash_layout.h.)
- */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_OFST 4
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_LEN 4
-/* enum: Valid in phase 2 (PHASE_IMEMS) only */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXDI_TEXT 0x0
-/* enum: Valid in phase 2 (PHASE_IMEMS) only */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXDI_TEXT 0x1
-/* enum: Valid in phase 2 (PHASE_IMEMS) only */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXDP_TEXT 0x2
-/* enum: Valid in phase 2 (PHASE_IMEMS) only */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXDP_TEXT 0x3
-/* enum: Valid in phase 2 (PHASE_IMEMS) only */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXHRSL_HR_LUT 0x4
-/* enum: Valid in phase 2 (PHASE_IMEMS) only */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXHRSL_HR_LUT_CFG 0x5
-/* enum: Valid in phase 2 (PHASE_IMEMS) only */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXHRSL_HR_LUT 0x6
-/* enum: Valid in phase 2 (PHASE_IMEMS) only */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXHRSL_HR_LUT_CFG 0x7
-/* enum: Valid in phase 2 (PHASE_IMEMS) only */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXHRSL_HR_PGM 0x8
-/* enum: Valid in phase 2 (PHASE_IMEMS) only */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXHRSL_SL_PGM 0x9
-/* enum: Valid in phase 2 (PHASE_IMEMS) only */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXHRSL_HR_PGM 0xa
-/* enum: Valid in phase 2 (PHASE_IMEMS) only */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXHRSL_SL_PGM 0xb
-/* enum: Valid in phase 3 (PHASE_VECTORS) only */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXDI_VTBL0 0xc
-/* enum: Valid in phase 3 (PHASE_VECTORS) only */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXDI_VTBL0 0xd
-/* enum: Valid in phase 3 (PHASE_VECTORS) only */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXDI_VTBL1 0xe
-/* enum: Valid in phase 3 (PHASE_VECTORS) only */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXDI_VTBL1 0xf
-/* enum: Valid in phases 1 (PHASE_RESET) and 4 (PHASE_READY) only */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_ALL 0xffffffff
-/* Chunk ID, or CHUNK_ID_LAST or CHUNK_ID_ABORT */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_ID_OFST 8
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_ID_LEN 4
-/* enum: Last chunk, containing checksum rather than data */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_ID_LAST 0xffffffff
-/* enum: Abort download of this item */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_ID_ABORT 0xfffffffe
-/* Length of this chunk in bytes */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_LEN_OFST 12
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_LEN_LEN 4
-/* Data for this chunk */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_DATA_OFST 16
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_DATA_LEN 4
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_DATA_MINNUM 1
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_DATA_MAXNUM 59
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_DATA_MAXNUM_MCDI2 251
-
-/* MC_CMD_SATELLITE_DOWNLOAD_OUT msgresponse */
-#define MC_CMD_SATELLITE_DOWNLOAD_OUT_LEN 8
-/* Same as MC_CMD_ERR field, but included as 0 in success cases */
-#define MC_CMD_SATELLITE_DOWNLOAD_OUT_RESULT_OFST 0
-#define MC_CMD_SATELLITE_DOWNLOAD_OUT_RESULT_LEN 4
-/* Extra status information */
-#define MC_CMD_SATELLITE_DOWNLOAD_OUT_INFO_OFST 4
-#define MC_CMD_SATELLITE_DOWNLOAD_OUT_INFO_LEN 4
-/* enum: Code download OK, completed. */
-#define MC_CMD_SATELLITE_DOWNLOAD_OUT_OK_COMPLETE 0x0
-/* enum: Code download aborted as requested. */
-#define MC_CMD_SATELLITE_DOWNLOAD_OUT_OK_ABORTED 0x1
-/* enum: Code download OK so far, send next chunk. */
-#define MC_CMD_SATELLITE_DOWNLOAD_OUT_OK_NEXT_CHUNK 0x2
-/* enum: Download phases out of sequence */
-#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_PHASE 0x100
-/* enum: Bad target for this phase */
-#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_TARGET 0x101
-/* enum: Chunk ID out of sequence */
-#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_CHUNK_ID 0x200
-/* enum: Chunk length zero or too large */
-#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_CHUNK_LEN 0x201
-/* enum: Checksum was incorrect */
-#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_CHECKSUM 0x300
-
-
-/***********************************/
/* MC_CMD_GET_CAPABILITIES
- * Get device capabilities.
- *
- * This is supplementary to the MC_CMD_GET_BOARD_CFG command, and intended to
- * reference inherent device capabilities as opposed to current NVRAM config.
+ * Get device capabilities. This is supplementary to the MC_CMD_GET_BOARD_CFG
+ * command, and intended to reference inherent device capabilities as opposed
+ * to current NVRAM config.
*/
#define MC_CMD_GET_CAPABILITIES 0xbe
#undef MC_CMD_0xbe_PRIVILEGE_CTG
@@ -14490,7 +14250,10 @@
/* MC_CMD_GET_CAPABILITIES_V2_OUT_ACCESS_NOT_PERMITTED 0xff */
/* enum: PF does not exist. */
/* MC_CMD_GET_CAPABILITIES_V2_OUT_PF_NOT_PRESENT 0xfe */
-/* Number of VIs available for each external port */
+/* Number of VIs available for external ports 0-3. For devices with more than
+ * four ports, the remainder are in NUM_VIS_PER_PORT2 in
+ * GET_CAPABILITIES_V12_OUT.
+ */
#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VIS_PER_PORT_OFST 58
#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VIS_PER_PORT_LEN 2
#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VIS_PER_PORT_NUM 4
@@ -14900,7 +14663,10 @@
/* MC_CMD_GET_CAPABILITIES_V3_OUT_ACCESS_NOT_PERMITTED 0xff */
/* enum: PF does not exist. */
/* MC_CMD_GET_CAPABILITIES_V3_OUT_PF_NOT_PRESENT 0xfe */
-/* Number of VIs available for each external port */
+/* Number of VIs available for external ports 0-3. For devices with more than
+ * four ports, the remainder are in NUM_VIS_PER_PORT2 in
+ * GET_CAPABILITIES_V12_OUT.
+ */
#define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_VIS_PER_PORT_OFST 58
#define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_VIS_PER_PORT_LEN 2
#define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_VIS_PER_PORT_NUM 4
@@ -15335,7 +15101,10 @@
/* MC_CMD_GET_CAPABILITIES_V4_OUT_ACCESS_NOT_PERMITTED 0xff */
/* enum: PF does not exist. */
/* MC_CMD_GET_CAPABILITIES_V4_OUT_PF_NOT_PRESENT 0xfe */
-/* Number of VIs available for each external port */
+/* Number of VIs available for external ports 0-3. For devices with more than
+ * four ports, the remainder are in NUM_VIS_PER_PORT2 in
+ * GET_CAPABILITIES_V12_OUT.
+ */
#define MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_VIS_PER_PORT_OFST 58
#define MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_VIS_PER_PORT_LEN 2
#define MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_VIS_PER_PORT_NUM 4
@@ -15778,7 +15547,10 @@
/* MC_CMD_GET_CAPABILITIES_V5_OUT_ACCESS_NOT_PERMITTED 0xff */
/* enum: PF does not exist. */
/* MC_CMD_GET_CAPABILITIES_V5_OUT_PF_NOT_PRESENT 0xfe */
-/* Number of VIs available for each external port */
+/* Number of VIs available for external ports 0-3. For devices with more than
+ * four ports, the remainder are in NUM_VIS_PER_PORT2 in
+ * GET_CAPABILITIES_V12_OUT.
+ */
#define MC_CMD_GET_CAPABILITIES_V5_OUT_NUM_VIS_PER_PORT_OFST 58
#define MC_CMD_GET_CAPABILITIES_V5_OUT_NUM_VIS_PER_PORT_LEN 2
#define MC_CMD_GET_CAPABILITIES_V5_OUT_NUM_VIS_PER_PORT_NUM 4
@@ -16226,7 +15998,10 @@
/* MC_CMD_GET_CAPABILITIES_V6_OUT_ACCESS_NOT_PERMITTED 0xff */
/* enum: PF does not exist. */
/* MC_CMD_GET_CAPABILITIES_V6_OUT_PF_NOT_PRESENT 0xfe */
-/* Number of VIs available for each external port */
+/* Number of VIs available for external ports 0-3. For devices with more than
+ * four ports, the remainder are in NUM_VIS_PER_PORT2 in
+ * GET_CAPABILITIES_V12_OUT.
+ */
#define MC_CMD_GET_CAPABILITIES_V6_OUT_NUM_VIS_PER_PORT_OFST 58
#define MC_CMD_GET_CAPABILITIES_V6_OUT_NUM_VIS_PER_PORT_LEN 2
#define MC_CMD_GET_CAPABILITIES_V6_OUT_NUM_VIS_PER_PORT_NUM 4
@@ -16685,7 +16460,10 @@
/* MC_CMD_GET_CAPABILITIES_V7_OUT_ACCESS_NOT_PERMITTED 0xff */
/* enum: PF does not exist. */
/* MC_CMD_GET_CAPABILITIES_V7_OUT_PF_NOT_PRESENT 0xfe */
-/* Number of VIs available for each external port */
+/* Number of VIs available for external ports 0-3. For devices with more than
+ * four ports, the remainder are in NUM_VIS_PER_PORT2 in
+ * GET_CAPABILITIES_V12_OUT.
+ */
#define MC_CMD_GET_CAPABILITIES_V7_OUT_NUM_VIS_PER_PORT_OFST 58
#define MC_CMD_GET_CAPABILITIES_V7_OUT_NUM_VIS_PER_PORT_LEN 2
#define MC_CMD_GET_CAPABILITIES_V7_OUT_NUM_VIS_PER_PORT_NUM 4
@@ -16796,9 +16574,21 @@
#define MC_CMD_GET_CAPABILITIES_V7_OUT_RSS_STEER_ON_OUTER_SUPPORTED_OFST 148
#define MC_CMD_GET_CAPABILITIES_V7_OUT_RSS_STEER_ON_OUTER_SUPPORTED_LBN 12
#define MC_CMD_GET_CAPABILITIES_V7_OUT_RSS_STEER_ON_OUTER_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_MAE_ACTION_SET_ALLOC_V3_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_MAE_ACTION_SET_ALLOC_V3_SUPPORTED_LBN 13
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_MAE_ACTION_SET_ALLOC_V3_SUPPORTED_WIDTH 1
#define MC_CMD_GET_CAPABILITIES_V7_OUT_DYNAMIC_MPORT_JOURNAL_OFST 148
#define MC_CMD_GET_CAPABILITIES_V7_OUT_DYNAMIC_MPORT_JOURNAL_LBN 14
#define MC_CMD_GET_CAPABILITIES_V7_OUT_DYNAMIC_MPORT_JOURNAL_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_CLIENT_CMD_VF_PROXY_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_CLIENT_CMD_VF_PROXY_LBN 15
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_CLIENT_CMD_VF_PROXY_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_LL_RX_EVENT_SUPPRESSION_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_LL_RX_EVENT_SUPPRESSION_SUPPORTED_LBN 16
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_LL_RX_EVENT_SUPPRESSION_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_CXL_CONFIG_ENABLE_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_CXL_CONFIG_ENABLE_LBN 17
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_CXL_CONFIG_ENABLE_WIDTH 1
/* MC_CMD_GET_CAPABILITIES_V8_OUT msgresponse */
#define MC_CMD_GET_CAPABILITIES_V8_OUT_LEN 160
@@ -17189,7 +16979,10 @@
/* MC_CMD_GET_CAPABILITIES_V8_OUT_ACCESS_NOT_PERMITTED 0xff */
/* enum: PF does not exist. */
/* MC_CMD_GET_CAPABILITIES_V8_OUT_PF_NOT_PRESENT 0xfe */
-/* Number of VIs available for each external port */
+/* Number of VIs available for external ports 0-3. For devices with more than
+ * four ports, the remainder are in NUM_VIS_PER_PORT2 in
+ * GET_CAPABILITIES_V12_OUT.
+ */
#define MC_CMD_GET_CAPABILITIES_V8_OUT_NUM_VIS_PER_PORT_OFST 58
#define MC_CMD_GET_CAPABILITIES_V8_OUT_NUM_VIS_PER_PORT_LEN 2
#define MC_CMD_GET_CAPABILITIES_V8_OUT_NUM_VIS_PER_PORT_NUM 4
@@ -17300,9 +17093,21 @@
#define MC_CMD_GET_CAPABILITIES_V8_OUT_RSS_STEER_ON_OUTER_SUPPORTED_OFST 148
#define MC_CMD_GET_CAPABILITIES_V8_OUT_RSS_STEER_ON_OUTER_SUPPORTED_LBN 12
#define MC_CMD_GET_CAPABILITIES_V8_OUT_RSS_STEER_ON_OUTER_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_MAE_ACTION_SET_ALLOC_V3_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_MAE_ACTION_SET_ALLOC_V3_SUPPORTED_LBN 13
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_MAE_ACTION_SET_ALLOC_V3_SUPPORTED_WIDTH 1
#define MC_CMD_GET_CAPABILITIES_V8_OUT_DYNAMIC_MPORT_JOURNAL_OFST 148
#define MC_CMD_GET_CAPABILITIES_V8_OUT_DYNAMIC_MPORT_JOURNAL_LBN 14
#define MC_CMD_GET_CAPABILITIES_V8_OUT_DYNAMIC_MPORT_JOURNAL_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_CLIENT_CMD_VF_PROXY_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_CLIENT_CMD_VF_PROXY_LBN 15
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_CLIENT_CMD_VF_PROXY_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_LL_RX_EVENT_SUPPRESSION_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_LL_RX_EVENT_SUPPRESSION_SUPPORTED_LBN 16
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_LL_RX_EVENT_SUPPRESSION_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_CXL_CONFIG_ENABLE_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_CXL_CONFIG_ENABLE_LBN 17
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_CXL_CONFIG_ENABLE_WIDTH 1
/* These bits are reserved for communicating test-specific capabilities to
* host-side test software. All production drivers should treat this field as
* opaque.
@@ -17707,7 +17512,10 @@
/* MC_CMD_GET_CAPABILITIES_V9_OUT_ACCESS_NOT_PERMITTED 0xff */
/* enum: PF does not exist. */
/* MC_CMD_GET_CAPABILITIES_V9_OUT_PF_NOT_PRESENT 0xfe */
-/* Number of VIs available for each external port */
+/* Number of VIs available for external ports 0-3. For devices with more than
+ * four ports, the remainder are in NUM_VIS_PER_PORT2 in
+ * GET_CAPABILITIES_V12_OUT.
+ */
#define MC_CMD_GET_CAPABILITIES_V9_OUT_NUM_VIS_PER_PORT_OFST 58
#define MC_CMD_GET_CAPABILITIES_V9_OUT_NUM_VIS_PER_PORT_LEN 2
#define MC_CMD_GET_CAPABILITIES_V9_OUT_NUM_VIS_PER_PORT_NUM 4
@@ -17818,9 +17626,21 @@
#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_STEER_ON_OUTER_SUPPORTED_OFST 148
#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_STEER_ON_OUTER_SUPPORTED_LBN 12
#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_STEER_ON_OUTER_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_MAE_ACTION_SET_ALLOC_V3_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_MAE_ACTION_SET_ALLOC_V3_SUPPORTED_LBN 13
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_MAE_ACTION_SET_ALLOC_V3_SUPPORTED_WIDTH 1
#define MC_CMD_GET_CAPABILITIES_V9_OUT_DYNAMIC_MPORT_JOURNAL_OFST 148
#define MC_CMD_GET_CAPABILITIES_V9_OUT_DYNAMIC_MPORT_JOURNAL_LBN 14
#define MC_CMD_GET_CAPABILITIES_V9_OUT_DYNAMIC_MPORT_JOURNAL_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_CLIENT_CMD_VF_PROXY_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_CLIENT_CMD_VF_PROXY_LBN 15
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_CLIENT_CMD_VF_PROXY_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_LL_RX_EVENT_SUPPRESSION_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_LL_RX_EVENT_SUPPRESSION_SUPPORTED_LBN 16
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_LL_RX_EVENT_SUPPRESSION_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_CXL_CONFIG_ENABLE_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_CXL_CONFIG_ENABLE_LBN 17
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_CXL_CONFIG_ENABLE_WIDTH 1
/* These bits are reserved for communicating test-specific capabilities to
* host-side test software. All production drivers should treat this field as
* opaque.
@@ -18260,7 +18080,10 @@
/* MC_CMD_GET_CAPABILITIES_V10_OUT_ACCESS_NOT_PERMITTED 0xff */
/* enum: PF does not exist. */
/* MC_CMD_GET_CAPABILITIES_V10_OUT_PF_NOT_PRESENT 0xfe */
-/* Number of VIs available for each external port */
+/* Number of VIs available for external ports 0-3. For devices with more than
+ * four ports, the remainder are in NUM_VIS_PER_PORT2 in
+ * GET_CAPABILITIES_V12_OUT.
+ */
#define MC_CMD_GET_CAPABILITIES_V10_OUT_NUM_VIS_PER_PORT_OFST 58
#define MC_CMD_GET_CAPABILITIES_V10_OUT_NUM_VIS_PER_PORT_LEN 2
#define MC_CMD_GET_CAPABILITIES_V10_OUT_NUM_VIS_PER_PORT_NUM 4
@@ -18371,9 +18194,21 @@
#define MC_CMD_GET_CAPABILITIES_V10_OUT_RSS_STEER_ON_OUTER_SUPPORTED_OFST 148
#define MC_CMD_GET_CAPABILITIES_V10_OUT_RSS_STEER_ON_OUTER_SUPPORTED_LBN 12
#define MC_CMD_GET_CAPABILITIES_V10_OUT_RSS_STEER_ON_OUTER_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_MAE_ACTION_SET_ALLOC_V3_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_MAE_ACTION_SET_ALLOC_V3_SUPPORTED_LBN 13
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_MAE_ACTION_SET_ALLOC_V3_SUPPORTED_WIDTH 1
#define MC_CMD_GET_CAPABILITIES_V10_OUT_DYNAMIC_MPORT_JOURNAL_OFST 148
#define MC_CMD_GET_CAPABILITIES_V10_OUT_DYNAMIC_MPORT_JOURNAL_LBN 14
#define MC_CMD_GET_CAPABILITIES_V10_OUT_DYNAMIC_MPORT_JOURNAL_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_CLIENT_CMD_VF_PROXY_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_CLIENT_CMD_VF_PROXY_LBN 15
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_CLIENT_CMD_VF_PROXY_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_LL_RX_EVENT_SUPPRESSION_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_LL_RX_EVENT_SUPPRESSION_SUPPORTED_LBN 16
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_LL_RX_EVENT_SUPPRESSION_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_CXL_CONFIG_ENABLE_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_CXL_CONFIG_ENABLE_LBN 17
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_CXL_CONFIG_ENABLE_WIDTH 1
/* These bits are reserved for communicating test-specific capabilities to
* host-side test software. All production drivers should treat this field as
* opaque.
@@ -18438,6 +18273,1182 @@
#define MC_CMD_GET_CAPABILITIES_V10_OUT_GUARANTEED_QUEUE_SIZES_OFST 188
#define MC_CMD_GET_CAPABILITIES_V10_OUT_GUARANTEED_QUEUE_SIZES_LEN 4
+/* MC_CMD_GET_CAPABILITIES_V11_OUT msgresponse */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_LEN 196
+/* First word of flags. */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_FLAGS1_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_FLAGS1_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VPORT_RECONFIGURE_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VPORT_RECONFIGURE_LBN 3
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VPORT_RECONFIGURE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_STRIPING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_STRIPING_LBN 4
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_STRIPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VADAPTOR_QUERY_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VADAPTOR_QUERY_LBN 5
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VADAPTOR_QUERY_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EVB_PORT_VLAN_RESTRICT_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EVB_PORT_VLAN_RESTRICT_LBN 6
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EVB_PORT_VLAN_RESTRICT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_DRV_ATTACH_PREBOOT_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_DRV_ATTACH_PREBOOT_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_DRV_ATTACH_PREBOOT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_FORCE_EVENT_MERGING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_FORCE_EVENT_MERGING_LBN 8
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_FORCE_EVENT_MERGING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_SET_MAC_ENHANCED_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_SET_MAC_ENHANCED_LBN 9
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_SET_MAC_ENHANCED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_LBN 10
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 11
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_MAC_SECURITY_FILTERING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_MAC_SECURITY_FILTERING_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_ADDITIONAL_RSS_MODES_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_ADDITIONAL_RSS_MODES_LBN 13
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_ADDITIONAL_RSS_MODES_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_QBB_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_QBB_LBN 14
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_QBB_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_PACKED_STREAM_VAR_BUFFERS_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_RSS_LIMITED_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_RSS_LIMITED_LBN 16
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_RSS_LIMITED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_PACKED_STREAM_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_PACKED_STREAM_LBN 17
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_PACKED_STREAM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_INCLUDE_FCS_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_INCLUDE_FCS_LBN 18
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_INCLUDE_FCS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_VLAN_INSERTION_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_VLAN_INSERTION_LBN 19
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_VLAN_INSERTION_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_VLAN_STRIPPING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_VLAN_STRIPPING_LBN 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_VLAN_STRIPPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_TSO_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_TSO_LBN 21
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_TSO_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_PREFIX_LEN_0_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_PREFIX_LEN_0_LBN 22
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_PREFIX_LEN_0_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_PREFIX_LEN_14_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_PREFIX_LEN_14_LBN 23
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_PREFIX_LEN_14_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_TIMESTAMP_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_TIMESTAMP_LBN 24
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_TIMESTAMP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_BATCHING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_BATCHING_LBN 25
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_BATCHING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_MCAST_FILTER_CHAINING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_MCAST_FILTER_CHAINING_LBN 26
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_MCAST_FILTER_CHAINING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_PM_AND_RXDP_COUNTERS_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_PM_AND_RXDP_COUNTERS_LBN 27
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_DISABLE_SCATTER_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_DISABLE_SCATTER_LBN 28
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_DISABLE_SCATTER_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_MCAST_UDP_LOOPBACK_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EVB_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EVB_LBN 30
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EVB_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VXLAN_NVGRE_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VXLAN_NVGRE_LBN 31
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VXLAN_NVGRE_WIDTH 1
+/* RxDPCPU firmware id. */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_DPCPU_FW_ID_OFST 4
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_DPCPU_FW_ID_LEN 2
+/* enum: Standard RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXDP 0x0
+/* enum: Low latency RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXDP_LOW_LATENCY 0x1
+/* enum: Packed stream RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXDP_PACKED_STREAM 0x2
+/* enum: Rules engine RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXDP_RULES_ENGINE 0x5
+/* enum: DPDK RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXDP_DPDK 0x6
+/* enum: BIST RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXDP_BIST 0x10a
+/* enum: RXDP Test firmware image 1 */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101
+/* enum: RXDP Test firmware image 2 */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102
+/* enum: RXDP Test firmware image 3 */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103
+/* enum: RXDP Test firmware image 4 */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104
+/* enum: RXDP Test firmware image 5 */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXDP_TEST_BACKPRESSURE 0x105
+/* enum: RXDP Test firmware image 6 */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106
+/* enum: RXDP Test firmware image 7 */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107
+/* enum: RXDP Test firmware image 8 */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXDP_TEST_FW_DISABLE_DL 0x108
+/* enum: RXDP Test firmware image 9 */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b
+/* enum: RXDP Test firmware image 10 */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXDP_TEST_FW_SLOW 0x10c
+/* TxDPCPU firmware id. */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_DPCPU_FW_ID_OFST 6
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_DPCPU_FW_ID_LEN 2
+/* enum: Standard TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXDP 0x0
+/* enum: Low latency TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXDP_LOW_LATENCY 0x1
+/* enum: High packet rate TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXDP_HIGH_PACKET_RATE 0x3
+/* enum: Rules engine TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXDP_RULES_ENGINE 0x5
+/* enum: DPDK TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXDP_DPDK 0x6
+/* enum: BIST TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXDP_BIST 0x12d
+/* enum: TXDP Test firmware image 1 */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXDP_TEST_FW_TSO_EDIT 0x101
+/* enum: TXDP Test firmware image 2 */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102
+/* enum: TXDP CSR bus test firmware */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXDP_TEST_FW_CSR 0x103
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXPD_FW_VERSION_OFST 8
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXPD_FW_VERSION_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXPD_FW_VERSION_REV_OFST 8
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXPD_FW_VERSION_REV_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXPD_FW_VERSION_REV_WIDTH 12
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXPD_FW_VERSION_TYPE_OFST 8
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXPD_FW_VERSION_TYPE_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial RX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: RX PD firmware for telemetry prototyping (Medford2 development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXPD_FW_TYPE_TESTFW_TELEMETRY 0x1
+/* enum: RX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Full featured RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant RX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+/* enum: Low latency RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5
+/* enum: Packed stream RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6
+/* enum: RX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: Rules engine RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXPD_FW_TYPE_L3XUDP 0x9
+/* enum: DPDK RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXPD_FW_TYPE_DPDK 0xa
+/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* enum: RX PD firmware parsing but not filtering network overlay tunnel
+ * encapsulations (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXPD_FW_VERSION_OFST 10
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXPD_FW_VERSION_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXPD_FW_VERSION_REV_OFST 10
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXPD_FW_VERSION_REV_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXPD_FW_VERSION_REV_WIDTH 12
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXPD_FW_VERSION_TYPE_OFST 10
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXPD_FW_VERSION_TYPE_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial TX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: TX PD firmware for telemetry prototyping (Medford2 development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXPD_FW_TYPE_TESTFW_TELEMETRY 0x1
+/* enum: TX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Full featured TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant TX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */
+/* enum: TX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: Rules engine TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXPD_FW_TYPE_L3XUDP 0x9
+/* enum: DPDK TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXPD_FW_TYPE_DPDK 0xa
+/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* Hardware capabilities of NIC */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_HW_CAPABILITIES_OFST 12
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_HW_CAPABILITIES_LEN 4
+/* Licensed capabilities */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_LICENSE_CAPABILITIES_OFST 16
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_LICENSE_CAPABILITIES_LEN 4
+/* Second word of flags. Not present on older firmware (check the length). */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_FLAGS2_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_FLAGS2_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_TSO_V2_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_TSO_V2_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_TSO_V2_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_TSO_V2_ENCAP_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_TSO_V2_ENCAP_LBN 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_TSO_V2_ENCAP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EVQ_TIMER_CTRL_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EVQ_TIMER_CTRL_LBN 2
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EVQ_TIMER_CTRL_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EVENT_CUT_THROUGH_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EVENT_CUT_THROUGH_LBN 3
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EVENT_CUT_THROUGH_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_CUT_THROUGH_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_CUT_THROUGH_LBN 4
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_CUT_THROUGH_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_VFIFO_ULL_MODE_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_VFIFO_ULL_MODE_LBN 5
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_VFIFO_ULL_MODE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_MAC_STATS_40G_TX_SIZE_BINS_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_MAC_STATS_40G_TX_SIZE_BINS_LBN 6
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_MAC_STATS_40G_TX_SIZE_BINS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_INIT_EVQ_TYPE_SUPPORTED_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_INIT_EVQ_TYPE_SUPPORTED_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_INIT_EVQ_TYPE_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_INIT_EVQ_V2_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_INIT_EVQ_V2_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_INIT_EVQ_V2_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_MAC_TIMESTAMPING_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_MAC_TIMESTAMPING_LBN 8
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_MAC_TIMESTAMPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_TIMESTAMP_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_TIMESTAMP_LBN 9
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_TIMESTAMP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_SNIFF_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_SNIFF_LBN 10
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_SNIFF_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_SNIFF_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_SNIFF_LBN 11
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_SNIFF_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_MCDI_BACKGROUND_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_MCDI_BACKGROUND_LBN 13
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_MCDI_BACKGROUND_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_MCDI_DB_RETURN_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_MCDI_DB_RETURN_LBN 14
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_MCDI_DB_RETURN_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_CTPIO_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_CTPIO_LBN 15
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_CTPIO_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TSA_SUPPORT_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TSA_SUPPORT_LBN 16
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TSA_SUPPORT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TSA_BOUND_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TSA_BOUND_LBN 17
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TSA_BOUND_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_SF_ADAPTER_AUTHENTICATION_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_SF_ADAPTER_AUTHENTICATION_LBN 18
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_SF_ADAPTER_AUTHENTICATION_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_FILTER_ACTION_FLAG_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_FILTER_ACTION_FLAG_LBN 19
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_FILTER_ACTION_FLAG_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_FILTER_ACTION_MARK_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_FILTER_ACTION_MARK_LBN 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_FILTER_ACTION_MARK_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EQUAL_STRIDE_SUPER_BUFFER_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EQUAL_STRIDE_SUPER_BUFFER_LBN 21
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EQUAL_STRIDE_SUPER_BUFFER_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EQUAL_STRIDE_PACKED_STREAM_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EQUAL_STRIDE_PACKED_STREAM_LBN 21
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EQUAL_STRIDE_PACKED_STREAM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_L3XUDP_SUPPORT_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_L3XUDP_SUPPORT_LBN 22
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_L3XUDP_SUPPORT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_FW_SUBVARIANT_NO_TX_CSUM_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_FW_SUBVARIANT_NO_TX_CSUM_LBN 23
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_FW_SUBVARIANT_NO_TX_CSUM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VI_SPREADING_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VI_SPREADING_LBN 24
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VI_SPREADING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXDP_HLB_IDLE_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXDP_HLB_IDLE_LBN 25
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXDP_HLB_IDLE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_INIT_RXQ_NO_CONT_EV_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_INIT_RXQ_NO_CONT_EV_LBN 26
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_INIT_RXQ_NO_CONT_EV_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_INIT_RXQ_WITH_BUFFER_SIZE_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_INIT_RXQ_WITH_BUFFER_SIZE_LBN 27
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_INIT_RXQ_WITH_BUFFER_SIZE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_BUNDLE_UPDATE_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_BUNDLE_UPDATE_LBN 28
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_BUNDLE_UPDATE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_TSO_V3_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_TSO_V3_LBN 29
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_TSO_V3_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_DYNAMIC_SENSORS_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_DYNAMIC_SENSORS_LBN 30
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_DYNAMIC_SENSORS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_LBN 31
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_WIDTH 1
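/*
 * Editorial sketch (not part of the generated header): each flag above is
 * described by an _OFST/_LBN/_WIDTH triplet - a little-endian 32-bit word at
 * byte offset _OFST, with the flag occupying _WIDTH bits starting at bit
 * _LBN. Assuming a raw response buffer "resp" of "resp_len" bytes, a generic
 * extractor might look like this (buffer and helper names are illustrative):
 */
#include <endian.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

static inline uint32_t capv11_get_field(const uint8_t *resp, size_t resp_len,
                                        unsigned int ofst, unsigned int lbn,
                                        unsigned int width)
{
        uint32_t dword;

        if (ofst + 4 > resp_len)        /* field absent on older firmware */
                return 0;
        memcpy(&dword, resp + ofst, sizeof(dword));
        dword = le32toh(dword);         /* MCDI responses are little-endian */
        return (dword >> lbn) & ((width < 32 ? (1u << width) : 0u) - 1u);
}

/*
 * Usage, e.g. testing FATSOv2 support:
 *   bool tso_v2 = capv11_get_field(resp, resp_len,
 *           MC_CMD_GET_CAPABILITIES_V11_OUT_TX_TSO_V2_OFST,
 *           MC_CMD_GET_CAPABILITIES_V11_OUT_TX_TSO_V2_LBN,
 *           MC_CMD_GET_CAPABILITIES_V11_OUT_TX_TSO_V2_WIDTH);
 */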
+/* Number of FATSOv2 contexts per datapath supported by this NIC (when
+ * TX_TSO_V2 == 1). Not present on older firmware (check the length).
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_TSO_V2_N_CONTEXTS_OFST 24
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_TSO_V2_N_CONTEXTS_LEN 2
+/* One byte per PF containing the number of the external port assigned to this
+ * PF, indexed by PF number. Special values indicate that a PF is either not
+ * present or not assigned.
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_PFS_TO_PORTS_ASSIGNMENT_OFST 26
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_PFS_TO_PORTS_ASSIGNMENT_LEN 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_PFS_TO_PORTS_ASSIGNMENT_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_ACCESS_NOT_PERMITTED 0xff
+/* enum: PF does not exist. */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_PF_NOT_PRESENT 0xfe
+/* enum: PF does exist but is not assigned to any external port. */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_PF_NOT_ASSIGNED 0xfd
+/* enum: This value indicates that the PF is assigned, but the assignment
+ * cannot be expressed in this field. It is intended for a possible future
+ * situation where a more complex PF-to-port mapping scheme is used. A future
+ * driver should look for a new field supporting the new scheme; a current/old
+ * driver should treat this value as PF_NOT_ASSIGNED.
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_INCOMPATIBLE_ASSIGNMENT 0xfc
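/*
 * Editorial sketch (not part of the generated header): resolving the external
 * port for a PF from the byte array above, folding the sentinel values into a
 * single "no usable port" result as the comments suggest. Buffer and helper
 * names are illustrative.
 */
#include <stddef.h>
#include <stdint.h>

static int capv11_pf_external_port(const uint8_t *resp, size_t resp_len,
                                   unsigned int pf)
{
        unsigned int ofst =
                MC_CMD_GET_CAPABILITIES_V11_OUT_PFS_TO_PORTS_ASSIGNMENT_OFST + pf;

        if (pf >= MC_CMD_GET_CAPABILITIES_V11_OUT_PFS_TO_PORTS_ASSIGNMENT_NUM ||
            ofst >= resp_len)
                return -1;

        switch (resp[ofst]) {
        case MC_CMD_GET_CAPABILITIES_V11_OUT_ACCESS_NOT_PERMITTED:
        case MC_CMD_GET_CAPABILITIES_V11_OUT_PF_NOT_PRESENT:
        case MC_CMD_GET_CAPABILITIES_V11_OUT_PF_NOT_ASSIGNED:
        case MC_CMD_GET_CAPABILITIES_V11_OUT_INCOMPATIBLE_ASSIGNMENT:
                return -1;              /* not visible / not present / not assigned */
        default:
                return resp[ofst];      /* external port number for this PF */
        }
}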
+/* One byte per PF containing the number of its VFs, indexed by PF number. A
+ * special value indicates that a PF is not present.
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_NUM_VFS_PER_PF_OFST 42
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_NUM_VFS_PER_PF_LEN 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_NUM_VFS_PER_PF_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+/* MC_CMD_GET_CAPABILITIES_V11_OUT_ACCESS_NOT_PERMITTED 0xff */
+/* enum: PF does not exist. */
+/* MC_CMD_GET_CAPABILITIES_V11_OUT_PF_NOT_PRESENT 0xfe */
+/* Number of VIs available for external ports 0-3. For devices with more than
+ * four ports, the remainder are in NUM_VIS_PER_PORT2 in
+ * GET_CAPABILITIES_V12_OUT.
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_NUM_VIS_PER_PORT_OFST 58
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_NUM_VIS_PER_PORT_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_NUM_VIS_PER_PORT_NUM 4
+/* Size of RX descriptor cache, expressed as a binary logarithm. The actual
+ * size equals (2 ^ RX_DESC_CACHE_SIZE).
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_DESC_CACHE_SIZE_OFST 66
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_DESC_CACHE_SIZE_LEN 1
+/* Size of TX descriptor cache, expressed as a binary logarithm. The actual
+ * size equals (2 ^ TX_DESC_CACHE_SIZE).
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_DESC_CACHE_SIZE_OFST 67
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_DESC_CACHE_SIZE_LEN 1
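/*
 * Editorial sketch (not part of the generated header): the descriptor cache
 * sizes are reported as base-2 logarithms, so the actual entry counts are
 * powers of two; a reported value of 6, for example, means a 64-entry cache.
 * Buffer and helper names are illustrative.
 */
#include <stddef.h>
#include <stdint.h>

static inline unsigned int capv11_rx_desc_cache_entries(const uint8_t *resp,
                                                        size_t resp_len)
{
        unsigned int ofst = MC_CMD_GET_CAPABILITIES_V11_OUT_RX_DESC_CACHE_SIZE_OFST;

        if (ofst >= resp_len)
                return 0;
        return 1u << resp[ofst];        /* 2 ^ RX_DESC_CACHE_SIZE */
}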
+/* Total number of available PIO buffers */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_NUM_PIO_BUFFS_OFST 68
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_NUM_PIO_BUFFS_LEN 2
+/* Size of a single PIO buffer */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_SIZE_PIO_BUFF_OFST 70
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_SIZE_PIO_BUFF_LEN 2
+/* On chips later than Medford the amount of address space assigned to each VI
+ * is configurable. This is a global setting that the driver must query to
+ * discover the VI to address mapping. Cut-through PIO (CTPIO) is not available
+ * with 8k VI windows.
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VI_WINDOW_MODE_OFST 72
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VI_WINDOW_MODE_LEN 1
+/* enum: Each VI occupies 8k as on Huntington and Medford. PIO is at offset 4k.
+ * CTPIO is not mapped.
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VI_WINDOW_MODE_8K 0x0
+/* enum: Each VI occupies 16k. PIO is at offset 4k. CTPIO is at offset 12k. */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VI_WINDOW_MODE_16K 0x1
+/* enum: Each VI occupies 64k. PIO is at offset 4k. CTPIO is at offset 12k. */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VI_WINDOW_MODE_64K 0x2
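/*
 * Editorial sketch (not part of the generated header): mapping the
 * VI_WINDOW_MODE enum to the per-VI address-space stride in bytes, following
 * the enum descriptions above. Unrecognised values are reported as 0 so a
 * caller can fail safely. Helper name is illustrative.
 */
#include <stdint.h>

static inline uint32_t capv11_vi_window_stride(uint8_t vi_window_mode)
{
        switch (vi_window_mode) {
        case MC_CMD_GET_CAPABILITIES_V11_OUT_VI_WINDOW_MODE_8K:
                return 8192;    /* PIO at 4k, CTPIO not mapped */
        case MC_CMD_GET_CAPABILITIES_V11_OUT_VI_WINDOW_MODE_16K:
                return 16384;   /* PIO at 4k, CTPIO at 12k */
        case MC_CMD_GET_CAPABILITIES_V11_OUT_VI_WINDOW_MODE_64K:
                return 65536;   /* PIO at 4k, CTPIO at 12k */
        default:
                return 0;       /* unrecognised mode */
        }
}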
+/* Number of vFIFOs per adapter that can be used for VFIFO Stuffing
+ * (SF-115995-SW) in the present configuration of firmware and port mode.
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VFIFO_STUFFING_NUM_VFIFOS_OFST 73
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VFIFO_STUFFING_NUM_VFIFOS_LEN 1
+/* Number of buffers per adapter that can be used for VFIFO Stuffing
+ * (SF-115995-SW) in the present configuration of firmware and port mode.
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_OFST 74
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_LEN 2
+/* Entry count in the MAC stats array, including the final GENERATION_END
+ * entry. For MAC stats DMA, drivers should allocate a buffer large enough to
+ * hold at least this many 64-bit stats values, if they wish to receive all
+ * available stats. If the buffer is shorter than MAC_STATS_NUM_STATS * 8, the
+ * stats array returned will be truncated.
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_MAC_STATS_NUM_STATS_OFST 76
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_MAC_STATS_NUM_STATS_LEN 2
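/*
 * Editorial sketch (not part of the generated header): sizing a DMA buffer
 * for MAC statistics from the MAC_STATS_NUM_STATS field above (a
 * little-endian 16-bit count of 64-bit entries, including GENERATION_END).
 * Buffer and helper names are illustrative.
 */
#include <stddef.h>
#include <stdint.h>

static inline size_t capv11_mac_stats_buf_len(const uint8_t *resp,
                                              size_t resp_len)
{
        unsigned int ofst = MC_CMD_GET_CAPABILITIES_V11_OUT_MAC_STATS_NUM_STATS_OFST;
        uint16_t num_stats;

        if (ofst + 2 > resp_len)
                return 0;       /* field absent on older firmware */
        num_stats = (uint16_t)(resp[ofst] | (resp[ofst + 1] << 8));
        return (size_t)num_stats * sizeof(uint64_t);    /* one 64-bit value per stat */
}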
+/* Maximum supported value for MC_CMD_FILTER_OP_V3/MATCH_MARK_VALUE. This field
+ * will only be non-zero if MC_CMD_GET_CAPABILITIES/FILTER_ACTION_MARK is set.
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_FILTER_ACTION_MARK_MAX_OFST 80
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_FILTER_ACTION_MARK_MAX_LEN 4
+/* On devices where the INIT_RXQ_WITH_BUFFER_SIZE flag (in
+ * GET_CAPABILITIES_OUT_V2) is set, drivers have to specify a buffer size when
+ * they create an RX queue. Due to hardware limitations, only a small number of
+ * different buffer sizes may be available concurrently. Nonzero entries in
+ * this array are the sizes of buffers which the system guarantees will be
+ * available for use. If the list is empty, there are no limitations on
+ * concurrent buffer sizes.
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_GUARANTEED_RX_BUFFER_SIZES_OFST 84
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_GUARANTEED_RX_BUFFER_SIZES_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_GUARANTEED_RX_BUFFER_SIZES_NUM 16
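/*
 * Editorial sketch (not part of the generated header): checking whether a
 * requested RX buffer size appears in the GUARANTEED_RX_BUFFER_SIZES array
 * above. An all-zero array means there is no restriction, per the comment.
 * Buffer and helper names are illustrative.
 */
#include <endian.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

static bool capv11_rx_buffer_size_guaranteed(const uint8_t *resp,
                                             size_t resp_len, uint32_t wanted)
{
        bool any_entry = false;
        unsigned int i;

        for (i = 0; i < MC_CMD_GET_CAPABILITIES_V11_OUT_GUARANTEED_RX_BUFFER_SIZES_NUM; i++) {
                unsigned int ofst =
                        MC_CMD_GET_CAPABILITIES_V11_OUT_GUARANTEED_RX_BUFFER_SIZES_OFST +
                        i * MC_CMD_GET_CAPABILITIES_V11_OUT_GUARANTEED_RX_BUFFER_SIZES_LEN;
                uint32_t size;

                if (ofst + 4 > resp_len)
                        break;
                memcpy(&size, resp + ofst, sizeof(size));
                size = le32toh(size);
                if (!size)
                        continue;       /* zero entries are padding */
                any_entry = true;
                if (size == wanted)
                        return true;
        }
        return !any_entry;      /* empty list => no limitation on sizes */
}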
+/* Third word of flags. Not present on older firmware (check the length). */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_FLAGS3_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_FLAGS3_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_WOL_ETHERWAKE_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_WOL_ETHERWAKE_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_WOL_ETHERWAKE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RSS_EVEN_SPREADING_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RSS_EVEN_SPREADING_LBN 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RSS_EVEN_SPREADING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RSS_SELECTABLE_TABLE_SIZE_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RSS_SELECTABLE_TABLE_SIZE_LBN 2
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RSS_SELECTABLE_TABLE_SIZE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_MAE_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_MAE_SUPPORTED_LBN 3
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_MAE_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VDPA_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VDPA_SUPPORTED_LBN 4
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VDPA_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_VLAN_STRIPPING_PER_ENCAP_RULE_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_VLAN_STRIPPING_PER_ENCAP_RULE_LBN 5
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_VLAN_STRIPPING_PER_ENCAP_RULE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EXTENDED_WIDTH_EVQS_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EXTENDED_WIDTH_EVQS_SUPPORTED_LBN 6
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EXTENDED_WIDTH_EVQS_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_UNSOL_EV_CREDIT_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_UNSOL_EV_CREDIT_SUPPORTED_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_UNSOL_EV_CREDIT_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_ENCAPSULATED_MCDI_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_ENCAPSULATED_MCDI_SUPPORTED_LBN 8
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_ENCAPSULATED_MCDI_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EXTERNAL_MAE_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EXTERNAL_MAE_SUPPORTED_LBN 9
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EXTERNAL_MAE_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_NVRAM_UPDATE_ABORT_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_NVRAM_UPDATE_ABORT_SUPPORTED_LBN 10
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_NVRAM_UPDATE_ABORT_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_MAE_ACTION_SET_ALLOC_V2_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_MAE_ACTION_SET_ALLOC_V2_SUPPORTED_LBN 11
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_MAE_ACTION_SET_ALLOC_V2_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RSS_STEER_ON_OUTER_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RSS_STEER_ON_OUTER_SUPPORTED_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RSS_STEER_ON_OUTER_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_MAE_ACTION_SET_ALLOC_V3_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_MAE_ACTION_SET_ALLOC_V3_SUPPORTED_LBN 13
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_MAE_ACTION_SET_ALLOC_V3_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_DYNAMIC_MPORT_JOURNAL_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_DYNAMIC_MPORT_JOURNAL_LBN 14
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_DYNAMIC_MPORT_JOURNAL_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_CLIENT_CMD_VF_PROXY_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_CLIENT_CMD_VF_PROXY_LBN 15
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_CLIENT_CMD_VF_PROXY_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_LL_RX_EVENT_SUPPRESSION_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_LL_RX_EVENT_SUPPRESSION_SUPPORTED_LBN 16
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_LL_RX_EVENT_SUPPRESSION_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_CXL_CONFIG_ENABLE_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_CXL_CONFIG_ENABLE_LBN 17
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_CXL_CONFIG_ENABLE_WIDTH 1
+/* These bits are reserved for communicating test-specific capabilities to
+ * host-side test software. All production drivers should treat this field as
+ * opaque.
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TEST_RESERVED_OFST 152
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TEST_RESERVED_LEN 8
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TEST_RESERVED_LO_OFST 152
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TEST_RESERVED_LO_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TEST_RESERVED_LO_LBN 1216
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TEST_RESERVED_LO_WIDTH 32
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TEST_RESERVED_HI_OFST 156
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TEST_RESERVED_HI_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TEST_RESERVED_HI_LBN 1248
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TEST_RESERVED_HI_WIDTH 32
+/* The minimum size (in table entries) of indirection table to be allocated
+ * from the pool for an RSS context. Note that the table size used must be a
+ * power of 2.
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RSS_MIN_INDIRECTION_TABLE_SIZE_OFST 160
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RSS_MIN_INDIRECTION_TABLE_SIZE_LEN 4
+/* The maximum size (in table entries) of indirection table to be allocated
+ * from the pool for an RSS context. Note that the table size used must be a
+ * power of 2.
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RSS_MAX_INDIRECTION_TABLE_SIZE_OFST 164
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RSS_MAX_INDIRECTION_TABLE_SIZE_LEN 4
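/*
 * Editorial sketch (not part of the generated header): picking an RSS
 * indirection table size. The size used must be a power of two and fall
 * within the advertised range; this rounds a requested entry count up and
 * clamps it. "min" and "max" are assumed to have been read from the MIN/MAX
 * fields above and to be powers of two themselves. Helper name is
 * illustrative.
 */
#include <stdint.h>

static uint32_t capv11_pick_rss_table_size(uint32_t wanted, uint32_t min,
                                           uint32_t max)
{
        uint32_t size = min;

        while (size < wanted && size < max)
                size <<= 1;     /* stay on power-of-two sizes */
        return size > max ? max : size;
}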
+/* The maximum number of queues that can be used by an RSS context in exclusive
+ * mode. In exclusive mode the context has a configurable indirection table and
+ * a configurable RSS key.
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RSS_MAX_INDIRECTION_QUEUES_OFST 168
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RSS_MAX_INDIRECTION_QUEUES_LEN 4
+/* The maximum number of queues that can be used by an RSS context in even-
+ * spreading mode. In even-spreading mode the context has no indirection table
+ * but it does have a configurable RSS key.
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RSS_MAX_EVEN_SPREADING_QUEUES_OFST 172
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RSS_MAX_EVEN_SPREADING_QUEUES_LEN 4
+/* The total number of RSS contexts supported. Note that the number of
+ * available contexts using indirection tables is also limited by the
+ * availability of indirection table space allocated from a common pool.
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RSS_NUM_CONTEXTS_OFST 176
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RSS_NUM_CONTEXTS_LEN 4
+/* The total amount of indirection table space that can be shared between RSS
+ * contexts.
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RSS_TABLE_POOL_SIZE_OFST 180
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RSS_TABLE_POOL_SIZE_LEN 4
+/* A bitmap of the queue sizes the device can provide, where bit N being set
+ * indicates that 2**N is a valid size. The device may be limited in the number
+ * of different queue sizes that can exist simultaneously, so a bit being set
+ * here does not guarantee that an attempt to create a queue of that size will
+ * succeed.
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_SUPPORTED_QUEUE_SIZES_OFST 184
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_SUPPORTED_QUEUE_SIZES_LEN 4
+/* A bitmap of queue sizes that are always available, in the same format as
+ * SUPPORTED_QUEUE_SIZES. Attempting to create a queue with one of these sizes
+ * will never fail due to unavailability of the requested size.
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_GUARANTEED_QUEUE_SIZES_OFST 188
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_GUARANTEED_QUEUE_SIZES_LEN 4
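/*
 * Editorial sketch (not part of the generated header): the two queue-size
 * words above are bitmaps in which bit N set means a queue of 2**N entries is
 * valid. A size whose bit is set in GUARANTEED_QUEUE_SIZES will never fail
 * for lack of availability; a size that is only in SUPPORTED_QUEUE_SIZES may
 * still fail depending on what already exists. Helper name is illustrative.
 */
#include <stdbool.h>
#include <stdint.h>

static inline bool queue_size_in_bitmap(uint32_t bitmap, uint32_t entries)
{
        unsigned int n = 0;

        if (!entries || (entries & (entries - 1)))
                return false;           /* only power-of-two sizes are encodable */
        while ((1u << n) < entries)
                n++;
        return n < 32 && (bitmap & (1u << n));
}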
+/* Number of available indirect memory maps. */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_INDIRECT_MAP_INDEX_COUNT_OFST 192
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_INDIRECT_MAP_INDEX_COUNT_LEN 4
+
+/* MC_CMD_GET_CAPABILITIES_V12_OUT msgresponse */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_LEN 204
+/* First word of flags. */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_FLAGS1_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_FLAGS1_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_VPORT_RECONFIGURE_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_VPORT_RECONFIGURE_LBN 3
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_VPORT_RECONFIGURE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_STRIPING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_STRIPING_LBN 4
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_STRIPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_VADAPTOR_QUERY_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_VADAPTOR_QUERY_LBN 5
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_VADAPTOR_QUERY_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_EVB_PORT_VLAN_RESTRICT_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_EVB_PORT_VLAN_RESTRICT_LBN 6
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_EVB_PORT_VLAN_RESTRICT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_DRV_ATTACH_PREBOOT_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_DRV_ATTACH_PREBOOT_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_DRV_ATTACH_PREBOOT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_FORCE_EVENT_MERGING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_FORCE_EVENT_MERGING_LBN 8
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_FORCE_EVENT_MERGING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_SET_MAC_ENHANCED_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_SET_MAC_ENHANCED_LBN 9
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_SET_MAC_ENHANCED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_LBN 10
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 11
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_MAC_SECURITY_FILTERING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_MAC_SECURITY_FILTERING_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_ADDITIONAL_RSS_MODES_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_ADDITIONAL_RSS_MODES_LBN 13
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_ADDITIONAL_RSS_MODES_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_QBB_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_QBB_LBN 14
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_QBB_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_PACKED_STREAM_VAR_BUFFERS_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_RSS_LIMITED_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_RSS_LIMITED_LBN 16
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_RSS_LIMITED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_PACKED_STREAM_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_PACKED_STREAM_LBN 17
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_PACKED_STREAM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_INCLUDE_FCS_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_INCLUDE_FCS_LBN 18
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_INCLUDE_FCS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_VLAN_INSERTION_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_VLAN_INSERTION_LBN 19
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_VLAN_INSERTION_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_VLAN_STRIPPING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_VLAN_STRIPPING_LBN 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_VLAN_STRIPPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_TSO_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_TSO_LBN 21
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_TSO_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_PREFIX_LEN_0_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_PREFIX_LEN_0_LBN 22
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_PREFIX_LEN_0_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_PREFIX_LEN_14_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_PREFIX_LEN_14_LBN 23
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_PREFIX_LEN_14_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_TIMESTAMP_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_TIMESTAMP_LBN 24
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_TIMESTAMP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_BATCHING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_BATCHING_LBN 25
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_BATCHING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_MCAST_FILTER_CHAINING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_MCAST_FILTER_CHAINING_LBN 26
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_MCAST_FILTER_CHAINING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_PM_AND_RXDP_COUNTERS_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_PM_AND_RXDP_COUNTERS_LBN 27
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_DISABLE_SCATTER_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_DISABLE_SCATTER_LBN 28
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_DISABLE_SCATTER_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_MCAST_UDP_LOOPBACK_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_EVB_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_EVB_LBN 30
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_EVB_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_VXLAN_NVGRE_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_VXLAN_NVGRE_LBN 31
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_VXLAN_NVGRE_WIDTH 1
+/* RxDPCPU firmware id. */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_DPCPU_FW_ID_OFST 4
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_DPCPU_FW_ID_LEN 2
+/* enum: Standard RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXDP 0x0
+/* enum: Low latency RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXDP_LOW_LATENCY 0x1
+/* enum: Packed stream RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXDP_PACKED_STREAM 0x2
+/* enum: Rules engine RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXDP_RULES_ENGINE 0x5
+/* enum: DPDK RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXDP_DPDK 0x6
+/* enum: BIST RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXDP_BIST 0x10a
+/* enum: RXDP Test firmware image 1 */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101
+/* enum: RXDP Test firmware image 2 */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102
+/* enum: RXDP Test firmware image 3 */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103
+/* enum: RXDP Test firmware image 4 */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104
+/* enum: RXDP Test firmware image 5 */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXDP_TEST_BACKPRESSURE 0x105
+/* enum: RXDP Test firmware image 6 */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106
+/* enum: RXDP Test firmware image 7 */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107
+/* enum: RXDP Test firmware image 8 */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXDP_TEST_FW_DISABLE_DL 0x108
+/* enum: RXDP Test firmware image 9 */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b
+/* enum: RXDP Test firmware image 10 */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXDP_TEST_FW_SLOW 0x10c
+/* TxDPCPU firmware id. */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_DPCPU_FW_ID_OFST 6
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_DPCPU_FW_ID_LEN 2
+/* enum: Standard TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXDP 0x0
+/* enum: Low latency TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXDP_LOW_LATENCY 0x1
+/* enum: High packet rate TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXDP_HIGH_PACKET_RATE 0x3
+/* enum: Rules engine TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXDP_RULES_ENGINE 0x5
+/* enum: DPDK TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXDP_DPDK 0x6
+/* enum: BIST TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXDP_BIST 0x12d
+/* enum: TXDP Test firmware image 1 */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXDP_TEST_FW_TSO_EDIT 0x101
+/* enum: TXDP Test firmware image 2 */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102
+/* enum: TXDP CSR bus test firmware */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXDP_TEST_FW_CSR 0x103
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXPD_FW_VERSION_OFST 8
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXPD_FW_VERSION_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXPD_FW_VERSION_REV_OFST 8
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXPD_FW_VERSION_REV_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXPD_FW_VERSION_REV_WIDTH 12
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXPD_FW_VERSION_TYPE_OFST 8
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXPD_FW_VERSION_TYPE_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial RX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: RX PD firmware for telemetry prototyping (Medford2 development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXPD_FW_TYPE_TESTFW_TELEMETRY 0x1
+/* enum: RX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Full featured RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant RX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+/* enum: Low latency RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5
+/* enum: Packed stream RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6
+/* enum: RX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: Rules engine RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXPD_FW_TYPE_L3XUDP 0x9
+/* enum: DPDK RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXPD_FW_TYPE_DPDK 0xa
+/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* enum: RX PD firmware parsing but not filtering network overlay tunnel
+ * encapsulations (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXPD_FW_VERSION_OFST 10
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXPD_FW_VERSION_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXPD_FW_VERSION_REV_OFST 10
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXPD_FW_VERSION_REV_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXPD_FW_VERSION_REV_WIDTH 12
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXPD_FW_VERSION_TYPE_OFST 10
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXPD_FW_VERSION_TYPE_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial TX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: TX PD firmware for telemetry prototyping (Medford2 development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXPD_FW_TYPE_TESTFW_TELEMETRY 0x1
+/* enum: TX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Full featured TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant TX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */
+/* enum: TX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: Rules engine TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXPD_FW_TYPE_L3XUDP 0x9
+/* enum: DPDK TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXPD_FW_TYPE_DPDK 0xa
+/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* Hardware capabilities of NIC */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_HW_CAPABILITIES_OFST 12
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_HW_CAPABILITIES_LEN 4
+/* Licensed capabilities */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_LICENSE_CAPABILITIES_OFST 16
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_LICENSE_CAPABILITIES_LEN 4
+/* Second word of flags. Not present on older firmware (check the length). */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_FLAGS2_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_FLAGS2_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_TSO_V2_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_TSO_V2_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_TSO_V2_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_TSO_V2_ENCAP_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_TSO_V2_ENCAP_LBN 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_TSO_V2_ENCAP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_EVQ_TIMER_CTRL_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_EVQ_TIMER_CTRL_LBN 2
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_EVQ_TIMER_CTRL_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_EVENT_CUT_THROUGH_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_EVENT_CUT_THROUGH_LBN 3
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_EVENT_CUT_THROUGH_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_CUT_THROUGH_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_CUT_THROUGH_LBN 4
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_CUT_THROUGH_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_VFIFO_ULL_MODE_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_VFIFO_ULL_MODE_LBN 5
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_VFIFO_ULL_MODE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_MAC_STATS_40G_TX_SIZE_BINS_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_MAC_STATS_40G_TX_SIZE_BINS_LBN 6
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_MAC_STATS_40G_TX_SIZE_BINS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_INIT_EVQ_TYPE_SUPPORTED_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_INIT_EVQ_TYPE_SUPPORTED_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_INIT_EVQ_TYPE_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_INIT_EVQ_V2_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_INIT_EVQ_V2_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_INIT_EVQ_V2_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_MAC_TIMESTAMPING_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_MAC_TIMESTAMPING_LBN 8
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_MAC_TIMESTAMPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_TIMESTAMP_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_TIMESTAMP_LBN 9
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_TIMESTAMP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_SNIFF_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_SNIFF_LBN 10
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_SNIFF_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_SNIFF_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_SNIFF_LBN 11
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_SNIFF_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_MCDI_BACKGROUND_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_MCDI_BACKGROUND_LBN 13
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_MCDI_BACKGROUND_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_MCDI_DB_RETURN_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_MCDI_DB_RETURN_LBN 14
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_MCDI_DB_RETURN_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_CTPIO_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_CTPIO_LBN 15
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_CTPIO_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TSA_SUPPORT_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TSA_SUPPORT_LBN 16
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TSA_SUPPORT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TSA_BOUND_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TSA_BOUND_LBN 17
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TSA_BOUND_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_SF_ADAPTER_AUTHENTICATION_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_SF_ADAPTER_AUTHENTICATION_LBN 18
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_SF_ADAPTER_AUTHENTICATION_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_FILTER_ACTION_FLAG_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_FILTER_ACTION_FLAG_LBN 19
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_FILTER_ACTION_FLAG_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_FILTER_ACTION_MARK_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_FILTER_ACTION_MARK_LBN 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_FILTER_ACTION_MARK_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_EQUAL_STRIDE_SUPER_BUFFER_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_EQUAL_STRIDE_SUPER_BUFFER_LBN 21
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_EQUAL_STRIDE_SUPER_BUFFER_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_EQUAL_STRIDE_PACKED_STREAM_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_EQUAL_STRIDE_PACKED_STREAM_LBN 21
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_EQUAL_STRIDE_PACKED_STREAM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_L3XUDP_SUPPORT_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_L3XUDP_SUPPORT_LBN 22
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_L3XUDP_SUPPORT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_FW_SUBVARIANT_NO_TX_CSUM_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_FW_SUBVARIANT_NO_TX_CSUM_LBN 23
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_FW_SUBVARIANT_NO_TX_CSUM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_VI_SPREADING_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_VI_SPREADING_LBN 24
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_VI_SPREADING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXDP_HLB_IDLE_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXDP_HLB_IDLE_LBN 25
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXDP_HLB_IDLE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_INIT_RXQ_NO_CONT_EV_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_INIT_RXQ_NO_CONT_EV_LBN 26
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_INIT_RXQ_NO_CONT_EV_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_INIT_RXQ_WITH_BUFFER_SIZE_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_INIT_RXQ_WITH_BUFFER_SIZE_LBN 27
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_INIT_RXQ_WITH_BUFFER_SIZE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_BUNDLE_UPDATE_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_BUNDLE_UPDATE_LBN 28
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_BUNDLE_UPDATE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_TSO_V3_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_TSO_V3_LBN 29
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_TSO_V3_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_DYNAMIC_SENSORS_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_DYNAMIC_SENSORS_LBN 30
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_DYNAMIC_SENSORS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_LBN 31
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_WIDTH 1
+/* Number of FATSOv2 contexts per datapath supported by this NIC (when
+ * TX_TSO_V2 == 1). Not present on older firmware (check the length).
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_TSO_V2_N_CONTEXTS_OFST 24
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_TSO_V2_N_CONTEXTS_LEN 2
+/* One byte per PF containing the number of the external port assigned to this
+ * PF, indexed by PF number. Special values indicate that a PF is either not
+ * present or not assigned.
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_PFS_TO_PORTS_ASSIGNMENT_OFST 26
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_PFS_TO_PORTS_ASSIGNMENT_LEN 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_PFS_TO_PORTS_ASSIGNMENT_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_ACCESS_NOT_PERMITTED 0xff
+/* enum: PF does not exist. */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_PF_NOT_PRESENT 0xfe
+/* enum: PF does exist but is not assigned to any external port. */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_PF_NOT_ASSIGNED 0xfd
+/* enum: This value indicates that the PF is assigned, but the assignment
+ * cannot be expressed in this field. It is intended for a possible future
+ * situation where a more complex PF-to-port mapping scheme is used. A future
+ * driver should look for a new field supporting the new scheme; a current/old
+ * driver should treat this value as PF_NOT_ASSIGNED.
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_INCOMPATIBLE_ASSIGNMENT 0xfc
+/* One byte per PF containing the number of its VFs, indexed by PF number. A
+ * special value indicates that a PF is not present.
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_NUM_VFS_PER_PF_OFST 42
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_NUM_VFS_PER_PF_LEN 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_NUM_VFS_PER_PF_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+/* MC_CMD_GET_CAPABILITIES_V12_OUT_ACCESS_NOT_PERMITTED 0xff */
+/* enum: PF does not exist. */
+/* MC_CMD_GET_CAPABILITIES_V12_OUT_PF_NOT_PRESENT 0xfe */
+/* Number of VIs available for external ports 0-3. For devices with more than
+ * four ports, the remainder are in NUM_VIS_PER_PORT2 in
+ * GET_CAPABILITIES_V12_OUT.
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_NUM_VIS_PER_PORT_OFST 58
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_NUM_VIS_PER_PORT_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_NUM_VIS_PER_PORT_NUM 4
+/* Size of RX descriptor cache, expressed as a binary logarithm. The actual
+ * size equals (2 ^ RX_DESC_CACHE_SIZE).
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_DESC_CACHE_SIZE_OFST 66
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_DESC_CACHE_SIZE_LEN 1
+/* Size of TX descriptor cache, expressed as a binary logarithm. The actual
+ * size equals (2 ^ TX_DESC_CACHE_SIZE).
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_DESC_CACHE_SIZE_OFST 67
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_DESC_CACHE_SIZE_LEN 1
+/* Total number of available PIO buffers */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_NUM_PIO_BUFFS_OFST 68
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_NUM_PIO_BUFFS_LEN 2
+/* Size of a single PIO buffer */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_SIZE_PIO_BUFF_OFST 70
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_SIZE_PIO_BUFF_LEN 2
+/* On chips later than Medford the amount of address space assigned to each VI
+ * is configurable. This is a global setting that the driver must query to
+ * discover the VI to address mapping. Cut-through PIO (CTPIO) is not available
+ * with 8k VI windows.
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_VI_WINDOW_MODE_OFST 72
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_VI_WINDOW_MODE_LEN 1
+/* enum: Each VI occupies 8k as on Huntington and Medford. PIO is at offset 4k.
+ * CTPIO is not mapped.
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_VI_WINDOW_MODE_8K 0x0
+/* enum: Each VI occupies 16k. PIO is at offset 4k. CTPIO is at offset 12k. */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_VI_WINDOW_MODE_16K 0x1
+/* enum: Each VI occupies 64k. PIO is at offset 4k. CTPIO is at offset 12k. */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_VI_WINDOW_MODE_64K 0x2
+/* Number of vFIFOs per adapter that can be used for VFIFO Stuffing
+ * (SF-115995-SW) in the present configuration of firmware and port mode.
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_VFIFO_STUFFING_NUM_VFIFOS_OFST 73
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_VFIFO_STUFFING_NUM_VFIFOS_LEN 1
+/* Number of buffers per adapter that can be used for VFIFO Stuffing
+ * (SF-115995-SW) in the present configuration of firmware and port mode.
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_OFST 74
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_LEN 2
+/* Entry count in the MAC stats array, including the final GENERATION_END
+ * entry. For MAC stats DMA, drivers should allocate a buffer large enough to
+ * hold at least this many 64-bit stats values, if they wish to receive all
+ * available stats. If the buffer is shorter than MAC_STATS_NUM_STATS * 8, the
+ * stats array returned will be truncated.
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_MAC_STATS_NUM_STATS_OFST 76
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_MAC_STATS_NUM_STATS_LEN 2
+/* Maximum supported value for MC_CMD_FILTER_OP_V3/MATCH_MARK_VALUE. This field
+ * will only be non-zero if MC_CMD_GET_CAPABILITIES/FILTER_ACTION_MARK is set.
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_FILTER_ACTION_MARK_MAX_OFST 80
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_FILTER_ACTION_MARK_MAX_LEN 4
+/* On devices where the INIT_RXQ_WITH_BUFFER_SIZE flag (in
+ * GET_CAPABILITIES_OUT_V2) is set, drivers have to specify a buffer size when
+ * they create an RX queue. Due to hardware limitations, only a small number of
+ * different buffer sizes may be available concurrently. Nonzero entries in
+ * this array are the sizes of buffers which the system guarantees will be
+ * available for use. If the list is empty, there are no limitations on
+ * concurrent buffer sizes.
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_GUARANTEED_RX_BUFFER_SIZES_OFST 84
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_GUARANTEED_RX_BUFFER_SIZES_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_GUARANTEED_RX_BUFFER_SIZES_NUM 16
+/* Third word of flags. Not present on older firmware (check the length). */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_FLAGS3_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_FLAGS3_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_WOL_ETHERWAKE_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_WOL_ETHERWAKE_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_WOL_ETHERWAKE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RSS_EVEN_SPREADING_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RSS_EVEN_SPREADING_LBN 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RSS_EVEN_SPREADING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RSS_SELECTABLE_TABLE_SIZE_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RSS_SELECTABLE_TABLE_SIZE_LBN 2
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RSS_SELECTABLE_TABLE_SIZE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_MAE_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_MAE_SUPPORTED_LBN 3
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_MAE_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_VDPA_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_VDPA_SUPPORTED_LBN 4
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_VDPA_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_VLAN_STRIPPING_PER_ENCAP_RULE_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_VLAN_STRIPPING_PER_ENCAP_RULE_LBN 5
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_VLAN_STRIPPING_PER_ENCAP_RULE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_EXTENDED_WIDTH_EVQS_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_EXTENDED_WIDTH_EVQS_SUPPORTED_LBN 6
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_EXTENDED_WIDTH_EVQS_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_UNSOL_EV_CREDIT_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_UNSOL_EV_CREDIT_SUPPORTED_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_UNSOL_EV_CREDIT_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_ENCAPSULATED_MCDI_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_ENCAPSULATED_MCDI_SUPPORTED_LBN 8
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_ENCAPSULATED_MCDI_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_EXTERNAL_MAE_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_EXTERNAL_MAE_SUPPORTED_LBN 9
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_EXTERNAL_MAE_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_NVRAM_UPDATE_ABORT_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_NVRAM_UPDATE_ABORT_SUPPORTED_LBN 10
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_NVRAM_UPDATE_ABORT_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_MAE_ACTION_SET_ALLOC_V2_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_MAE_ACTION_SET_ALLOC_V2_SUPPORTED_LBN 11
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_MAE_ACTION_SET_ALLOC_V2_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RSS_STEER_ON_OUTER_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RSS_STEER_ON_OUTER_SUPPORTED_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RSS_STEER_ON_OUTER_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_MAE_ACTION_SET_ALLOC_V3_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_MAE_ACTION_SET_ALLOC_V3_SUPPORTED_LBN 13
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_MAE_ACTION_SET_ALLOC_V3_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_DYNAMIC_MPORT_JOURNAL_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_DYNAMIC_MPORT_JOURNAL_LBN 14
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_DYNAMIC_MPORT_JOURNAL_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_CLIENT_CMD_VF_PROXY_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_CLIENT_CMD_VF_PROXY_LBN 15
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_CLIENT_CMD_VF_PROXY_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_LL_RX_EVENT_SUPPRESSION_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_LL_RX_EVENT_SUPPRESSION_SUPPORTED_LBN 16
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_LL_RX_EVENT_SUPPRESSION_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_CXL_CONFIG_ENABLE_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_CXL_CONFIG_ENABLE_LBN 17
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_CXL_CONFIG_ENABLE_WIDTH 1
+/* These bits are reserved for communicating test-specific capabilities to
+ * host-side test software. All production drivers should treat this field as
+ * opaque.
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TEST_RESERVED_OFST 152
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TEST_RESERVED_LEN 8
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TEST_RESERVED_LO_OFST 152
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TEST_RESERVED_LO_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TEST_RESERVED_LO_LBN 1216
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TEST_RESERVED_LO_WIDTH 32
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TEST_RESERVED_HI_OFST 156
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TEST_RESERVED_HI_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TEST_RESERVED_HI_LBN 1248
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TEST_RESERVED_HI_WIDTH 32
+/* The minimum size (in table entries) of indirection table to be allocated
+ * from the pool for an RSS context. Note that the table size used must be a
+ * power of 2.
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RSS_MIN_INDIRECTION_TABLE_SIZE_OFST 160
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RSS_MIN_INDIRECTION_TABLE_SIZE_LEN 4
+/* The maximum size (in table entries) of indirection table to be allocated
+ * from the pool for an RSS context. Note that the table size used must be a
+ * power of 2.
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RSS_MAX_INDIRECTION_TABLE_SIZE_OFST 164
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RSS_MAX_INDIRECTION_TABLE_SIZE_LEN 4
+/* The maximum number of queues that can be used by an RSS context in exclusive
+ * mode. In exclusive mode the context has a configurable indirection table and
+ * a configurable RSS key.
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RSS_MAX_INDIRECTION_QUEUES_OFST 168
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RSS_MAX_INDIRECTION_QUEUES_LEN 4
+/* The maximum number of queues that can be used by an RSS context in even-
+ * spreading mode. In even-spreading mode the context has no indirection table
+ * but it does have a configurable RSS key.
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RSS_MAX_EVEN_SPREADING_QUEUES_OFST 172
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RSS_MAX_EVEN_SPREADING_QUEUES_LEN 4
+/* The total number of RSS contexts supported. Note that the number of
+ * available contexts using indirection tables is also limited by the
+ * availability of indirection table space allocated from a common pool.
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RSS_NUM_CONTEXTS_OFST 176
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RSS_NUM_CONTEXTS_LEN 4
+/* The total amount of indirection table space that can be shared between RSS
+ * contexts.
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RSS_TABLE_POOL_SIZE_OFST 180
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_RSS_TABLE_POOL_SIZE_LEN 4
+/* A bitmap of the queue sizes the device can provide, where bit N being set
+ * indicates that 2**N is a valid size. The device may be limited in the number
+ * of different queue sizes that can exist simultaneously, so a bit being set
+ * here does not guarantee that an attempt to create a queue of that size will
+ * succeed.
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_SUPPORTED_QUEUE_SIZES_OFST 184
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_SUPPORTED_QUEUE_SIZES_LEN 4
+/* A bitmap of queue sizes that are always available, in the same format as
+ * SUPPORTED_QUEUE_SIZES. Attempting to create a queue with one of these sizes
+ * will never fail due to unavailability of the requested size.
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_GUARANTEED_QUEUE_SIZES_OFST 188
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_GUARANTEED_QUEUE_SIZES_LEN 4
+/* Number of available indirect memory maps. */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_INDIRECT_MAP_INDEX_COUNT_OFST 192
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_INDIRECT_MAP_INDEX_COUNT_LEN 4
+/* Number of VIs available for external ports 4-7. Information for ports 0-3 is
+ * in NUM_VIS_PER_PORT in GET_CAPABILITIES_V2_OUT.
+ */
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_NUM_VIS_PER_PORT2_OFST 196
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_NUM_VIS_PER_PORT2_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_NUM_VIS_PER_PORT2_NUM 4
+
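Editor's note: the V12 capabilities response added above follows the usual MCDI conventions: scalar fields are located by their _OFST/_LEN pair (payload bytes are little-endian), and individual flag bits by the _OFST of their containing dword plus _LBN/_WIDTH. The following is a minimal, self-contained sketch of how a consumer might decode a few of these fields, assuming the definitions above are in scope; the helper names (caps_u8, caps_u16, caps_u32, caps_flag, show_v12_caps) are hypothetical and are not part of the MCDI headers or the sfc driver, which uses its own accessor macros.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Read little-endian scalars at a byte offset into the response buffer. */
static uint8_t caps_u8(const uint8_t *buf, unsigned int ofst)
{
	return buf[ofst];
}

static uint16_t caps_u16(const uint8_t *buf, unsigned int ofst)
{
	return (uint16_t)buf[ofst] | ((uint16_t)buf[ofst + 1] << 8);
}

static uint32_t caps_u32(const uint8_t *buf, unsigned int ofst)
{
	return (uint32_t)buf[ofst] |
	       ((uint32_t)buf[ofst + 1] << 8) |
	       ((uint32_t)buf[ofst + 2] << 16) |
	       ((uint32_t)buf[ofst + 3] << 24);
}

/* Test one flag bit, given the offset of its containing dword and its LBN. */
static bool caps_flag(const uint8_t *buf, unsigned int ofst, unsigned int lbn)
{
	return (caps_u32(buf, ofst) >> lbn) & 1;
}

static void show_v12_caps(const uint8_t *resp, size_t resp_len)
{
	/* Descriptor cache sizes are stored as the binary logarithm of the
	 * entry count, so the actual size is 2 ^ value.
	 */
	unsigned int rx_cache_entries =
		1u << caps_u8(resp, MC_CMD_GET_CAPABILITIES_V12_OUT_RX_DESC_CACHE_SIZE_OFST);

	/* A MAC stats DMA buffer should hold at least this many 64-bit values. */
	unsigned int mac_stats_entries =
		caps_u16(resp, MC_CMD_GET_CAPABILITIES_V12_OUT_MAC_STATS_NUM_STATS_OFST);

	bool mae_supported = false;

	/* FLAGS3 is absent on older firmware, so check the response length
	 * before reading it, as the field comment recommends.
	 */
	if (resp_len >= MC_CMD_GET_CAPABILITIES_V12_OUT_FLAGS3_OFST +
			MC_CMD_GET_CAPABILITIES_V12_OUT_FLAGS3_LEN)
		mae_supported = caps_flag(resp,
				MC_CMD_GET_CAPABILITIES_V12_OUT_MAE_SUPPORTED_OFST,
				MC_CMD_GET_CAPABILITIES_V12_OUT_MAE_SUPPORTED_LBN);

	(void)rx_cache_entries;
	(void)mac_stats_entries;
	(void)mae_supported;
}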
/***********************************/
/* MC_CMD_V2_EXTN
@@ -18468,168 +19479,13 @@
* are not defined.
*/
#define MC_CMD_V2_EXTN_IN_MCDI_MESSAGE_TYPE_TSA 0x1
-
-
-/***********************************/
-/* MC_CMD_TCM_BUCKET_ALLOC
- * Allocate a pacer bucket (for qau rp or a snapper test)
+/* enum: MCDI command used for platform management. Typically, these commands
+ * are used for low-level operations directed at the platform as a whole (e.g.
+ * MMIO device enumeration) rather than individual functions and use a
+ * dedicated comms channel (e.g. RPmsg/IPI). May be handled by the same or
+ * different CPU as MCDI_MESSAGE_TYPE_MC.
*/
-#define MC_CMD_TCM_BUCKET_ALLOC 0xb2
-#undef MC_CMD_0xb2_PRIVILEGE_CTG
-
-#define MC_CMD_0xb2_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_TCM_BUCKET_ALLOC_IN msgrequest */
-#define MC_CMD_TCM_BUCKET_ALLOC_IN_LEN 0
-
-/* MC_CMD_TCM_BUCKET_ALLOC_OUT msgresponse */
-#define MC_CMD_TCM_BUCKET_ALLOC_OUT_LEN 4
-/* the bucket id */
-#define MC_CMD_TCM_BUCKET_ALLOC_OUT_BUCKET_OFST 0
-#define MC_CMD_TCM_BUCKET_ALLOC_OUT_BUCKET_LEN 4
-
-
-/***********************************/
-/* MC_CMD_TCM_BUCKET_FREE
- * Free a pacer bucket
- */
-#define MC_CMD_TCM_BUCKET_FREE 0xb3
-#undef MC_CMD_0xb3_PRIVILEGE_CTG
-
-#define MC_CMD_0xb3_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_TCM_BUCKET_FREE_IN msgrequest */
-#define MC_CMD_TCM_BUCKET_FREE_IN_LEN 4
-/* the bucket id */
-#define MC_CMD_TCM_BUCKET_FREE_IN_BUCKET_OFST 0
-#define MC_CMD_TCM_BUCKET_FREE_IN_BUCKET_LEN 4
-
-/* MC_CMD_TCM_BUCKET_FREE_OUT msgresponse */
-#define MC_CMD_TCM_BUCKET_FREE_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_TCM_BUCKET_INIT
- * Initialise pacer bucket with a given rate
- */
-#define MC_CMD_TCM_BUCKET_INIT 0xb4
-#undef MC_CMD_0xb4_PRIVILEGE_CTG
-
-#define MC_CMD_0xb4_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_TCM_BUCKET_INIT_IN msgrequest */
-#define MC_CMD_TCM_BUCKET_INIT_IN_LEN 8
-/* the bucket id */
-#define MC_CMD_TCM_BUCKET_INIT_IN_BUCKET_OFST 0
-#define MC_CMD_TCM_BUCKET_INIT_IN_BUCKET_LEN 4
-/* the rate in mbps */
-#define MC_CMD_TCM_BUCKET_INIT_IN_RATE_OFST 4
-#define MC_CMD_TCM_BUCKET_INIT_IN_RATE_LEN 4
-
-/* MC_CMD_TCM_BUCKET_INIT_EXT_IN msgrequest */
-#define MC_CMD_TCM_BUCKET_INIT_EXT_IN_LEN 12
-/* the bucket id */
-#define MC_CMD_TCM_BUCKET_INIT_EXT_IN_BUCKET_OFST 0
-#define MC_CMD_TCM_BUCKET_INIT_EXT_IN_BUCKET_LEN 4
-/* the rate in mbps */
-#define MC_CMD_TCM_BUCKET_INIT_EXT_IN_RATE_OFST 4
-#define MC_CMD_TCM_BUCKET_INIT_EXT_IN_RATE_LEN 4
-/* the desired maximum fill level */
-#define MC_CMD_TCM_BUCKET_INIT_EXT_IN_MAX_FILL_OFST 8
-#define MC_CMD_TCM_BUCKET_INIT_EXT_IN_MAX_FILL_LEN 4
-
-/* MC_CMD_TCM_BUCKET_INIT_OUT msgresponse */
-#define MC_CMD_TCM_BUCKET_INIT_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_TCM_TXQ_INIT
- * Initialise txq in pacer with given options or set options
- */
-#define MC_CMD_TCM_TXQ_INIT 0xb5
-#undef MC_CMD_0xb5_PRIVILEGE_CTG
-
-#define MC_CMD_0xb5_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_TCM_TXQ_INIT_IN msgrequest */
-#define MC_CMD_TCM_TXQ_INIT_IN_LEN 28
-/* the txq id */
-#define MC_CMD_TCM_TXQ_INIT_IN_QID_OFST 0
-#define MC_CMD_TCM_TXQ_INIT_IN_QID_LEN 4
-/* the static priority associated with the txq */
-#define MC_CMD_TCM_TXQ_INIT_IN_LABEL_OFST 4
-#define MC_CMD_TCM_TXQ_INIT_IN_LABEL_LEN 4
-/* bitmask of the priority queues this txq is inserted into when inserted. */
-#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAGS_OFST 8
-#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAGS_LEN 4
-#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_GUARANTEED_OFST 8
-#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_GUARANTEED_LBN 0
-#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_GUARANTEED_WIDTH 1
-#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_NORMAL_OFST 8
-#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_NORMAL_LBN 1
-#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_NORMAL_WIDTH 1
-#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_LOW_OFST 8
-#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_LOW_LBN 2
-#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_LOW_WIDTH 1
-/* the reaction point (RP) bucket */
-#define MC_CMD_TCM_TXQ_INIT_IN_RP_BKT_OFST 12
-#define MC_CMD_TCM_TXQ_INIT_IN_RP_BKT_LEN 4
-/* an already reserved bucket (typically set to bucket associated with outer
- * vswitch)
- */
-#define MC_CMD_TCM_TXQ_INIT_IN_MAX_BKT1_OFST 16
-#define MC_CMD_TCM_TXQ_INIT_IN_MAX_BKT1_LEN 4
-/* an already reserved bucket (typically set to bucket associated with inner
- * vswitch)
- */
-#define MC_CMD_TCM_TXQ_INIT_IN_MAX_BKT2_OFST 20
-#define MC_CMD_TCM_TXQ_INIT_IN_MAX_BKT2_LEN 4
-/* the min bucket (typically for ETS/minimum bandwidth) */
-#define MC_CMD_TCM_TXQ_INIT_IN_MIN_BKT_OFST 24
-#define MC_CMD_TCM_TXQ_INIT_IN_MIN_BKT_LEN 4
-
-/* MC_CMD_TCM_TXQ_INIT_EXT_IN msgrequest */
-#define MC_CMD_TCM_TXQ_INIT_EXT_IN_LEN 32
-/* the txq id */
-#define MC_CMD_TCM_TXQ_INIT_EXT_IN_QID_OFST 0
-#define MC_CMD_TCM_TXQ_INIT_EXT_IN_QID_LEN 4
-/* the static priority associated with the txq */
-#define MC_CMD_TCM_TXQ_INIT_EXT_IN_LABEL_NORMAL_OFST 4
-#define MC_CMD_TCM_TXQ_INIT_EXT_IN_LABEL_NORMAL_LEN 4
-/* bitmask of the priority queues this txq is inserted into when inserted. */
-#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAGS_OFST 8
-#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAGS_LEN 4
-#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_GUARANTEED_OFST 8
-#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_GUARANTEED_LBN 0
-#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_GUARANTEED_WIDTH 1
-#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_NORMAL_OFST 8
-#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_NORMAL_LBN 1
-#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_NORMAL_WIDTH 1
-#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_LOW_OFST 8
-#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_LOW_LBN 2
-#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_LOW_WIDTH 1
-/* the reaction point (RP) bucket */
-#define MC_CMD_TCM_TXQ_INIT_EXT_IN_RP_BKT_OFST 12
-#define MC_CMD_TCM_TXQ_INIT_EXT_IN_RP_BKT_LEN 4
-/* an already reserved bucket (typically set to bucket associated with outer
- * vswitch)
- */
-#define MC_CMD_TCM_TXQ_INIT_EXT_IN_MAX_BKT1_OFST 16
-#define MC_CMD_TCM_TXQ_INIT_EXT_IN_MAX_BKT1_LEN 4
-/* an already reserved bucket (typically set to bucket associated with inner
- * vswitch)
- */
-#define MC_CMD_TCM_TXQ_INIT_EXT_IN_MAX_BKT2_OFST 20
-#define MC_CMD_TCM_TXQ_INIT_EXT_IN_MAX_BKT2_LEN 4
-/* the min bucket (typically for ETS/minimum bandwidth) */
-#define MC_CMD_TCM_TXQ_INIT_EXT_IN_MIN_BKT_OFST 24
-#define MC_CMD_TCM_TXQ_INIT_EXT_IN_MIN_BKT_LEN 4
-/* the static priority associated with the txq */
-#define MC_CMD_TCM_TXQ_INIT_EXT_IN_LABEL_GUARANTEED_OFST 28
-#define MC_CMD_TCM_TXQ_INIT_EXT_IN_LABEL_GUARANTEED_LEN 4
-
-/* MC_CMD_TCM_TXQ_INIT_OUT msgresponse */
-#define MC_CMD_TCM_TXQ_INIT_OUT_LEN 0
+#define MC_CMD_V2_EXTN_IN_MCDI_MESSAGE_TYPE_PLATFORM 0x2
/***********************************/
@@ -18740,27 +19596,6 @@
/***********************************/
-/* MC_CMD_VSWITCH_QUERY
- * read some config of v-switch. For now this command is an empty placeholder.
- * It may be used to check if a v-switch is connected to a given EVB port (if
- * not, then the command returns ENOENT).
- */
-#define MC_CMD_VSWITCH_QUERY 0x63
-#undef MC_CMD_0x63_PRIVILEGE_CTG
-
-#define MC_CMD_0x63_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_VSWITCH_QUERY_IN msgrequest */
-#define MC_CMD_VSWITCH_QUERY_IN_LEN 4
-/* The port to which the v-switch is connected. */
-#define MC_CMD_VSWITCH_QUERY_IN_UPSTREAM_PORT_ID_OFST 0
-#define MC_CMD_VSWITCH_QUERY_IN_UPSTREAM_PORT_ID_LEN 4
-
-/* MC_CMD_VSWITCH_QUERY_OUT msgresponse */
-#define MC_CMD_VSWITCH_QUERY_OUT_LEN 0
-
-
-/***********************************/
/* MC_CMD_VPORT_ALLOC
* allocate a v-port.
*/
@@ -18936,28 +19771,6 @@
/***********************************/
-/* MC_CMD_VADAPTOR_GET_MAC
- * read the MAC address assigned to a v-adaptor.
- */
-#define MC_CMD_VADAPTOR_GET_MAC 0x5e
-#undef MC_CMD_0x5e_PRIVILEGE_CTG
-
-#define MC_CMD_0x5e_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_VADAPTOR_GET_MAC_IN msgrequest */
-#define MC_CMD_VADAPTOR_GET_MAC_IN_LEN 4
-/* The port to which the v-adaptor is connected. */
-#define MC_CMD_VADAPTOR_GET_MAC_IN_UPSTREAM_PORT_ID_OFST 0
-#define MC_CMD_VADAPTOR_GET_MAC_IN_UPSTREAM_PORT_ID_LEN 4
-
-/* MC_CMD_VADAPTOR_GET_MAC_OUT msgresponse */
-#define MC_CMD_VADAPTOR_GET_MAC_OUT_LEN 6
-/* The MAC address assigned to this v-adaptor */
-#define MC_CMD_VADAPTOR_GET_MAC_OUT_MACADDR_OFST 0
-#define MC_CMD_VADAPTOR_GET_MAC_OUT_MACADDR_LEN 6
-
-
-/***********************************/
/* MC_CMD_VADAPTOR_QUERY
* read some config of v-adaptor.
*/
@@ -19014,86 +19827,6 @@
/***********************************/
-/* MC_CMD_RDWR_A64_REGIONS
- * Assign the 64 bit region addresses.
- */
-#define MC_CMD_RDWR_A64_REGIONS 0x9b
-#undef MC_CMD_0x9b_PRIVILEGE_CTG
-
-#define MC_CMD_0x9b_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_RDWR_A64_REGIONS_IN msgrequest */
-#define MC_CMD_RDWR_A64_REGIONS_IN_LEN 17
-#define MC_CMD_RDWR_A64_REGIONS_IN_REGION0_OFST 0
-#define MC_CMD_RDWR_A64_REGIONS_IN_REGION0_LEN 4
-#define MC_CMD_RDWR_A64_REGIONS_IN_REGION1_OFST 4
-#define MC_CMD_RDWR_A64_REGIONS_IN_REGION1_LEN 4
-#define MC_CMD_RDWR_A64_REGIONS_IN_REGION2_OFST 8
-#define MC_CMD_RDWR_A64_REGIONS_IN_REGION2_LEN 4
-#define MC_CMD_RDWR_A64_REGIONS_IN_REGION3_OFST 12
-#define MC_CMD_RDWR_A64_REGIONS_IN_REGION3_LEN 4
-/* Write enable bits 0-3, set to write, clear to read. */
-#define MC_CMD_RDWR_A64_REGIONS_IN_WRITE_MASK_LBN 128
-#define MC_CMD_RDWR_A64_REGIONS_IN_WRITE_MASK_WIDTH 4
-#define MC_CMD_RDWR_A64_REGIONS_IN_WRITE_MASK_BYTE_OFST 16
-#define MC_CMD_RDWR_A64_REGIONS_IN_WRITE_MASK_BYTE_LEN 1
-
-/* MC_CMD_RDWR_A64_REGIONS_OUT msgresponse: This data always included
- * regardless of state of write bits in the request.
- */
-#define MC_CMD_RDWR_A64_REGIONS_OUT_LEN 16
-#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION0_OFST 0
-#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION0_LEN 4
-#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION1_OFST 4
-#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION1_LEN 4
-#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION2_OFST 8
-#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION2_LEN 4
-#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION3_OFST 12
-#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION3_LEN 4
-
-
-/***********************************/
-/* MC_CMD_ONLOAD_STACK_ALLOC
- * Allocate an Onload stack ID.
- */
-#define MC_CMD_ONLOAD_STACK_ALLOC 0x9c
-#undef MC_CMD_0x9c_PRIVILEGE_CTG
-
-#define MC_CMD_0x9c_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
-
-/* MC_CMD_ONLOAD_STACK_ALLOC_IN msgrequest */
-#define MC_CMD_ONLOAD_STACK_ALLOC_IN_LEN 4
-/* The handle of the owning upstream port */
-#define MC_CMD_ONLOAD_STACK_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
-#define MC_CMD_ONLOAD_STACK_ALLOC_IN_UPSTREAM_PORT_ID_LEN 4
-
-/* MC_CMD_ONLOAD_STACK_ALLOC_OUT msgresponse */
-#define MC_CMD_ONLOAD_STACK_ALLOC_OUT_LEN 4
-/* The handle of the new Onload stack */
-#define MC_CMD_ONLOAD_STACK_ALLOC_OUT_ONLOAD_STACK_ID_OFST 0
-#define MC_CMD_ONLOAD_STACK_ALLOC_OUT_ONLOAD_STACK_ID_LEN 4
-
-
-/***********************************/
-/* MC_CMD_ONLOAD_STACK_FREE
- * Free an Onload stack ID.
- */
-#define MC_CMD_ONLOAD_STACK_FREE 0x9d
-#undef MC_CMD_0x9d_PRIVILEGE_CTG
-
-#define MC_CMD_0x9d_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
-
-/* MC_CMD_ONLOAD_STACK_FREE_IN msgrequest */
-#define MC_CMD_ONLOAD_STACK_FREE_IN_LEN 4
-/* The handle of the Onload stack */
-#define MC_CMD_ONLOAD_STACK_FREE_IN_ONLOAD_STACK_ID_OFST 0
-#define MC_CMD_ONLOAD_STACK_FREE_IN_ONLOAD_STACK_ID_LEN 4
-
-/* MC_CMD_ONLOAD_STACK_FREE_OUT msgresponse */
-#define MC_CMD_ONLOAD_STACK_FREE_OUT_LEN 0
-
-
-/***********************************/
/* MC_CMD_RSS_CONTEXT_ALLOC
* Allocate an RSS context.
*/
@@ -19305,93 +20038,6 @@
/***********************************/
-/* MC_CMD_RSS_CONTEXT_WRITE_TABLE
- * Write a portion of a selectable-size indirection table for an RSS context.
- * This command must be used instead of MC_CMD_RSS_CONTEXT_SET_TABLE if the
- * RSS_SELECTABLE_TABLE_SIZE bit is set in MC_CMD_GET_CAPABILITIES.
- */
-#define MC_CMD_RSS_CONTEXT_WRITE_TABLE 0x13e
-#undef MC_CMD_0x13e_PRIVILEGE_CTG
-
-#define MC_CMD_0x13e_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN msgrequest */
-#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_LENMIN 8
-#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_LENMAX 252
-#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_LENMAX_MCDI2 1020
-#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_LEN(num) (4+4*(num))
-#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_ENTRIES_NUM(len) (((len)-4)/4)
-/* The handle of the RSS context */
-#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_RSS_CONTEXT_ID_OFST 0
-#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_RSS_CONTEXT_ID_LEN 4
-/* An array of index-value pairs to be written to the table. Structure is
- * MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY.
- */
-#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_ENTRIES_OFST 4
-#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_ENTRIES_LEN 4
-#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_ENTRIES_MINNUM 1
-#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_ENTRIES_MAXNUM 62
-#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_ENTRIES_MAXNUM_MCDI2 254
-
-/* MC_CMD_RSS_CONTEXT_WRITE_TABLE_OUT msgresponse */
-#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_OUT_LEN 0
-
-/* MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY structuredef */
-#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY_LEN 4
-/* The index of the table entry to be written. */
-#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY_INDEX_OFST 0
-#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY_INDEX_LEN 2
-#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY_INDEX_LBN 0
-#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY_INDEX_WIDTH 16
-/* The value to write into the table entry. */
-#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY_VALUE_OFST 2
-#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY_VALUE_LEN 2
-#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY_VALUE_LBN 16
-#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY_VALUE_WIDTH 16
-
-
-/***********************************/
-/* MC_CMD_RSS_CONTEXT_READ_TABLE
- * Read a portion of a selectable-size indirection table for an RSS context.
- * This command must be used instead of MC_CMD_RSS_CONTEXT_GET_TABLE if the
- * RSS_SELECTABLE_TABLE_SIZE bit is set in MC_CMD_GET_CAPABILITIES.
- */
-#define MC_CMD_RSS_CONTEXT_READ_TABLE 0x13f
-#undef MC_CMD_0x13f_PRIVILEGE_CTG
-
-#define MC_CMD_0x13f_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_RSS_CONTEXT_READ_TABLE_IN msgrequest */
-#define MC_CMD_RSS_CONTEXT_READ_TABLE_IN_LENMIN 6
-#define MC_CMD_RSS_CONTEXT_READ_TABLE_IN_LENMAX 252
-#define MC_CMD_RSS_CONTEXT_READ_TABLE_IN_LENMAX_MCDI2 1020
-#define MC_CMD_RSS_CONTEXT_READ_TABLE_IN_LEN(num) (4+2*(num))
-#define MC_CMD_RSS_CONTEXT_READ_TABLE_IN_INDICES_NUM(len) (((len)-4)/2)
-/* The handle of the RSS context */
-#define MC_CMD_RSS_CONTEXT_READ_TABLE_IN_RSS_CONTEXT_ID_OFST 0
-#define MC_CMD_RSS_CONTEXT_READ_TABLE_IN_RSS_CONTEXT_ID_LEN 4
-/* An array containing the indices of the entries to be read. */
-#define MC_CMD_RSS_CONTEXT_READ_TABLE_IN_INDICES_OFST 4
-#define MC_CMD_RSS_CONTEXT_READ_TABLE_IN_INDICES_LEN 2
-#define MC_CMD_RSS_CONTEXT_READ_TABLE_IN_INDICES_MINNUM 1
-#define MC_CMD_RSS_CONTEXT_READ_TABLE_IN_INDICES_MAXNUM 124
-#define MC_CMD_RSS_CONTEXT_READ_TABLE_IN_INDICES_MAXNUM_MCDI2 508
-
-/* MC_CMD_RSS_CONTEXT_READ_TABLE_OUT msgresponse */
-#define MC_CMD_RSS_CONTEXT_READ_TABLE_OUT_LENMIN 2
-#define MC_CMD_RSS_CONTEXT_READ_TABLE_OUT_LENMAX 252
-#define MC_CMD_RSS_CONTEXT_READ_TABLE_OUT_LENMAX_MCDI2 1020
-#define MC_CMD_RSS_CONTEXT_READ_TABLE_OUT_LEN(num) (0+2*(num))
-#define MC_CMD_RSS_CONTEXT_READ_TABLE_OUT_DATA_NUM(len) (((len)-0)/2)
-/* A buffer containing the requested entries read from the table. */
-#define MC_CMD_RSS_CONTEXT_READ_TABLE_OUT_DATA_OFST 0
-#define MC_CMD_RSS_CONTEXT_READ_TABLE_OUT_DATA_LEN 2
-#define MC_CMD_RSS_CONTEXT_READ_TABLE_OUT_DATA_MINNUM 1
-#define MC_CMD_RSS_CONTEXT_READ_TABLE_OUT_DATA_MAXNUM 126
-#define MC_CMD_RSS_CONTEXT_READ_TABLE_OUT_DATA_MAXNUM_MCDI2 510
-
-
-/***********************************/
/* MC_CMD_RSS_CONTEXT_SET_FLAGS
* Set various control flags for an RSS context.
*/
@@ -19525,158 +20171,6 @@
/***********************************/
-/* MC_CMD_DOT1P_MAPPING_ALLOC
- * Allocate a .1p mapping.
- */
-#define MC_CMD_DOT1P_MAPPING_ALLOC 0xa4
-#undef MC_CMD_0xa4_PRIVILEGE_CTG
-
-#define MC_CMD_0xa4_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_DOT1P_MAPPING_ALLOC_IN msgrequest */
-#define MC_CMD_DOT1P_MAPPING_ALLOC_IN_LEN 8
-/* The handle of the owning upstream port */
-#define MC_CMD_DOT1P_MAPPING_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
-#define MC_CMD_DOT1P_MAPPING_ALLOC_IN_UPSTREAM_PORT_ID_LEN 4
-/* Number of queues spanned by this mapping, in the range 1-64; valid fixed
- * offsets in the mapping table will be in the range 0 to NUM_QUEUES-1, and
- * referenced RSS contexts must span no more than this number.
- */
-#define MC_CMD_DOT1P_MAPPING_ALLOC_IN_NUM_QUEUES_OFST 4
-#define MC_CMD_DOT1P_MAPPING_ALLOC_IN_NUM_QUEUES_LEN 4
-
-/* MC_CMD_DOT1P_MAPPING_ALLOC_OUT msgresponse */
-#define MC_CMD_DOT1P_MAPPING_ALLOC_OUT_LEN 4
-/* The handle of the new .1p mapping. This should be considered opaque to the
- * host, although a value of 0xFFFFFFFF is guaranteed never to be a valid
- * handle.
- */
-#define MC_CMD_DOT1P_MAPPING_ALLOC_OUT_DOT1P_MAPPING_ID_OFST 0
-#define MC_CMD_DOT1P_MAPPING_ALLOC_OUT_DOT1P_MAPPING_ID_LEN 4
-/* enum: guaranteed invalid .1p mapping handle value */
-#define MC_CMD_DOT1P_MAPPING_ALLOC_OUT_DOT1P_MAPPING_ID_INVALID 0xffffffff
-
-
-/***********************************/
-/* MC_CMD_DOT1P_MAPPING_FREE
- * Free a .1p mapping.
- */
-#define MC_CMD_DOT1P_MAPPING_FREE 0xa5
-#undef MC_CMD_0xa5_PRIVILEGE_CTG
-
-#define MC_CMD_0xa5_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_DOT1P_MAPPING_FREE_IN msgrequest */
-#define MC_CMD_DOT1P_MAPPING_FREE_IN_LEN 4
-/* The handle of the .1p mapping */
-#define MC_CMD_DOT1P_MAPPING_FREE_IN_DOT1P_MAPPING_ID_OFST 0
-#define MC_CMD_DOT1P_MAPPING_FREE_IN_DOT1P_MAPPING_ID_LEN 4
-
-/* MC_CMD_DOT1P_MAPPING_FREE_OUT msgresponse */
-#define MC_CMD_DOT1P_MAPPING_FREE_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_DOT1P_MAPPING_SET_TABLE
- * Set the mapping table for a .1p mapping.
- */
-#define MC_CMD_DOT1P_MAPPING_SET_TABLE 0xa6
-#undef MC_CMD_0xa6_PRIVILEGE_CTG
-
-#define MC_CMD_0xa6_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_DOT1P_MAPPING_SET_TABLE_IN msgrequest */
-#define MC_CMD_DOT1P_MAPPING_SET_TABLE_IN_LEN 36
-/* The handle of the .1p mapping */
-#define MC_CMD_DOT1P_MAPPING_SET_TABLE_IN_DOT1P_MAPPING_ID_OFST 0
-#define MC_CMD_DOT1P_MAPPING_SET_TABLE_IN_DOT1P_MAPPING_ID_LEN 4
-/* Per-priority mappings (1 32-bit word per entry - an offset or RSS context
- * handle)
- */
-#define MC_CMD_DOT1P_MAPPING_SET_TABLE_IN_MAPPING_TABLE_OFST 4
-#define MC_CMD_DOT1P_MAPPING_SET_TABLE_IN_MAPPING_TABLE_LEN 32
-
-/* MC_CMD_DOT1P_MAPPING_SET_TABLE_OUT msgresponse */
-#define MC_CMD_DOT1P_MAPPING_SET_TABLE_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_DOT1P_MAPPING_GET_TABLE
- * Get the mapping table for a .1p mapping.
- */
-#define MC_CMD_DOT1P_MAPPING_GET_TABLE 0xa7
-#undef MC_CMD_0xa7_PRIVILEGE_CTG
-
-#define MC_CMD_0xa7_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_DOT1P_MAPPING_GET_TABLE_IN msgrequest */
-#define MC_CMD_DOT1P_MAPPING_GET_TABLE_IN_LEN 4
-/* The handle of the .1p mapping */
-#define MC_CMD_DOT1P_MAPPING_GET_TABLE_IN_DOT1P_MAPPING_ID_OFST 0
-#define MC_CMD_DOT1P_MAPPING_GET_TABLE_IN_DOT1P_MAPPING_ID_LEN 4
-
-/* MC_CMD_DOT1P_MAPPING_GET_TABLE_OUT msgresponse */
-#define MC_CMD_DOT1P_MAPPING_GET_TABLE_OUT_LEN 36
-/* Per-priority mappings (1 32-bit word per entry - an offset or RSS context
- * handle)
- */
-#define MC_CMD_DOT1P_MAPPING_GET_TABLE_OUT_MAPPING_TABLE_OFST 4
-#define MC_CMD_DOT1P_MAPPING_GET_TABLE_OUT_MAPPING_TABLE_LEN 32
-
-
-/***********************************/
-/* MC_CMD_GET_VECTOR_CFG
- * Get Interrupt Vector config for this PF.
- */
-#define MC_CMD_GET_VECTOR_CFG 0xbf
-#undef MC_CMD_0xbf_PRIVILEGE_CTG
-
-#define MC_CMD_0xbf_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_GET_VECTOR_CFG_IN msgrequest */
-#define MC_CMD_GET_VECTOR_CFG_IN_LEN 0
-
-/* MC_CMD_GET_VECTOR_CFG_OUT msgresponse */
-#define MC_CMD_GET_VECTOR_CFG_OUT_LEN 12
-/* Base absolute interrupt vector number. */
-#define MC_CMD_GET_VECTOR_CFG_OUT_VEC_BASE_OFST 0
-#define MC_CMD_GET_VECTOR_CFG_OUT_VEC_BASE_LEN 4
-/* Number of interrupt vectors allocate to this PF. */
-#define MC_CMD_GET_VECTOR_CFG_OUT_VECS_PER_PF_OFST 4
-#define MC_CMD_GET_VECTOR_CFG_OUT_VECS_PER_PF_LEN 4
-/* Number of interrupt vectors to allocate per VF. */
-#define MC_CMD_GET_VECTOR_CFG_OUT_VECS_PER_VF_OFST 8
-#define MC_CMD_GET_VECTOR_CFG_OUT_VECS_PER_VF_LEN 4
-
-
-/***********************************/
-/* MC_CMD_SET_VECTOR_CFG
- * Set Interrupt Vector config for this PF.
- */
-#define MC_CMD_SET_VECTOR_CFG 0xc0
-#undef MC_CMD_0xc0_PRIVILEGE_CTG
-
-#define MC_CMD_0xc0_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_SET_VECTOR_CFG_IN msgrequest */
-#define MC_CMD_SET_VECTOR_CFG_IN_LEN 12
-/* Base absolute interrupt vector number, or MC_CMD_RESOURCE_INSTANCE_ANY to
- * let the system find a suitable base.
- */
-#define MC_CMD_SET_VECTOR_CFG_IN_VEC_BASE_OFST 0
-#define MC_CMD_SET_VECTOR_CFG_IN_VEC_BASE_LEN 4
-/* Number of interrupt vectors allocate to this PF. */
-#define MC_CMD_SET_VECTOR_CFG_IN_VECS_PER_PF_OFST 4
-#define MC_CMD_SET_VECTOR_CFG_IN_VECS_PER_PF_LEN 4
-/* Number of interrupt vectors to allocate per VF. */
-#define MC_CMD_SET_VECTOR_CFG_IN_VECS_PER_VF_OFST 8
-#define MC_CMD_SET_VECTOR_CFG_IN_VECS_PER_VF_LEN 4
-
-/* MC_CMD_SET_VECTOR_CFG_OUT msgresponse */
-#define MC_CMD_SET_VECTOR_CFG_OUT_LEN 0
-
-
-/***********************************/
/* MC_CMD_VPORT_ADD_MAC_ADDRESS
* Add a MAC address to a v-port
*/
@@ -19810,124 +20304,6 @@
/***********************************/
-/* MC_CMD_EVB_PORT_QUERY
- * read some config of v-port.
- */
-#define MC_CMD_EVB_PORT_QUERY 0x62
-#undef MC_CMD_0x62_PRIVILEGE_CTG
-
-#define MC_CMD_0x62_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_EVB_PORT_QUERY_IN msgrequest */
-#define MC_CMD_EVB_PORT_QUERY_IN_LEN 4
-/* The handle of the v-port */
-#define MC_CMD_EVB_PORT_QUERY_IN_PORT_ID_OFST 0
-#define MC_CMD_EVB_PORT_QUERY_IN_PORT_ID_LEN 4
-
-/* MC_CMD_EVB_PORT_QUERY_OUT msgresponse */
-#define MC_CMD_EVB_PORT_QUERY_OUT_LEN 8
-/* The EVB port flags as defined at MC_CMD_VPORT_ALLOC. */
-#define MC_CMD_EVB_PORT_QUERY_OUT_PORT_FLAGS_OFST 0
-#define MC_CMD_EVB_PORT_QUERY_OUT_PORT_FLAGS_LEN 4
-/* The number of VLAN tags that may be used on a v-adaptor connected to this
- * EVB port.
- */
-#define MC_CMD_EVB_PORT_QUERY_OUT_NUM_AVAILABLE_VLAN_TAGS_OFST 4
-#define MC_CMD_EVB_PORT_QUERY_OUT_NUM_AVAILABLE_VLAN_TAGS_LEN 4
-
-
-/***********************************/
-/* MC_CMD_DUMP_BUFTBL_ENTRIES
- * Dump buffer table entries, mainly for command client debug use. Dumps
- * absolute entries, and does not use chunk handles. All entries must be in
- * range, and used for q page mapping, Although the latter restriction may be
- * lifted in future.
- */
-#define MC_CMD_DUMP_BUFTBL_ENTRIES 0xab
-#undef MC_CMD_0xab_PRIVILEGE_CTG
-
-#define MC_CMD_0xab_PRIVILEGE_CTG SRIOV_CTG_INSECURE
-
-/* MC_CMD_DUMP_BUFTBL_ENTRIES_IN msgrequest */
-#define MC_CMD_DUMP_BUFTBL_ENTRIES_IN_LEN 8
-/* Index of the first buffer table entry. */
-#define MC_CMD_DUMP_BUFTBL_ENTRIES_IN_FIRSTID_OFST 0
-#define MC_CMD_DUMP_BUFTBL_ENTRIES_IN_FIRSTID_LEN 4
-/* Number of buffer table entries to dump. */
-#define MC_CMD_DUMP_BUFTBL_ENTRIES_IN_NUMENTRIES_OFST 4
-#define MC_CMD_DUMP_BUFTBL_ENTRIES_IN_NUMENTRIES_LEN 4
-
-/* MC_CMD_DUMP_BUFTBL_ENTRIES_OUT msgresponse */
-#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_LENMIN 12
-#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_LENMAX 252
-#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_LENMAX_MCDI2 1020
-#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_LEN(num) (0+12*(num))
-#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_NUM(len) (((len)-0)/12)
-/* Raw buffer table entries, layed out as BUFTBL_ENTRY. */
-#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_OFST 0
-#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_LEN 12
-#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_MINNUM 1
-#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_MAXNUM 21
-#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_MAXNUM_MCDI2 85
-
-
-/***********************************/
-/* MC_CMD_SET_RXDP_CONFIG
- * Set global RXDP configuration settings
- */
-#define MC_CMD_SET_RXDP_CONFIG 0xc1
-#undef MC_CMD_0xc1_PRIVILEGE_CTG
-
-#define MC_CMD_0xc1_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_SET_RXDP_CONFIG_IN msgrequest */
-#define MC_CMD_SET_RXDP_CONFIG_IN_LEN 4
-#define MC_CMD_SET_RXDP_CONFIG_IN_DATA_OFST 0
-#define MC_CMD_SET_RXDP_CONFIG_IN_DATA_LEN 4
-#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_DMA_OFST 0
-#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_DMA_LBN 0
-#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_DMA_WIDTH 1
-#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_LEN_OFST 0
-#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_LEN_LBN 1
-#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_LEN_WIDTH 2
-/* enum: pad to 64 bytes */
-#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_64 0x0
-/* enum: pad to 128 bytes (Medford only) */
-#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_128 0x1
-/* enum: pad to 256 bytes (Medford only) */
-#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_256 0x2
-
-/* MC_CMD_SET_RXDP_CONFIG_OUT msgresponse */
-#define MC_CMD_SET_RXDP_CONFIG_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_GET_RXDP_CONFIG
- * Get global RXDP configuration settings
- */
-#define MC_CMD_GET_RXDP_CONFIG 0xc2
-#undef MC_CMD_0xc2_PRIVILEGE_CTG
-
-#define MC_CMD_0xc2_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_GET_RXDP_CONFIG_IN msgrequest */
-#define MC_CMD_GET_RXDP_CONFIG_IN_LEN 0
-
-/* MC_CMD_GET_RXDP_CONFIG_OUT msgresponse */
-#define MC_CMD_GET_RXDP_CONFIG_OUT_LEN 4
-#define MC_CMD_GET_RXDP_CONFIG_OUT_DATA_OFST 0
-#define MC_CMD_GET_RXDP_CONFIG_OUT_DATA_LEN 4
-#define MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_DMA_OFST 0
-#define MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_DMA_LBN 0
-#define MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_DMA_WIDTH 1
-#define MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_LEN_OFST 0
-#define MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_LEN_LBN 1
-#define MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_LEN_WIDTH 2
-/* Enum values, see field(s): */
-/* MC_CMD_SET_RXDP_CONFIG/MC_CMD_SET_RXDP_CONFIG_IN/PAD_HOST_LEN */
-
-
-/***********************************/
/* MC_CMD_GET_CLOCK
* Return the system and PDCPU clock frequencies.
*/
@@ -19950,210 +20326,6 @@
/***********************************/
-/* MC_CMD_SET_CLOCK
- * Control the system and DPCPU clock frequencies. Changes are lost reboot.
- */
-#define MC_CMD_SET_CLOCK 0xad
-#undef MC_CMD_0xad_PRIVILEGE_CTG
-
-#define MC_CMD_0xad_PRIVILEGE_CTG SRIOV_CTG_INSECURE
-
-/* MC_CMD_SET_CLOCK_IN msgrequest */
-#define MC_CMD_SET_CLOCK_IN_LEN 28
-/* Requested frequency in MHz for system clock domain */
-#define MC_CMD_SET_CLOCK_IN_SYS_FREQ_OFST 0
-#define MC_CMD_SET_CLOCK_IN_SYS_FREQ_LEN 4
-/* enum: Leave the system clock domain frequency unchanged */
-#define MC_CMD_SET_CLOCK_IN_SYS_DOMAIN_DONT_CHANGE 0x0
-/* Requested frequency in MHz for inter-core clock domain */
-#define MC_CMD_SET_CLOCK_IN_ICORE_FREQ_OFST 4
-#define MC_CMD_SET_CLOCK_IN_ICORE_FREQ_LEN 4
-/* enum: Leave the inter-core clock domain frequency unchanged */
-#define MC_CMD_SET_CLOCK_IN_ICORE_DOMAIN_DONT_CHANGE 0x0
-/* Requested frequency in MHz for DPCPU clock domain */
-#define MC_CMD_SET_CLOCK_IN_DPCPU_FREQ_OFST 8
-#define MC_CMD_SET_CLOCK_IN_DPCPU_FREQ_LEN 4
-/* enum: Leave the DPCPU clock domain frequency unchanged */
-#define MC_CMD_SET_CLOCK_IN_DPCPU_DOMAIN_DONT_CHANGE 0x0
-/* Requested frequency in MHz for PCS clock domain */
-#define MC_CMD_SET_CLOCK_IN_PCS_FREQ_OFST 12
-#define MC_CMD_SET_CLOCK_IN_PCS_FREQ_LEN 4
-/* enum: Leave the PCS clock domain frequency unchanged */
-#define MC_CMD_SET_CLOCK_IN_PCS_DOMAIN_DONT_CHANGE 0x0
-/* Requested frequency in MHz for MC clock domain */
-#define MC_CMD_SET_CLOCK_IN_MC_FREQ_OFST 16
-#define MC_CMD_SET_CLOCK_IN_MC_FREQ_LEN 4
-/* enum: Leave the MC clock domain frequency unchanged */
-#define MC_CMD_SET_CLOCK_IN_MC_DOMAIN_DONT_CHANGE 0x0
-/* Requested frequency in MHz for rmon clock domain */
-#define MC_CMD_SET_CLOCK_IN_RMON_FREQ_OFST 20
-#define MC_CMD_SET_CLOCK_IN_RMON_FREQ_LEN 4
-/* enum: Leave the rmon clock domain frequency unchanged */
-#define MC_CMD_SET_CLOCK_IN_RMON_DOMAIN_DONT_CHANGE 0x0
-/* Requested frequency in MHz for vswitch clock domain */
-#define MC_CMD_SET_CLOCK_IN_VSWITCH_FREQ_OFST 24
-#define MC_CMD_SET_CLOCK_IN_VSWITCH_FREQ_LEN 4
-/* enum: Leave the vswitch clock domain frequency unchanged */
-#define MC_CMD_SET_CLOCK_IN_VSWITCH_DOMAIN_DONT_CHANGE 0x0
-
-/* MC_CMD_SET_CLOCK_OUT msgresponse */
-#define MC_CMD_SET_CLOCK_OUT_LEN 28
-/* Resulting system frequency in MHz */
-#define MC_CMD_SET_CLOCK_OUT_SYS_FREQ_OFST 0
-#define MC_CMD_SET_CLOCK_OUT_SYS_FREQ_LEN 4
-/* enum: The system clock domain doesn't exist */
-#define MC_CMD_SET_CLOCK_OUT_SYS_DOMAIN_UNSUPPORTED 0x0
-/* Resulting inter-core frequency in MHz */
-#define MC_CMD_SET_CLOCK_OUT_ICORE_FREQ_OFST 4
-#define MC_CMD_SET_CLOCK_OUT_ICORE_FREQ_LEN 4
-/* enum: The inter-core clock domain doesn't exist / isn't used */
-#define MC_CMD_SET_CLOCK_OUT_ICORE_DOMAIN_UNSUPPORTED 0x0
-/* Resulting DPCPU frequency in MHz */
-#define MC_CMD_SET_CLOCK_OUT_DPCPU_FREQ_OFST 8
-#define MC_CMD_SET_CLOCK_OUT_DPCPU_FREQ_LEN 4
-/* enum: The dpcpu clock domain doesn't exist */
-#define MC_CMD_SET_CLOCK_OUT_DPCPU_DOMAIN_UNSUPPORTED 0x0
-/* Resulting PCS frequency in MHz */
-#define MC_CMD_SET_CLOCK_OUT_PCS_FREQ_OFST 12
-#define MC_CMD_SET_CLOCK_OUT_PCS_FREQ_LEN 4
-/* enum: The PCS clock domain doesn't exist / isn't controlled */
-#define MC_CMD_SET_CLOCK_OUT_PCS_DOMAIN_UNSUPPORTED 0x0
-/* Resulting MC frequency in MHz */
-#define MC_CMD_SET_CLOCK_OUT_MC_FREQ_OFST 16
-#define MC_CMD_SET_CLOCK_OUT_MC_FREQ_LEN 4
-/* enum: The MC clock domain doesn't exist / isn't controlled */
-#define MC_CMD_SET_CLOCK_OUT_MC_DOMAIN_UNSUPPORTED 0x0
-/* Resulting rmon frequency in MHz */
-#define MC_CMD_SET_CLOCK_OUT_RMON_FREQ_OFST 20
-#define MC_CMD_SET_CLOCK_OUT_RMON_FREQ_LEN 4
-/* enum: The rmon clock domain doesn't exist / isn't controlled */
-#define MC_CMD_SET_CLOCK_OUT_RMON_DOMAIN_UNSUPPORTED 0x0
-/* Resulting vswitch frequency in MHz */
-#define MC_CMD_SET_CLOCK_OUT_VSWITCH_FREQ_OFST 24
-#define MC_CMD_SET_CLOCK_OUT_VSWITCH_FREQ_LEN 4
-/* enum: The vswitch clock domain doesn't exist / isn't controlled */
-#define MC_CMD_SET_CLOCK_OUT_VSWITCH_DOMAIN_UNSUPPORTED 0x0
-
-
-/***********************************/
-/* MC_CMD_DPCPU_RPC
- * Send an arbitrary DPCPU message.
- */
-#define MC_CMD_DPCPU_RPC 0xae
-#undef MC_CMD_0xae_PRIVILEGE_CTG
-
-#define MC_CMD_0xae_PRIVILEGE_CTG SRIOV_CTG_INSECURE
-
-/* MC_CMD_DPCPU_RPC_IN msgrequest */
-#define MC_CMD_DPCPU_RPC_IN_LEN 36
-#define MC_CMD_DPCPU_RPC_IN_CPU_OFST 0
-#define MC_CMD_DPCPU_RPC_IN_CPU_LEN 4
-/* enum: RxDPCPU0 */
-#define MC_CMD_DPCPU_RPC_IN_DPCPU_RX0 0x0
-/* enum: TxDPCPU0 */
-#define MC_CMD_DPCPU_RPC_IN_DPCPU_TX0 0x1
-/* enum: TxDPCPU1 */
-#define MC_CMD_DPCPU_RPC_IN_DPCPU_TX1 0x2
-/* enum: RxDPCPU1 (Medford only) */
-#define MC_CMD_DPCPU_RPC_IN_DPCPU_RX1 0x3
-/* enum: RxDPCPU (will be for the calling function; for now, just an alias of
- * DPCPU_RX0)
- */
-#define MC_CMD_DPCPU_RPC_IN_DPCPU_RX 0x80
-/* enum: TxDPCPU (will be for the calling function; for now, just an alias of
- * DPCPU_TX0)
- */
-#define MC_CMD_DPCPU_RPC_IN_DPCPU_TX 0x81
-/* First 8 bits [39:32] of DATA are consumed by MC-DPCPU protocol and must be
- * initialised to zero
- */
-#define MC_CMD_DPCPU_RPC_IN_DATA_OFST 4
-#define MC_CMD_DPCPU_RPC_IN_DATA_LEN 32
-#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_CMDNUM_OFST 4
-#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_CMDNUM_LBN 8
-#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_CMDNUM_WIDTH 8
-#define MC_CMD_DPCPU_RPC_IN_CMDNUM_TXDPCPU_READ 0x6 /* enum */
-#define MC_CMD_DPCPU_RPC_IN_CMDNUM_TXDPCPU_WRITE 0x7 /* enum */
-#define MC_CMD_DPCPU_RPC_IN_CMDNUM_TXDPCPU_SELF_TEST 0xc /* enum */
-#define MC_CMD_DPCPU_RPC_IN_CMDNUM_TXDPCPU_CSR_ACCESS 0xe /* enum */
-#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_READ 0x46 /* enum */
-#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_WRITE 0x47 /* enum */
-#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_SELF_TEST 0x4a /* enum */
-#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_CSR_ACCESS 0x4c /* enum */
-#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_SET_MC_REPLAY_CNTXT 0x4d /* enum */
-#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_OBJID_OFST 4
-#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_OBJID_LBN 16
-#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_OBJID_WIDTH 16
-#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_ADDR_OFST 4
-#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_ADDR_LBN 16
-#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_ADDR_WIDTH 16
-#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_COUNT_OFST 4
-#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_COUNT_LBN 48
-#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_COUNT_WIDTH 16
-#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_INFO_OFST 4
-#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_INFO_LBN 16
-#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_INFO_WIDTH 240
-#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_OFST 4
-#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_LBN 16
-#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_WIDTH 16
-#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_STOP_RETURN_RESULT 0x0 /* enum */
-#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_START_READ 0x1 /* enum */
-#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_START_WRITE 0x2 /* enum */
-#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_START_WRITE_READ 0x3 /* enum */
-#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_START_PIPELINED_READ 0x4 /* enum */
-#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_START_DELAY_OFST 4
-#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_START_DELAY_LBN 48
-#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_START_DELAY_WIDTH 16
-#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_RPT_COUNT_OFST 4
-#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_RPT_COUNT_LBN 64
-#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_RPT_COUNT_WIDTH 16
-#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_GAP_DELAY_OFST 4
-#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_GAP_DELAY_LBN 80
-#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_GAP_DELAY_WIDTH 16
-#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_OFST 4
-#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_LBN 16
-#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_WIDTH 16
-#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_CUT_THROUGH 0x1 /* enum */
-#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_STORE_FORWARD 0x2 /* enum */
-#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_STORE_FORWARD_FIRST 0x3 /* enum */
-#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_CNTXT_OFST 4
-#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_CNTXT_LBN 64
-#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_CNTXT_WIDTH 16
-#define MC_CMD_DPCPU_RPC_IN_WDATA_OFST 12
-#define MC_CMD_DPCPU_RPC_IN_WDATA_LEN 24
-/* Register data to write. Only valid in write/write-read. */
-#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_DATA_OFST 16
-#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_DATA_LEN 4
-/* Register address. */
-#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_ADDRESS_OFST 20
-#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_ADDRESS_LEN 4
-
-/* MC_CMD_DPCPU_RPC_OUT msgresponse */
-#define MC_CMD_DPCPU_RPC_OUT_LEN 36
-#define MC_CMD_DPCPU_RPC_OUT_RC_OFST 0
-#define MC_CMD_DPCPU_RPC_OUT_RC_LEN 4
-/* DATA */
-#define MC_CMD_DPCPU_RPC_OUT_DATA_OFST 4
-#define MC_CMD_DPCPU_RPC_OUT_DATA_LEN 32
-#define MC_CMD_DPCPU_RPC_OUT_HDR_CMD_RESP_ERRCODE_OFST 4
-#define MC_CMD_DPCPU_RPC_OUT_HDR_CMD_RESP_ERRCODE_LBN 32
-#define MC_CMD_DPCPU_RPC_OUT_HDR_CMD_RESP_ERRCODE_WIDTH 16
-#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_COUNT_OFST 4
-#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_COUNT_LBN 48
-#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_COUNT_WIDTH 16
-#define MC_CMD_DPCPU_RPC_OUT_RDATA_OFST 12
-#define MC_CMD_DPCPU_RPC_OUT_RDATA_LEN 24
-#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_1_OFST 12
-#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_1_LEN 4
-#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_2_OFST 16
-#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_2_LEN 4
-#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_3_OFST 20
-#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_3_LEN 4
-#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_4_OFST 24
-#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_4_LEN 4
-
-
-/***********************************/
/* MC_CMD_TRIGGER_INTERRUPT
* Trigger an interrupt by prodding the BIU.
*/
@@ -20173,66 +20345,6 @@
/***********************************/
-/* MC_CMD_SHMBOOT_OP
- * Special operations to support (for now) shmboot.
- */
-#define MC_CMD_SHMBOOT_OP 0xe6
-#undef MC_CMD_0xe6_PRIVILEGE_CTG
-
-#define MC_CMD_0xe6_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_SHMBOOT_OP_IN msgrequest */
-#define MC_CMD_SHMBOOT_OP_IN_LEN 4
-/* Identifies the operation to perform */
-#define MC_CMD_SHMBOOT_OP_IN_SHMBOOT_OP_OFST 0
-#define MC_CMD_SHMBOOT_OP_IN_SHMBOOT_OP_LEN 4
-/* enum: Copy slave_data section to the slave core. (Greenport only) */
-#define MC_CMD_SHMBOOT_OP_IN_PUSH_SLAVE_DATA 0x0
-
-/* MC_CMD_SHMBOOT_OP_OUT msgresponse */
-#define MC_CMD_SHMBOOT_OP_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_CAP_BLK_READ
- * Read multiple 64bit words from capture block memory
- */
-#define MC_CMD_CAP_BLK_READ 0xe7
-#undef MC_CMD_0xe7_PRIVILEGE_CTG
-
-#define MC_CMD_0xe7_PRIVILEGE_CTG SRIOV_CTG_INSECURE
-
-/* MC_CMD_CAP_BLK_READ_IN msgrequest */
-#define MC_CMD_CAP_BLK_READ_IN_LEN 12
-#define MC_CMD_CAP_BLK_READ_IN_CAP_REG_OFST 0
-#define MC_CMD_CAP_BLK_READ_IN_CAP_REG_LEN 4
-#define MC_CMD_CAP_BLK_READ_IN_ADDR_OFST 4
-#define MC_CMD_CAP_BLK_READ_IN_ADDR_LEN 4
-#define MC_CMD_CAP_BLK_READ_IN_COUNT_OFST 8
-#define MC_CMD_CAP_BLK_READ_IN_COUNT_LEN 4
-
-/* MC_CMD_CAP_BLK_READ_OUT msgresponse */
-#define MC_CMD_CAP_BLK_READ_OUT_LENMIN 8
-#define MC_CMD_CAP_BLK_READ_OUT_LENMAX 248
-#define MC_CMD_CAP_BLK_READ_OUT_LENMAX_MCDI2 1016
-#define MC_CMD_CAP_BLK_READ_OUT_LEN(num) (0+8*(num))
-#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_NUM(len) (((len)-0)/8)
-#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_OFST 0
-#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_LEN 8
-#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_LO_OFST 0
-#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_LO_LEN 4
-#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_LO_LBN 0
-#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_LO_WIDTH 32
-#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_HI_OFST 4
-#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_HI_LEN 4
-#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_HI_LBN 32
-#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_HI_WIDTH 32
-#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_MINNUM 1
-#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_MAXNUM 31
-#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_MAXNUM_MCDI2 127
-
-
-/***********************************/
/* MC_CMD_DUMP_DO
* Take a dump of the DUT state
*/
@@ -20380,34 +20492,6 @@
/***********************************/
-/* MC_CMD_SET_PSU
- * Adjusts power supply parameters. This is a warranty-voiding operation.
- * Returns: ENOENT if the parameter or rail specified does not exist, EINVAL if
- * the parameter is out of range.
- */
-#define MC_CMD_SET_PSU 0xea
-#undef MC_CMD_0xea_PRIVILEGE_CTG
-
-#define MC_CMD_0xea_PRIVILEGE_CTG SRIOV_CTG_INSECURE
-
-/* MC_CMD_SET_PSU_IN msgrequest */
-#define MC_CMD_SET_PSU_IN_LEN 12
-#define MC_CMD_SET_PSU_IN_PARAM_OFST 0
-#define MC_CMD_SET_PSU_IN_PARAM_LEN 4
-#define MC_CMD_SET_PSU_IN_PARAM_SUPPLY_VOLTAGE 0x0 /* enum */
-#define MC_CMD_SET_PSU_IN_RAIL_OFST 4
-#define MC_CMD_SET_PSU_IN_RAIL_LEN 4
-#define MC_CMD_SET_PSU_IN_RAIL_0V9 0x0 /* enum */
-#define MC_CMD_SET_PSU_IN_RAIL_1V2 0x1 /* enum */
-/* desired value, eg voltage in mV */
-#define MC_CMD_SET_PSU_IN_VALUE_OFST 8
-#define MC_CMD_SET_PSU_IN_VALUE_LEN 4
-
-/* MC_CMD_SET_PSU_OUT msgresponse */
-#define MC_CMD_SET_PSU_OUT_LEN 0
-
-
-/***********************************/
/* MC_CMD_GET_FUNCTION_INFO
* Get function information. PF and VF number.
*/
@@ -20448,7 +20532,7 @@
#define MC_CMD_ENABLE_OFFLINE_BIST 0xed
#undef MC_CMD_0xed_PRIVILEGE_CTG
-#define MC_CMD_0xed_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0xed_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
/* MC_CMD_ENABLE_OFFLINE_BIST_IN msgrequest */
#define MC_CMD_ENABLE_OFFLINE_BIST_IN_LEN 0
@@ -20458,137 +20542,13 @@
/***********************************/
-/* MC_CMD_UART_SEND_DATA
- * Send checksummed[sic] block of data over the uart. Response is a placeholder
- * should we wish to make this reliable; currently requests are fire-and-
- * forget.
- */
-#define MC_CMD_UART_SEND_DATA 0xee
-#undef MC_CMD_0xee_PRIVILEGE_CTG
-
-#define MC_CMD_0xee_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_UART_SEND_DATA_OUT msgrequest */
-#define MC_CMD_UART_SEND_DATA_OUT_LENMIN 16
-#define MC_CMD_UART_SEND_DATA_OUT_LENMAX 252
-#define MC_CMD_UART_SEND_DATA_OUT_LENMAX_MCDI2 1020
-#define MC_CMD_UART_SEND_DATA_OUT_LEN(num) (16+1*(num))
-#define MC_CMD_UART_SEND_DATA_OUT_DATA_NUM(len) (((len)-16)/1)
-/* CRC32 over OFFSET, LENGTH, RESERVED, DATA */
-#define MC_CMD_UART_SEND_DATA_OUT_CHECKSUM_OFST 0
-#define MC_CMD_UART_SEND_DATA_OUT_CHECKSUM_LEN 4
-/* Offset at which to write the data */
-#define MC_CMD_UART_SEND_DATA_OUT_OFFSET_OFST 4
-#define MC_CMD_UART_SEND_DATA_OUT_OFFSET_LEN 4
-/* Length of data */
-#define MC_CMD_UART_SEND_DATA_OUT_LENGTH_OFST 8
-#define MC_CMD_UART_SEND_DATA_OUT_LENGTH_LEN 4
-/* Reserved for future use */
-#define MC_CMD_UART_SEND_DATA_OUT_RESERVED_OFST 12
-#define MC_CMD_UART_SEND_DATA_OUT_RESERVED_LEN 4
-#define MC_CMD_UART_SEND_DATA_OUT_DATA_OFST 16
-#define MC_CMD_UART_SEND_DATA_OUT_DATA_LEN 1
-#define MC_CMD_UART_SEND_DATA_OUT_DATA_MINNUM 0
-#define MC_CMD_UART_SEND_DATA_OUT_DATA_MAXNUM 236
-#define MC_CMD_UART_SEND_DATA_OUT_DATA_MAXNUM_MCDI2 1004
-
-/* MC_CMD_UART_SEND_DATA_IN msgresponse */
-#define MC_CMD_UART_SEND_DATA_IN_LEN 0
-
-
-/***********************************/
-/* MC_CMD_UART_RECV_DATA
- * Request checksummed[sic] block of data over the uart. Only a placeholder,
- * subject to change and not currently implemented.
- */
-#define MC_CMD_UART_RECV_DATA 0xef
-#undef MC_CMD_0xef_PRIVILEGE_CTG
-
-#define MC_CMD_0xef_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_UART_RECV_DATA_OUT msgrequest */
-#define MC_CMD_UART_RECV_DATA_OUT_LEN 16
-/* CRC32 over OFFSET, LENGTH, RESERVED */
-#define MC_CMD_UART_RECV_DATA_OUT_CHECKSUM_OFST 0
-#define MC_CMD_UART_RECV_DATA_OUT_CHECKSUM_LEN 4
-/* Offset from which to read the data */
-#define MC_CMD_UART_RECV_DATA_OUT_OFFSET_OFST 4
-#define MC_CMD_UART_RECV_DATA_OUT_OFFSET_LEN 4
-/* Length of data */
-#define MC_CMD_UART_RECV_DATA_OUT_LENGTH_OFST 8
-#define MC_CMD_UART_RECV_DATA_OUT_LENGTH_LEN 4
-/* Reserved for future use */
-#define MC_CMD_UART_RECV_DATA_OUT_RESERVED_OFST 12
-#define MC_CMD_UART_RECV_DATA_OUT_RESERVED_LEN 4
-
-/* MC_CMD_UART_RECV_DATA_IN msgresponse */
-#define MC_CMD_UART_RECV_DATA_IN_LENMIN 16
-#define MC_CMD_UART_RECV_DATA_IN_LENMAX 252
-#define MC_CMD_UART_RECV_DATA_IN_LENMAX_MCDI2 1020
-#define MC_CMD_UART_RECV_DATA_IN_LEN(num) (16+1*(num))
-#define MC_CMD_UART_RECV_DATA_IN_DATA_NUM(len) (((len)-16)/1)
-/* CRC32 over RESERVED1, RESERVED2, RESERVED3, DATA */
-#define MC_CMD_UART_RECV_DATA_IN_CHECKSUM_OFST 0
-#define MC_CMD_UART_RECV_DATA_IN_CHECKSUM_LEN 4
-/* Offset at which to write the data */
-#define MC_CMD_UART_RECV_DATA_IN_RESERVED1_OFST 4
-#define MC_CMD_UART_RECV_DATA_IN_RESERVED1_LEN 4
-/* Length of data */
-#define MC_CMD_UART_RECV_DATA_IN_RESERVED2_OFST 8
-#define MC_CMD_UART_RECV_DATA_IN_RESERVED2_LEN 4
-/* Reserved for future use */
-#define MC_CMD_UART_RECV_DATA_IN_RESERVED3_OFST 12
-#define MC_CMD_UART_RECV_DATA_IN_RESERVED3_LEN 4
-#define MC_CMD_UART_RECV_DATA_IN_DATA_OFST 16
-#define MC_CMD_UART_RECV_DATA_IN_DATA_LEN 1
-#define MC_CMD_UART_RECV_DATA_IN_DATA_MINNUM 0
-#define MC_CMD_UART_RECV_DATA_IN_DATA_MAXNUM 236
-#define MC_CMD_UART_RECV_DATA_IN_DATA_MAXNUM_MCDI2 1004
-
-
-/***********************************/
-/* MC_CMD_READ_FUSES
- * Read data programmed into the device One-Time-Programmable (OTP) Fuses
- */
-#define MC_CMD_READ_FUSES 0xf0
-#undef MC_CMD_0xf0_PRIVILEGE_CTG
-
-#define MC_CMD_0xf0_PRIVILEGE_CTG SRIOV_CTG_INSECURE
-
-/* MC_CMD_READ_FUSES_IN msgrequest */
-#define MC_CMD_READ_FUSES_IN_LEN 8
-/* Offset in OTP to read */
-#define MC_CMD_READ_FUSES_IN_OFFSET_OFST 0
-#define MC_CMD_READ_FUSES_IN_OFFSET_LEN 4
-/* Length of data to read in bytes */
-#define MC_CMD_READ_FUSES_IN_LENGTH_OFST 4
-#define MC_CMD_READ_FUSES_IN_LENGTH_LEN 4
-
-/* MC_CMD_READ_FUSES_OUT msgresponse */
-#define MC_CMD_READ_FUSES_OUT_LENMIN 4
-#define MC_CMD_READ_FUSES_OUT_LENMAX 252
-#define MC_CMD_READ_FUSES_OUT_LENMAX_MCDI2 1020
-#define MC_CMD_READ_FUSES_OUT_LEN(num) (4+1*(num))
-#define MC_CMD_READ_FUSES_OUT_DATA_NUM(len) (((len)-4)/1)
-/* Length of returned OTP data in bytes */
-#define MC_CMD_READ_FUSES_OUT_LENGTH_OFST 0
-#define MC_CMD_READ_FUSES_OUT_LENGTH_LEN 4
-/* Returned data */
-#define MC_CMD_READ_FUSES_OUT_DATA_OFST 4
-#define MC_CMD_READ_FUSES_OUT_DATA_LEN 1
-#define MC_CMD_READ_FUSES_OUT_DATA_MINNUM 0
-#define MC_CMD_READ_FUSES_OUT_DATA_MAXNUM 248
-#define MC_CMD_READ_FUSES_OUT_DATA_MAXNUM_MCDI2 1016
-
-
-/***********************************/
/* MC_CMD_KR_TUNE
* Get or set KR Serdes RXEQ and TX Driver settings
*/
#define MC_CMD_KR_TUNE 0xf1
#undef MC_CMD_0xf1_PRIVILEGE_CTG
-#define MC_CMD_0xf1_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0xf1_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
/* MC_CMD_KR_TUNE_IN msgrequest */
#define MC_CMD_KR_TUNE_IN_LENMIN 4
@@ -21138,262 +21098,6 @@
/***********************************/
-/* MC_CMD_PCIE_TUNE
- * Get or set PCIE Serdes RXEQ and TX Driver settings
- */
-#define MC_CMD_PCIE_TUNE 0xf2
-#undef MC_CMD_0xf2_PRIVILEGE_CTG
-
-#define MC_CMD_0xf2_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_PCIE_TUNE_IN msgrequest */
-#define MC_CMD_PCIE_TUNE_IN_LENMIN 4
-#define MC_CMD_PCIE_TUNE_IN_LENMAX 252
-#define MC_CMD_PCIE_TUNE_IN_LENMAX_MCDI2 1020
-#define MC_CMD_PCIE_TUNE_IN_LEN(num) (4+4*(num))
-#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_ARGS_NUM(len) (((len)-4)/4)
-/* Requested operation */
-#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_OP_OFST 0
-#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_OP_LEN 1
-/* enum: Get current RXEQ settings */
-#define MC_CMD_PCIE_TUNE_IN_RXEQ_GET 0x0
-/* enum: Override RXEQ settings */
-#define MC_CMD_PCIE_TUNE_IN_RXEQ_SET 0x1
-/* enum: Get current TX Driver settings */
-#define MC_CMD_PCIE_TUNE_IN_TXEQ_GET 0x2
-/* enum: Override TX Driver settings */
-#define MC_CMD_PCIE_TUNE_IN_TXEQ_SET 0x3
-/* enum: Start PCIe Serdes Eye diagram plot on a given lane. */
-#define MC_CMD_PCIE_TUNE_IN_START_EYE_PLOT 0x5
-/* enum: Poll PCIe Serdes Eye diagram plot. Returns one row of BER data. The
- * caller should call this command repeatedly after starting the eye plot,
- * until no more data is returned.
- */
-#define MC_CMD_PCIE_TUNE_IN_POLL_EYE_PLOT 0x6
-/* enum: Enable the SERDES BIST and set it to generate a 200MHz square wave */
-#define MC_CMD_PCIE_TUNE_IN_BIST_SQUARE_WAVE 0x7
-/* Align the arguments to 32 bits */
-#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_RSVD_OFST 1
-#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_RSVD_LEN 3
-/* Arguments specific to the operation */
-#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_ARGS_OFST 4
-#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_ARGS_LEN 4
-#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_ARGS_MINNUM 0
-#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_ARGS_MAXNUM 62
-#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_ARGS_MAXNUM_MCDI2 254
-
-/* MC_CMD_PCIE_TUNE_OUT msgresponse */
-#define MC_CMD_PCIE_TUNE_OUT_LEN 0
-
-/* MC_CMD_PCIE_TUNE_RXEQ_GET_IN msgrequest */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_IN_LEN 4
-/* Requested operation */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_IN_PCIE_TUNE_OP_OFST 0
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_IN_PCIE_TUNE_OP_LEN 1
-/* Align the arguments to 32 bits */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_IN_PCIE_TUNE_RSVD_OFST 1
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_IN_PCIE_TUNE_RSVD_LEN 3
-
-/* MC_CMD_PCIE_TUNE_RXEQ_GET_OUT msgresponse */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LENMIN 4
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LENMAX 252
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LENMAX_MCDI2 1020
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LEN(num) (0+4*(num))
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_NUM(len) (((len)-0)/4)
-/* RXEQ Parameter */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_OFST 0
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_LEN 4
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_MINNUM 1
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_MAXNUM 63
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_MAXNUM_MCDI2 255
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_ID_OFST 0
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_ID_LBN 0
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_ID_WIDTH 8
-/* enum: Attenuation (0-15) */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_ATT 0x0
-/* enum: CTLE Boost (0-15) */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_BOOST 0x1
-/* enum: DFE Tap1 (0 - max negative, 64 - zero, 127 - max positive) */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP1 0x2
-/* enum: DFE Tap2 (0 - max negative, 32 - zero, 63 - max positive) */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP2 0x3
-/* enum: DFE Tap3 (0 - max negative, 32 - zero, 63 - max positive) */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP3 0x4
-/* enum: DFE Tap4 (0 - max negative, 32 - zero, 63 - max positive) */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP4 0x5
-/* enum: DFE Tap5 (0 - max negative, 32 - zero, 63 - max positive) */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP5 0x6
-/* enum: DFE DLev */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_DLEV 0x7
-/* enum: Figure of Merit */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_FOM 0x8
-/* enum: CTLE EQ Capacitor (HF Gain) */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_CTLE_EQC 0x9
-/* enum: CTLE EQ Resistor (DC Gain) */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_CTLE_EQRES 0xa
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_LANE_OFST 0
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_LANE_LBN 8
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_LANE_WIDTH 5
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_0 0x0 /* enum */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_1 0x1 /* enum */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_2 0x2 /* enum */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_3 0x3 /* enum */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_4 0x4 /* enum */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_5 0x5 /* enum */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_6 0x6 /* enum */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_7 0x7 /* enum */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_8 0x8 /* enum */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_9 0x9 /* enum */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_10 0xa /* enum */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_11 0xb /* enum */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_12 0xc /* enum */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_13 0xd /* enum */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_14 0xe /* enum */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_15 0xf /* enum */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_ALL 0x10 /* enum */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_AUTOCAL_OFST 0
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_AUTOCAL_LBN 13
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_AUTOCAL_WIDTH 1
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_RESERVED_OFST 0
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_RESERVED_LBN 14
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_RESERVED_WIDTH 10
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_CURRENT_OFST 0
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_CURRENT_LBN 24
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_CURRENT_WIDTH 8
-
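/*
 * Illustrative sketch (not part of the MCDI-generated definitions above):
 * decoding one PARAM dword of MC_CMD_PCIE_TUNE_RXEQ_GET_OUT with the LBN
 * (least significant bit number) / WIDTH pairs defined above. Plain C,
 * kernel types assumed; the function names are hypothetical.
 */
static inline unsigned int mcdi_bitfield(u32 dword, unsigned int lbn,
					 unsigned int width)
{
	return (dword >> lbn) & ((1u << width) - 1);
}

/* param is one little-endian dword from the PARAM array */
static void example_decode_rxeq_param(u32 param, unsigned int *id,
				      unsigned int *lane, unsigned int *curr)
{
	/* id selects ATT/BOOST/DFE_TAPn/..., lane selects LANE_0..LANE_ALL,
	 * curr is the 8-bit current value of that parameter.
	 */
	*id   = mcdi_bitfield(param, MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_ID_LBN,
			      MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_ID_WIDTH);
	*lane = mcdi_bitfield(param, MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_LANE_LBN,
			      MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_LANE_WIDTH);
	*curr = mcdi_bitfield(param,
			      MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_CURRENT_LBN,
			      MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_CURRENT_WIDTH);
}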
-/* MC_CMD_PCIE_TUNE_RXEQ_SET_IN msgrequest */
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_LENMIN 8
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_LENMAX 252
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_LENMAX_MCDI2 1020
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_LEN(num) (4+4*(num))
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_NUM(len) (((len)-4)/4)
-/* Requested operation */
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PCIE_TUNE_OP_OFST 0
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PCIE_TUNE_OP_LEN 1
-/* Align the arguments to 32 bits */
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PCIE_TUNE_RSVD_OFST 1
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PCIE_TUNE_RSVD_LEN 3
-/* RXEQ Parameter */
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_OFST 4
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_LEN 4
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_MINNUM 1
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_MAXNUM 62
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_MAXNUM_MCDI2 254
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_ID_OFST 4
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_ID_LBN 0
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_ID_WIDTH 8
-/* Enum values, see field(s): */
-/* MC_CMD_PCIE_TUNE_RXEQ_GET_OUT/PARAM_ID */
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_LANE_OFST 4
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_LANE_LBN 8
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_LANE_WIDTH 5
-/* Enum values, see field(s): */
-/* MC_CMD_PCIE_TUNE_RXEQ_GET_OUT/PARAM_LANE */
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_AUTOCAL_OFST 4
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_AUTOCAL_LBN 13
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_AUTOCAL_WIDTH 1
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_RESERVED_OFST 4
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_RESERVED_LBN 14
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_RESERVED_WIDTH 2
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_INITIAL_OFST 4
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_INITIAL_LBN 16
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_INITIAL_WIDTH 8
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_RESERVED2_OFST 4
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_RESERVED2_LBN 24
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_RESERVED2_WIDTH 8
-
-/* MC_CMD_PCIE_TUNE_RXEQ_SET_OUT msgresponse */
-#define MC_CMD_PCIE_TUNE_RXEQ_SET_OUT_LEN 0
-
-/* MC_CMD_PCIE_TUNE_TXEQ_GET_IN msgrequest */
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_IN_LEN 4
-/* Requested operation */
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_IN_PCIE_TUNE_OP_OFST 0
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_IN_PCIE_TUNE_OP_LEN 1
-/* Align the arguments to 32 bits */
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_IN_PCIE_TUNE_RSVD_OFST 1
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_IN_PCIE_TUNE_RSVD_LEN 3
-
-/* MC_CMD_PCIE_TUNE_TXEQ_GET_OUT msgresponse */
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_LENMIN 4
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_LENMAX 252
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_LENMAX_MCDI2 1020
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_LEN(num) (0+4*(num))
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_NUM(len) (((len)-0)/4)
-/* TXEQ Parameter */
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_OFST 0
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_LEN 4
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_MINNUM 1
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_MAXNUM 63
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_MAXNUM_MCDI2 255
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_ID_OFST 0
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_ID_LBN 0
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_ID_WIDTH 8
-/* enum: TxMargin (PIPE) */
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_TXMARGIN 0x0
-/* enum: TxSwing (PIPE) */
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_TXSWING 0x1
-/* enum: De-emphasis coefficient C(-1) (PIPE) */
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_CM1 0x2
-/* enum: De-emphasis coefficient C(0) (PIPE) */
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_C0 0x3
-/* enum: De-emphasis coefficient C(+1) (PIPE) */
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_CP1 0x4
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_LANE_OFST 0
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_LANE_LBN 8
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_LANE_WIDTH 4
-/* Enum values, see field(s): */
-/* MC_CMD_PCIE_TUNE_RXEQ_GET_OUT/PARAM_LANE */
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_RESERVED_OFST 0
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_RESERVED_LBN 12
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_RESERVED_WIDTH 12
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_CURRENT_OFST 0
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_CURRENT_LBN 24
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_CURRENT_WIDTH 8
-
-/* MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN msgrequest */
-#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_LEN 8
-/* Requested operation */
-#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_PCIE_TUNE_OP_OFST 0
-#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_PCIE_TUNE_OP_LEN 1
-/* Align the arguments to 32 bits */
-#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_PCIE_TUNE_RSVD_OFST 1
-#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_PCIE_TUNE_RSVD_LEN 3
-#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_LANE_OFST 4
-#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_LANE_LEN 4
-
-/* MC_CMD_PCIE_TUNE_START_EYE_PLOT_OUT msgresponse */
-#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_OUT_LEN 0
-
-/* MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_IN msgrequest */
-#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_IN_LEN 4
-/* Requested operation */
-#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_IN_PCIE_TUNE_OP_OFST 0
-#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_IN_PCIE_TUNE_OP_LEN 1
-/* Align the arguments to 32 bits */
-#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_IN_PCIE_TUNE_RSVD_OFST 1
-#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_IN_PCIE_TUNE_RSVD_LEN 3
-
-/* MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT msgresponse */
-#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_LENMIN 0
-#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_LENMAX 252
-#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_LENMAX_MCDI2 1020
-#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_LEN(num) (0+2*(num))
-#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_NUM(len) (((len)-0)/2)
-#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_OFST 0
-#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_LEN 2
-#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_MINNUM 0
-#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_MAXNUM 126
-#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_MAXNUM_MCDI2 510
-
-/* MC_CMD_PCIE_TUNE_BIST_SQUARE_WAVE_IN msgrequest */
-#define MC_CMD_PCIE_TUNE_BIST_SQUARE_WAVE_IN_LEN 0
-
-/* MC_CMD_PCIE_TUNE_BIST_SQUARE_WAVE_OUT msgresponse */
-#define MC_CMD_PCIE_TUNE_BIST_SQUARE_WAVE_OUT_LEN 0
-
-
-/***********************************/
/* MC_CMD_LICENSING
* Operations on the NVRAM_PARTITION_TYPE_LICENSE application license partition
* - not used for V3 licensing
@@ -21532,56 +21236,6 @@
/***********************************/
-/* MC_CMD_LICENSING_GET_ID_V3
- * Get ID and type from the NVRAM_PARTITION_TYPE_LICENSE application license
- * partition - V3 licensing (Medford)
- */
-#define MC_CMD_LICENSING_GET_ID_V3 0xd1
-#undef MC_CMD_0xd1_PRIVILEGE_CTG
-
-#define MC_CMD_0xd1_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_LICENSING_GET_ID_V3_IN msgrequest */
-#define MC_CMD_LICENSING_GET_ID_V3_IN_LEN 0
-
-/* MC_CMD_LICENSING_GET_ID_V3_OUT msgresponse */
-#define MC_CMD_LICENSING_GET_ID_V3_OUT_LENMIN 8
-#define MC_CMD_LICENSING_GET_ID_V3_OUT_LENMAX 252
-#define MC_CMD_LICENSING_GET_ID_V3_OUT_LENMAX_MCDI2 1020
-#define MC_CMD_LICENSING_GET_ID_V3_OUT_LEN(num) (8+1*(num))
-#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_NUM(len) (((len)-8)/1)
-/* type of license (e.g. 3) */
-#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_TYPE_OFST 0
-#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_TYPE_LEN 4
-/* length of the license ID (in bytes) */
-#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_LENGTH_OFST 4
-#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_LENGTH_LEN 4
-/* the unique license ID of the adapter */
-#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_OFST 8
-#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_LEN 1
-#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_MINNUM 0
-#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_MAXNUM 244
-#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_MAXNUM_MCDI2 1012
-
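/*
 * Illustrative sketch (not part of the MCDI-generated definitions above):
 * the LENMIN/LENMAX/LEN(num)/_NUM(len) macros describe a variable-length
 * response. Given the actual response length in bytes, the number of
 * LICENSE_ID bytes is recovered with the _NUM macro. Plain C; the function
 * name and its use are hypothetical.
 */
static size_t example_license_id_bytes(size_t outlen)
{
	if (outlen < MC_CMD_LICENSING_GET_ID_V3_OUT_LENMIN)
		return 0;	/* malformed response */
	/* LEN(num) = 8 + 1*num, so NUM(len) = (len - 8) / 1 */
	return MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_NUM(outlen);
}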
-
-/***********************************/
-/* MC_CMD_MC2MC_PROXY
- * Execute an arbitrary MCDI command on the slave MC of a dual-core device.
- * This will fail on a single-core system.
- */
-#define MC_CMD_MC2MC_PROXY 0xf4
-#undef MC_CMD_0xf4_PRIVILEGE_CTG
-
-#define MC_CMD_0xf4_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_MC2MC_PROXY_IN msgrequest */
-#define MC_CMD_MC2MC_PROXY_IN_LEN 0
-
-/* MC_CMD_MC2MC_PROXY_OUT msgresponse */
-#define MC_CMD_MC2MC_PROXY_OUT_LEN 0
-
-
-/***********************************/
/* MC_CMD_GET_LICENSED_APP_STATE
* Query the state of an individual licensed application. (Note that the actual
* state may be invalidated by the MC_CMD_LICENSING OP_UPDATE_LICENSE operation
@@ -21610,424 +21264,6 @@
/***********************************/
-/* MC_CMD_GET_LICENSED_V3_APP_STATE
- * Query the state of an individual licensed application. (Note that the actual
- * state may be invalidated by the MC_CMD_LICENSING_V3 OP_UPDATE_LICENSE
- * operation or a reboot of the MC.) Used for V3 licensing (Medford)
- */
-#define MC_CMD_GET_LICENSED_V3_APP_STATE 0xd2
-#undef MC_CMD_0xd2_PRIVILEGE_CTG
-
-#define MC_CMD_0xd2_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_GET_LICENSED_V3_APP_STATE_IN msgrequest */
-#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_LEN 8
-/* application ID to query (LICENSED_V3_APPS_xxx) expressed as a single bit
- * mask
- */
-#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_OFST 0
-#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_LEN 8
-#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_LO_OFST 0
-#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_LO_LEN 4
-#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_LO_LBN 0
-#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_LO_WIDTH 32
-#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_HI_OFST 4
-#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_HI_LEN 4
-#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_HI_LBN 32
-#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_HI_WIDTH 32
-
-/* MC_CMD_GET_LICENSED_V3_APP_STATE_OUT msgresponse */
-#define MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_LEN 4
-/* state of this application */
-#define MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_STATE_OFST 0
-#define MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_STATE_LEN 4
-/* enum: no (or invalid) license is present for the application */
-#define MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_NOT_LICENSED 0x0
-/* enum: a valid license is present for the application */
-#define MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_LICENSED 0x1
-
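/*
 * Illustrative sketch (not part of the MCDI-generated definitions above):
 * writing a 64-bit MCDI field that is split into _LO/_HI dwords, as APP_ID
 * is in MC_CMD_GET_LICENSED_V3_APP_STATE_IN. MCDI payloads are little-endian;
 * driver helpers normally hide this, but the raw layout is shown here.
 * put_unaligned_le32() is the kernel helper from <asm/unaligned.h>; the
 * function name is hypothetical.
 */
static void example_set_app_id(u8 *inbuf, u64 app_id)
{
	put_unaligned_le32((u32)app_id,
		inbuf + MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_LO_OFST);
	put_unaligned_le32((u32)(app_id >> 32),
		inbuf + MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_HI_OFST);
}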
-
-/***********************************/
-/* MC_CMD_GET_LICENSED_V3_FEATURE_STATES
- * Query the state of one or more licensed features. (Note that the actual
- * state may be invalidated by the MC_CMD_LICENSING_V3 OP_UPDATE_LICENSE
- * operation or a reboot of the MC.) Used for V3 licensing (Medford)
- */
-#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES 0xd3
-#undef MC_CMD_0xd3_PRIVILEGE_CTG
-
-#define MC_CMD_0xd3_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN msgrequest */
-#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_LEN 8
-/* features to query (LICENSED_V3_FEATURES_xxx) expressed as a mask with one or
- * more bits set
- */
-#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_OFST 0
-#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_LEN 8
-#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_LO_OFST 0
-#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_LO_LEN 4
-#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_LO_LBN 0
-#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_LO_WIDTH 32
-#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_HI_OFST 4
-#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_HI_LEN 4
-#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_HI_LBN 32
-#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_HI_WIDTH 32
-
-/* MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT msgresponse */
-#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_LEN 8
-/* states of these features - bit set for licensed, clear for not licensed */
-#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_OFST 0
-#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_LEN 8
-#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_LO_OFST 0
-#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_LO_LEN 4
-#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_LO_LBN 0
-#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_LO_WIDTH 32
-#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_HI_OFST 4
-#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_HI_LEN 4
-#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_HI_LBN 32
-#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_HI_WIDTH 32
-
-
-/***********************************/
-/* MC_CMD_LICENSED_APP_OP
- * Perform an action for an individual licensed application - not used for V3
- * licensing.
- */
-#define MC_CMD_LICENSED_APP_OP 0xf6
-#undef MC_CMD_0xf6_PRIVILEGE_CTG
-
-#define MC_CMD_0xf6_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_LICENSED_APP_OP_IN msgrequest */
-#define MC_CMD_LICENSED_APP_OP_IN_LENMIN 8
-#define MC_CMD_LICENSED_APP_OP_IN_LENMAX 252
-#define MC_CMD_LICENSED_APP_OP_IN_LENMAX_MCDI2 1020
-#define MC_CMD_LICENSED_APP_OP_IN_LEN(num) (8+4*(num))
-#define MC_CMD_LICENSED_APP_OP_IN_ARGS_NUM(len) (((len)-8)/4)
-/* application ID */
-#define MC_CMD_LICENSED_APP_OP_IN_APP_ID_OFST 0
-#define MC_CMD_LICENSED_APP_OP_IN_APP_ID_LEN 4
-/* the type of operation requested */
-#define MC_CMD_LICENSED_APP_OP_IN_OP_OFST 4
-#define MC_CMD_LICENSED_APP_OP_IN_OP_LEN 4
-/* enum: validate application */
-#define MC_CMD_LICENSED_APP_OP_IN_OP_VALIDATE 0x0
-/* enum: mask application */
-#define MC_CMD_LICENSED_APP_OP_IN_OP_MASK 0x1
-/* arguments specific to this particular operation */
-#define MC_CMD_LICENSED_APP_OP_IN_ARGS_OFST 8
-#define MC_CMD_LICENSED_APP_OP_IN_ARGS_LEN 4
-#define MC_CMD_LICENSED_APP_OP_IN_ARGS_MINNUM 0
-#define MC_CMD_LICENSED_APP_OP_IN_ARGS_MAXNUM 61
-#define MC_CMD_LICENSED_APP_OP_IN_ARGS_MAXNUM_MCDI2 253
-
-/* MC_CMD_LICENSED_APP_OP_OUT msgresponse */
-#define MC_CMD_LICENSED_APP_OP_OUT_LENMIN 0
-#define MC_CMD_LICENSED_APP_OP_OUT_LENMAX 252
-#define MC_CMD_LICENSED_APP_OP_OUT_LENMAX_MCDI2 1020
-#define MC_CMD_LICENSED_APP_OP_OUT_LEN(num) (0+4*(num))
-#define MC_CMD_LICENSED_APP_OP_OUT_RESULT_NUM(len) (((len)-0)/4)
-/* result specific to this particular operation */
-#define MC_CMD_LICENSED_APP_OP_OUT_RESULT_OFST 0
-#define MC_CMD_LICENSED_APP_OP_OUT_RESULT_LEN 4
-#define MC_CMD_LICENSED_APP_OP_OUT_RESULT_MINNUM 0
-#define MC_CMD_LICENSED_APP_OP_OUT_RESULT_MAXNUM 63
-#define MC_CMD_LICENSED_APP_OP_OUT_RESULT_MAXNUM_MCDI2 255
-
-/* MC_CMD_LICENSED_APP_OP_VALIDATE_IN msgrequest */
-#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_LEN 72
-/* application ID */
-#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_APP_ID_OFST 0
-#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_APP_ID_LEN 4
-/* the type of operation requested */
-#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_OP_OFST 4
-#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_OP_LEN 4
-/* validation challenge */
-#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_CHALLENGE_OFST 8
-#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_CHALLENGE_LEN 64
-
-/* MC_CMD_LICENSED_APP_OP_VALIDATE_OUT msgresponse */
-#define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_LEN 68
-/* feature expiry (time_t) */
-#define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_EXPIRY_OFST 0
-#define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_EXPIRY_LEN 4
-/* validation response */
-#define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_RESPONSE_OFST 4
-#define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_RESPONSE_LEN 64
-
-/* MC_CMD_LICENSED_APP_OP_MASK_IN msgrequest */
-#define MC_CMD_LICENSED_APP_OP_MASK_IN_LEN 12
-/* application ID */
-#define MC_CMD_LICENSED_APP_OP_MASK_IN_APP_ID_OFST 0
-#define MC_CMD_LICENSED_APP_OP_MASK_IN_APP_ID_LEN 4
-/* the type of operation requested */
-#define MC_CMD_LICENSED_APP_OP_MASK_IN_OP_OFST 4
-#define MC_CMD_LICENSED_APP_OP_MASK_IN_OP_LEN 4
-/* flag */
-#define MC_CMD_LICENSED_APP_OP_MASK_IN_FLAG_OFST 8
-#define MC_CMD_LICENSED_APP_OP_MASK_IN_FLAG_LEN 4
-
-/* MC_CMD_LICENSED_APP_OP_MASK_OUT msgresponse */
-#define MC_CMD_LICENSED_APP_OP_MASK_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_LICENSED_V3_VALIDATE_APP
- * Perform validation for an individual licensed application - V3 licensing
- * (Medford)
- */
-#define MC_CMD_LICENSED_V3_VALIDATE_APP 0xd4
-#undef MC_CMD_0xd4_PRIVILEGE_CTG
-
-#define MC_CMD_0xd4_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_LICENSED_V3_VALIDATE_APP_IN msgrequest */
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_LEN 56
-/* challenge for validation (384 bits) */
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_CHALLENGE_OFST 0
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_CHALLENGE_LEN 48
-/* application ID expressed as a single bit mask */
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_OFST 48
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_LEN 8
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_LO_OFST 48
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_LO_LEN 4
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_LO_LBN 384
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_LO_WIDTH 32
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_HI_OFST 52
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_HI_LEN 4
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_HI_LBN 416
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_HI_WIDTH 32
-
-/* MC_CMD_LICENSED_V3_VALIDATE_APP_OUT msgresponse */
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_LEN 116
-/* validation response to challenge in the form of ECDSA signature consisting
- * of two 384-bit integers, r and s, in big-endian order. The signature signs a
- * SHA-384 digest of a message constructed from the concatenation of the input
- * message and the remaining fields of this output message, e.g. challenge[48
- * bytes] ... expiry_time[4 bytes] ...
- */
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_RESPONSE_OFST 0
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_RESPONSE_LEN 96
-/* application expiry time */
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_TIME_OFST 96
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_TIME_LEN 4
-/* application expiry units */
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_UNITS_OFST 100
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_UNITS_LEN 4
-/* enum: expiry units are accounting units */
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_UNIT_ACC 0x0
-/* enum: expiry units are calendar days */
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_UNIT_DAYS 0x1
-/* base MAC address of the NIC stored in NVRAM (note that this is a constant
- * value for a given NIC regardless of which function is calling; effectively
- * this is the PF0 base MAC address)
- */
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_BASE_MACADDR_OFST 104
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_BASE_MACADDR_LEN 6
-/* MAC address of the v-adaptor associated with the client. If no such
- * v-adaptor exists, then the field is filled with 0xFF.
- */
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_VADAPTOR_MACADDR_OFST 110
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_VADAPTOR_MACADDR_LEN 6
-
-
-/***********************************/
-/* MC_CMD_LICENSED_V3_MASK_FEATURES
- * Mask features - V3 licensing (Medford)
- */
-#define MC_CMD_LICENSED_V3_MASK_FEATURES 0xd5
-#undef MC_CMD_0xd5_PRIVILEGE_CTG
-
-#define MC_CMD_0xd5_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_LICENSED_V3_MASK_FEATURES_IN msgrequest */
-#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_LEN 12
-/* mask to be applied to features to be changed */
-#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_OFST 0
-#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_LEN 8
-#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_LO_OFST 0
-#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_LO_LEN 4
-#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_LO_LBN 0
-#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_LO_WIDTH 32
-#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_HI_OFST 4
-#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_HI_LEN 4
-#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_HI_LBN 32
-#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_HI_WIDTH 32
-/* whether to turn on or turn off the masked features */
-#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_FLAG_OFST 8
-#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_FLAG_LEN 4
-/* enum: turn the features off */
-#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_OFF 0x0
-/* enum: turn the features back on */
-#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_ON 0x1
-
-/* MC_CMD_LICENSED_V3_MASK_FEATURES_OUT msgresponse */
-#define MC_CMD_LICENSED_V3_MASK_FEATURES_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_LICENSING_V3_TEMPORARY
- * Perform operations to support installation of a single temporary license in
- * the adapter, in addition to those found in the licensing partition. See
- * SF-116124-SW for an overview of how this could be used. The license is
- * stored in MC persistent data and so will survive an MC reboot, but will be
- * erased when the adapter is power-cycled.
- */
-#define MC_CMD_LICENSING_V3_TEMPORARY 0xd6
-#undef MC_CMD_0xd6_PRIVILEGE_CTG
-
-#define MC_CMD_0xd6_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_LICENSING_V3_TEMPORARY_IN msgrequest */
-#define MC_CMD_LICENSING_V3_TEMPORARY_IN_LEN 4
-/* operation code */
-#define MC_CMD_LICENSING_V3_TEMPORARY_IN_OP_OFST 0
-#define MC_CMD_LICENSING_V3_TEMPORARY_IN_OP_LEN 4
-/* enum: install a new license, overwriting any existing temporary license.
- * This is an asynchronous operation owing to the time taken to validate an
- * ECDSA license
- */
-#define MC_CMD_LICENSING_V3_TEMPORARY_SET 0x0
-/* enum: clear the license immediately rather than waiting for the next power
- * cycle
- */
-#define MC_CMD_LICENSING_V3_TEMPORARY_CLEAR 0x1
-/* enum: get the status of the asynchronous MC_CMD_LICENSING_V3_TEMPORARY_SET
- * operation
- */
-#define MC_CMD_LICENSING_V3_TEMPORARY_STATUS 0x2
-
-/* MC_CMD_LICENSING_V3_TEMPORARY_IN_SET msgrequest */
-#define MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_LEN 164
-#define MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_OP_OFST 0
-#define MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_OP_LEN 4
-/* ECDSA license and signature */
-#define MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_LICENSE_OFST 4
-#define MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_LICENSE_LEN 160
-
-/* MC_CMD_LICENSING_V3_TEMPORARY_IN_CLEAR msgrequest */
-#define MC_CMD_LICENSING_V3_TEMPORARY_IN_CLEAR_LEN 4
-#define MC_CMD_LICENSING_V3_TEMPORARY_IN_CLEAR_OP_OFST 0
-#define MC_CMD_LICENSING_V3_TEMPORARY_IN_CLEAR_OP_LEN 4
-
-/* MC_CMD_LICENSING_V3_TEMPORARY_IN_STATUS msgrequest */
-#define MC_CMD_LICENSING_V3_TEMPORARY_IN_STATUS_LEN 4
-#define MC_CMD_LICENSING_V3_TEMPORARY_IN_STATUS_OP_OFST 0
-#define MC_CMD_LICENSING_V3_TEMPORARY_IN_STATUS_OP_LEN 4
-
-/* MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS msgresponse */
-#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LEN 12
-/* status code */
-#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_STATUS_OFST 0
-#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_STATUS_LEN 4
-/* enum: finished validating and installing license */
-#define MC_CMD_LICENSING_V3_TEMPORARY_STATUS_OK 0x0
-/* enum: license validation and installation in progress */
-#define MC_CMD_LICENSING_V3_TEMPORARY_STATUS_IN_PROGRESS 0x1
-/* enum: licensing error. More specific error messages are not provided to
- * avoid exposing details of the licensing system to the client
- */
-#define MC_CMD_LICENSING_V3_TEMPORARY_STATUS_ERROR 0x2
-/* bitmask of licensed features */
-#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LICENSED_FEATURES_OFST 4
-#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LICENSED_FEATURES_LEN 8
-#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LICENSED_FEATURES_LO_OFST 4
-#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LICENSED_FEATURES_LO_LEN 4
-#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LICENSED_FEATURES_LO_LBN 32
-#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LICENSED_FEATURES_LO_WIDTH 32
-#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LICENSED_FEATURES_HI_OFST 8
-#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LICENSED_FEATURES_HI_LEN 4
-#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LICENSED_FEATURES_HI_LBN 64
-#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LICENSED_FEATURES_HI_WIDTH 32
-
-
-/***********************************/
-/* MC_CMD_SET_PORT_SNIFF_CONFIG
- * Configure RX port sniffing for the physical port associated with the calling
- * function. Only a privileged function may change the port sniffing
- * configuration. A copy of all traffic delivered to the host (non-promiscuous
- * mode) or all traffic arriving at the port (promiscuous mode) may be
- * delivered to a specific queue, or a set of queues with RSS.
- */
-#define MC_CMD_SET_PORT_SNIFF_CONFIG 0xf7
-#undef MC_CMD_0xf7_PRIVILEGE_CTG
-
-#define MC_CMD_0xf7_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_SET_PORT_SNIFF_CONFIG_IN msgrequest */
-#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_LEN 16
-/* configuration flags */
-#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_FLAGS_OFST 0
-#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_FLAGS_LEN 4
-#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_ENABLE_OFST 0
-#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_ENABLE_LBN 0
-#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_ENABLE_WIDTH 1
-#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_PROMISCUOUS_OFST 0
-#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_PROMISCUOUS_LBN 1
-#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_PROMISCUOUS_WIDTH 1
-/* receive queue handle (for RSS mode, this is the base queue) */
-#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_QUEUE_OFST 4
-#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_QUEUE_LEN 4
-/* receive mode */
-#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_MODE_OFST 8
-#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_MODE_LEN 4
-/* enum: receive to just the specified queue */
-#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_MODE_SIMPLE 0x0
-/* enum: receive to multiple queues using RSS context */
-#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_MODE_RSS 0x1
-/* RSS context (for RX_MODE_RSS) as returned by MC_CMD_RSS_CONTEXT_ALLOC. Note
- * that these handles should be considered opaque to the host, although a value
- * of 0xFFFFFFFF is guaranteed never to be a valid handle.
- */
-#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_CONTEXT_OFST 12
-#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_CONTEXT_LEN 4
-
-/* MC_CMD_SET_PORT_SNIFF_CONFIG_OUT msgresponse */
-#define MC_CMD_SET_PORT_SNIFF_CONFIG_OUT_LEN 0
-
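/*
 * Illustrative sketch (not part of the MCDI-generated definitions above):
 * building the FLAGS dword of MC_CMD_SET_PORT_SNIFF_CONFIG_IN from the
 * single-bit ENABLE and PROMISCUOUS fields (LBN gives the bit position,
 * WIDTH is 1). Plain C; the function name is hypothetical.
 */
static u32 example_port_sniff_flags(bool enable, bool promiscuous)
{
	u32 flags = 0;

	if (enable)
		flags |= 1u << MC_CMD_SET_PORT_SNIFF_CONFIG_IN_ENABLE_LBN;
	if (promiscuous)
		flags |= 1u << MC_CMD_SET_PORT_SNIFF_CONFIG_IN_PROMISCUOUS_LBN;
	/* written to FLAGS_OFST as a little-endian dword */
	return flags;
}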
-
-/***********************************/
-/* MC_CMD_GET_PORT_SNIFF_CONFIG
- * Obtain the current RX port sniffing configuration for the physical port
- * associated with the calling function. Only a privileged function may read
- * the configuration.
- */
-#define MC_CMD_GET_PORT_SNIFF_CONFIG 0xf8
-#undef MC_CMD_0xf8_PRIVILEGE_CTG
-
-#define MC_CMD_0xf8_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_GET_PORT_SNIFF_CONFIG_IN msgrequest */
-#define MC_CMD_GET_PORT_SNIFF_CONFIG_IN_LEN 0
-
-/* MC_CMD_GET_PORT_SNIFF_CONFIG_OUT msgresponse */
-#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_LEN 16
-/* configuration flags */
-#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_FLAGS_OFST 0
-#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_FLAGS_LEN 4
-#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_ENABLE_OFST 0
-#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_ENABLE_LBN 0
-#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_ENABLE_WIDTH 1
-#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_PROMISCUOUS_OFST 0
-#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_PROMISCUOUS_LBN 1
-#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_PROMISCUOUS_WIDTH 1
-/* receiving queue handle (for RSS mode, this is the base queue) */
-#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_QUEUE_OFST 4
-#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_QUEUE_LEN 4
-/* receive mode */
-#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_MODE_OFST 8
-#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_MODE_LEN 4
-/* enum: receiving to just the specified queue */
-#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_MODE_SIMPLE 0x0
-/* enum: receiving to multiple queues using RSS context */
-#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_MODE_RSS 0x1
-/* RSS context (for RX_MODE_RSS) */
-#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_CONTEXT_OFST 12
-#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_CONTEXT_LEN 4
-
-
-/***********************************/
/* MC_CMD_SET_PARSER_DISP_CONFIG
* Change configuration related to the parser-dispatcher subsystem.
*/
@@ -22073,305 +21309,6 @@
/***********************************/
-/* MC_CMD_GET_PARSER_DISP_CONFIG
- * Read configuration related to the parser-dispatcher subsystem.
- */
-#define MC_CMD_GET_PARSER_DISP_CONFIG 0xfa
-#undef MC_CMD_0xfa_PRIVILEGE_CTG
-
-#define MC_CMD_0xfa_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_GET_PARSER_DISP_CONFIG_IN msgrequest */
-#define MC_CMD_GET_PARSER_DISP_CONFIG_IN_LEN 8
-/* the type of configuration setting to read */
-#define MC_CMD_GET_PARSER_DISP_CONFIG_IN_TYPE_OFST 0
-#define MC_CMD_GET_PARSER_DISP_CONFIG_IN_TYPE_LEN 4
-/* Enum values, see field(s): */
-/* MC_CMD_SET_PARSER_DISP_CONFIG/MC_CMD_SET_PARSER_DISP_CONFIG_IN/TYPE */
-/* handle for the entity to query: queue handle, EVB port ID, etc. depending on
- * the type of configuration setting being read
- */
-#define MC_CMD_GET_PARSER_DISP_CONFIG_IN_ENTITY_OFST 4
-#define MC_CMD_GET_PARSER_DISP_CONFIG_IN_ENTITY_LEN 4
-
-/* MC_CMD_GET_PARSER_DISP_CONFIG_OUT msgresponse */
-#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_LENMIN 4
-#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_LENMAX 252
-#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_LENMAX_MCDI2 1020
-#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_LEN(num) (0+4*(num))
-#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_VALUE_NUM(len) (((len)-0)/4)
-/* current value: the details depend on the type of configuration setting being
- * read
- */
-#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_VALUE_OFST 0
-#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_VALUE_LEN 4
-#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_VALUE_MINNUM 1
-#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_VALUE_MAXNUM 63
-#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_VALUE_MAXNUM_MCDI2 255
-
-
-/***********************************/
-/* MC_CMD_SET_TX_PORT_SNIFF_CONFIG
- * Configure TX port sniffing for the physical port associated with the calling
- * function. Only a privileged function may change the port sniffing
- * configuration. A copy of all traffic transmitted through the port may be
- * delivered to a specific queue, or a set of queues with RSS. Note that these
- * packets are delivered with transmit timestamps in the packet prefix, not
- * receive timestamps, so it is likely that the queue(s) will need to be
- * dedicated as TX sniff receivers.
- */
-#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG 0xfb
-#undef MC_CMD_0xfb_PRIVILEGE_CTG
-
-#define MC_CMD_0xfb_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN msgrequest */
-#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_LEN 16
-/* configuration flags */
-#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_FLAGS_OFST 0
-#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_FLAGS_LEN 4
-#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_ENABLE_OFST 0
-#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_ENABLE_LBN 0
-#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_ENABLE_WIDTH 1
-/* receive queue handle (for RSS mode, this is the base queue) */
-#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_QUEUE_OFST 4
-#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_QUEUE_LEN 4
-/* receive mode */
-#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_MODE_OFST 8
-#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_MODE_LEN 4
-/* enum: receive to just the specified queue */
-#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_MODE_SIMPLE 0x0
-/* enum: receive to multiple queues using RSS context */
-#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_MODE_RSS 0x1
-/* RSS context (for RX_MODE_RSS) as returned by MC_CMD_RSS_CONTEXT_ALLOC. Note
- * that these handles should be considered opaque to the host, although a value
- * of 0xFFFFFFFF is guaranteed never to be a valid handle.
- */
-#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_CONTEXT_OFST 12
-#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_CONTEXT_LEN 4
-
-/* MC_CMD_SET_TX_PORT_SNIFF_CONFIG_OUT msgresponse */
-#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_GET_TX_PORT_SNIFF_CONFIG
- * Obtain the current TX port sniffing configuration for the physical port
- * associated with the calling function. Only a privileged function may read
- * the configuration.
- */
-#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG 0xfc
-#undef MC_CMD_0xfc_PRIVILEGE_CTG
-
-#define MC_CMD_0xfc_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_GET_TX_PORT_SNIFF_CONFIG_IN msgrequest */
-#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_IN_LEN 0
-
-/* MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT msgresponse */
-#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_LEN 16
-/* configuration flags */
-#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_FLAGS_OFST 0
-#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_FLAGS_LEN 4
-#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_ENABLE_OFST 0
-#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_ENABLE_LBN 0
-#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_ENABLE_WIDTH 1
-/* receiving queue handle (for RSS mode, this is the base queue) */
-#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_QUEUE_OFST 4
-#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_QUEUE_LEN 4
-/* receive mode */
-#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_MODE_OFST 8
-#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_MODE_LEN 4
-/* enum: receiving to just the specified queue */
-#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_MODE_SIMPLE 0x0
-/* enum: receiving to multiple queues using RSS context */
-#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_MODE_RSS 0x1
-/* RSS context (for RX_MODE_RSS) */
-#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_CONTEXT_OFST 12
-#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_CONTEXT_LEN 4
-
-
-/***********************************/
-/* MC_CMD_RMON_STATS_RX_ERRORS
- * Per-queue RX error stats.
- */
-#define MC_CMD_RMON_STATS_RX_ERRORS 0xfe
-#undef MC_CMD_0xfe_PRIVILEGE_CTG
-
-#define MC_CMD_0xfe_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_RMON_STATS_RX_ERRORS_IN msgrequest */
-#define MC_CMD_RMON_STATS_RX_ERRORS_IN_LEN 8
-/* The rx queue to get stats for. */
-#define MC_CMD_RMON_STATS_RX_ERRORS_IN_RX_QUEUE_OFST 0
-#define MC_CMD_RMON_STATS_RX_ERRORS_IN_RX_QUEUE_LEN 4
-#define MC_CMD_RMON_STATS_RX_ERRORS_IN_FLAGS_OFST 4
-#define MC_CMD_RMON_STATS_RX_ERRORS_IN_FLAGS_LEN 4
-#define MC_CMD_RMON_STATS_RX_ERRORS_IN_RST_OFST 4
-#define MC_CMD_RMON_STATS_RX_ERRORS_IN_RST_LBN 0
-#define MC_CMD_RMON_STATS_RX_ERRORS_IN_RST_WIDTH 1
-
-/* MC_CMD_RMON_STATS_RX_ERRORS_OUT msgresponse */
-#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_LEN 16
-#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_CRC_ERRORS_OFST 0
-#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_CRC_ERRORS_LEN 4
-#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_TRUNC_ERRORS_OFST 4
-#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_TRUNC_ERRORS_LEN 4
-#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_RX_NO_DESC_DROPS_OFST 8
-#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_RX_NO_DESC_DROPS_LEN 4
-#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_RX_ABORT_OFST 12
-#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_RX_ABORT_LEN 4
-
-
-/***********************************/
-/* MC_CMD_GET_PCIE_RESOURCE_INFO
- * Find out about available PCIE resources
- */
-#define MC_CMD_GET_PCIE_RESOURCE_INFO 0xfd
-#undef MC_CMD_0xfd_PRIVILEGE_CTG
-
-#define MC_CMD_0xfd_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_GET_PCIE_RESOURCE_INFO_IN msgrequest */
-#define MC_CMD_GET_PCIE_RESOURCE_INFO_IN_LEN 0
-
-/* MC_CMD_GET_PCIE_RESOURCE_INFO_OUT msgresponse */
-#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_LEN 28
-/* The maximum number of PFs the device can expose */
-#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_PFS_OFST 0
-#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_PFS_LEN 4
-/* The maximum number of VFs the device can expose in total */
-#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VFS_OFST 4
-#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VFS_LEN 4
-/* The maximum number of MSI-X vectors the device can provide in total */
-#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VECTORS_OFST 8
-#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VECTORS_LEN 4
-/* the number of MSI-X vectors the device will allocate by default to each PF
- */
-#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_DEFAULT_PF_VECTORS_OFST 12
-#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_DEFAULT_PF_VECTORS_LEN 4
-/* the number of MSI-X vectors the device will allocate by default to each VF
- */
-#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_DEFAULT_VF_VECTORS_OFST 16
-#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_DEFAULT_VF_VECTORS_LEN 4
-/* the maximum number of MSI-X vectors the device can allocate to any one PF */
-#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_PF_VECTORS_OFST 20
-#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_PF_VECTORS_LEN 4
-/* the maximum number of MSI-X vectors the device can allocate to any one VF */
-#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VF_VECTORS_OFST 24
-#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VF_VECTORS_LEN 4
-
-
-/***********************************/
-/* MC_CMD_GET_PORT_MODES
- * Find out about available port modes
- */
-#define MC_CMD_GET_PORT_MODES 0xff
-#undef MC_CMD_0xff_PRIVILEGE_CTG
-
-#define MC_CMD_0xff_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_GET_PORT_MODES_IN msgrequest */
-#define MC_CMD_GET_PORT_MODES_IN_LEN 0
-
-/* MC_CMD_GET_PORT_MODES_OUT msgresponse */
-#define MC_CMD_GET_PORT_MODES_OUT_LEN 12
-/* Bitmask of port modes available on the board (indexed by TLV_PORT_MODE_*)
- * that are supported for customer use in production firmware.
- */
-#define MC_CMD_GET_PORT_MODES_OUT_MODES_OFST 0
-#define MC_CMD_GET_PORT_MODES_OUT_MODES_LEN 4
-/* Default (canonical) board mode */
-#define MC_CMD_GET_PORT_MODES_OUT_DEFAULT_MODE_OFST 4
-#define MC_CMD_GET_PORT_MODES_OUT_DEFAULT_MODE_LEN 4
-/* Current board mode */
-#define MC_CMD_GET_PORT_MODES_OUT_CURRENT_MODE_OFST 8
-#define MC_CMD_GET_PORT_MODES_OUT_CURRENT_MODE_LEN 4
-
-/* MC_CMD_GET_PORT_MODES_OUT_V2 msgresponse */
-#define MC_CMD_GET_PORT_MODES_OUT_V2_LEN 16
-/* Bitmask of port modes available on the board (indexed by TLV_PORT_MODE_*)
- * that are supported for customer use in production firmware.
- */
-#define MC_CMD_GET_PORT_MODES_OUT_V2_MODES_OFST 0
-#define MC_CMD_GET_PORT_MODES_OUT_V2_MODES_LEN 4
-/* Default (canonical) board mode */
-#define MC_CMD_GET_PORT_MODES_OUT_V2_DEFAULT_MODE_OFST 4
-#define MC_CMD_GET_PORT_MODES_OUT_V2_DEFAULT_MODE_LEN 4
-/* Current board mode */
-#define MC_CMD_GET_PORT_MODES_OUT_V2_CURRENT_MODE_OFST 8
-#define MC_CMD_GET_PORT_MODES_OUT_V2_CURRENT_MODE_LEN 4
-/* Bitmask of engineering port modes available on the board (indexed by
- * TLV_PORT_MODE_*). A superset of MC_CMD_GET_PORT_MODES_OUT/MODES that
- * contains all modes implemented in firmware for a particular board. Modes
- * listed in MODES are considered production modes and should be exposed in
- * userland tools. Modes listed in ENGINEERING_MODES but not in MODES
- * should be considered hidden (not to be exposed in userland tools) and for
- * engineering use only. There are no other semantic differences and any mode
- * listed in either MODES or ENGINEERING_MODES can be set on the board.
- */
-#define MC_CMD_GET_PORT_MODES_OUT_V2_ENGINEERING_MODES_OFST 12
-#define MC_CMD_GET_PORT_MODES_OUT_V2_ENGINEERING_MODES_LEN 4
-
-
-/***********************************/
-/* MC_CMD_OVERRIDE_PORT_MODE
- * Override flash config port mode for subsequent MC reboot(s). Override data
- * is stored in the persistent data section of DMEM and activated on the next
- * warm reboot. A cold reboot resets the override. It is assumed that a
- * sufficient number of PFs are available and that port mapping is valid for
- * the new port mode, as the override does not affect PF configuration.
- */
-#define MC_CMD_OVERRIDE_PORT_MODE 0x137
-#undef MC_CMD_0x137_PRIVILEGE_CTG
-
-#define MC_CMD_0x137_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_OVERRIDE_PORT_MODE_IN msgrequest */
-#define MC_CMD_OVERRIDE_PORT_MODE_IN_LEN 8
-#define MC_CMD_OVERRIDE_PORT_MODE_IN_FLAGS_OFST 0
-#define MC_CMD_OVERRIDE_PORT_MODE_IN_FLAGS_LEN 4
-#define MC_CMD_OVERRIDE_PORT_MODE_IN_ENABLE_OFST 0
-#define MC_CMD_OVERRIDE_PORT_MODE_IN_ENABLE_LBN 0
-#define MC_CMD_OVERRIDE_PORT_MODE_IN_ENABLE_WIDTH 1
-/* New mode (TLV_PORT_MODE_*) to set, if override enabled */
-#define MC_CMD_OVERRIDE_PORT_MODE_IN_MODE_OFST 4
-#define MC_CMD_OVERRIDE_PORT_MODE_IN_MODE_LEN 4
-
-/* MC_CMD_OVERRIDE_PORT_MODE_OUT msgresponse */
-#define MC_CMD_OVERRIDE_PORT_MODE_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_READ_ATB
- * Sample voltages on the ATB
- */
-#define MC_CMD_READ_ATB 0x100
-#undef MC_CMD_0x100_PRIVILEGE_CTG
-
-#define MC_CMD_0x100_PRIVILEGE_CTG SRIOV_CTG_INSECURE
-
-/* MC_CMD_READ_ATB_IN msgrequest */
-#define MC_CMD_READ_ATB_IN_LEN 16
-#define MC_CMD_READ_ATB_IN_SIGNAL_BUS_OFST 0
-#define MC_CMD_READ_ATB_IN_SIGNAL_BUS_LEN 4
-#define MC_CMD_READ_ATB_IN_BUS_CCOM 0x0 /* enum */
-#define MC_CMD_READ_ATB_IN_BUS_CKR 0x1 /* enum */
-#define MC_CMD_READ_ATB_IN_BUS_CPCIE 0x8 /* enum */
-#define MC_CMD_READ_ATB_IN_SIGNAL_EN_BITNO_OFST 4
-#define MC_CMD_READ_ATB_IN_SIGNAL_EN_BITNO_LEN 4
-#define MC_CMD_READ_ATB_IN_SIGNAL_SEL_OFST 8
-#define MC_CMD_READ_ATB_IN_SIGNAL_SEL_LEN 4
-#define MC_CMD_READ_ATB_IN_SETTLING_TIME_US_OFST 12
-#define MC_CMD_READ_ATB_IN_SETTLING_TIME_US_LEN 4
-
-/* MC_CMD_READ_ATB_OUT msgresponse */
-#define MC_CMD_READ_ATB_OUT_LEN 4
-#define MC_CMD_READ_ATB_OUT_SAMPLE_MV_OFST 0
-#define MC_CMD_READ_ATB_OUT_SAMPLE_MV_LEN 4
-
-
-/***********************************/
/* MC_CMD_GET_WORKAROUNDS
* Read the list of all implemented and all currently enabled workarounds. The
* enums here must correspond with those in MC_CMD_WORKAROUND.
@@ -22538,447 +21475,6 @@
#define MC_CMD_LINK_STATE_MODE_OUT_OLD_MODE_OFST 0
#define MC_CMD_LINK_STATE_MODE_OUT_OLD_MODE_LEN 4
-
-/***********************************/
-/* MC_CMD_GET_SNAPSHOT_LENGTH
- * Obtain the current range of allowable values for the SNAPSHOT_LENGTH
- * parameter to MC_CMD_INIT_RXQ.
- */
-#define MC_CMD_GET_SNAPSHOT_LENGTH 0x101
-#undef MC_CMD_0x101_PRIVILEGE_CTG
-
-#define MC_CMD_0x101_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_GET_SNAPSHOT_LENGTH_IN msgrequest */
-#define MC_CMD_GET_SNAPSHOT_LENGTH_IN_LEN 0
-
-/* MC_CMD_GET_SNAPSHOT_LENGTH_OUT msgresponse */
-#define MC_CMD_GET_SNAPSHOT_LENGTH_OUT_LEN 8
-/* Minimum acceptable snapshot length. */
-#define MC_CMD_GET_SNAPSHOT_LENGTH_OUT_RX_SNAPLEN_MIN_OFST 0
-#define MC_CMD_GET_SNAPSHOT_LENGTH_OUT_RX_SNAPLEN_MIN_LEN 4
-/* Maximum acceptable snapshot length. */
-#define MC_CMD_GET_SNAPSHOT_LENGTH_OUT_RX_SNAPLEN_MAX_OFST 4
-#define MC_CMD_GET_SNAPSHOT_LENGTH_OUT_RX_SNAPLEN_MAX_LEN 4
-
-
-/***********************************/
-/* MC_CMD_FUSE_DIAGS
- * Additional fuse diagnostics
- */
-#define MC_CMD_FUSE_DIAGS 0x102
-#undef MC_CMD_0x102_PRIVILEGE_CTG
-
-#define MC_CMD_0x102_PRIVILEGE_CTG SRIOV_CTG_INSECURE
-
-/* MC_CMD_FUSE_DIAGS_IN msgrequest */
-#define MC_CMD_FUSE_DIAGS_IN_LEN 0
-
-/* MC_CMD_FUSE_DIAGS_OUT msgresponse */
-#define MC_CMD_FUSE_DIAGS_OUT_LEN 48
-/* Total number of mismatched bits between pairs in area 0 */
-#define MC_CMD_FUSE_DIAGS_OUT_AREA0_MISMATCH_BITS_OFST 0
-#define MC_CMD_FUSE_DIAGS_OUT_AREA0_MISMATCH_BITS_LEN 4
-/* Total number of unexpectedly clear (set in B but not A) bits in area 0 */
-#define MC_CMD_FUSE_DIAGS_OUT_AREA0_PAIR_A_BAD_BITS_OFST 4
-#define MC_CMD_FUSE_DIAGS_OUT_AREA0_PAIR_A_BAD_BITS_LEN 4
-/* Total number of unexpectedly clear (set in A but not B) bits in area 0 */
-#define MC_CMD_FUSE_DIAGS_OUT_AREA0_PAIR_B_BAD_BITS_OFST 8
-#define MC_CMD_FUSE_DIAGS_OUT_AREA0_PAIR_B_BAD_BITS_LEN 4
-/* Checksum of data after logical OR of pairs in area 0 */
-#define MC_CMD_FUSE_DIAGS_OUT_AREA0_CHECKSUM_OFST 12
-#define MC_CMD_FUSE_DIAGS_OUT_AREA0_CHECKSUM_LEN 4
-/* Total number of mismatched bits between pairs in area 1 */
-#define MC_CMD_FUSE_DIAGS_OUT_AREA1_MISMATCH_BITS_OFST 16
-#define MC_CMD_FUSE_DIAGS_OUT_AREA1_MISMATCH_BITS_LEN 4
-/* Total number of unexpectedly clear (set in B but not A) bits in area 1 */
-#define MC_CMD_FUSE_DIAGS_OUT_AREA1_PAIR_A_BAD_BITS_OFST 20
-#define MC_CMD_FUSE_DIAGS_OUT_AREA1_PAIR_A_BAD_BITS_LEN 4
-/* Total number of unexpectedly clear (set in A but not B) bits in area 1 */
-#define MC_CMD_FUSE_DIAGS_OUT_AREA1_PAIR_B_BAD_BITS_OFST 24
-#define MC_CMD_FUSE_DIAGS_OUT_AREA1_PAIR_B_BAD_BITS_LEN 4
-/* Checksum of data after logical OR of pairs in area 1 */
-#define MC_CMD_FUSE_DIAGS_OUT_AREA1_CHECKSUM_OFST 28
-#define MC_CMD_FUSE_DIAGS_OUT_AREA1_CHECKSUM_LEN 4
-/* Total number of mismatched bits between pairs in area 2 */
-#define MC_CMD_FUSE_DIAGS_OUT_AREA2_MISMATCH_BITS_OFST 32
-#define MC_CMD_FUSE_DIAGS_OUT_AREA2_MISMATCH_BITS_LEN 4
-/* Total number of unexpectedly clear (set in B but not A) bits in area 2 */
-#define MC_CMD_FUSE_DIAGS_OUT_AREA2_PAIR_A_BAD_BITS_OFST 36
-#define MC_CMD_FUSE_DIAGS_OUT_AREA2_PAIR_A_BAD_BITS_LEN 4
-/* Total number of unexpectedly clear (set in A but not B) bits in area 2 */
-#define MC_CMD_FUSE_DIAGS_OUT_AREA2_PAIR_B_BAD_BITS_OFST 40
-#define MC_CMD_FUSE_DIAGS_OUT_AREA2_PAIR_B_BAD_BITS_LEN 4
-/* Checksum of data after logical OR of pairs in area 2 */
-#define MC_CMD_FUSE_DIAGS_OUT_AREA2_CHECKSUM_OFST 44
-#define MC_CMD_FUSE_DIAGS_OUT_AREA2_CHECKSUM_LEN 4
-
-
-/***********************************/
-/* MC_CMD_PRIVILEGE_MODIFY
- * Modify the privileges of a set of PCIe functions. Note that this operation
- * only affects non-admin functions unless the admin privilege itself is
- * included in one of the masks provided.
- */
-#define MC_CMD_PRIVILEGE_MODIFY 0x60
-#undef MC_CMD_0x60_PRIVILEGE_CTG
-
-#define MC_CMD_0x60_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_PRIVILEGE_MODIFY_IN msgrequest */
-#define MC_CMD_PRIVILEGE_MODIFY_IN_LEN 16
-/* The groups of functions to have their privilege masks modified. */
-#define MC_CMD_PRIVILEGE_MODIFY_IN_FN_GROUP_OFST 0
-#define MC_CMD_PRIVILEGE_MODIFY_IN_FN_GROUP_LEN 4
-#define MC_CMD_PRIVILEGE_MODIFY_IN_NONE 0x0 /* enum */
-#define MC_CMD_PRIVILEGE_MODIFY_IN_ALL 0x1 /* enum */
-#define MC_CMD_PRIVILEGE_MODIFY_IN_PFS_ONLY 0x2 /* enum */
-#define MC_CMD_PRIVILEGE_MODIFY_IN_VFS_ONLY 0x3 /* enum */
-#define MC_CMD_PRIVILEGE_MODIFY_IN_VFS_OF_PF 0x4 /* enum */
-#define MC_CMD_PRIVILEGE_MODIFY_IN_ONE 0x5 /* enum */
-/* For VFS_OF_PF specify the PF, for ONE specify the target function */
-#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_OFST 4
-#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_LEN 4
-#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_PF_OFST 4
-#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_PF_LBN 0
-#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_PF_WIDTH 16
-#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_VF_OFST 4
-#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_VF_LBN 16
-#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_VF_WIDTH 16
-/* Privileges to be added to the target functions. For privilege definitions
- * refer to the command MC_CMD_PRIVILEGE_MASK
- */
-#define MC_CMD_PRIVILEGE_MODIFY_IN_ADD_MASK_OFST 8
-#define MC_CMD_PRIVILEGE_MODIFY_IN_ADD_MASK_LEN 4
-/* Privileges to be removed from the target functions. For privilege
- * definitions refer to the command MC_CMD_PRIVILEGE_MASK
- */
-#define MC_CMD_PRIVILEGE_MODIFY_IN_REMOVE_MASK_OFST 12
-#define MC_CMD_PRIVILEGE_MODIFY_IN_REMOVE_MASK_LEN 4
-
-/* MC_CMD_PRIVILEGE_MODIFY_OUT msgresponse */
-#define MC_CMD_PRIVILEGE_MODIFY_OUT_LEN 0
-
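/*
 * Illustrative sketch (not part of the MCDI-generated definitions above):
 * packing the FUNCTION dword of MC_CMD_PRIVILEGE_MODIFY_IN, where the PF
 * number occupies bits 0-15 and the VF number bits 16-31 (per the PF/VF
 * LBN/WIDTH values above). Plain C; the function name is hypothetical.
 */
static u32 example_privilege_modify_function(unsigned int pf, unsigned int vf)
{
	u32 fn = 0;

	fn |= (pf & 0xffff) << MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_PF_LBN;
	fn |= (vf & 0xffff) << MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_VF_LBN;
	/* written to FUNCTION_OFST; used with FN_GROUP VFS_OF_PF or ONE */
	return fn;
}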
-
-/***********************************/
-/* MC_CMD_XPM_READ_BYTES
- * Read XPM memory
- */
-#define MC_CMD_XPM_READ_BYTES 0x103
-#undef MC_CMD_0x103_PRIVILEGE_CTG
-
-#define MC_CMD_0x103_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_XPM_READ_BYTES_IN msgrequest */
-#define MC_CMD_XPM_READ_BYTES_IN_LEN 8
-/* Start address (byte) */
-#define MC_CMD_XPM_READ_BYTES_IN_ADDR_OFST 0
-#define MC_CMD_XPM_READ_BYTES_IN_ADDR_LEN 4
-/* Count (bytes) */
-#define MC_CMD_XPM_READ_BYTES_IN_COUNT_OFST 4
-#define MC_CMD_XPM_READ_BYTES_IN_COUNT_LEN 4
-
-/* MC_CMD_XPM_READ_BYTES_OUT msgresponse */
-#define MC_CMD_XPM_READ_BYTES_OUT_LENMIN 0
-#define MC_CMD_XPM_READ_BYTES_OUT_LENMAX 252
-#define MC_CMD_XPM_READ_BYTES_OUT_LENMAX_MCDI2 1020
-#define MC_CMD_XPM_READ_BYTES_OUT_LEN(num) (0+1*(num))
-#define MC_CMD_XPM_READ_BYTES_OUT_DATA_NUM(len) (((len)-0)/1)
-/* Data */
-#define MC_CMD_XPM_READ_BYTES_OUT_DATA_OFST 0
-#define MC_CMD_XPM_READ_BYTES_OUT_DATA_LEN 1
-#define MC_CMD_XPM_READ_BYTES_OUT_DATA_MINNUM 0
-#define MC_CMD_XPM_READ_BYTES_OUT_DATA_MAXNUM 252
-#define MC_CMD_XPM_READ_BYTES_OUT_DATA_MAXNUM_MCDI2 1020
-
-
-/***********************************/
-/* MC_CMD_XPM_WRITE_BYTES
- * Write XPM memory
- */
-#define MC_CMD_XPM_WRITE_BYTES 0x104
-#undef MC_CMD_0x104_PRIVILEGE_CTG
-
-#define MC_CMD_0x104_PRIVILEGE_CTG SRIOV_CTG_INSECURE
-
-/* MC_CMD_XPM_WRITE_BYTES_IN msgrequest */
-#define MC_CMD_XPM_WRITE_BYTES_IN_LENMIN 8
-#define MC_CMD_XPM_WRITE_BYTES_IN_LENMAX 252
-#define MC_CMD_XPM_WRITE_BYTES_IN_LENMAX_MCDI2 1020
-#define MC_CMD_XPM_WRITE_BYTES_IN_LEN(num) (8+1*(num))
-#define MC_CMD_XPM_WRITE_BYTES_IN_DATA_NUM(len) (((len)-8)/1)
-/* Start address (byte) */
-#define MC_CMD_XPM_WRITE_BYTES_IN_ADDR_OFST 0
-#define MC_CMD_XPM_WRITE_BYTES_IN_ADDR_LEN 4
-/* Count (bytes) */
-#define MC_CMD_XPM_WRITE_BYTES_IN_COUNT_OFST 4
-#define MC_CMD_XPM_WRITE_BYTES_IN_COUNT_LEN 4
-/* Data */
-#define MC_CMD_XPM_WRITE_BYTES_IN_DATA_OFST 8
-#define MC_CMD_XPM_WRITE_BYTES_IN_DATA_LEN 1
-#define MC_CMD_XPM_WRITE_BYTES_IN_DATA_MINNUM 0
-#define MC_CMD_XPM_WRITE_BYTES_IN_DATA_MAXNUM 244
-#define MC_CMD_XPM_WRITE_BYTES_IN_DATA_MAXNUM_MCDI2 1012
-
-/* MC_CMD_XPM_WRITE_BYTES_OUT msgresponse */
-#define MC_CMD_XPM_WRITE_BYTES_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_XPM_READ_SECTOR
- * Read XPM sector
- */
-#define MC_CMD_XPM_READ_SECTOR 0x105
-#undef MC_CMD_0x105_PRIVILEGE_CTG
-
-#define MC_CMD_0x105_PRIVILEGE_CTG SRIOV_CTG_INSECURE
-
-/* MC_CMD_XPM_READ_SECTOR_IN msgrequest */
-#define MC_CMD_XPM_READ_SECTOR_IN_LEN 8
-/* Sector index */
-#define MC_CMD_XPM_READ_SECTOR_IN_INDEX_OFST 0
-#define MC_CMD_XPM_READ_SECTOR_IN_INDEX_LEN 4
-/* Sector size */
-#define MC_CMD_XPM_READ_SECTOR_IN_SIZE_OFST 4
-#define MC_CMD_XPM_READ_SECTOR_IN_SIZE_LEN 4
-
-/* MC_CMD_XPM_READ_SECTOR_OUT msgresponse */
-#define MC_CMD_XPM_READ_SECTOR_OUT_LENMIN 4
-#define MC_CMD_XPM_READ_SECTOR_OUT_LENMAX 36
-#define MC_CMD_XPM_READ_SECTOR_OUT_LENMAX_MCDI2 36
-#define MC_CMD_XPM_READ_SECTOR_OUT_LEN(num) (4+1*(num))
-#define MC_CMD_XPM_READ_SECTOR_OUT_DATA_NUM(len) (((len)-4)/1)
-/* Sector type */
-#define MC_CMD_XPM_READ_SECTOR_OUT_TYPE_OFST 0
-#define MC_CMD_XPM_READ_SECTOR_OUT_TYPE_LEN 4
-#define MC_CMD_XPM_READ_SECTOR_OUT_BLANK 0x0 /* enum */
-#define MC_CMD_XPM_READ_SECTOR_OUT_CRYPTO_KEY_128 0x1 /* enum */
-#define MC_CMD_XPM_READ_SECTOR_OUT_CRYPTO_KEY_256 0x2 /* enum */
-#define MC_CMD_XPM_READ_SECTOR_OUT_CRYPTO_DATA 0x3 /* enum */
-#define MC_CMD_XPM_READ_SECTOR_OUT_INVALID 0xff /* enum */
-/* Sector data */
-#define MC_CMD_XPM_READ_SECTOR_OUT_DATA_OFST 4
-#define MC_CMD_XPM_READ_SECTOR_OUT_DATA_LEN 1
-#define MC_CMD_XPM_READ_SECTOR_OUT_DATA_MINNUM 0
-#define MC_CMD_XPM_READ_SECTOR_OUT_DATA_MAXNUM 32
-#define MC_CMD_XPM_READ_SECTOR_OUT_DATA_MAXNUM_MCDI2 32
-
-
-/***********************************/
-/* MC_CMD_XPM_WRITE_SECTOR
- * Write XPM sector
- */
-#define MC_CMD_XPM_WRITE_SECTOR 0x106
-#undef MC_CMD_0x106_PRIVILEGE_CTG
-
-#define MC_CMD_0x106_PRIVILEGE_CTG SRIOV_CTG_INSECURE
-
-/* MC_CMD_XPM_WRITE_SECTOR_IN msgrequest */
-#define MC_CMD_XPM_WRITE_SECTOR_IN_LENMIN 12
-#define MC_CMD_XPM_WRITE_SECTOR_IN_LENMAX 44
-#define MC_CMD_XPM_WRITE_SECTOR_IN_LENMAX_MCDI2 44
-#define MC_CMD_XPM_WRITE_SECTOR_IN_LEN(num) (12+1*(num))
-#define MC_CMD_XPM_WRITE_SECTOR_IN_DATA_NUM(len) (((len)-12)/1)
-/* If writing fails due to an uncorrectable error, try up to RETRIES following
- * sectors (or until no more space is available). If 0, only one write attempt
- * is made. Note that uncorrectable errors are unlikely, thanks to the XPM
- * self-repair mechanism.
- */
-#define MC_CMD_XPM_WRITE_SECTOR_IN_RETRIES_OFST 0
-#define MC_CMD_XPM_WRITE_SECTOR_IN_RETRIES_LEN 1
-#define MC_CMD_XPM_WRITE_SECTOR_IN_RESERVED_OFST 1
-#define MC_CMD_XPM_WRITE_SECTOR_IN_RESERVED_LEN 3
-/* Sector type */
-#define MC_CMD_XPM_WRITE_SECTOR_IN_TYPE_OFST 4
-#define MC_CMD_XPM_WRITE_SECTOR_IN_TYPE_LEN 4
-/* Enum values, see field(s): */
-/* MC_CMD_XPM_READ_SECTOR/MC_CMD_XPM_READ_SECTOR_OUT/TYPE */
-/* Sector size */
-#define MC_CMD_XPM_WRITE_SECTOR_IN_SIZE_OFST 8
-#define MC_CMD_XPM_WRITE_SECTOR_IN_SIZE_LEN 4
-/* Sector data */
-#define MC_CMD_XPM_WRITE_SECTOR_IN_DATA_OFST 12
-#define MC_CMD_XPM_WRITE_SECTOR_IN_DATA_LEN 1
-#define MC_CMD_XPM_WRITE_SECTOR_IN_DATA_MINNUM 0
-#define MC_CMD_XPM_WRITE_SECTOR_IN_DATA_MAXNUM 32
-#define MC_CMD_XPM_WRITE_SECTOR_IN_DATA_MAXNUM_MCDI2 32
-
-/* MC_CMD_XPM_WRITE_SECTOR_OUT msgresponse */
-#define MC_CMD_XPM_WRITE_SECTOR_OUT_LEN 4
-/* New sector index */
-#define MC_CMD_XPM_WRITE_SECTOR_OUT_INDEX_OFST 0
-#define MC_CMD_XPM_WRITE_SECTOR_OUT_INDEX_LEN 4
-
-
-/***********************************/
-/* MC_CMD_XPM_INVALIDATE_SECTOR
- * Invalidate XPM sector
- */
-#define MC_CMD_XPM_INVALIDATE_SECTOR 0x107
-#undef MC_CMD_0x107_PRIVILEGE_CTG
-
-#define MC_CMD_0x107_PRIVILEGE_CTG SRIOV_CTG_INSECURE
-
-/* MC_CMD_XPM_INVALIDATE_SECTOR_IN msgrequest */
-#define MC_CMD_XPM_INVALIDATE_SECTOR_IN_LEN 4
-/* Sector index */
-#define MC_CMD_XPM_INVALIDATE_SECTOR_IN_INDEX_OFST 0
-#define MC_CMD_XPM_INVALIDATE_SECTOR_IN_INDEX_LEN 4
-
-/* MC_CMD_XPM_INVALIDATE_SECTOR_OUT msgresponse */
-#define MC_CMD_XPM_INVALIDATE_SECTOR_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_XPM_BLANK_CHECK
- * Blank-check XPM memory and report bad locations
- */
-#define MC_CMD_XPM_BLANK_CHECK 0x108
-#undef MC_CMD_0x108_PRIVILEGE_CTG
-
-#define MC_CMD_0x108_PRIVILEGE_CTG SRIOV_CTG_INSECURE
-
-/* MC_CMD_XPM_BLANK_CHECK_IN msgrequest */
-#define MC_CMD_XPM_BLANK_CHECK_IN_LEN 8
-/* Start address (byte) */
-#define MC_CMD_XPM_BLANK_CHECK_IN_ADDR_OFST 0
-#define MC_CMD_XPM_BLANK_CHECK_IN_ADDR_LEN 4
-/* Count (bytes) */
-#define MC_CMD_XPM_BLANK_CHECK_IN_COUNT_OFST 4
-#define MC_CMD_XPM_BLANK_CHECK_IN_COUNT_LEN 4
-
-/* MC_CMD_XPM_BLANK_CHECK_OUT msgresponse */
-#define MC_CMD_XPM_BLANK_CHECK_OUT_LENMIN 4
-#define MC_CMD_XPM_BLANK_CHECK_OUT_LENMAX 252
-#define MC_CMD_XPM_BLANK_CHECK_OUT_LENMAX_MCDI2 1020
-#define MC_CMD_XPM_BLANK_CHECK_OUT_LEN(num) (4+2*(num))
-#define MC_CMD_XPM_BLANK_CHECK_OUT_BAD_ADDR_NUM(len) (((len)-4)/2)
-/* Total number of bad (non-blank) locations */
-#define MC_CMD_XPM_BLANK_CHECK_OUT_BAD_COUNT_OFST 0
-#define MC_CMD_XPM_BLANK_CHECK_OUT_BAD_COUNT_LEN 4
-/* Addresses of bad locations (the number returned may be less than BAD_COUNT
- * if they cannot all fit into the MCDI response)
- */
-#define MC_CMD_XPM_BLANK_CHECK_OUT_BAD_ADDR_OFST 4
-#define MC_CMD_XPM_BLANK_CHECK_OUT_BAD_ADDR_LEN 2
-#define MC_CMD_XPM_BLANK_CHECK_OUT_BAD_ADDR_MINNUM 0
-#define MC_CMD_XPM_BLANK_CHECK_OUT_BAD_ADDR_MAXNUM 124
-#define MC_CMD_XPM_BLANK_CHECK_OUT_BAD_ADDR_MAXNUM_MCDI2 508
-
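/* Illustrative sketch (editorial addition): decoding an MC_CMD_XPM_BLANK_CHECK
 * response of actual length outlen, assuming MCDI little-endian fields. The
 * BAD_ADDR array may hold fewer entries than BAD_COUNT when they do not all
 * fit. get_le32()/get_le16() are local helpers reused by later sketches.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t get_le32(const uint8_t *p)
{
	return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
	       ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

static uint16_t get_le16(const uint8_t *p)
{
	return (uint16_t)(p[0] | (p[1] << 8));
}

static void xpm_print_bad_locations(const uint8_t *outbuf, size_t outlen)
{
	size_t returned, i;

	if (outlen < MC_CMD_XPM_BLANK_CHECK_OUT_LENMIN)
		return;

	returned = MC_CMD_XPM_BLANK_CHECK_OUT_BAD_ADDR_NUM(outlen);
	printf("%u bad locations in total, %zu returned\n",
	       (unsigned)get_le32(outbuf + MC_CMD_XPM_BLANK_CHECK_OUT_BAD_COUNT_OFST),
	       returned);
	for (i = 0; i < returned; i++)
		printf("  bad address %#x\n",
		       (unsigned)get_le16(outbuf +
					  MC_CMD_XPM_BLANK_CHECK_OUT_BAD_ADDR_OFST +
					  i * MC_CMD_XPM_BLANK_CHECK_OUT_BAD_ADDR_LEN));
}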
-
-/***********************************/
-/* MC_CMD_XPM_REPAIR
- * Blank-check and repair XPM memory
- */
-#define MC_CMD_XPM_REPAIR 0x109
-#undef MC_CMD_0x109_PRIVILEGE_CTG
-
-#define MC_CMD_0x109_PRIVILEGE_CTG SRIOV_CTG_INSECURE
-
-/* MC_CMD_XPM_REPAIR_IN msgrequest */
-#define MC_CMD_XPM_REPAIR_IN_LEN 8
-/* Start address (byte) */
-#define MC_CMD_XPM_REPAIR_IN_ADDR_OFST 0
-#define MC_CMD_XPM_REPAIR_IN_ADDR_LEN 4
-/* Count (bytes) */
-#define MC_CMD_XPM_REPAIR_IN_COUNT_OFST 4
-#define MC_CMD_XPM_REPAIR_IN_COUNT_LEN 4
-
-/* MC_CMD_XPM_REPAIR_OUT msgresponse */
-#define MC_CMD_XPM_REPAIR_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_XPM_DECODER_TEST
- * Test XPM memory address decoders for gross manufacturing defects. Can only
- * be performed on an unprogrammed part.
- */
-#define MC_CMD_XPM_DECODER_TEST 0x10a
-#undef MC_CMD_0x10a_PRIVILEGE_CTG
-
-#define MC_CMD_0x10a_PRIVILEGE_CTG SRIOV_CTG_INSECURE
-
-/* MC_CMD_XPM_DECODER_TEST_IN msgrequest */
-#define MC_CMD_XPM_DECODER_TEST_IN_LEN 0
-
-/* MC_CMD_XPM_DECODER_TEST_OUT msgresponse */
-#define MC_CMD_XPM_DECODER_TEST_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_XPM_WRITE_TEST
- * XPM memory write test. Test XPM write logic for gross manufacturing defects
- * by writing to a dedicated test row. There are 16 locations in the test row
- * and the test can only be performed on locations that have not been
- * previously used (i.e. can be run at most 16 times). The test will pick the
- * first available location to use, or fail with ENOSPC if none are left.
- */
-#define MC_CMD_XPM_WRITE_TEST 0x10b
-#undef MC_CMD_0x10b_PRIVILEGE_CTG
-
-#define MC_CMD_0x10b_PRIVILEGE_CTG SRIOV_CTG_INSECURE
-
-/* MC_CMD_XPM_WRITE_TEST_IN msgrequest */
-#define MC_CMD_XPM_WRITE_TEST_IN_LEN 0
-
-/* MC_CMD_XPM_WRITE_TEST_OUT msgresponse */
-#define MC_CMD_XPM_WRITE_TEST_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_EXEC_SIGNED
- * Check the CMAC of the contents of IMEM and DMEM against the value supplied
- * and if correct begin execution from the start of IMEM. The caller supplies a
- * key ID, the length of IMEM and DMEM to validate and the expected CMAC. CMAC
- * computation runs from the start of IMEM, and from the start of DMEM + 16k,
- * to match flash booting. The command will respond with EINVAL if the CMAC
- * does not match, otherwise it will respond with success before it jumps to
- * IMEM.
- */
-#define MC_CMD_EXEC_SIGNED 0x10c
-#undef MC_CMD_0x10c_PRIVILEGE_CTG
-
-#define MC_CMD_0x10c_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_EXEC_SIGNED_IN msgrequest */
-#define MC_CMD_EXEC_SIGNED_IN_LEN 28
-/* the length of code to include in the CMAC */
-#define MC_CMD_EXEC_SIGNED_IN_CODELEN_OFST 0
-#define MC_CMD_EXEC_SIGNED_IN_CODELEN_LEN 4
-/* the length of data to include in the CMAC */
-#define MC_CMD_EXEC_SIGNED_IN_DATALEN_OFST 4
-#define MC_CMD_EXEC_SIGNED_IN_DATALEN_LEN 4
-/* the XPM sector containing the key to use */
-#define MC_CMD_EXEC_SIGNED_IN_KEYSECTOR_OFST 8
-#define MC_CMD_EXEC_SIGNED_IN_KEYSECTOR_LEN 4
-/* the expected CMAC value */
-#define MC_CMD_EXEC_SIGNED_IN_CMAC_OFST 12
-#define MC_CMD_EXEC_SIGNED_IN_CMAC_LEN 16
-
-/* MC_CMD_EXEC_SIGNED_OUT msgresponse */
-#define MC_CMD_EXEC_SIGNED_OUT_LEN 0
-
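/* Illustrative sketch (editorial addition): building the fixed 28-byte
 * MC_CMD_EXEC_SIGNED request. Assumes put_le32() and the includes from the
 * MC_CMD_XPM_WRITE_BYTES sketch above; the CMAC itself comes from the signing
 * tool and is copied in verbatim. DATALEN should not exceed the region
 * scrubbed with MC_CMD_PREPARE_SIGNED (defined below).
 */
static void exec_signed_request(uint8_t inbuf[MC_CMD_EXEC_SIGNED_IN_LEN],
				uint32_t codelen, uint32_t datalen,
				uint32_t keysector,
				const uint8_t cmac[MC_CMD_EXEC_SIGNED_IN_CMAC_LEN])
{
	put_le32(inbuf, MC_CMD_EXEC_SIGNED_IN_CODELEN_OFST, codelen);
	put_le32(inbuf, MC_CMD_EXEC_SIGNED_IN_DATALEN_OFST, datalen);
	put_le32(inbuf, MC_CMD_EXEC_SIGNED_IN_KEYSECTOR_OFST, keysector);
	memcpy(inbuf + MC_CMD_EXEC_SIGNED_IN_CMAC_OFST, cmac,
	       MC_CMD_EXEC_SIGNED_IN_CMAC_LEN);
}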
-
-/***********************************/
-/* MC_CMD_PREPARE_SIGNED
- * Prepare to upload a signed image. This will scrub the specified length of
- * the data region, which must be at least as large as the DATALEN supplied to
- * MC_CMD_EXEC_SIGNED.
- */
-#define MC_CMD_PREPARE_SIGNED 0x10d
-#undef MC_CMD_0x10d_PRIVILEGE_CTG
-
-#define MC_CMD_0x10d_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_PREPARE_SIGNED_IN msgrequest */
-#define MC_CMD_PREPARE_SIGNED_IN_LEN 4
-/* the length of data area to clear */
-#define MC_CMD_PREPARE_SIGNED_IN_DATALEN_OFST 0
-#define MC_CMD_PREPARE_SIGNED_IN_DATALEN_LEN 4
-
-/* MC_CMD_PREPARE_SIGNED_OUT msgresponse */
-#define MC_CMD_PREPARE_SIGNED_OUT_LEN 0
-
-
/* TUNNEL_ENCAP_UDP_PORT_ENTRY structuredef */
#define TUNNEL_ENCAP_UDP_PORT_ENTRY_LEN 4
/* UDP port (the standard ports are named below but any port may be used) */
@@ -23049,110 +21545,6 @@
/***********************************/
-/* MC_CMD_RX_BALANCING
- * Configure a port upconverter to distribute packets across both RX engines.
- * Packets are distributed according to a table whose entries hold the
- * destination vFIFO. The table index is a hash of the IPv4 source and
- * destination addresses and the VLAN priority.
- */
-#define MC_CMD_RX_BALANCING 0x118
-#undef MC_CMD_0x118_PRIVILEGE_CTG
-
-#define MC_CMD_0x118_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_RX_BALANCING_IN msgrequest */
-#define MC_CMD_RX_BALANCING_IN_LEN 16
-/* The RX port whose upconverter table will be modified */
-#define MC_CMD_RX_BALANCING_IN_PORT_OFST 0
-#define MC_CMD_RX_BALANCING_IN_PORT_LEN 4
-/* The VLAN priority associated to the table index and vFIFO */
-#define MC_CMD_RX_BALANCING_IN_PRIORITY_OFST 4
-#define MC_CMD_RX_BALANCING_IN_PRIORITY_LEN 4
-/* The resulting bit of SRC^DST for indexing the table */
-#define MC_CMD_RX_BALANCING_IN_SRC_DST_OFST 8
-#define MC_CMD_RX_BALANCING_IN_SRC_DST_LEN 4
-/* The RX engine to which the vFIFO in the table entry will point */
-#define MC_CMD_RX_BALANCING_IN_ENG_OFST 12
-#define MC_CMD_RX_BALANCING_IN_ENG_LEN 4
-
-/* MC_CMD_RX_BALANCING_OUT msgresponse */
-#define MC_CMD_RX_BALANCING_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_NVRAM_PRIVATE_APPEND
- * Append a single TLV to the MC_USAGE_TLV partition. Returns MC_CMD_ERR_EEXIST
- * if the tag is already present.
- */
-#define MC_CMD_NVRAM_PRIVATE_APPEND 0x11c
-#undef MC_CMD_0x11c_PRIVILEGE_CTG
-
-#define MC_CMD_0x11c_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_NVRAM_PRIVATE_APPEND_IN msgrequest */
-#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_LENMIN 9
-#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_LENMAX 252
-#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_LENMAX_MCDI2 1020
-#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_LEN(num) (8+1*(num))
-#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_DATA_BUFFER_NUM(len) (((len)-8)/1)
-/* The tag to be appended */
-#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_TAG_OFST 0
-#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_TAG_LEN 4
-/* The length of the data */
-#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_LENGTH_OFST 4
-#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_LENGTH_LEN 4
-/* The data to be contained in the TLV structure */
-#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_DATA_BUFFER_OFST 8
-#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_DATA_BUFFER_LEN 1
-#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_DATA_BUFFER_MINNUM 1
-#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_DATA_BUFFER_MAXNUM 244
-#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_DATA_BUFFER_MAXNUM_MCDI2 1012
-
-/* MC_CMD_NVRAM_PRIVATE_APPEND_OUT msgresponse */
-#define MC_CMD_NVRAM_PRIVATE_APPEND_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_XPM_VERIFY_CONTENTS
- * Verify that the contents of the XPM memory are correct (Medford only). This
- * is used during manufacture to check that the XPM memory has been programmed
- * correctly at ATE.
- */
-#define MC_CMD_XPM_VERIFY_CONTENTS 0x11b
-#undef MC_CMD_0x11b_PRIVILEGE_CTG
-
-#define MC_CMD_0x11b_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_XPM_VERIFY_CONTENTS_IN msgrequest */
-#define MC_CMD_XPM_VERIFY_CONTENTS_IN_LEN 4
-/* Data type to be checked */
-#define MC_CMD_XPM_VERIFY_CONTENTS_IN_DATA_TYPE_OFST 0
-#define MC_CMD_XPM_VERIFY_CONTENTS_IN_DATA_TYPE_LEN 4
-
-/* MC_CMD_XPM_VERIFY_CONTENTS_OUT msgresponse */
-#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_LENMIN 12
-#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_LENMAX 252
-#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_LENMAX_MCDI2 1020
-#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_LEN(num) (12+1*(num))
-#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_SIGNATURE_NUM(len) (((len)-12)/1)
-/* Number of sectors found (test builds only) */
-#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_NUM_SECTORS_OFST 0
-#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_NUM_SECTORS_LEN 4
-/* Number of bytes found (test builds only) */
-#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_NUM_BYTES_OFST 4
-#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_NUM_BYTES_LEN 4
-/* Length of signature */
-#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_SIG_LENGTH_OFST 8
-#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_SIG_LENGTH_LEN 4
-/* Signature */
-#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_SIGNATURE_OFST 12
-#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_SIGNATURE_LEN 1
-#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_SIGNATURE_MINNUM 0
-#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_SIGNATURE_MAXNUM 240
-#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_SIGNATURE_MAXNUM_MCDI2 1008
-
-
-/***********************************/
/* MC_CMD_SET_EVQ_TMR
* Update the timer load, timer reload and timer mode values for a given EVQ.
* The requested timer values (in TMR_LOAD_REQ_NS and TMR_RELOAD_REQ_NS) will
@@ -23262,576 +21654,6 @@
#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_STEP_OFST 32
#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_STEP_LEN 4
-
-/***********************************/
-/* MC_CMD_ALLOCATE_TX_VFIFO_CP
- * When we use the TX_vFIFO_ULL mode, we can allocate common pools using the
- * unused switch buffers.
- */
-#define MC_CMD_ALLOCATE_TX_VFIFO_CP 0x11d
-#undef MC_CMD_0x11d_PRIVILEGE_CTG
-
-#define MC_CMD_0x11d_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_ALLOCATE_TX_VFIFO_CP_IN msgrequest */
-#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_LEN 20
-/* Desired instance. Must be set to a specific instance, which is a function
- * local queue index. The calling client must be the currently-assigned user of
- * this VI (see MC_CMD_SET_VI_USER).
- */
-#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_INSTANCE_OFST 0
-#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_INSTANCE_LEN 4
-/* Will the common pool be used as TX_vFIFO_ULL (1) */
-#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_MODE_OFST 4
-#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_MODE_LEN 4
-#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_ENABLED 0x1 /* enum */
-/* enum: Using this interface without TX_vFIFO_ULL is not supported for now */
-#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_DISABLED 0x0
-/* Number of buffers to reserve for the common pool */
-#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_SIZE_OFST 8
-#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_SIZE_LEN 4
-/* TX datapath to which the Common Pool is connected. */
-#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_INGRESS_OFST 12
-#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_INGRESS_LEN 4
-/* enum: Extracts information from function */
-#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_USE_FUNCTION_VALUE -0x1
-/* Network port or RX Engine to which the common pool connects. */
-#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_EGRESS_OFST 16
-#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_EGRESS_LEN 4
-/* enum: Extracts information from function */
-/* MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_USE_FUNCTION_VALUE -0x1 */
-#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_PORT0 0x0 /* enum */
-#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_PORT1 0x1 /* enum */
-#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_PORT2 0x2 /* enum */
-#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_PORT3 0x3 /* enum */
-/* enum: To enable Switch loopback with Rx engine 0 */
-#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_RX_ENGINE0 0x4
-/* enum: To enable Switch loopback with Rx engine 1 */
-#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_RX_ENGINE1 0x5
-
-/* MC_CMD_ALLOCATE_TX_VFIFO_CP_OUT msgresponse */
-#define MC_CMD_ALLOCATE_TX_VFIFO_CP_OUT_LEN 4
-/* ID of the common pool allocated */
-#define MC_CMD_ALLOCATE_TX_VFIFO_CP_OUT_CP_ID_OFST 0
-#define MC_CMD_ALLOCATE_TX_VFIFO_CP_OUT_CP_ID_LEN 4
-
-
-/***********************************/
-/* MC_CMD_ALLOCATE_TX_VFIFO_VFIFO
- * When we use the TX_vFIFO_ULL mode, we can allocate vFIFOs using the
- * previously allocated common pools.
- */
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO 0x11e
-#undef MC_CMD_0x11e_PRIVILEGE_CTG
-
-#define MC_CMD_0x11e_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN msgrequest */
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_LEN 20
-/* Previously allocated common pool with which the new vFIFO will be
- * associated.
- */
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_CP_OFST 0
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_CP_LEN 4
-/* Port or RX engine to associate the vFIFO egress */
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_EGRESS_OFST 4
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_EGRESS_LEN 4
-/* enum: Extracts information from common pool */
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_USE_CP_VALUE -0x1
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_PORT0 0x0 /* enum */
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_PORT1 0x1 /* enum */
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_PORT2 0x2 /* enum */
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_PORT3 0x3 /* enum */
-/* enum: To enable Switch loopback with Rx engine 0 */
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_RX_ENGINE0 0x4
-/* enum: To enable Switch loopback with Rx engine 1 */
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_RX_ENGINE1 0x5
-/* Minimum number of buffers that the pool must have */
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_SIZE_OFST 8
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_SIZE_LEN 4
-/* enum: Do not check the space available */
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_NO_MINIMUM 0x0
-/* Will the vFIFO be used as TX_vFIFO_ULL */
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_MODE_OFST 12
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_MODE_LEN 4
-/* Network priority of the vFIFO, if applicable */
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_PRIORITY_OFST 16
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_PRIORITY_LEN 4
-/* enum: Search for the lowest unused priority */
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_LOWEST_AVAILABLE -0x1
-
-/* MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_OUT msgresponse */
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_OUT_LEN 8
-/* Short vFIFO ID */
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_OUT_VID_OFST 0
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_OUT_VID_LEN 4
-/* Network priority of the vFIFO */
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_OUT_PRIORITY_OFST 4
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_OUT_PRIORITY_LEN 4
-
-
-/***********************************/
-/* MC_CMD_TEARDOWN_TX_VFIFO_VF
- * This interface clears the configuration of the given vFIFO and leaves it
- * ready to be re-used.
- */
-#define MC_CMD_TEARDOWN_TX_VFIFO_VF 0x11f
-#undef MC_CMD_0x11f_PRIVILEGE_CTG
-
-#define MC_CMD_0x11f_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_TEARDOWN_TX_VFIFO_VF_IN msgrequest */
-#define MC_CMD_TEARDOWN_TX_VFIFO_VF_IN_LEN 4
-/* Short vFIFO ID */
-#define MC_CMD_TEARDOWN_TX_VFIFO_VF_IN_VFIFO_OFST 0
-#define MC_CMD_TEARDOWN_TX_VFIFO_VF_IN_VFIFO_LEN 4
-
-/* MC_CMD_TEARDOWN_TX_VFIFO_VF_OUT msgresponse */
-#define MC_CMD_TEARDOWN_TX_VFIFO_VF_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_DEALLOCATE_TX_VFIFO_CP
- * This interface clears the configuration of the given common pool and leaves
- * it ready to be re-used.
- */
-#define MC_CMD_DEALLOCATE_TX_VFIFO_CP 0x121
-#undef MC_CMD_0x121_PRIVILEGE_CTG
-
-#define MC_CMD_0x121_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_DEALLOCATE_TX_VFIFO_CP_IN msgrequest */
-#define MC_CMD_DEALLOCATE_TX_VFIFO_CP_IN_LEN 4
-/* Common pool ID given when pool allocated */
-#define MC_CMD_DEALLOCATE_TX_VFIFO_CP_IN_POOL_ID_OFST 0
-#define MC_CMD_DEALLOCATE_TX_VFIFO_CP_IN_POOL_ID_LEN 4
-
-/* MC_CMD_DEALLOCATE_TX_VFIFO_CP_OUT msgresponse */
-#define MC_CMD_DEALLOCATE_TX_VFIFO_CP_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS
- * This interface allows the host to find out how many common pool buffers are
- * not yet assigned.
- */
-#define MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS 0x124
-#undef MC_CMD_0x124_PRIVILEGE_CTG
-
-#define MC_CMD_0x124_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_IN msgrequest */
-#define MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_IN_LEN 0
-
-/* MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_OUT msgresponse */
-#define MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_OUT_LEN 8
-/* Available buffers for the ENG to NET vFIFOs. */
-#define MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_OUT_NET_OFST 0
-#define MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_OUT_NET_LEN 4
-/* Available buffers for the ENG to ENG and NET to ENG vFIFOs. */
-#define MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_OUT_ENG_OFST 4
-#define MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_OUT_ENG_LEN 4
-
-
-/***********************************/
-/* MC_CMD_SUC_VERSION
- * Get the version of the SUC
- */
-#define MC_CMD_SUC_VERSION 0x134
-#undef MC_CMD_0x134_PRIVILEGE_CTG
-
-#define MC_CMD_0x134_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_SUC_VERSION_IN msgrequest */
-#define MC_CMD_SUC_VERSION_IN_LEN 0
-
-/* MC_CMD_SUC_VERSION_OUT msgresponse */
-#define MC_CMD_SUC_VERSION_OUT_LEN 24
-/* The SUC firmware version as four numbers - a.b.c.d */
-#define MC_CMD_SUC_VERSION_OUT_VERSION_OFST 0
-#define MC_CMD_SUC_VERSION_OUT_VERSION_LEN 4
-#define MC_CMD_SUC_VERSION_OUT_VERSION_NUM 4
-/* The date, in seconds since the Unix epoch, when the firmware image was
- * built.
- */
-#define MC_CMD_SUC_VERSION_OUT_BUILD_DATE_OFST 16
-#define MC_CMD_SUC_VERSION_OUT_BUILD_DATE_LEN 4
-/* The ID of the SUC chip. This is specific to the platform but typically
- * indicates family, memory sizes etc. See SF-116728-SW for further details.
- */
-#define MC_CMD_SUC_VERSION_OUT_CHIP_ID_OFST 20
-#define MC_CMD_SUC_VERSION_OUT_CHIP_ID_LEN 4
-
-/* MC_CMD_SUC_BOOT_VERSION_IN msgrequest: Get the version of the SUC boot
- * loader.
- */
-#define MC_CMD_SUC_BOOT_VERSION_IN_LEN 4
-#define MC_CMD_SUC_BOOT_VERSION_IN_MAGIC_OFST 0
-#define MC_CMD_SUC_BOOT_VERSION_IN_MAGIC_LEN 4
-/* enum: Requests the SUC boot version. */
-#define MC_CMD_SUC_VERSION_GET_BOOT_VERSION 0xb007700b
-
-/* MC_CMD_SUC_BOOT_VERSION_OUT msgresponse */
-#define MC_CMD_SUC_BOOT_VERSION_OUT_LEN 4
-/* The SUC boot version */
-#define MC_CMD_SUC_BOOT_VERSION_OUT_VERSION_OFST 0
-#define MC_CMD_SUC_BOOT_VERSION_OUT_VERSION_LEN 4
-
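/* Illustrative sketch (editorial addition): decoding MC_CMD_SUC_VERSION_OUT.
 * Assumes get_le32() and the includes from the MC_CMD_XPM_BLANK_CHECK sketch
 * above.
 */
static void suc_print_version(const uint8_t outbuf[MC_CMD_SUC_VERSION_OUT_LEN])
{
	uint32_t v[MC_CMD_SUC_VERSION_OUT_VERSION_NUM];
	unsigned int i;

	for (i = 0; i < MC_CMD_SUC_VERSION_OUT_VERSION_NUM; i++)
		v[i] = get_le32(outbuf + MC_CMD_SUC_VERSION_OUT_VERSION_OFST +
				i * MC_CMD_SUC_VERSION_OUT_VERSION_LEN);

	printf("SUC firmware %u.%u.%u.%u, built %u (Unix time), chip id %#x\n",
	       (unsigned)v[0], (unsigned)v[1], (unsigned)v[2], (unsigned)v[3],
	       (unsigned)get_le32(outbuf + MC_CMD_SUC_VERSION_OUT_BUILD_DATE_OFST),
	       (unsigned)get_le32(outbuf + MC_CMD_SUC_VERSION_OUT_CHIP_ID_OFST));
}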
-
-/***********************************/
-/* MC_CMD_GET_RX_PREFIX_ID
- * This command is part of the mechanism for configuring the format of the RX
- * packet prefix. It takes as input a bitmask of the fields the host would like
- * to be in the prefix. If the hardware supports RX prefixes with that
- * combination of fields, then this command returns a list of prefix-ids,
- * opaque identifiers suitable for use in the RX_PREFIX_ID field of a
- * MC_CMD_INIT_RXQ_V5_IN message. If the combination of fields is not
- * supported, returns ENOTSUP. If the firmware can't create any new prefix-ids
- * due to resource constraints, returns ENOSPC.
- */
-#define MC_CMD_GET_RX_PREFIX_ID 0x13b
-#undef MC_CMD_0x13b_PRIVILEGE_CTG
-
-#define MC_CMD_0x13b_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_GET_RX_PREFIX_ID_IN msgrequest */
-#define MC_CMD_GET_RX_PREFIX_ID_IN_LEN 8
-/* Field bitmask. */
-#define MC_CMD_GET_RX_PREFIX_ID_IN_FIELDS_OFST 0
-#define MC_CMD_GET_RX_PREFIX_ID_IN_FIELDS_LEN 8
-#define MC_CMD_GET_RX_PREFIX_ID_IN_FIELDS_LO_OFST 0
-#define MC_CMD_GET_RX_PREFIX_ID_IN_FIELDS_LO_LEN 4
-#define MC_CMD_GET_RX_PREFIX_ID_IN_FIELDS_LO_LBN 0
-#define MC_CMD_GET_RX_PREFIX_ID_IN_FIELDS_LO_WIDTH 32
-#define MC_CMD_GET_RX_PREFIX_ID_IN_FIELDS_HI_OFST 4
-#define MC_CMD_GET_RX_PREFIX_ID_IN_FIELDS_HI_LEN 4
-#define MC_CMD_GET_RX_PREFIX_ID_IN_FIELDS_HI_LBN 32
-#define MC_CMD_GET_RX_PREFIX_ID_IN_FIELDS_HI_WIDTH 32
-#define MC_CMD_GET_RX_PREFIX_ID_IN_LENGTH_OFST 0
-#define MC_CMD_GET_RX_PREFIX_ID_IN_LENGTH_LBN 0
-#define MC_CMD_GET_RX_PREFIX_ID_IN_LENGTH_WIDTH 1
-#define MC_CMD_GET_RX_PREFIX_ID_IN_RSS_HASH_VALID_OFST 0
-#define MC_CMD_GET_RX_PREFIX_ID_IN_RSS_HASH_VALID_LBN 1
-#define MC_CMD_GET_RX_PREFIX_ID_IN_RSS_HASH_VALID_WIDTH 1
-#define MC_CMD_GET_RX_PREFIX_ID_IN_USER_FLAG_OFST 0
-#define MC_CMD_GET_RX_PREFIX_ID_IN_USER_FLAG_LBN 2
-#define MC_CMD_GET_RX_PREFIX_ID_IN_USER_FLAG_WIDTH 1
-#define MC_CMD_GET_RX_PREFIX_ID_IN_CLASS_OFST 0
-#define MC_CMD_GET_RX_PREFIX_ID_IN_CLASS_LBN 3
-#define MC_CMD_GET_RX_PREFIX_ID_IN_CLASS_WIDTH 1
-#define MC_CMD_GET_RX_PREFIX_ID_IN_PARTIAL_TSTAMP_OFST 0
-#define MC_CMD_GET_RX_PREFIX_ID_IN_PARTIAL_TSTAMP_LBN 4
-#define MC_CMD_GET_RX_PREFIX_ID_IN_PARTIAL_TSTAMP_WIDTH 1
-#define MC_CMD_GET_RX_PREFIX_ID_IN_RSS_HASH_OFST 0
-#define MC_CMD_GET_RX_PREFIX_ID_IN_RSS_HASH_LBN 5
-#define MC_CMD_GET_RX_PREFIX_ID_IN_RSS_HASH_WIDTH 1
-#define MC_CMD_GET_RX_PREFIX_ID_IN_USER_MARK_OFST 0
-#define MC_CMD_GET_RX_PREFIX_ID_IN_USER_MARK_LBN 6
-#define MC_CMD_GET_RX_PREFIX_ID_IN_USER_MARK_WIDTH 1
-#define MC_CMD_GET_RX_PREFIX_ID_IN_INGRESS_MPORT_OFST 0
-#define MC_CMD_GET_RX_PREFIX_ID_IN_INGRESS_MPORT_LBN 7
-#define MC_CMD_GET_RX_PREFIX_ID_IN_INGRESS_MPORT_WIDTH 1
-#define MC_CMD_GET_RX_PREFIX_ID_IN_INGRESS_VPORT_OFST 0
-#define MC_CMD_GET_RX_PREFIX_ID_IN_INGRESS_VPORT_LBN 7
-#define MC_CMD_GET_RX_PREFIX_ID_IN_INGRESS_VPORT_WIDTH 1
-#define MC_CMD_GET_RX_PREFIX_ID_IN_CSUM_FRAME_OFST 0
-#define MC_CMD_GET_RX_PREFIX_ID_IN_CSUM_FRAME_LBN 8
-#define MC_CMD_GET_RX_PREFIX_ID_IN_CSUM_FRAME_WIDTH 1
-#define MC_CMD_GET_RX_PREFIX_ID_IN_VLAN_STRIP_TCI_OFST 0
-#define MC_CMD_GET_RX_PREFIX_ID_IN_VLAN_STRIP_TCI_LBN 9
-#define MC_CMD_GET_RX_PREFIX_ID_IN_VLAN_STRIP_TCI_WIDTH 1
-#define MC_CMD_GET_RX_PREFIX_ID_IN_VLAN_STRIPPED_OFST 0
-#define MC_CMD_GET_RX_PREFIX_ID_IN_VLAN_STRIPPED_LBN 10
-#define MC_CMD_GET_RX_PREFIX_ID_IN_VLAN_STRIPPED_WIDTH 1
-#define MC_CMD_GET_RX_PREFIX_ID_IN_VSWITCH_STATUS_OFST 0
-#define MC_CMD_GET_RX_PREFIX_ID_IN_VSWITCH_STATUS_LBN 11
-#define MC_CMD_GET_RX_PREFIX_ID_IN_VSWITCH_STATUS_WIDTH 1
-
-/* MC_CMD_GET_RX_PREFIX_ID_OUT msgresponse */
-#define MC_CMD_GET_RX_PREFIX_ID_OUT_LENMIN 8
-#define MC_CMD_GET_RX_PREFIX_ID_OUT_LENMAX 252
-#define MC_CMD_GET_RX_PREFIX_ID_OUT_LENMAX_MCDI2 1020
-#define MC_CMD_GET_RX_PREFIX_ID_OUT_LEN(num) (4+4*(num))
-#define MC_CMD_GET_RX_PREFIX_ID_OUT_RX_PREFIX_ID_NUM(len) (((len)-4)/4)
-/* Number of prefix-ids returned */
-#define MC_CMD_GET_RX_PREFIX_ID_OUT_NUM_RX_PREFIX_IDS_OFST 0
-#define MC_CMD_GET_RX_PREFIX_ID_OUT_NUM_RX_PREFIX_IDS_LEN 4
-/* Opaque prefix identifiers which can be passed into MC_CMD_INIT_RXQ_V5 or
- * MC_CMD_QUERY_PREFIX_ID
- */
-#define MC_CMD_GET_RX_PREFIX_ID_OUT_RX_PREFIX_ID_OFST 4
-#define MC_CMD_GET_RX_PREFIX_ID_OUT_RX_PREFIX_ID_LEN 4
-#define MC_CMD_GET_RX_PREFIX_ID_OUT_RX_PREFIX_ID_MINNUM 1
-#define MC_CMD_GET_RX_PREFIX_ID_OUT_RX_PREFIX_ID_MAXNUM 62
-#define MC_CMD_GET_RX_PREFIX_ID_OUT_RX_PREFIX_ID_MAXNUM_MCDI2 254
-
-/* RX_PREFIX_FIELD_INFO structuredef: Information about a single RX prefix
- * field
- */
-#define RX_PREFIX_FIELD_INFO_LEN 4
-/* The offset of the field from the start of the prefix, in bits */
-#define RX_PREFIX_FIELD_INFO_OFFSET_BITS_OFST 0
-#define RX_PREFIX_FIELD_INFO_OFFSET_BITS_LEN 2
-#define RX_PREFIX_FIELD_INFO_OFFSET_BITS_LBN 0
-#define RX_PREFIX_FIELD_INFO_OFFSET_BITS_WIDTH 16
-/* The width of the field, in bits */
-#define RX_PREFIX_FIELD_INFO_WIDTH_BITS_OFST 2
-#define RX_PREFIX_FIELD_INFO_WIDTH_BITS_LEN 1
-#define RX_PREFIX_FIELD_INFO_WIDTH_BITS_LBN 16
-#define RX_PREFIX_FIELD_INFO_WIDTH_BITS_WIDTH 8
-/* The type of the field. These enum values are in the same order as the fields
- * in the MC_CMD_GET_RX_PREFIX_ID_IN bitmask
- */
-#define RX_PREFIX_FIELD_INFO_TYPE_OFST 3
-#define RX_PREFIX_FIELD_INFO_TYPE_LEN 1
-#define RX_PREFIX_FIELD_INFO_LENGTH 0x0 /* enum */
-#define RX_PREFIX_FIELD_INFO_RSS_HASH_VALID 0x1 /* enum */
-#define RX_PREFIX_FIELD_INFO_USER_FLAG 0x2 /* enum */
-#define RX_PREFIX_FIELD_INFO_CLASS 0x3 /* enum */
-#define RX_PREFIX_FIELD_INFO_PARTIAL_TSTAMP 0x4 /* enum */
-#define RX_PREFIX_FIELD_INFO_RSS_HASH 0x5 /* enum */
-#define RX_PREFIX_FIELD_INFO_USER_MARK 0x6 /* enum */
-#define RX_PREFIX_FIELD_INFO_INGRESS_MPORT 0x7 /* enum */
-#define RX_PREFIX_FIELD_INFO_INGRESS_VPORT 0x7 /* enum */
-#define RX_PREFIX_FIELD_INFO_CSUM_FRAME 0x8 /* enum */
-#define RX_PREFIX_FIELD_INFO_VLAN_STRIP_TCI 0x9 /* enum */
-#define RX_PREFIX_FIELD_INFO_VLAN_STRIPPED 0xa /* enum */
-#define RX_PREFIX_FIELD_INFO_VSWITCH_STATUS 0xb /* enum */
-#define RX_PREFIX_FIELD_INFO_TYPE_LBN 24
-#define RX_PREFIX_FIELD_INFO_TYPE_WIDTH 8
-
-/* RX_PREFIX_FIXED_RESPONSE structuredef: Information about an RX prefix in
- * which every field has a fixed offset and width
- */
-#define RX_PREFIX_FIXED_RESPONSE_LENMIN 4
-#define RX_PREFIX_FIXED_RESPONSE_LENMAX 252
-#define RX_PREFIX_FIXED_RESPONSE_LENMAX_MCDI2 1020
-#define RX_PREFIX_FIXED_RESPONSE_LEN(num) (4+4*(num))
-#define RX_PREFIX_FIXED_RESPONSE_FIELDS_NUM(len) (((len)-4)/4)
-/* Length of the RX prefix in bytes */
-#define RX_PREFIX_FIXED_RESPONSE_PREFIX_LENGTH_BYTES_OFST 0
-#define RX_PREFIX_FIXED_RESPONSE_PREFIX_LENGTH_BYTES_LEN 1
-#define RX_PREFIX_FIXED_RESPONSE_PREFIX_LENGTH_BYTES_LBN 0
-#define RX_PREFIX_FIXED_RESPONSE_PREFIX_LENGTH_BYTES_WIDTH 8
-/* Number of fields present in the prefix */
-#define RX_PREFIX_FIXED_RESPONSE_FIELD_COUNT_OFST 1
-#define RX_PREFIX_FIXED_RESPONSE_FIELD_COUNT_LEN 1
-#define RX_PREFIX_FIXED_RESPONSE_FIELD_COUNT_LBN 8
-#define RX_PREFIX_FIXED_RESPONSE_FIELD_COUNT_WIDTH 8
-#define RX_PREFIX_FIXED_RESPONSE_RESERVED_OFST 2
-#define RX_PREFIX_FIXED_RESPONSE_RESERVED_LEN 2
-#define RX_PREFIX_FIXED_RESPONSE_RESERVED_LBN 16
-#define RX_PREFIX_FIXED_RESPONSE_RESERVED_WIDTH 16
-/* Array of RX_PREFIX_FIELD_INFO structures, of length FIELD_COUNT */
-#define RX_PREFIX_FIXED_RESPONSE_FIELDS_OFST 4
-#define RX_PREFIX_FIXED_RESPONSE_FIELDS_LEN 4
-#define RX_PREFIX_FIXED_RESPONSE_FIELDS_MINNUM 0
-#define RX_PREFIX_FIXED_RESPONSE_FIELDS_MAXNUM 62
-#define RX_PREFIX_FIXED_RESPONSE_FIELDS_MAXNUM_MCDI2 254
-#define RX_PREFIX_FIXED_RESPONSE_FIELDS_LBN 32
-#define RX_PREFIX_FIXED_RESPONSE_FIELDS_WIDTH 32
-
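/* Illustrative sketch (editorial addition): walking an RX_PREFIX_FIXED_RESPONSE
 * block as returned by MC_CMD_QUERY_RX_PREFIX_ID (see below). Assumes
 * get_le16() and the includes from the earlier parsing sketch; field types are
 * printed by their raw enum value.
 */
static void rx_prefix_dump_fixed(const uint8_t *resp, size_t resp_len)
{
	unsigned int nfields, i;

	if (resp_len < RX_PREFIX_FIXED_RESPONSE_LENMIN)
		return;

	nfields = resp[RX_PREFIX_FIXED_RESPONSE_FIELD_COUNT_OFST];
	printf("RX prefix is %u bytes long and has %u fields\n",
	       (unsigned)resp[RX_PREFIX_FIXED_RESPONSE_PREFIX_LENGTH_BYTES_OFST],
	       nfields);

	for (i = 0; i < nfields; i++) {
		const uint8_t *fi = resp + RX_PREFIX_FIXED_RESPONSE_FIELDS_OFST +
				    i * RX_PREFIX_FIELD_INFO_LEN;

		if (RX_PREFIX_FIXED_RESPONSE_FIELDS_OFST +
		    (i + 1) * RX_PREFIX_FIELD_INFO_LEN > resp_len)
			break;	/* truncated response */
		printf("  field type %u: offset %u bits, width %u bits\n",
		       (unsigned)fi[RX_PREFIX_FIELD_INFO_TYPE_OFST],
		       (unsigned)get_le16(fi + RX_PREFIX_FIELD_INFO_OFFSET_BITS_OFST),
		       (unsigned)fi[RX_PREFIX_FIELD_INFO_WIDTH_BITS_OFST]);
	}
}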
-
-/***********************************/
-/* MC_CMD_QUERY_RX_PREFIX_ID
- * This command takes an RX prefix id (obtained from MC_CMD_GET_RX_PREFIX_ID)
- * and returns a description of the RX prefix of packets delivered to an RXQ
- * created with that prefix id.
- */
-#define MC_CMD_QUERY_RX_PREFIX_ID 0x13c
-#undef MC_CMD_0x13c_PRIVILEGE_CTG
-
-#define MC_CMD_0x13c_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_QUERY_RX_PREFIX_ID_IN msgrequest */
-#define MC_CMD_QUERY_RX_PREFIX_ID_IN_LEN 4
-/* Prefix id to query */
-#define MC_CMD_QUERY_RX_PREFIX_ID_IN_RX_PREFIX_ID_OFST 0
-#define MC_CMD_QUERY_RX_PREFIX_ID_IN_RX_PREFIX_ID_LEN 4
-
-/* MC_CMD_QUERY_RX_PREFIX_ID_OUT msgresponse */
-#define MC_CMD_QUERY_RX_PREFIX_ID_OUT_LENMIN 4
-#define MC_CMD_QUERY_RX_PREFIX_ID_OUT_LENMAX 252
-#define MC_CMD_QUERY_RX_PREFIX_ID_OUT_LENMAX_MCDI2 1020
-#define MC_CMD_QUERY_RX_PREFIX_ID_OUT_LEN(num) (4+1*(num))
-#define MC_CMD_QUERY_RX_PREFIX_ID_OUT_RESPONSE_NUM(len) (((len)-4)/1)
-/* An enum describing the structure of this response. */
-#define MC_CMD_QUERY_RX_PREFIX_ID_OUT_RESPONSE_TYPE_OFST 0
-#define MC_CMD_QUERY_RX_PREFIX_ID_OUT_RESPONSE_TYPE_LEN 1
-/* enum: The response is of format RX_PREFIX_FIXED_RESPONSE */
-#define MC_CMD_QUERY_RX_PREFIX_ID_OUT_RESPONSE_TYPE_FIXED 0x0
-#define MC_CMD_QUERY_RX_PREFIX_ID_OUT_RESERVED_OFST 1
-#define MC_CMD_QUERY_RX_PREFIX_ID_OUT_RESERVED_LEN 3
-/* The response. Its format is as defined by the RESPONSE_TYPE value */
-#define MC_CMD_QUERY_RX_PREFIX_ID_OUT_RESPONSE_OFST 4
-#define MC_CMD_QUERY_RX_PREFIX_ID_OUT_RESPONSE_LEN 1
-#define MC_CMD_QUERY_RX_PREFIX_ID_OUT_RESPONSE_MINNUM 0
-#define MC_CMD_QUERY_RX_PREFIX_ID_OUT_RESPONSE_MAXNUM 248
-#define MC_CMD_QUERY_RX_PREFIX_ID_OUT_RESPONSE_MAXNUM_MCDI2 1016
-
-
-/***********************************/
-/* MC_CMD_BUNDLE
- * A command to perform various bundle-related operations on insecure cards.
- */
-#define MC_CMD_BUNDLE 0x13d
-#undef MC_CMD_0x13d_PRIVILEGE_CTG
-
-#define MC_CMD_0x13d_PRIVILEGE_CTG SRIOV_CTG_INSECURE
-
-/* MC_CMD_BUNDLE_IN msgrequest */
-#define MC_CMD_BUNDLE_IN_LEN 4
-/* Sub-command code */
-#define MC_CMD_BUNDLE_IN_OP_OFST 0
-#define MC_CMD_BUNDLE_IN_OP_LEN 4
-/* enum: Get the current host access mode set on component partitions. */
-#define MC_CMD_BUNDLE_IN_OP_COMPONENT_ACCESS_GET 0x0
-/* enum: Set the host access mode set on component partitions. */
-#define MC_CMD_BUNDLE_IN_OP_COMPONENT_ACCESS_SET 0x1
-
-/* MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_GET_IN msgrequest: Retrieve the current
- * access mode on component partitions such as MC_FIRMWARE, SUC_FIRMWARE and
- * EXPANSION_UEFI. This command only works on engineering (insecure) cards. On
- * secure adapters, this command returns MC_CMD_ERR_EPERM.
- */
-#define MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_GET_IN_LEN 4
-/* Sub-command code. Must be OP_COMPONENT_ACCESS_GET. */
-#define MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_GET_IN_OP_OFST 0
-#define MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_GET_IN_OP_LEN 4
-
-/* MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_GET_OUT msgresponse: Returns the access
- * control mode.
- */
-#define MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_GET_OUT_LEN 4
-/* Access mode of component partitions. */
-#define MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_GET_OUT_ACCESS_MODE_OFST 0
-#define MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_GET_OUT_ACCESS_MODE_LEN 4
-/* enum: Component partitions are read-only from the host. */
-#define MC_CMD_BUNDLE_COMPONENTS_READ_ONLY 0x0
-/* enum: Component partitions can be read from and written to by the host. */
-#define MC_CMD_BUNDLE_COMPONENTS_READ_WRITE 0x1
-
-/* MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_SET_IN msgrequest: The component
- * partitions such as MC_FIRMWARE, SUC_FIRMWARE, EXPANSION_UEFI are set as
- * read-only on firmware built with bundle support. This command marks these
- * partitions as read/writeable. The access status set by this command does not
- * persist across MC reboots. This command only works on engineering (insecure)
- * cards. On secure adapters, this command returns MC_CMD_ERR_EPERM.
- */
-#define MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_SET_IN_LEN 8
-/* Sub-command code. Must be OP_COMPONENT_ACCESS_SET. */
-#define MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_SET_IN_OP_OFST 0
-#define MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_SET_IN_OP_LEN 4
-/* Access mode of component partitions. */
-#define MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_SET_IN_ACCESS_MODE_OFST 4
-#define MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_SET_IN_ACCESS_MODE_LEN 4
-/* Enum values, see field(s): */
-/* MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_GET_OUT/ACCESS_MODE */
-
-/* MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_SET_OUT msgresponse */
-#define MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_SET_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_GET_VPD
- * Read all VPD starting from a given address
- */
-#define MC_CMD_GET_VPD 0x165
-#undef MC_CMD_0x165_PRIVILEGE_CTG
-
-#define MC_CMD_0x165_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_GET_VPD_IN msgrequest */
-#define MC_CMD_GET_VPD_IN_LEN 4
-/* VPD address to start from. In case the VPD is longer than the MCDI buffer
- * (unlikely), the user can make multiple calls with different starting
- * addresses.
- */
-#define MC_CMD_GET_VPD_IN_ADDR_OFST 0
-#define MC_CMD_GET_VPD_IN_ADDR_LEN 4
-
-/* MC_CMD_GET_VPD_OUT msgresponse */
-#define MC_CMD_GET_VPD_OUT_LENMIN 0
-#define MC_CMD_GET_VPD_OUT_LENMAX 252
-#define MC_CMD_GET_VPD_OUT_LENMAX_MCDI2 1020
-#define MC_CMD_GET_VPD_OUT_LEN(num) (0+1*(num))
-#define MC_CMD_GET_VPD_OUT_DATA_NUM(len) (((len)-0)/1)
-/* VPD data returned. */
-#define MC_CMD_GET_VPD_OUT_DATA_OFST 0
-#define MC_CMD_GET_VPD_OUT_DATA_LEN 1
-#define MC_CMD_GET_VPD_OUT_DATA_MINNUM 0
-#define MC_CMD_GET_VPD_OUT_DATA_MAXNUM 252
-#define MC_CMD_GET_VPD_OUT_DATA_MAXNUM_MCDI2 1020
-
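/* Illustrative sketch (editorial addition): reading the whole VPD by repeating
 * MC_CMD_GET_VPD with an increasing start address, as the comment on ADDR
 * suggests. Assumes put_le32() and the includes from the earlier packing
 * sketch. The mcdi_rpc_fn callback is a hypothetical transport hook, and the
 * termination rule used here (a response shorter than the maximum MCDI buffer
 * means the end of the VPD) is an assumption, not something this header
 * specifies.
 */
typedef int (*mcdi_rpc_fn)(unsigned int cmd,
			   const uint8_t *inbuf, size_t inlen,
			   uint8_t *outbuf, size_t outlen_max);

static int vpd_read_all(mcdi_rpc_fn rpc, uint8_t *vpd, size_t vpd_max)
{
	uint8_t inbuf[MC_CMD_GET_VPD_IN_LEN];
	uint8_t outbuf[MC_CMD_GET_VPD_OUT_LENMAX];	/* or _LENMAX_MCDI2 */
	size_t total = 0;
	int rc;

	do {
		put_le32(inbuf, MC_CMD_GET_VPD_IN_ADDR_OFST, (uint32_t)total);
		rc = rpc(MC_CMD_GET_VPD, inbuf, sizeof(inbuf),
			 outbuf, sizeof(outbuf));
		if (rc < 0)
			return rc;
		if (total + (size_t)rc > vpd_max)
			return -1;	/* caller's buffer is too small */
		memcpy(vpd + total, outbuf + MC_CMD_GET_VPD_OUT_DATA_OFST,
		       (size_t)rc);
		total += (size_t)rc;
	} while ((size_t)rc == sizeof(outbuf));

	return (int)total;
}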
-
-/***********************************/
-/* MC_CMD_GET_NCSI_INFO
- * Provide information about the NC-SI stack
- */
-#define MC_CMD_GET_NCSI_INFO 0x167
-#undef MC_CMD_0x167_PRIVILEGE_CTG
-
-#define MC_CMD_0x167_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_GET_NCSI_INFO_IN msgrequest */
-#define MC_CMD_GET_NCSI_INFO_IN_LEN 8
-/* Operation to be performed */
-#define MC_CMD_GET_NCSI_INFO_IN_OP_OFST 0
-#define MC_CMD_GET_NCSI_INFO_IN_OP_LEN 4
-/* enum: Information on the link settings. */
-#define MC_CMD_GET_NCSI_INFO_IN_OP_LINK 0x0
-/* enum: Statistics associated with the channel */
-#define MC_CMD_GET_NCSI_INFO_IN_OP_STATISTICS 0x1
-/* The NC-SI channel on which the operation is to be performed */
-#define MC_CMD_GET_NCSI_INFO_IN_CHANNEL_OFST 4
-#define MC_CMD_GET_NCSI_INFO_IN_CHANNEL_LEN 4
-
-/* MC_CMD_GET_NCSI_INFO_LINK_OUT msgresponse */
-#define MC_CMD_GET_NCSI_INFO_LINK_OUT_LEN 12
-/* Settings as received from BMC. */
-#define MC_CMD_GET_NCSI_INFO_LINK_OUT_SETTINGS_OFST 0
-#define MC_CMD_GET_NCSI_INFO_LINK_OUT_SETTINGS_LEN 4
-/* Advertised capabilities applied to channel. */
-#define MC_CMD_GET_NCSI_INFO_LINK_OUT_ADV_CAP_OFST 4
-#define MC_CMD_GET_NCSI_INFO_LINK_OUT_ADV_CAP_LEN 4
-/* General status */
-#define MC_CMD_GET_NCSI_INFO_LINK_OUT_STATUS_OFST 8
-#define MC_CMD_GET_NCSI_INFO_LINK_OUT_STATUS_LEN 4
-#define MC_CMD_GET_NCSI_INFO_LINK_OUT_STATE_OFST 8
-#define MC_CMD_GET_NCSI_INFO_LINK_OUT_STATE_LBN 0
-#define MC_CMD_GET_NCSI_INFO_LINK_OUT_STATE_WIDTH 2
-#define MC_CMD_GET_NCSI_INFO_LINK_OUT_ENABLE_OFST 8
-#define MC_CMD_GET_NCSI_INFO_LINK_OUT_ENABLE_LBN 2
-#define MC_CMD_GET_NCSI_INFO_LINK_OUT_ENABLE_WIDTH 1
-#define MC_CMD_GET_NCSI_INFO_LINK_OUT_NETWORK_TX_OFST 8
-#define MC_CMD_GET_NCSI_INFO_LINK_OUT_NETWORK_TX_LBN 3
-#define MC_CMD_GET_NCSI_INFO_LINK_OUT_NETWORK_TX_WIDTH 1
-#define MC_CMD_GET_NCSI_INFO_LINK_OUT_ATTACHED_OFST 8
-#define MC_CMD_GET_NCSI_INFO_LINK_OUT_ATTACHED_LBN 4
-#define MC_CMD_GET_NCSI_INFO_LINK_OUT_ATTACHED_WIDTH 1
-
-/* MC_CMD_GET_NCSI_INFO_STATISTICS_OUT msgresponse */
-#define MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_LEN 28
-/* The number of NC-SI commands received. */
-#define MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_NCSI_CMDS_RX_OFST 0
-#define MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_NCSI_CMDS_RX_LEN 4
-/* The number of NC-SI commands dropped. */
-#define MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_NCSI_PKTS_DROPPED_OFST 4
-#define MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_NCSI_PKTS_DROPPED_LEN 4
-/* The number of invalid NC-SI commands received. */
-#define MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_NCSI_CMD_TYPE_ERRS_OFST 8
-#define MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_NCSI_CMD_TYPE_ERRS_LEN 4
-/* The number of checksum errors seen. */
-#define MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_NCSI_CMD_CSUM_ERRS_OFST 12
-#define MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_NCSI_CMD_CSUM_ERRS_LEN 4
-/* The number of NC-SI requests received. */
-#define MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_NCSI_RX_PKTS_OFST 16
-#define MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_NCSI_RX_PKTS_LEN 4
-/* The number of NC-SI responses sent (includes AENs) */
-#define MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_NCSI_TX_PKTS_OFST 20
-#define MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_NCSI_TX_PKTS_LEN 4
-/* The number of NC-SI AENs sent */
-#define MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_AENS_SENT_OFST 24
-#define MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_AENS_SENT_LEN 4
-
/* CLIENT_HANDLE structuredef: A client is an abstract entity that can make
* requests of the device and that can own resources managed by the device.
* Examples of clients include PCIe functions and dynamic clients. A client
@@ -23848,59 +21670,10 @@
#define CLIENT_HANDLE_OPAQUE_LBN 0
#define CLIENT_HANDLE_OPAQUE_WIDTH 32
-/* CLOCK_INFO structuredef: Information about a single hardware clock */
-#define CLOCK_INFO_LEN 28
-/* Enumeration that uniquely identifies the clock */
-#define CLOCK_INFO_CLOCK_ID_OFST 0
-#define CLOCK_INFO_CLOCK_ID_LEN 2
-/* enum: The Riverhead CMC (card MC) */
-#define CLOCK_INFO_CLOCK_CMC 0x0
-/* enum: The Riverhead NMC (network MC) */
-#define CLOCK_INFO_CLOCK_NMC 0x1
-/* enum: The Riverhead SDNET slice main logic */
-#define CLOCK_INFO_CLOCK_SDNET 0x2
-/* enum: The Riverhead SDNET LUT */
-#define CLOCK_INFO_CLOCK_SDNET_LUT 0x3
-/* enum: The Riverhead SDNET control logic */
-#define CLOCK_INFO_CLOCK_SDNET_CTRL 0x4
-/* enum: The Riverhead Streaming SubSystem */
-#define CLOCK_INFO_CLOCK_SSS 0x5
-/* enum: The Riverhead network MAC and associated CSR registers */
-#define CLOCK_INFO_CLOCK_MAC 0x6
-#define CLOCK_INFO_CLOCK_ID_LBN 0
-#define CLOCK_INFO_CLOCK_ID_WIDTH 16
-/* Assorted flags */
-#define CLOCK_INFO_FLAGS_OFST 2
-#define CLOCK_INFO_FLAGS_LEN 2
-#define CLOCK_INFO_SETTABLE_OFST 2
-#define CLOCK_INFO_SETTABLE_LBN 0
-#define CLOCK_INFO_SETTABLE_WIDTH 1
-#define CLOCK_INFO_FLAGS_LBN 16
-#define CLOCK_INFO_FLAGS_WIDTH 16
-/* The frequency in Hz */
-#define CLOCK_INFO_FREQUENCY_OFST 4
-#define CLOCK_INFO_FREQUENCY_LEN 8
-#define CLOCK_INFO_FREQUENCY_LO_OFST 4
-#define CLOCK_INFO_FREQUENCY_LO_LEN 4
-#define CLOCK_INFO_FREQUENCY_LO_LBN 32
-#define CLOCK_INFO_FREQUENCY_LO_WIDTH 32
-#define CLOCK_INFO_FREQUENCY_HI_OFST 8
-#define CLOCK_INFO_FREQUENCY_HI_LEN 4
-#define CLOCK_INFO_FREQUENCY_HI_LBN 64
-#define CLOCK_INFO_FREQUENCY_HI_WIDTH 32
-#define CLOCK_INFO_FREQUENCY_LBN 32
-#define CLOCK_INFO_FREQUENCY_WIDTH 64
-/* Human-readable ASCII name for clock, with NUL termination */
-#define CLOCK_INFO_NAME_OFST 12
-#define CLOCK_INFO_NAME_LEN 1
-#define CLOCK_INFO_NAME_NUM 16
-#define CLOCK_INFO_NAME_LBN 96
-#define CLOCK_INFO_NAME_WIDTH 8
-
/* SCHED_CREDIT_CHECK_RESULT structuredef */
#define SCHED_CREDIT_CHECK_RESULT_LEN 16
-/* The instance of the scheduler. Refer to XN-200389-AW for the location of
- * these schedulers in the hardware.
+/* The instance of the scheduler. Refer to XN-200389-AW (snic/hnic) and
+ * XN-200425-TC (cdx) for the location of these schedulers in the hardware.
*/
#define SCHED_CREDIT_CHECK_RESULT_SCHED_INSTANCE_OFST 0
#define SCHED_CREDIT_CHECK_RESULT_SCHED_INSTANCE_LEN 1
@@ -23914,6 +21687,16 @@
#define SCHED_CREDIT_CHECK_RESULT_DMAC_H2C 0x7 /* enum */
#define SCHED_CREDIT_CHECK_RESULT_HUB_NET_B 0x8 /* enum */
#define SCHED_CREDIT_CHECK_RESULT_HUB_NET_REPLAY 0x9 /* enum */
+#define SCHED_CREDIT_CHECK_RESULT_ADAPTER_C2H_C 0xa /* enum */
+#define SCHED_CREDIT_CHECK_RESULT_A2_H2C_C 0xb /* enum */
+#define SCHED_CREDIT_CHECK_RESULT_A3_SOFT_ADAPTOR_C 0xc /* enum */
+#define SCHED_CREDIT_CHECK_RESULT_A4_DPU_WRITE_C 0xd /* enum */
+#define SCHED_CREDIT_CHECK_RESULT_JRC_RRU 0xe /* enum */
+#define SCHED_CREDIT_CHECK_RESULT_CDM_SINK 0xf /* enum */
+#define SCHED_CREDIT_CHECK_RESULT_PCIE_SINK 0x10 /* enum */
+#define SCHED_CREDIT_CHECK_RESULT_UPORT_SINK 0x11 /* enum */
+#define SCHED_CREDIT_CHECK_RESULT_PSX_SINK 0x12 /* enum */
+#define SCHED_CREDIT_CHECK_RESULT_A5_DPU_READ_C 0x13 /* enum */
#define SCHED_CREDIT_CHECK_RESULT_SCHED_INSTANCE_LBN 0
#define SCHED_CREDIT_CHECK_RESULT_SCHED_INSTANCE_WIDTH 8
/* The type of node that this result refers to. */
@@ -23923,6 +21706,10 @@
#define SCHED_CREDIT_CHECK_RESULT_DEST 0x0
/* enum: Source node */
#define SCHED_CREDIT_CHECK_RESULT_SOURCE 0x1
+/* enum: Destination node credit type 1 (new to the Keystone schedulers, see
+ * SF-120268-TC)
+ */
+#define SCHED_CREDIT_CHECK_RESULT_DEST_CREDIT1 0x2
#define SCHED_CREDIT_CHECK_RESULT_NODE_TYPE_LBN 8
#define SCHED_CREDIT_CHECK_RESULT_NODE_TYPE_WIDTH 8
/* Level of node in scheduler hierarchy (level 0 is the bottom of the
@@ -23950,592 +21737,6 @@
/***********************************/
-/* MC_CMD_GET_CLOCKS_INFO
- * Get information about the device clocks
- */
-#define MC_CMD_GET_CLOCKS_INFO 0x166
-#undef MC_CMD_0x166_PRIVILEGE_CTG
-
-#define MC_CMD_0x166_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_GET_CLOCKS_INFO_IN msgrequest */
-#define MC_CMD_GET_CLOCKS_INFO_IN_LEN 0
-
-/* MC_CMD_GET_CLOCKS_INFO_OUT msgresponse */
-#define MC_CMD_GET_CLOCKS_INFO_OUT_LENMIN 0
-#define MC_CMD_GET_CLOCKS_INFO_OUT_LENMAX 252
-#define MC_CMD_GET_CLOCKS_INFO_OUT_LENMAX_MCDI2 1008
-#define MC_CMD_GET_CLOCKS_INFO_OUT_LEN(num) (0+28*(num))
-#define MC_CMD_GET_CLOCKS_INFO_OUT_INFOS_NUM(len) (((len)-0)/28)
-/* An array of CLOCK_INFO structures. */
-#define MC_CMD_GET_CLOCKS_INFO_OUT_INFOS_OFST 0
-#define MC_CMD_GET_CLOCKS_INFO_OUT_INFOS_LEN 28
-#define MC_CMD_GET_CLOCKS_INFO_OUT_INFOS_MINNUM 0
-#define MC_CMD_GET_CLOCKS_INFO_OUT_INFOS_MAXNUM 9
-#define MC_CMD_GET_CLOCKS_INFO_OUT_INFOS_MAXNUM_MCDI2 36
-
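/* Illustrative sketch (editorial addition): iterating the packed CLOCK_INFO
 * array returned by MC_CMD_GET_CLOCKS_INFO. Assumes get_le16()/get_le32() and
 * the includes from the earlier parsing sketch; the 64-bit frequency is
 * assembled from its LO/HI dwords.
 */
static void dump_clocks(const uint8_t *outbuf, size_t outlen)
{
	size_t nclocks = MC_CMD_GET_CLOCKS_INFO_OUT_INFOS_NUM(outlen);
	size_t i;

	for (i = 0; i < nclocks; i++) {
		const uint8_t *ci = outbuf +
				    MC_CMD_GET_CLOCKS_INFO_OUT_INFOS_OFST +
				    i * CLOCK_INFO_LEN;
		uint64_t freq =
			(uint64_t)get_le32(ci + CLOCK_INFO_FREQUENCY_LO_OFST) |
			((uint64_t)get_le32(ci + CLOCK_INFO_FREQUENCY_HI_OFST) << 32);
		char name[CLOCK_INFO_NAME_NUM + 1];

		memcpy(name, ci + CLOCK_INFO_NAME_OFST, CLOCK_INFO_NAME_NUM);
		name[CLOCK_INFO_NAME_NUM] = '\0';	/* defensive terminator */
		printf("clock %u (%s): %llu Hz%s\n",
		       (unsigned)get_le16(ci + CLOCK_INFO_CLOCK_ID_OFST), name,
		       (unsigned long long)freq,
		       ((get_le16(ci + CLOCK_INFO_FLAGS_OFST) >>
			 CLOCK_INFO_SETTABLE_LBN) & 1) ? " (settable)" : "");
	}
}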
-
-/***********************************/
-/* MC_CMD_VNIC_ENCAP_RULE_ADD
- * Add a rule for detecting encapsulations in the VNIC stage. Currently this
- * only affects checksum validation in VNIC RX - on TX the send descriptor
- * explicitly specifies encapsulation. These rules are per-VNIC, i.e. only
- * apply to the current driver. If a rule matches, then the packet is
- * considered to have the corresponding encapsulation type, and the inner
- * packet is parsed. It is up to the driver to ensure that overlapping rules
- * are not inserted. (If a packet would match multiple rules, a random one of
- * them will be used.) A rule with the exact same match criteria may not be
- * inserted twice (EALREADY). Only a limited number of MATCH_FLAGS values are
- * supported, use MC_CMD_GET_PARSER_DISP_INFO with OP
- * OP_GET_SUPPORTED_VNIC_ENCAP_RULE_MATCHES to get a list of supported
- * combinations. Each driver may only have a limited set of active rules -
- * returns ENOSPC if the caller's table is full.
- */
-#define MC_CMD_VNIC_ENCAP_RULE_ADD 0x16d
-#undef MC_CMD_0x16d_PRIVILEGE_CTG
-
-#define MC_CMD_0x16d_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_VNIC_ENCAP_RULE_ADD_IN msgrequest */
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_LEN 36
-/* Set to MAE_MPORT_SELECTOR_ASSIGNED. In the future this may be relaxed. */
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MPORT_SELECTOR_OFST 0
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MPORT_SELECTOR_LEN 4
-/* Any non-zero bits other than the ones named below or an unsupported
- * combination will cause the NIC to return EOPNOTSUPP. In the future more
- * flags may be added.
- */
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_FLAGS_OFST 4
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_FLAGS_LEN 4
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_ETHER_TYPE_OFST 4
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_ETHER_TYPE_LBN 0
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_ETHER_TYPE_WIDTH 1
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_OUTER_VLAN_OFST 4
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_OUTER_VLAN_LBN 1
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_OUTER_VLAN_WIDTH 1
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_DST_IP_OFST 4
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_DST_IP_LBN 2
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_DST_IP_WIDTH 1
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_IP_PROTO_OFST 4
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_IP_PROTO_LBN 3
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_IP_PROTO_WIDTH 1
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_DST_PORT_OFST 4
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_DST_PORT_LBN 4
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_DST_PORT_WIDTH 1
-/* Only if MATCH_ETHER_TYPE is set. Ethertype value as bytes in network order.
- * Currently only IPv4 (0x0800) and IPv6 (0x86DD) ethertypes may be used.
- */
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_ETHER_TYPE_OFST 8
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_ETHER_TYPE_LEN 2
-/* Only if MATCH_OUTER_VLAN is set. VID value as bytes in network order.
- * (Deprecated)
- */
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_OUTER_VLAN_LBN 80
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_OUTER_VLAN_WIDTH 12
-/* Only if MATCH_OUTER_VLAN is set. Aligned wrapper for OUTER_VLAN_VID. */
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_OUTER_VLAN_WORD_OFST 10
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_OUTER_VLAN_WORD_LEN 2
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_OUTER_VLAN_VID_OFST 10
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_OUTER_VLAN_VID_LBN 0
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_OUTER_VLAN_VID_WIDTH 12
-/* Only if MATCH_DST_IP is set. IP address as bytes in network order. In the
- * case of IPv4, the IP should be in the first 4 bytes and all other bytes
- * should be zero.
- */
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_DST_IP_OFST 12
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_DST_IP_LEN 16
-/* Only if MATCH_IP_PROTO is set. Currently only UDP proto (17) may be used. */
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_IP_PROTO_OFST 28
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_IP_PROTO_LEN 1
-/* Actions that should be applied to packets that match the rule. */
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_ACTION_FLAGS_OFST 29
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_ACTION_FLAGS_LEN 1
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_STRIP_OUTER_VLAN_OFST 29
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_STRIP_OUTER_VLAN_LBN 0
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_STRIP_OUTER_VLAN_WIDTH 1
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_RSS_ON_OUTER_OFST 29
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_RSS_ON_OUTER_LBN 1
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_RSS_ON_OUTER_WIDTH 1
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_STEER_ON_OUTER_OFST 29
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_STEER_ON_OUTER_LBN 2
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_STEER_ON_OUTER_WIDTH 1
-/* Only if MATCH_DST_PORT is set. Port number as bytes in network order. */
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_DST_PORT_OFST 30
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_DST_PORT_LEN 2
-/* Resulting encapsulation type, as per MAE_MCDI_ENCAP_TYPE enumeration. */
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_ENCAP_TYPE_OFST 32
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_ENCAP_TYPE_LEN 4
-
-/* MC_CMD_VNIC_ENCAP_RULE_ADD_OUT msgresponse */
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_OUT_LEN 4
-/* Handle to inserted rule. Used for removing the rule. */
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_OUT_HANDLE_OFST 0
-#define MC_CMD_VNIC_ENCAP_RULE_ADD_OUT_HANDLE_LEN 4
-
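/* Illustrative sketch (editorial addition): building an
 * MC_CMD_VNIC_ENCAP_RULE_ADD request for a UDP-tunnel style rule that matches
 * on ethertype, IP protocol and destination port. Assumes put_le32() and the
 * includes from the earlier packing sketch; the MPORT_SELECTOR and ENCAP_TYPE
 * values come from enumerations defined elsewhere in this header
 * (MAE_MPORT_SELECTOR_ASSIGNED, MAE_MCDI_ENCAP_TYPE), so they are taken as
 * parameters here.
 */
static void vnic_encap_rule_add_request(uint8_t inbuf[MC_CMD_VNIC_ENCAP_RULE_ADD_IN_LEN],
					uint32_t mport_selector,
					uint16_t ether_type,	/* 0x0800 or 0x86DD */
					uint8_t ip_proto,	/* currently only 17 (UDP) */
					uint16_t dst_port,
					uint32_t encap_type)
{
	uint32_t match_flags =
		(1u << MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_ETHER_TYPE_LBN) |
		(1u << MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_IP_PROTO_LBN) |
		(1u << MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_DST_PORT_LBN);

	/* Zero the whole request; unsupported MATCH_FLAGS bits must not be set. */
	memset(inbuf, 0, MC_CMD_VNIC_ENCAP_RULE_ADD_IN_LEN);
	put_le32(inbuf, MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MPORT_SELECTOR_OFST,
		 mport_selector);
	put_le32(inbuf, MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_FLAGS_OFST,
		 match_flags);
	/* ETHER_TYPE and DST_PORT are specified as bytes in network order. */
	inbuf[MC_CMD_VNIC_ENCAP_RULE_ADD_IN_ETHER_TYPE_OFST + 0] =
		(uint8_t)(ether_type >> 8);
	inbuf[MC_CMD_VNIC_ENCAP_RULE_ADD_IN_ETHER_TYPE_OFST + 1] =
		(uint8_t)(ether_type & 0xff);
	inbuf[MC_CMD_VNIC_ENCAP_RULE_ADD_IN_IP_PROTO_OFST] = ip_proto;
	inbuf[MC_CMD_VNIC_ENCAP_RULE_ADD_IN_DST_PORT_OFST + 0] =
		(uint8_t)(dst_port >> 8);
	inbuf[MC_CMD_VNIC_ENCAP_RULE_ADD_IN_DST_PORT_OFST + 1] =
		(uint8_t)(dst_port & 0xff);
	put_le32(inbuf, MC_CMD_VNIC_ENCAP_RULE_ADD_IN_ENCAP_TYPE_OFST,
		 encap_type);
}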
-
-/***********************************/
-/* MC_CMD_VNIC_ENCAP_RULE_REMOVE
- * Remove a VNIC encapsulation rule. Packets which would have previously
- * matched the rule will then be considered as unencapsulated. Returns EALREADY
- * if the input HANDLE doesn't correspond to an existing rule.
- */
-#define MC_CMD_VNIC_ENCAP_RULE_REMOVE 0x16e
-#undef MC_CMD_0x16e_PRIVILEGE_CTG
-
-#define MC_CMD_0x16e_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_VNIC_ENCAP_RULE_REMOVE_IN msgrequest */
-#define MC_CMD_VNIC_ENCAP_RULE_REMOVE_IN_LEN 4
-/* Handle which was returned by MC_CMD_VNIC_ENCAP_RULE_ADD. */
-#define MC_CMD_VNIC_ENCAP_RULE_REMOVE_IN_HANDLE_OFST 0
-#define MC_CMD_VNIC_ENCAP_RULE_REMOVE_IN_HANDLE_LEN 4
-
-/* MC_CMD_VNIC_ENCAP_RULE_REMOVE_OUT msgresponse */
-#define MC_CMD_VNIC_ENCAP_RULE_REMOVE_OUT_LEN 0
-
-/* UUID structuredef: An RFC4122 standard UUID. The values here are stored in
- * the endianness specified by the RFC; users should ignore the broken-out
- * fields and instead do straight memory copies to ensure correct ordering.
- */
-#define UUID_LEN 16
-#define UUID_TIME_LOW_OFST 0
-#define UUID_TIME_LOW_LEN 4
-#define UUID_TIME_LOW_LBN 0
-#define UUID_TIME_LOW_WIDTH 32
-#define UUID_TIME_MID_OFST 4
-#define UUID_TIME_MID_LEN 2
-#define UUID_TIME_MID_LBN 32
-#define UUID_TIME_MID_WIDTH 16
-#define UUID_TIME_HI_LBN 52
-#define UUID_TIME_HI_WIDTH 12
-#define UUID_VERSION_LBN 48
-#define UUID_VERSION_WIDTH 4
-#define UUID_RESERVED_LBN 64
-#define UUID_RESERVED_WIDTH 2
-#define UUID_CLK_SEQ_LBN 66
-#define UUID_CLK_SEQ_WIDTH 14
-#define UUID_NODE_OFST 10
-#define UUID_NODE_LEN 6
-#define UUID_NODE_LBN 80
-#define UUID_NODE_WIDTH 48
-
-
-/***********************************/
-/* MC_CMD_PLUGIN_ALLOC
- * Create a handle to a datapath plugin's extension. This involves finding a
- * currently-loaded plugin offering the given functionality (as identified by
- * the UUID) and allocating a handle to track the usage of it. Plugin
- * functionality is identified by 'extension' rather than any other identifier
- * so that a single plugin bitfile may offer more than one piece of independent
- * functionality. If two bitfiles are loaded which both offer the same
- * extension, then the metadata is interrogated further to determine which is
- * the newest, and that one is opened. See SF-123625-SW for architectural
- * detail on datapath plugins.
- */
-#define MC_CMD_PLUGIN_ALLOC 0x1ad
-#undef MC_CMD_0x1ad_PRIVILEGE_CTG
-
-#define MC_CMD_0x1ad_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_PLUGIN_ALLOC_IN msgrequest */
-#define MC_CMD_PLUGIN_ALLOC_IN_LEN 24
-/* The functionality requested of the plugin, as a UUID structure */
-#define MC_CMD_PLUGIN_ALLOC_IN_UUID_OFST 0
-#define MC_CMD_PLUGIN_ALLOC_IN_UUID_LEN 16
-/* Additional options for opening the handle */
-#define MC_CMD_PLUGIN_ALLOC_IN_FLAGS_OFST 16
-#define MC_CMD_PLUGIN_ALLOC_IN_FLAGS_LEN 4
-#define MC_CMD_PLUGIN_ALLOC_IN_FLAG_INFO_ONLY_OFST 16
-#define MC_CMD_PLUGIN_ALLOC_IN_FLAG_INFO_ONLY_LBN 0
-#define MC_CMD_PLUGIN_ALLOC_IN_FLAG_INFO_ONLY_WIDTH 1
-#define MC_CMD_PLUGIN_ALLOC_IN_FLAG_ALLOW_DISABLED_OFST 16
-#define MC_CMD_PLUGIN_ALLOC_IN_FLAG_ALLOW_DISABLED_LBN 1
-#define MC_CMD_PLUGIN_ALLOC_IN_FLAG_ALLOW_DISABLED_WIDTH 1
-/* Load the extension only if it is in the specified administrative group.
- * Specify ANY to load the extension wherever it is found (if there are
- * multiple choices then the extension with the highest MINOR_VER/PATCH_VER
- * will be loaded). See MC_CMD_PLUGIN_GET_META_GLOBAL for a description of
- * administrative groups.
- */
-#define MC_CMD_PLUGIN_ALLOC_IN_ADMIN_GROUP_OFST 20
-#define MC_CMD_PLUGIN_ALLOC_IN_ADMIN_GROUP_LEN 2
-/* enum: Load the extension from any ADMIN_GROUP. */
-#define MC_CMD_PLUGIN_ALLOC_IN_ANY 0xffff
-/* Reserved */
-#define MC_CMD_PLUGIN_ALLOC_IN_RESERVED_OFST 22
-#define MC_CMD_PLUGIN_ALLOC_IN_RESERVED_LEN 2
-
-/* MC_CMD_PLUGIN_ALLOC_OUT msgresponse */
-#define MC_CMD_PLUGIN_ALLOC_OUT_LEN 4
-/* Unique identifier of this usage */
-#define MC_CMD_PLUGIN_ALLOC_OUT_HANDLE_OFST 0
-#define MC_CMD_PLUGIN_ALLOC_OUT_HANDLE_LEN 4
-
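/* Illustrative sketch (editorial addition): building MC_CMD_PLUGIN_ALLOC_IN.
 * Per the UUID structuredef above, the identifier is copied in as raw bytes
 * rather than via the broken-out fields. Assumes put_le32() and the includes
 * from the earlier packing sketch; the two-byte ADMIN_GROUP is stored
 * little-endian by hand.
 */
static void plugin_alloc_request(uint8_t inbuf[MC_CMD_PLUGIN_ALLOC_IN_LEN],
				 const uint8_t uuid[UUID_LEN],
				 uint16_t admin_group)	/* or MC_CMD_PLUGIN_ALLOC_IN_ANY */
{
	memset(inbuf, 0, MC_CMD_PLUGIN_ALLOC_IN_LEN);
	memcpy(inbuf + MC_CMD_PLUGIN_ALLOC_IN_UUID_OFST, uuid,
	       MC_CMD_PLUGIN_ALLOC_IN_UUID_LEN);
	/* No INFO_ONLY or ALLOW_DISABLED flags in this example. */
	put_le32(inbuf, MC_CMD_PLUGIN_ALLOC_IN_FLAGS_OFST, 0);
	inbuf[MC_CMD_PLUGIN_ALLOC_IN_ADMIN_GROUP_OFST + 0] =
		(uint8_t)(admin_group & 0xff);
	inbuf[MC_CMD_PLUGIN_ALLOC_IN_ADMIN_GROUP_OFST + 1] =
		(uint8_t)(admin_group >> 8);
}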
-
-/***********************************/
-/* MC_CMD_PLUGIN_FREE
- * Delete a handle to a plugin's extension.
- */
-#define MC_CMD_PLUGIN_FREE 0x1ae
-#undef MC_CMD_0x1ae_PRIVILEGE_CTG
-
-#define MC_CMD_0x1ae_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_PLUGIN_FREE_IN msgrequest */
-#define MC_CMD_PLUGIN_FREE_IN_LEN 4
-/* Handle returned by MC_CMD_PLUGIN_ALLOC_OUT */
-#define MC_CMD_PLUGIN_FREE_IN_HANDLE_OFST 0
-#define MC_CMD_PLUGIN_FREE_IN_HANDLE_LEN 4
-
-/* MC_CMD_PLUGIN_FREE_OUT msgresponse */
-#define MC_CMD_PLUGIN_FREE_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_PLUGIN_GET_META_GLOBAL
- * Returns the global metadata applying to the whole plugin extension. See the
- * other metadata calls for subtypes of data.
- */
-#define MC_CMD_PLUGIN_GET_META_GLOBAL 0x1af
-#undef MC_CMD_0x1af_PRIVILEGE_CTG
-
-#define MC_CMD_0x1af_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_PLUGIN_GET_META_GLOBAL_IN msgrequest */
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_IN_LEN 4
-/* Handle returned by MC_CMD_PLUGIN_ALLOC_OUT */
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_IN_HANDLE_OFST 0
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_IN_HANDLE_LEN 4
-
-/* MC_CMD_PLUGIN_GET_META_GLOBAL_OUT msgresponse */
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_LEN 36
-/* Unique identifier of this plugin extension. This is identical to the value
- * which was requested when the handle was allocated.
- */
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_UUID_OFST 0
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_UUID_LEN 16
-/* semver sub-version of this plugin extension */
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_MINOR_VER_OFST 16
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_MINOR_VER_LEN 2
-/* semver micro-version of this plugin extension */
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_PATCH_VER_OFST 18
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_PATCH_VER_LEN 2
-/* Number of different messages which can be sent to this extension */
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_NUM_MSGS_OFST 20
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_NUM_MSGS_LEN 4
-/* Byte offset within the VI window of the plugin's mapped CSR window. */
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_MAPPED_CSR_OFFSET_OFST 24
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_MAPPED_CSR_OFFSET_LEN 2
-/* Number of bytes mapped through to the plugin's CSRs. 0 if that feature was
- * not requested by the plugin (in which case MAPPED_CSR_OFFSET and
- * MAPPED_CSR_FLAGS are ignored).
- */
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_MAPPED_CSR_SIZE_OFST 26
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_MAPPED_CSR_SIZE_LEN 2
-/* Flags indicating how to perform the CSR window mapping. */
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_MAPPED_CSR_FLAGS_OFST 28
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_MAPPED_CSR_FLAGS_LEN 4
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_MAPPED_CSR_FLAG_READ_OFST 28
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_MAPPED_CSR_FLAG_READ_LBN 0
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_MAPPED_CSR_FLAG_READ_WIDTH 1
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_MAPPED_CSR_FLAG_WRITE_OFST 28
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_MAPPED_CSR_FLAG_WRITE_LBN 1
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_MAPPED_CSR_FLAG_WRITE_WIDTH 1
-/* Identifier of the set of extensions which all change state together.
- * Extensions having the same ADMIN_GROUP will always load and unload at the
- * same time. ADMIN_GROUP values themselves are arbitrary (but they contain a
- * generation number as an implementation detail to ensure that they're not
- * reused rapidly).
- */
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_ADMIN_GROUP_OFST 32
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_ADMIN_GROUP_LEN 1
-/* Bitshift in MC_CMD_DEVEL_CLIENT_PRIVILEGE_MODIFY's MASK parameters
- * corresponding to this extension, i.e. set the bit 1<<PRIVILEGE_BIT to permit
- * access to this extension.
- */
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_PRIVILEGE_BIT_OFST 33
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_PRIVILEGE_BIT_LEN 1
-/* Reserved */
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_RESERVED_OFST 34
-#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_RESERVED_LEN 2
-
-
-/***********************************/
-/* MC_CMD_PLUGIN_GET_META_PUBLISHER
- * Returns metadata supplied by the plugin author which describes this
- * extension in a human-readable way. Contrast with
- * MC_CMD_PLUGIN_GET_META_GLOBAL, which returns information needed for software
- * to operate.
- */
-#define MC_CMD_PLUGIN_GET_META_PUBLISHER 0x1b0
-#undef MC_CMD_0x1b0_PRIVILEGE_CTG
-
-#define MC_CMD_0x1b0_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_PLUGIN_GET_META_PUBLISHER_IN msgrequest */
-#define MC_CMD_PLUGIN_GET_META_PUBLISHER_IN_LEN 12
-/* Handle returned by MC_CMD_PLUGIN_ALLOC_OUT */
-#define MC_CMD_PLUGIN_GET_META_PUBLISHER_IN_HANDLE_OFST 0
-#define MC_CMD_PLUGIN_GET_META_PUBLISHER_IN_HANDLE_LEN 4
-/* Category of data to return */
-#define MC_CMD_PLUGIN_GET_META_PUBLISHER_IN_SUBTYPE_OFST 4
-#define MC_CMD_PLUGIN_GET_META_PUBLISHER_IN_SUBTYPE_LEN 4
-/* enum: Top-level information about the extension. The returned data is an
- * array of key/value pairs using the keys in RFC5013 (Dublin Core) to describe
- * the extension. The data is a back-to-back list of zero-terminated strings;
- * the even-numbered fields (0,2,4,...) are keys and their following odd-
- * numbered fields are the corresponding values. Both keys and values are
- * nominally UTF-8. Per RFC5013, the same key may be repeated any number of
- * times. Note that all information (including the key/value structure itself
- * and the UTF-8 encoding) may have been provided by the plugin author, so
- * callers must be cautious about parsing it. Callers should parse only the
- * top-level structure to separate out the keys and values; the contents of the
- * values are not expected to be machine-readable.
- */
-#define MC_CMD_PLUGIN_GET_META_PUBLISHER_IN_EXTENSION_KVS 0x0
-/* Byte position of the data to be returned within the full data block of the
- * given SUBTYPE.
- */
-#define MC_CMD_PLUGIN_GET_META_PUBLISHER_IN_OFFSET_OFST 8
-#define MC_CMD_PLUGIN_GET_META_PUBLISHER_IN_OFFSET_LEN 4
-
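Because the key/value blob is entirely under the plugin author's control, callers are expected to walk it defensively. A minimal sketch (hypothetical code, not from the sfc driver) of separating keys from values in an already-assembled EXTENSION_KVS blob without running past the buffer:

#include <stddef.h>
#include <string.h>

/* Walk a back-to-back list of zero-terminated strings: even entries are
 * keys, odd entries are the corresponding values. Returns the number of
 * complete pairs found, stopping early if a string is unterminated. */
static size_t kvs_walk(const char *blob, size_t len,
                       void (*cb)(const char *key, const char *val))
{
    size_t pos = 0, pairs = 0;

    while (pos < len) {
        const char *key = blob + pos;
        const char *key_end = memchr(key, '\0', len - pos);
        const char *val, *val_end;

        if (!key_end)
            break;                      /* unterminated key: stop */
        pos = (size_t)(key_end - blob) + 1;
        if (pos >= len)
            break;                      /* key without a value: stop */
        val = blob + pos;
        val_end = memchr(val, '\0', len - pos);
        if (!val_end)
            break;                      /* unterminated value: stop */
        pos = (size_t)(val_end - blob) + 1;
        cb(key, val);
        pairs++;
    }
    return pairs;
}
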
-/* MC_CMD_PLUGIN_GET_META_PUBLISHER_OUT msgresponse */
-#define MC_CMD_PLUGIN_GET_META_PUBLISHER_OUT_LENMIN 4
-#define MC_CMD_PLUGIN_GET_META_PUBLISHER_OUT_LENMAX 252
-#define MC_CMD_PLUGIN_GET_META_PUBLISHER_OUT_LENMAX_MCDI2 1020
-#define MC_CMD_PLUGIN_GET_META_PUBLISHER_OUT_LEN(num) (4+1*(num))
-#define MC_CMD_PLUGIN_GET_META_PUBLISHER_OUT_DATA_NUM(len) (((len)-4)/1)
-/* Full length of the data block of the requested SUBTYPE, in bytes. */
-#define MC_CMD_PLUGIN_GET_META_PUBLISHER_OUT_TOTAL_SIZE_OFST 0
-#define MC_CMD_PLUGIN_GET_META_PUBLISHER_OUT_TOTAL_SIZE_LEN 4
-/* The information requested by SUBTYPE. */
-#define MC_CMD_PLUGIN_GET_META_PUBLISHER_OUT_DATA_OFST 4
-#define MC_CMD_PLUGIN_GET_META_PUBLISHER_OUT_DATA_LEN 1
-#define MC_CMD_PLUGIN_GET_META_PUBLISHER_OUT_DATA_MINNUM 0
-#define MC_CMD_PLUGIN_GET_META_PUBLISHER_OUT_DATA_MAXNUM 248
-#define MC_CMD_PLUGIN_GET_META_PUBLISHER_OUT_DATA_MAXNUM_MCDI2 1016
-
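When TOTAL_SIZE exceeds what fits in one response, the caller re-issues the command with a growing OFFSET until the whole block is assembled. A sketch of that loop follows; mcdi_rpc() is a hypothetical stand-in for whatever mechanism the caller uses to issue MCDI commands, and the put_le32()/get_le32() helpers are invented for the sketch.

#include <stdint.h>
#include <string.h>

/* Hypothetical MCDI transport: issue a command and return the response
 * length in bytes, or a negative error. Not a real driver API. */
int mcdi_rpc(unsigned int cmd, const uint8_t *req, size_t req_len,
             uint8_t *resp, size_t resp_space);

static void put_le32(uint8_t *buf, size_t ofst, uint32_t v)
{
    buf[ofst] = (uint8_t)v;
    buf[ofst + 1] = (uint8_t)(v >> 8);
    buf[ofst + 2] = (uint8_t)(v >> 16);
    buf[ofst + 3] = (uint8_t)(v >> 24);
}

static uint32_t get_le32(const uint8_t *buf, size_t ofst)
{
    return (uint32_t)buf[ofst] | ((uint32_t)buf[ofst + 1] << 8) |
           ((uint32_t)buf[ofst + 2] << 16) | ((uint32_t)buf[ofst + 3] << 24);
}

/* Assemble the full EXTENSION_KVS block, one MCDI response at a time.
 * Returns the number of bytes written to 'out', or a negative error. */
static int fetch_publisher_meta(uint32_t handle, uint8_t *out, size_t out_space)
{
    uint8_t req[MC_CMD_PLUGIN_GET_META_PUBLISHER_IN_LEN];
    uint8_t resp[MC_CMD_PLUGIN_GET_META_PUBLISHER_OUT_LENMAX];
    uint32_t total = 0, offset = 0;

    do {
        int rc, chunk;

        put_le32(req, MC_CMD_PLUGIN_GET_META_PUBLISHER_IN_HANDLE_OFST, handle);
        put_le32(req, MC_CMD_PLUGIN_GET_META_PUBLISHER_IN_SUBTYPE_OFST,
                 MC_CMD_PLUGIN_GET_META_PUBLISHER_IN_EXTENSION_KVS);
        put_le32(req, MC_CMD_PLUGIN_GET_META_PUBLISHER_IN_OFFSET_OFST, offset);

        rc = mcdi_rpc(MC_CMD_PLUGIN_GET_META_PUBLISHER,
                      req, sizeof(req), resp, sizeof(resp));
        if (rc < 0)
            return rc;

        total = get_le32(resp, MC_CMD_PLUGIN_GET_META_PUBLISHER_OUT_TOTAL_SIZE_OFST);
        if (total > out_space)
            return -1;                  /* caller's buffer too small */

        chunk = MC_CMD_PLUGIN_GET_META_PUBLISHER_OUT_DATA_NUM(rc);
        if (chunk <= 0)
            break;                      /* no more data returned */
        if ((uint32_t)chunk > total - offset)
            chunk = (int)(total - offset);
        memcpy(out + offset,
               resp + MC_CMD_PLUGIN_GET_META_PUBLISHER_OUT_DATA_OFST, chunk);
        offset += (uint32_t)chunk;
    } while (offset < total);

    return (int)offset;
}
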
-
-/***********************************/
-/* MC_CMD_PLUGIN_GET_META_MSG
- * Returns the simple metadata for a specific plugin request message. This
- * supplies information necessary for the host to know how to build an
- * MC_CMD_PLUGIN_REQ request.
- */
-#define MC_CMD_PLUGIN_GET_META_MSG 0x1b1
-#undef MC_CMD_0x1b1_PRIVILEGE_CTG
-
-#define MC_CMD_0x1b1_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_PLUGIN_GET_META_MSG_IN msgrequest */
-#define MC_CMD_PLUGIN_GET_META_MSG_IN_LEN 8
-/* Handle returned by MC_CMD_PLUGIN_ALLOC_OUT */
-#define MC_CMD_PLUGIN_GET_META_MSG_IN_HANDLE_OFST 0
-#define MC_CMD_PLUGIN_GET_META_MSG_IN_HANDLE_LEN 4
-/* Unique message ID to obtain */
-#define MC_CMD_PLUGIN_GET_META_MSG_IN_ID_OFST 4
-#define MC_CMD_PLUGIN_GET_META_MSG_IN_ID_LEN 4
-
-/* MC_CMD_PLUGIN_GET_META_MSG_OUT msgresponse */
-#define MC_CMD_PLUGIN_GET_META_MSG_OUT_LEN 44
-/* Unique message ID. This is the same value as the input parameter; it exists
- * to allow future MCDI extensions which enumerate all messages.
- */
-#define MC_CMD_PLUGIN_GET_META_MSG_OUT_ID_OFST 0
-#define MC_CMD_PLUGIN_GET_META_MSG_OUT_ID_LEN 4
-/* Packed index number of this message, assigned by the MC to give each message
- * a unique ID in an array to allow for more efficient storage/management.
- */
-#define MC_CMD_PLUGIN_GET_META_MSG_OUT_INDEX_OFST 4
-#define MC_CMD_PLUGIN_GET_META_MSG_OUT_INDEX_LEN 4
-/* Short human-readable codename for this message. This is conventionally
- * formatted as a C identifier in the basic ASCII character set with any spare
- * bytes at the end set to 0; however, this convention is not enforced by the
- * MC, so consumers must check for all potential malformations before using it
- * for a trusted purpose.
- */
-#define MC_CMD_PLUGIN_GET_META_MSG_OUT_NAME_OFST 8
-#define MC_CMD_PLUGIN_GET_META_MSG_OUT_NAME_LEN 32
-/* Number of bytes of data which must be passed from the host kernel to the MC
- * for this message's payload, and which are passed back again in the response.
- * The MC's plugin metadata loader will have validated that the number of bytes
- * specified here will fit in to MC_CMD_PLUGIN_REQ_IN_DATA in a single MCDI
- * message.
- */
-#define MC_CMD_PLUGIN_GET_META_MSG_OUT_DATA_SIZE_OFST 40
-#define MC_CMD_PLUGIN_GET_META_MSG_OUT_DATA_SIZE_LEN 4
-
-/* PLUGIN_EXTENSION structuredef: Used within MC_CMD_PLUGIN_GET_ALL to describe
- * an individual extension.
- */
-#define PLUGIN_EXTENSION_LEN 20
-#define PLUGIN_EXTENSION_UUID_OFST 0
-#define PLUGIN_EXTENSION_UUID_LEN 16
-#define PLUGIN_EXTENSION_UUID_LBN 0
-#define PLUGIN_EXTENSION_UUID_WIDTH 128
-#define PLUGIN_EXTENSION_ADMIN_GROUP_OFST 16
-#define PLUGIN_EXTENSION_ADMIN_GROUP_LEN 1
-#define PLUGIN_EXTENSION_ADMIN_GROUP_LBN 128
-#define PLUGIN_EXTENSION_ADMIN_GROUP_WIDTH 8
-#define PLUGIN_EXTENSION_FLAG_ENABLED_LBN 136
-#define PLUGIN_EXTENSION_FLAG_ENABLED_WIDTH 1
-#define PLUGIN_EXTENSION_RESERVED_LBN 137
-#define PLUGIN_EXTENSION_RESERVED_WIDTH 23
-
-
-/***********************************/
-/* MC_CMD_PLUGIN_GET_ALL
- * Returns a list of all plugin extensions currently loaded and available. The
- * UUIDs returned can be passed to MC_CMD_PLUGIN_ALLOC in order to obtain more
- * detailed metadata via the MC_CMD_PLUGIN_GET_META_* family of requests. The
- * ADMIN_GROUP field indicates how extensions are grouped into units which are
- * loaded/unloaded together; extensions with the same value are in the same
- * group.
- */
-#define MC_CMD_PLUGIN_GET_ALL 0x1b2
-#undef MC_CMD_0x1b2_PRIVILEGE_CTG
-
-#define MC_CMD_0x1b2_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_PLUGIN_GET_ALL_IN msgrequest */
-#define MC_CMD_PLUGIN_GET_ALL_IN_LEN 4
-/* Additional options for querying. Note that if neither FLAG_INCLUDE_ENABLED
- * nor FLAG_INCLUDE_DISABLED is specified then the result set will be empty.
- */
-#define MC_CMD_PLUGIN_GET_ALL_IN_FLAGS_OFST 0
-#define MC_CMD_PLUGIN_GET_ALL_IN_FLAGS_LEN 4
-#define MC_CMD_PLUGIN_GET_ALL_IN_FLAG_INCLUDE_ENABLED_OFST 0
-#define MC_CMD_PLUGIN_GET_ALL_IN_FLAG_INCLUDE_ENABLED_LBN 0
-#define MC_CMD_PLUGIN_GET_ALL_IN_FLAG_INCLUDE_ENABLED_WIDTH 1
-#define MC_CMD_PLUGIN_GET_ALL_IN_FLAG_INCLUDE_DISABLED_OFST 0
-#define MC_CMD_PLUGIN_GET_ALL_IN_FLAG_INCLUDE_DISABLED_LBN 1
-#define MC_CMD_PLUGIN_GET_ALL_IN_FLAG_INCLUDE_DISABLED_WIDTH 1
-
-/* MC_CMD_PLUGIN_GET_ALL_OUT msgresponse */
-#define MC_CMD_PLUGIN_GET_ALL_OUT_LENMIN 0
-#define MC_CMD_PLUGIN_GET_ALL_OUT_LENMAX 240
-#define MC_CMD_PLUGIN_GET_ALL_OUT_LENMAX_MCDI2 1020
-#define MC_CMD_PLUGIN_GET_ALL_OUT_LEN(num) (0+20*(num))
-#define MC_CMD_PLUGIN_GET_ALL_OUT_EXTENSIONS_NUM(len) (((len)-0)/20)
-/* The list of available plugin extensions, as an array of PLUGIN_EXTENSION
- * structs.
- */
-#define MC_CMD_PLUGIN_GET_ALL_OUT_EXTENSIONS_OFST 0
-#define MC_CMD_PLUGIN_GET_ALL_OUT_EXTENSIONS_LEN 20
-#define MC_CMD_PLUGIN_GET_ALL_OUT_EXTENSIONS_MINNUM 0
-#define MC_CMD_PLUGIN_GET_ALL_OUT_EXTENSIONS_MAXNUM 12
-#define MC_CMD_PLUGIN_GET_ALL_OUT_EXTENSIONS_MAXNUM_MCDI2 51
-
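A sketch (illustrative only, not from the sfc driver) of walking the PLUGIN_EXTENSION entries in a raw GET_ALL response; it also shows how the LBN values translate to byte/bit positions within each 20-byte entry.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Visit each PLUGIN_EXTENSION entry in a raw PLUGIN_GET_ALL response;
 * 'resp_len' is the actual MCDI response length in bytes. */
static void for_each_extension(const uint8_t *resp, size_t resp_len,
                               void (*cb)(const uint8_t uuid[16],
                                          uint8_t admin_group, bool enabled))
{
    size_t i, num = MC_CMD_PLUGIN_GET_ALL_OUT_EXTENSIONS_NUM(resp_len);

    for (i = 0; i < num; i++) {
        const uint8_t *ext = resp + MC_CMD_PLUGIN_GET_ALL_OUT_EXTENSIONS_OFST +
                             i * PLUGIN_EXTENSION_LEN;
        /* FLAG_ENABLED is bit 136 of the structure, i.e. bit 0 of byte 17 */
        bool enabled = ext[PLUGIN_EXTENSION_FLAG_ENABLED_LBN / 8] &
                       (1u << (PLUGIN_EXTENSION_FLAG_ENABLED_LBN % 8));

        cb(ext + PLUGIN_EXTENSION_UUID_OFST,
           ext[PLUGIN_EXTENSION_ADMIN_GROUP_OFST], enabled);
    }
}
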
-
-/***********************************/
-/* MC_CMD_PLUGIN_REQ
- * Send a command to a plugin. A plugin may define an arbitrary number of
- * 'messages' which it allows applications on the host system to send, each
- * identified by a 32-bit ID.
- */
-#define MC_CMD_PLUGIN_REQ 0x1b3
-#undef MC_CMD_0x1b3_PRIVILEGE_CTG
-
-#define MC_CMD_0x1b3_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_PLUGIN_REQ_IN msgrequest */
-#define MC_CMD_PLUGIN_REQ_IN_LENMIN 8
-#define MC_CMD_PLUGIN_REQ_IN_LENMAX 252
-#define MC_CMD_PLUGIN_REQ_IN_LENMAX_MCDI2 1020
-#define MC_CMD_PLUGIN_REQ_IN_LEN(num) (8+1*(num))
-#define MC_CMD_PLUGIN_REQ_IN_DATA_NUM(len) (((len)-8)/1)
-/* Handle returned by MC_CMD_PLUGIN_ALLOC_OUT */
-#define MC_CMD_PLUGIN_REQ_IN_HANDLE_OFST 0
-#define MC_CMD_PLUGIN_REQ_IN_HANDLE_LEN 4
-/* Message ID defined by the plugin author */
-#define MC_CMD_PLUGIN_REQ_IN_ID_OFST 4
-#define MC_CMD_PLUGIN_REQ_IN_ID_LEN 4
-/* Data blob being the parameter to the message. This must be of the length
 - * specified by MC_CMD_PLUGIN_GET_META_MSG_OUT_DATA_SIZE.
- */
-#define MC_CMD_PLUGIN_REQ_IN_DATA_OFST 8
-#define MC_CMD_PLUGIN_REQ_IN_DATA_LEN 1
-#define MC_CMD_PLUGIN_REQ_IN_DATA_MINNUM 0
-#define MC_CMD_PLUGIN_REQ_IN_DATA_MAXNUM 244
-#define MC_CMD_PLUGIN_REQ_IN_DATA_MAXNUM_MCDI2 1012
-
-/* MC_CMD_PLUGIN_REQ_OUT msgresponse */
-#define MC_CMD_PLUGIN_REQ_OUT_LENMIN 0
-#define MC_CMD_PLUGIN_REQ_OUT_LENMAX 252
-#define MC_CMD_PLUGIN_REQ_OUT_LENMAX_MCDI2 1020
-#define MC_CMD_PLUGIN_REQ_OUT_LEN(num) (0+1*(num))
-#define MC_CMD_PLUGIN_REQ_OUT_DATA_NUM(len) (((len)-0)/1)
-/* The input data, as transformed and/or updated by the plugin's eBPF. Will be
- * the same size as the input DATA parameter.
- */
-#define MC_CMD_PLUGIN_REQ_OUT_DATA_OFST 0
-#define MC_CMD_PLUGIN_REQ_OUT_DATA_LEN 1
-#define MC_CMD_PLUGIN_REQ_OUT_DATA_MINNUM 0
-#define MC_CMD_PLUGIN_REQ_OUT_DATA_MAXNUM 252
-#define MC_CMD_PLUGIN_REQ_OUT_DATA_MAXNUM_MCDI2 1020
-
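A sketch of building the request payload for this command; the put_le32() helper is invented for illustration, and the data length is assumed to be the size reported for the message by MC_CMD_PLUGIN_GET_META_MSG (DATA_SIZE).

#include <stddef.h>
#include <stdint.h>
#include <string.h>

static void put_le32(uint8_t *buf, size_t ofst, uint32_t v)
{
    buf[ofst] = (uint8_t)v;
    buf[ofst + 1] = (uint8_t)(v >> 8);
    buf[ofst + 2] = (uint8_t)(v >> 16);
    buf[ofst + 3] = (uint8_t)(v >> 24);
}

/* Build an MC_CMD_PLUGIN_REQ payload in 'req'; returns the total request
 * length to hand to the MCDI transport, or 0 if the data does not fit. */
static size_t build_plugin_req(uint8_t *req, size_t req_space,
                               uint32_t handle, uint32_t msg_id,
                               const void *data, size_t data_len)
{
    size_t req_len = MC_CMD_PLUGIN_REQ_IN_LEN(data_len);

    if (data_len > MC_CMD_PLUGIN_REQ_IN_DATA_MAXNUM || req_len > req_space)
        return 0;
    put_le32(req, MC_CMD_PLUGIN_REQ_IN_HANDLE_OFST, handle);
    put_le32(req, MC_CMD_PLUGIN_REQ_IN_ID_OFST, msg_id);
    memcpy(req + MC_CMD_PLUGIN_REQ_IN_DATA_OFST, data, data_len);
    return req_len;
}
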
-/* DESC_ADDR_REGION structuredef: Describes a contiguous region of DESC_ADDR
- * space that maps to a contiguous region of TRGT_ADDR space. Addresses
- * DESC_ADDR in the range [DESC_ADDR_BASE:DESC_ADDR_BASE + 1 <<
- * WINDOW_SIZE_LOG2) map to TRGT_ADDR = DESC_ADDR - DESC_ADDR_BASE +
- * TRGT_ADDR_BASE.
- */
-#define DESC_ADDR_REGION_LEN 32
-/* The start of the region in DESC_ADDR space. */
-#define DESC_ADDR_REGION_DESC_ADDR_BASE_OFST 0
-#define DESC_ADDR_REGION_DESC_ADDR_BASE_LEN 8
-#define DESC_ADDR_REGION_DESC_ADDR_BASE_LO_OFST 0
-#define DESC_ADDR_REGION_DESC_ADDR_BASE_LO_LEN 4
-#define DESC_ADDR_REGION_DESC_ADDR_BASE_LO_LBN 0
-#define DESC_ADDR_REGION_DESC_ADDR_BASE_LO_WIDTH 32
-#define DESC_ADDR_REGION_DESC_ADDR_BASE_HI_OFST 4
-#define DESC_ADDR_REGION_DESC_ADDR_BASE_HI_LEN 4
-#define DESC_ADDR_REGION_DESC_ADDR_BASE_HI_LBN 32
-#define DESC_ADDR_REGION_DESC_ADDR_BASE_HI_WIDTH 32
-#define DESC_ADDR_REGION_DESC_ADDR_BASE_LBN 0
-#define DESC_ADDR_REGION_DESC_ADDR_BASE_WIDTH 64
-/* The start of the region in TRGT_ADDR space. Drivers can set this via
- * MC_CMD_SET_DESC_ADDR_REGIONS.
- */
-#define DESC_ADDR_REGION_TRGT_ADDR_BASE_OFST 8
-#define DESC_ADDR_REGION_TRGT_ADDR_BASE_LEN 8
-#define DESC_ADDR_REGION_TRGT_ADDR_BASE_LO_OFST 8
-#define DESC_ADDR_REGION_TRGT_ADDR_BASE_LO_LEN 4
-#define DESC_ADDR_REGION_TRGT_ADDR_BASE_LO_LBN 64
-#define DESC_ADDR_REGION_TRGT_ADDR_BASE_LO_WIDTH 32
-#define DESC_ADDR_REGION_TRGT_ADDR_BASE_HI_OFST 12
-#define DESC_ADDR_REGION_TRGT_ADDR_BASE_HI_LEN 4
-#define DESC_ADDR_REGION_TRGT_ADDR_BASE_HI_LBN 96
-#define DESC_ADDR_REGION_TRGT_ADDR_BASE_HI_WIDTH 32
-#define DESC_ADDR_REGION_TRGT_ADDR_BASE_LBN 64
-#define DESC_ADDR_REGION_TRGT_ADDR_BASE_WIDTH 64
-/* The size of the region. */
-#define DESC_ADDR_REGION_WINDOW_SIZE_LOG2_OFST 16
-#define DESC_ADDR_REGION_WINDOW_SIZE_LOG2_LEN 4
-#define DESC_ADDR_REGION_WINDOW_SIZE_LOG2_LBN 128
-#define DESC_ADDR_REGION_WINDOW_SIZE_LOG2_WIDTH 32
-/* The alignment restriction on TRGT_ADDR. TRGT_ADDR values set by the driver
- * must be a multiple of 1 << TRGT_ADDR_ALIGN_LOG2.
- */
-#define DESC_ADDR_REGION_TRGT_ADDR_ALIGN_LOG2_OFST 20
-#define DESC_ADDR_REGION_TRGT_ADDR_ALIGN_LOG2_LEN 4
-#define DESC_ADDR_REGION_TRGT_ADDR_ALIGN_LOG2_LBN 160
-#define DESC_ADDR_REGION_TRGT_ADDR_ALIGN_LOG2_WIDTH 32
-#define DESC_ADDR_REGION_RSVD_OFST 24
-#define DESC_ADDR_REGION_RSVD_LEN 8
-#define DESC_ADDR_REGION_RSVD_LO_OFST 24
-#define DESC_ADDR_REGION_RSVD_LO_LEN 4
-#define DESC_ADDR_REGION_RSVD_LO_LBN 192
-#define DESC_ADDR_REGION_RSVD_LO_WIDTH 32
-#define DESC_ADDR_REGION_RSVD_HI_OFST 28
-#define DESC_ADDR_REGION_RSVD_HI_LEN 4
-#define DESC_ADDR_REGION_RSVD_HI_LBN 224
-#define DESC_ADDR_REGION_RSVD_HI_WIDTH 32
-#define DESC_ADDR_REGION_RSVD_LBN 192
-#define DESC_ADDR_REGION_RSVD_WIDTH 64
-
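The mapping rule in the comment above, written out as arithmetic. The fields are assumed to have already been extracted from the structure into plain integers; this is only an illustrative sketch.

#include <stdbool.h>
#include <stdint.h>

/* DESC_ADDR values inside [DESC_ADDR_BASE, DESC_ADDR_BASE + (1 <<
 * WINDOW_SIZE_LOG2)) translate to DESC_ADDR - DESC_ADDR_BASE +
 * TRGT_ADDR_BASE; anything else is not covered by this region. */
static bool desc_addr_translate(uint64_t desc_addr_base, uint64_t trgt_addr_base,
                                uint32_t window_size_log2,
                                uint64_t desc_addr, uint64_t *trgt_addr)
{
    uint64_t window = (uint64_t)1 << window_size_log2;

    if (desc_addr < desc_addr_base || desc_addr - desc_addr_base >= window)
        return false;
    *trgt_addr = desc_addr - desc_addr_base + trgt_addr_base;
    return true;
}
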
-
-/***********************************/
/* MC_CMD_GET_DESC_ADDR_INFO
* Returns a description of the mapping from DESC_ADDR to TRGT_ADDR for the calling function's address space.
*/
@@ -24836,122 +22037,6 @@
/***********************************/
-/* MC_CMD_GET_BOARD_ATTR
 - * Retrieve physical build-level board attributes as configured at the
 - * manufacturing stage. Fields originate from EEPROM and per-platform constants
 - * in firmware. Fields are used in development to identify/differentiate boards
 - * based on build levels/parameters, and also in manufacturing to cross-check
 - * that "what was programmed in manufacturing" is the same as "what firmware
 - * thinks has been programmed", as there are two layers of translation within
 - * firmware before the attributes reach this MCDI handler. Some parameters are
- * retrieved as part of other commands and therefore not replicated here. See
- * GET_VERSION_OUT.
- */
-#define MC_CMD_GET_BOARD_ATTR 0x1c6
-#undef MC_CMD_0x1c6_PRIVILEGE_CTG
-
-#define MC_CMD_0x1c6_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_GET_BOARD_ATTR_IN msgrequest */
-#define MC_CMD_GET_BOARD_ATTR_IN_LEN 0
-
-/* MC_CMD_GET_BOARD_ATTR_OUT msgresponse */
-#define MC_CMD_GET_BOARD_ATTR_OUT_LEN 16
-/* Defines board capabilities and validity of attributes returned in this
- * response-message.
- */
-#define MC_CMD_GET_BOARD_ATTR_OUT_FLAGS_OFST 0
-#define MC_CMD_GET_BOARD_ATTR_OUT_FLAGS_LEN 4
-#define MC_CMD_GET_BOARD_ATTR_OUT_HAS_FAN_OFST 0
-#define MC_CMD_GET_BOARD_ATTR_OUT_HAS_FAN_LBN 0
-#define MC_CMD_GET_BOARD_ATTR_OUT_HAS_FAN_WIDTH 1
-#define MC_CMD_GET_BOARD_ATTR_OUT_HAS_SOC_OFST 0
-#define MC_CMD_GET_BOARD_ATTR_OUT_HAS_SOC_LBN 1
-#define MC_CMD_GET_BOARD_ATTR_OUT_HAS_SOC_WIDTH 1
-#define MC_CMD_GET_BOARD_ATTR_OUT_HAS_AUX_POWER_OFST 0
-#define MC_CMD_GET_BOARD_ATTR_OUT_HAS_AUX_POWER_LBN 2
-#define MC_CMD_GET_BOARD_ATTR_OUT_HAS_AUX_POWER_WIDTH 1
-#define MC_CMD_GET_BOARD_ATTR_OUT_ATTRIBUTES_OFST 4
-#define MC_CMD_GET_BOARD_ATTR_OUT_ATTRIBUTES_LEN 4
-#define MC_CMD_GET_BOARD_ATTR_OUT_SOC_EE_OFST 4
-#define MC_CMD_GET_BOARD_ATTR_OUT_SOC_EE_LBN 0
-#define MC_CMD_GET_BOARD_ATTR_OUT_SOC_EE_WIDTH 1
-#define MC_CMD_GET_BOARD_ATTR_OUT_SUC_EE_OFST 4
-#define MC_CMD_GET_BOARD_ATTR_OUT_SUC_EE_LBN 1
-#define MC_CMD_GET_BOARD_ATTR_OUT_SUC_EE_WIDTH 1
-#define MC_CMD_GET_BOARD_ATTR_OUT_FPGA_VOLTAGES_SUPPORTED_OFST 4
-#define MC_CMD_GET_BOARD_ATTR_OUT_FPGA_VOLTAGES_SUPPORTED_LBN 16
-#define MC_CMD_GET_BOARD_ATTR_OUT_FPGA_VOLTAGES_SUPPORTED_WIDTH 8
-/* enum: The FPGA voltage on the adapter can be set to low */
-#define MC_CMD_FPGA_VOLTAGE_LOW 0x0
-/* enum: The FPGA voltage on the adapter can be set to regular */
-#define MC_CMD_FPGA_VOLTAGE_REG 0x1
-/* enum: The FPGA voltage on the adapter can be set to high */
-#define MC_CMD_FPGA_VOLTAGE_HIGH 0x2
-#define MC_CMD_GET_BOARD_ATTR_OUT_CAGE_COUNT_OFST 4
-#define MC_CMD_GET_BOARD_ATTR_OUT_CAGE_COUNT_LBN 24
-#define MC_CMD_GET_BOARD_ATTR_OUT_CAGE_COUNT_WIDTH 8
-/* An array of cage types on the board */
-#define MC_CMD_GET_BOARD_ATTR_OUT_CAGE_TYPE_OFST 8
-#define MC_CMD_GET_BOARD_ATTR_OUT_CAGE_TYPE_LEN 1
-#define MC_CMD_GET_BOARD_ATTR_OUT_CAGE_TYPE_NUM 8
-/* enum: The cages are not known */
-#define MC_CMD_GET_BOARD_ATTR_OUT_CAGE_TYPE_UNKNOWN 0x0
-/* enum: The cages are SFP/SFP+ */
-#define MC_CMD_GET_BOARD_ATTR_OUT_CAGE_TYPE_SFP 0x1
-/* enum: The cages are QSFP/QSFP+ */
-#define MC_CMD_GET_BOARD_ATTR_OUT_CAGE_TYPE_QSFP 0x2
-
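A sketch (illustrative only, not from the sfc driver) of pulling the packed cage description out of a raw GET_BOARD_ATTR response, using the LBN/WIDTH values to extract CAGE_COUNT from the ATTRIBUTES word and then walking the CAGE_TYPE byte array. get_le32() is invented for the sketch and the payload is assumed little-endian.

#include <stddef.h>
#include <stdint.h>

static uint32_t get_le32(const uint8_t *buf, size_t ofst)
{
    return (uint32_t)buf[ofst] | ((uint32_t)buf[ofst + 1] << 8) |
           ((uint32_t)buf[ofst + 2] << 16) | ((uint32_t)buf[ofst + 3] << 24);
}

/* Count the QSFP/QSFP+ cages reported by a raw GET_BOARD_ATTR response. */
static unsigned int count_qsfp_cages(const uint8_t *resp)
{
    uint32_t attrs = get_le32(resp, MC_CMD_GET_BOARD_ATTR_OUT_ATTRIBUTES_OFST);
    unsigned int count = (attrs >> MC_CMD_GET_BOARD_ATTR_OUT_CAGE_COUNT_LBN) &
                         ((1u << MC_CMD_GET_BOARD_ATTR_OUT_CAGE_COUNT_WIDTH) - 1);
    unsigned int i, qsfp = 0;

    if (count > MC_CMD_GET_BOARD_ATTR_OUT_CAGE_TYPE_NUM)
        count = MC_CMD_GET_BOARD_ATTR_OUT_CAGE_TYPE_NUM;
    for (i = 0; i < count; i++)
        if (resp[MC_CMD_GET_BOARD_ATTR_OUT_CAGE_TYPE_OFST + i] ==
            MC_CMD_GET_BOARD_ATTR_OUT_CAGE_TYPE_QSFP)
            qsfp++;
    return qsfp;
}
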
-
-/***********************************/
-/* MC_CMD_GET_SOC_STATE
- * Retrieve current state of the System-on-Chip. This command is valid when
- * MC_CMD_GET_BOARD_ATTR:HAS_SOC is set.
- */
-#define MC_CMD_GET_SOC_STATE 0x1c7
-#undef MC_CMD_0x1c7_PRIVILEGE_CTG
-
-#define MC_CMD_0x1c7_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_GET_SOC_STATE_IN msgrequest */
-#define MC_CMD_GET_SOC_STATE_IN_LEN 0
-
-/* MC_CMD_GET_SOC_STATE_OUT msgresponse */
-#define MC_CMD_GET_SOC_STATE_OUT_LEN 12
-/* Status flags for the SoC */
-#define MC_CMD_GET_SOC_STATE_OUT_FLAGS_OFST 0
-#define MC_CMD_GET_SOC_STATE_OUT_FLAGS_LEN 4
-#define MC_CMD_GET_SOC_STATE_OUT_SHOULD_THROTTLE_OFST 0
-#define MC_CMD_GET_SOC_STATE_OUT_SHOULD_THROTTLE_LBN 0
-#define MC_CMD_GET_SOC_STATE_OUT_SHOULD_THROTTLE_WIDTH 1
-#define MC_CMD_GET_SOC_STATE_OUT_OS_RECOVERY_REQUIRED_OFST 0
-#define MC_CMD_GET_SOC_STATE_OUT_OS_RECOVERY_REQUIRED_LBN 1
-#define MC_CMD_GET_SOC_STATE_OUT_OS_RECOVERY_REQUIRED_WIDTH 1
-#define MC_CMD_GET_SOC_STATE_OUT_WDT_FIRED_OFST 0
-#define MC_CMD_GET_SOC_STATE_OUT_WDT_FIRED_LBN 2
-#define MC_CMD_GET_SOC_STATE_OUT_WDT_FIRED_WIDTH 1
-/* Status fields for the SoC */
-#define MC_CMD_GET_SOC_STATE_OUT_ATTRIBUTES_OFST 4
-#define MC_CMD_GET_SOC_STATE_OUT_ATTRIBUTES_LEN 4
-#define MC_CMD_GET_SOC_STATE_OUT_RUN_STATE_OFST 4
-#define MC_CMD_GET_SOC_STATE_OUT_RUN_STATE_LBN 0
-#define MC_CMD_GET_SOC_STATE_OUT_RUN_STATE_WIDTH 8
-/* enum: Power on (set by SUC on power up) */
-#define MC_CMD_GET_SOC_STATE_OUT_SOC_BOOT 0x0
-/* enum: Running bootloader */
-#define MC_CMD_GET_SOC_STATE_OUT_SOC_BOOTLOADER 0x1
-/* enum: Bootloader has started OS. OS is booting */
-#define MC_CMD_GET_SOC_STATE_OUT_SOC_OS_START 0x2
-/* enum: OS is running */
-#define MC_CMD_GET_SOC_STATE_OUT_SOC_OS_RUNNING 0x3
-/* enum: Maintenance OS is running */
-#define MC_CMD_GET_SOC_STATE_OUT_SOC_OS_MAINTENANCE 0x4
-/* Number of SoC resets since power on */
-#define MC_CMD_GET_SOC_STATE_OUT_RESET_COUNT_OFST 8
-#define MC_CMD_GET_SOC_STATE_OUT_RESET_COUNT_LEN 4
-
-
-/***********************************/
/* MC_CMD_CHECK_SCHEDULER_CREDITS
* For debugging purposes. For each source and destination node in the hardware
* schedulers, check whether the number of credits is as it should be. This
@@ -25010,76 +22095,6 @@
/***********************************/
-/* MC_CMD_TXQ_STATS
- * Query per-TXQ statistics.
- */
-#define MC_CMD_TXQ_STATS 0x1d5
-#undef MC_CMD_0x1d5_PRIVILEGE_CTG
-
-#define MC_CMD_0x1d5_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_TXQ_STATS_IN msgrequest */
-#define MC_CMD_TXQ_STATS_IN_LEN 8
-/* Instance of TXQ to retrieve statistics for */
-#define MC_CMD_TXQ_STATS_IN_INSTANCE_OFST 0
-#define MC_CMD_TXQ_STATS_IN_INSTANCE_LEN 4
-/* Flags for the request */
-#define MC_CMD_TXQ_STATS_IN_FLAGS_OFST 4
-#define MC_CMD_TXQ_STATS_IN_FLAGS_LEN 4
-#define MC_CMD_TXQ_STATS_IN_CLEAR_OFST 4
-#define MC_CMD_TXQ_STATS_IN_CLEAR_LBN 0
-#define MC_CMD_TXQ_STATS_IN_CLEAR_WIDTH 1
-
-/* MC_CMD_TXQ_STATS_OUT msgresponse */
-#define MC_CMD_TXQ_STATS_OUT_LENMIN 0
-#define MC_CMD_TXQ_STATS_OUT_LENMAX 248
-#define MC_CMD_TXQ_STATS_OUT_LENMAX_MCDI2 1016
-#define MC_CMD_TXQ_STATS_OUT_LEN(num) (0+8*(num))
-#define MC_CMD_TXQ_STATS_OUT_STATISTICS_NUM(len) (((len)-0)/8)
-#define MC_CMD_TXQ_STATS_OUT_STATISTICS_OFST 0
-#define MC_CMD_TXQ_STATS_OUT_STATISTICS_LEN 8
-#define MC_CMD_TXQ_STATS_OUT_STATISTICS_LO_OFST 0
-#define MC_CMD_TXQ_STATS_OUT_STATISTICS_LO_LEN 4
-#define MC_CMD_TXQ_STATS_OUT_STATISTICS_LO_LBN 0
-#define MC_CMD_TXQ_STATS_OUT_STATISTICS_LO_WIDTH 32
-#define MC_CMD_TXQ_STATS_OUT_STATISTICS_HI_OFST 4
-#define MC_CMD_TXQ_STATS_OUT_STATISTICS_HI_LEN 4
-#define MC_CMD_TXQ_STATS_OUT_STATISTICS_HI_LBN 32
-#define MC_CMD_TXQ_STATS_OUT_STATISTICS_HI_WIDTH 32
-#define MC_CMD_TXQ_STATS_OUT_STATISTICS_MINNUM 0
-#define MC_CMD_TXQ_STATS_OUT_STATISTICS_MAXNUM 31
-#define MC_CMD_TXQ_STATS_OUT_STATISTICS_MAXNUM_MCDI2 127
-#define MC_CMD_TXQ_STATS_CTPIO_MAX_FILL 0x0 /* enum */
-
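Each statistic is a 64-bit little-endian value stored as LO/HI dwords; the enum values (currently only CTPIO_MAX_FILL) index into the array. A minimal sketch of reading one statistic from a raw response, with get_le64() invented for illustration:

#include <stddef.h>
#include <stdint.h>

static uint64_t get_le64(const uint8_t *buf, size_t ofst)
{
    uint64_t v = 0;
    size_t i;

    for (i = 0; i < 8; i++)
        v |= (uint64_t)buf[ofst + i] << (8 * i);
    return v;
}

/* Read one 64-bit statistic from a raw TXQ_STATS response; 'index' is an
 * enum value such as MC_CMD_TXQ_STATS_CTPIO_MAX_FILL. Returns 0 if the
 * response does not contain that many statistics. */
static uint64_t txq_stat(const uint8_t *resp, size_t resp_len, unsigned int index)
{
    if (index >= MC_CMD_TXQ_STATS_OUT_STATISTICS_NUM(resp_len))
        return 0;
    return get_le64(resp, MC_CMD_TXQ_STATS_OUT_STATISTICS_OFST +
                          index * MC_CMD_TXQ_STATS_OUT_STATISTICS_LEN);
}
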
-/* FUNCTION_PERSONALITY structuredef: The meanings of the personalities are
- * defined in SF-120734-TC with more information in SF-122717-TC.
- */
-#define FUNCTION_PERSONALITY_LEN 4
-#define FUNCTION_PERSONALITY_ID_OFST 0
-#define FUNCTION_PERSONALITY_ID_LEN 4
-/* enum: Function has no assigned personality */
-#define FUNCTION_PERSONALITY_NULL 0x0
-/* enum: Function has an EF100-style function control window and VI windows
- * with both EF100 and vDPA doorbells.
- */
-#define FUNCTION_PERSONALITY_EF100 0x1
-/* enum: Function has virtio net device configuration registers and doorbells
- * for virtio queue pairs.
- */
-#define FUNCTION_PERSONALITY_VIRTIO_NET 0x2
-/* enum: Function has virtio block device configuration registers and a
- * doorbell for a single virtqueue.
- */
-#define FUNCTION_PERSONALITY_VIRTIO_BLK 0x3
-/* enum: Function is a Xilinx acceleration device - management function */
-#define FUNCTION_PERSONALITY_ACCEL_MGMT 0x4
-/* enum: Function is a Xilinx acceleration device - user function */
-#define FUNCTION_PERSONALITY_ACCEL_USR 0x5
-#define FUNCTION_PERSONALITY_ID_LBN 0
-#define FUNCTION_PERSONALITY_ID_WIDTH 32
-
-
-/***********************************/
/* MC_CMD_VIRTIO_GET_FEATURES
* Get a list of the virtio features supported by the device.
*/
@@ -25162,37 +22177,6 @@
/***********************************/
-/* MC_CMD_VIRTIO_GET_CAPABILITIES
- * Get virtio capabilities supported by the device. Returns general virtio
- * capabilities and limitations of the hardware / firmware implementation
- * (hardware device as a whole), rather than that of individual configured
- * virtio devices. At present, only the absolute maximum number of queues
 - * allowed on multi-queue devices is returned. The response is expected to be
 - * extended as necessary in the future.
- */
-#define MC_CMD_VIRTIO_GET_CAPABILITIES 0x1d3
-#undef MC_CMD_0x1d3_PRIVILEGE_CTG
-
-#define MC_CMD_0x1d3_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_VIRTIO_GET_CAPABILITIES_IN msgrequest */
-#define MC_CMD_VIRTIO_GET_CAPABILITIES_IN_LEN 4
-/* Type of device to get capabilities for. Matches the device id as defined by
- * the virtio spec.
- */
-#define MC_CMD_VIRTIO_GET_CAPABILITIES_IN_DEVICE_ID_OFST 0
-#define MC_CMD_VIRTIO_GET_CAPABILITIES_IN_DEVICE_ID_LEN 4
-/* Enum values, see field(s): */
-/* MC_CMD_VIRTIO_GET_FEATURES/MC_CMD_VIRTIO_GET_FEATURES_IN/DEVICE_ID */
-
-/* MC_CMD_VIRTIO_GET_CAPABILITIES_OUT msgresponse */
-#define MC_CMD_VIRTIO_GET_CAPABILITIES_OUT_LEN 4
-/* Maximum number of queues supported for a single device instance */
-#define MC_CMD_VIRTIO_GET_CAPABILITIES_OUT_MAX_QUEUES_OFST 0
-#define MC_CMD_VIRTIO_GET_CAPABILITIES_OUT_MAX_QUEUES_LEN 4
-
-
-/***********************************/
/* MC_CMD_VIRTIO_INIT_QUEUE
* Create a virtio virtqueue. Fails with EALREADY if the queue already exists.
* Fails with ENOSUP if a feature is requested that isn't supported. Fails with
@@ -25474,866 +22458,6 @@
#define PCIE_FUNCTION_INTF_LBN 32
#define PCIE_FUNCTION_INTF_WIDTH 32
-/* QUEUE_ID structuredef: Structure representing an absolute queue identifier
- * (absolute VI number + VI relative queue number). On Keystone, a VI can
- * contain multiple queues (at present, up to 2), each with separate controls
- * for direction. This structure is required to uniquely identify the absolute
- * source queue for descriptor proxy functions.
- */
-#define QUEUE_ID_LEN 4
-/* Absolute VI number */
-#define QUEUE_ID_ABS_VI_OFST 0
-#define QUEUE_ID_ABS_VI_LEN 2
-#define QUEUE_ID_ABS_VI_LBN 0
-#define QUEUE_ID_ABS_VI_WIDTH 16
-/* Relative queue number within the VI */
-#define QUEUE_ID_REL_QUEUE_LBN 16
-#define QUEUE_ID_REL_QUEUE_WIDTH 1
-#define QUEUE_ID_RESERVED_LBN 17
-#define QUEUE_ID_RESERVED_WIDTH 15
-
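A small sketch of packing and unpacking the 32-bit QUEUE_ID layout described above (illustrative only):

#include <stdint.h>

/* Pack an absolute VI number and a VI-relative queue number into the
 * 32-bit QUEUE_ID layout above (reserved bits left at zero). */
static uint32_t queue_id_pack(uint16_t abs_vi, unsigned int rel_queue)
{
    return ((uint32_t)abs_vi << QUEUE_ID_ABS_VI_LBN) |
           ((uint32_t)(rel_queue & 1) << QUEUE_ID_REL_QUEUE_LBN);
}

static void queue_id_unpack(uint32_t qid, uint16_t *abs_vi, unsigned int *rel_queue)
{
    *abs_vi = (uint16_t)((qid >> QUEUE_ID_ABS_VI_LBN) &
                         ((1u << QUEUE_ID_ABS_VI_WIDTH) - 1));
    *rel_queue = (qid >> QUEUE_ID_REL_QUEUE_LBN) & 1;
}
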
-
-/***********************************/
-/* MC_CMD_DESC_PROXY_FUNC_CREATE
 - * Descriptor proxy functions are abstract devices that forward all requests
 - * submitted to the host PCIe function (descriptors submitted to Virtio or
 - * EF100 queues) to be handled on another function (most commonly on the
 - * embedded Application Processor), via EF100 descriptor proxy, memory-to-
 - * memory and descriptor-to-completion mechanisms. The primary user is the
 - * Virtio-blk subsystem; see SF-122927-TC. This command allocates a new
 - * descriptor proxy function on the host and assigns a user-defined label. The
 - * actual function configuration is not persisted until the caller configures
 - * it with MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN and commits with
 - * MC_CMD_DESC_PROXY_FUNC_COMMIT_IN.
- */
-#define MC_CMD_DESC_PROXY_FUNC_CREATE 0x172
-#undef MC_CMD_0x172_PRIVILEGE_CTG
-
-#define MC_CMD_0x172_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_DESC_PROXY_FUNC_CREATE_IN msgrequest */
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_LEN 52
-/* PCIe Function ID to allocate (as struct PCIE_FUNCTION). Set to
 - * {PF_ANY,VF_ANY,interface} for "any available function", or to
 - * {PF_ANY,VF_NULL,interface} for "any available PF".
- */
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_OFST 0
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_LEN 8
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_LO_OFST 0
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_LO_LEN 4
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_LO_LBN 0
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_LO_WIDTH 32
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_HI_OFST 4
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_HI_LEN 4
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_HI_LBN 32
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_HI_WIDTH 32
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_PF_OFST 0
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_PF_LEN 2
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_VF_OFST 2
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_VF_LEN 2
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_INTF_OFST 4
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_INTF_LEN 4
-/* The personality to set. The meanings of the personalities are defined in
- * SF-120734-TC with more information in SF-122717-TC. At present, we only
- * support proxying for VIRTIO_BLK
- */
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_PERSONALITY_OFST 8
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_PERSONALITY_LEN 4
-/* Enum values, see field(s): */
-/* FUNCTION_PERSONALITY/ID */
-/* User-defined label (zero-terminated ASCII string) to uniquely identify the
- * function
- */
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_LABEL_OFST 12
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_LABEL_LEN 40
-
-/* MC_CMD_DESC_PROXY_FUNC_CREATE_OUT msgresponse */
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_LEN 12
-/* Handle to the descriptor proxy function */
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_HANDLE_OFST 0
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_HANDLE_LEN 4
-/* Allocated function ID (as struct PCIE_FUNCTION) */
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_OFST 4
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_LEN 8
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_LO_OFST 4
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_LO_LEN 4
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_LO_LBN 32
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_LO_WIDTH 32
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_HI_OFST 8
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_HI_LEN 4
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_HI_LBN 64
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_HI_WIDTH 32
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_PF_OFST 4
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_PF_LEN 2
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_VF_OFST 6
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_VF_LEN 2
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_INTF_OFST 8
-#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_INTF_LEN 4
-
-
-/***********************************/
-/* MC_CMD_DESC_PROXY_FUNC_DESTROY
 - * Remove an existing descriptor proxy function. The underlying function
 - * personality and configuration revert to factory defaults. The function
 - * configuration is committed immediately to the specified store and any
 - * function ownership is released.
- */
-#define MC_CMD_DESC_PROXY_FUNC_DESTROY 0x173
-#undef MC_CMD_0x173_PRIVILEGE_CTG
-
-#define MC_CMD_0x173_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_DESC_PROXY_FUNC_DESTROY_IN msgrequest */
-#define MC_CMD_DESC_PROXY_FUNC_DESTROY_IN_LEN 44
-/* User-defined label (zero-terminated ASCII string) to uniquely identify the
- * function
- */
-#define MC_CMD_DESC_PROXY_FUNC_DESTROY_IN_LABEL_OFST 0
-#define MC_CMD_DESC_PROXY_FUNC_DESTROY_IN_LABEL_LEN 40
-/* Store from which to remove function configuration */
-#define MC_CMD_DESC_PROXY_FUNC_DESTROY_IN_STORE_OFST 40
-#define MC_CMD_DESC_PROXY_FUNC_DESTROY_IN_STORE_LEN 4
-/* Enum values, see field(s): */
-/* MC_CMD_DESC_PROXY_FUNC_COMMIT/MC_CMD_DESC_PROXY_FUNC_COMMIT_IN/STORE */
-
-/* MC_CMD_DESC_PROXY_FUNC_DESTROY_OUT msgresponse */
-#define MC_CMD_DESC_PROXY_FUNC_DESTROY_OUT_LEN 0
-
-/* VIRTIO_BLK_CONFIG structuredef: Virtio block device configuration. See
- * Virtio specification v1.1, Sections 5.2.3 and 6 for definition of feature
- * bits. See Virtio specification v1.1, Section 5.2.4 (struct
- * virtio_blk_config) for definition of remaining configuration fields
- */
-#define VIRTIO_BLK_CONFIG_LEN 68
-/* Virtio block device features to advertise, per Virtio 1.1, 5.2.3 and 6 */
-#define VIRTIO_BLK_CONFIG_FEATURES_OFST 0
-#define VIRTIO_BLK_CONFIG_FEATURES_LEN 8
-#define VIRTIO_BLK_CONFIG_FEATURES_LO_OFST 0
-#define VIRTIO_BLK_CONFIG_FEATURES_LO_LEN 4
-#define VIRTIO_BLK_CONFIG_FEATURES_LO_LBN 0
-#define VIRTIO_BLK_CONFIG_FEATURES_LO_WIDTH 32
-#define VIRTIO_BLK_CONFIG_FEATURES_HI_OFST 4
-#define VIRTIO_BLK_CONFIG_FEATURES_HI_LEN 4
-#define VIRTIO_BLK_CONFIG_FEATURES_HI_LBN 32
-#define VIRTIO_BLK_CONFIG_FEATURES_HI_WIDTH 32
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_BARRIER_OFST 0
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_BARRIER_LBN 0
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_BARRIER_WIDTH 1
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_SIZE_MAX_OFST 0
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_SIZE_MAX_LBN 1
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_SIZE_MAX_WIDTH 1
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_SEG_MAX_OFST 0
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_SEG_MAX_LBN 2
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_SEG_MAX_WIDTH 1
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_GEOMETRY_OFST 0
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_GEOMETRY_LBN 4
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_GEOMETRY_WIDTH 1
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_RO_OFST 0
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_RO_LBN 5
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_RO_WIDTH 1
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_BLK_SIZE_OFST 0
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_BLK_SIZE_LBN 6
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_BLK_SIZE_WIDTH 1
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_SCSI_OFST 0
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_SCSI_LBN 7
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_SCSI_WIDTH 1
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_FLUSH_OFST 0
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_FLUSH_LBN 9
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_FLUSH_WIDTH 1
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_TOPOLOGY_OFST 0
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_TOPOLOGY_LBN 10
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_TOPOLOGY_WIDTH 1
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_CONFIG_WCE_OFST 0
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_CONFIG_WCE_LBN 11
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_CONFIG_WCE_WIDTH 1
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_MQ_OFST 0
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_MQ_LBN 12
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_MQ_WIDTH 1
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_DISCARD_OFST 0
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_DISCARD_LBN 13
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_DISCARD_WIDTH 1
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_WRITE_ZEROES_OFST 0
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_WRITE_ZEROES_LBN 14
-#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_WRITE_ZEROES_WIDTH 1
-#define VIRTIO_BLK_CONFIG_VIRTIO_F_RING_INDIRECT_DESC_OFST 0
-#define VIRTIO_BLK_CONFIG_VIRTIO_F_RING_INDIRECT_DESC_LBN 28
-#define VIRTIO_BLK_CONFIG_VIRTIO_F_RING_INDIRECT_DESC_WIDTH 1
-#define VIRTIO_BLK_CONFIG_VIRTIO_F_RING_EVENT_IDX_OFST 0
-#define VIRTIO_BLK_CONFIG_VIRTIO_F_RING_EVENT_IDX_LBN 29
-#define VIRTIO_BLK_CONFIG_VIRTIO_F_RING_EVENT_IDX_WIDTH 1
-#define VIRTIO_BLK_CONFIG_VIRTIO_F_VERSION_1_OFST 0
-#define VIRTIO_BLK_CONFIG_VIRTIO_F_VERSION_1_LBN 32
-#define VIRTIO_BLK_CONFIG_VIRTIO_F_VERSION_1_WIDTH 1
-#define VIRTIO_BLK_CONFIG_VIRTIO_F_ACCESS_PLATFORM_OFST 0
-#define VIRTIO_BLK_CONFIG_VIRTIO_F_ACCESS_PLATFORM_LBN 33
-#define VIRTIO_BLK_CONFIG_VIRTIO_F_ACCESS_PLATFORM_WIDTH 1
-#define VIRTIO_BLK_CONFIG_VIRTIO_F_RING_PACKED_OFST 0
-#define VIRTIO_BLK_CONFIG_VIRTIO_F_RING_PACKED_LBN 34
-#define VIRTIO_BLK_CONFIG_VIRTIO_F_RING_PACKED_WIDTH 1
-#define VIRTIO_BLK_CONFIG_VIRTIO_F_IN_ORDER_OFST 0
-#define VIRTIO_BLK_CONFIG_VIRTIO_F_IN_ORDER_LBN 35
-#define VIRTIO_BLK_CONFIG_VIRTIO_F_IN_ORDER_WIDTH 1
-#define VIRTIO_BLK_CONFIG_VIRTIO_F_ORDER_PLATFORM_OFST 0
-#define VIRTIO_BLK_CONFIG_VIRTIO_F_ORDER_PLATFORM_LBN 36
-#define VIRTIO_BLK_CONFIG_VIRTIO_F_ORDER_PLATFORM_WIDTH 1
-#define VIRTIO_BLK_CONFIG_VIRTIO_F_SR_IOV_OFST 0
-#define VIRTIO_BLK_CONFIG_VIRTIO_F_SR_IOV_LBN 37
-#define VIRTIO_BLK_CONFIG_VIRTIO_F_SR_IOV_WIDTH 1
-#define VIRTIO_BLK_CONFIG_VIRTIO_F_NOTIFICATION_DATA_OFST 0
-#define VIRTIO_BLK_CONFIG_VIRTIO_F_NOTIFICATION_DATA_LBN 38
-#define VIRTIO_BLK_CONFIG_VIRTIO_F_NOTIFICATION_DATA_WIDTH 1
-#define VIRTIO_BLK_CONFIG_FEATURES_LBN 0
-#define VIRTIO_BLK_CONFIG_FEATURES_WIDTH 64
-/* The capacity of the device (expressed in 512-byte sectors) */
-#define VIRTIO_BLK_CONFIG_CAPACITY_OFST 8
-#define VIRTIO_BLK_CONFIG_CAPACITY_LEN 8
-#define VIRTIO_BLK_CONFIG_CAPACITY_LO_OFST 8
-#define VIRTIO_BLK_CONFIG_CAPACITY_LO_LEN 4
-#define VIRTIO_BLK_CONFIG_CAPACITY_LO_LBN 64
-#define VIRTIO_BLK_CONFIG_CAPACITY_LO_WIDTH 32
-#define VIRTIO_BLK_CONFIG_CAPACITY_HI_OFST 12
-#define VIRTIO_BLK_CONFIG_CAPACITY_HI_LEN 4
-#define VIRTIO_BLK_CONFIG_CAPACITY_HI_LBN 96
-#define VIRTIO_BLK_CONFIG_CAPACITY_HI_WIDTH 32
-#define VIRTIO_BLK_CONFIG_CAPACITY_LBN 64
-#define VIRTIO_BLK_CONFIG_CAPACITY_WIDTH 64
-/* Maximum size of any single segment. Only valid when VIRTIO_BLK_F_SIZE_MAX is
- * set.
- */
-#define VIRTIO_BLK_CONFIG_SIZE_MAX_OFST 16
-#define VIRTIO_BLK_CONFIG_SIZE_MAX_LEN 4
-#define VIRTIO_BLK_CONFIG_SIZE_MAX_LBN 128
-#define VIRTIO_BLK_CONFIG_SIZE_MAX_WIDTH 32
-/* Maximum number of segments in a request. Only valid when
- * VIRTIO_BLK_F_SEG_MAX is set.
- */
-#define VIRTIO_BLK_CONFIG_SEG_MAX_OFST 20
-#define VIRTIO_BLK_CONFIG_SEG_MAX_LEN 4
-#define VIRTIO_BLK_CONFIG_SEG_MAX_LBN 160
-#define VIRTIO_BLK_CONFIG_SEG_MAX_WIDTH 32
-/* Disk-style geometry - cylinders. Only valid when VIRTIO_BLK_F_GEOMETRY is
- * set.
- */
-#define VIRTIO_BLK_CONFIG_CYLINDERS_OFST 24
-#define VIRTIO_BLK_CONFIG_CYLINDERS_LEN 2
-#define VIRTIO_BLK_CONFIG_CYLINDERS_LBN 192
-#define VIRTIO_BLK_CONFIG_CYLINDERS_WIDTH 16
-/* Disk-style geometry - heads. Only valid when VIRTIO_BLK_F_GEOMETRY is set.
- */
-#define VIRTIO_BLK_CONFIG_HEADS_OFST 26
-#define VIRTIO_BLK_CONFIG_HEADS_LEN 1
-#define VIRTIO_BLK_CONFIG_HEADS_LBN 208
-#define VIRTIO_BLK_CONFIG_HEADS_WIDTH 8
-/* Disk-style geometry - sectors. Only valid when VIRTIO_BLK_F_GEOMETRY is set.
- */
-#define VIRTIO_BLK_CONFIG_SECTORS_OFST 27
-#define VIRTIO_BLK_CONFIG_SECTORS_LEN 1
-#define VIRTIO_BLK_CONFIG_SECTORS_LBN 216
-#define VIRTIO_BLK_CONFIG_SECTORS_WIDTH 8
-/* Block size of disk. Only valid when VIRTIO_BLK_F_BLK_SIZE is set. */
-#define VIRTIO_BLK_CONFIG_BLK_SIZE_OFST 28
-#define VIRTIO_BLK_CONFIG_BLK_SIZE_LEN 4
-#define VIRTIO_BLK_CONFIG_BLK_SIZE_LBN 224
-#define VIRTIO_BLK_CONFIG_BLK_SIZE_WIDTH 32
-/* Block topology - number of logical blocks per physical block (log2). Only
- * valid when VIRTIO_BLK_F_TOPOLOGY is set.
- */
-#define VIRTIO_BLK_CONFIG_PHYSICAL_BLOCK_EXP_OFST 32
-#define VIRTIO_BLK_CONFIG_PHYSICAL_BLOCK_EXP_LEN 1
-#define VIRTIO_BLK_CONFIG_PHYSICAL_BLOCK_EXP_LBN 256
-#define VIRTIO_BLK_CONFIG_PHYSICAL_BLOCK_EXP_WIDTH 8
-/* Block topology - offset of first aligned logical block. Only valid when
- * VIRTIO_BLK_F_TOPOLOGY is set.
- */
-#define VIRTIO_BLK_CONFIG_ALIGNMENT_OFFSET_OFST 33
-#define VIRTIO_BLK_CONFIG_ALIGNMENT_OFFSET_LEN 1
-#define VIRTIO_BLK_CONFIG_ALIGNMENT_OFFSET_LBN 264
-#define VIRTIO_BLK_CONFIG_ALIGNMENT_OFFSET_WIDTH 8
-/* Block topology - suggested minimum I/O size in blocks. Only valid when
- * VIRTIO_BLK_F_TOPOLOGY is set.
- */
-#define VIRTIO_BLK_CONFIG_MIN_IO_SIZE_OFST 34
-#define VIRTIO_BLK_CONFIG_MIN_IO_SIZE_LEN 2
-#define VIRTIO_BLK_CONFIG_MIN_IO_SIZE_LBN 272
-#define VIRTIO_BLK_CONFIG_MIN_IO_SIZE_WIDTH 16
-/* Block topology - optimal (suggested maximum) I/O size in blocks. Only valid
- * when VIRTIO_BLK_F_TOPOLOGY is set.
- */
-#define VIRTIO_BLK_CONFIG_OPT_IO_SIZE_OFST 36
-#define VIRTIO_BLK_CONFIG_OPT_IO_SIZE_LEN 4
-#define VIRTIO_BLK_CONFIG_OPT_IO_SIZE_LBN 288
-#define VIRTIO_BLK_CONFIG_OPT_IO_SIZE_WIDTH 32
-/* Unused, set to zero. Note that virtio_blk_config.writeback is volatile and
- * not carried in config data.
- */
-#define VIRTIO_BLK_CONFIG_UNUSED0_OFST 40
-#define VIRTIO_BLK_CONFIG_UNUSED0_LEN 2
-#define VIRTIO_BLK_CONFIG_UNUSED0_LBN 320
-#define VIRTIO_BLK_CONFIG_UNUSED0_WIDTH 16
-/* Number of queues. Only valid if the VIRTIO_BLK_F_MQ feature is negotiated.
- */
-#define VIRTIO_BLK_CONFIG_NUM_QUEUES_OFST 42
-#define VIRTIO_BLK_CONFIG_NUM_QUEUES_LEN 2
-#define VIRTIO_BLK_CONFIG_NUM_QUEUES_LBN 336
-#define VIRTIO_BLK_CONFIG_NUM_QUEUES_WIDTH 16
-/* Maximum discard sectors size, in 512-byte units. Only valid if
- * VIRTIO_BLK_F_DISCARD is set.
- */
-#define VIRTIO_BLK_CONFIG_MAX_DISCARD_SECTORS_OFST 44
-#define VIRTIO_BLK_CONFIG_MAX_DISCARD_SECTORS_LEN 4
-#define VIRTIO_BLK_CONFIG_MAX_DISCARD_SECTORS_LBN 352
-#define VIRTIO_BLK_CONFIG_MAX_DISCARD_SECTORS_WIDTH 32
-/* Maximum discard segment number. Only valid if VIRTIO_BLK_F_DISCARD is set.
- */
-#define VIRTIO_BLK_CONFIG_MAX_DISCARD_SEG_OFST 48
-#define VIRTIO_BLK_CONFIG_MAX_DISCARD_SEG_LEN 4
-#define VIRTIO_BLK_CONFIG_MAX_DISCARD_SEG_LBN 384
-#define VIRTIO_BLK_CONFIG_MAX_DISCARD_SEG_WIDTH 32
-/* Discard sector alignment, in 512-byte units. Only valid if
- * VIRTIO_BLK_F_DISCARD is set.
- */
-#define VIRTIO_BLK_CONFIG_DISCARD_SECTOR_ALIGNMENT_OFST 52
-#define VIRTIO_BLK_CONFIG_DISCARD_SECTOR_ALIGNMENT_LEN 4
-#define VIRTIO_BLK_CONFIG_DISCARD_SECTOR_ALIGNMENT_LBN 416
-#define VIRTIO_BLK_CONFIG_DISCARD_SECTOR_ALIGNMENT_WIDTH 32
-/* Maximum write zeroes sectors size, in 512-byte units. Only valid if
- * VIRTIO_BLK_F_WRITE_ZEROES is set.
- */
-#define VIRTIO_BLK_CONFIG_MAX_WRITE_ZEROES_SECTORS_OFST 56
-#define VIRTIO_BLK_CONFIG_MAX_WRITE_ZEROES_SECTORS_LEN 4
-#define VIRTIO_BLK_CONFIG_MAX_WRITE_ZEROES_SECTORS_LBN 448
-#define VIRTIO_BLK_CONFIG_MAX_WRITE_ZEROES_SECTORS_WIDTH 32
-/* Maximum write zeroes segment number. Only valid if VIRTIO_BLK_F_WRITE_ZEROES
- * is set.
- */
-#define VIRTIO_BLK_CONFIG_MAX_WRITE_ZEROES_SEG_OFST 60
-#define VIRTIO_BLK_CONFIG_MAX_WRITE_ZEROES_SEG_LEN 4
-#define VIRTIO_BLK_CONFIG_MAX_WRITE_ZEROES_SEG_LBN 480
-#define VIRTIO_BLK_CONFIG_MAX_WRITE_ZEROES_SEG_WIDTH 32
-/* Write zeroes request can result in deallocating one or more sectors. Only
- * valid if VIRTIO_BLK_F_WRITE_ZEROES is set.
- */
-#define VIRTIO_BLK_CONFIG_WRITE_ZEROES_MAY_UNMAP_OFST 64
-#define VIRTIO_BLK_CONFIG_WRITE_ZEROES_MAY_UNMAP_LEN 1
-#define VIRTIO_BLK_CONFIG_WRITE_ZEROES_MAY_UNMAP_LBN 512
-#define VIRTIO_BLK_CONFIG_WRITE_ZEROES_MAY_UNMAP_WIDTH 8
-/* Unused, set to zero. */
-#define VIRTIO_BLK_CONFIG_UNUSED1_OFST 65
-#define VIRTIO_BLK_CONFIG_UNUSED1_LEN 3
-#define VIRTIO_BLK_CONFIG_UNUSED1_LBN 520
-#define VIRTIO_BLK_CONFIG_UNUSED1_WIDTH 24
-
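A sketch (not from the sfc driver; little-endian layout assumed, helpers invented for illustration) of filling in a minimal VIRTIO_BLK_CONFIG blob suitable for passing to MC_CMD_DESC_PROXY_FUNC_CONFIG_SET:

#include <stddef.h>
#include <stdint.h>
#include <string.h>

static void put_le32(uint8_t *buf, size_t ofst, uint32_t v)
{
    buf[ofst] = (uint8_t)v;
    buf[ofst + 1] = (uint8_t)(v >> 8);
    buf[ofst + 2] = (uint8_t)(v >> 16);
    buf[ofst + 3] = (uint8_t)(v >> 24);
}

static void put_le64(uint8_t *buf, size_t ofst, uint64_t v)
{
    put_le32(buf, ofst, (uint32_t)v);
    put_le32(buf, ofst + 4, (uint32_t)(v >> 32));
}

/* Fill in a minimal config blob: VIRTIO_F_VERSION_1 plus VIRTIO_BLK_F_BLK_SIZE
 * features, the capacity in 512-byte sectors and the block size. Everything
 * else is left at zero, which the structure defines as "unused". */
static void virtio_blk_config_init(uint8_t cfg[VIRTIO_BLK_CONFIG_LEN],
                                   uint64_t capacity_sectors, uint32_t blk_size)
{
    uint64_t features =
        ((uint64_t)1 << VIRTIO_BLK_CONFIG_VIRTIO_F_VERSION_1_LBN) |
        ((uint64_t)1 << VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_BLK_SIZE_LBN);

    memset(cfg, 0, VIRTIO_BLK_CONFIG_LEN);
    put_le64(cfg, VIRTIO_BLK_CONFIG_FEATURES_OFST, features);
    put_le64(cfg, VIRTIO_BLK_CONFIG_CAPACITY_OFST, capacity_sectors);
    put_le32(cfg, VIRTIO_BLK_CONFIG_BLK_SIZE_OFST, blk_size);
}
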
-
-/***********************************/
-/* MC_CMD_DESC_PROXY_FUNC_CONFIG_SET
- * Set configuration for an existing descriptor proxy function. Configuration
 - * data must match the function personality. The actual function configuration
 - * is not persisted until the caller commits with
 - * MC_CMD_DESC_PROXY_FUNC_COMMIT_IN.
- */
-#define MC_CMD_DESC_PROXY_FUNC_CONFIG_SET 0x174
-#undef MC_CMD_0x174_PRIVILEGE_CTG
-
-#define MC_CMD_0x174_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN msgrequest */
-#define MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN_LENMIN 20
-#define MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN_LENMAX 252
-#define MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN_LENMAX_MCDI2 1020
-#define MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN_LEN(num) (20+1*(num))
-#define MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN_CONFIG_NUM(len) (((len)-20)/1)
-/* Handle to descriptor proxy function (as returned by
- * MC_CMD_DESC_PROXY_FUNC_OPEN)
- */
-#define MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN_HANDLE_OFST 0
-#define MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN_HANDLE_LEN 4
-/* Reserved for future extension, set to zero. */
-#define MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN_RESERVED_OFST 4
-#define MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN_RESERVED_LEN 16
-/* Configuration data. Format of configuration data is determined implicitly
- * from function personality referred to by HANDLE. Currently, only supported
- * format is VIRTIO_BLK_CONFIG.
- */
-#define MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN_CONFIG_OFST 20
-#define MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN_CONFIG_LEN 1
-#define MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN_CONFIG_MINNUM 0
-#define MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN_CONFIG_MAXNUM 232
-#define MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN_CONFIG_MAXNUM_MCDI2 1000
-
-/* MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_OUT msgresponse */
-#define MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_DESC_PROXY_FUNC_COMMIT
 - * Commit function configuration to the non-volatile or volatile store. Once
 - * the configuration is applied to hardware (which may happen immediately or
 - * on the next function/device reset), a DESC_PROXY_FUNC_CONFIG_SET MCDI event
 - * will be delivered to the caller's MCDI event queue.
- */
-#define MC_CMD_DESC_PROXY_FUNC_COMMIT 0x175
-#undef MC_CMD_0x175_PRIVILEGE_CTG
-
-#define MC_CMD_0x175_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_DESC_PROXY_FUNC_COMMIT_IN msgrequest */
-#define MC_CMD_DESC_PROXY_FUNC_COMMIT_IN_LEN 8
-/* Handle to descriptor proxy function (as returned by
- * MC_CMD_DESC_PROXY_FUNC_OPEN)
- */
-#define MC_CMD_DESC_PROXY_FUNC_COMMIT_IN_HANDLE_OFST 0
-#define MC_CMD_DESC_PROXY_FUNC_COMMIT_IN_HANDLE_LEN 4
-#define MC_CMD_DESC_PROXY_FUNC_COMMIT_IN_STORE_OFST 4
-#define MC_CMD_DESC_PROXY_FUNC_COMMIT_IN_STORE_LEN 4
-/* enum: Store into non-volatile (dynamic) config */
-#define MC_CMD_DESC_PROXY_FUNC_COMMIT_IN_NON_VOLATILE 0x0
-/* enum: Store into volatile (ephemeral) config */
-#define MC_CMD_DESC_PROXY_FUNC_COMMIT_IN_VOLATILE 0x1
-
-/* MC_CMD_DESC_PROXY_FUNC_COMMIT_OUT msgresponse */
-#define MC_CMD_DESC_PROXY_FUNC_COMMIT_OUT_LEN 4
-/* Generation count to be delivered in an event once configuration becomes live
- */
-#define MC_CMD_DESC_PROXY_FUNC_COMMIT_OUT_CONFIG_GENERATION_OFST 0
-#define MC_CMD_DESC_PROXY_FUNC_COMMIT_OUT_CONFIG_GENERATION_LEN 4
-
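Putting the CREATE / CONFIG_SET / COMMIT lifecycle together: a hedged sketch of the sequence a management agent might follow for a virtio-blk proxy function. mcdi_rpc() is a hypothetical stand-in transport, the byte-order helpers are invented for the sketch, and it assumes the handle returned by CREATE can be used where the header says "as returned by MC_CMD_DESC_PROXY_FUNC_OPEN" (a conservative caller could instead OPEN by label after creating).

#include <stdint.h>
#include <string.h>

/* Hypothetical stand-in for the caller's MCDI transport (not a real API). */
int mcdi_rpc(unsigned int cmd, const uint8_t *req, size_t req_len,
             uint8_t *resp, size_t resp_space);

static void put_le32(uint8_t *buf, size_t ofst, uint32_t v)
{
    buf[ofst] = (uint8_t)v;
    buf[ofst + 1] = (uint8_t)(v >> 8);
    buf[ofst + 2] = (uint8_t)(v >> 16);
    buf[ofst + 3] = (uint8_t)(v >> 24);
}

static uint32_t get_le32(const uint8_t *buf, size_t ofst)
{
    return (uint32_t)buf[ofst] | ((uint32_t)buf[ofst + 1] << 8) |
           ((uint32_t)buf[ofst + 2] << 16) | ((uint32_t)buf[ofst + 3] << 24);
}

/* Create a virtio-blk proxy function, set its config blob and commit it to
 * the volatile store. 'func' is the 8-byte PCIE_FUNCTION encoding chosen by
 * the caller; 'label' must fit the 40-byte zero-terminated LABEL field.
 * Completion is asynchronous: a DESC_PROXY_FUNC_CONFIG_SET event carrying
 * the returned generation count signals when the configuration is live. */
static int proxy_func_setup(const uint8_t func[8], const char *label,
                            const uint8_t cfg[VIRTIO_BLK_CONFIG_LEN],
                            uint32_t *generation)
{
    uint8_t req[MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN_LEN(VIRTIO_BLK_CONFIG_LEN)] = { 0 };
    uint8_t resp[MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_LEN];
    uint32_t handle;
    int rc;

    memcpy(req + MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_OFST, func, 8);
    put_le32(req, MC_CMD_DESC_PROXY_FUNC_CREATE_IN_PERSONALITY_OFST,
             FUNCTION_PERSONALITY_VIRTIO_BLK);
    strncpy((char *)req + MC_CMD_DESC_PROXY_FUNC_CREATE_IN_LABEL_OFST, label,
            MC_CMD_DESC_PROXY_FUNC_CREATE_IN_LABEL_LEN - 1);
    rc = mcdi_rpc(MC_CMD_DESC_PROXY_FUNC_CREATE, req,
                  MC_CMD_DESC_PROXY_FUNC_CREATE_IN_LEN, resp, sizeof(resp));
    if (rc < 0)
        return rc;
    handle = get_le32(resp, MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_HANDLE_OFST);

    memset(req, 0, sizeof(req));
    put_le32(req, MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN_HANDLE_OFST, handle);
    memcpy(req + MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN_CONFIG_OFST, cfg,
           VIRTIO_BLK_CONFIG_LEN);
    rc = mcdi_rpc(MC_CMD_DESC_PROXY_FUNC_CONFIG_SET, req,
                  MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN_LEN(VIRTIO_BLK_CONFIG_LEN),
                  resp, 0);
    if (rc < 0)
        return rc;

    memset(req, 0, sizeof(req));
    put_le32(req, MC_CMD_DESC_PROXY_FUNC_COMMIT_IN_HANDLE_OFST, handle);
    put_le32(req, MC_CMD_DESC_PROXY_FUNC_COMMIT_IN_STORE_OFST,
             MC_CMD_DESC_PROXY_FUNC_COMMIT_IN_VOLATILE);
    rc = mcdi_rpc(MC_CMD_DESC_PROXY_FUNC_COMMIT, req,
                  MC_CMD_DESC_PROXY_FUNC_COMMIT_IN_LEN, resp, sizeof(resp));
    if (rc < 0)
        return rc;
    *generation = get_le32(resp,
        MC_CMD_DESC_PROXY_FUNC_COMMIT_OUT_CONFIG_GENERATION_OFST);
    return 0;
}
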
-
-/***********************************/
-/* MC_CMD_DESC_PROXY_FUNC_OPEN
- * Retrieve a handle for an existing descriptor proxy function. Returns an
 - * integer handle, valid until the function is deallocated or the MC is
 - * rebooted or power-cycled. Returns ENODEV if no function with the given
 - * label exists.
- */
-#define MC_CMD_DESC_PROXY_FUNC_OPEN 0x176
-#undef MC_CMD_0x176_PRIVILEGE_CTG
-
-#define MC_CMD_0x176_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_DESC_PROXY_FUNC_OPEN_IN msgrequest */
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_IN_LEN 40
-/* User-defined label (zero-terminated ASCII string) to uniquely identify the
- * function
- */
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_IN_LABEL_OFST 0
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_IN_LABEL_LEN 40
-
-/* MC_CMD_DESC_PROXY_FUNC_OPEN_OUT msgresponse */
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_LENMIN 40
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_LENMAX 252
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_LENMAX_MCDI2 1020
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_LEN(num) (40+1*(num))
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_CONFIG_NUM(len) (((len)-40)/1)
-/* Handle to the descriptor proxy function */
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_HANDLE_OFST 0
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_HANDLE_LEN 4
-/* PCIe Function ID (as struct PCIE_FUNCTION) */
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_OFST 4
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_LEN 8
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_LO_OFST 4
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_LO_LEN 4
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_LO_LBN 32
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_LO_WIDTH 32
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_HI_OFST 8
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_HI_LEN 4
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_HI_LBN 64
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_HI_WIDTH 32
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_PF_OFST 4
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_PF_LEN 2
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_VF_OFST 6
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_VF_LEN 2
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_INTF_OFST 8
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_INTF_LEN 4
-/* Function personality */
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_PERSONALITY_OFST 12
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_PERSONALITY_LEN 4
-/* Enum values, see field(s): */
-/* FUNCTION_PERSONALITY/ID */
-/* Function configuration state */
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_CONFIG_STATUS_OFST 16
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_CONFIG_STATUS_LEN 4
-/* enum: Function configuration is visible to the host (live) */
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_LIVE 0x0
-/* enum: Function configuration is pending reset */
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_PENDING 0x1
-/* enum: Function configuration is missing (created, but no configuration
- * committed)
- */
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_UNCONFIGURED 0x2
-/* Generation count to be delivered in an event once the configuration becomes
- * live (if status is "pending")
- */
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_CONFIG_GENERATION_OFST 20
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_CONFIG_GENERATION_LEN 4
-/* Reserved for future extension, set to zero. */
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_RESERVED_OFST 24
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_RESERVED_LEN 16
-/* Configuration data corresponding to function personality. Currently, only
- * supported format is VIRTIO_BLK_CONFIG. Not valid if status is UNCONFIGURED.
- */
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_CONFIG_OFST 40
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_CONFIG_LEN 1
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_CONFIG_MINNUM 0
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_CONFIG_MAXNUM 212
-#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_CONFIG_MAXNUM_MCDI2 980
-
-
-/***********************************/
-/* MC_CMD_DESC_PROXY_FUNC_CLOSE
- * Releases a handle for an open descriptor proxy function. If proxying was
- * enabled on the device, the caller is expected to gracefully stop it using
- * MC_CMD_DESC_PROXY_FUNC_DISABLE prior to calling this function. Closing an
 - * active device without disabling proxying will result in a forced close,
 - * which will put the device into a failed state and signal the error to the
 - * host driver (for virtio, the DEVICE_NEEDS_RESET flag would be set on the
 - * host side).
- */
-#define MC_CMD_DESC_PROXY_FUNC_CLOSE 0x1a1
-#undef MC_CMD_0x1a1_PRIVILEGE_CTG
-
-#define MC_CMD_0x1a1_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_DESC_PROXY_FUNC_CLOSE_IN msgrequest */
-#define MC_CMD_DESC_PROXY_FUNC_CLOSE_IN_LEN 4
-/* Handle to the descriptor proxy function */
-#define MC_CMD_DESC_PROXY_FUNC_CLOSE_IN_HANDLE_OFST 0
-#define MC_CMD_DESC_PROXY_FUNC_CLOSE_IN_HANDLE_LEN 4
-
-/* MC_CMD_DESC_PROXY_FUNC_CLOSE_OUT msgresponse */
-#define MC_CMD_DESC_PROXY_FUNC_CLOSE_OUT_LEN 0
-
-/* DESC_PROXY_FUNC_MAP structuredef */
-#define DESC_PROXY_FUNC_MAP_LEN 52
-/* PCIe function ID (as struct PCIE_FUNCTION) */
-#define DESC_PROXY_FUNC_MAP_FUNC_OFST 0
-#define DESC_PROXY_FUNC_MAP_FUNC_LEN 8
-#define DESC_PROXY_FUNC_MAP_FUNC_LO_OFST 0
-#define DESC_PROXY_FUNC_MAP_FUNC_LO_LEN 4
-#define DESC_PROXY_FUNC_MAP_FUNC_LO_LBN 0
-#define DESC_PROXY_FUNC_MAP_FUNC_LO_WIDTH 32
-#define DESC_PROXY_FUNC_MAP_FUNC_HI_OFST 4
-#define DESC_PROXY_FUNC_MAP_FUNC_HI_LEN 4
-#define DESC_PROXY_FUNC_MAP_FUNC_HI_LBN 32
-#define DESC_PROXY_FUNC_MAP_FUNC_HI_WIDTH 32
-#define DESC_PROXY_FUNC_MAP_FUNC_LBN 0
-#define DESC_PROXY_FUNC_MAP_FUNC_WIDTH 64
-#define DESC_PROXY_FUNC_MAP_FUNC_PF_OFST 0
-#define DESC_PROXY_FUNC_MAP_FUNC_PF_LEN 2
-#define DESC_PROXY_FUNC_MAP_FUNC_PF_LBN 0
-#define DESC_PROXY_FUNC_MAP_FUNC_PF_WIDTH 16
-#define DESC_PROXY_FUNC_MAP_FUNC_VF_OFST 2
-#define DESC_PROXY_FUNC_MAP_FUNC_VF_LEN 2
-#define DESC_PROXY_FUNC_MAP_FUNC_VF_LBN 16
-#define DESC_PROXY_FUNC_MAP_FUNC_VF_WIDTH 16
-#define DESC_PROXY_FUNC_MAP_FUNC_INTF_OFST 4
-#define DESC_PROXY_FUNC_MAP_FUNC_INTF_LEN 4
-#define DESC_PROXY_FUNC_MAP_FUNC_INTF_LBN 32
-#define DESC_PROXY_FUNC_MAP_FUNC_INTF_WIDTH 32
-/* Function personality */
-#define DESC_PROXY_FUNC_MAP_PERSONALITY_OFST 8
-#define DESC_PROXY_FUNC_MAP_PERSONALITY_LEN 4
-/* Enum values, see field(s): */
-/* FUNCTION_PERSONALITY/ID */
-#define DESC_PROXY_FUNC_MAP_PERSONALITY_LBN 64
-#define DESC_PROXY_FUNC_MAP_PERSONALITY_WIDTH 32
-/* User-defined label (zero-terminated ASCII string) to uniquely identify the
- * function
- */
-#define DESC_PROXY_FUNC_MAP_LABEL_OFST 12
-#define DESC_PROXY_FUNC_MAP_LABEL_LEN 40
-#define DESC_PROXY_FUNC_MAP_LABEL_LBN 96
-#define DESC_PROXY_FUNC_MAP_LABEL_WIDTH 320
-
-
-/***********************************/
-/* MC_CMD_DESC_PROXY_FUNC_ENUM
- * Enumerate existing descriptor proxy functions
- */
-#define MC_CMD_DESC_PROXY_FUNC_ENUM 0x177
-#undef MC_CMD_0x177_PRIVILEGE_CTG
-
-#define MC_CMD_0x177_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_DESC_PROXY_FUNC_ENUM_IN msgrequest */
-#define MC_CMD_DESC_PROXY_FUNC_ENUM_IN_LEN 4
-/* Starting index, set to 0 on first request. See
- * MC_CMD_DESC_PROXY_FUNC_ENUM_OUT/FLAGS.
- */
-#define MC_CMD_DESC_PROXY_FUNC_ENUM_IN_START_IDX_OFST 0
-#define MC_CMD_DESC_PROXY_FUNC_ENUM_IN_START_IDX_LEN 4
-
-/* MC_CMD_DESC_PROXY_FUNC_ENUM_OUT msgresponse */
-#define MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_LENMIN 4
-#define MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_LENMAX 212
-#define MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_LENMAX_MCDI2 992
-#define MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_LEN(num) (4+52*(num))
-#define MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_FUNC_MAP_NUM(len) (((len)-4)/52)
-#define MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_FLAGS_OFST 0
-#define MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_FLAGS_LEN 4
-#define MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_MORE_DATA_OFST 0
-#define MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_MORE_DATA_LBN 0
-#define MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_MORE_DATA_WIDTH 1
-/* Function map, as array of DESC_PROXY_FUNC_MAP */
-#define MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_FUNC_MAP_OFST 4
-#define MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_FUNC_MAP_LEN 52
-#define MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_FUNC_MAP_MINNUM 0
-#define MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_FUNC_MAP_MAXNUM 4
-#define MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_FUNC_MAP_MAXNUM_MCDI2 19
-
-
-/***********************************/
-/* MC_CMD_DESC_PROXY_FUNC_ENABLE
- * Enable descriptor proxying for function into target event queue. Returns VI
- * allocation info for the proxy source function, so that the caller can map
- * absolute VI IDs from descriptor proxy events back to the originating
- * function. This is a legacy function that only supports single queue proxy
- * devices. It is also limited in that it can only be called after host driver
- * attach (once VI allocation is known) and will return MC_CMD_ERR_ENOTCONN
- * otherwise. For new code, see MC_CMD_DESC_PROXY_FUNC_ENABLE_QUEUE which
- * supports multi-queue devices and has no dependency on host driver attach.
- */
-#define MC_CMD_DESC_PROXY_FUNC_ENABLE 0x178
-#undef MC_CMD_0x178_PRIVILEGE_CTG
-
-#define MC_CMD_0x178_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_DESC_PROXY_FUNC_ENABLE_IN msgrequest */
-#define MC_CMD_DESC_PROXY_FUNC_ENABLE_IN_LEN 8
-/* Handle to descriptor proxy function (as returned by
- * MC_CMD_DESC_PROXY_FUNC_OPEN)
- */
-#define MC_CMD_DESC_PROXY_FUNC_ENABLE_IN_HANDLE_OFST 0
-#define MC_CMD_DESC_PROXY_FUNC_ENABLE_IN_HANDLE_LEN 4
-/* Descriptor proxy sink queue (caller function relative). Must be an
 - * extended-width event queue.
- */
-#define MC_CMD_DESC_PROXY_FUNC_ENABLE_IN_TARGET_EVQ_OFST 4
-#define MC_CMD_DESC_PROXY_FUNC_ENABLE_IN_TARGET_EVQ_LEN 4
-
-/* MC_CMD_DESC_PROXY_FUNC_ENABLE_OUT msgresponse */
-#define MC_CMD_DESC_PROXY_FUNC_ENABLE_OUT_LEN 8
-/* The number of VIs allocated on the function */
-#define MC_CMD_DESC_PROXY_FUNC_ENABLE_OUT_VI_COUNT_OFST 0
-#define MC_CMD_DESC_PROXY_FUNC_ENABLE_OUT_VI_COUNT_LEN 4
-/* The base absolute VI number allocated to the function. */
-#define MC_CMD_DESC_PROXY_FUNC_ENABLE_OUT_VI_BASE_OFST 4
-#define MC_CMD_DESC_PROXY_FUNC_ENABLE_OUT_VI_BASE_LEN 4
-
-
-/***********************************/
-/* MC_CMD_DESC_PROXY_FUNC_ENABLE_QUEUE
- * Enable descriptor proxying for a source queue on a host function into target
- * event queue. Source queue number is a relative virtqueue number on the
- * source function (0 to max_virtqueues-1). For a multi-queue device, the
- * caller must enable all source queues individually. To retrieve absolute VI
- * information for the source function (so that VI IDs from descriptor proxy
- * events can be mapped back to source function / queue) see
- * MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO
- */
-#define MC_CMD_DESC_PROXY_FUNC_ENABLE_QUEUE 0x1d0
-#undef MC_CMD_0x1d0_PRIVILEGE_CTG
-
-#define MC_CMD_0x1d0_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_DESC_PROXY_FUNC_ENABLE_QUEUE_IN msgrequest */
-#define MC_CMD_DESC_PROXY_FUNC_ENABLE_QUEUE_IN_LEN 12
-/* Handle to descriptor proxy function (as returned by
- * MC_CMD_DESC_PROXY_FUNC_OPEN)
- */
-#define MC_CMD_DESC_PROXY_FUNC_ENABLE_QUEUE_IN_HANDLE_OFST 0
-#define MC_CMD_DESC_PROXY_FUNC_ENABLE_QUEUE_IN_HANDLE_LEN 4
-/* Source relative queue number to enable proxying on */
-#define MC_CMD_DESC_PROXY_FUNC_ENABLE_QUEUE_IN_SOURCE_QUEUE_OFST 4
-#define MC_CMD_DESC_PROXY_FUNC_ENABLE_QUEUE_IN_SOURCE_QUEUE_LEN 4
-/* Descriptor proxy sink queue (caller function relative). Must be an
 - * extended-width event queue.
- */
-#define MC_CMD_DESC_PROXY_FUNC_ENABLE_QUEUE_IN_TARGET_EVQ_OFST 8
-#define MC_CMD_DESC_PROXY_FUNC_ENABLE_QUEUE_IN_TARGET_EVQ_LEN 4
-
-/* MC_CMD_DESC_PROXY_FUNC_ENABLE_QUEUE_OUT msgresponse */
-#define MC_CMD_DESC_PROXY_FUNC_ENABLE_QUEUE_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_DESC_PROXY_FUNC_DISABLE
 - * Disable descriptor proxying for a function. For multi-queue functions,
- * disables all queues.
- */
-#define MC_CMD_DESC_PROXY_FUNC_DISABLE 0x179
-#undef MC_CMD_0x179_PRIVILEGE_CTG
-
-#define MC_CMD_0x179_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_DESC_PROXY_FUNC_DISABLE_IN msgrequest */
-#define MC_CMD_DESC_PROXY_FUNC_DISABLE_IN_LEN 4
-/* Handle to descriptor proxy function (as returned by
- * MC_CMD_DESC_PROXY_FUNC_OPEN)
- */
-#define MC_CMD_DESC_PROXY_FUNC_DISABLE_IN_HANDLE_OFST 0
-#define MC_CMD_DESC_PROXY_FUNC_DISABLE_IN_HANDLE_LEN 4
-
-/* MC_CMD_DESC_PROXY_FUNC_DISABLE_OUT msgresponse */
-#define MC_CMD_DESC_PROXY_FUNC_DISABLE_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_DESC_PROXY_FUNC_DISABLE_QUEUE
- * Disable descriptor proxying for a specific source queue on a function.
- */
-#define MC_CMD_DESC_PROXY_FUNC_DISABLE_QUEUE 0x1d1
-#undef MC_CMD_0x1d1_PRIVILEGE_CTG
-
-#define MC_CMD_0x1d1_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_DESC_PROXY_FUNC_DISABLE_QUEUE_IN msgrequest */
-#define MC_CMD_DESC_PROXY_FUNC_DISABLE_QUEUE_IN_LEN 8
-/* Handle to descriptor proxy function (as returned by
- * MC_CMD_DESC_PROXY_FUNC_OPEN)
- */
-#define MC_CMD_DESC_PROXY_FUNC_DISABLE_QUEUE_IN_HANDLE_OFST 0
-#define MC_CMD_DESC_PROXY_FUNC_DISABLE_QUEUE_IN_HANDLE_LEN 4
-/* Source relative queue number to disable proxying on */
-#define MC_CMD_DESC_PROXY_FUNC_DISABLE_QUEUE_IN_SOURCE_QUEUE_OFST 4
-#define MC_CMD_DESC_PROXY_FUNC_DISABLE_QUEUE_IN_SOURCE_QUEUE_LEN 4
-
-/* MC_CMD_DESC_PROXY_FUNC_DISABLE_QUEUE_OUT msgresponse */
-#define MC_CMD_DESC_PROXY_FUNC_DISABLE_QUEUE_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_DESC_PROXY_GET_VI_INFO
- * Returns absolute VI allocation information for the descriptor proxy source
- * function referenced by HANDLE, so that the caller can map absolute VI IDs
- * from descriptor proxy events back to the originating function and queue. The
- * call is only valid after the host driver for the source function has
- * attached (after receiving a driver attach event for the descriptor proxy
- * function) and will fail with ENOTCONN otherwise.
- */
-#define MC_CMD_DESC_PROXY_GET_VI_INFO 0x1d2
-#undef MC_CMD_0x1d2_PRIVILEGE_CTG
-
-#define MC_CMD_0x1d2_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_DESC_PROXY_GET_VI_INFO_IN msgrequest */
-#define MC_CMD_DESC_PROXY_GET_VI_INFO_IN_LEN 4
-/* Handle to descriptor proxy function (as returned by
- * MC_CMD_DESC_PROXY_FUNC_OPEN)
- */
-#define MC_CMD_DESC_PROXY_GET_VI_INFO_IN_HANDLE_OFST 0
-#define MC_CMD_DESC_PROXY_GET_VI_INFO_IN_HANDLE_LEN 4
-
-/* MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT msgresponse */
-#define MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_LENMIN 0
-#define MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_LENMAX 252
-#define MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_LENMAX_MCDI2 1020
-#define MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_LEN(num) (0+4*(num))
-#define MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_VI_MAP_NUM(len) (((len)-0)/4)
-/* VI information (VI ID + VI relative queue number) for each of the source
- * queues (in order from 0 to max_virtqueues-1), as array of QUEUE_ID
- * structures.
- */
-#define MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_VI_MAP_OFST 0
-#define MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_VI_MAP_LEN 4
-#define MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_VI_MAP_MINNUM 0
-#define MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_VI_MAP_MAXNUM 63
-#define MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_VI_MAP_MAXNUM_MCDI2 255
-#define MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_VI_MAP_ABS_VI_OFST 0
-#define MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_VI_MAP_ABS_VI_LEN 2
-#define MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_VI_MAP_REL_QUEUE_LBN 16
-#define MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_VI_MAP_REL_QUEUE_WIDTH 1
-#define MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_VI_MAP_RESERVED_LBN 17
-#define MC_CMD_DESC_PROXY_FUNC_GET_VI_INFO_OUT_VI_MAP_RESERVED_WIDTH 15
-
-
-/***********************************/
-/* MC_CMD_GET_ADDR_SPC_ID
- * Get Address space identifier for use in mem2mem descriptors for a given
- * target. See SF-120734-TC for details on ADDR_SPC_IDs and mem2mem
- * descriptors.
- */
-#define MC_CMD_GET_ADDR_SPC_ID 0x1a0
-#undef MC_CMD_0x1a0_PRIVILEGE_CTG
-
-#define MC_CMD_0x1a0_PRIVILEGE_CTG SRIOV_CTG_ADMIN
-
-/* MC_CMD_GET_ADDR_SPC_ID_IN msgrequest */
-#define MC_CMD_GET_ADDR_SPC_ID_IN_LEN 16
-/* Resource type to get ADDR_SPC_ID for */
-#define MC_CMD_GET_ADDR_SPC_ID_IN_TYPE_OFST 0
-#define MC_CMD_GET_ADDR_SPC_ID_IN_TYPE_LEN 4
-/* enum: Address space ID for host/AP memory DMA over the same interface this
- * MCDI was called on
- */
-#define MC_CMD_GET_ADDR_SPC_ID_IN_SELF 0x0
-/* enum: Address space ID for host/AP memory DMA via PCI interface and function
- * specified by FUNC
- */
-#define MC_CMD_GET_ADDR_SPC_ID_IN_PCI_FUNC 0x1
-/* enum: Address space ID for host/AP memory DMA via PCI interface and function
- * specified by FUNC with PASID value specified by PASID
- */
-#define MC_CMD_GET_ADDR_SPC_ID_IN_PCI_FUNC_PASID 0x2
-/* enum: Address space ID for host/AP memory DMA via PCI interface and function
- * specified by FUNC with PASID value of relative VI specified by VI
- */
-#define MC_CMD_GET_ADDR_SPC_ID_IN_REL_VI 0x3
-/* enum: Address space ID for host/AP memory DMA via PCI interface, function
- * and PASID value of absolute VI specified by VI
- */
-#define MC_CMD_GET_ADDR_SPC_ID_IN_ABS_VI 0x4
-/* enum: Address space ID for host memory DMA via PCI interface and function of
- * descriptor proxy function specified by HANDLE
- */
-#define MC_CMD_GET_ADDR_SPC_ID_IN_DESC_PROXY_HANDLE 0x5
-/* enum: Address space ID for DMA to/from MC memory */
-#define MC_CMD_GET_ADDR_SPC_ID_IN_MC_MEM 0x6
-/* enum: Address space ID for DMA to/from other SmartNIC memory (on-chip, DDR)
- */
-#define MC_CMD_GET_ADDR_SPC_ID_IN_NIC_MEM 0x7
-/* PCIe Function ID (as struct PCIE_FUNCTION). Only valid if TYPE is PCI_FUNC,
- * PCI_FUNC_PASID or REL_VI.
- */
-#define MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_OFST 4
-#define MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_LEN 8
-#define MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_LO_OFST 4
-#define MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_LO_LEN 4
-#define MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_LO_LBN 32
-#define MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_LO_WIDTH 32
-#define MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_HI_OFST 8
-#define MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_HI_LEN 4
-#define MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_HI_LBN 64
-#define MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_HI_WIDTH 32
-#define MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_PF_OFST 4
-#define MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_PF_LEN 2
-#define MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_VF_OFST 6
-#define MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_VF_LEN 2
-#define MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_INTF_OFST 8
-#define MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_INTF_LEN 4
-/* PASID value. Only valid if TYPE is PCI_FUNC_PASID. */
-#define MC_CMD_GET_ADDR_SPC_ID_IN_PASID_OFST 12
-#define MC_CMD_GET_ADDR_SPC_ID_IN_PASID_LEN 4
-/* Relative or absolute VI number. Only valid if TYPE is REL_VI or ABS_VI */
-#define MC_CMD_GET_ADDR_SPC_ID_IN_VI_OFST 12
-#define MC_CMD_GET_ADDR_SPC_ID_IN_VI_LEN 4
-/* Descriptor proxy function handle. Only valid if TYPE is DESC_PROXY_HANDLE.
- */
-#define MC_CMD_GET_ADDR_SPC_ID_IN_HANDLE_OFST 4
-#define MC_CMD_GET_ADDR_SPC_ID_IN_HANDLE_LEN 4
-
-/* MC_CMD_GET_ADDR_SPC_ID_OUT msgresponse */
-#define MC_CMD_GET_ADDR_SPC_ID_OUT_LEN 8
-/* Address Space ID for the requested target. Only the lower 36 bits are valid
- * in the current SmartNIC implementation.
- */
-#define MC_CMD_GET_ADDR_SPC_ID_OUT_ADDR_SPC_ID_OFST 0
-#define MC_CMD_GET_ADDR_SPC_ID_OUT_ADDR_SPC_ID_LEN 8
-#define MC_CMD_GET_ADDR_SPC_ID_OUT_ADDR_SPC_ID_LO_OFST 0
-#define MC_CMD_GET_ADDR_SPC_ID_OUT_ADDR_SPC_ID_LO_LEN 4
-#define MC_CMD_GET_ADDR_SPC_ID_OUT_ADDR_SPC_ID_LO_LBN 0
-#define MC_CMD_GET_ADDR_SPC_ID_OUT_ADDR_SPC_ID_LO_WIDTH 32
-#define MC_CMD_GET_ADDR_SPC_ID_OUT_ADDR_SPC_ID_HI_OFST 4
-#define MC_CMD_GET_ADDR_SPC_ID_OUT_ADDR_SPC_ID_HI_LEN 4
-#define MC_CMD_GET_ADDR_SPC_ID_OUT_ADDR_SPC_ID_HI_LBN 32
-#define MC_CMD_GET_ADDR_SPC_ID_OUT_ADDR_SPC_ID_HI_WIDTH 32
-
/***********************************/
/* MC_CMD_GET_CLIENT_HANDLE
@@ -26359,7 +22483,8 @@
* INTF=CALLER, PF=PF_NULL, VF=... to refer to a VF child of the calling PF or
* a sibling VF of the calling VF. - INTF=CALLER, PF=..., VF=VF_NULL to refer
* to a PF on the calling interface - INTF=CALLER, PF=..., VF=... to refer to a
- * VF on the calling interface - INTF=..., PF=..., VF=VF_NULL to refer to a PF
+ * VF on the calling interface - INTF=..., PF=PF_NULL, VF=VF_NULL to refer to
+ * the named interface itself - INTF=..., PF=..., VF=VF_NULL to refer to a PF
* on a named interface - INTF=..., PF=..., VF=... to refer to a VF on a named
* interface where ... refers to a small integer for the VF/PF fields, and to
* values from the PCIE_INTERFACE enum for for the INTF field. It's only
@@ -26380,6 +22505,7 @@
* backwards compatibility only, callers should use PCIE_INTERFACE_CALLER.
*/
#define MC_CMD_GET_CLIENT_HANDLE_IN_PCIE_FUNCTION_INTF_NULL 0xffffffff
+/* See structuredef: PCIE_FUNCTION */
#define MC_CMD_GET_CLIENT_HANDLE_IN_FUNC_PF_OFST 4
#define MC_CMD_GET_CLIENT_HANDLE_IN_FUNC_PF_LEN 2
#define MC_CMD_GET_CLIENT_HANDLE_IN_FUNC_VF_OFST 6
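As a minimal sketch of the INTF/PF/VF addressing scheme described above, the fragment below asks for the client handle of a VF child of the calling PF (INTF=CALLER, PF=PF_NULL, VF=vf_idx). It reuses only helpers that appear elsewhere in this patch (MCDI_DECLARE_BUF, MCDI_SET_DWORD, MCDI_DWORD, efx_mcdi_rpc); the FUNC_INTF/PF/VF field names, the PF_NULL constant, the IN/OUT length macros, the OUT_HANDLE field and the 16-bit MCDI_SET_WORD setter are assumptions that follow the usual mcdi_pcol.h naming pattern and are not all visible in this hunk.

/* Illustrative sketch only -- not part of the patch. */
static int example_get_sibling_vf_handle(struct efx_nic *efx, u16 vf_idx,
					 u32 *handle_out)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_CLIENT_HANDLE_IN_LEN);	/* assumed name */
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CLIENT_HANDLE_OUT_LEN);	/* assumed name */
	size_t outlen;
	int rc;

	/* INTF=CALLER, PF=PF_NULL, VF=vf_idx: a VF child of the calling PF. */
	MCDI_SET_DWORD(inbuf, GET_CLIENT_HANDLE_IN_FUNC_INTF,		/* assumed name */
		       PCIE_INTERFACE_CALLER);				/* assumed name */
	MCDI_SET_WORD(inbuf, GET_CLIENT_HANDLE_IN_FUNC_PF,		/* assumed helper */
		      MC_CMD_GET_CLIENT_HANDLE_IN_PCIE_FUNCTION_PF_NULL); /* assumed name */
	MCDI_SET_WORD(inbuf, GET_CLIENT_HANDLE_IN_FUNC_VF, vf_idx);

	rc = efx_mcdi_rpc(efx, MC_CMD_GET_CLIENT_HANDLE, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;
	*handle_out = MCDI_DWORD(outbuf, GET_CLIENT_HANDLE_OUT_HANDLE);	/* assumed name */
	return 0;
}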
@@ -27350,7 +23476,7 @@
/* MAE_MPORT_SELECTOR structuredef: MPORTS are identified by an opaque unsigned
* integer value (mport_id) that is guaranteed to be representable within
* 32-bits or within any NIC interface field that needs store the value
- * (whichever is narrowers). This selector structure provides a stable way to
+ * (whichever is narrower). This selector structure provides a stable way to
* refer to m-ports.
*/
#define MAE_MPORT_SELECTOR_LEN 4
@@ -27425,10 +23551,22 @@
#define MAE_MPORT_SELECTOR_FLAT_WIDTH 32
/* MAE_LINK_ENDPOINT_SELECTOR structuredef: Structure that identifies a real or
- * virtual network port by MAE port and link end
+ * virtual network port by MAE port and link end. Intended to be used by
+ * network port MCDI commands. Setting FLAT to MAE_LINK_ENDPOINT_COMPAT is
+ * equivalent to using the previous version of the command. Not all possible
+ * combinations of MPORT_END and MPORT_SELECTOR in MAE_LINK_ENDPOINT_SELECTOR
+ * will work in all circumstances. 1. Some will always work (e.g. a VF can
+ * always address its logical MAC using MPORT_SELECTOR=ASSIGNED,LINK_END=VNIC),
+ * 2. Some are not meaningful and will always fail with EINVAL (e.g. attempting
+ * to address the VNIC end of a link to a physical port), 3. Some are
+ * meaningful but require the MCDI client to have the required permission and
+ * fail with EPERM otherwise (e.g. trying to set the MAC on a VF the caller
+ * cannot administer), and 4. Some could be implementation-specific and fail
+ * with ENOTSUP if not available (no examples exist right now). See
+ * SF-123581-TC section 4.3 for more details.
*/
#define MAE_LINK_ENDPOINT_SELECTOR_LEN 8
-/* The MAE MPORT of interest */
+/* Identifier for the MAE MPORT of interest */
#define MAE_LINK_ENDPOINT_SELECTOR_MPORT_SELECTOR_OFST 0
#define MAE_LINK_ENDPOINT_SELECTOR_MPORT_SELECTOR_LEN 4
#define MAE_LINK_ENDPOINT_SELECTOR_MPORT_SELECTOR_LBN 0
@@ -27829,6 +23967,8 @@
#define MC_CMD_MAE_COUNTER_ALLOC_OUT_COUNTER_ID_MAXNUM_MCDI2 253
/* enum: A counter ID that is guaranteed never to represent a real counter */
#define MC_CMD_MAE_COUNTER_ALLOC_OUT_COUNTER_ID_NULL 0xffffffff
+/* Other enum values, see field(s): */
+/* MAE_COUNTER_ID */
/***********************************/
@@ -28266,6 +24406,24 @@
#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_SUPPRESS_SELF_DELIVERY_OFST 0
#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_SUPPRESS_SELF_DELIVERY_LBN 14
#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_SUPPRESS_SELF_DELIVERY_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_DO_REPLACE_RDP_C_PL_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_DO_REPLACE_RDP_C_PL_LBN 15
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_DO_REPLACE_RDP_C_PL_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_DO_REPLACE_RDP_D_PL_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_DO_REPLACE_RDP_D_PL_LBN 16
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_DO_REPLACE_RDP_D_PL_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_DO_REPLACE_RDP_OUT_HOST_CHAN_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_DO_REPLACE_RDP_OUT_HOST_CHAN_LBN 17
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_DO_REPLACE_RDP_OUT_HOST_CHAN_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_DO_SET_NET_CHAN_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_DO_SET_NET_CHAN_LBN 18
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_DO_SET_NET_CHAN_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_LACP_PLUGIN_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_LACP_PLUGIN_LBN 19
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_LACP_PLUGIN_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_LACP_INC_L4_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_LACP_INC_L4_LBN 20
+#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_LACP_INC_L4_WIDTH 1
/* If VLAN_PUSH >= 1, TCI value to be inserted as outermost VLAN. */
#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_VLAN0_TCI_BE_OFST 4
#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_VLAN0_TCI_BE_LEN 2
@@ -28291,19 +24449,23 @@
#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_DELIVER_OFST 20
#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_DELIVER_LEN 4
/* Allows an action set to trigger several counter updates. Set to
- * COUNTER_LIST_ID_NULL to request no counter action.
+ * MAE_COUNTER_ID_NULL to request no counter action.
*/
#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_COUNTER_LIST_ID_OFST 24
#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_COUNTER_LIST_ID_LEN 4
+/* Enum values, see field(s): */
+/* MAE_COUNTER_ID */
/* If a driver only wished to update one counter within this action set, then
* it can supply a COUNTER_ID instead of allocating a single-element counter
* list. The ID must have been allocated with COUNTER_TYPE=AR. This field
- * should be set to COUNTER_ID_NULL if this behaviour is not required. It is
- * not valid to supply a non-NULL value for both COUNTER_LIST_ID and
+ * should be set to MAE_COUNTER_ID_NULL if this behaviour is not required. It
+ * is not valid to supply a non-NULL value for both COUNTER_LIST_ID and
* COUNTER_ID.
*/
#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_COUNTER_ID_OFST 28
#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_COUNTER_ID_LEN 4
+/* Enum values, see field(s): */
+/* MAE_COUNTER_ID */
#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_MARK_VALUE_OFST 32
#define MC_CMD_MAE_ACTION_SET_ALLOC_IN_MARK_VALUE_LEN 4
/* Set to MAC_ID_NULL to request no source MAC replacement. */
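To make the counter rules above concrete: at most one of COUNTER_LIST_ID and COUNTER_ID may carry a real ID, the other must hold the NULL value, and a bare COUNTER_ID must have been allocated with COUNTER_TYPE=AR. A minimal sketch of attaching a single counter follows; it assumes the request-length macro and the MAE_COUNTER_ID_NULL constant named in the comments above exist with their usual spellings, and counter_id stands for a previously allocated AR counter.

	/* Illustrative sketch only -- attach one AR counter, no counter list. */
	MCDI_DECLARE_BUF(inbuf, MC_CMD_MAE_ACTION_SET_ALLOC_IN_LEN);	/* assumed name */

	MCDI_SET_DWORD(inbuf, MAE_ACTION_SET_ALLOC_IN_COUNTER_LIST_ID,
		       MAE_COUNTER_ID_NULL);	/* no counter list */
	MCDI_SET_DWORD(inbuf, MAE_ACTION_SET_ALLOC_IN_COUNTER_ID, counter_id);
	/* Supplying non-NULL IDs in both fields is not valid. */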
@@ -28347,6 +24509,24 @@
#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_SUPPRESS_SELF_DELIVERY_OFST 0
#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_SUPPRESS_SELF_DELIVERY_LBN 14
#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_SUPPRESS_SELF_DELIVERY_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_REPLACE_RDP_C_PL_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_REPLACE_RDP_C_PL_LBN 15
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_REPLACE_RDP_C_PL_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_REPLACE_RDP_D_PL_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_REPLACE_RDP_D_PL_LBN 16
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_REPLACE_RDP_D_PL_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_REPLACE_RDP_OUT_HOST_CHAN_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_REPLACE_RDP_OUT_HOST_CHAN_LBN 17
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_REPLACE_RDP_OUT_HOST_CHAN_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_SET_NET_CHAN_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_SET_NET_CHAN_LBN 18
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_SET_NET_CHAN_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_LACP_PLUGIN_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_LACP_PLUGIN_LBN 19
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_LACP_PLUGIN_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_LACP_INC_L4_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_LACP_INC_L4_LBN 20
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_LACP_INC_L4_WIDTH 1
/* If VLAN_PUSH >= 1, TCI value to be inserted as outermost VLAN. */
#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_VLAN0_TCI_BE_OFST 4
#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_VLAN0_TCI_BE_LEN 2
@@ -28372,19 +24552,23 @@
#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DELIVER_OFST 20
#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DELIVER_LEN 4
/* Allows an action set to trigger several counter updates. Set to
- * COUNTER_LIST_ID_NULL to request no counter action.
+ * MAE_COUNTER_ID_NULL to request no counter action.
*/
#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_COUNTER_LIST_ID_OFST 24
#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_COUNTER_LIST_ID_LEN 4
+/* Enum values, see field(s): */
+/* MAE_COUNTER_ID */
/* If a driver only wished to update one counter within this action set, then
* it can supply a COUNTER_ID instead of allocating a single-element counter
* list. The ID must have been allocated with COUNTER_TYPE=AR. This field
- * should be set to COUNTER_ID_NULL if this behaviour is not required. It is
- * not valid to supply a non-NULL value for both COUNTER_LIST_ID and
+ * should be set to MAE_COUNTER_ID_NULL if this behaviour is not required. It
+ * is not valid to supply a non-NULL value for both COUNTER_LIST_ID and
* COUNTER_ID.
*/
#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_COUNTER_ID_OFST 28
#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_COUNTER_ID_LEN 4
+/* Enum values, see field(s): */
+/* MAE_COUNTER_ID */
#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_MARK_VALUE_OFST 32
#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_MARK_VALUE_LEN 4
/* Set to MAC_ID_NULL to request no source MAC replacement. */
@@ -28437,6 +24621,172 @@
#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_ECN_ECT_1_TO_CE_LBN 6
#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_ECN_ECT_1_TO_CE_WIDTH 1
+/* MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN msgrequest: Only supported if
+ * MAE_ACTION_SET_ALLOC_V3_SUPPORTED is advertised in
+ * MC_CMD_GET_CAPABILITIES_V10_OUT.
+ */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_LEN 53
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_FLAGS_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_FLAGS_LEN 4
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_VLAN_PUSH_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_VLAN_PUSH_LBN 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_VLAN_PUSH_WIDTH 2
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_VLAN_POP_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_VLAN_POP_LBN 4
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_VLAN_POP_WIDTH 2
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DECAP_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DECAP_LBN 8
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DECAP_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_MARK_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_MARK_LBN 9
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_MARK_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_FLAG_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_FLAG_LBN 10
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_FLAG_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_NAT_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_NAT_LBN 11
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_NAT_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_DECR_IP_TTL_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_DECR_IP_TTL_LBN 12
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_DECR_IP_TTL_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_SET_SRC_MPORT_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_SET_SRC_MPORT_LBN 13
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_SET_SRC_MPORT_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_SUPPRESS_SELF_DELIVERY_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_SUPPRESS_SELF_DELIVERY_LBN 14
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_SUPPRESS_SELF_DELIVERY_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_REPLACE_RDP_C_PL_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_REPLACE_RDP_C_PL_LBN 15
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_REPLACE_RDP_C_PL_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_REPLACE_RDP_D_PL_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_REPLACE_RDP_D_PL_LBN 16
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_REPLACE_RDP_D_PL_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_REPLACE_RDP_OUT_HOST_CHAN_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_REPLACE_RDP_OUT_HOST_CHAN_LBN 17
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_REPLACE_RDP_OUT_HOST_CHAN_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_SET_NET_CHAN_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_SET_NET_CHAN_LBN 18
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_SET_NET_CHAN_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_LACP_PLUGIN_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_LACP_PLUGIN_LBN 19
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_LACP_PLUGIN_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_LACP_INC_L4_OFST 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_LACP_INC_L4_LBN 20
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_LACP_INC_L4_WIDTH 1
+/* If VLAN_PUSH >= 1, TCI value to be inserted as outermost VLAN. */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_VLAN0_TCI_BE_OFST 4
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_VLAN0_TCI_BE_LEN 2
+/* If VLAN_PUSH >= 1, TPID value to be inserted as outermost VLAN. */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_VLAN0_PROTO_BE_OFST 6
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_VLAN0_PROTO_BE_LEN 2
+/* If VLAN_PUSH == 2, inner TCI value to be inserted. */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_VLAN1_TCI_BE_OFST 8
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_VLAN1_TCI_BE_LEN 2
+/* If VLAN_PUSH == 2, inner TPID value to be inserted. */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_VLAN1_PROTO_BE_OFST 10
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_VLAN1_PROTO_BE_LEN 2
+/* Reserved. Ignored by firmware. Should be set to zero or 0xffffffff. */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_RSVD_OFST 12
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_RSVD_LEN 4
+/* Set to ENCAP_HEADER_ID_NULL to request no encap action */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_ENCAP_HEADER_ID_OFST 16
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_ENCAP_HEADER_ID_LEN 4
+/* An m-port selector identifying the m-port that the modified packet should be
+ * delivered to. Set to MPORT_SELECTOR_NULL to request no delivery of the
+ * packet.
+ */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DELIVER_OFST 20
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DELIVER_LEN 4
+/* Allows an action set to trigger several counter updates. Set to
+ * MAE_COUNTER_ID_NULL to request no counter action.
+ */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_COUNTER_LIST_ID_OFST 24
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_COUNTER_LIST_ID_LEN 4
+/* Enum values, see field(s): */
+/* MAE_COUNTER_ID */
+/* If a driver only wished to update one counter within this action set, then
+ * it can supply a COUNTER_ID instead of allocating a single-element counter
+ * list. The ID must have been allocated with COUNTER_TYPE=AR. This field
+ * should be set to MAE_COUNTER_ID_NULL if this behaviour is not required. It
+ * is not valid to supply a non-NULL value for both COUNTER_LIST_ID and
+ * COUNTER_ID.
+ */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_COUNTER_ID_OFST 28
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_COUNTER_ID_LEN 4
+/* Enum values, see field(s): */
+/* MAE_COUNTER_ID */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_MARK_VALUE_OFST 32
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_MARK_VALUE_LEN 4
+/* Set to MAC_ID_NULL to request no source MAC replacement. */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_SRC_MAC_ID_OFST 36
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_SRC_MAC_ID_LEN 4
+/* Set to MAC_ID_NULL to request no destination MAC replacement. */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DST_MAC_ID_OFST 40
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DST_MAC_ID_LEN 4
+/* Source m-port ID to be reported for DO_SET_SRC_MPORT action. */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_REPORTED_SRC_MPORT_OFST 44
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_REPORTED_SRC_MPORT_LEN 4
+/* Actions for modifying the Differentiated Services Code-Point (DSCP) bits
+ * within IPv4 and IPv6 headers.
+ */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DSCP_CONTROL_OFST 48
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DSCP_CONTROL_LEN 2
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_DSCP_ENCAP_COPY_OFST 48
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_DSCP_ENCAP_COPY_LBN 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_DSCP_ENCAP_COPY_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_DSCP_DECAP_COPY_OFST 48
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_DSCP_DECAP_COPY_LBN 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_DSCP_DECAP_COPY_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_REPLACE_DSCP_OFST 48
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_REPLACE_DSCP_LBN 2
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_REPLACE_DSCP_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DSCP_VALUE_OFST 48
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DSCP_VALUE_LBN 3
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DSCP_VALUE_WIDTH 6
+/* Actions for modifying the Explicit Congestion Notification (ECN) bits within
+ * IPv4 and IPv6 headers.
+ */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_ECN_CONTROL_OFST 50
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_ECN_CONTROL_LEN 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_ECN_ENCAP_COPY_OFST 50
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_ECN_ENCAP_COPY_LBN 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_ECN_ENCAP_COPY_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_ECN_DECAP_COPY_OFST 50
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_ECN_DECAP_COPY_LBN 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_ECN_DECAP_COPY_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_REPLACE_ECN_OFST 50
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_REPLACE_ECN_LBN 2
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_REPLACE_ECN_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_ECN_VALUE_OFST 50
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_ECN_VALUE_LBN 3
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_ECN_VALUE_WIDTH 2
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_ECN_ECT_0_TO_CE_OFST 50
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_ECN_ECT_0_TO_CE_LBN 5
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_ECN_ECT_0_TO_CE_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_ECN_ECT_1_TO_CE_OFST 50
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_ECN_ECT_1_TO_CE_LBN 6
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_ECN_ECT_1_TO_CE_WIDTH 1
+/* Actions for overwriting CH_ROUTE subfields. */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_RDP_OVERWRITE_OFST 51
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_RDP_OVERWRITE_LEN 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_RDP_C_PL_OFST 51
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_RDP_C_PL_LBN 0
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_RDP_C_PL_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_RDP_D_PL_OFST 51
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_RDP_D_PL_LBN 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_RDP_D_PL_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_RDP_PL_CHAN_OFST 51
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_RDP_PL_CHAN_LBN 2
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_RDP_PL_CHAN_WIDTH 1
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_RDP_OUT_HOST_CHAN_OFST 51
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_RDP_OUT_HOST_CHAN_LBN 3
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_RDP_OUT_HOST_CHAN_WIDTH 1
+/* Override outgoing CH_VC to network port for DO_SET_NET_CHAN action. Cannot
+ * be used in conjunction with DO_SET_SRC_MPORT action.
+ */
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_NET_CHAN_OFST 52
+#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_NET_CHAN_LEN 1
+
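Since the V3 request packs its DSCP rewrite controls into a 16-bit field at offset 48, a short worked example may help. The fragment below builds a DSCP_CONTROL value that requests "replace DSCP with 46 (Expedited Forwarding)", using only the _LBN constants defined above; how the resulting 16-bit value is written into the request buffer is left to the caller's usual MCDI helpers. This is a sketch, not part of the patch.

	u16 dscp_control = 0;

	/* Request DSCP replacement... */
	dscp_control |= 1 << MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_REPLACE_DSCP_LBN;
	/* ...with the 6-bit value 46 (EF), placed at DSCP_VALUE's bit position. */
	dscp_control |= 46 << MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DSCP_VALUE_LBN;
	/* dscp_control is then stored at byte offset
	 * MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DSCP_CONTROL_OFST (48) of the request.
	 */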
/* MC_CMD_MAE_ACTION_SET_ALLOC_OUT msgresponse */
#define MC_CMD_MAE_ACTION_SET_ALLOC_OUT_LEN 4
/* The MSB of the AS_ID is guaranteed to be clear if the ID is not
@@ -28680,58 +25030,6 @@
#define MC_CMD_MAE_OUTER_RULE_REMOVE_OUT_REMOVED_OR_ID_MAXNUM 32
#define MC_CMD_MAE_OUTER_RULE_REMOVE_OUT_REMOVED_OR_ID_MAXNUM_MCDI2 32
-
-/***********************************/
-/* MC_CMD_MAE_OUTER_RULE_UPDATE
- * Atomically change the response of an Outer Rule.
- */
-#define MC_CMD_MAE_OUTER_RULE_UPDATE 0x17d
-#undef MC_CMD_0x17d_PRIVILEGE_CTG
-
-#define MC_CMD_0x17d_PRIVILEGE_CTG SRIOV_CTG_MAE
-
-/* MC_CMD_MAE_OUTER_RULE_UPDATE_IN msgrequest */
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_LEN 16
-/* ID of outer rule to update */
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_OR_ID_OFST 0
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_OR_ID_LEN 4
-/* Packets matching the rule will be parsed with this encapsulation. */
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_ENCAP_TYPE_OFST 4
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_ENCAP_TYPE_LEN 4
-/* Enum values, see field(s): */
-/* MAE_MCDI_ENCAP_TYPE */
-/* This field controls the actions that are performed when a rule is hit. */
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_ACTION_CONTROL_OFST 8
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_ACTION_CONTROL_LEN 4
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_DO_CT_OFST 8
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_DO_CT_LBN 0
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_DO_CT_WIDTH 1
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_CT_VNI_MODE_OFST 8
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_CT_VNI_MODE_LBN 1
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_CT_VNI_MODE_WIDTH 2
-/* Enum values, see field(s): */
-/* MAE_CT_VNI_MODE */
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_DO_COUNT_OFST 8
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_DO_COUNT_LBN 3
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_DO_COUNT_WIDTH 1
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_CT_TCP_FLAGS_INHIBIT_OFST 8
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_CT_TCP_FLAGS_INHIBIT_LBN 4
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_CT_TCP_FLAGS_INHIBIT_WIDTH 1
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_RECIRC_ID_OFST 8
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_RECIRC_ID_LBN 8
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_RECIRC_ID_WIDTH 8
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_CT_DOMAIN_OFST 8
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_CT_DOMAIN_LBN 16
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_CT_DOMAIN_WIDTH 16
-/* ID of counter to increment when the rule is hit. Only used if the DO_COUNT
- * flag is set. The ID must have been allocated with COUNTER_TYPE=OR.
- */
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_COUNTER_ID_OFST 12
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_COUNTER_ID_LEN 4
-
-/* MC_CMD_MAE_OUTER_RULE_UPDATE_OUT msgresponse */
-#define MC_CMD_MAE_OUTER_RULE_UPDATE_OUT_LEN 0
-
/* MAE_ACTION_RULE_RESPONSE structuredef */
#define MAE_ACTION_RULE_RESPONSE_LEN 16
#define MAE_ACTION_RULE_RESPONSE_ASL_ID_OFST 0
@@ -29122,142 +25420,6 @@
#define MAE_MPORT_DESC_VNIC_PLUGIN_TBD_LBN 352
#define MAE_MPORT_DESC_VNIC_PLUGIN_TBD_WIDTH 32
-/* MAE_MPORT_DESC_V2 structuredef */
-#define MAE_MPORT_DESC_V2_LEN 56
-#define MAE_MPORT_DESC_V2_MPORT_ID_OFST 0
-#define MAE_MPORT_DESC_V2_MPORT_ID_LEN 4
-#define MAE_MPORT_DESC_V2_MPORT_ID_LBN 0
-#define MAE_MPORT_DESC_V2_MPORT_ID_WIDTH 32
-/* Reserved for future purposes, contains information independent of caller */
-#define MAE_MPORT_DESC_V2_FLAGS_OFST 4
-#define MAE_MPORT_DESC_V2_FLAGS_LEN 4
-#define MAE_MPORT_DESC_V2_FLAGS_LBN 32
-#define MAE_MPORT_DESC_V2_FLAGS_WIDTH 32
-#define MAE_MPORT_DESC_V2_CALLER_FLAGS_OFST 8
-#define MAE_MPORT_DESC_V2_CALLER_FLAGS_LEN 4
-#define MAE_MPORT_DESC_V2_CAN_RECEIVE_ON_OFST 8
-#define MAE_MPORT_DESC_V2_CAN_RECEIVE_ON_LBN 0
-#define MAE_MPORT_DESC_V2_CAN_RECEIVE_ON_WIDTH 1
-#define MAE_MPORT_DESC_V2_CAN_DELIVER_TO_OFST 8
-#define MAE_MPORT_DESC_V2_CAN_DELIVER_TO_LBN 1
-#define MAE_MPORT_DESC_V2_CAN_DELIVER_TO_WIDTH 1
-#define MAE_MPORT_DESC_V2_CAN_DELETE_OFST 8
-#define MAE_MPORT_DESC_V2_CAN_DELETE_LBN 2
-#define MAE_MPORT_DESC_V2_CAN_DELETE_WIDTH 1
-#define MAE_MPORT_DESC_V2_IS_ZOMBIE_OFST 8
-#define MAE_MPORT_DESC_V2_IS_ZOMBIE_LBN 3
-#define MAE_MPORT_DESC_V2_IS_ZOMBIE_WIDTH 1
-#define MAE_MPORT_DESC_V2_CALLER_FLAGS_LBN 64
-#define MAE_MPORT_DESC_V2_CALLER_FLAGS_WIDTH 32
-/* Not the ideal name; it's really the type of thing connected to the m-port */
-#define MAE_MPORT_DESC_V2_MPORT_TYPE_OFST 12
-#define MAE_MPORT_DESC_V2_MPORT_TYPE_LEN 4
-/* enum: Connected to a MAC... */
-#define MAE_MPORT_DESC_V2_MPORT_TYPE_NET_PORT 0x0
-/* enum: Adds metadata and delivers to another m-port */
-#define MAE_MPORT_DESC_V2_MPORT_TYPE_ALIAS 0x1
-/* enum: Connected to a VNIC. */
-#define MAE_MPORT_DESC_V2_MPORT_TYPE_VNIC 0x2
-#define MAE_MPORT_DESC_V2_MPORT_TYPE_LBN 96
-#define MAE_MPORT_DESC_V2_MPORT_TYPE_WIDTH 32
-/* 128-bit value available to drivers for m-port identification. */
-#define MAE_MPORT_DESC_V2_UUID_OFST 16
-#define MAE_MPORT_DESC_V2_UUID_LEN 16
-#define MAE_MPORT_DESC_V2_UUID_LBN 128
-#define MAE_MPORT_DESC_V2_UUID_WIDTH 128
-/* Big wadge of space reserved for other common properties */
-#define MAE_MPORT_DESC_V2_RESERVED_OFST 32
-#define MAE_MPORT_DESC_V2_RESERVED_LEN 8
-#define MAE_MPORT_DESC_V2_RESERVED_LO_OFST 32
-#define MAE_MPORT_DESC_V2_RESERVED_LO_LEN 4
-#define MAE_MPORT_DESC_V2_RESERVED_LO_LBN 256
-#define MAE_MPORT_DESC_V2_RESERVED_LO_WIDTH 32
-#define MAE_MPORT_DESC_V2_RESERVED_HI_OFST 36
-#define MAE_MPORT_DESC_V2_RESERVED_HI_LEN 4
-#define MAE_MPORT_DESC_V2_RESERVED_HI_LBN 288
-#define MAE_MPORT_DESC_V2_RESERVED_HI_WIDTH 32
-#define MAE_MPORT_DESC_V2_RESERVED_LBN 256
-#define MAE_MPORT_DESC_V2_RESERVED_WIDTH 64
-/* Logical port index. Only valid when type NET Port. */
-#define MAE_MPORT_DESC_V2_NET_PORT_IDX_OFST 40
-#define MAE_MPORT_DESC_V2_NET_PORT_IDX_LEN 4
-#define MAE_MPORT_DESC_V2_NET_PORT_IDX_LBN 320
-#define MAE_MPORT_DESC_V2_NET_PORT_IDX_WIDTH 32
-/* The m-port delivered to */
-#define MAE_MPORT_DESC_V2_ALIAS_DELIVER_MPORT_ID_OFST 40
-#define MAE_MPORT_DESC_V2_ALIAS_DELIVER_MPORT_ID_LEN 4
-#define MAE_MPORT_DESC_V2_ALIAS_DELIVER_MPORT_ID_LBN 320
-#define MAE_MPORT_DESC_V2_ALIAS_DELIVER_MPORT_ID_WIDTH 32
-/* The type of thing that owns the VNIC */
-#define MAE_MPORT_DESC_V2_VNIC_CLIENT_TYPE_OFST 40
-#define MAE_MPORT_DESC_V2_VNIC_CLIENT_TYPE_LEN 4
-#define MAE_MPORT_DESC_V2_VNIC_CLIENT_TYPE_FUNCTION 0x1 /* enum */
-#define MAE_MPORT_DESC_V2_VNIC_CLIENT_TYPE_PLUGIN 0x2 /* enum */
-#define MAE_MPORT_DESC_V2_VNIC_CLIENT_TYPE_LBN 320
-#define MAE_MPORT_DESC_V2_VNIC_CLIENT_TYPE_WIDTH 32
-/* The PCIe interface on which the function lives. CJK: We need an enumeration
- * of interfaces that we extend as new interface (types) appear. This belongs
- * elsewhere and should be referenced from here
- */
-#define MAE_MPORT_DESC_V2_VNIC_FUNCTION_INTERFACE_OFST 44
-#define MAE_MPORT_DESC_V2_VNIC_FUNCTION_INTERFACE_LEN 4
-#define MAE_MPORT_DESC_V2_VNIC_FUNCTION_INTERFACE_LBN 352
-#define MAE_MPORT_DESC_V2_VNIC_FUNCTION_INTERFACE_WIDTH 32
-#define MAE_MPORT_DESC_V2_VNIC_FUNCTION_PF_IDX_OFST 48
-#define MAE_MPORT_DESC_V2_VNIC_FUNCTION_PF_IDX_LEN 2
-#define MAE_MPORT_DESC_V2_VNIC_FUNCTION_PF_IDX_LBN 384
-#define MAE_MPORT_DESC_V2_VNIC_FUNCTION_PF_IDX_WIDTH 16
-#define MAE_MPORT_DESC_V2_VNIC_FUNCTION_VF_IDX_OFST 50
-#define MAE_MPORT_DESC_V2_VNIC_FUNCTION_VF_IDX_LEN 2
-/* enum: Indicates that the function is a PF */
-#define MAE_MPORT_DESC_V2_VF_IDX_NULL 0xffff
-#define MAE_MPORT_DESC_V2_VNIC_FUNCTION_VF_IDX_LBN 400
-#define MAE_MPORT_DESC_V2_VNIC_FUNCTION_VF_IDX_WIDTH 16
-/* Reserved. Should be ignored for now. */
-#define MAE_MPORT_DESC_V2_VNIC_PLUGIN_TBD_OFST 44
-#define MAE_MPORT_DESC_V2_VNIC_PLUGIN_TBD_LEN 4
-#define MAE_MPORT_DESC_V2_VNIC_PLUGIN_TBD_LBN 352
-#define MAE_MPORT_DESC_V2_VNIC_PLUGIN_TBD_WIDTH 32
-/* A client handle for the VNIC's owner. Only valid for type VNIC. */
-#define MAE_MPORT_DESC_V2_VNIC_CLIENT_HANDLE_OFST 52
-#define MAE_MPORT_DESC_V2_VNIC_CLIENT_HANDLE_LEN 4
-#define MAE_MPORT_DESC_V2_VNIC_CLIENT_HANDLE_LBN 416
-#define MAE_MPORT_DESC_V2_VNIC_CLIENT_HANDLE_WIDTH 32
-
-
-/***********************************/
-/* MC_CMD_MAE_MPORT_ENUMERATE
- * Deprecated in favour of MAE_MPORT_READ_JOURNAL. Support for this command
- * will be removed at some future point.
- */
-#define MC_CMD_MAE_MPORT_ENUMERATE 0x17c
-#undef MC_CMD_0x17c_PRIVILEGE_CTG
-
-#define MC_CMD_0x17c_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_MAE_MPORT_ENUMERATE_IN msgrequest */
-#define MC_CMD_MAE_MPORT_ENUMERATE_IN_LEN 0
-
-/* MC_CMD_MAE_MPORT_ENUMERATE_OUT msgresponse */
-#define MC_CMD_MAE_MPORT_ENUMERATE_OUT_LENMIN 8
-#define MC_CMD_MAE_MPORT_ENUMERATE_OUT_LENMAX 252
-#define MC_CMD_MAE_MPORT_ENUMERATE_OUT_LENMAX_MCDI2 1020
-#define MC_CMD_MAE_MPORT_ENUMERATE_OUT_LEN(num) (8+1*(num))
-#define MC_CMD_MAE_MPORT_ENUMERATE_OUT_MPORT_DESC_DATA_NUM(len) (((len)-8)/1)
-#define MC_CMD_MAE_MPORT_ENUMERATE_OUT_MPORT_DESC_COUNT_OFST 0
-#define MC_CMD_MAE_MPORT_ENUMERATE_OUT_MPORT_DESC_COUNT_LEN 4
-#define MC_CMD_MAE_MPORT_ENUMERATE_OUT_SIZEOF_MPORT_DESC_OFST 4
-#define MC_CMD_MAE_MPORT_ENUMERATE_OUT_SIZEOF_MPORT_DESC_LEN 4
-/* Any array of MAE_MPORT_DESC structures. The MAE_MPORT_DESC structure may
- * grow in future version of this command. Drivers should use a stride of
- * SIZEOF_MPORT_DESC. Fields beyond SIZEOF_MPORT_DESC are not present.
- */
-#define MC_CMD_MAE_MPORT_ENUMERATE_OUT_MPORT_DESC_DATA_OFST 8
-#define MC_CMD_MAE_MPORT_ENUMERATE_OUT_MPORT_DESC_DATA_LEN 1
-#define MC_CMD_MAE_MPORT_ENUMERATE_OUT_MPORT_DESC_DATA_MINNUM 0
-#define MC_CMD_MAE_MPORT_ENUMERATE_OUT_MPORT_DESC_DATA_MAXNUM 244
-#define MC_CMD_MAE_MPORT_ENUMERATE_OUT_MPORT_DESC_DATA_MAXNUM_MCDI2 1012
-
/***********************************/
/* MC_CMD_MAE_MPORT_READ_JOURNAL
@@ -29570,73 +25732,6 @@
/***********************************/
-/* MC_CMD_TABLE_UPDATE
- * Update an existing entry in a table with a new response value. May return
- * EINVAL for unknown table ID or other bad request parameters, ENOENT if the
- * entry does not already exist, or EPERM if the operation is not permitted. In
- * case of an error, the additional MCDI error argument field returns the raw
- * error code from the underlying CAM driver.
- */
-#define MC_CMD_TABLE_UPDATE 0x1ce
-#undef MC_CMD_0x1ce_PRIVILEGE_CTG
-
-#define MC_CMD_0x1ce_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_TABLE_UPDATE_IN msgrequest */
-#define MC_CMD_TABLE_UPDATE_IN_LENMIN 16
-#define MC_CMD_TABLE_UPDATE_IN_LENMAX 252
-#define MC_CMD_TABLE_UPDATE_IN_LENMAX_MCDI2 1020
-#define MC_CMD_TABLE_UPDATE_IN_LEN(num) (12+4*(num))
-#define MC_CMD_TABLE_UPDATE_IN_DATA_NUM(len) (((len)-12)/4)
-/* Table identifier. */
-#define MC_CMD_TABLE_UPDATE_IN_TABLE_ID_OFST 0
-#define MC_CMD_TABLE_UPDATE_IN_TABLE_ID_LEN 4
-/* Enum values, see field(s): */
-/* TABLE_ID */
-/* Width in bits of supplied key data (must match table properties). */
-#define MC_CMD_TABLE_UPDATE_IN_KEY_WIDTH_OFST 4
-#define MC_CMD_TABLE_UPDATE_IN_KEY_WIDTH_LEN 2
-/* Width in bits of supplied mask data (0 for direct/BCAM tables, or for STCAM
- * when allocated MASK_ID is used instead).
- */
-#define MC_CMD_TABLE_UPDATE_IN_MASK_WIDTH_OFST 6
-#define MC_CMD_TABLE_UPDATE_IN_MASK_WIDTH_LEN 2
-/* Width in bits of supplied response data (for INSERT and UPDATE operations
- * this must match the table properties; for DELETE operations, no response
- * data is required and this must be 0).
- */
-#define MC_CMD_TABLE_UPDATE_IN_RESP_WIDTH_OFST 8
-#define MC_CMD_TABLE_UPDATE_IN_RESP_WIDTH_LEN 2
-/* Mask ID for STCAM table - used instead of mask data if the table descriptor
- * reports ALLOC_MASKS==1. Otherwise set to 0.
- */
-#define MC_CMD_TABLE_UPDATE_IN_MASK_ID_OFST 6
-#define MC_CMD_TABLE_UPDATE_IN_MASK_ID_LEN 2
-/* Priority for TCAM or STCAM, in range 0..N_PRIORITIES-1, otherwise 0. */
-#define MC_CMD_TABLE_UPDATE_IN_PRIORITY_OFST 8
-#define MC_CMD_TABLE_UPDATE_IN_PRIORITY_LEN 2
-/* (32-bit alignment padding - set to 0) */
-#define MC_CMD_TABLE_UPDATE_IN_RESERVED_OFST 10
-#define MC_CMD_TABLE_UPDATE_IN_RESERVED_LEN 2
-/* Sequence of key, mask (if MASK_WIDTH > 0), and response (if RESP_WIDTH > 0)
- * data values. Each of these items is logically treated as a single wide N-bit
- * value, in which the individual fields have been placed within that value per
- * the LBN and WIDTH information from the table field descriptors. The wide
- * N-bit value is padded with 0 bits at the MSB end if necessary to make a
- * multiple of 32 bits. The value is then packed into this command as a
- * sequence of 32-bit words, bits [31:0] first, then bits [63:32], etc.
- */
-#define MC_CMD_TABLE_UPDATE_IN_DATA_OFST 12
-#define MC_CMD_TABLE_UPDATE_IN_DATA_LEN 4
-#define MC_CMD_TABLE_UPDATE_IN_DATA_MINNUM 1
-#define MC_CMD_TABLE_UPDATE_IN_DATA_MAXNUM 60
-#define MC_CMD_TABLE_UPDATE_IN_DATA_MAXNUM_MCDI2 252
-
-/* MC_CMD_TABLE_UPDATE_OUT msgresponse */
-#define MC_CMD_TABLE_UPDATE_OUT_LEN 0
-
-
-/***********************************/
/* MC_CMD_TABLE_DELETE
* Delete an existing entry in a table. May return EINVAL for unknown table ID
* or other bad request parameters, ENOENT if the entry does not exist, or
@@ -29702,5 +25797,124 @@
/* MC_CMD_TABLE_DELETE_OUT msgresponse */
#define MC_CMD_TABLE_DELETE_OUT_LEN 0
+/* MC_CMD_QUEUE_HANDLE structuredef: On X4, to distinguish between full-
+ * featured (X2-style) VIs and low-latency (X3-style) queues, we use the top
+ * bits of the queue handle to specify the queue type in all MCDI calls which
+ * refer to VIs/queues. These bits should be masked off when indexing into a
+ * queue in the BAR.
+ */
+#define MC_CMD_QUEUE_HANDLE_LEN 4
+/* Combined queue number and type. This is the ID returned by and passed into
+ * MCDI calls that use queues.
+ */
+#define MC_CMD_QUEUE_HANDLE_QUEUE_HANDLE_OFST 0
+#define MC_CMD_QUEUE_HANDLE_QUEUE_HANDLE_LEN 4
+#define MC_CMD_QUEUE_HANDLE_QUEUE_NUM_OFST 0
+#define MC_CMD_QUEUE_HANDLE_QUEUE_NUM_LBN 0
+#define MC_CMD_QUEUE_HANDLE_QUEUE_NUM_WIDTH 24
+#define MC_CMD_QUEUE_HANDLE_QUEUE_TYPE_OFST 0
+#define MC_CMD_QUEUE_HANDLE_QUEUE_TYPE_LBN 24
+#define MC_CMD_QUEUE_HANDLE_QUEUE_TYPE_WIDTH 8
+/* enum: Indicates that the queue instance is a full-featured VI */
+#define MC_CMD_QUEUE_HANDLE_QUEUE_TYPE_FF_VI 0x0
+/* enum: Indicates that the queue instance is a LL TXQ */
+#define MC_CMD_QUEUE_HANDLE_QUEUE_TYPE_LL_TXQ 0x1
+/* enum: Indicates that the queue instance is a LL RXQ */
+#define MC_CMD_QUEUE_HANDLE_QUEUE_TYPE_LL_RXQ 0x2
+/* enum: Indicates that the queue instance is a LL EVQ */
+#define MC_CMD_QUEUE_HANDLE_QUEUE_TYPE_LL_EVQ 0x3
+#define MC_CMD_QUEUE_HANDLE_QUEUE_HANDLE_LBN 0
+#define MC_CMD_QUEUE_HANDLE_QUEUE_HANDLE_WIDTH 32
+
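Because the handle packs a 24-bit queue number and an 8-bit type into a single dword, a driver that needs the raw queue index (for example to index into the BAR, as the comment above notes) has to strip the type bits first. A minimal sketch of that decode, using only the LBN/WIDTH values defined above:

static inline u32 example_queue_handle_to_num(u32 handle)
{
	/* Low 24 bits: the queue number used for BAR indexing. */
	return handle & ((1u << MC_CMD_QUEUE_HANDLE_QUEUE_NUM_WIDTH) - 1);
}

static inline u32 example_queue_handle_to_type(u32 handle)
{
	/* Top 8 bits: one of the QUEUE_TYPE_* enum values above. */
	return handle >> MC_CMD_QUEUE_HANDLE_QUEUE_TYPE_LBN;
}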
+
+/***********************************/
+/* MC_CMD_ALLOC_LL_QUEUES
+ * Allocate low latency (X3-style) queues for the current PCI function. Can be
+ * called more than once to allocate additional queues.
+ */
+#define MC_CMD_ALLOC_LL_QUEUES 0x1dd
+#undef MC_CMD_0x1dd_PRIVILEGE_CTG
+
+#define MC_CMD_0x1dd_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_ALLOC_LL_QUEUES_IN msgrequest */
+#define MC_CMD_ALLOC_LL_QUEUES_IN_LEN 24
+/* The minimum number of TXQs that is acceptable */
+#define MC_CMD_ALLOC_LL_QUEUES_IN_MIN_TXQ_COUNT_OFST 0
+#define MC_CMD_ALLOC_LL_QUEUES_IN_MIN_TXQ_COUNT_LEN 4
+/* The maximum number of TXQs that would be useful */
+#define MC_CMD_ALLOC_LL_QUEUES_IN_MAX_TXQ_COUNT_OFST 4
+#define MC_CMD_ALLOC_LL_QUEUES_IN_MAX_TXQ_COUNT_LEN 4
+/* The minimum number of RXQs that is acceptable */
+#define MC_CMD_ALLOC_LL_QUEUES_IN_MIN_RXQ_COUNT_OFST 8
+#define MC_CMD_ALLOC_LL_QUEUES_IN_MIN_RXQ_COUNT_LEN 4
+/* The maximum number of RXQs that would be useful */
+#define MC_CMD_ALLOC_LL_QUEUES_IN_MAX_RXQ_COUNT_OFST 12
+#define MC_CMD_ALLOC_LL_QUEUES_IN_MAX_RXQ_COUNT_LEN 4
+/* The minimum number of EVQs that is acceptable */
+#define MC_CMD_ALLOC_LL_QUEUES_IN_MIN_EVQ_COUNT_OFST 16
+#define MC_CMD_ALLOC_LL_QUEUES_IN_MIN_EVQ_COUNT_LEN 4
+/* The maximum number of EVQs that would be useful */
+#define MC_CMD_ALLOC_LL_QUEUES_IN_MAX_EVQ_COUNT_OFST 20
+#define MC_CMD_ALLOC_LL_QUEUES_IN_MAX_EVQ_COUNT_LEN 4
+
+/* MC_CMD_ALLOC_LL_QUEUES_OUT msgresponse */
+#define MC_CMD_ALLOC_LL_QUEUES_OUT_LENMIN 16
+#define MC_CMD_ALLOC_LL_QUEUES_OUT_LENMAX 252
+#define MC_CMD_ALLOC_LL_QUEUES_OUT_LENMAX_MCDI2 1020
+#define MC_CMD_ALLOC_LL_QUEUES_OUT_LEN(num) (12+4*(num))
+#define MC_CMD_ALLOC_LL_QUEUES_OUT_QUEUES_NUM(len) (((len)-12)/4)
+/* The number of TXQs allocated in this request */
+#define MC_CMD_ALLOC_LL_QUEUES_OUT_TXQ_COUNT_OFST 0
+#define MC_CMD_ALLOC_LL_QUEUES_OUT_TXQ_COUNT_LEN 4
+/* The number of RXQs allocated in this request */
+#define MC_CMD_ALLOC_LL_QUEUES_OUT_RXQ_COUNT_OFST 4
+#define MC_CMD_ALLOC_LL_QUEUES_OUT_RXQ_COUNT_LEN 4
+/* The number of EVQs allocated in this request */
+#define MC_CMD_ALLOC_LL_QUEUES_OUT_EVQ_COUNT_OFST 8
+#define MC_CMD_ALLOC_LL_QUEUES_OUT_EVQ_COUNT_LEN 4
+/* A list of allocated queues, returned as MC_CMD_QUEUE_HANDLEs, not
+ * necessarily contiguous. TXQs are first in the list, followed by RXQs then
+ * EVQs. The type of each queue is indicated by the top bits (see the
+ * QUEUE_TYPE enum).
+ */
+#define MC_CMD_ALLOC_LL_QUEUES_OUT_QUEUES_OFST 12
+#define MC_CMD_ALLOC_LL_QUEUES_OUT_QUEUES_LEN 4
+#define MC_CMD_ALLOC_LL_QUEUES_OUT_QUEUES_MINNUM 1
+#define MC_CMD_ALLOC_LL_QUEUES_OUT_QUEUES_MAXNUM 60
+#define MC_CMD_ALLOC_LL_QUEUES_OUT_QUEUES_MAXNUM_MCDI2 252
+
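As a rough illustration of how a driver might use this command, the sketch below asks for between one and four of each queue type and reports what it was given. It reuses only helpers that already appear in this patch (MCDI_DECLARE_BUF, MCDI_SET_DWORD, MCDI_DWORD, efx_mcdi_rpc); walking the returned QUEUES array is omitted, and the sketch is not part of the patch.

static int example_alloc_ll_queues(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_ALLOC_LL_QUEUES_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_ALLOC_LL_QUEUES_OUT_LENMAX);
	size_t outlen;
	int rc;

	MCDI_SET_DWORD(inbuf, ALLOC_LL_QUEUES_IN_MIN_TXQ_COUNT, 1);
	MCDI_SET_DWORD(inbuf, ALLOC_LL_QUEUES_IN_MAX_TXQ_COUNT, 4);
	MCDI_SET_DWORD(inbuf, ALLOC_LL_QUEUES_IN_MIN_RXQ_COUNT, 1);
	MCDI_SET_DWORD(inbuf, ALLOC_LL_QUEUES_IN_MAX_RXQ_COUNT, 4);
	MCDI_SET_DWORD(inbuf, ALLOC_LL_QUEUES_IN_MIN_EVQ_COUNT, 1);
	MCDI_SET_DWORD(inbuf, ALLOC_LL_QUEUES_IN_MAX_EVQ_COUNT, 4);

	rc = efx_mcdi_rpc(efx, MC_CMD_ALLOC_LL_QUEUES, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;
	if (outlen < MC_CMD_ALLOC_LL_QUEUES_OUT_LENMIN)
		return -EIO;

	/* TXQ handles come first in QUEUES, then RXQs, then EVQs; the type is
	 * also encoded in the top bits of each MC_CMD_QUEUE_HANDLE.
	 */
	pr_debug("allocated %u TXQs, %u RXQs, %u EVQs\n",
		 MCDI_DWORD(outbuf, ALLOC_LL_QUEUES_OUT_TXQ_COUNT),
		 MCDI_DWORD(outbuf, ALLOC_LL_QUEUES_OUT_RXQ_COUNT),
		 MCDI_DWORD(outbuf, ALLOC_LL_QUEUES_OUT_EVQ_COUNT));
	return 0;
}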
+
+/***********************************/
+/* MC_CMD_FREE_LL_QUEUES
+ * Free low latency (X3-style) queues for the current PCI function.
+ */
+#define MC_CMD_FREE_LL_QUEUES 0x1de
+#undef MC_CMD_0x1de_PRIVILEGE_CTG
+
+#define MC_CMD_0x1de_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_FREE_LL_QUEUES_IN msgrequest */
+#define MC_CMD_FREE_LL_QUEUES_IN_LENMIN 8
+#define MC_CMD_FREE_LL_QUEUES_IN_LENMAX 252
+#define MC_CMD_FREE_LL_QUEUES_IN_LENMAX_MCDI2 1020
+#define MC_CMD_FREE_LL_QUEUES_IN_LEN(num) (4+4*(num))
+#define MC_CMD_FREE_LL_QUEUES_IN_QUEUES_NUM(len) (((len)-4)/4)
+/* The number of queues to free. */
+#define MC_CMD_FREE_LL_QUEUES_IN_QUEUE_COUNT_OFST 0
+#define MC_CMD_FREE_LL_QUEUES_IN_QUEUE_COUNT_LEN 4
+/* A list of queues to free, as a list of MC_CMD_QUEUE_HANDLEs. They must have
+ * all been previously allocated by MC_CMD_ALLOC_LL_QUEUES. The type of each
+ * queue should be indicated by the top bits.
+ */
+#define MC_CMD_FREE_LL_QUEUES_IN_QUEUES_OFST 4
+#define MC_CMD_FREE_LL_QUEUES_IN_QUEUES_LEN 4
+#define MC_CMD_FREE_LL_QUEUES_IN_QUEUES_MINNUM 1
+#define MC_CMD_FREE_LL_QUEUES_IN_QUEUES_MAXNUM 62
+#define MC_CMD_FREE_LL_QUEUES_IN_QUEUES_MAXNUM_MCDI2 254
+
+/* MC_CMD_FREE_LL_QUEUES_OUT msgresponse */
+#define MC_CMD_FREE_LL_QUEUES_OUT_LEN 0
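Freeing is the mirror image: the caller hands back handles previously returned by MC_CMD_ALLOC_LL_QUEUES. A single-handle sketch, again reusing only the helpers visible in this patch; the one assumption is that element 0 of the QUEUES array can be written through its base-offset field name.

static int example_free_one_ll_queue(struct efx_nic *efx, u32 handle)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_FREE_LL_QUEUES_IN_LEN(1));

	MCDI_SET_DWORD(inbuf, FREE_LL_QUEUES_IN_QUEUE_COUNT, 1);
	/* First (and only) entry of the QUEUES array, at its base offset. */
	MCDI_SET_DWORD(inbuf, FREE_LL_QUEUES_IN_QUEUES, handle);

	return efx_mcdi_rpc(efx, MC_CMD_FREE_LL_QUEUES, inbuf, sizeof(inbuf),
			    NULL, 0, NULL);
}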
+
#endif /* MCDI_PCOL_H */
diff --git a/drivers/net/ethernet/sfc/mcdi_port.c b/drivers/net/ethernet/sfc/mcdi_port.c
index ad4694fa3dda..7b236d291d8c 100644
--- a/drivers/net/ethernet/sfc/mcdi_port.c
+++ b/drivers/net/ethernet/sfc/mcdi_port.c
@@ -17,58 +17,6 @@
#include "selftest.h"
#include "mcdi_port_common.h"
-static int efx_mcdi_mdio_read(struct net_device *net_dev,
- int prtad, int devad, u16 addr)
-{
- struct efx_nic *efx = efx_netdev_priv(net_dev);
- MCDI_DECLARE_BUF(inbuf, MC_CMD_MDIO_READ_IN_LEN);
- MCDI_DECLARE_BUF(outbuf, MC_CMD_MDIO_READ_OUT_LEN);
- size_t outlen;
- int rc;
-
- MCDI_SET_DWORD(inbuf, MDIO_READ_IN_BUS, efx->mdio_bus);
- MCDI_SET_DWORD(inbuf, MDIO_READ_IN_PRTAD, prtad);
- MCDI_SET_DWORD(inbuf, MDIO_READ_IN_DEVAD, devad);
- MCDI_SET_DWORD(inbuf, MDIO_READ_IN_ADDR, addr);
-
- rc = efx_mcdi_rpc(efx, MC_CMD_MDIO_READ, inbuf, sizeof(inbuf),
- outbuf, sizeof(outbuf), &outlen);
- if (rc)
- return rc;
-
- if (MCDI_DWORD(outbuf, MDIO_READ_OUT_STATUS) !=
- MC_CMD_MDIO_STATUS_GOOD)
- return -EIO;
-
- return (u16)MCDI_DWORD(outbuf, MDIO_READ_OUT_VALUE);
-}
-
-static int efx_mcdi_mdio_write(struct net_device *net_dev,
- int prtad, int devad, u16 addr, u16 value)
-{
- struct efx_nic *efx = efx_netdev_priv(net_dev);
- MCDI_DECLARE_BUF(inbuf, MC_CMD_MDIO_WRITE_IN_LEN);
- MCDI_DECLARE_BUF(outbuf, MC_CMD_MDIO_WRITE_OUT_LEN);
- size_t outlen;
- int rc;
-
- MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_BUS, efx->mdio_bus);
- MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_PRTAD, prtad);
- MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_DEVAD, devad);
- MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_ADDR, addr);
- MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_VALUE, value);
-
- rc = efx_mcdi_rpc(efx, MC_CMD_MDIO_WRITE, inbuf, sizeof(inbuf),
- outbuf, sizeof(outbuf), &outlen);
- if (rc)
- return rc;
-
- if (MCDI_DWORD(outbuf, MDIO_WRITE_OUT_STATUS) !=
- MC_CMD_MDIO_STATUS_GOOD)
- return -EIO;
-
- return 0;
-}
u32 efx_mcdi_phy_get_caps(struct efx_nic *efx)
{
@@ -97,12 +45,7 @@ int efx_mcdi_port_probe(struct efx_nic *efx)
{
int rc;
- /* Set up MDIO structure for PHY */
- efx->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
- efx->mdio.mdio_read = efx_mcdi_mdio_read;
- efx->mdio.mdio_write = efx_mcdi_mdio_write;
-
- /* Fill out MDIO structure, loopback modes, and initial link state */
+ /* Fill out loopback modes and initial link state */
rc = efx_mcdi_phy_probe(efx);
if (rc != 0)
return rc;
diff --git a/drivers/net/ethernet/sfc/mcdi_port_common.c b/drivers/net/ethernet/sfc/mcdi_port_common.c
index 76ea26722ca4..dae684194ac8 100644
--- a/drivers/net/ethernet/sfc/mcdi_port_common.c
+++ b/drivers/net/ethernet/sfc/mcdi_port_common.c
@@ -448,15 +448,6 @@ int efx_mcdi_phy_probe(struct efx_nic *efx)
efx->phy_data = phy_data;
efx->phy_type = phy_data->type;
- efx->mdio_bus = phy_data->channel;
- efx->mdio.prtad = phy_data->port;
- efx->mdio.mmds = phy_data->mmd_mask & ~(1 << MC_CMD_MMD_CLAUSE22);
- efx->mdio.mode_support = 0;
- if (phy_data->mmd_mask & (1 << MC_CMD_MMD_CLAUSE22))
- efx->mdio.mode_support |= MDIO_SUPPORTS_C22;
- if (phy_data->mmd_mask & ~(1 << MC_CMD_MMD_CLAUSE22))
- efx->mdio.mode_support |= MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
-
caps = MCDI_DWORD(outbuf, GET_LINK_OUT_CAP);
if (caps & (1 << MC_CMD_PHY_CAP_AN_LBN))
mcdi_to_ethtool_linkset(phy_data->media, caps,
@@ -546,8 +537,6 @@ void efx_mcdi_phy_get_link_ksettings(struct efx_nic *efx, struct ethtool_link_ks
cmd->base.port = mcdi_to_ethtool_media(phy_cfg->media);
cmd->base.phy_address = phy_cfg->port;
cmd->base.autoneg = !!(efx->link_advertising[0] & ADVERTISED_Autoneg);
- cmd->base.mdio_support = (efx->mdio.mode_support &
- (MDIO_SUPPORTS_C45 | MDIO_SUPPORTS_C22));
mcdi_to_ethtool_linkset(phy_cfg->media, phy_cfg->supported_cap,
cmd->link_modes.supported);
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index f70a7b7d6345..5c0f306fb019 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -15,7 +15,7 @@
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/timer.h>
-#include <linux/mdio.h>
+#include <linux/mii.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/device.h>
@@ -956,8 +956,6 @@ struct efx_mae;
* @stats_buffer: DMA buffer for statistics
* @phy_type: PHY type
* @phy_data: PHY private data (including PHY-specific stats)
- * @mdio: PHY MDIO interface
- * @mdio_bus: PHY MDIO bus ID (only used by Siena)
* @phy_mode: PHY operating mode. Serialised by @mac_lock.
* @link_advertising: Autonegotiation advertising flags
* @fec_config: Forward Error Correction configuration flags. For bit positions
@@ -1006,6 +1004,7 @@ struct efx_mae;
* @dl_port: devlink port associated with the PF
* @mem_bar: The BAR that is mapped into membase.
* @reg_base: Offset from the start of the bar to the function control window.
+ * @reflash_mutex: Mutex for serialising firmware reflash operations.
* @monitor_work: Hardware monitor workitem
* @biu_lock: BIU (bus interface unit) lock
* @last_irq_cpu: Last CPU to handle a possible test interrupt. This
@@ -1131,8 +1130,6 @@ struct efx_nic {
unsigned int phy_type;
void *phy_data;
- struct mdio_if_info mdio;
- unsigned int mdio_bus;
enum efx_phy_mode phy_mode;
__ETHTOOL_DECLARE_LINK_MODE_MASK(link_advertising);
@@ -1191,6 +1188,7 @@ struct efx_nic {
struct devlink_port *dl_port;
unsigned int mem_bar;
u32 reg_base;
+ struct mutex reflash_mutex;
/* The following fields may be written more often */
@@ -1383,6 +1381,8 @@ struct efx_udp_tunnel {
* @can_rx_scatter: NIC is able to scatter packets to multiple buffers
* @always_rx_scatter: NIC will always scatter packets to multiple buffers
* @option_descriptors: NIC supports TX option descriptors
+ * @flash_auto_partition: firmware flash uses AUTO partition, driver does
+ * not need to perform image parsing
* @min_interrupt_mode: Lowest capability interrupt mode supported
* from &enum efx_int_mode.
* @timer_period_max: Maximum period of interrupt timer (in ticks)
@@ -1559,6 +1559,7 @@ struct efx_nic_type {
bool can_rx_scatter;
bool always_rx_scatter;
bool option_descriptors;
+ bool flash_auto_partition;
unsigned int min_interrupt_mode;
unsigned int timer_period_max;
netdev_features_t offload_features;
diff --git a/drivers/net/ethernet/sfc/tc.c b/drivers/net/ethernet/sfc/tc.c
index 0d93164988fc..fa94aa3cd5fe 100644
--- a/drivers/net/ethernet/sfc/tc.c
+++ b/drivers/net/ethernet/sfc/tc.c
@@ -1043,7 +1043,7 @@ static int efx_tc_flower_handle_lhs_actions(struct efx_nic *efx,
return -EOPNOTSUPP;
}
if (fa->ct.action) {
- NL_SET_ERR_MSG_FMT_MOD(extack, "Unhandled ct.action %u for LHS rule\n",
+ NL_SET_ERR_MSG_FMT_MOD(extack, "Unhandled ct.action %u for LHS rule",
fa->ct.action);
return -EOPNOTSUPP;
}
@@ -1056,7 +1056,7 @@ static int efx_tc_flower_handle_lhs_actions(struct efx_nic *efx,
act->zone = ct_zone;
break;
default:
- NL_SET_ERR_MSG_FMT_MOD(extack, "Unhandled action %u for LHS rule\n",
+ NL_SET_ERR_MSG_FMT_MOD(extack, "Unhandled action %u for LHS rule",
fa->id);
return -EOPNOTSUPP;
}
@@ -1581,7 +1581,7 @@ static int efx_tc_flower_replace_foreign_lhs(struct efx_nic *efx,
type = efx_tc_indr_netdev_type(net_dev);
if (type == EFX_ENCAP_TYPE_NONE) {
- NL_SET_ERR_MSG_MOD(extack, "Egress encap match on unsupported tunnel device\n");
+ NL_SET_ERR_MSG_MOD(extack, "Egress encap match on unsupported tunnel device");
return -EOPNOTSUPP;
}
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index f539813878f5..2e1106097965 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -43,7 +43,6 @@
#include <linux/smsc911x.h>
#include <linux/device.h>
#include <linux/of.h>
-#include <linux/of_gpio.h>
#include <linux/of_net.h>
#include <linux/acpi.h>
#include <linux/pm_runtime.h>
diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c
index dc99821c6226..ee890de69ffe 100644
--- a/drivers/net/ethernet/socionext/netsec.c
+++ b/drivers/net/ethernet/socionext/netsec.c
@@ -970,7 +970,7 @@ static int netsec_process_rx(struct netsec_priv *priv, int budget)
struct netsec_de *de = dring->vaddr + (DESC_SZ * idx);
struct netsec_desc *desc = &dring->desc[idx];
struct page *page = virt_to_page(desc->addr);
- u32 xdp_result = NETSEC_XDP_PASS;
+ u32 metasize, xdp_result = NETSEC_XDP_PASS;
struct sk_buff *skb = NULL;
u16 pkt_len, desc_len;
dma_addr_t dma_handle;
@@ -1019,7 +1019,7 @@ static int netsec_process_rx(struct netsec_priv *priv, int budget)
prefetch(desc->addr);
xdp_prepare_buff(&xdp, desc->addr, NETSEC_RXBUF_HEADROOM,
- pkt_len, false);
+ pkt_len, true);
if (xdp_prog) {
xdp_result = netsec_run_xdp(priv, xdp_prog, &xdp);
@@ -1048,6 +1048,9 @@ static int netsec_process_rx(struct netsec_priv *priv, int budget)
skb_reserve(skb, xdp.data - xdp.data_hard_start);
skb_put(skb, xdp.data_end - xdp.data);
+ metasize = xdp.data - xdp.data_meta;
+ if (metasize)
+ skb_metadata_set(skb, metasize);
skb->protocol = eth_type_trans(skb, priv->ndev);
if (priv->rx_cksum_offload_flag &&
diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig
index 4cc85a36a1ab..3c820ef56775 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Kconfig
+++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig
@@ -181,6 +181,17 @@ config DWMAC_SOCFPGA
for the stmmac device driver. This driver is used for
arria5 and cyclone5 FPGA SoCs.
+config DWMAC_SOPHGO
+ tristate "Sophgo dwmac support"
+ depends on OF && (ARCH_SOPHGO || COMPILE_TEST)
+ default m if ARCH_SOPHGO
+ help
+ Support for ethernet controllers on Sophgo RISC-V SoCs
+
+ This selects the Sophgo SoC specific glue layer support
+ for the stmmac device driver. This driver is used for the
+ ethernet controllers on various Sophgo SoCs.
+
config DWMAC_STARFIVE
tristate "StarFive dwmac support"
depends on OF && (ARCH_STARFIVE || COMPILE_TEST)
@@ -307,6 +318,7 @@ config DWMAC_INTEL
default X86
depends on X86 && STMMAC_ETH && PCI
depends on COMMON_CLK
+ depends on ACPI
help
This selects the Intel platform specific bus support for the
stmmac driver. This driver is used for Intel Quark/EHL/TGL.
diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile
index b26f0e79c2b3..594883fb4164 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Makefile
+++ b/drivers/net/ethernet/stmicro/stmmac/Makefile
@@ -24,6 +24,7 @@ obj-$(CONFIG_DWMAC_ROCKCHIP) += dwmac-rk.o
obj-$(CONFIG_DWMAC_RZN1) += dwmac-rzn1.o
obj-$(CONFIG_DWMAC_S32) += dwmac-s32.o
obj-$(CONFIG_DWMAC_SOCFPGA) += dwmac-altr-socfpga.o
+obj-$(CONFIG_DWMAC_SOPHGO) += dwmac-sophgo.o
obj-$(CONFIG_DWMAC_STARFIVE) += dwmac-starfive.o
obj-$(CONFIG_DWMAC_STI) += dwmac-sti.o
obj-$(CONFIG_DWMAC_STM32) += dwmac-stm32.o
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index e25db747a81a..412b07e77945 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -101,8 +101,8 @@ struct stmmac_rxq_stats {
/* Updates on each CPU protected by not allowing nested irqs. */
struct stmmac_pcpu_stats {
struct u64_stats_sync syncp;
- u64_stats_t rx_normal_irq_n[MTL_MAX_TX_QUEUES];
- u64_stats_t tx_normal_irq_n[MTL_MAX_RX_QUEUES];
+ u64_stats_t rx_normal_irq_n[MTL_MAX_RX_QUEUES];
+ u64_stats_t tx_normal_irq_n[MTL_MAX_TX_QUEUES];
};
/* Extra statistic and debug information exposed by ethtool */
@@ -530,6 +530,20 @@ struct dma_features {
#define STMMAC_DEFAULT_TWT_LS 0x1E
#define STMMAC_ET_MAX 0xFFFFF
+/* Common LPI register bits */
+#define LPI_CTRL_STATUS_LPITCSE BIT(21) /* LPI Tx Clock Stop Enable, gmac4, xgmac2 only */
+#define LPI_CTRL_STATUS_LPIATE BIT(20) /* LPI Timer Enable, gmac4 only */
+#define LPI_CTRL_STATUS_LPITXA BIT(19) /* Enable LPI TX Automate */
+#define LPI_CTRL_STATUS_PLSEN BIT(18) /* Enable PHY Link Status */
+#define LPI_CTRL_STATUS_PLS BIT(17) /* PHY Link Status */
+#define LPI_CTRL_STATUS_LPIEN BIT(16) /* LPI Enable */
+#define LPI_CTRL_STATUS_RLPIST BIT(9) /* Receive LPI state, gmac1000 only? */
+#define LPI_CTRL_STATUS_TLPIST BIT(8) /* Transmit LPI state, gmac1000 only? */
+#define LPI_CTRL_STATUS_RLPIEX BIT(3) /* Receive LPI Exit */
+#define LPI_CTRL_STATUS_RLPIEN BIT(2) /* Receive LPI Entry */
+#define LPI_CTRL_STATUS_TLPIEX BIT(1) /* Transmit LPI Exit */
+#define LPI_CTRL_STATUS_TLPIEN BIT(0) /* Transmit LPI Entry */
+
#define STMMAC_CHAIN_MODE 0x1
#define STMMAC_RING_MODE 0x2
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c
index ef99ef3f1ab4..37fe7c288878 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c
@@ -59,10 +59,11 @@ static void anarion_gmac_exit(struct platform_device *pdev, void *priv)
gmac_write_reg(gmac, GMAC_RESET_CONTROL_REG, 1);
}
-static struct anarion_gmac *anarion_config_dt(struct platform_device *pdev)
+static struct anarion_gmac *
+anarion_config_dt(struct platform_device *pdev,
+ struct plat_stmmacenet_data *plat_dat)
{
struct anarion_gmac *gmac;
- phy_interface_t phy_mode;
void __iomem *ctl_block;
int err;
@@ -79,11 +80,7 @@ static struct anarion_gmac *anarion_config_dt(struct platform_device *pdev)
gmac->ctl_block = ctl_block;
- err = of_get_phy_mode(pdev->dev.of_node, &phy_mode);
- if (err)
- return ERR_PTR(err);
-
- switch (phy_mode) {
+ switch (plat_dat->phy_interface) {
case PHY_INTERFACE_MODE_RGMII:
fallthrough;
case PHY_INTERFACE_MODE_RGMII_ID:
@@ -93,7 +90,7 @@ static struct anarion_gmac *anarion_config_dt(struct platform_device *pdev)
break;
default:
dev_err(&pdev->dev, "Unsupported phy-mode (%d)\n",
- phy_mode);
+ plat_dat->phy_interface);
return ERR_PTR(-ENOTSUPP);
}
@@ -111,14 +108,14 @@ static int anarion_dwmac_probe(struct platform_device *pdev)
if (ret)
return ret;
- gmac = anarion_config_dt(pdev);
- if (IS_ERR(gmac))
- return PTR_ERR(gmac);
-
plat_dat = devm_stmmac_probe_config_dt(pdev, stmmac_res.mac);
if (IS_ERR(plat_dat))
return PTR_ERR(plat_dat);
+ gmac = anarion_config_dt(pdev, plat_dat);
+ if (IS_ERR(gmac))
+ return PTR_ERR(gmac);
+
plat_dat->init = anarion_gmac_init;
plat_dat->exit = anarion_gmac_exit;
anarion_gmac_init(pdev, gmac);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
index bd4eb187f8c6..cd431f84f34f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
@@ -29,14 +29,21 @@ struct tegra_eqos {
void __iomem *regs;
struct reset_control *rst;
- struct clk *clk_master;
struct clk *clk_slave;
- struct clk *clk_tx;
- struct clk *clk_rx;
struct gpio_desc *reset;
};
+static struct clk *dwc_eth_find_clk(struct plat_stmmacenet_data *plat_dat,
+ const char *name)
+{
+ for (int i = 0; i < plat_dat->num_clks; i++)
+ if (strcmp(plat_dat->clks[i].id, name) == 0)
+ return plat_dat->clks[i].clk;
+
+ return NULL;
+}
+
static int dwc_eth_dwmac_config_dt(struct platform_device *pdev,
struct plat_stmmacenet_data *plat_dat)
{
@@ -46,7 +53,9 @@ static int dwc_eth_dwmac_config_dt(struct platform_device *pdev,
u32 a_index = 0;
if (!plat_dat->axi) {
- plat_dat->axi = kzalloc(sizeof(struct stmmac_axi), GFP_KERNEL);
+ plat_dat->axi = devm_kzalloc(&pdev->dev,
+ sizeof(struct stmmac_axi),
+ GFP_KERNEL);
if (!plat_dat->axi)
return -ENOMEM;
@@ -123,49 +132,9 @@ static int dwc_qos_probe(struct platform_device *pdev,
struct plat_stmmacenet_data *plat_dat,
struct stmmac_resources *stmmac_res)
{
- int err;
-
- plat_dat->stmmac_clk = devm_clk_get(&pdev->dev, "apb_pclk");
- if (IS_ERR(plat_dat->stmmac_clk)) {
- dev_err(&pdev->dev, "apb_pclk clock not found.\n");
- return PTR_ERR(plat_dat->stmmac_clk);
- }
-
- err = clk_prepare_enable(plat_dat->stmmac_clk);
- if (err < 0) {
- dev_err(&pdev->dev, "failed to enable apb_pclk clock: %d\n",
- err);
- return err;
- }
-
- plat_dat->pclk = devm_clk_get(&pdev->dev, "phy_ref_clk");
- if (IS_ERR(plat_dat->pclk)) {
- dev_err(&pdev->dev, "phy_ref_clk clock not found.\n");
- err = PTR_ERR(plat_dat->pclk);
- goto disable;
- }
-
- err = clk_prepare_enable(plat_dat->pclk);
- if (err < 0) {
- dev_err(&pdev->dev, "failed to enable phy_ref clock: %d\n",
- err);
- goto disable;
- }
+ plat_dat->pclk = dwc_eth_find_clk(plat_dat, "phy_ref_clk");
return 0;
-
-disable:
- clk_disable_unprepare(plat_dat->stmmac_clk);
- return err;
-}
-
-static void dwc_qos_remove(struct platform_device *pdev)
-{
- struct net_device *ndev = platform_get_drvdata(pdev);
- struct stmmac_priv *priv = netdev_priv(ndev);
-
- clk_disable_unprepare(priv->plat->pclk);
- clk_disable_unprepare(priv->plat->stmmac_clk);
}
#define SDMEMCOMPPADCTRL 0x8800
@@ -178,11 +147,10 @@ static void dwc_qos_remove(struct platform_device *pdev)
#define AUTO_CAL_STATUS 0x880c
#define AUTO_CAL_STATUS_ACTIVE BIT(31)
-static void tegra_eqos_fix_speed(void *priv, unsigned int speed, unsigned int mode)
+static void tegra_eqos_fix_speed(void *priv, int speed, unsigned int mode)
{
struct tegra_eqos *eqos = priv;
bool needs_calibration = false;
- long rate = 125000000;
u32 value;
int err;
@@ -193,11 +161,10 @@ static void tegra_eqos_fix_speed(void *priv, unsigned int speed, unsigned int mo
fallthrough;
case SPEED_10:
- rate = rgmii_clock(speed);
break;
default:
- dev_err(eqos->dev, "invalid speed %u\n", speed);
+ dev_err(eqos->dev, "invalid speed %d\n", speed);
break;
}
@@ -240,10 +207,6 @@ static void tegra_eqos_fix_speed(void *priv, unsigned int speed, unsigned int mo
value &= ~AUTO_CAL_CONFIG_ENABLE;
writel(value, eqos->regs + AUTO_CAL_CONFIG);
}
-
- err = clk_set_rate(eqos->clk_tx, rate);
- if (err < 0)
- dev_err(eqos->dev, "failed to set TX rate: %d\n", err);
}
static int tegra_eqos_init(struct platform_device *pdev, void *priv)
@@ -261,7 +224,7 @@ static int tegra_eqos_init(struct platform_device *pdev, void *priv)
}
static int tegra_eqos_probe(struct platform_device *pdev,
- struct plat_stmmacenet_data *data,
+ struct plat_stmmacenet_data *plat_dat,
struct stmmac_resources *res)
{
struct device *dev = &pdev->dev;
@@ -274,63 +237,24 @@ static int tegra_eqos_probe(struct platform_device *pdev,
eqos->dev = &pdev->dev;
eqos->regs = res->addr;
+ eqos->clk_slave = plat_dat->stmmac_clk;
if (!is_of_node(dev->fwnode))
goto bypass_clk_reset_gpio;
- eqos->clk_master = devm_clk_get(&pdev->dev, "master_bus");
- if (IS_ERR(eqos->clk_master)) {
- err = PTR_ERR(eqos->clk_master);
- goto error;
- }
-
- err = clk_prepare_enable(eqos->clk_master);
- if (err < 0)
- goto error;
-
- eqos->clk_slave = devm_clk_get(&pdev->dev, "slave_bus");
- if (IS_ERR(eqos->clk_slave)) {
- err = PTR_ERR(eqos->clk_slave);
- goto disable_master;
- }
-
- data->stmmac_clk = eqos->clk_slave;
-
- err = clk_prepare_enable(eqos->clk_slave);
- if (err < 0)
- goto disable_master;
-
- eqos->clk_rx = devm_clk_get(&pdev->dev, "rx");
- if (IS_ERR(eqos->clk_rx)) {
- err = PTR_ERR(eqos->clk_rx);
- goto disable_slave;
- }
-
- err = clk_prepare_enable(eqos->clk_rx);
- if (err < 0)
- goto disable_slave;
-
- eqos->clk_tx = devm_clk_get(&pdev->dev, "tx");
- if (IS_ERR(eqos->clk_tx)) {
- err = PTR_ERR(eqos->clk_tx);
- goto disable_rx;
- }
-
- err = clk_prepare_enable(eqos->clk_tx);
- if (err < 0)
- goto disable_rx;
+ plat_dat->clk_tx_i = dwc_eth_find_clk(plat_dat, "tx");
eqos->reset = devm_gpiod_get(&pdev->dev, "phy-reset", GPIOD_OUT_HIGH);
if (IS_ERR(eqos->reset)) {
err = PTR_ERR(eqos->reset);
- goto disable_tx;
+ return err;
}
usleep_range(2000, 4000);
gpiod_set_value(eqos->reset, 0);
/* MDIO bus was already reset just above */
- data->mdio_bus_data->needs_reset = false;
+ plat_dat->mdio_bus_data->needs_reset = false;
eqos->rst = devm_reset_control_get(&pdev->dev, "eqos");
if (IS_ERR(eqos->rst)) {
@@ -351,10 +275,11 @@ static int tegra_eqos_probe(struct platform_device *pdev,
usleep_range(2000, 4000);
bypass_clk_reset_gpio:
- data->fix_mac_speed = tegra_eqos_fix_speed;
- data->init = tegra_eqos_init;
- data->bsp_priv = eqos;
- data->flags |= STMMAC_FLAG_SPH_DISABLE;
+ plat_dat->fix_mac_speed = tegra_eqos_fix_speed;
+ plat_dat->set_clk_tx_rate = stmmac_set_clk_tx_rate;
+ plat_dat->init = tegra_eqos_init;
+ plat_dat->bsp_priv = eqos;
+ plat_dat->flags |= STMMAC_FLAG_SPH_DISABLE;
err = tegra_eqos_init(pdev, eqos);
if (err < 0)
@@ -365,15 +290,7 @@ reset:
reset_control_assert(eqos->rst);
reset_phy:
gpiod_set_value(eqos->reset, 1);
-disable_tx:
- clk_disable_unprepare(eqos->clk_tx);
-disable_rx:
- clk_disable_unprepare(eqos->clk_rx);
-disable_slave:
- clk_disable_unprepare(eqos->clk_slave);
-disable_master:
- clk_disable_unprepare(eqos->clk_master);
-error:
+
return err;
}
@@ -383,27 +300,29 @@ static void tegra_eqos_remove(struct platform_device *pdev)
reset_control_assert(eqos->rst);
gpiod_set_value(eqos->reset, 1);
- clk_disable_unprepare(eqos->clk_tx);
- clk_disable_unprepare(eqos->clk_rx);
- clk_disable_unprepare(eqos->clk_slave);
- clk_disable_unprepare(eqos->clk_master);
}
struct dwc_eth_dwmac_data {
int (*probe)(struct platform_device *pdev,
- struct plat_stmmacenet_data *data,
+ struct plat_stmmacenet_data *plat_dat,
struct stmmac_resources *res);
void (*remove)(struct platform_device *pdev);
+ const char *stmmac_clk_name;
};
static const struct dwc_eth_dwmac_data dwc_qos_data = {
.probe = dwc_qos_probe,
- .remove = dwc_qos_remove,
+ .stmmac_clk_name = "apb_pclk",
};
static const struct dwc_eth_dwmac_data tegra_eqos_data = {
.probe = tegra_eqos_probe,
.remove = tegra_eqos_remove,
+ .stmmac_clk_name = "slave_bus",
+};
+
+static const struct dwc_eth_dwmac_data fsd_eqos_data = {
+ .stmmac_clk_name = "slave_bus",
};
static int dwc_eth_dwmac_probe(struct platform_device *pdev)
@@ -434,9 +353,23 @@ static int dwc_eth_dwmac_probe(struct platform_device *pdev)
if (IS_ERR(plat_dat))
return PTR_ERR(plat_dat);
- ret = data->probe(pdev, plat_dat, &stmmac_res);
+ ret = devm_clk_bulk_get_all(&pdev->dev, &plat_dat->clks);
+ if (ret < 0)
+ return dev_err_probe(&pdev->dev, ret, "Failed to retrieve all required clocks\n");
+ plat_dat->num_clks = ret;
+
+ ret = clk_bulk_prepare_enable(plat_dat->num_clks, plat_dat->clks);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret, "Failed to enable clocks\n");
+
+ plat_dat->stmmac_clk = dwc_eth_find_clk(plat_dat,
+ data->stmmac_clk_name);
+
+ if (data->probe)
+ ret = data->probe(pdev, plat_dat, &stmmac_res);
if (ret < 0) {
dev_err_probe(&pdev->dev, ret, "failed to probe subdriver\n");
+ clk_bulk_disable_unprepare(plat_dat->num_clks, plat_dat->clks);
return ret;
}
@@ -451,7 +384,8 @@ static int dwc_eth_dwmac_probe(struct platform_device *pdev)
return ret;
remove:
- data->remove(pdev);
+ if (data->remove)
+ data->remove(pdev);
return ret;
}
@@ -459,15 +393,21 @@ remove:
static void dwc_eth_dwmac_remove(struct platform_device *pdev)
{
const struct dwc_eth_dwmac_data *data = device_get_match_data(&pdev->dev);
+ struct plat_stmmacenet_data *plat_dat = dev_get_platdata(&pdev->dev);
stmmac_dvr_remove(&pdev->dev);
- data->remove(pdev);
+ if (data->remove)
+ data->remove(pdev);
+
+ if (plat_dat)
+ clk_bulk_disable_unprepare(plat_dat->num_clks, plat_dat->clks);
}
static const struct of_device_id dwc_eth_dwmac_match[] = {
{ .compatible = "snps,dwc-qos-ethernet-4.10", .data = &dwc_qos_data },
{ .compatible = "nvidia,tegra186-eqos", .data = &tegra_eqos_data },
+ { .compatible = "tesla,fsd-ethqos", .data = &fsd_eqos_data },
{ }
};
MODULE_DEVICE_TABLE(of, dwc_eth_dwmac_match);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c
index 20d3a202bb8d..5d279fa54b3e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c
@@ -51,7 +51,7 @@ struct imx_dwmac_ops {
int (*fix_soc_reset)(void *priv, void __iomem *ioaddr);
int (*set_intf_mode)(struct plat_stmmacenet_data *plat_dat);
- void (*fix_mac_speed)(void *priv, unsigned int speed, unsigned int mode);
+ void (*fix_mac_speed)(void *priv, int speed, unsigned int mode);
};
struct imx_priv_data {
@@ -192,7 +192,20 @@ static void imx_dwmac_exit(struct platform_device *pdev, void *priv)
/* nothing to do now */
}
-static void imx_dwmac_fix_speed(void *priv, unsigned int speed, unsigned int mode)
+static int imx_dwmac_set_clk_tx_rate(void *bsp_priv, struct clk *clk_tx_i,
+ phy_interface_t interface, int speed)
+{
+ struct imx_priv_data *dwmac = bsp_priv;
+
+ interface = dwmac->plat_dat->mac_interface;
+ if (interface == PHY_INTERFACE_MODE_RMII ||
+ interface == PHY_INTERFACE_MODE_MII)
+ return 0;
+
+ return stmmac_set_clk_tx_rate(bsp_priv, clk_tx_i, interface, speed);
+}
+
+static void imx_dwmac_fix_speed(void *priv, int speed, unsigned int mode)
{
struct plat_stmmacenet_data *plat_dat;
struct imx_priv_data *dwmac = priv;
@@ -208,7 +221,7 @@ static void imx_dwmac_fix_speed(void *priv, unsigned int speed, unsigned int mod
rate = rgmii_clock(speed);
if (rate < 0) {
- dev_err(dwmac->dev, "invalid speed %u\n", speed);
+ dev_err(dwmac->dev, "invalid speed %d\n", speed);
return;
}
@@ -217,7 +230,7 @@ static void imx_dwmac_fix_speed(void *priv, unsigned int speed, unsigned int mod
dev_err(dwmac->dev, "failed to set tx rate %lu\n", rate);
}
-static void imx93_dwmac_fix_speed(void *priv, unsigned int speed, unsigned int mode)
+static void imx93_dwmac_fix_speed(void *priv, int speed, unsigned int mode)
{
struct imx_priv_data *dwmac = priv;
unsigned int iface;
@@ -358,7 +371,6 @@ static int imx_dwmac_probe(struct platform_device *pdev)
plat_dat->init = imx_dwmac_init;
plat_dat->exit = imx_dwmac_exit;
plat_dat->clks_config = imx_dwmac_clks_config;
- plat_dat->fix_mac_speed = imx_dwmac_fix_speed;
plat_dat->bsp_priv = dwmac;
dwmac->plat_dat = plat_dat;
dwmac->base_addr = stmmac_res.addr;
@@ -371,8 +383,13 @@ static int imx_dwmac_probe(struct platform_device *pdev)
if (ret)
goto err_dwmac_init;
- if (dwmac->ops->fix_mac_speed)
+ if (dwmac->ops->fix_mac_speed) {
plat_dat->fix_mac_speed = dwmac->ops->fix_mac_speed;
+ } else if (!dwmac->ops->mac_rgmii_txclk_auto_adj) {
+ plat_dat->clk_tx_i = dwmac->clk_tx;
+ plat_dat->set_clk_tx_rate = imx_dwmac_set_clk_tx_rate;
+ }
+
dwmac->plat_dat->fix_soc_reset = dwmac->ops->fix_soc_reset;
ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c
index ddee6154d40b..599def7b3a64 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c
@@ -22,31 +22,12 @@ struct intel_dwmac {
};
struct intel_dwmac_data {
- void (*fix_mac_speed)(void *priv, unsigned int speed, unsigned int mode);
unsigned long ptp_ref_clk_rate;
unsigned long tx_clk_rate;
bool tx_clk_en;
};
-static void kmb_eth_fix_mac_speed(void *priv, unsigned int speed, unsigned int mode)
-{
- struct intel_dwmac *dwmac = priv;
- long rate;
- int ret;
-
- rate = rgmii_clock(speed);
- if (rate < 0) {
- dev_err(dwmac->dev, "Invalid speed\n");
- return;
- }
-
- ret = clk_set_rate(dwmac->tx_clk, rate);
- if (ret)
- dev_err(dwmac->dev, "Failed to configure tx clock rate\n");
-}
-
static const struct intel_dwmac_data kmb_data = {
- .fix_mac_speed = kmb_eth_fix_mac_speed,
.ptp_ref_clk_rate = 200000000,
.tx_clk_rate = 125000000,
.tx_clk_en = true,
@@ -89,8 +70,6 @@ static int intel_eth_plat_probe(struct platform_device *pdev)
* platform_match().
*/
dwmac->data = device_get_match_data(&pdev->dev);
- if (dwmac->data->fix_mac_speed)
- plat_dat->fix_mac_speed = dwmac->data->fix_mac_speed;
/* Enable TX clock */
if (dwmac->data->tx_clk_en) {
@@ -132,6 +111,9 @@ static int intel_eth_plat_probe(struct platform_device *pdev)
}
}
+ plat_dat->clk_tx_i = dwmac->tx_clk;
+ plat_dat->set_clk_tx_rate = stmmac_set_clk_tx_rate;
+
plat_dat->bsp_priv = dwmac;
plat_dat->eee_usecs_rate = plat_dat->clk_ptp_rate;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
index 48acba5eb178..c8bb9265bbb4 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
@@ -5,15 +5,30 @@
#include <linux/clk-provider.h>
#include <linux/pci.h>
#include <linux/dmi.h>
+#include <linux/platform_data/x86/intel_pmc_ipc.h>
#include "dwmac-intel.h"
#include "dwmac4.h"
#include "stmmac.h"
#include "stmmac_ptp.h"
+struct pmc_serdes_regs {
+ u8 index;
+ u32 val;
+};
+
+struct pmc_serdes_reg_info {
+ const struct pmc_serdes_regs *regs;
+ u8 num_regs;
+};
+
struct intel_priv_data {
int mdio_adhoc_addr; /* mdio address for serdes & etc */
unsigned long crossts_adj;
bool is_pse;
+ const int *tsn_lane_regs;
+ int max_tsn_lane_regs;
+ struct pmc_serdes_reg_info pid_1g;
+ struct pmc_serdes_reg_info pid_2p5g;
};
/* This struct is used to associate PCI Function of MAC controller on a board,
@@ -35,6 +50,45 @@ struct stmmac_pci_info {
int (*setup)(struct pci_dev *pdev, struct plat_stmmacenet_data *plat);
};
+static const struct pmc_serdes_regs pid_modphy3_1g_regs[] = {
+ { PID_MODPHY3_B_MODPHY_PCR_LCPLL_DWORD0, B_MODPHY_PCR_LCPLL_DWORD0_1G },
+ { PID_MODPHY3_N_MODPHY_PCR_LCPLL_DWORD2, N_MODPHY_PCR_LCPLL_DWORD2_1G },
+ { PID_MODPHY3_N_MODPHY_PCR_LCPLL_DWORD7, N_MODPHY_PCR_LCPLL_DWORD7_1G },
+ { PID_MODPHY3_N_MODPHY_PCR_LPPLL_DWORD10, N_MODPHY_PCR_LPPLL_DWORD10_1G },
+ { PID_MODPHY3_N_MODPHY_PCR_CMN_ANA_DWORD30, N_MODPHY_PCR_CMN_ANA_DWORD30_1G },
+ {}
+};
+
+static const struct pmc_serdes_regs pid_modphy3_2p5g_regs[] = {
+ { PID_MODPHY3_B_MODPHY_PCR_LCPLL_DWORD0, B_MODPHY_PCR_LCPLL_DWORD0_2P5G },
+ { PID_MODPHY3_N_MODPHY_PCR_LCPLL_DWORD2, N_MODPHY_PCR_LCPLL_DWORD2_2P5G },
+ { PID_MODPHY3_N_MODPHY_PCR_LCPLL_DWORD7, N_MODPHY_PCR_LCPLL_DWORD7_2P5G },
+ { PID_MODPHY3_N_MODPHY_PCR_LPPLL_DWORD10, N_MODPHY_PCR_LPPLL_DWORD10_2P5G },
+ { PID_MODPHY3_N_MODPHY_PCR_CMN_ANA_DWORD30, N_MODPHY_PCR_CMN_ANA_DWORD30_2P5G },
+ {}
+};
+
+static const struct pmc_serdes_regs pid_modphy1_1g_regs[] = {
+ { PID_MODPHY1_B_MODPHY_PCR_LCPLL_DWORD0, B_MODPHY_PCR_LCPLL_DWORD0_1G },
+ { PID_MODPHY1_N_MODPHY_PCR_LCPLL_DWORD2, N_MODPHY_PCR_LCPLL_DWORD2_1G },
+ { PID_MODPHY1_N_MODPHY_PCR_LCPLL_DWORD7, N_MODPHY_PCR_LCPLL_DWORD7_1G },
+ { PID_MODPHY1_N_MODPHY_PCR_LPPLL_DWORD10, N_MODPHY_PCR_LPPLL_DWORD10_1G },
+ { PID_MODPHY1_N_MODPHY_PCR_CMN_ANA_DWORD30, N_MODPHY_PCR_CMN_ANA_DWORD30_1G },
+ {}
+};
+
+static const struct pmc_serdes_regs pid_modphy1_2p5g_regs[] = {
+ { PID_MODPHY1_B_MODPHY_PCR_LCPLL_DWORD0, B_MODPHY_PCR_LCPLL_DWORD0_2P5G },
+ { PID_MODPHY1_N_MODPHY_PCR_LCPLL_DWORD2, N_MODPHY_PCR_LCPLL_DWORD2_2P5G },
+ { PID_MODPHY1_N_MODPHY_PCR_LCPLL_DWORD7, N_MODPHY_PCR_LCPLL_DWORD7_2P5G },
+ { PID_MODPHY1_N_MODPHY_PCR_LPPLL_DWORD10, N_MODPHY_PCR_LPPLL_DWORD10_2P5G },
+ { PID_MODPHY1_N_MODPHY_PCR_CMN_ANA_DWORD30, N_MODPHY_PCR_CMN_ANA_DWORD30_2P5G },
+ {}
+};
+
+static const int ehl_tsn_lane_regs[] = {7, 8, 9, 10, 11};
+static const int adln_tsn_lane_regs[] = {6};
+
static int stmmac_pci_find_phy_addr(struct pci_dev *pdev,
const struct dmi_system_id *dmi_list)
{
@@ -93,7 +147,7 @@ static int intel_serdes_powerup(struct net_device *ndev, void *priv_data)
data &= ~SERDES_RATE_MASK;
data &= ~SERDES_PCLK_MASK;
- if (priv->plat->max_speed == 2500)
+ if (priv->plat->phy_interface == PHY_INTERFACE_MODE_2500BASEX)
data |= SERDES_RATE_PCIE_GEN2 << SERDES_RATE_PCIE_SHIFT |
SERDES_PCLK_37p5MHZ << SERDES_PCLK_SHIFT;
else
@@ -415,6 +469,95 @@ static void intel_mgbe_pse_crossts_adj(struct intel_priv_data *intel_priv,
}
}
+static int intel_tsn_lane_is_available(struct net_device *ndev,
+ struct intel_priv_data *intel_priv)
+{
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ struct pmc_ipc_cmd tmp = {};
+ struct pmc_ipc_rbuf rbuf = {};
+ int ret = 0, i, j;
+ const int max_fia_regs = 5;
+
+ tmp.cmd = IPC_SOC_REGISTER_ACCESS;
+ tmp.sub_cmd = IPC_SOC_SUB_CMD_READ;
+
+ for (i = 0; i < max_fia_regs; i++) {
+ tmp.wbuf[0] = R_PCH_FIA_15_PCR_LOS1_REG_BASE + i;
+
+ ret = intel_pmc_ipc(&tmp, &rbuf);
+ if (ret < 0) {
+ netdev_info(priv->dev, "Failed to read from PMC.\n");
+ return ret;
+ }
+
+		for (j = 0; j < intel_priv->max_tsn_lane_regs; j++)
+ if ((rbuf.buf[0] >>
+ (4 * (intel_priv->tsn_lane_regs[j] % 8)) &
+ B_PCH_FIA_PCR_L0O) == 0xB)
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int intel_set_reg_access(const struct pmc_serdes_regs *regs, int max_regs)
+{
+ int ret = 0, i;
+
+ for (i = 0; i < max_regs; i++) {
+ struct pmc_ipc_cmd tmp = {};
+ struct pmc_ipc_rbuf rbuf = {};
+
+ tmp.cmd = IPC_SOC_REGISTER_ACCESS;
+ tmp.sub_cmd = IPC_SOC_SUB_CMD_WRITE;
+ tmp.wbuf[0] = (u32)regs[i].index;
+ tmp.wbuf[1] = regs[i].val;
+
+ ret = intel_pmc_ipc(&tmp, &rbuf);
+ if (ret < 0)
+ return ret;
+ }
+
+ return ret;
+}
+
+static int intel_mac_finish(struct net_device *ndev,
+ void *intel_data,
+ unsigned int mode,
+ phy_interface_t interface)
+{
+ struct intel_priv_data *intel_priv = intel_data;
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ const struct pmc_serdes_regs *regs;
+ int max_regs = 0;
+ int ret = 0;
+
+ ret = intel_tsn_lane_is_available(ndev, intel_priv);
+ if (ret < 0) {
+ netdev_info(priv->dev, "No TSN lane available to set the registers.\n");
+ return ret;
+ }
+
+ if (interface == PHY_INTERFACE_MODE_2500BASEX) {
+ regs = intel_priv->pid_2p5g.regs;
+ max_regs = intel_priv->pid_2p5g.num_regs;
+ } else {
+ regs = intel_priv->pid_1g.regs;
+ max_regs = intel_priv->pid_1g.num_regs;
+ }
+
+ ret = intel_set_reg_access(regs, max_regs);
+ if (ret < 0)
+ return ret;
+
+ priv->plat->phy_interface = interface;
+
+ intel_serdes_powerdown(ndev, intel_priv);
+ intel_serdes_powerup(ndev, intel_priv);
+
+ return ret;
+}
+
static void common_default_data(struct plat_stmmacenet_data *plat)
{
plat->clk_csr = 2; /* clk_csr_i = 20-35MHz & MDC = clk_csr_i/16 */
@@ -624,6 +767,8 @@ static int intel_mgbe_common_data(struct pci_dev *pdev,
static int ehl_common_data(struct pci_dev *pdev,
struct plat_stmmacenet_data *plat)
{
+ struct intel_priv_data *intel_priv = plat->bsp_priv;
+
plat->rx_queues_to_use = 8;
plat->tx_queues_to_use = 8;
plat->flags |= STMMAC_FLAG_USE_PHY_WOL;
@@ -639,20 +784,29 @@ static int ehl_common_data(struct pci_dev *pdev,
plat->safety_feat_cfg->prtyen = 0;
plat->safety_feat_cfg->tmouten = 0;
+ intel_priv->tsn_lane_regs = ehl_tsn_lane_regs;
+ intel_priv->max_tsn_lane_regs = ARRAY_SIZE(ehl_tsn_lane_regs);
+
return intel_mgbe_common_data(pdev, plat);
}
static int ehl_sgmii_data(struct pci_dev *pdev,
struct plat_stmmacenet_data *plat)
{
+ struct intel_priv_data *intel_priv = plat->bsp_priv;
+
plat->bus_id = 1;
plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
- plat->speed_mode_2500 = intel_speed_mode_2500;
plat->serdes_powerup = intel_serdes_powerup;
plat->serdes_powerdown = intel_serdes_powerdown;
-
+ plat->mac_finish = intel_mac_finish;
plat->clk_ptp_rate = 204800000;
+ intel_priv->pid_1g.regs = pid_modphy3_1g_regs;
+ intel_priv->pid_1g.num_regs = ARRAY_SIZE(pid_modphy3_1g_regs);
+ intel_priv->pid_2p5g.regs = pid_modphy3_2p5g_regs;
+ intel_priv->pid_2p5g.num_regs = ARRAY_SIZE(pid_modphy3_2p5g_regs);
+
return ehl_common_data(pdev, plat);
}
@@ -705,10 +859,18 @@ static struct stmmac_pci_info ehl_pse0_rgmii1g_info = {
static int ehl_pse0_sgmii1g_data(struct pci_dev *pdev,
struct plat_stmmacenet_data *plat)
{
+ struct intel_priv_data *intel_priv = plat->bsp_priv;
+
plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
- plat->speed_mode_2500 = intel_speed_mode_2500;
plat->serdes_powerup = intel_serdes_powerup;
plat->serdes_powerdown = intel_serdes_powerdown;
+ plat->mac_finish = intel_mac_finish;
+
+ intel_priv->pid_1g.regs = pid_modphy1_1g_regs;
+ intel_priv->pid_1g.num_regs = ARRAY_SIZE(pid_modphy1_1g_regs);
+ intel_priv->pid_2p5g.regs = pid_modphy1_2p5g_regs;
+ intel_priv->pid_2p5g.num_regs = ARRAY_SIZE(pid_modphy1_2p5g_regs);
+
return ehl_pse0_common_data(pdev, plat);
}
@@ -746,10 +908,18 @@ static struct stmmac_pci_info ehl_pse1_rgmii1g_info = {
static int ehl_pse1_sgmii1g_data(struct pci_dev *pdev,
struct plat_stmmacenet_data *plat)
{
+ struct intel_priv_data *intel_priv = plat->bsp_priv;
+
plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
- plat->speed_mode_2500 = intel_speed_mode_2500;
plat->serdes_powerup = intel_serdes_powerup;
plat->serdes_powerdown = intel_serdes_powerdown;
+ plat->mac_finish = intel_mac_finish;
+
+ intel_priv->pid_1g.regs = pid_modphy1_1g_regs;
+ intel_priv->pid_1g.num_regs = ARRAY_SIZE(pid_modphy1_1g_regs);
+ intel_priv->pid_2p5g.regs = pid_modphy1_2p5g_regs;
+ intel_priv->pid_2p5g.num_regs = ARRAY_SIZE(pid_modphy1_2p5g_regs);
+
return ehl_pse1_common_data(pdev, plat);
}
@@ -835,6 +1005,55 @@ static int adls_sgmii_phy1_data(struct pci_dev *pdev,
static struct stmmac_pci_info adls_sgmii1g_phy1_info = {
.setup = adls_sgmii_phy1_data,
};
+
+static int adln_common_data(struct pci_dev *pdev,
+ struct plat_stmmacenet_data *plat)
+{
+ struct intel_priv_data *intel_priv = plat->bsp_priv;
+
+ plat->rx_queues_to_use = 6;
+ plat->tx_queues_to_use = 4;
+ plat->clk_ptp_rate = 204800000;
+
+ plat->safety_feat_cfg->tsoee = 1;
+ plat->safety_feat_cfg->mrxpee = 0;
+ plat->safety_feat_cfg->mestee = 1;
+ plat->safety_feat_cfg->mrxee = 1;
+ plat->safety_feat_cfg->mtxee = 1;
+ plat->safety_feat_cfg->epsi = 0;
+ plat->safety_feat_cfg->edpp = 0;
+ plat->safety_feat_cfg->prtyen = 0;
+ plat->safety_feat_cfg->tmouten = 0;
+
+ intel_priv->tsn_lane_regs = adln_tsn_lane_regs;
+ intel_priv->max_tsn_lane_regs = ARRAY_SIZE(adln_tsn_lane_regs);
+
+ return intel_mgbe_common_data(pdev, plat);
+}
+
+static int adln_sgmii_phy0_data(struct pci_dev *pdev,
+ struct plat_stmmacenet_data *plat)
+{
+ struct intel_priv_data *intel_priv = plat->bsp_priv;
+
+ plat->bus_id = 1;
+ plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
+ plat->serdes_powerup = intel_serdes_powerup;
+ plat->serdes_powerdown = intel_serdes_powerdown;
+ plat->mac_finish = intel_mac_finish;
+
+ intel_priv->pid_1g.regs = pid_modphy1_1g_regs;
+ intel_priv->pid_1g.num_regs = ARRAY_SIZE(pid_modphy1_1g_regs);
+ intel_priv->pid_2p5g.regs = pid_modphy1_2p5g_regs;
+ intel_priv->pid_2p5g.num_regs = ARRAY_SIZE(pid_modphy1_2p5g_regs);
+
+ return adln_common_data(pdev, plat);
+}
+
+static struct stmmac_pci_info adln_sgmii1g_phy0_info = {
+ .setup = adln_sgmii_phy0_data,
+};
+
static const struct stmmac_pci_func_data galileo_stmmac_func_data[] = {
{
.func = 6,
@@ -1217,8 +1436,8 @@ static const struct pci_device_id intel_eth_pci_id_table[] = {
{ PCI_DEVICE_DATA(INTEL, TGLH_SGMII1G_1, &tgl_sgmii1g_phy1_info) },
{ PCI_DEVICE_DATA(INTEL, ADLS_SGMII1G_0, &adls_sgmii1g_phy0_info) },
{ PCI_DEVICE_DATA(INTEL, ADLS_SGMII1G_1, &adls_sgmii1g_phy1_info) },
- { PCI_DEVICE_DATA(INTEL, ADLN_SGMII1G, &tgl_sgmii1g_phy0_info) },
- { PCI_DEVICE_DATA(INTEL, RPLP_SGMII1G, &tgl_sgmii1g_phy0_info) },
+ { PCI_DEVICE_DATA(INTEL, ADLN_SGMII1G, &adln_sgmii1g_phy0_info) },
+ { PCI_DEVICE_DATA(INTEL, RPLP_SGMII1G, &adln_sgmii1g_phy0_info) },
{}
};
MODULE_DEVICE_TABLE(pci, intel_eth_pci_id_table);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.h b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.h
index 0a37987478c1..a12f8e65f89f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.h
@@ -50,4 +50,33 @@
#define PCH_PTP_CLK_FREQ_19_2MHZ (GMAC_GPO0)
#define PCH_PTP_CLK_FREQ_200MHZ (0)
+/* Modphy Register index */
+#define R_PCH_FIA_15_PCR_LOS1_REG_BASE 8
+#define R_PCH_FIA_15_PCR_LOS2_REG_BASE 9
+#define R_PCH_FIA_15_PCR_LOS3_REG_BASE 10
+#define R_PCH_FIA_15_PCR_LOS4_REG_BASE 11
+#define R_PCH_FIA_15_PCR_LOS5_REG_BASE 12
+#define B_PCH_FIA_PCR_L0O GENMASK(3, 0)
+#define PID_MODPHY1_B_MODPHY_PCR_LCPLL_DWORD0 13
+#define PID_MODPHY1_N_MODPHY_PCR_LCPLL_DWORD2 14
+#define PID_MODPHY1_N_MODPHY_PCR_LCPLL_DWORD7 15
+#define PID_MODPHY1_N_MODPHY_PCR_LPPLL_DWORD10 16
+#define PID_MODPHY1_N_MODPHY_PCR_CMN_ANA_DWORD30 17
+#define PID_MODPHY3_B_MODPHY_PCR_LCPLL_DWORD0 18
+#define PID_MODPHY3_N_MODPHY_PCR_LCPLL_DWORD2 19
+#define PID_MODPHY3_N_MODPHY_PCR_LCPLL_DWORD7 20
+#define PID_MODPHY3_N_MODPHY_PCR_LPPLL_DWORD10 21
+#define PID_MODPHY3_N_MODPHY_PCR_CMN_ANA_DWORD30 22
+
+#define B_MODPHY_PCR_LCPLL_DWORD0_1G 0x46AAAA41
+#define N_MODPHY_PCR_LCPLL_DWORD2_1G 0x00000139
+#define N_MODPHY_PCR_LCPLL_DWORD7_1G 0x002A0003
+#define N_MODPHY_PCR_LPPLL_DWORD10_1G 0x00170008
+#define N_MODPHY_PCR_CMN_ANA_DWORD30_1G 0x0000D4AC
+#define B_MODPHY_PCR_LCPLL_DWORD0_2P5G 0x58555551
+#define N_MODPHY_PCR_LCPLL_DWORD2_2P5G 0x0000012D
+#define N_MODPHY_PCR_LCPLL_DWORD7_2P5G 0x001F0003
+#define N_MODPHY_PCR_LPPLL_DWORD10_2P5G 0x00170008
+#define N_MODPHY_PCR_CMN_ANA_DWORD30_2P5G 0x8200ACAC
+
#endif /* __DWMAC_INTEL_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
index 61227dcf56dc..ca4035cbb55b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
@@ -112,7 +112,7 @@ struct ipq806x_gmac {
phy_interface_t phy_mode;
};
-static int get_clk_div_sgmii(struct ipq806x_gmac *gmac, unsigned int speed)
+static int get_clk_div_sgmii(struct ipq806x_gmac *gmac, int speed)
{
struct device *dev = &gmac->pdev->dev;
int div;
@@ -138,7 +138,7 @@ static int get_clk_div_sgmii(struct ipq806x_gmac *gmac, unsigned int speed)
return div;
}
-static int get_clk_div_rgmii(struct ipq806x_gmac *gmac, unsigned int speed)
+static int get_clk_div_rgmii(struct ipq806x_gmac *gmac, int speed)
{
struct device *dev = &gmac->pdev->dev;
int div;
@@ -164,7 +164,7 @@ static int get_clk_div_rgmii(struct ipq806x_gmac *gmac, unsigned int speed)
return div;
}
-static int ipq806x_gmac_set_speed(struct ipq806x_gmac *gmac, unsigned int speed)
+static int ipq806x_gmac_set_speed(struct ipq806x_gmac *gmac, int speed)
{
uint32_t clk_bits, val;
int div;
@@ -211,16 +211,12 @@ static int ipq806x_gmac_set_speed(struct ipq806x_gmac *gmac, unsigned int speed)
return 0;
}
-static int ipq806x_gmac_of_parse(struct ipq806x_gmac *gmac)
+static int ipq806x_gmac_of_parse(struct ipq806x_gmac *gmac,
+ struct plat_stmmacenet_data *plat_dat)
{
struct device *dev = &gmac->pdev->dev;
- int ret;
- ret = of_get_phy_mode(dev->of_node, &gmac->phy_mode);
- if (ret) {
- dev_err(dev, "missing phy mode property\n");
- return -EINVAL;
- }
+ gmac->phy_mode = plat_dat->phy_interface;
if (of_property_read_u32(dev->of_node, "qcom,id", &gmac->id) < 0) {
dev_err(dev, "missing qcom id property\n");
@@ -260,11 +256,12 @@ static int ipq806x_gmac_of_parse(struct ipq806x_gmac *gmac)
return PTR_ERR_OR_ZERO(gmac->qsgmii_csr);
}
-static void ipq806x_gmac_fix_mac_speed(void *priv, unsigned int speed, unsigned int mode)
+static int ipq806x_gmac_set_clk_tx_rate(void *bsp_priv, struct clk *clk_tx_i,
+ phy_interface_t interface, int speed)
{
- struct ipq806x_gmac *gmac = priv;
+ struct ipq806x_gmac *gmac = bsp_priv;
- ipq806x_gmac_set_speed(gmac, speed);
+ return ipq806x_gmac_set_speed(gmac, speed);
}
static int
@@ -397,7 +394,7 @@ static int ipq806x_gmac_probe(struct platform_device *pdev)
gmac->pdev = pdev;
- err = ipq806x_gmac_of_parse(gmac);
+ err = ipq806x_gmac_of_parse(gmac, plat_dat);
if (err) {
dev_err(dev, "device tree parsing error\n");
return err;
@@ -478,7 +475,7 @@ static int ipq806x_gmac_probe(struct platform_device *pdev)
plat_dat->has_gmac = true;
plat_dat->bsp_priv = gmac;
- plat_dat->fix_mac_speed = ipq806x_gmac_fix_mac_speed;
+ plat_dat->set_clk_tx_rate = ipq806x_gmac_set_clk_tx_rate;
plat_dat->multicast_filter_bins = 0;
plat_dat->tx_fifo_size = 8192;
plat_dat->rx_fifo_size = 8192;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
index f5acfb7d4ff6..1a93787056a7 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
@@ -11,6 +11,8 @@
#include "dwmac_dma.h"
#include "dwmac1000.h"
+#define DRIVER_NAME "dwmac-loongson-pci"
+
/* Normal Loongson Tx Summary */
#define DMA_INTR_ENA_NIE_TX_LOONGSON 0x00040000
/* Normal Loongson Rx Summary */
@@ -149,8 +151,7 @@ static struct stmmac_pci_info loongson_gmac_pci_info = {
.setup = loongson_gmac_data,
};
-static void loongson_gnet_fix_speed(void *priv, unsigned int speed,
- unsigned int mode)
+static void loongson_gnet_fix_speed(void *priv, int speed, unsigned int mode)
{
struct loongson_data *ld = (struct loongson_data *)priv;
struct net_device *ndev = dev_get_drvdata(ld->dev);
@@ -532,10 +533,10 @@ static int loongson_dwmac_fix_reset(void *priv, void __iomem *ioaddr)
static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct plat_stmmacenet_data *plat;
+ struct stmmac_resources res = {};
struct stmmac_pci_info *info;
- struct stmmac_resources res;
struct loongson_data *ld;
- int ret, i;
+ int ret;
plat = devm_kzalloc(&pdev->dev, sizeof(*plat), GFP_KERNEL);
if (!plat)
@@ -565,17 +566,10 @@ static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id
pci_set_master(pdev);
/* Get the base address of device */
- for (i = 0; i < PCI_STD_NUM_BARS; i++) {
- if (pci_resource_len(pdev, i) == 0)
- continue;
- ret = pcim_iomap_regions(pdev, BIT(0), pci_name(pdev));
- if (ret)
- goto err_disable_device;
- break;
- }
-
- memset(&res, 0, sizeof(res));
- res.addr = pcim_iomap_table(pdev)[0];
+ res.addr = pcim_iomap_region(pdev, 0, DRIVER_NAME);
+ ret = PTR_ERR_OR_ZERO(res.addr);
+ if (ret)
+ goto err_disable_device;
plat->bsp_priv = ld;
plat->setup = loongson_dwmac_setup;
@@ -588,6 +582,9 @@ static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id
if (ret)
goto err_disable_device;
+ plat->tx_fifo_size = SZ_16K * plat->tx_queues_to_use;
+ plat->rx_fifo_size = SZ_16K * plat->rx_queues_to_use;
+
if (dev_of_node(&pdev->dev))
ret = loongson_dwmac_dt_config(pdev, plat, &res);
else
@@ -620,7 +617,6 @@ static void loongson_dwmac_remove(struct pci_dev *pdev)
struct net_device *ndev = dev_get_drvdata(&pdev->dev);
struct stmmac_priv *priv = netdev_priv(ndev);
struct loongson_data *ld;
- int i;
ld = priv->plat->bsp_priv;
stmmac_dvr_remove(&pdev->dev);
@@ -631,13 +627,6 @@ static void loongson_dwmac_remove(struct pci_dev *pdev)
if (ld->loongson_id == DWMAC_CORE_LS_MULTICHAN)
loongson_dwmac_msi_clear(pdev);
- for (i = 0; i < PCI_STD_NUM_BARS; i++) {
- if (pci_resource_len(pdev, i) == 0)
- continue;
- pcim_iounmap_regions(pdev, BIT(i));
- break;
- }
-
pci_disable_device(pdev);
}
@@ -687,7 +676,7 @@ static const struct pci_device_id loongson_dwmac_id_table[] = {
MODULE_DEVICE_TABLE(pci, loongson_dwmac_id_table);
static struct pci_driver loongson_dwmac_driver = {
- .name = "dwmac-loongson-pci",
+ .name = DRIVER_NAME,
.id_table = loongson_dwmac_id_table,
.probe = loongson_dwmac_probe,
.remove = loongson_dwmac_remove,
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c
index c9636832a570..d178d5ddc7c7 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c
@@ -456,7 +456,6 @@ static int mediatek_dwmac_config_dt(struct mediatek_dwmac_plat_data *plat)
{
struct mac_delay_struct *mac_delay = &plat->mac_delay;
u32 tx_delay_ps, rx_delay_ps;
- int err;
plat->peri_regmap = syscon_regmap_lookup_by_phandle(plat->np, "mediatek,pericfg");
if (IS_ERR(plat->peri_regmap)) {
@@ -464,12 +463,6 @@ static int mediatek_dwmac_config_dt(struct mediatek_dwmac_plat_data *plat)
return PTR_ERR(plat->peri_regmap);
}
- err = of_get_phy_mode(plat->np, &plat->phy_mode);
- if (err) {
- dev_err(plat->dev, "not find phy-mode\n");
- return err;
- }
-
if (!of_property_read_u32(plat->np, "mediatek,tx-delay-ps", &tx_delay_ps)) {
if (tx_delay_ps < plat->variant->tx_delay_max) {
mac_delay->tx_delay = tx_delay_ps;
@@ -587,6 +580,7 @@ static int mediatek_dwmac_common_data(struct platform_device *pdev,
{
int i;
+ priv_plat->phy_mode = plat->phy_interface;
plat->mac_interface = priv_plat->phy_mode;
if (priv_plat->mac_wol)
plat->flags &= ~STMMAC_FLAG_USE_PHY_WOL;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c
index 5469fa1b429e..07c504d07604 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c
@@ -22,9 +22,10 @@ struct meson_dwmac {
void __iomem *reg;
};
-static void meson6_dwmac_fix_mac_speed(void *priv, unsigned int speed, unsigned int mode)
+static int meson6_dwmac_set_clk_tx_rate(void *bsp_priv, struct clk *clk_tx_i,
+ phy_interface_t interface, int speed)
{
- struct meson_dwmac *dwmac = priv;
+ struct meson_dwmac *dwmac = bsp_priv;
unsigned int val;
val = readl(dwmac->reg);
@@ -39,6 +40,8 @@ static void meson6_dwmac_fix_mac_speed(void *priv, unsigned int speed, unsigned
}
writel(val, dwmac->reg);
+
+ return 0;
}
static int meson6_dwmac_probe(struct platform_device *pdev)
@@ -65,7 +68,7 @@ static int meson6_dwmac_probe(struct platform_device *pdev)
return PTR_ERR(dwmac->reg);
plat_dat->bsp_priv = dwmac;
- plat_dat->fix_mac_speed = meson6_dwmac_fix_mac_speed;
+ plat_dat->set_clk_tx_rate = meson6_dwmac_set_clk_tx_rate;
return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
index 9c2d62d133ad..a50782994b97 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
@@ -417,11 +417,7 @@ static int meson8b_dwmac_probe(struct platform_device *pdev)
return PTR_ERR(dwmac->regs);
dwmac->dev = &pdev->dev;
- ret = of_get_phy_mode(pdev->dev.of_node, &dwmac->phy_mode);
- if (ret) {
- dev_err(&pdev->dev, "missing phy-mode property\n");
- return ret;
- }
+ dwmac->phy_mode = plat_dat->phy_interface;
/* use 2ns as fallback since this value was previously hardcoded */
if (of_property_read_u32(pdev->dev.of_node, "amlogic,tx-delay-ns",
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
index 2a5b38723635..0e4da216f942 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
@@ -111,7 +111,7 @@ struct qcom_ethqos {
unsigned int link_clk_rate;
struct clk *link_clk;
struct phy *serdes_phy;
- unsigned int speed;
+ int speed;
int serdes_speed;
phy_interface_t phy_mode;
@@ -169,30 +169,17 @@ static void rgmii_dump(void *priv)
rgmii_readl(ethqos, EMAC_SYSTEM_LOW_POWER_DEBUG));
}
-/* Clock rates */
-#define RGMII_1000_NOM_CLK_FREQ (250 * 1000 * 1000UL)
-#define RGMII_ID_MODE_100_LOW_SVS_CLK_FREQ (50 * 1000 * 1000UL)
-#define RGMII_ID_MODE_10_LOW_SVS_CLK_FREQ (5 * 1000 * 1000UL)
-
static void
-ethqos_update_link_clk(struct qcom_ethqos *ethqos, unsigned int speed)
+ethqos_update_link_clk(struct qcom_ethqos *ethqos, int speed)
{
+ long rate;
+
if (!phy_interface_mode_is_rgmii(ethqos->phy_mode))
return;
- switch (speed) {
- case SPEED_1000:
- ethqos->link_clk_rate = RGMII_1000_NOM_CLK_FREQ;
- break;
-
- case SPEED_100:
- ethqos->link_clk_rate = RGMII_ID_MODE_100_LOW_SVS_CLK_FREQ;
- break;
-
- case SPEED_10:
- ethqos->link_clk_rate = RGMII_ID_MODE_10_LOW_SVS_CLK_FREQ;
- break;
- }
+ rate = rgmii_clock(speed);
+ if (rate > 0)
+ ethqos->link_clk_rate = rate * 2;
clk_set_rate(ethqos->link_clk, ethqos->link_clk_rate);
}
@@ -699,7 +686,7 @@ static int ethqos_configure(struct qcom_ethqos *ethqos)
return ethqos->configure_func(ethqos);
}
-static void ethqos_fix_mac_speed(void *priv, unsigned int speed, unsigned int mode)
+static void ethqos_fix_mac_speed(void *priv, int speed, unsigned int mode)
{
struct qcom_ethqos *ethqos = priv;
@@ -807,9 +794,7 @@ static int qcom_ethqos_probe(struct platform_device *pdev)
if (!ethqos)
return -ENOMEM;
- ret = of_get_phy_mode(np, &ethqos->phy_mode);
- if (ret)
- return dev_err_probe(dev, ret, "Failed to get phy mode\n");
+ ethqos->phy_mode = plat_dat->phy_interface;
switch (ethqos->phy_mode) {
case PHY_INTERFACE_MODE_RGMII:
case PHY_INTERFACE_MODE_RGMII_ID:
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
index a4dc89e23a68..700858ff6f7c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
@@ -33,6 +33,8 @@ struct rk_gmac_ops {
void (*set_clock_selection)(struct rk_priv_data *bsp_priv, bool input,
bool enable);
void (*integrated_phy_powerup)(struct rk_priv_data *bsp_priv);
+ void (*integrated_phy_powerdown)(struct rk_priv_data *bsp_priv);
+ bool php_grf_required;
bool regs_valid;
u32 regs[];
};
@@ -91,6 +93,76 @@ struct rk_priv_data {
(((tx) ? soc##_GMAC_TXCLK_DLY_ENABLE : soc##_GMAC_TXCLK_DLY_DISABLE) | \
((rx) ? soc##_GMAC_RXCLK_DLY_ENABLE : soc##_GMAC_RXCLK_DLY_DISABLE))
+#define RK_GRF_MACPHY_CON0 0xb00
+#define RK_GRF_MACPHY_CON1 0xb04
+#define RK_GRF_MACPHY_CON2 0xb08
+#define RK_GRF_MACPHY_CON3 0xb0c
+
+#define RK_MACPHY_ENABLE GRF_BIT(0)
+#define RK_MACPHY_DISABLE GRF_CLR_BIT(0)
+#define RK_MACPHY_CFG_CLK_50M GRF_BIT(14)
+#define RK_GMAC2PHY_RMII_MODE (GRF_BIT(6) | GRF_CLR_BIT(7))
+#define RK_GRF_CON2_MACPHY_ID HIWORD_UPDATE(0x1234, 0xffff, 0)
+#define RK_GRF_CON3_MACPHY_ID HIWORD_UPDATE(0x35, 0x3f, 0)
+
+static void rk_gmac_integrated_ephy_powerup(struct rk_priv_data *priv)
+{
+ regmap_write(priv->grf, RK_GRF_MACPHY_CON0, RK_MACPHY_CFG_CLK_50M);
+ regmap_write(priv->grf, RK_GRF_MACPHY_CON0, RK_GMAC2PHY_RMII_MODE);
+
+ regmap_write(priv->grf, RK_GRF_MACPHY_CON2, RK_GRF_CON2_MACPHY_ID);
+ regmap_write(priv->grf, RK_GRF_MACPHY_CON3, RK_GRF_CON3_MACPHY_ID);
+
+ if (priv->phy_reset) {
+ /* PHY needs to be disabled before trying to reset it */
+ regmap_write(priv->grf, RK_GRF_MACPHY_CON0, RK_MACPHY_DISABLE);
+		reset_control_assert(priv->phy_reset);
+		usleep_range(10, 20);
+		reset_control_deassert(priv->phy_reset);
+ usleep_range(10, 20);
+ regmap_write(priv->grf, RK_GRF_MACPHY_CON0, RK_MACPHY_ENABLE);
+ msleep(30);
+ }
+}
+
+static void rk_gmac_integrated_ephy_powerdown(struct rk_priv_data *priv)
+{
+ regmap_write(priv->grf, RK_GRF_MACPHY_CON0, RK_MACPHY_DISABLE);
+ if (priv->phy_reset)
+ reset_control_assert(priv->phy_reset);
+}
+
+#define RK_FEPHY_SHUTDOWN GRF_BIT(1)
+#define RK_FEPHY_POWERUP GRF_CLR_BIT(1)
+#define RK_FEPHY_INTERNAL_RMII_SEL GRF_BIT(6)
+#define RK_FEPHY_24M_CLK_SEL (GRF_BIT(8) | GRF_BIT(9))
+#define RK_FEPHY_PHY_ID GRF_BIT(11)
+
+static void rk_gmac_integrated_fephy_powerup(struct rk_priv_data *priv,
+ unsigned int reg)
+{
+ reset_control_assert(priv->phy_reset);
+ usleep_range(20, 30);
+
+ regmap_write(priv->grf, reg,
+ RK_FEPHY_POWERUP |
+ RK_FEPHY_INTERNAL_RMII_SEL |
+ RK_FEPHY_24M_CLK_SEL |
+ RK_FEPHY_PHY_ID);
+ usleep_range(10000, 12000);
+
+ reset_control_deassert(priv->phy_reset);
+ usleep_range(50000, 60000);
+}
+
+static void rk_gmac_integrated_fephy_powerdown(struct rk_priv_data *priv,
+ unsigned int reg)
+{
+ regmap_write(priv->grf, reg, RK_FEPHY_SHUTDOWN);
+}
+
#define PX30_GRF_GMAC_CON1 0x0904
/* PX30_GRF_GMAC_CON1 */
@@ -101,13 +173,6 @@ struct rk_priv_data {
static void px30_set_to_rmii(struct rk_priv_data *bsp_priv)
{
- struct device *dev = &bsp_priv->pdev->dev;
-
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
- return;
- }
-
regmap_write(bsp_priv->grf, PX30_GRF_GMAC_CON1,
PX30_GMAC_PHY_INTF_SEL_RMII);
}
@@ -181,13 +246,6 @@ static const struct rk_gmac_ops px30_ops = {
static void rk3128_set_to_rgmii(struct rk_priv_data *bsp_priv,
int tx_delay, int rx_delay)
{
- struct device *dev = &bsp_priv->pdev->dev;
-
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "Missing rockchip,grf property\n");
- return;
- }
-
regmap_write(bsp_priv->grf, RK3128_GRF_MAC_CON1,
RK3128_GMAC_PHY_INTF_SEL_RGMII |
RK3128_GMAC_RMII_MODE_CLR);
@@ -199,13 +257,6 @@ static void rk3128_set_to_rgmii(struct rk_priv_data *bsp_priv,
static void rk3128_set_to_rmii(struct rk_priv_data *bsp_priv)
{
- struct device *dev = &bsp_priv->pdev->dev;
-
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "Missing rockchip,grf property\n");
- return;
- }
-
regmap_write(bsp_priv->grf, RK3128_GRF_MAC_CON1,
RK3128_GMAC_PHY_INTF_SEL_RMII | RK3128_GMAC_RMII_MODE);
}
@@ -214,11 +265,6 @@ static void rk3128_set_rgmii_speed(struct rk_priv_data *bsp_priv, int speed)
{
struct device *dev = &bsp_priv->pdev->dev;
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "Missing rockchip,grf property\n");
- return;
- }
-
if (speed == 10)
regmap_write(bsp_priv->grf, RK3128_GRF_MAC_CON1,
RK3128_GMAC_CLK_2_5M);
@@ -236,11 +282,6 @@ static void rk3128_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
{
struct device *dev = &bsp_priv->pdev->dev;
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "Missing rockchip,grf property\n");
- return;
- }
-
if (speed == 10) {
regmap_write(bsp_priv->grf, RK3128_GRF_MAC_CON1,
RK3128_GMAC_RMII_CLK_2_5M |
@@ -297,13 +338,6 @@ static const struct rk_gmac_ops rk3128_ops = {
static void rk3228_set_to_rgmii(struct rk_priv_data *bsp_priv,
int tx_delay, int rx_delay)
{
- struct device *dev = &bsp_priv->pdev->dev;
-
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "Missing rockchip,grf property\n");
- return;
- }
-
regmap_write(bsp_priv->grf, RK3228_GRF_MAC_CON1,
RK3228_GMAC_PHY_INTF_SEL_RGMII |
RK3228_GMAC_RMII_MODE_CLR |
@@ -316,13 +350,6 @@ static void rk3228_set_to_rgmii(struct rk_priv_data *bsp_priv,
static void rk3228_set_to_rmii(struct rk_priv_data *bsp_priv)
{
- struct device *dev = &bsp_priv->pdev->dev;
-
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "Missing rockchip,grf property\n");
- return;
- }
-
regmap_write(bsp_priv->grf, RK3228_GRF_MAC_CON1,
RK3228_GMAC_PHY_INTF_SEL_RMII |
RK3228_GMAC_RMII_MODE);
@@ -335,11 +362,6 @@ static void rk3228_set_rgmii_speed(struct rk_priv_data *bsp_priv, int speed)
{
struct device *dev = &bsp_priv->pdev->dev;
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "Missing rockchip,grf property\n");
- return;
- }
-
if (speed == 10)
regmap_write(bsp_priv->grf, RK3228_GRF_MAC_CON1,
RK3228_GMAC_CLK_2_5M);
@@ -357,11 +379,6 @@ static void rk3228_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
{
struct device *dev = &bsp_priv->pdev->dev;
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "Missing rockchip,grf property\n");
- return;
- }
-
if (speed == 10)
regmap_write(bsp_priv->grf, RK3228_GRF_MAC_CON1,
RK3228_GMAC_RMII_CLK_2_5M |
@@ -378,6 +395,8 @@ static void rk3228_integrated_phy_powerup(struct rk_priv_data *priv)
{
regmap_write(priv->grf, RK3228_GRF_CON_MUX,
RK3228_GRF_CON_MUX_GMAC_INTEGRATED_PHY);
+
+ rk_gmac_integrated_ephy_powerup(priv);
}
static const struct rk_gmac_ops rk3228_ops = {
@@ -385,7 +404,8 @@ static const struct rk_gmac_ops rk3228_ops = {
.set_to_rmii = rk3228_set_to_rmii,
.set_rgmii_speed = rk3228_set_rgmii_speed,
.set_rmii_speed = rk3228_set_rmii_speed,
- .integrated_phy_powerup = rk3228_integrated_phy_powerup,
+ .integrated_phy_powerup = rk3228_integrated_phy_powerup,
+ .integrated_phy_powerdown = rk_gmac_integrated_ephy_powerdown,
};
#define RK3288_GRF_SOC_CON1 0x0248
@@ -419,13 +439,6 @@ static const struct rk_gmac_ops rk3228_ops = {
static void rk3288_set_to_rgmii(struct rk_priv_data *bsp_priv,
int tx_delay, int rx_delay)
{
- struct device *dev = &bsp_priv->pdev->dev;
-
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "Missing rockchip,grf property\n");
- return;
- }
-
regmap_write(bsp_priv->grf, RK3288_GRF_SOC_CON1,
RK3288_GMAC_PHY_INTF_SEL_RGMII |
RK3288_GMAC_RMII_MODE_CLR);
@@ -437,13 +450,6 @@ static void rk3288_set_to_rgmii(struct rk_priv_data *bsp_priv,
static void rk3288_set_to_rmii(struct rk_priv_data *bsp_priv)
{
- struct device *dev = &bsp_priv->pdev->dev;
-
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "Missing rockchip,grf property\n");
- return;
- }
-
regmap_write(bsp_priv->grf, RK3288_GRF_SOC_CON1,
RK3288_GMAC_PHY_INTF_SEL_RMII | RK3288_GMAC_RMII_MODE);
}
@@ -452,11 +458,6 @@ static void rk3288_set_rgmii_speed(struct rk_priv_data *bsp_priv, int speed)
{
struct device *dev = &bsp_priv->pdev->dev;
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "Missing rockchip,grf property\n");
- return;
- }
-
if (speed == 10)
regmap_write(bsp_priv->grf, RK3288_GRF_SOC_CON1,
RK3288_GMAC_CLK_2_5M);
@@ -474,11 +475,6 @@ static void rk3288_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
{
struct device *dev = &bsp_priv->pdev->dev;
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "Missing rockchip,grf property\n");
- return;
- }
-
if (speed == 10) {
regmap_write(bsp_priv->grf, RK3288_GRF_SOC_CON1,
RK3288_GMAC_RMII_CLK_2_5M |
@@ -511,13 +507,6 @@ static const struct rk_gmac_ops rk3288_ops = {
static void rk3308_set_to_rmii(struct rk_priv_data *bsp_priv)
{
- struct device *dev = &bsp_priv->pdev->dev;
-
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "Missing rockchip,grf property\n");
- return;
- }
-
regmap_write(bsp_priv->grf, RK3308_GRF_MAC_CON0,
RK3308_GMAC_PHY_INTF_SEL_RMII);
}
@@ -526,11 +515,6 @@ static void rk3308_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
{
struct device *dev = &bsp_priv->pdev->dev;
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "Missing rockchip,grf property\n");
- return;
- }
-
if (speed == 10) {
regmap_write(bsp_priv->grf, RK3308_GRF_MAC_CON0,
RK3308_GMAC_SPEED_10M);
@@ -583,13 +567,6 @@ static const struct rk_gmac_ops rk3308_ops = {
static void rk3328_set_to_rgmii(struct rk_priv_data *bsp_priv,
int tx_delay, int rx_delay)
{
- struct device *dev = &bsp_priv->pdev->dev;
-
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "Missing rockchip,grf property\n");
- return;
- }
-
regmap_write(bsp_priv->grf, RK3328_GRF_MAC_CON1,
RK3328_GMAC_PHY_INTF_SEL_RGMII |
RK3328_GMAC_RMII_MODE_CLR |
@@ -603,14 +580,8 @@ static void rk3328_set_to_rgmii(struct rk_priv_data *bsp_priv,
static void rk3328_set_to_rmii(struct rk_priv_data *bsp_priv)
{
- struct device *dev = &bsp_priv->pdev->dev;
unsigned int reg;
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "Missing rockchip,grf property\n");
- return;
- }
-
reg = bsp_priv->integrated_phy ? RK3328_GRF_MAC_CON2 :
RK3328_GRF_MAC_CON1;
@@ -623,11 +594,6 @@ static void rk3328_set_rgmii_speed(struct rk_priv_data *bsp_priv, int speed)
{
struct device *dev = &bsp_priv->pdev->dev;
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "Missing rockchip,grf property\n");
- return;
- }
-
if (speed == 10)
regmap_write(bsp_priv->grf, RK3328_GRF_MAC_CON1,
RK3328_GMAC_CLK_2_5M);
@@ -646,11 +612,6 @@ static void rk3328_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
struct device *dev = &bsp_priv->pdev->dev;
unsigned int reg;
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "Missing rockchip,grf property\n");
- return;
- }
-
reg = bsp_priv->integrated_phy ? RK3328_GRF_MAC_CON2 :
RK3328_GRF_MAC_CON1;
@@ -670,6 +631,8 @@ static void rk3328_integrated_phy_powerup(struct rk_priv_data *priv)
{
regmap_write(priv->grf, RK3328_GRF_MACPHY_CON1,
RK3328_MACPHY_RMII_MODE);
+
+ rk_gmac_integrated_ephy_powerup(priv);
}
static const struct rk_gmac_ops rk3328_ops = {
@@ -677,7 +640,8 @@ static const struct rk_gmac_ops rk3328_ops = {
.set_to_rmii = rk3328_set_to_rmii,
.set_rgmii_speed = rk3328_set_rgmii_speed,
.set_rmii_speed = rk3328_set_rmii_speed,
- .integrated_phy_powerup = rk3328_integrated_phy_powerup,
+ .integrated_phy_powerup = rk3328_integrated_phy_powerup,
+ .integrated_phy_powerdown = rk_gmac_integrated_ephy_powerdown,
};
#define RK3366_GRF_SOC_CON6 0x0418
@@ -711,13 +675,6 @@ static const struct rk_gmac_ops rk3328_ops = {
static void rk3366_set_to_rgmii(struct rk_priv_data *bsp_priv,
int tx_delay, int rx_delay)
{
- struct device *dev = &bsp_priv->pdev->dev;
-
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
- return;
- }
-
regmap_write(bsp_priv->grf, RK3366_GRF_SOC_CON6,
RK3366_GMAC_PHY_INTF_SEL_RGMII |
RK3366_GMAC_RMII_MODE_CLR);
@@ -729,13 +686,6 @@ static void rk3366_set_to_rgmii(struct rk_priv_data *bsp_priv,
static void rk3366_set_to_rmii(struct rk_priv_data *bsp_priv)
{
- struct device *dev = &bsp_priv->pdev->dev;
-
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
- return;
- }
-
regmap_write(bsp_priv->grf, RK3366_GRF_SOC_CON6,
RK3366_GMAC_PHY_INTF_SEL_RMII | RK3366_GMAC_RMII_MODE);
}
@@ -744,11 +694,6 @@ static void rk3366_set_rgmii_speed(struct rk_priv_data *bsp_priv, int speed)
{
struct device *dev = &bsp_priv->pdev->dev;
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
- return;
- }
-
if (speed == 10)
regmap_write(bsp_priv->grf, RK3366_GRF_SOC_CON6,
RK3366_GMAC_CLK_2_5M);
@@ -766,11 +711,6 @@ static void rk3366_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
{
struct device *dev = &bsp_priv->pdev->dev;
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
- return;
- }
-
if (speed == 10) {
regmap_write(bsp_priv->grf, RK3366_GRF_SOC_CON6,
RK3366_GMAC_RMII_CLK_2_5M |
@@ -822,13 +762,6 @@ static const struct rk_gmac_ops rk3366_ops = {
static void rk3368_set_to_rgmii(struct rk_priv_data *bsp_priv,
int tx_delay, int rx_delay)
{
- struct device *dev = &bsp_priv->pdev->dev;
-
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
- return;
- }
-
regmap_write(bsp_priv->grf, RK3368_GRF_SOC_CON15,
RK3368_GMAC_PHY_INTF_SEL_RGMII |
RK3368_GMAC_RMII_MODE_CLR);
@@ -840,13 +773,6 @@ static void rk3368_set_to_rgmii(struct rk_priv_data *bsp_priv,
static void rk3368_set_to_rmii(struct rk_priv_data *bsp_priv)
{
- struct device *dev = &bsp_priv->pdev->dev;
-
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
- return;
- }
-
regmap_write(bsp_priv->grf, RK3368_GRF_SOC_CON15,
RK3368_GMAC_PHY_INTF_SEL_RMII | RK3368_GMAC_RMII_MODE);
}
@@ -855,11 +781,6 @@ static void rk3368_set_rgmii_speed(struct rk_priv_data *bsp_priv, int speed)
{
struct device *dev = &bsp_priv->pdev->dev;
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
- return;
- }
-
if (speed == 10)
regmap_write(bsp_priv->grf, RK3368_GRF_SOC_CON15,
RK3368_GMAC_CLK_2_5M);
@@ -877,11 +798,6 @@ static void rk3368_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
{
struct device *dev = &bsp_priv->pdev->dev;
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
- return;
- }
-
if (speed == 10) {
regmap_write(bsp_priv->grf, RK3368_GRF_SOC_CON15,
RK3368_GMAC_RMII_CLK_2_5M |
@@ -933,13 +849,6 @@ static const struct rk_gmac_ops rk3368_ops = {
static void rk3399_set_to_rgmii(struct rk_priv_data *bsp_priv,
int tx_delay, int rx_delay)
{
- struct device *dev = &bsp_priv->pdev->dev;
-
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
- return;
- }
-
regmap_write(bsp_priv->grf, RK3399_GRF_SOC_CON5,
RK3399_GMAC_PHY_INTF_SEL_RGMII |
RK3399_GMAC_RMII_MODE_CLR);
@@ -951,13 +860,6 @@ static void rk3399_set_to_rgmii(struct rk_priv_data *bsp_priv,
static void rk3399_set_to_rmii(struct rk_priv_data *bsp_priv)
{
- struct device *dev = &bsp_priv->pdev->dev;
-
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
- return;
- }
-
regmap_write(bsp_priv->grf, RK3399_GRF_SOC_CON5,
RK3399_GMAC_PHY_INTF_SEL_RMII | RK3399_GMAC_RMII_MODE);
}
@@ -966,11 +868,6 @@ static void rk3399_set_rgmii_speed(struct rk_priv_data *bsp_priv, int speed)
{
struct device *dev = &bsp_priv->pdev->dev;
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
- return;
- }
-
if (speed == 10)
regmap_write(bsp_priv->grf, RK3399_GRF_SOC_CON5,
RK3399_GMAC_CLK_2_5M);
@@ -988,11 +885,6 @@ static void rk3399_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
{
struct device *dev = &bsp_priv->pdev->dev;
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
- return;
- }
-
if (speed == 10) {
regmap_write(bsp_priv->grf, RK3399_GRF_SOC_CON5,
RK3399_GMAC_RMII_CLK_2_5M |
@@ -1013,6 +905,149 @@ static const struct rk_gmac_ops rk3399_ops = {
.set_rmii_speed = rk3399_set_rmii_speed,
};
+#define RK3528_VO_GRF_GMAC_CON 0x0018
+#define RK3528_VO_GRF_MACPHY_CON0 0x001c
+#define RK3528_VO_GRF_MACPHY_CON1 0x0020
+#define RK3528_VPU_GRF_GMAC_CON5 0x0018
+#define RK3528_VPU_GRF_GMAC_CON6 0x001c
+
+#define RK3528_GMAC_RXCLK_DLY_ENABLE GRF_BIT(15)
+#define RK3528_GMAC_RXCLK_DLY_DISABLE GRF_CLR_BIT(15)
+#define RK3528_GMAC_TXCLK_DLY_ENABLE GRF_BIT(14)
+#define RK3528_GMAC_TXCLK_DLY_DISABLE GRF_CLR_BIT(14)
+
+#define RK3528_GMAC_CLK_RX_DL_CFG(val) HIWORD_UPDATE(val, 0xFF, 8)
+#define RK3528_GMAC_CLK_TX_DL_CFG(val) HIWORD_UPDATE(val, 0xFF, 0)
+
+#define RK3528_GMAC0_PHY_INTF_SEL_RMII GRF_BIT(1)
+#define RK3528_GMAC1_PHY_INTF_SEL_RGMII GRF_CLR_BIT(8)
+#define RK3528_GMAC1_PHY_INTF_SEL_RMII GRF_BIT(8)
+
+#define RK3528_GMAC1_CLK_SELECT_CRU GRF_CLR_BIT(12)
+#define RK3528_GMAC1_CLK_SELECT_IO GRF_BIT(12)
+
+#define RK3528_GMAC0_CLK_RMII_DIV2 GRF_BIT(3)
+#define RK3528_GMAC0_CLK_RMII_DIV20 GRF_CLR_BIT(3)
+#define RK3528_GMAC1_CLK_RMII_DIV2 GRF_BIT(10)
+#define RK3528_GMAC1_CLK_RMII_DIV20 GRF_CLR_BIT(10)
+
+#define RK3528_GMAC1_CLK_RGMII_DIV1 (GRF_CLR_BIT(11) | GRF_CLR_BIT(10))
+#define RK3528_GMAC1_CLK_RGMII_DIV5 (GRF_BIT(11) | GRF_BIT(10))
+#define RK3528_GMAC1_CLK_RGMII_DIV50 (GRF_BIT(11) | GRF_CLR_BIT(10))
+
+#define RK3528_GMAC0_CLK_RMII_GATE GRF_BIT(2)
+#define RK3528_GMAC0_CLK_RMII_NOGATE GRF_CLR_BIT(2)
+#define RK3528_GMAC1_CLK_RMII_GATE GRF_BIT(9)
+#define RK3528_GMAC1_CLK_RMII_NOGATE GRF_CLR_BIT(9)
+
+static void rk3528_set_to_rgmii(struct rk_priv_data *bsp_priv,
+ int tx_delay, int rx_delay)
+{
+ regmap_write(bsp_priv->grf, RK3528_VPU_GRF_GMAC_CON5,
+ RK3528_GMAC1_PHY_INTF_SEL_RGMII);
+
+ regmap_write(bsp_priv->grf, RK3528_VPU_GRF_GMAC_CON5,
+ DELAY_ENABLE(RK3528, tx_delay, rx_delay));
+
+ regmap_write(bsp_priv->grf, RK3528_VPU_GRF_GMAC_CON6,
+ RK3528_GMAC_CLK_RX_DL_CFG(rx_delay) |
+ RK3528_GMAC_CLK_TX_DL_CFG(tx_delay));
+}
+
+static void rk3528_set_to_rmii(struct rk_priv_data *bsp_priv)
+{
+ if (bsp_priv->id == 1)
+ regmap_write(bsp_priv->grf, RK3528_VPU_GRF_GMAC_CON5,
+ RK3528_GMAC1_PHY_INTF_SEL_RMII);
+ else
+ regmap_write(bsp_priv->grf, RK3528_VO_GRF_GMAC_CON,
+ RK3528_GMAC0_PHY_INTF_SEL_RMII |
+ RK3528_GMAC0_CLK_RMII_DIV2);
+}
+
+static void rk3528_set_rgmii_speed(struct rk_priv_data *bsp_priv, int speed)
+{
+ struct device *dev = &bsp_priv->pdev->dev;
+
+ if (speed == 10)
+ regmap_write(bsp_priv->grf, RK3528_VPU_GRF_GMAC_CON5,
+ RK3528_GMAC1_CLK_RGMII_DIV50);
+ else if (speed == 100)
+ regmap_write(bsp_priv->grf, RK3528_VPU_GRF_GMAC_CON5,
+ RK3528_GMAC1_CLK_RGMII_DIV5);
+ else if (speed == 1000)
+ regmap_write(bsp_priv->grf, RK3528_VPU_GRF_GMAC_CON5,
+ RK3528_GMAC1_CLK_RGMII_DIV1);
+ else
+ dev_err(dev, "unknown speed value for RGMII! speed=%d", speed);
+}
+
+static void rk3528_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
+{
+ struct device *dev = &bsp_priv->pdev->dev;
+ unsigned int reg, val;
+
+ if (speed == 10)
+ val = bsp_priv->id == 1 ? RK3528_GMAC1_CLK_RMII_DIV20 :
+ RK3528_GMAC0_CLK_RMII_DIV20;
+ else if (speed == 100)
+ val = bsp_priv->id == 1 ? RK3528_GMAC1_CLK_RMII_DIV2 :
+ RK3528_GMAC0_CLK_RMII_DIV2;
+ else {
+ dev_err(dev, "unknown speed value for RMII! speed=%d", speed);
+ return;
+ }
+
+ reg = bsp_priv->id == 1 ? RK3528_VPU_GRF_GMAC_CON5 :
+ RK3528_VO_GRF_GMAC_CON;
+
+ regmap_write(bsp_priv->grf, reg, val);
+}
+
+static void rk3528_set_clock_selection(struct rk_priv_data *bsp_priv,
+ bool input, bool enable)
+{
+ unsigned int val;
+
+ if (bsp_priv->id == 1) {
+ val = input ? RK3528_GMAC1_CLK_SELECT_IO :
+ RK3528_GMAC1_CLK_SELECT_CRU;
+ val |= enable ? RK3528_GMAC1_CLK_RMII_NOGATE :
+ RK3528_GMAC1_CLK_RMII_GATE;
+ regmap_write(bsp_priv->grf, RK3528_VPU_GRF_GMAC_CON5, val);
+ } else {
+ val = enable ? RK3528_GMAC0_CLK_RMII_NOGATE :
+ RK3528_GMAC0_CLK_RMII_GATE;
+ regmap_write(bsp_priv->grf, RK3528_VO_GRF_GMAC_CON, val);
+ }
+}
+
+static void rk3528_integrated_phy_powerup(struct rk_priv_data *bsp_priv)
+{
+ rk_gmac_integrated_fephy_powerup(bsp_priv, RK3528_VO_GRF_MACPHY_CON0);
+}
+
+static void rk3528_integrated_phy_powerdown(struct rk_priv_data *bsp_priv)
+{
+ rk_gmac_integrated_fephy_powerdown(bsp_priv, RK3528_VO_GRF_MACPHY_CON0);
+}
+
+static const struct rk_gmac_ops rk3528_ops = {
+ .set_to_rgmii = rk3528_set_to_rgmii,
+ .set_to_rmii = rk3528_set_to_rmii,
+ .set_rgmii_speed = rk3528_set_rgmii_speed,
+ .set_rmii_speed = rk3528_set_rmii_speed,
+ .set_clock_selection = rk3528_set_clock_selection,
+ .integrated_phy_powerup = rk3528_integrated_phy_powerup,
+ .integrated_phy_powerdown = rk3528_integrated_phy_powerdown,
+ .regs_valid = true,
+ .regs = {
+ 0xffbd0000, /* gmac0 */
+ 0xffbe0000, /* gmac1 */
+ 0x0, /* sentinel */
+ },
+};
+
#define RK3568_GRF_GMAC0_CON0 0x0380
#define RK3568_GRF_GMAC0_CON1 0x0384
#define RK3568_GRF_GMAC1_CON0 0x0388
@@ -1037,14 +1072,8 @@ static const struct rk_gmac_ops rk3399_ops = {
static void rk3568_set_to_rgmii(struct rk_priv_data *bsp_priv,
int tx_delay, int rx_delay)
{
- struct device *dev = &bsp_priv->pdev->dev;
u32 con0, con1;
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "Missing rockchip,grf property\n");
- return;
- }
-
con0 = (bsp_priv->id == 1) ? RK3568_GRF_GMAC1_CON0 :
RK3568_GRF_GMAC0_CON0;
con1 = (bsp_priv->id == 1) ? RK3568_GRF_GMAC1_CON1 :
@@ -1062,14 +1091,8 @@ static void rk3568_set_to_rgmii(struct rk_priv_data *bsp_priv,
static void rk3568_set_to_rmii(struct rk_priv_data *bsp_priv)
{
- struct device *dev = &bsp_priv->pdev->dev;
u32 con1;
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
- return;
- }
-
con1 = (bsp_priv->id == 1) ? RK3568_GRF_GMAC1_CON1 :
RK3568_GRF_GMAC0_CON1;
regmap_write(bsp_priv->grf, con1, RK3568_GMAC_PHY_INTF_SEL_RMII);
@@ -1147,14 +1170,8 @@ static const struct rk_gmac_ops rk3568_ops = {
static void rk3576_set_to_rgmii(struct rk_priv_data *bsp_priv,
int tx_delay, int rx_delay)
{
- struct device *dev = &bsp_priv->pdev->dev;
unsigned int offset_con;
- if (IS_ERR(bsp_priv->grf) || IS_ERR(bsp_priv->php_grf)) {
- dev_err(dev, "Missing rockchip,grf or rockchip,php-grf property\n");
- return;
- }
-
offset_con = bsp_priv->id == 1 ? RK3576_GRF_GMAC_CON1 :
RK3576_GRF_GMAC_CON0;
@@ -1180,14 +1197,8 @@ static void rk3576_set_to_rgmii(struct rk_priv_data *bsp_priv,
static void rk3576_set_to_rmii(struct rk_priv_data *bsp_priv)
{
- struct device *dev = &bsp_priv->pdev->dev;
unsigned int offset_con;
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
- return;
- }
-
offset_con = bsp_priv->id == 1 ? RK3576_GRF_GMAC_CON1 :
RK3576_GRF_GMAC_CON0;
@@ -1254,6 +1265,7 @@ static const struct rk_gmac_ops rk3576_ops = {
.set_rgmii_speed = rk3576_set_gmac_speed,
.set_rmii_speed = rk3576_set_gmac_speed,
.set_clock_selection = rk3576_set_clock_selection,
+ .php_grf_required = true,
.regs_valid = true,
.regs = {
0x2a220000, /* gmac0 */
@@ -1306,14 +1318,8 @@ static const struct rk_gmac_ops rk3576_ops = {
static void rk3588_set_to_rgmii(struct rk_priv_data *bsp_priv,
int tx_delay, int rx_delay)
{
- struct device *dev = &bsp_priv->pdev->dev;
u32 offset_con, id = bsp_priv->id;
- if (IS_ERR(bsp_priv->grf) || IS_ERR(bsp_priv->php_grf)) {
- dev_err(dev, "Missing rockchip,grf or rockchip,php_grf property\n");
- return;
- }
-
offset_con = bsp_priv->id == 1 ? RK3588_GRF_GMAC_CON9 :
RK3588_GRF_GMAC_CON8;
@@ -1334,13 +1340,6 @@ static void rk3588_set_to_rgmii(struct rk_priv_data *bsp_priv,
static void rk3588_set_to_rmii(struct rk_priv_data *bsp_priv)
{
- struct device *dev = &bsp_priv->pdev->dev;
-
- if (IS_ERR(bsp_priv->php_grf)) {
- dev_err(dev, "%s: Missing rockchip,php_grf property\n", __func__);
- return;
- }
-
regmap_write(bsp_priv->php_grf, RK3588_GRF_GMAC_CON0,
RK3588_GMAC_PHY_INTF_SEL_RMII(bsp_priv->id));
@@ -1401,6 +1400,7 @@ static const struct rk_gmac_ops rk3588_ops = {
.set_rgmii_speed = rk3588_set_gmac_speed,
.set_rmii_speed = rk3588_set_gmac_speed,
.set_clock_selection = rk3588_set_clock_selection,
+ .php_grf_required = true,
.regs_valid = true,
.regs = {
0xfe1b0000, /* gmac0 */
@@ -1423,13 +1423,6 @@ static const struct rk_gmac_ops rk3588_ops = {
static void rv1108_set_to_rmii(struct rk_priv_data *bsp_priv)
{
- struct device *dev = &bsp_priv->pdev->dev;
-
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
- return;
- }
-
regmap_write(bsp_priv->grf, RV1108_GRF_GMAC_CON0,
RV1108_GMAC_PHY_INTF_SEL_RMII);
}
@@ -1438,11 +1431,6 @@ static void rv1108_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
{
struct device *dev = &bsp_priv->pdev->dev;
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
- return;
- }
-
if (speed == 10) {
regmap_write(bsp_priv->grf, RV1108_GRF_GMAC_CON0,
RV1108_GMAC_RMII_CLK_2_5M |
@@ -1491,13 +1479,6 @@ static const struct rk_gmac_ops rv1108_ops = {
static void rv1126_set_to_rgmii(struct rk_priv_data *bsp_priv,
int tx_delay, int rx_delay)
{
- struct device *dev = &bsp_priv->pdev->dev;
-
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "Missing rockchip,grf property\n");
- return;
- }
-
regmap_write(bsp_priv->grf, RV1126_GRF_GMAC_CON0,
RV1126_GMAC_PHY_INTF_SEL_RGMII |
RV1126_GMAC_M0_RXCLK_DLY_ENABLE |
@@ -1516,13 +1497,6 @@ static void rv1126_set_to_rgmii(struct rk_priv_data *bsp_priv,
static void rv1126_set_to_rmii(struct rk_priv_data *bsp_priv)
{
- struct device *dev = &bsp_priv->pdev->dev;
-
- if (IS_ERR(bsp_priv->grf)) {
- dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
- return;
- }
-
regmap_write(bsp_priv->grf, RV1126_GRF_GMAC_CON0,
RV1126_GMAC_PHY_INTF_SEL_RMII);
}
@@ -1578,50 +1552,6 @@ static const struct rk_gmac_ops rv1126_ops = {
.set_rmii_speed = rv1126_set_rmii_speed,
};
-#define RK_GRF_MACPHY_CON0 0xb00
-#define RK_GRF_MACPHY_CON1 0xb04
-#define RK_GRF_MACPHY_CON2 0xb08
-#define RK_GRF_MACPHY_CON3 0xb0c
-
-#define RK_MACPHY_ENABLE GRF_BIT(0)
-#define RK_MACPHY_DISABLE GRF_CLR_BIT(0)
-#define RK_MACPHY_CFG_CLK_50M GRF_BIT(14)
-#define RK_GMAC2PHY_RMII_MODE (GRF_BIT(6) | GRF_CLR_BIT(7))
-#define RK_GRF_CON2_MACPHY_ID HIWORD_UPDATE(0x1234, 0xffff, 0)
-#define RK_GRF_CON3_MACPHY_ID HIWORD_UPDATE(0x35, 0x3f, 0)
-
-static void rk_gmac_integrated_phy_powerup(struct rk_priv_data *priv)
-{
- if (priv->ops->integrated_phy_powerup)
- priv->ops->integrated_phy_powerup(priv);
-
- regmap_write(priv->grf, RK_GRF_MACPHY_CON0, RK_MACPHY_CFG_CLK_50M);
- regmap_write(priv->grf, RK_GRF_MACPHY_CON0, RK_GMAC2PHY_RMII_MODE);
-
- regmap_write(priv->grf, RK_GRF_MACPHY_CON2, RK_GRF_CON2_MACPHY_ID);
- regmap_write(priv->grf, RK_GRF_MACPHY_CON3, RK_GRF_CON3_MACPHY_ID);
-
- if (priv->phy_reset) {
- /* PHY needs to be disabled before trying to reset it */
- regmap_write(priv->grf, RK_GRF_MACPHY_CON0, RK_MACPHY_DISABLE);
- if (priv->phy_reset)
- reset_control_assert(priv->phy_reset);
- usleep_range(10, 20);
- if (priv->phy_reset)
- reset_control_deassert(priv->phy_reset);
- usleep_range(10, 20);
- regmap_write(priv->grf, RK_GRF_MACPHY_CON0, RK_MACPHY_ENABLE);
- msleep(30);
- }
-}
-
-static void rk_gmac_integrated_phy_powerdown(struct rk_priv_data *priv)
-{
- regmap_write(priv->grf, RK_GRF_MACPHY_CON0, RK_MACPHY_DISABLE);
- if (priv->phy_reset)
- reset_control_assert(priv->phy_reset);
-}
-
static int rk_gmac_clk_init(struct plat_stmmacenet_data *plat)
{
struct rk_priv_data *bsp_priv = plat->bsp_priv;
@@ -1749,7 +1679,7 @@ static struct rk_priv_data *rk_gmac_setup(struct platform_device *pdev,
if (!bsp_priv)
return ERR_PTR(-ENOMEM);
- of_get_phy_mode(dev->of_node, &bsp_priv->phy_iface);
+ bsp_priv->phy_iface = plat->phy_interface;
bsp_priv->ops = ops;
/* Some SoCs have multiple MAC controllers, which need
@@ -1812,8 +1742,22 @@ static struct rk_priv_data *rk_gmac_setup(struct platform_device *pdev,
bsp_priv->grf = syscon_regmap_lookup_by_phandle(dev->of_node,
"rockchip,grf");
- bsp_priv->php_grf = syscon_regmap_lookup_by_phandle(dev->of_node,
- "rockchip,php-grf");
+ if (IS_ERR(bsp_priv->grf)) {
+ dev_err_probe(dev, PTR_ERR(bsp_priv->grf),
+ "failed to lookup rockchip,grf\n");
+ return ERR_CAST(bsp_priv->grf);
+ }
+
+ if (ops->php_grf_required) {
+ bsp_priv->php_grf =
+ syscon_regmap_lookup_by_phandle(dev->of_node,
+ "rockchip,php-grf");
+ if (IS_ERR(bsp_priv->php_grf)) {
+ dev_err_probe(dev, PTR_ERR(bsp_priv->php_grf),
+ "failed to lookup rockchip,php-grf\n");
+ return ERR_CAST(bsp_priv->php_grf);
+ }
+ }
if (plat->phy_node) {
bsp_priv->integrated_phy = of_property_read_bool(plat->phy_node,
@@ -1903,16 +1847,16 @@ static int rk_gmac_powerup(struct rk_priv_data *bsp_priv)
pm_runtime_get_sync(dev);
- if (bsp_priv->integrated_phy)
- rk_gmac_integrated_phy_powerup(bsp_priv);
+ if (bsp_priv->integrated_phy && bsp_priv->ops->integrated_phy_powerup)
+ bsp_priv->ops->integrated_phy_powerup(bsp_priv);
return 0;
}
static void rk_gmac_powerdown(struct rk_priv_data *gmac)
{
- if (gmac->integrated_phy)
- rk_gmac_integrated_phy_powerdown(gmac);
+ if (gmac->integrated_phy && gmac->ops->integrated_phy_powerdown)
+ gmac->ops->integrated_phy_powerdown(gmac);
pm_runtime_put_sync(&gmac->pdev->dev);
@@ -1920,9 +1864,10 @@ static void rk_gmac_powerdown(struct rk_priv_data *gmac)
gmac_clk_enable(gmac, false);
}
-static void rk_fix_speed(void *priv, unsigned int speed, unsigned int mode)
+static int rk_set_clk_tx_rate(void *bsp_priv_, struct clk *clk_tx_i,
+ phy_interface_t interface, int speed)
{
- struct rk_priv_data *bsp_priv = priv;
+ struct rk_priv_data *bsp_priv = bsp_priv_;
struct device *dev = &bsp_priv->pdev->dev;
switch (bsp_priv->phy_iface) {
@@ -1940,6 +1885,8 @@ static void rk_fix_speed(void *priv, unsigned int speed, unsigned int mode)
default:
dev_err(dev, "unsupported interface %d", bsp_priv->phy_iface);
}
+
+ return 0;
}
static int rk_gmac_probe(struct platform_device *pdev)
@@ -1966,9 +1913,13 @@ static int rk_gmac_probe(struct platform_device *pdev)
/* If the stmmac is not already selected as gmac4,
* then make sure we fallback to gmac.
*/
- if (!plat_dat->has_gmac4)
+ if (!plat_dat->has_gmac4) {
plat_dat->has_gmac = true;
- plat_dat->fix_mac_speed = rk_fix_speed;
+ plat_dat->rx_fifo_size = 4096;
+ plat_dat->tx_fifo_size = 2048;
+ }
+
+ plat_dat->set_clk_tx_rate = rk_set_clk_tx_rate;
plat_dat->bsp_priv = rk_gmac_setup(pdev, plat_dat, data);
if (IS_ERR(plat_dat->bsp_priv))
@@ -2044,6 +1995,7 @@ static const struct of_device_id rk_gmac_dwmac_match[] = {
{ .compatible = "rockchip,rk3366-gmac", .data = &rk3366_ops },
{ .compatible = "rockchip,rk3368-gmac", .data = &rk3368_ops },
{ .compatible = "rockchip,rk3399-gmac", .data = &rk3399_ops },
+ { .compatible = "rockchip,rk3528-gmac", .data = &rk3528_ops },
{ .compatible = "rockchip,rk3568-gmac", .data = &rk3568_ops },
{ .compatible = "rockchip,rk3576-gmac", .data = &rk3576_ops },
{ .compatible = "rockchip,rk3588-gmac", .data = &rk3588_ops },
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-s32.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-s32.c
index 9cc0e5817416..221539d760bc 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-s32.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-s32.c
@@ -100,24 +100,6 @@ static void s32_gmac_exit(struct platform_device *pdev, void *priv)
clk_disable_unprepare(gmac->rx_clk);
}
-static void s32_fix_mac_speed(void *priv, unsigned int speed, unsigned int mode)
-{
- struct s32_priv_data *gmac = priv;
- long tx_clk_rate;
- int ret;
-
- tx_clk_rate = rgmii_clock(speed);
- if (tx_clk_rate < 0) {
- dev_err(gmac->dev, "Unsupported/Invalid speed: %d\n", speed);
- return;
- }
-
- dev_dbg(gmac->dev, "Set tx clock to %ld Hz\n", tx_clk_rate);
- ret = clk_set_rate(gmac->tx_clk, tx_clk_rate);
- if (ret)
- dev_err(gmac->dev, "Can't set tx clock\n");
-}
-
static int s32_dwmac_probe(struct platform_device *pdev)
{
struct plat_stmmacenet_data *plat;
@@ -172,7 +154,9 @@ static int s32_dwmac_probe(struct platform_device *pdev)
plat->init = s32_gmac_init;
plat->exit = s32_gmac_exit;
- plat->fix_mac_speed = s32_fix_mac_speed;
+
+ plat->clk_tx_i = gmac->tx_clk;
+ plat->set_clk_tx_rate = stmmac_set_clk_tx_rate;
plat->bsp_priv = gmac;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
index 16020b72dec8..116855658559 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
@@ -61,7 +61,7 @@ struct socfpga_dwmac {
struct mdio_device *pcs_mdiodev;
};
-static void socfpga_dwmac_fix_mac_speed(void *priv, unsigned int speed, unsigned int mode)
+static void socfpga_dwmac_fix_mac_speed(void *priv, int speed, unsigned int mode)
{
struct socfpga_dwmac *dwmac = (struct socfpga_dwmac *)priv;
void __iomem *splitter_base = dwmac->splitter_base;
@@ -523,24 +523,6 @@ static int socfpga_dwmac_resume(struct device *dev)
dwmac_priv->ops->set_phy_mode(priv->plat->bsp_priv);
- /* Before the enet controller is suspended, the phy is suspended.
- * This causes the phy clock to be gated. The enet controller is
- * resumed before the phy, so the clock is still gated "off" when
- * the enet controller is resumed. This code makes sure the phy
- * is "resumed" before reinitializing the enet controller since
- * the enet controller depends on an active phy clock to complete
- * a DMA reset. A DMA reset will "time out" if executed
- * with no phy clock input on the Synopsys enet controller.
- * Verified through Synopsys Case #8000711656.
- *
- * Note that the phy clock is also gated when the phy is isolated.
- * Phy "suspend" and "isolate" controls are located in phy basic
- * control register 0, and can be modified by the phy driver
- * framework.
- */
- if (ndev->phydev)
- phy_resume(ndev->phydev);
-
return stmmac_resume(dev);
}
#endif /* CONFIG_PM_SLEEP */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sophgo.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sophgo.c
new file mode 100644
index 000000000000..3303784cbbf8
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sophgo.c
@@ -0,0 +1,75 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Sophgo DWMAC platform driver
+ *
+ * Copyright (C) 2024 Inochi Amaoto <inochiama@gmail.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/platform_device.h>
+
+#include "stmmac_platform.h"
+
+static int sophgo_sg2044_dwmac_init(struct platform_device *pdev,
+ struct plat_stmmacenet_data *plat_dat,
+ struct stmmac_resources *stmmac_res)
+{
+ plat_dat->clk_tx_i = devm_clk_get_enabled(&pdev->dev, "tx");
+ if (IS_ERR(plat_dat->clk_tx_i))
+ return dev_err_probe(&pdev->dev, PTR_ERR(plat_dat->clk_tx_i),
+ "failed to get tx clock\n");
+
+ plat_dat->flags |= STMMAC_FLAG_SPH_DISABLE;
+ plat_dat->set_clk_tx_rate = stmmac_set_clk_tx_rate;
+ plat_dat->multicast_filter_bins = 0;
+ plat_dat->unicast_filter_entries = 1;
+
+ return 0;
+}
+
+static int sophgo_dwmac_probe(struct platform_device *pdev)
+{
+ struct plat_stmmacenet_data *plat_dat;
+ struct stmmac_resources stmmac_res;
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ ret = stmmac_get_platform_resources(pdev, &stmmac_res);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "failed to get platform resources\n");
+
+ plat_dat = devm_stmmac_probe_config_dt(pdev, stmmac_res.mac);
+ if (IS_ERR(plat_dat))
+ return dev_err_probe(dev, PTR_ERR(plat_dat),
+ "failed to parse DT parameters\n");
+
+ ret = sophgo_sg2044_dwmac_init(pdev, plat_dat, &stmmac_res);
+ if (ret)
+ return ret;
+
+ return stmmac_dvr_probe(dev, plat_dat, &stmmac_res);
+}
+
+static const struct of_device_id sophgo_dwmac_match[] = {
+ { .compatible = "sophgo,sg2044-dwmac" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, sophgo_dwmac_match);
+
+static struct platform_driver sophgo_dwmac_driver = {
+ .probe = sophgo_dwmac_probe,
+ .remove = stmmac_pltfr_remove,
+ .driver = {
+ .name = "sophgo-dwmac",
+ .pm = &stmmac_pltfr_pm_ops,
+ .of_match_table = sophgo_dwmac_match,
+ },
+};
+module_platform_driver(sophgo_dwmac_driver);
+
+MODULE_AUTHOR("Inochi Amaoto <inochiama@gmail.com>");
+MODULE_DESCRIPTION("Sophgo DWMAC platform driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c
index 0a0a363d3730..2013d7477eb7 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c
@@ -27,27 +27,9 @@ struct starfive_dwmac_data {
struct starfive_dwmac {
struct device *dev;
- struct clk *clk_tx;
const struct starfive_dwmac_data *data;
};
-static void starfive_dwmac_fix_mac_speed(void *priv, unsigned int speed, unsigned int mode)
-{
- struct starfive_dwmac *dwmac = priv;
- long rate;
- int err;
-
- rate = rgmii_clock(speed);
- if (rate < 0) {
- dev_err(dwmac->dev, "invalid speed %u\n", speed);
- return;
- }
-
- err = clk_set_rate(dwmac->clk_tx, rate);
- if (err)
- dev_err(dwmac->dev, "failed to set tx rate %lu\n", rate);
-}
-
static int starfive_dwmac_set_mode(struct plat_stmmacenet_data *plat_dat)
{
struct starfive_dwmac *dwmac = plat_dat->bsp_priv;
@@ -122,9 +104,9 @@ static int starfive_dwmac_probe(struct platform_device *pdev)
dwmac->data = device_get_match_data(&pdev->dev);
- dwmac->clk_tx = devm_clk_get_enabled(&pdev->dev, "tx");
- if (IS_ERR(dwmac->clk_tx))
- return dev_err_probe(&pdev->dev, PTR_ERR(dwmac->clk_tx),
+ plat_dat->clk_tx_i = devm_clk_get_enabled(&pdev->dev, "tx");
+ if (IS_ERR(plat_dat->clk_tx_i))
+ return dev_err_probe(&pdev->dev, PTR_ERR(plat_dat->clk_tx_i),
"error getting tx clock\n");
clk_gtx = devm_clk_get_enabled(&pdev->dev, "gtx");
@@ -139,9 +121,10 @@ static int starfive_dwmac_probe(struct platform_device *pdev)
* internally, because rgmii_rxin will be adaptively adjusted.
*/
if (!device_property_read_bool(&pdev->dev, "starfive,tx-use-rgmii-clk"))
- plat_dat->fix_mac_speed = starfive_dwmac_fix_mac_speed;
+ plat_dat->set_clk_tx_rate = stmmac_set_clk_tx_rate;
dwmac->dev = &pdev->dev;
+ plat_dat->flags |= STMMAC_FLAG_EN_TX_LPI_CLK_PHY_CAP;
plat_dat->bsp_priv = dwmac;
plat_dat->dma_cfg->dche = true;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
index f25461c292fe..be57c6c12c1c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
@@ -99,12 +99,12 @@ struct sti_dwmac {
int clk_sel_reg; /* GMAC ext clk selection register */
struct regmap *regmap;
bool gmac_en;
- u32 speed;
- void (*fix_retime_src)(void *priv, unsigned int speed, unsigned int mode);
+ int speed;
+ void (*fix_retime_src)(void *priv, int speed, unsigned int mode);
};
struct sti_dwmac_of_data {
- void (*fix_retime_src)(void *priv, unsigned int speed, unsigned int mode);
+ void (*fix_retime_src)(void *priv, int speed, unsigned int mode);
};
static u32 phy_intf_sels[] = {
@@ -132,7 +132,7 @@ static u32 stih4xx_tx_retime_val[] = {
| STIH4XX_ETH_SEL_INTERNAL_NOTEXT_PHYCLK,
};
-static void stih4xx_fix_retime_src(void *priv, u32 spd, unsigned int mode)
+static void stih4xx_fix_retime_src(void *priv, int spd, unsigned int mode)
{
struct sti_dwmac *dwmac = priv;
u32 src = dwmac->tx_retime_src;
@@ -185,7 +185,8 @@ static int sti_dwmac_set_mode(struct sti_dwmac *dwmac)
}
static int sti_dwmac_parse_data(struct sti_dwmac *dwmac,
- struct platform_device *pdev)
+ struct platform_device *pdev,
+ struct plat_stmmacenet_data *plat_dat)
{
struct resource *res;
struct device *dev = &pdev->dev;
@@ -204,12 +205,7 @@ static int sti_dwmac_parse_data(struct sti_dwmac *dwmac,
if (IS_ERR(regmap))
return PTR_ERR(regmap);
- err = of_get_phy_mode(np, &dwmac->interface);
- if (err && err != -ENODEV) {
- dev_err(dev, "Can't get phy-mode\n");
- return err;
- }
-
+ dwmac->interface = plat_dat->phy_interface;
dwmac->regmap = regmap;
dwmac->gmac_en = of_property_read_bool(np, "st,gmac_en");
dwmac->ext_phyclk = of_property_read_bool(np, "st,ext-phyclk");
@@ -268,7 +264,7 @@ static int sti_dwmac_probe(struct platform_device *pdev)
if (!dwmac)
return -ENOMEM;
- ret = sti_dwmac_parse_data(dwmac, pdev);
+ ret = sti_dwmac_parse_data(dwmac, pdev, plat_dat);
if (ret) {
dev_err(&pdev->dev, "Unable to parse OF data\n");
return ret;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
index 1fcb74e9e3ff..c3d321192581 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
@@ -538,6 +538,7 @@ static int stm32_dwmac_probe(struct platform_device *pdev)
return ret;
}
+ plat_dat->flags |= STMMAC_FLAG_EN_TX_LPI_CLK_PHY_CAP;
plat_dat->bsp_priv = dwmac;
ret = stm32_dwmac_init(plat_dat, false);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
index 4b7b2582a120..85723a78793a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
@@ -1155,11 +1155,10 @@ static int sun8i_dwmac_probe(struct platform_device *pdev)
struct stmmac_resources stmmac_res;
struct sunxi_priv_data *gmac;
struct device *dev = &pdev->dev;
- phy_interface_t interface;
- int ret;
struct stmmac_priv *priv;
struct net_device *ndev;
struct regmap *regmap;
+ int ret;
ret = stmmac_get_platform_resources(pdev, &stmmac_res);
if (ret)
@@ -1219,10 +1218,6 @@ static int sun8i_dwmac_probe(struct platform_device *pdev)
return ret;
}
- ret = of_get_phy_mode(dev->of_node, &interface);
- if (ret)
- return -EINVAL;
-
plat_dat = devm_stmmac_probe_config_dt(pdev, stmmac_res.mac);
if (IS_ERR(plat_dat))
return PTR_ERR(plat_dat);
@@ -1230,7 +1225,6 @@ static int sun8i_dwmac_probe(struct platform_device *pdev)
/* platform data specifying hardware features and callbacks.
* hardware features were copied from Allwinner drivers.
*/
- plat_dat->mac_interface = interface;
plat_dat->rx_coe = STMMAC_RX_COE_TYPE2;
plat_dat->tx_coe = 1;
plat_dat->flags |= STMMAC_FLAG_HAS_SUN8I;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
index 9ae318436c4a..9f098ff0ff05 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
@@ -72,7 +72,7 @@ static void sun7i_gmac_exit(struct platform_device *pdev, void *priv)
regulator_disable(gmac->regulator);
}
-static void sun7i_fix_speed(void *priv, unsigned int speed, unsigned int mode)
+static void sun7i_fix_speed(void *priv, int speed, unsigned int mode)
{
struct sunxi_priv_data *gmac = priv;
@@ -116,11 +116,7 @@ static int sun7i_gmac_probe(struct platform_device *pdev)
if (!gmac)
return -ENOMEM;
- ret = of_get_phy_mode(dev->of_node, &gmac->interface);
- if (ret && ret != -ENODEV) {
- dev_err(dev, "Can't get phy-mode\n");
- return ret;
- }
+ gmac->interface = plat_dat->phy_interface;
gmac->tx_clk = devm_clk_get(dev, "allwinner_gmac_tx");
if (IS_ERR(gmac->tx_clk)) {
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-thead.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-thead.c
index dce84ed184e9..c72ee759aae5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-thead.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-thead.c
@@ -45,9 +45,6 @@
#define TXCLK_DIR_OUTPUT FIELD_PREP(TXCLK_DIR_MASK, 0)
#define TXCLK_DIR_INPUT FIELD_PREP(TXCLK_DIR_MASK, 1)
-#define GMAC_GMII_RGMII_RATE 125000000
-#define GMAC_MII_RATE 25000000
-
struct thead_dwmac {
struct plat_stmmacenet_data *plat;
void __iomem *apb_base;
@@ -104,11 +101,13 @@ static int thead_dwmac_set_txclk_dir(struct plat_stmmacenet_data *plat)
return 0;
}
-static void thead_dwmac_fix_speed(void *priv, unsigned int speed, unsigned int mode)
+static int thead_set_clk_tx_rate(void *bsp_priv, struct clk *clk_tx_i,
+ phy_interface_t interface, int speed)
{
+ struct thead_dwmac *dwmac = bsp_priv;
struct plat_stmmacenet_data *plat;
- struct thead_dwmac *dwmac = priv;
unsigned long rate;
+ long tx_rate;
u32 div, reg;
plat = dwmac->plat;
@@ -116,44 +115,37 @@ static void thead_dwmac_fix_speed(void *priv, unsigned int speed, unsigned int m
switch (plat->mac_interface) {
/* For MII, rxc/txc is provided by phy */
case PHY_INTERFACE_MODE_MII:
- return;
+ return 0;
case PHY_INTERFACE_MODE_RGMII:
case PHY_INTERFACE_MODE_RGMII_ID:
case PHY_INTERFACE_MODE_RGMII_RXID:
case PHY_INTERFACE_MODE_RGMII_TXID:
rate = clk_get_rate(plat->stmmac_clk);
- if (!rate || rate % GMAC_GMII_RGMII_RATE != 0 ||
- rate % GMAC_MII_RATE != 0) {
- dev_err(dwmac->dev, "invalid gmac rate %ld\n", rate);
- return;
- }
writel(0, dwmac->apb_base + GMAC_PLLCLK_DIV);
- switch (speed) {
- case SPEED_1000:
- div = rate / GMAC_GMII_RGMII_RATE;
- break;
- case SPEED_100:
- div = rate / GMAC_MII_RATE;
- break;
- case SPEED_10:
- div = rate * 10 / GMAC_MII_RATE;
- break;
- default:
- dev_err(dwmac->dev, "invalid speed %u\n", speed);
- return;
+ tx_rate = rgmii_clock(speed);
+ if (tx_rate < 0) {
+ dev_err(dwmac->dev, "invalid speed %d\n", speed);
+ return tx_rate;
+ }
+
+ div = rate / tx_rate;
+ if (rate != tx_rate * div) {
+ dev_err(dwmac->dev, "invalid gmac rate %lu\n", rate);
+ return -EINVAL;
}
reg = FIELD_PREP(GMAC_PLLCLK_DIV_EN, 1) |
FIELD_PREP(GMAC_PLLCLK_DIV_NUM, div);
writel(reg, dwmac->apb_base + GMAC_PLLCLK_DIV);
- break;
+ return 0;
+
default:
dev_err(dwmac->dev, "unsupported phy interface %d\n",
plat->mac_interface);
- return;
+ return -EINVAL;
}
}
@@ -245,7 +237,7 @@ static int thead_dwmac_probe(struct platform_device *pdev)
dwmac->plat = plat;
dwmac->apb_base = apb;
plat->bsp_priv = dwmac;
- plat->fix_mac_speed = thead_dwmac_fix_speed;
+ plat->set_clk_tx_rate = thead_set_clk_tx_rate;
plat->init = thead_dwmac_init;
return devm_stmmac_pltfr_probe(pdev, plat, &stmmac_res);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c
index eccf7f537467..33cf99797df5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c
@@ -54,7 +54,7 @@ struct visconti_eth {
spinlock_t lock; /* lock to protect register update */
};
-static void visconti_eth_fix_mac_speed(void *priv, unsigned int speed, unsigned int mode)
+static void visconti_eth_fix_mac_speed(void *priv, int speed, unsigned int mode)
{
struct visconti_eth *dwmac = priv;
struct net_device *netdev = dev_get_drvdata(dwmac->dev);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
index 600fea8f712f..967a16212faf 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
@@ -59,22 +59,11 @@ enum power_event {
/* Energy Efficient Ethernet (EEE)
*
* LPI status, timer and control register offset
+ * For LPI control and status bit definitions, see common.h.
*/
#define LPI_CTRL_STATUS 0x0030
#define LPI_TIMER_CTRL 0x0034
-/* LPI control and status defines */
-#define LPI_CTRL_STATUS_LPITXA 0x00080000 /* Enable LPI TX Automate */
-#define LPI_CTRL_STATUS_PLSEN 0x00040000 /* Enable PHY Link Status */
-#define LPI_CTRL_STATUS_PLS 0x00020000 /* PHY Link Status */
-#define LPI_CTRL_STATUS_LPIEN 0x00010000 /* LPI Enable */
-#define LPI_CTRL_STATUS_RLPIST 0x00000200 /* Receive LPI state */
-#define LPI_CTRL_STATUS_TLPIST 0x00000100 /* Transmit LPI state */
-#define LPI_CTRL_STATUS_RLPIEX 0x00000008 /* Receive LPI Exit */
-#define LPI_CTRL_STATUS_RLPIEN 0x00000004 /* Receive LPI Entry */
-#define LPI_CTRL_STATUS_TLPIEX 0x00000002 /* Transmit LPI Exit */
-#define LPI_CTRL_STATUS_TLPIEN 0x00000001 /* Transmit LPI Entry */
-
/* GMAC HW ADDR regs */
#define GMAC_ADDR_HIGH(reg) ((reg > 15) ? 0x00000800 + (reg - 16) * 8 : \
0x00000040 + (reg * 8))
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
index 96bcda0856ec..a8b901cdf5cb 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
@@ -16,6 +16,7 @@
#include <linux/slab.h>
#include <linux/ethtool.h>
#include <linux/io.h>
+#include <linux/string_choices.h>
#include "stmmac.h"
#include "stmmac_pcs.h"
#include "stmmac_ptp.h"
@@ -342,31 +343,24 @@ static int dwmac1000_irq_status(struct mac_device_info *hw,
return ret;
}
-static void dwmac1000_set_eee_mode(struct mac_device_info *hw,
- bool en_tx_lpi_clockgating)
+static int dwmac1000_set_lpi_mode(struct mac_device_info *hw,
+ enum stmmac_lpi_mode mode,
+ bool en_tx_lpi_clockgating, u32 et)
{
void __iomem *ioaddr = hw->pcsr;
u32 value;
- /*TODO - en_tx_lpi_clockgating treatment */
+ if (mode == STMMAC_LPI_TIMER)
+ return -EOPNOTSUPP;
- /* Enable the link status receive on RGMII, SGMII ore SMII
- * receive path and instruct the transmit to enter in LPI
- * state.
- */
value = readl(ioaddr + LPI_CTRL_STATUS);
- value |= LPI_CTRL_STATUS_LPIEN | LPI_CTRL_STATUS_LPITXA;
+ if (mode == STMMAC_LPI_FORCED)
+ value |= LPI_CTRL_STATUS_LPIEN | LPI_CTRL_STATUS_LPITXA;
+ else
+ value &= ~(LPI_CTRL_STATUS_LPIEN | LPI_CTRL_STATUS_LPITXA);
writel(value, ioaddr + LPI_CTRL_STATUS);
-}
-
-static void dwmac1000_reset_eee_mode(struct mac_device_info *hw)
-{
- void __iomem *ioaddr = hw->pcsr;
- u32 value;
- value = readl(ioaddr + LPI_CTRL_STATUS);
- value &= ~(LPI_CTRL_STATUS_LPIEN | LPI_CTRL_STATUS_LPITXA);
- writel(value, ioaddr + LPI_CTRL_STATUS);
+ return 0;
}
static void dwmac1000_set_eee_pls(struct mac_device_info *hw, int link)
@@ -509,8 +503,7 @@ const struct stmmac_ops dwmac1000_ops = {
.pmt = dwmac1000_pmt,
.set_umac_addr = dwmac1000_set_umac_addr,
.get_umac_addr = dwmac1000_get_umac_addr,
- .set_eee_mode = dwmac1000_set_eee_mode,
- .reset_eee_mode = dwmac1000_reset_eee_mode,
+ .set_lpi_mode = dwmac1000_set_lpi_mode,
.set_eee_timer = dwmac1000_set_eee_timer,
.set_eee_pls = dwmac1000_set_eee_pls,
.debug = dwmac1000_debug,
@@ -633,7 +626,7 @@ int dwmac1000_ptp_enable(struct ptp_clock_info *ptp,
}
netdev_dbg(priv->dev, "Auxiliary Snapshot %s.\n",
- on ? "enabled" : "disabled");
+ str_enabled_disabled(on));
writel(tcr_val, ptpaddr + PTP_TCR);
/* wait for auxts fifo clear to finish */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
index 184d41a306af..42fe29a4e300 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
@@ -177,23 +177,13 @@ enum power_event {
/* Energy Efficient Ethernet (EEE) for GMAC4
*
* LPI status, timer and control register offset
+ * For LPI control and status bit definitions, see common.h.
*/
#define GMAC4_LPI_CTRL_STATUS 0xd0
#define GMAC4_LPI_TIMER_CTRL 0xd4
#define GMAC4_LPI_ENTRY_TIMER 0xd8
#define GMAC4_MAC_ONEUS_TIC_COUNTER 0xdc
-/* LPI control and status defines */
-#define GMAC4_LPI_CTRL_STATUS_LPITCSE BIT(21) /* LPI Tx Clock Stop Enable */
-#define GMAC4_LPI_CTRL_STATUS_LPIATE BIT(20) /* LPI Timer Enable */
-#define GMAC4_LPI_CTRL_STATUS_LPITXA BIT(19) /* Enable LPI TX Automate */
-#define GMAC4_LPI_CTRL_STATUS_PLS BIT(17) /* PHY Link Status */
-#define GMAC4_LPI_CTRL_STATUS_LPIEN BIT(16) /* LPI Enable */
-#define GMAC4_LPI_CTRL_STATUS_RLPIEX BIT(3) /* Receive LPI Exit */
-#define GMAC4_LPI_CTRL_STATUS_RLPIEN BIT(2) /* Receive LPI Entry */
-#define GMAC4_LPI_CTRL_STATUS_TLPIEX BIT(1) /* Transmit LPI Exit */
-#define GMAC4_LPI_CTRL_STATUS_TLPIEN BIT(0) /* Transmit LPI Entry */
-
/* MAC Debug bitmap */
#define GMAC_DEBUG_TFCSTS_MASK GENMASK(18, 17)
#define GMAC_DEBUG_TFCSTS_SHIFT 17
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
index 9ed8620580a8..cc4ddf608652 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
@@ -376,33 +376,46 @@ static void dwmac4_get_umac_addr(struct mac_device_info *hw,
GMAC_ADDR_LOW(reg_n));
}
-static void dwmac4_set_eee_mode(struct mac_device_info *hw,
- bool en_tx_lpi_clockgating)
+static int dwmac4_set_lpi_mode(struct mac_device_info *hw,
+ enum stmmac_lpi_mode mode,
+ bool en_tx_lpi_clockgating, u32 et)
{
void __iomem *ioaddr = hw->pcsr;
- u32 value;
+ u32 value, mask;
- /* Enable the link status receive on RGMII, SGMII ore SMII
- * receive path and instruct the transmit to enter in LPI
- * state.
- */
- value = readl(ioaddr + GMAC4_LPI_CTRL_STATUS);
- value |= GMAC4_LPI_CTRL_STATUS_LPIEN | GMAC4_LPI_CTRL_STATUS_LPITXA;
+ if (mode == STMMAC_LPI_DISABLE) {
+ value = 0;
+ } else {
+ value = LPI_CTRL_STATUS_LPIEN | LPI_CTRL_STATUS_LPITXA;
- if (en_tx_lpi_clockgating)
- value |= GMAC4_LPI_CTRL_STATUS_LPITCSE;
+ if (mode == STMMAC_LPI_TIMER) {
+ /* Return ERANGE if the timer is larger than the
+ * register field.
+ */
+ if (et > STMMAC_ET_MAX)
+ return -ERANGE;
- writel(value, ioaddr + GMAC4_LPI_CTRL_STATUS);
-}
+ /* Set the hardware LPI entry timer */
+ writel(et, ioaddr + GMAC4_LPI_ENTRY_TIMER);
-static void dwmac4_reset_eee_mode(struct mac_device_info *hw)
-{
- void __iomem *ioaddr = hw->pcsr;
- u32 value;
+ /* Interpret a zero LPI entry timer to mean
+ * immediate entry into LPI mode.
+ */
+ if (et)
+ value |= LPI_CTRL_STATUS_LPIATE;
+ }
- value = readl(ioaddr + GMAC4_LPI_CTRL_STATUS);
- value &= ~(GMAC4_LPI_CTRL_STATUS_LPIEN | GMAC4_LPI_CTRL_STATUS_LPITXA);
+ if (en_tx_lpi_clockgating)
+ value |= LPI_CTRL_STATUS_LPITCSE;
+ }
+
+ mask = LPI_CTRL_STATUS_LPIATE | LPI_CTRL_STATUS_LPIEN |
+ LPI_CTRL_STATUS_LPITXA | LPI_CTRL_STATUS_LPITCSE;
+
+ value |= readl(ioaddr + GMAC4_LPI_CTRL_STATUS) & ~mask;
writel(value, ioaddr + GMAC4_LPI_CTRL_STATUS);
+
+ return 0;
}
static void dwmac4_set_eee_pls(struct mac_device_info *hw, int link)
@@ -413,34 +426,13 @@ static void dwmac4_set_eee_pls(struct mac_device_info *hw, int link)
value = readl(ioaddr + GMAC4_LPI_CTRL_STATUS);
if (link)
- value |= GMAC4_LPI_CTRL_STATUS_PLS;
+ value |= LPI_CTRL_STATUS_PLS;
else
- value &= ~GMAC4_LPI_CTRL_STATUS_PLS;
+ value &= ~LPI_CTRL_STATUS_PLS;
writel(value, ioaddr + GMAC4_LPI_CTRL_STATUS);
}
-static void dwmac4_set_eee_lpi_entry_timer(struct mac_device_info *hw, u32 et)
-{
- void __iomem *ioaddr = hw->pcsr;
- u32 value = et & STMMAC_ET_MAX;
- int regval;
-
- /* Program LPI entry timer value into register */
- writel(value, ioaddr + GMAC4_LPI_ENTRY_TIMER);
-
- /* Enable/disable LPI entry timer */
- regval = readl(ioaddr + GMAC4_LPI_CTRL_STATUS);
- regval |= GMAC4_LPI_CTRL_STATUS_LPIEN | GMAC4_LPI_CTRL_STATUS_LPITXA;
-
- if (et)
- regval |= GMAC4_LPI_CTRL_STATUS_LPIATE;
- else
- regval &= ~GMAC4_LPI_CTRL_STATUS_LPIATE;
-
- writel(regval, ioaddr + GMAC4_LPI_CTRL_STATUS);
-}
-
static void dwmac4_set_eee_timer(struct mac_device_info *hw, int ls, int tw)
{
void __iomem *ioaddr = hw->pcsr;
@@ -849,17 +841,17 @@ static int dwmac4_irq_status(struct mac_device_info *hw,
/* Clear LPI interrupt by reading MAC_LPI_Control_Status */
u32 status = readl(ioaddr + GMAC4_LPI_CTRL_STATUS);
- if (status & GMAC4_LPI_CTRL_STATUS_TLPIEN) {
+ if (status & LPI_CTRL_STATUS_TLPIEN) {
ret |= CORE_IRQ_TX_PATH_IN_LPI_MODE;
x->irq_tx_path_in_lpi_mode_n++;
}
- if (status & GMAC4_LPI_CTRL_STATUS_TLPIEX) {
+ if (status & LPI_CTRL_STATUS_TLPIEX) {
ret |= CORE_IRQ_TX_PATH_EXIT_LPI_MODE;
x->irq_tx_path_exit_lpi_mode_n++;
}
- if (status & GMAC4_LPI_CTRL_STATUS_RLPIEN)
+ if (status & LPI_CTRL_STATUS_RLPIEN)
x->irq_rx_path_in_lpi_mode_n++;
- if (status & GMAC4_LPI_CTRL_STATUS_RLPIEX)
+ if (status & LPI_CTRL_STATUS_RLPIEX)
x->irq_rx_path_exit_lpi_mode_n++;
}
@@ -1201,9 +1193,7 @@ const struct stmmac_ops dwmac4_ops = {
.pmt = dwmac4_pmt,
.set_umac_addr = dwmac4_set_umac_addr,
.get_umac_addr = dwmac4_get_umac_addr,
- .set_eee_mode = dwmac4_set_eee_mode,
- .reset_eee_mode = dwmac4_reset_eee_mode,
- .set_eee_lpi_entry_timer = dwmac4_set_eee_lpi_entry_timer,
+ .set_lpi_mode = dwmac4_set_lpi_mode,
.set_eee_timer = dwmac4_set_eee_timer,
.set_eee_pls = dwmac4_set_eee_pls,
.pcs_ctrl_ane = dwmac4_ctrl_ane,
@@ -1245,9 +1235,7 @@ const struct stmmac_ops dwmac410_ops = {
.pmt = dwmac4_pmt,
.set_umac_addr = dwmac4_set_umac_addr,
.get_umac_addr = dwmac4_get_umac_addr,
- .set_eee_mode = dwmac4_set_eee_mode,
- .reset_eee_mode = dwmac4_reset_eee_mode,
- .set_eee_lpi_entry_timer = dwmac4_set_eee_lpi_entry_timer,
+ .set_lpi_mode = dwmac4_set_lpi_mode,
.set_eee_timer = dwmac4_set_eee_timer,
.set_eee_pls = dwmac4_set_eee_pls,
.pcs_ctrl_ane = dwmac4_ctrl_ane,
@@ -1291,9 +1279,7 @@ const struct stmmac_ops dwmac510_ops = {
.pmt = dwmac4_pmt,
.set_umac_addr = dwmac4_set_umac_addr,
.get_umac_addr = dwmac4_get_umac_addr,
- .set_eee_mode = dwmac4_set_eee_mode,
- .reset_eee_mode = dwmac4_reset_eee_mode,
- .set_eee_lpi_entry_timer = dwmac4_set_eee_lpi_entry_timer,
+ .set_lpi_mode = dwmac4_set_lpi_mode,
.set_eee_timer = dwmac4_set_eee_timer,
.set_eee_pls = dwmac4_set_eee_pls,
.pcs_ctrl_ane = dwmac4_ctrl_ane,
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
index 20027d3c25a7..a03f5d771566 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
@@ -112,14 +112,7 @@
#define XGMAC_MGKPKTEN BIT(1)
#define XGMAC_PWRDWN BIT(0)
#define XGMAC_LPI_CTRL 0x000000d0
-#define XGMAC_TXCGE BIT(21)
-#define XGMAC_LPITXA BIT(19)
-#define XGMAC_PLS BIT(17)
-#define XGMAC_LPITXEN BIT(16)
-#define XGMAC_RLPIEX BIT(3)
-#define XGMAC_RLPIEN BIT(2)
-#define XGMAC_TLPIEX BIT(1)
-#define XGMAC_TLPIEN BIT(0)
+/* For definitions, see LPI_CTRL_STATUS_xxx in common.h */
#define XGMAC_LPI_TIMER_CTRL 0x000000d4
#define XGMAC_HW_FEATURE0 0x0000011c
#define XGMAC_HWFEAT_EDMA BIT(31)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
index 9a60a6e8f633..a6d395c6bacd 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
@@ -316,17 +316,17 @@ static int dwxgmac2_host_irq_status(struct mac_device_info *hw,
if (stat & XGMAC_LPIIS) {
u32 lpi = readl(ioaddr + XGMAC_LPI_CTRL);
- if (lpi & XGMAC_TLPIEN) {
+ if (lpi & LPI_CTRL_STATUS_TLPIEN) {
ret |= CORE_IRQ_TX_PATH_IN_LPI_MODE;
x->irq_tx_path_in_lpi_mode_n++;
}
- if (lpi & XGMAC_TLPIEX) {
+ if (lpi & LPI_CTRL_STATUS_TLPIEX) {
ret |= CORE_IRQ_TX_PATH_EXIT_LPI_MODE;
x->irq_tx_path_exit_lpi_mode_n++;
}
- if (lpi & XGMAC_RLPIEN)
+ if (lpi & LPI_CTRL_STATUS_RLPIEN)
x->irq_rx_path_in_lpi_mode_n++;
- if (lpi & XGMAC_RLPIEX)
+ if (lpi & LPI_CTRL_STATUS_RLPIEX)
x->irq_rx_path_exit_lpi_mode_n++;
}
@@ -425,29 +425,28 @@ static void dwxgmac2_get_umac_addr(struct mac_device_info *hw,
addr[5] = (hi_addr >> 8) & 0xff;
}
-static void dwxgmac2_set_eee_mode(struct mac_device_info *hw,
- bool en_tx_lpi_clockgating)
+static int dwxgmac2_set_lpi_mode(struct mac_device_info *hw,
+ enum stmmac_lpi_mode mode,
+ bool en_tx_lpi_clockgating, u32 et)
{
void __iomem *ioaddr = hw->pcsr;
u32 value;
- value = readl(ioaddr + XGMAC_LPI_CTRL);
-
- value |= XGMAC_LPITXEN | XGMAC_LPITXA;
- if (en_tx_lpi_clockgating)
- value |= XGMAC_TXCGE;
-
- writel(value, ioaddr + XGMAC_LPI_CTRL);
-}
-
-static void dwxgmac2_reset_eee_mode(struct mac_device_info *hw)
-{
- void __iomem *ioaddr = hw->pcsr;
- u32 value;
+ if (mode == STMMAC_LPI_TIMER)
+ return -EOPNOTSUPP;
value = readl(ioaddr + XGMAC_LPI_CTRL);
- value &= ~(XGMAC_LPITXEN | XGMAC_LPITXA | XGMAC_TXCGE);
+ if (mode == STMMAC_LPI_FORCED) {
+ value |= LPI_CTRL_STATUS_LPIEN | LPI_CTRL_STATUS_LPITXA;
+ if (en_tx_lpi_clockgating)
+ value |= LPI_CTRL_STATUS_LPITCSE;
+ } else {
+ value &= ~(LPI_CTRL_STATUS_LPIEN | LPI_CTRL_STATUS_LPITXA |
+ LPI_CTRL_STATUS_LPITCSE);
+ }
writel(value, ioaddr + XGMAC_LPI_CTRL);
+
+ return 0;
}
static void dwxgmac2_set_eee_pls(struct mac_device_info *hw, int link)
@@ -457,9 +456,9 @@ static void dwxgmac2_set_eee_pls(struct mac_device_info *hw, int link)
value = readl(ioaddr + XGMAC_LPI_CTRL);
if (link)
- value |= XGMAC_PLS;
+ value |= LPI_CTRL_STATUS_PLS;
else
- value &= ~XGMAC_PLS;
+ value &= ~LPI_CTRL_STATUS_PLS;
writel(value, ioaddr + XGMAC_LPI_CTRL);
}
@@ -1525,8 +1524,7 @@ const struct stmmac_ops dwxgmac210_ops = {
.pmt = dwxgmac2_pmt,
.set_umac_addr = dwxgmac2_set_umac_addr,
.get_umac_addr = dwxgmac2_get_umac_addr,
- .set_eee_mode = dwxgmac2_set_eee_mode,
- .reset_eee_mode = dwxgmac2_reset_eee_mode,
+ .set_lpi_mode = dwxgmac2_set_lpi_mode,
.set_eee_timer = dwxgmac2_set_eee_timer,
.set_eee_pls = dwxgmac2_set_eee_pls,
.debug = NULL,
@@ -1582,8 +1580,7 @@ const struct stmmac_ops dwxlgmac2_ops = {
.pmt = dwxgmac2_pmt,
.set_umac_addr = dwxgmac2_set_umac_addr,
.get_umac_addr = dwxgmac2_get_umac_addr,
- .set_eee_mode = dwxgmac2_set_eee_mode,
- .reset_eee_mode = dwxgmac2_reset_eee_mode,
+ .set_lpi_mode = dwxgmac2_set_lpi_mode,
.set_eee_timer = dwxgmac2_set_eee_timer,
.set_eee_pls = dwxgmac2_set_eee_pls,
.debug = NULL,
diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h
index 0f200b72c225..27c63a9fc163 100644
--- a/drivers/net/ethernet/stmicro/stmmac/hwif.h
+++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h
@@ -306,6 +306,12 @@ struct stmmac_pps_cfg;
struct stmmac_rss;
struct stmmac_est;
+enum stmmac_lpi_mode {
+ STMMAC_LPI_DISABLE,
+ STMMAC_LPI_FORCED,
+ STMMAC_LPI_TIMER,
+};
+
/* Helpers to program the MAC core */
struct stmmac_ops {
/* MAC core initialization */
@@ -360,10 +366,9 @@ struct stmmac_ops {
unsigned int reg_n);
void (*get_umac_addr)(struct mac_device_info *hw, unsigned char *addr,
unsigned int reg_n);
- void (*set_eee_mode)(struct mac_device_info *hw,
- bool en_tx_lpi_clockgating);
- void (*reset_eee_mode)(struct mac_device_info *hw);
- void (*set_eee_lpi_entry_timer)(struct mac_device_info *hw, u32 et);
+ int (*set_lpi_mode)(struct mac_device_info *hw,
+ enum stmmac_lpi_mode mode,
+ bool en_tx_lpi_clockgating, u32 et);
void (*set_eee_timer)(struct mac_device_info *hw, int ls, int tw);
void (*set_eee_pls)(struct mac_device_info *hw, int link);
void (*debug)(struct stmmac_priv *priv, void __iomem *ioaddr,
@@ -467,12 +472,8 @@ struct stmmac_ops {
stmmac_do_void_callback(__priv, mac, set_umac_addr, __args)
#define stmmac_get_umac_addr(__priv, __args...) \
stmmac_do_void_callback(__priv, mac, get_umac_addr, __args)
-#define stmmac_set_eee_mode(__priv, __args...) \
- stmmac_do_void_callback(__priv, mac, set_eee_mode, __args)
-#define stmmac_reset_eee_mode(__priv, __args...) \
- stmmac_do_void_callback(__priv, mac, reset_eee_mode, __args)
-#define stmmac_set_eee_lpi_timer(__priv, __args...) \
- stmmac_do_void_callback(__priv, mac, set_eee_lpi_entry_timer, __args)
+#define stmmac_set_lpi_mode(__priv, __args...) \
+ stmmac_do_callback(__priv, mac, set_lpi_mode, __args)
#define stmmac_set_eee_timer(__priv, __args...) \
stmmac_do_void_callback(__priv, mac, set_eee_timer, __args)
#define stmmac_set_eee_pls(__priv, __args...) \
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index f05cae103d83..bddfa0f4aa21 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -106,6 +106,8 @@ struct stmmac_metadata_request {
struct stmmac_priv *priv;
struct dma_desc *tx_desc;
bool *set_ic;
+ struct dma_edesc *edesc;
+ int tbs;
};
struct stmmac_xsk_tx_complete {
@@ -257,7 +259,7 @@ struct stmmac_priv {
/* Frequently used values are kept adjacent for cache effect */
u32 tx_coal_frames[MTL_MAX_TX_QUEUES];
u32 tx_coal_timer[MTL_MAX_TX_QUEUES];
- u32 rx_coal_frames[MTL_MAX_TX_QUEUES];
+ u32 rx_coal_frames[MTL_MAX_RX_QUEUES];
int hwts_tx_en;
bool tx_path_in_lpi_mode;
@@ -265,8 +267,7 @@ struct stmmac_priv {
int sph;
int sph_cap;
u32 sarc_type;
-
- u32 rx_riwt[MTL_MAX_TX_QUEUES];
+ u32 rx_riwt[MTL_MAX_RX_QUEUES];
int hwts_rx_en;
void __iomem *ioaddr;
@@ -281,9 +282,7 @@ struct stmmac_priv {
/* Generic channel for NAPI */
struct stmmac_channel channel[STMMAC_CH_MAX];
- int speed;
- unsigned int flow_ctrl;
- unsigned int pause;
+ unsigned int pause_time;
struct mii_bus *mii;
struct phylink_config phylink_config;
@@ -307,6 +306,7 @@ struct stmmac_priv {
struct timer_list eee_ctrl_timer;
int lpi_irq;
u32 tx_lpi_timer;
+ bool tx_lpi_clk_stop;
bool eee_enabled;
bool eee_active;
bool eee_sw_timer_en;
@@ -343,7 +343,7 @@ struct stmmac_priv {
char int_name_sfty[IFNAMSIZ + 10];
char int_name_sfty_ce[IFNAMSIZ + 10];
char int_name_sfty_ue[IFNAMSIZ + 10];
- char int_name_rx_irq[MTL_MAX_TX_QUEUES][IFNAMSIZ + 14];
+ char int_name_rx_irq[MTL_MAX_RX_QUEUES][IFNAMSIZ + 14];
char int_name_tx_irq[MTL_MAX_TX_QUEUES][IFNAMSIZ + 18];
#ifdef CONFIG_DEBUG_FS
@@ -407,6 +407,8 @@ int stmmac_dvr_probe(struct device *device,
int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt);
int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size);
int stmmac_bus_clks_config(struct stmmac_priv *priv, bool enabled);
+int stmmac_set_clk_tx_rate(void *bsp_priv, struct clk *clk_tx_i,
+ phy_interface_t interface, int speed);
static inline bool stmmac_xdp_is_enabled(struct stmmac_priv *priv)
{
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index c0ae7db96f46..279532609707 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -88,19 +88,20 @@ MODULE_PARM_DESC(phyaddr, "Physical device address");
#define STMMAC_XDP_TX BIT(1)
#define STMMAC_XDP_REDIRECT BIT(2)
-static int flow_ctrl = FLOW_AUTO;
+static int flow_ctrl = 0xdead;
module_param(flow_ctrl, int, 0644);
-MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");
+MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off] (obsolete)");
static int pause = PAUSE_TIME;
module_param(pause, int, 0644);
-MODULE_PARM_DESC(pause, "Flow Control Pause Time");
+MODULE_PARM_DESC(pause, "Flow Control Pause Time (units of 512 bit times)");
#define TC_DEFAULT 64
static int tc = TC_DEFAULT;
module_param(tc, int, 0644);
MODULE_PARM_DESC(tc, "DMA threshold control value");
+/* This is unused */
#define DEFAULT_BUFSIZE 1536
static int buf_sz = DEFAULT_BUFSIZE;
module_param(buf_sz, int, 0644);
@@ -178,6 +179,38 @@ int stmmac_bus_clks_config(struct stmmac_priv *priv, bool enabled)
EXPORT_SYMBOL_GPL(stmmac_bus_clks_config);
/**
+ * stmmac_set_clk_tx_rate() - set the clock rate for the MAC transmit clock
+ * @bsp_priv: BSP private data structure (unused)
+ * @clk_tx_i: the transmit clock
+ * @interface: the selected interface mode
+ * @speed: the speed that the MAC will be operating at
+ *
+ * Set the transmit clock rate for the MAC, normally 2.5MHz for 10Mbps,
+ * 25MHz for 100Mbps and 125MHz for 1Gbps. This is suitable for at least
+ * MII, GMII, RGMII and RMII interface modes. Platforms can hook this into
+ * the plat_data->set_clk_tx_rate method directly, call it via their own
+ * implementation, or implement their own method should they have more
+ * complex requirements. It is intended to be used only through the
+ * set_clk_tx_rate method.
+ *
+ * plat_data->clk_tx_i must be filled in.
+ */
+int stmmac_set_clk_tx_rate(void *bsp_priv, struct clk *clk_tx_i,
+ phy_interface_t interface, int speed)
+{
+ long rate = rgmii_clock(speed);
+
+ /* Silently ignore unsupported speeds as rgmii_clock() only
+ * supports 10, 100 and 1000Mbps. We do not want to print
+ * errors for 2500 and higher speeds here.
+ */
+ if (rate < 0)
+ return 0;
+
+ return clk_set_rate(clk_tx_i, rate);
+}
+EXPORT_SYMBOL_GPL(stmmac_set_clk_tx_rate);
+
+/**
* stmmac_verify_args - verify the driver parameters.
* Description: it checks the driver parameters and set a default in case of
* errors.
@@ -186,14 +219,11 @@ static void stmmac_verify_args(void)
{
if (unlikely(watchdog < 0))
watchdog = TX_TIMEO;
- if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
- buf_sz = DEFAULT_BUFSIZE;
- if (unlikely(flow_ctrl > 1))
- flow_ctrl = FLOW_AUTO;
- else if (likely(flow_ctrl < 0))
- flow_ctrl = FLOW_OFF;
if (unlikely((pause < 0) || (pause > 0xffff)))
pause = PAUSE_TIME;
+
+ if (flow_ctrl != 0xdead)
+ pr_warn("stmmac: module parameter 'flow_ctrl' is obsolete - please remove from your module configuration\n");
}
static void __stmmac_disable_all_queues(struct stmmac_priv *priv)
@@ -390,16 +420,6 @@ static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
return dirty;
}
-static void stmmac_disable_hw_lpi_timer(struct stmmac_priv *priv)
-{
- stmmac_set_eee_lpi_timer(priv, priv->hw, 0);
-}
-
-static void stmmac_enable_hw_lpi_timer(struct stmmac_priv *priv)
-{
- stmmac_set_eee_lpi_timer(priv, priv->hw, priv->tx_lpi_timer);
-}
-
static bool stmmac_eee_tx_busy(struct stmmac_priv *priv)
{
u32 tx_cnt = priv->plat->tx_queues_to_use;
@@ -436,8 +456,8 @@ static void stmmac_try_to_start_sw_lpi(struct stmmac_priv *priv)
/* Check and enter in LPI mode */
if (!priv->tx_path_in_lpi_mode)
- stmmac_set_eee_mode(priv, priv->hw,
- priv->plat->flags & STMMAC_FLAG_EN_TX_LPI_CLOCKGATING);
+ stmmac_set_lpi_mode(priv, priv->hw, STMMAC_LPI_FORCED,
+ priv->tx_lpi_clk_stop, 0);
}
/**
@@ -447,8 +467,8 @@ static void stmmac_try_to_start_sw_lpi(struct stmmac_priv *priv)
*/
static void stmmac_stop_sw_lpi(struct stmmac_priv *priv)
{
- stmmac_reset_eee_mode(priv, priv->hw);
del_timer_sync(&priv->eee_ctrl_timer);
+ stmmac_set_lpi_mode(priv, priv->hw, STMMAC_LPI_DISABLE, false, 0);
priv->tx_path_in_lpi_mode = false;
}
@@ -466,74 +486,6 @@ static void stmmac_eee_ctrl_timer(struct timer_list *t)
stmmac_try_to_start_sw_lpi(priv);
}
-/**
- * stmmac_eee_init - init EEE
- * @priv: driver private structure
- * @active: indicates whether EEE should be enabled.
- * Description:
- * if the GMAC supports the EEE (from the HW cap reg) and the phy device
- * can also manage EEE, this function enable the LPI state and start related
- * timer.
- */
-static void stmmac_eee_init(struct stmmac_priv *priv, bool active)
-{
- priv->eee_active = active;
-
- /* Check if MAC core supports the EEE feature. */
- if (!priv->dma_cap.eee) {
- priv->eee_enabled = false;
- return;
- }
-
- mutex_lock(&priv->lock);
-
- /* Check if it needs to be deactivated */
- if (!priv->eee_active) {
- if (priv->eee_enabled) {
- netdev_dbg(priv->dev, "disable EEE\n");
- priv->eee_sw_timer_en = false;
- stmmac_disable_hw_lpi_timer(priv);
- del_timer_sync(&priv->eee_ctrl_timer);
- stmmac_set_eee_timer(priv, priv->hw, 0,
- STMMAC_DEFAULT_TWT_LS);
- if (priv->hw->xpcs)
- xpcs_config_eee(priv->hw->xpcs,
- priv->plat->mult_fact_100ns,
- false);
- }
- priv->eee_enabled = false;
- mutex_unlock(&priv->lock);
- return;
- }
-
- if (priv->eee_active && !priv->eee_enabled) {
- stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS,
- STMMAC_DEFAULT_TWT_LS);
- if (priv->hw->xpcs)
- xpcs_config_eee(priv->hw->xpcs,
- priv->plat->mult_fact_100ns,
- true);
- }
-
- if (priv->plat->has_gmac4 && priv->tx_lpi_timer <= STMMAC_ET_MAX) {
- /* Use hardware LPI mode */
- del_timer_sync(&priv->eee_ctrl_timer);
- priv->tx_path_in_lpi_mode = false;
- priv->eee_sw_timer_en = false;
- stmmac_enable_hw_lpi_timer(priv);
- } else {
- /* Use software LPI mode */
- priv->eee_sw_timer_en = true;
- stmmac_disable_hw_lpi_timer(priv);
- stmmac_restart_sw_lpi_timer(priv);
- }
-
- priv->eee_enabled = true;
-
- mutex_unlock(&priv->lock);
- netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
-}
-
/* stmmac_get_tx_hwtstamp - get HW TX timestamps
* @priv: driver private structure
* @p : descriptor pointer
@@ -935,14 +887,16 @@ static void stmmac_release_ptp(struct stmmac_priv *priv)
* stmmac_mac_flow_ctrl - Configure flow control in all queues
* @priv: driver private structure
* @duplex: duplex passed to the next function
+ * @flow_ctrl: desired flow control modes
* Description: It is used for configuring the flow control in all queues
*/
-static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
+static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex,
+ unsigned int flow_ctrl)
{
u32 tx_cnt = priv->plat->tx_queues_to_use;
- stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl,
- priv->pause, tx_cnt);
+ stmmac_flow_ctrl(priv, priv->hw, duplex, flow_ctrl, priv->pause_time,
+ tx_cnt);
}
static unsigned long stmmac_mac_get_caps(struct phylink_config *config,
@@ -1002,7 +956,9 @@ static void stmmac_mac_link_up(struct phylink_config *config,
bool tx_pause, bool rx_pause)
{
struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
+ unsigned int flow_ctrl;
u32 old_ctrl, ctrl;
+ int ret;
if ((priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
priv->plat->serdes_powerup)
@@ -1070,8 +1026,6 @@ static void stmmac_mac_link_up(struct phylink_config *config,
}
}
- priv->speed = speed;
-
if (priv->plat->fix_mac_speed)
priv->plat->fix_mac_speed(priv->plat->bsp_priv, speed, mode);
@@ -1082,19 +1036,29 @@ static void stmmac_mac_link_up(struct phylink_config *config,
/* Flow Control operation */
if (rx_pause && tx_pause)
- priv->flow_ctrl = FLOW_AUTO;
+ flow_ctrl = FLOW_AUTO;
else if (rx_pause && !tx_pause)
- priv->flow_ctrl = FLOW_RX;
+ flow_ctrl = FLOW_RX;
else if (!rx_pause && tx_pause)
- priv->flow_ctrl = FLOW_TX;
+ flow_ctrl = FLOW_TX;
else
- priv->flow_ctrl = FLOW_OFF;
+ flow_ctrl = FLOW_OFF;
- stmmac_mac_flow_ctrl(priv, duplex);
+ stmmac_mac_flow_ctrl(priv, duplex, flow_ctrl);
if (ctrl != old_ctrl)
writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
+ if (priv->plat->set_clk_tx_rate) {
+ ret = priv->plat->set_clk_tx_rate(priv->plat->bsp_priv,
+ priv->plat->clk_tx_i,
+ interface, speed);
+ if (ret < 0)
+ netdev_err(priv->dev,
+ "failed to configure transmit clock for %dMbps: %pe\n",
+ speed, ERR_PTR(ret));
+ }
+
stmmac_mac_set(priv, priv->ioaddr, true);
if (priv->dma_cap.eee)
stmmac_set_eee_pls(priv, priv->hw, true);
@@ -1110,16 +1074,70 @@ static void stmmac_mac_disable_tx_lpi(struct phylink_config *config)
{
struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
- stmmac_eee_init(priv, false);
+ priv->eee_active = false;
+
+ mutex_lock(&priv->lock);
+
+ priv->eee_enabled = false;
+
+ netdev_dbg(priv->dev, "disable EEE\n");
+ priv->eee_sw_timer_en = false;
+ del_timer_sync(&priv->eee_ctrl_timer);
+ stmmac_set_lpi_mode(priv, priv->hw, STMMAC_LPI_DISABLE, false, 0);
+ priv->tx_path_in_lpi_mode = false;
+
+ stmmac_set_eee_timer(priv, priv->hw, 0, STMMAC_DEFAULT_TWT_LS);
+ mutex_unlock(&priv->lock);
}
static int stmmac_mac_enable_tx_lpi(struct phylink_config *config, u32 timer,
bool tx_clk_stop)
{
struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
+ int ret;
priv->tx_lpi_timer = timer;
- stmmac_eee_init(priv, true);
+ priv->eee_active = true;
+
+ mutex_lock(&priv->lock);
+
+ priv->eee_enabled = true;
+
+ /* Update the transmit clock stop according to PHY capability if
+ * the platform allows
+ */
+ if (priv->plat->flags & STMMAC_FLAG_EN_TX_LPI_CLK_PHY_CAP)
+ priv->tx_lpi_clk_stop = tx_clk_stop;
+
+ stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS,
+ STMMAC_DEFAULT_TWT_LS);
+
+ /* Try to configure the hardware timer. */
+ ret = stmmac_set_lpi_mode(priv, priv->hw, STMMAC_LPI_TIMER,
+ priv->tx_lpi_clk_stop, priv->tx_lpi_timer);
+
+ if (ret) {
+ /* Hardware timer mode not supported, or value out of range.
+ * Fall back to using software LPI mode
+ */
+ priv->eee_sw_timer_en = true;
+ stmmac_restart_sw_lpi_timer(priv);
+ }
+
+ mutex_unlock(&priv->lock);
+ netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
+
+ return 0;
+}
+
+static int stmmac_mac_finish(struct phylink_config *config, unsigned int mode,
+ phy_interface_t interface)
+{
+ struct net_device *ndev = to_net_dev(config->dev);
+ struct stmmac_priv *priv = netdev_priv(ndev);
+
+ if (priv->plat->mac_finish)
+ priv->plat->mac_finish(ndev, priv->plat->bsp_priv, mode, interface);
return 0;
}
@@ -1132,6 +1150,7 @@ static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
.mac_link_up = stmmac_mac_link_up,
.mac_disable_tx_lpi = stmmac_mac_disable_tx_lpi,
.mac_enable_tx_lpi = stmmac_mac_enable_tx_lpi,
+ .mac_finish = stmmac_mac_finish,
};
/**
@@ -1254,6 +1273,10 @@ static int stmmac_phy_setup(struct stmmac_priv *priv)
if (!(priv->plat->flags & STMMAC_FLAG_RX_CLK_RUNS_IN_LPI))
priv->phylink_config.eee_rx_clk_stop_enable = true;
+ /* Set the default transmit clock stop bit based on the platform glue */
+ priv->tx_lpi_clk_stop = priv->plat->flags &
+ STMMAC_FLAG_EN_TX_LPI_CLOCKGATING;
+
mdio_bus_data = priv->plat->mdio_bus_data;
if (mdio_bus_data)
priv->phylink_config.default_an_inband =
@@ -2524,9 +2547,20 @@ static u64 stmmac_xsk_fill_timestamp(void *_priv)
return 0;
}
+static void stmmac_xsk_request_launch_time(u64 launch_time, void *_priv)
+{
+ struct timespec64 ts = ns_to_timespec64(launch_time);
+ struct stmmac_metadata_request *meta_req = _priv;
+
+ if (meta_req->tbs & STMMAC_TBS_EN)
+ stmmac_set_desc_tbs(meta_req->priv, meta_req->edesc, ts.tv_sec,
+ ts.tv_nsec);
+}
+
static const struct xsk_tx_metadata_ops stmmac_xsk_tx_metadata_ops = {
.tmo_request_timestamp = stmmac_xsk_request_timestamp,
.tmo_fill_timestamp = stmmac_xsk_fill_timestamp,
+ .tmo_request_launch_time = stmmac_xsk_request_launch_time,
};
static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
@@ -2610,6 +2644,8 @@ static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
meta_req.priv = priv;
meta_req.tx_desc = tx_desc;
meta_req.set_ic = &set_ic;
+ meta_req.tbs = tx_q->tbs;
+ meta_req.edesc = &tx_q->dma_entx[entry];
xsk_tx_metadata_request(meta, &stmmac_xsk_tx_metadata_ops,
&meta_req);
if (set_ic) {
@@ -3072,7 +3108,7 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv)
int ret = 0;
if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
- dev_err(priv->device, "Invalid DMA configuration\n");
+ netdev_err(priv->dev, "Invalid DMA configuration\n");
return -EINVAL;
}
@@ -3081,7 +3117,7 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv)
ret = stmmac_reset(priv, priv->ioaddr);
if (ret) {
- dev_err(priv->device, "Failed to reset the dma\n");
+ netdev_err(priv->dev, "Failed to reset the dma\n");
return ret;
}
@@ -3199,8 +3235,7 @@ static void stmmac_init_coalesce(struct stmmac_priv *priv)
priv->tx_coal_frames[chan] = STMMAC_TX_FRAMES;
priv->tx_coal_timer[chan] = STMMAC_COAL_TX_TIMER;
- hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- tx_q->txtimer.function = stmmac_tx_timer;
+ hrtimer_setup(&tx_q->txtimer, stmmac_tx_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
}
for (chan = 0; chan < rx_channel_count; chan++)
@@ -3448,9 +3483,18 @@ static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
if (priv->hw->phylink_pcs)
phylink_pcs_pre_init(priv->phylink, priv->hw->phylink_pcs);
+ /* Note that clk_rx_i must be running for reset to complete. This
+ * clock may also be required when setting the MAC address.
+ *
+ * Block the receive clock stop for LPI mode at the PHY in case
+ * the link is established with EEE mode active.
+ */
+ phylink_rx_clk_stop_block(priv->phylink);
+
/* DMA initialization and SW reset */
ret = stmmac_init_dma_engine(priv);
if (ret < 0) {
+ phylink_rx_clk_stop_unblock(priv->phylink);
netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
__func__);
return ret;
@@ -3458,6 +3502,7 @@ static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
/* Copy the MAC addr into the HW */
stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);
+ phylink_rx_clk_stop_unblock(priv->phylink);
/* PS and related bits will be programmed according to the speed */
if (priv->hw->pcs) {
@@ -3568,7 +3613,9 @@ static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
/* Start the ball rolling... */
stmmac_start_all_dma(priv);
+ phylink_rx_clk_stop_block(priv->phylink);
stmmac_set_hw_vlan_mode(priv, priv->hw);
+ phylink_rx_clk_stop_unblock(priv->phylink);
return 0;
}
@@ -3640,7 +3687,6 @@ static int stmmac_request_irq_multi_msi(struct net_device *dev)
{
struct stmmac_priv *priv = netdev_priv(dev);
enum request_irq_err irq_err;
- cpumask_t cpu_mask;
int irq_idx = 0;
char *int_name;
int ret;
@@ -3769,9 +3815,8 @@ static int stmmac_request_irq_multi_msi(struct net_device *dev)
irq_idx = i;
goto irq_error;
}
- cpumask_clear(&cpu_mask);
- cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
- irq_set_affinity_hint(priv->rx_irq[i], &cpu_mask);
+ irq_set_affinity_hint(priv->rx_irq[i],
+ cpumask_of(i % num_online_cpus()));
}
/* Request Tx MSI irq */
@@ -3794,9 +3839,8 @@ static int stmmac_request_irq_multi_msi(struct net_device *dev)
irq_idx = i;
goto irq_error;
}
- cpumask_clear(&cpu_mask);
- cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
- irq_set_affinity_hint(priv->tx_irq[i], &cpu_mask);
+ irq_set_affinity_hint(priv->tx_irq[i],
+ cpumask_of(i % num_online_cpus()));
}
return 0;
@@ -3997,7 +4041,6 @@ static int __stmmac_open(struct net_device *dev,
}
}
- buf_sz = dma_conf->dma_buf_sz;
for (int i = 0; i < MTL_MAX_TX_QUEUES; i++)
if (priv->dma_conf.tx_queue[i].tbs & STMMAC_TBS_EN)
dma_conf->tx_queue[i].tbs = priv->dma_conf.tx_queue[i].tbs;
@@ -4102,9 +4145,6 @@ static int stmmac_release(struct net_device *dev)
/* Release and free the Rx/Tx resources */
free_dma_desc_resources(priv, &priv->dma_conf);
- /* Disable the MAC Rx/Tx */
- stmmac_mac_set(priv, priv->ioaddr, false);
-
/* Powerdown Serdes if there is */
if (priv->plat->serdes_powerdown)
priv->plat->serdes_powerdown(dev, priv->plat->bsp_priv);
@@ -5454,10 +5494,10 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
struct sk_buff *skb = NULL;
struct stmmac_xdp_buff ctx;
int xdp_status = 0;
- int buf_sz;
+ int bufsz;
dma_dir = page_pool_get_dma_dir(rx_q->page_pool);
- buf_sz = DIV_ROUND_UP(priv->dma_conf.dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;
+ bufsz = DIV_ROUND_UP(priv->dma_conf.dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;
limit = min(priv->dma_conf.dma_rx_size - 1, (unsigned int)limit);
if (netif_msg_rx_status(priv)) {
@@ -5570,7 +5610,7 @@ read_again:
net_prefetch(page_address(buf->page) +
buf->page_offset);
- xdp_init_buff(&ctx.xdp, buf_sz, &rx_q->xdp_rxq);
+ xdp_init_buff(&ctx.xdp, bufsz, &rx_q->xdp_rxq);
xdp_prepare_buff(&ctx.xdp, page_address(buf->page),
buf->page_offset, buf1_len, true);
@@ -5856,6 +5896,9 @@ static void stmmac_tx_timeout(struct net_device *dev, unsigned int txqueue)
* whenever multicast addresses must be enabled/disabled.
* Return value:
* void.
+ *
+ * FIXME: This may need RXC to be running, but it may be called with BH
+ * disabled, which means we can't call phylink_rx_clk_stop*().
*/
static void stmmac_set_rx_mode(struct net_device *dev)
{
@@ -5988,7 +6031,9 @@ static int stmmac_set_features(struct net_device *netdev,
else
priv->hw->hw_vlan_en = false;
+ phylink_rx_clk_stop_block(priv->phylink);
stmmac_set_hw_vlan_mode(priv, priv->hw);
+ phylink_rx_clk_stop_unblock(priv->phylink);
return 0;
}
@@ -6272,7 +6317,9 @@ static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
if (ret)
goto set_mac_error;
+ phylink_rx_clk_stop_block(priv->phylink);
stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);
+ phylink_rx_clk_stop_unblock(priv->phylink);
set_mac_error:
pm_runtime_put(priv->device);
@@ -6628,6 +6675,9 @@ static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double)
return stmmac_update_vlan_hash(priv, priv->hw, hash, pmatch, is_double);
}
+/* FIXME: This may need RXC to be running, but it may be called with BH
+ * disabled, which means we can't call phylink_rx_clk_stop*().
+ */
static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
{
struct stmmac_priv *priv = netdev_priv(ndev);
@@ -6659,6 +6709,9 @@ err_pm_put:
return ret;
}
+/* FIXME: This may need RXC to be running, but it may be called with BH
+ * disabled, which means we can't call phylink_rx_clk_stop*().
+ */
static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
{
struct stmmac_priv *priv = netdev_priv(ndev);
@@ -6970,8 +7023,7 @@ int stmmac_xdp_open(struct net_device *dev)
stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
tx_q->tx_tail_addr, chan);
- hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- tx_q->txtimer.function = stmmac_tx_timer;
+ hrtimer_setup(&tx_q->txtimer, stmmac_tx_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
}
/* Enable the MAC Rx/Tx */
@@ -7444,7 +7496,7 @@ int stmmac_dvr_probe(struct device *device,
return -ENOMEM;
stmmac_set_ethtool_ops(ndev);
- priv->pause = pause;
+ priv->pause_time = pause;
priv->plat = plat_dat;
priv->ioaddr = res->addr;
priv->dev->base_addr = (unsigned long)res->addr;
@@ -7640,9 +7692,6 @@ int stmmac_dvr_probe(struct device *device,
"%s: warning: maxmtu having invalid value (%d)\n",
__func__, priv->plat->maxmtu);
- if (flow_ctrl)
- priv->flow_ctrl = FLOW_AUTO; /* RX/TX pause on */
-
ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
/* Setup channels NAPI */
@@ -7744,8 +7793,6 @@ void stmmac_dvr_remove(struct device *dev)
pm_runtime_get_sync(dev);
- stmmac_stop_all_dma(priv);
- stmmac_mac_set(priv, priv->ioaddr, false);
unregister_netdev(ndev);
#ifdef CONFIG_DEBUG_FS
@@ -7816,19 +7863,16 @@ int stmmac_suspend(struct device *dev)
mutex_unlock(&priv->lock);
rtnl_lock();
- if (device_may_wakeup(priv->device) && priv->plat->pmt) {
- phylink_suspend(priv->phylink, true);
- } else {
- if (device_may_wakeup(priv->device))
- phylink_speed_down(priv->phylink, false);
- phylink_suspend(priv->phylink, false);
- }
+ if (device_may_wakeup(priv->device) && !priv->plat->pmt)
+ phylink_speed_down(priv->phylink, false);
+
+ phylink_suspend(priv->phylink,
+ device_may_wakeup(priv->device) && priv->plat->pmt);
rtnl_unlock();
if (stmmac_fpe_supported(priv))
timer_shutdown_sync(&priv->fpe_cfg.verify_timer);
- priv->speed = SPEED_UNKNOWN;
return 0;
}
EXPORT_SYMBOL_GPL(stmmac_suspend);
@@ -7912,16 +7956,12 @@ int stmmac_resume(struct device *dev)
}
rtnl_lock();
- if (device_may_wakeup(priv->device) && priv->plat->pmt) {
- phylink_resume(priv->phylink);
- } else {
- phylink_resume(priv->phylink);
- if (device_may_wakeup(priv->device))
- phylink_speed_up(priv->phylink);
- }
- rtnl_unlock();
- rtnl_lock();
+ /* Prepare the PHY to resume, ensuring that its clocks which are
+ * necessary for the MAC DMA reset to complete are running
+ */
+ phylink_prepare_resume(priv->phylink);
+
mutex_lock(&priv->lock);
stmmac_reset_queues_param(priv);
@@ -7931,14 +7971,25 @@ int stmmac_resume(struct device *dev)
stmmac_hw_setup(ndev, false);
stmmac_init_coalesce(priv);
+ phylink_rx_clk_stop_block(priv->phylink);
stmmac_set_rx_mode(ndev);
stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw);
+ phylink_rx_clk_stop_unblock(priv->phylink);
stmmac_enable_all_queues(priv);
stmmac_enable_all_dma_irq(priv);
mutex_unlock(&priv->lock);
+
+ /* phylink_resume() must be called after the hardware has been
+ * initialised because it may bring the link up immediately in a
+ * workqueue thread, which will race with initialisation.
+ */
+ phylink_resume(priv->phylink);
+ if (device_may_wakeup(priv->device) && !priv->plat->pmt)
+ phylink_speed_up(priv->phylink);
+
rtnl_unlock();
netif_device_attach(ndev);
@@ -7961,9 +8012,6 @@ static int __init stmmac_cmdline_opt(char *str)
} else if (!strncmp(opt, "phyaddr:", 8)) {
if (kstrtoint(opt + 8, 0, &phyaddr))
goto err;
- } else if (!strncmp(opt, "buf_sz:", 7)) {
- if (kstrtoint(opt + 7, 0, &buf_sz))
- goto err;
} else if (!strncmp(opt, "tc:", 3)) {
if (kstrtoint(opt + 3, 0, &tc))
goto err;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
index 0c7d81ddd440..836f2848dfeb 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
@@ -524,6 +524,9 @@ int stmmac_pcs_setup(struct net_device *ndev)
if (ret)
return dev_err_probe(priv->device, ret, "No xPCS found\n");
+ if (xpcs)
+ xpcs_config_eee_mult_fact(xpcs, priv->plat->mult_fact_100ns);
+
priv->hw->xpcs = xpcs;
return 0;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
index 352b01678c22..9c1b54b701f7 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
@@ -155,9 +155,9 @@ static int stmmac_pci_probe(struct pci_dev *pdev,
{
struct stmmac_pci_info *info = (struct stmmac_pci_info *)id->driver_data;
struct plat_stmmacenet_data *plat;
- struct stmmac_resources res;
- int i;
+ struct stmmac_resources res = {};
int ret;
+ int i;
plat = devm_kzalloc(&pdev->dev, sizeof(*plat), GFP_KERNEL);
if (!plat)
@@ -192,9 +192,9 @@ static int stmmac_pci_probe(struct pci_dev *pdev,
for (i = 0; i < PCI_STD_NUM_BARS; i++) {
if (pci_resource_len(pdev, i) == 0)
continue;
- ret = pcim_iomap_regions(pdev, BIT(i), pci_name(pdev));
- if (ret)
- return ret;
+ res.addr = pcim_iomap_region(pdev, i, STMMAC_RESOURCE_NAME);
+ if (IS_ERR(res.addr))
+ return PTR_ERR(res.addr);
break;
}
@@ -204,8 +204,6 @@ static int stmmac_pci_probe(struct pci_dev *pdev,
if (ret)
return ret;
- memset(&res, 0, sizeof(res));
- res.addr = pcim_iomap_table(pdev)[i];
res.wol_irq = pdev->irq;
res.irq = pdev->irq;
@@ -226,21 +224,11 @@ static int stmmac_pci_probe(struct pci_dev *pdev,
* stmmac_pci_remove
*
* @pdev: platform device pointer
- * Description: this function calls the main to free the net resources
- * and releases the PCI resources.
+ * Description: this function calls the main to free the net resources.
*/
static void stmmac_pci_remove(struct pci_dev *pdev)
{
- int i;
-
stmmac_dvr_remove(&pdev->dev);
-
- for (i = 0; i < PCI_STD_NUM_BARS; i++) {
- if (pci_resource_len(pdev, i) == 0)
- continue;
- pcim_iounmap_regions(pdev, BIT(i));
- break;
- }
}
static int __maybe_unused stmmac_pci_suspend(struct device *dev)
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index d0e61aa1a495..c73eff6a56b8 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -405,6 +405,17 @@ static int stmmac_of_get_mac_mode(struct device_node *np)
return -ENODEV;
}
+/* Compatible string array for all gmac4 devices */
+static const char * const stmmac_gmac4_compats[] = {
+ "snps,dwmac-4.00",
+ "snps,dwmac-4.10a",
+ "snps,dwmac-4.20a",
+ "snps,dwmac-5.10a",
+ "snps,dwmac-5.20",
+ "snps,dwmac-5.30a",
+ NULL
+};
+
/**
* stmmac_probe_config_dt - parse device-tree driver parameters
* @pdev: platform_device structure
@@ -486,8 +497,11 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
plat->force_sf_dma_mode =
of_property_read_bool(np, "snps,force_sf_dma_mode");
- if (of_property_read_bool(np, "snps,en-tx-lpi-clockgating"))
+ if (of_property_read_bool(np, "snps,en-tx-lpi-clockgating")) {
+ dev_warn(&pdev->dev,
+ "OF property snps,en-tx-lpi-clockgating is deprecated, please convert driver to use STMMAC_FLAG_EN_TX_LPI_CLK_PHY_CAP\n");
plat->flags |= STMMAC_FLAG_EN_TX_LPI_CLOCKGATING;
+ }
/* Set the maxmtu to a default of JUMBO_LEN in case the
* parameter is not present in the device tree.
@@ -538,11 +552,7 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
plat->pmt = 1;
}
- if (of_device_is_compatible(np, "snps,dwmac-4.00") ||
- of_device_is_compatible(np, "snps,dwmac-4.10a") ||
- of_device_is_compatible(np, "snps,dwmac-4.20a") ||
- of_device_is_compatible(np, "snps,dwmac-5.10a") ||
- of_device_is_compatible(np, "snps,dwmac-5.20")) {
+ if (of_device_compatible_match(np, stmmac_gmac4_compats)) {
plat->has_gmac4 = 1;
plat->has_gmac = 0;
plat->pmt = 1;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c
index 3ca1c2a816ff..a01bc394d1ac 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c
@@ -382,14 +382,14 @@ static int stmmac_test_phy_loopback(struct stmmac_priv *priv)
if (!priv->dev->phydev)
return -EOPNOTSUPP;
- ret = phy_loopback(priv->dev->phydev, true);
+ ret = phy_loopback(priv->dev->phydev, true, 0);
if (ret)
return ret;
attr.dst = priv->dev->dev_addr;
ret = __stmmac_test_loopback(priv, &attr);
- phy_loopback(priv->dev->phydev, false);
+ phy_loopback(priv->dev->phydev, false, 0);
return ret;
}
@@ -1985,7 +1985,7 @@ void stmmac_selftest_run(struct net_device *dev,
case STMMAC_LOOPBACK_PHY:
ret = -EOPNOTSUPP;
if (dev->phydev)
- ret = phy_loopback(dev->phydev, true);
+ ret = phy_loopback(dev->phydev, true, 0);
if (!ret)
break;
fallthrough;
@@ -2018,7 +2018,7 @@ void stmmac_selftest_run(struct net_device *dev,
case STMMAC_LOOPBACK_PHY:
ret = -EOPNOTSUPP;
if (dev->phydev)
- ret = phy_loopback(dev->phydev, false);
+ ret = phy_loopback(dev->phydev, false, 0);
if (!ret)
break;
fallthrough;
diff --git a/drivers/net/ethernet/tehuti/tn40.c b/drivers/net/ethernet/tehuti/tn40.c
index 259bdac24cf2..558b791a97ed 100644
--- a/drivers/net/ethernet/tehuti/tn40.c
+++ b/drivers/net/ethernet/tehuti/tn40.c
@@ -1778,7 +1778,7 @@ static int tn40_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
ret = tn40_phy_register(priv);
if (ret) {
dev_err(&pdev->dev, "failed to set up PHY.\n");
- goto err_free_irq;
+ goto err_cleanup_swnodes;
}
ret = tn40_priv_init(priv);
@@ -1795,6 +1795,8 @@ static int tn40_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return 0;
err_unregister_phydev:
tn40_phy_unregister(priv);
+err_cleanup_swnodes:
+ tn40_swnodes_cleanup(priv);
err_free_irq:
pci_free_irq_vectors(pdev);
err_unset_drvdata:
@@ -1816,6 +1818,7 @@ static void tn40_remove(struct pci_dev *pdev)
unregister_netdev(ndev);
tn40_phy_unregister(priv);
+ tn40_swnodes_cleanup(priv);
pci_free_irq_vectors(priv->pdev);
pci_set_drvdata(pdev, NULL);
iounmap(priv->regs);
@@ -1832,6 +1835,10 @@ static const struct pci_device_id tn40_id_table[] = {
PCI_VENDOR_ID_ASUSTEK, 0x8709) },
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_TEHUTI, 0x4022,
PCI_VENDOR_ID_EDIMAX, 0x8103) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_TEHUTI, PCI_DEVICE_ID_TEHUTI_TN9510,
+ PCI_VENDOR_ID_TEHUTI, 0x3015) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_TEHUTI, PCI_DEVICE_ID_TEHUTI_TN9510,
+ PCI_VENDOR_ID_EDIMAX, 0x8102) },
{ }
};
diff --git a/drivers/net/ethernet/tehuti/tn40.h b/drivers/net/ethernet/tehuti/tn40.h
index 490781fe5120..25da8686d469 100644
--- a/drivers/net/ethernet/tehuti/tn40.h
+++ b/drivers/net/ethernet/tehuti/tn40.h
@@ -4,10 +4,13 @@
#ifndef _TN40_H_
#define _TN40_H_
+#include <linux/property.h>
#include "tn40_regs.h"
#define TN40_DRV_NAME "tn40xx"
+#define PCI_DEVICE_ID_TEHUTI_TN9510 0x4025
+
#define TN40_MDIO_SPEED_1MHZ (1)
#define TN40_MDIO_SPEED_6MHZ (6)
@@ -102,10 +105,39 @@ struct tn40_txdb {
int size; /* Number of elements in the db */
};
+#define NODE_PROP(_NAME, _PROP) ( \
+ (const struct software_node) { \
+ .name = _NAME, \
+ .properties = _PROP, \
+ })
+
+#define NODE_PAR_PROP(_NAME, _PAR, _PROP) ( \
+ (const struct software_node) { \
+ .name = _NAME, \
+ .parent = _PAR, \
+ .properties = _PROP, \
+ })
+
+enum tn40_swnodes {
+ SWNODE_MDIO,
+ SWNODE_PHY,
+ SWNODE_MAX
+};
+
+struct tn40_nodes {
+ char phy_name[32];
+ char mdio_name[32];
+ struct property_entry phy_props[3];
+ struct software_node swnodes[SWNODE_MAX];
+ const struct software_node *group[SWNODE_MAX + 1];
+};
+
struct tn40_priv {
struct net_device *ndev;
struct pci_dev *pdev;
+ struct tn40_nodes nodes;
+
struct napi_struct napi;
/* RX FIFOs: 1 for data (full) descs, and 2 for free descs */
struct tn40_rxd_fifo rxd_fifo0;
@@ -225,6 +257,7 @@ static inline void tn40_write_reg(struct tn40_priv *priv, u32 reg, u32 val)
int tn40_set_link_speed(struct tn40_priv *priv, u32 speed);
+void tn40_swnodes_cleanup(struct tn40_priv *priv);
int tn40_mdiobus_init(struct tn40_priv *priv);
int tn40_phy_register(struct tn40_priv *priv);
diff --git a/drivers/net/ethernet/tehuti/tn40_mdio.c b/drivers/net/ethernet/tehuti/tn40_mdio.c
index af18615d64a8..fb1a4a2e4dbc 100644
--- a/drivers/net/ethernet/tehuti/tn40_mdio.c
+++ b/drivers/net/ethernet/tehuti/tn40_mdio.c
@@ -14,6 +14,8 @@
(FIELD_PREP(TN40_MDIO_PRTAD_MASK, (port))))
#define TN40_MDIO_CMD_READ BIT(15)
+#define AQR105_FIRMWARE "tehuti/aqr105-tn40xx.cld"
+
static void tn40_mdio_set_speed(struct tn40_priv *priv, u32 speed)
{
void __iomem *regs = priv->regs;
@@ -111,6 +113,56 @@ static int tn40_mdio_write_c45(struct mii_bus *mii_bus, int addr, int devnum,
return tn40_mdio_write(mii_bus->priv, addr, devnum, regnum, val);
}
+/* registers an mdio node and an aqr105 PHY at address 1
+ * tn40_mdio-%id {
+ * ethernet-phy@1 {
+ * compatible = "ethernet-phy-id03a1.b4a3";
+ * reg = <1>;
+ * firmware-name = AQR105_FIRMWARE;
+ * };
+ * };
+ */
+static int tn40_swnodes_register(struct tn40_priv *priv)
+{
+ struct tn40_nodes *nodes = &priv->nodes;
+ struct pci_dev *pdev = priv->pdev;
+ struct software_node *swnodes;
+ u32 id;
+
+ id = pci_dev_id(pdev);
+
+ snprintf(nodes->phy_name, sizeof(nodes->phy_name), "ethernet-phy@1");
+ snprintf(nodes->mdio_name, sizeof(nodes->mdio_name), "tn40_mdio-%x",
+ id);
+
+ swnodes = nodes->swnodes;
+
+ swnodes[SWNODE_MDIO] = NODE_PROP(nodes->mdio_name, NULL);
+
+ nodes->phy_props[0] = PROPERTY_ENTRY_STRING("compatible",
+ "ethernet-phy-id03a1.b4a3");
+ nodes->phy_props[1] = PROPERTY_ENTRY_U32("reg", 1);
+ nodes->phy_props[2] = PROPERTY_ENTRY_STRING("firmware-name",
+ AQR105_FIRMWARE);
+ swnodes[SWNODE_PHY] = NODE_PAR_PROP(nodes->phy_name,
+ &swnodes[SWNODE_MDIO],
+ nodes->phy_props);
+
+ nodes->group[SWNODE_PHY] = &swnodes[SWNODE_PHY];
+ nodes->group[SWNODE_MDIO] = &swnodes[SWNODE_MDIO];
+ return software_node_register_node_group(nodes->group);
+}
+
+void tn40_swnodes_cleanup(struct tn40_priv *priv)
+{
+ /* cleanup of swnodes is only needed for AQR105-based cards */
+ if (priv->pdev->device == PCI_DEVICE_ID_TEHUTI_TN9510) {
+ fwnode_handle_put(dev_fwnode(&priv->mdio->dev));
+ device_remove_software_node(&priv->mdio->dev);
+ software_node_unregister_node_group(priv->nodes.group);
+ }
+}
+
int tn40_mdiobus_init(struct tn40_priv *priv)
{
struct pci_dev *pdev = priv->pdev;
@@ -129,14 +181,40 @@ int tn40_mdiobus_init(struct tn40_priv *priv)
bus->read_c45 = tn40_mdio_read_c45;
bus->write_c45 = tn40_mdio_write_c45;
+ priv->mdio = bus;
+ /* provide swnodes for AQR105-based cards only */
+ if (pdev->device == PCI_DEVICE_ID_TEHUTI_TN9510) {
+ ret = tn40_swnodes_register(priv);
+ if (ret) {
+ pr_err("swnodes failed\n");
+ return ret;
+ }
+
+ ret = device_add_software_node(&bus->dev,
+ priv->nodes.group[SWNODE_MDIO]);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "device_add_software_node failed: %d\n", ret);
+ goto err_swnodes_unregister;
+ }
+ }
+
+ tn40_mdio_set_speed(priv, TN40_MDIO_SPEED_6MHZ);
ret = devm_mdiobus_register(&pdev->dev, bus);
if (ret) {
dev_err(&pdev->dev, "failed to register mdiobus %d %u %u\n",
ret, bus->state, MDIOBUS_UNREGISTERED);
- return ret;
+ goto err_swnodes_cleanup;
}
- tn40_mdio_set_speed(priv, TN40_MDIO_SPEED_6MHZ);
- priv->mdio = bus;
return 0;
+
+err_swnodes_unregister:
+ software_node_unregister_node_group(priv->nodes.group);
+ return ret;
+err_swnodes_cleanup:
+ tn40_swnodes_cleanup(priv);
+ return ret;
}
+
+MODULE_FIRMWARE(AQR105_FIRMWARE);
diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig
index 3a13d60a947a..a07c910c497a 100644
--- a/drivers/net/ethernet/ti/Kconfig
+++ b/drivers/net/ethernet/ti/Kconfig
@@ -205,6 +205,7 @@ config TI_ICSSG_PRUETH_SR1
select PHYLIB
select TI_ICSS_IEP
select TI_K3_CPPI_DESC_POOL
+ select PAGE_POOL
depends on PRU_REMOTEPROC
depends on NET_SWITCHDEV
depends on ARCH_K3 && OF && TI_K3_UDMA_GLUE_LAYER
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
index 2806238629f8..b3118bf0757e 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
@@ -164,6 +164,7 @@
#define AM65_CPSW_CPPI_TX_PKT_TYPE 0x7
/* XDP */
+#define AM65_CPSW_XDP_TX BIT(2)
#define AM65_CPSW_XDP_CONSUMED BIT(1)
#define AM65_CPSW_XDP_REDIRECT BIT(0)
#define AM65_CPSW_XDP_PASS 0
@@ -829,19 +830,19 @@ static void am65_cpsw_nuss_tx_cleanup(void *data, dma_addr_t desc_dma)
{
struct am65_cpsw_tx_chn *tx_chn = data;
enum am65_cpsw_tx_buf_type buf_type;
+ struct am65_cpsw_tx_swdata *swdata;
struct cppi5_host_desc_t *desc_tx;
struct xdp_frame *xdpf;
struct sk_buff *skb;
- void **swdata;
desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool, desc_dma);
swdata = cppi5_hdesc_get_swdata(desc_tx);
buf_type = am65_cpsw_nuss_buf_type(tx_chn, desc_dma);
if (buf_type == AM65_CPSW_TX_BUF_TYPE_SKB) {
- skb = *(swdata);
+ skb = swdata->skb;
dev_kfree_skb_any(skb);
} else {
- xdpf = *(swdata);
+ xdpf = swdata->xdpf;
xdp_return_frame(xdpf);
}
@@ -1098,10 +1099,10 @@ static int am65_cpsw_xdp_tx_frame(struct net_device *ndev,
struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
struct cppi5_host_desc_t *host_desc;
+ struct am65_cpsw_tx_swdata *swdata;
struct netdev_queue *netif_txq;
dma_addr_t dma_desc, dma_buf;
u32 pkt_len = xdpf->len;
- void **swdata;
int ret;
host_desc = k3_cppi_desc_pool_alloc(tx_chn->desc_pool);
@@ -1131,7 +1132,8 @@ static int am65_cpsw_xdp_tx_frame(struct net_device *ndev,
cppi5_hdesc_attach_buf(host_desc, dma_buf, pkt_len, dma_buf, pkt_len);
swdata = cppi5_hdesc_get_swdata(host_desc);
- *(swdata) = xdpf;
+ swdata->ndev = ndev;
+ swdata->xdpf = xdpf;
/* Report BQL before sending the packet */
netif_txq = netdev_get_tx_queue(ndev, tx_chn->id);
@@ -1167,17 +1169,16 @@ pool_free:
static int am65_cpsw_run_xdp(struct am65_cpsw_rx_flow *flow,
struct am65_cpsw_port *port,
- struct xdp_buff *xdp,
- int cpu, int *len)
+ struct xdp_buff *xdp, int *len)
{
struct am65_cpsw_common *common = flow->common;
struct net_device *ndev = port->ndev;
int ret = AM65_CPSW_XDP_CONSUMED;
struct am65_cpsw_tx_chn *tx_chn;
struct netdev_queue *netif_txq;
+ int cpu = smp_processor_id();
struct xdp_frame *xdpf;
struct bpf_prog *prog;
- struct page *page;
int pkt_len;
u32 act;
int err;
@@ -1193,8 +1194,7 @@ static int am65_cpsw_run_xdp(struct am65_cpsw_rx_flow *flow,
switch (act) {
case XDP_PASS:
- ret = AM65_CPSW_XDP_PASS;
- goto out;
+ return AM65_CPSW_XDP_PASS;
case XDP_TX:
tx_chn = &common->tx_chns[cpu % AM65_CPSW_MAX_QUEUES];
netif_txq = netdev_get_tx_queue(ndev, tx_chn->id);
@@ -1213,15 +1213,13 @@ static int am65_cpsw_run_xdp(struct am65_cpsw_rx_flow *flow,
goto drop;
dev_sw_netstats_rx_add(ndev, pkt_len);
- ret = AM65_CPSW_XDP_CONSUMED;
- goto out;
+ return AM65_CPSW_XDP_TX;
case XDP_REDIRECT:
if (unlikely(xdp_do_redirect(ndev, xdp, prog)))
goto drop;
dev_sw_netstats_rx_add(ndev, pkt_len);
- ret = AM65_CPSW_XDP_REDIRECT;
- goto out;
+ return AM65_CPSW_XDP_REDIRECT;
default:
bpf_warn_invalid_xdp_action(ndev, prog, act);
fallthrough;
@@ -1233,10 +1231,6 @@ drop:
ndev->stats.rx_dropped++;
}
- page = virt_to_head_page(xdp->data);
- am65_cpsw_put_page(flow, page, true);
-
-out:
return ret;
}
@@ -1274,7 +1268,7 @@ static void am65_cpsw_nuss_rx_csum(struct sk_buff *skb, u32 csum_info)
}
static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_rx_flow *flow,
- int cpu, int *xdp_state)
+ int *xdp_state)
{
struct am65_cpsw_rx_chn *rx_chn = &flow->common->rx_chns;
u32 buf_dma_len, pkt_len, port_id = 0, csum_info;
@@ -1334,8 +1328,13 @@ static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_rx_flow *flow,
xdp_init_buff(&xdp, PAGE_SIZE, &port->xdp_rxq[flow->id]);
xdp_prepare_buff(&xdp, page_addr, AM65_CPSW_HEADROOM,
pkt_len, false);
- *xdp_state = am65_cpsw_run_xdp(flow, port, &xdp,
- cpu, &pkt_len);
+ *xdp_state = am65_cpsw_run_xdp(flow, port, &xdp, &pkt_len);
+ if (*xdp_state == AM65_CPSW_XDP_CONSUMED) {
+ page = virt_to_head_page(xdp.data);
+ am65_cpsw_put_page(flow, page, true);
+ goto allocate;
+ }
+
if (*xdp_state != AM65_CPSW_XDP_PASS)
goto allocate;
@@ -1401,7 +1400,6 @@ static int am65_cpsw_nuss_rx_poll(struct napi_struct *napi_rx, int budget)
{
struct am65_cpsw_rx_flow *flow = am65_cpsw_napi_to_rx_flow(napi_rx);
struct am65_cpsw_common *common = flow->common;
- int cpu = smp_processor_id();
int xdp_state_or = 0;
int cur_budget, ret;
int xdp_state;
@@ -1410,7 +1408,7 @@ static int am65_cpsw_nuss_rx_poll(struct napi_struct *napi_rx, int budget)
/* process only this flow */
cur_budget = budget;
while (cur_budget--) {
- ret = am65_cpsw_nuss_rx_packets(flow, cpu, &xdp_state);
+ ret = am65_cpsw_nuss_rx_packets(flow, &xdp_state);
xdp_state_or |= xdp_state;
if (ret)
break;
@@ -1438,52 +1436,6 @@ static int am65_cpsw_nuss_rx_poll(struct napi_struct *napi_rx, int budget)
return num_rx;
}
-static struct sk_buff *
-am65_cpsw_nuss_tx_compl_packet_skb(struct am65_cpsw_tx_chn *tx_chn,
- dma_addr_t desc_dma)
-{
- struct cppi5_host_desc_t *desc_tx;
- struct sk_buff *skb;
- void **swdata;
-
- desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool,
- desc_dma);
- swdata = cppi5_hdesc_get_swdata(desc_tx);
- skb = *(swdata);
- am65_cpsw_nuss_xmit_free(tx_chn, desc_tx);
-
- am65_cpts_tx_timestamp(tx_chn->common->cpts, skb);
-
- dev_sw_netstats_tx_add(skb->dev, 1, skb->len);
-
- return skb;
-}
-
-static struct xdp_frame *
-am65_cpsw_nuss_tx_compl_packet_xdp(struct am65_cpsw_common *common,
- struct am65_cpsw_tx_chn *tx_chn,
- dma_addr_t desc_dma,
- struct net_device **ndev)
-{
- struct cppi5_host_desc_t *desc_tx;
- struct am65_cpsw_port *port;
- struct xdp_frame *xdpf;
- u32 port_id = 0;
- void **swdata;
-
- desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool, desc_dma);
- cppi5_desc_get_tags_ids(&desc_tx->hdr, NULL, &port_id);
- swdata = cppi5_hdesc_get_swdata(desc_tx);
- xdpf = *(swdata);
- am65_cpsw_nuss_xmit_free(tx_chn, desc_tx);
-
- port = am65_common_get_port(common, port_id);
- dev_sw_netstats_tx_add(port->ndev, 1, xdpf->len);
- *ndev = port->ndev;
-
- return xdpf;
-}
-
static void am65_cpsw_nuss_tx_wake(struct am65_cpsw_tx_chn *tx_chn, struct net_device *ndev,
struct netdev_queue *netif_txq)
{
@@ -1504,13 +1456,17 @@ static void am65_cpsw_nuss_tx_wake(struct am65_cpsw_tx_chn *tx_chn, struct net_d
static int am65_cpsw_nuss_tx_compl_packets(struct am65_cpsw_common *common,
int chn, unsigned int budget, bool *tdown)
{
+ bool single_port = AM65_CPSW_IS_CPSW2G(common);
enum am65_cpsw_tx_buf_type buf_type;
+ struct am65_cpsw_tx_swdata *swdata;
+ struct cppi5_host_desc_t *desc_tx;
struct device *dev = common->dev;
struct am65_cpsw_tx_chn *tx_chn;
struct netdev_queue *netif_txq;
unsigned int total_bytes = 0;
struct net_device *ndev;
struct xdp_frame *xdpf;
+ unsigned int pkt_len;
struct sk_buff *skb;
dma_addr_t desc_dma;
int res, num_tx = 0;
@@ -1518,9 +1474,12 @@ static int am65_cpsw_nuss_tx_compl_packets(struct am65_cpsw_common *common,
tx_chn = &common->tx_chns[chn];
while (true) {
- spin_lock(&tx_chn->lock);
+ if (!single_port)
+ spin_lock(&tx_chn->lock);
res = k3_udma_glue_pop_tx_chn(tx_chn->tx_chn, &desc_dma);
- spin_unlock(&tx_chn->lock);
+ if (!single_port)
+ spin_unlock(&tx_chn->lock);
+
if (res == -ENODATA)
break;
@@ -1531,27 +1490,43 @@ static int am65_cpsw_nuss_tx_compl_packets(struct am65_cpsw_common *common,
break;
}
+ desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool,
+ desc_dma);
+ swdata = cppi5_hdesc_get_swdata(desc_tx);
+ ndev = swdata->ndev;
buf_type = am65_cpsw_nuss_buf_type(tx_chn, desc_dma);
if (buf_type == AM65_CPSW_TX_BUF_TYPE_SKB) {
- skb = am65_cpsw_nuss_tx_compl_packet_skb(tx_chn, desc_dma);
- ndev = skb->dev;
- total_bytes = skb->len;
+ skb = swdata->skb;
+ am65_cpts_tx_timestamp(tx_chn->common->cpts, skb);
+ pkt_len = skb->len;
napi_consume_skb(skb, budget);
} else {
- xdpf = am65_cpsw_nuss_tx_compl_packet_xdp(common, tx_chn,
- desc_dma, &ndev);
- total_bytes = xdpf->len;
+ xdpf = swdata->xdpf;
+ pkt_len = xdpf->len;
if (buf_type == AM65_CPSW_TX_BUF_TYPE_XDP_TX)
xdp_return_frame_rx_napi(xdpf);
else
xdp_return_frame(xdpf);
}
+
+ total_bytes += pkt_len;
num_tx++;
+ am65_cpsw_nuss_xmit_free(tx_chn, desc_tx);
+ dev_sw_netstats_tx_add(ndev, 1, pkt_len);
+ if (!single_port) {
+ /* as packets from multiple ports can be interleaved
+ * on the same channel, we have to figure out the
+ * port/queue for every packet and report it/wake the queue.
+ */
+ netif_txq = netdev_get_tx_queue(ndev, chn);
+ netdev_tx_completed_queue(netif_txq, 1, pkt_len);
+ am65_cpsw_nuss_tx_wake(tx_chn, ndev, netif_txq);
+ }
+ }
+ if (single_port) {
netif_txq = netdev_get_tx_queue(ndev, chn);
-
netdev_tx_completed_queue(netif_txq, num_tx, total_bytes);
-
am65_cpsw_nuss_tx_wake(tx_chn, ndev, netif_txq);
}
@@ -1560,66 +1535,6 @@ static int am65_cpsw_nuss_tx_compl_packets(struct am65_cpsw_common *common,
return num_tx;
}
-static int am65_cpsw_nuss_tx_compl_packets_2g(struct am65_cpsw_common *common,
- int chn, unsigned int budget, bool *tdown)
-{
- enum am65_cpsw_tx_buf_type buf_type;
- struct device *dev = common->dev;
- struct am65_cpsw_tx_chn *tx_chn;
- struct netdev_queue *netif_txq;
- unsigned int total_bytes = 0;
- struct net_device *ndev;
- struct xdp_frame *xdpf;
- struct sk_buff *skb;
- dma_addr_t desc_dma;
- int res, num_tx = 0;
-
- tx_chn = &common->tx_chns[chn];
-
- while (true) {
- res = k3_udma_glue_pop_tx_chn(tx_chn->tx_chn, &desc_dma);
- if (res == -ENODATA)
- break;
-
- if (cppi5_desc_is_tdcm(desc_dma)) {
- if (atomic_dec_and_test(&common->tdown_cnt))
- complete(&common->tdown_complete);
- *tdown = true;
- break;
- }
-
- buf_type = am65_cpsw_nuss_buf_type(tx_chn, desc_dma);
- if (buf_type == AM65_CPSW_TX_BUF_TYPE_SKB) {
- skb = am65_cpsw_nuss_tx_compl_packet_skb(tx_chn, desc_dma);
- ndev = skb->dev;
- total_bytes += skb->len;
- napi_consume_skb(skb, budget);
- } else {
- xdpf = am65_cpsw_nuss_tx_compl_packet_xdp(common, tx_chn,
- desc_dma, &ndev);
- total_bytes += xdpf->len;
- if (buf_type == AM65_CPSW_TX_BUF_TYPE_XDP_TX)
- xdp_return_frame_rx_napi(xdpf);
- else
- xdp_return_frame(xdpf);
- }
- num_tx++;
- }
-
- if (!num_tx)
- return 0;
-
- netif_txq = netdev_get_tx_queue(ndev, chn);
-
- netdev_tx_completed_queue(netif_txq, num_tx, total_bytes);
-
- am65_cpsw_nuss_tx_wake(tx_chn, ndev, netif_txq);
-
- dev_dbg(dev, "%s:%u pkt:%d\n", __func__, chn, num_tx);
-
- return num_tx;
-}
-
static enum hrtimer_restart am65_cpsw_nuss_tx_timer_callback(struct hrtimer *timer)
{
struct am65_cpsw_tx_chn *tx_chns =
@@ -1635,13 +1550,8 @@ static int am65_cpsw_nuss_tx_poll(struct napi_struct *napi_tx, int budget)
bool tdown = false;
int num_tx;
- if (AM65_CPSW_IS_CPSW2G(tx_chn->common))
- num_tx = am65_cpsw_nuss_tx_compl_packets_2g(tx_chn->common, tx_chn->id,
- budget, &tdown);
- else
- num_tx = am65_cpsw_nuss_tx_compl_packets(tx_chn->common,
- tx_chn->id, budget, &tdown);
-
+ num_tx = am65_cpsw_nuss_tx_compl_packets(tx_chn->common,
+ tx_chn->id, budget, &tdown);
if (num_tx >= budget)
return budget;
@@ -1685,12 +1595,12 @@ static netdev_tx_t am65_cpsw_nuss_ndo_slave_xmit(struct sk_buff *skb,
struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
struct cppi5_host_desc_t *first_desc, *next_desc, *cur_desc;
struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+ struct am65_cpsw_tx_swdata *swdata;
struct device *dev = common->dev;
struct am65_cpsw_tx_chn *tx_chn;
struct netdev_queue *netif_txq;
dma_addr_t desc_dma, buf_dma;
int ret, q_idx, i;
- void **swdata;
u32 *psdata;
u32 pkt_len;
@@ -1736,7 +1646,8 @@ static netdev_tx_t am65_cpsw_nuss_ndo_slave_xmit(struct sk_buff *skb,
k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &buf_dma);
cppi5_hdesc_attach_buf(first_desc, buf_dma, pkt_len, buf_dma, pkt_len);
swdata = cppi5_hdesc_get_swdata(first_desc);
- *(swdata) = skb;
+ swdata->ndev = ndev;
+ swdata->skb = skb;
psdata = cppi5_hdesc_get_psdata(first_desc);
/* HW csum offload if enabled */
@@ -2306,13 +2217,17 @@ static void am65_cpsw_nuss_remove_tx_chns(struct am65_cpsw_common *common)
static int am65_cpsw_nuss_ndev_add_tx_napi(struct am65_cpsw_common *common)
{
struct device *dev = common->dev;
+ struct am65_cpsw_tx_chn *tx_chn;
int i, ret = 0;
for (i = 0; i < common->tx_ch_num; i++) {
- struct am65_cpsw_tx_chn *tx_chn = &common->tx_chns[i];
+ tx_chn = &common->tx_chns[i];
+
+ hrtimer_setup(&tx_chn->tx_hrtimer, &am65_cpsw_nuss_tx_timer_callback,
+ CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
- hrtimer_init(&tx_chn->tx_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
- tx_chn->tx_hrtimer.function = &am65_cpsw_nuss_tx_timer_callback;
+ netif_napi_add_tx(common->dma_ndev, &tx_chn->napi_tx,
+ am65_cpsw_nuss_tx_poll);
ret = devm_request_irq(dev, tx_chn->irq,
am65_cpsw_nuss_tx_irq,
@@ -2323,19 +2238,16 @@ static int am65_cpsw_nuss_ndev_add_tx_napi(struct am65_cpsw_common *common)
tx_chn->id, tx_chn->irq, ret);
goto err;
}
-
- netif_napi_add_tx(common->dma_ndev, &tx_chn->napi_tx,
- am65_cpsw_nuss_tx_poll);
}
return 0;
err:
- for (--i ; i >= 0 ; i--) {
- struct am65_cpsw_tx_chn *tx_chn = &common->tx_chns[i];
-
- netif_napi_del(&tx_chn->napi_tx);
+ netif_napi_del(&tx_chn->napi_tx);
+ for (--i; i >= 0; i--) {
+ tx_chn = &common->tx_chns[i];
devm_free_irq(dev, tx_chn->irq, tx_chn);
+ netif_napi_del(&tx_chn->napi_tx);
}
return ret;
@@ -2565,9 +2477,11 @@ static int am65_cpsw_nuss_init_rx_chns(struct am65_cpsw_common *common)
snprintf(flow->name,
sizeof(flow->name), "%s-rx%d",
dev_name(dev), i);
- hrtimer_init(&flow->rx_hrtimer, CLOCK_MONOTONIC,
- HRTIMER_MODE_REL_PINNED);
- flow->rx_hrtimer.function = &am65_cpsw_nuss_rx_timer_callback;
+ hrtimer_setup(&flow->rx_hrtimer, &am65_cpsw_nuss_rx_timer_callback, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL_PINNED);
+
+ netif_napi_add(common->dma_ndev, &flow->napi_rx,
+ am65_cpsw_nuss_rx_poll);
ret = devm_request_irq(dev, flow->irq,
am65_cpsw_nuss_rx_irq,
@@ -2577,11 +2491,8 @@ static int am65_cpsw_nuss_init_rx_chns(struct am65_cpsw_common *common)
dev_err(dev, "failure requesting rx %d irq %u, %d\n",
i, flow->irq, ret);
flow->irq = -EINVAL;
- goto err_flow;
+ goto err_request_irq;
}
-
- netif_napi_add(common->dma_ndev, &flow->napi_rx,
- am65_cpsw_nuss_rx_poll);
}
/* setup classifier to route priorities to flows */
@@ -2589,11 +2500,14 @@ static int am65_cpsw_nuss_init_rx_chns(struct am65_cpsw_common *common)
return 0;
+err_request_irq:
+ netif_napi_del(&flow->napi_rx);
+
err_flow:
- for (--i; i >= 0 ; i--) {
+ for (--i; i >= 0; i--) {
flow = &rx_chn->flows[i];
- netif_napi_del(&flow->napi_rx);
devm_free_irq(dev, flow->irq, flow);
+ netif_napi_del(&flow->napi_rx);
}
err:
@@ -3578,6 +3492,10 @@ static int am65_cpsw_nuss_probe(struct platform_device *pdev)
__be64 id_temp;
int ret, i;
+ BUILD_BUG_ON_MSG(sizeof(struct am65_cpsw_tx_swdata) > AM65_CPSW_NAV_SW_DATA_SIZE,
+ "TX SW_DATA size exceeds AM65_CPSW_NAV_SW_DATA_SIZE");
+ BUILD_BUG_ON_MSG(sizeof(struct am65_cpsw_swdata) > AM65_CPSW_NAV_SW_DATA_SIZE,
+ "SW_DATA size exceeds AM65_CPSW_NAV_SW_DATA_SIZE");
common = devm_kzalloc(dev, sizeof(struct am65_cpsw_common), GFP_KERNEL);
if (!common)
return -ENOMEM;
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.h b/drivers/net/ethernet/ti/am65-cpsw-nuss.h
index e7832a5cf3cc..917c37e4e89b 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.h
+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.h
@@ -104,6 +104,14 @@ struct am65_cpsw_rx_flow {
char name[32];
};
+struct am65_cpsw_tx_swdata {
+ struct net_device *ndev;
+ union {
+ struct sk_buff *skb;
+ struct xdp_frame *xdpf;
+ };
+};
+
struct am65_cpsw_swdata {
u32 flow_id;
struct page *page;
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 0cb6fa6e5b7d..a984b7d84e5e 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -351,6 +351,7 @@ static void cpsw_rx_handler(void *token, int len, int status)
int ret = 0, port, ch = xmeta->ch;
int headroom = CPSW_HEADROOM_NA;
struct net_device *ndev = xmeta->ndev;
+ u32 metasize = 0;
struct cpsw_priv *priv;
struct page_pool *pool;
struct sk_buff *skb;
@@ -400,7 +401,7 @@ static void cpsw_rx_handler(void *token, int len, int status)
size -= CPSW_RX_VLAN_ENCAP_HDR_SIZE;
}
- xdp_prepare_buff(&xdp, pa, headroom, size, false);
+ xdp_prepare_buff(&xdp, pa, headroom, size, true);
port = priv->emac_port + cpsw->data.dual_emac;
ret = cpsw_run_xdp(priv, ch, &xdp, page, port, &len);
@@ -408,6 +409,7 @@ static void cpsw_rx_handler(void *token, int len, int status)
goto requeue;
headroom = xdp.data - xdp.data_hard_start;
+ metasize = xdp.data - xdp.data_meta;
/* XDP prog can modify vlan tag, so can't use encap header */
status &= ~CPDMA_RX_VLAN_ENCAP;
@@ -423,6 +425,8 @@ static void cpsw_rx_handler(void *token, int len, int status)
skb_reserve(skb, headroom);
skb_put(skb, len);
+ if (metasize)
+ skb_metadata_set(skb, metasize);
skb->dev = ndev;
if (status & CPDMA_RX_VLAN_ENCAP)
cpsw_rx_vlan_encap(skb);
diff --git a/drivers/net/ethernet/ti/cpsw_new.c b/drivers/net/ethernet/ti/cpsw_new.c
index cec0a90659d9..5b5b52e4e7a7 100644
--- a/drivers/net/ethernet/ti/cpsw_new.c
+++ b/drivers/net/ethernet/ti/cpsw_new.c
@@ -293,6 +293,7 @@ static void cpsw_rx_handler(void *token, int len, int status)
struct page_pool *pool;
struct sk_buff *skb;
struct xdp_buff xdp;
+ u32 metasize = 0;
int ret = 0;
dma_addr_t dma;
@@ -345,13 +346,14 @@ static void cpsw_rx_handler(void *token, int len, int status)
size -= CPSW_RX_VLAN_ENCAP_HDR_SIZE;
}
- xdp_prepare_buff(&xdp, pa, headroom, size, false);
+ xdp_prepare_buff(&xdp, pa, headroom, size, true);
ret = cpsw_run_xdp(priv, ch, &xdp, page, priv->emac_port, &len);
if (ret != CPSW_XDP_PASS)
goto requeue;
headroom = xdp.data - xdp.data_hard_start;
+ metasize = xdp.data - xdp.data_meta;
/* XDP prog can modify vlan tag, so can't use encap header */
status &= ~CPDMA_RX_VLAN_ENCAP;
@@ -368,6 +370,8 @@ static void cpsw_rx_handler(void *token, int len, int status)
skb->offload_fwd_mark = priv->offload_fwd_mark;
skb_reserve(skb, headroom);
skb_put(skb, len);
+ if (metasize)
+ skb_metadata_set(skb, metasize);
skb->dev = ndev;
if (status & CPDMA_RX_VLAN_ENCAP)
cpsw_rx_vlan_encap(skb);
@@ -1409,7 +1413,7 @@ static int cpsw_create_ports(struct cpsw_common *cpsw)
ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER |
NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_TC;
- ndev->netns_local = true;
+ ndev->netns_immutable = true;
ndev->xdp_features = NETDEV_XDP_ACT_BASIC |
NETDEV_XDP_ACT_REDIRECT |
@@ -1418,6 +1422,7 @@ static int cpsw_create_ports(struct cpsw_common *cpsw)
ndev->netdev_ops = &cpsw_netdev_ops;
ndev->ethtool_ops = &cpsw_ethtool_ops;
SET_NETDEV_DEV(ndev, dev);
+ ndev->dev.of_node = slave_data->slave_node;
if (!napi_ndev) {
/* CPSW Host port CPDMA interface is shared between
diff --git a/drivers/net/ethernet/ti/icssg/icss_iep.c b/drivers/net/ethernet/ti/icssg/icss_iep.c
index d59c1744840a..b4a34c57b7b4 100644
--- a/drivers/net/ethernet/ti/icssg/icss_iep.c
+++ b/drivers/net/ethernet/ti/icssg/icss_iep.c
@@ -406,9 +406,25 @@ static void icss_iep_update_to_next_boundary(struct icss_iep *iep, u64 start_ns)
static int icss_iep_perout_enable_hw(struct icss_iep *iep,
struct ptp_perout_request *req, int on)
{
+ struct timespec64 ts;
+ u64 ns_start;
+ u64 ns_width;
int ret;
u64 cmp;
+ /* Calculate width of the signal for PPS/PEROUT handling */
+ ts.tv_sec = req->on.sec;
+ ts.tv_nsec = req->on.nsec;
+ ns_width = timespec64_to_ns(&ts);
+
+ if (req->flags & PTP_PEROUT_PHASE) {
+ ts.tv_sec = req->phase.sec;
+ ts.tv_nsec = req->phase.nsec;
+ ns_start = timespec64_to_ns(&ts);
+ } else {
+ ns_start = 0;
+ }
+
if (iep->ops && iep->ops->perout_enable) {
ret = iep->ops->perout_enable(iep->clockops_data, req, on, &cmp);
if (ret)
@@ -419,10 +435,12 @@ static int icss_iep_perout_enable_hw(struct icss_iep *iep,
regmap_write(iep->map, ICSS_IEP_CMP1_REG0, lower_32_bits(cmp));
if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT)
regmap_write(iep->map, ICSS_IEP_CMP1_REG1, upper_32_bits(cmp));
- /* Configure SYNC, 1ms pulse width */
- regmap_write(iep->map, ICSS_IEP_SYNC_PWIDTH_REG, 1000000);
+ /* Configure SYNC pulse width based on req->on */
+ regmap_write(iep->map, ICSS_IEP_SYNC_PWIDTH_REG,
+ div_u64(ns_width, iep->def_inc));
regmap_write(iep->map, ICSS_IEP_SYNC0_PERIOD_REG, 0);
- regmap_write(iep->map, ICSS_IEP_SYNC_START_REG, 0);
+ regmap_write(iep->map, ICSS_IEP_SYNC_START_REG,
+ div_u64(ns_start, iep->def_inc));
regmap_write(iep->map, ICSS_IEP_SYNC_CTRL_REG, 0); /* one-shot mode */
/* Enable CMP 1 */
regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
@@ -447,6 +465,10 @@ static int icss_iep_perout_enable_hw(struct icss_iep *iep,
+ req->period.nsec;
icss_iep_update_to_next_boundary(iep, start_ns);
+ regmap_write(iep->map, ICSS_IEP_SYNC_PWIDTH_REG,
+ div_u64(ns_width, iep->def_inc));
+ regmap_write(iep->map, ICSS_IEP_SYNC_START_REG,
+ div_u64(ns_start, iep->def_inc));
/* Enable Sync in single shot mode */
regmap_write(iep->map, ICSS_IEP_SYNC_CTRL_REG,
IEP_SYNC_CTRL_SYNC_N_EN(0) | IEP_SYNC_CTRL_SYNC_EN);
@@ -474,7 +496,37 @@ static int icss_iep_perout_enable_hw(struct icss_iep *iep,
static int icss_iep_perout_enable(struct icss_iep *iep,
struct ptp_perout_request *req, int on)
{
- return -EOPNOTSUPP;
+ int ret = 0;
+
+ /* Reject requests with unsupported flags */
+ if (req->flags & ~(PTP_PEROUT_DUTY_CYCLE |
+ PTP_PEROUT_PHASE))
+ return -EOPNOTSUPP;
+
+ mutex_lock(&iep->ptp_clk_mutex);
+
+ if (iep->pps_enabled) {
+ ret = -EBUSY;
+ goto exit;
+ }
+
+ if (iep->perout_enabled == !!on)
+ goto exit;
+
+ /* Set default "on" time (1ms) for the signal if not passed by the app */
+ if (!(req->flags & PTP_PEROUT_DUTY_CYCLE)) {
+ req->on.sec = 0;
+ req->on.nsec = NSEC_PER_MSEC;
+ }
+
+ ret = icss_iep_perout_enable_hw(iep, req, on);
+ if (!ret)
+ iep->perout_enabled = !!on;
+
+exit:
+ mutex_unlock(&iep->ptp_clk_mutex);
+
+ return ret;
}
static void icss_iep_cap_cmp_work(struct work_struct *work)
@@ -549,10 +601,13 @@ static int icss_iep_pps_enable(struct icss_iep *iep, int on)
if (on) {
ns = icss_iep_gettime(iep, NULL);
ts = ns_to_timespec64(ns);
+ rq.perout.flags = 0;
rq.perout.period.sec = 1;
rq.perout.period.nsec = 0;
rq.perout.start.sec = ts.tv_sec + 2;
rq.perout.start.nsec = 0;
+ rq.perout.on.sec = 0;
+ rq.perout.on.nsec = NSEC_PER_MSEC;
ret = icss_iep_perout_enable_hw(iep, &rq.perout, on);
} else {
ret = icss_iep_perout_enable_hw(iep, &rq.perout, on);
diff --git a/drivers/net/ethernet/ti/icssg/icssg_common.c b/drivers/net/ethernet/ti/icssg/icssg_common.c
index 74f0f200a89d..46f500b90b17 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_common.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_common.c
@@ -45,6 +45,11 @@ void prueth_cleanup_rx_chns(struct prueth_emac *emac,
struct prueth_rx_chn *rx_chn,
int max_rflows)
{
+ if (rx_chn->pg_pool) {
+ page_pool_destroy(rx_chn->pg_pool);
+ rx_chn->pg_pool = NULL;
+ }
+
if (rx_chn->desc_pool)
k3_cppi_desc_pool_destroy(rx_chn->desc_pool);
@@ -93,11 +98,20 @@ void prueth_xmit_free(struct prueth_tx_chn *tx_chn,
{
struct cppi5_host_desc_t *first_desc, *next_desc;
dma_addr_t buf_dma, next_desc_dma;
+ struct prueth_swdata *swdata;
+ struct page *page;
u32 buf_dma_len;
first_desc = desc;
next_desc = first_desc;
+ swdata = cppi5_hdesc_get_swdata(desc);
+ if (swdata->type == PRUETH_SWDATA_PAGE) {
+ page = swdata->data.page;
+ page_pool_recycle_direct(page->pp, swdata->data.page);
+ goto free_desc;
+ }
+
cppi5_hdesc_get_obuf(first_desc, &buf_dma, &buf_dma_len);
k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &buf_dma);
@@ -121,6 +135,7 @@ void prueth_xmit_free(struct prueth_tx_chn *tx_chn,
k3_cppi_desc_pool_free(tx_chn->desc_pool, next_desc);
}
+free_desc:
k3_cppi_desc_pool_free(tx_chn->desc_pool, first_desc);
}
EXPORT_SYMBOL_GPL(prueth_xmit_free);
@@ -131,12 +146,13 @@ int emac_tx_complete_packets(struct prueth_emac *emac, int chn,
struct net_device *ndev = emac->ndev;
struct cppi5_host_desc_t *desc_tx;
struct netdev_queue *netif_txq;
+ struct prueth_swdata *swdata;
struct prueth_tx_chn *tx_chn;
unsigned int total_bytes = 0;
+ struct xdp_frame *xdpf;
struct sk_buff *skb;
dma_addr_t desc_dma;
int res, num_tx = 0;
- void **swdata;
tx_chn = &emac->tx_chns[chn];
@@ -157,20 +173,27 @@ int emac_tx_complete_packets(struct prueth_emac *emac, int chn,
desc_dma);
swdata = cppi5_hdesc_get_swdata(desc_tx);
- /* was this command's TX complete? */
- if (emac->is_sr1 && *(swdata) == emac->cmd_data) {
+ switch (swdata->type) {
+ case PRUETH_SWDATA_SKB:
+ skb = swdata->data.skb;
+ dev_sw_netstats_tx_add(skb->dev, 1, skb->len);
+ total_bytes += skb->len;
+ napi_consume_skb(skb, budget);
+ break;
+ case PRUETH_SWDATA_XDPF:
+ xdpf = swdata->data.xdpf;
+ dev_sw_netstats_tx_add(ndev, 1, xdpf->len);
+ total_bytes += xdpf->len;
+ xdp_return_frame(xdpf);
+ break;
+ default:
+ netdev_err(ndev, "tx_complete: invalid swdata type %d\n", swdata->type);
prueth_xmit_free(tx_chn, desc_tx);
+ ndev->stats.tx_dropped++;
continue;
}
- skb = *(swdata);
prueth_xmit_free(tx_chn, desc_tx);
-
- ndev = skb->dev;
- ndev->stats.tx_packets++;
- ndev->stats.tx_bytes += skb->len;
- total_bytes += skb->len;
- napi_consume_skb(skb, budget);
num_tx++;
}
@@ -249,9 +272,8 @@ int prueth_ndev_add_tx_napi(struct prueth_emac *emac)
struct prueth_tx_chn *tx_chn = &emac->tx_chns[i];
netif_napi_add_tx(emac->ndev, &tx_chn->napi_tx, emac_napi_tx_poll);
- hrtimer_init(&tx_chn->tx_hrtimer, CLOCK_MONOTONIC,
- HRTIMER_MODE_REL_PINNED);
- tx_chn->tx_hrtimer.function = &emac_tx_timer_callback;
+ hrtimer_setup(&tx_chn->tx_hrtimer, &emac_tx_timer_callback, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL_PINNED);
ret = request_irq(tx_chn->irq, prueth_tx_irq,
IRQF_TRIGGER_HIGH, tx_chn->name,
tx_chn);
@@ -461,17 +483,17 @@ fail:
}
EXPORT_SYMBOL_GPL(prueth_init_rx_chns);
-int prueth_dma_rx_push(struct prueth_emac *emac,
- struct sk_buff *skb,
- struct prueth_rx_chn *rx_chn)
+int prueth_dma_rx_push_mapped(struct prueth_emac *emac,
+ struct prueth_rx_chn *rx_chn,
+ struct page *page, u32 buf_len)
{
struct net_device *ndev = emac->ndev;
struct cppi5_host_desc_t *desc_rx;
- u32 pkt_len = skb_tailroom(skb);
+ struct prueth_swdata *swdata;
dma_addr_t desc_dma;
dma_addr_t buf_dma;
- void **swdata;
+ buf_dma = page_pool_get_dma_addr(page) + PRUETH_HEADROOM;
desc_rx = k3_cppi_desc_pool_alloc(rx_chn->desc_pool);
if (!desc_rx) {
netdev_err(ndev, "rx push: failed to allocate descriptor\n");
@@ -479,25 +501,19 @@ int prueth_dma_rx_push(struct prueth_emac *emac,
}
desc_dma = k3_cppi_desc_pool_virt2dma(rx_chn->desc_pool, desc_rx);
- buf_dma = dma_map_single(rx_chn->dma_dev, skb->data, pkt_len, DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(rx_chn->dma_dev, buf_dma))) {
- k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
- netdev_err(ndev, "rx push: failed to map rx pkt buffer\n");
- return -EINVAL;
- }
-
cppi5_hdesc_init(desc_rx, CPPI5_INFO0_HDESC_EPIB_PRESENT,
PRUETH_NAV_PS_DATA_SIZE);
k3_udma_glue_rx_dma_to_cppi5_addr(rx_chn->rx_chn, &buf_dma);
- cppi5_hdesc_attach_buf(desc_rx, buf_dma, skb_tailroom(skb), buf_dma, skb_tailroom(skb));
+ cppi5_hdesc_attach_buf(desc_rx, buf_dma, buf_len, buf_dma, buf_len);
swdata = cppi5_hdesc_get_swdata(desc_rx);
- *swdata = skb;
+ swdata->type = PRUETH_SWDATA_PAGE;
+ swdata->data.page = page;
- return k3_udma_glue_push_rx_chn(rx_chn->rx_chn, 0,
+ return k3_udma_glue_push_rx_chn(rx_chn->rx_chn, PRUETH_RX_FLOW_DATA,
desc_rx, desc_dma);
}
-EXPORT_SYMBOL_GPL(prueth_dma_rx_push);
+EXPORT_SYMBOL_GPL(prueth_dma_rx_push_mapped);
u64 icssg_ts_to_ns(u32 hi_sw, u32 hi, u32 lo, u32 cycle_time_ns)
{
@@ -535,18 +551,170 @@ void emac_rx_timestamp(struct prueth_emac *emac,
ssh->hwtstamp = ns_to_ktime(ns);
}
-static int emac_rx_packet(struct prueth_emac *emac, u32 flow_id)
+/**
+ * emac_xmit_xdp_frame - transmits an XDP frame
+ * @emac: emac device
+ * @xdpf: data to transmit
+ * @page: page from page pool if already DMA mapped
+ * @q_idx: queue id
+ *
+ * Return: XDP state
+ */
+u32 emac_xmit_xdp_frame(struct prueth_emac *emac,
+ struct xdp_frame *xdpf,
+ struct page *page,
+ unsigned int q_idx)
+{
+ struct cppi5_host_desc_t *first_desc;
+ struct net_device *ndev = emac->ndev;
+ struct prueth_tx_chn *tx_chn;
+ dma_addr_t desc_dma, buf_dma;
+ struct prueth_swdata *swdata;
+ u32 *epib;
+ int ret;
+
+ if (q_idx >= PRUETH_MAX_TX_QUEUES) {
+ netdev_err(ndev, "xdp tx: invalid q_id %d\n", q_idx);
+ return ICSSG_XDP_CONSUMED; /* drop */
+ }
+
+ tx_chn = &emac->tx_chns[q_idx];
+
+ first_desc = k3_cppi_desc_pool_alloc(tx_chn->desc_pool);
+ if (!first_desc) {
+ netdev_dbg(ndev, "xdp tx: failed to allocate descriptor\n");
+ goto drop_free_descs; /* drop */
+ }
+
+ if (page) { /* already DMA mapped by page_pool */
+ buf_dma = page_pool_get_dma_addr(page);
+ buf_dma += xdpf->headroom + sizeof(struct xdp_frame);
+ } else { /* Map the linear buffer */
+ buf_dma = dma_map_single(tx_chn->dma_dev, xdpf->data, xdpf->len, DMA_TO_DEVICE);
+ if (dma_mapping_error(tx_chn->dma_dev, buf_dma)) {
+ netdev_err(ndev, "xdp tx: failed to map data buffer\n");
+ goto drop_free_descs; /* drop */
+ }
+ }
+
+ cppi5_hdesc_init(first_desc, CPPI5_INFO0_HDESC_EPIB_PRESENT,
+ PRUETH_NAV_PS_DATA_SIZE);
+ cppi5_hdesc_set_pkttype(first_desc, 0);
+ epib = first_desc->epib;
+ epib[0] = 0;
+ epib[1] = 0;
+
+ /* set dst tag to indicate internal qid at the firmware which is at
+ * bit8..bit15. bit0..bit7 indicates port num for directed
+ * packets in case of switch mode operation
+ */
+ cppi5_desc_set_tags_ids(&first_desc->hdr, 0, (emac->port_id | (q_idx << 8)));
+ k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &buf_dma);
+ cppi5_hdesc_attach_buf(first_desc, buf_dma, xdpf->len, buf_dma, xdpf->len);
+ swdata = cppi5_hdesc_get_swdata(first_desc);
+ if (page) {
+ swdata->type = PRUETH_SWDATA_PAGE;
+ swdata->data.page = page;
+ } else {
+ swdata->type = PRUETH_SWDATA_XDPF;
+ swdata->data.xdpf = xdpf;
+ }
+
+ cppi5_hdesc_set_pktlen(first_desc, xdpf->len);
+ desc_dma = k3_cppi_desc_pool_virt2dma(tx_chn->desc_pool, first_desc);
+
+ ret = k3_udma_glue_push_tx_chn(tx_chn->tx_chn, first_desc, desc_dma);
+ if (ret) {
+ netdev_err(ndev, "xdp tx: push failed: %d\n", ret);
+ goto drop_free_descs;
+ }
+
+ return ICSSG_XDP_TX;
+
+drop_free_descs:
+ prueth_xmit_free(tx_chn, first_desc);
+ return ICSSG_XDP_CONSUMED;
+}
+EXPORT_SYMBOL_GPL(emac_xmit_xdp_frame);
+
+/**
+ * emac_run_xdp - run an XDP program
+ * @emac: emac device
+ * @xdp: XDP buffer containing the frame
+ * @page: page with RX data if already DMA mapped
+ * @len: Rx descriptor packet length
+ *
+ * Return: XDP state
+ */
+static u32 emac_run_xdp(struct prueth_emac *emac, struct xdp_buff *xdp,
+ struct page *page, u32 *len)
+{
+ struct net_device *ndev = emac->ndev;
+ struct bpf_prog *xdp_prog;
+ struct xdp_frame *xdpf;
+ u32 pkt_len = *len;
+ u32 act, result;
+ int q_idx, err;
+
+ xdp_prog = READ_ONCE(emac->xdp_prog);
+ act = bpf_prog_run_xdp(xdp_prog, xdp);
+ switch (act) {
+ case XDP_PASS:
+ return ICSSG_XDP_PASS;
+ case XDP_TX:
+ /* Send packet to TX ring for immediate transmission */
+ xdpf = xdp_convert_buff_to_frame(xdp);
+ if (unlikely(!xdpf)) {
+ ndev->stats.tx_dropped++;
+ goto drop;
+ }
+
+ q_idx = smp_processor_id() % emac->tx_ch_num;
+ result = emac_xmit_xdp_frame(emac, xdpf, page, q_idx);
+ if (result == ICSSG_XDP_CONSUMED)
+ goto drop;
+
+ dev_sw_netstats_rx_add(ndev, xdpf->len);
+ return result;
+ case XDP_REDIRECT:
+ err = xdp_do_redirect(emac->ndev, xdp, xdp_prog);
+ if (err)
+ goto drop;
+
+ dev_sw_netstats_rx_add(ndev, pkt_len);
+ return ICSSG_XDP_REDIR;
+ default:
+ bpf_warn_invalid_xdp_action(emac->ndev, xdp_prog, act);
+ fallthrough;
+ case XDP_ABORTED:
+drop:
+ trace_xdp_exception(emac->ndev, xdp_prog, act);
+ fallthrough; /* handle aborts by dropping packet */
+ case XDP_DROP:
+ ndev->stats.rx_dropped++;
+ page_pool_recycle_direct(emac->rx_chns.pg_pool, page);
+ return ICSSG_XDP_CONSUMED;
+ }
+}
+
+static int emac_rx_packet(struct prueth_emac *emac, u32 flow_id, u32 *xdp_state)
{
struct prueth_rx_chn *rx_chn = &emac->rx_chns;
u32 buf_dma_len, pkt_len, port_id = 0;
struct net_device *ndev = emac->ndev;
struct cppi5_host_desc_t *desc_rx;
- struct sk_buff *skb, *new_skb;
+ struct prueth_swdata *swdata;
dma_addr_t desc_dma, buf_dma;
- void **swdata;
+ struct page *page, *new_page;
+ struct page_pool *pool;
+ struct sk_buff *skb;
+ struct xdp_buff xdp;
u32 *psdata;
+ void *pa;
int ret;
+ *xdp_state = 0;
+ pool = rx_chn->pg_pool;
ret = k3_udma_glue_pop_rx_chn(rx_chn->rx_chn, flow_id, &desc_dma);
if (ret) {
if (ret != -ENODATA)
@@ -558,15 +726,15 @@ static int emac_rx_packet(struct prueth_emac *emac, u32 flow_id)
return 0;
desc_rx = k3_cppi_desc_pool_dma2virt(rx_chn->desc_pool, desc_dma);
-
swdata = cppi5_hdesc_get_swdata(desc_rx);
- skb = *swdata;
-
- psdata = cppi5_hdesc_get_psdata(desc_rx);
- /* RX HW timestamp */
- if (emac->rx_ts_enabled)
- emac_rx_timestamp(emac, skb, psdata);
+ if (swdata->type != PRUETH_SWDATA_PAGE) {
+ netdev_err(ndev, "rx_pkt: invalid swdata->type %d\n", swdata->type);
+ k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
+ return 0;
+ }
+ page = swdata->data.page;
+ page_pool_dma_sync_for_cpu(pool, page, 0, PAGE_SIZE);
cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len);
k3_udma_glue_rx_cppi5_to_dma_addr(rx_chn->rx_chn, &buf_dma);
pkt_len = cppi5_hdesc_get_pktlen(desc_rx);
@@ -574,32 +742,63 @@ static int emac_rx_packet(struct prueth_emac *emac, u32 flow_id)
pkt_len -= 4;
cppi5_desc_get_tags_ids(&desc_rx->hdr, &port_id, NULL);
- dma_unmap_single(rx_chn->dma_dev, buf_dma, buf_dma_len, DMA_FROM_DEVICE);
k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
- skb->dev = ndev;
- new_skb = netdev_alloc_skb_ip_align(ndev, PRUETH_MAX_PKT_SIZE);
/* if allocation fails we drop the packet but push the
- * descriptor back to the ring with old skb to prevent a stall
+ * descriptor back to the ring with old page to prevent a stall
*/
- if (!new_skb) {
+ new_page = page_pool_dev_alloc_pages(pool);
+ if (unlikely(!new_page)) {
+ new_page = page;
ndev->stats.rx_dropped++;
- new_skb = skb;
+ goto requeue;
+ }
+
+ pa = page_address(page);
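+ /* If an XDP program is attached, run it on the raw page before building an skb */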
+ if (emac->xdp_prog) {
+ xdp_init_buff(&xdp, PAGE_SIZE, &rx_chn->xdp_rxq);
+ xdp_prepare_buff(&xdp, pa, PRUETH_HEADROOM, pkt_len, false);
+
+ *xdp_state = emac_run_xdp(emac, &xdp, page, &pkt_len);
+ if (*xdp_state == ICSSG_XDP_PASS)
+ skb = xdp_build_skb_from_buff(&xdp);
+ else
+ goto requeue;
} else {
- /* send the filled skb up the n/w stack */
- skb_put(skb, pkt_len);
- if (emac->prueth->is_switch_mode)
- skb->offload_fwd_mark = emac->offload_fwd_mark;
- skb->protocol = eth_type_trans(skb, ndev);
- napi_gro_receive(&emac->napi_rx, skb);
- ndev->stats.rx_bytes += pkt_len;
- ndev->stats.rx_packets++;
+ /* prepare skb and send to n/w stack */
+ skb = napi_build_skb(pa, PAGE_SIZE);
+ }
+
+ if (!skb) {
+ ndev->stats.rx_dropped++;
+ page_pool_recycle_direct(pool, page);
+ goto requeue;
}
+ skb_reserve(skb, PRUETH_HEADROOM);
+ skb_put(skb, pkt_len);
+ skb->dev = ndev;
+
+ psdata = cppi5_hdesc_get_psdata(desc_rx);
+ /* RX HW timestamp */
+ if (emac->rx_ts_enabled)
+ emac_rx_timestamp(emac, skb, psdata);
+
+ if (emac->prueth->is_switch_mode)
+ skb->offload_fwd_mark = emac->offload_fwd_mark;
+ skb->protocol = eth_type_trans(skb, ndev);
+
+ skb_mark_for_recycle(skb);
+ napi_gro_receive(&emac->napi_rx, skb);
+ ndev->stats.rx_bytes += pkt_len;
+ ndev->stats.rx_packets++;
+
+requeue:
/* queue another RX DMA */
- ret = prueth_dma_rx_push(emac, new_skb, &emac->rx_chns);
+ ret = prueth_dma_rx_push_mapped(emac, &emac->rx_chns, new_page,
+ PRUETH_MAX_PKT_SIZE);
if (WARN_ON(ret < 0)) {
- dev_kfree_skb_any(new_skb);
+ page_pool_recycle_direct(pool, new_page);
ndev->stats.rx_errors++;
ndev->stats.rx_dropped++;
}
@@ -611,22 +810,19 @@ static void prueth_rx_cleanup(void *data, dma_addr_t desc_dma)
{
struct prueth_rx_chn *rx_chn = data;
struct cppi5_host_desc_t *desc_rx;
- struct sk_buff *skb;
- dma_addr_t buf_dma;
- u32 buf_dma_len;
- void **swdata;
+ struct prueth_swdata *swdata;
+ struct page_pool *pool;
+ struct page *page;
+ pool = rx_chn->pg_pool;
desc_rx = k3_cppi_desc_pool_dma2virt(rx_chn->desc_pool, desc_dma);
swdata = cppi5_hdesc_get_swdata(desc_rx);
- skb = *swdata;
- cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len);
- k3_udma_glue_rx_cppi5_to_dma_addr(rx_chn->rx_chn, &buf_dma);
+ if (swdata->type == PRUETH_SWDATA_PAGE) {
+ page = swdata->data.page;
+ page_pool_recycle_direct(pool, page);
+ }
- dma_unmap_single(rx_chn->dma_dev, buf_dma, buf_dma_len,
- DMA_FROM_DEVICE);
k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
-
- dev_kfree_skb_any(skb);
}
static int prueth_tx_ts_cookie_get(struct prueth_emac *emac)
@@ -662,13 +858,13 @@ enum netdev_tx icssg_ndo_start_xmit(struct sk_buff *skb, struct net_device *ndev
struct prueth_emac *emac = netdev_priv(ndev);
struct prueth *prueth = emac->prueth;
struct netdev_queue *netif_txq;
+ struct prueth_swdata *swdata;
struct prueth_tx_chn *tx_chn;
dma_addr_t desc_dma, buf_dma;
u32 pkt_len, dst_tag_id;
int i, ret = 0, q_idx;
bool in_tx_ts = 0;
int tx_ts_cookie;
- void **swdata;
u32 *epib;
pkt_len = skb_headlen(skb);
@@ -730,7 +926,8 @@ enum netdev_tx icssg_ndo_start_xmit(struct sk_buff *skb, struct net_device *ndev
k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &buf_dma);
cppi5_hdesc_attach_buf(first_desc, buf_dma, pkt_len, buf_dma, pkt_len);
swdata = cppi5_hdesc_get_swdata(first_desc);
- *swdata = skb;
+ swdata->type = PRUETH_SWDATA_SKB;
+ swdata->data.skb = skb;
/* Handle the case where skb is fragmented in pages */
cur_desc = first_desc;
@@ -833,15 +1030,27 @@ static void prueth_tx_cleanup(void *data, dma_addr_t desc_dma)
{
struct prueth_tx_chn *tx_chn = data;
struct cppi5_host_desc_t *desc_tx;
+ struct prueth_swdata *swdata;
+ struct xdp_frame *xdpf;
struct sk_buff *skb;
- void **swdata;
desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool, desc_dma);
swdata = cppi5_hdesc_get_swdata(desc_tx);
- skb = *(swdata);
- prueth_xmit_free(tx_chn, desc_tx);
- dev_kfree_skb_any(skb);
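+ /* Free whatever the TX descriptor was carrying: an skb or an XDP frame */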
+ switch (swdata->type) {
+ case PRUETH_SWDATA_SKB:
+ skb = swdata->data.skb;
+ dev_kfree_skb_any(skb);
+ break;
+ case PRUETH_SWDATA_XDPF:
+ xdpf = swdata->data.xdpf;
+ xdp_return_frame(xdpf);
+ break;
+ default:
+ break;
+ }
+
+ prueth_xmit_free(tx_chn, desc_tx);
}
irqreturn_t prueth_rx_irq(int irq, void *dev_id)
@@ -875,15 +1084,18 @@ int icssg_napi_rx_poll(struct napi_struct *napi_rx, int budget)
PRUETH_RX_FLOW_DATA_SR1 : PRUETH_RX_FLOW_DATA;
int flow = emac->is_sr1 ?
PRUETH_MAX_RX_FLOWS_SR1 : PRUETH_MAX_RX_FLOWS;
+ int xdp_state_or = 0;
int num_rx = 0;
int cur_budget;
+ u32 xdp_state;
int ret;
while (flow--) {
cur_budget = budget - num_rx;
while (cur_budget--) {
- ret = emac_rx_packet(emac, flow);
+ ret = emac_rx_packet(emac, flow, &xdp_state);
+ xdp_state_or |= xdp_state;
if (ret)
break;
num_rx++;
@@ -893,6 +1105,9 @@ int icssg_napi_rx_poll(struct napi_struct *napi_rx, int budget)
break;
}
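+ /* Flush any pending XDP_REDIRECT work once the poll loop has finished */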
+ if (xdp_state_or & ICSSG_XDP_REDIR)
+ xdp_do_flush();
+
if (num_rx < budget && napi_complete_done(napi_rx, num_rx)) {
if (unlikely(emac->rx_pace_timeout_ns)) {
hrtimer_start(&emac->rx_hrtimer,
@@ -907,29 +1122,71 @@ int icssg_napi_rx_poll(struct napi_struct *napi_rx, int budget)
}
EXPORT_SYMBOL_GPL(icssg_napi_rx_poll);
+static struct page_pool *prueth_create_page_pool(struct prueth_emac *emac,
+ struct device *dma_dev,
+ int size)
+{
+ struct page_pool_params pp_params = { 0 };
+ struct page_pool *pool;
+
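+ /* Order-0 pages; the pool takes care of DMA mapping and device sync */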
+ pp_params.order = 0;
+ pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
+ pp_params.pool_size = size;
+ pp_params.nid = dev_to_node(emac->prueth->dev);
+ pp_params.dma_dir = DMA_BIDIRECTIONAL;
+ pp_params.dev = dma_dev;
+ pp_params.napi = &emac->napi_rx;
+ pp_params.max_len = PAGE_SIZE;
+
+ pool = page_pool_create(&pp_params);
+ if (IS_ERR(pool))
+ netdev_err(emac->ndev, "cannot create rx page pool\n");
+
+ return pool;
+}
+
int prueth_prepare_rx_chan(struct prueth_emac *emac,
struct prueth_rx_chn *chn,
int buf_size)
{
- struct sk_buff *skb;
+ struct page_pool *pool;
+ struct page *page;
int i, ret;
+ pool = prueth_create_page_pool(emac, chn->dma_dev, chn->descs_num);
+ if (IS_ERR(pool))
+ return PTR_ERR(pool);
+
+ chn->pg_pool = pool;
+
for (i = 0; i < chn->descs_num; i++) {
- skb = __netdev_alloc_skb_ip_align(NULL, buf_size, GFP_KERNEL);
- if (!skb)
- return -ENOMEM;
+ /* NOTE: memory is not used efficiently here: a full page
+ * (typically 4KB) is allocated per buffer instead of
+ * PRUETH_MAX_PKT_SIZE (~1.5KB)
+ */
+ page = page_pool_dev_alloc_pages(pool);
+ if (!page) {
+ netdev_err(emac->ndev, "couldn't allocate rx page\n");
+ ret = -ENOMEM;
+ goto recycle_alloc_pg;
+ }
- ret = prueth_dma_rx_push(emac, skb, chn);
+ ret = prueth_dma_rx_push_mapped(emac, chn, page, buf_size);
if (ret < 0) {
netdev_err(emac->ndev,
- "cannot submit skb for rx chan %s ret %d\n",
+ "cannot submit page for rx chan %s ret %d\n",
chn->name, ret);
- kfree_skb(skb);
- return ret;
+ page_pool_recycle_direct(pool, page);
+ goto recycle_alloc_pg;
}
}
return 0;
+
+recycle_alloc_pg:
+ prueth_reset_rx_chan(&emac->rx_chns, PRUETH_MAX_RX_FLOWS, false);
+
+ return ret;
}
EXPORT_SYMBOL_GPL(prueth_prepare_rx_chan);
@@ -958,6 +1215,9 @@ void prueth_reset_rx_chan(struct prueth_rx_chn *chn,
prueth_rx_cleanup, !!i);
if (disable)
k3_udma_glue_disable_rx_chn(chn->rx_chn);
+
+ page_pool_destroy(chn->pg_pool);
+ chn->pg_pool = NULL;
}
EXPORT_SYMBOL_GPL(prueth_reset_rx_chan);
diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth.c b/drivers/net/ethernet/ti/icssg/icssg_prueth.c
index 00ed97860547..443f90fa6557 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_prueth.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.c
@@ -559,6 +559,33 @@ const struct icss_iep_clockops prueth_iep_clockops = {
.perout_enable = prueth_perout_enable,
};
+static int prueth_create_xdp_rxqs(struct prueth_emac *emac)
+{
+ struct xdp_rxq_info *rxq = &emac->rx_chns.xdp_rxq;
+ struct page_pool *pool = emac->rx_chns.pg_pool;
+ int ret;
+
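+ /* Register the RX queue with the XDP core and bind it to the page pool */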
+ ret = xdp_rxq_info_reg(rxq, emac->ndev, 0, emac->napi_rx.napi_id);
+ if (ret)
+ return ret;
+
+ ret = xdp_rxq_info_reg_mem_model(rxq, MEM_TYPE_PAGE_POOL, pool);
+ if (ret)
+ xdp_rxq_info_unreg(rxq);
+
+ return ret;
+}
+
+static void prueth_destroy_xdp_rxqs(struct prueth_emac *emac)
+{
+ struct xdp_rxq_info *rxq = &emac->rx_chns.xdp_rxq;
+
+ if (!xdp_rxq_info_is_reg(rxq))
+ return;
+
+ xdp_rxq_info_unreg(rxq);
+}
+
static int icssg_prueth_add_mcast(struct net_device *ndev, const u8 *addr)
{
struct net_device *real_dev;
@@ -780,10 +807,14 @@ static int emac_ndo_open(struct net_device *ndev)
if (ret)
goto free_tx_ts_irq;
- ret = k3_udma_glue_enable_rx_chn(emac->rx_chns.rx_chn);
+ ret = prueth_create_xdp_rxqs(emac);
if (ret)
goto reset_rx_chn;
+ ret = k3_udma_glue_enable_rx_chn(emac->rx_chns.rx_chn);
+ if (ret)
+ goto destroy_xdp_rxqs;
+
for (i = 0; i < emac->tx_ch_num; i++) {
ret = k3_udma_glue_enable_tx_chn(emac->tx_chns[i].tx_chn);
if (ret)
@@ -809,6 +840,8 @@ reset_tx_chan:
* any SKB for completion. So set false to free_skb
*/
prueth_reset_tx_chan(emac, i, false);
+destroy_xdp_rxqs:
+ prueth_destroy_xdp_rxqs(emac);
reset_rx_chn:
prueth_reset_rx_chan(&emac->rx_chns, max_rx_flows, false);
free_tx_ts_irq:
@@ -879,7 +912,7 @@ static int emac_ndo_stop(struct net_device *ndev)
k3_udma_glue_tdown_rx_chn(emac->rx_chns.rx_chn, true);
prueth_reset_rx_chan(&emac->rx_chns, max_rx_flows, true);
-
+ prueth_destroy_xdp_rxqs(emac);
napi_disable(&emac->napi_rx);
hrtimer_cancel(&emac->rx_hrtimer);
@@ -1024,6 +1057,93 @@ static int emac_ndo_vlan_rx_del_vid(struct net_device *ndev,
return 0;
}
+/**
+ * emac_xdp_xmit - Implements ndo_xdp_xmit
+ * @dev: netdev
+ * @n: number of frames
+ * @frames: array of XDP frame pointers
+ * @flags: XDP extra info
+ *
+ * Return: number of frames successfully sent. Failed frames
+ * will be freed by the XDP core.
+ *
+ * For error cases, a negative errno code is returned and no frames
+ * are transmitted (caller must handle freeing frames).
+ **/
+static int emac_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
+ u32 flags)
+{
+ struct prueth_emac *emac = netdev_priv(dev);
+ struct net_device *ndev = emac->ndev;
+ struct xdp_frame *xdpf;
+ unsigned int q_idx;
+ int nxmit = 0;
+ u32 err;
+ int i;
+
+ q_idx = smp_processor_id() % emac->tx_ch_num;
+
+ if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
+ return -EINVAL;
+
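+ /* Queue each frame for transmission, stopping at the first failure */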
+ for (i = 0; i < n; i++) {
+ xdpf = frames[i];
+ err = emac_xmit_xdp_frame(emac, xdpf, NULL, q_idx);
+ if (err != ICSSG_XDP_TX) {
+ ndev->stats.tx_dropped++;
+ break;
+ }
+ nxmit++;
+ }
+
+ return nxmit;
+}
+
+/**
+ * emac_xdp_setup - add/remove an XDP program
+ * @emac: emac device
+ * @bpf: XDP program
+ *
+ * Return: Always 0 (Success)
+ **/
+static int emac_xdp_setup(struct prueth_emac *emac, struct netdev_bpf *bpf)
+{
+ struct bpf_prog *prog = bpf->prog;
+ xdp_features_t val;
+
+ val = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_NDO_XMIT;
+ xdp_set_features_flag(emac->ndev, val);
+
+ if (!emac->xdpi.prog && !prog)
+ return 0;
+
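+ /* Publish the new program; the RX path picks it up via READ_ONCE() */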
+ WRITE_ONCE(emac->xdp_prog, prog);
+
+ xdp_attachment_setup(&emac->xdpi, bpf);
+
+ return 0;
+}
+
+/**
+ * emac_ndo_bpf - implements ndo_bpf for icssg_prueth
+ * @ndev: network adapter device
+ * @bpf: XDP program
+ *
+ * Return: 0 on success, error code on failure.
+ **/
+static int emac_ndo_bpf(struct net_device *ndev, struct netdev_bpf *bpf)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+
+ switch (bpf->command) {
+ case XDP_SETUP_PROG:
+ return emac_xdp_setup(emac, bpf);
+ default:
+ return -EINVAL;
+ }
+}
+
static const struct net_device_ops emac_netdev_ops = {
.ndo_open = emac_ndo_open,
.ndo_stop = emac_ndo_stop,
@@ -1038,6 +1158,8 @@ static const struct net_device_ops emac_netdev_ops = {
.ndo_fix_features = emac_ndo_fix_features,
.ndo_vlan_rx_add_vid = emac_ndo_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = emac_ndo_vlan_rx_del_vid,
+ .ndo_bpf = emac_ndo_bpf,
+ .ndo_xdp_xmit = emac_xdp_xmit,
};
static int prueth_netdev_init(struct prueth *prueth,
@@ -1066,6 +1188,8 @@ static int prueth_netdev_init(struct prueth *prueth,
emac->prueth = prueth;
emac->ndev = ndev;
emac->port_id = port;
+ emac->xdp_prog = NULL;
+ emac->ndev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
emac->cmd_wq = create_singlethread_workqueue("icssg_cmd_wq");
if (!emac->cmd_wq) {
ret = -ENOMEM;
@@ -1169,9 +1293,8 @@ static int prueth_netdev_init(struct prueth *prueth,
ndev->hw_features |= NETIF_PRUETH_HSR_OFFLOAD_FEATURES;
netif_napi_add(ndev, &emac->napi_rx, icssg_napi_rx_poll);
- hrtimer_init(&emac->rx_hrtimer, CLOCK_MONOTONIC,
- HRTIMER_MODE_REL_PINNED);
- emac->rx_hrtimer.function = &emac_rx_timer_callback;
+ hrtimer_setup(&emac->rx_hrtimer, &emac_rx_timer_callback, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL_PINNED);
prueth->emac[mac] = emac;
return 0;
@@ -1522,6 +1645,9 @@ static int prueth_probe(struct platform_device *pdev)
np = dev->of_node;
+ BUILD_BUG_ON_MSG((sizeof(struct prueth_swdata) > PRUETH_NAV_SW_DATA_SIZE),
+ "insufficient SW_DATA size");
+
prueth = devm_kzalloc(dev, sizeof(*prueth), GFP_KERNEL);
if (!prueth)
return -ENOMEM;
@@ -1679,6 +1805,7 @@ static int prueth_probe(struct platform_device *pdev)
}
spin_lock_init(&prueth->vtbl_lock);
+ spin_lock_init(&prueth->stats_lock);
/* setup netdev interfaces */
if (eth0_node) {
ret = prueth_netdev_init(prueth, eth0_node);
diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth.h b/drivers/net/ethernet/ti/icssg/icssg_prueth.h
index 329b46e9ee53..b6be4aa57a61 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_prueth.h
+++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.h
@@ -8,6 +8,8 @@
#ifndef __NET_TI_ICSSG_PRUETH_H
#define __NET_TI_ICSSG_PRUETH_H
+#include <linux/bpf.h>
+#include <linux/bpf_trace.h>
#include <linux/etherdevice.h>
#include <linux/genalloc.h>
#include <linux/if_vlan.h>
@@ -33,6 +35,8 @@
#include <linux/dma/k3-udma-glue.h>
#include <net/devlink.h>
+#include <net/xdp.h>
+#include <net/page_pool/helpers.h>
#include "icssg_config.h"
#include "icss_iep.h"
@@ -131,6 +135,26 @@ struct prueth_rx_chn {
u32 descs_num;
unsigned int irq[ICSSG_MAX_RFLOWS]; /* separate irq per flow */
char name[32];
+ struct page_pool *pg_pool;
+ struct xdp_rxq_info xdp_rxq;
+};
+
+enum prueth_swdata_type {
+ PRUETH_SWDATA_INVALID = 0,
+ PRUETH_SWDATA_SKB,
+ PRUETH_SWDATA_PAGE,
+ PRUETH_SWDATA_CMD,
+ PRUETH_SWDATA_XDPF,
+};
+
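+/* Software data attached to each descriptor: a tagged union of skb, page, command or XDP frame */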
+struct prueth_swdata {
+ enum prueth_swdata_type type;
+ union prueth_data {
+ struct sk_buff *skb;
+ struct page *page;
+ u32 cmd;
+ struct xdp_frame *xdpf;
+ } data;
};
/* There are 4 Tx DMA channels, but the highest priority is CH3 (thread 3)
@@ -140,6 +164,12 @@ struct prueth_rx_chn {
#define PRUETH_MAX_TX_TS_REQUESTS 50 /* Max simultaneous TX_TS requests */
+/* XDP BPF state */
+#define ICSSG_XDP_PASS 0
+#define ICSSG_XDP_CONSUMED BIT(0)
+#define ICSSG_XDP_TX BIT(1)
+#define ICSSG_XDP_REDIR BIT(2)
+
/* Minimum coalesce time in usecs for both Tx and Rx */
#define ICSSG_MIN_COALESCE_USECS 20
@@ -208,8 +238,14 @@ struct prueth_emac {
unsigned long rx_pace_timeout_ns;
struct netdev_hw_addr_list vlan_mcast_list[MAX_VLAN_ID];
+ struct bpf_prog *xdp_prog;
+ struct xdp_attachment_info xdpi;
};
+/* The buf includes headroom compatible with both skb and xdpf */
+#define PRUETH_HEADROOM_NA (max(XDP_PACKET_HEADROOM, NET_SKB_PAD) + NET_IP_ALIGN)
+#define PRUETH_HEADROOM ALIGN(PRUETH_HEADROOM_NA, sizeof(long))
+
/**
* struct prueth_pdata - PRUeth platform data
* @fdqring_mode: Free desc queue mode
@@ -305,6 +341,8 @@ struct prueth {
int default_vlan;
/** @vtbl_lock: Lock for vtbl in shared memory */
spinlock_t vtbl_lock;
+ /** @stats_lock: Lock for reading icssg stats */
+ spinlock_t stats_lock;
};
struct emac_tx_ts_response {
@@ -410,9 +448,10 @@ int prueth_init_rx_chns(struct prueth_emac *emac,
struct prueth_rx_chn *rx_chn,
char *name, u32 max_rflows,
u32 max_desc_num);
-int prueth_dma_rx_push(struct prueth_emac *emac,
- struct sk_buff *skb,
- struct prueth_rx_chn *rx_chn);
+int prueth_dma_rx_push_mapped(struct prueth_emac *emac,
+ struct prueth_rx_chn *rx_chn,
+ struct page *page, u32 buf_len);
+unsigned int prueth_rxbuf_total_len(unsigned int len);
void emac_rx_timestamp(struct prueth_emac *emac,
struct sk_buff *skb, u32 *psdata);
enum netdev_tx icssg_ndo_start_xmit(struct sk_buff *skb, struct net_device *ndev);
@@ -441,5 +480,9 @@ void prueth_put_cores(struct prueth *prueth, int slice);
/* Revision specific helper */
u64 icssg_ts_to_ns(u32 hi_sw, u32 hi, u32 lo, u32 cycle_time_ns);
+u32 emac_xmit_xdp_frame(struct prueth_emac *emac,
+ struct xdp_frame *xdpf,
+ struct page *page,
+ unsigned int q_idx);
#endif /* __NET_TI_ICSSG_PRUETH_H */
diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c b/drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c
index 64a19ff39562..ff5f41bf499e 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c
@@ -84,7 +84,7 @@ static int emac_send_command_sr1(struct prueth_emac *emac, u32 cmd)
__le32 *data = emac->cmd_data;
dma_addr_t desc_dma, buf_dma;
struct prueth_tx_chn *tx_chn;
- void **swdata;
+ struct prueth_swdata *swdata;
int ret = 0;
u32 *epib;
@@ -122,7 +122,8 @@ static int emac_send_command_sr1(struct prueth_emac *emac, u32 cmd)
cppi5_hdesc_attach_buf(first_desc, buf_dma, pkt_len, buf_dma, pkt_len);
swdata = cppi5_hdesc_get_swdata(first_desc);
- *swdata = data;
+ swdata->type = PRUETH_SWDATA_CMD;
+ swdata->data.cmd = le32_to_cpu(data[0]);
cppi5_hdesc_set_pktlen(first_desc, pkt_len);
desc_dma = k3_cppi_desc_pool_virt2dma(tx_chn->desc_pool, first_desc);
@@ -268,16 +269,16 @@ static int emac_phy_connect(struct prueth_emac *emac)
* Returns skb pointer if packet found else NULL
* Caller must free the returned skb.
*/
-static struct sk_buff *prueth_process_rx_mgm(struct prueth_emac *emac,
- u32 flow_id)
+static struct page *prueth_process_rx_mgm(struct prueth_emac *emac,
+ u32 flow_id)
{
struct prueth_rx_chn *rx_chn = &emac->rx_mgm_chn;
struct net_device *ndev = emac->ndev;
struct cppi5_host_desc_t *desc_rx;
- struct sk_buff *skb, *new_skb;
+ struct page *page, *new_page;
+ struct prueth_swdata *swdata;
dma_addr_t desc_dma, buf_dma;
- u32 buf_dma_len, pkt_len;
- void **swdata;
+ u32 buf_dma_len;
int ret;
ret = k3_udma_glue_pop_rx_chn(rx_chn->rx_chn, flow_id, &desc_dma);
@@ -299,34 +300,31 @@ static struct sk_buff *prueth_process_rx_mgm(struct prueth_emac *emac,
}
swdata = cppi5_hdesc_get_swdata(desc_rx);
- skb = *swdata;
+ page = swdata->data.page;
cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len);
- pkt_len = cppi5_hdesc_get_pktlen(desc_rx);
dma_unmap_single(rx_chn->dma_dev, buf_dma, buf_dma_len, DMA_FROM_DEVICE);
k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
- new_skb = netdev_alloc_skb_ip_align(ndev, PRUETH_MAX_PKT_SIZE);
+ new_page = page_pool_dev_alloc_pages(rx_chn->pg_pool);
/* if allocation fails we drop the packet but push the
* descriptor back to the ring with old skb to prevent a stall
*/
- if (!new_skb) {
+ if (!new_page) {
netdev_err(ndev,
- "skb alloc failed, dropped mgm pkt from flow %d\n",
+ "page alloc failed, dropped mgm pkt from flow %d\n",
flow_id);
- new_skb = skb;
- skb = NULL; /* return NULL */
- } else {
- /* return the filled skb */
- skb_put(skb, pkt_len);
+ new_page = page;
+ page = NULL; /* return NULL */
}
/* queue another DMA */
- ret = prueth_dma_rx_push(emac, new_skb, &emac->rx_mgm_chn);
- ret = prueth_dma_rx_push_mapped(emac, &emac->rx_mgm_chn, new_page,
+ PRUETH_MAX_PKT_SIZE);
if (WARN_ON(ret < 0))
- dev_kfree_skb_any(new_skb);
+ page_pool_recycle_direct(rx_chn->pg_pool, new_page);
- return skb;
+ return page;
}
static void prueth_tx_ts_sr1(struct prueth_emac *emac,
@@ -362,14 +360,14 @@ static void prueth_tx_ts_sr1(struct prueth_emac *emac,
static irqreturn_t prueth_rx_mgm_ts_thread_sr1(int irq, void *dev_id)
{
struct prueth_emac *emac = dev_id;
- struct sk_buff *skb;
+ struct page *page;
- skb = prueth_process_rx_mgm(emac, PRUETH_RX_MGM_FLOW_TIMESTAMP_SR1);
- if (!skb)
+ page = prueth_process_rx_mgm(emac, PRUETH_RX_MGM_FLOW_TIMESTAMP_SR1);
+ if (!page)
return IRQ_NONE;
- prueth_tx_ts_sr1(emac, (void *)skb->data);
- dev_kfree_skb_any(skb);
+ prueth_tx_ts_sr1(emac, (void *)page_address(page));
+ page_pool_recycle_direct(page->pp, page);
return IRQ_HANDLED;
}
@@ -377,15 +375,15 @@ static irqreturn_t prueth_rx_mgm_ts_thread_sr1(int irq, void *dev_id)
static irqreturn_t prueth_rx_mgm_rsp_thread(int irq, void *dev_id)
{
struct prueth_emac *emac = dev_id;
- struct sk_buff *skb;
+ struct page *page;
u32 rsp;
- skb = prueth_process_rx_mgm(emac, PRUETH_RX_MGM_FLOW_RESPONSE_SR1);
- if (!skb)
+ page = prueth_process_rx_mgm(emac, PRUETH_RX_MGM_FLOW_RESPONSE_SR1);
+ if (!page)
return IRQ_NONE;
/* Process command response */
- rsp = le32_to_cpu(*(__le32 *)skb->data) & 0xffff0000;
+ rsp = le32_to_cpu(*(__le32 *)page_address(page)) & 0xffff0000;
if (rsp == ICSSG_SHUTDOWN_CMD_SR1) {
netdev_dbg(emac->ndev, "f/w Shutdown cmd resp %x\n", rsp);
complete(&emac->cmd_complete);
@@ -394,7 +392,7 @@ static irqreturn_t prueth_rx_mgm_rsp_thread(int irq, void *dev_id)
complete(&emac->cmd_complete);
}
- dev_kfree_skb_any(skb);
+ page_pool_recycle_direct(page->pp, page);
return IRQ_HANDLED;
}
diff --git a/drivers/net/ethernet/ti/icssg/icssg_stats.c b/drivers/net/ethernet/ti/icssg/icssg_stats.c
index 8800bd3a8d07..6f0edae38ea2 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_stats.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_stats.c
@@ -26,6 +26,8 @@ void emac_update_hardware_stats(struct prueth_emac *emac)
u32 val, reg;
int i;
+ spin_lock(&prueth->stats_lock);
+
for (i = 0; i < ARRAY_SIZE(icssg_all_miig_stats); i++) {
regmap_read(prueth->miig_rt,
base + icssg_all_miig_stats[i].offset,
@@ -51,6 +53,8 @@ void emac_update_hardware_stats(struct prueth_emac *emac)
emac->pa_stats[i] += val;
}
}
+
+ spin_unlock(&prueth->stats_lock);
}
void icssg_stats_work_handler(struct work_struct *work)
diff --git a/drivers/net/ethernet/toshiba/Kconfig b/drivers/net/ethernet/toshiba/Kconfig
index 701e9b7c1c3b..b1e27e3b99eb 100644
--- a/drivers/net/ethernet/toshiba/Kconfig
+++ b/drivers/net/ethernet/toshiba/Kconfig
@@ -6,7 +6,7 @@
config NET_VENDOR_TOSHIBA
bool "Toshiba devices"
default y
- depends on PCI && (PPC_IBM_CELL_BLADE || MIPS) || PPC_PS3
+ depends on PCI && MIPS || PPC_PS3
help
If you have a network (Ethernet) card belonging to this class, say Y.
@@ -39,15 +39,6 @@ config GELIC_WIRELESS
the driver automatically distinguishes the models, you can
safely enable this option even if you have a wireless-less model.
-config SPIDER_NET
- tristate "Spider Gigabit Ethernet driver"
- depends on PCI && PPC_IBM_CELL_BLADE
- select FW_LOADER
- select SUNGEM_PHY
- help
- This driver supports the Gigabit Ethernet chips present on the
- Cell Processor-Based Blades from IBM.
-
config TC35815
tristate "TOSHIBA TC35815 Ethernet support"
depends on PCI && MIPS
diff --git a/drivers/net/ethernet/toshiba/Makefile b/drivers/net/ethernet/toshiba/Makefile
index f434fd0f429e..27e2164cf7e9 100644
--- a/drivers/net/ethernet/toshiba/Makefile
+++ b/drivers/net/ethernet/toshiba/Makefile
@@ -6,6 +6,4 @@
obj-$(CONFIG_GELIC_NET) += ps3_gelic.o
gelic_wireless-$(CONFIG_GELIC_WIRELESS) += ps3_gelic_wireless.o
ps3_gelic-objs += ps3_gelic_net.o $(gelic_wireless-y)
-spidernet-y += spider_net.o spider_net_ethtool.o
-obj-$(CONFIG_SPIDER_NET) += spidernet.o
obj-$(CONFIG_TC35815) += tc35815.o
diff --git a/drivers/net/ethernet/toshiba/spider_net.c b/drivers/net/ethernet/toshiba/spider_net.c
deleted file mode 100644
index a4937c18d7cb..000000000000
--- a/drivers/net/ethernet/toshiba/spider_net.c
+++ /dev/null
@@ -1,2556 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Network device driver for Cell Processor-Based Blade and Celleb platform
- *
- * (C) Copyright IBM Corp. 2005
- * (C) Copyright 2006 TOSHIBA CORPORATION
- *
- * Authors : Utz Bacher <utz.bacher@de.ibm.com>
- * Jens Osterkamp <Jens.Osterkamp@de.ibm.com>
- */
-
-#include <linux/compiler.h>
-#include <linux/crc32.h>
-#include <linux/delay.h>
-#include <linux/etherdevice.h>
-#include <linux/ethtool.h>
-#include <linux/firmware.h>
-#include <linux/if_vlan.h>
-#include <linux/in.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/gfp.h>
-#include <linux/ioport.h>
-#include <linux/ip.h>
-#include <linux/kernel.h>
-#include <linux/mii.h>
-#include <linux/module.h>
-#include <linux/netdevice.h>
-#include <linux/device.h>
-#include <linux/pci.h>
-#include <linux/skbuff.h>
-#include <linux/tcp.h>
-#include <linux/types.h>
-#include <linux/vmalloc.h>
-#include <linux/wait.h>
-#include <linux/workqueue.h>
-#include <linux/bitops.h>
-#include <linux/of.h>
-#include <net/checksum.h>
-
-#include "spider_net.h"
-
-MODULE_AUTHOR("Utz Bacher <utz.bacher@de.ibm.com> and Jens Osterkamp " \
- "<Jens.Osterkamp@de.ibm.com>");
-MODULE_DESCRIPTION("Spider Southbridge Gigabit Ethernet driver");
-MODULE_LICENSE("GPL");
-MODULE_VERSION(VERSION);
-MODULE_FIRMWARE(SPIDER_NET_FIRMWARE_NAME);
-
-static int rx_descriptors = SPIDER_NET_RX_DESCRIPTORS_DEFAULT;
-static int tx_descriptors = SPIDER_NET_TX_DESCRIPTORS_DEFAULT;
-
-module_param(rx_descriptors, int, 0444);
-module_param(tx_descriptors, int, 0444);
-
-MODULE_PARM_DESC(rx_descriptors, "number of descriptors used " \
- "in rx chains");
-MODULE_PARM_DESC(tx_descriptors, "number of descriptors used " \
- "in tx chain");
-
-char spider_net_driver_name[] = "spidernet";
-
-static const struct pci_device_id spider_net_pci_tbl[] = {
- { PCI_VENDOR_ID_TOSHIBA_2, PCI_DEVICE_ID_TOSHIBA_SPIDER_NET,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
- { 0, }
-};
-
-MODULE_DEVICE_TABLE(pci, spider_net_pci_tbl);
-
-/**
- * spider_net_read_reg - reads an SMMIO register of a card
- * @card: device structure
- * @reg: register to read from
- *
- * returns the content of the specified SMMIO register.
- */
-static inline u32
-spider_net_read_reg(struct spider_net_card *card, u32 reg)
-{
- /* We use the powerpc specific variants instead of readl_be() because
- * we know spidernet is not a real PCI device and we can thus avoid the
- * performance hit caused by the PCI workarounds.
- */
- return in_be32(card->regs + reg);
-}
-
-/**
- * spider_net_write_reg - writes to an SMMIO register of a card
- * @card: device structure
- * @reg: register to write to
- * @value: value to write into the specified SMMIO register
- */
-static inline void
-spider_net_write_reg(struct spider_net_card *card, u32 reg, u32 value)
-{
- /* We use the powerpc specific variants instead of writel_be() because
- * we know spidernet is not a real PCI device and we can thus avoid the
- * performance hit caused by the PCI workarounds.
- */
- out_be32(card->regs + reg, value);
-}
-
-/**
- * spider_net_write_phy - write to phy register
- * @netdev: adapter to be written to
- * @mii_id: id of MII
- * @reg: PHY register
- * @val: value to be written to phy register
- *
- * spider_net_write_phy_register writes to an arbitrary PHY
- * register via the spider GPCWOPCMD register. We assume the queue does
- * not run full (not more than 15 commands outstanding).
- **/
-static void
-spider_net_write_phy(struct net_device *netdev, int mii_id,
- int reg, int val)
-{
- struct spider_net_card *card = netdev_priv(netdev);
- u32 writevalue;
-
- writevalue = ((u32)mii_id << 21) |
- ((u32)reg << 16) | ((u32)val);
-
- spider_net_write_reg(card, SPIDER_NET_GPCWOPCMD, writevalue);
-}
-
-/**
- * spider_net_read_phy - read from phy register
- * @netdev: network device to be read from
- * @mii_id: id of MII
- * @reg: PHY register
- *
- * Returns value read from PHY register
- *
- * spider_net_write_phy reads from an arbitrary PHY
- * register via the spider GPCROPCMD register
- **/
-static int
-spider_net_read_phy(struct net_device *netdev, int mii_id, int reg)
-{
- struct spider_net_card *card = netdev_priv(netdev);
- u32 readvalue;
-
- readvalue = ((u32)mii_id << 21) | ((u32)reg << 16);
- spider_net_write_reg(card, SPIDER_NET_GPCROPCMD, readvalue);
-
- /* we don't use semaphores to wait for an SPIDER_NET_GPROPCMPINT
- * interrupt, as we poll for the completion of the read operation
- * in spider_net_read_phy. Should take about 50 us
- */
- do {
- readvalue = spider_net_read_reg(card, SPIDER_NET_GPCROPCMD);
- } while (readvalue & SPIDER_NET_GPREXEC);
-
- readvalue &= SPIDER_NET_GPRDAT_MASK;
-
- return readvalue;
-}
-
-/**
- * spider_net_setup_aneg - initial auto-negotiation setup
- * @card: device structure
- **/
-static void
-spider_net_setup_aneg(struct spider_net_card *card)
-{
- struct mii_phy *phy = &card->phy;
- u32 advertise = 0;
- u16 bmsr, estat;
-
- bmsr = spider_net_read_phy(card->netdev, phy->mii_id, MII_BMSR);
- estat = spider_net_read_phy(card->netdev, phy->mii_id, MII_ESTATUS);
-
- if (bmsr & BMSR_10HALF)
- advertise |= ADVERTISED_10baseT_Half;
- if (bmsr & BMSR_10FULL)
- advertise |= ADVERTISED_10baseT_Full;
- if (bmsr & BMSR_100HALF)
- advertise |= ADVERTISED_100baseT_Half;
- if (bmsr & BMSR_100FULL)
- advertise |= ADVERTISED_100baseT_Full;
-
- if ((bmsr & BMSR_ESTATEN) && (estat & ESTATUS_1000_TFULL))
- advertise |= SUPPORTED_1000baseT_Full;
- if ((bmsr & BMSR_ESTATEN) && (estat & ESTATUS_1000_THALF))
- advertise |= SUPPORTED_1000baseT_Half;
-
- sungem_phy_probe(phy, phy->mii_id);
- phy->def->ops->setup_aneg(phy, advertise);
-
-}
-
-/**
- * spider_net_rx_irq_off - switch off rx irq on this spider card
- * @card: device structure
- *
- * switches off rx irq by masking them out in the GHIINTnMSK register
- */
-static void
-spider_net_rx_irq_off(struct spider_net_card *card)
-{
- u32 regvalue;
-
- regvalue = SPIDER_NET_INT0_MASK_VALUE & (~SPIDER_NET_RXINT);
- spider_net_write_reg(card, SPIDER_NET_GHIINT0MSK, regvalue);
-}
-
-/**
- * spider_net_rx_irq_on - switch on rx irq on this spider card
- * @card: device structure
- *
- * switches on rx irq by enabling them in the GHIINTnMSK register
- */
-static void
-spider_net_rx_irq_on(struct spider_net_card *card)
-{
- u32 regvalue;
-
- regvalue = SPIDER_NET_INT0_MASK_VALUE | SPIDER_NET_RXINT;
- spider_net_write_reg(card, SPIDER_NET_GHIINT0MSK, regvalue);
-}
-
-/**
- * spider_net_set_promisc - sets the unicast address or the promiscuous mode
- * @card: card structure
- *
- * spider_net_set_promisc sets the unicast destination address filter and
- * thus either allows for non-promisc mode or promisc mode
- */
-static void
-spider_net_set_promisc(struct spider_net_card *card)
-{
- u32 macu, macl;
- struct net_device *netdev = card->netdev;
-
- if (netdev->flags & IFF_PROMISC) {
- /* clear destination entry 0 */
- spider_net_write_reg(card, SPIDER_NET_GMRUAFILnR, 0);
- spider_net_write_reg(card, SPIDER_NET_GMRUAFILnR + 0x04, 0);
- spider_net_write_reg(card, SPIDER_NET_GMRUA0FIL15R,
- SPIDER_NET_PROMISC_VALUE);
- } else {
- macu = netdev->dev_addr[0];
- macu <<= 8;
- macu |= netdev->dev_addr[1];
- memcpy(&macl, &netdev->dev_addr[2], sizeof(macl));
-
- macu |= SPIDER_NET_UA_DESCR_VALUE;
- spider_net_write_reg(card, SPIDER_NET_GMRUAFILnR, macu);
- spider_net_write_reg(card, SPIDER_NET_GMRUAFILnR + 0x04, macl);
- spider_net_write_reg(card, SPIDER_NET_GMRUA0FIL15R,
- SPIDER_NET_NONPROMISC_VALUE);
- }
-}
-
-/**
- * spider_net_get_descr_status -- returns the status of a descriptor
- * @hwdescr: descriptor to look at
- *
- * returns the status as in the dmac_cmd_status field of the descriptor
- */
-static inline int
-spider_net_get_descr_status(struct spider_net_hw_descr *hwdescr)
-{
- return hwdescr->dmac_cmd_status & SPIDER_NET_DESCR_IND_PROC_MASK;
-}
-
-/**
- * spider_net_free_chain - free descriptor chain
- * @card: card structure
- * @chain: address of chain
- *
- */
-static void
-spider_net_free_chain(struct spider_net_card *card,
- struct spider_net_descr_chain *chain)
-{
- struct spider_net_descr *descr;
-
- descr = chain->ring;
- do {
- descr->bus_addr = 0;
- descr->hwdescr->next_descr_addr = 0;
- descr = descr->next;
- } while (descr != chain->ring);
-
- dma_free_coherent(&card->pdev->dev, chain->num_desc * sizeof(struct spider_net_hw_descr),
- chain->hwring, chain->dma_addr);
-}
-
-/**
- * spider_net_init_chain - alloc and link descriptor chain
- * @card: card structure
- * @chain: address of chain
- *
- * We manage a circular list that mirrors the hardware structure,
- * except that the hardware uses bus addresses.
- *
- * Returns 0 on success, <0 on failure
- */
-static int
-spider_net_init_chain(struct spider_net_card *card,
- struct spider_net_descr_chain *chain)
-{
- int i;
- struct spider_net_descr *descr;
- struct spider_net_hw_descr *hwdescr;
- dma_addr_t buf;
- size_t alloc_size;
-
- alloc_size = chain->num_desc * sizeof(struct spider_net_hw_descr);
-
- chain->hwring = dma_alloc_coherent(&card->pdev->dev, alloc_size,
- &chain->dma_addr, GFP_KERNEL);
- if (!chain->hwring)
- return -ENOMEM;
-
- /* Set up the hardware pointers in each descriptor */
- descr = chain->ring;
- hwdescr = chain->hwring;
- buf = chain->dma_addr;
- for (i=0; i < chain->num_desc; i++, descr++, hwdescr++) {
- hwdescr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE;
- hwdescr->next_descr_addr = 0;
-
- descr->hwdescr = hwdescr;
- descr->bus_addr = buf;
- descr->next = descr + 1;
- descr->prev = descr - 1;
-
- buf += sizeof(struct spider_net_hw_descr);
- }
- /* do actual circular list */
- (descr-1)->next = chain->ring;
- chain->ring->prev = descr-1;
-
- spin_lock_init(&chain->lock);
- chain->head = chain->ring;
- chain->tail = chain->ring;
- return 0;
-}
-
-/**
- * spider_net_free_rx_chain_contents - frees descr contents in rx chain
- * @card: card structure
- *
- * returns 0 on success, <0 on failure
- */
-static void
-spider_net_free_rx_chain_contents(struct spider_net_card *card)
-{
- struct spider_net_descr *descr;
-
- descr = card->rx_chain.head;
- do {
- if (descr->skb) {
- dma_unmap_single(&card->pdev->dev,
- descr->hwdescr->buf_addr,
- SPIDER_NET_MAX_FRAME,
- DMA_BIDIRECTIONAL);
- dev_kfree_skb(descr->skb);
- descr->skb = NULL;
- }
- descr = descr->next;
- } while (descr != card->rx_chain.head);
-}
-
-/**
- * spider_net_prepare_rx_descr - Reinitialize RX descriptor
- * @card: card structure
- * @descr: descriptor to re-init
- *
- * Return 0 on success, <0 on failure.
- *
- * Allocates a new rx skb, iommu-maps it and attaches it to the
- * descriptor. Mark the descriptor as activated, ready-to-use.
- */
-static int
-spider_net_prepare_rx_descr(struct spider_net_card *card,
- struct spider_net_descr *descr)
-{
- struct spider_net_hw_descr *hwdescr = descr->hwdescr;
- dma_addr_t buf;
- int offset;
- int bufsize;
-
- /* we need to round up the buffer size to a multiple of 128 */
- bufsize = (SPIDER_NET_MAX_FRAME + SPIDER_NET_RXBUF_ALIGN - 1) &
- (~(SPIDER_NET_RXBUF_ALIGN - 1));
-
- /* and we need to have it 128 byte aligned, therefore we allocate a
- * bit more
- */
- /* allocate an skb */
- descr->skb = netdev_alloc_skb(card->netdev,
- bufsize + SPIDER_NET_RXBUF_ALIGN - 1);
- if (!descr->skb) {
- if (netif_msg_rx_err(card) && net_ratelimit())
- dev_err(&card->netdev->dev,
- "Not enough memory to allocate rx buffer\n");
- card->spider_stats.alloc_rx_skb_error++;
- return -ENOMEM;
- }
- hwdescr->buf_size = bufsize;
- hwdescr->result_size = 0;
- hwdescr->valid_size = 0;
- hwdescr->data_status = 0;
- hwdescr->data_error = 0;
-
- offset = ((unsigned long)descr->skb->data) &
- (SPIDER_NET_RXBUF_ALIGN - 1);
- if (offset)
- skb_reserve(descr->skb, SPIDER_NET_RXBUF_ALIGN - offset);
- /* iommu-map the skb */
- buf = dma_map_single(&card->pdev->dev, descr->skb->data,
- SPIDER_NET_MAX_FRAME, DMA_FROM_DEVICE);
- if (dma_mapping_error(&card->pdev->dev, buf)) {
- dev_kfree_skb_any(descr->skb);
- descr->skb = NULL;
- if (netif_msg_rx_err(card) && net_ratelimit())
- dev_err(&card->netdev->dev, "Could not iommu-map rx buffer\n");
- card->spider_stats.rx_iommu_map_error++;
- hwdescr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE;
- } else {
- hwdescr->buf_addr = buf;
- wmb();
- hwdescr->dmac_cmd_status = SPIDER_NET_DESCR_CARDOWNED |
- SPIDER_NET_DMAC_NOINTR_COMPLETE;
- }
-
- return 0;
-}
-
-/**
- * spider_net_enable_rxchtails - sets RX dmac chain tail addresses
- * @card: card structure
- *
- * spider_net_enable_rxchtails sets the RX DMAC chain tail addresses in the
- * chip by writing to the appropriate register. DMA is enabled in
- * spider_net_enable_rxdmac.
- */
-static inline void
-spider_net_enable_rxchtails(struct spider_net_card *card)
-{
- /* assume chain is aligned correctly */
- spider_net_write_reg(card, SPIDER_NET_GDADCHA ,
- card->rx_chain.tail->bus_addr);
-}
-
-/**
- * spider_net_enable_rxdmac - enables a receive DMA controller
- * @card: card structure
- *
- * spider_net_enable_rxdmac enables the DMA controller by setting RX_DMA_EN
- * in the GDADMACCNTR register
- */
-static inline void
-spider_net_enable_rxdmac(struct spider_net_card *card)
-{
- wmb();
- spider_net_write_reg(card, SPIDER_NET_GDADMACCNTR,
- SPIDER_NET_DMA_RX_VALUE);
-}
-
-/**
- * spider_net_disable_rxdmac - disables the receive DMA controller
- * @card: card structure
- *
- * spider_net_disable_rxdmac terminates processing on the DMA controller
- * by turing off the DMA controller, with the force-end flag set.
- */
-static inline void
-spider_net_disable_rxdmac(struct spider_net_card *card)
-{
- spider_net_write_reg(card, SPIDER_NET_GDADMACCNTR,
- SPIDER_NET_DMA_RX_FEND_VALUE);
-}
-
-/**
- * spider_net_refill_rx_chain - refills descriptors/skbs in the rx chains
- * @card: card structure
- *
- * refills descriptors in the rx chain: allocates skbs and iommu-maps them.
- */
-static void
-spider_net_refill_rx_chain(struct spider_net_card *card)
-{
- struct spider_net_descr_chain *chain = &card->rx_chain;
- unsigned long flags;
-
- /* one context doing the refill (and a second context seeing that
- * and omitting it) is ok. If called by NAPI, we'll be called again
- * as spider_net_decode_one_descr is called several times. If some
- * interrupt calls us, the NAPI is about to clean up anyway.
- */
- if (!spin_trylock_irqsave(&chain->lock, flags))
- return;
-
- while (spider_net_get_descr_status(chain->head->hwdescr) ==
- SPIDER_NET_DESCR_NOT_IN_USE) {
- if (spider_net_prepare_rx_descr(card, chain->head))
- break;
- chain->head = chain->head->next;
- }
-
- spin_unlock_irqrestore(&chain->lock, flags);
-}
-
-/**
- * spider_net_alloc_rx_skbs - Allocates rx skbs in rx descriptor chains
- * @card: card structure
- *
- * Returns 0 on success, <0 on failure.
- */
-static int
-spider_net_alloc_rx_skbs(struct spider_net_card *card)
-{
- struct spider_net_descr_chain *chain = &card->rx_chain;
- struct spider_net_descr *start = chain->tail;
- struct spider_net_descr *descr = start;
-
- /* Link up the hardware chain pointers */
- do {
- descr->prev->hwdescr->next_descr_addr = descr->bus_addr;
- descr = descr->next;
- } while (descr != start);
-
- /* Put at least one buffer into the chain. if this fails,
- * we've got a problem. If not, spider_net_refill_rx_chain
- * will do the rest at the end of this function.
- */
- if (spider_net_prepare_rx_descr(card, chain->head))
- goto error;
- else
- chain->head = chain->head->next;
-
- /* This will allocate the rest of the rx buffers;
- * if not, it's business as usual later on.
- */
- spider_net_refill_rx_chain(card);
- spider_net_enable_rxdmac(card);
- return 0;
-
-error:
- spider_net_free_rx_chain_contents(card);
- return -ENOMEM;
-}
-
-/**
- * spider_net_get_multicast_hash - generates hash for multicast filter table
- * @netdev: interface device structure
- * @addr: multicast address
- *
- * returns the hash value.
- *
- * spider_net_get_multicast_hash calculates a hash value for a given multicast
- * address, that is used to set the multicast filter tables
- */
-static u8
-spider_net_get_multicast_hash(struct net_device *netdev, __u8 *addr)
-{
- u32 crc;
- u8 hash;
- char addr_for_crc[ETH_ALEN] = { 0, };
- int i, bit;
-
- for (i = 0; i < ETH_ALEN * 8; i++) {
- bit = (addr[i / 8] >> (i % 8)) & 1;
- addr_for_crc[ETH_ALEN - 1 - i / 8] += bit << (7 - (i % 8));
- }
-
- crc = crc32_be(~0, addr_for_crc, netdev->addr_len);
-
- hash = (crc >> 27);
- hash <<= 3;
- hash |= crc & 7;
- hash &= 0xff;
-
- return hash;
-}
-
-/**
- * spider_net_set_multi - sets multicast addresses and promisc flags
- * @netdev: interface device structure
- *
- * spider_net_set_multi configures multicast addresses as needed for the
- * netdev interface. It also sets up multicast, allmulti and promisc
- * flags appropriately
- */
-static void
-spider_net_set_multi(struct net_device *netdev)
-{
- struct netdev_hw_addr *ha;
- u8 hash;
- int i;
- u32 reg;
- struct spider_net_card *card = netdev_priv(netdev);
- DECLARE_BITMAP(bitmask, SPIDER_NET_MULTICAST_HASHES);
-
- spider_net_set_promisc(card);
-
- if (netdev->flags & IFF_ALLMULTI) {
- bitmap_fill(bitmask, SPIDER_NET_MULTICAST_HASHES);
- goto write_hash;
- }
-
- bitmap_zero(bitmask, SPIDER_NET_MULTICAST_HASHES);
-
- /* well, we know, what the broadcast hash value is: it's xfd
- hash = spider_net_get_multicast_hash(netdev, netdev->broadcast); */
- __set_bit(0xfd, bitmask);
-
- netdev_for_each_mc_addr(ha, netdev) {
- hash = spider_net_get_multicast_hash(netdev, ha->addr);
- __set_bit(hash, bitmask);
- }
-
-write_hash:
- for (i = 0; i < SPIDER_NET_MULTICAST_HASHES / 4; i++) {
- reg = 0;
- if (test_bit(i * 4, bitmask))
- reg += 0x08;
- reg <<= 8;
- if (test_bit(i * 4 + 1, bitmask))
- reg += 0x08;
- reg <<= 8;
- if (test_bit(i * 4 + 2, bitmask))
- reg += 0x08;
- reg <<= 8;
- if (test_bit(i * 4 + 3, bitmask))
- reg += 0x08;
-
- spider_net_write_reg(card, SPIDER_NET_GMRMHFILnR + i * 4, reg);
- }
-}
-
-/**
- * spider_net_prepare_tx_descr - fill tx descriptor with skb data
- * @card: card structure
- * @skb: packet to use
- *
- * returns 0 on success, <0 on failure.
- *
- * fills out the descriptor structure with skb data and len. Copies data,
- * if needed (32bit DMA!)
- */
-static int
-spider_net_prepare_tx_descr(struct spider_net_card *card,
- struct sk_buff *skb)
-{
- struct spider_net_descr_chain *chain = &card->tx_chain;
- struct spider_net_descr *descr;
- struct spider_net_hw_descr *hwdescr;
- dma_addr_t buf;
- unsigned long flags;
-
- buf = dma_map_single(&card->pdev->dev, skb->data, skb->len,
- DMA_TO_DEVICE);
- if (dma_mapping_error(&card->pdev->dev, buf)) {
- if (netif_msg_tx_err(card) && net_ratelimit())
- dev_err(&card->netdev->dev, "could not iommu-map packet (%p, %i). "
- "Dropping packet\n", skb->data, skb->len);
- card->spider_stats.tx_iommu_map_error++;
- return -ENOMEM;
- }
-
- spin_lock_irqsave(&chain->lock, flags);
- descr = card->tx_chain.head;
- if (descr->next == chain->tail->prev) {
- spin_unlock_irqrestore(&chain->lock, flags);
- dma_unmap_single(&card->pdev->dev, buf, skb->len,
- DMA_TO_DEVICE);
- return -ENOMEM;
- }
- hwdescr = descr->hwdescr;
- chain->head = descr->next;
-
- descr->skb = skb;
- hwdescr->buf_addr = buf;
- hwdescr->buf_size = skb->len;
- hwdescr->next_descr_addr = 0;
- hwdescr->data_status = 0;
-
- hwdescr->dmac_cmd_status =
- SPIDER_NET_DESCR_CARDOWNED | SPIDER_NET_DMAC_TXFRMTL;
- spin_unlock_irqrestore(&chain->lock, flags);
-
- if (skb->ip_summed == CHECKSUM_PARTIAL)
- switch (ip_hdr(skb)->protocol) {
- case IPPROTO_TCP:
- hwdescr->dmac_cmd_status |= SPIDER_NET_DMAC_TCP;
- break;
- case IPPROTO_UDP:
- hwdescr->dmac_cmd_status |= SPIDER_NET_DMAC_UDP;
- break;
- }
-
- /* Chain the bus address, so that the DMA engine finds this descr. */
- wmb();
- descr->prev->hwdescr->next_descr_addr = descr->bus_addr;
-
- netif_trans_update(card->netdev); /* set netdev watchdog timer */
- return 0;
-}
-
-static int
-spider_net_set_low_watermark(struct spider_net_card *card)
-{
- struct spider_net_descr *descr = card->tx_chain.tail;
- struct spider_net_hw_descr *hwdescr;
- unsigned long flags;
- int status;
- int cnt=0;
- int i;
-
- /* Measure the length of the queue. Measurement does not
- * need to be precise -- does not need a lock.
- */
- while (descr != card->tx_chain.head) {
- status = descr->hwdescr->dmac_cmd_status & SPIDER_NET_DESCR_NOT_IN_USE;
- if (status == SPIDER_NET_DESCR_NOT_IN_USE)
- break;
- descr = descr->next;
- cnt++;
- }
-
- /* If TX queue is short, don't even bother with interrupts */
- if (cnt < card->tx_chain.num_desc/4)
- return cnt;
-
- /* Set low-watermark 3/4th's of the way into the queue. */
- descr = card->tx_chain.tail;
- cnt = (cnt*3)/4;
- for (i=0;i<cnt; i++)
- descr = descr->next;
-
- /* Set the new watermark, clear the old watermark */
- spin_lock_irqsave(&card->tx_chain.lock, flags);
- descr->hwdescr->dmac_cmd_status |= SPIDER_NET_DESCR_TXDESFLG;
- if (card->low_watermark && card->low_watermark != descr) {
- hwdescr = card->low_watermark->hwdescr;
- hwdescr->dmac_cmd_status =
- hwdescr->dmac_cmd_status & ~SPIDER_NET_DESCR_TXDESFLG;
- }
- card->low_watermark = descr;
- spin_unlock_irqrestore(&card->tx_chain.lock, flags);
- return cnt;
-}
-
-/**
- * spider_net_release_tx_chain - processes sent tx descriptors
- * @card: adapter structure
- * @brutal: if set, don't care about whether descriptor seems to be in use
- *
- * returns 0 if the tx ring is empty, otherwise 1.
- *
- * spider_net_release_tx_chain releases the tx descriptors that spider has
- * finished with (if non-brutal) or simply release tx descriptors (if brutal).
- * If some other context is calling this function, we return 1 so that we're
- * scheduled again (if we were scheduled) and will not lose initiative.
- */
-static int
-spider_net_release_tx_chain(struct spider_net_card *card, int brutal)
-{
- struct net_device *dev = card->netdev;
- struct spider_net_descr_chain *chain = &card->tx_chain;
- struct spider_net_descr *descr;
- struct spider_net_hw_descr *hwdescr;
- struct sk_buff *skb;
- u32 buf_addr;
- unsigned long flags;
- int status;
-
- while (1) {
- spin_lock_irqsave(&chain->lock, flags);
- if (chain->tail == chain->head) {
- spin_unlock_irqrestore(&chain->lock, flags);
- return 0;
- }
- descr = chain->tail;
- hwdescr = descr->hwdescr;
-
- status = spider_net_get_descr_status(hwdescr);
- switch (status) {
- case SPIDER_NET_DESCR_COMPLETE:
- dev->stats.tx_packets++;
- dev->stats.tx_bytes += descr->skb->len;
- break;
-
- case SPIDER_NET_DESCR_CARDOWNED:
- if (!brutal) {
- spin_unlock_irqrestore(&chain->lock, flags);
- return 1;
- }
-
- /* fallthrough, if we release the descriptors
- * brutally (then we don't care about
- * SPIDER_NET_DESCR_CARDOWNED)
- */
- fallthrough;
-
- case SPIDER_NET_DESCR_RESPONSE_ERROR:
- case SPIDER_NET_DESCR_PROTECTION_ERROR:
- case SPIDER_NET_DESCR_FORCE_END:
- if (netif_msg_tx_err(card))
- dev_err(&card->netdev->dev, "forcing end of tx descriptor "
- "with status x%02x\n", status);
- dev->stats.tx_errors++;
- break;
-
- default:
- dev->stats.tx_dropped++;
- if (!brutal) {
- spin_unlock_irqrestore(&chain->lock, flags);
- return 1;
- }
- }
-
- chain->tail = descr->next;
- hwdescr->dmac_cmd_status |= SPIDER_NET_DESCR_NOT_IN_USE;
- skb = descr->skb;
- descr->skb = NULL;
- buf_addr = hwdescr->buf_addr;
- spin_unlock_irqrestore(&chain->lock, flags);
-
- /* unmap the skb */
- if (skb) {
- dma_unmap_single(&card->pdev->dev, buf_addr, skb->len,
- DMA_TO_DEVICE);
- dev_consume_skb_any(skb);
- }
- }
- return 0;
-}
-
-/**
- * spider_net_kick_tx_dma - enables TX DMA processing
- * @card: card structure
- *
- * This routine will start the transmit DMA running if
- * it is not already running. This routine ned only be
- * called when queueing a new packet to an empty tx queue.
- * Writes the current tx chain head as start address
- * of the tx descriptor chain and enables the transmission
- * DMA engine.
- */
-static inline void
-spider_net_kick_tx_dma(struct spider_net_card *card)
-{
- struct spider_net_descr *descr;
-
- if (spider_net_read_reg(card, SPIDER_NET_GDTDMACCNTR) &
- SPIDER_NET_TX_DMA_EN)
- goto out;
-
- descr = card->tx_chain.tail;
- for (;;) {
- if (spider_net_get_descr_status(descr->hwdescr) ==
- SPIDER_NET_DESCR_CARDOWNED) {
- spider_net_write_reg(card, SPIDER_NET_GDTDCHA,
- descr->bus_addr);
- spider_net_write_reg(card, SPIDER_NET_GDTDMACCNTR,
- SPIDER_NET_DMA_TX_VALUE);
- break;
- }
- if (descr == card->tx_chain.head)
- break;
- descr = descr->next;
- }
-
-out:
- mod_timer(&card->tx_timer, jiffies + SPIDER_NET_TX_TIMER);
-}
-
-/**
- * spider_net_xmit - transmits a frame over the device
- * @skb: packet to send out
- * @netdev: interface device structure
- *
- * returns NETDEV_TX_OK on success, NETDEV_TX_BUSY on failure
- */
-static netdev_tx_t
-spider_net_xmit(struct sk_buff *skb, struct net_device *netdev)
-{
- int cnt;
- struct spider_net_card *card = netdev_priv(netdev);
-
- spider_net_release_tx_chain(card, 0);
-
- if (spider_net_prepare_tx_descr(card, skb) != 0) {
- netdev->stats.tx_dropped++;
- netif_stop_queue(netdev);
- return NETDEV_TX_BUSY;
- }
-
- cnt = spider_net_set_low_watermark(card);
- if (cnt < 5)
- spider_net_kick_tx_dma(card);
- return NETDEV_TX_OK;
-}
-
-/**
- * spider_net_cleanup_tx_ring - cleans up the TX ring
- * @t: timer context used to obtain the pointer to net card data structure
- *
- * spider_net_cleanup_tx_ring is called by either the tx_timer
- * or from the NAPI polling routine.
- * This routine releases resources associted with transmitted
- * packets, including updating the queue tail pointer.
- */
-static void
-spider_net_cleanup_tx_ring(struct timer_list *t)
-{
- struct spider_net_card *card = from_timer(card, t, tx_timer);
- if ((spider_net_release_tx_chain(card, 0) != 0) &&
- (card->netdev->flags & IFF_UP)) {
- spider_net_kick_tx_dma(card);
- netif_wake_queue(card->netdev);
- }
-}
-
-/**
- * spider_net_do_ioctl - called for device ioctls
- * @netdev: interface device structure
- * @ifr: request parameter structure for ioctl
- * @cmd: command code for ioctl
- *
- * returns 0 on success, <0 on failure. Currently, we have no special ioctls.
- * -EOPNOTSUPP is returned, if an unknown ioctl was requested
- */
-static int
-spider_net_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
-{
- switch (cmd) {
- default:
- return -EOPNOTSUPP;
- }
-}
-
-/**
- * spider_net_pass_skb_up - takes an skb from a descriptor and passes it on
- * @descr: descriptor to process
- * @card: card structure
- *
- * Fills out skb structure and passes the data to the stack.
- * The descriptor state is not changed.
- */
-static void
-spider_net_pass_skb_up(struct spider_net_descr *descr,
- struct spider_net_card *card)
-{
- struct spider_net_hw_descr *hwdescr = descr->hwdescr;
- struct sk_buff *skb = descr->skb;
- struct net_device *netdev = card->netdev;
- u32 data_status = hwdescr->data_status;
- u32 data_error = hwdescr->data_error;
-
- skb_put(skb, hwdescr->valid_size);
-
- /* the card seems to add 2 bytes of junk in front
- * of the ethernet frame
- */
-#define SPIDER_MISALIGN 2
- skb_pull(skb, SPIDER_MISALIGN);
- skb->protocol = eth_type_trans(skb, netdev);
-
- /* checksum offload */
- skb_checksum_none_assert(skb);
- if (netdev->features & NETIF_F_RXCSUM) {
- if ( ( (data_status & SPIDER_NET_DATA_STATUS_CKSUM_MASK) ==
- SPIDER_NET_DATA_STATUS_CKSUM_MASK) &&
- !(data_error & SPIDER_NET_DATA_ERR_CKSUM_MASK))
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- }
-
- if (data_status & SPIDER_NET_VLAN_PACKET) {
- /* further enhancements: HW-accel VLAN */
- }
-
- /* update netdevice statistics */
- netdev->stats.rx_packets++;
- netdev->stats.rx_bytes += skb->len;
-
- /* pass skb up to stack */
- netif_receive_skb(skb);
-}
-
-static void show_rx_chain(struct spider_net_card *card)
-{
- struct spider_net_descr_chain *chain = &card->rx_chain;
- struct spider_net_descr *start= chain->tail;
- struct spider_net_descr *descr= start;
- struct spider_net_hw_descr *hwd = start->hwdescr;
- struct device *dev = &card->netdev->dev;
- u32 curr_desc, next_desc;
- int status;
-
- int tot = 0;
- int cnt = 0;
- int off = start - chain->ring;
- int cstat = hwd->dmac_cmd_status;
-
- dev_info(dev, "Total number of descrs=%d\n",
- chain->num_desc);
- dev_info(dev, "Chain tail located at descr=%d, status=0x%x\n",
- off, cstat);
-
- curr_desc = spider_net_read_reg(card, SPIDER_NET_GDACTDPA);
- next_desc = spider_net_read_reg(card, SPIDER_NET_GDACNEXTDA);
-
- status = cstat;
- do
- {
- hwd = descr->hwdescr;
- off = descr - chain->ring;
- status = hwd->dmac_cmd_status;
-
- if (descr == chain->head)
- dev_info(dev, "Chain head is at %d, head status=0x%x\n",
- off, status);
-
- if (curr_desc == descr->bus_addr)
- dev_info(dev, "HW curr desc (GDACTDPA) is at %d, status=0x%x\n",
- off, status);
-
- if (next_desc == descr->bus_addr)
- dev_info(dev, "HW next desc (GDACNEXTDA) is at %d, status=0x%x\n",
- off, status);
-
- if (hwd->next_descr_addr == 0)
- dev_info(dev, "chain is cut at %d\n", off);
-
- if (cstat != status) {
- int from = (chain->num_desc + off - cnt) % chain->num_desc;
- int to = (chain->num_desc + off - 1) % chain->num_desc;
- dev_info(dev, "Have %d (from %d to %d) descrs "
- "with stat=0x%08x\n", cnt, from, to, cstat);
- cstat = status;
- cnt = 0;
- }
-
- cnt ++;
- tot ++;
- descr = descr->next;
- } while (descr != start);
-
- dev_info(dev, "Last %d descrs with stat=0x%08x "
- "for a total of %d descrs\n", cnt, cstat, tot);
-
-#ifdef DEBUG
- /* Now dump the whole ring */
- descr = start;
- do
- {
- struct spider_net_hw_descr *hwd = descr->hwdescr;
- status = spider_net_get_descr_status(hwd);
- cnt = descr - chain->ring;
- dev_info(dev, "Descr %d stat=0x%08x skb=%p\n",
- cnt, status, descr->skb);
- dev_info(dev, "bus addr=%08x buf addr=%08x sz=%d\n",
- descr->bus_addr, hwd->buf_addr, hwd->buf_size);
- dev_info(dev, "next=%08x result sz=%d valid sz=%d\n",
- hwd->next_descr_addr, hwd->result_size,
- hwd->valid_size);
- dev_info(dev, "dmac=%08x data stat=%08x data err=%08x\n",
- hwd->dmac_cmd_status, hwd->data_status,
- hwd->data_error);
- dev_info(dev, "\n");
-
- descr = descr->next;
- } while (descr != start);
-#endif
-
-}
-
-/**
- * spider_net_resync_head_ptr - Advance head ptr past empty descrs
- * @card: card structure
- *
- * If the driver fails to keep up and empty the queue, then the
- * hardware wil run out of room to put incoming packets. This
- * will cause the hardware to skip descrs that are full (instead
- * of halting/retrying). Thus, once the driver runs, it wil need
- * to "catch up" to where the hardware chain pointer is at.
- */
-static void spider_net_resync_head_ptr(struct spider_net_card *card)
-{
- unsigned long flags;
- struct spider_net_descr_chain *chain = &card->rx_chain;
- struct spider_net_descr *descr;
- int i, status;
-
- /* Advance head pointer past any empty descrs */
- descr = chain->head;
- status = spider_net_get_descr_status(descr->hwdescr);
-
- if (status == SPIDER_NET_DESCR_NOT_IN_USE)
- return;
-
- spin_lock_irqsave(&chain->lock, flags);
-
- descr = chain->head;
- status = spider_net_get_descr_status(descr->hwdescr);
- for (i=0; i<chain->num_desc; i++) {
- if (status != SPIDER_NET_DESCR_CARDOWNED) break;
- descr = descr->next;
- status = spider_net_get_descr_status(descr->hwdescr);
- }
- chain->head = descr;
-
- spin_unlock_irqrestore(&chain->lock, flags);
-}
-
-static int spider_net_resync_tail_ptr(struct spider_net_card *card)
-{
- struct spider_net_descr_chain *chain = &card->rx_chain;
- struct spider_net_descr *descr;
- int i, status;
-
- /* Advance tail pointer past any empty and reaped descrs */
- descr = chain->tail;
- status = spider_net_get_descr_status(descr->hwdescr);
-
- for (i=0; i<chain->num_desc; i++) {
- if ((status != SPIDER_NET_DESCR_CARDOWNED) &&
- (status != SPIDER_NET_DESCR_NOT_IN_USE)) break;
- descr = descr->next;
- status = spider_net_get_descr_status(descr->hwdescr);
- }
- chain->tail = descr;
-
- if ((i == chain->num_desc) || (i == 0))
- return 1;
- return 0;
-}
-
-/**
- * spider_net_decode_one_descr - processes an RX descriptor
- * @card: card structure
- *
- * Returns 1 if a packet has been sent to the stack, otherwise 0.
- *
- * Processes an RX descriptor by iommu-unmapping the data buffer
- * and passing the packet up to the stack. This function is called
- * in softirq context, e.g. either bottom half from interrupt or
- * NAPI polling context.
- */
-static int
-spider_net_decode_one_descr(struct spider_net_card *card)
-{
- struct net_device *dev = card->netdev;
- struct spider_net_descr_chain *chain = &card->rx_chain;
- struct spider_net_descr *descr = chain->tail;
- struct spider_net_hw_descr *hwdescr = descr->hwdescr;
- u32 hw_buf_addr;
- int status;
-
- status = spider_net_get_descr_status(hwdescr);
-
- /* Nothing in the descriptor, or ring must be empty */
- if ((status == SPIDER_NET_DESCR_CARDOWNED) ||
- (status == SPIDER_NET_DESCR_NOT_IN_USE))
- return 0;
-
- /* descriptor definitively used -- move on tail */
- chain->tail = descr->next;
-
- /* unmap descriptor */
- hw_buf_addr = hwdescr->buf_addr;
- hwdescr->buf_addr = 0xffffffff;
- dma_unmap_single(&card->pdev->dev, hw_buf_addr, SPIDER_NET_MAX_FRAME,
- DMA_FROM_DEVICE);
-
- if ( (status == SPIDER_NET_DESCR_RESPONSE_ERROR) ||
- (status == SPIDER_NET_DESCR_PROTECTION_ERROR) ||
- (status == SPIDER_NET_DESCR_FORCE_END) ) {
- if (netif_msg_rx_err(card))
- dev_err(&dev->dev,
- "dropping RX descriptor with state %d\n", status);
- dev->stats.rx_dropped++;
- goto bad_desc;
- }
-
- if ( (status != SPIDER_NET_DESCR_COMPLETE) &&
- (status != SPIDER_NET_DESCR_FRAME_END) ) {
- if (netif_msg_rx_err(card))
- dev_err(&card->netdev->dev,
- "RX descriptor with unknown state %d\n", status);
- card->spider_stats.rx_desc_unk_state++;
- goto bad_desc;
- }
-
-	/* The cases in which we throw away the packet immediately */
- if (hwdescr->data_error & SPIDER_NET_DESTROY_RX_FLAGS) {
- if (netif_msg_rx_err(card))
- dev_err(&card->netdev->dev,
- "error in received descriptor found, "
- "data_status=x%08x, data_error=x%08x\n",
- hwdescr->data_status, hwdescr->data_error);
- goto bad_desc;
- }
-
- if (hwdescr->dmac_cmd_status & SPIDER_NET_DESCR_BAD_STATUS) {
- dev_err(&card->netdev->dev, "bad status, cmd_status=x%08x\n",
- hwdescr->dmac_cmd_status);
- pr_err("buf_addr=x%08x\n", hw_buf_addr);
- pr_err("buf_size=x%08x\n", hwdescr->buf_size);
- pr_err("next_descr_addr=x%08x\n", hwdescr->next_descr_addr);
- pr_err("result_size=x%08x\n", hwdescr->result_size);
- pr_err("valid_size=x%08x\n", hwdescr->valid_size);
- pr_err("data_status=x%08x\n", hwdescr->data_status);
- pr_err("data_error=x%08x\n", hwdescr->data_error);
- pr_err("which=%ld\n", descr - card->rx_chain.ring);
-
- card->spider_stats.rx_desc_error++;
- goto bad_desc;
- }
-
- /* Ok, we've got a packet in descr */
- spider_net_pass_skb_up(descr, card);
- descr->skb = NULL;
- hwdescr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE;
- return 1;
-
-bad_desc:
- if (netif_msg_rx_err(card))
- show_rx_chain(card);
- dev_kfree_skb_irq(descr->skb);
- descr->skb = NULL;
- hwdescr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE;
- return 0;
-}
-
-/**
- * spider_net_poll - NAPI poll function called by the stack to return packets
- * @napi: napi device structure
- * @budget: number of packets we can pass to the stack at most
- *
- * Returns the number of packets processed (also reported to the NAPI
- * core). If fewer packets than the budget were processed, NAPI is
- * completed and the RX interrupt is re-enabled; otherwise the driver
- * stays scheduled and will be polled again.
- *
- * spider_net_poll passes all received packets from the RX descriptors
- * up to the stack (using netif_receive_skb).
- */
-static int spider_net_poll(struct napi_struct *napi, int budget)
-{
- struct spider_net_card *card = container_of(napi, struct spider_net_card, napi);
- int packets_done = 0;
-
- while (packets_done < budget) {
- if (!spider_net_decode_one_descr(card))
- break;
-
- packets_done++;
- }
-
- if ((packets_done == 0) && (card->num_rx_ints != 0)) {
- if (!spider_net_resync_tail_ptr(card))
- packets_done = budget;
- spider_net_resync_head_ptr(card);
- }
- card->num_rx_ints = 0;
-
- spider_net_refill_rx_chain(card);
- spider_net_enable_rxdmac(card);
-
- spider_net_cleanup_tx_ring(&card->tx_timer);
-
-	/* if all available packets fit into the budget, complete NAPI and
-	 * re-enable interrupts; otherwise stay scheduled for another poll
-	 */
- if (packets_done < budget) {
- napi_complete_done(napi, packets_done);
- spider_net_rx_irq_on(card);
- card->ignore_rx_ramfull = 0;
- }
-
- return packets_done;
-}
-
-/**
- * spider_net_set_mac - sets the MAC of an interface
- * @netdev: interface device structure
- * @p: pointer to new MAC address
- *
- * Returns 0 on success, -EADDRNOTAVAIL if the given address is not a
- * valid Ethernet address.
- */
-static int
-spider_net_set_mac(struct net_device *netdev, void *p)
-{
- struct spider_net_card *card = netdev_priv(netdev);
- u32 macl, macu, regvalue;
- struct sockaddr *addr = p;
-
- if (!is_valid_ether_addr(addr->sa_data))
- return -EADDRNOTAVAIL;
-
- eth_hw_addr_set(netdev, addr->sa_data);
-
- /* switch off GMACTPE and GMACRPE */
- regvalue = spider_net_read_reg(card, SPIDER_NET_GMACOPEMD);
- regvalue &= ~((1 << 5) | (1 << 6));
- spider_net_write_reg(card, SPIDER_NET_GMACOPEMD, regvalue);
-
- /* write mac */
- macu = (netdev->dev_addr[0]<<24) + (netdev->dev_addr[1]<<16) +
- (netdev->dev_addr[2]<<8) + (netdev->dev_addr[3]);
- macl = (netdev->dev_addr[4]<<8) + (netdev->dev_addr[5]);
- spider_net_write_reg(card, SPIDER_NET_GMACUNIMACU, macu);
- spider_net_write_reg(card, SPIDER_NET_GMACUNIMACL, macl);
-
- /* switch GMACTPE and GMACRPE back on */
- regvalue = spider_net_read_reg(card, SPIDER_NET_GMACOPEMD);
- regvalue |= ((1 << 5) | (1 << 6));
- spider_net_write_reg(card, SPIDER_NET_GMACOPEMD, regvalue);
-
- spider_net_set_promisc(card);
-
- return 0;
-}
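-
-/* Illustrative sketch (not part of the original driver): how the six MAC
- * bytes map onto the GMACUNIMACU/GMACUNIMACL registers written above.
- * The helper name is hypothetical and exists only for illustration.
- * With a made-up, locally administered address 02:11:22:33:44:55 this
- * yields macu = 0x02112233 and macl = 0x00004455.
- */
-static inline void spider_net_pack_mac_example(const u8 mac[6],
-					       u32 *macu, u32 *macl)
-{
-	*macu = (mac[0] << 24) | (mac[1] << 16) | (mac[2] << 8) | mac[3];
-	*macl = (mac[4] << 8) | mac[5];
-}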
-
-/**
- * spider_net_link_reset
- * @netdev: net device structure
- *
- * This is called when the PHY_LINK signal is asserted. On the blade this
- * signal is not connected, so we should never get here.
- *
- */
-static void
-spider_net_link_reset(struct net_device *netdev)
-{
-
- struct spider_net_card *card = netdev_priv(netdev);
-
- del_timer_sync(&card->aneg_timer);
-
- /* clear interrupt, block further interrupts */
- spider_net_write_reg(card, SPIDER_NET_GMACST,
- spider_net_read_reg(card, SPIDER_NET_GMACST));
- spider_net_write_reg(card, SPIDER_NET_GMACINTEN, 0);
-
- /* reset phy and setup aneg */
- card->aneg_count = 0;
- card->medium = BCM54XX_COPPER;
- spider_net_setup_aneg(card);
- mod_timer(&card->aneg_timer, jiffies + SPIDER_NET_ANEG_TIMER);
-
-}
-
-/**
- * spider_net_handle_error_irq - handles errors raised by an interrupt
- * @card: card structure
- * @status_reg: interrupt status register 0 (GHIINT0STS)
- * @error_reg1: interrupt status register 1 (GHIINT1STS)
- * @error_reg2: interrupt status register 2 (GHIINT2STS)
- *
- * spider_net_handle_error_irq handles or ignores all error conditions
- * reported in the interrupt status registers when an interrupt is raised
- */
-static void
-spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg,
- u32 error_reg1, u32 error_reg2)
-{
- u32 i;
- int show_error = 1;
-
- /* check GHIINT0STS ************************************/
- if (status_reg)
- for (i = 0; i < 32; i++)
- if (status_reg & (1<<i))
- switch (i)
- {
-			/* let the error_reg1 and error_reg2 evaluation decide what to do
- case SPIDER_NET_PHYINT:
- case SPIDER_NET_GMAC2INT:
- case SPIDER_NET_GMAC1INT:
- case SPIDER_NET_GFIFOINT:
- case SPIDER_NET_DMACINT:
- case SPIDER_NET_GSYSINT:
- break; */
-
- case SPIDER_NET_GIPSINT:
- show_error = 0;
- break;
-
- case SPIDER_NET_GPWOPCMPINT:
- /* PHY write operation completed */
- show_error = 0;
- break;
- case SPIDER_NET_GPROPCMPINT:
- /* PHY read operation completed */
- /* we don't use semaphores, as we poll for the completion
- * of the read operation in spider_net_read_phy. Should take
- * about 50 us
- */
- show_error = 0;
- break;
- case SPIDER_NET_GPWFFINT:
- /* PHY command queue full */
- if (netif_msg_intr(card))
- dev_err(&card->netdev->dev, "PHY write queue full\n");
- show_error = 0;
- break;
-
- /* case SPIDER_NET_GRMDADRINT: not used. print a message */
- /* case SPIDER_NET_GRMARPINT: not used. print a message */
- /* case SPIDER_NET_GRMMPINT: not used. print a message */
-
- case SPIDER_NET_GDTDEN0INT:
- /* someone has set TX_DMA_EN to 0 */
- show_error = 0;
- break;
-
- case SPIDER_NET_GDDDEN0INT:
- case SPIDER_NET_GDCDEN0INT:
- case SPIDER_NET_GDBDEN0INT:
- case SPIDER_NET_GDADEN0INT:
- /* someone has set RX_DMA_EN to 0 */
- show_error = 0;
- break;
-
- /* RX interrupts */
- case SPIDER_NET_GDDFDCINT:
- case SPIDER_NET_GDCFDCINT:
- case SPIDER_NET_GDBFDCINT:
- case SPIDER_NET_GDAFDCINT:
- /* case SPIDER_NET_GDNMINT: not used. print a message */
- /* case SPIDER_NET_GCNMINT: not used. print a message */
- /* case SPIDER_NET_GBNMINT: not used. print a message */
- /* case SPIDER_NET_GANMINT: not used. print a message */
- /* case SPIDER_NET_GRFNMINT: not used. print a message */
- show_error = 0;
- break;
-
- /* TX interrupts */
- case SPIDER_NET_GDTFDCINT:
- show_error = 0;
- break;
- case SPIDER_NET_GTTEDINT:
- show_error = 0;
- break;
- case SPIDER_NET_GDTDCEINT:
- /* chain end. If a descriptor should be sent, kick off
- * tx dma
- if (card->tx_chain.tail != card->tx_chain.head)
- spider_net_kick_tx_dma(card);
- */
- show_error = 0;
- break;
-
- /* case SPIDER_NET_G1TMCNTINT: not used. print a message */
- /* case SPIDER_NET_GFREECNTINT: not used. print a message */
- }
-
- /* check GHIINT1STS ************************************/
- if (error_reg1)
- for (i = 0; i < 32; i++)
- if (error_reg1 & (1<<i))
- switch (i)
- {
- case SPIDER_NET_GTMFLLINT:
-			/* TX RAM full can happen in normal operation.
-			 * Logging is not needed.
- */
- show_error = 0;
- break;
- case SPIDER_NET_GRFDFLLINT:
- case SPIDER_NET_GRFCFLLINT:
- case SPIDER_NET_GRFBFLLINT:
- case SPIDER_NET_GRFAFLLINT:
- case SPIDER_NET_GRMFLLINT:
- /* Could happen when rx chain is full */
- if (card->ignore_rx_ramfull == 0) {
- card->ignore_rx_ramfull = 1;
- spider_net_resync_head_ptr(card);
- spider_net_refill_rx_chain(card);
- spider_net_enable_rxdmac(card);
- card->num_rx_ints ++;
- napi_schedule(&card->napi);
- }
- show_error = 0;
- break;
-
- /* case SPIDER_NET_GTMSHTINT: problem, print a message */
- case SPIDER_NET_GDTINVDINT:
- /* allrighty. tx from previous descr ok */
- show_error = 0;
- break;
-
- /* chain end */
- case SPIDER_NET_GDDDCEINT:
- case SPIDER_NET_GDCDCEINT:
- case SPIDER_NET_GDBDCEINT:
- case SPIDER_NET_GDADCEINT:
- spider_net_resync_head_ptr(card);
- spider_net_refill_rx_chain(card);
- spider_net_enable_rxdmac(card);
- card->num_rx_ints ++;
- napi_schedule(&card->napi);
- show_error = 0;
- break;
-
- /* invalid descriptor */
- case SPIDER_NET_GDDINVDINT:
- case SPIDER_NET_GDCINVDINT:
- case SPIDER_NET_GDBINVDINT:
- case SPIDER_NET_GDAINVDINT:
- /* Could happen when rx chain is full */
- spider_net_resync_head_ptr(card);
- spider_net_refill_rx_chain(card);
- spider_net_enable_rxdmac(card);
- card->num_rx_ints ++;
- napi_schedule(&card->napi);
- show_error = 0;
- break;
-
- /* case SPIDER_NET_GDTRSERINT: problem, print a message */
- /* case SPIDER_NET_GDDRSERINT: problem, print a message */
- /* case SPIDER_NET_GDCRSERINT: problem, print a message */
- /* case SPIDER_NET_GDBRSERINT: problem, print a message */
- /* case SPIDER_NET_GDARSERINT: problem, print a message */
- /* case SPIDER_NET_GDSERINT: problem, print a message */
- /* case SPIDER_NET_GDTPTERINT: problem, print a message */
- /* case SPIDER_NET_GDDPTERINT: problem, print a message */
- /* case SPIDER_NET_GDCPTERINT: problem, print a message */
- /* case SPIDER_NET_GDBPTERINT: problem, print a message */
- /* case SPIDER_NET_GDAPTERINT: problem, print a message */
- default:
- show_error = 1;
- break;
- }
-
- /* check GHIINT2STS ************************************/
- if (error_reg2)
- for (i = 0; i < 32; i++)
- if (error_reg2 & (1<<i))
- switch (i)
- {
-			/* there is nothing we can (or want to) do at this time. Log a
-			 * message; we can switch handling of the specific bits on and off later
- case SPIDER_NET_GPROPERINT:
- case SPIDER_NET_GMCTCRSNGINT:
- case SPIDER_NET_GMCTLCOLINT:
- case SPIDER_NET_GMCTTMOTINT:
- case SPIDER_NET_GMCRCAERINT:
- case SPIDER_NET_GMCRCALERINT:
- case SPIDER_NET_GMCRALNERINT:
- case SPIDER_NET_GMCROVRINT:
- case SPIDER_NET_GMCRRNTINT:
- case SPIDER_NET_GMCRRXERINT:
- case SPIDER_NET_GTITCSERINT:
- case SPIDER_NET_GTIFMTERINT:
- case SPIDER_NET_GTIPKTRVKINT:
- case SPIDER_NET_GTISPINGINT:
- case SPIDER_NET_GTISADNGINT:
- case SPIDER_NET_GTISPDNGINT:
- case SPIDER_NET_GRIFMTERINT:
- case SPIDER_NET_GRIPKTRVKINT:
- case SPIDER_NET_GRISPINGINT:
- case SPIDER_NET_GRISADNGINT:
- case SPIDER_NET_GRISPDNGINT:
- break;
- */
- default:
- break;
- }
-
- if ((show_error) && (netif_msg_intr(card)) && net_ratelimit())
- dev_err(&card->netdev->dev, "Error interrupt, GHIINT0STS = 0x%08x, "
- "GHIINT1STS = 0x%08x, GHIINT2STS = 0x%08x\n",
- status_reg, error_reg1, error_reg2);
-
- /* clear interrupt sources */
- spider_net_write_reg(card, SPIDER_NET_GHIINT1STS, error_reg1);
- spider_net_write_reg(card, SPIDER_NET_GHIINT2STS, error_reg2);
-}
-
-/**
- * spider_net_interrupt - interrupt handler for spider_net
- * @irq: interrupt number
- * @ptr: pointer to net_device
- *
- * Returns IRQ_HANDLED if the interrupt was raised by the card, or
- * IRQ_NONE otherwise.
- *
- * This is the interrupt handler; it turns off interrupts for this
- * device and schedules NAPI so that the stack polls the driver.
- */
-static irqreturn_t
-spider_net_interrupt(int irq, void *ptr)
-{
- struct net_device *netdev = ptr;
- struct spider_net_card *card = netdev_priv(netdev);
- u32 status_reg, error_reg1, error_reg2;
-
- status_reg = spider_net_read_reg(card, SPIDER_NET_GHIINT0STS);
- error_reg1 = spider_net_read_reg(card, SPIDER_NET_GHIINT1STS);
- error_reg2 = spider_net_read_reg(card, SPIDER_NET_GHIINT2STS);
-
- if (!(status_reg & SPIDER_NET_INT0_MASK_VALUE) &&
- !(error_reg1 & SPIDER_NET_INT1_MASK_VALUE) &&
- !(error_reg2 & SPIDER_NET_INT2_MASK_VALUE))
- return IRQ_NONE;
-
- if (status_reg & SPIDER_NET_RXINT ) {
- spider_net_rx_irq_off(card);
- napi_schedule(&card->napi);
- card->num_rx_ints ++;
- }
- if (status_reg & SPIDER_NET_TXINT)
- napi_schedule(&card->napi);
-
- if (status_reg & SPIDER_NET_LINKINT)
- spider_net_link_reset(netdev);
-
- if (status_reg & SPIDER_NET_ERRINT )
- spider_net_handle_error_irq(card, status_reg,
- error_reg1, error_reg2);
-
- /* clear interrupt sources */
- spider_net_write_reg(card, SPIDER_NET_GHIINT0STS, status_reg);
-
- return IRQ_HANDLED;
-}
-
-#ifdef CONFIG_NET_POLL_CONTROLLER
-/**
- * spider_net_poll_controller - artificial interrupt for netconsole etc.
- * @netdev: interface device structure
- *
- * see Documentation/networking/netconsole.rst
- */
-static void
-spider_net_poll_controller(struct net_device *netdev)
-{
- disable_irq(netdev->irq);
- spider_net_interrupt(netdev->irq, netdev);
- enable_irq(netdev->irq);
-}
-#endif /* CONFIG_NET_POLL_CONTROLLER */
-
-/**
- * spider_net_enable_interrupts - enable interrupts
- * @card: card structure
- *
- * spider_net_enable_interrupts enables the interrupts the driver handles
- * by programming the three interrupt mask registers
- */
-static void
-spider_net_enable_interrupts(struct spider_net_card *card)
-{
- spider_net_write_reg(card, SPIDER_NET_GHIINT0MSK,
- SPIDER_NET_INT0_MASK_VALUE);
- spider_net_write_reg(card, SPIDER_NET_GHIINT1MSK,
- SPIDER_NET_INT1_MASK_VALUE);
- spider_net_write_reg(card, SPIDER_NET_GHIINT2MSK,
- SPIDER_NET_INT2_MASK_VALUE);
-}
-
-/**
- * spider_net_disable_interrupts - disable interrupts
- * @card: card structure
- *
- * spider_net_disable_interrupts disables all the interrupts
- */
-static void
-spider_net_disable_interrupts(struct spider_net_card *card)
-{
- spider_net_write_reg(card, SPIDER_NET_GHIINT0MSK, 0);
- spider_net_write_reg(card, SPIDER_NET_GHIINT1MSK, 0);
- spider_net_write_reg(card, SPIDER_NET_GHIINT2MSK, 0);
- spider_net_write_reg(card, SPIDER_NET_GMACINTEN, 0);
-}
-
-/**
- * spider_net_init_card - initializes the card
- * @card: card structure
- *
- * spider_net_init_card initializes the card so that other registers can
- * be used
- */
-static void
-spider_net_init_card(struct spider_net_card *card)
-{
- spider_net_write_reg(card, SPIDER_NET_CKRCTRL,
- SPIDER_NET_CKRCTRL_STOP_VALUE);
-
- spider_net_write_reg(card, SPIDER_NET_CKRCTRL,
- SPIDER_NET_CKRCTRL_RUN_VALUE);
-
- /* trigger ETOMOD signal */
- spider_net_write_reg(card, SPIDER_NET_GMACOPEMD,
- spider_net_read_reg(card, SPIDER_NET_GMACOPEMD) | 0x4);
-
- spider_net_disable_interrupts(card);
-}
-
-/**
- * spider_net_enable_card - enables the card by setting all kinds of regs
- * @card: card structure
- *
- * spider_net_enable_card sets a lot of SMMIO registers to enable the device
- */
-static void
-spider_net_enable_card(struct spider_net_card *card)
-{
- int i;
- /* the following array consists of (register),(value) pairs
- * that are set in this function. A register of 0 ends the list
- */
- u32 regs[][2] = {
- { SPIDER_NET_GRESUMINTNUM, 0 },
- { SPIDER_NET_GREINTNUM, 0 },
-
- /* set interrupt frame number registers */
- /* clear the single DMA engine registers first */
- { SPIDER_NET_GFAFRMNUM, SPIDER_NET_GFXFRAMES_VALUE },
- { SPIDER_NET_GFBFRMNUM, SPIDER_NET_GFXFRAMES_VALUE },
- { SPIDER_NET_GFCFRMNUM, SPIDER_NET_GFXFRAMES_VALUE },
- { SPIDER_NET_GFDFRMNUM, SPIDER_NET_GFXFRAMES_VALUE },
-		/* then set what we really need */
- { SPIDER_NET_GFFRMNUM, SPIDER_NET_FRAMENUM_VALUE },
-
- /* timer counter registers and stuff */
- { SPIDER_NET_GFREECNNUM, 0 },
- { SPIDER_NET_GONETIMENUM, 0 },
- { SPIDER_NET_GTOUTFRMNUM, 0 },
-
- /* RX mode setting */
- { SPIDER_NET_GRXMDSET, SPIDER_NET_RXMODE_VALUE },
- /* TX mode setting */
- { SPIDER_NET_GTXMDSET, SPIDER_NET_TXMODE_VALUE },
- /* IPSEC mode setting */
- { SPIDER_NET_GIPSECINIT, SPIDER_NET_IPSECINIT_VALUE },
-
- { SPIDER_NET_GFTRESTRT, SPIDER_NET_RESTART_VALUE },
-
- { SPIDER_NET_GMRWOLCTRL, 0 },
- { SPIDER_NET_GTESTMD, 0x10000000 },
- { SPIDER_NET_GTTQMSK, 0x00400040 },
-
- { SPIDER_NET_GMACINTEN, 0 },
-
- /* flow control stuff */
- { SPIDER_NET_GMACAPAUSE, SPIDER_NET_MACAPAUSE_VALUE },
- { SPIDER_NET_GMACTXPAUSE, SPIDER_NET_TXPAUSE_VALUE },
-
- { SPIDER_NET_GMACBSTLMT, SPIDER_NET_BURSTLMT_VALUE },
- { 0, 0}
- };
-
- i = 0;
- while (regs[i][0]) {
- spider_net_write_reg(card, regs[i][0], regs[i][1]);
- i++;
- }
-
- /* clear unicast filter table entries 1 to 14 */
- for (i = 1; i <= 14; i++) {
- spider_net_write_reg(card,
- SPIDER_NET_GMRUAFILnR + i * 8,
- 0x00080000);
- spider_net_write_reg(card,
- SPIDER_NET_GMRUAFILnR + i * 8 + 4,
- 0x00000000);
- }
-
- spider_net_write_reg(card, SPIDER_NET_GMRUA0FIL15R, 0x08080000);
-
- spider_net_write_reg(card, SPIDER_NET_ECMODE, SPIDER_NET_ECMODE_VALUE);
-
- /* set chain tail address for RX chains and
- * enable DMA
- */
- spider_net_enable_rxchtails(card);
- spider_net_enable_rxdmac(card);
-
- spider_net_write_reg(card, SPIDER_NET_GRXDMAEN, SPIDER_NET_WOL_VALUE);
-
- spider_net_write_reg(card, SPIDER_NET_GMACLENLMT,
- SPIDER_NET_LENLMT_VALUE);
- spider_net_write_reg(card, SPIDER_NET_GMACOPEMD,
- SPIDER_NET_OPMODE_VALUE);
-
- spider_net_write_reg(card, SPIDER_NET_GDTDMACCNTR,
- SPIDER_NET_GDTBSTA);
-}
-
-/**
- * spider_net_download_firmware - loads firmware into the adapter
- * @card: card structure
- * @firmware_ptr: pointer to firmware data
- *
- * spider_net_download_firmware loads the firmware data into the
- * adapter's sequencers. It assumes the length and layout have already
- * been validated by the caller.
- */
-static int
-spider_net_download_firmware(struct spider_net_card *card,
- const void *firmware_ptr)
-{
- int sequencer, i;
- const u32 *fw_ptr = firmware_ptr;
-
- /* stop sequencers */
- spider_net_write_reg(card, SPIDER_NET_GSINIT,
- SPIDER_NET_STOP_SEQ_VALUE);
-
- for (sequencer = 0; sequencer < SPIDER_NET_FIRMWARE_SEQS;
- sequencer++) {
- spider_net_write_reg(card,
- SPIDER_NET_GSnPRGADR + sequencer * 8, 0);
- for (i = 0; i < SPIDER_NET_FIRMWARE_SEQWORDS; i++) {
- spider_net_write_reg(card, SPIDER_NET_GSnPRGDAT +
- sequencer * 8, *fw_ptr);
- fw_ptr++;
- }
- }
-
- if (spider_net_read_reg(card, SPIDER_NET_GSINIT))
- return -EIO;
-
- spider_net_write_reg(card, SPIDER_NET_GSINIT,
- SPIDER_NET_RUN_SEQ_VALUE);
-
- return 0;
-}
-
-/**
- * spider_net_init_firmware - reads in firmware parts
- * @card: card structure
- *
- * Returns 0 on success, <0 on failure
- *
- * spider_net_init_firmware requests the sequencer firmware, performs a
- * basic size check and downloads it to the card before releasing the
- * firmware structure again.
- *
- * Firmware format
- * ===============
- * spider_fw.bin is expected to be a file containing 6*1024*4 bytes, 4k being
- * the program for each sequencer. Use the command
- * tail -q -n +2 Seq_code1_0x088.txt Seq_code2_0x090.txt \
- * Seq_code3_0x098.txt Seq_code4_0x0A0.txt Seq_code5_0x0A8.txt \
- * Seq_code6_0x0B0.txt | xxd -r -p -c4 > spider_fw.bin
- *
- * to generate spider_fw.bin, if you have sequencer programs with something
- * like the following contents for each sequencer:
- * <ONE LINE COMMENT>
- * <FIRST 4-BYTES-WORD FOR SEQUENCER>
- * <SECOND 4-BYTES-WORD FOR SEQUENCER>
- * ...
- * <1024th 4-BYTES-WORD FOR SEQUENCER>
- */
-static int
-spider_net_init_firmware(struct spider_net_card *card)
-{
- struct firmware *firmware = NULL;
- struct device_node *dn;
- const u8 *fw_prop = NULL;
- int err = -ENOENT;
- int fw_size;
-
- if (request_firmware((const struct firmware **)&firmware,
- SPIDER_NET_FIRMWARE_NAME, &card->pdev->dev) == 0) {
- if ( (firmware->size != SPIDER_NET_FIRMWARE_LEN) &&
- netif_msg_probe(card) ) {
- dev_err(&card->netdev->dev,
- "Incorrect size of spidernet firmware in " \
- "filesystem. Looking in host firmware...\n");
- goto try_host_fw;
- }
- err = spider_net_download_firmware(card, firmware->data);
-
- release_firmware(firmware);
- if (err)
- goto try_host_fw;
-
- goto done;
- }
-
-try_host_fw:
- dn = pci_device_to_OF_node(card->pdev);
- if (!dn)
- goto out_err;
-
- fw_prop = of_get_property(dn, "firmware", &fw_size);
- if (!fw_prop)
- goto out_err;
-
- if ( (fw_size != SPIDER_NET_FIRMWARE_LEN) &&
- netif_msg_probe(card) ) {
- dev_err(&card->netdev->dev,
- "Incorrect size of spidernet firmware in host firmware\n");
- goto done;
- }
-
- err = spider_net_download_firmware(card, fw_prop);
-
-done:
- return err;
-out_err:
- if (netif_msg_probe(card))
- dev_err(&card->netdev->dev,
- "Couldn't find spidernet firmware in filesystem " \
- "or host firmware\n");
- return err;
-}
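-
-/* For reference, the resulting layout of spider_fw.bin (derived from the
- * format description above):
- *
- *   offset 0x0000: 1024 32-bit words, program for the 1st sequencer
- *   offset 0x1000: 1024 32-bit words, program for the 2nd sequencer
- *   ...
- *   offset 0x5000: 1024 32-bit words, program for the 6th sequencer
- *
- * i.e. SPIDER_NET_FIRMWARE_LEN = 6 * 1024 * 4 = 24576 bytes in total.
- */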
-
-/**
- * spider_net_open - called upon ifconfig up
- * @netdev: interface device structure
- *
- * returns 0 on success, <0 on failure
- *
- * spider_net_open allocates all the descriptors and memory needed for
- * operation, sets up multicast list and enables interrupts
- */
-int
-spider_net_open(struct net_device *netdev)
-{
- struct spider_net_card *card = netdev_priv(netdev);
- int result;
-
- result = spider_net_init_firmware(card);
- if (result)
- goto init_firmware_failed;
-
- /* start probing with copper */
- card->aneg_count = 0;
- card->medium = BCM54XX_COPPER;
- spider_net_setup_aneg(card);
- if (card->phy.def->phy_id)
- mod_timer(&card->aneg_timer, jiffies + SPIDER_NET_ANEG_TIMER);
-
- result = spider_net_init_chain(card, &card->tx_chain);
- if (result)
- goto alloc_tx_failed;
- card->low_watermark = NULL;
-
- result = spider_net_init_chain(card, &card->rx_chain);
- if (result)
- goto alloc_rx_failed;
-
- /* Allocate rx skbs */
- result = spider_net_alloc_rx_skbs(card);
- if (result)
- goto alloc_skbs_failed;
-
- spider_net_set_multi(netdev);
-
- /* further enhancement: setup hw vlan, if needed */
-
- result = -EBUSY;
- if (request_irq(netdev->irq, spider_net_interrupt,
- IRQF_SHARED, netdev->name, netdev))
- goto register_int_failed;
-
- spider_net_enable_card(card);
-
- netif_start_queue(netdev);
- netif_carrier_on(netdev);
- napi_enable(&card->napi);
-
- spider_net_enable_interrupts(card);
-
- return 0;
-
-register_int_failed:
- spider_net_free_rx_chain_contents(card);
-alloc_skbs_failed:
- spider_net_free_chain(card, &card->rx_chain);
-alloc_rx_failed:
- spider_net_free_chain(card, &card->tx_chain);
-alloc_tx_failed:
- del_timer_sync(&card->aneg_timer);
-init_firmware_failed:
- return result;
-}
-
-/**
- * spider_net_link_phy
- * @t: timer context used to obtain the pointer to net card data structure
- */
-static void spider_net_link_phy(struct timer_list *t)
-{
- struct spider_net_card *card = from_timer(card, t, aneg_timer);
- struct mii_phy *phy = &card->phy;
-
-	/* if the link didn't come up after SPIDER_NET_ANEG_TIMEOUT tries, set up the PHY again */
- if (card->aneg_count > SPIDER_NET_ANEG_TIMEOUT) {
-
- pr_debug("%s: link is down trying to bring it up\n",
- card->netdev->name);
-
- switch (card->medium) {
- case BCM54XX_COPPER:
- /* enable fiber with autonegotiation first */
- if (phy->def->ops->enable_fiber)
- phy->def->ops->enable_fiber(phy, 1);
- card->medium = BCM54XX_FIBER;
- break;
-
- case BCM54XX_FIBER:
- /* fiber didn't come up, try to disable fiber autoneg */
- if (phy->def->ops->enable_fiber)
- phy->def->ops->enable_fiber(phy, 0);
- card->medium = BCM54XX_UNKNOWN;
- break;
-
- case BCM54XX_UNKNOWN:
-		/* copper and fiber (with and without autoneg) have all failed,
-		 * retry from the beginning
- */
- spider_net_setup_aneg(card);
- card->medium = BCM54XX_COPPER;
- break;
- }
-
- card->aneg_count = 0;
- mod_timer(&card->aneg_timer, jiffies + SPIDER_NET_ANEG_TIMER);
- return;
- }
-
- /* link still not up, try again later */
- if (!(phy->def->ops->poll_link(phy))) {
- card->aneg_count++;
- mod_timer(&card->aneg_timer, jiffies + SPIDER_NET_ANEG_TIMER);
- return;
- }
-
- /* link came up, get abilities */
- phy->def->ops->read_link(phy);
-
- spider_net_write_reg(card, SPIDER_NET_GMACST,
- spider_net_read_reg(card, SPIDER_NET_GMACST));
- spider_net_write_reg(card, SPIDER_NET_GMACINTEN, 0x4);
-
- if (phy->speed == 1000)
- spider_net_write_reg(card, SPIDER_NET_GMACMODE, 0x00000001);
- else
- spider_net_write_reg(card, SPIDER_NET_GMACMODE, 0);
-
- card->aneg_count = 0;
-
- pr_info("%s: link up, %i Mbps, %s-duplex %sautoneg.\n",
- card->netdev->name, phy->speed,
- phy->duplex == 1 ? "Full" : "Half",
- phy->autoneg == 1 ? "" : "no ");
-}
-
-/**
- * spider_net_setup_phy - setup PHY
- * @card: card structure
- *
- * returns 0 on success, <0 on failure
- *
- * spider_net_setup_phy is used as part of spider_net_probe.
- **/
-static int
-spider_net_setup_phy(struct spider_net_card *card)
-{
- struct mii_phy *phy = &card->phy;
-
- spider_net_write_reg(card, SPIDER_NET_GDTDMASEL,
- SPIDER_NET_DMASEL_VALUE);
- spider_net_write_reg(card, SPIDER_NET_GPCCTRL,
- SPIDER_NET_PHY_CTRL_VALUE);
-
- phy->dev = card->netdev;
- phy->mdio_read = spider_net_read_phy;
- phy->mdio_write = spider_net_write_phy;
-
- for (phy->mii_id = 1; phy->mii_id <= 31; phy->mii_id++) {
- unsigned short id;
- id = spider_net_read_phy(card->netdev, phy->mii_id, MII_BMSR);
- if (id != 0x0000 && id != 0xffff) {
- if (!sungem_phy_probe(phy, phy->mii_id)) {
- pr_info("Found %s.\n", phy->def->name);
- break;
- }
- }
- }
-
- return 0;
-}
-
-/**
- * spider_net_workaround_rxramfull - work around firmware bug
- * @card: card structure
- *
- * no return value
- **/
-static void
-spider_net_workaround_rxramfull(struct spider_net_card *card)
-{
- int i, sequencer = 0;
-
- /* cancel reset */
- spider_net_write_reg(card, SPIDER_NET_CKRCTRL,
- SPIDER_NET_CKRCTRL_RUN_VALUE);
-
- /* empty sequencer data */
- for (sequencer = 0; sequencer < SPIDER_NET_FIRMWARE_SEQS;
- sequencer++) {
- spider_net_write_reg(card, SPIDER_NET_GSnPRGADR +
- sequencer * 8, 0x0);
- for (i = 0; i < SPIDER_NET_FIRMWARE_SEQWORDS; i++) {
- spider_net_write_reg(card, SPIDER_NET_GSnPRGDAT +
- sequencer * 8, 0x0);
- }
- }
-
- /* set sequencer operation */
- spider_net_write_reg(card, SPIDER_NET_GSINIT, 0x000000fe);
-
- /* reset */
- spider_net_write_reg(card, SPIDER_NET_CKRCTRL,
- SPIDER_NET_CKRCTRL_STOP_VALUE);
-}
-
-/**
- * spider_net_stop - called upon ifconfig down
- * @netdev: interface device structure
- *
- * always returns 0
- */
-int
-spider_net_stop(struct net_device *netdev)
-{
- struct spider_net_card *card = netdev_priv(netdev);
-
- napi_disable(&card->napi);
- netif_carrier_off(netdev);
- netif_stop_queue(netdev);
- del_timer_sync(&card->tx_timer);
- del_timer_sync(&card->aneg_timer);
-
- spider_net_disable_interrupts(card);
-
- free_irq(netdev->irq, netdev);
-
- spider_net_write_reg(card, SPIDER_NET_GDTDMACCNTR,
- SPIDER_NET_DMA_TX_FEND_VALUE);
-
- /* turn off DMA, force end */
- spider_net_disable_rxdmac(card);
-
- /* release chains */
- spider_net_release_tx_chain(card, 1);
- spider_net_free_rx_chain_contents(card);
-
- spider_net_free_chain(card, &card->tx_chain);
- spider_net_free_chain(card, &card->rx_chain);
-
- return 0;
-}
-
-/**
- * spider_net_tx_timeout_task - task scheduled by the watchdog timeout
- * function (runs in process context, not in interrupt context)
- * @work: work context used to obtain the pointer to net card data structure
- *
- * called as task when tx hangs, resets interface (if interface is up)
- */
-static void
-spider_net_tx_timeout_task(struct work_struct *work)
-{
- struct spider_net_card *card =
- container_of(work, struct spider_net_card, tx_timeout_task);
- struct net_device *netdev = card->netdev;
-
- if (!(netdev->flags & IFF_UP))
- goto out;
-
- netif_device_detach(netdev);
- spider_net_stop(netdev);
-
- spider_net_workaround_rxramfull(card);
- spider_net_init_card(card);
-
- if (spider_net_setup_phy(card))
- goto out;
-
- spider_net_open(netdev);
- spider_net_kick_tx_dma(card);
- netif_device_attach(netdev);
-
-out:
- atomic_dec(&card->tx_timeout_task_counter);
-}
-
-/**
- * spider_net_tx_timeout - called when the tx timeout watchdog kicks in.
- * @netdev: interface device structure
- * @txqueue: unused
- *
- * Called if TX hangs. Schedules a task that resets the interface.
- */
-static void
-spider_net_tx_timeout(struct net_device *netdev, unsigned int txqueue)
-{
- struct spider_net_card *card;
-
- card = netdev_priv(netdev);
- atomic_inc(&card->tx_timeout_task_counter);
- if (netdev->flags & IFF_UP)
- schedule_work(&card->tx_timeout_task);
- else
- atomic_dec(&card->tx_timeout_task_counter);
- card->spider_stats.tx_timeouts++;
-}
-
-static const struct net_device_ops spider_net_ops = {
- .ndo_open = spider_net_open,
- .ndo_stop = spider_net_stop,
- .ndo_start_xmit = spider_net_xmit,
- .ndo_set_rx_mode = spider_net_set_multi,
- .ndo_set_mac_address = spider_net_set_mac,
- .ndo_eth_ioctl = spider_net_do_ioctl,
- .ndo_tx_timeout = spider_net_tx_timeout,
- .ndo_validate_addr = eth_validate_addr,
- /* HW VLAN */
-#ifdef CONFIG_NET_POLL_CONTROLLER
- /* poll controller */
- .ndo_poll_controller = spider_net_poll_controller,
-#endif /* CONFIG_NET_POLL_CONTROLLER */
-};
-
-/**
- * spider_net_setup_netdev_ops - initialization of net_device operations
- * @netdev: net_device structure
- *
- * fills out function pointers in the net_device structure
- */
-static void
-spider_net_setup_netdev_ops(struct net_device *netdev)
-{
- netdev->netdev_ops = &spider_net_ops;
- netdev->watchdog_timeo = SPIDER_NET_WATCHDOG_TIMEOUT;
- /* ethtool ops */
- netdev->ethtool_ops = &spider_net_ethtool_ops;
-}
-
-/**
- * spider_net_setup_netdev - initialization of net_device
- * @card: card structure
- *
- * Returns 0 on success or <0 on failure
- *
- * spider_net_setup_netdev initializes the net_device structure
- **/
-static int
-spider_net_setup_netdev(struct spider_net_card *card)
-{
- int result;
- struct net_device *netdev = card->netdev;
- struct device_node *dn;
- struct sockaddr addr;
- const u8 *mac;
-
- SET_NETDEV_DEV(netdev, &card->pdev->dev);
-
- pci_set_drvdata(card->pdev, netdev);
-
- timer_setup(&card->tx_timer, spider_net_cleanup_tx_ring, 0);
- netdev->irq = card->pdev->irq;
-
- card->aneg_count = 0;
- timer_setup(&card->aneg_timer, spider_net_link_phy, 0);
-
- netif_napi_add(netdev, &card->napi, spider_net_poll);
-
- spider_net_setup_netdev_ops(netdev);
-
- netdev->hw_features = NETIF_F_RXCSUM | NETIF_F_IP_CSUM;
- if (SPIDER_NET_RX_CSUM_DEFAULT)
- netdev->features |= NETIF_F_RXCSUM;
- netdev->features |= NETIF_F_IP_CSUM;
- /* some time: NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
- * NETIF_F_HW_VLAN_CTAG_FILTER
- */
- netdev->lltx = true;
-
- /* MTU range: 64 - 2294 */
- netdev->min_mtu = SPIDER_NET_MIN_MTU;
- netdev->max_mtu = SPIDER_NET_MAX_MTU;
-
- netdev->irq = card->pdev->irq;
- card->num_rx_ints = 0;
- card->ignore_rx_ramfull = 0;
-
- dn = pci_device_to_OF_node(card->pdev);
- if (!dn)
- return -EIO;
-
- mac = of_get_property(dn, "local-mac-address", NULL);
- if (!mac)
- return -EIO;
- memcpy(addr.sa_data, mac, ETH_ALEN);
-
- result = spider_net_set_mac(netdev, &addr);
- if ((result) && (netif_msg_probe(card)))
- dev_err(&card->netdev->dev,
- "Failed to set MAC address: %i\n", result);
-
- result = register_netdev(netdev);
- if (result) {
- if (netif_msg_probe(card))
- dev_err(&card->netdev->dev,
- "Couldn't register net_device: %i\n", result);
- return result;
- }
-
- if (netif_msg_probe(card))
- pr_info("Initialized device %s.\n", netdev->name);
-
- return 0;
-}
-
-/**
- * spider_net_alloc_card - allocates net_device and card structure
- *
- * returns the card structure or NULL in case of errors
- *
- * the card and net_device structures are linked to each other
- */
-static struct spider_net_card *
-spider_net_alloc_card(void)
-{
- struct net_device *netdev;
- struct spider_net_card *card;
-
- netdev = alloc_etherdev(struct_size(card, darray,
- size_add(tx_descriptors, rx_descriptors)));
- if (!netdev)
- return NULL;
-
- card = netdev_priv(netdev);
- card->netdev = netdev;
- card->msg_enable = SPIDER_NET_DEFAULT_MSG;
- INIT_WORK(&card->tx_timeout_task, spider_net_tx_timeout_task);
- init_waitqueue_head(&card->waitq);
- atomic_set(&card->tx_timeout_task_counter, 0);
-
- card->rx_chain.num_desc = rx_descriptors;
- card->rx_chain.ring = card->darray;
- card->tx_chain.num_desc = tx_descriptors;
- card->tx_chain.ring = card->darray + rx_descriptors;
-
- return card;
-}
-
-/**
- * spider_net_undo_pci_setup - releases PCI resources
- * @card: card structure
- *
- * spider_net_undo_pci_setup releases the mapped regions
- */
-static void
-spider_net_undo_pci_setup(struct spider_net_card *card)
-{
- iounmap(card->regs);
- pci_release_regions(card->pdev);
-}
-
-/**
- * spider_net_setup_pci_dev - sets up the device in terms of PCI operations
- * @pdev: PCI device
- *
- * Returns the card structure or NULL if any errors occur
- *
- * spider_net_setup_pci_dev initializes pdev and, together with the
- * functions called in spider_net_open, configures the device so that
- * data can be transferred over it.
- * The net_device structure is attached to the card structure if the
- * function returns without error.
- **/
-static struct spider_net_card *
-spider_net_setup_pci_dev(struct pci_dev *pdev)
-{
- struct spider_net_card *card;
- unsigned long mmio_start, mmio_len;
-
- if (pci_enable_device(pdev)) {
- dev_err(&pdev->dev, "Couldn't enable PCI device\n");
- return NULL;
- }
-
- if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
- dev_err(&pdev->dev,
- "Couldn't find proper PCI device base address.\n");
- goto out_disable_dev;
- }
-
- if (pci_request_regions(pdev, spider_net_driver_name)) {
- dev_err(&pdev->dev,
- "Couldn't obtain PCI resources, aborting.\n");
- goto out_disable_dev;
- }
-
- pci_set_master(pdev);
-
- card = spider_net_alloc_card();
- if (!card) {
- dev_err(&pdev->dev,
- "Couldn't allocate net_device structure, aborting.\n");
- goto out_release_regions;
- }
- card->pdev = pdev;
-
- /* fetch base address and length of first resource */
- mmio_start = pci_resource_start(pdev, 0);
- mmio_len = pci_resource_len(pdev, 0);
-
- card->netdev->mem_start = mmio_start;
- card->netdev->mem_end = mmio_start + mmio_len;
- card->regs = ioremap(mmio_start, mmio_len);
-
- if (!card->regs) {
- dev_err(&pdev->dev,
- "Couldn't obtain PCI resources, aborting.\n");
- goto out_release_regions;
- }
-
- return card;
-
-out_release_regions:
- pci_release_regions(pdev);
-out_disable_dev:
- pci_disable_device(pdev);
- return NULL;
-}
-
-/**
- * spider_net_probe - initialization of a device
- * @pdev: PCI device
- * @ent: entry in the device id list
- *
- * Returns 0 on success, <0 on failure
- *
- * spider_net_probe initializes pdev and registers a net_device
- * structure for it. After that, the device can be ifconfig'ed up.
- **/
-static int
-spider_net_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
-{
- int err = -EIO;
- struct spider_net_card *card;
-
- card = spider_net_setup_pci_dev(pdev);
- if (!card)
- goto out;
-
- spider_net_workaround_rxramfull(card);
- spider_net_init_card(card);
-
- err = spider_net_setup_phy(card);
- if (err)
- goto out_undo_pci;
-
- err = spider_net_setup_netdev(card);
- if (err)
- goto out_undo_pci;
-
- return 0;
-
-out_undo_pci:
- spider_net_undo_pci_setup(card);
- free_netdev(card->netdev);
-out:
- return err;
-}
-
-/**
- * spider_net_remove - removal of a device
- * @pdev: PCI device
- *
- * spider_net_remove is called to remove the device; it unregisters the
- * net_device and switches the card off. It does not return a value.
- **/
-static void
-spider_net_remove(struct pci_dev *pdev)
-{
- struct net_device *netdev;
- struct spider_net_card *card;
-
- netdev = pci_get_drvdata(pdev);
- card = netdev_priv(netdev);
-
- wait_event(card->waitq,
- atomic_read(&card->tx_timeout_task_counter) == 0);
-
- unregister_netdev(netdev);
-
- /* switch off card */
- spider_net_write_reg(card, SPIDER_NET_CKRCTRL,
- SPIDER_NET_CKRCTRL_STOP_VALUE);
- spider_net_write_reg(card, SPIDER_NET_CKRCTRL,
- SPIDER_NET_CKRCTRL_RUN_VALUE);
-
- spider_net_undo_pci_setup(card);
- free_netdev(netdev);
-}
-
-static struct pci_driver spider_net_driver = {
- .name = spider_net_driver_name,
- .id_table = spider_net_pci_tbl,
- .probe = spider_net_probe,
- .remove = spider_net_remove
-};
-
-/**
- * spider_net_init - init function when the driver is loaded
- *
- * spider_net_init registers the device driver
- */
-static int __init spider_net_init(void)
-{
- printk(KERN_INFO "Spidernet version %s.\n", VERSION);
-
- if (rx_descriptors < SPIDER_NET_RX_DESCRIPTORS_MIN) {
- rx_descriptors = SPIDER_NET_RX_DESCRIPTORS_MIN;
- pr_info("adjusting rx descriptors to %i.\n", rx_descriptors);
- }
- if (rx_descriptors > SPIDER_NET_RX_DESCRIPTORS_MAX) {
- rx_descriptors = SPIDER_NET_RX_DESCRIPTORS_MAX;
- pr_info("adjusting rx descriptors to %i.\n", rx_descriptors);
- }
- if (tx_descriptors < SPIDER_NET_TX_DESCRIPTORS_MIN) {
- tx_descriptors = SPIDER_NET_TX_DESCRIPTORS_MIN;
- pr_info("adjusting tx descriptors to %i.\n", tx_descriptors);
- }
- if (tx_descriptors > SPIDER_NET_TX_DESCRIPTORS_MAX) {
- tx_descriptors = SPIDER_NET_TX_DESCRIPTORS_MAX;
- pr_info("adjusting tx descriptors to %i.\n", tx_descriptors);
- }
-
- return pci_register_driver(&spider_net_driver);
-}
-
-/**
- * spider_net_cleanup - exit function when driver is unloaded
- *
- * spider_net_cleanup unregisters the device driver
- */
-static void __exit spider_net_cleanup(void)
-{
- pci_unregister_driver(&spider_net_driver);
-}
-
-module_init(spider_net_init);
-module_exit(spider_net_cleanup);
diff --git a/drivers/net/ethernet/toshiba/spider_net.h b/drivers/net/ethernet/toshiba/spider_net.h
deleted file mode 100644
index 51948e2b3a34..000000000000
--- a/drivers/net/ethernet/toshiba/spider_net.h
+++ /dev/null
@@ -1,475 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Network device driver for Cell Processor-Based Blade and Celleb platform
- *
- * (C) Copyright IBM Corp. 2005
- * (C) Copyright 2006 TOSHIBA CORPORATION
- *
- * Authors : Utz Bacher <utz.bacher@de.ibm.com>
- * Jens Osterkamp <Jens.Osterkamp@de.ibm.com>
- */
-
-#ifndef _SPIDER_NET_H
-#define _SPIDER_NET_H
-
-#define VERSION "2.0 B"
-
-#include <linux/sungem_phy.h>
-
-int spider_net_stop(struct net_device *netdev);
-int spider_net_open(struct net_device *netdev);
-
-extern const struct ethtool_ops spider_net_ethtool_ops;
-
-extern char spider_net_driver_name[];
-
-#define SPIDER_NET_MAX_FRAME 2312
-#define SPIDER_NET_MAX_MTU 2294
-#define SPIDER_NET_MIN_MTU 64
-
-#define SPIDER_NET_RXBUF_ALIGN 128
-
-#define SPIDER_NET_RX_DESCRIPTORS_DEFAULT 256
-#define SPIDER_NET_RX_DESCRIPTORS_MIN 16
-#define SPIDER_NET_RX_DESCRIPTORS_MAX 512
-
-#define SPIDER_NET_TX_DESCRIPTORS_DEFAULT 256
-#define SPIDER_NET_TX_DESCRIPTORS_MIN 16
-#define SPIDER_NET_TX_DESCRIPTORS_MAX 512
-
-#define SPIDER_NET_TX_TIMER (HZ/5)
-#define SPIDER_NET_ANEG_TIMER (HZ)
-#define SPIDER_NET_ANEG_TIMEOUT 5
-
-#define SPIDER_NET_RX_CSUM_DEFAULT 1
-
-#define SPIDER_NET_WATCHDOG_TIMEOUT 50*HZ
-
-#define SPIDER_NET_FIRMWARE_SEQS 6
-#define SPIDER_NET_FIRMWARE_SEQWORDS 1024
-#define SPIDER_NET_FIRMWARE_LEN (SPIDER_NET_FIRMWARE_SEQS * \
- SPIDER_NET_FIRMWARE_SEQWORDS * \
- sizeof(u32))
-#define SPIDER_NET_FIRMWARE_NAME "spider_fw.bin"
-
-/** spider_net SMMIO registers */
-#define SPIDER_NET_GHIINT0STS 0x00000000
-#define SPIDER_NET_GHIINT1STS 0x00000004
-#define SPIDER_NET_GHIINT2STS 0x00000008
-#define SPIDER_NET_GHIINT0MSK 0x00000010
-#define SPIDER_NET_GHIINT1MSK 0x00000014
-#define SPIDER_NET_GHIINT2MSK 0x00000018
-
-#define SPIDER_NET_GRESUMINTNUM 0x00000020
-#define SPIDER_NET_GREINTNUM 0x00000024
-
-#define SPIDER_NET_GFFRMNUM 0x00000028
-#define SPIDER_NET_GFAFRMNUM 0x0000002c
-#define SPIDER_NET_GFBFRMNUM 0x00000030
-#define SPIDER_NET_GFCFRMNUM 0x00000034
-#define SPIDER_NET_GFDFRMNUM 0x00000038
-
-/* clear them (we don't use them) */
-#define SPIDER_NET_GFREECNNUM 0x0000003c
-#define SPIDER_NET_GONETIMENUM 0x00000040
-
-#define SPIDER_NET_GTOUTFRMNUM 0x00000044
-
-#define SPIDER_NET_GTXMDSET 0x00000050
-#define SPIDER_NET_GPCCTRL 0x00000054
-#define SPIDER_NET_GRXMDSET 0x00000058
-#define SPIDER_NET_GIPSECINIT 0x0000005c
-#define SPIDER_NET_GFTRESTRT 0x00000060
-#define SPIDER_NET_GRXDMAEN 0x00000064
-#define SPIDER_NET_GMRWOLCTRL 0x00000068
-#define SPIDER_NET_GPCWOPCMD 0x0000006c
-#define SPIDER_NET_GPCROPCMD 0x00000070
-#define SPIDER_NET_GTTFRMCNT 0x00000078
-#define SPIDER_NET_GTESTMD 0x0000007c
-
-#define SPIDER_NET_GSINIT 0x00000080
-#define SPIDER_NET_GSnPRGADR 0x00000084
-#define SPIDER_NET_GSnPRGDAT 0x00000088
-
-#define SPIDER_NET_GMACOPEMD 0x00000100
-#define SPIDER_NET_GMACLENLMT 0x00000108
-#define SPIDER_NET_GMACST 0x00000110
-#define SPIDER_NET_GMACINTEN 0x00000118
-#define SPIDER_NET_GMACPHYCTRL 0x00000120
-
-#define SPIDER_NET_GMACAPAUSE 0x00000154
-#define SPIDER_NET_GMACTXPAUSE 0x00000164
-
-#define SPIDER_NET_GMACMODE 0x000001b0
-#define SPIDER_NET_GMACBSTLMT 0x000001b4
-
-#define SPIDER_NET_GMACUNIMACU 0x000001c0
-#define SPIDER_NET_GMACUNIMACL 0x000001c8
-
-#define SPIDER_NET_GMRMHFILnR 0x00000400
-#define SPIDER_NET_MULTICAST_HASHES 256
-
-#define SPIDER_NET_GMRUAFILnR 0x00000500
-#define SPIDER_NET_GMRUA0FIL15R 0x00000578
-
-#define SPIDER_NET_GTTQMSK 0x00000934
-
-/* RX DMA controller registers, all 0x00000a.. are for DMA controller A,
- * 0x00000b.. for DMA controller B, etc. */
-#define SPIDER_NET_GDADCHA 0x00000a00
-#define SPIDER_NET_GDADMACCNTR 0x00000a04
-#define SPIDER_NET_GDACTDPA 0x00000a08
-#define SPIDER_NET_GDACTDCNT 0x00000a0c
-#define SPIDER_NET_GDACDBADDR 0x00000a20
-#define SPIDER_NET_GDACDBSIZE 0x00000a24
-#define SPIDER_NET_GDACNEXTDA 0x00000a28
-#define SPIDER_NET_GDACCOMST 0x00000a2c
-#define SPIDER_NET_GDAWBCOMST 0x00000a30
-#define SPIDER_NET_GDAWBRSIZE 0x00000a34
-#define SPIDER_NET_GDAWBVSIZE 0x00000a38
-#define SPIDER_NET_GDAWBTRST 0x00000a3c
-#define SPIDER_NET_GDAWBTRERR 0x00000a40
-
-/* TX DMA controller registers */
-#define SPIDER_NET_GDTDCHA 0x00000e00
-#define SPIDER_NET_GDTDMACCNTR 0x00000e04
-#define SPIDER_NET_GDTCDPA 0x00000e08
-#define SPIDER_NET_GDTDMASEL 0x00000e14
-
-#define SPIDER_NET_ECMODE 0x00000f00
-/* clock and reset control register */
-#define SPIDER_NET_CKRCTRL 0x00000ff0
-
-/** SCONFIG registers */
-#define SPIDER_NET_SCONFIG_IOACTE 0x00002810
-
-/** interrupt mask registers */
-#define SPIDER_NET_INT0_MASK_VALUE 0x3f7fe2c7
-#define SPIDER_NET_INT1_MASK_VALUE 0x0000fff2
-#define SPIDER_NET_INT2_MASK_VALUE 0x000003f1
-
-/* we rely on flagged descriptor interrupts */
-#define SPIDER_NET_FRAMENUM_VALUE 0x00000000
-/* set this first, then the FRAMENUM_VALUE */
-#define SPIDER_NET_GFXFRAMES_VALUE 0x00000000
-
-#define SPIDER_NET_STOP_SEQ_VALUE 0x00000000
-#define SPIDER_NET_RUN_SEQ_VALUE 0x0000007e
-
-#define SPIDER_NET_PHY_CTRL_VALUE 0x00040040
-/* #define SPIDER_NET_PHY_CTRL_VALUE 0x01070080*/
-#define SPIDER_NET_RXMODE_VALUE 0x00000011
-/* auto retransmission in case of MAC aborts */
-#define SPIDER_NET_TXMODE_VALUE 0x00010000
-#define SPIDER_NET_RESTART_VALUE 0x00000000
-#define SPIDER_NET_WOL_VALUE 0x00001111
-#if 0
-#define SPIDER_NET_WOL_VALUE 0x00000000
-#endif
-#define SPIDER_NET_IPSECINIT_VALUE 0x6f716f71
-
-/* pause frames: automatic, no upper retransmission count */
-/* outside loopback mode: the ETOMOD signal doesn't matter, it is not connected */
-/* ETOMOD signal is brought to PHY reset. bit 2 must be 1 in Celleb */
-#define SPIDER_NET_OPMODE_VALUE 0x00000067
-/*#define SPIDER_NET_OPMODE_VALUE 0x001b0062*/
-#define SPIDER_NET_LENLMT_VALUE 0x00000908
-
-#define SPIDER_NET_MACAPAUSE_VALUE 0x00000800 /* about 1 ms */
-#define SPIDER_NET_TXPAUSE_VALUE 0x00000000
-
-#define SPIDER_NET_MACMODE_VALUE 0x00000001
-#define SPIDER_NET_BURSTLMT_VALUE 0x00000200 /* about 16 us */
-
-/* DMAC control register GDMACCNTR
- *
- * 1(0) enable r/tx dma
- * 0000000 fixed to 0
- *
- * 000000 fixed to 0
- * 0(1) en/disable descr writeback on force end
- * 0(1) force end
- *
- * 000000 fixed to 0
- * 00 burst alignment: 128 bytes
- * 11 burst alignment: 1024 bytes
- *
- * 00000 fixed to 0
- * 0 descr writeback size 32 bytes
- * 0(1) descr chain end interrupt enable
- * 0(1) descr status writeback enable */
-
-/* to set RX_DMA_EN */
-#define SPIDER_NET_DMA_RX_VALUE 0x80000000
-#define SPIDER_NET_DMA_RX_FEND_VALUE 0x00030003
-/* to set TX_DMA_EN */
-#define SPIDER_NET_TX_DMA_EN 0x80000000
-#define SPIDER_NET_GDTBSTA 0x00000300
-#define SPIDER_NET_GDTDCEIDIS 0x00000002
-#define SPIDER_NET_DMA_TX_VALUE SPIDER_NET_TX_DMA_EN | \
- SPIDER_NET_GDTDCEIDIS | \
- SPIDER_NET_GDTBSTA
-
-#define SPIDER_NET_DMA_TX_FEND_VALUE 0x00030003
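-
-/* For illustration (derived from the defines above), the composite TX DMA
- * control value expands to:
- *
- *   SPIDER_NET_TX_DMA_EN     0x80000000
- * | SPIDER_NET_GDTDCEIDIS    0x00000002
- * | SPIDER_NET_GDTBSTA       0x00000300
- *   -----------------------------------
- *   SPIDER_NET_DMA_TX_VALUE  0x80000302
- */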
-
-/* SPIDER_NET_UA_DESCR_VALUE is OR'ed with the unicast address */
-#define SPIDER_NET_UA_DESCR_VALUE 0x00080000
-#define SPIDER_NET_PROMISC_VALUE 0x00080000
-#define SPIDER_NET_NONPROMISC_VALUE 0x00000000
-
-#define SPIDER_NET_DMASEL_VALUE 0x00000001
-
-#define SPIDER_NET_ECMODE_VALUE 0x00000000
-
-#define SPIDER_NET_CKRCTRL_RUN_VALUE 0x1fff010f
-#define SPIDER_NET_CKRCTRL_STOP_VALUE 0x0000010f
-
-#define SPIDER_NET_SBIMSTATE_VALUE 0x00000000
-#define SPIDER_NET_SBTMSTATE_VALUE 0x00000000
-
-/* SPIDER_NET_GHIINT0STS bits, in reverse order so that they can be used
- * with 1 << SPIDER_NET_... */
-enum spider_net_int0_status {
- SPIDER_NET_GPHYINT = 0,
- SPIDER_NET_GMAC2INT,
- SPIDER_NET_GMAC1INT,
- SPIDER_NET_GIPSINT,
- SPIDER_NET_GFIFOINT,
- SPIDER_NET_GDMACINT,
- SPIDER_NET_GSYSINT,
- SPIDER_NET_GPWOPCMPINT,
- SPIDER_NET_GPROPCMPINT,
- SPIDER_NET_GPWFFINT,
- SPIDER_NET_GRMDADRINT,
- SPIDER_NET_GRMARPINT,
- SPIDER_NET_GRMMPINT,
- SPIDER_NET_GDTDEN0INT,
- SPIDER_NET_GDDDEN0INT,
- SPIDER_NET_GDCDEN0INT,
- SPIDER_NET_GDBDEN0INT,
- SPIDER_NET_GDADEN0INT,
- SPIDER_NET_GDTFDCINT,
- SPIDER_NET_GDDFDCINT,
- SPIDER_NET_GDCFDCINT,
- SPIDER_NET_GDBFDCINT,
- SPIDER_NET_GDAFDCINT,
- SPIDER_NET_GTTEDINT,
- SPIDER_NET_GDTDCEINT,
- SPIDER_NET_GRFDNMINT,
- SPIDER_NET_GRFCNMINT,
- SPIDER_NET_GRFBNMINT,
- SPIDER_NET_GRFANMINT,
- SPIDER_NET_GRFNMINT,
- SPIDER_NET_G1TMCNTINT,
- SPIDER_NET_GFREECNTINT
-};
-/* GHIINT1STS bits */
-enum spider_net_int1_status {
- SPIDER_NET_GTMFLLINT = 0,
- SPIDER_NET_GRMFLLINT,
- SPIDER_NET_GTMSHTINT,
- SPIDER_NET_GDTINVDINT,
- SPIDER_NET_GRFDFLLINT,
- SPIDER_NET_GDDDCEINT,
- SPIDER_NET_GDDINVDINT,
- SPIDER_NET_GRFCFLLINT,
- SPIDER_NET_GDCDCEINT,
- SPIDER_NET_GDCINVDINT,
- SPIDER_NET_GRFBFLLINT,
- SPIDER_NET_GDBDCEINT,
- SPIDER_NET_GDBINVDINT,
- SPIDER_NET_GRFAFLLINT,
- SPIDER_NET_GDADCEINT,
- SPIDER_NET_GDAINVDINT,
- SPIDER_NET_GDTRSERINT,
- SPIDER_NET_GDDRSERINT,
- SPIDER_NET_GDCRSERINT,
- SPIDER_NET_GDBRSERINT,
- SPIDER_NET_GDARSERINT,
- SPIDER_NET_GDSERINT,
- SPIDER_NET_GDTPTERINT,
- SPIDER_NET_GDDPTERINT,
- SPIDER_NET_GDCPTERINT,
- SPIDER_NET_GDBPTERINT,
- SPIDER_NET_GDAPTERINT
-};
-/* GHIINT2STS bits */
-enum spider_net_int2_status {
- SPIDER_NET_GPROPERINT = 0,
- SPIDER_NET_GMCTCRSNGINT,
- SPIDER_NET_GMCTLCOLINT,
- SPIDER_NET_GMCTTMOTINT,
- SPIDER_NET_GMCRCAERINT,
- SPIDER_NET_GMCRCALERINT,
- SPIDER_NET_GMCRALNERINT,
- SPIDER_NET_GMCROVRINT,
- SPIDER_NET_GMCRRNTINT,
- SPIDER_NET_GMCRRXERINT,
- SPIDER_NET_GTITCSERINT,
- SPIDER_NET_GTIFMTERINT,
- SPIDER_NET_GTIPKTRVKINT,
- SPIDER_NET_GTISPINGINT,
- SPIDER_NET_GTISADNGINT,
- SPIDER_NET_GTISPDNGINT,
- SPIDER_NET_GRIFMTERINT,
- SPIDER_NET_GRIPKTRVKINT,
- SPIDER_NET_GRISPINGINT,
- SPIDER_NET_GRISADNGINT,
- SPIDER_NET_GRISPDNGINT
-};
-
-#define SPIDER_NET_TXINT (1 << SPIDER_NET_GDTFDCINT)
-
-/* We rely on flagged descriptor interrupts */
-#define SPIDER_NET_RXINT ( (1 << SPIDER_NET_GDAFDCINT) )
-
-#define SPIDER_NET_LINKINT ( 1 << SPIDER_NET_GMAC2INT )
-
-#define SPIDER_NET_ERRINT ( 0xffffffff & \
- (~SPIDER_NET_TXINT) & \
- (~SPIDER_NET_RXINT) & \
- (~SPIDER_NET_LINKINT) )
-
-#define SPIDER_NET_GPREXEC 0x80000000
-#define SPIDER_NET_GPRDAT_MASK 0x0000ffff
-
-#define SPIDER_NET_DMAC_NOINTR_COMPLETE 0x00800000
-#define SPIDER_NET_DMAC_TXFRMTL 0x00040000
-#define SPIDER_NET_DMAC_TCP 0x00020000
-#define SPIDER_NET_DMAC_UDP 0x00030000
-#define SPIDER_NET_TXDCEST 0x08000000
-
-#define SPIDER_NET_DESCR_RXFDIS 0x00000001
-#define SPIDER_NET_DESCR_RXDCEIS 0x00000002
-#define SPIDER_NET_DESCR_RXDEN0IS 0x00000004
-#define SPIDER_NET_DESCR_RXINVDIS 0x00000008
-#define SPIDER_NET_DESCR_RXRERRIS 0x00000010
-#define SPIDER_NET_DESCR_RXFDCIMS 0x00000100
-#define SPIDER_NET_DESCR_RXDCEIMS 0x00000200
-#define SPIDER_NET_DESCR_RXDEN0IMS 0x00000400
-#define SPIDER_NET_DESCR_RXINVDIMS 0x00000800
-#define SPIDER_NET_DESCR_RXRERRMIS 0x00001000
-#define SPIDER_NET_DESCR_UNUSED 0x077fe0e0
-
-#define SPIDER_NET_DESCR_IND_PROC_MASK 0xF0000000
-#define SPIDER_NET_DESCR_COMPLETE 0x00000000 /* used in rx and tx */
-#define SPIDER_NET_DESCR_RESPONSE_ERROR 0x10000000 /* used in rx and tx */
-#define SPIDER_NET_DESCR_PROTECTION_ERROR 0x20000000 /* used in rx and tx */
-#define SPIDER_NET_DESCR_FRAME_END 0x40000000 /* used in rx */
-#define SPIDER_NET_DESCR_FORCE_END 0x50000000 /* used in rx and tx */
-#define SPIDER_NET_DESCR_CARDOWNED 0xA0000000 /* used in rx and tx */
-#define SPIDER_NET_DESCR_NOT_IN_USE 0xF0000000
-#define SPIDER_NET_DESCR_TXDESFLG 0x00800000
-
-#define SPIDER_NET_DESCR_BAD_STATUS (SPIDER_NET_DESCR_RXDEN0IS | \
- SPIDER_NET_DESCR_RXRERRIS | \
- SPIDER_NET_DESCR_RXDEN0IMS | \
- SPIDER_NET_DESCR_RXINVDIMS | \
- SPIDER_NET_DESCR_RXRERRMIS | \
- SPIDER_NET_DESCR_UNUSED)
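-
-/* A minimal sketch of spider_net_get_descr_status(), which is used
- * throughout the RX/TX paths above but is defined earlier in
- * spider_net.c (outside this hunk). Assuming the status codes occupy
- * the top nibble of dmac_cmd_status, it presumably reduces to:
- */
-static inline int
-spider_net_get_descr_status(struct spider_net_hw_descr *hwdescr)
-{
-	return hwdescr->dmac_cmd_status & SPIDER_NET_DESCR_IND_PROC_MASK;
-}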
-
-/* Descriptor, as defined by the hardware */
-struct spider_net_hw_descr {
- u32 buf_addr;
- u32 buf_size;
- u32 next_descr_addr;
- u32 dmac_cmd_status;
- u32 result_size;
- u32 valid_size; /* all zeroes for tx */
- u32 data_status;
- u32 data_error; /* all zeroes for tx */
-} __attribute__((aligned(32)));
-
-struct spider_net_descr {
- struct spider_net_hw_descr *hwdescr;
- struct sk_buff *skb;
- u32 bus_addr;
- struct spider_net_descr *next;
- struct spider_net_descr *prev;
-};
-
-struct spider_net_descr_chain {
- spinlock_t lock;
- struct spider_net_descr *head;
- struct spider_net_descr *tail;
- struct spider_net_descr *ring;
- int num_desc;
- struct spider_net_hw_descr *hwring;
- dma_addr_t dma_addr;
-};
-
-/* descriptor data_status bits */
-#define SPIDER_NET_RX_IPCHK 29
-#define SPIDER_NET_RX_TCPCHK 28
-#define SPIDER_NET_VLAN_PACKET 21
-#define SPIDER_NET_DATA_STATUS_CKSUM_MASK ( (1 << SPIDER_NET_RX_IPCHK) | \
- (1 << SPIDER_NET_RX_TCPCHK) )
-
-/* descriptor data_error bits */
-#define SPIDER_NET_RX_IPCHKERR 27
-#define SPIDER_NET_RX_RXTCPCHKERR 28
-
-#define SPIDER_NET_DATA_ERR_CKSUM_MASK (1 << SPIDER_NET_RX_IPCHKERR)
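-
-/* Hypothetical illustration (not the driver's verbatim code): these two
- * masks presumably drive the RX checksum-offload decision in
- * spider_net_pass_skb_up(), roughly along these lines:
- *
- *	skb_checksum_none_assert(skb);
- *	if ((data_status & SPIDER_NET_DATA_STATUS_CKSUM_MASK) ==
- *			SPIDER_NET_DATA_STATUS_CKSUM_MASK &&
- *	    !(data_error & SPIDER_NET_DATA_ERR_CKSUM_MASK))
- *		skb->ip_summed = CHECKSUM_UNNECESSARY;
- */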
-
-/* the cases in which we don't pass the packet to the stack.
- * 0x701b8000 would be correct, but every packet gets that flag */
-#define SPIDER_NET_DESTROY_RX_FLAGS 0x700b8000
-
-#define SPIDER_NET_DEFAULT_MSG ( NETIF_MSG_DRV | \
- NETIF_MSG_PROBE | \
- NETIF_MSG_LINK | \
- NETIF_MSG_TIMER | \
- NETIF_MSG_IFDOWN | \
- NETIF_MSG_IFUP | \
- NETIF_MSG_RX_ERR | \
- NETIF_MSG_TX_ERR | \
- NETIF_MSG_TX_QUEUED | \
- NETIF_MSG_INTR | \
- NETIF_MSG_TX_DONE | \
- NETIF_MSG_RX_STATUS | \
- NETIF_MSG_PKTDATA | \
- NETIF_MSG_HW | \
- NETIF_MSG_WOL )
-
-struct spider_net_extra_stats {
- unsigned long rx_desc_error;
- unsigned long tx_timeouts;
- unsigned long alloc_rx_skb_error;
- unsigned long rx_iommu_map_error;
- unsigned long tx_iommu_map_error;
- unsigned long rx_desc_unk_state;
-};
-
-struct spider_net_card {
- struct net_device *netdev;
- struct pci_dev *pdev;
- struct mii_phy phy;
-
- struct napi_struct napi;
-
- int medium;
-
- void __iomem *regs;
-
- struct spider_net_descr_chain tx_chain;
- struct spider_net_descr_chain rx_chain;
- struct spider_net_descr *low_watermark;
-
- int aneg_count;
- struct timer_list aneg_timer;
- struct timer_list tx_timer;
- struct work_struct tx_timeout_task;
- atomic_t tx_timeout_task_counter;
- wait_queue_head_t waitq;
- int num_rx_ints;
- int ignore_rx_ramfull;
-
- /* for ethtool */
- int msg_enable;
- struct spider_net_extra_stats spider_stats;
-
- /* Must be last item in struct */
- struct spider_net_descr darray[];
-};
-
-#endif
diff --git a/drivers/net/ethernet/toshiba/spider_net_ethtool.c b/drivers/net/ethernet/toshiba/spider_net_ethtool.c
deleted file mode 100644
index fef9fd127b5e..000000000000
--- a/drivers/net/ethernet/toshiba/spider_net_ethtool.c
+++ /dev/null
@@ -1,174 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Network device driver for Cell Processor-Based Blade
- *
- * (C) Copyright IBM Corp. 2005
- *
- * Authors : Utz Bacher <utz.bacher@de.ibm.com>
- * Jens Osterkamp <Jens.Osterkamp@de.ibm.com>
- */
-
-#include <linux/netdevice.h>
-#include <linux/ethtool.h>
-#include <linux/pci.h>
-
-#include "spider_net.h"
-
-
-static struct {
- const char str[ETH_GSTRING_LEN];
-} ethtool_stats_keys[] = {
- { "tx_packets" },
- { "tx_bytes" },
- { "rx_packets" },
- { "rx_bytes" },
- { "tx_errors" },
- { "tx_dropped" },
- { "rx_dropped" },
- { "rx_descriptor_error" },
- { "tx_timeouts" },
- { "alloc_rx_skb_error" },
- { "rx_iommu_map_error" },
- { "tx_iommu_map_error" },
- { "rx_desc_unk_state" },
-};
-
-static int
-spider_net_ethtool_get_link_ksettings(struct net_device *netdev,
- struct ethtool_link_ksettings *cmd)
-{
- struct spider_net_card *card;
- card = netdev_priv(netdev);
-
- ethtool_link_ksettings_zero_link_mode(cmd, supported);
- ethtool_link_ksettings_add_link_mode(cmd, supported, 1000baseT_Full);
- ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
-
- ethtool_link_ksettings_zero_link_mode(cmd, advertising);
- ethtool_link_ksettings_add_link_mode(cmd, advertising, 1000baseT_Full);
- ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE);
-
- cmd->base.port = PORT_FIBRE;
- cmd->base.speed = card->phy.speed;
- cmd->base.duplex = DUPLEX_FULL;
-
- return 0;
-}
-
-static void
-spider_net_ethtool_get_drvinfo(struct net_device *netdev,
- struct ethtool_drvinfo *drvinfo)
-{
- struct spider_net_card *card;
- card = netdev_priv(netdev);
-
- /* clear and fill out info */
- strscpy(drvinfo->driver, spider_net_driver_name,
- sizeof(drvinfo->driver));
- strscpy(drvinfo->version, VERSION, sizeof(drvinfo->version));
- strscpy(drvinfo->fw_version, "no information",
- sizeof(drvinfo->fw_version));
- strscpy(drvinfo->bus_info, pci_name(card->pdev),
- sizeof(drvinfo->bus_info));
-}
-
-static void
-spider_net_ethtool_get_wol(struct net_device *netdev,
- struct ethtool_wolinfo *wolinfo)
-{
- /* no support for wol */
- wolinfo->supported = 0;
- wolinfo->wolopts = 0;
-}
-
-static u32
-spider_net_ethtool_get_msglevel(struct net_device *netdev)
-{
- struct spider_net_card *card;
- card = netdev_priv(netdev);
- return card->msg_enable;
-}
-
-static void
-spider_net_ethtool_set_msglevel(struct net_device *netdev,
- u32 level)
-{
- struct spider_net_card *card;
- card = netdev_priv(netdev);
- card->msg_enable = level;
-}
-
-static int
-spider_net_ethtool_nway_reset(struct net_device *netdev)
-{
- if (netif_running(netdev)) {
- spider_net_stop(netdev);
- spider_net_open(netdev);
- }
- return 0;
-}
-
-static void
-spider_net_ethtool_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ering,
- struct kernel_ethtool_ringparam *kernel_ering,
- struct netlink_ext_ack *extack)
-{
- struct spider_net_card *card = netdev_priv(netdev);
-
- ering->tx_max_pending = SPIDER_NET_TX_DESCRIPTORS_MAX;
- ering->tx_pending = card->tx_chain.num_desc;
- ering->rx_max_pending = SPIDER_NET_RX_DESCRIPTORS_MAX;
- ering->rx_pending = card->rx_chain.num_desc;
-}
-
-static int spider_net_get_sset_count(struct net_device *netdev, int sset)
-{
- switch (sset) {
- case ETH_SS_STATS:
- return ARRAY_SIZE(ethtool_stats_keys);
- default:
- return -EOPNOTSUPP;
- }
-}
-
-static void spider_net_get_ethtool_stats(struct net_device *netdev,
- struct ethtool_stats *stats, u64 *data)
-{
- struct spider_net_card *card = netdev_priv(netdev);
-
- data[0] = netdev->stats.tx_packets;
- data[1] = netdev->stats.tx_bytes;
- data[2] = netdev->stats.rx_packets;
- data[3] = netdev->stats.rx_bytes;
- data[4] = netdev->stats.tx_errors;
- data[5] = netdev->stats.tx_dropped;
- data[6] = netdev->stats.rx_dropped;
- data[7] = card->spider_stats.rx_desc_error;
- data[8] = card->spider_stats.tx_timeouts;
- data[9] = card->spider_stats.alloc_rx_skb_error;
- data[10] = card->spider_stats.rx_iommu_map_error;
- data[11] = card->spider_stats.tx_iommu_map_error;
- data[12] = card->spider_stats.rx_desc_unk_state;
-}
-
-static void spider_net_get_strings(struct net_device *netdev, u32 stringset,
- u8 *data)
-{
- memcpy(data, ethtool_stats_keys, sizeof(ethtool_stats_keys));
-}
-
-const struct ethtool_ops spider_net_ethtool_ops = {
- .get_drvinfo = spider_net_ethtool_get_drvinfo,
- .get_wol = spider_net_ethtool_get_wol,
- .get_msglevel = spider_net_ethtool_get_msglevel,
- .set_msglevel = spider_net_ethtool_set_msglevel,
- .get_link = ethtool_op_get_link,
- .nway_reset = spider_net_ethtool_nway_reset,
- .get_ringparam = spider_net_ethtool_get_ringparam,
- .get_strings = spider_net_get_strings,
- .get_sset_count = spider_net_get_sset_count,
- .get_ethtool_stats = spider_net_get_ethtool_stats,
- .get_link_ksettings = spider_net_ethtool_get_link_ksettings,
-};
-
diff --git a/drivers/net/ethernet/wangxun/Kconfig b/drivers/net/ethernet/wangxun/Kconfig
index e46ccebcfd22..47e3e8434b9e 100644
--- a/drivers/net/ethernet/wangxun/Kconfig
+++ b/drivers/net/ethernet/wangxun/Kconfig
@@ -18,6 +18,7 @@ if NET_VENDOR_WANGXUN
config LIBWX
tristate
+ depends on PTP_1588_CLOCK_OPTIONAL
select PAGE_POOL
help
Common library for Wangxun(R) Ethernet drivers.
@@ -25,6 +26,7 @@ config LIBWX
config NGBE
tristate "Wangxun(R) GbE PCI Express adapters support"
depends on PCI
+ depends on PTP_1588_CLOCK_OPTIONAL
select LIBWX
select PHYLINK
help
@@ -42,6 +44,7 @@ config TXGBE
depends on PCI
depends on COMMON_CLK
depends on I2C_DESIGNWARE_PLATFORM
+ depends on PTP_1588_CLOCK_OPTIONAL
select MARVELL_10G_PHY
select REGMAP
select PHYLINK
diff --git a/drivers/net/ethernet/wangxun/libwx/Makefile b/drivers/net/ethernet/wangxun/libwx/Makefile
index 42ccd6e4052e..e9f0f1f2309b 100644
--- a/drivers/net/ethernet/wangxun/libwx/Makefile
+++ b/drivers/net/ethernet/wangxun/libwx/Makefile
@@ -4,4 +4,4 @@
obj-$(CONFIG_LIBWX) += libwx.o
-libwx-objs := wx_hw.o wx_lib.o wx_ethtool.o
+libwx-objs := wx_hw.o wx_lib.o wx_ethtool.o wx_ptp.o
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c
index abe5921dde02..43019ec9329c 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c
+++ b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c
@@ -41,6 +41,9 @@ static const struct wx_stats wx_gstrings_stats[] = {
WX_STAT("rx_csum_offload_good_count", hw_csum_rx_good),
WX_STAT("rx_csum_offload_errors", hw_csum_rx_error),
WX_STAT("alloc_rx_buff_failed", alloc_rx_buff_failed),
+ WX_STAT("tx_hwtstamp_timeouts", tx_hwtstamp_timeouts),
+ WX_STAT("tx_hwtstamp_skipped", tx_hwtstamp_skipped),
+ WX_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared),
};
static const struct wx_stats wx_gstrings_fdir_stats[] = {
@@ -69,7 +72,7 @@ int wx_get_sset_count(struct net_device *netdev, int sset)
switch (sset) {
case ETH_SS_STATS:
- return (wx->mac.type == wx_mac_sp) ?
+ return (test_bit(WX_FLAG_FDIR_CAPABLE, wx->flags)) ?
WX_STATS_LEN + WX_FDIR_STATS_LEN : WX_STATS_LEN;
default:
return -EOPNOTSUPP;
@@ -87,7 +90,7 @@ void wx_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
case ETH_SS_STATS:
for (i = 0; i < WX_GLOBAL_STATS_LEN; i++)
ethtool_puts(&p, wx_gstrings_stats[i].stat_string);
- if (wx->mac.type == wx_mac_sp) {
+ if (test_bit(WX_FLAG_FDIR_CAPABLE, wx->flags)) {
for (i = 0; i < WX_FDIR_STATS_LEN; i++)
ethtool_puts(&p, wx_gstrings_fdir_stats[i].stat_string);
}
@@ -121,7 +124,7 @@ void wx_get_ethtool_stats(struct net_device *netdev,
sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
}
- if (wx->mac.type == wx_mac_sp) {
+ if (test_bit(WX_FLAG_FDIR_CAPABLE, wx->flags)) {
for (k = 0; k < WX_FDIR_STATS_LEN; k++) {
p = (char *)wx + wx_gstrings_fdir_stats[k].stat_offset;
data[i++] = *(u64 *)p;
@@ -196,7 +199,7 @@ void wx_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *info)
unsigned int stats_len = WX_STATS_LEN;
struct wx *wx = netdev_priv(netdev);
- if (wx->mac.type == wx_mac_sp)
+ if (test_bit(WX_FLAG_FDIR_CAPABLE, wx->flags))
stats_len += WX_FDIR_STATS_LEN;
strscpy(info->driver, wx->driver_name, sizeof(info->driver));
@@ -216,6 +219,9 @@ int wx_nway_reset(struct net_device *netdev)
{
struct wx *wx = netdev_priv(netdev);
+ if (wx->mac.type == wx_mac_aml)
+ return -EOPNOTSUPP;
+
return phylink_ethtool_nway_reset(wx->phylink);
}
EXPORT_SYMBOL(wx_nway_reset);
@@ -225,6 +231,9 @@ int wx_get_link_ksettings(struct net_device *netdev,
{
struct wx *wx = netdev_priv(netdev);
+ if (wx->mac.type == wx_mac_aml)
+ return -EOPNOTSUPP;
+
return phylink_ethtool_ksettings_get(wx->phylink, cmd);
}
EXPORT_SYMBOL(wx_get_link_ksettings);
@@ -234,6 +243,9 @@ int wx_set_link_ksettings(struct net_device *netdev,
{
struct wx *wx = netdev_priv(netdev);
+ if (wx->mac.type == wx_mac_aml)
+ return -EOPNOTSUPP;
+
return phylink_ethtool_ksettings_set(wx->phylink, cmd);
}
EXPORT_SYMBOL(wx_set_link_ksettings);
@@ -243,6 +255,9 @@ void wx_get_pauseparam(struct net_device *netdev,
{
struct wx *wx = netdev_priv(netdev);
+ if (wx->mac.type == wx_mac_aml)
+ return;
+
phylink_ethtool_get_pauseparam(wx->phylink, pause);
}
EXPORT_SYMBOL(wx_get_pauseparam);
@@ -252,6 +267,9 @@ int wx_set_pauseparam(struct net_device *netdev,
{
struct wx *wx = netdev_priv(netdev);
+ if (wx->mac.type == wx_mac_aml)
+ return -EOPNOTSUPP;
+
return phylink_ethtool_set_pauseparam(wx->phylink, pause);
}
EXPORT_SYMBOL(wx_set_pauseparam);
@@ -322,10 +340,17 @@ int wx_set_coalesce(struct net_device *netdev,
if (ec->tx_max_coalesced_frames_irq)
wx->tx_work_limit = ec->tx_max_coalesced_frames_irq;
- if (wx->mac.type == wx_mac_sp)
+ switch (wx->mac.type) {
+ case wx_mac_sp:
max_eitr = WX_SP_MAX_EITR;
- else
+ break;
+ case wx_mac_aml:
+ max_eitr = WX_AML_MAX_EITR;
+ break;
+ default:
max_eitr = WX_EM_MAX_EITR;
+ break;
+ }
if ((ec->rx_coalesce_usecs > (max_eitr >> 2)) ||
(ec->tx_coalesce_usecs > (max_eitr >> 2)))
@@ -347,10 +372,15 @@ int wx_set_coalesce(struct net_device *netdev,
wx->tx_itr_setting = ec->tx_coalesce_usecs;
if (wx->tx_itr_setting == 1) {
- if (wx->mac.type == wx_mac_sp)
+ switch (wx->mac.type) {
+ case wx_mac_sp:
+ case wx_mac_aml:
tx_itr_param = WX_12K_ITR;
- else
+ break;
+ default:
tx_itr_param = WX_20K_ITR;
+ break;
+ }
} else {
tx_itr_param = wx->tx_itr_setting;
}
@@ -383,10 +413,15 @@ static unsigned int wx_max_channels(struct wx *wx)
max_combined = 1;
} else {
/* support up to max allowed queues with RSS */
- if (wx->mac.type == wx_mac_sp)
+ switch (wx->mac.type) {
+ case wx_mac_sp:
+ case wx_mac_aml:
max_combined = 63;
- else
+ break;
+ default:
max_combined = 8;
+ break;
+ }
}
return max_combined;
@@ -452,3 +487,53 @@ void wx_set_msglevel(struct net_device *netdev, u32 data)
wx->msg_enable = data;
}
EXPORT_SYMBOL(wx_set_msglevel);
+
+int wx_get_ts_info(struct net_device *dev,
+ struct kernel_ethtool_ts_info *info)
+{
+ struct wx *wx = netdev_priv(dev);
+
+ info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
+ BIT(HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
+ BIT(HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_SYNC) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_EVENT);
+
+ info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+
+ if (wx->ptp_clock)
+ info->phc_index = ptp_clock_index(wx->ptp_clock);
+ else
+ info->phc_index = -1;
+
+ info->tx_types = BIT(HWTSTAMP_TX_OFF) |
+ BIT(HWTSTAMP_TX_ON);
+
+ return 0;
+}
+EXPORT_SYMBOL(wx_get_ts_info);
+
+void wx_get_ptp_stats(struct net_device *dev,
+ struct ethtool_ts_stats *ts_stats)
+{
+ struct wx *wx = netdev_priv(dev);
+
+ if (wx->ptp_clock) {
+ ts_stats->pkts = wx->tx_hwtstamp_pkts;
+ ts_stats->lost = wx->tx_hwtstamp_timeouts +
+ wx->tx_hwtstamp_skipped +
+ wx->rx_hwtstamp_cleared;
+ ts_stats->err = wx->tx_hwtstamp_errors;
+ }
+}
+EXPORT_SYMBOL(wx_get_ptp_stats);
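The two exported helpers above are intended to be plugged into the ethtool_ops of the MAC drivers built on libwx. As a rough sketch (the ops table name is hypothetical and only the timestamping callbacks are shown), the wiring would look like:

static const struct ethtool_ops example_wx_ethtool_ops = {
	/* ... the driver's other callbacks go here ... */
	.get_ts_info		= wx_get_ts_info,
	.get_ts_stats		= wx_get_ptp_stats,
};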
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_ethtool.h b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.h
index 600c3b597d1a..9e002e699eca 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_ethtool.h
+++ b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.h
@@ -40,4 +40,8 @@ int wx_set_channels(struct net_device *dev,
struct ethtool_channels *ch);
u32 wx_get_msglevel(struct net_device *netdev);
void wx_set_msglevel(struct net_device *netdev, u32 data);
+int wx_get_ts_info(struct net_device *dev,
+ struct kernel_ethtool_ts_info *info);
+void wx_get_ptp_stats(struct net_device *dev,
+ struct ethtool_ts_stats *ts_stats);
#endif /* _WX_ETHTOOL_H_ */
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_hw.c b/drivers/net/ethernet/wangxun/libwx/wx_hw.c
index deaf670c160e..aed45abafb1b 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_hw.c
+++ b/drivers/net/ethernet/wangxun/libwx/wx_hw.c
@@ -112,10 +112,15 @@ static void wx_intr_disable(struct wx *wx, u64 qmask)
if (mask)
wr32(wx, WX_PX_IMS(0), mask);
- if (wx->mac.type == wx_mac_sp) {
+ switch (wx->mac.type) {
+ case wx_mac_sp:
+ case wx_mac_aml:
mask = (qmask >> 32);
if (mask)
wr32(wx, WX_PX_IMS(1), mask);
+ break;
+ default:
+ break;
}
}
@@ -126,10 +131,16 @@ void wx_intr_enable(struct wx *wx, u64 qmask)
mask = (qmask & U32_MAX);
if (mask)
wr32(wx, WX_PX_IMC(0), mask);
- if (wx->mac.type == wx_mac_sp) {
+
+ switch (wx->mac.type) {
+ case wx_mac_sp:
+ case wx_mac_aml:
mask = (qmask >> 32);
if (mask)
wr32(wx, WX_PX_IMC(1), mask);
+ break;
+ default:
+ break;
}
}
EXPORT_SYMBOL(wx_intr_enable);
@@ -278,22 +289,8 @@ static int wx_acquire_sw_sync(struct wx *wx, u32 mask)
return ret;
}
-/**
- * wx_host_interface_command - Issue command to manageability block
- * @wx: pointer to the HW structure
- * @buffer: contains the command to write and where the return status will
- * be placed
- * @length: length of buffer, must be multiple of 4 bytes
- * @timeout: time in ms to wait for command completion
- * @return_data: read and return data from the buffer (true) or not (false)
- * Needed because FW structures are big endian and decoding of
- * these fields can be 8 bit or 16 bit based on command. Decoding
- * is not easily understood without making a table of commands.
- * So we will leave this up to the caller to read back the data
- * in these cases.
- **/
-int wx_host_interface_command(struct wx *wx, u32 *buffer,
- u32 length, u32 timeout, bool return_data)
+static int wx_host_interface_command_s(struct wx *wx, u32 *buffer,
+ u32 length, u32 timeout, bool return_data)
{
u32 hdr_size = sizeof(struct wx_hic_hdr);
u32 hicr, i, bi, buf[64] = {};
@@ -301,22 +298,10 @@ int wx_host_interface_command(struct wx *wx, u32 *buffer,
u32 dword_len;
u16 buf_len;
- if (length == 0 || length > WX_HI_MAX_BLOCK_BYTE_LENGTH) {
- wx_err(wx, "Buffer length failure buffersize=%d.\n", length);
- return -EINVAL;
- }
-
status = wx_acquire_sw_sync(wx, WX_MNG_SWFW_SYNC_SW_MB);
if (status != 0)
return status;
- /* Calculate length in DWORDs. We must be DWORD aligned */
- if ((length % (sizeof(u32))) != 0) {
- wx_err(wx, "Buffer length failure, not aligned to dword");
- status = -EINVAL;
- goto rel_out;
- }
-
dword_len = length >> 2;
/* The device driver writes the relevant command block
@@ -391,8 +376,160 @@ rel_out:
wx_release_sw_sync(wx, WX_MNG_SWFW_SYNC_SW_MB);
return status;
}
+
+static bool wx_poll_fw_reply(struct wx *wx, u32 *buffer, u8 send_cmd)
+{
+ u32 dword_len = sizeof(struct wx_hic_hdr) >> 2;
+ struct wx_hic_hdr *recv_hdr;
+ u32 i;
+
+ /* read hdr */
+ for (i = 0; i < dword_len; i++) {
+ buffer[i] = rd32a(wx, WX_FW2SW_MBOX, i);
+ le32_to_cpus(&buffer[i]);
+ }
+
+ /* check hdr */
+ recv_hdr = (struct wx_hic_hdr *)buffer;
+ if (recv_hdr->cmd == send_cmd &&
+ recv_hdr->index == wx->swfw_index)
+ return true;
+
+ return false;
+}
+
+static int wx_host_interface_command_r(struct wx *wx, u32 *buffer,
+ u32 length, u32 timeout, bool return_data)
+{
+ struct wx_hic_hdr *hdr = (struct wx_hic_hdr *)buffer;
+ u32 hdr_size = sizeof(struct wx_hic_hdr);
+ bool busy, reply;
+ u32 dword_len;
+ u16 buf_len;
+ int err = 0;
+ u8 send_cmd;
+ u32 i;
+
+ /* wait to get lock */
+ might_sleep();
+ err = read_poll_timeout(test_and_set_bit, busy, !busy, 1000, timeout * 1000,
+ false, WX_STATE_SWFW_BUSY, wx->state);
+ if (err)
+ return err;
+
+ /* index to unique seq id for each mbox message */
+ hdr->index = wx->swfw_index;
+ send_cmd = hdr->cmd;
+
+ dword_len = length >> 2;
+ /* write data to SW-FW mbox array */
+ for (i = 0; i < dword_len; i++) {
+ wr32a(wx, WX_SW2FW_MBOX, i, (__force u32)cpu_to_le32(buffer[i]));
+ /* write flush */
+ rd32a(wx, WX_SW2FW_MBOX, i);
+ }
+
+ /* generate interrupt to notify FW */
+ wr32m(wx, WX_SW2FW_MBOX_CMD, WX_SW2FW_MBOX_CMD_VLD, 0);
+ wr32m(wx, WX_SW2FW_MBOX_CMD, WX_SW2FW_MBOX_CMD_VLD, WX_SW2FW_MBOX_CMD_VLD);
+
+ /* polling reply from FW */
+ err = read_poll_timeout(wx_poll_fw_reply, reply, reply, 1000, 50000,
+ true, wx, buffer, send_cmd);
+ if (err) {
+		wx_err(wx, "Polling for FW reply timed out, cmd: 0x%x, index: %d\n",
+ send_cmd, wx->swfw_index);
+ goto rel_out;
+ }
+
+ /* expect no reply from FW then return */
+ if (!return_data)
+ goto rel_out;
+
+	/* If there is anything in the data position, pull it in */
+ buf_len = hdr->buf_len;
+ if (buf_len == 0)
+ goto rel_out;
+
+ if (length < buf_len + hdr_size) {
+ wx_err(wx, "Buffer not large enough for reply message.\n");
+ err = -EFAULT;
+ goto rel_out;
+ }
+
+ /* Calculate length in DWORDs, add 3 for odd lengths */
+ dword_len = (buf_len + 3) >> 2;
+ for (i = hdr_size >> 2; i <= dword_len; i++) {
+ buffer[i] = rd32a(wx, WX_FW2SW_MBOX, i);
+ le32_to_cpus(&buffer[i]);
+ }
+
+rel_out:
+	/* advance the index; it takes the place of wx_hic_hdr.checksum */
+ if (wx->swfw_index == WX_HIC_HDR_INDEX_MAX)
+ wx->swfw_index = 0;
+ else
+ wx->swfw_index++;
+
+ clear_bit(WX_STATE_SWFW_BUSY, wx->state);
+ return err;
+}
+
+/**
+ * wx_host_interface_command - Issue command to manageability block
+ * @wx: pointer to the HW structure
+ * @buffer: contains the command to write and where the return status will
+ * be placed
+ * @length: length of buffer, must be multiple of 4 bytes
+ * @timeout: time in ms to wait for command completion
+ * @return_data: read and return data from the buffer (true) or not (false)
+ * Needed because FW structures are big endian and decoding of
+ * these fields can be 8 bit or 16 bit based on command. Decoding
+ * is not easily understood without making a table of commands.
+ * So we will leave this up to the caller to read back the data
+ * in these cases.
+ **/
+int wx_host_interface_command(struct wx *wx, u32 *buffer,
+ u32 length, u32 timeout, bool return_data)
+{
+ if (length == 0 || length > WX_HI_MAX_BLOCK_BYTE_LENGTH) {
+ wx_err(wx, "Buffer length failure buffersize=%d.\n", length);
+ return -EINVAL;
+ }
+
+ /* Calculate length in DWORDs. We must be DWORD aligned */
+ if ((length % (sizeof(u32))) != 0) {
+ wx_err(wx, "Buffer length failure, not aligned to dword");
+ return -EINVAL;
+ }
+
+ if (test_bit(WX_FLAG_SWFW_RING, wx->flags))
+ return wx_host_interface_command_r(wx, buffer, length,
+ timeout, return_data);
+
+ return wx_host_interface_command_s(wx, buffer, length, timeout, return_data);
+}
EXPORT_SYMBOL(wx_host_interface_command);
+int wx_set_pps(struct wx *wx, bool enable, u64 nsec, u64 cycles)
+{
+ struct wx_hic_set_pps pps_cmd;
+
+ pps_cmd.hdr.cmd = FW_PPS_SET_CMD;
+ pps_cmd.hdr.buf_len = FW_PPS_SET_LEN;
+ pps_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED;
+ pps_cmd.lan_id = wx->bus.func;
+ pps_cmd.enable = (u8)enable;
+ pps_cmd.nsec = nsec;
+ pps_cmd.cycles = cycles;
+ pps_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
+
+ return wx_host_interface_command(wx, (u32 *)&pps_cmd,
+ sizeof(pps_cmd),
+ WX_HI_COMMAND_TIMEOUT,
+ false);
+}
+
/**
* wx_read_ee_hostif_data - Read EEPROM word using a host interface cmd
* assuming that the semaphore is already obtained.
@@ -423,7 +560,10 @@ static int wx_read_ee_hostif_data(struct wx *wx, u16 offset, u16 *data)
if (status != 0)
return status;
- *data = (u16)rd32a(wx, WX_MNG_MBOX, FW_NVM_DATA_OFFSET);
+ if (!test_bit(WX_FLAG_SWFW_RING, wx->flags))
+ *data = (u16)rd32a(wx, WX_MNG_MBOX, FW_NVM_DATA_OFFSET);
+ else
+ *data = (u16)rd32a(wx, WX_FW2SW_MBOX, FW_NVM_DATA_OFFSET);
return status;
}
@@ -467,6 +607,7 @@ int wx_read_ee_hostif_buffer(struct wx *wx,
u16 words_to_read;
u32 value = 0;
int status;
+ u32 mbox;
u32 i;
/* Take semaphore for the entire operation. */
@@ -499,8 +640,12 @@ int wx_read_ee_hostif_buffer(struct wx *wx,
goto out;
}
+ if (!test_bit(WX_FLAG_SWFW_RING, wx->flags))
+ mbox = WX_MNG_MBOX;
+ else
+ mbox = WX_FW2SW_MBOX;
for (i = 0; i < words_to_read; i++) {
- u32 reg = WX_MNG_MBOX + (FW_NVM_DATA_OFFSET << 2) + 2 * i;
+ u32 reg = mbox + (FW_NVM_DATA_OFFSET << 2) + 2 * i;
value = rd32(wx, reg);
data[current_word] = (u16)(value & 0xffff);
@@ -550,12 +695,17 @@ void wx_init_eeprom_params(struct wx *wx)
}
}
- if (wx->mac.type == wx_mac_sp) {
+ switch (wx->mac.type) {
+ case wx_mac_sp:
+ case wx_mac_aml:
if (wx_read_ee_hostif(wx, WX_SW_REGION_PTR, &data)) {
wx_err(wx, "NVM Read Error\n");
return;
}
data = data >> 1;
+ break;
+ default:
+ break;
}
eeprom->sw_region_offset = data;
@@ -616,8 +766,15 @@ static int wx_set_rar(struct wx *wx, u32 index, u8 *addr, u64 pools,
/* setup VMDq pool mapping */
wr32(wx, WX_PSR_MAC_SWC_VM_L, pools & 0xFFFFFFFF);
- if (wx->mac.type == wx_mac_sp)
+
+ switch (wx->mac.type) {
+ case wx_mac_sp:
+ case wx_mac_aml:
wr32(wx, WX_PSR_MAC_SWC_VM_H, pools >> 32);
+ break;
+ default:
+ break;
+ }
/* HW expects these in little endian so we reverse the byte
* order from network order (big endian) to little endian
@@ -755,9 +912,14 @@ void wx_init_rx_addrs(struct wx *wx)
wx_set_rar(wx, 0, wx->mac.addr, 0, WX_PSR_MAC_SWC_AD_H_AV);
- if (wx->mac.type == wx_mac_sp) {
+ switch (wx->mac.type) {
+ case wx_mac_sp:
+ case wx_mac_aml:
/* clear VMDq pool/queue selection for RAR 0 */
wx_clear_vmdq(wx, 0, WX_CLEAR_VMDQ_ALL);
+ break;
+ default:
+ break;
}
}
@@ -1699,7 +1861,7 @@ void wx_configure_rx(struct wx *wx)
/* enable hw crc stripping */
wr32m(wx, WX_RSC_CTL, WX_RSC_CTL_CRC_STRIP, WX_RSC_CTL_CRC_STRIP);
- if (wx->mac.type == wx_mac_sp) {
+ if (test_bit(WX_FLAG_RSC_CAPABLE, wx->flags)) {
u32 psrctl;
/* RSC Setup */
@@ -2351,7 +2513,7 @@ void wx_update_stats(struct wx *wx)
hwstats->b2ogprc += rd32(wx, WX_RDM_BMC2OS_CNT);
hwstats->rdmdrop += rd32(wx, WX_RDM_DRP_PKT);
- if (wx->mac.type == wx_mac_sp) {
+ if (test_bit(WX_FLAG_FDIR_CAPABLE, wx->flags)) {
hwstats->fdirmatch += rd32(wx, WX_RDB_FDIR_MATCH);
hwstats->fdirmiss += rd32(wx, WX_RDB_FDIR_MISS);
}
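Both the busy-lock acquisition and the wait for the firmware reply in the new wx_host_interface_command_r() path rely on read_poll_timeout() from <linux/iopoll.h>. A minimal, hypothetical sketch of that pattern against a libwx register (the helper name and the 100 ms budget are illustrative; rd32(), WX_MNG_MBOX_CTL and WX_MNG_MBOX_CTL_FWRDY come from wx_type.h):

#include <linux/iopoll.h>

static int example_wait_fw_ready(struct wx *wx)
{
	u32 val;

	/* Re-read the register every 1 ms until the FW-ready bit is set;
	 * give up after 100 ms and return -ETIMEDOUT.
	 */
	return read_poll_timeout(rd32, val, val & WX_MNG_MBOX_CTL_FWRDY,
				 1000, 100000, false, wx, WX_MNG_MBOX_CTL);
}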
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_hw.h b/drivers/net/ethernet/wangxun/libwx/wx_hw.h
index 11fb33349482..b883342bb576 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_hw.h
+++ b/drivers/net/ethernet/wangxun/libwx/wx_hw.h
@@ -18,6 +18,7 @@ void wx_control_hw(struct wx *wx, bool drv);
int wx_mng_present(struct wx *wx);
int wx_host_interface_command(struct wx *wx, u32 *buffer,
u32 length, u32 timeout, bool return_data);
+int wx_set_pps(struct wx *wx, bool enable, u64 nsec, u64 cycles);
int wx_read_ee_hostif(struct wx *wx, u16 offset, u16 *data);
int wx_read_ee_hostif_buffer(struct wx *wx,
u16 offset, u16 words, u16 *data);
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_lib.c b/drivers/net/ethernet/wangxun/libwx/wx_lib.c
index 2b3d6586f44a..00b0b318df27 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_lib.c
+++ b/drivers/net/ethernet/wangxun/libwx/wx_lib.c
@@ -13,6 +13,7 @@
#include "wx_type.h"
#include "wx_lib.h"
+#include "wx_ptp.h"
#include "wx_hw.h"
/* Lookup table mapping the HW PTYPE to the bit field for decoding */
@@ -597,8 +598,17 @@ static void wx_process_skb_fields(struct wx_ring *rx_ring,
union wx_rx_desc *rx_desc,
struct sk_buff *skb)
{
+ struct wx *wx = netdev_priv(rx_ring->netdev);
+
wx_rx_hash(rx_ring, rx_desc, skb);
wx_rx_checksum(rx_ring, rx_desc, skb);
+
+ if (unlikely(test_bit(WX_FLAG_RX_HWTSTAMP_ENABLED, wx->flags)) &&
+ unlikely(wx_test_staterr(rx_desc, WX_RXD_STAT_TS))) {
+ wx_ptp_rx_hwtstamp(rx_ring->q_vector->wx, skb);
+ rx_ring->last_rx_timestamp = jiffies;
+ }
+
wx_rx_vlan(rx_ring, rx_desc, skb);
skb_record_rx_queue(skb, rx_ring->queue_index);
skb->protocol = eth_type_trans(skb, rx_ring->netdev);
@@ -705,6 +715,7 @@ static bool wx_clean_tx_irq(struct wx_q_vector *q_vector,
{
unsigned int budget = q_vector->wx->tx_work_limit;
unsigned int total_bytes = 0, total_packets = 0;
+ struct wx *wx = netdev_priv(tx_ring->netdev);
unsigned int i = tx_ring->next_to_clean;
struct wx_tx_buffer *tx_buffer;
union wx_tx_desc *tx_desc;
@@ -737,6 +748,11 @@ static bool wx_clean_tx_irq(struct wx_q_vector *q_vector,
total_bytes += tx_buffer->bytecount;
total_packets += tx_buffer->gso_segs;
+ /* schedule check for Tx timestamp */
+ if (unlikely(test_bit(WX_STATE_PTP_TX_IN_PROGRESS, wx->state)) &&
+ skb_shinfo(tx_buffer->skb)->tx_flags & SKBTX_IN_PROGRESS)
+ ptp_schedule_worker(wx->ptp_clock, 0);
+
/* free the skb */
napi_consume_skb(tx_buffer->skb, napi_budget);
@@ -932,9 +948,9 @@ static void wx_tx_olinfo_status(union wx_tx_desc *tx_desc,
tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
}
-static void wx_tx_map(struct wx_ring *tx_ring,
- struct wx_tx_buffer *first,
- const u8 hdr_len)
+static int wx_tx_map(struct wx_ring *tx_ring,
+ struct wx_tx_buffer *first,
+ const u8 hdr_len)
{
struct sk_buff *skb = first->skb;
struct wx_tx_buffer *tx_buffer;
@@ -1013,6 +1029,8 @@ static void wx_tx_map(struct wx_ring *tx_ring,
netdev_tx_sent_queue(wx_txring_txq(tx_ring), first->bytecount);
+ /* set the timestamp */
+ first->time_stamp = jiffies;
skb_tx_timestamp(skb);
/* Force memory writes to complete before letting h/w know there
@@ -1038,7 +1056,7 @@ static void wx_tx_map(struct wx_ring *tx_ring,
if (netif_xmit_stopped(wx_txring_txq(tx_ring)) || !netdev_xmit_more())
writel(i, tx_ring->tail);
- return;
+ return 0;
dma_error:
dev_err(tx_ring->dev, "TX DMA map failed\n");
@@ -1062,6 +1080,8 @@ dma_error:
first->skb = NULL;
tx_ring->next_to_use = i;
+
+ return -ENOMEM;
}
static void wx_tx_ctxtdesc(struct wx_ring *tx_ring, u32 vlan_macip_lens,
@@ -1082,26 +1102,6 @@ static void wx_tx_ctxtdesc(struct wx_ring *tx_ring, u32 vlan_macip_lens,
context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
}
-static void wx_get_ipv6_proto(struct sk_buff *skb, int offset, u8 *nexthdr)
-{
- struct ipv6hdr *hdr = (struct ipv6hdr *)(skb->data + offset);
-
- *nexthdr = hdr->nexthdr;
- offset += sizeof(struct ipv6hdr);
- while (ipv6_ext_hdr(*nexthdr)) {
- struct ipv6_opt_hdr _hdr, *hp;
-
- if (*nexthdr == NEXTHDR_NONE)
- return;
- hp = skb_header_pointer(skb, offset, sizeof(_hdr), &_hdr);
- if (!hp)
- return;
- if (*nexthdr == NEXTHDR_FRAGMENT)
- break;
- *nexthdr = hp->nexthdr;
- }
-}
-
union network_header {
struct iphdr *ipv4;
struct ipv6hdr *ipv6;
@@ -1112,6 +1112,8 @@ static u8 wx_encode_tx_desc_ptype(const struct wx_tx_buffer *first)
{
u8 tun_prot = 0, l4_prot = 0, ptype = 0;
struct sk_buff *skb = first->skb;
+ unsigned char *exthdr, *l4_hdr;
+ __be16 frag_off;
if (skb->encapsulation) {
union network_header hdr;
@@ -1122,14 +1124,18 @@ static u8 wx_encode_tx_desc_ptype(const struct wx_tx_buffer *first)
ptype = WX_PTYPE_TUN_IPV4;
break;
case htons(ETH_P_IPV6):
- wx_get_ipv6_proto(skb, skb_network_offset(skb), &tun_prot);
+ l4_hdr = skb_transport_header(skb);
+ exthdr = skb_network_header(skb) + sizeof(struct ipv6hdr);
+ tun_prot = ipv6_hdr(skb)->nexthdr;
+ if (l4_hdr != exthdr)
+ ipv6_skip_exthdr(skb, exthdr - skb->data, &tun_prot, &frag_off);
ptype = WX_PTYPE_TUN_IPV6;
break;
default:
return ptype;
}
- if (tun_prot == IPPROTO_IPIP) {
+ if (tun_prot == IPPROTO_IPIP || tun_prot == IPPROTO_IPV6) {
hdr.raw = (void *)inner_ip_hdr(skb);
ptype |= WX_PTYPE_PKT_IPIP;
} else if (tun_prot == IPPROTO_UDP) {
@@ -1166,7 +1172,11 @@ static u8 wx_encode_tx_desc_ptype(const struct wx_tx_buffer *first)
l4_prot = hdr.ipv4->protocol;
break;
case 6:
- wx_get_ipv6_proto(skb, skb_inner_network_offset(skb), &l4_prot);
+ l4_hdr = skb_inner_transport_header(skb);
+ exthdr = skb_inner_network_header(skb) + sizeof(struct ipv6hdr);
+ l4_prot = inner_ipv6_hdr(skb)->nexthdr;
+ if (l4_hdr != exthdr)
+ ipv6_skip_exthdr(skb, exthdr - skb->data, &l4_prot, &frag_off);
ptype |= WX_PTYPE_PKT_IPV6;
break;
default:
@@ -1179,7 +1189,11 @@ static u8 wx_encode_tx_desc_ptype(const struct wx_tx_buffer *first)
ptype = WX_PTYPE_PKT_IP;
break;
case htons(ETH_P_IPV6):
- wx_get_ipv6_proto(skb, skb_network_offset(skb), &l4_prot);
+ l4_hdr = skb_transport_header(skb);
+ exthdr = skb_network_header(skb) + sizeof(struct ipv6hdr);
+ l4_prot = ipv6_hdr(skb)->nexthdr;
+ if (l4_hdr != exthdr)
+ ipv6_skip_exthdr(skb, exthdr - skb->data, &l4_prot, &frag_off);
ptype = WX_PTYPE_PKT_IP | WX_PTYPE_PKT_IPV6;
break;
default:
@@ -1269,13 +1283,20 @@ static int wx_tso(struct wx_ring *tx_ring, struct wx_tx_buffer *first,
/* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
if (enc) {
+ unsigned char *exthdr, *l4_hdr;
+ __be16 frag_off;
+
switch (first->protocol) {
case htons(ETH_P_IP):
tun_prot = ip_hdr(skb)->protocol;
first->tx_flags |= WX_TX_FLAGS_OUTER_IPV4;
break;
case htons(ETH_P_IPV6):
+ l4_hdr = skb_transport_header(skb);
+ exthdr = skb_network_header(skb) + sizeof(struct ipv6hdr);
tun_prot = ipv6_hdr(skb)->nexthdr;
+ if (l4_hdr != exthdr)
+ ipv6_skip_exthdr(skb, exthdr - skb->data, &tun_prot, &frag_off);
break;
default:
break;
@@ -1298,6 +1319,7 @@ static int wx_tso(struct wx_ring *tx_ring, struct wx_tx_buffer *first,
WX_TXD_TUNNEL_LEN_SHIFT);
break;
case IPPROTO_IPIP:
+ case IPPROTO_IPV6:
tunhdr_eiplen_tunlen = (((char *)inner_ip_hdr(skb) -
(char *)ip_hdr(skb)) >> 2) <<
WX_TXD_OUTER_IPLEN_SHIFT;
@@ -1335,12 +1357,15 @@ static void wx_tx_csum(struct wx_ring *tx_ring, struct wx_tx_buffer *first,
u8 tun_prot = 0;
if (skb->ip_summed != CHECKSUM_PARTIAL) {
+csum_failed:
if (!(first->tx_flags & WX_TX_FLAGS_HW_VLAN) &&
!(first->tx_flags & WX_TX_FLAGS_CC))
return;
vlan_macip_lens = skb_network_offset(skb) <<
WX_TXD_MACLEN_SHIFT;
} else {
+ unsigned char *exthdr, *l4_hdr;
+ __be16 frag_off;
u8 l4_prot = 0;
union {
struct iphdr *ipv4;
@@ -1362,7 +1387,12 @@ static void wx_tx_csum(struct wx_ring *tx_ring, struct wx_tx_buffer *first,
tun_prot = ip_hdr(skb)->protocol;
break;
case htons(ETH_P_IPV6):
+ l4_hdr = skb_transport_header(skb);
+ exthdr = skb_network_header(skb) + sizeof(struct ipv6hdr);
tun_prot = ipv6_hdr(skb)->nexthdr;
+ if (l4_hdr != exthdr)
+ ipv6_skip_exthdr(skb, exthdr - skb->data,
+ &tun_prot, &frag_off);
break;
default:
return;
@@ -1386,6 +1416,7 @@ static void wx_tx_csum(struct wx_ring *tx_ring, struct wx_tx_buffer *first,
WX_TXD_TUNNEL_LEN_SHIFT);
break;
case IPPROTO_IPIP:
+ case IPPROTO_IPV6:
tunhdr_eiplen_tunlen = (((char *)inner_ip_hdr(skb) -
(char *)ip_hdr(skb)) >> 2) <<
WX_TXD_OUTER_IPLEN_SHIFT;
@@ -1408,7 +1439,10 @@ static void wx_tx_csum(struct wx_ring *tx_ring, struct wx_tx_buffer *first,
break;
case 6:
vlan_macip_lens |= (transport_hdr.raw - network_hdr.raw) >> 1;
+ exthdr = network_hdr.raw + sizeof(struct ipv6hdr);
l4_prot = network_hdr.ipv6->nexthdr;
+ if (transport_hdr.raw != exthdr)
+ ipv6_skip_exthdr(skb, exthdr - skb->data, &l4_prot, &frag_off);
break;
default:
break;
@@ -1428,7 +1462,8 @@ static void wx_tx_csum(struct wx_ring *tx_ring, struct wx_tx_buffer *first,
WX_TXD_L4LEN_SHIFT;
break;
default:
- break;
+ skb_checksum_help(skb);
+ goto csum_failed;
}
/* update TX checksum flag */
@@ -1486,6 +1521,20 @@ static netdev_tx_t wx_xmit_frame_ring(struct sk_buff *skb,
tx_flags |= WX_TX_FLAGS_HW_VLAN;
}
+ if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
+ wx->ptp_clock) {
+ if (wx->tstamp_config.tx_type == HWTSTAMP_TX_ON &&
+ !test_and_set_bit_lock(WX_STATE_PTP_TX_IN_PROGRESS,
+ wx->state)) {
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ tx_flags |= WX_TX_FLAGS_TSTAMP;
+ wx->ptp_tx_skb = skb_get(skb);
+ wx->ptp_tx_start = jiffies;
+ } else {
+ wx->tx_hwtstamp_skipped++;
+ }
+ }
+
/* record initial flags and protocol */
first->tx_flags = tx_flags;
first->protocol = vlan_get_protocol(skb);
@@ -1501,12 +1550,20 @@ static netdev_tx_t wx_xmit_frame_ring(struct sk_buff *skb,
if (test_bit(WX_FLAG_FDIR_CAPABLE, wx->flags) && tx_ring->atr_sample_rate)
wx->atr(tx_ring, first, ptype);
- wx_tx_map(tx_ring, first, hdr_len);
+ if (wx_tx_map(tx_ring, first, hdr_len))
+ goto cleanup_tx_tstamp;
return NETDEV_TX_OK;
out_drop:
dev_kfree_skb_any(first->skb);
first->skb = NULL;
+cleanup_tx_tstamp:
+ if (unlikely(tx_flags & WX_TX_FLAGS_TSTAMP)) {
+ dev_kfree_skb_any(wx->ptp_tx_skb);
+ wx->ptp_tx_skb = NULL;
+ wx->tx_hwtstamp_errors++;
+ clear_bit_unlock(WX_STATE_PTP_TX_IN_PROGRESS, wx->state);
+ }
return NETDEV_TX_OK;
}
@@ -1781,10 +1838,16 @@ static int wx_alloc_q_vector(struct wx *wx,
/* initialize pointer to rings */
ring = q_vector->ring;
- if (wx->mac.type == wx_mac_sp)
+ switch (wx->mac.type) {
+ case wx_mac_sp:
+ case wx_mac_aml:
default_itr = WX_12K_ITR;
- else
+ break;
+ default:
default_itr = WX_7K_ITR;
+ break;
+ }
+
/* initialize ITR */
if (txr_count && !rxr_count)
/* tx only vector */
@@ -2140,10 +2203,17 @@ void wx_write_eitr(struct wx_q_vector *q_vector)
int v_idx = q_vector->v_idx;
u32 itr_reg;
- if (wx->mac.type == wx_mac_sp)
+ switch (wx->mac.type) {
+ case wx_mac_sp:
itr_reg = q_vector->itr & WX_SP_MAX_EITR;
- else
+ break;
+ case wx_mac_aml:
+ itr_reg = (q_vector->itr >> 3) & WX_AML_MAX_EITR;
+ break;
+ default:
itr_reg = q_vector->itr & WX_EM_MAX_EITR;
+ break;
+ }
itr_reg |= WX_PX_ITR_CNT_WDIS;
@@ -2719,7 +2789,7 @@ int wx_set_features(struct net_device *netdev, netdev_features_t features)
netdev->features = features;
- if (wx->mac.type == wx_mac_sp && changed & NETIF_F_HW_VLAN_CTAG_RX)
+ if (changed & NETIF_F_HW_VLAN_CTAG_RX && wx->do_reset)
wx->do_reset(netdev);
else if (changed & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER))
wx_set_rx_mode(netdev);
@@ -2751,7 +2821,7 @@ int wx_set_features(struct net_device *netdev, netdev_features_t features)
break;
}
- if (need_reset)
+ if (need_reset && wx->do_reset)
wx->do_reset(netdev);
return 0;
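The open-coded wx_get_ipv6_proto() walker is replaced above by ipv6_skip_exthdr(), which advances past any IPv6 extension headers and reports the real L4 protocol. A self-contained sketch of the pattern (the helper name is hypothetical):

#include <linux/ipv6.h>
#include <net/ipv6.h>

static u8 example_ipv6_l4_proto(struct sk_buff *skb)
{
	unsigned char *exthdr = skb_network_header(skb) + sizeof(struct ipv6hdr);
	u8 proto = ipv6_hdr(skb)->nexthdr;
	__be16 frag_off;

	/* Only walk the extension-header chain when one is present. */
	if (skb_transport_header(skb) != exthdr)
		ipv6_skip_exthdr(skb, exthdr - skb->data, &proto, &frag_off);

	return proto;
}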
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_ptp.c b/drivers/net/ethernet/wangxun/libwx/wx_ptp.c
new file mode 100644
index 000000000000..07c015ba338f
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/libwx/wx_ptp.c
@@ -0,0 +1,883 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2015 - 2025 Beijing WangXun Technology Co., Ltd. */
+/* Copyright (c) 1999 - 2025 Intel Corporation. */
+
+#include <linux/ptp_classify.h>
+#include <linux/clocksource.h>
+#include <linux/pci.h>
+
+#include "wx_type.h"
+#include "wx_ptp.h"
+#include "wx_hw.h"
+
+#define WX_INCVAL_10GB 0xCCCCCC
+#define WX_INCVAL_1GB 0x800000
+#define WX_INCVAL_100 0xA00000
+#define WX_INCVAL_10 0xC7F380
+#define WX_INCVAL_EM 0x2000000
+
+#define WX_INCVAL_SHIFT_10GB 20
+#define WX_INCVAL_SHIFT_1GB 18
+#define WX_INCVAL_SHIFT_100 15
+#define WX_INCVAL_SHIFT_10 12
+#define WX_INCVAL_SHIFT_EM 22
+
+#define WX_OVERFLOW_PERIOD (HZ * 30)
+#define WX_PTP_TX_TIMEOUT (HZ)
+
+#define WX_1588_PPS_WIDTH_EM 120
+
+#define WX_NS_PER_SEC 1000000000ULL
+
+static u64 wx_ptp_timecounter_cyc2time(struct wx *wx, u64 timestamp)
+{
+ unsigned int seq;
+ u64 ns;
+
+ do {
+ seq = read_seqbegin(&wx->hw_tc_lock);
+ ns = timecounter_cyc2time(&wx->hw_tc, timestamp);
+ } while (read_seqretry(&wx->hw_tc_lock, seq));
+
+ return ns;
+}
+
+static u64 wx_ptp_readtime(struct wx *wx, struct ptp_system_timestamp *sts)
+{
+ u32 timeh1, timeh2, timel;
+
+ timeh1 = rd32ptp(wx, WX_TSC_1588_SYSTIMH);
+ ptp_read_system_prets(sts);
+ timel = rd32ptp(wx, WX_TSC_1588_SYSTIML);
+ ptp_read_system_postts(sts);
+ timeh2 = rd32ptp(wx, WX_TSC_1588_SYSTIMH);
+
+ if (timeh1 != timeh2) {
+ ptp_read_system_prets(sts);
+ timel = rd32ptp(wx, WX_TSC_1588_SYSTIML);
+		ptp_read_system_postts(sts);
+ }
+ return (u64)timel | (u64)timeh2 << 32;
+}
+
+static int wx_ptp_adjfine(struct ptp_clock_info *ptp, long ppb)
+{
+ struct wx *wx = container_of(ptp, struct wx, ptp_caps);
+ u64 incval, mask;
+
+ smp_mb(); /* Force any pending update before accessing. */
+ incval = READ_ONCE(wx->base_incval);
+ incval = adjust_by_scaled_ppm(incval, ppb);
+
+ mask = (wx->mac.type == wx_mac_em) ? 0x7FFFFFF : 0xFFFFFF;
+ incval &= mask;
+ if (wx->mac.type != wx_mac_em)
+ incval |= 2 << 24;
+
+ wr32ptp(wx, WX_TSC_1588_INC, incval);
+
+ return 0;
+}
+
+static int wx_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+ struct wx *wx = container_of(ptp, struct wx, ptp_caps);
+ unsigned long flags;
+
+ write_seqlock_irqsave(&wx->hw_tc_lock, flags);
+ timecounter_adjtime(&wx->hw_tc, delta);
+ write_sequnlock_irqrestore(&wx->hw_tc_lock, flags);
+
+ if (wx->ptp_setup_sdp)
+ wx->ptp_setup_sdp(wx);
+
+ return 0;
+}
+
+static int wx_ptp_gettimex64(struct ptp_clock_info *ptp,
+ struct timespec64 *ts,
+ struct ptp_system_timestamp *sts)
+{
+ struct wx *wx = container_of(ptp, struct wx, ptp_caps);
+ u64 ns, stamp;
+
+ stamp = wx_ptp_readtime(wx, sts);
+ ns = wx_ptp_timecounter_cyc2time(wx, stamp);
+ *ts = ns_to_timespec64(ns);
+
+ return 0;
+}
+
+static int wx_ptp_settime64(struct ptp_clock_info *ptp,
+ const struct timespec64 *ts)
+{
+ struct wx *wx = container_of(ptp, struct wx, ptp_caps);
+ unsigned long flags;
+ u64 ns;
+
+ ns = timespec64_to_ns(ts);
+ /* reset the timecounter */
+ write_seqlock_irqsave(&wx->hw_tc_lock, flags);
+ timecounter_init(&wx->hw_tc, &wx->hw_cc, ns);
+ write_sequnlock_irqrestore(&wx->hw_tc_lock, flags);
+
+ if (wx->ptp_setup_sdp)
+ wx->ptp_setup_sdp(wx);
+
+ return 0;
+}
+
+/**
+ * wx_ptp_clear_tx_timestamp - utility function to clear Tx timestamp state
+ * @wx: the private board structure
+ *
+ * This function should be called whenever the state related to a Tx timestamp
+ * needs to be cleared. This helps ensure that all related bits are reset for
+ * the next Tx timestamp event.
+ */
+static void wx_ptp_clear_tx_timestamp(struct wx *wx)
+{
+ rd32ptp(wx, WX_TSC_1588_STMPH);
+ if (wx->ptp_tx_skb) {
+ dev_kfree_skb_any(wx->ptp_tx_skb);
+ wx->ptp_tx_skb = NULL;
+ }
+ clear_bit_unlock(WX_STATE_PTP_TX_IN_PROGRESS, wx->state);
+}
+
+/**
+ * wx_ptp_convert_to_hwtstamp - convert register value to hw timestamp
+ * @wx: private board structure
+ * @hwtstamp: stack timestamp structure
+ * @timestamp: unsigned 64bit system time value
+ *
+ * We need to convert the adapter's RX/TXSTMP registers into a hwtstamp value
+ * which can be used by the stack's ptp functions.
+ *
+ * The lock is used to protect consistency of the cyclecounter and the SYSTIME
+ * registers. However, it does not need to protect against the Rx or Tx
+ * timestamp registers, as there can't be a new timestamp until the old one is
+ * unlatched by reading.
+ *
+ * In addition to the timestamp in hardware, some controllers need a software
+ * overflow cyclecounter, and this function takes this into account as well.
+ **/
+static void wx_ptp_convert_to_hwtstamp(struct wx *wx,
+ struct skb_shared_hwtstamps *hwtstamp,
+ u64 timestamp)
+{
+ u64 ns;
+
+ ns = wx_ptp_timecounter_cyc2time(wx, timestamp);
+ hwtstamp->hwtstamp = ns_to_ktime(ns);
+}
+
+/**
+ * wx_ptp_tx_hwtstamp - utility function which checks for TX time stamp
+ * @wx: the private board struct
+ *
+ * if the timestamp is valid, we convert it into the timecounter ns
+ * value, then store that result into the shhwtstamps structure which
+ * is passed up the network stack
+ */
+static void wx_ptp_tx_hwtstamp(struct wx *wx)
+{
+ struct skb_shared_hwtstamps shhwtstamps;
+ struct sk_buff *skb = wx->ptp_tx_skb;
+ u64 regval = 0;
+
+ regval |= (u64)rd32ptp(wx, WX_TSC_1588_STMPL);
+ regval |= (u64)rd32ptp(wx, WX_TSC_1588_STMPH) << 32;
+
+ wx_ptp_convert_to_hwtstamp(wx, &shhwtstamps, regval);
+
+ wx->ptp_tx_skb = NULL;
+ clear_bit_unlock(WX_STATE_PTP_TX_IN_PROGRESS, wx->state);
+ skb_tstamp_tx(skb, &shhwtstamps);
+ dev_kfree_skb_any(skb);
+ wx->tx_hwtstamp_pkts++;
+}
+
+static int wx_ptp_tx_hwtstamp_work(struct wx *wx)
+{
+ u32 tsynctxctl;
+
+ /* we have to have a valid skb to poll for a timestamp */
+ if (!wx->ptp_tx_skb) {
+ wx_ptp_clear_tx_timestamp(wx);
+ return 0;
+ }
+
+ /* stop polling once we have a valid timestamp */
+ tsynctxctl = rd32ptp(wx, WX_TSC_1588_CTL);
+ if (tsynctxctl & WX_TSC_1588_CTL_VALID) {
+ wx_ptp_tx_hwtstamp(wx);
+ return 0;
+ }
+
+ return -1;
+}
+
+/**
+ * wx_ptp_overflow_check - watchdog task to detect SYSTIME overflow
+ * @wx: pointer to wx struct
+ *
+ * This watchdog task periodically reads the timecounter so that a wrap of
+ * the system time registers is never missed. It needs to be run
+ * approximately twice a minute for the fastest
+ * overflowing hardware. We run it for all hardware since it shouldn't have a
+ * large impact.
+ */
+static void wx_ptp_overflow_check(struct wx *wx)
+{
+ bool timeout = time_is_before_jiffies(wx->last_overflow_check +
+ WX_OVERFLOW_PERIOD);
+ unsigned long flags;
+
+ if (timeout) {
+ /* Update the timecounter */
+ write_seqlock_irqsave(&wx->hw_tc_lock, flags);
+ timecounter_read(&wx->hw_tc);
+ write_sequnlock_irqrestore(&wx->hw_tc_lock, flags);
+
+ wx->last_overflow_check = jiffies;
+ }
+}
+
+/**
+ * wx_ptp_rx_hang - detect error case when Rx timestamp registers latched
+ * @wx: pointer to wx struct
+ *
+ * this watchdog task is scheduled to detect error case where hardware has
+ * dropped an Rx packet that was timestamped when the ring is full. The
+ * particular error is rare but leaves the device in a state unable to
+ * timestamp any future packets.
+ */
+static void wx_ptp_rx_hang(struct wx *wx)
+{
+ struct wx_ring *rx_ring;
+ unsigned long rx_event;
+ u32 tsyncrxctl;
+ int n;
+
+ tsyncrxctl = rd32(wx, WX_PSR_1588_CTL);
+
+ /* if we don't have a valid timestamp in the registers, just update the
+ * timeout counter and exit
+ */
+ if (!(tsyncrxctl & WX_PSR_1588_CTL_VALID)) {
+ wx->last_rx_ptp_check = jiffies;
+ return;
+ }
+
+ /* determine the most recent watchdog or rx_timestamp event */
+ rx_event = wx->last_rx_ptp_check;
+ for (n = 0; n < wx->num_rx_queues; n++) {
+ rx_ring = wx->rx_ring[n];
+ if (time_after(rx_ring->last_rx_timestamp, rx_event))
+ rx_event = rx_ring->last_rx_timestamp;
+ }
+
+ /* only need to read the high RXSTMP register to clear the lock */
+ if (time_is_before_jiffies(rx_event + 5 * HZ)) {
+ rd32(wx, WX_PSR_1588_STMPH);
+ wx->last_rx_ptp_check = jiffies;
+
+ wx->rx_hwtstamp_cleared++;
+		dev_warn(&wx->pdev->dev, "clearing RX Timestamp hang\n");
+ }
+}
+
+/**
+ * wx_ptp_tx_hang - detect error case where Tx timestamp never finishes
+ * @wx: private network wx structure
+ */
+static void wx_ptp_tx_hang(struct wx *wx)
+{
+ bool timeout = time_is_before_jiffies(wx->ptp_tx_start +
+ WX_PTP_TX_TIMEOUT);
+
+ if (!wx->ptp_tx_skb)
+ return;
+
+ if (!test_bit(WX_STATE_PTP_TX_IN_PROGRESS, wx->state))
+ return;
+
+ /* If we haven't received a timestamp within the timeout, it is
+ * reasonable to assume that it will never occur, so we can unlock the
+ * timestamp bit when this occurs.
+ */
+ if (timeout) {
+ wx_ptp_clear_tx_timestamp(wx);
+ wx->tx_hwtstamp_timeouts++;
+ dev_warn(&wx->pdev->dev, "clearing Tx timestamp hang\n");
+ }
+}
+
+static long wx_ptp_do_aux_work(struct ptp_clock_info *ptp)
+{
+ struct wx *wx = container_of(ptp, struct wx, ptp_caps);
+ int ts_done;
+
+ ts_done = wx_ptp_tx_hwtstamp_work(wx);
+
+ wx_ptp_overflow_check(wx);
+ if (unlikely(test_bit(WX_FLAG_RX_HWTSTAMP_IN_REGISTER,
+ wx->flags)))
+ wx_ptp_rx_hang(wx);
+ wx_ptp_tx_hang(wx);
+
+ return ts_done ? 1 : HZ;
+}
+
+static u64 wx_ptp_trigger_calc(struct wx *wx)
+{
+ struct cyclecounter *cc = &wx->hw_cc;
+ unsigned long flags;
+ u64 ns = 0;
+ u32 rem;
+
+ /* Read the current clock time, and save the cycle counter value */
+ write_seqlock_irqsave(&wx->hw_tc_lock, flags);
+ ns = timecounter_read(&wx->hw_tc);
+ wx->pps_edge_start = wx->hw_tc.cycle_last;
+ write_sequnlock_irqrestore(&wx->hw_tc_lock, flags);
+ wx->pps_edge_end = wx->pps_edge_start;
+
+ /* Figure out how far past the next second we are */
+ div_u64_rem(ns, WX_NS_PER_SEC, &rem);
+
+ /* Figure out how many nanoseconds to add to round the clock edge up
+ * to the next full second
+ */
+ rem = (WX_NS_PER_SEC - rem);
+
+ /* Adjust the clock edge to align with the next full second. */
+ wx->pps_edge_start += div_u64(((u64)rem << cc->shift), cc->mult);
+ wx->pps_edge_end += div_u64(((u64)(rem + wx->pps_width) <<
+ cc->shift), cc->mult);
+
+ return (ns + rem);
+}
+
+static int wx_ptp_setup_sdp(struct wx *wx)
+{
+ struct cyclecounter *cc = &wx->hw_cc;
+ u32 tsauxc;
+ u64 nsec;
+
+ if (wx->pps_width >= WX_NS_PER_SEC) {
+ wx_err(wx, "PTP pps width cannot be longer than 1s!\n");
+ return -EINVAL;
+ }
+
+ /* disable the pin first */
+ wr32ptp(wx, WX_TSC_1588_AUX_CTL, 0);
+ WX_WRITE_FLUSH(wx);
+
+ if (!test_bit(WX_FLAG_PTP_PPS_ENABLED, wx->flags)) {
+ if (wx->pps_enabled) {
+ wx->pps_enabled = false;
+ wx_set_pps(wx, false, 0, 0);
+ }
+ return 0;
+ }
+
+ wx->pps_enabled = true;
+ nsec = wx_ptp_trigger_calc(wx);
+ wx_set_pps(wx, wx->pps_enabled, nsec, wx->pps_edge_start);
+
+ tsauxc = WX_TSC_1588_AUX_CTL_PLSG | WX_TSC_1588_AUX_CTL_EN_TT0 |
+ WX_TSC_1588_AUX_CTL_EN_TT1 | WX_TSC_1588_AUX_CTL_EN_TS0;
+ wr32ptp(wx, WX_TSC_1588_TRGT_L(0), (u32)wx->pps_edge_start);
+ wr32ptp(wx, WX_TSC_1588_TRGT_H(0), (u32)(wx->pps_edge_start >> 32));
+ wr32ptp(wx, WX_TSC_1588_TRGT_L(1), (u32)wx->pps_edge_end);
+ wr32ptp(wx, WX_TSC_1588_TRGT_H(1), (u32)(wx->pps_edge_end >> 32));
+ wr32ptp(wx, WX_TSC_1588_SDP(0),
+ WX_TSC_1588_SDP_FUN_SEL_TT0 | WX_TSC_1588_SDP_OUT_LEVEL_H);
+ wr32ptp(wx, WX_TSC_1588_SDP(1), WX_TSC_1588_SDP_FUN_SEL_TS0);
+ wr32ptp(wx, WX_TSC_1588_AUX_CTL, tsauxc);
+ wr32ptp(wx, WX_TSC_1588_INT_EN, WX_TSC_1588_INT_EN_TT1);
+ WX_WRITE_FLUSH(wx);
+
+ /* Adjust the clock edge to align with the next full second. */
+ wx->sec_to_cc = div_u64(((u64)WX_NS_PER_SEC << cc->shift), cc->mult);
+
+ return 0;
+}
+
+static int wx_ptp_feature_enable(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq, int on)
+{
+ struct wx *wx = container_of(ptp, struct wx, ptp_caps);
+
+	/* When PPS is enabled, unmask the interrupt for the ClockOut
+	 * feature so that the interrupt handler can send the PPS event
+	 * when the clock SDP triggers. Clear the mask when PPS is
+	 * disabled.
+	 */
+ if (rq->type != PTP_CLK_REQ_PEROUT || !wx->ptp_setup_sdp)
+ return -EOPNOTSUPP;
+
+ /* Reject requests with unsupported flags */
+ if (rq->perout.flags & ~(PTP_PEROUT_DUTY_CYCLE |
+ PTP_PEROUT_PHASE))
+ return -EOPNOTSUPP;
+
+ if (rq->perout.phase.sec || rq->perout.phase.nsec) {
+ wx_err(wx, "Absolute start time not supported.\n");
+ return -EINVAL;
+ }
+
+ if (rq->perout.period.sec != 1 || rq->perout.period.nsec) {
+ wx_err(wx, "Only 1pps is supported.\n");
+ return -EINVAL;
+ }
+
+ if (rq->perout.flags & PTP_PEROUT_DUTY_CYCLE) {
+ struct timespec64 ts_on;
+
+ ts_on.tv_sec = rq->perout.on.sec;
+ ts_on.tv_nsec = rq->perout.on.nsec;
+ wx->pps_width = timespec64_to_ns(&ts_on);
+ } else {
+ wx->pps_width = 120000000;
+ }
+
+ if (on)
+ set_bit(WX_FLAG_PTP_PPS_ENABLED, wx->flags);
+ else
+ clear_bit(WX_FLAG_PTP_PPS_ENABLED, wx->flags);
+
+ return wx->ptp_setup_sdp(wx);
+}
+
+void wx_ptp_check_pps_event(struct wx *wx)
+{
+ u32 tsauxc, int_status;
+
+ /* this check is necessary in case the interrupt was enabled via some
+	 * alternative means (e.g. debugfs). Better to check here than
+ * everywhere that calls this function.
+ */
+ if (!wx->ptp_clock)
+ return;
+
+ int_status = rd32ptp(wx, WX_TSC_1588_INT_ST);
+ if (int_status & WX_TSC_1588_INT_ST_TT1) {
+ /* disable the pin first */
+ wr32ptp(wx, WX_TSC_1588_AUX_CTL, 0);
+ WX_WRITE_FLUSH(wx);
+
+ wx_ptp_trigger_calc(wx);
+
+ tsauxc = WX_TSC_1588_AUX_CTL_PLSG | WX_TSC_1588_AUX_CTL_EN_TT0 |
+ WX_TSC_1588_AUX_CTL_EN_TT1 | WX_TSC_1588_AUX_CTL_EN_TS0;
+ wr32ptp(wx, WX_TSC_1588_TRGT_L(0), (u32)wx->pps_edge_start);
+ wr32ptp(wx, WX_TSC_1588_TRGT_H(0), (u32)(wx->pps_edge_start >> 32));
+ wr32ptp(wx, WX_TSC_1588_TRGT_L(1), (u32)wx->pps_edge_end);
+ wr32ptp(wx, WX_TSC_1588_TRGT_H(1), (u32)(wx->pps_edge_end >> 32));
+ wr32ptp(wx, WX_TSC_1588_AUX_CTL, tsauxc);
+ WX_WRITE_FLUSH(wx);
+ }
+}
+EXPORT_SYMBOL(wx_ptp_check_pps_event);
+
+static long wx_ptp_create_clock(struct wx *wx)
+{
+ struct net_device *netdev = wx->netdev;
+ long err;
+
+ /* do nothing if we already have a clock device */
+ if (!IS_ERR_OR_NULL(wx->ptp_clock))
+ return 0;
+
+ snprintf(wx->ptp_caps.name, sizeof(wx->ptp_caps.name),
+ "%s", netdev->name);
+ wx->ptp_caps.owner = THIS_MODULE;
+ wx->ptp_caps.n_alarm = 0;
+ wx->ptp_caps.n_ext_ts = 0;
+ wx->ptp_caps.pps = 0;
+ wx->ptp_caps.adjfine = wx_ptp_adjfine;
+ wx->ptp_caps.adjtime = wx_ptp_adjtime;
+ wx->ptp_caps.gettimex64 = wx_ptp_gettimex64;
+ wx->ptp_caps.settime64 = wx_ptp_settime64;
+ wx->ptp_caps.do_aux_work = wx_ptp_do_aux_work;
+ if (wx->mac.type == wx_mac_em) {
+ wx->ptp_caps.max_adj = 500000000;
+ wx->ptp_caps.n_per_out = 1;
+ wx->ptp_setup_sdp = wx_ptp_setup_sdp;
+ wx->ptp_caps.enable = wx_ptp_feature_enable;
+ } else {
+ wx->ptp_caps.max_adj = 250000000;
+ wx->ptp_caps.n_per_out = 0;
+ wx->ptp_setup_sdp = NULL;
+ }
+
+ wx->ptp_clock = ptp_clock_register(&wx->ptp_caps, &wx->pdev->dev);
+ if (IS_ERR(wx->ptp_clock)) {
+ err = PTR_ERR(wx->ptp_clock);
+ wx->ptp_clock = NULL;
+ wx_err(wx, "ptp clock register failed\n");
+ return err;
+ } else if (wx->ptp_clock) {
+ dev_info(&wx->pdev->dev, "registered PHC device on %s\n",
+ netdev->name);
+ }
+
+ /* Set the default timestamp mode to disabled here. We do this in
+ * create_clock instead of initialization, because we don't want to
+ * override the previous settings during a suspend/resume cycle.
+ */
+ wx->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
+ wx->tstamp_config.tx_type = HWTSTAMP_TX_OFF;
+
+ return 0;
+}
+
+static int wx_ptp_set_timestamp_mode(struct wx *wx,
+ struct kernel_hwtstamp_config *config)
+{
+ u32 tsync_tx_ctl = WX_TSC_1588_CTL_ENABLED;
+ u32 tsync_rx_ctl = WX_PSR_1588_CTL_ENABLED;
+ DECLARE_BITMAP(flags, WX_PF_FLAGS_NBITS);
+ u32 tsync_rx_mtrl = PTP_EV_PORT << 16;
+ bool is_l2 = false;
+ u32 regval;
+
+ memcpy(flags, wx->flags, sizeof(wx->flags));
+
+ switch (config->tx_type) {
+ case HWTSTAMP_TX_OFF:
+ tsync_tx_ctl = 0;
+ break;
+ case HWTSTAMP_TX_ON:
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ switch (config->rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ tsync_rx_ctl = 0;
+ tsync_rx_mtrl = 0;
+ clear_bit(WX_FLAG_RX_HWTSTAMP_ENABLED, flags);
+ clear_bit(WX_FLAG_RX_HWTSTAMP_IN_REGISTER, flags);
+ break;
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ tsync_rx_ctl |= WX_PSR_1588_CTL_TYPE_L4_V1;
+ tsync_rx_mtrl |= WX_PSR_1588_MSG_V1_SYNC;
+ set_bit(WX_FLAG_RX_HWTSTAMP_ENABLED, flags);
+ set_bit(WX_FLAG_RX_HWTSTAMP_IN_REGISTER, flags);
+ break;
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ tsync_rx_ctl |= WX_PSR_1588_CTL_TYPE_L4_V1;
+ tsync_rx_mtrl |= WX_PSR_1588_MSG_V1_DELAY_REQ;
+ set_bit(WX_FLAG_RX_HWTSTAMP_ENABLED, flags);
+ set_bit(WX_FLAG_RX_HWTSTAMP_IN_REGISTER, flags);
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ tsync_rx_ctl |= WX_PSR_1588_CTL_TYPE_EVENT_V2;
+ is_l2 = true;
+ config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+ set_bit(WX_FLAG_RX_HWTSTAMP_ENABLED, flags);
+ set_bit(WX_FLAG_RX_HWTSTAMP_IN_REGISTER, flags);
+ break;
+ default:
+ /* register PSR_1588_MSG must be set in order to do V1 packets,
+ * therefore it is not possible to time stamp both V1 Sync and
+ * Delay_Req messages unless hardware supports timestamping all
+ * packets => return error
+ */
+ config->rx_filter = HWTSTAMP_FILTER_NONE;
+ return -ERANGE;
+ }
+
+ /* define ethertype filter for timestamping L2 packets */
+ if (is_l2)
+ wr32(wx, WX_PSR_ETYPE_SWC(WX_PSR_ETYPE_SWC_FILTER_1588),
+ (WX_PSR_ETYPE_SWC_FILTER_EN | /* enable filter */
+ WX_PSR_ETYPE_SWC_1588 | /* enable timestamping */
+ ETH_P_1588)); /* 1588 eth protocol type */
+ else
+ wr32(wx, WX_PSR_ETYPE_SWC(WX_PSR_ETYPE_SWC_FILTER_1588), 0);
+
+ /* enable/disable TX */
+ regval = rd32ptp(wx, WX_TSC_1588_CTL);
+ regval &= ~WX_TSC_1588_CTL_ENABLED;
+ regval |= tsync_tx_ctl;
+ wr32ptp(wx, WX_TSC_1588_CTL, regval);
+
+ /* enable/disable RX */
+ regval = rd32(wx, WX_PSR_1588_CTL);
+ regval &= ~(WX_PSR_1588_CTL_ENABLED | WX_PSR_1588_CTL_TYPE_MASK);
+ regval |= tsync_rx_ctl;
+ wr32(wx, WX_PSR_1588_CTL, regval);
+
+ /* define which PTP packets are time stamped */
+ wr32(wx, WX_PSR_1588_MSG, tsync_rx_mtrl);
+
+ WX_WRITE_FLUSH(wx);
+
+ /* configure adapter flags only when HW is actually configured */
+ memcpy(wx->flags, flags, sizeof(wx->flags));
+
+ /* clear TX/RX timestamp state, just to be sure */
+ wx_ptp_clear_tx_timestamp(wx);
+ rd32(wx, WX_PSR_1588_STMPH);
+
+ return 0;
+}
+
+static u64 wx_ptp_read(const struct cyclecounter *hw_cc)
+{
+ struct wx *wx = container_of(hw_cc, struct wx, hw_cc);
+
+ return wx_ptp_readtime(wx, NULL);
+}
+
+static void wx_ptp_link_speed_adjust(struct wx *wx, u32 *shift, u32 *incval)
+{
+ if (wx->mac.type == wx_mac_em) {
+ *shift = WX_INCVAL_SHIFT_EM;
+ *incval = WX_INCVAL_EM;
+ return;
+ }
+
+ switch (wx->speed) {
+ case SPEED_10:
+ *shift = WX_INCVAL_SHIFT_10;
+ *incval = WX_INCVAL_10;
+ break;
+ case SPEED_100:
+ *shift = WX_INCVAL_SHIFT_100;
+ *incval = WX_INCVAL_100;
+ break;
+ case SPEED_1000:
+ *shift = WX_INCVAL_SHIFT_1GB;
+ *incval = WX_INCVAL_1GB;
+ break;
+ case SPEED_10000:
+ default:
+ *shift = WX_INCVAL_SHIFT_10GB;
+ *incval = WX_INCVAL_10GB;
+ break;
+ }
+}
+
+/**
+ * wx_ptp_reset_cyclecounter - create the cycle counter from hw
+ * @wx: pointer to the wx structure
+ *
+ * This function should be called to set the proper values for the TSC_1588_INC
+ * register and tell the cyclecounter structure what the tick rate of SYSTIME
+ * is. It does not directly modify SYSTIME registers or the timecounter
+ * structure. It should be called whenever a new TSC_1588_INC value is
+ * necessary, such as during initialization or when the link speed changes.
+ */
+void wx_ptp_reset_cyclecounter(struct wx *wx)
+{
+ u32 incval = 0, mask = 0;
+ struct cyclecounter cc;
+ unsigned long flags;
+
+ /* For some of the boards below this mask is technically incorrect.
+ * The timestamp mask overflows at approximately 61bits. However the
+ * particular hardware does not overflow on an even bitmask value.
+ * Instead, it overflows due to conversion of upper 32bits billions of
+ * cycles. Timecounters are not really intended for this purpose so
+ * they do not properly function if the overflow point isn't 2^N-1.
+ * However, the actual SYSTIME values in question take ~138 years to
+ * overflow. In practice this means they won't actually overflow. A
+ * proper fix to this problem would require modification of the
+ * timecounter delta calculations.
+ */
+ cc.mask = CLOCKSOURCE_MASK(64);
+ cc.mult = 1;
+ cc.shift = 0;
+
+ cc.read = wx_ptp_read;
+ wx_ptp_link_speed_adjust(wx, &cc.shift, &incval);
+
+ /* update the base incval used to calculate frequency adjustment */
+ WRITE_ONCE(wx->base_incval, incval);
+
+ mask = (wx->mac.type == wx_mac_em) ? 0x7FFFFFF : 0xFFFFFF;
+ incval &= mask;
+ if (wx->mac.type != wx_mac_em)
+ incval |= 2 << 24;
+ wr32ptp(wx, WX_TSC_1588_INC, incval);
+
+ smp_mb(); /* Force the above update. */
+
+ /* need lock to prevent incorrect read while modifying cyclecounter */
+ write_seqlock_irqsave(&wx->hw_tc_lock, flags);
+ memcpy(&wx->hw_cc, &cc, sizeof(wx->hw_cc));
+ write_sequnlock_irqrestore(&wx->hw_tc_lock, flags);
+}
+EXPORT_SYMBOL(wx_ptp_reset_cyclecounter);
+
+void wx_ptp_reset(struct wx *wx)
+{
+ unsigned long flags;
+
+ /* reset the hardware timestamping mode */
+ wx_ptp_set_timestamp_mode(wx, &wx->tstamp_config);
+ wx_ptp_reset_cyclecounter(wx);
+
+ wr32ptp(wx, WX_TSC_1588_SYSTIML, 0);
+ wr32ptp(wx, WX_TSC_1588_SYSTIMH, 0);
+ WX_WRITE_FLUSH(wx);
+
+ write_seqlock_irqsave(&wx->hw_tc_lock, flags);
+ timecounter_init(&wx->hw_tc, &wx->hw_cc,
+ ktime_to_ns(ktime_get_real()));
+ write_sequnlock_irqrestore(&wx->hw_tc_lock, flags);
+
+ wx->last_overflow_check = jiffies;
+ ptp_schedule_worker(wx->ptp_clock, HZ);
+
+ /* Now that the shift has been calculated and the systime
+ * registers reset, (re-)enable the Clock out feature
+ */
+ if (wx->ptp_setup_sdp)
+ wx->ptp_setup_sdp(wx);
+}
+EXPORT_SYMBOL(wx_ptp_reset);
+
+void wx_ptp_init(struct wx *wx)
+{
+ /* Initialize the seqlock_t first, since the user might call the clock
+ * functions any time after we've initialized the ptp clock device.
+ */
+ seqlock_init(&wx->hw_tc_lock);
+
+ /* obtain a ptp clock device, or re-use an existing device */
+ if (wx_ptp_create_clock(wx))
+ return;
+
+ wx->tx_hwtstamp_pkts = 0;
+ wx->tx_hwtstamp_timeouts = 0;
+ wx->tx_hwtstamp_skipped = 0;
+ wx->tx_hwtstamp_errors = 0;
+ wx->rx_hwtstamp_cleared = 0;
+ /* reset the ptp related hardware bits */
+ wx_ptp_reset(wx);
+
+ /* enter the WX_STATE_PTP_RUNNING state */
+ set_bit(WX_STATE_PTP_RUNNING, wx->state);
+}
+EXPORT_SYMBOL(wx_ptp_init);
+
+/**
+ * wx_ptp_suspend - stop ptp work items
+ * @wx: pointer to wx struct
+ *
+ * This function suspends ptp activity, and prevents more work from being
+ * generated, but does not destroy the clock device.
+ */
+void wx_ptp_suspend(struct wx *wx)
+{
+ /* leave the WX_STATE_PTP_RUNNING STATE */
+ if (!test_and_clear_bit(WX_STATE_PTP_RUNNING, wx->state))
+ return;
+
+ clear_bit(WX_FLAG_PTP_PPS_ENABLED, wx->flags);
+ if (wx->ptp_setup_sdp)
+ wx->ptp_setup_sdp(wx);
+
+ wx_ptp_clear_tx_timestamp(wx);
+}
+EXPORT_SYMBOL(wx_ptp_suspend);
+
+/**
+ * wx_ptp_stop - destroy the ptp_clock device
+ * @wx: pointer to wx struct
+ *
+ * Completely destroy the ptp_clock device, and disable all PTP related
+ * features. Intended to be run when the device is being closed.
+ */
+void wx_ptp_stop(struct wx *wx)
+{
+ /* first, suspend ptp activity */
+ wx_ptp_suspend(wx);
+
+ /* now destroy the ptp clock device */
+ if (wx->ptp_clock) {
+ ptp_clock_unregister(wx->ptp_clock);
+ wx->ptp_clock = NULL;
+ dev_info(&wx->pdev->dev, "removed PHC on %s\n", wx->netdev->name);
+ }
+}
+EXPORT_SYMBOL(wx_ptp_stop);
+
+/**
+ * wx_ptp_rx_hwtstamp - utility function which checks for RX time stamp
+ * @wx: pointer to wx struct
+ * @skb: particular skb to send timestamp with
+ *
+ * if the timestamp is valid, we convert it into the timecounter ns
+ * value, then store that result into the shhwtstamps structure which
+ * is passed up the network stack
+ */
+void wx_ptp_rx_hwtstamp(struct wx *wx, struct sk_buff *skb)
+{
+ u64 regval = 0;
+ u32 tsyncrxctl;
+
+ /* Read the tsyncrxctl register afterwards in order to prevent taking an
+ * I/O hit on every packet.
+ */
+ tsyncrxctl = rd32(wx, WX_PSR_1588_CTL);
+ if (!(tsyncrxctl & WX_PSR_1588_CTL_VALID))
+ return;
+
+ regval |= (u64)rd32(wx, WX_PSR_1588_STMPL);
+ regval |= (u64)rd32(wx, WX_PSR_1588_STMPH) << 32;
+
+ wx_ptp_convert_to_hwtstamp(wx, skb_hwtstamps(skb), regval);
+}
+
+int wx_hwtstamp_get(struct net_device *dev,
+ struct kernel_hwtstamp_config *cfg)
+{
+ struct wx *wx = netdev_priv(dev);
+
+ if (!netif_running(dev))
+ return -EINVAL;
+
+ *cfg = wx->tstamp_config;
+
+ return 0;
+}
+EXPORT_SYMBOL(wx_hwtstamp_get);
+
+int wx_hwtstamp_set(struct net_device *dev,
+ struct kernel_hwtstamp_config *cfg,
+ struct netlink_ext_ack *extack)
+{
+ struct wx *wx = netdev_priv(dev);
+ int err;
+
+ if (!netif_running(dev))
+ return -EINVAL;
+
+ err = wx_ptp_set_timestamp_mode(wx, cfg);
+ if (err)
+ return err;
+
+ /* save these settings for future reference */
+ memcpy(&wx->tstamp_config, cfg, sizeof(wx->tstamp_config));
+
+ return 0;
+}
+EXPORT_SYMBOL(wx_hwtstamp_set);
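A hypothetical sketch of how a MAC driver built on libwx would drive the new entry points from its up/down paths (the function names are made up; only the PTP-related calls are shown):

#include "wx_ptp.h"

static void example_up(struct wx *wx)
{
	/* register the PHC and arm the overflow/Tx-timestamp worker */
	wx_ptp_init(wx);
}

static void example_down(struct wx *wx)
{
	/* drop any pending Tx timestamp and unregister the PHC */
	wx_ptp_stop(wx);
}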
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_ptp.h b/drivers/net/ethernet/wangxun/libwx/wx_ptp.h
new file mode 100644
index 000000000000..50db90a6e3ee
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/libwx/wx_ptp.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2019 - 2025 Beijing WangXun Technology Co., Ltd. */
+
+#ifndef _WX_PTP_H_
+#define _WX_PTP_H_
+
+void wx_ptp_check_pps_event(struct wx *wx);
+void wx_ptp_reset_cyclecounter(struct wx *wx);
+void wx_ptp_reset(struct wx *wx);
+void wx_ptp_init(struct wx *wx);
+void wx_ptp_suspend(struct wx *wx);
+void wx_ptp_stop(struct wx *wx);
+void wx_ptp_rx_hwtstamp(struct wx *wx, struct sk_buff *skb);
+int wx_hwtstamp_get(struct net_device *dev,
+ struct kernel_hwtstamp_config *cfg);
+int wx_hwtstamp_set(struct net_device *dev,
+ struct kernel_hwtstamp_config *cfg,
+ struct netlink_ext_ack *extack);
+
+#endif /* _WX_PTP_H_ */
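The hwtstamp helpers declared above are shaped to drop straight into net_device_ops; a hypothetical wiring (only the relevant callbacks shown) would be:

static const struct net_device_ops example_netdev_ops = {
	/* ... the driver's other callbacks go here ... */
	.ndo_hwtstamp_get	= wx_hwtstamp_get,
	.ndo_hwtstamp_set	= wx_hwtstamp_set,
};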
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_type.h b/drivers/net/ethernet/wangxun/libwx/wx_type.h
index b54bffda027b..5b230ecbbabb 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_type.h
+++ b/drivers/net/ethernet/wangxun/libwx/wx_type.h
@@ -4,6 +4,8 @@
#ifndef _WX_TYPE_H_
#define _WX_TYPE_H_
+#include <linux/ptp_clock_kernel.h>
+#include <linux/timecounter.h>
#include <linux/bitfield.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
@@ -180,6 +182,23 @@
#define WX_PSR_VLAN_CTL 0x15088
#define WX_PSR_VLAN_CTL_CFIEN BIT(29) /* bit 29 */
#define WX_PSR_VLAN_CTL_VFE BIT(30) /* bit 30 */
+/* EType Queue Filter */
+#define WX_PSR_ETYPE_SWC(_i) (0x15128 + ((_i) * 4))
+#define WX_PSR_ETYPE_SWC_FILTER_1588 3
+#define WX_PSR_ETYPE_SWC_FILTER_EN BIT(31)
+#define WX_PSR_ETYPE_SWC_1588 BIT(30)
+/* 1588 */
+#define WX_PSR_1588_MSG 0x15120
+#define WX_PSR_1588_MSG_V1_SYNC FIELD_PREP(GENMASK(7, 0), 0)
+#define WX_PSR_1588_MSG_V1_DELAY_REQ FIELD_PREP(GENMASK(7, 0), 1)
+#define WX_PSR_1588_STMPL 0x151E8
+#define WX_PSR_1588_STMPH 0x151A4
+#define WX_PSR_1588_CTL 0x15188
+#define WX_PSR_1588_CTL_ENABLED BIT(4)
+#define WX_PSR_1588_CTL_TYPE_MASK GENMASK(3, 1)
+#define WX_PSR_1588_CTL_TYPE_L4_V1 FIELD_PREP(GENMASK(3, 1), 1)
+#define WX_PSR_1588_CTL_TYPE_EVENT_V2 FIELD_PREP(GENMASK(3, 1), 5)
+#define WX_PSR_1588_CTL_VALID BIT(0)
/* mcasst/ucast overflow tbl */
#define WX_PSR_MC_TBL(_i) (0x15200 + ((_i) * 4))
#define WX_PSR_UC_TBL(_i) (0x15400 + ((_i) * 4))
@@ -253,6 +272,32 @@
#define WX_TSC_ST_SECTX_RDY BIT(0)
#define WX_TSC_BUF_AE 0x1D00C
#define WX_TSC_BUF_AE_THR GENMASK(9, 0)
+/* 1588 */
+#define WX_TSC_1588_CTL 0x11F00
+#define WX_TSC_1588_CTL_ENABLED BIT(4)
+#define WX_TSC_1588_CTL_VALID BIT(0)
+#define WX_TSC_1588_STMPL 0x11F04
+#define WX_TSC_1588_STMPH 0x11F08
+#define WX_TSC_1588_SYSTIML 0x11F0C
+#define WX_TSC_1588_SYSTIMH 0x11F10
+#define WX_TSC_1588_INC 0x11F14
+#define WX_TSC_1588_INT_ST 0x11F20
+#define WX_TSC_1588_INT_ST_TT1 BIT(5)
+#define WX_TSC_1588_INT_EN 0x11F24
+#define WX_TSC_1588_INT_EN_TT1 BIT(5)
+#define WX_TSC_1588_AUX_CTL 0x11F28
+#define WX_TSC_1588_AUX_CTL_EN_TS0 BIT(8)
+#define WX_TSC_1588_AUX_CTL_EN_TT1 BIT(2)
+#define WX_TSC_1588_AUX_CTL_PLSG BIT(1)
+#define WX_TSC_1588_AUX_CTL_EN_TT0 BIT(0)
+#define WX_TSC_1588_TRGT_L(i) (0x11F2C + ((i) * 8)) /* [0,1] */
+#define WX_TSC_1588_TRGT_H(i) (0x11F30 + ((i) * 8)) /* [0,1] */
+#define WX_TSC_1588_SDP(i) (0x11F5C + ((i) * 4)) /* [0,3] */
+#define WX_TSC_1588_SDP_OUT_LEVEL_H FIELD_PREP(BIT(4), 0)
+#define WX_TSC_1588_SDP_OUT_LEVEL_L FIELD_PREP(BIT(4), 1)
+#define WX_TSC_1588_SDP_FUN_SEL_MASK GENMASK(2, 0)
+#define WX_TSC_1588_SDP_FUN_SEL_TT0 FIELD_PREP(WX_TSC_1588_SDP_FUN_SEL_MASK, 1)
+#define WX_TSC_1588_SDP_FUN_SEL_TS0 FIELD_PREP(WX_TSC_1588_SDP_FUN_SEL_MASK, 5)
/************************************** MNG ********************************/
#define WX_MNG_SWFW_SYNC 0x1E008
@@ -264,6 +309,10 @@
#define WX_MNG_MBOX_CTL_FWRDY BIT(2)
#define WX_MNG_BMC2OS_CNT 0x1E090
#define WX_MNG_OS2BMC_CNT 0x1E094
+#define WX_SW2FW_MBOX_CMD 0x1E0A0
+#define WX_SW2FW_MBOX_CMD_VLD BIT(31)
+#define WX_SW2FW_MBOX 0x1E200
+#define WX_FW2SW_MBOX 0x1E300
/************************************* ETH MAC *****************************/
#define WX_MAC_TX_CFG 0x11000
@@ -327,6 +376,7 @@ enum WX_MSCA_CMD_value {
#define WX_12K_ITR 336
#define WX_20K_ITR 200
#define WX_SP_MAX_EITR 0x00000FF8U
+#define WX_AML_MAX_EITR 0x00000FFFU
#define WX_EM_MAX_EITR 0x00007FFCU
/* transmit DMA Registers */
@@ -370,6 +420,7 @@ enum WX_MSCA_CMD_value {
/****************** Manageablility Host Interface defines ********************/
#define WX_HI_MAX_BLOCK_BYTE_LENGTH 256 /* Num of bytes in range */
#define WX_HI_COMMAND_TIMEOUT 1000 /* Process HI command limit */
+#define WX_HIC_HDR_INDEX_MAX 255
#define FW_READ_SHADOW_RAM_CMD 0x31
#define FW_READ_SHADOW_RAM_LEN 0x6
@@ -382,6 +433,8 @@ enum WX_MSCA_CMD_value {
#define FW_CEM_CMD_RESERVED 0X0
#define FW_CEM_MAX_RETRIES 3
#define FW_CEM_RESP_STATUS_SUCCESS 0x1
+#define FW_PPS_SET_CMD 0xF6
+#define FW_PPS_SET_LEN 0x14
#define WX_SW_REGION_PTR 0x1C
@@ -460,6 +513,7 @@ enum WX_MSCA_CMD_value {
#define WX_RXD_STAT_L4CS BIT(7) /* L4 xsum calculated */
#define WX_RXD_STAT_IPCS BIT(8) /* IP xsum calculated */
#define WX_RXD_STAT_OUTERIPCS BIT(10) /* Cloud IP xsum calculated*/
+#define WX_RXD_STAT_TS BIT(14) /* IEEE1588 Time Stamp */
#define WX_RXD_ERR_OUTERIPER BIT(26) /* CRC IP Header error */
#define WX_RXD_ERR_RXE BIT(29) /* Any MAC Error */
@@ -663,21 +717,30 @@ struct wx_hic_hdr {
u8 cmd_resv;
u8 ret_status;
} cmd_or_resp;
- u8 checksum;
+ union {
+ u8 checksum;
+ u8 index;
+ };
};
struct wx_hic_hdr2_req {
u8 cmd;
u8 buf_lenh;
u8 buf_lenl;
- u8 checksum;
+ union {
+ u8 checksum;
+ u8 index;
+ };
};
struct wx_hic_hdr2_rsp {
u8 cmd;
u8 buf_lenl;
u8 buf_lenh_status; /* 7-5: high bits of buf_len, 4-0: status */
- u8 checksum;
+ union {
+ u8 checksum;
+ u8 index;
+ };
};
union wx_hic_hdr2 {
@@ -701,6 +764,15 @@ struct wx_hic_reset {
u16 reset_type;
};
+struct wx_hic_set_pps {
+ struct wx_hic_hdr hdr;
+ u8 lan_id;
+ u8 enable;
+ u16 pad2;
+ u64 nsec;
+ u64 cycles;
+};
+
/* Bus parameters */
struct wx_bus_info {
u8 func;
@@ -716,7 +788,8 @@ struct wx_thermal_sensor_data {
enum wx_mac_type {
wx_mac_unknown = 0,
wx_mac_sp,
- wx_mac_em
+ wx_mac_em,
+ wx_mac_aml,
};
enum sp_media_type {
@@ -863,6 +936,7 @@ struct wx_tx_context_desc {
*/
struct wx_tx_buffer {
union wx_tx_desc *next_to_watch;
+ unsigned long time_stamp;
struct sk_buff *skb;
unsigned int bytecount;
unsigned short gso_segs;
@@ -924,6 +998,7 @@ struct wx_ring {
unsigned int size; /* length in bytes */
u16 count; /* amount of descriptors */
+ unsigned long last_rx_timestamp;
u8 queue_index; /* needed for multiqueue queue management */
u8 reg_idx; /* holds the special value that gets
@@ -1026,13 +1101,21 @@ struct wx_hw_stats {
enum wx_state {
WX_STATE_RESETTING,
- WX_STATE_NBITS, /* must be last */
+ WX_STATE_SWFW_BUSY,
+ WX_STATE_PTP_RUNNING,
+ WX_STATE_PTP_TX_IN_PROGRESS,
+ WX_STATE_NBITS /* must be last */
};
enum wx_pf_flags {
+ WX_FLAG_SWFW_RING,
WX_FLAG_FDIR_CAPABLE,
WX_FLAG_FDIR_HASH,
WX_FLAG_FDIR_PERFECT,
+ WX_FLAG_RSC_CAPABLE,
+ WX_FLAG_RX_HWTSTAMP_ENABLED,
+ WX_FLAG_RX_HWTSTAMP_IN_REGISTER,
+ WX_FLAG_PTP_PPS_ENABLED,
WX_PF_FLAGS_NBITS /* must be last */
};
@@ -1066,6 +1149,7 @@ struct wx {
char eeprom_id[32];
char *driver_name;
enum wx_reset_type reset_type;
+ u8 swfw_index;
/* PHY stuff */
unsigned int link;
@@ -1133,6 +1217,29 @@ struct wx {
void (*atr)(struct wx_ring *ring, struct wx_tx_buffer *first, u8 ptype);
void (*configure_fdir)(struct wx *wx);
void (*do_reset)(struct net_device *netdev);
+ int (*ptp_setup_sdp)(struct wx *wx);
+
+ bool pps_enabled;
+ u64 pps_width;
+ u64 pps_edge_start;
+ u64 pps_edge_end;
+ u64 sec_to_cc;
+ u32 base_incval;
+ u32 tx_hwtstamp_pkts;
+ u32 tx_hwtstamp_timeouts;
+ u32 tx_hwtstamp_skipped;
+ u32 tx_hwtstamp_errors;
+ u32 rx_hwtstamp_cleared;
+ unsigned long last_overflow_check;
+ unsigned long last_rx_ptp_check;
+ unsigned long ptp_tx_start;
+ seqlock_t hw_tc_lock; /* seqlock for ptp */
+ struct cyclecounter hw_cc;
+ struct timecounter hw_tc;
+ struct ptp_clock *ptp_clock;
+ struct ptp_clock_info ptp_caps;
+ struct kernel_hwtstamp_config tstamp_config;
+ struct sk_buff *ptp_tx_skb;
};
#define WX_INTR_ALL (~0ULL)
@@ -1177,6 +1284,24 @@ rd64(struct wx *wx, u32 reg)
return (lsb | msb << 32);
}
+static inline u32
+rd32ptp(struct wx *wx, u32 reg)
+{
+ if (wx->mac.type == wx_mac_em)
+ return rd32(wx, reg);
+
+ return rd32(wx, reg + 0xB500);
+}
+
+static inline void
+wr32ptp(struct wx *wx, u32 reg, u32 value)
+{
+ if (wx->mac.type == wx_mac_em)
+ return wr32(wx, reg, value);
+
+ return wr32(wx, reg + 0xB500, value);
+}
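+
+/* For example, WX_TSC_1588_SYSTIML (0x11F0C) is accessed at that address
+ * on wx_mac_em parts, while on the other MAC types (sp/aml) the same
+ * logical register sits at 0x11F0C + 0xB500 = 0x1D40C.
+ */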
+
/* On some domestic CPU platforms, sometimes IO is not synchronized with
* flushing memory, here use readl() to flush PCI read and write.
*/
diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c
index e868f7ef4920..7e2d9ec38a30 100644
--- a/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c
+++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c
@@ -138,6 +138,8 @@ static const struct ethtool_ops ngbe_ethtool_ops = {
.set_channels = ngbe_set_channels,
.get_msglevel = wx_get_msglevel,
.set_msglevel = wx_set_msglevel,
+ .get_ts_info = wx_get_ts_info,
+ .get_ts_stats = wx_get_ptp_stats,
};
void ngbe_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c
index 53aeae2f884b..a6159214ec0a 100644
--- a/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c
+++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c
@@ -14,6 +14,7 @@
#include "../libwx/wx_type.h"
#include "../libwx/wx_hw.h"
#include "../libwx/wx_lib.h"
+#include "../libwx/wx_ptp.h"
#include "ngbe_type.h"
#include "ngbe_mdio.h"
#include "ngbe_hw.h"
@@ -167,7 +168,7 @@ static irqreturn_t ngbe_intr(int __always_unused irq, void *data)
struct wx_q_vector *q_vector;
struct wx *wx = data;
struct pci_dev *pdev;
- u32 eicr;
+ u32 eicr, eicr_misc;
q_vector = wx->q_vector[0];
pdev = wx->pdev;
@@ -185,6 +186,10 @@ static irqreturn_t ngbe_intr(int __always_unused irq, void *data)
if (!(pdev->msi_enabled))
wr32(wx, WX_PX_INTA, 1);
+ eicr_misc = wx_misc_isb(wx, WX_ISB_MISC);
+ if (unlikely(eicr_misc & NGBE_PX_MISC_IC_TIMESYNC))
+ wx_ptp_check_pps_event(wx);
+
wx->isb_mem[WX_ISB_MISC] = 0;
/* would disable interrupts here but it is auto disabled */
napi_schedule_irqoff(&q_vector->napi);
@@ -198,6 +203,12 @@ static irqreturn_t ngbe_intr(int __always_unused irq, void *data)
static irqreturn_t ngbe_msix_other(int __always_unused irq, void *data)
{
struct wx *wx = data;
+ u32 eicr;
+
+ eicr = wx_misc_isb(wx, WX_ISB_MISC);
+
+ if (unlikely(eicr & NGBE_PX_MISC_IC_TIMESYNC))
+ wx_ptp_check_pps_event(wx);
/* re-enable the original interrupt state, no lsc, no queues */
if (netif_running(wx->netdev))
@@ -317,6 +328,8 @@ void ngbe_down(struct wx *wx)
{
phylink_stop(wx->phylink);
ngbe_disable_device(wx);
+ if (test_bit(WX_STATE_PTP_RUNNING, wx->state))
+ wx_ptp_reset(wx);
wx_clean_all_tx_rings(wx);
wx_clean_all_rx_rings(wx);
}
@@ -379,6 +392,8 @@ static int ngbe_open(struct net_device *netdev)
if (err)
goto err_dis_phy;
+ wx_ptp_init(wx);
+
ngbe_up(wx);
return 0;
@@ -407,6 +422,7 @@ static int ngbe_close(struct net_device *netdev)
{
struct wx *wx = netdev_priv(netdev);
+ wx_ptp_stop(wx);
ngbe_down(wx);
wx_free_irq(wx);
wx_free_isb_resources(wx);
@@ -507,6 +523,8 @@ static const struct net_device_ops ngbe_netdev_ops = {
.ndo_get_stats64 = wx_get_stats64,
.ndo_vlan_rx_add_vid = wx_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = wx_vlan_rx_kill_vid,
+ .ndo_hwtstamp_set = wx_hwtstamp_set,
+ .ndo_hwtstamp_get = wx_hwtstamp_get,
};
/**
diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c
index a5e9b779c44d..ea1d7e9a91f3 100644
--- a/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c
+++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c
@@ -7,6 +7,7 @@
#include <linux/phy.h>
#include "../libwx/wx_type.h"
+#include "../libwx/wx_ptp.h"
#include "../libwx/wx_hw.h"
#include "ngbe_type.h"
#include "ngbe_mdio.h"
@@ -64,6 +65,11 @@ static void ngbe_mac_config(struct phylink_config *config, unsigned int mode,
static void ngbe_mac_link_down(struct phylink_config *config,
unsigned int mode, phy_interface_t interface)
{
+ struct wx *wx = phylink_to_wx(config);
+
+ wx->speed = SPEED_UNKNOWN;
+ if (test_bit(WX_STATE_PTP_RUNNING, wx->state))
+ wx_ptp_reset_cyclecounter(wx);
}
static void ngbe_mac_link_up(struct phylink_config *config,
@@ -103,6 +109,11 @@ static void ngbe_mac_link_up(struct phylink_config *config,
wr32(wx, WX_MAC_PKT_FLT, WX_MAC_PKT_FLT_PR);
reg = rd32(wx, WX_MAC_WDG_TIMEOUT);
wr32(wx, WX_MAC_WDG_TIMEOUT, reg);
+
+ wx->speed = speed;
+ wx->last_rx_ptp_check = jiffies;
+ if (test_bit(WX_STATE_PTP_RUNNING, wx->state))
+ wx_ptp_reset_cyclecounter(wx);
}
static const struct phylink_mac_ops ngbe_mac_ops = {
diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h b/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h
index f48ed7fc1805..992adbb98c7d 100644
--- a/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h
+++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h
@@ -70,15 +70,20 @@
/* Extended Interrupt Enable Set */
#define NGBE_PX_MISC_IEN_DEV_RST BIT(10)
+#define NGBE_PX_MISC_IEN_TIMESYNC BIT(11)
#define NGBE_PX_MISC_IEN_ETH_LK BIT(18)
#define NGBE_PX_MISC_IEN_INT_ERR BIT(20)
#define NGBE_PX_MISC_IEN_GPIO BIT(26)
#define NGBE_PX_MISC_IEN_MASK ( \
NGBE_PX_MISC_IEN_DEV_RST | \
+ NGBE_PX_MISC_IEN_TIMESYNC | \
NGBE_PX_MISC_IEN_ETH_LK | \
NGBE_PX_MISC_IEN_INT_ERR | \
NGBE_PX_MISC_IEN_GPIO)
+/* Extended Interrupt Cause Read */
+#define NGBE_PX_MISC_IC_TIMESYNC BIT(11) /* time sync */
+
#define NGBE_INTR_ALL 0x1FF
#define NGBE_INTR_MISC BIT(0)
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c
index d98314b26c19..78999d484f18 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c
@@ -529,6 +529,8 @@ static const struct ethtool_ops txgbe_ethtool_ops = {
.set_rxnfc = txgbe_set_rxnfc,
.get_msglevel = wx_get_msglevel,
.set_msglevel = wx_set_msglevel,
+ .get_ts_info = wx_get_ts_info,
+ .get_ts_stats = wx_get_ptp_stats,
};
void txgbe_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c
index cd1372da92a9..4b9921b7bb11 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c
@@ -197,6 +197,12 @@ int txgbe_reset_hw(struct wx *wx)
txgbe_reset_misc(wx);
+ if (wx->mac.type != wx_mac_sp) {
+ wr32(wx, TXGBE_PX_PF_BME, 0x1);
+ wr32m(wx, TXGBE_RDM_RSC_CTL, TXGBE_RDM_RSC_CTL_FREE_CTL,
+ TXGBE_RDM_RSC_CTL_FREE_CTL);
+ }
+
wx_clear_hw_cntrs(wx);
/* Store the permanent mac address */
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.c
index 0ee73a265545..8658a51ee810 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.c
@@ -166,6 +166,9 @@ static void txgbe_del_irq_domain(struct txgbe *txgbe)
void txgbe_free_misc_irq(struct txgbe *txgbe)
{
+ if (txgbe->wx->mac.type == wx_mac_aml)
+ return;
+
free_irq(txgbe->link_irq, txgbe);
free_irq(txgbe->misc.irq, txgbe);
txgbe_del_irq_domain(txgbe);
@@ -177,6 +180,9 @@ int txgbe_setup_misc_irq(struct txgbe *txgbe)
struct wx *wx = txgbe->wx;
int hwirq, err;
+ if (wx->mac.type == wx_mac_aml)
+ goto skip_sp_irq;
+
txgbe->misc.nirqs = 1;
txgbe->misc.domain = irq_domain_add_simple(NULL, txgbe->misc.nirqs, 0,
&txgbe_misc_irq_domain_ops, txgbe);
@@ -206,6 +212,7 @@ int txgbe_setup_misc_irq(struct txgbe *txgbe)
if (err)
goto free_msic_irq;
+skip_sp_irq:
wx->misc_irq_domain = true;
return 0;
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
index f77450268036..a2e245e3b016 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
@@ -13,6 +13,7 @@
#include "../libwx/wx_type.h"
#include "../libwx/wx_lib.h"
+#include "../libwx/wx_ptp.h"
#include "../libwx/wx_hw.h"
#include "txgbe_type.h"
#include "txgbe_hw.h"
@@ -34,6 +35,12 @@ char txgbe_driver_name[] = "txgbe";
static const struct pci_device_id txgbe_pci_tbl[] = {
{ PCI_VDEVICE(WANGXUN, TXGBE_DEV_ID_SP1000), 0},
{ PCI_VDEVICE(WANGXUN, TXGBE_DEV_ID_WX1820), 0},
+ { PCI_VDEVICE(WANGXUN, TXGBE_DEV_ID_AML5010), 0},
+ { PCI_VDEVICE(WANGXUN, TXGBE_DEV_ID_AML5110), 0},
+ { PCI_VDEVICE(WANGXUN, TXGBE_DEV_ID_AML5025), 0},
+ { PCI_VDEVICE(WANGXUN, TXGBE_DEV_ID_AML5125), 0},
+ { PCI_VDEVICE(WANGXUN, TXGBE_DEV_ID_AML5040), 0},
+ { PCI_VDEVICE(WANGXUN, TXGBE_DEV_ID_AML5140), 0},
/* required last entry */
{ .device = 0 }
};
@@ -89,7 +96,18 @@ static void txgbe_up_complete(struct wx *wx)
smp_mb__before_atomic();
wx_napi_enable_all(wx);
- phylink_start(wx->phylink);
+ if (wx->mac.type == wx_mac_aml) {
+ u32 reg;
+
+ reg = rd32(wx, TXGBE_AML_MAC_TX_CFG);
+ reg &= ~TXGBE_AML_MAC_TX_CFG_SPEED_MASK;
+ reg |= TXGBE_AML_MAC_TX_CFG_SPEED_25G;
+ wr32(wx, WX_MAC_TX_CFG, reg);
+ txgbe_enable_sec_tx_path(wx);
+ netif_carrier_on(wx->netdev);
+ } else {
+ phylink_start(wx->phylink);
+ }
/* clear any pending interrupts, may auto mask */
rd32(wx, WX_PX_IC(0));
@@ -116,6 +134,9 @@ static void txgbe_reset(struct wx *wx)
memcpy(old_addr, &wx->mac_table[0].addr, netdev->addr_len);
wx_flush_sw_mac_table(wx);
wx_mac_set_default_filter(wx, old_addr);
+
+ if (test_bit(WX_STATE_PTP_RUNNING, wx->state))
+ wx_ptp_reset(wx);
}
static void txgbe_disable_device(struct wx *wx)
@@ -167,7 +188,10 @@ void txgbe_down(struct wx *wx)
{
txgbe_disable_device(wx);
txgbe_reset(wx);
- phylink_stop(wx->phylink);
+ if (wx->mac.type == wx_mac_aml)
+ netif_carrier_off(wx->netdev);
+ else
+ phylink_stop(wx->phylink);
wx_clean_all_tx_rings(wx);
wx_clean_all_rx_rings(wx);
@@ -176,6 +200,7 @@ void txgbe_down(struct wx *wx)
void txgbe_up(struct wx *wx)
{
wx_configure(wx);
+ wx_ptp_init(wx);
txgbe_up_complete(wx);
}
@@ -192,6 +217,14 @@ static void txgbe_init_type_code(struct wx *wx)
case TXGBE_DEV_ID_WX1820:
wx->mac.type = wx_mac_sp;
break;
+ case TXGBE_DEV_ID_AML5010:
+ case TXGBE_DEV_ID_AML5110:
+ case TXGBE_DEV_ID_AML5025:
+ case TXGBE_DEV_ID_AML5125:
+ case TXGBE_DEV_ID_AML5040:
+ case TXGBE_DEV_ID_AML5140:
+ wx->mac.type = wx_mac_aml;
+ break;
default:
wx->mac.type = wx_mac_unknown;
break;
@@ -265,6 +298,8 @@ static int txgbe_sw_init(struct wx *wx)
wx->atr = txgbe_atr;
wx->configure_fdir = txgbe_configure_fdir;
+ set_bit(WX_FLAG_RSC_CAPABLE, wx->flags);
+
/* enable itr by default in dynamic mode */
wx->rx_itr_setting = 1;
wx->tx_itr_setting = 1;
@@ -279,6 +314,17 @@ static int txgbe_sw_init(struct wx *wx)
wx->do_reset = txgbe_do_reset;
+ switch (wx->mac.type) {
+ case wx_mac_sp:
+ break;
+ case wx_mac_aml:
+ set_bit(WX_FLAG_SWFW_RING, wx->flags);
+ wx->swfw_index = 0;
+ break;
+ default:
+ break;
+ }
+
return 0;
}
@@ -321,6 +367,8 @@ static int txgbe_open(struct net_device *netdev)
if (err)
goto err_free_irq;
+ wx_ptp_init(wx);
+
txgbe_up_complete(wx);
return 0;
@@ -344,6 +392,7 @@ err_reset:
*/
static void txgbe_close_suspend(struct wx *wx)
{
+ wx_ptp_suspend(wx);
txgbe_disable_device(wx);
wx_free_resources(wx);
}
@@ -363,6 +412,7 @@ static int txgbe_close(struct net_device *netdev)
{
struct wx *wx = netdev_priv(netdev);
+ wx_ptp_stop(wx);
txgbe_down(wx);
wx_free_irq(wx);
wx_free_resources(wx);
@@ -479,6 +529,8 @@ static const struct net_device_ops txgbe_netdev_ops = {
.ndo_get_stats64 = wx_get_stats64,
.ndo_vlan_rx_add_vid = wx_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = wx_vlan_rx_kill_vid,
+ .ndo_hwtstamp_set = wx_hwtstamp_set,
+ .ndo_hwtstamp_get = wx_hwtstamp_get,
};
/**
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c
index 1ae68f94dd49..85f022ceef4f 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c
@@ -15,6 +15,7 @@
#include "../libwx/wx_type.h"
#include "../libwx/wx_lib.h"
+#include "../libwx/wx_ptp.h"
#include "../libwx/wx_hw.h"
#include "txgbe_type.h"
#include "txgbe_phy.h"
@@ -179,6 +180,10 @@ static void txgbe_mac_link_down(struct phylink_config *config,
struct wx *wx = phylink_to_wx(config);
wr32m(wx, WX_MAC_TX_CFG, WX_MAC_TX_CFG_TE, 0);
+
+ wx->speed = SPEED_UNKNOWN;
+ if (test_bit(WX_STATE_PTP_RUNNING, wx->state))
+ wx_ptp_reset_cyclecounter(wx);
}
static void txgbe_mac_link_up(struct phylink_config *config,
@@ -215,6 +220,11 @@ static void txgbe_mac_link_up(struct phylink_config *config,
wr32(wx, WX_MAC_PKT_FLT, WX_MAC_PKT_FLT_PR);
wdg = rd32(wx, WX_MAC_WDG_TIMEOUT);
wr32(wx, WX_MAC_WDG_TIMEOUT, wdg);
+
+ wx->speed = speed;
+ wx->last_rx_ptp_check = jiffies;
+ if (test_bit(WX_STATE_PTP_RUNNING, wx->state))
+ wx_ptp_reset_cyclecounter(wx);
}
static int txgbe_mac_prepare(struct phylink_config *config, unsigned int mode,
@@ -557,6 +567,9 @@ int txgbe_init_phy(struct txgbe *txgbe)
struct wx *wx = txgbe->wx;
int ret;
+ if (wx->mac.type == wx_mac_aml)
+ return 0;
+
if (txgbe->wx->media_type == sp_media_copper)
return txgbe_ext_phy_init(txgbe);
@@ -621,6 +634,9 @@ err_unregister_swnode:
void txgbe_remove_phy(struct txgbe *txgbe)
{
+ if (txgbe->wx->mac.type == wx_mac_aml)
+ return;
+
if (txgbe->wx->media_type == sp_media_copper) {
phylink_disconnect_phy(txgbe->wx->phylink);
phylink_destroy(txgbe->wx->phylink);
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
index 629a13e96b85..9c1c26234cad 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
@@ -10,6 +10,12 @@
/* Device IDs */
#define TXGBE_DEV_ID_SP1000 0x1001
#define TXGBE_DEV_ID_WX1820 0x2001
+#define TXGBE_DEV_ID_AML5010 0x5010
+#define TXGBE_DEV_ID_AML5110 0x5110
+#define TXGBE_DEV_ID_AML5025 0x5025
+#define TXGBE_DEV_ID_AML5125 0x5125
+#define TXGBE_DEV_ID_AML5040 0x5040
+#define TXGBE_DEV_ID_AML5140 0x5140
/* Subsystem IDs */
/* SFP */
@@ -137,6 +143,14 @@
#define TXGBE_RDB_FDIR_FLEX_CFG_MSK BIT(2)
#define TXGBE_RDB_FDIR_FLEX_CFG_OFST(v) FIELD_PREP(GENMASK(7, 3), v)
+/*************************** Amber Lite Registers ****************************/
+#define TXGBE_PX_PF_BME 0x4B8
+#define TXGBE_AML_MAC_TX_CFG 0x11000
+#define TXGBE_AML_MAC_TX_CFG_SPEED_MASK GENMASK(30, 27)
+#define TXGBE_AML_MAC_TX_CFG_SPEED_25G BIT(28)
+#define TXGBE_RDM_RSC_CTL 0x1200C
+#define TXGBE_RDM_RSC_CTL_FREE_CTL BIT(7)
+
/* Checksum and EEPROM pointers */
#define TXGBE_EEPROM_LAST_WORD 0x800
#define TXGBE_EEPROM_CHECKSUM 0x2F
diff --git a/drivers/net/ethernet/xilinx/Kconfig b/drivers/net/ethernet/xilinx/Kconfig
index 35d96c633a33..7502214cc7d5 100644
--- a/drivers/net/ethernet/xilinx/Kconfig
+++ b/drivers/net/ethernet/xilinx/Kconfig
@@ -28,6 +28,7 @@ config XILINX_AXI_EMAC
depends on HAS_IOMEM
depends on XILINX_DMA
select PHYLINK
+ select DIMLIB
help
This driver supports the 10/100/1000 Ethernet from Xilinx for the
AXI bus interface used in Xilinx Virtex FPGAs and Soc's.
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet.h b/drivers/net/ethernet/xilinx/xilinx_axienet.h
index a3f4f3e42587..5ff742103beb 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet.h
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet.h
@@ -9,6 +9,7 @@
#ifndef XILINX_AXIENET_H
#define XILINX_AXIENET_H
+#include <linux/dim.h>
#include <linux/netdevice.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
@@ -112,9 +113,6 @@
#define XAXIDMA_DELAY_MASK 0xFF000000 /* Delay timeout counter */
#define XAXIDMA_COALESCE_MASK 0x00FF0000 /* Coalesce counter */
-#define XAXIDMA_DELAY_SHIFT 24
-#define XAXIDMA_COALESCE_SHIFT 16
-
#define XAXIDMA_IRQ_IOC_MASK 0x00001000 /* Completion intr */
#define XAXIDMA_IRQ_DELAY_MASK 0x00002000 /* Delay interrupt */
#define XAXIDMA_IRQ_ERROR_MASK 0x00004000 /* Error interrupt */
@@ -126,8 +124,7 @@
/* Default TX/RX Threshold and delay timer values for SGDMA mode */
#define XAXIDMA_DFT_TX_THRESHOLD 24
#define XAXIDMA_DFT_TX_USEC 50
-#define XAXIDMA_DFT_RX_THRESHOLD 1
-#define XAXIDMA_DFT_RX_USEC 50
+#define XAXIDMA_DFT_RX_USEC 16
#define XAXIDMA_BD_CTRL_TXSOF_MASK 0x08000000 /* First tx packet */
#define XAXIDMA_BD_CTRL_TXEOF_MASK 0x04000000 /* Last tx packet */
@@ -487,7 +484,12 @@ struct skbuf_dma_descriptor {
* @regs: Base address for the axienet_local device address space
* @dma_regs: Base address for the axidma device address space
* @napi_rx: NAPI RX control structure
+ * @rx_dim: DIM state for the receive queue
+ * @rx_dim_enabled: Whether DIM is enabled or not
+ * @rx_irqs: Number of RX interrupts received, used as the DIM event counter
+ * @rx_cr_lock: Lock protecting @rx_dma_cr, its register, and @rx_dma_started
* @rx_dma_cr: Nominal content of RX DMA control register
+ * @rx_dma_started: Set when RX DMA is started
* @rx_bd_v: Virtual address of the RX buffer descriptor ring
* @rx_bd_p: Physical address(start address) of the RX buffer descr. ring
* @rx_bd_num: Size of RX buffer descriptor ring
@@ -497,7 +499,9 @@ struct skbuf_dma_descriptor {
* @rx_bytes: RX byte count for statistics
* @rx_stat_sync: Synchronization object for RX stats
* @napi_tx: NAPI TX control structure
+ * @tx_cr_lock: Lock protecting @tx_dma_cr, its register, and @tx_dma_started
* @tx_dma_cr: Nominal content of TX DMA control register
+ * @tx_dma_started: Set when TX DMA is started
* @tx_bd_v: Virtual address of the TX buffer descriptor ring
* @tx_bd_p: Physical address(start address) of the TX buffer descr. ring
* @tx_bd_num: Size of TX buffer descriptor ring
@@ -532,10 +536,6 @@ struct skbuf_dma_descriptor {
* supported, the maximum frame size would be 9k. Else it is
* 1522 bytes (assuming support for basic VLAN)
* @rxmem: Stores rx memory size for jumbo frame handling.
- * @coalesce_count_rx: Store the irq coalesce on RX side.
- * @coalesce_usec_rx: IRQ coalesce delay for RX
- * @coalesce_count_tx: Store the irq coalesce on TX side.
- * @coalesce_usec_tx: IRQ coalesce delay for TX
* @use_dmaengine: flag to check dmaengine framework usage.
* @tx_chan: TX DMA channel.
* @rx_chan: RX DMA channel.
@@ -569,7 +569,12 @@ struct axienet_local {
void __iomem *dma_regs;
struct napi_struct napi_rx;
+ struct dim rx_dim;
+ bool rx_dim_enabled;
+ u16 rx_irqs;
+ spinlock_t rx_cr_lock;
u32 rx_dma_cr;
+ bool rx_dma_started;
struct axidma_bd *rx_bd_v;
dma_addr_t rx_bd_p;
u32 rx_bd_num;
@@ -579,7 +584,9 @@ struct axienet_local {
struct u64_stats_sync rx_stat_sync;
struct napi_struct napi_tx;
+ spinlock_t tx_cr_lock;
u32 tx_dma_cr;
+ bool tx_dma_started;
struct axidma_bd *tx_bd_v;
dma_addr_t tx_bd_p;
u32 tx_bd_num;
@@ -610,10 +617,6 @@ struct axienet_local {
u32 max_frm_size;
u32 rxmem;
- u32 coalesce_count_rx;
- u32 coalesce_usec_rx;
- u32 coalesce_count_tx;
- u32 coalesce_usec_tx;
u8 use_dmaengine;
struct dma_chan *tx_chan;
struct dma_chan *rx_chan;
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index f33178f90c42..054abf283ab3 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -223,23 +223,62 @@ static void axienet_dma_bd_release(struct net_device *ndev)
lp->rx_bd_p);
}
+static u64 axienet_dma_rate(struct axienet_local *lp)
+{
+ if (lp->axi_clk)
+ return clk_get_rate(lp->axi_clk);
+ return 125000000; /* arbitrary guess if no clock rate set */
+}
+
/**
- * axienet_usec_to_timer - Calculate IRQ delay timer value
- * @lp: Pointer to the axienet_local structure
- * @coalesce_usec: Microseconds to convert into timer value
+ * axienet_calc_cr() - Calculate control register value
+ * @lp: Device private data
+ * @count: Number of completions before an interrupt
+ * @usec: Microseconds after the last completion before an interrupt
+ *
+ * Calculate a control register value based on the coalescing settings. The
+ * run/stop bit is not set.
*/
-static u32 axienet_usec_to_timer(struct axienet_local *lp, u32 coalesce_usec)
+static u32 axienet_calc_cr(struct axienet_local *lp, u32 count, u32 usec)
{
- u32 result;
- u64 clk_rate = 125000000; /* arbitrary guess if no clock rate set */
+ u32 cr;
- if (lp->axi_clk)
- clk_rate = clk_get_rate(lp->axi_clk);
+ cr = FIELD_PREP(XAXIDMA_COALESCE_MASK, count) | XAXIDMA_IRQ_IOC_MASK |
+ XAXIDMA_IRQ_ERROR_MASK;
+ /* Only set interrupt delay timer if not generating an interrupt on
+ * the first packet. Otherwise leave at 0 to disable delay interrupt.
+ */
+ if (count > 1) {
+ u64 clk_rate = axienet_dma_rate(lp);
+ u32 timer;
+
+ /* 1 Timeout Interval = 125 * (clock period of SG clock) */
+ timer = DIV64_U64_ROUND_CLOSEST((u64)usec * clk_rate,
+ XAXIDMA_DELAY_SCALE);
- /* 1 Timeout Interval = 125 * (clock period of SG clock) */
- result = DIV64_U64_ROUND_CLOSEST((u64)coalesce_usec * clk_rate,
- XAXIDMA_DELAY_SCALE);
- return min(result, FIELD_MAX(XAXIDMA_DELAY_MASK));
+ timer = min(timer, FIELD_MAX(XAXIDMA_DELAY_MASK));
+ cr |= FIELD_PREP(XAXIDMA_DELAY_MASK, timer) |
+ XAXIDMA_IRQ_DELAY_MASK;
+ }
+
+ return cr;
+}
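+
+/* Illustration, assuming XAXIDMA_DELAY_SCALE is 125 * USEC_PER_SEC as in
+ * the existing header: with a 125 MHz SG clock and usec = 50,
+ * timer = DIV64_U64_ROUND_CLOSEST(50 * 125000000, 125000000) = 50, i.e.
+ * fifty 1 us timeout intervals before the delay interrupt fires. With
+ * count <= 1 the delay field stays 0 and only completion/error
+ * interrupts are generated.
+ */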
+
+/**
+ * axienet_coalesce_params() - Extract coalesce parameters from the CR
+ * @lp: Device private data
+ * @cr: The control register to parse
+ * @count: Number of packets before an interrupt
+ * @usec: Idle time (in usec) before an interrupt
+ */
+static void axienet_coalesce_params(struct axienet_local *lp, u32 cr,
+ u32 *count, u32 *usec)
+{
+ u64 clk_rate = axienet_dma_rate(lp);
+ u64 timer = FIELD_GET(XAXIDMA_DELAY_MASK, cr);
+
+ *count = FIELD_GET(XAXIDMA_COALESCE_MASK, cr);
+ *usec = DIV64_U64_ROUND_CLOSEST(timer * XAXIDMA_DELAY_SCALE, clk_rate);
}
/**
@@ -248,30 +287,12 @@ static u32 axienet_usec_to_timer(struct axienet_local *lp, u32 coalesce_usec)
*/
static void axienet_dma_start(struct axienet_local *lp)
{
+ spin_lock_irq(&lp->rx_cr_lock);
+
/* Start updating the Rx channel control register */
- lp->rx_dma_cr = (lp->coalesce_count_rx << XAXIDMA_COALESCE_SHIFT) |
- XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_ERROR_MASK;
- /* Only set interrupt delay timer if not generating an interrupt on
- * the first RX packet. Otherwise leave at 0 to disable delay interrupt.
- */
- if (lp->coalesce_count_rx > 1)
- lp->rx_dma_cr |= (axienet_usec_to_timer(lp, lp->coalesce_usec_rx)
- << XAXIDMA_DELAY_SHIFT) |
- XAXIDMA_IRQ_DELAY_MASK;
+ lp->rx_dma_cr &= ~XAXIDMA_CR_RUNSTOP_MASK;
axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr);
- /* Start updating the Tx channel control register */
- lp->tx_dma_cr = (lp->coalesce_count_tx << XAXIDMA_COALESCE_SHIFT) |
- XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_ERROR_MASK;
- /* Only set interrupt delay timer if not generating an interrupt on
- * the first TX packet. Otherwise leave at 0 to disable delay interrupt.
- */
- if (lp->coalesce_count_tx > 1)
- lp->tx_dma_cr |= (axienet_usec_to_timer(lp, lp->coalesce_usec_tx)
- << XAXIDMA_DELAY_SHIFT) |
- XAXIDMA_IRQ_DELAY_MASK;
- axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr);
-
/* Populate the tail pointer and bring the Rx Axi DMA engine out of
* halted state. This will make the Rx side ready for reception.
*/
@@ -280,6 +301,14 @@ static void axienet_dma_start(struct axienet_local *lp)
axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr);
axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
(sizeof(*lp->rx_bd_v) * (lp->rx_bd_num - 1)));
+ lp->rx_dma_started = true;
+
+ spin_unlock_irq(&lp->rx_cr_lock);
+ spin_lock_irq(&lp->tx_cr_lock);
+
+ /* Start updating the Tx channel control register */
+ lp->tx_dma_cr &= ~XAXIDMA_CR_RUNSTOP_MASK;
+ axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr);
/* Write to the RS (Run-stop) bit in the Tx channel control register.
* Tx channel is now ready to run. But only after we write to the
@@ -288,6 +317,9 @@ static void axienet_dma_start(struct axienet_local *lp)
axienet_dma_out_addr(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
lp->tx_dma_cr |= XAXIDMA_CR_RUNSTOP_MASK;
axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr);
+ lp->tx_dma_started = true;
+
+ spin_unlock_irq(&lp->tx_cr_lock);
}
/**
@@ -623,14 +655,22 @@ static void axienet_dma_stop(struct axienet_local *lp)
int count;
u32 cr, sr;
- cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
- cr &= ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK);
+ spin_lock_irq(&lp->rx_cr_lock);
+
+ cr = lp->rx_dma_cr & ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK);
axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
+ lp->rx_dma_started = false;
+
+ spin_unlock_irq(&lp->rx_cr_lock);
synchronize_irq(lp->rx_irq);
- cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
- cr &= ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK);
+ spin_lock_irq(&lp->tx_cr_lock);
+
+ cr = lp->tx_dma_cr & ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK);
axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
+ lp->tx_dma_started = false;
+
+ spin_unlock_irq(&lp->tx_cr_lock);
synchronize_irq(lp->tx_irq);
/* Give DMAs a chance to halt gracefully */
@@ -962,6 +1002,7 @@ static int axienet_tx_poll(struct napi_struct *napi, int budget)
&size, budget);
if (packets) {
+ netdev_completed_queue(ndev, packets, size);
u64_stats_update_begin(&lp->tx_stat_sync);
u64_stats_add(&lp->tx_packets, packets);
u64_stats_add(&lp->tx_bytes, size);
@@ -979,7 +1020,9 @@ static int axienet_tx_poll(struct napi_struct *napi, int budget)
* cause an immediate interrupt if any TX packets are
* already pending.
*/
+ spin_lock_irq(&lp->tx_cr_lock);
axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr);
+ spin_unlock_irq(&lp->tx_cr_lock);
}
return packets;
}
@@ -1083,6 +1126,7 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
if (++new_tail_ptr >= lp->tx_bd_num)
new_tail_ptr = 0;
WRITE_ONCE(lp->tx_bd_tail, new_tail_ptr);
+ netdev_sent_queue(ndev, skb->len);
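+ /* This byte accounting pairs with netdev_completed_queue() in
+ * axienet_tx_poll() and netdev_reset_queue() on stop and error
+ * recovery, enabling BQL on the TX queue.
+ */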
/* Start the transfer */
axienet_dma_out_addr(lp, XAXIDMA_TX_TDESC_OFFSET, tail_p);
@@ -1241,11 +1285,25 @@ static int axienet_rx_poll(struct napi_struct *napi, int budget)
axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, tail_p);
if (packets < budget && napi_complete_done(napi, packets)) {
+ if (READ_ONCE(lp->rx_dim_enabled)) {
+ struct dim_sample sample = {
+ .time = ktime_get(),
+ /* Safe because we are the only writer */
+ .pkt_ctr = u64_stats_read(&lp->rx_packets),
+ .byte_ctr = u64_stats_read(&lp->rx_bytes),
+ .event_ctr = READ_ONCE(lp->rx_irqs),
+ };
+
+ net_dim(&lp->rx_dim, &sample);
+ }
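+ /* net_dim() compares this sample with the previous one and, when it
+ * decides to switch profiles, schedules rx_dim.work, which recomputes
+ * the RX CR from the new profile via axienet_dim_coalesce_count_rx().
+ */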
+
/* Re-enable RX completion interrupts. This should
* cause an immediate interrupt if any RX packets are
* already pending.
*/
+ spin_lock_irq(&lp->rx_cr_lock);
axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr);
+ spin_unlock_irq(&lp->rx_cr_lock);
}
return packets;
}
@@ -1283,11 +1341,14 @@ static irqreturn_t axienet_tx_irq(int irq, void *_ndev)
/* Disable further TX completion interrupts and schedule
* NAPI to handle the completions.
*/
- u32 cr = lp->tx_dma_cr;
-
- cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK);
if (napi_schedule_prep(&lp->napi_tx)) {
+ u32 cr;
+
+ spin_lock(&lp->tx_cr_lock);
+ cr = lp->tx_dma_cr;
+ cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK);
axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
+ spin_unlock(&lp->tx_cr_lock);
__napi_schedule(&lp->napi_tx);
}
}
@@ -1328,11 +1389,16 @@ static irqreturn_t axienet_rx_irq(int irq, void *_ndev)
/* Disable further RX completion interrupts and schedule
* NAPI receive.
*/
- u32 cr = lp->rx_dma_cr;
-
- cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK);
+ WRITE_ONCE(lp->rx_irqs, READ_ONCE(lp->rx_irqs) + 1);
if (napi_schedule_prep(&lp->napi_rx)) {
+ u32 cr;
+
+ spin_lock(&lp->rx_cr_lock);
+ cr = lp->rx_dma_cr;
+ cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK);
axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
+ spin_unlock(&lp->rx_cr_lock);
+
__napi_schedule(&lp->napi_rx);
}
}
@@ -1625,6 +1691,7 @@ err_free_eth_irq:
if (lp->eth_irq > 0)
free_irq(lp->eth_irq, ndev);
err_phy:
+ cancel_work_sync(&lp->rx_dim.work);
cancel_delayed_work_sync(&lp->stats_work);
phylink_stop(lp->phylink);
phylink_disconnect_phy(lp->phylink);
@@ -1654,6 +1721,7 @@ static int axienet_stop(struct net_device *ndev)
napi_disable(&lp->napi_rx);
}
+ cancel_work_sync(&lp->rx_dim.work);
cancel_delayed_work_sync(&lp->stats_work);
phylink_stop(lp->phylink);
@@ -1685,6 +1753,7 @@ static int axienet_stop(struct net_device *ndev)
dma_release_channel(lp->tx_chan);
}
+ netdev_reset_queue(ndev);
axienet_iow(lp, XAE_IE_OFFSET, 0);
if (lp->eth_irq > 0)
@@ -1999,6 +2068,87 @@ axienet_ethtools_set_pauseparam(struct net_device *ndev,
}
/**
+ * axienet_update_coalesce_rx() - Set RX CR
+ * @lp: Device private data
+ * @cr: Value to write to the RX CR
+ * @mask: Bits to set from @cr
+ */
+static void axienet_update_coalesce_rx(struct axienet_local *lp, u32 cr,
+ u32 mask)
+{
+ spin_lock_irq(&lp->rx_cr_lock);
+ lp->rx_dma_cr &= ~mask;
+ lp->rx_dma_cr |= cr;
+ /* If DMA isn't started, then the settings will be applied the next
+ * time dma_start() is called.
+ */
+ if (lp->rx_dma_started) {
+ u32 reg = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
+
+ /* Don't enable IRQs if they are disabled by NAPI */
+ if (reg & XAXIDMA_IRQ_ALL_MASK)
+ cr = lp->rx_dma_cr;
+ else
+ cr = lp->rx_dma_cr & ~XAXIDMA_IRQ_ALL_MASK;
+ axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
+ }
+ spin_unlock_irq(&lp->rx_cr_lock);
+}
+
+/**
+ * axienet_dim_coalesce_count_rx() - RX coalesce count for DIM
+ * @lp: Device private data
+ */
+static u32 axienet_dim_coalesce_count_rx(struct axienet_local *lp)
+{
+ return min(1 << (lp->rx_dim.profile_ix << 1), 255);
+}
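+
+/* The expression above maps DIM profile indices to frame counts:
+ * 0 -> 1, 1 -> 4, 2 -> 16, 3 -> 64, 4 -> 255 (1 << 8 is clamped to fit
+ * the 8-bit coalesce count field).
+ */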
+
+/**
+ * axienet_rx_dim_work() - Adjust RX DIM settings
+ * @work: The work struct
+ */
+static void axienet_rx_dim_work(struct work_struct *work)
+{
+ struct axienet_local *lp =
+ container_of(work, struct axienet_local, rx_dim.work);
+ u32 cr = axienet_calc_cr(lp, axienet_dim_coalesce_count_rx(lp), 0);
+ u32 mask = XAXIDMA_COALESCE_MASK | XAXIDMA_IRQ_IOC_MASK |
+ XAXIDMA_IRQ_ERROR_MASK;
+
+ axienet_update_coalesce_rx(lp, cr, mask);
+ lp->rx_dim.state = DIM_START_MEASURE;
+}
+
+/**
+ * axienet_update_coalesce_tx() - Set TX CR
+ * @lp: Device private data
+ * @cr: Value to write to the TX CR
+ * @mask: Bits to set from @cr
+ */
+static void axienet_update_coalesce_tx(struct axienet_local *lp, u32 cr,
+ u32 mask)
+{
+ spin_lock_irq(&lp->tx_cr_lock);
+ lp->tx_dma_cr &= ~mask;
+ lp->tx_dma_cr |= cr;
+ /* If DMA isn't started, then the settings will be applied the next
+ * time dma_start() is called.
+ */
+ if (lp->tx_dma_started) {
+ u32 reg = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
+
+ /* Don't enable IRQs if they are disabled by NAPI */
+ if (reg & XAXIDMA_IRQ_ALL_MASK)
+ cr = lp->tx_dma_cr;
+ else
+ cr = lp->tx_dma_cr & ~XAXIDMA_IRQ_ALL_MASK;
+ axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
+ }
+ spin_unlock_irq(&lp->tx_cr_lock);
+}
+
+/**
* axienet_ethtools_get_coalesce - Get DMA interrupt coalescing count.
* @ndev: Pointer to net_device structure
* @ecoalesce: Pointer to ethtool_coalesce structure
@@ -2018,11 +2168,23 @@ axienet_ethtools_get_coalesce(struct net_device *ndev,
struct netlink_ext_ack *extack)
{
struct axienet_local *lp = netdev_priv(ndev);
-
- ecoalesce->rx_max_coalesced_frames = lp->coalesce_count_rx;
- ecoalesce->rx_coalesce_usecs = lp->coalesce_usec_rx;
- ecoalesce->tx_max_coalesced_frames = lp->coalesce_count_tx;
- ecoalesce->tx_coalesce_usecs = lp->coalesce_usec_tx;
+ u32 cr;
+
+ ecoalesce->use_adaptive_rx_coalesce = lp->rx_dim_enabled;
+
+ spin_lock_irq(&lp->rx_cr_lock);
+ cr = lp->rx_dma_cr;
+ spin_unlock_irq(&lp->rx_cr_lock);
+ axienet_coalesce_params(lp, cr,
+ &ecoalesce->rx_max_coalesced_frames,
+ &ecoalesce->rx_coalesce_usecs);
+
+ spin_lock_irq(&lp->tx_cr_lock);
+ cr = lp->tx_dma_cr;
+ spin_unlock_irq(&lp->tx_cr_lock);
+ axienet_coalesce_params(lp, cr,
+ &ecoalesce->tx_max_coalesced_frames,
+ &ecoalesce->tx_coalesce_usecs);
return 0;
}
@@ -2046,12 +2208,9 @@ axienet_ethtools_set_coalesce(struct net_device *ndev,
struct netlink_ext_ack *extack)
{
struct axienet_local *lp = netdev_priv(ndev);
-
- if (netif_running(ndev)) {
- NL_SET_ERR_MSG(extack,
- "Please stop netif before applying configuration");
- return -EBUSY;
- }
+ bool new_dim = ecoalesce->use_adaptive_rx_coalesce;
+ bool old_dim = lp->rx_dim_enabled;
+ u32 cr, mask = ~XAXIDMA_CR_RUNSTOP_MASK;
if (ecoalesce->rx_max_coalesced_frames > 255 ||
ecoalesce->tx_max_coalesced_frames > 255) {
@@ -2065,7 +2224,7 @@ axienet_ethtools_set_coalesce(struct net_device *ndev,
return -EINVAL;
}
- if ((ecoalesce->rx_max_coalesced_frames > 1 &&
+ if (((ecoalesce->rx_max_coalesced_frames > 1 || new_dim) &&
!ecoalesce->rx_coalesce_usecs) ||
(ecoalesce->tx_max_coalesced_frames > 1 &&
!ecoalesce->tx_coalesce_usecs)) {
@@ -2074,11 +2233,31 @@ axienet_ethtools_set_coalesce(struct net_device *ndev,
return -EINVAL;
}
- lp->coalesce_count_rx = ecoalesce->rx_max_coalesced_frames;
- lp->coalesce_usec_rx = ecoalesce->rx_coalesce_usecs;
- lp->coalesce_count_tx = ecoalesce->tx_max_coalesced_frames;
- lp->coalesce_usec_tx = ecoalesce->tx_coalesce_usecs;
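+ /* Three transitions are handled below: enabling DIM seeds the RX CR
+ * from the current DIM profile, disabling DIM flushes the DIM work and
+ * applies the requested frame count, and when DIM stays enabled only
+ * the delay timer fields are rewritten while the coalesce count remains
+ * under DIM control.
+ */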
+ if (new_dim && !old_dim) {
+ cr = axienet_calc_cr(lp, axienet_dim_coalesce_count_rx(lp),
+ ecoalesce->rx_coalesce_usecs);
+ } else if (!new_dim) {
+ if (old_dim) {
+ WRITE_ONCE(lp->rx_dim_enabled, false);
+ napi_synchronize(&lp->napi_rx);
+ flush_work(&lp->rx_dim.work);
+ }
+
+ cr = axienet_calc_cr(lp, ecoalesce->rx_max_coalesced_frames,
+ ecoalesce->rx_coalesce_usecs);
+ } else {
+ /* Dummy value for count just to calculate timer */
+ cr = axienet_calc_cr(lp, 2, ecoalesce->rx_coalesce_usecs);
+ mask = XAXIDMA_DELAY_MASK | XAXIDMA_IRQ_DELAY_MASK;
+ }
+
+ axienet_update_coalesce_rx(lp, cr, mask);
+ if (new_dim && !old_dim)
+ WRITE_ONCE(lp->rx_dim_enabled, true);
+ cr = axienet_calc_cr(lp, ecoalesce->tx_max_coalesced_frames,
+ ecoalesce->tx_coalesce_usecs);
+ axienet_update_coalesce_tx(lp, cr, ~XAXIDMA_CR_RUNSTOP_MASK);
return 0;
}
@@ -2316,7 +2495,8 @@ axienet_ethtool_get_rmon_stats(struct net_device *dev,
static const struct ethtool_ops axienet_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_MAX_FRAMES |
- ETHTOOL_COALESCE_USECS,
+ ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
.get_drvinfo = axienet_ethtools_get_drvinfo,
.get_regs_len = axienet_ethtools_get_regs_len,
.get_regs = axienet_ethtools_get_regs,
@@ -2499,6 +2679,7 @@ static void axienet_dma_err_handler(struct work_struct *work)
~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
axienet_dma_stop(lp);
+ netdev_reset_queue(ndev);
for (i = 0; i < lp->tx_bd_num; i++) {
cur_p = &lp->tx_bd_v[i];
@@ -2858,10 +3039,15 @@ static int axienet_probe(struct platform_device *pdev)
axienet_set_mac_address(ndev, NULL);
}
- lp->coalesce_count_rx = XAXIDMA_DFT_RX_THRESHOLD;
- lp->coalesce_count_tx = XAXIDMA_DFT_TX_THRESHOLD;
- lp->coalesce_usec_rx = XAXIDMA_DFT_RX_USEC;
- lp->coalesce_usec_tx = XAXIDMA_DFT_TX_USEC;
+ spin_lock_init(&lp->rx_cr_lock);
+ spin_lock_init(&lp->tx_cr_lock);
+ INIT_WORK(&lp->rx_dim.work, axienet_rx_dim_work);
+ lp->rx_dim_enabled = true;
+ lp->rx_dim.profile_ix = 1;
+ lp->rx_dma_cr = axienet_calc_cr(lp, axienet_dim_coalesce_count_rx(lp),
+ XAXIDMA_DFT_RX_USEC);
+ lp->tx_dma_cr = axienet_calc_cr(lp, XAXIDMA_DFT_TX_THRESHOLD,
+ XAXIDMA_DFT_TX_USEC);
ret = axienet_mdio_setup(lp);
if (ret)
@@ -2891,7 +3077,6 @@ static int axienet_probe(struct platform_device *pdev)
}
of_node_put(np);
lp->pcs.ops = &axienet_pcs_ops;
- lp->pcs.neg_mode = true;
lp->pcs.poll = true;
}
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index dbb3960126ee..66e38ce9cd1d 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -18,6 +18,7 @@
#include <net/rtnetlink.h>
#include <net/geneve.h>
#include <net/gro.h>
+#include <net/netdev_lock.h>
#include <net/protocol.h>
#define GENEVE_NETDEV_VER "0.6"
@@ -57,6 +58,8 @@ struct geneve_config {
bool ttl_inherit;
enum ifla_geneve_df df;
bool inner_proto_inherit;
+ u16 port_min;
+ u16 port_max;
};
/* Pseudo network device */
@@ -835,7 +838,9 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
use_cache = ip_tunnel_dst_cache_usable(skb, info);
tos = geneve_get_dsfield(skb, dev, info, &use_cache);
- sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
+ sport = udp_flow_src_port(geneve->net, skb,
+ geneve->cfg.port_min,
+ geneve->cfg.port_max, true);
rt = udp_tunnel_dst_lookup(skb, dev, geneve->net, 0, &saddr,
&info->key,
@@ -945,7 +950,9 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
use_cache = ip_tunnel_dst_cache_usable(skb, info);
prio = geneve_get_dsfield(skb, dev, info, &use_cache);
- sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
+ sport = udp_flow_src_port(geneve->net, skb,
+ geneve->cfg.port_min,
+ geneve->cfg.port_max, true);
dst = udp_tunnel6_dst_lookup(skb, dev, geneve->net, gs6->sock, 0,
&saddr, key, sport,
@@ -1084,7 +1091,8 @@ static int geneve_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
use_cache = ip_tunnel_dst_cache_usable(skb, info);
tos = geneve_get_dsfield(skb, dev, info, &use_cache);
sport = udp_flow_src_port(geneve->net, skb,
- 1, USHRT_MAX, true);
+ geneve->cfg.port_min,
+ geneve->cfg.port_max, true);
rt = udp_tunnel_dst_lookup(skb, dev, geneve->net, 0, &saddr,
&info->key,
@@ -1110,7 +1118,8 @@ static int geneve_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
use_cache = ip_tunnel_dst_cache_usable(skb, info);
prio = geneve_get_dsfield(skb, dev, info, &use_cache);
sport = udp_flow_src_port(geneve->net, skb,
- 1, USHRT_MAX, true);
+ geneve->cfg.port_min,
+ geneve->cfg.port_max, true);
dst = udp_tunnel6_dst_lookup(skb, dev, geneve->net, gs6->sock, 0,
&saddr, &info->key, sport,
@@ -1234,6 +1243,7 @@ static const struct nla_policy geneve_policy[IFLA_GENEVE_MAX + 1] = {
[IFLA_GENEVE_TTL_INHERIT] = { .type = NLA_U8 },
[IFLA_GENEVE_DF] = { .type = NLA_U8 },
[IFLA_GENEVE_INNER_PROTO_INHERIT] = { .type = NLA_FLAG },
+ [IFLA_GENEVE_PORT_RANGE] = NLA_POLICY_EXACT_LEN(sizeof(struct ifla_geneve_port_range)),
};
static int geneve_validate(struct nlattr *tb[], struct nlattr *data[],
@@ -1279,6 +1289,17 @@ static int geneve_validate(struct nlattr *tb[], struct nlattr *data[],
}
}
+ if (data[IFLA_GENEVE_PORT_RANGE]) {
+ const struct ifla_geneve_port_range *p;
+
+ p = nla_data(data[IFLA_GENEVE_PORT_RANGE]);
+ if (ntohs(p->high) < ntohs(p->low)) {
+ NL_SET_ERR_MSG_ATTR(extack, data[IFLA_GENEVE_PORT_RANGE],
+ "Invalid source port range");
+ return -EINVAL;
+ }
+ }
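+ /* For example, low = htons(49152) and high = htons(65535) would limit
+ * the source port picked by udp_flow_src_port() to the dynamic port
+ * range; a range with high < low is rejected above.
+ */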
+
return 0;
}
@@ -1506,6 +1527,18 @@ static int geneve_nl2info(struct nlattr *tb[], struct nlattr *data[],
info->key.tp_dst = nla_get_be16(data[IFLA_GENEVE_PORT]);
}
+ if (data[IFLA_GENEVE_PORT_RANGE]) {
+ const struct ifla_geneve_port_range *p;
+
+ if (changelink) {
+ attrtype = IFLA_GENEVE_PORT_RANGE;
+ goto change_notsup;
+ }
+ p = nla_data(data[IFLA_GENEVE_PORT_RANGE]);
+ cfg->port_min = ntohs(p->low);
+ cfg->port_max = ntohs(p->high);
+ }
+
if (data[IFLA_GENEVE_COLLECT_METADATA]) {
if (changelink) {
attrtype = IFLA_GENEVE_COLLECT_METADATA;
@@ -1614,15 +1647,20 @@ static void geneve_link_config(struct net_device *dev,
geneve_change_mtu(dev, ldev_mtu - info->options_len);
}
-static int geneve_newlink(struct net *net, struct net_device *dev,
- struct nlattr *tb[], struct nlattr *data[],
+static int geneve_newlink(struct net_device *dev,
+ struct rtnl_newlink_params *params,
struct netlink_ext_ack *extack)
{
+ struct net *link_net = rtnl_newlink_link_net(params);
+ struct nlattr **data = params->data;
+ struct nlattr **tb = params->tb;
struct geneve_config cfg = {
.df = GENEVE_DF_UNSET,
.use_udp6_rx_checksums = false,
.ttl_inherit = false,
.collect_md = false,
+ .port_min = 1,
+ .port_max = USHRT_MAX,
};
int err;
@@ -1631,7 +1669,7 @@ static int geneve_newlink(struct net *net, struct net_device *dev,
if (err)
return err;
- err = geneve_configure(net, dev, extack, &cfg);
+ err = geneve_configure(link_net, dev, extack, &cfg);
if (err)
return err;
@@ -1741,6 +1779,7 @@ static size_t geneve_get_size(const struct net_device *dev)
nla_total_size(sizeof(__u8)) + /* IFLA_GENEVE_UDP_ZERO_CSUM6_RX */
nla_total_size(sizeof(__u8)) + /* IFLA_GENEVE_TTL_INHERIT */
nla_total_size(0) + /* IFLA_GENEVE_INNER_PROTO_INHERIT */
+ nla_total_size(sizeof(struct ifla_geneve_port_range)) + /* IFLA_GENEVE_PORT_RANGE */
0;
}
@@ -1750,6 +1789,10 @@ static int geneve_fill_info(struct sk_buff *skb, const struct net_device *dev)
struct ip_tunnel_info *info = &geneve->cfg.info;
bool ttl_inherit = geneve->cfg.ttl_inherit;
bool metadata = geneve->cfg.collect_md;
+ struct ifla_geneve_port_range ports = {
+ .low = htons(geneve->cfg.port_min),
+ .high = htons(geneve->cfg.port_max),
+ };
__u8 tmp_vni[3];
__u32 vni;
@@ -1806,6 +1849,9 @@ static int geneve_fill_info(struct sk_buff *skb, const struct net_device *dev)
nla_put_flag(skb, IFLA_GENEVE_INNER_PROTO_INHERIT))
goto nla_put_failure;
+ if (nla_put(skb, IFLA_GENEVE_PORT_RANGE, sizeof(ports), &ports))
+ goto nla_put_failure;
+
return 0;
nla_put_failure:
@@ -1838,6 +1884,8 @@ struct net_device *geneve_dev_create_fb(struct net *net, const char *name,
.use_udp6_rx_checksums = true,
.ttl_inherit = false,
.collect_md = true,
+ .port_min = 1,
+ .port_max = USHRT_MAX,
};
memset(tb, 0, sizeof(tb));
diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
index b7b46c5e6399..ef793607890d 100644
--- a/drivers/net/gtp.c
+++ b/drivers/net/gtp.c
@@ -1462,10 +1462,12 @@ static int gtp_create_sockets(struct gtp_dev *gtp, const struct nlattr *nla,
#define GTP_TH_MAXLEN (sizeof(struct udphdr) + sizeof(struct gtp0_header))
#define GTP_IPV6_MAXLEN (sizeof(struct ipv6hdr) + GTP_TH_MAXLEN)
-static int gtp_newlink(struct net *src_net, struct net_device *dev,
- struct nlattr *tb[], struct nlattr *data[],
+static int gtp_newlink(struct net_device *dev,
+ struct rtnl_newlink_params *params,
struct netlink_ext_ack *extack)
{
+ struct net *link_net = rtnl_newlink_link_net(params);
+ struct nlattr **data = params->data;
unsigned int role = GTP_ROLE_GGSN;
struct gtp_dev *gtp;
struct gtp_net *gn;
@@ -1496,7 +1498,7 @@ static int gtp_newlink(struct net *src_net, struct net_device *dev,
gtp->restart_count = nla_get_u8_default(data[IFLA_GTP_RESTART_COUNT],
0);
- gtp->net = src_net;
+ gtp->net = link_net;
err = gtp_hashtable_new(gtp, hashsize);
if (err < 0)
@@ -1526,7 +1528,7 @@ static int gtp_newlink(struct net *src_net, struct net_device *dev,
goto out_encap;
}
- gn = net_generic(src_net, gtp_net_id);
+ gn = net_generic(link_net, gtp_net_id);
list_add(&gtp->list, &gn->gtp_dev_list);
dev->priv_destructor = gtp_destructor;
diff --git a/drivers/net/hamradio/baycom_par.c b/drivers/net/hamradio/baycom_par.c
index 00ebc25d0b22..f03797103c6a 100644
--- a/drivers/net/hamradio/baycom_par.c
+++ b/drivers/net/hamradio/baycom_par.c
@@ -427,7 +427,7 @@ static int baycom_ioctl(struct net_device *dev, void __user *data,
break;
case HDLCDRVCTL_GETMODE:
- strcpy(hi->data.modename, bc->options ? "par96" : "picpar");
+ strscpy(hi->data.modename, bc->options ? "par96" : "picpar");
if (copy_to_user(data, hi, sizeof(struct hdlcdrv_ioctl)))
return -EFAULT;
return 0;
@@ -439,7 +439,7 @@ static int baycom_ioctl(struct net_device *dev, void __user *data,
return baycom_setmode(bc, hi->data.modename);
case HDLCDRVCTL_MODELIST:
- strcpy(hi->data.modename, "par96,picpar");
+ strscpy(hi->data.modename, "par96,picpar");
if (copy_to_user(data, hi, sizeof(struct hdlcdrv_ioctl)))
return -EFAULT;
return 0;
diff --git a/drivers/net/hamradio/baycom_ser_fdx.c b/drivers/net/hamradio/baycom_ser_fdx.c
index 799f8ece7824..ee5bd3c12040 100644
--- a/drivers/net/hamradio/baycom_ser_fdx.c
+++ b/drivers/net/hamradio/baycom_ser_fdx.c
@@ -531,7 +531,7 @@ static int baycom_ioctl(struct net_device *dev, void __user *data,
return baycom_setmode(bc, hi->data.modename);
case HDLCDRVCTL_MODELIST:
- strcpy(hi->data.modename, "ser12,ser3,ser24");
+ strscpy(hi->data.modename, "ser12,ser3,ser24");
if (copy_to_user(data, hi, sizeof(struct hdlcdrv_ioctl)))
return -EFAULT;
return 0;
diff --git a/drivers/net/hamradio/baycom_ser_hdx.c b/drivers/net/hamradio/baycom_ser_hdx.c
index 5d1ab4840753..05bdad214799 100644
--- a/drivers/net/hamradio/baycom_ser_hdx.c
+++ b/drivers/net/hamradio/baycom_ser_hdx.c
@@ -570,7 +570,7 @@ static int baycom_ioctl(struct net_device *dev, void __user *data,
break;
case HDLCDRVCTL_GETMODE:
- strcpy(hi->data.modename, "ser12");
+ strscpy(hi->data.modename, "ser12");
if (bc->opt_dcd <= 0)
strcat(hi->data.modename, (!bc->opt_dcd) ? "*" : (bc->opt_dcd == -2) ? "@" : "+");
if (copy_to_user(data, hi, sizeof(struct hdlcdrv_ioctl)))
@@ -584,7 +584,7 @@ static int baycom_ioctl(struct net_device *dev, void __user *data,
return baycom_setmode(bc, hi->data.modename);
case HDLCDRVCTL_MODELIST:
- strcpy(hi->data.modename, "ser12");
+ strscpy(hi->data.modename, "ser12");
if (copy_to_user(data, hi, sizeof(struct hdlcdrv_ioctl)))
return -EFAULT;
return 0;
diff --git a/drivers/net/hamradio/bpqether.c b/drivers/net/hamradio/bpqether.c
index bac1bb69d63a..0e0fe32d2da4 100644
--- a/drivers/net/hamradio/bpqether.c
+++ b/drivers/net/hamradio/bpqether.c
@@ -77,6 +77,7 @@
#include <net/ip.h>
#include <net/arp.h>
+#include <net/netdev_lock.h>
#include <net/net_namespace.h>
#include <linux/bpqether.h>
@@ -107,27 +108,6 @@ struct bpqdev {
static LIST_HEAD(bpq_devices);
-/*
- * bpqether network devices are paired with ethernet devices below them, so
- * form a special "super class" of normal ethernet devices; split their locks
- * off into a separate class since they always nest.
- */
-static struct lock_class_key bpq_netdev_xmit_lock_key;
-static struct lock_class_key bpq_netdev_addr_lock_key;
-
-static void bpq_set_lockdep_class_one(struct net_device *dev,
- struct netdev_queue *txq,
- void *_unused)
-{
- lockdep_set_class(&txq->_xmit_lock, &bpq_netdev_xmit_lock_key);
-}
-
-static void bpq_set_lockdep_class(struct net_device *dev)
-{
- lockdep_set_class(&dev->addr_list_lock, &bpq_netdev_addr_lock_key);
- netdev_for_each_tx_queue(dev, bpq_set_lockdep_class_one, NULL);
-}
-
/* ------------------------------------------------------------------------ */
@@ -454,6 +434,8 @@ static const struct net_device_ops bpq_netdev_ops = {
static void bpq_setup(struct net_device *dev)
{
+ netdev_lockdep_set_classes(dev);
+
dev->netdev_ops = &bpq_netdev_ops;
dev->needs_free_netdev = true;
@@ -499,7 +481,6 @@ static int bpq_new_device(struct net_device *edev)
err = register_netdevice(ndev);
if (err)
goto error;
- bpq_set_lockdep_class(ndev);
/* List protected by RTNL */
list_add_rcu(&bpq->bpq_list, &bpq_devices);
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index 234db693cefa..70f7cb383228 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -1166,6 +1166,8 @@ struct netvsc_device {
u32 max_chn;
u32 num_chn;
+ u32 netvsc_gso_max_size;
+
atomic_t open_chn;
struct work_struct subchan_work;
wait_queue_head_t subchan_open;
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index d6c4abfc3a28..c51b318b8a72 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -29,6 +29,7 @@
#include <linux/bpf.h>
#include <net/arp.h>
+#include <net/netdev_lock.h>
#include <net/route.h>
#include <net/sock.h>
#include <net/pkt_sched.h>
@@ -2461,6 +2462,21 @@ static int netvsc_vf_changed(struct net_device *vf_netdev, unsigned long event)
} else {
netdev_info(ndev, "Data path switched %s VF: %s\n",
vf_is_up ? "to" : "from", vf_netdev->name);
+
+ /* In Azure, when accelerated networking is enabled, other NICs
+ * such as MANA or MLX are bonded with the netvsc (failover) NIC.
+ * For bonded NICs, the stack propagates the minimum of the
+ * members' maximum packet aggregate sizes. To let these NICs
+ * (MANA/MLX) use GSO packets up to GSO_MAX_SIZE, the netvsc NIC
+ * in the guest must also support that size. This value is only
+ * increased for the netvsc NIC when the data path is switched
+ * over to the VF.
+ */
+ if (vf_is_up)
+ netif_set_tso_max_size(ndev, vf_netdev->tso_max_size);
+ else
+ netif_set_tso_max_size(ndev, netvsc_dev->netvsc_gso_max_size);
}
return NOTIFY_OK;
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index c0ceeef4fcd8..82747dfacd70 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -1356,9 +1356,10 @@ static int rndis_netdev_set_hwcaps(struct rndis_device *rndis_device,
struct net_device_context *net_device_ctx = netdev_priv(net);
struct ndis_offload hwcaps;
struct ndis_offload_params offloads;
- unsigned int gso_max_size = GSO_LEGACY_MAX_SIZE;
int ret;
+ nvdev->netvsc_gso_max_size = GSO_LEGACY_MAX_SIZE;
+
/* Find HW offload capabilities */
ret = rndis_query_hwcaps(rndis_device, nvdev, &hwcaps);
if (ret != 0)
@@ -1390,8 +1391,8 @@ static int rndis_netdev_set_hwcaps(struct rndis_device *rndis_device,
offloads.lso_v2_ipv4 = NDIS_OFFLOAD_PARAMETERS_LSOV2_ENABLED;
net->hw_features |= NETIF_F_TSO;
- if (hwcaps.lsov2.ip4_maxsz < gso_max_size)
- gso_max_size = hwcaps.lsov2.ip4_maxsz;
+ if (hwcaps.lsov2.ip4_maxsz < nvdev->netvsc_gso_max_size)
+ nvdev->netvsc_gso_max_size = hwcaps.lsov2.ip4_maxsz;
}
if (hwcaps.csum.ip4_txcsum & NDIS_TXCSUM_CAP_UDP4) {
@@ -1411,8 +1412,8 @@ static int rndis_netdev_set_hwcaps(struct rndis_device *rndis_device,
offloads.lso_v2_ipv6 = NDIS_OFFLOAD_PARAMETERS_LSOV2_ENABLED;
net->hw_features |= NETIF_F_TSO6;
- if (hwcaps.lsov2.ip6_maxsz < gso_max_size)
- gso_max_size = hwcaps.lsov2.ip6_maxsz;
+ if (hwcaps.lsov2.ip6_maxsz < nvdev->netvsc_gso_max_size)
+ nvdev->netvsc_gso_max_size = hwcaps.lsov2.ip6_maxsz;
}
if (hwcaps.csum.ip6_txcsum & NDIS_TXCSUM_CAP_UDP6) {
@@ -1438,7 +1439,7 @@ static int rndis_netdev_set_hwcaps(struct rndis_device *rndis_device,
*/
net->features &= ~NETVSC_SUPPORTED_HW_FEATURES | net->hw_features;
- netif_set_tso_max_size(net, gso_max_size);
+ netif_set_tso_max_size(net, nvdev->netvsc_gso_max_size);
ret = rndis_filter_set_offload_params(net, nvdev, &offloads);
diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c
index f632b0cfd5ae..fd91f8a45bce 100644
--- a/drivers/net/ieee802154/at86rf230.c
+++ b/drivers/net/ieee802154/at86rf230.c
@@ -776,8 +776,8 @@ at86rf230_setup_spi_messages(struct at86rf230_local *lp,
state->trx.tx_buf = state->buf;
state->trx.rx_buf = state->buf;
spi_message_add_tail(&state->trx, &state->msg);
- hrtimer_init(&state->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- state->timer.function = at86rf230_async_state_timer;
+ hrtimer_setup(&state->timer, at86rf230_async_state_timer, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
}
static irqreturn_t at86rf230_isr(int irq, void *data)
diff --git a/drivers/net/ieee802154/ca8210.c b/drivers/net/ieee802154/ca8210.c
index 753215ebc67c..ebc4f1b18e7b 100644
--- a/drivers/net/ieee802154/ca8210.c
+++ b/drivers/net/ieee802154/ca8210.c
@@ -52,12 +52,10 @@
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
-#include <linux/gpio.h>
#include <linux/ieee802154.h>
#include <linux/io.h>
#include <linux/kfifo.h>
#include <linux/of.h>
-#include <linux/of_gpio.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/poll.h>
@@ -350,8 +348,8 @@ struct work_priv_container {
* @extclockenable: true if the external clock is to be enabled
* @extclockfreq: frequency of the external clock
* @extclockgpio: ca8210 output gpio of the external clock
- * @gpio_reset: gpio number of ca8210 reset line
- * @gpio_irq: gpio number of ca8210 interrupt line
+ * @reset_gpio: ca8210 reset GPIO descriptor
+ * @irq_gpio: ca8210 interrupt GPIO descriptor
* @irq_id: identifier for the ca8210 irq
*
*/
@@ -359,8 +357,8 @@ struct ca8210_platform_data {
bool extclockenable;
unsigned int extclockfreq;
unsigned int extclockgpio;
- int gpio_reset;
- int gpio_irq;
+ struct gpio_desc *reset_gpio;
+ struct gpio_desc *irq_gpio;
int irq_id;
};
@@ -627,14 +625,15 @@ static int ca8210_spi_transfer(
*/
static void ca8210_reset_send(struct spi_device *spi, unsigned int ms)
{
- struct ca8210_platform_data *pdata = spi->dev.platform_data;
+ struct device *dev = &spi->dev;
+ struct ca8210_platform_data *pdata = dev_get_platdata(dev);
struct ca8210_priv *priv = spi_get_drvdata(spi);
long status;
- gpio_set_value(pdata->gpio_reset, 0);
+ gpiod_set_value(pdata->reset_gpio, 1);
reinit_completion(&priv->ca8210_is_awake);
msleep(ms);
- gpio_set_value(pdata->gpio_reset, 1);
+ gpiod_set_value(pdata->reset_gpio, 0);
priv->promiscuous = false;
/* Wait until wakeup indication seen */
@@ -1446,8 +1445,7 @@ static u8 mcps_data_request(
command.pdata.data_req.src_addr_mode = src_addr_mode;
command.pdata.data_req.dst.mode = dst_address_mode;
if (dst_address_mode != MAC_MODE_NO_ADDR) {
- command.pdata.data_req.dst.pan_id[0] = LS_BYTE(dst_pan_id);
- command.pdata.data_req.dst.pan_id[1] = MS_BYTE(dst_pan_id);
+ put_unaligned_le16(dst_pan_id, command.pdata.data_req.dst.pan_id);
if (dst_address_mode == MAC_MODE_SHORT_ADDR) {
command.pdata.data_req.dst.address[0] = LS_BYTE(
dst_addr->short_address
@@ -1795,12 +1793,12 @@ static int ca8210_skb_rx(
}
hdr.source.mode = data_ind[0];
dev_dbg(&priv->spi->dev, "srcAddrMode: %#03x\n", hdr.source.mode);
- hdr.source.pan_id = *(u16 *)&data_ind[1];
+ hdr.source.pan_id = cpu_to_le16(get_unaligned_le16(&data_ind[1]));
dev_dbg(&priv->spi->dev, "srcPanId: %#06x\n", hdr.source.pan_id);
memcpy(&hdr.source.extended_addr, &data_ind[3], 8);
hdr.dest.mode = data_ind[11];
dev_dbg(&priv->spi->dev, "dstAddrMode: %#03x\n", hdr.dest.mode);
- hdr.dest.pan_id = *(u16 *)&data_ind[12];
+ hdr.dest.pan_id = cpu_to_le16(get_unaligned_le16(&data_ind[12]));
dev_dbg(&priv->spi->dev, "dstPanId: %#06x\n", hdr.dest.pan_id);
memcpy(&hdr.dest.extended_addr, &data_ind[14], 8);
@@ -1927,7 +1925,7 @@ static int ca8210_skb_tx(
status = mcps_data_request(
header.source.mode,
header.dest.mode,
- header.dest.pan_id,
+ le16_to_cpu(header.dest.pan_id),
(union macaddr *)&header.dest.extended_addr,
skb->len - mac_len,
&skb->data[mac_len],
@@ -2737,9 +2735,10 @@ static int ca8210_config_extern_clk(
*/
static int ca8210_register_ext_clock(struct spi_device *spi)
{
+ struct device *dev = &spi->dev;
+ struct ca8210_platform_data *pdata = dev_get_platdata(dev);
struct device_node *np = spi->dev.of_node;
struct ca8210_priv *priv = spi_get_drvdata(spi);
- struct ca8210_platform_data *pdata = spi->dev.platform_data;
if (!np)
return -EFAULT;
@@ -2785,25 +2784,16 @@ static void ca8210_unregister_ext_clock(struct spi_device *spi)
*/
static int ca8210_reset_init(struct spi_device *spi)
{
- int ret;
- struct ca8210_platform_data *pdata = spi->dev.platform_data;
-
- pdata->gpio_reset = of_get_named_gpio(
- spi->dev.of_node,
- "reset-gpio",
- 0
- );
+ struct device *dev = &spi->dev;
+ struct ca8210_platform_data *pdata = dev_get_platdata(dev);
- ret = gpio_direction_output(pdata->gpio_reset, 1);
- if (ret < 0) {
- dev_crit(
- &spi->dev,
- "Reset GPIO %d did not set to output mode\n",
- pdata->gpio_reset
- );
+ pdata->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(pdata->reset_gpio)) {
+ dev_crit(dev, "Reset GPIO did not set to output mode\n");
+ return PTR_ERR(pdata->reset_gpio);
}
- return ret;
+ return 0;
}
/**
@@ -2814,23 +2804,19 @@ static int ca8210_reset_init(struct spi_device *spi)
*/
static int ca8210_interrupt_init(struct spi_device *spi)
{
+ struct device *dev = &spi->dev;
+ struct ca8210_platform_data *pdata = dev_get_platdata(dev);
int ret;
- struct ca8210_platform_data *pdata = spi->dev.platform_data;
- pdata->gpio_irq = of_get_named_gpio(
- spi->dev.of_node,
- "irq-gpio",
- 0
- );
+ pdata->irq_gpio = devm_gpiod_get(dev, "irq", GPIOD_IN);
+ if (IS_ERR(pdata->irq_gpio)) {
+ dev_crit(dev, "Could not retrieve IRQ GPIO\n");
+ return PTR_ERR(pdata->irq_gpio);
+ }
- pdata->irq_id = gpio_to_irq(pdata->gpio_irq);
+ pdata->irq_id = gpiod_to_irq(pdata->irq_gpio);
if (pdata->irq_id < 0) {
- dev_crit(
- &spi->dev,
- "Could not get irq for gpio pin %d\n",
- pdata->gpio_irq
- );
- gpio_free(pdata->gpio_irq);
+ dev_crit(dev, "Could not get irq for IRQ GPIO\n");
return pdata->irq_id;
}
@@ -2841,10 +2827,8 @@ static int ca8210_interrupt_init(struct spi_device *spi)
"ca8210-irq",
spi_get_drvdata(spi)
);
- if (ret) {
+ if (ret)
dev_crit(&spi->dev, "request_irq %d failed\n", pdata->irq_id);
- gpio_free(pdata->gpio_irq);
- }
return ret;
}
diff --git a/drivers/net/ipa/data/ipa_data-v4.7.c b/drivers/net/ipa/data/ipa_data-v4.7.c
index c8c23d9be961..41f212209993 100644
--- a/drivers/net/ipa/data/ipa_data-v4.7.c
+++ b/drivers/net/ipa/data/ipa_data-v4.7.c
@@ -28,20 +28,18 @@ enum ipa_resource_type {
enum ipa_rsrc_group_id {
/* Source resource group identifiers */
IPA_RSRC_GROUP_SRC_UL_DL = 0,
- IPA_RSRC_GROUP_SRC_UC_RX_Q,
IPA_RSRC_GROUP_SRC_COUNT, /* Last in set; not a source group */
/* Destination resource group identifiers */
- IPA_RSRC_GROUP_DST_UL_DL_DPL = 0,
- IPA_RSRC_GROUP_DST_UNUSED_1,
+ IPA_RSRC_GROUP_DST_UL_DL = 0,
IPA_RSRC_GROUP_DST_COUNT, /* Last; not a destination group */
};
/* QSB configuration data for an SoC having IPA v4.7 */
static const struct ipa_qsb_data ipa_qsb_data[] = {
[IPA_QSB_MASTER_DDR] = {
- .max_writes = 8,
- .max_reads = 0, /* no limit (hardware max) */
+ .max_writes = 12,
+ .max_reads = 13,
.max_reads_beats = 120,
},
};
@@ -81,7 +79,7 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
},
.endpoint = {
.config = {
- .resource_group = IPA_RSRC_GROUP_DST_UL_DL_DPL,
+ .resource_group = IPA_RSRC_GROUP_DST_UL_DL,
.aggregation = true,
.status_enable = true,
.rx = {
@@ -106,6 +104,7 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
.filter_support = true,
.config = {
.resource_group = IPA_RSRC_GROUP_SRC_UL_DL,
+ .checksum = true,
.qmap = true,
.status_enable = true,
.tx = {
@@ -128,7 +127,8 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
},
.endpoint = {
.config = {
- .resource_group = IPA_RSRC_GROUP_DST_UL_DL_DPL,
+ .resource_group = IPA_RSRC_GROUP_DST_UL_DL,
+ .checksum = true,
.qmap = true,
.aggregation = true,
.rx = {
@@ -197,12 +197,12 @@ static const struct ipa_resource ipa_resource_src[] = {
/* Destination resource configuration data for an SoC having IPA v4.7 */
static const struct ipa_resource ipa_resource_dst[] = {
[IPA_RESOURCE_TYPE_DST_DATA_SECTORS] = {
- .limits[IPA_RSRC_GROUP_DST_UL_DL_DPL] = {
+ .limits[IPA_RSRC_GROUP_DST_UL_DL] = {
.min = 7, .max = 7,
},
},
[IPA_RESOURCE_TYPE_DST_DPS_DMARS] = {
- .limits[IPA_RSRC_GROUP_DST_UL_DL_DPL] = {
+ .limits[IPA_RSRC_GROUP_DST_UL_DL] = {
.min = 2, .max = 2,
},
},
diff --git a/drivers/net/ipvlan/ipvlan.h b/drivers/net/ipvlan/ipvlan.h
index 025e0c19ec25..50de3ee204db 100644
--- a/drivers/net/ipvlan/ipvlan.h
+++ b/drivers/net/ipvlan/ipvlan.h
@@ -166,8 +166,7 @@ struct ipvl_addr *ipvlan_addr_lookup(struct ipvl_port *port, void *lyr3h,
void *ipvlan_get_L3_hdr(struct ipvl_port *port, struct sk_buff *skb, int *type);
void ipvlan_count_rx(const struct ipvl_dev *ipvlan,
unsigned int len, bool success, bool mcast);
-int ipvlan_link_new(struct net *src_net, struct net_device *dev,
- struct nlattr *tb[], struct nlattr *data[],
+int ipvlan_link_new(struct net_device *dev, struct rtnl_newlink_params *params,
struct netlink_ext_ack *extack);
void ipvlan_link_delete(struct net_device *dev, struct list_head *head);
void ipvlan_link_setup(struct net_device *dev);
diff --git a/drivers/net/ipvlan/ipvlan_l3s.c b/drivers/net/ipvlan/ipvlan_l3s.c
index b4ef386bdb1b..7c017fe35522 100644
--- a/drivers/net/ipvlan/ipvlan_l3s.c
+++ b/drivers/net/ipvlan/ipvlan_l3s.c
@@ -226,5 +226,4 @@ void ipvlan_l3s_unregister(struct ipvl_port *port)
dev->priv_flags &= ~IFF_L3MDEV_RX_HANDLER;
ipvlan_unregister_nf_hook(read_pnet(&port->pnet));
- dev->l3mdev_ops = NULL;
}
diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
index da3a97a65507..0ed2fd833a5d 100644
--- a/drivers/net/ipvlan/ipvlan_main.c
+++ b/drivers/net/ipvlan/ipvlan_main.c
@@ -3,6 +3,7 @@
*/
#include <linux/ethtool.h>
+#include <net/netdev_lock.h>
#include "ipvlan.h"
@@ -532,11 +533,13 @@ err:
return ret;
}
-int ipvlan_link_new(struct net *src_net, struct net_device *dev,
- struct nlattr *tb[], struct nlattr *data[],
+int ipvlan_link_new(struct net_device *dev, struct rtnl_newlink_params *params,
struct netlink_ext_ack *extack)
{
+ struct net *link_net = rtnl_newlink_link_net(params);
struct ipvl_dev *ipvlan = netdev_priv(dev);
+ struct nlattr **data = params->data;
+ struct nlattr **tb = params->tb;
struct ipvl_port *port;
struct net_device *phy_dev;
int err;
@@ -545,7 +548,7 @@ int ipvlan_link_new(struct net *src_net, struct net_device *dev,
if (!tb[IFLA_LINK])
return -EINVAL;
- phy_dev = __dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK]));
+ phy_dev = __dev_get_by_index(link_net, nla_get_u32(tb[IFLA_LINK]));
if (!phy_dev)
return -ENODEV;
diff --git a/drivers/net/ipvlan/ipvtap.c b/drivers/net/ipvlan/ipvtap.c
index 1afc4c47be73..edd13916831a 100644
--- a/drivers/net/ipvlan/ipvtap.c
+++ b/drivers/net/ipvlan/ipvtap.c
@@ -73,8 +73,8 @@ static void ipvtap_update_features(struct tap_dev *tap,
netdev_update_features(vlan->dev);
}
-static int ipvtap_newlink(struct net *src_net, struct net_device *dev,
- struct nlattr *tb[], struct nlattr *data[],
+static int ipvtap_newlink(struct net_device *dev,
+ struct rtnl_newlink_params *params,
struct netlink_ext_ack *extack)
{
struct ipvtap_dev *vlantap = netdev_priv(dev);
@@ -97,7 +97,7 @@ static int ipvtap_newlink(struct net *src_net, struct net_device *dev,
/* Don't put anything that may fail after macvlan_common_newlink
* because we can't undo what it does.
*/
- err = ipvlan_link_new(src_net, dev, tb, data, extack);
+ err = ipvlan_link_new(dev, params, extack);
if (err) {
netdev_rx_handler_unregister(dev);
return err;
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
index f1d68153987e..1fb6ce6843ad 100644
--- a/drivers/net/loopback.c
+++ b/drivers/net/loopback.c
@@ -54,6 +54,7 @@
#include <linux/percpu.h>
#include <linux/net_tstamp.h>
#include <net/net_namespace.h>
+#include <net/netdev_lock.h>
#include <linux/u64_stats_sync.h>
/* blackhole_netdev - a device used for dsts that are marked expired!
@@ -172,7 +173,7 @@ static void gen_lo_setup(struct net_device *dev,
dev->flags = IFF_LOOPBACK;
dev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_NO_QUEUE;
dev->lltx = true;
- dev->netns_local = true;
+ dev->netns_immutable = true;
netif_keep_dst(dev);
dev->hw_features = NETIF_F_GSO_SOFTWARE;
dev->features = NETIF_F_SG | NETIF_F_FRAGLIST
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index 1bc1e5993f56..3d315e30ee47 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -19,6 +19,7 @@
#include <net/gro_cells.h>
#include <net/macsec.h>
#include <net/dst_metadata.h>
+#include <net/netdev_lock.h>
#include <linux/phy.h>
#include <linux/byteorder/generic.h>
#include <linux/if_arp.h>
@@ -4141,11 +4142,14 @@ static int macsec_add_dev(struct net_device *dev, sci_t sci, u8 icv_len)
static struct lock_class_key macsec_netdev_addr_lock_key;
-static int macsec_newlink(struct net *net, struct net_device *dev,
- struct nlattr *tb[], struct nlattr *data[],
+static int macsec_newlink(struct net_device *dev,
+ struct rtnl_newlink_params *params,
struct netlink_ext_ack *extack)
{
+ struct net *link_net = rtnl_newlink_link_net(params);
struct macsec_dev *macsec = macsec_priv(dev);
+ struct nlattr **data = params->data;
+ struct nlattr **tb = params->tb;
rx_handler_func_t *rx_handler;
u8 icv_len = MACSEC_DEFAULT_ICV_LEN;
struct net_device *real_dev;
@@ -4154,7 +4158,7 @@ static int macsec_newlink(struct net *net, struct net_device *dev,
if (!tb[IFLA_LINK])
return -EINVAL;
- real_dev = __dev_get_by_index(net, nla_get_u32(tb[IFLA_LINK]));
+ real_dev = __dev_get_by_index(link_net, nla_get_u32(tb[IFLA_LINK]));
if (!real_dev)
return -ENODEV;
if (real_dev->type != ARPHRD_ETHER)
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index fed4fe2a4748..d0dfa6bca6cc 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -28,6 +28,7 @@
#include <linux/if_macvlan.h>
#include <linux/hash.h>
#include <linux/workqueue.h>
+#include <net/netdev_lock.h>
#include <net/rtnetlink.h>
#include <net/xfrm.h>
#include <linux/netpoll.h>
@@ -1440,21 +1441,24 @@ static int macvlan_changelink_sources(struct macvlan_dev *vlan, u32 mode,
return 0;
}
-int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
- struct nlattr *tb[], struct nlattr *data[],
+int macvlan_common_newlink(struct net_device *dev,
+ struct rtnl_newlink_params *params,
struct netlink_ext_ack *extack)
{
+ struct net *link_net = rtnl_newlink_link_net(params);
struct macvlan_dev *vlan = netdev_priv(dev);
- struct macvlan_port *port;
+ struct nlattr **data = params->data;
+ struct nlattr **tb = params->tb;
struct net_device *lowerdev;
- int err;
- int macmode;
+ struct macvlan_port *port;
bool create = false;
+ int macmode;
+ int err;
if (!tb[IFLA_LINK])
return -EINVAL;
- lowerdev = __dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK]));
+ lowerdev = __dev_get_by_index(link_net, nla_get_u32(tb[IFLA_LINK]));
if (lowerdev == NULL)
return -ENODEV;
@@ -1565,11 +1569,11 @@ destroy_macvlan_port:
}
EXPORT_SYMBOL_GPL(macvlan_common_newlink);
-static int macvlan_newlink(struct net *src_net, struct net_device *dev,
- struct nlattr *tb[], struct nlattr *data[],
+static int macvlan_newlink(struct net_device *dev,
+ struct rtnl_newlink_params *params,
struct netlink_ext_ack *extack)
{
- return macvlan_common_newlink(src_net, dev, tb, data, extack);
+ return macvlan_common_newlink(dev, params, extack);
}
void macvlan_dellink(struct net_device *dev, struct list_head *head)
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index 29a5929d48e5..b391a0f740a3 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -77,8 +77,8 @@ static void macvtap_update_features(struct tap_dev *tap,
netdev_update_features(vlan->dev);
}
-static int macvtap_newlink(struct net *src_net, struct net_device *dev,
- struct nlattr *tb[], struct nlattr *data[],
+static int macvtap_newlink(struct net_device *dev,
+ struct rtnl_newlink_params *params,
struct netlink_ext_ack *extack)
{
struct macvtap_dev *vlantap = netdev_priv(dev);
@@ -105,7 +105,7 @@ static int macvtap_newlink(struct net *src_net, struct net_device *dev,
/* Don't put anything that may fail after macvlan_common_newlink
* because we can't undo what it does.
*/
- err = macvlan_common_newlink(src_net, dev, tb, data, extack);
+ err = macvlan_common_newlink(dev, params, extack);
if (err) {
netdev_rx_handler_unregister(dev);
return err;
diff --git a/drivers/net/mctp/Kconfig b/drivers/net/mctp/Kconfig
index 15860d6ac39f..cf325ab0b1ef 100644
--- a/drivers/net/mctp/Kconfig
+++ b/drivers/net/mctp/Kconfig
@@ -47,6 +47,16 @@ config MCTP_TRANSPORT_I3C
A MCTP protocol network device is created for each I3C bus
having a "mctp-controller" devicetree property.
+config MCTP_TRANSPORT_USB
+ tristate "MCTP USB transport"
+ depends on USB
+ help
+ Provides a driver to access MCTP devices over USB transport,
+ defined by DMTF specification DSP0283.
+
+ MCTP-over-USB interfaces are peer-to-peer, so each interface
+ represents a physical connection to one remote MCTP endpoint.
+
endmenu
endif
diff --git a/drivers/net/mctp/Makefile b/drivers/net/mctp/Makefile
index e1cb99ced54a..c36006849a1e 100644
--- a/drivers/net/mctp/Makefile
+++ b/drivers/net/mctp/Makefile
@@ -1,3 +1,4 @@
obj-$(CONFIG_MCTP_SERIAL) += mctp-serial.o
obj-$(CONFIG_MCTP_TRANSPORT_I2C) += mctp-i2c.o
obj-$(CONFIG_MCTP_TRANSPORT_I3C) += mctp-i3c.o
+obj-$(CONFIG_MCTP_TRANSPORT_USB) += mctp-usb.o
diff --git a/drivers/net/mctp/mctp-i2c.c b/drivers/net/mctp/mctp-i2c.c
index e3dcdeacc12c..f782d93f826e 100644
--- a/drivers/net/mctp/mctp-i2c.c
+++ b/drivers/net/mctp/mctp-i2c.c
@@ -537,7 +537,7 @@ static void mctp_i2c_xmit(struct mctp_i2c_dev *midev, struct sk_buff *skb)
rc = __i2c_transfer(midev->adapter, &msg, 1);
/* on tx errors, the flow can no longer be considered valid */
- if (rc)
+ if (rc < 0)
mctp_i2c_invalidate_tx_flow(midev, skb);
break;
@@ -583,6 +583,7 @@ static int mctp_i2c_header_create(struct sk_buff *skb, struct net_device *dev,
struct mctp_i2c_hdr *hdr;
struct mctp_hdr *mhdr;
u8 lldst, llsrc;
+ int rc;
if (len > MCTP_I2C_MAXMTU)
return -EMSGSIZE;
@@ -593,6 +594,10 @@ static int mctp_i2c_header_create(struct sk_buff *skb, struct net_device *dev,
lldst = *((u8 *)daddr);
llsrc = *((u8 *)saddr);
+ rc = skb_cow_head(skb, sizeof(struct mctp_i2c_hdr));
+ if (rc)
+ return rc;
+
skb_push(skb, sizeof(struct mctp_i2c_hdr));
skb_reset_mac_header(skb);
hdr = (void *)skb_mac_header(skb);
diff --git a/drivers/net/mctp/mctp-i3c.c b/drivers/net/mctp/mctp-i3c.c
index d247fe483c58..c678f79aa356 100644
--- a/drivers/net/mctp/mctp-i3c.c
+++ b/drivers/net/mctp/mctp-i3c.c
@@ -506,6 +506,14 @@ static int mctp_i3c_header_create(struct sk_buff *skb, struct net_device *dev,
const void *saddr, unsigned int len)
{
struct mctp_i3c_internal_hdr *ihdr;
+ int rc;
+
+ if (!daddr || !saddr)
+ return -EINVAL;
+
+ rc = skb_cow_head(skb, sizeof(struct mctp_i3c_internal_hdr));
+ if (rc)
+ return rc;
skb_push(skb, sizeof(struct mctp_i3c_internal_hdr));
skb_reset_mac_header(skb);
diff --git a/drivers/net/mctp/mctp-usb.c b/drivers/net/mctp/mctp-usb.c
new file mode 100644
index 000000000000..e8d4b01c3f34
--- /dev/null
+++ b/drivers/net/mctp/mctp-usb.c
@@ -0,0 +1,385 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * mctp-usb.c - MCTP-over-USB (DMTF DSP0283) transport binding driver.
+ *
+ * DSP0283 is available at:
+ * https://www.dmtf.org/sites/default/files/standards/documents/DSP0283_1.0.1.pdf
+ *
+ * Copyright (C) 2024-2025 Code Construct Pty Ltd
+ */
+
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/usb.h>
+#include <linux/usb/mctp-usb.h>
+
+#include <net/mctp.h>
+#include <net/mctpdevice.h>
+#include <net/pkt_sched.h>
+
+#include <uapi/linux/if_arp.h>
+
+struct mctp_usb {
+ struct usb_device *usbdev;
+ struct usb_interface *intf;
+ bool stopped;
+
+ struct net_device *netdev;
+
+ u8 ep_in;
+ u8 ep_out;
+
+ struct urb *tx_urb;
+ struct urb *rx_urb;
+
+ struct delayed_work rx_retry_work;
+};
+
+static void mctp_usb_out_complete(struct urb *urb)
+{
+ struct sk_buff *skb = urb->context;
+ struct net_device *netdev = skb->dev;
+ int status;
+
+ status = urb->status;
+
+ switch (status) {
+ case -ENOENT:
+ case -ECONNRESET:
+ case -ESHUTDOWN:
+ case -EPROTO:
+ dev_dstats_tx_dropped(netdev);
+ break;
+ case 0:
+ dev_dstats_tx_add(netdev, skb->len);
+ netif_wake_queue(netdev);
+ consume_skb(skb);
+ return;
+ default:
+ netdev_dbg(netdev, "unexpected tx urb status: %d\n", status);
+ dev_dstats_tx_dropped(netdev);
+ }
+
+ kfree_skb(skb);
+}
+
+static netdev_tx_t mctp_usb_start_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct mctp_usb *mctp_usb = netdev_priv(dev);
+ struct mctp_usb_hdr *hdr;
+ unsigned int plen;
+ struct urb *urb;
+ int rc;
+
+ plen = skb->len;
+
+ if (plen + sizeof(*hdr) > MCTP_USB_XFER_SIZE)
+ goto err_drop;
+
+ rc = skb_cow_head(skb, sizeof(*hdr));
+ if (rc)
+ goto err_drop;
+
+ hdr = skb_push(skb, sizeof(*hdr));
+ if (!hdr)
+ goto err_drop;
+
+ hdr->id = cpu_to_be16(MCTP_USB_DMTF_ID);
+ hdr->rsvd = 0;
+ hdr->len = plen + sizeof(*hdr);
+
+ urb = mctp_usb->tx_urb;
+
+ usb_fill_bulk_urb(urb, mctp_usb->usbdev,
+ usb_sndbulkpipe(mctp_usb->usbdev, mctp_usb->ep_out),
+ skb->data, skb->len,
+ mctp_usb_out_complete, skb);
+
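+ /* Only one TX URB is in flight at a time: the queue is stopped below
+ * on a successful submission, and mctp_usb_out_complete() wakes it
+ * again on successful completion.
+ */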
+ rc = usb_submit_urb(urb, GFP_ATOMIC);
+ if (rc)
+ goto err_drop;
+ else
+ netif_stop_queue(dev);
+
+ return NETDEV_TX_OK;
+
+err_drop:
+ dev_dstats_tx_dropped(dev);
+ kfree_skb(skb);
+ return NETDEV_TX_OK;
+}
+
+static void mctp_usb_in_complete(struct urb *urb);
+
+/* If we fail to queue an in urb atomically (either due to skb allocation or
+ * urb submission failure), we schedule an rx re-queue in nonatomic context
+ * after a delay, specified in jiffies.
+ */
+static const unsigned long RX_RETRY_DELAY = HZ / 4;
+
+static int mctp_usb_rx_queue(struct mctp_usb *mctp_usb, gfp_t gfp)
+{
+ struct sk_buff *skb;
+ int rc;
+
+ skb = __netdev_alloc_skb(mctp_usb->netdev, MCTP_USB_XFER_SIZE, gfp);
+ if (!skb) {
+ rc = -ENOMEM;
+ goto err_retry;
+ }
+
+ usb_fill_bulk_urb(mctp_usb->rx_urb, mctp_usb->usbdev,
+ usb_rcvbulkpipe(mctp_usb->usbdev, mctp_usb->ep_in),
+ skb->data, MCTP_USB_XFER_SIZE,
+ mctp_usb_in_complete, skb);
+
+ rc = usb_submit_urb(mctp_usb->rx_urb, gfp);
+ if (rc) {
+ netdev_dbg(mctp_usb->netdev, "rx urb submit failure: %d\n", rc);
+ kfree_skb(skb);
+ if (rc == -ENOMEM)
+ goto err_retry;
+ }
+
+ return rc;
+
+err_retry:
+ schedule_delayed_work(&mctp_usb->rx_retry_work, RX_RETRY_DELAY);
+ return rc;
+}
+
+static void mctp_usb_in_complete(struct urb *urb)
+{
+ struct sk_buff *skb = urb->context;
+ struct net_device *netdev = skb->dev;
+ struct mctp_usb *mctp_usb = netdev_priv(netdev);
+ struct mctp_skb_cb *cb;
+ unsigned int len;
+ int status;
+
+ status = urb->status;
+
+ switch (status) {
+ case -ENOENT:
+ case -ECONNRESET:
+ case -ESHUTDOWN:
+ case -EPROTO:
+ kfree_skb(skb);
+ return;
+ case 0:
+ break;
+ default:
+ netdev_dbg(netdev, "unexpected rx urb status: %d\n", status);
+ kfree_skb(skb);
+ return;
+ }
+
+ len = urb->actual_length;
+ __skb_put(skb, len);
+
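+ /* A single bulk transfer may carry several MCTP packets, each prefixed
+ * by its own USB header; walk and deliver them one at a time.
+ */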
+ while (skb) {
+ struct sk_buff *skb2 = NULL;
+ struct mctp_usb_hdr *hdr;
+ u8 pkt_len; /* length of MCTP packet, no USB header */
+
+ hdr = skb_pull_data(skb, sizeof(*hdr));
+ if (!hdr)
+ break;
+
+ if (be16_to_cpu(hdr->id) != MCTP_USB_DMTF_ID) {
+ netdev_dbg(netdev, "rx: invalid id %04x\n",
+ be16_to_cpu(hdr->id));
+ break;
+ }
+
+ if (hdr->len <
+ sizeof(struct mctp_hdr) + sizeof(struct mctp_usb_hdr)) {
+ netdev_dbg(netdev, "rx: short packet (hdr) %d\n",
+ hdr->len);
+ break;
+ }
+
+ /* we know we have at least sizeof(struct mctp_usb_hdr) here */
+ pkt_len = hdr->len - sizeof(struct mctp_usb_hdr);
+ if (pkt_len > skb->len) {
+ netdev_dbg(netdev,
+ "rx: short packet (xfer) %d, actual %d\n",
+ hdr->len, skb->len);
+ break;
+ }
+
+ if (pkt_len < skb->len) {
+ /* more packets may follow - clone to a new
+ * skb to use on the next iteration
+ */
+ skb2 = skb_clone(skb, GFP_ATOMIC);
+ if (skb2) {
+ if (!skb_pull(skb2, pkt_len)) {
+ kfree_skb(skb2);
+ skb2 = NULL;
+ }
+ }
+ skb_trim(skb, pkt_len);
+ }
+
+ dev_dstats_rx_add(netdev, skb->len);
+
+ skb->protocol = htons(ETH_P_MCTP);
+ skb_reset_network_header(skb);
+ cb = __mctp_cb(skb);
+ cb->halen = 0;
+ netif_rx(skb);
+
+ skb = skb2;
+ }
+
+ if (skb)
+ kfree_skb(skb);
+
+ mctp_usb_rx_queue(mctp_usb, GFP_ATOMIC);
+}
+
+static void mctp_usb_rx_retry_work(struct work_struct *work)
+{
+ struct mctp_usb *mctp_usb = container_of(work, struct mctp_usb,
+ rx_retry_work.work);
+
+ if (READ_ONCE(mctp_usb->stopped))
+ return;
+
+ mctp_usb_rx_queue(mctp_usb, GFP_KERNEL);
+}
+
+static int mctp_usb_open(struct net_device *dev)
+{
+ struct mctp_usb *mctp_usb = netdev_priv(dev);
+
+ WRITE_ONCE(mctp_usb->stopped, false);
+
+ return mctp_usb_rx_queue(mctp_usb, GFP_KERNEL);
+}
+
+static int mctp_usb_stop(struct net_device *dev)
+{
+ struct mctp_usb *mctp_usb = netdev_priv(dev);
+
+ netif_stop_queue(dev);
+
+ /* prevent RX submission retry */
+ WRITE_ONCE(mctp_usb->stopped, true);
+
+ usb_kill_urb(mctp_usb->rx_urb);
+ usb_kill_urb(mctp_usb->tx_urb);
+
+ cancel_delayed_work_sync(&mctp_usb->rx_retry_work);
+
+ return 0;
+}
+
+static const struct net_device_ops mctp_usb_netdev_ops = {
+ .ndo_start_xmit = mctp_usb_start_xmit,
+ .ndo_open = mctp_usb_open,
+ .ndo_stop = mctp_usb_stop,
+};
+
+static void mctp_usb_netdev_setup(struct net_device *dev)
+{
+ dev->type = ARPHRD_MCTP;
+
+ dev->mtu = MCTP_USB_MTU_MIN;
+ dev->min_mtu = MCTP_USB_MTU_MIN;
+ dev->max_mtu = MCTP_USB_MTU_MAX;
+
+ dev->hard_header_len = sizeof(struct mctp_usb_hdr);
+ dev->tx_queue_len = DEFAULT_TX_QUEUE_LEN;
+ dev->flags = IFF_NOARP;
+ dev->netdev_ops = &mctp_usb_netdev_ops;
+ dev->pcpu_stat_type = NETDEV_PCPU_STAT_DSTATS;
+}
+
+static int mctp_usb_probe(struct usb_interface *intf,
+ const struct usb_device_id *id)
+{
+ struct usb_endpoint_descriptor *ep_in, *ep_out;
+ struct usb_host_interface *iface_desc;
+ struct net_device *netdev;
+ struct mctp_usb *dev;
+ int rc;
+
+ /* only one alternate */
+ iface_desc = intf->cur_altsetting;
+
+ rc = usb_find_common_endpoints(iface_desc, &ep_in, &ep_out, NULL, NULL);
+ if (rc) {
+ dev_err(&intf->dev, "invalid endpoints on device?\n");
+ return rc;
+ }
+
+ netdev = alloc_netdev(sizeof(*dev), "mctpusb%d", NET_NAME_ENUM,
+ mctp_usb_netdev_setup);
+ if (!netdev)
+ return -ENOMEM;
+
+ SET_NETDEV_DEV(netdev, &intf->dev);
+ dev = netdev_priv(netdev);
+ dev->netdev = netdev;
+ dev->usbdev = usb_get_dev(interface_to_usbdev(intf));
+ dev->intf = intf;
+ usb_set_intfdata(intf, dev);
+
+ dev->ep_in = ep_in->bEndpointAddress;
+ dev->ep_out = ep_out->bEndpointAddress;
+
+ dev->tx_urb = usb_alloc_urb(0, GFP_KERNEL);
+ dev->rx_urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!dev->tx_urb || !dev->rx_urb) {
+ rc = -ENOMEM;
+ goto err_free_urbs;
+ }
+
+ INIT_DELAYED_WORK(&dev->rx_retry_work, mctp_usb_rx_retry_work);
+
+ rc = mctp_register_netdev(netdev, NULL, MCTP_PHYS_BINDING_USB);
+ if (rc)
+ goto err_free_urbs;
+
+ return 0;
+
+err_free_urbs:
+ usb_free_urb(dev->tx_urb);
+ usb_free_urb(dev->rx_urb);
+ free_netdev(netdev);
+ return rc;
+}
+
+static void mctp_usb_disconnect(struct usb_interface *intf)
+{
+ struct mctp_usb *dev = usb_get_intfdata(intf);
+
+ mctp_unregister_netdev(dev->netdev);
+ usb_free_urb(dev->tx_urb);
+ usb_free_urb(dev->rx_urb);
+ usb_put_dev(dev->usbdev);
+ free_netdev(dev->netdev);
+}
+
+static const struct usb_device_id mctp_usb_devices[] = {
+ { USB_INTERFACE_INFO(USB_CLASS_MCTP, 0x0, 0x1) },
+ { 0 },
+};
+
+MODULE_DEVICE_TABLE(usb, mctp_usb_devices);
+
+static struct usb_driver mctp_usb_driver = {
+ .name = "mctp-usb",
+ .id_table = mctp_usb_devices,
+ .probe = mctp_usb_probe,
+ .disconnect = mctp_usb_disconnect,
+};
+
+module_usb_driver(mctp_usb_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jeremy Kerr <jk@codeconstruct.com.au>");
+MODULE_DESCRIPTION("MCTP USB transport");
diff --git a/drivers/net/mdio/mdio-i2c.c b/drivers/net/mdio/mdio-i2c.c
index da2001ea1f99..53e96bfab542 100644
--- a/drivers/net/mdio/mdio-i2c.c
+++ b/drivers/net/mdio/mdio-i2c.c
@@ -106,6 +106,62 @@ static int i2c_mii_write_default_c22(struct mii_bus *bus, int phy_id, int reg,
return i2c_mii_write_default_c45(bus, phy_id, -1, reg, val);
}
+static int smbus_byte_mii_read_default_c22(struct mii_bus *bus, int phy_id,
+ int reg)
+{
+ struct i2c_adapter *i2c = bus->priv;
+ union i2c_smbus_data smbus_data;
+ int val = 0, ret;
+
+ if (!i2c_mii_valid_phy_id(phy_id))
+ return 0;
+
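+ /* The 16-bit MII register value is assembled from two consecutive
+ * SMBus byte reads: the first read supplies the high byte, the second
+ * the low byte.
+ */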
+ ret = i2c_smbus_xfer(i2c, i2c_mii_phy_addr(phy_id), 0,
+ I2C_SMBUS_READ, reg,
+ I2C_SMBUS_BYTE_DATA, &smbus_data);
+ if (ret < 0)
+ return ret;
+
+ val = (smbus_data.byte & 0xff) << 8;
+
+ ret = i2c_smbus_xfer(i2c, i2c_mii_phy_addr(phy_id), 0,
+ I2C_SMBUS_READ, reg,
+ I2C_SMBUS_BYTE_DATA, &smbus_data);
+ if (ret < 0)
+ return ret;
+
+ val |= smbus_data.byte & 0xff;
+
+ return val;
+}
+
+static int smbus_byte_mii_write_default_c22(struct mii_bus *bus, int phy_id,
+ int reg, u16 val)
+{
+ struct i2c_adapter *i2c = bus->priv;
+ union i2c_smbus_data smbus_data;
+ int ret;
+
+ if (!i2c_mii_valid_phy_id(phy_id))
+ return 0;
+
+ smbus_data.byte = (val & 0xff00) >> 8;
+
+ ret = i2c_smbus_xfer(i2c, i2c_mii_phy_addr(phy_id), 0,
+ I2C_SMBUS_WRITE, reg,
+ I2C_SMBUS_BYTE_DATA, &smbus_data);
+ if (ret < 0)
+ return ret;
+
+ smbus_data.byte = val & 0xff;
+
+ ret = i2c_smbus_xfer(i2c, i2c_mii_phy_addr(phy_id), 0,
+ I2C_SMBUS_WRITE, reg,
+ I2C_SMBUS_BYTE_DATA, &smbus_data);
+
+ return ret < 0 ? ret : 0;
+}
+
/* RollBall SFPs do not access internal PHY via I2C address 0x56, but
* instead via address 0x51, when SFP page is set to 0x03 and password to
* 0xffffffff.
@@ -378,13 +434,26 @@ static int i2c_mii_init_rollball(struct i2c_adapter *i2c)
return 0;
}
+static bool mdio_i2c_check_functionality(struct i2c_adapter *i2c,
+ enum mdio_i2c_proto protocol)
+{
+ if (i2c_check_functionality(i2c, I2C_FUNC_I2C))
+ return true;
+
+ if (i2c_check_functionality(i2c, I2C_FUNC_SMBUS_BYTE_DATA) &&
+ protocol == MDIO_I2C_MARVELL_C22)
+ return true;
+
+ return false;
+}
+
struct mii_bus *mdio_i2c_alloc(struct device *parent, struct i2c_adapter *i2c,
enum mdio_i2c_proto protocol)
{
struct mii_bus *mii;
int ret;
- if (!i2c_check_functionality(i2c, I2C_FUNC_I2C))
+ if (!mdio_i2c_check_functionality(i2c, protocol))
return ERR_PTR(-EINVAL);
mii = mdiobus_alloc();
@@ -395,6 +464,14 @@ struct mii_bus *mdio_i2c_alloc(struct device *parent, struct i2c_adapter *i2c,
mii->parent = parent;
mii->priv = i2c;
+ /* Only use SMBus if we have no other choice */
+ if (i2c_check_functionality(i2c, I2C_FUNC_SMBUS_BYTE_DATA) &&
+ !i2c_check_functionality(i2c, I2C_FUNC_I2C)) {
+ mii->read = smbus_byte_mii_read_default_c22;
+ mii->write = smbus_byte_mii_write_default_c22;
+ return mii;
+ }
+
switch (protocol) {
case MDIO_I2C_ROLLBALL:
ret = i2c_mii_init_rollball(i2c);
diff --git a/drivers/net/net_failover.c b/drivers/net/net_failover.c
index 54c8b9d5b5fc..5b50d9186f12 100644
--- a/drivers/net/net_failover.c
+++ b/drivers/net/net_failover.c
@@ -734,7 +734,7 @@ struct failover *net_failover_create(struct net_device *standby_dev)
failover_dev->lltx = true;
/* Don't allow failover devices to change network namespaces. */
- failover_dev->netns_local = true;
+ failover_dev->netns_immutable = true;
failover_dev->hw_features = FAILOVER_VLAN_FEATURES |
NETIF_F_HW_VLAN_CTAG_TX |
diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c
index 86ab4a42769a..4289ccd3e41b 100644
--- a/drivers/net/netconsole.c
+++ b/drivers/net/netconsole.c
@@ -45,12 +45,12 @@ MODULE_DESCRIPTION("Console driver for network interfaces");
MODULE_LICENSE("GPL");
#define MAX_PARAM_LENGTH 256
-#define MAX_USERDATA_ENTRY_LENGTH 256
-#define MAX_USERDATA_VALUE_LENGTH 200
+#define MAX_EXTRADATA_ENTRY_LEN 256
+#define MAX_EXTRADATA_VALUE_LEN 200
/* The number 3 comes from userdata entry format characters (' ', '=', '\n') */
-#define MAX_USERDATA_NAME_LENGTH (MAX_USERDATA_ENTRY_LENGTH - \
- MAX_USERDATA_VALUE_LENGTH - 3)
-#define MAX_USERDATA_ITEMS 16
+#define MAX_EXTRADATA_NAME_LEN (MAX_EXTRADATA_ENTRY_LEN - \
+ MAX_EXTRADATA_VALUE_LEN - 3)
+#define MAX_EXTRADATA_ITEMS 16
#define MAX_PRINT_CHUNK 1000
static char config[MAX_PARAM_LENGTH];
@@ -97,13 +97,27 @@ struct netconsole_target_stats {
struct u64_stats_sync syncp;
};
+/* Features enabled in sysdata. Unlike userdata, this data is populated by
+ * the kernel. The fields are designed as bitwise flags, allowing multiple
+ * features to be set in sysdata_fields.
+ */
+enum sysdata_feature {
+ /* Populate the CPU that sends the message */
+ SYSDATA_CPU_NR = BIT(0),
+ /* Populate the task name (as in current->comm) in sysdata */
+ SYSDATA_TASKNAME = BIT(1),
+ /* Kernel release/version as part of sysdata */
+ SYSDATA_RELEASE = BIT(2),
+};
+
/**
* struct netconsole_target - Represents a configured netconsole target.
* @list: Links this target into the target_list.
* @group: Links us into the configfs subsystem hierarchy.
* @userdata_group: Links to the userdata configfs hierarchy
- * @userdata_complete: Cached, formatted string of append
- * @userdata_length: String length of userdata_complete
+ * @extradata_complete: Cached, formatted string of extradata to append
+ * @userdata_length: String length of userdata in extradata_complete.
+ * @sysdata_fields: Sysdata features enabled.
* @stats: Packet send stats for the target. Used for debugging.
* @enabled: On / off knob to enable / disable target.
* Visible from userspace (read-write).
@@ -123,20 +137,25 @@ struct netconsole_target_stats {
* remote_ip (read-write)
* local_mac (read-only)
* remote_mac (read-write)
+ * @buf: The buffer used to send the full msg to the network stack
*/
struct netconsole_target {
struct list_head list;
#ifdef CONFIG_NETCONSOLE_DYNAMIC
struct config_group group;
struct config_group userdata_group;
- char userdata_complete[MAX_USERDATA_ENTRY_LENGTH * MAX_USERDATA_ITEMS];
+ char extradata_complete[MAX_EXTRADATA_ENTRY_LEN * MAX_EXTRADATA_ITEMS];
size_t userdata_length;
+ /* bit-wise with sysdata_feature bits */
+ u32 sysdata_fields;
#endif
struct netconsole_target_stats stats;
bool enabled;
bool extended;
bool release;
struct netpoll np;
+ /* protected by target_list_lock */
+ char buf[MAX_PRINT_CHUNK];
};
#ifdef CONFIG_NETCONSOLE_DYNAMIC
@@ -396,6 +415,46 @@ static ssize_t transmit_errors_show(struct config_item *item, char *buf)
return sysfs_emit(buf, "%llu\n", xmit_drop_count + enomem_count);
}
+/* configfs helper to display if cpu_nr sysdata feature is enabled */
+static ssize_t sysdata_cpu_nr_enabled_show(struct config_item *item, char *buf)
+{
+ struct netconsole_target *nt = to_target(item->ci_parent);
+ bool cpu_nr_enabled;
+
+ mutex_lock(&dynamic_netconsole_mutex);
+ cpu_nr_enabled = !!(nt->sysdata_fields & SYSDATA_CPU_NR);
+ mutex_unlock(&dynamic_netconsole_mutex);
+
+ return sysfs_emit(buf, "%d\n", cpu_nr_enabled);
+}
+
+/* configfs helper to display if taskname sysdata feature is enabled */
+static ssize_t sysdata_taskname_enabled_show(struct config_item *item,
+ char *buf)
+{
+ struct netconsole_target *nt = to_target(item->ci_parent);
+ bool taskname_enabled;
+
+ mutex_lock(&dynamic_netconsole_mutex);
+ taskname_enabled = !!(nt->sysdata_fields & SYSDATA_TASKNAME);
+ mutex_unlock(&dynamic_netconsole_mutex);
+
+ return sysfs_emit(buf, "%d\n", taskname_enabled);
+}
+
+static ssize_t sysdata_release_enabled_show(struct config_item *item,
+ char *buf)
+{
+ struct netconsole_target *nt = to_target(item->ci_parent);
+ bool release_enabled;
+
+ mutex_lock(&dynamic_netconsole_mutex);
+ release_enabled = !!(nt->sysdata_fields & SYSDATA_RELEASE);
+ mutex_unlock(&dynamic_netconsole_mutex);
+
+ return sysfs_emit(buf, "%d\n", release_enabled);
+}
+
/*
* This one is special -- targets created through the configfs interface
* are not enabled (and the corresponding netpoll activated) by default.
@@ -659,6 +718,28 @@ out_unlock:
return ret;
}
+/* Count number of entries we have in extradata.
+ * This is important because the extradata_complete only supports
+ * MAX_EXTRADATA_ITEMS entries. Before enabling any new {user,sys}data
+ * feature, the number of entries needs to be checked for available space.
+ */
+static size_t count_extradata_entries(struct netconsole_target *nt)
+{
+ size_t entries;
+
+ /* Userdata entries */
+ entries = list_count_nodes(&nt->userdata_group.cg_children);
+ /* Plus sysdata entries */
+ if (nt->sysdata_fields & SYSDATA_CPU_NR)
+ entries += 1;
+ if (nt->sysdata_fields & SYSDATA_TASKNAME)
+ entries += 1;
+ if (nt->sysdata_fields & SYSDATA_RELEASE)
+ entries += 1;
+
+ return entries;
+}
+
static ssize_t remote_mac_store(struct config_item *item, const char *buf,
size_t count)
{
@@ -675,7 +756,7 @@ static ssize_t remote_mac_store(struct config_item *item, const char *buf,
if (!mac_pton(buf, remote_mac))
goto out_unlock;
- if (buf[3 * ETH_ALEN - 1] && buf[3 * ETH_ALEN - 1] != '\n')
+ if (buf[MAC_ADDR_STR_LEN] && buf[MAC_ADDR_STR_LEN] != '\n')
goto out_unlock;
memcpy(nt->np.remote_mac, remote_mac, ETH_ALEN);
@@ -687,7 +768,7 @@ out_unlock:
struct userdatum {
struct config_item item;
- char value[MAX_USERDATA_VALUE_LENGTH];
+ char value[MAX_EXTRADATA_VALUE_LEN];
};
static struct userdatum *to_userdatum(struct config_item *item)
@@ -724,13 +805,13 @@ static void update_userdata(struct netconsole_target *nt)
/* Clear the current string in case the last userdatum was deleted */
nt->userdata_length = 0;
- nt->userdata_complete[0] = 0;
+ nt->extradata_complete[0] = 0;
list_for_each(entry, &nt->userdata_group.cg_children) {
struct userdatum *udm_item;
struct config_item *item;
- if (WARN_ON_ONCE(child_count >= MAX_USERDATA_ITEMS))
+ if (WARN_ON_ONCE(child_count >= MAX_EXTRADATA_ITEMS))
break;
child_count++;
@@ -738,19 +819,19 @@ static void update_userdata(struct netconsole_target *nt)
udm_item = to_userdatum(item);
/* Skip userdata with no value set */
- if (strnlen(udm_item->value, MAX_USERDATA_VALUE_LENGTH) == 0)
+ if (strnlen(udm_item->value, MAX_EXTRADATA_VALUE_LEN) == 0)
continue;
- /* This doesn't overflow userdata_complete since it will write
- * one entry length (1/MAX_USERDATA_ITEMS long), entry count is
+ /* This doesn't overflow extradata_complete since it will write
+ * one entry length (1/MAX_EXTRADATA_ITEMS long), entry count is
* checked to not exceed MAX items with child_count above
*/
- complete_idx += scnprintf(&nt->userdata_complete[complete_idx],
- MAX_USERDATA_ENTRY_LENGTH, " %s=%s\n",
+ complete_idx += scnprintf(&nt->extradata_complete[complete_idx],
+ MAX_EXTRADATA_ENTRY_LEN, " %s=%s\n",
item->ci_name, udm_item->value);
}
- nt->userdata_length = strnlen(nt->userdata_complete,
- sizeof(nt->userdata_complete));
+ nt->userdata_length = strnlen(nt->extradata_complete,
+ sizeof(nt->extradata_complete));
}
static ssize_t userdatum_value_store(struct config_item *item, const char *buf,
@@ -761,7 +842,7 @@ static ssize_t userdatum_value_store(struct config_item *item, const char *buf,
struct userdata *ud;
ssize_t ret;
- if (count > MAX_USERDATA_VALUE_LENGTH)
+ if (count > MAX_EXTRADATA_VALUE_LEN)
return -EMSGSIZE;
mutex_lock(&dynamic_netconsole_mutex);
@@ -780,7 +861,132 @@ out_unlock:
return ret;
}
+/* disable_sysdata_feature - Disable sysdata feature and clean sysdata
+ * @nt: target that is disabling the feature
+ * @feature: feature being disabled
+ */
+static void disable_sysdata_feature(struct netconsole_target *nt,
+ enum sysdata_feature feature)
+{
+ nt->sysdata_fields &= ~feature;
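+
+ /* Truncate extradata_complete back to the userdata portion, dropping
+ * any sysdata previously appended after it.
+ */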
+ nt->extradata_complete[nt->userdata_length] = 0;
+}
+
+static ssize_t sysdata_release_enabled_store(struct config_item *item,
+ const char *buf, size_t count)
+{
+ struct netconsole_target *nt = to_target(item->ci_parent);
+ bool release_enabled, curr;
+ ssize_t ret;
+
+ ret = kstrtobool(buf, &release_enabled);
+ if (ret)
+ return ret;
+
+ mutex_lock(&dynamic_netconsole_mutex);
+ curr = !!(nt->sysdata_fields & SYSDATA_RELEASE);
+ if (release_enabled == curr)
+ goto unlock_ok;
+
+ if (release_enabled &&
+ count_extradata_entries(nt) >= MAX_EXTRADATA_ITEMS) {
+ ret = -ENOSPC;
+ goto unlock;
+ }
+
+ if (release_enabled)
+ nt->sysdata_fields |= SYSDATA_RELEASE;
+ else
+ disable_sysdata_feature(nt, SYSDATA_RELEASE);
+
+unlock_ok:
+ ret = strnlen(buf, count);
+unlock:
+ mutex_unlock(&dynamic_netconsole_mutex);
+ return ret;
+}
+
+static ssize_t sysdata_taskname_enabled_store(struct config_item *item,
+ const char *buf, size_t count)
+{
+ struct netconsole_target *nt = to_target(item->ci_parent);
+ bool taskname_enabled, curr;
+ ssize_t ret;
+
+ ret = kstrtobool(buf, &taskname_enabled);
+ if (ret)
+ return ret;
+
+ mutex_lock(&dynamic_netconsole_mutex);
+ curr = !!(nt->sysdata_fields & SYSDATA_TASKNAME);
+ if (taskname_enabled == curr)
+ goto unlock_ok;
+
+ if (taskname_enabled &&
+ count_extradata_entries(nt) >= MAX_EXTRADATA_ITEMS) {
+ ret = -ENOSPC;
+ goto unlock;
+ }
+
+ if (taskname_enabled)
+ nt->sysdata_fields |= SYSDATA_TASKNAME;
+ else
+ disable_sysdata_feature(nt, SYSDATA_TASKNAME);
+
+unlock_ok:
+ ret = strnlen(buf, count);
+unlock:
+ mutex_unlock(&dynamic_netconsole_mutex);
+ return ret;
+}
+
+/* configfs helper to enable/disable the sysdata cpu_nr feature */
+static ssize_t sysdata_cpu_nr_enabled_store(struct config_item *item,
+ const char *buf, size_t count)
+{
+ struct netconsole_target *nt = to_target(item->ci_parent);
+ bool cpu_nr_enabled, curr;
+ ssize_t ret;
+
+ ret = kstrtobool(buf, &cpu_nr_enabled);
+ if (ret)
+ return ret;
+
+ mutex_lock(&dynamic_netconsole_mutex);
+ curr = !!(nt->sysdata_fields & SYSDATA_CPU_NR);
+ if (cpu_nr_enabled == curr)
+ /* no change requested */
+ goto unlock_ok;
+
+ if (cpu_nr_enabled &&
+ count_extradata_entries(nt) >= MAX_EXTRADATA_ITEMS) {
+ /* user wants the new feature, but there is no space in the
+ * buffer.
+ */
+ ret = -ENOSPC;
+ goto unlock;
+ }
+
+ if (cpu_nr_enabled)
+ nt->sysdata_fields |= SYSDATA_CPU_NR;
+ else
+ /* This is special because extradata_complete might have
+ * remaining data from previous sysdata, and it needs to be
+ * cleaned.
+ */
+ disable_sysdata_feature(nt, SYSDATA_CPU_NR);
+
+unlock_ok:
+ ret = strnlen(buf, count);
+unlock:
+ mutex_unlock(&dynamic_netconsole_mutex);
+ return ret;
+}
+
CONFIGFS_ATTR(userdatum_, value);
+CONFIGFS_ATTR(sysdata_, cpu_nr_enabled);
+CONFIGFS_ATTR(sysdata_, taskname_enabled);
+CONFIGFS_ATTR(sysdata_, release_enabled);
static struct configfs_attribute *userdatum_attrs[] = {
&userdatum_attr_value,
@@ -808,15 +1014,13 @@ static struct config_item *userdatum_make_item(struct config_group *group,
struct netconsole_target *nt;
struct userdatum *udm;
struct userdata *ud;
- size_t child_count;
- if (strlen(name) > MAX_USERDATA_NAME_LENGTH)
+ if (strlen(name) > MAX_EXTRADATA_NAME_LEN)
return ERR_PTR(-ENAMETOOLONG);
ud = to_userdata(&group->cg_item);
nt = userdata_to_target(ud);
- child_count = list_count_nodes(&nt->userdata_group.cg_children);
- if (child_count >= MAX_USERDATA_ITEMS)
+ if (count_extradata_entries(nt) >= MAX_EXTRADATA_ITEMS)
return ERR_PTR(-ENOSPC);
udm = kzalloc(sizeof(*udm), GFP_KERNEL);
@@ -842,6 +1046,9 @@ static void userdatum_drop(struct config_group *group, struct config_item *item)
}
static struct configfs_attribute *userdata_attrs[] = {
+ &sysdata_attr_cpu_nr_enabled,
+ &sysdata_attr_taskname_enabled,
+ &sysdata_attr_release_enabled,
NULL,
};
@@ -1017,6 +1224,63 @@ static void populate_configfs_item(struct netconsole_target *nt,
init_target_config_group(nt, target_name);
}
+static int sysdata_append_cpu_nr(struct netconsole_target *nt, int offset)
+{
+ /* Append cpu=%d at extradata_complete after userdata str */
+ return scnprintf(&nt->extradata_complete[offset],
+ MAX_EXTRADATA_ENTRY_LEN, " cpu=%u\n",
+ raw_smp_processor_id());
+}
+
+static int sysdata_append_taskname(struct netconsole_target *nt, int offset)
+{
+ return scnprintf(&nt->extradata_complete[offset],
+ MAX_EXTRADATA_ENTRY_LEN, " taskname=%s\n",
+ current->comm);
+}
+
+static int sysdata_append_release(struct netconsole_target *nt, int offset)
+{
+ return scnprintf(&nt->extradata_complete[offset],
+ MAX_EXTRADATA_ENTRY_LEN, " release=%s\n",
+ init_utsname()->release);
+}
+
+/*
+ * prepare_extradata - append sysdata at extradata_complete at runtime
+ * @nt: target to send message to
+ */
+static int prepare_extradata(struct netconsole_target *nt)
+{
+ u32 fields = SYSDATA_CPU_NR | SYSDATA_TASKNAME | SYSDATA_RELEASE;
+ int extradata_len;
+
+ /* userdata was appended when configfs write helper was called
+ * by update_userdata().
+ */
+ extradata_len = nt->userdata_length;
+
+ if (!(nt->sysdata_fields & fields))
+ goto out;
+
+ if (nt->sysdata_fields & SYSDATA_CPU_NR)
+ extradata_len += sysdata_append_cpu_nr(nt, extradata_len);
+ if (nt->sysdata_fields & SYSDATA_TASKNAME)
+ extradata_len += sysdata_append_taskname(nt, extradata_len);
+ if (nt->sysdata_fields & SYSDATA_RELEASE)
+ extradata_len += sysdata_append_release(nt, extradata_len);
+
+ WARN_ON_ONCE(extradata_len >
+ MAX_EXTRADATA_ENTRY_LEN * MAX_EXTRADATA_ITEMS);
+
+out:
+ return extradata_len;
+}
+#else /* CONFIG_NETCONSOLE_DYNAMIC not set */
+static int prepare_extradata(struct netconsole_target *nt)
+{
+ return 0;
+}
#endif /* CONFIG_NETCONSOLE_DYNAMIC */
/* Handle network interface device notifications */
@@ -1117,29 +1381,28 @@ static void send_msg_no_fragmentation(struct netconsole_target *nt,
int msg_len,
int release_len)
{
- static char buf[MAX_PRINT_CHUNK]; /* protected by target_list_lock */
- const char *userdata = NULL;
+ const char *extradata = NULL;
const char *release;
#ifdef CONFIG_NETCONSOLE_DYNAMIC
- userdata = nt->userdata_complete;
+ extradata = nt->extradata_complete;
#endif
if (release_len) {
release = init_utsname()->release;
- scnprintf(buf, MAX_PRINT_CHUNK, "%s,%s", release, msg);
+ scnprintf(nt->buf, MAX_PRINT_CHUNK, "%s,%s", release, msg);
msg_len += release_len;
} else {
- memcpy(buf, msg, msg_len);
+ memcpy(nt->buf, msg, msg_len);
}
- if (userdata)
- msg_len += scnprintf(&buf[msg_len],
+ if (extradata)
+ msg_len += scnprintf(&nt->buf[msg_len],
MAX_PRINT_CHUNK - msg_len,
- "%s", userdata);
+ "%s", extradata);
- send_udp(nt, buf, msg_len);
+ send_udp(nt, nt->buf, msg_len);
}
static void append_release(char *buf)
@@ -1150,28 +1413,27 @@ static void append_release(char *buf)
scnprintf(buf, MAX_PRINT_CHUNK, "%s,", release);
}
-static void send_fragmented_body(struct netconsole_target *nt, char *buf,
+static void send_fragmented_body(struct netconsole_target *nt,
const char *msgbody, int header_len,
- int msgbody_len)
+ int msgbody_len, int extradata_len)
{
- const char *userdata = NULL;
+ int sent_extradata, preceding_bytes;
+ const char *extradata = NULL;
int body_len, offset = 0;
- int userdata_len = 0;
#ifdef CONFIG_NETCONSOLE_DYNAMIC
- userdata = nt->userdata_complete;
- userdata_len = nt->userdata_length;
+ extradata = nt->extradata_complete;
#endif
/* body_len represents the number of bytes that will be sent. This is
* bigger than MAX_PRINT_CHUNK, thus, it will be split in multiple
* packets
*/
- body_len = msgbody_len + userdata_len;
+ body_len = msgbody_len + extradata_len;
/* In each iteration of the while loop below, we send a packet
* containing the header and a portion of the body. The body is
- * composed of two parts: msgbody and userdata. We keep track of how
+ * composed of two parts: msgbody and extradata. We keep track of how
* many bytes have been sent so far using the offset variable, which
* ranges from 0 to the total length of the body.
*/
@@ -1181,7 +1443,7 @@ static void send_fragmented_body(struct netconsole_target *nt, char *buf,
int this_offset = 0;
int this_chunk = 0;
- this_header += scnprintf(buf + this_header,
+ this_header += scnprintf(nt->buf + this_header,
MAX_PRINT_CHUNK - this_header,
",ncfrag=%d/%d;", offset,
body_len);
@@ -1192,47 +1454,48 @@ static void send_fragmented_body(struct netconsole_target *nt, char *buf,
MAX_PRINT_CHUNK - this_header);
if (WARN_ON_ONCE(this_chunk <= 0))
return;
- memcpy(buf + this_header, msgbody + offset, this_chunk);
+ memcpy(nt->buf + this_header, msgbody + offset,
+ this_chunk);
this_offset += this_chunk;
}
/* msgbody was finally written, either in the previous
* messages and/or in the current buf. Time to write
- * the userdata.
+ * the extradata.
*/
msgbody_written |= offset + this_offset >= msgbody_len;
- /* Msg body is fully written and there is pending userdata to
- * write, append userdata in this chunk
+ /* Msg body is fully written and there is pending extradata to
+ * write, append extradata in this chunk
*/
if (msgbody_written && offset + this_offset < body_len) {
/* Track how much user data was already sent. First
* time here, sent_userdata is zero
*/
- int sent_userdata = (offset + this_offset) - msgbody_len;
+ sent_extradata = (offset + this_offset) - msgbody_len;
/* offset of bytes used in current buf */
- int preceding_bytes = this_chunk + this_header;
+ preceding_bytes = this_chunk + this_header;
- if (WARN_ON_ONCE(sent_userdata < 0))
+ if (WARN_ON_ONCE(sent_extradata < 0))
return;
- this_chunk = min(userdata_len - sent_userdata,
+ this_chunk = min(extradata_len - sent_extradata,
MAX_PRINT_CHUNK - preceding_bytes);
if (WARN_ON_ONCE(this_chunk < 0))
/* this_chunk could be zero if all the previous
* message used all the buffer. This is not a
- * problem, userdata will be sent in the next
+ * problem, extradata will be sent in the next
* iteration
*/
return;
- memcpy(buf + this_header + this_offset,
- userdata + sent_userdata,
+ memcpy(nt->buf + this_header + this_offset,
+ extradata + sent_extradata,
this_chunk);
this_offset += this_chunk;
}
- send_udp(nt, buf, this_header + this_offset);
+ send_udp(nt, nt->buf, this_header + this_offset);
offset += this_offset;
}
}
@@ -1240,9 +1503,9 @@ static void send_fragmented_body(struct netconsole_target *nt, char *buf,
static void send_msg_fragmented(struct netconsole_target *nt,
const char *msg,
int msg_len,
- int release_len)
+ int release_len,
+ int extradata_len)
{
- static char buf[MAX_PRINT_CHUNK]; /* protected by target_list_lock */
int header_len, msgbody_len;
const char *msgbody;
@@ -1260,16 +1523,17 @@ static void send_msg_fragmented(struct netconsole_target *nt,
* "ncfrag=<byte-offset>/<total-bytes>"
*/
if (release_len)
- append_release(buf);
+ append_release(nt->buf);
/* Copy the header into the buffer */
- memcpy(buf + release_len, msg, header_len);
+ memcpy(nt->buf + release_len, msg, header_len);
header_len += release_len;
/* for now on, the header will be persisted, and the msgbody
* will be replaced
*/
- send_fragmented_body(nt, buf, msgbody, header_len, msgbody_len);
+ send_fragmented_body(nt, msgbody, header_len, msgbody_len,
+ extradata_len);
}
/**
@@ -1285,20 +1549,19 @@ static void send_msg_fragmented(struct netconsole_target *nt,
static void send_ext_msg_udp(struct netconsole_target *nt, const char *msg,
int msg_len)
{
- int userdata_len = 0;
int release_len = 0;
+ int extradata_len;
-#ifdef CONFIG_NETCONSOLE_DYNAMIC
- userdata_len = nt->userdata_length;
-#endif
+ extradata_len = prepare_extradata(nt);
if (nt->release)
release_len = strlen(init_utsname()->release) + 1;
- if (msg_len + release_len + userdata_len <= MAX_PRINT_CHUNK)
+ if (msg_len + release_len + extradata_len <= MAX_PRINT_CHUNK)
return send_msg_no_fragmentation(nt, msg, msg_len, release_len);
- return send_msg_fragmented(nt, msg, msg_len, release_len);
+ return send_msg_fragmented(nt, msg, msg_len, release_len,
+ extradata_len);
}
static void write_ext_msg(struct console *con, const char *msg,
diff --git a/drivers/net/netdevsim/bpf.c b/drivers/net/netdevsim/bpf.c
index 608953d4f98d..49537d3c4120 100644
--- a/drivers/net/netdevsim/bpf.c
+++ b/drivers/net/netdevsim/bpf.c
@@ -296,7 +296,8 @@ static int nsim_setup_prog_checks(struct netdevsim *ns, struct netdev_bpf *bpf)
NSIM_EA(bpf->extack, "attempt to load offloaded prog to drv");
return -EINVAL;
}
- if (ns->netdev->mtu > NSIM_XDP_MAX_MTU) {
+ if (bpf->prog && !bpf->prog->aux->xdp_has_frags &&
+ ns->netdev->mtu > NSIM_XDP_MAX_MTU) {
NSIM_EA(bpf->extack, "MTU too large w/ XDP enabled");
return -EINVAL;
}
diff --git a/drivers/net/netdevsim/ethtool.c b/drivers/net/netdevsim/ethtool.c
index 7ab358616e03..4d191a3293c7 100644
--- a/drivers/net/netdevsim/ethtool.c
+++ b/drivers/net/netdevsim/ethtool.c
@@ -107,10 +107,8 @@ nsim_set_channels(struct net_device *dev, struct ethtool_channels *ch)
struct netdevsim *ns = netdev_priv(dev);
int err;
- netdev_lock(dev);
err = netif_set_real_num_queues(dev, ch->combined_count,
ch->combined_count);
- netdev_unlock(dev);
if (err)
return err;
diff --git a/drivers/net/netdevsim/ipsec.c b/drivers/net/netdevsim/ipsec.c
index 88187dd4eb2d..d88bdb9a1717 100644
--- a/drivers/net/netdevsim/ipsec.c
+++ b/drivers/net/netdevsim/ipsec.c
@@ -217,20 +217,9 @@ static void nsim_ipsec_del_sa(struct xfrm_state *xs)
ipsec->count--;
}
-static bool nsim_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *xs)
-{
- struct netdevsim *ns = netdev_priv(xs->xso.real_dev);
- struct nsim_ipsec *ipsec = &ns->ipsec;
-
- ipsec->ok++;
-
- return true;
-}
-
static const struct xfrmdev_ops nsim_xfrmdev_ops = {
.xdo_dev_state_add = nsim_ipsec_add_sa,
.xdo_dev_state_delete = nsim_ipsec_del_sa,
- .xdo_dev_offload_ok = nsim_ipsec_offload_ok,
};
bool nsim_ipsec_tx(struct netdevsim *ns, struct sk_buff *skb)
diff --git a/drivers/net/netdevsim/netdev.c b/drivers/net/netdevsim/netdev.c
index 42f247cbdcee..b67af4651185 100644
--- a/drivers/net/netdevsim/netdev.c
+++ b/drivers/net/netdevsim/netdev.c
@@ -25,6 +25,7 @@
#include <net/page_pool/helpers.h>
#include <net/netlink.h>
#include <net/net_shaper.h>
+#include <net/netdev_lock.h>
#include <net/pkt_cls.h>
#include <net/rtnetlink.h>
#include <net/udp_tunnel.h>
@@ -87,7 +88,8 @@ static netdev_tx_t nsim_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (unlikely(nsim_forward_skb(peer_dev, skb, rq) == NET_RX_DROP))
goto out_drop_cnt;
- napi_schedule(&rq->napi);
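+ /* Defer the NAPI poll through the per-queue hrtimer (about 5us)
+ * rather than scheduling it directly from the xmit path.
+ */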
+ if (!hrtimer_active(&rq->napi_timer))
+ hrtimer_start(&rq->napi_timer, us_to_ktime(5), HRTIMER_MODE_REL);
rcu_read_unlock();
u64_stats_update_begin(&ns->syncp);
@@ -114,7 +116,8 @@ static int nsim_change_mtu(struct net_device *dev, int new_mtu)
{
struct netdevsim *ns = netdev_priv(dev);
- if (ns->xdp.prog && new_mtu > NSIM_XDP_MAX_MTU)
+ if (ns->xdp.prog && !ns->xdp.prog->aux->xdp_has_frags &&
+ new_mtu > NSIM_XDP_MAX_MTU)
return -EBUSY;
WRITE_ONCE(dev->mtu, new_mtu);
@@ -401,7 +404,7 @@ static int nsim_init_napi(struct netdevsim *ns)
for (i = 0; i < dev->num_rx_queues; i++) {
rq = ns->rq[i];
- netif_napi_add_config(dev, &rq->napi, nsim_poll, i);
+ netif_napi_add_config_locked(dev, &rq->napi, nsim_poll, i);
}
for (i = 0; i < dev->num_rx_queues; i++) {
@@ -421,11 +424,27 @@ err_pp_destroy:
}
for (i = 0; i < dev->num_rx_queues; i++)
- __netif_napi_del(&ns->rq[i]->napi);
+ __netif_napi_del_locked(&ns->rq[i]->napi);
return err;
}
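+
+/* hrtimer callback for the per-queue napi_timer: schedules the NAPI poll
+ * that was deferred from nsim_start_xmit().
+ */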
+static enum hrtimer_restart nsim_napi_schedule(struct hrtimer *timer)
+{
+ struct nsim_rq *rq;
+
+ rq = container_of(timer, struct nsim_rq, napi_timer);
+ napi_schedule(&rq->napi);
+
+ return HRTIMER_NORESTART;
+}
+
+static void nsim_rq_timer_init(struct nsim_rq *rq)
+{
+ hrtimer_init(&rq->napi_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ rq->napi_timer.function = nsim_napi_schedule;
+}
+
static void nsim_enable_napi(struct netdevsim *ns)
{
struct net_device *dev = ns->netdev;
@@ -435,7 +454,7 @@ static void nsim_enable_napi(struct netdevsim *ns)
struct nsim_rq *rq = ns->rq[i];
netif_queue_set_napi(dev, i, NETDEV_QUEUE_TYPE_RX, &rq->napi);
- napi_enable(&rq->napi);
+ napi_enable_locked(&rq->napi);
}
}
@@ -444,6 +463,8 @@ static int nsim_open(struct net_device *dev)
struct netdevsim *ns = netdev_priv(dev);
int err;
+ netdev_assert_locked(dev);
+
err = nsim_init_napi(ns);
if (err)
return err;
@@ -461,8 +482,8 @@ static void nsim_del_napi(struct netdevsim *ns)
for (i = 0; i < dev->num_rx_queues; i++) {
struct nsim_rq *rq = ns->rq[i];
- napi_disable(&rq->napi);
- __netif_napi_del(&rq->napi);
+ napi_disable_locked(&rq->napi);
+ __netif_napi_del_locked(&rq->napi);
}
synchronize_net();
@@ -477,6 +498,8 @@ static int nsim_stop(struct net_device *dev)
struct netdevsim *ns = netdev_priv(dev);
struct netdevsim *peer;
+ netdev_assert_locked(dev);
+
netif_carrier_off(dev);
peer = rtnl_dereference(ns->peer);
if (peer)
@@ -615,11 +638,13 @@ static struct nsim_rq *nsim_queue_alloc(void)
return NULL;
skb_queue_head_init(&rq->skb_queue);
+ nsim_rq_timer_init(rq);
return rq;
}
static void nsim_queue_free(struct nsim_rq *rq)
{
+ hrtimer_cancel(&rq->napi_timer);
skb_queue_purge_reason(&rq->skb_queue, SKB_DROP_REASON_QUEUE_PURGE);
kfree(rq);
}
@@ -645,8 +670,11 @@ nsim_queue_mem_alloc(struct net_device *dev, void *per_queue_mem, int idx)
if (ns->rq_reset_mode > 3)
return -EINVAL;
- if (ns->rq_reset_mode == 1)
+ if (ns->rq_reset_mode == 1) {
+ if (!netif_running(ns->netdev))
+ return -ENETDOWN;
return nsim_create_page_pool(&qmem->pp, &ns->rq[idx]->napi);
+ }
qmem->rq = nsim_queue_alloc();
if (!qmem->rq)
@@ -657,7 +685,8 @@ nsim_queue_mem_alloc(struct net_device *dev, void *per_queue_mem, int idx)
goto err_free;
if (!ns->rq_reset_mode)
- netif_napi_add_config(dev, &qmem->rq->napi, nsim_poll, idx);
+ netif_napi_add_config_locked(dev, &qmem->rq->napi, nsim_poll,
+ idx);
return 0;
@@ -674,7 +703,7 @@ static void nsim_queue_mem_free(struct net_device *dev, void *per_queue_mem)
page_pool_destroy(qmem->pp);
if (qmem->rq) {
if (!ns->rq_reset_mode)
- netif_napi_del(&qmem->rq->napi);
+ netif_napi_del_locked(&qmem->rq->napi);
page_pool_destroy(qmem->rq->page_pool);
nsim_queue_free(qmem->rq);
}
@@ -686,9 +715,11 @@ nsim_queue_start(struct net_device *dev, void *per_queue_mem, int idx)
struct nsim_queue_mem *qmem = per_queue_mem;
struct netdevsim *ns = netdev_priv(dev);
+ netdev_assert_locked(dev);
+
if (ns->rq_reset_mode == 1) {
ns->rq[idx]->page_pool = qmem->pp;
- napi_enable(&ns->rq[idx]->napi);
+ napi_enable_locked(&ns->rq[idx]->napi);
return 0;
}
@@ -696,15 +727,17 @@ nsim_queue_start(struct net_device *dev, void *per_queue_mem, int idx)
* here we want to test various call orders.
*/
if (ns->rq_reset_mode == 2) {
- netif_napi_del(&ns->rq[idx]->napi);
- netif_napi_add_config(dev, &qmem->rq->napi, nsim_poll, idx);
+ netif_napi_del_locked(&ns->rq[idx]->napi);
+ netif_napi_add_config_locked(dev, &qmem->rq->napi, nsim_poll,
+ idx);
} else if (ns->rq_reset_mode == 3) {
- netif_napi_add_config(dev, &qmem->rq->napi, nsim_poll, idx);
- netif_napi_del(&ns->rq[idx]->napi);
+ netif_napi_add_config_locked(dev, &qmem->rq->napi, nsim_poll,
+ idx);
+ netif_napi_del_locked(&ns->rq[idx]->napi);
}
ns->rq[idx] = qmem->rq;
- napi_enable(&ns->rq[idx]->napi);
+ napi_enable_locked(&ns->rq[idx]->napi);
return 0;
}
@@ -714,7 +747,9 @@ static int nsim_queue_stop(struct net_device *dev, void *per_queue_mem, int idx)
struct nsim_queue_mem *qmem = per_queue_mem;
struct netdevsim *ns = netdev_priv(dev);
- napi_disable(&ns->rq[idx]->napi);
+ netdev_assert_locked(dev);
+
+ napi_disable_locked(&ns->rq[idx]->napi);
if (ns->rq_reset_mode == 1) {
qmem->pp = ns->rq[idx]->page_pool;
@@ -753,12 +788,7 @@ nsim_qreset_write(struct file *file, const char __user *data,
if (ret != 2)
return -EINVAL;
- rtnl_lock();
- if (!netif_running(ns->netdev)) {
- ret = -ENETDOWN;
- goto exit_unlock;
- }
-
+ netdev_lock(ns->netdev);
if (queue >= ns->netdev->real_num_rx_queues) {
ret = -EINVAL;
goto exit_unlock;
@@ -772,7 +802,7 @@ nsim_qreset_write(struct file *file, const char __user *data,
ret = count;
exit_unlock:
- rtnl_unlock();
+ netdev_unlock(ns->netdev);
return ret;
}
diff --git a/drivers/net/netdevsim/netdevsim.h b/drivers/net/netdevsim/netdevsim.h
index 96d54c08043d..665020d18f29 100644
--- a/drivers/net/netdevsim/netdevsim.h
+++ b/drivers/net/netdevsim/netdevsim.h
@@ -54,7 +54,6 @@ struct nsim_ipsec {
struct dentry *pfile;
u32 count;
u32 tx;
- u32 ok;
};
#define NSIM_MACSEC_MAX_SECY_COUNT 3
@@ -97,6 +96,7 @@ struct nsim_rq {
struct napi_struct napi;
struct sk_buff_head skb_queue;
struct page_pool *page_pool;
+ struct hrtimer napi_timer;
};
struct netdevsim {
diff --git a/drivers/net/netkit.c b/drivers/net/netkit.c
index 1e1b00756be7..d072a7968f56 100644
--- a/drivers/net/netkit.c
+++ b/drivers/net/netkit.c
@@ -65,7 +65,6 @@ static void netkit_prep_forward(struct sk_buff *skb,
skb_reset_mac_header(skb);
if (!xnet)
return;
- ipvs_reset(skb);
skb_clear_tstamp(skb);
if (xnet_scrub)
netkit_xnet(skb);
@@ -327,17 +326,20 @@ static int netkit_validate(struct nlattr *tb[], struct nlattr *data[],
static struct rtnl_link_ops netkit_link_ops;
-static int netkit_new_link(struct net *peer_net, struct net_device *dev,
- struct nlattr *tb[], struct nlattr *data[],
+static int netkit_new_link(struct net_device *dev,
+ struct rtnl_newlink_params *params,
struct netlink_ext_ack *extack)
{
- struct nlattr *peer_tb[IFLA_MAX + 1], **tbp = tb, *attr;
- enum netkit_action policy_prim = NETKIT_PASS;
- enum netkit_action policy_peer = NETKIT_PASS;
+ struct net *peer_net = rtnl_newlink_peer_net(params);
enum netkit_scrub scrub_prim = NETKIT_SCRUB_DEFAULT;
enum netkit_scrub scrub_peer = NETKIT_SCRUB_DEFAULT;
+ struct nlattr *peer_tb[IFLA_MAX + 1], **tbp, *attr;
+ enum netkit_action policy_prim = NETKIT_PASS;
+ enum netkit_action policy_peer = NETKIT_PASS;
+ struct nlattr **data = params->data;
enum netkit_mode mode = NETKIT_L3;
unsigned char ifname_assign_type;
+ struct nlattr **tb = params->tb;
u16 headroom = 0, tailroom = 0;
struct ifinfomsg *ifmp = NULL;
struct net_device *peer;
@@ -345,6 +347,7 @@ static int netkit_new_link(struct net *peer_net, struct net_device *dev,
struct netkit *nk;
int err;
+ tbp = tb;
if (data) {
if (data[IFLA_NETKIT_MODE])
mode = nla_get_u32(data[IFLA_NETKIT_MODE]);
diff --git a/drivers/net/pcs/pcs-lynx.c b/drivers/net/pcs/pcs-lynx.c
index e46f588cae7d..23b40e9eacbb 100644
--- a/drivers/net/pcs/pcs-lynx.c
+++ b/drivers/net/pcs/pcs-lynx.c
@@ -355,7 +355,6 @@ static struct phylink_pcs *lynx_pcs_create(struct mdio_device *mdio)
mdio_device_get(mdio);
lynx->mdio = mdio;
lynx->pcs.ops = &lynx_pcs_phylink_ops;
- lynx->pcs.neg_mode = true;
lynx->pcs.poll = true;
for (i = 0; i < ARRAY_SIZE(lynx_interfaces); i++)
diff --git a/drivers/net/pcs/pcs-mtk-lynxi.c b/drivers/net/pcs/pcs-mtk-lynxi.c
index 7d6261dee534..149ddf51d785 100644
--- a/drivers/net/pcs/pcs-mtk-lynxi.c
+++ b/drivers/net/pcs/pcs-mtk-lynxi.c
@@ -305,7 +305,6 @@ struct phylink_pcs *mtk_pcs_lynxi_create(struct device *dev,
mpcs->regmap = regmap;
mpcs->flags = flags;
mpcs->pcs.ops = &mtk_pcs_lynxi_ops;
- mpcs->pcs.neg_mode = true;
mpcs->pcs.poll = true;
mpcs->interface = PHY_INTERFACE_MODE_NA;
diff --git a/drivers/net/pcs/pcs-rzn1-miic.c b/drivers/net/pcs/pcs-rzn1-miic.c
index 61944574d087..d79bb9b06cd2 100644
--- a/drivers/net/pcs/pcs-rzn1-miic.c
+++ b/drivers/net/pcs/pcs-rzn1-miic.c
@@ -268,17 +268,6 @@ static void miic_link_up(struct phylink_pcs *pcs, unsigned int neg_mode,
(MIIC_CONVCTRL_CONV_SPEED | MIIC_CONVCTRL_FULLD), val);
}
-static int miic_validate(struct phylink_pcs *pcs, unsigned long *supported,
- const struct phylink_link_state *state)
-{
- if (phy_interface_mode_is_rgmii(state->interface) ||
- state->interface == PHY_INTERFACE_MODE_RMII ||
- state->interface == PHY_INTERFACE_MODE_MII)
- return 1;
-
- return -EINVAL;
-}
-
static int miic_pre_init(struct phylink_pcs *pcs)
{
struct miic_port *miic_port = phylink_pcs_to_miic_port(pcs);
@@ -307,7 +296,6 @@ static int miic_pre_init(struct phylink_pcs *pcs)
}
static const struct phylink_pcs_ops miic_phylink_ops = {
- .pcs_validate = miic_validate,
.pcs_config = miic_config,
.pcs_link_up = miic_link_up,
.pcs_pre_init = miic_pre_init,
@@ -361,7 +349,10 @@ struct phylink_pcs *miic_create(struct device *dev, struct device_node *np)
miic_port->miic = miic;
miic_port->port = port - 1;
miic_port->pcs.ops = &miic_phylink_ops;
- miic_port->pcs.neg_mode = true;
+
+ phy_interface_set_rgmii(miic_port->pcs.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_RMII, miic_port->pcs.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_MII, miic_port->pcs.supported_interfaces);
return &miic_port->pcs;
}
@@ -472,13 +463,10 @@ static int miic_parse_dt(struct device *dev, u32 *mode_cfg)
if (of_property_read_u32(np, "renesas,miic-switch-portin", &conf) == 0)
dt_val[0] = conf;
- for_each_child_of_node(np, conv) {
+ for_each_available_child_of_node(np, conv) {
if (of_property_read_u32(conv, "reg", &port))
continue;
- if (!of_device_is_available(conv))
- continue;
-
if (of_property_read_u32(conv, "renesas,miic-input", &conf) == 0)
dt_val[port] = conf;
}
diff --git a/drivers/net/pcs/pcs-xpcs.c b/drivers/net/pcs/pcs-xpcs.c
index 1faa37f0e7b9..3d1bd5aac093 100644
--- a/drivers/net/pcs/pcs-xpcs.c
+++ b/drivers/net/pcs/pcs-xpcs.c
@@ -602,35 +602,21 @@ static void xpcs_get_interfaces(struct dw_xpcs *xpcs, unsigned long *interfaces)
__set_bit(compat->interface, interfaces);
}
-int xpcs_config_eee(struct dw_xpcs *xpcs, int mult_fact_100ns, int enable)
+static int xpcs_switch_interface_mode(struct dw_xpcs *xpcs,
+ phy_interface_t interface)
{
- u16 mask, val;
- int ret;
-
- mask = DW_VR_MII_EEE_LTX_EN | DW_VR_MII_EEE_LRX_EN |
- DW_VR_MII_EEE_TX_QUIET_EN | DW_VR_MII_EEE_RX_QUIET_EN |
- DW_VR_MII_EEE_TX_EN_CTRL | DW_VR_MII_EEE_RX_EN_CTRL |
- DW_VR_MII_EEE_MULT_FACT_100NS;
+ int ret = 0;
- if (enable)
- val = DW_VR_MII_EEE_LTX_EN | DW_VR_MII_EEE_LRX_EN |
- DW_VR_MII_EEE_TX_QUIET_EN | DW_VR_MII_EEE_RX_QUIET_EN |
- DW_VR_MII_EEE_TX_EN_CTRL | DW_VR_MII_EEE_RX_EN_CTRL |
- FIELD_PREP(DW_VR_MII_EEE_MULT_FACT_100NS,
- mult_fact_100ns);
- else
- val = 0;
-
- ret = xpcs_modify(xpcs, MDIO_MMD_VEND2, DW_VR_MII_EEE_MCTRL0, mask,
- val);
- if (ret < 0)
- return ret;
+ if (xpcs->info.pma == WX_TXGBE_XPCS_PMA_10G_ID) {
+ ret = txgbe_xpcs_switch_mode(xpcs, interface);
+ } else if (xpcs->interface != interface) {
+ if (interface == PHY_INTERFACE_MODE_SGMII)
+ xpcs->need_reset = true;
+ xpcs->interface = interface;
+ }
- return xpcs_modify(xpcs, MDIO_MMD_VEND2, DW_VR_MII_EEE_MCTRL1,
- DW_VR_MII_EEE_TRN_LPI,
- enable ? DW_VR_MII_EEE_TRN_LPI : 0);
+ return ret;
}
-EXPORT_SYMBOL_GPL(xpcs_config_eee);
static void xpcs_pre_config(struct phylink_pcs *pcs, phy_interface_t interface)
{
@@ -638,6 +624,11 @@ static void xpcs_pre_config(struct phylink_pcs *pcs, phy_interface_t interface)
const struct dw_xpcs_compat *compat;
int ret;
+ ret = xpcs_switch_interface_mode(xpcs, interface);
+ if (ret)
+ dev_err(&xpcs->mdiodev->dev, "switch interface failed: %pe\n",
+ ERR_PTR(ret));
+
if (!xpcs->need_reset)
return;
@@ -829,10 +820,6 @@ static int xpcs_do_config(struct dw_xpcs *xpcs, phy_interface_t interface,
return -ENODEV;
if (xpcs->info.pma == WX_TXGBE_XPCS_PMA_10G_ID) {
- ret = txgbe_xpcs_switch_mode(xpcs, interface);
- if (ret)
- return ret;
-
/* Wangxun devices need backplane CL37 AN enabled for
* SGMII and 1000base-X
*/
@@ -1193,6 +1180,63 @@ static void xpcs_an_restart(struct phylink_pcs *pcs)
BMCR_ANRESTART);
}
+static int xpcs_config_eee(struct dw_xpcs *xpcs, bool enable)
+{
+ u16 mask, val;
+ int ret;
+
+ mask = DW_VR_MII_EEE_LTX_EN | DW_VR_MII_EEE_LRX_EN |
+ DW_VR_MII_EEE_TX_QUIET_EN | DW_VR_MII_EEE_RX_QUIET_EN |
+ DW_VR_MII_EEE_TX_EN_CTRL | DW_VR_MII_EEE_RX_EN_CTRL |
+ DW_VR_MII_EEE_MULT_FACT_100NS;
+
+ if (enable)
+ val = DW_VR_MII_EEE_LTX_EN | DW_VR_MII_EEE_LRX_EN |
+ DW_VR_MII_EEE_TX_QUIET_EN | DW_VR_MII_EEE_RX_QUIET_EN |
+ DW_VR_MII_EEE_TX_EN_CTRL | DW_VR_MII_EEE_RX_EN_CTRL |
+ FIELD_PREP(DW_VR_MII_EEE_MULT_FACT_100NS,
+ xpcs->eee_mult_fact);
+ else
+ val = 0;
+
+ ret = xpcs_modify(xpcs, MDIO_MMD_VEND2, DW_VR_MII_EEE_MCTRL0, mask,
+ val);
+ if (ret < 0)
+ return ret;
+
+ return xpcs_modify(xpcs, MDIO_MMD_VEND2, DW_VR_MII_EEE_MCTRL1,
+ DW_VR_MII_EEE_TRN_LPI,
+ enable ? DW_VR_MII_EEE_TRN_LPI : 0);
+}
+
+static void xpcs_disable_eee(struct phylink_pcs *pcs)
+{
+ struct dw_xpcs *xpcs = phylink_pcs_to_xpcs(pcs);
+
+ xpcs_config_eee(xpcs, false);
+}
+
+static void xpcs_enable_eee(struct phylink_pcs *pcs)
+{
+ struct dw_xpcs *xpcs = phylink_pcs_to_xpcs(pcs);
+
+ xpcs_config_eee(xpcs, true);
+}
+
+/**
+ * xpcs_config_eee_mult_fact() - set the EEE clock multiplying factor
+ * @xpcs: pointer to a &struct dw_xpcs instance
+ * @mult_fact: the multiplying factor
+ *
+ * Configure the EEE clock multiplying factor. This value should be such that
+ * clk_eee_time_period * (mult_fact + 1) is within the range 80 to 120ns.
+ */
+void xpcs_config_eee_mult_fact(struct dw_xpcs *xpcs, u8 mult_fact)
+{
+ xpcs->eee_mult_fact = mult_fact;
+}
+EXPORT_SYMBOL_GPL(xpcs_config_eee_mult_fact);
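As a rough illustration of the 80..120 ns constraint documented above (hypothetical clock value and helper name, not part of this patch): with a 25 MHz EEE clock the period is 40 ns, so a mult_fact of 1 or 2 keeps 40 * (mult_fact + 1) inside the window. A minimal caller sketch:

/* Hypothetical caller sketch: derive mult_fact from the EEE clock period in
 * nanoseconds so that period_ns * (mult_fact + 1) falls in the 80..120 ns
 * window, then hand it to xpcs_config_eee_mult_fact(). The 4-bit register
 * field limits mult_fact to 0..15.
 */
static u8 example_pick_eee_mult_fact(u32 period_ns)
{
	u8 mult_fact;

	for (mult_fact = 0; mult_fact < 16; mult_fact++) {
		u32 scaled = period_ns * (mult_fact + 1);

		if (scaled >= 80 && scaled <= 120)
			return mult_fact;
	}

	return 0;
}

/* e.g. in a MAC driver with a 25 MHz (40 ns) EEE clock: */
xpcs_config_eee_mult_fact(xpcs, example_pick_eee_mult_fact(40));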
+
static int xpcs_read_ids(struct dw_xpcs *xpcs)
{
int ret;
@@ -1341,6 +1385,8 @@ static const struct phylink_pcs_ops xpcs_phylink_ops = {
.pcs_get_state = xpcs_get_state,
.pcs_an_restart = xpcs_an_restart,
.pcs_link_up = xpcs_link_up,
+ .pcs_disable_eee = xpcs_disable_eee,
+ .pcs_enable_eee = xpcs_enable_eee,
};
static int xpcs_identify(struct dw_xpcs *xpcs)
@@ -1374,7 +1420,6 @@ static struct dw_xpcs *xpcs_create_data(struct mdio_device *mdiodev)
mdio_device_get(mdiodev);
xpcs->mdiodev = mdiodev;
xpcs->pcs.ops = &xpcs_phylink_ops;
- xpcs->pcs.neg_mode = true;
xpcs->pcs.poll = true;
return xpcs;
diff --git a/drivers/net/pcs/pcs-xpcs.h b/drivers/net/pcs/pcs-xpcs.h
index adc5a0b3c883..929fa238445e 100644
--- a/drivers/net/pcs/pcs-xpcs.h
+++ b/drivers/net/pcs/pcs-xpcs.h
@@ -55,23 +55,11 @@
/* Clause 37 Defines */
/* VR MII MMD registers offsets */
#define DW_VR_MII_DIG_CTRL1 0x8000
-#define DW_VR_MII_AN_CTRL 0x8001
-#define DW_VR_MII_AN_INTR_STS 0x8002
-/* EEE Mode Control Register */
-#define DW_VR_MII_EEE_MCTRL0 0x8006
-#define DW_VR_MII_EEE_MCTRL1 0x800b
-#define DW_VR_MII_DIG_CTRL2 0x80e1
-
-/* VR_MII_DIG_CTRL1 */
#define DW_VR_MII_DIG_CTRL1_MAC_AUTO_SW BIT(9)
#define DW_VR_MII_DIG_CTRL1_2G5_EN BIT(2)
#define DW_VR_MII_DIG_CTRL1_PHY_MODE_CTRL BIT(0)
-/* VR_MII_DIG_CTRL2 */
-#define DW_VR_MII_DIG_CTRL2_TX_POL_INV BIT(4)
-#define DW_VR_MII_DIG_CTRL2_RX_POL_INV BIT(0)
-
-/* VR_MII_AN_CTRL */
+#define DW_VR_MII_AN_CTRL 0x8001
#define DW_VR_MII_AN_CTRL_8BIT BIT(8)
#define DW_VR_MII_TX_CONFIG_MASK BIT(3)
#define DW_VR_MII_TX_CONFIG_PHY_SIDE_SGMII 0x1
@@ -81,7 +69,7 @@
#define DW_VR_MII_PCS_MODE_C37_SGMII 0x2
#define DW_VR_MII_AN_INTR_EN BIT(0)
-/* VR_MII_AN_INTR_STS */
+#define DW_VR_MII_AN_INTR_STS 0x8002
#define DW_VR_MII_AN_STS_C37_ANCMPLT_INTR BIT(0)
#define DW_VR_MII_AN_STS_C37_ANSGM_FD BIT(1)
#define DW_VR_MII_AN_STS_C37_ANSGM_SP GENMASK(3, 2)
@@ -90,19 +78,22 @@
#define DW_VR_MII_C37_ANSGM_SP_1000 0x2
#define DW_VR_MII_C37_ANSGM_SP_LNKSTS BIT(4)
-/* VR MII EEE Control 0 defines */
+#define DW_VR_MII_EEE_MCTRL0 0x8006
#define DW_VR_MII_EEE_LTX_EN BIT(0) /* LPI Tx Enable */
#define DW_VR_MII_EEE_LRX_EN BIT(1) /* LPI Rx Enable */
#define DW_VR_MII_EEE_TX_QUIET_EN BIT(2) /* Tx Quiet Enable */
#define DW_VR_MII_EEE_RX_QUIET_EN BIT(3) /* Rx Quiet Enable */
#define DW_VR_MII_EEE_TX_EN_CTRL BIT(4) /* Tx Control Enable */
#define DW_VR_MII_EEE_RX_EN_CTRL BIT(7) /* Rx Control Enable */
-
#define DW_VR_MII_EEE_MULT_FACT_100NS GENMASK(11, 8)
-/* VR MII EEE Control 1 defines */
+#define DW_VR_MII_EEE_MCTRL1 0x800b
#define DW_VR_MII_EEE_TRN_LPI BIT(0) /* Transparent Mode Enable */
+#define DW_VR_MII_DIG_CTRL2 0x80e1
+#define DW_VR_MII_DIG_CTRL2_TX_POL_INV BIT(4)
+#define DW_VR_MII_DIG_CTRL2_RX_POL_INV BIT(0)
+
#define DW_XPCS_INFO_DECLARE(_name, _pcs, _pma) \
static const struct dw_xpcs_info _name = { .pcs = _pcs, .pma = _pma }
@@ -122,6 +113,7 @@ struct dw_xpcs {
struct phylink_pcs pcs;
phy_interface_t interface;
bool need_reset;
+ u8 eee_mult_fact;
};
int xpcs_read(struct dw_xpcs *xpcs, int dev, u32 reg);
diff --git a/drivers/net/pfcp.c b/drivers/net/pfcp.c
index 68d0d9e92a22..f873a92d2445 100644
--- a/drivers/net/pfcp.c
+++ b/drivers/net/pfcp.c
@@ -184,15 +184,16 @@ static int pfcp_add_sock(struct pfcp_dev *pfcp)
return PTR_ERR_OR_ZERO(pfcp->sock);
}
-static int pfcp_newlink(struct net *net, struct net_device *dev,
- struct nlattr *tb[], struct nlattr *data[],
+static int pfcp_newlink(struct net_device *dev,
+ struct rtnl_newlink_params *params,
struct netlink_ext_ack *extack)
{
+ struct net *link_net = rtnl_newlink_link_net(params);
struct pfcp_dev *pfcp = netdev_priv(dev);
struct pfcp_net *pn;
int err;
- pfcp->net = net;
+ pfcp->net = link_net;
err = pfcp_add_sock(pfcp);
if (err) {
@@ -206,7 +207,7 @@ static int pfcp_newlink(struct net *net, struct net_device *dev,
goto exit_del_pfcp_sock;
}
- pn = net_generic(net, pfcp_net_id);
+ pn = net_generic(link_net, pfcp_net_id);
list_add(&pfcp->list, &pn->pfcp_dev_list);
netdev_dbg(dev, "registered new PFCP interface\n");
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index 41c15a2c2037..d29f9f7fd2e1 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -328,7 +328,7 @@ config NXP_C45_TJA11XX_PHY
depends on MACSEC || !MACSEC
help
Enable support for NXP C45 TJA11XX PHYs.
- Currently supports the TJA1103, TJA1104 and TJA1120 PHYs.
+ Currently supports the TJA1103, TJA1104, TJA1120 and TJA1121 PHYs.
config NXP_TJA11XX_PHY
tristate "NXP TJA11xx PHYs support"
diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile
index c8dac6e92278..23ce205ae91d 100644
--- a/drivers/net/phy/Makefile
+++ b/drivers/net/phy/Makefile
@@ -2,7 +2,8 @@
# Makefile for Linux PHY drivers
libphy-y := phy.o phy-c45.o phy-core.o phy_device.o \
- linkmode.o phy_link_topology.o
+ linkmode.o phy_link_topology.o \
+ phy_package.o phy_caps.o
mdio-bus-y += mdio_bus.o mdio_device.o
ifdef CONFIG_MDIO_DEVICE
diff --git a/drivers/net/phy/adin1100.c b/drivers/net/phy/adin1100.c
index 6bb469429b9d..bd7a47a903ac 100644
--- a/drivers/net/phy/adin1100.c
+++ b/drivers/net/phy/adin1100.c
@@ -215,8 +215,11 @@ static int adin_resume(struct phy_device *phydev)
return adin_set_powerdown_mode(phydev, false);
}
-static int adin_set_loopback(struct phy_device *phydev, bool enable)
+static int adin_set_loopback(struct phy_device *phydev, bool enable, int speed)
{
+ if (enable && speed)
+ return -EOPNOTSUPP;
+
if (enable)
return phy_set_bits_mmd(phydev, MDIO_MMD_PCS, MDIO_PCS_10T1L_CTRL,
BMCR_LOOPBACK);
diff --git a/drivers/net/phy/aquantia/aquantia_firmware.c b/drivers/net/phy/aquantia/aquantia_firmware.c
index dab3af80593f..bbbcc9736b00 100644
--- a/drivers/net/phy/aquantia/aquantia_firmware.c
+++ b/drivers/net/phy/aquantia/aquantia_firmware.c
@@ -328,10 +328,11 @@ static int aqr_firmware_load_fs(struct phy_device *phydev)
const char *fw_name;
int ret;
- ret = of_property_read_string(dev->of_node, "firmware-name",
- &fw_name);
- if (ret)
+ ret = device_property_read_string(dev, "firmware-name", &fw_name);
+ if (ret) {
+ phydev_err(phydev, "failed to read firmware-name: %d\n", ret);
return ret;
+ }
ret = request_firmware(&fw, fw_name, dev);
if (ret) {
diff --git a/drivers/net/phy/aquantia/aquantia_hwmon.c b/drivers/net/phy/aquantia/aquantia_hwmon.c
index 7b3c49c3bf49..1a714b56b765 100644
--- a/drivers/net/phy/aquantia/aquantia_hwmon.c
+++ b/drivers/net/phy/aquantia/aquantia_hwmon.c
@@ -172,33 +172,13 @@ static const struct hwmon_ops aqr_hwmon_ops = {
.write = aqr_hwmon_write,
};
-static u32 aqr_hwmon_chip_config[] = {
- HWMON_C_REGISTER_TZ,
- 0,
-};
-
-static const struct hwmon_channel_info aqr_hwmon_chip = {
- .type = hwmon_chip,
- .config = aqr_hwmon_chip_config,
-};
-
-static u32 aqr_hwmon_temp_config[] = {
- HWMON_T_INPUT |
- HWMON_T_MAX | HWMON_T_MIN |
- HWMON_T_MAX_ALARM | HWMON_T_MIN_ALARM |
- HWMON_T_CRIT | HWMON_T_LCRIT |
- HWMON_T_CRIT_ALARM | HWMON_T_LCRIT_ALARM,
- 0,
-};
-
-static const struct hwmon_channel_info aqr_hwmon_temp = {
- .type = hwmon_temp,
- .config = aqr_hwmon_temp_config,
-};
-
static const struct hwmon_channel_info * const aqr_hwmon_info[] = {
- &aqr_hwmon_chip,
- &aqr_hwmon_temp,
+ HWMON_CHANNEL_INFO(chip, HWMON_C_REGISTER_TZ),
+ HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT |
+ HWMON_T_MAX | HWMON_T_MIN |
+ HWMON_T_MAX_ALARM | HWMON_T_MIN_ALARM |
+ HWMON_T_CRIT | HWMON_T_LCRIT |
+ HWMON_T_CRIT_ALARM | HWMON_T_LCRIT_ALARM),
NULL,
};
diff --git a/drivers/net/phy/aquantia/aquantia_main.c b/drivers/net/phy/aquantia/aquantia_main.c
index e42ace4e682a..08b1c9cc902b 100644
--- a/drivers/net/phy/aquantia/aquantia_main.c
+++ b/drivers/net/phy/aquantia/aquantia_main.c
@@ -50,6 +50,7 @@
#define MDIO_AN_VEND_PROV_1000BASET_HALF BIT(14)
#define MDIO_AN_VEND_PROV_5000BASET_FULL BIT(11)
#define MDIO_AN_VEND_PROV_2500BASET_FULL BIT(10)
+#define MDIO_AN_VEND_PROV_EXC_PHYID_INFO BIT(6)
#define MDIO_AN_VEND_PROV_DOWNSHIFT_EN BIT(4)
#define MDIO_AN_VEND_PROV_DOWNSHIFT_MASK GENMASK(3, 0)
#define MDIO_AN_VEND_PROV_DOWNSHIFT_DFLT 4
@@ -333,6 +334,238 @@ static int aqr_read_status(struct phy_device *phydev)
return genphy_c45_read_status(phydev);
}
+static int aqr105_get_features(struct phy_device *phydev)
+{
+ int ret;
+
+ /* Normal feature discovery */
+ ret = genphy_c45_pma_read_abilities(phydev);
+ if (ret)
+ return ret;
+
+ /* The AQR105 PHY fails to indicate the 2.5G and 5G modes, so add them
+ * here
+ */
+ linkmode_set_bit(ETHTOOL_LINK_MODE_5000baseT_Full_BIT,
+ phydev->supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT,
+ phydev->supported);
+
+ /* The AQR105 PHY supports both RJ45 and SFP+ interfaces */
+ linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, phydev->supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, phydev->supported);
+
+ return 0;
+}
+
+static int aqr105_setup_forced(struct phy_device *phydev)
+{
+ int vend = MDIO_AN_VEND_PROV_EXC_PHYID_INFO;
+ int ctrl10 = 0;
+ int adv = ADVERTISE_CSMA;
+ int ret;
+
+ switch (phydev->speed) {
+ case SPEED_100:
+ adv |= ADVERTISE_100FULL;
+ break;
+ case SPEED_1000:
+ adv |= ADVERTISE_NPAGE;
+ if (phydev->duplex == DUPLEX_FULL)
+ vend |= MDIO_AN_VEND_PROV_1000BASET_FULL;
+ else
+ vend |= MDIO_AN_VEND_PROV_1000BASET_HALF;
+ break;
+ case SPEED_2500:
+ adv |= (ADVERTISE_NPAGE | ADVERTISE_RESV);
+ vend |= MDIO_AN_VEND_PROV_2500BASET_FULL;
+ break;
+ case SPEED_5000:
+ adv |= (ADVERTISE_NPAGE | ADVERTISE_RESV);
+ vend |= MDIO_AN_VEND_PROV_5000BASET_FULL;
+ break;
+ case SPEED_10000:
+ adv |= (ADVERTISE_NPAGE | ADVERTISE_RESV);
+ ctrl10 |= MDIO_AN_10GBT_CTRL_ADV10G;
+ break;
+ default:
+ return -EINVAL;
+ }
+ ret = phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE, adv);
+ if (ret < 0)
+ return ret;
+ ret = phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_VEND_PROV, vend);
+ if (ret < 0)
+ return ret;
+ ret = phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_10GBT_CTRL, ctrl10);
+ if (ret < 0)
+ return ret;
+
+ /* set by vendor driver, but should be on by default */
+ ret = phy_set_bits_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1,
+ MDIO_AN_CTRL1_XNP);
+ if (ret < 0)
+ return ret;
+
+ return genphy_c45_an_disable_aneg(phydev);
+}
+
+static int aqr105_config_aneg(struct phy_device *phydev)
+{
+ bool changed = false;
+ u16 reg;
+ int ret;
+
+ ret = aqr_set_mdix(phydev, phydev->mdix_ctrl);
+ if (ret < 0)
+ return ret;
+ if (ret > 0)
+ changed = true;
+
+ if (phydev->autoneg == AUTONEG_DISABLE)
+ return aqr105_setup_forced(phydev);
+
+ ret = genphy_c45_an_config_aneg(phydev);
+ if (ret < 0)
+ return ret;
+ if (ret > 0)
+ changed = true;
+
+ /* Clause 45 has no standardized support for 1000BaseT, therefore
+ * use vendor registers for this mode.
+ */
+ reg = 0;
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ phydev->advertising))
+ reg |= MDIO_AN_VEND_PROV_1000BASET_FULL;
+
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
+ phydev->advertising))
+ reg |= MDIO_AN_VEND_PROV_1000BASET_HALF;
+
+ /* Handle the case when the 2.5G and 5G speeds are not advertised */
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT,
+ phydev->advertising))
+ reg |= MDIO_AN_VEND_PROV_2500BASET_FULL;
+
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_5000baseT_Full_BIT,
+ phydev->advertising))
+ reg |= MDIO_AN_VEND_PROV_5000BASET_FULL;
+
+ ret = phy_modify_mmd_changed(phydev, MDIO_MMD_AN, MDIO_AN_VEND_PROV,
+ MDIO_AN_VEND_PROV_1000BASET_HALF |
+ MDIO_AN_VEND_PROV_1000BASET_FULL |
+ MDIO_AN_VEND_PROV_2500BASET_FULL |
+ MDIO_AN_VEND_PROV_5000BASET_FULL, reg);
+ if (ret < 0)
+ return ret;
+ if (ret > 0)
+ changed = true;
+
+ return genphy_c45_check_and_restart_aneg(phydev, changed);
+}
+
+static int aqr105_read_rate(struct phy_device *phydev)
+{
+ int val;
+
+ val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_TX_VEND_STATUS1);
+ if (val < 0)
+ return val;
+
+ if (val & MDIO_AN_TX_VEND_STATUS1_FULL_DUPLEX)
+ phydev->duplex = DUPLEX_FULL;
+ else
+ phydev->duplex = DUPLEX_HALF;
+
+ switch (FIELD_GET(MDIO_AN_TX_VEND_STATUS1_RATE_MASK, val)) {
+ case MDIO_AN_TX_VEND_STATUS1_10BASET:
+ phydev->speed = SPEED_10;
+ break;
+ case MDIO_AN_TX_VEND_STATUS1_100BASETX:
+ phydev->speed = SPEED_100;
+ break;
+ case MDIO_AN_TX_VEND_STATUS1_1000BASET:
+ phydev->speed = SPEED_1000;
+ break;
+ case MDIO_AN_TX_VEND_STATUS1_2500BASET:
+ phydev->speed = SPEED_2500;
+ break;
+ case MDIO_AN_TX_VEND_STATUS1_5000BASET:
+ phydev->speed = SPEED_5000;
+ break;
+ case MDIO_AN_TX_VEND_STATUS1_10GBASET:
+ phydev->speed = SPEED_10000;
+ break;
+ default:
+ phydev->speed = SPEED_UNKNOWN;
+ }
+
+ return 0;
+}
+
+static int aqr105_read_status(struct phy_device *phydev)
+{
+ int ret;
+ int val;
+
+ ret = aqr_read_status(phydev);
+ if (ret)
+ return ret;
+
+ if (!phydev->link || phydev->autoneg == AUTONEG_DISABLE)
+ return 0;
+
+ /* The status register is not immediately correct on line side link up.
+ * Poll periodically until it reflects the correct ON state.
+ * Only return failure on a read error; on timeout the state defaults to OFF.
+ */
+ ret = phy_read_mmd_poll_timeout(phydev, MDIO_MMD_PHYXS,
+ MDIO_PHYXS_VEND_IF_STATUS, val,
+ (FIELD_GET(MDIO_PHYXS_VEND_IF_STATUS_TYPE_MASK, val) !=
+ MDIO_PHYXS_VEND_IF_STATUS_TYPE_OFF),
+ AQR107_OP_IN_PROG_SLEEP,
+ AQR107_OP_IN_PROG_TIMEOUT, false);
+ if (ret && ret != -ETIMEDOUT)
+ return ret;
+
+ switch (FIELD_GET(MDIO_PHYXS_VEND_IF_STATUS_TYPE_MASK, val)) {
+ case MDIO_PHYXS_VEND_IF_STATUS_TYPE_KR:
+ phydev->interface = PHY_INTERFACE_MODE_10GKR;
+ break;
+ case MDIO_PHYXS_VEND_IF_STATUS_TYPE_KX:
+ phydev->interface = PHY_INTERFACE_MODE_1000BASEKX;
+ break;
+ case MDIO_PHYXS_VEND_IF_STATUS_TYPE_XFI:
+ phydev->interface = PHY_INTERFACE_MODE_10GBASER;
+ break;
+ case MDIO_PHYXS_VEND_IF_STATUS_TYPE_USXGMII:
+ phydev->interface = PHY_INTERFACE_MODE_USXGMII;
+ break;
+ case MDIO_PHYXS_VEND_IF_STATUS_TYPE_XAUI:
+ phydev->interface = PHY_INTERFACE_MODE_XAUI;
+ break;
+ case MDIO_PHYXS_VEND_IF_STATUS_TYPE_SGMII:
+ phydev->interface = PHY_INTERFACE_MODE_SGMII;
+ break;
+ case MDIO_PHYXS_VEND_IF_STATUS_TYPE_RXAUI:
+ phydev->interface = PHY_INTERFACE_MODE_RXAUI;
+ break;
+ case MDIO_PHYXS_VEND_IF_STATUS_TYPE_OCSGMII:
+ phydev->interface = PHY_INTERFACE_MODE_2500BASEX;
+ break;
+ case MDIO_PHYXS_VEND_IF_STATUS_TYPE_OFF:
+ default:
+ phydev->link = false;
+ phydev->interface = PHY_INTERFACE_MODE_NA;
+ break;
+ }
+
+ /* Read rate from vendor register */
+ return aqr105_read_rate(phydev);
+}
+
static int aqr107_read_rate(struct phy_device *phydev)
{
u32 config_reg;
@@ -911,10 +1144,13 @@ static struct phy_driver aqr_driver[] = {
{
PHY_ID_MATCH_MODEL(PHY_ID_AQR105),
.name = "Aquantia AQR105",
- .config_aneg = aqr_config_aneg,
+ .get_features = aqr105_get_features,
+ .probe = aqr107_probe,
+ .config_init = aqr107_config_init,
+ .config_aneg = aqr105_config_aneg,
.config_intr = aqr_config_intr,
.handle_interrupt = aqr_handle_interrupt,
- .read_status = aqr_read_status,
+ .read_status = aqr105_read_status,
.suspend = aqr107_suspend,
.resume = aqr107_resume,
},
diff --git a/drivers/net/phy/bcm-phy-ptp.c b/drivers/net/phy/bcm-phy-ptp.c
index 208e8f561e06..eba8b5fb1365 100644
--- a/drivers/net/phy/bcm-phy-ptp.c
+++ b/drivers/net/phy/bcm-phy-ptp.c
@@ -597,7 +597,8 @@ static int bcm_ptp_perout_locked(struct bcm_ptp_private *priv,
period = BCM_MAX_PERIOD_8NS; /* write nonzero value */
- if (req->flags & PTP_PEROUT_PHASE)
+ /* Reject unsupported flags */
+ if (req->flags & ~PTP_PEROUT_DUTY_CYCLE)
return -EOPNOTSUPP;
if (req->flags & PTP_PEROUT_DUTY_CYCLE)
diff --git a/drivers/net/phy/bcm54140.c b/drivers/net/phy/bcm54140.c
index 7969345f6b35..a8edf45fd733 100644
--- a/drivers/net/phy/bcm54140.c
+++ b/drivers/net/phy/bcm54140.c
@@ -10,6 +10,7 @@
#include <linux/module.h>
#include <linux/phy.h>
+#include "phylib.h"
#include "bcm-phy-lib.h"
/* RDB per-port registers
diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c
index 22edb7e4c1a1..13e43fee1906 100644
--- a/drivers/net/phy/broadcom.c
+++ b/drivers/net/phy/broadcom.c
@@ -16,7 +16,7 @@
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/phy.h>
-#include <linux/pm_wakeup.h>
+#include <linux/device.h>
#include <linux/brcmphy.h>
#include <linux/of.h>
#include <linux/interrupt.h>
diff --git a/drivers/net/phy/dp83822.c b/drivers/net/phy/dp83822.c
index 6599feca1967..14f361549638 100644
--- a/drivers/net/phy/dp83822.c
+++ b/drivers/net/phy/dp83822.c
@@ -31,6 +31,7 @@
#define MII_DP83822_RCSR 0x17
#define MII_DP83822_RESET_CTRL 0x1f
#define MII_DP83822_MLEDCR 0x25
+#define MII_DP83822_LDCTRL 0x403
#define MII_DP83822_LEDCFG1 0x460
#define MII_DP83822_IOCTRL1 0x462
#define MII_DP83822_IOCTRL2 0x463
@@ -123,6 +124,9 @@
#define DP83822_IOCTRL1_GPIO1_CTRL GENMASK(2, 0)
#define DP83822_IOCTRL1_GPIO1_CTRL_LED_1 BIT(0)
+/* LDCTRL bits */
+#define DP83822_100BASE_TX_LINE_DRIVER_SWING GENMASK(7, 4)
+
/* IOCTRL2 bits */
#define DP83822_IOCTRL2_GPIO2_CLK_SRC GENMASK(6, 4)
#define DP83822_IOCTRL2_GPIO2_CTRL GENMASK(2, 0)
@@ -197,6 +201,7 @@ struct dp83822_private {
bool set_gpio2_clk_out;
u32 gpio2_clk_out;
bool led_pin_enable[DP83822_MAX_LED_PINS];
+ int tx_amplitude_100base_tx_index;
};
static int dp83822_config_wol(struct phy_device *phydev,
@@ -522,6 +527,12 @@ static int dp83822_config_init(struct phy_device *phydev)
FIELD_PREP(DP83822_IOCTRL2_GPIO2_CLK_SRC,
dp83822->gpio2_clk_out));
+ if (dp83822->tx_amplitude_100base_tx_index >= 0)
+ phy_modify_mmd(phydev, MDIO_MMD_VEND2, MII_DP83822_LDCTRL,
+ DP83822_100BASE_TX_LINE_DRIVER_SWING,
+ FIELD_PREP(DP83822_100BASE_TX_LINE_DRIVER_SWING,
+ dp83822->tx_amplitude_100base_tx_index));
+
err = dp83822_config_init_leds(phydev);
if (err)
return err;
@@ -720,6 +731,11 @@ static int dp83822_phy_reset(struct phy_device *phydev)
}
#ifdef CONFIG_OF_MDIO
+static const u32 tx_amplitude_100base_tx_gain[] = {
+ 80, 82, 83, 85, 87, 88, 90, 92,
+ 93, 95, 97, 98, 100, 102, 103, 105,
+};
+
static int dp83822_of_init_leds(struct phy_device *phydev)
{
struct device_node *node = phydev->mdio.dev.of_node;
@@ -780,6 +796,8 @@ static int dp83822_of_init(struct phy_device *phydev)
struct dp83822_private *dp83822 = phydev->priv;
struct device *dev = &phydev->mdio.dev;
const char *of_val;
+ int i, ret;
+ u32 val;
/* Signal detection for the PHY is only enabled if the FX_EN and the
* SD_EN pins are strapped. Signal detection can only enabled if FX_EN
@@ -815,6 +833,25 @@ static int dp83822_of_init(struct phy_device *phydev)
dp83822->set_gpio2_clk_out = true;
}
+ ret = phy_get_tx_amplitude_gain(phydev, dev,
+ ETHTOOL_LINK_MODE_100baseT_Full_BIT,
+ &val);
+ if (!ret) {
+ for (i = 0; i < ARRAY_SIZE(tx_amplitude_100base_tx_gain); i++) {
+ if (tx_amplitude_100base_tx_gain[i] == val) {
+ dp83822->tx_amplitude_100base_tx_index = i;
+ break;
+ }
+ }
+
+ if (dp83822->tx_amplitude_100base_tx_index < 0) {
+ phydev_err(phydev,
+ "Invalid value for tx-amplitude-100base-tx-percent property (%u)\n",
+ val);
+ return -EINVAL;
+ }
+ }
+
return dp83822_of_init_leds(phydev);
}
@@ -893,6 +930,7 @@ static int dp8382x_probe(struct phy_device *phydev)
if (!dp83822)
return -ENOMEM;
+ dp83822->tx_amplitude_100base_tx_index = -1;
phydev->priv = dp83822;
return 0;
diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c
index c1451df430ac..063266cafe9c 100644
--- a/drivers/net/phy/dp83867.c
+++ b/drivers/net/phy/dp83867.c
@@ -1009,8 +1009,11 @@ static void dp83867_link_change_notify(struct phy_device *phydev)
}
}
-static int dp83867_loopback(struct phy_device *phydev, bool enable)
+static int dp83867_loopback(struct phy_device *phydev, bool enable, int speed)
{
+ if (enable && speed)
+ return -EOPNOTSUPP;
+
return phy_modify(phydev, MII_BMCR, BMCR_LOOPBACK,
enable ? BMCR_LOOPBACK : 0);
}
diff --git a/drivers/net/phy/dp83td510.c b/drivers/net/phy/dp83td510.c
index a42af9c168ec..23af1ac194fa 100644
--- a/drivers/net/phy/dp83td510.c
+++ b/drivers/net/phy/dp83td510.c
@@ -204,10 +204,191 @@ struct dp83td510_priv {
#define DP83TD510E_UNKN_030E 0x30e
#define DP83TD510E_030E_VAL 0x2520
+#define DP83TD510E_LEDS_CFG_1 0x460
+#define DP83TD510E_LED_FN(idx, val) (((val) & 0xf) << ((idx) * 4))
+#define DP83TD510E_LED_FN_MASK(idx) (0xf << ((idx) * 4))
+/* link OK */
+#define DP83TD510E_LED_MODE_LINK_OK 0x0
+/* TX/RX activity */
+#define DP83TD510E_LED_MODE_TX_RX_ACTIVITY 0x1
+/* TX activity */
+#define DP83TD510E_LED_MODE_TX_ACTIVITY 0x2
+/* RX activity */
+#define DP83TD510E_LED_MODE_RX_ACTIVITY 0x3
+/* LR */
+#define DP83TD510E_LED_MODE_LR 0x4
+/* SR */
+#define DP83TD510E_LED_MODE_SR 0x5
+/* LED SPEED: High for 10Base-T */
+#define DP83TD510E_LED_MODE_LED_SPEED 0x6
+/* Duplex mode */
+#define DP83TD510E_LED_MODE_DUPLEX 0x7
+/* link + blink on activity with stretch option */
+#define DP83TD510E_LED_MODE_LINK_BLINK 0x8
+/* blink on activity with stretch option */
+#define DP83TD510E_LED_MODE_BLINK_ACTIVITY 0x9
+/* blink on tx activity with stretch option */
+#define DP83TD510E_LED_MODE_BLINK_TX 0xa
+/* blink on rx activity with stretch option */
+#define DP83TD510E_LED_MODE_BLINK_RX 0xb
+/* link_lost */
+#define DP83TD510E_LED_MODE_LINK_LOST 0xc
+/* PRBS error: toggles on error */
+#define DP83TD510E_LED_MODE_PRBS_ERROR 0xd
+/* XMII TX/RX Error with stretch option */
+#define DP83TD510E_LED_MODE_XMII_ERR 0xe
+
+#define DP83TD510E_LED_COUNT 4
+
+#define DP83TD510E_LEDS_CFG_2 0x469
+#define DP83TD510E_LED_POLARITY(idx) BIT((idx) * 4 + 2)
+#define DP83TD510E_LED_DRV_VAL(idx) BIT((idx) * 4 + 1)
+#define DP83TD510E_LED_DRV_EN(idx) BIT((idx) * 4)
+
#define DP83TD510E_ALCD_STAT 0xa9f
#define DP83TD510E_ALCD_COMPLETE BIT(15)
#define DP83TD510E_ALCD_CABLE_LENGTH GENMASK(10, 0)
+static int dp83td510_led_brightness_set(struct phy_device *phydev, u8 index,
+ enum led_brightness brightness)
+{
+ u32 val;
+
+ if (index >= DP83TD510E_LED_COUNT)
+ return -EINVAL;
+
+ val = DP83TD510E_LED_DRV_EN(index);
+
+ if (brightness)
+ val |= DP83TD510E_LED_DRV_VAL(index);
+
+ return phy_modify_mmd(phydev, MDIO_MMD_VEND2, DP83TD510E_LEDS_CFG_2,
+ DP83TD510E_LED_DRV_VAL(index) |
+ DP83TD510E_LED_DRV_EN(index), val);
+}
+
+static int dp83td510_led_mode(u8 index, unsigned long rules)
+{
+ if (index >= DP83TD510E_LED_COUNT)
+ return -EINVAL;
+
+ switch (rules) {
+ case BIT(TRIGGER_NETDEV_LINK):
+ return DP83TD510E_LED_MODE_LINK_OK;
+ case BIT(TRIGGER_NETDEV_LINK_10):
+ return DP83TD510E_LED_MODE_LED_SPEED;
+ case BIT(TRIGGER_NETDEV_FULL_DUPLEX):
+ return DP83TD510E_LED_MODE_DUPLEX;
+ case BIT(TRIGGER_NETDEV_TX):
+ return DP83TD510E_LED_MODE_TX_ACTIVITY;
+ case BIT(TRIGGER_NETDEV_RX):
+ return DP83TD510E_LED_MODE_RX_ACTIVITY;
+ case BIT(TRIGGER_NETDEV_TX) | BIT(TRIGGER_NETDEV_RX):
+ return DP83TD510E_LED_MODE_TX_RX_ACTIVITY;
+ case BIT(TRIGGER_NETDEV_LINK) | BIT(TRIGGER_NETDEV_TX) |
+ BIT(TRIGGER_NETDEV_RX):
+ return DP83TD510E_LED_MODE_LINK_BLINK;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int dp83td510_led_hw_is_supported(struct phy_device *phydev, u8 index,
+ unsigned long rules)
+{
+ int ret;
+
+ ret = dp83td510_led_mode(index, rules);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int dp83td510_led_hw_control_set(struct phy_device *phydev, u8 index,
+ unsigned long rules)
+{
+ int mode, ret;
+
+ mode = dp83td510_led_mode(index, rules);
+ if (mode < 0)
+ return mode;
+
+ ret = phy_modify_mmd(phydev, MDIO_MMD_VEND2, DP83TD510E_LEDS_CFG_1,
+ DP83TD510E_LED_FN_MASK(index),
+ DP83TD510E_LED_FN(index, mode));
+ if (ret)
+ return ret;
+
+ return phy_modify_mmd(phydev, MDIO_MMD_VEND2, DP83TD510E_LEDS_CFG_2,
+ DP83TD510E_LED_DRV_EN(index), 0);
+}
+
+static int dp83td510_led_hw_control_get(struct phy_device *phydev,
+ u8 index, unsigned long *rules)
+{
+ int val;
+
+ val = phy_read_mmd(phydev, MDIO_MMD_VEND2, DP83TD510E_LEDS_CFG_1);
+ if (val < 0)
+ return val;
+
+ val &= DP83TD510E_LED_FN_MASK(index);
+ val >>= index * 4;
+
+ switch (val) {
+ case DP83TD510E_LED_MODE_LINK_OK:
+ *rules = BIT(TRIGGER_NETDEV_LINK);
+ break;
+ /* LED mode: LED SPEED (10BaseT1L indicator) */
+ case DP83TD510E_LED_MODE_LED_SPEED:
+ *rules = BIT(TRIGGER_NETDEV_LINK_10);
+ break;
+ case DP83TD510E_LED_MODE_DUPLEX:
+ *rules = BIT(TRIGGER_NETDEV_FULL_DUPLEX);
+ break;
+ case DP83TD510E_LED_MODE_TX_ACTIVITY:
+ *rules = BIT(TRIGGER_NETDEV_TX);
+ break;
+ case DP83TD510E_LED_MODE_RX_ACTIVITY:
+ *rules = BIT(TRIGGER_NETDEV_RX);
+ break;
+ case DP83TD510E_LED_MODE_TX_RX_ACTIVITY:
+ *rules = BIT(TRIGGER_NETDEV_TX) | BIT(TRIGGER_NETDEV_RX);
+ break;
+ case DP83TD510E_LED_MODE_LINK_BLINK:
+ *rules = BIT(TRIGGER_NETDEV_LINK) |
+ BIT(TRIGGER_NETDEV_TX) |
+ BIT(TRIGGER_NETDEV_RX);
+ break;
+ default:
+ *rules = 0;
+ break;
+ }
+
+ return 0;
+}
+
+static int dp83td510_led_polarity_set(struct phy_device *phydev, int index,
+ unsigned long modes)
+{
+ u16 polarity = DP83TD510E_LED_POLARITY(index);
+ u32 mode;
+
+ for_each_set_bit(mode, &modes, __PHY_LED_MODES_NUM) {
+ switch (mode) {
+ case PHY_LED_ACTIVE_LOW:
+ polarity = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ return phy_modify_mmd(phydev, MDIO_MMD_VEND2, DP83TD510E_LEDS_CFG_2,
+ DP83TD510E_LED_POLARITY(index), polarity);
+}
+
/**
* dp83td510_update_stats - Update the PHY statistics for the DP83TD510 PHY.
* @phydev: Pointer to the phy_device structure.
@@ -712,6 +893,12 @@ static struct phy_driver dp83td510_driver[] = {
.get_phy_stats = dp83td510_get_phy_stats,
.update_stats = dp83td510_update_stats,
+ .led_brightness_set = dp83td510_led_brightness_set,
+ .led_hw_is_supported = dp83td510_led_hw_is_supported,
+ .led_hw_control_set = dp83td510_led_hw_control_set,
+ .led_hw_control_get = dp83td510_led_hw_control_get,
+ .led_polarity_set = dp83td510_led_polarity_set,
+
.suspend = genphy_suspend,
.resume = genphy_resume,
} };
diff --git a/drivers/net/phy/dp83tg720.c b/drivers/net/phy/dp83tg720.c
index 050f4537d140..7e76323409c4 100644
--- a/drivers/net/phy/dp83tg720.c
+++ b/drivers/net/phy/dp83tg720.c
@@ -4,12 +4,31 @@
*/
#include <linux/bitfield.h>
#include <linux/ethtool_netlink.h>
+#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/phy.h>
+#include <linux/random.h>
#include "open_alliance_helpers.h"
+/*
+ * DP83TG720S_POLL_ACTIVE_LINK - Polling interval in milliseconds when the link
+ * is active.
+ * DP83TG720S_POLL_NO_LINK_MIN - Minimum polling interval in milliseconds when
+ * the link is down.
+ * DP83TG720S_POLL_NO_LINK_MAX - Maximum polling interval in milliseconds when
+ * the link is down.
+ *
+ * These values are not documented or officially recommended by the vendor but
+ * were determined through empirical testing. They achieve a good balance in
+ * minimizing the number of reset retries while ensuring reliable link recovery
+ * within a reasonable timeframe.
+ */
+#define DP83TG720S_POLL_ACTIVE_LINK 1000
+#define DP83TG720S_POLL_NO_LINK_MIN 100
+#define DP83TG720S_POLL_NO_LINK_MAX 1000
+
#define DP83TG720S_PHY_ID 0x2000a284
/* MDIO_MMD_VEND2 registers */
@@ -371,6 +390,13 @@ static int dp83tg720_read_status(struct phy_device *phydev)
if (ret)
return ret;
+ /* Sleep 600ms for PHY stabilization post-reset.
+ * Empirically chosen value (not documented).
+ * Helps reduce reset bounces with link partners having similar
+ * issues.
+ */
+ msleep(600);
+
/* After HW reset we need to restore master/slave configuration.
* genphy_c45_pma_baset1_read_master_slave() call will be done
* by the dp83tg720_config_aneg() function.
@@ -498,6 +524,57 @@ static int dp83tg720_probe(struct phy_device *phydev)
return 0;
}
+/**
+ * dp83tg720_get_next_update_time - Determine the next update time for PHY
+ * state
+ * @phydev: Pointer to the phy_device structure
+ *
+ * This function addresses a limitation of the DP83TG720 PHY, which cannot
+ * reliably detect or report a stable link state. To recover from such
+ * scenarios, the PHY must be periodically reset when the link is down. However,
+ * if the link partner also runs Linux with the same driver, synchronized reset
+ * intervals can lead to a deadlock where the link never establishes due to
+ * simultaneous resets on both sides.
+ *
+ * To avoid this, the function implements randomized polling intervals when the
+ * link is down. It ensures that reset intervals are desynchronized by
+ * introducing a random delay between a configured minimum and maximum range.
+ * When the link is up, a fixed polling interval is used to minimize overhead.
+ *
+ * This mechanism guarantees that the link will reestablish within 10 seconds
+ * in the worst-case scenario.
+ *
+ * Return: Time (in jiffies) until the next update event for the PHY state
+ * machine.
+ */
+static unsigned int dp83tg720_get_next_update_time(struct phy_device *phydev)
+{
+ unsigned int next_time_jiffies;
+
+ if (phydev->link) {
+ /* When the link is up, use a fixed 1000ms interval
+ * (in jiffies)
+ */
+ next_time_jiffies =
+ msecs_to_jiffies(DP83TG720S_POLL_ACTIVE_LINK);
+ } else {
+ unsigned int min_jiffies, max_jiffies, rand_jiffies;
+
+ /* When the link is down, randomize interval between min/max
+ * (in jiffies)
+ */
+ min_jiffies = msecs_to_jiffies(DP83TG720S_POLL_NO_LINK_MIN);
+ max_jiffies = msecs_to_jiffies(DP83TG720S_POLL_NO_LINK_MAX);
+
+ rand_jiffies = min_jiffies +
+ get_random_u32_below(max_jiffies - min_jiffies + 1);
+ next_time_jiffies = rand_jiffies;
+ }
+
+ /* Ensure the polling time is at least one jiffy */
+ return max(next_time_jiffies, 1U);
+}
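A rough sketch (illustrative only, not the actual phylib state machine) of how a polling loop could consume the ->get_next_update_time() hook added above; it only shows that the hook returns a delay in jiffies used to schedule the next poll:

/* Illustrative-only consumer of ->get_next_update_time(); the real phylib
 * code differs. Falls back to a 1 s interval when the driver does not
 * implement the hook.
 */
static void example_schedule_next_poll(struct phy_device *phydev)
{
	unsigned long delay = HZ;

	if (phydev->drv->get_next_update_time)
		delay = phydev->drv->get_next_update_time(phydev);

	schedule_delayed_work(&phydev->state_queue, delay);
}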
+
static struct phy_driver dp83tg720_driver[] = {
{
PHY_ID_MATCH_MODEL(DP83TG720S_PHY_ID),
@@ -516,6 +593,7 @@ static struct phy_driver dp83tg720_driver[] = {
.get_link_stats = dp83tg720_get_link_stats,
.get_phy_stats = dp83tg720_get_phy_stats,
.update_stats = dp83tg720_update_stats,
+ .get_next_update_time = dp83tg720_get_next_update_time,
.suspend = genphy_suspend,
.resume = genphy_resume,
diff --git a/drivers/net/phy/fixed_phy.c b/drivers/net/phy/fixed_phy.c
index aef739c20ac4..ee7831a9849b 100644
--- a/drivers/net/phy/fixed_phy.c
+++ b/drivers/net/phy/fixed_phy.c
@@ -10,7 +10,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/platform_device.h>
+#include <linux/device/faux.h>
#include <linux/list.h>
#include <linux/mii.h>
#include <linux/phy.h>
@@ -40,7 +40,7 @@ struct fixed_phy {
struct gpio_desc *link_gpiod;
};
-static struct platform_device *pdev;
+static struct faux_device *fdev;
static struct fixed_mdio_bus platform_fmb = {
.phys = LIST_HEAD_INIT(platform_fmb.phys),
};
@@ -337,9 +337,9 @@ static int __init fixed_mdio_bus_init(void)
struct fixed_mdio_bus *fmb = &platform_fmb;
int ret;
- pdev = platform_device_register_simple("Fixed MDIO bus", 0, NULL, 0);
- if (IS_ERR(pdev))
- return PTR_ERR(pdev);
+ fdev = faux_device_create("Fixed MDIO bus", NULL, NULL);
+ if (!fdev)
+ return -ENODEV;
fmb->mii_bus = mdiobus_alloc();
if (fmb->mii_bus == NULL) {
@@ -350,7 +350,7 @@ static int __init fixed_mdio_bus_init(void)
snprintf(fmb->mii_bus->id, MII_BUS_ID_SIZE, "fixed-0");
fmb->mii_bus->name = "Fixed MDIO Bus";
fmb->mii_bus->priv = fmb;
- fmb->mii_bus->parent = &pdev->dev;
+ fmb->mii_bus->parent = &fdev->dev;
fmb->mii_bus->read = &fixed_mdio_read;
fmb->mii_bus->write = &fixed_mdio_write;
fmb->mii_bus->phy_mask = ~0;
@@ -364,7 +364,7 @@ static int __init fixed_mdio_bus_init(void)
err_mdiobus_alloc:
mdiobus_free(fmb->mii_bus);
err_mdiobus_reg:
- platform_device_unregister(pdev);
+ faux_device_destroy(fdev);
return ret;
}
module_init(fixed_mdio_bus_init);
@@ -376,7 +376,7 @@ static void __exit fixed_mdio_bus_exit(void)
mdiobus_unregister(fmb->mii_bus);
mdiobus_free(fmb->mii_bus);
- platform_device_unregister(pdev);
+ faux_device_destroy(fdev);
list_for_each_entry_safe(fp, tmp, &fmb->phys, node) {
list_del(&fp->node);
diff --git a/drivers/net/phy/marvell-88q2xxx.c b/drivers/net/phy/marvell-88q2xxx.c
index a3996471a1c9..23e1f0521f54 100644
--- a/drivers/net/phy/marvell-88q2xxx.c
+++ b/drivers/net/phy/marvell-88q2xxx.c
@@ -7,30 +7,34 @@
* Copyright (C) 2024 Liebherr-Electronics and Drives GmbH
*/
#include <linux/ethtool_netlink.h>
+#include <linux/hwmon.h>
#include <linux/marvell_phy.h>
+#include <linux/of.h>
#include <linux/phy.h>
-#include <linux/hwmon.h>
-#define PHY_ID_88Q2220_REVB0 (MARVELL_PHY_ID_88Q2220 | 0x1)
-#define PHY_ID_88Q2220_REVB1 (MARVELL_PHY_ID_88Q2220 | 0x2)
-#define PHY_ID_88Q2220_REVB2 (MARVELL_PHY_ID_88Q2220 | 0x3)
+#define PHY_ID_88Q2220_REVB0 (MARVELL_PHY_ID_88Q2220 | 0x1)
+#define PHY_ID_88Q2220_REVB1 (MARVELL_PHY_ID_88Q2220 | 0x2)
+#define PHY_ID_88Q2220_REVB2 (MARVELL_PHY_ID_88Q2220 | 0x3)
-#define MDIO_MMD_AN_MV_STAT 32769
-#define MDIO_MMD_AN_MV_STAT_ANEG 0x0100
-#define MDIO_MMD_AN_MV_STAT_LOCAL_RX 0x1000
-#define MDIO_MMD_AN_MV_STAT_REMOTE_RX 0x2000
-#define MDIO_MMD_AN_MV_STAT_LOCAL_MASTER 0x4000
-#define MDIO_MMD_AN_MV_STAT_MS_CONF_FAULT 0x8000
+#define MDIO_MMD_AN_MV_STAT 32769
+#define MDIO_MMD_AN_MV_STAT_ANEG 0x0100
+#define MDIO_MMD_AN_MV_STAT_LOCAL_RX 0x1000
+#define MDIO_MMD_AN_MV_STAT_REMOTE_RX 0x2000
+#define MDIO_MMD_AN_MV_STAT_LOCAL_MASTER 0x4000
+#define MDIO_MMD_AN_MV_STAT_MS_CONF_FAULT 0x8000
-#define MDIO_MMD_AN_MV_STAT2 32794
-#define MDIO_MMD_AN_MV_STAT2_AN_RESOLVED 0x0800
-#define MDIO_MMD_AN_MV_STAT2_100BT1 0x2000
-#define MDIO_MMD_AN_MV_STAT2_1000BT1 0x4000
+#define MDIO_MMD_AN_MV_STAT2 32794
+#define MDIO_MMD_AN_MV_STAT2_AN_RESOLVED 0x0800
+#define MDIO_MMD_AN_MV_STAT2_100BT1 0x2000
+#define MDIO_MMD_AN_MV_STAT2_1000BT1 0x4000
-#define MDIO_MMD_PCS_MV_INT_EN 32784
-#define MDIO_MMD_PCS_MV_INT_EN_LINK_UP 0x0040
-#define MDIO_MMD_PCS_MV_INT_EN_LINK_DOWN 0x0080
-#define MDIO_MMD_PCS_MV_INT_EN_100BT1 0x1000
+#define MDIO_MMD_PCS_MV_RESET_CTRL 32768
+#define MDIO_MMD_PCS_MV_RESET_CTRL_TX_DISABLE 0x8
+
+#define MDIO_MMD_PCS_MV_INT_EN 32784
+#define MDIO_MMD_PCS_MV_INT_EN_LINK_UP 0x0040
+#define MDIO_MMD_PCS_MV_INT_EN_LINK_DOWN 0x0080
+#define MDIO_MMD_PCS_MV_INT_EN_100BT1 0x1000
#define MDIO_MMD_PCS_MV_GPIO_INT_STAT 32785
#define MDIO_MMD_PCS_MV_GPIO_INT_STAT_LINK_UP 0x0040
@@ -40,6 +44,22 @@
#define MDIO_MMD_PCS_MV_GPIO_INT_CTRL 32787
#define MDIO_MMD_PCS_MV_GPIO_INT_CTRL_TRI_DIS 0x0800
+#define MDIO_MMD_PCS_MV_LED_FUNC_CTRL 32790
+#define MDIO_MMD_PCS_MV_LED_FUNC_CTRL_LED_1_MASK GENMASK(7, 4)
+#define MDIO_MMD_PCS_MV_LED_FUNC_CTRL_LED_0_MASK GENMASK(3, 0)
+#define MDIO_MMD_PCS_MV_LED_FUNC_CTRL_LINK 0x0 /* Link established */
+#define MDIO_MMD_PCS_MV_LED_FUNC_CTRL_LINK_RX_TX 0x1 /* Link established, blink for rx or tx activity */
+#define MDIO_MMD_PCS_MV_LED_FUNC_CTRL_LINK_1000BT1 0x2 /* Blink 3x for 1000BT1 link established */
+#define MDIO_MMD_PCS_MV_LED_FUNC_CTRL_RX_TX_ON 0x3 /* Receive or transmit activity */
+#define MDIO_MMD_PCS_MV_LED_FUNC_CTRL_RX_TX 0x4 /* Blink on receive or transmit activity */
+#define MDIO_MMD_PCS_MV_LED_FUNC_CTRL_TX 0x5 /* Transmit activity */
+#define MDIO_MMD_PCS_MV_LED_FUNC_CTRL_LINK_COPPER 0x6 /* Copper Link established */
+#define MDIO_MMD_PCS_MV_LED_FUNC_CTRL_LINK_1000BT1_ON 0x7 /* 1000BT1 link established */
+#define MDIO_MMD_PCS_MV_LED_FUNC_CTRL_FORCE_OFF 0x8 /* Force off */
+#define MDIO_MMD_PCS_MV_LED_FUNC_CTRL_FORCE_ON 0x9 /* Force on */
+#define MDIO_MMD_PCS_MV_LED_FUNC_CTRL_FORCE_HIGHZ 0xa /* Force Hi-Z */
+#define MDIO_MMD_PCS_MV_LED_FUNC_CTRL_FORCE_BLINK 0xb /* Force blink */
+
#define MDIO_MMD_PCS_MV_TEMP_SENSOR1 32833
#define MDIO_MMD_PCS_MV_TEMP_SENSOR1_RAW_INT 0x0001
#define MDIO_MMD_PCS_MV_TEMP_SENSOR1_INT 0x0040
@@ -60,11 +80,11 @@
#define MDIO_MMD_PCS_MV_100BT1_STAT1_REMOTE_RX 0x2000
#define MDIO_MMD_PCS_MV_100BT1_STAT1_LOCAL_MASTER 0x4000
-#define MDIO_MMD_PCS_MV_100BT1_STAT2 33033
-#define MDIO_MMD_PCS_MV_100BT1_STAT2_JABBER 0x0001
-#define MDIO_MMD_PCS_MV_100BT1_STAT2_POL 0x0002
-#define MDIO_MMD_PCS_MV_100BT1_STAT2_LINK 0x0004
-#define MDIO_MMD_PCS_MV_100BT1_STAT2_ANGE 0x0008
+#define MDIO_MMD_PCS_MV_100BT1_STAT2 33033
+#define MDIO_MMD_PCS_MV_100BT1_STAT2_JABBER 0x0001
+#define MDIO_MMD_PCS_MV_100BT1_STAT2_POL 0x0002
+#define MDIO_MMD_PCS_MV_100BT1_STAT2_LINK 0x0004
+#define MDIO_MMD_PCS_MV_100BT1_STAT2_ANGE 0x0008
#define MDIO_MMD_PCS_MV_100BT1_INT_EN 33042
#define MDIO_MMD_PCS_MV_100BT1_INT_EN_LINKEVENT 0x0400
@@ -72,7 +92,7 @@
#define MDIO_MMD_PCS_MV_COPPER_INT_STAT 33043
#define MDIO_MMD_PCS_MV_COPPER_INT_STAT_LINKEVENT 0x0400
-#define MDIO_MMD_PCS_MV_RX_STAT 33328
+#define MDIO_MMD_PCS_MV_RX_STAT 33328
#define MDIO_MMD_PCS_MV_TDR_RESET 65226
#define MDIO_MMD_PCS_MV_TDR_RESET_TDR_RST 0x1000
@@ -95,8 +115,12 @@
#define MDIO_MMD_PCS_MV_TDR_OFF_CUTOFF 65246
+#define MV88Q2XXX_LED_INDEX_TX_ENABLE 0
+#define MV88Q2XXX_LED_INDEX_GPIO 1
+
struct mv88q2xxx_priv {
bool enable_temp;
+ bool enable_led0;
};
struct mmd_val {
@@ -460,6 +484,9 @@ static int mv88q2xxx_config_aneg(struct phy_device *phydev)
static int mv88q2xxx_config_init(struct phy_device *phydev)
{
+ struct mv88q2xxx_priv *priv = phydev->priv;
+ int ret;
+
/* The 88Q2XXX PHYs do have the extended ability register available, but
* register MDIO_PMA_EXTABLE where they should signal it does not
* work according to specification. Therefore, we force it here.
@@ -469,10 +496,31 @@ static int mv88q2xxx_config_init(struct phy_device *phydev)
/* Configure interrupt with default settings, output is driven low for
* active interrupt and high for inactive.
*/
- if (phy_interrupt_is_valid(phydev))
- return phy_set_bits_mmd(phydev, MDIO_MMD_PCS,
- MDIO_MMD_PCS_MV_GPIO_INT_CTRL,
- MDIO_MMD_PCS_MV_GPIO_INT_CTRL_TRI_DIS);
+ if (phy_interrupt_is_valid(phydev)) {
+ ret = phy_set_bits_mmd(phydev, MDIO_MMD_PCS,
+ MDIO_MMD_PCS_MV_GPIO_INT_CTRL,
+ MDIO_MMD_PCS_MV_GPIO_INT_CTRL_TRI_DIS);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* Enable LED function and disable TX disable feature on LED/TX_ENABLE */
+ if (priv->enable_led0) {
+ ret = phy_clear_bits_mmd(phydev, MDIO_MMD_PCS,
+ MDIO_MMD_PCS_MV_RESET_CTRL,
+ MDIO_MMD_PCS_MV_RESET_CTRL_TX_DISABLE);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* Enable temperature sense */
+ if (priv->enable_temp) {
+ ret = phy_modify_mmd(phydev, MDIO_MMD_PCS,
+ MDIO_MMD_PCS_MV_TEMP_SENSOR2,
+ MDIO_MMD_PCS_MV_TEMP_SENSOR2_DIS_MASK, 0);
+ if (ret < 0)
+ return ret;
+ }
return 0;
}
@@ -717,16 +765,10 @@ static int mv88q2xxx_hwmon_probe(struct phy_device *phydev)
struct mv88q2xxx_priv *priv = phydev->priv;
struct device *dev = &phydev->mdio.dev;
struct device *hwmon;
- char *hwmon_name;
priv->enable_temp = true;
- hwmon_name = devm_hwmon_sanitize_name(dev, dev_name(dev));
- if (IS_ERR(hwmon_name))
- return PTR_ERR(hwmon_name);
- hwmon = devm_hwmon_device_register_with_info(dev,
- hwmon_name,
- phydev,
+ hwmon = devm_hwmon_device_register_with_info(dev, NULL, phydev,
&mv88q2xxx_hwmon_chip_info,
NULL);
@@ -740,6 +782,49 @@ static int mv88q2xxx_hwmon_probe(struct phy_device *phydev)
}
#endif
+#if IS_ENABLED(CONFIG_OF_MDIO)
+static int mv88q2xxx_leds_probe(struct phy_device *phydev)
+{
+ struct device_node *node = phydev->mdio.dev.of_node;
+ struct mv88q2xxx_priv *priv = phydev->priv;
+ struct device_node *leds;
+ int ret = 0;
+ u32 index;
+
+ if (!node)
+ return 0;
+
+ leds = of_get_child_by_name(node, "leds");
+ if (!leds)
+ return 0;
+
+ for_each_available_child_of_node_scoped(leds, led) {
+ ret = of_property_read_u32(led, "reg", &index);
+ if (ret)
+ goto exit;
+
+ if (index > MV88Q2XXX_LED_INDEX_GPIO) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ if (index == MV88Q2XXX_LED_INDEX_TX_ENABLE)
+ priv->enable_led0 = true;
+ }
+
+exit:
+ of_node_put(leds);
+
+ return ret;
+}
+
+#else
+static int mv88q2xxx_leds_probe(struct phy_device *phydev)
+{
+ return 0;
+}
+#endif
+
static int mv88q2xxx_probe(struct phy_device *phydev)
{
struct mv88q2xxx_priv *priv;
@@ -750,6 +835,21 @@ static int mv88q2xxx_probe(struct phy_device *phydev)
phydev->priv = priv;
+ return 0;
+}
+
+static int mv88q222x_probe(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = mv88q2xxx_probe(phydev);
+ if (ret)
+ return ret;
+
+ ret = mv88q2xxx_leds_probe(phydev);
+ if (ret)
+ return ret;
+
return mv88q2xxx_hwmon_probe(phydev);
}
@@ -817,18 +917,6 @@ static int mv88q222x_revb1_revb2_config_init(struct phy_device *phydev)
static int mv88q222x_config_init(struct phy_device *phydev)
{
- struct mv88q2xxx_priv *priv = phydev->priv;
- int ret;
-
- /* Enable temperature sense */
- if (priv->enable_temp) {
- ret = phy_modify_mmd(phydev, MDIO_MMD_PCS,
- MDIO_MMD_PCS_MV_TEMP_SENSOR2,
- MDIO_MMD_PCS_MV_TEMP_SENSOR2_DIS_MASK, 0);
- if (ret < 0)
- return ret;
- }
-
if (phydev->c45_ids.device_ids[MDIO_MMD_PMAPMD] == PHY_ID_88Q2220_REVB0)
return mv88q222x_revb0_config_init(phydev);
else
@@ -918,11 +1006,104 @@ static int mv88q222x_cable_test_get_status(struct phy_device *phydev,
return 0;
}
+static int mv88q2xxx_led_mode(u8 index, unsigned long rules)
+{
+ switch (rules) {
+ case BIT(TRIGGER_NETDEV_LINK):
+ return MDIO_MMD_PCS_MV_LED_FUNC_CTRL_LINK;
+ case BIT(TRIGGER_NETDEV_LINK_1000):
+ return MDIO_MMD_PCS_MV_LED_FUNC_CTRL_LINK_1000BT1_ON;
+ case BIT(TRIGGER_NETDEV_TX):
+ return MDIO_MMD_PCS_MV_LED_FUNC_CTRL_TX;
+ case BIT(TRIGGER_NETDEV_TX) | BIT(TRIGGER_NETDEV_RX):
+ return MDIO_MMD_PCS_MV_LED_FUNC_CTRL_RX_TX;
+ case BIT(TRIGGER_NETDEV_LINK) | BIT(TRIGGER_NETDEV_TX) | BIT(TRIGGER_NETDEV_RX):
+ return MDIO_MMD_PCS_MV_LED_FUNC_CTRL_LINK_RX_TX;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int mv88q2xxx_led_hw_is_supported(struct phy_device *phydev, u8 index,
+ unsigned long rules)
+{
+ int mode;
+
+ mode = mv88q2xxx_led_mode(index, rules);
+ if (mode < 0)
+ return mode;
+
+ return 0;
+}
+
+static int mv88q2xxx_led_hw_control_set(struct phy_device *phydev, u8 index,
+ unsigned long rules)
+{
+ int mode;
+
+ mode = mv88q2xxx_led_mode(index, rules);
+ if (mode < 0)
+ return mode;
+
+ if (index == MV88Q2XXX_LED_INDEX_TX_ENABLE)
+ return phy_modify_mmd(phydev, MDIO_MMD_PCS,
+ MDIO_MMD_PCS_MV_LED_FUNC_CTRL,
+ MDIO_MMD_PCS_MV_LED_FUNC_CTRL_LED_0_MASK,
+ FIELD_PREP(MDIO_MMD_PCS_MV_LED_FUNC_CTRL_LED_0_MASK,
+ mode));
+ else
+ return phy_modify_mmd(phydev, MDIO_MMD_PCS,
+ MDIO_MMD_PCS_MV_LED_FUNC_CTRL,
+ MDIO_MMD_PCS_MV_LED_FUNC_CTRL_LED_1_MASK,
+ FIELD_PREP(MDIO_MMD_PCS_MV_LED_FUNC_CTRL_LED_1_MASK,
+ mode));
+}
+
+static int mv88q2xxx_led_hw_control_get(struct phy_device *phydev, u8 index,
+ unsigned long *rules)
+{
+ int val;
+
+ val = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_MMD_PCS_MV_LED_FUNC_CTRL);
+ if (val < 0)
+ return val;
+
+ if (index == MV88Q2XXX_LED_INDEX_TX_ENABLE)
+ val = FIELD_GET(MDIO_MMD_PCS_MV_LED_FUNC_CTRL_LED_0_MASK, val);
+ else
+ val = FIELD_GET(MDIO_MMD_PCS_MV_LED_FUNC_CTRL_LED_1_MASK, val);
+
+ switch (val) {
+ case MDIO_MMD_PCS_MV_LED_FUNC_CTRL_LINK:
+ *rules = BIT(TRIGGER_NETDEV_LINK);
+ break;
+ case MDIO_MMD_PCS_MV_LED_FUNC_CTRL_LINK_1000BT1_ON:
+ *rules = BIT(TRIGGER_NETDEV_LINK_1000);
+ break;
+ case MDIO_MMD_PCS_MV_LED_FUNC_CTRL_TX:
+ *rules = BIT(TRIGGER_NETDEV_TX);
+ break;
+ case MDIO_MMD_PCS_MV_LED_FUNC_CTRL_RX_TX:
+ *rules = BIT(TRIGGER_NETDEV_TX) | BIT(TRIGGER_NETDEV_RX);
+ break;
+ case MDIO_MMD_PCS_MV_LED_FUNC_CTRL_LINK_RX_TX:
+ *rules = BIT(TRIGGER_NETDEV_LINK) | BIT(TRIGGER_NETDEV_TX) |
+ BIT(TRIGGER_NETDEV_RX);
+ break;
+ default:
+ *rules = 0;
+ break;
+ }
+
+ return 0;
+}
+
static struct phy_driver mv88q2xxx_driver[] = {
{
.phy_id = MARVELL_PHY_ID_88Q2110,
.phy_id_mask = MARVELL_PHY_ID_MASK,
.name = "mv88q2110",
+ .probe = mv88q2xxx_probe,
.get_features = mv88q2xxx_get_features,
.config_aneg = mv88q2xxx_config_aneg,
.config_init = mv88q2110_config_init,
@@ -937,7 +1118,7 @@ static struct phy_driver mv88q2xxx_driver[] = {
.phy_id_mask = MARVELL_PHY_ID_MASK,
.name = "mv88q2220",
.flags = PHY_POLL_CABLE_TEST,
- .probe = mv88q2xxx_probe,
+ .probe = mv88q222x_probe,
.get_features = mv88q2xxx_get_features,
.config_aneg = mv88q2xxx_config_aneg,
.aneg_done = genphy_c45_aneg_done,
@@ -953,6 +1134,9 @@ static struct phy_driver mv88q2xxx_driver[] = {
.get_sqi_max = mv88q2xxx_get_sqi_max,
.suspend = mv88q2xxx_suspend,
.resume = mv88q2xxx_resume,
+ .led_hw_is_supported = mv88q2xxx_led_hw_is_supported,
+ .led_hw_control_set = mv88q2xxx_led_hw_control_set,
+ .led_hw_control_get = mv88q2xxx_led_hw_control_get,
},
};
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index 44e1927de499..623292948fa7 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -2131,52 +2131,52 @@ static void marvell_get_stats_simple(struct phy_device *phydev,
data[i] = marvell_get_stat_simple(phydev, i);
}
-static int m88e1510_loopback(struct phy_device *phydev, bool enable)
+static int m88e1510_loopback(struct phy_device *phydev, bool enable, int speed)
{
+ u16 bmcr_ctl, mscr2_ctl = 0;
int err;
- if (enable) {
- u16 bmcr_ctl, mscr2_ctl = 0;
+ if (!enable)
+ return genphy_loopback(phydev, enable, 0);
- bmcr_ctl = mii_bmcr_encode_fixed(phydev->speed, phydev->duplex);
-
- err = phy_write(phydev, MII_BMCR, bmcr_ctl);
- if (err < 0)
- return err;
+ if (speed == SPEED_10 || speed == SPEED_100 || speed == SPEED_1000)
+ phydev->speed = speed;
+ else if (speed)
+ return -EINVAL;
- if (phydev->speed == SPEED_1000)
- mscr2_ctl = BMCR_SPEED1000;
- else if (phydev->speed == SPEED_100)
- mscr2_ctl = BMCR_SPEED100;
+ bmcr_ctl = mii_bmcr_encode_fixed(phydev->speed, phydev->duplex);
- err = phy_modify_paged(phydev, MII_MARVELL_MSCR_PAGE,
- MII_88E1510_MSCR_2, BMCR_SPEED1000 |
- BMCR_SPEED100, mscr2_ctl);
- if (err < 0)
- return err;
+ err = phy_write(phydev, MII_BMCR, bmcr_ctl);
+ if (err < 0)
+ return err;
- /* Need soft reset to have speed configuration takes effect */
- err = genphy_soft_reset(phydev);
- if (err < 0)
- return err;
+ if (phydev->speed == SPEED_1000)
+ mscr2_ctl = BMCR_SPEED1000;
+ else if (phydev->speed == SPEED_100)
+ mscr2_ctl = BMCR_SPEED100;
- err = phy_modify(phydev, MII_BMCR, BMCR_LOOPBACK,
- BMCR_LOOPBACK);
+ err = phy_modify_paged(phydev, MII_MARVELL_MSCR_PAGE,
+ MII_88E1510_MSCR_2, BMCR_SPEED1000 |
+ BMCR_SPEED100, mscr2_ctl);
+ if (err < 0)
+ return err;
- if (!err) {
- /* It takes some time for PHY device to switch
- * into/out-of loopback mode.
- */
- msleep(1000);
- }
+	/* Need soft reset for the speed configuration to take effect */
+ err = genphy_soft_reset(phydev);
+ if (err < 0)
return err;
- } else {
- err = phy_modify(phydev, MII_BMCR, BMCR_LOOPBACK, 0);
- if (err < 0)
- return err;
- return phy_config_aneg(phydev);
+ err = phy_modify(phydev, MII_BMCR, BMCR_LOOPBACK,
+ BMCR_LOOPBACK);
+
+ if (!err) {
+ /*
+	 * It takes some time for the PHY device to switch into loopback
+ * mode.
+ */
+ msleep(1000);
}
+ return err;
}
static int marvell_vct5_wait_complete(struct phy_device *phydev)
@@ -3124,33 +3124,13 @@ static umode_t marvell_hwmon_is_visible(const void *data,
}
}
-static u32 marvell_hwmon_chip_config[] = {
- HWMON_C_REGISTER_TZ,
- 0
-};
-
-static const struct hwmon_channel_info marvell_hwmon_chip = {
- .type = hwmon_chip,
- .config = marvell_hwmon_chip_config,
-};
-
/* we can define HWMON_T_CRIT and HWMON_T_MAX_ALARM even though these are not
* defined for all PHYs, because the hwmon code checks whether the attributes
* exists via the .is_visible method
*/
-static u32 marvell_hwmon_temp_config[] = {
- HWMON_T_INPUT | HWMON_T_CRIT | HWMON_T_MAX_ALARM,
- 0
-};
-
-static const struct hwmon_channel_info marvell_hwmon_temp = {
- .type = hwmon_temp,
- .config = marvell_hwmon_temp_config,
-};
-
static const struct hwmon_channel_info * const marvell_hwmon_info[] = {
- &marvell_hwmon_chip,
- &marvell_hwmon_temp,
+ HWMON_CHANNEL_INFO(chip, HWMON_C_REGISTER_TZ),
+ HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT | HWMON_T_CRIT | HWMON_T_MAX_ALARM),
NULL
};
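
The m88e1510_loopback() rework earlier in this file derives the forced-speed BMCR value from mii_bmcr_encode_fixed() before enabling loopback. Here is a sketch of the encoding it relies on, paraphrased rather than quoted from <linux/mii.h>, so treat the exact helper body as an assumption; the constants are standard MII register values.

/* Paraphrased sketch of the BMCR encoding used above: 1000M selects
 * BMCR_SPEED1000, 100M selects BMCR_SPEED100, 10M leaves both speed
 * bits clear, and full duplex adds BMCR_FULLDPLX. So SPEED_1000 with
 * DUPLEX_FULL encodes to 0x0140 and SPEED_100 with DUPLEX_FULL to
 * 0x2100. Relies on <linux/mii.h> and the SPEED_*/DUPLEX_* constants.
 */
static u16 example_bmcr_encode_fixed(int speed, int duplex)
{
	u16 bmcr;

	if (speed == SPEED_1000)
		bmcr = BMCR_SPEED1000;	/* 0x0040 */
	else if (speed == SPEED_100)
		bmcr = BMCR_SPEED100;	/* 0x2000 */
	else
		bmcr = 0;		/* 10 Mbit/s */

	if (duplex == DUPLEX_FULL)
		bmcr |= BMCR_FULLDPLX;	/* 0x0100 */

	return bmcr;
}
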
diff --git a/drivers/net/phy/marvell10g.c b/drivers/net/phy/marvell10g.c
index 623bdb8466b8..5354c8895163 100644
--- a/drivers/net/phy/marvell10g.c
+++ b/drivers/net/phy/marvell10g.c
@@ -230,29 +230,9 @@ static const struct hwmon_ops mv3310_hwmon_ops = {
.read = mv3310_hwmon_read,
};
-static u32 mv3310_hwmon_chip_config[] = {
- HWMON_C_REGISTER_TZ | HWMON_C_UPDATE_INTERVAL,
- 0,
-};
-
-static const struct hwmon_channel_info mv3310_hwmon_chip = {
- .type = hwmon_chip,
- .config = mv3310_hwmon_chip_config,
-};
-
-static u32 mv3310_hwmon_temp_config[] = {
- HWMON_T_INPUT,
- 0,
-};
-
-static const struct hwmon_channel_info mv3310_hwmon_temp = {
- .type = hwmon_temp,
- .config = mv3310_hwmon_temp_config,
-};
-
static const struct hwmon_channel_info * const mv3310_hwmon_info[] = {
- &mv3310_hwmon_chip,
- &mv3310_hwmon_temp,
+ HWMON_CHANNEL_INFO(chip, HWMON_C_REGISTER_TZ | HWMON_C_UPDATE_INTERVAL),
+ HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT),
NULL,
};
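
Both hwmon hunks (marvell.c above and marvell10g.c here) replace open-coded channel tables with HWMON_CHANNEL_INFO(). Below is a sketch of what that macro builds, paraphrased from <linux/hwmon.h>; the exact expansion is an assumption, but the shape explains why the removed *_hwmon_chip/*_hwmon_temp objects and their config arrays are no longer needed.

/* Paraphrased shape of HWMON_CHANNEL_INFO(): a pointer to an anonymous,
 * compound-literal channel descriptor whose config array gets a
 * terminating 0 entry, i.e. the same data the removed objects carried
 * by hand.
 */
#define EXAMPLE_HWMON_CHANNEL_INFO(stype, ...)			\
	(&(const struct hwmon_channel_info) {			\
		.type = hwmon_##stype,				\
		.config = (const u32 []) { __VA_ARGS__, 0 },	\
	})
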
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 7e2f10182c0c..ede596c1a69d 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -551,6 +551,8 @@ static int mdiobus_create_device(struct mii_bus *bus,
static struct phy_device *mdiobus_scan(struct mii_bus *bus, int addr, bool c45)
{
struct phy_device *phydev = ERR_PTR(-ENODEV);
+ struct fwnode_handle *fwnode;
+ char node_name[16];
int err;
phydev = get_phy_device(bus, addr, c45);
@@ -562,6 +564,18 @@ static struct phy_device *mdiobus_scan(struct mii_bus *bus, int addr, bool c45)
*/
of_mdiobus_link_mdiodev(bus, &phydev->mdio);
+ /* Search for a swnode for the phy in the swnode hierarchy of the bus.
+	 * If there is no swnode provided for the phy, just ignore it.
+ */
+ if (dev_fwnode(&bus->dev) && !dev_fwnode(&phydev->mdio.dev)) {
+ snprintf(node_name, sizeof(node_name), "ethernet-phy@%d",
+ addr);
+ fwnode = fwnode_get_named_child_node(dev_fwnode(&bus->dev),
+ node_name);
+ if (fwnode)
+ device_set_node(&phydev->mdio.dev, fwnode);
+ }
+
err = phy_device_register(phydev);
if (err) {
phy_device_free(phydev);
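
The mdiobus_scan() change above only consumes "ethernet-phy@<addr>" firmware nodes; providing them is outside this patch. A purely hypothetical provider-side sketch follows: an MDIO host driver could describe its PHYs with software nodes and associate the parent node with the bus device (for example via device_add_software_node() before mdiobus_register()), so the lookup above succeeds. All names, the property and the registration flow here are assumptions, not taken from the patch.

/* Hypothetical sketch, not part of the patch: a PHY at address 5
 * described under a parent software node. Needs <linux/property.h>.
 */
static const struct software_node example_mdio_node = {
	.name = "example-mdio",
};

static const struct property_entry example_phy_props[] = {
	PROPERTY_ENTRY_U32("reg", 5),
	{ }
};

static const struct software_node example_phy_node = {
	.name = "ethernet-phy@5",	/* matches the decimal "%d" lookup above */
	.parent = &example_mdio_node,
	.properties = example_phy_props,
};
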
diff --git a/drivers/net/phy/mediatek/mtk-ge-soc.c b/drivers/net/phy/mediatek/mtk-ge-soc.c
index bdf99b327029..175cf5239bba 100644
--- a/drivers/net/phy/mediatek/mtk-ge-soc.c
+++ b/drivers/net/phy/mediatek/mtk-ge-soc.c
@@ -8,6 +8,7 @@
#include <linux/phy.h>
#include <linux/regmap.h>
+#include "../phylib.h"
#include "mtk.h"
#define MTK_GPHY_ID_MT7981 0x03a29461
@@ -24,7 +25,107 @@
#define MTK_PHY_SMI_DET_ON_THRESH_MASK GENMASK(13, 8)
#define MTK_PHY_PAGE_EXTENDED_2A30 0x2a30
-#define MTK_PHY_PAGE_EXTENDED_52B5 0x52b5
+
+/* Registers on Token Ring debug nodes */
+/* ch_addr = 0x0, node_addr = 0x7, data_addr = 0x15 */
+/* NormMseLoThresh */
+#define NORMAL_MSE_LO_THRESH_MASK GENMASK(15, 8)
+
+/* ch_addr = 0x0, node_addr = 0xf, data_addr = 0x3c */
+/* RemAckCntLimitCtrl */
+#define REMOTE_ACK_COUNT_LIMIT_CTRL_MASK GENMASK(2, 1)
+
+/* ch_addr = 0x1, node_addr = 0xd, data_addr = 0x20 */
+/* VcoSlicerThreshBitsHigh */
+#define VCO_SLICER_THRESH_HIGH_MASK GENMASK(23, 0)
+
+/* ch_addr = 0x1, node_addr = 0xf, data_addr = 0x0 */
+/* DfeTailEnableVgaThresh1000 */
+#define DFE_TAIL_EANBLE_VGA_TRHESH_1000 GENMASK(5, 1)
+
+/* ch_addr = 0x1, node_addr = 0xf, data_addr = 0x1 */
+/* MrvlTrFix100Kp */
+#define MRVL_TR_FIX_100KP_MASK GENMASK(22, 20)
+/* MrvlTrFix100Kf */
+#define MRVL_TR_FIX_100KF_MASK GENMASK(19, 17)
+/* MrvlTrFix1000Kp */
+#define MRVL_TR_FIX_1000KP_MASK GENMASK(16, 14)
+/* MrvlTrFix1000Kf */
+#define MRVL_TR_FIX_1000KF_MASK GENMASK(13, 11)
+
+/* ch_addr = 0x1, node_addr = 0xf, data_addr = 0x12 */
+/* VgaDecRate */
+#define VGA_DECIMATION_RATE_MASK GENMASK(8, 5)
+
+/* ch_addr = 0x1, node_addr = 0xf, data_addr = 0x17 */
+/* SlvDSPreadyTime */
+#define SLAVE_DSP_READY_TIME_MASK GENMASK(22, 15)
+/* MasDSPreadyTime */
+#define MASTER_DSP_READY_TIME_MASK GENMASK(14, 7)
+
+/* ch_addr = 0x1, node_addr = 0xf, data_addr = 0x18 */
+/* EnabRandUpdTrig */
+#define ENABLE_RANDOM_UPDOWN_COUNTER_TRIGGER BIT(8)
+
+/* ch_addr = 0x1, node_addr = 0xf, data_addr = 0x20 */
+/* ResetSyncOffset */
+#define RESET_SYNC_OFFSET_MASK GENMASK(11, 8)
+
+/* ch_addr = 0x2, node_addr = 0xd, data_addr = 0x0 */
+/* FfeUpdGainForceVal */
+#define FFE_UPDATE_GAIN_FORCE_VAL_MASK GENMASK(9, 7)
+/* FfeUpdGainForce */
+#define FFE_UPDATE_GAIN_FORCE BIT(6)
+
+/* ch_addr = 0x2, node_addr = 0xd, data_addr = 0x3 */
+/* TrFreeze */
+#define TR_FREEZE_MASK GENMASK(11, 0)
+
+/* ch_addr = 0x2, node_addr = 0xd, data_addr = 0x6 */
+/* SS: Steady-state, KP: Proportional Gain */
+/* SSTrKp100 */
+#define SS_TR_KP100_MASK GENMASK(21, 19)
+/* SSTrKf100 */
+#define SS_TR_KF100_MASK GENMASK(18, 16)
+/* SSTrKp1000Mas */
+#define SS_TR_KP1000_MASTER_MASK GENMASK(15, 13)
+/* SSTrKf1000Mas */
+#define SS_TR_KF1000_MASTER_MASK GENMASK(12, 10)
+/* SSTrKp1000Slv */
+#define SS_TR_KP1000_SLAVE_MASK GENMASK(9, 7)
+/* SSTrKf1000Slv */
+#define SS_TR_KF1000_SLAVE_MASK GENMASK(6, 4)
+
+/* ch_addr = 0x2, node_addr = 0xd, data_addr = 0x8 */
+/* Clear this bit to select signal detection from the AFE */
+/* Regsigdet_sel_1000 */
+#define EEE1000_SELECT_SIGNAL_DETECTION_FROM_DFE BIT(4)
+
+/* ch_addr = 0x2, node_addr = 0xd, data_addr = 0xd */
+/* RegEEE_st2TrKf1000 */
+#define EEE1000_STAGE2_TR_KF_MASK GENMASK(13, 11)
+
+/* ch_addr = 0x2, node_addr = 0xd, data_addr = 0xf */
+/* RegEEE_slv_waketr_timer_tar */
+#define SLAVE_WAKETR_TIMER_MASK GENMASK(20, 11)
+/* RegEEE_slv_remtx_timer_tar */
+#define SLAVE_REMTX_TIMER_MASK GENMASK(10, 1)
+
+/* ch_addr = 0x2, node_addr = 0xd, data_addr = 0x10 */
+/* RegEEE_slv_wake_int_timer_tar */
+#define SLAVE_WAKEINT_TIMER_MASK GENMASK(10, 1)
+
+/* ch_addr = 0x2, node_addr = 0xd, data_addr = 0x14 */
+/* RegEEE_trfreeze_timer2 */
+#define TR_FREEZE_TIMER2_MASK GENMASK(9, 0)
+
+/* ch_addr = 0x2, node_addr = 0xd, data_addr = 0x1c */
+/* RegEEE100Stg1_tar */
+#define EEE100_LPSYNC_STAGE1_UPDATE_TIMER_MASK GENMASK(8, 0)
+
+/* ch_addr = 0x2, node_addr = 0xd, data_addr = 0x25 */
+/* REGEEE_wake_slv_tr_wait_dfesigdet_en */
+#define WAKE_SLAVE_TR_WAIT_DFE_DETECTION_EN BIT(11)
#define ANALOG_INTERNAL_OPERATION_MAX_US 20
#define TXRESERVE_MIN 0
@@ -701,40 +802,36 @@ restore:
static void mt798x_phy_common_finetune(struct phy_device *phydev)
{
phy_select_page(phydev, MTK_PHY_PAGE_EXTENDED_52B5);
- /* SlvDSPreadyTime = 24, MasDSPreadyTime = 24 */
- __phy_write(phydev, 0x11, 0xc71);
- __phy_write(phydev, 0x12, 0xc);
- __phy_write(phydev, 0x10, 0x8fae);
-
- /* EnabRandUpdTrig = 1 */
- __phy_write(phydev, 0x11, 0x2f00);
- __phy_write(phydev, 0x12, 0xe);
- __phy_write(phydev, 0x10, 0x8fb0);
-
- /* NormMseLoThresh = 85 */
- __phy_write(phydev, 0x11, 0x55a0);
- __phy_write(phydev, 0x12, 0x0);
- __phy_write(phydev, 0x10, 0x83aa);
-
- /* FfeUpdGainForce = 1(Enable), FfeUpdGainForceVal = 4 */
- __phy_write(phydev, 0x11, 0x240);
- __phy_write(phydev, 0x12, 0x0);
- __phy_write(phydev, 0x10, 0x9680);
-
- /* TrFreeze = 0 (mt7988 default) */
- __phy_write(phydev, 0x11, 0x0);
- __phy_write(phydev, 0x12, 0x0);
- __phy_write(phydev, 0x10, 0x9686);
-
- /* SSTrKp100 = 5 */
- /* SSTrKf100 = 6 */
- /* SSTrKp1000Mas = 5 */
- /* SSTrKf1000Mas = 6 */
- /* SSTrKp1000Slv = 5 */
- /* SSTrKf1000Slv = 6 */
- __phy_write(phydev, 0x11, 0xbaef);
- __phy_write(phydev, 0x12, 0x2e);
- __phy_write(phydev, 0x10, 0x968c);
+ __mtk_tr_modify(phydev, 0x1, 0xf, 0x17,
+ SLAVE_DSP_READY_TIME_MASK | MASTER_DSP_READY_TIME_MASK,
+ FIELD_PREP(SLAVE_DSP_READY_TIME_MASK, 0x18) |
+ FIELD_PREP(MASTER_DSP_READY_TIME_MASK, 0x18));
+
+ __mtk_tr_set_bits(phydev, 0x1, 0xf, 0x18,
+ ENABLE_RANDOM_UPDOWN_COUNTER_TRIGGER);
+
+ __mtk_tr_modify(phydev, 0x0, 0x7, 0x15,
+ NORMAL_MSE_LO_THRESH_MASK,
+ FIELD_PREP(NORMAL_MSE_LO_THRESH_MASK, 0x55));
+
+ __mtk_tr_modify(phydev, 0x2, 0xd, 0x0,
+ FFE_UPDATE_GAIN_FORCE_VAL_MASK,
+ FIELD_PREP(FFE_UPDATE_GAIN_FORCE_VAL_MASK, 0x4) |
+ FFE_UPDATE_GAIN_FORCE);
+
+ __mtk_tr_clr_bits(phydev, 0x2, 0xd, 0x3, TR_FREEZE_MASK);
+
+ __mtk_tr_modify(phydev, 0x2, 0xd, 0x6,
+ SS_TR_KP100_MASK | SS_TR_KF100_MASK |
+ SS_TR_KP1000_MASTER_MASK | SS_TR_KF1000_MASTER_MASK |
+ SS_TR_KP1000_SLAVE_MASK | SS_TR_KF1000_SLAVE_MASK,
+ FIELD_PREP(SS_TR_KP100_MASK, 0x5) |
+ FIELD_PREP(SS_TR_KF100_MASK, 0x6) |
+ FIELD_PREP(SS_TR_KP1000_MASTER_MASK, 0x5) |
+ FIELD_PREP(SS_TR_KF1000_MASTER_MASK, 0x6) |
+ FIELD_PREP(SS_TR_KP1000_SLAVE_MASK, 0x5) |
+ FIELD_PREP(SS_TR_KF1000_SLAVE_MASK, 0x6));
+
phy_restore_page(phydev, MTK_PHY_PAGE_STANDARD, 0);
}
@@ -757,27 +854,29 @@ static void mt7981_phy_finetune(struct phy_device *phydev)
}
phy_select_page(phydev, MTK_PHY_PAGE_EXTENDED_52B5);
- /* ResetSyncOffset = 6 */
- __phy_write(phydev, 0x11, 0x600);
- __phy_write(phydev, 0x12, 0x0);
- __phy_write(phydev, 0x10, 0x8fc0);
+ __mtk_tr_modify(phydev, 0x1, 0xf, 0x20,
+ RESET_SYNC_OFFSET_MASK,
+ FIELD_PREP(RESET_SYNC_OFFSET_MASK, 0x6));
- /* VgaDecRate = 1 */
- __phy_write(phydev, 0x11, 0x4c2a);
- __phy_write(phydev, 0x12, 0x3e);
- __phy_write(phydev, 0x10, 0x8fa4);
+ __mtk_tr_modify(phydev, 0x1, 0xf, 0x12,
+ VGA_DECIMATION_RATE_MASK,
+ FIELD_PREP(VGA_DECIMATION_RATE_MASK, 0x1));
/* MrvlTrFix100Kp = 3, MrvlTrFix100Kf = 2,
* MrvlTrFix1000Kp = 3, MrvlTrFix1000Kf = 2
*/
- __phy_write(phydev, 0x11, 0xd10a);
- __phy_write(phydev, 0x12, 0x34);
- __phy_write(phydev, 0x10, 0x8f82);
+ __mtk_tr_modify(phydev, 0x1, 0xf, 0x1,
+ MRVL_TR_FIX_100KP_MASK | MRVL_TR_FIX_100KF_MASK |
+ MRVL_TR_FIX_1000KP_MASK | MRVL_TR_FIX_1000KF_MASK,
+ FIELD_PREP(MRVL_TR_FIX_100KP_MASK, 0x3) |
+ FIELD_PREP(MRVL_TR_FIX_100KF_MASK, 0x2) |
+ FIELD_PREP(MRVL_TR_FIX_1000KP_MASK, 0x3) |
+ FIELD_PREP(MRVL_TR_FIX_1000KF_MASK, 0x2));
/* VcoSlicerThreshBitsHigh */
- __phy_write(phydev, 0x11, 0x5555);
- __phy_write(phydev, 0x12, 0x55);
- __phy_write(phydev, 0x10, 0x8ec0);
+ __mtk_tr_modify(phydev, 0x1, 0xd, 0x20,
+ VCO_SLICER_THRESH_HIGH_MASK,
+ FIELD_PREP(VCO_SLICER_THRESH_HIGH_MASK, 0x555555));
phy_restore_page(phydev, MTK_PHY_PAGE_STANDARD, 0);
/* TR_OPEN_LOOP_EN = 1, lpf_x_average = 9 */
@@ -829,25 +928,23 @@ static void mt7988_phy_finetune(struct phy_device *phydev)
phy_write_mmd(phydev, MDIO_MMD_VEND1, MTK_PHY_RG_TX_FILTER, 0x5);
phy_select_page(phydev, MTK_PHY_PAGE_EXTENDED_52B5);
- /* ResetSyncOffset = 5 */
- __phy_write(phydev, 0x11, 0x500);
- __phy_write(phydev, 0x12, 0x0);
- __phy_write(phydev, 0x10, 0x8fc0);
+ __mtk_tr_modify(phydev, 0x1, 0xf, 0x20,
+ RESET_SYNC_OFFSET_MASK,
+ FIELD_PREP(RESET_SYNC_OFFSET_MASK, 0x5));
/* VgaDecRate is 1 at default on mt7988 */
- /* MrvlTrFix100Kp = 6, MrvlTrFix100Kf = 7,
- * MrvlTrFix1000Kp = 6, MrvlTrFix1000Kf = 7
- */
- __phy_write(phydev, 0x11, 0xb90a);
- __phy_write(phydev, 0x12, 0x6f);
- __phy_write(phydev, 0x10, 0x8f82);
-
- /* RemAckCntLimitCtrl = 1 */
- __phy_write(phydev, 0x11, 0xfbba);
- __phy_write(phydev, 0x12, 0xc3);
- __phy_write(phydev, 0x10, 0x87f8);
-
+ __mtk_tr_modify(phydev, 0x1, 0xf, 0x1,
+ MRVL_TR_FIX_100KP_MASK | MRVL_TR_FIX_100KF_MASK |
+ MRVL_TR_FIX_1000KP_MASK | MRVL_TR_FIX_1000KF_MASK,
+ FIELD_PREP(MRVL_TR_FIX_100KP_MASK, 0x6) |
+ FIELD_PREP(MRVL_TR_FIX_100KF_MASK, 0x7) |
+ FIELD_PREP(MRVL_TR_FIX_1000KP_MASK, 0x6) |
+ FIELD_PREP(MRVL_TR_FIX_1000KF_MASK, 0x7));
+
+ __mtk_tr_modify(phydev, 0x0, 0xf, 0x3c,
+ REMOTE_ACK_COUNT_LIMIT_CTRL_MASK,
+ FIELD_PREP(REMOTE_ACK_COUNT_LIMIT_CTRL_MASK, 0x1));
phy_restore_page(phydev, MTK_PHY_PAGE_STANDARD, 0);
/* TR_OPEN_LOOP_EN = 1, lpf_x_average = 10 */
@@ -923,45 +1020,37 @@ static void mt798x_phy_eee(struct phy_device *phydev)
MTK_PHY_TR_READY_SKIP_AFE_WAKEUP);
phy_select_page(phydev, MTK_PHY_PAGE_EXTENDED_52B5);
- /* Regsigdet_sel_1000 = 0 */
- __phy_write(phydev, 0x11, 0xb);
- __phy_write(phydev, 0x12, 0x0);
- __phy_write(phydev, 0x10, 0x9690);
-
- /* REG_EEE_st2TrKf1000 = 2 */
- __phy_write(phydev, 0x11, 0x114f);
- __phy_write(phydev, 0x12, 0x2);
- __phy_write(phydev, 0x10, 0x969a);
-
- /* RegEEE_slv_wake_tr_timer_tar = 6, RegEEE_slv_remtx_timer_tar = 20 */
- __phy_write(phydev, 0x11, 0x3028);
- __phy_write(phydev, 0x12, 0x0);
- __phy_write(phydev, 0x10, 0x969e);
-
- /* RegEEE_slv_wake_int_timer_tar = 8 */
- __phy_write(phydev, 0x11, 0x5010);
- __phy_write(phydev, 0x12, 0x0);
- __phy_write(phydev, 0x10, 0x96a0);
-
- /* RegEEE_trfreeze_timer2 = 586 */
- __phy_write(phydev, 0x11, 0x24a);
- __phy_write(phydev, 0x12, 0x0);
- __phy_write(phydev, 0x10, 0x96a8);
-
- /* RegEEE100Stg1_tar = 16 */
- __phy_write(phydev, 0x11, 0x3210);
- __phy_write(phydev, 0x12, 0x0);
- __phy_write(phydev, 0x10, 0x96b8);
-
- /* REGEEE_wake_slv_tr_wait_dfesigdet_en = 0 */
- __phy_write(phydev, 0x11, 0x1463);
- __phy_write(phydev, 0x12, 0x0);
- __phy_write(phydev, 0x10, 0x96ca);
-
- /* DfeTailEnableVgaThresh1000 = 27 */
- __phy_write(phydev, 0x11, 0x36);
- __phy_write(phydev, 0x12, 0x0);
- __phy_write(phydev, 0x10, 0x8f80);
+ __mtk_tr_clr_bits(phydev, 0x2, 0xd, 0x8,
+ EEE1000_SELECT_SIGNAL_DETECTION_FROM_DFE);
+
+ __mtk_tr_modify(phydev, 0x2, 0xd, 0xd,
+ EEE1000_STAGE2_TR_KF_MASK,
+ FIELD_PREP(EEE1000_STAGE2_TR_KF_MASK, 0x2));
+
+ __mtk_tr_modify(phydev, 0x2, 0xd, 0xf,
+ SLAVE_WAKETR_TIMER_MASK | SLAVE_REMTX_TIMER_MASK,
+ FIELD_PREP(SLAVE_WAKETR_TIMER_MASK, 0x6) |
+ FIELD_PREP(SLAVE_REMTX_TIMER_MASK, 0x14));
+
+ __mtk_tr_modify(phydev, 0x2, 0xd, 0x10,
+ SLAVE_WAKEINT_TIMER_MASK,
+ FIELD_PREP(SLAVE_WAKEINT_TIMER_MASK, 0x8));
+
+ __mtk_tr_modify(phydev, 0x2, 0xd, 0x14,
+ TR_FREEZE_TIMER2_MASK,
+ FIELD_PREP(TR_FREEZE_TIMER2_MASK, 0x24a));
+
+ __mtk_tr_modify(phydev, 0x2, 0xd, 0x1c,
+ EEE100_LPSYNC_STAGE1_UPDATE_TIMER_MASK,
+ FIELD_PREP(EEE100_LPSYNC_STAGE1_UPDATE_TIMER_MASK,
+ 0x10));
+
+ __mtk_tr_clr_bits(phydev, 0x2, 0xd, 0x25,
+ WAKE_SLAVE_TR_WAIT_DFE_DETECTION_EN);
+
+ __mtk_tr_modify(phydev, 0x1, 0xf, 0x0,
+ DFE_TAIL_EANBLE_VGA_TRHESH_1000,
+ FIELD_PREP(DFE_TAIL_EANBLE_VGA_TRHESH_1000, 0x1b));
phy_restore_page(phydev, MTK_PHY_PAGE_STANDARD, 0);
phy_select_page(phydev, MTK_PHY_PAGE_EXTENDED_3);
@@ -1190,7 +1279,7 @@ static int mt798x_phy_led_hw_control_set(struct phy_device *phydev, u8 index,
static bool mt7988_phy_led_get_polarity(struct phy_device *phydev, int led_num)
{
- struct mtk_socphy_shared *priv = phydev->shared->priv;
+ struct mtk_socphy_shared *priv = phy_package_get_priv(phydev);
u32 polarities;
if (led_num == 0)
@@ -1229,7 +1318,7 @@ static int mt7988_phy_fix_leds_polarities(struct phy_device *phydev)
static int mt7988_phy_probe_shared(struct phy_device *phydev)
{
struct device_node *np = dev_of_node(&phydev->mdio.bus->dev);
- struct mtk_socphy_shared *shared = phydev->shared->priv;
+ struct mtk_socphy_shared *shared = phy_package_get_priv(phydev);
struct regmap *regmap;
u32 reg;
int ret;
@@ -1280,7 +1369,7 @@ static int mt7988_phy_probe(struct phy_device *phydev)
return err;
}
- shared = phydev->shared->priv;
+ shared = phy_package_get_priv(phydev);
priv = &shared->priv[phydev->mdio.addr];
phydev->priv = priv;
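
A quick cross-check of the token-ring conversion in mt798x_phy_common_finetune() above, written as a standalone userspace sketch. The two masks are copied from the hunk; the EX_* macros and the program itself are illustrative only.

/* Standalone sketch, not driver code: re-derive the old magic write from
 * the new field definitions. The removed sequence wrote 0x12 = 0xc and
 * 0x11 = 0xc71, i.e. a data word of 0x0c0c71, for
 * "SlvDSPreadyTime = 24, MasDSPreadyTime = 24".
 */
#include <stdint.h>
#include <stdio.h>

#define EX_GENMASK(h, l)	((~0u << (l)) & (~0u >> (31 - (h))))
#define EX_FIELD_PREP(m, v)	(((unsigned int)(v) << __builtin_ctz(m)) & (m))

#define SLAVE_DSP_READY_TIME_MASK	EX_GENMASK(22, 15)
#define MASTER_DSP_READY_TIME_MASK	EX_GENMASK(14, 7)

int main(void)
{
	unsigned int set = EX_FIELD_PREP(SLAVE_DSP_READY_TIME_MASK, 0x18) |
			   EX_FIELD_PREP(MASTER_DSP_READY_TIME_MASK, 0x18);
	unsigned int old = (0xc << 16) | 0xc71;

	printf("new fields 0x%05x, old data 0x%05x\n", set, old);
	/* Prints 0xc0c00 vs 0xc0c71: the extra 0x71 lies outside both
	 * masks, so the new read-modify-write preserves it instead of
	 * hard-coding it.
	 */
	return 0;
}
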
diff --git a/drivers/net/phy/mediatek/mtk-ge.c b/drivers/net/phy/mediatek/mtk-ge.c
index b517ca8573e7..73d9b72f9d9e 100644
--- a/drivers/net/phy/mediatek/mtk-ge.c
+++ b/drivers/net/phy/mediatek/mtk-ge.c
@@ -8,31 +8,58 @@
#define MTK_GPHY_ID_MT7530 0x03a29412
#define MTK_GPHY_ID_MT7531 0x03a29441
-#define MTK_EXT_PAGE_ACCESS 0x1f
-#define MTK_PHY_PAGE_STANDARD 0x0000
-#define MTK_PHY_PAGE_EXTENDED 0x0001
-#define MTK_PHY_PAGE_EXTENDED_2 0x0002
-#define MTK_PHY_PAGE_EXTENDED_3 0x0003
-#define MTK_PHY_PAGE_EXTENDED_2A30 0x2a30
-#define MTK_PHY_PAGE_EXTENDED_52B5 0x52b5
+#define MTK_PHY_PAGE_EXTENDED_2 0x0002
+#define MTK_PHY_PAGE_EXTENDED_3 0x0003
+#define MTK_PHY_RG_LPI_PCS_DSP_CTRL_REG11 0x11
+
+#define MTK_PHY_PAGE_EXTENDED_2A30 0x2a30
+
+/* Registers on Token Ring debug nodes */
+/* ch_addr = 0x1, node_addr = 0xf, data_addr = 0x17 */
+#define SLAVE_DSP_READY_TIME_MASK GENMASK(22, 15)
+
+/* Registers on MDIO_MMD_VEND1 */
+#define MTK_PHY_GBE_MODE_TX_DELAY_SEL 0x13
+#define MTK_PHY_TEST_MODE_TX_DELAY_SEL 0x14
+#define MTK_TX_DELAY_PAIR_B_MASK GENMASK(10, 8)
+#define MTK_TX_DELAY_PAIR_D_MASK GENMASK(2, 0)
+
+#define MTK_PHY_MCC_CTRL_AND_TX_POWER_CTRL 0xa6
+#define MTK_MCC_NEARECHO_OFFSET_MASK GENMASK(15, 8)
+
+#define MTK_PHY_RXADC_CTRL_RG7 0xc6
+#define MTK_PHY_DA_AD_BUF_BIAS_LP_MASK GENMASK(9, 8)
+
+#define MTK_PHY_RG_LPI_PCS_DSP_CTRL_REG123 0x123
+#define MTK_PHY_LPI_NORM_MSE_LO_THRESH100_MASK GENMASK(15, 8)
+#define MTK_PHY_LPI_NORM_MSE_HI_THRESH100_MASK GENMASK(7, 0)
static void mtk_gephy_config_init(struct phy_device *phydev)
{
/* Enable HW auto downshift */
- phy_modify_paged(phydev, MTK_PHY_PAGE_EXTENDED, 0x14, 0, BIT(4));
+ phy_modify_paged(phydev, MTK_PHY_PAGE_EXTENDED_1,
+ MTK_PHY_AUX_CTRL_AND_STATUS,
+ 0, MTK_PHY_ENABLE_DOWNSHIFT);
/* Increase SlvDPSready time */
- phy_select_page(phydev, MTK_PHY_PAGE_EXTENDED_52B5);
- __phy_write(phydev, 0x10, 0xafae);
- __phy_write(phydev, 0x12, 0x2f);
- __phy_write(phydev, 0x10, 0x8fae);
- phy_restore_page(phydev, MTK_PHY_PAGE_STANDARD, 0);
+ mtk_tr_modify(phydev, 0x1, 0xf, 0x17, SLAVE_DSP_READY_TIME_MASK,
+ FIELD_PREP(SLAVE_DSP_READY_TIME_MASK, 0x5e));
/* Adjust 100_mse_threshold */
- phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x123, 0xffff);
-
- /* Disable mcc */
- phy_write_mmd(phydev, MDIO_MMD_VEND1, 0xa6, 0x300);
+ phy_modify_mmd(phydev, MDIO_MMD_VEND1,
+ MTK_PHY_RG_LPI_PCS_DSP_CTRL_REG123,
+ MTK_PHY_LPI_NORM_MSE_LO_THRESH100_MASK |
+ MTK_PHY_LPI_NORM_MSE_HI_THRESH100_MASK,
+ FIELD_PREP(MTK_PHY_LPI_NORM_MSE_LO_THRESH100_MASK,
+ 0xff) |
+ FIELD_PREP(MTK_PHY_LPI_NORM_MSE_HI_THRESH100_MASK,
+ 0xff));
+
+ /* If echo time is narrower than 0x3, it will be regarded as noise */
+ phy_modify_mmd(phydev, MDIO_MMD_VEND1,
+ MTK_PHY_MCC_CTRL_AND_TX_POWER_CTRL,
+ MTK_MCC_NEARECHO_OFFSET_MASK,
+ FIELD_PREP(MTK_MCC_NEARECHO_OFFSET_MASK, 0x3));
}
static int mt7530_phy_config_init(struct phy_device *phydev)
@@ -40,7 +67,8 @@ static int mt7530_phy_config_init(struct phy_device *phydev)
mtk_gephy_config_init(phydev);
/* Increase post_update_timer */
- phy_write_paged(phydev, MTK_PHY_PAGE_EXTENDED_3, 0x11, 0x4b);
+ phy_write_paged(phydev, MTK_PHY_PAGE_EXTENDED_3,
+ MTK_PHY_RG_LPI_PCS_DSP_CTRL_REG11, 0x4b);
return 0;
}
@@ -51,11 +79,19 @@ static int mt7531_phy_config_init(struct phy_device *phydev)
/* PHY link down power saving enable */
phy_set_bits(phydev, 0x17, BIT(4));
- phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, 0xc6, 0x300);
+ phy_modify_mmd(phydev, MDIO_MMD_VEND1, MTK_PHY_RXADC_CTRL_RG7,
+ MTK_PHY_DA_AD_BUF_BIAS_LP_MASK,
+ FIELD_PREP(MTK_PHY_DA_AD_BUF_BIAS_LP_MASK, 0x3));
/* Set TX Pair delay selection */
- phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x13, 0x404);
- phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x14, 0x404);
+ phy_modify_mmd(phydev, MDIO_MMD_VEND1, MTK_PHY_GBE_MODE_TX_DELAY_SEL,
+ MTK_TX_DELAY_PAIR_B_MASK | MTK_TX_DELAY_PAIR_D_MASK,
+ FIELD_PREP(MTK_TX_DELAY_PAIR_B_MASK, 0x4) |
+ FIELD_PREP(MTK_TX_DELAY_PAIR_D_MASK, 0x4));
+ phy_modify_mmd(phydev, MDIO_MMD_VEND1, MTK_PHY_TEST_MODE_TX_DELAY_SEL,
+ MTK_TX_DELAY_PAIR_B_MASK | MTK_TX_DELAY_PAIR_D_MASK,
+ FIELD_PREP(MTK_TX_DELAY_PAIR_B_MASK, 0x4) |
+ FIELD_PREP(MTK_TX_DELAY_PAIR_D_MASK, 0x4));
return 0;
}
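
Similarly, the two register conversions in mtk_gephy_config_init() above can be checked against the removed magic writes; another standalone sketch with the same illustrative EX_* helpers (only the masks come from the hunk).

/* Standalone sketch, not driver code. */
#include <stdio.h>

#define EX_GENMASK(h, l)	((~0u << (l)) & (~0u >> (31 - (h))))
#define EX_FIELD_PREP(m, v)	(((unsigned int)(v) << __builtin_ctz(m)) & (m))

int main(void)
{
	/* Removed: phy_write_mmd(..., 0x123, 0xffff), the MSE thresholds */
	unsigned int mse = EX_FIELD_PREP(EX_GENMASK(15, 8), 0xff) |
			   EX_FIELD_PREP(EX_GENMASK(7, 0), 0xff);
	/* Removed: phy_write_mmd(..., 0xa6, 0x300), "Disable mcc" */
	unsigned int echo = EX_FIELD_PREP(EX_GENMASK(15, 8), 0x3);

	printf("mse 0x%04x, near-echo 0x%04x\n", mse, echo);
	/* Prints 0xffff and 0x0300, matching the removed writes; the
	 * modify variants now leave the unmasked bits alone.
	 */
	return 0;
}
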
diff --git a/drivers/net/phy/mediatek/mtk-phy-lib.c b/drivers/net/phy/mediatek/mtk-phy-lib.c
index 98a09d670e9c..dfd0f4e439a2 100644
--- a/drivers/net/phy/mediatek/mtk-phy-lib.c
+++ b/drivers/net/phy/mediatek/mtk-phy-lib.c
@@ -6,6 +6,83 @@
#include "mtk.h"
+/* The difference between the mtk_tr* and __mtk_tr* helpers:
+ * mtk_tr* functions: wrapped by page-switching operations
+ * __mtk_tr* functions: no page-switching operations
+ */
+
+static void __mtk_tr_access(struct phy_device *phydev, bool read, u8 ch_addr,
+ u8 node_addr, u8 data_addr)
+{
+ u16 tr_cmd = BIT(15); /* bit 14 & 0 are reserved */
+
+ if (read)
+ tr_cmd |= BIT(13);
+
+ tr_cmd |= (((ch_addr & 0x3) << 11) |
+ ((node_addr & 0xf) << 7) |
+ ((data_addr & 0x3f) << 1));
+ dev_dbg(&phydev->mdio.dev, "tr_cmd: 0x%x\n", tr_cmd);
+ __phy_write(phydev, 0x10, tr_cmd);
+}
+
+static void __mtk_tr_read(struct phy_device *phydev, u8 ch_addr, u8 node_addr,
+ u8 data_addr, u16 *tr_high, u16 *tr_low)
+{
+ __mtk_tr_access(phydev, true, ch_addr, node_addr, data_addr);
+ *tr_low = __phy_read(phydev, 0x11);
+ *tr_high = __phy_read(phydev, 0x12);
+ dev_dbg(&phydev->mdio.dev, "tr_high read: 0x%x, tr_low read: 0x%x\n",
+ *tr_high, *tr_low);
+}
+
+static void __mtk_tr_write(struct phy_device *phydev, u8 ch_addr, u8 node_addr,
+ u8 data_addr, u32 tr_data)
+{
+ __phy_write(phydev, 0x11, tr_data & 0xffff);
+ __phy_write(phydev, 0x12, tr_data >> 16);
+ dev_dbg(&phydev->mdio.dev, "tr_high write: 0x%x, tr_low write: 0x%x\n",
+ tr_data >> 16, tr_data & 0xffff);
+ __mtk_tr_access(phydev, false, ch_addr, node_addr, data_addr);
+}
+
+void __mtk_tr_modify(struct phy_device *phydev, u8 ch_addr, u8 node_addr,
+ u8 data_addr, u32 mask, u32 set)
+{
+ u32 tr_data;
+ u16 tr_high;
+ u16 tr_low;
+
+ __mtk_tr_read(phydev, ch_addr, node_addr, data_addr, &tr_high, &tr_low);
+ tr_data = (tr_high << 16) | tr_low;
+ tr_data = (tr_data & ~mask) | set;
+ __mtk_tr_write(phydev, ch_addr, node_addr, data_addr, tr_data);
+}
+EXPORT_SYMBOL_GPL(__mtk_tr_modify);
+
+void mtk_tr_modify(struct phy_device *phydev, u8 ch_addr, u8 node_addr,
+ u8 data_addr, u32 mask, u32 set)
+{
+ phy_select_page(phydev, MTK_PHY_PAGE_EXTENDED_52B5);
+ __mtk_tr_modify(phydev, ch_addr, node_addr, data_addr, mask, set);
+ phy_restore_page(phydev, MTK_PHY_PAGE_STANDARD, 0);
+}
+EXPORT_SYMBOL_GPL(mtk_tr_modify);
+
+void __mtk_tr_set_bits(struct phy_device *phydev, u8 ch_addr, u8 node_addr,
+ u8 data_addr, u32 set)
+{
+ __mtk_tr_modify(phydev, ch_addr, node_addr, data_addr, 0, set);
+}
+EXPORT_SYMBOL_GPL(__mtk_tr_set_bits);
+
+void __mtk_tr_clr_bits(struct phy_device *phydev, u8 ch_addr, u8 node_addr,
+ u8 data_addr, u32 clr)
+{
+ __mtk_tr_modify(phydev, ch_addr, node_addr, data_addr, clr, 0);
+}
+EXPORT_SYMBOL_GPL(__mtk_tr_clr_bits);
+
int mtk_phy_read_page(struct phy_device *phydev)
{
return __phy_read(phydev, MTK_EXT_PAGE_ACCESS);
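
One more cross-check, this time of __mtk_tr_access() above: the command word for ch_addr 0x1, node_addr 0xf, data_addr 0x17 reproduces the 0x8fae value the old finetune sequences wrote to register 0x10. Standalone sketch; the field layout in the comments follows the driver code above.

/* Standalone sketch, not driver code. */
#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint16_t tr_cmd = (0x1u  << 15) |	/* always set by __mtk_tr_access() */
			  (0x1u  << 11) |	/* ch_addr */
			  (0xfu  << 7)  |	/* node_addr */
			  (0x17u << 1);		/* data_addr */

	assert(tr_cmd == 0x8fae);	/* a read would additionally set bit 13 */
	return 0;
}
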
diff --git a/drivers/net/phy/mediatek/mtk.h b/drivers/net/phy/mediatek/mtk.h
index 63d9fe179b8f..320f76ffa81f 100644
--- a/drivers/net/phy/mediatek/mtk.h
+++ b/drivers/net/phy/mediatek/mtk.h
@@ -8,7 +8,13 @@
#ifndef _MTK_EPHY_H_
#define _MTK_EPHY_H_
+#define MTK_PHY_AUX_CTRL_AND_STATUS 0x14
+#define MTK_PHY_ENABLE_DOWNSHIFT BIT(4)
+
#define MTK_EXT_PAGE_ACCESS 0x1f
+#define MTK_PHY_PAGE_EXTENDED_1 0x0001
+#define MTK_PHY_PAGE_STANDARD 0x0000
+#define MTK_PHY_PAGE_EXTENDED_52B5 0x52b5
/* Registers on MDIO_MMD_VEND2 */
#define MTK_PHY_LED0_ON_CTRL 0x24
@@ -66,6 +72,15 @@ struct mtk_socphy_priv {
unsigned long led_state;
};
+void __mtk_tr_modify(struct phy_device *phydev, u8 ch_addr, u8 node_addr,
+ u8 data_addr, u32 mask, u32 set);
+void mtk_tr_modify(struct phy_device *phydev, u8 ch_addr, u8 node_addr,
+ u8 data_addr, u32 mask, u32 set);
+void __mtk_tr_set_bits(struct phy_device *phydev, u8 ch_addr, u8 node_addr,
+ u8 data_addr, u32 set);
+void __mtk_tr_clr_bits(struct phy_device *phydev, u8 ch_addr, u8 node_addr,
+ u8 data_addr, u32 clr);
+
int mtk_phy_read_page(struct phy_device *phydev);
int mtk_phy_write_page(struct phy_device *phydev, int page);
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 9c0b1c229af6..24882d30f685 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -34,6 +34,8 @@
#include <linux/net_tstamp.h>
#include <linux/gpio/consumer.h>
+#include "phylib.h"
+
/* Operation Mode Strap Override */
#define MII_KSZPHY_OMSO 0x16
#define KSZPHY_OMSO_FACTORY_TEST BIT(15)
@@ -1030,6 +1032,29 @@ static int ksz9021_config_init(struct phy_device *phydev)
#define MII_KSZ9031RN_EDPD 0x23
#define MII_KSZ9031RN_EDPD_ENABLE BIT(0)
+static int ksz9031_set_loopback(struct phy_device *phydev, bool enable,
+ int speed)
+{
+ u16 ctl = BMCR_LOOPBACK;
+ int val;
+
+ if (!enable)
+ return genphy_loopback(phydev, enable, 0);
+
+ if (speed == SPEED_10 || speed == SPEED_100 || speed == SPEED_1000)
+ phydev->speed = speed;
+ else if (speed)
+ return -EINVAL;
+ phydev->duplex = DUPLEX_FULL;
+
+ ctl |= mii_bmcr_encode_fixed(phydev->speed, phydev->duplex);
+
+ phy_write(phydev, MII_BMCR, ctl);
+
+ return phy_read_poll_timeout(phydev, MII_BMSR, val, val & BMSR_LSTATUS,
+ 5000, 500000, true);
+}
+
static int ksz9031_of_load_skew_values(struct phy_device *phydev,
const struct device_node *of_node,
u16 reg, size_t field_sz,
@@ -2631,8 +2656,7 @@ static void lan8814_ptp_tx_ts_get(struct phy_device *phydev,
static int lan8814_ts_info(struct mii_timestamper *mii_ts, struct kernel_ethtool_ts_info *info)
{
struct kszphy_ptp_priv *ptp_priv = container_of(mii_ts, struct kszphy_ptp_priv, mii_ts);
- struct phy_device *phydev = ptp_priv->phydev;
- struct lan8814_shared_priv *shared = phydev->shared->priv;
+ struct lan8814_shared_priv *shared = phy_package_get_priv(ptp_priv->phydev);
info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
@@ -3653,7 +3677,7 @@ static int lan8814_gpio_process_cap(struct lan8814_shared_priv *shared)
static int lan8814_handle_gpio_interrupt(struct phy_device *phydev, u16 status)
{
- struct lan8814_shared_priv *shared = phydev->shared->priv;
+ struct lan8814_shared_priv *shared = phy_package_get_priv(phydev);
int ret;
mutex_lock(&shared->shared_lock);
@@ -3864,7 +3888,7 @@ static void lan8814_ptp_init(struct phy_device *phydev)
static int lan8814_ptp_probe_once(struct phy_device *phydev)
{
- struct lan8814_shared_priv *shared = phydev->shared->priv;
+ struct lan8814_shared_priv *shared = phy_package_get_priv(phydev);
/* Initialise shared lock for clock*/
mutex_init(&shared->shared_lock);
@@ -5564,6 +5588,7 @@ static struct phy_driver ksphy_driver[] = {
.resume = kszphy_resume,
.cable_test_start = ksz9x31_cable_test_start,
.cable_test_get_status = ksz9x31_cable_test_get_status,
+ .set_loopback = ksz9031_set_loopback,
}, {
.phy_id = PHY_ID_LAN8814,
.phy_id_mask = MICREL_PHY_ID_MASK,
diff --git a/drivers/net/phy/mscc/mscc_main.c b/drivers/net/phy/mscc/mscc_main.c
index 19cf12ee8990..7ff975efd8e7 100644
--- a/drivers/net/phy/mscc/mscc_main.c
+++ b/drivers/net/phy/mscc/mscc_main.c
@@ -17,6 +17,8 @@
#include <linux/of.h>
#include <linux/netdevice.h>
#include <dt-bindings/net/mscc-phy-vsc8531.h>
+
+#include "../phylib.h"
#include "mscc_serdes.h"
#include "mscc.h"
diff --git a/drivers/net/phy/mscc/mscc_ptp.c b/drivers/net/phy/mscc/mscc_ptp.c
index 738a8822fcf0..ed8fb14a7f21 100644
--- a/drivers/net/phy/mscc/mscc_ptp.c
+++ b/drivers/net/phy/mscc/mscc_ptp.c
@@ -17,6 +17,7 @@
#include <linux/udp.h>
#include <linux/unaligned.h>
+#include "../phylib.h"
#include "mscc.h"
#include "mscc_ptp.h"
@@ -645,11 +646,12 @@ static int __vsc85xx_gettime(struct ptp_clock_info *info, struct timespec64 *ts)
{
struct vsc85xx_ptp *ptp = container_of(info, struct vsc85xx_ptp, caps);
struct phy_device *phydev = ptp->phydev;
- struct vsc85xx_shared_private *shared =
- (struct vsc85xx_shared_private *)phydev->shared->priv;
struct vsc8531_private *priv = phydev->priv;
+ struct vsc85xx_shared_private *shared;
u32 val;
+ shared = phy_package_get_priv(phydev);
+
val = vsc85xx_ts_read_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_CTRL);
val |= PTP_LTC_CTRL_SAVE_ENA;
vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_CTRL, val);
@@ -696,11 +698,12 @@ static int __vsc85xx_settime(struct ptp_clock_info *info,
{
struct vsc85xx_ptp *ptp = container_of(info, struct vsc85xx_ptp, caps);
struct phy_device *phydev = ptp->phydev;
- struct vsc85xx_shared_private *shared =
- (struct vsc85xx_shared_private *)phydev->shared->priv;
struct vsc8531_private *priv = phydev->priv;
+ struct vsc85xx_shared_private *shared;
u32 val;
+ shared = phy_package_get_priv(phydev);
+
vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_LOAD_SEC_MSB,
PTP_LTC_LOAD_SEC_MSB(ts->tv_sec));
vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_LOAD_SEC_LSB,
@@ -1580,8 +1583,7 @@ int vsc8584_ptp_probe(struct phy_device *phydev)
int vsc8584_ptp_probe_once(struct phy_device *phydev)
{
- struct vsc85xx_shared_private *shared =
- (struct vsc85xx_shared_private *)phydev->shared->priv;
+ struct vsc85xx_shared_private *shared = phy_package_get_priv(phydev);
/* Initialize shared GPIO lock */
mutex_init(&shared->gpio_lock);
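
The phydev->shared->priv dereferences replaced throughout this series (mtk-ge-soc.c, micrel.c and the mscc hunks above) all move to phy_package_get_priv(). A hedged sketch of the accessor's assumed shape, paraphrased from drivers/net/phy/phylib.h rather than quoted:

/* Assumed shape of the accessor, for illustration only: it hides the
 * PHY-package bookkeeping behind a helper so drivers stop reaching
 * into phydev->shared directly.
 */
static inline void *example_phy_package_get_priv(struct phy_device *phydev)
{
	return phydev->shared->priv;
}
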
diff --git a/drivers/net/phy/mxl-gpy.c b/drivers/net/phy/mxl-gpy.c
index 94d9cb727121..0c8dc16ee7bd 100644
--- a/drivers/net/phy/mxl-gpy.c
+++ b/drivers/net/phy/mxl-gpy.c
@@ -225,14 +225,8 @@ static int gpy_hwmon_register(struct phy_device *phydev)
{
struct device *dev = &phydev->mdio.dev;
struct device *hwmon_dev;
- char *hwmon_name;
- hwmon_name = devm_hwmon_sanitize_name(dev, dev_name(dev));
- if (IS_ERR(hwmon_name))
- return PTR_ERR(hwmon_name);
-
- hwmon_dev = devm_hwmon_device_register_with_info(dev, hwmon_name,
- phydev,
+ hwmon_dev = devm_hwmon_device_register_with_info(dev, NULL, phydev,
&gpy_hwmon_chip_info,
NULL);
@@ -813,7 +807,7 @@ static void gpy_get_wol(struct phy_device *phydev,
wol->wolopts = priv->wolopts;
}
-static int gpy_loopback(struct phy_device *phydev, bool enable)
+static int gpy_loopback(struct phy_device *phydev, bool enable, int speed)
{
struct gpy_priv *priv = phydev->priv;
u16 set = 0;
@@ -822,6 +816,9 @@ static int gpy_loopback(struct phy_device *phydev, bool enable)
if (enable) {
u64 now = get_jiffies_64();
+ if (speed)
+ return -EOPNOTSUPP;
+
/* wait until 3 seconds from last disable */
if (time_before64(now, priv->lb_dis_to))
msleep(jiffies64_to_msecs(priv->lb_dis_to - now));
@@ -845,15 +842,15 @@ static int gpy_loopback(struct phy_device *phydev, bool enable)
return 0;
}
-static int gpy115_loopback(struct phy_device *phydev, bool enable)
+static int gpy115_loopback(struct phy_device *phydev, bool enable, int speed)
{
struct gpy_priv *priv = phydev->priv;
if (enable)
- return gpy_loopback(phydev, enable);
+ return gpy_loopback(phydev, enable, speed);
if (priv->fw_minor > 0x76)
- return gpy_loopback(phydev, 0);
+ return gpy_loopback(phydev, 0, 0);
return genphy_soft_reset(phydev);
}
diff --git a/drivers/net/phy/nxp-c45-tja11xx.c b/drivers/net/phy/nxp-c45-tja11xx.c
index 34231b5b9175..250a018d5546 100644
--- a/drivers/net/phy/nxp-c45-tja11xx.c
+++ b/drivers/net/phy/nxp-c45-tja11xx.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/* NXP C45 PHY driver
- * Copyright 2021-2023 NXP
+ * Copyright 2021-2025 NXP
* Author: Radu Pirea <radu-nicolae.pirea@oss.nxp.com>
*/
@@ -19,9 +19,17 @@
#include "nxp-c45-tja11xx.h"
+#define PHY_ID_MASK GENMASK(31, 4)
+/* Same id: TJA1103, TJA1104 */
#define PHY_ID_TJA_1103 0x001BB010
+/* Same id: TJA1120, TJA1121 */
#define PHY_ID_TJA_1120 0x001BB031
+#define VEND1_DEVICE_ID3 0x0004
+#define TJA1120_DEV_ID3_SILICON_VERSION GENMASK(15, 12)
+#define TJA1120_DEV_ID3_SAMPLE_TYPE GENMASK(11, 8)
+#define DEVICE_ID3_SAMPLE_TYPE_R 0x9
+
#define VEND1_DEVICE_CONTROL 0x0040
#define DEVICE_CONTROL_RESET BIT(15)
#define DEVICE_CONTROL_CONFIG_GLOBAL_EN BIT(14)
@@ -109,6 +117,9 @@
#define MII_BASIC_CONFIG_RMII 0x5
#define MII_BASIC_CONFIG_MII 0x4
+#define VEND1_SGMII_BASIC_CONTROL 0xB000
+#define SGMII_LPM BIT(11)
+
#define VEND1_SYMBOL_ERROR_CNT_XTD 0x8351
#define EXTENDED_CNT_EN BIT(15)
#define VEND1_MONITOR_STATUS 0xAC80
@@ -1593,6 +1604,63 @@ static int nxp_c45_set_phy_mode(struct phy_device *phydev)
return 0;
}
+/* Errata: ES_TJA1120 and ES_TJA1121 Rev. 1.0 — 28 November 2024 Section 3.1 & 3.2 */
+static void nxp_c45_tja1120_errata(struct phy_device *phydev)
+{
+ bool macsec_ability, sgmii_ability;
+ int silicon_version, sample_type;
+ int phy_abilities;
+ int ret = 0;
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_DEVICE_ID3);
+ if (ret < 0)
+ return;
+
+ sample_type = FIELD_GET(TJA1120_DEV_ID3_SAMPLE_TYPE, ret);
+ if (sample_type != DEVICE_ID3_SAMPLE_TYPE_R)
+ return;
+
+ silicon_version = FIELD_GET(TJA1120_DEV_ID3_SILICON_VERSION, ret);
+
+ phy_abilities = phy_read_mmd(phydev, MDIO_MMD_VEND1,
+ VEND1_PORT_ABILITIES);
+ macsec_ability = !!(phy_abilities & MACSEC_ABILITY);
+ sgmii_ability = !!(phy_abilities & SGMII_ABILITY);
+ if ((!macsec_ability && silicon_version == 2) ||
+ (macsec_ability && silicon_version == 1)) {
+ /* TJA1120/TJA1121 PHY configuration errata workaround.
+		 * Apply the PHY write sequence before link up.
+ */
+ if (!macsec_ability) {
+ phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x01F8, 0x4b95);
+ phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x01F9, 0xf3cd);
+ } else {
+ phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x01F8, 0x89c7);
+ phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x01F9, 0x0893);
+ }
+
+ phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x0476, 0x58a0);
+
+ phy_write_mmd(phydev, MDIO_MMD_PMAPMD, 0x8921, 0xa3a);
+ phy_write_mmd(phydev, MDIO_MMD_PMAPMD, 0x89F1, 0x16c1);
+
+ phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x01F8, 0x0);
+ phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x01F9, 0x0);
+
+ if (sgmii_ability) {
+ /* TJA1120B/TJA1121B SGMII PCS restart errata workaround.
+ * Put SGMII PCS into power down mode and back up.
+ */
+ phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
+ VEND1_SGMII_BASIC_CONTROL,
+ SGMII_LPM);
+ phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
+ VEND1_SGMII_BASIC_CONTROL,
+ SGMII_LPM);
+ }
+ }
+}
+
static int nxp_c45_config_init(struct phy_device *phydev)
{
int ret;
@@ -1609,6 +1677,9 @@ static int nxp_c45_config_init(struct phy_device *phydev)
phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x01F8, 1);
phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x01F9, 2);
+ if (phy_id_compare(phydev->phy_id, PHY_ID_TJA_1120, GENMASK(31, 4)))
+ nxp_c45_tja1120_errata(phydev);
+
phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_PHY_CONFIG,
PHY_CONFIG_AUTO);
@@ -1888,6 +1959,42 @@ static void tja1120_nmi_handler(struct phy_device *phydev,
}
}
+static int nxp_c45_macsec_ability(struct phy_device *phydev)
+{
+ bool macsec_ability;
+ int phy_abilities;
+
+ phy_abilities = phy_read_mmd(phydev, MDIO_MMD_VEND1,
+ VEND1_PORT_ABILITIES);
+ macsec_ability = !!(phy_abilities & MACSEC_ABILITY);
+
+ return macsec_ability;
+}
+
+static int tja1103_match_phy_device(struct phy_device *phydev)
+{
+ return phy_id_compare(phydev->phy_id, PHY_ID_TJA_1103, PHY_ID_MASK) &&
+ !nxp_c45_macsec_ability(phydev);
+}
+
+static int tja1104_match_phy_device(struct phy_device *phydev)
+{
+ return phy_id_compare(phydev->phy_id, PHY_ID_TJA_1103, PHY_ID_MASK) &&
+ nxp_c45_macsec_ability(phydev);
+}
+
+static int tja1120_match_phy_device(struct phy_device *phydev)
+{
+ return phy_id_compare(phydev->phy_id, PHY_ID_TJA_1120, PHY_ID_MASK) &&
+ !nxp_c45_macsec_ability(phydev);
+}
+
+static int tja1121_match_phy_device(struct phy_device *phydev)
+{
+ return phy_id_compare(phydev->phy_id, PHY_ID_TJA_1120, PHY_ID_MASK) &&
+ nxp_c45_macsec_ability(phydev);
+}
+
static const struct nxp_c45_regmap tja1120_regmap = {
.vend1_ptp_clk_period = 0x1020,
.vend1_event_msg_filt = 0x9010,
@@ -1958,7 +2065,6 @@ static const struct nxp_c45_phy_data tja1120_phy_data = {
static struct phy_driver nxp_c45_driver[] = {
{
- PHY_ID_MATCH_MODEL(PHY_ID_TJA_1103),
.name = "NXP C45 TJA1103",
.get_features = nxp_c45_get_features,
.driver_data = &tja1103_phy_data,
@@ -1980,9 +2086,33 @@ static struct phy_driver nxp_c45_driver[] = {
.get_sqi = nxp_c45_get_sqi,
.get_sqi_max = nxp_c45_get_sqi_max,
.remove = nxp_c45_remove,
+ .match_phy_device = tja1103_match_phy_device,
+ },
+ {
+ .name = "NXP C45 TJA1104",
+ .get_features = nxp_c45_get_features,
+ .driver_data = &tja1103_phy_data,
+ .probe = nxp_c45_probe,
+ .soft_reset = nxp_c45_soft_reset,
+ .config_aneg = genphy_c45_config_aneg,
+ .config_init = nxp_c45_config_init,
+ .config_intr = tja1103_config_intr,
+ .handle_interrupt = nxp_c45_handle_interrupt,
+ .read_status = genphy_c45_read_status,
+ .suspend = genphy_c45_pma_suspend,
+ .resume = genphy_c45_pma_resume,
+ .get_sset_count = nxp_c45_get_sset_count,
+ .get_strings = nxp_c45_get_strings,
+ .get_stats = nxp_c45_get_stats,
+ .cable_test_start = nxp_c45_cable_test_start,
+ .cable_test_get_status = nxp_c45_cable_test_get_status,
+ .set_loopback = genphy_c45_loopback,
+ .get_sqi = nxp_c45_get_sqi,
+ .get_sqi_max = nxp_c45_get_sqi_max,
+ .remove = nxp_c45_remove,
+ .match_phy_device = tja1104_match_phy_device,
},
{
- PHY_ID_MATCH_MODEL(PHY_ID_TJA_1120),
.name = "NXP C45 TJA1120",
.get_features = nxp_c45_get_features,
.driver_data = &tja1120_phy_data,
@@ -2005,6 +2135,32 @@ static struct phy_driver nxp_c45_driver[] = {
.get_sqi = nxp_c45_get_sqi,
.get_sqi_max = nxp_c45_get_sqi_max,
.remove = nxp_c45_remove,
+ .match_phy_device = tja1120_match_phy_device,
+ },
+ {
+ .name = "NXP C45 TJA1121",
+ .get_features = nxp_c45_get_features,
+ .driver_data = &tja1120_phy_data,
+ .probe = nxp_c45_probe,
+ .soft_reset = nxp_c45_soft_reset,
+ .config_aneg = genphy_c45_config_aneg,
+ .config_init = nxp_c45_config_init,
+ .config_intr = tja1120_config_intr,
+ .handle_interrupt = nxp_c45_handle_interrupt,
+ .read_status = genphy_c45_read_status,
+ .link_change_notify = tja1120_link_change_notify,
+ .suspend = genphy_c45_pma_suspend,
+ .resume = genphy_c45_pma_resume,
+ .get_sset_count = nxp_c45_get_sset_count,
+ .get_strings = nxp_c45_get_strings,
+ .get_stats = nxp_c45_get_stats,
+ .cable_test_start = nxp_c45_cable_test_start,
+ .cable_test_get_status = nxp_c45_cable_test_get_status,
+ .set_loopback = genphy_c45_loopback,
+ .get_sqi = nxp_c45_get_sqi,
+ .get_sqi_max = nxp_c45_get_sqi_max,
+ .remove = nxp_c45_remove,
+ .match_phy_device = tja1121_match_phy_device,
},
};
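
The PHY_ID_MATCH_MODEL() entries above give way to .match_phy_device callbacks because TJA1103/TJA1104 and TJA1120/TJA1121 each share one PHY ID and differ only in MACsec ability. Below is a standalone sketch of the masked comparison those callbacks build on; the helper body is paraphrased, not quoted, and only PHY_ID_MASK (GENMASK(31, 4)) comes from the hunk.

/* Standalone sketch, not driver code: revision bits are ignored via the
 * GENMASK(31, 4) mask, so 0x001bb010 and 0x001bb013 match, while the
 * MACsec-ability read then selects the TJA1103 or TJA1104 entry.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define EX_PHY_ID_MASK	0xfffffff0u	/* GENMASK(31, 4) */

static bool ex_phy_id_compare(uint32_t id1, uint32_t id2, uint32_t mask)
{
	return !((id1 ^ id2) & mask);
}

int main(void)
{
	assert(ex_phy_id_compare(0x001bb013, 0x001bb010, EX_PHY_ID_MASK));
	assert(!ex_phy_id_compare(0x001bb031, 0x001bb010, EX_PHY_ID_MASK));
	return 0;
}
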
diff --git a/drivers/net/phy/nxp-tja11xx.c b/drivers/net/phy/nxp-tja11xx.c
index ed7fa26bac8e..07e94a2478ac 100644
--- a/drivers/net/phy/nxp-tja11xx.c
+++ b/drivers/net/phy/nxp-tja11xx.c
@@ -21,12 +21,14 @@
#define PHY_ID_TJA1100 0x0180dc40
#define PHY_ID_TJA1101 0x0180dd00
#define PHY_ID_TJA1102 0x0180dc80
+#define PHY_ID_TJA1102S 0x0180dc90
#define MII_ECTRL 17
#define MII_ECTRL_LINK_CONTROL BIT(15)
#define MII_ECTRL_POWER_MODE_MASK GENMASK(14, 11)
#define MII_ECTRL_POWER_MODE_NO_CHANGE (0x0 << 11)
#define MII_ECTRL_POWER_MODE_NORMAL (0x3 << 11)
+#define MII_ECTRL_POWER_MODE_SLEEP (0xa << 11)
#define MII_ECTRL_POWER_MODE_STANDBY (0xc << 11)
#define MII_ECTRL_CABLE_TEST BIT(5)
#define MII_ECTRL_CONFIG_EN BIT(2)
@@ -78,12 +80,13 @@
#define MII_COMMCFG 27
#define MII_COMMCFG_AUTO_OP BIT(15)
+#define MII_CFG3 28
+#define MII_CFG3_PHY_EN BIT(0)
+
/* Configure REF_CLK as input in RMII mode */
#define TJA110X_RMII_MODE_REFCLK_IN BIT(0)
struct tja11xx_priv {
- char *hwmon_name;
- struct device *hwmon_dev;
struct phy_device *phydev;
struct work_struct phy_register_work;
u32 flags;
@@ -179,6 +182,14 @@ static int tja11xx_wakeup(struct phy_device *phydev)
return ret;
return tja11xx_enable_link_control(phydev);
+ case MII_ECTRL_POWER_MODE_SLEEP:
+ switch (phydev->phy_id & PHY_ID_MASK) {
+ case PHY_ID_TJA1102S:
+ /* Enable PHY, maybe it is disabled due to pin strapping */
+ return phy_set_bits(phydev, MII_CFG3, MII_CFG3_PHY_EN);
+ default:
+ return 0;
+ }
default:
break;
}
@@ -316,6 +327,7 @@ static int tja11xx_config_init(struct phy_device *phydev)
if (ret)
return ret;
break;
+ case PHY_ID_TJA1102S:
case PHY_ID_TJA1101:
reg_mask = MII_CFG1_INTERFACE_MODE_MASK;
ret = tja11xx_get_interface_mode(phydev);
@@ -494,19 +506,12 @@ static const struct hwmon_chip_info tja11xx_hwmon_chip_info = {
static int tja11xx_hwmon_register(struct phy_device *phydev,
struct tja11xx_priv *priv)
{
- struct device *dev = &phydev->mdio.dev;
-
- priv->hwmon_name = devm_hwmon_sanitize_name(dev, dev_name(dev));
- if (IS_ERR(priv->hwmon_name))
- return PTR_ERR(priv->hwmon_name);
+ struct device *hdev, *dev = &phydev->mdio.dev;
- priv->hwmon_dev =
- devm_hwmon_device_register_with_info(dev, priv->hwmon_name,
- phydev,
- &tja11xx_hwmon_chip_info,
- NULL);
-
- return PTR_ERR_OR_ZERO(priv->hwmon_dev);
+ hdev = devm_hwmon_device_register_with_info(dev, NULL, phydev,
+ &tja11xx_hwmon_chip_info,
+ NULL);
+ return PTR_ERR_OR_ZERO(hdev);
}
static int tja11xx_parse_dt(struct phy_device *phydev)
@@ -883,6 +888,29 @@ static struct phy_driver tja11xx_driver[] = {
.handle_interrupt = tja11xx_handle_interrupt,
.cable_test_start = tja11xx_cable_test_start,
.cable_test_get_status = tja11xx_cable_test_get_status,
+ }, {
+ PHY_ID_MATCH_MODEL(PHY_ID_TJA1102S),
+ .name = "NXP TJA1102S",
+ .features = PHY_BASIC_T1_FEATURES,
+ .flags = PHY_POLL_CABLE_TEST,
+ .probe = tja11xx_probe,
+ .soft_reset = tja11xx_soft_reset,
+ .config_aneg = tja11xx_config_aneg,
+ .config_init = tja11xx_config_init,
+ .read_status = tja11xx_read_status,
+ .get_sqi = tja11xx_get_sqi,
+ .get_sqi_max = tja11xx_get_sqi_max,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
+ .set_loopback = genphy_loopback,
+ /* Statistics */
+ .get_sset_count = tja11xx_get_sset_count,
+ .get_strings = tja11xx_get_strings,
+ .get_stats = tja11xx_get_stats,
+ .config_intr = tja11xx_config_intr,
+ .handle_interrupt = tja11xx_handle_interrupt,
+ .cable_test_start = tja11xx_cable_test_start,
+ .cable_test_get_status = tja11xx_cable_test_get_status,
}
};
@@ -892,6 +920,7 @@ static const struct mdio_device_id __maybe_unused tja11xx_tbl[] = {
{ PHY_ID_MATCH_MODEL(PHY_ID_TJA1100) },
{ PHY_ID_MATCH_MODEL(PHY_ID_TJA1101) },
{ PHY_ID_MATCH_MODEL(PHY_ID_TJA1102) },
+ { PHY_ID_MATCH_MODEL(PHY_ID_TJA1102S) },
{ }
};
diff --git a/drivers/net/phy/phy-c45.c b/drivers/net/phy/phy-c45.c
index 0dac08e85304..bdd70d424491 100644
--- a/drivers/net/phy/phy-c45.c
+++ b/drivers/net/phy/phy-c45.c
@@ -9,6 +9,7 @@
#include <linux/phy.h>
#include "mdio-open-alliance.h"
+#include "phylib-internal.h"
/**
* genphy_c45_baset1_able - checks if the PMA has BASE-T1 extended abilities
@@ -683,13 +684,10 @@ EXPORT_SYMBOL_GPL(genphy_c45_read_mdix);
static int genphy_c45_write_eee_adv(struct phy_device *phydev,
unsigned long *adv)
{
- __ETHTOOL_DECLARE_LINK_MODE_MASK(tmp);
int val, changed = 0;
- linkmode_andnot(tmp, adv, phydev->eee_broken_modes);
-
if (linkmode_intersects(phydev->supported_eee, PHY_EEE_CAP1_FEATURES)) {
- val = linkmode_to_mii_eee_cap1_t(tmp);
+ val = linkmode_to_mii_eee_cap1_t(adv);
/* IEEE 802.3-2018 45.2.7.13 EEE advertisement 1
* (Register 7.60)
@@ -707,7 +705,7 @@ static int genphy_c45_write_eee_adv(struct phy_device *phydev,
}
if (linkmode_intersects(phydev->supported_eee, PHY_EEE_CAP2_FEATURES)) {
- val = linkmode_to_mii_eee_cap2_t(tmp);
+ val = linkmode_to_mii_eee_cap2_t(adv);
/* IEEE 802.3-2022 45.2.7.16 EEE advertisement 2
* (Register 7.62)
@@ -1230,8 +1228,11 @@ int gen10g_config_aneg(struct phy_device *phydev)
}
EXPORT_SYMBOL_GPL(gen10g_config_aneg);
-int genphy_c45_loopback(struct phy_device *phydev, bool enable)
+int genphy_c45_loopback(struct phy_device *phydev, bool enable, int speed)
{
+ if (enable && speed)
+ return -EOPNOTSUPP;
+
return phy_modify_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1,
MDIO_PCS_CTRL1_LOOPBACK,
enable ? MDIO_PCS_CTRL1_LOOPBACK : 0);
@@ -1467,42 +1468,32 @@ EXPORT_SYMBOL_GPL(genphy_c45_plca_get_status);
/**
* genphy_c45_eee_is_active - get EEE status
* @phydev: target phy_device struct
- * @adv: variable to store advertised linkmodes
* @lp: variable to store LP advertised linkmodes
*
- * Description: this function will read local and link partner PHY
- * advertisements. Compare them return current EEE state.
+ * Description: this function reads the link partner's EEE advertisement
+ * and compares it to the local advertisement to determine the current
+ * EEE state.
*/
-int genphy_c45_eee_is_active(struct phy_device *phydev, unsigned long *adv,
- unsigned long *lp)
+int genphy_c45_eee_is_active(struct phy_device *phydev, unsigned long *lp)
{
- __ETHTOOL_DECLARE_LINK_MODE_MASK(tmp_adv) = {};
__ETHTOOL_DECLARE_LINK_MODE_MASK(tmp_lp) = {};
__ETHTOOL_DECLARE_LINK_MODE_MASK(common);
- bool eee_active;
int ret;
- ret = genphy_c45_read_eee_adv(phydev, tmp_adv);
- if (ret)
- return ret;
+ if (!phydev->eee_cfg.eee_enabled)
+ return 0;
ret = genphy_c45_read_eee_lpa(phydev, tmp_lp);
if (ret)
return ret;
- linkmode_and(common, tmp_adv, tmp_lp);
- if (!linkmode_empty(tmp_adv) && !linkmode_empty(common))
- eee_active = phy_check_valid(phydev->speed, phydev->duplex,
- common);
- else
- eee_active = false;
-
- if (adv)
- linkmode_copy(adv, tmp_adv);
if (lp)
linkmode_copy(lp, tmp_lp);
- return eee_active;
+ linkmode_and(common, phydev->advertising_eee, tmp_lp);
+ if (linkmode_empty(common))
+ return 0;
+
+ return phy_check_valid(phydev->speed, phydev->duplex, common);
}
EXPORT_SYMBOL(genphy_c45_eee_is_active);
@@ -1519,14 +1510,14 @@ int genphy_c45_ethtool_get_eee(struct phy_device *phydev,
{
int ret;
- ret = genphy_c45_eee_is_active(phydev, data->advertised,
- data->lp_advertised);
+ ret = genphy_c45_eee_is_active(phydev, data->lp_advertised);
if (ret < 0)
return ret;
data->eee_active = phydev->eee_active;
- linkmode_copy(data->supported, phydev->supported_eee);
-
+ linkmode_andnot(data->supported, phydev->supported_eee,
+ phydev->eee_disabled_modes);
+ linkmode_copy(data->advertised, phydev->advertising_eee);
return 0;
}
EXPORT_SYMBOL(genphy_c45_ethtool_get_eee);
@@ -1559,7 +1550,9 @@ int genphy_c45_ethtool_set_eee(struct phy_device *phydev,
phydev_warn(phydev, "At least some EEE link modes are not supported.\n");
return -EINVAL;
}
- linkmode_copy(phydev->advertising_eee, adv);
+
+ linkmode_andnot(phydev->advertising_eee, adv,
+ phydev->eee_disabled_modes);
} else if (linkmode_empty(phydev->advertising_eee)) {
phy_advertise_eee_all(phydev);
}
diff --git a/drivers/net/phy/phy-caps.h b/drivers/net/phy/phy-caps.h
new file mode 100644
index 000000000000..157759966650
--- /dev/null
+++ b/drivers/net/phy/phy-caps.h
@@ -0,0 +1,63 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * link caps internal header, for link modes <-> capabilities <-> interfaces
+ * conversions.
+ */
+
+#ifndef __PHY_CAPS_H
+#define __PHY_CAPS_H
+
+#include <linux/ethtool.h>
+#include <linux/phy.h>
+
+enum {
+ LINK_CAPA_10HD = 0,
+ LINK_CAPA_10FD,
+ LINK_CAPA_100HD,
+ LINK_CAPA_100FD,
+ LINK_CAPA_1000HD,
+ LINK_CAPA_1000FD,
+ LINK_CAPA_2500FD,
+ LINK_CAPA_5000FD,
+ LINK_CAPA_10000FD,
+ LINK_CAPA_20000FD,
+ LINK_CAPA_25000FD,
+ LINK_CAPA_40000FD,
+ LINK_CAPA_50000FD,
+ LINK_CAPA_56000FD,
+ LINK_CAPA_100000FD,
+ LINK_CAPA_200000FD,
+ LINK_CAPA_400000FD,
+ LINK_CAPA_800000FD,
+
+ __LINK_CAPA_MAX,
+};
+
+#define LINK_CAPA_ALL GENMASK((__LINK_CAPA_MAX - 1), 0)
+
+struct link_capabilities {
+ int speed;
+ unsigned int duplex;
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(linkmodes);
+};
+
+int phy_caps_init(void);
+
+size_t phy_caps_speeds(unsigned int *speeds, size_t size,
+ unsigned long *linkmodes);
+void phy_caps_linkmode_max_speed(u32 max_speed, unsigned long *linkmodes);
+bool phy_caps_valid(int speed, int duplex, const unsigned long *linkmodes);
+void phy_caps_linkmodes(unsigned long caps, unsigned long *linkmodes);
+unsigned long phy_caps_from_interface(phy_interface_t interface);
+
+const struct link_capabilities *
+phy_caps_lookup_by_linkmode(const unsigned long *linkmodes);
+
+const struct link_capabilities *
+phy_caps_lookup_by_linkmode_rev(const unsigned long *linkmodes, bool fdx_only);
+
+const struct link_capabilities *
+phy_caps_lookup(int speed, unsigned int duplex, const unsigned long *supported,
+ bool exact);
+
+#endif /* __PHY_CAPS_H */
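
The new header above only declares the capability API. Here is a hypothetical caller, based purely on those declarations and on the phy_lookup_setting() semantics being replaced in phy-core.c below, so the exact lookup behaviour is an assumption and the function name is illustrative.

/* Hypothetical caller sketch, not part of the patch: resolve the
 * fastest supported full-duplex capability at or below a requested
 * speed (presumably mirroring what phy_lookup_setting() did with
 * exact == false).
 */
static int example_pick_fastest_speed(struct phy_device *phydev, int max_speed)
{
	const struct link_capabilities *c;

	c = phy_caps_lookup(max_speed, DUPLEX_FULL, phydev->supported, false);
	if (!c)
		return -EINVAL;

	return c->speed;
}
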
diff --git a/drivers/net/phy/phy-core.c b/drivers/net/phy/phy-core.c
index f181f05cb429..e177037f9110 100644
--- a/drivers/net/phy/phy-core.c
+++ b/drivers/net/phy/phy-core.c
@@ -6,6 +6,10 @@
#include <linux/phy.h>
#include <linux/of.h>
+#include "phylib.h"
+#include "phylib-internal.h"
+#include "phy-caps.h"
+
/**
* phy_speed_to_str - Return a string representing the PHY link speed
*
@@ -153,221 +157,9 @@ int phy_interface_num_ports(phy_interface_t interface)
}
EXPORT_SYMBOL_GPL(phy_interface_num_ports);
-/* A mapping of all SUPPORTED settings to speed/duplex. This table
- * must be grouped by speed and sorted in descending match priority
- * - iow, descending speed.
- */
-
-#define PHY_SETTING(s, d, b) { .speed = SPEED_ ## s, .duplex = DUPLEX_ ## d, \
- .bit = ETHTOOL_LINK_MODE_ ## b ## _BIT}
-
-static const struct phy_setting settings[] = {
- /* 800G */
- PHY_SETTING( 800000, FULL, 800000baseCR8_Full ),
- PHY_SETTING( 800000, FULL, 800000baseKR8_Full ),
- PHY_SETTING( 800000, FULL, 800000baseDR8_Full ),
- PHY_SETTING( 800000, FULL, 800000baseDR8_2_Full ),
- PHY_SETTING( 800000, FULL, 800000baseSR8_Full ),
- PHY_SETTING( 800000, FULL, 800000baseVR8_Full ),
- PHY_SETTING( 800000, FULL, 800000baseCR4_Full ),
- PHY_SETTING( 800000, FULL, 800000baseKR4_Full ),
- PHY_SETTING( 800000, FULL, 800000baseDR4_Full ),
- PHY_SETTING( 800000, FULL, 800000baseDR4_2_Full ),
- PHY_SETTING( 800000, FULL, 800000baseSR4_Full ),
- PHY_SETTING( 800000, FULL, 800000baseVR4_Full ),
- /* 400G */
- PHY_SETTING( 400000, FULL, 400000baseCR8_Full ),
- PHY_SETTING( 400000, FULL, 400000baseKR8_Full ),
- PHY_SETTING( 400000, FULL, 400000baseLR8_ER8_FR8_Full ),
- PHY_SETTING( 400000, FULL, 400000baseDR8_Full ),
- PHY_SETTING( 400000, FULL, 400000baseSR8_Full ),
- PHY_SETTING( 400000, FULL, 400000baseCR4_Full ),
- PHY_SETTING( 400000, FULL, 400000baseKR4_Full ),
- PHY_SETTING( 400000, FULL, 400000baseLR4_ER4_FR4_Full ),
- PHY_SETTING( 400000, FULL, 400000baseDR4_Full ),
- PHY_SETTING( 400000, FULL, 400000baseSR4_Full ),
- PHY_SETTING( 400000, FULL, 400000baseCR2_Full ),
- PHY_SETTING( 400000, FULL, 400000baseKR2_Full ),
- PHY_SETTING( 400000, FULL, 400000baseDR2_Full ),
- PHY_SETTING( 400000, FULL, 400000baseDR2_2_Full ),
- PHY_SETTING( 400000, FULL, 400000baseSR2_Full ),
- PHY_SETTING( 400000, FULL, 400000baseVR2_Full ),
- /* 200G */
- PHY_SETTING( 200000, FULL, 200000baseCR4_Full ),
- PHY_SETTING( 200000, FULL, 200000baseKR4_Full ),
- PHY_SETTING( 200000, FULL, 200000baseLR4_ER4_FR4_Full ),
- PHY_SETTING( 200000, FULL, 200000baseDR4_Full ),
- PHY_SETTING( 200000, FULL, 200000baseSR4_Full ),
- PHY_SETTING( 200000, FULL, 200000baseCR2_Full ),
- PHY_SETTING( 200000, FULL, 200000baseKR2_Full ),
- PHY_SETTING( 200000, FULL, 200000baseLR2_ER2_FR2_Full ),
- PHY_SETTING( 200000, FULL, 200000baseDR2_Full ),
- PHY_SETTING( 200000, FULL, 200000baseSR2_Full ),
- PHY_SETTING( 200000, FULL, 200000baseCR_Full ),
- PHY_SETTING( 200000, FULL, 200000baseKR_Full ),
- PHY_SETTING( 200000, FULL, 200000baseDR_Full ),
- PHY_SETTING( 200000, FULL, 200000baseDR_2_Full ),
- PHY_SETTING( 200000, FULL, 200000baseSR_Full ),
- PHY_SETTING( 200000, FULL, 200000baseVR_Full ),
- /* 100G */
- PHY_SETTING( 100000, FULL, 100000baseCR4_Full ),
- PHY_SETTING( 100000, FULL, 100000baseKR4_Full ),
- PHY_SETTING( 100000, FULL, 100000baseLR4_ER4_Full ),
- PHY_SETTING( 100000, FULL, 100000baseSR4_Full ),
- PHY_SETTING( 100000, FULL, 100000baseCR2_Full ),
- PHY_SETTING( 100000, FULL, 100000baseKR2_Full ),
- PHY_SETTING( 100000, FULL, 100000baseLR2_ER2_FR2_Full ),
- PHY_SETTING( 100000, FULL, 100000baseDR2_Full ),
- PHY_SETTING( 100000, FULL, 100000baseSR2_Full ),
- PHY_SETTING( 100000, FULL, 100000baseCR_Full ),
- PHY_SETTING( 100000, FULL, 100000baseKR_Full ),
- PHY_SETTING( 100000, FULL, 100000baseLR_ER_FR_Full ),
- PHY_SETTING( 100000, FULL, 100000baseDR_Full ),
- PHY_SETTING( 100000, FULL, 100000baseSR_Full ),
- /* 56G */
- PHY_SETTING( 56000, FULL, 56000baseCR4_Full ),
- PHY_SETTING( 56000, FULL, 56000baseKR4_Full ),
- PHY_SETTING( 56000, FULL, 56000baseLR4_Full ),
- PHY_SETTING( 56000, FULL, 56000baseSR4_Full ),
- /* 50G */
- PHY_SETTING( 50000, FULL, 50000baseCR2_Full ),
- PHY_SETTING( 50000, FULL, 50000baseKR2_Full ),
- PHY_SETTING( 50000, FULL, 50000baseSR2_Full ),
- PHY_SETTING( 50000, FULL, 50000baseCR_Full ),
- PHY_SETTING( 50000, FULL, 50000baseKR_Full ),
- PHY_SETTING( 50000, FULL, 50000baseLR_ER_FR_Full ),
- PHY_SETTING( 50000, FULL, 50000baseDR_Full ),
- PHY_SETTING( 50000, FULL, 50000baseSR_Full ),
- /* 40G */
- PHY_SETTING( 40000, FULL, 40000baseCR4_Full ),
- PHY_SETTING( 40000, FULL, 40000baseKR4_Full ),
- PHY_SETTING( 40000, FULL, 40000baseLR4_Full ),
- PHY_SETTING( 40000, FULL, 40000baseSR4_Full ),
- /* 25G */
- PHY_SETTING( 25000, FULL, 25000baseCR_Full ),
- PHY_SETTING( 25000, FULL, 25000baseKR_Full ),
- PHY_SETTING( 25000, FULL, 25000baseSR_Full ),
- /* 20G */
- PHY_SETTING( 20000, FULL, 20000baseKR2_Full ),
- PHY_SETTING( 20000, FULL, 20000baseMLD2_Full ),
- /* 10G */
- PHY_SETTING( 10000, FULL, 10000baseCR_Full ),
- PHY_SETTING( 10000, FULL, 10000baseER_Full ),
- PHY_SETTING( 10000, FULL, 10000baseKR_Full ),
- PHY_SETTING( 10000, FULL, 10000baseKX4_Full ),
- PHY_SETTING( 10000, FULL, 10000baseLR_Full ),
- PHY_SETTING( 10000, FULL, 10000baseLRM_Full ),
- PHY_SETTING( 10000, FULL, 10000baseR_FEC ),
- PHY_SETTING( 10000, FULL, 10000baseSR_Full ),
- PHY_SETTING( 10000, FULL, 10000baseT_Full ),
- /* 5G */
- PHY_SETTING( 5000, FULL, 5000baseT_Full ),
- /* 2.5G */
- PHY_SETTING( 2500, FULL, 2500baseT_Full ),
- PHY_SETTING( 2500, FULL, 2500baseX_Full ),
- /* 1G */
- PHY_SETTING( 1000, FULL, 1000baseT_Full ),
- PHY_SETTING( 1000, HALF, 1000baseT_Half ),
- PHY_SETTING( 1000, FULL, 1000baseT1_Full ),
- PHY_SETTING( 1000, FULL, 1000baseX_Full ),
- PHY_SETTING( 1000, FULL, 1000baseKX_Full ),
- /* 100M */
- PHY_SETTING( 100, FULL, 100baseT_Full ),
- PHY_SETTING( 100, FULL, 100baseT1_Full ),
- PHY_SETTING( 100, HALF, 100baseT_Half ),
- PHY_SETTING( 100, HALF, 100baseFX_Half ),
- PHY_SETTING( 100, FULL, 100baseFX_Full ),
- /* 10M */
- PHY_SETTING( 10, FULL, 10baseT_Full ),
- PHY_SETTING( 10, HALF, 10baseT_Half ),
- PHY_SETTING( 10, FULL, 10baseT1L_Full ),
- PHY_SETTING( 10, FULL, 10baseT1S_Full ),
- PHY_SETTING( 10, HALF, 10baseT1S_Half ),
- PHY_SETTING( 10, HALF, 10baseT1S_P2MP_Half ),
- PHY_SETTING( 10, FULL, 10baseT1BRR_Full ),
-};
-#undef PHY_SETTING
-
-/**
- * phy_lookup_setting - lookup a PHY setting
- * @speed: speed to match
- * @duplex: duplex to match
- * @mask: allowed link modes
- * @exact: an exact match is required
- *
- * Search the settings array for a setting that matches the speed and
- * duplex, and which is supported.
- *
- * If @exact is unset, either an exact match or %NULL for no match will
- * be returned.
- *
- * If @exact is set, an exact match, the fastest supported setting at
- * or below the specified speed, the slowest supported setting, or if
- * they all fail, %NULL will be returned.
- */
-const struct phy_setting *
-phy_lookup_setting(int speed, int duplex, const unsigned long *mask, bool exact)
-{
- const struct phy_setting *p, *match = NULL, *last = NULL;
- int i;
-
- for (i = 0, p = settings; i < ARRAY_SIZE(settings); i++, p++) {
- if (p->bit < __ETHTOOL_LINK_MODE_MASK_NBITS &&
- test_bit(p->bit, mask)) {
- last = p;
- if (p->speed == speed && p->duplex == duplex) {
- /* Exact match for speed and duplex */
- match = p;
- break;
- } else if (!exact) {
- if (!match && p->speed <= speed)
- /* Candidate */
- match = p;
-
- if (p->speed < speed)
- break;
- }
- }
- }
-
- if (!match && !exact)
- match = last;
-
- return match;
-}
-EXPORT_SYMBOL_GPL(phy_lookup_setting);
-
-size_t phy_speeds(unsigned int *speeds, size_t size,
- unsigned long *mask)
-{
- size_t count;
- int i;
-
- for (i = 0, count = 0; i < ARRAY_SIZE(settings) && count < size; i++)
- if (settings[i].bit < __ETHTOOL_LINK_MODE_MASK_NBITS &&
- test_bit(settings[i].bit, mask) &&
- (count == 0 || speeds[count - 1] != settings[i].speed))
- speeds[count++] = settings[i].speed;
-
- return count;
-}
-
-static void __set_linkmode_max_speed(u32 max_speed, unsigned long *addr)
-{
- const struct phy_setting *p;
- int i;
-
- for (i = 0, p = settings; i < ARRAY_SIZE(settings); i++, p++) {
- if (p->speed > max_speed)
- linkmode_clear_bit(p->bit, addr);
- else
- break;
- }
-}
-
static void __set_phy_supported(struct phy_device *phydev, u32 max_speed)
{
- __set_linkmode_max_speed(max_speed, phydev->supported);
+ phy_caps_linkmode_max_speed(max_speed, phydev->supported);
}
/**
@@ -406,7 +198,7 @@ void of_set_phy_supported(struct phy_device *phydev)
void of_set_phy_eee_broken(struct phy_device *phydev)
{
struct device_node *node = phydev->mdio.dev.of_node;
- unsigned long *modes = phydev->eee_broken_modes;
+ unsigned long *modes = phydev->eee_disabled_modes;
if (!IS_ENABLED(CONFIG_OF_MDIO) || !node)
return;
@@ -493,16 +285,15 @@ EXPORT_SYMBOL_GPL(phy_resolve_aneg_pause);
void phy_resolve_aneg_linkmode(struct phy_device *phydev)
{
__ETHTOOL_DECLARE_LINK_MODE_MASK(common);
- int i;
+ const struct link_capabilities *c;
linkmode_and(common, phydev->lp_advertising, phydev->advertising);
- for (i = 0; i < ARRAY_SIZE(settings); i++)
- if (test_bit(settings[i].bit, common)) {
- phydev->speed = settings[i].speed;
- phydev->duplex = settings[i].duplex;
- break;
- }
+ c = phy_caps_lookup_by_linkmode(common);
+ if (c) {
+ phydev->speed = c->speed;
+ phydev->duplex = c->duplex;
+ }
phy_resolve_aneg_pause(phydev);
}
@@ -520,7 +311,8 @@ EXPORT_SYMBOL_GPL(phy_resolve_aneg_linkmode);
void phy_check_downshift(struct phy_device *phydev)
{
__ETHTOOL_DECLARE_LINK_MODE_MASK(common);
- int i, speed = SPEED_UNKNOWN;
+ const struct link_capabilities *c;
+ int speed = SPEED_UNKNOWN;
phydev->downshifted_rate = 0;
@@ -530,11 +322,9 @@ void phy_check_downshift(struct phy_device *phydev)
linkmode_and(common, phydev->lp_advertising, phydev->advertising);
- for (i = 0; i < ARRAY_SIZE(settings); i++)
- if (test_bit(settings[i].bit, common)) {
- speed = settings[i].speed;
- break;
- }
+ c = phy_caps_lookup_by_linkmode(common);
+ if (c)
+ speed = c->speed;
if (speed == SPEED_UNKNOWN || phydev->speed >= speed)
return;
@@ -544,22 +334,17 @@ void phy_check_downshift(struct phy_device *phydev)
phydev->downshifted_rate = 1;
}
-EXPORT_SYMBOL_GPL(phy_check_downshift);
static int phy_resolve_min_speed(struct phy_device *phydev, bool fdx_only)
{
__ETHTOOL_DECLARE_LINK_MODE_MASK(common);
- int i = ARRAY_SIZE(settings);
+ const struct link_capabilities *c;
linkmode_and(common, phydev->lp_advertising, phydev->advertising);
- while (--i >= 0) {
- if (test_bit(settings[i].bit, common)) {
- if (fdx_only && settings[i].duplex != DUPLEX_FULL)
- continue;
- return settings[i].speed;
- }
- }
+ c = phy_caps_lookup_by_linkmode_rev(common, fdx_only);
+ if (c)
+ return c->speed;
return SPEED_UNKNOWN;
}
@@ -571,7 +356,7 @@ int phy_speed_down_core(struct phy_device *phydev)
if (min_common_speed == SPEED_UNKNOWN)
return -EINVAL;
- __set_linkmode_max_speed(min_common_speed, phydev->advertising);
+ phy_caps_linkmode_max_speed(min_common_speed, phydev->advertising);
return 0;
}
@@ -733,43 +518,6 @@ int __phy_package_read_mmd(struct phy_device *phydev,
EXPORT_SYMBOL(__phy_package_read_mmd);
/**
- * phy_package_read_mmd - read MMD reg relative to PHY package base addr
- * @phydev: The phy_device struct
- * @addr_offset: The offset to be added to PHY package base_addr
- * @devad: The MMD to read from
- * @regnum: The register on the MMD to read
- *
- * Convenience helper for reading a register of an MMD on a given PHY
- * using the PHY package base address. The base address is added to
- * the addr_offset value.
- *
- * Same calling rules as for phy_read();
- *
- * NOTE: It's assumed that the entire PHY package is either C22 or C45.
- */
-int phy_package_read_mmd(struct phy_device *phydev,
- unsigned int addr_offset, int devad,
- u32 regnum)
-{
- int addr = phy_package_address(phydev, addr_offset);
- int val;
-
- if (addr < 0)
- return addr;
-
- if (regnum > (u16)~0 || devad > 32)
- return -EINVAL;
-
- phy_lock_mdio_bus(phydev);
- val = mmd_phy_read(phydev->mdio.bus, addr, phydev->is_c45, devad,
- regnum);
- phy_unlock_mdio_bus(phydev);
-
- return val;
-}
-EXPORT_SYMBOL(phy_package_read_mmd);
-
-/**
* __phy_package_write_mmd - write MMD reg relative to PHY package base addr
* @phydev: The phy_device struct
* @addr_offset: The offset to be added to PHY package base_addr
@@ -803,44 +551,6 @@ int __phy_package_write_mmd(struct phy_device *phydev,
EXPORT_SYMBOL(__phy_package_write_mmd);
/**
- * phy_package_write_mmd - write MMD reg relative to PHY package base addr
- * @phydev: The phy_device struct
- * @addr_offset: The offset to be added to PHY package base_addr
- * @devad: The MMD to write to
- * @regnum: The register on the MMD to write
- * @val: value to write to @regnum
- *
- * Convenience helper for writing a register of an MMD on a given PHY
- * using the PHY package base address. The base address is added to
- * the addr_offset value.
- *
- * Same calling rules as for phy_write();
- *
- * NOTE: It's assumed that the entire PHY package is either C22 or C45.
- */
-int phy_package_write_mmd(struct phy_device *phydev,
- unsigned int addr_offset, int devad,
- u32 regnum, u16 val)
-{
- int addr = phy_package_address(phydev, addr_offset);
- int ret;
-
- if (addr < 0)
- return addr;
-
- if (regnum > (u16)~0 || devad > 32)
- return -EINVAL;
-
- phy_lock_mdio_bus(phydev);
- ret = mmd_phy_write(phydev->mdio.bus, addr, phydev->is_c45, devad,
- regnum, val);
- phy_unlock_mdio_bus(phydev);
-
- return ret;
-}
-EXPORT_SYMBOL(phy_package_write_mmd);
-
-/**
* phy_modify_changed - Function for modifying a PHY register
* @phydev: the phy_device struct
* @regnum: register number to modify
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index d0c1718e2b16..13df28445f02 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -36,6 +36,9 @@
#include <net/genetlink.h>
#include <net/sock.h>
+#include "phylib-internal.h"
+#include "phy-caps.h"
+
#define PHY_STATE_TIME HZ
#define PHY_STATE_STR(_state) \
@@ -211,25 +214,6 @@ int phy_aneg_done(struct phy_device *phydev)
EXPORT_SYMBOL(phy_aneg_done);
/**
- * phy_find_valid - find a PHY setting that matches the requested parameters
- * @speed: desired speed
- * @duplex: desired duplex
- * @supported: mask of supported link modes
- *
- * Locate a supported phy setting that is, in priority order:
- * - an exact match for the specified speed and duplex mode
- * - a match for the specified speed, or slower speed
- * - the slowest supported speed
- * Returns the matched phy_setting entry, or %NULL if no supported phy
- * settings were found.
- */
-static const struct phy_setting *
-phy_find_valid(int speed, int duplex, unsigned long *supported)
-{
- return phy_lookup_setting(speed, duplex, supported, false);
-}
-
-/**
* phy_supported_speeds - return all speeds currently supported by a phy device
* @phy: The phy device to return supported speeds of.
* @speeds: buffer to store supported speeds in.
@@ -243,7 +227,7 @@ unsigned int phy_supported_speeds(struct phy_device *phy,
unsigned int *speeds,
unsigned int size)
{
- return phy_speeds(speeds, size, phy->supported);
+ return phy_caps_speeds(speeds, size, phy->supported);
}
/**
@@ -257,7 +241,7 @@ unsigned int phy_supported_speeds(struct phy_device *phy,
*/
bool phy_check_valid(int speed, int duplex, unsigned long *features)
{
- return !!phy_lookup_setting(speed, duplex, features, true);
+ return phy_caps_valid(speed, duplex, features);
}
EXPORT_SYMBOL(phy_check_valid);
@@ -271,13 +255,14 @@ EXPORT_SYMBOL(phy_check_valid);
*/
static void phy_sanitize_settings(struct phy_device *phydev)
{
- const struct phy_setting *setting;
+ const struct link_capabilities *c;
+
+ c = phy_caps_lookup(phydev->speed, phydev->duplex, phydev->supported,
+ false);
- setting = phy_find_valid(phydev->speed, phydev->duplex,
- phydev->supported);
- if (setting) {
- phydev->speed = setting->speed;
- phydev->duplex = setting->duplex;
+ if (c) {
+ phydev->speed = c->speed;
+ phydev->duplex = c->duplex;
} else {
/* We failed to find anything (no supported speeds?) */
phydev->speed = SPEED_UNKNOWN;
@@ -302,7 +287,7 @@ void phy_ethtool_ksettings_get(struct phy_device *phydev,
cmd->base.port = PORT_BNC;
else
cmd->base.port = phydev->port;
- cmd->base.transceiver = phy_is_internal(phydev) ?
+ cmd->base.transceiver = phydev->is_internal ?
XCVR_INTERNAL : XCVR_EXTERNAL;
cmd->base.phy_address = phydev->mdio.addr;
cmd->base.autoneg = phydev->autoneg;
@@ -520,12 +505,12 @@ int __phy_hwtstamp_set(struct phy_device *phydev,
* @phydev: the phy_device struct
* @jiffies: Run the state machine after these jiffies
*/
-void phy_queue_state_machine(struct phy_device *phydev, unsigned long jiffies)
+static void phy_queue_state_machine(struct phy_device *phydev,
+ unsigned long jiffies)
{
mod_delayed_work(system_power_efficient_wq, &phydev->state_queue,
jiffies);
}
-EXPORT_SYMBOL(phy_queue_state_machine);
/**
* phy_trigger_machine - Trigger the state machine to run now
@@ -1031,7 +1016,7 @@ static int phy_check_link_status(struct phy_device *phydev)
if (phydev->link && phydev->state != PHY_RUNNING) {
phy_check_downshift(phydev);
phydev->state = PHY_RUNNING;
- err = genphy_c45_eee_is_active(phydev, NULL, NULL);
+ err = genphy_c45_eee_is_active(phydev, NULL);
phydev->eee_active = err > 0;
phydev->enable_tx_lpi = phydev->eee_cfg.tx_lpi_enabled &&
phydev->eee_active;
@@ -1501,6 +1486,24 @@ void phy_free_interrupt(struct phy_device *phydev)
}
EXPORT_SYMBOL(phy_free_interrupt);
+/**
+ * phy_get_next_update_time - Determine the next PHY update time
+ * @phydev: Pointer to the phy_device structure
+ *
+ * This function queries the PHY driver to get the time for the next polling
+ * event. If the driver does not implement the callback, a default value is
+ * used.
+ *
+ * Return: The time for the next polling event in jiffies
+ */
+static unsigned int phy_get_next_update_time(struct phy_device *phydev)
+{
+ if (phydev->drv && phydev->drv->get_next_update_time)
+ return phydev->drv->get_next_update_time(phydev);
+
+ return PHY_STATE_TIME;
+}
+
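For illustration only, a minimal sketch of how a PHY driver could implement the new get_next_update_time() callback consumed above; the "foo" driver name and the faster-poll-while-no-link policy are assumptions, not part of this patch.

static unsigned int foo_get_next_update_time(struct phy_device *phydev)
{
	/* Hypothetical policy: poll faster until the link comes up,
	 * then fall back to the usual one-second cadence.
	 */
	if (phydev->state == PHY_NOLINK)
		return HZ / 4;

	return HZ;
}

static struct phy_driver foo_phy_driver = {
	/* other phy_driver fields omitted in this sketch */
	.name			= "Hypothetical FOO PHY",
	.get_next_update_time	= foo_get_next_update_time,
};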
enum phy_state_work {
PHY_STATE_WORK_NONE,
PHY_STATE_WORK_ANEG,
@@ -1580,7 +1583,8 @@ static enum phy_state_work _phy_state_machine(struct phy_device *phydev)
* called from phy_disconnect() synchronously.
*/
if (phy_polling_mode(phydev) && phy_is_started(phydev))
- phy_queue_state_machine(phydev, PHY_STATE_TIME);
+ phy_queue_state_machine(phydev,
+ phy_get_next_update_time(phydev));
return state_work;
}
@@ -1704,6 +1708,93 @@ void phy_mac_interrupt(struct phy_device *phydev)
EXPORT_SYMBOL(phy_mac_interrupt);
/**
+ * phy_loopback - Configure loopback mode of PHY
+ * @phydev: target phy_device struct
+ * @enable: enable or disable loopback mode
+ * @speed: enable loopback mode with speed
+ *
+ * Configure loopback mode of PHY and signal link down and link up if speed is
+ * changing.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+int phy_loopback(struct phy_device *phydev, bool enable, int speed)
+{
+ bool link_up = false;
+ int ret = 0;
+
+ if (!phydev->drv)
+ return -EIO;
+
+ mutex_lock(&phydev->lock);
+
+ if (enable && phydev->loopback_enabled) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ if (!enable && !phydev->loopback_enabled) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (enable) {
+		/*
+		 * Link up is signaled with a defined speed. If the speed
+		 * changes, signal link down first and then link up at the
+		 * new speed.
+		 */
+ if (phydev->link && phydev->state == PHY_RUNNING) {
+ /* link is up and signaled */
+ if (speed && phydev->speed != speed) {
+ /* signal link down and up for new speed */
+ phydev->link = false;
+ phydev->state = PHY_NOLINK;
+ phy_link_down(phydev);
+
+ link_up = true;
+ }
+ } else {
+ /* link is not signaled */
+ if (speed) {
+ /* signal link up for new speed */
+ link_up = true;
+ }
+ }
+ }
+
+ if (phydev->drv->set_loopback)
+ ret = phydev->drv->set_loopback(phydev, enable, speed);
+ else
+ ret = genphy_loopback(phydev, enable, speed);
+
+ if (ret) {
+ if (enable) {
+ /* try to restore link if enabling loopback fails */
+ if (phydev->drv->set_loopback)
+ phydev->drv->set_loopback(phydev, false, 0);
+ else
+ genphy_loopback(phydev, false, 0);
+ }
+
+ goto out;
+ }
+
+ if (link_up) {
+ phydev->link = true;
+ phydev->state = PHY_RUNNING;
+ phy_link_up(phydev);
+ }
+
+ phydev->loopback_enabled = enable;
+
+out:
+ mutex_unlock(&phydev->lock);
+ return ret;
+}
+EXPORT_SYMBOL(phy_loopback);
+
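A hedged usage sketch of the extended phy_loopback() signature above: a MAC driver self-test might force a specific loopback speed and release it afterwards. The self-test helper itself is hypothetical; only phy_loopback() comes from this patch.

static int foo_selftest_with_loopback(struct net_device *ndev)
{
	struct phy_device *phydev = ndev->phydev;
	int ret;

	/* Force 1000 Mbit/s loopback for the test frames. */
	ret = phy_loopback(phydev, true, SPEED_1000);
	if (ret)
		return ret;

	/* ... transmit and verify test frames here (omitted) ... */

	/* Disable loopback again; speed 0 means no speed change requested. */
	return phy_loopback(phydev, false, 0);
}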
+/**
* phy_eee_tx_clock_stop_capable() - indicate whether the MAC can stop tx clock
* @phydev: target phy_device struct
*
@@ -1761,7 +1852,7 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable)
if (!phydev->drv)
return -EIO;
- ret = genphy_c45_eee_is_active(phydev, NULL, NULL);
+ ret = genphy_c45_eee_is_active(phydev, NULL);
if (ret < 0)
return ret;
if (!ret)
diff --git a/drivers/net/phy/phy_caps.c b/drivers/net/phy/phy_caps.c
new file mode 100644
index 000000000000..703321689726
--- /dev/null
+++ b/drivers/net/phy/phy_caps.c
@@ -0,0 +1,359 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <linux/ethtool.h>
+#include <linux/linkmode.h>
+#include <linux/phy.h>
+
+#include "phy-caps.h"
+
+static struct link_capabilities link_caps[__LINK_CAPA_MAX] __ro_after_init = {
+ { SPEED_10, DUPLEX_HALF, {0} }, /* LINK_CAPA_10HD */
+ { SPEED_10, DUPLEX_FULL, {0} }, /* LINK_CAPA_10FD */
+ { SPEED_100, DUPLEX_HALF, {0} }, /* LINK_CAPA_100HD */
+ { SPEED_100, DUPLEX_FULL, {0} }, /* LINK_CAPA_100FD */
+ { SPEED_1000, DUPLEX_HALF, {0} }, /* LINK_CAPA_1000HD */
+ { SPEED_1000, DUPLEX_FULL, {0} }, /* LINK_CAPA_1000FD */
+ { SPEED_2500, DUPLEX_FULL, {0} }, /* LINK_CAPA_2500FD */
+ { SPEED_5000, DUPLEX_FULL, {0} }, /* LINK_CAPA_5000FD */
+ { SPEED_10000, DUPLEX_FULL, {0} }, /* LINK_CAPA_10000FD */
+ { SPEED_20000, DUPLEX_FULL, {0} }, /* LINK_CAPA_20000FD */
+ { SPEED_25000, DUPLEX_FULL, {0} }, /* LINK_CAPA_25000FD */
+ { SPEED_40000, DUPLEX_FULL, {0} }, /* LINK_CAPA_40000FD */
+ { SPEED_50000, DUPLEX_FULL, {0} }, /* LINK_CAPA_50000FD */
+ { SPEED_56000, DUPLEX_FULL, {0} }, /* LINK_CAPA_56000FD */
+ { SPEED_100000, DUPLEX_FULL, {0} }, /* LINK_CAPA_100000FD */
+ { SPEED_200000, DUPLEX_FULL, {0} }, /* LINK_CAPA_200000FD */
+ { SPEED_400000, DUPLEX_FULL, {0} }, /* LINK_CAPA_400000FD */
+ { SPEED_800000, DUPLEX_FULL, {0} }, /* LINK_CAPA_800000FD */
+};
+
+static int speed_duplex_to_capa(int speed, unsigned int duplex)
+{
+ if (duplex == DUPLEX_UNKNOWN ||
+ (speed > SPEED_1000 && duplex != DUPLEX_FULL))
+ return -EINVAL;
+
+ switch (speed) {
+ case SPEED_10: return duplex == DUPLEX_FULL ?
+ LINK_CAPA_10FD : LINK_CAPA_10HD;
+ case SPEED_100: return duplex == DUPLEX_FULL ?
+ LINK_CAPA_100FD : LINK_CAPA_100HD;
+ case SPEED_1000: return duplex == DUPLEX_FULL ?
+ LINK_CAPA_1000FD : LINK_CAPA_1000HD;
+ case SPEED_2500: return LINK_CAPA_2500FD;
+ case SPEED_5000: return LINK_CAPA_5000FD;
+ case SPEED_10000: return LINK_CAPA_10000FD;
+ case SPEED_20000: return LINK_CAPA_20000FD;
+ case SPEED_25000: return LINK_CAPA_25000FD;
+ case SPEED_40000: return LINK_CAPA_40000FD;
+ case SPEED_50000: return LINK_CAPA_50000FD;
+ case SPEED_56000: return LINK_CAPA_56000FD;
+ case SPEED_100000: return LINK_CAPA_100000FD;
+ case SPEED_200000: return LINK_CAPA_200000FD;
+ case SPEED_400000: return LINK_CAPA_400000FD;
+ case SPEED_800000: return LINK_CAPA_800000FD;
+ }
+
+ return -EINVAL;
+}
+
+#define for_each_link_caps_asc_speed(cap) \
+ for (cap = link_caps; cap < &link_caps[__LINK_CAPA_MAX]; cap++)
+
+#define for_each_link_caps_desc_speed(cap) \
+ for (cap = &link_caps[__LINK_CAPA_MAX - 1]; cap >= link_caps; cap--)
+
+/**
+ * phy_caps_init() - Initializes the link_caps array from the link_mode_params.
+ *
+ * Returns: 0 if phy caps init was successful, -EINVAL if we found an
+ * unexpected linkmode setting that requires LINK_CAPS update.
+ *
+ */
+int phy_caps_init(void)
+{
+ const struct link_mode_info *linkmode;
+ int i, capa;
+
+ /* Fill the caps array from net/ethtool/common.c */
+ for (i = 0; i < __ETHTOOL_LINK_MODE_MASK_NBITS; i++) {
+ linkmode = &link_mode_params[i];
+ capa = speed_duplex_to_capa(linkmode->speed, linkmode->duplex);
+
+ if (capa < 0) {
+ if (linkmode->speed != SPEED_UNKNOWN) {
+ pr_err("Unknown speed %d, please update LINK_CAPS\n",
+ linkmode->speed);
+ return -EINVAL;
+ }
+ continue;
+ }
+
+ __set_bit(i, link_caps[capa].linkmodes);
+ }
+
+ return 0;
+}
+
+/**
+ * phy_caps_speeds() - Fill an array of supported SPEED_* values for given modes
+ * @speeds: Output array to store the speeds list into
+ * @size: Size of the output array
+ * @linkmodes: Linkmodes to get the speeds from
+ *
+ * Fills the speeds array with all possible speeds that can be achieved with
+ * the specified linkmodes.
+ *
+ * Returns: The number of speeds filled into the array. If the input array isn't
+ * big enough to store all speeds, it is filled as far as possible.
+ */
+size_t phy_caps_speeds(unsigned int *speeds, size_t size,
+ unsigned long *linkmodes)
+{
+ struct link_capabilities *lcap;
+ size_t count = 0;
+
+ for_each_link_caps_asc_speed(lcap) {
+ if (linkmode_intersects(lcap->linkmodes, linkmodes) &&
+ (count == 0 || speeds[count - 1] != lcap->speed)) {
+ speeds[count++] = lcap->speed;
+ if (count >= size)
+ break;
+ }
+ }
+
+ return count;
+}
+
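A hedged sketch of phy_caps_speeds() used from inside phylib: enumerate every distinct speed reachable with a phydev's supported linkmodes. The helper name and buffer sizing are illustrative.

static void foo_dump_supported_speeds(struct phy_device *phydev)
{
	unsigned int speeds[__LINK_CAPA_MAX];
	size_t n, i;

	n = phy_caps_speeds(speeds, ARRAY_SIZE(speeds), phydev->supported);
	for (i = 0; i < n; i++)
		phydev_info(phydev, "supported speed: %u Mbit/s\n", speeds[i]);
}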
+/**
+ * phy_caps_lookup_by_linkmode() - Lookup the fastest matching link_capabilities
+ * @linkmodes: Linkmodes to match against
+ *
+ * Returns: The highest-speed link_capabilities that intersects the given
+ * linkmodes. In case several DUPLEX_ options exist at that speed,
+ * DUPLEX_FULL is matched first. NULL is returned if no match.
+ */
+const struct link_capabilities *
+phy_caps_lookup_by_linkmode(const unsigned long *linkmodes)
+{
+ struct link_capabilities *lcap;
+
+ for_each_link_caps_desc_speed(lcap)
+ if (linkmode_intersects(lcap->linkmodes, linkmodes))
+ return lcap;
+
+ return NULL;
+}
+
+/**
+ * phy_caps_lookup_by_linkmode_rev() - Lookup the slowest matching link_capabilities
+ * @linkmodes: Linkmodes to match against
+ * @fdx_only: Full duplex match only when set
+ *
+ * Returns: The lowest-speed link_capabilities that intersects the given
+ * linkmodes. When @fdx_only is set, half-duplex matches are ignored.
+ * NULL is returned if no match.
+ */
+const struct link_capabilities *
+phy_caps_lookup_by_linkmode_rev(const unsigned long *linkmodes, bool fdx_only)
+{
+ struct link_capabilities *lcap;
+
+ for_each_link_caps_asc_speed(lcap) {
+ if (fdx_only && lcap->duplex != DUPLEX_FULL)
+ continue;
+
+ if (linkmode_intersects(lcap->linkmodes, linkmodes))
+ return lcap;
+ }
+
+ return NULL;
+}
+
+/**
+ * phy_caps_lookup() - Lookup capabilities by speed/duplex that matches a mask
+ * @speed: Speed to match
+ * @duplex: Duplex to match
+ * @supported: Mask of linkmodes to match
+ * @exact: Perform an exact match or not.
+ *
+ * Lookup a link_capabilities entry that intersects the supported linkmodes mask,
+ * and that matches the passed speed and duplex.
+ *
+ * When @exact is set, only an exact match on speed and duplex is accepted: if
+ * the linkmodes for the given speed and duplex intersect the supported mask,
+ * that capability is returned, otherwise NULL is returned.
+ *
+ * When @exact is not set, we return either an exact match, the fastest match
+ * at or below the requested speed, the slowest supported match, or NULL.
+ *
+ * Returns: a matched link_capabilities according to the above process, NULL
+ * otherwise.
+ */
+const struct link_capabilities *
+phy_caps_lookup(int speed, unsigned int duplex, const unsigned long *supported,
+ bool exact)
+{
+ const struct link_capabilities *lcap, *last = NULL;
+
+ for_each_link_caps_desc_speed(lcap) {
+ if (linkmode_intersects(lcap->linkmodes, supported)) {
+ last = lcap;
+			/* Exact match on speed and duplex */
+ if (lcap->speed == speed && lcap->duplex == duplex) {
+ return lcap;
+ } else if (!exact) {
+ if (lcap->speed <= speed)
+ return lcap;
+ }
+ }
+ }
+
+ if (!exact)
+ return last;
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(phy_caps_lookup);
+
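A hedged sketch of the non-exact lookup described above: pick the fastest supported speed/duplex at or below a requested limit, much like phy_sanitize_settings() does elsewhere in this patch. The helper is illustrative only.

static void foo_pick_best_mode(struct phy_device *phydev, int max_speed)
{
	const struct link_capabilities *c;

	/* exact = false: fall back to a slower supported speed if needed */
	c = phy_caps_lookup(max_speed, DUPLEX_FULL, phydev->supported, false);
	if (c) {
		phydev->speed = c->speed;
		phydev->duplex = c->duplex;
	}
}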
+/**
+ * phy_caps_linkmode_max_speed() - Clamp a linkmodes set to a max speed
+ * @max_speed: Speed limit for the linkmode set
+ * @linkmodes: Linkmodes to limit
+ */
+void phy_caps_linkmode_max_speed(u32 max_speed, unsigned long *linkmodes)
+{
+ struct link_capabilities *lcap;
+
+ for_each_link_caps_desc_speed(lcap)
+ if (lcap->speed > max_speed)
+ linkmode_andnot(linkmodes, linkmodes, lcap->linkmodes);
+ else
+ break;
+}
+
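A hedged one-line usage sketch: clamp the advertised linkmodes to 100 Mbit/s, analogous to what phy_speed_down_core() does above with the negotiated minimum speed. The wrapper name is an assumption.

static void foo_limit_advertising_to_100m(struct phy_device *phydev)
{
	phy_caps_linkmode_max_speed(SPEED_100, phydev->advertising);
}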
+/**
+ * phy_caps_valid() - Validate a linkmodes set against a given speed and duplex
+ * @speed: input speed to validate
+ * @duplex: input duplex to validate. Passing DUPLEX_UNKNOWN is never valid
+ * @linkmodes: The linkmodes to validate
+ *
+ * Returns: True if at least one of the linkmodes in @linkmodes can function at
+ * the given speed and duplex, false otherwise.
+ */
+bool phy_caps_valid(int speed, int duplex, const unsigned long *linkmodes)
+{
+ int capa = speed_duplex_to_capa(speed, duplex);
+
+ if (capa < 0)
+ return false;
+
+ return linkmode_intersects(link_caps[capa].linkmodes, linkmodes);
+}
+
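A hedged sketch of phy_caps_valid(): reject a forced speed/duplex pair that the supported mask cannot satisfy, similar to the phy_check_valid() wrapper earlier in this series. The helper is illustrative.

static int foo_set_fixed_mode(struct phy_device *phydev, int speed, int duplex)
{
	if (!phy_caps_valid(speed, duplex, phydev->supported))
		return -EINVAL;

	phydev->speed = speed;
	phydev->duplex = duplex;

	return 0;
}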
+/**
+ * phy_caps_linkmodes() - Convert a bitfield of capabilities into linkmodes
+ * @caps: The list of caps, each bit corresponding to a LINK_CAPA value
+ * @linkmodes: The set of linkmodes to fill. Must be previously initialized.
+ */
+void phy_caps_linkmodes(unsigned long caps, unsigned long *linkmodes)
+{
+ unsigned long capa;
+
+ for_each_set_bit(capa, &caps, __LINK_CAPA_MAX)
+ linkmode_or(linkmodes, linkmodes, link_caps[capa].linkmodes);
+}
+EXPORT_SYMBOL_GPL(phy_caps_linkmodes);
+
+/**
+ * phy_caps_from_interface() - Get the link capa from a given PHY interface
+ * @interface: The PHY interface we want to get the possible Speed/Duplex from
+ *
+ * Returns: A bitmask of LINK_CAPA_xxx values that can be achieved with the
+ * provided interface.
+ */
+unsigned long phy_caps_from_interface(phy_interface_t interface)
+{
+ unsigned long link_caps = 0;
+
+ switch (interface) {
+ case PHY_INTERFACE_MODE_USXGMII:
+ link_caps |= BIT(LINK_CAPA_10000FD) | BIT(LINK_CAPA_5000FD);
+ fallthrough;
+
+ case PHY_INTERFACE_MODE_10G_QXGMII:
+ link_caps |= BIT(LINK_CAPA_2500FD);
+ fallthrough;
+
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_PSGMII:
+ case PHY_INTERFACE_MODE_QSGMII:
+ case PHY_INTERFACE_MODE_QUSGMII:
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_GMII:
+ link_caps |= BIT(LINK_CAPA_1000HD) | BIT(LINK_CAPA_1000FD);
+ fallthrough;
+
+ case PHY_INTERFACE_MODE_REVRMII:
+ case PHY_INTERFACE_MODE_RMII:
+ case PHY_INTERFACE_MODE_SMII:
+ case PHY_INTERFACE_MODE_REVMII:
+ case PHY_INTERFACE_MODE_MII:
+ link_caps |= BIT(LINK_CAPA_10HD) | BIT(LINK_CAPA_10FD);
+ fallthrough;
+
+ case PHY_INTERFACE_MODE_100BASEX:
+ link_caps |= BIT(LINK_CAPA_100HD) | BIT(LINK_CAPA_100FD);
+ break;
+
+ case PHY_INTERFACE_MODE_TBI:
+ case PHY_INTERFACE_MODE_MOCA:
+ case PHY_INTERFACE_MODE_RTBI:
+ case PHY_INTERFACE_MODE_1000BASEX:
+ link_caps |= BIT(LINK_CAPA_1000HD);
+ fallthrough;
+ case PHY_INTERFACE_MODE_1000BASEKX:
+ case PHY_INTERFACE_MODE_TRGMII:
+ link_caps |= BIT(LINK_CAPA_1000FD);
+ break;
+
+ case PHY_INTERFACE_MODE_2500BASEX:
+ link_caps |= BIT(LINK_CAPA_2500FD);
+ break;
+
+ case PHY_INTERFACE_MODE_5GBASER:
+ link_caps |= BIT(LINK_CAPA_5000FD);
+ break;
+
+ case PHY_INTERFACE_MODE_XGMII:
+ case PHY_INTERFACE_MODE_RXAUI:
+ case PHY_INTERFACE_MODE_XAUI:
+ case PHY_INTERFACE_MODE_10GBASER:
+ case PHY_INTERFACE_MODE_10GKR:
+ link_caps |= BIT(LINK_CAPA_10000FD);
+ break;
+
+ case PHY_INTERFACE_MODE_25GBASER:
+ link_caps |= BIT(LINK_CAPA_25000FD);
+ break;
+
+ case PHY_INTERFACE_MODE_XLGMII:
+ link_caps |= BIT(LINK_CAPA_40000FD);
+ break;
+
+ case PHY_INTERFACE_MODE_INTERNAL:
+ link_caps |= LINK_CAPA_ALL;
+ break;
+
+ case PHY_INTERFACE_MODE_NA:
+ case PHY_INTERFACE_MODE_MAX:
+ break;
+ }
+
+ return link_caps;
+}
+EXPORT_SYMBOL_GPL(phy_caps_from_interface);
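A hedged sketch combining phy_caps_from_interface() with phy_caps_linkmodes(): derive the ethtool linkmodes achievable over a given PHY interface mode. The helper name is illustrative, not part of the patch.

static void foo_interface_to_linkmodes(phy_interface_t interface,
				       unsigned long *linkmodes)
{
	unsigned long caps = phy_caps_from_interface(interface);

	linkmode_zero(linkmodes);
	phy_caps_linkmodes(caps, linkmodes);
}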
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 46713d27412b..675fbd225378 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -41,10 +41,24 @@
#include <linux/uaccess.h>
#include <linux/unistd.h>
+#include "phylib-internal.h"
+#include "phy-caps.h"
+
MODULE_DESCRIPTION("PHY library");
MODULE_AUTHOR("Andy Fleming");
MODULE_LICENSE("GPL");
+#define PHY_ANY_ID "MATCH ANY PHY"
+#define PHY_ANY_UID 0xffffffff
+
+struct phy_fixup {
+ struct list_head list;
+ char bus_id[MII_BUS_ID_SIZE + 3];
+ u32 phy_uid;
+ u32 phy_uid_mask;
+ int (*run)(struct phy_device *phydev);
+};
+
__ETHTOOL_DECLARE_LINK_MODE_MASK(phy_basic_features) __ro_after_init;
EXPORT_SYMBOL_GPL(phy_basic_features);
@@ -80,37 +94,28 @@ static const int phy_all_ports_features_array[7] = {
ETHTOOL_LINK_MODE_Backplane_BIT,
};
-const int phy_10_100_features_array[4] = {
+static const int phy_10_100_features_array[4] = {
ETHTOOL_LINK_MODE_10baseT_Half_BIT,
ETHTOOL_LINK_MODE_10baseT_Full_BIT,
ETHTOOL_LINK_MODE_100baseT_Half_BIT,
ETHTOOL_LINK_MODE_100baseT_Full_BIT,
};
-EXPORT_SYMBOL_GPL(phy_10_100_features_array);
-const int phy_basic_t1_features_array[3] = {
+static const int phy_basic_t1_features_array[3] = {
ETHTOOL_LINK_MODE_TP_BIT,
ETHTOOL_LINK_MODE_10baseT1L_Full_BIT,
ETHTOOL_LINK_MODE_100baseT1_Full_BIT,
};
-EXPORT_SYMBOL_GPL(phy_basic_t1_features_array);
-const int phy_basic_t1s_p2mp_features_array[2] = {
+static const int phy_basic_t1s_p2mp_features_array[2] = {
ETHTOOL_LINK_MODE_TP_BIT,
ETHTOOL_LINK_MODE_10baseT1S_P2MP_Half_BIT,
};
-EXPORT_SYMBOL_GPL(phy_basic_t1s_p2mp_features_array);
-const int phy_gbit_features_array[2] = {
+static const int phy_gbit_features_array[2] = {
ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
};
-EXPORT_SYMBOL_GPL(phy_gbit_features_array);
-
-const int phy_10gbit_features_array[1] = {
- ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
-};
-EXPORT_SYMBOL_GPL(phy_10gbit_features_array);
static const int phy_eee_cap1_features_array[] = {
ETHTOOL_LINK_MODE_100baseT_Full_BIT,
@@ -185,9 +190,8 @@ static void features_init(void)
linkmode_set_bit_array(phy_gbit_features_array,
ARRAY_SIZE(phy_gbit_features_array),
phy_10gbit_features);
- linkmode_set_bit_array(phy_10gbit_features_array,
- ARRAY_SIZE(phy_10gbit_features_array),
- phy_10gbit_features);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
+ phy_10gbit_features);
linkmode_set_bit_array(phy_eee_cap1_features_array,
ARRAY_SIZE(phy_eee_cap1_features_array),
@@ -378,8 +382,8 @@ static SIMPLE_DEV_PM_OPS(mdio_bus_phy_pm_ops, mdio_bus_phy_suspend,
* comparison
* @run: The actual code to be run when a matching PHY is found
*/
-int phy_register_fixup(const char *bus_id, u32 phy_uid, u32 phy_uid_mask,
- int (*run)(struct phy_device *))
+static int phy_register_fixup(const char *bus_id, u32 phy_uid, u32 phy_uid_mask,
+ int (*run)(struct phy_device *))
{
struct phy_fixup *fixup = kzalloc(sizeof(*fixup), GFP_KERNEL);
@@ -397,7 +401,6 @@ int phy_register_fixup(const char *bus_id, u32 phy_uid, u32 phy_uid_mask,
return 0;
}
-EXPORT_SYMBOL(phy_register_fixup);
/* Registers a fixup to be run on any PHY with the UID in phy_uid */
int phy_register_fixup_for_uid(u32 phy_uid, u32 phy_uid_mask,
@@ -544,7 +547,7 @@ phy_interface_show(struct device *dev, struct device_attribute *attr, char *buf)
struct phy_device *phydev = to_phy_device(dev);
const char *mode = NULL;
- if (phy_is_internal(phydev))
+ if (phydev->is_internal)
mode = "internal";
else
mode = phy_modes(phydev->interface);
@@ -1685,243 +1688,6 @@ bool phy_driver_is_genphy_10g(struct phy_device *phydev)
EXPORT_SYMBOL_GPL(phy_driver_is_genphy_10g);
/**
- * phy_package_join - join a common PHY group
- * @phydev: target phy_device struct
- * @base_addr: cookie and base PHY address of PHY package for offset
- * calculation of global register access
- * @priv_size: if non-zero allocate this amount of bytes for private data
- *
- * This joins a PHY group and provides a shared storage for all phydevs in
- * this group. This is intended to be used for packages which contain
- * more than one PHY, for example a quad PHY transceiver.
- *
- * The base_addr parameter serves as cookie which has to have the same values
- * for all members of one group and as the base PHY address of the PHY package
- * for offset calculation to access generic registers of a PHY package.
- * Usually, one of the PHY addresses of the different PHYs in the package
- * provides access to these global registers.
- * The address which is given here, will be used in the phy_package_read()
- * and phy_package_write() convenience functions as base and added to the
- * passed offset in those functions.
- *
- * This will set the shared pointer of the phydev to the shared storage.
- * If this is the first call for a this cookie the shared storage will be
- * allocated. If priv_size is non-zero, the given amount of bytes are
- * allocated for the priv member.
- *
- * Returns < 1 on error, 0 on success. Esp. calling phy_package_join()
- * with the same cookie but a different priv_size is an error.
- */
-int phy_package_join(struct phy_device *phydev, int base_addr, size_t priv_size)
-{
- struct mii_bus *bus = phydev->mdio.bus;
- struct phy_package_shared *shared;
- int ret;
-
- if (base_addr < 0 || base_addr >= PHY_MAX_ADDR)
- return -EINVAL;
-
- mutex_lock(&bus->shared_lock);
- shared = bus->shared[base_addr];
- if (!shared) {
- ret = -ENOMEM;
- shared = kzalloc(sizeof(*shared), GFP_KERNEL);
- if (!shared)
- goto err_unlock;
- if (priv_size) {
- shared->priv = kzalloc(priv_size, GFP_KERNEL);
- if (!shared->priv)
- goto err_free;
- shared->priv_size = priv_size;
- }
- shared->base_addr = base_addr;
- shared->np = NULL;
- refcount_set(&shared->refcnt, 1);
- bus->shared[base_addr] = shared;
- } else {
- ret = -EINVAL;
- if (priv_size && priv_size != shared->priv_size)
- goto err_unlock;
- refcount_inc(&shared->refcnt);
- }
- mutex_unlock(&bus->shared_lock);
-
- phydev->shared = shared;
-
- return 0;
-
-err_free:
- kfree(shared);
-err_unlock:
- mutex_unlock(&bus->shared_lock);
- return ret;
-}
-EXPORT_SYMBOL_GPL(phy_package_join);
-
-/**
- * of_phy_package_join - join a common PHY group in PHY package
- * @phydev: target phy_device struct
- * @priv_size: if non-zero allocate this amount of bytes for private data
- *
- * This is a variant of phy_package_join for PHY package defined in DT.
- *
- * The parent node of the @phydev is checked as a valid PHY package node
- * structure (by matching the node name "ethernet-phy-package") and the
- * base_addr for the PHY package is passed to phy_package_join.
- *
- * With this configuration the shared struct will also have the np value
- * filled to use additional DT defined properties in PHY specific
- * probe_once and config_init_once PHY package OPs.
- *
- * Returns < 0 on error, 0 on success. Esp. calling phy_package_join()
- * with the same cookie but a different priv_size is an error. Or a parent
- * node is not detected or is not valid or doesn't match the expected node
- * name for PHY package.
- */
-int of_phy_package_join(struct phy_device *phydev, size_t priv_size)
-{
- struct device_node *node = phydev->mdio.dev.of_node;
- struct device_node *package_node;
- u32 base_addr;
- int ret;
-
- if (!node)
- return -EINVAL;
-
- package_node = of_get_parent(node);
- if (!package_node)
- return -EINVAL;
-
- if (!of_node_name_eq(package_node, "ethernet-phy-package")) {
- ret = -EINVAL;
- goto exit;
- }
-
- if (of_property_read_u32(package_node, "reg", &base_addr)) {
- ret = -EINVAL;
- goto exit;
- }
-
- ret = phy_package_join(phydev, base_addr, priv_size);
- if (ret)
- goto exit;
-
- phydev->shared->np = package_node;
-
- return 0;
-exit:
- of_node_put(package_node);
- return ret;
-}
-EXPORT_SYMBOL_GPL(of_phy_package_join);
-
-/**
- * phy_package_leave - leave a common PHY group
- * @phydev: target phy_device struct
- *
- * This leaves a PHY group created by phy_package_join(). If this phydev
- * was the last user of the shared data between the group, this data is
- * freed. Resets the phydev->shared pointer to NULL.
- */
-void phy_package_leave(struct phy_device *phydev)
-{
- struct phy_package_shared *shared = phydev->shared;
- struct mii_bus *bus = phydev->mdio.bus;
-
- if (!shared)
- return;
-
- /* Decrease the node refcount on leave if present */
- if (shared->np)
- of_node_put(shared->np);
-
- if (refcount_dec_and_mutex_lock(&shared->refcnt, &bus->shared_lock)) {
- bus->shared[shared->base_addr] = NULL;
- mutex_unlock(&bus->shared_lock);
- kfree(shared->priv);
- kfree(shared);
- }
-
- phydev->shared = NULL;
-}
-EXPORT_SYMBOL_GPL(phy_package_leave);
-
-static void devm_phy_package_leave(struct device *dev, void *res)
-{
- phy_package_leave(*(struct phy_device **)res);
-}
-
-/**
- * devm_phy_package_join - resource managed phy_package_join()
- * @dev: device that is registering this PHY package
- * @phydev: target phy_device struct
- * @base_addr: cookie and base PHY address of PHY package for offset
- * calculation of global register access
- * @priv_size: if non-zero allocate this amount of bytes for private data
- *
- * Managed phy_package_join(). Shared storage fetched by this function,
- * phy_package_leave() is automatically called on driver detach. See
- * phy_package_join() for more information.
- */
-int devm_phy_package_join(struct device *dev, struct phy_device *phydev,
- int base_addr, size_t priv_size)
-{
- struct phy_device **ptr;
- int ret;
-
- ptr = devres_alloc(devm_phy_package_leave, sizeof(*ptr),
- GFP_KERNEL);
- if (!ptr)
- return -ENOMEM;
-
- ret = phy_package_join(phydev, base_addr, priv_size);
-
- if (!ret) {
- *ptr = phydev;
- devres_add(dev, ptr);
- } else {
- devres_free(ptr);
- }
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(devm_phy_package_join);
-
-/**
- * devm_of_phy_package_join - resource managed of_phy_package_join()
- * @dev: device that is registering this PHY package
- * @phydev: target phy_device struct
- * @priv_size: if non-zero allocate this amount of bytes for private data
- *
- * Managed of_phy_package_join(). Shared storage fetched by this function,
- * phy_package_leave() is automatically called on driver detach. See
- * of_phy_package_join() for more information.
- */
-int devm_of_phy_package_join(struct device *dev, struct phy_device *phydev,
- size_t priv_size)
-{
- struct phy_device **ptr;
- int ret;
-
- ptr = devres_alloc(devm_phy_package_leave, sizeof(*ptr),
- GFP_KERNEL);
- if (!ptr)
- return -ENOMEM;
-
- ret = of_phy_package_join(phydev, priv_size);
-
- if (!ret) {
- *ptr = phydev;
- devres_add(dev, ptr);
- } else {
- devres_free(ptr);
- }
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(devm_of_phy_package_join);
-
-/**
* phy_detach - detach a PHY device from its network device
* @phydev: target phy_device struct
*
@@ -2052,41 +1818,6 @@ int phy_resume(struct phy_device *phydev)
}
EXPORT_SYMBOL(phy_resume);
-int phy_loopback(struct phy_device *phydev, bool enable)
-{
- int ret = 0;
-
- if (!phydev->drv)
- return -EIO;
-
- mutex_lock(&phydev->lock);
-
- if (enable && phydev->loopback_enabled) {
- ret = -EBUSY;
- goto out;
- }
-
- if (!enable && !phydev->loopback_enabled) {
- ret = -EINVAL;
- goto out;
- }
-
- if (phydev->drv->set_loopback)
- ret = phydev->drv->set_loopback(phydev, enable);
- else
- ret = genphy_loopback(phydev, enable);
-
- if (ret)
- goto out;
-
- phydev->loopback_enabled = enable;
-
-out:
- mutex_unlock(&phydev->lock);
- return ret;
-}
-EXPORT_SYMBOL(phy_loopback);
-
/**
* phy_reset_after_clk_enable - perform a PHY reset if needed
* @phydev: target phy_device struct
@@ -2354,7 +2085,7 @@ EXPORT_SYMBOL(genphy_check_and_restart_aneg);
int __genphy_config_aneg(struct phy_device *phydev, bool changed)
{
__ETHTOOL_DECLARE_LINK_MODE_MASK(fixed_advert);
- const struct phy_setting *set;
+ const struct link_capabilities *c;
unsigned long *advert;
int err;
@@ -2380,10 +2111,11 @@ int __genphy_config_aneg(struct phy_device *phydev, bool changed)
} else {
linkmode_zero(fixed_advert);
- set = phy_lookup_setting(phydev->speed, phydev->duplex,
- phydev->supported, true);
- if (set)
- linkmode_set_bit(set->bit, fixed_advert);
+ c = phy_caps_lookup(phydev->speed, phydev->duplex,
+ phydev->supported, true);
+ if (c)
+ linkmode_and(fixed_advert, phydev->supported,
+ c->linkmodes);
advert = fixed_advert;
}
@@ -2843,12 +2575,18 @@ int genphy_resume(struct phy_device *phydev)
}
EXPORT_SYMBOL(genphy_resume);
-int genphy_loopback(struct phy_device *phydev, bool enable)
+int genphy_loopback(struct phy_device *phydev, bool enable, int speed)
{
if (enable) {
u16 ctl = BMCR_LOOPBACK;
int ret, val;
+ if (speed == SPEED_10 || speed == SPEED_100 ||
+ speed == SPEED_1000)
+ phydev->speed = speed;
+ else if (speed)
+ return -EINVAL;
+
ctl |= mii_bmcr_encode_fixed(phydev->speed, phydev->duplex);
phy_modify(phydev, MII_BMCR, ~0, ctl);
@@ -2966,7 +2704,7 @@ void phy_disable_eee(struct phy_device *phydev)
phydev->eee_cfg.tx_lpi_enabled = false;
phydev->eee_cfg.eee_enabled = false;
/* don't let userspace re-enable EEE advertisement */
- linkmode_fill(phydev->eee_broken_modes);
+ linkmode_fill(phydev->eee_disabled_modes);
}
EXPORT_SYMBOL_GPL(phy_disable_eee);
@@ -3096,19 +2834,12 @@ void phy_get_pause(struct phy_device *phydev, bool *tx_pause, bool *rx_pause)
EXPORT_SYMBOL(phy_get_pause);
#if IS_ENABLED(CONFIG_OF_MDIO)
-static int phy_get_int_delay_property(struct device *dev, const char *name)
+static int phy_get_u32_property(struct device *dev, const char *name, u32 *val)
{
- s32 int_delay;
- int ret;
-
- ret = device_property_read_u32(dev, name, &int_delay);
- if (ret)
- return ret;
-
- return int_delay;
+ return device_property_read_u32(dev, name, val);
}
#else
-static int phy_get_int_delay_property(struct device *dev, const char *name)
+static int phy_get_u32_property(struct device *dev, const char *name, u32 *val)
{
return -EINVAL;
}
@@ -3133,12 +2864,12 @@ static int phy_get_int_delay_property(struct device *dev, const char *name)
s32 phy_get_internal_delay(struct phy_device *phydev, struct device *dev,
const int *delay_values, int size, bool is_rx)
{
- s32 delay;
- int i;
+ int i, ret;
+ u32 delay;
if (is_rx) {
- delay = phy_get_int_delay_property(dev, "rx-internal-delay-ps");
- if (delay < 0 && size == 0) {
+ ret = phy_get_u32_property(dev, "rx-internal-delay-ps", &delay);
+ if (ret < 0 && size == 0) {
if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID)
return 1;
@@ -3147,8 +2878,8 @@ s32 phy_get_internal_delay(struct phy_device *phydev, struct device *dev,
}
} else {
- delay = phy_get_int_delay_property(dev, "tx-internal-delay-ps");
- if (delay < 0 && size == 0) {
+ ret = phy_get_u32_property(dev, "tx-internal-delay-ps", &delay);
+ if (ret < 0 && size == 0) {
if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID)
return 1;
@@ -3157,8 +2888,8 @@ s32 phy_get_internal_delay(struct phy_device *phydev, struct device *dev,
}
}
- if (delay < 0)
- return delay;
+ if (ret < 0)
+ return ret;
if (size == 0)
return delay;
@@ -3193,6 +2924,30 @@ s32 phy_get_internal_delay(struct phy_device *phydev, struct device *dev,
}
EXPORT_SYMBOL(phy_get_internal_delay);
+/**
+ * phy_get_tx_amplitude_gain - stores tx amplitude gain in @val
+ * @phydev: phy_device struct
+ * @dev: pointer to the device's struct device
+ * @linkmode: linkmode for which the tx amplitude gain should be retrieved
+ * @val: tx amplitude gain
+ *
+ * Returns: 0 on success, < 0 on failure
+ */
+int phy_get_tx_amplitude_gain(struct phy_device *phydev, struct device *dev,
+ enum ethtool_link_mode_bit_indices linkmode,
+ u32 *val)
+{
+ switch (linkmode) {
+ case ETHTOOL_LINK_MODE_100baseT_Full_BIT:
+ return phy_get_u32_property(dev,
+ "tx-amplitude-100base-tx-percent",
+ val);
+ default:
+ return -EINVAL;
+ }
+}
+EXPORT_SYMBOL_GPL(phy_get_tx_amplitude_gain);
+
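A hedged sketch of a PHY driver using the helper above during config_init to fetch the DT-provided 100BASE-TX amplitude gain; the driver, the vendor register programming and the fallback behaviour are assumptions.

static int foo_config_init(struct phy_device *phydev)
{
	u32 gain;
	int ret;

	ret = phy_get_tx_amplitude_gain(phydev, &phydev->mdio.dev,
					ETHTOOL_LINK_MODE_100baseT_Full_BIT,
					&gain);
	if (ret)
		return 0;	/* property absent: keep the hardware default */

	/* Program "gain" into a (hypothetical) vendor-specific register. */

	return 0;
}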
static int phy_led_set_brightness(struct led_classdev *led_cdev,
enum led_brightness value)
{
@@ -3563,22 +3318,21 @@ static int phy_probe(struct device *dev)
if (err)
goto out;
- /* There is no "enabled" flag. If PHY is advertising, assume it is
- * kind of enabled.
- */
- phydev->eee_cfg.eee_enabled = !linkmode_empty(phydev->advertising_eee);
+ /* Get the EEE modes we want to prohibit. */
+ of_set_phy_eee_broken(phydev);
/* Some PHYs may advertise, by default, not support EEE modes. So,
- * we need to clean them.
+	 * we need to clean them. In addition, remove all disabled EEE modes.
*/
- if (phydev->eee_cfg.eee_enabled)
- linkmode_and(phydev->advertising_eee, phydev->supported_eee,
- phydev->advertising_eee);
+ linkmode_and(phydev->advertising_eee, phydev->supported_eee,
+ phydev->advertising_eee);
+ linkmode_andnot(phydev->advertising_eee, phydev->advertising_eee,
+ phydev->eee_disabled_modes);
- /* Get the EEE modes we want to prohibit. We will ask
- * the PHY stop advertising these mode later on
+ /* There is no "enabled" flag. If PHY is advertising, assume it is
+ * kind of enabled.
*/
- of_set_phy_eee_broken(phydev);
+ phydev->eee_cfg.eee_enabled = !linkmode_empty(phydev->advertising_eee);
/* Get master/slave strap overrides */
of_set_phy_timing_role(phydev);
@@ -3777,6 +3531,10 @@ static int __init phy_init(void)
if (rc)
goto err_ethtool_phy_ops;
+ rc = phy_caps_init();
+ if (rc)
+ goto err_mdio_bus;
+
features_init();
rc = phy_driver_register(&genphy_c45_driver, THIS_MODULE);
diff --git a/drivers/net/phy/phy_led_triggers.c b/drivers/net/phy/phy_led_triggers.c
index f550576eb9da..bd3c9554f6ac 100644
--- a/drivers/net/phy/phy_led_triggers.c
+++ b/drivers/net/phy/phy_led_triggers.c
@@ -5,6 +5,8 @@
#include <linux/phy_led_triggers.h>
#include <linux/netdevice.h>
+#include "phylib-internal.h"
+
static struct phy_led_trigger *phy_speed_to_led_trigger(struct phy_device *phy,
unsigned int speed)
{
diff --git a/drivers/net/phy/phy_link_topology.c b/drivers/net/phy/phy_link_topology.c
index 4a5d73002a1a..0e9e987f37dd 100644
--- a/drivers/net/phy/phy_link_topology.c
+++ b/drivers/net/phy/phy_link_topology.c
@@ -73,7 +73,7 @@ int phy_link_topo_add_phy(struct net_device *dev,
xa_limit_32b, &topo->next_phy_index,
GFP_KERNEL);
- if (ret)
+ if (ret < 0)
goto err;
return 0;
diff --git a/drivers/net/phy/phy_package.c b/drivers/net/phy/phy_package.c
new file mode 100644
index 000000000000..c738f76e8664
--- /dev/null
+++ b/drivers/net/phy/phy_package.c
@@ -0,0 +1,350 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * PHY package support
+ */
+
+#include <linux/of.h>
+#include <linux/phy.h>
+
+#include "phylib.h"
+#include "phylib-internal.h"
+
+/**
+ * struct phy_package_shared - Shared information in PHY packages
+ * @base_addr: Base PHY address of PHY package used to combine PHYs
+ * in one package and for offset calculation of phy_package_read/write
+ * @np: Pointer to the Device Node if PHY package defined in DT
+ * @refcnt: Number of PHYs connected to this shared data
+ * @flags: Initialization of PHY package
+ * @priv_size: Size of the shared private data @priv
+ * @priv: Driver private data shared across a PHY package
+ *
+ * Represents a shared structure between different phydev's in the same
+ * package, for example a quad PHY. See phy_package_join() and
+ * phy_package_leave().
+ */
+struct phy_package_shared {
+ u8 base_addr;
+ /* With PHY package defined in DT this points to the PHY package node */
+ struct device_node *np;
+ refcount_t refcnt;
+ unsigned long flags;
+ size_t priv_size;
+
+ /* private data pointer */
+ /* note that this pointer is shared between different phydevs and
+ * the user has to take care of appropriate locking. It is allocated
+ * and freed automatically by phy_package_join() and
+ * phy_package_leave().
+ */
+ void *priv;
+};
+
+struct device_node *phy_package_get_node(struct phy_device *phydev)
+{
+ return phydev->shared->np;
+}
+EXPORT_SYMBOL_GPL(phy_package_get_node);
+
+void *phy_package_get_priv(struct phy_device *phydev)
+{
+ return phydev->shared->priv;
+}
+EXPORT_SYMBOL_GPL(phy_package_get_priv);
+
+int phy_package_address(struct phy_device *phydev, unsigned int addr_offset)
+{
+ struct phy_package_shared *shared = phydev->shared;
+ u8 base_addr = shared->base_addr;
+
+ if (addr_offset >= PHY_MAX_ADDR - base_addr)
+ return -EIO;
+
+ /* we know that addr will be in the range 0..31 and thus the
+ * implicit cast to a signed int is not a problem.
+ */
+ return base_addr + addr_offset;
+}
+
+int __phy_package_read(struct phy_device *phydev, unsigned int addr_offset,
+ u32 regnum)
+{
+ int addr = phy_package_address(phydev, addr_offset);
+
+ if (addr < 0)
+ return addr;
+
+ return __mdiobus_read(phydev->mdio.bus, addr, regnum);
+}
+EXPORT_SYMBOL_GPL(__phy_package_read);
+
+int __phy_package_write(struct phy_device *phydev, unsigned int addr_offset,
+ u32 regnum, u16 val)
+{
+ int addr = phy_package_address(phydev, addr_offset);
+
+ if (addr < 0)
+ return addr;
+
+ return __mdiobus_write(phydev->mdio.bus, addr, regnum, val);
+}
+EXPORT_SYMBOL_GPL(__phy_package_write);
+
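A hedged sketch of the raw accessors above: read a package-global register at a hypothetical offset and register number. The __-prefixed helpers do not take the MDIO bus lock themselves, so the caller is assumed to hold it.

static int foo_read_package_status(struct phy_device *phydev)
{
	/* offset 3 and register 0x1e are illustrative values only */
	return __phy_package_read(phydev, 3, 0x1e);
}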
+static bool __phy_package_set_once(struct phy_device *phydev, unsigned int b)
+{
+ struct phy_package_shared *shared = phydev->shared;
+
+ if (!shared)
+ return false;
+
+ return !test_and_set_bit(b, &shared->flags);
+}
+
+bool phy_package_init_once(struct phy_device *phydev)
+{
+ return __phy_package_set_once(phydev, 0);
+}
+EXPORT_SYMBOL_GPL(phy_package_init_once);
+
+bool phy_package_probe_once(struct phy_device *phydev)
+{
+ return __phy_package_set_once(phydev, 1);
+}
+EXPORT_SYMBOL_GPL(phy_package_probe_once);
+
+/**
+ * phy_package_join - join a common PHY group
+ * @phydev: target phy_device struct
+ * @base_addr: cookie and base PHY address of PHY package for offset
+ * calculation of global register access
+ * @priv_size: if non-zero allocate this amount of bytes for private data
+ *
+ * This joins a PHY group and provides a shared storage for all phydevs in
+ * this group. This is intended to be used for packages which contain
+ * more than one PHY, for example a quad PHY transceiver.
+ *
+ * The base_addr parameter serves as a cookie, which has to have the same value
+ * for all members of one group, and as the base PHY address of the PHY package
+ * for offset calculation to access generic registers of a PHY package.
+ * Usually, one of the PHY addresses of the different PHYs in the package
+ * provides access to these global registers.
+ * The address which is given here, will be used in the phy_package_read()
+ * and phy_package_write() convenience functions as base and added to the
+ * passed offset in those functions.
+ *
+ * This will set the shared pointer of the phydev to the shared storage.
+ * If this is the first call for this cookie, the shared storage will be
+ * allocated. If priv_size is non-zero, the given number of bytes is
+ * allocated for the priv member.
+ *
+ * Returns < 0 on error, 0 on success. Esp. calling phy_package_join()
+ * with the same cookie but a different priv_size is an error.
+ */
+int phy_package_join(struct phy_device *phydev, int base_addr, size_t priv_size)
+{
+ struct mii_bus *bus = phydev->mdio.bus;
+ struct phy_package_shared *shared;
+ int ret;
+
+ if (base_addr < 0 || base_addr >= PHY_MAX_ADDR)
+ return -EINVAL;
+
+ mutex_lock(&bus->shared_lock);
+ shared = bus->shared[base_addr];
+ if (!shared) {
+ ret = -ENOMEM;
+ shared = kzalloc(sizeof(*shared), GFP_KERNEL);
+ if (!shared)
+ goto err_unlock;
+ if (priv_size) {
+ shared->priv = kzalloc(priv_size, GFP_KERNEL);
+ if (!shared->priv)
+ goto err_free;
+ shared->priv_size = priv_size;
+ }
+ shared->base_addr = base_addr;
+ shared->np = NULL;
+ refcount_set(&shared->refcnt, 1);
+ bus->shared[base_addr] = shared;
+ } else {
+ ret = -EINVAL;
+ if (priv_size && priv_size != shared->priv_size)
+ goto err_unlock;
+ refcount_inc(&shared->refcnt);
+ }
+ mutex_unlock(&bus->shared_lock);
+
+ phydev->shared = shared;
+
+ return 0;
+
+err_free:
+ kfree(shared);
+err_unlock:
+ mutex_unlock(&bus->shared_lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(phy_package_join);
+
+/**
+ * of_phy_package_join - join a common PHY group in PHY package
+ * @phydev: target phy_device struct
+ * @priv_size: if non-zero allocate this amount of bytes for private data
+ *
+ * This is a variant of phy_package_join for PHY package defined in DT.
+ *
+ * The parent node of the @phydev is checked as a valid PHY package node
+ * structure (by matching the node name "ethernet-phy-package") and the
+ * base_addr for the PHY package is passed to phy_package_join.
+ *
+ * With this configuration the shared struct will also have the np value
+ * filled to use additional DT defined properties in PHY specific
+ * probe_once and config_init_once PHY package OPs.
+ *
+ * Returns < 0 on error, 0 on success. Esp. calling phy_package_join()
+ * with the same cookie but a different priv_size is an error. It is also an
+ * error if the parent node is missing, invalid, or doesn't match the expected
+ * PHY package node name.
+ */
+int of_phy_package_join(struct phy_device *phydev, size_t priv_size)
+{
+ struct device_node *node = phydev->mdio.dev.of_node;
+ struct device_node *package_node;
+ u32 base_addr;
+ int ret;
+
+ if (!node)
+ return -EINVAL;
+
+ package_node = of_get_parent(node);
+ if (!package_node)
+ return -EINVAL;
+
+ if (!of_node_name_eq(package_node, "ethernet-phy-package")) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ if (of_property_read_u32(package_node, "reg", &base_addr)) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ ret = phy_package_join(phydev, base_addr, priv_size);
+ if (ret)
+ goto exit;
+
+ phydev->shared->np = package_node;
+
+ return 0;
+exit:
+ of_node_put(package_node);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(of_phy_package_join);
+
+/**
+ * phy_package_leave - leave a common PHY group
+ * @phydev: target phy_device struct
+ *
+ * This leaves a PHY group created by phy_package_join(). If this phydev
+ * was the last user of the shared data between the group, this data is
+ * freed. Resets the phydev->shared pointer to NULL.
+ */
+void phy_package_leave(struct phy_device *phydev)
+{
+ struct phy_package_shared *shared = phydev->shared;
+ struct mii_bus *bus = phydev->mdio.bus;
+
+ if (!shared)
+ return;
+
+ /* Decrease the node refcount on leave if present */
+ if (shared->np)
+ of_node_put(shared->np);
+
+ if (refcount_dec_and_mutex_lock(&shared->refcnt, &bus->shared_lock)) {
+ bus->shared[shared->base_addr] = NULL;
+ mutex_unlock(&bus->shared_lock);
+ kfree(shared->priv);
+ kfree(shared);
+ }
+
+ phydev->shared = NULL;
+}
+EXPORT_SYMBOL_GPL(phy_package_leave);
+
+static void devm_phy_package_leave(struct device *dev, void *res)
+{
+ phy_package_leave(*(struct phy_device **)res);
+}
+
+/**
+ * devm_phy_package_join - resource managed phy_package_join()
+ * @dev: device that is registering this PHY package
+ * @phydev: target phy_device struct
+ * @base_addr: cookie and base PHY address of PHY package for offset
+ * calculation of global register access
+ * @priv_size: if non-zero allocate this amount of bytes for private data
+ *
+ * Managed phy_package_join(). Shared storage fetched by this function,
+ * phy_package_leave() is automatically called on driver detach. See
+ * phy_package_join() for more information.
+ */
+int devm_phy_package_join(struct device *dev, struct phy_device *phydev,
+ int base_addr, size_t priv_size)
+{
+ struct phy_device **ptr;
+ int ret;
+
+ ptr = devres_alloc(devm_phy_package_leave, sizeof(*ptr),
+ GFP_KERNEL);
+ if (!ptr)
+ return -ENOMEM;
+
+ ret = phy_package_join(phydev, base_addr, priv_size);
+
+ if (!ret) {
+ *ptr = phydev;
+ devres_add(dev, ptr);
+ } else {
+ devres_free(ptr);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(devm_phy_package_join);
+
+/**
+ * devm_of_phy_package_join - resource managed of_phy_package_join()
+ * @dev: device that is registering this PHY package
+ * @phydev: target phy_device struct
+ * @priv_size: if non-zero allocate this amount of bytes for private data
+ *
+ * Managed of_phy_package_join(). Shared storage fetched by this function,
+ * phy_package_leave() is automatically called on driver detach. See
+ * of_phy_package_join() for more information.
+ */
+int devm_of_phy_package_join(struct device *dev, struct phy_device *phydev,
+ size_t priv_size)
+{
+ struct phy_device **ptr;
+ int ret;
+
+ ptr = devres_alloc(devm_phy_package_leave, sizeof(*ptr),
+ GFP_KERNEL);
+ if (!ptr)
+ return -ENOMEM;
+
+ ret = of_phy_package_join(phydev, priv_size);
+
+ if (!ret) {
+ *ptr = phydev;
+ devres_add(dev, ptr);
+ } else {
+ devres_free(ptr);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(devm_of_phy_package_join);
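To illustrate the moved package API end to end, a hedged sketch of a quad-PHY driver probe: every PHY in the package joins with the same base address, and one of them performs the package-wide setup. The driver name, base address and shared private struct are assumptions.

struct foo_pkg_priv {
	int global_irq;	/* example of data shared across the package */
};

static int foo_phy_probe(struct phy_device *phydev)
{
	struct foo_pkg_priv *priv;
	int ret;

	/* base address 0 is a placeholder cookie for this sketch */
	ret = devm_phy_package_join(&phydev->mdio.dev, phydev, 0,
				    sizeof(*priv));
	if (ret)
		return ret;

	priv = phy_package_get_priv(phydev);

	if (phy_package_probe_once(phydev)) {
		/* one-time, package-wide initialisation goes here */
		priv->global_irq = -1;
	}

	return 0;
}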
diff --git a/drivers/net/phy/phylib-internal.h b/drivers/net/phy/phylib-internal.h
new file mode 100644
index 000000000000..afac2bd15b50
--- /dev/null
+++ b/drivers/net/phy/phylib-internal.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * phylib-internal header
+ */
+
+#ifndef __PHYLIB_INTERNAL_H
+#define __PHYLIB_INTERNAL_H
+
+struct phy_device;
+
+/*
+ * phy_supported_speeds - return all speeds currently supported by a PHY device
+ */
+unsigned int phy_supported_speeds(struct phy_device *phy,
+ unsigned int *speeds,
+ unsigned int size);
+void of_set_phy_supported(struct phy_device *phydev);
+void of_set_phy_eee_broken(struct phy_device *phydev);
+void of_set_phy_timing_role(struct phy_device *phydev);
+int phy_speed_down_core(struct phy_device *phydev);
+void phy_check_downshift(struct phy_device *phydev);
+
+int phy_package_address(struct phy_device *phydev, unsigned int addr_offset);
+
+int genphy_c45_read_eee_adv(struct phy_device *phydev, unsigned long *adv);
+
+#endif /* __PHYLIB_INTERNAL_H */
diff --git a/drivers/net/phy/phylib.h b/drivers/net/phy/phylib.h
new file mode 100644
index 000000000000..c15484a805b3
--- /dev/null
+++ b/drivers/net/phy/phylib.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * phylib header
+ */
+
+#ifndef __PHYLIB_H
+#define __PHYLIB_H
+
+struct device_node;
+struct phy_device;
+
+struct device_node *phy_package_get_node(struct phy_device *phydev);
+void *phy_package_get_priv(struct phy_device *phydev);
+int __phy_package_read(struct phy_device *phydev, unsigned int addr_offset,
+ u32 regnum);
+int __phy_package_write(struct phy_device *phydev, unsigned int addr_offset,
+ u32 regnum, u16 val);
+int __phy_package_read_mmd(struct phy_device *phydev,
+ unsigned int addr_offset, int devad,
+ u32 regnum);
+int __phy_package_write_mmd(struct phy_device *phydev,
+ unsigned int addr_offset, int devad,
+ u32 regnum, u16 val);
+bool phy_package_init_once(struct phy_device *phydev);
+bool phy_package_probe_once(struct phy_device *phydev);
+int phy_package_join(struct phy_device *phydev, int base_addr, size_t priv_size);
+int of_phy_package_join(struct phy_device *phydev, size_t priv_size);
+void phy_package_leave(struct phy_device *phydev);
+int devm_phy_package_join(struct device *dev, struct phy_device *phydev,
+ int base_addr, size_t priv_size);
+int devm_of_phy_package_join(struct device *dev, struct phy_device *phydev,
+ size_t priv_size);
+
+#endif /* __PHYLIB_H */
diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
index b00a315de060..69ca765485db 100644
--- a/drivers/net/phy/phylink.c
+++ b/drivers/net/phy/phylink.c
@@ -20,6 +20,7 @@
#include <linux/timer.h>
#include <linux/workqueue.h>
+#include "phy-caps.h"
#include "sfp.h"
#include "swphy.h"
@@ -71,8 +72,6 @@ struct phylink {
struct gpio_desc *link_gpio;
unsigned int link_irq;
struct timer_list link_poll;
- void (*get_fixed_state)(struct net_device *dev,
- struct phylink_link_state *s);
struct mutex state_mutex;
struct phylink_link_state phy_state;
@@ -82,12 +81,14 @@ struct phylink {
unsigned int pcs_state;
bool link_failed;
+ bool major_config_failed;
bool mac_supports_eee_ops;
bool mac_supports_eee;
bool phy_enable_tx_lpi;
bool mac_enable_tx_lpi;
bool mac_tx_clk_stop;
u32 mac_tx_lpi_timer;
+ u8 mac_rx_clk_stop_blocked;
struct sfp_bus *sfp_bus;
bool sfp_may_have_phy;
@@ -291,6 +292,61 @@ static int phylink_interface_max_speed(phy_interface_t interface)
return SPEED_UNKNOWN;
}
+static struct {
+ unsigned long mask;
+ int speed;
+ unsigned int duplex;
+ unsigned int caps_bit;
+} phylink_caps_params[] = {
+ { MAC_400000FD, SPEED_400000, DUPLEX_FULL, BIT(LINK_CAPA_400000FD) },
+ { MAC_200000FD, SPEED_200000, DUPLEX_FULL, BIT(LINK_CAPA_200000FD) },
+ { MAC_100000FD, SPEED_100000, DUPLEX_FULL, BIT(LINK_CAPA_100000FD) },
+ { MAC_56000FD, SPEED_56000, DUPLEX_FULL, BIT(LINK_CAPA_56000FD) },
+ { MAC_50000FD, SPEED_50000, DUPLEX_FULL, BIT(LINK_CAPA_50000FD) },
+ { MAC_40000FD, SPEED_40000, DUPLEX_FULL, BIT(LINK_CAPA_40000FD) },
+ { MAC_25000FD, SPEED_25000, DUPLEX_FULL, BIT(LINK_CAPA_25000FD) },
+ { MAC_20000FD, SPEED_20000, DUPLEX_FULL, BIT(LINK_CAPA_20000FD) },
+ { MAC_10000FD, SPEED_10000, DUPLEX_FULL, BIT(LINK_CAPA_10000FD) },
+ { MAC_5000FD, SPEED_5000, DUPLEX_FULL, BIT(LINK_CAPA_5000FD) },
+ { MAC_2500FD, SPEED_2500, DUPLEX_FULL, BIT(LINK_CAPA_2500FD) },
+ { MAC_1000FD, SPEED_1000, DUPLEX_FULL, BIT(LINK_CAPA_1000FD) },
+ { MAC_1000HD, SPEED_1000, DUPLEX_HALF, BIT(LINK_CAPA_1000HD) },
+ { MAC_100FD, SPEED_100, DUPLEX_FULL, BIT(LINK_CAPA_100FD) },
+ { MAC_100HD, SPEED_100, DUPLEX_HALF, BIT(LINK_CAPA_100HD) },
+ { MAC_10FD, SPEED_10, DUPLEX_FULL, BIT(LINK_CAPA_10FD) },
+ { MAC_10HD, SPEED_10, DUPLEX_HALF, BIT(LINK_CAPA_10HD) },
+};
+
+/**
+ * phylink_caps_to_link_caps() - Convert a set of MAC capabilities to LINK caps
+ * @caps: A set of MAC capabilities
+ *
+ * Returns: The corresponding set of LINK_CAPA as defined in phy-caps.h
+ */
+static unsigned long phylink_caps_to_link_caps(unsigned long caps)
+{
+ unsigned long link_caps = 0;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(phylink_caps_params); i++)
+ if (caps & phylink_caps_params[i].mask)
+ link_caps |= phylink_caps_params[i].caps_bit;
+
+ return link_caps;
+}
+
+static unsigned long phylink_link_caps_to_mac_caps(unsigned long link_caps)
+{
+ unsigned long caps = 0;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(phylink_caps_params); i++)
+ if (link_caps & phylink_caps_params[i].caps_bit)
+ caps |= phylink_caps_params[i].mask;
+
+ return caps;
+}
+
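
The two converters above are inverses of each other over the entries of phylink_caps_params[]; pause bits have no LINK_CAPA counterpart and are handled separately by the callers. A worked example, using only values from the table above:

/* Sketch: what the converters produce for a typical MAC capability mask.
 *
 *   caps      = MAC_SYM_PAUSE | MAC_100FD | MAC_1000FD;
 *   link_caps = phylink_caps_to_link_caps(caps);
 *             == BIT(LINK_CAPA_100FD) | BIT(LINK_CAPA_1000FD)
 *                (MAC_SYM_PAUSE has no table entry and is dropped)
 *
 *   phylink_link_caps_to_mac_caps(link_caps) == MAC_100FD | MAC_1000FD
 */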
/**
* phylink_caps_to_linkmodes() - Convert capabilities to ethtool link modes
* @linkmodes: ethtool linkmode mask (must be already initialised)
@@ -302,172 +358,17 @@ static int phylink_interface_max_speed(phy_interface_t interface)
static void phylink_caps_to_linkmodes(unsigned long *linkmodes,
unsigned long caps)
{
+ unsigned long link_caps = phylink_caps_to_link_caps(caps);
+
if (caps & MAC_SYM_PAUSE)
__set_bit(ETHTOOL_LINK_MODE_Pause_BIT, linkmodes);
if (caps & MAC_ASYM_PAUSE)
__set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, linkmodes);
- if (caps & MAC_10HD) {
- __set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_10baseT1S_Half_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_10baseT1S_P2MP_Half_BIT, linkmodes);
- }
-
- if (caps & MAC_10FD) {
- __set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_10baseT1L_Full_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_10baseT1S_Full_BIT, linkmodes);
- }
-
- if (caps & MAC_100HD) {
- __set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_100baseFX_Half_BIT, linkmodes);
- }
-
- if (caps & MAC_100FD) {
- __set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_100baseT1_Full_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_100baseFX_Full_BIT, linkmodes);
- }
-
- if (caps & MAC_1000HD)
- __set_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, linkmodes);
-
- if (caps & MAC_1000FD) {
- __set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_1000baseT1_Full_BIT, linkmodes);
- }
-
- if (caps & MAC_2500FD) {
- __set_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_2500baseX_Full_BIT, linkmodes);
- }
-
- if (caps & MAC_5000FD)
- __set_bit(ETHTOOL_LINK_MODE_5000baseT_Full_BIT, linkmodes);
-
- if (caps & MAC_10000FD) {
- __set_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_10000baseR_FEC_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_10000baseER_Full_BIT, linkmodes);
- }
-
- if (caps & MAC_25000FD) {
- __set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT, linkmodes);
- }
-
- if (caps & MAC_40000FD) {
- __set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT, linkmodes);
- }
-
- if (caps & MAC_50000FD) {
- __set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_50000baseKR_Full_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_50000baseSR_Full_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_50000baseCR_Full_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
- linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_50000baseDR_Full_BIT, linkmodes);
- }
-
- if (caps & MAC_56000FD) {
- __set_bit(ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT, linkmodes);
- }
-
- if (caps & MAC_100000FD) {
- __set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
- linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_100000baseKR2_Full_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_100000baseSR2_Full_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_100000baseCR2_Full_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_100000baseLR2_ER2_FR2_Full_BIT,
- linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_100000baseDR2_Full_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_100000baseKR_Full_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_100000baseSR_Full_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_100000baseLR_ER_FR_Full_BIT,
- linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_100000baseCR_Full_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_100000baseDR_Full_BIT, linkmodes);
- }
-
- if (caps & MAC_200000FD) {
- __set_bit(ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT,
- linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_200000baseDR4_Full_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_200000baseKR2_Full_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_200000baseSR2_Full_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_200000baseLR2_ER2_FR2_Full_BIT,
- linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_200000baseDR2_Full_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_200000baseCR2_Full_BIT, linkmodes);
- }
-
- if (caps & MAC_400000FD) {
- __set_bit(ETHTOOL_LINK_MODE_400000baseKR8_Full_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_400000baseSR8_Full_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_400000baseLR8_ER8_FR8_Full_BIT,
- linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_400000baseDR8_Full_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_400000baseCR8_Full_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_400000baseKR4_Full_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_400000baseSR4_Full_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_400000baseLR4_ER4_FR4_Full_BIT,
- linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_400000baseDR4_Full_BIT, linkmodes);
- __set_bit(ETHTOOL_LINK_MODE_400000baseCR4_Full_BIT, linkmodes);
- }
+ phy_caps_linkmodes(link_caps, linkmodes);
}
-static struct {
- unsigned long mask;
- int speed;
- unsigned int duplex;
-} phylink_caps_params[] = {
- { MAC_400000FD, SPEED_400000, DUPLEX_FULL },
- { MAC_200000FD, SPEED_200000, DUPLEX_FULL },
- { MAC_100000FD, SPEED_100000, DUPLEX_FULL },
- { MAC_56000FD, SPEED_56000, DUPLEX_FULL },
- { MAC_50000FD, SPEED_50000, DUPLEX_FULL },
- { MAC_40000FD, SPEED_40000, DUPLEX_FULL },
- { MAC_25000FD, SPEED_25000, DUPLEX_FULL },
- { MAC_20000FD, SPEED_20000, DUPLEX_FULL },
- { MAC_10000FD, SPEED_10000, DUPLEX_FULL },
- { MAC_5000FD, SPEED_5000, DUPLEX_FULL },
- { MAC_2500FD, SPEED_2500, DUPLEX_FULL },
- { MAC_1000FD, SPEED_1000, DUPLEX_FULL },
- { MAC_1000HD, SPEED_1000, DUPLEX_HALF },
- { MAC_100FD, SPEED_100, DUPLEX_FULL },
- { MAC_100HD, SPEED_100, DUPLEX_HALF },
- { MAC_10FD, SPEED_10, DUPLEX_FULL },
- { MAC_10HD, SPEED_10, DUPLEX_HALF },
-};
-
/**
* phylink_limit_mac_speed - limit the phylink_config to a maximum speed
* @config: pointer to a &struct phylink_config
@@ -523,86 +424,12 @@ static unsigned long phylink_get_capabilities(phy_interface_t interface,
unsigned long mac_capabilities,
int rate_matching)
{
+ unsigned long link_caps = phy_caps_from_interface(interface);
int max_speed = phylink_interface_max_speed(interface);
unsigned long caps = MAC_SYM_PAUSE | MAC_ASYM_PAUSE;
unsigned long matched_caps = 0;
- switch (interface) {
- case PHY_INTERFACE_MODE_USXGMII:
- caps |= MAC_10000FD | MAC_5000FD;
- fallthrough;
-
- case PHY_INTERFACE_MODE_10G_QXGMII:
- caps |= MAC_2500FD;
- fallthrough;
-
- case PHY_INTERFACE_MODE_RGMII_TXID:
- case PHY_INTERFACE_MODE_RGMII_RXID:
- case PHY_INTERFACE_MODE_RGMII_ID:
- case PHY_INTERFACE_MODE_RGMII:
- case PHY_INTERFACE_MODE_PSGMII:
- case PHY_INTERFACE_MODE_QSGMII:
- case PHY_INTERFACE_MODE_QUSGMII:
- case PHY_INTERFACE_MODE_SGMII:
- case PHY_INTERFACE_MODE_GMII:
- caps |= MAC_1000HD | MAC_1000FD;
- fallthrough;
-
- case PHY_INTERFACE_MODE_REVRMII:
- case PHY_INTERFACE_MODE_RMII:
- case PHY_INTERFACE_MODE_SMII:
- case PHY_INTERFACE_MODE_REVMII:
- case PHY_INTERFACE_MODE_MII:
- caps |= MAC_10HD | MAC_10FD;
- fallthrough;
-
- case PHY_INTERFACE_MODE_100BASEX:
- caps |= MAC_100HD | MAC_100FD;
- break;
-
- case PHY_INTERFACE_MODE_TBI:
- case PHY_INTERFACE_MODE_MOCA:
- case PHY_INTERFACE_MODE_RTBI:
- case PHY_INTERFACE_MODE_1000BASEX:
- caps |= MAC_1000HD;
- fallthrough;
- case PHY_INTERFACE_MODE_1000BASEKX:
- case PHY_INTERFACE_MODE_TRGMII:
- caps |= MAC_1000FD;
- break;
-
- case PHY_INTERFACE_MODE_2500BASEX:
- caps |= MAC_2500FD;
- break;
-
- case PHY_INTERFACE_MODE_5GBASER:
- caps |= MAC_5000FD;
- break;
-
- case PHY_INTERFACE_MODE_XGMII:
- case PHY_INTERFACE_MODE_RXAUI:
- case PHY_INTERFACE_MODE_XAUI:
- case PHY_INTERFACE_MODE_10GBASER:
- case PHY_INTERFACE_MODE_10GKR:
- caps |= MAC_10000FD;
- break;
-
- case PHY_INTERFACE_MODE_25GBASER:
- caps |= MAC_25000FD;
- break;
-
- case PHY_INTERFACE_MODE_XLGMII:
- caps |= MAC_40000FD;
- break;
-
- case PHY_INTERFACE_MODE_INTERNAL:
- caps |= ~0;
- break;
-
- case PHY_INTERFACE_MODE_NA:
- case PHY_INTERFACE_MODE_MAX:
- break;
- }
+ caps |= phylink_link_caps_to_mac_caps(link_caps);
switch (rate_matching) {
case RATE_MATCH_OPEN_LOOP:
@@ -801,12 +628,26 @@ static int phylink_validate(struct phylink *pl, unsigned long *supported,
return phylink_validate_mac_and_pcs(pl, supported, state);
}
+static void phylink_fill_fixedlink_supported(unsigned long *supported)
+{
+ linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT, supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_5000baseT_Full_BIT, supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT, supported);
+}
+
static int phylink_parse_fixedlink(struct phylink *pl,
const struct fwnode_handle *fwnode)
{
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(match) = { 0, };
__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
+ const struct link_capabilities *c;
struct fwnode_handle *fixed_node;
- const struct phy_setting *s;
struct gpio_desc *desc;
u32 speed;
int ret;
@@ -874,12 +715,16 @@ static int phylink_parse_fixedlink(struct phylink *pl,
phylink_warn(pl, "fixed link specifies half duplex for %dMbps link?\n",
pl->link_config.speed);
- linkmode_fill(pl->supported);
+ linkmode_zero(pl->supported);
+ phylink_fill_fixedlink_supported(pl->supported);
+
linkmode_copy(pl->link_config.advertising, pl->supported);
phylink_validate(pl, pl->supported, &pl->link_config);
- s = phy_lookup_setting(pl->link_config.speed, pl->link_config.duplex,
- pl->supported, true);
+ c = phy_caps_lookup(pl->link_config.speed, pl->link_config.duplex,
+ pl->supported, true);
+ if (c)
+ linkmode_and(match, pl->supported, c->linkmodes);
linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mask);
linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, mask);
@@ -888,9 +733,10 @@ static int phylink_parse_fixedlink(struct phylink *pl,
phylink_set(pl->supported, MII);
- if (s) {
- __set_bit(s->bit, pl->supported);
- __set_bit(s->bit, pl->link_config.lp_advertising);
+ if (c) {
+ linkmode_or(pl->supported, pl->supported, match);
+ linkmode_or(pl->link_config.lp_advertising,
+ pl->link_config.lp_advertising, match);
} else {
phylink_warn(pl, "fixed link %s duplex %dMbps not recognised\n",
pl->link_config.duplex == DUPLEX_FULL ? "full" : "half",
@@ -1073,6 +919,18 @@ static void phylink_pcs_link_up(struct phylink_pcs *pcs, unsigned int neg_mode,
pcs->ops->pcs_link_up(pcs, neg_mode, interface, speed, duplex);
}
+static void phylink_pcs_disable_eee(struct phylink_pcs *pcs)
+{
+ if (pcs && pcs->ops->pcs_disable_eee)
+ pcs->ops->pcs_disable_eee(pcs);
+}
+
+static void phylink_pcs_enable_eee(struct phylink_pcs *pcs)
+{
+ if (pcs && pcs->ops->pcs_enable_eee)
+ pcs->ops->pcs_enable_eee(pcs);
+}
+
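
phylink only calls these hooks when a PCS provides them. A sketch of a PCS driver wiring up the pcs_enable_eee()/pcs_disable_eee() callbacks used above; the foo_pcs structure, register offset and bit name are assumptions, the ops members are the ones this patch consumes.

#include <linux/bits.h>
#include <linux/io.h>
#include <linux/phylink.h>

#define FOO_PCS_EEE_CTRL	0x30		/* hypothetical register */
#define FOO_PCS_LPI_EN		BIT(0)		/* hypothetical enable bit */

struct foo_pcs {
	struct phylink_pcs pcs;
	void __iomem *base;
};

static void foo_pcs_enable_eee(struct phylink_pcs *pcs)
{
	struct foo_pcs *fp = container_of(pcs, struct foo_pcs, pcs);

	/* Allow the PCS to emit /LPI/ code-groups when the MAC requests LPI. */
	writel(FOO_PCS_LPI_EN, fp->base + FOO_PCS_EEE_CTRL);
}

static void foo_pcs_disable_eee(struct phylink_pcs *pcs)
{
	struct foo_pcs *fp = container_of(pcs, struct foo_pcs, pcs);

	writel(0, fp->base + FOO_PCS_EEE_CTRL);
}

static const struct phylink_pcs_ops foo_pcs_ops = {
	/* mandatory ops (pcs_config, pcs_get_state, ...) elided for brevity */
	.pcs_enable_eee		= foo_pcs_enable_eee,
	.pcs_disable_eee	= foo_pcs_disable_eee,
};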
/* Query inband for a specific interface mode, asking the MAC for the
* PCS which will be used to handle the interface mode.
*/
@@ -1353,19 +1211,22 @@ static void phylink_major_config(struct phylink *pl, bool restart,
struct phylink_pcs *pcs = NULL;
bool pcs_changed = false;
unsigned int rate_kbd;
- unsigned int neg_mode;
int err;
phylink_dbg(pl, "major config, requested %s/%s\n",
phylink_an_mode_str(pl->req_link_an_mode),
phy_modes(state->interface));
+ pl->major_config_failed = false;
+
if (pl->mac_ops->mac_select_pcs) {
pcs = pl->mac_ops->mac_select_pcs(pl->config, state->interface);
if (IS_ERR(pcs)) {
phylink_err(pl,
"mac_select_pcs unexpectedly failed: %pe\n",
pcs);
+
+ pl->major_config_failed = true;
return;
}
@@ -1387,6 +1248,7 @@ static void phylink_major_config(struct phylink *pl, bool restart,
if (err < 0) {
phylink_err(pl, "mac_prepare failed: %pe\n",
ERR_PTR(err));
+ pl->major_config_failed = true;
return;
}
}
@@ -1410,23 +1272,27 @@ static void phylink_major_config(struct phylink *pl, bool restart,
phylink_mac_config(pl, state);
- if (pl->pcs)
- phylink_pcs_post_config(pl->pcs, state->interface);
+ if (pl->pcs) {
+ err = phylink_pcs_post_config(pl->pcs, state->interface);
+ if (err < 0) {
+ phylink_err(pl, "pcs_post_config failed: %pe\n",
+ ERR_PTR(err));
+
+ pl->major_config_failed = true;
+ }
+ }
if (pl->pcs_state == PCS_STATE_STARTING || pcs_changed)
phylink_pcs_enable(pl->pcs);
- neg_mode = pl->act_link_an_mode;
- if (pl->pcs && pl->pcs->neg_mode)
- neg_mode = pl->pcs_neg_mode;
-
- err = phylink_pcs_config(pl->pcs, neg_mode, state,
+ err = phylink_pcs_config(pl->pcs, pl->pcs_neg_mode, state,
!!(pl->link_config.pause & MLO_PAUSE_AN));
- if (err < 0)
- phylink_err(pl, "pcs_config failed: %pe\n",
- ERR_PTR(err));
- else if (err > 0)
+ if (err < 0) {
+ phylink_err(pl, "pcs_config failed: %pe\n", ERR_PTR(err));
+ pl->major_config_failed = true;
+ } else if (err > 0) {
restart = true;
+ }
if (restart)
phylink_pcs_an_restart(pl);
@@ -1434,16 +1300,22 @@ static void phylink_major_config(struct phylink *pl, bool restart,
if (pl->mac_ops->mac_finish) {
err = pl->mac_ops->mac_finish(pl->config, pl->act_link_an_mode,
state->interface);
- if (err < 0)
+ if (err < 0) {
phylink_err(pl, "mac_finish failed: %pe\n",
ERR_PTR(err));
+
+ pl->major_config_failed = true;
+ }
}
if (pl->phydev && pl->phy_ib_mode) {
err = phy_config_inband(pl->phydev, pl->phy_ib_mode);
- if (err < 0)
+ if (err < 0) {
phylink_err(pl, "phy_config_inband: %pe\n",
ERR_PTR(err));
+
+ pl->major_config_failed = true;
+ }
}
if (pl->sfp_bus) {
@@ -1463,7 +1335,6 @@ static void phylink_major_config(struct phylink *pl, bool restart,
*/
static int phylink_change_inband_advert(struct phylink *pl)
{
- unsigned int neg_mode;
int ret;
if (test_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state))
@@ -1479,15 +1350,11 @@ static int phylink_change_inband_advert(struct phylink *pl)
phylink_pcs_neg_mode(pl, pl->pcs, pl->link_config.interface,
pl->link_config.advertising);
- neg_mode = pl->act_link_an_mode;
- if (pl->pcs->neg_mode)
- neg_mode = pl->pcs_neg_mode;
-
/* Modern PCS-based method; update the advert at the PCS, and
* restart negotiation if the pcs_config() helper indicates that
* the programmed advertisement has changed.
*/
- ret = phylink_pcs_config(pl->pcs, neg_mode, &pl->link_config,
+ ret = phylink_pcs_config(pl->pcs, pl->pcs_neg_mode, &pl->link_config,
!!(pl->link_config.pause & MLO_PAUSE_AN));
if (ret < 0)
return ret;
@@ -1511,13 +1378,7 @@ static void phylink_mac_pcs_get_state(struct phylink *pl,
state->an_complete = 0;
state->link = 1;
- pcs = pl->pcs;
- if (!pcs || pcs->neg_mode)
- autoneg = pl->pcs_neg_mode == PHYLINK_PCS_NEG_INBAND_ENABLED;
- else
- autoneg = linkmode_test_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
- state->advertising);
-
+ autoneg = pl->pcs_neg_mode == PHYLINK_PCS_NEG_INBAND_ENABLED;
if (autoneg) {
state->speed = SPEED_UNKNOWN;
state->duplex = DUPLEX_UNKNOWN;
@@ -1528,6 +1389,7 @@ static void phylink_mac_pcs_get_state(struct phylink *pl,
state->pause = pl->link_config.pause;
}
+ pcs = pl->pcs;
if (pcs)
pcs->ops->pcs_get_state(pcs, pl->pcs_neg_mode, state);
else
@@ -1601,6 +1463,8 @@ static void phylink_deactivate_lpi(struct phylink *pl)
phylink_dbg(pl, "disabling LPI\n");
pl->mac_ops->mac_disable_tx_lpi(pl->config);
+
+ phylink_pcs_disable_eee(pl->pcs);
}
}
@@ -1617,20 +1481,24 @@ static void phylink_activate_lpi(struct phylink *pl)
phylink_dbg(pl, "LPI timer %uus, tx clock stop %u\n",
pl->mac_tx_lpi_timer, pl->mac_tx_clk_stop);
+ phylink_pcs_enable_eee(pl->pcs);
+
err = pl->mac_ops->mac_enable_tx_lpi(pl->config, pl->mac_tx_lpi_timer,
pl->mac_tx_clk_stop);
- if (!err)
- pl->mac_enable_tx_lpi = true;
- else
+ if (err) {
+ phylink_pcs_disable_eee(pl->pcs);
phylink_err(pl, "%ps() failed: %pe\n",
pl->mac_ops->mac_enable_tx_lpi, ERR_PTR(err));
+ return;
+ }
+
+ pl->mac_enable_tx_lpi = true;
}
static void phylink_link_up(struct phylink *pl,
struct phylink_link_state link_state)
{
struct net_device *ndev = pl->netdev;
- unsigned int neg_mode;
int speed, duplex;
bool rx_pause;
@@ -1661,11 +1529,7 @@ static void phylink_link_up(struct phylink *pl,
pl->cur_interface = link_state.interface;
- neg_mode = pl->act_link_an_mode;
- if (pl->pcs && pl->pcs->neg_mode)
- neg_mode = pl->pcs_neg_mode;
-
- phylink_pcs_link_up(pl->pcs, neg_mode, pl->cur_interface, speed,
+ phylink_pcs_link_up(pl->pcs, pl->pcs_neg_mode, pl->cur_interface, speed,
duplex);
pl->mac_ops->mac_link_up(pl->config, pl->phydev, pl->act_link_an_mode,
@@ -1795,6 +1659,12 @@ static void phylink_resolve(struct work_struct *w)
}
}
+ /* If configuration of the interface failed, force the link down
+ * until we get a successful configuration.
+ */
+ if (pl->major_config_failed)
+ link_state.link = false;
+
if (link_state.link != cur_link_state) {
pl->old_link_state = link_state.link;
if (!link_state.link)
@@ -1879,21 +1749,20 @@ static int phylink_register_sfp(struct phylink *pl,
int phylink_set_fixed_link(struct phylink *pl,
const struct phylink_link_state *state)
{
- const struct phy_setting *s;
+ const struct link_capabilities *c;
unsigned long *adv;
if (pl->cfg_link_an_mode != MLO_AN_PHY || !state ||
!test_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state))
return -EINVAL;
- s = phy_lookup_setting(state->speed, state->duplex,
- pl->supported, true);
- if (!s)
+ c = phy_caps_lookup(state->speed, state->duplex,
+ pl->supported, true);
+ if (!c)
return -EINVAL;
adv = pl->link_config.advertising;
- linkmode_zero(adv);
- linkmode_set_bit(s->bit, adv);
+ linkmode_and(adv, pl->supported, c->linkmodes);
linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, adv);
pl->link_config.speed = state->speed;
@@ -1957,8 +1826,7 @@ struct phylink *phylink_create(struct phylink_config *config,
return ERR_PTR(-EINVAL);
}
- pl->mac_supports_eee_ops = mac_ops->mac_disable_tx_lpi &&
- mac_ops->mac_enable_tx_lpi;
+ pl->mac_supports_eee_ops = phylink_mac_implements_lpi(mac_ops);
pl->mac_supports_eee = pl->mac_supports_eee_ops &&
pl->config->lpi_capabilities &&
!phy_interface_empty(pl->config->lpi_interfaces);
@@ -2046,7 +1914,7 @@ bool phylink_expects_phy(struct phylink *pl)
{
if (pl->cfg_link_an_mode == MLO_AN_FIXED ||
(pl->cfg_link_an_mode == MLO_AN_INBAND &&
- phy_interface_mode_is_8023z(pl->link_config.interface)))
+ phy_interface_mode_is_8023z(pl->link_interface)))
return false;
return true;
}
@@ -2595,6 +2463,64 @@ void phylink_stop(struct phylink *pl)
EXPORT_SYMBOL_GPL(phylink_stop);
/**
+ * phylink_rx_clk_stop_block() - block PHY ability to stop receive clock in LPI
+ * @pl: a pointer to a &struct phylink returned from phylink_create()
+ *
+ * Disable the PHY's ability to stop the receive clock while the receive path
+ * is in EEE LPI state, until calls to phylink_rx_clk_stop_block() are
+ * balanced by corresponding calls to phylink_rx_clk_stop_unblock().
+ */
+void phylink_rx_clk_stop_block(struct phylink *pl)
+{
+ ASSERT_RTNL();
+
+ if (pl->mac_rx_clk_stop_blocked == U8_MAX) {
+ phylink_warn(pl, "%s called too many times - ignoring\n",
+ __func__);
+ dump_stack();
+ return;
+ }
+
+ /* Disable PHY receive clock stop if this is the first time this
+ * function has been called and clock-stop was previously enabled.
+ */
+ if (pl->mac_rx_clk_stop_blocked++ == 0 &&
+ pl->mac_supports_eee_ops && pl->phydev &&
+ pl->config->eee_rx_clk_stop_enable)
+ phy_eee_rx_clock_stop(pl->phydev, false);
+}
+EXPORT_SYMBOL_GPL(phylink_rx_clk_stop_block);
+
+/**
+ * phylink_rx_clk_stop_unblock() - unblock PHY ability to stop receive clock
+ * @pl: a pointer to a &struct phylink returned from phylink_create()
+ *
+ * All calls to phylink_rx_clk_stop_block() must be balanced with a
+ * corresponding call to phylink_rx_clk_stop_unblock() to restore the PHY's
+ * ability to stop the receive clock when the receive path is in EEE LPI mode.
+ */
+void phylink_rx_clk_stop_unblock(struct phylink *pl)
+{
+ ASSERT_RTNL();
+
+ if (pl->mac_rx_clk_stop_blocked == 0) {
+ phylink_warn(pl, "%s called too many times - ignoring\n",
+ __func__);
+ dump_stack();
+ return;
+ }
+
+ /* Re-enable PHY receive clock stop if the number of unblocks matches
+ * the number of calls to the block function above.
+ */
+ if (--pl->mac_rx_clk_stop_blocked == 0 &&
+ pl->mac_supports_eee_ops && pl->phydev &&
+ pl->config->eee_rx_clk_stop_enable)
+ phy_eee_rx_clock_stop(pl->phydev, true);
+}
+EXPORT_SYMBOL_GPL(phylink_rx_clk_stop_unblock);
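
A sketch of the intended calling pattern from a MAC driver: block while doing work that needs the PHY's receive clock running, then unblock. The foo_mac structure and foo_mac_rewrite_filters() are assumptions; the two helpers above and phylink's RTNL requirement are real.

#include <linux/phylink.h>
#include <linux/rtnetlink.h>

struct foo_mac {
	struct phylink *phylink;
};

static int foo_mac_rewrite_filters(struct foo_mac *priv);	/* hypothetical */

static int foo_mac_reconfigure(struct foo_mac *priv)
{
	int ret;

	rtnl_lock();

	/* Keep the PHY RX clock running while we touch hardware that needs it. */
	phylink_rx_clk_stop_block(priv->phylink);

	ret = foo_mac_rewrite_filters(priv);

	/* Balanced unblock; clock-stop is re-enabled once the count hits zero. */
	phylink_rx_clk_stop_unblock(priv->phylink);

	rtnl_unlock();

	return ret;
}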
+
+/**
* phylink_suspend() - handle a network device suspend event
* @pl: a pointer to a &struct phylink returned from phylink_create()
* @mac_wol: true if the MAC needs to receive packets for Wake-on-Lan
@@ -2639,6 +2565,31 @@ void phylink_suspend(struct phylink *pl, bool mac_wol)
EXPORT_SYMBOL_GPL(phylink_suspend);
/**
+ * phylink_prepare_resume() - prepare to resume a network device
+ * @pl: a pointer to a &struct phylink returned from phylink_create()
+ *
+ * Optional, but if called must be called prior to phylink_resume().
+ *
+ * Prepare to resume a network device, resuming the PHY as necessary.
+ */
+void phylink_prepare_resume(struct phylink *pl)
+{
+ struct phy_device *phydev = pl->phydev;
+
+ ASSERT_RTNL();
+
+ /* IEEE 802.3 22.2.4.1.5 allows PHYs to stop their receive clock
+ * when PDOWN is set. However, some MACs require RXC to be running
+ * in order to resume. If the MAC requires RXC, and we have a PHY,
+ * then resume the PHY. Note that 802.3 allows PHYs up to 500ms before
+ * the clock meets requirements. We do not implement this delay.
+ */
+ if (pl->config->mac_requires_rxc && phydev && phydev->suspended)
+ phy_resume(phydev);
+}
+EXPORT_SYMBOL_GPL(phylink_prepare_resume);
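
Continuing the hypothetical foo_mac driver from the earlier sketch, this is how a resume path is expected to pair the new helper with phylink_resume(); foo_mac_hw_init() is an assumption, the phylink calls and the RTNL requirement are real.

static void foo_mac_hw_init(struct foo_mac *priv);	/* hypothetical */

static int foo_resume(struct device *dev)
{
	struct foo_mac *priv = dev_get_drvdata(dev);

	rtnl_lock();

	/* Resume the PHY first if the MAC needs RXC before it can come back. */
	phylink_prepare_resume(priv->phylink);

	foo_mac_hw_init(priv);

	phylink_resume(priv->phylink);

	rtnl_unlock();

	return 0;
}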
+
+/**
* phylink_resume() - handle a network device resume event
* @pl: a pointer to a &struct phylink returned from phylink_create()
*
@@ -2854,8 +2805,8 @@ int phylink_ethtool_ksettings_set(struct phylink *pl,
const struct ethtool_link_ksettings *kset)
{
__ETHTOOL_DECLARE_LINK_MODE_MASK(support);
+ const struct link_capabilities *c;
struct phylink_link_state config;
- const struct phy_setting *s;
ASSERT_RTNL();
@@ -2898,23 +2849,23 @@ int phylink_ethtool_ksettings_set(struct phylink *pl,
/* Autonegotiation disabled, select a suitable speed and
* duplex.
*/
- s = phy_lookup_setting(kset->base.speed, kset->base.duplex,
- pl->supported, false);
- if (!s)
+ c = phy_caps_lookup(kset->base.speed, kset->base.duplex,
+ pl->supported, false);
+ if (!c)
return -EINVAL;
/* If we have a fixed link, refuse to change link parameters.
* If the link parameters match, accept them but do nothing.
*/
if (pl->req_link_an_mode == MLO_AN_FIXED) {
- if (s->speed != pl->link_config.speed ||
- s->duplex != pl->link_config.duplex)
+ if (c->speed != pl->link_config.speed ||
+ c->duplex != pl->link_config.duplex)
return -EINVAL;
return 0;
}
- config.speed = s->speed;
- config.duplex = s->duplex;
+ config.speed = c->speed;
+ config.duplex = c->duplex;
break;
case AUTONEG_ENABLE:
@@ -3160,24 +3111,6 @@ int phylink_get_eee_err(struct phylink *pl)
EXPORT_SYMBOL_GPL(phylink_get_eee_err);
/**
- * phylink_init_eee() - init and check the EEE features
- * @pl: a pointer to a &struct phylink returned from phylink_create()
- * @clk_stop_enable: allow PHY to stop receive clock
- *
- * Must be called either with RTNL held or within mac_link_up()
- */
-int phylink_init_eee(struct phylink *pl, bool clk_stop_enable)
-{
- int ret = -EOPNOTSUPP;
-
- if (pl->phydev)
- ret = phy_init_eee(pl->phydev, clk_stop_enable);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(phylink_init_eee);
-
-/**
* phylink_ethtool_get_eee() - read the energy efficient ethernet parameters
* @pl: a pointer to a &struct phylink returned from phylink_create()
* @eee: a pointer to a &struct ethtool_keee for the read parameters
diff --git a/drivers/net/phy/qcom/qca807x.c b/drivers/net/phy/qcom/qca807x.c
index 2ad8c2586d64..1af6b5ead74b 100644
--- a/drivers/net/phy/qcom/qca807x.c
+++ b/drivers/net/phy/qcom/qca807x.c
@@ -15,6 +15,7 @@
#include <linux/gpio/driver.h>
#include <linux/sfp.h>
+#include "../phylib.h"
#include "qcom.h"
#define QCA807X_CHIP_CONFIGURATION 0x1f
@@ -486,13 +487,13 @@ static int qca807x_read_status(struct phy_device *phydev)
static int qca807x_phy_package_probe_once(struct phy_device *phydev)
{
- struct phy_package_shared *shared = phydev->shared;
- struct qca807x_shared_priv *priv = shared->priv;
+ struct qca807x_shared_priv *priv = phy_package_get_priv(phydev);
+ struct device_node *np = phy_package_get_node(phydev);
unsigned int tx_drive_strength;
const char *package_mode_name;
/* Default to 600mw if not defined */
- if (of_property_read_u32(shared->np, "qcom,tx-drive-strength-milliwatt",
+ if (of_property_read_u32(np, "qcom,tx-drive-strength-milliwatt",
&tx_drive_strength))
tx_drive_strength = 600;
@@ -541,7 +542,7 @@ static int qca807x_phy_package_probe_once(struct phy_device *phydev)
}
priv->package_mode = PHY_INTERFACE_MODE_NA;
- if (!of_property_read_string(shared->np, "qcom,package-mode",
+ if (!of_property_read_string(np, "qcom,package-mode",
&package_mode_name)) {
if (!strcasecmp(package_mode_name,
phy_modes(PHY_INTERFACE_MODE_PSGMII)))
@@ -558,8 +559,7 @@ static int qca807x_phy_package_probe_once(struct phy_device *phydev)
static int qca807x_phy_package_config_init_once(struct phy_device *phydev)
{
- struct phy_package_shared *shared = phydev->shared;
- struct qca807x_shared_priv *priv = shared->priv;
+ struct qca807x_shared_priv *priv = phy_package_get_priv(phydev);
int val, ret;
/* Make sure PHY follow PHY package mode if enforced */
@@ -708,7 +708,6 @@ static int qca807x_probe(struct phy_device *phydev)
struct device_node *node = phydev->mdio.dev.of_node;
struct qca807x_shared_priv *shared_priv;
struct device *dev = &phydev->mdio.dev;
- struct phy_package_shared *shared;
struct qca807x_priv *priv;
int ret;
@@ -722,8 +721,7 @@ static int qca807x_probe(struct phy_device *phydev)
return ret;
}
- shared = phydev->shared;
- shared_priv = shared->priv;
+ shared_priv = phy_package_get_priv(phydev);
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
diff --git a/drivers/net/phy/qt2025.rs b/drivers/net/phy/qt2025.rs
index 1ab065798175..7e754d5d7154 100644
--- a/drivers/net/phy/qt2025.rs
+++ b/drivers/net/phy/qt2025.rs
@@ -41,7 +41,7 @@ impl Driver for PhyQT2025 {
fn probe(dev: &mut phy::Device) -> Result<()> {
// Check the hardware revision code.
- // Only 0x3b works with this driver and firmware.
+ // Only 0xb3 works with this driver and firmware.
let hw_rev = dev.read(C45::new(Mmd::PMAPMD, 0xd001))?;
if (hw_rev >> 8) != 0xb3 {
return Err(code::ENODEV);
diff --git a/drivers/net/phy/realtek/realtek_hwmon.c b/drivers/net/phy/realtek/realtek_hwmon.c
index 1ecb410bb941..ac96e2d1ebe8 100644
--- a/drivers/net/phy/realtek/realtek_hwmon.c
+++ b/drivers/net/phy/realtek/realtek_hwmon.c
@@ -63,16 +63,11 @@ static const struct hwmon_chip_info rtl822x_hwmon_chip_info = {
int rtl822x_hwmon_init(struct phy_device *phydev)
{
struct device *hwdev, *dev = &phydev->mdio.dev;
- const char *name;
/* Ensure over-temp alarm is reset. */
phy_clear_bits_mmd(phydev, MDIO_MMD_VEND2, RTL822X_VND2_TSALRM, 3);
- name = devm_hwmon_sanitize_name(dev, dev_name(dev));
- if (IS_ERR(name))
- return PTR_ERR(name);
-
- hwdev = devm_hwmon_device_register_with_info(dev, name, phydev,
+ hwdev = devm_hwmon_device_register_with_info(dev, NULL, phydev,
&rtl822x_hwmon_chip_info,
NULL);
return PTR_ERR_OR_ZERO(hwdev);
diff --git a/drivers/net/phy/realtek/realtek_main.c b/drivers/net/phy/realtek/realtek_main.c
index 210fefac44d4..893c82479671 100644
--- a/drivers/net/phy/realtek/realtek_main.c
+++ b/drivers/net/phy/realtek/realtek_main.c
@@ -33,6 +33,9 @@
#define RTL8211F_PHYCR1 0x18
#define RTL8211F_PHYCR2 0x19
+#define RTL8211F_CLKOUT_EN BIT(0)
+#define RTL8211F_PHYCR2_PHY_EEE_ENABLE BIT(5)
+
#define RTL8211F_INSR 0x1d
#define RTL8211F_LEDCR 0x10
@@ -55,8 +58,6 @@
#define RTL8211E_TX_DELAY BIT(12)
#define RTL8211E_RX_DELAY BIT(11)
-#define RTL8211F_CLKOUT_EN BIT(0)
-
#define RTL8201F_ISR 0x1e
#define RTL8201F_ISR_ANERR BIT(15)
#define RTL8201F_ISR_DUPLEX BIT(13)
@@ -79,9 +80,7 @@
/* RTL822X_VND2_XXXXX registers are only accessible when phydev->is_c45
* is set, they cannot be accessed by C45-over-C22.
*/
-#define RTL822X_VND2_GBCR 0xa412
-
-#define RTL822X_VND2_GANLPAR 0xa414
+#define RTL822X_VND2_C22_REG(reg) (0xa400 + 2 * (reg))
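
The new helper computes the vendor-2 MMD address of a mirrored Clause-22 register, which makes the two dedicated defines it replaces redundant; a worked example using the standard MII register numbers:

/* MII_CTRL1000 is 0x09 and MII_STAT1000 is 0x0a in <linux/mii.h>, so:
 *
 *   RTL822X_VND2_C22_REG(MII_CTRL1000) == 0xa400 + 2 * 0x09 == 0xa412
 *   RTL822X_VND2_C22_REG(MII_STAT1000) == 0xa400 + 2 * 0x0a == 0xa414
 *
 * i.e. exactly the former RTL822X_VND2_GBCR and RTL822X_VND2_GANLPAR values.
 */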
#define RTL8366RB_POWER_SAVE 0x15
#define RTL8366RB_POWER_SAVE_ON BIT(12)
@@ -96,6 +95,16 @@
#define RTL_VND2_PHYSR_MASTER BIT(11)
#define RTL_VND2_PHYSR_SPEED_MASK (RTL_VND2_PHYSR_SPEEDL | RTL_VND2_PHYSR_SPEEDH)
+#define RTL_MDIO_PCS_EEE_ABLE 0xa5c4
+#define RTL_MDIO_AN_EEE_ADV 0xa5d0
+#define RTL_MDIO_AN_EEE_LPABLE 0xa5d2
+#define RTL_MDIO_AN_10GBT_CTRL 0xa5d4
+#define RTL_MDIO_AN_10GBT_STAT 0xa5d6
+#define RTL_MDIO_PMA_SPEED 0xa616
+#define RTL_MDIO_AN_EEE_LPABLE2 0xa6d0
+#define RTL_MDIO_AN_EEE_ADV2 0xa6d4
+#define RTL_MDIO_PCS_EEE_ABLE2 0xa6ec
+
#define RTL_GENERIC_PHYID 0x001cc800
#define RTL_8211FVD_PHYID 0x001cc878
#define RTL_8221B 0x001cc840
@@ -445,6 +454,12 @@ static int rtl8211f_config_init(struct phy_device *phydev)
str_enabled_disabled(val_rxdly));
}
+ /* Disable PHY-mode EEE so LPI is passed to the MAC */
+ ret = phy_modify_paged(phydev, 0xa43, RTL8211F_PHYCR2,
+ RTL8211F_PHYCR2_PHY_EEE_ENABLE, 0);
+ if (ret)
+ return ret;
+
if (priv->has_phycr2) {
ret = phy_modify_paged(phydev, 0xa43, RTL8211F_PHYCR2,
RTL8211F_CLKOUT_EN, priv->phycr2);
@@ -735,29 +750,31 @@ static int rtlgen_read_status(struct phy_device *phydev)
return 0;
}
+static int rtlgen_read_vend2(struct phy_device *phydev, int regnum)
+{
+ return __mdiobus_c45_read(phydev->mdio.bus, 0, MDIO_MMD_VEND2, regnum);
+}
+
+static int rtlgen_write_vend2(struct phy_device *phydev, int regnum, u16 val)
+{
+ return __mdiobus_c45_write(phydev->mdio.bus, 0, MDIO_MMD_VEND2, regnum,
+ val);
+}
+
static int rtlgen_read_mmd(struct phy_device *phydev, int devnum, u16 regnum)
{
int ret;
- if (devnum == MDIO_MMD_VEND2) {
- rtl821x_write_page(phydev, regnum >> 4);
- ret = __phy_read(phydev, 0x10 + ((regnum & 0xf) >> 1));
- rtl821x_write_page(phydev, 0);
- } else if (devnum == MDIO_MMD_PCS && regnum == MDIO_PCS_EEE_ABLE) {
- rtl821x_write_page(phydev, 0xa5c);
- ret = __phy_read(phydev, 0x12);
- rtl821x_write_page(phydev, 0);
- } else if (devnum == MDIO_MMD_AN && regnum == MDIO_AN_EEE_ADV) {
- rtl821x_write_page(phydev, 0xa5d);
- ret = __phy_read(phydev, 0x10);
- rtl821x_write_page(phydev, 0);
- } else if (devnum == MDIO_MMD_AN && regnum == MDIO_AN_EEE_LPABLE) {
- rtl821x_write_page(phydev, 0xa5d);
- ret = __phy_read(phydev, 0x11);
- rtl821x_write_page(phydev, 0);
- } else {
+ if (devnum == MDIO_MMD_VEND2)
+ ret = rtlgen_read_vend2(phydev, regnum);
+ else if (devnum == MDIO_MMD_PCS && regnum == MDIO_PCS_EEE_ABLE)
+ ret = rtlgen_read_vend2(phydev, RTL_MDIO_PCS_EEE_ABLE);
+ else if (devnum == MDIO_MMD_AN && regnum == MDIO_AN_EEE_ADV)
+ ret = rtlgen_read_vend2(phydev, RTL_MDIO_AN_EEE_ADV);
+ else if (devnum == MDIO_MMD_AN && regnum == MDIO_AN_EEE_LPABLE)
+ ret = rtlgen_read_vend2(phydev, RTL_MDIO_AN_EEE_LPABLE);
+ else
ret = -EOPNOTSUPP;
- }
return ret;
}
@@ -767,17 +784,12 @@ static int rtlgen_write_mmd(struct phy_device *phydev, int devnum, u16 regnum,
{
int ret;
- if (devnum == MDIO_MMD_VEND2) {
- rtl821x_write_page(phydev, regnum >> 4);
- ret = __phy_write(phydev, 0x10 + ((regnum & 0xf) >> 1), val);
- rtl821x_write_page(phydev, 0);
- } else if (devnum == MDIO_MMD_AN && regnum == MDIO_AN_EEE_ADV) {
- rtl821x_write_page(phydev, 0xa5d);
- ret = __phy_write(phydev, 0x10, val);
- rtl821x_write_page(phydev, 0);
- } else {
+ if (devnum == MDIO_MMD_VEND2)
+ ret = rtlgen_write_vend2(phydev, regnum, val);
+ else if (devnum == MDIO_MMD_AN && regnum == MDIO_AN_EEE_ADV)
+ ret = rtlgen_write_vend2(phydev, RTL_MDIO_AN_EEE_ADV, val);
+ else
ret = -EOPNOTSUPP;
- }
return ret;
}
@@ -789,19 +801,12 @@ static int rtl822x_read_mmd(struct phy_device *phydev, int devnum, u16 regnum)
if (ret != -EOPNOTSUPP)
return ret;
- if (devnum == MDIO_MMD_PCS && regnum == MDIO_PCS_EEE_ABLE2) {
- rtl821x_write_page(phydev, 0xa6e);
- ret = __phy_read(phydev, 0x16);
- rtl821x_write_page(phydev, 0);
- } else if (devnum == MDIO_MMD_AN && regnum == MDIO_AN_EEE_ADV2) {
- rtl821x_write_page(phydev, 0xa6d);
- ret = __phy_read(phydev, 0x12);
- rtl821x_write_page(phydev, 0);
- } else if (devnum == MDIO_MMD_AN && regnum == MDIO_AN_EEE_LPABLE2) {
- rtl821x_write_page(phydev, 0xa6d);
- ret = __phy_read(phydev, 0x10);
- rtl821x_write_page(phydev, 0);
- }
+ if (devnum == MDIO_MMD_PCS && regnum == MDIO_PCS_EEE_ABLE2)
+ ret = rtlgen_read_vend2(phydev, RTL_MDIO_PCS_EEE_ABLE2);
+ else if (devnum == MDIO_MMD_AN && regnum == MDIO_AN_EEE_ADV2)
+ ret = rtlgen_read_vend2(phydev, RTL_MDIO_AN_EEE_ADV2);
+ else if (devnum == MDIO_MMD_AN && regnum == MDIO_AN_EEE_LPABLE2)
+ ret = rtlgen_read_vend2(phydev, RTL_MDIO_AN_EEE_LPABLE2);
return ret;
}
@@ -814,11 +819,8 @@ static int rtl822x_write_mmd(struct phy_device *phydev, int devnum, u16 regnum,
if (ret != -EOPNOTSUPP)
return ret;
- if (devnum == MDIO_MMD_AN && regnum == MDIO_AN_EEE_ADV2) {
- rtl821x_write_page(phydev, 0xa6d);
- ret = __phy_write(phydev, 0x12, val);
- rtl821x_write_page(phydev, 0);
- }
+ if (devnum == MDIO_MMD_AN && regnum == MDIO_AN_EEE_ADV2)
+ ret = rtlgen_write_vend2(phydev, RTL_MDIO_AN_EEE_ADV2, val);
return ret;
}
@@ -914,7 +916,7 @@ static int rtl822x_get_features(struct phy_device *phydev)
{
int val;
- val = phy_read_paged(phydev, 0xa61, 0x13);
+ val = phy_read_mmd(phydev, MDIO_MMD_VEND2, RTL_MDIO_PMA_SPEED);
if (val < 0)
return val;
@@ -935,10 +937,10 @@ static int rtl822x_config_aneg(struct phy_device *phydev)
if (phydev->autoneg == AUTONEG_ENABLE) {
u16 adv = linkmode_adv_to_mii_10gbt_adv_t(phydev->advertising);
- ret = phy_modify_paged_changed(phydev, 0xa5d, 0x12,
- MDIO_AN_10GBT_CTRL_ADV2_5G |
- MDIO_AN_10GBT_CTRL_ADV5G,
- adv);
+ ret = phy_modify_mmd_changed(phydev, MDIO_MMD_VEND2,
+ RTL_MDIO_AN_10GBT_CTRL,
+ MDIO_AN_10GBT_CTRL_ADV2_5G |
+ MDIO_AN_10GBT_CTRL_ADV5G, adv);
if (ret < 0)
return ret;
}
@@ -982,7 +984,7 @@ static int rtl822x_read_status(struct phy_device *phydev)
!phydev->autoneg_complete)
return 0;
- lpadv = phy_read_paged(phydev, 0xa5d, 0x13);
+ lpadv = phy_read_mmd(phydev, MDIO_MMD_VEND2, RTL_MDIO_AN_10GBT_STAT);
if (lpadv < 0)
return lpadv;
@@ -1029,7 +1031,8 @@ static int rtl822x_c45_config_aneg(struct phy_device *phydev)
val = linkmode_adv_to_mii_ctrl1000_t(phydev->advertising);
/* Vendor register as C45 has no standardized support for 1000BaseT */
- ret = phy_modify_mmd_changed(phydev, MDIO_MMD_VEND2, RTL822X_VND2_GBCR,
+ ret = phy_modify_mmd_changed(phydev, MDIO_MMD_VEND2,
+ RTL822X_VND2_C22_REG(MII_CTRL1000),
ADVERTISE_1000FULL, val);
if (ret < 0)
return ret;
@@ -1046,7 +1049,7 @@ static int rtl822x_c45_read_status(struct phy_device *phydev)
/* Vendor register as C45 has no standardized support for 1000BaseT */
if (phydev->autoneg == AUTONEG_ENABLE && genphy_c45_aneg_done(phydev)) {
val = phy_read_mmd(phydev, MDIO_MMD_VEND2,
- RTL822X_VND2_GANLPAR);
+ RTL822X_VND2_C22_REG(MII_STAT1000));
if (val < 0)
return val;
} else {
diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c
index 7dbcbf0a4ee2..347c1e0e94d9 100644
--- a/drivers/net/phy/sfp.c
+++ b/drivers/net/phy/sfp.c
@@ -234,6 +234,7 @@ struct sfp {
enum mdio_i2c_proto mdio_protocol;
struct phy_device *mod_phy;
const struct sff_data *type;
+ size_t i2c_max_block_size;
size_t i2c_block_size;
u32 max_power_mW;
@@ -385,7 +386,7 @@ static void sfp_fixup_rollball(struct sfp *sfp)
sfp->phy_t_retry = msecs_to_jiffies(1000);
}
-static void sfp_fixup_fs_2_5gt(struct sfp *sfp)
+static void sfp_fixup_rollball_wait4s(struct sfp *sfp)
{
sfp_fixup_rollball(sfp);
@@ -399,7 +400,7 @@ static void sfp_fixup_fs_2_5gt(struct sfp *sfp)
static void sfp_fixup_fs_10gt(struct sfp *sfp)
{
sfp_fixup_10gbaset_30m(sfp);
- sfp_fixup_fs_2_5gt(sfp);
+ sfp_fixup_rollball_wait4s(sfp);
}
static void sfp_fixup_halny_gsfp(struct sfp *sfp)
@@ -479,9 +480,10 @@ static const struct sfp_quirk sfp_quirks[] = {
// PHY.
SFP_QUIRK_F("FS", "SFP-10G-T", sfp_fixup_fs_10gt),
- // Fiberstore SFP-2.5G-T uses Rollball protocol to talk to the PHY and
- // needs 4 sec wait before probing the PHY.
- SFP_QUIRK_F("FS", "SFP-2.5G-T", sfp_fixup_fs_2_5gt),
+ // Fiberstore SFP-2.5G-T and SFP-10GM-T use the Rollball protocol to talk
+ // to the PHY and need a 4 second wait before probing the PHY.
+ SFP_QUIRK_F("FS", "SFP-2.5G-T", sfp_fixup_rollball_wait4s),
+ SFP_QUIRK_F("FS", "SFP-10GM-T", sfp_fixup_rollball_wait4s),
// Fiberstore GPON-ONU-34-20BI can operate at 2500base-X, but report 1.2GBd
// NRZ in their EEPROM
@@ -515,6 +517,8 @@ static const struct sfp_quirk sfp_quirks[] = {
SFP_QUIRK_F("OEM", "SFP-10G-T", sfp_fixup_rollball_cc),
SFP_QUIRK_M("OEM", "SFP-2.5G-T", sfp_quirk_oem_2_5g),
+ SFP_QUIRK_M("OEM", "SFP-2.5G-BX10-D", sfp_quirk_2500basex),
+ SFP_QUIRK_M("OEM", "SFP-2.5G-BX10-U", sfp_quirk_2500basex),
SFP_QUIRK_F("OEM", "RTSFP-10", sfp_fixup_rollball_cc),
SFP_QUIRK_F("OEM", "RTSFP-10G", sfp_fixup_rollball_cc),
SFP_QUIRK_F("Turris", "RTSFP-2.5G", sfp_fixup_rollball),
@@ -688,14 +692,71 @@ static int sfp_i2c_write(struct sfp *sfp, bool a2, u8 dev_addr, void *buf,
return ret == ARRAY_SIZE(msgs) ? len : 0;
}
-static int sfp_i2c_configure(struct sfp *sfp, struct i2c_adapter *i2c)
+static int sfp_smbus_byte_read(struct sfp *sfp, bool a2, u8 dev_addr,
+ void *buf, size_t len)
{
- if (!i2c_check_functionality(i2c, I2C_FUNC_I2C))
- return -EINVAL;
+ union i2c_smbus_data smbus_data;
+ u8 bus_addr = a2 ? 0x51 : 0x50;
+ u8 *data = buf;
+ int ret;
+
+ while (len) {
+ ret = i2c_smbus_xfer(sfp->i2c, bus_addr, 0,
+ I2C_SMBUS_READ, dev_addr,
+ I2C_SMBUS_BYTE_DATA, &smbus_data);
+ if (ret < 0)
+ return ret;
+
+ *data = smbus_data.byte;
+
+ len--;
+ data++;
+ dev_addr++;
+ }
+
+ return data - (u8 *)buf;
+}
+
+static int sfp_smbus_byte_write(struct sfp *sfp, bool a2, u8 dev_addr,
+ void *buf, size_t len)
+{
+ union i2c_smbus_data smbus_data;
+ u8 bus_addr = a2 ? 0x51 : 0x50;
+ u8 *data = buf;
+ int ret;
+ while (len) {
+ smbus_data.byte = *data;
+ ret = i2c_smbus_xfer(sfp->i2c, bus_addr, 0,
+ I2C_SMBUS_WRITE, dev_addr,
+ I2C_SMBUS_BYTE_DATA, &smbus_data);
+ if (ret)
+ return ret;
+
+ len--;
+ data++;
+ dev_addr++;
+ }
+
+ return 0;
+}
+
+static int sfp_i2c_configure(struct sfp *sfp, struct i2c_adapter *i2c)
+{
sfp->i2c = i2c;
- sfp->read = sfp_i2c_read;
- sfp->write = sfp_i2c_write;
+
+ if (i2c_check_functionality(i2c, I2C_FUNC_I2C)) {
+ sfp->read = sfp_i2c_read;
+ sfp->write = sfp_i2c_write;
+ sfp->i2c_max_block_size = SFP_EEPROM_BLOCK_SIZE;
+ } else if (i2c_check_functionality(i2c, I2C_FUNC_SMBUS_BYTE_DATA)) {
+ sfp->read = sfp_smbus_byte_read;
+ sfp->write = sfp_smbus_byte_write;
+ sfp->i2c_max_block_size = 1;
+ } else {
+ sfp->i2c = NULL;
+ return -EINVAL;
+ }
return 0;
}
@@ -1591,7 +1652,7 @@ static void sfp_hwmon_probe(struct work_struct *work)
*/
if (sfp->i2c_block_size < 2) {
dev_info(sfp->dev,
- "skipping hwmon device registration due to broken EEPROM\n");
+ "skipping hwmon device registration\n");
dev_info(sfp->dev,
"diagnostic EEPROM area cannot be read atomically to guarantee data coherency\n");
return;
@@ -2198,7 +2259,7 @@ static int sfp_sm_mod_probe(struct sfp *sfp, bool report)
u8 check;
int ret;
- sfp->i2c_block_size = SFP_EEPROM_BLOCK_SIZE;
+ sfp->i2c_block_size = sfp->i2c_max_block_size;
ret = sfp_read(sfp, false, 0, &id.base, sizeof(id.base));
if (ret < 0) {
@@ -2938,7 +2999,6 @@ static struct sfp *sfp_alloc(struct device *dev)
return ERR_PTR(-ENOMEM);
sfp->dev = dev;
- sfp->i2c_block_size = SFP_EEPROM_BLOCK_SIZE;
mutex_init(&sfp->sm_mutex);
mutex_init(&sfp->st_mutex);
@@ -3112,6 +3172,15 @@ static int sfp_probe(struct platform_device *pdev)
if (!sfp->sfp_bus)
return -ENOMEM;
+ if (sfp->i2c_max_block_size < 2)
+ dev_warn(sfp->dev,
+ "Please note:\n"
+ "This SFP cage is accessed via an SMBus only capable of single byte\n"
+ "transactions. Some features are disabled, other may be unreliable or\n"
+ "sporadically fail. Use with caution. There is nothing that the kernel\n"
+ "or community can do to fix it, the kernel will try best efforts. Please\n"
+ "verify any problems on hardware that supports multi-byte I2C transactions.\n");
+
sfp_debugfs_init(sfp);
return 0;
diff --git a/drivers/net/phy/xilinx_gmii2rgmii.c b/drivers/net/phy/xilinx_gmii2rgmii.c
index 7c51daecf18e..2024d8ef36d9 100644
--- a/drivers/net/phy/xilinx_gmii2rgmii.c
+++ b/drivers/net/phy/xilinx_gmii2rgmii.c
@@ -64,15 +64,16 @@ static int xgmiitorgmii_read_status(struct phy_device *phydev)
return 0;
}
-static int xgmiitorgmii_set_loopback(struct phy_device *phydev, bool enable)
+static int xgmiitorgmii_set_loopback(struct phy_device *phydev, bool enable,
+ int speed)
{
struct gmii2rgmii *priv = mdiodev_get_drvdata(&phydev->mdio);
int err;
if (priv->phy_drv->set_loopback)
- err = priv->phy_drv->set_loopback(phydev, enable);
+ err = priv->phy_drv->set_loopback(phydev, enable, speed);
else
- err = genphy_loopback(phydev, enable);
+ err = genphy_loopback(phydev, enable, speed);
if (err < 0)
return err;
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index 4583e15ad03a..53463767cc43 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -45,6 +45,7 @@
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/unaligned.h>
+#include <net/netdev_lock.h>
#include <net/slhc_vj.h>
#include <linux/atomic.h>
#include <linux/refcount.h>
@@ -72,6 +73,17 @@
#define PPP_PROTO_LEN 2
#define PPP_LCP_HDRLEN 4
+/* The filter instructions generated by libpcap are constructed
+ * assuming a four-byte PPP header on each packet, where the last
+ * 2 bytes are the protocol field defined in the RFC and the first
+ * byte of the first 2 bytes indicates the direction.
+ * The second byte is currently unused, but we still need to initialize
+ * it to prevent crafted BPF programs from reading them which would
+ * cause reading of uninitialized data.
+ */
+#define PPP_FILTER_OUTBOUND_TAG 0x0100
+#define PPP_FILTER_INBOUND_TAG 0x0000
+
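
For reference, this is the byte layout the BPF filter sees once one of these tags has been pushed in front of the protocol field (a description of the code changed below, not new behaviour):

/*
 *   byte 0: direction, 0x01 = outbound, 0x00 = inbound
 *   byte 1: 0x00 (unused, but always initialised)
 *   byte 2: PPP protocol, high byte
 *   byte 3: PPP protocol, low byte
 *
 * e.g. htons(PPP_FILTER_OUTBOUND_TAG) stores the bytes 0x01 0x00, matching
 * the four-byte pseudo-header that libpcap-generated filters expect.
 */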
/*
* An instance of /dev/ppp can be associated with either a ppp
* interface unit or a ppp channel. In both cases, file->private_data
@@ -1303,10 +1315,13 @@ static int ppp_nl_validate(struct nlattr *tb[], struct nlattr *data[],
return 0;
}
-static int ppp_nl_newlink(struct net *src_net, struct net_device *dev,
- struct nlattr *tb[], struct nlattr *data[],
+static int ppp_nl_newlink(struct net_device *dev,
+ struct rtnl_newlink_params *params,
struct netlink_ext_ack *extack)
{
+ struct net *link_net = rtnl_newlink_link_net(params);
+ struct nlattr **data = params->data;
+ struct nlattr **tb = params->tb;
struct ppp_config conf = {
.unit = -1,
.ifname_is_set = true,
@@ -1343,7 +1358,7 @@ static int ppp_nl_newlink(struct net *src_net, struct net_device *dev,
if (!tb[IFLA_IFNAME] || !nla_len(tb[IFLA_IFNAME]) || !*(char *)nla_data(tb[IFLA_IFNAME]))
conf.ifname_is_set = false;
- err = ppp_dev_configure(src_net, dev, &conf);
+ err = ppp_dev_configure(link_net, dev, &conf);
out_unlock:
mutex_unlock(&ppp_mutex);
@@ -1762,10 +1777,10 @@ ppp_send_frame(struct ppp *ppp, struct sk_buff *skb)
if (proto < 0x8000) {
#ifdef CONFIG_PPP_FILTER
- /* check if we should pass this packet */
- /* the filter instructions are constructed assuming
- a four-byte PPP header on each packet */
- *(u8 *)skb_push(skb, 2) = 1;
+ /* check if the packet passes the pass and active filters.
+ * See comment for PPP_FILTER_OUTBOUND_TAG above.
+ */
+ *(__be16 *)skb_push(skb, 2) = htons(PPP_FILTER_OUTBOUND_TAG);
if (ppp->pass_filter &&
bpf_prog_run(ppp->pass_filter, skb) == 0) {
if (ppp->debug & 1)
@@ -2482,14 +2497,13 @@ ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb)
/* network protocol frame - give it to the kernel */
#ifdef CONFIG_PPP_FILTER
- /* check if the packet passes the pass and active filters */
- /* the filter instructions are constructed assuming
- a four-byte PPP header on each packet */
if (ppp->pass_filter || ppp->active_filter) {
if (skb_unclone(skb, GFP_ATOMIC))
goto err;
-
- *(u8 *)skb_push(skb, 2) = 0;
+ /* Check if the packet passes the pass and active filters.
+ * See comment for PPP_FILTER_INBOUND_TAG above.
+ */
+ *(__be16 *)skb_push(skb, 2) = htons(PPP_FILTER_INBOUND_TAG);
if (ppp->pass_filter &&
bpf_prog_run(ppp->pass_filter, skb) == 0) {
if (ppp->debug & 1)
@@ -3490,6 +3504,10 @@ ppp_connect_channel(struct channel *pch, int unit)
ret = -ENOTCONN;
goto outl;
}
+ if (pch->chan->direct_xmit)
+ ppp->dev->priv_flags |= IFF_NO_QUEUE;
+ else
+ ppp->dev->priv_flags &= ~IFF_NO_QUEUE;
spin_unlock_bh(&pch->downl);
if (pch->file.hdrlen > ppp->file.hdrlen)
ppp->file.hdrlen = pch->file.hdrlen;
diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
index 2ea4f4890d23..68e631718ab0 100644
--- a/drivers/net/ppp/pppoe.c
+++ b/drivers/net/ppp/pppoe.c
@@ -693,6 +693,7 @@ static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr,
po->chan.mtu = dev->mtu - sizeof(struct pppoe_hdr) - 2;
po->chan.private = sk;
po->chan.ops = &pppoe_chan_ops;
+ po->chan.direct_xmit = true;
error = ppp_register_net_channel(dev_net(dev), &po->chan);
if (error) {
diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c
index 689687bd2574..5feaa70b5f47 100644
--- a/drivers/net/ppp/pptp.c
+++ b/drivers/net/ppp/pptp.c
@@ -465,6 +465,7 @@ static int pptp_connect(struct socket *sock, struct sockaddr *uservaddr,
po->chan.mtu -= PPTP_HEADER_OVERHEAD;
po->chan.hdrlen = 2 + sizeof(struct pptp_gre_header);
+ po->chan.direct_xmit = true;
error = ppp_register_channel(&po->chan);
if (error) {
pr_err("PPTP: failed to register PPP channel (%d)\n", error);
diff --git a/drivers/net/sb1000.c b/drivers/net/sb1000.c
deleted file mode 100644
index c3f8020571ad..000000000000
--- a/drivers/net/sb1000.c
+++ /dev/null
@@ -1,1179 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/* sb1000.c: A General Instruments SB1000 driver for linux. */
-/*
- Written 1998 by Franco Venturi.
-
- Copyright 1998 by Franco Venturi.
- Copyright 1994,1995 by Donald Becker.
- Copyright 1993 United States Government as represented by the
- Director, National Security Agency.
-
- This driver is for the General Instruments SB1000 (internal SURFboard)
-
- The author may be reached as fventuri@mediaone.net
-
-
- Changes:
-
- 981115 Steven Hirsch <shirsch@adelphia.net>
-
- Linus changed the timer interface. Should work on all recent
- development kernels.
-
- 980608 Steven Hirsch <shirsch@adelphia.net>
-
- Small changes to make it work with 2.1.x kernels. Hopefully,
- nothing major will change before official release of Linux 2.2.
-
- Merged with 2.2 - Alan Cox
-*/
-
-static char version[] = "sb1000.c:v1.1.2 6/01/98 (fventuri@mediaone.net)\n";
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/string.h>
-#include <linux/interrupt.h>
-#include <linux/errno.h>
-#include <linux/if_cablemodem.h> /* for SIOGCM/SIOSCM stuff */
-#include <linux/in.h>
-#include <linux/ioport.h>
-#include <linux/netdevice.h>
-#include <linux/if_arp.h>
-#include <linux/skbuff.h>
-#include <linux/delay.h> /* for udelay() */
-#include <linux/etherdevice.h>
-#include <linux/pnp.h>
-#include <linux/init.h>
-#include <linux/bitops.h>
-#include <linux/gfp.h>
-
-#include <asm/io.h>
-#include <asm/processor.h>
-#include <linux/uaccess.h>
-
-#ifdef SB1000_DEBUG
-static int sb1000_debug = SB1000_DEBUG;
-#else
-static const int sb1000_debug = 1;
-#endif
-
-static const int SB1000_IO_EXTENT = 8;
-/* SB1000 Maximum Receive Unit */
-static const int SB1000_MRU = 1500; /* octects */
-
-#define NPIDS 4
-struct sb1000_private {
- struct sk_buff *rx_skb[NPIDS];
- short rx_dlen[NPIDS];
- unsigned int rx_frames;
- short rx_error_count;
- short rx_error_dpc_count;
- unsigned char rx_session_id[NPIDS];
- unsigned char rx_frame_id[NPIDS];
- unsigned char rx_pkt_type[NPIDS];
-};
-
-/* prototypes for Linux interface */
-extern int sb1000_probe(struct net_device *dev);
-static int sb1000_open(struct net_device *dev);
-static int sb1000_siocdevprivate(struct net_device *dev, struct ifreq *ifr,
- void __user *data, int cmd);
-static netdev_tx_t sb1000_start_xmit(struct sk_buff *skb,
- struct net_device *dev);
-static irqreturn_t sb1000_interrupt(int irq, void *dev_id);
-static int sb1000_close(struct net_device *dev);
-
-
-/* SB1000 hardware routines to be used during open/configuration phases */
-static int card_wait_for_busy_clear(const int ioaddr[],
- const char* name);
-static int card_wait_for_ready(const int ioaddr[], const char* name,
- unsigned char in[]);
-static int card_send_command(const int ioaddr[], const char* name,
- const unsigned char out[], unsigned char in[]);
-
-/* SB1000 hardware routines to be used during frame rx interrupt */
-static int sb1000_wait_for_ready(const int ioaddr[], const char* name);
-static int sb1000_wait_for_ready_clear(const int ioaddr[],
- const char* name);
-static void sb1000_send_command(const int ioaddr[], const char* name,
- const unsigned char out[]);
-static void sb1000_read_status(const int ioaddr[], unsigned char in[]);
-static void sb1000_issue_read_command(const int ioaddr[],
- const char* name);
-
-/* SB1000 commands for open/configuration */
-static int sb1000_reset(const int ioaddr[], const char* name);
-static int sb1000_check_CRC(const int ioaddr[], const char* name);
-static inline int sb1000_start_get_set_command(const int ioaddr[],
- const char* name);
-static int sb1000_end_get_set_command(const int ioaddr[],
- const char* name);
-static int sb1000_activate(const int ioaddr[], const char* name);
-static int sb1000_get_firmware_version(const int ioaddr[],
- const char* name, unsigned char version[], int do_end);
-static int sb1000_get_frequency(const int ioaddr[], const char* name,
- int* frequency);
-static int sb1000_set_frequency(const int ioaddr[], const char* name,
- int frequency);
-static int sb1000_get_PIDs(const int ioaddr[], const char* name,
- short PID[]);
-static int sb1000_set_PIDs(const int ioaddr[], const char* name,
- const short PID[]);
-
-/* SB1000 commands for frame rx interrupt */
-static int sb1000_rx(struct net_device *dev);
-static void sb1000_error_dpc(struct net_device *dev);
-
-static const struct pnp_device_id sb1000_pnp_ids[] = {
- { "GIC1000", 0 },
- { "", 0 }
-};
-MODULE_DEVICE_TABLE(pnp, sb1000_pnp_ids);
-
-static const struct net_device_ops sb1000_netdev_ops = {
- .ndo_open = sb1000_open,
- .ndo_start_xmit = sb1000_start_xmit,
- .ndo_siocdevprivate = sb1000_siocdevprivate,
- .ndo_stop = sb1000_close,
- .ndo_set_mac_address = eth_mac_addr,
- .ndo_validate_addr = eth_validate_addr,
-};
-
-static int
-sb1000_probe_one(struct pnp_dev *pdev, const struct pnp_device_id *id)
-{
- struct net_device *dev;
- unsigned short ioaddr[2], irq;
- unsigned int serial_number;
- int error = -ENODEV;
- u8 addr[ETH_ALEN];
-
- if (pnp_device_attach(pdev) < 0)
- return -ENODEV;
- if (pnp_activate_dev(pdev) < 0)
- goto out_detach;
-
- if (!pnp_port_valid(pdev, 0) || !pnp_port_valid(pdev, 1))
- goto out_disable;
- if (!pnp_irq_valid(pdev, 0))
- goto out_disable;
-
- serial_number = pdev->card->serial;
-
- ioaddr[0] = pnp_port_start(pdev, 0);
- ioaddr[1] = pnp_port_start(pdev, 0);
-
- irq = pnp_irq(pdev, 0);
-
- if (!request_region(ioaddr[0], 16, "sb1000"))
- goto out_disable;
- if (!request_region(ioaddr[1], 16, "sb1000"))
- goto out_release_region0;
-
- dev = alloc_etherdev(sizeof(struct sb1000_private));
- if (!dev) {
- error = -ENOMEM;
- goto out_release_regions;
- }
-
-
- dev->base_addr = ioaddr[0];
- /* mem_start holds the second I/O address */
- dev->mem_start = ioaddr[1];
- dev->irq = irq;
-
- if (sb1000_debug > 0)
- printk(KERN_NOTICE "%s: sb1000 at (%#3.3lx,%#3.3lx), "
- "S/N %#8.8x, IRQ %d.\n", dev->name, dev->base_addr,
- dev->mem_start, serial_number, dev->irq);
-
- /*
- * The SB1000 is an rx-only cable modem device. The uplink is a modem
- * and we do not want to arp on it.
- */
- dev->flags = IFF_POINTOPOINT|IFF_NOARP;
-
- SET_NETDEV_DEV(dev, &pdev->dev);
-
- if (sb1000_debug > 0)
- printk(KERN_NOTICE "%s", version);
-
- dev->netdev_ops = &sb1000_netdev_ops;
-
- /* hardware address is 0:0:serial_number */
- addr[0] = 0;
- addr[1] = 0;
- addr[2] = serial_number >> 24 & 0xff;
- addr[3] = serial_number >> 16 & 0xff;
- addr[4] = serial_number >> 8 & 0xff;
- addr[5] = serial_number >> 0 & 0xff;
- eth_hw_addr_set(dev, addr);
-
- pnp_set_drvdata(pdev, dev);
-
- error = register_netdev(dev);
- if (error)
- goto out_free_netdev;
- return 0;
-
- out_free_netdev:
- free_netdev(dev);
- out_release_regions:
- release_region(ioaddr[1], 16);
- out_release_region0:
- release_region(ioaddr[0], 16);
- out_disable:
- pnp_disable_dev(pdev);
- out_detach:
- pnp_device_detach(pdev);
- return error;
-}
-
-static void
-sb1000_remove_one(struct pnp_dev *pdev)
-{
- struct net_device *dev = pnp_get_drvdata(pdev);
-
- unregister_netdev(dev);
- release_region(dev->base_addr, 16);
- release_region(dev->mem_start, 16);
- free_netdev(dev);
-}
-
-static struct pnp_driver sb1000_driver = {
- .name = "sb1000",
- .id_table = sb1000_pnp_ids,
- .probe = sb1000_probe_one,
- .remove = sb1000_remove_one,
-};
-
-
-/*
- * SB1000 hardware routines to be used during open/configuration phases
- */
-
-static const int TimeOutJiffies = (875 * HZ) / 100;
-
-/* Card Wait For Busy Clear (cannot be used during an interrupt) */
-static int
-card_wait_for_busy_clear(const int ioaddr[], const char* name)
-{
- unsigned char a;
- unsigned long timeout;
-
- a = inb(ioaddr[0] + 7);
- timeout = jiffies + TimeOutJiffies;
- while (a & 0x80 || a & 0x40) {
- /* a little sleep */
- yield();
-
- a = inb(ioaddr[0] + 7);
- if (time_after_eq(jiffies, timeout)) {
- printk(KERN_WARNING "%s: card_wait_for_busy_clear timeout\n",
- name);
- return -ETIME;
- }
- }
-
- return 0;
-}
-
-/* Card Wait For Ready (cannot be used during an interrupt) */
-static int
-card_wait_for_ready(const int ioaddr[], const char* name, unsigned char in[])
-{
- unsigned char a;
- unsigned long timeout;
-
- a = inb(ioaddr[1] + 6);
- timeout = jiffies + TimeOutJiffies;
- while (a & 0x80 || !(a & 0x40)) {
- /* a little sleep */
- yield();
-
- a = inb(ioaddr[1] + 6);
- if (time_after_eq(jiffies, timeout)) {
- printk(KERN_WARNING "%s: card_wait_for_ready timeout\n",
- name);
- return -ETIME;
- }
- }
-
- in[1] = inb(ioaddr[0] + 1);
- in[2] = inb(ioaddr[0] + 2);
- in[3] = inb(ioaddr[0] + 3);
- in[4] = inb(ioaddr[0] + 4);
- in[0] = inb(ioaddr[0] + 5);
- in[6] = inb(ioaddr[0] + 6);
- in[5] = inb(ioaddr[1] + 6);
- return 0;
-}
-
-/* Card Send Command (cannot be used during an interrupt) */
-static int
-card_send_command(const int ioaddr[], const char* name,
- const unsigned char out[], unsigned char in[])
-{
- int status;
-
- if ((status = card_wait_for_busy_clear(ioaddr, name)))
- return status;
- outb(0xa0, ioaddr[0] + 6);
- outb(out[2], ioaddr[0] + 1);
- outb(out[3], ioaddr[0] + 2);
- outb(out[4], ioaddr[0] + 3);
- outb(out[5], ioaddr[0] + 4);
- outb(out[1], ioaddr[0] + 5);
- outb(0xa0, ioaddr[0] + 6);
- outb(out[0], ioaddr[0] + 7);
- if (out[0] != 0x20 && out[0] != 0x30) {
- if ((status = card_wait_for_ready(ioaddr, name, in)))
- return status;
- inb(ioaddr[0] + 7);
- if (sb1000_debug > 3)
- printk(KERN_DEBUG "%s: card_send_command "
- "out: %02x%02x%02x%02x%02x%02x "
- "in: %02x%02x%02x%02x%02x%02x%02x\n", name,
- out[0], out[1], out[2], out[3], out[4], out[5],
- in[0], in[1], in[2], in[3], in[4], in[5], in[6]);
- } else {
- if (sb1000_debug > 3)
- printk(KERN_DEBUG "%s: card_send_command "
- "out: %02x%02x%02x%02x%02x%02x\n", name,
- out[0], out[1], out[2], out[3], out[4], out[5]);
- }
-
- if (out[1] != 0x1b) {
- if (out[0] >= 0x80 && in[0] != (out[1] | 0x80))
- return -EIO;
- }
- return 0;
-}
-
-
-/*
- * SB1000 hardware routines to be used during frame rx interrupt
- */
-static const int Sb1000TimeOutJiffies = 7 * HZ;
-
-/* Card Wait For Ready (to be used during frame rx) */
-static int
-sb1000_wait_for_ready(const int ioaddr[], const char* name)
-{
- unsigned long timeout;
-
- timeout = jiffies + Sb1000TimeOutJiffies;
- while (inb(ioaddr[1] + 6) & 0x80) {
- if (time_after_eq(jiffies, timeout)) {
- printk(KERN_WARNING "%s: sb1000_wait_for_ready timeout\n",
- name);
- return -ETIME;
- }
- }
- timeout = jiffies + Sb1000TimeOutJiffies;
- while (!(inb(ioaddr[1] + 6) & 0x40)) {
- if (time_after_eq(jiffies, timeout)) {
- printk(KERN_WARNING "%s: sb1000_wait_for_ready timeout\n",
- name);
- return -ETIME;
- }
- }
- inb(ioaddr[0] + 7);
- return 0;
-}
-
-/* Card Wait For Ready Clear (to be used during frame rx) */
-static int
-sb1000_wait_for_ready_clear(const int ioaddr[], const char* name)
-{
- unsigned long timeout;
-
- timeout = jiffies + Sb1000TimeOutJiffies;
- while (inb(ioaddr[1] + 6) & 0x80) {
- if (time_after_eq(jiffies, timeout)) {
- printk(KERN_WARNING "%s: sb1000_wait_for_ready_clear timeout\n",
- name);
- return -ETIME;
- }
- }
- timeout = jiffies + Sb1000TimeOutJiffies;
- while (inb(ioaddr[1] + 6) & 0x40) {
- if (time_after_eq(jiffies, timeout)) {
- printk(KERN_WARNING "%s: sb1000_wait_for_ready_clear timeout\n",
- name);
- return -ETIME;
- }
- }
- return 0;
-}
-
-/* Card Send Command (to be used during frame rx) */
-static void
-sb1000_send_command(const int ioaddr[], const char* name,
- const unsigned char out[])
-{
- outb(out[2], ioaddr[0] + 1);
- outb(out[3], ioaddr[0] + 2);
- outb(out[4], ioaddr[0] + 3);
- outb(out[5], ioaddr[0] + 4);
- outb(out[1], ioaddr[0] + 5);
- outb(out[0], ioaddr[0] + 7);
- if (sb1000_debug > 3)
- printk(KERN_DEBUG "%s: sb1000_send_command out: %02x%02x%02x%02x"
- "%02x%02x\n", name, out[0], out[1], out[2], out[3], out[4], out[5]);
-}
-
-/* Card Read Status (to be used during frame rx) */
-static void
-sb1000_read_status(const int ioaddr[], unsigned char in[])
-{
- in[1] = inb(ioaddr[0] + 1);
- in[2] = inb(ioaddr[0] + 2);
- in[3] = inb(ioaddr[0] + 3);
- in[4] = inb(ioaddr[0] + 4);
- in[0] = inb(ioaddr[0] + 5);
-}
-
-/* Issue Read Command (to be used during frame rx) */
-static void
-sb1000_issue_read_command(const int ioaddr[], const char* name)
-{
- static const unsigned char Command0[6] = {0x20, 0x00, 0x00, 0x01, 0x00, 0x00};
-
- sb1000_wait_for_ready_clear(ioaddr, name);
- outb(0xa0, ioaddr[0] + 6);
- sb1000_send_command(ioaddr, name, Command0);
-}
-
-
-/*
- * SB1000 commands for open/configuration
- */
-/* reset SB1000 card */
-static int
-sb1000_reset(const int ioaddr[], const char* name)
-{
- static const unsigned char Command0[6] = {0x80, 0x16, 0x00, 0x00, 0x00, 0x00};
-
- unsigned char st[7];
- int port, status;
-
- port = ioaddr[1] + 6;
- outb(0x4, port);
- inb(port);
- udelay(1000);
- outb(0x0, port);
- inb(port);
- ssleep(1);
- outb(0x4, port);
- inb(port);
- udelay(1000);
- outb(0x0, port);
- inb(port);
- udelay(0);
-
- if ((status = card_send_command(ioaddr, name, Command0, st)))
- return status;
- if (st[3] != 0xf0)
- return -EIO;
- return 0;
-}
-
-/* check SB1000 firmware CRC */
-static int
-sb1000_check_CRC(const int ioaddr[], const char* name)
-{
- static const unsigned char Command0[6] = {0x80, 0x1f, 0x00, 0x00, 0x00, 0x00};
-
- unsigned char st[7];
- int status;
-
- /* check CRC */
- if ((status = card_send_command(ioaddr, name, Command0, st)))
- return status;
- if (st[1] != st[3] || st[2] != st[4])
- return -EIO;
- return 0;
-}
-
-static inline int
-sb1000_start_get_set_command(const int ioaddr[], const char* name)
-{
- static const unsigned char Command0[6] = {0x80, 0x1b, 0x00, 0x00, 0x00, 0x00};
-
- unsigned char st[7];
-
- return card_send_command(ioaddr, name, Command0, st);
-}
-
-static int
-sb1000_end_get_set_command(const int ioaddr[], const char* name)
-{
- static const unsigned char Command0[6] = {0x80, 0x1b, 0x02, 0x00, 0x00, 0x00};
- static const unsigned char Command1[6] = {0x20, 0x00, 0x00, 0x00, 0x00, 0x00};
-
- unsigned char st[7];
- int status;
-
- if ((status = card_send_command(ioaddr, name, Command0, st)))
- return status;
- return card_send_command(ioaddr, name, Command1, st);
-}
-
-static int
-sb1000_activate(const int ioaddr[], const char* name)
-{
- static const unsigned char Command0[6] = {0x80, 0x11, 0x00, 0x00, 0x00, 0x00};
- static const unsigned char Command1[6] = {0x80, 0x16, 0x00, 0x00, 0x00, 0x00};
-
- unsigned char st[7];
- int status;
-
- ssleep(1);
- status = card_send_command(ioaddr, name, Command0, st);
- if (status)
- return status;
- status = card_send_command(ioaddr, name, Command1, st);
- if (status)
- return status;
- if (st[3] != 0xf1) {
- status = sb1000_start_get_set_command(ioaddr, name);
- if (status)
- return status;
- return -EIO;
- }
- udelay(1000);
- return sb1000_start_get_set_command(ioaddr, name);
-}
-
-/* get SB1000 firmware version */
-static int
-sb1000_get_firmware_version(const int ioaddr[], const char* name,
- unsigned char version[], int do_end)
-{
- static const unsigned char Command0[6] = {0x80, 0x23, 0x00, 0x00, 0x00, 0x00};
-
- unsigned char st[7];
- int status;
-
- if ((status = sb1000_start_get_set_command(ioaddr, name)))
- return status;
- if ((status = card_send_command(ioaddr, name, Command0, st)))
- return status;
- if (st[0] != 0xa3)
- return -EIO;
- version[0] = st[1];
- version[1] = st[2];
- if (do_end)
- return sb1000_end_get_set_command(ioaddr, name);
- else
- return 0;
-}
-
-/* get SB1000 frequency */
-static int
-sb1000_get_frequency(const int ioaddr[], const char* name, int* frequency)
-{
- static const unsigned char Command0[6] = {0x80, 0x44, 0x00, 0x00, 0x00, 0x00};
-
- unsigned char st[7];
- int status;
-
- udelay(1000);
- if ((status = sb1000_start_get_set_command(ioaddr, name)))
- return status;
- if ((status = card_send_command(ioaddr, name, Command0, st)))
- return status;
- *frequency = ((st[1] << 8 | st[2]) << 8 | st[3]) << 8 | st[4];
- return sb1000_end_get_set_command(ioaddr, name);
-}
-
-/* set SB1000 frequency */
-static int
-sb1000_set_frequency(const int ioaddr[], const char* name, int frequency)
-{
- unsigned char st[7];
- int status;
- unsigned char Command0[6] = {0x80, 0x29, 0x00, 0x00, 0x00, 0x00};
-
- const int FrequencyLowerLimit = 57000;
- const int FrequencyUpperLimit = 804000;
-
- if (frequency < FrequencyLowerLimit || frequency > FrequencyUpperLimit) {
- printk(KERN_ERR "%s: frequency chosen (%d kHz) is not in the range "
- "[%d,%d] kHz\n", name, frequency, FrequencyLowerLimit,
- FrequencyUpperLimit);
- return -EINVAL;
- }
- udelay(1000);
- if ((status = sb1000_start_get_set_command(ioaddr, name)))
- return status;
- Command0[5] = frequency & 0xff;
- frequency >>= 8;
- Command0[4] = frequency & 0xff;
- frequency >>= 8;
- Command0[3] = frequency & 0xff;
- frequency >>= 8;
- Command0[2] = frequency & 0xff;
- return card_send_command(ioaddr, name, Command0, st);
-}
-
-/* get SB1000 PIDs */
-static int
-sb1000_get_PIDs(const int ioaddr[], const char* name, short PID[])
-{
- static const unsigned char Command0[6] = {0x80, 0x40, 0x00, 0x00, 0x00, 0x00};
- static const unsigned char Command1[6] = {0x80, 0x41, 0x00, 0x00, 0x00, 0x00};
- static const unsigned char Command2[6] = {0x80, 0x42, 0x00, 0x00, 0x00, 0x00};
- static const unsigned char Command3[6] = {0x80, 0x43, 0x00, 0x00, 0x00, 0x00};
-
- unsigned char st[7];
- int status;
-
- udelay(1000);
- if ((status = sb1000_start_get_set_command(ioaddr, name)))
- return status;
-
- if ((status = card_send_command(ioaddr, name, Command0, st)))
- return status;
- PID[0] = st[1] << 8 | st[2];
-
- if ((status = card_send_command(ioaddr, name, Command1, st)))
- return status;
- PID[1] = st[1] << 8 | st[2];
-
- if ((status = card_send_command(ioaddr, name, Command2, st)))
- return status;
- PID[2] = st[1] << 8 | st[2];
-
- if ((status = card_send_command(ioaddr, name, Command3, st)))
- return status;
- PID[3] = st[1] << 8 | st[2];
-
- return sb1000_end_get_set_command(ioaddr, name);
-}
-
-/* set SB1000 PIDs */
-static int
-sb1000_set_PIDs(const int ioaddr[], const char* name, const short PID[])
-{
- static const unsigned char Command4[6] = {0x80, 0x2e, 0x00, 0x00, 0x00, 0x00};
-
- unsigned char st[7];
- short p;
- int status;
- unsigned char Command0[6] = {0x80, 0x31, 0x00, 0x00, 0x00, 0x00};
- unsigned char Command1[6] = {0x80, 0x32, 0x00, 0x00, 0x00, 0x00};
- unsigned char Command2[6] = {0x80, 0x33, 0x00, 0x00, 0x00, 0x00};
- unsigned char Command3[6] = {0x80, 0x34, 0x00, 0x00, 0x00, 0x00};
-
- udelay(1000);
- if ((status = sb1000_start_get_set_command(ioaddr, name)))
- return status;
-
- p = PID[0];
- Command0[3] = p & 0xff;
- p >>= 8;
- Command0[2] = p & 0xff;
- if ((status = card_send_command(ioaddr, name, Command0, st)))
- return status;
-
- p = PID[1];
- Command1[3] = p & 0xff;
- p >>= 8;
- Command1[2] = p & 0xff;
- if ((status = card_send_command(ioaddr, name, Command1, st)))
- return status;
-
- p = PID[2];
- Command2[3] = p & 0xff;
- p >>= 8;
- Command2[2] = p & 0xff;
- if ((status = card_send_command(ioaddr, name, Command2, st)))
- return status;
-
- p = PID[3];
- Command3[3] = p & 0xff;
- p >>= 8;
- Command3[2] = p & 0xff;
- if ((status = card_send_command(ioaddr, name, Command3, st)))
- return status;
-
- if ((status = card_send_command(ioaddr, name, Command4, st)))
- return status;
- return sb1000_end_get_set_command(ioaddr, name);
-}
-
-
-static void
-sb1000_print_status_buffer(const char* name, unsigned char st[],
- unsigned char buffer[], int size)
-{
- int i, j, k;
-
- printk(KERN_DEBUG "%s: status: %02x %02x\n", name, st[0], st[1]);
- if (buffer[24] == 0x08 && buffer[25] == 0x00 && buffer[26] == 0x45) {
- printk(KERN_DEBUG "%s: length: %d protocol: %d from: %d.%d.%d.%d:%d "
- "to %d.%d.%d.%d:%d\n", name, buffer[28] << 8 | buffer[29],
- buffer[35], buffer[38], buffer[39], buffer[40], buffer[41],
- buffer[46] << 8 | buffer[47],
- buffer[42], buffer[43], buffer[44], buffer[45],
- buffer[48] << 8 | buffer[49]);
- } else {
- for (i = 0, k = 0; i < (size + 7) / 8; i++) {
- printk(KERN_DEBUG "%s: %s", name, i ? " " : "buffer:");
- for (j = 0; j < 8 && k < size; j++, k++)
- printk(" %02x", buffer[k]);
- printk("\n");
- }
- }
-}
-
-/*
- * SB1000 commands for frame rx interrupt
- */
-/* receive a single frame and assemble datagram
- * (this is the heart of the interrupt routine)
- */
-static int
-sb1000_rx(struct net_device *dev)
-{
-
-#define FRAMESIZE 184
- unsigned char st[2], buffer[FRAMESIZE], session_id, frame_id;
- short dlen;
- int ioaddr, ns;
- unsigned int skbsize;
- struct sk_buff *skb;
- struct sb1000_private *lp = netdev_priv(dev);
- struct net_device_stats *stats = &dev->stats;
-
- /* SB1000 frame constants */
- const int FrameSize = FRAMESIZE;
- const int NewDatagramHeaderSkip = 8;
- const int NewDatagramHeaderSize = NewDatagramHeaderSkip + 18;
- const int NewDatagramDataSize = FrameSize - NewDatagramHeaderSize;
- const int ContDatagramHeaderSkip = 7;
- const int ContDatagramHeaderSize = ContDatagramHeaderSkip + 1;
- const int ContDatagramDataSize = FrameSize - ContDatagramHeaderSize;
- const int TrailerSize = 4;
-
- ioaddr = dev->base_addr;
-
- insw(ioaddr, (unsigned short*) st, 1);
-#ifdef XXXDEBUG
-printk("cm0: received: %02x %02x\n", st[0], st[1]);
-#endif /* XXXDEBUG */
- lp->rx_frames++;
-
- /* decide if it is a good or bad frame */
- for (ns = 0; ns < NPIDS; ns++) {
- session_id = lp->rx_session_id[ns];
- frame_id = lp->rx_frame_id[ns];
- if (st[0] == session_id) {
- if (st[1] == frame_id || (!frame_id && (st[1] & 0xf0) == 0x30)) {
- goto good_frame;
- } else if ((st[1] & 0xf0) == 0x30 && (st[0] & 0x40)) {
- goto skipped_frame;
- } else {
- goto bad_frame;
- }
- } else if (st[0] == (session_id | 0x40)) {
- if ((st[1] & 0xf0) == 0x30) {
- goto skipped_frame;
- } else {
- goto bad_frame;
- }
- }
- }
- goto bad_frame;
-
-skipped_frame:
- stats->rx_frame_errors++;
- skb = lp->rx_skb[ns];
- if (sb1000_debug > 1)
- printk(KERN_WARNING "%s: missing frame(s): got %02x %02x "
- "expecting %02x %02x\n", dev->name, st[0], st[1],
- skb ? session_id : session_id | 0x40, frame_id);
- if (skb) {
- dev_kfree_skb(skb);
- skb = NULL;
- }
-
-good_frame:
- lp->rx_frame_id[ns] = 0x30 | ((st[1] + 1) & 0x0f);
- /* new datagram */
- if (st[0] & 0x40) {
- /* get data length */
- insw(ioaddr, buffer, NewDatagramHeaderSize / 2);
-#ifdef XXXDEBUG
-printk("cm0: IP identification: %02x%02x fragment offset: %02x%02x\n", buffer[30], buffer[31], buffer[32], buffer[33]);
-#endif /* XXXDEBUG */
- if (buffer[0] != NewDatagramHeaderSkip) {
- if (sb1000_debug > 1)
- printk(KERN_WARNING "%s: new datagram header skip error: "
- "got %02x expecting %02x\n", dev->name, buffer[0],
- NewDatagramHeaderSkip);
- stats->rx_length_errors++;
- insw(ioaddr, buffer, NewDatagramDataSize / 2);
- goto bad_frame_next;
- }
- dlen = ((buffer[NewDatagramHeaderSkip + 3] & 0x0f) << 8 |
- buffer[NewDatagramHeaderSkip + 4]) - 17;
- if (dlen > SB1000_MRU) {
- if (sb1000_debug > 1)
- printk(KERN_WARNING "%s: datagram length (%d) greater "
- "than MRU (%d)\n", dev->name, dlen, SB1000_MRU);
- stats->rx_length_errors++;
- insw(ioaddr, buffer, NewDatagramDataSize / 2);
- goto bad_frame_next;
- }
- lp->rx_dlen[ns] = dlen;
- /* compute size to allocate for datagram */
- skbsize = dlen + FrameSize;
- if ((skb = alloc_skb(skbsize, GFP_ATOMIC)) == NULL) {
- if (sb1000_debug > 1)
- printk(KERN_WARNING "%s: can't allocate %d bytes long "
- "skbuff\n", dev->name, skbsize);
- stats->rx_dropped++;
- insw(ioaddr, buffer, NewDatagramDataSize / 2);
- goto dropped_frame;
- }
- skb->dev = dev;
- skb_reset_mac_header(skb);
- skb->protocol = (unsigned short) buffer[NewDatagramHeaderSkip + 16];
- insw(ioaddr, skb_put(skb, NewDatagramDataSize),
- NewDatagramDataSize / 2);
- lp->rx_skb[ns] = skb;
- } else {
- /* continuation of previous datagram */
- insw(ioaddr, buffer, ContDatagramHeaderSize / 2);
- if (buffer[0] != ContDatagramHeaderSkip) {
- if (sb1000_debug > 1)
- printk(KERN_WARNING "%s: cont datagram header skip error: "
- "got %02x expecting %02x\n", dev->name, buffer[0],
- ContDatagramHeaderSkip);
- stats->rx_length_errors++;
- insw(ioaddr, buffer, ContDatagramDataSize / 2);
- goto bad_frame_next;
- }
- skb = lp->rx_skb[ns];
- insw(ioaddr, skb_put(skb, ContDatagramDataSize),
- ContDatagramDataSize / 2);
- dlen = lp->rx_dlen[ns];
- }
- if (skb->len < dlen + TrailerSize) {
- lp->rx_session_id[ns] &= ~0x40;
- return 0;
- }
-
- /* datagram completed: send to upper level */
- skb_trim(skb, dlen);
- __netif_rx(skb);
- stats->rx_bytes+=dlen;
- stats->rx_packets++;
- lp->rx_skb[ns] = NULL;
- lp->rx_session_id[ns] |= 0x40;
- return 0;
-
-bad_frame:
- insw(ioaddr, buffer, FrameSize / 2);
- if (sb1000_debug > 1)
- printk(KERN_WARNING "%s: frame error: got %02x %02x\n",
- dev->name, st[0], st[1]);
- stats->rx_frame_errors++;
-bad_frame_next:
- if (sb1000_debug > 2)
- sb1000_print_status_buffer(dev->name, st, buffer, FrameSize);
-dropped_frame:
- stats->rx_errors++;
- if (ns < NPIDS) {
- if ((skb = lp->rx_skb[ns])) {
- dev_kfree_skb(skb);
- lp->rx_skb[ns] = NULL;
- }
- lp->rx_session_id[ns] |= 0x40;
- }
- return -1;
-}
-
-static void
-sb1000_error_dpc(struct net_device *dev)
-{
- static const unsigned char Command0[6] = {0x80, 0x26, 0x00, 0x00, 0x00, 0x00};
-
- char *name;
- unsigned char st[5];
- int ioaddr[2];
- struct sb1000_private *lp = netdev_priv(dev);
- const int ErrorDpcCounterInitialize = 200;
-
- ioaddr[0] = dev->base_addr;
- /* mem_start holds the second I/O address */
- ioaddr[1] = dev->mem_start;
- name = dev->name;
-
- sb1000_wait_for_ready_clear(ioaddr, name);
- sb1000_send_command(ioaddr, name, Command0);
- sb1000_wait_for_ready(ioaddr, name);
- sb1000_read_status(ioaddr, st);
- if (st[1] & 0x10)
- lp->rx_error_dpc_count = ErrorDpcCounterInitialize;
-}
-
-
-/*
- * Linux interface functions
- */
-static int
-sb1000_open(struct net_device *dev)
-{
- char *name;
- int ioaddr[2], status;
- struct sb1000_private *lp = netdev_priv(dev);
- const unsigned short FirmwareVersion[] = {0x01, 0x01};
-
- ioaddr[0] = dev->base_addr;
- /* mem_start holds the second I/O address */
- ioaddr[1] = dev->mem_start;
- name = dev->name;
-
- /* initialize sb1000 */
- if ((status = sb1000_reset(ioaddr, name)))
- return status;
- ssleep(1);
- if ((status = sb1000_check_CRC(ioaddr, name)))
- return status;
-
- /* initialize private data before board can catch interrupts */
- lp->rx_skb[0] = NULL;
- lp->rx_skb[1] = NULL;
- lp->rx_skb[2] = NULL;
- lp->rx_skb[3] = NULL;
- lp->rx_dlen[0] = 0;
- lp->rx_dlen[1] = 0;
- lp->rx_dlen[2] = 0;
- lp->rx_dlen[3] = 0;
- lp->rx_frames = 0;
- lp->rx_error_count = 0;
- lp->rx_error_dpc_count = 0;
- lp->rx_session_id[0] = 0x50;
- lp->rx_session_id[1] = 0x48;
- lp->rx_session_id[2] = 0x44;
- lp->rx_session_id[3] = 0x42;
- lp->rx_frame_id[0] = 0;
- lp->rx_frame_id[1] = 0;
- lp->rx_frame_id[2] = 0;
- lp->rx_frame_id[3] = 0;
- if (request_irq(dev->irq, sb1000_interrupt, 0, "sb1000", dev)) {
- return -EAGAIN;
- }
-
- if (sb1000_debug > 2)
- printk(KERN_DEBUG "%s: Opening, IRQ %d\n", name, dev->irq);
-
- /* Activate board and check firmware version */
- udelay(1000);
- if ((status = sb1000_activate(ioaddr, name)))
- return status;
- udelay(0);
- if ((status = sb1000_get_firmware_version(ioaddr, name, version, 0)))
- return status;
- if (version[0] != FirmwareVersion[0] || version[1] != FirmwareVersion[1])
- printk(KERN_WARNING "%s: found firmware version %x.%02x "
- "(should be %x.%02x)\n", name, version[0], version[1],
- FirmwareVersion[0], FirmwareVersion[1]);
-
-
- netif_start_queue(dev);
- return 0; /* Always succeed */
-}
-
-static int sb1000_siocdevprivate(struct net_device *dev, struct ifreq *ifr,
- void __user *data, int cmd)
-{
- char* name;
- unsigned char version[2];
- short PID[4];
- int ioaddr[2], status, frequency;
- unsigned int stats[5];
- struct sb1000_private *lp = netdev_priv(dev);
-
- if (!(dev && dev->flags & IFF_UP))
- return -ENODEV;
-
- ioaddr[0] = dev->base_addr;
- /* mem_start holds the second I/O address */
- ioaddr[1] = dev->mem_start;
- name = dev->name;
-
- switch (cmd) {
- case SIOCGCMSTATS: /* get statistics */
- stats[0] = dev->stats.rx_bytes;
- stats[1] = lp->rx_frames;
- stats[2] = dev->stats.rx_packets;
- stats[3] = dev->stats.rx_errors;
- stats[4] = dev->stats.rx_dropped;
- if (copy_to_user(data, stats, sizeof(stats)))
- return -EFAULT;
- status = 0;
- break;
-
- case SIOCGCMFIRMWARE: /* get firmware version */
- if ((status = sb1000_get_firmware_version(ioaddr, name, version, 1)))
- return status;
- if (copy_to_user(data, version, sizeof(version)))
- return -EFAULT;
- break;
-
- case SIOCGCMFREQUENCY: /* get frequency */
- if ((status = sb1000_get_frequency(ioaddr, name, &frequency)))
- return status;
- if (put_user(frequency, (int __user *)data))
- return -EFAULT;
- break;
-
- case SIOCSCMFREQUENCY: /* set frequency */
- if (!capable(CAP_NET_ADMIN))
- return -EPERM;
- if (get_user(frequency, (int __user *)data))
- return -EFAULT;
- if ((status = sb1000_set_frequency(ioaddr, name, frequency)))
- return status;
- break;
-
- case SIOCGCMPIDS: /* get PIDs */
- if ((status = sb1000_get_PIDs(ioaddr, name, PID)))
- return status;
- if (copy_to_user(data, PID, sizeof(PID)))
- return -EFAULT;
- break;
-
- case SIOCSCMPIDS: /* set PIDs */
- if (!capable(CAP_NET_ADMIN))
- return -EPERM;
- if (copy_from_user(PID, data, sizeof(PID)))
- return -EFAULT;
- if ((status = sb1000_set_PIDs(ioaddr, name, PID)))
- return status;
- /* set session_id, frame_id and pkt_type too */
- lp->rx_session_id[0] = 0x50 | (PID[0] & 0x0f);
- lp->rx_session_id[1] = 0x48;
- lp->rx_session_id[2] = 0x44;
- lp->rx_session_id[3] = 0x42;
- lp->rx_frame_id[0] = 0;
- lp->rx_frame_id[1] = 0;
- lp->rx_frame_id[2] = 0;
- lp->rx_frame_id[3] = 0;
- break;
-
- default:
- status = -EINVAL;
- break;
- }
- return status;
-}
-
-/* transmit function: do nothing since SB1000 can't send anything out */
-static netdev_tx_t
-sb1000_start_xmit(struct sk_buff *skb, struct net_device *dev)
-{
- printk(KERN_WARNING "%s: trying to transmit!!!\n", dev->name);
- /* sb1000 can't xmit datagrams */
- dev_kfree_skb(skb);
- return NETDEV_TX_OK;
-}
-
-/* SB1000 interrupt handler. */
-static irqreturn_t sb1000_interrupt(int irq, void *dev_id)
-{
- static const unsigned char Command0[6] = {0x80, 0x2c, 0x00, 0x00, 0x00, 0x00};
- static const unsigned char Command1[6] = {0x80, 0x2e, 0x00, 0x00, 0x00, 0x00};
-
- char *name;
- unsigned char st;
- int ioaddr[2];
- struct net_device *dev = dev_id;
- struct sb1000_private *lp = netdev_priv(dev);
-
- const int MaxRxErrorCount = 6;
-
- ioaddr[0] = dev->base_addr;
- /* mem_start holds the second I/O address */
- ioaddr[1] = dev->mem_start;
- name = dev->name;
-
- /* is it a good interrupt? */
- st = inb(ioaddr[1] + 6);
- if (!(st & 0x08 && st & 0x20)) {
- return IRQ_NONE;
- }
-
- if (sb1000_debug > 3)
- printk(KERN_DEBUG "%s: entering interrupt\n", dev->name);
-
- st = inb(ioaddr[0] + 7);
- if (sb1000_rx(dev))
- lp->rx_error_count++;
-#ifdef SB1000_DELAY
- udelay(SB1000_DELAY);
-#endif /* SB1000_DELAY */
- sb1000_issue_read_command(ioaddr, name);
- if (st & 0x01) {
- sb1000_error_dpc(dev);
- sb1000_issue_read_command(ioaddr, name);
- }
- if (lp->rx_error_dpc_count && !(--lp->rx_error_dpc_count)) {
- sb1000_wait_for_ready_clear(ioaddr, name);
- sb1000_send_command(ioaddr, name, Command0);
- sb1000_wait_for_ready(ioaddr, name);
- sb1000_issue_read_command(ioaddr, name);
- }
- if (lp->rx_error_count >= MaxRxErrorCount) {
- sb1000_wait_for_ready_clear(ioaddr, name);
- sb1000_send_command(ioaddr, name, Command1);
- sb1000_wait_for_ready(ioaddr, name);
- sb1000_issue_read_command(ioaddr, name);
- lp->rx_error_count = 0;
- }
-
- return IRQ_HANDLED;
-}
-
-static int sb1000_close(struct net_device *dev)
-{
- int i;
- int ioaddr[2];
- struct sb1000_private *lp = netdev_priv(dev);
-
- if (sb1000_debug > 2)
- printk(KERN_DEBUG "%s: Shutting down sb1000.\n", dev->name);
-
- netif_stop_queue(dev);
-
- ioaddr[0] = dev->base_addr;
- /* mem_start holds the second I/O address */
- ioaddr[1] = dev->mem_start;
-
- free_irq(dev->irq, dev);
- /* If we don't do this, we can't re-insmod it later. */
- release_region(ioaddr[1], SB1000_IO_EXTENT);
- release_region(ioaddr[0], SB1000_IO_EXTENT);
-
- /* free rx_skb's if needed */
- for (i=0; i<4; i++) {
- if (lp->rx_skb[i]) {
- dev_kfree_skb(lp->rx_skb[i]);
- }
- }
- return 0;
-}
-
-MODULE_AUTHOR("Franco Venturi <fventuri@mediaone.net>");
-MODULE_DESCRIPTION("General Instruments SB1000 driver");
-MODULE_LICENSE("GPL");
-
-module_pnp_driver(sb1000_driver);
diff --git a/drivers/net/tap.c b/drivers/net/tap.c
index 5ca6ecf0ce5f..d4ece538f1b2 100644
--- a/drivers/net/tap.c
+++ b/drivers/net/tap.c
@@ -26,74 +26,9 @@
#include <linux/virtio_net.h>
#include <linux/skb_array.h>
-#define TAP_IFFEATURES (IFF_VNET_HDR | IFF_MULTI_QUEUE)
-
-#define TAP_VNET_LE 0x80000000
-#define TAP_VNET_BE 0x40000000
-
-#ifdef CONFIG_TUN_VNET_CROSS_LE
-static inline bool tap_legacy_is_little_endian(struct tap_queue *q)
-{
- return q->flags & TAP_VNET_BE ? false :
- virtio_legacy_is_little_endian();
-}
-
-static long tap_get_vnet_be(struct tap_queue *q, int __user *sp)
-{
- int s = !!(q->flags & TAP_VNET_BE);
-
- if (put_user(s, sp))
- return -EFAULT;
-
- return 0;
-}
-
-static long tap_set_vnet_be(struct tap_queue *q, int __user *sp)
-{
- int s;
-
- if (get_user(s, sp))
- return -EFAULT;
-
- if (s)
- q->flags |= TAP_VNET_BE;
- else
- q->flags &= ~TAP_VNET_BE;
-
- return 0;
-}
-#else
-static inline bool tap_legacy_is_little_endian(struct tap_queue *q)
-{
- return virtio_legacy_is_little_endian();
-}
-
-static long tap_get_vnet_be(struct tap_queue *q, int __user *argp)
-{
- return -EINVAL;
-}
-
-static long tap_set_vnet_be(struct tap_queue *q, int __user *argp)
-{
- return -EINVAL;
-}
-#endif /* CONFIG_TUN_VNET_CROSS_LE */
-
-static inline bool tap_is_little_endian(struct tap_queue *q)
-{
- return q->flags & TAP_VNET_LE ||
- tap_legacy_is_little_endian(q);
-}
-
-static inline u16 tap16_to_cpu(struct tap_queue *q, __virtio16 val)
-{
- return __virtio16_to_cpu(tap_is_little_endian(q), val);
-}
+#include "tun_vnet.h"
-static inline __virtio16 cpu_to_tap16(struct tap_queue *q, u16 val)
-{
- return __cpu_to_virtio16(tap_is_little_endian(q), val);
-}
+#define TAP_IFFEATURES (IFF_VNET_HDR | IFF_MULTI_QUEUE)
static struct proto tap_proto = {
.name = "tap",
@@ -645,6 +580,7 @@ static ssize_t tap_get_user(struct tap_queue *q, void *msg_control,
int err;
struct virtio_net_hdr vnet_hdr = { 0 };
int vnet_hdr_len = 0;
+ int hdr_len = 0;
int copylen = 0;
int depth;
bool zerocopy = false;
@@ -654,25 +590,13 @@ static ssize_t tap_get_user(struct tap_queue *q, void *msg_control,
if (q->flags & IFF_VNET_HDR) {
vnet_hdr_len = READ_ONCE(q->vnet_hdr_sz);
- err = -EINVAL;
- if (len < vnet_hdr_len)
+ hdr_len = tun_vnet_hdr_get(vnet_hdr_len, q->flags, from, &vnet_hdr);
+ if (hdr_len < 0) {
+ err = hdr_len;
goto err;
- len -= vnet_hdr_len;
+ }
- err = -EFAULT;
- if (!copy_from_iter_full(&vnet_hdr, sizeof(vnet_hdr), from))
- goto err;
- iov_iter_advance(from, vnet_hdr_len - sizeof(vnet_hdr));
- if ((vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
- tap16_to_cpu(q, vnet_hdr.csum_start) +
- tap16_to_cpu(q, vnet_hdr.csum_offset) + 2 >
- tap16_to_cpu(q, vnet_hdr.hdr_len))
- vnet_hdr.hdr_len = cpu_to_tap16(q,
- tap16_to_cpu(q, vnet_hdr.csum_start) +
- tap16_to_cpu(q, vnet_hdr.csum_offset) + 2);
- err = -EINVAL;
- if (tap16_to_cpu(q, vnet_hdr.hdr_len) > len)
- goto err;
+ len -= vnet_hdr_len;
}
err = -EINVAL;
@@ -682,12 +606,7 @@ static ssize_t tap_get_user(struct tap_queue *q, void *msg_control,
if (msg_control && sock_flag(&q->sk, SOCK_ZEROCOPY)) {
struct iov_iter i;
- copylen = vnet_hdr.hdr_len ?
- tap16_to_cpu(q, vnet_hdr.hdr_len) : GOODCOPY_LEN;
- if (copylen > good_linear)
- copylen = good_linear;
- else if (copylen < ETH_HLEN)
- copylen = ETH_HLEN;
+ copylen = clamp(hdr_len ?: GOODCOPY_LEN, ETH_HLEN, good_linear);
linear = copylen;
i = *from;
iov_iter_advance(&i, copylen);
@@ -697,11 +616,7 @@ static ssize_t tap_get_user(struct tap_queue *q, void *msg_control,
if (!zerocopy) {
copylen = len;
- linear = tap16_to_cpu(q, vnet_hdr.hdr_len);
- if (linear > good_linear)
- linear = good_linear;
- else if (linear < ETH_HLEN)
- linear = ETH_HLEN;
+ linear = clamp(hdr_len, ETH_HLEN, good_linear);
}
skb = tap_alloc_skb(&q->sk, TAP_RESERVE, copylen,
@@ -733,8 +648,7 @@ static ssize_t tap_get_user(struct tap_queue *q, void *msg_control,
skb->dev = tap->dev;
if (vnet_hdr_len) {
- err = virtio_net_hdr_to_skb(skb, &vnet_hdr,
- tap_is_little_endian(q));
+ err = tun_vnet_hdr_to_skb(q->flags, skb, &vnet_hdr);
if (err) {
rcu_read_unlock();
drop_reason = SKB_DROP_REASON_DEV_HDR;
@@ -797,23 +711,17 @@ static ssize_t tap_put_user(struct tap_queue *q,
int total;
if (q->flags & IFF_VNET_HDR) {
- int vlan_hlen = skb_vlan_tag_present(skb) ? VLAN_HLEN : 0;
struct virtio_net_hdr vnet_hdr;
vnet_hdr_len = READ_ONCE(q->vnet_hdr_sz);
- if (iov_iter_count(iter) < vnet_hdr_len)
- return -EINVAL;
- if (virtio_net_hdr_from_skb(skb, &vnet_hdr,
- tap_is_little_endian(q), true,
- vlan_hlen))
- BUG();
-
- if (copy_to_iter(&vnet_hdr, sizeof(vnet_hdr), iter) !=
- sizeof(vnet_hdr))
- return -EFAULT;
+ ret = tun_vnet_hdr_from_skb(q->flags, NULL, skb, &vnet_hdr);
+ if (ret)
+ return ret;
- iov_iter_advance(iter, vnet_hdr_len - sizeof(vnet_hdr));
+ ret = tun_vnet_hdr_put(vnet_hdr_len, iter, &vnet_hdr);
+ if (ret)
+ return ret;
}
total = vnet_hdr_len;
total += skb->len;
@@ -1072,42 +980,6 @@ static long tap_ioctl(struct file *file, unsigned int cmd,
q->sk.sk_sndbuf = s;
return 0;
- case TUNGETVNETHDRSZ:
- s = q->vnet_hdr_sz;
- if (put_user(s, sp))
- return -EFAULT;
- return 0;
-
- case TUNSETVNETHDRSZ:
- if (get_user(s, sp))
- return -EFAULT;
- if (s < (int)sizeof(struct virtio_net_hdr))
- return -EINVAL;
-
- q->vnet_hdr_sz = s;
- return 0;
-
- case TUNGETVNETLE:
- s = !!(q->flags & TAP_VNET_LE);
- if (put_user(s, sp))
- return -EFAULT;
- return 0;
-
- case TUNSETVNETLE:
- if (get_user(s, sp))
- return -EFAULT;
- if (s)
- q->flags |= TAP_VNET_LE;
- else
- q->flags &= ~TAP_VNET_LE;
- return 0;
-
- case TUNGETVNETBE:
- return tap_get_vnet_be(q, sp);
-
- case TUNSETVNETBE:
- return tap_set_vnet_be(q, sp);
-
case TUNSETOFFLOAD:
/* let the user check for future flags */
if (arg & ~(TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6 |
@@ -1151,7 +1023,7 @@ static long tap_ioctl(struct file *file, unsigned int cmd,
return ret;
default:
- return -EINVAL;
+ return tun_vnet_ioctl(&q->vnet_hdr_sz, &q->flags, cmd, sp);
}
}
@@ -1198,7 +1070,7 @@ static int tap_get_user_xdp(struct tap_queue *q, struct xdp_buff *xdp)
skb->protocol = eth_hdr(skb)->h_proto;
if (vnet_hdr_len) {
- err = virtio_net_hdr_to_skb(skb, gso, tap_is_little_endian(q));
+ err = tun_vnet_hdr_to_skb(q->flags, skb, gso);
if (err)
goto err_kfree;
}
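
The copylen/linear bounds in tap_get_user() collapse into a single clamp() on the header length returned by tun_vnet_hdr_get(). Below is a minimal userspace sketch of that bound, not kernel code: the stand-in clamp macro mimics the one in linux/minmax.h, GOODCOPY_LEN is taken as 128 for illustration, and the `?:` elvis operator is the same GNU extension the kernel relies on.

/*
 * Sketch only: clamp(hdr_len ?: GOODCOPY_LEN, ETH_HLEN, good_linear) picks
 * hdr_len when non-zero, falls back to GOODCOPY_LEN otherwise, then bounds
 * the result to [ETH_HLEN, good_linear].
 */
#include <stdio.h>

#define clamp(v, lo, hi) ((v) < (lo) ? (lo) : (v) > (hi) ? (hi) : (v))

#define ETH_HLEN     14
#define GOODCOPY_LEN 128	/* illustrative value */

int main(void)
{
	int good_linear = 1500;
	int hdr_lens[] = { 0, 6, 64, 4096 };

	for (unsigned int i = 0; i < sizeof(hdr_lens) / sizeof(hdr_lens[0]); i++) {
		int hdr_len = hdr_lens[i];
		int copylen = clamp(hdr_len ?: GOODCOPY_LEN, ETH_HLEN, good_linear);

		printf("hdr_len=%4d -> copylen=%4d\n", hdr_len, copylen);
	}
	return 0;
}

With these values, hdr_len 0 falls back to 128, 6 is raised to ETH_HLEN, 64 passes through, and 4096 is capped at good_linear.
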
diff --git a/drivers/net/team/team_core.c b/drivers/net/team/team_core.c
index f4019815f473..d8fc0c79745d 100644
--- a/drivers/net/team/team_core.c
+++ b/drivers/net/team/team_core.c
@@ -23,6 +23,7 @@
#include <linux/rtnetlink.h>
#include <net/rtnetlink.h>
#include <net/genetlink.h>
+#include <net/netdev_lock.h>
#include <net/netlink.h>
#include <net/sch_generic.h>
#include <linux/if_team.h>
@@ -2203,7 +2204,7 @@ static void team_setup(struct net_device *dev)
dev->lltx = true;
/* Don't allow team devices to change network namespaces. */
- dev->netns_local = true;
+ dev->netns_immutable = true;
dev->features |= NETIF_F_GRO;
@@ -2218,10 +2219,12 @@ static void team_setup(struct net_device *dev)
dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX;
}
-static int team_newlink(struct net *src_net, struct net_device *dev,
- struct nlattr *tb[], struct nlattr *data[],
+static int team_newlink(struct net_device *dev,
+ struct rtnl_newlink_params *params,
struct netlink_ext_ack *extack)
{
+ struct nlattr **tb = params->tb;
+
if (tb[IFLA_ADDRESS] == NULL)
eth_hw_addr_random(dev);
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index acf96f262488..f75f912a0225 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -83,6 +83,8 @@
#include <linux/uaccess.h>
#include <linux/proc_fs.h>
+#include "tun_vnet.h"
+
static void tun_default_link_ksettings(struct net_device *dev,
struct ethtool_link_ksettings *cmd);
@@ -94,9 +96,6 @@ static void tun_default_link_ksettings(struct net_device *dev,
* overload it to mean fasync when stored there.
*/
#define TUN_FASYNC IFF_ATTACH_QUEUE
-/* High bits in flags field are unused. */
-#define TUN_VNET_LE 0x80000000
-#define TUN_VNET_BE 0x40000000
#define TUN_FEATURES (IFF_NO_PI | IFF_ONE_QUEUE | IFF_VNET_HDR | \
IFF_MULTI_QUEUE | IFF_NAPI | IFF_NAPI_FRAGS)
@@ -298,70 +297,6 @@ static bool tun_napi_frags_enabled(const struct tun_file *tfile)
return tfile->napi_frags_enabled;
}
-#ifdef CONFIG_TUN_VNET_CROSS_LE
-static inline bool tun_legacy_is_little_endian(struct tun_struct *tun)
-{
- return tun->flags & TUN_VNET_BE ? false :
- virtio_legacy_is_little_endian();
-}
-
-static long tun_get_vnet_be(struct tun_struct *tun, int __user *argp)
-{
- int be = !!(tun->flags & TUN_VNET_BE);
-
- if (put_user(be, argp))
- return -EFAULT;
-
- return 0;
-}
-
-static long tun_set_vnet_be(struct tun_struct *tun, int __user *argp)
-{
- int be;
-
- if (get_user(be, argp))
- return -EFAULT;
-
- if (be)
- tun->flags |= TUN_VNET_BE;
- else
- tun->flags &= ~TUN_VNET_BE;
-
- return 0;
-}
-#else
-static inline bool tun_legacy_is_little_endian(struct tun_struct *tun)
-{
- return virtio_legacy_is_little_endian();
-}
-
-static long tun_get_vnet_be(struct tun_struct *tun, int __user *argp)
-{
- return -EINVAL;
-}
-
-static long tun_set_vnet_be(struct tun_struct *tun, int __user *argp)
-{
- return -EINVAL;
-}
-#endif /* CONFIG_TUN_VNET_CROSS_LE */
-
-static inline bool tun_is_little_endian(struct tun_struct *tun)
-{
- return tun->flags & TUN_VNET_LE ||
- tun_legacy_is_little_endian(tun);
-}
-
-static inline u16 tun16_to_cpu(struct tun_struct *tun, __virtio16 val)
-{
- return __virtio16_to_cpu(tun_is_little_endian(tun), val);
-}
-
-static inline __virtio16 cpu_to_tun16(struct tun_struct *tun, u16 val)
-{
- return __cpu_to_virtio16(tun_is_little_endian(tun), val);
-}
-
static inline u32 tun_hashfn(u32 rxhash)
{
return rxhash & TUN_MASK_FLOW_ENTRIES;
@@ -1600,7 +1535,8 @@ static bool tun_can_build_skb(struct tun_struct *tun, struct tun_file *tfile,
static struct sk_buff *__tun_build_skb(struct tun_file *tfile,
struct page_frag *alloc_frag, char *buf,
- int buflen, int len, int pad)
+ int buflen, int len, int pad,
+ int metasize)
{
struct sk_buff *skb = build_skb(buf, buflen);
@@ -1609,6 +1545,8 @@ static struct sk_buff *__tun_build_skb(struct tun_file *tfile,
skb_reserve(skb, pad);
skb_put(skb, len);
+ if (metasize)
+ skb_metadata_set(skb, metasize);
skb_set_owner_w(skb, tfile->socket.sk);
get_page(alloc_frag->page);
@@ -1668,6 +1606,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
char *buf;
size_t copied;
int pad = TUN_RX_PAD;
+ int metasize = 0;
int err = 0;
rcu_read_lock();
@@ -1695,7 +1634,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
if (hdr->gso_type || !xdp_prog) {
*skb_xdp = 1;
return __tun_build_skb(tfile, alloc_frag, buf, buflen, len,
- pad);
+ pad, metasize);
}
*skb_xdp = 0;
@@ -1709,7 +1648,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
u32 act;
xdp_init_buff(&xdp, buflen, &tfile->xdp_rxq);
- xdp_prepare_buff(&xdp, buf, pad, len, false);
+ xdp_prepare_buff(&xdp, buf, pad, len, true);
act = bpf_prog_run_xdp(xdp_prog, &xdp);
if (act == XDP_REDIRECT || act == XDP_TX) {
@@ -1730,12 +1669,18 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
pad = xdp.data - xdp.data_hard_start;
len = xdp.data_end - xdp.data;
+
+ /* It is known that the xdp_buff was prepared with metadata
+ * support, so the metasize will never be negative.
+ */
+ metasize = xdp.data - xdp.data_meta;
}
bpf_net_ctx_clear(bpf_net_ctx);
rcu_read_unlock();
local_bh_enable();
- return __tun_build_skb(tfile, alloc_frag, buf, buflen, len, pad);
+ return __tun_build_skb(tfile, alloc_frag, buf, buflen, len, pad,
+ metasize);
out:
bpf_net_ctx_clear(bpf_net_ctx);
@@ -1756,6 +1701,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
struct virtio_net_hdr gso = { 0 };
int good_linear;
int copylen;
+ int hdr_len = 0;
bool zerocopy = false;
int err;
u32 rxhash = 0;
@@ -1775,26 +1721,16 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
if (tun->flags & IFF_VNET_HDR) {
int vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz);
- if (len < vnet_hdr_sz)
- return -EINVAL;
- len -= vnet_hdr_sz;
+ hdr_len = tun_vnet_hdr_get(vnet_hdr_sz, tun->flags, from, &gso);
+ if (hdr_len < 0)
+ return hdr_len;
- if (!copy_from_iter_full(&gso, sizeof(gso), from))
- return -EFAULT;
-
- if ((gso.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
- tun16_to_cpu(tun, gso.csum_start) + tun16_to_cpu(tun, gso.csum_offset) + 2 > tun16_to_cpu(tun, gso.hdr_len))
- gso.hdr_len = cpu_to_tun16(tun, tun16_to_cpu(tun, gso.csum_start) + tun16_to_cpu(tun, gso.csum_offset) + 2);
-
- if (tun16_to_cpu(tun, gso.hdr_len) > len)
- return -EINVAL;
- iov_iter_advance(from, vnet_hdr_sz - sizeof(gso));
+ len -= vnet_hdr_sz;
}
if ((tun->flags & TUN_TYPE_MASK) == IFF_TAP) {
align += NET_IP_ALIGN;
- if (unlikely(len < ETH_HLEN ||
- (gso.hdr_len && tun16_to_cpu(tun, gso.hdr_len) < ETH_HLEN)))
+ if (unlikely(len < ETH_HLEN || (hdr_len && hdr_len < ETH_HLEN)))
return -EINVAL;
}
@@ -1807,9 +1743,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
* enough room for skb expand head in case it is used.
* The rest of the buffer is mapped from userspace.
*/
- copylen = gso.hdr_len ? tun16_to_cpu(tun, gso.hdr_len) : GOODCOPY_LEN;
- if (copylen > good_linear)
- copylen = good_linear;
+ copylen = min(hdr_len ? hdr_len : GOODCOPY_LEN, good_linear);
linear = copylen;
iov_iter_advance(&i, copylen);
if (iov_iter_npages(&i, INT_MAX) <= MAX_SKB_FRAGS)
@@ -1830,10 +1764,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
} else {
if (!zerocopy) {
copylen = len;
- if (tun16_to_cpu(tun, gso.hdr_len) > good_linear)
- linear = good_linear;
- else
- linear = tun16_to_cpu(tun, gso.hdr_len);
+ linear = min(hdr_len, good_linear);
}
if (frags) {
@@ -1868,7 +1799,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
}
}
- if (virtio_net_hdr_to_skb(skb, &gso, tun_is_little_endian(tun))) {
+ if (tun_vnet_hdr_to_skb(tun->flags, skb, &gso)) {
atomic_long_inc(&tun->rx_frame_errors);
err = -EINVAL;
goto free_skb;
@@ -2063,18 +1994,15 @@ static ssize_t tun_put_user_xdp(struct tun_struct *tun,
{
int vnet_hdr_sz = 0;
size_t size = xdp_frame->len;
- size_t ret;
+ ssize_t ret;
if (tun->flags & IFF_VNET_HDR) {
struct virtio_net_hdr gso = { 0 };
vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz);
- if (unlikely(iov_iter_count(iter) < vnet_hdr_sz))
- return -EINVAL;
- if (unlikely(copy_to_iter(&gso, sizeof(gso), iter) !=
- sizeof(gso)))
- return -EFAULT;
- iov_iter_advance(iter, vnet_hdr_sz - sizeof(gso));
+ ret = tun_vnet_hdr_put(vnet_hdr_sz, iter, &gso);
+ if (ret)
+ return ret;
}
ret = copy_to_iter(xdp_frame->data, size, iter) + vnet_hdr_sz;
@@ -2097,6 +2025,7 @@ static ssize_t tun_put_user(struct tun_struct *tun,
int vlan_offset = 0;
int vlan_hlen = 0;
int vnet_hdr_sz = 0;
+ int ret;
if (skb_vlan_tag_present(skb))
vlan_hlen = VLAN_HLEN;
@@ -2123,31 +2052,13 @@ static ssize_t tun_put_user(struct tun_struct *tun,
if (vnet_hdr_sz) {
struct virtio_net_hdr gso;
- if (iov_iter_count(iter) < vnet_hdr_sz)
- return -EINVAL;
-
- if (virtio_net_hdr_from_skb(skb, &gso,
- tun_is_little_endian(tun), true,
- vlan_hlen)) {
- struct skb_shared_info *sinfo = skb_shinfo(skb);
-
- if (net_ratelimit()) {
- netdev_err(tun->dev, "unexpected GSO type: 0x%x, gso_size %d, hdr_len %d\n",
- sinfo->gso_type, tun16_to_cpu(tun, gso.gso_size),
- tun16_to_cpu(tun, gso.hdr_len));
- print_hex_dump(KERN_ERR, "tun: ",
- DUMP_PREFIX_NONE,
- 16, 1, skb->head,
- min((int)tun16_to_cpu(tun, gso.hdr_len), 64), true);
- }
- WARN_ON_ONCE(1);
- return -EINVAL;
- }
-
- if (copy_to_iter(&gso, sizeof(gso), iter) != sizeof(gso))
- return -EFAULT;
+ ret = tun_vnet_hdr_from_skb(tun->flags, tun->dev, skb, &gso);
+ if (ret)
+ return ret;
- iov_iter_advance(iter, vnet_hdr_sz - sizeof(gso));
+ ret = tun_vnet_hdr_put(vnet_hdr_sz, iter, &gso);
+ if (ret)
+ return ret;
}
if (vlan_hlen) {
@@ -2452,6 +2363,7 @@ static int tun_xdp_one(struct tun_struct *tun,
struct sk_buff_head *queue;
u32 rxhash = 0, act;
int buflen = hdr->buflen;
+ int metasize = 0;
int ret = 0;
bool skb_xdp = false;
struct page *page;
@@ -2467,7 +2379,6 @@ static int tun_xdp_one(struct tun_struct *tun,
}
xdp_init_buff(xdp, buflen, &tfile->xdp_rxq);
- xdp_set_data_meta_invalid(xdp);
act = bpf_prog_run_xdp(xdp_prog, xdp);
ret = tun_xdp_act(tun, xdp_prog, xdp, act);
@@ -2507,7 +2418,15 @@ build:
skb_reserve(skb, xdp->data - xdp->data_hard_start);
skb_put(skb, xdp->data_end - xdp->data);
- if (virtio_net_hdr_to_skb(skb, gso, tun_is_little_endian(tun))) {
+ /* The externally provided xdp_buff may have no metadata support, which
+ * is marked by xdp->data_meta being xdp->data + 1. This will lead to a
+ * metasize of -1 and is the reason why the condition checks for > 0.
+ */
+ metasize = xdp->data - xdp->data_meta;
+ if (metasize > 0)
+ skb_metadata_set(skb, metasize);
+
+ if (tun_vnet_hdr_to_skb(tun->flags, skb, gso)) {
atomic_long_inc(&tun->rx_frame_errors);
kfree_skb(skb);
ret = -EINVAL;
@@ -3091,8 +3010,6 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
kgid_t group;
int ifindex;
int sndbuf;
- int vnet_hdr_sz;
- int le;
int ret;
bool do_notify = false;
@@ -3299,50 +3216,6 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
tun_set_sndbuf(tun);
break;
- case TUNGETVNETHDRSZ:
- vnet_hdr_sz = tun->vnet_hdr_sz;
- if (copy_to_user(argp, &vnet_hdr_sz, sizeof(vnet_hdr_sz)))
- ret = -EFAULT;
- break;
-
- case TUNSETVNETHDRSZ:
- if (copy_from_user(&vnet_hdr_sz, argp, sizeof(vnet_hdr_sz))) {
- ret = -EFAULT;
- break;
- }
- if (vnet_hdr_sz < (int)sizeof(struct virtio_net_hdr)) {
- ret = -EINVAL;
- break;
- }
-
- tun->vnet_hdr_sz = vnet_hdr_sz;
- break;
-
- case TUNGETVNETLE:
- le = !!(tun->flags & TUN_VNET_LE);
- if (put_user(le, (int __user *)argp))
- ret = -EFAULT;
- break;
-
- case TUNSETVNETLE:
- if (get_user(le, (int __user *)argp)) {
- ret = -EFAULT;
- break;
- }
- if (le)
- tun->flags |= TUN_VNET_LE;
- else
- tun->flags &= ~TUN_VNET_LE;
- break;
-
- case TUNGETVNETBE:
- ret = tun_get_vnet_be(tun, argp);
- break;
-
- case TUNSETVNETBE:
- ret = tun_set_vnet_be(tun, argp);
- break;
-
case TUNATTACHFILTER:
/* Can be set only for TAPs */
ret = -EINVAL;
@@ -3398,7 +3271,7 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
break;
default:
- ret = -EINVAL;
+ ret = tun_vnet_ioctl(&tun->vnet_hdr_sz, &tun->flags, cmd, argp);
break;
}
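
tun_build_skb() now prepares its xdp_buff with metadata support and carries the resulting metasize into skb_metadata_set(), while tun_xdp_one() receives externally built buffers whose metadata may be marked invalid (data_meta == data + 1), which is why only a strictly positive metasize is applied there. A small standalone sketch of that guard, using hypothetical buffer offsets:

/*
 * Illustrative sketch, not kernel code.  An xdp_buff with metadata disabled
 * marks data_meta as data + 1, so the difference goes negative and must not
 * be handed to skb_metadata_set().
 */
#include <stdio.h>

static int metasize_of(const unsigned char *data, const unsigned char *data_meta)
{
	int metasize = (int)(data - data_meta);

	return metasize > 0 ? metasize : 0;	/* only positive sizes are real metadata */
}

int main(void)
{
	unsigned char buf[64];

	/* an XDP program reserved 8 bytes of metadata in front of the packet */
	printf("with metadata:    %d\n", metasize_of(buf + 8, buf));
	/* metadata invalid: data_meta == data + 1, as in tun_xdp_one() */
	printf("metadata invalid: %d\n", metasize_of(buf + 8, buf + 9));
	return 0;
}
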
diff --git a/drivers/net/tun_vnet.h b/drivers/net/tun_vnet.h
new file mode 100644
index 000000000000..58b9ac7a5fc4
--- /dev/null
+++ b/drivers/net/tun_vnet.h
@@ -0,0 +1,186 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef TUN_VNET_H
+#define TUN_VNET_H
+
+/* High bits in flags field are unused. */
+#define TUN_VNET_LE 0x80000000
+#define TUN_VNET_BE 0x40000000
+
+static inline bool tun_vnet_legacy_is_little_endian(unsigned int flags)
+{
+ bool be = IS_ENABLED(CONFIG_TUN_VNET_CROSS_LE) &&
+ (flags & TUN_VNET_BE);
+
+ return !be && virtio_legacy_is_little_endian();
+}
+
+static inline long tun_get_vnet_be(unsigned int flags, int __user *argp)
+{
+ int be = !!(flags & TUN_VNET_BE);
+
+ if (!IS_ENABLED(CONFIG_TUN_VNET_CROSS_LE))
+ return -EINVAL;
+
+ if (put_user(be, argp))
+ return -EFAULT;
+
+ return 0;
+}
+
+static inline long tun_set_vnet_be(unsigned int *flags, int __user *argp)
+{
+ int be;
+
+ if (!IS_ENABLED(CONFIG_TUN_VNET_CROSS_LE))
+ return -EINVAL;
+
+ if (get_user(be, argp))
+ return -EFAULT;
+
+ if (be)
+ *flags |= TUN_VNET_BE;
+ else
+ *flags &= ~TUN_VNET_BE;
+
+ return 0;
+}
+
+static inline bool tun_vnet_is_little_endian(unsigned int flags)
+{
+ return flags & TUN_VNET_LE || tun_vnet_legacy_is_little_endian(flags);
+}
+
+static inline u16 tun_vnet16_to_cpu(unsigned int flags, __virtio16 val)
+{
+ return __virtio16_to_cpu(tun_vnet_is_little_endian(flags), val);
+}
+
+static inline __virtio16 cpu_to_tun_vnet16(unsigned int flags, u16 val)
+{
+ return __cpu_to_virtio16(tun_vnet_is_little_endian(flags), val);
+}
+
+static inline long tun_vnet_ioctl(int *vnet_hdr_sz, unsigned int *flags,
+ unsigned int cmd, int __user *sp)
+{
+ int s;
+
+ switch (cmd) {
+ case TUNGETVNETHDRSZ:
+ s = *vnet_hdr_sz;
+ if (put_user(s, sp))
+ return -EFAULT;
+ return 0;
+
+ case TUNSETVNETHDRSZ:
+ if (get_user(s, sp))
+ return -EFAULT;
+ if (s < (int)sizeof(struct virtio_net_hdr))
+ return -EINVAL;
+
+ *vnet_hdr_sz = s;
+ return 0;
+
+ case TUNGETVNETLE:
+ s = !!(*flags & TUN_VNET_LE);
+ if (put_user(s, sp))
+ return -EFAULT;
+ return 0;
+
+ case TUNSETVNETLE:
+ if (get_user(s, sp))
+ return -EFAULT;
+ if (s)
+ *flags |= TUN_VNET_LE;
+ else
+ *flags &= ~TUN_VNET_LE;
+ return 0;
+
+ case TUNGETVNETBE:
+ return tun_get_vnet_be(*flags, sp);
+
+ case TUNSETVNETBE:
+ return tun_set_vnet_be(flags, sp);
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static inline int tun_vnet_hdr_get(int sz, unsigned int flags,
+ struct iov_iter *from,
+ struct virtio_net_hdr *hdr)
+{
+ u16 hdr_len;
+
+ if (iov_iter_count(from) < sz)
+ return -EINVAL;
+
+ if (!copy_from_iter_full(hdr, sizeof(*hdr), from))
+ return -EFAULT;
+
+ hdr_len = tun_vnet16_to_cpu(flags, hdr->hdr_len);
+
+ if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
+ hdr_len = max(tun_vnet16_to_cpu(flags, hdr->csum_start) + tun_vnet16_to_cpu(flags, hdr->csum_offset) + 2, hdr_len);
+ hdr->hdr_len = cpu_to_tun_vnet16(flags, hdr_len);
+ }
+
+ if (hdr_len > iov_iter_count(from))
+ return -EINVAL;
+
+ iov_iter_advance(from, sz - sizeof(*hdr));
+
+ return hdr_len;
+}
+
+static inline int tun_vnet_hdr_put(int sz, struct iov_iter *iter,
+ const struct virtio_net_hdr *hdr)
+{
+ if (unlikely(iov_iter_count(iter) < sz))
+ return -EINVAL;
+
+ if (unlikely(copy_to_iter(hdr, sizeof(*hdr), iter) != sizeof(*hdr)))
+ return -EFAULT;
+
+ if (iov_iter_zero(sz - sizeof(*hdr), iter) != sz - sizeof(*hdr))
+ return -EFAULT;
+
+ return 0;
+}
+
+static inline int tun_vnet_hdr_to_skb(unsigned int flags, struct sk_buff *skb,
+ const struct virtio_net_hdr *hdr)
+{
+ return virtio_net_hdr_to_skb(skb, hdr, tun_vnet_is_little_endian(flags));
+}
+
+static inline int tun_vnet_hdr_from_skb(unsigned int flags,
+ const struct net_device *dev,
+ const struct sk_buff *skb,
+ struct virtio_net_hdr *hdr)
+{
+ int vlan_hlen = skb_vlan_tag_present(skb) ? VLAN_HLEN : 0;
+
+ if (virtio_net_hdr_from_skb(skb, hdr,
+ tun_vnet_is_little_endian(flags), true,
+ vlan_hlen)) {
+ struct skb_shared_info *sinfo = skb_shinfo(skb);
+
+ if (net_ratelimit()) {
+ netdev_err(dev, "unexpected GSO type: 0x%x, gso_size %d, hdr_len %d\n",
+ sinfo->gso_type, tun_vnet16_to_cpu(flags, hdr->gso_size),
+ tun_vnet16_to_cpu(flags, hdr->hdr_len));
+ print_hex_dump(KERN_ERR, "tun: ",
+ DUMP_PREFIX_NONE,
+ 16, 1, skb->head,
+ min(tun_vnet16_to_cpu(flags, hdr->hdr_len), 64), true);
+ }
+ WARN_ON_ONCE(1);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+#endif /* TUN_VNET_H */
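
The new header centralizes the TUN_VNET_LE/TUN_VNET_BE flag handling, the TUN*VNET* ioctls, and the virtio_net_hdr parse/emit paths so tap.c and tun.c can share them. The following is a hedged sketch of how a consumer wires the helpers in, mirroring those two files; the "foo" names are made up, and the usual <linux/uio.h>/<linux/virtio_net.h> includes are assumed to come from the enclosing .c file, as they do for tap.c and tun.c.

/*
 * Hypothetical consumer of tun_vnet.h (sketch only).  The driver keeps an
 * int vnet_hdr_sz and an unsigned int flags word, forwards unknown ioctls
 * to tun_vnet_ioctl(), and uses tun_vnet_hdr_get() on its receive path.
 */
#include "tun_vnet.h"

struct foo_queue {
	unsigned int flags;
	int vnet_hdr_sz;
};

static long foo_ioctl(struct foo_queue *q, unsigned int cmd, int __user *sp)
{
	switch (cmd) {
	/* driver-specific ioctls handled here ... */
	default:
		/* TUN[GS]ETVNETHDRSZ, TUN[GS]ETVNETLE, TUN[GS]ETVNETBE */
		return tun_vnet_ioctl(&q->vnet_hdr_sz, &q->flags, cmd, sp);
	}
}

static int foo_rx_hdr(struct foo_queue *q, struct iov_iter *from,
		      struct virtio_net_hdr *hdr)
{
	int sz = READ_ONCE(q->vnet_hdr_sz);

	/* returns the sanitized hdr_len, or a negative errno */
	return tun_vnet_hdr_get(sz, q->flags, from, hdr);
}
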
diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c
index 57d6e5abc30e..da24941a6e44 100644
--- a/drivers/net/usb/asix_devices.c
+++ b/drivers/net/usb/asix_devices.c
@@ -1421,6 +1421,19 @@ static const struct driver_info hg20f9_info = {
.data = FLAG_EEPROM_MAC,
};
+static const struct driver_info lyconsys_fibergecko100_info = {
+ .description = "LyconSys FiberGecko 100 USB 2.0 to SFP Adapter",
+ .bind = ax88178_bind,
+ .status = asix_status,
+ .link_reset = ax88178_link_reset,
+ .reset = ax88178_link_reset,
+ .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_LINK_INTR |
+ FLAG_MULTI_PACKET,
+ .rx_fixup = asix_rx_fixup_common,
+ .tx_fixup = asix_tx_fixup,
+ .data = 0x20061201,
+};
+
static const struct usb_device_id products [] = {
{
// Linksys USB200M
@@ -1578,6 +1591,10 @@ static const struct usb_device_id products [] = {
// Linux Automation GmbH USB 10Base-T1L
USB_DEVICE(0x33f7, 0x0004),
.driver_info = (unsigned long) &lxausb_t1l_info,
+}, {
+ /* LyconSys FiberGecko 100 */
+ USB_DEVICE(0x1d2a, 0x0801),
+ .driver_info = (unsigned long) &lyconsys_fibergecko100_info,
},
{ }, // END
};
diff --git a/drivers/net/usb/ax88172a.c b/drivers/net/usb/ax88172a.c
index e47bb125048d..f613e4bc68c8 100644
--- a/drivers/net/usb/ax88172a.c
+++ b/drivers/net/usb/ax88172a.c
@@ -18,8 +18,8 @@
struct ax88172a_private {
struct mii_bus *mdio;
struct phy_device *phydev;
- char phy_name[20];
- u16 phy_addr;
+ char phy_name[PHY_ID_SIZE];
+ u8 phy_addr;
u16 oldmode;
int use_embdphy;
struct asix_rx_fixup_info rx_fixup_info;
@@ -210,7 +210,11 @@ static int ax88172a_bind(struct usbnet *dev, struct usb_interface *intf)
ret = asix_read_phy_addr(dev, priv->use_embdphy);
if (ret < 0)
goto free;
-
+ if (ret >= PHY_MAX_ADDR) {
+ netdev_err(dev->net, "Invalid PHY address %#x\n", ret);
+ ret = -ENODEV;
+ goto free;
+ }
priv->phy_addr = ret;
ax88172a_reset_phy(dev, priv->use_embdphy);
@@ -308,7 +312,7 @@ static int ax88172a_reset(struct usbnet *dev)
rx_ctl);
/* Connect to PHY */
- snprintf(priv->phy_name, 20, PHY_ID_FMT,
+ snprintf(priv->phy_name, sizeof(priv->phy_name), PHY_ID_FMT,
priv->mdio->id, priv->phy_addr);
priv->phydev = phy_connect(dev->net, priv->phy_name,
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index a6469235d904..a032c1ded406 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -783,6 +783,13 @@ static const struct usb_device_id products[] = {
.driver_info = 0,
},
+/* Lenovo ThinkPad Hybrid USB-C with USB-A Dock (40af0135eu, based on Realtek RTL8153) */
+{
+ USB_DEVICE_AND_INTERFACE_INFO(LENOVO_VENDOR_ID, 0xa359, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
+ .driver_info = 0,
+},
+
/* Aquantia AQtion USB to 5GbE Controller (based on AQC111U) */
{
USB_DEVICE_AND_INTERFACE_INFO(AQUANTIA_VENDOR_ID, 0xc101,
diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c
index e13e4920ee9b..dbf01210b0e7 100644
--- a/drivers/net/usb/cdc_mbim.c
+++ b/drivers/net/usb/cdc_mbim.c
@@ -660,12 +660,12 @@ static const struct usb_device_id mbim_devs[] = {
.driver_info = (unsigned long)&cdc_mbim_info_avoid_altsetting_toggle,
},
- /* Telit FN990 */
+ /* Telit FN990A */
{ USB_DEVICE_AND_INTERFACE_INFO(0x1bc7, 0x1071, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
.driver_info = (unsigned long)&cdc_mbim_info_avoid_altsetting_toggle,
},
- /* Telit FE990 */
+ /* Telit FE990A */
{ USB_DEVICE_AND_INTERFACE_INFO(0x1bc7, 0x1081, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
.driver_info = (unsigned long)&cdc_mbim_info_avoid_altsetting_toggle,
},
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index d5c47a2a62dc..34e82f1e37d9 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -833,8 +833,7 @@ int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_
ctx->dev = dev;
- hrtimer_init(&ctx->tx_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- ctx->tx_timer.function = &cdc_ncm_tx_timer_cb;
+ hrtimer_setup(&ctx->tx_timer, &cdc_ncm_tx_timer_cb, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
tasklet_setup(&ctx->bh, cdc_ncm_txpath_bh);
atomic_set(&ctx->stop, 0);
spin_lock_init(&ctx->mtx);
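
The cdc_ncm change folds hrtimer_init() plus the open-coded .function assignment into one hrtimer_setup() call. A short sketch of the same pattern, with made-up "foo" names:

/*
 * Sketch of the hrtimer_setup() conversion (illustrative only).
 * The callback keeps the usual enum hrtimer_restart prototype.
 */
#include <linux/hrtimer.h>

struct foo_ctx {
	struct hrtimer tx_timer;
};

static enum hrtimer_restart foo_tx_timer_cb(struct hrtimer *timer)
{
	/* container_of(timer, struct foo_ctx, tx_timer) to reach the context */
	return HRTIMER_NORESTART;
}

static void foo_init(struct foo_ctx *ctx)
{
	/* one call replaces hrtimer_init() plus the .function assignment */
	hrtimer_setup(&ctx->tx_timer, foo_tx_timer_cb, CLOCK_MONOTONIC,
		      HRTIMER_MODE_REL);
}
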
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index a91bf9c7e31d..137adf6d5b08 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -627,7 +627,7 @@ static int lan78xx_read_reg(struct lan78xx_net *dev, u32 index, u32 *data)
kfree(buf);
- return ret;
+ return ret < 0 ? ret : 0;
}
static int lan78xx_write_reg(struct lan78xx_net *dev, u32 index, u32 data)
@@ -658,7 +658,7 @@ static int lan78xx_write_reg(struct lan78xx_net *dev, u32 index, u32 data)
kfree(buf);
- return ret;
+ return ret < 0 ? ret : 0;
}
static int lan78xx_update_reg(struct lan78xx_net *dev, u32 reg, u32 mask,
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index e9208a8d2bfa..b586b1c13a47 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -1360,14 +1360,16 @@ static const struct usb_device_id products[] = {
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1050, 2)}, /* Telit FN980 */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1057, 2)}, /* Telit FN980 */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1060, 2)}, /* Telit LN920 */
- {QMI_QUIRK_SET_DTR(0x1bc7, 0x1070, 2)}, /* Telit FN990 */
- {QMI_QUIRK_SET_DTR(0x1bc7, 0x1080, 2)}, /* Telit FE990 */
+ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1070, 2)}, /* Telit FN990A */
+ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1080, 2)}, /* Telit FE990A */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x10a0, 0)}, /* Telit FN920C04 */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x10a4, 0)}, /* Telit FN920C04 */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x10a9, 0)}, /* Telit FN920C04 */
+ {QMI_QUIRK_SET_DTR(0x1bc7, 0x10b0, 0)}, /* Telit FE990B */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x10c0, 0)}, /* Telit FE910C04 */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x10c4, 0)}, /* Telit FE910C04 */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x10c8, 0)}, /* Telit FE910C04 */
+ {QMI_QUIRK_SET_DTR(0x1bc7, 0x10d0, 0)}, /* Telit FN990B */
{QMI_FIXED_INTF(0x1bc7, 0x1100, 3)}, /* Telit ME910 */
{QMI_FIXED_INTF(0x1bc7, 0x1101, 3)}, /* Telit ME910 dual modem */
{QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 468c73974046..2cab046749a9 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -785,6 +785,7 @@ enum rtl8152_flags {
#define DEVICE_ID_THINKPAD_USB_C_DONGLE 0x720c
#define DEVICE_ID_THINKPAD_USB_C_DOCK_GEN2 0xa387
#define DEVICE_ID_THINKPAD_USB_C_DOCK_GEN3 0x3062
+#define DEVICE_ID_THINKPAD_HYBRID_USB_C_DOCK 0xa359
struct tally_counter {
__le64 tx_packets;
@@ -9787,6 +9788,7 @@ static bool rtl8152_supports_lenovo_macpassthru(struct usb_device *udev)
case DEVICE_ID_THINKPAD_USB_C_DOCK_GEN2:
case DEVICE_ID_THINKPAD_USB_C_DOCK_GEN3:
case DEVICE_ID_THINKPAD_USB_C_DONGLE:
+ case DEVICE_ID_THINKPAD_HYBRID_USB_C_DOCK:
return 1;
}
} else if (vendor_id == VENDOR_ID_REALTEK && parent_vendor_id == VENDOR_ID_LENOVO) {
@@ -10064,6 +10066,8 @@ static const struct usb_device_id rtl8152_table[] = {
{ USB_DEVICE(VENDOR_ID_MICROSOFT, 0x0927) },
{ USB_DEVICE(VENDOR_ID_MICROSOFT, 0x0c5e) },
{ USB_DEVICE(VENDOR_ID_SAMSUNG, 0xa101) },
+
+ /* Lenovo */
{ USB_DEVICE(VENDOR_ID_LENOVO, 0x304f) },
{ USB_DEVICE(VENDOR_ID_LENOVO, 0x3054) },
{ USB_DEVICE(VENDOR_ID_LENOVO, 0x3062) },
@@ -10074,11 +10078,14 @@ static const struct usb_device_id rtl8152_table[] = {
{ USB_DEVICE(VENDOR_ID_LENOVO, 0x720c) },
{ USB_DEVICE(VENDOR_ID_LENOVO, 0x7214) },
{ USB_DEVICE(VENDOR_ID_LENOVO, 0x721e) },
+ { USB_DEVICE(VENDOR_ID_LENOVO, 0xa359) },
{ USB_DEVICE(VENDOR_ID_LENOVO, 0xa387) },
+
{ USB_DEVICE(VENDOR_ID_LINKSYS, 0x0041) },
{ USB_DEVICE(VENDOR_ID_NVIDIA, 0x09ff) },
{ USB_DEVICE(VENDOR_ID_TPLINK, 0x0601) },
{ USB_DEVICE(VENDOR_ID_DLINK, 0xb301) },
+ { USB_DEVICE(VENDOR_ID_DELL, 0xb097) },
{ USB_DEVICE(VENDOR_ID_ASUS, 0x1976) },
{}
};
diff --git a/drivers/net/usb/r8153_ecm.c b/drivers/net/usb/r8153_ecm.c
index 20b2df8d74ae..8d860dacdf49 100644
--- a/drivers/net/usb/r8153_ecm.c
+++ b/drivers/net/usb/r8153_ecm.c
@@ -135,6 +135,12 @@ static const struct usb_device_id products[] = {
USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
.driver_info = (unsigned long)&r8153_info,
},
+/* Lenovo ThinkPad Hybrid USB-C with USB-A Dock (40af0135eu, based on Realtek RTL8153) */
+{
+ USB_DEVICE_AND_INTERFACE_INFO(VENDOR_ID_LENOVO, 0xa359, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
+ .driver_info = (unsigned long)&r8153_info,
+},
{ }, /* END */
};
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 01251868a9c2..7bb53961c0ea 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -17,6 +17,7 @@
#include <net/rtnetlink.h>
#include <net/dst.h>
+#include <net/netdev_lock.h>
#include <net/xfrm.h>
#include <net/xdp.h>
#include <linux/veth.h>
@@ -684,8 +685,7 @@ static void veth_xdp_rcv_bulk_skb(struct veth_rq *rq, void **frames,
void *skbs[VETH_XDP_BATCH];
int i;
- if (xdp_alloc_skb_bulk(skbs, n_xdpf,
- GFP_ATOMIC | __GFP_ZERO) < 0) {
+ if (unlikely(!napi_skb_cache_get_bulk(skbs, n_xdpf))) {
for (i = 0; i < n_xdpf; i++)
xdp_return_frame(frames[i]);
stats->rx_drops += n_xdpf;
@@ -1765,10 +1765,13 @@ static int veth_init_queues(struct net_device *dev, struct nlattr *tb[])
return 0;
}
-static int veth_newlink(struct net *peer_net, struct net_device *dev,
- struct nlattr *tb[], struct nlattr *data[],
+static int veth_newlink(struct net_device *dev,
+ struct rtnl_newlink_params *params,
struct netlink_ext_ack *extack)
{
+ struct net *peer_net = rtnl_newlink_peer_net(params);
+ struct nlattr **data = params->data;
+ struct nlattr **tb = params->tb;
int err;
struct net_device *peer;
struct veth_priv *priv;
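
Both team_newlink() and veth_newlink() move to the rtnl_newlink_params-based ->newlink prototype, retrieving tb[]/data[] (and, for veth, the peer namespace) from the params structure instead of taking them as separate arguments. A hedged sketch combining the two patterns, with a hypothetical "foo" driver and error handling trimmed:

/*
 * Sketch only: new-style ->newlink implementation.  The attribute arrays and
 * the peer namespace come from struct rtnl_newlink_params.
 */
static int foo_newlink(struct net_device *dev,
		       struct rtnl_newlink_params *params,
		       struct netlink_ext_ack *extack)
{
	struct net *peer_net = rtnl_newlink_peer_net(params);
	struct nlattr **data = params->data;
	struct nlattr **tb = params->tb;

	if (!tb[IFLA_ADDRESS])
		eth_hw_addr_random(dev);

	/* ... validate data[], create the peer in peer_net, register dev ... */
	return 0;
}
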
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 7646ddd9bef7..7e4617216a4b 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -360,24 +360,7 @@ struct receive_queue {
struct xdp_buff **xsk_buffs;
};
-/* This structure can contain rss message with maximum settings for indirection table and keysize
- * Note, that default structure that describes RSS configuration virtio_net_rss_config
- * contains same info but can't handle table values.
- * In any case, structure would be passed to virtio hw through sg_buf split by parts
- * because table sizes may be differ according to the device configuration.
- */
#define VIRTIO_NET_RSS_MAX_KEY_SIZE 40
-struct virtio_net_ctrl_rss {
- u32 hash_types;
- u16 indirection_table_mask;
- u16 unclassified_queue;
- u16 hash_cfg_reserved; /* for HASH_CONFIG (see virtio_net_hash_config for details) */
- u16 max_tx_vq;
- u8 hash_key_length;
- u8 key[VIRTIO_NET_RSS_MAX_KEY_SIZE];
-
- u16 *indirection_table;
-};
/* Control VQ buffers: protected by the rtnl lock */
struct control_buf {
@@ -421,7 +404,9 @@ struct virtnet_info {
u16 rss_indir_table_size;
u32 rss_hash_types_supported;
u32 rss_hash_types_saved;
- struct virtio_net_ctrl_rss rss;
+ struct virtio_net_rss_config_hdr *rss_hdr;
+ struct virtio_net_rss_config_trailer rss_trailer;
+ u8 rss_hash_key_data[VIRTIO_NET_RSS_MAX_KEY_SIZE];
/* Has control virtqueue */
bool has_cvq;
@@ -523,23 +508,16 @@ enum virtnet_xmit_type {
VIRTNET_XMIT_TYPE_XSK,
};
-static int rss_indirection_table_alloc(struct virtio_net_ctrl_rss *rss, u16 indir_table_size)
+static size_t virtnet_rss_hdr_size(const struct virtnet_info *vi)
{
- if (!indir_table_size) {
- rss->indirection_table = NULL;
- return 0;
- }
-
- rss->indirection_table = kmalloc_array(indir_table_size, sizeof(u16), GFP_KERNEL);
- if (!rss->indirection_table)
- return -ENOMEM;
+ u16 indir_table_size = vi->has_rss ? vi->rss_indir_table_size : 1;
- return 0;
+ return struct_size(vi->rss_hdr, indirection_table, indir_table_size);
}
-static void rss_indirection_table_free(struct virtio_net_ctrl_rss *rss)
+static size_t virtnet_rss_trailer_size(const struct virtnet_info *vi)
{
- kfree(rss->indirection_table);
+ return struct_size(&vi->rss_trailer, hash_key_data, vi->rss_key_size);
}
/* We use the last two bits of the pointer to distinguish the xmit type. */
@@ -1088,11 +1066,10 @@ static bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q)
return false;
}
-static void check_sq_full_and_disable(struct virtnet_info *vi,
- struct net_device *dev,
- struct send_queue *sq)
+static bool tx_may_stop(struct virtnet_info *vi,
+ struct net_device *dev,
+ struct send_queue *sq)
{
- bool use_napi = sq->napi.weight;
int qnum;
qnum = sq - vi->sq;
@@ -1114,6 +1091,25 @@ static void check_sq_full_and_disable(struct virtnet_info *vi,
u64_stats_update_begin(&sq->stats.syncp);
u64_stats_inc(&sq->stats.stop);
u64_stats_update_end(&sq->stats.syncp);
+
+ return true;
+ }
+
+ return false;
+}
+
+static void check_sq_full_and_disable(struct virtnet_info *vi,
+ struct net_device *dev,
+ struct send_queue *sq)
+{
+ bool use_napi = sq->napi.weight;
+ int qnum;
+
+ qnum = sq - vi->sq;
+
+ if (tx_may_stop(vi, dev, sq)) {
+ struct netdev_queue *txq = netdev_get_tx_queue(dev, qnum);
+
if (use_napi) {
if (unlikely(!virtqueue_enable_cb_delayed(sq->vq)))
virtqueue_napi_schedule(&sq->napi, sq->vq);
@@ -2789,7 +2785,8 @@ static void skb_recv_done(struct virtqueue *rvq)
virtqueue_napi_schedule(&rq->napi, rvq);
}
-static void virtnet_napi_enable(struct virtqueue *vq, struct napi_struct *napi)
+static void virtnet_napi_do_enable(struct virtqueue *vq,
+ struct napi_struct *napi)
{
napi_enable(napi);
@@ -2802,10 +2799,21 @@ static void virtnet_napi_enable(struct virtqueue *vq, struct napi_struct *napi)
local_bh_enable();
}
-static void virtnet_napi_tx_enable(struct virtnet_info *vi,
- struct virtqueue *vq,
- struct napi_struct *napi)
+static void virtnet_napi_enable(struct receive_queue *rq)
{
+ struct virtnet_info *vi = rq->vq->vdev->priv;
+ int qidx = vq2rxq(rq->vq);
+
+ virtnet_napi_do_enable(rq->vq, &rq->napi);
+ netif_queue_set_napi(vi->dev, qidx, NETDEV_QUEUE_TYPE_RX, &rq->napi);
+}
+
+static void virtnet_napi_tx_enable(struct send_queue *sq)
+{
+ struct virtnet_info *vi = sq->vq->vdev->priv;
+ struct napi_struct *napi = &sq->napi;
+ int qidx = vq2txq(sq->vq);
+
if (!napi->weight)
return;
@@ -2817,13 +2825,30 @@ static void virtnet_napi_tx_enable(struct virtnet_info *vi,
return;
}
- return virtnet_napi_enable(vq, napi);
+ virtnet_napi_do_enable(sq->vq, napi);
+ netif_queue_set_napi(vi->dev, qidx, NETDEV_QUEUE_TYPE_TX, napi);
}
-static void virtnet_napi_tx_disable(struct napi_struct *napi)
+static void virtnet_napi_tx_disable(struct send_queue *sq)
{
- if (napi->weight)
+ struct virtnet_info *vi = sq->vq->vdev->priv;
+ struct napi_struct *napi = &sq->napi;
+ int qidx = vq2txq(sq->vq);
+
+ if (napi->weight) {
+ netif_queue_set_napi(vi->dev, qidx, NETDEV_QUEUE_TYPE_TX, NULL);
napi_disable(napi);
+ }
+}
+
+static void virtnet_napi_disable(struct receive_queue *rq)
+{
+ struct virtnet_info *vi = rq->vq->vdev->priv;
+ struct napi_struct *napi = &rq->napi;
+ int qidx = vq2rxq(rq->vq);
+
+ netif_queue_set_napi(vi->dev, qidx, NETDEV_QUEUE_TYPE_RX, NULL);
+ napi_disable(napi);
}
static void refill_work(struct work_struct *work)
@@ -2836,9 +2861,23 @@ static void refill_work(struct work_struct *work)
for (i = 0; i < vi->curr_queue_pairs; i++) {
struct receive_queue *rq = &vi->rq[i];
+ /*
+ * When queue API support is added in the future and the call
+ * below becomes napi_disable_locked, this driver will need to
+ * be refactored.
+ *
+ * One possible solution would be to:
+ * - cancel refill_work with cancel_delayed_work (note:
+ * non-sync)
+ * - cancel refill_work with cancel_delayed_work_sync in
+ * virtnet_remove after the netdev is unregistered
+ * - wrap all of the work in a lock (perhaps the netdev
+ * instance lock)
+ * - check netif_running() and return early to avoid a race
+ */
napi_disable(&rq->napi);
still_empty = !try_fill_recv(vi, rq, GFP_KERNEL);
- virtnet_napi_enable(rq->vq, &rq->napi);
+ virtnet_napi_do_enable(rq->vq, &rq->napi);
/* In theory, this can happen: if we don't get any buffers in
* we will *never* try to fill again.
@@ -3035,8 +3074,8 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
static void virtnet_disable_queue_pair(struct virtnet_info *vi, int qp_index)
{
- virtnet_napi_tx_disable(&vi->sq[qp_index].napi);
- napi_disable(&vi->rq[qp_index].napi);
+ virtnet_napi_tx_disable(&vi->sq[qp_index]);
+ virtnet_napi_disable(&vi->rq[qp_index]);
xdp_rxq_info_unreg(&vi->rq[qp_index].xdp_rxq);
}
@@ -3055,8 +3094,8 @@ static int virtnet_enable_queue_pair(struct virtnet_info *vi, int qp_index)
if (err < 0)
goto err_xdp_reg_mem_model;
- virtnet_napi_enable(vi->rq[qp_index].vq, &vi->rq[qp_index].napi);
- virtnet_napi_tx_enable(vi, vi->sq[qp_index].vq, &vi->sq[qp_index].napi);
+ virtnet_napi_enable(&vi->rq[qp_index]);
+ virtnet_napi_tx_enable(&vi->sq[qp_index]);
return 0;
@@ -3253,15 +3292,10 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
bool use_napi = sq->napi.weight;
bool kick;
- /* Free up any pending old buffers before queueing new ones. */
- do {
- if (use_napi)
- virtqueue_disable_cb(sq->vq);
-
+ if (!use_napi)
free_old_xmit(sq, txq, false);
-
- } while (use_napi && !xmit_more &&
- unlikely(!virtqueue_enable_cb_delayed(sq->vq)));
+ else
+ virtqueue_disable_cb(sq->vq);
/* timestamp packet in software */
skb_tx_timestamp(skb);
@@ -3287,7 +3321,10 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
nf_reset_ct(skb);
}
- check_sq_full_and_disable(vi, dev, sq);
+ if (use_napi)
+ tx_may_stop(vi, dev, sq);
+ else
+ check_sq_full_and_disable(vi, dev, sq);
kick = use_napi ? __netdev_tx_sent_queue(txq, skb->len, xmit_more) :
!xmit_more || netif_xmit_stopped(txq);
@@ -3299,6 +3336,9 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
}
}
+ if (use_napi && kick && unlikely(!virtqueue_enable_cb_delayed(sq->vq)))
+ virtqueue_napi_schedule(&sq->napi, sq->vq);
+
return NETDEV_TX_OK;
}
@@ -3307,7 +3347,7 @@ static void virtnet_rx_pause(struct virtnet_info *vi, struct receive_queue *rq)
bool running = netif_running(vi->dev);
if (running) {
- napi_disable(&rq->napi);
+ virtnet_napi_disable(rq);
virtnet_cancel_dim(vi, &rq->dim);
}
}
@@ -3320,7 +3360,7 @@ static void virtnet_rx_resume(struct virtnet_info *vi, struct receive_queue *rq)
schedule_delayed_work(&vi->refill, 0);
if (running)
- virtnet_napi_enable(rq->vq, &rq->napi);
+ virtnet_napi_enable(rq);
}
static int virtnet_rx_resize(struct virtnet_info *vi,
@@ -3349,7 +3389,7 @@ static void virtnet_tx_pause(struct virtnet_info *vi, struct send_queue *sq)
qindex = sq - vi->sq;
if (running)
- virtnet_napi_tx_disable(&sq->napi);
+ virtnet_napi_tx_disable(sq);
txq = netdev_get_tx_queue(vi->dev, qindex);
@@ -3383,7 +3423,7 @@ static void virtnet_tx_resume(struct virtnet_info *vi, struct send_queue *sq)
__netif_tx_unlock_bh(txq);
if (running)
- virtnet_napi_tx_enable(vi, sq->vq, &sq->napi);
+ virtnet_napi_tx_enable(sq);
}
static int virtnet_tx_resize(struct virtnet_info *vi, struct send_queue *sq,
@@ -3576,15 +3616,16 @@ static void virtnet_rss_update_by_qpairs(struct virtnet_info *vi, u16 queue_pair
for (; i < vi->rss_indir_table_size; ++i) {
indir_val = ethtool_rxfh_indir_default(i, queue_pairs);
- vi->rss.indirection_table[i] = indir_val;
+ vi->rss_hdr->indirection_table[i] = cpu_to_le16(indir_val);
}
- vi->rss.max_tx_vq = queue_pairs;
+ vi->rss_trailer.max_tx_vq = cpu_to_le16(queue_pairs);
}
static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
{
struct virtio_net_ctrl_mq *mq __free(kfree) = NULL;
- struct virtio_net_ctrl_rss old_rss;
+ struct virtio_net_rss_config_hdr *old_rss_hdr;
+ struct virtio_net_rss_config_trailer old_rss_trailer;
struct net_device *dev = vi->dev;
struct scatterlist sg;
@@ -3599,24 +3640,28 @@ static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
* update (VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET below) and return directly.
*/
if (vi->has_rss && !netif_is_rxfh_configured(dev)) {
- memcpy(&old_rss, &vi->rss, sizeof(old_rss));
- if (rss_indirection_table_alloc(&vi->rss, vi->rss_indir_table_size)) {
- vi->rss.indirection_table = old_rss.indirection_table;
+ old_rss_hdr = vi->rss_hdr;
+ old_rss_trailer = vi->rss_trailer;
+ vi->rss_hdr = devm_kzalloc(&dev->dev, virtnet_rss_hdr_size(vi), GFP_KERNEL);
+ if (!vi->rss_hdr) {
+ vi->rss_hdr = old_rss_hdr;
return -ENOMEM;
}
+ *vi->rss_hdr = *old_rss_hdr;
virtnet_rss_update_by_qpairs(vi, queue_pairs);
if (!virtnet_commit_rss_command(vi)) {
/* restore ctrl_rss if commit_rss_command failed */
- rss_indirection_table_free(&vi->rss);
- memcpy(&vi->rss, &old_rss, sizeof(old_rss));
+ devm_kfree(&dev->dev, vi->rss_hdr);
+ vi->rss_hdr = old_rss_hdr;
+ vi->rss_trailer = old_rss_trailer;
dev_warn(&dev->dev, "Fail to set num of queue pairs to %d, because committing RSS failed\n",
queue_pairs);
return -EINVAL;
}
- rss_indirection_table_free(&old_rss);
+ devm_kfree(&dev->dev, old_rss_hdr);
goto succ;
}
@@ -3826,7 +3871,7 @@ static void virtnet_set_affinity(struct virtnet_info *vi)
cpumask_var_t mask;
int stragglers;
int group_size;
- int i, j, cpu;
+ int i, start = 0, cpu;
int num_cpu;
int stride;
@@ -3840,16 +3885,18 @@ static void virtnet_set_affinity(struct virtnet_info *vi)
stragglers = num_cpu >= vi->curr_queue_pairs ?
num_cpu % vi->curr_queue_pairs :
0;
- cpu = cpumask_first(cpu_online_mask);
for (i = 0; i < vi->curr_queue_pairs; i++) {
group_size = stride + (i < stragglers ? 1 : 0);
- for (j = 0; j < group_size; j++) {
+ for_each_online_cpu_wrap(cpu, start) {
+ if (!group_size--) {
+ start = cpu;
+ break;
+ }
cpumask_set_cpu(cpu, mask);
- cpu = cpumask_next_wrap(cpu, cpu_online_mask,
- nr_cpu_ids, false);
}
+
virtqueue_set_affinity(vi->rq[i].vq, mask);
virtqueue_set_affinity(vi->sq[i].vq, mask);
__netif_set_xps_queue(vi->dev, cpumask_bits(mask), i, XPS_CPUS);
@@ -4059,28 +4106,12 @@ static int virtnet_set_ringparam(struct net_device *dev,
static bool virtnet_commit_rss_command(struct virtnet_info *vi)
{
struct net_device *dev = vi->dev;
- struct scatterlist sgs[4];
- unsigned int sg_buf_size;
+ struct scatterlist sgs[2];
/* prepare sgs */
- sg_init_table(sgs, 4);
-
- sg_buf_size = offsetof(struct virtio_net_ctrl_rss, hash_cfg_reserved);
- sg_set_buf(&sgs[0], &vi->rss, sg_buf_size);
-
- if (vi->has_rss) {
- sg_buf_size = sizeof(uint16_t) * vi->rss_indir_table_size;
- sg_set_buf(&sgs[1], vi->rss.indirection_table, sg_buf_size);
- } else {
- sg_set_buf(&sgs[1], &vi->rss.hash_cfg_reserved, sizeof(uint16_t));
- }
-
- sg_buf_size = offsetof(struct virtio_net_ctrl_rss, key)
- - offsetof(struct virtio_net_ctrl_rss, max_tx_vq);
- sg_set_buf(&sgs[2], &vi->rss.max_tx_vq, sg_buf_size);
-
- sg_buf_size = vi->rss_key_size;
- sg_set_buf(&sgs[3], vi->rss.key, sg_buf_size);
+ sg_init_table(sgs, 2);
+ sg_set_buf(&sgs[0], vi->rss_hdr, virtnet_rss_hdr_size(vi));
+ sg_set_buf(&sgs[1], &vi->rss_trailer, virtnet_rss_trailer_size(vi));
if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ,
vi->has_rss ? VIRTIO_NET_CTRL_MQ_RSS_CONFIG
@@ -4097,17 +4128,17 @@ err:
static void virtnet_init_default_rss(struct virtnet_info *vi)
{
- vi->rss.hash_types = vi->rss_hash_types_supported;
+ vi->rss_hdr->hash_types = cpu_to_le32(vi->rss_hash_types_supported);
vi->rss_hash_types_saved = vi->rss_hash_types_supported;
- vi->rss.indirection_table_mask = vi->rss_indir_table_size
- ? vi->rss_indir_table_size - 1 : 0;
- vi->rss.unclassified_queue = 0;
+ vi->rss_hdr->indirection_table_mask = vi->rss_indir_table_size
+ ? cpu_to_le16(vi->rss_indir_table_size - 1) : 0;
+ vi->rss_hdr->unclassified_queue = 0;
virtnet_rss_update_by_qpairs(vi, vi->curr_queue_pairs);
- vi->rss.hash_key_length = vi->rss_key_size;
+ vi->rss_trailer.hash_key_length = vi->rss_key_size;
- netdev_rss_key_fill(vi->rss.key, vi->rss_key_size);
+ netdev_rss_key_fill(vi->rss_hash_key_data, vi->rss_key_size);
}
static void virtnet_get_hashflow(const struct virtnet_info *vi, struct ethtool_rxnfc *info)
@@ -4218,7 +4249,7 @@ static bool virtnet_set_hashflow(struct virtnet_info *vi, struct ethtool_rxnfc *
if (new_hashtypes != vi->rss_hash_types_saved) {
vi->rss_hash_types_saved = new_hashtypes;
- vi->rss.hash_types = vi->rss_hash_types_saved;
+ vi->rss_hdr->hash_types = cpu_to_le32(vi->rss_hash_types_saved);
if (vi->dev->features & NETIF_F_RXHASH)
return virtnet_commit_rss_command(vi);
}
@@ -5398,11 +5429,11 @@ static int virtnet_get_rxfh(struct net_device *dev,
if (rxfh->indir) {
for (i = 0; i < vi->rss_indir_table_size; ++i)
- rxfh->indir[i] = vi->rss.indirection_table[i];
+ rxfh->indir[i] = le16_to_cpu(vi->rss_hdr->indirection_table[i]);
}
if (rxfh->key)
- memcpy(rxfh->key, vi->rss.key, vi->rss_key_size);
+ memcpy(rxfh->key, vi->rss_hash_key_data, vi->rss_key_size);
rxfh->hfunc = ETH_RSS_HASH_TOP;
@@ -5426,7 +5457,7 @@ static int virtnet_set_rxfh(struct net_device *dev,
return -EOPNOTSUPP;
for (i = 0; i < vi->rss_indir_table_size; ++i)
- vi->rss.indirection_table[i] = rxfh->indir[i];
+ vi->rss_hdr->indirection_table[i] = cpu_to_le16(rxfh->indir[i]);
update = true;
}
@@ -5438,7 +5469,7 @@ static int virtnet_set_rxfh(struct net_device *dev,
if (!vi->has_rss && !vi->has_rss_hash_report)
return -EOPNOTSUPP;
- memcpy(vi->rss.key, rxfh->key, vi->rss_key_size);
+ memcpy(vi->rss_hash_key_data, rxfh->key, vi->rss_key_size);
update = true;
}
@@ -5615,8 +5646,11 @@ static void virtnet_freeze_down(struct virtio_device *vdev)
netif_tx_lock_bh(vi->dev);
netif_device_detach(vi->dev);
netif_tx_unlock_bh(vi->dev);
- if (netif_running(vi->dev))
+ if (netif_running(vi->dev)) {
+ rtnl_lock();
virtnet_close(vi->dev);
+ rtnl_unlock();
+ }
}
static int init_vqs(struct virtnet_info *vi);
@@ -5636,7 +5670,9 @@ static int virtnet_restore_up(struct virtio_device *vdev)
enable_rx_mode_work(vi);
if (netif_running(vi->dev)) {
+ rtnl_lock();
err = virtnet_open(vi->dev);
+ rtnl_unlock();
if (err)
return err;
}
@@ -5926,8 +5962,8 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
/* Make sure NAPI is not using any XDP TX queues for RX. */
if (netif_running(dev)) {
for (i = 0; i < vi->max_queue_pairs; i++) {
- napi_disable(&vi->rq[i].napi);
- virtnet_napi_tx_disable(&vi->sq[i].napi);
+ virtnet_napi_disable(&vi->rq[i]);
+ virtnet_napi_tx_disable(&vi->sq[i]);
}
}
@@ -5964,9 +6000,8 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
if (old_prog)
bpf_prog_put(old_prog);
if (netif_running(dev)) {
- virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
- virtnet_napi_tx_enable(vi, vi->sq[i].vq,
- &vi->sq[i].napi);
+ virtnet_napi_enable(&vi->rq[i]);
+ virtnet_napi_tx_enable(&vi->sq[i]);
}
}
@@ -5981,9 +6016,8 @@ err:
if (netif_running(dev)) {
for (i = 0; i < vi->max_queue_pairs; i++) {
- virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
- virtnet_napi_tx_enable(vi, vi->sq[i].vq,
- &vi->sq[i].napi);
+ virtnet_napi_enable(&vi->rq[i]);
+ virtnet_napi_tx_enable(&vi->sq[i]);
}
}
if (prog)
@@ -6044,9 +6078,9 @@ static int virtnet_set_features(struct net_device *dev,
if ((dev->features ^ features) & NETIF_F_RXHASH) {
if (features & NETIF_F_RXHASH)
- vi->rss.hash_types = vi->rss_hash_types_saved;
+ vi->rss_hdr->hash_types = cpu_to_le32(vi->rss_hash_types_saved);
else
- vi->rss.hash_types = VIRTIO_NET_HASH_REPORT_NONE;
+ vi->rss_hdr->hash_types = cpu_to_le32(VIRTIO_NET_HASH_REPORT_NONE);
if (!virtnet_commit_rss_command(vi))
return -EINVAL;
@@ -6390,8 +6424,9 @@ static int virtnet_alloc_queues(struct virtnet_info *vi)
INIT_DELAYED_WORK(&vi->refill, refill_work);
for (i = 0; i < vi->max_queue_pairs; i++) {
vi->rq[i].pages = NULL;
- netif_napi_add_weight(vi->dev, &vi->rq[i].napi, virtnet_poll,
- napi_weight);
+ netif_napi_add_config(vi->dev, &vi->rq[i].napi, virtnet_poll,
+ i);
+ vi->rq[i].napi.weight = napi_weight;
netif_napi_add_tx_weight(vi->dev, &vi->sq[i].napi,
virtnet_poll_tx,
napi_tx ? napi_weight : 0);
@@ -6735,9 +6770,11 @@ static int virtnet_probe(struct virtio_device *vdev)
virtio_cread16(vdev, offsetof(struct virtio_net_config,
rss_max_indirection_table_length));
}
- err = rss_indirection_table_alloc(&vi->rss, vi->rss_indir_table_size);
- if (err)
+ vi->rss_hdr = devm_kzalloc(&vdev->dev, virtnet_rss_hdr_size(vi), GFP_KERNEL);
+ if (!vi->rss_hdr) {
+ err = -ENOMEM;
goto free;
+ }
if (vi->has_rss || vi->has_rss_hash_report) {
vi->rss_key_size =
@@ -7016,8 +7053,6 @@ static void virtnet_remove(struct virtio_device *vdev)
remove_vq_common(vi);
- rss_indirection_table_free(&vi->rss);
-
free_netdev(vi->dev);
}
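
The RSS rework above drops the driver-private struct virtio_net_ctrl_rss in favour of the UAPI header/trailer layout and computes both sizes with struct_size(), so the flexible indirection table and hash key no longer need manual offset arithmetic or a four-entry scatterlist. A generic sketch of that sizing pattern (the struct and names here are illustrative, not the virtio ones):

#include <linux/overflow.h>
#include <linux/slab.h>

/* Illustrative config blob ending in a flexible array, sized with
 * struct_size() the same way virtnet_rss_hdr_size() does above.
 */
struct example_cfg_hdr {
	__le32 flags;
	__le16 table_mask;
	__le16 table[];			/* length chosen at runtime */
};

static struct example_cfg_hdr *example_cfg_alloc(u16 entries)
{
	struct example_cfg_hdr *hdr;

	/* struct_size() == sizeof(*hdr) + entries * sizeof(hdr->table[0]),
	 * with integer-overflow checking folded in.
	 */
	hdr = kzalloc(struct_size(hdr, table, entries), GFP_KERNEL);
	if (!hdr)
		return NULL;

	hdr->table_mask = cpu_to_le16(entries ? entries - 1 : 0);
	return hdr;
}
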
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 6793fa09f9d1..3df6aabc7e33 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -2033,6 +2033,11 @@ vmxnet3_rq_cleanup(struct vmxnet3_rx_queue *rq,
rq->comp_ring.gen = VMXNET3_INIT_GEN;
rq->comp_ring.next2proc = 0;
+
+ if (xdp_rxq_info_is_reg(&rq->xdp_rxq))
+ xdp_rxq_info_unreg(&rq->xdp_rxq);
+ page_pool_destroy(rq->page_pool);
+ rq->page_pool = NULL;
}
@@ -2073,11 +2078,6 @@ static void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq,
}
}
- if (xdp_rxq_info_is_reg(&rq->xdp_rxq))
- xdp_rxq_info_unreg(&rq->xdp_rxq);
- page_pool_destroy(rq->page_pool);
- rq->page_pool = NULL;
-
if (rq->data_ring.base) {
dma_free_coherent(&adapter->pdev->dev,
rq->rx_ring[0].size * rq->data_ring.desc_size,
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index ca81b212a246..7168b33adadb 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -34,6 +34,7 @@
#include <net/addrconf.h>
#include <net/l3mdev.h>
#include <net/fib_rules.h>
+#include <net/netdev_lock.h>
#include <net/sch_generic.h>
#include <net/netns/generic.h>
#include <net/netfilter/nf_conntrack.h>
@@ -1537,14 +1538,12 @@ static int vrf_fib_rule(const struct net_device *dev, __u8 family, bool add_it)
nlmsg_end(skb, nlh);
- /* fib_nl_{new,del}rule handling looks for net from skb->sk */
- skb->sk = dev_net(dev)->rtnl;
if (add_it) {
- err = fib_nl_newrule(skb, nlh, NULL);
+ err = fib_newrule(dev_net(dev), skb, nlh, NULL, true);
if (err == -EEXIST)
err = 0;
} else {
- err = fib_nl_delrule(skb, nlh, NULL);
+ err = fib_delrule(dev_net(dev), skb, nlh, NULL, true);
if (err == -ENOENT)
err = 0;
}
@@ -1619,7 +1618,7 @@ static void vrf_setup(struct net_device *dev)
dev->lltx = true;
/* don't allow vrf devices to change network namespaces. */
- dev->netns_local = true;
+ dev->netns_immutable = true;
/* does not make sense for a VLAN to be added to a vrf device */
dev->features |= NETIF_F_VLAN_CHALLENGED;
@@ -1677,11 +1676,12 @@ static void vrf_dellink(struct net_device *dev, struct list_head *head)
unregister_netdevice_queue(dev, head);
}
-static int vrf_newlink(struct net *src_net, struct net_device *dev,
- struct nlattr *tb[], struct nlattr *data[],
+static int vrf_newlink(struct net_device *dev,
+ struct rtnl_newlink_params *params,
struct netlink_ext_ack *extack)
{
struct net_vrf *vrf = netdev_priv(dev);
+ struct nlattr **data = params->data;
struct netns_vrf *nn_vrf;
bool *add_fib_rules;
struct net *net;
diff --git a/drivers/net/vxlan/vxlan_core.c b/drivers/net/vxlan/vxlan_core.c
index 03175419bf28..8c49e903cb3a 100644
--- a/drivers/net/vxlan/vxlan_core.c
+++ b/drivers/net/vxlan/vxlan_core.c
@@ -25,6 +25,7 @@
#include <net/inet_ecn.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
+#include <net/netdev_lock.h>
#include <net/tun_proto.h>
#include <net/vxlan.h>
#include <net/nexthop.h>
@@ -1670,7 +1671,6 @@ static bool vxlan_ecn_decapsulate(struct vxlan_sock *vs, void *oiph,
return err <= 1;
}
-/* Callback from net/ipv4/udp.c to receive packets */
static int vxlan_rcv(struct sock *sk, struct sk_buff *skb)
{
struct vxlan_vni_node *vninode = NULL;
@@ -1840,7 +1840,6 @@ drop:
return 0;
}
-/* Callback from net/ipv{4,6}/udp.c to check that we have a VNI for errors */
static int vxlan_err_lookup(struct sock *sk, struct sk_buff *skb)
{
struct vxlan_dev *vxlan;
@@ -3938,7 +3937,7 @@ static void vxlan_config_apply(struct net_device *dev,
}
static int vxlan_dev_configure(struct net *src_net, struct net_device *dev,
- struct vxlan_config *conf, bool changelink,
+ struct vxlan_config *conf,
struct netlink_ext_ack *extack)
{
struct vxlan_dev *vxlan = netdev_priv(dev);
@@ -3949,7 +3948,7 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev,
if (ret)
return ret;
- vxlan_config_apply(dev, conf, lowerdev, src_net, changelink);
+ vxlan_config_apply(dev, conf, lowerdev, src_net, false);
return 0;
}
@@ -3967,7 +3966,7 @@ static int __vxlan_dev_create(struct net *net, struct net_device *dev,
int err;
dst = &vxlan->default_dst;
- err = vxlan_dev_configure(net, dev, conf, false, extack);
+ err = vxlan_dev_configure(net, dev, conf, extack);
if (err)
return err;
@@ -4402,10 +4401,13 @@ static int vxlan_nl2conf(struct nlattr *tb[], struct nlattr *data[],
return 0;
}
-static int vxlan_newlink(struct net *src_net, struct net_device *dev,
- struct nlattr *tb[], struct nlattr *data[],
+static int vxlan_newlink(struct net_device *dev,
+ struct rtnl_newlink_params *params,
struct netlink_ext_ack *extack)
{
+ struct net *link_net = rtnl_newlink_link_net(params);
+ struct nlattr **data = params->data;
+ struct nlattr **tb = params->tb;
struct vxlan_config conf;
int err;
@@ -4413,7 +4415,7 @@ static int vxlan_newlink(struct net *src_net, struct net_device *dev,
if (err)
return err;
- return __vxlan_dev_create(src_net, dev, &conf, extack);
+ return __vxlan_dev_create(link_net, dev, &conf, extack);
}
static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[],
@@ -4421,6 +4423,7 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[],
struct netlink_ext_ack *extack)
{
struct vxlan_dev *vxlan = netdev_priv(dev);
+ bool rem_ip_changed, change_igmp;
struct net_device *lowerdev;
struct vxlan_config conf;
struct vxlan_rdst *dst;
@@ -4444,8 +4447,13 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[],
if (err)
return err;
+ rem_ip_changed = !vxlan_addr_equal(&conf.remote_ip, &dst->remote_ip);
+ change_igmp = vxlan->dev->flags & IFF_UP &&
+ (rem_ip_changed ||
+ dst->remote_ifindex != conf.remote_ifindex);
+
/* handle default dst entry */
- if (!vxlan_addr_equal(&conf.remote_ip, &dst->remote_ip)) {
+ if (rem_ip_changed) {
u32 hash_index = fdb_head_index(vxlan, all_zeros_mac, conf.vni);
spin_lock_bh(&vxlan->hash_lock[hash_index]);
@@ -4489,6 +4497,9 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[],
}
}
+ if (change_igmp && vxlan_addr_multicast(&dst->remote_ip))
+ err = vxlan_multicast_leave(vxlan);
+
if (conf.age_interval != vxlan->cfg.age_interval)
mod_timer(&vxlan->age_timer, jiffies);
@@ -4496,7 +4507,12 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[],
if (lowerdev && lowerdev != dst->remote_dev)
dst->remote_dev = lowerdev;
vxlan_config_apply(dev, &conf, lowerdev, vxlan->net, true);
- return 0;
+
+ if (!err && change_igmp &&
+ vxlan_addr_multicast(&dst->remote_ip))
+ err = vxlan_multicast_join(vxlan);
+
+ return err;
}
static void vxlan_dellink(struct net_device *dev, struct list_head *head)
diff --git a/drivers/net/wireguard/device.c b/drivers/net/wireguard/device.c
index 6cf173a008e7..c496d35b266d 100644
--- a/drivers/net/wireguard/device.c
+++ b/drivers/net/wireguard/device.c
@@ -307,14 +307,15 @@ static void wg_setup(struct net_device *dev)
wg->dev = dev;
}
-static int wg_newlink(struct net *src_net, struct net_device *dev,
- struct nlattr *tb[], struct nlattr *data[],
+static int wg_newlink(struct net_device *dev,
+ struct rtnl_newlink_params *params,
struct netlink_ext_ack *extack)
{
+ struct net *link_net = rtnl_newlink_link_net(params);
struct wg_device *wg = netdev_priv(dev);
int ret = -ENOMEM;
- rcu_assign_pointer(wg->creating_net, src_net);
+ rcu_assign_pointer(wg->creating_net, link_net);
init_rwsem(&wg->static_identity.lock);
mutex_init(&wg->socket_update_lock);
mutex_init(&wg->device_update_lock);
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
index b3294287bce1..6d336e39d673 100644
--- a/drivers/net/wireless/ath/ath10k/core.c
+++ b/drivers/net/wireless/ath/ath10k/core.c
@@ -1163,9 +1163,12 @@ int ath10k_core_check_dt(struct ath10k *ar)
if (!node)
return -ENOENT;
- of_property_read_string(node, "qcom,ath10k-calibration-variant",
+ of_property_read_string(node, "qcom,calibration-variant",
&variant);
if (!variant)
+ of_property_read_string(node, "qcom,ath10k-calibration-variant",
+ &variant);
+ if (!variant)
return -ENODATA;
if (strscpy(ar->id.bdf_ext, variant, sizeof(ar->id.bdf_ext)) < 0)
@@ -2259,7 +2262,9 @@ static int ath10k_core_pre_cal_download(struct ath10k *ar)
"boot did not find a pre calibration file, try DT next: %d\n",
ret);
- ret = ath10k_download_cal_dt(ar, "qcom,ath10k-pre-calibration-data");
+ ret = ath10k_download_cal_dt(ar, "qcom,pre-calibration-data");
+ if (ret == -ENOENT)
+ ret = ath10k_download_cal_dt(ar, "qcom,ath10k-pre-calibration-data");
if (ret) {
ath10k_dbg(ar, ATH10K_DBG_BOOT,
"unable to load pre cal data from DT: %d\n", ret);
@@ -2337,7 +2342,9 @@ static int ath10k_download_cal_data(struct ath10k *ar)
"boot did not find a calibration file, try DT next: %d\n",
ret);
- ret = ath10k_download_cal_dt(ar, "qcom,ath10k-calibration-data");
+ ret = ath10k_download_cal_dt(ar, "qcom,calibration-data");
+ if (ret == -ENOENT)
+ ret = ath10k_download_cal_dt(ar, "qcom,ath10k-calibration-data");
if (ret == 0) {
ar->cal_mode = ATH10K_CAL_MODE_DT;
goto done;
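
Both calibration lookups above now try the generic "qcom,calibration-*" DT property first and only fall back to the legacy ath10k-prefixed name, so existing device trees keep working. The same preferred-then-legacy pattern, condensed into a small sketch (the helper name is hypothetical):

#include <linux/of.h>

static const char *example_read_cal_variant(struct device_node *node)
{
	const char *variant = NULL;

	/* preferred, generic property name */
	of_property_read_string(node, "qcom,calibration-variant", &variant);
	if (!variant)
		/* legacy, driver-prefixed fallback */
		of_property_read_string(node, "qcom,ath10k-calibration-variant",
					&variant);

	return variant;	/* NULL if neither property is present */
}
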
diff --git a/drivers/net/wireless/ath/ath11k/Makefile b/drivers/net/wireless/ath/ath11k/Makefile
index 43d2d8ddcdc0..d9092414b362 100644
--- a/drivers/net/wireless/ath/ath11k/Makefile
+++ b/drivers/net/wireless/ath/ath11k/Makefile
@@ -27,6 +27,7 @@ ath11k-$(CONFIG_ATH11K_TRACING) += trace.o
ath11k-$(CONFIG_THERMAL) += thermal.o
ath11k-$(CONFIG_ATH11K_SPECTRAL) += spectral.o
ath11k-$(CONFIG_PM) += wow.o
+ath11k-$(CONFIG_DEV_COREDUMP) += coredump.o
obj-$(CONFIG_ATH11K_AHB) += ath11k_ahb.o
ath11k_ahb-y += ahb.o
diff --git a/drivers/net/wireless/ath/ath11k/ahb.c b/drivers/net/wireless/ath/ath11k/ahb.c
index f2fc04596d48..eedba3766ba2 100644
--- a/drivers/net/wireless/ath/ath11k/ahb.c
+++ b/drivers/net/wireless/ath/ath11k/ahb.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2022-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/module.h>
@@ -1290,6 +1290,7 @@ static void ath11k_ahb_remove(struct platform_device *pdev)
ath11k_core_deinit(ab);
qmi_fail:
+ ath11k_fw_destroy(ab);
ath11k_ahb_free_resources(ab);
}
@@ -1309,6 +1310,7 @@ static void ath11k_ahb_shutdown(struct platform_device *pdev)
ath11k_core_deinit(ab);
free_resources:
+ ath11k_fw_destroy(ab);
ath11k_ahb_free_resources(ab);
}
diff --git a/drivers/net/wireless/ath/ath11k/core.c b/drivers/net/wireless/ath/ath11k/core.c
index c576bbba52bf..3d39ff85ba94 100644
--- a/drivers/net/wireless/ath/ath11k/core.c
+++ b/drivers/net/wireless/ath/ath11k/core.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/module.h>
@@ -1175,9 +1175,12 @@ int ath11k_core_check_dt(struct ath11k_base *ab)
if (!node)
return -ENOENT;
- of_property_read_string(node, "qcom,ath11k-calibration-variant",
+ of_property_read_string(node, "qcom,calibration-variant",
&variant);
if (!variant)
+ of_property_read_string(node, "qcom,ath11k-calibration-variant",
+ &variant);
+ if (!variant)
return -ENODATA;
if (strscpy(ab->qmi.target.bdf_ext, variant, max_len) < 0)
@@ -2056,6 +2059,7 @@ void ath11k_core_halt(struct ath11k *ar)
ath11k_mac_scan_finish(ar);
ath11k_mac_peer_cleanup_all(ar);
cancel_delayed_work_sync(&ar->scan.timeout);
+ cancel_work_sync(&ar->channel_update_work);
cancel_work_sync(&ar->regd_update_work);
cancel_work_sync(&ab->update_11d_work);
@@ -2257,6 +2261,7 @@ static void ath11k_core_reset(struct work_struct *work)
reinit_completion(&ab->recovery_start);
atomic_set(&ab->recovery_start_count, 0);
+ ath11k_coredump_collect(ab);
ath11k_core_pre_reconfigure_recovery(ab);
reinit_completion(&ab->reconfigure_complete);
@@ -2346,7 +2351,6 @@ void ath11k_core_deinit(struct ath11k_base *ab)
ath11k_hif_power_down(ab);
ath11k_mac_destroy(ab);
ath11k_core_soc_destroy(ab);
- ath11k_fw_destroy(ab);
}
EXPORT_SYMBOL(ath11k_core_deinit);
@@ -2393,6 +2397,7 @@ struct ath11k_base *ath11k_core_alloc(struct device *dev, size_t priv_size,
INIT_WORK(&ab->restart_work, ath11k_core_restart);
INIT_WORK(&ab->update_11d_work, ath11k_update_11d);
INIT_WORK(&ab->reset_work, ath11k_core_reset);
+ INIT_WORK(&ab->dump_work, ath11k_coredump_upload);
timer_setup(&ab->rx_replenish_retry, ath11k_ce_rx_replenish_retry, 0);
init_completion(&ab->htc_suspend);
init_completion(&ab->wow.wakeup_completed);
diff --git a/drivers/net/wireless/ath/ath11k/core.h b/drivers/net/wireless/ath/ath11k/core.h
index a9dc7fe7765a..1a3d0de4afde 100644
--- a/drivers/net/wireless/ath/ath11k/core.h
+++ b/drivers/net/wireless/ath/ath11k/core.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH11K_CORE_H
@@ -32,6 +32,7 @@
#include "spectral.h"
#include "wow.h"
#include "fw.h"
+#include "coredump.h"
#define SM(_v, _f) (((_v) << _f##_LSB) & _f##_MASK)
@@ -370,6 +371,7 @@ struct ath11k_vif {
struct ieee80211_vif *vif;
struct wmi_wmm_params_all_arg wmm_params;
+ struct wmi_wmm_params_all_arg muedca_params;
struct list_head list;
union {
struct {
@@ -685,7 +687,7 @@ struct ath11k {
struct mutex conf_mutex;
/* protects the radio specific data like debug stats, ppdu_stats_info stats,
* vdev_stop_status info, scan data, ath11k_sta info, ath11k_vif info,
- * channel context data, survey info, test mode data.
+ * channel context data, survey info, test mode data, channel_update_queue.
*/
spinlock_t data_lock;
@@ -743,6 +745,9 @@ struct ath11k {
struct completion bss_survey_done;
struct work_struct regd_update_work;
+ struct work_struct channel_update_work;
+ /* protected with data_lock */
+ struct list_head channel_update_queue;
struct work_struct wmi_mgmt_tx_work;
struct sk_buff_head wmi_mgmt_tx_queue;
@@ -900,6 +905,10 @@ struct ath11k_base {
/* HW channel counters frequency value in hertz common to all MACs */
u32 cc_freq_hz;
+ struct ath11k_dump_file_data *dump_data;
+ size_t ath11k_coredump_len;
+ struct work_struct dump_work;
+
struct ath11k_htc htc;
struct ath11k_dp dp;
diff --git a/drivers/net/wireless/ath/ath11k/coredump.c b/drivers/net/wireless/ath/ath11k/coredump.c
new file mode 100644
index 000000000000..b8bad358cebe
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/coredump.c
@@ -0,0 +1,52 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2020 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+#include <linux/devcoredump.h>
+#include "hif.h"
+#include "coredump.h"
+#include "debug.h"
+
+enum
+ath11k_fw_crash_dump_type ath11k_coredump_get_dump_type(int type)
+{
+ enum ath11k_fw_crash_dump_type dump_type;
+
+ switch (type) {
+ case HOST_DDR_REGION_TYPE:
+ dump_type = FW_CRASH_DUMP_REMOTE_MEM_DATA;
+ break;
+ case M3_DUMP_REGION_TYPE:
+ dump_type = FW_CRASH_DUMP_M3_DUMP;
+ break;
+ case PAGEABLE_MEM_REGION_TYPE:
+ dump_type = FW_CRASH_DUMP_PAGEABLE_DATA;
+ break;
+ case BDF_MEM_REGION_TYPE:
+ case CALDB_MEM_REGION_TYPE:
+ dump_type = FW_CRASH_DUMP_NONE;
+ break;
+ default:
+ dump_type = FW_CRASH_DUMP_TYPE_MAX;
+ break;
+ }
+
+ return dump_type;
+}
+EXPORT_SYMBOL(ath11k_coredump_get_dump_type);
+
+void ath11k_coredump_upload(struct work_struct *work)
+{
+ struct ath11k_base *ab = container_of(work, struct ath11k_base, dump_work);
+
+ ath11k_info(ab, "Uploading coredump\n");
+ /* dev_coredumpv() takes ownership of the buffer */
+ dev_coredumpv(ab->dev, ab->dump_data, ab->ath11k_coredump_len, GFP_KERNEL);
+ ab->dump_data = NULL;
+}
+
+void ath11k_coredump_collect(struct ath11k_base *ab)
+{
+ ath11k_hif_coredump_download(ab);
+}
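
ath11k_coredump_upload() hands ab->dump_data to dev_coredumpv(), which takes ownership of the buffer and exposes it through the devcoredump class until userspace reads or discards it. The file format itself is defined by struct ath11k_dump_file_data in coredump.h below; what follows is only a hedged sketch of how a headered blob is typically assembled and handed off, not the driver's actual dump-collection path.

#include <linux/devcoredump.h>
#include <linux/vmalloc.h>
#include <linux/uuid.h>
#include <linux/string.h>

/* Hypothetical dump header, loosely modelled on ath11k_dump_file_data. */
struct example_dump_hdr {
	char magic[16];
	__le32 len;
	guid_t guid;
	u8 data[];
} __packed;

static void example_coredump(struct device *dev, const void *fw_mem, size_t fw_len)
{
	struct example_dump_hdr *hdr;
	size_t total = sizeof(*hdr) + fw_len;

	hdr = vzalloc(total);
	if (!hdr)
		return;

	strscpy(hdr->magic, "EXAMPLE-FW-DUMP", sizeof(hdr->magic));
	hdr->len = cpu_to_le32(total);
	guid_gen(&hdr->guid);
	memcpy(hdr->data, fw_mem, fw_len);

	/* devcoredump now owns 'hdr'; do not free it here */
	dev_coredumpv(dev, hdr, total, GFP_KERNEL);
}
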
diff --git a/drivers/net/wireless/ath/ath11k/coredump.h b/drivers/net/wireless/ath/ath11k/coredump.h
new file mode 100644
index 000000000000..3960d9385261
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/coredump.h
@@ -0,0 +1,79 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2020 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+#ifndef _ATH11K_COREDUMP_H_
+#define _ATH11K_COREDUMP_H_
+
+#define ATH11K_FW_CRASH_DUMP_V2 2
+
+enum ath11k_fw_crash_dump_type {
+ FW_CRASH_DUMP_PAGING_DATA,
+ FW_CRASH_DUMP_RDDM_DATA,
+ FW_CRASH_DUMP_REMOTE_MEM_DATA,
+ FW_CRASH_DUMP_PAGEABLE_DATA,
+ FW_CRASH_DUMP_M3_DUMP,
+ FW_CRASH_DUMP_NONE,
+
+ /* keep last */
+ FW_CRASH_DUMP_TYPE_MAX,
+};
+
+#define COREDUMP_TLV_HDR_SIZE 8
+
+struct ath11k_tlv_dump_data {
+ /* see ath11k_fw_crash_dump_type above */
+ __le32 type;
+
+ /* in bytes */
+ __le32 tlv_len;
+
+ /* pad to 32-bit boundaries as needed */
+ u8 tlv_data[];
+} __packed;
+
+struct ath11k_dump_file_data {
+ /* "ATH11K-FW-DUMP" */
+ char df_magic[16];
+ /* total dump len in bytes */
+ __le32 len;
+ /* file dump version */
+ __le32 version;
+ /* pci device id */
+ __le32 chip_id;
+ /* qrtr instance id */
+ __le32 qrtr_id;
+ /* pci domain id */
+ __le32 bus_id;
+ guid_t guid;
+ /* time-of-day stamp */
+ __le64 tv_sec;
+ /* time-of-day stamp, nano-seconds */
+ __le64 tv_nsec;
+ /* room for growth w/out changing binary format */
+ u8 unused[128];
+ u8 data[];
+} __packed;
+
+#ifdef CONFIG_DEV_COREDUMP
+enum ath11k_fw_crash_dump_type ath11k_coredump_get_dump_type(int type);
+void ath11k_coredump_upload(struct work_struct *work);
+void ath11k_coredump_collect(struct ath11k_base *ab);
+#else
+static inline enum
+ath11k_fw_crash_dump_type ath11k_coredump_get_dump_type(int type)
+{
+ return FW_CRASH_DUMP_TYPE_MAX;
+}
+
+static inline void ath11k_coredump_upload(struct work_struct *work)
+{
+}
+
+static inline void ath11k_coredump_collect(struct ath11k_base *ab)
+{
+}
+#endif
+
+#endif
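
Dump payloads are framed as TLVs using struct ath11k_tlv_dump_data above: a little-endian type, a byte length, then the data padded to a 32-bit boundary. A short sketch of appending one such segment (the caller is assumed to have sized the destination buffer):

/* Sketch: append one TLV to a dump buffer using the layout defined above. */
static u8 *example_put_tlv(u8 *buf, enum ath11k_fw_crash_dump_type type,
			   const void *seg, u32 seg_len)
{
	struct ath11k_tlv_dump_data *tlv = (struct ath11k_tlv_dump_data *)buf;

	tlv->type = cpu_to_le32(type);
	tlv->tlv_len = cpu_to_le32(seg_len);
	memcpy(tlv->tlv_data, seg, seg_len);

	/* next TLV starts after the 8-byte header plus the padded payload */
	return buf + COREDUMP_TLV_HDR_SIZE + ALIGN(seg_len, 4);
}
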
diff --git a/drivers/net/wireless/ath/ath11k/dp.c b/drivers/net/wireless/ath/ath11k/dp.c
index fbf666d0ecf1..f124b7329e1a 100644
--- a/drivers/net/wireless/ath/ath11k/dp.c
+++ b/drivers/net/wireless/ath/ath11k/dp.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <crypto/hash.h>
@@ -104,14 +104,12 @@ void ath11k_dp_srng_cleanup(struct ath11k_base *ab, struct dp_srng *ring)
if (!ring->vaddr_unaligned)
return;
- if (ring->cached) {
- dma_unmap_single(ab->dev, ring->paddr_unaligned, ring->size,
- DMA_FROM_DEVICE);
- kfree(ring->vaddr_unaligned);
- } else {
+ if (ring->cached)
+ dma_free_noncoherent(ab->dev, ring->size, ring->vaddr_unaligned,
+ ring->paddr_unaligned, DMA_FROM_DEVICE);
+ else
dma_free_coherent(ab->dev, ring->size, ring->vaddr_unaligned,
ring->paddr_unaligned);
- }
ring->vaddr_unaligned = NULL;
}
@@ -249,25 +247,14 @@ int ath11k_dp_srng_setup(struct ath11k_base *ab, struct dp_srng *ring,
default:
cached = false;
}
-
- if (cached) {
- ring->vaddr_unaligned = kzalloc(ring->size, GFP_KERNEL);
- if (!ring->vaddr_unaligned)
- return -ENOMEM;
-
- ring->paddr_unaligned = dma_map_single(ab->dev,
- ring->vaddr_unaligned,
- ring->size,
- DMA_FROM_DEVICE);
- if (dma_mapping_error(ab->dev, ring->paddr_unaligned)) {
- kfree(ring->vaddr_unaligned);
- ring->vaddr_unaligned = NULL;
- return -ENOMEM;
- }
- }
}
- if (!cached)
+ if (cached)
+ ring->vaddr_unaligned = dma_alloc_noncoherent(ab->dev, ring->size,
+ &ring->paddr_unaligned,
+ DMA_FROM_DEVICE,
+ GFP_KERNEL);
+ else
ring->vaddr_unaligned = dma_alloc_coherent(ab->dev, ring->size,
&ring->paddr_unaligned,
GFP_KERNEL);
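
The dp.c hunk above (and the dp_rx.c reordering-queue changes below) replaces kzalloc()+dma_map_single() with dma_alloc_noncoherent(), which returns CPU-cacheable memory together with its DMA address in a single call; the cost is that CPU writes must be pushed to the device explicitly with dma_sync_single_for_device(), as the new comment in dp_rx.c notes. A condensed sketch of that lifecycle against a generic struct device:

#include <linux/dma-mapping.h>

static int example_ring_setup(struct device *dev, size_t size)
{
	dma_addr_t paddr;
	void *vaddr;

	/* one call gives both the kernel virtual and the DMA address */
	vaddr = dma_alloc_noncoherent(dev, size, &paddr,
				      DMA_BIDIRECTIONAL, GFP_KERNEL);
	if (!vaddr)
		return -ENOMEM;

	memset(vaddr, 0, size);		/* CPU-side initialisation */

	/* flush the CPU writes so the device sees the initialised ring */
	dma_sync_single_for_device(dev, paddr, size, DMA_TO_DEVICE);

	/* ... hand 'paddr' to the hardware and use the ring ... */

	dma_free_noncoherent(dev, size, vaddr, paddr, DMA_BIDIRECTIONAL);
	return 0;
}
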
diff --git a/drivers/net/wireless/ath/ath11k/dp.h b/drivers/net/wireless/ath/ath11k/dp.h
index f777314db8b3..7a55afd33be8 100644
--- a/drivers/net/wireless/ath/ath11k/dp.h
+++ b/drivers/net/wireless/ath/ath11k/dp.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2023, 2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH11K_DP_H
@@ -20,7 +20,6 @@ struct ath11k_ext_irq_grp;
struct dp_rx_tid {
u8 tid;
- u32 *vaddr;
dma_addr_t paddr;
u32 size;
u32 ba_win_sz;
@@ -37,6 +36,9 @@ struct dp_rx_tid {
/* Timer info related to fragments */
struct timer_list frag_timer;
struct ath11k_base *ab;
+ u32 *vaddr_unaligned;
+ dma_addr_t paddr_unaligned;
+ u32 unaligned_size;
};
#define DP_REO_DESC_FREE_THRESHOLD 64
diff --git a/drivers/net/wireless/ath/ath11k/dp_rx.c b/drivers/net/wireless/ath/ath11k/dp_rx.c
index 029ecf51c9ef..f2bdbac2a0b7 100644
--- a/drivers/net/wireless/ath/ath11k/dp_rx.c
+++ b/drivers/net/wireless/ath/ath11k/dp_rx.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/ieee80211.h>
@@ -675,11 +675,11 @@ void ath11k_dp_reo_cmd_list_cleanup(struct ath11k_base *ab)
list_for_each_entry_safe(cmd, tmp, &dp->reo_cmd_list, list) {
list_del(&cmd->list);
rx_tid = &cmd->data;
- if (rx_tid->vaddr) {
- dma_unmap_single(ab->dev, rx_tid->paddr,
- rx_tid->size, DMA_BIDIRECTIONAL);
- kfree(rx_tid->vaddr);
- rx_tid->vaddr = NULL;
+ if (rx_tid->vaddr_unaligned) {
+ dma_free_noncoherent(ab->dev, rx_tid->unaligned_size,
+ rx_tid->vaddr_unaligned,
+ rx_tid->paddr_unaligned, DMA_BIDIRECTIONAL);
+ rx_tid->vaddr_unaligned = NULL;
}
kfree(cmd);
}
@@ -689,11 +689,11 @@ void ath11k_dp_reo_cmd_list_cleanup(struct ath11k_base *ab)
list_del(&cmd_cache->list);
dp->reo_cmd_cache_flush_count--;
rx_tid = &cmd_cache->data;
- if (rx_tid->vaddr) {
- dma_unmap_single(ab->dev, rx_tid->paddr,
- rx_tid->size, DMA_BIDIRECTIONAL);
- kfree(rx_tid->vaddr);
- rx_tid->vaddr = NULL;
+ if (rx_tid->vaddr_unaligned) {
+ dma_free_noncoherent(ab->dev, rx_tid->unaligned_size,
+ rx_tid->vaddr_unaligned,
+ rx_tid->paddr_unaligned, DMA_BIDIRECTIONAL);
+ rx_tid->vaddr_unaligned = NULL;
}
kfree(cmd_cache);
}
@@ -708,11 +708,11 @@ static void ath11k_dp_reo_cmd_free(struct ath11k_dp *dp, void *ctx,
if (status != HAL_REO_CMD_SUCCESS)
ath11k_warn(dp->ab, "failed to flush rx tid hw desc, tid %d status %d\n",
rx_tid->tid, status);
- if (rx_tid->vaddr) {
- dma_unmap_single(dp->ab->dev, rx_tid->paddr, rx_tid->size,
- DMA_BIDIRECTIONAL);
- kfree(rx_tid->vaddr);
- rx_tid->vaddr = NULL;
+ if (rx_tid->vaddr_unaligned) {
+ dma_free_noncoherent(dp->ab->dev, rx_tid->unaligned_size,
+ rx_tid->vaddr_unaligned,
+ rx_tid->paddr_unaligned, DMA_BIDIRECTIONAL);
+ rx_tid->vaddr_unaligned = NULL;
}
}
@@ -749,10 +749,10 @@ static void ath11k_dp_reo_cache_flush(struct ath11k_base *ab,
if (ret) {
ath11k_err(ab, "failed to send HAL_REO_CMD_FLUSH_CACHE cmd, tid %d (%d)\n",
rx_tid->tid, ret);
- dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size,
- DMA_BIDIRECTIONAL);
- kfree(rx_tid->vaddr);
- rx_tid->vaddr = NULL;
+ dma_free_noncoherent(ab->dev, rx_tid->unaligned_size,
+ rx_tid->vaddr_unaligned,
+ rx_tid->paddr_unaligned, DMA_BIDIRECTIONAL);
+ rx_tid->vaddr_unaligned = NULL;
}
}
@@ -802,10 +802,10 @@ static void ath11k_dp_rx_tid_del_func(struct ath11k_dp *dp, void *ctx,
return;
free_desc:
- dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size,
- DMA_BIDIRECTIONAL);
- kfree(rx_tid->vaddr);
- rx_tid->vaddr = NULL;
+ dma_free_noncoherent(ab->dev, rx_tid->unaligned_size,
+ rx_tid->vaddr_unaligned,
+ rx_tid->paddr_unaligned, DMA_BIDIRECTIONAL);
+ rx_tid->vaddr_unaligned = NULL;
}
void ath11k_peer_rx_tid_delete(struct ath11k *ar,
@@ -831,14 +831,16 @@ void ath11k_peer_rx_tid_delete(struct ath11k *ar,
if (ret != -ESHUTDOWN)
ath11k_err(ar->ab, "failed to send HAL_REO_CMD_UPDATE_RX_QUEUE cmd, tid %d (%d)\n",
tid, ret);
- dma_unmap_single(ar->ab->dev, rx_tid->paddr, rx_tid->size,
- DMA_BIDIRECTIONAL);
- kfree(rx_tid->vaddr);
- rx_tid->vaddr = NULL;
+ dma_free_noncoherent(ar->ab->dev, rx_tid->unaligned_size,
+ rx_tid->vaddr_unaligned,
+ rx_tid->paddr_unaligned, DMA_BIDIRECTIONAL);
+ rx_tid->vaddr_unaligned = NULL;
}
rx_tid->paddr = 0;
+ rx_tid->paddr_unaligned = 0;
rx_tid->size = 0;
+ rx_tid->unaligned_size = 0;
}
static int ath11k_dp_rx_link_desc_return(struct ath11k_base *ab,
@@ -982,10 +984,9 @@ static void ath11k_dp_rx_tid_mem_free(struct ath11k_base *ab,
if (!rx_tid->active)
goto unlock_exit;
- dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size,
- DMA_BIDIRECTIONAL);
- kfree(rx_tid->vaddr);
- rx_tid->vaddr = NULL;
+ dma_free_noncoherent(ab->dev, rx_tid->unaligned_size, rx_tid->vaddr_unaligned,
+ rx_tid->paddr_unaligned, DMA_BIDIRECTIONAL);
+ rx_tid->vaddr_unaligned = NULL;
rx_tid->active = false;
@@ -1000,9 +1001,8 @@ int ath11k_peer_rx_tid_setup(struct ath11k *ar, const u8 *peer_mac, int vdev_id,
struct ath11k_base *ab = ar->ab;
struct ath11k_peer *peer;
struct dp_rx_tid *rx_tid;
- u32 hw_desc_sz;
- u32 *addr_aligned;
- void *vaddr;
+ u32 hw_desc_sz, *vaddr;
+ void *vaddr_unaligned;
dma_addr_t paddr;
int ret;
@@ -1050,37 +1050,34 @@ int ath11k_peer_rx_tid_setup(struct ath11k *ar, const u8 *peer_mac, int vdev_id,
else
hw_desc_sz = ath11k_hal_reo_qdesc_size(DP_BA_WIN_SZ_MAX, tid);
- vaddr = kzalloc(hw_desc_sz + HAL_LINK_DESC_ALIGN - 1, GFP_ATOMIC);
- if (!vaddr) {
+ rx_tid->unaligned_size = hw_desc_sz + HAL_LINK_DESC_ALIGN - 1;
+ vaddr_unaligned = dma_alloc_noncoherent(ab->dev, rx_tid->unaligned_size, &paddr,
+ DMA_BIDIRECTIONAL, GFP_ATOMIC);
+ if (!vaddr_unaligned) {
spin_unlock_bh(&ab->base_lock);
return -ENOMEM;
}
- addr_aligned = PTR_ALIGN(vaddr, HAL_LINK_DESC_ALIGN);
-
- ath11k_hal_reo_qdesc_setup(addr_aligned, tid, ba_win_sz,
- ssn, pn_type);
-
- paddr = dma_map_single(ab->dev, addr_aligned, hw_desc_sz,
- DMA_BIDIRECTIONAL);
-
- ret = dma_mapping_error(ab->dev, paddr);
- if (ret) {
- spin_unlock_bh(&ab->base_lock);
- ath11k_warn(ab, "failed to setup dma map for peer %pM rx tid %d: %d\n",
- peer_mac, tid, ret);
- goto err_mem_free;
- }
-
- rx_tid->vaddr = vaddr;
- rx_tid->paddr = paddr;
+ rx_tid->vaddr_unaligned = vaddr_unaligned;
+ vaddr = PTR_ALIGN(vaddr_unaligned, HAL_LINK_DESC_ALIGN);
+ rx_tid->paddr_unaligned = paddr;
+ rx_tid->paddr = rx_tid->paddr_unaligned + ((unsigned long)vaddr -
+ (unsigned long)rx_tid->vaddr_unaligned);
+ ath11k_hal_reo_qdesc_setup(vaddr, tid, ba_win_sz, ssn, pn_type);
rx_tid->size = hw_desc_sz;
rx_tid->active = true;
+ /* After dma_alloc_noncoherent(), vaddr is modified by the CPU during reo
+ * qdesc setup. Since these changes are not yet visible to the device, the
+ * driver must explicitly call dma_sync_single_for_device().
+ */
+ dma_sync_single_for_device(ab->dev, rx_tid->paddr,
+ rx_tid->size,
+ DMA_TO_DEVICE);
spin_unlock_bh(&ab->base_lock);
- ret = ath11k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id, peer_mac,
- paddr, tid, 1, ba_win_sz);
+ ret = ath11k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id, peer_mac, rx_tid->paddr,
+ tid, 1, ba_win_sz);
if (ret) {
ath11k_warn(ar->ab, "failed to setup rx reorder queue for peer %pM tid %d: %d\n",
peer_mac, tid, ret);
@@ -1088,12 +1085,6 @@ int ath11k_peer_rx_tid_setup(struct ath11k *ar, const u8 *peer_mac, int vdev_id,
}
return ret;
-
-err_mem_free:
- kfree(rx_tid->vaddr);
- rx_tid->vaddr = NULL;
-
- return ret;
}
int ath11k_dp_rx_ampdu_start(struct ath11k *ar,
@@ -2831,8 +2822,6 @@ static void ath11k_dp_rx_update_peer_stats(struct ath11k_sta *arsta,
rx_stats->dcm_count += ppdu_info->dcm;
rx_stats->ru_alloc_cnt[ppdu_info->ru_alloc] += num_msdu;
- arsta->rssi_comb = ppdu_info->rssi_comb;
-
BUILD_BUG_ON(ARRAY_SIZE(arsta->chain_signal) >
ARRAY_SIZE(ppdu_info->rssi_chain_pri20));
@@ -4783,7 +4772,7 @@ u32 ath11k_dp_rx_mon_mpdu_pop(struct ath11k *ar, int mac_id,
if (!msdu) {
ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
"msdu_pop: invalid buf_id %d\n", buf_id);
- break;
+ goto next_msdu;
}
rxcb = ATH11K_SKB_RXCB(msdu);
if (!rxcb->unmapped) {
@@ -5148,7 +5137,7 @@ static void ath11k_dp_rx_mon_dest_process(struct ath11k *ar, int mac_id,
struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data;
const struct ath11k_hw_hal_params *hal_params;
void *ring_entry;
- void *mon_dst_srng;
+ struct hal_srng *mon_dst_srng;
u32 ppdu_id;
u32 rx_bufs_used;
u32 ring_id;
@@ -5165,6 +5154,7 @@ static void ath11k_dp_rx_mon_dest_process(struct ath11k *ar, int mac_id,
spin_lock_bh(&pmon->mon_lock);
+ spin_lock_bh(&mon_dst_srng->lock);
ath11k_hal_srng_access_begin(ar->ab, mon_dst_srng);
ppdu_id = pmon->mon_ppdu_info.ppdu_id;
@@ -5223,6 +5213,7 @@ static void ath11k_dp_rx_mon_dest_process(struct ath11k *ar, int mac_id,
mon_dst_srng);
}
ath11k_hal_srng_access_end(ar->ab, mon_dst_srng);
+ spin_unlock_bh(&mon_dst_srng->lock);
spin_unlock_bh(&pmon->mon_lock);
@@ -5410,7 +5401,7 @@ ath11k_dp_rx_full_mon_mpdu_pop(struct ath11k *ar,
"full mon msdu_pop: invalid buf_id %d\n",
buf_id);
spin_unlock_bh(&rx_ring->idr_lock);
- break;
+ goto next_msdu;
}
idr_remove(&rx_ring->bufs_idr, buf_id);
spin_unlock_bh(&rx_ring->idr_lock);
@@ -5612,7 +5603,7 @@ static int ath11k_dp_full_mon_process_rx(struct ath11k_base *ab, int mac_id,
struct hal_sw_mon_ring_entries *sw_mon_entries;
struct ath11k_pdev_mon_stats *rx_mon_stats;
struct sk_buff *head_msdu, *tail_msdu;
- void *mon_dst_srng = &ar->ab->hal.srng_list[dp->rxdma_mon_dst_ring.ring_id];
+ struct hal_srng *mon_dst_srng;
void *ring_entry;
u32 rx_bufs_used = 0, mpdu_rx_bufs_used;
int quota = 0, ret;
@@ -5628,6 +5619,9 @@ static int ath11k_dp_full_mon_process_rx(struct ath11k_base *ab, int mac_id,
goto reap_status_ring;
}
+ mon_dst_srng = &ar->ab->hal.srng_list[dp->rxdma_mon_dst_ring.ring_id];
+ spin_lock_bh(&mon_dst_srng->lock);
+
ath11k_hal_srng_access_begin(ar->ab, mon_dst_srng);
while ((ring_entry = ath11k_hal_srng_dst_peek(ar->ab, mon_dst_srng))) {
head_msdu = NULL;
@@ -5671,6 +5665,7 @@ next_entry:
}
ath11k_hal_srng_access_end(ar->ab, mon_dst_srng);
+ spin_unlock_bh(&mon_dst_srng->lock);
spin_unlock_bh(&pmon->mon_lock);
if (rx_bufs_used) {
diff --git a/drivers/net/wireless/ath/ath11k/fw.c b/drivers/net/wireless/ath/ath11k/fw.c
index 4e36292a79db..cbbd8e57119f 100644
--- a/drivers/net/wireless/ath/ath11k/fw.c
+++ b/drivers/net/wireless/ath/ath11k/fw.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
- * Copyright (c) 2022-2023, Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2022-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "core.h"
@@ -166,3 +166,4 @@ void ath11k_fw_destroy(struct ath11k_base *ab)
{
release_firmware(ab->fw.fw);
}
+EXPORT_SYMBOL(ath11k_fw_destroy);
diff --git a/drivers/net/wireless/ath/ath11k/hif.h b/drivers/net/wireless/ath/ath11k/hif.h
index 674ff772b181..770c39ff99b4 100644
--- a/drivers/net/wireless/ath/ath11k/hif.h
+++ b/drivers/net/wireless/ath/ath11k/hif.h
@@ -31,6 +31,7 @@ struct ath11k_hif_ops {
void (*ce_irq_enable)(struct ath11k_base *ab);
void (*ce_irq_disable)(struct ath11k_base *ab);
void (*get_ce_msi_idx)(struct ath11k_base *ab, u32 ce_id, u32 *msi_idx);
+ void (*coredump_download)(struct ath11k_base *ab);
};
static inline void ath11k_hif_ce_irq_enable(struct ath11k_base *ab)
@@ -146,4 +147,10 @@ static inline void ath11k_get_ce_msi_idx(struct ath11k_base *ab, u32 ce_id,
*msi_data_idx = ce_id;
}
+static inline void ath11k_hif_coredump_download(struct ath11k_base *ab)
+{
+ if (ab->hif.ops->coredump_download)
+ ab->hif.ops->coredump_download(ab);
+}
+
#endif /* _HIF_H_ */
diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c
index 1556392f7ad4..97816916abac 100644
--- a/drivers/net/wireless/ath/ath11k/mac.c
+++ b/drivers/net/wireless/ath/ath11k/mac.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <net/mac80211.h>
@@ -1529,17 +1529,23 @@ static int ath11k_mac_set_vif_params(struct ath11k_vif *arvif,
return ret;
}
-static int ath11k_mac_setup_bcn_tmpl_ema(struct ath11k_vif *arvif)
+static struct ath11k_vif *ath11k_mac_get_tx_arvif(struct ath11k_vif *arvif)
+{
+ if (arvif->vif->mbssid_tx_vif)
+ return ath11k_vif_to_arvif(arvif->vif->mbssid_tx_vif);
+
+ return NULL;
+}
+
+static int ath11k_mac_setup_bcn_tmpl_ema(struct ath11k_vif *arvif,
+ struct ath11k_vif *tx_arvif)
{
- struct ath11k_vif *tx_arvif;
struct ieee80211_ema_beacons *beacons;
int ret = 0;
bool nontx_vif_params_set = false;
u32 params = 0;
u8 i = 0;
- tx_arvif = ath11k_vif_to_arvif(arvif->vif->mbssid_tx_vif);
-
beacons = ieee80211_beacon_get_template_ema_list(tx_arvif->ar->hw,
tx_arvif->vif, 0);
if (!beacons || !beacons->cnt) {
@@ -1585,25 +1591,22 @@ static int ath11k_mac_setup_bcn_tmpl_ema(struct ath11k_vif *arvif)
return ret;
}
-static int ath11k_mac_setup_bcn_tmpl_mbssid(struct ath11k_vif *arvif)
+static int ath11k_mac_setup_bcn_tmpl_mbssid(struct ath11k_vif *arvif,
+ struct ath11k_vif *tx_arvif)
{
struct ath11k *ar = arvif->ar;
struct ath11k_base *ab = ar->ab;
- struct ath11k_vif *tx_arvif = arvif;
struct ieee80211_hw *hw = ar->hw;
struct ieee80211_vif *vif = arvif->vif;
struct ieee80211_mutable_offsets offs = {};
struct sk_buff *bcn;
int ret;
- if (vif->mbssid_tx_vif) {
- tx_arvif = ath11k_vif_to_arvif(vif->mbssid_tx_vif);
- if (tx_arvif != arvif) {
- ar = tx_arvif->ar;
- ab = ar->ab;
- hw = ar->hw;
- vif = tx_arvif->vif;
- }
+ if (tx_arvif != arvif) {
+ ar = tx_arvif->ar;
+ ab = ar->ab;
+ hw = ar->hw;
+ vif = tx_arvif->vif;
}
bcn = ieee80211_beacon_get_template(hw, vif, &offs, 0);
@@ -1632,6 +1635,7 @@ static int ath11k_mac_setup_bcn_tmpl_mbssid(struct ath11k_vif *arvif)
static int ath11k_mac_setup_bcn_tmpl(struct ath11k_vif *arvif)
{
struct ieee80211_vif *vif = arvif->vif;
+ struct ath11k_vif *tx_arvif;
if (arvif->vdev_type != WMI_VDEV_TYPE_AP)
return 0;
@@ -1639,14 +1643,18 @@ static int ath11k_mac_setup_bcn_tmpl(struct ath11k_vif *arvif)
/* Target does not expect beacon templates for the already up
* non-transmitting interfaces, and results in a crash if sent.
*/
- if (vif->mbssid_tx_vif &&
- arvif != ath11k_vif_to_arvif(vif->mbssid_tx_vif) && arvif->is_up)
- return 0;
+ tx_arvif = ath11k_mac_get_tx_arvif(arvif);
+ if (tx_arvif) {
+ if (arvif != tx_arvif && arvif->is_up)
+ return 0;
- if (vif->bss_conf.ema_ap && vif->mbssid_tx_vif)
- return ath11k_mac_setup_bcn_tmpl_ema(arvif);
+ if (vif->bss_conf.ema_ap)
+ return ath11k_mac_setup_bcn_tmpl_ema(arvif, tx_arvif);
+ } else {
+ tx_arvif = arvif;
+ }
- return ath11k_mac_setup_bcn_tmpl_mbssid(arvif);
+ return ath11k_mac_setup_bcn_tmpl_mbssid(arvif, tx_arvif);
}
void ath11k_mac_bcn_tx_event(struct ath11k_vif *arvif)
@@ -1674,7 +1682,7 @@ static void ath11k_control_beaconing(struct ath11k_vif *arvif,
struct ieee80211_bss_conf *info)
{
struct ath11k *ar = arvif->ar;
- struct ath11k_vif *tx_arvif = NULL;
+ struct ath11k_vif *tx_arvif;
int ret = 0;
lockdep_assert_held(&arvif->ar->conf_mutex);
@@ -1701,9 +1709,7 @@ static void ath11k_control_beaconing(struct ath11k_vif *arvif,
ether_addr_copy(arvif->bssid, info->bssid);
- if (arvif->vif->mbssid_tx_vif)
- tx_arvif = ath11k_vif_to_arvif(arvif->vif->mbssid_tx_vif);
-
+ tx_arvif = ath11k_mac_get_tx_arvif(arvif);
ret = ath11k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid,
arvif->bssid,
tx_arvif ? tx_arvif->bssid : NULL,
@@ -5204,6 +5210,45 @@ exit:
return ret;
}
+static int ath11k_mac_op_conf_tx_mu_edca(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ unsigned int link_id, u16 ac,
+ const struct ieee80211_tx_queue_params *params)
+{
+ struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
+ struct ath11k *ar = hw->priv;
+ struct wmi_wmm_params_arg *p;
+ int ret;
+
+ switch (ac) {
+ case IEEE80211_AC_VO:
+ p = &arvif->muedca_params.ac_vo;
+ break;
+ case IEEE80211_AC_VI:
+ p = &arvif->muedca_params.ac_vi;
+ break;
+ case IEEE80211_AC_BE:
+ p = &arvif->muedca_params.ac_be;
+ break;
+ case IEEE80211_AC_BK:
+ p = &arvif->muedca_params.ac_bk;
+ break;
+ default:
+ ath11k_warn(ar->ab, "error ac: %d", ac);
+ return -EINVAL;
+ }
+
+ p->cwmin = u8_get_bits(params->mu_edca_param_rec.ecw_min_max, GENMASK(3, 0));
+ p->cwmax = u8_get_bits(params->mu_edca_param_rec.ecw_min_max, GENMASK(7, 4));
+ p->aifs = u8_get_bits(params->mu_edca_param_rec.aifsn, GENMASK(3, 0));
+ p->txop = params->mu_edca_param_rec.mu_edca_timer;
+
+ ret = ath11k_wmi_send_wmm_update_cmd_tlv(ar, arvif->vdev_id,
+ &arvif->muedca_params,
+ WMI_WMM_PARAM_TYPE_11AX_MU_EDCA);
+ return ret;
+}
+
static int ath11k_mac_op_conf_tx(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
unsigned int link_id, u16 ac,
@@ -5242,12 +5287,22 @@ static int ath11k_mac_op_conf_tx(struct ieee80211_hw *hw,
p->txop = params->txop;
ret = ath11k_wmi_send_wmm_update_cmd_tlv(ar, arvif->vdev_id,
- &arvif->wmm_params);
+ &arvif->wmm_params,
+ WMI_WMM_PARAM_TYPE_LEGACY);
if (ret) {
ath11k_warn(ar->ab, "failed to set wmm params: %d\n", ret);
goto exit;
}
+ if (params->mu_edca) {
+ ret = ath11k_mac_op_conf_tx_mu_edca(hw, vif, link_id, ac,
+ params);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set mu_edca params: %d\n", ret);
+ goto exit;
+ }
+ }
+
ret = ath11k_conf_tx_uapsd(ar, vif, ac, params->uapsd);
if (ret)
@@ -5336,8 +5391,6 @@ static int ath11k_mac_set_txbf_conf(struct ath11k_vif *arvif)
if (vht_cap & (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE)) {
nsts = vht_cap & IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK;
nsts >>= IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT;
- if (nsts > (ar->num_rx_chains - 1))
- nsts = ar->num_rx_chains - 1;
value |= SM(nsts, WMI_TXBF_STS_CAP_OFFSET);
}
@@ -5421,9 +5474,6 @@ static void ath11k_set_vht_txbf_cap(struct ath11k *ar, u32 *vht_cap)
/* Enable Beamformee STS Field only if SU BF is enabled */
if (subfee) {
- if (nsts > (ar->num_rx_chains - 1))
- nsts = ar->num_rx_chains - 1;
-
nsts <<= IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT;
nsts &= IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK;
*vht_cap |= nsts;
@@ -6288,6 +6338,7 @@ static void ath11k_mac_op_stop(struct ieee80211_hw *hw, bool suspend)
{
struct ath11k *ar = hw->priv;
struct htt_ppdu_stats_info *ppdu_stats, *tmp;
+ struct scan_chan_list_params *params;
int ret;
ath11k_mac_drain_tx(ar);
@@ -6303,6 +6354,7 @@ static void ath11k_mac_op_stop(struct ieee80211_hw *hw, bool suspend)
mutex_unlock(&ar->conf_mutex);
cancel_delayed_work_sync(&ar->scan.timeout);
+ cancel_work_sync(&ar->channel_update_work);
cancel_work_sync(&ar->regd_update_work);
cancel_work_sync(&ar->ab->update_11d_work);
@@ -6312,10 +6364,19 @@ static void ath11k_mac_op_stop(struct ieee80211_hw *hw, bool suspend)
}
spin_lock_bh(&ar->data_lock);
+
list_for_each_entry_safe(ppdu_stats, tmp, &ar->ppdu_stats_info, list) {
list_del(&ppdu_stats->list);
kfree(ppdu_stats);
}
+
+ while ((params = list_first_entry_or_null(&ar->channel_update_queue,
+ struct scan_chan_list_params,
+ list))) {
+ list_del(&params->list);
+ kfree(params);
+ }
+
spin_unlock_bh(&ar->data_lock);
rcu_assign_pointer(ar->ab->pdevs_active[ar->pdev_idx], NULL);
@@ -6330,23 +6391,20 @@ static int ath11k_mac_setup_vdev_params_mbssid(struct ath11k_vif *arvif,
{
struct ath11k *ar = arvif->ar;
struct ath11k_vif *tx_arvif;
- struct ieee80211_vif *tx_vif;
*tx_vdev_id = 0;
- tx_vif = arvif->vif->mbssid_tx_vif;
- if (!tx_vif) {
+ tx_arvif = ath11k_mac_get_tx_arvif(arvif);
+ if (!tx_arvif) {
*flags = WMI_HOST_VDEV_FLAGS_NON_MBSSID_AP;
return 0;
}
- tx_arvif = ath11k_vif_to_arvif(tx_vif);
-
if (arvif->vif->bss_conf.nontransmitted) {
- if (ar->hw->wiphy != ieee80211_vif_to_wdev(tx_vif)->wiphy)
+ if (ar->hw->wiphy != tx_arvif->ar->hw->wiphy)
return -EINVAL;
*flags = WMI_HOST_VDEV_FLAGS_NON_TRANSMIT_AP;
- *tx_vdev_id = ath11k_vif_to_arvif(tx_vif)->vdev_id;
+ *tx_vdev_id = tx_arvif->vdev_id;
} else if (tx_arvif == arvif) {
*flags = WMI_HOST_VDEV_FLAGS_TRANSMIT_AP;
} else {
@@ -7306,8 +7364,7 @@ ath11k_mac_update_vif_chan(struct ath11k *ar,
int n_vifs)
{
struct ath11k_base *ab = ar->ab;
- struct ath11k_vif *arvif, *tx_arvif = NULL;
- struct ieee80211_vif *mbssid_tx_vif;
+ struct ath11k_vif *arvif, *tx_arvif;
int ret;
int i;
bool monitor_vif = false;
@@ -7361,10 +7418,7 @@ ath11k_mac_update_vif_chan(struct ath11k *ar,
ath11k_warn(ab, "failed to update bcn tmpl during csa: %d\n",
ret);
- mbssid_tx_vif = arvif->vif->mbssid_tx_vif;
- if (mbssid_tx_vif)
- tx_arvif = ath11k_vif_to_arvif(mbssid_tx_vif);
-
+ tx_arvif = ath11k_mac_get_tx_arvif(arvif);
ret = ath11k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid,
arvif->bssid,
tx_arvif ? tx_arvif->bssid : NULL,
@@ -10019,6 +10073,7 @@ static const struct wiphy_iftype_ext_capab ath11k_iftypes_ext_capa[] = {
static void __ath11k_mac_unregister(struct ath11k *ar)
{
+ cancel_work_sync(&ar->channel_update_work);
cancel_work_sync(&ar->regd_update_work);
ieee80211_unregister_hw(ar->hw);
@@ -10418,6 +10473,8 @@ int ath11k_mac_allocate(struct ath11k_base *ab)
init_completion(&ar->thermal.wmi_sync);
INIT_DELAYED_WORK(&ar->scan.timeout, ath11k_scan_timeout_work);
+ INIT_WORK(&ar->channel_update_work, ath11k_regd_update_chan_list_work);
+ INIT_LIST_HEAD(&ar->channel_update_queue);
INIT_WORK(&ar->regd_update_work, ath11k_regd_update_work);
INIT_WORK(&ar->wmi_mgmt_tx_work, ath11k_mgmt_over_wmi_tx_work);
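
(Illustrative aside, not part of the patch above: the MU-EDCA hunk pulls ECWmin/ECWmax and AIFSN out of packed bytes with u8_get_bits() and GENMASK(3, 0)/GENMASK(7, 4). The standalone sketch below shows the same nibble extraction in plain C; the struct and helper names are made up for the example.)

/* Illustrative only: mirrors the GENMASK(3, 0) / GENMASK(7, 4) extraction
 * used for the MU-EDCA parameter record; not driver code.
 */
#include <stdint.h>
#include <stdio.h>

struct mu_edca_rec {            /* hypothetical stand-in for the ieee80211 record */
        uint8_t aifsn;          /* AIFSN in bits 3:0 */
        uint8_t ecw_min_max;    /* ECWmin in bits 3:0, ECWmax in bits 7:4 */
        uint8_t mu_edca_timer;
};

static int low_nibble(uint8_t v)  { return v & 0x0f; }         /* GENMASK(3, 0) */
static int high_nibble(uint8_t v) { return (v >> 4) & 0x0f; }  /* GENMASK(7, 4) */

int main(void)
{
        struct mu_edca_rec rec = { .aifsn = 0x08, .ecw_min_max = 0xa4, .mu_edca_timer = 255 };

        printf("aifs=%d cwmin=%d cwmax=%d timer=%d\n",
               low_nibble(rec.aifsn), low_nibble(rec.ecw_min_max),
               high_nibble(rec.ecw_min_max), rec.mu_edca_timer);
        return 0;
}
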
diff --git a/drivers/net/wireless/ath/ath11k/mhi.c b/drivers/net/wireless/ath/ath11k/mhi.c
index 6e45f464a429..fc77eac83e95 100644
--- a/drivers/net/wireless/ath/ath11k/mhi.c
+++ b/drivers/net/wireless/ath/ath11k/mhi.c
@@ -491,3 +491,8 @@ int ath11k_mhi_resume(struct ath11k_pci *ab_pci)
return 0;
}
+
+void ath11k_mhi_coredump(struct mhi_controller *mhi_ctrl, bool in_panic)
+{
+ mhi_download_rddm_image(mhi_ctrl, in_panic);
+}
diff --git a/drivers/net/wireless/ath/ath11k/mhi.h b/drivers/net/wireless/ath/ath11k/mhi.h
index a682aad52fc5..651470091bd5 100644
--- a/drivers/net/wireless/ath/ath11k/mhi.h
+++ b/drivers/net/wireless/ath/ath11k/mhi.h
@@ -26,5 +26,6 @@ void ath11k_mhi_clear_vector(struct ath11k_base *ab);
int ath11k_mhi_suspend(struct ath11k_pci *ar_pci);
int ath11k_mhi_resume(struct ath11k_pci *ar_pci);
+void ath11k_mhi_coredump(struct mhi_controller *mhi_ctrl, bool in_panic);
#endif
diff --git a/drivers/net/wireless/ath/ath11k/pci.c b/drivers/net/wireless/ath/ath11k/pci.c
index b93f04973ad7..412f4a134e4a 100644
--- a/drivers/net/wireless/ath/ath11k/pci.c
+++ b/drivers/net/wireless/ath/ath11k/pci.c
@@ -1,13 +1,15 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2019-2020 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/pci.h>
#include <linux/of.h>
+#include <linux/time.h>
+#include <linux/vmalloc.h>
#include "pci.h"
#include "core.h"
@@ -610,6 +612,187 @@ static void ath11k_pci_aspm_restore(struct ath11k_pci *ab_pci)
PCI_EXP_LNKCTL_ASPMC);
}
+#ifdef CONFIG_DEV_COREDUMP
+static int ath11k_pci_coredump_calculate_size(struct ath11k_base *ab, u32 *dump_seg_sz)
+{
+ struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
+ struct mhi_controller *mhi_ctrl = ab_pci->mhi_ctrl;
+ struct image_info *rddm_img, *fw_img;
+ struct ath11k_tlv_dump_data *dump_tlv;
+ enum ath11k_fw_crash_dump_type mem_type;
+ u32 len = 0, rddm_tlv_sz = 0, paging_tlv_sz = 0;
+ struct ath11k_dump_file_data *file_data;
+ int i;
+
+ rddm_img = mhi_ctrl->rddm_image;
+ if (!rddm_img) {
+ ath11k_err(ab, "No RDDM dump found\n");
+ return 0;
+ }
+
+ fw_img = mhi_ctrl->fbc_image;
+
+ for (i = 0; i < fw_img->entries ; i++) {
+ if (!fw_img->mhi_buf[i].buf)
+ continue;
+
+ paging_tlv_sz += fw_img->mhi_buf[i].len;
+ }
+ dump_seg_sz[FW_CRASH_DUMP_PAGING_DATA] = paging_tlv_sz;
+
+ for (i = 0; i < rddm_img->entries; i++) {
+ if (!rddm_img->mhi_buf[i].buf)
+ continue;
+
+ rddm_tlv_sz += rddm_img->mhi_buf[i].len;
+ }
+ dump_seg_sz[FW_CRASH_DUMP_RDDM_DATA] = rddm_tlv_sz;
+
+ for (i = 0; i < ab->qmi.mem_seg_count; i++) {
+ mem_type = ath11k_coredump_get_dump_type(ab->qmi.target_mem[i].type);
+
+ if (mem_type == FW_CRASH_DUMP_NONE)
+ continue;
+
+ if (mem_type == FW_CRASH_DUMP_TYPE_MAX) {
+ ath11k_dbg(ab, ATH11K_DBG_PCI,
+ "target mem region type %d not supported",
+ ab->qmi.target_mem[i].type);
+ continue;
+ }
+
+ if (!ab->qmi.target_mem[i].anyaddr)
+ continue;
+
+ dump_seg_sz[mem_type] += ab->qmi.target_mem[i].size;
+ }
+
+ for (i = 0; i < FW_CRASH_DUMP_TYPE_MAX; i++) {
+ if (!dump_seg_sz[i])
+ continue;
+
+ len += sizeof(*dump_tlv) + dump_seg_sz[i];
+ }
+
+ if (len)
+ len += sizeof(*file_data);
+
+ return len;
+}
+
+static void ath11k_pci_coredump_download(struct ath11k_base *ab)
+{
+ struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
+ struct mhi_controller *mhi_ctrl = ab_pci->mhi_ctrl;
+ struct image_info *rddm_img, *fw_img;
+ struct timespec64 timestamp;
+ int i, len, mem_idx;
+ enum ath11k_fw_crash_dump_type mem_type;
+ struct ath11k_dump_file_data *file_data;
+ struct ath11k_tlv_dump_data *dump_tlv;
+ size_t hdr_len = sizeof(*file_data);
+ void *buf;
+ u32 dump_seg_sz[FW_CRASH_DUMP_TYPE_MAX] = { 0 };
+
+ ath11k_mhi_coredump(mhi_ctrl, false);
+
+ len = ath11k_pci_coredump_calculate_size(ab, dump_seg_sz);
+ if (!len) {
+ ath11k_warn(ab, "No crash dump data found for devcoredump");
+ return;
+ }
+
+ rddm_img = mhi_ctrl->rddm_image;
+ fw_img = mhi_ctrl->fbc_image;
+
+ /* dev_coredumpv() requires vmalloc data */
+ buf = vzalloc(len);
+ if (!buf)
+ return;
+
+ ab->dump_data = buf;
+ ab->ath11k_coredump_len = len;
+ file_data = ab->dump_data;
+ strscpy(file_data->df_magic, "ATH11K-FW-DUMP", sizeof(file_data->df_magic));
+ file_data->len = cpu_to_le32(len);
+ file_data->version = cpu_to_le32(ATH11K_FW_CRASH_DUMP_V2);
+ file_data->chip_id = cpu_to_le32(ab_pci->dev_id);
+ file_data->qrtr_id = cpu_to_le32(ab_pci->ab->qmi.service_ins_id);
+ file_data->bus_id = cpu_to_le32(pci_domain_nr(ab_pci->pdev->bus));
+ guid_gen(&file_data->guid);
+ ktime_get_real_ts64(&timestamp);
+ file_data->tv_sec = cpu_to_le64(timestamp.tv_sec);
+ file_data->tv_nsec = cpu_to_le64(timestamp.tv_nsec);
+ buf += hdr_len;
+ dump_tlv = buf;
+ dump_tlv->type = cpu_to_le32(FW_CRASH_DUMP_PAGING_DATA);
+ dump_tlv->tlv_len = cpu_to_le32(dump_seg_sz[FW_CRASH_DUMP_PAGING_DATA]);
+ buf += COREDUMP_TLV_HDR_SIZE;
+
+ /* append all segments together as they are all part of a single contiguous
+ * block of memory
+ */
+ for (i = 0; i < fw_img->entries ; i++) {
+ if (!fw_img->mhi_buf[i].buf)
+ continue;
+
+ memcpy_fromio(buf, (void const __iomem *)fw_img->mhi_buf[i].buf,
+ fw_img->mhi_buf[i].len);
+ buf += fw_img->mhi_buf[i].len;
+ }
+
+ dump_tlv = buf;
+ dump_tlv->type = cpu_to_le32(FW_CRASH_DUMP_RDDM_DATA);
+ dump_tlv->tlv_len = cpu_to_le32(dump_seg_sz[FW_CRASH_DUMP_RDDM_DATA]);
+ buf += COREDUMP_TLV_HDR_SIZE;
+
+ /* append all segments together as they are all part of a single contiguous
+ * block of memory
+ */
+ for (i = 0; i < rddm_img->entries; i++) {
+ if (!rddm_img->mhi_buf[i].buf)
+ continue;
+
+ memcpy_fromio(buf, (void const __iomem *)rddm_img->mhi_buf[i].buf,
+ rddm_img->mhi_buf[i].len);
+ buf += rddm_img->mhi_buf[i].len;
+ }
+
+ mem_idx = FW_CRASH_DUMP_REMOTE_MEM_DATA;
+ for (; mem_idx < FW_CRASH_DUMP_TYPE_MAX; mem_idx++) {
+ if (mem_idx == FW_CRASH_DUMP_NONE)
+ continue;
+
+ for (i = 0; i < ab->qmi.mem_seg_count; i++) {
+ mem_type = ath11k_coredump_get_dump_type
+ (ab->qmi.target_mem[i].type);
+
+ if (mem_type != mem_idx)
+ continue;
+
+ if (!ab->qmi.target_mem[i].anyaddr) {
+ ath11k_dbg(ab, ATH11K_DBG_PCI,
+ "Skipping mem region type %d",
+ ab->qmi.target_mem[i].type);
+ continue;
+ }
+
+ dump_tlv = buf;
+ dump_tlv->type = cpu_to_le32(mem_idx);
+ dump_tlv->tlv_len = cpu_to_le32(dump_seg_sz[mem_idx]);
+ buf += COREDUMP_TLV_HDR_SIZE;
+
+ memcpy_fromio(buf, ab->qmi.target_mem[i].iaddr,
+ ab->qmi.target_mem[i].size);
+
+ buf += ab->qmi.target_mem[i].size;
+ }
+ }
+
+ queue_work(ab->workqueue, &ab->dump_work);
+}
+#endif
+
static int ath11k_pci_power_up(struct ath11k_base *ab)
{
struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
@@ -713,6 +896,9 @@ static const struct ath11k_hif_ops ath11k_pci_hif_ops = {
.ce_irq_enable = ath11k_pci_hif_ce_irq_enable,
.ce_irq_disable = ath11k_pci_hif_ce_irq_disable,
.get_ce_msi_idx = ath11k_pcic_get_ce_msi_idx,
+#ifdef CONFIG_DEV_COREDUMP
+ .coredump_download = ath11k_pci_coredump_download,
+#endif
};
static void ath11k_pci_read_hw_version(struct ath11k_base *ab, u32 *major, u32 *minor)
@@ -735,7 +921,7 @@ static int ath11k_pci_set_irq_affinity_hint(struct ath11k_pci *ab_pci,
if (test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab_pci->ab->dev_flags))
return 0;
- return irq_set_affinity_hint(ab_pci->pdev->irq, m);
+ return irq_set_affinity_and_hint(ab_pci->pdev->irq, m);
}
static int ath11k_pci_probe(struct pci_dev *pdev,
@@ -939,6 +1125,8 @@ unsupported_wcn6855_soc:
return 0;
err_free_irq:
+ /* __free_irq() expects the caller to have cleared the affinity hint */
+ ath11k_pci_set_irq_affinity_hint(ab_pci, NULL);
ath11k_pcic_free_irq(ab);
err_ce_free:
@@ -981,9 +1169,12 @@ static void ath11k_pci_remove(struct pci_dev *pdev)
set_bit(ATH11K_FLAG_UNREGISTERING, &ab->dev_flags);
+ cancel_work_sync(&ab->reset_work);
+ cancel_work_sync(&ab->dump_work);
ath11k_core_deinit(ab);
qmi_fail:
+ ath11k_fw_destroy(ab);
ath11k_mhi_unregister(ab_pci);
ath11k_pcic_free_irq(ab);
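
(Illustrative aside, not part of the patch above: the coredump download path lays the dump out as one vmalloc'd buffer holding a file header followed by TLV records — a 32-bit type, a 32-bit length, then the payload — packed back to back. The minimal userspace sketch below shows that append-a-TLV pattern; names and type values are assumptions for the example.)

/* Illustrative TLV append, not driver code: each record is a type word,
 * a length word, then 'len' payload bytes, written back to back.
 */
#include <stdint.h>
#include <string.h>
#include <stdio.h>

struct tlv_hdr {
        uint32_t type;
        uint32_t len;
} __attribute__((packed));

static size_t tlv_append(uint8_t *buf, size_t off, uint32_t type,
                         const void *data, uint32_t len)
{
        struct tlv_hdr hdr = { .type = type, .len = len };

        memcpy(buf + off, &hdr, sizeof(hdr));
        memcpy(buf + off + sizeof(hdr), data, len);
        return off + sizeof(hdr) + len;
}

int main(void)
{
        uint8_t dump[64];
        const char paging[] = "paging-seg";
        const char rddm[] = "rddm-seg";
        size_t off = 0;

        off = tlv_append(dump, off, 0 /* e.g. paging data */, paging, sizeof(paging));
        off = tlv_append(dump, off, 1 /* e.g. RDDM data */, rddm, sizeof(rddm));
        printf("dump uses %zu bytes\n", off);
        return 0;
}
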
diff --git a/drivers/net/wireless/ath/ath11k/qmi.c b/drivers/net/wireless/ath/ath11k/qmi.c
index 5759fc521316..4f8b08ed1bbc 100644
--- a/drivers/net/wireless/ath/ath11k/qmi.c
+++ b/drivers/net/wireless/ath/ath11k/qmi.c
@@ -1957,13 +1957,15 @@ static void ath11k_qmi_free_target_mem_chunk(struct ath11k_base *ab)
int i;
for (i = 0; i < ab->qmi.mem_seg_count; i++) {
- if ((ab->hw_params.fixed_mem_region ||
- test_bit(ATH11K_FLAG_FIXED_MEM_RGN, &ab->dev_flags)) &&
- ab->qmi.target_mem[i].iaddr)
- iounmap(ab->qmi.target_mem[i].iaddr);
+ if (!ab->qmi.target_mem[i].anyaddr)
+ continue;
- if (!ab->qmi.target_mem[i].vaddr)
+ if (ab->hw_params.fixed_mem_region ||
+ test_bit(ATH11K_FLAG_FIXED_MEM_RGN, &ab->dev_flags)) {
+ iounmap(ab->qmi.target_mem[i].iaddr);
+ ab->qmi.target_mem[i].iaddr = NULL;
continue;
+ }
dma_free_coherent(ab->dev,
ab->qmi.target_mem[i].prev_size,
@@ -2070,7 +2072,7 @@ static int ath11k_qmi_assign_target_mem_chunk(struct ath11k_base *ab)
break;
case BDF_MEM_REGION_TYPE:
ab->qmi.target_mem[idx].paddr = ab->hw_params.bdf_addr;
- ab->qmi.target_mem[idx].vaddr = NULL;
+ ab->qmi.target_mem[idx].iaddr = NULL;
ab->qmi.target_mem[idx].size = ab->qmi.target_mem[i].size;
ab->qmi.target_mem[idx].type = ab->qmi.target_mem[i].type;
idx++;
@@ -2093,10 +2095,11 @@ static int ath11k_qmi_assign_target_mem_chunk(struct ath11k_base *ab)
} else {
ab->qmi.target_mem[idx].paddr =
ATH11K_QMI_CALDB_ADDRESS;
+ ab->qmi.target_mem[idx].iaddr = NULL;
}
} else {
ab->qmi.target_mem[idx].paddr = 0;
- ab->qmi.target_mem[idx].vaddr = NULL;
+ ab->qmi.target_mem[idx].iaddr = NULL;
}
ab->qmi.target_mem[idx].size = ab->qmi.target_mem[i].size;
ab->qmi.target_mem[idx].type = ab->qmi.target_mem[i].type;
diff --git a/drivers/net/wireless/ath/ath11k/qmi.h b/drivers/net/wireless/ath/ath11k/qmi.h
index 7e06d100af57..7968ab122b65 100644
--- a/drivers/net/wireless/ath/ath11k/qmi.h
+++ b/drivers/net/wireless/ath/ath11k/qmi.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH11K_QMI_H
@@ -102,8 +102,11 @@ struct target_mem_chunk {
u32 prev_size;
u32 prev_type;
dma_addr_t paddr;
- u32 *vaddr;
- void __iomem *iaddr;
+ union {
+ u32 *vaddr;
+ void __iomem *iaddr;
+ void *anyaddr;
+ };
};
struct target_info {
@@ -154,6 +157,7 @@ struct ath11k_qmi {
#define BDF_MEM_REGION_TYPE 0x2
#define M3_DUMP_REGION_TYPE 0x3
#define CALDB_MEM_REGION_TYPE 0x4
+#define PAGEABLE_MEM_REGION_TYPE 0x9
struct qmi_wlanfw_host_cap_req_msg_v01 {
u8 num_clients_valid;
diff --git a/drivers/net/wireless/ath/ath11k/reg.c b/drivers/net/wireless/ath/ath11k/reg.c
index b0f289784dd3..d62a2014315a 100644
--- a/drivers/net/wireless/ath/ath11k/reg.c
+++ b/drivers/net/wireless/ath/ath11k/reg.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/rtnetlink.h>
@@ -55,6 +55,19 @@ ath11k_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request)
ath11k_dbg(ar->ab, ATH11K_DBG_REG,
"Regulatory Notification received for %s\n", wiphy_name(wiphy));
+ if (request->initiator == NL80211_REGDOM_SET_BY_DRIVER) {
+ ath11k_dbg(ar->ab, ATH11K_DBG_REG,
+ "driver initiated regd update\n");
+ if (ar->state != ATH11K_STATE_ON)
+ return;
+
+ ret = ath11k_reg_update_chan_list(ar, true);
+ if (ret)
+ ath11k_warn(ar->ab, "failed to update channel list: %d\n", ret);
+
+ return;
+ }
+
/* Currently supporting only General User Hints. Cell base user
* hints to be handled later.
* Hints from other sources like Core, Beacons are not expected for
@@ -111,32 +124,7 @@ int ath11k_reg_update_chan_list(struct ath11k *ar, bool wait)
struct channel_param *ch;
enum nl80211_band band;
int num_channels = 0;
- int i, ret, left;
-
- if (wait && ar->state_11d != ATH11K_11D_IDLE) {
- left = wait_for_completion_timeout(&ar->completed_11d_scan,
- ATH11K_SCAN_TIMEOUT_HZ);
- if (!left) {
- ath11k_dbg(ar->ab, ATH11K_DBG_REG,
- "failed to receive 11d scan complete: timed out\n");
- ar->state_11d = ATH11K_11D_IDLE;
- }
- ath11k_dbg(ar->ab, ATH11K_DBG_REG,
- "11d scan wait left time %d\n", left);
- }
-
- if (wait &&
- (ar->scan.state == ATH11K_SCAN_STARTING ||
- ar->scan.state == ATH11K_SCAN_RUNNING)) {
- left = wait_for_completion_timeout(&ar->scan.completed,
- ATH11K_SCAN_TIMEOUT_HZ);
- if (!left)
- ath11k_dbg(ar->ab, ATH11K_DBG_REG,
- "failed to receive hw scan complete: timed out\n");
-
- ath11k_dbg(ar->ab, ATH11K_DBG_REG,
- "hw scan wait left time %d\n", left);
- }
+ int i, ret = 0;
if (ar->state == ATH11K_STATE_RESTARTING)
return 0;
@@ -218,6 +206,16 @@ int ath11k_reg_update_chan_list(struct ath11k *ar, bool wait)
}
}
+ if (wait) {
+ spin_lock_bh(&ar->data_lock);
+ list_add_tail(&params->list, &ar->channel_update_queue);
+ spin_unlock_bh(&ar->data_lock);
+
+ queue_work(ar->ab->workqueue, &ar->channel_update_work);
+
+ return 0;
+ }
+
ret = ath11k_wmi_send_scan_chan_list_cmd(ar, params);
kfree(params);
@@ -293,12 +291,6 @@ int ath11k_regd_update(struct ath11k *ar)
if (ret)
goto err;
- if (ar->state == ATH11K_STATE_ON) {
- ret = ath11k_reg_update_chan_list(ar, true);
- if (ret)
- goto err;
- }
-
return 0;
err:
ath11k_warn(ab, "failed to perform regd update : %d\n", ret);
@@ -804,6 +796,54 @@ ret:
return new_regd;
}
+void ath11k_regd_update_chan_list_work(struct work_struct *work)
+{
+ struct ath11k *ar = container_of(work, struct ath11k,
+ channel_update_work);
+ struct scan_chan_list_params *params;
+ struct list_head local_update_list;
+ int left;
+
+ INIT_LIST_HEAD(&local_update_list);
+
+ spin_lock_bh(&ar->data_lock);
+ list_splice_tail_init(&ar->channel_update_queue, &local_update_list);
+ spin_unlock_bh(&ar->data_lock);
+
+ while ((params = list_first_entry_or_null(&local_update_list,
+ struct scan_chan_list_params,
+ list))) {
+ if (ar->state_11d != ATH11K_11D_IDLE) {
+ left = wait_for_completion_timeout(&ar->completed_11d_scan,
+ ATH11K_SCAN_TIMEOUT_HZ);
+ if (!left) {
+ ath11k_dbg(ar->ab, ATH11K_DBG_REG,
+ "failed to receive 11d scan complete: timed out\n");
+ ar->state_11d = ATH11K_11D_IDLE;
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_REG,
+ "reg 11d scan wait left time %d\n", left);
+ }
+
+ if ((ar->scan.state == ATH11K_SCAN_STARTING ||
+ ar->scan.state == ATH11K_SCAN_RUNNING)) {
+ left = wait_for_completion_timeout(&ar->scan.completed,
+ ATH11K_SCAN_TIMEOUT_HZ);
+ if (!left)
+ ath11k_dbg(ar->ab, ATH11K_DBG_REG,
+ "failed to receive hw scan complete: timed out\n");
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_REG,
+ "reg hw scan wait left time %d\n", left);
+ }
+
+ ath11k_wmi_send_scan_chan_list_cmd(ar, params);
+ list_del(&params->list);
+ kfree(params);
+ }
+}
+
static bool ath11k_reg_is_world_alpha(char *alpha)
{
if (alpha[0] == '0' && alpha[1] == '0')
@@ -977,6 +1017,7 @@ void ath11k_regd_update_work(struct work_struct *work)
void ath11k_reg_init(struct ath11k *ar)
{
ar->hw->wiphy->regulatory_flags = REGULATORY_WIPHY_SELF_MANAGED;
+ ar->hw->wiphy->flags |= WIPHY_FLAG_NOTIFY_REGDOM_BY_DRIVER;
ar->hw->wiphy->reg_notifier = ath11k_reg_notifier;
}
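
(Illustrative aside, not part of the patch above: ath11k_regd_update_chan_list_work() drains its queue by splicing the shared list onto a local list head while holding the spinlock, then processing the entries with the lock dropped. The plain-C sketch below shows that splice-then-drain idea with a simple singly linked list and hypothetical names, without kernel list/spinlock primitives.)

/* Illustrative splice-then-drain pattern, not driver code: steal the whole
 * pending list in one step, then process entries without holding the lock.
 */
#include <stdio.h>
#include <stdlib.h>

struct req {
        int chan_count;
        struct req *next;
};

static struct req *pending;     /* shared queue head (would be lock-protected) */

static void queue_req(int chans)
{
        struct req *r = malloc(sizeof(*r));

        if (!r)
                return;
        r->chan_count = chans;
        r->next = pending;      /* push front; order does not matter here */
        pending = r;
}

static void drain_work(void)
{
        /* "splice": take the whole list, leaving the shared head empty */
        struct req *local = pending;

        pending = NULL;

        while (local) {
                struct req *r = local;

                local = r->next;
                printf("sending chan list with %d channels\n", r->chan_count);
                free(r);
        }
}

int main(void)
{
        queue_req(14);
        queue_req(28);
        drain_work();
        return 0;
}
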
diff --git a/drivers/net/wireless/ath/ath11k/reg.h b/drivers/net/wireless/ath/ath11k/reg.h
index 263ea9061948..72b483594015 100644
--- a/drivers/net/wireless/ath/ath11k/reg.h
+++ b/drivers/net/wireless/ath/ath11k/reg.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2022-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH11K_REG_H
@@ -33,6 +33,7 @@ void ath11k_reg_init(struct ath11k *ar);
void ath11k_reg_reset_info(struct cur_regulatory_info *reg_info);
void ath11k_reg_free(struct ath11k_base *ab);
void ath11k_regd_update_work(struct work_struct *work);
+void ath11k_regd_update_chan_list_work(struct work_struct *work);
struct ieee80211_regdomain *
ath11k_reg_build_regd(struct ath11k_base *ab,
struct cur_regulatory_info *reg_info, bool intersect,
diff --git a/drivers/net/wireless/ath/ath11k/testmode.c b/drivers/net/wireless/ath/ath11k/testmode.c
index 302d66092b97..9be1cd742339 100644
--- a/drivers/net/wireless/ath/ath11k/testmode.c
+++ b/drivers/net/wireless/ath/ath11k/testmode.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2023-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "testmode.h"
@@ -10,18 +10,18 @@
#include "wmi.h"
#include "hw.h"
#include "core.h"
-#include "testmode_i.h"
+#include "../testmode_i.h"
#define ATH11K_FTM_SEGHDR_CURRENT_SEQ GENMASK(3, 0)
#define ATH11K_FTM_SEGHDR_TOTAL_SEGMENTS GENMASK(7, 4)
-static const struct nla_policy ath11k_tm_policy[ATH11K_TM_ATTR_MAX + 1] = {
- [ATH11K_TM_ATTR_CMD] = { .type = NLA_U32 },
- [ATH11K_TM_ATTR_DATA] = { .type = NLA_BINARY,
- .len = ATH11K_TM_DATA_MAX_LEN },
- [ATH11K_TM_ATTR_WMI_CMDID] = { .type = NLA_U32 },
- [ATH11K_TM_ATTR_VERSION_MAJOR] = { .type = NLA_U32 },
- [ATH11K_TM_ATTR_VERSION_MINOR] = { .type = NLA_U32 },
+static const struct nla_policy ath11k_tm_policy[ATH_TM_ATTR_MAX + 1] = {
+ [ATH_TM_ATTR_CMD] = { .type = NLA_U32 },
+ [ATH_TM_ATTR_DATA] = { .type = NLA_BINARY,
+ .len = ATH_TM_DATA_MAX_LEN },
+ [ATH_TM_ATTR_WMI_CMDID] = { .type = NLA_U32 },
+ [ATH_TM_ATTR_VERSION_MAJOR] = { .type = NLA_U32 },
+ [ATH_TM_ATTR_VERSION_MINOR] = { .type = NLA_U32 },
};
static struct ath11k *ath11k_tm_get_ar(struct ath11k_base *ab)
@@ -73,9 +73,9 @@ static void ath11k_tm_wmi_event_unsegmented(struct ath11k_base *ab, u32 cmd_id,
goto out;
}
- if (nla_put_u32(nl_skb, ATH11K_TM_ATTR_CMD, ATH11K_TM_CMD_WMI) ||
- nla_put_u32(nl_skb, ATH11K_TM_ATTR_WMI_CMDID, cmd_id) ||
- nla_put(nl_skb, ATH11K_TM_ATTR_DATA, skb->len, skb->data)) {
+ if (nla_put_u32(nl_skb, ATH_TM_ATTR_CMD, ATH_TM_CMD_WMI) ||
+ nla_put_u32(nl_skb, ATH_TM_ATTR_WMI_CMDID, cmd_id) ||
+ nla_put(nl_skb, ATH_TM_ATTR_DATA, skb->len, skb->data)) {
ath11k_warn(ab, "failed to populate testmode unsegmented event\n");
kfree_skb(nl_skb);
goto out;
@@ -140,7 +140,7 @@ static int ath11k_tm_process_event(struct ath11k_base *ab, u32 cmd_id,
data_pos = ab->testmode.data_pos;
- if ((data_pos + datalen) > ATH11K_FTM_EVENT_MAX_BUF_LENGTH) {
+ if ((data_pos + datalen) > ATH_FTM_EVENT_MAX_BUF_LENGTH) {
ath11k_warn(ab, "Invalid ftm event length at %d: %d\n",
data_pos, datalen);
ret = -EINVAL;
@@ -172,10 +172,10 @@ static int ath11k_tm_process_event(struct ath11k_base *ab, u32 cmd_id,
goto out;
}
- if (nla_put_u32(nl_skb, ATH11K_TM_ATTR_CMD,
- ATH11K_TM_CMD_WMI_FTM) ||
- nla_put_u32(nl_skb, ATH11K_TM_ATTR_WMI_CMDID, cmd_id) ||
- nla_put(nl_skb, ATH11K_TM_ATTR_DATA, data_pos,
+ if (nla_put_u32(nl_skb, ATH_TM_ATTR_CMD,
+ ATH_TM_CMD_WMI_FTM) ||
+ nla_put_u32(nl_skb, ATH_TM_ATTR_WMI_CMDID, cmd_id) ||
+ nla_put(nl_skb, ATH_TM_ATTR_DATA, data_pos,
&ab->testmode.eventdata[0])) {
ath11k_warn(ab, "failed to populate segmented testmode event");
kfree_skb(nl_skb);
@@ -235,23 +235,23 @@ static int ath11k_tm_cmd_get_version(struct ath11k *ar, struct nlattr *tb[])
ath11k_dbg(ar->ab, ATH11K_DBG_TESTMODE,
"cmd get version_major %d version_minor %d\n",
- ATH11K_TESTMODE_VERSION_MAJOR,
- ATH11K_TESTMODE_VERSION_MINOR);
+ ATH_TESTMODE_VERSION_MAJOR,
+ ATH_TESTMODE_VERSION_MINOR);
skb = cfg80211_testmode_alloc_reply_skb(ar->hw->wiphy,
nla_total_size(sizeof(u32)));
if (!skb)
return -ENOMEM;
- ret = nla_put_u32(skb, ATH11K_TM_ATTR_VERSION_MAJOR,
- ATH11K_TESTMODE_VERSION_MAJOR);
+ ret = nla_put_u32(skb, ATH_TM_ATTR_VERSION_MAJOR,
+ ATH_TESTMODE_VERSION_MAJOR);
if (ret) {
kfree_skb(skb);
return ret;
}
- ret = nla_put_u32(skb, ATH11K_TM_ATTR_VERSION_MINOR,
- ATH11K_TESTMODE_VERSION_MINOR);
+ ret = nla_put_u32(skb, ATH_TM_ATTR_VERSION_MINOR,
+ ATH_TESTMODE_VERSION_MINOR);
if (ret) {
kfree_skb(skb);
return ret;
@@ -277,7 +277,7 @@ static int ath11k_tm_cmd_testmode_start(struct ath11k *ar, struct nlattr *tb[])
goto err;
}
- ar->ab->testmode.eventdata = kzalloc(ATH11K_FTM_EVENT_MAX_BUF_LENGTH,
+ ar->ab->testmode.eventdata = kzalloc(ATH_FTM_EVENT_MAX_BUF_LENGTH,
GFP_KERNEL);
if (!ar->ab->testmode.eventdata) {
ret = -ENOMEM;
@@ -310,25 +310,25 @@ static int ath11k_tm_cmd_wmi(struct ath11k *ar, struct nlattr *tb[],
mutex_lock(&ar->conf_mutex);
- if (!tb[ATH11K_TM_ATTR_DATA]) {
+ if (!tb[ATH_TM_ATTR_DATA]) {
ret = -EINVAL;
goto out;
}
- if (!tb[ATH11K_TM_ATTR_WMI_CMDID]) {
+ if (!tb[ATH_TM_ATTR_WMI_CMDID]) {
ret = -EINVAL;
goto out;
}
- buf = nla_data(tb[ATH11K_TM_ATTR_DATA]);
- buf_len = nla_len(tb[ATH11K_TM_ATTR_DATA]);
+ buf = nla_data(tb[ATH_TM_ATTR_DATA]);
+ buf_len = nla_len(tb[ATH_TM_ATTR_DATA]);
if (!buf_len) {
ath11k_warn(ar->ab, "No data present in testmode wmi command\n");
ret = -EINVAL;
goto out;
}
- cmd_id = nla_get_u32(tb[ATH11K_TM_ATTR_WMI_CMDID]);
+ cmd_id = nla_get_u32(tb[ATH_TM_ATTR_WMI_CMDID]);
/* Make sure that the buffer length is long enough to
* hold TLV and pdev/vdev id.
@@ -409,13 +409,13 @@ static int ath11k_tm_cmd_wmi_ftm(struct ath11k *ar, struct nlattr *tb[])
goto out;
}
- if (!tb[ATH11K_TM_ATTR_DATA]) {
+ if (!tb[ATH_TM_ATTR_DATA]) {
ret = -EINVAL;
goto out;
}
- buf = nla_data(tb[ATH11K_TM_ATTR_DATA]);
- buf_len = nla_len(tb[ATH11K_TM_ATTR_DATA]);
+ buf = nla_data(tb[ATH_TM_ATTR_DATA]);
+ buf_len = nla_len(tb[ATH_TM_ATTR_DATA]);
cmd_id = WMI_PDEV_UTF_CMDID;
ath11k_dbg(ar->ab, ATH11K_DBG_TESTMODE,
@@ -476,25 +476,25 @@ int ath11k_tm_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
void *data, int len)
{
struct ath11k *ar = hw->priv;
- struct nlattr *tb[ATH11K_TM_ATTR_MAX + 1];
+ struct nlattr *tb[ATH_TM_ATTR_MAX + 1];
int ret;
- ret = nla_parse(tb, ATH11K_TM_ATTR_MAX, data, len, ath11k_tm_policy,
+ ret = nla_parse(tb, ATH_TM_ATTR_MAX, data, len, ath11k_tm_policy,
NULL);
if (ret)
return ret;
- if (!tb[ATH11K_TM_ATTR_CMD])
+ if (!tb[ATH_TM_ATTR_CMD])
return -EINVAL;
- switch (nla_get_u32(tb[ATH11K_TM_ATTR_CMD])) {
- case ATH11K_TM_CMD_GET_VERSION:
+ switch (nla_get_u32(tb[ATH_TM_ATTR_CMD])) {
+ case ATH_TM_CMD_GET_VERSION:
return ath11k_tm_cmd_get_version(ar, tb);
- case ATH11K_TM_CMD_WMI:
+ case ATH_TM_CMD_WMI:
return ath11k_tm_cmd_wmi(ar, tb, vif);
- case ATH11K_TM_CMD_TESTMODE_START:
+ case ATH_TM_CMD_TESTMODE_START:
return ath11k_tm_cmd_testmode_start(ar, tb);
- case ATH11K_TM_CMD_WMI_FTM:
+ case ATH_TM_CMD_WMI_FTM:
return ath11k_tm_cmd_wmi_ftm(ar, tb);
default:
return -EOPNOTSUPP;
diff --git a/drivers/net/wireless/ath/ath11k/wmi.c b/drivers/net/wireless/ath/ath11k/wmi.c
index 87abfa547529..d7f852bebf4a 100644
--- a/drivers/net/wireless/ath/ath11k/wmi.c
+++ b/drivers/net/wireless/ath/ath11k/wmi.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/skbuff.h>
#include <linux/ctype.h>
@@ -2662,7 +2662,8 @@ int ath11k_wmi_send_scan_chan_list_cmd(struct ath11k *ar,
}
int ath11k_wmi_send_wmm_update_cmd_tlv(struct ath11k *ar, u32 vdev_id,
- struct wmi_wmm_params_all_arg *param)
+ struct wmi_wmm_params_all_arg *param,
+ enum wmi_wmm_params_type wmm_param_type)
{
struct ath11k_pdev_wmi *wmi = ar->wmi;
struct wmi_vdev_set_wmm_params_cmd *cmd;
@@ -2681,7 +2682,7 @@ int ath11k_wmi_send_wmm_update_cmd_tlv(struct ath11k *ar, u32 vdev_id,
FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
cmd->vdev_id = vdev_id;
- cmd->wmm_param_type = 0;
+ cmd->wmm_param_type = wmm_param_type;
for (ac = 0; ac < WME_NUM_AC; ac++) {
switch (ac) {
@@ -2714,8 +2715,8 @@ int ath11k_wmi_send_wmm_update_cmd_tlv(struct ath11k *ar, u32 vdev_id,
wmm_param->no_ack = wmi_wmm_arg->no_ack;
ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
- "wmm set ac %d aifs %d cwmin %d cwmax %d txop %d acm %d no_ack %d\n",
- ac, wmm_param->aifs, wmm_param->cwmin,
+ "wmm set type %d ac %d aifs %d cwmin %d cwmax %d txop %d acm %d no_ack %d\n",
+ wmm_param_type, ac, wmm_param->aifs, wmm_param->cwmin,
wmm_param->cwmax, wmm_param->txoplimit,
wmm_param->acm, wmm_param->no_ack);
}
diff --git a/drivers/net/wireless/ath/ath11k/wmi.h b/drivers/net/wireless/ath/ath11k/wmi.h
index 8982b909c821..9fcffaa2f383 100644
--- a/drivers/net/wireless/ath/ath11k/wmi.h
+++ b/drivers/net/wireless/ath/ath11k/wmi.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH11K_WMI_H
@@ -3817,6 +3817,7 @@ struct wmi_stop_scan_cmd {
};
struct scan_chan_list_params {
+ struct list_head list;
u32 pdev_id;
u16 nallchans;
struct channel_param ch_param[];
@@ -6346,6 +6347,11 @@ enum wmi_sta_keepalive_method {
#define WMI_STA_KEEPALIVE_INTERVAL_DEFAULT 30
#define WMI_STA_KEEPALIVE_INTERVAL_DISABLE 0
+enum wmi_wmm_params_type {
+ WMI_WMM_PARAM_TYPE_LEGACY = 0,
+ WMI_WMM_PARAM_TYPE_11AX_MU_EDCA = 1,
+};
+
const void **ath11k_wmi_tlv_parse_alloc(struct ath11k_base *ab,
struct sk_buff *skb, gfp_t gfp);
int ath11k_wmi_cmd_send(struct ath11k_pdev_wmi *wmi, struct sk_buff *skb,
@@ -6402,7 +6408,8 @@ int ath11k_wmi_send_scan_start_cmd(struct ath11k *ar,
int ath11k_wmi_send_scan_stop_cmd(struct ath11k *ar,
struct scan_cancel_param *param);
int ath11k_wmi_send_wmm_update_cmd_tlv(struct ath11k *ar, u32 vdev_id,
- struct wmi_wmm_params_all_arg *param);
+ struct wmi_wmm_params_all_arg *param,
+ enum wmi_wmm_params_type wmm_param_type);
int ath11k_wmi_pdev_suspend(struct ath11k *ar, u32 suspend_opt,
u32 pdev_id);
int ath11k_wmi_pdev_resume(struct ath11k *ar, u32 pdev_id);
diff --git a/drivers/net/wireless/ath/ath12k/Makefile b/drivers/net/wireless/ath/ath12k/Makefile
index b5bb3e2599cd..60644cb42c76 100644
--- a/drivers/net/wireless/ath/ath12k/Makefile
+++ b/drivers/net/wireless/ath/ath12k/Makefile
@@ -23,11 +23,12 @@ ath12k-y += core.o \
fw.o \
p2p.o
-ath12k-$(CONFIG_ATH12K_DEBUGFS) += debugfs.o debugfs_htt_stats.o
+ath12k-$(CONFIG_ATH12K_DEBUGFS) += debugfs.o debugfs_htt_stats.o debugfs_sta.o
ath12k-$(CONFIG_ACPI) += acpi.o
ath12k-$(CONFIG_ATH12K_TRACING) += trace.o
ath12k-$(CONFIG_PM) += wow.o
ath12k-$(CONFIG_ATH12K_COREDUMP) += coredump.o
+ath12k-$(CONFIG_NL80211_TESTMODE) += testmode.o
# for tracing framework to find trace.h
CFLAGS_trace.o := -I$(src)
diff --git a/drivers/net/wireless/ath/ath12k/acpi.c b/drivers/net/wireless/ath/ath12k/acpi.c
index 0555d35aab47..d81367ce6929 100644
--- a/drivers/net/wireless/ath/ath12k/acpi.c
+++ b/drivers/net/wireless/ath/ath12k/acpi.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "core.h"
@@ -12,7 +12,7 @@ static int ath12k_acpi_dsm_get_data(struct ath12k_base *ab, int func)
{
union acpi_object *obj;
acpi_handle root_handle;
- int ret;
+ int ret, i;
root_handle = ACPI_HANDLE(ab->dev);
if (!root_handle) {
@@ -29,9 +29,48 @@ static int ath12k_acpi_dsm_get_data(struct ath12k_base *ab, int func)
}
if (obj->type == ACPI_TYPE_INTEGER) {
- ab->acpi.func_bit = obj->integer.value;
+ switch (func) {
+ case ATH12K_ACPI_DSM_FUNC_SUPPORT_FUNCS:
+ ab->acpi.func_bit = obj->integer.value;
+ break;
+ case ATH12K_ACPI_DSM_FUNC_DISABLE_FLAG:
+ ab->acpi.bit_flag = obj->integer.value;
+ break;
+ }
+ } else if (obj->type == ACPI_TYPE_STRING) {
+ switch (func) {
+ case ATH12K_ACPI_DSM_FUNC_BDF_EXT:
+ if (obj->string.length <= ATH12K_ACPI_BDF_ANCHOR_STRING_LEN ||
+ obj->string.length > ATH12K_ACPI_BDF_MAX_LEN ||
+ memcmp(obj->string.pointer, ATH12K_ACPI_BDF_ANCHOR_STRING,
+ ATH12K_ACPI_BDF_ANCHOR_STRING_LEN)) {
+ ath12k_warn(ab, "invalid ACPI DSM BDF size: %d\n",
+ obj->string.length);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ memcpy(ab->acpi.bdf_string, obj->string.pointer,
+ obj->buffer.length);
+
+ break;
+ }
} else if (obj->type == ACPI_TYPE_BUFFER) {
switch (func) {
+ case ATH12K_ACPI_DSM_FUNC_SUPPORT_FUNCS:
+ if (obj->buffer.length < ATH12K_ACPI_DSM_FUNC_MIN_BITMAP_SIZE ||
+ obj->buffer.length > ATH12K_ACPI_DSM_FUNC_MAX_BITMAP_SIZE) {
+ ath12k_warn(ab, "invalid ACPI DSM func size: %d\n",
+ obj->buffer.length);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ab->acpi.func_bit = 0;
+ for (i = 0; i < obj->buffer.length; i++)
+ ab->acpi.func_bit += obj->buffer.pointer[i] << (i * 8);
+
+ break;
case ATH12K_ACPI_DSM_FUNC_TAS_CFG:
if (obj->buffer.length != ATH12K_ACPI_DSM_TAS_CFG_SIZE) {
ath12k_warn(ab, "invalid ACPI DSM TAS config size: %d\n",
@@ -247,24 +286,118 @@ static int ath12k_acpi_set_tas_params(struct ath12k_base *ab)
return 0;
}
+bool ath12k_acpi_get_disable_rfkill(struct ath12k_base *ab)
+{
+ return ab->acpi.acpi_disable_rfkill;
+}
+
+bool ath12k_acpi_get_disable_11be(struct ath12k_base *ab)
+{
+ return ab->acpi.acpi_disable_11be;
+}
+
+void ath12k_acpi_set_dsm_func(struct ath12k_base *ab)
+{
+ int ret;
+ u8 *buf;
+
+ if (!ab->hw_params->acpi_guid)
+ /* not supported with this hardware */
+ return;
+
+ if (ab->acpi.acpi_tas_enable) {
+ ret = ath12k_acpi_set_tas_params(ab);
+ if (ret) {
+ ath12k_warn(ab, "failed to send ACPI TAS parameters: %d\n", ret);
+ return;
+ }
+ }
+
+ if (ab->acpi.acpi_bios_sar_enable) {
+ ret = ath12k_acpi_set_bios_sar_params(ab);
+ if (ret) {
+ ath12k_warn(ab, "failed to send ACPI BIOS SAR: %d\n", ret);
+ return;
+ }
+ }
+
+ if (ab->acpi.acpi_cca_enable) {
+ buf = ab->acpi.cca_data + ATH12K_ACPI_CCA_THR_OFFSET_DATA_OFFSET;
+ ret = ath12k_wmi_set_bios_cmd(ab,
+ WMI_BIOS_PARAM_CCA_THRESHOLD_TYPE,
+ buf,
+ ATH12K_ACPI_CCA_THR_OFFSET_LEN);
+ if (ret) {
+ ath12k_warn(ab, "failed to set ACPI DSM CCA threshold: %d\n",
+ ret);
+ return;
+ }
+ }
+
+ if (ab->acpi.acpi_band_edge_enable) {
+ ret = ath12k_wmi_set_bios_cmd(ab,
+ WMI_BIOS_PARAM_TYPE_BANDEDGE,
+ ab->acpi.band_edge_power,
+ sizeof(ab->acpi.band_edge_power));
+ if (ret) {
+ ath12k_warn(ab,
+ "failed to set ACPI DSM band edge channel power: %d\n",
+ ret);
+ return;
+ }
+ }
+}
+
int ath12k_acpi_start(struct ath12k_base *ab)
{
acpi_status status;
- u8 *buf;
int ret;
+ ab->acpi.acpi_tas_enable = false;
+ ab->acpi.acpi_disable_11be = false;
+ ab->acpi.acpi_disable_rfkill = false;
+ ab->acpi.acpi_bios_sar_enable = false;
+ ab->acpi.acpi_cca_enable = false;
+ ab->acpi.acpi_band_edge_enable = false;
+ ab->acpi.acpi_enable_bdf = false;
+ ab->acpi.bdf_string[0] = '\0';
+
if (!ab->hw_params->acpi_guid)
/* not supported with this hardware */
return 0;
- ab->acpi.acpi_tas_enable = false;
-
ret = ath12k_acpi_dsm_get_data(ab, ATH12K_ACPI_DSM_FUNC_SUPPORT_FUNCS);
if (ret) {
ath12k_dbg(ab, ATH12K_DBG_BOOT, "failed to get ACPI DSM data: %d\n", ret);
return ret;
}
+ if (ATH12K_ACPI_FUNC_BIT_VALID(ab->acpi, ATH12K_ACPI_FUNC_BIT_DISABLE_FLAG)) {
+ ret = ath12k_acpi_dsm_get_data(ab, ATH12K_ACPI_DSM_FUNC_DISABLE_FLAG);
+ if (ret) {
+ ath12k_warn(ab, "failed to get ACPI DISABLE FLAG: %d\n", ret);
+ return ret;
+ }
+
+ if (ATH12K_ACPI_CHEK_BIT_VALID(ab->acpi,
+ ATH12K_ACPI_DSM_DISABLE_11BE_BIT))
+ ab->acpi.acpi_disable_11be = true;
+
+ if (!ATH12K_ACPI_CHEK_BIT_VALID(ab->acpi,
+ ATH12K_ACPI_DSM_DISABLE_RFKILL_BIT))
+ ab->acpi.acpi_disable_rfkill = true;
+ }
+
+ if (ATH12K_ACPI_FUNC_BIT_VALID(ab->acpi, ATH12K_ACPI_FUNC_BIT_BDF_EXT)) {
+ ret = ath12k_acpi_dsm_get_data(ab, ATH12K_ACPI_DSM_FUNC_BDF_EXT);
+ if (ret || ab->acpi.bdf_string[0] == '\0') {
+ ath12k_warn(ab, "failed to get ACPI BDF EXT: %d\n", ret);
+ return ret;
+ }
+
+ ab->acpi.acpi_enable_bdf = true;
+ }
+
if (ATH12K_ACPI_FUNC_BIT_VALID(ab->acpi, ATH12K_ACPI_FUNC_BIT_TAS_CFG)) {
ret = ath12k_acpi_dsm_get_data(ab, ATH12K_ACPI_DSM_FUNC_TAS_CFG);
if (ret) {
@@ -308,20 +441,6 @@ int ath12k_acpi_start(struct ath12k_base *ab)
ab->acpi.acpi_bios_sar_enable = true;
}
- if (ab->acpi.acpi_tas_enable) {
- ret = ath12k_acpi_set_tas_params(ab);
- if (ret) {
- ath12k_warn(ab, "failed to send ACPI parameters: %d\n", ret);
- return ret;
- }
- }
-
- if (ab->acpi.acpi_bios_sar_enable) {
- ret = ath12k_acpi_set_bios_sar_params(ab);
- if (ret)
- return ret;
- }
-
if (ATH12K_ACPI_FUNC_BIT_VALID(ab->acpi, ATH12K_ACPI_FUNC_BIT_CCA)) {
ret = ath12k_acpi_dsm_get_data(ab, ATH12K_ACPI_DSM_FUNC_INDEX_CCA);
if (ret) {
@@ -332,18 +451,8 @@ int ath12k_acpi_start(struct ath12k_base *ab)
if (ab->acpi.cca_data[0] == ATH12K_ACPI_CCA_THR_VERSION &&
ab->acpi.cca_data[ATH12K_ACPI_CCA_THR_OFFSET_DATA_OFFSET] ==
- ATH12K_ACPI_CCA_THR_ENABLE_FLAG) {
- buf = ab->acpi.cca_data + ATH12K_ACPI_CCA_THR_OFFSET_DATA_OFFSET;
- ret = ath12k_wmi_set_bios_cmd(ab,
- WMI_BIOS_PARAM_CCA_THRESHOLD_TYPE,
- buf,
- ATH12K_ACPI_CCA_THR_OFFSET_LEN);
- if (ret) {
- ath12k_warn(ab, "failed to set ACPI DSM CCA threshold: %d\n",
- ret);
- return ret;
- }
- }
+ ATH12K_ACPI_CCA_THR_ENABLE_FLAG)
+ ab->acpi.acpi_cca_enable = true;
}
if (ATH12K_ACPI_FUNC_BIT_VALID(ab->acpi,
@@ -356,18 +465,8 @@ int ath12k_acpi_start(struct ath12k_base *ab)
}
if (ab->acpi.band_edge_power[0] == ATH12K_ACPI_BAND_EDGE_VERSION &&
- ab->acpi.band_edge_power[1] == ATH12K_ACPI_BAND_EDGE_ENABLE_FLAG) {
- ret = ath12k_wmi_set_bios_cmd(ab,
- WMI_BIOS_PARAM_TYPE_BANDEDGE,
- ab->acpi.band_edge_power,
- sizeof(ab->acpi.band_edge_power));
- if (ret) {
- ath12k_warn(ab,
- "failed to set ACPI DSM band edge channel power: %d\n",
- ret);
- return ret;
- }
- }
+ ab->acpi.band_edge_power[1] == ATH12K_ACPI_BAND_EDGE_ENABLE_FLAG)
+ ab->acpi.acpi_band_edge_enable = true;
}
status = acpi_install_notify_handler(ACPI_HANDLE(ab->dev),
@@ -383,6 +482,21 @@ int ath12k_acpi_start(struct ath12k_base *ab)
return 0;
}
+int ath12k_acpi_check_bdf_variant_name(struct ath12k_base *ab)
+{
+ size_t max_len = sizeof(ab->qmi.target.bdf_ext);
+
+ if (!ab->acpi.acpi_enable_bdf)
+ return -ENODATA;
+
+ if (strscpy(ab->qmi.target.bdf_ext, ab->acpi.bdf_string + 4, max_len) < 0)
+ ath12k_dbg(ab, ATH12K_DBG_BOOT,
+ "acpi bdf variant longer than the buffer (variant: %s)\n",
+ ab->acpi.bdf_string);
+
+ return 0;
+}
+
void ath12k_acpi_stop(struct ath12k_base *ab)
{
if (!ab->acpi.started)
diff --git a/drivers/net/wireless/ath/ath12k/acpi.h b/drivers/net/wireless/ath/ath12k/acpi.h
index 39e003d86a48..3a26fea6af1a 100644
--- a/drivers/net/wireless/ath/ath12k/acpi.h
+++ b/drivers/net/wireless/ath/ath12k/acpi.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH12K_ACPI_H
#define ATH12K_ACPI_H
@@ -9,6 +9,8 @@
#include <linux/acpi.h>
#define ATH12K_ACPI_DSM_FUNC_SUPPORT_FUNCS 0
+#define ATH12K_ACPI_DSM_FUNC_DISABLE_FLAG 2
+#define ATH12K_ACPI_DSM_FUNC_BDF_EXT 3
#define ATH12K_ACPI_DSM_FUNC_BIOS_SAR 4
#define ATH12K_ACPI_DSM_FUNC_GEO_OFFSET 5
#define ATH12K_ACPI_DSM_FUNC_INDEX_CCA 6
@@ -16,6 +18,8 @@
#define ATH12K_ACPI_DSM_FUNC_TAS_DATA 9
#define ATH12K_ACPI_DSM_FUNC_INDEX_BAND_EDGE 10
+#define ATH12K_ACPI_FUNC_BIT_DISABLE_FLAG BIT(1)
+#define ATH12K_ACPI_FUNC_BIT_BDF_EXT BIT(2)
#define ATH12K_ACPI_FUNC_BIT_BIOS_SAR BIT(3)
#define ATH12K_ACPI_FUNC_BIT_GEO_OFFSET BIT(4)
#define ATH12K_ACPI_FUNC_BIT_CCA BIT(5)
@@ -25,6 +29,7 @@
#define ATH12K_ACPI_NOTIFY_EVENT 0x86
#define ATH12K_ACPI_FUNC_BIT_VALID(_acdata, _func) (((_acdata).func_bit) & (_func))
+#define ATH12K_ACPI_CHEK_BIT_VALID(_acdata, _func) (((_acdata).bit_flag) & (_func))
#define ATH12K_ACPI_TAS_DATA_VERSION 0x1
#define ATH12K_ACPI_TAS_DATA_ENABLE 0x1
@@ -48,6 +53,16 @@
#define ATH12K_ACPI_DSM_BAND_EDGE_DATA_SIZE 100
#define ATH12K_ACPI_DSM_TAS_CFG_SIZE 108
+#define ATH12K_ACPI_DSM_FUNC_MIN_BITMAP_SIZE 1
+#define ATH12K_ACPI_DSM_FUNC_MAX_BITMAP_SIZE 4
+
+#define ATH12K_ACPI_DSM_DISABLE_11BE_BIT BIT(0)
+#define ATH12K_ACPI_DSM_DISABLE_RFKILL_BIT BIT(2)
+
+#define ATH12K_ACPI_BDF_ANCHOR_STRING_LEN 3
+#define ATH12K_ACPI_BDF_ANCHOR_STRING "BDF"
+#define ATH12K_ACPI_BDF_MAX_LEN 100
+
#define ATH12K_ACPI_DSM_GEO_OFFSET_DATA_SIZE (ATH12K_ACPI_GEO_OFFSET_DATA_OFFSET + \
ATH12K_ACPI_BIOS_SAR_GEO_OFFSET_LEN)
#define ATH12K_ACPI_DSM_BIOS_SAR_DATA_SIZE (ATH12K_ACPI_POWER_LIMIT_DATA_OFFSET + \
@@ -59,6 +74,10 @@
int ath12k_acpi_start(struct ath12k_base *ab);
void ath12k_acpi_stop(struct ath12k_base *ab);
+bool ath12k_acpi_get_disable_rfkill(struct ath12k_base *ab);
+bool ath12k_acpi_get_disable_11be(struct ath12k_base *ab);
+void ath12k_acpi_set_dsm_func(struct ath12k_base *ab);
+int ath12k_acpi_check_bdf_variant_name(struct ath12k_base *ab);
#else
@@ -71,6 +90,25 @@ static inline void ath12k_acpi_stop(struct ath12k_base *ab)
{
}
+static inline bool ath12k_acpi_get_disable_rfkill(struct ath12k_base *ab)
+{
+ return false;
+}
+
+static inline bool ath12k_acpi_get_disable_11be(struct ath12k_base *ab)
+{
+ return false;
+}
+
+static inline void ath12k_acpi_set_dsm_func(struct ath12k_base *ab)
+{
+}
+
+static inline int ath12k_acpi_check_bdf_variant_name(struct ath12k_base *ab)
+{
+ return 0;
+}
+
#endif /* CONFIG_ACPI */
#endif /* ATH12K_ACPI_H */
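
(Illustrative aside, not part of the patch above: the ACPI DSM "supported functions" query can now return a buffer, and the driver folds its little-endian bytes into a single func_bit word before testing individual function bits. The standalone sketch below reproduces that byte-to-bitmap fold; the constant and function names are invented for the example.)

/* Illustrative only: fold a little-endian byte buffer into a bitmap word
 * and test one bit, as the ACPI DSM supported-functions handling does.
 */
#include <stdint.h>
#include <stdio.h>

#define FUNC_BIT_BDF_EXT (1u << 2)      /* example bit, mirrors BIT(2) */

static uint32_t le_bytes_to_bitmap(const uint8_t *buf, unsigned int len)
{
        uint32_t bitmap = 0;
        unsigned int i;

        for (i = 0; i < len && i < sizeof(bitmap); i++)
                bitmap |= (uint32_t)buf[i] << (i * 8);

        return bitmap;
}

int main(void)
{
        const uint8_t dsm_buf[] = { 0x4c, 0x06 };       /* bits 2, 3, 6, 9, 10 set */
        uint32_t func_bit = le_bytes_to_bitmap(dsm_buf, sizeof(dsm_buf));

        printf("func_bit=0x%x bdf_ext %s\n", (unsigned int)func_bit,
               (func_bit & FUNC_BIT_BDF_EXT) ? "supported" : "not supported");
        return 0;
}
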
diff --git a/drivers/net/wireless/ath/ath12k/core.c b/drivers/net/wireless/ath/ath12k/core.c
index 0606116d6b9c..0b2dec081c6e 100644
--- a/drivers/net/wireless/ath/ath12k/core.c
+++ b/drivers/net/wireless/ath/ath12k/core.c
@@ -23,6 +23,10 @@ unsigned int ath12k_debug_mask;
module_param_named(debug_mask, ath12k_debug_mask, uint, 0644);
MODULE_PARM_DESC(debug_mask, "Debugging mask");
+bool ath12k_ftm_mode;
+module_param_named(ftm_mode, ath12k_ftm_mode, bool, 0444);
+MODULE_PARM_DESC(ftm_mode, "Boots up in factory test mode");
+
/* protected with ath12k_hw_group_mutex */
static struct list_head ath12k_hw_group_list = LIST_HEAD_INIT(ath12k_hw_group_list);
@@ -36,6 +40,9 @@ static int ath12k_core_rfkill_config(struct ath12k_base *ab)
if (!(ab->target_caps.sys_cap_info & WMI_SYS_CAP_INFO_RFKILL))
return 0;
+ if (ath12k_acpi_get_disable_rfkill(ab))
+ return 0;
+
for (i = 0; i < ab->num_radios; i++) {
ar = ab->pdevs[i].ar;
@@ -173,7 +180,7 @@ EXPORT_SYMBOL(ath12k_core_resume);
static int __ath12k_core_create_board_name(struct ath12k_base *ab, char *name,
size_t name_len, bool with_variant,
- bool bus_type_mode)
+ bool bus_type_mode, bool with_default)
{
/* strlen(',variant=') + strlen(ab->qmi.target.bdf_ext) */
char variant[9 + ATH12K_QMI_BDF_EXT_STR_LENGTH] = { 0 };
@@ -204,7 +211,9 @@ static int __ath12k_core_create_board_name(struct ath12k_base *ab, char *name,
"bus=%s,qmi-chip-id=%d,qmi-board-id=%d%s",
ath12k_bus_str(ab->hif.bus),
ab->qmi.target.chip_id,
- ab->qmi.target.board_id, variant);
+ with_default ?
+ ATH12K_BOARD_ID_DEFAULT : ab->qmi.target.board_id,
+ variant);
break;
}
@@ -216,19 +225,19 @@ static int __ath12k_core_create_board_name(struct ath12k_base *ab, char *name,
static int ath12k_core_create_board_name(struct ath12k_base *ab, char *name,
size_t name_len)
{
- return __ath12k_core_create_board_name(ab, name, name_len, true, false);
+ return __ath12k_core_create_board_name(ab, name, name_len, true, false, false);
}
static int ath12k_core_create_fallback_board_name(struct ath12k_base *ab, char *name,
size_t name_len)
{
- return __ath12k_core_create_board_name(ab, name, name_len, false, false);
+ return __ath12k_core_create_board_name(ab, name, name_len, false, false, true);
}
static int ath12k_core_create_bus_type_board_name(struct ath12k_base *ab, char *name,
size_t name_len)
{
- return __ath12k_core_create_board_name(ab, name, name_len, false, true);
+ return __ath12k_core_create_board_name(ab, name, name_len, false, true, true);
}
const struct firmware *ath12k_core_firmware_request(struct ath12k_base *ab,
@@ -693,6 +702,11 @@ static int ath12k_core_soc_create(struct ath12k_base *ab)
{
int ret;
+ if (ath12k_ftm_mode) {
+ ab->fw_mode = ATH12K_FIRMWARE_MODE_FTM;
+ ath12k_info(ab, "Booting in ftm mode\n");
+ }
+
ret = ath12k_qmi_init_service(ab);
if (ret) {
ath12k_err(ab, "failed to initialize qmi :%d\n", ret);
@@ -741,8 +755,7 @@ static void ath12k_core_pdev_destroy(struct ath12k_base *ab)
ath12k_dp_pdev_free(ab);
}
-static int ath12k_core_start(struct ath12k_base *ab,
- enum ath12k_firmware_mode mode)
+static int ath12k_core_start(struct ath12k_base *ab)
{
int ret;
@@ -836,10 +849,7 @@ static int ath12k_core_start(struct ath12k_base *ab,
goto err_reo_cleanup;
}
- ret = ath12k_acpi_start(ab);
- if (ret)
- /* ACPI is optional so continue in case of an error */
- ath12k_dbg(ab, ATH12K_DBG_BOOT, "acpi failed: %d\n", ret);
+ ath12k_acpi_set_dsm_func(ab);
if (!test_bit(ATH12K_FLAG_RECOVERY, &ab->dev_flags))
/* Indicate the core start in the appropriate group */
@@ -887,10 +897,41 @@ static void ath12k_core_hw_group_stop(struct ath12k_hw_group *ag)
ath12k_mac_destroy(ag);
}
+u8 ath12k_get_num_partner_link(struct ath12k *ar)
+{
+ struct ath12k_base *partner_ab, *ab = ar->ab;
+ struct ath12k_hw_group *ag = ab->ag;
+ struct ath12k_pdev *pdev;
+ u8 num_link = 0;
+ int i, j;
+
+ lockdep_assert_held(&ag->mutex);
+
+ for (i = 0; i < ag->num_devices; i++) {
+ partner_ab = ag->ab[i];
+
+ for (j = 0; j < partner_ab->num_radios; j++) {
+ pdev = &partner_ab->pdevs[j];
+
+ /* Avoid the self link */
+ if (ar == pdev->ar)
+ continue;
+
+ num_link++;
+ }
+ }
+
+ return num_link;
+}
+
static int __ath12k_mac_mlo_ready(struct ath12k *ar)
{
+ u8 num_link = ath12k_get_num_partner_link(ar);
int ret;
+ if (num_link == 0)
+ return 0;
+
ret = ath12k_wmi_mlo_ready(ar);
if (ret) {
ath12k_err(ar->ab, "MLO ready failed for pdev %d: %d\n",
@@ -920,19 +961,18 @@ int ath12k_mac_mlo_ready(struct ath12k_hw_group *ag)
ar = &ah->radio[j];
ret = __ath12k_mac_mlo_ready(ar);
if (ret)
- goto out;
+ return ret;
}
}
-out:
- return ret;
+ return 0;
}
static int ath12k_core_mlo_setup(struct ath12k_hw_group *ag)
{
int ret, i;
- if (!ag->mlo_capable || ag->num_devices == 1)
+ if (!ag->mlo_capable)
return 0;
ret = ath12k_mac_mlo_setup(ag);
@@ -1068,7 +1108,7 @@ int ath12k_core_qmi_firmware_ready(struct ath12k_base *ab)
struct ath12k_hw_group *ag = ath12k_ab_to_ag(ab);
int ret, i;
- ret = ath12k_core_start_firmware(ab, ATH12K_FIRMWARE_MODE_NORMAL);
+ ret = ath12k_core_start_firmware(ab, ab->fw_mode);
if (ret) {
ath12k_err(ab, "failed to start firmware: %d\n", ret);
return ret;
@@ -1089,7 +1129,7 @@ int ath12k_core_qmi_firmware_ready(struct ath12k_base *ab)
mutex_lock(&ag->mutex);
mutex_lock(&ab->core_lock);
- ret = ath12k_core_start(ab, ATH12K_FIRMWARE_MODE_NORMAL);
+ ret = ath12k_core_start(ab);
if (ret) {
ath12k_err(ab, "failed to start core: %d\n", ret);
goto err_dp_free;
@@ -1122,16 +1162,18 @@ err_core_stop:
ath12k_core_stop(ab);
mutex_unlock(&ab->core_lock);
}
+ mutex_unlock(&ag->mutex);
goto exit;
err_dp_free:
ath12k_dp_free(ab);
mutex_unlock(&ab->core_lock);
+ mutex_unlock(&ag->mutex);
+
err_firmware_stop:
ath12k_qmi_firmware_stop(ab);
exit:
- mutex_unlock(&ag->mutex);
return ret;
}
@@ -1239,7 +1281,8 @@ static void ath12k_core_pre_reconfigure_recovery(struct ath12k_base *ab)
for (i = 0; i < ag->num_hw; i++) {
ah = ath12k_ag_to_ah(ag, i);
- if (!ah || ah->state == ATH12K_HW_STATE_OFF)
+ if (!ah || ah->state == ATH12K_HW_STATE_OFF ||
+ ah->state == ATH12K_HW_STATE_TM)
continue;
ieee80211_stop_queues(ah->hw);
@@ -1309,6 +1352,9 @@ static void ath12k_core_post_reconfigure_recovery(struct ath12k_base *ab)
ath12k_warn(ab,
"device is wedged, will not restart hw %d\n", i);
break;
+ case ATH12K_HW_STATE_TM:
+ ath12k_warn(ab, "fw mode reset done radio %d\n", i);
+ break;
}
mutex_unlock(&ah->hw_mutex);
@@ -1602,6 +1648,9 @@ static struct ath12k_hw_group *ath12k_core_hw_group_assign(struct ath12k_base *a
lockdep_assert_held(&ath12k_hw_group_mutex);
+ if (ath12k_ftm_mode)
+ goto invalid_group;
+
/* The grouping of multiple devices will be done based on device tree file.
* The platforms that do not have any valid group information would have
* each device to be part of its own invalid group.
@@ -1789,19 +1838,19 @@ void ath12k_core_hw_group_set_mlo_capable(struct ath12k_hw_group *ag)
struct ath12k_base *ab;
int i;
+ if (ath12k_ftm_mode)
+ return;
+
lockdep_assert_held(&ag->mutex);
/* If more than one devices are grouped, then inter MLO
* functionality can work still independent of whether internally
* each device supports single_chip_mlo or not.
- * Only when there is one device, then it depends whether the
- * device can support intra chip MLO or not
+ * Only when there is one device, then disable for WCN chipsets
+ * till the required driver implementation is in place.
*/
- if (ag->num_devices > 1) {
- ag->mlo_capable = true;
- } else {
+ if (ag->num_devices == 1) {
ab = ag->ab[0];
- ag->mlo_capable = ab->single_chip_mlo_supp;
/* WCN chipsets does not advertise in firmware features
* hence skip checking
@@ -1810,8 +1859,7 @@ void ath12k_core_hw_group_set_mlo_capable(struct ath12k_hw_group *ag)
return;
}
- if (!ag->mlo_capable)
- return;
+ ag->mlo_capable = true;
for (i = 0; i < ag->num_devices; i++) {
ab = ag->ab[i];
@@ -1927,7 +1975,6 @@ struct ath12k_base *ath12k_core_alloc(struct device *dev, size_t priv_size,
ab->dev = dev;
ab->hif.bus = bus;
ab->qmi.num_radios = U8_MAX;
- ab->single_chip_mlo_supp = false;
/* Device index used to identify the devices in a group.
*
diff --git a/drivers/net/wireless/ath/ath12k/core.h b/drivers/net/wireless/ath/ath12k/core.h
index ee595794a7ae..3fac4f00d383 100644
--- a/drivers/net/wireless/ath/ath12k/core.h
+++ b/drivers/net/wireless/ath/ath12k/core.h
@@ -15,6 +15,7 @@
#include <linux/ctype.h>
#include <linux/firmware.h>
#include <linux/panic_notifier.h>
+#include <linux/average.h>
#include "qmi.h"
#include "htc.h"
#include "wmi.h"
@@ -52,8 +53,6 @@
#define ATH12K_INVALID_HW_MAC_ID 0xFF
#define ATH12K_CONNECTION_LOSS_HZ (3 * HZ)
-#define ATH12K_RX_RATE_TABLE_NUM 320
-#define ATH12K_RX_RATE_TABLE_11AX_NUM 576
#define ATH12K_MON_TIMER_INTERVAL 10
#define ATH12K_RESET_TIMEOUT_HZ (20 * HZ)
@@ -87,6 +86,7 @@ enum wme_ac {
#define ATH12K_HT_MCS_MAX 7
#define ATH12K_VHT_MCS_MAX 9
#define ATH12K_HE_MCS_MAX 11
+#define ATH12K_EHT_MCS_MAX 15
enum ath12k_crypt_mode {
/* Only use hardware crypto engine */
@@ -141,6 +141,7 @@ struct ath12k_skb_rxcb {
u8 is_frag;
u8 tid;
u16 peer_id;
+ bool is_end_of_ppdu;
};
enum ath12k_hw_rev {
@@ -166,6 +167,7 @@ struct ath12k_ext_irq_grp {
u32 num_irq;
u32 grp_id;
u64 timestamp;
+ bool napi_enabled;
struct napi_struct napi;
struct net_device *napi_ndev;
};
@@ -235,6 +237,7 @@ enum ath12k_dev_flags {
ATH12K_FLAG_CE_IRQ_ENABLED,
ATH12K_FLAG_EXT_IRQ_ENABLED,
ATH12K_FLAG_QMI_FW_READY_COMPLETE,
+ ATH12K_FLAG_FTM_SEGMENTED,
};
struct ath12k_tx_conf {
@@ -298,6 +301,8 @@ struct ath12k_link_vif {
u8 link_id;
struct ath12k_vif *ahvif;
struct ath12k_rekey_data rekey_data;
+
+ u8 current_cntdown_counter;
};
struct ath12k_vif {
@@ -327,6 +332,7 @@ struct ath12k_vif {
u32 key_cipher;
u8 tx_encap_type;
bool ps;
+ atomic_t mcbc_gsn;
struct ath12k_link_vif deflink;
struct ath12k_link_vif __rcu *link[ATH12K_NUM_MAX_LINKS];
@@ -354,20 +360,20 @@ struct ath12k_vif_iter {
#define HAL_RX_MAX_MCS_HT 31
#define HAL_RX_MAX_MCS_VHT 9
#define HAL_RX_MAX_MCS_HE 11
+#define HAL_RX_MAX_MCS_BE 15
#define HAL_RX_MAX_NSS 8
#define HAL_RX_MAX_NUM_LEGACY_RATES 12
-#define ATH12K_RX_RATE_TABLE_11AX_NUM 576
-#define ATH12K_RX_RATE_TABLE_NUM 320
struct ath12k_rx_peer_rate_stats {
u64 ht_mcs_count[HAL_RX_MAX_MCS_HT + 1];
u64 vht_mcs_count[HAL_RX_MAX_MCS_VHT + 1];
u64 he_mcs_count[HAL_RX_MAX_MCS_HE + 1];
+ u64 be_mcs_count[HAL_RX_MAX_MCS_BE + 1];
u64 nss_count[HAL_RX_MAX_NSS];
u64 bw_count[HAL_RX_BW_MAX];
u64 gi_count[HAL_RX_GI_MAX];
u64 legacy_count[HAL_RX_MAX_NUM_LEGACY_RATES];
- u64 rx_rate[ATH12K_RX_RATE_TABLE_11AX_NUM];
+ u64 rx_rate[HAL_RX_BW_MAX][HAL_RX_GI_MAX][HAL_RX_MAX_NSS][HAL_RX_MAX_MCS_HT + 1];
};
struct ath12k_rx_peer_stats {
@@ -477,6 +483,8 @@ struct ath12k_wbm_tx_stats {
u64 wbm_tx_comp_stats[HAL_WBM_REL_HTT_TX_COMP_STATUS_MAX];
};
+DECLARE_EWMA(avg_rssi, 10, 8)
+
struct ath12k_link_sta {
struct ath12k_link_vif *arvif;
struct ath12k_sta *ahsta;
@@ -496,10 +504,13 @@ struct ath12k_link_sta {
u64 rx_duration;
u64 tx_duration;
u8 rssi_comb;
+ struct ewma_avg_rssi avg_rssi;
u8 link_id;
struct ath12k_rx_peer_stats *rx_stats;
struct ath12k_wbm_tx_stats *wbm_tx_stats;
u32 bw_prev;
+ u32 peer_nss;
+ s8 rssi_beacon;
/* For now the assoc link will be considered primary */
bool is_assoc_link;
@@ -534,18 +545,26 @@ enum ath12k_hw_state {
ATH12K_HW_STATE_RESTARTING,
ATH12K_HW_STATE_RESTARTED,
ATH12K_HW_STATE_WEDGED,
+ ATH12K_HW_STATE_TM,
/* Add other states as required */
};
/* Antenna noise floor */
#define ATH12K_DEFAULT_NOISE_FLOOR -95
+struct ath12k_ftm_event_obj {
+ u32 data_pos;
+ u32 expected_seq;
+ u8 *eventdata;
+};
+
struct ath12k_fw_stats {
u32 pdev_id;
u32 stats_id;
struct list_head pdevs;
struct list_head vdevs;
struct list_head bcn;
+ bool fw_stats_done;
};
struct ath12k_dbg_htt_stats {
@@ -559,6 +578,12 @@ struct ath12k_debug {
struct dentry *debugfs_pdev;
struct dentry *debugfs_pdev_symlink;
struct ath12k_dbg_htt_stats htt_stats;
+ enum wmi_halphy_ctrl_path_stats_id tpc_stats_type;
+ bool tpc_request;
+ struct completion tpc_complete;
+ struct wmi_tpc_stats_arg *tpc_stats;
+ u32 rx_filter;
+ bool extd_rx_stats;
};
struct ath12k_per_peer_tx_stats {
@@ -712,8 +737,12 @@ struct ath12k {
bool nlo_enabled;
+ struct completion fw_stats_complete;
+
struct completion mlo_setup_done;
u32 mlo_setup_status;
+ u8 ftm_msgref;
+ struct ath12k_fw_stats fw_stats;
};
struct ath12k_hw {
@@ -1026,9 +1055,6 @@ struct ath12k_base {
const struct hal_rx_ops *hal_rx_ops;
- /* Denotes the whether MLO is possible within the chip */
- bool single_chip_mlo_supp;
-
struct completion restart_completed;
#ifdef CONFIG_ACPI
@@ -1038,6 +1064,13 @@ struct ath12k_base {
u32 func_bit;
bool acpi_tas_enable;
bool acpi_bios_sar_enable;
+ bool acpi_disable_11be;
+ bool acpi_disable_rfkill;
+ bool acpi_cca_enable;
+ bool acpi_band_edge_enable;
+ bool acpi_enable_bdf;
+ u32 bit_flag;
+ char bdf_string[ATH12K_ACPI_BDF_MAX_LEN];
u8 tas_cfg[ATH12K_ACPI_DSM_TAS_CFG_SIZE];
u8 tas_sar_power_table[ATH12K_ACPI_DSM_TAS_DATA_SIZE];
u8 bios_sar_data[ATH12K_ACPI_DSM_BIOS_SAR_DATA_SIZE];
@@ -1052,6 +1085,8 @@ struct ath12k_base {
struct ath12k_hw_group *ag;
struct ath12k_wsi_info wsi_info;
+ enum ath12k_firmware_mode fw_mode;
+ struct ath12k_ftm_event_obj ftm_event_obj;
/* must be last */
u8 drv_priv[] __aligned(sizeof(void *));
@@ -1062,6 +1097,93 @@ struct ath12k_pdev_map {
u8 pdev_idx;
};
+struct ath12k_fw_stats_vdev {
+ struct list_head list;
+
+ u32 vdev_id;
+ u32 beacon_snr;
+ u32 data_snr;
+ u32 num_tx_frames[WLAN_MAX_AC];
+ u32 num_rx_frames;
+ u32 num_tx_frames_retries[WLAN_MAX_AC];
+ u32 num_tx_frames_failures[WLAN_MAX_AC];
+ u32 num_rts_fail;
+ u32 num_rts_success;
+ u32 num_rx_err;
+ u32 num_rx_discard;
+ u32 num_tx_not_acked;
+ u32 tx_rate_history[MAX_TX_RATE_VALUES];
+ u32 beacon_rssi_history[MAX_TX_RATE_VALUES];
+};
+
+struct ath12k_fw_stats_bcn {
+ struct list_head list;
+
+ u32 vdev_id;
+ u32 tx_bcn_succ_cnt;
+ u32 tx_bcn_outage_cnt;
+};
+
+struct ath12k_fw_stats_pdev {
+ struct list_head list;
+
+ /* PDEV stats */
+ s32 ch_noise_floor;
+ u32 tx_frame_count;
+ u32 rx_frame_count;
+ u32 rx_clear_count;
+ u32 cycle_count;
+ u32 phy_err_count;
+ u32 chan_tx_power;
+ u32 ack_rx_bad;
+ u32 rts_bad;
+ u32 rts_good;
+ u32 fcs_bad;
+ u32 no_beacons;
+ u32 mib_int_count;
+
+ /* PDEV TX stats */
+ s32 comp_queued;
+ s32 comp_delivered;
+ s32 msdu_enqued;
+ s32 mpdu_enqued;
+ s32 wmm_drop;
+ s32 local_enqued;
+ s32 local_freed;
+ s32 hw_queued;
+ s32 hw_reaped;
+ s32 underrun;
+ s32 tx_abort;
+ s32 mpdus_requed;
+ u32 tx_ko;
+ u32 data_rc;
+ u32 self_triggers;
+ u32 sw_retry_failure;
+ u32 illgl_rate_phy_err;
+ u32 pdev_cont_xretry;
+ u32 pdev_tx_timeout;
+ u32 pdev_resets;
+ u32 stateless_tid_alloc_failure;
+ u32 phy_underrun;
+ u32 txop_ovf;
+
+ /* PDEV RX stats */
+ s32 mid_ppdu_route_change;
+ s32 status_rcvd;
+ s32 r0_frags;
+ s32 r1_frags;
+ s32 r2_frags;
+ s32 r3_frags;
+ s32 htt_msdus;
+ s32 htt_mpdus;
+ s32 loc_msdus;
+ s32 loc_mpdus;
+ s32 oversize_amsdu;
+ s32 phy_errs;
+ s32 phy_err_drop;
+ s32 mpdu_errs;
+};
+
int ath12k_core_qmi_firmware_ready(struct ath12k_base *ab);
int ath12k_core_pre_init(struct ath12k_base *ab);
int ath12k_core_init(struct ath12k_base *ath12k);
@@ -1084,6 +1206,7 @@ int ath12k_core_resume(struct ath12k_base *ab);
int ath12k_core_suspend(struct ath12k_base *ab);
int ath12k_core_suspend_late(struct ath12k_base *ab);
void ath12k_core_hw_group_unassign(struct ath12k_base *ab);
+u8 ath12k_get_num_partner_link(struct ath12k *ar);
const struct firmware *ath12k_core_firmware_request(struct ath12k_base *ab,
const char *filename);
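
(Illustrative aside, not part of the patch above: core.h starts tracking a per-link average RSSI via DECLARE_EWMA(avg_rssi, 10, 8), i.e. an exponentially weighted moving average. The sketch below computes a comparable EWMA with weight 1/8 in plain floating-point C; it approximates the idea and does not reproduce the kernel macro's fixed-point implementation.)

/* Illustrative EWMA with weight factor 8 (each new sample contributes 1/8),
 * roughly what an avg_rssi accumulator does; not the kernel macro.
 */
#include <stdio.h>

struct ewma {
        double avg;
        int primed;
};

static void ewma_add(struct ewma *e, double sample, double weight)
{
        if (!e->primed) {
                e->avg = sample;        /* first sample seeds the average */
                e->primed = 1;
                return;
        }
        e->avg = (e->avg * (weight - 1.0) + sample) / weight;
}

int main(void)
{
        struct ewma rssi = { 0 };
        const double samples[] = { -60, -62, -61, -70, -65 };
        unsigned int i;

        for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
                ewma_add(&rssi, samples[i], 8.0);
                printf("sample %.0f -> avg %.2f dBm\n", samples[i], rssi.avg);
        }
        return 0;
}
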
diff --git a/drivers/net/wireless/ath/ath12k/debug.c b/drivers/net/wireless/ath/ath12k/debug.c
index ff6eaeafa092..5ce100cd9a9d 100644
--- a/drivers/net/wireless/ath/ath12k/debug.c
+++ b/drivers/net/wireless/ath/ath12k/debug.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/vmalloc.h>
@@ -63,8 +63,10 @@ void __ath12k_dbg(struct ath12k_base *ab, enum ath12k_debug_mask mask,
vaf.fmt = fmt;
vaf.va = &args;
- if (ath12k_debug_mask & mask)
+ if (likely(ab))
dev_printk(KERN_DEBUG, ab->dev, "%pV", &vaf);
+ else
+ printk(KERN_DEBUG "ath12k: %pV", &vaf);
/* TODO: trace log */
diff --git a/drivers/net/wireless/ath/ath12k/debug.h b/drivers/net/wireless/ath/ath12k/debug.h
index 90e801136bc6..48916e4e1f60 100644
--- a/drivers/net/wireless/ath/ath12k/debug.h
+++ b/drivers/net/wireless/ath/ath12k/debug.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2022, 2024-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _ATH12K_DEBUG_H_
@@ -37,6 +37,7 @@ __printf(2, 3) void __ath12k_warn(struct device *dev, const char *fmt, ...);
#define ath12k_hw_warn(ah, fmt, ...) __ath12k_warn((ah)->dev, fmt, ##__VA_ARGS__)
extern unsigned int ath12k_debug_mask;
+extern bool ath12k_ftm_mode;
#ifdef CONFIG_ATH12K_DEBUG
__printf(3, 4) void __ath12k_dbg(struct ath12k_base *ab,
@@ -61,11 +62,14 @@ static inline void ath12k_dbg_dump(struct ath12k_base *ab,
}
#endif /* CONFIG_ATH12K_DEBUG */
-#define ath12k_dbg(ar, dbg_mask, fmt, ...) \
+#define ath12k_dbg(ab, dbg_mask, fmt, ...) \
do { \
typeof(dbg_mask) mask = (dbg_mask); \
if (ath12k_debug_mask & mask) \
- __ath12k_dbg(ar, mask, fmt, ##__VA_ARGS__); \
+ __ath12k_dbg(ab, mask, fmt, ##__VA_ARGS__); \
} while (0)
+#define ath12k_generic_dbg(dbg_mask, fmt, ...) \
+ ath12k_dbg(NULL, dbg_mask, fmt, ##__VA_ARGS__)
+
#endif /* _ATH12K_DEBUG_H_ */
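
ath12k_generic_dbg() simply passes a NULL ath12k_base to ath12k_dbg(), relying on the __ath12k_dbg() change in debug.c above to fall back to a plain printk(). A hedged usage sketch — the debug mask value is only an example of an existing flag:

	/* Log from a path where no device/ab pointer exists yet; the message
	 * is emitted with the bare "ath12k:" prefix instead of the device name.
	 */
	ath12k_generic_dbg(ATH12K_DBG_BOOT, "ftm mode %d", ath12k_ftm_mode);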
diff --git a/drivers/net/wireless/ath/ath12k/debugfs.c b/drivers/net/wireless/ath/ath12k/debugfs.c
index d4b32d1a431c..57002215ddf1 100644
--- a/drivers/net/wireless/ath/ath12k/debugfs.c
+++ b/drivers/net/wireless/ath/ath12k/debugfs.c
@@ -1,10 +1,12 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "core.h"
+#include "dp_tx.h"
+#include "debug.h"
#include "debugfs.h"
#include "debugfs_htt_stats.h"
@@ -31,6 +33,806 @@ static const struct file_operations fops_simulate_radar = {
.open = simple_open
};
+static ssize_t ath12k_write_tpc_stats_type(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath12k *ar = file->private_data;
+ u8 type;
+ int ret;
+
+ ret = kstrtou8_from_user(user_buf, count, 0, &type);
+ if (ret)
+ return ret;
+
+ if (type >= WMI_HALPHY_PDEV_TX_STATS_MAX)
+ return -EINVAL;
+
+ spin_lock_bh(&ar->data_lock);
+ ar->debug.tpc_stats_type = type;
+ spin_unlock_bh(&ar->data_lock);
+
+ return count;
+}
+
+static int ath12k_debug_tpc_stats_request(struct ath12k *ar)
+{
+ enum wmi_halphy_ctrl_path_stats_id tpc_stats_sub_id;
+ struct ath12k_base *ab = ar->ab;
+ int ret;
+
+ lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
+
+ reinit_completion(&ar->debug.tpc_complete);
+
+ spin_lock_bh(&ar->data_lock);
+ ar->debug.tpc_request = true;
+ tpc_stats_sub_id = ar->debug.tpc_stats_type;
+ spin_unlock_bh(&ar->data_lock);
+
+ ret = ath12k_wmi_send_tpc_stats_request(ar, tpc_stats_sub_id);
+ if (ret) {
+ ath12k_warn(ab, "failed to request pdev tpc stats: %d\n", ret);
+ spin_lock_bh(&ar->data_lock);
+ ar->debug.tpc_request = false;
+ spin_unlock_bh(&ar->data_lock);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ath12k_get_tpc_ctl_mode_idx(struct wmi_tpc_stats_arg *tpc_stats,
+ enum wmi_tpc_pream_bw pream_bw, int *mode_idx)
+{
+ u32 chan_freq = le32_to_cpu(tpc_stats->tpc_config.chan_freq);
+ u8 band;
+
+ band = ((chan_freq > ATH12K_MIN_6G_FREQ) ? NL80211_BAND_6GHZ :
+ ((chan_freq > ATH12K_MIN_5G_FREQ) ? NL80211_BAND_5GHZ :
+ NL80211_BAND_2GHZ));
+
+ if (band == NL80211_BAND_5GHZ || band == NL80211_BAND_6GHZ) {
+ switch (pream_bw) {
+ case WMI_TPC_PREAM_HT20:
+ case WMI_TPC_PREAM_VHT20:
+ *mode_idx = ATH12K_TPC_STATS_CTL_MODE_HT_VHT20_5GHZ_6GHZ;
+ break;
+ case WMI_TPC_PREAM_HE20:
+ case WMI_TPC_PREAM_EHT20:
+ *mode_idx = ATH12K_TPC_STATS_CTL_MODE_HE_EHT20_5GHZ_6GHZ;
+ break;
+ case WMI_TPC_PREAM_HT40:
+ case WMI_TPC_PREAM_VHT40:
+ *mode_idx = ATH12K_TPC_STATS_CTL_MODE_HT_VHT40_5GHZ_6GHZ;
+ break;
+ case WMI_TPC_PREAM_HE40:
+ case WMI_TPC_PREAM_EHT40:
+ *mode_idx = ATH12K_TPC_STATS_CTL_MODE_HE_EHT40_5GHZ_6GHZ;
+ break;
+ case WMI_TPC_PREAM_VHT80:
+ *mode_idx = ATH12K_TPC_STATS_CTL_MODE_VHT80_5GHZ_6GHZ;
+ break;
+ case WMI_TPC_PREAM_EHT60:
+ *mode_idx = ATH12K_TPC_STATS_CTL_MODE_EHT80_SU_PUNC20;
+ break;
+ case WMI_TPC_PREAM_HE80:
+ case WMI_TPC_PREAM_EHT80:
+ *mode_idx = ATH12K_TPC_STATS_CTL_MODE_HE_EHT80_5GHZ_6GHZ;
+ break;
+ case WMI_TPC_PREAM_VHT160:
+ *mode_idx = ATH12K_TPC_STATS_CTL_MODE_VHT160_5GHZ_6GHZ;
+ break;
+ case WMI_TPC_PREAM_EHT120:
+ case WMI_TPC_PREAM_EHT140:
+ *mode_idx = ATH12K_TPC_STATS_CTL_MODE_EHT160_SU_PUNC20;
+ break;
+ case WMI_TPC_PREAM_HE160:
+ case WMI_TPC_PREAM_EHT160:
+ *mode_idx = ATH12K_TPC_STATS_CTL_MODE_HE_EHT160_5GHZ_6GHZ;
+ break;
+ case WMI_TPC_PREAM_EHT200:
+ *mode_idx = ATH12K_TPC_STATS_CTL_MODE_EHT320_SU_PUNC120;
+ break;
+ case WMI_TPC_PREAM_EHT240:
+ *mode_idx = ATH12K_TPC_STATS_CTL_MODE_EHT320_SU_PUNC80;
+ break;
+ case WMI_TPC_PREAM_EHT280:
+ *mode_idx = ATH12K_TPC_STATS_CTL_MODE_EHT320_SU_PUNC40;
+ break;
+ case WMI_TPC_PREAM_EHT320:
+ *mode_idx = ATH12K_TPC_STATS_CTL_MODE_HE_EHT320_5GHZ_6GHZ;
+ break;
+ default:
+ /* for 5GHZ and 6GHZ, default case will be for OFDM */
+ *mode_idx = ATH12K_TPC_STATS_CTL_MODE_LEGACY_5GHZ_6GHZ;
+ break;
+ }
+ } else {
+ switch (pream_bw) {
+ case WMI_TPC_PREAM_OFDM:
+ *mode_idx = ATH12K_TPC_STATS_CTL_MODE_LEGACY_2GHZ;
+ break;
+ case WMI_TPC_PREAM_HT20:
+ case WMI_TPC_PREAM_VHT20:
+ case WMI_TPC_PREAM_HE20:
+ case WMI_TPC_PREAM_EHT20:
+ *mode_idx = ATH12K_TPC_STATS_CTL_MODE_HT20_2GHZ;
+ break;
+ case WMI_TPC_PREAM_HT40:
+ case WMI_TPC_PREAM_VHT40:
+ case WMI_TPC_PREAM_HE40:
+ case WMI_TPC_PREAM_EHT40:
+ *mode_idx = ATH12K_TPC_STATS_CTL_MODE_HT40_2GHZ;
+ break;
+ default:
+ /* for 2GHZ, default case will be CCK */
+ *mode_idx = ATH12K_TPC_STATS_CTL_MODE_CCK_2GHZ;
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static s16 ath12k_tpc_get_rate(struct ath12k *ar,
+ struct wmi_tpc_stats_arg *tpc_stats,
+ u32 rate_idx, u32 num_chains, u32 rate_code,
+ enum wmi_tpc_pream_bw pream_bw,
+ enum wmi_halphy_ctrl_path_stats_id type,
+ u32 eht_rate_idx)
+{
+ u32 tot_nss, tot_modes, txbf_on_off, index_offset1, index_offset2, index_offset3;
+ u8 chain_idx, stm_idx, num_streams;
+ bool is_mu, txbf_enabled = 0;
+ s8 rates_ctl_min, tpc_ctl;
+ s16 rates, tpc, reg_pwr;
+ u16 rate1, rate2;
+ int mode, ret;
+
+ num_streams = 1 + ATH12K_HW_NSS(rate_code);
+ chain_idx = num_chains - 1;
+ stm_idx = num_streams - 1;
+ mode = -1;
+
+ ret = ath12k_get_tpc_ctl_mode_idx(tpc_stats, pream_bw, &mode);
+ if (ret) {
+ ath12k_warn(ar->ab, "Invalid mode index received\n");
+ tpc = TPC_INVAL;
+ goto out;
+ }
+
+ if (num_chains < num_streams) {
+ tpc = TPC_INVAL;
+ goto out;
+ }
+
+ if (le32_to_cpu(tpc_stats->tpc_config.num_tx_chain) <= 1) {
+ tpc = TPC_INVAL;
+ goto out;
+ }
+
+ if (type == WMI_HALPHY_PDEV_TX_SUTXBF_STATS ||
+ type == WMI_HALPHY_PDEV_TX_MUTXBF_STATS)
+ txbf_enabled = 1;
+
+ if (type == WMI_HALPHY_PDEV_TX_MU_STATS ||
+ type == WMI_HALPHY_PDEV_TX_MUTXBF_STATS) {
+ is_mu = true;
+ } else {
+ is_mu = false;
+ }
+
+ /* Below is the min calculation across the ctl array, rates array and
+ * regulatory power table. tpc is the minimum of all three
+ */
+ if (pream_bw >= WMI_TPC_PREAM_EHT20 && pream_bw <= WMI_TPC_PREAM_EHT320) {
+ rate2 = tpc_stats->rates_array2.rate_array[eht_rate_idx];
+ if (is_mu)
+ rates = u32_get_bits(rate2, ATH12K_TPC_RATE_ARRAY_MU);
+ else
+ rates = u32_get_bits(rate2, ATH12K_TPC_RATE_ARRAY_SU);
+ } else {
+ rate1 = tpc_stats->rates_array1.rate_array[rate_idx];
+ if (is_mu)
+ rates = u32_get_bits(rate1, ATH12K_TPC_RATE_ARRAY_MU);
+ else
+ rates = u32_get_bits(rate1, ATH12K_TPC_RATE_ARRAY_SU);
+ }
+
+ if (tpc_stats->tlvs_rcvd & WMI_TPC_CTL_PWR_ARRAY) {
+ tot_nss = le32_to_cpu(tpc_stats->ctl_array.tpc_ctl_pwr.d1);
+ tot_modes = le32_to_cpu(tpc_stats->ctl_array.tpc_ctl_pwr.d2);
+ txbf_on_off = le32_to_cpu(tpc_stats->ctl_array.tpc_ctl_pwr.d3);
+ index_offset1 = txbf_on_off * tot_modes * tot_nss;
+ index_offset2 = tot_modes * tot_nss;
+ index_offset3 = tot_nss;
+
+ tpc_ctl = *(tpc_stats->ctl_array.ctl_pwr_table +
+ chain_idx * index_offset1 + txbf_enabled * index_offset2
+ + mode * index_offset3 + stm_idx);
+ } else {
+ tpc_ctl = TPC_MAX;
+ ath12k_warn(ar->ab,
+ "ctl array for tpc stats not received from fw\n");
+ }
+
+ rates_ctl_min = min_t(s16, rates, tpc_ctl);
+
+ reg_pwr = tpc_stats->max_reg_allowed_power.reg_pwr_array[chain_idx];
+
+ if (reg_pwr < 0)
+ reg_pwr = TPC_INVAL;
+
+ tpc = min_t(s16, rates_ctl_min, reg_pwr);
+
+ /* MODULATION_LIMIT is the maximum power limit; tpc should not exceed
+ * the modulation limit even if the min tpc across all three arrays is
+ * greater than the modulation limit
+ */
+ tpc = min_t(s16, tpc, MODULATION_LIMIT);
+
+out:
+ return tpc;
+}
+
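
The tpc value above is therefore the minimum of three inputs — the rates array entry (SU or MU field), the CTL table entry (TPC_MAX when the CTL TLV was not delivered), and the per-chain regulatory power — clamped to MODULATION_LIMIT. A worked example with made-up values, in the 0.25 dBm steps noted later in ath12k_tpc_stats_fill():

	rates = 88 (22 dBm), tpc_ctl = 96 (24 dBm), reg_pwr = 80 (20 dBm)
	tpc   = min(min(88, 96), 80) = 80, then min(80, MODULATION_LIMIT = 126) = 80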
+static u16 ath12k_get_ratecode(u16 pream_idx, u16 nss, u16 mcs_rate)
+{
+ u16 mode_type = ~0;
+
+ /* Below assignments are for printing purposes only */
+ switch (pream_idx) {
+ case WMI_TPC_PREAM_CCK:
+ mode_type = WMI_RATE_PREAMBLE_CCK;
+ break;
+ case WMI_TPC_PREAM_OFDM:
+ mode_type = WMI_RATE_PREAMBLE_OFDM;
+ break;
+ case WMI_TPC_PREAM_HT20:
+ case WMI_TPC_PREAM_HT40:
+ mode_type = WMI_RATE_PREAMBLE_HT;
+ break;
+ case WMI_TPC_PREAM_VHT20:
+ case WMI_TPC_PREAM_VHT40:
+ case WMI_TPC_PREAM_VHT80:
+ case WMI_TPC_PREAM_VHT160:
+ mode_type = WMI_RATE_PREAMBLE_VHT;
+ break;
+ case WMI_TPC_PREAM_HE20:
+ case WMI_TPC_PREAM_HE40:
+ case WMI_TPC_PREAM_HE80:
+ case WMI_TPC_PREAM_HE160:
+ mode_type = WMI_RATE_PREAMBLE_HE;
+ break;
+ case WMI_TPC_PREAM_EHT20:
+ case WMI_TPC_PREAM_EHT40:
+ case WMI_TPC_PREAM_EHT60:
+ case WMI_TPC_PREAM_EHT80:
+ case WMI_TPC_PREAM_EHT120:
+ case WMI_TPC_PREAM_EHT140:
+ case WMI_TPC_PREAM_EHT160:
+ case WMI_TPC_PREAM_EHT200:
+ case WMI_TPC_PREAM_EHT240:
+ case WMI_TPC_PREAM_EHT280:
+ case WMI_TPC_PREAM_EHT320:
+ mode_type = WMI_RATE_PREAMBLE_EHT;
+ if (mcs_rate == 0 || mcs_rate == 1)
+ mcs_rate += 14;
+ else
+ mcs_rate -= 2;
+ break;
+ default:
+ return mode_type;
+ }
+ return ((mode_type << 8) | ((nss & 0x7) << 5) | (mcs_rate & 0x1F));
+}
+
+static bool ath12k_he_supports_extra_mcs(struct ath12k *ar, int freq)
+{
+ struct ath12k_pdev_cap *cap = &ar->pdev->cap;
+ struct ath12k_band_cap *cap_band;
+ bool extra_mcs_supported;
+
+ if (freq <= ATH12K_2GHZ_MAX_FREQUENCY)
+ cap_band = &cap->band[NL80211_BAND_2GHZ];
+ else if (freq <= ATH12K_5GHZ_MAX_FREQUENCY)
+ cap_band = &cap->band[NL80211_BAND_5GHZ];
+ else
+ cap_band = &cap->band[NL80211_BAND_6GHZ];
+
+ extra_mcs_supported = u32_get_bits(cap_band->he_cap_info[1],
+ HE_EXTRA_MCS_SUPPORT);
+ return extra_mcs_supported;
+}
+
+static int ath12k_tpc_fill_pream(struct ath12k *ar, char *buf, int buf_len, int len,
+ enum wmi_tpc_pream_bw pream_bw, u32 max_rix,
+ int max_nss, int max_rates, int pream_type,
+ enum wmi_halphy_ctrl_path_stats_id tpc_type,
+ int rate_idx, int eht_rate_idx)
+{
+ struct wmi_tpc_stats_arg *tpc_stats = ar->debug.tpc_stats;
+ int nss, rates, chains;
+ u8 active_tx_chains;
+ u16 rate_code;
+ s16 tpc;
+
+ static const char *const pream_str[] = {
+ [WMI_TPC_PREAM_CCK] = "CCK",
+ [WMI_TPC_PREAM_OFDM] = "OFDM",
+ [WMI_TPC_PREAM_HT20] = "HT20",
+ [WMI_TPC_PREAM_HT40] = "HT40",
+ [WMI_TPC_PREAM_VHT20] = "VHT20",
+ [WMI_TPC_PREAM_VHT40] = "VHT40",
+ [WMI_TPC_PREAM_VHT80] = "VHT80",
+ [WMI_TPC_PREAM_VHT160] = "VHT160",
+ [WMI_TPC_PREAM_HE20] = "HE20",
+ [WMI_TPC_PREAM_HE40] = "HE40",
+ [WMI_TPC_PREAM_HE80] = "HE80",
+ [WMI_TPC_PREAM_HE160] = "HE160",
+ [WMI_TPC_PREAM_EHT20] = "EHT20",
+ [WMI_TPC_PREAM_EHT40] = "EHT40",
+ [WMI_TPC_PREAM_EHT60] = "EHT60",
+ [WMI_TPC_PREAM_EHT80] = "EHT80",
+ [WMI_TPC_PREAM_EHT120] = "EHT120",
+ [WMI_TPC_PREAM_EHT140] = "EHT140",
+ [WMI_TPC_PREAM_EHT160] = "EHT160",
+ [WMI_TPC_PREAM_EHT200] = "EHT200",
+ [WMI_TPC_PREAM_EHT240] = "EHT240",
+ [WMI_TPC_PREAM_EHT280] = "EHT280",
+ [WMI_TPC_PREAM_EHT320] = "EHT320"};
+
+ active_tx_chains = ar->num_tx_chains;
+
+ for (nss = 0; nss < max_nss; nss++) {
+ for (rates = 0; rates < max_rates; rates++, rate_idx++, max_rix++) {
+ /* FW sends extra MCS (10 & 11) for VHT and HE rates,
+ * which are not used. Hence skip them here
+ */
+ if (pream_type == WMI_RATE_PREAMBLE_VHT &&
+ rates > ATH12K_VHT_MCS_MAX)
+ continue;
+
+ if (pream_type == WMI_RATE_PREAMBLE_HE &&
+ rates > ATH12K_HE_MCS_MAX)
+ continue;
+
+ if (pream_type == WMI_RATE_PREAMBLE_EHT &&
+ rates > ATH12K_EHT_MCS_MAX)
+ continue;
+
+ rate_code = ath12k_get_ratecode(pream_bw, nss, rates);
+ len += scnprintf(buf + len, buf_len - len,
+ "%d\t %s\t 0x%03x\t", max_rix,
+ pream_str[pream_bw], rate_code);
+
+ for (chains = 0; chains < active_tx_chains; chains++) {
+ if (nss > chains) {
+ len += scnprintf(buf + len,
+ buf_len - len,
+ "\t%s", "NA");
+ } else {
+ tpc = ath12k_tpc_get_rate(ar, tpc_stats,
+ rate_idx, chains + 1,
+ rate_code, pream_bw,
+ tpc_type,
+ eht_rate_idx);
+
+ if (tpc == TPC_INVAL) {
+ len += scnprintf(buf + len,
+ buf_len - len, "\tNA");
+ } else {
+ len += scnprintf(buf + len,
+ buf_len - len, "\t%d",
+ tpc);
+ }
+ }
+ }
+ len += scnprintf(buf + len, buf_len - len, "\n");
+
+ if (pream_type == WMI_RATE_PREAMBLE_EHT)
+ /* For fetching the next EHT rate's power from rates array2 */
+ ++eht_rate_idx;
+ }
+ }
+
+ return len;
+}
+
+static int ath12k_tpc_stats_print(struct ath12k *ar,
+ struct wmi_tpc_stats_arg *tpc_stats,
+ char *buf, size_t len,
+ enum wmi_halphy_ctrl_path_stats_id type)
+{
+ u32 eht_idx = 0, pream_idx = 0, rate_pream_idx = 0, total_rates = 0, max_rix = 0;
+ u32 chan_freq, num_tx_chain, caps, i, j = 1;
+ size_t buf_len = ATH12K_TPC_STATS_BUF_SIZE;
+ u8 nss, active_tx_chains;
+ bool he_ext_mcs;
+ static const char *const type_str[WMI_HALPHY_PDEV_TX_STATS_MAX] = {
+ [WMI_HALPHY_PDEV_TX_SU_STATS] = "SU",
+ [WMI_HALPHY_PDEV_TX_SUTXBF_STATS] = "SU WITH TXBF",
+ [WMI_HALPHY_PDEV_TX_MU_STATS] = "MU",
+ [WMI_HALPHY_PDEV_TX_MUTXBF_STATS] = "MU WITH TXBF"};
+
+ u8 max_rates[WMI_TPC_PREAM_MAX] = {
+ [WMI_TPC_PREAM_CCK] = ATH12K_CCK_RATES,
+ [WMI_TPC_PREAM_OFDM] = ATH12K_OFDM_RATES,
+ [WMI_TPC_PREAM_HT20] = ATH12K_HT_RATES,
+ [WMI_TPC_PREAM_HT40] = ATH12K_HT_RATES,
+ [WMI_TPC_PREAM_VHT20] = ATH12K_VHT_RATES,
+ [WMI_TPC_PREAM_VHT40] = ATH12K_VHT_RATES,
+ [WMI_TPC_PREAM_VHT80] = ATH12K_VHT_RATES,
+ [WMI_TPC_PREAM_VHT160] = ATH12K_VHT_RATES,
+ [WMI_TPC_PREAM_HE20] = ATH12K_HE_RATES,
+ [WMI_TPC_PREAM_HE40] = ATH12K_HE_RATES,
+ [WMI_TPC_PREAM_HE80] = ATH12K_HE_RATES,
+ [WMI_TPC_PREAM_HE160] = ATH12K_HE_RATES,
+ [WMI_TPC_PREAM_EHT20] = ATH12K_EHT_RATES,
+ [WMI_TPC_PREAM_EHT40] = ATH12K_EHT_RATES,
+ [WMI_TPC_PREAM_EHT60] = ATH12K_EHT_RATES,
+ [WMI_TPC_PREAM_EHT80] = ATH12K_EHT_RATES,
+ [WMI_TPC_PREAM_EHT120] = ATH12K_EHT_RATES,
+ [WMI_TPC_PREAM_EHT140] = ATH12K_EHT_RATES,
+ [WMI_TPC_PREAM_EHT160] = ATH12K_EHT_RATES,
+ [WMI_TPC_PREAM_EHT200] = ATH12K_EHT_RATES,
+ [WMI_TPC_PREAM_EHT240] = ATH12K_EHT_RATES,
+ [WMI_TPC_PREAM_EHT280] = ATH12K_EHT_RATES,
+ [WMI_TPC_PREAM_EHT320] = ATH12K_EHT_RATES};
+ static const u8 max_nss[WMI_TPC_PREAM_MAX] = {
+ [WMI_TPC_PREAM_CCK] = ATH12K_NSS_1,
+ [WMI_TPC_PREAM_OFDM] = ATH12K_NSS_1,
+ [WMI_TPC_PREAM_HT20] = ATH12K_NSS_4,
+ [WMI_TPC_PREAM_HT40] = ATH12K_NSS_4,
+ [WMI_TPC_PREAM_VHT20] = ATH12K_NSS_8,
+ [WMI_TPC_PREAM_VHT40] = ATH12K_NSS_8,
+ [WMI_TPC_PREAM_VHT80] = ATH12K_NSS_8,
+ [WMI_TPC_PREAM_VHT160] = ATH12K_NSS_4,
+ [WMI_TPC_PREAM_HE20] = ATH12K_NSS_8,
+ [WMI_TPC_PREAM_HE40] = ATH12K_NSS_8,
+ [WMI_TPC_PREAM_HE80] = ATH12K_NSS_8,
+ [WMI_TPC_PREAM_HE160] = ATH12K_NSS_4,
+ [WMI_TPC_PREAM_EHT20] = ATH12K_NSS_4,
+ [WMI_TPC_PREAM_EHT40] = ATH12K_NSS_4,
+ [WMI_TPC_PREAM_EHT60] = ATH12K_NSS_4,
+ [WMI_TPC_PREAM_EHT80] = ATH12K_NSS_4,
+ [WMI_TPC_PREAM_EHT120] = ATH12K_NSS_4,
+ [WMI_TPC_PREAM_EHT140] = ATH12K_NSS_4,
+ [WMI_TPC_PREAM_EHT160] = ATH12K_NSS_4,
+ [WMI_TPC_PREAM_EHT200] = ATH12K_NSS_4,
+ [WMI_TPC_PREAM_EHT240] = ATH12K_NSS_4,
+ [WMI_TPC_PREAM_EHT280] = ATH12K_NSS_4,
+ [WMI_TPC_PREAM_EHT320] = ATH12K_NSS_4};
+
+ u16 rate_idx[WMI_TPC_PREAM_MAX] = {}, eht_rate_idx[WMI_TPC_PREAM_MAX] = {};
+ static const u8 pream_type[WMI_TPC_PREAM_MAX] = {
+ [WMI_TPC_PREAM_CCK] = WMI_RATE_PREAMBLE_CCK,
+ [WMI_TPC_PREAM_OFDM] = WMI_RATE_PREAMBLE_OFDM,
+ [WMI_TPC_PREAM_HT20] = WMI_RATE_PREAMBLE_HT,
+ [WMI_TPC_PREAM_HT40] = WMI_RATE_PREAMBLE_HT,
+ [WMI_TPC_PREAM_VHT20] = WMI_RATE_PREAMBLE_VHT,
+ [WMI_TPC_PREAM_VHT40] = WMI_RATE_PREAMBLE_VHT,
+ [WMI_TPC_PREAM_VHT80] = WMI_RATE_PREAMBLE_VHT,
+ [WMI_TPC_PREAM_VHT160] = WMI_RATE_PREAMBLE_VHT,
+ [WMI_TPC_PREAM_HE20] = WMI_RATE_PREAMBLE_HE,
+ [WMI_TPC_PREAM_HE40] = WMI_RATE_PREAMBLE_HE,
+ [WMI_TPC_PREAM_HE80] = WMI_RATE_PREAMBLE_HE,
+ [WMI_TPC_PREAM_HE160] = WMI_RATE_PREAMBLE_HE,
+ [WMI_TPC_PREAM_EHT20] = WMI_RATE_PREAMBLE_EHT,
+ [WMI_TPC_PREAM_EHT40] = WMI_RATE_PREAMBLE_EHT,
+ [WMI_TPC_PREAM_EHT60] = WMI_RATE_PREAMBLE_EHT,
+ [WMI_TPC_PREAM_EHT80] = WMI_RATE_PREAMBLE_EHT,
+ [WMI_TPC_PREAM_EHT120] = WMI_RATE_PREAMBLE_EHT,
+ [WMI_TPC_PREAM_EHT140] = WMI_RATE_PREAMBLE_EHT,
+ [WMI_TPC_PREAM_EHT160] = WMI_RATE_PREAMBLE_EHT,
+ [WMI_TPC_PREAM_EHT200] = WMI_RATE_PREAMBLE_EHT,
+ [WMI_TPC_PREAM_EHT240] = WMI_RATE_PREAMBLE_EHT,
+ [WMI_TPC_PREAM_EHT280] = WMI_RATE_PREAMBLE_EHT,
+ [WMI_TPC_PREAM_EHT320] = WMI_RATE_PREAMBLE_EHT};
+
+ chan_freq = le32_to_cpu(tpc_stats->tpc_config.chan_freq);
+ num_tx_chain = le32_to_cpu(tpc_stats->tpc_config.num_tx_chain);
+ caps = le32_to_cpu(tpc_stats->tpc_config.caps);
+
+ active_tx_chains = ar->num_tx_chains;
+ he_ext_mcs = ath12k_he_supports_extra_mcs(ar, chan_freq);
+
+ /* MCS 12 & 13 are sent by FW for certain HWs in the rate array; skip
+ * them as they are not supported
+ */
+ if (he_ext_mcs) {
+ for (i = WMI_TPC_PREAM_HE20; i <= WMI_TPC_PREAM_HE160; ++i)
+ max_rates[i] = ATH12K_HE_RATES_WITH_EXTRA_MCS;
+ }
+
+ if (type == WMI_HALPHY_PDEV_TX_MU_STATS ||
+ type == WMI_HALPHY_PDEV_TX_MUTXBF_STATS) {
+ pream_idx = WMI_TPC_PREAM_VHT20;
+
+ for (i = WMI_TPC_PREAM_CCK; i <= WMI_TPC_PREAM_HT40; ++i)
+ max_rix += max_nss[i] * max_rates[i];
+ }
+ /* Enumerate all the rate indices */
+ for (i = rate_pream_idx + 1; i < WMI_TPC_PREAM_MAX; i++) {
+ nss = (max_nss[i - 1] < num_tx_chain ?
+ max_nss[i - 1] : num_tx_chain);
+
+ rate_idx[i] = rate_idx[i - 1] + max_rates[i - 1] * nss;
+
+ if (pream_type[i] == WMI_RATE_PREAMBLE_EHT) {
+ eht_rate_idx[j] = eht_rate_idx[j - 1] + max_rates[i] * nss;
+ ++j;
+ }
+ }
+
+ for (i = 0; i < WMI_TPC_PREAM_MAX; i++) {
+ nss = (max_nss[i] < num_tx_chain ?
+ max_nss[i] : num_tx_chain);
+ total_rates += max_rates[i] * nss;
+ }
+
+ len += scnprintf(buf + len, buf_len - len,
+ "No.of rates-%d\n", total_rates);
+
+ len += scnprintf(buf + len, buf_len - len,
+ "**************** %s ****************\n",
+ type_str[type]);
+ len += scnprintf(buf + len, buf_len - len,
+ "\t\t\t\tTPC values for Active chains\n");
+ len += scnprintf(buf + len, buf_len - len,
+ "Rate idx Preamble Rate code");
+
+ for (i = 1; i <= active_tx_chains; ++i) {
+ len += scnprintf(buf + len, buf_len - len,
+ "\t%d-Chain", i);
+ }
+
+ len += scnprintf(buf + len, buf_len - len, "\n");
+ for (i = pream_idx; i < WMI_TPC_PREAM_MAX; i++) {
+ if (chan_freq <= 2483) {
+ if (i == WMI_TPC_PREAM_VHT80 ||
+ i == WMI_TPC_PREAM_VHT160 ||
+ i == WMI_TPC_PREAM_HE80 ||
+ i == WMI_TPC_PREAM_HE160 ||
+ (i >= WMI_TPC_PREAM_EHT60 &&
+ i <= WMI_TPC_PREAM_EHT320)) {
+ max_rix += max_nss[i] * max_rates[i];
+ continue;
+ }
+ } else {
+ if (i == WMI_TPC_PREAM_CCK) {
+ max_rix += max_rates[i];
+ continue;
+ }
+ }
+
+ nss = (max_nss[i] < ar->num_tx_chains ? max_nss[i] : ar->num_tx_chains);
+
+ if (!(caps &
+ (1 << ATH12K_TPC_STATS_SUPPORT_BE_PUNC))) {
+ if (i == WMI_TPC_PREAM_EHT60 || i == WMI_TPC_PREAM_EHT120 ||
+ i == WMI_TPC_PREAM_EHT140 || i == WMI_TPC_PREAM_EHT200 ||
+ i == WMI_TPC_PREAM_EHT240 || i == WMI_TPC_PREAM_EHT280) {
+ max_rix += max_nss[i] * max_rates[i];
+ continue;
+ }
+ }
+
+ len = ath12k_tpc_fill_pream(ar, buf, buf_len, len, i, max_rix, nss,
+ max_rates[i], pream_type[i],
+ type, rate_idx[i], eht_rate_idx[eht_idx]);
+
+ if (pream_type[i] == WMI_RATE_PREAMBLE_EHT)
+ /* For fetching the next index of EHT rates from rates array2 */
+ ++eht_idx;
+
+ max_rix += max_nss[i] * max_rates[i];
+ }
+ return len;
+}
+
+static void ath12k_tpc_stats_fill(struct ath12k *ar,
+ struct wmi_tpc_stats_arg *tpc_stats,
+ char *buf)
+{
+ size_t buf_len = ATH12K_TPC_STATS_BUF_SIZE;
+ struct wmi_tpc_config_params *tpc;
+ size_t len = 0;
+
+ if (!tpc_stats) {
+ ath12k_warn(ar->ab, "failed to find tpc stats\n");
+ return;
+ }
+
+ spin_lock_bh(&ar->data_lock);
+
+ tpc = &tpc_stats->tpc_config;
+ len += scnprintf(buf + len, buf_len - len, "\n");
+ len += scnprintf(buf + len, buf_len - len,
+ "*************** TPC config **************\n");
+ len += scnprintf(buf + len, buf_len - len,
+ "* powers are in 0.25 dBm steps\n");
+ len += scnprintf(buf + len, buf_len - len,
+ "reg domain-%d\t\tchan freq-%d\n",
+ tpc->reg_domain, tpc->chan_freq);
+ len += scnprintf(buf + len, buf_len - len,
+ "power limit-%d\t\tmax reg-domain Power-%d\n",
+ le32_to_cpu(tpc->twice_max_reg_power) / 2, tpc->power_limit);
+ len += scnprintf(buf + len, buf_len - len,
+ "No.of tx chain-%d\t",
+ ar->num_tx_chains);
+
+ ath12k_tpc_stats_print(ar, tpc_stats, buf, len,
+ ar->debug.tpc_stats_type);
+
+ spin_unlock_bh(&ar->data_lock);
+}
+
+static int ath12k_open_tpc_stats(struct inode *inode, struct file *file)
+{
+ struct ath12k *ar = inode->i_private;
+ struct ath12k_hw *ah = ath12k_ar_to_ah(ar);
+ int ret;
+
+ guard(wiphy)(ath12k_ar_to_hw(ar)->wiphy);
+
+ if (ah->state != ATH12K_HW_STATE_ON) {
+ ath12k_warn(ar->ab, "Interface not up\n");
+ return -ENETDOWN;
+ }
+
+ void *buf __free(kfree) = kzalloc(ATH12K_TPC_STATS_BUF_SIZE, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ ret = ath12k_debug_tpc_stats_request(ar);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to request tpc stats: %d\n",
+ ret);
+ return ret;
+ }
+
+ if (!wait_for_completion_timeout(&ar->debug.tpc_complete, TPC_STATS_WAIT_TIME)) {
+ spin_lock_bh(&ar->data_lock);
+ ath12k_wmi_free_tpc_stats_mem(ar);
+ ar->debug.tpc_request = false;
+ spin_unlock_bh(&ar->data_lock);
+ return -ETIMEDOUT;
+ }
+
+ ath12k_tpc_stats_fill(ar, ar->debug.tpc_stats, buf);
+ file->private_data = no_free_ptr(buf);
+
+ spin_lock_bh(&ar->data_lock);
+ ath12k_wmi_free_tpc_stats_mem(ar);
+ spin_unlock_bh(&ar->data_lock);
+
+ return 0;
+}
+
+static ssize_t ath12k_read_tpc_stats(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ const char *buf = file->private_data;
+ size_t len = strlen(buf);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static int ath12k_release_tpc_stats(struct inode *inode,
+ struct file *file)
+{
+ kfree(file->private_data);
+ return 0;
+}
+
+static const struct file_operations fops_tpc_stats = {
+ .open = ath12k_open_tpc_stats,
+ .release = ath12k_release_tpc_stats,
+ .read = ath12k_read_tpc_stats,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static const struct file_operations fops_tpc_stats_type = {
+ .write = ath12k_write_tpc_stats_type,
+ .open = simple_open,
+ .llseek = default_llseek,
+};
+
+static ssize_t ath12k_write_extd_rx_stats(struct file *file,
+ const char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct ath12k *ar = file->private_data;
+ struct htt_rx_ring_tlv_filter tlv_filter = {0};
+ u32 ring_id, rx_filter = 0;
+ bool enable;
+ int ret, i;
+
+ if (kstrtobool_from_user(ubuf, count, &enable))
+ return -EINVAL;
+
+ wiphy_lock(ath12k_ar_to_hw(ar)->wiphy);
+
+ if (!ar->ab->hw_params->rxdma1_enable) {
+ ret = count;
+ goto exit;
+ }
+
+ if (ar->ah->state != ATH12K_HW_STATE_ON) {
+ ret = -ENETDOWN;
+ goto exit;
+ }
+
+ if (enable == ar->debug.extd_rx_stats) {
+ ret = count;
+ goto exit;
+ }
+
+ if (enable) {
+ rx_filter = HTT_RX_FILTER_TLV_FLAGS_MPDU_START;
+ rx_filter |= HTT_RX_FILTER_TLV_FLAGS_PPDU_START;
+ rx_filter |= HTT_RX_FILTER_TLV_FLAGS_PPDU_END;
+ rx_filter |= HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS;
+ rx_filter |= HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS_EXT;
+ rx_filter |= HTT_RX_FILTER_TLV_FLAGS_PPDU_END_STATUS_DONE;
+ rx_filter |= HTT_RX_FILTER_TLV_FLAGS_PPDU_START_USER_INFO;
+
+ tlv_filter.rx_filter = rx_filter;
+ tlv_filter.pkt_filter_flags0 = HTT_RX_FP_MGMT_FILTER_FLAGS0;
+ tlv_filter.pkt_filter_flags1 = HTT_RX_FP_MGMT_FILTER_FLAGS1;
+ tlv_filter.pkt_filter_flags2 = HTT_RX_FP_CTRL_FILTER_FLASG2;
+ tlv_filter.pkt_filter_flags3 = HTT_RX_FP_CTRL_FILTER_FLASG3 |
+ HTT_RX_FP_DATA_FILTER_FLASG3;
+ } else {
+ tlv_filter = ath12k_mac_mon_status_filter_default;
+ }
+
+ ar->debug.rx_filter = tlv_filter.rx_filter;
+
+ for (i = 0; i < ar->ab->hw_params->num_rxdma_per_pdev; i++) {
+ ring_id = ar->dp.rxdma_mon_dst_ring[i].ring_id;
+ ret = ath12k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id, ar->dp.mac_id + i,
+ HAL_RXDMA_MONITOR_DST,
+ DP_RXDMA_REFILL_RING_SIZE,
+ &tlv_filter);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to set rx filter for monitor status ring\n");
+ goto exit;
+ }
+ }
+
+ ar->debug.extd_rx_stats = !!enable;
+ ret = count;
+exit:
+ wiphy_unlock(ath12k_ar_to_hw(ar)->wiphy);
+ return ret;
+}
+
+static ssize_t ath12k_read_extd_rx_stats(struct file *file,
+ char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct ath12k *ar = file->private_data;
+ char buf[32];
+ int len = 0;
+
+ wiphy_lock(ath12k_ar_to_hw(ar)->wiphy);
+ len = scnprintf(buf, sizeof(buf) - len, "%d\n",
+ ar->debug.extd_rx_stats);
+ wiphy_unlock(ath12k_ar_to_hw(ar)->wiphy);
+
+ return simple_read_from_buffer(ubuf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_extd_rx_stats = {
+ .read = ath12k_read_extd_rx_stats,
+ .write = ath12k_write_extd_rx_stats,
+ .open = simple_open,
+};
+
void ath12k_debugfs_soc_create(struct ath12k_base *ab)
{
bool dput_needed;
@@ -68,6 +870,382 @@ void ath12k_debugfs_soc_destroy(struct ath12k_base *ab)
*/
}
+static void ath12k_fw_stats_pdevs_free(struct list_head *head)
+{
+ struct ath12k_fw_stats_pdev *i, *tmp;
+
+ list_for_each_entry_safe(i, tmp, head, list) {
+ list_del(&i->list);
+ kfree(i);
+ }
+}
+
+static void ath12k_fw_stats_bcn_free(struct list_head *head)
+{
+ struct ath12k_fw_stats_bcn *i, *tmp;
+
+ list_for_each_entry_safe(i, tmp, head, list) {
+ list_del(&i->list);
+ kfree(i);
+ }
+}
+
+static void ath12k_fw_stats_vdevs_free(struct list_head *head)
+{
+ struct ath12k_fw_stats_vdev *i, *tmp;
+
+ list_for_each_entry_safe(i, tmp, head, list) {
+ list_del(&i->list);
+ kfree(i);
+ }
+}
+
+void ath12k_debugfs_fw_stats_reset(struct ath12k *ar)
+{
+ spin_lock_bh(&ar->data_lock);
+ ar->fw_stats.fw_stats_done = false;
+ ath12k_fw_stats_vdevs_free(&ar->fw_stats.vdevs);
+ ath12k_fw_stats_bcn_free(&ar->fw_stats.bcn);
+ ath12k_fw_stats_pdevs_free(&ar->fw_stats.pdevs);
+ spin_unlock_bh(&ar->data_lock);
+}
+
+static int ath12k_debugfs_fw_stats_request(struct ath12k *ar,
+ struct ath12k_fw_stats_req_params *param)
+{
+ struct ath12k_base *ab = ar->ab;
+ unsigned long timeout, time_left;
+ int ret;
+
+ lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
+
+ /* FW stats can get split when exceeding the stats data buffer limit.
+ * In that case, since there is no end marking for the back-to-back
+ * received 'update stats' events, we keep a 3 second timeout in case
+ * fw_stats_done is not marked yet
+ */
+ timeout = jiffies + msecs_to_jiffies(3 * 1000);
+
+ ath12k_debugfs_fw_stats_reset(ar);
+
+ reinit_completion(&ar->fw_stats_complete);
+
+ ret = ath12k_wmi_send_stats_request_cmd(ar, param->stats_id,
+ param->vdev_id, param->pdev_id);
+
+ if (ret) {
+ ath12k_warn(ab, "could not request fw stats (%d)\n",
+ ret);
+ return ret;
+ }
+
+ time_left = wait_for_completion_timeout(&ar->fw_stats_complete,
+ 1 * HZ);
+ /* If the wait timed out, return -ETIMEDOUT */
+ if (!time_left)
+ return -ETIMEDOUT;
+
+ /* Firmware sends WMI_UPDATE_STATS_EVENTID back-to-back
+ * when stats data buffer limit is reached. fw_stats_complete
+ * is completed once the host receives the first event from firmware, but
+ * the end still might not be marked in the TLV.
+ * Below loop is to confirm that firmware completed sending all the events
+ * and fw_stats_done is marked true when the end is marked in the TLV
+ */
+ for (;;) {
+ if (time_after(jiffies, timeout))
+ break;
+
+ spin_lock_bh(&ar->data_lock);
+ if (ar->fw_stats.fw_stats_done) {
+ spin_unlock_bh(&ar->data_lock);
+ break;
+ }
+ spin_unlock_bh(&ar->data_lock);
+ }
+ return 0;
+}
+
+void
+ath12k_debugfs_fw_stats_process(struct ath12k *ar,
+ struct ath12k_fw_stats *stats)
+{
+ struct ath12k_base *ab = ar->ab;
+ struct ath12k_pdev *pdev;
+ bool is_end;
+ static unsigned int num_vdev, num_bcn;
+ size_t total_vdevs_started = 0;
+ int i;
+
+ if (stats->stats_id == WMI_REQUEST_VDEV_STAT) {
+ if (list_empty(&stats->vdevs)) {
+ ath12k_warn(ab, "empty vdev stats");
+ return;
+ }
+ /* FW sends all the active VDEV stats irrespective of PDEV,
+ * hence limit until the count of all started VDEVs is reached
+ */
+ rcu_read_lock();
+ for (i = 0; i < ab->num_radios; i++) {
+ pdev = rcu_dereference(ab->pdevs_active[i]);
+ if (pdev && pdev->ar)
+ total_vdevs_started += pdev->ar->num_started_vdevs;
+ }
+ rcu_read_unlock();
+
+ is_end = ((++num_vdev) == total_vdevs_started);
+
+ list_splice_tail_init(&stats->vdevs,
+ &ar->fw_stats.vdevs);
+
+ if (is_end) {
+ ar->fw_stats.fw_stats_done = true;
+ num_vdev = 0;
+ }
+ return;
+ }
+ if (stats->stats_id == WMI_REQUEST_BCN_STAT) {
+ if (list_empty(&stats->bcn)) {
+ ath12k_warn(ab, "empty beacon stats");
+ return;
+ }
+ /* Mark the end once we have reached the count of all started VDEVs
+ * within the PDEV
+ */
+ is_end = ((++num_bcn) == ar->num_started_vdevs);
+
+ list_splice_tail_init(&stats->bcn,
+ &ar->fw_stats.bcn);
+
+ if (is_end) {
+ ar->fw_stats.fw_stats_done = true;
+ num_bcn = 0;
+ }
+ }
+ if (stats->stats_id == WMI_REQUEST_PDEV_STAT) {
+ list_splice_tail_init(&stats->pdevs, &ar->fw_stats.pdevs);
+ ar->fw_stats.fw_stats_done = true;
+ }
+}
+
+static int ath12k_open_vdev_stats(struct inode *inode, struct file *file)
+{
+ struct ath12k *ar = inode->i_private;
+ struct ath12k_fw_stats_req_params param;
+ struct ath12k_hw *ah = ath12k_ar_to_ah(ar);
+ int ret;
+
+ guard(wiphy)(ath12k_ar_to_hw(ar)->wiphy);
+
+ if (!ah)
+ return -ENETDOWN;
+
+ if (ah->state != ATH12K_HW_STATE_ON)
+ return -ENETDOWN;
+
+ void *buf __free(kfree) = kzalloc(ATH12K_FW_STATS_BUF_SIZE, GFP_ATOMIC);
+ if (!buf)
+ return -ENOMEM;
+
+ param.pdev_id = ath12k_mac_get_target_pdev_id(ar);
+ /* VDEV stats are always sent for all active VDEVs from FW */
+ param.vdev_id = 0;
+ param.stats_id = WMI_REQUEST_VDEV_STAT;
+
+ ret = ath12k_debugfs_fw_stats_request(ar, &param);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to request fw vdev stats: %d\n", ret);
+ return ret;
+ }
+
+ ath12k_wmi_fw_stats_dump(ar, &ar->fw_stats, param.stats_id,
+ buf);
+
+ file->private_data = no_free_ptr(buf);
+
+ return 0;
+}
+
+static int ath12k_release_vdev_stats(struct inode *inode, struct file *file)
+{
+ kfree(file->private_data);
+
+ return 0;
+}
+
+static ssize_t ath12k_read_vdev_stats(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ const char *buf = file->private_data;
+ size_t len = strlen(buf);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_vdev_stats = {
+ .open = ath12k_open_vdev_stats,
+ .release = ath12k_release_vdev_stats,
+ .read = ath12k_read_vdev_stats,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static int ath12k_open_bcn_stats(struct inode *inode, struct file *file)
+{
+ struct ath12k *ar = inode->i_private;
+ struct ath12k_link_vif *arvif;
+ struct ath12k_fw_stats_req_params param;
+ struct ath12k_hw *ah = ath12k_ar_to_ah(ar);
+ int ret;
+
+ guard(wiphy)(ath12k_ar_to_hw(ar)->wiphy);
+
+ if (ah && ah->state != ATH12K_HW_STATE_ON)
+ return -ENETDOWN;
+
+ void *buf __free(kfree) = kzalloc(ATH12K_FW_STATS_BUF_SIZE, GFP_ATOMIC);
+ if (!buf)
+ return -ENOMEM;
+
+ param.pdev_id = ath12k_mac_get_target_pdev_id(ar);
+ param.stats_id = WMI_REQUEST_BCN_STAT;
+
+ /* loop over all active VDEVs for bcn stats */
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ if (!arvif->is_up)
+ continue;
+
+ param.vdev_id = arvif->vdev_id;
+ ret = ath12k_debugfs_fw_stats_request(ar, &param);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to request fw bcn stats: %d\n", ret);
+ return ret;
+ }
+ }
+
+ ath12k_wmi_fw_stats_dump(ar, &ar->fw_stats, param.stats_id,
+ buf);
+ /* since the beacon stats request is looped over all active VDEVs, the
+ * saved fw stats are not freed per request until all active VDEVs are done
+ */
+ spin_lock_bh(&ar->data_lock);
+ ath12k_fw_stats_bcn_free(&ar->fw_stats.bcn);
+ spin_unlock_bh(&ar->data_lock);
+
+ file->private_data = no_free_ptr(buf);
+
+ return 0;
+}
+
+static int ath12k_release_bcn_stats(struct inode *inode, struct file *file)
+{
+ kfree(file->private_data);
+
+ return 0;
+}
+
+static ssize_t ath12k_read_bcn_stats(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ const char *buf = file->private_data;
+ size_t len = strlen(buf);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_bcn_stats = {
+ .open = ath12k_open_bcn_stats,
+ .release = ath12k_release_bcn_stats,
+ .read = ath12k_read_bcn_stats,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static int ath12k_open_pdev_stats(struct inode *inode, struct file *file)
+{
+ struct ath12k *ar = inode->i_private;
+ struct ath12k_hw *ah = ath12k_ar_to_ah(ar);
+ struct ath12k_base *ab = ar->ab;
+ struct ath12k_fw_stats_req_params param;
+ int ret;
+
+ guard(wiphy)(ath12k_ar_to_hw(ar)->wiphy);
+
+ if (ah && ah->state != ATH12K_HW_STATE_ON)
+ return -ENETDOWN;
+
+ void *buf __free(kfree) = kzalloc(ATH12K_FW_STATS_BUF_SIZE, GFP_ATOMIC);
+ if (!buf)
+ return -ENOMEM;
+
+ param.pdev_id = ath12k_mac_get_target_pdev_id(ar);
+ param.vdev_id = 0;
+ param.stats_id = WMI_REQUEST_PDEV_STAT;
+
+ ret = ath12k_debugfs_fw_stats_request(ar, &param);
+ if (ret) {
+ ath12k_warn(ab, "failed to request fw pdev stats: %d\n", ret);
+ return ret;
+ }
+
+ ath12k_wmi_fw_stats_dump(ar, &ar->fw_stats, param.stats_id,
+ buf);
+
+ file->private_data = no_free_ptr(buf);
+
+ return 0;
+}
+
+static int ath12k_release_pdev_stats(struct inode *inode, struct file *file)
+{
+ kfree(file->private_data);
+
+ return 0;
+}
+
+static ssize_t ath12k_read_pdev_stats(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ const char *buf = file->private_data;
+ size_t len = strlen(buf);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_pdev_stats = {
+ .open = ath12k_open_pdev_stats,
+ .release = ath12k_release_pdev_stats,
+ .read = ath12k_read_pdev_stats,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static
+void ath12k_debugfs_fw_stats_register(struct ath12k *ar)
+{
+ struct dentry *fwstats_dir = debugfs_create_dir("fw_stats",
+ ar->debug.debugfs_pdev);
+
+ /* all stats debugfs files are created under the "fw_stats" directory,
+ * which is created per PDEV
+ */
+ debugfs_create_file("vdev_stats", 0600, fwstats_dir, ar,
+ &fops_vdev_stats);
+ debugfs_create_file("beacon_stats", 0600, fwstats_dir, ar,
+ &fops_bcn_stats);
+ debugfs_create_file("pdev_stats", 0600, fwstats_dir, ar,
+ &fops_pdev_stats);
+
+ INIT_LIST_HEAD(&ar->fw_stats.vdevs);
+ INIT_LIST_HEAD(&ar->fw_stats.bcn);
+ INIT_LIST_HEAD(&ar->fw_stats.pdevs);
+
+ init_completion(&ar->fw_stats_complete);
+}
+
void ath12k_debugfs_register(struct ath12k *ar)
{
struct ath12k_base *ab = ar->ab;
@@ -91,7 +1269,18 @@ void ath12k_debugfs_register(struct ath12k *ar)
&fops_simulate_radar);
}
+ debugfs_create_file("tpc_stats", 0400, ar->debug.debugfs_pdev, ar,
+ &fops_tpc_stats);
+ debugfs_create_file("tpc_stats_type", 0200, ar->debug.debugfs_pdev,
+ ar, &fops_tpc_stats_type);
+ init_completion(&ar->debug.tpc_complete);
+
ath12k_debugfs_htt_stats_register(ar);
+ ath12k_debugfs_fw_stats_register(ar);
+
+ debugfs_create_file("ext_rx_stats", 0644,
+ ar->debug.debugfs_pdev, ar,
+ &fops_extd_rx_stats);
}
void ath12k_debugfs_unregister(struct ath12k *ar)
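
The new debugfs open handlers above all follow the same scope-based cleanup pattern from <linux/cleanup.h>: the dump buffer is declared with __free(kfree) so every early return frees it automatically, and ownership moves to file->private_data via no_free_ptr() only on success. A stripped-down sketch of that pattern (function name and buffer size are illustrative):

	static int example_stats_open(struct inode *inode, struct file *file)
	{
		void *buf __free(kfree) = kzalloc(SZ_4K, GFP_KERNEL);

		if (!buf)
			return -ENOMEM;

		/* any error return taken here frees buf automatically */

		file->private_data = no_free_ptr(buf);	/* hand ownership to the file */
		return 0;
	}

The matching release handler then reduces to a single kfree(file->private_data), as done for each of the stats files above.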
diff --git a/drivers/net/wireless/ath/ath12k/debugfs.h b/drivers/net/wireless/ath/ath12k/debugfs.h
index 8d64ba03aa9a..d7041297d5d8 100644
--- a/drivers/net/wireless/ath/ath12k/debugfs.h
+++ b/drivers/net/wireless/ath/ath12k/debugfs.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _ATH12K_DEBUGFS_H_
@@ -12,6 +12,101 @@ void ath12k_debugfs_soc_create(struct ath12k_base *ab);
void ath12k_debugfs_soc_destroy(struct ath12k_base *ab);
void ath12k_debugfs_register(struct ath12k *ar);
void ath12k_debugfs_unregister(struct ath12k *ar);
+void ath12k_debugfs_fw_stats_process(struct ath12k *ar,
+ struct ath12k_fw_stats *stats);
+void ath12k_debugfs_fw_stats_reset(struct ath12k *ar);
+
+static inline bool ath12k_debugfs_is_extd_rx_stats_enabled(struct ath12k *ar)
+{
+ return ar->debug.extd_rx_stats;
+}
+
+static inline int ath12k_debugfs_rx_filter(struct ath12k *ar)
+{
+ return ar->debug.rx_filter;
+}
+
+#define ATH12K_CCK_RATES 4
+#define ATH12K_OFDM_RATES 8
+#define ATH12K_HT_RATES 8
+#define ATH12K_VHT_RATES 12
+#define ATH12K_HE_RATES 12
+#define ATH12K_HE_RATES_WITH_EXTRA_MCS 14
+#define ATH12K_EHT_RATES 16
+#define HE_EXTRA_MCS_SUPPORT GENMASK(31, 16)
+#define ATH12K_NSS_1 1
+#define ATH12K_NSS_4 4
+#define ATH12K_NSS_8 8
+#define ATH12K_HW_NSS(_rcode) (((_rcode) >> 5) & 0x7)
+#define TPC_STATS_WAIT_TIME (1 * HZ)
+#define MAX_TPC_PREAM_STR_LEN 7
+#define TPC_INVAL -128
+#define TPC_MAX 127
+#define TPC_STATS_WAIT_TIME (1 * HZ)
+#define TPC_STATS_TOT_ROW 700
+#define TPC_STATS_TOT_COLUMN 100
+#define MODULATION_LIMIT 126
+
+#define ATH12K_TPC_STATS_BUF_SIZE (TPC_STATS_TOT_ROW * TPC_STATS_TOT_COLUMN)
+
+enum wmi_tpc_pream_bw {
+ WMI_TPC_PREAM_CCK,
+ WMI_TPC_PREAM_OFDM,
+ WMI_TPC_PREAM_HT20,
+ WMI_TPC_PREAM_HT40,
+ WMI_TPC_PREAM_VHT20,
+ WMI_TPC_PREAM_VHT40,
+ WMI_TPC_PREAM_VHT80,
+ WMI_TPC_PREAM_VHT160,
+ WMI_TPC_PREAM_HE20,
+ WMI_TPC_PREAM_HE40,
+ WMI_TPC_PREAM_HE80,
+ WMI_TPC_PREAM_HE160,
+ WMI_TPC_PREAM_EHT20,
+ WMI_TPC_PREAM_EHT40,
+ WMI_TPC_PREAM_EHT60,
+ WMI_TPC_PREAM_EHT80,
+ WMI_TPC_PREAM_EHT120,
+ WMI_TPC_PREAM_EHT140,
+ WMI_TPC_PREAM_EHT160,
+ WMI_TPC_PREAM_EHT200,
+ WMI_TPC_PREAM_EHT240,
+ WMI_TPC_PREAM_EHT280,
+ WMI_TPC_PREAM_EHT320,
+ WMI_TPC_PREAM_MAX
+};
+
+enum ath12k_debug_tpc_stats_ctl_mode {
+ ATH12K_TPC_STATS_CTL_MODE_LEGACY_5GHZ_6GHZ,
+ ATH12K_TPC_STATS_CTL_MODE_HT_VHT20_5GHZ_6GHZ,
+ ATH12K_TPC_STATS_CTL_MODE_HE_EHT20_5GHZ_6GHZ,
+ ATH12K_TPC_STATS_CTL_MODE_HT_VHT40_5GHZ_6GHZ,
+ ATH12K_TPC_STATS_CTL_MODE_HE_EHT40_5GHZ_6GHZ,
+ ATH12K_TPC_STATS_CTL_MODE_VHT80_5GHZ_6GHZ,
+ ATH12K_TPC_STATS_CTL_MODE_HE_EHT80_5GHZ_6GHZ,
+ ATH12K_TPC_STATS_CTL_MODE_VHT160_5GHZ_6GHZ,
+ ATH12K_TPC_STATS_CTL_MODE_HE_EHT160_5GHZ_6GHZ,
+ ATH12K_TPC_STATS_CTL_MODE_HE_EHT320_5GHZ_6GHZ,
+ ATH12K_TPC_STATS_CTL_MODE_CCK_2GHZ,
+ ATH12K_TPC_STATS_CTL_MODE_LEGACY_2GHZ,
+ ATH12K_TPC_STATS_CTL_MODE_HT20_2GHZ,
+ ATH12K_TPC_STATS_CTL_MODE_HT40_2GHZ,
+
+ ATH12K_TPC_STATS_CTL_MODE_EHT80_SU_PUNC20 = 23,
+ ATH12K_TPC_STATS_CTL_MODE_EHT160_SU_PUNC20,
+ ATH12K_TPC_STATS_CTL_MODE_EHT320_SU_PUNC40,
+ ATH12K_TPC_STATS_CTL_MODE_EHT320_SU_PUNC80,
+ ATH12K_TPC_STATS_CTL_MODE_EHT320_SU_PUNC120
+};
+
+enum ath12k_debug_tpc_stats_support_modes {
+ ATH12K_TPC_STATS_SUPPORT_160 = 0,
+ ATH12K_TPC_STATS_SUPPORT_320,
+ ATH12K_TPC_STATS_SUPPORT_AX,
+ ATH12K_TPC_STATS_SUPPORT_AX_EXTRA_MCS,
+ ATH12K_TPC_STATS_SUPPORT_BE,
+ ATH12K_TPC_STATS_SUPPORT_BE_PUNC,
+};
#else
static inline void ath12k_debugfs_soc_create(struct ath12k_base *ab)
{
@@ -29,6 +124,24 @@ static inline void ath12k_debugfs_unregister(struct ath12k *ar)
{
}
+static inline void ath12k_debugfs_fw_stats_process(struct ath12k *ar,
+ struct ath12k_fw_stats *stats)
+{
+}
+
+static inline void ath12k_debugfs_fw_stats_reset(struct ath12k *ar)
+{
+}
+
+static inline bool ath12k_debugfs_is_extd_rx_stats_enabled(struct ath12k *ar)
+{
+ return false;
+}
+
+static inline int ath12k_debugfs_rx_filter(struct ath12k *ar)
+{
+ return 0;
+}
#endif /* CONFIG_ATH12K_DEBUGFS */
#endif /* _ATH12K_DEBUGFS_H_ */
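
ATH12K_HW_NSS() above assumes the same rate-code layout that ath12k_get_ratecode() builds in debugfs.c: preamble in bits 15:8, NSS field in bits 7:5, MCS in bits 4:0. A small sketch of the round trip, with an illustrative helper name:

	/* Pack the fields the way ath12k_get_ratecode() does; ATH12K_HW_NSS()
	 * then recovers the 3-bit NSS field, e.g. 2 in for nss, 2 back out.
	 */
	static u16 example_pack_ratecode(u16 mode_type, u16 nss, u16 mcs)
	{
		return (mode_type << 8) | ((nss & 0x7) << 5) | (mcs & 0x1f);
	}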
diff --git a/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.c b/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.c
index 41e4ef2ef3af..1c0d5fa39a8d 100644
--- a/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.c
+++ b/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/vmalloc.h>
@@ -48,6 +48,34 @@ print_array_to_buf(u8 *buf, u32 offset, const char *header,
footer);
}
+static u32
+print_array_to_buf_s8(u8 *buf, u32 offset, const char *header, u32 stats_index,
+ const s8 *array, u32 array_len, const char *footer)
+{
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ int index = 0;
+ u8 i;
+
+ if (header)
+ index += scnprintf(buf + offset, buf_len - offset, "%s = ", header);
+
+ for (i = 0; i < array_len; i++) {
+ index += scnprintf(buf + offset + index, (buf_len - offset) - index,
+ " %u:%d,", stats_index++, array[i]);
+ }
+
+ index--;
+ if ((offset + index) < buf_len)
+ buf[offset + index] = '\0';
+
+ if (footer) {
+ index += scnprintf(buf + offset + index, (buf_len - offset) - index,
+ "%s", footer);
+ }
+
+ return index;
+}
+
static const char *ath12k_htt_ax_tx_rx_ru_size_to_str(u8 ru_size)
{
switch (ru_size) {
@@ -2512,6 +2540,268 @@ ath12k_htt_print_pdev_stats_cca_counters_tlv(const void *tag_buf, u16 tag_len,
}
static void
+ath12k_htt_print_tx_sounding_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_tx_sounding_stats_tlv *htt_stats_buf = tag_buf;
+ const __le32 *cbf_20, *cbf_40, *cbf_80, *cbf_160, *cbf_320;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u32 len = stats_req->buf_len;
+ u8 *buf = stats_req->buf;
+ u32 tx_sounding_mode;
+ u8 i, u;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ cbf_20 = htt_stats_buf->cbf_20;
+ cbf_40 = htt_stats_buf->cbf_40;
+ cbf_80 = htt_stats_buf->cbf_80;
+ cbf_160 = htt_stats_buf->cbf_160;
+ cbf_320 = htt_stats_buf->cbf_320;
+ tx_sounding_mode = le32_to_cpu(htt_stats_buf->tx_sounding_mode);
+
+ if (tx_sounding_mode == ATH12K_HTT_TX_AC_SOUNDING_MODE) {
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_AC_SOUNDING_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len,
+ "ac_cbf_20 = IBF: %u, SU_SIFS: %u, SU_RBO: %u, ",
+ le32_to_cpu(cbf_20[ATH12K_HTT_IMPL_STEER_STATS]),
+ le32_to_cpu(cbf_20[ATH12K_HTT_EXPL_SUSIFS_STEER_STATS]),
+ le32_to_cpu(cbf_20[ATH12K_HTT_EXPL_SURBO_STEER_STATS]));
+ len += scnprintf(buf + len, buf_len - len, "MU_SIFS: %u, MU_RBO: %u\n",
+ le32_to_cpu(cbf_20[ATH12K_HTT_EXPL_MUSIFS_STEER_STATS]),
+ le32_to_cpu(cbf_20[ATH12K_HTT_EXPL_MURBO_STEER_STATS]));
+ len += scnprintf(buf + len, buf_len - len,
+ "ac_cbf_40 = IBF: %u, SU_SIFS: %u, SU_RBO: %u, ",
+ le32_to_cpu(cbf_40[ATH12K_HTT_IMPL_STEER_STATS]),
+ le32_to_cpu(cbf_40[ATH12K_HTT_EXPL_SUSIFS_STEER_STATS]),
+ le32_to_cpu(cbf_40[ATH12K_HTT_EXPL_SURBO_STEER_STATS]));
+ len += scnprintf(buf + len, buf_len - len, "MU_SIFS: %u, MU_RBO: %u\n",
+ le32_to_cpu(cbf_40[ATH12K_HTT_EXPL_MUSIFS_STEER_STATS]),
+ le32_to_cpu(cbf_40[ATH12K_HTT_EXPL_MURBO_STEER_STATS]));
+ len += scnprintf(buf + len, buf_len - len,
+ "ac_cbf_80 = IBF: %u, SU_SIFS: %u, SU_RBO: %u, ",
+ le32_to_cpu(cbf_80[ATH12K_HTT_IMPL_STEER_STATS]),
+ le32_to_cpu(cbf_80[ATH12K_HTT_EXPL_SUSIFS_STEER_STATS]),
+ le32_to_cpu(cbf_80[ATH12K_HTT_EXPL_SURBO_STEER_STATS]));
+ len += scnprintf(buf + len, buf_len - len, "MU_SIFS: %u, MU_RBO: %u\n",
+ le32_to_cpu(cbf_80[ATH12K_HTT_EXPL_MUSIFS_STEER_STATS]),
+ le32_to_cpu(cbf_80[ATH12K_HTT_EXPL_MURBO_STEER_STATS]));
+ len += scnprintf(buf + len, buf_len - len,
+ "ac_cbf_160 = IBF: %u, SU_SIFS: %u, SU_RBO: %u, ",
+ le32_to_cpu(cbf_160[ATH12K_HTT_IMPL_STEER_STATS]),
+ le32_to_cpu(cbf_160[ATH12K_HTT_EXPL_SUSIFS_STEER_STATS]),
+ le32_to_cpu(cbf_160[ATH12K_HTT_EXPL_SURBO_STEER_STATS]));
+ len += scnprintf(buf + len, buf_len - len, "MU_SIFS: %u, MU_RBO: %u\n",
+ le32_to_cpu(cbf_160[ATH12K_HTT_EXPL_MUSIFS_STEER_STATS]),
+ le32_to_cpu(cbf_160[ATH12K_HTT_EXPL_MURBO_STEER_STATS]));
+
+ for (u = 0, i = 0; u < ATH12K_HTT_TX_NUM_AC_MUMIMO_USER_STATS; u++) {
+ len += scnprintf(buf + len, buf_len - len,
+ "Sounding User_%u = 20MHz: %u, ", u,
+ le32_to_cpu(htt_stats_buf->sounding[i++]));
+ len += scnprintf(buf + len, buf_len - len, "40MHz: %u, ",
+ le32_to_cpu(htt_stats_buf->sounding[i++]));
+ len += scnprintf(buf + len, buf_len - len, "80MHz: %u, ",
+ le32_to_cpu(htt_stats_buf->sounding[i++]));
+ len += scnprintf(buf + len, buf_len - len, "160MHz: %u\n",
+ le32_to_cpu(htt_stats_buf->sounding[i++]));
+ }
+ } else if (tx_sounding_mode == ATH12K_HTT_TX_AX_SOUNDING_MODE) {
+ len += scnprintf(buf + len, buf_len - len,
+ "\nHTT_TX_AX_SOUNDING_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_cbf_20 = IBF: %u, SU_SIFS: %u, SU_RBO: %u, ",
+ le32_to_cpu(cbf_20[ATH12K_HTT_IMPL_STEER_STATS]),
+ le32_to_cpu(cbf_20[ATH12K_HTT_EXPL_SUSIFS_STEER_STATS]),
+ le32_to_cpu(cbf_20[ATH12K_HTT_EXPL_SURBO_STEER_STATS]));
+ len += scnprintf(buf + len, buf_len - len, "MU_SIFS: %u, MU_RBO: %u\n",
+ le32_to_cpu(cbf_20[ATH12K_HTT_EXPL_MUSIFS_STEER_STATS]),
+ le32_to_cpu(cbf_20[ATH12K_HTT_EXPL_MURBO_STEER_STATS]));
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_cbf_40 = IBF: %u, SU_SIFS: %u, SU_RBO: %u, ",
+ le32_to_cpu(cbf_40[ATH12K_HTT_IMPL_STEER_STATS]),
+ le32_to_cpu(cbf_40[ATH12K_HTT_EXPL_SUSIFS_STEER_STATS]),
+ le32_to_cpu(cbf_40[ATH12K_HTT_EXPL_SURBO_STEER_STATS]));
+ len += scnprintf(buf + len, buf_len - len, "MU_SIFS: %u, MU_RBO: %u\n",
+ le32_to_cpu(cbf_40[ATH12K_HTT_EXPL_MUSIFS_STEER_STATS]),
+ le32_to_cpu(cbf_40[ATH12K_HTT_EXPL_MURBO_STEER_STATS]));
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_cbf_80 = IBF: %u, SU_SIFS: %u, SU_RBO: %u, ",
+ le32_to_cpu(cbf_80[ATH12K_HTT_IMPL_STEER_STATS]),
+ le32_to_cpu(cbf_80[ATH12K_HTT_EXPL_SUSIFS_STEER_STATS]),
+ le32_to_cpu(cbf_80[ATH12K_HTT_EXPL_SURBO_STEER_STATS]));
+ len += scnprintf(buf + len, buf_len - len, "MU_SIFS: %u, MU_RBO: %u\n",
+ le32_to_cpu(cbf_80[ATH12K_HTT_EXPL_MUSIFS_STEER_STATS]),
+ le32_to_cpu(cbf_80[ATH12K_HTT_EXPL_MURBO_STEER_STATS]));
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_cbf_160 = IBF: %u, SU_SIFS: %u, SU_RBO: %u, ",
+ le32_to_cpu(cbf_160[ATH12K_HTT_IMPL_STEER_STATS]),
+ le32_to_cpu(cbf_160[ATH12K_HTT_EXPL_SUSIFS_STEER_STATS]),
+ le32_to_cpu(cbf_160[ATH12K_HTT_EXPL_SURBO_STEER_STATS]));
+ len += scnprintf(buf + len, buf_len - len, "MU_SIFS: %u, MU_RBO: %u\n",
+ le32_to_cpu(cbf_160[ATH12K_HTT_EXPL_MUSIFS_STEER_STATS]),
+ le32_to_cpu(cbf_160[ATH12K_HTT_EXPL_MURBO_STEER_STATS]));
+
+ for (u = 0, i = 0; u < ATH12K_HTT_TX_NUM_AX_MUMIMO_USER_STATS; u++) {
+ len += scnprintf(buf + len, buf_len - len,
+ "Sounding User_%u = 20MHz: %u, ", u,
+ le32_to_cpu(htt_stats_buf->sounding[i++]));
+ len += scnprintf(buf + len, buf_len - len, "40MHz: %u, ",
+ le32_to_cpu(htt_stats_buf->sounding[i++]));
+ len += scnprintf(buf + len, buf_len - len, "80MHz: %u, ",
+ le32_to_cpu(htt_stats_buf->sounding[i++]));
+ len += scnprintf(buf + len, buf_len - len, "160MHz: %u\n",
+ le32_to_cpu(htt_stats_buf->sounding[i++]));
+ }
+ } else if (tx_sounding_mode == ATH12K_HTT_TX_BE_SOUNDING_MODE) {
+ len += scnprintf(buf + len, buf_len - len,
+ "\nHTT_TX_BE_SOUNDING_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len,
+ "be_cbf_20 = IBF: %u, SU_SIFS: %u, SU_RBO: %u, ",
+ le32_to_cpu(cbf_20[ATH12K_HTT_IMPL_STEER_STATS]),
+ le32_to_cpu(cbf_20[ATH12K_HTT_EXPL_SUSIFS_STEER_STATS]),
+ le32_to_cpu(cbf_20[ATH12K_HTT_EXPL_SURBO_STEER_STATS]));
+ len += scnprintf(buf + len, buf_len - len, "MU_SIFS: %u, MU_RBO: %u\n",
+ le32_to_cpu(cbf_20[ATH12K_HTT_EXPL_MUSIFS_STEER_STATS]),
+ le32_to_cpu(cbf_20[ATH12K_HTT_EXPL_MURBO_STEER_STATS]));
+ len += scnprintf(buf + len, buf_len - len,
+ "be_cbf_40 = IBF: %u, SU_SIFS: %u, SU_RBO: %u, ",
+ le32_to_cpu(cbf_40[ATH12K_HTT_IMPL_STEER_STATS]),
+ le32_to_cpu(cbf_40[ATH12K_HTT_EXPL_SUSIFS_STEER_STATS]),
+ le32_to_cpu(cbf_40[ATH12K_HTT_EXPL_SURBO_STEER_STATS]));
+ len += scnprintf(buf + len, buf_len - len, "MU_SIFS: %u, MU_RBO: %u\n",
+ le32_to_cpu(cbf_40[ATH12K_HTT_EXPL_MUSIFS_STEER_STATS]),
+ le32_to_cpu(cbf_40[ATH12K_HTT_EXPL_MURBO_STEER_STATS]));
+ len += scnprintf(buf + len, buf_len - len,
+ "be_cbf_80 = IBF: %u, SU_SIFS: %u, SU_RBO: %u, ",
+ le32_to_cpu(cbf_80[ATH12K_HTT_IMPL_STEER_STATS]),
+ le32_to_cpu(cbf_80[ATH12K_HTT_EXPL_SUSIFS_STEER_STATS]),
+ le32_to_cpu(cbf_80[ATH12K_HTT_EXPL_SURBO_STEER_STATS]));
+ len += scnprintf(buf + len, buf_len - len, "MU_SIFS: %u, MU_RBO: %u\n",
+ le32_to_cpu(cbf_80[ATH12K_HTT_EXPL_MUSIFS_STEER_STATS]),
+ le32_to_cpu(cbf_80[ATH12K_HTT_EXPL_MURBO_STEER_STATS]));
+ len += scnprintf(buf + len, buf_len - len,
+ "be_cbf_160 = IBF: %u, SU_SIFS: %u, SU_RBO: %u, ",
+ le32_to_cpu(cbf_160[ATH12K_HTT_IMPL_STEER_STATS]),
+ le32_to_cpu(cbf_160[ATH12K_HTT_EXPL_SUSIFS_STEER_STATS]),
+ le32_to_cpu(cbf_160[ATH12K_HTT_EXPL_SURBO_STEER_STATS]));
+ len += scnprintf(buf + len, buf_len - len, "MU_SIFS: %u, MU_RBO: %u\n",
+ le32_to_cpu(cbf_160[ATH12K_HTT_EXPL_MUSIFS_STEER_STATS]),
+ le32_to_cpu(cbf_160[ATH12K_HTT_EXPL_MURBO_STEER_STATS]));
+ len += scnprintf(buf + len, buf_len - len,
+ "be_cbf_320 = IBF: %u, SU_SIFS: %u, SU_RBO: %u, ",
+ le32_to_cpu(cbf_320[ATH12K_HTT_IMPL_STEER_STATS]),
+ le32_to_cpu(cbf_320[ATH12K_HTT_EXPL_SUSIFS_STEER_STATS]),
+ le32_to_cpu(cbf_320[ATH12K_HTT_EXPL_SURBO_STEER_STATS]));
+ len += scnprintf(buf + len, buf_len - len, "MU_SIFS: %u, MU_RBO: %u\n",
+ le32_to_cpu(cbf_320[ATH12K_HTT_EXPL_MUSIFS_STEER_STATS]),
+ le32_to_cpu(cbf_320[ATH12K_HTT_EXPL_MURBO_STEER_STATS]));
+ for (u = 0, i = 0; u < ATH12K_HTT_TX_NUM_BE_MUMIMO_USER_STATS; u++) {
+ len += scnprintf(buf + len, buf_len - len,
+ "Sounding User_%u = 20MHz: %u, ", u,
+ le32_to_cpu(htt_stats_buf->sounding[i++]));
+ len += scnprintf(buf + len, buf_len - len, "40MHz: %u, ",
+ le32_to_cpu(htt_stats_buf->sounding[i++]));
+ len += scnprintf(buf + len, buf_len - len, "80MHz: %u, ",
+ le32_to_cpu(htt_stats_buf->sounding[i++]));
+ len += scnprintf(buf + len, buf_len - len,
+ "160MHz: %u, 320MHz: %u\n",
+ le32_to_cpu(htt_stats_buf->sounding[i++]),
+ le32_to_cpu(htt_stats_buf->sounding_320[u]));
+ }
+ } else if (tx_sounding_mode == ATH12K_HTT_TX_CMN_SOUNDING_MODE) {
+ len += scnprintf(buf + len, buf_len - len,
+ "\nCV UPLOAD HANDLER STATS:\n");
+ len += scnprintf(buf + len, buf_len - len, "cv_nc_mismatch_err = %u\n",
+ le32_to_cpu(htt_stats_buf->cv_nc_mismatch_err));
+ len += scnprintf(buf + len, buf_len - len, "cv_fcs_err = %u\n",
+ le32_to_cpu(htt_stats_buf->cv_fcs_err));
+ len += scnprintf(buf + len, buf_len - len, "cv_frag_idx_mismatch = %u\n",
+ le32_to_cpu(htt_stats_buf->cv_frag_idx_mismatch));
+ len += scnprintf(buf + len, buf_len - len, "cv_invalid_peer_id = %u\n",
+ le32_to_cpu(htt_stats_buf->cv_invalid_peer_id));
+ len += scnprintf(buf + len, buf_len - len, "cv_no_txbf_setup = %u\n",
+ le32_to_cpu(htt_stats_buf->cv_no_txbf_setup));
+ len += scnprintf(buf + len, buf_len - len, "cv_expiry_in_update = %u\n",
+ le32_to_cpu(htt_stats_buf->cv_expiry_in_update));
+ len += scnprintf(buf + len, buf_len - len, "cv_pkt_bw_exceed = %u\n",
+ le32_to_cpu(htt_stats_buf->cv_pkt_bw_exceed));
+ len += scnprintf(buf + len, buf_len - len, "cv_dma_not_done_err = %u\n",
+ le32_to_cpu(htt_stats_buf->cv_dma_not_done_err));
+ len += scnprintf(buf + len, buf_len - len, "cv_update_failed = %u\n",
+ le32_to_cpu(htt_stats_buf->cv_update_failed));
+ len += scnprintf(buf + len, buf_len - len, "cv_dma_timeout_error = %u\n",
+ le32_to_cpu(htt_stats_buf->cv_dma_timeout_error));
+ len += scnprintf(buf + len, buf_len - len, "cv_buf_ibf_uploads = %u\n",
+ le32_to_cpu(htt_stats_buf->cv_buf_ibf_uploads));
+ len += scnprintf(buf + len, buf_len - len, "cv_buf_ebf_uploads = %u\n",
+ le32_to_cpu(htt_stats_buf->cv_buf_ebf_uploads));
+ len += scnprintf(buf + len, buf_len - len, "cv_buf_received = %u\n",
+ le32_to_cpu(htt_stats_buf->cv_buf_received));
+ len += scnprintf(buf + len, buf_len - len, "cv_buf_fed_back = %u\n\n",
+ le32_to_cpu(htt_stats_buf->cv_buf_fed_back));
+
+ len += scnprintf(buf + len, buf_len - len, "CV QUERY STATS:\n");
+ len += scnprintf(buf + len, buf_len - len, "cv_total_query = %u\n",
+ le32_to_cpu(htt_stats_buf->cv_total_query));
+ len += scnprintf(buf + len, buf_len - len,
+ "cv_total_pattern_query = %u\n",
+ le32_to_cpu(htt_stats_buf->cv_total_pattern_query));
+ len += scnprintf(buf + len, buf_len - len, "cv_total_bw_query = %u\n",
+ le32_to_cpu(htt_stats_buf->cv_total_bw_query));
+ len += scnprintf(buf + len, buf_len - len, "cv_invalid_bw_coding = %u\n",
+ le32_to_cpu(htt_stats_buf->cv_invalid_bw_coding));
+ len += scnprintf(buf + len, buf_len - len, "cv_forced_sounding = %u\n",
+ le32_to_cpu(htt_stats_buf->cv_forced_sounding));
+ len += scnprintf(buf + len, buf_len - len,
+ "cv_standalone_sounding = %u\n",
+ le32_to_cpu(htt_stats_buf->cv_standalone_sounding));
+ len += scnprintf(buf + len, buf_len - len, "cv_nc_mismatch = %u\n",
+ le32_to_cpu(htt_stats_buf->cv_nc_mismatch));
+ len += scnprintf(buf + len, buf_len - len, "cv_fb_type_mismatch = %u\n",
+ le32_to_cpu(htt_stats_buf->cv_fb_type_mismatch));
+ len += scnprintf(buf + len, buf_len - len, "cv_ofdma_bw_mismatch = %u\n",
+ le32_to_cpu(htt_stats_buf->cv_ofdma_bw_mismatch));
+ len += scnprintf(buf + len, buf_len - len, "cv_bw_mismatch = %u\n",
+ le32_to_cpu(htt_stats_buf->cv_bw_mismatch));
+ len += scnprintf(buf + len, buf_len - len, "cv_pattern_mismatch = %u\n",
+ le32_to_cpu(htt_stats_buf->cv_pattern_mismatch));
+ len += scnprintf(buf + len, buf_len - len, "cv_preamble_mismatch = %u\n",
+ le32_to_cpu(htt_stats_buf->cv_preamble_mismatch));
+ len += scnprintf(buf + len, buf_len - len, "cv_nr_mismatch = %u\n",
+ le32_to_cpu(htt_stats_buf->cv_nr_mismatch));
+ len += scnprintf(buf + len, buf_len - len,
+ "cv_in_use_cnt_exceeded = %u\n",
+ le32_to_cpu(htt_stats_buf->cv_in_use_cnt_exceeded));
+ len += scnprintf(buf + len, buf_len - len, "cv_ntbr_sounding = %u\n",
+ le32_to_cpu(htt_stats_buf->cv_ntbr_sounding));
+ len += scnprintf(buf + len, buf_len - len,
+ "cv_found_upload_in_progress = %u\n",
+ le32_to_cpu(htt_stats_buf->cv_found_upload_in_progress));
+ len += scnprintf(buf + len, buf_len - len,
+ "cv_expired_during_query = %u\n",
+ le32_to_cpu(htt_stats_buf->cv_expired_during_query));
+ len += scnprintf(buf + len, buf_len - len, "cv_found = %u\n",
+ le32_to_cpu(htt_stats_buf->cv_found));
+ len += scnprintf(buf + len, buf_len - len, "cv_not_found = %u\n",
+ le32_to_cpu(htt_stats_buf->cv_not_found));
+ len += scnprintf(buf + len, buf_len - len, "cv_total_query_ibf = %u\n",
+ le32_to_cpu(htt_stats_buf->cv_total_query_ibf));
+ len += scnprintf(buf + len, buf_len - len, "cv_found_ibf = %u\n",
+ le32_to_cpu(htt_stats_buf->cv_found_ibf));
+ len += scnprintf(buf + len, buf_len - len, "cv_not_found_ibf = %u\n",
+ le32_to_cpu(htt_stats_buf->cv_not_found_ibf));
+ len += scnprintf(buf + len, buf_len - len,
+ "cv_expired_during_query_ibf = %u\n\n",
+ le32_to_cpu(htt_stats_buf->cv_expired_during_query_ibf));
+ }
+
+ stats_req->buf_len = len;
+}
+
+static void
ath12k_htt_print_pdev_obss_pd_stats_tlv(const void *tag_buf, u16 tag_len,
struct debug_htt_stats_req *stats_req)
{
@@ -2577,6 +2867,428 @@ ath12k_htt_print_pdev_obss_pd_stats_tlv(const void *tag_buf, u16 tag_len,
}
static void
+ath12k_htt_print_latency_prof_ctx_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_latency_prof_ctx_tlv *htt_stats_buf = tag_buf;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u32 len = stats_req->buf_len;
+ u8 *buf = stats_req->buf;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_STATS_LATENCY_CTX_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "duration = %u\n",
+ le32_to_cpu(htt_stats_buf->duration));
+ len += scnprintf(buf + len, buf_len - len, "tx_msdu_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->tx_msdu_cnt));
+ len += scnprintf(buf + len, buf_len - len, "tx_mpdu_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->tx_mpdu_cnt));
+ len += scnprintf(buf + len, buf_len - len, "rx_msdu_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->rx_msdu_cnt));
+ len += scnprintf(buf + len, buf_len - len, "rx_mpdu_cnt = %u\n\n",
+ le32_to_cpu(htt_stats_buf->rx_mpdu_cnt));
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_latency_prof_cnt(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_latency_prof_cnt_tlv *htt_stats_buf = tag_buf;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u32 len = stats_req->buf_len;
+ u8 *buf = stats_req->buf;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_STATS_LATENCY_CNT_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "prof_enable_cnt = %u\n\n",
+ le32_to_cpu(htt_stats_buf->prof_enable_cnt));
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_latency_prof_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_latency_prof_stats_tlv *htt_stats_buf = tag_buf;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u32 len = stats_req->buf_len;
+ u8 *buf = stats_req->buf;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
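+ /* print_header is expected to be 1 only on the first latency profile entry of a report, so the banner is emitted once */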
+ if (le32_to_cpu(htt_stats_buf->print_header) == 1) {
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_STATS_LATENCY_PROF_TLV:\n");
+ }
+
+ len += scnprintf(buf + len, buf_len - len, "Latency name = %s\n",
+ htt_stats_buf->latency_prof_name);
+ len += scnprintf(buf + len, buf_len - len, "count = %u\n",
+ le32_to_cpu(htt_stats_buf->cnt));
+ len += scnprintf(buf + len, buf_len - len, "minimum = %u\n",
+ le32_to_cpu(htt_stats_buf->min));
+ len += scnprintf(buf + len, buf_len - len, "maximum = %u\n",
+ le32_to_cpu(htt_stats_buf->max));
+ len += scnprintf(buf + len, buf_len - len, "last = %u\n",
+ le32_to_cpu(htt_stats_buf->last));
+ len += scnprintf(buf + len, buf_len - len, "total = %u\n",
+ le32_to_cpu(htt_stats_buf->tot));
+ len += scnprintf(buf + len, buf_len - len, "average = %u\n",
+ le32_to_cpu(htt_stats_buf->avg));
+ len += scnprintf(buf + len, buf_len - len, "histogram interval = %u\n",
+ le32_to_cpu(htt_stats_buf->hist_intvl));
+ len += print_array_to_buf(buf, len, "histogram", htt_stats_buf->hist,
+ ATH12K_HTT_LATENCY_PROFILE_NUM_MAX_HIST, "\n\n");
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_ul_ofdma_trigger_stats(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_rx_pdev_ul_trigger_stats_tlv *htt_stats_buf = tag_buf;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u32 len = stats_req->buf_len;
+ u8 *buf = stats_req->buf;
+ u32 mac_id;
+ u8 j;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ mac_id = __le32_to_cpu(htt_stats_buf->mac_id__word);
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_RX_PDEV_UL_TRIGGER_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mac_id = %u\n",
+ u32_get_bits(mac_id, ATH12K_HTT_STATS_MAC_ID));
+ len += scnprintf(buf + len, buf_len - len, "rx_11ax_ul_ofdma = %u\n",
+ le32_to_cpu(htt_stats_buf->rx_11ax_ul_ofdma));
+ len += print_array_to_buf(buf, len, "ul_ofdma_rx_mcs",
+ htt_stats_buf->ul_ofdma_rx_mcs,
+ ATH12K_HTT_RX_NUM_MCS_CNTRS, "\n");
+ for (j = 0; j < ATH12K_HTT_RX_NUM_GI_CNTRS; j++) {
+ len += scnprintf(buf + len, buf_len - len, "ul_ofdma_rx_gi[%u]", j);
+ len += print_array_to_buf(buf, len, "",
+ htt_stats_buf->ul_ofdma_rx_gi[j],
+ ATH12K_HTT_RX_NUM_MCS_CNTRS, "\n");
+ }
+
+ len += print_array_to_buf_index(buf, len, "ul_ofdma_rx_nss", 1,
+ htt_stats_buf->ul_ofdma_rx_nss,
+ ATH12K_HTT_RX_NUM_SPATIAL_STREAMS, "\n");
+ len += print_array_to_buf(buf, len, "ul_ofdma_rx_bw",
+ htt_stats_buf->ul_ofdma_rx_bw,
+ ATH12K_HTT_RX_NUM_BW_CNTRS, "\n");
+
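+ /* red_bw[0] carries the half-bandwidth counters and red_bw[1] the quarter-bandwidth counters, matching the labels printed below */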
+ for (j = 0; j < ATH12K_HTT_RX_NUM_REDUCED_CHAN_TYPES; j++) {
+ len += scnprintf(buf + len, buf_len - len, j == 0 ?
+ "half_ul_ofdma_rx_bw" :
+ "quarter_ul_ofdma_rx_bw");
+ len += print_array_to_buf(buf, len, "", htt_stats_buf->red_bw[j],
+ ATH12K_HTT_RX_NUM_BW_CNTRS, "\n");
+ }
+ len += scnprintf(buf + len, buf_len - len, "ul_ofdma_rx_stbc = %u\n",
+ le32_to_cpu(htt_stats_buf->ul_ofdma_rx_stbc));
+ len += scnprintf(buf + len, buf_len - len, "ul_ofdma_rx_ldpc = %u\n",
+ le32_to_cpu(htt_stats_buf->ul_ofdma_rx_ldpc));
+
+ len += scnprintf(buf + len, buf_len - len, "rx_ulofdma_data_ru_size_ppdu = ");
+ for (j = 0; j < ATH12K_HTT_RX_NUM_RU_SIZE_CNTRS; j++)
+ len += scnprintf(buf + len, buf_len - len, " %s:%u ",
+ ath12k_htt_ax_tx_rx_ru_size_to_str(j),
+ le32_to_cpu(htt_stats_buf->data_ru_size_ppdu[j]));
+ len += scnprintf(buf + len, buf_len - len, "\n");
+
+ len += scnprintf(buf + len, buf_len - len,
+ "rx_ulofdma_non_data_ru_size_ppdu = ");
+ for (j = 0; j < ATH12K_HTT_RX_NUM_RU_SIZE_CNTRS; j++)
+ len += scnprintf(buf + len, buf_len - len, " %s:%u ",
+ ath12k_htt_ax_tx_rx_ru_size_to_str(j),
+ le32_to_cpu(htt_stats_buf->non_data_ru_size_ppdu[j]));
+ len += scnprintf(buf + len, buf_len - len, "\n");
+
+ len += print_array_to_buf(buf, len, "rx_rssi_track_sta_aid",
+ htt_stats_buf->uplink_sta_aid,
+ ATH12K_HTT_RX_UL_MAX_UPLINK_RSSI_TRACK, "\n");
+ len += print_array_to_buf(buf, len, "rx_sta_target_rssi",
+ htt_stats_buf->uplink_sta_target_rssi,
+ ATH12K_HTT_RX_UL_MAX_UPLINK_RSSI_TRACK, "\n");
+ len += print_array_to_buf(buf, len, "rx_sta_fd_rssi",
+ htt_stats_buf->uplink_sta_fd_rssi,
+ ATH12K_HTT_RX_UL_MAX_UPLINK_RSSI_TRACK, "\n");
+ len += print_array_to_buf(buf, len, "rx_sta_power_headroom",
+ htt_stats_buf->uplink_sta_power_headroom,
+ ATH12K_HTT_RX_UL_MAX_UPLINK_RSSI_TRACK, "\n");
+ len += scnprintf(buf + len, buf_len - len,
+ "ul_ofdma_basic_trigger_rx_qos_null_only = %u\n\n",
+ le32_to_cpu(htt_stats_buf->ul_ofdma_bsc_trig_rx_qos_null_only));
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_ul_ofdma_user_stats(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_rx_pdev_ul_ofdma_user_stats_tlv *htt_stats_buf = tag_buf;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u32 len = stats_req->buf_len;
+ u8 *buf = stats_req->buf;
+ u32 user_index;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ user_index = __le32_to_cpu(htt_stats_buf->user_index);
+
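+ /* the section banner is printed only with the first per-user record (user_index 0) */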
+ if (!user_index)
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_RX_PDEV_UL_OFDMA_USER_STAS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "rx_ulofdma_non_data_ppdu_%u = %u\n",
+ user_index,
+ le32_to_cpu(htt_stats_buf->rx_ulofdma_non_data_ppdu));
+ len += scnprintf(buf + len, buf_len - len, "rx_ulofdma_data_ppdu_%u = %u\n",
+ user_index,
+ le32_to_cpu(htt_stats_buf->rx_ulofdma_data_ppdu));
+ len += scnprintf(buf + len, buf_len - len, "rx_ulofdma_mpdu_ok_%u = %u\n",
+ user_index,
+ le32_to_cpu(htt_stats_buf->rx_ulofdma_mpdu_ok));
+ len += scnprintf(buf + len, buf_len - len, "rx_ulofdma_mpdu_fail_%u = %u\n",
+ user_index,
+ le32_to_cpu(htt_stats_buf->rx_ulofdma_mpdu_fail));
+ len += scnprintf(buf + len, buf_len - len,
+ "rx_ulofdma_non_data_nusers_%u = %u\n", user_index,
+ le32_to_cpu(htt_stats_buf->rx_ulofdma_non_data_nusers));
+ len += scnprintf(buf + len, buf_len - len, "rx_ulofdma_data_nusers_%u = %u\n\n",
+ user_index,
+ le32_to_cpu(htt_stats_buf->rx_ulofdma_data_nusers));
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_ul_mumimo_trig_stats(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_rx_ul_mumimo_trig_stats_tlv *htt_stats_buf = tag_buf;
+ char str_buf[ATH12K_HTT_MAX_STRING_LEN] = {0};
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u32 len = stats_req->buf_len;
+ u8 *buf = stats_req->buf;
+ u32 mac_id;
+ u16 index;
+ u8 i, j;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ mac_id = __le32_to_cpu(htt_stats_buf->mac_id__word);
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_RX_PDEV_UL_MUMIMO_TRIG_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mac_id = %u\n",
+ u32_get_bits(mac_id, ATH12K_HTT_STATS_MAC_ID));
+ len += scnprintf(buf + len, buf_len - len, "rx_11ax_ul_mumimo = %u\n",
+ le32_to_cpu(htt_stats_buf->rx_11ax_ul_mumimo));
+ index = 0;
+ memset(str_buf, 0x0, ATH12K_HTT_MAX_STRING_LEN);
+ for (i = 0; i < ATH12K_HTT_RX_NUM_MCS_CNTRS; i++)
+ index += scnprintf(&str_buf[index], ATH12K_HTT_MAX_STRING_LEN - index,
+ " %u:%u,", i,
+ le32_to_cpu(htt_stats_buf->ul_mumimo_rx_mcs[i]));
+
+ for (i = 0; i < ATH12K_HTT_RX_NUM_EXTRA_MCS_CNTRS; i++)
+ index += scnprintf(&str_buf[index], ATH12K_HTT_MAX_STRING_LEN - index,
+ " %u:%u,", i + ATH12K_HTT_RX_NUM_MCS_CNTRS,
+ le32_to_cpu(htt_stats_buf->ul_mumimo_rx_mcs_ext[i]));
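+ /* overwrite the trailing comma left by the last scnprintf() with a NUL terminator */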
+ str_buf[--index] = '\0';
+ len += scnprintf(buf + len, buf_len - len, "ul_mumimo_rx_mcs = %s\n", str_buf);
+
+ for (j = 0; j < ATH12K_HTT_RX_NUM_GI_CNTRS; j++) {
+ index = 0;
+ memset(&str_buf[index], 0x0, ATH12K_HTT_MAX_STRING_LEN);
+ for (i = 0; i < ATH12K_HTT_RX_NUM_MCS_CNTRS; i++)
+ index += scnprintf(&str_buf[index],
+ ATH12K_HTT_MAX_STRING_LEN - index,
+ " %u:%u,", i,
+ le32_to_cpu(htt_stats_buf->ul_rx_gi[j][i]));
+
+ for (i = 0; i < ATH12K_HTT_RX_NUM_EXTRA_MCS_CNTRS; i++)
+ index += scnprintf(&str_buf[index],
+ ATH12K_HTT_MAX_STRING_LEN - index,
+ " %u:%u,", i + ATH12K_HTT_RX_NUM_MCS_CNTRS,
+ le32_to_cpu(htt_stats_buf->ul_gi_ext[j][i]));
+ str_buf[--index] = '\0';
+ len += scnprintf(buf + len, buf_len - len,
+ "ul_mumimo_rx_gi_%u = %s\n", j, str_buf);
+ }
+
+ index = 0;
+ memset(str_buf, 0x0, ATH12K_HTT_MAX_STRING_LEN);
+ len += print_array_to_buf_index(buf, len, "ul_mumimo_rx_nss", 1,
+ htt_stats_buf->ul_mumimo_rx_nss,
+ ATH12K_HTT_RX_NUM_SPATIAL_STREAMS, "\n");
+
+ len += print_array_to_buf(buf, len, "ul_mumimo_rx_bw",
+ htt_stats_buf->ul_mumimo_rx_bw,
+ ATH12K_HTT_RX_NUM_BW_CNTRS, "\n");
+ for (i = 0; i < ATH12K_HTT_RX_NUM_REDUCED_CHAN_TYPES; i++) {
+ index = 0;
+ memset(str_buf, 0x0, ATH12K_HTT_MAX_STRING_LEN);
+ for (j = 0; j < ATH12K_HTT_RX_NUM_BW_CNTRS; j++)
+ index += scnprintf(&str_buf[index],
+ ATH12K_HTT_MAX_STRING_LEN - index,
+ " %u:%u,", j,
+ le32_to_cpu(htt_stats_buf->red_bw[i][j]));
+ str_buf[--index] = '\0';
+ len += scnprintf(buf + len, buf_len - len, "%s = %s\n",
+ i == 0 ? "half_ul_mumimo_rx_bw" :
+ "quarter_ul_mumimo_rx_bw", str_buf);
+ }
+
+ len += scnprintf(buf + len, buf_len - len, "ul_mumimo_rx_stbc = %u\n",
+ le32_to_cpu(htt_stats_buf->ul_mumimo_rx_stbc));
+ len += scnprintf(buf + len, buf_len - len, "ul_mumimo_rx_ldpc = %u\n",
+ le32_to_cpu(htt_stats_buf->ul_mumimo_rx_ldpc));
+
+ for (j = 0; j < ATH12K_HTT_RX_NUM_SPATIAL_STREAMS; j++) {
+ len += scnprintf(buf + len, buf_len - len,
+ "rx_ul_mumimo_rssi_in_dbm: chain%u ", j);
+ len += print_array_to_buf_s8(buf, len, "", 0,
+ htt_stats_buf->ul_rssi[j],
+ ATH12K_HTT_RX_NUM_BW_CNTRS, "\n");
+ }
+
+ for (j = 0; j < ATH12K_HTT_TX_UL_MUMIMO_USER_STATS; j++) {
+ len += scnprintf(buf + len, buf_len - len,
+ "rx_ul_mumimo_target_rssi: user_%u ", j);
+ len += print_array_to_buf_s8(buf, len, "", 0,
+ htt_stats_buf->tgt_rssi[j],
+ ATH12K_HTT_RX_NUM_BW_CNTRS, "\n");
+ }
+
+ for (j = 0; j < ATH12K_HTT_TX_UL_MUMIMO_USER_STATS; j++) {
+ len += scnprintf(buf + len, buf_len - len,
+ "rx_ul_mumimo_fd_rssi: user_%u ", j);
+ len += print_array_to_buf_s8(buf, len, "", 0,
+ htt_stats_buf->fd[j],
+ ATH12K_HTT_RX_NUM_SPATIAL_STREAMS, "\n");
+ }
+
+ for (j = 0; j < ATH12K_HTT_TX_UL_MUMIMO_USER_STATS; j++) {
+ len += scnprintf(buf + len, buf_len - len,
+ "rx_ulmumimo_pilot_evm_db_mean: user_%u ", j);
+ len += print_array_to_buf_s8(buf, len, "", 0,
+ htt_stats_buf->db[j],
+ ATH12K_HTT_RX_NUM_SPATIAL_STREAMS, "\n");
+ }
+
+ len += scnprintf(buf + len, buf_len - len,
+ "ul_mumimo_basic_trigger_rx_qos_null_only = %u\n\n",
+ le32_to_cpu(htt_stats_buf->mumimo_bsc_trig_rx_qos_null_only));
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_rx_fse_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_rx_fse_stats_tlv *htt_stats_buf = tag_buf;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u32 len = stats_req->buf_len;
+ u8 *buf = stats_req->buf;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_STATS_RX_FSE_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "=== Software RX FSE STATS ===\n");
+ len += scnprintf(buf + len, buf_len - len, "Enable count = %u\n",
+ le32_to_cpu(htt_stats_buf->fse_enable_cnt));
+ len += scnprintf(buf + len, buf_len - len, "Disable count = %u\n",
+ le32_to_cpu(htt_stats_buf->fse_disable_cnt));
+ len += scnprintf(buf + len, buf_len - len, "Cache invalidate entry count = %u\n",
+ le32_to_cpu(htt_stats_buf->fse_cache_invalidate_entry_cnt));
+ len += scnprintf(buf + len, buf_len - len, "Full cache invalidate count = %u\n",
+ le32_to_cpu(htt_stats_buf->fse_full_cache_invalidate_cnt));
+
+ len += scnprintf(buf + len, buf_len - len, "\n=== Hardware RX FSE STATS ===\n");
+ len += scnprintf(buf + len, buf_len - len, "Cache hits count = %u\n",
+ le32_to_cpu(htt_stats_buf->fse_num_cache_hits_cnt));
+ len += scnprintf(buf + len, buf_len - len, "Cache no. of searches = %u\n",
+ le32_to_cpu(htt_stats_buf->fse_num_searches_cnt));
+ len += scnprintf(buf + len, buf_len - len, "Cache occupancy peak count:\n");
+ len += scnprintf(buf + len, buf_len - len, "[0] = %u [1-16] = %u [17-32] = %u ",
+ le32_to_cpu(htt_stats_buf->fse_cache_occupancy_peak_cnt[0]),
+ le32_to_cpu(htt_stats_buf->fse_cache_occupancy_peak_cnt[1]),
+ le32_to_cpu(htt_stats_buf->fse_cache_occupancy_peak_cnt[2]));
+ len += scnprintf(buf + len, buf_len - len, "[33-48] = %u [49-64] = %u ",
+ le32_to_cpu(htt_stats_buf->fse_cache_occupancy_peak_cnt[3]),
+ le32_to_cpu(htt_stats_buf->fse_cache_occupancy_peak_cnt[4]));
+ len += scnprintf(buf + len, buf_len - len, "[65-80] = %u [81-96] = %u ",
+ le32_to_cpu(htt_stats_buf->fse_cache_occupancy_peak_cnt[5]),
+ le32_to_cpu(htt_stats_buf->fse_cache_occupancy_peak_cnt[6]));
+ len += scnprintf(buf + len, buf_len - len, "[97-112] = %u [113-127] = %u ",
+ le32_to_cpu(htt_stats_buf->fse_cache_occupancy_peak_cnt[7]),
+ le32_to_cpu(htt_stats_buf->fse_cache_occupancy_peak_cnt[8]));
+ len += scnprintf(buf + len, buf_len - len, "[128] = %u\n",
+ le32_to_cpu(htt_stats_buf->fse_cache_occupancy_peak_cnt[9]));
+ len += scnprintf(buf + len, buf_len - len, "Cache occupancy current count:\n");
+ len += scnprintf(buf + len, buf_len - len, "[0] = %u [1-16] = %u [17-32] = %u ",
+ le32_to_cpu(htt_stats_buf->fse_cache_occupancy_curr_cnt[0]),
+ le32_to_cpu(htt_stats_buf->fse_cache_occupancy_curr_cnt[1]),
+ le32_to_cpu(htt_stats_buf->fse_cache_occupancy_curr_cnt[2]));
+ len += scnprintf(buf + len, buf_len - len, "[33-48] = %u [49-64] = %u ",
+ le32_to_cpu(htt_stats_buf->fse_cache_occupancy_curr_cnt[3]),
+ le32_to_cpu(htt_stats_buf->fse_cache_occupancy_curr_cnt[4]));
+ len += scnprintf(buf + len, buf_len - len, "[65-80] = %u [81-96] = %u ",
+ le32_to_cpu(htt_stats_buf->fse_cache_occupancy_curr_cnt[5]),
+ le32_to_cpu(htt_stats_buf->fse_cache_occupancy_curr_cnt[6]));
+ len += scnprintf(buf + len, buf_len - len, "[97-112] = %u [113-127] = %u ",
+ le32_to_cpu(htt_stats_buf->fse_cache_occupancy_curr_cnt[7]),
+ le32_to_cpu(htt_stats_buf->fse_cache_occupancy_curr_cnt[8]));
+ len += scnprintf(buf + len, buf_len - len, "[128] = %u\n",
+ le32_to_cpu(htt_stats_buf->fse_cache_occupancy_curr_cnt[9]));
+ len += scnprintf(buf + len, buf_len - len, "Cache search square count:\n");
+ len += scnprintf(buf + len, buf_len - len, "[0] = %u [1-50] = %u [51-100] = %u ",
+ le32_to_cpu(htt_stats_buf->fse_search_stat_square_cnt[0]),
+ le32_to_cpu(htt_stats_buf->fse_search_stat_square_cnt[1]),
+ le32_to_cpu(htt_stats_buf->fse_search_stat_square_cnt[2]));
+ len += scnprintf(buf + len, buf_len - len, "[101-200] = %u [201-255] = %u ",
+ le32_to_cpu(htt_stats_buf->fse_search_stat_square_cnt[3]),
+ le32_to_cpu(htt_stats_buf->fse_search_stat_square_cnt[4]));
+ len += scnprintf(buf + len, buf_len - len, "[256] = %u\n",
+ le32_to_cpu(htt_stats_buf->fse_search_stat_square_cnt[5]));
+ len += scnprintf(buf + len, buf_len - len, "Cache search peak pending count:\n");
+ len += scnprintf(buf + len, buf_len - len, "[0] = %u [1-2] = %u [3-4] = %u ",
+ le32_to_cpu(htt_stats_buf->fse_search_stat_peak_cnt[0]),
+ le32_to_cpu(htt_stats_buf->fse_search_stat_peak_cnt[1]),
+ le32_to_cpu(htt_stats_buf->fse_search_stat_peak_cnt[2]));
+ len += scnprintf(buf + len, buf_len - len, "[Greater/Equal to 5] = %u\n",
+ le32_to_cpu(htt_stats_buf->fse_search_stat_peak_cnt[3]));
+ len += scnprintf(buf + len, buf_len - len, "Cache search tot pending count:\n");
+ len += scnprintf(buf + len, buf_len - len, "[0] = %u [1-2] = %u [3-4] = %u ",
+ le32_to_cpu(htt_stats_buf->fse_search_stat_pending_cnt[0]),
+ le32_to_cpu(htt_stats_buf->fse_search_stat_pending_cnt[1]),
+ le32_to_cpu(htt_stats_buf->fse_search_stat_pending_cnt[2]));
+ len += scnprintf(buf + len, buf_len - len, "[Greater/Equal to 5] = %u\n\n",
+ le32_to_cpu(htt_stats_buf->fse_search_stat_pending_cnt[3]));
+
+ stats_req->buf_len = len;
+}
+
+static void
ath12k_htt_print_pdev_tx_rate_txbf_stats_tlv(const void *tag_buf, u16 tag_len,
struct debug_htt_stats_req *stats_req)
{
@@ -3812,6 +4524,497 @@ ath12k_htt_print_pdev_mbssid_ctrl_frame_stats_tlv(const void *tag_buf, u16 tag_l
stats_req->buf_len = len;
}
+static inline void
+ath12k_htt_print_tx_pdev_rate_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_tx_pdev_rate_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u8 i, j;
+ u32 mac_id_word;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ mac_id_word = le32_to_cpu(htt_stats_buf->mac_id_word);
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_PDEV_RATE_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mac_id = %u\n",
+ u32_get_bits(mac_id_word, ATH12K_HTT_STATS_MAC_ID));
+ len += scnprintf(buf + len, buf_len - len, "tx_ldpc = %u\n",
+ le32_to_cpu(htt_stats_buf->tx_ldpc));
+ len += scnprintf(buf + len, buf_len - len, "ac_mu_mimo_tx_ldpc = %u\n",
+ le32_to_cpu(htt_stats_buf->ac_mu_mimo_tx_ldpc));
+ len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_tx_ldpc = %u\n",
+ le32_to_cpu(htt_stats_buf->ax_mu_mimo_tx_ldpc));
+ len += scnprintf(buf + len, buf_len - len, "ofdma_tx_ldpc = %u\n",
+ le32_to_cpu(htt_stats_buf->ofdma_tx_ldpc));
+ len += scnprintf(buf + len, buf_len - len, "rts_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->rts_cnt));
+ len += scnprintf(buf + len, buf_len - len, "rts_success = %u\n",
+ le32_to_cpu(htt_stats_buf->rts_success));
+ len += scnprintf(buf + len, buf_len - len, "ack_rssi = %u\n",
+ le32_to_cpu(htt_stats_buf->ack_rssi));
+ len += scnprintf(buf + len, buf_len - len,
+ "Legacy CCK Rates: 1 Mbps: %u, 2 Mbps: %u, 5.5 Mbps: %u, 12 Mbps: %u\n",
+ le32_to_cpu(htt_stats_buf->tx_legacy_cck_rate[0]),
+ le32_to_cpu(htt_stats_buf->tx_legacy_cck_rate[1]),
+ le32_to_cpu(htt_stats_buf->tx_legacy_cck_rate[2]),
+ le32_to_cpu(htt_stats_buf->tx_legacy_cck_rate[3]));
+ len += scnprintf(buf + len, buf_len - len,
+ "Legacy OFDM Rates: 6 Mbps: %u, 9 Mbps: %u, 12 Mbps: %u, 18 Mbps: %u\n"
+ " 24 Mbps: %u, 36 Mbps: %u, 48 Mbps: %u, 54 Mbps: %u\n",
+ le32_to_cpu(htt_stats_buf->tx_legacy_ofdm_rate[0]),
+ le32_to_cpu(htt_stats_buf->tx_legacy_ofdm_rate[1]),
+ le32_to_cpu(htt_stats_buf->tx_legacy_ofdm_rate[2]),
+ le32_to_cpu(htt_stats_buf->tx_legacy_ofdm_rate[3]),
+ le32_to_cpu(htt_stats_buf->tx_legacy_ofdm_rate[4]),
+ le32_to_cpu(htt_stats_buf->tx_legacy_ofdm_rate[5]),
+ le32_to_cpu(htt_stats_buf->tx_legacy_ofdm_rate[6]),
+ le32_to_cpu(htt_stats_buf->tx_legacy_ofdm_rate[7]));
+ len += scnprintf(buf + len, buf_len - len, "HE LTF: 1x: %u, 2x: %u, 4x: %u\n",
+ le32_to_cpu(htt_stats_buf->tx_he_ltf[1]),
+ le32_to_cpu(htt_stats_buf->tx_he_ltf[2]),
+ le32_to_cpu(htt_stats_buf->tx_he_ltf[3]));
+
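+ /* append the extended MCS counters (indices 12-13 from tx_mcs_ext, 14-15 from tx_mcs_ext_2) to the same tx_mcs line */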
+ len += print_array_to_buf(buf, len, "tx_mcs", htt_stats_buf->tx_mcs,
+ ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, NULL);
+ for (j = 0; j < ATH12K_HTT_TX_PDEV_STATS_NUM_EXTRA_MCS_COUNTERS; j++)
+ len += scnprintf(buf + len, buf_len - len, ", %u:%u",
+ j + ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS,
+ le32_to_cpu(htt_stats_buf->tx_mcs_ext[j]));
+ for (j = 0; j < ATH12K_HTT_TX_PDEV_STATS_NUM_EXTRA2_MCS_COUNTERS; j++)
+ len += scnprintf(buf + len, buf_len - len, ", %u:%u",
+ j + ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS +
+ ATH12K_HTT_TX_PDEV_STATS_NUM_EXTRA2_MCS_COUNTERS,
+ le32_to_cpu(htt_stats_buf->tx_mcs_ext_2[j]));
+ len += scnprintf(buf + len, buf_len - len, "\n");
+
+ len += print_array_to_buf(buf, len, "ax_mu_mimo_tx_mcs",
+ htt_stats_buf->ax_mu_mimo_tx_mcs,
+ ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, NULL);
+ for (j = 0; j < ATH12K_HTT_TX_PDEV_STATS_NUM_EXTRA_MCS_COUNTERS; j++)
+ len += scnprintf(buf + len, buf_len - len, ", %u:%u",
+ j + ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS,
+ le32_to_cpu(htt_stats_buf->ax_mu_mimo_tx_mcs_ext[j]));
+ len += scnprintf(buf + len, buf_len - len, "\n");
+
+ len += print_array_to_buf(buf, len, "ofdma_tx_mcs",
+ htt_stats_buf->ofdma_tx_mcs,
+ ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, NULL);
+ for (j = 0; j < ATH12K_HTT_TX_PDEV_STATS_NUM_EXTRA_MCS_COUNTERS; j++)
+ len += scnprintf(buf + len, buf_len - len, ", %u:%u",
+ j + ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS,
+ le32_to_cpu(htt_stats_buf->ofdma_tx_mcs_ext[j]));
+ len += scnprintf(buf + len, buf_len - len, "\n");
+
+ len += scnprintf(buf + len, buf_len - len, "tx_nss =");
+ for (j = 1; j <= ATH12K_HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS; j++)
+ len += scnprintf(buf + len, buf_len - len, " %u:%u,",
+ j, le32_to_cpu(htt_stats_buf->tx_nss[j - 1]));
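+ /* drop the trailing comma before terminating the line */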
+ len--;
+ len += scnprintf(buf + len, buf_len - len, "\n");
+
+ len += scnprintf(buf + len, buf_len - len, "ac_mu_mimo_tx_nss =");
+ for (j = 1; j <= ATH12K_HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS; j++)
+ len += scnprintf(buf + len, buf_len - len, " %u:%u,",
+ j, le32_to_cpu(htt_stats_buf->ac_mu_mimo_tx_nss[j - 1]));
+ len--;
+ len += scnprintf(buf + len, buf_len - len, "\n");
+
+ len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_tx_nss =");
+ for (j = 1; j <= ATH12K_HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS; j++)
+ len += scnprintf(buf + len, buf_len - len, " %u:%u,",
+ j, le32_to_cpu(htt_stats_buf->ax_mu_mimo_tx_nss[j - 1]));
+ len--;
+ len += scnprintf(buf + len, buf_len - len, "\n");
+
+ len += scnprintf(buf + len, buf_len - len, "ofdma_tx_nss =");
+ for (j = 1; j <= ATH12K_HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS; j++)
+ len += scnprintf(buf + len, buf_len - len, " %u:%u,",
+ j, le32_to_cpu(htt_stats_buf->ofdma_tx_nss[j - 1]));
+ len--;
+ len += scnprintf(buf + len, buf_len - len, "\n");
+
+ len += print_array_to_buf(buf, len, "tx_bw", htt_stats_buf->tx_bw,
+ ATH12K_HTT_TX_PDEV_STATS_NUM_BW_COUNTERS, NULL);
+ len += scnprintf(buf + len, buf_len - len, ", %u:%u\n",
+ ATH12K_HTT_TX_PDEV_STATS_NUM_BW_COUNTERS,
+ le32_to_cpu(htt_stats_buf->tx_bw_320mhz));
+
+ len += print_array_to_buf(buf, len, "tx_stbc",
+ htt_stats_buf->tx_stbc,
+ ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, NULL);
+ for (j = 0; j < ATH12K_HTT_TX_PDEV_STATS_NUM_EXTRA_MCS_COUNTERS; j++)
+ len += scnprintf(buf + len, buf_len - len, ", %u:%u",
+ j + ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS,
+ le32_to_cpu(htt_stats_buf->tx_stbc_ext[j]));
+ len += scnprintf(buf + len, buf_len - len, "\n");
+
+ for (j = 0; j < ATH12K_HTT_TX_PDEV_STATS_NUM_GI_COUNTERS; j++) {
+ len += scnprintf(buf + len, (buf_len - len),
+ "tx_gi[%u] =", j);
+ len += print_array_to_buf(buf, len, NULL, htt_stats_buf->tx_gi[j],
+ ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS,
+ NULL);
+ for (i = 0; i < ATH12K_HTT_TX_PDEV_STATS_NUM_EXTRA_MCS_COUNTERS; i++)
+ len += scnprintf(buf + len, buf_len - len, ", %u:%u",
+ i + ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS,
+ le32_to_cpu(htt_stats_buf->tx_gi_ext[j][i]));
+ len += scnprintf(buf + len, buf_len - len, "\n");
+ }
+
+ for (j = 0; j < ATH12K_HTT_TX_PDEV_STATS_NUM_GI_COUNTERS; j++) {
+ len += scnprintf(buf + len, (buf_len - len),
+ "ac_mu_mimo_tx_gi[%u] =", j);
+ len += print_array_to_buf(buf, len, NULL,
+ htt_stats_buf->ac_mu_mimo_tx_gi[j],
+ ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS,
+ "\n");
+ }
+
+ for (j = 0; j < ATH12K_HTT_TX_PDEV_STATS_NUM_GI_COUNTERS; j++) {
+ len += scnprintf(buf + len, (buf_len - len),
+ "ax_mu_mimo_tx_gi[%u] =", j);
+ len += print_array_to_buf(buf, len, NULL, htt_stats_buf->ax_mimo_tx_gi[j],
+ ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS,
+ NULL);
+ for (i = 0; i < ATH12K_HTT_TX_PDEV_STATS_NUM_EXTRA_MCS_COUNTERS; i++)
+ len += scnprintf(buf + len, buf_len - len, ", %u:%u",
+ i + ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS,
+ le32_to_cpu(htt_stats_buf->ax_tx_gi_ext[j][i]));
+ len += scnprintf(buf + len, buf_len - len, "\n");
+ }
+
+ for (j = 0; j < ATH12K_HTT_TX_PDEV_STATS_NUM_GI_COUNTERS; j++) {
+ len += scnprintf(buf + len, (buf_len - len),
+ "ofdma_tx_gi[%u] = ", j);
+ len += print_array_to_buf(buf, len, NULL, htt_stats_buf->ofdma_tx_gi[j],
+ ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS,
+ NULL);
+ for (i = 0; i < ATH12K_HTT_TX_PDEV_STATS_NUM_EXTRA_MCS_COUNTERS; i++)
+ len += scnprintf(buf + len, buf_len - len, ", %u:%u",
+ i + ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS,
+ le32_to_cpu(htt_stats_buf->ofd_tx_gi_ext[j][i]));
+ len += scnprintf(buf + len, buf_len - len, "\n");
+ }
+
+ len += print_array_to_buf(buf, len, "tx_su_mcs", htt_stats_buf->tx_su_mcs,
+ ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
+ len += print_array_to_buf(buf, len, "tx_mu_mcs", htt_stats_buf->tx_mu_mcs,
+ ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
+ len += print_array_to_buf(buf, len, "ac_mu_mimo_tx_mcs",
+ htt_stats_buf->ac_mu_mimo_tx_mcs,
+ ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
+ len += print_array_to_buf(buf, len, "ac_mu_mimo_tx_bw",
+ htt_stats_buf->ac_mu_mimo_tx_bw,
+ ATH12K_HTT_TX_PDEV_STATS_NUM_BW_COUNTERS, "\n");
+ len += print_array_to_buf(buf, len, "ax_mu_mimo_tx_bw",
+ htt_stats_buf->ax_mu_mimo_tx_bw,
+ ATH12K_HTT_TX_PDEV_STATS_NUM_BW_COUNTERS, "\n");
+ len += print_array_to_buf(buf, len, "ofdma_tx_bw",
+ htt_stats_buf->ofdma_tx_bw,
+ ATH12K_HTT_TX_PDEV_STATS_NUM_BW_COUNTERS, "\n");
+ len += print_array_to_buf(buf, len, "tx_pream", htt_stats_buf->tx_pream,
+ ATH12K_HTT_TX_PDEV_STATS_NUM_PREAMBLE_TYPES, "\n");
+ len += print_array_to_buf(buf, len, "tx_dcm", htt_stats_buf->tx_dcm,
+ ATH12K_HTT_TX_PDEV_STATS_NUM_DCM_COUNTERS, "\n");
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+ath12k_htt_print_rx_pdev_rate_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_rx_pdev_rate_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u8 i, j;
+ u32 mac_id_word;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ mac_id_word = le32_to_cpu(htt_stats_buf->mac_id_word);
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_RX_PDEV_RATE_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mac_id = %u\n",
+ u32_get_bits(mac_id_word, ATH12K_HTT_STATS_MAC_ID));
+ len += scnprintf(buf + len, buf_len - len, "nsts = %u\n",
+ le32_to_cpu(htt_stats_buf->nsts));
+ len += scnprintf(buf + len, buf_len - len, "rx_ldpc = %u\n",
+ le32_to_cpu(htt_stats_buf->rx_ldpc));
+ len += scnprintf(buf + len, buf_len - len, "rts_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->rts_cnt));
+ len += scnprintf(buf + len, buf_len - len, "rssi_mgmt = %u\n",
+ le32_to_cpu(htt_stats_buf->rssi_mgmt));
+ len += scnprintf(buf + len, buf_len - len, "rssi_data = %u\n",
+ le32_to_cpu(htt_stats_buf->rssi_data));
+ len += scnprintf(buf + len, buf_len - len, "rssi_comb = %u\n",
+ le32_to_cpu(htt_stats_buf->rssi_comb));
+ len += scnprintf(buf + len, buf_len - len, "rssi_in_dbm = %d\n",
+ le32_to_cpu(htt_stats_buf->rssi_in_dbm));
+ len += scnprintf(buf + len, buf_len - len, "rx_evm_nss_count = %u\n",
+ le32_to_cpu(htt_stats_buf->nss_count));
+ len += scnprintf(buf + len, buf_len - len, "rx_evm_pilot_count = %u\n",
+ le32_to_cpu(htt_stats_buf->pilot_count));
+ len += scnprintf(buf + len, buf_len - len, "rx_11ax_su_ext = %u\n",
+ le32_to_cpu(htt_stats_buf->rx_11ax_su_ext));
+ len += scnprintf(buf + len, buf_len - len, "rx_11ac_mumimo = %u\n",
+ le32_to_cpu(htt_stats_buf->rx_11ac_mumimo));
+ len += scnprintf(buf + len, buf_len - len, "rx_11ax_mumimo = %u\n",
+ le32_to_cpu(htt_stats_buf->rx_11ax_mumimo));
+ len += scnprintf(buf + len, buf_len - len, "rx_11ax_ofdma = %u\n",
+ le32_to_cpu(htt_stats_buf->rx_11ax_ofdma));
+ len += scnprintf(buf + len, buf_len - len, "txbf = %u\n",
+ le32_to_cpu(htt_stats_buf->txbf));
+ len += scnprintf(buf + len, buf_len - len, "rx_su_ndpa = %u\n",
+ le32_to_cpu(htt_stats_buf->rx_su_ndpa));
+ len += scnprintf(buf + len, buf_len - len, "rx_mu_ndpa = %u\n",
+ le32_to_cpu(htt_stats_buf->rx_mu_ndpa));
+ len += scnprintf(buf + len, buf_len - len, "rx_br_poll = %u\n",
+ le32_to_cpu(htt_stats_buf->rx_br_poll));
+ len += scnprintf(buf + len, buf_len - len, "rx_active_dur_us_low = %u\n",
+ le32_to_cpu(htt_stats_buf->rx_active_dur_us_low));
+ len += scnprintf(buf + len, buf_len - len, "rx_active_dur_us_high = %u\n",
+ le32_to_cpu(htt_stats_buf->rx_active_dur_us_high));
+ len += scnprintf(buf + len, buf_len - len, "rx_11ax_ul_ofdma = %u\n",
+ le32_to_cpu(htt_stats_buf->rx_11ax_ul_ofdma));
+ len += scnprintf(buf + len, buf_len - len, "ul_ofdma_rx_stbc = %u\n",
+ le32_to_cpu(htt_stats_buf->ul_ofdma_rx_stbc));
+ len += scnprintf(buf + len, buf_len - len, "ul_ofdma_rx_ldpc = %u\n",
+ le32_to_cpu(htt_stats_buf->ul_ofdma_rx_ldpc));
+ len += scnprintf(buf + len, buf_len - len, "per_chain_rssi_pkt_type = %#x\n",
+ le32_to_cpu(htt_stats_buf->per_chain_rssi_pkt_type));
+
+ len += print_array_to_buf(buf, len, "rx_nss", htt_stats_buf->rx_nss,
+ ATH12K_HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS, "\n");
+ len += print_array_to_buf(buf, len, "rx_dcm", htt_stats_buf->rx_dcm,
+ ATH12K_HTT_RX_PDEV_STATS_NUM_DCM_COUNTERS, "\n");
+ len += print_array_to_buf(buf, len, "rx_stbc", htt_stats_buf->rx_stbc,
+ ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
+ len += print_array_to_buf(buf, len, "rx_bw", htt_stats_buf->rx_bw,
+ ATH12K_HTT_RX_PDEV_STATS_NUM_BW_COUNTERS, "\n");
+ len += print_array_to_buf(buf, len, "rx_pream", htt_stats_buf->rx_pream,
+ ATH12K_HTT_RX_PDEV_STATS_NUM_PREAMBLE_TYPES, "\n");
+ len += print_array_to_buf(buf, len, "rx_11ax_su_txbf_mcs",
+ htt_stats_buf->rx_11ax_su_txbf_mcs,
+ ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
+ len += print_array_to_buf(buf, len, "rx_11ax_mu_txbf_mcs",
+ htt_stats_buf->rx_11ax_mu_txbf_mcs,
+ ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
+ len += print_array_to_buf(buf, len, "rx_legacy_cck_rate",
+ htt_stats_buf->rx_legacy_cck_rate,
+ ATH12K_HTT_RX_PDEV_STATS_NUM_LEGACY_CCK_STATS, "\n");
+ len += print_array_to_buf(buf, len, "rx_legacy_ofdm_rate",
+ htt_stats_buf->rx_legacy_ofdm_rate,
+ ATH12K_HTT_RX_PDEV_STATS_NUM_LEGACY_OFDM_STATS, "\n");
+ len += print_array_to_buf(buf, len, "ul_ofdma_rx_mcs",
+ htt_stats_buf->ul_ofdma_rx_mcs,
+ ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
+ len += print_array_to_buf(buf, len, "ul_ofdma_rx_nss",
+ htt_stats_buf->ul_ofdma_rx_nss,
+ ATH12K_HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS, "\n");
+ len += print_array_to_buf(buf, len, "ul_ofdma_rx_bw",
+ htt_stats_buf->ul_ofdma_rx_bw,
+ ATH12K_HTT_RX_PDEV_STATS_NUM_BW_COUNTERS, "\n");
+ len += print_array_to_buf(buf, len, "rx_ulofdma_non_data_ppdu",
+ htt_stats_buf->rx_ulofdma_non_data_ppdu,
+ ATH12K_HTT_RX_PDEV_MAX_OFDMA_NUM_USER, "\n");
+ len += print_array_to_buf(buf, len, "rx_ulofdma_data_ppdu",
+ htt_stats_buf->rx_ulofdma_data_ppdu,
+ ATH12K_HTT_RX_PDEV_MAX_OFDMA_NUM_USER, "\n");
+ len += print_array_to_buf(buf, len, "rx_ulofdma_mpdu_ok",
+ htt_stats_buf->rx_ulofdma_mpdu_ok,
+ ATH12K_HTT_RX_PDEV_MAX_OFDMA_NUM_USER, "\n");
+ len += print_array_to_buf(buf, len, "rx_ulofdma_mpdu_fail",
+ htt_stats_buf->rx_ulofdma_mpdu_fail,
+ ATH12K_HTT_RX_PDEV_MAX_OFDMA_NUM_USER, "\n");
+ len += print_array_to_buf(buf, len, "rx_ulofdma_non_data_nusers",
+ htt_stats_buf->rx_ulofdma_non_data_nusers,
+ ATH12K_HTT_RX_PDEV_MAX_OFDMA_NUM_USER, "\n");
+ len += print_array_to_buf(buf, len, "rx_ulofdma_data_nusers",
+ htt_stats_buf->rx_ulofdma_data_nusers,
+ ATH12K_HTT_RX_PDEV_MAX_OFDMA_NUM_USER, "\n");
+ len += print_array_to_buf(buf, len, "rx_11ax_dl_ofdma_mcs",
+ htt_stats_buf->rx_11ax_dl_ofdma_mcs,
+ ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
+ len += print_array_to_buf(buf, len, "rx_11ax_dl_ofdma_ru",
+ htt_stats_buf->rx_11ax_dl_ofdma_ru,
+ ATH12K_HTT_RX_PDEV_STATS_NUM_RU_SIZE_COUNTERS, "\n");
+ len += print_array_to_buf(buf, len, "rx_ulmumimo_non_data_ppdu",
+ htt_stats_buf->rx_ulmumimo_non_data_ppdu,
+ ATH12K_HTT_RX_PDEV_MAX_ULMUMIMO_NUM_USER, "\n");
+ len += print_array_to_buf(buf, len, "rx_ulmumimo_data_ppdu",
+ htt_stats_buf->rx_ulmumimo_data_ppdu,
+ ATH12K_HTT_RX_PDEV_MAX_ULMUMIMO_NUM_USER, "\n");
+ len += print_array_to_buf(buf, len, "rx_ulmumimo_mpdu_ok",
+ htt_stats_buf->rx_ulmumimo_mpdu_ok,
+ ATH12K_HTT_RX_PDEV_MAX_ULMUMIMO_NUM_USER, "\n");
+ len += print_array_to_buf(buf, len, "rx_ulmumimo_mpdu_fail",
+ htt_stats_buf->rx_ulmumimo_mpdu_fail,
+ ATH12K_HTT_RX_PDEV_MAX_ULMUMIMO_NUM_USER, "\n");
+
+ len += print_array_to_buf(buf, len, "rx_mcs",
+ htt_stats_buf->rx_mcs,
+ ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS, NULL);
+ for (j = 0; j < ATH12K_HTT_TX_PDEV_STATS_NUM_EXTRA_MCS_COUNTERS; j++)
+ len += scnprintf(buf + len, buf_len - len, ", %u:%u",
+ j + ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS,
+ le32_to_cpu(htt_stats_buf->rx_mcs_ext[j]));
+ len += scnprintf(buf + len, buf_len - len, "\n");
+
+ for (j = 0; j < ATH12K_HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS; j++) {
+ len += scnprintf(buf + len, buf_len - len,
+ "pilot_evm_db[%u] =", j);
+ len += print_array_to_buf(buf, len, NULL,
+ htt_stats_buf->rx_pil_evm_db[j],
+ ATH12K_HTT_RX_PDEV_STATS_RXEVM_MAX_PILOTS_NSS,
+ "\n");
+ }
+
+ len += scnprintf(buf + len, buf_len - len, "pilot_evm_db_mean =");
+ for (i = 0; i < ATH12K_HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS; i++)
+ len += scnprintf(buf + len,
+ buf_len - len,
+ " %u:%d,", i,
+ le32_to_cpu(htt_stats_buf->rx_pilot_evm_db_mean[i]));
+ len--;
+ len += scnprintf(buf + len, buf_len - len, "\n");
+
+ for (j = 0; j < ATH12K_HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS; j++) {
+ len += scnprintf(buf + len, buf_len - len,
+ "rssi_chain_in_db[%u] = ", j);
+ for (i = 0; i < ATH12K_HTT_RX_PDEV_STATS_NUM_BW_COUNTERS; i++)
+ len += scnprintf(buf + len,
+ buf_len - len,
+ " %u: %d,", i,
+ htt_stats_buf->rssi_chain_in_db[j][i]);
+ len--;
+ len += scnprintf(buf + len, buf_len - len, "\n");
+ }
+
+ for (j = 0; j < ATH12K_HTT_RX_PDEV_STATS_NUM_GI_COUNTERS; j++) {
+ len += scnprintf(buf + len, buf_len - len,
+ "rx_gi[%u] = ", j);
+ len += print_array_to_buf(buf, len, NULL,
+ htt_stats_buf->rx_gi[j],
+ ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS,
+ "\n");
+ }
+
+ for (j = 0; j < ATH12K_HTT_RX_PDEV_STATS_NUM_GI_COUNTERS; j++) {
+ len += scnprintf(buf + len, buf_len - len,
+ "ul_ofdma_rx_gi[%u] = ", j);
+ len += print_array_to_buf(buf, len, NULL,
+ htt_stats_buf->ul_ofdma_rx_gi[j],
+ ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS,
+ "\n");
+ }
+
+ for (j = 0; j < ATH12K_HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS; j++) {
+ len += scnprintf(buf + len, buf_len - len,
+ "rx_ul_fd_rssi: nss[%u] = ", j);
+ for (i = 0; i < ATH12K_HTT_RX_PDEV_MAX_OFDMA_NUM_USER; i++)
+ len += scnprintf(buf + len,
+ buf_len - len,
+ " %u:%d,",
+ i, htt_stats_buf->rx_ul_fd_rssi[j][i]);
+ len--;
+ len += scnprintf(buf + len, buf_len - len, "\n");
+ }
+
+ for (j = 0; j < ATH12K_HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS; j++) {
+ len += scnprintf(buf + len, buf_len - len,
+ "rx_per_chain_rssi_in_dbm[%u] =", j);
+ for (i = 0; i < ATH12K_HTT_RX_PDEV_STATS_NUM_BW_COUNTERS; i++)
+ len += scnprintf(buf + len,
+ buf_len - len,
+ " %u:%d,",
+ i,
+ htt_stats_buf->rx_per_chain_rssi_in_dbm[j][i]);
+ len--;
+ len += scnprintf(buf + len, buf_len - len, "\n");
+ }
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+ath12k_htt_print_rx_pdev_rate_ext_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_rx_pdev_rate_ext_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u8 j;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_RX_PDEV_RATE_EXT_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "rssi_mgmt_in_dbm = %d\n",
+ le32_to_cpu(htt_stats_buf->rssi_mgmt_in_dbm));
+
+ len += print_array_to_buf(buf, len, "rx_stbc_ext",
+ htt_stats_buf->rx_stbc_ext,
+ ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS_EXT, "\n");
+ len += print_array_to_buf(buf, len, "ul_ofdma_rx_mcs_ext",
+ htt_stats_buf->ul_ofdma_rx_mcs_ext,
+ ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS_EXT, "\n");
+ len += print_array_to_buf(buf, len, "rx_11ax_su_txbf_mcs_ext",
+ htt_stats_buf->rx_11ax_su_txbf_mcs_ext,
+ ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS_EXT, "\n");
+ len += print_array_to_buf(buf, len, "rx_11ax_mu_txbf_mcs_ext",
+ htt_stats_buf->rx_11ax_mu_txbf_mcs_ext,
+ ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS_EXT, "\n");
+ len += print_array_to_buf(buf, len, "rx_11ax_dl_ofdma_mcs_ext",
+ htt_stats_buf->rx_11ax_dl_ofdma_mcs_ext,
+ ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS_EXT, "\n");
+ len += print_array_to_buf(buf, len, "rx_bw_ext",
+ htt_stats_buf->rx_bw_ext,
+ ATH12K_HTT_RX_PDEV_STATS_NUM_BW_EXT2_COUNTERS, "\n");
+ len += print_array_to_buf(buf, len, "rx_su_punctured_mode",
+ htt_stats_buf->rx_su_punctured_mode,
+ ATH12K_HTT_RX_PDEV_STATS_NUM_PUNCTURED_MODE_COUNTERS,
+ "\n");
+
+ len += print_array_to_buf(buf, len, "rx_mcs_ext",
+ htt_stats_buf->rx_mcs_ext,
+ ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS_EXT,
+ NULL);
+ for (j = 0; j < ATH12K_HTT_RX_PDEV_STATS_NUM_EXTRA2_MCS_COUNTERS; j++)
+ len += scnprintf(buf + len, buf_len - len, ", %u:%u",
+ j + ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS_EXT,
+ le32_to_cpu(htt_stats_buf->rx_mcs_ext_2[j]));
+ len += scnprintf(buf + len, buf_len - len, "\n");
+
+ for (j = 0; j < ATH12K_HTT_RX_PDEV_STATS_NUM_GI_COUNTERS; j++) {
+ len += scnprintf(buf + len, buf_len - len,
+ "rx_gi_ext[%u] = ", j);
+ len += print_array_to_buf(buf, len, NULL,
+ htt_stats_buf->rx_gi_ext[j],
+ ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS_EXT,
+ "\n");
+ }
+
+ for (j = 0; j < ATH12K_HTT_RX_PDEV_STATS_NUM_GI_COUNTERS; j++) {
+ len += scnprintf(buf + len, buf_len - len,
+ "ul_ofdma_rx_gi_ext[%u] = ", j);
+ len += print_array_to_buf(buf, len, NULL,
+ htt_stats_buf->ul_ofdma_rx_gi_ext[j],
+ ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS_EXT,
+ "\n");
+ }
+
+ stats_req->buf_len = len;
+}
+
static int ath12k_dbg_htt_ext_stats_parse(struct ath12k_base *ab,
u16 tag, u16 len, const void *tag_buf,
void *user_data)
@@ -3982,9 +5185,33 @@ static int ath12k_dbg_htt_ext_stats_parse(struct ath12k_base *ab,
case HTT_STATS_PDEV_CCA_COUNTERS_TAG:
ath12k_htt_print_pdev_stats_cca_counters_tlv(tag_buf, len, stats_req);
break;
+ case HTT_STATS_TX_SOUNDING_STATS_TAG:
+ ath12k_htt_print_tx_sounding_stats_tlv(tag_buf, len, stats_req);
+ break;
case HTT_STATS_PDEV_OBSS_PD_TAG:
ath12k_htt_print_pdev_obss_pd_stats_tlv(tag_buf, len, stats_req);
break;
+ case HTT_STATS_LATENCY_CTX_TAG:
+ ath12k_htt_print_latency_prof_ctx_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_LATENCY_CNT_TAG:
+ ath12k_htt_print_latency_prof_cnt(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_LATENCY_PROF_STATS_TAG:
+ ath12k_htt_print_latency_prof_stats_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_RX_PDEV_UL_TRIG_STATS_TAG:
+ ath12k_htt_print_ul_ofdma_trigger_stats(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_RX_PDEV_UL_OFDMA_USER_STATS_TAG:
+ ath12k_htt_print_ul_ofdma_user_stats(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_RX_PDEV_UL_MUMIMO_TRIG_STATS_TAG:
+ ath12k_htt_print_ul_mumimo_trig_stats(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_RX_FSE_STATS_TAG:
+ ath12k_htt_print_rx_fse_stats_tlv(tag_buf, len, stats_req);
+ break;
case HTT_STATS_PDEV_TX_RATE_TXBF_STATS_TAG:
ath12k_htt_print_pdev_tx_rate_txbf_stats_tlv(tag_buf, len, stats_req);
break;
@@ -4047,6 +5274,15 @@ static int ath12k_dbg_htt_ext_stats_parse(struct ath12k_base *ab,
ath12k_htt_print_pdev_mbssid_ctrl_frame_stats_tlv(tag_buf, len,
stats_req);
break;
+ case HTT_STATS_TX_PDEV_RATE_STATS_TAG:
+ ath12k_htt_print_tx_pdev_rate_stats_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_RX_PDEV_RATE_STATS_TAG:
+ ath12k_htt_print_rx_pdev_rate_stats_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_RX_PDEV_RATE_EXT_STATS_TAG:
+ ath12k_htt_print_rx_pdev_rate_ext_stats_tlv(tag_buf, len, stats_req);
+ break;
default:
break;
}
diff --git a/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.h b/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.h
index 4b59976fbc35..c2a02cf8a38b 100644
--- a/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.h
+++ b/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef DEBUG_HTT_STATS_H
@@ -123,30 +123,38 @@ struct ath12k_htt_extd_stats_msg {
/* htt_dbg_ext_stats_type */
enum ath12k_dbg_htt_ext_stats_type {
- ATH12K_DBG_HTT_EXT_STATS_RESET = 0,
- ATH12K_DBG_HTT_EXT_STATS_PDEV_TX = 1,
- ATH12K_DBG_HTT_EXT_STATS_PDEV_TX_SCHED = 4,
- ATH12K_DBG_HTT_EXT_STATS_PDEV_ERROR = 5,
- ATH12K_DBG_HTT_EXT_STATS_PDEV_TQM = 6,
- ATH12K_DBG_HTT_EXT_STATS_TX_DE_INFO = 8,
- ATH12K_DBG_HTT_EXT_STATS_TX_SELFGEN_INFO = 12,
- ATH12K_DBG_HTT_EXT_STATS_SRNG_INFO = 15,
- ATH12K_DBG_HTT_EXT_STATS_SFM_INFO = 16,
- ATH12K_DBG_HTT_EXT_STATS_PDEV_TX_MU = 17,
- ATH12K_DBG_HTT_EXT_STATS_PDEV_CCA_STATS = 19,
- ATH12K_DBG_HTT_EXT_STATS_PDEV_OBSS_PD_STATS = 23,
- ATH12K_DBG_HTT_EXT_STATS_PDEV_TX_RATE_TXBF = 31,
- ATH12K_DBG_HTT_EXT_STATS_TXBF_OFDMA = 32,
- ATH12K_DBG_HTT_EXT_STATS_DLPAGER_STATS = 36,
- ATH12K_DBG_HTT_EXT_PHY_COUNTERS_AND_PHY_STATS = 37,
- ATH12K_DBG_HTT_EXT_VDEVS_TXRX_STATS = 38,
- ATH12K_DBG_HTT_EXT_PDEV_PER_STATS = 40,
- ATH12K_DBG_HTT_EXT_AST_ENTRIES = 41,
- ATH12K_DBG_HTT_EXT_STATS_SOC_ERROR = 45,
- ATH12K_DBG_HTT_DBG_PDEV_PUNCTURE_STATS = 46,
- ATH12K_DBG_HTT_EXT_STATS_PDEV_SCHED_ALGO = 49,
- ATH12K_DBG_HTT_EXT_STATS_MANDATORY_MUOFDMA = 51,
- ATH12K_DGB_HTT_EXT_STATS_PDEV_MBSSID_CTRL_FRAME = 54,
+ ATH12K_DBG_HTT_EXT_STATS_RESET = 0,
+ ATH12K_DBG_HTT_EXT_STATS_PDEV_TX = 1,
+ ATH12K_DBG_HTT_EXT_STATS_PDEV_TX_SCHED = 4,
+ ATH12K_DBG_HTT_EXT_STATS_PDEV_ERROR = 5,
+ ATH12K_DBG_HTT_EXT_STATS_PDEV_TQM = 6,
+ ATH12K_DBG_HTT_EXT_STATS_TX_DE_INFO = 8,
+ ATH12K_DBG_HTT_EXT_STATS_PDEV_TX_RATE = 9,
+ ATH12K_DBG_HTT_EXT_STATS_PDEV_RX_RATE = 10,
+ ATH12K_DBG_HTT_EXT_STATS_TX_SELFGEN_INFO = 12,
+ ATH12K_DBG_HTT_EXT_STATS_SRNG_INFO = 15,
+ ATH12K_DBG_HTT_EXT_STATS_SFM_INFO = 16,
+ ATH12K_DBG_HTT_EXT_STATS_PDEV_TX_MU = 17,
+ ATH12K_DBG_HTT_EXT_STATS_PDEV_CCA_STATS = 19,
+ ATH12K_DBG_HTT_EXT_STATS_TX_SOUNDING_INFO = 22,
+ ATH12K_DBG_HTT_EXT_STATS_PDEV_OBSS_PD_STATS = 23,
+ ATH12K_DBG_HTT_EXT_STATS_LATENCY_PROF_STATS = 25,
+ ATH12K_DBG_HTT_EXT_STATS_PDEV_UL_TRIG_STATS = 26,
+ ATH12K_DBG_HTT_EXT_STATS_PDEV_UL_MUMIMO_TRIG_STATS = 27,
+ ATH12K_DBG_HTT_EXT_STATS_FSE_RX = 28,
+ ATH12K_DBG_HTT_EXT_STATS_PDEV_RX_RATE_EXT = 30,
+ ATH12K_DBG_HTT_EXT_STATS_PDEV_TX_RATE_TXBF = 31,
+ ATH12K_DBG_HTT_EXT_STATS_TXBF_OFDMA = 32,
+ ATH12K_DBG_HTT_EXT_STATS_DLPAGER_STATS = 36,
+ ATH12K_DBG_HTT_EXT_PHY_COUNTERS_AND_PHY_STATS = 37,
+ ATH12K_DBG_HTT_EXT_VDEVS_TXRX_STATS = 38,
+ ATH12K_DBG_HTT_EXT_PDEV_PER_STATS = 40,
+ ATH12K_DBG_HTT_EXT_AST_ENTRIES = 41,
+ ATH12K_DBG_HTT_EXT_STATS_SOC_ERROR = 45,
+ ATH12K_DBG_HTT_DBG_PDEV_PUNCTURE_STATS = 46,
+ ATH12K_DBG_HTT_EXT_STATS_PDEV_SCHED_ALGO = 49,
+ ATH12K_DBG_HTT_EXT_STATS_MANDATORY_MUOFDMA = 51,
+ ATH12K_DGB_HTT_EXT_STATS_PDEV_MBSSID_CTRL_FRAME = 54,
/* keep this last */
ATH12K_DBG_HTT_NUM_EXT_STATS,
@@ -173,6 +181,8 @@ enum ath12k_dbg_htt_tlv_tag {
HTT_STATS_TX_PDEV_MU_MIMO_STATS_TAG = 25,
HTT_STATS_SFM_CMN_TAG = 26,
HTT_STATS_SRING_STATS_TAG = 27,
+ HTT_STATS_TX_PDEV_RATE_STATS_TAG = 34,
+ HTT_STATS_RX_PDEV_RATE_STATS_TAG = 35,
HTT_STATS_TX_PDEV_SCHEDULER_TXQ_STATS_TAG = 36,
HTT_STATS_TX_SCHED_CMN_TAG = 37,
HTT_STATS_SCHED_TXQ_CMD_POSTED_TAG = 39,
@@ -195,12 +205,21 @@ enum ath12k_dbg_htt_tlv_tag {
HTT_STATS_PDEV_CCA_STAT_CUMULATIVE_TAG = 72,
HTT_STATS_PDEV_CCA_COUNTERS_TAG = 73,
HTT_STATS_TX_PDEV_MPDU_STATS_TAG = 74,
+ HTT_STATS_TX_SOUNDING_STATS_TAG = 80,
HTT_STATS_SCHED_TXQ_SCHED_ORDER_SU_TAG = 86,
HTT_STATS_SCHED_TXQ_SCHED_INELIGIBILITY_TAG = 87,
HTT_STATS_PDEV_OBSS_PD_TAG = 88,
HTT_STATS_HW_WAR_TAG = 89,
+ HTT_STATS_LATENCY_PROF_STATS_TAG = 91,
+ HTT_STATS_LATENCY_CTX_TAG = 92,
+ HTT_STATS_LATENCY_CNT_TAG = 93,
+ HTT_STATS_RX_PDEV_UL_TRIG_STATS_TAG = 94,
+ HTT_STATS_RX_PDEV_UL_OFDMA_USER_STATS_TAG = 95,
+ HTT_STATS_RX_PDEV_UL_MUMIMO_TRIG_STATS_TAG = 97,
+ HTT_STATS_RX_FSE_STATS_TAG = 98,
HTT_STATS_SCHED_TXQ_SUPERCYCLE_TRIGGER_TAG = 100,
HTT_STATS_PDEV_CTRL_PATH_TX_STATS_TAG = 102,
+ HTT_STATS_RX_PDEV_RATE_EXT_STATS_TAG = 103,
HTT_STATS_PDEV_TX_RATE_TXBF_STATS_TAG = 108,
HTT_STATS_TX_SELFGEN_AC_SCHED_STATUS_STATS_TAG = 111,
HTT_STATS_TX_SELFGEN_AX_SCHED_STATUS_STATS_TAG = 112,
@@ -387,6 +406,182 @@ struct ath12k_htt_tx_pdev_mu_ppdu_dist_stats_tlv {
__le32 num_ppdu_posted_per_burst[ATH12K_HTT_STATS_MU_PPDU_PER_BURST_WORDS];
} __packed;
+#define ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS 12
+#define ATH12K_HTT_TX_PDEV_STATS_NUM_GI_COUNTERS 4
+#define ATH12K_HTT_TX_PDEV_STATS_NUM_DCM_COUNTERS 5
+#define ATH12K_HTT_TX_PDEV_STATS_NUM_BW_COUNTERS 4
+#define ATH12K_HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS 8
+#define ATH12K_HTT_TX_PDEV_STATS_NUM_PREAMBLE_TYPES 7
+#define ATH12K_HTT_TX_PDEV_STATS_NUM_LEGACY_CCK_STATS 4
+#define ATH12K_HTT_TX_PDEV_STATS_NUM_LEGACY_OFDM_STATS 8
+#define ATH12K_HTT_TX_PDEV_STATS_NUM_LTF 4
+#define ATH12K_HTT_TX_PDEV_STATS_NUM_EXTRA_MCS_COUNTERS 2
+#define ATH12K_HTT_TX_PDEV_STATS_NUM_EXTRA2_MCS_COUNTERS 2
+#define ATH12K_HTT_TX_PDEV_STATS_NUM_11AX_TRIGGER_TYPES 6
+
+struct ath12k_htt_tx_pdev_rate_stats_tlv {
+ __le32 mac_id_word;
+ __le32 tx_ldpc;
+ __le32 rts_cnt;
+ __le32 ack_rssi;
+ __le32 tx_mcs[ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS];
+ __le32 tx_su_mcs[ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS];
+ __le32 tx_mu_mcs[ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS];
+ __le32 tx_nss[ATH12K_HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS];
+ __le32 tx_bw[ATH12K_HTT_TX_PDEV_STATS_NUM_BW_COUNTERS];
+ __le32 tx_stbc[ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS];
+ __le32 tx_pream[ATH12K_HTT_TX_PDEV_STATS_NUM_PREAMBLE_TYPES];
+ __le32 tx_gi[ATH12K_HTT_TX_PDEV_STATS_NUM_GI_COUNTERS]
+ [ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS];
+ __le32 tx_dcm[ATH12K_HTT_TX_PDEV_STATS_NUM_DCM_COUNTERS];
+ __le32 rts_success;
+ __le32 tx_legacy_cck_rate[ATH12K_HTT_TX_PDEV_STATS_NUM_LEGACY_CCK_STATS];
+ __le32 tx_legacy_ofdm_rate[ATH12K_HTT_TX_PDEV_STATS_NUM_LEGACY_OFDM_STATS];
+ __le32 ac_mu_mimo_tx_ldpc;
+ __le32 ax_mu_mimo_tx_ldpc;
+ __le32 ofdma_tx_ldpc;
+ __le32 tx_he_ltf[ATH12K_HTT_TX_PDEV_STATS_NUM_LTF];
+ __le32 ac_mu_mimo_tx_mcs[ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS];
+ __le32 ax_mu_mimo_tx_mcs[ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS];
+ __le32 ofdma_tx_mcs[ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS];
+ __le32 ac_mu_mimo_tx_nss[ATH12K_HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS];
+ __le32 ax_mu_mimo_tx_nss[ATH12K_HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS];
+ __le32 ofdma_tx_nss[ATH12K_HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS];
+ __le32 ac_mu_mimo_tx_bw[ATH12K_HTT_TX_PDEV_STATS_NUM_BW_COUNTERS];
+ __le32 ax_mu_mimo_tx_bw[ATH12K_HTT_TX_PDEV_STATS_NUM_BW_COUNTERS];
+ __le32 ofdma_tx_bw[ATH12K_HTT_TX_PDEV_STATS_NUM_BW_COUNTERS];
+ __le32 ac_mu_mimo_tx_gi[ATH12K_HTT_TX_PDEV_STATS_NUM_GI_COUNTERS]
+ [ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS];
+ __le32 ax_mimo_tx_gi[ATH12K_HTT_TX_PDEV_STATS_NUM_GI_COUNTERS]
+ [ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS];
+ __le32 ofdma_tx_gi[ATH12K_HTT_TX_PDEV_STATS_NUM_GI_COUNTERS]
+ [ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS];
+ __le32 trigger_type_11ax[ATH12K_HTT_TX_PDEV_STATS_NUM_11AX_TRIGGER_TYPES];
+ __le32 tx_11ax_su_ext;
+ __le32 tx_mcs_ext[ATH12K_HTT_TX_PDEV_STATS_NUM_EXTRA_MCS_COUNTERS];
+ __le32 tx_stbc_ext[ATH12K_HTT_TX_PDEV_STATS_NUM_EXTRA_MCS_COUNTERS];
+ __le32 tx_gi_ext[ATH12K_HTT_TX_PDEV_STATS_NUM_GI_COUNTERS]
+ [ATH12K_HTT_TX_PDEV_STATS_NUM_EXTRA_MCS_COUNTERS];
+ __le32 ax_mu_mimo_tx_mcs_ext[ATH12K_HTT_TX_PDEV_STATS_NUM_EXTRA_MCS_COUNTERS];
+ __le32 ofdma_tx_mcs_ext[ATH12K_HTT_TX_PDEV_STATS_NUM_EXTRA_MCS_COUNTERS];
+ __le32 ax_tx_gi_ext[ATH12K_HTT_TX_PDEV_STATS_NUM_GI_COUNTERS]
+ [ATH12K_HTT_TX_PDEV_STATS_NUM_EXTRA_MCS_COUNTERS];
+ __le32 ofd_tx_gi_ext[ATH12K_HTT_TX_PDEV_STATS_NUM_GI_COUNTERS]
+ [ATH12K_HTT_TX_PDEV_STATS_NUM_EXTRA_MCS_COUNTERS];
+ __le32 tx_mcs_ext_2[ATH12K_HTT_TX_PDEV_STATS_NUM_EXTRA2_MCS_COUNTERS];
+ __le32 tx_bw_320mhz;
+};
+
+#define ATH12K_HTT_RX_PDEV_STATS_NUM_LEGACY_CCK_STATS 4
+#define ATH12K_HTT_RX_PDEV_STATS_NUM_LEGACY_OFDM_STATS 8
+#define ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS 12
+#define ATH12K_HTT_RX_PDEV_STATS_NUM_GI_COUNTERS 4
+#define ATH12K_HTT_RX_PDEV_STATS_NUM_DCM_COUNTERS 5
+#define ATH12K_HTT_RX_PDEV_STATS_NUM_BW_COUNTERS 4
+#define ATH12K_HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS 8
+#define ATH12K_HTT_RX_PDEV_STATS_NUM_PREAMBLE_TYPES 7
+#define ATH12K_HTT_RX_PDEV_MAX_OFDMA_NUM_USER 8
+#define ATH12K_HTT_RX_PDEV_STATS_RXEVM_MAX_PILOTS_NSS 16
+#define ATH12K_HTT_RX_PDEV_STATS_NUM_RU_SIZE_COUNTERS 6
+#define ATH12K_HTT_RX_PDEV_MAX_ULMUMIMO_NUM_USER 8
+#define ATH12K_HTT_RX_PDEV_STATS_NUM_EXTRA_MCS_COUNTERS 2
+
+struct ath12k_htt_rx_pdev_rate_stats_tlv {
+ __le32 mac_id_word;
+ __le32 nsts;
+ __le32 rx_ldpc;
+ __le32 rts_cnt;
+ __le32 rssi_mgmt;
+ __le32 rssi_data;
+ __le32 rssi_comb;
+ __le32 rx_mcs[ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS];
+ __le32 rx_nss[ATH12K_HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS];
+ __le32 rx_dcm[ATH12K_HTT_RX_PDEV_STATS_NUM_DCM_COUNTERS];
+ __le32 rx_stbc[ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS];
+ __le32 rx_bw[ATH12K_HTT_RX_PDEV_STATS_NUM_BW_COUNTERS];
+ __le32 rx_pream[ATH12K_HTT_RX_PDEV_STATS_NUM_PREAMBLE_TYPES];
+ u8 rssi_chain_in_db[ATH12K_HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS]
+ [ATH12K_HTT_RX_PDEV_STATS_NUM_BW_COUNTERS];
+ __le32 rx_gi[ATH12K_HTT_RX_PDEV_STATS_NUM_GI_COUNTERS]
+ [ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS];
+ __le32 rssi_in_dbm;
+ __le32 rx_11ax_su_ext;
+ __le32 rx_11ac_mumimo;
+ __le32 rx_11ax_mumimo;
+ __le32 rx_11ax_ofdma;
+ __le32 txbf;
+ __le32 rx_legacy_cck_rate[ATH12K_HTT_RX_PDEV_STATS_NUM_LEGACY_CCK_STATS];
+ __le32 rx_legacy_ofdm_rate[ATH12K_HTT_RX_PDEV_STATS_NUM_LEGACY_OFDM_STATS];
+ __le32 rx_active_dur_us_low;
+ __le32 rx_active_dur_us_high;
+ __le32 rx_11ax_ul_ofdma;
+ __le32 ul_ofdma_rx_mcs[ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS];
+ __le32 ul_ofdma_rx_gi[ATH12K_HTT_TX_PDEV_STATS_NUM_GI_COUNTERS]
+ [ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS];
+ __le32 ul_ofdma_rx_nss[ATH12K_HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS];
+ __le32 ul_ofdma_rx_bw[ATH12K_HTT_TX_PDEV_STATS_NUM_BW_COUNTERS];
+ __le32 ul_ofdma_rx_stbc;
+ __le32 ul_ofdma_rx_ldpc;
+ __le32 rx_ulofdma_non_data_ppdu[ATH12K_HTT_RX_PDEV_MAX_OFDMA_NUM_USER];
+ __le32 rx_ulofdma_data_ppdu[ATH12K_HTT_RX_PDEV_MAX_OFDMA_NUM_USER];
+ __le32 rx_ulofdma_mpdu_ok[ATH12K_HTT_RX_PDEV_MAX_OFDMA_NUM_USER];
+ __le32 rx_ulofdma_mpdu_fail[ATH12K_HTT_RX_PDEV_MAX_OFDMA_NUM_USER];
+ __le32 nss_count;
+ __le32 pilot_count;
+ __le32 rx_pil_evm_db[ATH12K_HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS]
+ [ATH12K_HTT_RX_PDEV_STATS_RXEVM_MAX_PILOTS_NSS];
+ __le32 rx_pilot_evm_db_mean[ATH12K_HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS];
+ s8 rx_ul_fd_rssi[ATH12K_HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS]
+ [ATH12K_HTT_RX_PDEV_MAX_OFDMA_NUM_USER];
+ __le32 per_chain_rssi_pkt_type;
+ s8 rx_per_chain_rssi_in_dbm[ATH12K_HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS]
+ [ATH12K_HTT_RX_PDEV_STATS_NUM_BW_COUNTERS];
+ __le32 rx_su_ndpa;
+ __le32 rx_11ax_su_txbf_mcs[ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS];
+ __le32 rx_mu_ndpa;
+ __le32 rx_11ax_mu_txbf_mcs[ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS];
+ __le32 rx_br_poll;
+ __le32 rx_11ax_dl_ofdma_mcs[ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS];
+ __le32 rx_11ax_dl_ofdma_ru[ATH12K_HTT_RX_PDEV_STATS_NUM_RU_SIZE_COUNTERS];
+ __le32 rx_ulmumimo_non_data_ppdu[ATH12K_HTT_RX_PDEV_MAX_ULMUMIMO_NUM_USER];
+ __le32 rx_ulmumimo_data_ppdu[ATH12K_HTT_RX_PDEV_MAX_ULMUMIMO_NUM_USER];
+ __le32 rx_ulmumimo_mpdu_ok[ATH12K_HTT_RX_PDEV_MAX_ULMUMIMO_NUM_USER];
+ __le32 rx_ulmumimo_mpdu_fail[ATH12K_HTT_RX_PDEV_MAX_ULMUMIMO_NUM_USER];
+ __le32 rx_ulofdma_non_data_nusers[ATH12K_HTT_RX_PDEV_MAX_OFDMA_NUM_USER];
+ __le32 rx_ulofdma_data_nusers[ATH12K_HTT_RX_PDEV_MAX_OFDMA_NUM_USER];
+ __le32 rx_mcs_ext[ATH12K_HTT_RX_PDEV_STATS_NUM_EXTRA_MCS_COUNTERS];
+};
+
+#define ATH12K_HTT_RX_PDEV_STATS_NUM_BW_EXT_COUNTERS 4
+#define ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS_EXT 14
+#define ATH12K_HTT_RX_PDEV_STATS_NUM_EXTRA2_MCS_COUNTERS 2
+#define ATH12K_HTT_RX_PDEV_STATS_NUM_BW_EXT2_COUNTERS 5
+#define ATH12K_HTT_RX_PDEV_STATS_NUM_PUNCTURED_MODE_COUNTERS 5
+
+struct ath12k_htt_rx_pdev_rate_ext_stats_tlv {
+ u8 rssi_chain_ext[ATH12K_HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS]
+ [ATH12K_HTT_RX_PDEV_STATS_NUM_BW_EXT_COUNTERS];
+ s8 rx_per_chain_rssi_ext_in_dbm[ATH12K_HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS]
+ [ATH12K_HTT_RX_PDEV_STATS_NUM_BW_EXT_COUNTERS];
+ __le32 rssi_mcast_in_dbm;
+ __le32 rssi_mgmt_in_dbm;
+ __le32 rx_mcs_ext[ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS_EXT];
+ __le32 rx_stbc_ext[ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS_EXT];
+ __le32 rx_gi_ext[ATH12K_HTT_RX_PDEV_STATS_NUM_GI_COUNTERS]
+ [ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS_EXT];
+ __le32 ul_ofdma_rx_mcs_ext[ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS_EXT];
+ __le32 ul_ofdma_rx_gi_ext[ATH12K_HTT_TX_PDEV_STATS_NUM_GI_COUNTERS]
+ [ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS_EXT];
+ __le32 rx_11ax_su_txbf_mcs_ext[ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS_EXT];
+ __le32 rx_11ax_mu_txbf_mcs_ext[ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS_EXT];
+ __le32 rx_11ax_dl_ofdma_mcs_ext[ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS_EXT];
+ __le32 rx_mcs_ext_2[ATH12K_HTT_RX_PDEV_STATS_NUM_EXTRA2_MCS_COUNTERS];
+ __le32 rx_bw_ext[ATH12K_HTT_RX_PDEV_STATS_NUM_BW_EXT2_COUNTERS];
+ __le32 rx_gi_ext_2[ATH12K_HTT_RX_PDEV_STATS_NUM_GI_COUNTERS]
+ [ATH12K_HTT_RX_PDEV_STATS_NUM_EXTRA2_MCS_COUNTERS];
+ __le32 rx_su_punctured_mode[ATH12K_HTT_RX_PDEV_STATS_NUM_PUNCTURED_MODE_COUNTERS];
+};
+
#define ATH12K_HTT_TX_PDEV_STATS_SCHED_PER_TXQ_MAC_ID GENMASK(7, 0)
#define ATH12K_HTT_TX_PDEV_STATS_SCHED_PER_TXQ_ID GENMASK(15, 8)
@@ -1058,6 +1253,82 @@ struct ath12k_htt_pdev_cca_stats_hist_v1_tlv {
__le32 collection_interval;
} __packed;
+#define ATH12K_HTT_TX_CV_CORR_MAX_NUM_COLUMNS 8
+#define ATH12K_HTT_TX_NUM_AC_MUMIMO_USER_STATS 4
+#define ATH12K_HTT_TX_NUM_AX_MUMIMO_USER_STATS 8
+#define ATH12K_HTT_TX_NUM_BE_MUMIMO_USER_STATS 8
+#define ATH12K_HTT_TX_PDEV_STATS_NUM_BW_COUNTERS 4
+#define ATH12K_HTT_TX_NUM_MCS_CNTRS 12
+#define ATH12K_HTT_TX_NUM_EXTRA_MCS_CNTRS 2
+
+#define ATH12K_HTT_TX_NUM_OF_SOUNDING_STATS_WORDS \
+ (ATH12K_HTT_TX_PDEV_STATS_NUM_BW_COUNTERS * \
+ ATH12K_HTT_TX_NUM_AX_MUMIMO_USER_STATS)
+
+enum ath12k_htt_txbf_sound_steer_modes {
+ ATH12K_HTT_IMPL_STEER_STATS = 0,
+ ATH12K_HTT_EXPL_SUSIFS_STEER_STATS = 1,
+ ATH12K_HTT_EXPL_SURBO_STEER_STATS = 2,
+ ATH12K_HTT_EXPL_MUSIFS_STEER_STATS = 3,
+ ATH12K_HTT_EXPL_MURBO_STEER_STATS = 4,
+ ATH12K_HTT_TXBF_MAX_NUM_OF_MODES = 5
+};
+
+enum ath12k_htt_stats_sounding_tx_mode {
+ ATH12K_HTT_TX_AC_SOUNDING_MODE = 0,
+ ATH12K_HTT_TX_AX_SOUNDING_MODE = 1,
+ ATH12K_HTT_TX_BE_SOUNDING_MODE = 2,
+ ATH12K_HTT_TX_CMN_SOUNDING_MODE = 3,
+};
+
+struct ath12k_htt_tx_sounding_stats_tlv {
+ __le32 tx_sounding_mode;
+ __le32 cbf_20[ATH12K_HTT_TXBF_MAX_NUM_OF_MODES];
+ __le32 cbf_40[ATH12K_HTT_TXBF_MAX_NUM_OF_MODES];
+ __le32 cbf_80[ATH12K_HTT_TXBF_MAX_NUM_OF_MODES];
+ __le32 cbf_160[ATH12K_HTT_TXBF_MAX_NUM_OF_MODES];
+ __le32 sounding[ATH12K_HTT_TX_NUM_OF_SOUNDING_STATS_WORDS];
+ __le32 cv_nc_mismatch_err;
+ __le32 cv_fcs_err;
+ __le32 cv_frag_idx_mismatch;
+ __le32 cv_invalid_peer_id;
+ __le32 cv_no_txbf_setup;
+ __le32 cv_expiry_in_update;
+ __le32 cv_pkt_bw_exceed;
+ __le32 cv_dma_not_done_err;
+ __le32 cv_update_failed;
+ __le32 cv_total_query;
+ __le32 cv_total_pattern_query;
+ __le32 cv_total_bw_query;
+ __le32 cv_invalid_bw_coding;
+ __le32 cv_forced_sounding;
+ __le32 cv_standalone_sounding;
+ __le32 cv_nc_mismatch;
+ __le32 cv_fb_type_mismatch;
+ __le32 cv_ofdma_bw_mismatch;
+ __le32 cv_bw_mismatch;
+ __le32 cv_pattern_mismatch;
+ __le32 cv_preamble_mismatch;
+ __le32 cv_nr_mismatch;
+ __le32 cv_in_use_cnt_exceeded;
+ __le32 cv_found;
+ __le32 cv_not_found;
+ __le32 sounding_320[ATH12K_HTT_TX_NUM_BE_MUMIMO_USER_STATS];
+ __le32 cbf_320[ATH12K_HTT_TXBF_MAX_NUM_OF_MODES];
+ __le32 cv_ntbr_sounding;
+ __le32 cv_found_upload_in_progress;
+ __le32 cv_expired_during_query;
+ __le32 cv_dma_timeout_error;
+ __le32 cv_buf_ibf_uploads;
+ __le32 cv_buf_ebf_uploads;
+ __le32 cv_buf_received;
+ __le32 cv_buf_fed_back;
+ __le32 cv_total_query_ibf;
+ __le32 cv_found_ibf;
+ __le32 cv_not_found_ibf;
+ __le32 cv_expired_during_query_ibf;
+} __packed;
+
struct ath12k_htt_pdev_obss_pd_stats_tlv {
__le32 num_obss_tx_ppdu_success;
__le32 num_obss_tx_ppdu_failure;
@@ -1080,6 +1351,127 @@ struct ath12k_htt_pdev_obss_pd_stats_tlv {
__le32 num_sr_ppdu_abort_flush_cnt;
} __packed;
+#define ATH12K_HTT_STATS_MAX_PROF_STATS_NAME_LEN 32
+#define ATH12K_HTT_LATENCY_PROFILE_NUM_MAX_HIST 3
+#define ATH12K_HTT_INTERRUPTS_LATENCY_PROFILE_MAX_HIST 3
+
+struct ath12k_htt_latency_prof_stats_tlv {
+ __le32 print_header;
+ s8 latency_prof_name[ATH12K_HTT_STATS_MAX_PROF_STATS_NAME_LEN];
+ __le32 cnt;
+ __le32 min;
+ __le32 max;
+ __le32 last;
+ __le32 tot;
+ __le32 avg;
+ __le32 hist_intvl;
+ __le32 hist[ATH12K_HTT_LATENCY_PROFILE_NUM_MAX_HIST];
+} __packed;
+
+struct ath12k_htt_latency_prof_ctx_tlv {
+ __le32 duration;
+ __le32 tx_msdu_cnt;
+ __le32 tx_mpdu_cnt;
+ __le32 tx_ppdu_cnt;
+ __le32 rx_msdu_cnt;
+ __le32 rx_mpdu_cnt;
+} __packed;
+
+struct ath12k_htt_latency_prof_cnt_tlv {
+ __le32 prof_enable_cnt;
+} __packed;
+
+#define ATH12K_HTT_RX_NUM_MCS_CNTRS 12
+#define ATH12K_HTT_RX_NUM_GI_CNTRS 4
+#define ATH12K_HTT_RX_NUM_SPATIAL_STREAMS 8
+#define ATH12K_HTT_RX_NUM_BW_CNTRS 4
+#define ATH12K_HTT_RX_NUM_RU_SIZE_CNTRS 6
+#define ATH12K_HTT_RX_NUM_RU_SIZE_160MHZ_CNTRS 7
+#define ATH12K_HTT_RX_UL_MAX_UPLINK_RSSI_TRACK 5
+#define ATH12K_HTT_RX_NUM_REDUCED_CHAN_TYPES 2
+#define ATH12K_HTT_RX_NUM_EXTRA_MCS_CNTRS 2
+
+enum ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE {
+ ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_26,
+ ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_52,
+ ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_106,
+ ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_242,
+ ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_484,
+ ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_996,
+ ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_996x2,
+ ATH12K_HTT_TX_RX_PDEV_STATS_NUM_AX_RU_SIZE_CNTRS,
+};
+
+struct ath12k_htt_rx_pdev_ul_ofdma_user_stats_tlv {
+ __le32 user_index;
+ __le32 rx_ulofdma_non_data_ppdu;
+ __le32 rx_ulofdma_data_ppdu;
+ __le32 rx_ulofdma_mpdu_ok;
+ __le32 rx_ulofdma_mpdu_fail;
+ __le32 rx_ulofdma_non_data_nusers;
+ __le32 rx_ulofdma_data_nusers;
+} __packed;
+
+struct ath12k_htt_rx_pdev_ul_trigger_stats_tlv {
+ __le32 mac_id__word;
+ __le32 rx_11ax_ul_ofdma;
+ __le32 ul_ofdma_rx_mcs[ATH12K_HTT_RX_NUM_MCS_CNTRS];
+ __le32 ul_ofdma_rx_gi[ATH12K_HTT_RX_NUM_GI_CNTRS][ATH12K_HTT_RX_NUM_MCS_CNTRS];
+ __le32 ul_ofdma_rx_nss[ATH12K_HTT_RX_NUM_SPATIAL_STREAMS];
+ __le32 ul_ofdma_rx_bw[ATH12K_HTT_RX_NUM_BW_CNTRS];
+ __le32 ul_ofdma_rx_stbc;
+ __le32 ul_ofdma_rx_ldpc;
+ __le32 data_ru_size_ppdu[ATH12K_HTT_RX_NUM_RU_SIZE_160MHZ_CNTRS];
+ __le32 non_data_ru_size_ppdu[ATH12K_HTT_RX_NUM_RU_SIZE_160MHZ_CNTRS];
+ __le32 uplink_sta_aid[ATH12K_HTT_RX_UL_MAX_UPLINK_RSSI_TRACK];
+ __le32 uplink_sta_target_rssi[ATH12K_HTT_RX_UL_MAX_UPLINK_RSSI_TRACK];
+ __le32 uplink_sta_fd_rssi[ATH12K_HTT_RX_UL_MAX_UPLINK_RSSI_TRACK];
+ __le32 uplink_sta_power_headroom[ATH12K_HTT_RX_UL_MAX_UPLINK_RSSI_TRACK];
+ __le32 red_bw[ATH12K_HTT_RX_NUM_REDUCED_CHAN_TYPES][ATH12K_HTT_RX_NUM_BW_CNTRS];
+ __le32 ul_ofdma_bsc_trig_rx_qos_null_only;
+} __packed;
+
+#define ATH12K_HTT_TX_UL_MUMIMO_USER_STATS 8
+
+struct ath12k_htt_rx_ul_mumimo_trig_stats_tlv {
+ __le32 mac_id__word;
+ __le32 rx_11ax_ul_mumimo;
+ __le32 ul_mumimo_rx_mcs[ATH12K_HTT_RX_NUM_MCS_CNTRS];
+ __le32 ul_rx_gi[ATH12K_HTT_RX_NUM_GI_CNTRS][ATH12K_HTT_RX_NUM_MCS_CNTRS];
+ __le32 ul_mumimo_rx_nss[ATH12K_HTT_RX_NUM_SPATIAL_STREAMS];
+ __le32 ul_mumimo_rx_bw[ATH12K_HTT_RX_NUM_BW_CNTRS];
+ __le32 ul_mumimo_rx_stbc;
+ __le32 ul_mumimo_rx_ldpc;
+ __le32 ul_mumimo_rx_mcs_ext[ATH12K_HTT_RX_NUM_EXTRA_MCS_CNTRS];
+ __le32 ul_gi_ext[ATH12K_HTT_RX_NUM_GI_CNTRS][ATH12K_HTT_RX_NUM_EXTRA_MCS_CNTRS];
+ s8 ul_rssi[ATH12K_HTT_RX_NUM_SPATIAL_STREAMS][ATH12K_HTT_RX_NUM_BW_CNTRS];
+ s8 tgt_rssi[ATH12K_HTT_TX_UL_MUMIMO_USER_STATS][ATH12K_HTT_RX_NUM_BW_CNTRS];
+ s8 fd[ATH12K_HTT_TX_UL_MUMIMO_USER_STATS][ATH12K_HTT_RX_NUM_SPATIAL_STREAMS];
+ s8 db[ATH12K_HTT_TX_UL_MUMIMO_USER_STATS][ATH12K_HTT_RX_NUM_SPATIAL_STREAMS];
+ __le32 red_bw[ATH12K_HTT_RX_NUM_REDUCED_CHAN_TYPES][ATH12K_HTT_RX_NUM_BW_CNTRS];
+ __le32 mumimo_bsc_trig_rx_qos_null_only;
+} __packed;
+
+#define ATH12K_HTT_RX_NUM_MAX_PEAK_OCCUPANCY_INDEX 10
+#define ATH12K_HTT_RX_NUM_MAX_CURR_OCCUPANCY_INDEX 10
+#define ATH12K_HTT_RX_NUM_SQUARE_INDEX 6
+#define ATH12K_HTT_RX_NUM_MAX_PEAK_SEARCH_INDEX 4
+#define ATH12K_HTT_RX_NUM_MAX_PENDING_SEARCH_INDEX 4
+
+struct ath12k_htt_rx_fse_stats_tlv {
+ __le32 fse_enable_cnt;
+ __le32 fse_disable_cnt;
+ __le32 fse_cache_invalidate_entry_cnt;
+ __le32 fse_full_cache_invalidate_cnt;
+ __le32 fse_num_cache_hits_cnt;
+ __le32 fse_num_searches_cnt;
+ __le32 fse_cache_occupancy_peak_cnt[ATH12K_HTT_RX_NUM_MAX_PEAK_OCCUPANCY_INDEX];
+ __le32 fse_cache_occupancy_curr_cnt[ATH12K_HTT_RX_NUM_MAX_CURR_OCCUPANCY_INDEX];
+ __le32 fse_search_stat_square_cnt[ATH12K_HTT_RX_NUM_SQUARE_INDEX];
+ __le32 fse_search_stat_peak_cnt[ATH12K_HTT_RX_NUM_MAX_PEAK_SEARCH_INDEX];
+ __le32 fse_search_stat_pending_cnt[ATH12K_HTT_RX_NUM_MAX_PENDING_SEARCH_INDEX];
+} __packed;
+
#define ATH12K_HTT_TX_BF_RATE_STATS_NUM_MCS_COUNTERS 14
#define ATH12K_HTT_TX_PDEV_STATS_NUM_LEGACY_OFDM_STATS 8
#define ATH12K_HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS 8
@@ -1417,17 +1809,6 @@ enum ATH12K_HTT_RC_MODE {
ATH12K_HTT_RC_MODE_2D_COUNT
};
-enum ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE {
- ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_26,
- ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_52,
- ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_106,
- ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_242,
- ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_484,
- ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_996,
- ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_996x2,
- ATH12K_HTT_TX_RX_PDEV_STATS_NUM_AX_RU_SIZE_CNTRS
-};
-
enum ath12k_htt_stats_rc_mode {
ATH12K_HTT_STATS_RC_MODE_DLSU = 0,
ATH12K_HTT_STATS_RC_MODE_DLMUMIMO = 1,
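
The ath12k_htt_*_tlv structures added above are payload layouts for HTT extended-stats TLVs: the firmware returns one response buffer containing a sequence of tag-length-value blocks, and the stats code selects which struct to cast the value portion to based on the tag. A rough, self-contained sketch of such a walk is below; the header bit layout (tag in bits 0-11, length in bits 12-23) and the function name are illustrative assumptions, not the driver's actual definitions.

/* Hypothetical sketch of an HTT ext-stats TLV walk; header field positions
 * are assumed for illustration, the real driver uses its own GENMASK-based
 * accessors on a little-endian 32-bit TLV header.
 */
#include <stdint.h>
#include <stddef.h>
#include <string.h>

static void walk_htt_stats_tlvs(const uint8_t *buf, size_t len)
{
	size_t off = 0;

	while (off + 4 <= len) {
		uint32_t hdr, tag, tlv_len;

		memcpy(&hdr, buf + off, sizeof(hdr));	/* header word, little-endian on the wire */
		tag = hdr & 0xfff;			/* assumed: tag in bits 0-11 */
		tlv_len = (hdr >> 12) & 0xfff;		/* assumed: length in bits 12-23 */
		off += 4;

		if (off + tlv_len > len)
			break;				/* truncated TLV, stop parsing */

		/* dispatch on 'tag' and cast buf + off to the matching
		 * ath12k_htt_*_tlv layout, e.g. the sounding stats above
		 */
		off += tlv_len;
	}
}
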
diff --git a/drivers/net/wireless/ath/ath12k/debugfs_sta.c b/drivers/net/wireless/ath/ath12k/debugfs_sta.c
new file mode 100644
index 000000000000..5bd2bf4c9dac
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/debugfs_sta.c
@@ -0,0 +1,337 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2024-2025 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/vmalloc.h>
+
+#include "debugfs_sta.h"
+#include "core.h"
+#include "peer.h"
+#include "debug.h"
+#include "debugfs_htt_stats.h"
+#include "debugfs.h"
+
+static
+u32 ath12k_dbg_sta_dump_rate_stats(u8 *buf, u32 offset, const int size,
+ bool he_rates_avail,
+ const struct ath12k_rx_peer_rate_stats *stats)
+{
+ static const char *legacy_rate_str[HAL_RX_MAX_NUM_LEGACY_RATES] = {
+ "1 Mbps", "2 Mbps", "5.5 Mbps", "6 Mbps",
+ "9 Mbps", "11 Mbps", "12 Mbps", "18 Mbps",
+ "24 Mbps", "36 Mbps", "48 Mbps", "54 Mbps"};
+ u8 max_bw = HAL_RX_BW_MAX, max_gi = HAL_RX_GI_MAX, max_mcs = HAL_RX_MAX_NSS;
+ int mcs = 0, bw = 0, nss = 0, gi = 0, bw_num = 0;
+ u32 i, len = offset, max = max_bw * max_gi * max_mcs;
+ bool found;
+
+ len += scnprintf(buf + len, size - len, "\nEHT stats:\n");
+ for (i = 0; i <= HAL_RX_MAX_MCS_BE; i++)
+ len += scnprintf(buf + len, size - len,
+ "MCS %d: %llu%s", i, stats->be_mcs_count[i],
+ (i + 1) % 8 ? "\t" : "\n");
+
+ len += scnprintf(buf + len, size - len, "\nHE stats:\n");
+ for (i = 0; i <= HAL_RX_MAX_MCS_HE; i++)
+ len += scnprintf(buf + len, size - len,
+ "MCS %d: %llu%s", i, stats->he_mcs_count[i],
+ (i + 1) % 6 ? "\t" : "\n");
+
+ len += scnprintf(buf + len, size - len, "\nVHT stats:\n");
+ for (i = 0; i <= HAL_RX_MAX_MCS_VHT; i++)
+ len += scnprintf(buf + len, size - len,
+ "MCS %d: %llu%s", i, stats->vht_mcs_count[i],
+ (i + 1) % 5 ? "\t" : "\n");
+
+ len += scnprintf(buf + len, size - len, "\nHT stats:\n");
+ for (i = 0; i <= HAL_RX_MAX_MCS_HT; i++)
+ len += scnprintf(buf + len, size - len,
+ "MCS %d: %llu%s", i, stats->ht_mcs_count[i],
+ (i + 1) % 8 ? "\t" : "\n");
+
+ len += scnprintf(buf + len, size - len, "\nLegacy stats:\n");
+ for (i = 0; i < HAL_RX_MAX_NUM_LEGACY_RATES; i++)
+ len += scnprintf(buf + len, size - len,
+ "%s: %llu%s", legacy_rate_str[i],
+ stats->legacy_count[i],
+ (i + 1) % 4 ? "\t" : "\n");
+
+ len += scnprintf(buf + len, size - len, "\nNSS stats:\n");
+ for (i = 0; i < HAL_RX_MAX_NSS; i++)
+ len += scnprintf(buf + len, size - len,
+ "%dx%d: %llu ", i + 1, i + 1,
+ stats->nss_count[i]);
+
+ len += scnprintf(buf + len, size - len,
+ "\n\nGI: 0.8 us %llu 0.4 us %llu 1.6 us %llu 3.2 us %llu\n",
+ stats->gi_count[0],
+ stats->gi_count[1],
+ stats->gi_count[2],
+ stats->gi_count[3]);
+
+ len += scnprintf(buf + len, size - len,
+ "BW: 20 MHz %llu 40 MHz %llu 80 MHz %llu 160 MHz %llu 320 MHz %llu\n",
+ stats->bw_count[0],
+ stats->bw_count[1],
+ stats->bw_count[2],
+ stats->bw_count[3],
+ stats->bw_count[4]);
+
+ for (i = 0; i < max; i++) {
+ found = false;
+
+ for (mcs = 0; mcs <= HAL_RX_MAX_MCS_HT; mcs++) {
+ if (stats->rx_rate[bw][gi][nss][mcs]) {
+ found = true;
+ break;
+ }
+ }
+
+ if (!found)
+ goto skip_report;
+
+ switch (bw) {
+ case HAL_RX_BW_20MHZ:
+ bw_num = 20;
+ break;
+ case HAL_RX_BW_40MHZ:
+ bw_num = 40;
+ break;
+ case HAL_RX_BW_80MHZ:
+ bw_num = 80;
+ break;
+ case HAL_RX_BW_160MHZ:
+ bw_num = 160;
+ break;
+ case HAL_RX_BW_320MHZ:
+ bw_num = 320;
+ break;
+ }
+
+ len += scnprintf(buf + len, size - len, "\n%d Mhz gi %d us %dx%d : ",
+ bw_num, gi, nss + 1, nss + 1);
+
+ for (mcs = 0; mcs <= HAL_RX_MAX_MCS_HT; mcs++) {
+ if (stats->rx_rate[bw][gi][nss][mcs])
+ len += scnprintf(buf + len, size - len,
+ " %d:%llu", mcs,
+ stats->rx_rate[bw][gi][nss][mcs]);
+ }
+
+skip_report:
+ if (nss++ >= max_mcs - 1) {
+ nss = 0;
+ if (gi++ >= max_gi - 1) {
+ gi = 0;
+ if (bw < max_bw - 1)
+ bw++;
+ }
+ }
+ }
+
+ len += scnprintf(buf + len, size - len, "\n");
+
+ return len - offset;
+}
+
+static ssize_t ath12k_dbg_sta_dump_rx_stats(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_link_sta *link_sta = file->private_data;
+ struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(link_sta->sta);
+ const int size = ATH12K_STA_RX_STATS_BUF_SIZE;
+ struct ath12k_hw *ah = ahsta->ahvif->ah;
+ struct ath12k_rx_peer_stats *rx_stats;
+ struct ath12k_link_sta *arsta;
+ u8 link_id = link_sta->link_id;
+ int len = 0, i, ret = 0;
+ bool he_rates_avail;
+ struct ath12k *ar;
+
+ wiphy_lock(ah->hw->wiphy);
+
+ if (!(BIT(link_id) & ahsta->links_map)) {
+ wiphy_unlock(ah->hw->wiphy);
+ return -ENOENT;
+ }
+
+ arsta = wiphy_dereference(ah->hw->wiphy, ahsta->link[link_id]);
+ if (!arsta || !arsta->arvif->ar) {
+ wiphy_unlock(ah->hw->wiphy);
+ return -ENOENT;
+ }
+
+ ar = arsta->arvif->ar;
+
+ u8 *buf __free(kfree) = kzalloc(size, GFP_KERNEL);
+ if (!buf) {
+ ret = -ENOENT;
+ goto out;
+ }
+
+ spin_lock_bh(&ar->ab->base_lock);
+
+ rx_stats = arsta->rx_stats;
+ if (!rx_stats) {
+ ret = -ENOENT;
+ goto unlock;
+ }
+
+ len += scnprintf(buf + len, size - len, "RX peer stats:\n\n");
+ len += scnprintf(buf + len, size - len, "Num of MSDUs: %llu\n",
+ rx_stats->num_msdu);
+ len += scnprintf(buf + len, size - len, "Num of MSDUs with TCP L4: %llu\n",
+ rx_stats->tcp_msdu_count);
+ len += scnprintf(buf + len, size - len, "Num of MSDUs with UDP L4: %llu\n",
+ rx_stats->udp_msdu_count);
+ len += scnprintf(buf + len, size - len, "Num of other MSDUs: %llu\n",
+ rx_stats->other_msdu_count);
+ len += scnprintf(buf + len, size - len, "Num of MSDUs part of AMPDU: %llu\n",
+ rx_stats->ampdu_msdu_count);
+ len += scnprintf(buf + len, size - len, "Num of MSDUs not part of AMPDU: %llu\n",
+ rx_stats->non_ampdu_msdu_count);
+ len += scnprintf(buf + len, size - len, "Num of MSDUs using STBC: %llu\n",
+ rx_stats->stbc_count);
+ len += scnprintf(buf + len, size - len, "Num of MSDUs beamformed: %llu\n",
+ rx_stats->beamformed_count);
+ len += scnprintf(buf + len, size - len, "Num of MPDUs with FCS ok: %llu\n",
+ rx_stats->num_mpdu_fcs_ok);
+ len += scnprintf(buf + len, size - len, "Num of MPDUs with FCS error: %llu\n",
+ rx_stats->num_mpdu_fcs_err);
+
+ he_rates_avail = (rx_stats->pream_cnt[HAL_RX_PREAMBLE_11AX] > 1) ? true : false;
+
+ len += scnprintf(buf + len, size - len,
+ "preamble: 11A %llu 11B %llu 11N %llu 11AC %llu 11AX %llu 11BE %llu\n",
+ rx_stats->pream_cnt[0], rx_stats->pream_cnt[1],
+ rx_stats->pream_cnt[2], rx_stats->pream_cnt[3],
+ rx_stats->pream_cnt[4], rx_stats->pream_cnt[6]);
+ len += scnprintf(buf + len, size - len,
+ "reception type: SU %llu MU_MIMO %llu MU_OFDMA %llu MU_OFDMA_MIMO %llu\n",
+ rx_stats->reception_type[0], rx_stats->reception_type[1],
+ rx_stats->reception_type[2], rx_stats->reception_type[3]);
+
+ len += scnprintf(buf + len, size - len, "TID(0-15) Legacy TID(16):");
+ for (i = 0; i <= IEEE80211_NUM_TIDS; i++)
+ len += scnprintf(buf + len, size - len, "%llu ", rx_stats->tid_count[i]);
+
+ len += scnprintf(buf + len, size - len, "\nRX Duration:%llu\n",
+ rx_stats->rx_duration);
+
+ len += scnprintf(buf + len, size - len,
+ "\nDCM: %llu\nRU26: %llu\nRU52: %llu\nRU106: %llu\nRU242: %llu\nRU484: %llu\nRU996: %llu\nRU996x2: %llu\n",
+ rx_stats->dcm_count, rx_stats->ru_alloc_cnt[0],
+ rx_stats->ru_alloc_cnt[1], rx_stats->ru_alloc_cnt[2],
+ rx_stats->ru_alloc_cnt[3], rx_stats->ru_alloc_cnt[4],
+ rx_stats->ru_alloc_cnt[5], rx_stats->ru_alloc_cnt[6]);
+
+ len += scnprintf(buf + len, size - len, "\nRX success packet stats:\n");
+ len += ath12k_dbg_sta_dump_rate_stats(buf, len, size, he_rates_avail,
+ &rx_stats->pkt_stats);
+
+ len += scnprintf(buf + len, size - len, "\n");
+
+ len += scnprintf(buf + len, size - len, "\nRX success byte stats:\n");
+ len += ath12k_dbg_sta_dump_rate_stats(buf, len, size, he_rates_avail,
+ &rx_stats->byte_stats);
+
+unlock:
+ spin_unlock_bh(&ar->ab->base_lock);
+
+ if (len)
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+out:
+ wiphy_unlock(ah->hw->wiphy);
+ return ret;
+}
+
+static const struct file_operations fops_rx_stats = {
+ .read = ath12k_dbg_sta_dump_rx_stats,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t ath12k_dbg_sta_reset_rx_stats(struct file *file,
+ const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_link_sta *link_sta = file->private_data;
+ struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(link_sta->sta);
+ struct ath12k_hw *ah = ahsta->ahvif->ah;
+ struct ath12k_rx_peer_stats *rx_stats;
+ struct ath12k_link_sta *arsta;
+ u8 link_id = link_sta->link_id;
+ struct ath12k *ar;
+ bool reset;
+ int ret;
+
+ ret = kstrtobool_from_user(buf, count, &reset);
+ if (ret)
+ return ret;
+
+ if (!reset)
+ return -EINVAL;
+
+ wiphy_lock(ah->hw->wiphy);
+
+ if (!(BIT(link_id) & ahsta->links_map)) {
+ ret = -ENOENT;
+ goto out;
+ }
+
+ arsta = wiphy_dereference(ah->hw->wiphy, ahsta->link[link_id]);
+ if (!arsta || !arsta->arvif->ar) {
+ ret = -ENOENT;
+ goto out;
+ }
+
+ ar = arsta->arvif->ar;
+
+ spin_lock_bh(&ar->ab->base_lock);
+
+ rx_stats = arsta->rx_stats;
+ if (!rx_stats) {
+ spin_unlock_bh(&ar->ab->base_lock);
+ ret = -ENOENT;
+ goto out;
+ }
+
+ memset(rx_stats, 0, sizeof(*rx_stats));
+ spin_unlock_bh(&ar->ab->base_lock);
+
+ ret = count;
+out:
+ wiphy_unlock(ah->hw->wiphy);
+ return ret;
+}
+
+static const struct file_operations fops_reset_rx_stats = {
+ .write = ath12k_dbg_sta_reset_rx_stats,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+void ath12k_debugfs_link_sta_op_add(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_link_sta *link_sta,
+ struct dentry *dir)
+{
+ struct ath12k *ar;
+
+ lockdep_assert_wiphy(hw->wiphy);
+
+ ar = ath12k_get_ar_by_vif(hw, vif, link_sta->link_id);
+ if (!ar)
+ return;
+
+ if (ath12k_debugfs_is_extd_rx_stats_enabled(ar)) {
+ debugfs_create_file("rx_stats", 0400, dir, link_sta,
+ &fops_rx_stats);
+ debugfs_create_file("reset_rx_stats", 0200, dir, link_sta,
+ &fops_reset_rx_stats);
+ }
+}
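
The two debugfs entries registered above are a plain read-only dump (rx_stats) and a write-only reset trigger (reset_rx_stats) that accepts any value kstrtobool_from_user() parses as true. A minimal userspace sketch of exercising them follows; the station directory path is only an illustrative assumption, since mac80211 decides where the per-link station debugfs directory actually lives.

/* Hypothetical usage sketch: read the per-link rx_stats dump, then reset it.
 * The debugfs path below is assumed for illustration only.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	static const char base[] =
		"/sys/kernel/debug/ieee80211/phy0/netdev:wlan0/stations/"
		"aa:bb:cc:dd:ee:ff";		/* assumed station directory */
	char path[256], buf[4096];
	ssize_t n;
	int fd;

	snprintf(path, sizeof(path), "%s/rx_stats", base);
	fd = open(path, O_RDONLY);
	if (fd < 0)
		return 1;
	while ((n = read(fd, buf, sizeof(buf))) > 0)
		fwrite(buf, 1, n, stdout);
	close(fd);

	snprintf(path, sizeof(path), "%s/reset_rx_stats", base);
	fd = open(path, O_WRONLY);
	if (fd < 0)
		return 1;
	if (write(fd, "1", 1) != 1)	/* kstrtobool_from_user() accepts "1" */
		perror("reset_rx_stats");
	close(fd);
	return 0;
}
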
diff --git a/drivers/net/wireless/ath/ath12k/debugfs_sta.h b/drivers/net/wireless/ath/ath12k/debugfs_sta.h
new file mode 100644
index 000000000000..8de924f4d7d5
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/debugfs_sta.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2024-2025 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _ATH12K_DEBUGFS_STA_H_
+#define _ATH12K_DEBUGFS_STA_H_
+
+#include <net/mac80211.h>
+
+#include "core.h"
+
+#define ATH12K_STA_RX_STATS_BUF_SIZE (1024 * 16)
+
+#ifdef CONFIG_ATH12K_DEBUGFS
+
+void ath12k_debugfs_link_sta_op_add(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_link_sta *link_sta,
+ struct dentry *dir);
+
+#endif /* CONFIG_ATH12K_DEBUGFS */
+
+#endif /* _ATH12K_DEBUGFS_STA_H_ */
diff --git a/drivers/net/wireless/ath/ath12k/dp.c b/drivers/net/wireless/ath/ath12k/dp.c
index 9e5a4e75f2f6..b1f27c3ac723 100644
--- a/drivers/net/wireless/ath/ath12k/dp.c
+++ b/drivers/net/wireless/ath/ath12k/dp.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <crypto/hash.h>
@@ -1315,6 +1315,9 @@ void ath12k_dp_cc_config(struct ath12k_base *ab)
u32 wbm_base = HAL_SEQ_WCSS_UMAC_WBM_REG;
u32 val = 0;
+ if (ath12k_ftm_mode)
+ return;
+
ath12k_hif_write32(ab, reo_base + HAL_REO1_SW_COOKIE_CFG0(ab), cmem_base);
val |= u32_encode_bits(ATH12K_CMEM_ADDR_MSB,
diff --git a/drivers/net/wireless/ath/ath12k/dp.h b/drivers/net/wireless/ath/ath12k/dp.h
index 7ac3143de016..75435a931548 100644
--- a/drivers/net/wireless/ath/ath12k/dp.h
+++ b/drivers/net/wireless/ath/ath12k/dp.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH12K_DP_H
@@ -125,7 +125,6 @@ struct ath12k_mon_data {
struct sk_buff_head rx_status_q;
struct dp_mon_mpdu *mon_mpdu;
struct list_head dp_rx_mon_mpdu_list;
- struct sk_buff *dest_skb_q[DP_MON_MAX_STATUS_BUF];
struct dp_mon_tx_ppdu_info *tx_prot_ppdu_info;
struct dp_mon_tx_ppdu_info *tx_data_ppdu_info;
};
@@ -176,7 +175,7 @@ struct ath12k_pdev_dp {
#define DP_RXDMA_ERR_DST_RING_SIZE 1024
#define DP_RXDMA_MON_STATUS_RING_SIZE 1024
#define DP_RXDMA_MONITOR_BUF_RING_SIZE 4096
-#define DP_RXDMA_MONITOR_DST_RING_SIZE 2048
+#define DP_RXDMA_MONITOR_DST_RING_SIZE 8092
#define DP_RXDMA_MONITOR_DESC_RING_SIZE 4096
#define DP_TX_MONITOR_BUF_RING_SIZE 4096
#define DP_TX_MONITOR_DEST_RING_SIZE 2048
@@ -373,17 +372,24 @@ struct ath12k_dp {
};
/* HTT definitions */
+#define HTT_TAG_TCL_METADATA_VERSION 5
-#define HTT_TCL_META_DATA_TYPE BIT(0)
-#define HTT_TCL_META_DATA_VALID_HTT BIT(1)
+#define HTT_TCL_META_DATA_TYPE GENMASK(1, 0)
+#define HTT_TCL_META_DATA_VALID_HTT BIT(2)
/* vdev meta data */
-#define HTT_TCL_META_DATA_VDEV_ID GENMASK(9, 2)
-#define HTT_TCL_META_DATA_PDEV_ID GENMASK(11, 10)
-#define HTT_TCL_META_DATA_HOST_INSPECTED BIT(12)
+#define HTT_TCL_META_DATA_VDEV_ID GENMASK(10, 3)
+#define HTT_TCL_META_DATA_PDEV_ID GENMASK(12, 11)
+#define HTT_TCL_META_DATA_HOST_INSPECTED_MISSION BIT(13)
/* peer meta data */
-#define HTT_TCL_META_DATA_PEER_ID GENMASK(15, 2)
+#define HTT_TCL_META_DATA_PEER_ID GENMASK(15, 3)
+
+/* Global sequence number */
+#define HTT_TCL_META_DATA_TYPE_GLOBAL_SEQ_NUM 3
+#define HTT_TCL_META_DATA_GLOBAL_SEQ_HOST_INSPECTED BIT(2)
+#define HTT_TCL_META_DATA_GLOBAL_SEQ_NUM GENMASK(14, 3)
+#define HTT_TX_MLO_MCAST_HOST_REINJECT_BASE_VDEV_ID 128
/* HTT tx completion is overlaid in wbm_release_ring */
#define HTT_TX_WBM_COMP_INFO0_STATUS GENMASK(16, 13)
@@ -414,9 +420,15 @@ enum htt_h2t_msg_type {
};
#define HTT_VER_REQ_INFO_MSG_ID GENMASK(7, 0)
+#define HTT_OPTION_TCL_METADATA_VER_V2 2
+#define HTT_OPTION_TAG GENMASK(7, 0)
+#define HTT_OPTION_LEN GENMASK(15, 8)
+#define HTT_OPTION_VALUE GENMASK(31, 16)
+#define HTT_TCL_METADATA_VER_SZ 4
struct htt_ver_req_cmd {
__le32 ver_reg_info;
+ __le32 tcl_metadata_version;
} __packed;
enum htt_srng_ring_type {
@@ -434,8 +446,11 @@ enum htt_srng_ring_id {
HTT_HOST1_TO_FW_RXBUF_RING,
HTT_HOST2_TO_FW_RXBUF_RING,
HTT_RXDMA_NON_MONITOR_DEST_RING,
+ HTT_RXDMA_HOST_BUF_RING2,
HTT_TX_MON_HOST2MON_BUF_RING,
HTT_TX_MON_MON2HOST_DEST_RING,
+ HTT_RX_MON_HOST2MON_BUF_RING,
+ HTT_RX_MON_MON2HOST_DEST_RING,
};
/* host -> target HTT_SRING_SETUP message
@@ -767,8 +782,22 @@ enum htt_stats_internal_ppdu_frametype {
#define HTT_RX_RING_SELECTION_CFG_CMD_INFO0_RING_ID GENMASK(23, 16)
#define HTT_RX_RING_SELECTION_CFG_CMD_INFO0_SS BIT(24)
#define HTT_RX_RING_SELECTION_CFG_CMD_INFO0_PS BIT(25)
-#define HTT_RX_RING_SELECTION_CFG_CMD_INFO1_BUF_SIZE GENMASK(15, 0)
-#define HTT_RX_RING_SELECTION_CFG_CMD_OFFSET_VALID BIT(26)
+#define HTT_RX_RING_SELECTION_CFG_CMD_INFO0_OFFSET_VALID BIT(26)
+#define HTT_RX_RING_SELECTION_CFG_CMD_INFO0_DROP_THRES_VAL BIT(27)
+#define HTT_RX_RING_SELECTION_CFG_CMD_INFO0_EN_RXMON BIT(28)
+
+#define HTT_RX_RING_SELECTION_CFG_CMD_INFO1_BUF_SIZE GENMASK(15, 0)
+#define HTT_RX_RING_SELECTION_CFG_CMD_INFO1_CONF_LEN_MGMT GENMASK(18, 16)
+#define HTT_RX_RING_SELECTION_CFG_CMD_INFO1_CONF_LEN_CTRL GENMASK(21, 19)
+#define HTT_RX_RING_SELECTION_CFG_CMD_INFO1_CONF_LEN_DATA GENMASK(24, 22)
+
+#define HTT_RX_RING_SELECTION_CFG_CMD_INFO2_DROP_THRESHOLD GENMASK(9, 0)
+#define HTT_RX_RING_SELECTION_CFG_CMD_INFO2_EN_LOG_MGMT_TYPE BIT(17)
+#define HTT_RX_RING_SELECTION_CFG_CMD_INFO2_EN_CTRL_TYPE BIT(18)
+#define HTT_RX_RING_SELECTION_CFG_CMD_INFO2_EN_LOG_DATA_TYPE BIT(19)
+
+#define HTT_RX_RING_SELECTION_CFG_CMD_INFO3_EN_TLV_PKT_OFFSET BIT(0)
+#define HTT_RX_RING_SELECTION_CFG_CMD_INFO3_PKT_TLV_OFFSET GENMASK(14, 1)
#define HTT_RX_RING_SELECTION_CFG_RX_PACKET_OFFSET GENMASK(15, 0)
#define HTT_RX_RING_SELECTION_CFG_RX_HEADER_OFFSET GENMASK(31, 16)
@@ -797,6 +826,7 @@ enum htt_rx_filter_tlv_flags {
HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS = BIT(10),
HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS_EXT = BIT(11),
HTT_RX_FILTER_TLV_FLAGS_PPDU_END_STATUS_DONE = BIT(12),
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_START_USER_INFO = BIT(13),
};
enum htt_rx_mgmt_pkt_filter_tlv_flags0 {
@@ -1085,6 +1115,21 @@ enum htt_rx_data_pkt_filter_tlv_flasg3 {
HTT_RX_FILTER_TLV_FLAGS_PER_MSDU_HEADER | \
HTT_RX_FILTER_TLV_FLAGS_ATTENTION)
+#define HTT_RX_MON_FILTER_TLV_FLAGS_MON_DEST_RING \
+ (HTT_RX_FILTER_TLV_FLAGS_MPDU_START | \
+ HTT_RX_FILTER_TLV_FLAGS_MSDU_START | \
+ HTT_RX_FILTER_TLV_FLAGS_RX_PACKET | \
+ HTT_RX_FILTER_TLV_FLAGS_MSDU_END | \
+ HTT_RX_FILTER_TLV_FLAGS_MPDU_END | \
+ HTT_RX_FILTER_TLV_FLAGS_PACKET_HEADER | \
+ HTT_RX_FILTER_TLV_FLAGS_PER_MSDU_HEADER | \
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_START | \
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_END | \
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS | \
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS_EXT | \
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_END_STATUS_DONE | \
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_START_USER_INFO)
+
/* msdu start. mpdu end, attention, rx hdr tlv's are not subscribed */
#define HTT_RX_TLV_FLAGS_RXDMA_RING \
(HTT_RX_FILTER_TLV_FLAGS_MPDU_START | \
@@ -1113,6 +1158,10 @@ struct htt_rx_ring_selection_cfg_cmd {
__le32 info3;
} __packed;
+#define HTT_RX_RING_TLV_DROP_THRESHOLD_VALUE 32
+#define HTT_RX_RING_DEFAULT_DMA_LENGTH 0x7
+#define HTT_RX_RING_PKT_TLV_OFFSET 0x1
+
struct htt_rx_ring_tlv_filter {
u32 rx_filter; /* see htt_rx_filter_tlv_flags */
u32 pkt_filter_flags0; /* MGMT */
@@ -1130,6 +1179,17 @@ struct htt_rx_ring_tlv_filter {
u16 rx_mpdu_start_wmask;
u16 rx_mpdu_end_wmask;
u32 rx_msdu_end_wmask;
+ u32 conf_len_ctrl;
+ u32 conf_len_mgmt;
+ u32 conf_len_data;
+ u16 rx_drop_threshold;
+ bool enable_log_mgmt_type;
+ bool enable_log_ctrl_type;
+ bool enable_log_data_type;
+ bool enable_rx_tlv_offset;
+ u16 rx_tlv_offset;
+ bool drop_threshold_valid;
+ bool rxmon_disable;
};
#define HTT_STATS_FRAME_CTRL_TYPE_MGMT 0x0
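
In the v2 TCL metadata layout introduced above, the type field widens to two bits, the other fields move up by one bit, and type value 3 (HTT_TCL_META_DATA_TYPE_GLOBAL_SEQ_NUM) selects a global-sequence-number encoding, added here alongside the MLO multicast host-reinject base vdev id. A short sketch of packing both variants with these masks follows; the helper names and the vdev-based type value of 1 are illustrative assumptions, not taken from this patch.

/* Illustrative only: pack HTT TCL metadata words with the v2 field layout
 * defined above.  The type value used for the vdev-based variant is an
 * assumption for this sketch.
 */
#include <linux/types.h>
#include <linux/bitfield.h>

static u32 ath12k_htt_tcl_meta_vdev(u8 vdev_id, u8 pdev_id)
{
	return u32_encode_bits(1, HTT_TCL_META_DATA_TYPE) |	/* assumed: 1 = vdev based */
	       u32_encode_bits(1, HTT_TCL_META_DATA_VALID_HTT) |
	       u32_encode_bits(vdev_id, HTT_TCL_META_DATA_VDEV_ID) |
	       u32_encode_bits(pdev_id, HTT_TCL_META_DATA_PDEV_ID);
}

static u32 ath12k_htt_tcl_meta_global_seq(u16 seq_num)
{
	return u32_encode_bits(HTT_TCL_META_DATA_TYPE_GLOBAL_SEQ_NUM,
			       HTT_TCL_META_DATA_TYPE) |
	       u32_encode_bits(1, HTT_TCL_META_DATA_GLOBAL_SEQ_HOST_INSPECTED) |
	       u32_encode_bits(seq_num, HTT_TCL_META_DATA_GLOBAL_SEQ_NUM);
}
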
diff --git a/drivers/net/wireless/ath/ath12k/dp_mon.c b/drivers/net/wireless/ath/ath12k/dp_mon.c
index 5a21961cfd46..d22800e89485 100644
--- a/drivers/net/wireless/ath/ath12k/dp_mon.c
+++ b/drivers/net/wireless/ath/ath12k/dp_mon.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2019-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "dp_mon.h"
@@ -10,6 +10,12 @@
#include "dp_tx.h"
#include "peer.h"
+#define ATH12K_LE32_DEC_ENC(value, dec_bits, enc_bits) \
+ u32_encode_bits(le32_get_bits(value, dec_bits), enc_bits)
+
+#define ATH12K_LE64_DEC_ENC(value, dec_bits, enc_bits) \
+ u32_encode_bits(le64_get_bits(value, dec_bits), enc_bits)
+
static void
ath12k_dp_mon_rx_handle_ofdma_info(const struct hal_rx_ppdu_end_user_stats *ppdu_end_user,
struct hal_rx_user_status *rx_user_status)
@@ -75,7 +81,7 @@ ath12k_dp_mon_rx_populate_mu_user_info(const struct hal_rx_ppdu_end_user_stats *
static void ath12k_dp_mon_parse_vht_sig_a(const struct hal_rx_vht_sig_a_info *vht_sig,
struct hal_rx_mon_ppdu_info *ppdu_info)
{
- u32 nsts, group_id, info0, info1;
+ u32 nsts, info0, info1;
u8 gi_setting;
info0 = __le32_to_cpu(vht_sig->info0);
@@ -103,12 +109,8 @@ static void ath12k_dp_mon_parse_vht_sig_a(const struct hal_rx_vht_sig_a_info *vh
ppdu_info->bw = u32_get_bits(info0, HAL_RX_VHT_SIG_A_INFO_INFO0_BW);
ppdu_info->beamformed = u32_get_bits(info1,
HAL_RX_VHT_SIG_A_INFO_INFO1_BEAMFORMED);
- group_id = u32_get_bits(info0, HAL_RX_VHT_SIG_A_INFO_INFO0_GROUP_ID);
- if (group_id == 0 || group_id == 63)
- ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_SU;
- else
- ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_MU_MIMO;
- ppdu_info->vht_flag_values5 = group_id;
+ ppdu_info->vht_flag_values5 = u32_get_bits(info0,
+ HAL_RX_VHT_SIG_A_INFO_INFO0_GROUP_ID);
ppdu_info->vht_flag_values3[0] = (((ppdu_info->mcs) << 4) |
ppdu_info->nss);
ppdu_info->vht_flag_values2 = ppdu_info->bw;
@@ -128,7 +130,6 @@ static void ath12k_dp_mon_parse_ht_sig(const struct hal_rx_ht_sig_info *ht_sig,
ppdu_info->ldpc = u32_get_bits(info1, HAL_RX_HT_SIG_INFO_INFO1_FEC_CODING);
ppdu_info->gi = u32_get_bits(info1, HAL_RX_HT_SIG_INFO_INFO1_GI);
ppdu_info->nss = (ppdu_info->mcs >> 3);
- ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_SU;
}
static void ath12k_dp_mon_parse_l_sig_b(const struct hal_rx_lsig_b_info *lsigb,
@@ -160,7 +161,6 @@ static void ath12k_dp_mon_parse_l_sig_b(const struct hal_rx_lsig_b_info *lsigb,
ppdu_info->rate = rate;
ppdu_info->cck_flag = 1;
- ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_SU;
}
static void ath12k_dp_mon_parse_l_sig_a(const struct hal_rx_lsig_a_info *lsiga,
@@ -200,7 +200,6 @@ static void ath12k_dp_mon_parse_l_sig_a(const struct hal_rx_lsig_a_info *lsiga,
}
ppdu_info->rate = rate;
- ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_SU;
}
static void
@@ -237,7 +236,6 @@ ath12k_dp_mon_parse_he_sig_b2_ofdma(const struct hal_rx_he_sig_b2_ofdma_info *of
ppdu_info->nss = u32_get_bits(info0, HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_NSTS);
ppdu_info->beamformed = u32_get_bits(info0,
HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_TXBF);
- ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_MU_OFDMA;
}
static void
@@ -277,7 +275,6 @@ ath12k_dp_mon_parse_he_sig_b1_mu(const struct hal_rx_he_sig_b1_mu_info *he_sig_b
HAL_RX_HE_SIG_B1_MU_INFO_INFO0_RU_ALLOCATION);
ppdu_info->ru_alloc = ath12k_he_ru_tones_to_nl80211_he_ru_alloc(ru_tones);
ppdu_info->he_RU[0] = ru_tones;
- ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_MU_MIMO;
}
static void
@@ -411,7 +408,6 @@ ath12k_dp_mon_parse_he_sig_mu(const struct hal_rx_he_sig_a_mu_dl_info *he_sig_a_
ppdu_info->is_stbc = info1 &
HAL_RX_HE_SIG_A_MU_DL_INFO1_STBC;
- ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_MU_MIMO;
}
static void ath12k_dp_mon_parse_he_sig_su(const struct hal_rx_he_sig_a_su_info *he_sig_a,
@@ -559,17 +555,887 @@ static void ath12k_dp_mon_parse_he_sig_su(const struct hal_rx_he_sig_a_su_info *
dcm = u32_get_bits(info0, HAL_RX_HE_SIG_A_SU_INFO_INFO0_DCM);
ppdu_info->nss = u32_get_bits(info0, HAL_RX_HE_SIG_A_SU_INFO_INFO0_NSTS);
ppdu_info->dcm = dcm;
- ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_SU;
+}
+
+static void
+ath12k_dp_mon_hal_rx_parse_u_sig_cmn(const struct hal_mon_usig_cmn *cmn,
+ struct hal_rx_mon_ppdu_info *ppdu_info)
+{
+ u32 common;
+
+ ppdu_info->u_sig_info.bw = le32_get_bits(cmn->info0,
+ HAL_RX_USIG_CMN_INFO0_BW);
+ ppdu_info->u_sig_info.ul_dl = le32_get_bits(cmn->info0,
+ HAL_RX_USIG_CMN_INFO0_UL_DL);
+
+ common = __le32_to_cpu(ppdu_info->u_sig_info.usig.common);
+ common |= IEEE80211_RADIOTAP_EHT_USIG_COMMON_PHY_VER_KNOWN |
+ IEEE80211_RADIOTAP_EHT_USIG_COMMON_BW_KNOWN |
+ IEEE80211_RADIOTAP_EHT_USIG_COMMON_UL_DL_KNOWN |
+ IEEE80211_RADIOTAP_EHT_USIG_COMMON_BSS_COLOR_KNOWN |
+ IEEE80211_RADIOTAP_EHT_USIG_COMMON_TXOP_KNOWN |
+ ATH12K_LE32_DEC_ENC(cmn->info0,
+ HAL_RX_USIG_CMN_INFO0_PHY_VERSION,
+ IEEE80211_RADIOTAP_EHT_USIG_COMMON_PHY_VER) |
+ u32_encode_bits(ppdu_info->u_sig_info.bw,
+ IEEE80211_RADIOTAP_EHT_USIG_COMMON_BW) |
+ u32_encode_bits(ppdu_info->u_sig_info.ul_dl,
+ IEEE80211_RADIOTAP_EHT_USIG_COMMON_UL_DL) |
+ ATH12K_LE32_DEC_ENC(cmn->info0,
+ HAL_RX_USIG_CMN_INFO0_BSS_COLOR,
+ IEEE80211_RADIOTAP_EHT_USIG_COMMON_BSS_COLOR) |
+ ATH12K_LE32_DEC_ENC(cmn->info0,
+ HAL_RX_USIG_CMN_INFO0_TXOP,
+ IEEE80211_RADIOTAP_EHT_USIG_COMMON_TXOP);
+ ppdu_info->u_sig_info.usig.common = cpu_to_le32(common);
+
+ switch (ppdu_info->u_sig_info.bw) {
+ default:
+ fallthrough;
+ case HAL_EHT_BW_20:
+ ppdu_info->bw = HAL_RX_BW_20MHZ;
+ break;
+ case HAL_EHT_BW_40:
+ ppdu_info->bw = HAL_RX_BW_40MHZ;
+ break;
+ case HAL_EHT_BW_80:
+ ppdu_info->bw = HAL_RX_BW_80MHZ;
+ break;
+ case HAL_EHT_BW_160:
+ ppdu_info->bw = HAL_RX_BW_160MHZ;
+ break;
+ case HAL_EHT_BW_320_1:
+ case HAL_EHT_BW_320_2:
+ ppdu_info->bw = HAL_RX_BW_320MHZ;
+ break;
+ }
+}
+
+static void
+ath12k_dp_mon_hal_rx_parse_u_sig_tb(const struct hal_mon_usig_tb *usig_tb,
+ struct hal_rx_mon_ppdu_info *ppdu_info)
+{
+ struct ieee80211_radiotap_eht_usig *usig = &ppdu_info->u_sig_info.usig;
+ enum ieee80211_radiotap_eht_usig_tb spatial_reuse1, spatial_reuse2;
+ u32 common, value, mask;
+
+ spatial_reuse1 = IEEE80211_RADIOTAP_EHT_USIG2_TB_B3_B6_SPATIAL_REUSE_1;
+ spatial_reuse2 = IEEE80211_RADIOTAP_EHT_USIG2_TB_B7_B10_SPATIAL_REUSE_2;
+
+ common = __le32_to_cpu(usig->common);
+ value = __le32_to_cpu(usig->value);
+ mask = __le32_to_cpu(usig->mask);
+
+ ppdu_info->u_sig_info.ppdu_type_comp_mode =
+ le32_get_bits(usig_tb->info0,
+ HAL_RX_USIG_TB_INFO0_PPDU_TYPE_COMP_MODE);
+
+ common |= ATH12K_LE32_DEC_ENC(usig_tb->info0,
+ HAL_RX_USIG_TB_INFO0_RX_INTEG_CHECK_PASS,
+ IEEE80211_RADIOTAP_EHT_USIG_COMMON_BAD_USIG_CRC);
+
+ value |= IEEE80211_RADIOTAP_EHT_USIG1_TB_B20_B25_DISREGARD |
+ u32_encode_bits(ppdu_info->u_sig_info.ppdu_type_comp_mode,
+ IEEE80211_RADIOTAP_EHT_USIG2_TB_B0_B1_PPDU_TYPE) |
+ IEEE80211_RADIOTAP_EHT_USIG2_TB_B2_VALIDATE |
+ ATH12K_LE32_DEC_ENC(usig_tb->info0,
+ HAL_RX_USIG_TB_INFO0_SPATIAL_REUSE_1,
+ spatial_reuse1) |
+ ATH12K_LE32_DEC_ENC(usig_tb->info0,
+ HAL_RX_USIG_TB_INFO0_SPATIAL_REUSE_2,
+ spatial_reuse2) |
+ IEEE80211_RADIOTAP_EHT_USIG2_TB_B11_B15_DISREGARD |
+ ATH12K_LE32_DEC_ENC(usig_tb->info0,
+ HAL_RX_USIG_TB_INFO0_CRC,
+ IEEE80211_RADIOTAP_EHT_USIG2_TB_B16_B19_CRC) |
+ ATH12K_LE32_DEC_ENC(usig_tb->info0,
+ HAL_RX_USIG_TB_INFO0_TAIL,
+ IEEE80211_RADIOTAP_EHT_USIG2_TB_B20_B25_TAIL);
+
+ mask |= IEEE80211_RADIOTAP_EHT_USIG1_TB_B20_B25_DISREGARD |
+ IEEE80211_RADIOTAP_EHT_USIG2_TB_B0_B1_PPDU_TYPE |
+ IEEE80211_RADIOTAP_EHT_USIG2_TB_B2_VALIDATE |
+ spatial_reuse1 | spatial_reuse2 |
+ IEEE80211_RADIOTAP_EHT_USIG2_TB_B11_B15_DISREGARD |
+ IEEE80211_RADIOTAP_EHT_USIG2_TB_B16_B19_CRC |
+ IEEE80211_RADIOTAP_EHT_USIG2_TB_B20_B25_TAIL;
+
+ usig->common = cpu_to_le32(common);
+ usig->value = cpu_to_le32(value);
+ usig->mask = cpu_to_le32(mask);
+}
+
+static void
+ath12k_dp_mon_hal_rx_parse_u_sig_mu(const struct hal_mon_usig_mu *usig_mu,
+ struct hal_rx_mon_ppdu_info *ppdu_info)
+{
+ struct ieee80211_radiotap_eht_usig *usig = &ppdu_info->u_sig_info.usig;
+ enum ieee80211_radiotap_eht_usig_mu sig_symb, punc;
+ u32 common, value, mask;
+
+ sig_symb = IEEE80211_RADIOTAP_EHT_USIG2_MU_B11_B15_EHT_SIG_SYMBOLS;
+ punc = IEEE80211_RADIOTAP_EHT_USIG2_MU_B3_B7_PUNCTURED_INFO;
+
+ common = __le32_to_cpu(usig->common);
+ value = __le32_to_cpu(usig->value);
+ mask = __le32_to_cpu(usig->mask);
+
+ ppdu_info->u_sig_info.ppdu_type_comp_mode =
+ le32_get_bits(usig_mu->info0,
+ HAL_RX_USIG_MU_INFO0_PPDU_TYPE_COMP_MODE);
+ ppdu_info->u_sig_info.eht_sig_mcs =
+ le32_get_bits(usig_mu->info0,
+ HAL_RX_USIG_MU_INFO0_EHT_SIG_MCS);
+ ppdu_info->u_sig_info.num_eht_sig_sym =
+ le32_get_bits(usig_mu->info0,
+ HAL_RX_USIG_MU_INFO0_NUM_EHT_SIG_SYM);
+
+ common |= ATH12K_LE32_DEC_ENC(usig_mu->info0,
+ HAL_RX_USIG_MU_INFO0_RX_INTEG_CHECK_PASS,
+ IEEE80211_RADIOTAP_EHT_USIG_COMMON_BAD_USIG_CRC);
+
+ value |= IEEE80211_RADIOTAP_EHT_USIG1_MU_B20_B24_DISREGARD |
+ IEEE80211_RADIOTAP_EHT_USIG1_MU_B25_VALIDATE |
+ u32_encode_bits(ppdu_info->u_sig_info.ppdu_type_comp_mode,
+ IEEE80211_RADIOTAP_EHT_USIG2_MU_B0_B1_PPDU_TYPE) |
+ IEEE80211_RADIOTAP_EHT_USIG2_MU_B2_VALIDATE |
+ ATH12K_LE32_DEC_ENC(usig_mu->info0,
+ HAL_RX_USIG_MU_INFO0_PUNC_CH_INFO,
+ punc) |
+ IEEE80211_RADIOTAP_EHT_USIG2_MU_B8_VALIDATE |
+ u32_encode_bits(ppdu_info->u_sig_info.eht_sig_mcs,
+ IEEE80211_RADIOTAP_EHT_USIG2_MU_B9_B10_SIG_MCS) |
+ u32_encode_bits(ppdu_info->u_sig_info.num_eht_sig_sym,
+ sig_symb) |
+ ATH12K_LE32_DEC_ENC(usig_mu->info0,
+ HAL_RX_USIG_MU_INFO0_CRC,
+ IEEE80211_RADIOTAP_EHT_USIG2_MU_B16_B19_CRC) |
+ ATH12K_LE32_DEC_ENC(usig_mu->info0,
+ HAL_RX_USIG_MU_INFO0_TAIL,
+ IEEE80211_RADIOTAP_EHT_USIG2_MU_B20_B25_TAIL);
+
+ mask |= IEEE80211_RADIOTAP_EHT_USIG1_MU_B20_B24_DISREGARD |
+ IEEE80211_RADIOTAP_EHT_USIG1_MU_B25_VALIDATE |
+ IEEE80211_RADIOTAP_EHT_USIG2_MU_B0_B1_PPDU_TYPE |
+ IEEE80211_RADIOTAP_EHT_USIG2_MU_B2_VALIDATE |
+ punc |
+ IEEE80211_RADIOTAP_EHT_USIG2_MU_B8_VALIDATE |
+ IEEE80211_RADIOTAP_EHT_USIG2_MU_B9_B10_SIG_MCS |
+ sig_symb |
+ IEEE80211_RADIOTAP_EHT_USIG2_MU_B16_B19_CRC |
+ IEEE80211_RADIOTAP_EHT_USIG2_MU_B20_B25_TAIL;
+
+ usig->common = cpu_to_le32(common);
+ usig->value = cpu_to_le32(value);
+ usig->mask = cpu_to_le32(mask);
+}
+
+static void
+ath12k_dp_mon_hal_rx_parse_u_sig_hdr(const struct hal_mon_usig_hdr *usig,
+ struct hal_rx_mon_ppdu_info *ppdu_info)
+{
+ u8 comp_mode;
+
+ ppdu_info->eht_usig = true;
+
+ ath12k_dp_mon_hal_rx_parse_u_sig_cmn(&usig->cmn, ppdu_info);
+
+ comp_mode = le32_get_bits(usig->non_cmn.mu.info0,
+ HAL_RX_USIG_MU_INFO0_PPDU_TYPE_COMP_MODE);
+
+ if (comp_mode == 0 && ppdu_info->u_sig_info.ul_dl)
+ ath12k_dp_mon_hal_rx_parse_u_sig_tb(&usig->non_cmn.tb, ppdu_info);
+ else
+ ath12k_dp_mon_hal_rx_parse_u_sig_mu(&usig->non_cmn.mu, ppdu_info);
+}
+
+static void
+ath12k_dp_mon_hal_aggr_tlv(struct hal_rx_mon_ppdu_info *ppdu_info,
+ u16 tlv_len, const void *tlv_data)
+{
+ if (tlv_len <= HAL_RX_MON_MAX_AGGR_SIZE - ppdu_info->tlv_aggr.cur_len) {
+ memcpy(ppdu_info->tlv_aggr.buf + ppdu_info->tlv_aggr.cur_len,
+ tlv_data, tlv_len);
+ ppdu_info->tlv_aggr.cur_len += tlv_len;
+ }
+}
+
+static inline bool
+ath12k_dp_mon_hal_rx_is_frame_type_ndp(const struct hal_rx_u_sig_info *usig_info)
+{
+ if (usig_info->ppdu_type_comp_mode == 1 &&
+ usig_info->eht_sig_mcs == 0 &&
+ usig_info->num_eht_sig_sym == 0)
+ return true;
+
+ return false;
+}
+
+static inline bool
+ath12k_dp_mon_hal_rx_is_non_ofdma(const struct hal_rx_u_sig_info *usig_info)
+{
+ u32 ppdu_type_comp_mode = usig_info->ppdu_type_comp_mode;
+ u32 ul_dl = usig_info->ul_dl;
+
+ if ((ppdu_type_comp_mode == HAL_RX_RECEPTION_TYPE_MU_MIMO && ul_dl == 0) ||
+ (ppdu_type_comp_mode == HAL_RX_RECEPTION_TYPE_MU_OFDMA && ul_dl == 0) ||
+ (ppdu_type_comp_mode == HAL_RX_RECEPTION_TYPE_MU_MIMO && ul_dl == 1))
+ return true;
+
+ return false;
+}
+
+static inline bool
+ath12k_dp_mon_hal_rx_is_ofdma(const struct hal_rx_u_sig_info *usig_info)
+{
+ if (usig_info->ppdu_type_comp_mode == 0 && usig_info->ul_dl == 0)
+ return true;
+
+ return false;
+}
+
+static void
+ath12k_dp_mon_hal_rx_parse_eht_sig_ndp(const struct hal_eht_sig_ndp_cmn_eb *eht_sig_ndp,
+ struct hal_rx_mon_ppdu_info *ppdu_info)
+{
+ struct hal_rx_radiotap_eht *eht = &ppdu_info->eht_info.eht;
+ u32 known, data;
+
+ known = __le32_to_cpu(eht->known);
+ known |= IEEE80211_RADIOTAP_EHT_KNOWN_SPATIAL_REUSE |
+ IEEE80211_RADIOTAP_EHT_KNOWN_EHT_LTF |
+ IEEE80211_RADIOTAP_EHT_KNOWN_NSS_S |
+ IEEE80211_RADIOTAP_EHT_KNOWN_BEAMFORMED_S |
+ IEEE80211_RADIOTAP_EHT_KNOWN_DISREGARD_S |
+ IEEE80211_RADIOTAP_EHT_KNOWN_CRC1 |
+ IEEE80211_RADIOTAP_EHT_KNOWN_TAIL1;
+ eht->known = cpu_to_le32(known);
+
+ data = __le32_to_cpu(eht->data[0]);
+ data |= ATH12K_LE32_DEC_ENC(eht_sig_ndp->info0,
+ HAL_RX_EHT_SIG_NDP_CMN_INFO0_SPATIAL_REUSE,
+ IEEE80211_RADIOTAP_EHT_DATA0_SPATIAL_REUSE);
+ /* GI and LTF size are separately indicated in radiotap header
+ * and hence will be parsed from other TLV
+ */
+ data |= ATH12K_LE32_DEC_ENC(eht_sig_ndp->info0,
+ HAL_RX_EHT_SIG_NDP_CMN_INFO0_NUM_LTF_SYM,
+ IEEE80211_RADIOTAP_EHT_DATA0_EHT_LTF);
+
+ data |= ATH12K_LE32_DEC_ENC(eht_sig_ndp->info0,
+ HAL_RX_EHT_SIG_NDP_CMN_INFO0_CRC,
+ IEEE80211_RADIOTAP_EHT_DATA0_CRC1_O);
+
+ data |= ATH12K_LE32_DEC_ENC(eht_sig_ndp->info0,
+ HAL_RX_EHT_SIG_NDP_CMN_INFO0_DISREGARD,
+ IEEE80211_RADIOTAP_EHT_DATA0_DISREGARD_S);
+ eht->data[0] = cpu_to_le32(data);
+
+ data = __le32_to_cpu(eht->data[7]);
+ data |= ATH12K_LE32_DEC_ENC(eht_sig_ndp->info0,
+ HAL_RX_EHT_SIG_NDP_CMN_INFO0_NSS,
+ IEEE80211_RADIOTAP_EHT_DATA7_NSS_S);
+
+ data |= ATH12K_LE32_DEC_ENC(eht_sig_ndp->info0,
+ HAL_RX_EHT_SIG_NDP_CMN_INFO0_BEAMFORMED,
+ IEEE80211_RADIOTAP_EHT_DATA7_BEAMFORMED_S);
+ eht->data[7] = cpu_to_le32(data);
+}
+
+static void
+ath12k_dp_mon_hal_rx_parse_usig_overflow(const struct hal_eht_sig_usig_overflow *ovflow,
+ struct hal_rx_mon_ppdu_info *ppdu_info)
+{
+ struct hal_rx_radiotap_eht *eht = &ppdu_info->eht_info.eht;
+ u32 known, data;
+
+ known = __le32_to_cpu(eht->known);
+ known |= IEEE80211_RADIOTAP_EHT_KNOWN_SPATIAL_REUSE |
+ IEEE80211_RADIOTAP_EHT_KNOWN_EHT_LTF |
+ IEEE80211_RADIOTAP_EHT_KNOWN_LDPC_EXTRA_SYM_OM |
+ IEEE80211_RADIOTAP_EHT_KNOWN_PRE_PADD_FACOR_OM |
+ IEEE80211_RADIOTAP_EHT_KNOWN_PE_DISAMBIGUITY_OM |
+ IEEE80211_RADIOTAP_EHT_KNOWN_DISREGARD_O;
+ eht->known = cpu_to_le32(known);
+
+ data = __le32_to_cpu(eht->data[0]);
+ data |= ATH12K_LE32_DEC_ENC(ovflow->info0,
+ HAL_RX_EHT_SIG_OVERFLOW_INFO0_SPATIAL_REUSE,
+ IEEE80211_RADIOTAP_EHT_DATA0_SPATIAL_REUSE);
+
+ /* GI and LTF size are separately indicated in radiotap header
+ * and hence will be parsed from other TLV
+ */
+ data |= ATH12K_LE32_DEC_ENC(ovflow->info0,
+ HAL_RX_EHT_SIG_OVERFLOW_INFO0_NUM_LTF_SYM,
+ IEEE80211_RADIOTAP_EHT_DATA0_EHT_LTF);
+
+ data |= ATH12K_LE32_DEC_ENC(ovflow->info0,
+ HAL_RX_EHT_SIG_OVERFLOW_INFO0_LDPC_EXTA_SYM,
+ IEEE80211_RADIOTAP_EHT_DATA0_LDPC_EXTRA_SYM_OM);
+
+ data |= ATH12K_LE32_DEC_ENC(ovflow->info0,
+ HAL_RX_EHT_SIG_OVERFLOW_INFO0_PRE_FEC_PAD_FACTOR,
+ IEEE80211_RADIOTAP_EHT_DATA0_PRE_PADD_FACOR_OM);
+
+ data |= ATH12K_LE32_DEC_ENC(ovflow->info0,
+ HAL_RX_EHT_SIG_OVERFLOW_INFO0_DISAMBIGUITY,
+ IEEE80211_RADIOTAP_EHT_DATA0_PE_DISAMBIGUITY_OM);
+
+ data |= ATH12K_LE32_DEC_ENC(ovflow->info0,
+ HAL_RX_EHT_SIG_OVERFLOW_INFO0_DISREGARD,
+ IEEE80211_RADIOTAP_EHT_DATA0_DISREGARD_O);
+ eht->data[0] = cpu_to_le32(data);
+}
+
+static void
+ath12k_dp_mon_hal_rx_parse_non_ofdma_users(const struct hal_eht_sig_non_ofdma_cmn_eb *eb,
+ struct hal_rx_mon_ppdu_info *ppdu_info)
+{
+ struct hal_rx_radiotap_eht *eht = &ppdu_info->eht_info.eht;
+ u32 known, data;
+
+ known = __le32_to_cpu(eht->known);
+ known |= IEEE80211_RADIOTAP_EHT_KNOWN_NR_NON_OFDMA_USERS_M;
+ eht->known = cpu_to_le32(known);
+
+ data = __le32_to_cpu(eht->data[7]);
+ data |= ATH12K_LE32_DEC_ENC(eb->info0,
+ HAL_RX_EHT_SIG_NON_OFDMA_INFO0_NUM_USERS,
+ IEEE80211_RADIOTAP_EHT_DATA7_NUM_OF_NON_OFDMA_USERS);
+ eht->data[7] = cpu_to_le32(data);
+}
+
+static void
+ath12k_dp_mon_hal_rx_parse_eht_mumimo_user(const struct hal_eht_sig_mu_mimo *user,
+ struct hal_rx_mon_ppdu_info *ppdu_info)
+{
+ struct hal_rx_eht_info *eht_info = &ppdu_info->eht_info;
+ u32 user_idx;
+
+ if (eht_info->num_user_info >= ARRAY_SIZE(eht_info->user_info))
+ return;
+
+ user_idx = eht_info->num_user_info++;
+
+ eht_info->user_info[user_idx] |=
+ IEEE80211_RADIOTAP_EHT_USER_INFO_STA_ID_KNOWN |
+ IEEE80211_RADIOTAP_EHT_USER_INFO_MCS_KNOWN |
+ IEEE80211_RADIOTAP_EHT_USER_INFO_CODING_KNOWN |
+ IEEE80211_RADIOTAP_EHT_USER_INFO_SPATIAL_CONFIG_KNOWN_M |
+ ATH12K_LE32_DEC_ENC(user->info0,
+ HAL_RX_EHT_SIG_MUMIMO_USER_INFO0_STA_ID,
+ IEEE80211_RADIOTAP_EHT_USER_INFO_STA_ID) |
+ ATH12K_LE32_DEC_ENC(user->info0,
+ HAL_RX_EHT_SIG_MUMIMO_USER_INFO0_CODING,
+ IEEE80211_RADIOTAP_EHT_USER_INFO_CODING) |
+ ATH12K_LE32_DEC_ENC(user->info0,
+ HAL_RX_EHT_SIG_MUMIMO_USER_INFO0_MCS,
+ IEEE80211_RADIOTAP_EHT_USER_INFO_MCS) |
+ ATH12K_LE32_DEC_ENC(user->info0,
+ HAL_RX_EHT_SIG_MUMIMO_USER_INFO0_SPATIAL_CODING,
+ IEEE80211_RADIOTAP_EHT_USER_INFO_SPATIAL_CONFIG_M);
+
+ ppdu_info->mcs = le32_get_bits(user->info0,
+ HAL_RX_EHT_SIG_MUMIMO_USER_INFO0_MCS);
+}
+
+static void
+ath12k_dp_mon_hal_rx_parse_eht_non_mumimo_user(const struct hal_eht_sig_non_mu_mimo *user,
+ struct hal_rx_mon_ppdu_info *ppdu_info)
+{
+ struct hal_rx_eht_info *eht_info = &ppdu_info->eht_info;
+ u32 user_idx;
+
+ if (eht_info->num_user_info >= ARRAY_SIZE(eht_info->user_info))
+ return;
+
+ user_idx = eht_info->num_user_info++;
+
+ eht_info->user_info[user_idx] |=
+ IEEE80211_RADIOTAP_EHT_USER_INFO_STA_ID_KNOWN |
+ IEEE80211_RADIOTAP_EHT_USER_INFO_MCS_KNOWN |
+ IEEE80211_RADIOTAP_EHT_USER_INFO_CODING_KNOWN |
+ IEEE80211_RADIOTAP_EHT_USER_INFO_NSS_KNOWN_O |
+ IEEE80211_RADIOTAP_EHT_USER_INFO_BEAMFORMING_KNOWN_O |
+ ATH12K_LE32_DEC_ENC(user->info0,
+ HAL_RX_EHT_SIG_NON_MUMIMO_USER_INFO0_STA_ID,
+ IEEE80211_RADIOTAP_EHT_USER_INFO_STA_ID) |
+ ATH12K_LE32_DEC_ENC(user->info0,
+ HAL_RX_EHT_SIG_NON_MUMIMO_USER_INFO0_CODING,
+ IEEE80211_RADIOTAP_EHT_USER_INFO_CODING) |
+ ATH12K_LE32_DEC_ENC(user->info0,
+ HAL_RX_EHT_SIG_NON_MUMIMO_USER_INFO0_MCS,
+ IEEE80211_RADIOTAP_EHT_USER_INFO_MCS) |
+ ATH12K_LE32_DEC_ENC(user->info0,
+ HAL_RX_EHT_SIG_NON_MUMIMO_USER_INFO0_NSS,
+ IEEE80211_RADIOTAP_EHT_USER_INFO_NSS_O) |
+ ATH12K_LE32_DEC_ENC(user->info0,
+ HAL_RX_EHT_SIG_NON_MUMIMO_USER_INFO0_BEAMFORMED,
+ IEEE80211_RADIOTAP_EHT_USER_INFO_BEAMFORMING_O);
+
+ ppdu_info->mcs = le32_get_bits(user->info0,
+ HAL_RX_EHT_SIG_NON_MUMIMO_USER_INFO0_MCS);
+
+ ppdu_info->nss = le32_get_bits(user->info0,
+ HAL_RX_EHT_SIG_NON_MUMIMO_USER_INFO0_NSS) + 1;
+}
+
+static inline bool
+ath12k_dp_mon_hal_rx_is_mu_mimo_user(const struct hal_rx_u_sig_info *usig_info)
+{
+ if (usig_info->ppdu_type_comp_mode == HAL_RX_RECEPTION_TYPE_SU &&
+ usig_info->ul_dl == 1)
+ return true;
+
+ return false;
+}
+
+static void
+ath12k_dp_mon_hal_rx_parse_eht_sig_non_ofdma(const void *tlv,
+ struct hal_rx_mon_ppdu_info *ppdu_info)
+{
+ const struct hal_eht_sig_non_ofdma_cmn_eb *eb = tlv;
+
+ ath12k_dp_mon_hal_rx_parse_usig_overflow(tlv, ppdu_info);
+ ath12k_dp_mon_hal_rx_parse_non_ofdma_users(eb, ppdu_info);
+
+ if (ath12k_dp_mon_hal_rx_is_mu_mimo_user(&ppdu_info->u_sig_info))
+ ath12k_dp_mon_hal_rx_parse_eht_mumimo_user(&eb->user_field.mu_mimo,
+ ppdu_info);
+ else
+ ath12k_dp_mon_hal_rx_parse_eht_non_mumimo_user(&eb->user_field.n_mu_mimo,
+ ppdu_info);
+}
+
+static void
+ath12k_dp_mon_hal_rx_parse_ru_allocation(const struct hal_eht_sig_ofdma_cmn_eb *eb,
+ struct hal_rx_mon_ppdu_info *ppdu_info)
+{
+ const struct hal_eht_sig_ofdma_cmn_eb1 *ofdma_cmn_eb1 = &eb->eb1;
+ const struct hal_eht_sig_ofdma_cmn_eb2 *ofdma_cmn_eb2 = &eb->eb2;
+ struct hal_rx_radiotap_eht *eht = &ppdu_info->eht_info.eht;
+ enum ieee80211_radiotap_eht_data ru_123, ru_124, ru_125, ru_126;
+ enum ieee80211_radiotap_eht_data ru_121, ru_122, ru_112, ru_111;
+ u32 data;
+
+ ru_123 = IEEE80211_RADIOTAP_EHT_DATA4_RU_ALLOC_CC_1_2_3;
+ ru_124 = IEEE80211_RADIOTAP_EHT_DATA5_RU_ALLOC_CC_1_2_4;
+ ru_125 = IEEE80211_RADIOTAP_EHT_DATA5_RU_ALLOC_CC_1_2_5;
+ ru_126 = IEEE80211_RADIOTAP_EHT_DATA6_RU_ALLOC_CC_1_2_6;
+ ru_121 = IEEE80211_RADIOTAP_EHT_DATA3_RU_ALLOC_CC_1_2_1;
+ ru_122 = IEEE80211_RADIOTAP_EHT_DATA3_RU_ALLOC_CC_1_2_2;
+ ru_112 = IEEE80211_RADIOTAP_EHT_DATA2_RU_ALLOC_CC_1_1_2;
+ ru_111 = IEEE80211_RADIOTAP_EHT_DATA1_RU_ALLOC_CC_1_1_1;
+
+ switch (ppdu_info->u_sig_info.bw) {
+ case HAL_EHT_BW_320_2:
+ case HAL_EHT_BW_320_1:
+ data = __le32_to_cpu(eht->data[4]);
+ /* CC1 2::3 */
+ data |= IEEE80211_RADIOTAP_EHT_DATA4_RU_ALLOC_CC_1_2_3_KNOWN |
+ ATH12K_LE64_DEC_ENC(ofdma_cmn_eb2->info0,
+ HAL_RX_EHT_SIG_OFDMA_EB2_RU_ALLOC_2_3,
+ ru_123);
+ eht->data[4] = cpu_to_le32(data);
+
+ data = __le32_to_cpu(eht->data[5]);
+ /* CC1 2::4 */
+ data |= IEEE80211_RADIOTAP_EHT_DATA5_RU_ALLOC_CC_1_2_4_KNOWN |
+ ATH12K_LE64_DEC_ENC(ofdma_cmn_eb2->info0,
+ HAL_RX_EHT_SIG_OFDMA_EB2_RU_ALLOC_2_4,
+ ru_124);
+
+ /* CC1 2::5 */
+ data |= IEEE80211_RADIOTAP_EHT_DATA5_RU_ALLOC_CC_1_2_5_KNOWN |
+ ATH12K_LE64_DEC_ENC(ofdma_cmn_eb2->info0,
+ HAL_RX_EHT_SIG_OFDMA_EB2_RU_ALLOC_2_5,
+ ru_125);
+ eht->data[5] = cpu_to_le32(data);
+
+ data = __le32_to_cpu(eht->data[6]);
+ /* CC1 2::6 */
+ data |= IEEE80211_RADIOTAP_EHT_DATA6_RU_ALLOC_CC_1_2_6_KNOWN |
+ ATH12K_LE64_DEC_ENC(ofdma_cmn_eb2->info0,
+ HAL_RX_EHT_SIG_OFDMA_EB2_RU_ALLOC_2_6,
+ ru_126);
+ eht->data[6] = cpu_to_le32(data);
+
+ fallthrough;
+ case HAL_EHT_BW_160:
+ data = __le32_to_cpu(eht->data[3]);
+ /* CC1 2::1 */
+ data |= IEEE80211_RADIOTAP_EHT_DATA3_RU_ALLOC_CC_1_2_1_KNOWN |
+ ATH12K_LE64_DEC_ENC(ofdma_cmn_eb2->info0,
+ HAL_RX_EHT_SIG_OFDMA_EB2_RU_ALLOC_2_1,
+ ru_121);
+ /* CC1 2::2 */
+ data |= IEEE80211_RADIOTAP_EHT_DATA3_RU_ALLOC_CC_1_2_2_KNOWN |
+ ATH12K_LE64_DEC_ENC(ofdma_cmn_eb2->info0,
+ HAL_RX_EHT_SIG_OFDMA_EB2_RU_ALLOC_2_2,
+ ru_122);
+ eht->data[3] = cpu_to_le32(data);
+
+ fallthrough;
+ case HAL_EHT_BW_80:
+ data = __le32_to_cpu(eht->data[2]);
+ /* CC1 1::2 */
+ data |= IEEE80211_RADIOTAP_EHT_DATA2_RU_ALLOC_CC_1_1_2_KNOWN |
+ ATH12K_LE64_DEC_ENC(ofdma_cmn_eb1->info0,
+ HAL_RX_EHT_SIG_OFDMA_EB1_RU_ALLOC_1_2,
+ ru_112);
+ eht->data[2] = cpu_to_le32(data);
+
+ fallthrough;
+ case HAL_EHT_BW_40:
+ fallthrough;
+ case HAL_EHT_BW_20:
+ data = __le32_to_cpu(eht->data[1]);
+ /* CC1 1::1 */
+ data |= IEEE80211_RADIOTAP_EHT_DATA1_RU_ALLOC_CC_1_1_1_KNOWN |
+ ATH12K_LE64_DEC_ENC(ofdma_cmn_eb1->info0,
+ HAL_RX_EHT_SIG_OFDMA_EB1_RU_ALLOC_1_1,
+ ru_111);
+ eht->data[1] = cpu_to_le32(data);
+ break;
+ default:
+ break;
+ }
+}
+
+static void
+ath12k_dp_mon_hal_rx_parse_eht_sig_ofdma(const void *tlv,
+ struct hal_rx_mon_ppdu_info *ppdu_info)
+{
+ const struct hal_eht_sig_ofdma_cmn_eb *ofdma = tlv;
+
+ ath12k_dp_mon_hal_rx_parse_usig_overflow(tlv, ppdu_info);
+ ath12k_dp_mon_hal_rx_parse_ru_allocation(ofdma, ppdu_info);
+
+ ath12k_dp_mon_hal_rx_parse_eht_non_mumimo_user(&ofdma->user_field.n_mu_mimo,
+ ppdu_info);
+}
+
+static void
+ath12k_dp_mon_parse_eht_sig_hdr(struct hal_rx_mon_ppdu_info *ppdu_info,
+ const void *tlv_data)
+{
+ ppdu_info->is_eht = true;
+
+ if (ath12k_dp_mon_hal_rx_is_frame_type_ndp(&ppdu_info->u_sig_info))
+ ath12k_dp_mon_hal_rx_parse_eht_sig_ndp(tlv_data, ppdu_info);
+ else if (ath12k_dp_mon_hal_rx_is_non_ofdma(&ppdu_info->u_sig_info))
+ ath12k_dp_mon_hal_rx_parse_eht_sig_non_ofdma(tlv_data, ppdu_info);
+ else if (ath12k_dp_mon_hal_rx_is_ofdma(&ppdu_info->u_sig_info))
+ ath12k_dp_mon_hal_rx_parse_eht_sig_ofdma(tlv_data, ppdu_info);
+}
+
+static inline enum ath12k_eht_ru_size
+hal_rx_mon_hal_ru_size_to_ath12k_ru_size(u32 hal_ru_size)
+{
+ switch (hal_ru_size) {
+ case HAL_EHT_RU_26:
+ return ATH12K_EHT_RU_26;
+ case HAL_EHT_RU_52:
+ return ATH12K_EHT_RU_52;
+ case HAL_EHT_RU_78:
+ return ATH12K_EHT_RU_52_26;
+ case HAL_EHT_RU_106:
+ return ATH12K_EHT_RU_106;
+ case HAL_EHT_RU_132:
+ return ATH12K_EHT_RU_106_26;
+ case HAL_EHT_RU_242:
+ return ATH12K_EHT_RU_242;
+ case HAL_EHT_RU_484:
+ return ATH12K_EHT_RU_484;
+ case HAL_EHT_RU_726:
+ return ATH12K_EHT_RU_484_242;
+ case HAL_EHT_RU_996:
+ return ATH12K_EHT_RU_996;
+ case HAL_EHT_RU_996x2:
+ return ATH12K_EHT_RU_996x2;
+ case HAL_EHT_RU_996x3:
+ return ATH12K_EHT_RU_996x3;
+ case HAL_EHT_RU_996x4:
+ return ATH12K_EHT_RU_996x4;
+ case HAL_EHT_RU_NONE:
+ return ATH12K_EHT_RU_INVALID;
+ case HAL_EHT_RU_996_484:
+ return ATH12K_EHT_RU_996_484;
+ case HAL_EHT_RU_996x2_484:
+ return ATH12K_EHT_RU_996x2_484;
+ case HAL_EHT_RU_996x3_484:
+ return ATH12K_EHT_RU_996x3_484;
+ case HAL_EHT_RU_996_484_242:
+ return ATH12K_EHT_RU_996_484_242;
+ default:
+ return ATH12K_EHT_RU_INVALID;
+ }
+}
+
+static inline u32
+hal_rx_ul_ofdma_ru_size_to_width(enum ath12k_eht_ru_size ru_size)
+{
+ switch (ru_size) {
+ case ATH12K_EHT_RU_26:
+ return RU_26;
+ case ATH12K_EHT_RU_52:
+ return RU_52;
+ case ATH12K_EHT_RU_52_26:
+ return RU_52_26;
+ case ATH12K_EHT_RU_106:
+ return RU_106;
+ case ATH12K_EHT_RU_106_26:
+ return RU_106_26;
+ case ATH12K_EHT_RU_242:
+ return RU_242;
+ case ATH12K_EHT_RU_484:
+ return RU_484;
+ case ATH12K_EHT_RU_484_242:
+ return RU_484_242;
+ case ATH12K_EHT_RU_996:
+ return RU_996;
+ case ATH12K_EHT_RU_996_484:
+ return RU_996_484;
+ case ATH12K_EHT_RU_996_484_242:
+ return RU_996_484_242;
+ case ATH12K_EHT_RU_996x2:
+ return RU_2X996;
+ case ATH12K_EHT_RU_996x2_484:
+ return RU_2X996_484;
+ case ATH12K_EHT_RU_996x3:
+ return RU_3X996;
+ case ATH12K_EHT_RU_996x3_484:
+ return RU_3X996_484;
+ case ATH12K_EHT_RU_996x4:
+ return RU_4X996;
+ default:
+ return RU_INVALID;
+ }
+}
+
+static void
+ath12k_dp_mon_hal_rx_parse_user_info(const struct hal_receive_user_info *rx_usr_info,
+ u16 user_id,
+ struct hal_rx_mon_ppdu_info *ppdu_info)
+{
+ struct hal_rx_user_status *mon_rx_user_status = NULL;
+ struct hal_rx_radiotap_eht *eht = &ppdu_info->eht_info.eht;
+ enum ath12k_eht_ru_size rtap_ru_size = ATH12K_EHT_RU_INVALID;
+ u32 ru_width, reception_type, ru_index = HAL_EHT_RU_INVALID;
+ u32 ru_type_80_0, ru_start_index_80_0;
+ u32 ru_type_80_1, ru_start_index_80_1;
+ u32 ru_type_80_2, ru_start_index_80_2;
+ u32 ru_type_80_3, ru_start_index_80_3;
+ u32 ru_size = 0, num_80mhz_with_ru = 0;
+ u64 ru_index_320mhz = 0;
+ u32 ru_index_per80mhz;
+
+ reception_type = le32_get_bits(rx_usr_info->info0,
+ HAL_RX_USR_INFO0_RECEPTION_TYPE);
+
+ switch (reception_type) {
+ case HAL_RECEPTION_TYPE_SU:
+ ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_SU;
+ break;
+ case HAL_RECEPTION_TYPE_DL_MU_MIMO:
+ case HAL_RECEPTION_TYPE_UL_MU_MIMO:
+ ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_MU_MIMO;
+ break;
+ case HAL_RECEPTION_TYPE_DL_MU_OFMA:
+ case HAL_RECEPTION_TYPE_UL_MU_OFDMA:
+ ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_MU_OFDMA;
+ break;
+ case HAL_RECEPTION_TYPE_DL_MU_OFDMA_MIMO:
+ case HAL_RECEPTION_TYPE_UL_MU_OFDMA_MIMO:
+ ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_MU_OFDMA_MIMO;
+ }
+
+ ppdu_info->is_stbc = le32_get_bits(rx_usr_info->info0, HAL_RX_USR_INFO0_STBC);
+ ppdu_info->ldpc = le32_get_bits(rx_usr_info->info2, HAL_RX_USR_INFO2_LDPC);
+ ppdu_info->dcm = le32_get_bits(rx_usr_info->info2, HAL_RX_USR_INFO2_STA_DCM);
+ ppdu_info->bw = le32_get_bits(rx_usr_info->info1, HAL_RX_USR_INFO1_RX_BW);
+ ppdu_info->mcs = le32_get_bits(rx_usr_info->info1, HAL_RX_USR_INFO1_MCS);
+ ppdu_info->nss = le32_get_bits(rx_usr_info->info2, HAL_RX_USR_INFO2_NSS) + 1;
+
+ if (user_id < HAL_MAX_UL_MU_USERS) {
+ mon_rx_user_status = &ppdu_info->userstats[user_id];
+ mon_rx_user_status->mcs = ppdu_info->mcs;
+ mon_rx_user_status->nss = ppdu_info->nss;
+ }
+
+ if (!(ppdu_info->reception_type == HAL_RX_RECEPTION_TYPE_MU_MIMO ||
+ ppdu_info->reception_type == HAL_RX_RECEPTION_TYPE_MU_OFDMA ||
+ ppdu_info->reception_type == HAL_RX_RECEPTION_TYPE_MU_OFDMA_MIMO))
+ return;
+
+ /* RU allocation present only for OFDMA reception */
+ ru_type_80_0 = le32_get_bits(rx_usr_info->info2, HAL_RX_USR_INFO2_RU_TYPE_80_0);
+ ru_start_index_80_0 = le32_get_bits(rx_usr_info->info3,
+ HAL_RX_USR_INFO3_RU_START_IDX_80_0);
+ if (ru_type_80_0 != HAL_EHT_RU_NONE) {
+ ru_size += ru_type_80_0;
+ ru_index_per80mhz = ru_start_index_80_0;
+ ru_index = ru_index_per80mhz;
+ ru_index_320mhz |= HAL_RU_PER80(ru_type_80_0, 0, ru_index_per80mhz);
+ num_80mhz_with_ru++;
+ }
+
+ ru_type_80_1 = le32_get_bits(rx_usr_info->info2, HAL_RX_USR_INFO2_RU_TYPE_80_1);
+ ru_start_index_80_1 = le32_get_bits(rx_usr_info->info3,
+ HAL_RX_USR_INFO3_RU_START_IDX_80_1);
+ if (ru_type_80_1 != HAL_EHT_RU_NONE) {
+ ru_size += ru_type_80_1;
+ ru_index_per80mhz = ru_start_index_80_1;
+ ru_index = ru_index_per80mhz;
+ ru_index_320mhz |= HAL_RU_PER80(ru_type_80_1, 1, ru_index_per80mhz);
+ num_80mhz_with_ru++;
+ }
+
+ ru_type_80_2 = le32_get_bits(rx_usr_info->info2, HAL_RX_USR_INFO2_RU_TYPE_80_2);
+ ru_start_index_80_2 = le32_get_bits(rx_usr_info->info3,
+ HAL_RX_USR_INFO3_RU_START_IDX_80_2);
+ if (ru_type_80_2 != HAL_EHT_RU_NONE) {
+ ru_size += ru_type_80_2;
+ ru_index_per80mhz = ru_start_index_80_2;
+ ru_index = ru_index_per80mhz;
+ ru_index_320mhz |= HAL_RU_PER80(ru_type_80_2, 2, ru_index_per80mhz);
+ num_80mhz_with_ru++;
+ }
+
+ ru_type_80_3 = le32_get_bits(rx_usr_info->info2, HAL_RX_USR_INFO2_RU_TYPE_80_3);
+ ru_start_index_80_3 = le32_get_bits(rx_usr_info->info2,
+ HAL_RX_USR_INFO3_RU_START_IDX_80_3);
+ if (ru_type_80_3 != HAL_EHT_RU_NONE) {
+ ru_size += ru_type_80_3;
+ ru_index_per80mhz = ru_start_index_80_3;
+ ru_index = ru_index_per80mhz;
+ ru_index_320mhz |= HAL_RU_PER80(ru_type_80_3, 3, ru_index_per80mhz);
+ num_80mhz_with_ru++;
+ }
+
+ if (num_80mhz_with_ru > 1) {
+ /* Calculate the MRU index */
+ switch (ru_index_320mhz) {
+ case HAL_EHT_RU_996_484_0:
+ case HAL_EHT_RU_996x2_484_0:
+ case HAL_EHT_RU_996x3_484_0:
+ ru_index = 0;
+ break;
+ case HAL_EHT_RU_996_484_1:
+ case HAL_EHT_RU_996x2_484_1:
+ case HAL_EHT_RU_996x3_484_1:
+ ru_index = 1;
+ break;
+ case HAL_EHT_RU_996_484_2:
+ case HAL_EHT_RU_996x2_484_2:
+ case HAL_EHT_RU_996x3_484_2:
+ ru_index = 2;
+ break;
+ case HAL_EHT_RU_996_484_3:
+ case HAL_EHT_RU_996x2_484_3:
+ case HAL_EHT_RU_996x3_484_3:
+ ru_index = 3;
+ break;
+ case HAL_EHT_RU_996_484_4:
+ case HAL_EHT_RU_996x2_484_4:
+ case HAL_EHT_RU_996x3_484_4:
+ ru_index = 4;
+ break;
+ case HAL_EHT_RU_996_484_5:
+ case HAL_EHT_RU_996x2_484_5:
+ case HAL_EHT_RU_996x3_484_5:
+ ru_index = 5;
+ break;
+ case HAL_EHT_RU_996_484_6:
+ case HAL_EHT_RU_996x2_484_6:
+ case HAL_EHT_RU_996x3_484_6:
+ ru_index = 6;
+ break;
+ case HAL_EHT_RU_996_484_7:
+ case HAL_EHT_RU_996x2_484_7:
+ case HAL_EHT_RU_996x3_484_7:
+ ru_index = 7;
+ break;
+ case HAL_EHT_RU_996x2_484_8:
+ ru_index = 8;
+ break;
+ case HAL_EHT_RU_996x2_484_9:
+ ru_index = 9;
+ break;
+ case HAL_EHT_RU_996x2_484_10:
+ ru_index = 10;
+ break;
+ case HAL_EHT_RU_996x2_484_11:
+ ru_index = 11;
+ break;
+ default:
+ ru_index = HAL_EHT_RU_INVALID;
+ break;
+ }
+
+ ru_size += 4;
+ }
+
+ rtap_ru_size = hal_rx_mon_hal_ru_size_to_ath12k_ru_size(ru_size);
+ if (rtap_ru_size != ATH12K_EHT_RU_INVALID) {
+ u32 known, data;
+
+ known = __le32_to_cpu(eht->known);
+ known |= IEEE80211_RADIOTAP_EHT_KNOWN_RU_MRU_SIZE_OM;
+ eht->known = cpu_to_le32(known);
+
+ data = __le32_to_cpu(eht->data[1]);
+ data |= u32_encode_bits(rtap_ru_size,
+ IEEE80211_RADIOTAP_EHT_DATA1_RU_SIZE);
+ eht->data[1] = cpu_to_le32(data);
+ }
+
+ if (ru_index != HAL_EHT_RU_INVALID) {
+ u32 known, data;
+
+ known = __le32_to_cpu(eht->known);
+ known |= IEEE80211_RADIOTAP_EHT_KNOWN_RU_MRU_INDEX_OM;
+ eht->known = cpu_to_le32(known);
+
+ data = __le32_to_cpu(eht->data[1]);
+ data |= u32_encode_bits(rtap_ru_size,
+ IEEE80211_RADIOTAP_EHT_DATA1_RU_INDEX);
+ eht->data[1] = cpu_to_le32(data);
+ }
+
+ if (mon_rx_user_status && ru_index != HAL_EHT_RU_INVALID &&
+ rtap_ru_size != ATH12K_EHT_RU_INVALID) {
+ mon_rx_user_status->ul_ofdma_ru_start_index = ru_index;
+ mon_rx_user_status->ul_ofdma_ru_size = rtap_ru_size;
+
+ ru_width = hal_rx_ul_ofdma_ru_size_to_width(rtap_ru_size);
+
+ mon_rx_user_status->ul_ofdma_ru_width = ru_width;
+ mon_rx_user_status->ofdma_info_valid = 1;
+ }
}
static enum hal_rx_mon_status
-ath12k_dp_mon_rx_parse_status_tlv(struct ath12k_base *ab,
+ath12k_dp_mon_rx_parse_status_tlv(struct ath12k *ar,
struct ath12k_mon_data *pmon,
- u32 tlv_tag, const void *tlv_data,
- u32 userid)
+ const struct hal_tlv_64_hdr *tlv)
{
struct hal_rx_mon_ppdu_info *ppdu_info = &pmon->mon_ppdu_info;
- u32 info[7];
+ const void *tlv_data = tlv->value;
+ u32 info[7], userid;
+ u16 tlv_tag, tlv_len;
+
+ tlv_tag = le64_get_bits(tlv->tl, HAL_TLV_64_HDR_TAG);
+ tlv_len = le64_get_bits(tlv->tl, HAL_TLV_64_HDR_LEN);
+ userid = le64_get_bits(tlv->tl, HAL_TLV_64_USR_ID);
+
+ if (ppdu_info->tlv_aggr.in_progress && ppdu_info->tlv_aggr.tlv_tag != tlv_tag) {
+ ath12k_dp_mon_parse_eht_sig_hdr(ppdu_info, ppdu_info->tlv_aggr.buf);
+
+ ppdu_info->tlv_aggr.in_progress = false;
+ ppdu_info->tlv_aggr.cur_len = 0;
+ }
switch (tlv_tag) {
case HAL_RX_PPDU_START: {
@@ -638,6 +1504,9 @@ ath12k_dp_mon_rx_parse_status_tlv(struct ath12k_base *ab,
ppdu_info->num_mpdu_fcs_err =
u32_get_bits(info[0],
HAL_RX_PPDU_END_USER_STATS_INFO0_MPDU_CNT_FCS_ERR);
+ ppdu_info->peer_id =
+ u32_get_bits(info[0], HAL_RX_PPDU_END_USER_STATS_INFO0_PEER_ID);
+
switch (ppdu_info->preamble_type) {
case HAL_RX_PREAMBLE_11N:
ppdu_info->ht_flags = 1;
@@ -648,6 +1517,9 @@ ath12k_dp_mon_rx_parse_status_tlv(struct ath12k_base *ab,
case HAL_RX_PREAMBLE_11AX:
ppdu_info->he_flags = 1;
break;
+ case HAL_RX_PREAMBLE_11BE:
+ ppdu_info->is_eht = true;
+ break;
default:
break;
}
@@ -655,6 +1527,11 @@ ath12k_dp_mon_rx_parse_status_tlv(struct ath12k_base *ab,
if (userid < HAL_MAX_UL_MU_USERS) {
struct hal_rx_user_status *rxuser_stats =
&ppdu_info->userstats[userid];
+
+ if (ppdu_info->num_mpdu_fcs_ok > 1 ||
+ ppdu_info->num_mpdu_fcs_err > 1)
+ ppdu_info->userstats[userid].ampdu_present = true;
+
ppdu_info->num_users += 1;
ath12k_dp_mon_rx_handle_ofdma_info(eu_stats, rxuser_stats);
@@ -730,6 +1607,17 @@ ath12k_dp_mon_rx_parse_status_tlv(struct ath12k_base *ab,
HAL_RX_PHYRX_RSSI_LEGACY_INFO_INFO0_RX_BW);
break;
}
+ case HAL_PHYRX_OTHER_RECEIVE_INFO: {
+ const struct hal_phyrx_common_user_info *cmn_usr_info = tlv_data;
+
+ ppdu_info->gi = le32_get_bits(cmn_usr_info->info0,
+ HAL_RX_PHY_CMN_USER_INFO0_GI);
+ break;
+ }
+ case HAL_RX_PPDU_START_USER_INFO:
+ ath12k_dp_mon_hal_rx_parse_user_info(tlv_data, userid, ppdu_info);
+ break;
+
case HAL_RXPCU_PPDU_END_INFO: {
const struct hal_rx_ppdu_end_duration *ppdu_rx_duration = tlv_data;
@@ -743,7 +1631,6 @@ ath12k_dp_mon_rx_parse_status_tlv(struct ath12k_base *ab,
}
case HAL_RX_MPDU_START: {
const struct hal_rx_mpdu_start *mpdu_start = tlv_data;
- struct dp_mon_mpdu *mon_mpdu = pmon->mon_mpdu;
u16 peer_id;
info[1] = __le32_to_cpu(mpdu_start->info1);
@@ -756,68 +1643,38 @@ ath12k_dp_mon_rx_parse_status_tlv(struct ath12k_base *ab,
if (userid < HAL_MAX_UL_MU_USERS) {
info[0] = __le32_to_cpu(mpdu_start->info0);
ppdu_info->userid = userid;
- ppdu_info->ampdu_id[userid] =
- u32_get_bits(info[0], HAL_RX_MPDU_START_INFO1_PEERID);
+ ppdu_info->userstats[userid].ampdu_id =
+ u32_get_bits(info[0], HAL_RX_MPDU_START_INFO0_PPDU_ID);
}
- mon_mpdu = kzalloc(sizeof(*mon_mpdu), GFP_ATOMIC);
- if (!mon_mpdu)
- return HAL_RX_MON_STATUS_PPDU_NOT_DONE;
-
break;
}
case HAL_RX_MSDU_START:
/* TODO: add msdu start parsing logic */
break;
- case HAL_MON_BUF_ADDR: {
- struct dp_rxdma_mon_ring *buf_ring = &ab->dp.rxdma_mon_buf_ring;
- const struct dp_mon_packet_info *packet_info = tlv_data;
- int buf_id = u32_get_bits(packet_info->cookie,
- DP_RXDMA_BUF_COOKIE_BUF_ID);
- struct sk_buff *msdu;
- struct dp_mon_mpdu *mon_mpdu = pmon->mon_mpdu;
- struct ath12k_skb_rxcb *rxcb;
-
- spin_lock_bh(&buf_ring->idr_lock);
- msdu = idr_remove(&buf_ring->bufs_idr, buf_id);
- spin_unlock_bh(&buf_ring->idr_lock);
-
- if (unlikely(!msdu)) {
- ath12k_warn(ab, "monitor destination with invalid buf_id %d\n",
- buf_id);
- return HAL_RX_MON_STATUS_PPDU_NOT_DONE;
+ case HAL_MON_BUF_ADDR:
+ return HAL_RX_MON_STATUS_BUF_ADDR;
+ case HAL_RX_MSDU_END:
+ return HAL_RX_MON_STATUS_MSDU_END;
+ case HAL_RX_MPDU_END:
+ return HAL_RX_MON_STATUS_MPDU_END;
+ case HAL_PHYRX_GENERIC_U_SIG:
+ ath12k_dp_mon_hal_rx_parse_u_sig_hdr(tlv_data, ppdu_info);
+ break;
+ case HAL_PHYRX_GENERIC_EHT_SIG:
+ /* Handle the case where aggregation is in progress
+ * or the current TLV is one of the TLVs which should be
+ * aggregated
+ */
+ if (!ppdu_info->tlv_aggr.in_progress) {
+ ppdu_info->tlv_aggr.in_progress = true;
+ ppdu_info->tlv_aggr.tlv_tag = tlv_tag;
+ ppdu_info->tlv_aggr.cur_len = 0;
}
- rxcb = ATH12K_SKB_RXCB(msdu);
- dma_unmap_single(ab->dev, rxcb->paddr,
- msdu->len + skb_tailroom(msdu),
- DMA_FROM_DEVICE);
-
- if (mon_mpdu->tail)
- mon_mpdu->tail->next = msdu;
- else
- mon_mpdu->tail = msdu;
-
- ath12k_dp_mon_buf_replenish(ab, buf_ring, 1);
+ ppdu_info->is_eht = true;
- break;
- }
- case HAL_RX_MSDU_END: {
- const struct rx_msdu_end_qcn9274 *msdu_end = tlv_data;
- bool is_first_msdu_in_mpdu;
- u16 msdu_end_info;
-
- msdu_end_info = __le16_to_cpu(msdu_end->info5);
- is_first_msdu_in_mpdu = u32_get_bits(msdu_end_info,
- RX_MSDU_END_INFO5_FIRST_MSDU);
- if (is_first_msdu_in_mpdu) {
- pmon->mon_mpdu->head = pmon->mon_mpdu->tail;
- pmon->mon_mpdu->tail = NULL;
- }
- break;
- }
- case HAL_RX_MPDU_END:
- list_add_tail(&pmon->mon_mpdu->list, &pmon->dp_rx_mon_mpdu_list);
+ ath12k_dp_mon_hal_aggr_tlv(ppdu_info, tlv_len, tlv_data);
break;
case HAL_DUMMY:
return HAL_RX_MON_STATUS_BUF_DONE;
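
HAL_PHYRX_GENERIC_EHT_SIG payloads can arrive split across several TLVs, so the parser above accumulates them into ppdu_info->tlv_aggr and only hands the buffer to ath12k_dp_mon_parse_eht_sig_hdr() once a TLV with a different tag shows up. A standalone sketch of that aggregation idea, with made-up tags and sizes:

/* Standalone sketch of TLV aggregation: keep appending payload bytes while
 * the tag stays the same, flush the buffer when a different tag shows up.
 */
#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define AGGR_BUF_SIZE 128

struct tlv_aggr {
	int in_progress;
	uint16_t tag;
	uint16_t cur_len;
	uint8_t buf[AGGR_BUF_SIZE];
};

static void aggr_flush(struct tlv_aggr *a)
{
	if (a->in_progress)
		printf("flush tag 0x%04x, %u bytes\n", a->tag, a->cur_len);
	a->in_progress = 0;
	a->cur_len = 0;
}

static void aggr_feed(struct tlv_aggr *a, uint16_t tag,
		      const uint8_t *payload, uint16_t len)
{
	if (a->in_progress && a->tag != tag)
		aggr_flush(a);          /* new tag: hand off what we have */

	if (!a->in_progress) {
		a->in_progress = 1;
		a->tag = tag;
		a->cur_len = 0;
	}

	if (len > AGGR_BUF_SIZE - a->cur_len)
		len = AGGR_BUF_SIZE - a->cur_len;   /* clamp, never overflow */
	memcpy(&a->buf[a->cur_len], payload, len);
	a->cur_len += len;
}

int main(void)
{
	struct tlv_aggr a = { 0 };
	uint8_t frag[8] = { 0 };

	aggr_feed(&a, 0x1234, frag, sizeof(frag));  /* first fragment */
	aggr_feed(&a, 0x1234, frag, sizeof(frag));  /* same tag: append */
	aggr_feed(&a, 0x5678, frag, sizeof(frag));  /* new tag: flush first */
	aggr_flush(&a);
	return 0;
}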
@@ -844,7 +1701,7 @@ static void ath12k_dp_mon_rx_msdus_set_payload(struct ath12k *ar,
}
static struct sk_buff *
-ath12k_dp_mon_rx_merg_msdus(struct ath12k *ar, u32 mac_id,
+ath12k_dp_mon_rx_merg_msdus(struct ath12k *ar,
struct sk_buff *head_msdu, struct sk_buff *tail_msdu,
struct ieee80211_rx_status *rxs, bool *fcs_err)
{
@@ -1005,18 +1862,70 @@ static void ath12k_dp_mon_update_radiotap(struct ath12k *ar,
{
struct ieee80211_supported_band *sband;
u8 *ptr = NULL;
- u16 ampdu_id = ppduinfo->ampdu_id[ppduinfo->userid];
rxs->flag |= RX_FLAG_MACTIME_START;
rxs->signal = ppduinfo->rssi_comb + ATH12K_DEFAULT_NOISE_FLOOR;
rxs->nss = ppduinfo->nss + 1;
- if (ampdu_id) {
+ if (ppduinfo->userstats[ppduinfo->userid].ampdu_present) {
rxs->flag |= RX_FLAG_AMPDU_DETAILS;
- rxs->ampdu_reference = ampdu_id;
+ rxs->ampdu_reference = ppduinfo->userstats[ppduinfo->userid].ampdu_id;
}
- if (ppduinfo->he_mu_flags) {
+ if (ppduinfo->is_eht || ppduinfo->eht_usig) {
+ struct ieee80211_radiotap_tlv *tlv;
+ struct ieee80211_radiotap_eht *eht;
+ struct ieee80211_radiotap_eht_usig *usig;
+ u16 len = 0, i, eht_len, usig_len;
+ u8 user;
+
+ if (ppduinfo->is_eht) {
+ eht_len = struct_size(eht,
+ user_info,
+ ppduinfo->eht_info.num_user_info);
+ len += sizeof(*tlv) + eht_len;
+ }
+
+ if (ppduinfo->eht_usig) {
+ usig_len = sizeof(*usig);
+ len += sizeof(*tlv) + usig_len;
+ }
+
+ rxs->flag |= RX_FLAG_RADIOTAP_TLV_AT_END;
+ rxs->encoding = RX_ENC_EHT;
+
+ skb_reset_mac_header(mon_skb);
+
+ tlv = skb_push(mon_skb, len);
+
+ if (ppduinfo->is_eht) {
+ tlv->type = cpu_to_le16(IEEE80211_RADIOTAP_EHT);
+ tlv->len = cpu_to_le16(eht_len);
+
+ eht = (struct ieee80211_radiotap_eht *)tlv->data;
+ eht->known = ppduinfo->eht_info.eht.known;
+
+ for (i = 0;
+ i < ARRAY_SIZE(eht->data) &&
+ i < ARRAY_SIZE(ppduinfo->eht_info.eht.data);
+ i++)
+ eht->data[i] = ppduinfo->eht_info.eht.data[i];
+
+ for (user = 0; user < ppduinfo->eht_info.num_user_info; user++)
+ put_unaligned_le32(ppduinfo->eht_info.user_info[user],
+ &eht->user_info[user]);
+
+ tlv = (struct ieee80211_radiotap_tlv *)&tlv->data[eht_len];
+ }
+
+ if (ppduinfo->eht_usig) {
+ tlv->type = cpu_to_le16(IEEE80211_RADIOTAP_EHT_USIG);
+ tlv->len = cpu_to_le16(usig_len);
+
+ usig = (struct ieee80211_radiotap_eht_usig *)tlv->data;
+ *usig = ppduinfo->u_sig_info.usig;
+ }
+ } else if (ppduinfo->he_mu_flags) {
rxs->flag |= RX_FLAG_RADIOTAP_HE_MU;
rxs->encoding = RX_ENC_HE;
ptr = skb_push(mon_skb, sizeof(struct ieee80211_radiotap_he_mu));
@@ -1125,7 +2034,7 @@ static void ath12k_dp_mon_rx_deliver_msdu(struct ath12k *ar, struct napi_struct
ieee80211_rx_napi(ath12k_ar_to_hw(ar), pubsta, msdu, napi);
}
-static int ath12k_dp_mon_rx_deliver(struct ath12k *ar, u32 mac_id,
+static int ath12k_dp_mon_rx_deliver(struct ath12k *ar,
struct sk_buff *head_msdu, struct sk_buff *tail_msdu,
struct hal_rx_mon_ppdu_info *ppduinfo,
struct napi_struct *napi)
@@ -1135,7 +2044,7 @@ static int ath12k_dp_mon_rx_deliver(struct ath12k *ar, u32 mac_id,
struct ieee80211_rx_status *rxs = &dp->rx_status;
bool fcs_err = false;
- mon_skb = ath12k_dp_mon_rx_merg_msdus(ar, mac_id,
+ mon_skb = ath12k_dp_mon_rx_merg_msdus(ar,
head_msdu, tail_msdu,
rxs, &fcs_err);
if (!mon_skb)
@@ -1180,24 +2089,18 @@ mon_deliver_fail:
}
static enum hal_rx_mon_status
-ath12k_dp_mon_parse_rx_dest(struct ath12k_base *ab, struct ath12k_mon_data *pmon,
+ath12k_dp_mon_parse_rx_dest(struct ath12k *ar, struct ath12k_mon_data *pmon,
struct sk_buff *skb)
{
- struct hal_rx_mon_ppdu_info *ppdu_info = &pmon->mon_ppdu_info;
struct hal_tlv_64_hdr *tlv;
+ struct ath12k_skb_rxcb *rxcb;
enum hal_rx_mon_status hal_status;
- u32 tlv_userid;
u16 tlv_tag, tlv_len;
u8 *ptr = skb->data;
- memset(ppdu_info, 0, sizeof(struct hal_rx_mon_ppdu_info));
-
do {
tlv = (struct hal_tlv_64_hdr *)ptr;
tlv_tag = le64_get_bits(tlv->tl, HAL_TLV_64_HDR_TAG);
- tlv_len = le64_get_bits(tlv->tl, HAL_TLV_64_HDR_LEN);
- tlv_userid = le64_get_bits(tlv->tl, HAL_TLV_64_USR_ID);
- ptr += sizeof(*tlv);
/* The actual length of PPDU_END is the combined length of many PHY
* TLVs that follow. Skip the TLV header and
@@ -1207,16 +2110,24 @@ ath12k_dp_mon_parse_rx_dest(struct ath12k_base *ab, struct ath12k_mon_data *pmon
if (tlv_tag == HAL_RX_PPDU_END)
tlv_len = sizeof(struct hal_rx_rxpcu_classification_overview);
+ else
+ tlv_len = le64_get_bits(tlv->tl, HAL_TLV_64_HDR_LEN);
- hal_status = ath12k_dp_mon_rx_parse_status_tlv(ab, pmon,
- tlv_tag, ptr, tlv_userid);
- ptr += tlv_len;
+ hal_status = ath12k_dp_mon_rx_parse_status_tlv(ar, pmon, tlv);
+ ptr += sizeof(*tlv) + tlv_len;
ptr = PTR_ALIGN(ptr, HAL_TLV_64_ALIGN);
if ((ptr - skb->data) >= DP_RX_BUFFER_SIZE)
break;
- } while (hal_status == HAL_RX_MON_STATUS_PPDU_NOT_DONE);
+ } while ((hal_status == HAL_RX_MON_STATUS_PPDU_NOT_DONE) ||
+ (hal_status == HAL_RX_MON_STATUS_BUF_ADDR) ||
+ (hal_status == HAL_RX_MON_STATUS_MPDU_END) ||
+ (hal_status == HAL_RX_MON_STATUS_MSDU_END));
+
+ rxcb = ATH12K_SKB_RXCB(skb);
+ if (rxcb->is_end_of_ppdu)
+ hal_status = HAL_RX_MON_STATUS_PPDU_DONE;
return hal_status;
}
@@ -1224,18 +2135,16 @@ ath12k_dp_mon_parse_rx_dest(struct ath12k_base *ab, struct ath12k_mon_data *pmon
enum hal_rx_mon_status
ath12k_dp_mon_rx_parse_mon_status(struct ath12k *ar,
struct ath12k_mon_data *pmon,
- int mac_id,
struct sk_buff *skb,
struct napi_struct *napi)
{
- struct ath12k_base *ab = ar->ab;
struct hal_rx_mon_ppdu_info *ppdu_info = &pmon->mon_ppdu_info;
struct dp_mon_mpdu *tmp;
struct dp_mon_mpdu *mon_mpdu = pmon->mon_mpdu;
struct sk_buff *head_msdu, *tail_msdu;
enum hal_rx_mon_status hal_status = HAL_RX_MON_STATUS_BUF_DONE;
- ath12k_dp_mon_parse_rx_dest(ab, pmon, skb);
+ ath12k_dp_mon_parse_rx_dest(ar, pmon, skb);
list_for_each_entry_safe(mon_mpdu, tmp, &pmon->dp_rx_mon_mpdu_list, list) {
list_del(&mon_mpdu->list);
@@ -1243,7 +2152,7 @@ ath12k_dp_mon_rx_parse_mon_status(struct ath12k *ar,
tail_msdu = mon_mpdu->tail;
if (head_msdu && tail_msdu) {
- ath12k_dp_mon_rx_deliver(ar, mac_id, head_msdu,
+ ath12k_dp_mon_rx_deliver(ar, head_msdu,
tail_msdu, ppdu_info, napi);
}
@@ -1924,7 +2833,7 @@ ath12k_dp_mon_tx_status_get_num_user(u16 tlv_tag,
}
static void
-ath12k_dp_mon_tx_process_ppdu_info(struct ath12k *ar, int mac_id,
+ath12k_dp_mon_tx_process_ppdu_info(struct ath12k *ar,
struct napi_struct *napi,
struct dp_mon_tx_ppdu_info *tx_ppdu_info)
{
@@ -1938,7 +2847,7 @@ ath12k_dp_mon_tx_process_ppdu_info(struct ath12k *ar, int mac_id,
tail_msdu = mon_mpdu->tail;
if (head_msdu)
- ath12k_dp_mon_rx_deliver(ar, mac_id, head_msdu, tail_msdu,
+ ath12k_dp_mon_rx_deliver(ar, head_msdu, tail_msdu,
&tx_ppdu_info->rx_status, napi);
kfree(mon_mpdu);
@@ -1948,7 +2857,6 @@ ath12k_dp_mon_tx_process_ppdu_info(struct ath12k *ar, int mac_id,
enum hal_rx_mon_status
ath12k_dp_mon_tx_parse_mon_status(struct ath12k *ar,
struct ath12k_mon_data *pmon,
- int mac_id,
struct sk_buff *skb,
struct napi_struct *napi,
u32 ppdu_id)
@@ -1995,155 +2903,43 @@ ath12k_dp_mon_tx_parse_mon_status(struct ath12k *ar,
break;
} while (tlv_status != DP_MON_TX_FES_STATUS_END);
- ath12k_dp_mon_tx_process_ppdu_info(ar, mac_id, napi, tx_data_ppdu_info);
- ath12k_dp_mon_tx_process_ppdu_info(ar, mac_id, napi, tx_prot_ppdu_info);
+ ath12k_dp_mon_tx_process_ppdu_info(ar, napi, tx_data_ppdu_info);
+ ath12k_dp_mon_tx_process_ppdu_info(ar, napi, tx_prot_ppdu_info);
return tlv_status;
}
-int ath12k_dp_mon_srng_process(struct ath12k *ar, int mac_id, int *budget,
- enum dp_monitor_mode monitor_mode,
- struct napi_struct *napi)
-{
- struct hal_mon_dest_desc *mon_dst_desc;
- struct ath12k_pdev_dp *pdev_dp = &ar->dp;
- struct ath12k_mon_data *pmon = (struct ath12k_mon_data *)&pdev_dp->mon_data;
- struct ath12k_base *ab = ar->ab;
- struct ath12k_dp *dp = &ab->dp;
- struct sk_buff *skb;
- struct ath12k_skb_rxcb *rxcb;
- struct dp_srng *mon_dst_ring;
- struct hal_srng *srng;
- struct dp_rxdma_mon_ring *buf_ring;
- u64 cookie;
- u32 ppdu_id;
- int num_buffs_reaped = 0, srng_id, buf_id;
- u8 dest_idx = 0, i;
- bool end_of_ppdu;
- struct hal_rx_mon_ppdu_info *ppdu_info;
- struct ath12k_peer *peer = NULL;
-
- ppdu_info = &pmon->mon_ppdu_info;
- memset(ppdu_info, 0, sizeof(*ppdu_info));
- ppdu_info->peer_id = HAL_INVALID_PEERID;
-
- srng_id = ath12k_hw_mac_id_to_srng_id(ab->hw_params, mac_id);
-
- if (monitor_mode == ATH12K_DP_RX_MONITOR_MODE) {
- mon_dst_ring = &pdev_dp->rxdma_mon_dst_ring[srng_id];
- buf_ring = &dp->rxdma_mon_buf_ring;
- } else {
- return 0;
- }
-
- srng = &ab->hal.srng_list[mon_dst_ring->ring_id];
-
- spin_lock_bh(&srng->lock);
- ath12k_hal_srng_access_begin(ab, srng);
-
- while (likely(*budget)) {
- *budget -= 1;
- mon_dst_desc = ath12k_hal_srng_dst_peek(ab, srng);
- if (unlikely(!mon_dst_desc))
- break;
-
- cookie = le32_to_cpu(mon_dst_desc->cookie);
- buf_id = u32_get_bits(cookie, DP_RXDMA_BUF_COOKIE_BUF_ID);
-
- spin_lock_bh(&buf_ring->idr_lock);
- skb = idr_remove(&buf_ring->bufs_idr, buf_id);
- spin_unlock_bh(&buf_ring->idr_lock);
-
- if (unlikely(!skb)) {
- ath12k_warn(ab, "monitor destination with invalid buf_id %d\n",
- buf_id);
- goto move_next;
- }
-
- rxcb = ATH12K_SKB_RXCB(skb);
- dma_unmap_single(ab->dev, rxcb->paddr,
- skb->len + skb_tailroom(skb),
- DMA_FROM_DEVICE);
-
- pmon->dest_skb_q[dest_idx] = skb;
- dest_idx++;
- ppdu_id = le32_to_cpu(mon_dst_desc->ppdu_id);
- end_of_ppdu = le32_get_bits(mon_dst_desc->info0,
- HAL_MON_DEST_INFO0_END_OF_PPDU);
- if (!end_of_ppdu)
- continue;
-
- for (i = 0; i < dest_idx; i++) {
- skb = pmon->dest_skb_q[i];
-
- if (monitor_mode == ATH12K_DP_RX_MONITOR_MODE)
- ath12k_dp_mon_rx_parse_mon_status(ar, pmon, mac_id,
- skb, napi);
- else
- ath12k_dp_mon_tx_parse_mon_status(ar, pmon, mac_id,
- skb, napi, ppdu_id);
-
- peer = ath12k_peer_find_by_id(ab, ppdu_info->peer_id);
-
- if (!peer || !peer->sta) {
- ath12k_dbg(ab, ATH12K_DBG_DATA,
- "failed to find the peer with peer_id %d\n",
- ppdu_info->peer_id);
- dev_kfree_skb_any(skb);
- continue;
- }
-
- dev_kfree_skb_any(skb);
- pmon->dest_skb_q[i] = NULL;
- }
-
- dest_idx = 0;
-move_next:
- ath12k_dp_mon_buf_replenish(ab, buf_ring, 1);
- ath12k_hal_srng_src_get_next_entry(ab, srng);
- num_buffs_reaped++;
- }
-
- ath12k_hal_srng_access_end(ab, srng);
- spin_unlock_bh(&srng->lock);
-
- return num_buffs_reaped;
-}
-
static void
ath12k_dp_mon_rx_update_peer_rate_table_stats(struct ath12k_rx_peer_stats *rx_stats,
struct hal_rx_mon_ppdu_info *ppdu_info,
struct hal_rx_user_status *user_stats,
u32 num_msdu)
{
- u32 rate_idx = 0;
+ struct ath12k_rx_peer_rate_stats *stats;
u32 mcs_idx = (user_stats) ? user_stats->mcs : ppdu_info->mcs;
u32 nss_idx = (user_stats) ? user_stats->nss - 1 : ppdu_info->nss - 1;
u32 bw_idx = ppdu_info->bw;
u32 gi_idx = ppdu_info->gi;
+ u32 len;
- if ((mcs_idx > HAL_RX_MAX_MCS_HE) || (nss_idx >= HAL_RX_MAX_NSS) ||
- (bw_idx >= HAL_RX_BW_MAX) || (gi_idx >= HAL_RX_GI_MAX)) {
+ if (mcs_idx > HAL_RX_MAX_MCS_HT || nss_idx >= HAL_RX_MAX_NSS ||
+ bw_idx >= HAL_RX_BW_MAX || gi_idx >= HAL_RX_GI_MAX) {
return;
}
- if (ppdu_info->preamble_type == HAL_RX_PREAMBLE_11N ||
- ppdu_info->preamble_type == HAL_RX_PREAMBLE_11AC) {
- rate_idx = mcs_idx * 8 + 8 * 10 * nss_idx;
- rate_idx += bw_idx * 2 + gi_idx;
- } else if (ppdu_info->preamble_type == HAL_RX_PREAMBLE_11AX) {
+ if (ppdu_info->preamble_type == HAL_RX_PREAMBLE_11AX ||
+ ppdu_info->preamble_type == HAL_RX_PREAMBLE_11BE)
gi_idx = ath12k_he_gi_to_nl80211_he_gi(ppdu_info->gi);
- rate_idx = mcs_idx * 12 + 12 * 12 * nss_idx;
- rate_idx += bw_idx * 3 + gi_idx;
- } else {
- return;
- }
- rx_stats->pkt_stats.rx_rate[rate_idx] += num_msdu;
+ rx_stats->pkt_stats.rx_rate[bw_idx][gi_idx][nss_idx][mcs_idx] += num_msdu;
+ stats = &rx_stats->byte_stats;
+
if (user_stats)
- rx_stats->byte_stats.rx_rate[rate_idx] += user_stats->mpdu_ok_byte_count;
+ len = user_stats->mpdu_ok_byte_count;
else
- rx_stats->byte_stats.rx_rate[rate_idx] += ppdu_info->mpdu_len;
+ len = ppdu_info->mpdu_len;
+
+ stats->rx_rate[bw_idx][gi_idx][nss_idx][mcs_idx] += len;
}
static void ath12k_dp_mon_rx_update_peer_su_stats(struct ath12k *ar,
@@ -2157,6 +2953,7 @@ static void ath12k_dp_mon_rx_update_peer_su_stats(struct ath12k *ar,
return;
arsta->rssi_comb = ppdu_info->rssi_comb;
+ ewma_avg_rssi_add(&arsta->avg_rssi, ppdu_info->rssi_comb);
num_msdu = ppdu_info->tcp_msdu_count + ppdu_info->tcp_ack_msdu_count +
ppdu_info->udp_msdu_count + ppdu_info->other_msdu_count;
@@ -2229,6 +3026,12 @@ static void ath12k_dp_mon_rx_update_peer_su_stats(struct ath12k *ar,
rx_stats->byte_stats.he_mcs_count[ppdu_info->mcs] += ppdu_info->mpdu_len;
}
+ if (ppdu_info->preamble_type == HAL_RX_PREAMBLE_11BE &&
+ ppdu_info->mcs <= HAL_RX_MAX_MCS_BE) {
+ rx_stats->pkt_stats.be_mcs_count[ppdu_info->mcs] += num_msdu;
+ rx_stats->byte_stats.be_mcs_count[ppdu_info->mcs] += ppdu_info->mpdu_len;
+ }
+
if ((ppdu_info->preamble_type == HAL_RX_PREAMBLE_11A ||
ppdu_info->preamble_type == HAL_RX_PREAMBLE_11B) &&
ppdu_info->rate < HAL_RX_LEGACY_RATE_INVALID) {
@@ -2329,6 +3132,7 @@ ath12k_dp_mon_rx_update_user_stats(struct ath12k *ar,
return;
arsta->rssi_comb = ppdu_info->rssi_comb;
+ ewma_avg_rssi_add(&arsta->avg_rssi, ppdu_info->rssi_comb);
num_msdu = user_stats->tcp_msdu_count + user_stats->tcp_ack_msdu_count +
user_stats->udp_msdu_count + user_stats->other_msdu_count;
@@ -2415,8 +3219,15 @@ ath12k_dp_mon_rx_update_peer_mu_stats(struct ath12k *ar,
ath12k_dp_mon_rx_update_user_stats(ar, ppdu_info, i);
}
-int ath12k_dp_mon_rx_process_stats(struct ath12k *ar, int mac_id,
- struct napi_struct *napi, int *budget)
+static void
+ath12k_dp_mon_rx_memset_ppdu_info(struct hal_rx_mon_ppdu_info *ppdu_info)
+{
+ memset(ppdu_info, 0, sizeof(*ppdu_info));
+ ppdu_info->peer_id = HAL_INVALID_PEERID;
+}
+
+int ath12k_dp_mon_srng_process(struct ath12k *ar, int *budget,
+ struct napi_struct *napi)
{
struct ath12k_base *ab = ar->ab;
struct ath12k_pdev_dp *pdev_dp = &ar->dp;
@@ -2432,13 +3243,14 @@ int ath12k_dp_mon_rx_process_stats(struct ath12k *ar, int mac_id,
struct ath12k_sta *ahsta = NULL;
struct ath12k_link_sta *arsta;
struct ath12k_peer *peer;
+ struct sk_buff_head skb_list;
u64 cookie;
int num_buffs_reaped = 0, srng_id, buf_id;
- u8 dest_idx = 0, i;
- bool end_of_ppdu;
- u32 hal_status;
+ u32 hal_status, end_offset, info0, end_reason;
+ u8 pdev_idx = ath12k_hw_mac_id_to_pdev_id(ab->hw_params, ar->pdev_idx);
- srng_id = ath12k_hw_mac_id_to_srng_id(ab->hw_params, mac_id);
+ __skb_queue_head_init(&skb_list);
+ srng_id = ath12k_hw_mac_id_to_srng_id(ab->hw_params, pdev_idx);
mon_dst_ring = &pdev_dp->rxdma_mon_dst_ring[srng_id];
buf_ring = &dp->rxdma_mon_buf_ring;
@@ -2451,6 +3263,15 @@ int ath12k_dp_mon_rx_process_stats(struct ath12k *ar, int mac_id,
mon_dst_desc = ath12k_hal_srng_dst_peek(ab, srng);
if (unlikely(!mon_dst_desc))
break;
+
+ /* In case of empty descriptor, the cookie in the ring descriptor
+ * is invalid. Therefore, this entry is skipped, and ring processing
+ * continues.
+ */
+ info0 = le32_to_cpu(mon_dst_desc->info0);
+ if (u32_get_bits(info0, HAL_MON_DEST_INFO0_EMPTY_DESC))
+ goto move_next;
+
cookie = le32_to_cpu(mon_dst_desc->cookie);
buf_id = u32_get_bits(cookie, DP_RXDMA_BUF_COOKIE_BUF_ID);
@@ -2468,63 +3289,102 @@ int ath12k_dp_mon_rx_process_stats(struct ath12k *ar, int mac_id,
dma_unmap_single(ab->dev, rxcb->paddr,
skb->len + skb_tailroom(skb),
DMA_FROM_DEVICE);
- pmon->dest_skb_q[dest_idx] = skb;
- dest_idx++;
- end_of_ppdu = le32_get_bits(mon_dst_desc->info0,
- HAL_MON_DEST_INFO0_END_OF_PPDU);
- if (!end_of_ppdu)
- continue;
- for (i = 0; i < dest_idx; i++) {
- skb = pmon->dest_skb_q[i];
- hal_status = ath12k_dp_mon_parse_rx_dest(ab, pmon, skb);
+ end_reason = u32_get_bits(info0, HAL_MON_DEST_INFO0_END_REASON);
- if (ppdu_info->peer_id == HAL_INVALID_PEERID ||
- hal_status != HAL_RX_MON_STATUS_PPDU_DONE) {
- dev_kfree_skb_any(skb);
- continue;
- }
-
- rcu_read_lock();
- spin_lock_bh(&ab->base_lock);
- peer = ath12k_peer_find_by_id(ab, ppdu_info->peer_id);
- if (!peer || !peer->sta) {
- ath12k_dbg(ab, ATH12K_DBG_DATA,
- "failed to find the peer with peer_id %d\n",
- ppdu_info->peer_id);
- spin_unlock_bh(&ab->base_lock);
- rcu_read_unlock();
- dev_kfree_skb_any(skb);
- continue;
- }
+ /* HAL_MON_FLUSH_DETECTED implies that an rx flush was received at the end
+ * of the rx PPDU, and HAL_MON_PPDU_TRUNCATED implies that the PPDU got
+ * truncated due to a system level error. In both cases the buffer data
+ * can be discarded.
+ */
+ if ((end_reason == HAL_MON_FLUSH_DETECTED) ||
+ (end_reason == HAL_MON_PPDU_TRUNCATED)) {
+ ath12k_dbg(ab, ATH12K_DBG_DATA,
+ "Monitor dest descriptor end reason %d", end_reason);
+ dev_kfree_skb_any(skb);
+ goto move_next;
+ }
- if (ppdu_info->reception_type == HAL_RX_RECEPTION_TYPE_SU) {
- ahsta = ath12k_sta_to_ahsta(peer->sta);
- arsta = &ahsta->deflink;
- ath12k_dp_mon_rx_update_peer_su_stats(ar, arsta,
- ppdu_info);
- } else if ((ppdu_info->fc_valid) &&
- (ppdu_info->ast_index != HAL_AST_IDX_INVALID)) {
- ath12k_dp_mon_rx_process_ulofdma(ppdu_info);
- ath12k_dp_mon_rx_update_peer_mu_stats(ar, ppdu_info);
- }
+ /* Decrement the budget only when the ring descriptor carries
+ * HAL_MON_END_OF_PPDU, so that one PPDU worth of data is always
+ * reaped. This helps to utilize the NAPI budget efficiently.
+ */
+ if (end_reason == HAL_MON_END_OF_PPDU) {
+ *budget -= 1;
+ rxcb->is_end_of_ppdu = true;
+ }
- spin_unlock_bh(&ab->base_lock);
- rcu_read_unlock();
- dev_kfree_skb_any(skb);
- memset(ppdu_info, 0, sizeof(*ppdu_info));
- ppdu_info->peer_id = HAL_INVALID_PEERID;
+ end_offset = u32_get_bits(info0, HAL_MON_DEST_INFO0_END_OFFSET);
+ if (likely(end_offset <= DP_RX_BUFFER_SIZE)) {
+ skb_put(skb, end_offset);
+ } else {
+ ath12k_warn(ab,
+ "invalid offset on mon stats destination %u\n",
+ end_offset);
+ skb_put(skb, DP_RX_BUFFER_SIZE);
}
- dest_idx = 0;
+ __skb_queue_tail(&skb_list, skb);
+
move_next:
ath12k_dp_mon_buf_replenish(ab, buf_ring, 1);
- ath12k_hal_srng_src_get_next_entry(ab, srng);
+ ath12k_hal_srng_dst_get_next_entry(ab, srng);
num_buffs_reaped++;
}
ath12k_hal_srng_access_end(ab, srng);
spin_unlock_bh(&srng->lock);
+
+ if (!num_buffs_reaped)
+ return 0;
+
+ /* In some cases, one PPDU worth of data can be spread across multiple NAPI
+ * schedules. To avoid losing the already parsed ppdu_info information, skip
+ * the memset of the ppdu_info structure and continue processing it.
+ */
+ if (!ppdu_info->ppdu_continuation)
+ ath12k_dp_mon_rx_memset_ppdu_info(ppdu_info);
+
+ while ((skb = __skb_dequeue(&skb_list))) {
+ hal_status = ath12k_dp_mon_parse_rx_dest(ar, pmon, skb);
+ if (hal_status != HAL_RX_MON_STATUS_PPDU_DONE) {
+ ppdu_info->ppdu_continuation = true;
+ dev_kfree_skb_any(skb);
+ continue;
+ }
+
+ if (ppdu_info->peer_id == HAL_INVALID_PEERID)
+ goto free_skb;
+
+ rcu_read_lock();
+ spin_lock_bh(&ab->base_lock);
+ peer = ath12k_peer_find_by_id(ab, ppdu_info->peer_id);
+ if (!peer || !peer->sta) {
+ ath12k_dbg(ab, ATH12K_DBG_DATA,
+ "failed to find the peer with monitor peer_id %d\n",
+ ppdu_info->peer_id);
+ goto next_skb;
+ }
+
+ if (ppdu_info->reception_type == HAL_RX_RECEPTION_TYPE_SU) {
+ ahsta = ath12k_sta_to_ahsta(peer->sta);
+ arsta = &ahsta->deflink;
+ ath12k_dp_mon_rx_update_peer_su_stats(ar, arsta,
+ ppdu_info);
+ } else if ((ppdu_info->fc_valid) &&
+ (ppdu_info->ast_index != HAL_AST_IDX_INVALID)) {
+ ath12k_dp_mon_rx_process_ulofdma(ppdu_info);
+ ath12k_dp_mon_rx_update_peer_mu_stats(ar, ppdu_info);
+ }
+
+next_skb:
+ spin_unlock_bh(&ab->base_lock);
+ rcu_read_unlock();
+free_skb:
+ dev_kfree_skb_any(skb);
+ ath12k_dp_mon_rx_memset_ppdu_info(ppdu_info);
+ }
+
return num_buffs_reaped;
}
@@ -2535,11 +3395,10 @@ int ath12k_dp_mon_process_ring(struct ath12k_base *ab, int mac_id,
struct ath12k *ar = ath12k_ab_to_ar(ab, mac_id);
int num_buffs_reaped = 0;
- if (!ar->monitor_started)
- ath12k_dp_mon_rx_process_stats(ar, mac_id, napi, &budget);
- else
- num_buffs_reaped = ath12k_dp_mon_srng_process(ar, mac_id, &budget,
- monitor_mode, napi);
+ if (ab->hw_params->rxdma1_enable) {
+ if (monitor_mode == ATH12K_DP_RX_MONITOR_MODE)
+ num_buffs_reaped = ath12k_dp_mon_srng_process(ar, &budget, napi);
+ }
return num_buffs_reaped;
}
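
The reworked ring loop above now drives everything off the destination descriptor's info0 word: an empty-descriptor flag, a 2-bit end reason and a 12-bit end offset used for skb_put(). A standalone sketch of that mask-and-shift decoding, with field positions mirroring the HAL_MON_DEST_INFO0_* definitions added later in this patch:

/* Standalone sketch of decoding a monitor destination info0 word. */
#include <stdint.h>
#include <stdio.h>

#define INFO0_END_OFFSET 0x00000fffu   /* GENMASK(11, 0) */
#define INFO0_END_REASON 0x00030000u   /* GENMASK(17, 16) */
#define INFO0_EMPTY_DESC 0x00080000u   /* BIT(19) */

static uint32_t get_bits(uint32_t word, uint32_t mask)
{
	return (word & mask) >> __builtin_ctz(mask);
}

int main(void)
{
	uint32_t info0 = 0x000207c0u;   /* made-up descriptor word */

	if (get_bits(info0, INFO0_EMPTY_DESC)) {
		puts("empty descriptor, skip");
		return 0;
	}

	printf("end reason %u, end offset %u bytes\n",
	       get_bits(info0, INFO0_END_REASON),
	       get_bits(info0, INFO0_END_OFFSET));
	return 0;
}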
diff --git a/drivers/net/wireless/ath/ath12k/dp_mon.h b/drivers/net/wireless/ath/ath12k/dp_mon.h
index fb9e9c176ce5..e4368eb42aca 100644
--- a/drivers/net/wireless/ath/ath12k/dp_mon.h
+++ b/drivers/net/wireless/ath/ath12k/dp_mon.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2019-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH12K_DP_MON_H
@@ -77,14 +77,11 @@ struct dp_mon_tx_ppdu_info {
enum hal_rx_mon_status
ath12k_dp_mon_rx_parse_mon_status(struct ath12k *ar,
struct ath12k_mon_data *pmon,
- int mac_id, struct sk_buff *skb,
+ struct sk_buff *skb,
struct napi_struct *napi);
int ath12k_dp_mon_buf_replenish(struct ath12k_base *ab,
struct dp_rxdma_mon_ring *buf_ring,
int req_entries);
-int ath12k_dp_mon_srng_process(struct ath12k *ar, int mac_id,
- int *budget, enum dp_monitor_mode monitor_mode,
- struct napi_struct *napi);
int ath12k_dp_mon_process_ring(struct ath12k_base *ab, int mac_id,
struct napi_struct *napi, int budget,
enum dp_monitor_mode monitor_mode);
@@ -96,11 +93,9 @@ ath12k_dp_mon_tx_status_get_num_user(u16 tlv_tag,
enum hal_rx_mon_status
ath12k_dp_mon_tx_parse_mon_status(struct ath12k *ar,
struct ath12k_mon_data *pmon,
- int mac_id,
struct sk_buff *skb,
struct napi_struct *napi,
u32 ppdu_id);
void ath12k_dp_mon_rx_process_ulofdma(struct hal_rx_mon_ppdu_info *ppdu_info);
-int ath12k_dp_mon_rx_process_stats(struct ath12k *ar, int mac_id,
- struct napi_struct *napi, int *budget);
+int ath12k_dp_mon_srng_process(struct ath12k *ar, int *budget, struct napi_struct *napi);
#endif
diff --git a/drivers/net/wireless/ath/ath12k/dp_rx.c b/drivers/net/wireless/ath/ath12k/dp_rx.c
index dad35bfd83f6..ff6a709b5042 100644
--- a/drivers/net/wireless/ath/ath12k/dp_rx.c
+++ b/drivers/net/wireless/ath/ath12k/dp_rx.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/ieee80211.h>
@@ -2392,6 +2392,23 @@ static void ath12k_dp_rx_h_rate(struct ath12k *ar, struct hal_rx_desc *rx_desc,
rx_status->he_gi = ath12k_he_gi_to_nl80211_he_gi(sgi);
rx_status->bw = ath12k_mac_bw_to_mac80211_bw(bw);
break;
+ case RX_MSDU_START_PKT_TYPE_11BE:
+ rx_status->rate_idx = rate_mcs;
+
+ if (rate_mcs > ATH12K_EHT_MCS_MAX) {
+ ath12k_warn(ar->ab,
+ "Received with invalid mcs in EHT mode %d\n",
+ rate_mcs);
+ break;
+ }
+
+ rx_status->encoding = RX_ENC_EHT;
+ rx_status->nss = nss;
+ rx_status->eht.gi = ath12k_mac_eht_gi_to_nl80211_eht_gi(sgi);
+ rx_status->bw = ath12k_mac_bw_to_mac80211_bw(bw);
+ break;
+ default:
+ break;
}
}
@@ -2486,7 +2503,7 @@ static void ath12k_dp_rx_deliver_msdu(struct ath12k *ar, struct napi_struct *nap
spin_unlock_bh(&ab->base_lock);
ath12k_dbg(ab, ATH12K_DBG_DATA,
- "rx skb %p len %u peer %pM %d %s sn %u %s%s%s%s%s%s%s%s%s rate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
+ "rx skb %p len %u peer %pM %d %s sn %u %s%s%s%s%s%s%s%s%s%s rate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
msdu,
msdu->len,
peer ? peer->addr : NULL,
@@ -2497,6 +2514,7 @@ static void ath12k_dp_rx_deliver_msdu(struct ath12k *ar, struct napi_struct *nap
(status->encoding == RX_ENC_HT) ? "ht" : "",
(status->encoding == RX_ENC_VHT) ? "vht" : "",
(status->encoding == RX_ENC_HE) ? "he" : "",
+ (status->encoding == RX_ENC_EHT) ? "eht" : "",
(status->bw == RATE_INFO_BW_40) ? "40" : "",
(status->bw == RATE_INFO_BW_80) ? "80" : "",
(status->bw == RATE_INFO_BW_160) ? "160" : "",
@@ -2530,6 +2548,29 @@ static void ath12k_dp_rx_deliver_msdu(struct ath12k *ar, struct napi_struct *nap
ieee80211_rx_napi(ath12k_ar_to_hw(ar), pubsta, msdu, napi);
}
+static bool ath12k_dp_rx_check_nwifi_hdr_len_valid(struct ath12k_base *ab,
+ struct hal_rx_desc *rx_desc,
+ struct sk_buff *msdu)
+{
+ struct ieee80211_hdr *hdr;
+ u8 decap_type;
+ u32 hdr_len;
+
+ decap_type = ath12k_dp_rx_h_decap_type(ab, rx_desc);
+ if (decap_type != DP_RX_DECAP_TYPE_NATIVE_WIFI)
+ return true;
+
+ hdr = (struct ieee80211_hdr *)msdu->data;
+ hdr_len = ieee80211_hdrlen(hdr->frame_control);
+
+ if ((likely(hdr_len <= DP_MAX_NWIFI_HDR_LEN)))
+ return true;
+
+ ab->soc_stats.invalid_rbm++;
+ WARN_ON_ONCE(1);
+ return false;
+}
+
static int ath12k_dp_rx_process_msdu(struct ath12k *ar,
struct sk_buff *msdu,
struct sk_buff_head *msdu_list,
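
ath12k_dp_rx_check_nwifi_hdr_len_valid() above only guards native-wifi decapped frames: it derives the 802.11 header length from the frame control field and rejects the MSDU if it exceeds the driver's maximum. A simplified standalone sketch of that bound check (data frames only; the real code uses ieee80211_hdrlen() and DP_MAX_NWIFI_HDR_LEN):

/* Simplified standalone sketch of an 802.11 header length bound check. */
#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

#define MAX_NWIFI_HDR_LEN 30   /* illustrative cap, not the driver's value */

static unsigned int hdrlen_data(uint16_t fc)
{
	unsigned int len = 24;                  /* base 3-address header */

	if ((fc & 0x0300) == 0x0300)            /* ToDS and FromDS set */
		len += 6;                       /* fourth address */
	if (fc & 0x0080)                        /* QoS data subtype bit */
		len += 2;                       /* QoS control field */
	return len;
}

static bool hdr_len_valid(uint16_t fc)
{
	return hdrlen_data(fc) <= MAX_NWIFI_HDR_LEN;
}

int main(void)
{
	printf("plain data: %s\n", hdr_len_valid(0x0008) ? "ok" : "drop");
	printf("QoS + 4addr: %s\n", hdr_len_valid(0x0388) ? "ok" : "drop");
	return 0;
}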
@@ -2588,6 +2629,11 @@ static int ath12k_dp_rx_process_msdu(struct ath12k *ar,
}
}
+ if (unlikely(!ath12k_dp_rx_check_nwifi_hdr_len_valid(ab, rx_desc, msdu))) {
+ ret = -EINVAL;
+ goto free_out;
+ }
+
ath12k_dp_rx_h_ppdu(ar, rx_desc, rx_status);
ath12k_dp_rx_h_mpdu(ar, msdu, rx_desc, rx_status);
@@ -2978,6 +3024,9 @@ mic_fail:
RX_FLAG_IV_STRIPPED | RX_FLAG_DECRYPTED;
skb_pull(msdu, hal_rx_desc_sz);
+ if (unlikely(!ath12k_dp_rx_check_nwifi_hdr_len_valid(ab, rx_desc, msdu)))
+ return -EINVAL;
+
ath12k_dp_rx_h_ppdu(ar, rx_desc, rxs);
ath12k_dp_rx_h_undecap(ar, msdu, rx_desc,
HAL_ENCRYPT_TYPE_TKIP_MIC, rxs, true);
@@ -3720,6 +3769,9 @@ static int ath12k_dp_rx_h_null_q_desc(struct ath12k *ar, struct sk_buff *msdu,
skb_put(msdu, hal_rx_desc_sz + l3pad_bytes + msdu_len);
skb_pull(msdu, hal_rx_desc_sz + l3pad_bytes);
}
+ if (unlikely(!ath12k_dp_rx_check_nwifi_hdr_len_valid(ab, desc, msdu)))
+ return -EINVAL;
+
ath12k_dp_rx_h_ppdu(ar, desc, status);
ath12k_dp_rx_h_mpdu(ar, msdu, desc, status);
@@ -3764,7 +3816,7 @@ static bool ath12k_dp_rx_h_reo_err(struct ath12k *ar, struct sk_buff *msdu,
return drop;
}
-static void ath12k_dp_rx_h_tkip_mic_err(struct ath12k *ar, struct sk_buff *msdu,
+static bool ath12k_dp_rx_h_tkip_mic_err(struct ath12k *ar, struct sk_buff *msdu,
struct ieee80211_rx_status *status)
{
struct ath12k_base *ab = ar->ab;
@@ -3782,6 +3834,9 @@ static void ath12k_dp_rx_h_tkip_mic_err(struct ath12k *ar, struct sk_buff *msdu,
skb_put(msdu, hal_rx_desc_sz + l3pad_bytes + msdu_len);
skb_pull(msdu, hal_rx_desc_sz + l3pad_bytes);
+ if (unlikely(!ath12k_dp_rx_check_nwifi_hdr_len_valid(ab, desc, msdu)))
+ return true;
+
ath12k_dp_rx_h_ppdu(ar, desc, status);
status->flag |= (RX_FLAG_MMIC_STRIPPED | RX_FLAG_MMIC_ERROR |
@@ -3789,6 +3844,7 @@ static void ath12k_dp_rx_h_tkip_mic_err(struct ath12k *ar, struct sk_buff *msdu,
ath12k_dp_rx_h_undecap(ar, msdu, desc,
HAL_ENCRYPT_TYPE_TKIP_MIC, status, false);
+ return false;
}
static bool ath12k_dp_rx_h_rxdma_err(struct ath12k *ar, struct sk_buff *msdu,
@@ -3807,7 +3863,7 @@ static bool ath12k_dp_rx_h_rxdma_err(struct ath12k *ar, struct sk_buff *msdu,
case HAL_REO_ENTR_RING_RXDMA_ECODE_TKIP_MIC_ERR:
err_bitmap = ath12k_dp_rx_h_mpdu_err(ab, rx_desc);
if (err_bitmap & HAL_RX_MPDU_ERR_TKIP_MIC) {
- ath12k_dp_rx_h_tkip_mic_err(ar, msdu, status);
+ drop = ath12k_dp_rx_h_tkip_mic_err(ar, msdu, status);
break;
}
fallthrough;
@@ -4032,7 +4088,7 @@ int ath12k_dp_rx_process_wbm_err(struct ath12k_base *ab,
hw_links[hw_link_id].pdev_idx);
ar = partner_ab->pdevs[pdev_id].ar;
- if (!ar || !rcu_dereference(ar->ab->pdevs_active[hw_link_id])) {
+ if (!ar || !rcu_dereference(ar->ab->pdevs_active[pdev_id])) {
dev_kfree_skb_any(msdu);
continue;
}
diff --git a/drivers/net/wireless/ath/ath12k/dp_rx.h b/drivers/net/wireless/ath/ath12k/dp_rx.h
index 1ce82088c954..88e42365a9d8 100644
--- a/drivers/net/wireless/ath/ath12k/dp_rx.h
+++ b/drivers/net/wireless/ath/ath12k/dp_rx.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH12K_DP_RX_H
#define ATH12K_DP_RX_H
@@ -79,6 +79,9 @@ static inline u32 ath12k_he_gi_to_nl80211_he_gi(u8 sgi)
case RX_MSDU_START_SGI_3_2_US:
ret = NL80211_RATE_INFO_HE_GI_3_2;
break;
+ default:
+ ret = NL80211_RATE_INFO_HE_GI_0_8;
+ break;
}
return ret;
@@ -135,9 +138,6 @@ u32 ath12k_dp_rx_h_mpdu_err(struct ath12k_base *ab,
struct hal_rx_desc *desc);
void ath12k_dp_rx_h_ppdu(struct ath12k *ar, struct hal_rx_desc *rx_desc,
struct ieee80211_rx_status *rx_status);
-struct ath12k_peer *
-ath12k_dp_rx_h_find_peer(struct ath12k_base *ab, struct sk_buff *msdu);
-
int ath12k_dp_rxdma_ring_sel_config_qcn9274(struct ath12k_base *ab);
int ath12k_dp_rxdma_ring_sel_config_wcn7850(struct ath12k_base *ab);
diff --git a/drivers/net/wireless/ath/ath12k/dp_tx.c b/drivers/net/wireless/ath/ath12k/dp_tx.c
index a8d341a6df01..ced232bf4aed 100644
--- a/drivers/net/wireless/ath/ath12k/dp_tx.c
+++ b/drivers/net/wireless/ath/ath12k/dp_tx.c
@@ -1,13 +1,15 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "core.h"
#include "dp_tx.h"
#include "debug.h"
#include "hw.h"
+#include "peer.h"
+#include "mac.h"
static enum hal_tcl_encap_type
ath12k_dp_tx_get_encap_type(struct ath12k_link_vif *arvif, struct sk_buff *skb)
@@ -117,7 +119,7 @@ static void ath12k_hal_tx_cmd_ext_desc_setup(struct ath12k_base *ab,
le32_encode_bits(ti->data_len,
HAL_TX_MSDU_EXT_INFO1_BUF_LEN);
- tcl_ext_cmd->info1 = le32_encode_bits(1, HAL_TX_MSDU_EXT_INFO1_EXTN_OVERRIDE) |
+ tcl_ext_cmd->info1 |= le32_encode_bits(1, HAL_TX_MSDU_EXT_INFO1_EXTN_OVERRIDE) |
le32_encode_bits(ti->encap_type,
HAL_TX_MSDU_EXT_INFO1_ENCAP_TYPE) |
le32_encode_bits(ti->encrypt_type,
@@ -217,7 +219,7 @@ out:
}
int ath12k_dp_tx(struct ath12k *ar, struct ath12k_link_vif *arvif,
- struct sk_buff *skb)
+ struct sk_buff *skb, bool gsn_valid, int mcbc_gsn)
{
struct ath12k_base *ab = ar->ab;
struct ath12k_dp *dp = &ab->dp;
@@ -290,13 +292,27 @@ tcl_ring_sel:
msdu_ext_desc = true;
}
+ if (gsn_valid) {
+ /* Reset and initialize meta_data_flags with the Global Sequence
+ * Number (GSN) info.
+ */
+ ti.meta_data_flags =
+ u32_encode_bits(HTT_TCL_META_DATA_TYPE_GLOBAL_SEQ_NUM,
+ HTT_TCL_META_DATA_TYPE) |
+ u32_encode_bits(mcbc_gsn, HTT_TCL_META_DATA_GLOBAL_SEQ_NUM);
+ }
+
ti.encap_type = ath12k_dp_tx_get_encap_type(arvif, skb);
ti.addr_search_flags = arvif->hal_addr_search_flags;
ti.search_type = arvif->search_type;
ti.type = HAL_TCL_DESC_TYPE_BUFFER;
ti.pkt_offset = 0;
ti.lmac_id = ar->lmac_id;
+
ti.vdev_id = arvif->vdev_id;
+ if (gsn_valid)
+ ti.vdev_id += HTT_TX_MLO_MCAST_HOST_REINJECT_BASE_VDEV_ID;
+
ti.bss_ast_hash = arvif->ast_hash;
ti.bss_ast_idx = arvif->ast_idx;
ti.dscp_tid_tbl_idx = 0;
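
When gsn_valid is set (MLO multicast reinjection, judging by the HTT_TX_MLO_MCAST_HOST_REINJECT_BASE_VDEV_ID offset), the hunk above overwrites ti.meta_data_flags with a global-sequence-number metadata type plus the GSN itself, and shifts the vdev_id into the host-reinject range. A standalone sketch of that packing, with placeholder field widths rather than the real HTT layout:

/* Standalone sketch of packing a GSN into a TCL metadata word. */
#include <stdint.h>
#include <stdio.h>

#define META_TYPE_MASK       0x00000003u   /* placeholder widths */
#define META_GSN_MASK        0x00003ffcu
#define META_TYPE_GLOBAL_SEQ 3u
#define MCAST_REINJECT_BASE_VDEV_ID 128u   /* illustrative base */

static uint32_t encode_bits(uint32_t val, uint32_t mask)
{
	return (val << __builtin_ctz(mask)) & mask;
}

int main(void)
{
	uint32_t vdev_id = 2, gsn = 0x1a5;
	uint32_t meta;

	/* Overwrite (not OR into) the metadata word with type + GSN. */
	meta = encode_bits(META_TYPE_GLOBAL_SEQ, META_TYPE_MASK) |
	       encode_bits(gsn, META_GSN_MASK);

	/* Multicast frames are reinjected via an offset vdev range. */
	vdev_id += MCAST_REINJECT_BASE_VDEV_ID;

	printf("meta 0x%08x vdev %u\n", meta, vdev_id);
	return 0;
}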
@@ -368,6 +384,7 @@ map:
add_htt_metadata = true;
msdu_ext_desc = true;
ti.flags0 |= u32_encode_bits(1, HAL_TCL_DATA_CMD_INFO2_TO_FW);
+ ti.meta_data_flags |= HTT_TCL_META_DATA_VALID_HTT;
ti.encap_type = HAL_TCL_ENCAP_TYPE_RAW;
ti.encrypt_type = HAL_ENCRYPT_TYPE_OPEN;
}
@@ -398,6 +415,7 @@ map:
if (ret < 0) {
ath12k_dbg(ab, ATH12K_DBG_DP_TX,
"Failed to add HTT meta data, dropping packet\n");
+ kfree_skb(skb_ext_desc);
goto fail_unmap_dma;
}
}
@@ -558,13 +576,13 @@ ath12k_dp_tx_process_htt_tx_complete(struct ath12k_base *ab,
switch (wbm_status) {
case HAL_WBM_REL_HTT_TX_COMP_STATUS_OK:
- case HAL_WBM_REL_HTT_TX_COMP_STATUS_DROP:
- case HAL_WBM_REL_HTT_TX_COMP_STATUS_TTL:
ts.acked = (wbm_status == HAL_WBM_REL_HTT_TX_COMP_STATUS_OK);
ts.ack_rssi = le32_get_bits(status_desc->info2,
HTT_TX_WBM_COMP_INFO2_ACK_RSSI);
ath12k_dp_tx_htt_tx_complete_buf(ab, msdu, tx_ring, &ts);
break;
+ case HAL_WBM_REL_HTT_TX_COMP_STATUS_DROP:
+ case HAL_WBM_REL_HTT_TX_COMP_STATUS_TTL:
case HAL_WBM_REL_HTT_TX_COMP_STATUS_REINJ:
case HAL_WBM_REL_HTT_TX_COMP_STATUS_INSPECT:
ath12k_dp_tx_free_txbuf(ab, msdu, mac_id, tx_ring);
@@ -580,6 +598,124 @@ ath12k_dp_tx_process_htt_tx_complete(struct ath12k_base *ab,
}
}
+static void ath12k_dp_tx_update_txcompl(struct ath12k *ar, struct hal_tx_status *ts)
+{
+ struct ath12k_base *ab = ar->ab;
+ struct ath12k_peer *peer;
+ struct ieee80211_sta *sta;
+ struct ath12k_sta *ahsta;
+ struct ath12k_link_sta *arsta;
+ struct rate_info txrate = {0};
+ u16 rate, ru_tones;
+ u8 rate_idx = 0;
+ int ret;
+
+ spin_lock_bh(&ab->base_lock);
+ peer = ath12k_peer_find_by_id(ab, ts->peer_id);
+ if (!peer || !peer->sta) {
+ ath12k_dbg(ab, ATH12K_DBG_DP_TX,
+ "failed to find the peer by id %u\n", ts->peer_id);
+ spin_unlock_bh(&ab->base_lock);
+ return;
+ }
+ sta = peer->sta;
+ ahsta = ath12k_sta_to_ahsta(sta);
+ arsta = &ahsta->deflink;
+
+ /* Prefer the real NSS value from arsta->last_txrate.nss; if it is
+ * invalid, fall back to the NSS value negotiated during association.
+ */
+ if (arsta->last_txrate.nss)
+ txrate.nss = arsta->last_txrate.nss;
+ else
+ txrate.nss = arsta->peer_nss;
+ spin_unlock_bh(&ab->base_lock);
+
+ switch (ts->pkt_type) {
+ case HAL_TX_RATE_STATS_PKT_TYPE_11A:
+ case HAL_TX_RATE_STATS_PKT_TYPE_11B:
+ ret = ath12k_mac_hw_ratecode_to_legacy_rate(ts->mcs,
+ ts->pkt_type,
+ &rate_idx,
+ &rate);
+ if (ret < 0) {
+ ath12k_warn(ab, "Invalid tx legacy rate %d\n", ret);
+ return;
+ }
+
+ txrate.legacy = rate;
+ break;
+ case HAL_TX_RATE_STATS_PKT_TYPE_11N:
+ if (ts->mcs > ATH12K_HT_MCS_MAX) {
+ ath12k_warn(ab, "Invalid HT mcs index %d\n", ts->mcs);
+ return;
+ }
+
+ if (txrate.nss != 0)
+ txrate.mcs = ts->mcs + 8 * (txrate.nss - 1);
+
+ txrate.flags = RATE_INFO_FLAGS_MCS;
+
+ if (ts->sgi)
+ txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
+ break;
+ case HAL_TX_RATE_STATS_PKT_TYPE_11AC:
+ if (ts->mcs > ATH12K_VHT_MCS_MAX) {
+ ath12k_warn(ab, "Invalid VHT mcs index %d\n", ts->mcs);
+ return;
+ }
+
+ txrate.mcs = ts->mcs;
+ txrate.flags = RATE_INFO_FLAGS_VHT_MCS;
+
+ if (ts->sgi)
+ txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
+ break;
+ case HAL_TX_RATE_STATS_PKT_TYPE_11AX:
+ if (ts->mcs > ATH12K_HE_MCS_MAX) {
+ ath12k_warn(ab, "Invalid HE mcs index %d\n", ts->mcs);
+ return;
+ }
+
+ txrate.mcs = ts->mcs;
+ txrate.flags = RATE_INFO_FLAGS_HE_MCS;
+ txrate.he_gi = ath12k_he_gi_to_nl80211_he_gi(ts->sgi);
+ break;
+ case HAL_TX_RATE_STATS_PKT_TYPE_11BE:
+ if (ts->mcs > ATH12K_EHT_MCS_MAX) {
+ ath12k_warn(ab, "Invalid EHT mcs index %d\n", ts->mcs);
+ return;
+ }
+
+ txrate.mcs = ts->mcs;
+ txrate.flags = RATE_INFO_FLAGS_EHT_MCS;
+ txrate.eht_gi = ath12k_mac_eht_gi_to_nl80211_eht_gi(ts->sgi);
+ break;
+ default:
+ ath12k_warn(ab, "Invalid tx pkt type: %d\n", ts->pkt_type);
+ return;
+ }
+
+ txrate.bw = ath12k_mac_bw_to_mac80211_bw(ts->bw);
+
+ if (ts->ofdma && ts->pkt_type == HAL_TX_RATE_STATS_PKT_TYPE_11AX) {
+ txrate.bw = RATE_INFO_BW_HE_RU;
+ ru_tones = ath12k_mac_he_convert_tones_to_ru_tones(ts->tones);
+ txrate.he_ru_alloc =
+ ath12k_he_ru_tones_to_nl80211_he_ru_alloc(ru_tones);
+ }
+
+ if (ts->ofdma && ts->pkt_type == HAL_TX_RATE_STATS_PKT_TYPE_11BE) {
+ txrate.bw = RATE_INFO_BW_EHT_RU;
+ txrate.eht_ru_alloc =
+ ath12k_mac_eht_ru_tones_to_nl80211_eht_ru_alloc(ts->tones);
+ }
+
+ spin_lock_bh(&ab->base_lock);
+ arsta->txrate = txrate;
+ spin_unlock_bh(&ab->base_lock);
+}
+
static void ath12k_dp_tx_complete_msdu(struct ath12k *ar,
struct sk_buff *msdu,
struct hal_tx_status *ts)
@@ -658,6 +794,8 @@ static void ath12k_dp_tx_complete_msdu(struct ath12k *ar,
* Might end up reporting it out-of-band from HTT stats.
*/
+ ath12k_dp_tx_update_txcompl(ar, ts);
+
ieee80211_tx_status_skb(ath12k_ar_to_hw(ar), msdu);
exit:
@@ -668,6 +806,8 @@ static void ath12k_dp_tx_status_parse(struct ath12k_base *ab,
struct hal_wbm_completion_ring_tx *desc,
struct hal_tx_status *ts)
{
+ u32 info0 = le32_to_cpu(desc->rate_stats.info0);
+
ts->buf_rel_source =
le32_get_bits(desc->info0, HAL_WBM_COMPL_TX_INFO0_REL_SRC_MODULE);
if (ts->buf_rel_source != HAL_WBM_REL_SRC_MODULE_FW &&
@@ -682,10 +822,17 @@ static void ath12k_dp_tx_status_parse(struct ath12k_base *ab,
ts->ppdu_id = le32_get_bits(desc->info1,
HAL_WBM_COMPL_TX_INFO1_TQM_STATUS_NUMBER);
- if (le32_to_cpu(desc->rate_stats.info0) & HAL_TX_RATE_STATS_INFO0_VALID)
- ts->rate_stats = le32_to_cpu(desc->rate_stats.info0);
- else
- ts->rate_stats = 0;
+
+ ts->peer_id = le32_get_bits(desc->info3, HAL_WBM_COMPL_TX_INFO3_PEER_ID);
+
+ if (info0 & HAL_TX_RATE_STATS_INFO0_VALID) {
+ ts->pkt_type = u32_get_bits(info0, HAL_TX_RATE_STATS_INFO0_PKT_TYPE);
+ ts->mcs = u32_get_bits(info0, HAL_TX_RATE_STATS_INFO0_MCS);
+ ts->sgi = u32_get_bits(info0, HAL_TX_RATE_STATS_INFO0_SGI);
+ ts->bw = u32_get_bits(info0, HAL_TX_RATE_STATS_INFO0_BW);
+ ts->tones = u32_get_bits(info0, HAL_TX_RATE_STATS_INFO0_TONES_IN_RU);
+ ts->ofdma = u32_get_bits(info0, HAL_TX_RATE_STATS_INFO0_OFDMA_TX);
+ }
}
void ath12k_dp_tx_completion_handler(struct ath12k_base *ab, int ring_id)
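
ath12k_dp_tx_status_parse() now unpacks the whole rate_stats.info0 word into discrete fields (pkt_type, mcs, sgi, bw, tones, ofdma) instead of stashing the raw value. A standalone sketch of that unpacking, with placeholder masks in place of the HAL_TX_RATE_STATS_INFO0_* definitions:

/* Standalone sketch of splitting a tx rate-stats word into fields. */
#include <stdint.h>
#include <stdio.h>

#define RATE_VALID 0x00000001u   /* placeholder field layout */
#define RATE_MCS   0x0000001eu
#define RATE_NSS   0x000000e0u
#define RATE_BW    0x00000700u
#define RATE_OFDMA 0x00000800u

static uint32_t get_bits(uint32_t word, uint32_t mask)
{
	return (word & mask) >> __builtin_ctz(mask);
}

struct tx_rate {
	uint8_t mcs, nss, bw, ofdma;
};

int main(void)
{
	uint32_t info0 = 0x00000b4fu;   /* made-up completion word */
	struct tx_rate r = { 0 };

	if (info0 & RATE_VALID) {
		r.mcs = get_bits(info0, RATE_MCS);
		r.nss = get_bits(info0, RATE_NSS);
		r.bw = get_bits(info0, RATE_BW);
		r.ofdma = get_bits(info0, RATE_OFDMA);
	}

	printf("mcs %u nss %u bw %u ofdma %u\n", r.mcs, r.nss, r.bw, r.ofdma);
	return 0;
}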
@@ -814,7 +961,7 @@ ath12k_dp_tx_get_ring_id_type(struct ath12k_base *ab,
*htt_ring_type = HTT_HW_TO_SW_RING;
break;
case HAL_RXDMA_MONITOR_BUF:
- *htt_ring_id = HTT_RXDMA_MONITOR_BUF_RING;
+ *htt_ring_id = HTT_RX_MON_HOST2MON_BUF_RING;
*htt_ring_type = HTT_SW_TO_HW_RING;
break;
case HAL_RXDMA_MONITOR_STATUS:
@@ -822,7 +969,7 @@ ath12k_dp_tx_get_ring_id_type(struct ath12k_base *ab,
*htt_ring_type = HTT_SW_TO_HW_RING;
break;
case HAL_RXDMA_MONITOR_DST:
- *htt_ring_id = HTT_RXDMA_MONITOR_DEST_RING;
+ *htt_ring_id = HTT_RX_MON_MON2HOST_DEST_RING;
*htt_ring_type = HTT_HW_TO_SW_RING;
break;
case HAL_RXDMA_MONITOR_DESC:
@@ -971,7 +1118,14 @@ int ath12k_dp_tx_htt_h2t_ver_req_msg(struct ath12k_base *ab)
skb_put(skb, len);
cmd = (struct htt_ver_req_cmd *)skb->data;
cmd->ver_reg_info = le32_encode_bits(HTT_H2T_MSG_TYPE_VERSION_REQ,
- HTT_VER_REQ_INFO_MSG_ID);
+ HTT_OPTION_TAG);
+
+ cmd->tcl_metadata_version = le32_encode_bits(HTT_TAG_TCL_METADATA_VERSION,
+ HTT_OPTION_TAG) |
+ le32_encode_bits(HTT_TCL_METADATA_VER_SZ,
+ HTT_OPTION_LEN) |
+ le32_encode_bits(HTT_OPTION_TCL_METADATA_VER_V2,
+ HTT_OPTION_VALUE);
ret = ath12k_htc_send(&ab->htc, dp->eid, skb);
if (ret) {
@@ -1077,15 +1231,46 @@ int ath12k_dp_tx_htt_rx_filter_setup(struct ath12k_base *ab, u32 ring_id,
cmd->info0 |= le32_encode_bits(!!(params.flags & HAL_SRNG_FLAGS_DATA_TLV_SWAP),
HTT_RX_RING_SELECTION_CFG_CMD_INFO0_PS);
cmd->info0 |= le32_encode_bits(tlv_filter->offset_valid,
- HTT_RX_RING_SELECTION_CFG_CMD_OFFSET_VALID);
+ HTT_RX_RING_SELECTION_CFG_CMD_INFO0_OFFSET_VALID);
+ cmd->info0 |=
+ le32_encode_bits(tlv_filter->drop_threshold_valid,
+ HTT_RX_RING_SELECTION_CFG_CMD_INFO0_DROP_THRES_VAL);
+ cmd->info0 |= le32_encode_bits(!tlv_filter->rxmon_disable,
+ HTT_RX_RING_SELECTION_CFG_CMD_INFO0_EN_RXMON);
+
cmd->info1 = le32_encode_bits(rx_buf_size,
HTT_RX_RING_SELECTION_CFG_CMD_INFO1_BUF_SIZE);
+ cmd->info1 |= le32_encode_bits(tlv_filter->conf_len_mgmt,
+ HTT_RX_RING_SELECTION_CFG_CMD_INFO1_CONF_LEN_MGMT);
+ cmd->info1 |= le32_encode_bits(tlv_filter->conf_len_ctrl,
+ HTT_RX_RING_SELECTION_CFG_CMD_INFO1_CONF_LEN_CTRL);
+ cmd->info1 |= le32_encode_bits(tlv_filter->conf_len_data,
+ HTT_RX_RING_SELECTION_CFG_CMD_INFO1_CONF_LEN_DATA);
cmd->pkt_type_en_flags0 = cpu_to_le32(tlv_filter->pkt_filter_flags0);
cmd->pkt_type_en_flags1 = cpu_to_le32(tlv_filter->pkt_filter_flags1);
cmd->pkt_type_en_flags2 = cpu_to_le32(tlv_filter->pkt_filter_flags2);
cmd->pkt_type_en_flags3 = cpu_to_le32(tlv_filter->pkt_filter_flags3);
cmd->rx_filter_tlv = cpu_to_le32(tlv_filter->rx_filter);
+ cmd->info2 = le32_encode_bits(tlv_filter->rx_drop_threshold,
+ HTT_RX_RING_SELECTION_CFG_CMD_INFO2_DROP_THRESHOLD);
+ cmd->info2 |=
+ le32_encode_bits(tlv_filter->enable_log_mgmt_type,
+ HTT_RX_RING_SELECTION_CFG_CMD_INFO2_EN_LOG_MGMT_TYPE);
+ cmd->info2 |=
+ le32_encode_bits(tlv_filter->enable_log_ctrl_type,
+ HTT_RX_RING_SELECTION_CFG_CMD_INFO2_EN_CTRL_TYPE);
+ cmd->info2 |=
+ le32_encode_bits(tlv_filter->enable_log_data_type,
+ HTT_RX_RING_SELECTION_CFG_CMD_INFO2_EN_LOG_DATA_TYPE);
+
+ cmd->info3 =
+ le32_encode_bits(tlv_filter->enable_rx_tlv_offset,
+ HTT_RX_RING_SELECTION_CFG_CMD_INFO3_EN_TLV_PKT_OFFSET);
+ cmd->info3 |=
+ le32_encode_bits(tlv_filter->rx_tlv_offset,
+ HTT_RX_RING_SELECTION_CFG_CMD_INFO3_PKT_TLV_OFFSET);
+
if (tlv_filter->offset_valid) {
cmd->rx_packet_offset =
le32_encode_bits(tlv_filter->rx_packet_offset,
@@ -1210,15 +1395,28 @@ int ath12k_dp_tx_htt_monitor_mode_ring_config(struct ath12k *ar, bool reset)
int ath12k_dp_tx_htt_rx_monitor_mode_ring_config(struct ath12k *ar, bool reset)
{
struct ath12k_base *ab = ar->ab;
- struct ath12k_dp *dp = &ab->dp;
struct htt_rx_ring_tlv_filter tlv_filter = {0};
- int ret, ring_id;
+ int ret, ring_id, i;
- ring_id = dp->rxdma_mon_buf_ring.refill_buf_ring.ring_id;
tlv_filter.offset_valid = false;
if (!reset) {
- tlv_filter.rx_filter = HTT_RX_MON_FILTER_TLV_FLAGS_MON_BUF_RING;
+ tlv_filter.rx_filter = HTT_RX_MON_FILTER_TLV_FLAGS_MON_DEST_RING;
+
+ tlv_filter.drop_threshold_valid = true;
+ tlv_filter.rx_drop_threshold = HTT_RX_RING_TLV_DROP_THRESHOLD_VALUE;
+
+ tlv_filter.enable_log_mgmt_type = true;
+ tlv_filter.enable_log_ctrl_type = true;
+ tlv_filter.enable_log_data_type = true;
+
+ tlv_filter.conf_len_ctrl = HTT_RX_RING_DEFAULT_DMA_LENGTH;
+ tlv_filter.conf_len_mgmt = HTT_RX_RING_DEFAULT_DMA_LENGTH;
+ tlv_filter.conf_len_data = HTT_RX_RING_DEFAULT_DMA_LENGTH;
+
+ tlv_filter.enable_rx_tlv_offset = true;
+ tlv_filter.rx_tlv_offset = HTT_RX_RING_PKT_TLV_OFFSET;
+
tlv_filter.pkt_filter_flags0 =
HTT_RX_MON_FP_MGMT_FILTER_FLAGS0 |
HTT_RX_MON_MO_MGMT_FILTER_FLAGS0;
@@ -1236,14 +1434,19 @@ int ath12k_dp_tx_htt_rx_monitor_mode_ring_config(struct ath12k *ar, bool reset)
}
if (ab->hw_params->rxdma1_enable) {
- ret = ath12k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id, 0,
- HAL_RXDMA_MONITOR_BUF,
- DP_RXDMA_REFILL_RING_SIZE,
- &tlv_filter);
- if (ret) {
- ath12k_err(ab,
- "failed to setup filter for monitor buf %d\n", ret);
- return ret;
+ for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
+ ring_id = ar->dp.rxdma_mon_dst_ring[i].ring_id;
+ ret = ath12k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id,
+ ar->dp.mac_id + i,
+ HAL_RXDMA_MONITOR_DST,
+ DP_RXDMA_REFILL_RING_SIZE,
+ &tlv_filter);
+ if (ret) {
+ ath12k_err(ab,
+ "failed to setup filter for monitor buf %d\n",
+ ret);
+ return ret;
+ }
}
}
diff --git a/drivers/net/wireless/ath/ath12k/dp_tx.h b/drivers/net/wireless/ath/ath12k/dp_tx.h
index 46dce23501f3..a5904097dc34 100644
--- a/drivers/net/wireless/ath/ath12k/dp_tx.h
+++ b/drivers/net/wireless/ath/ath12k/dp_tx.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2022, 2024-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH12K_DP_TX_H
@@ -17,7 +17,7 @@ struct ath12k_dp_htt_wbm_tx_status {
int ath12k_dp_tx_htt_h2t_ver_req_msg(struct ath12k_base *ab);
int ath12k_dp_tx(struct ath12k *ar, struct ath12k_link_vif *arvif,
- struct sk_buff *skb);
+ struct sk_buff *skb, bool gsn_valid, int mcbc_gsn);
void ath12k_dp_tx_completion_handler(struct ath12k_base *ab, int ring_id);
int ath12k_dp_tx_htt_h2t_ppdu_stats_req(struct ath12k *ar, u32 mask);
diff --git a/drivers/net/wireless/ath/ath12k/hal_desc.h b/drivers/net/wireless/ath/ath12k/hal_desc.h
index 7b0403d245e5..3e8983b85de8 100644
--- a/drivers/net/wireless/ath/ath12k/hal_desc.h
+++ b/drivers/net/wireless/ath/ath12k/hal_desc.h
@@ -2968,9 +2968,8 @@ struct hal_mon_buf_ring {
#define HAL_MON_DEST_COOKIE_BUF_ID GENMASK(17, 0)
-#define HAL_MON_DEST_INFO0_END_OFFSET GENMASK(15, 0)
-#define HAL_MON_DEST_INFO0_FLUSH_DETECTED BIT(16)
-#define HAL_MON_DEST_INFO0_END_OF_PPDU BIT(17)
+#define HAL_MON_DEST_INFO0_END_OFFSET GENMASK(11, 0)
+#define HAL_MON_DEST_INFO0_END_REASON GENMASK(17, 16)
#define HAL_MON_DEST_INFO0_INITIATOR BIT(18)
#define HAL_MON_DEST_INFO0_EMPTY_DESC BIT(19)
#define HAL_MON_DEST_INFO0_RING_ID GENMASK(27, 20)
diff --git a/drivers/net/wireless/ath/ath12k/hal_rx.h b/drivers/net/wireless/ath/ath12k/hal_rx.h
index b08aa2e79f41..6bdcd0867d86 100644
--- a/drivers/net/wireless/ath/ath12k/hal_rx.h
+++ b/drivers/net/wireless/ath/ath12k/hal_rx.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH12K_HAL_RX_H
@@ -22,9 +22,6 @@ struct hal_rx_wbm_rel_info {
#define HAL_INVALID_PEERID 0x3fff
#define VHT_SIG_SU_NSS_MASK 0x7
-#define HAL_RX_MAX_MCS 12
-#define HAL_RX_MAX_NSS 8
-
#define HAL_RX_MPDU_INFO_PN_GET_BYTE1(__val) \
le32_get_bits((__val), GENMASK(7, 0))
@@ -71,6 +68,8 @@ enum hal_rx_preamble {
HAL_RX_PREAMBLE_11N,
HAL_RX_PREAMBLE_11AC,
HAL_RX_PREAMBLE_11AX,
+ HAL_RX_PREAMBLE_11BA,
+ HAL_RX_PREAMBLE_11BE,
HAL_RX_PREAMBLE_MAX,
};
@@ -108,6 +107,9 @@ enum hal_rx_mon_status {
HAL_RX_MON_STATUS_PPDU_NOT_DONE,
HAL_RX_MON_STATUS_PPDU_DONE,
HAL_RX_MON_STATUS_BUF_DONE,
+ HAL_RX_MON_STATUS_BUF_ADDR,
+ HAL_RX_MON_STATUS_MPDU_END,
+ HAL_RX_MON_STATUS_MSDU_END,
};
#define HAL_RX_MAX_MPDU 256
@@ -143,10 +145,43 @@ struct hal_rx_user_status {
u32 mpdu_fcs_ok_bitmap[HAL_RX_NUM_WORDS_PER_PPDU_BITMAP];
u32 mpdu_ok_byte_count;
u32 mpdu_err_byte_count;
+ bool ampdu_present;
+ u16 ampdu_id;
};
#define HAL_MAX_UL_MU_USERS 37
+struct hal_rx_u_sig_info {
+ bool ul_dl;
+ u8 bw;
+ u8 ppdu_type_comp_mode;
+ u8 eht_sig_mcs;
+ u8 num_eht_sig_sym;
+ struct ieee80211_radiotap_eht_usig usig;
+};
+
+#define HAL_RX_MON_MAX_AGGR_SIZE 128
+
+struct hal_rx_tlv_aggr_info {
+ bool in_progress;
+ u16 cur_len;
+ u16 tlv_tag;
+ u8 buf[HAL_RX_MON_MAX_AGGR_SIZE];
+};
+
+struct hal_rx_radiotap_eht {
+ __le32 known;
+ __le32 data[9];
+};
+
+#define EHT_MAX_USER_INFO 4
+
+struct hal_rx_eht_info {
+ u8 num_user_info;
+ struct hal_rx_radiotap_eht eht;
+ u32 user_info[EHT_MAX_USER_INFO];
+};
+
struct hal_rx_mon_ppdu_info {
u32 ppdu_id;
u32 last_ppdu_id;
@@ -227,10 +262,15 @@ struct hal_rx_mon_ppdu_info {
u8 addr4[ETH_ALEN];
struct hal_rx_user_status userstats[HAL_MAX_UL_MU_USERS];
u8 userid;
- u16 ampdu_id[HAL_MAX_UL_MU_USERS];
bool first_msdu_in_mpdu;
bool is_ampdu;
u8 medium_prot_type;
+ bool ppdu_continuation;
+ bool eht_usig;
+ struct hal_rx_u_sig_info u_sig_info;
+ bool is_eht;
+ struct hal_rx_eht_info eht_info;
+ struct hal_rx_tlv_aggr_info tlv_aggr;
};
#define HAL_RX_PPDU_START_INFO0_PPDU_ID GENMASK(15, 0)
@@ -641,6 +681,395 @@ struct hal_rx_resp_req_info {
#define HAL_RX_MPDU_ERR_MPDU_LEN BIT(6)
#define HAL_RX_MPDU_ERR_UNENCRYPTED_FRAME BIT(7)
+#define HAL_RX_PHY_CMN_USER_INFO0_GI GENMASK(17, 16)
+
+struct hal_phyrx_common_user_info {
+ __le32 rsvd[2];
+ __le32 info0;
+ __le32 rsvd1;
+} __packed;
+
+#define HAL_RX_EHT_SIG_NDP_CMN_INFO0_SPATIAL_REUSE GENMASK(3, 0)
+#define HAL_RX_EHT_SIG_NDP_CMN_INFO0_GI_LTF GENMASK(5, 4)
+#define HAL_RX_EHT_SIG_NDP_CMN_INFO0_NUM_LTF_SYM GENMASK(8, 6)
+#define HAL_RX_EHT_SIG_NDP_CMN_INFO0_NSS GENMASK(10, 7)
+#define HAL_RX_EHT_SIG_NDP_CMN_INFO0_BEAMFORMED BIT(11)
+#define HAL_RX_EHT_SIG_NDP_CMN_INFO0_DISREGARD GENMASK(13, 12)
+#define HAL_RX_EHT_SIG_NDP_CMN_INFO0_CRC GENMASK(17, 14)
+
+struct hal_eht_sig_ndp_cmn_eb {
+ __le32 info0;
+} __packed;
+
+#define HAL_RX_EHT_SIG_OVERFLOW_INFO0_SPATIAL_REUSE GENMASK(3, 0)
+#define HAL_RX_EHT_SIG_OVERFLOW_INFO0_GI_LTF GENMASK(5, 4)
+#define HAL_RX_EHT_SIG_OVERFLOW_INFO0_NUM_LTF_SYM GENMASK(8, 6)
+#define HAL_RX_EHT_SIG_OVERFLOW_INFO0_LDPC_EXTA_SYM BIT(9)
+#define HAL_RX_EHT_SIG_OVERFLOW_INFO0_PRE_FEC_PAD_FACTOR GENMASK(11, 10)
+#define HAL_RX_EHT_SIG_OVERFLOW_INFO0_DISAMBIGUITY BIT(12)
+#define HAL_RX_EHT_SIG_OVERFLOW_INFO0_DISREGARD GENMASK(16, 13)
+
+struct hal_eht_sig_usig_overflow {
+ __le32 info0;
+} __packed;
+
+#define HAL_RX_EHT_SIG_NON_MUMIMO_USER_INFO0_STA_ID GENMASK(10, 0)
+#define HAL_RX_EHT_SIG_NON_MUMIMO_USER_INFO0_MCS GENMASK(14, 11)
+#define HAL_RX_EHT_SIG_NON_MUMIMO_USER_INFO0_VALIDATE BIT(15)
+#define HAL_RX_EHT_SIG_NON_MUMIMO_USER_INFO0_NSS GENMASK(19, 16)
+#define HAL_RX_EHT_SIG_NON_MUMIMO_USER_INFO0_BEAMFORMED BIT(20)
+#define HAL_RX_EHT_SIG_NON_MUMIMO_USER_INFO0_CODING BIT(21)
+#define HAL_RX_EHT_SIG_NON_MUMIMO_USER_INFO0_CRC GENMASK(25, 22)
+
+struct hal_eht_sig_non_mu_mimo {
+ __le32 info0;
+} __packed;
+
+#define HAL_RX_EHT_SIG_MUMIMO_USER_INFO0_STA_ID GENMASK(10, 0)
+#define HAL_RX_EHT_SIG_MUMIMO_USER_INFO0_MCS GENMASK(14, 11)
+#define HAL_RX_EHT_SIG_MUMIMO_USER_INFO0_CODING BIT(15)
+#define HAL_RX_EHT_SIG_MUMIMO_USER_INFO0_SPATIAL_CODING GENMASK(22, 16)
+#define HAL_RX_EHT_SIG_MUMIMO_USER_INFO0_CRC GENMASK(26, 23)
+
+struct hal_eht_sig_mu_mimo {
+ __le32 info0;
+} __packed;
+
+union hal_eht_sig_user_field {
+ struct hal_eht_sig_mu_mimo mu_mimo;
+ struct hal_eht_sig_non_mu_mimo n_mu_mimo;
+};
+
+#define HAL_RX_EHT_SIG_NON_OFDMA_INFO0_SPATIAL_REUSE GENMASK(3, 0)
+#define HAL_RX_EHT_SIG_NON_OFDMA_INFO0_GI_LTF GENMASK(5, 4)
+#define HAL_RX_EHT_SIG_NON_OFDMA_INFO0_NUM_LTF_SYM GENMASK(8, 6)
+#define HAL_RX_EHT_SIG_NON_OFDMA_INFO0_LDPC_EXTA_SYM BIT(9)
+#define HAL_RX_EHT_SIG_NON_OFDMA_INFO0_PRE_FEC_PAD_FACTOR GENMASK(11, 10)
+#define HAL_RX_EHT_SIG_NON_OFDMA_INFO0_DISAMBIGUITY BIT(12)
+#define HAL_RX_EHT_SIG_NON_OFDMA_INFO0_DISREGARD GENMASK(16, 13)
+#define HAL_RX_EHT_SIG_NON_OFDMA_INFO0_NUM_USERS GENMASK(19, 17)
+
+struct hal_eht_sig_non_ofdma_cmn_eb {
+ __le32 info0;
+ union hal_eht_sig_user_field user_field;
+} __packed;
+
+#define HAL_RX_EHT_SIG_OFDMA_EB1_SPATIAL_REUSE GENMASK_ULL(3, 0)
+#define HAL_RX_EHT_SIG_OFDMA_EB1_GI_LTF GENMASK_ULL(5, 4)
+#define HAL_RX_EHT_SIG_OFDMA_EB1_NUM_LFT_SYM GENMASK_ULL(8, 6)
+#define HAL_RX_EHT_SIG_OFDMA_EB1_LDPC_EXTRA_SYM BIT(9)
+#define HAL_RX_EHT_SIG_OFDMA_EB1_PRE_FEC_PAD_FACTOR GENMASK_ULL(11, 10)
+#define HAL_RX_EHT_SIG_OFDMA_EB1_PRE_DISAMBIGUITY BIT(12)
+#define HAL_RX_EHT_SIG_OFDMA_EB1_DISREGARD GENMASK_ULL(16, 13)
+#define HAL_RX_EHT_SIG_OFDMA_EB1_RU_ALLOC_1_1 GENMASK_ULL(25, 17)
+#define HAL_RX_EHT_SIG_OFDMA_EB1_RU_ALLOC_1_2 GENMASK_ULL(34, 26)
+#define HAL_RX_EHT_SIG_OFDMA_EB1_CRC GENMASK_ULL(30, 27)
+
+struct hal_eht_sig_ofdma_cmn_eb1 {
+ __le64 info0;
+} __packed;
+
+#define HAL_RX_EHT_SIG_OFDMA_EB2_RU_ALLOC_2_1 GENMASK_ULL(8, 0)
+#define HAL_RX_EHT_SIG_OFDMA_EB2_RU_ALLOC_2_2 GENMASK_ULL(17, 9)
+#define HAL_RX_EHT_SIG_OFDMA_EB2_RU_ALLOC_2_3 GENMASK_ULL(26, 18)
+#define HAL_RX_EHT_SIG_OFDMA_EB2_RU_ALLOC_2_4 GENMASK_ULL(35, 27)
+#define HAL_RX_EHT_SIG_OFDMA_EB2_RU_ALLOC_2_5 GENMASK_ULL(44, 36)
+#define HAL_RX_EHT_SIG_OFDMA_EB2_RU_ALLOC_2_6 GENMASK_ULL(53, 45)
+#define HAL_RX_EHT_SIG_OFDMA_EB2_MCS GENMASK_ULL(57, 54)
+
+struct hal_eht_sig_ofdma_cmn_eb2 {
+ __le64 info0;
+} __packed;
+
+struct hal_eht_sig_ofdma_cmn_eb {
+ struct hal_eht_sig_ofdma_cmn_eb1 eb1;
+ struct hal_eht_sig_ofdma_cmn_eb2 eb2;
+ union hal_eht_sig_user_field user_field;
+} __packed;
+
+enum hal_eht_bw {
+ HAL_EHT_BW_20,
+ HAL_EHT_BW_40,
+ HAL_EHT_BW_80,
+ HAL_EHT_BW_160,
+ HAL_EHT_BW_320_1,
+ HAL_EHT_BW_320_2,
+};
+
+#define HAL_RX_USIG_CMN_INFO0_PHY_VERSION GENMASK(2, 0)
+#define HAL_RX_USIG_CMN_INFO0_BW GENMASK(5, 3)
+#define HAL_RX_USIG_CMN_INFO0_UL_DL BIT(6)
+#define HAL_RX_USIG_CMN_INFO0_BSS_COLOR GENMASK(12, 7)
+#define HAL_RX_USIG_CMN_INFO0_TXOP GENMASK(19, 13)
+#define HAL_RX_USIG_CMN_INFO0_DISREGARD GENMASK(25, 20)
+#define HAL_RX_USIG_CMN_INFO0_VALIDATE BIT(26)
+
+struct hal_mon_usig_cmn {
+ __le32 info0;
+} __packed;
+
+#define HAL_RX_USIG_TB_INFO0_PPDU_TYPE_COMP_MODE GENMASK(1, 0)
+#define HAL_RX_USIG_TB_INFO0_VALIDATE BIT(2)
+#define HAL_RX_USIG_TB_INFO0_SPATIAL_REUSE_1 GENMASK(6, 3)
+#define HAL_RX_USIG_TB_INFO0_SPATIAL_REUSE_2 GENMASK(10, 7)
+#define HAL_RX_USIG_TB_INFO0_DISREGARD_1 GENMASK(15, 11)
+#define HAL_RX_USIG_TB_INFO0_CRC GENMASK(19, 16)
+#define HAL_RX_USIG_TB_INFO0_TAIL GENMASK(25, 20)
+#define HAL_RX_USIG_TB_INFO0_RX_INTEG_CHECK_PASS BIT(31)
+
+struct hal_mon_usig_tb {
+ __le32 info0;
+} __packed;
+
+#define HAL_RX_USIG_MU_INFO0_PPDU_TYPE_COMP_MODE GENMASK(1, 0)
+#define HAL_RX_USIG_MU_INFO0_VALIDATE_1 BIT(2)
+#define HAL_RX_USIG_MU_INFO0_PUNC_CH_INFO GENMASK(7, 3)
+#define HAL_RX_USIG_MU_INFO0_VALIDATE_2 BIT(8)
+#define HAL_RX_USIG_MU_INFO0_EHT_SIG_MCS GENMASK(10, 9)
+#define HAL_RX_USIG_MU_INFO0_NUM_EHT_SIG_SYM GENMASK(15, 11)
+#define HAL_RX_USIG_MU_INFO0_CRC GENMASK(20, 16)
+#define HAL_RX_USIG_MU_INFO0_TAIL GENMASK(26, 21)
+#define HAL_RX_USIG_MU_INFO0_RX_INTEG_CHECK_PASS BIT(31)
+
+struct hal_mon_usig_mu {
+ __le32 info0;
+} __packed;
+
+union hal_mon_usig_non_cmn {
+ struct hal_mon_usig_tb tb;
+ struct hal_mon_usig_mu mu;
+};
+
+struct hal_mon_usig_hdr {
+ struct hal_mon_usig_cmn cmn;
+ union hal_mon_usig_non_cmn non_cmn;
+} __packed;
+
+#define HAL_RX_USR_INFO0_PHY_PPDU_ID GENMASK(15, 0)
+#define HAL_RX_USR_INFO0_USR_RSSI GENMASK(23, 16)
+#define HAL_RX_USR_INFO0_PKT_TYPE GENMASK(27, 24)
+#define HAL_RX_USR_INFO0_STBC BIT(28)
+#define HAL_RX_USR_INFO0_RECEPTION_TYPE GENMASK(31, 29)
+
+#define HAL_RX_USR_INFO1_MCS GENMASK(3, 0)
+#define HAL_RX_USR_INFO1_SGI GENMASK(5, 4)
+#define HAL_RX_USR_INFO1_HE_RANGING_NDP BIT(6)
+#define HAL_RX_USR_INFO1_MIMO_SS_BITMAP GENMASK(15, 8)
+#define HAL_RX_USR_INFO1_RX_BW GENMASK(18, 16)
+#define HAL_RX_USR_INFO1_DL_OFMDA_USR_IDX GENMASK(31, 24)
+
+#define HAL_RX_USR_INFO2_DL_OFDMA_CONTENT_CHAN BIT(0)
+#define HAL_RX_USR_INFO2_NSS GENMASK(10, 8)
+#define HAL_RX_USR_INFO2_STREAM_OFFSET GENMASK(13, 11)
+#define HAL_RX_USR_INFO2_STA_DCM BIT(14)
+#define HAL_RX_USR_INFO2_LDPC BIT(15)
+#define HAL_RX_USR_INFO2_RU_TYPE_80_0 GENMASK(19, 16)
+#define HAL_RX_USR_INFO2_RU_TYPE_80_1 GENMASK(23, 20)
+#define HAL_RX_USR_INFO2_RU_TYPE_80_2 GENMASK(27, 24)
+#define HAL_RX_USR_INFO2_RU_TYPE_80_3 GENMASK(31, 28)
+
+#define HAL_RX_USR_INFO3_RU_START_IDX_80_0 GENMASK(5, 0)
+#define HAL_RX_USR_INFO3_RU_START_IDX_80_1 GENMASK(13, 8)
+#define HAL_RX_USR_INFO3_RU_START_IDX_80_2 GENMASK(21, 16)
+#define HAL_RX_USR_INFO3_RU_START_IDX_80_3 GENMASK(29, 24)
+
+struct hal_receive_user_info {
+ __le32 info0;
+ __le32 info1;
+ __le32 info2;
+ __le32 info3;
+ __le32 user_fd_rssi_seg0;
+ __le32 user_fd_rssi_seg1;
+ __le32 user_fd_rssi_seg2;
+ __le32 user_fd_rssi_seg3;
+} __packed;
+
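+/* Consumers are expected to extract fields from the little-endian words
+ * with le32_get_bits(), e.g.
+ * nss = le32_get_bits(user_info->info2, HAL_RX_USR_INFO2_NSS)
+ * (illustrative snippet; "user_info" is just a local pointer name).
+ */
+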
+enum hal_mon_reception_type {
+ HAL_RECEPTION_TYPE_SU,
+ HAL_RECEPTION_TYPE_DL_MU_MIMO,
+ HAL_RECEPTION_TYPE_DL_MU_OFDMA,
+ HAL_RECEPTION_TYPE_DL_MU_OFDMA_MIMO,
+ HAL_RECEPTION_TYPE_UL_MU_MIMO,
+ HAL_RECEPTION_TYPE_UL_MU_OFDMA,
+ HAL_RECEPTION_TYPE_UL_MU_OFDMA_MIMO,
+};
+
+/* Different allowed RU in 11BE */
+#define HAL_EHT_RU_26 0ULL
+#define HAL_EHT_RU_52 1ULL
+#define HAL_EHT_RU_78 2ULL
+#define HAL_EHT_RU_106 3ULL
+#define HAL_EHT_RU_132 4ULL
+#define HAL_EHT_RU_242 5ULL
+#define HAL_EHT_RU_484 6ULL
+#define HAL_EHT_RU_726 7ULL
+#define HAL_EHT_RU_996 8ULL
+#define HAL_EHT_RU_996x2 9ULL
+#define HAL_EHT_RU_996x3 10ULL
+#define HAL_EHT_RU_996x4 11ULL
+#define HAL_EHT_RU_NONE 15ULL
+#define HAL_EHT_RU_INVALID 31ULL
+/* MRUs spanning more than 80 MHz
+ * HAL_EHT_RU_996_484 = HAL_EHT_RU_484 + HAL_EHT_RU_996 + 4 (reserved)
+ */
+#define HAL_EHT_RU_996_484 18ULL
+#define HAL_EHT_RU_996x2_484 28ULL
+#define HAL_EHT_RU_996x3_484 40ULL
+#define HAL_EHT_RU_996_484_242 23ULL
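+/* e.g. with the values above, HAL_EHT_RU_996_484 = 8 + 6 + 4 = 18 */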
+
+#define NUM_RU_BITS_PER80 16
+#define NUM_RU_BITS_PER20 4
+
+/* Different per-80 MHz bands within the 320 MHz bandwidth */
+#define HAL_80_0 0
+#define HAL_80_1 1
+#define HAL_80_2 2
+#define HAL_80_3 3
+
+#define HAL_RU_80MHZ(num_band) ((num_band) * NUM_RU_BITS_PER80)
+#define HAL_RU_20MHZ(idx_per_80) ((idx_per_80) * NUM_RU_BITS_PER20)
+
+#define HAL_RU_SHIFT(num_band, idx_per_80) \
+ (HAL_RU_80MHZ(num_band) + HAL_RU_20MHZ(idx_per_80))
+
+#define HAL_RU(ru, num_band, idx_per_80) \
+ ((u64)(ru) << HAL_RU_SHIFT(num_band, idx_per_80))
+
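+/* Each 80 MHz band owns NUM_RU_BITS_PER80 bits of the 64-bit RU allocation
+ * map and each 20 MHz sub-channel a NUM_RU_BITS_PER20-bit nibble inside it;
+ * for example, HAL_EHT_RU_996_484_0 below works out to
+ * (6ULL << 4) | (8ULL << 16) = 0x80060.
+ */
+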
+/* MRU-996+484 */
+#define HAL_EHT_RU_996_484_0 (HAL_RU(HAL_EHT_RU_484, HAL_80_0, 1) | \
+ HAL_RU(HAL_EHT_RU_996, HAL_80_1, 0))
+#define HAL_EHT_RU_996_484_1 (HAL_RU(HAL_EHT_RU_484, HAL_80_0, 0) | \
+ HAL_RU(HAL_EHT_RU_996, HAL_80_1, 0))
+#define HAL_EHT_RU_996_484_2 (HAL_RU(HAL_EHT_RU_996, HAL_80_0, 0) | \
+ HAL_RU(HAL_EHT_RU_484, HAL_80_1, 1))
+#define HAL_EHT_RU_996_484_3 (HAL_RU(HAL_EHT_RU_996, HAL_80_0, 0) | \
+ HAL_RU(HAL_EHT_RU_484, HAL_80_1, 0))
+#define HAL_EHT_RU_996_484_4 (HAL_RU(HAL_EHT_RU_484, HAL_80_2, 1) | \
+ HAL_RU(HAL_EHT_RU_996, HAL_80_3, 0))
+#define HAL_EHT_RU_996_484_5 (HAL_RU(HAL_EHT_RU_484, HAL_80_2, 0) | \
+ HAL_RU(HAL_EHT_RU_996, HAL_80_3, 0))
+#define HAL_EHT_RU_996_484_6 (HAL_RU(HAL_EHT_RU_996, HAL_80_2, 0) | \
+ HAL_RU(HAL_EHT_RU_484, HAL_80_3, 1))
+#define HAL_EHT_RU_996_484_7 (HAL_RU(HAL_EHT_RU_996, HAL_80_2, 0) | \
+ HAL_RU(HAL_EHT_RU_484, HAL_80_3, 0))
+
+/* MRU-996x2+484 */
+#define HAL_EHT_RU_996x2_484_0 (HAL_RU(HAL_EHT_RU_484, HAL_80_0, 1) | \
+ HAL_RU(HAL_EHT_RU_996x2, HAL_80_1, 0) | \
+ HAL_RU(HAL_EHT_RU_996x2, HAL_80_2, 0))
+#define HAL_EHT_RU_996x2_484_1 (HAL_RU(HAL_EHT_RU_484, HAL_80_0, 0) | \
+ HAL_RU(HAL_EHT_RU_996x2, HAL_80_1, 0) | \
+ HAL_RU(HAL_EHT_RU_996x2, HAL_80_2, 0))
+#define HAL_EHT_RU_996x2_484_2 (HAL_RU(HAL_EHT_RU_996x2, HAL_80_0, 0) | \
+ HAL_RU(HAL_EHT_RU_484, HAL_80_1, 1) | \
+ HAL_RU(HAL_EHT_RU_996x2, HAL_80_2, 0))
+#define HAL_EHT_RU_996x2_484_3 (HAL_RU(HAL_EHT_RU_996x2, HAL_80_0, 0) | \
+ HAL_RU(HAL_EHT_RU_484, HAL_80_1, 0) | \
+ HAL_RU(HAL_EHT_RU_996x2, HAL_80_2, 0))
+#define HAL_EHT_RU_996x2_484_4 (HAL_RU(HAL_EHT_RU_996x2, HAL_80_0, 0) | \
+ HAL_RU(HAL_EHT_RU_996x2, HAL_80_1, 0) | \
+ HAL_RU(HAL_EHT_RU_484, HAL_80_2, 1))
+#define HAL_EHT_RU_996x2_484_5 (HAL_RU(HAL_EHT_RU_996x2, HAL_80_0, 0) | \
+ HAL_RU(HAL_EHT_RU_996x2, HAL_80_1, 0) | \
+ HAL_RU(HAL_EHT_RU_484, HAL_80_2, 0))
+#define HAL_EHT_RU_996x2_484_6 (HAL_RU(HAL_EHT_RU_484, HAL_80_1, 1) | \
+ HAL_RU(HAL_EHT_RU_996x2, HAL_80_2, 0) | \
+ HAL_RU(HAL_EHT_RU_996x2, HAL_80_3, 0))
+#define HAL_EHT_RU_996x2_484_7 (HAL_RU(HAL_EHT_RU_484, HAL_80_1, 0) | \
+ HAL_RU(HAL_EHT_RU_996x2, HAL_80_2, 0) | \
+ HAL_RU(HAL_EHT_RU_996x2, HAL_80_3, 0))
+#define HAL_EHT_RU_996x2_484_8 (HAL_RU(HAL_EHT_RU_996x2, HAL_80_1, 0) | \
+ HAL_RU(HAL_EHT_RU_484, HAL_80_2, 1) | \
+ HAL_RU(HAL_EHT_RU_996x2, HAL_80_3, 0))
+#define HAL_EHT_RU_996x2_484_9 (HAL_RU(HAL_EHT_RU_996x2, HAL_80_1, 0) | \
+ HAL_RU(HAL_EHT_RU_484, HAL_80_2, 0) | \
+ HAL_RU(HAL_EHT_RU_996x2, HAL_80_3, 0))
+#define HAL_EHT_RU_996x2_484_10 (HAL_RU(HAL_EHT_RU_996x2, HAL_80_1, 0) | \
+ HAL_RU(HAL_EHT_RU_996x2, HAL_80_2, 0) | \
+ HAL_RU(HAL_EHT_RU_484, HAL_80_3, 1))
+#define HAL_EHT_RU_996x2_484_11 (HAL_RU(HAL_EHT_RU_996x2, HAL_80_1, 0) | \
+ HAL_RU(HAL_EHT_RU_996x2, HAL_80_2, 0) | \
+ HAL_RU(HAL_EHT_RU_484, HAL_80_3, 0))
+
+/* MRU-996x3+484 */
+#define HAL_EHT_RU_996x3_484_0 (HAL_RU(HAL_EHT_RU_484, HAL_80_0, 1) | \
+ HAL_RU(HAL_EHT_RU_996x3, HAL_80_1, 0) | \
+ HAL_RU(HAL_EHT_RU_996x3, HAL_80_2, 0) | \
+ HAL_RU(HAL_EHT_RU_996x3, HAL_80_3, 0))
+#define HAL_EHT_RU_996x3_484_1 (HAL_RU(HAL_EHT_RU_484, HAL_80_0, 0) | \
+ HAL_RU(HAL_EHT_RU_996x3, HAL_80_1, 0) | \
+ HAL_RU(HAL_EHT_RU_996x3, HAL_80_2, 0) | \
+ HAL_RU(HAL_EHT_RU_996x3, HAL_80_3, 0))
+#define HAL_EHT_RU_996x3_484_2 (HAL_RU(HAL_EHT_RU_996x3, HAL_80_0, 0) | \
+ HAL_RU(HAL_EHT_RU_484, HAL_80_1, 1) | \
+ HAL_RU(HAL_EHT_RU_996x3, HAL_80_2, 0) | \
+ HAL_RU(HAL_EHT_RU_996x3, HAL_80_3, 0))
+#define HAL_EHT_RU_996x3_484_3 (HAL_RU(HAL_EHT_RU_996x3, HAL_80_0, 0) | \
+ HAL_RU(HAL_EHT_RU_484, HAL_80_1, 0) | \
+ HAL_RU(HAL_EHT_RU_996x3, HAL_80_2, 0) | \
+ HAL_RU(HAL_EHT_RU_996x3, HAL_80_3, 0))
+#define HAL_EHT_RU_996x3_484_4 (HAL_RU(HAL_EHT_RU_996x3, HAL_80_0, 0) | \
+ HAL_RU(HAL_EHT_RU_996x3, HAL_80_1, 0) | \
+ HAL_RU(HAL_EHT_RU_484, HAL_80_2, 1) | \
+ HAL_RU(HAL_EHT_RU_996x3, HAL_80_3, 0))
+#define HAL_EHT_RU_996x3_484_5 (HAL_RU(HAL_EHT_RU_996x3, HAL_80_0, 0) | \
+ HAL_RU(HAL_EHT_RU_996x3, HAL_80_1, 0) | \
+ HAL_RU(HAL_EHT_RU_484, HAL_80_2, 0) | \
+ HAL_RU(HAL_EHT_RU_996x3, HAL_80_3, 0))
+#define HAL_EHT_RU_996x3_484_6 (HAL_RU(HAL_EHT_RU_996x3, HAL_80_0, 0) | \
+ HAL_RU(HAL_EHT_RU_996x3, HAL_80_1, 0) | \
+ HAL_RU(HAL_EHT_RU_996x3, HAL_80_2, 0) | \
+ HAL_RU(HAL_EHT_RU_484, HAL_80_3, 1))
+#define HAL_EHT_RU_996x3_484_7 (HAL_RU(HAL_EHT_RU_996x3, HAL_80_0, 0) | \
+ HAL_RU(HAL_EHT_RU_996x3, HAL_80_1, 0) | \
+ HAL_RU(HAL_EHT_RU_996x3, HAL_80_2, 0) | \
+ HAL_RU(HAL_EHT_RU_484, HAL_80_3, 0))
+
+#define HAL_RU_PER80(ru_per80, num_80mhz, ru_idx_per80mhz) \
+ (HAL_RU(ru_per80, num_80mhz, ru_idx_per80mhz))
+
+#define RU_INVALID 0
+#define RU_26 1
+#define RU_52 2
+#define RU_106 4
+#define RU_242 9
+#define RU_484 18
+#define RU_996 37
+#define RU_2X996 74
+#define RU_3X996 111
+#define RU_4X996 148
+#define RU_52_26 (RU_52 + RU_26)
+#define RU_106_26 (RU_106 + RU_26)
+#define RU_484_242 (RU_484 + RU_242)
+#define RU_996_484 (RU_996 + RU_484)
+#define RU_996_484_242 (RU_996 + RU_484_242)
+#define RU_2X996_484 (RU_2X996 + RU_484)
+#define RU_3X996_484 (RU_3X996 + RU_484)
+
+enum ath12k_eht_ru_size {
+ ATH12K_EHT_RU_26,
+ ATH12K_EHT_RU_52,
+ ATH12K_EHT_RU_106,
+ ATH12K_EHT_RU_242,
+ ATH12K_EHT_RU_484,
+ ATH12K_EHT_RU_996,
+ ATH12K_EHT_RU_996x2,
+ ATH12K_EHT_RU_996x4,
+ ATH12K_EHT_RU_52_26,
+ ATH12K_EHT_RU_106_26,
+ ATH12K_EHT_RU_484_242,
+ ATH12K_EHT_RU_996_484,
+ ATH12K_EHT_RU_996_484_242,
+ ATH12K_EHT_RU_996x2_484,
+ ATH12K_EHT_RU_996x3,
+ ATH12K_EHT_RU_996x3_484,
+
+ /* Keep last */
+ ATH12K_EHT_RU_INVALID,
+};
+
+#define HAL_RX_RU_ALLOC_TYPE_MAX ATH12K_EHT_RU_INVALID
+
static inline
enum nl80211_he_ru_alloc ath12k_he_ru_tones_to_nl80211_he_ru_alloc(u16 ru_tones)
{
@@ -662,6 +1091,9 @@ enum nl80211_he_ru_alloc ath12k_he_ru_tones_to_nl80211_he_ru_alloc(u16 ru_tones)
case RU_996:
ret = NL80211_RATE_INFO_HE_RU_ALLOC_996;
break;
+ case RU_2X996:
+ ret = NL80211_RATE_INFO_HE_RU_ALLOC_2x996;
+ break;
case RU_26:
fallthrough;
default:
diff --git a/drivers/net/wireless/ath/ath12k/hal_tx.h b/drivers/net/wireless/ath/ath12k/hal_tx.h
index 3cf5973771d7..eb065a79f6c6 100644
--- a/drivers/net/wireless/ath/ath12k/hal_tx.h
+++ b/drivers/net/wireless/ath/ath12k/hal_tx.h
@@ -1,7 +1,8 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2022, 2024-2025 Qualcomm Innovation Center, Inc.
+ * All rights reserved.
*/
#ifndef ATH12K_HAL_TX_H
@@ -63,7 +64,12 @@ struct hal_tx_status {
u8 try_cnt;
u8 tid;
u16 peer_id;
- u32 rate_stats;
+ enum hal_tx_rate_stats_pkt_type pkt_type;
+ enum hal_tx_rate_stats_sgi sgi;
+ enum ath12k_supported_bw bw;
+ u8 mcs;
+ u16 tones;
+ u8 ofdma;
};
#define HAL_TX_PHY_DESC_INFO0_BF_TYPE GENMASK(17, 16)
diff --git a/drivers/net/wireless/ath/ath12k/hw.c b/drivers/net/wireless/ath/ath12k/hw.c
index b7b583fadb5a..a106ebed7870 100644
--- a/drivers/net/wireless/ath/ath12k/hw.c
+++ b/drivers/net/wireless/ath/ath12k/hw.c
@@ -543,7 +543,11 @@ static const struct ath12k_hw_ring_mask ath12k_hw_ring_mask_qcn9274 = {
ATH12K_TX_RING_MASK_3,
},
.rx_mon_dest = {
- 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ ATH12K_RX_MON_RING_MASK_0,
+ ATH12K_RX_MON_RING_MASK_1,
+ ATH12K_RX_MON_RING_MASK_2,
},
.rx = {
0, 0, 0, 0,
@@ -1035,7 +1039,7 @@ static const struct ath12k_hw_params ath12k_hw_params[] = {
.hal_params = &ath12k_hw_hal_params_qcn9274,
- .rxdma1_enable = false,
+ .rxdma1_enable = true,
.num_rxdma_per_pdev = 1,
.num_rxdma_dst_ring = 0,
.rx_mac_buf_ring = false,
diff --git a/drivers/net/wireless/ath/ath12k/mac.c b/drivers/net/wireless/ath/ath12k/mac.c
index 2d062b5904a8..dfa05f0ee6c9 100644
--- a/drivers/net/wireless/ath/ath12k/mac.c
+++ b/drivers/net/wireless/ath/ath12k/mac.c
@@ -15,10 +15,12 @@
#include "hw.h"
#include "dp_tx.h"
#include "dp_rx.h"
+#include "testmode.h"
#include "peer.h"
#include "debugfs.h"
#include "hif.h"
#include "wow.h"
+#include "debugfs_sta.h"
#define CHAN2G(_channel, _freq, _flags) { \
.band = NL80211_BAND_2GHZ, \
@@ -337,6 +339,82 @@ static const char *ath12k_mac_phymode_str(enum wmi_phy_mode mode)
return "<unknown>";
}
+u16 ath12k_mac_he_convert_tones_to_ru_tones(u16 tones)
+{
+ switch (tones) {
+ case 26:
+ return RU_26;
+ case 52:
+ return RU_52;
+ case 106:
+ return RU_106;
+ case 242:
+ return RU_242;
+ case 484:
+ return RU_484;
+ case 996:
+ return RU_996;
+ case (996 * 2):
+ return RU_2X996;
+ default:
+ return RU_26;
+ }
+}
+
+enum nl80211_eht_gi ath12k_mac_eht_gi_to_nl80211_eht_gi(u8 sgi)
+{
+ switch (sgi) {
+ case RX_MSDU_START_SGI_0_8_US:
+ return NL80211_RATE_INFO_EHT_GI_0_8;
+ case RX_MSDU_START_SGI_1_6_US:
+ return NL80211_RATE_INFO_EHT_GI_1_6;
+ case RX_MSDU_START_SGI_3_2_US:
+ return NL80211_RATE_INFO_EHT_GI_3_2;
+ default:
+ return NL80211_RATE_INFO_EHT_GI_0_8;
+ }
+}
+
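+/* Note: ru_tones is the raw tone count of the (M)RU, e.g. 726 for the
+ * 484+242 MRU, matching the case labels below.
+ */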
+enum nl80211_eht_ru_alloc ath12k_mac_eht_ru_tones_to_nl80211_eht_ru_alloc(u16 ru_tones)
+{
+ switch (ru_tones) {
+ case 26:
+ return NL80211_RATE_INFO_EHT_RU_ALLOC_26;
+ case 52:
+ return NL80211_RATE_INFO_EHT_RU_ALLOC_52;
+ case (52 + 26):
+ return NL80211_RATE_INFO_EHT_RU_ALLOC_52P26;
+ case 106:
+ return NL80211_RATE_INFO_EHT_RU_ALLOC_106;
+ case (106 + 26):
+ return NL80211_RATE_INFO_EHT_RU_ALLOC_106P26;
+ case 242:
+ return NL80211_RATE_INFO_EHT_RU_ALLOC_242;
+ case 484:
+ return NL80211_RATE_INFO_EHT_RU_ALLOC_484;
+ case (484 + 242):
+ return NL80211_RATE_INFO_EHT_RU_ALLOC_484P242;
+ case 996:
+ return NL80211_RATE_INFO_EHT_RU_ALLOC_996;
+ case (996 + 484):
+ return NL80211_RATE_INFO_EHT_RU_ALLOC_996P484;
+ case (996 + 484 + 242):
+ return NL80211_RATE_INFO_EHT_RU_ALLOC_996P484P242;
+ case (2 * 996):
+ return NL80211_RATE_INFO_EHT_RU_ALLOC_2x996;
+ case (2 * 996 + 484):
+ return NL80211_RATE_INFO_EHT_RU_ALLOC_2x996P484;
+ case (3 * 996):
+ return NL80211_RATE_INFO_EHT_RU_ALLOC_3x996;
+ case (3 * 996 + 484):
+ return NL80211_RATE_INFO_EHT_RU_ALLOC_3x996P484;
+ case (4 * 996):
+ return NL80211_RATE_INFO_EHT_RU_ALLOC_4x996;
+ default:
+ return NL80211_RATE_INFO_EHT_RU_ALLOC_26;
+ }
+}
+
enum rate_info_bw
ath12k_mac_bw_to_mac80211_bw(enum ath12k_supported_bw bw)
{
@@ -502,7 +580,20 @@ static int ath12k_mac_vif_link_chan(struct ieee80211_vif *vif, u8 link_id,
return 0;
}
-static struct ieee80211_bss_conf *
+static struct ath12k_link_vif *ath12k_mac_get_tx_arvif(struct ath12k_link_vif *arvif)
+{
+ struct ath12k_vif *tx_ahvif;
+
+ if (arvif->ahvif->vif->mbssid_tx_vif) {
+ tx_ahvif = ath12k_vif_to_ahvif(arvif->ahvif->vif->mbssid_tx_vif);
+ if (tx_ahvif)
+ return &tx_ahvif->deflink;
+ }
+
+ return NULL;
+}
+
+struct ieee80211_bss_conf *
ath12k_mac_get_link_bss_conf(struct ath12k_link_vif *arvif)
{
struct ieee80211_vif *vif = arvif->ahvif->vif;
@@ -675,7 +766,10 @@ struct ath12k *ath12k_mac_get_ar_by_pdev_id(struct ath12k_base *ab, u32 pdev_id)
return NULL;
for (i = 0; i < ab->num_radios; i++) {
- pdev = rcu_dereference(ab->pdevs_active[i]);
+ if (ab->fw_mode == ATH12K_FIRMWARE_MODE_FTM)
+ pdev = &ab->pdevs[i];
+ else
+ pdev = rcu_dereference(ab->pdevs_active[i]);
if (pdev && pdev->pdev_id == pdev_id)
return (pdev->ar ? pdev->ar : NULL);
@@ -725,9 +819,9 @@ static struct ath12k *ath12k_get_ar_by_ctx(struct ieee80211_hw *hw,
return ath12k_mac_get_ar_by_chan(hw, ctx->def.chan);
}
-static struct ath12k *ath12k_get_ar_by_vif(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- u8 link_id)
+struct ath12k *ath12k_get_ar_by_vif(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ u8 link_id)
{
struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif);
struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
@@ -1549,30 +1643,18 @@ static void ath12k_mac_set_arvif_ies(struct ath12k_link_vif *arvif, struct sk_bu
}
}
-static int ath12k_mac_setup_bcn_tmpl_ema(struct ath12k_link_vif *arvif)
+static int ath12k_mac_setup_bcn_tmpl_ema(struct ath12k_link_vif *arvif,
+ struct ath12k_link_vif *tx_arvif,
+ u8 bssid_index)
{
- struct ath12k_vif *ahvif = arvif->ahvif;
- struct ieee80211_bss_conf *bss_conf;
struct ath12k_wmi_bcn_tmpl_ema_arg ema_args;
struct ieee80211_ema_beacons *beacons;
- struct ath12k_link_vif *tx_arvif;
bool nontx_profile_found = false;
- struct ath12k_vif *tx_ahvif;
int ret = 0;
u8 i;
- bss_conf = ath12k_mac_get_link_bss_conf(arvif);
- if (!bss_conf) {
- ath12k_warn(arvif->ar->ab,
- "failed to get link bss conf to update bcn tmpl for vif %pM link %u\n",
- ahvif->vif->addr, arvif->link_id);
- return -ENOLINK;
- }
-
- tx_ahvif = ath12k_vif_to_ahvif(ahvif->vif->mbssid_tx_vif);
- tx_arvif = &tx_ahvif->deflink;
beacons = ieee80211_beacon_get_template_ema_list(ath12k_ar_to_hw(tx_arvif->ar),
- tx_ahvif->vif,
+ tx_arvif->ahvif->vif,
tx_arvif->link_id);
if (!beacons || !beacons->cnt) {
ath12k_warn(arvif->ar->ab,
@@ -1586,13 +1668,12 @@ static int ath12k_mac_setup_bcn_tmpl_ema(struct ath12k_link_vif *arvif)
for (i = 0; i < beacons->cnt; i++) {
if (tx_arvif != arvif && !nontx_profile_found)
ath12k_mac_set_arvif_ies(arvif, beacons->bcn[i].skb,
- bss_conf->bssid_index,
+ bssid_index,
&nontx_profile_found);
ema_args.bcn_cnt = beacons->cnt;
ema_args.bcn_index = i;
- ret = ath12k_wmi_bcn_tmpl(tx_arvif->ar, tx_arvif->vdev_id,
- &beacons->bcn[i].offs,
+ ret = ath12k_wmi_bcn_tmpl(tx_arvif, &beacons->bcn[i].offs,
beacons->bcn[i].skb, &ema_args);
if (ret) {
ath12k_warn(tx_arvif->ar->ab,
@@ -1605,7 +1686,7 @@ static int ath12k_mac_setup_bcn_tmpl_ema(struct ath12k_link_vif *arvif)
if (tx_arvif != arvif && !nontx_profile_found)
ath12k_warn(arvif->ar->ab,
"nontransmitted bssid index %u not found in beacon template\n",
- bss_conf->bssid_index);
+ bssid_index);
ieee80211_beacon_free_ema_list(beacons);
return ret;
@@ -1616,11 +1697,10 @@ static int ath12k_mac_setup_bcn_tmpl(struct ath12k_link_vif *arvif)
struct ath12k_vif *ahvif = arvif->ahvif;
struct ieee80211_vif *vif = ath12k_ahvif_to_vif(ahvif);
struct ieee80211_bss_conf *link_conf;
- struct ath12k_link_vif *tx_arvif = arvif;
+ struct ath12k_link_vif *tx_arvif;
struct ath12k *ar = arvif->ar;
struct ath12k_base *ab = ar->ab;
struct ieee80211_mutable_offsets offs = {};
- struct ath12k_vif *tx_ahvif = ahvif;
bool nontx_profile_found = false;
struct sk_buff *bcn;
int ret;
@@ -1635,17 +1715,20 @@ static int ath12k_mac_setup_bcn_tmpl(struct ath12k_link_vif *arvif)
return -ENOLINK;
}
- if (vif->mbssid_tx_vif) {
- tx_ahvif = ath12k_vif_to_ahvif(vif->mbssid_tx_vif);
- tx_arvif = &tx_ahvif->deflink;
+ tx_arvif = ath12k_mac_get_tx_arvif(arvif);
+ if (tx_arvif) {
if (tx_arvif != arvif && arvif->is_up)
return 0;
if (link_conf->ema_ap)
- return ath12k_mac_setup_bcn_tmpl_ema(arvif);
+ return ath12k_mac_setup_bcn_tmpl_ema(arvif, tx_arvif,
+ link_conf->bssid_index);
+ } else {
+ tx_arvif = arvif;
}
- bcn = ieee80211_beacon_get_template(ath12k_ar_to_hw(tx_arvif->ar), tx_ahvif->vif,
+ bcn = ieee80211_beacon_get_template(ath12k_ar_to_hw(tx_arvif->ar),
+ tx_arvif->ahvif->vif,
&offs, tx_arvif->link_id);
if (!bcn) {
ath12k_warn(ab, "failed to get beacon template from mac80211\n");
@@ -1686,7 +1769,7 @@ static int ath12k_mac_setup_bcn_tmpl(struct ath12k_link_vif *arvif)
}
}
- ret = ath12k_wmi_bcn_tmpl(ar, arvif->vdev_id, &offs, bcn, NULL);
+ ret = ath12k_wmi_bcn_tmpl(arvif, &offs, bcn, NULL);
if (ret)
ath12k_warn(ab, "failed to submit beacon template command: %d\n",
@@ -1702,6 +1785,7 @@ static void ath12k_control_beaconing(struct ath12k_link_vif *arvif,
{
struct ath12k_wmi_vdev_up_params params = {};
struct ath12k_vif *ahvif = arvif->ahvif;
+ struct ath12k_link_vif *tx_arvif;
struct ath12k *ar = arvif->ar;
int ret;
@@ -1732,11 +1816,9 @@ static void ath12k_control_beaconing(struct ath12k_link_vif *arvif,
params.vdev_id = arvif->vdev_id;
params.aid = ahvif->aid;
params.bssid = arvif->bssid;
- if (ahvif->vif->mbssid_tx_vif) {
- struct ath12k_vif *tx_ahvif =
- ath12k_vif_to_ahvif(ahvif->vif->mbssid_tx_vif);
- struct ath12k_link_vif *tx_arvif = &tx_ahvif->deflink;
+ tx_arvif = ath12k_mac_get_tx_arvif(arvif);
+ if (tx_arvif) {
params.tx_bssid = tx_arvif->bssid;
params.nontx_profile_idx = info->bssid_index;
params.nontx_profile_cnt = 1 << info->bssid_indicator;
@@ -3116,6 +3198,7 @@ static void ath12k_peer_assoc_prepare(struct ath12k *ar,
ath12k_peer_assoc_h_smps(arsta, arg);
ath12k_peer_assoc_h_mlo(arsta, arg);
+ arsta->peer_nss = arg->peer_nss;
/* TODO: amsdu_disable req? */
}
@@ -3138,6 +3221,37 @@ static int ath12k_setup_peer_smps(struct ath12k *ar, struct ath12k_link_vif *arv
ath12k_smps_map[smps]);
}
+static u32 ath12k_mac_ieee80211_sta_bw_to_wmi(struct ath12k *ar,
+ struct ieee80211_link_sta *link_sta)
+{
+ u32 bw;
+
+ switch (link_sta->bandwidth) {
+ case IEEE80211_STA_RX_BW_20:
+ bw = WMI_PEER_CHWIDTH_20MHZ;
+ break;
+ case IEEE80211_STA_RX_BW_40:
+ bw = WMI_PEER_CHWIDTH_40MHZ;
+ break;
+ case IEEE80211_STA_RX_BW_80:
+ bw = WMI_PEER_CHWIDTH_80MHZ;
+ break;
+ case IEEE80211_STA_RX_BW_160:
+ bw = WMI_PEER_CHWIDTH_160MHZ;
+ break;
+ case IEEE80211_STA_RX_BW_320:
+ bw = WMI_PEER_CHWIDTH_320MHZ;
+ break;
+ default:
+ ath12k_warn(ar->ab, "Invalid bandwidth %d for link station %pM\n",
+ link_sta->bandwidth, link_sta->addr);
+ bw = WMI_PEER_CHWIDTH_20MHZ;
+ break;
+ }
+
+ return bw;
+}
+
static void ath12k_bss_assoc(struct ath12k *ar,
struct ath12k_link_vif *arvif,
struct ieee80211_bss_conf *bss_conf)
@@ -3358,12 +3472,178 @@ static void ath12k_recalculate_mgmt_rate(struct ath12k *ar,
ath12k_warn(ar->ab, "failed to set beacon tx rate %d\n", ret);
}
+static void ath12k_mac_init_arvif(struct ath12k_vif *ahvif,
+ struct ath12k_link_vif *arvif, int link_id)
+{
+ struct ath12k_hw *ah = ahvif->ah;
+ u8 _link_id;
+ int i;
+
+ lockdep_assert_wiphy(ah->hw->wiphy);
+
+ if (WARN_ON(!arvif))
+ return;
+
+ if (WARN_ON(link_id >= ATH12K_NUM_MAX_LINKS))
+ return;
+
+ if (link_id < 0)
+ _link_id = 0;
+ else
+ _link_id = link_id;
+
+ arvif->ahvif = ahvif;
+ arvif->link_id = _link_id;
+
+ INIT_LIST_HEAD(&arvif->list);
+ INIT_DELAYED_WORK(&arvif->connection_loss_work,
+ ath12k_mac_vif_sta_connection_loss_work);
+
+ for (i = 0; i < ARRAY_SIZE(arvif->bitrate_mask.control); i++) {
+ arvif->bitrate_mask.control[i].legacy = 0xffffffff;
+ memset(arvif->bitrate_mask.control[i].ht_mcs, 0xff,
+ sizeof(arvif->bitrate_mask.control[i].ht_mcs));
+ memset(arvif->bitrate_mask.control[i].vht_mcs, 0xff,
+ sizeof(arvif->bitrate_mask.control[i].vht_mcs));
+ }
+
+ /* Handle MLO related assignments */
+ if (link_id >= 0) {
+ rcu_assign_pointer(ahvif->link[arvif->link_id], arvif);
+ ahvif->links_map |= BIT(_link_id);
+ }
+
+ ath12k_generic_dbg(ATH12K_DBG_MAC,
+ "mac init link arvif (link_id %d%s) for vif %pM. links_map 0x%x",
+ _link_id, (link_id < 0) ? " deflink" : "", ahvif->vif->addr,
+ ahvif->links_map);
+}
+
+static void ath12k_mac_remove_link_interface(struct ieee80211_hw *hw,
+ struct ath12k_link_vif *arvif)
+{
+ struct ath12k_vif *ahvif = arvif->ahvif;
+ struct ath12k_hw *ah = hw->priv;
+ struct ath12k *ar = arvif->ar;
+ int ret;
+
+ lockdep_assert_wiphy(ah->hw->wiphy);
+
+ cancel_delayed_work_sync(&arvif->connection_loss_work);
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac remove link interface (vdev %d link id %d)",
+ arvif->vdev_id, arvif->link_id);
+
+ if (ahvif->vdev_type == WMI_VDEV_TYPE_AP) {
+ ret = ath12k_peer_delete(ar, arvif->vdev_id, arvif->bssid);
+ if (ret)
+ ath12k_warn(ar->ab, "failed to submit AP self-peer removal on vdev %d link id %d: %d",
+ arvif->vdev_id, arvif->link_id, ret);
+ }
+ ath12k_mac_vdev_delete(ar, arvif);
+}
+
+static struct ath12k_link_vif *ath12k_mac_assign_link_vif(struct ath12k_hw *ah,
+ struct ieee80211_vif *vif,
+ u8 link_id)
+{
+ struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif);
+ struct ath12k_link_vif *arvif;
+
+ lockdep_assert_wiphy(ah->hw->wiphy);
+
+ arvif = wiphy_dereference(ah->hw->wiphy, ahvif->link[link_id]);
+ if (arvif)
+ return arvif;
+
+ if (!vif->valid_links) {
+ /* Use deflink for Non-ML VIFs and mark the link id as 0
+ */
+ link_id = 0;
+ arvif = &ahvif->deflink;
+ } else {
+ /* If this is the first link arvif being created for an ML VIF
+ * use the preallocated deflink memory except for scan arvifs
+ */
+ if (!ahvif->links_map && link_id != ATH12K_DEFAULT_SCAN_LINK) {
+ arvif = &ahvif->deflink;
+ } else {
+ arvif = (struct ath12k_link_vif *)
+ kzalloc(sizeof(struct ath12k_link_vif), GFP_KERNEL);
+ if (!arvif)
+ return NULL;
+ }
+ }
+
+ ath12k_mac_init_arvif(ahvif, arvif, link_id);
+
+ return arvif;
+}
+
+static void ath12k_mac_unassign_link_vif(struct ath12k_link_vif *arvif)
+{
+ struct ath12k_vif *ahvif = arvif->ahvif;
+ struct ath12k_hw *ah = ahvif->ah;
+
+ lockdep_assert_wiphy(ah->hw->wiphy);
+
+ rcu_assign_pointer(ahvif->link[arvif->link_id], NULL);
+ synchronize_rcu();
+ ahvif->links_map &= ~BIT(arvif->link_id);
+
+ if (arvif != &ahvif->deflink)
+ kfree(arvif);
+ else
+ memset(arvif, 0, sizeof(*arvif));
+}
+
static int
ath12k_mac_op_change_vif_links(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
u16 old_links, u16 new_links,
struct ieee80211_bss_conf *ol[IEEE80211_MLD_MAX_NUM_LINKS])
{
+ struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif);
+ unsigned long to_remove = old_links & ~new_links;
+ unsigned long to_add = ~old_links & new_links;
+ struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ struct ath12k_link_vif *arvif;
+ u8 link_id;
+
+ lockdep_assert_wiphy(hw->wiphy);
+
+ ath12k_generic_dbg(ATH12K_DBG_MAC,
+ "mac vif link changed for MLD %pM old_links 0x%x new_links 0x%x\n",
+ vif->addr, old_links, new_links);
+
+ for_each_set_bit(link_id, &to_add, IEEE80211_MLD_MAX_NUM_LINKS) {
+ arvif = wiphy_dereference(hw->wiphy, ahvif->link[link_id]);
+ /* mac80211 wants to add link but driver already has the
+ * link. This should not happen ideally.
+ */
+ if (WARN_ON(arvif))
+ return -EINVAL;
+
+ arvif = ath12k_mac_assign_link_vif(ah, vif, link_id);
+ if (WARN_ON(!arvif))
+ return -EINVAL;
+ }
+
+ for_each_set_bit(link_id, &to_remove, IEEE80211_MLD_MAX_NUM_LINKS) {
+ arvif = wiphy_dereference(hw->wiphy, ahvif->link[link_id]);
+ if (WARN_ON(!arvif))
+ return -EINVAL;
+
+ if (!arvif->is_created)
+ continue;
+
+ if (WARN_ON(!arvif->ar))
+ return -EINVAL;
+
+ ath12k_mac_remove_link_interface(hw, arvif);
+ ath12k_mac_unassign_link_vif(arvif);
+ }
+
return 0;
}
@@ -3862,109 +4142,6 @@ static void ath12k_mac_op_link_info_changed(struct ieee80211_hw *hw,
ath12k_mac_bss_info_changed(ar, arvif, info, changed);
}
-static struct ath12k_link_vif *ath12k_mac_assign_link_vif(struct ath12k_hw *ah,
- struct ieee80211_vif *vif,
- u8 link_id)
-{
- struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif);
- struct ath12k_link_vif *arvif;
- int i;
-
- lockdep_assert_wiphy(ah->hw->wiphy);
-
- arvif = wiphy_dereference(ah->hw->wiphy, ahvif->link[link_id]);
- if (arvif)
- return arvif;
-
- if (!vif->valid_links) {
- /* Use deflink for Non-ML VIFs and mark the link id as 0
- */
- link_id = 0;
- arvif = &ahvif->deflink;
- } else {
- /* If this is the first link arvif being created for an ML VIF
- * use the preallocated deflink memory except for scan arvifs
- */
- if (!ahvif->links_map && link_id != ATH12K_DEFAULT_SCAN_LINK) {
- arvif = &ahvif->deflink;
- } else {
- arvif = (struct ath12k_link_vif *)
- kzalloc(sizeof(struct ath12k_link_vif), GFP_KERNEL);
- if (!arvif)
- return NULL;
- }
- }
-
- arvif->ahvif = ahvif;
- arvif->link_id = link_id;
- ahvif->links_map |= BIT(link_id);
-
- INIT_LIST_HEAD(&arvif->list);
- INIT_DELAYED_WORK(&arvif->connection_loss_work,
- ath12k_mac_vif_sta_connection_loss_work);
-
- for (i = 0; i < ARRAY_SIZE(arvif->bitrate_mask.control); i++) {
- arvif->bitrate_mask.control[i].legacy = 0xffffffff;
- memset(arvif->bitrate_mask.control[i].ht_mcs, 0xff,
- sizeof(arvif->bitrate_mask.control[i].ht_mcs));
- memset(arvif->bitrate_mask.control[i].vht_mcs, 0xff,
- sizeof(arvif->bitrate_mask.control[i].vht_mcs));
- }
-
- /* Allocate Default Queue now and reassign during actual vdev create */
- vif->cab_queue = ATH12K_HW_DEFAULT_QUEUE;
- for (i = 0; i < ARRAY_SIZE(vif->hw_queue); i++)
- vif->hw_queue[i] = ATH12K_HW_DEFAULT_QUEUE;
-
- vif->driver_flags |= IEEE80211_VIF_SUPPORTS_UAPSD;
-
- rcu_assign_pointer(ahvif->link[arvif->link_id], arvif);
- ahvif->links_map |= BIT(link_id);
- synchronize_rcu();
- return arvif;
-}
-
-static void ath12k_mac_unassign_link_vif(struct ath12k_link_vif *arvif)
-{
- struct ath12k_vif *ahvif = arvif->ahvif;
- struct ath12k_hw *ah = ahvif->ah;
-
- lockdep_assert_wiphy(ah->hw->wiphy);
-
- rcu_assign_pointer(ahvif->link[arvif->link_id], NULL);
- synchronize_rcu();
- ahvif->links_map &= ~BIT(arvif->link_id);
-
- if (arvif != &ahvif->deflink)
- kfree(arvif);
- else
- memset(arvif, 0, sizeof(*arvif));
-}
-
-static void ath12k_mac_remove_link_interface(struct ieee80211_hw *hw,
- struct ath12k_link_vif *arvif)
-{
- struct ath12k_vif *ahvif = arvif->ahvif;
- struct ath12k_hw *ah = hw->priv;
- struct ath12k *ar = arvif->ar;
- int ret;
-
- lockdep_assert_wiphy(ah->hw->wiphy);
-
- cancel_delayed_work_sync(&arvif->connection_loss_work);
-
- ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac remove link interface (vdev %d link id %d)",
- arvif->vdev_id, arvif->link_id);
-
- if (ahvif->vdev_type == WMI_VDEV_TYPE_AP) {
- ret = ath12k_peer_delete(ar, arvif->vdev_id, arvif->bssid);
- if (ret)
- ath12k_warn(ar->ab, "failed to submit AP self-peer removal on vdev %d link id %d: %d",
- arvif->vdev_id, arvif->link_id, ret);
- }
- ath12k_mac_vdev_delete(ar, arvif);
-}
-
static struct ath12k*
ath12k_mac_select_scan_device(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
@@ -4534,9 +4711,6 @@ static int ath12k_mac_set_key(struct ath12k *ar, enum set_key_cmd cmd,
struct ath12k_link_sta *arsta,
struct ieee80211_key_conf *key)
{
- struct ath12k_vif *ahvif = arvif->ahvif;
- struct ieee80211_vif *vif = ath12k_ahvif_to_vif(ahvif);
- struct ieee80211_bss_conf *link_conf;
struct ieee80211_sta *sta = NULL;
struct ath12k_base *ab = ar->ab;
struct ath12k_peer *peer;
@@ -4553,19 +4727,10 @@ static int ath12k_mac_set_key(struct ath12k *ar, enum set_key_cmd cmd,
if (test_bit(ATH12K_FLAG_HW_CRYPTO_DISABLED, &ab->dev_flags))
return 1;
- link_conf = ath12k_mac_get_link_bss_conf(arvif);
- if (!link_conf) {
- ath12k_warn(ab, "unable to access bss link conf in set key for vif %pM link %u\n",
- vif->addr, arvif->link_id);
- return -ENOLINK;
- }
-
if (sta)
peer_addr = arsta->addr;
- else if (ahvif->vdev_type == WMI_VDEV_TYPE_STA)
- peer_addr = link_conf->bssid;
else
- peer_addr = link_conf->addr;
+ peer_addr = arvif->bssid;
key->hw_key_idx = key->keyidx;
@@ -4909,6 +5074,11 @@ static int ath12k_mac_station_assoc(struct ath12k *ar,
return -EINVAL;
}
+ spin_lock_bh(&ar->data_lock);
+ arsta->bw = ath12k_mac_ieee80211_sta_bw_to_wmi(ar, link_sta);
+ arsta->bw_prev = link_sta->bandwidth;
+ spin_unlock_bh(&ar->data_lock);
+
if (link_sta->vht_cap.vht_supported && num_vht_rates == 1) {
ret = ath12k_mac_set_peer_vht_fixed_rate(arvif, arsta, mask,
band);
@@ -5403,6 +5573,7 @@ static int ath12k_mac_station_add(struct ath12k *ar,
}
}
+ ewma_avg_rssi_init(&arsta->avg_rssi);
return 0;
free_peer:
@@ -5415,37 +5586,6 @@ exit:
return ret;
}
-static u32 ath12k_mac_ieee80211_sta_bw_to_wmi(struct ath12k *ar,
- struct ieee80211_sta *sta)
-{
- u32 bw = WMI_PEER_CHWIDTH_20MHZ;
-
- switch (sta->deflink.bandwidth) {
- case IEEE80211_STA_RX_BW_20:
- bw = WMI_PEER_CHWIDTH_20MHZ;
- break;
- case IEEE80211_STA_RX_BW_40:
- bw = WMI_PEER_CHWIDTH_40MHZ;
- break;
- case IEEE80211_STA_RX_BW_80:
- bw = WMI_PEER_CHWIDTH_80MHZ;
- break;
- case IEEE80211_STA_RX_BW_160:
- bw = WMI_PEER_CHWIDTH_160MHZ;
- break;
- case IEEE80211_STA_RX_BW_320:
- bw = WMI_PEER_CHWIDTH_320MHZ;
- break;
- default:
- ath12k_warn(ar->ab, "Invalid bandwidth %d in rc update for %pM\n",
- sta->deflink.bandwidth, sta->addr);
- bw = WMI_PEER_CHWIDTH_20MHZ;
- break;
- }
-
- return bw;
-}
-
static int ath12k_mac_assign_link_sta(struct ath12k_hw *ah,
struct ath12k_sta *ahsta,
struct ath12k_link_sta *arsta,
@@ -5529,7 +5669,6 @@ static int ath12k_mac_handle_link_sta_state(struct ieee80211_hw *hw,
enum ieee80211_sta_state new_state)
{
struct ieee80211_vif *vif = ath12k_ahvif_to_vif(arvif->ahvif);
- struct ieee80211_sta *sta = ath12k_ahsta_to_sta(arsta->ahsta);
struct ath12k *ar = arvif->ar;
int ret = 0;
@@ -5572,13 +5711,6 @@ static int ath12k_mac_handle_link_sta_state(struct ieee80211_hw *hw,
ath12k_warn(ar->ab, "Failed to associate station: %pM\n",
arsta->addr);
- spin_lock_bh(&ar->data_lock);
-
- arsta->bw = ath12k_mac_ieee80211_sta_bw_to_wmi(ar, sta);
- arsta->bw_prev = sta->deflink.bandwidth;
-
- spin_unlock_bh(&ar->data_lock);
-
/* IEEE80211_STA_ASSOC -> IEEE80211_STA_AUTHORIZED: set peer status as
* authorized
*/
@@ -5846,7 +5978,7 @@ static void ath12k_mac_op_link_sta_rc_update(struct ieee80211_hw *hw,
spin_lock_bh(&ar->data_lock);
if (changed & IEEE80211_RC_BW_CHANGED) {
- bw = ath12k_mac_ieee80211_sta_bw_to_wmi(ar, sta);
+ bw = ath12k_mac_ieee80211_sta_bw_to_wmi(ar, link_sta);
arsta->bw_prev = arsta->bw;
arsta->bw = bw;
}
@@ -6674,7 +6806,8 @@ static void ath12k_mac_copy_eht_cap(struct ath12k *ar,
memset(eht_cap, 0, sizeof(struct ieee80211_sta_eht_cap));
- if (!(test_bit(WMI_TLV_SERVICE_11BE, ar->ab->wmi_ab.svc_map)))
+ if (!(test_bit(WMI_TLV_SERVICE_11BE, ar->ab->wmi_ab.svc_map)) ||
+ ath12k_acpi_get_disable_11be(ar->ab))
return;
eht_cap->has_eht = true;
@@ -7071,6 +7204,22 @@ static void ath12k_mac_add_p2p_noa_ie(struct ath12k *ar,
}
/* Note: called under rcu_read_lock() */
+static void ath12k_mlo_mcast_update_tx_link_address(struct ieee80211_vif *vif,
+ u8 link_id, struct sk_buff *skb,
+ u32 info_flags)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ struct ieee80211_bss_conf *bss_conf;
+
+ if (info_flags & IEEE80211_TX_CTL_HW_80211_ENCAP)
+ return;
+
+ bss_conf = rcu_dereference(vif->link_conf[link_id]);
+ if (bss_conf)
+ ether_addr_copy(hdr->addr2, bss_conf->addr);
+}
+
+/* Note: called under rcu_read_lock() */
static u8 ath12k_mac_get_tx_link(struct ieee80211_sta *sta, struct ieee80211_vif *vif,
u8 link, struct sk_buff *skb, u32 info_flags)
{
@@ -7181,9 +7330,16 @@ static void ath12k_mac_op_tx(struct ieee80211_hw *hw,
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct ieee80211_key_conf *key = info->control.hw_key;
struct ieee80211_sta *sta = control->sta;
+ struct ath12k_link_vif *tmp_arvif;
u32 info_flags = info->flags;
- struct ath12k *ar;
+ struct sk_buff *msdu_copied;
+ struct ath12k *ar, *tmp_ar;
+ struct ath12k_peer *peer;
+ unsigned long links_map;
+ bool is_mcast = false;
+ struct ethhdr *eth;
bool is_prb_rsp;
+ u16 mcbc_gsn;
u8 link_id;
int ret;
@@ -7220,6 +7376,9 @@ static void ath12k_mac_op_tx(struct ieee80211_hw *hw,
is_prb_rsp = ieee80211_is_probe_resp(hdr->frame_control);
if (info_flags & IEEE80211_TX_CTL_HW_80211_ENCAP) {
+ eth = (struct ethhdr *)skb->data;
+ is_mcast = is_multicast_ether_addr(eth->h_dest);
+
skb_cb->flags |= ATH12K_SKB_HW_80211_ENCAP;
} else if (ieee80211_is_mgmt(hdr->frame_control)) {
ret = ath12k_mac_mgmt_tx(ar, skb, is_prb_rsp);
@@ -7231,14 +7390,99 @@ static void ath12k_mac_op_tx(struct ieee80211_hw *hw,
return;
}
+ if (!(info_flags & IEEE80211_TX_CTL_HW_80211_ENCAP))
+ is_mcast = is_multicast_ether_addr(hdr->addr1);
+
/* This is case only for P2P_GO */
if (vif->type == NL80211_IFTYPE_AP && vif->p2p)
ath12k_mac_add_p2p_noa_ie(ar, vif, skb, is_prb_rsp);
- ret = ath12k_dp_tx(ar, arvif, skb);
- if (ret) {
- ath12k_warn(ar->ab, "failed to transmit frame %d\n", ret);
- ieee80211_free_txskb(hw, skb);
+ if (!vif->valid_links || !is_mcast ||
+ test_bit(ATH12K_FLAG_RAW_MODE, &ar->ab->dev_flags)) {
+ ret = ath12k_dp_tx(ar, arvif, skb, false, 0);
+ if (unlikely(ret)) {
+ ath12k_warn(ar->ab, "failed to transmit frame %d\n", ret);
+ ieee80211_free_txskb(ar->ah->hw, skb);
+ return;
+ }
+ } else {
+ mcbc_gsn = atomic_inc_return(&ahvif->mcbc_gsn) & 0xfff;
+
+ links_map = ahvif->links_map;
+ for_each_set_bit(link_id, &links_map,
+ IEEE80211_MLD_MAX_NUM_LINKS) {
+ tmp_arvif = rcu_dereference(ahvif->link[link_id]);
+ if (!tmp_arvif || !tmp_arvif->is_up)
+ continue;
+
+ tmp_ar = tmp_arvif->ar;
+ msdu_copied = skb_copy(skb, GFP_ATOMIC);
+ if (!msdu_copied) {
+ ath12k_err(ar->ab,
+ "skb copy failure link_id 0x%X vdevid 0x%X\n",
+ link_id, tmp_arvif->vdev_id);
+ continue;
+ }
+
+ ath12k_mlo_mcast_update_tx_link_address(vif, link_id,
+ msdu_copied,
+ info_flags);
+
+ skb_cb = ATH12K_SKB_CB(msdu_copied);
+ info = IEEE80211_SKB_CB(msdu_copied);
+ skb_cb->link_id = link_id;
+
+ /* For open mode, skip peer find logic */
+ if (unlikely(ahvif->key_cipher == WMI_CIPHER_NONE))
+ goto skip_peer_find;
+
+ spin_lock_bh(&tmp_ar->ab->base_lock);
+ peer = ath12k_peer_find_by_addr(tmp_ar->ab, tmp_arvif->bssid);
+ if (!peer) {
+ spin_unlock_bh(&tmp_ar->ab->base_lock);
+ ath12k_warn(tmp_ar->ab,
+ "failed to find peer for vdev_id 0x%X addr %pM link_map 0x%X\n",
+ tmp_arvif->vdev_id, tmp_arvif->bssid,
+ ahvif->links_map);
+ dev_kfree_skb_any(msdu_copied);
+ continue;
+ }
+
+ key = peer->keys[peer->mcast_keyidx];
+ if (key) {
+ skb_cb->cipher = key->cipher;
+ skb_cb->flags |= ATH12K_SKB_CIPHER_SET;
+ info->control.hw_key = key;
+
+ hdr = (struct ieee80211_hdr *)msdu_copied->data;
+ if (!ieee80211_has_protected(hdr->frame_control))
+ hdr->frame_control |=
+ cpu_to_le16(IEEE80211_FCTL_PROTECTED);
+ }
+ spin_unlock_bh(&tmp_ar->ab->base_lock);
+
+skip_peer_find:
+ ret = ath12k_dp_tx(tmp_ar, tmp_arvif,
+ msdu_copied, true, mcbc_gsn);
+ if (unlikely(ret)) {
+ if (ret == -ENOMEM) {
+ /* Drops are expected during heavy multicast
+ * frame flood. Print with debug log
+ * level to avoid a lot of console prints
+ */
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
+ "failed to transmit frame %d\n",
+ ret);
+ } else {
+ ath12k_warn(ar->ab,
+ "failed to transmit frame %d\n",
+ ret);
+ }
+
+ dev_kfree_skb_any(msdu_copied);
+ }
+ }
+ ieee80211_free_txskb(ar->ah->hw, skb);
}
}
@@ -7255,8 +7499,40 @@ void ath12k_mac_drain_tx(struct ath12k *ar)
static int ath12k_mac_config_mon_status_default(struct ath12k *ar, bool enable)
{
- return -EOPNOTSUPP;
- /* TODO: Need to support new monitor mode */
+ struct htt_rx_ring_tlv_filter tlv_filter = {};
+ struct ath12k_base *ab = ar->ab;
+ u32 ring_id, i;
+ int ret = 0;
+
+ lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
+
+ if (!ab->hw_params->rxdma1_enable)
+ return ret;
+
+ if (enable) {
+ tlv_filter = ath12k_mac_mon_status_filter_default;
+
+ if (ath12k_debugfs_rx_filter(ar))
+ tlv_filter.rx_filter = ath12k_debugfs_rx_filter(ar);
+ } else {
+ tlv_filter.rxmon_disable = true;
+ }
+
+ for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
+ ring_id = ar->dp.rxdma_mon_dst_ring[i].ring_id;
+ ret = ath12k_dp_tx_htt_rx_filter_setup(ab, ring_id,
+ ar->dp.mac_id + i,
+ HAL_RXDMA_MONITOR_DST,
+ DP_RXDMA_REFILL_RING_SIZE,
+ &tlv_filter);
+ if (ret) {
+ ath12k_err(ab,
+ "failed to setup filter for monitor buf %d\n",
+ ret);
+ }
+ }
+
+ return ret;
}
static int ath12k_mac_start(struct ath12k *ar)
@@ -7363,9 +7639,14 @@ err:
static void ath12k_drain_tx(struct ath12k_hw *ah)
{
- struct ath12k *ar;
+ struct ath12k *ar = ah->radio;
int i;
+ if (ath12k_ftm_mode) {
+ ath12k_err(ar->ab, "fail to start mac operations in ftm mode\n");
+ return;
+ }
+
lockdep_assert_wiphy(ah->hw->wiphy);
for_each_ar(ah, ar, i)
@@ -7394,6 +7675,7 @@ static int ath12k_mac_op_start(struct ieee80211_hw *hw)
case ATH12K_HW_STATE_RESTARTED:
case ATH12K_HW_STATE_WEDGED:
case ATH12K_HW_STATE_ON:
+ case ATH12K_HW_STATE_TM:
ah->state = ATH12K_HW_STATE_OFF;
WARN_ON(1);
@@ -7561,14 +7843,9 @@ static int ath12k_mac_setup_vdev_params_mbssid(struct ath12k_link_vif *arvif,
u32 *flags, u32 *tx_vdev_id)
{
struct ath12k_vif *ahvif = arvif->ahvif;
- struct ieee80211_vif *tx_vif = ahvif->vif->mbssid_tx_vif;
struct ieee80211_bss_conf *link_conf;
struct ath12k *ar = arvif->ar;
struct ath12k_link_vif *tx_arvif;
- struct ath12k_vif *tx_ahvif;
-
- if (!tx_vif)
- return 0;
link_conf = ath12k_mac_get_link_bss_conf(arvif);
if (!link_conf) {
@@ -7577,11 +7854,13 @@ static int ath12k_mac_setup_vdev_params_mbssid(struct ath12k_link_vif *arvif,
return -ENOLINK;
}
- tx_ahvif = ath12k_vif_to_ahvif(tx_vif);
- tx_arvif = &tx_ahvif->deflink;
+ tx_arvif = ath12k_mac_get_tx_arvif(arvif);
+ if (!tx_arvif)
+ return 0;
if (link_conf->nontransmitted) {
- if (ar->ah->hw->wiphy != ieee80211_vif_to_wdev(tx_vif)->wiphy)
+ if (ath12k_ar_to_hw(ar)->wiphy !=
+ ath12k_ar_to_hw(tx_arvif->ar)->wiphy)
return -EINVAL;
*flags = WMI_VDEV_MBSSID_FLAGS_NON_TRANSMIT_AP;
@@ -8066,6 +8345,7 @@ static void ath12k_mac_vif_cache_flush(struct ath12k *ar, struct ath12k_link_vif
struct ieee80211_vif *vif = ath12k_ahvif_to_vif(ahvif);
struct ath12k_vif_cache *cache = ahvif->cache[arvif->link_id];
struct ath12k_base *ab = ar->ab;
+ struct ieee80211_bss_conf *link_conf;
int ret;
@@ -8084,7 +8364,13 @@ static void ath12k_mac_vif_cache_flush(struct ath12k *ar, struct ath12k_link_vif
}
if (cache->bss_conf_changed) {
- ath12k_mac_bss_info_changed(ar, arvif, &vif->bss_conf,
+ link_conf = ath12k_mac_get_link_bss_conf(arvif);
+ if (!link_conf) {
+ ath12k_warn(ar->ab, "unable to access bss link conf in cache flush for vif %pM link %u\n",
+ vif->addr, arvif->link_id);
+ return;
+ }
+ ath12k_mac_bss_info_changed(ar, arvif, link_conf,
cache->bss_conf_changed);
}
@@ -8207,19 +8493,8 @@ static int ath12k_mac_op_add_interface(struct ieee80211_hw *hw,
ahvif->ah = ah;
ahvif->vif = vif;
arvif = &ahvif->deflink;
- arvif->ahvif = ahvif;
- INIT_LIST_HEAD(&arvif->list);
- INIT_DELAYED_WORK(&arvif->connection_loss_work,
- ath12k_mac_vif_sta_connection_loss_work);
-
- for (i = 0; i < ARRAY_SIZE(arvif->bitrate_mask.control); i++) {
- arvif->bitrate_mask.control[i].legacy = 0xffffffff;
- memset(arvif->bitrate_mask.control[i].ht_mcs, 0xff,
- sizeof(arvif->bitrate_mask.control[i].ht_mcs));
- memset(arvif->bitrate_mask.control[i].vht_mcs, 0xff,
- sizeof(arvif->bitrate_mask.control[i].vht_mcs));
- }
+ ath12k_mac_init_arvif(ahvif, arvif, -1);
/* Allocate Default Queue now and reassign during actual vdev create */
vif->cab_queue = ATH12K_HW_DEFAULT_QUEUE;
@@ -8381,29 +8656,6 @@ static void ath12k_mac_op_remove_interface(struct ieee80211_hw *hw,
FIF_PROBE_REQ | \
FIF_FCSFAIL)
-static void ath12k_mac_configure_filter(struct ath12k *ar,
- unsigned int total_flags)
-{
- bool reset_flag;
- int ret;
-
- lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
-
- ar->filter_flags = total_flags;
-
- /* For monitor mode */
- reset_flag = !(ar->filter_flags & FIF_BCN_PRBRESP_PROMISC);
-
- ret = ath12k_dp_tx_htt_monitor_mode_ring_config(ar, reset_flag);
- if (ret)
- ath12k_warn(ar->ab,
- "fail to set monitor filter: %d\n", ret);
-
- ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
- "total_flags:0x%x, reset_flag:%d\n",
- total_flags, reset_flag);
-}
-
static void ath12k_mac_op_configure_filter(struct ieee80211_hw *hw,
unsigned int changed_flags,
unsigned int *total_flags,
@@ -8417,7 +8669,7 @@ static void ath12k_mac_op_configure_filter(struct ieee80211_hw *hw,
ar = ath12k_ah_to_ar(ah, 0);
*total_flags &= SUPPORTED_FILTERS;
- ath12k_mac_configure_filter(ar, *total_flags);
+ ar->filter_flags = *total_flags;
}
static int ath12k_mac_op_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant)
@@ -8677,6 +8929,9 @@ ath12k_mac_mlo_get_vdev_args(struct ath12k_link_vif *arvif,
if (arvif == arvif_p)
continue;
+ if (!arvif_p->is_created)
+ continue;
+
link_conf = wiphy_dereference(ahvif->ah->hw->wiphy,
ahvif->vif->link_conf[arvif_p->link_id]);
@@ -8982,9 +9237,9 @@ ath12k_mac_update_vif_chan(struct ath12k *ar,
int n_vifs)
{
struct ath12k_wmi_vdev_up_params params = {};
+ struct ath12k_link_vif *arvif, *tx_arvif;
struct ieee80211_bss_conf *link_conf;
struct ath12k_base *ab = ar->ab;
- struct ath12k_link_vif *arvif;
struct ieee80211_vif *vif;
struct ath12k_vif *ahvif;
u8 link_id;
@@ -9052,11 +9307,9 @@ ath12k_mac_update_vif_chan(struct ath12k *ar,
params.vdev_id = arvif->vdev_id;
params.aid = ahvif->aid;
params.bssid = arvif->bssid;
- if (vif->mbssid_tx_vif) {
- struct ath12k_vif *tx_ahvif =
- ath12k_vif_to_ahvif(vif->mbssid_tx_vif);
- struct ath12k_link_vif *tx_arvif = &tx_ahvif->deflink;
+ tx_arvif = ath12k_mac_get_tx_arvif(arvif);
+ if (tx_arvif) {
params.tx_bssid = tx_arvif->bssid;
params.nontx_profile_idx = link_conf->bssid_index;
params.nontx_profile_cnt = 1 << link_conf->bssid_indicator;
@@ -9322,9 +9575,6 @@ ath12k_mac_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
if (ahvif->vdev_type != WMI_VDEV_TYPE_MONITOR &&
ar->num_started_vdevs == 1 && ar->monitor_vdev_created)
ath12k_mac_monitor_stop(ar);
-
- ath12k_mac_remove_link_interface(hw, arvif);
- ath12k_mac_unassign_link_vif(arvif);
}
static int
@@ -10017,6 +10267,40 @@ static int ath12k_mac_op_get_survey(struct ieee80211_hw *hw, int idx,
return 0;
}
+static int ath12k_mac_get_fw_stats(struct ath12k *ar, u32 pdev_id,
+ u32 vdev_id, u32 stats_id)
+{
+ struct ath12k_base *ab = ar->ab;
+ struct ath12k_hw *ah = ath12k_ar_to_ah(ar);
+ unsigned long time_left;
+ int ret;
+
+ guard(mutex)(&ah->hw_mutex);
+
+ if (ah->state != ATH12K_HW_STATE_ON)
+ return -ENETDOWN;
+
+ reinit_completion(&ar->fw_stats_complete);
+
+ ret = ath12k_wmi_send_stats_request_cmd(ar, stats_id, vdev_id, pdev_id);
+
+ if (ret) {
+ ath12k_warn(ab, "failed to request fw stats: %d\n", ret);
+ return ret;
+ }
+
+ ath12k_dbg(ab, ATH12K_DBG_WMI,
+ "get fw stat pdev id %d vdev id %d stats id 0x%x\n",
+ pdev_id, vdev_id, stats_id);
+
+ time_left = wait_for_completion_timeout(&ar->fw_stats_complete, 1 * HZ);
+
+ if (!time_left)
+ ath12k_warn(ab, "time out while waiting for get fw stats\n");
+
+ return ret;
+}
+
static void ath12k_mac_op_sta_statistics(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
@@ -10024,10 +10308,19 @@ static void ath12k_mac_op_sta_statistics(struct ieee80211_hw *hw,
{
struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(sta);
struct ath12k_link_sta *arsta;
+ struct ath12k *ar;
+ s8 signal;
+ bool db2dbm;
lockdep_assert_wiphy(hw->wiphy);
arsta = &ahsta->deflink;
+ ar = ath12k_get_ar_by_vif(hw, vif, arsta->link_id);
+ if (!ar)
+ return;
+
+ db2dbm = test_bit(WMI_TLV_SERVICE_HW_DB2DBM_CONVERSION_SUPPORT,
+ ar->ab->wmi_ab.svc_map);
sinfo->rx_duration = arsta->rx_duration;
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_DURATION);
@@ -10035,25 +10328,43 @@ static void ath12k_mac_op_sta_statistics(struct ieee80211_hw *hw,
sinfo->tx_duration = arsta->tx_duration;
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_DURATION);
- if (!arsta->txrate.legacy && !arsta->txrate.nss)
- return;
-
- if (arsta->txrate.legacy) {
- sinfo->txrate.legacy = arsta->txrate.legacy;
- } else {
- sinfo->txrate.mcs = arsta->txrate.mcs;
- sinfo->txrate.nss = arsta->txrate.nss;
- sinfo->txrate.bw = arsta->txrate.bw;
- sinfo->txrate.he_gi = arsta->txrate.he_gi;
- sinfo->txrate.he_dcm = arsta->txrate.he_dcm;
- sinfo->txrate.he_ru_alloc = arsta->txrate.he_ru_alloc;
+ if (arsta->txrate.legacy || arsta->txrate.nss) {
+ if (arsta->txrate.legacy) {
+ sinfo->txrate.legacy = arsta->txrate.legacy;
+ } else {
+ sinfo->txrate.mcs = arsta->txrate.mcs;
+ sinfo->txrate.nss = arsta->txrate.nss;
+ sinfo->txrate.bw = arsta->txrate.bw;
+ sinfo->txrate.he_gi = arsta->txrate.he_gi;
+ sinfo->txrate.he_dcm = arsta->txrate.he_dcm;
+ sinfo->txrate.he_ru_alloc = arsta->txrate.he_ru_alloc;
+ sinfo->txrate.eht_gi = arsta->txrate.eht_gi;
+ sinfo->txrate.eht_ru_alloc = arsta->txrate.eht_ru_alloc;
+ }
+ sinfo->txrate.flags = arsta->txrate.flags;
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE);
}
- sinfo->txrate.flags = arsta->txrate.flags;
- sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE);
/* TODO: Use real NF instead of default one. */
- sinfo->signal = arsta->rssi_comb + ATH12K_DEFAULT_NOISE_FLOOR;
- sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL);
+ signal = arsta->rssi_comb;
+
+ if (!signal &&
+ ahsta->ahvif->vdev_type == WMI_VDEV_TYPE_STA &&
+ !(ath12k_mac_get_fw_stats(ar, ar->pdev->pdev_id, 0,
+ WMI_REQUEST_VDEV_STAT)))
+ signal = arsta->rssi_beacon;
+
+ if (signal) {
+ sinfo->signal = db2dbm ? signal : signal + ATH12K_DEFAULT_NOISE_FLOOR;
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL);
+ }
+
+ sinfo->signal_avg = ewma_avg_rssi_read(&arsta->avg_rssi);
+
+ if (!db2dbm)
+ sinfo->signal_avg += ATH12K_DEFAULT_NOISE_FLOOR;
+
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL_AVG);
}
static int ath12k_mac_op_cancel_remain_on_channel(struct ieee80211_hw *hw,
@@ -10299,6 +10610,10 @@ static const struct ieee80211_ops ath12k_ops = {
.resume = ath12k_wow_op_resume,
.set_wakeup = ath12k_wow_op_set_wakeup,
#endif
+ CFG80211_TESTMODE_CMD(ath12k_tm_cmd)
+#ifdef CONFIG_ATH12K_DEBUGFS
+ .link_sta_add_debugfs = ath12k_debugfs_link_sta_op_add,
+#endif
};
static void ath12k_mac_update_ch_list(struct ath12k *ar,
@@ -10950,6 +11265,8 @@ static int ath12k_mac_hw_register(struct ath12k_hw *ah)
ath12k_iftypes_ext_capa[2].eml_capabilities = cap->eml_cap;
ath12k_iftypes_ext_capa[2].mld_capa_and_ops = cap->mld_cap;
wiphy->flags |= WIPHY_FLAG_SUPPORTS_MLO;
+
+ ieee80211_hw_set(hw, MLO_MCAST_MULTI_LINK_TX);
}
hw->queues = ATH12K_HW_MAX_QUEUES;
@@ -11033,6 +11350,8 @@ static int ath12k_mac_hw_register(struct ath12k_hw *ah)
ath12k_debugfs_register(ar);
}
+ init_completion(&ar->fw_stats_complete);
+
return 0;
err_unregister_hw:
@@ -11133,6 +11452,9 @@ static int __ath12k_mac_mlo_setup(struct ath12k *ar)
}
}
+ if (num_link == 0)
+ return 0;
+
mlo.group_id = cpu_to_le32(ag->id);
mlo.partner_link_id = partner_link_id;
mlo.num_partner_links = num_link;
@@ -11162,10 +11484,16 @@ static int __ath12k_mac_mlo_teardown(struct ath12k *ar)
{
struct ath12k_base *ab = ar->ab;
int ret;
+ u8 num_link;
if (test_bit(ATH12K_FLAG_RECOVERY, &ab->dev_flags))
return 0;
+ num_link = ath12k_get_num_partner_link(ar);
+
+ if (num_link == 0)
+ return 0;
+
ret = ath12k_wmi_mlo_teardown(ar);
if (ret) {
ath12k_warn(ab, "failed to send MLO teardown WMI command for pdev %d: %d\n",
diff --git a/drivers/net/wireless/ath/ath12k/mac.h b/drivers/net/wireless/ath/ath12k/mac.h
index 3594729b6397..ae35b73312bf 100644
--- a/drivers/net/wireless/ath/ath12k/mac.h
+++ b/drivers/net/wireless/ath/ath12k/mac.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH12K_MAC_H
@@ -108,5 +108,11 @@ int ath12k_mac_vdev_stop(struct ath12k_link_vif *arvif);
void ath12k_mac_get_any_chanctx_conf_iter(struct ieee80211_hw *hw,
struct ieee80211_chanctx_conf *conf,
void *data);
-
+u16 ath12k_mac_he_convert_tones_to_ru_tones(u16 tones);
+enum nl80211_eht_ru_alloc ath12k_mac_eht_ru_tones_to_nl80211_eht_ru_alloc(u16 ru_tones);
+enum nl80211_eht_gi ath12k_mac_eht_gi_to_nl80211_eht_gi(u8 sgi);
+struct ieee80211_bss_conf *ath12k_mac_get_link_bss_conf(struct ath12k_link_vif *arvif);
+struct ath12k *ath12k_get_ar_by_vif(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ u8 link_id);
#endif
diff --git a/drivers/net/wireless/ath/ath12k/pci.c b/drivers/net/wireless/ath/ath12k/pci.c
index 06cff3849ab8..b474696ac6d8 100644
--- a/drivers/net/wireless/ath/ath12k/pci.c
+++ b/drivers/net/wireless/ath/ath12k/pci.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2019-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/module.h>
@@ -483,8 +483,11 @@ static void __ath12k_pci_ext_irq_disable(struct ath12k_base *ab)
ath12k_pci_ext_grp_disable(irq_grp);
- napi_synchronize(&irq_grp->napi);
- napi_disable(&irq_grp->napi);
+ if (irq_grp->napi_enabled) {
+ napi_synchronize(&irq_grp->napi);
+ napi_disable(&irq_grp->napi);
+ irq_grp->napi_enabled = false;
+ }
}
}
@@ -646,7 +649,7 @@ static int ath12k_pci_set_irq_affinity_hint(struct ath12k_pci *ab_pci,
if (test_bit(ATH12K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags))
return 0;
- return irq_set_affinity_hint(ab_pci->pdev->irq, m);
+ return irq_set_affinity_and_hint(ab_pci->pdev->irq, m);
}
static int ath12k_pci_config_irq(struct ath12k_base *ab)
@@ -1114,7 +1117,11 @@ void ath12k_pci_ext_irq_enable(struct ath12k_base *ab)
for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX; i++) {
struct ath12k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
- napi_enable(&irq_grp->napi);
+ if (!irq_grp->napi_enabled) {
+ napi_enable(&irq_grp->napi);
+ irq_grp->napi_enabled = true;
+ }
+
ath12k_pci_ext_grp_enable(irq_grp);
}
@@ -1561,6 +1568,7 @@ static int ath12k_pci_probe(struct pci_dev *pdev,
ab_pci->ab = ab;
ab_pci->pdev = pdev;
ab->hif.ops = &ath12k_pci_hif_ops;
+ ab->fw_mode = ATH12K_FIRMWARE_MODE_NORMAL;
pci_set_drvdata(pdev, ab);
spin_lock_init(&ab_pci->window_lock);
@@ -1689,6 +1697,8 @@ static int ath12k_pci_probe(struct pci_dev *pdev,
return 0;
err_free_irq:
+ /* __free_irq() expects the caller to have cleared the affinity hint */
+ ath12k_pci_set_irq_affinity_hint(ab_pci, NULL);
ath12k_pci_free_irq(ab);
err_ce_free:
@@ -1734,9 +1744,9 @@ static void ath12k_pci_remove(struct pci_dev *pdev)
cancel_work_sync(&ab->reset_work);
cancel_work_sync(&ab->dump_work);
ath12k_core_deinit(ab);
- ath12k_fw_unmap(ab);
qmi_fail:
+ ath12k_fw_unmap(ab);
ath12k_mhi_unregister(ab_pci);
ath12k_pci_free_irq(ab);
diff --git a/drivers/net/wireless/ath/ath12k/qmi.c b/drivers/net/wireless/ath/ath12k/qmi.c
index 5c3563383fab..348dbc81bad8 100644
--- a/drivers/net/wireless/ath/ath12k/qmi.c
+++ b/drivers/net/wireless/ath/ath12k/qmi.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/elf.h>
@@ -2056,8 +2056,7 @@ static int ath12k_host_cap_parse_mlo(struct ath12k_base *ab,
}
if (!ab->qmi.num_radios || ab->qmi.num_radios == U8_MAX) {
- ab->single_chip_mlo_supp = false;
-
+ ag->mlo_capable = false;
ath12k_dbg(ab, ATH12K_DBG_QMI,
"skip QMI MLO cap due to invalid num_radio %d\n",
ab->qmi.num_radios);
@@ -2265,10 +2264,6 @@ static void ath12k_qmi_phy_cap_send(struct ath12k_base *ab)
goto out;
}
- if (resp.single_chip_mlo_support_valid &&
- resp.single_chip_mlo_support)
- ab->single_chip_mlo_supp = true;
-
if (!resp.num_phy_valid) {
ret = -ENODATA;
goto out;
@@ -2277,10 +2272,9 @@ static void ath12k_qmi_phy_cap_send(struct ath12k_base *ab)
ab->qmi.num_radios = resp.num_phy;
ath12k_dbg(ab, ATH12K_DBG_QMI,
- "phy capability resp valid %d num_phy %d valid %d board_id %d valid %d single_chip_mlo_support %d\n",
+ "phy capability resp valid %d num_phy %d valid %d board_id %d\n",
resp.num_phy_valid, resp.num_phy,
- resp.board_id_valid, resp.board_id,
- resp.single_chip_mlo_support_valid, resp.single_chip_mlo_support);
+ resp.board_id_valid, resp.board_id);
return;
@@ -2740,6 +2734,15 @@ int ath12k_qmi_request_target_cap(struct ath12k_base *ab)
if (r)
ath12k_dbg(ab, ATH12K_DBG_QMI, "SMBIOS bdf variant name not set.\n");
+ r = ath12k_acpi_start(ab);
+ if (r)
+ /* ACPI is optional so continue in case of an error */
+ ath12k_dbg(ab, ATH12K_DBG_BOOT, "acpi failed: %d\n", r);
+
+ r = ath12k_acpi_check_bdf_variant_name(ab);
+ if (r)
+ ath12k_dbg(ab, ATH12K_DBG_BOOT, "ACPI bdf variant name not set.\n");
+
out:
return ret;
}
diff --git a/drivers/net/wireless/ath/ath12k/reg.h b/drivers/net/wireless/ath/ath12k/reg.h
index 29c7ec3260da..75f80df2aa0c 100644
--- a/drivers/net/wireless/ath/ath12k/reg.h
+++ b/drivers/net/wireless/ath/ath12k/reg.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2019-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH12K_REG_H
@@ -13,6 +13,9 @@
struct ath12k_base;
struct ath12k;
+#define ATH12K_2GHZ_MAX_FREQUENCY 2495
+#define ATH12K_5GHZ_MAX_FREQUENCY 5920
+
/* DFS regdomains supported by Firmware */
enum ath12k_dfs_region {
ATH12K_DFS_REG_UNSET,
diff --git a/drivers/net/wireless/ath/ath12k/rx_desc.h b/drivers/net/wireless/ath/ath12k/rx_desc.h
index 10366bbe9999..6c600473b402 100644
--- a/drivers/net/wireless/ath/ath12k/rx_desc.h
+++ b/drivers/net/wireless/ath/ath12k/rx_desc.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH12K_RX_DESC_H
#define ATH12K_RX_DESC_H
@@ -637,6 +637,8 @@ enum rx_msdu_start_pkt_type {
RX_MSDU_START_PKT_TYPE_11N,
RX_MSDU_START_PKT_TYPE_11AC,
RX_MSDU_START_PKT_TYPE_11AX,
+ RX_MSDU_START_PKT_TYPE_11BA,
+ RX_MSDU_START_PKT_TYPE_11BE,
};
enum rx_msdu_start_sgi {
@@ -1539,12 +1541,4 @@ struct hal_rx_desc {
#define MAX_MU_GROUP_SHOW 16
#define MAX_MU_GROUP_LENGTH (6 * MAX_MU_GROUP_SHOW)
-#define HAL_RX_RU_ALLOC_TYPE_MAX 6
-#define RU_26 1
-#define RU_52 2
-#define RU_106 4
-#define RU_242 9
-#define RU_484 18
-#define RU_996 37
-
#endif /* ATH12K_RX_DESC_H */
diff --git a/drivers/net/wireless/ath/ath12k/testmode.c b/drivers/net/wireless/ath/ath12k/testmode.c
new file mode 100644
index 000000000000..18d56a976dc7
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/testmode.c
@@ -0,0 +1,395 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include "testmode.h"
+#include <net/netlink.h>
+#include "debug.h"
+#include "wmi.h"
+#include "hw.h"
+#include "core.h"
+#include "hif.h"
+#include "../testmode_i.h"
+
+#define ATH12K_FTM_SEGHDR_CURRENT_SEQ GENMASK(3, 0)
+#define ATH12K_FTM_SEGHDR_TOTAL_SEGMENTS GENMASK(7, 4)
+
+static const struct nla_policy ath12k_tm_policy[ATH_TM_ATTR_MAX + 1] = {
+ [ATH_TM_ATTR_CMD] = { .type = NLA_U32 },
+ [ATH_TM_ATTR_DATA] = { .type = NLA_BINARY,
+ .len = ATH_TM_DATA_MAX_LEN },
+ [ATH_TM_ATTR_WMI_CMDID] = { .type = NLA_U32 },
+ [ATH_TM_ATTR_VERSION_MAJOR] = { .type = NLA_U32 },
+ [ATH_TM_ATTR_VERSION_MINOR] = { .type = NLA_U32 },
+};
+
+static struct ath12k *ath12k_tm_get_ar(struct ath12k_base *ab)
+{
+ struct ath12k_pdev *pdev;
+ struct ath12k *ar;
+ int i;
+
+ for (i = 0; i < ab->num_radios; i++) {
+ pdev = &ab->pdevs[i];
+ ar = pdev->ar;
+
+ if (ar && ar->ah->state == ATH12K_HW_STATE_TM)
+ return ar;
+ }
+
+ return NULL;
+}
+
+void ath12k_tm_wmi_event_unsegmented(struct ath12k_base *ab, u32 cmd_id,
+ struct sk_buff *skb)
+{
+ struct sk_buff *nl_skb;
+ struct ath12k *ar;
+
+ ath12k_dbg(ab, ATH12K_DBG_TESTMODE,
+ "testmode event wmi cmd_id %d skb length %d\n",
+ cmd_id, skb->len);
+
+ ath12k_dbg_dump(ab, ATH12K_DBG_TESTMODE, NULL, "", skb->data, skb->len);
+
+ ar = ath12k_tm_get_ar(ab);
+ if (!ar) {
+ ath12k_warn(ab, "testmode event not handled due to invalid pdev\n");
+ return;
+ }
+
+ spin_lock_bh(&ar->data_lock);
+
+ nl_skb = cfg80211_testmode_alloc_event_skb(ar->ah->hw->wiphy,
+ 2 * nla_total_size(sizeof(u32)) +
+ nla_total_size(skb->len),
+ GFP_ATOMIC);
+ spin_unlock_bh(&ar->data_lock);
+
+ if (!nl_skb) {
+ ath12k_warn(ab,
+ "failed to allocate skb for unsegmented testmode wmi event\n");
+ return;
+ }
+
+ if (nla_put_u32(nl_skb, ATH_TM_ATTR_CMD, ATH_TM_CMD_WMI) ||
+ nla_put_u32(nl_skb, ATH_TM_ATTR_WMI_CMDID, cmd_id) ||
+ nla_put(nl_skb, ATH_TM_ATTR_DATA, skb->len, skb->data)) {
+ ath12k_warn(ab, "failed to populate testmode unsegmented event\n");
+ kfree_skb(nl_skb);
+ return;
+ }
+
+ cfg80211_testmode_event(nl_skb, GFP_ATOMIC);
+}
+
+void ath12k_tm_process_event(struct ath12k_base *ab, u32 cmd_id,
+ const struct ath12k_wmi_ftm_event *ftm_msg,
+ u16 length)
+{
+ struct sk_buff *nl_skb;
+ struct ath12k *ar;
+ u32 data_pos, pdev_id;
+ u16 datalen;
+ u8 total_segments, current_seq;
+ u8 const *buf_pos;
+
+ ath12k_dbg(ab, ATH12K_DBG_TESTMODE,
+ "testmode event wmi cmd_id %d ftm event msg %pK datalen %d\n",
+ cmd_id, ftm_msg, length);
+ ath12k_dbg_dump(ab, ATH12K_DBG_TESTMODE, NULL, "", ftm_msg, length);
+ pdev_id = DP_HW2SW_MACID(le32_to_cpu(ftm_msg->seg_hdr.pdev_id));
+
+ if (pdev_id >= ab->num_radios) {
+ ath12k_warn(ab, "testmode event not handled due to invalid pdev id\n");
+ return;
+ }
+
+ ar = ab->pdevs[pdev_id].ar;
+
+ if (!ar) {
+ ath12k_warn(ab, "testmode event not handled due to absence of pdev\n");
+ return;
+ }
+
+ current_seq = le32_get_bits(ftm_msg->seg_hdr.segmentinfo,
+ ATH12K_FTM_SEGHDR_CURRENT_SEQ);
+ total_segments = le32_get_bits(ftm_msg->seg_hdr.segmentinfo,
+ ATH12K_FTM_SEGHDR_TOTAL_SEGMENTS);
+ datalen = length - (sizeof(struct ath12k_wmi_ftm_seg_hdr_params));
+ buf_pos = ftm_msg->data;
+
+ if (current_seq == 0) {
+ ab->ftm_event_obj.expected_seq = 0;
+ ab->ftm_event_obj.data_pos = 0;
+ }
+
+ data_pos = ab->ftm_event_obj.data_pos;
+
+ if ((data_pos + datalen) > ATH_FTM_EVENT_MAX_BUF_LENGTH) {
+ ath12k_warn(ab,
+ "Invalid event length date_pos[%d] datalen[%d]\n",
+ data_pos, datalen);
+ return;
+ }
+
+ memcpy(&ab->ftm_event_obj.eventdata[data_pos], buf_pos, datalen);
+ data_pos += datalen;
+
+ if (++ab->ftm_event_obj.expected_seq != total_segments) {
+ ab->ftm_event_obj.data_pos = data_pos;
+ ath12k_dbg(ab, ATH12K_DBG_TESTMODE,
+ "partial data received current_seq[%d], total_seg[%d]\n",
+ current_seq, total_segments);
+ return;
+ }
+
+ ath12k_dbg(ab, ATH12K_DBG_TESTMODE,
+ "total data length[%d] = [%d]\n",
+ data_pos, ftm_msg->seg_hdr.len);
+
+ spin_lock_bh(&ar->data_lock);
+ nl_skb = cfg80211_testmode_alloc_event_skb(ar->ah->hw->wiphy,
+ 2 * nla_total_size(sizeof(u32)) +
+ nla_total_size(data_pos),
+ GFP_ATOMIC);
+ spin_unlock_bh(&ar->data_lock);
+
+ if (!nl_skb) {
+ ath12k_warn(ab,
+ "failed to allocate skb for testmode wmi event\n");
+ return;
+ }
+
+ if (nla_put_u32(nl_skb, ATH_TM_ATTR_CMD,
+ ATH_TM_CMD_WMI_FTM) ||
+ nla_put_u32(nl_skb, ATH_TM_ATTR_WMI_CMDID, cmd_id) ||
+ nla_put(nl_skb, ATH_TM_ATTR_DATA, data_pos,
+ &ab->ftm_event_obj.eventdata[0])) {
+ ath12k_warn(ab, "failed to populate testmode event");
+ kfree_skb(nl_skb);
+ return;
+ }
+
+ cfg80211_testmode_event(nl_skb, GFP_ATOMIC);
+}
+
+static int ath12k_tm_cmd_get_version(struct ath12k *ar, struct nlattr *tb[])
+{
+ struct sk_buff *skb;
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_TESTMODE,
+ "testmode cmd get version_major %d version_minor %d\n",
+ ATH_TESTMODE_VERSION_MAJOR,
+ ATH_TESTMODE_VERSION_MINOR);
+
+ spin_lock_bh(&ar->data_lock);
+ skb = cfg80211_testmode_alloc_reply_skb(ar->ah->hw->wiphy,
+ 2 * nla_total_size(sizeof(u32)));
+ spin_unlock_bh(&ar->data_lock);
+
+ if (!skb)
+ return -ENOMEM;
+
+ if (nla_put_u32(skb, ATH_TM_ATTR_VERSION_MAJOR,
+ ATH_TESTMODE_VERSION_MAJOR) ||
+ nla_put_u32(skb, ATH_TM_ATTR_VERSION_MINOR,
+ ATH_TESTMODE_VERSION_MINOR)) {
+ kfree_skb(skb);
+ return -ENOBUFS;
+ }
+
+ return cfg80211_testmode_reply(skb);
+}
+
+static int ath12k_tm_cmd_process_ftm(struct ath12k *ar, struct nlattr *tb[])
+{
+ struct ath12k_wmi_pdev *wmi = ar->wmi;
+ struct sk_buff *skb;
+ struct ath12k_wmi_ftm_cmd *ftm_cmd;
+ int ret = 0;
+ void *buf;
+ size_t aligned_len;
+ u32 cmd_id, buf_len;
+ u16 chunk_len, total_bytes, num_segments;
+ u8 segnumber = 0, *bufpos;
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_TESTMODE, "ah->state %d\n", ar->ah->state);
+ if (ar->ah->state != ATH12K_HW_STATE_TM)
+ return -ENETDOWN;
+
+ if (!tb[ATH_TM_ATTR_DATA])
+ return -EINVAL;
+
+ buf = nla_data(tb[ATH_TM_ATTR_DATA]);
+ buf_len = nla_len(tb[ATH_TM_ATTR_DATA]);
+ cmd_id = WMI_PDEV_UTF_CMDID;
+ ath12k_dbg(ar->ab, ATH12K_DBG_TESTMODE,
+ "testmode cmd wmi cmd_id %d buf %pK buf_len %d\n",
+ cmd_id, buf, buf_len);
+ ath12k_dbg_dump(ar->ab, ATH12K_DBG_TESTMODE, NULL, "", buf, buf_len);
+ bufpos = buf;
+ total_bytes = buf_len;
+ num_segments = total_bytes / MAX_WMI_UTF_LEN;
+
+ if (buf_len - (num_segments * MAX_WMI_UTF_LEN))
+ num_segments++;
+
+ while (buf_len) {
+ if (buf_len > MAX_WMI_UTF_LEN)
+ chunk_len = MAX_WMI_UTF_LEN; /* MAX message */
+ else
+ chunk_len = buf_len;
+
+ skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, (chunk_len +
+ sizeof(struct ath12k_wmi_ftm_cmd)));
+
+ if (!skb)
+ return -ENOMEM;
+
+ ftm_cmd = (struct ath12k_wmi_ftm_cmd *)skb->data;
+ aligned_len = chunk_len + sizeof(struct ath12k_wmi_ftm_seg_hdr_params);
+ ftm_cmd->tlv_header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, aligned_len);
+ ftm_cmd->seg_hdr.len = cpu_to_le32(total_bytes);
+ ftm_cmd->seg_hdr.msgref = cpu_to_le32(ar->ftm_msgref);
+ ftm_cmd->seg_hdr.segmentinfo =
+ le32_encode_bits(num_segments,
+ ATH12K_FTM_SEGHDR_TOTAL_SEGMENTS) |
+ le32_encode_bits(segnumber,
+ ATH12K_FTM_SEGHDR_CURRENT_SEQ);
+ ftm_cmd->seg_hdr.pdev_id = cpu_to_le32(ar->pdev->pdev_id);
+ segnumber++;
+ memcpy(&ftm_cmd->data, bufpos, chunk_len);
+ ret = ath12k_wmi_cmd_send(wmi, skb, cmd_id);
+
+ if (ret) {
+ ath12k_warn(ar->ab, "ftm wmi command fail: %d\n", ret);
+ kfree_skb(skb);
+ return ret;
+ }
+
+ buf_len -= chunk_len;
+ bufpos += chunk_len;
+ }
+
+ ++ar->ftm_msgref;
+ return ret;
+}
+
+static int ath12k_tm_cmd_testmode_start(struct ath12k *ar, struct nlattr *tb[])
+{
+ if (ar->ah->state == ATH12K_HW_STATE_TM)
+ return -EALREADY;
+
+ if (ar->ah->state != ATH12K_HW_STATE_OFF)
+ return -EBUSY;
+
+ ar->ab->ftm_event_obj.eventdata = kzalloc(ATH_FTM_EVENT_MAX_BUF_LENGTH,
+ GFP_KERNEL);
+
+ if (!ar->ab->ftm_event_obj.eventdata)
+ return -ENOMEM;
+
+ ar->ah->state = ATH12K_HW_STATE_TM;
+ ar->ftm_msgref = 0;
+ return 0;
+}
+
+static int ath12k_tm_cmd_wmi(struct ath12k *ar, struct nlattr *tb[])
+{
+ struct ath12k_wmi_pdev *wmi = ar->wmi;
+ struct sk_buff *skb;
+ struct wmi_pdev_set_param_cmd *cmd;
+ int ret = 0, tag;
+ void *buf;
+ u32 cmd_id, buf_len;
+
+ if (!tb[ATH_TM_ATTR_DATA])
+ return -EINVAL;
+
+ if (!tb[ATH_TM_ATTR_WMI_CMDID])
+ return -EINVAL;
+
+ buf = nla_data(tb[ATH_TM_ATTR_DATA]);
+ buf_len = nla_len(tb[ATH_TM_ATTR_DATA]);
+
+ if (!buf_len) {
+ ath12k_warn(ar->ab, "No data present in testmode command\n");
+ return -EINVAL;
+ }
+
+ cmd_id = nla_get_u32(tb[ATH_TM_ATTR_WMI_CMDID]);
+
+ cmd = buf;
+ tag = le32_get_bits(cmd->tlv_header, WMI_TLV_TAG);
+
+ if (tag == WMI_TAG_PDEV_SET_PARAM_CMD)
+ cmd->pdev_id = cpu_to_le32(ar->pdev->pdev_id);
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_TESTMODE,
+ "testmode cmd wmi cmd_id %d buf length %d\n",
+ cmd_id, buf_len);
+
+ ath12k_dbg_dump(ar->ab, ATH12K_DBG_TESTMODE, NULL, "", buf, buf_len);
+
+ skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, buf_len);
+
+ if (!skb)
+ return -ENOMEM;
+
+ memcpy(skb->data, buf, buf_len);
+
+ ret = ath12k_wmi_cmd_send(wmi, skb, cmd_id);
+ if (ret) {
+ dev_kfree_skb(skb);
+ ath12k_warn(ar->ab, "failed to transmit wmi command (testmode): %d\n",
+ ret);
+ }
+
+ return ret;
+}
+
+int ath12k_tm_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ void *data, int len)
+{
+ struct ath12k_hw *ah = hw->priv;
+ struct ath12k *ar = NULL;
+ struct nlattr *tb[ATH_TM_ATTR_MAX + 1];
+ struct ath12k_base *ab;
+ struct wiphy *wiphy = hw->wiphy;
+ int ret;
+
+ lockdep_assert_held(&wiphy->mtx);
+
+ ret = nla_parse(tb, ATH_TM_ATTR_MAX, data, len, ath12k_tm_policy,
+ NULL);
+ if (ret)
+ return ret;
+
+ if (!tb[ATH_TM_ATTR_CMD])
+ return -EINVAL;
+
+ /* TODO: have to handle ar for MLO case */
+ if (ah->num_radio)
+ ar = ah->radio;
+
+ if (!ar)
+ return -EINVAL;
+
+ ab = ar->ab;
+ switch (nla_get_u32(tb[ATH_TM_ATTR_CMD])) {
+ case ATH_TM_CMD_WMI:
+ return ath12k_tm_cmd_wmi(ar, tb);
+ case ATH_TM_CMD_TESTMODE_START:
+ return ath12k_tm_cmd_testmode_start(ar, tb);
+ case ATH_TM_CMD_GET_VERSION:
+ return ath12k_tm_cmd_get_version(ar, tb);
+ case ATH_TM_CMD_WMI_FTM:
+ set_bit(ATH12K_FLAG_FTM_SEGMENTED, &ab->dev_flags);
+ return ath12k_tm_cmd_process_ftm(ar, tb);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
diff --git a/drivers/net/wireless/ath/ath12k/testmode.h b/drivers/net/wireless/ath/ath12k/testmode.h
new file mode 100644
index 000000000000..ef6ab21d19b8
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/testmode.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include "core.h"
+#include "hif.h"
+
+#ifdef CONFIG_NL80211_TESTMODE
+
+void ath12k_tm_wmi_event_unsegmented(struct ath12k_base *ab, u32 cmd_id,
+ struct sk_buff *skb);
+void ath12k_tm_process_event(struct ath12k_base *ab, u32 cmd_id,
+ const struct ath12k_wmi_ftm_event *ftm_msg,
+ u16 length);
+int ath12k_tm_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ void *data, int len);
+
+#else
+
+static inline void ath12k_tm_wmi_event_unsegmented(struct ath12k_base *ab, u32 cmd_id,
+ struct sk_buff *skb)
+{
+}
+
+static inline void ath12k_tm_process_event(struct ath12k_base *ab, u32 cmd_id,
+ const struct ath12k_wmi_ftm_event *msg,
+ u16 length)
+{
+}
+
+static inline int ath12k_tm_cmd(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ void *data, int len)
+{
+ return 0;
+}
+
+#endif
diff --git a/drivers/net/wireless/ath/ath12k/wmi.c b/drivers/net/wireless/ath/ath12k/wmi.c
index abb510d235a5..6d1ea5f3a791 100644
--- a/drivers/net/wireless/ath/ath12k/wmi.c
+++ b/drivers/net/wireless/ath/ath12k/wmi.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/skbuff.h>
#include <linux/ctype.h>
@@ -15,16 +15,22 @@
#include <linux/time.h>
#include <linux/of.h>
#include "core.h"
+#include "debugfs.h"
#include "debug.h"
#include "mac.h"
#include "hw.h"
#include "peer.h"
#include "p2p.h"
+#include "testmode.h"
struct ath12k_wmi_svc_ready_parse {
bool wmi_svc_bitmap_done;
};
+struct wmi_tlv_fw_stats_parse {
+ const struct wmi_stats_event *ev;
+};
+
struct ath12k_wmi_dma_ring_caps_parse {
struct ath12k_wmi_dma_ring_caps_params *dma_ring_caps;
u32 n_dma_ring_caps;
@@ -173,7 +179,7 @@ static const struct ath12k_wmi_tlv_policy ath12k_wmi_tlv_policies[] = {
.min_len = sizeof(struct wmi_p2p_noa_event) },
};
-static __le32 ath12k_wmi_tlv_hdr(u32 cmd, u32 len)
+__le32 ath12k_wmi_tlv_hdr(u32 cmd, u32 len)
{
return le32_encode_bits(cmd, WMI_TLV_TAG) |
le32_encode_bits(len, WMI_TLV_LEN);
@@ -814,6 +820,39 @@ int ath12k_wmi_mgmt_send(struct ath12k *ar, u32 vdev_id, u32 buf_id,
return ret;
}
+int ath12k_wmi_send_stats_request_cmd(struct ath12k *ar, u32 stats_id,
+ u32 vdev_id, u32 pdev_id)
+{
+ struct ath12k_wmi_pdev *wmi = ar->wmi;
+ struct wmi_request_stats_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_request_stats_cmd *)skb->data;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_REQUEST_STATS_CMD,
+ sizeof(*cmd));
+
+ cmd->stats_id = cpu_to_le32(stats_id);
+ cmd->vdev_id = cpu_to_le32(vdev_id);
+ cmd->pdev_id = cpu_to_le32(pdev_id);
+
+ ret = ath12k_wmi_cmd_send(wmi, skb, WMI_REQUEST_STATS_CMDID);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to send WMI_REQUEST_STATS cmd\n");
+ dev_kfree_skb(skb);
+ }
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
+ "WMI request stats 0x%x vdev id %d pdev id %d\n",
+ stats_id, vdev_id, pdev_id);
+
+ return ret;
+}
+
int ath12k_wmi_vdev_create(struct ath12k *ar, u8 *macaddr,
struct ath12k_wmi_vdev_create_arg *args)
{
@@ -1888,14 +1927,19 @@ int ath12k_wmi_p2p_go_bcn_ie(struct ath12k *ar, u32 vdev_id,
return ret;
}
-int ath12k_wmi_bcn_tmpl(struct ath12k *ar, u32 vdev_id,
+int ath12k_wmi_bcn_tmpl(struct ath12k_link_vif *arvif,
struct ieee80211_mutable_offsets *offs,
struct sk_buff *bcn,
struct ath12k_wmi_bcn_tmpl_ema_arg *ema_args)
{
+ struct ath12k *ar = arvif->ar;
struct ath12k_wmi_pdev *wmi = ar->wmi;
+ struct ath12k_base *ab = ar->ab;
struct wmi_bcn_tmpl_cmd *cmd;
struct ath12k_wmi_bcn_prb_info_params *bcn_prb_info;
+ struct ath12k_vif *ahvif = arvif->ahvif;
+ struct ieee80211_bss_conf *conf;
+ u32 vdev_id = arvif->vdev_id;
struct wmi_tlv *tlv;
struct sk_buff *skb;
u32 ema_params = 0;
@@ -1903,6 +1947,14 @@ int ath12k_wmi_bcn_tmpl(struct ath12k *ar, u32 vdev_id,
int ret, len;
size_t aligned_len = roundup(bcn->len, 4);
+ conf = ath12k_mac_get_link_bss_conf(arvif);
+ if (!conf) {
+ ath12k_warn(ab,
+ "unable to access bss link conf in beacon template command for vif %pM link %u\n",
+ ahvif->vif->addr, arvif->link_id);
+ return -EINVAL;
+ }
+
len = sizeof(*cmd) + sizeof(*bcn_prb_info) + TLV_HDR_SIZE + aligned_len;
skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
@@ -1914,8 +1966,16 @@ int ath12k_wmi_bcn_tmpl(struct ath12k *ar, u32 vdev_id,
sizeof(*cmd));
cmd->vdev_id = cpu_to_le32(vdev_id);
cmd->tim_ie_offset = cpu_to_le32(offs->tim_offset);
- cmd->csa_switch_count_offset = cpu_to_le32(offs->cntdwn_counter_offs[0]);
- cmd->ext_csa_switch_count_offset = cpu_to_le32(offs->cntdwn_counter_offs[1]);
+
+ if (conf->csa_active) {
+ cmd->csa_switch_count_offset =
+ cpu_to_le32(offs->cntdwn_counter_offs[0]);
+ cmd->ext_csa_switch_count_offset =
+ cpu_to_le32(offs->cntdwn_counter_offs[1]);
+ cmd->csa_event_bitmap = cpu_to_le32(0xFFFFFFFF);
+ arvif->current_cntdown_counter = bcn->data[offs->cntdwn_counter_offs[0]];
+ }
+
cmd->buf_len = cpu_to_le32(bcn->len);
cmd->mbssid_ie_offset = cpu_to_le32(offs->mbssid_off);
if (ema_args) {
@@ -1945,7 +2005,7 @@ int ath12k_wmi_bcn_tmpl(struct ath12k *ar, u32 vdev_id,
ret = ath12k_wmi_cmd_send(wmi, skb, WMI_BCN_TMPL_CMDID);
if (ret) {
- ath12k_warn(ar->ab, "failed to send WMI_BCN_TMPL_CMDID\n");
+ ath12k_warn(ab, "failed to send WMI_BCN_TMPL_CMDID\n");
dev_kfree_skb(skb);
}
@@ -2373,8 +2433,8 @@ void ath12k_wmi_start_scan_init(struct ath12k *ar,
arg->dwell_time_active = 50;
arg->dwell_time_active_2g = 0;
arg->dwell_time_passive = 150;
- arg->dwell_time_active_6g = 40;
- arg->dwell_time_passive_6g = 30;
+ arg->dwell_time_active_6g = 70;
+ arg->dwell_time_passive_6g = 70;
arg->min_rest_time = 50;
arg->max_rest_time = 500;
arg->repeat_probe_time = 0;
@@ -2794,6 +2854,8 @@ int ath12k_wmi_send_scan_chan_list_cmd(struct ath12k *ar,
WMI_CHAN_REG_INFO1_REG_CLS);
*reg2 |= le32_encode_bits(channel_arg->antennamax,
WMI_CHAN_REG_INFO2_ANT_MAX);
+ *reg2 |= le32_encode_bits(channel_arg->maxregpower,
+ WMI_CHAN_REG_INFO2_MAX_TX_PWR);
ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
"WMI chan scan list chan[%d] = %u, chan_info->info %8x\n",
@@ -6842,8 +6904,584 @@ static void ath12k_peer_assoc_conf_event(struct ath12k_base *ab, struct sk_buff
rcu_read_unlock();
}
+static void
+ath12k_wmi_fw_vdev_stats_dump(struct ath12k *ar,
+ struct ath12k_fw_stats *fw_stats,
+ char *buf, u32 *length)
+{
+ const struct ath12k_fw_stats_vdev *vdev;
+ u32 buf_len = ATH12K_FW_STATS_BUF_SIZE;
+ struct ath12k_link_vif *arvif;
+ u32 len = *length;
+ u8 *vif_macaddr;
+ int i;
+
+ len += scnprintf(buf + len, buf_len - len, "\n");
+ len += scnprintf(buf + len, buf_len - len, "%30s\n",
+ "ath12k VDEV stats");
+ len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
+ "=================");
+
+ list_for_each_entry(vdev, &fw_stats->vdevs, list) {
+ arvif = ath12k_mac_get_arvif(ar, vdev->vdev_id);
+ if (!arvif)
+ continue;
+ vif_macaddr = arvif->ahvif->vif->addr;
+
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "VDEV ID", vdev->vdev_id);
+ len += scnprintf(buf + len, buf_len - len, "%30s %pM\n",
+ "VDEV MAC address", vif_macaddr);
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "beacon snr", vdev->beacon_snr);
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "data snr", vdev->data_snr);
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "num rx frames", vdev->num_rx_frames);
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "num rts fail", vdev->num_rts_fail);
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "num rts success", vdev->num_rts_success);
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "num rx err", vdev->num_rx_err);
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "num rx discard", vdev->num_rx_discard);
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "num tx not acked", vdev->num_tx_not_acked);
+
+ for (i = 0 ; i < WLAN_MAX_AC; i++)
+ len += scnprintf(buf + len, buf_len - len,
+ "%25s [%02d] %u\n",
+ "num tx frames", i,
+ vdev->num_tx_frames[i]);
+
+ for (i = 0 ; i < WLAN_MAX_AC; i++)
+ len += scnprintf(buf + len, buf_len - len,
+ "%25s [%02d] %u\n",
+ "num tx frames retries", i,
+ vdev->num_tx_frames_retries[i]);
+
+ for (i = 0 ; i < WLAN_MAX_AC; i++)
+ len += scnprintf(buf + len, buf_len - len,
+ "%25s [%02d] %u\n",
+ "num tx frames failures", i,
+ vdev->num_tx_frames_failures[i]);
+
+ for (i = 0 ; i < MAX_TX_RATE_VALUES; i++)
+ len += scnprintf(buf + len, buf_len - len,
+ "%25s [%02d] 0x%08x\n",
+ "tx rate history", i,
+ vdev->tx_rate_history[i]);
+ for (i = 0 ; i < MAX_TX_RATE_VALUES; i++)
+ len += scnprintf(buf + len, buf_len - len,
+ "%25s [%02d] %u\n",
+ "beacon rssi history", i,
+ vdev->beacon_rssi_history[i]);
+
+ len += scnprintf(buf + len, buf_len - len, "\n");
+ *length = len;
+ }
+}
+
+static void
+ath12k_wmi_fw_bcn_stats_dump(struct ath12k *ar,
+ struct ath12k_fw_stats *fw_stats,
+ char *buf, u32 *length)
+{
+ const struct ath12k_fw_stats_bcn *bcn;
+ u32 buf_len = ATH12K_FW_STATS_BUF_SIZE;
+ struct ath12k_link_vif *arvif;
+ u32 len = *length;
+ size_t num_bcn;
+
+ num_bcn = list_count_nodes(&fw_stats->bcn);
+
+ len += scnprintf(buf + len, buf_len - len, "\n");
+ len += scnprintf(buf + len, buf_len - len, "%30s (%zu)\n",
+ "ath12k Beacon stats", num_bcn);
+ len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
+ "===================");
+
+ list_for_each_entry(bcn, &fw_stats->bcn, list) {
+ arvif = ath12k_mac_get_arvif(ar, bcn->vdev_id);
+ if (!arvif)
+ continue;
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "VDEV ID", bcn->vdev_id);
+ len += scnprintf(buf + len, buf_len - len, "%30s %pM\n",
+ "VDEV MAC address", arvif->ahvif->vif->addr);
+ len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
+ "================");
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "Num of beacon tx success", bcn->tx_bcn_succ_cnt);
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "Num of beacon tx failures", bcn->tx_bcn_outage_cnt);
+
+ len += scnprintf(buf + len, buf_len - len, "\n");
+ *length = len;
+ }
+}
+
+static void
+ath12k_wmi_fw_pdev_base_stats_dump(const struct ath12k_fw_stats_pdev *pdev,
+ char *buf, u32 *length, u64 fw_soc_drop_cnt)
+{
+ u32 len = *length;
+ u32 buf_len = ATH12K_FW_STATS_BUF_SIZE;
+
+ len = scnprintf(buf + len, buf_len - len, "\n");
+ len += scnprintf(buf + len, buf_len - len, "%30s\n",
+ "ath12k PDEV stats");
+ len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
+ "=================");
+
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Channel noise floor", pdev->ch_noise_floor);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "Channel TX power", pdev->chan_tx_power);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "TX frame count", pdev->tx_frame_count);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "RX frame count", pdev->rx_frame_count);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "RX clear count", pdev->rx_clear_count);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "Cycle count", pdev->cycle_count);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "PHY error count", pdev->phy_err_count);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10llu\n",
+ "soc drop count", fw_soc_drop_cnt);
+
+ *length = len;
+}
+
+static void
+ath12k_wmi_fw_pdev_tx_stats_dump(const struct ath12k_fw_stats_pdev *pdev,
+ char *buf, u32 *length)
+{
+ u32 len = *length;
+ u32 buf_len = ATH12K_FW_STATS_BUF_SIZE;
+
+ len += scnprintf(buf + len, buf_len - len, "\n%30s\n",
+ "ath12k PDEV TX stats");
+ len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
+ "====================");
+
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "HTT cookies queued", pdev->comp_queued);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "HTT cookies disp.", pdev->comp_delivered);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "MSDU queued", pdev->msdu_enqued);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "MPDU queued", pdev->mpdu_enqued);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "MSDUs dropped", pdev->wmm_drop);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Local enqued", pdev->local_enqued);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Local freed", pdev->local_freed);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "HW queued", pdev->hw_queued);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "PPDUs reaped", pdev->hw_reaped);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Num underruns", pdev->underrun);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "PPDUs cleaned", pdev->tx_abort);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "MPDUs requeued", pdev->mpdus_requed);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "Excessive retries", pdev->tx_ko);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "HW rate", pdev->data_rc);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "Sched self triggers", pdev->self_triggers);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "Dropped due to SW retries",
+ pdev->sw_retry_failure);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "Illegal rate phy errors",
+ pdev->illgl_rate_phy_err);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "PDEV continuous xretry", pdev->pdev_cont_xretry);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "TX timeout", pdev->pdev_tx_timeout);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "PDEV resets", pdev->pdev_resets);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "Stateless TIDs alloc failures",
+ pdev->stateless_tid_alloc_failure);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "PHY underrun", pdev->phy_underrun);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "MPDU is more than txop limit", pdev->txop_ovf);
+ *length = len;
+}
+
+static void
+ath12k_wmi_fw_pdev_rx_stats_dump(const struct ath12k_fw_stats_pdev *pdev,
+ char *buf, u32 *length)
+{
+ u32 len = *length;
+ u32 buf_len = ATH12K_FW_STATS_BUF_SIZE;
+
+ len += scnprintf(buf + len, buf_len - len, "\n%30s\n",
+ "ath12k PDEV RX stats");
+ len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
+ "====================");
+
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Mid PPDU route change",
+ pdev->mid_ppdu_route_change);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Tot. number of statuses", pdev->status_rcvd);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Extra frags on rings 0", pdev->r0_frags);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Extra frags on rings 1", pdev->r1_frags);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Extra frags on rings 2", pdev->r2_frags);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Extra frags on rings 3", pdev->r3_frags);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "MSDUs delivered to HTT", pdev->htt_msdus);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "MPDUs delivered to HTT", pdev->htt_mpdus);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "MSDUs delivered to stack", pdev->loc_msdus);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "MPDUs delivered to stack", pdev->loc_mpdus);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Oversized AMSUs", pdev->oversize_amsdu);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "PHY errors", pdev->phy_errs);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "PHY errors drops", pdev->phy_err_drop);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "MPDU errors (FCS, MIC, ENC)", pdev->mpdu_errs);
+ *length = len;
+}
+
+static void
+ath12k_wmi_fw_pdev_stats_dump(struct ath12k *ar,
+ struct ath12k_fw_stats *fw_stats,
+ char *buf, u32 *length)
+{
+ const struct ath12k_fw_stats_pdev *pdev;
+ u32 len = *length;
+
+ pdev = list_first_entry_or_null(&fw_stats->pdevs,
+ struct ath12k_fw_stats_pdev, list);
+ if (!pdev) {
+ ath12k_warn(ar->ab, "failed to get pdev stats\n");
+ return;
+ }
+
+ ath12k_wmi_fw_pdev_base_stats_dump(pdev, buf, &len,
+ ar->ab->fw_soc_drop_count);
+ ath12k_wmi_fw_pdev_tx_stats_dump(pdev, buf, &len);
+ ath12k_wmi_fw_pdev_rx_stats_dump(pdev, buf, &len);
+
+ *length = len;
+}
+
+void ath12k_wmi_fw_stats_dump(struct ath12k *ar,
+ struct ath12k_fw_stats *fw_stats,
+ u32 stats_id, char *buf)
+{
+ u32 len = 0;
+ u32 buf_len = ATH12K_FW_STATS_BUF_SIZE;
+
+ spin_lock_bh(&ar->data_lock);
+
+ switch (stats_id) {
+ case WMI_REQUEST_VDEV_STAT:
+ ath12k_wmi_fw_vdev_stats_dump(ar, fw_stats, buf, &len);
+ break;
+ case WMI_REQUEST_BCN_STAT:
+ ath12k_wmi_fw_bcn_stats_dump(ar, fw_stats, buf, &len);
+ break;
+ case WMI_REQUEST_PDEV_STAT:
+ ath12k_wmi_fw_pdev_stats_dump(ar, fw_stats, buf, &len);
+ break;
+ default:
+ break;
+ }
+
+ spin_unlock_bh(&ar->data_lock);
+
+ if (len >= buf_len)
+ buf[len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ ath12k_debugfs_fw_stats_reset(ar);
+}
+
+static void
+ath12k_wmi_pull_vdev_stats(const struct wmi_vdev_stats_params *src,
+ struct ath12k_fw_stats_vdev *dst)
+{
+ int i;
+
+ dst->vdev_id = le32_to_cpu(src->vdev_id);
+ dst->beacon_snr = le32_to_cpu(src->beacon_snr);
+ dst->data_snr = le32_to_cpu(src->data_snr);
+ dst->num_rx_frames = le32_to_cpu(src->num_rx_frames);
+ dst->num_rts_fail = le32_to_cpu(src->num_rts_fail);
+ dst->num_rts_success = le32_to_cpu(src->num_rts_success);
+ dst->num_rx_err = le32_to_cpu(src->num_rx_err);
+ dst->num_rx_discard = le32_to_cpu(src->num_rx_discard);
+ dst->num_tx_not_acked = le32_to_cpu(src->num_tx_not_acked);
+
+ for (i = 0; i < WLAN_MAX_AC; i++)
+ dst->num_tx_frames[i] =
+ le32_to_cpu(src->num_tx_frames[i]);
+
+ for (i = 0; i < WLAN_MAX_AC; i++)
+ dst->num_tx_frames_retries[i] =
+ le32_to_cpu(src->num_tx_frames_retries[i]);
+
+ for (i = 0; i < WLAN_MAX_AC; i++)
+ dst->num_tx_frames_failures[i] =
+ le32_to_cpu(src->num_tx_frames_failures[i]);
+
+ for (i = 0; i < MAX_TX_RATE_VALUES; i++)
+ dst->tx_rate_history[i] =
+ le32_to_cpu(src->tx_rate_history[i]);
+
+ for (i = 0; i < MAX_TX_RATE_VALUES; i++)
+ dst->beacon_rssi_history[i] =
+ le32_to_cpu(src->beacon_rssi_history[i]);
+}
+
+static void
+ath12k_wmi_pull_bcn_stats(const struct ath12k_wmi_bcn_stats_params *src,
+ struct ath12k_fw_stats_bcn *dst)
+{
+ dst->vdev_id = le32_to_cpu(src->vdev_id);
+ dst->tx_bcn_succ_cnt = le32_to_cpu(src->tx_bcn_succ_cnt);
+ dst->tx_bcn_outage_cnt = le32_to_cpu(src->tx_bcn_outage_cnt);
+}
+
+static void
+ath12k_wmi_pull_pdev_stats_base(const struct ath12k_wmi_pdev_base_stats_params *src,
+ struct ath12k_fw_stats_pdev *dst)
+{
+ dst->ch_noise_floor = a_sle32_to_cpu(src->chan_nf);
+ dst->tx_frame_count = __le32_to_cpu(src->tx_frame_count);
+ dst->rx_frame_count = __le32_to_cpu(src->rx_frame_count);
+ dst->rx_clear_count = __le32_to_cpu(src->rx_clear_count);
+ dst->cycle_count = __le32_to_cpu(src->cycle_count);
+ dst->phy_err_count = __le32_to_cpu(src->phy_err_count);
+ dst->chan_tx_power = __le32_to_cpu(src->chan_tx_pwr);
+}
+
+static void
+ath12k_wmi_pull_pdev_stats_tx(const struct ath12k_wmi_pdev_tx_stats_params *src,
+ struct ath12k_fw_stats_pdev *dst)
+{
+ dst->comp_queued = a_sle32_to_cpu(src->comp_queued);
+ dst->comp_delivered = a_sle32_to_cpu(src->comp_delivered);
+ dst->msdu_enqued = a_sle32_to_cpu(src->msdu_enqued);
+ dst->mpdu_enqued = a_sle32_to_cpu(src->mpdu_enqued);
+ dst->wmm_drop = a_sle32_to_cpu(src->wmm_drop);
+ dst->local_enqued = a_sle32_to_cpu(src->local_enqued);
+ dst->local_freed = a_sle32_to_cpu(src->local_freed);
+ dst->hw_queued = a_sle32_to_cpu(src->hw_queued);
+ dst->hw_reaped = a_sle32_to_cpu(src->hw_reaped);
+ dst->underrun = a_sle32_to_cpu(src->underrun);
+ dst->tx_abort = a_sle32_to_cpu(src->tx_abort);
+ dst->mpdus_requed = a_sle32_to_cpu(src->mpdus_requed);
+ dst->tx_ko = __le32_to_cpu(src->tx_ko);
+ dst->data_rc = __le32_to_cpu(src->data_rc);
+ dst->self_triggers = __le32_to_cpu(src->self_triggers);
+ dst->sw_retry_failure = __le32_to_cpu(src->sw_retry_failure);
+ dst->illgl_rate_phy_err = __le32_to_cpu(src->illgl_rate_phy_err);
+ dst->pdev_cont_xretry = __le32_to_cpu(src->pdev_cont_xretry);
+ dst->pdev_tx_timeout = __le32_to_cpu(src->pdev_tx_timeout);
+ dst->pdev_resets = __le32_to_cpu(src->pdev_resets);
+ dst->stateless_tid_alloc_failure =
+ __le32_to_cpu(src->stateless_tid_alloc_failure);
+ dst->phy_underrun = __le32_to_cpu(src->phy_underrun);
+ dst->txop_ovf = __le32_to_cpu(src->txop_ovf);
+}
+
+static void
+ath12k_wmi_pull_pdev_stats_rx(const struct ath12k_wmi_pdev_rx_stats_params *src,
+ struct ath12k_fw_stats_pdev *dst)
+{
+ dst->mid_ppdu_route_change =
+ a_sle32_to_cpu(src->mid_ppdu_route_change);
+ dst->status_rcvd = a_sle32_to_cpu(src->status_rcvd);
+ dst->r0_frags = a_sle32_to_cpu(src->r0_frags);
+ dst->r1_frags = a_sle32_to_cpu(src->r1_frags);
+ dst->r2_frags = a_sle32_to_cpu(src->r2_frags);
+ dst->r3_frags = a_sle32_to_cpu(src->r3_frags);
+ dst->htt_msdus = a_sle32_to_cpu(src->htt_msdus);
+ dst->htt_mpdus = a_sle32_to_cpu(src->htt_mpdus);
+ dst->loc_msdus = a_sle32_to_cpu(src->loc_msdus);
+ dst->loc_mpdus = a_sle32_to_cpu(src->loc_mpdus);
+ dst->oversize_amsdu = a_sle32_to_cpu(src->oversize_amsdu);
+ dst->phy_errs = a_sle32_to_cpu(src->phy_errs);
+ dst->phy_err_drop = a_sle32_to_cpu(src->phy_err_drop);
+ dst->mpdu_errs = a_sle32_to_cpu(src->mpdu_errs);
+}
+
+static int ath12k_wmi_tlv_fw_stats_data_parse(struct ath12k_base *ab,
+ struct wmi_tlv_fw_stats_parse *parse,
+ const void *ptr,
+ u16 len)
+{
+ const struct wmi_stats_event *ev = parse->ev;
+ struct ath12k_fw_stats stats = {0};
+ struct ath12k *ar;
+ struct ath12k_link_vif *arvif;
+ struct ieee80211_sta *sta;
+ struct ath12k_sta *ahsta;
+ struct ath12k_link_sta *arsta;
+ int i, ret = 0;
+ const void *data = ptr;
+
+ INIT_LIST_HEAD(&stats.vdevs);
+ INIT_LIST_HEAD(&stats.bcn);
+ INIT_LIST_HEAD(&stats.pdevs);
+
+ if (!ev) {
+ ath12k_warn(ab, "failed to fetch update stats ev");
+ return -EPROTO;
+ }
+
+ rcu_read_lock();
+
+ ar = ath12k_mac_get_ar_by_pdev_id(ab, le32_to_cpu(ev->pdev_id));
+ if (!ar) {
+ ath12k_warn(ab, "invalid pdev id %d in update stats event\n",
+ le32_to_cpu(ev->pdev_id));
+ ret = -EPROTO;
+ goto exit;
+ }
+
+ for (i = 0; i < le32_to_cpu(ev->num_vdev_stats); i++) {
+ const struct wmi_vdev_stats_params *src;
+ struct ath12k_fw_stats_vdev *dst;
+
+ src = data;
+ if (len < sizeof(*src)) {
+ ret = -EPROTO;
+ goto exit;
+ }
+
+ arvif = ath12k_mac_get_arvif(ar, le32_to_cpu(src->vdev_id));
+ if (arvif) {
+ sta = ieee80211_find_sta_by_ifaddr(ath12k_ar_to_hw(ar),
+ arvif->bssid,
+ NULL);
+ if (sta) {
+ ahsta = ath12k_sta_to_ahsta(sta);
+ arsta = &ahsta->deflink;
+ arsta->rssi_beacon = le32_to_cpu(src->beacon_snr);
+ ath12k_dbg(ab, ATH12K_DBG_WMI,
+ "wmi stats vdev id %d snr %d\n",
+ src->vdev_id, src->beacon_snr);
+ } else {
+ ath12k_dbg(ab, ATH12K_DBG_WMI,
+ "not found station bssid %pM for vdev stat\n",
+ arvif->bssid);
+ }
+ }
+
+ data += sizeof(*src);
+ len -= sizeof(*src);
+ dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
+ if (!dst)
+ continue;
+ ath12k_wmi_pull_vdev_stats(src, dst);
+ stats.stats_id = WMI_REQUEST_VDEV_STAT;
+ list_add_tail(&dst->list, &stats.vdevs);
+ }
+ for (i = 0; i < le32_to_cpu(ev->num_bcn_stats); i++) {
+ const struct ath12k_wmi_bcn_stats_params *src;
+ struct ath12k_fw_stats_bcn *dst;
+
+ src = data;
+ if (len < sizeof(*src)) {
+ ret = -EPROTO;
+ goto exit;
+ }
+
+ data += sizeof(*src);
+ len -= sizeof(*src);
+ dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
+ if (!dst)
+ continue;
+ ath12k_wmi_pull_bcn_stats(src, dst);
+ stats.stats_id = WMI_REQUEST_BCN_STAT;
+ list_add_tail(&dst->list, &stats.bcn);
+ }
+ for (i = 0; i < le32_to_cpu(ev->num_pdev_stats); i++) {
+ const struct ath12k_wmi_pdev_stats_params *src;
+ struct ath12k_fw_stats_pdev *dst;
+
+ src = data;
+ if (len < sizeof(*src)) {
+ ret = -EPROTO;
+ goto exit;
+ }
+
+ stats.stats_id = WMI_REQUEST_PDEV_STAT;
+
+ data += sizeof(*src);
+ len -= sizeof(*src);
+
+ dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
+ if (!dst)
+ continue;
+
+ ath12k_wmi_pull_pdev_stats_base(&src->base, dst);
+ ath12k_wmi_pull_pdev_stats_tx(&src->tx, dst);
+ ath12k_wmi_pull_pdev_stats_rx(&src->rx, dst);
+ list_add_tail(&dst->list, &stats.pdevs);
+ }
+
+ complete(&ar->fw_stats_complete);
+ ath12k_debugfs_fw_stats_process(ar, &stats);
+exit:
+ rcu_read_unlock();
+ return ret;
+}
+
+static int ath12k_wmi_tlv_fw_stats_parse(struct ath12k_base *ab,
+ u16 tag, u16 len,
+ const void *ptr, void *data)
+{
+ struct wmi_tlv_fw_stats_parse *parse = data;
+ int ret = 0;
+
+ switch (tag) {
+ case WMI_TAG_STATS_EVENT:
+ parse->ev = ptr;
+ break;
+ case WMI_TAG_ARRAY_BYTE:
+ ret = ath12k_wmi_tlv_fw_stats_data_parse(ab, parse, ptr, len);
+ break;
+ default:
+ break;
+ }
+ return ret;
+}
+
static void ath12k_update_stats_event(struct ath12k_base *ab, struct sk_buff *skb)
{
+ int ret;
+ struct wmi_tlv_fw_stats_parse parse = {};
+
+ ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
+ ath12k_wmi_tlv_fw_stats_parse,
+ &parse);
+ if (ret)
+ ath12k_warn(ab, "failed to parse fw stats %d\n", ret);
}
/* PDEV_CTL_FAILSAFE_CHECK_EVENT is received from FW when the frequency scanned
@@ -6889,17 +7527,15 @@ ath12k_wmi_process_csa_switch_count_event(struct ath12k_base *ab,
const struct ath12k_wmi_pdev_csa_event *ev,
const u32 *vdev_ids)
{
- int i;
+ u32 current_switch_count = le32_to_cpu(ev->current_switch_count);
+ u32 num_vdevs = le32_to_cpu(ev->num_vdevs);
struct ieee80211_bss_conf *conf;
struct ath12k_link_vif *arvif;
struct ath12k_vif *ahvif;
-
- /* Finish CSA once the switch count becomes NULL */
- if (ev->current_switch_count)
- return;
+ int i;
rcu_read_lock();
- for (i = 0; i < le32_to_cpu(ev->num_vdevs); i++) {
+ for (i = 0; i < num_vdevs; i++) {
arvif = ath12k_mac_get_arvif_by_vdev_id(ab, vdev_ids[i]);
if (!arvif) {
@@ -6922,8 +7558,26 @@ ath12k_wmi_process_csa_switch_count_event(struct ath12k_base *ab,
continue;
}
- if (arvif->is_up && conf->csa_active)
- ieee80211_csa_finish(ahvif->vif, 0);
+ if (!arvif->is_up || !conf->csa_active)
+ continue;
+
+ /* Finish CSA when counter reaches zero */
+ if (!current_switch_count) {
+ ieee80211_csa_finish(ahvif->vif, arvif->link_id);
+ arvif->current_cntdown_counter = 0;
+ } else if (current_switch_count > 1) {
+			/* If the count in the event is not what we expect, don't update
+			 * the mac80211 count, since during a beacon Tx failure the count
+			 * in the firmware does not decrement and this event carries the
+			 * previous count value again
+ */
+ if (current_switch_count != arvif->current_cntdown_counter)
+ continue;
+
+ arvif->current_cntdown_counter =
+ ieee80211_beacon_update_cntdwn(ahvif->vif,
+ arvif->link_id);
+ }
}
rcu_read_unlock();
}
@@ -7026,6 +7680,35 @@ exit:
kfree(tb);
}
+static void ath12k_tm_wmi_event_segmented(struct ath12k_base *ab, u32 cmd_id,
+ struct sk_buff *skb)
+{
+ const struct ath12k_wmi_ftm_event *ev;
+ const void **tb;
+ int ret;
+ u16 length;
+
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
+
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath12k_warn(ab, "failed to parse ftm event tlv: %d\n", ret);
+ return;
+ }
+
+ ev = tb[WMI_TAG_ARRAY_BYTE];
+ if (!ev) {
+ ath12k_warn(ab, "failed to fetch ftm msg\n");
+ kfree(tb);
+ return;
+ }
+
+ length = skb->len - TLV_HDR_SIZE;
+ ath12k_tm_process_event(ab, cmd_id, ev, length);
+ kfree(tb);
+ tb = NULL;
+}
+
static void
ath12k_wmi_pdev_temperature_event(struct ath12k_base *ab,
struct sk_buff *skb)
@@ -7446,6 +8129,389 @@ static void ath12k_wmi_event_teardown_complete(struct ath12k_base *ab,
kfree(tb);
}
+#ifdef CONFIG_ATH12K_DEBUGFS
+static int ath12k_wmi_tpc_stats_copy_buffer(struct ath12k_base *ab,
+ const void *ptr, u16 tag, u16 len,
+ struct wmi_tpc_stats_arg *tpc_stats)
+{
+ u32 len1, len2, len3, len4;
+ s16 *dst_ptr;
+ s8 *dst_ptr_ctl;
+
+ len1 = le32_to_cpu(tpc_stats->max_reg_allowed_power.tpc_reg_pwr.reg_array_len);
+ len2 = le32_to_cpu(tpc_stats->rates_array1.tpc_rates_array.rate_array_len);
+ len3 = le32_to_cpu(tpc_stats->rates_array2.tpc_rates_array.rate_array_len);
+ len4 = le32_to_cpu(tpc_stats->ctl_array.tpc_ctl_pwr.ctl_array_len);
+
+ switch (tpc_stats->event_count) {
+ case ATH12K_TPC_STATS_CONFIG_REG_PWR_EVENT:
+ if (len1 > len)
+ return -ENOBUFS;
+
+ if (tpc_stats->tlvs_rcvd & WMI_TPC_REG_PWR_ALLOWED) {
+ dst_ptr = tpc_stats->max_reg_allowed_power.reg_pwr_array;
+ memcpy(dst_ptr, ptr, len1);
+ }
+ break;
+ case ATH12K_TPC_STATS_RATES_EVENT1:
+ if (len2 > len)
+ return -ENOBUFS;
+
+ if (tpc_stats->tlvs_rcvd & WMI_TPC_RATES_ARRAY1) {
+ dst_ptr = tpc_stats->rates_array1.rate_array;
+ memcpy(dst_ptr, ptr, len2);
+ }
+ break;
+ case ATH12K_TPC_STATS_RATES_EVENT2:
+ if (len3 > len)
+ return -ENOBUFS;
+
+ if (tpc_stats->tlvs_rcvd & WMI_TPC_RATES_ARRAY2) {
+ dst_ptr = tpc_stats->rates_array2.rate_array;
+ memcpy(dst_ptr, ptr, len3);
+ }
+ break;
+ case ATH12K_TPC_STATS_CTL_TABLE_EVENT:
+ if (len4 > len)
+ return -ENOBUFS;
+
+ if (tpc_stats->tlvs_rcvd & WMI_TPC_CTL_PWR_ARRAY) {
+ dst_ptr_ctl = tpc_stats->ctl_array.ctl_pwr_table;
+ memcpy(dst_ptr_ctl, ptr, len4);
+ }
+ break;
+ }
+ return 0;
+}
+
+static int ath12k_tpc_get_reg_pwr(struct ath12k_base *ab,
+ struct wmi_tpc_stats_arg *tpc_stats,
+ struct wmi_max_reg_power_fixed_params *ev)
+{
+ struct wmi_max_reg_power_allowed_arg *reg_pwr;
+ u32 total_size;
+
+ ath12k_dbg(ab, ATH12K_DBG_WMI,
+ "Received reg power array type %d length %d for tpc stats\n",
+ ev->reg_power_type, ev->reg_array_len);
+
+ switch (le32_to_cpu(ev->reg_power_type)) {
+ case TPC_STATS_REG_PWR_ALLOWED_TYPE:
+ reg_pwr = &tpc_stats->max_reg_allowed_power;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+	/* Each entry is 2 bytes, hence the product of the dimensions is multiplied by 2 */
+ total_size = le32_to_cpu(ev->d1) * le32_to_cpu(ev->d2) *
+ le32_to_cpu(ev->d3) * le32_to_cpu(ev->d4) * 2;
+ if (le32_to_cpu(ev->reg_array_len) != total_size) {
+ ath12k_warn(ab,
+ "Total size and reg_array_len doesn't match for tpc stats\n");
+ return -EINVAL;
+ }
+
+ memcpy(&reg_pwr->tpc_reg_pwr, ev, sizeof(struct wmi_max_reg_power_fixed_params));
+
+ reg_pwr->reg_pwr_array = kzalloc(le32_to_cpu(reg_pwr->tpc_reg_pwr.reg_array_len),
+ GFP_ATOMIC);
+ if (!reg_pwr->reg_pwr_array)
+ return -ENOMEM;
+
+ tpc_stats->tlvs_rcvd |= WMI_TPC_REG_PWR_ALLOWED;
+
+ return 0;
+}
+
+static int ath12k_tpc_get_rate_array(struct ath12k_base *ab,
+ struct wmi_tpc_stats_arg *tpc_stats,
+ struct wmi_tpc_rates_array_fixed_params *ev)
+{
+ struct wmi_tpc_rates_array_arg *rates_array;
+ u32 flag = 0, rate_array_len;
+
+ ath12k_dbg(ab, ATH12K_DBG_WMI,
+ "Received rates array type %d length %d for tpc stats\n",
+ ev->rate_array_type, ev->rate_array_len);
+
+ switch (le32_to_cpu(ev->rate_array_type)) {
+ case ATH12K_TPC_STATS_RATES_ARRAY1:
+ rates_array = &tpc_stats->rates_array1;
+ flag = WMI_TPC_RATES_ARRAY1;
+ break;
+ case ATH12K_TPC_STATS_RATES_ARRAY2:
+ rates_array = &tpc_stats->rates_array2;
+ flag = WMI_TPC_RATES_ARRAY2;
+ break;
+ default:
+ ath12k_warn(ab,
+ "Received invalid type of rates array for tpc stats\n");
+ return -EINVAL;
+ }
+ memcpy(&rates_array->tpc_rates_array, ev,
+ sizeof(struct wmi_tpc_rates_array_fixed_params));
+ rate_array_len = le32_to_cpu(rates_array->tpc_rates_array.rate_array_len);
+ rates_array->rate_array = kzalloc(rate_array_len, GFP_ATOMIC);
+ if (!rates_array->rate_array)
+ return -ENOMEM;
+
+ tpc_stats->tlvs_rcvd |= flag;
+ return 0;
+}
+
+static int ath12k_tpc_get_ctl_pwr_tbl(struct ath12k_base *ab,
+ struct wmi_tpc_stats_arg *tpc_stats,
+ struct wmi_tpc_ctl_pwr_fixed_params *ev)
+{
+ struct wmi_tpc_ctl_pwr_table_arg *ctl_array;
+ u32 total_size, ctl_array_len, flag = 0;
+
+ ath12k_dbg(ab, ATH12K_DBG_WMI,
+ "Received ctl array type %d length %d for tpc stats\n",
+ ev->ctl_array_type, ev->ctl_array_len);
+
+ switch (le32_to_cpu(ev->ctl_array_type)) {
+ case ATH12K_TPC_STATS_CTL_ARRAY:
+ ctl_array = &tpc_stats->ctl_array;
+ flag = WMI_TPC_CTL_PWR_ARRAY;
+ break;
+ default:
+ ath12k_warn(ab,
+ "Received invalid type of ctl pwr table for tpc stats\n");
+ return -EINVAL;
+ }
+
+ total_size = le32_to_cpu(ev->d1) * le32_to_cpu(ev->d2) *
+ le32_to_cpu(ev->d3) * le32_to_cpu(ev->d4);
+ if (le32_to_cpu(ev->ctl_array_len) != total_size) {
+ ath12k_warn(ab,
+ "Total size and ctl_array_len doesn't match for tpc stats\n");
+ return -EINVAL;
+ }
+
+ memcpy(&ctl_array->tpc_ctl_pwr, ev, sizeof(struct wmi_tpc_ctl_pwr_fixed_params));
+ ctl_array_len = le32_to_cpu(ctl_array->tpc_ctl_pwr.ctl_array_len);
+ ctl_array->ctl_pwr_table = kzalloc(ctl_array_len, GFP_ATOMIC);
+ if (!ctl_array->ctl_pwr_table)
+ return -ENOMEM;
+
+ tpc_stats->tlvs_rcvd |= flag;
+ return 0;
+}
+
+static int ath12k_wmi_tpc_stats_subtlv_parser(struct ath12k_base *ab,
+ u16 tag, u16 len,
+ const void *ptr, void *data)
+{
+ struct wmi_tpc_rates_array_fixed_params *tpc_rates_array;
+ struct wmi_max_reg_power_fixed_params *tpc_reg_pwr;
+ struct wmi_tpc_ctl_pwr_fixed_params *tpc_ctl_pwr;
+ struct wmi_tpc_stats_arg *tpc_stats = data;
+ struct wmi_tpc_config_params *tpc_config;
+ int ret = 0;
+
+ if (!tpc_stats) {
+ ath12k_warn(ab, "tpc stats memory unavailable\n");
+ return -EINVAL;
+ }
+
+ switch (tag) {
+ case WMI_TAG_TPC_STATS_CONFIG_EVENT:
+ tpc_config = (struct wmi_tpc_config_params *)ptr;
+ memcpy(&tpc_stats->tpc_config, tpc_config,
+ sizeof(struct wmi_tpc_config_params));
+ break;
+ case WMI_TAG_TPC_STATS_REG_PWR_ALLOWED:
+ tpc_reg_pwr = (struct wmi_max_reg_power_fixed_params *)ptr;
+ ret = ath12k_tpc_get_reg_pwr(ab, tpc_stats, tpc_reg_pwr);
+ break;
+ case WMI_TAG_TPC_STATS_RATES_ARRAY:
+ tpc_rates_array = (struct wmi_tpc_rates_array_fixed_params *)ptr;
+ ret = ath12k_tpc_get_rate_array(ab, tpc_stats, tpc_rates_array);
+ break;
+ case WMI_TAG_TPC_STATS_CTL_PWR_TABLE_EVENT:
+ tpc_ctl_pwr = (struct wmi_tpc_ctl_pwr_fixed_params *)ptr;
+ ret = ath12k_tpc_get_ctl_pwr_tbl(ab, tpc_stats, tpc_ctl_pwr);
+ break;
+ default:
+ ath12k_warn(ab,
+ "Received invalid tag for tpc stats in subtlvs\n");
+ return -EINVAL;
+ }
+ return ret;
+}
+
+static int ath12k_wmi_tpc_stats_event_parser(struct ath12k_base *ab,
+ u16 tag, u16 len,
+ const void *ptr, void *data)
+{
+ struct wmi_tpc_stats_arg *tpc_stats = (struct wmi_tpc_stats_arg *)data;
+ int ret;
+
+ switch (tag) {
+ case WMI_TAG_HALPHY_CTRL_PATH_EVENT_FIXED_PARAM:
+ ret = 0;
+		/* Fixed param is already processed */
+ break;
+ case WMI_TAG_ARRAY_STRUCT:
+ /* len 0 is expected for array of struct when there
+ * is no content of that type to pack inside that tlv
+ */
+ if (len == 0)
+ return 0;
+ ret = ath12k_wmi_tlv_iter(ab, ptr, len,
+ ath12k_wmi_tpc_stats_subtlv_parser,
+ tpc_stats);
+ break;
+ case WMI_TAG_ARRAY_INT16:
+ if (len == 0)
+ return 0;
+ ret = ath12k_wmi_tpc_stats_copy_buffer(ab, ptr,
+ WMI_TAG_ARRAY_INT16,
+ len, tpc_stats);
+ break;
+ case WMI_TAG_ARRAY_BYTE:
+ if (len == 0)
+ return 0;
+ ret = ath12k_wmi_tpc_stats_copy_buffer(ab, ptr,
+ WMI_TAG_ARRAY_BYTE,
+ len, tpc_stats);
+ break;
+ default:
+ ath12k_warn(ab, "Received invalid tag for tpc stats\n");
+ ret = -EINVAL;
+ break;
+ }
+ return ret;
+}
+
+void ath12k_wmi_free_tpc_stats_mem(struct ath12k *ar)
+{
+ struct wmi_tpc_stats_arg *tpc_stats = ar->debug.tpc_stats;
+
+ lockdep_assert_held(&ar->data_lock);
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "tpc stats mem free\n");
+ if (tpc_stats) {
+ kfree(tpc_stats->max_reg_allowed_power.reg_pwr_array);
+ kfree(tpc_stats->rates_array1.rate_array);
+ kfree(tpc_stats->rates_array2.rate_array);
+ kfree(tpc_stats->ctl_array.ctl_pwr_table);
+ kfree(tpc_stats);
+ ar->debug.tpc_stats = NULL;
+ }
+}
+
+static void ath12k_wmi_process_tpc_stats(struct ath12k_base *ab,
+ struct sk_buff *skb)
+{
+ struct ath12k_wmi_pdev_tpc_stats_event_fixed_params *fixed_param;
+ struct wmi_tpc_stats_arg *tpc_stats;
+ const struct wmi_tlv *tlv;
+ void *ptr = skb->data;
+ struct ath12k *ar;
+ u16 tlv_tag;
+ u32 event_count;
+ int ret;
+
+ if (!skb->data) {
+ ath12k_warn(ab, "No data present in tpc stats event\n");
+ return;
+ }
+
+ if (skb->len < (sizeof(*fixed_param) + TLV_HDR_SIZE)) {
+ ath12k_warn(ab, "TPC stats event size invalid\n");
+ return;
+ }
+
+ tlv = (struct wmi_tlv *)ptr;
+ tlv_tag = le32_get_bits(tlv->header, WMI_TLV_TAG);
+ ptr += sizeof(*tlv);
+
+ if (tlv_tag != WMI_TAG_HALPHY_CTRL_PATH_EVENT_FIXED_PARAM) {
+ ath12k_warn(ab, "TPC stats without fixed param tlv at start\n");
+ return;
+ }
+
+ fixed_param = (struct ath12k_wmi_pdev_tpc_stats_event_fixed_params *)ptr;
+ rcu_read_lock();
+ ar = ath12k_mac_get_ar_by_pdev_id(ab, le32_to_cpu(fixed_param->pdev_id) + 1);
+ if (!ar) {
+ ath12k_warn(ab, "Failed to get ar for tpc stats\n");
+ rcu_read_unlock();
+ return;
+ }
+ spin_lock_bh(&ar->data_lock);
+ if (!ar->debug.tpc_request) {
+		/* The event was received either without a request or after the
+		 * timeout; if memory is already allocated, free it
+ */
+ if (ar->debug.tpc_stats) {
+ ath12k_warn(ab, "Freeing memory for tpc_stats\n");
+ ath12k_wmi_free_tpc_stats_mem(ar);
+ }
+ goto unlock;
+ }
+
+ event_count = le32_to_cpu(fixed_param->event_count);
+ if (event_count == 0) {
+ if (ar->debug.tpc_stats) {
+ ath12k_warn(ab,
+ "Invalid tpc memory present\n");
+ goto unlock;
+ }
+ ar->debug.tpc_stats =
+ kzalloc(sizeof(struct wmi_tpc_stats_arg),
+ GFP_ATOMIC);
+ if (!ar->debug.tpc_stats) {
+ ath12k_warn(ab,
+ "Failed to allocate memory for tpc stats\n");
+ goto unlock;
+ }
+ }
+
+ tpc_stats = ar->debug.tpc_stats;
+ if (!tpc_stats) {
+ ath12k_warn(ab, "tpc stats memory unavailable\n");
+ goto unlock;
+ }
+
+ if (!(event_count == 0)) {
+ if (event_count != tpc_stats->event_count + 1) {
+ ath12k_warn(ab,
+ "Invalid tpc event received\n");
+ goto unlock;
+ }
+ }
+ tpc_stats->pdev_id = le32_to_cpu(fixed_param->pdev_id);
+ tpc_stats->end_of_event = le32_to_cpu(fixed_param->end_of_event);
+ tpc_stats->event_count = le32_to_cpu(fixed_param->event_count);
+ ath12k_dbg(ab, ATH12K_DBG_WMI,
+ "tpc stats event_count %d\n",
+ tpc_stats->event_count);
+ ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
+ ath12k_wmi_tpc_stats_event_parser,
+ tpc_stats);
+ if (ret) {
+ ath12k_wmi_free_tpc_stats_mem(ar);
+ ath12k_warn(ab, "failed to parse tpc_stats tlv: %d\n", ret);
+ goto unlock;
+ }
+
+ if (tpc_stats->end_of_event)
+ complete(&ar->debug.tpc_complete);
+
+unlock:
+ spin_unlock_bh(&ar->data_lock);
+ rcu_read_unlock();
+}
+#else
+static void ath12k_wmi_process_tpc_stats(struct ath12k_base *ab,
+ struct sk_buff *skb)
+{
+}
+#endif
+
static void ath12k_wmi_op_rx(struct ath12k_base *ab, struct sk_buff *skb)
{
struct wmi_cmd_hdr *cmd_hdr;
@@ -7571,6 +8637,9 @@ static void ath12k_wmi_op_rx(struct ath12k_base *ab, struct sk_buff *skb)
case WMI_MLO_TEARDOWN_COMPLETE_EVENTID:
ath12k_wmi_event_teardown_complete(ab, skb);
break;
+ case WMI_HALPHY_STATS_CTRL_PATH_EVENTID:
+ ath12k_wmi_process_tpc_stats(ab, skb);
+ break;
/* add Unsupported events (rare) here */
case WMI_TBTTOFFSET_EXT_UPDATE_EVENTID:
case WMI_PEER_OPER_MODE_CHANGE_EVENTID:
@@ -7584,7 +8653,12 @@ static void ath12k_wmi_op_rx(struct ath12k_base *ab, struct sk_buff *skb)
case WMI_OBSS_COLOR_COLLISION_DETECTION_EVENTID:
/* debug might flood hence silently ignore (no-op) */
break;
- /* TODO: Add remaining events */
+ case WMI_PDEV_UTF_EVENTID:
+ if (test_bit(ATH12K_FLAG_FTM_SEGMENTED, &ab->dev_flags))
+ ath12k_tm_wmi_event_segmented(ab, id, skb);
+ else
+ ath12k_tm_wmi_event_unsegmented(ab, id, skb);
+ break;
default:
ath12k_dbg(ab, ATH12K_DBG_WMI, "Unknown eventid: 0x%x\n", id);
break;
@@ -7721,6 +8795,74 @@ int ath12k_wmi_simulate_radar(struct ath12k *ar)
return ath12k_wmi_send_unit_test_cmd(ar, wmi_ut, dfs_args);
}
+int ath12k_wmi_send_tpc_stats_request(struct ath12k *ar,
+ enum wmi_halphy_ctrl_path_stats_id tpc_stats_type)
+{
+ struct wmi_request_halphy_ctrl_path_stats_cmd_fixed_params *cmd;
+ struct ath12k_wmi_pdev *wmi = ar->wmi;
+ struct sk_buff *skb;
+ struct wmi_tlv *tlv;
+ __le32 *pdev_id;
+ u32 buf_len;
+ void *ptr;
+ int ret;
+
+ buf_len = sizeof(*cmd) + TLV_HDR_SIZE + sizeof(u32) + TLV_HDR_SIZE + TLV_HDR_SIZE;
+
+ skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, buf_len);
+ if (!skb)
+ return -ENOMEM;
+ cmd = (struct wmi_request_halphy_ctrl_path_stats_cmd_fixed_params *)skb->data;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_HALPHY_CTRL_PATH_CMD_FIXED_PARAM,
+ sizeof(*cmd));
+
+ cmd->stats_id_mask = cpu_to_le32(WMI_REQ_CTRL_PATH_PDEV_TX_STAT);
+ cmd->action = cpu_to_le32(WMI_REQUEST_CTRL_PATH_STAT_GET);
+ cmd->subid = cpu_to_le32(tpc_stats_type);
+
+ ptr = skb->data + sizeof(*cmd);
+
+ /* The below TLV arrays optionally follow this fixed param TLV structure
+ * 1. ARRAY_UINT32 pdev_ids[]
+ * If this array is present and non-zero length, stats should only
+ * be provided from the pdevs identified in the array.
+	 * 2. ARRAY_UINT32 vdev_ids[]
+ * If this array is present and non-zero length, stats should only
+ * be provided from the vdevs identified in the array.
+ * 3. ath12k_wmi_mac_addr_params peer_macaddr[];
+ * If this array is present and non-zero length, stats should only
+ * be provided from the peers with the MAC addresses specified
+ * in the array
+ */
+ tlv = ptr;
+ tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, sizeof(u32));
+ ptr += TLV_HDR_SIZE;
+
+ pdev_id = ptr;
+ *pdev_id = cpu_to_le32(ath12k_mac_get_target_pdev_id(ar));
+ ptr += sizeof(*pdev_id);
+
+ tlv = ptr;
+ tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, 0);
+ ptr += TLV_HDR_SIZE;
+
+ tlv = ptr;
+ tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_FIXED_STRUCT, 0);
+ ptr += TLV_HDR_SIZE;
+
+ ret = ath12k_wmi_cmd_send(wmi, skb, WMI_REQUEST_HALPHY_CTRL_PATH_STATS_CMDID);
+ if (ret) {
+ ath12k_warn(ar->ab,
+ "failed to submit WMI_REQUEST_STATS_CTRL_PATH_CMDID\n");
+ dev_kfree_skb(skb);
+ return ret;
+ }
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "WMI get TPC STATS sent on pdev %d\n",
+ ar->pdev->pdev_id);
+
+ return ret;
+}
+
int ath12k_wmi_connect(struct ath12k_base *ab)
{
u32 i;
diff --git a/drivers/net/wireless/ath/ath12k/wmi.h b/drivers/net/wireless/ath/ath12k/wmi.h
index 45fe699ce8a5..1ba33e30ddd2 100644
--- a/drivers/net/wireless/ath/ath12k/wmi.h
+++ b/drivers/net/wireless/ath/ath12k/wmi.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH12K_WMI_H
@@ -25,6 +25,7 @@
struct ath12k_base;
struct ath12k;
struct ath12k_link_vif;
+struct ath12k_fw_stats;
/* There is no signed version of __le32, so for a temporary solution come
* up with our own version. The idea is from fs/ntfs/endian.h.
@@ -516,6 +517,9 @@ enum wmi_tlv_cmd_id {
WMI_REQUEST_RCPI_CMDID,
WMI_REQUEST_PEER_STATS_INFO_CMDID,
WMI_REQUEST_RADIO_CHAN_STATS_CMDID,
+ WMI_REQUEST_WLM_STATS_CMDID,
+ WMI_REQUEST_CTRL_PATH_STATS_CMDID,
+ WMI_REQUEST_HALPHY_CTRL_PATH_STATS_CMDID = WMI_REQUEST_CTRL_PATH_STATS_CMDID + 3,
WMI_SET_ARP_NS_OFFLOAD_CMDID = WMI_TLV_CMD(WMI_GRP_ARP_NS_OFL),
WMI_ADD_PROACTIVE_ARP_RSP_PATTERN_CMDID,
WMI_DEL_PROACTIVE_ARP_RSP_PATTERN_CMDID,
@@ -785,6 +789,9 @@ enum wmi_tlv_event_id {
WMI_UPDATE_RCPI_EVENTID,
WMI_PEER_STATS_INFO_EVENTID,
WMI_RADIO_CHAN_STATS_EVENTID,
+ WMI_WLM_STATS_EVENTID,
+ WMI_CTRL_PATH_STATS_EVENTID,
+ WMI_HALPHY_STATS_CTRL_PATH_EVENTID,
WMI_NLO_MATCH_EVENTID = WMI_TLV_CMD(WMI_GRP_NLO_OFL),
WMI_NLO_SCAN_COMPLETE_EVENTID,
WMI_APFIND_EVENTID,
@@ -1191,6 +1198,7 @@ enum wmi_tlv_tag {
WMI_TAG_ARRAY_BYTE,
WMI_TAG_ARRAY_STRUCT,
WMI_TAG_ARRAY_FIXED_STRUCT,
+ WMI_TAG_ARRAY_INT16,
WMI_TAG_LAST_ARRAY_ENUM = 31,
WMI_TAG_SERVICE_READY_EVENT,
WMI_TAG_HAL_REG_CAPABILITIES,
@@ -1941,6 +1949,12 @@ enum wmi_tlv_tag {
WMI_TAG_MAC_PHY_CAPABILITIES_EXT = 0x36F,
WMI_TAG_REGULATORY_RULE_EXT_STRUCT = 0x3A9,
WMI_TAG_REG_CHAN_LIST_CC_EXT_EVENT,
+ WMI_TAG_TPC_STATS_GET_CMD = 0x38B,
+ WMI_TAG_TPC_STATS_EVENT_FIXED_PARAM,
+ WMI_TAG_TPC_STATS_CONFIG_EVENT,
+ WMI_TAG_TPC_STATS_REG_PWR_ALLOWED,
+ WMI_TAG_TPC_STATS_RATES_ARRAY,
+ WMI_TAG_TPC_STATS_CTL_PWR_TABLE_EVENT,
WMI_TAG_EHT_RATE_SET = 0x3C4,
WMI_TAG_DCS_AWGN_INT_TYPE = 0x3C5,
WMI_TAG_MLO_TX_SEND_PARAMS,
@@ -1958,6 +1972,8 @@ enum wmi_tlv_tag {
WMI_TAG_PDEV_SET_BIOS_SAR_TABLE_CMD = 0x3D8,
WMI_TAG_PDEV_SET_BIOS_GEO_TABLE_CMD = 0x3D9,
WMI_TAG_PDEV_SET_BIOS_INTERFACE_CMD = 0x3FB,
+ WMI_TAG_HALPHY_CTRL_PATH_CMD_FIXED_PARAM = 0x442,
+ WMI_TAG_HALPHY_CTRL_PATH_EVENT_FIXED_PARAM,
WMI_TAG_MAX
};
@@ -3624,6 +3640,26 @@ struct ath12k_wmi_p2p_noa_info {
struct ath12k_wmi_p2p_noa_descriptor descriptors[WMI_P2P_MAX_NOA_DESCRIPTORS];
} __packed;
+#define MAX_WMI_UTF_LEN 252
+
+struct ath12k_wmi_ftm_seg_hdr_params {
+ __le32 len;
+ __le32 msgref;
+ __le32 segmentinfo;
+ __le32 pdev_id;
+} __packed;
+
+struct ath12k_wmi_ftm_cmd {
+ __le32 tlv_header;
+ struct ath12k_wmi_ftm_seg_hdr_params seg_hdr;
+ u8 data[];
+} __packed;
+
+struct ath12k_wmi_ftm_event {
+ struct ath12k_wmi_ftm_seg_hdr_params seg_hdr;
+ u8 data[];
+} __packed;
+
#define WMI_BEACON_TX_BUFFER_SIZE 512
#define WMI_EMA_BEACON_CNT GENMASK(7, 0)
@@ -4604,6 +4640,7 @@ enum wmi_rate_preamble {
WMI_RATE_PREAMBLE_HT,
WMI_RATE_PREAMBLE_VHT,
WMI_RATE_PREAMBLE_HE,
+ WMI_RATE_PREAMBLE_EHT,
};
/**
@@ -5628,6 +5665,245 @@ enum wmi_sta_keepalive_method {
#define WMI_STA_KEEPALIVE_INTERVAL_DEFAULT 30
#define WMI_STA_KEEPALIVE_INTERVAL_DISABLE 0
+struct wmi_stats_event {
+ __le32 stats_id;
+ __le32 num_pdev_stats;
+ __le32 num_vdev_stats;
+ __le32 num_peer_stats;
+ __le32 num_bcnflt_stats;
+ __le32 num_chan_stats;
+ __le32 num_mib_stats;
+ __le32 pdev_id;
+ __le32 num_bcn_stats;
+ __le32 num_peer_extd_stats;
+ __le32 num_peer_extd2_stats;
+} __packed;
+
+enum wmi_stats_id {
+ WMI_REQUEST_PDEV_STAT = BIT(2),
+ WMI_REQUEST_VDEV_STAT = BIT(3),
+ WMI_REQUEST_BCN_STAT = BIT(11),
+};
+
+struct wmi_request_stats_cmd {
+ __le32 tlv_header;
+ __le32 stats_id;
+ __le32 vdev_id;
+ struct ath12k_wmi_mac_addr_params peer_macaddr;
+ __le32 pdev_id;
+} __packed;
+
+#define WLAN_MAX_AC 4
+#define MAX_TX_RATE_VALUES 10
+
+struct wmi_vdev_stats_params {
+ __le32 vdev_id;
+ __le32 beacon_snr;
+ __le32 data_snr;
+ __le32 num_tx_frames[WLAN_MAX_AC];
+ __le32 num_rx_frames;
+ __le32 num_tx_frames_retries[WLAN_MAX_AC];
+ __le32 num_tx_frames_failures[WLAN_MAX_AC];
+ __le32 num_rts_fail;
+ __le32 num_rts_success;
+ __le32 num_rx_err;
+ __le32 num_rx_discard;
+ __le32 num_tx_not_acked;
+ __le32 tx_rate_history[MAX_TX_RATE_VALUES];
+ __le32 beacon_rssi_history[MAX_TX_RATE_VALUES];
+} __packed;
+
+struct ath12k_wmi_bcn_stats_params {
+ __le32 vdev_id;
+ __le32 tx_bcn_succ_cnt;
+ __le32 tx_bcn_outage_cnt;
+} __packed;
+
+struct ath12k_wmi_pdev_base_stats_params {
+ a_sle32 chan_nf;
+ __le32 tx_frame_count; /* Cycles spent transmitting frames */
+ __le32 rx_frame_count; /* Cycles spent receiving frames */
+ __le32 rx_clear_count; /* Total channel busy time, evidently */
+ __le32 cycle_count; /* Total on-channel time */
+ __le32 phy_err_count;
+ __le32 chan_tx_pwr;
+} __packed;
+
+struct ath12k_wmi_pdev_tx_stats_params {
+ a_sle32 comp_queued;
+ a_sle32 comp_delivered;
+ a_sle32 msdu_enqued;
+ a_sle32 mpdu_enqued;
+ a_sle32 wmm_drop;
+ a_sle32 local_enqued;
+ a_sle32 local_freed;
+ a_sle32 hw_queued;
+ a_sle32 hw_reaped;
+ a_sle32 underrun;
+ a_sle32 tx_abort;
+ a_sle32 mpdus_requed;
+ __le32 tx_ko;
+ __le32 data_rc;
+ __le32 self_triggers;
+ __le32 sw_retry_failure;
+ __le32 illgl_rate_phy_err;
+ __le32 pdev_cont_xretry;
+ __le32 pdev_tx_timeout;
+ __le32 pdev_resets;
+ __le32 stateless_tid_alloc_failure;
+ __le32 phy_underrun;
+ __le32 txop_ovf;
+} __packed;
+
+struct ath12k_wmi_pdev_rx_stats_params {
+ a_sle32 mid_ppdu_route_change;
+ a_sle32 status_rcvd;
+ a_sle32 r0_frags;
+ a_sle32 r1_frags;
+ a_sle32 r2_frags;
+ a_sle32 r3_frags;
+ a_sle32 htt_msdus;
+ a_sle32 htt_mpdus;
+ a_sle32 loc_msdus;
+ a_sle32 loc_mpdus;
+ a_sle32 oversize_amsdu;
+ a_sle32 phy_errs;
+ a_sle32 phy_err_drop;
+ a_sle32 mpdu_errs;
+} __packed;
+
+struct ath12k_wmi_pdev_stats_params {
+ struct ath12k_wmi_pdev_base_stats_params base;
+ struct ath12k_wmi_pdev_tx_stats_params tx;
+ struct ath12k_wmi_pdev_rx_stats_params rx;
+} __packed;
+
+struct ath12k_fw_stats_req_params {
+ u32 stats_id;
+ u32 vdev_id;
+ u32 pdev_id;
+};
+
+#define WMI_REQ_CTRL_PATH_PDEV_TX_STAT 1
+#define WMI_REQUEST_CTRL_PATH_STAT_GET 1
+
+#define WMI_TPC_CONFIG BIT(1)
+#define WMI_TPC_REG_PWR_ALLOWED BIT(2)
+#define WMI_TPC_RATES_ARRAY1 BIT(3)
+#define WMI_TPC_RATES_ARRAY2 BIT(4)
+#define WMI_TPC_RATES_DL_OFDMA_ARRAY BIT(5)
+#define WMI_TPC_CTL_PWR_ARRAY BIT(6)
+#define WMI_TPC_CONFIG_PARAM 0x1
+#define ATH12K_TPC_RATE_ARRAY_MU GENMASK(15, 8)
+#define ATH12K_TPC_RATE_ARRAY_SU GENMASK(7, 0)
+#define TPC_STATS_REG_PWR_ALLOWED_TYPE 0
+
+enum wmi_halphy_ctrl_path_stats_id {
+ WMI_HALPHY_PDEV_TX_SU_STATS = 0,
+ WMI_HALPHY_PDEV_TX_SUTXBF_STATS,
+ WMI_HALPHY_PDEV_TX_MU_STATS,
+ WMI_HALPHY_PDEV_TX_MUTXBF_STATS,
+ WMI_HALPHY_PDEV_TX_STATS_MAX,
+};
+
+enum ath12k_wmi_tpc_stats_rates_array {
+ ATH12K_TPC_STATS_RATES_ARRAY1,
+ ATH12K_TPC_STATS_RATES_ARRAY2,
+};
+
+enum ath12k_wmi_tpc_stats_ctl_array {
+ ATH12K_TPC_STATS_CTL_ARRAY,
+ ATH12K_TPC_STATS_CTL_160ARRAY,
+};
+
+enum ath12k_wmi_tpc_stats_events {
+ ATH12K_TPC_STATS_CONFIG_REG_PWR_EVENT,
+ ATH12K_TPC_STATS_RATES_EVENT1,
+ ATH12K_TPC_STATS_RATES_EVENT2,
+ ATH12K_TPC_STATS_CTL_TABLE_EVENT
+};
+
+struct wmi_request_halphy_ctrl_path_stats_cmd_fixed_params {
+ __le32 tlv_header;
+ __le32 stats_id_mask;
+ __le32 request_id;
+ __le32 action;
+ __le32 subid;
+} __packed;
+
+struct ath12k_wmi_pdev_tpc_stats_event_fixed_params {
+ __le32 pdev_id;
+ __le32 end_of_event;
+ __le32 event_count;
+} __packed;
+
+struct wmi_tpc_config_params {
+ __le32 reg_domain;
+ __le32 chan_freq;
+ __le32 phy_mode;
+ __le32 twice_antenna_reduction;
+ __le32 twice_max_reg_power;
+ __le32 twice_antenna_gain;
+ __le32 power_limit;
+ __le32 rate_max;
+ __le32 num_tx_chain;
+ __le32 ctl;
+ __le32 flags;
+ __le32 caps;
+} __packed;
+
+struct wmi_max_reg_power_fixed_params {
+ __le32 reg_power_type;
+ __le32 reg_array_len;
+ __le32 d1;
+ __le32 d2;
+ __le32 d3;
+ __le32 d4;
+} __packed;
+
+struct wmi_max_reg_power_allowed_arg {
+ struct wmi_max_reg_power_fixed_params tpc_reg_pwr;
+ s16 *reg_pwr_array;
+};
+
+struct wmi_tpc_rates_array_fixed_params {
+ __le32 rate_array_type;
+ __le32 rate_array_len;
+} __packed;
+
+struct wmi_tpc_rates_array_arg {
+ struct wmi_tpc_rates_array_fixed_params tpc_rates_array;
+ s16 *rate_array;
+};
+
+struct wmi_tpc_ctl_pwr_fixed_params {
+ __le32 ctl_array_type;
+ __le32 ctl_array_len;
+ __le32 end_of_ctl_pwr;
+ __le32 ctl_pwr_count;
+ __le32 d1;
+ __le32 d2;
+ __le32 d3;
+ __le32 d4;
+} __packed;
+
+struct wmi_tpc_ctl_pwr_table_arg {
+ struct wmi_tpc_ctl_pwr_fixed_params tpc_ctl_pwr;
+ s8 *ctl_pwr_table;
+};
+
+struct wmi_tpc_stats_arg {
+ u32 pdev_id;
+ u32 event_count;
+ u32 end_of_event;
+ u32 tlvs_rcvd;
+ struct wmi_max_reg_power_allowed_arg max_reg_allowed_power;
+ struct wmi_tpc_rates_array_arg rates_array1;
+ struct wmi_tpc_rates_array_arg rates_array2;
+ struct wmi_tpc_config_params tpc_config;
+ struct wmi_tpc_ctl_pwr_table_arg ctl_array;
+};
+
void ath12k_wmi_init_qcn9274(struct ath12k_base *ab,
struct ath12k_wmi_resource_config_arg *config);
void ath12k_wmi_init_wcn7850(struct ath12k_base *ab,
@@ -5639,7 +5915,7 @@ int ath12k_wmi_mgmt_send(struct ath12k *ar, u32 vdev_id, u32 buf_id,
struct sk_buff *frame);
int ath12k_wmi_p2p_go_bcn_ie(struct ath12k *ar, u32 vdev_id,
const u8 *p2p_ie);
-int ath12k_wmi_bcn_tmpl(struct ath12k *ar, u32 vdev_id,
+int ath12k_wmi_bcn_tmpl(struct ath12k_link_vif *arvif,
struct ieee80211_mutable_offsets *offs,
struct sk_buff *bcn,
struct ath12k_wmi_bcn_tmpl_ema_arg *ema_args);
@@ -5753,6 +6029,13 @@ int ath12k_wmi_set_bios_cmd(struct ath12k_base *ab, u32 param_id,
const u8 *buf, size_t buf_len);
int ath12k_wmi_set_bios_sar_cmd(struct ath12k_base *ab, const u8 *psar_table);
int ath12k_wmi_set_bios_geo_cmd(struct ath12k_base *ab, const u8 *pgeo_table);
+int ath12k_wmi_send_stats_request_cmd(struct ath12k *ar, u32 stats_id,
+ u32 vdev_id, u32 pdev_id);
+__le32 ath12k_wmi_tlv_hdr(u32 cmd, u32 len);
+
+int ath12k_wmi_send_tpc_stats_request(struct ath12k *ar,
+ enum wmi_halphy_ctrl_path_stats_id tpc_stats_type);
+void ath12k_wmi_free_tpc_stats_mem(struct ath12k *ar);
static inline u32
ath12k_wmi_caps_ext_get_pdev_id(const struct ath12k_wmi_caps_ext_params *param)
@@ -5806,5 +6089,8 @@ int ath12k_wmi_sta_keepalive(struct ath12k *ar,
int ath12k_wmi_mlo_setup(struct ath12k *ar, struct wmi_mlo_setup_arg *mlo_params);
int ath12k_wmi_mlo_ready(struct ath12k *ar);
int ath12k_wmi_mlo_teardown(struct ath12k *ar);
+void ath12k_wmi_fw_stats_dump(struct ath12k *ar,
+ struct ath12k_fw_stats *fw_stats, u32 stats_id,
+ char *buf);
#endif
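For context, a minimal sketch of how a caller could use the firmware-stats helper declared above; the wrapper name and the vdev/pdev ids are illustrative assumptions, not part of this patch:

/* Illustrative only: request pdev, vdev and beacon stats in one call,
 * using the wmi_stats_id bits and the new helper declared above.
 */
static int example_ath12k_request_fw_stats(struct ath12k *ar)
{
	u32 stats_id = WMI_REQUEST_PDEV_STAT | WMI_REQUEST_VDEV_STAT |
		       WMI_REQUEST_BCN_STAT;

	return ath12k_wmi_send_stats_request_cmd(ar, stats_id,
						 /* vdev_id */ 0,
						 /* pdev_id */ 0);
}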
diff --git a/drivers/net/wireless/ath/ath12k/wow.c b/drivers/net/wireless/ath/ath12k/wow.c
index 9e1c0bfd212f..dce9bd0bcaef 100644
--- a/drivers/net/wireless/ath/ath12k/wow.c
+++ b/drivers/net/wireless/ath/ath12k/wow.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2020 The Linux Foundation. All rights reserved.
- * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2022-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/delay.h>
@@ -990,6 +990,7 @@ exit:
case ATH12K_HW_STATE_RESTARTING:
case ATH12K_HW_STATE_RESTARTED:
case ATH12K_HW_STATE_WEDGED:
+ case ATH12K_HW_STATE_TM:
ath12k_warn(ar->ab, "encountered unexpected device state %d on resume, cannot recover\n",
ah->state);
ret = -EIO;
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index a728cc0387df..6e38aa7351e3 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -274,7 +274,6 @@ struct ath_node {
struct ath_tx_control {
struct ath_txq *txq;
- struct ath_node *an;
struct ieee80211_sta *sta;
u8 paprd;
};
@@ -1018,7 +1017,7 @@ struct ath_softc {
u8 gtt_cnt;
u32 intrstatus;
- u32 rx_active_check_time;
+ unsigned long rx_active_check_time;
u32 rx_active_count;
u16 ps_flags; /* PS_* */
bool ps_enabled;
diff --git a/drivers/net/wireless/ath/ath9k/common-spectral.c b/drivers/net/wireless/ath/ath9k/common-spectral.c
index 628eeec4b82f..300d178830ad 100644
--- a/drivers/net/wireless/ath/ath9k/common-spectral.c
+++ b/drivers/net/wireless/ath/ath9k/common-spectral.c
@@ -628,12 +628,12 @@ int ath_cmn_process_fft(struct ath_spec_scan_priv *spec_priv, struct ieee80211_h
else
RX_STAT_INC(sc, rx_spectral_sample_err);
- memset(sample_buf, 0, SPECTRAL_SAMPLE_MAX_LEN);
-
/* Mix the received bins to the /dev/random
* pool
*/
add_device_randomness(sample_buf, num_bins);
+
+ memset(sample_buf, 0, SPECTRAL_SAMPLE_MAX_LEN);
}
/* Process a normal frame */
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index f9e77c4624d9..01e0dffbf57e 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -647,7 +647,9 @@ static int ath9k_of_init(struct ath_softc *sc)
ah->ah_flags |= AH_NO_EEP_SWAP;
}
- of_get_mac_address(np, common->macaddr);
+ ret = of_get_mac_address(np, common->macaddr);
+ if (ret == -EPROBE_DEFER)
+ return ret;
return 0;
}
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index db07ce6dbc08..0ac9212e42f7 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -2291,19 +2291,10 @@ static int ath_tx_prepare(struct ieee80211_hw *hw, struct sk_buff *skb,
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_sta *sta = txctl->sta;
struct ieee80211_vif *vif = info->control.vif;
- struct ath_vif *avp;
struct ath_softc *sc = hw->priv;
int frmlen = skb->len + FCS_LEN;
int padpos, padsize;
- /* NOTE: sta can be NULL according to net/mac80211.h */
- if (sta)
- txctl->an = (struct ath_node *)sta->drv_priv;
- else if (vif && ieee80211_is_data(hdr->frame_control)) {
- avp = (void *)vif->drv_priv;
- txctl->an = &avp->mcast_node;
- }
-
if (info->control.hw_key)
frmlen += info->control.hw_key->icv_len;
diff --git a/drivers/net/wireless/ath/ath11k/testmode_i.h b/drivers/net/wireless/ath/testmode_i.h
index 91b83873d660..980ef2f3f05f 100644
--- a/drivers/net/wireless/ath/ath11k/testmode_i.h
+++ b/drivers/net/wireless/ath/testmode_i.h
@@ -1,59 +1,59 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2023-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
-/* "API" level of the ath11k testmode interface. Bump it after every
+/* "API" level of the ath testmode interface. Bump it after every
* incompatible interface change.
*/
-#define ATH11K_TESTMODE_VERSION_MAJOR 1
+#define ATH_TESTMODE_VERSION_MAJOR 1
/* Bump this after every _compatible_ interface change, for example
* addition of a new command or an attribute.
*/
-#define ATH11K_TESTMODE_VERSION_MINOR 1
+#define ATH_TESTMODE_VERSION_MINOR 1
-#define ATH11K_TM_DATA_MAX_LEN 5000
-#define ATH11K_FTM_EVENT_MAX_BUF_LENGTH 2048
+#define ATH_TM_DATA_MAX_LEN 5000
+#define ATH_FTM_EVENT_MAX_BUF_LENGTH 2048
-enum ath11k_tm_attr {
- __ATH11K_TM_ATTR_INVALID = 0,
- ATH11K_TM_ATTR_CMD = 1,
- ATH11K_TM_ATTR_DATA = 2,
- ATH11K_TM_ATTR_WMI_CMDID = 3,
- ATH11K_TM_ATTR_VERSION_MAJOR = 4,
- ATH11K_TM_ATTR_VERSION_MINOR = 5,
- ATH11K_TM_ATTR_WMI_OP_VERSION = 6,
+enum ath_tm_attr {
+ __ATH_TM_ATTR_INVALID = 0,
+ ATH_TM_ATTR_CMD = 1,
+ ATH_TM_ATTR_DATA = 2,
+ ATH_TM_ATTR_WMI_CMDID = 3,
+ ATH_TM_ATTR_VERSION_MAJOR = 4,
+ ATH_TM_ATTR_VERSION_MINOR = 5,
+ ATH_TM_ATTR_WMI_OP_VERSION = 6,
/* keep last */
- __ATH11K_TM_ATTR_AFTER_LAST,
- ATH11K_TM_ATTR_MAX = __ATH11K_TM_ATTR_AFTER_LAST - 1,
+ __ATH_TM_ATTR_AFTER_LAST,
+ ATH_TM_ATTR_MAX = __ATH_TM_ATTR_AFTER_LAST - 1,
};
-/* All ath11k testmode interface commands specified in
- * ATH11K_TM_ATTR_CMD
+/* All ath testmode interface commands specified in
+ * ATH_TM_ATTR_CMD
*/
-enum ath11k_tm_cmd {
- /* Returns the supported ath11k testmode interface version in
- * ATH11K_TM_ATTR_VERSION. Always guaranteed to work. User space
+enum ath_tm_cmd {
+ /* Returns the supported ath testmode interface version in
+ * ATH_TM_ATTR_VERSION. Always guaranteed to work. User space
* uses this to verify it's using the correct version of the
* testmode interface
*/
- ATH11K_TM_CMD_GET_VERSION = 0,
+ ATH_TM_CMD_GET_VERSION = 0,
/* The command used to transmit a WMI command to the firmware and
* the event to receive WMI events from the firmware. Without
* struct wmi_cmd_hdr header, only the WMI payload. Command id is
- * provided with ATH11K_TM_ATTR_WMI_CMDID and payload in
- * ATH11K_TM_ATTR_DATA.
+ * provided with ATH_TM_ATTR_WMI_CMDID and payload in
+ * ATH_TM_ATTR_DATA.
*/
- ATH11K_TM_CMD_WMI = 1,
+ ATH_TM_CMD_WMI = 1,
/* Boots the UTF firmware, the netdev interface must be down at the
* time.
*/
- ATH11K_TM_CMD_TESTMODE_START = 2,
+ ATH_TM_CMD_TESTMODE_START = 2,
/* The command used to transmit a FTM WMI command to the firmware
* and the event to receive WMI events from the firmware. The data
@@ -62,5 +62,5 @@ enum ath11k_tm_cmd {
* The data payload size could be large and the driver needs to
* send segmented data to firmware.
*/
- ATH11K_TM_CMD_WMI_FTM = 3,
+ ATH_TM_CMD_WMI_FTM = 3,
};
diff --git a/drivers/net/wireless/broadcom/b43/main.c b/drivers/net/wireless/broadcom/b43/main.c
index 25b4ef9d3c9a..7529afd24aed 100644
--- a/drivers/net/wireless/broadcom/b43/main.c
+++ b/drivers/net/wireless/broadcom/b43/main.c
@@ -2166,7 +2166,7 @@ static void b43_print_fw_helptext(struct b43_wl *wl, bool error)
{
const char text[] =
"You must go to " \
- "https://wireless.wiki.kernel.org/en/users/Drivers/b43#devicefirmware " \
+ "https://wireless.docs.kernel.org/en/latest/en/users/drivers/b43/developers.html#list-of-firmware " \
"and download the correct firmware for this driver version. " \
"Please carefully read all instructions on this website.\n";
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
index 60eb95fc19a5..6bc107476a2a 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
@@ -1172,6 +1172,7 @@ static int brcmf_ops_sdio_suspend(struct device *dev)
struct brcmf_bus *bus_if;
struct brcmf_sdio_dev *sdiodev;
mmc_pm_flag_t sdio_flags;
+ bool cap_power_off;
int ret = 0;
func = container_of(dev, struct sdio_func, dev);
@@ -1179,19 +1180,23 @@ static int brcmf_ops_sdio_suspend(struct device *dev)
if (func->num != 1)
return 0;
+ cap_power_off = !!(func->card->host->caps & MMC_CAP_POWER_OFF_CARD);
bus_if = dev_get_drvdata(dev);
sdiodev = bus_if->bus_priv.sdio;
- if (sdiodev->wowl_enabled) {
+ if (sdiodev->wowl_enabled || !cap_power_off) {
brcmf_sdiod_freezer_on(sdiodev);
brcmf_sdio_wd_timer(sdiodev->bus, 0);
sdio_flags = MMC_PM_KEEP_POWER;
- if (sdiodev->settings->bus.sdio.oob_irq_supported)
- enable_irq_wake(sdiodev->settings->bus.sdio.oob_irq_nr);
- else
- sdio_flags |= MMC_PM_WAKE_SDIO_IRQ;
+
+ if (sdiodev->wowl_enabled) {
+ if (sdiodev->settings->bus.sdio.oob_irq_supported)
+ enable_irq_wake(sdiodev->settings->bus.sdio.oob_irq_nr);
+ else
+ sdio_flags |= MMC_PM_WAKE_SDIO_IRQ;
+ }
if (sdio_set_host_pm_flags(sdiodev->func1, sdio_flags))
brcmf_err("Failed to set pm_flags %x\n", sdio_flags);
@@ -1213,18 +1218,19 @@ static int brcmf_ops_sdio_resume(struct device *dev)
struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
struct sdio_func *func = container_of(dev, struct sdio_func, dev);
int ret = 0;
+ bool cap_power_off = !!(func->card->host->caps & MMC_CAP_POWER_OFF_CARD);
brcmf_dbg(SDIO, "Enter: F%d\n", func->num);
if (func->num != 2)
return 0;
- if (!sdiodev->wowl_enabled) {
+ if (!sdiodev->wowl_enabled && cap_power_off) {
/* bus was powered off and device removed, probe again */
ret = brcmf_sdiod_probe(sdiodev);
if (ret)
brcmf_err("Failed to probe device on resume\n");
} else {
- if (sdiodev->settings->bus.sdio.oob_irq_supported)
+ if (sdiodev->wowl_enabled && sdiodev->settings->bus.sdio.oob_irq_supported)
disable_irq_wake(sdiodev->settings->bus.sdio.oob_irq_nr);
brcmf_sdiod_freezer_off(sdiodev);
diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2200.c b/drivers/net/wireless/intel/ipw2x00/ipw2200.c
index be1d971b3d32..24a5624ef207 100644
--- a/drivers/net/wireless/intel/ipw2x00/ipw2200.c
+++ b/drivers/net/wireless/intel/ipw2x00/ipw2200.c
@@ -3295,7 +3295,7 @@ static int ipw_init_nic(struct ipw_priv *priv)
rc = ipw_poll_bit(priv, IPW_GP_CNTRL_RW,
IPW_GP_CNTRL_BIT_CLOCK_READY, 250);
if (rc < 0)
- IPW_DEBUG_INFO("FAILED wait for clock stablization\n");
+ IPW_DEBUG_INFO("FAILED wait for clock stabilization\n");
/* assert SW reset */
ipw_set_bit(priv, IPW_RESET_REG, IPW_RESET_REG_SW_RESET);
diff --git a/drivers/net/wireless/intel/ipw2x00/libipw.h b/drivers/net/wireless/intel/ipw2x00/libipw.h
index 3c20353e5a41..e031e8692ca6 100644
--- a/drivers/net/wireless/intel/ipw2x00/libipw.h
+++ b/drivers/net/wireless/intel/ipw2x00/libipw.h
@@ -1011,8 +1011,6 @@ netdev_tx_t libipw_xmit(struct sk_buff *skb, struct net_device *dev);
void libipw_txb_free(struct libipw_txb *);
/* libipw_rx.c */
-void libipw_rx_any(struct libipw_device *ieee, struct sk_buff *skb,
- struct libipw_rx_stats *stats);
int libipw_rx(struct libipw_device *ieee, struct sk_buff *skb,
struct libipw_rx_stats *rx_stats);
/* make sure to set stats->len */
diff --git a/drivers/net/wireless/intel/ipw2x00/libipw_rx.c b/drivers/net/wireless/intel/ipw2x00/libipw_rx.c
index dc4e91f58bb4..b7bc94f7abd8 100644
--- a/drivers/net/wireless/intel/ipw2x00/libipw_rx.c
+++ b/drivers/net/wireless/intel/ipw2x00/libipw_rx.c
@@ -823,96 +823,6 @@ int libipw_rx(struct libipw_device *ieee, struct sk_buff *skb,
return 0;
}
-/* Filter out unrelated packets, call libipw_rx[_mgt]
- * This function takes over the skb, it should not be used again after calling
- * this function. */
-void libipw_rx_any(struct libipw_device *ieee,
- struct sk_buff *skb, struct libipw_rx_stats *stats)
-{
- struct libipw_hdr_4addr *hdr;
- int is_packet_for_us;
- u16 fc;
-
- if (ieee->iw_mode == IW_MODE_MONITOR) {
- if (!libipw_rx(ieee, skb, stats))
- dev_kfree_skb_irq(skb);
- return;
- }
-
- if (skb->len < sizeof(struct ieee80211_hdr))
- goto drop_free;
-
- hdr = (struct libipw_hdr_4addr *)skb->data;
- fc = le16_to_cpu(hdr->frame_ctl);
-
- if ((fc & IEEE80211_FCTL_VERS) != 0)
- goto drop_free;
-
- switch (fc & IEEE80211_FCTL_FTYPE) {
- case IEEE80211_FTYPE_MGMT:
- if (skb->len < sizeof(struct libipw_hdr_3addr))
- goto drop_free;
- libipw_rx_mgt(ieee, hdr, stats);
- dev_kfree_skb_irq(skb);
- return;
- case IEEE80211_FTYPE_DATA:
- break;
- case IEEE80211_FTYPE_CTL:
- return;
- default:
- return;
- }
-
- is_packet_for_us = 0;
- switch (ieee->iw_mode) {
- case IW_MODE_ADHOC:
- /* our BSS and not from/to DS */
- if (ether_addr_equal(hdr->addr3, ieee->bssid) &&
- ((fc & (IEEE80211_FCTL_TODS + IEEE80211_FCTL_FROMDS)) == 0)) {
- /* promisc: get all */
- if (ieee->dev->flags & IFF_PROMISC)
- is_packet_for_us = 1;
- /* to us */
- else if (ether_addr_equal(hdr->addr1, ieee->dev->dev_addr))
- is_packet_for_us = 1;
- /* mcast */
- else if (is_multicast_ether_addr(hdr->addr1))
- is_packet_for_us = 1;
- }
- break;
- case IW_MODE_INFRA:
- /* our BSS (== from our AP) and from DS */
- if (ether_addr_equal(hdr->addr2, ieee->bssid) &&
- ((fc & (IEEE80211_FCTL_TODS + IEEE80211_FCTL_FROMDS)) == IEEE80211_FCTL_FROMDS)) {
- /* promisc: get all */
- if (ieee->dev->flags & IFF_PROMISC)
- is_packet_for_us = 1;
- /* to us */
- else if (ether_addr_equal(hdr->addr1, ieee->dev->dev_addr))
- is_packet_for_us = 1;
- /* mcast */
- else if (is_multicast_ether_addr(hdr->addr1)) {
- /* not our own packet bcasted from AP */
- if (!ether_addr_equal(hdr->addr3, ieee->dev->dev_addr))
- is_packet_for_us = 1;
- }
- }
- break;
- default:
- /* ? */
- break;
- }
-
- if (is_packet_for_us)
- if (!libipw_rx(ieee, skb, stats))
- dev_kfree_skb_irq(skb);
- return;
-
-drop_free:
- dev_kfree_skb_irq(skb);
- ieee->dev->stats.rx_dropped++;
-}
-
#define MGMT_FRAME_FIXED_PART_LENGTH 0x24
static u8 qos_oui[QOS_OUI_LEN] = { 0x00, 0x50, 0xF2 };
@@ -1729,6 +1639,5 @@ void libipw_rx_mgt(struct libipw_device *ieee,
}
}
-EXPORT_SYMBOL_GPL(libipw_rx_any);
EXPORT_SYMBOL(libipw_rx_mgt);
EXPORT_SYMBOL(libipw_rx);
diff --git a/drivers/net/wireless/intel/iwlegacy/4965-rs.c b/drivers/net/wireless/intel/iwlegacy/4965-rs.c
index 718efb1aa1b0..0e5130d1fccd 100644
--- a/drivers/net/wireless/intel/iwlegacy/4965-rs.c
+++ b/drivers/net/wireless/intel/iwlegacy/4965-rs.c
@@ -132,15 +132,8 @@ static void il4965_rs_fill_link_cmd(struct il_priv *il,
static void il4965_rs_stay_in_table(struct il_lq_sta *lq_sta,
bool force_search);
-#ifdef CONFIG_MAC80211_DEBUGFS
static void il4965_rs_dbgfs_set_mcs(struct il_lq_sta *lq_sta,
u32 *rate_n_flags, int idx);
-#else
-static void
-il4965_rs_dbgfs_set_mcs(struct il_lq_sta *lq_sta, u32 * rate_n_flags, int idx)
-{
-}
-#endif
/*
* The following tables contain the expected throughput metrics for all rates
@@ -2495,8 +2488,6 @@ il4965_rs_free_sta(void *il_r, struct ieee80211_sta *sta, void *il_sta)
D_RATE("leave\n");
}
-#ifdef CONFIG_MAC80211_DEBUGFS
-
static void
il4965_rs_dbgfs_set_mcs(struct il_lq_sta *lq_sta, u32 * rate_n_flags, int idx)
{
@@ -2504,6 +2495,9 @@ il4965_rs_dbgfs_set_mcs(struct il_lq_sta *lq_sta, u32 * rate_n_flags, int idx)
u8 valid_tx_ant;
u8 ant_sel_tx;
+ if (!IS_ENABLED(CONFIG_MAC80211_DEBUGFS))
+ return;
+
il = lq_sta->drv;
valid_tx_ant = il->hw_params.valid_tx_ant;
if (lq_sta->dbg_fixed_rate) {
@@ -2758,7 +2752,6 @@ il4965_rs_add_debugfs(void *il, void *il_sta, struct dentry *dir)
debugfs_create_u8("tx_agg_tid_enable", 0600, dir,
&lq_sta->tx_agg_tid_en);
}
-#endif
/*
* Initialization of rate scaling information is done by driver after
@@ -2781,9 +2774,8 @@ static const struct rate_control_ops rs_4965_ops = {
.free = il4965_rs_free,
.alloc_sta = il4965_rs_alloc_sta,
.free_sta = il4965_rs_free_sta,
-#ifdef CONFIG_MAC80211_DEBUGFS
- .add_sta_debugfs = il4965_rs_add_debugfs,
-#endif
+ .add_sta_debugfs = PTR_IF(IS_ENABLED(CONFIG_MAC80211_DEBUGFS),
+ il4965_rs_add_debugfs),
};
int
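A short note on the PTR_IF() conversion above, hedged as a paraphrase of the helper in include/linux/kernel.h:

/* PTR_IF(cond, ptr) is roughly ((cond) ? (ptr) : NULL), so the
 * .add_sta_debugfs member is il4965_rs_add_debugfs when
 * CONFIG_MAC80211_DEBUGFS is enabled and NULL otherwise, while
 * il4965_rs_dbgfs_set_mcs() returns early via IS_ENABLED() — together
 * these replace the removed #ifdef blocks.
 */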
diff --git a/drivers/net/wireless/intel/iwlegacy/common.h b/drivers/net/wireless/intel/iwlegacy/common.h
index 92285412ab10..52610f5e57a3 100644
--- a/drivers/net/wireless/intel/iwlegacy/common.h
+++ b/drivers/net/wireless/intel/iwlegacy/common.h
@@ -2815,9 +2815,7 @@ struct il_lq_sta {
struct il_scale_tbl_info lq_info[LQ_SIZE]; /* "active", "search" */
struct il_traffic_load load[TID_MAX_LOAD_COUNT];
u8 tx_agg_tid_en;
-#ifdef CONFIG_MAC80211_DEBUGFS
u32 dbg_fixed_rate;
-#endif
struct il_priv *drv;
/* used to be in sta_info */
diff --git a/drivers/net/wireless/intel/iwlwifi/Kconfig b/drivers/net/wireless/intel/iwlwifi/Kconfig
index 4b04865fc2c9..82f577da1a8b 100644
--- a/drivers/net/wireless/intel/iwlwifi/Kconfig
+++ b/drivers/net/wireless/intel/iwlwifi/Kconfig
@@ -81,14 +81,25 @@ config IWLMVM
of the devices that use this firmware is available here:
https://wireless.wiki.kernel.org/en/users/drivers/iwlwifi#firmware
+config IWLMLD
+ tristate "Intel Wireless WiFi MLD Firmware support"
+ select WANT_DEV_COREDUMP
+ depends on MAC80211
+ depends on PTP_1588_CLOCK_OPTIONAL
+ help
+ This is the driver that supports firmwares of MLD capable devices.
+ The list of the devices that use this firmware is available here:
+ https://wireless.wiki.kernel.org/en/users/drivers/iwlwifi#firmware
+
# don't call it _MODULE -- will confuse Kconfig/fixdep/...
config IWLWIFI_OPMODE_MODULAR
bool
default y if IWLDVM=m
default y if IWLMVM=m
+ default y if IWLMLD=m
-comment "WARNING: iwlwifi is useless without IWLDVM or IWLMVM"
- depends on IWLDVM=n && IWLMVM=n
+comment "WARNING: iwlwifi is useless without IWLDVM or IWLMVM or IWLMLD"
+ depends on IWLDVM=n && IWLMVM=n && IWLMLD=n
menu "Debugging Options"
diff --git a/drivers/net/wireless/intel/iwlwifi/Makefile b/drivers/net/wireless/intel/iwlwifi/Makefile
index 19c4ce6f2465..9546ceeaf5e3 100644
--- a/drivers/net/wireless/intel/iwlwifi/Makefile
+++ b/drivers/net/wireless/intel/iwlwifi/Makefile
@@ -12,7 +12,8 @@ iwlwifi-objs += pcie/ctxt-info.o pcie/ctxt-info-gen3.o
iwlwifi-objs += pcie/trans-gen2.o pcie/tx-gen2.o
iwlwifi-$(CONFIG_IWLDVM) += cfg/1000.o cfg/2000.o cfg/5000.o cfg/6000.o
iwlwifi-$(CONFIG_IWLMVM) += cfg/7000.o cfg/8000.o cfg/9000.o cfg/22000.o
-iwlwifi-$(CONFIG_IWLMVM) += cfg/ax210.o cfg/bz.o cfg/sc.o cfg/dr.o
+iwlwifi-$(CONFIG_IWLMVM) += cfg/ax210.o
+iwlwifi-$(CONFIG_IWLMLD) += cfg/bz.o cfg/sc.o cfg/dr.o
iwlwifi-objs += iwl-dbg-tlv.o
iwlwifi-objs += iwl-trans.o
@@ -20,6 +21,7 @@ iwlwifi-objs += fw/img.o fw/notif-wait.o fw/rs.o
iwlwifi-objs += fw/dbg.o fw/pnvm.o fw/dump.o
iwlwifi-objs += fw/regulatory.o
iwlwifi-$(CONFIG_IWLMVM) += fw/paging.o fw/smem.o fw/init.o
+iwlwifi-$(CONFIG_IWLMLD) += fw/smem.o fw/init.o
iwlwifi-$(CONFIG_ACPI) += fw/acpi.o
iwlwifi-$(CONFIG_EFI) += fw/uefi.o
iwlwifi-$(CONFIG_IWLWIFI_DEBUGFS) += fw/debugfs.o
@@ -33,6 +35,7 @@ ccflags-y += -I$(src)
obj-$(CONFIG_IWLDVM) += dvm/
obj-$(CONFIG_IWLMVM) += mvm/
obj-$(CONFIG_IWLMEI) += mei/
+obj-$(CONFIG_IWLMLD) += mld/
obj-$(CONFIG_IWLWIFI_KUNIT_TESTS) += tests/
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
index 2e2fcb3807ef..130b9a8aa7eb 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
@@ -205,7 +205,6 @@ const char iwl_ax101_name[] = "Intel(R) Wi-Fi 6 AX101";
const char iwl_ax200_name[] = "Intel(R) Wi-Fi 6 AX200 160MHz";
const char iwl_ax201_name[] = "Intel(R) Wi-Fi 6 AX201 160MHz";
const char iwl_ax203_name[] = "Intel(R) Wi-Fi 6 AX203";
-const char iwl_ax204_name[] = "Intel(R) Wi-Fi 6 AX204 160MHz";
const char iwl_ax200_killer_1650w_name[] =
"Killer(R) Wi-Fi 6 AX1650w 160MHz Wireless Network Adapter (200D2W)";
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/ax210.c b/drivers/net/wireless/intel/iwlwifi/cfg/ax210.c
index dcba1a5d793b..e87b57b9e2c0 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/ax210.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/ax210.c
@@ -180,7 +180,6 @@ const struct iwl_cfg_trans_params iwl_ma_trans_cfg = {
};
const char iwl_ax211_name[] = "Intel(R) Wi-Fi 6E AX211 160MHz";
-const char iwl_ax221_name[] = "Intel(R) Wi-Fi 6E AX221 160MHz";
const char iwl_ax231_name[] = "Intel(R) Wi-Fi 6E AX231 160MHz";
const char iwl_ax411_name[] = "Intel(R) Wi-Fi 6E AX411 160MHz";
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/bz.c b/drivers/net/wireless/intel/iwlwifi/cfg/bz.c
index efa3e0e35f79..f055255a7c93 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/bz.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/bz.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2015-2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2024 Intel Corporation
+ * Copyright (C) 2018-2025 Intel Corporation
*/
#include <linux/module.h>
#include <linux/stringify.h>
@@ -10,10 +10,10 @@
#include "fw/api/txq.h"
/* Highest firmware API version supported */
-#define IWL_BZ_UCODE_API_MAX 96
+#define IWL_BZ_UCODE_API_MAX 98
/* Lowest firmware API version supported */
-#define IWL_BZ_UCODE_API_MIN 92
+#define IWL_BZ_UCODE_API_MIN 93
/* NVM versions */
#define IWL_BZ_NVM_VERSION 0x0a1d
@@ -38,6 +38,11 @@
#define IWL_BZ_A_HR_B_MODULE_FIRMWARE(api) \
IWL_BZ_A_HR_B_FW_PRE "-" __stringify(api) ".ucode"
+#if !IS_ENABLED(CONFIG_IWLMVM)
+const char iwl_ax211_name[] = "Intel(R) Wi-Fi 6E AX211 160MHz";
+const char iwl_ax201_name[] = "Intel(R) Wi-Fi 6 AX201 160MHz";
+#endif
+
static const struct iwl_base_params iwl_bz_base_params = {
.eeprom_size = OTP_LOW_IMAGE_SIZE_32K,
.num_of_queues = 512,
@@ -50,6 +55,13 @@ static const struct iwl_base_params iwl_bz_base_params = {
.pcie_l1_allowed = true,
};
+const struct iwl_ht_params iwl_bz_ht_params = {
+ .stbc = true,
+ .ldpc = true,
+ .ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ) |
+ BIT(NL80211_BAND_6GHZ),
+};
+
#define IWL_DEVICE_BZ_COMMON \
.ucode_api_max = IWL_BZ_UCODE_API_MAX, \
.ucode_api_min = IWL_BZ_UCODE_API_MIN, \
@@ -113,7 +125,7 @@ static const struct iwl_base_params iwl_bz_base_params = {
#define IWL_DEVICE_BZ \
IWL_DEVICE_BZ_COMMON, \
- .ht_params = &iwl_22000_ht_params
+ .ht_params = &iwl_bz_ht_params
/*
* This size was picked according to 8 MSDUs inside 512 A-MSDUs in an
@@ -145,7 +157,6 @@ const struct iwl_cfg_trans_params iwl_gl_trans_cfg = {
.low_latency_xtal = true,
};
-const char iwl_bz_name[] = "Intel(R) TBD Bz device";
const char iwl_fm_name[] = "Intel(R) Wi-Fi 7 BE201 320MHz";
const char iwl_wh_name[] = "Intel(R) Wi-Fi 7 BE211 320MHz";
const char iwl_gl_name[] = "Intel(R) Wi-Fi 7 BE200 320MHz";
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/dr.c b/drivers/net/wireless/intel/iwlwifi/cfg/dr.c
index ab7c0f8d54f4..282b9b846c3a 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/dr.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/dr.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2024 Intel Corporation
+ * Copyright (C) 2024-2025 Intel Corporation
*/
#include <linux/module.h>
#include <linux/stringify.h>
@@ -9,7 +9,7 @@
#include "fw/api/txq.h"
/* Highest firmware API version supported */
-#define IWL_DR_UCODE_API_MAX 96
+#define IWL_DR_UCODE_API_MAX 98
/* Lowest firmware API version supported */
#define IWL_DR_UCODE_API_MIN 96
@@ -114,7 +114,7 @@ static const struct iwl_base_params iwl_dr_base_params = {
.uhb_supported = true, \
.features = IWL_TX_CSUM_NETIF_FLAGS | NETIF_F_RXCSUM, \
.num_rbds = IWL_NUM_RBDS_DR_EHT, \
- .ht_params = &iwl_22000_ht_params
+ .ht_params = &iwl_bz_ht_params
/*
* This size was picked according to 8 MSDUs inside 512 A-MSDUs in an
@@ -148,11 +148,9 @@ const struct iwl_cfg_trans_params iwl_br_trans_cfg = {
.mq_rx_supported = true,
.rf_id = true,
.gen2 = true,
- .integrated = true,
.umac_prph_offset = 0x300000,
.xtal_latency = 12000,
.low_latency_xtal = true,
- .ltr_delay = IWL_CFG_TRANS_LTR_DELAY_2500US,
};
const char iwl_br_name[] = "Intel(R) TBD Br device";
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/sc.c b/drivers/net/wireless/intel/iwlwifi/cfg/sc.c
index c9eeb3f6704e..670031fd60dc 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/sc.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/sc.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2015-2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2024 Intel Corporation
+ * Copyright (C) 2018-2025 Intel Corporation
*/
#include <linux/module.h>
#include <linux/stringify.h>
@@ -10,10 +10,10 @@
#include "fw/api/txq.h"
/* Highest firmware API version supported */
-#define IWL_SC_UCODE_API_MAX 96
+#define IWL_SC_UCODE_API_MAX 98
/* Lowest firmware API version supported */
-#define IWL_SC_UCODE_API_MIN 92
+#define IWL_SC_UCODE_API_MIN 93
/* NVM versions */
#define IWL_SC_NVM_VERSION 0x0a1d
@@ -121,7 +121,7 @@ static const struct iwl_base_params iwl_sc_base_params = {
.uhb_supported = true, \
.features = IWL_TX_CSUM_NETIF_FLAGS | NETIF_F_RXCSUM, \
.num_rbds = IWL_NUM_RBDS_SC_EHT, \
- .ht_params = &iwl_22000_ht_params
+ .ht_params = &iwl_bz_ht_params
/*
* This size was picked according to 8 MSDUs inside 512 A-MSDUs in an
@@ -142,22 +142,18 @@ const struct iwl_cfg_trans_params iwl_sc_trans_cfg = {
.ltr_delay = IWL_CFG_TRANS_LTR_DELAY_2500US,
};
-const char iwl_sc_name[] = "Intel(R) TBD Sc device";
+const char iwl_sp_name[] = "Intel(R) Wi-Fi 7 BE213 160MHz";
const struct iwl_cfg iwl_cfg_sc = {
.fw_name_mac = "sc",
IWL_DEVICE_SC,
};
-const char iwl_sc2_name[] = "Intel(R) TBD Sc2 device";
-
const struct iwl_cfg iwl_cfg_sc2 = {
.fw_name_mac = "sc2",
IWL_DEVICE_SC,
};
-const char iwl_sc2f_name[] = "Intel(R) TBD Sc2f device";
-
const struct iwl_cfg iwl_cfg_sc2f = {
.fw_name_mac = "sc2f",
IWL_DEVICE_SC,
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/commands.h b/drivers/net/wireless/intel/iwlwifi/dvm/commands.h
index 3f49c0bccb28..96ea6c8dfc89 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/commands.h
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/commands.h
@@ -1180,85 +1180,87 @@ struct iwl_dram_scratch {
} __packed;
struct iwl_tx_cmd {
- /*
- * MPDU byte count:
- * MAC header (24/26/30/32 bytes) + 2 bytes pad if 26/30 header size,
- * + 8 byte IV for CCM or TKIP (not used for WEP)
- * + Data payload
- * + 8-byte MIC (not used for CCM/WEP)
- * NOTE: Does not include Tx command bytes, post-MAC pad bytes,
- * MIC (CCM) 8 bytes, ICV (WEP/TKIP/CKIP) 4 bytes, CRC 4 bytes.i
- * Range: 14-2342 bytes.
- */
- __le16 len;
-
- /*
- * MPDU or MSDU byte count for next frame.
- * Used for fragmentation and bursting, but not 11n aggregation.
- * Same as "len", but for next frame. Set to 0 if not applicable.
- */
- __le16 next_frame_len;
-
- __le32 tx_flags; /* TX_CMD_FLG_* */
-
- /* uCode may modify this field of the Tx command (in host DRAM!).
- * Driver must also set dram_lsb_ptr and dram_msb_ptr in this cmd. */
- struct iwl_dram_scratch scratch;
-
- /* Rate for *all* Tx attempts, if TX_CMD_FLG_STA_RATE_MSK is cleared. */
- __le32 rate_n_flags; /* RATE_MCS_* */
-
- /* Index of destination station in uCode's station table */
- u8 sta_id;
-
- /* Type of security encryption: CCM or TKIP */
- u8 sec_ctl; /* TX_CMD_SEC_* */
-
- /*
- * Index into rate table (see REPLY_TX_LINK_QUALITY_CMD) for initial
- * Tx attempt, if TX_CMD_FLG_STA_RATE_MSK is set. Normally "0" for
- * data frames, this field may be used to selectively reduce initial
- * rate (via non-0 value) for special frames (e.g. management), while
- * still supporting rate scaling for all frames.
- */
- u8 initial_rate_index;
- u8 reserved;
- u8 key[16];
- __le16 next_frame_flags;
- __le16 reserved2;
- union {
- __le32 life_time;
- __le32 attempt;
- } stop_time;
-
- /* Host DRAM physical address pointer to "scratch" in this command.
- * Must be dword aligned. "0" in dram_lsb_ptr disables usage. */
- __le32 dram_lsb_ptr;
- u8 dram_msb_ptr;
-
- u8 rts_retry_limit; /*byte 50 */
- u8 data_retry_limit; /*byte 51 */
- u8 tid_tspec;
- union {
- __le16 pm_frame_timeout;
- __le16 attempt_duration;
- } timeout;
-
- /*
- * Duration of EDCA burst Tx Opportunity, in 32-usec units.
- * Set this if txop time is not specified by HCCA protocol (e.g. by AP).
- */
- __le16 driver_txop;
-
+ /* New members MUST be added within the __struct_group() macro below. */
+ __struct_group(iwl_tx_cmd_hdr, __hdr, __packed,
+ /*
+ * MPDU byte count:
+ * MAC header (24/26/30/32 bytes) + 2 bytes pad if 26/30 header size,
+ * + 8 byte IV for CCM or TKIP (not used for WEP)
+ * + Data payload
+ * + 8-byte MIC (not used for CCM/WEP)
+ * NOTE: Does not include Tx command bytes, post-MAC pad bytes,
+ * MIC (CCM) 8 bytes, ICV (WEP/TKIP/CKIP) 4 bytes, CRC 4 bytes.
+ * Range: 14-2342 bytes.
+ */
+ __le16 len;
+
+ /*
+ * MPDU or MSDU byte count for next frame.
+ * Used for fragmentation and bursting, but not 11n aggregation.
+ * Same as "len", but for next frame. Set to 0 if not applicable.
+ */
+ __le16 next_frame_len;
+
+ __le32 tx_flags; /* TX_CMD_FLG_* */
+
+ /* uCode may modify this field of the Tx command (in host DRAM!).
+ * Driver must also set dram_lsb_ptr and dram_msb_ptr in this cmd. */
+ struct iwl_dram_scratch scratch;
+
+ /* Rate for *all* Tx attempts, if TX_CMD_FLG_STA_RATE_MSK is cleared. */
+ __le32 rate_n_flags; /* RATE_MCS_* */
+
+ /* Index of destination station in uCode's station table */
+ u8 sta_id;
+
+ /* Type of security encryption: CCM or TKIP */
+ u8 sec_ctl; /* TX_CMD_SEC_* */
+
+ /*
+ * Index into rate table (see REPLY_TX_LINK_QUALITY_CMD) for initial
+ * Tx attempt, if TX_CMD_FLG_STA_RATE_MSK is set. Normally "0" for
+ * data frames, this field may be used to selectively reduce initial
+ * rate (via non-0 value) for special frames (e.g. management), while
+ * still supporting rate scaling for all frames.
+ */
+ u8 initial_rate_index;
+ u8 reserved;
+ u8 key[16];
+ __le16 next_frame_flags;
+ __le16 reserved2;
+ union {
+ __le32 life_time;
+ __le32 attempt;
+ } stop_time;
+
+ /* Host DRAM physical address pointer to "scratch" in this command.
+ * Must be dword aligned. "0" in dram_lsb_ptr disables usage. */
+ __le32 dram_lsb_ptr;
+ u8 dram_msb_ptr;
+
+ u8 rts_retry_limit; /*byte 50 */
+ u8 data_retry_limit; /*byte 51 */
+ u8 tid_tspec;
+ union {
+ __le16 pm_frame_timeout;
+ __le16 attempt_duration;
+ } timeout;
+
+ /*
+ * Duration of EDCA burst Tx Opportunity, in 32-usec units.
+ * Set this if txop time is not specified by HCCA protocol (e.g. by AP).
+ */
+ __le16 driver_txop;
+
+ );
/*
* MAC header goes here, followed by 2 bytes padding if MAC header
* length is 26 or 30 bytes, followed by payload data
*/
- union {
- DECLARE_FLEX_ARRAY(u8, payload);
- DECLARE_FLEX_ARRAY(struct ieee80211_hdr, hdr);
- };
+ struct ieee80211_hdr hdr[];
} __packed;
+static_assert(offsetof(struct iwl_tx_cmd, hdr) == sizeof(struct iwl_tx_cmd_hdr),
+ "struct member likely outside of __struct_group()");
/*
* TX command response is sent after *agn* transmission attempts.
@@ -2312,7 +2314,7 @@ struct iwl_scan_cmd {
/* For active scans (set to all-0s for passive scans).
* Does not include payload. Must specify Tx rate; no rate scaling. */
- struct iwl_tx_cmd tx_cmd;
+ struct iwl_tx_cmd_hdr tx_cmd;
/* For directed active scans (set to all-0s otherwise) */
struct iwl_ssid_ie direct_scan[PROBE_OPTION_MAX];
@@ -2423,7 +2425,7 @@ struct iwlagn_beacon_notif {
*/
struct iwl_tx_beacon_cmd {
- struct iwl_tx_cmd tx;
+ struct iwl_tx_cmd_hdr tx;
__le16 tim_idx;
u8 tim_size;
u8 reserved1;
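To illustrate the __struct_group() split above: commands that only need the fixed Tx header (such as iwl_scan_cmd and iwl_tx_beacon_cmd) now embed struct iwl_tx_cmd_hdr directly, while struct iwl_tx_cmd keeps the identical prefix plus a flexible MAC-header tail. A hedged sketch under that assumption; the wrapper name is illustrative:

/* Fill the fixed Tx header of a scan command without ever touching the
 * flexible-array tail that only struct iwl_tx_cmd carries.
 */
static void example_fill_scan_tx_hdr(struct iwl_scan_cmd *scan)
{
	struct iwl_tx_cmd_hdr *tx = &scan->tx_cmd;

	tx->tx_flags = TX_CMD_FLG_SEQ_CTL_MSK;
	tx->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
}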
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/tt.c b/drivers/net/wireless/intel/iwlwifi/dvm/tt.c
index 43e8d04d5a8b..e1d78550e443 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/tt.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/tt.c
@@ -124,17 +124,6 @@ enum iwl_antenna_ok iwl_tx_ant_restriction(struct iwl_priv *priv)
return restriction->tx_stream;
}
-enum iwl_antenna_ok iwl_rx_ant_restriction(struct iwl_priv *priv)
-{
- struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
- struct iwl_tt_restriction *restriction;
-
- if (!priv->thermal_throttle.advanced_tt)
- return IWL_ANT_OK_MULTI;
- restriction = tt->restriction + tt->state;
- return restriction->rx_stream;
-}
-
#define CT_KILL_EXIT_DURATION (5) /* 5 seconds duration */
#define CT_KILL_WAITING_DURATION (300) /* 300ms duration */
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/tt.h b/drivers/net/wireless/intel/iwlwifi/dvm/tt.h
index 4af792d41dce..198f934a0d16 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/tt.h
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/tt.h
@@ -100,7 +100,6 @@ u8 iwl_tt_current_power_mode(struct iwl_priv *priv);
bool iwl_tt_is_low_power_state(struct iwl_priv *priv);
bool iwl_ht_enabled(struct iwl_priv *priv);
enum iwl_antenna_ok iwl_tx_ant_restriction(struct iwl_priv *priv);
-enum iwl_antenna_ok iwl_rx_ant_restriction(struct iwl_priv *priv);
void iwl_tt_enter_ct_kill(struct iwl_priv *priv);
void iwl_tt_exit_ct_kill(struct iwl_priv *priv);
void iwl_tt_handler(struct iwl_priv *priv);
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/alive.h b/drivers/net/wireless/intel/iwlwifi/fw/api/alive.h
index ebe85fdf08d3..42360a8f23aa 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/alive.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/alive.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2012-2014, 2018, 2020-2021, 2024 Intel Corporation
+ * Copyright (C) 2012-2014, 2018, 2020-2021, 2024-2025 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -117,7 +117,7 @@ struct iwl_alive_ntf_v6 {
* finishing init flow
* @IWL_INIT_DEBUG_CFG: driver is going to send debug config command
* @IWL_INIT_NVM: driver is going to send NVM_ACCESS commands
- * @IWL_INIT_PHY: driver is going to send PHY_DB commands
+ * @IWL_INIT_PHY: driver is going to send the PHY_CONFIGURATION_CMD
*/
enum iwl_extended_cfg_flags {
IWL_INIT_DEBUG_CFG,
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h b/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h
index 34a1f97653c0..d43adb6f9220 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h
@@ -502,11 +502,16 @@ enum iwl_legacy_cmds {
/**
* @DTS_MEASUREMENT_NOTIFICATION:
* &struct iwl_dts_measurement_notif_v1 or
- * &struct iwl_dts_measurement_notif_v2
+ * &struct iwl_dts_measurement_notif
*/
DTS_MEASUREMENT_NOTIFICATION = 0xdd,
/**
+ * @DEBUG_HOST_COMMAND: &struct iwl_dhc_cmd
+ */
+ DEBUG_HOST_COMMAND = 0xf1,
+
+ /**
* @LDBG_CONFIG_CMD: configure continuous trace recording
*/
LDBG_CONFIG_CMD = 0xf6,
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/context.h b/drivers/net/wireless/intel/iwlwifi/fw/api/context.h
index a9fa5f054ce0..464eed9b5e71 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/context.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/context.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2012-2014, 2022 Intel Corporation
+ * Copyright (C) 2012-2014, 2022, 2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -14,6 +14,9 @@
* @FW_CTXT_COLOR_POS: position of the color
* @FW_CTXT_COLOR_MSK: mask of the color
* @FW_CTXT_INVALID: value used to indicate unused/invalid
+ * @FW_CTXT_ID_INVALID: value used to indicate unused/invalid. This can be
+ * used with newer firmware which no longer uses the color. Typically,
+ * firmware versions supported by iwlmld can use this value.
*/
enum iwl_ctxt_id_and_color {
FW_CTXT_ID_POS = 0,
@@ -21,6 +24,7 @@ enum iwl_ctxt_id_and_color {
FW_CTXT_COLOR_POS = 8,
FW_CTXT_COLOR_MSK = 0xff << FW_CTXT_COLOR_POS,
FW_CTXT_INVALID = 0xffffffff,
+ FW_CTXT_ID_INVALID = 0xff,
};
#define FW_CMD_ID_AND_COLOR(_id, _color) (((_id) << FW_CTXT_ID_POS) |\
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h b/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h
index c2362bc786b2..9c271ea67155 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h
@@ -1006,7 +1006,7 @@ struct iwl_wowlan_wake_pkt_notif {
* struct iwl_mvm_d3_end_notif - d3 end notification
* @flags: See &enum iwl_d0i3_flags
*/
-struct iwl_mvm_d3_end_notif {
+struct iwl_d3_end_notif {
__le32 flags;
} __packed;
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h b/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h
index 570a3f722510..c139b965980d 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2024 Intel Corporation
+ * Copyright (C) 2024-2025 Intel Corporation
* Copyright (C) 2012-2014, 2018-2022 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
@@ -91,8 +91,14 @@ enum iwl_data_path_subcmd_ids {
SEC_KEY_CMD = 0x18,
/**
+ * @OMI_SEND_STATUS_NOTIF: notification after OMI was sent
+ * uses &struct iwl_omi_send_status_notif
+ */
+ OMI_SEND_STATUS_NOTIF = 0xF2,
+
+ /**
* @ESR_MODE_NOTIF: notification to recommend/force a wanted esr mode,
- * uses &struct iwl_mvm_esr_mode_notif
+ * uses &struct iwl_esr_mode_notif
*/
ESR_MODE_NOTIF = 0xF3,
@@ -688,4 +694,13 @@ struct iwl_sec_key_cmd {
} __packed u; /* SEC_KEY_OPERATION_API_U_VER_1 */
} __packed; /* SEC_KEY_CMD_API_S_VER_1 */
+/**
+ * struct iwl_omi_send_status_notif - OMI status notification
+ * @success: indicates that the OMI was sent successfully
+ * (currently always set)
+ */
+struct iwl_omi_send_status_notif {
+ __le32 success;
+} __packed; /* OMI_SEND_STATUS_NTFY_API_S_VER_1 */
+
#endif /* __iwl_fw_api_datapath_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h b/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h
index 550de6db1834..4fab6c66994e 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2018-2024 Intel Corporation
+ * Copyright (C) 2018-2025 Intel Corporation
*/
#ifndef __iwl_fw_dbg_tlv_h__
#define __iwl_fw_dbg_tlv_h__
@@ -527,6 +527,8 @@ enum iwl_fw_ini_time_point {
* @IWL_FW_INI_APPLY_POLICY_OVERRIDE_DATA: override trigger data.
* Append otherwise
* @IWL_FW_INI_APPLY_POLICY_DUMP_COMPLETE_CMD: send cmd once dump collected
+ * @IWL_FW_INI_APPLY_POLICY_RESET_HANDSHAKE: perform reset handshake and
+ * split dump to before/after with region marking
*/
enum iwl_fw_ini_trigger_apply_policy {
IWL_FW_INI_APPLY_POLICY_MATCH_TIME_POINT = BIT(0),
@@ -535,6 +537,7 @@ enum iwl_fw_ini_trigger_apply_policy {
IWL_FW_INI_APPLY_POLICY_OVERRIDE_CFG = BIT(9),
IWL_FW_INI_APPLY_POLICY_OVERRIDE_DATA = BIT(10),
IWL_FW_INI_APPLY_POLICY_DUMP_COMPLETE_CMD = BIT(16),
+ IWL_FW_INI_APPLY_POLICY_RESET_HANDSHAKE = BIT(17),
};
/**
@@ -556,12 +559,14 @@ enum iwl_fw_ini_trigger_reset_fw_policy {
* @IWL_FW_INI_DEBUG_DUMP_POLICY_NO_LIMIT: OS has no limit of dump size
* @IWL_FW_INI_DEBUG_DUMP_POLICY_MAX_LIMIT_600KB: mini dump only 600KB region dump
* @IWL_FW_IWL_DEBUG_DUMP_POLICY_MAX_LIMIT_5MB: mini dump 5MB size dump
+ * @IWL_FW_IWL_DEBUG_DUMP_POLICY_BEFORE_RESET: dump this region before reset
+ * handshake (if requested by %IWL_FW_INI_APPLY_POLICY_RESET_HANDSHAKE)
*/
enum iwl_fw_ini_dump_policy {
IWL_FW_INI_DEBUG_DUMP_POLICY_NO_LIMIT = BIT(0),
IWL_FW_INI_DEBUG_DUMP_POLICY_MAX_LIMIT_600KB = BIT(1),
IWL_FW_IWL_DEBUG_DUMP_POLICY_MAX_LIMIT_5MB = BIT(2),
-
+ IWL_FW_IWL_DEBUG_DUMP_POLICY_BEFORE_RESET = BIT(3),
};
/**
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h b/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h
index aa88e91d117e..0cf1e5124fba 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2005-2014, 2018-2024 Intel Corporation
+ * Copyright (C) 2005-2014, 2018-2025 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -51,7 +51,7 @@ enum iwl_debug_cmds {
/**
* @GET_TAS_STATUS:
* sends command to fw to get TAS status
- * the response is &struct iwl_mvm_tas_status_resp
+ * the response is &struct iwl_tas_status_resp
*/
GET_TAS_STATUS = 0xA,
/**
@@ -439,25 +439,20 @@ struct iwl_dbg_dump_complete_cmd {
__le32 tp_data;
} __packed; /* FW_DUMP_COMPLETE_CMD_API_S_VER_1 */
-#define TAS_LMAC_BAND_HB 0
-#define TAS_LMAC_BAND_LB 1
-#define TAS_LMAC_BAND_UHB 2
-#define TAS_LMAC_BAND_INVALID 3
-
/**
- * struct iwl_mvm_tas_status_per_mac - tas status per lmac
+ * struct iwl_tas_status_per_mac - tas status per lmac
* @static_status: tas statically enabled or disabled per lmac - TRUE/FALSE
* @static_dis_reason: TAS static disable reason, uses
- * &enum iwl_mvm_tas_statically_disabled_reason
+ * &enum iwl_tas_statically_disabled_reason
* @dynamic_status: Current TAS status. uses
- * &enum iwl_mvm_tas_dyna_status
+ * &enum iwl_tas_dyna_status
* @near_disconnection: is TAS currently near disconnection per lmac? - TRUE/FALSE
* @max_reg_pwr_limit: Regulatory power limits in dBm
* @sar_limit: SAR limits per lmac in dBm
* @band: Band per lmac
* @reserved: reserved
*/
-struct iwl_mvm_tas_status_per_mac {
+struct iwl_tas_status_per_mac {
u8 static_status;
u8 static_dis_reason;
u8 dynamic_status;
@@ -466,35 +461,35 @@ struct iwl_mvm_tas_status_per_mac {
__le16 sar_limit;
u8 band;
u8 reserved[3];
-} __packed; /*DEBUG_GET_TAS_STATUS_PER_MAC_S_VER_1*/
+} __packed; /* DEBUG_GET_TAS_STATUS_PER_MAC_S_VER_1 */
/**
- * struct iwl_mvm_tas_status_resp - Response to GET_TAS_STATUS
+ * struct iwl_tas_status_resp - Response to GET_TAS_STATUS
* @tas_fw_version: TAS FW version
* @is_uhb_for_usa_enable: is UHB enabled in USA? - TRUE/FALSE
* @curr_mcc: current mcc
* @block_list: country block list
* @tas_status_mac: TAS status per lmac, uses
- * &struct iwl_mvm_tas_status_per_mac
+ * &struct iwl_tas_status_per_mac
* @in_dual_radio: is TAS in dual radio? - TRUE/FALSE
* @uhb_allowed_flags: see &enum iwl_tas_uhb_allowed_flags.
* This member is valid only when fw has
* %IWL_UCODE_TLV_CAPA_UHB_CANADA_TAS_SUPPORT capability.
* @reserved: reserved
*/
-struct iwl_mvm_tas_status_resp {
+struct iwl_tas_status_resp {
u8 tas_fw_version;
u8 is_uhb_for_usa_enable;
__le16 curr_mcc;
__le16 block_list[16];
- struct iwl_mvm_tas_status_per_mac tas_status_mac[2];
+ struct iwl_tas_status_per_mac tas_status_mac[2];
u8 in_dual_radio;
u8 uhb_allowed_flags;
u8 reserved[2];
-} __packed; /*DEBUG_GET_TAS_STATUS_RSP_API_S_VER_3*/
+} __packed; /* DEBUG_GET_TAS_STATUS_RSP_API_S_VER_3 */
/**
- * enum iwl_mvm_tas_dyna_status - TAS current running status
+ * enum iwl_tas_dyna_status - TAS current running status
* @TAS_DYNA_INACTIVE: TAS status is inactive
* @TAS_DYNA_INACTIVE_MVM_MODE: TAS is disabled due because FW is in MVM mode
* or is in softap mode.
@@ -507,7 +502,7 @@ struct iwl_mvm_tas_status_resp {
* @TAS_DYNA_ACTIVE: TAS is currently active
* @TAS_DYNA_STATUS_MAX: TAS status max value
*/
-enum iwl_mvm_tas_dyna_status {
+enum iwl_tas_dyna_status {
TAS_DYNA_INACTIVE,
TAS_DYNA_INACTIVE_MVM_MODE,
TAS_DYNA_INACTIVE_TRIGGER_MODE,
@@ -516,19 +511,22 @@ enum iwl_mvm_tas_dyna_status {
TAS_DYNA_ACTIVE,
TAS_DYNA_STATUS_MAX,
-}; /*_TAS_DYNA_STATUS_E*/
+};
/**
- * enum iwl_mvm_tas_statically_disabled_reason - TAS statically disabled reason
+ * enum iwl_tas_statically_disabled_reason - TAS statically disabled reason
* @TAS_DISABLED_DUE_TO_BIOS: TAS is disabled because TAS is disabled in BIOS
* @TAS_DISABLED_DUE_TO_SAR_6DBM: TAS is disabled because SAR limit is less than 6 Dbm
* @TAS_DISABLED_REASON_INVALID: TAS disable reason is invalid
+ * @TAS_DISABLED_DUE_TO_TABLE_SOURCE_INVALID: TAS is disabled due to
+ * table source invalid
* @TAS_DISABLED_REASON_MAX: TAS disable reason max value
*/
-enum iwl_mvm_tas_statically_disabled_reason {
+enum iwl_tas_statically_disabled_reason {
TAS_DISABLED_DUE_TO_BIOS,
TAS_DISABLED_DUE_TO_SAR_6DBM,
TAS_DISABLED_REASON_INVALID,
+ TAS_DISABLED_DUE_TO_TABLE_SOURCE_INVALID,
TAS_DISABLED_REASON_MAX,
}; /*_TAS_STATICALLY_DISABLED_REASON_E*/
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/dhc.h b/drivers/net/wireless/intel/iwlwifi/fw/api/dhc.h
new file mode 100644
index 000000000000..b6d79c678cd8
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/dhc.h
@@ -0,0 +1,226 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2025 Intel Corporation
+ */
+#ifndef __iwl_fw_api_dhc_h__
+#define __iwl_fw_api_dhc_h__
+
+#define DHC_TABLE_MASK_POS (28)
+
+/**
+ * enum iwl_dhc_table_id - DHC table operations index
+ */
+enum iwl_dhc_table_id {
+ /**
+ * @DHC_TABLE_INTEGRATION: select the integration table
+ */
+ DHC_TABLE_INTEGRATION = 2 << DHC_TABLE_MASK_POS,
+ /**
+ * @DHC_TABLE_TOOLS: select the tools table
+ */
+ DHC_TABLE_TOOLS = 0,
+};
+
+/**
+ * enum iwl_dhc_umac_tools_table - tools operations
+ * @DHC_TOOLS_UMAC_GET_TAS_STATUS: Get TAS status.
+ * See @struct iwl_dhc_tas_status_resp
+ */
+enum iwl_dhc_umac_tools_table {
+ DHC_TOOLS_UMAC_GET_TAS_STATUS = 0,
+};
+
+/**
+ * enum iwl_dhc_umac_integration_table - integration operations
+ */
+enum iwl_dhc_umac_integration_table {
+ /**
+ * @DHC_INT_UMAC_TWT_OPERATION: trigger a TWT operation
+ */
+ DHC_INT_UMAC_TWT_OPERATION = 4,
+ /**
+ * @DHC_INTEGRATION_TLC_DEBUG_CONFIG: TLC debug
+ */
+ DHC_INTEGRATION_TLC_DEBUG_CONFIG = 1,
+ /**
+ * @DHC_INTEGRATION_MAX: Maximum UMAC integration table entries
+ */
+ DHC_INTEGRATION_MAX
+};
+
+#define DHC_TARGET_UMAC BIT(27)
+
+/**
+ * struct iwl_dhc_cmd - debug host command
+ * @length: length in DWs of the data structure that is concatenated to the end
+ * of this struct
+ * @index_and_mask: bit 31 is 1 for data set operation else it's 0
+ * bits 28-30 is the index of the table of the operation -
+ * &enum iwl_dhc_table_id *
+ * bit 27 is 0 if the cmd targeted to LMAC and 1 if targeted to UMAC,
+ * (LMAC is 0 for backward compatibility)
+ * bit 26 is 0 if the cmd targeted to LMAC0 and 1 if targeted to LMAC1,
+ * relevant only if bit 27 set to 0
+ * bits 0-25 is a specific entry index in the table specified in bits 28-30
+ *
+ * @data: the concatenated data.
+ */
+struct iwl_dhc_cmd {
+ __le32 length;
+ __le32 index_and_mask;
+ __le32 data[];
+} __packed; /* DHC_CMD_API_S */
+
+/**
+ * struct iwl_dhc_payload_hdr - DHC payload header
+ * @version: a version of a payload
+ * @reserved: reserved for alignment
+ */
+struct iwl_dhc_payload_hdr {
+ u8 version;
+ u8 reserved[3];
+} __packed; /* DHC_PAYLOAD_HDR_API_S_VER_1 */
+
+/**
+ * struct iwl_dhc_tas_status_per_radio - TAS status per radio
+ * @band: &PHY_BAND_5 for high band, PHY_BAND_24 for low band and
+ * &PHY_BAND_6 for ultra high band.
+ * @static_status: TAS statically enabled or disabled
+ * @static_disable_reason: TAS static disable reason, uses
+ * &enum iwl_tas_statically_disabled_reason
+ * @near_disconnection: is TAS currently near disconnection per radio
+ * @dynamic_status_ant_a: Antenna A current TAS status.
+ * uses &enum iwl_tas_dyna_status
+ * @dynamic_status_ant_b: Antenna B current TAS status.
+ * uses &enum iwl_tas_dyna_status
+ * @max_reg_pwr_limit_ant_a: Antenna A regulatory power limits in dBm
+ * @max_reg_pwr_limit_ant_b: Antenna B regulatory power limits in dBm
+ * @sar_limit_ant_a: Antenna A SAR limit per radio in dBm
+ * @sar_limit_ant_b: Antenna B SAR limit per radio in dBm
+ * @reserved: reserved for alignment
+ */
+struct iwl_dhc_tas_status_per_radio {
+ u8 band;
+ u8 static_status;
+ u8 static_disable_reason;
+ u8 near_disconnection;
+ u8 dynamic_status_ant_a;
+ u8 dynamic_status_ant_b;
+ __le16 max_reg_pwr_limit_ant_a;
+ __le16 max_reg_pwr_limit_ant_b;
+ __le16 sar_limit_ant_a;
+ __le16 sar_limit_ant_b;
+ u8 reserved[2];
+} __packed; /* DHC_TAS_STATUS_PER_RADIO_S_VER_1 */
+
+/**
+ * struct iwl_dhc_tas_status_resp - Response to DHC_TOOLS_UMAC_GET_TAS_STATUS
+ * @header: DHC payload header, uses &struct iwl_dhc_payload_hdr
+ * @tas_config_info: see @struct bios_value_u32
+ * @mcc_block_list: block listed country codes
+ * @tas_status_radio: TAS status, uses &struct iwl_dhc_tas_status_per_radio
+ * @curr_mcc: current mcc
+ * @valid_radio_mask: bitmask of valid entries in @tas_status_radio.
+ * @reserved: reserved for alignment
+ */
+struct iwl_dhc_tas_status_resp {
+ struct iwl_dhc_payload_hdr header;
+ struct bios_value_u32 tas_config_info;
+ __le16 mcc_block_list[IWL_WTAS_BLACK_LIST_MAX];
+ struct iwl_dhc_tas_status_per_radio tas_status_radio[2];
+ __le16 curr_mcc;
+ u8 valid_radio_mask;
+ u8 reserved;
+} __packed; /* DHC_TAS_STATUS_RSP_API_S_VER_1 */
+
+/**
+ * struct iwl_dhc_cmd_resp_v1 - debug host command response
+ * @status: status of the command
+ * @data: the response data
+ */
+struct iwl_dhc_cmd_resp_v1 {
+ __le32 status;
+ __le32 data[];
+} __packed; /* DHC_RESP_API_S_VER_1 */
+
+/**
+ * struct iwl_dhc_cmd_resp - debug host command response
+ * @status: status of the command
+ * @descriptor: command descriptor (index_and_mask) returned
+ * @data: the response data
+ */
+struct iwl_dhc_cmd_resp {
+ __le32 status;
+ __le32 descriptor;
+ __le32 data[];
+} __packed; /* DHC_RESP_API_S_VER_2 and DHC_RESP_API_S_VER_3 */
+
+/**
+ * enum iwl_dhc_twt_operation_type - describes the TWT operation type
+ *
+ * @DHC_TWT_REQUEST: Send a Request TWT command
+ * @DHC_TWT_SUGGEST: Send a Suggest TWT command
+ * @DHC_TWT_DEMAND: Send a Demand TWT command
+ * @DHC_TWT_GROUPING: Send a Grouping TWT command
+ * @DHC_TWT_ACCEPT: Send an Accept TWT command
+ * @DHC_TWT_ALTERNATE: Send an Alternate TWT command
+ * @DHC_TWT_DICTATE: Send a Dictate TWT command
+ * @DHC_TWT_REJECT: Send a Reject TWT command
+ * @DHC_TWT_TEARDOWN: Send a TearDown TWT command
+ */
+enum iwl_dhc_twt_operation_type {
+ DHC_TWT_REQUEST,
+ DHC_TWT_SUGGEST,
+ DHC_TWT_DEMAND,
+ DHC_TWT_GROUPING,
+ DHC_TWT_ACCEPT,
+ DHC_TWT_ALTERNATE,
+ DHC_TWT_DICTATE,
+ DHC_TWT_REJECT,
+ DHC_TWT_TEARDOWN,
+}; /* DHC_TWT_OPERATION_TYPE_E */
+
+/**
+ * struct iwl_dhc_twt_operation - trigger a TWT operation
+ *
+ * @mac_id: the mac Id on which to trigger TWT operation
+ * @twt_operation: see &enum iwl_dhc_twt_operation_type
+ * @target_wake_time: when should we be on channel
+ * @interval_exp: the exponent for the interval
+ * @interval_mantissa: the mantissa for the interval
+ * @min_wake_duration: the minimum duration for the wake period
+ * @trigger: is the TWT triggered or not
+ * @flow_type: is the TWT announced or not
+ * @flow_id: the TWT flow identifier from 0 to 7
+ * @protection: is the TWT protected
+ * @ndo_paging_indicator: is ndo_paging_indicator set
+ * @responder_pm_mode: is responder_pm_mode set
+ * @negotiation_type: if the responder wants to doze outside the TWT SP
+ * @twt_request: 1 for TWT request, 0 otherwise
+ * @implicit: is TWT implicit
+ * @twt_group_assignment: the TWT group assignment
+ * @twt_channel: the TWT channel
+ * @reserved: reserved
+ */
+struct iwl_dhc_twt_operation {
+ __le32 mac_id;
+ __le32 twt_operation;
+ __le64 target_wake_time;
+ __le32 interval_exp;
+ __le32 interval_mantissa;
+ __le32 min_wake_duration;
+ u8 trigger;
+ u8 flow_type;
+ u8 flow_id;
+ u8 protection;
+ u8 ndo_paging_indicator;
+ u8 responder_pm_mode;
+ u8 negotiation_type;
+ u8 twt_request;
+ u8 implicit;
+ u8 twt_group_assignment;
+ u8 twt_channel;
+ u8 reserved;
+}; /* DHC_TWT_OPERATION_API_S */
+
+#endif /* __iwl_fw_api_dhc_h__ */
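A short sketch of how the index_and_mask layout documented above composes for the TAS-status query; only the macros and enum values come from this header, the wrapper is an illustrative assumption:

/* Table id (bits 28-30) | UMAC target (bit 27) | entry index (bits 0-25). */
static inline __le32 example_dhc_tas_index_and_mask(void)
{
	return cpu_to_le32(DHC_TABLE_TOOLS | DHC_TARGET_UMAC |
			   DHC_TOOLS_UMAC_GET_TAS_STATUS);
}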
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/location.h b/drivers/net/wireless/intel/iwlwifi/fw/api/location.h
index b8dff139aa05..e1952fc6d149 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/location.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/location.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2015-2017 Intel Deutschland GmbH
* Copyright (C) 2018-2022 Intel Corporation
- * Copyright (C) 2024 Intel Corporation
+ * Copyright (C) 2024-2025 Intel Corporation
*/
#ifndef __iwl_fw_api_location_h__
#define __iwl_fw_api_location_h__
@@ -1015,7 +1015,7 @@ struct iwl_tof_range_req_ap_entry_v9 {
} __packed; /* LOCATION_RANGE_REQ_AP_ENTRY_CMD_API_S_VER_9 */
/**
- * struct iwl_tof_range_req_ap_entry_v10 - AP configuration parameters
+ * struct iwl_tof_range_req_ap_entry - AP configuration parameters
* @initiator_ap_flags: see &enum iwl_initiator_ap_flags.
* @band: 0 for 5.2 GHz, 1 for 2.4 GHz, 2 for 6GHz
* @channel_num: AP Channel number
@@ -1063,7 +1063,7 @@ struct iwl_tof_range_req_ap_entry_v9 {
* @min_time_between_msr: For non trigger based NDP ranging, the minimum time
* between measurements in units of milliseconds
*/
-struct iwl_tof_range_req_ap_entry_v10 {
+struct iwl_tof_range_req_ap_entry {
__le32 initiator_ap_flags;
u8 band;
u8 channel_num;
@@ -1134,7 +1134,7 @@ enum iwl_tof_initiator_flags {
IWL_TOF_INITIATOR_FLAGS_NON_ASAP_SUPPORT = BIT(20),
}; /* LOCATION_RANGE_REQ_CMD_API_S_VER_5 */
-#define IWL_MVM_TOF_MAX_APS 5
+#define IWL_TOF_MAX_APS 5
#define IWL_MVM_TOF_MAX_TWO_SIDED_APS 5
/**
@@ -1153,7 +1153,7 @@ enum iwl_tof_initiator_flags {
* when the session is done (successfully / partially).
* one of iwl_tof_response_mode.
* @reserved0: reserved
- * @num_of_ap: Number of APs to measure (error if > IWL_MVM_TOF_MAX_APS)
+ * @num_of_ap: Number of APs to measure (error if > IWL_TOF_MAX_APS)
* @macaddr_random: '0' Use default source MAC address (i.e. p2_p),
* '1' Use MAC Address randomization according to the below
* @range_req_bssid: ranging request BSSID
@@ -1183,7 +1183,7 @@ struct iwl_tof_range_req_cmd_v5 {
u8 ftm_tx_chains;
__le16 common_calib;
__le16 specific_calib;
- struct iwl_tof_range_req_ap_entry_v2 ap[IWL_MVM_TOF_MAX_APS];
+ struct iwl_tof_range_req_ap_entry_v2 ap[IWL_TOF_MAX_APS];
} __packed;
/* LOCATION_RANGE_REQ_CMD_API_S_VER_5 */
@@ -1192,7 +1192,7 @@ struct iwl_tof_range_req_cmd_v5 {
* @initiator_flags: see flags @ iwl_tof_initiator_flags
* @request_id: A Token incremented per request. The same Token will be
* sent back in the range response
- * @num_of_ap: Number of APs to measure (error if > IWL_MVM_TOF_MAX_APS)
+ * @num_of_ap: Number of APs to measure (error if > IWL_TOF_MAX_APS)
* @range_req_bssid: ranging request BSSID
* @macaddr_mask: Bits set to 0 shall be copied from the MAC address template.
* Bits set to 1 shall be randomized by the UMAC
@@ -1216,7 +1216,7 @@ struct iwl_tof_range_req_cmd_v7 {
__le32 tsf_mac_id;
__le16 common_calib;
__le16 specific_calib;
- struct iwl_tof_range_req_ap_entry_v3 ap[IWL_MVM_TOF_MAX_APS];
+ struct iwl_tof_range_req_ap_entry_v3 ap[IWL_TOF_MAX_APS];
} __packed; /* LOCATION_RANGE_REQ_CMD_API_S_VER_7 */
/**
@@ -1224,7 +1224,7 @@ struct iwl_tof_range_req_cmd_v7 {
* @initiator_flags: see flags @ iwl_tof_initiator_flags
* @request_id: A Token incremented per request. The same Token will be
* sent back in the range response
- * @num_of_ap: Number of APs to measure (error if > IWL_MVM_TOF_MAX_APS)
+ * @num_of_ap: Number of APs to measure (error if > IWL_TOF_MAX_APS)
* @range_req_bssid: ranging request BSSID
* @macaddr_mask: Bits set to 0 shall be copied from the MAC address template.
* Bits set to 1 shall be randomized by the UMAC
@@ -1248,7 +1248,7 @@ struct iwl_tof_range_req_cmd_v8 {
__le32 tsf_mac_id;
__le16 common_calib;
__le16 specific_calib;
- struct iwl_tof_range_req_ap_entry_v4 ap[IWL_MVM_TOF_MAX_APS];
+ struct iwl_tof_range_req_ap_entry_v4 ap[IWL_TOF_MAX_APS];
} __packed; /* LOCATION_RANGE_REQ_CMD_API_S_VER_8 */
/**
@@ -1256,7 +1256,7 @@ struct iwl_tof_range_req_cmd_v8 {
* @initiator_flags: see flags @ iwl_tof_initiator_flags
* @request_id: A Token incremented per request. The same Token will be
* sent back in the range response
- * @num_of_ap: Number of APs to measure (error if > IWL_MVM_TOF_MAX_APS)
+ * @num_of_ap: Number of APs to measure (error if > IWL_TOF_MAX_APS)
* @range_req_bssid: ranging request BSSID
* @macaddr_mask: Bits set to 0 shall be copied from the MAC address template.
* Bits set to 1 shall be randomized by the UMAC
@@ -1276,7 +1276,7 @@ struct iwl_tof_range_req_cmd_v9 {
u8 macaddr_template[ETH_ALEN];
__le32 req_timeout_ms;
__le32 tsf_mac_id;
- struct iwl_tof_range_req_ap_entry_v6 ap[IWL_MVM_TOF_MAX_APS];
+ struct iwl_tof_range_req_ap_entry_v6 ap[IWL_TOF_MAX_APS];
} __packed; /* LOCATION_RANGE_REQ_CMD_API_S_VER_9 */
/**
@@ -1284,7 +1284,7 @@ struct iwl_tof_range_req_cmd_v9 {
* @initiator_flags: see flags @ iwl_tof_initiator_flags
* @request_id: A Token incremented per request. The same Token will be
* sent back in the range response
- * @num_of_ap: Number of APs to measure (error if > IWL_MVM_TOF_MAX_APS)
+ * @num_of_ap: Number of APs to measure (error if > IWL_TOF_MAX_APS)
* @range_req_bssid: ranging request BSSID
* @macaddr_mask: Bits set to 0 shall be copied from the MAC address template.
* Bits set to 1 shall be randomized by the UMAC
@@ -1304,7 +1304,7 @@ struct iwl_tof_range_req_cmd_v11 {
u8 macaddr_template[ETH_ALEN];
__le32 req_timeout_ms;
__le32 tsf_mac_id;
- struct iwl_tof_range_req_ap_entry_v7 ap[IWL_MVM_TOF_MAX_APS];
+ struct iwl_tof_range_req_ap_entry_v7 ap[IWL_TOF_MAX_APS];
} __packed; /* LOCATION_RANGE_REQ_CMD_API_S_VER_11 */
/**
@@ -1312,7 +1312,7 @@ struct iwl_tof_range_req_cmd_v11 {
* @initiator_flags: see flags @ iwl_tof_initiator_flags
* @request_id: A Token incremented per request. The same Token will be
* sent back in the range response
- * @num_of_ap: Number of APs to measure (error if > IWL_MVM_TOF_MAX_APS)
+ * @num_of_ap: Number of APs to measure (error if > IWL_TOF_MAX_APS)
* @range_req_bssid: ranging request BSSID
* @macaddr_mask: Bits set to 0 shall be copied from the MAC address template.
* Bits set to 1 shall be randomized by the UMAC
@@ -1332,7 +1332,7 @@ struct iwl_tof_range_req_cmd_v12 {
u8 macaddr_template[ETH_ALEN];
__le32 req_timeout_ms;
__le32 tsf_mac_id;
- struct iwl_tof_range_req_ap_entry_v8 ap[IWL_MVM_TOF_MAX_APS];
+ struct iwl_tof_range_req_ap_entry_v8 ap[IWL_TOF_MAX_APS];
} __packed; /* LOCATION_RANGE_REQ_CMD_API_S_VER_12 */
/**
@@ -1340,7 +1340,7 @@ struct iwl_tof_range_req_cmd_v12 {
* @initiator_flags: see flags @ iwl_tof_initiator_flags
* @request_id: A Token incremented per request. The same Token will be
* sent back in the range response
- * @num_of_ap: Number of APs to measure (error if > IWL_MVM_TOF_MAX_APS)
+ * @num_of_ap: Number of APs to measure (error if > IWL_TOF_MAX_APS)
* @range_req_bssid: ranging request BSSID
* @macaddr_mask: Bits set to 0 shall be copied from the MAC address template.
* Bits set to 1 shall be randomized by the UMAC
@@ -1360,15 +1360,15 @@ struct iwl_tof_range_req_cmd_v13 {
u8 macaddr_template[ETH_ALEN];
__le32 req_timeout_ms;
__le32 tsf_mac_id;
- struct iwl_tof_range_req_ap_entry_v9 ap[IWL_MVM_TOF_MAX_APS];
+ struct iwl_tof_range_req_ap_entry_v9 ap[IWL_TOF_MAX_APS];
} __packed; /* LOCATION_RANGE_REQ_CMD_API_S_VER_13 */
/**
- * struct iwl_tof_range_req_cmd_v14 - start measurement cmd
+ * struct iwl_tof_range_req_cmd - start measurement cmd
* @initiator_flags: see flags @ iwl_tof_initiator_flags
* @request_id: A Token incremented per request. The same Token will be
* sent back in the range response
- * @num_of_ap: Number of APs to measure (error if > IWL_MVM_TOF_MAX_APS)
+ * @num_of_ap: Number of APs to measure (error if > IWL_TOF_MAX_APS)
* @range_req_bssid: ranging request BSSID
* @macaddr_mask: Bits set to 0 shall be copied from the MAC address template.
* Bits set to 1 shall be randomized by the UMAC
@@ -1377,9 +1377,9 @@ struct iwl_tof_range_req_cmd_v13 {
* This is the session time for completing the measurement.
* @tsf_mac_id: report the measurement start time for each ap in terms of the
* TSF of this mac id. 0xff to disable TSF reporting.
- * @ap: per-AP request data, see &struct iwl_tof_range_req_ap_entry_v10.
+ * @ap: per-AP request data, see &struct iwl_tof_range_req_ap_entry.
*/
-struct iwl_tof_range_req_cmd_v14 {
+struct iwl_tof_range_req_cmd {
__le32 initiator_flags;
u8 request_id;
u8 num_of_ap;
@@ -1388,8 +1388,8 @@ struct iwl_tof_range_req_cmd_v14 {
u8 macaddr_template[ETH_ALEN];
__le32 req_timeout_ms;
__le32 tsf_mac_id;
- struct iwl_tof_range_req_ap_entry_v10 ap[IWL_MVM_TOF_MAX_APS];
-} __packed; /* LOCATION_RANGE_REQ_CMD_API_S_VER_13 */
+ struct iwl_tof_range_req_ap_entry ap[IWL_TOF_MAX_APS];
+} __packed; /* LOCATION_RANGE_REQ_CMD_API_S_VER_15 */
/*
* enum iwl_tof_range_request_status - status of the sent request
@@ -1609,7 +1609,7 @@ struct iwl_tof_range_rsp_ap_entry_ntfy_v5 {
} __packed; /* LOCATION_RANGE_RSP_AP_ETRY_NTFY_API_S_VER_5 */
/**
- * struct iwl_tof_range_rsp_ap_entry_ntfy_v6 - AP parameters (response)
+ * struct iwl_tof_range_rsp_ap_entry_ntfy - AP parameters (response)
* @bssid: BSSID of the AP
* @measure_status: current APs measurement status, one of
* &enum iwl_tof_entry_status.
@@ -1645,7 +1645,7 @@ struct iwl_tof_range_rsp_ap_entry_ntfy_v5 {
* @tx_pn: the last PN used for this responder Tx in case PMF is configured in
* LE byte order.
*/
-struct iwl_tof_range_rsp_ap_entry_ntfy_v6 {
+struct iwl_tof_range_rsp_ap_entry_ntfy {
u8 bssid[ETH_ALEN];
u8 measure_status;
u8 measure_bw;
@@ -1695,7 +1695,7 @@ enum iwl_tof_response_status {
* @request_status: status of current measurement session, one of
* &enum iwl_tof_response_status.
* @last_in_batch: reprot policy (when not all responses are uploaded at once)
- * @num_of_aps: Number of APs to measure (error if > IWL_MVM_TOF_MAX_APS)
+ * @num_of_aps: Number of APs to measure (error if > IWL_TOF_MAX_APS)
* @ap: per-AP data
*/
struct iwl_tof_range_rsp_ntfy_v5 {
@@ -1703,7 +1703,7 @@ struct iwl_tof_range_rsp_ntfy_v5 {
u8 request_status;
u8 last_in_batch;
u8 num_of_aps;
- struct iwl_tof_range_rsp_ap_entry_ntfy_v3 ap[IWL_MVM_TOF_MAX_APS];
+ struct iwl_tof_range_rsp_ap_entry_ntfy_v3 ap[IWL_TOF_MAX_APS];
} __packed; /* LOCATION_RANGE_RSP_NTFY_API_S_VER_5 */
/**
@@ -1719,7 +1719,7 @@ struct iwl_tof_range_rsp_ntfy_v6 {
u8 num_of_aps;
u8 last_report;
u8 reserved;
- struct iwl_tof_range_rsp_ap_entry_ntfy_v4 ap[IWL_MVM_TOF_MAX_APS];
+ struct iwl_tof_range_rsp_ap_entry_ntfy_v4 ap[IWL_TOF_MAX_APS];
} __packed; /* LOCATION_RANGE_RSP_NTFY_API_S_VER_6 */
/**
@@ -1735,23 +1735,23 @@ struct iwl_tof_range_rsp_ntfy_v7 {
u8 num_of_aps;
u8 last_report;
u8 reserved;
- struct iwl_tof_range_rsp_ap_entry_ntfy_v5 ap[IWL_MVM_TOF_MAX_APS];
+ struct iwl_tof_range_rsp_ap_entry_ntfy_v5 ap[IWL_TOF_MAX_APS];
} __packed; /* LOCATION_RANGE_RSP_NTFY_API_S_VER_7 */
/**
- * struct iwl_tof_range_rsp_ntfy_v8 - ranging response notification
+ * struct iwl_tof_range_rsp_ntfy - ranging response notification
* @request_id: A Token ID of the corresponding Range request
* @num_of_aps: Number of APs results
* @last_report: 1 if no more FTM sessions are scheduled, 0 otherwise.
* @reserved: reserved
* @ap: per-AP data
*/
-struct iwl_tof_range_rsp_ntfy_v8 {
+struct iwl_tof_range_rsp_ntfy {
u8 request_id;
u8 num_of_aps;
u8 last_report;
u8 reserved;
- struct iwl_tof_range_rsp_ap_entry_ntfy_v6 ap[IWL_MVM_TOF_MAX_APS];
+ struct iwl_tof_range_rsp_ap_entry_ntfy ap[IWL_TOF_MAX_APS];
} __packed; /* LOCATION_RANGE_RSP_NTFY_API_S_VER_8,
LOCATION_RANGE_RSP_NTFY_API_S_VER_9 */
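As a hedged usage sketch (not part of the patch), a caller building the renamed range request struct would clamp the AP count to the new IWL_TOF_MAX_APS bound; the helper itself is illustrative, the constants are example values, and only fields documented above are set.

static void iwl_ftm_fill_range_req(struct iwl_tof_range_req_cmd *cmd,
				   u8 request_id, u8 n_aps)
{
	memset(cmd, 0, sizeof(*cmd));
	cmd->request_id = request_id;
	/* the firmware reports an error if num_of_ap > IWL_TOF_MAX_APS */
	cmd->num_of_ap = min_t(u8, n_aps, IWL_TOF_MAX_APS);
	cmd->req_timeout_ms = cpu_to_le32(1000);	/* arbitrary example value */
	cmd->tsf_mac_id = cpu_to_le32(0xff);		/* 0xff disables TSF reporting */
}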
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h b/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h
index 37bb7002c1c9..b511e3aa6bb2 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2012-2014, 2018-2019, 2021-2024 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2019, 2021-2025 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -46,7 +46,7 @@ enum iwl_mac_conf_subcmd_ids {
*/
STA_CONFIG_CMD = 0xA,
/**
- * @AUX_STA_CMD: &struct iwl_mvm_aux_sta_cmd
+ * @AUX_STA_CMD: &struct iwl_aux_sta_cmd
*/
AUX_STA_CMD = 0xB,
/**
@@ -62,6 +62,10 @@ enum iwl_mac_conf_subcmd_ids {
*/
ROC_CMD = 0xE,
/**
+ * @TWT_OPERATION_CMD: &struct iwl_twt_operation_cmd
+ */
+ TWT_OPERATION_CMD = 0x10,
+ /**
* @MISSED_BEACONS_NOTIF: &struct iwl_missed_beacons_notif
*/
MISSED_BEACONS_NOTIF = 0xF6,
@@ -641,7 +645,7 @@ struct iwl_sta_cfg_cmd {
} __packed; /* STA_CMD_API_S_VER_1 */
/**
- * struct iwl_mvm_aux_sta_cmd - command for AUX STA configuration
+ * struct iwl_aux_sta_cmd - command for AUX STA configuration
* ( AUX_STA_CMD = 0xB )
*
* @sta_id: index of aux sta to configure
@@ -649,7 +653,7 @@ struct iwl_sta_cfg_cmd {
* @mac_addr: mac addr of the auxilary sta
* @reserved_for_mac_addr: reserved
*/
-struct iwl_mvm_aux_sta_cmd {
+struct iwl_aux_sta_cmd {
__le32 sta_id;
__le32 lmac_id;
u8 mac_addr[ETH_ALEN];
@@ -693,11 +697,11 @@ enum iwl_mvm_fw_esr_recommendation {
}; /* ESR_MODE_RECOMMENDATION_CODE_API_E_VER_1 */
/**
- * struct iwl_mvm_esr_mode_notif - FWs recommendation/force for esr mode
+ * struct iwl_esr_mode_notif - FWs recommendation/force for esr mode
*
* @action: the action to apply on esr state. See &iwl_mvm_fw_esr_recommendation
*/
-struct iwl_mvm_esr_mode_notif {
+struct iwl_esr_mode_notif {
__le32 action;
} __packed; /* ESR_MODE_RECOMMENDATION_NTFY_API_S_VER_1 */
@@ -748,4 +752,83 @@ struct iwl_esr_trans_fail_notif {
__le32 err_code;
} __packed; /* ESR_TRANSITION_FAILED_NTFY_API_S_VER_1 */
+/*
+ * enum iwl_twt_operation_type: TWT operation in a TWT action frame
+ *
+ * @TWT_OPERATION_REQUEST: TWT Request
+ * @TWT_OPERATION_SUGGEST: TWT Suggest
+ * @TWT_OPERATION_DEMAND: TWT Demand
+ * @TWT_OPERATION_GROUPING: TWT Grouping
+ * @TWT_OPERATION_ACCEPT: TWT Accept
+ * @TWT_OPERATION_ALTERNATE: TWT Alternate
+ * @TWT_OPERATION_DICTATE: TWT Dictate
+ * @TWT_OPERATION_REJECT: TWT Reject
+ * @TWT_OPERATION_TEARDOWN: TWT Teardown
+ * @TWT_OPERATION_UNAVAILABILITY: TWT Unavailability
+ */
+enum iwl_twt_operation_type {
+ TWT_OPERATION_REQUEST,
+ TWT_OPERATION_SUGGEST,
+ TWT_OPERATION_DEMAND,
+ TWT_OPERATION_GROUPING,
+ TWT_OPERATION_ACCEPT,
+ TWT_OPERATION_ALTERNATE,
+ TWT_OPERATION_DICTATE,
+ TWT_OPERATION_REJECT,
+ TWT_OPERATION_TEARDOWN,
+ TWT_OPERATION_UNAVAILABILITY,
+ TWT_OPERATION_MAX,
+}; /* TWT_OPERATION_TYPE_E_VER_1 */
+
+/**
+ * struct iwl_twt_operation_cmd - initiate a TWT session from driver
+ *
+ * @link_id: FW link id to initiate the TWT
+ * @twt_operation: &enum iwl_twt_operation_type
+ * @target_wake_time: TSF time to start the TWT
+ * @interval_exponent: the exponent for the interval
+ * @interval_mantissa: the mantissa for the interval
+ * @minimum_wake_duration: the minimum duration for the wake period
+ * @trigger: is the TWT triggered or not
+ * @flow_type: is the TWT announced (0) or not (1)
+ * @flow_id: the TWT flow identifier 0 - 7
+ * @twt_protection: is the TWT protected
+ * @ndp_paging_indicator: is ndp paging indicator set
+ * @responder_pm_mode: is responder pm mode set
+ * @negotiation_type: if the responder wants to doze outside the TWT SP
+ * @twt_request: 1 for TWT request (STA), 0 for TWT response (AP)
+ * @implicit: is TWT implicit
+ * @twt_group_assignment: the TWT group assignment
+ * @twt_channel: the TWT channel
+ * @restricted_info_present: is this a restricted TWT
+ * @dl_bitmap_valid: is DL (downlink) bitmap valid (restricted TWT)
+ * @ul_bitmap_valid: is UL (uplink) bitmap valid (restricted TWT)
+ * @dl_tid_bitmap: DL TID bitmap (restricted TWT)
+ * @ul_tid_bitmap: UL TID bitmap (restricted TWT)
+ */
+struct iwl_twt_operation_cmd {
+ __le32 link_id;
+ __le32 twt_operation;
+ __le64 target_wake_time;
+ __le32 interval_exponent;
+ __le32 interval_mantissa;
+ __le32 minimum_wake_duration;
+ u8 trigger;
+ u8 flow_type;
+ u8 flow_id;
+ u8 twt_protection;
+ u8 ndp_paging_indicator;
+ u8 responder_pm_mode;
+ u8 negotiation_type;
+ u8 twt_request;
+ u8 implicit;
+ u8 twt_group_assignment;
+ u8 twt_channel;
+ u8 restricted_info_present;
+ u8 dl_bitmap_valid;
+ u8 ul_bitmap_valid;
+ u8 dl_tid_bitmap;
+ u8 ul_tid_bitmap;
+} __packed; /* TWT_OPERATION_API_S_VER_1 */
+
#endif /* __iwl_fw_api_mac_cfg_h__ */
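A hedged sketch (not part of the patch) of filling the new TWT operation command for a basic request; the values are arbitrary examples, and how the command is queued to the firmware is deliberately left out.

static void iwl_fill_twt_request(struct iwl_twt_operation_cmd *cmd,
				 u32 fw_link_id)
{
	memset(cmd, 0, sizeof(*cmd));
	cmd->link_id = cpu_to_le32(fw_link_id);
	cmd->twt_operation = cpu_to_le32(TWT_OPERATION_REQUEST);
	cmd->target_wake_time = cpu_to_le64(0);		/* let the responder choose */
	/* 802.11 convention: wake interval = mantissa * 2^exponent microseconds */
	cmd->interval_exponent = cpu_to_le32(10);
	cmd->interval_mantissa = cpu_to_le32(64);
	cmd->minimum_wake_duration = cpu_to_le32(8);	/* arbitrary example value */
	cmd->twt_request = 1;		/* we act as the requesting STA */
	cmd->flow_id = 0;
	cmd->implicit = 1;
}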
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/phy.h b/drivers/net/wireless/intel/iwlwifi/fw/api/phy.h
index c73d4d597857..f63b25b03b7e 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/phy.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/phy.h
@@ -1,11 +1,13 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2012-2014, 2019-2022, 2024 Intel Corporation
+ * Copyright (C) 2012-2014, 2019-2022, 2024-2025 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
#ifndef __iwl_fw_api_phy_h__
#define __iwl_fw_api_phy_h__
+#include <linux/types.h>
+#include <linux/bits.h>
/**
* enum iwl_phy_ops_subcmd_ids - PHY group commands
@@ -19,7 +21,7 @@ enum iwl_phy_ops_subcmd_ids {
CMD_DTS_MEASUREMENT_TRIGGER_WIDE = 0x0,
/**
- * @CTDP_CONFIG_CMD: &struct iwl_mvm_ctdp_cmd
+ * @CTDP_CONFIG_CMD: &struct iwl_ctdp_cmd
*/
CTDP_CONFIG_CMD = 0x03,
@@ -55,7 +57,7 @@ enum iwl_phy_ops_subcmd_ids {
/**
* @DTS_MEASUREMENT_NOTIF_WIDE:
* &struct iwl_dts_measurement_notif_v1 or
- * &struct iwl_dts_measurement_notif_v2
+ * &struct iwl_dts_measurement_notif
*/
DTS_MEASUREMENT_NOTIF_WIDE = 0xFF,
};
@@ -152,13 +154,13 @@ struct iwl_dts_measurement_notif_v1 {
} __packed; /* TEMPERATURE_MEASUREMENT_TRIGGER_NTFY_S_VER_1*/
/**
- * struct iwl_dts_measurement_notif_v2 - measurements notification
+ * struct iwl_dts_measurement_notif - measurements notification
*
* @temp: the measured temperature
* @voltage: the measured voltage
* @threshold_idx: the trip index that was crossed
*/
-struct iwl_dts_measurement_notif_v2 {
+struct iwl_dts_measurement_notif {
__le32 temp;
__le32 voltage;
__le32 threshold_idx;
@@ -195,25 +197,25 @@ struct ct_kill_notif {
} __packed; /* CT_KILL_NOTIFICATION_API_S_VER_1, CT_KILL_NOTIFICATION_API_S_VER_2 */
/**
-* enum iwl_mvm_ctdp_cmd_operation - CTDP command operations
+* enum iwl_ctdp_cmd_operation - CTDP command operations
* @CTDP_CMD_OPERATION_START: update the current budget
* @CTDP_CMD_OPERATION_STOP: stop ctdp
* @CTDP_CMD_OPERATION_REPORT: get the average budget
*/
-enum iwl_mvm_ctdp_cmd_operation {
+enum iwl_ctdp_cmd_operation {
CTDP_CMD_OPERATION_START = 0x1,
CTDP_CMD_OPERATION_STOP = 0x2,
CTDP_CMD_OPERATION_REPORT = 0x4,
};/* CTDP_CMD_OPERATION_TYPE_E */
/**
- * struct iwl_mvm_ctdp_cmd - track and manage the FW power consumption budget
+ * struct iwl_ctdp_cmd - track and manage the FW power consumption budget
*
- * @operation: see &enum iwl_mvm_ctdp_cmd_operation
+ * @operation: see &enum iwl_ctdp_cmd_operation
* @budget: the budget in milliwatt
* @window_size: defined in API but not used
*/
-struct iwl_mvm_ctdp_cmd {
+struct iwl_ctdp_cmd {
__le32 operation;
__le32 budget;
__le32 window_size;
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h b/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h
index 1a60f0cdf972..c2f806cbab59 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h
@@ -226,6 +226,58 @@ struct iwl_tlc_update_notif {
__le32 amsdu_enabled;
} __packed; /* TLC_MNG_UPDATE_NTFY_API_S_VER_2 */
+/**
+ * enum iwl_tlc_debug_types - debug options
+ */
+enum iwl_tlc_debug_types {
+ /**
+ * @IWL_TLC_DEBUG_FIXED_RATE: set fixed rate for rate scaling
+ */
+ IWL_TLC_DEBUG_FIXED_RATE,
+ /**
+ * @IWL_TLC_DEBUG_AGG_DURATION_LIM: time limit for a BA
+ * session, in usec
+ */
+ IWL_TLC_DEBUG_AGG_DURATION_LIM,
+ /**
+ * @IWL_TLC_DEBUG_AGG_FRAME_CNT_LIM: set max number of frames
+ * in an aggregation
+ */
+ IWL_TLC_DEBUG_AGG_FRAME_CNT_LIM,
+ /**
+ * @IWL_TLC_DEBUG_TPC_ENABLED: enable or disable tpc
+ */
+ IWL_TLC_DEBUG_TPC_ENABLED,
+ /**
+ * @IWL_TLC_DEBUG_TPC_STATS: get number of frames Tx'ed in each
+ * tpc step
+ */
+ IWL_TLC_DEBUG_TPC_STATS,
+ /**
+ * @IWL_TLC_DEBUG_RTS_DISABLE: disable RTS (bool true/false).
+ */
+ IWL_TLC_DEBUG_RTS_DISABLE,
+ /**
+ * @IWL_TLC_DEBUG_PARTIAL_FIXED_RATE: set partial fixed rate to fw
+ */
+ IWL_TLC_DEBUG_PARTIAL_FIXED_RATE,
+}; /* TLC_MNG_DEBUG_TYPES_API_E */
+
+#define MAX_DATA_IN_DHC_TLC_CMD 10
+
+/**
+ * struct iwl_dhc_tlc_cmd - fixed debug config
+ * @sta_id: bit 0 - enable/disable, bits 1 - 7 hold station id
+ * @reserved1: reserved
+ * @type: type id of %enum iwl_tlc_debug_types
+ * @data: data to send
+ */
+struct iwl_dhc_tlc_cmd {
+ u8 sta_id;
+ u8 reserved1[3];
+ __le32 type;
+ __le32 data[MAX_DATA_IN_DHC_TLC_CMD];
+} __packed; /* TLC_MNG_DEBUG_CMD_S */
#define IWL_MAX_MCS_DISPLAY_SIZE 12
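A hedged sketch (not part of the patch) of how the TLC debug command above might be filled to pin a fixed rate for one station; the rate encoding carried in data[0] is firmware-specific and not shown, and the sta_id packing follows the kernel-doc above.

static void iwl_fill_tlc_fixed_rate(struct iwl_dhc_tlc_cmd *cmd,
				    u8 sta_id, u32 rate)
{
	memset(cmd, 0, sizeof(*cmd));
	/* bit 0 enables the override, bits 1..7 carry the station id */
	cmd->sta_id = 0x1 | (sta_id << 1);
	cmd->type = cpu_to_le32(IWL_TLC_DEBUG_FIXED_RATE);
	cmd->data[0] = cpu_to_le32(rate);
}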
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/sta.h b/drivers/net/wireless/intel/iwlwifi/fw/api/sta.h
index d7f8a276b683..ecbcd5084cd8 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/sta.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/sta.h
@@ -191,6 +191,7 @@ enum iwl_sta_sleep_flag {
#define STA_KEY_IDX_INVALID (0xff)
#define STA_KEY_MAX_DATA_KEY_NUM (4)
#define IWL_MAX_GLOBAL_KEYS (4)
+#define IWL_MAX_NUM_IGTKS 2
#define STA_KEY_LEN_WEP40 (5)
#define STA_KEY_LEN_WEP104 (13)
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h b/drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h
index 18d030334a6a..f586379d66dd 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2012-2014, 2018-2020, 2022-2024 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2020, 2022-2025 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -351,7 +351,7 @@ enum iwl_roc_activity {
}; /* ROC_ACTIVITY_API_E_VER_1 */
/*
- * ROC command
+ * ROC command v5
*
* Command requests the firmware to remain on a channel for a certain duration.
*
@@ -366,7 +366,7 @@ enum iwl_roc_activity {
* @max_delay: max delay the ROC can start in TU
* @duration: remain on channel duration in TU
*/
-struct iwl_roc_req {
+struct iwl_roc_req_v5 {
__le32 action;
__le32 activity;
__le32 sta_id;
@@ -375,7 +375,41 @@ struct iwl_roc_req {
__le16 reserved;
__le32 max_delay;
__le32 duration;
-} __packed; /* ROC_CMD_API_S_VER_3 */
+} __packed; /* ROC_CMD_API_S_VER_5 */
+
+/*
+ * ROC command
+ *
+ * Command requests the firmware to remain on a channel for a certain duration.
+ *
+ * ( MAC_CONF_GROUP 0x3, ROC_CMD 0xE )
+ *
+ * @action: action to perform, see &enum iwl_ctxt_action
+ * @activity: type of activity, see &enum iwl_roc_activity
+ * @sta_id: station id, resumed during "Remain On Channel" activity.
+ * @channel_info: &struct iwl_fw_channel_info
+ * @node_addr: node MAC address for Rx filtering
+ * @reserved1: align to a dword
+ * @max_delay: max delay the ROC can start in TU
+ * @duration: remain on channel duration in TU
+ * @interval: interval between repetitions (when repetitions > 1).
+ * @repetitions: number of repetitions
+ * 0xFF: infinite repetitions. 0 or 1: single repetition.
+ * @reserved2: align to a dword
+ */
+struct iwl_roc_req {
+ __le32 action;
+ __le32 activity;
+ __le32 sta_id;
+ struct iwl_fw_channel_info channel_info;
+ u8 node_addr[ETH_ALEN];
+ __le16 reserved1;
+ __le32 max_delay;
+ __le32 duration;
+ __le32 interval;
+ u8 repetitions;
+ u8 reserved2[3];
+} __packed; /* ROC_CMD_API_S_VER_6 */
/*
* ROC notification
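A hedged sketch (not part of the patch): filling the v6 ROC request so that a single, non-repeating remain-on-channel behaves like the old v5 command. FW_CTXT_ACTION_ADD and ETH_ALEN are existing definitions; the helper itself and the parameter choices are illustrative.

static void iwl_fill_roc_req(struct iwl_roc_req *cmd, u32 activity,
			     u32 sta_id, const u8 *addr, u32 duration_tu)
{
	memset(cmd, 0, sizeof(*cmd));
	cmd->action = cpu_to_le32(FW_CTXT_ACTION_ADD);
	cmd->activity = cpu_to_le32(activity);
	cmd->sta_id = cpu_to_le32(sta_id);
	memcpy(cmd->node_addr, addr, ETH_ALEN);
	cmd->duration = cpu_to_le32(duration_tu);
	/* 0 or 1 repetitions means a single occurrence, so interval stays 0 */
	cmd->repetitions = 1;
}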
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
index fb2ea38e89ac..03f639fbf9b6 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2005-2014, 2018-2024 Intel Corporation
+ * Copyright (C) 2005-2014, 2018-2025 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2015-2017 Intel Deutschland GmbH
*/
@@ -558,41 +558,71 @@ static void iwl_dump_prph(struct iwl_fw_runtime *fwrt,
}
/*
- * alloc_sgtable - allocates scallerlist table in the given size,
- * fills it with pages and returns it
+ * alloc_sgtable - allocates a (chained) scatterlist of the given size,
+ * fills it with pages and returns it
* @size: the size (in bytes) of the table
-*/
-static struct scatterlist *alloc_sgtable(int size)
+ */
+static struct scatterlist *alloc_sgtable(ssize_t size)
{
- int alloc_size, nents, i;
- struct page *new_page;
- struct scatterlist *iter;
- struct scatterlist *table;
+ struct scatterlist *result = NULL, *prev;
+ int nents, i, n_prev;
nents = DIV_ROUND_UP(size, PAGE_SIZE);
- table = kcalloc(nents, sizeof(*table), GFP_KERNEL);
- if (!table)
- return NULL;
- sg_init_table(table, nents);
- iter = table;
- for_each_sg(table, iter, sg_nents(table), i) {
- new_page = alloc_page(GFP_KERNEL);
- if (!new_page) {
- /* release all previous allocated pages in the table */
- iter = table;
- for_each_sg(table, iter, sg_nents(table), i) {
- new_page = sg_page(iter);
- if (new_page)
- __free_page(new_page);
- }
- kfree(table);
+
+#define N_ENTRIES_PER_PAGE (PAGE_SIZE / sizeof(*result))
+ /*
+ * We need an additional entry for table chaining;
+ * this ensures the loop can finish, i.e. we can
+ * fit at least two entries per page (obviously,
+ * many more really fit).
+ */
+ BUILD_BUG_ON(N_ENTRIES_PER_PAGE < 2);
+
+ while (nents > 0) {
+ struct scatterlist *new, *iter;
+ int n_fill, n_alloc;
+
+ if (nents <= N_ENTRIES_PER_PAGE) {
+ /* last needed table */
+ n_fill = nents;
+ n_alloc = nents;
+ nents = 0;
+ } else {
+ /* fill a page with entries */
+ n_alloc = N_ENTRIES_PER_PAGE;
+ /* reserve one for chaining */
+ n_fill = n_alloc - 1;
+ nents -= n_fill;
+ }
+
+ new = kcalloc(n_alloc, sizeof(*new), GFP_KERNEL);
+ if (!new) {
+ if (result)
+ _devcd_free_sgtable(result);
return NULL;
}
- alloc_size = min_t(int, size, PAGE_SIZE);
- size -= PAGE_SIZE;
- sg_set_page(iter, new_page, alloc_size, 0);
+ sg_init_table(new, n_alloc);
+
+ if (!result)
+ result = new;
+ else
+ sg_chain(prev, n_prev, new);
+ prev = new;
+ n_prev = n_alloc;
+
+ for_each_sg(new, iter, n_fill, i) {
+ struct page *new_page = alloc_page(GFP_KERNEL);
+
+ if (!new_page) {
+ _devcd_free_sgtable(result);
+ return NULL;
+ }
+
+ sg_set_page(iter, new_page, PAGE_SIZE, 0);
+ }
}
- return table;
+
+ return result;
}
static void iwl_fw_get_prph_len(struct iwl_fw_runtime *fwrt,
@@ -2588,29 +2618,28 @@ static const struct iwl_dump_ini_mem_ops iwl_dump_ini_region_ops[] = {
},
};
-static u32 iwl_dump_ini_trigger(struct iwl_fw_runtime *fwrt,
- struct iwl_fwrt_dump_data *dump_data,
- struct list_head *list)
+enum iwl_dump_ini_region_selector {
+ IWL_INI_DUMP_ALL_REGIONS,
+ IWL_INI_DUMP_EARLY_REGIONS,
+ IWL_INI_DUMP_LATE_REGIONS,
+};
+
+static u32
+iwl_dump_ini_dump_regions(struct iwl_fw_runtime *fwrt,
+ struct iwl_fwrt_dump_data *dump_data,
+ struct list_head *list,
+ enum iwl_fw_ini_time_point tp_id,
+ u64 regions_mask,
+ struct iwl_dump_ini_region_data *imr_reg_data,
+ enum iwl_dump_ini_region_selector which)
{
- struct iwl_fw_ini_trigger_tlv *trigger = dump_data->trig;
- enum iwl_fw_ini_time_point tp_id = le32_to_cpu(trigger->time_point);
- struct iwl_dump_ini_region_data reg_data = {
- .dump_data = dump_data,
- };
- struct iwl_dump_ini_region_data imr_reg_data = {
- .dump_data = dump_data,
- };
- int i;
u32 size = 0;
- u64 regions_mask = le64_to_cpu(trigger->regions_mask) &
- ~(fwrt->trans->dbg.unsupported_region_msk);
-
- BUILD_BUG_ON(sizeof(trigger->regions_mask) != sizeof(regions_mask));
- BUILD_BUG_ON((sizeof(trigger->regions_mask) * BITS_PER_BYTE) <
- ARRAY_SIZE(fwrt->trans->dbg.active_regions));
- for (i = 0; i < ARRAY_SIZE(fwrt->trans->dbg.active_regions); i++) {
- u32 reg_type;
+ for (int i = 0; i < ARRAY_SIZE(fwrt->trans->dbg.active_regions); i++) {
+ struct iwl_dump_ini_region_data reg_data = {
+ .dump_data = dump_data,
+ };
+ u32 reg_type, dp;
struct iwl_fw_ini_region_tlv *reg;
if (!(BIT_ULL(i) & regions_mask))
@@ -2628,6 +2657,8 @@ static u32 iwl_dump_ini_trigger(struct iwl_fw_runtime *fwrt,
if (reg_type >= ARRAY_SIZE(iwl_dump_ini_region_ops))
continue;
+ dp = le32_get_bits(reg->id, IWL_FW_INI_REGION_DUMP_POLICY_MASK);
+
if ((reg_type == IWL_FW_INI_REGION_PERIPHERY_PHY ||
reg_type == IWL_FW_INI_REGION_PERIPHERY_PHY_RANGE ||
reg_type == IWL_FW_INI_REGION_PERIPHERY_SNPS_DPHYIP) &&
@@ -2637,6 +2668,20 @@ static u32 iwl_dump_ini_trigger(struct iwl_fw_runtime *fwrt,
tp_id);
continue;
}
+
+ switch (which) {
+ case IWL_INI_DUMP_ALL_REGIONS:
+ break;
+ case IWL_INI_DUMP_EARLY_REGIONS:
+ if (!(dp & IWL_FW_IWL_DEBUG_DUMP_POLICY_BEFORE_RESET))
+ continue;
+ break;
+ case IWL_INI_DUMP_LATE_REGIONS:
+ if (dp & IWL_FW_IWL_DEBUG_DUMP_POLICY_BEFORE_RESET)
+ continue;
+ break;
+ }
+
/*
* DRAM_IMR can be collected only for FW/HW error timepoint
* when fw is not alive. In addition, it must be collected
@@ -2646,7 +2691,8 @@ static u32 iwl_dump_ini_trigger(struct iwl_fw_runtime *fwrt,
if (reg_type == IWL_FW_INI_REGION_DRAM_IMR) {
if (tp_id == IWL_FW_INI_TIME_POINT_FW_ASSERT ||
tp_id == IWL_FW_INI_TIME_POINT_FW_HW_ERROR)
- imr_reg_data.reg_tlv = fwrt->trans->dbg.active_regions[i];
+ imr_reg_data->reg_tlv =
+ fwrt->trans->dbg.active_regions[i];
else
IWL_INFO(fwrt,
"WRT: trying to collect DRAM_IMR at time point: %d, skipping\n",
@@ -2659,9 +2705,44 @@ static u32 iwl_dump_ini_trigger(struct iwl_fw_runtime *fwrt,
size += iwl_dump_ini_mem(fwrt, list, &reg_data,
&iwl_dump_ini_region_ops[reg_type]);
}
+
+ return size;
+}
+
+static u32 iwl_dump_ini_trigger(struct iwl_fw_runtime *fwrt,
+ struct iwl_fwrt_dump_data *dump_data,
+ struct list_head *list)
+{
+ struct iwl_fw_ini_trigger_tlv *trigger = dump_data->trig;
+ enum iwl_fw_ini_time_point tp_id = le32_to_cpu(trigger->time_point);
+ struct iwl_dump_ini_region_data imr_reg_data = {
+ .dump_data = dump_data,
+ };
+ u32 size = 0;
+ u64 regions_mask = le64_to_cpu(trigger->regions_mask) &
+ ~(fwrt->trans->dbg.unsupported_region_msk);
+
+ BUILD_BUG_ON(sizeof(trigger->regions_mask) != sizeof(regions_mask));
+ BUILD_BUG_ON((sizeof(trigger->regions_mask) * BITS_PER_BYTE) <
+ ARRAY_SIZE(fwrt->trans->dbg.active_regions));
+
+ if (trigger->time_point &
+ cpu_to_le32(IWL_FW_INI_APPLY_POLICY_RESET_HANDSHAKE)) {
+ size += iwl_dump_ini_dump_regions(fwrt, dump_data, list, tp_id,
+ regions_mask, &imr_reg_data,
+ IWL_INI_DUMP_EARLY_REGIONS);
+ iwl_trans_pcie_fw_reset_handshake(fwrt->trans);
+ size += iwl_dump_ini_dump_regions(fwrt, dump_data, list, tp_id,
+ regions_mask, &imr_reg_data,
+ IWL_INI_DUMP_LATE_REGIONS);
+ } else {
+ size += iwl_dump_ini_dump_regions(fwrt, dump_data, list, tp_id,
+ regions_mask, &imr_reg_data,
+ IWL_INI_DUMP_ALL_REGIONS);
+ }
/* collect DRAM_IMR region in the last */
if (imr_reg_data.reg_tlv)
- size += iwl_dump_ini_mem(fwrt, list, &reg_data,
+ size += iwl_dump_ini_mem(fwrt, list, &imr_reg_data,
&iwl_dump_ini_region_ops[IWL_FW_INI_REGION_DRAM_IMR]);
if (size) {
@@ -3042,9 +3123,8 @@ int iwl_fw_start_dbg_conf(struct iwl_fw_runtime *fwrt, u8 conf_id)
}
IWL_EXPORT_SYMBOL(iwl_fw_start_dbg_conf);
-void iwl_send_dbg_dump_complete_cmd(struct iwl_fw_runtime *fwrt,
- u32 timepoint,
- u32 timepoint_data)
+static void iwl_send_dbg_dump_complete_cmd(struct iwl_fw_runtime *fwrt,
+ u32 timepoint, u32 timepoint_data)
{
struct iwl_dbg_dump_complete_cmd hcmd_data;
struct iwl_host_cmd hcmd = {
@@ -3072,6 +3152,7 @@ static void iwl_fw_dbg_collect_sync(struct iwl_fw_runtime *fwrt, u8 wk_idx)
struct iwl_fw_dbg_params params = {0};
struct iwl_fwrt_dump_data *dump_data =
&fwrt->dump.wks[wk_idx].dump_data;
+
if (!test_bit(wk_idx, &fwrt->dump.active_wks))
return;
@@ -3096,9 +3177,9 @@ static void iwl_fw_dbg_collect_sync(struct iwl_fw_runtime *fwrt, u8 wk_idx)
IWL_DEBUG_FW_INFO(fwrt, "WRT: Data collection start\n");
if (iwl_trans_dbg_ini_valid(fwrt->trans))
- iwl_fw_error_ini_dump(fwrt, &fwrt->dump.wks[wk_idx].dump_data);
+ iwl_fw_error_ini_dump(fwrt, dump_data);
else
- iwl_fw_error_dump(fwrt, &fwrt->dump.wks[wk_idx].dump_data);
+ iwl_fw_error_dump(fwrt, dump_data);
IWL_DEBUG_FW_INFO(fwrt, "WRT: Data collection done\n");
iwl_fw_dbg_stop_restart_recording(fwrt, &params, false);
@@ -3115,7 +3196,6 @@ static void iwl_fw_dbg_collect_sync(struct iwl_fw_runtime *fwrt, u8 wk_idx)
if (fwrt->trans->dbg.last_tp_resetfw == IWL_FW_INI_RESET_FW_MODE_STOP_FW_ONLY)
iwl_force_nmi(fwrt->trans);
-
out:
if (iwl_trans_dbg_ini_valid(fwrt->trans)) {
iwl_fw_error_dump_data_free(dump_data);
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.h b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h
index 87998374f459..35e30e5db462 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h
@@ -324,9 +324,6 @@ static inline void iwl_fwrt_update_fw_versions(struct iwl_fw_runtime *fwrt,
}
void iwl_fwrt_dump_error_logs(struct iwl_fw_runtime *fwrt);
-void iwl_send_dbg_dump_complete_cmd(struct iwl_fw_runtime *fwrt,
- u32 timepoint,
- u32 timepoint_data);
bool iwl_fwrt_read_err_table(struct iwl_trans *trans, u32 base, u32 *err_id);
void iwl_fw_disable_dbg_asserts(struct iwl_fw_runtime *fwrt);
void iwl_fw_dbg_clear_monitor_buf(struct iwl_fw_runtime *fwrt);
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dhc-utils.h b/drivers/net/wireless/intel/iwlwifi/fw/dhc-utils.h
new file mode 100644
index 000000000000..983acee5cd7d
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/fw/dhc-utils.h
@@ -0,0 +1,75 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2021, 2025 Intel Corporation
+ */
+#ifndef __iwl_fw_dhc_utils_h__
+#define __iwl_fw_dhc_utils_h__
+
+#include <linux/types.h>
+#include "fw/img.h"
+#include "api/commands.h"
+#include "api/dhc.h"
+
+/**
+ * iwl_dhc_resp_status - return status of DHC response
+ * @fw: firwmware image information
+ * @pkt: response packet, must not be %NULL
+ *
+ * Returns: the status value of the DHC command or (u32)-1 if the
+ * response was too short.
+ */
+static inline u32 iwl_dhc_resp_status(const struct iwl_fw *fw,
+ struct iwl_rx_packet *pkt)
+{
+ if (iwl_fw_lookup_notif_ver(fw, IWL_ALWAYS_LONG_GROUP,
+ DEBUG_HOST_COMMAND, 1) >= 2) {
+ struct iwl_dhc_cmd_resp *resp = (void *)pkt->data;
+
+ if (iwl_rx_packet_payload_len(pkt) < sizeof(*resp))
+ return (u32)-1;
+
+ return le32_to_cpu(resp->status);
+ } else {
+ struct iwl_dhc_cmd_resp_v1 *resp = (void *)pkt->data;
+
+ if (iwl_rx_packet_payload_len(pkt) < sizeof(*resp))
+ return (u32)-1;
+
+ return le32_to_cpu(resp->status);
+ }
+}
+
+/**
+ * iwl_dhc_resp_data - return data pointer of DHC response
+ * @fw: firmware image information
+ * @pkt: response packet, must not be %NULL
+ * @len: where to store the length
+ *
+ * Returns: The data pointer, or an ERR_PTR() if the data was
+ * not valid (too short).
+ */
+static inline void *iwl_dhc_resp_data(const struct iwl_fw *fw,
+ struct iwl_rx_packet *pkt,
+ unsigned int *len)
+{
+ if (iwl_fw_lookup_notif_ver(fw, IWL_ALWAYS_LONG_GROUP,
+ DEBUG_HOST_COMMAND, 1) >= 2) {
+ struct iwl_dhc_cmd_resp *resp = (void *)pkt->data;
+
+ if (iwl_rx_packet_payload_len(pkt) < sizeof(*resp))
+ return ERR_PTR(-EINVAL);
+
+ *len = iwl_rx_packet_payload_len(pkt) - sizeof(*resp);
+ return (void *)&resp->data;
+ } else {
+ struct iwl_dhc_cmd_resp_v1 *resp = (void *)pkt->data;
+
+ if (iwl_rx_packet_payload_len(pkt) < sizeof(*resp))
+ return ERR_PTR(-EINVAL);
+
+ *len = iwl_rx_packet_payload_len(pkt) - sizeof(*resp);
+ return (void *)&resp->data;
+ }
+}
+
+#endif /* __iwl_fw_dhc_utils_h__ */
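A hedged usage sketch (not part of the patch) of the two helpers above; the status semantics are firmware-defined, and 0 is assumed to mean success here.

static int iwl_handle_dhc_reply(const struct iwl_fw *fw,
				struct iwl_rx_packet *pkt)
{
	unsigned int len;
	void *data;

	/* covers both short packets and firmware-reported failures */
	if (iwl_dhc_resp_status(fw, pkt) != 0)
		return -EIO;

	data = iwl_dhc_resp_data(fw, pkt, &len);
	if (IS_ERR(data))
		return PTR_ERR(data);

	/* 'data'/'len' now describe the payload following the header */
	return 0;
}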
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dump.c b/drivers/net/wireless/intel/iwlwifi/fw/dump.c
index 8e0c85a1240d..c7b261c8ec96 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/dump.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/dump.c
@@ -540,6 +540,9 @@ bool iwl_fwrt_read_err_table(struct iwl_trans *trans, u32 base, u32 *err_id)
} err_info = {};
int ret;
+ if (err_id)
+ *err_id = 0;
+
if (!base)
return false;
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/regulatory.c b/drivers/net/wireless/intel/iwlwifi/fw/regulatory.c
index ea435ee94312..6adcfa6e214a 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/regulatory.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/regulatory.c
@@ -456,7 +456,25 @@ iwl_parse_tas_selection(const u32 tas_selection_in, const u8 tbl_rev)
}
IWL_EXPORT_SYMBOL(iwl_parse_tas_selection);
-static __le32 iwl_get_lari_config_bitmap(struct iwl_fw_runtime *fwrt)
+bool iwl_add_mcc_to_tas_block_list(u16 *list, u8 *size, u16 mcc)
+{
+ for (int i = 0; i < *size; i++) {
+ if (list[i] == mcc)
+ return true;
+ }
+
+ /* Verify that there is room for another country
+ * If *size == IWL_WTAS_BLACK_LIST_MAX, then the table is full.
+ */
+ if (*size >= IWL_WTAS_BLACK_LIST_MAX)
+ return false;
+
+ list[(*size)++] = mcc;
+ return true;
+}
+IWL_EXPORT_SYMBOL(iwl_add_mcc_to_tas_block_list);
+
+__le32 iwl_get_lari_config_bitmap(struct iwl_fw_runtime *fwrt)
{
int ret;
u32 val;
@@ -503,6 +521,7 @@ static __le32 iwl_get_lari_config_bitmap(struct iwl_fw_runtime *fwrt)
return config_bitmap;
}
+IWL_EXPORT_SYMBOL(iwl_get_lari_config_bitmap);
static size_t iwl_get_lari_config_cmd_size(u8 cmd_ver)
{
@@ -553,6 +572,10 @@ int iwl_fill_lari_config(struct iwl_fw_runtime *fwrt,
WIDE_ID(REGULATORY_AND_NVM_GROUP,
LARI_CONFIG_CHANGE), 1);
+ if (WARN_ONCE(cmd_ver > 12,
+ "Don't add newer versions to this function\n"))
+ return -EINVAL;
+
memset(cmd, 0, sizeof(*cmd));
*cmd_size = iwl_get_lari_config_cmd_size(cmd_ver);
@@ -674,3 +697,34 @@ bool iwl_puncturing_is_allowed_in_bios(u32 puncturing, u16 mcc)
}
}
IWL_EXPORT_SYMBOL(iwl_puncturing_is_allowed_in_bios);
+
+bool iwl_rfi_is_enabled_in_bios(struct iwl_fw_runtime *fwrt)
+{
+ /* default behaviour is disabled */
+ u32 value = 0;
+ int ret = iwl_bios_get_dsm(fwrt, DSM_FUNC_RFI_CONFIG, &value);
+
+ if (ret < 0) {
+ IWL_DEBUG_RADIO(fwrt, "Failed to get DSM RFI, ret=%d\n", ret);
+ return false;
+ }
+
+ value &= DSM_VALUE_RFI_DISABLE;
+ /* RFI BIOS CONFIG value can be 0 or 3 only.
+ * i.e. 0 means DDR and DLVR enabled, 3 means DDR and DLVR disabled.
+ * 1 and 2 are invalid BIOS configurations, so it's not possible to
+ * disable DDR/DLVR separately.
+ */
+ if (!value) {
+ IWL_DEBUG_RADIO(fwrt, "DSM RFI is evaluated to enable\n");
+ return true;
+ } else if (value == DSM_VALUE_RFI_DISABLE) {
+ IWL_DEBUG_RADIO(fwrt, "DSM RFI is evaluated to disable\n");
+ } else {
+ IWL_DEBUG_RADIO(fwrt,
+ "DSM RFI got invalid value, value=%d\n", value);
+ }
+
+ return false;
+}
+IWL_EXPORT_SYMBOL(iwl_rfi_is_enabled_in_bios);
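A hedged example (not part of the patch) of seeding a TAS block list with the newly exported helper; the MCC values are illustrative only, and entries already present are simply reported as added.

static u8 iwl_seed_tas_block_list(u16 *list)
{
	u8 size = 0;

	/* 0x5553 is "US", 0x4341 is "CA" -- example entries only */
	iwl_add_mcc_to_tas_block_list(list, &size, 0x5553);
	iwl_add_mcc_to_tas_block_list(list, &size, 0x4341);

	return size;
}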
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/regulatory.h b/drivers/net/wireless/intel/iwlwifi/fw/regulatory.h
index b355d7bef14c..53693314d505 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/regulatory.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/regulatory.h
@@ -167,6 +167,8 @@ enum iwl_dsm_values_rfi {
#define DSM_VALUE_RFI_DISABLE (DSM_VALUE_RFI_DLVR_DISABLE |\
DSM_VALUE_RFI_DDR_DISABLE)
+bool iwl_rfi_is_enabled_in_bios(struct iwl_fw_runtime *fwrt);
+
enum iwl_dsm_masks_reg {
DSM_MASK_CHINA_22_REG = BIT(2)
};
@@ -190,6 +192,7 @@ int iwl_fill_ppag_table(struct iwl_fw_runtime *fwrt,
bool iwl_is_ppag_approved(struct iwl_fw_runtime *fwrt);
bool iwl_is_tas_approved(void);
+bool iwl_add_mcc_to_tas_block_list(u16 *list, u8 *size, u16 mcc);
struct iwl_tas_selection_data
iwl_parse_tas_selection(const u32 tas_selection, const u8 tbl_rev);
@@ -212,6 +215,7 @@ int iwl_bios_get_mcc(struct iwl_fw_runtime *fwrt, char *mcc);
int iwl_bios_get_eckv(struct iwl_fw_runtime *fwrt, u32 *ext_clk);
int iwl_bios_get_wbem(struct iwl_fw_runtime *fwrt, u32 *value);
+__le32 iwl_get_lari_config_bitmap(struct iwl_fw_runtime *fwrt);
int iwl_fill_lari_config(struct iwl_fw_runtime *fwrt,
struct iwl_lari_config_change_cmd *cmd,
size_t *cmd_size);
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
index 048877fa7c71..a9e6bba2419e 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
@@ -104,6 +104,7 @@ struct iwl_txf_iter_data {
* the driver by calling &iwl_fw_set_current_image()
* @dump: debug dump data
* @uats_table: AP type table
+ * @uats_valid: is AP type table valid
* @uefi_tables_lock_status: The status of the WIFI GUID UEFI variables lock:
* 0: Unlocked, 1 and 2: Locked.
* Only read the UEFI variables if locked.
@@ -181,6 +182,7 @@ struct iwl_fw_runtime {
struct iwl_sar_offset_mapping_cmd sgom_table;
bool sgom_enabled;
struct iwl_mcc_allowed_ap_type_cmd uats_table;
+ bool uats_valid;
u8 uefi_tables_lock_status;
};
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/uefi.c b/drivers/net/wireless/intel/iwlwifi/fw/uefi.c
index 434eed4130b9..386aadbce2a2 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/uefi.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/uefi.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright(c) 2021-2024 Intel Corporation
+ * Copyright(c) 2021-2025 Intel Corporation
*/
#include "iwl-drv.h"
@@ -402,6 +402,9 @@ static int iwl_uefi_uats_parse(struct uefi_cnv_wlan_uats_data *uats_data,
memcpy(fwrt->uats_table.offset_map, uats_data->offset_map,
sizeof(fwrt->uats_table.offset_map));
+
+ fwrt->uats_valid = true;
+
return 0;
}
@@ -678,14 +681,16 @@ int iwl_uefi_get_eckv(struct iwl_fw_runtime *fwrt, u32 *extl_clk)
struct uefi_cnv_var_eckv *data;
int ret = 0;
- data = iwl_uefi_get_verified_variable(fwrt->trans, IWL_UEFI_ECKV_NAME,
- "ECKV", sizeof(*data), NULL);
+ data = iwl_uefi_get_verified_variable_guid(fwrt->trans,
+ &IWL_EFI_WIFI_BT_GUID,
+ IWL_UEFI_ECKV_NAME,
+ "ECKV", sizeof(*data), NULL);
if (IS_ERR(data))
return -EINVAL;
if (data->revision != IWL_UEFI_ECKV_REVISION) {
ret = -EINVAL;
- IWL_DEBUG_RADIO(fwrt, "Unsupported UEFI WRDD revision:%d\n",
+ IWL_DEBUG_RADIO(fwrt, "Unsupported UEFI ECKV revision:%d\n",
data->revision);
goto out;
}
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/uefi.h b/drivers/net/wireless/intel/iwlwifi/fw/uefi.h
index 0c8943a8bd01..eb3c05417da3 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/uefi.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/uefi.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright(c) 2021-2024 Intel Corporation
+ * Copyright(c) 2021-2025 Intel Corporation
*/
#ifndef __iwl_fw_uefi__
#define __iwl_fw_uefi__
@@ -19,7 +19,7 @@
#define IWL_UEFI_WTAS_NAME L"UefiCnvWlanWTAS"
#define IWL_UEFI_SPLC_NAME L"UefiCnvWlanSPLC"
#define IWL_UEFI_WRDD_NAME L"UefiCnvWlanWRDD"
-#define IWL_UEFI_ECKV_NAME L"UefiCnvWlanECKV"
+#define IWL_UEFI_ECKV_NAME L"UefiCnvCommonECKV"
#define IWL_UEFI_DSM_NAME L"UefiCnvWlanGeneralCfg"
#define IWL_UEFI_WBEM_NAME L"UefiCnvWlanWBEM"
#define IWL_UEFI_PUNCTURING_NAME L"UefiCnvWlanPuncturing"
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
index 2b6a80142aba..b9bd89bfdd74 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2005-2014, 2018-2021 Intel Corporation
* Copyright (C) 2016-2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2024 Intel Corporation
+ * Copyright (C) 2018-2025 Intel Corporation
*/
#ifndef __IWL_CONFIG_H__
#define __IWL_CONFIG_H__
@@ -451,11 +451,8 @@ struct iwl_cfg {
#define IWL_CFG_RF_ID_HR 0x7
#define IWL_CFG_RF_ID_HR1 0x4
-#define IWL_CFG_NO_160 0x1
-#define IWL_CFG_160 0x0
-
-#define IWL_CFG_NO_320 0x1
-#define IWL_CFG_320 0x0
+#define IWL_CFG_BW_NO_LIM (U16_MAX - 1)
+#define IWL_CFG_BW_ANY U16_MAX
#define IWL_CFG_CORES_BT 0x0
#define IWL_CFG_CORES_BT_GNSS 0x5
@@ -467,7 +464,7 @@ struct iwl_cfg {
#define IWL_CFG_IS_JACKET 0x1
#define IWL_SUBDEVICE_RF_ID(subdevice) ((u16)((subdevice) & 0x00F0) >> 4)
-#define IWL_SUBDEVICE_NO_160(subdevice) ((u16)((subdevice) & 0x0200) >> 9)
+#define IWL_SUBDEVICE_BW_LIM(subdevice) ((u16)((subdevice) & 0x0200) >> 9)
#define IWL_SUBDEVICE_CORES(subdevice) ((u16)((subdevice) & 0x1C00) >> 10)
struct iwl_dev_info {
@@ -475,10 +472,10 @@ struct iwl_dev_info {
u16 subdevice;
u16 mac_type;
u16 rf_type;
+ u16 bw_limit;
u8 mac_step;
u8 rf_step;
u8 rf_id;
- u8 no_160;
u8 cores;
u8 cdb;
u8 jacket;
@@ -492,7 +489,7 @@ extern const unsigned int iwl_dev_info_table_size;
const struct iwl_dev_info *
iwl_pci_find_dev_info(u16 device, u16 subsystem_device,
u16 mac_type, u8 mac_step, u16 rf_type, u8 cdb,
- u8 jacket, u8 rf_id, u8 no_160, u8 cores, u8 rf_step);
+ u8 jacket, u8 rf_id, u8 bw_limit, u8 cores, u8 rf_step);
extern const struct pci_device_id iwl_hw_card_ids[];
#endif
@@ -534,7 +531,6 @@ extern const char iwl9560_killer_1550i_name[];
extern const char iwl9560_killer_1550s_name[];
extern const char iwl_ax200_name[];
extern const char iwl_ax203_name[];
-extern const char iwl_ax204_name[];
extern const char iwl_ax201_name[];
extern const char iwl_ax101_name[];
extern const char iwl_ax200_killer_1650w_name[];
@@ -550,17 +546,13 @@ extern const char iwl_ax211_killer_1675i_name[];
extern const char iwl_ax411_killer_1690s_name[];
extern const char iwl_ax411_killer_1690i_name[];
extern const char iwl_ax211_name[];
-extern const char iwl_ax221_name[];
extern const char iwl_ax231_name[];
extern const char iwl_ax411_name[];
-extern const char iwl_bz_name[];
extern const char iwl_fm_name[];
extern const char iwl_wh_name[];
+extern const char iwl_sp_name[];
extern const char iwl_gl_name[];
extern const char iwl_mtp_name[];
-extern const char iwl_sc_name[];
-extern const char iwl_sc2_name[];
-extern const char iwl_sc2f_name[];
extern const char iwl_dr_name[];
extern const char iwl_br_name[];
#if IS_ENABLED(CONFIG_IWLDVM)
@@ -662,6 +654,12 @@ extern const struct iwl_cfg iwl_cfg_ma;
extern const struct iwl_cfg iwl_cfg_so_a0_hr_a0;
extern const struct iwl_cfg iwl_cfg_quz_a0_hr_b0;
+#endif /* CONFIG_IWLMVM */
+
+#if IS_ENABLED(CONFIG_IWLMLD)
+extern const struct iwl_ht_params iwl_bz_ht_params;
+
+extern const struct iwl_ht_params iwl_bz_ht_params;
extern const struct iwl_cfg iwl_cfg_bz;
extern const struct iwl_cfg iwl_cfg_gl;
@@ -671,6 +669,6 @@ extern const struct iwl_cfg iwl_cfg_sc2;
extern const struct iwl_cfg iwl_cfg_sc2f;
extern const struct iwl_cfg iwl_cfg_dr;
extern const struct iwl_cfg iwl_cfg_br;
-#endif /* CONFIG_IWLMVM */
+#endif /* CONFIG_IWLMLD */
#endif /* __IWL_CONFIG_H__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h b/drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h
index cd25a1b9f2ff..20563a32a21a 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2018, 2020-2024 Intel Corporation
+ * Copyright (C) 2018, 2020-2025 Intel Corporation
*/
#ifndef __iwl_context_info_file_gen3_h__
#define __iwl_context_info_file_gen3_h__
@@ -80,10 +80,12 @@ enum iwl_prph_scratch_flags {
* enum iwl_prph_scratch_ext_flags - PRPH scratch control ext flags
* @IWL_PRPH_SCRATCH_EXT_URM_FW: switch to URM mode based on fw setting
* @IWL_PRPH_SCRATCH_EXT_URM_PERM: switch to permanent URM mode
+ * @IWL_PRPH_SCRATCH_EXT_32KHZ_CLK_VALID: use external 32 KHz clock
*/
enum iwl_prph_scratch_ext_flags {
- IWL_PRPH_SCRATCH_EXT_URM_FW = BIT(4),
- IWL_PRPH_SCRATCH_EXT_URM_PERM = BIT(5),
+ IWL_PRPH_SCRATCH_EXT_URM_FW = BIT(4),
+ IWL_PRPH_SCRATCH_EXT_URM_PERM = BIT(5),
+ IWL_PRPH_SCRATCH_EXT_32KHZ_CLK_VALID = BIT(8),
};
/**
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
index 08d990ba8a79..ce787326aa69 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2018-2024 Intel Corporation
+ * Copyright (C) 2018-2025 Intel Corporation
*/
#include <linux/firmware.h>
#include "iwl-drv.h"
@@ -1372,15 +1372,15 @@ void _iwl_dbg_tlv_time_point(struct iwl_fw_runtime *fwrt,
switch (tp_id) {
case IWL_FW_INI_TIME_POINT_EARLY:
iwl_dbg_tlv_init_cfg(fwrt);
- iwl_dbg_tlv_apply_config(fwrt, conf_list);
iwl_dbg_tlv_update_drams(fwrt);
iwl_dbg_tlv_tp_trigger(fwrt, sync, trig_list, tp_data, NULL);
+ iwl_dbg_tlv_apply_config(fwrt, conf_list);
break;
case IWL_FW_INI_TIME_POINT_AFTER_ALIVE:
iwl_dbg_tlv_apply_buffers(fwrt);
iwl_dbg_tlv_send_hcmds(fwrt, hcmd_list);
- iwl_dbg_tlv_apply_config(fwrt, conf_list);
iwl_dbg_tlv_tp_trigger(fwrt, sync, trig_list, tp_data, NULL);
+ iwl_dbg_tlv_apply_config(fwrt, conf_list);
break;
case IWL_FW_INI_TIME_POINT_PERIODIC:
iwl_dbg_tlv_set_periodic_trigs(fwrt);
@@ -1390,14 +1390,14 @@ void _iwl_dbg_tlv_time_point(struct iwl_fw_runtime *fwrt,
case IWL_FW_INI_TIME_POINT_MISSED_BEACONS:
case IWL_FW_INI_TIME_POINT_FW_DHC_NOTIFICATION:
iwl_dbg_tlv_send_hcmds(fwrt, hcmd_list);
- iwl_dbg_tlv_apply_config(fwrt, conf_list);
iwl_dbg_tlv_tp_trigger(fwrt, sync, trig_list, tp_data,
iwl_dbg_tlv_check_fw_pkt);
+ iwl_dbg_tlv_apply_config(fwrt, conf_list);
break;
default:
iwl_dbg_tlv_send_hcmds(fwrt, hcmd_list);
- iwl_dbg_tlv_apply_config(fwrt, conf_list);
iwl_dbg_tlv_tp_trigger(fwrt, sync, trig_list, tp_data, NULL);
+ iwl_dbg_tlv_apply_config(fwrt, conf_list);
break;
}
}
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-debug.h b/drivers/net/wireless/intel/iwlwifi/iwl-debug.h
index bf52c2edaad1..ea32dc88584f 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-debug.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-debug.h
@@ -2,7 +2,7 @@
/******************************************************************************
*
* Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2018 - 2021, 2024 Intel Corporation
+ * Copyright(c) 2018 - 2021, 2024-2025 Intel Corporation
*
* Portions of this file are derived from the ipw3945 project.
*****************************************************************************/
@@ -156,6 +156,7 @@ do { \
#define IWL_DL_FW 0x00010000
#define IWL_DL_RF_KILL 0x00020000
#define IWL_DL_TPT 0x00040000
+#define IWL_DL_PTP 0x00080000
/* 0x00F00000 - 0x00100000 */
#define IWL_DL_RATE 0x00100000
#define IWL_DL_CALIB 0x00200000
@@ -165,7 +166,7 @@ do { \
#define IWL_DL_RX 0x01000000
#define IWL_DL_ISR 0x02000000
#define IWL_DL_HT 0x04000000
-#define IWL_DL_EXTERNAL 0x08000000
+#define IWL_DL_EHT 0x08000000
/* 0xF0000000 - 0x10000000 */
#define IWL_DL_11H 0x10000000
#define IWL_DL_STATS 0x20000000
@@ -175,7 +176,6 @@ do { \
#define IWL_DEBUG_INFO(p, f, a...) IWL_DEBUG(p, IWL_DL_INFO, f, ## a)
#define IWL_DEBUG_TDLS(p, f, a...) IWL_DEBUG(p, IWL_DL_TDLS, f, ## a)
#define IWL_DEBUG_MAC80211(p, f, a...) IWL_DEBUG(p, IWL_DL_MAC80211, f, ## a)
-#define IWL_DEBUG_EXTERNAL(p, f, a...) IWL_DEBUG(p, IWL_DL_EXTERNAL, f, ## a)
#define IWL_DEBUG_TEMP(p, f, a...) IWL_DEBUG(p, IWL_DL_TEMP, f, ## a)
#define IWL_DEBUG_SCAN(p, f, a...) IWL_DEBUG(p, IWL_DL_SCAN, f, ## a)
#define IWL_DEBUG_RX(p, f, a...) IWL_DEBUG(p, IWL_DL_RX, f, ## a)
@@ -216,5 +216,6 @@ do { \
#define IWL_DEBUG_LAR(p, f, a...) IWL_DEBUG(p, IWL_DL_LAR, f, ## a)
#define IWL_DEBUG_FW_INFO(p, f, a...) \
IWL_DEBUG(p, IWL_DL_INFO | IWL_DL_FW, f, ## a)
-
+#define IWL_DEBUG_PTP(p, f, a...) IWL_DEBUG(p, IWL_DL_PTP, f, ## a)
+#define IWL_DEBUG_EHT(p, f, a...) IWL_DEBUG(p, IWL_DL_EHT, f, ## a)
#endif
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
index d3a65f33097c..d36837501e08 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2005-2014, 2018-2024 Intel Corporation
+ * Copyright (C) 2005-2014, 2018-2025 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -75,6 +75,9 @@ struct iwl_drv {
enum {
DVM_OP_MODE,
MVM_OP_MODE,
+#if IS_ENABLED(CONFIG_IWLMLD)
+ MLD_OP_MODE,
+#endif
};
/* Protects the table contents, i.e. the ops pointer & drv list */
@@ -86,6 +89,9 @@ static struct iwlwifi_opmode_table {
} iwlwifi_opmode_table[] = { /* ops set when driver is initialized */
[DVM_OP_MODE] = { .name = "iwldvm", .ops = NULL },
[MVM_OP_MODE] = { .name = "iwlmvm", .ops = NULL },
+#if IS_ENABLED(CONFIG_IWLMLD)
+ [MLD_OP_MODE] = { .name = "iwlmld", .ops = NULL },
+#endif
};
#define IWL_DEFAULT_SCAN_CHANNELS 40
@@ -168,6 +174,11 @@ static inline char iwl_drv_get_step(int step)
return 'a' + step;
}
+static bool iwl_drv_is_wifi7_supported(struct iwl_trans *trans)
+{
+ return CSR_HW_RFID_TYPE(trans->hw_rf_id) >= IWL_CFG_RF_TYPE_FM;
+}
+
const char *iwl_drv_get_fwname_pre(struct iwl_trans *trans, char *buf)
{
char mac_step, rf_step;
@@ -316,6 +327,7 @@ struct iwl_firmware_pieces {
size_t dbg_trigger_tlv_len[FW_DBG_TRIGGER_MAX];
struct iwl_fw_dbg_mem_seg_tlv *dbg_mem_tlv;
size_t n_mem_tlv;
+ u32 major;
};
static void alloc_sec_data(struct iwl_firmware_pieces *pieces,
@@ -957,19 +969,19 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
break;
case IWL_UCODE_TLV_FW_VERSION: {
const __le32 *ptr = (const void *)tlv_data;
- u32 major, minor;
+ u32 minor;
u8 local_comp;
if (tlv_len != sizeof(u32) * 3)
goto invalid_tlv_len;
- major = le32_to_cpup(ptr++);
+ pieces->major = le32_to_cpup(ptr++);
minor = le32_to_cpup(ptr++);
local_comp = le32_to_cpup(ptr);
snprintf(drv->fw.fw_version,
sizeof(drv->fw.fw_version),
- "%u.%08x.%u %s", major, minor,
+ "%u.%08x.%u %s", pieces->major, minor,
local_comp, iwl_reduced_fw_name(drv));
break;
}
@@ -1181,7 +1193,7 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
if (tlv_len != sizeof(*fseq_ver))
goto invalid_tlv_len;
- IWL_INFO(drv, "TLV_FW_FSEQ_VERSION: %s\n",
+ IWL_INFO(drv, "TLV_FW_FSEQ_VERSION: %.32s\n",
fseq_ver->version);
}
break;
@@ -1468,6 +1480,8 @@ static void _iwl_op_mode_stop(struct iwl_drv *drv)
}
}
+#define IWL_MLD_SUPPORTED_FW_VERSION 97
+
/*
* iwl_req_fw_callback - callback when firmware was loaded
*
@@ -1720,6 +1734,20 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
break;
}
+#if IS_ENABLED(CONFIG_IWLMLD)
+ if (pieces->major >= IWL_MLD_SUPPORTED_FW_VERSION &&
+ iwl_drv_is_wifi7_supported(drv->trans))
+ op = &iwlwifi_opmode_table[MLD_OP_MODE];
+#else
+ if (pieces->major >= IWL_MLD_SUPPORTED_FW_VERSION &&
+ iwl_drv_is_wifi7_supported(drv->trans)) {
+ IWL_ERR(drv,
+ "IWLMLD needs to be compiled to support this firmware\n");
+ mutex_unlock(&iwlwifi_opmode_table_mtx);
+ goto out_unbind;
+ }
+#endif
+
IWL_INFO(drv, "loaded firmware version %s op_mode %s\n",
drv->fw.fw_version, op->name);
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
index 9f7e013252fe..cd1b0048bb6d 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2005-2014, 2018-2023 Intel Corporation
+ * Copyright (C) 2005-2014, 2018-2023, 2025 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -684,10 +684,13 @@ static const struct ieee80211_sband_iftype_data iwl_he_eht_capa[] = {
.has_eht = true,
.eht_cap_elem = {
.mac_cap_info[0] =
+ IEEE80211_EHT_MAC_CAP0_EPCS_PRIO_ACCESS |
IEEE80211_EHT_MAC_CAP0_OM_CONTROL |
IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE1 |
IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE2 |
IEEE80211_EHT_MAC_CAP0_SCS_TRAFFIC_DESC,
+ .mac_cap_info[1] =
+ IEEE80211_EHT_MAC_CAP1_UNSOL_EPCS_PRIO_ACCESS,
.phy_cap_info[0] =
IEEE80211_EHT_PHY_CAP0_242_TONE_RU_GT20MHZ |
IEEE80211_EHT_PHY_CAP0_NDP_4_EHT_LFT_32_GI |
@@ -913,11 +916,8 @@ iwl_nvm_fixup_sband_iftd(struct iwl_trans *trans,
{
bool is_ap = iftype_data->types_mask & (BIT(NL80211_IFTYPE_AP) |
BIT(NL80211_IFTYPE_P2P_GO));
- bool no_320;
-
- no_320 = (!trans->trans_cfg->integrated &&
- trans->pcie_link_speed < PCI_EXP_LNKSTA_CLS_8_0GB) ||
- trans->reduced_cap_sku;
+ bool slow_pcie = (!trans->trans_cfg->integrated &&
+ trans->pcie_link_speed < PCI_EXP_LNKSTA_CLS_8_0GB);
if (!data->sku_cap_11be_enable || iwlwifi_mod_params.disable_11be)
iftype_data->eht_cap.has_eht = false;
@@ -944,7 +944,8 @@ iwl_nvm_fixup_sband_iftd(struct iwl_trans *trans,
IEEE80211_EHT_MAC_CAP0_MAX_MPDU_LEN_MASK);
break;
case NL80211_BAND_6GHZ:
- if (!no_320) {
+ if (!trans->reduced_cap_sku &&
+ trans->bw_limit >= 320) {
iftype_data->eht_cap.eht_cap_elem.phy_cap_info[0] |=
IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ;
iftype_data->eht_cap.eht_cap_elem.phy_cap_info[1] |=
@@ -986,6 +987,14 @@ iwl_nvm_fixup_sband_iftd(struct iwl_trans *trans,
iftype_data->eht_cap.eht_cap_elem.phy_cap_info[4] |= 0x10;
}
}
+
+ if (slow_pcie) {
+ struct ieee80211_eht_mcs_nss_supp *mcs_nss =
+ &iftype_data->eht_cap.eht_mcs_nss_supp;
+
+ mcs_nss->bw._320.rx_tx_mcs11_max_nss = 0;
+ mcs_nss->bw._320.rx_tx_mcs13_max_nss = 0;
+ }
} else {
struct ieee80211_he_mcs_nss_supp *he_mcs_nss_supp =
&iftype_data->he_cap.he_mcs_nss_supp;
@@ -1086,19 +1095,22 @@ iwl_nvm_fixup_sband_iftd(struct iwl_trans *trans,
iftype_data->eht_cap.eht_mcs_nss_supp.bw._320.rx_tx_mcs13_max_nss = 0;
}
- if (trans->no_160)
+ if (trans->bw_limit < 160)
iftype_data->he_cap.he_cap_elem.phy_cap_info[0] &=
~IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G;
- if (trans->reduced_cap_sku) {
+ if (trans->bw_limit < 320 || trans->reduced_cap_sku) {
memset(&iftype_data->eht_cap.eht_mcs_nss_supp.bw._320, 0,
sizeof(iftype_data->eht_cap.eht_mcs_nss_supp.bw._320));
+ iftype_data->eht_cap.eht_cap_elem.phy_cap_info[2] &=
+ ~IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_320MHZ_MASK;
+ }
+
+ if (trans->reduced_cap_sku) {
iftype_data->eht_cap.eht_mcs_nss_supp.bw._80.rx_tx_mcs13_max_nss = 0;
iftype_data->eht_cap.eht_mcs_nss_supp.bw._160.rx_tx_mcs13_max_nss = 0;
iftype_data->eht_cap.eht_cap_elem.phy_cap_info[8] &=
~IEEE80211_EHT_PHY_CAP8_RX_4096QAM_WIDER_BW_DL_OFDMA;
- iftype_data->eht_cap.eht_cap_elem.phy_cap_info[2] &=
- ~IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_320MHZ_MASK;
}
}
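For orientation, the bandwidth handling in this hunk can be read as a single set of rules; the summary below is a hedged reading of the code above, with bw_limit assumed to be in MHz (matching the 160/320 comparisons), not an authoritative statement of hardware behaviour.

/* Hedged summary of the width trimming above (bw_limit in MHz):
 *
 *   bw_limit >= 320 && !reduced_cap_sku  -> 320 MHz EHT advertised on 6 GHz
 *   bw_limit <  320 || reduced_cap_sku   -> 320 MHz EHT MCS/NSS zeroed and
 *                                           320 MHz sounding dimensions cleared
 *   bw_limit <  160                      -> HE 160 MHz (5 GHz) bit cleared too
 *   slow_pcie                            -> 320 MHz MCS 11/13 NSS forced to 0
 *   reduced_cap_sku                      -> MCS 13 dropped on 80/160 MHz and
 *                                           4096-QAM wider-BW DL OFDMA cleared
 */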
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.c b/drivers/net/wireless/intel/iwlwifi/iwl-trans.c
index 49c8507d1a6b..c1607b6d0759 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.c
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
- * Copyright (C) 2019-2021, 2023-2024 Intel Corporation
+ * Copyright (C) 2019-2021, 2023-2025 Intel Corporation
*/
#include <linux/kernel.h>
#include <linux/bsearch.h>
@@ -97,7 +97,7 @@ static void iwl_trans_reprobe_wk(struct work_struct *wk)
module_put(THIS_MODULE);
}
-#define IWL_TRANS_RESET_OK_TIME 180 /* seconds */
+#define IWL_TRANS_RESET_OK_TIME 7 /* seconds */
static enum iwl_reset_mode
iwl_trans_determine_restart_mode(struct iwl_trans *trans)
@@ -403,6 +403,8 @@ void iwl_trans_op_mode_leave(struct iwl_trans *trans)
iwl_trans_pcie_op_mode_leave(trans);
+ cancel_work_sync(&trans->restart.wk);
+
trans->op_mode = NULL;
trans->state = IWL_TRANS_NO_FW;
@@ -639,6 +641,9 @@ IWL_EXPORT_SYMBOL(iwl_trans_tx);
void iwl_trans_reclaim(struct iwl_trans *trans, int queue, int ssn,
struct sk_buff_head *skbs, bool is_flush)
{
+ if (unlikely(test_bit(STATUS_FW_ERROR, &trans->status)))
+ return;
+
if (WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
"bad state = %d\n", trans->state))
return;
@@ -671,6 +676,9 @@ IWL_EXPORT_SYMBOL(iwl_trans_txq_enable_cfg);
int iwl_trans_wait_txq_empty(struct iwl_trans *trans, int queue)
{
+ if (unlikely(test_bit(STATUS_FW_ERROR, &trans->status)))
+ return -EIO;
+
if (WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
"bad state = %d\n", trans->state))
return -EIO;
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
index f6234065dbdd..25fb4c50e38b 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2005-2014, 2018-2023 Intel Corporation
+ * Copyright (C) 2005-2014, 2018-2023, 2025 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -876,7 +876,7 @@ struct iwl_txq {
* only valid for discrete (not integrated) NICs
* @invalid_tx_cmd: invalid TX command buffer
* @reduced_cap_sku: reduced capability supported SKU
- * @no_160: device not supporting 160 MHz
+ * @bw_limit: the maximum supported bandwidth, in MHz
* @step_urm: STEP is in URM, no support for MCS>9 in 320 MHz
* @restart: restart worker data
* @restart.wk: restart worker
@@ -888,6 +888,7 @@ struct iwl_txq {
* @trans_specific: data for the specific transport this is allocated for/with
* @dsbr_urm_fw_dependent: switch to URM based on fw settings
* @dsbr_urm_permanent: switch to URM permanently
+ * @ext_32khz_clock_valid: if true, the external 32 kHz clock can be used
*/
struct iwl_trans {
bool csme_own;
@@ -910,11 +911,14 @@ struct iwl_trans {
char hw_id_str[52];
u32 sku_id[3];
bool reduced_cap_sku;
- u8 no_160:1, step_urm:1;
+ u16 bw_limit;
+ bool step_urm;
u8 dsbr_urm_fw_dependent:1,
dsbr_urm_permanent:1;
+ bool ext_32khz_clock_valid;
+
u8 rx_mpdu_cmd, rx_mpdu_cmd_hdr_size;
bool pm_support;
@@ -1261,6 +1265,7 @@ enum iwl_reset_mode {
};
void iwl_trans_pcie_reset(struct iwl_trans *trans, enum iwl_reset_mode mode);
+void iwl_trans_pcie_fw_reset_handshake(struct iwl_trans *trans);
int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans,
struct iwl_host_cmd *cmd);
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/Makefile b/drivers/net/wireless/intel/iwlwifi/mld/Makefile
new file mode 100644
index 000000000000..ece66e7a9be4
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mld/Makefile
@@ -0,0 +1,16 @@
+# SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+obj-$(CONFIG_IWLMLD) += iwlmld.o
+obj-$(CONFIG_IWLWIFI_KUNIT_TESTS) += tests/
+
+iwlmld-y += mld.o notif.o mac80211.o fw.o power.o iface.o link.o rx.o mcc.o session-protect.o phy.o
+iwlmld-y += scan.o sta.o tx.o coex.o tlc.o agg.o key.o regulatory.o ap.o thermal.o roc.o stats.o
+iwlmld-y += low_latency.o mlo.o ptp.o time_sync.o ftm-initiator.o
+iwlmld-$(CONFIG_IWLWIFI_DEBUGFS) += debugfs.o
+iwlmld-$(CONFIG_IWLWIFI_LEDS) += led.o
+iwlmld-$(CONFIG_PM_SLEEP) += d3.o
+
+# non-upstream things
+iwlmld-$(CONFIG_IWL_VENDOR_CMDS) += vendor-cmd.o
+iwlmld-$(CONFIG_IWLMVM_AX_SOFTAP_TESTMODE) += ax-softap-testmode.o
+
+subdir-ccflags-y += -I$(src)/../
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/agg.c b/drivers/net/wireless/intel/iwlwifi/mld/agg.c
new file mode 100644
index 000000000000..db9e0f04f4b7
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mld/agg.c
@@ -0,0 +1,670 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2024-2025 Intel Corporation
+ */
+#include "agg.h"
+#include "sta.h"
+#include "hcmd.h"
+
+static void
+iwl_mld_reorder_release_frames(struct iwl_mld *mld, struct ieee80211_sta *sta,
+ struct napi_struct *napi,
+ struct iwl_mld_baid_data *baid_data,
+ struct iwl_mld_reorder_buffer *reorder_buf,
+ u16 nssn)
+{
+ struct iwl_mld_reorder_buf_entry *entries =
+ &baid_data->entries[reorder_buf->queue *
+ baid_data->entries_per_queue];
+ u16 ssn = reorder_buf->head_sn;
+
+ while (ieee80211_sn_less(ssn, nssn)) {
+ int index = ssn % baid_data->buf_size;
+ struct sk_buff_head *skb_list = &entries[index].frames;
+ struct sk_buff *skb;
+
+ ssn = ieee80211_sn_inc(ssn);
+
+ /* Empty the list. Will have more than one frame for A-MSDU.
+ * Empty list is valid as well since nssn indicates frames were
+ * received.
+ */
+ while ((skb = __skb_dequeue(skb_list))) {
+ iwl_mld_pass_packet_to_mac80211(mld, napi, skb,
+ reorder_buf->queue,
+ sta);
+ reorder_buf->num_stored--;
+ }
+ }
+ reorder_buf->head_sn = nssn;
+}
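The release loop above stops once head_sn catches up with the NSSN; this relies on mac80211's 12-bit sequence-number arithmetic. A minimal standalone sketch of those helpers, written from memory of include/net/mac80211.h (the in-tree ieee80211_sn_* inlines are the authoritative versions):

#define SN_MASK   0xfff           /* 802.11 sequence numbers are 12 bits */
#define SN_MODULO (SN_MASK + 1)   /* 4096 */

static inline bool sn_less(u16 sn1, u16 sn2)
{
	/* "less than" within half of the 4096-value window, wrap-safe */
	return ((sn1 - sn2) & SN_MASK) > (SN_MODULO >> 1);
}

static inline u16 sn_inc(u16 sn)
{
	return (sn + 1) & SN_MASK;
}

/* Example: with head_sn == 4094 and nssn == 2, sn_less(4094, 2) is true,
 * so the loop releases entries 4094, 4095, 0 and 1, then stops with
 * head_sn == 2.
 */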
+
+static void iwl_mld_release_frames_from_notif(struct iwl_mld *mld,
+ struct napi_struct *napi,
+ u8 baid, u16 nssn, int queue)
+{
+ struct iwl_mld_reorder_buffer *reorder_buf;
+ struct iwl_mld_baid_data *ba_data;
+ struct ieee80211_link_sta *link_sta;
+ u32 sta_id;
+
+ IWL_DEBUG_HT(mld, "Frame release notification for BAID %u, NSSN %d\n",
+ baid, nssn);
+
+ if (WARN_ON_ONCE(baid == IWL_RX_REORDER_DATA_INVALID_BAID ||
+ baid >= ARRAY_SIZE(mld->fw_id_to_ba)))
+ return;
+
+ rcu_read_lock();
+
+ ba_data = rcu_dereference(mld->fw_id_to_ba[baid]);
+ if (!ba_data) {
+ IWL_DEBUG_HT(mld, "BAID %d not found in map\n", baid);
+ goto out_unlock;
+ }
+
+ /* pick any STA ID to find the pointer */
+ sta_id = ffs(ba_data->sta_mask) - 1;
+ link_sta = rcu_dereference(mld->fw_id_to_link_sta[sta_id]);
+ if (WARN_ON_ONCE(IS_ERR_OR_NULL(link_sta) || !link_sta->sta))
+ goto out_unlock;
+
+ reorder_buf = &ba_data->reorder_buf[queue];
+
+ iwl_mld_reorder_release_frames(mld, link_sta->sta, napi, ba_data,
+ reorder_buf, nssn);
+out_unlock:
+ rcu_read_unlock();
+}
+
+void iwl_mld_handle_frame_release_notif(struct iwl_mld *mld,
+ struct napi_struct *napi,
+ struct iwl_rx_packet *pkt, int queue)
+{
+ struct iwl_frame_release *release = (void *)pkt->data;
+ u32 pkt_len = iwl_rx_packet_payload_len(pkt);
+
+ if (IWL_FW_CHECK(mld, pkt_len < sizeof(*release),
+ "Unexpected frame release notif size %u (expected %zu)\n",
+ pkt_len, sizeof(*release)))
+ return;
+
+ iwl_mld_release_frames_from_notif(mld, napi, release->baid,
+ le16_to_cpu(release->nssn),
+ queue);
+}
+
+void iwl_mld_handle_bar_frame_release_notif(struct iwl_mld *mld,
+ struct napi_struct *napi,
+ struct iwl_rx_packet *pkt,
+ int queue)
+{
+ struct iwl_bar_frame_release *release = (void *)pkt->data;
+ struct iwl_mld_baid_data *baid_data;
+ unsigned int baid, nssn, sta_id, tid;
+ u32 pkt_len = iwl_rx_packet_payload_len(pkt);
+
+ if (IWL_FW_CHECK(mld, pkt_len < sizeof(*release),
+ "Unexpected frame release notif size %u (expected %zu)\n",
+ pkt_len, sizeof(*release)))
+ return;
+
+ baid = le32_get_bits(release->ba_info,
+ IWL_BAR_FRAME_RELEASE_BAID_MASK);
+ nssn = le32_get_bits(release->ba_info,
+ IWL_BAR_FRAME_RELEASE_NSSN_MASK);
+ sta_id = le32_get_bits(release->sta_tid,
+ IWL_BAR_FRAME_RELEASE_STA_MASK);
+ tid = le32_get_bits(release->sta_tid,
+ IWL_BAR_FRAME_RELEASE_TID_MASK);
+
+ if (IWL_FW_CHECK(mld, baid >= ARRAY_SIZE(mld->fw_id_to_ba),
+ "BAR release: invalid BAID (%x)\n", baid))
+ return;
+
+ rcu_read_lock();
+ baid_data = rcu_dereference(mld->fw_id_to_ba[baid]);
+ if (!IWL_FW_CHECK(mld, !baid_data,
+ "Got valid BAID %d but not allocated, invalid BAR release!\n",
+ baid))
+ goto out_unlock;
+
+ if (IWL_FW_CHECK(mld, tid != baid_data->tid ||
+ sta_id > mld->fw->ucode_capa.num_stations ||
+ !(baid_data->sta_mask & BIT(sta_id)),
+ "BAID 0x%x is mapped to sta_mask:0x%x tid:%d, but BAR release received for sta:%d tid:%d\n",
+ baid, baid_data->sta_mask, baid_data->tid, sta_id,
+ tid))
+ goto out_unlock;
+
+ IWL_DEBUG_DROP(mld, "Received a BAR, expect packet loss: nssn %d\n",
+ nssn);
+
+ iwl_mld_release_frames_from_notif(mld, napi, baid, nssn, queue);
+out_unlock:
+ rcu_read_unlock();
+}
+
+void iwl_mld_del_ba(struct iwl_mld *mld, int queue,
+ struct iwl_mld_delba_data *data)
+{
+ struct iwl_mld_baid_data *ba_data;
+ struct iwl_mld_reorder_buffer *reorder_buf;
+ struct ieee80211_link_sta *link_sta;
+ u8 baid = data->baid;
+ u32 sta_id;
+
+ if (WARN_ONCE(baid >= IWL_MAX_BAID, "invalid BAID: %x\n", baid))
+ return;
+
+ rcu_read_lock();
+
+ ba_data = rcu_dereference(mld->fw_id_to_ba[baid]);
+ if (WARN_ON_ONCE(!ba_data))
+ goto out_unlock;
+
+ /* pick any STA ID to find the pointer */
+ sta_id = ffs(ba_data->sta_mask) - 1;
+ link_sta = rcu_dereference(mld->fw_id_to_link_sta[sta_id]);
+ if (WARN_ON_ONCE(IS_ERR_OR_NULL(link_sta) || !link_sta->sta))
+ goto out_unlock;
+
+ reorder_buf = &ba_data->reorder_buf[queue];
+
+ /* release all frames that are in the reorder buffer to the stack */
+ iwl_mld_reorder_release_frames(mld, link_sta->sta, NULL,
+ ba_data, reorder_buf,
+ ieee80211_sn_add(reorder_buf->head_sn,
+ ba_data->buf_size));
+out_unlock:
+ rcu_read_unlock();
+}
+
+/* Returns an enum iwl_mld_reorder_result value: whether the MPDU was
+ * buffered or dropped, or whether it should be passed to the upper layer.
+ */
+enum iwl_mld_reorder_result
+iwl_mld_reorder(struct iwl_mld *mld, struct napi_struct *napi,
+ int queue, struct ieee80211_sta *sta,
+ struct sk_buff *skb, struct iwl_rx_mpdu_desc *desc)
+{
+ struct ieee80211_hdr *hdr = (void *)skb_mac_header(skb);
+ struct iwl_mld_baid_data *baid_data;
+ struct iwl_mld_reorder_buffer *buffer;
+ struct iwl_mld_reorder_buf_entry *entries;
+ struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(sta);
+ struct iwl_mld_link_sta *mld_link_sta;
+ u32 reorder = le32_to_cpu(desc->reorder_data);
+ bool amsdu, last_subframe, is_old_sn, is_dup;
+ u8 tid = ieee80211_get_tid(hdr);
+ u8 baid;
+ u16 nssn, sn;
+ u32 sta_mask = 0;
+ int index;
+ u8 link_id;
+
+ baid = u32_get_bits(reorder, IWL_RX_MPDU_REORDER_BAID_MASK);
+
+ /* This also covers the case of receiving a Block Ack Request
+ * outside a BA session; we'll pass it to mac80211 and that
+ * then sends a delBA action frame.
+ * This also covers pure monitor mode, in which case we won't
+ * have any BA sessions.
+ */
+ if (baid == IWL_RX_REORDER_DATA_INVALID_BAID)
+ return IWL_MLD_PASS_SKB;
+
+ /* no sta yet */
+ if (WARN_ONCE(!sta,
+ "Got valid BAID without a valid station assigned\n"))
+ return IWL_MLD_PASS_SKB;
+
+ /* not a data packet */
+ if (!ieee80211_is_data_qos(hdr->frame_control) ||
+ is_multicast_ether_addr(hdr->addr1))
+ return IWL_MLD_PASS_SKB;
+
+ if (unlikely(!ieee80211_is_data_present(hdr->frame_control)))
+ return IWL_MLD_PASS_SKB;
+
+ baid_data = rcu_dereference(mld->fw_id_to_ba[baid]);
+ if (!baid_data) {
+ IWL_DEBUG_HT(mld,
+ "Got valid BAID but no baid allocated, bypass re-ordering (BAID=%d reorder=0x%x)\n",
+ baid, reorder);
+ return IWL_MLD_PASS_SKB;
+ }
+
+ for_each_mld_link_sta(mld_sta, mld_link_sta, link_id)
+ sta_mask |= BIT(mld_link_sta->fw_id);
+
+ /* verify the BAID is correctly mapped to the sta and tid */
+ if (IWL_FW_CHECK(mld,
+ tid != baid_data->tid ||
+ !(sta_mask & baid_data->sta_mask),
+ "BAID 0x%x is mapped to sta_mask:0x%x tid:%d, but was received for sta_mask:0x%x tid:%d\n",
+ baid, baid_data->sta_mask, baid_data->tid,
+ sta_mask, tid))
+ return IWL_MLD_PASS_SKB;
+
+ buffer = &baid_data->reorder_buf[queue];
+ entries = &baid_data->entries[queue * baid_data->entries_per_queue];
+
+ is_old_sn = !!(reorder & IWL_RX_MPDU_REORDER_BA_OLD_SN);
+
+ if (!buffer->valid && is_old_sn)
+ return IWL_MLD_PASS_SKB;
+
+ buffer->valid = true;
+
+ is_dup = !!(desc->status & cpu_to_le32(IWL_RX_MPDU_STATUS_DUPLICATE));
+
+ /* drop any duplicated or outdated packets */
+ if (is_dup || is_old_sn)
+ return IWL_MLD_DROP_SKB;
+
+ sn = u32_get_bits(reorder, IWL_RX_MPDU_REORDER_SN_MASK);
+ nssn = u32_get_bits(reorder, IWL_RX_MPDU_REORDER_NSSN_MASK);
+ amsdu = desc->mac_flags2 & IWL_RX_MPDU_MFLG2_AMSDU;
+ last_subframe = desc->amsdu_info & IWL_RX_MPDU_AMSDU_LAST_SUBFRAME;
+
+ /* release immediately if allowed by nssn and no stored frames */
+ if (!buffer->num_stored && ieee80211_sn_less(sn, nssn)) {
+ if (!amsdu || last_subframe)
+ buffer->head_sn = nssn;
+ return IWL_MLD_PASS_SKB;
+ }
+
+ /* Release immediately if there are no stored frames and the SN is
+ * equal to the head. This can happen after the reorder timer fired,
+ * leaving NSSN behind head_sn: everything was already released, so
+ * when the next in-sequence frame arrives the NSSN alone would not
+ * allow an immediate release, even though there is no hole and we
+ * can move forward.
+ */
+ if (!buffer->num_stored && sn == buffer->head_sn) {
+ if (!amsdu || last_subframe)
+ buffer->head_sn = ieee80211_sn_inc(buffer->head_sn);
+ return IWL_MLD_PASS_SKB;
+ }
+
+ /* put in reorder buffer */
+ index = sn % baid_data->buf_size;
+ __skb_queue_tail(&entries[index].frames, skb);
+ buffer->num_stored++;
+
+ /* We cannot trust NSSN for AMSDU sub-frames that are not the last. The
+ * reason is that NSSN advances on the first sub-frame, and may cause
+ * the reorder buffer to advance before all the sub-frames arrive.
+ *
+ * Example: reorder buffer contains SN 0 & 2, and we receive AMSDU with
+ * SN 1. NSSN for first sub frame will be 3 with the result of driver
+ * releasing SN 0,1, 2. When sub-frame 1 arrives - reorder buffer is
+ * already ahead and it will be dropped.
+ * If the last sub-frame is not on this queue - we will get frame
+ * release notification with up to date NSSN.
+ */
+ if (!amsdu || last_subframe)
+ iwl_mld_reorder_release_frames(mld, sta, napi, baid_data,
+ buffer, nssn);
+
+ return IWL_MLD_BUFFERED_SKB;
+}
+EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_mld_reorder);
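Since iwl_mld_reorder() never frees or forwards the skb itself, the caller has to act on the returned value. A minimal, hypothetical caller is sketched below (the real hook lives in the driver's RX path, which may handle the PASS/DROP cases differently):

/* Hypothetical caller, illustrating the contract of the three results. */
static void sketch_rx_handle_mpdu(struct iwl_mld *mld, struct napi_struct *napi,
				  int queue, struct ieee80211_sta *sta,
				  struct sk_buff *skb,
				  struct iwl_rx_mpdu_desc *desc)
{
	switch (iwl_mld_reorder(mld, napi, queue, sta, skb, desc)) {
	case IWL_MLD_PASS_SKB:
		/* in order, or no BA session: hand it to mac80211 now */
		iwl_mld_pass_packet_to_mac80211(mld, napi, skb, queue, sta);
		break;
	case IWL_MLD_BUFFERED_SKB:
		/* the reorder buffer now owns the skb, nothing to do */
		break;
	case IWL_MLD_DROP_SKB:
		/* duplicate or old SN: the caller must free the skb */
		kfree_skb(skb);
		break;
	}
}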
+
+static void iwl_mld_rx_agg_session_expired(struct timer_list *t)
+{
+ struct iwl_mld_baid_data *data =
+ from_timer(data, t, session_timer);
+ struct iwl_mld_baid_data __rcu **rcu_ptr = data->rcu_ptr;
+ struct iwl_mld_baid_data *ba_data;
+ struct ieee80211_link_sta *link_sta;
+ struct iwl_mld_sta *mld_sta;
+ unsigned long timeout;
+ unsigned int sta_id;
+
+ rcu_read_lock();
+
+ ba_data = rcu_dereference(*rcu_ptr);
+ if (WARN_ON(!ba_data))
+ goto unlock;
+
+ if (WARN_ON(!ba_data->timeout))
+ goto unlock;
+
+ timeout = ba_data->last_rx_timestamp +
+ TU_TO_JIFFIES(ba_data->timeout * 2);
+ if (time_is_after_jiffies(timeout)) {
+ mod_timer(&ba_data->session_timer, timeout);
+ goto unlock;
+ }
+
+ /* timer expired, pick any STA ID to find the pointer */
+ sta_id = ffs(ba_data->sta_mask) - 1;
+ link_sta = rcu_dereference(ba_data->mld->fw_id_to_link_sta[sta_id]);
+
+ /* sta should be valid unless the following happens:
+ * The firmware asserts which triggers a reconfig flow, but
+ * the reconfig fails before we set the pointer to sta into
+ * the fw_id_to_link_sta pointer table. mac80211 can't stop
+ * A-MPDU and hence the timer continues to run. Then, the
+ * timer expires and sta is NULL.
+ */
+ if (IS_ERR_OR_NULL(link_sta) || WARN_ON(!link_sta->sta))
+ goto unlock;
+
+ mld_sta = iwl_mld_sta_from_mac80211(link_sta->sta);
+ ieee80211_rx_ba_timer_expired(mld_sta->vif, link_sta->sta->addr,
+ ba_data->tid);
+unlock:
+ rcu_read_unlock();
+}
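The timer above is armed for twice the negotiated BA timeout so that last_rx_timestamp only has to be refreshed roughly once per timeout period, keeping cache bouncing between RX queues low. For reference, one TU is 1024 µs; the sketch below re-derives the conversion for illustration only (the driver's own TU_TO_JIFFIES/TU_TO_EXP_TIME macros are the authoritative definitions):

#include <linux/jiffies.h>

/* 1 TU (802.11 time unit) = 1024 microseconds */
static inline unsigned long sketch_tu_to_jiffies(unsigned int tu)
{
	return usecs_to_jiffies(tu * 1024);
}

/* Expiry test matching the logic above: the session is only considered
 * dead if nothing was received for 2 * timeout TUs after the last
 * recorded RX timestamp; otherwise the timer is simply re-armed.
 */
static inline bool sketch_ba_session_expired(unsigned long last_rx, u16 timeout)
{
	return time_is_before_jiffies(last_rx + sketch_tu_to_jiffies(2 * timeout));
}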
+
+static int
+iwl_mld_stop_ba_in_fw(struct iwl_mld *mld, struct ieee80211_sta *sta, int tid)
+{
+ struct iwl_rx_baid_cfg_cmd cmd = {
+ .action = cpu_to_le32(IWL_RX_BAID_ACTION_REMOVE),
+ .remove.sta_id_mask =
+ cpu_to_le32(iwl_mld_fw_sta_id_mask(mld, sta)),
+ .remove.tid = cpu_to_le32(tid),
+ };
+ int ret;
+
+ ret = iwl_mld_send_cmd_pdu(mld,
+ WIDE_ID(DATA_PATH_GROUP,
+ RX_BAID_ALLOCATION_CONFIG_CMD),
+ &cmd);
+ if (ret)
+ return ret;
+
+ IWL_DEBUG_HT(mld, "RX BA Session stopped in fw\n");
+
+ return ret;
+}
+
+static int
+iwl_mld_start_ba_in_fw(struct iwl_mld *mld, struct ieee80211_sta *sta,
+ int tid, u16 ssn, u16 buf_size)
+{
+ struct iwl_rx_baid_cfg_cmd cmd = {
+ .action = cpu_to_le32(IWL_RX_BAID_ACTION_ADD),
+ .alloc.sta_id_mask =
+ cpu_to_le32(iwl_mld_fw_sta_id_mask(mld, sta)),
+ .alloc.tid = tid,
+ .alloc.ssn = cpu_to_le16(ssn),
+ .alloc.win_size = cpu_to_le16(buf_size),
+ };
+ struct iwl_host_cmd hcmd = {
+ .id = WIDE_ID(DATA_PATH_GROUP, RX_BAID_ALLOCATION_CONFIG_CMD),
+ .flags = CMD_WANT_SKB,
+ .len[0] = sizeof(cmd),
+ .data[0] = &cmd,
+ };
+ struct iwl_rx_baid_cfg_resp *resp;
+ struct iwl_rx_packet *pkt;
+ u32 resp_len;
+ int ret, baid;
+
+ BUILD_BUG_ON(sizeof(*resp) != sizeof(baid));
+
+ ret = iwl_mld_send_cmd(mld, &hcmd);
+ if (ret)
+ return ret;
+
+ pkt = hcmd.resp_pkt;
+
+ resp_len = iwl_rx_packet_payload_len(pkt);
+ if (IWL_FW_CHECK(mld, resp_len != sizeof(*resp),
+ "BAID_ALLOC_CMD: unexpected response length %d\n",
+ resp_len)) {
+ ret = -EIO;
+ goto out;
+ }
+
+ IWL_DEBUG_HT(mld, "RX BA Session started in fw\n");
+
+ resp = (void *)pkt->data;
+ baid = le32_to_cpu(resp->baid);
+
+ if (IWL_FW_CHECK(mld, baid < 0 || baid >= ARRAY_SIZE(mld->fw_id_to_ba),
+ "BAID_ALLOC_CMD: invalid BAID response %d\n", baid)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = baid;
+out:
+ iwl_free_resp(&hcmd);
+ return ret;
+}
+
+static void iwl_mld_init_reorder_buffer(struct iwl_mld *mld,
+ struct iwl_mld_baid_data *data,
+ u16 ssn)
+{
+ for (int i = 0; i < mld->trans->num_rx_queues; i++) {
+ struct iwl_mld_reorder_buffer *reorder_buf =
+ &data->reorder_buf[i];
+ struct iwl_mld_reorder_buf_entry *entries =
+ &data->entries[i * data->entries_per_queue];
+
+ reorder_buf->head_sn = ssn;
+ reorder_buf->queue = i;
+
+ for (int j = 0; j < data->buf_size; j++)
+ __skb_queue_head_init(&entries[j].frames);
+ }
+}
+
+static void iwl_mld_free_reorder_buffer(struct iwl_mld *mld,
+ struct iwl_mld_baid_data *data)
+{
+ struct iwl_mld_delba_data delba_data = {
+ .baid = data->baid,
+ };
+
+ iwl_mld_sync_rx_queues(mld, IWL_MLD_RXQ_NOTIF_DEL_BA,
+ &delba_data, sizeof(delba_data));
+
+ for (int i = 0; i < mld->trans->num_rx_queues; i++) {
+ struct iwl_mld_reorder_buffer *reorder_buf =
+ &data->reorder_buf[i];
+ struct iwl_mld_reorder_buf_entry *entries =
+ &data->entries[i * data->entries_per_queue];
+
+ if (likely(!reorder_buf->num_stored))
+ continue;
+
+ /* This shouldn't happen in regular DELBA since the RX queues
+ * sync internal DELBA notification should trigger a release
+ * of all frames in the reorder buffer.
+ */
+ WARN_ON(1);
+
+ for (int j = 0; j < data->buf_size; j++)
+ __skb_queue_purge(&entries[j].frames);
+ }
+}
+
+int iwl_mld_ampdu_rx_start(struct iwl_mld *mld, struct ieee80211_sta *sta,
+ int tid, u16 ssn, u16 buf_size, u16 timeout)
+{
+ struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(sta);
+ struct iwl_mld_baid_data *baid_data = NULL;
+ u32 reorder_buf_size = buf_size * sizeof(baid_data->entries[0]);
+ int ret, baid;
+ u32 sta_mask;
+
+ lockdep_assert_wiphy(mld->wiphy);
+
+ if (mld->num_rx_ba_sessions >= IWL_MAX_BAID) {
+ IWL_DEBUG_HT(mld,
+ "Max num of RX BA sessions reached; blocking new session\n");
+ return -ENOSPC;
+ }
+
+ sta_mask = iwl_mld_fw_sta_id_mask(mld, sta);
+ if (WARN_ON(!sta_mask))
+ return -EINVAL;
+
+ /* sparse doesn't like the __align() so don't check */
+#ifndef __CHECKER__
+ /* The division below will be OK if either the cache line size
+ * can be divided by the entry size (ALIGN will round up) or if
+ * the entry size can be divided by the cache line size, in which
+ * case the ALIGN() will do nothing.
+ */
+ BUILD_BUG_ON(SMP_CACHE_BYTES % sizeof(baid_data->entries[0]) &&
+ sizeof(baid_data->entries[0]) % SMP_CACHE_BYTES);
+#endif
+
+ /* Upward align the reorder buffer size to fill an entire cache
+ * line for each queue, to avoid sharing cache lines between
+ * different queues.
+ */
+ reorder_buf_size = ALIGN(reorder_buf_size, SMP_CACHE_BYTES);
+
+ /* Allocate here so if allocation fails we can bail out early
+ * before starting the BA session in the firmware
+ */
+ baid_data = kzalloc(sizeof(*baid_data) +
+ mld->trans->num_rx_queues * reorder_buf_size,
+ GFP_KERNEL);
+ if (!baid_data)
+ return -ENOMEM;
+
+ /* This division is why we need the above BUILD_BUG_ON(),
+ * if that doesn't hold then this will not be right.
+ */
+ baid_data->entries_per_queue =
+ reorder_buf_size / sizeof(baid_data->entries[0]);
+
+ baid = iwl_mld_start_ba_in_fw(mld, sta, tid, ssn, buf_size);
+ if (baid < 0) {
+ ret = baid;
+ goto out_free;
+ }
+
+ mld->num_rx_ba_sessions++;
+ mld_sta->tid_to_baid[tid] = baid;
+
+ baid_data->baid = baid;
+ baid_data->mld = mld;
+ baid_data->tid = tid;
+ baid_data->buf_size = buf_size;
+ baid_data->sta_mask = sta_mask;
+ baid_data->timeout = timeout;
+ baid_data->last_rx_timestamp = jiffies;
+ baid_data->rcu_ptr = &mld->fw_id_to_ba[baid];
+
+ iwl_mld_init_reorder_buffer(mld, baid_data, ssn);
+
+ timer_setup(&baid_data->session_timer, iwl_mld_rx_agg_session_expired,
+ 0);
+ if (timeout)
+ mod_timer(&baid_data->session_timer,
+ TU_TO_EXP_TIME(timeout * 2));
+
+ IWL_DEBUG_HT(mld, "STA mask=0x%x (tid=%d) is assigned to BAID %d\n",
+ baid_data->sta_mask, tid, baid);
+
+ /* protect the BA data with RCU to cover a case where our
+ * internal RX sync mechanism will timeout (not that it's
+ * supposed to happen) and we will free the session data while
+ * RX is being processed in parallel
+ */
+ WARN_ON(rcu_access_pointer(mld->fw_id_to_ba[baid]));
+ rcu_assign_pointer(mld->fw_id_to_ba[baid], baid_data);
+
+ return 0;
+
+out_free:
+ kfree(baid_data);
+ return ret;
+}
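A worked example of the cache-line sizing above may help; the numbers assume a 64-byte cache line and a 32-byte reorder-buffer entry purely for illustration (the real values depend on the architecture and on struct sk_buff_head):

/* Assumed: SMP_CACHE_BYTES == 64, sizeof(entries[0]) == 32.
 * For an ADDBA buf_size of 33:
 *   reorder_buf_size  = 33 * 32   = 1056
 *   ALIGN(1056, 64)               = 1088
 *   entries_per_queue = 1088 / 32 = 34
 * Each queue gets 34 entry slots (one padding slot), so no cache line
 * ever holds entries belonging to two different RX queues, and the
 * BUILD_BUG_ON holds because 64 % 32 == 0.
 */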
+
+int iwl_mld_ampdu_rx_stop(struct iwl_mld *mld, struct ieee80211_sta *sta,
+ int tid)
+{
+ struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(sta);
+ int baid = mld_sta->tid_to_baid[tid];
+ struct iwl_mld_baid_data *baid_data;
+ int ret;
+
+ lockdep_assert_wiphy(mld->wiphy);
+
+ /* During firmware restart, do not send the command as the firmware no
+ * longer recognizes the session. Instead, only clear the driver BA
+ * session data.
+ */
+ if (!mld->fw_status.in_hw_restart) {
+ ret = iwl_mld_stop_ba_in_fw(mld, sta, tid);
+ if (ret)
+ return ret;
+ }
+
+ if (!WARN_ON(mld->num_rx_ba_sessions == 0))
+ mld->num_rx_ba_sessions--;
+
+ baid_data = wiphy_dereference(mld->wiphy, mld->fw_id_to_ba[baid]);
+ if (WARN_ON(!baid_data))
+ return -EINVAL;
+
+ if (timer_pending(&baid_data->session_timer))
+ timer_shutdown_sync(&baid_data->session_timer);
+
+ iwl_mld_free_reorder_buffer(mld, baid_data);
+
+ RCU_INIT_POINTER(mld->fw_id_to_ba[baid], NULL);
+ kfree_rcu(baid_data, rcu_head);
+
+ IWL_DEBUG_HT(mld, "BAID %d is free\n", baid);
+
+ return 0;
+}
+
+int iwl_mld_update_sta_baids(struct iwl_mld *mld,
+ u32 old_sta_mask,
+ u32 new_sta_mask)
+{
+ struct iwl_rx_baid_cfg_cmd cmd = {
+ .action = cpu_to_le32(IWL_RX_BAID_ACTION_MODIFY),
+ .modify.old_sta_id_mask = cpu_to_le32(old_sta_mask),
+ .modify.new_sta_id_mask = cpu_to_le32(new_sta_mask),
+ };
+ u32 cmd_id = WIDE_ID(DATA_PATH_GROUP, RX_BAID_ALLOCATION_CONFIG_CMD);
+ int baid;
+
+ /* mac80211 will remove sessions later, but we ignore all that */
+ if (mld->fw_status.in_hw_restart)
+ return 0;
+
+ BUILD_BUG_ON(sizeof(struct iwl_rx_baid_cfg_resp) != sizeof(baid));
+
+ for (baid = 0; baid < ARRAY_SIZE(mld->fw_id_to_ba); baid++) {
+ struct iwl_mld_baid_data *data;
+ int ret;
+
+ data = wiphy_dereference(mld->wiphy, mld->fw_id_to_ba[baid]);
+ if (!data)
+ continue;
+
+ if (!(data->sta_mask & old_sta_mask))
+ continue;
+
+ WARN_ONCE(data->sta_mask != old_sta_mask,
+ "BAID data for %d corrupted - expected 0x%x found 0x%x\n",
+ baid, old_sta_mask, data->sta_mask);
+
+ cmd.modify.tid = cpu_to_le32(data->tid);
+
+ ret = iwl_mld_send_cmd_pdu(mld, cmd_id, &cmd);
+ if (ret)
+ return ret;
+ data->sta_mask = new_sta_mask;
+ }
+
+ return 0;
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/agg.h b/drivers/net/wireless/intel/iwlwifi/mld/agg.h
new file mode 100644
index 000000000000..651c80d1c7cd
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mld/agg.h
@@ -0,0 +1,127 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2024 Intel Corporation
+ */
+#ifndef __iwl_agg_h__
+#define __iwl_agg_h__
+
+#include "mld.h"
+#include "fw/api/rx.h"
+
+/**
+ * struct iwl_mld_reorder_buffer - per ra/tid/queue reorder buffer
+ * @head_sn: reorder window head sequence number
+ * @num_stored: number of MPDUs stored in the buffer
+ * @queue: queue of this reorder buffer
+ * @valid: true if reordering is valid for this queue
+ */
+struct iwl_mld_reorder_buffer {
+ u16 head_sn;
+ u16 num_stored;
+ int queue;
+ bool valid;
+} ____cacheline_aligned_in_smp;
+
+/**
+ * struct iwl_mld_reorder_buf_entry - reorder buffer entry per-queue/per-seqno
+ * @frames: list of skbs stored. A list is necessary because in an A-MSDU,
+ * all sub-frames share the same sequence number, so they are stored
+ * together in the same list.
+ */
+struct iwl_mld_reorder_buf_entry {
+ struct sk_buff_head frames;
+}
+#ifndef __CHECKER__
+/* sparse doesn't like this construct: "bad integer constant expression" */
+__aligned(roundup_pow_of_two(sizeof(struct sk_buff_head)))
+#endif
+;
+
+/**
+ * struct iwl_mld_baid_data - Block Ack session data
+ * @rcu_head: RCU head for freeing this data
+ * @sta_mask: station mask for the BAID
+ * @tid: tid of the session
+ * @baid: baid of the session
+ * @buf_size: the reorder buffer size as set by the last ADDBA request
+ * @entries_per_queue: number of buffers per queue; this actually gets
+ * aligned up to avoid cache line sharing between queues
+ * @timeout: the timeout value specified in the ADDBA request.
+ * @last_rx_timestamp: timestamp of the last received packet (in jiffies). This
+ * value is updated only when the configured @timeout has passed since
+ * the last update to minimize cache bouncing between RX queues.
+ * @session_timer: timer is set to expire after 2 * @timeout (since we want
+ * to minimize the cache bouncing by updating @last_rx_timestamp only once
+ * after @timeout has passed). If no packets are received within this
+ * period, it informs mac80211 to initiate delBA flow, terminating the
+ * BA session.
+ * @rcu_ptr: BA data RCU protected access
+ * @mld: mld pointer, needed for timer context
+ * @reorder_buf: reorder buffer, allocated per queue
+ * @entries: data
+ */
+struct iwl_mld_baid_data {
+ struct rcu_head rcu_head;
+ u32 sta_mask;
+ u8 tid;
+ u8 baid;
+ u16 buf_size;
+ u16 entries_per_queue;
+ u16 timeout;
+ struct timer_list session_timer;
+ unsigned long last_rx_timestamp;
+ struct iwl_mld_baid_data __rcu **rcu_ptr;
+ struct iwl_mld *mld;
+ struct iwl_mld_reorder_buffer reorder_buf[IWL_MAX_RX_HW_QUEUES];
+ struct iwl_mld_reorder_buf_entry entries[] ____cacheline_aligned_in_smp;
+};
+
+/**
+ * struct iwl_mld_delba_data - RX queue sync data for %IWL_MLD_RXQ_NOTIF_DEL_BA
+ *
+ * @baid: Block Ack id, used to identify the BA session to be removed
+ */
+struct iwl_mld_delba_data {
+ u32 baid;
+} __packed;
+
+/**
+ * enum iwl_mld_reorder_result - Possible return values for iwl_mld_reorder()
+ * indicating how the caller should handle the skb based on the result.
+ *
+ * @IWL_MLD_PASS_SKB: skb should be passed to upper layer.
+ * @IWL_MLD_BUFFERED_SKB: skb has been buffered, don't pass it to upper layer.
+ * @IWL_MLD_DROP_SKB: skb should be dropped and freed by the caller.
+ */
+enum iwl_mld_reorder_result {
+ IWL_MLD_PASS_SKB,
+ IWL_MLD_BUFFERED_SKB,
+ IWL_MLD_DROP_SKB
+};
+
+int iwl_mld_ampdu_rx_start(struct iwl_mld *mld, struct ieee80211_sta *sta,
+ int tid, u16 ssn, u16 buf_size, u16 timeout);
+int iwl_mld_ampdu_rx_stop(struct iwl_mld *mld, struct ieee80211_sta *sta,
+ int tid);
+
+enum iwl_mld_reorder_result
+iwl_mld_reorder(struct iwl_mld *mld, struct napi_struct *napi,
+ int queue, struct ieee80211_sta *sta,
+ struct sk_buff *skb, struct iwl_rx_mpdu_desc *desc);
+
+void iwl_mld_handle_frame_release_notif(struct iwl_mld *mld,
+ struct napi_struct *napi,
+ struct iwl_rx_packet *pkt, int queue);
+void iwl_mld_handle_bar_frame_release_notif(struct iwl_mld *mld,
+ struct napi_struct *napi,
+ struct iwl_rx_packet *pkt,
+ int queue);
+
+void iwl_mld_del_ba(struct iwl_mld *mld, int queue,
+ struct iwl_mld_delba_data *data);
+
+int iwl_mld_update_sta_baids(struct iwl_mld *mld,
+ u32 old_sta_mask,
+ u32 new_sta_mask);
+
+#endif /* __iwl_agg_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/ap.c b/drivers/net/wireless/intel/iwlwifi/mld/ap.c
new file mode 100644
index 000000000000..571eabd0b511
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mld/ap.c
@@ -0,0 +1,344 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2024 Intel Corporation
+ */
+#include <linux/crc32.h>
+
+#include <net/mac80211.h>
+
+#include "ap.h"
+#include "hcmd.h"
+#include "tx.h"
+#include "power.h"
+#include "key.h"
+#include "iwl-utils.h"
+
+#include "fw/api/sta.h"
+
+void iwl_mld_set_tim_idx(struct iwl_mld *mld, __le32 *tim_index,
+ u8 *beacon, u32 frame_size)
+{
+ u32 tim_idx;
+ struct ieee80211_mgmt *mgmt = (void *)beacon;
+
+ /* The index is relative to frame start but we start looking at the
+ * variable-length part of the beacon.
+ */
+ tim_idx = mgmt->u.beacon.variable - beacon;
+
+ /* Parse variable-length elements of beacon to find WLAN_EID_TIM */
+ while ((tim_idx < (frame_size - 2)) &&
+ (beacon[tim_idx] != WLAN_EID_TIM))
+ tim_idx += beacon[tim_idx + 1] + 2;
+
+ /* If TIM field was found, set variables */
+ if ((tim_idx < (frame_size - 1)) && beacon[tim_idx] == WLAN_EID_TIM)
+ *tim_index = cpu_to_le32(tim_idx);
+ else
+ IWL_WARN(mld, "Unable to find TIM Element in beacon\n");
+}
+
+u8 iwl_mld_get_rate_flags(struct iwl_mld *mld,
+ struct ieee80211_tx_info *info,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link,
+ enum nl80211_band band)
+{
+ u32 legacy = link->beacon_tx_rate.control[band].legacy;
+ u32 rate_idx, rate_flags = 0, fw_rate;
+
+ /* if beacon rate was configured try using it */
+ if (hweight32(legacy) == 1) {
+ u32 rate = ffs(legacy) - 1;
+ struct ieee80211_supported_band *sband =
+ mld->hw->wiphy->bands[band];
+
+ rate_idx = sband->bitrates[rate].hw_value;
+ } else {
+ rate_idx = iwl_mld_get_lowest_rate(mld, info, vif);
+ }
+
+ if (rate_idx <= IWL_LAST_CCK_RATE)
+ rate_flags = IWL_MAC_BEACON_CCK;
+
+ /* Legacy rates are indexed as follows:
+ * 0 - 3 for CCK and 0 - 7 for OFDM.
+ */
+ fw_rate = (rate_idx >= IWL_FIRST_OFDM_RATE ?
+ rate_idx - IWL_FIRST_OFDM_RATE : rate_idx);
+
+ return fw_rate | rate_flags;
+}
+
+int iwl_mld_send_beacon_template_cmd(struct iwl_mld *mld,
+ struct sk_buff *beacon,
+ struct iwl_mac_beacon_cmd *cmd)
+{
+ struct iwl_host_cmd hcmd = {
+ .id = BEACON_TEMPLATE_CMD,
+ };
+
+ hcmd.len[0] = sizeof(*cmd);
+ hcmd.data[0] = cmd;
+
+ hcmd.len[1] = beacon->len;
+ hcmd.data[1] = beacon->data;
+ hcmd.dataflags[1] = IWL_HCMD_DFL_DUP;
+
+ return iwl_mld_send_cmd(mld, &hcmd);
+}
+
+static int iwl_mld_fill_beacon_template_cmd(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ struct sk_buff *beacon,
+ struct iwl_mac_beacon_cmd *cmd,
+ struct ieee80211_bss_conf *link)
+{
+ struct iwl_mld_link *mld_link = iwl_mld_link_from_mac80211(link);
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(beacon);
+ struct ieee80211_chanctx_conf *ctx;
+ bool enable_fils;
+ u16 flags = 0;
+
+ lockdep_assert_wiphy(mld->wiphy);
+
+ if (WARN_ON(!mld_link))
+ return -EINVAL;
+
+ cmd->link_id = cpu_to_le32(mld_link->fw_id);
+
+ ctx = wiphy_dereference(mld->wiphy, link->chanctx_conf);
+ if (WARN_ON(!ctx || !ctx->def.chan))
+ return -EINVAL;
+
+ enable_fils = cfg80211_channel_is_psc(ctx->def.chan) ||
+ (ctx->def.chan->band == NL80211_BAND_6GHZ &&
+ ctx->def.width >= NL80211_CHAN_WIDTH_80);
+
+ if (enable_fils) {
+ flags |= IWL_MAC_BEACON_FILS;
+ cmd->short_ssid = cpu_to_le32(~crc32_le(~0, vif->cfg.ssid,
+ vif->cfg.ssid_len));
+ }
+
+ cmd->byte_cnt = cpu_to_le16((u16)beacon->len);
+
+ flags |= iwl_mld_get_rate_flags(mld, info, vif, link,
+ ctx->def.chan->band);
+
+ cmd->flags = cpu_to_le16(flags);
+
+ if (vif->type == NL80211_IFTYPE_AP) {
+ iwl_mld_set_tim_idx(mld, &cmd->tim_idx,
+ beacon->data, beacon->len);
+
+ cmd->btwt_offset =
+ cpu_to_le32(iwl_find_ie_offset(beacon->data,
+ WLAN_EID_S1G_TWT,
+ beacon->len));
+ }
+
+ cmd->csa_offset =
+ cpu_to_le32(iwl_find_ie_offset(beacon->data,
+ WLAN_EID_CHANNEL_SWITCH,
+ beacon->len));
+ cmd->ecsa_offset =
+ cpu_to_le32(iwl_find_ie_offset(beacon->data,
+ WLAN_EID_EXT_CHANSWITCH_ANN,
+ beacon->len));
+
+ return 0;
+}
+
+/* The beacon template for the AP/GO/IBSS has changed and needs update */
+int iwl_mld_update_beacon_template(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf)
+{
+ struct iwl_mac_beacon_cmd cmd = {};
+ struct sk_buff *beacon;
+ int ret;
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+#endif
+
+ WARN_ON(vif->type != NL80211_IFTYPE_AP &&
+ vif->type != NL80211_IFTYPE_ADHOC);
+
+ if (IWL_MLD_NON_TRANSMITTING_AP)
+ return 0;
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ if (mld_vif->beacon_inject_active) {
+ IWL_DEBUG_INFO(mld,
+ "Can't update template, beacon injection's active\n");
+ return -EBUSY;
+ }
+
+#endif
+ beacon = ieee80211_beacon_get_template(mld->hw, vif, NULL,
+ link_conf->link_id);
+ if (!beacon)
+ return -ENOMEM;
+
+ ret = iwl_mld_fill_beacon_template_cmd(mld, vif, beacon, &cmd,
+ link_conf);
+
+ if (!ret)
+ ret = iwl_mld_send_beacon_template_cmd(mld, beacon, &cmd);
+
+ dev_kfree_skb(beacon);
+
+ return ret;
+}
+
+void iwl_mld_free_ap_early_key(struct iwl_mld *mld,
+ struct ieee80211_key_conf *key,
+ struct iwl_mld_vif *mld_vif)
+{
+ struct iwl_mld_link *link;
+
+ if (WARN_ON(key->link_id < 0))
+ return;
+
+ link = iwl_mld_link_dereference_check(mld_vif, key->link_id);
+ if (WARN_ON(!link))
+ return;
+
+ for (int i = 0; i < ARRAY_SIZE(link->ap_early_keys); i++) {
+ if (link->ap_early_keys[i] != key)
+ continue;
+ /* Those weren't sent to FW, so should be marked as INVALID */
+ if (WARN_ON(key->hw_key_idx != STA_KEY_IDX_INVALID))
+ key->hw_key_idx = STA_KEY_IDX_INVALID;
+ link->ap_early_keys[i] = NULL;
+ }
+}
+
+int iwl_mld_store_ap_early_key(struct iwl_mld *mld,
+ struct ieee80211_key_conf *key,
+ struct iwl_mld_vif *mld_vif)
+{
+ struct iwl_mld_link *link;
+
+ if (WARN_ON(key->link_id < 0))
+ return -EINVAL;
+
+ link = iwl_mld_link_dereference_check(mld_vif, key->link_id);
+ if (WARN_ON(!link))
+ return -EINVAL;
+
+ for (int i = 0; i < ARRAY_SIZE(link->ap_early_keys); i++) {
+ if (!link->ap_early_keys[i]) {
+ link->ap_early_keys[i] = key;
+ return 0;
+ }
+ }
+
+ return -ENOSPC;
+}
+
+static int iwl_mld_send_ap_early_keys(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link)
+{
+ struct iwl_mld_link *mld_link = iwl_mld_link_from_mac80211(link);
+ int ret = 0;
+
+ if (WARN_ON(!link))
+ return -EINVAL;
+
+ for (int i = 0; i < ARRAY_SIZE(mld_link->ap_early_keys); i++) {
+ struct ieee80211_key_conf *key = mld_link->ap_early_keys[i];
+
+ if (!key)
+ continue;
+
+ mld_link->ap_early_keys[i] = NULL;
+
+ ret = iwl_mld_add_key(mld, vif, NULL, key);
+ if (ret)
+ break;
+ }
+ return ret;
+}
+
+int iwl_mld_start_ap_ibss(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link)
+{
+ struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+ int ret;
+
+ if (vif->type == NL80211_IFTYPE_AP)
+ iwl_mld_send_ap_tx_power_constraint_cmd(mld, vif, link);
+
+ ret = iwl_mld_update_beacon_template(mld, vif, link);
+ if (ret)
+ return ret;
+
+ /* the link should be already activated when assigning chan context,
+ * and LINK_CONTEXT_MODIFY_EHT_PARAMS is deprecated
+ */
+ ret = iwl_mld_change_link_in_fw(mld, link,
+ LINK_CONTEXT_MODIFY_ALL &
+ ~(LINK_CONTEXT_MODIFY_ACTIVE |
+ LINK_CONTEXT_MODIFY_EHT_PARAMS));
+ if (ret)
+ return ret;
+
+ ret = iwl_mld_add_mcast_sta(mld, vif, link);
+ if (ret)
+ return ret;
+
+ ret = iwl_mld_add_bcast_sta(mld, vif, link);
+ if (ret)
+ goto rm_mcast;
+
+ /* Those keys were configured by the upper layers before starting the
+ * AP. Now that it is started and the bcast and mcast sta were added to
+ * the FW, we can add the keys too.
+ */
+ ret = iwl_mld_send_ap_early_keys(mld, vif, link);
+ if (ret)
+ goto rm_bcast;
+
+ if (ieee80211_vif_type_p2p(vif) == NL80211_IFTYPE_AP)
+ iwl_mld_vif_update_low_latency(mld, vif, true,
+ LOW_LATENCY_VIF_TYPE);
+
+ mld_vif->ap_ibss_active = true;
+
+ if (vif->p2p && mld->p2p_device_vif)
+ return iwl_mld_mac_fw_action(mld, mld->p2p_device_vif,
+ FW_CTXT_ACTION_MODIFY);
+
+ return 0;
+rm_bcast:
+ iwl_mld_remove_bcast_sta(mld, vif, link);
+rm_mcast:
+ iwl_mld_remove_mcast_sta(mld, vif, link);
+ return ret;
+}
+
+void iwl_mld_stop_ap_ibss(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link)
+{
+ struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+
+ mld_vif->ap_ibss_active = false;
+
+ if (vif->p2p && mld->p2p_device_vif)
+ iwl_mld_mac_fw_action(mld, mld->p2p_device_vif,
+ FW_CTXT_ACTION_MODIFY);
+
+ if (ieee80211_vif_type_p2p(vif) == NL80211_IFTYPE_AP)
+ iwl_mld_vif_update_low_latency(mld, vif, false,
+ LOW_LATENCY_VIF_TYPE);
+
+ iwl_mld_remove_bcast_sta(mld, vif, link);
+
+ iwl_mld_remove_mcast_sta(mld, vif, link);
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/ap.h b/drivers/net/wireless/intel/iwlwifi/mld/ap.h
new file mode 100644
index 000000000000..4a6f52b9552d
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mld/ap.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2024 Intel Corporation
+ */
+#ifndef __iwl_ap_h__
+#define __iwl_ap_h__
+
+#include "mld.h"
+#include "iface.h"
+
+#include "fw/api/tx.h"
+
+int iwl_mld_update_beacon_template(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf);
+
+int iwl_mld_start_ap_ibss(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link);
+
+void iwl_mld_stop_ap_ibss(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link);
+
+int iwl_mld_store_ap_early_key(struct iwl_mld *mld,
+ struct ieee80211_key_conf *key,
+ struct iwl_mld_vif *mld_vif);
+
+void iwl_mld_free_ap_early_key(struct iwl_mld *mld,
+ struct ieee80211_key_conf *key,
+ struct iwl_mld_vif *mld_vif);
+
+u8 iwl_mld_get_rate_flags(struct iwl_mld *mld,
+ struct ieee80211_tx_info *info,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link,
+ enum nl80211_band band);
+
+void iwl_mld_set_tim_idx(struct iwl_mld *mld, __le32 *tim_index,
+ u8 *beacon, u32 frame_size);
+
+int iwl_mld_send_beacon_template_cmd(struct iwl_mld *mld,
+ struct sk_buff *beacon,
+ struct iwl_mac_beacon_cmd *cmd);
+
+#endif /* __iwl_ap_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/coex.c b/drivers/net/wireless/intel/iwlwifi/mld/coex.c
new file mode 100644
index 000000000000..5f262bd43f21
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mld/coex.c
@@ -0,0 +1,40 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2024-2025 Intel Corporation
+ */
+
+#include "fw/api/coex.h"
+
+#include "coex.h"
+#include "mld.h"
+#include "hcmd.h"
+#include "mlo.h"
+
+int iwl_mld_send_bt_init_conf(struct iwl_mld *mld)
+{
+ struct iwl_bt_coex_cmd cmd = {
+ .mode = cpu_to_le32(BT_COEX_NW),
+ .enabled_modules = cpu_to_le32(BT_COEX_MPLUT_ENABLED |
+ BT_COEX_HIGH_BAND_RET),
+ };
+
+ return iwl_mld_send_cmd_pdu(mld, BT_CONFIG, &cmd);
+}
+
+void iwl_mld_handle_bt_coex_notif(struct iwl_mld *mld,
+ struct iwl_rx_packet *pkt)
+{
+ const struct iwl_bt_coex_profile_notif *notif = (void *)pkt->data;
+ const struct iwl_bt_coex_profile_notif zero_notif = {};
+ /* zeroed structure means that BT is OFF */
+ bool bt_is_active = memcmp(notif, &zero_notif, sizeof(*notif));
+
+ if (bt_is_active == mld->bt_is_active)
+ return;
+
+ IWL_DEBUG_INFO(mld, "BT was turned %s\n", bt_is_active ? "ON" : "OFF");
+
+ mld->bt_is_active = bt_is_active;
+
+ iwl_mld_emlsr_check_bt(mld);
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/coex.h b/drivers/net/wireless/intel/iwlwifi/mld/coex.h
new file mode 100644
index 000000000000..a77c5dc9613c
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mld/coex.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2024 Intel Corporation
+ */
+#ifndef __iwl_mld_coex_h__
+#define __iwl_mld_coex_h__
+
+#include "mld.h"
+
+int iwl_mld_send_bt_init_conf(struct iwl_mld *mld);
+
+void iwl_mld_handle_bt_coex_notif(struct iwl_mld *mld,
+ struct iwl_rx_packet *pkt);
+
+#endif /* __iwl_mld_coex_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/constants.h b/drivers/net/wireless/intel/iwlwifi/mld/constants.h
new file mode 100644
index 000000000000..2a59b29b75cb
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mld/constants.h
@@ -0,0 +1,88 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2024-2025 Intel Corporation
+ */
+#ifndef __iwl_mld_constants_h__
+#define __iwl_mld_constants_h__
+
+#define IWL_MLD_MISSED_BEACONS_SINCE_RX_THOLD 4
+#define IWL_MLD_MISSED_BEACONS_THRESHOLD 8
+#define IWL_MLD_MISSED_BEACONS_THRESHOLD_LONG 19
+#define IWL_MLD_BCN_LOSS_EXIT_ESR_THRESH_2_LINKS 5
+#define IWL_MLD_BCN_LOSS_EXIT_ESR_THRESH 15
+#define IWL_MLD_BCN_LOSS_EXIT_ESR_THRESH_BSS_PARAM_CHANGED 11
+#define IWL_MLD_LOW_RSSI_MLO_SCAN_THRESH -72
+
+#define IWL_MLD_DEFAULT_PS_TX_DATA_TIMEOUT (100 * USEC_PER_MSEC)
+#define IWL_MLD_DEFAULT_PS_RX_DATA_TIMEOUT (100 * USEC_PER_MSEC)
+#define IWL_MLD_WOWLAN_PS_TX_DATA_TIMEOUT (10 * USEC_PER_MSEC)
+#define IWL_MLD_WOWLAN_PS_RX_DATA_TIMEOUT (10 * USEC_PER_MSEC)
+#define IWL_MLD_SHORT_PS_TX_DATA_TIMEOUT (2 * 1024) /* defined in TU */
+#define IWL_MLD_SHORT_PS_RX_DATA_TIMEOUT (40 * 1024) /* defined in TU */
+
+#define IWL_MLD_UAPSD_RX_DATA_TIMEOUT (50 * USEC_PER_MSEC)
+#define IWL_MLD_UAPSD_TX_DATA_TIMEOUT (50 * USEC_PER_MSEC)
+
+#define IWL_MLD_PS_SNOOZE_INTERVAL 25
+#define IWL_MLD_PS_SNOOZE_WINDOW 50
+
+#define IWL_MLD_PS_SNOOZE_HEAVY_TX_THLD_PACKETS 30
+#define IWL_MLD_PS_SNOOZE_HEAVY_RX_THLD_PACKETS 20
+
+#define IWL_MLD_PS_HEAVY_TX_THLD_PERCENT 50
+#define IWL_MLD_PS_HEAVY_RX_THLD_PERCENT 50
+#define IWL_MLD_PS_HEAVY_TX_THLD_PACKETS 20
+#define IWL_MLD_PS_HEAVY_RX_THLD_PACKETS 8
+
+#define IWL_MLD_TRIGGER_LINK_SEL_TIME_SEC 30
+#define IWL_MLD_SCAN_EXPIRE_TIME_SEC 20
+
+#define IWL_MLD_TPT_COUNT_WINDOW (5 * HZ)
+
+/* OMI reduced BW thresholds (channel load percentage) */
+#define IWL_MLD_OMI_ENTER_CHAN_LOAD 10
+#define IWL_MLD_OMI_EXIT_CHAN_LOAD_160 20
+#define IWL_MLD_OMI_EXIT_CHAN_LOAD_320 30
+/* time (in milliseconds) to let AP "settle" the OMI */
+#define IWL_MLD_OMI_AP_SETTLE_DELAY 27
+/* time (in milliseconds) to not enter OMI reduced BW after leaving */
+#define IWL_MLD_OMI_EXIT_PROTECTION 5000
+
+#define IWL_MLD_DIS_RANDOM_FW_ID false
+#define IWL_MLD_D3_DEBUG false
+#define IWL_MLD_NON_TRANSMITTING_AP false
+#define IWL_MLD_6GHZ_PASSIVE_SCAN_TIMEOUT 3000 /* in seconds */
+#define IWL_MLD_6GHZ_PASSIVE_SCAN_ASSOC_TIMEOUT 60 /* in seconds */
+#define IWL_MLD_CONN_LISTEN_INTERVAL 10
+#define IWL_MLD_ADAPTIVE_DWELL_NUM_APS_OVERRIDE 0
+#define IWL_MLD_AUTO_EML_ENABLE true
+
+#define IWL_MLD_HIGH_RSSI_THRESH_20MHZ -67
+#define IWL_MLD_LOW_RSSI_THRESH_20MHZ -72
+#define IWL_MLD_HIGH_RSSI_THRESH_40MHZ -64
+#define IWL_MLD_LOW_RSSI_THRESH_40MHZ -72
+#define IWL_MLD_HIGH_RSSI_THRESH_80MHZ -61
+#define IWL_MLD_LOW_RSSI_THRESH_80MHZ -72
+#define IWL_MLD_HIGH_RSSI_THRESH_160MHZ -58
+#define IWL_MLD_LOW_RSSI_THRESH_160MHZ -72
+
+#define IWL_MLD_ENTER_EMLSR_TPT_THRESH 400
+#define IWL_MLD_EXIT_EMLSR_CHAN_LOAD 2 /* in percentage */
+
+#define IWL_MLD_FTM_INITIATOR_ALGO IWL_TOF_ALGO_TYPE_MAX_LIKE
+#define IWL_MLD_FTM_INITIATOR_DYNACK true
+#define IWL_MLD_FTM_LMR_FEEDBACK_TERMINATE false
+#define IWL_MLD_FTM_TEST_INCORRECT_SAC false
+#define IWL_MLD_FTM_R2I_MAX_REP 7
+#define IWL_MLD_FTM_I2R_MAX_REP 7
+#define IWL_MLD_FTM_R2I_MAX_STS 1
+#define IWL_MLD_FTM_I2R_MAX_STS 1
+#define IWL_MLD_FTM_R2I_MAX_TOTAL_LTF 3
+#define IWL_MLD_FTM_I2R_MAX_TOTAL_LTF 3
+#define IWL_MLD_FTM_RESP_NDP_SUPPORT true
+#define IWL_MLD_FTM_RESP_LMR_FEEDBACK_SUPPORT true
+#define IWL_MLD_FTM_NON_TB_MIN_TIME_BETWEEN_MSR 7
+#define IWL_MLD_FTM_NON_TB_MAX_TIME_BETWEEN_MSR 1000
+
+#endif /* __iwl_mld_constants_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/d3.c b/drivers/net/wireless/intel/iwlwifi/mld/d3.c
new file mode 100644
index 000000000000..5a7207accd86
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mld/d3.c
@@ -0,0 +1,1998 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2024-2025 Intel Corporation
+ */
+#include "mld.h"
+
+#include "d3.h"
+#include "power.h"
+#include "hcmd.h"
+#include "iface.h"
+#include "mcc.h"
+#include "sta.h"
+#include "mlo.h"
+
+#include "fw/api/d3.h"
+#include "fw/api/offload.h"
+#include "fw/api/sta.h"
+#include "fw/dbg.h"
+
+#include <net/ipv6.h>
+#include <net/addrconf.h>
+#include <linux/bitops.h>
+
+/**
+ * enum iwl_mld_d3_notif - d3 notifications
+ * @IWL_D3_NOTIF_WOWLAN_INFO: WOWLAN_INFO_NOTIF is expected/was received
+ * @IWL_D3_NOTIF_WOWLAN_WAKE_PKT: WOWLAN_WAKE_PKT_NOTIF is expected/was received
+ * @IWL_D3_NOTIF_PROT_OFFLOAD: PROT_OFFLOAD_NOTIF is expected/was received
+ * @IWL_D3_ND_MATCH_INFO: OFFLOAD_MATCH_INFO_NOTIF is expected/was received
+ * @IWL_D3_NOTIF_D3_END_NOTIF: D3_END_NOTIF is expected/was received
+ */
+enum iwl_mld_d3_notif {
+ IWL_D3_NOTIF_WOWLAN_INFO = BIT(0),
+ IWL_D3_NOTIF_WOWLAN_WAKE_PKT = BIT(1),
+ IWL_D3_NOTIF_PROT_OFFLOAD = BIT(2),
+ IWL_D3_ND_MATCH_INFO = BIT(3),
+ IWL_D3_NOTIF_D3_END_NOTIF = BIT(4)
+};
+
+struct iwl_mld_resume_key_iter_data {
+ struct iwl_mld *mld;
+ struct iwl_mld_wowlan_status *wowlan_status;
+ u32 num_keys, gtk_cipher, igtk_cipher, bigtk_cipher;
+ bool unhandled_cipher;
+};
+
+struct iwl_mld_suspend_key_iter_data {
+ struct iwl_wowlan_rsc_tsc_params_cmd *rsc;
+ bool have_rsc;
+ int gtks;
+ int found_gtk_idx[4];
+ __le32 gtk_cipher;
+ __le32 igtk_cipher;
+ __le32 bigtk_cipher;
+};
+
+struct iwl_mld_mcast_key_data {
+ u8 key[WOWLAN_KEY_MAX_SIZE];
+ u8 len;
+ u8 flags;
+ u8 id;
+ union {
+ struct {
+ struct ieee80211_key_seq aes_seq[IWL_MAX_TID_COUNT];
+ struct ieee80211_key_seq tkip_seq[IWL_MAX_TID_COUNT];
+ } gtk;
+ struct {
+ struct ieee80211_key_seq cmac_gmac_seq;
+ } igtk_bigtk;
+ };
+
+};
+
+/**
+ * struct iwl_mld_wowlan_status - contains wowlan status data from
+ * all wowlan notifications
+ * @wakeup_reasons: wakeup reasons, see &enum iwl_wowlan_wakeup_reason
+ * @replay_ctr: GTK rekey replay counter
+ * @pattern_number: number of the matched patterns on packets
+ * @last_qos_seq: QoS sequence counter of offloaded tid
+ * @num_of_gtk_rekeys: number of GTK rekeys during D3
+ * @tid_offloaded_tx: tid used by the firmware to transmit data packets
+ * while in wowlan
+ * @wake_packet: wakeup packet received
+ * @wake_packet_length: wake packet length
+ * @wake_packet_bufsize: wake packet bufsize
+ * @gtk: data of the last two used gtk's by the FW upon resume
+ * @igtk: data of the last used igtk by the FW upon resume
+ * @bigtk: data of the last two used bigtk's by the FW upon resume
+ * @ptk: last seq numbers per tid passed by the FW,
+ * held in both tkip and aes formats
+ */
+struct iwl_mld_wowlan_status {
+ u32 wakeup_reasons;
+ u64 replay_ctr;
+ u16 pattern_number;
+ u16 last_qos_seq;
+ u32 num_of_gtk_rekeys;
+ u8 tid_offloaded_tx;
+ u8 *wake_packet;
+ u32 wake_packet_length;
+ u32 wake_packet_bufsize;
+ struct iwl_mld_mcast_key_data gtk[WOWLAN_GTK_KEYS_NUM];
+ struct iwl_mld_mcast_key_data igtk;
+ struct iwl_mld_mcast_key_data bigtk[WOWLAN_BIGTK_KEYS_NUM];
+ struct {
+ struct ieee80211_key_seq aes_seq[IWL_MAX_TID_COUNT];
+ struct ieee80211_key_seq tkip_seq[IWL_MAX_TID_COUNT];
+
+ } ptk;
+};
+
+#define NETDETECT_QUERY_BUF_LEN \
+ (sizeof(struct iwl_scan_offload_profile_match) * \
+ IWL_SCAN_MAX_PROFILES_V2)
+
+/**
+ * struct iwl_mld_netdetect_res - contains netdetect results from
+ * match_info_notif
+ * @matched_profiles: bitmap of matched profiles, referencing the
+ * matches passed in the scan offload request
+ * @matches: array of match information, one for each match
+ */
+struct iwl_mld_netdetect_res {
+ u32 matched_profiles;
+ u8 matches[NETDETECT_QUERY_BUF_LEN];
+};
+
+/**
+ * struct iwl_mld_resume_data - d3 resume flow data
+ * @notifs_expected: bitmap of expected notifications from fw,
+ * see &enum iwl_mld_d3_notif
+ * @notifs_received: bitmap of received notifications from fw,
+ * see &enum iwl_mld_d3_notif
+ * @d3_end_flags: bitmap of flags from d3_end_notif
+ * @notif_handling_err: error handling one of the resume notifications
+ * @wowlan_status: wowlan status data from all wowlan notifications
+ * @netdetect_res: contains netdetect results from match_info_notif
+ */
+struct iwl_mld_resume_data {
+ u32 notifs_expected;
+ u32 notifs_received;
+ u32 d3_end_flags;
+ bool notif_handling_err;
+ struct iwl_mld_wowlan_status *wowlan_status;
+ struct iwl_mld_netdetect_res *netdetect_res;
+};
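A plausible way such expected/received bitmaps are consumed is sketched below; this is only an illustration of the pattern, the actual wait loop appears later in this file and may differ:

/* Illustration only: record one notification and report whether every
 * notification the resume flow asked for has now arrived.
 */
static bool sketch_d3_notif_done(struct iwl_mld_resume_data *data,
				 enum iwl_mld_d3_notif notif)
{
	data->notifs_received |= notif;
	return (data->notifs_received & data->notifs_expected) ==
	       data->notifs_expected;
}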
+
+#define IWL_WOWLAN_WAKEUP_REASON_HAS_WAKEUP_PKT \
+ (IWL_WOWLAN_WAKEUP_BY_MAGIC_PACKET | \
+ IWL_WOWLAN_WAKEUP_BY_PATTERN | \
+ IWL_WAKEUP_BY_PATTERN_IPV4_TCP_SYN |\
+ IWL_WAKEUP_BY_PATTERN_IPV4_TCP_SYN_WILDCARD |\
+ IWL_WAKEUP_BY_PATTERN_IPV6_TCP_SYN |\
+ IWL_WAKEUP_BY_PATTERN_IPV6_TCP_SYN_WILDCARD)
+
+#define IWL_WOWLAN_OFFLOAD_TID 0
+
+void iwl_mld_set_rekey_data(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct cfg80211_gtk_rekey_data *data)
+{
+ struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+ struct iwl_mld_wowlan_data *wowlan_data = &mld_vif->wowlan_data;
+
+ lockdep_assert_wiphy(mld->wiphy);
+
+ wowlan_data->rekey_data.kek_len = data->kek_len;
+ wowlan_data->rekey_data.kck_len = data->kck_len;
+ memcpy(wowlan_data->rekey_data.kek, data->kek, data->kek_len);
+ memcpy(wowlan_data->rekey_data.kck, data->kck, data->kck_len);
+ wowlan_data->rekey_data.akm = data->akm & 0xFF;
+ wowlan_data->rekey_data.replay_ctr =
+ cpu_to_le64(be64_to_cpup((const __be64 *)data->replay_ctr));
+ wowlan_data->rekey_data.valid = true;
+}
+
+#if IS_ENABLED(CONFIG_IPV6)
+void iwl_mld_ipv6_addr_change(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct inet6_dev *idev)
+{
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+ struct iwl_mld_wowlan_data *wowlan_data = &mld_vif->wowlan_data;
+ struct inet6_ifaddr *ifa;
+ int idx = 0;
+
+ memset(wowlan_data->tentative_addrs, 0,
+ sizeof(wowlan_data->tentative_addrs));
+
+ read_lock_bh(&idev->lock);
+ list_for_each_entry(ifa, &idev->addr_list, if_list) {
+ wowlan_data->target_ipv6_addrs[idx] = ifa->addr;
+ if (ifa->flags & IFA_F_TENTATIVE)
+ __set_bit(idx, wowlan_data->tentative_addrs);
+ idx++;
+ if (idx >= IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_MAX)
+ break;
+ }
+ read_unlock_bh(&idev->lock);
+
+ wowlan_data->num_target_ipv6_addrs = idx;
+}
+#endif
+
+enum rt_status {
+ FW_ALIVE,
+ FW_NEEDS_RESET,
+ FW_ERROR,
+};
+
+static enum rt_status iwl_mld_check_err_tables(struct iwl_mld *mld,
+ struct ieee80211_vif *vif)
+{
+ u32 err_id;
+
+ /* check for lmac1 error */
+ if (iwl_fwrt_read_err_table(mld->trans,
+ mld->trans->dbg.lmac_error_event_table[0],
+ &err_id)) {
+ if (err_id == RF_KILL_INDICATOR_FOR_WOWLAN && vif) {
+ struct cfg80211_wowlan_wakeup wakeup = {
+ .rfkill_release = true,
+ };
+ ieee80211_report_wowlan_wakeup(vif, &wakeup,
+ GFP_KERNEL);
+
+ return FW_NEEDS_RESET;
+ }
+ return FW_ERROR;
+ }
+
+ /* check if we have lmac2 set and check for error */
+ if (iwl_fwrt_read_err_table(mld->trans,
+ mld->trans->dbg.lmac_error_event_table[1],
+ NULL))
+ return FW_ERROR;
+
+ /* check for umac error */
+ if (iwl_fwrt_read_err_table(mld->trans,
+ mld->trans->dbg.umac_error_event_table,
+ NULL))
+ return FW_ERROR;
+
+ return FW_ALIVE;
+}
+
+static bool iwl_mld_fw_needs_restart(struct iwl_mld *mld,
+ struct ieee80211_vif *vif)
+{
+ enum rt_status rt_status = iwl_mld_check_err_tables(mld, vif);
+
+ if (rt_status == FW_ALIVE)
+ return false;
+
+ if (rt_status == FW_ERROR) {
+ IWL_ERR(mld, "FW Error occurred during suspend\n");
+ iwl_fwrt_dump_error_logs(&mld->fwrt);
+ iwl_dbg_tlv_time_point(&mld->fwrt,
+ IWL_FW_INI_TIME_POINT_FW_ASSERT, NULL);
+ }
+
+ return true;
+}
+
+static int
+iwl_mld_netdetect_config(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ const struct cfg80211_wowlan *wowlan)
+{
+ int ret;
+ struct cfg80211_sched_scan_request *netdetect_cfg =
+ wowlan->nd_config;
+ struct ieee80211_scan_ies ies = {};
+
+ ret = iwl_mld_scan_stop(mld, IWL_MLD_SCAN_SCHED, true);
+ if (ret)
+ return ret;
+
+ ret = iwl_mld_sched_scan_start(mld, vif, netdetect_cfg, &ies,
+ IWL_MLD_SCAN_NETDETECT);
+ return ret;
+}
+
+static void
+iwl_mld_le64_to_tkip_seq(__le64 le_pn, struct ieee80211_key_seq *seq)
+{
+ u64 pn = le64_to_cpu(le_pn);
+
+ seq->tkip.iv16 = (u16)pn;
+ seq->tkip.iv32 = (u32)(pn >> 16);
+}
+
+static void
+iwl_mld_le64_to_aes_seq(__le64 le_pn, struct ieee80211_key_seq *seq)
+{
+ u64 pn = le64_to_cpu(le_pn);
+
+ seq->ccmp.pn[0] = pn >> 40;
+ seq->ccmp.pn[1] = pn >> 32;
+ seq->ccmp.pn[2] = pn >> 24;
+ seq->ccmp.pn[3] = pn >> 16;
+ seq->ccmp.pn[4] = pn >> 8;
+ seq->ccmp.pn[5] = pn;
+}
+
+static void
+iwl_mld_convert_gtk_resume_seq(struct iwl_mld_mcast_key_data *gtk_data,
+ const struct iwl_wowlan_all_rsc_tsc_v5 *sc,
+ int rsc_idx)
+{
+ struct ieee80211_key_seq *aes_seq = gtk_data->gtk.aes_seq;
+ struct ieee80211_key_seq *tkip_seq = gtk_data->gtk.tkip_seq;
+
+ if (rsc_idx >= ARRAY_SIZE(sc->mcast_rsc))
+ return;
+
+ /* We store both the TKIP and AES representations coming from the
+ * FW, because the FW data is decoded before we iterate the keys
+ * and learn which cipher type is actually in use.
+ */
+ for (int tid = 0; tid < IWL_MAX_TID_COUNT; tid++) {
+ iwl_mld_le64_to_tkip_seq(sc->mcast_rsc[rsc_idx][tid],
+ &tkip_seq[tid]);
+ iwl_mld_le64_to_aes_seq(sc->mcast_rsc[rsc_idx][tid],
+ &aes_seq[tid]);
+ }
+}
+
+static void
+iwl_mld_convert_gtk_resume_data(struct iwl_mld *mld,
+ struct iwl_mld_wowlan_status *wowlan_status,
+ const struct iwl_wowlan_gtk_status_v3 *gtk_data,
+ const struct iwl_wowlan_all_rsc_tsc_v5 *sc)
+{
+ int status_idx = 0;
+
+ BUILD_BUG_ON(sizeof(wowlan_status->gtk[0].key) <
+ sizeof(gtk_data[0].key));
+ BUILD_BUG_ON(ARRAY_SIZE(wowlan_status->gtk) < WOWLAN_GTK_KEYS_NUM);
+
+ for (int notif_idx = 0; notif_idx < ARRAY_SIZE(wowlan_status->gtk);
+ notif_idx++) {
+ int rsc_idx;
+
+ if (!(gtk_data[notif_idx].key_len))
+ continue;
+
+ wowlan_status->gtk[status_idx].len =
+ gtk_data[notif_idx].key_len;
+ wowlan_status->gtk[status_idx].flags =
+ gtk_data[notif_idx].key_flags;
+ wowlan_status->gtk[status_idx].id =
+ wowlan_status->gtk[status_idx].flags &
+ IWL_WOWLAN_GTK_IDX_MASK;
+ memcpy(wowlan_status->gtk[status_idx].key,
+ gtk_data[notif_idx].key,
+ sizeof(gtk_data[notif_idx].key));
+
+ /* The rsc for both gtk keys is stored in gtk[0]->sc->mcast_rsc.
+ * The gtk ids can be any two numbers between 0 and 3; the
+ * id_map maps between the key id and the index in sc->mcast_rsc.
+ */
+ rsc_idx =
+ sc->mcast_key_id_map[wowlan_status->gtk[status_idx].id];
+ iwl_mld_convert_gtk_resume_seq(&wowlan_status->gtk[status_idx],
+ sc, rsc_idx);
+
+ /* if it's as long as the TKIP encryption key, copy MIC key */
+ if (wowlan_status->gtk[status_idx].len ==
+ NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY)
+ memcpy(wowlan_status->gtk[status_idx].key +
+ NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY,
+ gtk_data[notif_idx].tkip_mic_key,
+ sizeof(gtk_data[notif_idx].tkip_mic_key));
+ status_idx++;
+ }
+}
+
+static void
+iwl_mld_convert_ptk_resume_seq(struct iwl_mld *mld,
+ struct iwl_mld_wowlan_status *wowlan_status,
+ const struct iwl_wowlan_all_rsc_tsc_v5 *sc)
+{
+ struct ieee80211_key_seq *aes_seq = wowlan_status->ptk.aes_seq;
+ struct ieee80211_key_seq *tkip_seq = wowlan_status->ptk.tkip_seq;
+
+ BUILD_BUG_ON(ARRAY_SIZE(sc->ucast_rsc) != IWL_MAX_TID_COUNT);
+
+ for (int tid = 0; tid < IWL_MAX_TID_COUNT; tid++) {
+ iwl_mld_le64_to_aes_seq(sc->ucast_rsc[tid], &aes_seq[tid]);
+ iwl_mld_le64_to_tkip_seq(sc->ucast_rsc[tid], &tkip_seq[tid]);
+ }
+}
+
+static void
+iwl_mld_convert_mcast_ipn(struct iwl_mld_mcast_key_data *key_status,
+ const struct iwl_wowlan_igtk_status *key)
+{
+ struct ieee80211_key_seq *seq =
+ &key_status->igtk_bigtk.cmac_gmac_seq;
+ u8 ipn_len = ARRAY_SIZE(key->ipn);
+
+ BUILD_BUG_ON(ipn_len != ARRAY_SIZE(seq->aes_gmac.pn));
+ BUILD_BUG_ON(ipn_len != ARRAY_SIZE(seq->aes_cmac.pn));
+ BUILD_BUG_ON(offsetof(struct ieee80211_key_seq, aes_gmac) !=
+ offsetof(struct ieee80211_key_seq, aes_cmac));
+
+ /* mac80211 expects big endian for memcmp() to work, so convert.
+ * We don't know the key cipher yet, so copy to both cmac and gmac.
+ */
+ for (int i = 0; i < ipn_len; i++) {
+ seq->aes_gmac.pn[i] = key->ipn[ipn_len - i - 1];
+ seq->aes_cmac.pn[i] = key->ipn[ipn_len - i - 1];
+ }
+}
+
+static void
+iwl_mld_convert_igtk_resume_data(struct iwl_mld_wowlan_status *wowlan_status,
+ const struct iwl_wowlan_igtk_status *igtk)
+{
+ BUILD_BUG_ON(sizeof(wowlan_status->igtk.key) < sizeof(igtk->key));
+
+ if (!igtk->key_len)
+ return;
+
+ wowlan_status->igtk.len = igtk->key_len;
+ wowlan_status->igtk.flags = igtk->key_flags;
+ wowlan_status->igtk.id =
+ u32_get_bits(igtk->key_flags,
+ IWL_WOWLAN_IGTK_BIGTK_IDX_MASK) +
+ WOWLAN_IGTK_MIN_INDEX;
+
+ memcpy(wowlan_status->igtk.key, igtk->key, sizeof(igtk->key));
+ iwl_mld_convert_mcast_ipn(&wowlan_status->igtk, igtk);
+}
+
+static void
+iwl_mld_convert_bigtk_resume_data(struct iwl_mld_wowlan_status *wowlan_status,
+ const struct iwl_wowlan_igtk_status *bigtk)
+{
+ int status_idx = 0;
+
+ BUILD_BUG_ON(ARRAY_SIZE(wowlan_status->bigtk) < WOWLAN_BIGTK_KEYS_NUM);
+
+ for (int notif_idx = 0; notif_idx < WOWLAN_BIGTK_KEYS_NUM;
+ notif_idx++) {
+ if (!bigtk[notif_idx].key_len)
+ continue;
+
+ wowlan_status->bigtk[status_idx].len = bigtk[notif_idx].key_len;
+ wowlan_status->bigtk[status_idx].flags =
+ bigtk[notif_idx].key_flags;
+ wowlan_status->bigtk[status_idx].id =
+ u32_get_bits(bigtk[notif_idx].key_flags,
+ IWL_WOWLAN_IGTK_BIGTK_IDX_MASK)
+ + WOWLAN_BIGTK_MIN_INDEX;
+
+ BUILD_BUG_ON(sizeof(wowlan_status->bigtk[status_idx].key) <
+ sizeof(bigtk[notif_idx].key));
+ memcpy(wowlan_status->bigtk[status_idx].key,
+ bigtk[notif_idx].key, sizeof(bigtk[notif_idx].key));
+ iwl_mld_convert_mcast_ipn(&wowlan_status->bigtk[status_idx],
+ &bigtk[notif_idx]);
+ status_idx++;
+ }
+}
+
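+/* Parse the wowlan_info notification into wowlan_status.
+ * Returns true on error, false on success.
+ */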
+static bool
+iwl_mld_handle_wowlan_info_notif(struct iwl_mld *mld,
+ struct iwl_mld_wowlan_status *wowlan_status,
+ struct iwl_rx_packet *pkt)
+{
+ const struct iwl_wowlan_info_notif *notif = (void *)pkt->data;
+ u32 expected_len, len = iwl_rx_packet_payload_len(pkt);
+
+ expected_len = sizeof(*notif);
+
+ if (IWL_FW_CHECK(mld, len < expected_len,
+ "Invalid wowlan_info_notif (expected=%ud got=%ud)\n",
+ expected_len, len))
+ return true;
+
+ if (IWL_FW_CHECK(mld, notif->tid_offloaded_tx != IWL_WOWLAN_OFFLOAD_TID,
+ "Invalid tid_offloaded_tx %d\n",
+ notif->tid_offloaded_tx))
+ return true;
+
+ iwl_mld_convert_gtk_resume_data(mld, wowlan_status, notif->gtk,
+ &notif->gtk[0].sc);
+ iwl_mld_convert_ptk_resume_seq(mld, wowlan_status, &notif->gtk[0].sc);
+ /* only one igtk is passed by FW */
+ iwl_mld_convert_igtk_resume_data(wowlan_status, &notif->igtk[0]);
+ iwl_mld_convert_bigtk_resume_data(wowlan_status, notif->bigtk);
+
+ wowlan_status->replay_ctr = le64_to_cpu(notif->replay_ctr);
+ wowlan_status->pattern_number = le16_to_cpu(notif->pattern_number);
+
+ wowlan_status->tid_offloaded_tx = notif->tid_offloaded_tx;
+ wowlan_status->last_qos_seq = le16_to_cpu(notif->qos_seq_ctr);
+ wowlan_status->num_of_gtk_rekeys =
+ le32_to_cpu(notif->num_of_gtk_rekeys);
+ wowlan_status->wakeup_reasons = le32_to_cpu(notif->wakeup_reasons);
+ /* TODO: mlo_links (task=MLO) */
+ return false;
+}
+
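+/* Copy the wake packet reported by the FW into wowlan_status.
+ * Returns true on error, false on success.
+ */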
+static bool
+iwl_mld_handle_wake_pkt_notif(struct iwl_mld *mld,
+ struct iwl_mld_wowlan_status *wowlan_status,
+ struct iwl_rx_packet *pkt)
+{
+ const struct iwl_wowlan_wake_pkt_notif *notif = (void *)pkt->data;
+ u32 actual_size, len = iwl_rx_packet_payload_len(pkt);
+ u32 expected_size = le32_to_cpu(notif->wake_packet_length);
+
+ if (IWL_FW_CHECK(mld, len < sizeof(*notif),
+ "Invalid WoWLAN wake packet notification (expected size=%zu got=%u)\n",
+ sizeof(*notif), len))
+ return true;
+
+ if (IWL_FW_CHECK(mld, !(wowlan_status->wakeup_reasons &
+ IWL_WOWLAN_WAKEUP_REASON_HAS_WAKEUP_PKT),
+ "Got wake packet but wakeup reason is %x\n",
+ wowlan_status->wakeup_reasons))
+ return true;
+
+ actual_size = len - offsetof(struct iwl_wowlan_wake_pkt_notif,
+ wake_packet);
+
+ /* actual_size includes the padding from the notification; remove it. */
+ if (expected_size < actual_size)
+ actual_size = expected_size;
+ wowlan_status->wake_packet = kmemdup(notif->wake_packet, actual_size,
+ GFP_ATOMIC);
+ if (!wowlan_status->wake_packet)
+ return true;
+
+ wowlan_status->wake_packet_length = expected_size;
+ wowlan_status->wake_packet_bufsize = actual_size;
+
+ return false;
+}
+
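+/* Build the cfg80211 wakeup packet report: data frames are converted to
+ * 802.3, other frames are reported as 802.11, accounting for any bytes
+ * the FW truncated from the end of the packet.
+ */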
+static void
+iwl_mld_set_wake_packet(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ const struct iwl_mld_wowlan_status *wowlan_status,
+ struct cfg80211_wowlan_wakeup *wakeup,
+ struct sk_buff **_pkt)
+{
+ int pkt_bufsize = wowlan_status->wake_packet_bufsize;
+ int expected_pktlen = wowlan_status->wake_packet_length;
+ const u8 *pktdata = wowlan_status->wake_packet;
+ const struct ieee80211_hdr *hdr = (const void *)pktdata;
+ int truncated = expected_pktlen - pkt_bufsize;
+
+ if (ieee80211_is_data(hdr->frame_control)) {
+ int hdrlen = ieee80211_hdrlen(hdr->frame_control);
+ int ivlen = 0, icvlen = 4; /* also FCS */
+
+ struct sk_buff *pkt = alloc_skb(pkt_bufsize, GFP_KERNEL);
+ *_pkt = pkt;
+ if (!pkt)
+ return;
+
+ skb_put_data(pkt, pktdata, hdrlen);
+ pktdata += hdrlen;
+ pkt_bufsize -= hdrlen;
+
+ /* if truncated, FCS/ICV is (partially) gone */
+ if (truncated >= icvlen) {
+ truncated -= icvlen;
+ icvlen = 0;
+ } else {
+ icvlen -= truncated;
+ truncated = 0;
+ }
+
+ pkt_bufsize -= ivlen + icvlen;
+ pktdata += ivlen;
+
+ skb_put_data(pkt, pktdata, pkt_bufsize);
+
+ if (ieee80211_data_to_8023(pkt, vif->addr, vif->type))
+ return;
+ wakeup->packet = pkt->data;
+ wakeup->packet_present_len = pkt->len;
+ wakeup->packet_len = pkt->len - truncated;
+ wakeup->packet_80211 = false;
+ } else {
+ int fcslen = 4;
+
+ if (truncated >= 4) {
+ truncated -= 4;
+ fcslen = 0;
+ } else {
+ fcslen -= truncated;
+ truncated = 0;
+ }
+ pkt_bufsize -= fcslen;
+ wakeup->packet = wowlan_status->wake_packet;
+ wakeup->packet_present_len = pkt_bufsize;
+ wakeup->packet_len = expected_pktlen - truncated;
+ wakeup->packet_80211 = true;
+ }
+}
+
+static void
+iwl_mld_report_wowlan_wakeup(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ struct iwl_mld_wowlan_status *wowlan_status)
+{
+ struct sk_buff *pkt = NULL;
+ struct cfg80211_wowlan_wakeup wakeup = {
+ .pattern_idx = -1,
+ };
+ u32 reasons = wowlan_status->wakeup_reasons;
+
+ if (reasons == IWL_WOWLAN_WAKEUP_BY_NON_WIRELESS) {
+ ieee80211_report_wowlan_wakeup(vif, NULL, GFP_KERNEL);
+ return;
+ }
+
+ pm_wakeup_event(mld->dev, 0);
+
+ if (reasons & IWL_WOWLAN_WAKEUP_BY_MAGIC_PACKET)
+ wakeup.magic_pkt = true;
+
+ if (reasons & IWL_WOWLAN_WAKEUP_BY_PATTERN)
+ wakeup.pattern_idx =
+ wowlan_status->pattern_number;
+
+ if (reasons & (IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_MISSED_BEACON |
+ IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH |
+ IWL_WOWLAN_WAKEUP_BY_GTK_REKEY_FAILURE))
+ wakeup.disconnect = true;
+
+ if (reasons & IWL_WOWLAN_WAKEUP_BY_GTK_REKEY_FAILURE)
+ wakeup.gtk_rekey_failure = true;
+
+ if (reasons & IWL_WOWLAN_WAKEUP_BY_RFKILL_DEASSERTED)
+ wakeup.rfkill_release = true;
+
+ if (reasons & IWL_WOWLAN_WAKEUP_BY_EAPOL_REQUEST)
+ wakeup.eap_identity_req = true;
+
+ if (reasons & IWL_WOWLAN_WAKEUP_BY_FOUR_WAY_HANDSHAKE)
+ wakeup.four_way_handshake = true;
+
+ if (reasons & IWL_WOWLAN_WAKEUP_BY_REM_WAKE_LINK_LOSS)
+ wakeup.tcp_connlost = true;
+
+ if (reasons & IWL_WOWLAN_WAKEUP_BY_REM_WAKE_SIGNATURE_TABLE)
+ wakeup.tcp_nomoretokens = true;
+
+ if (reasons & IWL_WOWLAN_WAKEUP_BY_REM_WAKE_WAKEUP_PACKET)
+ wakeup.tcp_match = true;
+
+ if (reasons & IWL_WAKEUP_BY_11W_UNPROTECTED_DEAUTH_OR_DISASSOC)
+ wakeup.unprot_deauth_disassoc = true;
+
+ if (wowlan_status->wake_packet)
+ iwl_mld_set_wake_packet(mld, vif, wowlan_status, &wakeup, &pkt);
+
+ ieee80211_report_wowlan_wakeup(vif, &wakeup, GFP_KERNEL);
+ kfree_skb(pkt);
+}
+
+static void
+iwl_mld_set_key_rx_seq_tids(struct ieee80211_key_conf *key,
+ struct ieee80211_key_seq *seq)
+{
+ int tid;
+
+ for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++)
+ ieee80211_set_key_rx_seq(key, tid, &seq[tid]);
+}
+
+static void
+iwl_mld_set_key_rx_seq(struct ieee80211_key_conf *key,
+ struct iwl_mld_mcast_key_data *key_data)
+{
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_CCMP:
+ case WLAN_CIPHER_SUITE_GCMP:
+ case WLAN_CIPHER_SUITE_GCMP_256:
+ iwl_mld_set_key_rx_seq_tids(key,
+ key_data->gtk.aes_seq);
+ break;
+ case WLAN_CIPHER_SUITE_TKIP:
+ iwl_mld_set_key_rx_seq_tids(key,
+ key_data->gtk.tkip_seq);
+ break;
+ case WLAN_CIPHER_SUITE_BIP_GMAC_128:
+ case WLAN_CIPHER_SUITE_BIP_GMAC_256:
+ case WLAN_CIPHER_SUITE_BIP_CMAC_256:
+ case WLAN_CIPHER_SUITE_AES_CMAC:
+ /* igtk/bigtk ciphers */
+ ieee80211_set_key_rx_seq(key, 0,
+ &key_data->igtk_bigtk.cmac_gmac_seq);
+ break;
+ default:
+ WARN_ON(1);
+ }
+}
+
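+/* Update (or remove, if it was rekeyed) a group key that was installed
+ * before suspend, based on the data the FW reported on resume.
+ */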
+static void
+iwl_mld_d3_update_mcast_key(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ struct iwl_mld_wowlan_status *wowlan_status,
+ struct ieee80211_key_conf *key,
+ struct iwl_mld_mcast_key_data *key_data)
+{
+ if (key->keyidx != key_data->id &&
+ (key->keyidx < 4 || key->keyidx > 5)) {
+ IWL_ERR(mld,
+ "Unexpected keyId mismatch. Old keyId:%d, New keyId:%d\n",
+ key->keyidx, key_data->id);
+ return;
+ }
+
+ /* The FW sends all installed keys, even those that weren't
+ * rekeyed during D3.
+ * Remove an existing key if it has the same index as a new key
+ * and a rekey occurred during D3.
+ */
+ if (wowlan_status->num_of_gtk_rekeys && key_data->len) {
+ if (key->keyidx == 4 || key->keyidx == 5) {
+ struct iwl_mld_vif *mld_vif =
+ iwl_mld_vif_from_mac80211(vif);
+ struct iwl_mld_link *mld_link;
+ int link_id = vif->active_links ?
+ __ffs(vif->active_links) : 0;
+
+ mld_link = iwl_mld_link_dereference_check(mld_vif,
+ link_id);
+ if (WARN_ON(!mld_link))
+ return;
+
+ if (mld_link->igtk == key)
+ mld_link->igtk = NULL;
+ mld->num_igtks--;
+ }
+
+ ieee80211_remove_key(key);
+ return;
+ }
+
+ iwl_mld_set_key_rx_seq(key, key_data);
+}
+
+static void
+iwl_mld_update_ptk_rx_seq(struct iwl_mld *mld,
+ struct iwl_mld_wowlan_status *wowlan_status,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key,
+ bool is_tkip)
+{
+ struct iwl_mld_sta *mld_sta =
+ iwl_mld_sta_from_mac80211(sta);
+ struct iwl_mld_ptk_pn *mld_ptk_pn =
+ wiphy_dereference(mld->wiphy,
+ mld_sta->ptk_pn[key->keyidx]);
+
+ iwl_mld_set_key_rx_seq_tids(key, is_tkip ?
+ wowlan_status->ptk.tkip_seq :
+ wowlan_status->ptk.aes_seq);
+ if (is_tkip)
+ return;
+
+ if (WARN_ON(!mld_ptk_pn))
+ return;
+
+ for (int tid = 0; tid < IWL_MAX_TID_COUNT; tid++) {
+ for (int i = 1; i < mld->trans->num_rx_queues; i++)
+ memcpy(mld_ptk_pn->q[i].pn[tid],
+ wowlan_status->ptk.aes_seq[tid].ccmp.pn,
+ IEEE80211_CCMP_PN_LEN);
+ }
+}
+
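+/* Key iterator used on resume: updates the RX PN/IV of the PTK, and for
+ * group keys records the cipher and updates or removes the key according
+ * to the rekey status reported by the FW.
+ */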
+static void
+iwl_mld_resume_keys_iter(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key,
+ void *_data)
+{
+ struct iwl_mld_resume_key_iter_data *data = _data;
+ struct iwl_mld_wowlan_status *wowlan_status = data->wowlan_status;
+ u8 status_idx;
+
+ /* TODO: check key link id (task=MLO) */
+ if (data->unhandled_cipher)
+ return;
+
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ case WLAN_CIPHER_SUITE_WEP104:
+ /* ignore WEP completely, nothing to do */
+ return;
+ case WLAN_CIPHER_SUITE_CCMP:
+ case WLAN_CIPHER_SUITE_GCMP:
+ case WLAN_CIPHER_SUITE_GCMP_256:
+ case WLAN_CIPHER_SUITE_TKIP:
+ if (sta) {
+ iwl_mld_update_ptk_rx_seq(data->mld, wowlan_status,
+ sta, key,
+ key->cipher ==
+ WLAN_CIPHER_SUITE_TKIP);
+ return;
+ }
+
+ if (WARN_ON(data->gtk_cipher &&
+ data->gtk_cipher != key->cipher))
+ return;
+
+ data->gtk_cipher = key->cipher;
+ status_idx = key->keyidx == wowlan_status->gtk[1].id;
+ iwl_mld_d3_update_mcast_key(data->mld, vif, wowlan_status, key,
+ &wowlan_status->gtk[status_idx]);
+ break;
+ case WLAN_CIPHER_SUITE_BIP_GMAC_128:
+ case WLAN_CIPHER_SUITE_BIP_GMAC_256:
+ case WLAN_CIPHER_SUITE_BIP_CMAC_256:
+ case WLAN_CIPHER_SUITE_AES_CMAC:
+ if (key->keyidx == 4 || key->keyidx == 5) {
+ if (WARN_ON(data->igtk_cipher &&
+ data->igtk_cipher != key->cipher))
+ return;
+
+ data->igtk_cipher = key->cipher;
+ iwl_mld_d3_update_mcast_key(data->mld, vif,
+ wowlan_status,
+ key, &wowlan_status->igtk);
+ }
+ if (key->keyidx == 6 || key->keyidx == 7) {
+ if (WARN_ON(data->bigtk_cipher &&
+ data->bigtk_cipher != key->cipher))
+ return;
+
+ data->bigtk_cipher = key->cipher;
+ status_idx = key->keyidx == wowlan_status->bigtk[1].id;
+ iwl_mld_d3_update_mcast_key(data->mld, vif,
+ wowlan_status, key,
+ &wowlan_status->bigtk[status_idx]);
+ }
+ break;
+ default:
+ data->unhandled_cipher = true;
+ return;
+ }
+ data->num_keys++;
+}
+
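+/* Install a group key that the FW created during a rekey while suspended.
+ * Returns false if adding the key to mac80211 failed.
+ */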
+static bool
+iwl_mld_add_mcast_rekey(struct ieee80211_vif *vif,
+ struct iwl_mld *mld,
+ struct iwl_mld_mcast_key_data *key_data,
+ struct ieee80211_bss_conf *link_conf,
+ u32 cipher)
+{
+ struct ieee80211_key_conf *key_config;
+ struct {
+ struct ieee80211_key_conf conf;
+ u8 key[WOWLAN_KEY_MAX_SIZE];
+ } conf = {
+ .conf.cipher = cipher,
+ .conf.keyidx = key_data->id,
+ };
+ int link_id = vif->active_links ? __ffs(vif->active_links) : -1;
+
+ BUILD_BUG_ON(WLAN_KEY_LEN_CCMP != WLAN_KEY_LEN_GCMP);
+ BUILD_BUG_ON(sizeof(conf.key) < WLAN_KEY_LEN_CCMP);
+ BUILD_BUG_ON(sizeof(conf.key) < WLAN_KEY_LEN_GCMP_256);
+ BUILD_BUG_ON(sizeof(conf.key) < WLAN_KEY_LEN_TKIP);
+ BUILD_BUG_ON(sizeof(conf.key) < WLAN_KEY_LEN_BIP_GMAC_128);
+ BUILD_BUG_ON(sizeof(conf.key) < WLAN_KEY_LEN_BIP_GMAC_256);
+ BUILD_BUG_ON(sizeof(conf.key) < WLAN_KEY_LEN_AES_CMAC);
+ BUILD_BUG_ON(sizeof(conf.key) < sizeof(key_data->key));
+
+ if (!key_data->len)
+ return true;
+
+ switch (cipher) {
+ case WLAN_CIPHER_SUITE_CCMP:
+ case WLAN_CIPHER_SUITE_GCMP:
+ conf.conf.keylen = WLAN_KEY_LEN_CCMP;
+ break;
+ case WLAN_CIPHER_SUITE_GCMP_256:
+ conf.conf.keylen = WLAN_KEY_LEN_GCMP_256;
+ break;
+ case WLAN_CIPHER_SUITE_TKIP:
+ conf.conf.keylen = WLAN_KEY_LEN_TKIP;
+ break;
+ case WLAN_CIPHER_SUITE_BIP_GMAC_128:
+ conf.conf.keylen = WLAN_KEY_LEN_BIP_GMAC_128;
+ break;
+ case WLAN_CIPHER_SUITE_BIP_GMAC_256:
+ conf.conf.keylen = WLAN_KEY_LEN_BIP_GMAC_256;
+ break;
+ case WLAN_CIPHER_SUITE_AES_CMAC:
+ conf.conf.keylen = WLAN_KEY_LEN_AES_CMAC;
+ break;
+ case WLAN_CIPHER_SUITE_BIP_CMAC_256:
+ conf.conf.keylen = WLAN_KEY_LEN_BIP_CMAC_256;
+ break;
+ default:
+ WARN_ON(1);
+ }
+
+ memcpy(conf.conf.key, key_data->key, conf.conf.keylen);
+ key_config = ieee80211_gtk_rekey_add(vif, &conf.conf, link_id);
+ if (IS_ERR(key_config))
+ return false;
+
+ iwl_mld_set_key_rx_seq(key_config, key_data);
+
+ /* The FW holds only one igtk so we keep track of the valid one */
+ if (key_config->keyidx == 4 || key_config->keyidx == 5) {
+ struct iwl_mld_link *mld_link =
+ iwl_mld_link_from_mac80211(link_conf);
+ mld_link->igtk = key_config;
+ mld->num_igtks++;
+ }
+ return true;
+}
+
+static bool
+iwl_mld_add_all_rekeys(struct ieee80211_vif *vif,
+ struct iwl_mld_wowlan_status *wowlan_status,
+ struct iwl_mld_resume_key_iter_data *key_iter_data,
+ struct ieee80211_bss_conf *link_conf)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(wowlan_status->gtk); i++)
+ if (!iwl_mld_add_mcast_rekey(vif, key_iter_data->mld,
+ &wowlan_status->gtk[i],
+ link_conf,
+ key_iter_data->gtk_cipher))
+ return false;
+
+ if (!iwl_mld_add_mcast_rekey(vif, key_iter_data->mld,
+ &wowlan_status->igtk,
+ link_conf, key_iter_data->igtk_cipher))
+ return false;
+
+ for (i = 0; i < ARRAY_SIZE(wowlan_status->bigtk); i++)
+ if (!iwl_mld_add_mcast_rekey(vif, key_iter_data->mld,
+ &wowlan_status->bigtk[i],
+ link_conf,
+ key_iter_data->bigtk_cipher))
+ return false;
+
+ return true;
+}
+
+static bool
+iwl_mld_update_sec_keys(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ struct iwl_mld_wowlan_status *wowlan_status)
+{
+ int link_id = vif->active_links ? __ffs(vif->active_links) : 0;
+ struct ieee80211_bss_conf *link_conf =
+ link_conf_dereference_protected(vif, link_id);
+ __be64 replay_ctr = cpu_to_be64(wowlan_status->replay_ctr);
+ struct iwl_mld_resume_key_iter_data key_iter_data = {
+ .mld = mld,
+ .wowlan_status = wowlan_status,
+ };
+
+ if (WARN_ON(!link_conf))
+ return false;
+
+ ieee80211_iter_keys(mld->hw, vif, iwl_mld_resume_keys_iter,
+ &key_iter_data);
+
+ if (key_iter_data.unhandled_cipher)
+ return false;
+
+ IWL_DEBUG_WOWLAN(mld,
+ "Number of installed keys: %d, Number of rekeys: %d\n",
+ key_iter_data.num_keys,
+ wowlan_status->num_of_gtk_rekeys);
+
+ if (!key_iter_data.num_keys || !wowlan_status->num_of_gtk_rekeys)
+ return true;
+
+ iwl_mld_add_all_rekeys(vif, wowlan_status, &key_iter_data,
+ link_conf);
+
+ ieee80211_gtk_rekey_notify(vif, link_conf->bssid,
+ (void *)&replay_ctr, GFP_KERNEL);
+ /* TODO: MLO rekey (task=MLO) */
+ return true;
+}
+
+static bool
+iwl_mld_process_wowlan_status(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ struct iwl_mld_wowlan_status *wowlan_status)
+{
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+ struct ieee80211_sta *ap_sta = mld_vif->ap_sta;
+ struct iwl_mld_txq *mld_txq;
+
+ iwl_mld_report_wowlan_wakeup(mld, vif, wowlan_status);
+
+ if (WARN_ON(!ap_sta))
+ return false;
+
+ mld_txq =
+ iwl_mld_txq_from_mac80211(ap_sta->txq[wowlan_status->tid_offloaded_tx]);
+
+ /* Update the pointers of the Tx queue that may have moved during
+ * suspend if the firmware sent frames.
+ * The firmware stores the last-used value; we store the next value.
+ */
+ WARN_ON(!mld_txq->status.allocated);
+ iwl_trans_set_q_ptrs(mld->trans, mld_txq->fw_id,
+ (wowlan_status->last_qos_seq +
+ 0x10) >> 4);
+
+ if (!iwl_mld_update_sec_keys(mld, vif, wowlan_status))
+ return false;
+
+ if (wowlan_status->wakeup_reasons &
+ (IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_MISSED_BEACON |
+ IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH |
+ IWL_WOWLAN_WAKEUP_BY_GTK_REKEY_FAILURE))
+ return false;
+
+ return true;
+}
+
+static bool
+iwl_mld_netdetect_match_info_handler(struct iwl_mld *mld,
+ struct iwl_mld_resume_data *resume_data,
+ struct iwl_rx_packet *pkt)
+{
+ struct iwl_mld_netdetect_res *results = resume_data->netdetect_res;
+ const struct iwl_scan_offload_match_info *notif = (void *)pkt->data;
+ u32 len = iwl_rx_packet_payload_len(pkt);
+
+ if (IWL_FW_CHECK(mld, !mld->netdetect,
+ "Got scan match info notif when mld->netdetect==%d\n",
+ mld->netdetect))
+ return true;
+
+ if (IWL_FW_CHECK(mld, len < sizeof(*notif),
+ "Invalid scan offload match notif of length: %d\n",
+ len))
+ return true;
+
+ if (IWL_FW_CHECK(mld, resume_data->wowlan_status->wakeup_reasons !=
+ IWL_WOWLAN_WAKEUP_BY_NON_WIRELESS,
+ "Ignore scan match info: unexpected wakeup reason (expected=0x%x got=0x%x)\n",
+ IWL_WOWLAN_WAKEUP_BY_NON_WIRELESS,
+ resume_data->wowlan_status->wakeup_reasons))
+ return true;
+
+ results->matched_profiles = le32_to_cpu(notif->matched_profiles);
+ IWL_DEBUG_WOWLAN(mld, "number of matched profiles=%u\n",
+ results->matched_profiles);
+
+ if (results->matched_profiles)
+ memcpy(results->matches, notif->matches,
+ NETDETECT_QUERY_BUF_LEN);
+
+ /* No scan should be active at this point */
+ mld->scan.status = 0;
+ memset(mld->scan.uid_status, 0, sizeof(mld->scan.uid_status));
+ return false;
+}
+
+static void
+iwl_mld_set_netdetect_info(struct iwl_mld *mld,
+ const struct cfg80211_sched_scan_request *netdetect_cfg,
+ struct cfg80211_wowlan_nd_info *netdetect_info,
+ struct iwl_mld_netdetect_res *netdetect_res,
+ unsigned long matched_profiles)
+{
+ int i;
+
+ for_each_set_bit(i, &matched_profiles, netdetect_cfg->n_match_sets) {
+ struct cfg80211_wowlan_nd_match *match;
+ int idx, j, n_channels = 0;
+ struct iwl_scan_offload_profile_match *matches =
+ (void *)netdetect_res->matches;
+
+ for (int k = 0; k < SCAN_OFFLOAD_MATCHING_CHANNELS_LEN; k++)
+ n_channels +=
+ hweight8(matches[i].matching_channels[k]);
+ match = kzalloc(struct_size(match, channels, n_channels),
+ GFP_KERNEL);
+ if (!match)
+ return;
+
+ netdetect_info->matches[netdetect_info->n_matches++] = match;
+
+ /* We inverted the order of the SSIDs in the scan
+ * request, so invert the index here.
+ */
+ idx = netdetect_cfg->n_match_sets - i - 1;
+ match->ssid.ssid_len =
+ netdetect_cfg->match_sets[idx].ssid.ssid_len;
+ memcpy(match->ssid.ssid,
+ netdetect_cfg->match_sets[idx].ssid.ssid,
+ match->ssid.ssid_len);
+
+ if (netdetect_cfg->n_channels < n_channels)
+ continue;
+
+ for_each_set_bit(j,
+ (unsigned long *)&matches[i].matching_channels[0],
+ sizeof(matches[i].matching_channels))
+ match->channels[match->n_channels++] =
+ netdetect_cfg->channels[j]->center_freq;
+ }
+}
+
+static void
+iwl_mld_process_netdetect_res(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ struct iwl_mld_resume_data *resume_data)
+{
+ struct cfg80211_wowlan_nd_info *netdetect_info = NULL;
+ const struct cfg80211_sched_scan_request *netdetect_cfg;
+ struct cfg80211_wowlan_wakeup wakeup = {
+ .pattern_idx = -1,
+ };
+ struct cfg80211_wowlan_wakeup *wakeup_report = &wakeup;
+ unsigned long matched_profiles;
+ u32 wakeup_reasons;
+ int n_matches;
+
+ lockdep_assert_wiphy(mld->wiphy);
+
+ if (WARN_ON(!mld->wiphy->wowlan_config ||
+ !mld->wiphy->wowlan_config->nd_config)) {
+ IWL_DEBUG_WOWLAN(mld,
+ "Netdetect isn't configured on resume flow\n");
+ goto out;
+ }
+
+ netdetect_cfg = mld->wiphy->wowlan_config->nd_config;
+ wakeup_reasons = resume_data->wowlan_status->wakeup_reasons;
+
+ if (wakeup_reasons & IWL_WOWLAN_WAKEUP_BY_RFKILL_DEASSERTED)
+ wakeup.rfkill_release = true;
+
+ if (wakeup_reasons != IWL_WOWLAN_WAKEUP_BY_NON_WIRELESS)
+ goto out;
+
+ if (!resume_data->netdetect_res->matched_profiles) {
+ IWL_DEBUG_WOWLAN(mld,
+ "Netdetect results aren't valid\n");
+ wakeup_report = NULL;
+ goto out;
+ }
+
+ matched_profiles = resume_data->netdetect_res->matched_profiles;
+ if (!netdetect_cfg->n_match_sets) {
+ IWL_DEBUG_WOWLAN(mld,
+ "No netdetect match sets are configured\n");
+ goto out;
+ }
+ n_matches = hweight_long(matched_profiles);
+ netdetect_info = kzalloc(struct_size(netdetect_info, matches,
+ n_matches), GFP_KERNEL);
+ if (netdetect_info)
+ iwl_mld_set_netdetect_info(mld, netdetect_cfg, netdetect_info,
+ resume_data->netdetect_res,
+ matched_profiles);
+
+ wakeup.net_detect = netdetect_info;
+ out:
+ ieee80211_report_wowlan_wakeup(vif, wakeup_report, GFP_KERNEL);
+ if (netdetect_info) {
+ for (int i = 0; i < netdetect_info->n_matches; i++)
+ kfree(netdetect_info->matches[i]);
+ kfree(netdetect_info);
+ }
+}
+
+static bool iwl_mld_handle_d3_notif(struct iwl_notif_wait_data *notif_wait,
+ struct iwl_rx_packet *pkt, void *data)
+{
+ struct iwl_mld_resume_data *resume_data = data;
+ struct iwl_mld *mld =
+ container_of(notif_wait, struct iwl_mld, notif_wait);
+
+ switch (WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd)) {
+ case WIDE_ID(PROT_OFFLOAD_GROUP, WOWLAN_INFO_NOTIFICATION): {
+ if (resume_data->notifs_received & IWL_D3_NOTIF_WOWLAN_INFO) {
+ IWL_DEBUG_WOWLAN(mld,
+ "got additional wowlan_info notif\n");
+ break;
+ }
+ resume_data->notif_handling_err =
+ iwl_mld_handle_wowlan_info_notif(mld,
+ resume_data->wowlan_status,
+ pkt);
+ resume_data->notifs_received |= IWL_D3_NOTIF_WOWLAN_INFO;
+
+ if (resume_data->wowlan_status->wakeup_reasons &
+ IWL_WOWLAN_WAKEUP_REASON_HAS_WAKEUP_PKT)
+ resume_data->notifs_expected |=
+ IWL_D3_NOTIF_WOWLAN_WAKE_PKT;
+ break;
+ }
+ case WIDE_ID(PROT_OFFLOAD_GROUP, WOWLAN_WAKE_PKT_NOTIFICATION): {
+ if (resume_data->notifs_received &
+ IWL_D3_NOTIF_WOWLAN_WAKE_PKT) {
+ /* We shouldn't get two wake packet notifications */
+ IWL_DEBUG_WOWLAN(mld,
+ "Got additional wowlan wake packet notification\n");
+ break;
+ }
+ resume_data->notif_handling_err =
+ iwl_mld_handle_wake_pkt_notif(mld,
+ resume_data->wowlan_status,
+ pkt);
+ resume_data->notifs_received |= IWL_D3_NOTIF_WOWLAN_WAKE_PKT;
+ break;
+ }
+ case WIDE_ID(SCAN_GROUP, OFFLOAD_MATCH_INFO_NOTIF): {
+ if (resume_data->notifs_received & IWL_D3_ND_MATCH_INFO) {
+ IWL_ERR(mld,
+ "Got additional netdetect match info\n");
+ break;
+ }
+
+ resume_data->notif_handling_err =
+ iwl_mld_netdetect_match_info_handler(mld, resume_data,
+ pkt);
+ resume_data->notifs_received |= IWL_D3_ND_MATCH_INFO;
+ break;
+ }
+ case WIDE_ID(PROT_OFFLOAD_GROUP, D3_END_NOTIFICATION): {
+ struct iwl_d3_end_notif *notif = (void *)pkt->data;
+
+ resume_data->d3_end_flags = le32_to_cpu(notif->flags);
+ resume_data->notifs_received |= IWL_D3_NOTIF_D3_END_NOTIF;
+ break;
+ }
+ default:
+ WARN_ON(1);
+ }
+
+ return resume_data->notifs_received == resume_data->notifs_expected;
+}
+
+#define IWL_MLD_D3_NOTIF_TIMEOUT (HZ / 3)
+
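+/* Resume the transport and wait for the D3 resume notifications:
+ * wowlan info, wake packet, net-detect matches and D3 end when wowlan
+ * was configured, or only D3 end otherwise.
+ */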
+static int iwl_mld_wait_d3_notif(struct iwl_mld *mld,
+ struct iwl_mld_resume_data *resume_data,
+ bool with_wowlan)
+{
+ static const u16 wowlan_resume_notif[] = {
+ WIDE_ID(PROT_OFFLOAD_GROUP, WOWLAN_INFO_NOTIFICATION),
+ WIDE_ID(PROT_OFFLOAD_GROUP, WOWLAN_WAKE_PKT_NOTIFICATION),
+ WIDE_ID(SCAN_GROUP, OFFLOAD_MATCH_INFO_NOTIF),
+ WIDE_ID(PROT_OFFLOAD_GROUP, D3_END_NOTIFICATION)
+ };
+ static const u16 d3_resume_notif[] = {
+ WIDE_ID(PROT_OFFLOAD_GROUP, D3_END_NOTIFICATION)
+ };
+ struct iwl_notification_wait wait_d3_notif;
+ enum iwl_d3_status d3_status;
+ int ret;
+
+ if (with_wowlan)
+ iwl_init_notification_wait(&mld->notif_wait, &wait_d3_notif,
+ wowlan_resume_notif,
+ ARRAY_SIZE(wowlan_resume_notif),
+ iwl_mld_handle_d3_notif,
+ resume_data);
+ else
+ iwl_init_notification_wait(&mld->notif_wait, &wait_d3_notif,
+ d3_resume_notif,
+ ARRAY_SIZE(d3_resume_notif),
+ iwl_mld_handle_d3_notif,
+ resume_data);
+
+ ret = iwl_trans_d3_resume(mld->trans, &d3_status, false, false);
+ if (ret || d3_status != IWL_D3_STATUS_ALIVE) {
+ if (d3_status != IWL_D3_STATUS_ALIVE) {
+ IWL_INFO(mld, "Device was reset during suspend\n");
+ ret = -ENOENT;
+ } else {
+ IWL_ERR(mld, "Transport resume failed\n");
+ }
+ iwl_remove_notification(&mld->notif_wait, &wait_d3_notif);
+ return ret;
+ }
+
+ ret = iwl_wait_notification(&mld->notif_wait, &wait_d3_notif,
+ IWL_MLD_D3_NOTIF_TIMEOUT);
+ if (ret)
+ IWL_ERR(mld, "Couldn't get the d3 notif %d\n", ret);
+
+ if (resume_data->notif_handling_err)
+ ret = -EIO;
+
+ return ret;
+}
+
+int iwl_mld_no_wowlan_suspend(struct iwl_mld *mld)
+{
+ struct iwl_d3_manager_config d3_cfg_cmd_data = {};
+ int ret;
+
+ lockdep_assert_wiphy(mld->wiphy);
+
+ IWL_DEBUG_WOWLAN(mld, "Starting the no wowlan suspend flow\n");
+
+ iwl_mld_low_latency_stop(mld);
+
+ /* This will happen if iwl_mld_suspend failed with a FW error */
+ if (mld->trans->state == IWL_TRANS_NO_FW &&
+ test_bit(STATUS_FW_ERROR, &mld->trans->status))
+ return -ENODEV;
+
+ ret = iwl_mld_update_device_power(mld, true);
+ if (ret) {
+ IWL_ERR(mld,
+ "d3 suspend: couldn't send power_device %d\n", ret);
+ goto out;
+ }
+
+ ret = iwl_mld_send_cmd_pdu(mld, D3_CONFIG_CMD,
+ &d3_cfg_cmd_data);
+ if (ret) {
+ IWL_ERR(mld,
+ "d3 suspend: couldn't send D3_CONFIG_CMD %d\n", ret);
+ goto out;
+ }
+
+ ret = iwl_trans_d3_suspend(mld->trans, false, false);
+ if (ret) {
+ IWL_ERR(mld, "d3 suspend: trans_d3_suspend failed %d\n", ret);
+ } else {
+ mld->trans->system_pm_mode = IWL_PLAT_PM_MODE_D3;
+ mld->fw_status.in_d3 = true;
+ }
+
+ out:
+ if (ret) {
+ mld->trans->state = IWL_TRANS_NO_FW;
+ set_bit(STATUS_FW_ERROR, &mld->trans->status);
+ }
+
+ return ret;
+}
+
+int iwl_mld_no_wowlan_resume(struct iwl_mld *mld)
+{
+ struct iwl_mld_resume_data resume_data = {
+ .notifs_expected =
+ IWL_D3_NOTIF_D3_END_NOTIF,
+ };
+ int ret;
+
+ lockdep_assert_wiphy(mld->wiphy);
+
+ IWL_DEBUG_WOWLAN(mld, "Starting the no wowlan resume flow\n");
+
+ mld->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED;
+ mld->fw_status.in_d3 = false;
+ iwl_fw_dbg_read_d3_debug_data(&mld->fwrt);
+
+ if (iwl_mld_fw_needs_restart(mld, NULL))
+ ret = -ENODEV;
+ else
+ ret = iwl_mld_wait_d3_notif(mld, &resume_data, false);
+
+ if (!ret && (resume_data.d3_end_flags & IWL_D0I3_RESET_REQUIRE))
+ return -ENODEV;
+
+ if (ret) {
+ mld->trans->state = IWL_TRANS_NO_FW;
+ set_bit(STATUS_FW_ERROR, &mld->trans->status);
+ return ret;
+ }
+ iwl_mld_low_latency_restart(mld);
+
+ return iwl_mld_update_device_power(mld, false);
+}
+
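+/* Pack the per-TID CCMP/GCMP RX PN into the little-endian 64-bit RSC
+ * format used by the FW.
+ */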
+static void
+iwl_mld_aes_seq_to_le64_pn(struct ieee80211_key_conf *key,
+ __le64 *key_rsc)
+{
+ for (int i = 0; i < IWL_MAX_TID_COUNT; i++) {
+ struct ieee80211_key_seq seq;
+ u8 *pn = key->cipher == WLAN_CIPHER_SUITE_CCMP ? seq.ccmp.pn :
+ seq.gcmp.pn;
+
+ ieee80211_get_key_rx_seq(key, i, &seq);
+ key_rsc[i] = cpu_to_le64((u64)pn[5] |
+ ((u64)pn[4] << 8) |
+ ((u64)pn[3] << 16) |
+ ((u64)pn[2] << 24) |
+ ((u64)pn[1] << 32) |
+ ((u64)pn[0] << 40));
+ }
+}
+
+static void
+iwl_mld_suspend_set_ucast_pn(struct iwl_mld *mld, struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key, __le64 *key_rsc)
+{
+ struct iwl_mld_sta *mld_sta =
+ iwl_mld_sta_from_mac80211(sta);
+ struct iwl_mld_ptk_pn *mld_ptk_pn;
+
+ if (WARN_ON(key->keyidx >= ARRAY_SIZE(mld_sta->ptk_pn)))
+ return;
+
+ mld_ptk_pn = wiphy_dereference(mld->wiphy,
+ mld_sta->ptk_pn[key->keyidx]);
+ if (WARN_ON(!mld_ptk_pn))
+ return;
+
+ for (int tid = 0; tid < IWL_MAX_TID_COUNT; tid++) {
+ struct ieee80211_key_seq seq;
+ u8 *max_pn = seq.ccmp.pn;
+
+ /* get the PN from mac80211, used on the default queue */
+ ieee80211_get_key_rx_seq(key, tid, &seq);
+
+ /* and use the internal data for all queues */
+ for (int que = 1; que < mld->trans->num_rx_queues; que++) {
+ u8 *cur_pn = mld_ptk_pn->q[que].pn[tid];
+
+ if (memcmp(max_pn, cur_pn, IEEE80211_CCMP_PN_LEN) < 0)
+ max_pn = cur_pn;
+ }
+ key_rsc[tid] = cpu_to_le64((u64)max_pn[5] |
+ ((u64)max_pn[4] << 8) |
+ ((u64)max_pn[3] << 16) |
+ ((u64)max_pn[2] << 24) |
+ ((u64)max_pn[1] << 32) |
+ ((u64)max_pn[0] << 40));
+ }
+}
+
+static void
+iwl_mld_suspend_convert_tkip_ipn(struct ieee80211_key_conf *key,
+ __le64 *rsc)
+{
+ struct ieee80211_key_seq seq;
+
+ for (int i = 0; i < IWL_MAX_TID_COUNT; i++) {
+ ieee80211_get_key_rx_seq(key, i, &seq);
+ rsc[i] =
+ cpu_to_le64(((u64)seq.tkip.iv32 << 16) |
+ seq.tkip.iv16);
+ }
+}
+
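+/* Key iterator used on suspend: collects the RX PNs/RSCs of the PTK and
+ * GTKs and the ciphers in use, so they can be sent to the FW.
+ */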
+static void
+iwl_mld_suspend_key_data_iter(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key,
+ void *_data)
+{
+ struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+ struct iwl_mld_suspend_key_iter_data *data = _data;
+ __le64 *key_rsc;
+ __le32 cipher = 0;
+
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_CCMP:
+ cipher = cpu_to_le32(STA_KEY_FLG_CCM);
+ fallthrough;
+ case WLAN_CIPHER_SUITE_GCMP:
+ case WLAN_CIPHER_SUITE_GCMP_256:
+ if (!cipher)
+ cipher = cpu_to_le32(STA_KEY_FLG_GCMP);
+ fallthrough;
+ case WLAN_CIPHER_SUITE_TKIP:
+ if (!cipher)
+ cipher = cpu_to_le32(STA_KEY_FLG_TKIP);
+ if (sta) {
+ key_rsc = data->rsc->ucast_rsc;
+ if (key->cipher == WLAN_CIPHER_SUITE_TKIP)
+ iwl_mld_suspend_convert_tkip_ipn(key, key_rsc);
+ else
+ iwl_mld_suspend_set_ucast_pn(mld, sta, key,
+ key_rsc);
+
+ data->have_rsc = true;
+ return;
+ }
+ /* We iterate from the oldest key to the newest; there are four
+ * possible GTK ids, and only the last two keys matter.
+ */
+ if (WARN_ON(data->gtks >=
+ ARRAY_SIZE(data->found_gtk_idx)))
+ return;
+
+ if (WARN_ON(key->keyidx >=
+ ARRAY_SIZE(data->rsc->mcast_key_id_map)))
+ return;
+ data->gtk_cipher = cipher;
+ data->found_gtk_idx[data->gtks] = key->keyidx;
+ key_rsc = data->rsc->mcast_rsc[data->gtks % 2];
+ data->rsc->mcast_key_id_map[key->keyidx] =
+ data->gtks % 2;
+
+ if (data->gtks >= 2) {
+ int prev = data->gtks % 2;
+ int prev_idx = data->found_gtk_idx[prev];
+
+ data->rsc->mcast_key_id_map[prev_idx] =
+ IWL_MCAST_KEY_MAP_INVALID;
+ }
+
+ if (key->cipher == WLAN_CIPHER_SUITE_TKIP)
+ iwl_mld_suspend_convert_tkip_ipn(key, key_rsc);
+ else
+ iwl_mld_aes_seq_to_le64_pn(key, key_rsc);
+
+ data->gtks++;
+ data->have_rsc = true;
+ break;
+ case WLAN_CIPHER_SUITE_BIP_GMAC_128:
+ case WLAN_CIPHER_SUITE_BIP_GMAC_256:
+ cipher = cpu_to_le32(STA_KEY_FLG_GCMP);
+ fallthrough;
+ case WLAN_CIPHER_SUITE_BIP_CMAC_256:
+ case WLAN_CIPHER_SUITE_AES_CMAC:
+ if (!cipher)
+ cipher = cpu_to_le32(STA_KEY_FLG_CCM);
+ if (key->keyidx == 4 || key->keyidx == 5)
+ data->igtk_cipher = cipher;
+
+ if (key->keyidx == 6 || key->keyidx == 7)
+ data->bigtk_cipher = cipher;
+
+ break;
+ }
+}
+
+static int
+iwl_mld_send_kek_kck_cmd(struct iwl_mld *mld,
+ struct iwl_mld_vif *mld_vif,
+ struct iwl_mld_suspend_key_iter_data data,
+ int ap_sta_id)
+{
+ struct iwl_wowlan_kek_kck_material_cmd_v4 kek_kck_cmd = {};
+ struct iwl_mld_rekey_data *rekey_data =
+ &mld_vif->wowlan_data.rekey_data;
+
+ memcpy(kek_kck_cmd.kck, rekey_data->kck,
+ rekey_data->kck_len);
+ kek_kck_cmd.kck_len = cpu_to_le16(rekey_data->kck_len);
+ memcpy(kek_kck_cmd.kek, rekey_data->kek,
+ rekey_data->kek_len);
+ kek_kck_cmd.kek_len = cpu_to_le16(rekey_data->kek_len);
+ kek_kck_cmd.replay_ctr = rekey_data->replay_ctr;
+ kek_kck_cmd.akm = cpu_to_le32(rekey_data->akm);
+ kek_kck_cmd.sta_id = cpu_to_le32(ap_sta_id);
+ kek_kck_cmd.gtk_cipher = data.gtk_cipher;
+ kek_kck_cmd.igtk_cipher = data.igtk_cipher;
+ kek_kck_cmd.bigtk_cipher = data.bigtk_cipher;
+
+ IWL_DEBUG_WOWLAN(mld, "setting akm %d\n",
+ rekey_data->akm);
+
+ return iwl_mld_send_cmd_pdu(mld, WOWLAN_KEK_KCK_MATERIAL,
+ &kek_kck_cmd);
+}
+
+static int
+iwl_mld_suspend_send_security_cmds(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ struct iwl_mld_vif *mld_vif,
+ int ap_sta_id)
+{
+ struct iwl_mld_suspend_key_iter_data data = {};
+ int ret;
+
+ data.rsc = kzalloc(sizeof(*data.rsc), GFP_KERNEL);
+ if (!data.rsc)
+ return -ENOMEM;
+
+ memset(data.rsc->mcast_key_id_map, IWL_MCAST_KEY_MAP_INVALID,
+ ARRAY_SIZE(data.rsc->mcast_key_id_map));
+
+ data.rsc->sta_id = cpu_to_le32(ap_sta_id);
+ ieee80211_iter_keys(mld->hw, vif,
+ iwl_mld_suspend_key_data_iter,
+ &data);
+
+ if (data.have_rsc)
+ ret = iwl_mld_send_cmd_pdu(mld, WOWLAN_TSC_RSC_PARAM,
+ data.rsc);
+ else
+ ret = 0;
+
+ if (!ret && mld_vif->wowlan_data.rekey_data.valid)
+ ret = iwl_mld_send_kek_kck_cmd(mld, mld_vif, data, ap_sta_id);
+
+ kfree(data.rsc);
+
+ return ret;
+}
+
+static void
+iwl_mld_set_wowlan_config_cmd(struct iwl_mld *mld,
+ struct cfg80211_wowlan *wowlan,
+ struct iwl_wowlan_config_cmd *wowlan_config_cmd,
+ struct ieee80211_sta *ap_sta)
+{
+ wowlan_config_cmd->is_11n_connection =
+ ap_sta->deflink.ht_cap.ht_supported;
+ wowlan_config_cmd->flags = ENABLE_L3_FILTERING |
+ ENABLE_NBNS_FILTERING | ENABLE_DHCP_FILTERING;
+
+ if (ap_sta->mfp)
+ wowlan_config_cmd->flags |= IS_11W_ASSOC;
+
+ if (wowlan->disconnect)
+ wowlan_config_cmd->wakeup_filter |=
+ cpu_to_le32(IWL_WOWLAN_WAKEUP_BEACON_MISS |
+ IWL_WOWLAN_WAKEUP_LINK_CHANGE);
+ if (wowlan->magic_pkt)
+ wowlan_config_cmd->wakeup_filter |=
+ cpu_to_le32(IWL_WOWLAN_WAKEUP_MAGIC_PACKET);
+ if (wowlan->gtk_rekey_failure)
+ wowlan_config_cmd->wakeup_filter |=
+ cpu_to_le32(IWL_WOWLAN_WAKEUP_GTK_REKEY_FAIL);
+ if (wowlan->eap_identity_req)
+ wowlan_config_cmd->wakeup_filter |=
+ cpu_to_le32(IWL_WOWLAN_WAKEUP_EAP_IDENT_REQ);
+ if (wowlan->four_way_handshake)
+ wowlan_config_cmd->wakeup_filter |=
+ cpu_to_le32(IWL_WOWLAN_WAKEUP_4WAY_HANDSHAKE);
+ if (wowlan->n_patterns)
+ wowlan_config_cmd->wakeup_filter |=
+ cpu_to_le32(IWL_WOWLAN_WAKEUP_PATTERN_MATCH);
+
+ if (wowlan->rfkill_release)
+ wowlan_config_cmd->wakeup_filter |=
+ cpu_to_le32(IWL_WOWLAN_WAKEUP_RF_KILL_DEASSERT);
+
+ if (wowlan->any) {
+ wowlan_config_cmd->wakeup_filter |=
+ cpu_to_le32(IWL_WOWLAN_WAKEUP_BEACON_MISS |
+ IWL_WOWLAN_WAKEUP_LINK_CHANGE |
+ IWL_WOWLAN_WAKEUP_RX_FRAME |
+ IWL_WOWLAN_WAKEUP_BCN_FILTERING);
+ }
+}
+
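+/* Send the user-configured wowlan patterns to the FW; does nothing when
+ * no patterns are configured.
+ */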
+static int iwl_mld_send_patterns(struct iwl_mld *mld,
+ struct cfg80211_wowlan *wowlan,
+ int ap_sta_id)
+{
+ struct iwl_wowlan_patterns_cmd *pattern_cmd;
+ struct iwl_host_cmd cmd = {
+ .id = WOWLAN_PATTERNS,
+ .dataflags[0] = IWL_HCMD_DFL_NOCOPY,
+ };
+ int ret;
+
+ if (!wowlan->n_patterns)
+ return 0;
+
+ cmd.len[0] = struct_size(pattern_cmd, patterns, wowlan->n_patterns);
+
+ pattern_cmd = kzalloc(cmd.len[0], GFP_KERNEL);
+ if (!pattern_cmd)
+ return -ENOMEM;
+
+ pattern_cmd->n_patterns = wowlan->n_patterns;
+ pattern_cmd->sta_id = ap_sta_id;
+
+ for (int i = 0; i < wowlan->n_patterns; i++) {
+ int mask_len = DIV_ROUND_UP(wowlan->patterns[i].pattern_len, 8);
+
+ pattern_cmd->patterns[i].pattern_type =
+ WOWLAN_PATTERN_TYPE_BITMASK;
+
+ memcpy(&pattern_cmd->patterns[i].u.bitmask.mask,
+ wowlan->patterns[i].mask, mask_len);
+ memcpy(&pattern_cmd->patterns[i].u.bitmask.pattern,
+ wowlan->patterns[i].pattern,
+ wowlan->patterns[i].pattern_len);
+ pattern_cmd->patterns[i].u.bitmask.mask_size = mask_len;
+ pattern_cmd->patterns[i].u.bitmask.pattern_size =
+ wowlan->patterns[i].pattern_len;
+ }
+
+ cmd.data[0] = pattern_cmd;
+ ret = iwl_mld_send_cmd(mld, &cmd);
+ kfree(pattern_cmd);
+ return ret;
+}
+
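+/* Configure protocol offload (ARP and, with CONFIG_IPV6, NS) for the
+ * given station, so the FW can answer such requests while suspended.
+ */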
+static int
+iwl_mld_send_proto_offload(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ u8 ap_sta_id)
+{
+ struct iwl_proto_offload_cmd_v4 *cmd __free(kfree);
+ struct iwl_host_cmd hcmd = {
+ .id = PROT_OFFLOAD_CONFIG_CMD,
+ .dataflags[0] = IWL_HCMD_DFL_NOCOPY,
+ .len[0] = sizeof(*cmd),
+ };
+ u32 enabled = 0;
+
+ cmd = kzalloc(hcmd.len[0], GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+#if IS_ENABLED(CONFIG_IPV6)
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+ struct iwl_mld_wowlan_data *wowlan_data = &mld_vif->wowlan_data;
+ struct iwl_ns_config *nsc;
+ struct iwl_targ_addr *addrs;
+ int n_nsc, n_addrs;
+ int i, c;
+ int num_skipped = 0;
+
+ nsc = cmd->ns_config;
+ n_nsc = IWL_PROTO_OFFLOAD_NUM_NS_CONFIG_V3L;
+ addrs = cmd->targ_addrs;
+ n_addrs = IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V3L;
+
+ /* For each address we have (and that will fit), fill a target
+ * address struct and combine addresses that share a solicited-node
+ * address into a single NS offload config.
+ */
+ for (i = 0, c = 0;
+ i < wowlan_data->num_target_ipv6_addrs &&
+ i < n_addrs && c < n_nsc; i++) {
+ int j;
+ struct in6_addr solicited_addr;
+
+ /* Because NS is offloaded, skip tentative addresses to avoid
+ * violating RFC 4862.
+ */
+ if (test_bit(i, wowlan_data->tentative_addrs)) {
+ num_skipped++;
+ continue;
+ }
+
+ addrconf_addr_solict_mult(&wowlan_data->target_ipv6_addrs[i],
+ &solicited_addr);
+ for (j = 0; j < c; j++)
+ if (ipv6_addr_cmp(&nsc[j].dest_ipv6_addr,
+ &solicited_addr) == 0)
+ break;
+ if (j == c)
+ c++;
+ addrs[i].addr = wowlan_data->target_ipv6_addrs[i];
+ addrs[i].config_num = cpu_to_le32(j);
+ nsc[j].dest_ipv6_addr = solicited_addr;
+ memcpy(nsc[j].target_mac_addr, vif->addr, ETH_ALEN);
+ }
+
+ if (wowlan_data->num_target_ipv6_addrs - num_skipped)
+ enabled |= IWL_D3_PROTO_IPV6_VALID;
+
+ cmd->num_valid_ipv6_addrs = cpu_to_le32(i - num_skipped);
+ if (enabled & IWL_D3_PROTO_IPV6_VALID)
+ enabled |= IWL_D3_PROTO_OFFLOAD_NS;
+#endif
+
+ if (vif->cfg.arp_addr_cnt) {
+ enabled |= IWL_D3_PROTO_OFFLOAD_ARP | IWL_D3_PROTO_IPV4_VALID;
+ cmd->common.host_ipv4_addr = vif->cfg.arp_addr_list[0];
+ ether_addr_copy(cmd->common.arp_mac_addr, vif->addr);
+ }
+
+ enabled |= IWL_D3_PROTO_OFFLOAD_BTM;
+ cmd->common.enabled = cpu_to_le32(enabled);
+ cmd->sta_id = cpu_to_le32(ap_sta_id);
+ hcmd.data[0] = cmd;
+ return iwl_mld_send_cmd(mld, &hcmd);
+}
+
+static int
+iwl_mld_wowlan_config(struct iwl_mld *mld, struct ieee80211_vif *bss_vif,
+ struct cfg80211_wowlan *wowlan)
+{
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(bss_vif);
+ struct ieee80211_sta *ap_sta = mld_vif->ap_sta;
+ struct iwl_wowlan_config_cmd wowlan_config_cmd = {
+ .offloading_tid = IWL_WOWLAN_OFFLOAD_TID,
+ };
+ u32 sta_id_mask;
+ int ap_sta_id, ret;
+ int link_id = iwl_mld_get_primary_link(bss_vif);
+ struct ieee80211_bss_conf *link_conf;
+
+ ret = iwl_mld_block_emlsr_sync(mld, bss_vif,
+ IWL_MLD_EMLSR_BLOCKED_WOWLAN, link_id);
+ if (ret)
+ return ret;
+
+ link_conf = link_conf_dereference_protected(bss_vif, link_id);
+
+ if (WARN_ON(!ap_sta || !link_conf))
+ return -EINVAL;
+
+ sta_id_mask = iwl_mld_fw_sta_id_mask(mld, ap_sta);
+ if (WARN_ON(hweight32(sta_id_mask) != 1))
+ return -EINVAL;
+
+ ap_sta_id = __ffs(sta_id_mask);
+ wowlan_config_cmd.sta_id = ap_sta_id;
+
+ ret = iwl_mld_ensure_queue(mld,
+ ap_sta->txq[wowlan_config_cmd.offloading_tid]);
+ if (ret)
+ return ret;
+
+ iwl_mld_set_wowlan_config_cmd(mld, wowlan,
+ &wowlan_config_cmd, ap_sta);
+ ret = iwl_mld_send_cmd_pdu(mld, WOWLAN_CONFIGURATION,
+ &wowlan_config_cmd);
+ if (ret)
+ return ret;
+
+ ret = iwl_mld_suspend_send_security_cmds(mld, bss_vif, mld_vif,
+ ap_sta_id);
+ if (ret)
+ return ret;
+
+ ret = iwl_mld_send_patterns(mld, wowlan, ap_sta_id);
+ if (ret)
+ return ret;
+
+ ret = iwl_mld_send_proto_offload(mld, bss_vif, ap_sta_id);
+ if (ret)
+ return ret;
+
+ iwl_mld_enable_beacon_filter(mld, link_conf, true);
+ return iwl_mld_update_mac_power(mld, bss_vif, true);
+}
+
+int iwl_mld_wowlan_suspend(struct iwl_mld *mld, struct cfg80211_wowlan *wowlan)
+{
+ struct ieee80211_vif *bss_vif;
+
+ lockdep_assert_wiphy(mld->wiphy);
+
+ if (WARN_ON(!wowlan))
+ return 1;
+
+ IWL_DEBUG_WOWLAN(mld, "Starting the wowlan suspend flow\n");
+
+ bss_vif = iwl_mld_get_bss_vif(mld);
+ if (WARN_ON(!bss_vif))
+ return 1;
+
+ if (!bss_vif->cfg.assoc) {
+ int ret;
+ /* If we're not associated, this must be netdetect */
+ if (WARN_ON(!wowlan->nd_config))
+ return 1;
+
+ ret = iwl_mld_netdetect_config(mld, bss_vif, wowlan);
+ if (!ret)
+ mld->netdetect = true;
+
+ return ret;
+ }
+
+ return iwl_mld_wowlan_config(mld, bss_vif, wowlan);
+}
+
+/* Returns 0 on success, 1 if an error occurred in the firmware during D3,
+ * and a negative value only in unrecoverable cases.
+ */
+int iwl_mld_wowlan_resume(struct iwl_mld *mld)
+{
+ struct ieee80211_vif *bss_vif;
+ struct ieee80211_bss_conf *link_conf;
+ struct iwl_mld_netdetect_res netdetect_res;
+ struct iwl_mld_resume_data resume_data = {
+ .notifs_expected =
+ IWL_D3_NOTIF_WOWLAN_INFO |
+ IWL_D3_NOTIF_D3_END_NOTIF,
+ .netdetect_res = &netdetect_res,
+ };
+ int link_id;
+ int ret;
+ bool fw_err = false;
+ bool keep_connection;
+
+ lockdep_assert_wiphy(mld->wiphy);
+
+ IWL_DEBUG_WOWLAN(mld, "Starting the wowlan resume flow\n");
+
+ mld->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED;
+ if (!mld->fw_status.in_d3) {
+ IWL_DEBUG_WOWLAN(mld,
+ "Device_powered_off() was called during wowlan\n");
+ goto err;
+ }
+
+ mld->fw_status.in_d3 = false;
+ mld->scan.last_start_time_jiffies = jiffies;
+
+ bss_vif = iwl_mld_get_bss_vif(mld);
+ if (WARN_ON(!bss_vif))
+ goto err;
+
+ /* We can't have several links upon wowlan entry,
+ * this is enforced in the suspend flow.
+ */
+ WARN_ON(hweight16(bss_vif->active_links) > 1);
+ link_id = bss_vif->active_links ? __ffs(bss_vif->active_links) : 0;
+ link_conf = link_conf_dereference_protected(bss_vif, link_id);
+
+ if (WARN_ON(!link_conf))
+ goto err;
+
+ iwl_fw_dbg_read_d3_debug_data(&mld->fwrt);
+
+ if (iwl_mld_fw_needs_restart(mld, bss_vif)) {
+ fw_err = true;
+ goto err;
+ }
+
+ resume_data.wowlan_status = kzalloc(sizeof(*resume_data.wowlan_status),
+ GFP_KERNEL);
+ if (!resume_data.wowlan_status)
+ return -1;
+
+ if (mld->netdetect)
+ resume_data.notifs_expected |= IWL_D3_ND_MATCH_INFO;
+
+ ret = iwl_mld_wait_d3_notif(mld, &resume_data, true);
+ if (ret) {
+ IWL_ERR(mld, "Couldn't get the d3 notifs %d\n", ret);
+ fw_err = true;
+ goto err;
+ }
+
+ if (resume_data.d3_end_flags & IWL_D0I3_RESET_REQUIRE) {
+ mld->fw_status.in_hw_restart = true;
+ goto process_wakeup_results;
+ }
+
+ iwl_mld_update_changed_regdomain(mld);
+ iwl_mld_update_mac_power(mld, bss_vif, false);
+ iwl_mld_enable_beacon_filter(mld, link_conf, false);
+ iwl_mld_update_device_power(mld, false);
+
+ if (mld->netdetect)
+ ret = iwl_mld_scan_stop(mld, IWL_MLD_SCAN_NETDETECT, false);
+
+ process_wakeup_results:
+ if (mld->netdetect) {
+ iwl_mld_process_netdetect_res(mld, bss_vif, &resume_data);
+ mld->netdetect = false;
+ } else {
+ keep_connection =
+ iwl_mld_process_wowlan_status(mld, bss_vif,
+ resume_data.wowlan_status);
+
+ /* EMLSR state will be cleared if the connection is not kept */
+ if (keep_connection)
+ iwl_mld_unblock_emlsr(mld, bss_vif,
+ IWL_MLD_EMLSR_BLOCKED_WOWLAN);
+ else
+ ieee80211_resume_disconnect(bss_vif);
+ }
+
+ goto out;
+
+ err:
+ if (fw_err) {
+ mld->trans->state = IWL_TRANS_NO_FW;
+ set_bit(STATUS_FW_ERROR, &mld->trans->status);
+ }
+
+ mld->fw_status.in_hw_restart = true;
+ ret = 1;
+ out:
+ if (resume_data.wowlan_status) {
+ kfree(resume_data.wowlan_status->wake_packet);
+ kfree(resume_data.wowlan_status);
+ }
+
+ return ret;
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/d3.h b/drivers/net/wireless/intel/iwlwifi/mld/d3.h
new file mode 100644
index 000000000000..618d6fb3c796
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mld/d3.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2024 Intel Corporation
+ */
+#ifndef __iwl_mld_d3_h__
+#define __iwl_mld_d3_h__
+
+#include "fw/api/d3.h"
+
+struct iwl_mld_rekey_data {
+ bool valid;
+ u8 kck[NL80211_KCK_EXT_LEN];
+ u8 kek[NL80211_KEK_EXT_LEN];
+ size_t kck_len;
+ size_t kek_len;
+ __le64 replay_ctr;
+ u32 akm;
+};
+
+/**
+ * struct iwl_mld_wowlan_data - data used by the wowlan suspend flow
+ *
+ * @target_ipv6_addrs: IPv6 addresses on this interface for offload
+ * @tentative_addrs: bitmap of tentative IPv6 addresses in @target_ipv6_addrs
+ * @num_target_ipv6_addrs: number of @target_ipv6_addrs
+ * @rekey_data: security key data used for rekeying during D3
+ */
+struct iwl_mld_wowlan_data {
+#if IS_ENABLED(CONFIG_IPV6)
+ struct in6_addr target_ipv6_addrs[IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_MAX];
+ unsigned long tentative_addrs[BITS_TO_LONGS(IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_MAX)];
+ int num_target_ipv6_addrs;
+#endif
+ struct iwl_mld_rekey_data rekey_data;
+};
+
+int iwl_mld_no_wowlan_resume(struct iwl_mld *mld);
+int iwl_mld_no_wowlan_suspend(struct iwl_mld *mld);
+int iwl_mld_wowlan_suspend(struct iwl_mld *mld,
+ struct cfg80211_wowlan *wowlan);
+int iwl_mld_wowlan_resume(struct iwl_mld *mld);
+void iwl_mld_set_rekey_data(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct cfg80211_gtk_rekey_data *data);
+#if IS_ENABLED(CONFIG_IPV6)
+void iwl_mld_ipv6_addr_change(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct inet6_dev *idev);
+#endif
+
+#endif /* __iwl_mld_d3_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mld/debugfs.c
new file mode 100644
index 000000000000..453ce2ba39d1
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mld/debugfs.c
@@ -0,0 +1,1082 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2024-2025 Intel Corporation
+ */
+
+#include "mld.h"
+#include "debugfs.h"
+#include "iwl-io.h"
+#include "hcmd.h"
+#include "iface.h"
+#include "sta.h"
+#include "tlc.h"
+#include "power.h"
+#include "notif.h"
+#include "ap.h"
+#include "iwl-utils.h"
+#include "scan.h"
+#ifdef CONFIG_THERMAL
+#include "thermal.h"
+#endif
+
+#include "fw/api/rs.h"
+#include "fw/api/dhc.h"
+#include "fw/api/rfi.h"
+#include "fw/dhc-utils.h"
+#include <linux/dmi.h>
+
+#define MLD_DEBUGFS_READ_FILE_OPS(name, bufsz) \
+ _MLD_DEBUGFS_READ_FILE_OPS(name, bufsz, struct iwl_mld)
+
+#define MLD_DEBUGFS_ADD_FILE_ALIAS(alias, name, parent, mode) \
+ debugfs_create_file(alias, mode, parent, mld, \
+ &iwl_dbgfs_##name##_ops)
+#define MLD_DEBUGFS_ADD_FILE(name, parent, mode) \
+ MLD_DEBUGFS_ADD_FILE_ALIAS(#name, name, parent, mode)
+
+static bool iwl_mld_dbgfs_fw_cmd_disabled(struct iwl_mld *mld)
+{
+#ifdef CONFIG_PM_SLEEP
+ return !mld->fw_status.running || mld->fw_status.in_d3;
+#else
+ return !mld->fw_status.running;
+#endif /* CONFIG_PM_SLEEP */
+}
+
+static ssize_t iwl_dbgfs_fw_dbg_clear_write(struct iwl_mld *mld,
+ char *buf, size_t count)
+{
+ /* If the firmware is not running, silently succeed since there is
+ * no data to clear.
+ */
+ if (iwl_mld_dbgfs_fw_cmd_disabled(mld))
+ return 0;
+
+ iwl_fw_dbg_clear_monitor_buf(&mld->fwrt);
+
+ return count;
+}
+
+static ssize_t iwl_dbgfs_fw_nmi_write(struct iwl_mld *mld, char *buf,
+ size_t count)
+{
+ if (iwl_mld_dbgfs_fw_cmd_disabled(mld))
+ return -EIO;
+
+ IWL_ERR(mld, "Triggering an NMI from debugfs\n");
+
+ if (count == 6 && !strcmp(buf, "nolog\n"))
+ mld->fw_status.do_not_dump_once = true;
+
+ iwl_force_nmi(mld->trans);
+
+ return count;
+}
+
+static ssize_t iwl_dbgfs_fw_restart_write(struct iwl_mld *mld, char *buf,
+ size_t count)
+{
+ int __maybe_unused ret;
+
+ if (!iwlwifi_mod_params.fw_restart)
+ return -EPERM;
+
+ if (iwl_mld_dbgfs_fw_cmd_disabled(mld))
+ return -EIO;
+
+ if (count == 6 && !strcmp(buf, "nolog\n")) {
+ mld->fw_status.do_not_dump_once = true;
+ set_bit(STATUS_SUPPRESS_CMD_ERROR_ONCE, &mld->trans->status);
+ }
+
+ /* take the return value to make compiler happy - it will
+ * fail anyway
+ */
+ ret = iwl_mld_send_cmd_empty(mld, WIDE_ID(LONG_GROUP, REPLY_ERROR));
+
+ return count;
+}
+
+static ssize_t iwl_dbgfs_send_echo_cmd_write(struct iwl_mld *mld, char *buf,
+ size_t count)
+{
+ if (iwl_mld_dbgfs_fw_cmd_disabled(mld))
+ return -EIO;
+
+ return iwl_mld_send_cmd_empty(mld, ECHO_CMD) ?: count;
+}
+
+struct iwl_mld_sniffer_apply {
+ struct iwl_mld *mld;
+ const u8 *bssid;
+ u16 aid;
+};
+
+static bool iwl_mld_sniffer_apply(struct iwl_notif_wait_data *notif_data,
+ struct iwl_rx_packet *pkt, void *data)
+{
+ struct iwl_mld_sniffer_apply *apply = data;
+
+ apply->mld->monitor.cur_aid = cpu_to_le16(apply->aid);
+ memcpy(apply->mld->monitor.cur_bssid, apply->bssid,
+ sizeof(apply->mld->monitor.cur_bssid));
+
+ return true;
+}
+
+static ssize_t
+iwl_dbgfs_he_sniffer_params_write(struct iwl_mld *mld, char *buf,
+ size_t count)
+{
+ struct iwl_notification_wait wait;
+ struct iwl_he_monitor_cmd he_mon_cmd = {};
+ struct iwl_mld_sniffer_apply apply = {
+ .mld = mld,
+ };
+ u16 wait_cmds[] = {
+ WIDE_ID(DATA_PATH_GROUP, HE_AIR_SNIFFER_CONFIG_CMD),
+ };
+ u32 aid;
+ int ret;
+
+ if (iwl_mld_dbgfs_fw_cmd_disabled(mld))
+ return -EIO;
+
+ if (!mld->monitor.on)
+ return -ENODEV;
+
+ ret = sscanf(buf, "%x %2hhx:%2hhx:%2hhx:%2hhx:%2hhx:%2hhx", &aid,
+ &he_mon_cmd.bssid[0], &he_mon_cmd.bssid[1],
+ &he_mon_cmd.bssid[2], &he_mon_cmd.bssid[3],
+ &he_mon_cmd.bssid[4], &he_mon_cmd.bssid[5]);
+ if (ret != 7)
+ return -EINVAL;
+
+ he_mon_cmd.aid = cpu_to_le16(aid);
+
+ apply.aid = aid;
+ apply.bssid = (void *)he_mon_cmd.bssid;
+
+ /* Use the notification waiter to get our function triggered
+ * in sequence with other RX. This ensures that frames we get
+ * on the RX queue _before_ the new configuration is applied
+ * still have mld->cur_aid pointing to the old AID, and that
+ * frames on the RX queue _after_ the firmware processed the
+ * new configuration (and sent the response, synchronously)
+ * get mld->cur_aid correctly set to the new AID.
+ */
+ iwl_init_notification_wait(&mld->notif_wait, &wait,
+ wait_cmds, ARRAY_SIZE(wait_cmds),
+ iwl_mld_sniffer_apply, &apply);
+
+ ret = iwl_mld_send_cmd_pdu(mld,
+ WIDE_ID(DATA_PATH_GROUP,
+ HE_AIR_SNIFFER_CONFIG_CMD),
+ &he_mon_cmd);
+
+ /* no need to really wait, we already did anyway */
+ iwl_remove_notification(&mld->notif_wait, &wait);
+
+ return ret ?: count;
+}
+
+static ssize_t
+iwl_dbgfs_he_sniffer_params_read(struct iwl_mld *mld, char *buf, size_t count)
+{
+ return scnprintf(buf, count,
+ "%d %02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx\n",
+ le16_to_cpu(mld->monitor.cur_aid),
+ mld->monitor.cur_bssid[0], mld->monitor.cur_bssid[1],
+ mld->monitor.cur_bssid[2], mld->monitor.cur_bssid[3],
+ mld->monitor.cur_bssid[4], mld->monitor.cur_bssid[5]);
+}
+
+/* The size computation is as follows:
+ * each number needs at most 3 characters, and the number of rows is the
+ * size of the table. Each row needs 5 chars for the "freq: " part, and
+ * each tuple afterwards needs 6 characters for the numbers and 5 for the
+ * punctuation around them, plus 32 bytes for the feature support message.
+ */
+#define IWL_RFI_DDR_BUF_SIZE (IWL_RFI_DDR_LUT_INSTALLED_SIZE *\
+ (5 + IWL_RFI_DDR_LUT_ENTRY_CHANNELS_NUM *\
+ (6 + 5)) + 32)
+#define IWL_RFI_DLVR_BUF_SIZE (IWL_RFI_DLVR_LUT_INSTALLED_SIZE *\
+ (5 + IWL_RFI_DLVR_LUT_ENTRY_CHANNELS_NUM *\
+ (6 + 5)) + 32)
+#define IWL_RFI_DESENSE_BUF_SIZE IWL_RFI_DDR_BUF_SIZE
+
+/* Extra 32 for "DDR and DLVR table" message */
+#define IWL_RFI_BUF_SIZE (IWL_RFI_DDR_BUF_SIZE + IWL_RFI_DLVR_BUF_SIZE +\
+ IWL_RFI_DESENSE_BUF_SIZE + 32)
+
+static size_t iwl_mld_dump_tas_resp(struct iwl_dhc_tas_status_resp *resp,
+ size_t count, u8 *buf)
+{
+ const char * const tas_dis_reason[TAS_DISABLED_REASON_MAX] = {
+ [TAS_DISABLED_DUE_TO_BIOS] =
+ "Due To BIOS",
+ [TAS_DISABLED_DUE_TO_SAR_6DBM] =
+ "Due To SAR Limit Less Than 6 dBm",
+ [TAS_DISABLED_REASON_INVALID] =
+ "N/A",
+ [TAS_DISABLED_DUE_TO_TABLE_SOURCE_INVALID] =
+ "Due to table source invalid"
+ };
+ const char * const tas_current_status[TAS_DYNA_STATUS_MAX] = {
+ [TAS_DYNA_INACTIVE] = "INACTIVE",
+ [TAS_DYNA_INACTIVE_MVM_MODE] =
+ "inactive due to mvm mode",
+ [TAS_DYNA_INACTIVE_TRIGGER_MODE] =
+ "inactive due to trigger mode",
+ [TAS_DYNA_INACTIVE_BLOCK_LISTED] =
+ "inactive due to block listed",
+ [TAS_DYNA_INACTIVE_UHB_NON_US] =
+ "inactive due to uhb non US",
+ [TAS_DYNA_ACTIVE] = "ACTIVE",
+ };
+ ssize_t pos = 0;
+
+ if (resp->header.version != 1) {
+ pos += scnprintf(buf + pos, count - pos,
+ "Unsupported TAS response version:%d",
+ resp->header.version);
+ return pos;
+ }
+
+ pos += scnprintf(buf + pos, count - pos, "TAS Report\n");
+ switch (resp->tas_config_info.table_source) {
+ case BIOS_SOURCE_NONE:
+ pos += scnprintf(buf + pos, count - pos,
+ "BIOS SOURCE NONE ");
+ break;
+ case BIOS_SOURCE_ACPI:
+ pos += scnprintf(buf + pos, count - pos,
+ "BIOS SOURCE ACPI ");
+ break;
+ case BIOS_SOURCE_UEFI:
+ pos += scnprintf(buf + pos, count - pos,
+ "BIOS SOURCE UEFI ");
+ break;
+ default:
+ pos += scnprintf(buf + pos, count - pos,
+ "BIOS SOURCE UNKNOWN (%d) ",
+ resp->tas_config_info.table_source);
+ break;
+ }
+
+ pos += scnprintf(buf + pos, count - pos,
+ "revision is: %d data is: 0x%08x\n",
+ resp->tas_config_info.table_revision,
+ resp->tas_config_info.value);
+ pos += scnprintf(buf + pos, count - pos, "Current MCC: 0x%x\n",
+ le16_to_cpu(resp->curr_mcc));
+
+ pos += scnprintf(buf + pos, count - pos, "Block list entries:");
+ for (int i = 0; i < ARRAY_SIZE(resp->mcc_block_list); i++)
+ pos += scnprintf(buf + pos, count - pos, " 0x%x",
+ le16_to_cpu(resp->mcc_block_list[i]));
+
+ pos += scnprintf(buf + pos, count - pos,
+ "\nDo TAS Support Dual Radio?: %s\n",
+ hweight8(resp->valid_radio_mask) > 1 ?
+ "TRUE" : "FALSE");
+
+ for (int i = 0; i < ARRAY_SIZE(resp->tas_status_radio); i++) {
+ int tmp;
+ unsigned long dynamic_status;
+
+ if (!(resp->valid_radio_mask & BIT(i)))
+ continue;
+
+ pos += scnprintf(buf + pos, count - pos,
+ "TAS report for radio:%d\n", i + 1);
+ pos += scnprintf(buf + pos, count - pos,
+ "Static status: %sabled\n",
+ resp->tas_status_radio[i].static_status ?
+ "En" : "Dis");
+ if (!resp->tas_status_radio[i].static_status) {
+ u8 static_disable_reason =
+ resp->tas_status_radio[i].static_disable_reason;
+
+ pos += scnprintf(buf + pos, count - pos,
+ "\tStatic Disabled Reason: ");
+ if (static_disable_reason >= TAS_DISABLED_REASON_MAX) {
+ pos += scnprintf(buf + pos, count - pos,
+ "unsupported value (%d)\n",
+ static_disable_reason);
+ continue;
+ }
+
+ pos += scnprintf(buf + pos, count - pos,
+ "%s (%d)\n",
+ tas_dis_reason[static_disable_reason],
+ static_disable_reason);
+ continue;
+ }
+
+ pos += scnprintf(buf + pos, count - pos, "\tANT A %s and ",
+ (resp->tas_status_radio[i].dynamic_status_ant_a
+ & BIT(TAS_DYNA_ACTIVE)) ? "ON" : "OFF");
+
+ pos += scnprintf(buf + pos, count - pos, "ANT B %s for ",
+ (resp->tas_status_radio[i].dynamic_status_ant_b
+ & BIT(TAS_DYNA_ACTIVE)) ? "ON" : "OFF");
+
+ switch (resp->tas_status_radio[i].band) {
+ case PHY_BAND_5:
+ pos += scnprintf(buf + pos, count - pos, "HB\n");
+ break;
+ case PHY_BAND_24:
+ pos += scnprintf(buf + pos, count - pos, "LB\n");
+ break;
+ case PHY_BAND_6:
+ pos += scnprintf(buf + pos, count - pos, "UHB\n");
+ break;
+ default:
+ pos += scnprintf(buf + pos, count - pos,
+ "Unsupported band (%d)\n",
+ resp->tas_status_radio[i].band);
+ break;
+ }
+
+ pos += scnprintf(buf + pos, count - pos,
+ "Is near disconnection?: %s\n",
+ resp->tas_status_radio[i].near_disconnection ?
+ "True" : "False");
+
+ pos += scnprintf(buf + pos, count - pos,
+ "Dynamic status antenna A:\n");
+ dynamic_status = resp->tas_status_radio[i].dynamic_status_ant_a;
+ for_each_set_bit(tmp, &dynamic_status, TAS_DYNA_STATUS_MAX) {
+ pos += scnprintf(buf + pos, count - pos, "\t%s (%d)\n",
+ tas_current_status[tmp], tmp);
+ }
+ pos += scnprintf(buf + pos, count - pos,
+ "\nDynamic status antenna B:\n");
+ dynamic_status = resp->tas_status_radio[i].dynamic_status_ant_b;
+ for_each_set_bit(tmp, &dynamic_status, TAS_DYNA_STATUS_MAX) {
+ pos += scnprintf(buf + pos, count - pos, "\t%s (%d)\n",
+ tas_current_status[tmp], tmp);
+ }
+
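+		/* Power values are encoded in 1/8 dBm units: the integer part
+		 * is tmp / 8 and the remainder is printed as milli-dBm.
+		 */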
+ tmp = le16_to_cpu(resp->tas_status_radio[i].max_reg_pwr_limit_ant_a);
+ pos += scnprintf(buf + pos, count - pos,
+ "Max antenna A regulatory pwr limit (dBm): %d.%03d\n",
+ tmp / 8, 125 * (tmp % 8));
+ tmp = le16_to_cpu(resp->tas_status_radio[i].max_reg_pwr_limit_ant_b);
+ pos += scnprintf(buf + pos, count - pos,
+ "Max antenna B regulatory pwr limit (dBm): %d.%03d\n",
+ tmp / 8, 125 * (tmp % 8));
+
+ tmp = le16_to_cpu(resp->tas_status_radio[i].sar_limit_ant_a);
+ pos += scnprintf(buf + pos, count - pos,
+ "Antenna A SAR limit (dBm): %d.%03d\n",
+ tmp / 8, 125 * (tmp % 8));
+ tmp = le16_to_cpu(resp->tas_status_radio[i].sar_limit_ant_b);
+ pos += scnprintf(buf + pos, count - pos,
+ "Antenna B SAR limit (dBm): %d.%03d\n",
+ tmp / 8, 125 * (tmp % 8));
+ }
+
+ return pos;
+}
+
+static ssize_t iwl_dbgfs_tas_get_status_read(struct iwl_mld *mld, char *buf,
+ size_t count)
+{
+ struct iwl_dhc_cmd cmd = {
+ .index_and_mask = cpu_to_le32(DHC_TABLE_TOOLS |
+ DHC_TARGET_UMAC |
+ DHC_TOOLS_UMAC_GET_TAS_STATUS),
+ };
+ struct iwl_host_cmd hcmd = {
+ .id = WIDE_ID(LEGACY_GROUP, DEBUG_HOST_COMMAND),
+ .flags = CMD_WANT_SKB,
+ .len[0] = sizeof(cmd),
+ .data[0] = &cmd,
+ };
+ struct iwl_dhc_tas_status_resp *resp = NULL;
+ ssize_t pos = 0;
+ u32 resp_len;
+ u32 status;
+ int ret;
+
+ if (iwl_mld_dbgfs_fw_cmd_disabled(mld))
+ return -EIO;
+
+ ret = iwl_mld_send_cmd(mld, &hcmd);
+ if (ret)
+ return ret;
+
+ pos += scnprintf(buf + pos, count - pos, "\nOEM name: %s\n",
+ dmi_get_system_info(DMI_SYS_VENDOR) ?: "<unknown>");
+ pos += scnprintf(buf + pos, count - pos,
+ "\tVendor In Approved List: %s\n",
+ iwl_is_tas_approved() ? "YES" : "NO");
+
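+	/* A DHC response status of 1 indicates success */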
+ status = iwl_dhc_resp_status(mld->fwrt.fw, hcmd.resp_pkt);
+ if (status != 1) {
+ pos += scnprintf(buf + pos, count - pos,
+ "response status is not success: %d\n",
+ status);
+ goto out;
+ }
+
+ resp = iwl_dhc_resp_data(mld->fwrt.fw, hcmd.resp_pkt, &resp_len);
+ if (IS_ERR(resp) || resp_len != sizeof(*resp)) {
+ pos += scnprintf(buf + pos, count - pos,
+ "Invalid size for TAS response (%u instead of %zd)\n",
+ resp_len, sizeof(*resp));
+ goto out;
+ }
+
+ pos += iwl_mld_dump_tas_resp(resp, count - pos, buf + pos);
+
+out:
+ iwl_free_resp(&hcmd);
+ return pos;
+}
+
+WIPHY_DEBUGFS_WRITE_FILE_OPS_MLD(fw_nmi, 10);
+WIPHY_DEBUGFS_WRITE_FILE_OPS_MLD(fw_restart, 10);
+WIPHY_DEBUGFS_READ_WRITE_FILE_OPS_MLD(he_sniffer_params, 32);
+WIPHY_DEBUGFS_WRITE_FILE_OPS_MLD(fw_dbg_clear, 10);
+WIPHY_DEBUGFS_WRITE_FILE_OPS_MLD(send_echo_cmd, 8);
+WIPHY_DEBUGFS_READ_FILE_OPS_MLD(tas_get_status, 2048);
+
+static ssize_t iwl_dbgfs_wifi_6e_enable_read(struct iwl_mld *mld,
+ size_t count, u8 *buf)
+{
+ int err;
+ u32 value;
+
+ err = iwl_bios_get_dsm(&mld->fwrt, DSM_FUNC_ENABLE_6E, &value);
+ if (err)
+ return err;
+
+ return scnprintf(buf, count, "0x%08x\n", value);
+}
+
+MLD_DEBUGFS_READ_FILE_OPS(wifi_6e_enable, 64);
+
+static ssize_t iwl_dbgfs_inject_packet_write(struct iwl_mld *mld,
+ char *buf, size_t count)
+{
+ struct iwl_op_mode *opmode = container_of((void *)mld,
+ struct iwl_op_mode,
+ op_mode_specific);
+ struct iwl_rx_cmd_buffer rxb = {};
+ struct iwl_rx_packet *pkt;
+ int n_bytes = count / 2;
+ int ret = -EINVAL;
+
+ if (iwl_mld_dbgfs_fw_cmd_disabled(mld))
+ return -EIO;
+
+ rxb._page = alloc_pages(GFP_KERNEL, 0);
+ if (!rxb._page)
+ return -ENOMEM;
+ pkt = rxb_addr(&rxb);
+
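+	/* The input is a hex string, so every two characters encode one byte */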
+ ret = hex2bin(page_address(rxb._page), buf, n_bytes);
+ if (ret)
+ goto out;
+
+ /* avoid invalid memory access and malformed packet */
+ if (n_bytes < sizeof(*pkt) ||
+ n_bytes != sizeof(*pkt) + iwl_rx_packet_payload_len(pkt))
+ goto out;
+
+ local_bh_disable();
+ iwl_mld_rx(opmode, NULL, &rxb);
+ local_bh_enable();
+ ret = 0;
+
+out:
+ iwl_free_rxb(&rxb);
+
+ return ret ?: count;
+}
+
+WIPHY_DEBUGFS_WRITE_FILE_OPS_MLD(inject_packet, 512);
+
+#ifdef CONFIG_THERMAL
+
+static ssize_t iwl_dbgfs_stop_ctdp_write(struct iwl_mld *mld,
+ char *buf, size_t count)
+{
+ if (iwl_mld_dbgfs_fw_cmd_disabled(mld))
+ return -EIO;
+
+ return iwl_mld_config_ctdp(mld, mld->cooling_dev.cur_state,
+ CTDP_CMD_OPERATION_STOP) ? : count;
+}
+
+WIPHY_DEBUGFS_WRITE_FILE_OPS_MLD(stop_ctdp, 8);
+
+static ssize_t iwl_dbgfs_start_ctdp_write(struct iwl_mld *mld,
+ char *buf, size_t count)
+{
+ if (iwl_mld_dbgfs_fw_cmd_disabled(mld))
+ return -EIO;
+
+ return iwl_mld_config_ctdp(mld, mld->cooling_dev.cur_state,
+ CTDP_CMD_OPERATION_START) ? : count;
+}
+
+WIPHY_DEBUGFS_WRITE_FILE_OPS_MLD(start_ctdp, 8);
+
+#endif /* CONFIG_THERMAL */
+
+void
+iwl_mld_add_debugfs_files(struct iwl_mld *mld, struct dentry *debugfs_dir)
+{
+ /* Add debugfs files here */
+
+ MLD_DEBUGFS_ADD_FILE(fw_nmi, debugfs_dir, 0200);
+ MLD_DEBUGFS_ADD_FILE(fw_restart, debugfs_dir, 0200);
+ MLD_DEBUGFS_ADD_FILE(wifi_6e_enable, debugfs_dir, 0400);
+ MLD_DEBUGFS_ADD_FILE(he_sniffer_params, debugfs_dir, 0600);
+ MLD_DEBUGFS_ADD_FILE(fw_dbg_clear, debugfs_dir, 0200);
+ MLD_DEBUGFS_ADD_FILE(send_echo_cmd, debugfs_dir, 0200);
+ MLD_DEBUGFS_ADD_FILE(tas_get_status, debugfs_dir, 0400);
+#ifdef CONFIG_THERMAL
+ MLD_DEBUGFS_ADD_FILE(start_ctdp, debugfs_dir, 0200);
+ MLD_DEBUGFS_ADD_FILE(stop_ctdp, debugfs_dir, 0200);
+#endif
+ MLD_DEBUGFS_ADD_FILE(inject_packet, debugfs_dir, 0200);
+
+	/* Create a symlink with mac80211. It will be removed when mac80211
+	 * exits (before the opmode exits, which removes the target).
+	 */
+ if (!IS_ERR(debugfs_dir)) {
+ char buf[100];
+
+ snprintf(buf, 100, "../../%pd2", debugfs_dir->d_parent);
+ debugfs_create_symlink("iwlwifi", mld->wiphy->debugfsdir,
+ buf);
+ }
+}
+
+#define VIF_DEBUGFS_WRITE_FILE_OPS(name, bufsz) \
+ WIPHY_DEBUGFS_WRITE_FILE_OPS(vif_##name, bufsz, vif)
+
+#define VIF_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz) \
+ IEEE80211_WIPHY_DEBUGFS_READ_WRITE_FILE_OPS(vif_##name, bufsz, vif) \
+
+#define VIF_DEBUGFS_ADD_FILE_ALIAS(alias, name, parent, mode) \
+ debugfs_create_file(alias, mode, parent, vif, \
+ &iwl_dbgfs_vif_##name##_ops)
+#define VIF_DEBUGFS_ADD_FILE(name, parent, mode) \
+ VIF_DEBUGFS_ADD_FILE_ALIAS(#name, name, parent, mode)
+
+static ssize_t iwl_dbgfs_vif_bf_params_write(struct iwl_mld *mld, char *buf,
+ size_t count, void *data)
+{
+ struct ieee80211_vif *vif = data;
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+ int link_id = vif->active_links ? __ffs(vif->active_links) : 0;
+ struct ieee80211_bss_conf *link_conf;
+ int val;
+
+ if (!strncmp("bf_enable_beacon_filter=", buf, 24)) {
+ if (sscanf(buf + 24, "%d", &val) != 1)
+ return -EINVAL;
+ } else {
+ return -EINVAL;
+ }
+
+ if (val != 0 && val != 1)
+ return -EINVAL;
+
+ link_conf = link_conf_dereference_protected(vif, link_id);
+ if (WARN_ON(!link_conf))
+ return -ENODEV;
+
+ if (iwl_mld_dbgfs_fw_cmd_disabled(mld))
+ return -EIO;
+
+ mld_vif->disable_bf = !val;
+
+ if (val)
+ return iwl_mld_enable_beacon_filter(mld, link_conf,
+ false) ?: count;
+ else
+ return iwl_mld_disable_beacon_filter(mld, vif) ?: count;
+}
+
+static ssize_t iwl_dbgfs_vif_pm_params_write(struct iwl_mld *mld,
+ char *buf,
+ size_t count, void *data)
+{
+ struct ieee80211_vif *vif = data;
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+ int val;
+
+ if (!strncmp("use_ps_poll=", buf, 12)) {
+ if (sscanf(buf + 12, "%d", &val) != 1)
+ return -EINVAL;
+ } else {
+ return -EINVAL;
+ }
+
+ if (iwl_mld_dbgfs_fw_cmd_disabled(mld))
+ return -EIO;
+
+ mld_vif->use_ps_poll = val;
+
+ return iwl_mld_update_mac_power(mld, vif, false) ?: count;
+}
+
+static ssize_t iwl_dbgfs_vif_low_latency_write(struct iwl_mld *mld,
+ char *buf, size_t count,
+ void *data)
+{
+ struct ieee80211_vif *vif = data;
+ u8 value;
+ int ret;
+
+ ret = kstrtou8(buf, 0, &value);
+ if (ret)
+ return ret;
+
+ if (value > 1)
+ return -EINVAL;
+
+ iwl_mld_vif_update_low_latency(mld, vif, value, LOW_LATENCY_DEBUGFS);
+
+ return count;
+}
+
+static ssize_t iwl_dbgfs_vif_low_latency_read(struct ieee80211_vif *vif,
+ size_t count, char *buf)
+{
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+ char format[] = "traffic=%d\ndbgfs=%d\nvif_type=%d\nactual=%d\n";
+ u8 ll_causes;
+
+ if (WARN_ON(count < sizeof(format)))
+ return -EINVAL;
+
+ ll_causes = READ_ONCE(mld_vif->low_latency_causes);
+
+	/* All values in format are boolean, so the size of format is enough
+	 * to hold the result string.
+	 */
+ return scnprintf(buf, count, format,
+ !!(ll_causes & LOW_LATENCY_TRAFFIC),
+ !!(ll_causes & LOW_LATENCY_DEBUGFS),
+ !!(ll_causes & LOW_LATENCY_VIF_TYPE),
+ !!(ll_causes));
+}
+
+VIF_DEBUGFS_WRITE_FILE_OPS(pm_params, 32);
+VIF_DEBUGFS_WRITE_FILE_OPS(bf_params, 32);
+VIF_DEBUGFS_READ_WRITE_FILE_OPS(low_latency, 45);
+
+static int
+_iwl_dbgfs_inject_beacon_ie(struct iwl_mld *mld, struct ieee80211_vif *vif,
+ char *bin, ssize_t len,
+ bool restore)
+{
+ struct iwl_mld_vif *mld_vif;
+ struct iwl_mld_link *mld_link;
+ struct iwl_mac_beacon_cmd beacon_cmd = {};
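+	/* The injected IEs arrive as a hex string: two characters per byte */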
+ int n_bytes = len / 2;
+
+ /* Element len should be represented by u8 */
+ if (n_bytes >= U8_MAX)
+ return -EINVAL;
+
+ if (iwl_mld_dbgfs_fw_cmd_disabled(mld))
+ return -EIO;
+
+ if (!vif)
+ return -EINVAL;
+
+ mld_vif = iwl_mld_vif_from_mac80211(vif);
+ mld_vif->beacon_inject_active = true;
+ mld->hw->extra_beacon_tailroom = n_bytes;
+
+ for_each_mld_vif_valid_link(mld_vif, mld_link) {
+ u32 offset;
+ struct ieee80211_tx_info *info;
+ struct ieee80211_bss_conf *link_conf =
+ link_conf_dereference_protected(vif, link_id);
+ struct ieee80211_chanctx_conf *ctx =
+ wiphy_dereference(mld->wiphy, link_conf->chanctx_conf);
+ struct sk_buff *beacon =
+ ieee80211_beacon_get_template(mld->hw, vif,
+ NULL, link_id);
+
+ if (!beacon)
+ return -EINVAL;
+
+ if (!restore && (WARN_ON(!n_bytes || !bin) ||
+ hex2bin(skb_put_zero(beacon, n_bytes),
+ bin, n_bytes))) {
+ dev_kfree_skb(beacon);
+ return -EINVAL;
+ }
+
+ info = IEEE80211_SKB_CB(beacon);
+
+ beacon_cmd.flags =
+ cpu_to_le16(iwl_mld_get_rate_flags(mld, info, vif,
+ link_conf,
+ ctx->def.chan->band));
+ beacon_cmd.byte_cnt = cpu_to_le16((u16)beacon->len);
+ beacon_cmd.link_id =
+ cpu_to_le32(mld_link->fw_id);
+
+ iwl_mld_set_tim_idx(mld, &beacon_cmd.tim_idx,
+ beacon->data, beacon->len);
+
+ offset = iwl_find_ie_offset(beacon->data,
+ WLAN_EID_S1G_TWT,
+ beacon->len);
+
+ beacon_cmd.btwt_offset = cpu_to_le32(offset);
+
+ iwl_mld_send_beacon_template_cmd(mld, beacon, &beacon_cmd);
+ dev_kfree_skb(beacon);
+ }
+
+ if (restore)
+ mld_vif->beacon_inject_active = false;
+
+ return 0;
+}
+
+static ssize_t
+iwl_dbgfs_vif_inject_beacon_ie_write(struct iwl_mld *mld,
+ char *buf, size_t count,
+ void *data)
+{
+ struct ieee80211_vif *vif = data;
+ int ret = _iwl_dbgfs_inject_beacon_ie(mld, vif, buf,
+ count, false);
+
+ mld->hw->extra_beacon_tailroom = 0;
+ return ret ?: count;
+}
+
+VIF_DEBUGFS_WRITE_FILE_OPS(inject_beacon_ie, 512);
+
+static ssize_t
+iwl_dbgfs_vif_inject_beacon_ie_restore_write(struct iwl_mld *mld,
+ char *buf,
+ size_t count,
+ void *data)
+{
+ struct ieee80211_vif *vif = data;
+ int ret = _iwl_dbgfs_inject_beacon_ie(mld, vif, NULL,
+ 0, true);
+
+ mld->hw->extra_beacon_tailroom = 0;
+ return ret ?: count;
+}
+
+VIF_DEBUGFS_WRITE_FILE_OPS(inject_beacon_ie_restore, 512);
+
+static ssize_t
+iwl_dbgfs_vif_twt_setup_write(struct iwl_mld *mld, char *buf, size_t count,
+ void *data)
+{
+ struct iwl_host_cmd hcmd = {
+ .id = WIDE_ID(IWL_ALWAYS_LONG_GROUP, DEBUG_HOST_COMMAND),
+ };
+ struct ieee80211_vif *vif = data;
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+ struct iwl_dhc_cmd *cmd __free(kfree) = NULL;
+ struct iwl_dhc_twt_operation *dhc_twt_cmd;
+ u64 target_wake_time;
+ u32 twt_operation, interval_exp, interval_mantissa, min_wake_duration;
+ u8 trigger, flow_type, flow_id, protection, tenth_param;
+ u8 twt_request = 1, broadcast = 0;
+ int ret;
+
+ if (iwl_mld_dbgfs_fw_cmd_disabled(mld))
+ return -EIO;
+
+ ret = sscanf(buf, "%u %llu %u %u %u %hhu %hhu %hhu %hhu %hhu",
+ &twt_operation, &target_wake_time, &interval_exp,
+ &interval_mantissa, &min_wake_duration, &trigger,
+ &flow_type, &flow_id, &protection, &tenth_param);
+
+ /* the new twt_request parameter is optional for station */
+ if ((ret != 9 && ret != 10) ||
+ (ret == 10 && vif->type != NL80211_IFTYPE_STATION &&
+ tenth_param == 1))
+ return -EINVAL;
+
+ /* The 10th parameter:
+ * In STA mode - the TWT type (broadcast or individual)
+ * In AP mode - the role (0 responder, 2 unsolicited)
+ */
+ if (ret == 10) {
+ if (vif->type == NL80211_IFTYPE_STATION)
+ broadcast = tenth_param;
+ else
+ twt_request = tenth_param;
+ }
+
+ cmd = kzalloc(sizeof(*cmd) + sizeof(*dhc_twt_cmd), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ dhc_twt_cmd = (void *)cmd->data;
+ dhc_twt_cmd->mac_id = cpu_to_le32(mld_vif->fw_id);
+ dhc_twt_cmd->twt_operation = cpu_to_le32(twt_operation);
+ dhc_twt_cmd->target_wake_time = cpu_to_le64(target_wake_time);
+ dhc_twt_cmd->interval_exp = cpu_to_le32(interval_exp);
+ dhc_twt_cmd->interval_mantissa = cpu_to_le32(interval_mantissa);
+ dhc_twt_cmd->min_wake_duration = cpu_to_le32(min_wake_duration);
+ dhc_twt_cmd->trigger = trigger;
+ dhc_twt_cmd->flow_type = flow_type;
+ dhc_twt_cmd->flow_id = flow_id;
+ dhc_twt_cmd->protection = protection;
+ dhc_twt_cmd->twt_request = twt_request;
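+	/* A negotiation type of 3 requests broadcast TWT, 0 individual TWT */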
+ dhc_twt_cmd->negotiation_type = broadcast ? 3 : 0;
+
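+	/* The DHC payload length is expressed in dwords */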
+ cmd->length = cpu_to_le32(sizeof(*dhc_twt_cmd) >> 2);
+ cmd->index_and_mask =
+ cpu_to_le32(DHC_TABLE_INTEGRATION | DHC_TARGET_UMAC |
+ DHC_INT_UMAC_TWT_OPERATION);
+
+ hcmd.len[0] = sizeof(*cmd) + sizeof(*dhc_twt_cmd);
+ hcmd.data[0] = cmd;
+
+ ret = iwl_mld_send_cmd(mld, &hcmd);
+
+ return ret ?: count;
+}
+
+VIF_DEBUGFS_WRITE_FILE_OPS(twt_setup, 256);
+
+static ssize_t
+iwl_dbgfs_vif_twt_operation_write(struct iwl_mld *mld, char *buf, size_t count,
+ void *data)
+{
+ struct ieee80211_vif *vif = data;
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+ struct iwl_twt_operation_cmd twt_cmd = {};
+ int link_id = vif->active_links ? __ffs(vif->active_links) : 0;
+ struct iwl_mld_link *mld_link = iwl_mld_link_dereference_check(mld_vif,
+ link_id);
+ int ret;
+
+ if (WARN_ON(!mld_link))
+ return -ENODEV;
+
+ if (iwl_mld_dbgfs_fw_cmd_disabled(mld))
+ return -EIO;
+
+ if (hweight16(vif->active_links) > 1)
+ return -EOPNOTSUPP;
+
+ ret = sscanf(buf,
+ "%u %llu %u %u %u %hhu %hhu %hhu %hhu %hhu %hhu %hhu %hhu %hhu %hhu %hhu %hhu %hhu %hhu %hhu %hhu",
+ &twt_cmd.twt_operation, &twt_cmd.target_wake_time,
+ &twt_cmd.interval_exponent, &twt_cmd.interval_mantissa,
+ &twt_cmd.minimum_wake_duration, &twt_cmd.trigger,
+ &twt_cmd.flow_type, &twt_cmd.flow_id,
+ &twt_cmd.twt_protection, &twt_cmd.ndp_paging_indicator,
+ &twt_cmd.responder_pm_mode, &twt_cmd.negotiation_type,
+ &twt_cmd.twt_request, &twt_cmd.implicit,
+ &twt_cmd.twt_group_assignment, &twt_cmd.twt_channel,
+ &twt_cmd.restricted_info_present, &twt_cmd.dl_bitmap_valid,
+ &twt_cmd.ul_bitmap_valid, &twt_cmd.dl_tid_bitmap,
+ &twt_cmd.ul_tid_bitmap);
+
+ if (ret != 21)
+ return -EINVAL;
+
+ twt_cmd.link_id = cpu_to_le32(mld_link->fw_id);
+
+ ret = iwl_mld_send_cmd_pdu(mld,
+ WIDE_ID(MAC_CONF_GROUP, TWT_OPERATION_CMD),
+ &twt_cmd);
+ return ret ?: count;
+}
+
+VIF_DEBUGFS_WRITE_FILE_OPS(twt_operation, 256);
+
+static ssize_t iwl_dbgfs_vif_int_mlo_scan_write(struct iwl_mld *mld, char *buf,
+ size_t count, void *data)
+{
+ struct ieee80211_vif *vif = data;
+ u32 action;
+ int ret;
+
+ if (!vif->cfg.assoc || !ieee80211_vif_is_mld(vif))
+ return -EINVAL;
+
+ if (kstrtou32(buf, 0, &action))
+ return -EINVAL;
+
+ if (action == 0) {
+ ret = iwl_mld_scan_stop(mld, IWL_MLD_SCAN_INT_MLO, false);
+ } else if (action == 1) {
+ iwl_mld_int_mlo_scan(mld, vif);
+ ret = 0;
+ } else {
+ ret = -EINVAL;
+ }
+
+ return ret ?: count;
+}
+
+VIF_DEBUGFS_WRITE_FILE_OPS(int_mlo_scan, 32);
+
+void iwl_mld_add_vif_debugfs(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct dentry *mld_vif_dbgfs =
+ debugfs_create_dir("iwlmld", vif->debugfs_dir);
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+ struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+ char target[3 * 3 + 11 + (NL80211_WIPHY_NAME_MAXLEN + 1) +
+ (7 + IFNAMSIZ + 1) + 6 + 1];
+ char name[7 + IFNAMSIZ + 1];
+
+ /* Create symlink for convenience pointing to interface specific
+ * debugfs entries for the driver. For example, under
+ * /sys/kernel/debug/iwlwifi/0000\:02\:00.0/iwlmld/
+ * find
+ * netdev:wlan0 -> ../../../ieee80211/phy0/netdev:wlan0/iwlmld/
+ */
+ snprintf(name, sizeof(name), "%pd", vif->debugfs_dir);
+ snprintf(target, sizeof(target), "../../../%pd3/iwlmld",
+ vif->debugfs_dir);
+ mld_vif->dbgfs_slink =
+ debugfs_create_symlink(name, mld->debugfs_dir, target);
+
+ if (iwlmld_mod_params.power_scheme != IWL_POWER_SCHEME_CAM &&
+ vif->type == NL80211_IFTYPE_STATION) {
+ VIF_DEBUGFS_ADD_FILE(pm_params, mld_vif_dbgfs, 0200);
+ VIF_DEBUGFS_ADD_FILE(bf_params, mld_vif_dbgfs, 0200);
+ }
+
+ if (vif->type == NL80211_IFTYPE_AP) {
+ VIF_DEBUGFS_ADD_FILE(inject_beacon_ie, mld_vif_dbgfs, 0200);
+ VIF_DEBUGFS_ADD_FILE(inject_beacon_ie_restore,
+ mld_vif_dbgfs, 0200);
+ }
+
+ VIF_DEBUGFS_ADD_FILE(low_latency, mld_vif_dbgfs, 0600);
+ VIF_DEBUGFS_ADD_FILE(twt_setup, mld_vif_dbgfs, 0200);
+ VIF_DEBUGFS_ADD_FILE(twt_operation, mld_vif_dbgfs, 0200);
+ VIF_DEBUGFS_ADD_FILE(int_mlo_scan, mld_vif_dbgfs, 0200);
+}
+#define LINK_DEBUGFS_WRITE_FILE_OPS(name, bufsz) \
+ WIPHY_DEBUGFS_WRITE_FILE_OPS(link_##name, bufsz, bss_conf)
+
+#define LINK_DEBUGFS_ADD_FILE_ALIAS(alias, name, parent, mode) \
+ debugfs_create_file(alias, mode, parent, link_conf, \
+ &iwl_dbgfs_link_##name##_ops)
+#define LINK_DEBUGFS_ADD_FILE(name, parent, mode) \
+ LINK_DEBUGFS_ADD_FILE_ALIAS(#name, name, parent, mode)
+
+void iwl_mld_add_link_debugfs(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf,
+ struct dentry *dir)
+{
+ struct dentry *mld_link_dir;
+
+ mld_link_dir = debugfs_lookup("iwlmld", dir);
+
+	/* For non-MLO vifs, the dir of the deflink is the same as the vif's
+	 * one, so if the iwlmld dir already exists, this is the deflink.
+	 * If not, this is a per-link dir of an MLO vif; create the iwlmld
+	 * dir in it.
+	 */
+ if (!mld_link_dir)
+ mld_link_dir = debugfs_create_dir("iwlmld", dir);
+}
+
+static ssize_t iwl_dbgfs_fixed_rate_write(struct iwl_mld *mld, char *buf,
+ size_t count, void *data)
+{
+ struct ieee80211_link_sta *link_sta = data;
+ struct iwl_mld_link_sta *mld_link_sta;
+ u32 rate;
+ u32 partial = false;
+ char pretty_rate[100];
+ int ret;
+ u8 fw_sta_id;
+
+ mld_link_sta = iwl_mld_link_sta_from_mac80211(link_sta);
+ if (WARN_ON(!mld_link_sta))
+ return -EINVAL;
+
+ fw_sta_id = mld_link_sta->fw_id;
+
+ if (sscanf(buf, "%i %i", &rate, &partial) == 0)
+ return -EINVAL;
+
+ if (iwl_mld_dbgfs_fw_cmd_disabled(mld))
+ return -EIO;
+
+ ret = iwl_mld_send_tlc_dhc(mld, fw_sta_id,
+ partial ? IWL_TLC_DEBUG_PARTIAL_FIXED_RATE :
+ IWL_TLC_DEBUG_FIXED_RATE,
+ rate);
+
+ rs_pretty_print_rate(pretty_rate, sizeof(pretty_rate), rate);
+
+ IWL_DEBUG_RATE(mld, "sta_id %d rate %s partial: %d, ret:%d\n",
+ fw_sta_id, pretty_rate, partial, ret);
+
+ return ret ? : count;
+}
+
+static ssize_t iwl_dbgfs_tlc_dhc_write(struct iwl_mld *mld, char *buf,
+ size_t count, void *data)
+{
+ struct ieee80211_link_sta *link_sta = data;
+ struct iwl_mld_link_sta *mld_link_sta;
+ u32 type, value;
+ int ret;
+ u8 fw_sta_id;
+
+ mld_link_sta = iwl_mld_link_sta_from_mac80211(link_sta);
+ if (WARN_ON(!mld_link_sta))
+ return -EINVAL;
+
+ fw_sta_id = mld_link_sta->fw_id;
+
+ if (sscanf(buf, "%i %i", &type, &value) != 2) {
+ IWL_DEBUG_RATE(mld, "usage <type> <value>\n");
+ return -EINVAL;
+ }
+
+ if (iwl_mld_dbgfs_fw_cmd_disabled(mld))
+ return -EIO;
+
+ ret = iwl_mld_send_tlc_dhc(mld, fw_sta_id, type, value);
+
+ return ret ? : count;
+}
+
+#define LINK_STA_DEBUGFS_ADD_FILE_ALIAS(alias, name, parent, mode) \
+ debugfs_create_file(alias, mode, parent, link_sta, \
+ &iwl_dbgfs_##name##_ops)
+#define LINK_STA_DEBUGFS_ADD_FILE(name, parent, mode) \
+ LINK_STA_DEBUGFS_ADD_FILE_ALIAS(#name, name, parent, mode)
+
+#define LINK_STA_WIPHY_DEBUGFS_WRITE_OPS(name, bufsz) \
+ WIPHY_DEBUGFS_WRITE_FILE_OPS(name, bufsz, link_sta)
+
+LINK_STA_WIPHY_DEBUGFS_WRITE_OPS(tlc_dhc, 64);
+LINK_STA_WIPHY_DEBUGFS_WRITE_OPS(fixed_rate, 64);
+
+void iwl_mld_add_link_sta_debugfs(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_link_sta *link_sta,
+ struct dentry *dir)
+{
+ LINK_STA_DEBUGFS_ADD_FILE(fixed_rate, dir, 0200);
+ LINK_STA_DEBUGFS_ADD_FILE(tlc_dhc, dir, 0200);
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/debugfs.h b/drivers/net/wireless/intel/iwlwifi/mld/debugfs.h
new file mode 100644
index 000000000000..eeba35342ba1
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mld/debugfs.h
@@ -0,0 +1,244 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2024-2025 Intel Corporation
+ */
+#include "iface.h"
+#include "sta.h"
+
+#define MLD_DEBUGFS_OPEN_WRAPPER(name, buflen, argtype) \
+struct dbgfs_##name##_data { \
+ argtype *arg; \
+ bool read_done; \
+ ssize_t rlen; \
+ char buf[buflen]; \
+}; \
+static int _iwl_dbgfs_##name##_open(struct inode *inode, \
+ struct file *file) \
+{ \
+ struct dbgfs_##name##_data *data; \
+ \
+ if ((file->f_flags & O_ACCMODE) == O_RDWR) \
+ return -EOPNOTSUPP; \
+ \
+ data = kzalloc(sizeof(*data), GFP_KERNEL); \
+ if (!data) \
+ return -ENOMEM; \
+ \
+ data->read_done = false; \
+ data->arg = inode->i_private; \
+ file->private_data = data; \
+ \
+ return 0; \
+}
+
+#define MLD_DEBUGFS_READ_WRAPPER(name) \
+static ssize_t _iwl_dbgfs_##name##_read(struct file *file, \
+ char __user *user_buf, \
+ size_t count, loff_t *ppos) \
+{ \
+ struct dbgfs_##name##_data *data = file->private_data; \
+ \
+ if (!data->read_done) { \
+ data->read_done = true; \
+ data->rlen = iwl_dbgfs_##name##_read(data->arg, \
+ sizeof(data->buf),\
+ data->buf); \
+ } \
+ \
+ if (data->rlen < 0) \
+ return data->rlen; \
+ return simple_read_from_buffer(user_buf, count, ppos, \
+ data->buf, data->rlen); \
+}
+
+static int _iwl_dbgfs_release(struct inode *inode, struct file *file)
+{
+ kfree(file->private_data);
+ return 0;
+}
+
+#define _MLD_DEBUGFS_READ_FILE_OPS(name, buflen, argtype) \
+MLD_DEBUGFS_OPEN_WRAPPER(name, buflen, argtype) \
+MLD_DEBUGFS_READ_WRAPPER(name) \
+static const struct file_operations iwl_dbgfs_##name##_ops = { \
+ .read = _iwl_dbgfs_##name##_read, \
+ .open = _iwl_dbgfs_##name##_open, \
+ .llseek = generic_file_llseek, \
+ .release = _iwl_dbgfs_release, \
+}
+
+#define WIPHY_DEBUGFS_WRITE_HANDLER_WRAPPER(name) \
+static ssize_t iwl_dbgfs_##name##_write_handler(struct wiphy *wiphy, \
+ struct file *file, char *buf, \
+ size_t count, void *data) \
+{ \
+ struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy); \
+ struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw); \
+ return iwl_dbgfs_##name##_write(mld, buf, count, data); \
+}
+
+static inline struct iwl_mld *
+iwl_mld_from_link_sta(struct ieee80211_link_sta *link_sta)
+{
+ struct ieee80211_vif *vif =
+ iwl_mld_sta_from_mac80211(link_sta->sta)->vif;
+ return iwl_mld_vif_from_mac80211(vif)->mld;
+}
+
+static inline struct iwl_mld *
+iwl_mld_from_bss_conf(struct ieee80211_bss_conf *link)
+{
+ return iwl_mld_vif_from_mac80211(link->vif)->mld;
+}
+
+static inline struct iwl_mld *iwl_mld_from_vif(struct ieee80211_vif *vif)
+{
+ return iwl_mld_vif_from_mac80211(vif)->mld;
+}
+
+#define WIPHY_DEBUGFS_WRITE_WRAPPER(name, bufsz, objtype) \
+WIPHY_DEBUGFS_WRITE_HANDLER_WRAPPER(name) \
+static ssize_t __iwl_dbgfs_##name##_write(struct file *file, \
+ const char __user *user_buf, \
+ size_t count, loff_t *ppos) \
+{ \
+ struct ieee80211_##objtype *arg = file->private_data; \
+ struct iwl_mld *mld = iwl_mld_from_##objtype(arg); \
+ char buf[bufsz] = {}; \
+ \
+ return wiphy_locked_debugfs_write(mld->wiphy, file, \
+ buf, sizeof(buf), \
+ user_buf, count, \
+ iwl_dbgfs_##name##_write_handler, \
+ arg); \
+}
+
+#define WIPHY_DEBUGFS_WRITE_FILE_OPS(name, bufsz, objtype) \
+ WIPHY_DEBUGFS_WRITE_WRAPPER(name, bufsz, objtype) \
+ static const struct file_operations iwl_dbgfs_##name##_ops = { \
+ .write = __iwl_dbgfs_##name##_write, \
+ .open = simple_open, \
+ .llseek = generic_file_llseek, \
+ }
+
+#define WIPHY_DEBUGFS_READ_HANDLER_WRAPPER_MLD(name) \
+static ssize_t iwl_dbgfs_##name##_read_handler(struct wiphy *wiphy, \
+ struct file *file, char *buf, \
+ size_t count, void *data) \
+{ \
+ struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy); \
+ struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw); \
+ return iwl_dbgfs_##name##_read(mld, buf, count); \
+}
+
+#define WIPHY_DEBUGFS_WRITE_HANDLER_WRAPPER_MLD(name) \
+static ssize_t iwl_dbgfs_##name##_write_handler(struct wiphy *wiphy, \
+ struct file *file, char *buf, \
+ size_t count, void *data) \
+{ \
+ struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy); \
+ struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw); \
+ return iwl_dbgfs_##name##_write(mld, buf, count); \
+}
+
+#define WIPHY_DEBUGFS_WRITE_WRAPPER_MLD(name) \
+WIPHY_DEBUGFS_WRITE_HANDLER_WRAPPER_MLD(name) \
+static ssize_t __iwl_dbgfs_##name##_write(struct file *file, \
+ const char __user *user_buf, \
+ size_t count, loff_t *ppos) \
+{ \
+ struct dbgfs_##name##_data *data = file->private_data; \
+ struct iwl_mld *mld = data->arg; \
+ \
+ return wiphy_locked_debugfs_write(mld->wiphy, file, \
+ data->buf, sizeof(data->buf), \
+ user_buf, count, \
+ iwl_dbgfs_##name##_write_handler, \
+ NULL); \
+}
+
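+/* The read handler runs once under the wiphy lock and caches its output, so
+ * later reads are served from the buffer without taking the lock again.
+ */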
+#define WIPHY_DEBUGFS_READ_WRAPPER_MLD(name) \
+WIPHY_DEBUGFS_READ_HANDLER_WRAPPER_MLD(name) \
+static ssize_t __iwl_dbgfs_##name##_read(struct file *file, \
+ char __user *user_buf, \
+ size_t count, loff_t *ppos) \
+{ \
+ struct dbgfs_##name##_data *data = file->private_data; \
+ struct iwl_mld *mld = data->arg; \
+ \
+ if (!data->read_done) { \
+ data->read_done = true; \
+ data->rlen = wiphy_locked_debugfs_read(mld->wiphy, \
+ file, data->buf, sizeof(data->buf), \
+ user_buf, count, ppos, \
+ iwl_dbgfs_##name##_read_handler, NULL); \
+ return data->rlen; \
+ } \
+ \
+ if (data->rlen < 0) \
+ return data->rlen; \
+ return simple_read_from_buffer(user_buf, count, ppos, \
+ data->buf, data->rlen); \
+}
+
+#define WIPHY_DEBUGFS_READ_FILE_OPS_MLD(name, bufsz) \
+ MLD_DEBUGFS_OPEN_WRAPPER(name, bufsz, struct iwl_mld) \
+ WIPHY_DEBUGFS_READ_WRAPPER_MLD(name) \
+ static const struct file_operations iwl_dbgfs_##name##_ops = { \
+ .read = __iwl_dbgfs_##name##_read, \
+ .open = _iwl_dbgfs_##name##_open, \
+ .llseek = generic_file_llseek, \
+ .release = _iwl_dbgfs_release, \
+ }
+
+#define WIPHY_DEBUGFS_WRITE_FILE_OPS_MLD(name, bufsz) \
+ MLD_DEBUGFS_OPEN_WRAPPER(name, bufsz, struct iwl_mld) \
+ WIPHY_DEBUGFS_WRITE_WRAPPER_MLD(name) \
+ static const struct file_operations iwl_dbgfs_##name##_ops = { \
+ .write = __iwl_dbgfs_##name##_write, \
+ .open = _iwl_dbgfs_##name##_open, \
+ .llseek = generic_file_llseek, \
+ .release = _iwl_dbgfs_release, \
+ }
+
+#define WIPHY_DEBUGFS_READ_WRITE_FILE_OPS_MLD(name, bufsz) \
+ MLD_DEBUGFS_OPEN_WRAPPER(name, bufsz, struct iwl_mld) \
+ WIPHY_DEBUGFS_WRITE_WRAPPER_MLD(name) \
+ WIPHY_DEBUGFS_READ_WRAPPER_MLD(name) \
+ static const struct file_operations iwl_dbgfs_##name##_ops = { \
+ .write = __iwl_dbgfs_##name##_write, \
+ .read = __iwl_dbgfs_##name##_read, \
+ .open = _iwl_dbgfs_##name##_open, \
+ .llseek = generic_file_llseek, \
+ .release = _iwl_dbgfs_release, \
+ }
+
+#define WIPHY_DEBUGFS_WRITE_WRAPPER_IEEE80211(name, bufsz, objtype) \
+WIPHY_DEBUGFS_WRITE_HANDLER_WRAPPER(name) \
+static ssize_t _iwl_dbgfs_##name##_write(struct file *file, \
+ const char __user *user_buf, \
+ size_t count, loff_t *ppos) \
+{ \
+ struct dbgfs_##name##_data *data = file->private_data; \
+ struct ieee80211_##objtype *arg = data->arg; \
+ struct iwl_mld *mld = iwl_mld_from_##objtype(arg); \
+ char buf[bufsz] = {}; \
+ \
+ return wiphy_locked_debugfs_write(mld->wiphy, file, \
+ buf, sizeof(buf), \
+ user_buf, count, \
+ iwl_dbgfs_##name##_write_handler, \
+ arg); \
+}
+
+#define IEEE80211_WIPHY_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz, objtype) \
+ MLD_DEBUGFS_OPEN_WRAPPER(name, bufsz, struct ieee80211_##objtype) \
+ WIPHY_DEBUGFS_WRITE_WRAPPER_IEEE80211(name, bufsz, objtype) \
+ MLD_DEBUGFS_READ_WRAPPER(name) \
+ static const struct file_operations iwl_dbgfs_##name##_ops = { \
+ .write = _iwl_dbgfs_##name##_write, \
+ .read = _iwl_dbgfs_##name##_read, \
+ .open = _iwl_dbgfs_##name##_open, \
+ .llseek = generic_file_llseek, \
+ .release = _iwl_dbgfs_release, \
+ }
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/ftm-initiator.c b/drivers/net/wireless/intel/iwlwifi/mld/ftm-initiator.c
new file mode 100644
index 000000000000..f77ba21a174d
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mld/ftm-initiator.c
@@ -0,0 +1,451 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2025 Intel Corporation
+ */
+#include <linux/etherdevice.h>
+#include <linux/math64.h>
+#include <net/cfg80211.h>
+#include "mld.h"
+#include "iface.h"
+#include "phy.h"
+#include "iwl-io.h"
+#include "iwl-prph.h"
+#include "constants.h"
+#include "fw/api/location.h"
+#include "ftm-initiator.h"
+
+static void iwl_mld_ftm_cmd_common(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ struct iwl_tof_range_req_cmd *cmd,
+ struct cfg80211_pmsr_request *req)
+{
+ int i;
+
+ cmd->initiator_flags =
+ cpu_to_le32(IWL_TOF_INITIATOR_FLAGS_MACADDR_RANDOM |
+ IWL_TOF_INITIATOR_FLAGS_NON_ASAP_SUPPORT);
+ cmd->request_id = req->cookie;
+ cmd->num_of_ap = req->n_peers;
+
+ /* Use a large value for "no timeout". Don't use the maximum value
+ * because of fw limitations.
+ */
+ if (req->timeout)
+ cmd->req_timeout_ms = cpu_to_le32(min(req->timeout, 0xfffff));
+ else
+ cmd->req_timeout_ms = cpu_to_le32(0xfffff);
+
+ memcpy(cmd->macaddr_template, req->mac_addr, ETH_ALEN);
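+	/* cfg80211 mask bits mark the fixed part of the address; the firmware
+	 * uses the opposite convention, so invert the mask.
+	 */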
+ for (i = 0; i < ETH_ALEN; i++)
+ cmd->macaddr_mask[i] = ~req->mac_addr_mask[i];
+
+ if (vif->cfg.assoc) {
+ memcpy(cmd->range_req_bssid, vif->bss_conf.bssid, ETH_ALEN);
+
+ /* AP's TSF is only relevant if associated */
+ for (i = 0; i < req->n_peers; i++) {
+ if (req->peers[i].report_ap_tsf) {
+ struct iwl_mld_vif *mld_vif =
+ iwl_mld_vif_from_mac80211(vif);
+
+ cmd->tsf_mac_id = cpu_to_le32(mld_vif->fw_id);
+ return;
+ }
+ }
+ } else {
+ eth_broadcast_addr(cmd->range_req_bssid);
+ }
+
+ /* Don't report AP's TSF */
+ cmd->tsf_mac_id = cpu_to_le32(0xff);
+}
+
+static int
+iwl_mld_ftm_set_target_chandef(struct iwl_mld *mld,
+ struct cfg80211_pmsr_request_peer *peer,
+ struct iwl_tof_range_req_ap_entry *target)
+{
+ u32 freq = peer->chandef.chan->center_freq;
+
+ target->channel_num = ieee80211_frequency_to_channel(freq);
+
+ switch (peer->chandef.width) {
+ case NL80211_CHAN_WIDTH_20_NOHT:
+ target->format_bw = IWL_LOCATION_FRAME_FORMAT_LEGACY;
+ target->format_bw |= IWL_LOCATION_BW_20MHZ << LOCATION_BW_POS;
+ break;
+ case NL80211_CHAN_WIDTH_20:
+ target->format_bw = IWL_LOCATION_FRAME_FORMAT_HT;
+ target->format_bw |= IWL_LOCATION_BW_20MHZ << LOCATION_BW_POS;
+ break;
+ case NL80211_CHAN_WIDTH_40:
+ target->format_bw = IWL_LOCATION_FRAME_FORMAT_HT;
+ target->format_bw |= IWL_LOCATION_BW_40MHZ << LOCATION_BW_POS;
+ break;
+ case NL80211_CHAN_WIDTH_80:
+ target->format_bw = IWL_LOCATION_FRAME_FORMAT_VHT;
+ target->format_bw |= IWL_LOCATION_BW_80MHZ << LOCATION_BW_POS;
+ break;
+ case NL80211_CHAN_WIDTH_160:
+ target->format_bw = IWL_LOCATION_FRAME_FORMAT_HE;
+ target->format_bw |= IWL_LOCATION_BW_160MHZ << LOCATION_BW_POS;
+ break;
+ default:
+ IWL_ERR(mld, "Unsupported BW in FTM request (%d)\n",
+ peer->chandef.width);
+ return -EINVAL;
+	}
+
+	/* Non-EDCA-based measurements must use an HE preamble */
+ if (peer->ftm.trigger_based || peer->ftm.non_trigger_based)
+ target->format_bw |= IWL_LOCATION_FRAME_FORMAT_HE;
+
+ target->ctrl_ch_position =
+ (peer->chandef.width > NL80211_CHAN_WIDTH_20) ?
+ iwl_mld_get_fw_ctrl_pos(&peer->chandef) : 0;
+
+ target->band = iwl_mld_nl80211_band_to_fw(peer->chandef.chan->band);
+ return 0;
+}
+
+#define FTM_SET_FLAG(flag) (target->initiator_ap_flags |= \
+ cpu_to_le32(IWL_INITIATOR_AP_FLAGS_##flag))
+
+static void
+iwl_mld_ftm_set_target_flags(struct iwl_mld *mld,
+ struct cfg80211_pmsr_request_peer *peer,
+ struct iwl_tof_range_req_ap_entry *target)
+{
+ target->initiator_ap_flags = cpu_to_le32(0);
+
+ if (peer->ftm.asap)
+ FTM_SET_FLAG(ASAP);
+
+ if (peer->ftm.request_lci)
+ FTM_SET_FLAG(LCI_REQUEST);
+
+ if (peer->ftm.request_civicloc)
+ FTM_SET_FLAG(CIVIC_REQUEST);
+
+ if (IWL_MLD_FTM_INITIATOR_DYNACK)
+ FTM_SET_FLAG(DYN_ACK);
+
+ if (IWL_MLD_FTM_INITIATOR_ALGO == IWL_TOF_ALGO_TYPE_LINEAR_REG)
+ FTM_SET_FLAG(ALGO_LR);
+ else if (IWL_MLD_FTM_INITIATOR_ALGO == IWL_TOF_ALGO_TYPE_FFT)
+ FTM_SET_FLAG(ALGO_FFT);
+
+ if (peer->ftm.trigger_based)
+ FTM_SET_FLAG(TB);
+ else if (peer->ftm.non_trigger_based)
+ FTM_SET_FLAG(NON_TB);
+
+ if ((peer->ftm.trigger_based || peer->ftm.non_trigger_based) &&
+ peer->ftm.lmr_feedback)
+ FTM_SET_FLAG(LMR_FEEDBACK);
+}
+
+static void iwl_mld_ftm_set_sta(struct iwl_mld *mld, struct ieee80211_vif *vif,
+ struct cfg80211_pmsr_request_peer *peer,
+ struct iwl_tof_range_req_ap_entry *target)
+{
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+ u32 sta_id_mask;
+
+ target->sta_id = IWL_INVALID_STA;
+
+ /* TODO: add ftm_unprotected debugfs support */
+
+ if (!vif->cfg.assoc || !mld_vif->ap_sta)
+ return;
+
+ sta_id_mask = iwl_mld_fw_sta_id_mask(mld, mld_vif->ap_sta);
+ if (WARN_ON(hweight32(sta_id_mask) != 1))
+ return;
+
+ target->sta_id = __ffs(sta_id_mask);
+
+ if (mld_vif->ap_sta->mfp &&
+ (peer->ftm.trigger_based || peer->ftm.non_trigger_based))
+ FTM_SET_FLAG(PMF);
+}
+
+static int
+iwl_mld_ftm_set_target(struct iwl_mld *mld, struct ieee80211_vif *vif,
+ struct cfg80211_pmsr_request_peer *peer,
+ struct iwl_tof_range_req_ap_entry *target)
+{
+ u32 i2r_max_sts;
+ int ret;
+
+ ret = iwl_mld_ftm_set_target_chandef(mld, peer, target);
+ if (ret)
+ return ret;
+
+ memcpy(target->bssid, peer->addr, ETH_ALEN);
+ target->burst_period = cpu_to_le16(peer->ftm.burst_period);
+ target->samples_per_burst = peer->ftm.ftms_per_burst;
+ target->num_of_bursts = peer->ftm.num_bursts_exp;
+ iwl_mld_ftm_set_target_flags(mld, peer, target);
+ iwl_mld_ftm_set_sta(mld, vif, peer, target);
+
+ /* TODO: add secured ranging support */
+
+ i2r_max_sts = IWL_MLD_FTM_I2R_MAX_STS > 1 ? 1 :
+ IWL_MLD_FTM_I2R_MAX_STS;
+
+ target->r2i_ndp_params = IWL_MLD_FTM_R2I_MAX_REP |
+ (IWL_MLD_FTM_R2I_MAX_STS << IWL_LOCATION_MAX_STS_POS) |
+ (IWL_MLD_FTM_R2I_MAX_TOTAL_LTF << IWL_LOCATION_TOTAL_LTF_POS);
+ target->i2r_ndp_params = IWL_MLD_FTM_I2R_MAX_REP |
+ (i2r_max_sts << IWL_LOCATION_MAX_STS_POS) |
+ (IWL_MLD_FTM_I2R_MAX_TOTAL_LTF << IWL_LOCATION_TOTAL_LTF_POS);
+
+ if (peer->ftm.non_trigger_based) {
+ target->min_time_between_msr =
+ cpu_to_le16(IWL_MLD_FTM_NON_TB_MIN_TIME_BETWEEN_MSR);
+ target->burst_period =
+ cpu_to_le16(IWL_MLD_FTM_NON_TB_MAX_TIME_BETWEEN_MSR);
+ } else {
+ target->min_time_between_msr = cpu_to_le16(0);
+ }
+
+ /* TODO: Beacon interval is currently unknown, so use the common value
+ * of 100 TUs.
+ */
+ target->beacon_interval = cpu_to_le16(100);
+
+ return 0;
+}
+
+int iwl_mld_ftm_start(struct iwl_mld *mld, struct ieee80211_vif *vif,
+ struct cfg80211_pmsr_request *req)
+{
+ struct iwl_tof_range_req_cmd cmd;
+ struct iwl_host_cmd hcmd = {
+ .id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
+ .dataflags[0] = IWL_HCMD_DFL_DUP,
+ .data[0] = &cmd,
+ .len[0] = sizeof(cmd),
+ };
+ u8 i;
+ int ret;
+
+ lockdep_assert_wiphy(mld->wiphy);
+
+ if (mld->ftm_initiator.req)
+ return -EBUSY;
+
+ if (req->n_peers > ARRAY_SIZE(cmd.ap))
+ return -EINVAL;
+
+ memset(&cmd, 0, sizeof(cmd));
+
+ iwl_mld_ftm_cmd_common(mld, vif, (void *)&cmd, req);
+
+ for (i = 0; i < cmd.num_of_ap; i++) {
+ struct cfg80211_pmsr_request_peer *peer = &req->peers[i];
+ struct iwl_tof_range_req_ap_entry *target = &cmd.ap[i];
+
+ ret = iwl_mld_ftm_set_target(mld, vif, peer, target);
+ if (ret)
+ return ret;
+ }
+
+	/* TODO: get the status from the response */
+ ret = iwl_mld_send_cmd(mld, &hcmd);
+ if (!ret) {
+ mld->ftm_initiator.req = req;
+ mld->ftm_initiator.req_wdev = ieee80211_vif_to_wdev(vif);
+ }
+
+ return ret;
+}
+
+static void iwl_mld_ftm_reset(struct iwl_mld *mld)
+{
+ lockdep_assert_wiphy(mld->wiphy);
+
+ mld->ftm_initiator.req = NULL;
+ mld->ftm_initiator.req_wdev = NULL;
+ memset(mld->ftm_initiator.responses, 0,
+ sizeof(mld->ftm_initiator.responses));
+}
+
+static int iwl_mld_ftm_range_resp_valid(struct iwl_mld *mld, u8 request_id,
+ u8 num_of_aps)
+{
+ if (IWL_FW_CHECK(mld, request_id != (u8)mld->ftm_initiator.req->cookie,
+ "Request ID mismatch, got %u, active %u\n",
+ request_id, (u8)mld->ftm_initiator.req->cookie))
+ return -EINVAL;
+
+ if (IWL_FW_CHECK(mld, num_of_aps > mld->ftm_initiator.req->n_peers ||
+ num_of_aps > IWL_TOF_MAX_APS,
+ "FTM range response: invalid num of APs (%u)\n",
+ num_of_aps))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int iwl_mld_ftm_find_peer(struct cfg80211_pmsr_request *req,
+ const u8 *addr)
+{
+ for (int i = 0; i < req->n_peers; i++) {
+ struct cfg80211_pmsr_request_peer *peer = &req->peers[i];
+
+ if (ether_addr_equal_unaligned(peer->addr, addr))
+ return i;
+ }
+
+ return -ENOENT;
+}
+
+static void iwl_mld_debug_range_resp(struct iwl_mld *mld, u8 index,
+ struct cfg80211_pmsr_result *res)
+{
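+	/* 100 / 6666 cm/ps approximates c / 2, converting an RTT given in
+	 * picoseconds into an estimated distance in cm.
+	 */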
+ s64 rtt_avg = div_s64(res->ftm.rtt_avg * 100, 6666);
+
+ IWL_DEBUG_INFO(mld, "entry %d\n", index);
+ IWL_DEBUG_INFO(mld, "\tstatus: %d\n", res->status);
+ IWL_DEBUG_INFO(mld, "\tBSSID: %pM\n", res->addr);
+ IWL_DEBUG_INFO(mld, "\thost time: %llu\n", res->host_time);
+ IWL_DEBUG_INFO(mld, "\tburst index: %d\n", res->ftm.burst_index);
+ IWL_DEBUG_INFO(mld, "\tsuccess num: %u\n", res->ftm.num_ftmr_successes);
+ IWL_DEBUG_INFO(mld, "\trssi: %d\n", res->ftm.rssi_avg);
+ IWL_DEBUG_INFO(mld, "\trssi spread: %d\n", res->ftm.rssi_spread);
+ IWL_DEBUG_INFO(mld, "\trtt: %lld\n", res->ftm.rtt_avg);
+ IWL_DEBUG_INFO(mld, "\trtt var: %llu\n", res->ftm.rtt_variance);
+ IWL_DEBUG_INFO(mld, "\trtt spread: %llu\n", res->ftm.rtt_spread);
+ IWL_DEBUG_INFO(mld, "\tdistance: %lld\n", rtt_avg);
+}
+
+void iwl_mld_handle_ftm_resp_notif(struct iwl_mld *mld,
+ struct iwl_rx_packet *pkt)
+{
+ struct iwl_tof_range_rsp_ntfy *fw_resp = (void *)pkt->data;
+ u8 num_of_aps, last_in_batch;
+
+ if (IWL_FW_CHECK(mld, !mld->ftm_initiator.req,
+ "FTM response without a pending request\n"))
+ return;
+
+ if (iwl_mld_ftm_range_resp_valid(mld, fw_resp->request_id,
+ fw_resp->num_of_aps))
+ return;
+
+ num_of_aps = fw_resp->num_of_aps;
+ last_in_batch = fw_resp->last_report;
+
+ IWL_DEBUG_INFO(mld, "Range response received\n");
+ IWL_DEBUG_INFO(mld, "request id: %llu, num of entries: %u\n",
+ mld->ftm_initiator.req->cookie, num_of_aps);
+
+ for (int i = 0; i < num_of_aps; i++) {
+ struct cfg80211_pmsr_result result = {};
+ struct iwl_tof_range_rsp_ap_entry_ntfy *fw_ap;
+ int peer_idx;
+
+ fw_ap = &fw_resp->ap[i];
+ result.final = fw_ap->last_burst;
+ result.ap_tsf = le32_to_cpu(fw_ap->start_tsf);
+ result.ap_tsf_valid = 1;
+
+ peer_idx = iwl_mld_ftm_find_peer(mld->ftm_initiator.req,
+ fw_ap->bssid);
+ if (peer_idx < 0) {
+ IWL_WARN(mld,
+ "Unknown address (%pM, target #%d) in FTM response\n",
+ fw_ap->bssid, i);
+ continue;
+ }
+
+ switch (fw_ap->measure_status) {
+ case IWL_TOF_ENTRY_SUCCESS:
+ result.status = NL80211_PMSR_STATUS_SUCCESS;
+ break;
+ case IWL_TOF_ENTRY_TIMING_MEASURE_TIMEOUT:
+ result.status = NL80211_PMSR_STATUS_TIMEOUT;
+ break;
+ case IWL_TOF_ENTRY_NO_RESPONSE:
+ result.status = NL80211_PMSR_STATUS_FAILURE;
+ result.ftm.failure_reason =
+ NL80211_PMSR_FTM_FAILURE_NO_RESPONSE;
+ break;
+ case IWL_TOF_ENTRY_REQUEST_REJECTED:
+ result.status = NL80211_PMSR_STATUS_FAILURE;
+ result.ftm.failure_reason =
+ NL80211_PMSR_FTM_FAILURE_PEER_BUSY;
+ result.ftm.busy_retry_time = fw_ap->refusal_period;
+ break;
+ default:
+ result.status = NL80211_PMSR_STATUS_FAILURE;
+ result.ftm.failure_reason =
+ NL80211_PMSR_FTM_FAILURE_UNSPECIFIED;
+ break;
+ }
+ memcpy(result.addr, fw_ap->bssid, ETH_ALEN);
+
+ /* TODO: convert the timestamp from the result to systime */
+ result.host_time = ktime_get_boottime_ns();
+
+ result.type = NL80211_PMSR_TYPE_FTM;
+ result.ftm.burst_index = mld->ftm_initiator.responses[peer_idx];
+ mld->ftm_initiator.responses[peer_idx]++;
+ result.ftm.rssi_avg = fw_ap->rssi;
+ result.ftm.rssi_avg_valid = 1;
+ result.ftm.rssi_spread = fw_ap->rssi_spread;
+ result.ftm.rssi_spread_valid = 1;
+ result.ftm.rtt_avg = (s32)le32_to_cpu(fw_ap->rtt);
+ result.ftm.rtt_avg_valid = 1;
+ result.ftm.rtt_variance = le32_to_cpu(fw_ap->rtt_variance);
+ result.ftm.rtt_variance_valid = 1;
+ result.ftm.rtt_spread = le32_to_cpu(fw_ap->rtt_spread);
+ result.ftm.rtt_spread_valid = 1;
+
+ cfg80211_pmsr_report(mld->ftm_initiator.req_wdev,
+ mld->ftm_initiator.req,
+ &result, GFP_KERNEL);
+
+ if (fw_has_api(&mld->fw->ucode_capa,
+ IWL_UCODE_TLV_API_FTM_RTT_ACCURACY))
+ IWL_DEBUG_INFO(mld, "RTT confidence: %u\n",
+ fw_ap->rttConfidence);
+
+ iwl_mld_debug_range_resp(mld, i, &result);
+ }
+
+ if (last_in_batch) {
+ cfg80211_pmsr_complete(mld->ftm_initiator.req_wdev,
+ mld->ftm_initiator.req,
+ GFP_KERNEL);
+ iwl_mld_ftm_reset(mld);
+ }
+}
+
+void iwl_mld_ftm_restart_cleanup(struct iwl_mld *mld)
+{
+ struct cfg80211_pmsr_result result = {
+ .status = NL80211_PMSR_STATUS_FAILURE,
+ .final = 1,
+ .host_time = ktime_get_boottime_ns(),
+ .type = NL80211_PMSR_TYPE_FTM,
+ };
+
+ if (!mld->ftm_initiator.req)
+ return;
+
+ for (int i = 0; i < mld->ftm_initiator.req->n_peers; i++) {
+ memcpy(result.addr, mld->ftm_initiator.req->peers[i].addr,
+ ETH_ALEN);
+
+ cfg80211_pmsr_report(mld->ftm_initiator.req_wdev,
+ mld->ftm_initiator.req,
+ &result, GFP_KERNEL);
+ }
+
+ cfg80211_pmsr_complete(mld->ftm_initiator.req_wdev,
+ mld->ftm_initiator.req, GFP_KERNEL);
+ iwl_mld_ftm_reset(mld);
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/ftm-initiator.h b/drivers/net/wireless/intel/iwlwifi/mld/ftm-initiator.h
new file mode 100644
index 000000000000..3fab25a52508
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mld/ftm-initiator.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2025 Intel Corporation
+ */
+#ifndef __iwl_mld_ftm_initiator_h__
+#define __iwl_mld_ftm_initiator_h__
+
+/**
+ * struct ftm_initiator_data - FTM initiator data
+ *
+ * @req: a pointer to the cfg80211 FTM request
+ * @req_wdev: a pointer to the wdev that issued the current FTM request
+ * @responses: the number of responses received for the current FTM session.
+ * Used for tracking the burst index in a periodic request.
+ */
+struct ftm_initiator_data {
+ struct cfg80211_pmsr_request *req;
+ struct wireless_dev *req_wdev;
+ int responses[IWL_TOF_MAX_APS];
+};
+
+int iwl_mld_ftm_start(struct iwl_mld *mld, struct ieee80211_vif *vif,
+ struct cfg80211_pmsr_request *req);
+
+void iwl_mld_handle_ftm_resp_notif(struct iwl_mld *mld,
+ struct iwl_rx_packet *pkt);
+void iwl_mld_ftm_restart_cleanup(struct iwl_mld *mld);
+
+#endif /* __iwl_mld_ftm_initiator_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/fw.c b/drivers/net/wireless/intel/iwlwifi/mld/fw.c
new file mode 100644
index 000000000000..62da137e1024
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mld/fw.c
@@ -0,0 +1,536 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2024-2025 Intel Corporation
+ */
+
+#include "mld.h"
+
+#include "fw/api/alive.h"
+#include "fw/api/scan.h"
+#include "fw/api/rx.h"
+#include "fw/dbg.h"
+#include "fw/pnvm.h"
+#include "hcmd.h"
+#include "iwl-nvm-parse.h"
+#include "power.h"
+#include "mcc.h"
+#include "led.h"
+#include "coex.h"
+#include "regulatory.h"
+#include "thermal.h"
+
+static int iwl_mld_send_tx_ant_cfg(struct iwl_mld *mld)
+{
+ struct iwl_tx_ant_cfg_cmd cmd;
+
+ lockdep_assert_wiphy(mld->wiphy);
+
+ cmd.valid = cpu_to_le32(iwl_mld_get_valid_tx_ant(mld));
+
+ IWL_DEBUG_FW(mld, "select valid tx ant: %u\n", cmd.valid);
+
+ return iwl_mld_send_cmd_pdu(mld, TX_ANT_CONFIGURATION_CMD, &cmd);
+}
+
+static int iwl_mld_send_rss_cfg_cmd(struct iwl_mld *mld)
+{
+ struct iwl_rss_config_cmd cmd = {
+ .flags = cpu_to_le32(IWL_RSS_ENABLE),
+ .hash_mask = BIT(IWL_RSS_HASH_TYPE_IPV4_TCP) |
+ BIT(IWL_RSS_HASH_TYPE_IPV4_UDP) |
+ BIT(IWL_RSS_HASH_TYPE_IPV4_PAYLOAD) |
+ BIT(IWL_RSS_HASH_TYPE_IPV6_TCP) |
+ BIT(IWL_RSS_HASH_TYPE_IPV6_UDP) |
+ BIT(IWL_RSS_HASH_TYPE_IPV6_PAYLOAD),
+ };
+
+ lockdep_assert_wiphy(mld->wiphy);
+
+	/* Do not direct RSS traffic to queue 0, which is our fallback queue */
+ for (int i = 0; i < ARRAY_SIZE(cmd.indirection_table); i++)
+ cmd.indirection_table[i] =
+ 1 + (i % (mld->trans->num_rx_queues - 1));
+ netdev_rss_key_fill(cmd.secret_key, sizeof(cmd.secret_key));
+
+ return iwl_mld_send_cmd_pdu(mld, RSS_CONFIG_CMD, &cmd);
+}
+
+static int iwl_mld_config_scan(struct iwl_mld *mld)
+{
+ struct iwl_scan_config cmd = {
+ .tx_chains = cpu_to_le32(iwl_mld_get_valid_tx_ant(mld)),
+ .rx_chains = cpu_to_le32(iwl_mld_get_valid_rx_ant(mld))
+ };
+
+ return iwl_mld_send_cmd_pdu(mld, WIDE_ID(LONG_GROUP, SCAN_CFG_CMD),
+ &cmd);
+}
+
+static void iwl_mld_alive_imr_data(struct iwl_trans *trans,
+ const struct iwl_imr_alive_info *imr_info)
+{
+ struct iwl_imr_data *imr_data = &trans->dbg.imr_data;
+
+ imr_data->imr_enable = le32_to_cpu(imr_info->enabled);
+ imr_data->imr_size = le32_to_cpu(imr_info->size);
+ imr_data->imr2sram_remainbyte = imr_data->imr_size;
+ imr_data->imr_base_addr = imr_info->base_addr;
+ imr_data->imr_curr_addr = le64_to_cpu(imr_data->imr_base_addr);
+
+ if (imr_data->imr_enable)
+ return;
+
+ for (int i = 0; i < ARRAY_SIZE(trans->dbg.active_regions); i++) {
+ struct iwl_fw_ini_region_tlv *reg;
+
+ if (!trans->dbg.active_regions[i])
+ continue;
+
+ reg = (void *)trans->dbg.active_regions[i]->data;
+
+ /* We have only one DRAM IMR region, so we
+ * can break as soon as we find the first
+ * one.
+ */
+ if (reg->type == IWL_FW_INI_REGION_DRAM_IMR) {
+ trans->dbg.unsupported_region_msk |= BIT(i);
+ break;
+ }
+ }
+}
+
+static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait,
+ struct iwl_rx_packet *pkt, void *data)
+{
+ unsigned int pkt_len = iwl_rx_packet_payload_len(pkt);
+ struct iwl_mld *mld =
+ container_of(notif_wait, struct iwl_mld, notif_wait);
+ struct iwl_trans *trans = mld->trans;
+ u32 version = iwl_fw_lookup_notif_ver(mld->fw, LEGACY_GROUP,
+ UCODE_ALIVE_NTFY, 0);
+ struct iwl_alive_ntf_v6 *palive;
+ bool *alive_valid = data;
+ struct iwl_umac_alive *umac;
+ struct iwl_lmac_alive *lmac1;
+ struct iwl_lmac_alive *lmac2 = NULL;
+ u32 lmac_error_event_table;
+ u32 umac_error_table;
+ u16 status;
+
+ if (version < 6 || version > 7 || pkt_len != sizeof(*palive))
+ return false;
+
+ palive = (void *)pkt->data;
+
+ iwl_mld_alive_imr_data(trans, &palive->imr);
+
+ umac = &palive->umac_data;
+ lmac1 = &palive->lmac_data[0];
+ lmac2 = &palive->lmac_data[1];
+ status = le16_to_cpu(palive->status);
+
+ trans->sku_id[0] = le32_to_cpu(palive->sku_id.data[0]);
+ trans->sku_id[1] = le32_to_cpu(palive->sku_id.data[1]);
+ trans->sku_id[2] = le32_to_cpu(palive->sku_id.data[2]);
+
+ IWL_DEBUG_FW(mld, "Got sku_id: 0x0%x 0x0%x 0x0%x\n",
+ trans->sku_id[0], trans->sku_id[1], trans->sku_id[2]);
+
+ lmac_error_event_table =
+ le32_to_cpu(lmac1->dbg_ptrs.error_event_table_ptr);
+ iwl_fw_lmac1_set_alive_err_table(trans, lmac_error_event_table);
+
+ if (lmac2)
+ trans->dbg.lmac_error_event_table[1] =
+ le32_to_cpu(lmac2->dbg_ptrs.error_event_table_ptr);
+
+ umac_error_table = le32_to_cpu(umac->dbg_ptrs.error_info_addr) &
+ ~FW_ADDR_CACHE_CONTROL;
+
+ if (umac_error_table >= trans->cfg->min_umac_error_event_table)
+ iwl_fw_umac_set_alive_err_table(trans, umac_error_table);
+ else
+ IWL_ERR(mld, "Not valid error log pointer 0x%08X\n",
+ umac_error_table);
+
+ *alive_valid = status == IWL_ALIVE_STATUS_OK;
+
+ IWL_DEBUG_FW(mld,
+ "Alive ucode status 0x%04x revision 0x%01X 0x%01X\n",
+ status, lmac1->ver_type, lmac1->ver_subtype);
+
+ if (lmac2)
+ IWL_DEBUG_FW(mld, "Alive ucode CDB\n");
+
+ IWL_DEBUG_FW(mld,
+ "UMAC version: Major - 0x%x, Minor - 0x%x\n",
+ le32_to_cpu(umac->umac_major),
+ le32_to_cpu(umac->umac_minor));
+
+ if (version >= 7)
+ IWL_DEBUG_FW(mld, "FW alive flags 0x%x\n",
+ le16_to_cpu(palive->flags));
+
+ iwl_fwrt_update_fw_versions(&mld->fwrt, lmac1, umac);
+
+ return true;
+}
+
+#define MLD_ALIVE_TIMEOUT (2 * HZ)
+#define MLD_INIT_COMPLETE_TIMEOUT (2 * HZ)
+
+static void iwl_mld_print_alive_notif_timeout(struct iwl_mld *mld)
+{
+ struct iwl_trans *trans = mld->trans;
+ struct iwl_pc_data *pc_data;
+ u8 count;
+
+ IWL_ERR(mld,
+ "SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
+ iwl_read_umac_prph(trans, UMAG_SB_CPU_1_STATUS),
+ iwl_read_umac_prph(trans,
+ UMAG_SB_CPU_2_STATUS));
+#define IWL_FW_PRINT_REG_INFO(reg_name) \
+ IWL_ERR(mld, #reg_name ": 0x%x\n", iwl_read_umac_prph(trans, reg_name))
+
+ IWL_FW_PRINT_REG_INFO(WFPM_LMAC1_PD_NOTIFICATION);
+
+ IWL_FW_PRINT_REG_INFO(HPM_SECONDARY_DEVICE_STATE);
+
+ /* print OTP info */
+ IWL_FW_PRINT_REG_INFO(WFPM_MAC_OTP_CFG7_ADDR);
+ IWL_FW_PRINT_REG_INFO(WFPM_MAC_OTP_CFG7_DATA);
+#undef IWL_FW_PRINT_REG_INFO
+
+ pc_data = trans->dbg.pc_data;
+ for (count = 0; count < trans->dbg.num_pc; count++, pc_data++)
+ IWL_ERR(mld, "%s: 0x%x\n", pc_data->pc_name,
+ pc_data->pc_address);
+}
+
+static int iwl_mld_load_fw_wait_alive(struct iwl_mld *mld)
+{
+ const struct fw_img *fw =
+ iwl_get_ucode_image(mld->fw, IWL_UCODE_REGULAR);
+ static const u16 alive_cmd[] = { UCODE_ALIVE_NTFY };
+ struct iwl_notification_wait alive_wait;
+ bool alive_valid = false;
+ int ret;
+
+ lockdep_assert_wiphy(mld->wiphy);
+
+ iwl_init_notification_wait(&mld->notif_wait, &alive_wait,
+ alive_cmd, ARRAY_SIZE(alive_cmd),
+ iwl_alive_fn, &alive_valid);
+
+ iwl_dbg_tlv_time_point(&mld->fwrt, IWL_FW_INI_TIME_POINT_EARLY, NULL);
+
+ ret = iwl_trans_start_fw(mld->trans, fw, true);
+ if (ret) {
+ iwl_remove_notification(&mld->notif_wait, &alive_wait);
+ return ret;
+ }
+
+ ret = iwl_wait_notification(&mld->notif_wait, &alive_wait,
+ MLD_ALIVE_TIMEOUT);
+
+ if (ret) {
+ if (ret == -ETIMEDOUT)
+ iwl_fw_dbg_error_collect(&mld->fwrt,
+ FW_DBG_TRIGGER_ALIVE_TIMEOUT);
+ iwl_mld_print_alive_notif_timeout(mld);
+ goto alive_failure;
+ }
+
+ if (!alive_valid) {
+ IWL_ERR(mld, "Loaded firmware is not valid!\n");
+ ret = -EIO;
+ goto alive_failure;
+ }
+
+ iwl_trans_fw_alive(mld->trans, 0);
+
+ return 0;
+
+alive_failure:
+ iwl_trans_stop_device(mld->trans);
+ return ret;
+}
+
+static int iwl_mld_run_fw_init_sequence(struct iwl_mld *mld)
+{
+ struct iwl_notification_wait init_wait;
+ struct iwl_init_extended_cfg_cmd init_cfg = {};
+ static const u16 init_complete[] = {
+ INIT_COMPLETE_NOTIF,
+ };
+ int ret;
+
+ lockdep_assert_wiphy(mld->wiphy);
+
+ ret = iwl_mld_load_fw_wait_alive(mld);
+ if (ret)
+ return ret;
+
+ mld->trans->step_urm =
+ !!(iwl_read_umac_prph(mld->trans, CNVI_PMU_STEP_FLOW) &
+ CNVI_PMU_STEP_FLOW_FORCE_URM);
+
+ ret = iwl_pnvm_load(mld->trans, &mld->notif_wait,
+ &mld->fw->ucode_capa);
+ if (ret) {
+ IWL_ERR(mld, "Timeout waiting for PNVM load %d\n", ret);
+ goto init_failure;
+ }
+
+ iwl_dbg_tlv_time_point(&mld->fwrt, IWL_FW_INI_TIME_POINT_AFTER_ALIVE,
+ NULL);
+
+ iwl_init_notification_wait(&mld->notif_wait,
+ &init_wait,
+ init_complete,
+ ARRAY_SIZE(init_complete),
+ NULL, NULL);
+
+ ret = iwl_mld_send_cmd_pdu(mld,
+ WIDE_ID(SYSTEM_GROUP, INIT_EXTENDED_CFG_CMD),
+ &init_cfg);
+ if (ret) {
+ IWL_ERR(mld, "Failed to send init config command: %d\n", ret);
+ iwl_remove_notification(&mld->notif_wait, &init_wait);
+ goto init_failure;
+ }
+
+ ret = iwl_wait_notification(&mld->notif_wait, &init_wait,
+ MLD_INIT_COMPLETE_TIMEOUT);
+ if (ret) {
+ IWL_ERR(mld, "Failed to get INIT_COMPLETE %d\n", ret);
+ goto init_failure;
+ }
+
+ if (!mld->nvm_data) {
+ mld->nvm_data = iwl_get_nvm(mld->trans, mld->fw, 0, 0);
+ if (IS_ERR(mld->nvm_data)) {
+ ret = PTR_ERR(mld->nvm_data);
+ mld->nvm_data = NULL;
+ IWL_ERR(mld, "Failed to read NVM: %d\n", ret);
+ goto init_failure;
+ }
+ }
+
+ return 0;
+
+init_failure:
+ iwl_trans_stop_device(mld->trans);
+ return ret;
+}
+
+int iwl_mld_load_fw(struct iwl_mld *mld)
+{
+ int ret;
+
+ lockdep_assert_wiphy(mld->wiphy);
+
+ ret = iwl_trans_start_hw(mld->trans);
+ if (ret)
+ return ret;
+
+ ret = iwl_mld_run_fw_init_sequence(mld);
+ if (ret)
+ return ret;
+
+ ret = iwl_mld_init_mcc(mld);
+ if (ret)
+ return ret;
+
+ mld->fw_status.running = true;
+
+ return 0;
+}
+
+void iwl_mld_stop_fw(struct iwl_mld *mld)
+{
+ lockdep_assert_wiphy(mld->wiphy);
+
+ iwl_abort_notification_waits(&mld->notif_wait);
+
+ iwl_fw_dbg_stop_sync(&mld->fwrt);
+
+ iwl_trans_stop_device(mld->trans);
+
+ mld->fw_status.running = false;
+}
+
+static void iwl_mld_restart_disconnect_iter(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ if (vif->type == NL80211_IFTYPE_STATION)
+ ieee80211_hw_restart_disconnect(vif);
+}
+
+void iwl_mld_send_recovery_cmd(struct iwl_mld *mld, u32 flags)
+{
+ u32 error_log_size = mld->fw->ucode_capa.error_log_size;
+ struct iwl_fw_error_recovery_cmd recovery_cmd = {
+ .flags = cpu_to_le32(flags),
+ };
+ struct iwl_host_cmd cmd = {
+ .id = WIDE_ID(SYSTEM_GROUP, FW_ERROR_RECOVERY_CMD),
+ .flags = CMD_WANT_SKB,
+ .data = {&recovery_cmd, },
+ .len = {sizeof(recovery_cmd), },
+ };
+ int ret;
+
+ /* no error log was defined in TLV */
+ if (!error_log_size)
+ return;
+
+ if (flags & ERROR_RECOVERY_UPDATE_DB) {
+ /* no buf was allocated upon NIC error */
+ if (!mld->error_recovery_buf)
+ return;
+
+ cmd.data[1] = mld->error_recovery_buf;
+ cmd.len[1] = error_log_size;
+ cmd.dataflags[1] = IWL_HCMD_DFL_NOCOPY;
+ recovery_cmd.buf_size = cpu_to_le32(error_log_size);
+ }
+
+ ret = iwl_mld_send_cmd(mld, &cmd);
+
+ /* we no longer need the recovery buffer */
+ kfree(mld->error_recovery_buf);
+ mld->error_recovery_buf = NULL;
+
+ if (ret) {
+ IWL_ERR(mld, "Failed to send recovery cmd %d\n", ret);
+ return;
+ }
+
+ if (flags & ERROR_RECOVERY_UPDATE_DB) {
+ struct iwl_rx_packet *pkt = cmd.resp_pkt;
+ u32 pkt_len = iwl_rx_packet_payload_len(pkt);
+ u32 resp;
+
+ if (IWL_FW_CHECK(mld, pkt_len != sizeof(resp),
+ "Unexpected recovery cmd response size %u (expected %zu)\n",
+ pkt_len, sizeof(resp)))
+ goto out;
+
+ resp = le32_to_cpup((__le32 *)cmd.resp_pkt->data);
+ if (!resp)
+ goto out;
+
+ IWL_ERR(mld,
+ "Failed to send recovery cmd blob was invalid %d\n",
+ resp);
+
+ ieee80211_iterate_interfaces(mld->hw, 0,
+ iwl_mld_restart_disconnect_iter,
+ NULL);
+ }
+
+out:
+ iwl_free_resp(&cmd);
+}
+
+static int iwl_mld_config_fw(struct iwl_mld *mld)
+{
+ int ret;
+
+ lockdep_assert_wiphy(mld->wiphy);
+
+ iwl_fw_disable_dbg_asserts(&mld->fwrt);
+ iwl_get_shared_mem_conf(&mld->fwrt);
+
+ ret = iwl_mld_send_tx_ant_cfg(mld);
+ if (ret)
+ return ret;
+
+ ret = iwl_mld_send_bt_init_conf(mld);
+ if (ret)
+ return ret;
+
+ ret = iwl_set_soc_latency(&mld->fwrt);
+ if (ret)
+ return ret;
+
+ iwl_mld_configure_lari(mld);
+
+ ret = iwl_mld_config_temp_report_ths(mld);
+ if (ret)
+ return ret;
+
+#ifdef CONFIG_THERMAL
+ ret = iwl_mld_config_ctdp(mld, mld->cooling_dev.cur_state,
+ CTDP_CMD_OPERATION_START);
+ if (ret)
+ return ret;
+#endif
+
+ ret = iwl_configure_rxq(&mld->fwrt);
+ if (ret)
+ return ret;
+
+ ret = iwl_mld_send_rss_cfg_cmd(mld);
+ if (ret)
+ return ret;
+
+ ret = iwl_mld_config_scan(mld);
+ if (ret)
+ return ret;
+
+ ret = iwl_mld_update_device_power(mld, false);
+ if (ret)
+ return ret;
+
+ if (mld->fw_status.in_hw_restart) {
+ iwl_mld_send_recovery_cmd(mld, ERROR_RECOVERY_UPDATE_DB);
+ iwl_mld_time_sync_fw_config(mld);
+ }
+
+ iwl_mld_led_config_fw(mld);
+
+ ret = iwl_mld_init_ppag(mld);
+ if (ret)
+ return ret;
+
+ ret = iwl_mld_init_sar(mld);
+ if (ret)
+ return ret;
+
+ ret = iwl_mld_init_sgom(mld);
+ if (ret)
+ return ret;
+
+ iwl_mld_init_tas(mld);
+ iwl_mld_init_uats(mld);
+
+ return 0;
+}
+
+int iwl_mld_start_fw(struct iwl_mld *mld)
+{
+ int ret;
+
+ lockdep_assert_wiphy(mld->wiphy);
+
+ ret = iwl_mld_load_fw(mld);
+ if (IWL_FW_CHECK(mld, ret, "Failed to start firmware %d\n", ret)) {
+ iwl_fw_dbg_error_collect(&mld->fwrt, FW_DBG_TRIGGER_DRIVER);
+ goto error;
+ }
+
+ IWL_DEBUG_INFO(mld, "uCode started.\n");
+
+ ret = iwl_mld_config_fw(mld);
+ if (ret)
+ goto error;
+
+ return 0;
+
+error:
+ iwl_mld_stop_fw(mld);
+ return ret;
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/hcmd.h b/drivers/net/wireless/intel/iwlwifi/mld/hcmd.h
new file mode 100644
index 000000000000..64a8d4248324
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mld/hcmd.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2024-2025 Intel Corporation
+ */
+#ifndef __iwl_mld_hcmd_h__
+#define __iwl_mld_hcmd_h__
+
+static inline int iwl_mld_send_cmd(struct iwl_mld *mld, struct iwl_host_cmd *cmd)
+{
+ /* No commands, including the d3 related commands, should be sent
+ * after entering d3
+ */
+#ifdef CONFIG_PM_SLEEP
+ if (WARN_ON(mld->fw_status.in_d3))
+ return -EIO;
+#endif
+
+ if (!(cmd->flags & CMD_ASYNC))
+ lockdep_assert_wiphy(mld->wiphy);
+
+ /* Devices that need to shutdown immediately on rfkill are not
+ * supported, so we can send all the cmds in rfkill
+ */
+ cmd->flags |= CMD_SEND_IN_RFKILL;
+
+ return iwl_trans_send_cmd(mld->trans, cmd);
+}
+
+static inline int
+__iwl_mld_send_cmd_with_flags_pdu(struct iwl_mld *mld, u32 id,
+ u32 flags, const void *data, u16 len)
+{
+ struct iwl_host_cmd cmd = {
+ .id = id,
+ .len = { data ? len : 0, },
+ .data = { data, },
+ .flags = flags,
+ };
+
+ return iwl_mld_send_cmd(mld, &cmd);
+}
+
+#define _iwl_mld_send_cmd_with_flags_pdu(mld, id, flags, data, len, \
+ ignored...) \
+ __iwl_mld_send_cmd_with_flags_pdu(mld, id, flags, data, len)
+#define iwl_mld_send_cmd_with_flags_pdu(mld, id, flags, data, len...) \
+ _iwl_mld_send_cmd_with_flags_pdu(mld, id, flags, data, ##len, \
+ sizeof(*(data)))
+
+#define iwl_mld_send_cmd_pdu(mld, id, ...) \
+ iwl_mld_send_cmd_with_flags_pdu(mld, id, 0, __VA_ARGS__)
+
+#define iwl_mld_send_cmd_empty(mld, id) \
+ iwl_mld_send_cmd_with_flags_pdu(mld, id, 0, NULL, 0)
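+
+/*
+ * Illustrative usage only (not part of the API): for a fixed-size
+ * command struct the length argument can be omitted and defaults to
+ * sizeof(*(data)), so
+ *
+ *	struct iwl_some_cmd cmd = {};
+ *
+ *	ret = iwl_mld_send_cmd_pdu(mld, SOME_CMD_ID, &cmd);
+ *
+ * is equivalent to passing sizeof(cmd) explicitly. "iwl_some_cmd" and
+ * "SOME_CMD_ID" are hypothetical names used only for this sketch.
+ */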
+
+#endif /* __iwl_mld_hcmd_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/iface.c b/drivers/net/wireless/intel/iwlwifi/mld/iface.c
new file mode 100644
index 000000000000..e49e2260ac05
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mld/iface.c
@@ -0,0 +1,671 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2024-2025 Intel Corporation
+ */
+#include <net/cfg80211.h>
+
+#include "iface.h"
+#include "hcmd.h"
+#include "key.h"
+#include "mlo.h"
+#include "mac80211.h"
+
+#include "fw/api/context.h"
+#include "fw/api/mac.h"
+#include "fw/api/time-event.h"
+#include "fw/api/datapath.h"
+
+/* Cleanup function for struct iwl_mld_vif, will be called in restart */
+void iwl_mld_cleanup_vif(void *data, u8 *mac, struct ieee80211_vif *vif)
+{
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+ struct iwl_mld *mld = mld_vif->mld;
+ struct iwl_mld_link *link;
+
+ /* EMLSR is turned back on during recovery */
+ vif->driver_flags &= ~IEEE80211_VIF_EML_ACTIVE;
+
+ mld_vif->roc_activity = ROC_NUM_ACTIVITIES;
+
+ for_each_mld_vif_valid_link(mld_vif, link) {
+ iwl_mld_cleanup_link(mld_vif->mld, link);
+
+ /* In non-MLO mode the primary link is the correctly allocated
+ * deflink, so skip the checks below for it
+ */
+ if (!ieee80211_vif_is_mld(vif) &&
+ link_id == 0 && link == &mld_vif->deflink)
+ continue;
+
+ if (vif->active_links & BIT(link_id))
+ continue;
+
+ /* Should not happen as link removal should always succeed */
+ WARN_ON(1);
+ if (link != &mld_vif->deflink)
+ kfree_rcu(link, rcu_head);
+ RCU_INIT_POINTER(mld_vif->link[link_id], NULL);
+ }
+
+ ieee80211_iter_keys(mld->hw, vif, iwl_mld_cleanup_keys_iter, NULL);
+
+ CLEANUP_STRUCT(mld_vif);
+}
+
+static int iwl_mld_send_mac_cmd(struct iwl_mld *mld,
+ struct iwl_mac_config_cmd *cmd)
+{
+ int ret;
+
+ lockdep_assert_wiphy(mld->wiphy);
+
+ ret = iwl_mld_send_cmd_pdu(mld,
+ WIDE_ID(MAC_CONF_GROUP, MAC_CONFIG_CMD),
+ cmd);
+ if (ret)
+ IWL_ERR(mld, "Failed to send MAC_CONFIG_CMD ret = %d\n", ret);
+
+ return ret;
+}
+
+int iwl_mld_mac80211_iftype_to_fw(const struct ieee80211_vif *vif)
+{
+ switch (vif->type) {
+ case NL80211_IFTYPE_STATION:
+ return vif->p2p ? FW_MAC_TYPE_P2P_STA : FW_MAC_TYPE_BSS_STA;
+ case NL80211_IFTYPE_AP:
+ return FW_MAC_TYPE_GO;
+ case NL80211_IFTYPE_MONITOR:
+ return FW_MAC_TYPE_LISTENER;
+ case NL80211_IFTYPE_P2P_DEVICE:
+ return FW_MAC_TYPE_P2P_DEVICE;
+ case NL80211_IFTYPE_ADHOC:
+ return FW_MAC_TYPE_IBSS;
+ default:
+ WARN_ON_ONCE(1);
+ }
+ return FW_MAC_TYPE_BSS_STA;
+}
+
+static bool iwl_mld_is_nic_ack_enabled(struct iwl_mld *mld,
+ struct ieee80211_vif *vif)
+{
+ const struct ieee80211_supported_band *sband;
+ const struct ieee80211_sta_he_cap *own_he_cap;
+
+ lockdep_assert_wiphy(mld->wiphy);
+
+ /* This capability is the same for all bands,
+ * so take it from one of them.
+ */
+ sband = mld->hw->wiphy->bands[NL80211_BAND_2GHZ];
+ own_he_cap = ieee80211_get_he_iftype_cap_vif(sband, vif);
+
+ return own_he_cap && (own_he_cap->he_cap_elem.mac_cap_info[2] &
+ IEEE80211_HE_MAC_CAP2_ACK_EN);
+}
+
+/* fill the common part for all interface types */
+static void iwl_mld_mac_cmd_fill_common(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ struct iwl_mac_config_cmd *cmd,
+ u32 action)
+{
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+ struct ieee80211_bss_conf *link_conf;
+ unsigned int link_id;
+
+ lockdep_assert_wiphy(mld->wiphy);
+
+ cmd->id_and_color = cpu_to_le32(mld_vif->fw_id);
+ cmd->action = cpu_to_le32(action);
+
+ cmd->mac_type =
+ cpu_to_le32(iwl_mld_mac80211_iftype_to_fw(vif));
+
+ memcpy(cmd->local_mld_addr, vif->addr, ETH_ALEN);
+
+ if (iwlwifi_mod_params.disable_11ax)
+ return;
+
+ cmd->nic_not_ack_enabled =
+ cpu_to_le32(!iwl_mld_is_nic_ack_enabled(mld, vif));
+
+ /* If we have MLO enabled, then the firmware needs to enable
+ * address translation for the station(s) we add. That depends
+ * on having EHT enabled in firmware, which in turn depends on
+ * mac80211 in the code below.
+ * However, mac80211 doesn't enable HE/EHT until it has parsed
+ * the association response successfully, so just skip all that
+ * and enable both when we have MLO.
+ */
+ if (ieee80211_vif_is_mld(vif)) {
+ if (vif->type == NL80211_IFTYPE_AP)
+ cmd->he_ap_support = cpu_to_le16(1);
+ else
+ cmd->he_support = cpu_to_le16(1);
+
+ cmd->eht_support = cpu_to_le32(1);
+ return;
+ }
+
+ for_each_vif_active_link(vif, link_conf, link_id) {
+ if (!link_conf->he_support)
+ continue;
+
+ if (vif->type == NL80211_IFTYPE_AP)
+ cmd->he_ap_support = cpu_to_le16(1);
+ else
+ cmd->he_support = cpu_to_le16(1);
+
+ /* EHT, if supported, was already set above */
+ break;
+ }
+}
+
+static void iwl_mld_fill_mac_cmd_sta(struct iwl_mld *mld,
+ struct ieee80211_vif *vif, u32 action,
+ struct iwl_mac_config_cmd *cmd)
+{
+ struct ieee80211_bss_conf *link;
+ u32 twt_policy = 0;
+ int link_id;
+
+ lockdep_assert_wiphy(mld->wiphy);
+
+ WARN_ON(vif->type != NL80211_IFTYPE_STATION);
+
+ /* We always want to hear MCAST frames; if we're not authorized yet,
+ * we'll drop them.
+ */
+ cmd->filter_flags |= cpu_to_le32(MAC_CFG_FILTER_ACCEPT_GRP);
+
+ /* Adding a MAC ctxt with is_assoc set is not allowed in fw
+ * (and shouldn't happen)
+ */
+ if (vif->cfg.assoc && action != FW_CTXT_ACTION_ADD) {
+ cmd->client.is_assoc = 1;
+
+ if (!iwl_mld_vif_from_mac80211(vif)->authorized)
+ cmd->client.data_policy |=
+ cpu_to_le16(COEX_HIGH_PRIORITY_ENABLE);
+ } else {
+ /* Allow beacons to pass through as long as we are not
+ * associated
+ */
+ cmd->filter_flags |= cpu_to_le32(MAC_CFG_FILTER_ACCEPT_BEACON);
+ }
+
+ cmd->client.assoc_id = cpu_to_le16(vif->cfg.aid);
+
+ if (ieee80211_vif_is_mld(vif)) {
+ u16 esr_transition_timeout =
+ u16_get_bits(vif->cfg.eml_cap,
+ IEEE80211_EML_CAP_TRANSITION_TIMEOUT);
+
+ cmd->client.esr_transition_timeout =
+ min_t(u16, IEEE80211_EML_CAP_TRANSITION_TIMEOUT_128TU,
+ esr_transition_timeout);
+ cmd->client.medium_sync_delay =
+ cpu_to_le16(vif->cfg.eml_med_sync_delay);
+ }
+
+ for_each_vif_active_link(vif, link, link_id) {
+ if (!link->he_support)
+ continue;
+
+ if (link->twt_requester)
+ twt_policy |= TWT_SUPPORTED;
+ if (link->twt_protected)
+ twt_policy |= PROTECTED_TWT_SUPPORTED;
+ if (link->twt_broadcast)
+ twt_policy |= BROADCAST_TWT_SUPPORTED;
+ }
+
+ if (!iwlwifi_mod_params.disable_11ax)
+ cmd->client.data_policy |= cpu_to_le16(twt_policy);
+
+ if (vif->probe_req_reg && vif->cfg.assoc && vif->p2p)
+ cmd->filter_flags |=
+ cpu_to_le32(MAC_CFG_FILTER_ACCEPT_PROBE_REQ);
+
+ if (vif->p2p)
+ cmd->client.ctwin =
+ cpu_to_le32(vif->bss_conf.p2p_noa_attr.oppps_ctwindow &
+ IEEE80211_P2P_OPPPS_CTWINDOW_MASK);
+}
+
+static void iwl_mld_fill_mac_cmd_ap(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ struct iwl_mac_config_cmd *cmd)
+{
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+
+ lockdep_assert_wiphy(mld->wiphy);
+
+ WARN_ON(vif->type != NL80211_IFTYPE_AP);
+
+ cmd->filter_flags |= cpu_to_le32(MAC_CFG_FILTER_ACCEPT_PROBE_REQ);
+
+ /* in AP mode, pass beacons from other APs (needed for ht protection).
+ * When there are no associated stations we are not TXing anyway, so
+ * don't ask the FW to pass beacons, to prevent unnecessary wake-ups.
+ */
+ if (mld_vif->num_associated_stas)
+ cmd->filter_flags |= cpu_to_le32(MAC_CFG_FILTER_ACCEPT_BEACON);
+}
+
+static void iwl_mld_go_iterator(void *_data, u8 *mac, struct ieee80211_vif *vif)
+{
+ bool *go_active = _data;
+
+ if (ieee80211_vif_type_p2p(vif) == NL80211_IFTYPE_P2P_GO &&
+ iwl_mld_vif_from_mac80211(vif)->ap_ibss_active)
+ *go_active = true;
+}
+
+static bool iwl_mld_p2p_dev_has_extended_disc(struct iwl_mld *mld)
+{
+ bool go_active = false;
+
+ /* This flag should be set to true when the P2P Device is
+ * discoverable and there is at least one active P2P GO. Setting
+ * this flag allows the P2P Device to be discoverable on other
+ * channels in addition to its listen channel.
+ * Note that this flag should not be set in other cases, as it opens
+ * the Rx filters on all MACs and increases the number of interrupts.
+ */
+ ieee80211_iterate_active_interfaces(mld->hw,
+ IEEE80211_IFACE_ITER_RESUME_ALL,
+ iwl_mld_go_iterator, &go_active);
+
+ return go_active;
+}
+
+static void iwl_mld_fill_mac_cmd_p2p_dev(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ struct iwl_mac_config_cmd *cmd)
+{
+ bool ext_disc = iwl_mld_p2p_dev_has_extended_disc(mld);
+
+ lockdep_assert_wiphy(mld->wiphy);
+
+ /* Override the filter flags to accept all management frames. This is
+ * needed to support both P2P device discovery using probe requests and
+ * P2P service discovery using action frames
+ */
+ cmd->filter_flags = cpu_to_le32(MAC_CFG_FILTER_ACCEPT_CONTROL_AND_MGMT);
+
+ if (ext_disc)
+ cmd->p2p_dev.is_disc_extended = cpu_to_le32(1);
+}
+
+static void iwl_mld_fill_mac_cmd_ibss(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ struct iwl_mac_config_cmd *cmd)
+{
+ lockdep_assert_wiphy(mld->wiphy);
+
+ WARN_ON(vif->type != NL80211_IFTYPE_ADHOC);
+
+ cmd->filter_flags |= cpu_to_le32(MAC_CFG_FILTER_ACCEPT_BEACON |
+ MAC_CFG_FILTER_ACCEPT_PROBE_REQ |
+ MAC_CFG_FILTER_ACCEPT_GRP);
+}
+
+static int
+iwl_mld_rm_mac_from_fw(struct iwl_mld *mld, struct ieee80211_vif *vif)
+{
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+ struct iwl_mac_config_cmd cmd = {
+ .action = cpu_to_le32(FW_CTXT_ACTION_REMOVE),
+ .id_and_color = cpu_to_le32(mld_vif->fw_id),
+ };
+
+ return iwl_mld_send_mac_cmd(mld, &cmd);
+}
+
+int iwl_mld_mac_fw_action(struct iwl_mld *mld, struct ieee80211_vif *vif,
+ u32 action)
+{
+ struct iwl_mac_config_cmd cmd = {};
+
+ lockdep_assert_wiphy(mld->wiphy);
+
+ if (action == FW_CTXT_ACTION_REMOVE)
+ return iwl_mld_rm_mac_from_fw(mld, vif);
+
+ iwl_mld_mac_cmd_fill_common(mld, vif, &cmd, action);
+
+ switch (vif->type) {
+ case NL80211_IFTYPE_STATION:
+ iwl_mld_fill_mac_cmd_sta(mld, vif, action, &cmd);
+ break;
+ case NL80211_IFTYPE_AP:
+ iwl_mld_fill_mac_cmd_ap(mld, vif, &cmd);
+ break;
+ case NL80211_IFTYPE_MONITOR:
+ cmd.filter_flags =
+ cpu_to_le32(MAC_CFG_FILTER_PROMISC |
+ MAC_CFG_FILTER_ACCEPT_CONTROL_AND_MGMT |
+ MAC_CFG_FILTER_ACCEPT_BEACON |
+ MAC_CFG_FILTER_ACCEPT_PROBE_REQ |
+ MAC_CFG_FILTER_ACCEPT_GRP);
+ break;
+ case NL80211_IFTYPE_P2P_DEVICE:
+ iwl_mld_fill_mac_cmd_p2p_dev(mld, vif, &cmd);
+ break;
+ case NL80211_IFTYPE_ADHOC:
+ iwl_mld_fill_mac_cmd_ibss(mld, vif, &cmd);
+ break;
+ default:
+ WARN(1, "not supported yet\n");
+ return -EOPNOTSUPP;
+ }
+
+ return iwl_mld_send_mac_cmd(mld, &cmd);
+}
+
+IWL_MLD_ALLOC_FN(vif, vif)
+
+/* Constructor function for struct iwl_mld_vif */
+static int
+iwl_mld_init_vif(struct iwl_mld *mld, struct ieee80211_vif *vif)
+{
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+ int ret;
+
+ lockdep_assert_wiphy(mld->wiphy);
+
+ mld_vif->mld = mld;
+ mld_vif->roc_activity = ROC_NUM_ACTIVITIES;
+
+ ret = iwl_mld_allocate_vif_fw_id(mld, &mld_vif->fw_id, vif);
+ if (ret)
+ return ret;
+
+ if (!mld->fw_status.in_hw_restart) {
+ wiphy_work_init(&mld_vif->emlsr.unblock_tpt_wk,
+ iwl_mld_emlsr_unblock_tpt_wk);
+ wiphy_delayed_work_init(&mld_vif->emlsr.check_tpt_wk,
+ iwl_mld_emlsr_check_tpt);
+ wiphy_delayed_work_init(&mld_vif->emlsr.prevent_done_wk,
+ iwl_mld_emlsr_prevent_done_wk);
+ wiphy_delayed_work_init(&mld_vif->emlsr.tmp_non_bss_done_wk,
+ iwl_mld_emlsr_tmp_non_bss_done_wk);
+ }
+
+ return 0;
+}
+
+int iwl_mld_add_vif(struct iwl_mld *mld, struct ieee80211_vif *vif)
+{
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+ int ret;
+
+ lockdep_assert_wiphy(mld->wiphy);
+
+ ret = iwl_mld_init_vif(mld, vif);
+ if (ret)
+ return ret;
+
+ ret = iwl_mld_mac_fw_action(mld, vif, FW_CTXT_ACTION_ADD);
+ if (ret)
+ RCU_INIT_POINTER(mld->fw_id_to_vif[mld_vif->fw_id], NULL);
+
+ return ret;
+}
+
+int iwl_mld_rm_vif(struct iwl_mld *mld, struct ieee80211_vif *vif)
+{
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+ int ret;
+
+ lockdep_assert_wiphy(mld->wiphy);
+
+ ret = iwl_mld_mac_fw_action(mld, vif, FW_CTXT_ACTION_REMOVE);
+
+ if (WARN_ON(mld_vif->fw_id >= ARRAY_SIZE(mld->fw_id_to_vif)))
+ return -EINVAL;
+
+ RCU_INIT_POINTER(mld->fw_id_to_vif[mld_vif->fw_id], NULL);
+
+ iwl_mld_cancel_notifications_of_object(mld, IWL_MLD_OBJECT_TYPE_VIF,
+ mld_vif->fw_id);
+
+ return ret;
+}
+
+void iwl_mld_set_vif_associated(struct iwl_mld *mld,
+ struct ieee80211_vif *vif)
+{
+ struct ieee80211_bss_conf *link;
+ unsigned int link_id;
+
+ for_each_vif_active_link(vif, link, link_id) {
+ if (iwl_mld_link_set_associated(mld, vif, link))
+ IWL_ERR(mld, "failed to update link %d\n", link_id);
+ }
+
+ iwl_mld_recalc_multicast_filter(mld);
+}
+
+static void iwl_mld_get_fw_id_bss_bitmap_iter(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ u8 *fw_id_bitmap = _data;
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+
+ if (ieee80211_vif_type_p2p(vif) != NL80211_IFTYPE_STATION)
+ return;
+
+ *fw_id_bitmap |= BIT(mld_vif->fw_id);
+}
+
+u8 iwl_mld_get_fw_bss_vifs_ids(struct iwl_mld *mld)
+{
+ u8 fw_id_bitmap = 0;
+
+ ieee80211_iterate_active_interfaces_mtx(mld->hw,
+ IEEE80211_IFACE_SKIP_SDATA_NOT_IN_DRIVER,
+ iwl_mld_get_fw_id_bss_bitmap_iter,
+ &fw_id_bitmap);
+
+ return fw_id_bitmap;
+}
+
+void iwl_mld_handle_probe_resp_data_notif(struct iwl_mld *mld,
+ struct iwl_rx_packet *pkt)
+{
+ const struct iwl_probe_resp_data_notif *notif = (void *)pkt->data;
+ struct iwl_probe_resp_data *old_data, *new_data;
+ struct ieee80211_vif *vif;
+ struct iwl_mld_link *mld_link;
+
+ IWL_DEBUG_INFO(mld, "Probe response data notif: noa %d, csa %d\n",
+ notif->noa_active, notif->csa_counter);
+
+ if (IWL_FW_CHECK(mld, le32_to_cpu(notif->mac_id) >=
+ ARRAY_SIZE(mld->fw_id_to_vif),
+ "mac id is invalid: %d\n",
+ le32_to_cpu(notif->mac_id)))
+ return;
+
+ vif = wiphy_dereference(mld->wiphy,
+ mld->fw_id_to_vif[le32_to_cpu(notif->mac_id)]);
+
+ /* The firmware gives us the mac_id (not the link_id), and mac80211
+ * gives us a vif (not a link); bottom line, this flow is not MLD
+ * ready yet.
+ */
+ if (WARN_ON(!vif) || ieee80211_vif_is_mld(vif))
+ return;
+
+ if (notif->csa_counter != IWL_PROBE_RESP_DATA_NO_CSA &&
+ notif->csa_counter >= 1)
+ ieee80211_beacon_set_cntdwn(vif, notif->csa_counter);
+
+ if (!vif->p2p)
+ return;
+
+ mld_link = &iwl_mld_vif_from_mac80211(vif)->deflink;
+
+ new_data = kzalloc(sizeof(*new_data), GFP_KERNEL);
+ if (!new_data)
+ return;
+
+ memcpy(&new_data->notif, notif, sizeof(new_data->notif));
+
+ /* noa_attr contains 1 reserved byte, need to subtract it */
+ new_data->noa_len = sizeof(struct ieee80211_vendor_ie) +
+ sizeof(new_data->notif.noa_attr) - 1;
+
+ /*
+ * If it's a one time NoA, only one descriptor is needed,
+ * adjust the length according to len_low.
+ */
+ if (new_data->notif.noa_attr.len_low ==
+ sizeof(struct ieee80211_p2p_noa_desc) + 2)
+ new_data->noa_len -= sizeof(struct ieee80211_p2p_noa_desc);
+
+ old_data = wiphy_dereference(mld->wiphy, mld_link->probe_resp_data);
+ rcu_assign_pointer(mld_link->probe_resp_data, new_data);
+
+ if (old_data)
+ kfree_rcu(old_data, rcu_head);
+}
+
+void iwl_mld_handle_uapsd_misbehaving_ap_notif(struct iwl_mld *mld,
+ struct iwl_rx_packet *pkt)
+{
+ struct iwl_uapsd_misbehaving_ap_notif *notif = (void *)pkt->data;
+ struct ieee80211_vif *vif;
+
+ if (IWL_FW_CHECK(mld, notif->mac_id >= ARRAY_SIZE(mld->fw_id_to_vif),
+ "mac id is invalid: %d\n", notif->mac_id))
+ return;
+
+ vif = wiphy_dereference(mld->wiphy, mld->fw_id_to_vif[notif->mac_id]);
+
+ if (WARN_ON(!vif) || ieee80211_vif_is_mld(vif))
+ return;
+
+ IWL_WARN(mld, "uapsd misbehaving AP: %pM\n", vif->bss_conf.bssid);
+}
+
+void iwl_mld_handle_datapath_monitor_notif(struct iwl_mld *mld,
+ struct iwl_rx_packet *pkt)
+{
+ struct iwl_datapath_monitor_notif *notif = (void *)pkt->data;
+ struct ieee80211_bss_conf *link;
+ struct ieee80211_supported_band *sband;
+ const struct ieee80211_sta_he_cap *he_cap;
+ struct ieee80211_vif *vif;
+ struct iwl_mld_vif *mld_vif;
+
+ if (notif->type != cpu_to_le32(IWL_DP_MON_NOTIF_TYPE_EXT_CCA))
+ return;
+
+ link = iwl_mld_fw_id_to_link_conf(mld, notif->link_id);
+ if (WARN_ON(!link))
+ return;
+
+ vif = link->vif;
+ if (WARN_ON(!vif) || vif->type != NL80211_IFTYPE_STATION ||
+ !vif->cfg.assoc)
+ return;
+
+ if (!link->chanreq.oper.chan ||
+ link->chanreq.oper.chan->band != NL80211_BAND_2GHZ ||
+ link->chanreq.oper.width < NL80211_CHAN_WIDTH_40)
+ return;
+
+ mld_vif = iwl_mld_vif_from_mac80211(vif);
+
+ /* this shouldn't happen *again*, ignore it */
+ if (mld_vif->cca_40mhz_workaround != CCA_40_MHZ_WA_NONE)
+ return;
+
+ mld_vif->cca_40mhz_workaround = CCA_40_MHZ_WA_RECONNECT;
+
+ /*
+ * This capability manipulation isn't really ideal, but it's the
+ * easiest choice - otherwise we'd have to do some major changes
+ * in mac80211 to support this, which isn't worth it. This does
+ * mean that userspace may have outdated information, but that's
+ * actually not an issue at all.
+ */
+ sband = mld->wiphy->bands[NL80211_BAND_2GHZ];
+
+ WARN_ON(!sband->ht_cap.ht_supported);
+ WARN_ON(!(sband->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40));
+ sband->ht_cap.cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
+
+ he_cap = ieee80211_get_he_iftype_cap_vif(sband, vif);
+
+ if (he_cap) {
+ /* we know that ours is writable */
+ struct ieee80211_sta_he_cap *he = (void *)(uintptr_t)he_cap;
+
+ WARN_ON(!he->has_he);
+ WARN_ON(!(he->he_cap_elem.phy_cap_info[0] &
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G));
+ he->he_cap_elem.phy_cap_info[0] &=
+ ~IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G;
+ }
+
+ ieee80211_disconnect(vif, true);
+}
+
+void iwl_mld_reset_cca_40mhz_workaround(struct iwl_mld *mld,
+ struct ieee80211_vif *vif)
+{
+ struct ieee80211_supported_band *sband;
+ const struct ieee80211_sta_he_cap *he_cap;
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+
+ if (vif->type != NL80211_IFTYPE_STATION)
+ return;
+
+ if (mld_vif->cca_40mhz_workaround == CCA_40_MHZ_WA_NONE)
+ return;
+
+ /* Now we are just reconnecting with the new capabilities,
+ * but remember to reset the capabilities when we disconnect for real
+ */
+ if (mld_vif->cca_40mhz_workaround == CCA_40_MHZ_WA_RECONNECT) {
+ mld_vif->cca_40mhz_workaround = CCA_40_MHZ_WA_RESET;
+ return;
+ }
+
+ /* Now cca_40mhz_workaround == CCA_40_MHZ_WA_RESET */
+
+ sband = mld->wiphy->bands[NL80211_BAND_2GHZ];
+
+ sband->ht_cap.cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
+
+ he_cap = ieee80211_get_he_iftype_cap_vif(sband, vif);
+
+ if (he_cap) {
+ /* we know that ours is writable */
+ struct ieee80211_sta_he_cap *he = (void *)(uintptr_t)he_cap;
+
+ he->he_cap_elem.phy_cap_info[0] |=
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G;
+ }
+
+ mld_vif->cca_40mhz_workaround = CCA_40_MHZ_WA_NONE;
+}
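+
+/*
+ * Summary of the CCA 40 MHz workaround state machine implemented above:
+ * an EXT_CCA datapath monitor notification on a 2.4 GHz/40 MHz
+ * association strips the 40 MHz HT/HE capabilities, moves the state to
+ * CCA_40_MHZ_WA_RECONNECT and triggers a disconnect-with-reconnect.
+ * The first call to iwl_mld_reset_cca_40mhz_workaround() (presumably on
+ * that forced disconnection) only advances the state to
+ * CCA_40_MHZ_WA_RESET so the reconnection happens at 20 MHz; a later
+ * call, on a real disconnection, restores the capabilities and returns
+ * the state to CCA_40_MHZ_WA_NONE.
+ */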
+
+struct ieee80211_vif *iwl_mld_get_bss_vif(struct iwl_mld *mld)
+{
+ unsigned long fw_id_bitmap = iwl_mld_get_fw_bss_vifs_ids(mld);
+ int fw_id;
+
+ if (hweight8(fw_id_bitmap) != 1)
+ return NULL;
+
+ fw_id = __ffs(fw_id_bitmap);
+
+ return wiphy_dereference(mld->wiphy,
+ mld->fw_id_to_vif[fw_id]);
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/iface.h b/drivers/net/wireless/intel/iwlwifi/mld/iface.h
new file mode 100644
index 000000000000..d1d56b081bf6
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mld/iface.h
@@ -0,0 +1,233 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2024-2025 Intel Corporation
+ */
+#ifndef __iwl_mld_iface_h__
+#define __iwl_mld_iface_h__
+
+#include <net/mac80211.h>
+
+#include "link.h"
+#include "session-protect.h"
+#include "d3.h"
+
+enum iwl_mld_cca_40mhz_wa_status {
+ CCA_40_MHZ_WA_NONE,
+ CCA_40_MHZ_WA_RESET,
+ CCA_40_MHZ_WA_RECONNECT,
+};
+
+/**
+ * enum iwl_mld_emlsr_blocked - defines reasons for which EMLSR is blocked
+ *
+ * These blocks are applied/stored per-VIF.
+ *
+ * @IWL_MLD_EMLSR_BLOCKED_PREVENTION: Prevent repeated EMLSR enter/exit
+ * @IWL_MLD_EMLSR_BLOCKED_WOWLAN: WOWLAN is preventing EMLSR
+ * @IWL_MLD_EMLSR_BLOCKED_ROC: remain-on-channel is preventing EMLSR
+ * @IWL_MLD_EMLSR_BLOCKED_NON_BSS: An active non-BSS interface's link is
+ * preventing EMLSR
+ * @IWL_MLD_EMLSR_BLOCKED_TMP_NON_BSS: An expected active non-BSS interface's
+ * link is preventing EMLSR. This is a temporary blocking that is set when
+ * there is an indication that a non-BSS interface is to be added.
+ * @IWL_MLD_EMLSR_BLOCKED_TPT: throughput is too low to make EMLSR worthwhile
+ */
+enum iwl_mld_emlsr_blocked {
+ IWL_MLD_EMLSR_BLOCKED_PREVENTION = 0x1,
+ IWL_MLD_EMLSR_BLOCKED_WOWLAN = 0x2,
+ IWL_MLD_EMLSR_BLOCKED_ROC = 0x4,
+ IWL_MLD_EMLSR_BLOCKED_NON_BSS = 0x8,
+ IWL_MLD_EMLSR_BLOCKED_TMP_NON_BSS = 0x10,
+ IWL_MLD_EMLSR_BLOCKED_TPT = 0x20,
+};
+
+/**
+ * enum iwl_mld_emlsr_exit - defines reasons for exiting EMLSR
+ *
+ * Reasons to exit EMLSR may be either link specific or even specific to a
+ * combination of links.
+ *
+ * @IWL_MLD_EMLSR_EXIT_BLOCK: Exit due to a block reason being set
+ * @IWL_MLD_EMLSR_EXIT_MISSED_BEACON: Exit due to missed beacons
+ * @IWL_MLD_EMLSR_EXIT_FAIL_ENTRY: FW failed to enter EMLSR
+ * @IWL_MLD_EMLSR_EXIT_CSA: EMLSR prevented due to channel switch on link
+ * @IWL_MLD_EMLSR_EXIT_EQUAL_BAND: EMLSR prevented as both links share the band
+ * @IWL_MLD_EMLSR_EXIT_LOW_RSSI: Link RSSI is unsuitable for EMLSR
+ * @IWL_MLD_EMLSR_EXIT_LINK_USAGE: Exit EMLSR due to low TPT on secondary link
+ * @IWL_MLD_EMLSR_EXIT_BT_COEX: Exit EMLSR due to BT coexistence
+ * @IWL_MLD_EMLSR_EXIT_CHAN_LOAD: Exit EMLSR because the primary channel is not
+ * loaded enough to justify EMLSR.
+ * @IWL_MLD_EMLSR_EXIT_RFI: Exit EMLSR due to RFI
+ * @IWL_MLD_EMLSR_EXIT_FW_REQUEST: Exit EMLSR because the FW requested it
+ */
+enum iwl_mld_emlsr_exit {
+ IWL_MLD_EMLSR_EXIT_BLOCK = 0x1,
+ IWL_MLD_EMLSR_EXIT_MISSED_BEACON = 0x2,
+ IWL_MLD_EMLSR_EXIT_FAIL_ENTRY = 0x4,
+ IWL_MLD_EMLSR_EXIT_CSA = 0x8,
+ IWL_MLD_EMLSR_EXIT_EQUAL_BAND = 0x10,
+ IWL_MLD_EMLSR_EXIT_LOW_RSSI = 0x20,
+ IWL_MLD_EMLSR_EXIT_LINK_USAGE = 0x40,
+ IWL_MLD_EMLSR_EXIT_BT_COEX = 0x80,
+ IWL_MLD_EMLSR_EXIT_CHAN_LOAD = 0x100,
+ IWL_MLD_EMLSR_EXIT_RFI = 0x200,
+ IWL_MLD_EMLSR_EXIT_FW_REQUEST = 0x400,
+};
+
+/**
+ * struct iwl_mld_emlsr - per-VIF data about EMLSR operation
+ *
+ * @primary: The current primary link
+ * @selected_primary: Primary link as selected during the last link selection
+ * @selected_links: Links as selected during the last link selection
+ * @blocked_reasons: Reasons preventing EMLSR from being enabled
+ * @last_exit_reason: Reason for the last EMLSR exit
+ * @last_exit_ts: Time of the last EMLSR exit (if @last_exit_reason is non-zero)
+ * @exit_repeat_count: Number of times EMLSR was exited for the same reason
+ * @unblock_tpt_wk: Unblock EMLSR because the throughput limit was reached
+ * @check_tpt_wk: a worker to check if IWL_MLD_EMLSR_BLOCKED_TPT should be
+ * added, for example if there is no longer enough traffic.
+ * @prevent_done_wk: Worker to remove %IWL_MLD_EMLSR_BLOCKED_PREVENTION
+ * @tmp_non_bss_done_wk: Worker to remove %IWL_MLD_EMLSR_BLOCKED_TMP_NON_BSS
+ */
+struct iwl_mld_emlsr {
+ struct_group(zeroed_on_not_authorized,
+ u8 primary;
+
+ u8 selected_primary;
+ u16 selected_links;
+
+ enum iwl_mld_emlsr_blocked blocked_reasons;
+
+ enum iwl_mld_emlsr_exit last_exit_reason;
+ unsigned long last_exit_ts;
+ u8 exit_repeat_count;
+ );
+
+ struct wiphy_work unblock_tpt_wk;
+ struct wiphy_delayed_work check_tpt_wk;
+
+ struct wiphy_delayed_work prevent_done_wk;
+ struct wiphy_delayed_work tmp_non_bss_done_wk;
+};
+
+/**
+ * struct iwl_mld_vif - virtual interface (MAC context) configuration parameters
+ *
+ * @fw_id: fw id of the mac context.
+ * @session_protect: session protection parameters
+ * @ap_sta: pointer to AP sta, for easier access to it.
+ * Relevant only for STA vifs.
+ * @authorized: indicates the AP station was set to authorized
+ * @bigtks: BIGTKs of the AP, for beacon protection.
+ * Only valid for STA. (FIXME: needs to be per link)
+ * @num_associated_stas: number of associated STAs. Relevant only for AP mode.
+ * @ap_ibss_active: whether the AP/IBSS was started
+ * @roc_activity: the id of the roc_activity running. Relevant for p2p device
+ * only. Set to %ROC_NUM_ACTIVITIES when not in use.
+ * @cca_40mhz_workaround: When we are connected in 2.4 GHz and 40 MHz, and the
+ * environment is too loaded, we work around this by reconnecting to the
+ * same AP with 20 MHz. This manages the status of the workaround.
+ * @beacon_inject_active: indicates an active debugfs beacon ie injection
+ * @low_latency_causes: bit flags, indicating the causes for low-latency,
+ * see @iwl_mld_low_latency_cause.
+ * @ps_disabled: indicates that PS is disabled for this interface
+ * @mld: pointer to the mld structure.
+ * @deflink: default link data, for use in non-MLO.
+ * @link: reference to link data for each valid link, for use in MLO.
+ * @emlsr: information related to EMLSR
+ * @wowlan_data: data used by the wowlan suspend flow
+ * @use_ps_poll: use ps_poll frames
+ * @disable_bf: disable beacon filter
+ * @dbgfs_slink: debugfs symlink for this interface
+ */
+struct iwl_mld_vif {
+ /* Add here fields that need clean up on restart */
+ struct_group(zeroed_on_hw_restart,
+ u8 fw_id;
+ struct iwl_mld_session_protect session_protect;
+ struct ieee80211_sta *ap_sta;
+ bool authorized;
+ struct ieee80211_key_conf __rcu *bigtks[2];
+ u8 num_associated_stas;
+ bool ap_ibss_active;
+ u32 roc_activity;
+ enum iwl_mld_cca_40mhz_wa_status cca_40mhz_workaround;
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ bool beacon_inject_active;
+#endif
+ u8 low_latency_causes;
+ bool ps_disabled;
+ );
+ /* And here fields that survive a fw restart */
+ struct iwl_mld *mld;
+ struct iwl_mld_link deflink;
+ struct iwl_mld_link __rcu *link[IEEE80211_MLD_MAX_NUM_LINKS];
+
+ struct iwl_mld_emlsr emlsr;
+
+#ifdef CONFIG_PM_SLEEP
+ struct iwl_mld_wowlan_data wowlan_data;
+#endif
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ bool use_ps_poll;
+ bool disable_bf;
+ struct dentry *dbgfs_slink;
+#endif
+};
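+
+/*
+ * Note: the zeroed_on_hw_restart group above is what the restart
+ * cleanup path clears (presumably via CLEANUP_STRUCT() in
+ * iwl_mld_cleanup_vif()); fields placed after the group deliberately
+ * survive a fw restart.
+ */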
+
+static inline struct iwl_mld_vif *
+iwl_mld_vif_from_mac80211(struct ieee80211_vif *vif)
+{
+ return (void *)vif->drv_priv;
+}
+
+#define iwl_mld_link_dereference_check(mld_vif, link_id) \
+ rcu_dereference_check((mld_vif)->link[link_id], \
+ lockdep_is_held(&mld_vif->mld->wiphy->mtx))
+
+#define for_each_mld_vif_valid_link(mld_vif, mld_link) \
+ for (int link_id = 0; link_id < ARRAY_SIZE((mld_vif)->link); \
+ link_id++) \
+ if ((mld_link = iwl_mld_link_dereference_check(mld_vif, link_id)))
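+
+/*
+ * Note that for_each_mld_vif_valid_link() declares the int link_id
+ * iterator itself, so loop bodies (e.g. iwl_mld_cleanup_vif()) may
+ * reference link_id directly without defining it.
+ */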
+
+/* Retrieve pointer to mld link from mac80211 structures */
+static inline struct iwl_mld_link *
+iwl_mld_link_from_mac80211(struct ieee80211_bss_conf *bss_conf)
+{
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(bss_conf->vif);
+
+ return iwl_mld_link_dereference_check(mld_vif, bss_conf->link_id);
+}
+
+int iwl_mld_mac80211_iftype_to_fw(const struct ieee80211_vif *vif);
+
+/* Cleanup function for struct iwl_mld_vif, will be called in restart */
+void iwl_mld_cleanup_vif(void *data, u8 *mac, struct ieee80211_vif *vif);
+int iwl_mld_mac_fw_action(struct iwl_mld *mld, struct ieee80211_vif *vif,
+ u32 action);
+int iwl_mld_add_vif(struct iwl_mld *mld, struct ieee80211_vif *vif);
+int iwl_mld_rm_vif(struct iwl_mld *mld, struct ieee80211_vif *vif);
+void iwl_mld_set_vif_associated(struct iwl_mld *mld,
+ struct ieee80211_vif *vif);
+u8 iwl_mld_get_fw_bss_vifs_ids(struct iwl_mld *mld);
+void iwl_mld_handle_probe_resp_data_notif(struct iwl_mld *mld,
+ struct iwl_rx_packet *pkt);
+
+void iwl_mld_handle_datapath_monitor_notif(struct iwl_mld *mld,
+ struct iwl_rx_packet *pkt);
+
+void iwl_mld_handle_uapsd_misbehaving_ap_notif(struct iwl_mld *mld,
+ struct iwl_rx_packet *pkt);
+
+void iwl_mld_reset_cca_40mhz_workaround(struct iwl_mld *mld,
+ struct ieee80211_vif *vif);
+
+static inline bool iwl_mld_vif_low_latency(const struct iwl_mld_vif *mld_vif)
+{
+ return !!mld_vif->low_latency_causes;
+}
+
+struct ieee80211_vif *iwl_mld_get_bss_vif(struct iwl_mld *mld);
+
+#endif /* __iwl_mld_iface_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/key.c b/drivers/net/wireless/intel/iwlwifi/mld/key.c
new file mode 100644
index 000000000000..0eff13e5ffd5
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mld/key.c
@@ -0,0 +1,358 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2024 Intel Corporation
+ */
+#include "key.h"
+#include "iface.h"
+#include "sta.h"
+#include "fw/api/datapath.h"
+
+static u32 iwl_mld_get_key_flags(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key)
+{
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+ bool pairwise = key->flags & IEEE80211_KEY_FLAG_PAIRWISE;
+ bool igtk = key->keyidx == 4 || key->keyidx == 5;
+ u32 flags = 0;
+
+ if (!pairwise)
+ flags |= IWL_SEC_KEY_FLAG_MCAST_KEY;
+
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_TKIP:
+ flags |= IWL_SEC_KEY_FLAG_CIPHER_TKIP;
+ break;
+ case WLAN_CIPHER_SUITE_AES_CMAC:
+ case WLAN_CIPHER_SUITE_CCMP:
+ flags |= IWL_SEC_KEY_FLAG_CIPHER_CCMP;
+ break;
+ case WLAN_CIPHER_SUITE_GCMP_256:
+ case WLAN_CIPHER_SUITE_BIP_GMAC_256:
+ flags |= IWL_SEC_KEY_FLAG_KEY_SIZE;
+ fallthrough;
+ case WLAN_CIPHER_SUITE_GCMP:
+ case WLAN_CIPHER_SUITE_BIP_GMAC_128:
+ flags |= IWL_SEC_KEY_FLAG_CIPHER_GCMP;
+ break;
+ }
+
+ if (!sta && vif->type == NL80211_IFTYPE_STATION)
+ sta = mld_vif->ap_sta;
+
+ /* If we are installing an iGTK (in AP or STA mode), we need to tell
+ * the firmware this key will en/decrypt MGMT frames.
+ * Same goes if we are installing a pairwise key for an MFP station.
+ * If we're installing a groupwise key (which is not an iGTK), we
+ * will not use this key for MGMT frames.
+ */
+ if ((sta && sta->mfp && pairwise) || igtk)
+ flags |= IWL_SEC_KEY_FLAG_MFP;
+
+ if (key->flags & IEEE80211_KEY_FLAG_SPP_AMSDU)
+ flags |= IWL_SEC_KEY_FLAG_SPP_AMSDU;
+
+ return flags;
+}
+
+static u32 iwl_mld_get_key_sta_mask(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key)
+{
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+ struct ieee80211_link_sta *link_sta;
+ int sta_id;
+
+ lockdep_assert_wiphy(mld->wiphy);
+
+ /* AP group keys are per link and should be on the mcast/bcast STA */
+ if (vif->type == NL80211_IFTYPE_AP &&
+ !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
+ struct iwl_mld_link *link = NULL;
+
+ if (key->link_id >= 0)
+ link = iwl_mld_link_dereference_check(mld_vif,
+ key->link_id);
+
+ if (WARN_ON(!link))
+ return 0;
+
+ /* In this stage we should have both the bcast and mcast STAs */
+ if (WARN_ON(link->bcast_sta.sta_id == IWL_INVALID_STA ||
+ link->mcast_sta.sta_id == IWL_INVALID_STA))
+ return 0;
+
+ /* IGTK/BIGTK to bcast STA */
+ if (key->keyidx >= 4)
+ return BIT(link->bcast_sta.sta_id);
+
+ /* GTK for data to mcast STA */
+ return BIT(link->mcast_sta.sta_id);
+ }
+
+ /* for client mode use the AP STA also for group keys */
+ if (!sta && vif->type == NL80211_IFTYPE_STATION)
+ sta = mld_vif->ap_sta;
+
+ /* STA should be non-NULL now */
+ if (WARN_ON(!sta))
+ return 0;
+
+ /* Key is not per-link, get the full sta mask */
+ if (key->link_id < 0)
+ return iwl_mld_fw_sta_id_mask(mld, sta);
+
+ /* The link_sta shouldn't be NULL now, but this is checked in
+ * iwl_mld_fw_sta_id_mask
+ */
+ link_sta = link_sta_dereference_check(sta, key->link_id);
+
+ sta_id = iwl_mld_fw_sta_id_from_link_sta(mld, link_sta);
+ if (sta_id < 0)
+ return 0;
+
+ return BIT(sta_id);
+}
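+
+/*
+ * To summarize the mapping above: AP-mode group keys go to the per-link
+ * broadcast STA (IGTK/BIGTK, key index >= 4) or multicast STA (GTK);
+ * client-mode group keys use the AP STA; pairwise keys (link_id < 0)
+ * use the station's full FW STA id mask, while keys tied to a specific
+ * link use only that link's STA.
+ */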
+
+static int iwl_mld_add_key_to_fw(struct iwl_mld *mld, u32 sta_mask,
+ u32 key_flags, struct ieee80211_key_conf *key)
+{
+ struct iwl_sec_key_cmd cmd = {
+ .action = cpu_to_le32(FW_CTXT_ACTION_ADD),
+ .u.add.sta_mask = cpu_to_le32(sta_mask),
+ .u.add.key_id = cpu_to_le32(key->keyidx),
+ .u.add.key_flags = cpu_to_le32(key_flags),
+ .u.add.tx_seq = cpu_to_le64(atomic64_read(&key->tx_pn)),
+ };
+ bool tkip = key->cipher == WLAN_CIPHER_SUITE_TKIP;
+ int max_key_len = sizeof(cmd.u.add.key);
+
+ if (WARN_ON(!sta_mask))
+ return -EINVAL;
+
+ if (WARN_ON(key->keylen > max_key_len))
+ return -EINVAL;
+
+ memcpy(cmd.u.add.key, key->key, key->keylen);
+
+ if (tkip) {
+ memcpy(cmd.u.add.tkip_mic_rx_key,
+ key->key + NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY,
+ 8);
+ memcpy(cmd.u.add.tkip_mic_tx_key,
+ key->key + NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY,
+ 8);
+ }
+
+ return iwl_mld_send_cmd_pdu(mld, WIDE_ID(DATA_PATH_GROUP, SEC_KEY_CMD),
+ &cmd);
+}
+
+static void iwl_mld_remove_key_from_fw(struct iwl_mld *mld, u32 sta_mask,
+ u32 key_flags, u32 keyidx)
+{
+ struct iwl_sec_key_cmd cmd = {
+ .action = cpu_to_le32(FW_CTXT_ACTION_REMOVE),
+ .u.remove.sta_mask = cpu_to_le32(sta_mask),
+ .u.remove.key_id = cpu_to_le32(keyidx),
+ .u.remove.key_flags = cpu_to_le32(key_flags),
+ };
+
+ if (WARN_ON(!sta_mask))
+ return;
+
+ iwl_mld_send_cmd_pdu(mld, WIDE_ID(DATA_PATH_GROUP, SEC_KEY_CMD), &cmd);
+}
+
+void iwl_mld_remove_key(struct iwl_mld *mld, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key)
+{
+ u32 sta_mask = iwl_mld_get_key_sta_mask(mld, vif, sta, key);
+ u32 key_flags = iwl_mld_get_key_flags(mld, vif, sta, key);
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+
+ lockdep_assert_wiphy(mld->wiphy);
+
+ if (!sta_mask)
+ return;
+
+ if (key->keyidx == 4 || key->keyidx == 5) {
+ struct iwl_mld_link *mld_link;
+ unsigned int link_id = 0;
+
+ /* set to -1 for non-MLO right now */
+ if (key->link_id >= 0)
+ link_id = key->link_id;
+
+ mld_link = iwl_mld_link_dereference_check(mld_vif, link_id);
+ if (WARN_ON(!mld_link))
+ return;
+
+ if (mld_link->igtk == key)
+ mld_link->igtk = NULL;
+
+ mld->num_igtks--;
+ }
+
+ iwl_mld_remove_key_from_fw(mld, sta_mask, key_flags, key->keyidx);
+
+ /* no longer in HW */
+ key->hw_key_idx = STA_KEY_IDX_INVALID;
+}
+
+int iwl_mld_add_key(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key)
+{
+ u32 sta_mask = iwl_mld_get_key_sta_mask(mld, vif, sta, key);
+ u32 key_flags = iwl_mld_get_key_flags(mld, vif, sta, key);
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+ struct iwl_mld_link *mld_link = NULL;
+ bool igtk = key->keyidx == 4 || key->keyidx == 5;
+ int ret;
+
+ lockdep_assert_wiphy(mld->wiphy);
+
+ if (!sta_mask)
+ return -EINVAL;
+
+ if (igtk) {
+ if (mld->num_igtks == IWL_MAX_NUM_IGTKS)
+ return -EOPNOTSUPP;
+
+ u8 link_id = 0;
+
+ /* set to -1 for non-MLO right now */
+ if (key->link_id >= 0)
+ link_id = key->link_id;
+
+ mld_link = iwl_mld_link_dereference_check(mld_vif, link_id);
+
+ if (WARN_ON(!mld_link))
+ return -EINVAL;
+
+ if (mld_link->igtk) {
+ IWL_DEBUG_MAC80211(mld, "remove old IGTK %d\n",
+ mld_link->igtk->keyidx);
+ iwl_mld_remove_key(mld, vif, sta, mld_link->igtk);
+ }
+
+ WARN_ON(mld_link->igtk);
+ }
+
+ ret = iwl_mld_add_key_to_fw(mld, sta_mask, key_flags, key);
+ if (ret)
+ return ret;
+
+ if (mld_link) {
+ mld_link->igtk = key;
+ mld->num_igtks++;
+ }
+
+ /* The value itself doesn't matter, but it must not be
+ * STA_KEY_IDX_INVALID, so that we can tell the key is installed in fw.
+ */
+ key->hw_key_idx = 0;
+
+ return 0;
+}
+
+struct remove_ap_keys_iter_data {
+ u8 link_id;
+ struct ieee80211_sta *sta;
+};
+
+static void iwl_mld_remove_ap_keys_iter(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key,
+ void *_data)
+{
+ struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+ struct remove_ap_keys_iter_data *data = _data;
+
+ if (key->hw_key_idx == STA_KEY_IDX_INVALID)
+ return;
+
+ /* All the pairwise keys should have been removed by now */
+ if (WARN_ON(sta))
+ return;
+
+ if (key->link_id >= 0 && key->link_id != data->link_id)
+ return;
+
+ iwl_mld_remove_key(mld, vif, data->sta, key);
+}
+
+void iwl_mld_remove_ap_keys(struct iwl_mld *mld, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, unsigned int link_id)
+{
+ struct remove_ap_keys_iter_data iter_data = {
+ .link_id = link_id,
+ .sta = sta,
+ };
+
+ if (WARN_ON_ONCE(vif->type != NL80211_IFTYPE_STATION))
+ return;
+
+ ieee80211_iter_keys(mld->hw, vif,
+ iwl_mld_remove_ap_keys_iter,
+ &iter_data);
+}
+
+struct iwl_mld_sta_key_update_data {
+ struct ieee80211_sta *sta;
+ u32 old_sta_mask;
+ u32 new_sta_mask;
+ int err;
+};
+
+static void iwl_mld_update_sta_key_iter(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key,
+ void *_data)
+{
+ struct iwl_mld_sta_key_update_data *data = _data;
+ struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+ struct iwl_sec_key_cmd cmd = {
+ .action = cpu_to_le32(FW_CTXT_ACTION_MODIFY),
+ .u.modify.old_sta_mask = cpu_to_le32(data->old_sta_mask),
+ .u.modify.new_sta_mask = cpu_to_le32(data->new_sta_mask),
+ .u.modify.key_id = cpu_to_le32(key->keyidx),
+ .u.modify.key_flags =
+ cpu_to_le32(iwl_mld_get_key_flags(mld, vif, sta, key)),
+ };
+ int err;
+
+ /* only need to do this for pairwise keys (link_id == -1) */
+ if (sta != data->sta || key->link_id >= 0)
+ return;
+
+ err = iwl_mld_send_cmd_pdu(mld, WIDE_ID(DATA_PATH_GROUP, SEC_KEY_CMD),
+ &cmd);
+
+ if (err)
+ data->err = err;
+}
+
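+/*
+ * Re-point existing pairwise keys to a new FW STA id mask using
+ * FW_CTXT_ACTION_MODIFY instead of removing and re-adding them;
+ * presumably used when a station's set of link STAs changes (e.g. on
+ * MLO link activation/deactivation).
+ */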
+int iwl_mld_update_sta_keys(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ u32 old_sta_mask,
+ u32 new_sta_mask)
+{
+ struct iwl_mld_sta_key_update_data data = {
+ .sta = sta,
+ .old_sta_mask = old_sta_mask,
+ .new_sta_mask = new_sta_mask,
+ };
+
+ ieee80211_iter_keys(mld->hw, vif, iwl_mld_update_sta_key_iter,
+ &data);
+ return data.err;
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/key.h b/drivers/net/wireless/intel/iwlwifi/mld/key.h
new file mode 100644
index 000000000000..a68ea48913be
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mld/key.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2024 Intel Corporation
+ */
+#ifndef __iwl_mld_key_h__
+#define __iwl_mld_key_h__
+
+#include "mld.h"
+#include <net/mac80211.h>
+#include "fw/api/sta.h"
+
+void iwl_mld_remove_key(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key);
+int iwl_mld_add_key(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key);
+void iwl_mld_remove_ap_keys(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ unsigned int link_id);
+
+int iwl_mld_update_sta_keys(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ u32 old_sta_mask,
+ u32 new_sta_mask);
+
+static inline void
+iwl_mld_cleanup_keys_iter(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key, void *data)
+{
+ key->hw_key_idx = STA_KEY_IDX_INVALID;
+}
+
+#endif /* __iwl_mld_key_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/led.c b/drivers/net/wireless/intel/iwlwifi/mld/led.c
new file mode 100644
index 000000000000..a37b32cbc6e6
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mld/led.c
@@ -0,0 +1,100 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2024 Intel Corporation
+ */
+#include <linux/leds.h>
+#include <net/mac80211.h>
+
+#include "fw/api/led.h"
+#include "mld.h"
+#include "led.h"
+#include "hcmd.h"
+
+static void iwl_mld_send_led_fw_cmd(struct iwl_mld *mld, bool on)
+{
+ struct iwl_led_cmd led_cmd = {
+ .status = cpu_to_le32(on),
+ };
+ int err;
+
+ if (WARN_ON(!mld->fw_status.running))
+ return;
+
+ err = iwl_mld_send_cmd_with_flags_pdu(mld, WIDE_ID(LONG_GROUP,
+ LEDS_CMD),
+ CMD_ASYNC, &led_cmd);
+
+ if (err)
+ IWL_WARN(mld, "LED command failed: %d\n", err);
+}
+
+static void iwl_led_brightness_set(struct led_classdev *led_cdev,
+ enum led_brightness brightness)
+{
+ struct iwl_mld *mld = container_of(led_cdev, struct iwl_mld, led);
+
+ if (!mld->fw_status.running)
+ return;
+
+ iwl_mld_send_led_fw_cmd(mld, brightness > 0);
+}
+
+int iwl_mld_leds_init(struct iwl_mld *mld)
+{
+ int mode = iwlwifi_mod_params.led_mode;
+ int ret;
+
+ switch (mode) {
+ case IWL_LED_BLINK:
+ IWL_ERR(mld, "Blink led mode not supported, used default\n");
+ fallthrough;
+ case IWL_LED_DEFAULT:
+ case IWL_LED_RF_STATE:
+ mode = IWL_LED_RF_STATE;
+ break;
+ case IWL_LED_DISABLE:
+ IWL_INFO(mld, "Led disabled\n");
+ return 0;
+ default:
+ return -EINVAL;
+ }
+
+ mld->led.name = kasprintf(GFP_KERNEL, "%s-led",
+ wiphy_name(mld->hw->wiphy));
+ if (!mld->led.name)
+ return -ENOMEM;
+
+ mld->led.brightness_set = iwl_led_brightness_set;
+ mld->led.max_brightness = 1;
+
+ if (mode == IWL_LED_RF_STATE)
+ mld->led.default_trigger =
+ ieee80211_get_radio_led_name(mld->hw);
+
+ ret = led_classdev_register(mld->trans->dev, &mld->led);
+ if (ret) {
+ kfree(mld->led.name);
+ mld->led.name = NULL;
+ IWL_INFO(mld, "Failed to enable led\n");
+ }
+
+ return ret;
+}
+
+void iwl_mld_led_config_fw(struct iwl_mld *mld)
+{
+ if (!mld->led.name)
+ return;
+
+ iwl_mld_send_led_fw_cmd(mld, mld->led.brightness > 0);
+}
+
+void iwl_mld_leds_exit(struct iwl_mld *mld)
+{
+ if (!mld->led.name)
+ return;
+
+ led_classdev_unregister(&mld->led);
+ kfree(mld->led.name);
+ mld->led.name = NULL;
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/led.h b/drivers/net/wireless/intel/iwlwifi/mld/led.h
new file mode 100644
index 000000000000..0954e214e73d
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mld/led.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2024 Intel Corporation
+ */
+#ifndef __iwl_mld_led_h__
+#define __iwl_mld_led_h__
+
+#include "mld.h"
+
+#ifdef CONFIG_IWLWIFI_LEDS
+int iwl_mld_leds_init(struct iwl_mld *mld);
+void iwl_mld_leds_exit(struct iwl_mld *mld);
+void iwl_mld_led_config_fw(struct iwl_mld *mld);
+#else
+static inline int iwl_mld_leds_init(struct iwl_mld *mld)
+{
+ return 0;
+}
+
+static inline void iwl_mld_leds_exit(struct iwl_mld *mld)
+{
+}
+
+static inline void iwl_mld_led_config_fw(struct iwl_mld *mld)
+{
+}
+#endif
+
+#endif /* __iwl_mld_led_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/link.c b/drivers/net/wireless/intel/iwlwifi/mld/link.c
new file mode 100644
index 000000000000..82a4979a3af3
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mld/link.c
@@ -0,0 +1,1213 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2024-2025 Intel Corporation
+ */
+
+#include "constants.h"
+#include "link.h"
+#include "iface.h"
+#include "mlo.h"
+#include "hcmd.h"
+#include "phy.h"
+#include "fw/api/rs.h"
+#include "fw/api/txq.h"
+#include "fw/api/mac.h"
+
+#include "fw/api/context.h"
+#include "fw/dbg.h"
+
+static int iwl_mld_send_link_cmd(struct iwl_mld *mld,
+ struct iwl_link_config_cmd *cmd,
+ enum iwl_ctxt_action action)
+{
+ int ret;
+
+ lockdep_assert_wiphy(mld->wiphy);
+
+ cmd->action = cpu_to_le32(action);
+ ret = iwl_mld_send_cmd_pdu(mld,
+ WIDE_ID(MAC_CONF_GROUP, LINK_CONFIG_CMD),
+ cmd);
+ if (ret)
+ IWL_ERR(mld, "Failed to send LINK_CONFIG_CMD (action:%d): %d\n",
+ action, ret);
+ return ret;
+}
+
+static int iwl_mld_add_link_to_fw(struct iwl_mld *mld,
+ struct ieee80211_bss_conf *link_conf)
+{
+ struct ieee80211_vif *vif = link_conf->vif;
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+ struct iwl_mld_link *link = iwl_mld_link_from_mac80211(link_conf);
+ struct iwl_link_config_cmd cmd = {};
+
+ lockdep_assert_wiphy(mld->wiphy);
+
+ if (WARN_ON(!link))
+ return -EINVAL;
+
+ cmd.link_id = cpu_to_le32(link->fw_id);
+ cmd.mac_id = cpu_to_le32(mld_vif->fw_id);
+ cmd.spec_link_id = link_conf->link_id;
+ cmd.phy_id = cpu_to_le32(FW_CTXT_ID_INVALID);
+
+ ether_addr_copy(cmd.local_link_addr, link_conf->addr);
+
+ if (vif->type == NL80211_IFTYPE_ADHOC && link_conf->bssid)
+ ether_addr_copy(cmd.ibss_bssid_addr, link_conf->bssid);
+
+ return iwl_mld_send_link_cmd(mld, &cmd, FW_CTXT_ACTION_ADD);
+}
+
+/* Get the basic rates of the used band and add the mandatory ones */
+static void iwl_mld_fill_rates(struct iwl_mld *mld,
+ struct ieee80211_bss_conf *link,
+ struct ieee80211_chanctx_conf *chan_ctx,
+ __le32 *cck_rates, __le32 *ofdm_rates)
+{
+ struct cfg80211_chan_def *chandef =
+ iwl_mld_get_chandef_from_chanctx(mld, chan_ctx);
+ struct ieee80211_supported_band *sband =
+ mld->hw->wiphy->bands[chandef->chan->band];
+ unsigned long basic = link->basic_rates;
+ int lowest_present_ofdm = 100;
+ int lowest_present_cck = 100;
+ u32 cck = 0;
+ u32 ofdm = 0;
+ int i;
+
+ for_each_set_bit(i, &basic, BITS_PER_LONG) {
+ int hw = sband->bitrates[i].hw_value;
+
+ if (hw >= IWL_FIRST_OFDM_RATE) {
+ ofdm |= BIT(hw - IWL_FIRST_OFDM_RATE);
+ if (lowest_present_ofdm > hw)
+ lowest_present_ofdm = hw;
+ } else {
+ BUILD_BUG_ON(IWL_FIRST_CCK_RATE != 0);
+
+ cck |= BIT(hw);
+ if (lowest_present_cck > hw)
+ lowest_present_cck = hw;
+ }
+ }
+
+ /* Now we've got the basic rates as bitmaps in the ofdm and cck
+ * variables. This isn't sufficient though, as there might not
+ * be all the right rates in the bitmap. E.g. if the only basic
+ * rates are 5.5 Mbps and 11 Mbps, we still need to add 1 Mbps
+ * and 6 Mbps because the 802.11-2007 standard says in 9.6:
+ *
+ * [...] a STA responding to a received frame shall transmit
+ * its Control Response frame [...] at the highest rate in the
+ * BSSBasicRateSet parameter that is less than or equal to the
+ * rate of the immediately previous frame in the frame exchange
+ * sequence ([...]) and that is of the same modulation class
+ * ([...]) as the received frame. If no rate contained in the
+ * BSSBasicRateSet parameter meets these conditions, then the
+ * control frame sent in response to a received frame shall be
+ * transmitted at the highest mandatory rate of the PHY that is
+ * less than or equal to the rate of the received frame, and
+ * that is of the same modulation class as the received frame.
+ *
+ * As a consequence, we need to add all mandatory rates that are
+ * lower than all of the basic rates to these bitmaps.
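+ *
+ * For example, if the only basic rates are 36 and 48 Mbps (OFDM), the
+ * mandatory 6, 12 and 24 Mbps rates are added below; likewise, if the
+ * only basic CCK rate is 11 Mbps, then 1, 2 and 5.5 Mbps are added.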
+ */
+
+ if (lowest_present_ofdm > IWL_RATE_24M_INDEX)
+ ofdm |= IWL_RATE_BIT_MSK(24) >> IWL_FIRST_OFDM_RATE;
+ if (lowest_present_ofdm > IWL_RATE_12M_INDEX)
+ ofdm |= IWL_RATE_BIT_MSK(12) >> IWL_FIRST_OFDM_RATE;
+ /* 6M already there or needed so always add */
+ ofdm |= IWL_RATE_BIT_MSK(6) >> IWL_FIRST_OFDM_RATE;
+
+ /* CCK is a bit more complex with DSSS vs. HR/DSSS vs. ERP.
+ * Note, however:
+ * - if no CCK rates are basic, it must be ERP since there must
+ * be some basic rates at all, so they're OFDM => ERP PHY
+ * (or we're in 5 GHz, and the cck bitmap will never be used)
+ * - if 11M is a basic rate, it must be ERP as well, so add 5.5M
+ * - if 5.5M is basic, 1M and 2M are mandatory
+ * - if 2M is basic, 1M is mandatory
+ * - if 1M is basic, that's the only valid ACK rate.
+ * As a consequence, it's not as complicated as it sounds, just add
+ * any lower rates to the ACK rate bitmap.
+ */
+ if (lowest_present_cck > IWL_RATE_11M_INDEX)
+ cck |= IWL_RATE_BIT_MSK(11) >> IWL_FIRST_CCK_RATE;
+ if (lowest_present_cck > IWL_RATE_5M_INDEX)
+ cck |= IWL_RATE_BIT_MSK(5) >> IWL_FIRST_CCK_RATE;
+ if (lowest_present_cck > IWL_RATE_2M_INDEX)
+ cck |= IWL_RATE_BIT_MSK(2) >> IWL_FIRST_CCK_RATE;
+ /* 1M already there or needed so always add */
+ cck |= IWL_RATE_BIT_MSK(1) >> IWL_FIRST_CCK_RATE;
+
+ *cck_rates = cpu_to_le32((u32)cck);
+ *ofdm_rates = cpu_to_le32((u32)ofdm);
+}
+
+static void iwl_mld_fill_protection_flags(struct iwl_mld *mld,
+ struct ieee80211_bss_conf *link,
+ __le32 *protection_flags)
+{
+ u8 protection_mode = link->ht_operation_mode &
+ IEEE80211_HT_OP_MODE_PROTECTION;
+ u8 ht_flag = LINK_PROT_FLG_HT_PROT | LINK_PROT_FLG_FAT_PROT;
+
+ IWL_DEBUG_RATE(mld, "HT protection mode: %d\n", protection_mode);
+
+ if (link->use_cts_prot)
+ *protection_flags |= cpu_to_le32(LINK_PROT_FLG_TGG_PROTECT);
+
+ /* See section 9.23.3.1 of IEEE 80211-2012.
+ * Nongreenfield HT STAs Present is not supported.
+ */
+ switch (protection_mode) {
+ case IEEE80211_HT_OP_MODE_PROTECTION_NONE:
+ break;
+ case IEEE80211_HT_OP_MODE_PROTECTION_NONMEMBER:
+ case IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED:
+ *protection_flags |= cpu_to_le32(ht_flag);
+ break;
+ case IEEE80211_HT_OP_MODE_PROTECTION_20MHZ:
+ /* Protect when channel wider than 20MHz */
+ if (link->chanreq.oper.width > NL80211_CHAN_WIDTH_20)
+ *protection_flags |= cpu_to_le32(ht_flag);
+ break;
+ }
+}
+
+static u8 iwl_mld_mac80211_ac_to_fw_ac(enum ieee80211_ac_numbers ac)
+{
+ static const u8 mac80211_ac_to_fw[] = {
+ AC_VO,
+ AC_VI,
+ AC_BE,
+ AC_BK
+ };
+
+ return mac80211_ac_to_fw[ac];
+}
+
+static void iwl_mld_fill_qos_params(struct ieee80211_bss_conf *link,
+ struct iwl_ac_qos *ac, __le32 *qos_flags)
+{
+ struct iwl_mld_link *mld_link = iwl_mld_link_from_mac80211(link);
+
+ /* no need to check mld_link since it is done in the caller */
+
+ for (int mac_ac = 0; mac_ac < IEEE80211_NUM_ACS; mac_ac++) {
+ u8 txf = iwl_mld_mac80211_ac_to_fw_tx_fifo(mac_ac);
+ u8 fw_ac = iwl_mld_mac80211_ac_to_fw_ac(mac_ac);
+
+ ac[fw_ac].cw_min =
+ cpu_to_le16(mld_link->queue_params[mac_ac].cw_min);
+ ac[fw_ac].cw_max =
+ cpu_to_le16(mld_link->queue_params[mac_ac].cw_max);
+ ac[fw_ac].edca_txop =
+ cpu_to_le16(mld_link->queue_params[mac_ac].txop * 32);
+ ac[fw_ac].aifsn = mld_link->queue_params[mac_ac].aifs;
+ ac[fw_ac].fifos_mask = BIT(txf);
+ }
+
+ if (link->qos)
+ *qos_flags |= cpu_to_le32(MAC_QOS_FLG_UPDATE_EDCA);
+
+ if (link->chanreq.oper.width != NL80211_CHAN_WIDTH_20_NOHT)
+ *qos_flags |= cpu_to_le32(MAC_QOS_FLG_TGN);
+}
+
+static bool iwl_mld_fill_mu_edca(struct iwl_mld *mld,
+ const struct iwl_mld_link *mld_link,
+ struct iwl_he_backoff_conf *trig_based_txf)
+{
+ for (int mac_ac = 0; mac_ac < IEEE80211_NUM_ACS; mac_ac++) {
+ const struct ieee80211_he_mu_edca_param_ac_rec *mu_edca =
+ &mld_link->queue_params[mac_ac].mu_edca_param_rec;
+ u8 fw_ac = iwl_mld_mac80211_ac_to_fw_ac(mac_ac);
+
+ if (!mld_link->queue_params[mac_ac].mu_edca)
+ return false;
+
+ trig_based_txf[fw_ac].cwmin =
+ cpu_to_le16(mu_edca->ecw_min_max & 0xf);
+ trig_based_txf[fw_ac].cwmax =
+ cpu_to_le16((mu_edca->ecw_min_max & 0xf0) >> 4);
+ trig_based_txf[fw_ac].aifsn =
+ cpu_to_le16(mu_edca->aifsn & 0xf);
+ trig_based_txf[fw_ac].mu_time =
+ cpu_to_le16(mu_edca->mu_edca_timer);
+ }
+ return true;
+}
+
+static u8 iwl_mld_sta_rx_bw_to_fw(enum ieee80211_sta_rx_bandwidth bw)
+{
+ switch (bw) {
+ default: /* potential future values not supported by this hw/driver */
+ case IEEE80211_STA_RX_BW_20:
+ return IWL_LINK_MODIFY_BW_20;
+ case IEEE80211_STA_RX_BW_40:
+ return IWL_LINK_MODIFY_BW_40;
+ case IEEE80211_STA_RX_BW_80:
+ return IWL_LINK_MODIFY_BW_80;
+ case IEEE80211_STA_RX_BW_160:
+ return IWL_LINK_MODIFY_BW_160;
+ case IEEE80211_STA_RX_BW_320:
+ return IWL_LINK_MODIFY_BW_320;
+ }
+}
+
+static int _iwl_mld_change_link_in_fw(struct iwl_mld *mld,
+ struct ieee80211_bss_conf *link,
+ enum ieee80211_sta_rx_bandwidth bw,
+ u32 changes)
+{
+ struct iwl_mld_link *mld_link = iwl_mld_link_from_mac80211(link);
+ struct ieee80211_vif *vif = link->vif;
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+ struct ieee80211_chanctx_conf *chan_ctx;
+ struct iwl_link_config_cmd cmd = {};
+ u32 flags = 0;
+
+ lockdep_assert_wiphy(mld->wiphy);
+
+ if (WARN_ON(!mld_link))
+ return -EINVAL;
+
+ cmd.link_id = cpu_to_le32(mld_link->fw_id);
+ cmd.spec_link_id = link->link_id;
+ cmd.mac_id = cpu_to_le32(mld_vif->fw_id);
+
+ chan_ctx = wiphy_dereference(mld->wiphy, mld_link->chan_ctx);
+
+ cmd.phy_id = cpu_to_le32(chan_ctx ?
+ iwl_mld_phy_from_mac80211(chan_ctx)->fw_id :
+ FW_CTXT_ID_INVALID);
+
+ ether_addr_copy(cmd.local_link_addr, link->addr);
+
+ cmd.active = cpu_to_le32(mld_link->active);
+
+ if ((changes & LINK_CONTEXT_MODIFY_ACTIVE) && !mld_link->active &&
+ mld_link->silent_deactivation) {
+ /* We are de-activating a link that is undergoing a CSA with
+ * immediate quiet in EMLSR. Tell the firmware not to send any
+ * frames.
+ */
+ cmd.block_tx = 1;
+ mld_link->silent_deactivation = false;
+ }
+
+ if (vif->type == NL80211_IFTYPE_ADHOC && link->bssid)
+ ether_addr_copy(cmd.ibss_bssid_addr, link->bssid);
+
+ /* Channel context is needed to get the rates */
+ if (chan_ctx)
+ iwl_mld_fill_rates(mld, link, chan_ctx, &cmd.cck_rates,
+ &cmd.ofdm_rates);
+
+ cmd.cck_short_preamble = cpu_to_le32(link->use_short_preamble);
+ cmd.short_slot = cpu_to_le32(link->use_short_slot);
+
+ iwl_mld_fill_protection_flags(mld, link, &cmd.protection_flags);
+
+ iwl_mld_fill_qos_params(link, cmd.ac, &cmd.qos_flags);
+
+ cmd.bi = cpu_to_le32(link->beacon_int);
+ cmd.dtim_interval = cpu_to_le32(link->beacon_int * link->dtim_period);
+
+ if (changes & LINK_CONTEXT_MODIFY_BANDWIDTH)
+ cmd.modify_bandwidth = iwl_mld_sta_rx_bw_to_fw(bw);
+
+ /* Configure HE parameters only if HE is supported, and only after
+ * the parameters are set in mac80211 (meaning after assoc)
+ */
+ if (!link->he_support || iwlwifi_mod_params.disable_11ax ||
+ (vif->type == NL80211_IFTYPE_STATION && !vif->cfg.assoc)) {
+ changes &= ~LINK_CONTEXT_MODIFY_HE_PARAMS;
+ goto send_cmd;
+ }
+
+ /* ap_sta may be NULL if we're disconnecting */
+ if (mld_vif->ap_sta) {
+ struct ieee80211_link_sta *link_sta =
+ link_sta_dereference_check(mld_vif->ap_sta,
+ link->link_id);
+
+ if (!WARN_ON(!link_sta) && link_sta->he_cap.has_he &&
+ link_sta->he_cap.he_cap_elem.mac_cap_info[5] &
+ IEEE80211_HE_MAC_CAP5_OM_CTRL_UL_MU_DATA_DIS_RX)
+ cmd.ul_mu_data_disable = 1;
+ }
+
+ cmd.htc_trig_based_pkt_ext = link->htc_trig_based_pkt_ext;
+
+ if (link->uora_exists) {
+ cmd.rand_alloc_ecwmin = link->uora_ocw_range & 0x7;
+ cmd.rand_alloc_ecwmax = (link->uora_ocw_range >> 3) & 0x7;
+ }
+
+ if (iwl_mld_fill_mu_edca(mld, mld_link, cmd.trig_based_txf))
+ flags |= LINK_FLG_MU_EDCA_CW;
+
+ cmd.bss_color = link->he_bss_color.color;
+
+ if (!link->he_bss_color.enabled)
+ flags |= LINK_FLG_BSS_COLOR_DIS;
+
+ cmd.frame_time_rts_th = cpu_to_le16(link->frame_time_rts_th);
+
+ /* Block 26-tone RU OFDMA transmissions */
+ if (mld_link->he_ru_2mhz_block)
+ flags |= LINK_FLG_RU_2MHZ_BLOCK;
+
+ if (link->nontransmitted) {
+ ether_addr_copy(cmd.ref_bssid_addr, link->transmitter_bssid);
+ cmd.bssid_index = link->bssid_index;
+ }
+
+ /* The only EHT parameter is puncturing, and starting from PHY cmd
+ * version 6 - it is sent there. For older versions of the PHY cmd,
+ * puncturing is not needed at all.
+ */
+ if (WARN_ON(changes & LINK_CONTEXT_MODIFY_EHT_PARAMS))
+ changes &= ~LINK_CONTEXT_MODIFY_EHT_PARAMS;
+
+send_cmd:
+ cmd.modify_mask = cpu_to_le32(changes);
+ cmd.flags = cpu_to_le32(flags);
+
+ return iwl_mld_send_link_cmd(mld, &cmd, FW_CTXT_ACTION_MODIFY);
+}
+
+int iwl_mld_change_link_in_fw(struct iwl_mld *mld,
+ struct ieee80211_bss_conf *link,
+ u32 changes)
+{
+ if (WARN_ON(changes & LINK_CONTEXT_MODIFY_BANDWIDTH))
+ changes &= ~LINK_CONTEXT_MODIFY_BANDWIDTH;
+
+ return _iwl_mld_change_link_in_fw(mld, link, 0, changes);
+}
+
+int iwl_mld_change_link_omi_bw(struct iwl_mld *mld,
+ struct ieee80211_bss_conf *link,
+ enum ieee80211_sta_rx_bandwidth bw)
+{
+ return _iwl_mld_change_link_in_fw(mld, link, bw,
+ LINK_CONTEXT_MODIFY_BANDWIDTH);
+}
+
+int iwl_mld_activate_link(struct iwl_mld *mld,
+ struct ieee80211_bss_conf *link)
+{
+ struct iwl_mld_link *mld_link = iwl_mld_link_from_mac80211(link);
+ int ret;
+
+ lockdep_assert_wiphy(mld->wiphy);
+
+ if (WARN_ON(!mld_link || mld_link->active))
+ return -EINVAL;
+
+ mld_link->rx_omi.exit_ts = jiffies;
+ mld_link->active = true;
+
+ ret = iwl_mld_change_link_in_fw(mld, link,
+ LINK_CONTEXT_MODIFY_ACTIVE);
+ if (ret)
+ mld_link->active = false;
+
+ return ret;
+}
+
+void iwl_mld_deactivate_link(struct iwl_mld *mld,
+ struct ieee80211_bss_conf *link)
+{
+ struct iwl_mld_link *mld_link = iwl_mld_link_from_mac80211(link);
+ struct iwl_probe_resp_data *probe_data;
+
+ lockdep_assert_wiphy(mld->wiphy);
+
+ if (WARN_ON(!mld_link || !mld_link->active))
+ return;
+
+ iwl_mld_cancel_session_protection(mld, link->vif, link->link_id);
+
+ /* If we deactivate the link, we will probably remove it, or switch
+ * channel. In both cases, the CSA or Notice of Absence information is
+ * now irrelevant. Remove the data here.
+ */
+ probe_data = wiphy_dereference(mld->wiphy, mld_link->probe_resp_data);
+ RCU_INIT_POINTER(mld_link->probe_resp_data, NULL);
+ if (probe_data)
+ kfree_rcu(probe_data, rcu_head);
+
+ mld_link->active = false;
+
+ iwl_mld_change_link_in_fw(mld, link, LINK_CONTEXT_MODIFY_ACTIVE);
+
+ /* Now that the link is not active in FW, we don't expect any new
+ * notifications for it. Cancel the ones that are already pending
+ */
+ iwl_mld_cancel_notifications_of_object(mld, IWL_MLD_OBJECT_TYPE_LINK,
+ mld_link->fw_id);
+}
+
+static void
+iwl_mld_rm_link_from_fw(struct iwl_mld *mld, struct ieee80211_bss_conf *link)
+{
+ struct iwl_mld_link *mld_link = iwl_mld_link_from_mac80211(link);
+ struct iwl_link_config_cmd cmd = {};
+
+ lockdep_assert_wiphy(mld->wiphy);
+
+ if (WARN_ON(!mld_link))
+ return;
+
+ cmd.link_id = cpu_to_le32(mld_link->fw_id);
+ cmd.spec_link_id = link->link_id;
+ cmd.phy_id = cpu_to_le32(FW_CTXT_ID_INVALID);
+
+ iwl_mld_send_link_cmd(mld, &cmd, FW_CTXT_ACTION_REMOVE);
+}
+
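+ /* Request an RX OMI bandwidth update towards the AP.
+ *
+ * rx_omi.last_max_bw tracks the bandwidth we last told the AP we are
+ * using: when we ask for the maximum it is set, so that a later AP
+ * bandwidth increase triggers another OMI, and when we ask for a
+ * reduction it is cleared, so that AP changes are ignored
+ * (see iwl_mld_omi_ap_changed_bw()).
+ */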
+static void iwl_mld_omi_bw_update(struct iwl_mld *mld,
+ struct ieee80211_bss_conf *link_conf,
+ struct iwl_mld_link *mld_link,
+ struct ieee80211_link_sta *link_sta,
+ enum ieee80211_sta_rx_bandwidth bw,
+ bool ap_update)
+{
+ enum ieee80211_sta_rx_bandwidth apply_bw;
+
+ mld_link->rx_omi.desired_bw = bw;
+
+ /* Can't update OMI while an update is already in progress;
+ * desired_bw was set, so when the FW notification arrives the
+ * worker will see the change and apply the new desired bw.
+ */
+ if (mld_link->rx_omi.bw_in_progress)
+ return;
+
+ if (bw == IEEE80211_STA_RX_BW_MAX)
+ apply_bw = ieee80211_chan_width_to_rx_bw(link_conf->chanreq.oper.width);
+ else
+ apply_bw = bw;
+
+ if (!ap_update) {
+ /* The update isn't due to AP tracking after leaving OMI,
+ * where the AP could increase BW and then we must tell
+ * it that we can do the increased BW as well, if we did
+ * update the chandef.
+ * In this case, if we want MAX, then we will need to send
+ * a new OMI to the AP if it increases its own bandwidth, as
+ * we can only send to what we're doing at the moment (due to
+ * internal and FW limitations, and being worried the AP might
+ * break). In this case, set last_max_bw; otherwise, if we
+ * really want to decrease our bandwidth, set it to 0 to
+ * indicate no updates are needed if the AP changes.
+ */
+ if (bw == IEEE80211_STA_RX_BW_MAX)
+ mld_link->rx_omi.last_max_bw = apply_bw;
+ else
+ mld_link->rx_omi.last_max_bw = 0;
+ } else {
+ /* Otherwise, if we're already trying to do maximum and
+ * the AP is changing, set last_max_bw to the new max the
+ * AP is using; we'll only get to this code path if the
+ * new bandwidth of the AP is bigger than what we sent it
+ * previously. This avoids repeatedly sending updates if
+ * it changes bandwidth, only doing it once on an increase.
+ */
+ mld_link->rx_omi.last_max_bw = apply_bw;
+ }
+
+ if (ieee80211_prepare_rx_omi_bw(link_sta, bw)) {
+ mld_link->rx_omi.bw_in_progress = apply_bw;
+ iwl_mld_change_link_omi_bw(mld, link_conf, apply_bw);
+ }
+}
+
+static void iwl_mld_omi_bw_finished_work(struct wiphy *wiphy,
+ struct wiphy_work *work)
+{
+ struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
+ struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+ struct iwl_mld_link *mld_link =
+ container_of(work, typeof(*mld_link), rx_omi.finished_work.work);
+ enum ieee80211_sta_rx_bandwidth desired_bw, switched_to_bw;
+ struct ieee80211_vif *vif = mld_link->vif;
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+ struct ieee80211_bss_conf *link_conf;
+ struct ieee80211_link_sta *link_sta;
+
+ if (!mld_vif->ap_sta)
+ return;
+
+ link_sta = wiphy_dereference(mld->wiphy,
+ mld_vif->ap_sta->link[mld_link->link_id]);
+ if (WARN_ON_ONCE(!link_sta))
+ return;
+
+ link_conf = link_conf_dereference_protected(vif, link_sta->link_id);
+ if (WARN_ON_ONCE(!link_conf))
+ return;
+
+ if (WARN_ON(!mld_link->rx_omi.bw_in_progress))
+ return;
+
+ desired_bw = mld_link->rx_omi.desired_bw;
+ switched_to_bw = mld_link->rx_omi.bw_in_progress;
+
+ ieee80211_finalize_rx_omi_bw(link_sta);
+ mld_link->rx_omi.bw_in_progress = 0;
+
+ if (desired_bw != switched_to_bw)
+ iwl_mld_omi_bw_update(mld, link_conf, mld_link, link_sta,
+ desired_bw, false);
+}
+
+static struct ieee80211_vif *
+iwl_mld_get_omi_bw_reduction_pointers(struct iwl_mld *mld,
+ struct ieee80211_link_sta **link_sta,
+ struct iwl_mld_link **mld_link)
+{
+ struct iwl_mld_vif *mld_vif;
+ struct ieee80211_vif *vif;
+ int n_link_stas = 0;
+
+ *link_sta = NULL;
+
+ if (mld->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_SC)
+ return NULL;
+
+ vif = iwl_mld_get_bss_vif(mld);
+ if (!vif)
+ return NULL;
+
+ for (int i = 0; i < ARRAY_SIZE(mld->fw_id_to_link_sta); i++) {
+ struct ieee80211_link_sta *tmp;
+
+ tmp = wiphy_dereference(mld->wiphy, mld->fw_id_to_link_sta[i]);
+ if (IS_ERR_OR_NULL(tmp))
+ continue;
+
+ n_link_stas++;
+ *link_sta = tmp;
+ }
+
+ /* can't do anything if we have TDLS peers or EMLSR */
+ if (n_link_stas != 1)
+ return NULL;
+
+ mld_vif = iwl_mld_vif_from_mac80211(vif);
+ *mld_link = iwl_mld_link_dereference_check(mld_vif,
+ (*link_sta)->link_id);
+ if (WARN_ON(!*mld_link))
+ return NULL;
+
+ return vif;
+}
+
+void iwl_mld_omi_ap_changed_bw(struct iwl_mld *mld,
+ struct ieee80211_bss_conf *link_conf,
+ enum ieee80211_sta_rx_bandwidth bw)
+{
+ struct ieee80211_link_sta *link_sta;
+ struct iwl_mld_link *mld_link;
+ struct ieee80211_vif *vif;
+
+ vif = iwl_mld_get_omi_bw_reduction_pointers(mld, &link_sta, &mld_link);
+ if (!vif)
+ return;
+
+ if (WARN_ON(link_conf->vif != vif))
+ return;
+
+ /* This is 0 if we requested an OMI BW reduction and don't want to
+ * be sending an OMI when the AP's bandwidth changes.
+ */
+ if (!mld_link->rx_omi.last_max_bw)
+ return;
+
+ /* We only need to tell the AP if it increases BW over what we last
+ * told it we were using, if it reduces then our last OMI to it will
+ * not get used anyway (e.g. we said we want 160 but it's doing 80.)
+ */
+ if (bw < mld_link->rx_omi.last_max_bw)
+ return;
+
+ iwl_mld_omi_bw_update(mld, link_conf, mld_link, link_sta, bw, true);
+}
+
+void iwl_mld_handle_omi_status_notif(struct iwl_mld *mld,
+ struct iwl_rx_packet *pkt)
+{
+ struct ieee80211_link_sta *link_sta;
+ struct iwl_mld_link *mld_link;
+ struct ieee80211_vif *vif;
+
+ vif = iwl_mld_get_omi_bw_reduction_pointers(mld, &link_sta, &mld_link);
+ if (IWL_FW_CHECK(mld, !vif, "unexpected OMI notification\n"))
+ return;
+
+ if (IWL_FW_CHECK(mld, !mld_link->rx_omi.bw_in_progress,
+ "OMI notification when not requested\n"))
+ return;
+
+ wiphy_delayed_work_queue(mld->hw->wiphy,
+ &mld_link->rx_omi.finished_work,
+ msecs_to_jiffies(IWL_MLD_OMI_AP_SETTLE_DELAY));
+}
+
+void iwl_mld_leave_omi_bw_reduction(struct iwl_mld *mld)
+{
+ struct ieee80211_bss_conf *link_conf;
+ struct ieee80211_link_sta *link_sta;
+ struct iwl_mld_link *mld_link;
+ struct ieee80211_vif *vif;
+
+ vif = iwl_mld_get_omi_bw_reduction_pointers(mld, &link_sta, &mld_link);
+ if (!vif)
+ return;
+
+ link_conf = link_conf_dereference_protected(vif, link_sta->link_id);
+ if (WARN_ON_ONCE(!link_conf))
+ return;
+
+ if (!link_conf->he_support)
+ return;
+
+ mld_link->rx_omi.exit_ts = jiffies;
+
+ iwl_mld_omi_bw_update(mld, link_conf, mld_link, link_sta,
+ IEEE80211_STA_RX_BW_MAX, false);
+}
+
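+ /* Decide whether to reduce our RX bandwidth to 80 MHz via OMI to save
+ * power. Entering and leaving the reduction use different channel-load
+ * thresholds (hysteresis), and re-entering is further delayed by
+ * IWL_MLD_OMI_EXIT_PROTECTION after the last exit.
+ */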
+void iwl_mld_check_omi_bw_reduction(struct iwl_mld *mld)
+{
+ enum ieee80211_sta_rx_bandwidth bw = IEEE80211_STA_RX_BW_MAX;
+ struct ieee80211_chanctx_conf *chanctx;
+ struct ieee80211_bss_conf *link_conf;
+ struct ieee80211_link_sta *link_sta;
+ struct cfg80211_chan_def chandef;
+ struct iwl_mld_link *mld_link;
+ struct iwl_mld_vif *mld_vif;
+ struct ieee80211_vif *vif;
+ struct iwl_mld_phy *phy;
+ u16 punctured;
+ int exit_thr;
+
+ /* not allowed in CAM mode */
+ if (iwlmld_mod_params.power_scheme == IWL_POWER_SCHEME_CAM)
+ return;
+
+ /* must have one BSS connection (no P2P), no TDLS, nor EMLSR */
+ vif = iwl_mld_get_omi_bw_reduction_pointers(mld, &link_sta, &mld_link);
+ if (!vif)
+ return;
+
+ link_conf = link_conf_dereference_protected(vif, link_sta->link_id);
+ if (WARN_ON_ONCE(!link_conf))
+ return;
+
+ if (!link_conf->he_support)
+ return;
+
+ chanctx = wiphy_dereference(mld->wiphy, mld_link->chan_ctx);
+ if (WARN_ON(!chanctx))
+ return;
+
+ mld_vif = iwl_mld_vif_from_mac80211(vif);
+ if (!mld_vif->authorized)
+ goto apply;
+
+ /* must not be in low-latency mode */
+ if (iwl_mld_vif_low_latency(mld_vif))
+ goto apply;
+
+ chandef = link_conf->chanreq.oper;
+
+ switch (chandef.width) {
+ case NL80211_CHAN_WIDTH_320:
+ exit_thr = IWL_MLD_OMI_EXIT_CHAN_LOAD_320;
+ break;
+ case NL80211_CHAN_WIDTH_160:
+ exit_thr = IWL_MLD_OMI_EXIT_CHAN_LOAD_160;
+ break;
+ default:
+ /* since we reduce to 80 MHz, must have more to start with */
+ goto apply;
+ }
+
+ /* not to be done if primary 80 MHz is punctured */
+ if (cfg80211_chandef_primary(&chandef, NL80211_CHAN_WIDTH_80,
+ &punctured) < 0 ||
+ punctured != 0)
+ goto apply;
+
+ phy = iwl_mld_phy_from_mac80211(chanctx);
+
+ if (phy->channel_load_by_us > exit_thr) {
+ /* send OMI for max bandwidth */
+ goto apply;
+ }
+
+ if (phy->channel_load_by_us > IWL_MLD_OMI_ENTER_CHAN_LOAD) {
+ /* no changes between enter/exit thresholds */
+ return;
+ }
+
+ if (time_is_after_jiffies(mld_link->rx_omi.exit_ts +
+ msecs_to_jiffies(IWL_MLD_OMI_EXIT_PROTECTION)))
+ return;
+
+ /* reduce bandwidth to 80 MHz to save power */
+ bw = IEEE80211_STA_RX_BW_80;
+apply:
+ iwl_mld_omi_bw_update(mld, link_conf, mld_link, link_sta, bw, false);
+}
+
+IWL_MLD_ALLOC_FN(link, bss_conf)
+
+/* Constructor function for struct iwl_mld_link */
+static int
+iwl_mld_init_link(struct iwl_mld *mld, struct ieee80211_bss_conf *link,
+ struct iwl_mld_link *mld_link)
+{
+ mld_link->vif = link->vif;
+ mld_link->link_id = link->link_id;
+
+ iwl_mld_init_internal_sta(&mld_link->bcast_sta);
+ iwl_mld_init_internal_sta(&mld_link->mcast_sta);
+ iwl_mld_init_internal_sta(&mld_link->aux_sta);
+
+ wiphy_delayed_work_init(&mld_link->rx_omi.finished_work,
+ iwl_mld_omi_bw_finished_work);
+
+ return iwl_mld_allocate_link_fw_id(mld, &mld_link->fw_id, link);
+}
+
+/* Initializes the link structure, maps fw id to the ieee80211_bss_conf, and
+ * adds a link to the fw
+ */
+int iwl_mld_add_link(struct iwl_mld *mld,
+ struct ieee80211_bss_conf *bss_conf)
+{
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(bss_conf->vif);
+ struct iwl_mld_link *link = iwl_mld_link_from_mac80211(bss_conf);
+ bool is_deflink = bss_conf == &bss_conf->vif->bss_conf;
+ int ret;
+
+ if (!link) {
+ if (is_deflink)
+ link = &mld_vif->deflink;
+ else
+ link = kzalloc(sizeof(*link), GFP_KERNEL);
+ /* bail out if the allocation failed */
+ if (!link)
+ return -ENOMEM;
+ } else {
+ WARN_ON(!mld->fw_status.in_hw_restart);
+ }
+
+ ret = iwl_mld_init_link(mld, bss_conf, link);
+ if (ret)
+ goto free;
+
+ rcu_assign_pointer(mld_vif->link[bss_conf->link_id], link);
+
+ ret = iwl_mld_add_link_to_fw(mld, bss_conf);
+ if (ret) {
+ RCU_INIT_POINTER(mld->fw_id_to_bss_conf[link->fw_id], NULL);
+ RCU_INIT_POINTER(mld_vif->link[bss_conf->link_id], NULL);
+ goto free;
+ }
+
+ return ret;
+
+free:
+ if (!is_deflink)
+ kfree(link);
+ return ret;
+}
+
+/* Remove link from fw, unmap the bss_conf, and destroy the link structure */
+void iwl_mld_remove_link(struct iwl_mld *mld,
+ struct ieee80211_bss_conf *bss_conf)
+{
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(bss_conf->vif);
+ struct iwl_mld_link *link = iwl_mld_link_from_mac80211(bss_conf);
+ bool is_deflink = link == &mld_vif->deflink;
+
+ if (WARN_ON(!link || link->active))
+ return;
+
+ iwl_mld_rm_link_from_fw(mld, bss_conf);
+ /* Continue cleanup on failure */
+
+ RCU_INIT_POINTER(mld_vif->link[bss_conf->link_id], NULL);
+
+ wiphy_delayed_work_cancel(mld->wiphy, &link->rx_omi.finished_work);
+
+ if (!WARN_ON(link->fw_id >= mld->fw->ucode_capa.num_links))
+ RCU_INIT_POINTER(mld->fw_id_to_bss_conf[link->fw_id], NULL);
+
+ /* Free last, once the link is no longer referenced above */
+ if (!is_deflink)
+ kfree_rcu(link, rcu_head);
+}
+
+void iwl_mld_handle_missed_beacon_notif(struct iwl_mld *mld,
+ struct iwl_rx_packet *pkt)
+{
+ const struct iwl_missed_beacons_notif *notif = (const void *)pkt->data;
+ union iwl_dbg_tlv_tp_data tp_data = { .fw_pkt = pkt };
+ u32 link_id = le32_to_cpu(notif->link_id);
+ u32 missed_bcon = le32_to_cpu(notif->consec_missed_beacons);
+ u32 missed_bcon_since_rx =
+ le32_to_cpu(notif->consec_missed_beacons_since_last_rx);
+ u32 scnd_lnk_bcn_lost =
+ le32_to_cpu(notif->consec_missed_beacons_other_link);
+ struct ieee80211_bss_conf *link_conf =
+ iwl_mld_fw_id_to_link_conf(mld, link_id);
+ u32 bss_param_ch_cnt_link_id;
+ struct ieee80211_vif *vif;
+
+ if (WARN_ON(!link_conf))
+ return;
+
+ vif = link_conf->vif;
+ bss_param_ch_cnt_link_id = link_conf->bss_param_ch_cnt_link_id;
+
+ IWL_DEBUG_INFO(mld,
+ "missed bcn link_id=%u, %u consecutive=%u\n",
+ link_id, missed_bcon, missed_bcon_since_rx);
+
+ if (WARN_ON(!vif))
+ return;
+
+ mld->trans->dbg.dump_file_name_ext_valid = true;
+ snprintf(mld->trans->dbg.dump_file_name_ext, IWL_FW_INI_MAX_NAME,
+ "LinkId_%d_MacType_%d", link_id,
+ iwl_mld_mac80211_iftype_to_fw(vif));
+
+ iwl_dbg_tlv_time_point(&mld->fwrt,
+ IWL_FW_INI_TIME_POINT_MISSED_BEACONS, &tp_data);
+
+ if (missed_bcon >= IWL_MLD_MISSED_BEACONS_THRESHOLD_LONG) {
+ if (missed_bcon_since_rx >=
+ IWL_MLD_MISSED_BEACONS_SINCE_RX_THOLD) {
+ ieee80211_connection_loss(vif);
+ return;
+ }
+ IWL_WARN(mld,
+ "missed beacons exceeds threshold, but receiving data. Stay connected, Expect bugs.\n");
+ return;
+ }
+
+ if (missed_bcon_since_rx > IWL_MLD_MISSED_BEACONS_THRESHOLD) {
+ ieee80211_cqm_beacon_loss_notify(vif, GFP_ATOMIC);
+
+ /* try to switch links, no-op if we don't have MLO */
+ iwl_mld_int_mlo_scan(mld, vif);
+ }
+
+ /* no more logic if we're not in EMLSR */
+ if (hweight16(vif->active_links) <= 1)
+ return;
+
+ /* We are processing a notification before link activation */
+ if (le32_to_cpu(notif->other_link_id) == FW_CTXT_ID_INVALID)
+ return;
+
+ /* Exit EMLSR if we lost more than
+ * IWL_MLD_MISSED_BEACONS_EXIT_ESR_THRESH beacons on both links
+ * OR more than IWL_MLD_BCN_LOSS_EXIT_ESR_THRESH on current link.
+ * OR more than IWL_MLD_BCN_LOSS_EXIT_ESR_THRESH_BSS_PARAM_CHANGED
+ * on current link and the link's bss_param_ch_count has changed on
+ * the other link's beacon.
+ */
+ if ((missed_bcon >= IWL_MLD_BCN_LOSS_EXIT_ESR_THRESH_2_LINKS &&
+ scnd_lnk_bcn_lost >= IWL_MLD_BCN_LOSS_EXIT_ESR_THRESH_2_LINKS) ||
+ missed_bcon >= IWL_MLD_BCN_LOSS_EXIT_ESR_THRESH ||
+ (bss_param_ch_cnt_link_id != link_id &&
+ missed_bcon >=
+ IWL_MLD_BCN_LOSS_EXIT_ESR_THRESH_BSS_PARAM_CHANGED)) {
+ iwl_mld_exit_emlsr(mld, vif, IWL_MLD_EMLSR_EXIT_MISSED_BEACON,
+ iwl_mld_get_primary_link(vif));
+ }
+}
+EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_mld_handle_missed_beacon_notif);
+
+bool iwl_mld_cancel_missed_beacon_notif(struct iwl_mld *mld,
+ struct iwl_rx_packet *pkt,
+ u32 removed_link_id)
+{
+ struct iwl_missed_beacons_notif *notif = (void *)pkt->data;
+
+ if (le32_to_cpu(notif->other_link_id) == removed_link_id) {
+ /* Second link is being removed. Don't cancel the notification,
+ * but mark second link as invalid.
+ */
+ notif->other_link_id = cpu_to_le32(FW_CTXT_ID_INVALID);
+ }
+
+ /* If the primary link is removed, cancel the notification */
+ return le32_to_cpu(notif->link_id) == removed_link_id;
+}
+
+int iwl_mld_link_set_associated(struct iwl_mld *mld, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link)
+{
+ return iwl_mld_change_link_in_fw(mld, link, LINK_CONTEXT_MODIFY_ALL &
+ ~(LINK_CONTEXT_MODIFY_ACTIVE |
+ LINK_CONTEXT_MODIFY_EHT_PARAMS));
+}
+
+struct iwl_mld_rssi_to_grade {
+ s8 rssi[2];
+ u16 grade;
+};
+
+#define RSSI_TO_GRADE_LINE(_lb, _hb_uhb, _grade) \
+ { \
+ .rssi = {_lb, _hb_uhb}, \
+ .grade = _grade \
+ }
+
+/*
+ * This array must be sorted by increasing RSSI for proper functionality.
+ * The grades are actually estimated throughput, represented as fixed-point
+ * with a scale factor of 1/10.
+ */
+static const struct iwl_mld_rssi_to_grade rssi_to_grade_map[] = {
+ RSSI_TO_GRADE_LINE(-85, -89, 172),
+ RSSI_TO_GRADE_LINE(-83, -86, 344),
+ RSSI_TO_GRADE_LINE(-82, -85, 516),
+ RSSI_TO_GRADE_LINE(-80, -83, 688),
+ RSSI_TO_GRADE_LINE(-77, -79, 1032),
+ RSSI_TO_GRADE_LINE(-73, -76, 1376),
+ RSSI_TO_GRADE_LINE(-70, -74, 1548),
+ RSSI_TO_GRADE_LINE(-69, -72, 1720),
+ RSSI_TO_GRADE_LINE(-65, -68, 2064),
+ RSSI_TO_GRADE_LINE(-61, -66, 2294),
+ RSSI_TO_GRADE_LINE(-58, -61, 2580),
+ RSSI_TO_GRADE_LINE(-55, -58, 2868),
+ RSSI_TO_GRADE_LINE(-46, -55, 3098),
+ RSSI_TO_GRADE_LINE(-43, -54, 3442)
+};
+
+#define MAX_GRADE (rssi_to_grade_map[ARRAY_SIZE(rssi_to_grade_map) - 1].grade)
+
+#define DEFAULT_CHAN_LOAD_2GHZ 30
+#define DEFAULT_CHAN_LOAD_5GHZ 15
+#define DEFAULT_CHAN_LOAD_6GHZ 0
+
+ /* Factor calculations are done in fixed-point with a scaling factor of 1/256 */
+#define SCALE_FACTOR 256
+#define MAX_CHAN_LOAD 256
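+
+ /* Worked example (illustrative values): an RSSI of -70 dBm on a 5/6 GHz
+ * link maps to a grade of 2064 in the table above. If the channel load
+ * caused by others is 64 (on the 0..256 scale), the available load is
+ * 256 - 64 = 192, so the grade becomes 2064 * 192 / 256 = 1548. On an
+ * unpunctured 80 MHz channel (4 subchannels) the final grade is
+ * 1548 * 4 = 6192.
+ */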
+
+static unsigned int
+iwl_mld_get_n_subchannels(const struct ieee80211_bss_conf *link_conf)
+{
+ enum nl80211_chan_width chan_width =
+ link_conf->chanreq.oper.width;
+ int mhz = nl80211_chan_width_to_mhz(chan_width);
+ unsigned int n_subchannels;
+
+ if (WARN_ONCE(mhz < 20 || mhz > 320,
+ "Invalid channel width : (%d)\n", mhz))
+ return 1;
+
+ /* total number of subchannels */
+ n_subchannels = mhz / 20;
+
+ /* No puncturing if less than 80 MHz */
+ if (mhz >= 80)
+ n_subchannels -= hweight16(link_conf->chanreq.oper.punctured);
+
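+ /* e.g. a 160 MHz channel with two punctured 20 MHz subchannels gives
+ * 8 - 2 = 6 usable subchannels
+ */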
+ return n_subchannels;
+}
+
+static int
+iwl_mld_get_chan_load_from_element(struct iwl_mld *mld,
+ struct ieee80211_bss_conf *link_conf)
+{
+ struct ieee80211_vif *vif = link_conf->vif;
+ const struct cfg80211_bss_ies *ies;
+ const struct element *bss_load_elem = NULL;
+ const struct ieee80211_bss_load_elem *bss_load;
+
+ guard(rcu)();
+
+ if (ieee80211_vif_link_active(vif, link_conf->link_id))
+ ies = rcu_dereference(link_conf->bss->beacon_ies);
+ else
+ ies = rcu_dereference(link_conf->bss->ies);
+
+ if (ies)
+ bss_load_elem = cfg80211_find_elem(WLAN_EID_QBSS_LOAD,
+ ies->data, ies->len);
+
+ if (!bss_load_elem ||
+ bss_load_elem->datalen != sizeof(*bss_load))
+ return -EINVAL;
+
+ bss_load = (const void *)bss_load_elem->data;
+
+ return bss_load->channel_util;
+}
+
+static unsigned int
+iwl_mld_get_chan_load_by_us(struct iwl_mld *mld,
+ struct ieee80211_bss_conf *link_conf,
+ bool expect_active_link)
+{
+ struct iwl_mld_link *mld_link = iwl_mld_link_from_mac80211(link_conf);
+ struct ieee80211_chanctx_conf *chan_ctx;
+ struct iwl_mld_phy *phy;
+
+ if (!mld_link || !mld_link->active) {
+ WARN_ON(expect_active_link);
+ return 0;
+ }
+
+ if (WARN_ONCE(!rcu_access_pointer(mld_link->chan_ctx),
+ "Active link (%u) without channel ctxt assigned!\n",
+ link_conf->link_id))
+ return 0;
+
+ chan_ctx = wiphy_dereference(mld->wiphy, mld_link->chan_ctx);
+ phy = iwl_mld_phy_from_mac80211(chan_ctx);
+
+ return phy->channel_load_by_us;
+}
+
+/* Returns error if the channel utilization element is invalid/unavailable */
+int iwl_mld_get_chan_load_by_others(struct iwl_mld *mld,
+ struct ieee80211_bss_conf *link_conf,
+ bool expect_active_link)
+{
+ int chan_load;
+ unsigned int chan_load_by_us;
+
+ /* get overall load */
+ chan_load = iwl_mld_get_chan_load_from_element(mld, link_conf);
+ if (chan_load < 0)
+ return chan_load;
+
+ chan_load_by_us = iwl_mld_get_chan_load_by_us(mld, link_conf,
+ expect_active_link);
+
+ /* channel load by us is given in percentage */
+ chan_load_by_us =
+ NORMALIZE_PERCENT_TO_255(chan_load_by_us);
+
+ /* Only subtract our own load when it does not exceed the reported total */
+ if (chan_load_by_us <= chan_load)
+ chan_load -= chan_load_by_us;
+
+ return chan_load;
+}
+
+static unsigned int
+iwl_mld_get_default_chan_load(struct ieee80211_bss_conf *link_conf)
+{
+ enum nl80211_band band = link_conf->chanreq.oper.chan->band;
+
+ switch (band) {
+ case NL80211_BAND_2GHZ:
+ return DEFAULT_CHAN_LOAD_2GHZ;
+ case NL80211_BAND_5GHZ:
+ return DEFAULT_CHAN_LOAD_5GHZ;
+ case NL80211_BAND_6GHZ:
+ return DEFAULT_CHAN_LOAD_6GHZ;
+ default:
+ WARN_ON(1);
+ return 0;
+ }
+}
+
+unsigned int iwl_mld_get_chan_load(struct iwl_mld *mld,
+ struct ieee80211_bss_conf *link_conf)
+{
+ int chan_load;
+
+ chan_load = iwl_mld_get_chan_load_by_others(mld, link_conf, false);
+ if (chan_load >= 0)
+ return chan_load;
+
+ /* No information from the element, take the defaults */
+ chan_load = iwl_mld_get_default_chan_load(link_conf);
+
+ /* The defaults are given in percentage */
+ return NORMALIZE_PERCENT_TO_255(chan_load);
+}
+
+static unsigned int
+iwl_mld_get_avail_chan_load(struct iwl_mld *mld,
+ struct ieee80211_bss_conf *link_conf)
+{
+ return MAX_CHAN_LOAD - iwl_mld_get_chan_load(mld, link_conf);
+}
+
+/* This function calculates the grade of a link. Returns 0 in error case */
+unsigned int iwl_mld_get_link_grade(struct iwl_mld *mld,
+ struct ieee80211_bss_conf *link_conf)
+{
+ enum nl80211_band band;
+ int rssi_idx;
+ s32 link_rssi;
+ unsigned int grade = MAX_GRADE;
+
+ if (WARN_ON_ONCE(!link_conf))
+ return 0;
+
+ band = link_conf->chanreq.oper.chan->band;
+ if (WARN_ONCE(band != NL80211_BAND_2GHZ &&
+ band != NL80211_BAND_5GHZ &&
+ band != NL80211_BAND_6GHZ,
+ "Invalid band (%u)\n", band))
+ return 0;
+
+ link_rssi = MBM_TO_DBM(link_conf->bss->signal);
+ /*
+ * For 6 GHz the RSSI of the beacons is lower than
+ * the RSSI of the data.
+ */
+ if (band == NL80211_BAND_6GHZ && link_rssi)
+ link_rssi += 4;
+
+ rssi_idx = band == NL80211_BAND_2GHZ ? 0 : 1;
+
+ /* No valid RSSI - take the lowest grade */
+ if (!link_rssi)
+ link_rssi = rssi_to_grade_map[0].rssi[rssi_idx];
+
+ IWL_DEBUG_EHT(mld,
+ "Calculating grade of link %d: band = %d, bandwidth = %d, punctured subchannels =0x%x RSSI = %d\n",
+ link_conf->link_id, band,
+ link_conf->chanreq.oper.width,
+ link_conf->chanreq.oper.punctured, link_rssi);
+
+ /* Get grade based on RSSI */
+ for (int i = 0; i < ARRAY_SIZE(rssi_to_grade_map); i++) {
+ const struct iwl_mld_rssi_to_grade *line =
+ &rssi_to_grade_map[i];
+
+ if (link_rssi > line->rssi[rssi_idx])
+ continue;
+ grade = line->grade;
+ break;
+ }
+
+ /* Apply the channel load and puncturing factors */
+ grade = grade * iwl_mld_get_avail_chan_load(mld, link_conf) / SCALE_FACTOR;
+ grade = grade * iwl_mld_get_n_subchannels(link_conf);
+
+ IWL_DEBUG_EHT(mld, "Link %d's grade: %d\n", link_conf->link_id, grade);
+
+ return grade;
+}
+EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_mld_get_link_grade);
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/link.h b/drivers/net/wireless/intel/iwlwifi/mld/link.h
new file mode 100644
index 000000000000..42b7bdcbd741
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mld/link.h
@@ -0,0 +1,153 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2024-2025 Intel Corporation
+ */
+#ifndef __iwl_mld_link_h__
+#define __iwl_mld_link_h__
+
+#include <net/mac80211.h>
+
+#include "mld.h"
+#include "sta.h"
+
+/**
+ * struct iwl_probe_resp_data - data for NoA/CSA updates
+ * @rcu_head: used for freeing the data on update
+ * @notif: notification data
+ * @noa_len: length of NoA attribute, calculated from the notification
+ */
+struct iwl_probe_resp_data {
+ struct rcu_head rcu_head;
+ struct iwl_probe_resp_data_notif notif;
+ int noa_len;
+};
+
+/**
+ * struct iwl_mld_link - link configuration parameters
+ *
+ * @rcu_head: RCU head for freeing this data.
+ * @fw_id: the fw id of the link.
+ * @active: if the link is active or not.
+ * @queue_params: QoS data from mac80211. This is updated with a call to
+ * drv_conf_tx for each AC, and then notified once with BSS_CHANGED_QOS.
+ * So we store it here and then send one link cmd for all the ACs.
+ * @chan_ctx: pointer to the channel context assigned to the link. If a link
+ * has an assigned channel context it means that it is active.
+ * @he_ru_2mhz_block: 26-tone RU OFDMA transmissions should be blocked.
+ * @igtk: fw can only have one IGTK at a time, whereas mac80211 can have two.
+ * This tracks the one IGTK that currently exists in FW.
+ * @vif: the vif this link belongs to
+ * @bcast_sta: station used for broadcast packets. Used in AP, GO and IBSS.
+ * @mcast_sta: station used for multicast packets. Used in AP, GO and IBSS.
+ * @aux_sta: station used for remain on channel. Used in P2P device.
+ * @link_id: over the air link ID
+ * @ap_early_keys: The firmware cannot install keys before bcast/mcast STAs,
+ * but higher layers work differently, so we store the keys here for
+ * later installation.
+ * @silent_deactivation: next deactivation needs to be silent.
+ * @probe_resp_data: data from FW notification to store NOA related data to be
+ * inserted into probe response.
+ * @rx_omi: data for BW reduction with OMI
+ * @rx_omi.bw_in_progress: update is in progress (indicates target BW)
+ * @rx_omi.exit_ts: timestamp of last exit
+ * @rx_omi.finished_work: work for the delayed reaction to the firmware saying
+ * the change was applied, and for then applying a new mode if it was
+ * updated while waiting for firmware/AP settle delay.
+ * @rx_omi.desired_bw: desired bandwidth
+ * @rx_omi.last_max_bw: last maximum BW used by firmware, for AP BW changes
+ */
+struct iwl_mld_link {
+ struct rcu_head rcu_head;
+
+ /* Add here fields that need cleanup on restart */
+ struct_group(zeroed_on_hw_restart,
+ u8 fw_id;
+ bool active;
+ struct ieee80211_tx_queue_params queue_params[IEEE80211_NUM_ACS];
+ struct ieee80211_chanctx_conf __rcu *chan_ctx;
+ bool he_ru_2mhz_block;
+ struct ieee80211_key_conf *igtk;
+ );
+ /* And here fields that survive a fw restart */
+ struct ieee80211_vif *vif;
+ struct iwl_mld_int_sta bcast_sta;
+ struct iwl_mld_int_sta mcast_sta;
+ struct iwl_mld_int_sta aux_sta;
+ u8 link_id;
+
+ struct {
+ struct wiphy_delayed_work finished_work;
+ unsigned long exit_ts;
+ enum ieee80211_sta_rx_bandwidth bw_in_progress,
+ desired_bw,
+ last_max_bw;
+ } rx_omi;
+
+ /* we can only have 2 GTK + 2 IGTK + 2 BIGTK active at a time */
+ struct ieee80211_key_conf *ap_early_keys[6];
+ bool silent_deactivation;
+ struct iwl_probe_resp_data __rcu *probe_resp_data;
+};
+
+/* Cleanup function for struct iwl_mld_link, will be called in restart */
+static inline void
+iwl_mld_cleanup_link(struct iwl_mld *mld, struct iwl_mld_link *link)
+{
+ struct iwl_probe_resp_data *probe_data;
+
+ probe_data = wiphy_dereference(mld->wiphy, link->probe_resp_data);
+ RCU_INIT_POINTER(link->probe_resp_data, NULL);
+ if (probe_data)
+ kfree_rcu(probe_data, rcu_head);
+
+ CLEANUP_STRUCT(link);
+ if (link->bcast_sta.sta_id != IWL_INVALID_STA)
+ iwl_mld_free_internal_sta(mld, &link->bcast_sta);
+ if (link->mcast_sta.sta_id != IWL_INVALID_STA)
+ iwl_mld_free_internal_sta(mld, &link->mcast_sta);
+ if (link->aux_sta.sta_id != IWL_INVALID_STA)
+ iwl_mld_free_internal_sta(mld, &link->aux_sta);
+}
+
+/* Convert a percentage from [0,100] to [0,255] */
+#define NORMALIZE_PERCENT_TO_255(percentage) ((percentage) * 256 / 100)
+
+int iwl_mld_add_link(struct iwl_mld *mld,
+ struct ieee80211_bss_conf *bss_conf);
+void iwl_mld_remove_link(struct iwl_mld *mld,
+ struct ieee80211_bss_conf *bss_conf);
+int iwl_mld_activate_link(struct iwl_mld *mld,
+ struct ieee80211_bss_conf *link);
+void iwl_mld_deactivate_link(struct iwl_mld *mld,
+ struct ieee80211_bss_conf *link);
+int iwl_mld_change_link_omi_bw(struct iwl_mld *mld,
+ struct ieee80211_bss_conf *link,
+ enum ieee80211_sta_rx_bandwidth bw);
+int iwl_mld_change_link_in_fw(struct iwl_mld *mld,
+ struct ieee80211_bss_conf *link, u32 changes);
+void iwl_mld_handle_missed_beacon_notif(struct iwl_mld *mld,
+ struct iwl_rx_packet *pkt);
+bool iwl_mld_cancel_missed_beacon_notif(struct iwl_mld *mld,
+ struct iwl_rx_packet *pkt,
+ u32 removed_link_id);
+int iwl_mld_link_set_associated(struct iwl_mld *mld, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link);
+
+unsigned int iwl_mld_get_link_grade(struct iwl_mld *mld,
+ struct ieee80211_bss_conf *link_conf);
+
+unsigned int iwl_mld_get_chan_load(struct iwl_mld *mld,
+ struct ieee80211_bss_conf *link_conf);
+
+int iwl_mld_get_chan_load_by_others(struct iwl_mld *mld,
+ struct ieee80211_bss_conf *link_conf,
+ bool expect_active_link);
+void iwl_mld_handle_omi_status_notif(struct iwl_mld *mld,
+ struct iwl_rx_packet *pkt);
+void iwl_mld_leave_omi_bw_reduction(struct iwl_mld *mld);
+void iwl_mld_check_omi_bw_reduction(struct iwl_mld *mld);
+void iwl_mld_omi_ap_changed_bw(struct iwl_mld *mld,
+ struct ieee80211_bss_conf *link_conf,
+ enum ieee80211_sta_rx_bandwidth bw);
+
+#endif /* __iwl_mld_link_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/low_latency.c b/drivers/net/wireless/intel/iwlwifi/mld/low_latency.c
new file mode 100644
index 000000000000..a4a612afb3b3
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mld/low_latency.c
@@ -0,0 +1,339 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2024-2025 Intel Corporation
+ */
+#include "mld.h"
+#include "iface.h"
+#include "low_latency.h"
+#include "hcmd.h"
+#include "power.h"
+#include "mlo.h"
+
+#define MLD_LL_WK_INTERVAL_MSEC 500
+#define MLD_LL_PERIOD (HZ * MLD_LL_WK_INTERVAL_MSEC / 1000)
+#define MLD_LL_ACTIVE_WK_PERIOD (HZ * 10)
+
+/* packets within the MLD_LL_ACTIVE_WK_PERIOD counting window */
+#define MLD_LL_ENABLE_THRESH 100
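+
+/* For illustration, with HZ == 1000: MLD_LL_PERIOD is 500 jiffies (500 ms)
+ * and MLD_LL_ACTIVE_WK_PERIOD is 10000 jiffies (10 s); low latency is
+ * enabled once more than 100 vo/vi packets are counted within the window.
+ */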
+
+static bool iwl_mld_calc_low_latency(struct iwl_mld *mld,
+ unsigned long timestamp)
+{
+ struct iwl_mld_low_latency *ll = &mld->low_latency;
+ bool global_low_latency = false;
+ u8 num_rx_q = mld->trans->num_rx_queues;
+
+ for (int mac_id = 0; mac_id < NUM_MAC_INDEX_DRIVER; mac_id++) {
+ u32 total_vo_vi_pkts = 0;
+ bool ll_period_expired;
+
+ /* If it's not initialized yet, it means we have not yet
+ * received/transmitted any vo/vi packet on this MAC.
+ */
+ if (!ll->window_start[mac_id])
+ continue;
+
+ ll_period_expired =
+ time_after(timestamp, ll->window_start[mac_id] +
+ MLD_LL_ACTIVE_WK_PERIOD);
+
+ if (ll_period_expired)
+ ll->window_start[mac_id] = timestamp;
+
+ for (int q = 0; q < num_rx_q; q++) {
+ struct iwl_mld_low_latency_packets_counters *counters =
+ &mld->low_latency.pkts_counters[q];
+
+ spin_lock_bh(&counters->lock);
+
+ total_vo_vi_pkts += counters->vo_vi[mac_id];
+
+ if (ll_period_expired)
+ counters->vo_vi[mac_id] = 0;
+
+ spin_unlock_bh(&counters->lock);
+ }
+
+ /* Enable immediately once enough packets were counted, but
+ * only disable after the low-latency period has expired and
+ * the count is below the threshold.
+ */
+ if (total_vo_vi_pkts > MLD_LL_ENABLE_THRESH)
+ mld->low_latency.result[mac_id] = true;
+ else if (ll_period_expired)
+ mld->low_latency.result[mac_id] = false;
+
+ global_low_latency |= mld->low_latency.result[mac_id];
+ }
+
+ return global_low_latency;
+}
+
+static void iwl_mld_low_latency_iter(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mld *mld = _data;
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+ bool prev = mld_vif->low_latency_causes & LOW_LATENCY_TRAFFIC;
+ bool low_latency;
+
+ if (WARN_ON(mld_vif->fw_id >= ARRAY_SIZE(mld->low_latency.result)))
+ return;
+
+ low_latency = mld->low_latency.result[mld_vif->fw_id];
+
+ if (prev != low_latency)
+ iwl_mld_vif_update_low_latency(mld, vif, low_latency,
+ LOW_LATENCY_TRAFFIC);
+}
+
+static void iwl_mld_low_latency_wk(struct wiphy *wiphy, struct wiphy_work *wk)
+{
+ struct iwl_mld *mld = container_of(wk, struct iwl_mld,
+ low_latency.work.work);
+ unsigned long timestamp = jiffies;
+ bool low_latency_active;
+
+ if (mld->fw_status.in_hw_restart)
+ return;
+
+ /* It is assumed that the work was scheduled only after verifying
+ * that at least MLD_LL_PERIOD has passed since the last update.
+ */
+
+ low_latency_active = iwl_mld_calc_low_latency(mld, timestamp);
+
+ /* Update the timestamp now after the low-latency calculation */
+ mld->low_latency.timestamp = timestamp;
+
+ /* If low-latency is active we need to force re-evaluation after
+ * 10 seconds, so that we can disable low-latency when
+ * the low-latency traffic ends.
+ *
+ * Otherwise, we don't need to run the work because there is nothing to
+ * disable.
+ *
+ * Note that this has no impact on the regular scheduling of the
+ * updates triggered by traffic - those happen whenever the
+ * MLD_LL_PERIOD timeout expires.
+ */
+ if (low_latency_active)
+ wiphy_delayed_work_queue(mld->wiphy, &mld->low_latency.work,
+ MLD_LL_ACTIVE_WK_PERIOD);
+
+ ieee80211_iterate_active_interfaces_mtx(mld->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mld_low_latency_iter, mld);
+}
+
+int iwl_mld_low_latency_init(struct iwl_mld *mld)
+{
+ struct iwl_mld_low_latency *ll = &mld->low_latency;
+ unsigned long ts = jiffies;
+
+ ll->pkts_counters = kcalloc(mld->trans->num_rx_queues,
+ sizeof(*ll->pkts_counters), GFP_KERNEL);
+ if (!ll->pkts_counters)
+ return -ENOMEM;
+
+ for (int q = 0; q < mld->trans->num_rx_queues; q++)
+ spin_lock_init(&ll->pkts_counters[q].lock);
+
+ wiphy_delayed_work_init(&ll->work, iwl_mld_low_latency_wk);
+
+ ll->timestamp = ts;
+
+ /* The low-latency window_start will be initialized per-MAC on
+ * the first vo/vi packet received/transmitted.
+ */
+
+ return 0;
+}
+
+void iwl_mld_low_latency_free(struct iwl_mld *mld)
+{
+ struct iwl_mld_low_latency *ll = &mld->low_latency;
+
+ kfree(ll->pkts_counters);
+ ll->pkts_counters = NULL;
+}
+
+void iwl_mld_low_latency_restart_cleanup(struct iwl_mld *mld)
+{
+ struct iwl_mld_low_latency *ll = &mld->low_latency;
+
+ ll->timestamp = jiffies;
+
+ memset(ll->window_start, 0, sizeof(ll->window_start));
+ memset(ll->result, 0, sizeof(ll->result));
+
+ for (int q = 0; q < mld->trans->num_rx_queues; q++)
+ memset(ll->pkts_counters[q].vo_vi, 0,
+ sizeof(ll->pkts_counters[q].vo_vi));
+}
+
+static int iwl_mld_send_low_latency_cmd(struct iwl_mld *mld, bool low_latency,
+ u16 mac_id)
+{
+ struct iwl_mac_low_latency_cmd cmd = {
+ .mac_id = cpu_to_le32(mac_id)
+ };
+ u16 cmd_id = WIDE_ID(MAC_CONF_GROUP, LOW_LATENCY_CMD);
+ int ret;
+
+ if (low_latency) {
+ /* Currently we don't care about the direction */
+ cmd.low_latency_rx = 1;
+ cmd.low_latency_tx = 1;
+ }
+
+ ret = iwl_mld_send_cmd_pdu(mld, cmd_id, &cmd);
+ if (ret)
+ IWL_ERR(mld, "Failed to send low latency command\n");
+
+ return ret;
+}
+
+static void iwl_mld_vif_set_low_latency(struct iwl_mld_vif *mld_vif, bool set,
+ enum iwl_mld_low_latency_cause cause)
+{
+ if (set)
+ mld_vif->low_latency_causes |= cause;
+ else
+ mld_vif->low_latency_causes &= ~cause;
+}
+
+void iwl_mld_vif_update_low_latency(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ bool low_latency,
+ enum iwl_mld_low_latency_cause cause)
+{
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+ bool prev;
+
+ prev = iwl_mld_vif_low_latency(mld_vif);
+ iwl_mld_vif_set_low_latency(mld_vif, low_latency, cause);
+
+ low_latency = iwl_mld_vif_low_latency(mld_vif);
+ if (low_latency == prev)
+ return;
+
+ if (iwl_mld_send_low_latency_cmd(mld, low_latency, mld_vif->fw_id)) {
+ /* revert to previous low-latency state */
+ iwl_mld_vif_set_low_latency(mld_vif, prev, cause);
+ return;
+ }
+
+ if (low_latency)
+ iwl_mld_leave_omi_bw_reduction(mld);
+
+ if (ieee80211_vif_type_p2p(vif) != NL80211_IFTYPE_P2P_CLIENT)
+ return;
+
+ iwl_mld_update_mac_power(mld, vif, false);
+
+ if (low_latency)
+ iwl_mld_retry_emlsr(mld, vif);
+}
+
+static bool iwl_mld_is_vo_vi_pkt(struct ieee80211_hdr *hdr)
+{
+ u8 tid;
+ static const u8 tid_to_mac80211_ac[] = {
+ IEEE80211_AC_BE,
+ IEEE80211_AC_BK,
+ IEEE80211_AC_BK,
+ IEEE80211_AC_BE,
+ IEEE80211_AC_VI,
+ IEEE80211_AC_VI,
+ IEEE80211_AC_VO,
+ IEEE80211_AC_VO,
+ };
+
+ if (!hdr || !ieee80211_is_data_qos(hdr->frame_control))
+ return false;
+
+ tid = ieee80211_get_tid(hdr);
+ if (tid >= IWL_MAX_TID_COUNT)
+ return false;
+
+ /* IEEE80211_AC_VO is 0 and IEEE80211_AC_VI is 1, so this covers both */
+ return tid_to_mac80211_ac[tid] <= IEEE80211_AC_VI;
+}
+
+void iwl_mld_low_latency_update_counters(struct iwl_mld *mld,
+ struct ieee80211_hdr *hdr,
+ struct ieee80211_sta *sta,
+ u8 queue)
+{
+ struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(sta);
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(mld_sta->vif);
+ struct iwl_mld_low_latency_packets_counters *counters;
+ unsigned long ts = jiffies ? jiffies : 1;
+ u8 fw_id = mld_vif->fw_id;
+
+ /* we should have failed op mode init if NULL */
+ if (WARN_ON_ONCE(!mld->low_latency.pkts_counters))
+ return;
+
+ if (WARN_ON_ONCE(fw_id >= ARRAY_SIZE(counters->vo_vi) ||
+ queue >= mld->trans->num_rx_queues))
+ return;
+
+ if (mld->low_latency.stopped)
+ return;
+
+ if (!iwl_mld_is_vo_vi_pkt(hdr))
+ return;
+
+ counters = &mld->low_latency.pkts_counters[queue];
+
+ spin_lock_bh(&counters->lock);
+ counters->vo_vi[fw_id]++;
+ spin_unlock_bh(&counters->lock);
+
+ /* Initialize the window_start on the first vo/vi packet */
+ if (!mld->low_latency.window_start[fw_id])
+ mld->low_latency.window_start[fw_id] = ts;
+
+ if (time_is_before_jiffies(mld->low_latency.timestamp + MLD_LL_PERIOD))
+ wiphy_delayed_work_queue(mld->wiphy, &mld->low_latency.work,
+ 0);
+}
+
+void iwl_mld_low_latency_stop(struct iwl_mld *mld)
+{
+ lockdep_assert_wiphy(mld->wiphy);
+
+ mld->low_latency.stopped = true;
+
+ wiphy_delayed_work_cancel(mld->wiphy, &mld->low_latency.work);
+}
+
+void iwl_mld_low_latency_restart(struct iwl_mld *mld)
+{
+ struct iwl_mld_low_latency *ll = &mld->low_latency;
+ bool low_latency = false;
+ unsigned long ts = jiffies;
+
+ lockdep_assert_wiphy(mld->wiphy);
+
+ ll->timestamp = ts;
+ mld->low_latency.stopped = false;
+
+ for (int mac = 0; mac < NUM_MAC_INDEX_DRIVER; mac++) {
+ ll->window_start[mac] = 0;
+ low_latency |= ll->result[mac];
+
+ for (int q = 0; q < mld->trans->num_rx_queues; q++) {
+ spin_lock_bh(&ll->pkts_counters[q].lock);
+ ll->pkts_counters[q].vo_vi[mac] = 0;
+ spin_unlock_bh(&ll->pkts_counters[q].lock);
+ }
+ }
+
+ /* if low latency is active, force re-evaluation to cover the case of
+ * no traffic.
+ */
+ if (low_latency)
+ wiphy_delayed_work_queue(mld->wiphy, &ll->work, MLD_LL_PERIOD);
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/low_latency.h b/drivers/net/wireless/intel/iwlwifi/mld/low_latency.h
new file mode 100644
index 000000000000..f59684d235af
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mld/low_latency.h
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2024 Intel Corporation
+ */
+#ifndef __iwl_mld_low_latency_h__
+#define __iwl_mld_low_latency_h__
+
+/**
+ * struct iwl_mld_low_latency_packets_counters - Packets counters
+ * @lock: synchronize the counting in data path against the worker
+ * @vo_vi: per-mac, counts the number of TX and RX voice and video packets
+ */
+struct iwl_mld_low_latency_packets_counters {
+ spinlock_t lock;
+ u32 vo_vi[NUM_MAC_INDEX_DRIVER];
+} ____cacheline_aligned_in_smp;
+
+/**
+ * enum iwl_mld_low_latency_cause - low-latency set causes
+ *
+ * @LOW_LATENCY_TRAFFIC: indicates low-latency traffic was detected
+ * @LOW_LATENCY_DEBUGFS: low-latency mode set from debugfs
+ * @LOW_LATENCY_VIF_TYPE: low-latency mode set because of vif type (AP)
+ */
+enum iwl_mld_low_latency_cause {
+ LOW_LATENCY_TRAFFIC = BIT(0),
+ LOW_LATENCY_DEBUGFS = BIT(1),
+ LOW_LATENCY_VIF_TYPE = BIT(2),
+};
+
+/**
+ * struct iwl_mld_low_latency - Manage low-latency detection and activation.
+ * @work: this work is used to detect low-latency by monitoring the number of
+ * voice and video packets transmitted in a period of time. If the
+ * threshold is reached, low-latency is activated. When active,
+ * it is deactivated if the threshold is not reached within a
+ * 10-second period.
+ * @timestamp: timestamp of the last update.
+ * @window_start: per-mac, timestamp of the start of the current window. When
+ * the window is over, the counters are reset.
+ * @pkts_counters: per-queue array voice/video packet counters
+ * @result: per-mac latest low-latency result
+ * @stopped: if true, ignore the requests to update the counters
+ */
+struct iwl_mld_low_latency {
+ struct wiphy_delayed_work work;
+ unsigned long timestamp;
+ unsigned long window_start[NUM_MAC_INDEX_DRIVER];
+ struct iwl_mld_low_latency_packets_counters *pkts_counters;
+ bool result[NUM_MAC_INDEX_DRIVER];
+ bool stopped;
+};
+
+int iwl_mld_low_latency_init(struct iwl_mld *mld);
+void iwl_mld_low_latency_free(struct iwl_mld *mld);
+void iwl_mld_low_latency_restart_cleanup(struct iwl_mld *mld);
+void iwl_mld_vif_update_low_latency(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ bool low_latency,
+ enum iwl_mld_low_latency_cause cause);
+void iwl_mld_low_latency_update_counters(struct iwl_mld *mld,
+ struct ieee80211_hdr *hdr,
+ struct ieee80211_sta *sta,
+ u8 queue);
+void iwl_mld_low_latency_stop(struct iwl_mld *mld);
+void iwl_mld_low_latency_restart(struct iwl_mld *mld);
+
+#endif /* __iwl_mld_low_latency_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mld/mac80211.c
new file mode 100644
index 000000000000..6851064b82da
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mld/mac80211.c
@@ -0,0 +1,2670 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2024-2025 Intel Corporation
+ */
+
+#include <net/mac80211.h>
+#include <linux/ip.h>
+
+#include "mld.h"
+#include "mac80211.h"
+#include "phy.h"
+#include "iface.h"
+#include "power.h"
+#include "sta.h"
+#include "agg.h"
+#include "scan.h"
+#include "d3.h"
+#include "tlc.h"
+#include "key.h"
+#include "ap.h"
+#include "tx.h"
+#include "roc.h"
+#include "mlo.h"
+#include "stats.h"
+#include "ftm-initiator.h"
+#include "low_latency.h"
+#include "fw/api/scan.h"
+#include "fw/api/context.h"
+#include "fw/api/filter.h"
+#include "fw/api/sta.h"
+#include "fw/api/tdls.h"
+#ifdef CONFIG_PM_SLEEP
+#include "fw/api/d3.h"
+#endif /* CONFIG_PM_SLEEP */
+#include "iwl-trans.h"
+
+#define IWL_MLD_LIMITS(ap) \
+ { \
+ .max = 2, \
+ .types = BIT(NL80211_IFTYPE_STATION), \
+ }, \
+ { \
+ .max = 1, \
+ .types = ap | \
+ BIT(NL80211_IFTYPE_P2P_CLIENT) | \
+ BIT(NL80211_IFTYPE_P2P_GO), \
+ }, \
+ { \
+ .max = 1, \
+ .types = BIT(NL80211_IFTYPE_P2P_DEVICE), \
+ }
+
+static const struct ieee80211_iface_limit iwl_mld_limits[] = {
+ IWL_MLD_LIMITS(0)
+};
+
+static const struct ieee80211_iface_limit iwl_mld_limits_ap[] = {
+ IWL_MLD_LIMITS(BIT(NL80211_IFTYPE_AP))
+};
+
+static const struct ieee80211_iface_combination
+iwl_mld_iface_combinations[] = {
+ {
+ .num_different_channels = 2,
+ .max_interfaces = 4,
+ .limits = iwl_mld_limits,
+ .n_limits = ARRAY_SIZE(iwl_mld_limits),
+ },
+ {
+ .num_different_channels = 1,
+ .max_interfaces = 4,
+ .limits = iwl_mld_limits_ap,
+ .n_limits = ARRAY_SIZE(iwl_mld_limits_ap),
+ },
+};
+
+static const u8 if_types_ext_capa_sta[] = {
+ [0] = WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING,
+ [2] = WLAN_EXT_CAPA3_MULTI_BSSID_SUPPORT,
+ [7] = WLAN_EXT_CAPA8_OPMODE_NOTIF |
+ WLAN_EXT_CAPA8_MAX_MSDU_IN_AMSDU_LSB,
+ [8] = WLAN_EXT_CAPA9_MAX_MSDU_IN_AMSDU_MSB,
+ [9] = WLAN_EXT_CAPA10_TWT_REQUESTER_SUPPORT,
+};
+
+#define IWL_MLD_EMLSR_CAPA (IEEE80211_EML_CAP_EMLSR_SUPP | \
+ IEEE80211_EML_CAP_EMLSR_PADDING_DELAY_32US << \
+ __bf_shf(IEEE80211_EML_CAP_EMLSR_PADDING_DELAY) | \
+ IEEE80211_EML_CAP_EMLSR_TRANSITION_DELAY_64US << \
+ __bf_shf(IEEE80211_EML_CAP_EMLSR_TRANSITION_DELAY))
+#define IWL_MLD_CAPA_OPS (FIELD_PREP_CONST( \
+ IEEE80211_MLD_CAP_OP_TID_TO_LINK_MAP_NEG_SUPP, \
+ IEEE80211_MLD_CAP_OP_TID_TO_LINK_MAP_NEG_SUPP_SAME) | \
+ IEEE80211_MLD_CAP_OP_LINK_RECONF_SUPPORT)
+
+static const struct wiphy_iftype_ext_capab iftypes_ext_capa[] = {
+ {
+ .iftype = NL80211_IFTYPE_STATION,
+ .extended_capabilities = if_types_ext_capa_sta,
+ .extended_capabilities_mask = if_types_ext_capa_sta,
+ .extended_capabilities_len = sizeof(if_types_ext_capa_sta),
+ /* relevant only if EHT is supported */
+ .eml_capabilities = IWL_MLD_EMLSR_CAPA,
+ .mld_capa_and_ops = IWL_MLD_CAPA_OPS,
+ },
+};
+
+static void iwl_mld_hw_set_addresses(struct iwl_mld *mld)
+{
+ struct wiphy *wiphy = mld->wiphy;
+ int num_addrs = 1;
+
+ /* Extract MAC address */
+ memcpy(mld->addresses[0].addr, mld->nvm_data->hw_addr, ETH_ALEN);
+ wiphy->addresses = mld->addresses;
+ wiphy->n_addresses = 1;
+
+ /* Extract additional MAC addresses if available */
+ if (mld->nvm_data->n_hw_addrs > 1)
+ num_addrs = min(mld->nvm_data->n_hw_addrs,
+ IWL_MLD_MAX_ADDRESSES);
+
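+ /* Derive each additional address from the previous one by incrementing
+ * the last octet, e.g. (for illustration) 00:11:22:33:44:55 becomes
+ * 00:11:22:33:44:56.
+ */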
+ for (int i = 1; i < num_addrs; i++) {
+ memcpy(mld->addresses[i].addr,
+ mld->addresses[i - 1].addr,
+ ETH_ALEN);
+ mld->addresses[i].addr[ETH_ALEN - 1]++;
+ wiphy->n_addresses++;
+ }
+}
+
+static void iwl_mld_hw_set_channels(struct iwl_mld *mld)
+{
+ struct wiphy *wiphy = mld->wiphy;
+ struct ieee80211_supported_band *bands = mld->nvm_data->bands;
+
+ wiphy->bands[NL80211_BAND_2GHZ] = &bands[NL80211_BAND_2GHZ];
+ wiphy->bands[NL80211_BAND_5GHZ] = &bands[NL80211_BAND_5GHZ];
+
+ if (bands[NL80211_BAND_6GHZ].n_channels)
+ wiphy->bands[NL80211_BAND_6GHZ] = &bands[NL80211_BAND_6GHZ];
+}
+
+static void iwl_mld_hw_set_security(struct iwl_mld *mld)
+{
+ struct ieee80211_hw *hw = mld->hw;
+ static const u32 mld_ciphers[] = {
+ WLAN_CIPHER_SUITE_WEP40,
+ WLAN_CIPHER_SUITE_WEP104,
+ WLAN_CIPHER_SUITE_TKIP,
+ WLAN_CIPHER_SUITE_CCMP,
+ WLAN_CIPHER_SUITE_GCMP,
+ WLAN_CIPHER_SUITE_GCMP_256,
+ WLAN_CIPHER_SUITE_AES_CMAC,
+ WLAN_CIPHER_SUITE_BIP_GMAC_128,
+ WLAN_CIPHER_SUITE_BIP_GMAC_256
+ };
+
+ hw->wiphy->n_cipher_suites = ARRAY_SIZE(mld_ciphers);
+ hw->wiphy->cipher_suites = mld_ciphers;
+
+ ieee80211_hw_set(hw, MFP_CAPABLE);
+ wiphy_ext_feature_set(hw->wiphy,
+ NL80211_EXT_FEATURE_BEACON_PROTECTION);
+}
+
+static void iwl_mld_hw_set_antennas(struct iwl_mld *mld)
+{
+ struct wiphy *wiphy = mld->wiphy;
+
+ wiphy->available_antennas_tx = iwl_mld_get_valid_tx_ant(mld);
+ wiphy->available_antennas_rx = iwl_mld_get_valid_rx_ant(mld);
+}
+
+static void iwl_mld_hw_set_pm(struct iwl_mld *mld)
+{
+#ifdef CONFIG_PM_SLEEP
+ struct wiphy *wiphy = mld->wiphy;
+
+ if (!device_can_wakeup(mld->trans->dev))
+ return;
+
+ mld->wowlan.flags |= WIPHY_WOWLAN_MAGIC_PKT |
+ WIPHY_WOWLAN_DISCONNECT |
+ WIPHY_WOWLAN_EAP_IDENTITY_REQ |
+ WIPHY_WOWLAN_RFKILL_RELEASE |
+ WIPHY_WOWLAN_NET_DETECT |
+ WIPHY_WOWLAN_SUPPORTS_GTK_REKEY |
+ WIPHY_WOWLAN_GTK_REKEY_FAILURE |
+ WIPHY_WOWLAN_4WAY_HANDSHAKE;
+
+ mld->wowlan.n_patterns = IWL_WOWLAN_MAX_PATTERNS;
+ mld->wowlan.pattern_min_len = IWL_WOWLAN_MIN_PATTERN_LEN;
+ mld->wowlan.pattern_max_len = IWL_WOWLAN_MAX_PATTERN_LEN;
+ mld->wowlan.max_nd_match_sets = IWL_SCAN_MAX_PROFILES_V2;
+
+ wiphy->wowlan = &mld->wowlan;
+#endif /* CONFIG_PM_SLEEP */
+}
+
+static void iwl_mac_hw_set_radiotap(struct iwl_mld *mld)
+{
+ struct ieee80211_hw *hw = mld->hw;
+
+ hw->radiotap_mcs_details |= IEEE80211_RADIOTAP_MCS_HAVE_FEC |
+ IEEE80211_RADIOTAP_MCS_HAVE_STBC;
+
+ hw->radiotap_vht_details |= IEEE80211_RADIOTAP_VHT_KNOWN_STBC |
+ IEEE80211_RADIOTAP_VHT_KNOWN_BEAMFORMED;
+
+ hw->radiotap_timestamp.units_pos =
+ IEEE80211_RADIOTAP_TIMESTAMP_UNIT_US |
+ IEEE80211_RADIOTAP_TIMESTAMP_SPOS_PLCP_SIG_ACQ;
+
+ /* this is the case for CCK frames, it's better (only 8) for OFDM */
+ hw->radiotap_timestamp.accuracy = 22;
+}
+
+static void iwl_mac_hw_set_flags(struct iwl_mld *mld)
+{
+ struct ieee80211_hw *hw = mld->hw;
+
+ ieee80211_hw_set(hw, USES_RSS);
+ ieee80211_hw_set(hw, HANDLES_QUIET_CSA);
+ ieee80211_hw_set(hw, AP_LINK_PS);
+ ieee80211_hw_set(hw, SIGNAL_DBM);
+ ieee80211_hw_set(hw, SPECTRUM_MGMT);
+ ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);
+ ieee80211_hw_set(hw, WANT_MONITOR_VIF);
+ ieee80211_hw_set(hw, SUPPORTS_PS);
+ ieee80211_hw_set(hw, SUPPORTS_DYNAMIC_PS);
+ ieee80211_hw_set(hw, AMPDU_AGGREGATION);
+ ieee80211_hw_set(hw, CONNECTION_MONITOR);
+ ieee80211_hw_set(hw, CHANCTX_STA_CSA);
+ ieee80211_hw_set(hw, SUPPORT_FAST_XMIT);
+ ieee80211_hw_set(hw, SUPPORTS_CLONED_SKBS);
+ ieee80211_hw_set(hw, NEEDS_UNIQUE_STA_ADDR);
+ ieee80211_hw_set(hw, SUPPORTS_VHT_EXT_NSS_BW);
+ ieee80211_hw_set(hw, BUFF_MMPDU_TXQ);
+ ieee80211_hw_set(hw, STA_MMPDU_TXQ);
+ ieee80211_hw_set(hw, TX_AMSDU);
+ ieee80211_hw_set(hw, TX_FRAG_LIST);
+ ieee80211_hw_set(hw, TX_AMPDU_SETUP_IN_HW);
+ ieee80211_hw_set(hw, HAS_RATE_CONTROL);
+ ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER);
+ ieee80211_hw_set(hw, DISALLOW_PUNCTURING_5GHZ);
+ ieee80211_hw_set(hw, SINGLE_SCAN_ON_ALL_BANDS);
+ ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU);
+ ieee80211_hw_set(hw, TDLS_WIDER_BW);
+}
+
+static void iwl_mac_hw_set_wiphy(struct iwl_mld *mld)
+{
+ struct ieee80211_hw *hw = mld->hw;
+ struct wiphy *wiphy = hw->wiphy;
+ const struct iwl_ucode_capabilities *ucode_capa = &mld->fw->ucode_capa;
+
+ snprintf(wiphy->fw_version,
+ sizeof(wiphy->fw_version),
+ "%.31s", mld->fw->fw_version);
+
+ wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_P2P_CLIENT) |
+ BIT(NL80211_IFTYPE_AP) |
+ BIT(NL80211_IFTYPE_P2P_GO) |
+ BIT(NL80211_IFTYPE_P2P_DEVICE) |
+ BIT(NL80211_IFTYPE_ADHOC);
+
+ wiphy->features |= NL80211_FEATURE_SCHED_SCAN_RANDOM_MAC_ADDR |
+ NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR |
+ NL80211_FEATURE_ND_RANDOM_MAC_ADDR |
+ NL80211_FEATURE_HT_IBSS |
+ NL80211_FEATURE_P2P_GO_CTWIN |
+ NL80211_FEATURE_LOW_PRIORITY_SCAN |
+ NL80211_FEATURE_P2P_GO_OPPPS |
+ NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE |
+ NL80211_FEATURE_SUPPORTS_WMM_ADMISSION |
+ NL80211_FEATURE_TX_POWER_INSERTION |
+ NL80211_FEATURE_DS_PARAM_SET_IE_IN_PROBES;
+
+ wiphy->flags |= WIPHY_FLAG_IBSS_RSN |
+ WIPHY_FLAG_AP_UAPSD |
+ WIPHY_FLAG_HAS_CHANNEL_SWITCH |
+ WIPHY_FLAG_SPLIT_SCAN_6GHZ |
+ WIPHY_FLAG_SUPPORTS_TDLS |
+ WIPHY_FLAG_SUPPORTS_EXT_KEK_KCK;
+
+ if (mld->nvm_data->sku_cap_11be_enable &&
+ !iwlwifi_mod_params.disable_11ax &&
+ !iwlwifi_mod_params.disable_11be)
+ wiphy->flags |= WIPHY_FLAG_SUPPORTS_MLO;
+
+ /* the firmware uses u8 for num of iterations, but 0xff is saved for
+ * infinite loop, so the maximum number of iterations is actually 254.
+ */
+ wiphy->max_sched_scan_plan_iterations = 254;
+ wiphy->max_sched_scan_ie_len = iwl_mld_scan_max_template_size();
+ wiphy->max_scan_ie_len = iwl_mld_scan_max_template_size();
+ wiphy->max_sched_scan_ssids = PROBE_OPTION_MAX;
+ wiphy->max_scan_ssids = PROBE_OPTION_MAX;
+ wiphy->max_sched_scan_plans = IWL_MAX_SCHED_SCAN_PLANS;
+ wiphy->max_sched_scan_reqs = 1;
+ wiphy->max_sched_scan_plan_interval = U16_MAX;
+ wiphy->max_match_sets = IWL_SCAN_MAX_PROFILES_V2;
+
+ wiphy->max_remain_on_channel_duration = 10000;
+
+ wiphy->hw_version = mld->trans->hw_id;
+
+ wiphy->hw_timestamp_max_peers = 1;
+
+ wiphy->iface_combinations = iwl_mld_iface_combinations;
+ wiphy->n_iface_combinations = ARRAY_SIZE(iwl_mld_iface_combinations);
+
+ wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_VHT_IBSS);
+ wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_DFS_CONCURRENT);
+ wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_BEACON_RATE_LEGACY);
+ wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_SCAN_START_TIME);
+ wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_BSS_PARENT_TSF);
+ wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_SCAN_MIN_PREQ_CONTENT);
+ wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_ACCEPT_BCAST_PROBE_RESP);
+ wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_FILS_MAX_CHANNEL_TIME);
+ wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_OCE_PROBE_REQ_HIGH_TX_RATE);
+ wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_MU_MIMO_AIR_SNIFFER);
+ wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_SPP_AMSDU_SUPPORT);
+
+ if (fw_has_capa(ucode_capa, IWL_UCODE_TLV_CAPA_PROTECTED_TWT))
+ wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_PROTECTED_TWT);
+
+ wiphy->iftype_ext_capab = NULL;
+ wiphy->num_iftype_ext_capab = 0;
+
+ if (!iwlwifi_mod_params.disable_11ax) {
+ wiphy->iftype_ext_capab = iftypes_ext_capa;
+ wiphy->num_iftype_ext_capab = ARRAY_SIZE(iftypes_ext_capa);
+
+ ieee80211_hw_set(hw, SUPPORTS_MULTI_BSSID);
+ ieee80211_hw_set(hw, SUPPORTS_ONLY_HE_MULTI_BSSID);
+ }
+
+ if (iwlmld_mod_params.power_scheme != IWL_POWER_SCHEME_CAM)
+ wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT;
+ else
+ wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
+}
+
+static void iwl_mac_hw_set_misc(struct iwl_mld *mld)
+{
+ struct ieee80211_hw *hw = mld->hw;
+
+ hw->queues = IEEE80211_NUM_ACS;
+
+ hw->netdev_features = NETIF_F_HIGHDMA | NETIF_F_SG;
+ hw->netdev_features |= mld->cfg->features;
+
+ hw->max_tx_fragments = mld->trans->max_skb_frags;
+ hw->max_listen_interval = IWL_MLD_CONN_LISTEN_INTERVAL;
+
+ hw->uapsd_max_sp_len = IEEE80211_WMM_IE_STA_QOSINFO_SP_ALL;
+ hw->uapsd_queues = IEEE80211_WMM_IE_STA_QOSINFO_AC_VO |
+ IEEE80211_WMM_IE_STA_QOSINFO_AC_VI |
+ IEEE80211_WMM_IE_STA_QOSINFO_AC_BK |
+ IEEE80211_WMM_IE_STA_QOSINFO_AC_BE;
+
+ hw->chanctx_data_size = sizeof(struct iwl_mld_phy);
+ hw->vif_data_size = sizeof(struct iwl_mld_vif);
+ hw->sta_data_size = sizeof(struct iwl_mld_sta);
+ hw->txq_data_size = sizeof(struct iwl_mld_txq);
+
+ /* TODO: Remove this division when IEEE80211_MAX_AMPDU_BUF_EHT size
+ * is supported.
+ * Note: ensure that IWL_DEFAULT_QUEUE_SIZE_EHT is updated accordingly.
+ */
+ hw->max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF_EHT / 2;
+}
+
+static int iwl_mld_hw_verify_preconditions(struct iwl_mld *mld)
+{
+ /* 11ax is expected to be enabled for all supported devices */
+ if (WARN_ON(!mld->nvm_data->sku_cap_11ax_enable))
+ return -EINVAL;
+
+ /* LAR is expected to be enabled for all supported devices */
+ if (WARN_ON(!mld->nvm_data->lar_enabled))
+ return -EINVAL;
+
+ /* All supported devices are currently using version 3 of the cmd.
+ * Since version 3, IWL_SCAN_MAX_PROFILES_V2 shall be used where
+ * necessary.
+ */
+ if (WARN_ON(iwl_fw_lookup_cmd_ver(mld->fw,
+ SCAN_OFFLOAD_UPDATE_PROFILES_CMD,
+ IWL_FW_CMD_VER_UNKNOWN) != 3))
+ return -EINVAL;
+
+ return 0;
+}
+
+int iwl_mld_register_hw(struct iwl_mld *mld)
+{
+ /* verify once essential preconditions required for setting
+ * the hw capabilities
+ */
+ if (iwl_mld_hw_verify_preconditions(mld))
+ return -EINVAL;
+
+ iwl_mld_hw_set_addresses(mld);
+ iwl_mld_hw_set_channels(mld);
+ iwl_mld_hw_set_security(mld);
+ iwl_mld_hw_set_pm(mld);
+ iwl_mld_hw_set_antennas(mld);
+ iwl_mac_hw_set_radiotap(mld);
+ iwl_mac_hw_set_flags(mld);
+ iwl_mac_hw_set_wiphy(mld);
+ iwl_mac_hw_set_misc(mld);
+
+ SET_IEEE80211_DEV(mld->hw, mld->trans->dev);
+
+ return ieee80211_register_hw(mld->hw);
+}
+
+static void
+iwl_mld_mac80211_tx(struct ieee80211_hw *hw,
+ struct ieee80211_tx_control *control, struct sk_buff *skb)
+{
+ struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+ struct ieee80211_sta *sta = control->sta;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_hdr *hdr = (void *)skb->data;
+ u32 link_id = u32_get_bits(info->control.flags,
+ IEEE80211_TX_CTRL_MLO_LINK);
+
+ /* In AP mode, mgmt frames are sent on the bcast station,
+ * so the FW can't translate the MLD addr to the link addr. Do it here
+ */
+ if (ieee80211_is_mgmt(hdr->frame_control) && sta &&
+ link_id != IEEE80211_LINK_UNSPECIFIED &&
+ !ieee80211_is_probe_resp(hdr->frame_control)) {
+ /* translate MLD addresses to LINK addresses */
+ struct ieee80211_link_sta *link_sta =
+ rcu_dereference(sta->link[link_id]);
+ struct ieee80211_bss_conf *link_conf =
+ rcu_dereference(info->control.vif->link_conf[link_id]);
+ struct ieee80211_mgmt *mgmt;
+
+ if (WARN_ON(!link_sta || !link_conf)) {
+ ieee80211_free_txskb(hw, skb);
+ return;
+ }
+
+ mgmt = (void *)hdr;
+ memcpy(mgmt->da, link_sta->addr, ETH_ALEN);
+ memcpy(mgmt->sa, link_conf->addr, ETH_ALEN);
+ memcpy(mgmt->bssid, link_conf->bssid, ETH_ALEN);
+ }
+
+ iwl_mld_tx_skb(mld, skb, NULL);
+}
+
+static void
+iwl_mld_restart_cleanup(struct iwl_mld *mld)
+{
+ iwl_cleanup_mld(mld);
+
+ ieee80211_iterate_interfaces(mld->hw, IEEE80211_IFACE_ITER_ACTIVE,
+ iwl_mld_cleanup_vif, NULL);
+
+ ieee80211_iterate_stations_atomic(mld->hw,
+ iwl_mld_cleanup_sta, NULL);
+
+ iwl_mld_ftm_restart_cleanup(mld);
+}
+
+static
+int iwl_mld_mac80211_start(struct ieee80211_hw *hw)
+{
+ struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+ int ret;
+ bool in_d3 = false;
+
+ lockdep_assert_wiphy(mld->wiphy);
+
+#ifdef CONFIG_PM_SLEEP
+ /* Unless the host goes into hibernation, the FW always stays on and
+ * the d3_resume flow is used. When wowlan is configured, mac80211
+ * calls its resume callback and the wowlan_resume flow is used.
+ */
+
+ in_d3 = mld->fw_status.in_d3;
+ if (in_d3) {
+ /* mac80211 already cleaned up the state, no need for cleanup */
+ ret = iwl_mld_no_wowlan_resume(mld);
+ if (ret)
+ iwl_mld_stop_fw(mld);
+ }
+#endif /* CONFIG_PM_SLEEP */
+
+ if (mld->fw_status.in_hw_restart) {
+ iwl_mld_stop_fw(mld);
+ iwl_mld_restart_cleanup(mld);
+ }
+
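+ /* Start the FW unless it is already running after a successful D3 resume */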
+ if (!in_d3 || ret) {
+ ret = iwl_mld_start_fw(mld);
+ if (ret)
+ goto error;
+ }
+
+ mld->scan.last_start_time_jiffies = jiffies;
+
+ iwl_dbg_tlv_time_point(&mld->fwrt, IWL_FW_INI_TIME_POINT_POST_INIT,
+ NULL);
+ iwl_dbg_tlv_time_point(&mld->fwrt, IWL_FW_INI_TIME_POINT_PERIODIC,
+ NULL);
+
+ return 0;
+
+error:
+ /* If we failed to restart the hw, there is nothing useful
+ * we can do but indicate we are no longer in restart.
+ */
+ mld->fw_status.in_hw_restart = false;
+
+ return ret;
+}
+
+static
+void iwl_mld_mac80211_stop(struct ieee80211_hw *hw, bool suspend)
+{
+ struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+
+ lockdep_assert_wiphy(mld->wiphy);
+
+ wiphy_work_cancel(mld->wiphy, &mld->add_txqs_wk);
+
+ /* if the suspend flow fails the fw is in error. Stop it here, and it
+ * will be started upon wakeup
+ */
+ if (!suspend || iwl_mld_no_wowlan_suspend(mld))
+ iwl_mld_stop_fw(mld);
+
+ /* The HW is stopped, so no more RX will come in. OTOH, the worker
+ * can't run as the wiphy lock is held. Cancel it in case it was
+ * scheduled just before we stopped the HW.
+ */
+ wiphy_work_cancel(mld->wiphy, &mld->async_handlers_wk);
+
+ /* Empty out the list, as the worker won't do that */
+ iwl_mld_purge_async_handlers_list(mld);
+
+ /* Clear in_hw_restart flag when stopping the hw, as mac80211 won't
+ * execute the restart.
+ */
+ mld->fw_status.in_hw_restart = false;
+
+ /* We shouldn't have any UIDs still set. Loop over all the UIDs to
+ * make sure there's nothing left there and warn if any is found.
+ */
+ for (int i = 0; i < ARRAY_SIZE(mld->scan.uid_status); i++)
+ if (WARN_ONCE(mld->scan.uid_status[i],
+ "UMAC scan UID %d status was not cleaned (0x%x 0x%x)\n",
+ i, mld->scan.uid_status[i], mld->scan.status))
+ mld->scan.uid_status[i] = 0;
+}
+
+static
+int iwl_mld_mac80211_config(struct ieee80211_hw *hw, u32 changed)
+{
+ return 0;
+}
+
+static
+int iwl_mld_mac80211_add_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+ int ret;
+
+ lockdep_assert_wiphy(mld->wiphy);
+
+ /* Construct mld_vif, add it to fw, and map its ID to ieee80211_vif */
+ ret = iwl_mld_add_vif(mld, vif);
+ if (ret)
+ return ret;
+
+ /*
+ * Add the default link, but not if this is an MLD vif as that implies
+ * the HW is restarting and it will be configured by change_vif_links.
+ */
+ if (!ieee80211_vif_is_mld(vif))
+ ret = iwl_mld_add_link(mld, &vif->bss_conf);
+ if (ret)
+ goto err;
+
+ if (vif->type == NL80211_IFTYPE_STATION) {
+ vif->driver_flags |= IEEE80211_VIF_REMOVE_AP_AFTER_DISASSOC;
+ if (!vif->p2p)
+ vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
+ IEEE80211_VIF_SUPPORTS_CQM_RSSI;
+ }
+
+ if (vif->p2p || iwl_fw_lookup_cmd_ver(mld->fw, PHY_CONTEXT_CMD, 0) < 5)
+ vif->driver_flags |= IEEE80211_VIF_IGNORE_OFDMA_WIDER_BW;
+
+ /*
+ * For an MLD vif (in restart) we may not have a link yet; delay the
+ * call until the initial change_vif_links.
+ */
+ if (vif->type == NL80211_IFTYPE_STATION &&
+ !ieee80211_vif_is_mld(vif))
+ iwl_mld_update_mac_power(mld, vif, false);
+
+ if (vif->type == NL80211_IFTYPE_MONITOR) {
+ mld->monitor.on = true;
+ ieee80211_hw_set(mld->hw, RX_INCLUDES_FCS);
+ }
+
+ if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
+ mld->p2p_device_vif = vif;
+
+ return 0;
+
+err:
+ iwl_mld_rm_vif(mld, vif);
+ return ret;
+}
+
+static
+void iwl_mld_mac80211_remove_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+
+ lockdep_assert_wiphy(mld->wiphy);
+
+ if (ieee80211_vif_type_p2p(vif) == NL80211_IFTYPE_STATION)
+ vif->driver_flags &= ~(IEEE80211_VIF_BEACON_FILTER |
+ IEEE80211_VIF_SUPPORTS_CQM_RSSI);
+
+ if (vif->type == NL80211_IFTYPE_MONITOR) {
+ __clear_bit(IEEE80211_HW_RX_INCLUDES_FCS, mld->hw->flags);
+ mld->monitor.on = false;
+ }
+
+ if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
+ mld->p2p_device_vif = NULL;
+
+ iwl_mld_remove_link(mld, &vif->bss_conf);
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ debugfs_remove(iwl_mld_vif_from_mac80211(vif)->dbgfs_slink);
+#endif
+
+ iwl_mld_rm_vif(mld, vif);
+}
+
+struct iwl_mld_mc_iter_data {
+ struct iwl_mld *mld;
+ int port_id;
+};
+
+static void iwl_mld_mc_iface_iterator(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mld_mc_iter_data *mc_data = data;
+ struct iwl_mld *mld = mc_data->mld;
+ struct iwl_mcast_filter_cmd *cmd = mld->mcast_filter_cmd;
+ struct iwl_host_cmd hcmd = {
+ .id = MCAST_FILTER_CMD,
+ .dataflags[0] = IWL_HCMD_DFL_NOCOPY,
+ };
+ int ret, len;
+
+ /* If we don't have free ports, mcast frames will be dropped */
+ if (WARN_ON_ONCE(mc_data->port_id >= MAX_PORT_ID_NUM))
+ return;
+
+ if (vif->type != NL80211_IFTYPE_STATION || !vif->cfg.assoc)
+ return;
+
+ cmd->port_id = mc_data->port_id++;
+ ether_addr_copy(cmd->bssid, vif->bss_conf.bssid);
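+ /* The command length must be a multiple of 4 */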
+ len = roundup(sizeof(*cmd) + cmd->count * ETH_ALEN, 4);
+
+ hcmd.len[0] = len;
+ hcmd.data[0] = cmd;
+
+ ret = iwl_mld_send_cmd(mld, &hcmd);
+ if (ret)
+ IWL_ERR(mld, "mcast filter cmd error. ret=%d\n", ret);
+}
+
+void iwl_mld_recalc_multicast_filter(struct iwl_mld *mld)
+{
+ struct iwl_mld_mc_iter_data iter_data = {
+ .mld = mld,
+ };
+
+ if (WARN_ON_ONCE(!mld->mcast_filter_cmd))
+ return;
+
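+ /* Re-send the stored filter command on every associated station interface */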
+ ieee80211_iterate_active_interfaces(mld->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mld_mc_iface_iterator,
+ &iter_data);
+}
+
+static u64
+iwl_mld_mac80211_prepare_multicast(struct ieee80211_hw *hw,
+ struct netdev_hw_addr_list *mc_list)
+{
+ struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+ struct iwl_mcast_filter_cmd *cmd;
+ struct netdev_hw_addr *addr;
+ int addr_count = netdev_hw_addr_list_count(mc_list);
+ bool pass_all = addr_count > MAX_MCAST_FILTERING_ADDRESSES;
+ int len;
+
+ if (pass_all)
+ addr_count = 0;
+
+ /* len must be a multiple of 4 */
+ len = roundup(sizeof(*cmd) + addr_count * ETH_ALEN, 4);
+ cmd = kzalloc(len, GFP_ATOMIC);
+ if (!cmd)
+ return 0;
+
+ if (pass_all) {
+ cmd->pass_all = 1;
+ goto out;
+ }
+
+ netdev_hw_addr_list_for_each(addr, mc_list) {
+ IWL_DEBUG_MAC80211(mld, "mcast addr (%d): %pM\n",
+ cmd->count, addr->addr);
+ ether_addr_copy(&cmd->addr_list[cmd->count * ETH_ALEN],
+ addr->addr);
+ cmd->count++;
+ }
+
+out:
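+ /* The pointer is passed back to us as the u64 multicast argument of
+ * configure_filter(), which takes ownership of the allocation.
+ */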
+ return (u64)(unsigned long)cmd;
+}
+
+static
+void iwl_mld_mac80211_configure_filter(struct ieee80211_hw *hw,
+ unsigned int changed_flags,
+ unsigned int *total_flags,
+ u64 multicast)
+{
+ struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+ struct iwl_mcast_filter_cmd *cmd = (void *)(unsigned long)multicast;
+
+ /* Replace previous configuration */
+ kfree(mld->mcast_filter_cmd);
+ mld->mcast_filter_cmd = cmd;
+
+ if (!cmd)
+ goto out;
+
+ if (changed_flags & FIF_ALLMULTI)
+ cmd->pass_all = !!(*total_flags & FIF_ALLMULTI);
+
+ if (cmd->pass_all)
+ cmd->count = 0;
+
+ iwl_mld_recalc_multicast_filter(mld);
+out:
+ *total_flags = 0;
+}
+
+static
+void iwl_mld_mac80211_wake_tx_queue(struct ieee80211_hw *hw,
+ struct ieee80211_txq *txq)
+{
+ struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+ struct iwl_mld_txq *mld_txq = iwl_mld_txq_from_mac80211(txq);
+
+ if (likely(mld_txq->status.allocated) || !txq->sta) {
+ iwl_mld_tx_from_txq(mld, txq);
+ return;
+ }
+
+ /* We don't support TSPEC tids. %IEEE80211_NUM_TIDS is for mgmt */
+ if (txq->tid != IEEE80211_NUM_TIDS && txq->tid >= IWL_MAX_TID_COUNT) {
+ IWL_DEBUG_MAC80211(mld, "TID %d is not supported\n", txq->tid);
+ return;
+ }
+
+ /* The worker will handle any packets we leave on the txq now */
+
+ spin_lock_bh(&mld->add_txqs_lock);
+ /* The list is being deleted only after the queue is fully allocated. */
+ if (list_empty(&mld_txq->list) &&
+ /* recheck under lock, otherwise it can be added twice */
+ !mld_txq->status.allocated) {
+ list_add_tail(&mld_txq->list, &mld->txqs_to_add);
+ wiphy_work_queue(mld->wiphy, &mld->add_txqs_wk);
+ }
+ spin_unlock_bh(&mld->add_txqs_lock);
+}
+
+static void iwl_mld_teardown_tdls_peers(struct iwl_mld *mld)
+{
+ lockdep_assert_wiphy(mld->wiphy);
+
+ for (int i = 0; i < mld->fw->ucode_capa.num_stations; i++) {
+ struct ieee80211_link_sta *link_sta;
+ struct iwl_mld_sta *mld_sta;
+
+ link_sta = wiphy_dereference(mld->wiphy,
+ mld->fw_id_to_link_sta[i]);
+ if (IS_ERR_OR_NULL(link_sta))
+ continue;
+
+ if (!link_sta->sta->tdls)
+ continue;
+
+ mld_sta = iwl_mld_sta_from_mac80211(link_sta->sta);
+
+ ieee80211_tdls_oper_request(mld_sta->vif, link_sta->addr,
+ NL80211_TDLS_TEARDOWN,
+ WLAN_REASON_TDLS_TEARDOWN_UNSPECIFIED,
+ GFP_KERNEL);
+ }
+}
+
+static
+int iwl_mld_add_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+ struct iwl_mld_phy *phy = iwl_mld_phy_from_mac80211(ctx);
+ int fw_id = iwl_mld_allocate_fw_phy_id(mld);
+ int ret;
+
+ if (fw_id < 0)
+ return fw_id;
+
+ phy->mld = mld;
+ phy->fw_id = fw_id;
+ phy->chandef = *iwl_mld_get_chandef_from_chanctx(mld, ctx);
+
+ ret = iwl_mld_phy_fw_action(mld, ctx, FW_CTXT_ACTION_ADD);
+ if (ret) {
+ mld->used_phy_ids &= ~BIT(phy->fw_id);
+ return ret;
+ }
+
+ if (hweight8(mld->used_phy_ids) > 1)
+ iwl_mld_teardown_tdls_peers(mld);
+
+ return 0;
+}
+
+static
+void iwl_mld_remove_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+ struct iwl_mld_phy *phy = iwl_mld_phy_from_mac80211(ctx);
+
+ iwl_mld_phy_fw_action(mld, ctx, FW_CTXT_ACTION_REMOVE);
+ mld->used_phy_ids &= ~BIT(phy->fw_id);
+}
+
+static
+void iwl_mld_change_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *ctx, u32 changed)
+{
+ struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+ struct iwl_mld_phy *phy = iwl_mld_phy_from_mac80211(ctx);
+ struct cfg80211_chan_def *chandef =
+ iwl_mld_get_chandef_from_chanctx(mld, ctx);
+
+ /* We don't care about these */
+ if (!(changed & ~(IEEE80211_CHANCTX_CHANGE_RX_CHAINS |
+ IEEE80211_CHANCTX_CHANGE_RADAR |
+ IEEE80211_CHANCTX_CHANGE_CHANNEL)))
+ return;
+
+ /* Check if a FW update is required */
+
+ if (changed & IEEE80211_CHANCTX_CHANGE_AP)
+ goto update;
+
+ if (chandef->chan == phy->chandef.chan &&
+ chandef->center_freq1 == phy->chandef.center_freq1 &&
+ chandef->punctured == phy->chandef.punctured) {
+ /* Check if we are toggling between HT and non-HT, no-op */
+ if (phy->chandef.width == chandef->width ||
+ (phy->chandef.width <= NL80211_CHAN_WIDTH_20 &&
+ chandef->width <= NL80211_CHAN_WIDTH_20))
+ return;
+ }
+update:
+ phy->chandef = *chandef;
+
+ iwl_mld_phy_fw_action(mld, ctx, FW_CTXT_ACTION_MODIFY);
+}
+
+static u8
+iwl_mld_chandef_get_primary_80(struct cfg80211_chan_def *chandef)
+{
+ int data_start;
+ int control_start;
+ int bw;
+
+ if (chandef->width == NL80211_CHAN_WIDTH_320)
+ bw = 320;
+ else if (chandef->width == NL80211_CHAN_WIDTH_160)
+ bw = 160;
+ else
+ return 0;
+
+ /* data is bw wide so the start is half the width */
+ data_start = chandef->center_freq1 - bw / 2;
+ /* the control channel is 20 MHz wide */
+ control_start = chandef->chan->center_freq - 10;
+
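+ /* Index of the 80 MHz sub-block that contains the control channel:
+ * 0-1 for a 160 MHz chandef, 0-3 for 320 MHz.
+ */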
+ return (control_start - data_start) / 80;
+}
+
+static bool iwl_mld_can_activate_link(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link)
+{
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+ struct iwl_mld_sta *mld_sta;
+ struct iwl_mld_link_sta *link_sta;
+
+ /* In association, we activate the assoc link before adding the STA. */
+ if (!mld_vif->ap_sta || !vif->cfg.assoc)
+ return true;
+
+ mld_sta = iwl_mld_sta_from_mac80211(mld_vif->ap_sta);
+
+ /* When switching links, we need to wait with the activation until the
+ * STA was added to the FW. It'll be activated in
+ * iwl_mld_update_link_stas
+ */
+ link_sta = wiphy_dereference(mld->wiphy, mld_sta->link[link->link_id]);
+
+ /* In restart we can have a link_sta that doesn't exist in FW yet */
+ return link_sta && link_sta->in_fw;
+}
+
+static
+int iwl_mld_assign_vif_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+ struct iwl_mld_link *mld_link = iwl_mld_link_from_mac80211(link);
+ unsigned int n_active = iwl_mld_count_active_links(mld, vif);
+ int ret;
+
+ lockdep_assert_wiphy(mld->wiphy);
+
+ if (WARN_ON(!mld_link))
+ return -EINVAL;
+
+ /* if the assigned one was not counted yet, count it now */
+ if (!rcu_access_pointer(mld_link->chan_ctx)) {
+ n_active++;
+
+ /* Track addition of non-BSS link */
+ if (ieee80211_vif_type_p2p(vif) != NL80211_IFTYPE_STATION) {
+ ret = iwl_mld_emlsr_check_non_bss_block(mld, 1);
+ if (ret)
+ return ret;
+ }
+ }
+
+ /* For AP, MAC parameters such as HE support are updated at this stage. */
+ if (vif->type == NL80211_IFTYPE_AP) {
+ ret = iwl_mld_mac_fw_action(mld, vif, FW_CTXT_ACTION_MODIFY);
+
+ if (ret) {
+ IWL_ERR(mld, "failed to update MAC %pM\n", vif->addr);
+ return -EINVAL;
+ }
+ }
+
+ rcu_assign_pointer(mld_link->chan_ctx, ctx);
+
+ if (n_active > 1) {
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+
+ iwl_mld_leave_omi_bw_reduction(mld);
+
+ /* Indicate to mac80211 that EML is enabled */
+ vif->driver_flags |= IEEE80211_VIF_EML_ACTIVE;
+
+ if (vif->active_links & BIT(mld_vif->emlsr.selected_links))
+ mld_vif->emlsr.primary = mld_vif->emlsr.selected_primary;
+ else
+ mld_vif->emlsr.primary = __ffs(vif->active_links);
+
+ iwl_dbg_tlv_time_point(&mld->fwrt, IWL_FW_INI_TIME_ESR_LINK_UP,
+ NULL);
+ }
+
+ /* First send the link command with the phy context ID.
+ * Now that we have the phy, we know the band so also the rates
+ */
+ ret = iwl_mld_change_link_in_fw(mld, link,
+ LINK_CONTEXT_MODIFY_RATES_INFO);
+ if (ret)
+ goto err;
+
+ /* TODO: Initialize rate control for the AP station, since we might be
+ * doing a link switch here - we cannot initialize it before since
+ * this needs the phy context assigned (and in FW?), and we cannot
+ * do it later because it needs to be initialized as soon as we're
+ * able to TX on the link, i.e. when active. (task=link-switch)
+ */
+
+ /* Now activate the link */
+ if (iwl_mld_can_activate_link(mld, vif, link)) {
+ ret = iwl_mld_activate_link(mld, link);
+ if (ret)
+ goto err;
+ }
+
+ if (vif->type == NL80211_IFTYPE_STATION)
+ iwl_mld_send_ap_tx_power_constraint_cmd(mld, vif, link);
+
+ if (vif->type == NL80211_IFTYPE_MONITOR) {
+ /* TODO: task=sniffer add sniffer station */
+ mld->monitor.p80 =
+ iwl_mld_chandef_get_primary_80(&vif->bss_conf.chanreq.oper);
+ }
+
+ return 0;
+err:
+ RCU_INIT_POINTER(mld_link->chan_ctx, NULL);
+ return ret;
+}
+
+static
+void iwl_mld_unassign_vif_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+ struct iwl_mld_link *mld_link = iwl_mld_link_from_mac80211(link);
+ unsigned int n_active = iwl_mld_count_active_links(mld, vif);
+
+ if (WARN_ON(!mld_link))
+ return;
+
+ /* Track removal of non-BSS link */
+ if (ieee80211_vif_type_p2p(vif) != NL80211_IFTYPE_STATION)
+ iwl_mld_emlsr_check_non_bss_block(mld, -1);
+
+ iwl_mld_deactivate_link(mld, link);
+
+ /* TODO: task=sniffer remove sniffer station */
+
+ if (n_active > 1) {
+ /* Indicate to mac80211 that EML is disabled */
+ vif->driver_flags &= ~IEEE80211_VIF_EML_ACTIVE;
+
+ iwl_dbg_tlv_time_point(&mld->fwrt,
+ IWL_FW_INI_TIME_ESR_LINK_DOWN,
+ NULL);
+ }
+
+ RCU_INIT_POINTER(mld_link->chan_ctx, NULL);
+
+ /* In the non-MLO case, remove/re-add the link to clean up FW state.
+ * In MLO, it'll be done in drv_change_vif_links().
+ */
+ if (!ieee80211_vif_is_mld(vif) && !mld_vif->ap_sta &&
+ !WARN_ON_ONCE(vif->cfg.assoc) &&
+ vif->type != NL80211_IFTYPE_AP && !mld->fw_status.in_hw_restart) {
+ iwl_mld_remove_link(mld, link);
+ iwl_mld_add_link(mld, link);
+ }
+}
+
+static
+int iwl_mld_mac80211_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
+{
+ return 0;
+}
+
+static void
+iwl_mld_link_info_changed_ap_ibss(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link,
+ u64 changes)
+{
+ u32 link_changes = 0;
+
+ if (changes & BSS_CHANGED_ERP_SLOT)
+ link_changes |= LINK_CONTEXT_MODIFY_RATES_INFO;
+
+ if (changes & (BSS_CHANGED_ERP_CTS_PROT | BSS_CHANGED_HT))
+ link_changes |= LINK_CONTEXT_MODIFY_PROTECT_FLAGS;
+
+ if (changes & (BSS_CHANGED_QOS | BSS_CHANGED_BANDWIDTH))
+ link_changes |= LINK_CONTEXT_MODIFY_QOS_PARAMS;
+
+ if (changes & BSS_CHANGED_HE_BSS_COLOR)
+ link_changes |= LINK_CONTEXT_MODIFY_HE_PARAMS;
+
+ if (link_changes)
+ iwl_mld_change_link_in_fw(mld, link, link_changes);
+
+ if (changes & BSS_CHANGED_BEACON)
+ iwl_mld_update_beacon_template(mld, vif, link);
+}
+
+static
+u32 iwl_mld_link_changed_mapping(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf,
+ u64 changes)
+{
+ u32 link_changes = 0;
+ bool has_he, has_eht;
+
+ if (changes & BSS_CHANGED_QOS && vif->cfg.assoc && link_conf->qos)
+ link_changes |= LINK_CONTEXT_MODIFY_QOS_PARAMS;
+
+ if (changes & (BSS_CHANGED_ERP_PREAMBLE | BSS_CHANGED_BASIC_RATES |
+ BSS_CHANGED_ERP_SLOT))
+ link_changes |= LINK_CONTEXT_MODIFY_RATES_INFO;
+
+ if (changes & (BSS_CHANGED_HT | BSS_CHANGED_ERP_CTS_PROT))
+ link_changes |= LINK_CONTEXT_MODIFY_PROTECT_FLAGS;
+
+ /* TODO: task=MLO check mac80211's HE flags and if command is needed
+ * every time there's a link change. Currently used flags are
+ * BSS_CHANGED_HE_OBSS_PD and BSS_CHANGED_HE_BSS_COLOR.
+ */
+ has_he = link_conf->he_support && !iwlwifi_mod_params.disable_11ax;
+ has_eht = link_conf->eht_support && !iwlwifi_mod_params.disable_11be;
+
+ if (vif->cfg.assoc && (has_he || has_eht)) {
+ IWL_DEBUG_MAC80211(mld, "Associated in HE mode\n");
+ link_changes |= LINK_CONTEXT_MODIFY_HE_PARAMS;
+ }
+
+ return link_changes;
+}
+
+static void
+iwl_mld_mac80211_link_info_changed_sta(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf,
+ u64 changes)
+{
+ u32 link_changes = iwl_mld_link_changed_mapping(mld, vif, link_conf,
+ changes);
+
+ if (link_changes)
+ iwl_mld_change_link_in_fw(mld, link_conf, link_changes);
+
+ if (changes & BSS_CHANGED_TPE)
+ iwl_mld_send_ap_tx_power_constraint_cmd(mld, vif, link_conf);
+
+ if (changes & BSS_CHANGED_BEACON_INFO)
+ iwl_mld_update_mac_power(mld, vif, false);
+
+ /* The firmware will wait quite a while after association before it
+ * starts filtering the beacons. We can safely enable beacon filtering
+ * upon CQM configuration, even if we didn't get a beacon yet.
+ */
+ if (changes & (BSS_CHANGED_CQM | BSS_CHANGED_BEACON_INFO))
+ iwl_mld_enable_beacon_filter(mld, link_conf, false);
+
+ /* If we have used OMI before to reduce bandwidth to 80 MHz and then
+ * increased to 160 MHz again, and then the AP changes to 320 MHz, it
+ * will think that we're limited to 160 MHz right now. Update it by
+ * requesting a new OMI bandwidth.
+ */
+ if (changes & BSS_CHANGED_BANDWIDTH) {
+ enum ieee80211_sta_rx_bandwidth bw;
+
+ bw = ieee80211_chan_width_to_rx_bw(link_conf->chanreq.oper.width);
+
+ iwl_mld_omi_ap_changed_bw(mld, link_conf, bw);
+ }
+
+ if (changes & BSS_CHANGED_BANDWIDTH)
+ iwl_mld_retry_emlsr(mld, vif);
+}
+
+static int iwl_mld_update_mu_groups(struct iwl_mld *mld,
+ struct ieee80211_bss_conf *link_conf)
+{
+ struct iwl_mu_group_mgmt_cmd cmd = {};
+
+ BUILD_BUG_ON(sizeof(cmd.membership_status) !=
+ sizeof(link_conf->mu_group.membership));
+ BUILD_BUG_ON(sizeof(cmd.user_position) !=
+ sizeof(link_conf->mu_group.position));
+
+ memcpy(cmd.membership_status, link_conf->mu_group.membership,
+ WLAN_MEMBERSHIP_LEN);
+ memcpy(cmd.user_position, link_conf->mu_group.position,
+ WLAN_USER_POSITION_LEN);
+
+ return iwl_mld_send_cmd_pdu(mld,
+ WIDE_ID(DATA_PATH_GROUP,
+ UPDATE_MU_GROUPS_CMD),
+ &cmd);
+}
+
+static void
+iwl_mld_mac80211_link_info_changed(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf,
+ u64 changes)
+{
+ struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+
+ switch (vif->type) {
+ case NL80211_IFTYPE_STATION:
+ iwl_mld_mac80211_link_info_changed_sta(mld, vif, link_conf,
+ changes);
+ break;
+ case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_ADHOC:
+ iwl_mld_link_info_changed_ap_ibss(mld, vif, link_conf,
+ changes);
+ break;
+ case NL80211_IFTYPE_MONITOR:
+ /* The firmware tracks this on its own in STATION mode, but
+ * obviously not in sniffer mode.
+ */
+ if (changes & BSS_CHANGED_MU_GROUPS)
+ iwl_mld_update_mu_groups(mld, link_conf);
+ break;
+ default:
+ /* shouldn't happen */
+ WARN_ON_ONCE(1);
+ }
+
+ /* We now know our BSSID, so we can configure the MAC context with
+ * eht_support if needed.
+ */
+ if (changes & BSS_CHANGED_BSSID)
+ iwl_mld_mac_fw_action(mld, vif, FW_CTXT_ACTION_MODIFY);
+
+ if (changes & BSS_CHANGED_TXPOWER)
+ iwl_mld_set_tx_power(mld, link_conf, link_conf->txpower);
+}
+
+static void
+iwl_mld_smps_wa(struct iwl_mld *mld, struct ieee80211_vif *vif, bool enable)
+{
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+
+ /* Send the device-level power commands since the
+ * firmware checks the POWER_TABLE_CMD's POWER_SAVE_EN bit to
+ * determine SMPS mode.
+ */
+ if (mld_vif->ps_disabled == !enable)
+ return;
+
+ mld_vif->ps_disabled = !enable;
+
+ iwl_mld_update_device_power(mld, false);
+}
+
+static
+void iwl_mld_mac80211_vif_cfg_changed(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ u64 changes)
+{
+ struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+ int ret;
+
+ lockdep_assert_wiphy(mld->wiphy);
+
+ if (vif->type != NL80211_IFTYPE_STATION)
+ return;
+
+ if (changes & BSS_CHANGED_ASSOC) {
+ ret = iwl_mld_mac_fw_action(mld, vif, FW_CTXT_ACTION_MODIFY);
+ if (ret)
+ IWL_ERR(mld, "failed to update context\n");
+
+ if (vif->cfg.assoc) {
+ /* Clear statistics to get clean beacon counter, and
+ * ask for periodic statistics, as they are needed for
+ * link selection and RX OMI decisions.
+ */
+ iwl_mld_clear_stats_in_fw(mld);
+ iwl_mld_request_periodic_fw_stats(mld, true);
+
+ iwl_mld_set_vif_associated(mld, vif);
+ } else {
+ iwl_mld_request_periodic_fw_stats(mld, false);
+ }
+ }
+
+ if (changes & BSS_CHANGED_PS) {
+ iwl_mld_smps_wa(mld, vif, vif->cfg.ps);
+ iwl_mld_update_mac_power(mld, vif, false);
+ }
+
+ /* TODO: task=MLO BSS_CHANGED_MLD_VALID_LINKS/CHANGED_MLD_TTLM */
+}
+
+static int
+iwl_mld_mac80211_hw_scan(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_scan_request *hw_req)
+{
+ struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+
+ if (WARN_ON(!hw_req->req.n_channels ||
+ hw_req->req.n_channels >
+ mld->fw->ucode_capa.n_scan_channels))
+ return -EINVAL;
+
+ return iwl_mld_regular_scan_start(mld, vif, &hw_req->req, &hw_req->ies);
+}
+
+static void
+iwl_mld_mac80211_cancel_hw_scan(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+
+ /* Due to a race condition, it's possible that mac80211 asks
+ * us to stop a hw_scan when it's already stopped. This can
+ * happen, for instance, if we stopped the scan ourselves,
+ * called ieee80211_scan_completed() and the userspace called
+ * cancel scan before ieee80211_scan_work() could run.
+ * To handle that, simply return if the scan is not running.
+ */
+ if (mld->scan.status & IWL_MLD_SCAN_REGULAR)
+ iwl_mld_scan_stop(mld, IWL_MLD_SCAN_REGULAR, true);
+}
+
+static int
+iwl_mld_mac80211_sched_scan_start(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct cfg80211_sched_scan_request *req,
+ struct ieee80211_scan_ies *ies)
+{
+ struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+
+ return iwl_mld_sched_scan_start(mld, vif, req, ies, IWL_MLD_SCAN_SCHED);
+}
+
+static int
+iwl_mld_mac80211_sched_scan_stop(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+
+ /* Due to a race condition, it's possible that mac80211 asks
+ * us to stop a sched_scan when it's already stopped. This
+ * can happen, for instance, if we stopped the scan ourselves,
+ * called ieee80211_sched_scan_stopped() and the userspace called
+ * stop sched scan before ieee80211_sched_scan_stopped_work()
+ * could run. To handle this, simply return if the scan is
+ * not running.
+ */
+ if (!(mld->scan.status & IWL_MLD_SCAN_SCHED))
+ return 0;
+
+ return iwl_mld_scan_stop(mld, IWL_MLD_SCAN_SCHED, false);
+}
+
+static void
+iwl_mld_restart_complete_vif(void *data, u8 *mac, struct ieee80211_vif *vif)
+{
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+ struct ieee80211_bss_conf *link_conf;
+ struct iwl_mld *mld = data;
+ int link_id;
+
+ for_each_vif_active_link(vif, link_conf, link_id) {
+ enum ieee80211_sta_rx_bandwidth bw;
+ struct iwl_mld_link *mld_link;
+
+ mld_link = wiphy_dereference(mld->wiphy,
+ mld_vif->link[link_id]);
+
+ if (WARN_ON_ONCE(!mld_link))
+ continue;
+
+ bw = mld_link->rx_omi.bw_in_progress;
+ if (bw)
+ iwl_mld_change_link_omi_bw(mld, link_conf, bw);
+ }
+}
+
+static void
+iwl_mld_mac80211_reconfig_complete(struct ieee80211_hw *hw,
+ enum ieee80211_reconfig_type reconfig_type)
+{
+ struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+
+ switch (reconfig_type) {
+ case IEEE80211_RECONFIG_TYPE_RESTART:
+ mld->fw_status.in_hw_restart = false;
+ iwl_mld_send_recovery_cmd(mld, ERROR_RECOVERY_END_OF_RECOVERY);
+
+ ieee80211_iterate_interfaces(mld->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mld_restart_complete_vif, mld);
+
+ iwl_trans_finish_sw_reset(mld->trans);
+ /* no need to lock, adding in parallel would schedule too */
+ if (!list_empty(&mld->txqs_to_add))
+ wiphy_work_queue(mld->wiphy, &mld->add_txqs_wk);
+
+ IWL_INFO(mld, "restart completed\n");
+ break;
+ case IEEE80211_RECONFIG_TYPE_SUSPEND:
+ break;
+ }
+}
+
+static
+void iwl_mld_mac80211_mgd_prepare_tx(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_prep_tx_info *info)
+{
+ struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+ u32 duration = IWL_MLD_SESSION_PROTECTION_ASSOC_TIME_MS;
+
+ /* After a successful association the connection is established
+ * and we can rely on the quota to send the disassociation frame.
+ */
+ if (info->was_assoc)
+ return;
+
+ if (info->duration > duration)
+ duration = info->duration;
+
+ iwl_mld_schedule_session_protection(mld, vif, duration,
+ IWL_MLD_SESSION_PROTECTION_MIN_TIME_MS,
+ info->link_id);
+}
+
+static
+void iwl_mld_mac_mgd_complete_tx(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_prep_tx_info *info)
+{
+ struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+
+ /* Successful authentication is the only case that requires to let
+ * the session protection go. We'll need it for the upcoming
+ * association. For all the other cases, we need to cancel the session
+ * protection.
+ * After successful association the connection is established and
+ * further mgd tx can rely on the quota.
+ */
+ if (info->success && info->subtype == IEEE80211_STYPE_AUTH)
+ return;
+
+ /* The firmware will be on the medium after we configure the vif as
+ * associated. Removing the session protection allows the firmware
+ * to stop being on the medium. In order to ensure the continuity of
+ * our presence on the medium, we need to first configure the vif as
+ * associated and only then remove the session protection.
+ * Currently, mac80211 calls vif_cfg_changed() first and then
+ * drv_mgd_complete_tx(). Ensure that this assumption stays true with
+ * a warning.
+ */
+ WARN_ON(info->success &&
+ (info->subtype == IEEE80211_STYPE_ASSOC_REQ ||
+ info->subtype == IEEE80211_STYPE_REASSOC_REQ) &&
+ !vif->cfg.assoc);
+
+ iwl_mld_cancel_session_protection(mld, vif, info->link_id);
+}
+
+static int
+iwl_mld_mac80211_conf_tx(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ unsigned int link_id, u16 ac,
+ const struct ieee80211_tx_queue_params *params)
+{
+ struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+ struct iwl_mld_link *link;
+
+ lockdep_assert_wiphy(mld->wiphy);
+
+ link = iwl_mld_link_dereference_check(mld_vif, link_id);
+ if (!link)
+ return -EINVAL;
+
+ link->queue_params[ac] = *params;
+
+ /* No need to update right away, we'll get BSS_CHANGED_QOS.
+ * The exception is the P2P_DEVICE interface, which needs an
+ * immediate update.
+ */
+ if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
+ iwl_mld_change_link_in_fw(mld, &vif->bss_conf,
+ LINK_CONTEXT_MODIFY_QOS_PARAMS);
+
+ return 0;
+}
+
+static void iwl_mld_set_uapsd(struct iwl_mld *mld, struct ieee80211_vif *vif)
+{
+ vif->driver_flags &= ~IEEE80211_VIF_SUPPORTS_UAPSD;
+
+ if (vif->type != NL80211_IFTYPE_STATION)
+ return;
+
+ if (vif->p2p &&
+ !(iwlwifi_mod_params.uapsd_disable & IWL_DISABLE_UAPSD_P2P_CLIENT))
+ vif->driver_flags |= IEEE80211_VIF_SUPPORTS_UAPSD;
+
+ if (!vif->p2p &&
+ !(iwlwifi_mod_params.uapsd_disable & IWL_DISABLE_UAPSD_BSS))
+ vif->driver_flags |= IEEE80211_VIF_SUPPORTS_UAPSD;
+}
+
+int iwl_mld_tdls_sta_count(struct iwl_mld *mld)
+{
+ int count = 0;
+
+ lockdep_assert_wiphy(mld->wiphy);
+
+ for (int i = 0; i < mld->fw->ucode_capa.num_stations; i++) {
+ struct ieee80211_link_sta *link_sta;
+
+ link_sta = wiphy_dereference(mld->wiphy,
+ mld->fw_id_to_link_sta[i]);
+ if (IS_ERR_OR_NULL(link_sta))
+ continue;
+
+ if (!link_sta->sta->tdls)
+ continue;
+
+ count++;
+ }
+
+ return count;
+}
+
+static void iwl_mld_check_he_obss_narrow_bw_ru_iter(struct wiphy *wiphy,
+ struct cfg80211_bss *bss,
+ void *_data)
+{
+ bool *tolerated = _data;
+ const struct cfg80211_bss_ies *ies;
+ const struct element *elem;
+
+ rcu_read_lock();
+ ies = rcu_dereference(bss->ies);
+ elem = cfg80211_find_elem(WLAN_EID_EXT_CAPABILITY, ies->data,
+ ies->len);
+
+ if (!elem || elem->datalen < 10 ||
+ !(elem->data[10] &
+ WLAN_EXT_CAPA10_OBSS_NARROW_BW_RU_TOLERANCE_SUPPORT)) {
+ *tolerated = false;
+ }
+ rcu_read_unlock();
+}
+
+static void
+iwl_mld_check_he_obss_narrow_bw_ru(struct iwl_mld *mld,
+ struct iwl_mld_link *mld_link,
+ struct ieee80211_bss_conf *link_conf)
+{
+ bool tolerated = true;
+
+ if (WARN_ON_ONCE(!link_conf->chanreq.oper.chan))
+ return;
+
+ if (!(link_conf->chanreq.oper.chan->flags & IEEE80211_CHAN_RADAR)) {
+ mld_link->he_ru_2mhz_block = false;
+ return;
+ }
+
+ cfg80211_bss_iter(mld->wiphy, &link_conf->chanreq.oper,
+ iwl_mld_check_he_obss_narrow_bw_ru_iter, &tolerated);
+
+ /* Block 26-tone RU UL OFDMA transmissions using HE TB PPDU if there
+ * is at least one AP on the radar channel that cannot tolerate them.
+ */
+ mld_link->he_ru_2mhz_block = !tolerated;
+}
+
+static void iwl_mld_link_set_2mhz_block(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct ieee80211_link_sta *link_sta;
+ unsigned int link_id;
+
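+ /* Evaluate the 26-tone RU OBSS tolerance on every active HE link */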
+ for_each_sta_active_link(vif, sta, link_sta, link_id) {
+ struct ieee80211_bss_conf *link_conf =
+ link_conf_dereference_protected(vif, link_id);
+ struct iwl_mld_link *mld_link =
+ iwl_mld_link_from_mac80211(link_conf);
+
+ if (WARN_ON(!link_conf || !mld_link))
+ continue;
+
+ if (link_sta->he_cap.has_he)
+ iwl_mld_check_he_obss_narrow_bw_ru(mld, mld_link,
+ link_conf);
+ }
+}
+
+static int iwl_mld_move_sta_state_up(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ enum ieee80211_sta_state old_state,
+ enum ieee80211_sta_state new_state)
+{
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+ int tdls_count = 0;
+ int ret;
+
+ if (old_state == IEEE80211_STA_NOTEXIST &&
+ new_state == IEEE80211_STA_NONE) {
+ if (sta->tdls) {
+ if (vif->p2p || hweight8(mld->used_phy_ids) != 1)
+ return -EBUSY;
+
+ tdls_count = iwl_mld_tdls_sta_count(mld);
+ if (tdls_count >= IWL_TDLS_STA_COUNT)
+ return -EBUSY;
+ }
+
+ /*
+ * If this is the first STA (i.e. the AP) it won't do
+ * anything, otherwise we must leave OMI for any new STA on
+ * any other interface, or for TDLS, etc.
+ * Need to call this _before_ adding the STA so it can
+ * look up the one STA to use to ask mac80211 to leave
+ * OMI; in the unlikely event that adding the new STA
+ * then fails, we'll just re-enter OMI later (via the
+ * statistics notification handling).
+ */
+ iwl_mld_leave_omi_bw_reduction(mld);
+
+ ret = iwl_mld_add_sta(mld, sta, vif, STATION_TYPE_PEER);
+ if (ret)
+ return ret;
+
+ /* just added first TDLS STA, so disable PM */
+ if (sta->tdls && tdls_count == 0)
+ iwl_mld_update_mac_power(mld, vif, false);
+
+ if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls)
+ mld_vif->ap_sta = sta;
+
+ /* Initialize TLC here already - this really tells
+ * the firmware only what the supported legacy rates are
+ * (may be) since it's initialized already from what the
+ * AP advertised in the beacon/probe response. This will
+ * allow the firmware to send auth/assoc frames with one
+ * of the supported rates already, rather than having to
+ * use a mandatory rate.
+ * If we're the AP, we'll just assume mandatory rates at
+ * this point, but we know nothing about the STA anyway.
+ */
+ iwl_mld_config_tlc(mld, vif, sta);
+
+ return ret;
+ } else if (old_state == IEEE80211_STA_NONE &&
+ new_state == IEEE80211_STA_AUTH) {
+ iwl_mld_set_uapsd(mld, vif);
+ return 0;
+ } else if (old_state == IEEE80211_STA_AUTH &&
+ new_state == IEEE80211_STA_ASSOC) {
+ ret = iwl_mld_update_all_link_stations(mld, sta);
+
+ if (vif->type == NL80211_IFTYPE_STATION)
+ iwl_mld_link_set_2mhz_block(mld, vif, sta);
+ /* Now the link_sta's capabilities are set, update the FW */
+ iwl_mld_config_tlc(mld, vif, sta);
+
+ if (vif->type == NL80211_IFTYPE_AP) {
+ /* Update MAC_CFG_FILTER_ACCEPT_BEACON if at least
+ * one sta is associated
+ */
+ if (++mld_vif->num_associated_stas == 1)
+ ret = iwl_mld_mac_fw_action(mld, vif,
+ FW_CTXT_ACTION_MODIFY);
+ }
+
+ return ret;
+ } else if (old_state == IEEE80211_STA_ASSOC &&
+ new_state == IEEE80211_STA_AUTHORIZED) {
+ ret = 0;
+
+ if (!sta->tdls) {
+ mld_vif->authorized = true;
+
+ /* Ensure any block due to a non-BSS link is synced */
+ iwl_mld_emlsr_check_non_bss_block(mld, 0);
+
+ /* Block EMLSR until a certain throughput is reached */
+ if (!mld->fw_status.in_hw_restart &&
+ IWL_MLD_ENTER_EMLSR_TPT_THRESH > 0)
+ iwl_mld_block_emlsr(mld_vif->mld, vif,
+ IWL_MLD_EMLSR_BLOCKED_TPT,
+ 0);
+
+ /* clear COEX_HIGH_PRIORITY_ENABLE */
+ ret = iwl_mld_mac_fw_action(mld, vif,
+ FW_CTXT_ACTION_MODIFY);
+ if (ret)
+ return ret;
+ iwl_mld_smps_wa(mld, vif, vif->cfg.ps);
+ }
+
+ /* MFP is set by default before the station is authorized.
+ * Clear it here in case it's not used.
+ */
+ if (!sta->mfp)
+ ret = iwl_mld_update_all_link_stations(mld, sta);
+
+ /* We can use wide bandwidth now, not only 20 MHz */
+ iwl_mld_config_tlc(mld, vif, sta);
+
+ return ret;
+ } else {
+ return -EINVAL;
+ }
+}
+
+static int iwl_mld_move_sta_state_down(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ enum ieee80211_sta_state old_state,
+ enum ieee80211_sta_state new_state)
+{
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+
+ if (old_state == IEEE80211_STA_AUTHORIZED &&
+ new_state == IEEE80211_STA_ASSOC) {
+ if (!sta->tdls) {
+ mld_vif->authorized = false;
+
+ memset(&mld_vif->emlsr.zeroed_on_not_authorized, 0,
+ sizeof(mld_vif->emlsr.zeroed_on_not_authorized));
+
+ wiphy_delayed_work_cancel(mld->wiphy,
+ &mld_vif->emlsr.prevent_done_wk);
+ wiphy_delayed_work_cancel(mld->wiphy,
+ &mld_vif->emlsr.tmp_non_bss_done_wk);
+ wiphy_work_cancel(mld->wiphy, &mld_vif->emlsr.unblock_tpt_wk);
+ wiphy_delayed_work_cancel(mld->wiphy,
+ &mld_vif->emlsr.check_tpt_wk);
+
+ iwl_mld_reset_cca_40mhz_workaround(mld, vif);
+ iwl_mld_smps_wa(mld, vif, true);
+ }
+
+ /* once we move into assoc state, need to update the FW to
+ * stop using wide bandwidth
+ */
+ iwl_mld_config_tlc(mld, vif, sta);
+ } else if (old_state == IEEE80211_STA_ASSOC &&
+ new_state == IEEE80211_STA_AUTH) {
+ if (vif->type == NL80211_IFTYPE_AP &&
+ !WARN_ON(!mld_vif->num_associated_stas)) {
+ /* Update MAC_CFG_FILTER_ACCEPT_BEACON if the last sta
+ * is disassociating
+ */
+ if (--mld_vif->num_associated_stas == 0)
+ iwl_mld_mac_fw_action(mld, vif,
+ FW_CTXT_ACTION_MODIFY);
+ }
+ } else if (old_state == IEEE80211_STA_AUTH &&
+ new_state == IEEE80211_STA_NONE) {
+ /* nothing */
+ } else if (old_state == IEEE80211_STA_NONE &&
+ new_state == IEEE80211_STA_NOTEXIST) {
+ iwl_mld_remove_sta(mld, sta);
+
+ if (sta->tdls && iwl_mld_tdls_sta_count(mld) == 0) {
+ /* just removed last TDLS STA, so enable PM */
+ iwl_mld_update_mac_power(mld, vif, false);
+ }
+ } else {
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int iwl_mld_mac80211_sta_state(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ enum ieee80211_sta_state old_state,
+ enum ieee80211_sta_state new_state)
+{
+ struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+ struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(sta);
+
+ IWL_DEBUG_MAC80211(mld, "station %pM state change %d->%d\n",
+ sta->addr, old_state, new_state);
+
+ mld_sta->sta_state = new_state;
+
+ if (old_state < new_state)
+ return iwl_mld_move_sta_state_up(mld, vif, sta, old_state,
+ new_state);
+ else
+ return iwl_mld_move_sta_state_down(mld, vif, sta, old_state,
+ new_state);
+}
+
+static void iwl_mld_mac80211_flush(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ u32 queues, bool drop)
+{
+ struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+
+ /* Make sure we're done with the deferred traffic before flushing */
+ iwl_mld_add_txq_list(mld);
+
+ for (int i = 0; i < mld->fw->ucode_capa.num_stations; i++) {
+ struct ieee80211_link_sta *link_sta =
+ wiphy_dereference(mld->wiphy,
+ mld->fw_id_to_link_sta[i]);
+
+ if (IS_ERR_OR_NULL(link_sta))
+ continue;
+
+ /* Check that the sta belongs to the given vif */
+ if (vif && vif != iwl_mld_sta_from_mac80211(link_sta->sta)->vif)
+ continue;
+
+ if (drop)
+ iwl_mld_flush_sta_txqs(mld, link_sta->sta);
+ else
+ iwl_mld_wait_sta_txqs_empty(mld, link_sta->sta);
+ }
+}
+
+static void iwl_mld_mac80211_flush_sta(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+
+ iwl_mld_flush_sta_txqs(mld, sta);
+}
+
+static int
+iwl_mld_mac80211_ampdu_action(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_ampdu_params *params)
+{
+ struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+ struct ieee80211_sta *sta = params->sta;
+ enum ieee80211_ampdu_mlme_action action = params->action;
+ u16 tid = params->tid;
+ u16 ssn = params->ssn;
+ u16 buf_size = params->buf_size;
+ u16 timeout = params->timeout;
+ int ret;
+
+ IWL_DEBUG_HT(mld, "A-MPDU action on addr %pM tid: %d action: %d\n",
+ sta->addr, tid, action);
+
+ switch (action) {
+ case IEEE80211_AMPDU_RX_START:
+ ret = iwl_mld_ampdu_rx_start(mld, sta, tid, ssn, buf_size,
+ timeout);
+ break;
+ case IEEE80211_AMPDU_RX_STOP:
+ ret = iwl_mld_ampdu_rx_stop(mld, sta, tid);
+ break;
+ default:
+ /* The mac80211 TX_AMPDU_SETUP_IN_HW flag is set for all
+ * devices, since all support TX A-MPDU offload in hardware.
+ * Therefore, no TX action should be requested here.
+ */
+ WARN_ON_ONCE(1);
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+static bool iwl_mld_can_hw_csum(struct sk_buff *skb)
+{
+ u8 protocol = ip_hdr(skb)->protocol;
+
+ return protocol == IPPROTO_TCP || protocol == IPPROTO_UDP;
+}
+
+static bool iwl_mld_mac80211_can_aggregate(struct ieee80211_hw *hw,
+ struct sk_buff *head,
+ struct sk_buff *skb)
+{
+ if (!IS_ENABLED(CONFIG_INET))
+ return false;
+
+ /* For now don't aggregate IPv6 in AMSDU */
+ if (skb->protocol != htons(ETH_P_IP))
+ return false;
+
+ /* Allow aggregation only if both frames have the same HW csum offload
+ * capability, ensuring consistent HW or SW csum handling in A-MSDU.
+ */
+ return iwl_mld_can_hw_csum(skb) == iwl_mld_can_hw_csum(head);
+}
+
+static void iwl_mld_mac80211_sync_rx_queues(struct ieee80211_hw *hw)
+{
+ struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+
+ iwl_mld_sync_rx_queues(mld, IWL_MLD_RXQ_EMPTY, NULL, 0);
+}
+
+static void iwl_mld_sta_rc_update(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_link_sta *link_sta,
+ u32 changed)
+{
+ struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+
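+ /* Rate-control relevant parameters changed; reconfigure TLC for this link */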
+ if (changed & (IEEE80211_RC_BW_CHANGED |
+ IEEE80211_RC_SUPP_RATES_CHANGED |
+ IEEE80211_RC_NSS_CHANGED)) {
+ struct ieee80211_bss_conf *link =
+ link_conf_dereference_check(vif, link_sta->link_id);
+
+ if (WARN_ON(!link))
+ return;
+
+ iwl_mld_config_tlc_link(mld, vif, link, link_sta);
+ }
+}
+
+static void iwl_mld_set_wakeup(struct ieee80211_hw *hw, bool enabled)
+{
+ struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+
+ device_set_wakeup_enable(mld->trans->dev, enabled);
+}
+
+/* Returns 0 on success, or 1 if suspending with wowlan failed:
+ * if the circumstances didn't satisfy the conditions for suspension
+ * with wowlan, mac80211 will use the no_wowlan flow.
+ * If an error occurred, we update the trans status and state here,
+ * and the result will be stopping the FW.
+ */
+static int
+iwl_mld_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
+{
+ struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+ int ret;
+
+ iwl_fw_runtime_suspend(&mld->fwrt);
+
+ ret = iwl_mld_wowlan_suspend(mld, wowlan);
+ if (ret) {
+ if (ret < 0) {
+ mld->trans->state = IWL_TRANS_NO_FW;
+ set_bit(STATUS_FW_ERROR, &mld->trans->status);
+ }
+ return 1;
+ }
+
+ if (iwl_mld_no_wowlan_suspend(mld))
+ return 1;
+
+ return 0;
+}
+
+static int iwl_mld_resume(struct ieee80211_hw *hw)
+{
+ struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+ int ret;
+
+ ret = iwl_mld_wowlan_resume(mld);
+ if (ret)
+ return ret;
+
+ iwl_fw_runtime_resume(&mld->fwrt);
+
+ iwl_mld_low_latency_restart(mld);
+
+ return 0;
+}
+
+static int iwl_mld_alloc_ptk_pn(struct iwl_mld *mld,
+ struct iwl_mld_sta *mld_sta,
+ struct ieee80211_key_conf *key,
+ struct iwl_mld_ptk_pn **ptk_pn)
+{
+ u8 num_rx_queues = mld->trans->num_rx_queues;
+ int keyidx = key->keyidx;
+ struct ieee80211_key_seq seq;
+
+ if (WARN_ON(keyidx >= ARRAY_SIZE(mld_sta->ptk_pn)))
+ return -EINVAL;
+
+ WARN_ON(rcu_access_pointer(mld_sta->ptk_pn[keyidx]));
+ *ptk_pn = kzalloc(struct_size(*ptk_pn, q, num_rx_queues),
+ GFP_KERNEL);
+ if (!*ptk_pn)
+ return -ENOMEM;
+
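+ /* Seed the per-queue PN tracking with the current RX PN of each TID */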
+ for (u8 tid = 0; tid < IWL_MAX_TID_COUNT; tid++) {
+ ieee80211_get_key_rx_seq(key, tid, &seq);
+ for (u8 q = 0; q < num_rx_queues; q++)
+ memcpy((*ptk_pn)->q[q].pn[tid], seq.ccmp.pn,
+ IEEE80211_CCMP_PN_LEN);
+ }
+
+ rcu_assign_pointer(mld_sta->ptk_pn[keyidx], *ptk_pn);
+
+ return 0;
+}
+
+static int iwl_mld_set_key_add(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key)
+{
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+ struct iwl_mld_sta *mld_sta =
+ sta ? iwl_mld_sta_from_mac80211(sta) : NULL;
+ struct iwl_mld_ptk_pn *ptk_pn = NULL;
+ int keyidx = key->keyidx;
+ int ret;
+
+ /* Will be set to 0 if added successfully */
+ key->hw_key_idx = STA_KEY_IDX_INVALID;
+
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ case WLAN_CIPHER_SUITE_WEP104:
+ IWL_DEBUG_MAC80211(mld, "Use SW encryption for WEP\n");
+ return -EOPNOTSUPP;
+ case WLAN_CIPHER_SUITE_TKIP:
+ if (vif->type == NL80211_IFTYPE_STATION) {
+ key->flags |= IEEE80211_KEY_FLAG_PUT_MIC_SPACE;
+ break;
+ }
+ IWL_DEBUG_MAC80211(mld, "Use SW encryption for TKIP\n");
+ return -EOPNOTSUPP;
+ case WLAN_CIPHER_SUITE_CCMP:
+ case WLAN_CIPHER_SUITE_GCMP:
+ case WLAN_CIPHER_SUITE_GCMP_256:
+ case WLAN_CIPHER_SUITE_AES_CMAC:
+ case WLAN_CIPHER_SUITE_BIP_GMAC_128:
+ case WLAN_CIPHER_SUITE_BIP_GMAC_256:
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
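+ /* Key indices 6 and 7 are the beacon protection (BIGTK) keys */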
+ if (vif->type == NL80211_IFTYPE_STATION &&
+ (keyidx == 6 || keyidx == 7))
+ rcu_assign_pointer(mld_vif->bigtks[keyidx - 6], key);
+
+ /* After exiting from RFKILL, hostapd configures the GTK/IGTK before
+ * the AP is started, but those keys can't be sent to the FW before
+ * the MCAST/BCAST STAs are added to it (which happens upon AP start).
+ * Store the key here to be sent later, when the AP is started.
+ */
+ if ((vif->type == NL80211_IFTYPE_ADHOC ||
+ vif->type == NL80211_IFTYPE_AP) && !sta &&
+ !mld_vif->ap_ibss_active)
+ return iwl_mld_store_ap_early_key(mld, key, mld_vif);
+
+ if (!mld->fw_status.in_hw_restart && mld_sta &&
+ key->flags & IEEE80211_KEY_FLAG_PAIRWISE &&
+ (key->cipher == WLAN_CIPHER_SUITE_CCMP ||
+ key->cipher == WLAN_CIPHER_SUITE_GCMP ||
+ key->cipher == WLAN_CIPHER_SUITE_GCMP_256)) {
+ ret = iwl_mld_alloc_ptk_pn(mld, mld_sta, key, &ptk_pn);
+ if (ret)
+ return ret;
+ }
+
+ IWL_DEBUG_MAC80211(mld, "set hwcrypto key (sta:%pM, id:%d)\n",
+ sta ? sta->addr : NULL, keyidx);
+
+ ret = iwl_mld_add_key(mld, vif, sta, key);
+ if (ret) {
+ IWL_WARN(mld, "set key failed (%d)\n", ret);
+ if (ptk_pn) {
+ RCU_INIT_POINTER(mld_sta->ptk_pn[keyidx], NULL);
+ kfree(ptk_pn);
+ }
+
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static void iwl_mld_set_key_remove(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key)
+{
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+ struct iwl_mld_sta *mld_sta =
+ sta ? iwl_mld_sta_from_mac80211(sta) : NULL;
+ int keyidx = key->keyidx;
+
+ if (vif->type == NL80211_IFTYPE_STATION &&
+ (keyidx == 6 || keyidx == 7))
+ RCU_INIT_POINTER(mld_vif->bigtks[keyidx - 6], NULL);
+
+ if (mld_sta && key->flags & IEEE80211_KEY_FLAG_PAIRWISE &&
+ (key->cipher == WLAN_CIPHER_SUITE_CCMP ||
+ key->cipher == WLAN_CIPHER_SUITE_GCMP ||
+ key->cipher == WLAN_CIPHER_SUITE_GCMP_256)) {
+ struct iwl_mld_ptk_pn *ptk_pn;
+
+ if (WARN_ON(keyidx >= ARRAY_SIZE(mld_sta->ptk_pn)))
+ return;
+
+ ptk_pn = wiphy_dereference(mld->wiphy,
+ mld_sta->ptk_pn[keyidx]);
+ RCU_INIT_POINTER(mld_sta->ptk_pn[keyidx], NULL);
+ if (!WARN_ON(!ptk_pn))
+ kfree_rcu(ptk_pn, rcu_head);
+ }
+
+ /* if this key was stored to be added later to the FW - free it here */
+ if (!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
+ iwl_mld_free_ap_early_key(mld, key, mld_vif);
+
+ /* We already removed it */
+ if (key->hw_key_idx == STA_KEY_IDX_INVALID)
+ return;
+
+ IWL_DEBUG_MAC80211(mld, "disable hwcrypto key\n");
+
+ iwl_mld_remove_key(mld, vif, sta, key);
+}
+
+static int iwl_mld_mac80211_set_key(struct ieee80211_hw *hw,
+ enum set_key_cmd cmd,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key)
+{
+ struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+ int ret;
+
+ switch (cmd) {
+ case SET_KEY:
+ ret = iwl_mld_set_key_add(mld, vif, sta, key);
+ if (ret)
+ ret = -EOPNOTSUPP;
+ break;
+ case DISABLE_KEY:
+ iwl_mld_set_key_remove(mld, vif, sta, key);
+ ret = 0;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static int
+iwl_mld_pre_channel_switch(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_channel_switch *chsw)
+{
+ struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+ struct iwl_mld_link *mld_link =
+ iwl_mld_link_dereference_check(mld_vif, chsw->link_id);
+ u8 primary;
+ int selected;
+
+ lockdep_assert_wiphy(mld->wiphy);
+
+ if (WARN_ON(!mld_link))
+ return -EINVAL;
+
+ IWL_DEBUG_MAC80211(mld, "pre CSA to freq %d\n",
+ chsw->chandef.center_freq1);
+
+ if (!iwl_mld_emlsr_active(vif))
+ return 0;
+
+ primary = iwl_mld_get_primary_link(vif);
+
+ /* stay on the primary link unless it is undergoing a CSA with quiet */
+ if (chsw->link_id == primary && chsw->block_tx)
+ selected = iwl_mld_get_other_link(vif, primary);
+ else
+ selected = primary;
+
+ /* Remember to tell the firmware that this link can't TX.
+ * Note that this logic seems to be unrelated to emlsr, but it
+ * really is needed only when emlsr is active. When we have a
+ * single link, the firmware will handle all this on its own.
+ * In multi-link scenarios, we can learn about the CSA from
+ * another link, and this logic is too complex for the firmware
+ * to track.
+ * Since we want to de-activate the link that got a CSA with mode=1,
+ * we need to tell the firmware not to send any frame on that link,
+ * as the firmware may not be aware that the link is under a CSA
+ * with mode=1 (no TX allowed).
+ */
+ mld_link->silent_deactivation = chsw->block_tx;
+ iwl_mld_exit_emlsr(mld, vif, IWL_MLD_EMLSR_EXIT_CSA, selected);
+
+ return 0;
+}
+
+static void
+iwl_mld_channel_switch(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_channel_switch *chsw)
+{
+ struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+
+ /* By implementing this operation, we prevent mac80211 from
+ * starting its own channel switch timer, so that we can call
+ * ieee80211_chswitch_done() ourselves at the right time
+ * (Upon receiving the channel_switch_start notification from the fw)
+ */
+ IWL_DEBUG_MAC80211(mld,
+ "dummy channel switch op\n");
+}
+
+static int
+iwl_mld_post_channel_switch(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf)
+{
+ struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+ struct iwl_mld_link *mld_link = iwl_mld_link_from_mac80211(link_conf);
+
+ lockdep_assert_wiphy(mld->wiphy);
+
+ WARN_ON(mld_link->silent_deactivation);
+
+ return 0;
+}
+
+static void
+iwl_mld_abort_channel_switch(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf)
+{
+ struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+ struct iwl_mld_link *mld_link = iwl_mld_link_from_mac80211(link_conf);
+
+ IWL_DEBUG_MAC80211(mld,
+ "abort channel switch op\n");
+ mld_link->silent_deactivation = false;
+}
+
+static int
+iwl_mld_switch_vif_chanctx_swap(struct ieee80211_hw *hw,
+ struct ieee80211_vif_chanctx_switch *vifs)
+{
+ struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+ int ret;
+
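+ /* Tear down the old context and build the new one; on any failure,
+ * fall back to re-adding and re-assigning the old context.
+ */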
+ iwl_mld_unassign_vif_chanctx(hw, vifs[0].vif, vifs[0].link_conf,
+ vifs[0].old_ctx);
+ iwl_mld_remove_chanctx(hw, vifs[0].old_ctx);
+
+ ret = iwl_mld_add_chanctx(hw, vifs[0].new_ctx);
+ if (ret) {
+ IWL_ERR(mld, "failed to add new_ctx during channel switch\n");
+ goto out_reassign;
+ }
+
+ ret = iwl_mld_assign_vif_chanctx(hw, vifs[0].vif, vifs[0].link_conf,
+ vifs[0].new_ctx);
+ if (ret) {
+ IWL_ERR(mld,
+ "failed to assign new_ctx during channel switch\n");
+ goto out_remove;
+ }
+
+ return 0;
+
+ out_remove:
+ iwl_mld_remove_chanctx(hw, vifs[0].new_ctx);
+ out_reassign:
+ if (iwl_mld_add_chanctx(hw, vifs[0].old_ctx)) {
+ IWL_ERR(mld, "failed to add old_ctx after failure\n");
+ return ret;
+ }
+
+ if (iwl_mld_assign_vif_chanctx(hw, vifs[0].vif, vifs[0].link_conf,
+ vifs[0].old_ctx))
+ IWL_ERR(mld, "failed to reassign old_ctx after failure\n");
+
+ return ret;
+}
+
+static int
+iwl_mld_switch_vif_chanctx_reassign(struct ieee80211_hw *hw,
+ struct ieee80211_vif_chanctx_switch *vifs)
+{
+ struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+ int ret;
+
+ iwl_mld_unassign_vif_chanctx(hw, vifs[0].vif, vifs[0].link_conf,
+ vifs[0].old_ctx);
+ ret = iwl_mld_assign_vif_chanctx(hw, vifs[0].vif, vifs[0].link_conf,
+ vifs[0].new_ctx);
+ if (ret) {
+ IWL_ERR(mld,
+ "failed to assign new_ctx during channel switch\n");
+ goto out_reassign;
+ }
+
+ return 0;
+
+out_reassign:
+ if (iwl_mld_assign_vif_chanctx(hw, vifs[0].vif, vifs[0].link_conf,
+ vifs[0].old_ctx))
+ IWL_ERR(mld, "failed to reassign old_ctx after failure\n");
+
+ return ret;
+}
+
+static int
+iwl_mld_switch_vif_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_vif_chanctx_switch *vifs,
+ int n_vifs,
+ enum ieee80211_chanctx_switch_mode mode)
+{
+ int ret;
+
+ /* we only support a single vif right now */
+ if (n_vifs > 1)
+ return -EOPNOTSUPP;
+
+ switch (mode) {
+ case CHANCTX_SWMODE_SWAP_CONTEXTS:
+ ret = iwl_mld_switch_vif_chanctx_swap(hw, vifs);
+ break;
+ case CHANCTX_SWMODE_REASSIGN_VIF:
+ ret = iwl_mld_switch_vif_chanctx_reassign(hw, vifs);
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ return ret;
+}
+
+static void iwl_mld_sta_pre_rcu_remove(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+ struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(sta);
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+ struct iwl_mld_link_sta *mld_link_sta;
+ u8 link_id;
+
+ lockdep_assert_wiphy(mld->wiphy);
+
+ /* This is called before mac80211 does RCU synchronisation,
+ * so here we already invalidate our internal RCU-protected
+ * station pointer. The rest of the code will thus no longer
+ * be able to find the station this way, and we don't rely
+ * on further RCU synchronisation after the sta_state()
+ * callback deleted the station.
+ */
+ for_each_mld_link_sta(mld_sta, mld_link_sta, link_id)
+ RCU_INIT_POINTER(mld->fw_id_to_link_sta[mld_link_sta->fw_id],
+ NULL);
+
+ if (sta == mld_vif->ap_sta)
+ mld_vif->ap_sta = NULL;
+}
+
+static void
+iwl_mld_mac80211_mgd_protect_tdls_discover(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ unsigned int link_id)
+{
+ struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+ struct ieee80211_bss_conf *link_conf;
+ u32 duration;
+ int ret;
+
+ link_conf = wiphy_dereference(hw->wiphy, vif->link_conf[link_id]);
+ if (WARN_ON_ONCE(!link_conf))
+ return;
+
+ /* Protect the session to hear the TDLS setup response on the channel */
+
+ duration = 2 * link_conf->dtim_period * link_conf->beacon_int;
+
+ ret = iwl_mld_start_session_protection(mld, vif, duration, duration,
+ link_id, HZ / 5);
+ if (ret)
+ IWL_ERR(mld,
+ "Failed to start session protection for TDLS: %d\n",
+ ret);
+}
+
+static bool iwl_mld_can_activate_links(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ u16 desired_links)
+{
+ struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+ int n_links = hweight16(desired_links);
+
+ /* Check if HW supports the wanted number of links */
+ return n_links <= iwl_mld_max_active_links(mld, vif);
+}
+
+static int
+iwl_mld_change_vif_links(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ u16 old_links, u16 new_links,
+ struct ieee80211_bss_conf *old[IEEE80211_MLD_MAX_NUM_LINKS])
+{
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+ struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+ struct ieee80211_bss_conf *link_conf;
+ u16 removed = old_links & ~new_links;
+ u16 added = new_links & ~old_links;
+ int err;
+
+ lockdep_assert_wiphy(mld->wiphy);
+
+ /*
+ * No bits designate non-MLO mode. We can handle MLO exit/enter by
+ * simply mapping that to link ID zero internally.
+ * Note that mac80211 does such a non-MLO to MLO switch during restart
+ * if it was in MLO before. In that case, we do not have a link to
+ * remove.
+ */
+ if (old_links == 0 && !mld->fw_status.in_hw_restart)
+ removed |= BIT(0);
+
+ if (new_links == 0)
+ added |= BIT(0);
+
+ for (int i = 0; i < IEEE80211_MLD_MAX_NUM_LINKS; i++) {
+ if (removed & BIT(i))
+ iwl_mld_remove_link(mld, old[i]);
+ }
+
+ for (int i = 0; i < IEEE80211_MLD_MAX_NUM_LINKS; i++) {
+ if (added & BIT(i)) {
+ link_conf = link_conf_dereference_protected(vif, i);
+ if (WARN_ON(!link_conf))
+ return -EINVAL;
+
+ err = iwl_mld_add_link(mld, link_conf);
+ if (err)
+ goto remove_added_links;
+ }
+ }
+
+ /*
+ * Ensure we always have a valid primary_link. When using multiple
+ * links the proper value is set in assign_vif_chanctx.
+ */
+ mld_vif->emlsr.primary = new_links ? __ffs(new_links) : 0;
+
+ /*
+ * Special MLO restart case. We did not have a link when the interface
+ * was added, so do the power configuration now.
+ */
+ if (old_links == 0 && mld->fw_status.in_hw_restart)
+ iwl_mld_update_mac_power(mld, vif, false);
+
+ return 0;
+
+remove_added_links:
+ for (int i = 0; i < IEEE80211_MLD_MAX_NUM_LINKS; i++) {
+ if (!(added & BIT(i)))
+ continue;
+
+ link_conf = link_conf_dereference_protected(vif, i);
+ if (!link_conf || !iwl_mld_link_from_mac80211(link_conf))
+ continue;
+
+ iwl_mld_remove_link(mld, link_conf);
+ }
+
+ return err;
+}
+
+static int iwl_mld_change_sta_links(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ u16 old_links, u16 new_links)
+{
+ struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+
+ return iwl_mld_update_link_stas(mld, vif, sta, old_links, new_links);
+}
+
+static int iwl_mld_mac80211_join_ibss(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ return iwl_mld_start_ap_ibss(hw, vif, &vif->bss_conf);
+}
+
+static void iwl_mld_mac80211_leave_ibss(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ return iwl_mld_stop_ap_ibss(hw, vif, &vif->bss_conf);
+}
+
+static int iwl_mld_mac80211_tx_last_beacon(struct ieee80211_hw *hw)
+{
+ struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+
+ return mld->ibss_manager;
+}
+
+#define IWL_MLD_EMLSR_BLOCKED_TMP_NON_BSS_TIMEOUT (5 * HZ)
+
+static void iwl_mld_vif_iter_emlsr_block_tmp_non_bss(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+ int ret;
+
+ if (!iwl_mld_vif_has_emlsr_cap(vif))
+ return;
+
+ ret = iwl_mld_block_emlsr_sync(mld_vif->mld, vif,
+ IWL_MLD_EMLSR_BLOCKED_TMP_NON_BSS,
+ iwl_mld_get_primary_link(vif));
+ if (ret)
+ return;
+
+ wiphy_delayed_work_queue(mld_vif->mld->wiphy,
+ &mld_vif->emlsr.tmp_non_bss_done_wk,
+ IWL_MLD_EMLSR_BLOCKED_TMP_NON_BSS_TIMEOUT);
+}
+
+static void iwl_mld_prep_add_interface(struct ieee80211_hw *hw,
+ enum nl80211_iftype type)
+{
+ struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+
+ IWL_DEBUG_MAC80211(mld, "prep_add_interface: type=%u\n", type);
+
+ if (!(type == NL80211_IFTYPE_AP ||
+ type == NL80211_IFTYPE_P2P_GO ||
+ type == NL80211_IFTYPE_P2P_CLIENT))
+ return;
+
+ ieee80211_iterate_active_interfaces_mtx(mld->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mld_vif_iter_emlsr_block_tmp_non_bss,
+ NULL);
+}
+
+static int iwl_mld_set_hw_timestamp(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct cfg80211_set_hw_timestamp *hwts)
+{
+ struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+ u32 protocols = 0;
+
+ /* HW timestamping is only supported for a specific station */
+ if (!hwts->macaddr)
+ return -EOPNOTSUPP;
+
+ if (hwts->enable)
+ protocols =
+ IWL_TIME_SYNC_PROTOCOL_TM | IWL_TIME_SYNC_PROTOCOL_FTM;
+
+ return iwl_mld_time_sync_config(mld, hwts->macaddr, protocols);
+}
+
+static int iwl_mld_start_pmsr(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct cfg80211_pmsr_request *request)
+{
+ struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+
+ return iwl_mld_ftm_start(mld, vif, request);
+}
+
+const struct ieee80211_ops iwl_mld_hw_ops = {
+ .tx = iwl_mld_mac80211_tx,
+ .start = iwl_mld_mac80211_start,
+ .stop = iwl_mld_mac80211_stop,
+ .config = iwl_mld_mac80211_config,
+ .add_interface = iwl_mld_mac80211_add_interface,
+ .remove_interface = iwl_mld_mac80211_remove_interface,
+ .conf_tx = iwl_mld_mac80211_conf_tx,
+ .prepare_multicast = iwl_mld_mac80211_prepare_multicast,
+ .configure_filter = iwl_mld_mac80211_configure_filter,
+ .reconfig_complete = iwl_mld_mac80211_reconfig_complete,
+ .wake_tx_queue = iwl_mld_mac80211_wake_tx_queue,
+ .add_chanctx = iwl_mld_add_chanctx,
+ .remove_chanctx = iwl_mld_remove_chanctx,
+ .change_chanctx = iwl_mld_change_chanctx,
+ .assign_vif_chanctx = iwl_mld_assign_vif_chanctx,
+ .unassign_vif_chanctx = iwl_mld_unassign_vif_chanctx,
+ .set_rts_threshold = iwl_mld_mac80211_set_rts_threshold,
+ .link_info_changed = iwl_mld_mac80211_link_info_changed,
+ .vif_cfg_changed = iwl_mld_mac80211_vif_cfg_changed,
+ .set_key = iwl_mld_mac80211_set_key,
+ .hw_scan = iwl_mld_mac80211_hw_scan,
+ .cancel_hw_scan = iwl_mld_mac80211_cancel_hw_scan,
+ .sched_scan_start = iwl_mld_mac80211_sched_scan_start,
+ .sched_scan_stop = iwl_mld_mac80211_sched_scan_stop,
+ .mgd_prepare_tx = iwl_mld_mac80211_mgd_prepare_tx,
+ .mgd_complete_tx = iwl_mld_mac_mgd_complete_tx,
+ .sta_state = iwl_mld_mac80211_sta_state,
+ .sta_statistics = iwl_mld_mac80211_sta_statistics,
+ .flush = iwl_mld_mac80211_flush,
+ .flush_sta = iwl_mld_mac80211_flush_sta,
+ .ampdu_action = iwl_mld_mac80211_ampdu_action,
+ .can_aggregate_in_amsdu = iwl_mld_mac80211_can_aggregate,
+ .sync_rx_queues = iwl_mld_mac80211_sync_rx_queues,
+ .link_sta_rc_update = iwl_mld_sta_rc_update,
+ .start_ap = iwl_mld_start_ap_ibss,
+ .stop_ap = iwl_mld_stop_ap_ibss,
+ .pre_channel_switch = iwl_mld_pre_channel_switch,
+ .channel_switch = iwl_mld_channel_switch,
+ .post_channel_switch = iwl_mld_post_channel_switch,
+ .abort_channel_switch = iwl_mld_abort_channel_switch,
+ .switch_vif_chanctx = iwl_mld_switch_vif_chanctx,
+ .sta_pre_rcu_remove = iwl_mld_sta_pre_rcu_remove,
+ .remain_on_channel = iwl_mld_start_roc,
+ .cancel_remain_on_channel = iwl_mld_cancel_roc,
+ .can_activate_links = iwl_mld_can_activate_links,
+ .change_vif_links = iwl_mld_change_vif_links,
+ .change_sta_links = iwl_mld_change_sta_links,
+#ifdef CONFIG_PM_SLEEP
+ .suspend = iwl_mld_suspend,
+ .resume = iwl_mld_resume,
+ .set_wakeup = iwl_mld_set_wakeup,
+ .set_rekey_data = iwl_mld_set_rekey_data,
+#if IS_ENABLED(CONFIG_IPV6)
+ .ipv6_addr_change = iwl_mld_ipv6_addr_change,
+#endif /* IS_ENABLED(CONFIG_IPV6) */
+#endif /* CONFIG_PM_SLEEP */
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ .vif_add_debugfs = iwl_mld_add_vif_debugfs,
+ .link_add_debugfs = iwl_mld_add_link_debugfs,
+ .link_sta_add_debugfs = iwl_mld_add_link_sta_debugfs,
+#endif
+ .mgd_protect_tdls_discover = iwl_mld_mac80211_mgd_protect_tdls_discover,
+ .join_ibss = iwl_mld_mac80211_join_ibss,
+ .leave_ibss = iwl_mld_mac80211_leave_ibss,
+ .tx_last_beacon = iwl_mld_mac80211_tx_last_beacon,
+ .prep_add_interface = iwl_mld_prep_add_interface,
+ .set_hw_timestamp = iwl_mld_set_hw_timestamp,
+ .start_pmsr = iwl_mld_start_pmsr,
+};
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/mac80211.h b/drivers/net/wireless/intel/iwlwifi/mld/mac80211.h
new file mode 100644
index 000000000000..aad04d7b2617
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mld/mac80211.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2024 Intel Corporation
+ */
+#ifndef __iwl_mld_mac80211_h__
+#define __iwl_mld_mac80211_h__
+
+#include "mld.h"
+
+int iwl_mld_register_hw(struct iwl_mld *mld);
+void iwl_mld_recalc_multicast_filter(struct iwl_mld *mld);
+
+#endif /* __iwl_mld_mac80211_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/mcc.c b/drivers/net/wireless/intel/iwlwifi/mld/mcc.c
new file mode 100644
index 000000000000..daca14e208bd
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mld/mcc.c
@@ -0,0 +1,329 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2024 Intel Corporation
+ */
+
+#include <net/cfg80211.h>
+#include <net/mac80211.h>
+
+#include <fw/dbg.h>
+#include <iwl-nvm-parse.h>
+
+#include "mld.h"
+#include "hcmd.h"
+#include "mcc.h"
+
+/* It is the caller's responsibility to free the pointer returned here */
+static struct iwl_mcc_update_resp_v8 *
+iwl_mld_parse_mcc_update_resp_v8(const struct iwl_rx_packet *pkt)
+{
+ const struct iwl_mcc_update_resp_v8 *mcc_resp_v8 = (const void *)pkt->data;
+ int n_channels = __le32_to_cpu(mcc_resp_v8->n_channels);
+ struct iwl_mcc_update_resp_v8 *resp_cp;
+ int notif_len = struct_size(resp_cp, channels, n_channels);
+
+ if (iwl_rx_packet_payload_len(pkt) != notif_len)
+ return ERR_PTR(-EINVAL);
+
+ resp_cp = kmemdup(mcc_resp_v8, notif_len, GFP_KERNEL);
+ if (!resp_cp)
+ return ERR_PTR(-ENOMEM);
+
+ return resp_cp;
+}
+
+/* It is the caller's responsibility to free the pointer returned here */
+static struct iwl_mcc_update_resp_v8 *
+iwl_mld_parse_mcc_update_resp_v5_v6(const struct iwl_rx_packet *pkt)
+{
+ const struct iwl_mcc_update_resp_v4 *mcc_resp_v4 = (const void *)pkt->data;
+ struct iwl_mcc_update_resp_v8 *resp_cp;
+ int n_channels = __le32_to_cpu(mcc_resp_v4->n_channels);
+ int resp_len;
+
+ if (iwl_rx_packet_payload_len(pkt) !=
+ struct_size(mcc_resp_v4, channels, n_channels))
+ return ERR_PTR(-EINVAL);
+
+ resp_len = struct_size(resp_cp, channels, n_channels);
+ resp_cp = kzalloc(resp_len, GFP_KERNEL);
+ if (!resp_cp)
+ return ERR_PTR(-ENOMEM);
+
+ resp_cp->status = mcc_resp_v4->status;
+ resp_cp->mcc = mcc_resp_v4->mcc;
+ resp_cp->cap = cpu_to_le32(le16_to_cpu(mcc_resp_v4->cap));
+ resp_cp->source_id = mcc_resp_v4->source_id;
+ resp_cp->geo_info = mcc_resp_v4->geo_info;
+ resp_cp->n_channels = mcc_resp_v4->n_channels;
+ memcpy(resp_cp->channels, mcc_resp_v4->channels,
+ n_channels * sizeof(__le32));
+
+ return resp_cp;
+}
+
+/* It is the caller's responsibility to free the pointer returned here */
+static struct iwl_mcc_update_resp_v8 *
+iwl_mld_update_mcc(struct iwl_mld *mld, const char *alpha2,
+ enum iwl_mcc_source src_id)
+{
+ int resp_ver = iwl_fw_lookup_notif_ver(mld->fw, LONG_GROUP,
+ MCC_UPDATE_CMD, 0);
+ struct iwl_mcc_update_cmd mcc_update_cmd = {
+ .mcc = cpu_to_le16(alpha2[0] << 8 | alpha2[1]),
+ .source_id = (u8)src_id,
+ };
+ struct iwl_mcc_update_resp_v8 *resp_cp;
+ struct iwl_rx_packet *pkt;
+ struct iwl_host_cmd cmd = {
+ .id = MCC_UPDATE_CMD,
+ .flags = CMD_WANT_SKB,
+ .data = { &mcc_update_cmd },
+ .len[0] = sizeof(mcc_update_cmd),
+ };
+ int ret;
+ u16 mcc;
+
+ IWL_DEBUG_LAR(mld, "send MCC update to FW with '%c%c' src = %d\n",
+ alpha2[0], alpha2[1], src_id);
+
+ ret = iwl_mld_send_cmd(mld, &cmd);
+ if (ret)
+ return ERR_PTR(ret);
+
+ pkt = cmd.resp_pkt;
+
+ /* For Wifi-7 radios, we get version 8
+ * For Wifi-6E radios, we get version 6
+ * For Wifi-6 radios, we get version 5, but 5, 6, and 4 are compatible.
+ */
+ switch (resp_ver) {
+ case 5:
+ case 6:
+ resp_cp = iwl_mld_parse_mcc_update_resp_v5_v6(pkt);
+ break;
+ case 8:
+ resp_cp = iwl_mld_parse_mcc_update_resp_v8(pkt);
+ break;
+ default:
+ IWL_FW_CHECK_FAILED(mld, "Unknown MCC_UPDATE_CMD version %d\n", resp_ver);
+ resp_cp = ERR_PTR(-EINVAL);
+ }
+
+ if (IS_ERR(resp_cp))
+ goto exit;
+
+ mcc = le16_to_cpu(resp_cp->mcc);
+
+ IWL_FW_CHECK(mld, !mcc, "mcc can't be 0: %d\n", mcc);
+
+ IWL_DEBUG_LAR(mld,
+ "MCC response status: 0x%x. new MCC: 0x%x ('%c%c')\n",
+ le32_to_cpu(resp_cp->status), mcc, mcc >> 8, mcc & 0xff);
+
+exit:
+ iwl_free_resp(&cmd);
+ return resp_cp;
+}
+
+/* It is the caller's responsibility to free the pointer returned here */
+struct ieee80211_regdomain *
+iwl_mld_get_regdomain(struct iwl_mld *mld,
+ const char *alpha2,
+ enum iwl_mcc_source src_id,
+ bool *changed)
+{
+ struct ieee80211_regdomain *regd = NULL;
+ struct iwl_mcc_update_resp_v8 *resp;
+ u8 resp_ver = iwl_fw_lookup_notif_ver(mld->fw, IWL_ALWAYS_LONG_GROUP,
+ MCC_UPDATE_CMD, 0);
+
+ IWL_DEBUG_LAR(mld, "Getting regdomain data for %s from FW\n", alpha2);
+
+ lockdep_assert_wiphy(mld->wiphy);
+
+ resp = iwl_mld_update_mcc(mld, alpha2, src_id);
+ if (IS_ERR(resp)) {
+ IWL_DEBUG_LAR(mld, "Could not get update from FW %ld\n",
+ PTR_ERR(resp));
+ resp = NULL;
+ goto out;
+ }
+
+ if (changed) {
+ u32 status = le32_to_cpu(resp->status);
+
+ *changed = (status == MCC_RESP_NEW_CHAN_PROFILE ||
+ status == MCC_RESP_ILLEGAL);
+ }
+ IWL_DEBUG_LAR(mld, "MCC update response version: %d\n", resp_ver);
+
+ regd = iwl_parse_nvm_mcc_info(mld->trans->dev, mld->cfg,
+ __le32_to_cpu(resp->n_channels),
+ resp->channels,
+ __le16_to_cpu(resp->mcc),
+ __le16_to_cpu(resp->geo_info),
+ le32_to_cpu(resp->cap), resp_ver);
+
+ if (IS_ERR(regd)) {
+ IWL_DEBUG_LAR(mld, "Could not parse update from FW %ld\n",
+ PTR_ERR(regd));
+ goto out;
+ }
+
+ IWL_DEBUG_LAR(mld, "setting alpha2 from FW to %s (0x%x, 0x%x) src=%d\n",
+ regd->alpha2, regd->alpha2[0],
+ regd->alpha2[1], resp->source_id);
+
+ mld->mcc_src = resp->source_id;
+
+ if (!iwl_puncturing_is_allowed_in_bios(mld->bios_enable_puncturing,
+ le16_to_cpu(resp->mcc)))
+ ieee80211_hw_set(mld->hw, DISALLOW_PUNCTURING);
+ else
+ __clear_bit(IEEE80211_HW_DISALLOW_PUNCTURING, mld->hw->flags);
+
+out:
+ kfree(resp);
+ return regd;
+}
+
+/* It is the caller's responsibility to free the pointer returned here */
+static struct ieee80211_regdomain *
+iwl_mld_get_current_regdomain(struct iwl_mld *mld,
+ bool *changed)
+{
+ return iwl_mld_get_regdomain(mld, "ZZ",
+ MCC_SOURCE_GET_CURRENT, changed);
+}
+
+void iwl_mld_update_changed_regdomain(struct iwl_mld *mld)
+{
+ struct ieee80211_regdomain *regd;
+ bool changed;
+
+ regd = iwl_mld_get_current_regdomain(mld, &changed);
+
+ if (IS_ERR_OR_NULL(regd))
+ return;
+
+ if (changed)
+ regulatory_set_wiphy_regd(mld->wiphy, regd);
+ kfree(regd);
+}
+
+static int iwl_mld_apply_last_mcc(struct iwl_mld *mld,
+ const char *alpha2)
+{
+ struct ieee80211_regdomain *regd;
+ u32 used_src;
+ bool changed;
+ int ret;
+
+ /* save the last source in case we overwrite it below */
+ used_src = mld->mcc_src;
+
+ /* Notify the firmware we support wifi location updates */
+ regd = iwl_mld_get_current_regdomain(mld, NULL);
+ if (!IS_ERR_OR_NULL(regd))
+ kfree(regd);
+
+ /* Now set our last stored MCC and source */
+ regd = iwl_mld_get_regdomain(mld, alpha2, used_src,
+ &changed);
+ if (IS_ERR_OR_NULL(regd))
+ return -EIO;
+
+ /* update cfg80211 if the regdomain was changed */
+ if (changed)
+ ret = regulatory_set_wiphy_regd_sync(mld->wiphy, regd);
+ else
+ ret = 0;
+
+ kfree(regd);
+ return ret;
+}
+
+int iwl_mld_init_mcc(struct iwl_mld *mld)
+{
+ const struct ieee80211_regdomain *r;
+ struct ieee80211_regdomain *regd;
+ char mcc[3];
+ int retval;
+
+ /* try to replay the last set MCC to FW */
+ r = wiphy_dereference(mld->wiphy, mld->wiphy->regd);
+
+ if (r)
+ return iwl_mld_apply_last_mcc(mld, r->alpha2);
+
+ regd = iwl_mld_get_current_regdomain(mld, NULL);
+ if (IS_ERR_OR_NULL(regd))
+ return -EIO;
+
+ if (!iwl_bios_get_mcc(&mld->fwrt, mcc)) {
+ kfree(regd);
+ regd = iwl_mld_get_regdomain(mld, mcc, MCC_SOURCE_BIOS, NULL);
+ if (IS_ERR_OR_NULL(regd))
+ return -EIO;
+ }
+
+ retval = regulatory_set_wiphy_regd_sync(mld->wiphy, regd);
+
+ kfree(regd);
+ return retval;
+}
+
+static void iwl_mld_find_assoc_vif_iterator(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ bool *assoc = data;
+
+ if (vif->type == NL80211_IFTYPE_STATION &&
+ vif->cfg.assoc)
+ *assoc = true;
+}
+
+static bool iwl_mld_is_a_vif_assoc(struct iwl_mld *mld)
+{
+ bool assoc = false;
+
+ ieee80211_iterate_active_interfaces_atomic(mld->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mld_find_assoc_vif_iterator,
+ &assoc);
+ return assoc;
+}
+
+void iwl_mld_handle_update_mcc(struct iwl_mld *mld, struct iwl_rx_packet *pkt)
+{
+ struct iwl_mcc_chub_notif *notif = (void *)pkt->data;
+ enum iwl_mcc_source src;
+ char mcc[3];
+ struct ieee80211_regdomain *regd;
+ bool changed;
+
+ lockdep_assert_wiphy(mld->wiphy);
+
+ if (iwl_mld_is_a_vif_assoc(mld) &&
+ notif->source_id == MCC_SOURCE_WIFI) {
+ IWL_DEBUG_LAR(mld, "Ignore mcc update while associated\n");
+ return;
+ }
+
+ mcc[0] = le16_to_cpu(notif->mcc) >> 8;
+ mcc[1] = le16_to_cpu(notif->mcc) & 0xff;
+ mcc[2] = '\0';
+ src = notif->source_id;
+
+ IWL_DEBUG_LAR(mld,
+ "RX: received chub update mcc cmd (mcc '%s' src %d)\n",
+ mcc, src);
+ regd = iwl_mld_get_regdomain(mld, mcc, src, &changed);
+ if (IS_ERR_OR_NULL(regd))
+ return;
+
+ if (changed)
+ regulatory_set_wiphy_regd(mld->hw->wiphy, regd);
+ kfree(regd);
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/mcc.h b/drivers/net/wireless/intel/iwlwifi/mld/mcc.h
new file mode 100644
index 000000000000..2b31e5b5e2ed
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mld/mcc.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2024 Intel Corporation
+ */
+#ifndef __iwl_mld_mcc_h__
+#define __iwl_mld_mcc_h__
+
+int iwl_mld_init_mcc(struct iwl_mld *mld);
+void iwl_mld_handle_update_mcc(struct iwl_mld *mld, struct iwl_rx_packet *pkt);
+void iwl_mld_update_changed_regdomain(struct iwl_mld *mld);
+struct ieee80211_regdomain *
+iwl_mld_get_regdomain(struct iwl_mld *mld,
+ const char *alpha2,
+ enum iwl_mcc_source src_id,
+ bool *changed);
+
+#endif /* __iwl_mld_mcc_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/mld.c b/drivers/net/wireless/intel/iwlwifi/mld/mld.c
new file mode 100644
index 000000000000..d4a99ae64074
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mld/mld.c
@@ -0,0 +1,720 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2024-2025 Intel Corporation
+ */
+#include <linux/rtnetlink.h>
+#include <net/mac80211.h>
+
+#include "fw/api/rx.h"
+#include "fw/api/datapath.h"
+#include "fw/api/commands.h"
+#include "fw/api/offload.h"
+#include "fw/api/coex.h"
+#include "fw/dbg.h"
+#include "fw/uefi.h"
+
+#include "mld.h"
+#include "mlo.h"
+#include "mac80211.h"
+#include "led.h"
+#include "scan.h"
+#include "tx.h"
+#include "sta.h"
+#include "regulatory.h"
+#include "thermal.h"
+#include "low_latency.h"
+#include "hcmd.h"
+#include "fw/api/location.h"
+
+#define DRV_DESCRIPTION "Intel(R) MLD wireless driver for Linux"
+MODULE_DESCRIPTION(DRV_DESCRIPTION);
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS("IWLWIFI");
+
+static const struct iwl_op_mode_ops iwl_mld_ops;
+
+static int __init iwl_mld_init(void)
+{
+ int ret = iwl_opmode_register("iwlmld", &iwl_mld_ops);
+
+ if (ret)
+ pr_err("Unable to register MLD op_mode: %d\n", ret);
+
+ return ret;
+}
+module_init(iwl_mld_init);
+
+static void __exit iwl_mld_exit(void)
+{
+ iwl_opmode_deregister("iwlmld");
+}
+module_exit(iwl_mld_exit);
+
+static void iwl_mld_hw_set_regulatory(struct iwl_mld *mld)
+{
+ struct wiphy *wiphy = mld->wiphy;
+
+ wiphy->regulatory_flags |= REGULATORY_WIPHY_SELF_MANAGED;
+ wiphy->regulatory_flags |= REGULATORY_ENABLE_RELAX_NO_IR;
+}
+
+VISIBLE_IF_IWLWIFI_KUNIT
+void iwl_construct_mld(struct iwl_mld *mld, struct iwl_trans *trans,
+ const struct iwl_cfg *cfg, const struct iwl_fw *fw,
+ struct ieee80211_hw *hw, struct dentry *dbgfs_dir)
+{
+ mld->dev = trans->dev;
+ mld->trans = trans;
+ mld->cfg = cfg;
+ mld->fw = fw;
+ mld->hw = hw;
+ mld->wiphy = hw->wiphy;
+ mld->debugfs_dir = dbgfs_dir;
+
+ iwl_notification_wait_init(&mld->notif_wait);
+
+ /* Setup async RX handling */
+ spin_lock_init(&mld->async_handlers_lock);
+ wiphy_work_init(&mld->async_handlers_wk,
+ iwl_mld_async_handlers_wk);
+
+ /* Dynamic Queue Allocation */
+ spin_lock_init(&mld->add_txqs_lock);
+ INIT_LIST_HEAD(&mld->txqs_to_add);
+ wiphy_work_init(&mld->add_txqs_wk, iwl_mld_add_txqs_wk);
+
+ /* Setup RX queues sync wait queue */
+ init_waitqueue_head(&mld->rxq_sync.waitq);
+}
+EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_construct_mld);
+
+static void __acquires(&mld->wiphy->mtx)
+iwl_mld_fwrt_dump_start(void *ctx)
+{
+ struct iwl_mld *mld = ctx;
+
+ wiphy_lock(mld->wiphy);
+}
+
+static void __releases(&mld->wiphy->mtx)
+iwl_mld_fwrt_dump_end(void *ctx)
+{
+ struct iwl_mld *mld = ctx;
+
+ wiphy_unlock(mld->wiphy);
+}
+
+static bool iwl_mld_d3_debug_enable(void *ctx)
+{
+ return IWL_MLD_D3_DEBUG;
+}
+
+static int iwl_mld_fwrt_send_hcmd(void *ctx, struct iwl_host_cmd *host_cmd)
+{
+ struct iwl_mld *mld = (struct iwl_mld *)ctx;
+ int ret;
+
+ wiphy_lock(mld->wiphy);
+ ret = iwl_mld_send_cmd(mld, host_cmd);
+ wiphy_unlock(mld->wiphy);
+
+ return ret;
+}
+
+static const struct iwl_fw_runtime_ops iwl_mld_fwrt_ops = {
+ .dump_start = iwl_mld_fwrt_dump_start,
+ .dump_end = iwl_mld_fwrt_dump_end,
+ .send_hcmd = iwl_mld_fwrt_send_hcmd,
+ .d3_debug_enable = iwl_mld_d3_debug_enable,
+};
+
+static void
+iwl_mld_construct_fw_runtime(struct iwl_mld *mld, struct iwl_trans *trans,
+ const struct iwl_fw *fw,
+ struct dentry *debugfs_dir)
+{
+ iwl_fw_runtime_init(&mld->fwrt, trans, fw, &iwl_mld_fwrt_ops, mld,
+ NULL, NULL, debugfs_dir);
+
+ iwl_fw_set_current_image(&mld->fwrt, IWL_UCODE_REGULAR);
+}
+
+/* Please keep this array *SORTED* by hex value.
+ * Access is done through binary search
+ */
+static const struct iwl_hcmd_names iwl_mld_legacy_names[] = {
+ HCMD_NAME(UCODE_ALIVE_NTFY),
+ HCMD_NAME(INIT_COMPLETE_NOTIF),
+ HCMD_NAME(PHY_CONTEXT_CMD),
+ HCMD_NAME(SCAN_CFG_CMD),
+ HCMD_NAME(SCAN_REQ_UMAC),
+ HCMD_NAME(SCAN_ABORT_UMAC),
+ HCMD_NAME(SCAN_COMPLETE_UMAC),
+ HCMD_NAME(TX_CMD),
+ HCMD_NAME(TXPATH_FLUSH),
+ HCMD_NAME(LEDS_CMD),
+ HCMD_NAME(WNM_80211V_TIMING_MEASUREMENT_NOTIFICATION),
+ HCMD_NAME(WNM_80211V_TIMING_MEASUREMENT_CONFIRM_NOTIFICATION),
+ HCMD_NAME(SCAN_OFFLOAD_UPDATE_PROFILES_CMD),
+ HCMD_NAME(POWER_TABLE_CMD),
+ HCMD_NAME(PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION),
+ HCMD_NAME(BEACON_NOTIFICATION),
+ HCMD_NAME(BEACON_TEMPLATE_CMD),
+ HCMD_NAME(TX_ANT_CONFIGURATION_CMD),
+ HCMD_NAME(REDUCE_TX_POWER_CMD),
+ HCMD_NAME(MISSED_BEACONS_NOTIFICATION),
+ HCMD_NAME(MAC_PM_POWER_TABLE),
+ HCMD_NAME(MFUART_LOAD_NOTIFICATION),
+ HCMD_NAME(RSS_CONFIG_CMD),
+ HCMD_NAME(SCAN_ITERATION_COMPLETE_UMAC),
+ HCMD_NAME(REPLY_RX_MPDU_CMD),
+ HCMD_NAME(BA_NOTIF),
+ HCMD_NAME(MCC_UPDATE_CMD),
+ HCMD_NAME(MCC_CHUB_UPDATE_CMD),
+ HCMD_NAME(MCAST_FILTER_CMD),
+ HCMD_NAME(REPLY_BEACON_FILTERING_CMD),
+ HCMD_NAME(PROT_OFFLOAD_CONFIG_CMD),
+ HCMD_NAME(MATCH_FOUND_NOTIFICATION),
+ HCMD_NAME(WOWLAN_PATTERNS),
+ HCMD_NAME(WOWLAN_CONFIGURATION),
+ HCMD_NAME(WOWLAN_TSC_RSC_PARAM),
+ HCMD_NAME(WOWLAN_KEK_KCK_MATERIAL),
+ HCMD_NAME(DEBUG_HOST_COMMAND),
+ HCMD_NAME(LDBG_CONFIG_CMD),
+};
+
+/* Please keep this array *SORTED* by hex value.
+ * Access is done through binary search
+ */
+static const struct iwl_hcmd_names iwl_mld_system_names[] = {
+ HCMD_NAME(SHARED_MEM_CFG_CMD),
+ HCMD_NAME(SOC_CONFIGURATION_CMD),
+ HCMD_NAME(INIT_EXTENDED_CFG_CMD),
+ HCMD_NAME(FW_ERROR_RECOVERY_CMD),
+ HCMD_NAME(RFI_GET_FREQ_TABLE_CMD),
+ HCMD_NAME(SYSTEM_STATISTICS_CMD),
+ HCMD_NAME(SYSTEM_STATISTICS_END_NOTIF),
+};
+
+/* Please keep this array *SORTED* by hex value.
+ * Access is done through binary search
+ */
+static const struct iwl_hcmd_names iwl_mld_reg_and_nvm_names[] = {
+ HCMD_NAME(LARI_CONFIG_CHANGE),
+ HCMD_NAME(NVM_GET_INFO),
+ HCMD_NAME(TAS_CONFIG),
+ HCMD_NAME(SAR_OFFSET_MAPPING_TABLE_CMD),
+ HCMD_NAME(MCC_ALLOWED_AP_TYPE_CMD),
+};
+
+/* Please keep this array *SORTED* by hex value.
+ * Access is done through binary search
+ */
+static const struct iwl_hcmd_names iwl_mld_debug_names[] = {
+ HCMD_NAME(HOST_EVENT_CFG),
+ HCMD_NAME(DBGC_SUSPEND_RESUME),
+};
+
+/* Please keep this array *SORTED* by hex value.
+ * Access is done through binary search
+ */
+static const struct iwl_hcmd_names iwl_mld_mac_conf_names[] = {
+ HCMD_NAME(LOW_LATENCY_CMD),
+ HCMD_NAME(SESSION_PROTECTION_CMD),
+ HCMD_NAME(MAC_CONFIG_CMD),
+ HCMD_NAME(LINK_CONFIG_CMD),
+ HCMD_NAME(STA_CONFIG_CMD),
+ HCMD_NAME(AUX_STA_CMD),
+ HCMD_NAME(STA_REMOVE_CMD),
+ HCMD_NAME(ROC_CMD),
+ HCMD_NAME(MISSED_BEACONS_NOTIF),
+ HCMD_NAME(EMLSR_TRANS_FAIL_NOTIF),
+ HCMD_NAME(ROC_NOTIF),
+ HCMD_NAME(CHANNEL_SWITCH_ERROR_NOTIF),
+ HCMD_NAME(SESSION_PROTECTION_NOTIF),
+ HCMD_NAME(PROBE_RESPONSE_DATA_NOTIF),
+ HCMD_NAME(CHANNEL_SWITCH_START_NOTIF),
+};
+
+/* Please keep this array *SORTED* by hex value.
+ * Access is done through binary search
+ */
+static const struct iwl_hcmd_names iwl_mld_data_path_names[] = {
+ HCMD_NAME(TRIGGER_RX_QUEUES_NOTIF_CMD),
+ HCMD_NAME(WNM_PLATFORM_PTM_REQUEST_CMD),
+ HCMD_NAME(WNM_80211V_TIMING_MEASUREMENT_CONFIG_CMD),
+ HCMD_NAME(RFH_QUEUE_CONFIG_CMD),
+ HCMD_NAME(TLC_MNG_CONFIG_CMD),
+ HCMD_NAME(RX_BAID_ALLOCATION_CONFIG_CMD),
+ HCMD_NAME(SCD_QUEUE_CONFIG_CMD),
+ HCMD_NAME(OMI_SEND_STATUS_NOTIF),
+ HCMD_NAME(ESR_MODE_NOTIF),
+ HCMD_NAME(MONITOR_NOTIF),
+ HCMD_NAME(TLC_MNG_UPDATE_NOTIF),
+ HCMD_NAME(MU_GROUP_MGMT_NOTIF),
+};
+
+/* Please keep this array *SORTED* by hex value.
+ * Access is done through binary search
+ */
+static const struct iwl_hcmd_names iwl_mld_location_names[] = {
+ HCMD_NAME(TOF_RANGE_REQ_CMD),
+ HCMD_NAME(TOF_RANGE_RESPONSE_NOTIF),
+};
+
+/* Please keep this array *SORTED* by hex value.
+ * Access is done through binary search
+ */
+static const struct iwl_hcmd_names iwl_mld_phy_names[] = {
+ HCMD_NAME(CMD_DTS_MEASUREMENT_TRIGGER_WIDE),
+ HCMD_NAME(CTDP_CONFIG_CMD),
+ HCMD_NAME(TEMP_REPORTING_THRESHOLDS_CMD),
+ HCMD_NAME(PER_CHAIN_LIMIT_OFFSET_CMD),
+ HCMD_NAME(CT_KILL_NOTIFICATION),
+ HCMD_NAME(DTS_MEASUREMENT_NOTIF_WIDE),
+};
+
+/* Please keep this array *SORTED* by hex value.
+ * Access is done through binary search
+ */
+static const struct iwl_hcmd_names iwl_mld_statistics_names[] = {
+ HCMD_NAME(STATISTICS_OPER_NOTIF),
+ HCMD_NAME(STATISTICS_OPER_PART1_NOTIF),
+};
+
+/* Please keep this array *SORTED* by hex value.
+ * Access is done through binary search
+ */
+static const struct iwl_hcmd_names iwl_mld_prot_offload_names[] = {
+ HCMD_NAME(STORED_BEACON_NTF),
+};
+
+/* Please keep this array *SORTED* by hex value.
+ * Access is done through binary search
+ */
+static const struct iwl_hcmd_names iwl_mld_coex_names[] = {
+ HCMD_NAME(PROFILE_NOTIF),
+};
+
+VISIBLE_IF_IWLWIFI_KUNIT
+const struct iwl_hcmd_arr iwl_mld_groups[] = {
+ [LEGACY_GROUP] = HCMD_ARR(iwl_mld_legacy_names),
+ [LONG_GROUP] = HCMD_ARR(iwl_mld_legacy_names),
+ [SYSTEM_GROUP] = HCMD_ARR(iwl_mld_system_names),
+ [MAC_CONF_GROUP] = HCMD_ARR(iwl_mld_mac_conf_names),
+ [DATA_PATH_GROUP] = HCMD_ARR(iwl_mld_data_path_names),
+ [LOCATION_GROUP] = HCMD_ARR(iwl_mld_location_names),
+ [REGULATORY_AND_NVM_GROUP] = HCMD_ARR(iwl_mld_reg_and_nvm_names),
+ [DEBUG_GROUP] = HCMD_ARR(iwl_mld_debug_names),
+ [PHY_OPS_GROUP] = HCMD_ARR(iwl_mld_phy_names),
+ [STATISTICS_GROUP] = HCMD_ARR(iwl_mld_statistics_names),
+ [PROT_OFFLOAD_GROUP] = HCMD_ARR(iwl_mld_prot_offload_names),
+ [BT_COEX_GROUP] = HCMD_ARR(iwl_mld_coex_names),
+};
+EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_mld_groups);
+
+#if IS_ENABLED(CONFIG_IWLWIFI_KUNIT_TESTS)
+const unsigned int global_iwl_mld_goups_size = ARRAY_SIZE(iwl_mld_groups);
+EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(global_iwl_mld_goups_size);
+#endif
+
+static void
+iwl_mld_configure_trans(struct iwl_op_mode *op_mode)
+{
+ const struct iwl_mld *mld = IWL_OP_MODE_GET_MLD(op_mode);
+ static const u8 no_reclaim_cmds[] = {TX_CMD};
+ struct iwl_trans_config trans_cfg = {
+ .op_mode = op_mode,
+ /* Rx is not supported yet, but add it to avoid warnings */
+ .rx_buf_size = iwl_amsdu_size_to_rxb_size(),
+ .command_groups = iwl_mld_groups,
+ .command_groups_size = ARRAY_SIZE(iwl_mld_groups),
+ .fw_reset_handshake = true,
+ .queue_alloc_cmd_ver =
+ iwl_fw_lookup_cmd_ver(mld->fw,
+ WIDE_ID(DATA_PATH_GROUP,
+ SCD_QUEUE_CONFIG_CMD),
+ 0),
+ .no_reclaim_cmds = no_reclaim_cmds,
+ .n_no_reclaim_cmds = ARRAY_SIZE(no_reclaim_cmds),
+ .cb_data_offs = offsetof(struct ieee80211_tx_info,
+ driver_data[2]),
+ };
+ struct iwl_trans *trans = mld->trans;
+
+ trans->rx_mpdu_cmd = REPLY_RX_MPDU_CMD;
+ trans->iml = mld->fw->iml;
+ trans->iml_len = mld->fw->iml_len;
+ trans->wide_cmd_header = true;
+
+ iwl_trans_configure(trans, &trans_cfg);
+}
+
+/*
+ *****************************************************
+ * op mode ops functions
+ *****************************************************
+ */
+
+#define NUM_FW_LOAD_RETRIES 3
+static struct iwl_op_mode *
+iwl_op_mode_mld_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
+ const struct iwl_fw *fw, struct dentry *dbgfs_dir)
+{
+ struct ieee80211_hw *hw;
+ struct iwl_op_mode *op_mode;
+ struct iwl_mld *mld;
+ u32 eckv_value;
+ int ret;
+
+ /* Allocate and initialize a new hardware device */
+ hw = ieee80211_alloc_hw(sizeof(struct iwl_op_mode) +
+ sizeof(struct iwl_mld),
+ &iwl_mld_hw_ops);
+ if (!hw)
+ return ERR_PTR(-ENOMEM);
+
+ op_mode = hw->priv;
+
+ op_mode->ops = &iwl_mld_ops;
+
+ mld = IWL_OP_MODE_GET_MLD(op_mode);
+
+ iwl_construct_mld(mld, trans, cfg, fw, hw, dbgfs_dir);
+
+ iwl_mld_construct_fw_runtime(mld, trans, fw, dbgfs_dir);
+
+ iwl_mld_get_bios_tables(mld);
+ iwl_uefi_get_sgom_table(trans, &mld->fwrt);
+ iwl_uefi_get_step_table(trans);
+ if (iwl_bios_get_eckv(&mld->fwrt, &eckv_value))
+ IWL_DEBUG_RADIO(mld, "ECKV table doesn't exist in BIOS\n");
+ else
+ trans->ext_32khz_clock_valid = !!eckv_value;
+ iwl_bios_setup_step(trans, &mld->fwrt);
+ mld->bios_enable_puncturing = iwl_uefi_get_puncturing(&mld->fwrt);
+
+ iwl_mld_hw_set_regulatory(mld);
+
+ /* Configure transport layer with the opmode specific params */
+ iwl_mld_configure_trans(op_mode);
+
+ /* needed for regulatory init */
+ rtnl_lock();
+ /* Needed for sending commands */
+ wiphy_lock(mld->wiphy);
+
+ for (int i = 0; i < NUM_FW_LOAD_RETRIES; i++) {
+ ret = iwl_mld_load_fw(mld);
+ if (!ret)
+ break;
+ }
+
+ if (ret) {
+ wiphy_unlock(mld->wiphy);
+ rtnl_unlock();
+ iwl_fw_flush_dumps(&mld->fwrt);
+ goto free_hw;
+ }
+
+ iwl_mld_stop_fw(mld);
+
+ wiphy_unlock(mld->wiphy);
+ rtnl_unlock();
+
+ ret = iwl_mld_leds_init(mld);
+ if (ret)
+ goto free_nvm;
+
+ ret = iwl_mld_alloc_scan_cmd(mld);
+ if (ret)
+ goto leds_exit;
+
+ ret = iwl_mld_low_latency_init(mld);
+ if (ret)
+ goto free_scan_cmd;
+
+ ret = iwl_mld_register_hw(mld);
+ if (ret)
+ goto low_latency_free;
+
+ iwl_mld_toggle_tx_ant(mld, &mld->mgmt_tx_ant);
+
+ iwl_mld_add_debugfs_files(mld, dbgfs_dir);
+ iwl_mld_thermal_initialize(mld);
+
+ iwl_mld_ptp_init(mld);
+
+ return op_mode;
+
+low_latency_free:
+ iwl_mld_low_latency_free(mld);
+free_scan_cmd:
+ kfree(mld->scan.cmd);
+leds_exit:
+ iwl_mld_leds_exit(mld);
+free_nvm:
+ kfree(mld->nvm_data);
+free_hw:
+ ieee80211_free_hw(mld->hw);
+ return ERR_PTR(ret);
+}
+
+static void
+iwl_op_mode_mld_stop(struct iwl_op_mode *op_mode)
+{
+ struct iwl_mld *mld = IWL_OP_MODE_GET_MLD(op_mode);
+
+ iwl_mld_ptp_remove(mld);
+ iwl_mld_leds_exit(mld);
+
+ wiphy_lock(mld->wiphy);
+ iwl_mld_thermal_exit(mld);
+ iwl_mld_low_latency_stop(mld);
+ iwl_mld_deinit_time_sync(mld);
+ wiphy_unlock(mld->wiphy);
+
+ ieee80211_unregister_hw(mld->hw);
+
+ iwl_fw_runtime_free(&mld->fwrt);
+ iwl_mld_low_latency_free(mld);
+
+ iwl_trans_op_mode_leave(mld->trans);
+
+ kfree(mld->nvm_data);
+ kfree(mld->scan.cmd);
+ kfree(mld->error_recovery_buf);
+ kfree(mld->mcast_filter_cmd);
+
+ ieee80211_free_hw(mld->hw);
+}
+
+static void iwl_mld_queue_state_change(struct iwl_op_mode *op_mode,
+ int hw_queue, bool queue_full)
+{
+ struct iwl_mld *mld = IWL_OP_MODE_GET_MLD(op_mode);
+ struct ieee80211_txq *txq;
+ struct iwl_mld_sta *mld_sta;
+ struct iwl_mld_txq *mld_txq;
+
+ rcu_read_lock();
+
+ txq = rcu_dereference(mld->fw_id_to_txq[hw_queue]);
+ if (!txq) {
+ rcu_read_unlock();
+
+ if (queue_full) {
+ /* An internal queue is not expected to become full */
+ IWL_WARN(mld,
+ "Internal hw_queue %d is full! stopping all queues\n",
+ hw_queue);
+ /* Stop all queues, as an internal queue is not
+ * mapped to a mac80211 one
+ */
+ ieee80211_stop_queues(mld->hw);
+ } else {
+ ieee80211_wake_queues(mld->hw);
+ }
+
+ return;
+ }
+
+ mld_txq = iwl_mld_txq_from_mac80211(txq);
+ mld_sta = txq->sta ? iwl_mld_sta_from_mac80211(txq->sta) : NULL;
+
+ mld_txq->status.stop_full = queue_full;
+
+ if (!queue_full && mld_sta &&
+ mld_sta->sta_state != IEEE80211_STA_NOTEXIST) {
+ local_bh_disable();
+ iwl_mld_tx_from_txq(mld, txq);
+ local_bh_enable();
+ }
+
+ rcu_read_unlock();
+}
+
+static void
+iwl_mld_queue_full(struct iwl_op_mode *op_mode, int hw_queue)
+{
+ iwl_mld_queue_state_change(op_mode, hw_queue, true);
+}
+
+static void
+iwl_mld_queue_not_full(struct iwl_op_mode *op_mode, int hw_queue)
+{
+ iwl_mld_queue_state_change(op_mode, hw_queue, false);
+}
+
+static bool
+iwl_mld_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state)
+{
+ struct iwl_mld *mld = IWL_OP_MODE_GET_MLD(op_mode);
+
+ iwl_mld_set_hwkill(mld, state);
+
+ return false;
+}
+
+static void
+iwl_mld_free_skb(struct iwl_op_mode *op_mode, struct sk_buff *skb)
+{
+ struct iwl_mld *mld = IWL_OP_MODE_GET_MLD(op_mode);
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+
+ iwl_trans_free_tx_cmd(mld->trans, info->driver_data[1]);
+ ieee80211_free_txskb(mld->hw, skb);
+}
+
+static void iwl_mld_read_error_recovery_buffer(struct iwl_mld *mld)
+{
+ u32 src_size = mld->fw->ucode_capa.error_log_size;
+ u32 src_addr = mld->fw->ucode_capa.error_log_addr;
+ u8 *recovery_buf;
+ int ret;
+
+ /* no recovery buffer size defined in a TLV */
+ if (!src_size)
+ return;
+
+ recovery_buf = kzalloc(src_size, GFP_ATOMIC);
+ if (!recovery_buf)
+ return;
+
+ ret = iwl_trans_read_mem_bytes(mld->trans, src_addr,
+ recovery_buf, src_size);
+ if (ret) {
+ IWL_ERR(mld, "Failed to read error recovery buffer (%d)\n",
+ ret);
+ kfree(recovery_buf);
+ return;
+ }
+
+ mld->error_recovery_buf = recovery_buf;
+}
+
+static void iwl_mld_restart_nic(struct iwl_mld *mld)
+{
+ iwl_mld_read_error_recovery_buffer(mld);
+
+ mld->fwrt.trans->dbg.restart_required = false;
+
+ ieee80211_restart_hw(mld->hw);
+}
+
+static void
+iwl_mld_nic_error(struct iwl_op_mode *op_mode,
+ enum iwl_fw_error_type type)
+{
+ struct iwl_mld *mld = IWL_OP_MODE_GET_MLD(op_mode);
+ bool trans_dead = test_bit(STATUS_TRANS_DEAD, &mld->trans->status);
+
+ if (type == IWL_ERR_TYPE_CMD_QUEUE_FULL)
+ IWL_ERR(mld, "Command queue full!\n");
+ else if (!trans_dead && !mld->fw_status.do_not_dump_once)
+ iwl_fwrt_dump_error_logs(&mld->fwrt);
+
+ mld->fw_status.do_not_dump_once = false;
+
+ /* It is necessary to abort any OS scan here because mac80211 requires
+ * having the scan cleared before restarting.
+ * We'll reset the scan_status to NONE in restart cleanup in
+ * the next drv_start() call from mac80211. If ieee80211_hw_restart
+ * isn't called, the scan status will stay busy.
+ */
+ iwl_mld_report_scan_aborted(mld);
+
+ /*
+ * This should be the first thing done before trying to collect any
+ * data to avoid endless loops if any HW error happens while
+ * collecting debug data.
+ * It might not actually be true that we'll restart, but the
+ * setting doesn't matter if we're going to be unbound either.
+ */
+ if (type != IWL_ERR_TYPE_RESET_HS_TIMEOUT)
+ mld->fw_status.in_hw_restart = true;
+}
+
+static void iwl_mld_dump_error(struct iwl_op_mode *op_mode,
+ struct iwl_fw_error_dump_mode *mode)
+{
+ struct iwl_mld *mld = IWL_OP_MODE_GET_MLD(op_mode);
+
+ /* if we come in from opmode we have the mutex held */
+ if (mode->context == IWL_ERR_CONTEXT_FROM_OPMODE) {
+ lockdep_assert_wiphy(mld->wiphy);
+ iwl_fw_error_collect(&mld->fwrt);
+ } else {
+ wiphy_lock(mld->wiphy);
+ if (mode->context != IWL_ERR_CONTEXT_ABORT)
+ iwl_fw_error_collect(&mld->fwrt);
+ wiphy_unlock(mld->wiphy);
+ }
+}
+
+static bool iwl_mld_sw_reset(struct iwl_op_mode *op_mode,
+ enum iwl_fw_error_type type)
+{
+ struct iwl_mld *mld = IWL_OP_MODE_GET_MLD(op_mode);
+
+ /* Do restart only if the following conditions are met:
+ * - we consider the FW as running
+ * - The trigger that brought us here is defined as one that requires
+ * a restart (in the debug TLVs)
+ */
+ if (!mld->fw_status.running || !mld->fwrt.trans->dbg.restart_required)
+ return false;
+
+ iwl_mld_restart_nic(mld);
+ return true;
+}
+
+static void
+iwl_mld_time_point(struct iwl_op_mode *op_mode,
+ enum iwl_fw_ini_time_point tp_id,
+ union iwl_dbg_tlv_tp_data *tp_data)
+{
+ struct iwl_mld *mld = IWL_OP_MODE_GET_MLD(op_mode);
+
+ iwl_dbg_tlv_time_point(&mld->fwrt, tp_id, tp_data);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static void iwl_mld_device_powered_off(struct iwl_op_mode *op_mode)
+{
+ struct iwl_mld *mld = IWL_OP_MODE_GET_MLD(op_mode);
+
+ wiphy_lock(mld->wiphy);
+ mld->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED;
+ iwl_mld_stop_fw(mld);
+ mld->fw_status.in_d3 = false;
+ wiphy_unlock(mld->wiphy);
+}
+#else
+static void iwl_mld_device_powered_off(struct iwl_op_mode *op_mode)
+{}
+#endif
+
+static const struct iwl_op_mode_ops iwl_mld_ops = {
+ .start = iwl_op_mode_mld_start,
+ .stop = iwl_op_mode_mld_stop,
+ .rx = iwl_mld_rx,
+ .rx_rss = iwl_mld_rx_rss,
+ .queue_full = iwl_mld_queue_full,
+ .queue_not_full = iwl_mld_queue_not_full,
+ .hw_rf_kill = iwl_mld_set_hw_rfkill_state,
+ .free_skb = iwl_mld_free_skb,
+ .nic_error = iwl_mld_nic_error,
+ .dump_error = iwl_mld_dump_error,
+ .sw_reset = iwl_mld_sw_reset,
+ .time_point = iwl_mld_time_point,
+ .device_powered_off = pm_sleep_ptr(iwl_mld_device_powered_off),
+};
+
+struct iwl_mld_mod_params iwlmld_mod_params = {
+ .power_scheme = IWL_POWER_SCHEME_BPS,
+};
+
+module_param_named(power_scheme, iwlmld_mod_params.power_scheme, int, 0444);
+MODULE_PARM_DESC(power_scheme,
+ "power management scheme: 1-active, 2-balanced, default: 2");
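+
+/*
+ * Usage sketch (assuming the op mode is built as the iwlmld module):
+ * "modprobe iwlmld power_scheme=1" selects the always-on (CAM) scheme,
+ * while the default of 2 keeps the balanced (BPS) scheme set above.
+ */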
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/mld.h b/drivers/net/wireless/intel/iwlwifi/mld/mld.h
new file mode 100644
index 000000000000..5eceaaf7696d
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mld/mld.h
@@ -0,0 +1,582 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2024-2025 Intel Corporation
+ */
+#ifndef __iwl_mld_h__
+#define __iwl_mld_h__
+
+#include <linux/leds.h>
+#include <net/mac80211.h>
+
+#include "iwl-trans.h"
+#include "iwl-op-mode.h"
+#include "fw/runtime.h"
+#include "fw/notif-wait.h"
+#include "fw/api/commands.h"
+#include "fw/api/scan.h"
+#include "fw/api/mac-cfg.h"
+#include "fw/api/mac.h"
+#include "fw/api/phy-ctxt.h"
+#include "fw/api/datapath.h"
+#include "fw/api/rx.h"
+#include "fw/api/rs.h"
+#include "fw/api/context.h"
+#include "fw/api/coex.h"
+#include "fw/api/location.h"
+
+#include "fw/dbg.h"
+
+#include "notif.h"
+#include "scan.h"
+#include "rx.h"
+#include "thermal.h"
+#include "low_latency.h"
+#include "constants.h"
+#include "ptp.h"
+#include "time_sync.h"
+#include "ftm-initiator.h"
+
+/**
+ * DOC: Introduction
+ *
+ * iwlmld is an operation mode (a.k.a. op_mode) for Intel wireless devices.
+ * It is used for devices that ship after 2024 which typically support
+ * the WiFi-7 features. MLD stands for multi-link device. Note that there are
+ * devices that do not support WiFi-7 or even WiFi 6E and yet use iwlmld, but
+ * the firmware APIs used in this driver are WiFi-7 compatible.
+ *
+ * In the architecture of iwlwifi, an op_mode is a layer that translates
+ * mac80211's APIs into commands for the firmware and, conversely, firmware
+ * notifications back into mac80211's APIs. An op_mode must implement the
+ * interface defined in iwl-op-mode.h to interact with the transport layer,
+ * which allows it to send and receive data to and from the device, start the
+ * hardware, etc.
+ */
+
+/**
+ * DOC: Locking policy
+ *
+ * iwlmld has a very simple locking policy: it doesn't define any mutexes of
+ * its own. It relies on cfg80211's wiphy->mtx and takes the lock when needed.
+ * All the control flows originating from mac80211 have already acquired the
+ * lock, so that part is trivial; notifications received from the firmware and
+ * handled asynchronously also take the lock before they are handled.
+ * This is described in notif.c.
+ * There are spin_locks needed to synchronize with the data path, around the
+ * allocation of the queues, for example.
+ */
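+
+/*
+ * A minimal sketch of this policy (the handler name below is hypothetical;
+ * the real handlers live in notif.c): an asynchronous notification handler
+ * can assume the wiphy lock is already held when it runs:
+ *
+ *   static void iwl_mld_handle_some_notif(struct iwl_mld *mld,
+ *                                         struct iwl_rx_packet *pkt)
+ *   {
+ *           lockdep_assert_wiphy(mld->wiphy);
+ *           ...update state that mac80211 callbacks also touch...
+ *   }
+ */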
+
+/**
+ * DOC: Debugfs
+ *
+ * iwlmld adds its share of debugfs hooks, and its handlers are synchronized
+ * with the wiphy lock using wiphy_locked_debugfs. This avoids races against
+ * resource deletion while a debugfs hook is being used.
+ */
+
+/**
+ * DOC: Main resources
+ *
+ * iwlmld is designed with the life cycle of the resource in mind. The
+ * resources are:
+ *
+ * - struct iwl_mld (matches mac80211's struct ieee80211_hw)
+ *
+ * - struct iwl_mld_vif (matches mac80211's struct ieee80211_vif)
+ * iwl_mld_vif contains an array of pointers to struct iwl_mld_link
+ * which describe the links for this vif.
+ *
+ * - struct iwl_mld_sta (matches mac80211's struct ieee80211_sta)
+ * iwl_mld_sta contains an array of pointers to struct iwl_mld_link_sta
+ * which describe the link stations for this station.
+ *
+ * Each object has properties, some of which survive a firmware reset and some
+ * of which do not. Asynchronous firmware notifications can declare themselves
+ * as dependent on a certain instance of those resources; such notifications
+ * will be cancelled once the instance is destroyed.
+ */
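+
+/*
+ * For example (a sketch; both accessors appear elsewhere in this driver):
+ *
+ *   struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+ *   struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(sta);
+ *
+ * return the driver-private counterparts of the mac80211 objects; the
+ * per-link data is reached through the link arrays mentioned above.
+ */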
+
+#define IWL_MLD_MAX_ADDRESSES 5
+
+/**
+ * struct iwl_mld - MLD op mode
+ *
+ * @fw_id_to_bss_conf: maps a fw id of a link to the corresponding
+ * ieee80211_bss_conf.
+ * @fw_id_to_vif: maps a fw id of a MAC context to the corresponding
+ * ieee80211_vif. Mapping is valid only when the MAC exists in the fw.
+ * @fw_id_to_txq: maps a fw id of a txq to the corresponding
+ * ieee80211_txq.
+ * @used_phy_ids: a bitmap of the phy IDs used. If a bit is set, it means
+ * that the index of this bit is already used as a PHY id.
+ * @num_igtks: the number of iGTKs that were sent to the FW.
+ * @monitor: monitor related data
+ * @monitor.on: does a monitor vif exist (singleton hence bool)
+ * @monitor.ampdu_ref: the id of the A-MPDU for sniffer
+ * @monitor.ampdu_toggle: the state of the previous packet to track A-MPDU
+ * @monitor.cur_aid: current association id tracked by the sniffer
+ * @monitor.cur_bssid: current bssid tracked by the sniffer
+ * @monitor.p80: primary channel position relative to the whole bandwidth, in
+ * steps of 80 MHz
+ * @fw_id_to_link_sta: maps a fw id of a sta to the corresponding
+ * ieee80211_link_sta. This is not cleaned up on restart since we want to
+ * preserve the fw sta ids during a restart (for SN/PN restoring).
+ * FW ids of internal stations will be mapped to ERR_PTR, and will be
+ * re-allocated during a restart, so make sure to free it in restart
+ * cleanup using iwl_mld_free_internal_sta
+ * @netdetect: indicates the FW is in suspend mode with netdetect configured
+ * @p2p_device_vif: points to the p2p device vif if exists
+ * @bt_is_active: indicates that BT is active
+ * @dev: pointer to device struct. For printing purposes
+ * @trans: pointer to the transport layer
+ * @cfg: pointer to the device configuration
+ * @fw: a pointer to the fw object
+ * @hw: pointer to the hw object.
+ * @wiphy: a pointer to the wiphy struct, for easier access to it.
+ * @nvm_data: pointer to the nvm_data that includes all our capabilities
+ * @fwrt: fw runtime data
+ * @debugfs_dir: debugfs directory
+ * @notif_wait: notification wait related data.
+ * @async_handlers_list: a list of all async RX handlers. When a notification
+ * with an async handler is received, it is added to this list.
+ * When &async_handlers_wk runs, it runs these handlers one by one.
+ * @async_handlers_lock: a lock for &async_handlers_list. Synchronizes
+ * &async_handlers_wk and the RX notification path.
+ * @async_handlers_wk: A work to run all async RX handlers from
+ * &async_handlers_list.
+ * @ct_kill_exit_wk: worker to exit thermal kill
+ * @fw_status: bitmap of fw status bits
+ * @running: true if the firmware is running
+ * @do_not_dump_once: true if firmware dump must be prevented once
+ * @in_d3: indicates FW is in suspend mode and should be resumed
+ * @in_hw_restart: indicates that we are currently in the restart flow;
+ * should be unset once the restart completes.
+ * @radio_kill: bitmap of radio kill status
+ * @radio_kill.hw: radio is killed by hw switch
+ * @radio_kill.ct: radio is killed because the device is too hot
+ * @addresses: device MAC addresses.
+ * @scan: instance of the scan object
+ * @wowlan: WoWLAN support data.
+ * @led: the led device
+ * @mcc_src: the source id of the MCC, comes from the firmware
+ * @bios_enable_puncturing: is puncturing enabled by bios
+ * @fw_id_to_ba: maps a fw (BA) id to the corresponding Block Ack session data.
+ * @num_rx_ba_sessions: tracks the number of active Rx Block Ack (BA) sessions.
+ * The driver ensures that new BA sessions are blocked once the maximum
+ * supported by the firmware is reached, preventing firmware asserts.
+ * @rxq_sync: manages RX queue sync state
+ * @txqs_to_add: a list of &ieee80211_txq's to allocate in &add_txqs_wk
+ * @add_txqs_wk: a worker to allocate txqs.
+ * @add_txqs_lock: to lock the &txqs_to_add list.
+ * @error_recovery_buf: pointer to the recovery buffer that will be read
+ * from firmware upon fw/hw error and sent back to the firmware in
+ * reconfig flow (after NIC reset).
+ * @mcast_filter_cmd: pointer to the multicast filter command.
+ * @mgmt_tx_ant: stores the last TX antenna index; used for setting
+ * TX rate_n_flags for non-STA mgmt frames (toggles on every TX failure).
+ * @low_latency: low-latency manager.
+ * @tzone: thermal zone device's data
+ * @cooling_dev: cooling device's related data
+ * @ibss_manager: in IBSS mode (only one vif can be active), indicates whether
+ * the firmware reported having transmitted the last beacon, i.e. that we
+ * are the IBSS manager for that time and need to respond to probe
+ * requests
+ * @ptp_data: data of the PTP clock
+ * @time_sync: time sync data.
+ * @ftm_initiator: FTM initiator data
+ */
+struct iwl_mld {
+ /* Add here fields that need clean up on restart */
+ struct_group(zeroed_on_hw_restart,
+ struct ieee80211_bss_conf __rcu *fw_id_to_bss_conf[IWL_FW_MAX_LINK_ID + 1];
+ struct ieee80211_vif __rcu *fw_id_to_vif[NUM_MAC_INDEX_DRIVER];
+ struct ieee80211_txq __rcu *fw_id_to_txq[IWL_MAX_TVQM_QUEUES];
+ u8 used_phy_ids: NUM_PHY_CTX;
+ u8 num_igtks;
+ struct {
+ bool on;
+ u32 ampdu_ref;
+ bool ampdu_toggle;
+ u8 p80;
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ __le16 cur_aid;
+ u8 cur_bssid[ETH_ALEN];
+#endif
+ } monitor;
+#ifdef CONFIG_PM_SLEEP
+ bool netdetect;
+#endif /* CONFIG_PM_SLEEP */
+ struct ieee80211_vif *p2p_device_vif;
+ bool bt_is_active;
+ );
+ struct ieee80211_link_sta __rcu *fw_id_to_link_sta[IWL_STATION_COUNT_MAX];
+ /* And here fields that survive a fw restart */
+ struct device *dev;
+ struct iwl_trans *trans;
+ const struct iwl_cfg *cfg;
+ const struct iwl_fw *fw;
+ struct ieee80211_hw *hw;
+ struct wiphy *wiphy;
+ struct iwl_nvm_data *nvm_data;
+ struct iwl_fw_runtime fwrt;
+ struct dentry *debugfs_dir;
+ struct iwl_notif_wait_data notif_wait;
+ struct list_head async_handlers_list;
+ spinlock_t async_handlers_lock;
+ struct wiphy_work async_handlers_wk;
+ struct wiphy_delayed_work ct_kill_exit_wk;
+
+ struct {
+ u32 running:1,
+ do_not_dump_once:1,
+#ifdef CONFIG_PM_SLEEP
+ in_d3:1,
+#endif
+ in_hw_restart:1;
+
+ } fw_status;
+
+ struct {
+ u32 hw:1,
+ ct:1;
+ } radio_kill;
+
+ struct mac_address addresses[IWL_MLD_MAX_ADDRESSES];
+ struct iwl_mld_scan scan;
+#ifdef CONFIG_PM_SLEEP
+ struct wiphy_wowlan_support wowlan;
+#endif /* CONFIG_PM_SLEEP */
+#ifdef CONFIG_IWLWIFI_LEDS
+ struct led_classdev led;
+#endif
+ enum iwl_mcc_source mcc_src;
+ bool bios_enable_puncturing;
+
+ struct iwl_mld_baid_data __rcu *fw_id_to_ba[IWL_MAX_BAID];
+ u8 num_rx_ba_sessions;
+
+ struct iwl_mld_rx_queues_sync rxq_sync;
+
+ struct list_head txqs_to_add;
+ struct wiphy_work add_txqs_wk;
+ spinlock_t add_txqs_lock;
+
+ u8 *error_recovery_buf;
+ struct iwl_mcast_filter_cmd *mcast_filter_cmd;
+
+ u8 mgmt_tx_ant;
+
+ struct iwl_mld_low_latency low_latency;
+
+ bool ibss_manager;
+#ifdef CONFIG_THERMAL
+ struct thermal_zone_device *tzone;
+ struct iwl_mld_cooling_device cooling_dev;
+#endif
+
+ struct ptp_data ptp_data;
+
+ struct iwl_mld_time_sync_data __rcu *time_sync;
+
+ struct ftm_initiator_data ftm_initiator;
+};
+
+/* memset the part of the struct that requires cleanup on restart */
+#define CLEANUP_STRUCT(_ptr) \
+ memset((void *)&(_ptr)->zeroed_on_hw_restart, 0, \
+ sizeof((_ptr)->zeroed_on_hw_restart))
+
+/* Cleanup function for struct iwl_mld, will be called in restart */
+static inline void
+iwl_cleanup_mld(struct iwl_mld *mld)
+{
+ CLEANUP_STRUCT(mld);
+ CLEANUP_STRUCT(&mld->scan);
+
+#ifdef CONFIG_PM_SLEEP
+ mld->fw_status.in_d3 = false;
+#endif
+
+ iwl_mld_low_latency_restart_cleanup(mld);
+
+ /* Empty the list of async notification handlers so we won't process
+ * notifications from the dead fw after the reconfig flow.
+ */
+ iwl_mld_purge_async_handlers_list(mld);
+}
+
+enum iwl_power_scheme {
+ IWL_POWER_SCHEME_CAM = 1,
+ IWL_POWER_SCHEME_BPS,
+};
+
+/**
+ * struct iwl_mld_mod_params - module parameters for iwlmld
+ * @power_scheme: one of enum iwl_power_scheme
+ */
+struct iwl_mld_mod_params {
+ int power_scheme;
+};
+
+extern struct iwl_mld_mod_params iwlmld_mod_params;
+
+/* Extract MLD priv from op_mode */
+#define IWL_OP_MODE_GET_MLD(_iwl_op_mode) \
+ ((struct iwl_mld *)(_iwl_op_mode)->op_mode_specific)
+
+#define IWL_MAC80211_GET_MLD(_hw) \
+ IWL_OP_MODE_GET_MLD((struct iwl_op_mode *)((_hw)->priv))
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+void
+iwl_mld_add_debugfs_files(struct iwl_mld *mld, struct dentry *debugfs_dir);
+#else
+static inline void
+iwl_mld_add_debugfs_files(struct iwl_mld *mld, struct dentry *debugfs_dir)
+{}
+#endif
+
+int iwl_mld_load_fw(struct iwl_mld *mld);
+void iwl_mld_stop_fw(struct iwl_mld *mld);
+int iwl_mld_start_fw(struct iwl_mld *mld);
+void iwl_mld_send_recovery_cmd(struct iwl_mld *mld, u32 flags);
+
+static inline void iwl_mld_set_ctkill(struct iwl_mld *mld, bool state)
+{
+ mld->radio_kill.ct = state;
+
+ wiphy_rfkill_set_hw_state(mld->wiphy,
+ mld->radio_kill.hw || mld->radio_kill.ct);
+}
+
+static inline void iwl_mld_set_hwkill(struct iwl_mld *mld, bool state)
+{
+ mld->radio_kill.hw = state;
+
+ wiphy_rfkill_set_hw_state(mld->wiphy,
+ mld->radio_kill.hw || mld->radio_kill.ct);
+}
+
+static inline u8 iwl_mld_get_valid_tx_ant(const struct iwl_mld *mld)
+{
+ u8 tx_ant = mld->fw->valid_tx_ant;
+
+ if (mld->nvm_data && mld->nvm_data->valid_tx_ant)
+ tx_ant &= mld->nvm_data->valid_tx_ant;
+
+ return tx_ant;
+}
+
+static inline u8 iwl_mld_get_valid_rx_ant(const struct iwl_mld *mld)
+{
+ u8 rx_ant = mld->fw->valid_rx_ant;
+
+ if (mld->nvm_data && mld->nvm_data->valid_rx_ant)
+ rx_ant &= mld->nvm_data->valid_rx_ant;
+
+ return rx_ant;
+}
+
+static inline u8 iwl_mld_nl80211_band_to_fw(enum nl80211_band band)
+{
+ switch (band) {
+ case NL80211_BAND_2GHZ:
+ return PHY_BAND_24;
+ case NL80211_BAND_5GHZ:
+ return PHY_BAND_5;
+ case NL80211_BAND_6GHZ:
+ return PHY_BAND_6;
+ default:
+ WARN_ONCE(1, "Unsupported band (%u)\n", band);
+ return PHY_BAND_5;
+ }
+}
+
+static inline u8 iwl_mld_phy_band_to_nl80211(u8 phy_band)
+{
+ switch (phy_band) {
+ case PHY_BAND_24:
+ return NL80211_BAND_2GHZ;
+ case PHY_BAND_5:
+ return NL80211_BAND_5GHZ;
+ case PHY_BAND_6:
+ return NL80211_BAND_6GHZ;
+ default:
+ WARN_ONCE(1, "Unsupported phy band (%u)\n", phy_band);
+ return NL80211_BAND_5GHZ;
+ }
+}
+
+static inline int
+iwl_mld_legacy_hw_idx_to_mac80211_idx(u32 rate_n_flags,
+ enum nl80211_band band)
+{
+ int format = rate_n_flags & RATE_MCS_MOD_TYPE_MSK;
+ int rate = rate_n_flags & RATE_LEGACY_RATE_MSK;
+ bool is_lb = band == NL80211_BAND_2GHZ;
+
+ if (format == RATE_MCS_LEGACY_OFDM_MSK)
+ return is_lb ? rate + IWL_FIRST_OFDM_RATE : rate;
+
+ /* CCK is not allowed in 5 GHz */
+ return is_lb ? rate : -1;
+}
+
+extern const struct ieee80211_ops iwl_mld_hw_ops;
+
+/**
+ * enum iwl_rx_handler_context: context for Rx handler
+ * @RX_HANDLER_SYNC: the handler is called directly in the Rx path, which
+ * can't acquire the wiphy->mtx.
+ * @RX_HANDLER_ASYNC: if the handler needs to hold the wiphy->mtx
+ * (and only in this case!), it should be set as ASYNC. In that case,
+ * it will be called from a worker with the wiphy->mtx held.
+ */
+enum iwl_rx_handler_context {
+ RX_HANDLER_SYNC,
+ RX_HANDLER_ASYNC,
+};
+
+/**
+ * struct iwl_rx_handler: handler for FW notification
+ * @val_fn: input validation function.
+ * @sizes: an array that maps a version to the expected size.
+ * @fn: the function called when the notification is handled
+ * @cmd_id: command id
+ * @n_sizes: number of elements in &sizes.
+ * @context: see &iwl_rx_handler_context
+ * @obj_type: the type of the object that this handler is related to.
+ * See &iwl_mld_object_type. Use IWL_MLD_OBJECT_TYPE_NONE if not related.
+ * @cancel: function to cancel the notification. valid only if obj_type is not
+ * IWL_MLD_OBJECT_TYPE_NONE.
+ */
+struct iwl_rx_handler {
+ union {
+ bool (*val_fn)(struct iwl_mld *mld, struct iwl_rx_packet *pkt);
+ const struct iwl_notif_struct_size *sizes;
+ };
+ void (*fn)(struct iwl_mld *mld, struct iwl_rx_packet *pkt);
+ u16 cmd_id;
+ u8 n_sizes;
+ u8 context;
+ enum iwl_mld_object_type obj_type;
+ bool (*cancel)(struct iwl_mld *mld, struct iwl_rx_packet *pkt,
+ u32 obj_id);
+};
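+
+/*
+ * A hypothetical entry in the handler table (the actual table and its helper
+ * macros live in notif.c) could look roughly like:
+ *
+ *   {
+ *       .cmd_id = MCC_CHUB_UPDATE_CMD,
+ *       .fn = iwl_mld_handle_update_mcc,
+ *       .context = RX_HANDLER_ASYNC,
+ *       .obj_type = IWL_MLD_OBJECT_TYPE_NONE,
+ *   }
+ */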
+
+/**
+ * struct iwl_notif_struct_size: map a notif ver to the expected size
+ *
+ * @size: the size to expect
+ * @ver: the version of the notification
+ */
+struct iwl_notif_struct_size {
+ u32 size:24, ver:8;
+};
+
+#if IS_ENABLED(CONFIG_IWLWIFI_KUNIT_TESTS)
+extern const struct iwl_hcmd_arr iwl_mld_groups[];
+extern const unsigned int global_iwl_mld_goups_size;
+extern const struct iwl_rx_handler iwl_mld_rx_handlers[];
+extern const unsigned int iwl_mld_rx_handlers_num;
+
+bool
+iwl_mld_is_dup(struct iwl_mld *mld, struct ieee80211_sta *sta,
+ struct ieee80211_hdr *hdr,
+ const struct iwl_rx_mpdu_desc *mpdu_desc,
+ struct ieee80211_rx_status *rx_status, int queue);
+
+void iwl_construct_mld(struct iwl_mld *mld, struct iwl_trans *trans,
+ const struct iwl_cfg *cfg, const struct iwl_fw *fw,
+ struct ieee80211_hw *hw, struct dentry *dbgfs_dir);
+#endif
+
+#define IWL_MLD_INVALID_FW_ID 0xff
+
+#define IWL_MLD_ALLOC_FN(_type, _mac80211_type) \
+static int \
+iwl_mld_allocate_##_type##_fw_id(struct iwl_mld *mld, \
+ u8 *fw_id, \
+ struct ieee80211_##_mac80211_type *mac80211_ptr) \
+{ \
+ u8 rand = IWL_MLD_DIS_RANDOM_FW_ID ? 0 : get_random_u8(); \
+ u8 arr_sz = ARRAY_SIZE(mld->fw_id_to_##_mac80211_type); \
+ if (__builtin_types_compatible_p(typeof(*mac80211_ptr), \
+ struct ieee80211_link_sta)) \
+ arr_sz = mld->fw->ucode_capa.num_stations; \
+ if (__builtin_types_compatible_p(typeof(*mac80211_ptr), \
+ struct ieee80211_bss_conf)) \
+ arr_sz = mld->fw->ucode_capa.num_links; \
+ for (int i = 0; i < arr_sz; i++) { \
+ u8 idx = (i + rand) % arr_sz; \
+ if (rcu_access_pointer(mld->fw_id_to_##_mac80211_type[idx])) \
+ continue; \
+ IWL_DEBUG_INFO(mld, "Allocated at index %d / %d\n", idx, arr_sz); \
+ *fw_id = idx; \
+ rcu_assign_pointer(mld->fw_id_to_##_mac80211_type[idx], mac80211_ptr); \
+ return 0; \
+ } \
+ return -ENOSPC; \
+}
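+
+/*
+ * For illustration: a hypothetical expansion IWL_MLD_ALLOC_FN(vif, vif) would
+ * define iwl_mld_allocate_vif_fw_id(), which picks a free index in
+ * mld->fw_id_to_vif (starting from a random offset) and publishes the
+ * mac80211 pointer there; each user instantiates the variant it needs.
+ */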
+
+static inline struct ieee80211_bss_conf *
+iwl_mld_fw_id_to_link_conf(struct iwl_mld *mld, u8 fw_link_id)
+{
+ if (IWL_FW_CHECK(mld, fw_link_id >= mld->fw->ucode_capa.num_links,
+ "Invalid fw_link_id: %d\n", fw_link_id))
+ return NULL;
+
+ return wiphy_dereference(mld->wiphy,
+ mld->fw_id_to_bss_conf[fw_link_id]);
+}
+
+#define MSEC_TO_TU(_msec) ((_msec) * 1000 / 1024)
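+/* e.g. MSEC_TO_TU(1024) == 1000, since one TU is 1024 usec */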
+
+void iwl_mld_add_vif_debugfs(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif);
+void iwl_mld_add_link_debugfs(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf,
+ struct dentry *dir);
+void iwl_mld_add_link_sta_debugfs(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_link_sta *link_sta,
+ struct dentry *dir);
+
+/* Utilities */
+
+static inline u8 iwl_mld_mac80211_ac_to_fw_tx_fifo(enum ieee80211_ac_numbers ac)
+{
+ static const u8 mac80211_ac_to_fw_tx_fifo[] = {
+ IWL_BZ_EDCA_TX_FIFO_VO,
+ IWL_BZ_EDCA_TX_FIFO_VI,
+ IWL_BZ_EDCA_TX_FIFO_BE,
+ IWL_BZ_EDCA_TX_FIFO_BK,
+ IWL_BZ_TRIG_TX_FIFO_VO,
+ IWL_BZ_TRIG_TX_FIFO_VI,
+ IWL_BZ_TRIG_TX_FIFO_BE,
+ IWL_BZ_TRIG_TX_FIFO_BK,
+ };
+ return mac80211_ac_to_fw_tx_fifo[ac];
+}
+
+static inline u32
+iwl_mld_get_lmac_id(struct iwl_mld *mld, enum nl80211_band band)
+{
+ if (!fw_has_capa(&mld->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_CDB_SUPPORT) ||
+ band == NL80211_BAND_2GHZ)
+ return IWL_LMAC_24G_INDEX;
+ return IWL_LMAC_5G_INDEX;
+}
+
+/* Check if we had an error, but reconfig flow didn't start yet */
+static inline bool iwl_mld_error_before_recovery(struct iwl_mld *mld)
+{
+ return mld->fw_status.in_hw_restart &&
+ !iwl_trans_fw_running(mld->trans);
+}
+
+int iwl_mld_tdls_sta_count(struct iwl_mld *mld);
+
+#endif /* __iwl_mld_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/mlo.c b/drivers/net/wireless/intel/iwlwifi/mld/mlo.c
new file mode 100644
index 000000000000..a870e169e265
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mld/mlo.c
@@ -0,0 +1,1076 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2024-2025 Intel Corporation
+ */
+#include "mlo.h"
+#include "phy.h"
+
+/* Block reasons helper */
+#define HANDLE_EMLSR_BLOCKED_REASONS(HOW) \
+ HOW(PREVENTION) \
+ HOW(WOWLAN) \
+ HOW(ROC) \
+ HOW(NON_BSS) \
+ HOW(TMP_NON_BSS) \
+ HOW(TPT)
+
+static const char *
+iwl_mld_get_emlsr_blocked_string(enum iwl_mld_emlsr_blocked blocked)
+{
+ /* Using switch without "default" will warn about missing entries */
+ switch (blocked) {
+#define REASON_CASE(x) case IWL_MLD_EMLSR_BLOCKED_##x: return #x;
+ HANDLE_EMLSR_BLOCKED_REASONS(REASON_CASE)
+#undef REASON_CASE
+ }
+
+ return "ERROR";
+}
+
+static void iwl_mld_print_emlsr_blocked(struct iwl_mld *mld, u32 mask)
+{
+#define NAME_FMT(x) "%s"
+#define NAME_PR(x) (mask & IWL_MLD_EMLSR_BLOCKED_##x) ? "[" #x "]" : "",
+ IWL_DEBUG_INFO(mld,
+ "EMLSR blocked = " HANDLE_EMLSR_BLOCKED_REASONS(NAME_FMT)
+ " (0x%x)\n",
+ HANDLE_EMLSR_BLOCKED_REASONS(NAME_PR)
+ mask);
+#undef NAME_FMT
+#undef NAME_PR
+}
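+
+/*
+ * With the two temporary macros above, HANDLE_EMLSR_BLOCKED_REASONS(NAME_FMT)
+ * expands to one "%s" per reason and HANDLE_EMLSR_BLOCKED_REASONS(NAME_PR) to
+ * the matching "[REASON]" or "" arguments, so a mask with, say, the PREVENTION
+ * and ROC bits set is printed as "EMLSR blocked = [PREVENTION][ROC] (0x..)".
+ */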
+
+/* Exit reasons helper */
+#define HANDLE_EMLSR_EXIT_REASONS(HOW) \
+ HOW(BLOCK) \
+ HOW(MISSED_BEACON) \
+ HOW(FAIL_ENTRY) \
+ HOW(CSA) \
+ HOW(EQUAL_BAND) \
+ HOW(LOW_RSSI) \
+ HOW(LINK_USAGE) \
+ HOW(BT_COEX) \
+ HOW(CHAN_LOAD) \
+ HOW(RFI) \
+ HOW(FW_REQUEST)
+
+static const char *
+iwl_mld_get_emlsr_exit_string(enum iwl_mld_emlsr_exit exit)
+{
+ /* Using switch without "default" will warn about missing entries */
+ switch (exit) {
+#define REASON_CASE(x) case IWL_MLD_EMLSR_EXIT_##x: return #x;
+ HANDLE_EMLSR_EXIT_REASONS(REASON_CASE)
+#undef REASON_CASE
+ }
+
+ return "ERROR";
+}
+
+static void iwl_mld_print_emlsr_exit(struct iwl_mld *mld, u32 mask)
+{
+#define NAME_FMT(x) "%s"
+#define NAME_PR(x) (mask & IWL_MLD_EMLSR_EXIT_##x) ? "[" #x "]" : "",
+ IWL_DEBUG_INFO(mld,
+ "EMLSR exit = " HANDLE_EMLSR_EXIT_REASONS(NAME_FMT)
+ " (0x%x)\n",
+ HANDLE_EMLSR_EXIT_REASONS(NAME_PR)
+ mask);
+#undef NAME_FMT
+#undef NAME_PR
+}
+
+void iwl_mld_emlsr_prevent_done_wk(struct wiphy *wiphy, struct wiphy_work *wk)
+{
+ struct iwl_mld_vif *mld_vif = container_of(wk, struct iwl_mld_vif,
+ emlsr.prevent_done_wk.work);
+ struct ieee80211_vif *vif =
+ container_of((void *)mld_vif, struct ieee80211_vif, drv_priv);
+
+ if (WARN_ON(!(mld_vif->emlsr.blocked_reasons &
+ IWL_MLD_EMLSR_BLOCKED_PREVENTION)))
+ return;
+
+ iwl_mld_unblock_emlsr(mld_vif->mld, vif,
+ IWL_MLD_EMLSR_BLOCKED_PREVENTION);
+}
+
+void iwl_mld_emlsr_tmp_non_bss_done_wk(struct wiphy *wiphy,
+ struct wiphy_work *wk)
+{
+ struct iwl_mld_vif *mld_vif = container_of(wk, struct iwl_mld_vif,
+ emlsr.tmp_non_bss_done_wk.work);
+ struct ieee80211_vif *vif =
+ container_of((void *)mld_vif, struct ieee80211_vif, drv_priv);
+
+ if (WARN_ON(!(mld_vif->emlsr.blocked_reasons &
+ IWL_MLD_EMLSR_BLOCKED_TMP_NON_BSS)))
+ return;
+
+ iwl_mld_unblock_emlsr(mld_vif->mld, vif,
+ IWL_MLD_EMLSR_BLOCKED_TMP_NON_BSS);
+}
+
+#define IWL_MLD_TRIGGER_LINK_SEL_TIME (HZ * IWL_MLD_TRIGGER_LINK_SEL_TIME_SEC)
+#define IWL_MLD_SCAN_EXPIRE_TIME (HZ * IWL_MLD_SCAN_EXPIRE_TIME_SEC)
+
+/* Exit reasons that can cause longer EMLSR prevention */
+#define IWL_MLD_PREVENT_EMLSR_REASONS (IWL_MLD_EMLSR_EXIT_MISSED_BEACON | \
+ IWL_MLD_EMLSR_EXIT_LINK_USAGE | \
+ IWL_MLD_EMLSR_EXIT_FW_REQUEST)
+#define IWL_MLD_PREVENT_EMLSR_TIMEOUT (HZ * 400)
+
+#define IWL_MLD_EMLSR_PREVENT_SHORT (HZ * 300)
+#define IWL_MLD_EMLSR_PREVENT_LONG (HZ * 600)
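+
+/*
+ * Repeated exits escalate the prevention time: the first exit uses the
+ * regular link-selection delay; for the reasons listed in
+ * IWL_MLD_PREVENT_EMLSR_REASONS, a repeat within the timeout uses the short
+ * prevention and further repeats use the long one.
+ */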
+
+static void iwl_mld_check_emlsr_prevention(struct iwl_mld *mld,
+ struct iwl_mld_vif *mld_vif,
+ enum iwl_mld_emlsr_exit reason)
+{
+ unsigned long delay;
+
+ /*
+ * Reset the counter if more than 400 seconds have passed between one
+ * exit and the other, or if we exited due to a different reason.
+ * Will also reset the counter after the long prevention is done.
+ */
+ if (time_after(jiffies, mld_vif->emlsr.last_exit_ts +
+ IWL_MLD_PREVENT_EMLSR_TIMEOUT) ||
+ mld_vif->emlsr.last_exit_reason != reason)
+ mld_vif->emlsr.exit_repeat_count = 0;
+
+ mld_vif->emlsr.last_exit_reason = reason;
+ mld_vif->emlsr.last_exit_ts = jiffies;
+ mld_vif->emlsr.exit_repeat_count++;
+
+ /*
+ * Do not add a prevention when the reason was a block. For a block,
+ * EMLSR will be enabled again on unblock.
+ */
+ if (reason == IWL_MLD_EMLSR_EXIT_BLOCK)
+ return;
+
+ /* Set prevention for a minimum of 30 seconds */
+ mld_vif->emlsr.blocked_reasons |= IWL_MLD_EMLSR_BLOCKED_PREVENTION;
+ delay = IWL_MLD_TRIGGER_LINK_SEL_TIME;
+
+ /* Handle repeats for reasons that can cause long prevention */
+ if (mld_vif->emlsr.exit_repeat_count > 1 &&
+ reason & IWL_MLD_PREVENT_EMLSR_REASONS) {
+ if (mld_vif->emlsr.exit_repeat_count == 2)
+ delay = IWL_MLD_EMLSR_PREVENT_SHORT;
+ else
+ delay = IWL_MLD_EMLSR_PREVENT_LONG;
+
+ /*
+ * The timeouts are chosen so that this will not happen, i.e.
+ * IWL_MLD_EMLSR_PREVENT_LONG > IWL_MLD_PREVENT_EMLSR_TIMEOUT
+ */
+ WARN_ON(mld_vif->emlsr.exit_repeat_count > 3);
+ }
+
+ IWL_DEBUG_INFO(mld,
+ "Preventing EMLSR for %ld seconds due to %u exits with the reason = %s (0x%x)\n",
+ delay / HZ, mld_vif->emlsr.exit_repeat_count,
+ iwl_mld_get_emlsr_exit_string(reason), reason);
+
+ wiphy_delayed_work_queue(mld->wiphy,
+ &mld_vif->emlsr.prevent_done_wk, delay);
+}
+
+static void iwl_mld_clear_avg_chan_load_iter(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *ctx,
+ void *dat)
+{
+ struct iwl_mld_phy *phy = iwl_mld_phy_from_mac80211(ctx);
+
+ /* It is ok to do it for all chanctx (and not only for the ones that
+ * belong to the EMLSR vif) since EMLSR is not allowed if there is
+ * another vif.
+ */
+ phy->avg_channel_load_not_by_us = 0;
+}
+
+static int _iwl_mld_exit_emlsr(struct iwl_mld *mld, struct ieee80211_vif *vif,
+ enum iwl_mld_emlsr_exit exit, u8 link_to_keep,
+ bool sync)
+{
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+ u16 new_active_links;
+ int ret = 0;
+
+ lockdep_assert_wiphy(mld->wiphy);
+
+ /* On entry failure need to exit anyway, even if entered from debugfs */
+ if (exit != IWL_MLD_EMLSR_EXIT_FAIL_ENTRY && !IWL_MLD_AUTO_EML_ENABLE)
+ return 0;
+
+ /* Ignore exit request if EMLSR is not active */
+ if (!iwl_mld_emlsr_active(vif))
+ return 0;
+
+ if (WARN_ON(!ieee80211_vif_is_mld(vif) || !mld_vif->authorized))
+ return 0;
+
+ if (WARN_ON(!(vif->active_links & BIT(link_to_keep))))
+ link_to_keep = __ffs(vif->active_links);
+
+ new_active_links = BIT(link_to_keep);
+ IWL_DEBUG_INFO(mld,
+ "Exiting EMLSR. reason = %s (0x%x). Current active links=0x%x, new active links = 0x%x\n",
+ iwl_mld_get_emlsr_exit_string(exit), exit,
+ vif->active_links, new_active_links);
+
+ if (sync)
+ ret = ieee80211_set_active_links(vif, new_active_links);
+ else
+ ieee80211_set_active_links_async(vif, new_active_links);
+
+ /* Update latest exit reason and check EMLSR prevention */
+ iwl_mld_check_emlsr_prevention(mld, mld_vif, exit);
+
+ /* channel_load_not_by_us is invalid when in EMLSR.
+ * Clear it so wrong values won't be used.
+ */
+ ieee80211_iter_chan_contexts_atomic(mld->hw,
+ iwl_mld_clear_avg_chan_load_iter,
+ NULL);
+
+ return ret;
+}
+
+void iwl_mld_exit_emlsr(struct iwl_mld *mld, struct ieee80211_vif *vif,
+ enum iwl_mld_emlsr_exit exit, u8 link_to_keep)
+{
+ _iwl_mld_exit_emlsr(mld, vif, exit, link_to_keep, false);
+}
+
+static int _iwl_mld_emlsr_block(struct iwl_mld *mld, struct ieee80211_vif *vif,
+ enum iwl_mld_emlsr_blocked reason,
+ u8 link_to_keep, bool sync)
+{
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+
+ lockdep_assert_wiphy(mld->wiphy);
+
+ if (!IWL_MLD_AUTO_EML_ENABLE || !iwl_mld_vif_has_emlsr_cap(vif))
+ return 0;
+
+ if (mld_vif->emlsr.blocked_reasons & reason)
+ return 0;
+
+ mld_vif->emlsr.blocked_reasons |= reason;
+
+ IWL_DEBUG_INFO(mld,
+ "Blocking EMLSR mode. reason = %s (0x%x)\n",
+ iwl_mld_get_emlsr_blocked_string(reason), reason);
+ iwl_mld_print_emlsr_blocked(mld, mld_vif->emlsr.blocked_reasons);
+
+ if (reason == IWL_MLD_EMLSR_BLOCKED_TPT)
+ wiphy_delayed_work_cancel(mld_vif->mld->wiphy,
+ &mld_vif->emlsr.check_tpt_wk);
+
+ return _iwl_mld_exit_emlsr(mld, vif, IWL_MLD_EMLSR_EXIT_BLOCK,
+ link_to_keep, sync);
+}
+
+void iwl_mld_block_emlsr(struct iwl_mld *mld, struct ieee80211_vif *vif,
+ enum iwl_mld_emlsr_blocked reason, u8 link_to_keep)
+{
+ _iwl_mld_emlsr_block(mld, vif, reason, link_to_keep, false);
+}
+
+int iwl_mld_block_emlsr_sync(struct iwl_mld *mld, struct ieee80211_vif *vif,
+ enum iwl_mld_emlsr_blocked reason, u8 link_to_keep)
+{
+ return _iwl_mld_emlsr_block(mld, vif, reason, link_to_keep, true);
+}
+
+static void _iwl_mld_select_links(struct iwl_mld *mld,
+ struct ieee80211_vif *vif);
+
+void iwl_mld_unblock_emlsr(struct iwl_mld *mld, struct ieee80211_vif *vif,
+ enum iwl_mld_emlsr_blocked reason)
+{
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+
+ lockdep_assert_wiphy(mld->wiphy);
+
+ if (!IWL_MLD_AUTO_EML_ENABLE || !iwl_mld_vif_has_emlsr_cap(vif))
+ return;
+
+ if (!(mld_vif->emlsr.blocked_reasons & reason))
+ return;
+
+ mld_vif->emlsr.blocked_reasons &= ~reason;
+
+ IWL_DEBUG_INFO(mld,
+ "Unblocking EMLSR mode. reason = %s (0x%x)\n",
+ iwl_mld_get_emlsr_blocked_string(reason), reason);
+ iwl_mld_print_emlsr_blocked(mld, mld_vif->emlsr.blocked_reasons);
+
+ if (reason == IWL_MLD_EMLSR_BLOCKED_TPT)
+ wiphy_delayed_work_queue(mld_vif->mld->wiphy,
+ &mld_vif->emlsr.check_tpt_wk,
+ round_jiffies_relative(IWL_MLD_TPT_COUNT_WINDOW));
+
+ if (mld_vif->emlsr.blocked_reasons)
+ return;
+
+ IWL_DEBUG_INFO(mld, "EMLSR is unblocked\n");
+ iwl_mld_int_mlo_scan(mld, vif);
+}
+
+static void
+iwl_mld_vif_iter_emlsr_mode_notif(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+ struct iwl_esr_mode_notif *notif = (void *)data;
+
+ if (!iwl_mld_vif_has_emlsr_cap(vif))
+ return;
+
+ switch (le32_to_cpu(notif->action)) {
+ case ESR_RECOMMEND_LEAVE:
+ iwl_mld_exit_emlsr(mld_vif->mld, vif,
+ IWL_MLD_EMLSR_EXIT_FW_REQUEST,
+ iwl_mld_get_primary_link(vif));
+ break;
+ case ESR_RECOMMEND_ENTER:
+ case ESR_FORCE_LEAVE:
+ default:
+ IWL_WARN(mld_vif->mld, "Unexpected EMLSR notification: %d\n",
+ le32_to_cpu(notif->action));
+ }
+}
+
+void iwl_mld_handle_emlsr_mode_notif(struct iwl_mld *mld,
+ struct iwl_rx_packet *pkt)
+{
+ ieee80211_iterate_active_interfaces_mtx(mld->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mld_vif_iter_emlsr_mode_notif,
+ pkt->data);
+}
+
+static void
+iwl_mld_vif_iter_disconnect_emlsr(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ if (!iwl_mld_vif_has_emlsr_cap(vif))
+ return;
+
+ ieee80211_connection_loss(vif);
+}
+
+void iwl_mld_handle_emlsr_trans_fail_notif(struct iwl_mld *mld,
+ struct iwl_rx_packet *pkt)
+{
+ const struct iwl_esr_trans_fail_notif *notif = (const void *)pkt->data;
+ u32 fw_link_id = le32_to_cpu(notif->link_id);
+ struct ieee80211_bss_conf *bss_conf =
+ iwl_mld_fw_id_to_link_conf(mld, fw_link_id);
+
+ IWL_DEBUG_INFO(mld, "Failed to %s EMLSR on link %d (FW: %d), reason %d\n",
+ le32_to_cpu(notif->activation) ? "enter" : "exit",
+ bss_conf ? bss_conf->link_id : -1,
+ le32_to_cpu(notif->link_id),
+ le32_to_cpu(notif->err_code));
+
+ if (IWL_FW_CHECK(mld, !bss_conf,
+ "FW reported failure to %sactivate EMLSR on a non-existing link: %d\n",
+ le32_to_cpu(notif->activation) ? "" : "de",
+ fw_link_id)) {
+ ieee80211_iterate_active_interfaces_mtx(
+ mld->hw, IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mld_vif_iter_disconnect_emlsr, NULL);
+ return;
+ }
+
+ /* Disconnect if we failed to deactivate a link */
+ if (!le32_to_cpu(notif->activation)) {
+ ieee80211_connection_loss(bss_conf->vif);
+ return;
+ }
+
+ /*
+ * We failed to activate the second link, go back to the link specified
+ * by the firmware as that is the one that is still valid now.
+ */
+ iwl_mld_exit_emlsr(mld, bss_conf->vif, IWL_MLD_EMLSR_EXIT_FAIL_ENTRY,
+ bss_conf->link_id);
+}
+
+/* Active non-station link tracking */
+static void iwl_mld_count_non_bss_links(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+ int *count = _data;
+
+ if (ieee80211_vif_type_p2p(vif) == NL80211_IFTYPE_STATION)
+ return;
+
+ *count += iwl_mld_count_active_links(mld_vif->mld, vif);
+}
+
+struct iwl_mld_update_emlsr_block_data {
+ bool block;
+ int result;
+};
+
+static void
+iwl_mld_vif_iter_update_emlsr_non_bss_block(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mld_update_emlsr_block_data *data = _data;
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+ int ret;
+
+ if (data->block) {
+ ret = iwl_mld_block_emlsr_sync(mld_vif->mld, vif,
+ IWL_MLD_EMLSR_BLOCKED_NON_BSS,
+ iwl_mld_get_primary_link(vif));
+ if (ret)
+ data->result = ret;
+ } else {
+ iwl_mld_unblock_emlsr(mld_vif->mld, vif,
+ IWL_MLD_EMLSR_BLOCKED_NON_BSS);
+ }
+}
+
+int iwl_mld_emlsr_check_non_bss_block(struct iwl_mld *mld,
+ int pending_link_changes)
+{
+	/* An active link of a non-station vif blocks EMLSR. Upon activation,
+	 * block EMLSR on the bss vif. Upon deactivation, check if this link
+	 * was the last active non-station link, and if so unblock the bss vif.
+	 */
+ struct iwl_mld_update_emlsr_block_data block_data = {};
+ int count = pending_link_changes;
+
+ /* No need to count if we are activating a non-BSS link */
+ if (count <= 0)
+ ieee80211_iterate_active_interfaces_mtx(mld->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mld_count_non_bss_links,
+ &count);
+
+ /*
+	 * We could skip updating it if the block state did not change (and
+ * pending_link_changes is non-zero).
+ */
+ block_data.block = !!count;
+
+ ieee80211_iterate_active_interfaces_mtx(mld->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mld_vif_iter_update_emlsr_non_bss_block,
+ &block_data);
+
+ return block_data.result;
+}
+
+#define EMLSR_SEC_LINK_MIN_PERC 10
+#define EMLSR_MIN_TX 3000
+#define EMLSR_MIN_RX 400
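+
+/*
+ * Illustrative numbers: with 10000 TX MPDUs counted in a window (above
+ * EMLSR_MIN_TX), the secondary link must have carried at least 1000 of them
+ * (EMLSR_SEC_LINK_MIN_PERC) for EMLSR to remain worthwhile.
+ */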
+
+void iwl_mld_emlsr_check_tpt(struct wiphy *wiphy, struct wiphy_work *wk)
+{
+ struct iwl_mld_vif *mld_vif = container_of(wk, struct iwl_mld_vif,
+ emlsr.check_tpt_wk.work);
+ struct ieee80211_vif *vif =
+ container_of((void *)mld_vif, struct ieee80211_vif, drv_priv);
+ struct iwl_mld *mld = mld_vif->mld;
+ struct iwl_mld_sta *mld_sta;
+ struct iwl_mld_link *sec_link;
+ unsigned long total_tx = 0, total_rx = 0;
+ unsigned long sec_link_tx = 0, sec_link_rx = 0;
+ u8 sec_link_tx_perc, sec_link_rx_perc;
+ s8 sec_link_id;
+
+ if (!iwl_mld_vif_has_emlsr_cap(vif) || !mld_vif->ap_sta)
+ return;
+
+ mld_sta = iwl_mld_sta_from_mac80211(mld_vif->ap_sta);
+
+ /* We only count for the AP sta in a MLO connection */
+ if (!mld_sta->mpdu_counters)
+ return;
+
+	/* This work should only run when the TPT blocker isn't set.
+	 * When the blocker is set, the decision to remove it, as well as
+	 * clearing the counters, is done in the data path (to avoid running
+	 * a work every 5 seconds when idle; when the blocker is unset, we are
+	 * not idle anyway).
+	 */
+ if (WARN_ON(mld_vif->emlsr.blocked_reasons & IWL_MLD_EMLSR_BLOCKED_TPT))
+ return;
+ /*
+	 * TPT is unblocked, need to check if the TPT criteria are still met.
+	 *
+	 * If EMLSR is active, then we also need to check the secondary link
+	 * requirements.
+ */
+ if (iwl_mld_emlsr_active(vif)) {
+ sec_link_id = iwl_mld_get_other_link(vif, iwl_mld_get_primary_link(vif));
+ sec_link = iwl_mld_link_dereference_check(mld_vif, sec_link_id);
+ if (WARN_ON_ONCE(!sec_link))
+ return;
+ /* We need the FW ID here */
+ sec_link_id = sec_link->fw_id;
+ } else {
+ sec_link_id = -1;
+ }
+
+ /* Sum up RX and TX MPDUs from the different queues/links */
+ for (int q = 0; q < mld->trans->num_rx_queues; q++) {
+ struct iwl_mld_per_q_mpdu_counter *queue_counter =
+ &mld_sta->mpdu_counters[q];
+
+ spin_lock_bh(&queue_counter->lock);
+
+		/* The link IDs that don't exist will contain 0 */
+ for (int link = 0;
+ link < ARRAY_SIZE(queue_counter->per_link);
+ link++) {
+ total_tx += queue_counter->per_link[link].tx;
+ total_rx += queue_counter->per_link[link].rx;
+ }
+
+ if (sec_link_id != -1) {
+ sec_link_tx += queue_counter->per_link[sec_link_id].tx;
+ sec_link_rx += queue_counter->per_link[sec_link_id].rx;
+ }
+
+ memset(queue_counter->per_link, 0,
+ sizeof(queue_counter->per_link));
+
+ spin_unlock_bh(&queue_counter->lock);
+ }
+
+ IWL_DEBUG_INFO(mld, "total Tx MPDUs: %ld. total Rx MPDUs: %ld\n",
+ total_tx, total_rx);
+
+ /* If we don't have enough MPDUs - exit EMLSR */
+ if (total_tx < IWL_MLD_ENTER_EMLSR_TPT_THRESH &&
+ total_rx < IWL_MLD_ENTER_EMLSR_TPT_THRESH) {
+ iwl_mld_block_emlsr(mld, vif, IWL_MLD_EMLSR_BLOCKED_TPT,
+ iwl_mld_get_primary_link(vif));
+ return;
+ }
+
+ /* EMLSR is not active */
+ if (sec_link_id == -1)
+ return;
+
+ IWL_DEBUG_INFO(mld, "Secondary Link %d: Tx MPDUs: %ld. Rx MPDUs: %ld\n",
+ sec_link_id, sec_link_tx, sec_link_rx);
+
+ /* Calculate the percentage of the secondary link TX/RX */
+ sec_link_tx_perc = total_tx ? sec_link_tx * 100 / total_tx : 0;
+ sec_link_rx_perc = total_rx ? sec_link_rx * 100 / total_rx : 0;
+
+ /*
+	 * The TX/RX percentage is checked only if the total traffic exceeds
+	 * the required minimum. In addition, RX is checked only if the TX
+	 * check did not already trigger an exit.
+ */
+ if ((total_tx > EMLSR_MIN_TX &&
+ sec_link_tx_perc < EMLSR_SEC_LINK_MIN_PERC) ||
+ (total_rx > EMLSR_MIN_RX &&
+ sec_link_rx_perc < EMLSR_SEC_LINK_MIN_PERC)) {
+ iwl_mld_exit_emlsr(mld, vif, IWL_MLD_EMLSR_EXIT_LINK_USAGE,
+ iwl_mld_get_primary_link(vif));
+ return;
+ }
+
+ /* Check again when the next window ends */
+ wiphy_delayed_work_queue(mld_vif->mld->wiphy,
+ &mld_vif->emlsr.check_tpt_wk,
+ round_jiffies_relative(IWL_MLD_TPT_COUNT_WINDOW));
+}
+
+void iwl_mld_emlsr_unblock_tpt_wk(struct wiphy *wiphy, struct wiphy_work *wk)
+{
+ struct iwl_mld_vif *mld_vif = container_of(wk, struct iwl_mld_vif,
+ emlsr.unblock_tpt_wk);
+ struct ieee80211_vif *vif =
+ container_of((void *)mld_vif, struct ieee80211_vif, drv_priv);
+
+ iwl_mld_unblock_emlsr(mld_vif->mld, vif, IWL_MLD_EMLSR_BLOCKED_TPT);
+}
+
+/*
+ * Link selection
+ */
+
+s8 iwl_mld_get_emlsr_rssi_thresh(struct iwl_mld *mld,
+ const struct cfg80211_chan_def *chandef,
+ bool low)
+{
+ if (WARN_ON(chandef->chan->band != NL80211_BAND_2GHZ &&
+ chandef->chan->band != NL80211_BAND_5GHZ &&
+ chandef->chan->band != NL80211_BAND_6GHZ))
+ return S8_MAX;
+
+#define RSSI_THRESHOLD(_low, _bw) \
+ (_low) ? IWL_MLD_LOW_RSSI_THRESH_##_bw##MHZ \
+ : IWL_MLD_HIGH_RSSI_THRESH_##_bw##MHZ
+
+ switch (chandef->width) {
+ case NL80211_CHAN_WIDTH_20_NOHT:
+ case NL80211_CHAN_WIDTH_20:
+ /* 320 MHz has the same thresholds as 20 MHz */
+ case NL80211_CHAN_WIDTH_320:
+ return RSSI_THRESHOLD(low, 20);
+ case NL80211_CHAN_WIDTH_40:
+ return RSSI_THRESHOLD(low, 40);
+ case NL80211_CHAN_WIDTH_80:
+ return RSSI_THRESHOLD(low, 80);
+ case NL80211_CHAN_WIDTH_160:
+ return RSSI_THRESHOLD(low, 160);
+ default:
+ WARN_ON(1);
+ return S8_MAX;
+ }
+#undef RSSI_THRESHOLD
+}
+
+static u32
+iwl_mld_emlsr_disallowed_with_link(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ struct iwl_mld_link_sel_data *link,
+ bool primary)
+{
+ struct wiphy *wiphy = mld->wiphy;
+ struct ieee80211_bss_conf *conf;
+ enum iwl_mld_emlsr_exit ret = 0;
+
+ conf = wiphy_dereference(wiphy, vif->link_conf[link->link_id]);
+ if (WARN_ON_ONCE(!conf))
+ return false;
+
+ if (link->chandef->chan->band == NL80211_BAND_2GHZ && mld->bt_is_active)
+ ret |= IWL_MLD_EMLSR_EXIT_BT_COEX;
+
+ if (link->signal <
+ iwl_mld_get_emlsr_rssi_thresh(mld, link->chandef, false))
+ ret |= IWL_MLD_EMLSR_EXIT_LOW_RSSI;
+
+ if (conf->csa_active)
+ ret |= IWL_MLD_EMLSR_EXIT_CSA;
+
+ if (ret) {
+ IWL_DEBUG_INFO(mld,
+ "Link %d is not allowed for EMLSR as %s\n",
+ link->link_id,
+ primary ? "primary" : "secondary");
+ iwl_mld_print_emlsr_exit(mld, ret);
+ }
+
+ return ret;
+}
+
+static u8
+iwl_mld_set_link_sel_data(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ struct iwl_mld_link_sel_data *data,
+ unsigned long usable_links,
+ u8 *best_link_idx)
+{
+ u8 n_data = 0;
+ u16 max_grade = 0;
+ unsigned long link_id;
+
+ /*
+ * TODO: don't select links that weren't discovered in the last scan
+ * This requires mac80211 (or cfg80211) changes to forward/track when
+ * a BSS was last updated. cfg80211 already tracks this information but
+ * it is not exposed within the kernel.
+ */
+ for_each_set_bit(link_id, &usable_links, IEEE80211_MLD_MAX_NUM_LINKS) {
+ struct ieee80211_bss_conf *link_conf =
+ link_conf_dereference_protected(vif, link_id);
+
+ if (WARN_ON_ONCE(!link_conf))
+ continue;
+
+ /* Ignore any BSS that was not seen in the last MLO scan */
+ if (ktime_before(link_conf->bss->ts_boottime,
+ mld->scan.last_mlo_scan_time))
+ continue;
+
+ data[n_data].link_id = link_id;
+ data[n_data].chandef = &link_conf->chanreq.oper;
+ data[n_data].signal = MBM_TO_DBM(link_conf->bss->signal);
+ data[n_data].grade = iwl_mld_get_link_grade(mld, link_conf);
+
+ if (n_data == 0 || data[n_data].grade > max_grade) {
+ max_grade = data[n_data].grade;
+ *best_link_idx = n_data;
+ }
+ n_data++;
+ }
+
+ return n_data;
+}
+
+static u32
+iwl_mld_get_min_chan_load_thresh(struct ieee80211_chanctx_conf *chanctx)
+{
+ const struct iwl_mld_phy *phy = iwl_mld_phy_from_mac80211(chanctx);
+
+ switch (phy->chandef.width) {
+ case NL80211_CHAN_WIDTH_320:
+ case NL80211_CHAN_WIDTH_160:
+ return 5;
+ case NL80211_CHAN_WIDTH_80:
+ return 7;
+ default:
+ break;
+ }
+ return 10;
+}
+
+VISIBLE_IF_IWLWIFI_KUNIT bool
+iwl_mld_channel_load_allows_emlsr(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ const struct iwl_mld_link_sel_data *a,
+ const struct iwl_mld_link_sel_data *b)
+{
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+ struct iwl_mld_link *link_a =
+ iwl_mld_link_dereference_check(mld_vif, a->link_id);
+ struct ieee80211_chanctx_conf *chanctx_a = NULL;
+ u32 bw_a, bw_b, ratio;
+ u32 primary_load_perc;
+
+ if (!link_a || !link_a->active) {
+ IWL_DEBUG_EHT(mld, "Primary link is not active. Can't enter EMLSR\n");
+ return false;
+ }
+
+ chanctx_a = wiphy_dereference(mld->wiphy, link_a->chan_ctx);
+
+ if (WARN_ON(!chanctx_a))
+ return false;
+
+ primary_load_perc =
+ iwl_mld_phy_from_mac80211(chanctx_a)->avg_channel_load_not_by_us;
+
+ IWL_DEBUG_EHT(mld, "Average channel load not by us: %u\n", primary_load_perc);
+
+ if (primary_load_perc < iwl_mld_get_min_chan_load_thresh(chanctx_a)) {
+ IWL_DEBUG_EHT(mld, "Channel load is below the minimum threshold\n");
+ return false;
+ }
+
+ if (iwl_mld_vif_low_latency(mld_vif)) {
+ IWL_DEBUG_EHT(mld, "Low latency vif, EMLSR is allowed\n");
+ return true;
+ }
+
+ if (a->chandef->width <= b->chandef->width)
+ return true;
+
+ bw_a = nl80211_chan_width_to_mhz(a->chandef->width);
+ bw_b = nl80211_chan_width_to_mhz(b->chandef->width);
+ ratio = bw_a / bw_b;
+
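+	/* The wider the primary channel is relative to the secondary, the
+	 * more loaded it must be (by others) for EMLSR to be worthwhile.
+	 */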
+ switch (ratio) {
+ case 2:
+ return primary_load_perc > 25;
+ case 4:
+ return primary_load_perc > 40;
+ case 8:
+ case 16:
+ return primary_load_perc > 50;
+ }
+
+ return false;
+}
+EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_mld_channel_load_allows_emlsr);
+
+static bool
+iwl_mld_valid_emlsr_pair(struct ieee80211_vif *vif,
+ struct iwl_mld_link_sel_data *a,
+ struct iwl_mld_link_sel_data *b)
+{
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+ struct iwl_mld *mld = mld_vif->mld;
+ u32 reason_mask = 0;
+
+ /* Per-link considerations */
+ if (iwl_mld_emlsr_disallowed_with_link(mld, vif, a, true) ||
+ iwl_mld_emlsr_disallowed_with_link(mld, vif, b, false))
+ return false;
+
+ if (a->chandef->chan->band == b->chandef->chan->band)
+ reason_mask |= IWL_MLD_EMLSR_EXIT_EQUAL_BAND;
+ if (!iwl_mld_channel_load_allows_emlsr(mld, vif, a, b))
+ reason_mask |= IWL_MLD_EMLSR_EXIT_CHAN_LOAD;
+
+ if (reason_mask) {
+ IWL_DEBUG_INFO(mld,
+ "Links %d and %d are not a valid pair for EMLSR\n",
+ a->link_id, b->link_id);
+ IWL_DEBUG_INFO(mld,
+			      "Link bandwidths are: %d and %d\n",
+ nl80211_chan_width_to_mhz(a->chandef->width),
+ nl80211_chan_width_to_mhz(b->chandef->width));
+ iwl_mld_print_emlsr_exit(mld, reason_mask);
+ return false;
+ }
+
+ return true;
+}
+
+/* Calculation is done in fixed point with a scaling factor of 1/256 */
+#define SCALE_FACTOR 256
+
+/*
+ * Returns the combined grade of two given links.
+ * Returns 0 if EMLSR is not allowed with these 2 links.
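+ *
+ * Illustrative arithmetic (assuming the channel load is also on the 1/256
+ * scale): with a->grade = 100, b->grade = 80 and a primary load of 128
+ * (~50%), the combined grade is 100 + (80 * 128) / 256 = 140.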
+ */
+static
+unsigned int iwl_mld_get_emlsr_grade(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ struct iwl_mld_link_sel_data *a,
+ struct iwl_mld_link_sel_data *b,
+ u8 *primary_id)
+{
+ struct ieee80211_bss_conf *primary_conf;
+ struct wiphy *wiphy = ieee80211_vif_to_wdev(vif)->wiphy;
+ unsigned int primary_load;
+
+ lockdep_assert_wiphy(wiphy);
+
+ /* a is always primary, b is always secondary */
+ if (b->grade > a->grade)
+ swap(a, b);
+
+ *primary_id = a->link_id;
+
+ if (!iwl_mld_valid_emlsr_pair(vif, a, b))
+ return 0;
+
+ primary_conf = wiphy_dereference(wiphy, vif->link_conf[*primary_id]);
+
+ if (WARN_ON_ONCE(!primary_conf))
+ return 0;
+
+ primary_load = iwl_mld_get_chan_load(mld, primary_conf);
+
+ /* The more the primary link is loaded, the more worthwhile EMLSR becomes */
+ return a->grade + ((b->grade * primary_load) / SCALE_FACTOR);
+}
+
+static void _iwl_mld_select_links(struct iwl_mld *mld,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mld_link_sel_data data[IEEE80211_MLD_MAX_NUM_LINKS];
+ struct iwl_mld_link_sel_data *best_link;
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+ int max_active_links = iwl_mld_max_active_links(mld, vif);
+ u16 new_active, usable_links = ieee80211_vif_usable_links(vif);
+ u8 best_idx, new_primary, n_data;
+ u16 max_grade;
+
+ lockdep_assert_wiphy(mld->wiphy);
+
+ if (!mld_vif->authorized || hweight16(usable_links) <= 1)
+ return;
+
+ if (WARN(ktime_before(mld->scan.last_mlo_scan_time,
+ ktime_sub_ns(ktime_get_boottime_ns(),
+ 5ULL * NSEC_PER_SEC)),
+ "Last MLO scan was too long ago, can't select links\n"))
+ return;
+
+ /* The logic below is simple and not suited for more than 2 links */
+ WARN_ON_ONCE(max_active_links > 2);
+
+ n_data = iwl_mld_set_link_sel_data(mld, vif, data, usable_links,
+ &best_idx);
+
+ if (WARN(!n_data, "Couldn't find a valid grade for any link!\n"))
+ return;
+
+ /* Default to selecting the single best link */
+ best_link = &data[best_idx];
+ new_primary = best_link->link_id;
+ new_active = BIT(best_link->link_id);
+ max_grade = best_link->grade;
+
+ /* If EMLSR is not possible, activate the best link */
+ if (max_active_links == 1 || n_data == 1 ||
+ !iwl_mld_vif_has_emlsr_cap(vif) || !IWL_MLD_AUTO_EML_ENABLE ||
+ mld_vif->emlsr.blocked_reasons)
+ goto set_active;
+
+ /* Try to find the best link combination */
+ for (u8 a = 0; a < n_data; a++) {
+ for (u8 b = a + 1; b < n_data; b++) {
+ u8 best_in_pair;
+ u16 emlsr_grade =
+ iwl_mld_get_emlsr_grade(mld, vif,
+ &data[a], &data[b],
+ &best_in_pair);
+
+ /*
+			 * On an equal grade, prefer the (new) EMLSR
+			 * combination, so that EMLSR wins over a single link.
+ */
+ if (emlsr_grade < max_grade)
+ continue;
+
+ max_grade = emlsr_grade;
+ new_primary = best_in_pair;
+ new_active = BIT(data[a].link_id) |
+ BIT(data[b].link_id);
+ }
+ }
+
+set_active:
+ IWL_DEBUG_INFO(mld, "Link selection result: 0x%x. Primary = %d\n",
+ new_active, new_primary);
+
+ mld_vif->emlsr.selected_primary = new_primary;
+ mld_vif->emlsr.selected_links = new_active;
+
+ ieee80211_set_active_links_async(vif, new_active);
+}
+
+static void iwl_mld_vif_iter_select_links(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+ struct iwl_mld *mld = mld_vif->mld;
+
+ _iwl_mld_select_links(mld, vif);
+}
+
+void iwl_mld_select_links(struct iwl_mld *mld)
+{
+ ieee80211_iterate_active_interfaces_mtx(mld->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mld_vif_iter_select_links,
+ NULL);
+}
+
+static void iwl_mld_emlsr_check_bt_iter(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+ struct iwl_mld *mld = mld_vif->mld;
+ struct ieee80211_bss_conf *link;
+ unsigned int link_id;
+
+ if (!mld->bt_is_active) {
+ iwl_mld_retry_emlsr(mld, vif);
+ return;
+ }
+
+ /* BT is turned ON but we are not in EMLSR, nothing to do */
+ if (!iwl_mld_emlsr_active(vif))
+ return;
+
+ /* In EMLSR and BT is turned ON */
+
+ for_each_vif_active_link(vif, link, link_id) {
+ if (WARN_ON(!link->chanreq.oper.chan))
+ continue;
+
+ if (link->chanreq.oper.chan->band == NL80211_BAND_2GHZ) {
+ iwl_mld_exit_emlsr(mld, vif, IWL_MLD_EMLSR_EXIT_BT_COEX,
+ iwl_mld_get_primary_link(vif));
+ return;
+ }
+ }
+}
+
+void iwl_mld_emlsr_check_bt(struct iwl_mld *mld)
+{
+ ieee80211_iterate_active_interfaces_mtx(mld->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mld_emlsr_check_bt_iter,
+ NULL);
+}
+
+struct iwl_mld_chan_load_data {
+ struct iwl_mld_phy *phy;
+ u32 prev_chan_load_not_by_us;
+};
+
+static void iwl_mld_chan_load_update_iter(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mld_chan_load_data *data = _data;
+ const struct iwl_mld_phy *phy = data->phy;
+ struct ieee80211_chanctx_conf *chanctx =
+ container_of((const void *)phy, struct ieee80211_chanctx_conf,
+ drv_priv);
+ struct iwl_mld *mld = iwl_mld_vif_from_mac80211(vif)->mld;
+ struct ieee80211_bss_conf *prim_link;
+ unsigned int prim_link_id;
+
+ prim_link_id = iwl_mld_get_primary_link(vif);
+ prim_link = link_conf_dereference_protected(vif, prim_link_id);
+
+ if (WARN_ON(!prim_link))
+ return;
+
+ if (chanctx != rcu_access_pointer(prim_link->chanctx_conf))
+ return;
+
+ if (iwl_mld_emlsr_active(vif)) {
+ int chan_load = iwl_mld_get_chan_load_by_others(mld, prim_link,
+ true);
+
+ if (chan_load < 0)
+ return;
+
+ /* chan_load is in range [0,255] */
+ if (chan_load < NORMALIZE_PERCENT_TO_255(IWL_MLD_EXIT_EMLSR_CHAN_LOAD))
+ iwl_mld_exit_emlsr(mld, vif,
+ IWL_MLD_EMLSR_EXIT_CHAN_LOAD,
+ prim_link_id);
+ } else {
+ u32 old_chan_load = data->prev_chan_load_not_by_us;
+ u32 new_chan_load = phy->avg_channel_load_not_by_us;
+ u32 min_thresh = iwl_mld_get_min_chan_load_thresh(chanctx);
+
+#define THRESHOLD_CROSSED(threshold) \
+ (old_chan_load <= (threshold) && new_chan_load > (threshold))
+
+ if (THRESHOLD_CROSSED(min_thresh) || THRESHOLD_CROSSED(25) ||
+ THRESHOLD_CROSSED(40) || THRESHOLD_CROSSED(50))
+ iwl_mld_retry_emlsr(mld, vif);
+#undef THRESHOLD_CROSSED
+ }
+}
+
+void iwl_mld_emlsr_check_chan_load(struct ieee80211_hw *hw,
+ struct iwl_mld_phy *phy,
+ u32 prev_chan_load_not_by_us)
+{
+ struct iwl_mld_chan_load_data data = {
+ .phy = phy,
+ .prev_chan_load_not_by_us = prev_chan_load_not_by_us,
+ };
+
+ ieee80211_iterate_active_interfaces_mtx(hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mld_chan_load_update_iter,
+ &data);
+}
+
+void iwl_mld_retry_emlsr(struct iwl_mld *mld, struct ieee80211_vif *vif)
+{
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+
+ if (!iwl_mld_vif_has_emlsr_cap(vif) || iwl_mld_emlsr_active(vif) ||
+ mld_vif->emlsr.blocked_reasons)
+ return;
+
+ iwl_mld_int_mlo_scan(mld, vif);
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/mlo.h b/drivers/net/wireless/intel/iwlwifi/mld/mlo.h
new file mode 100644
index 000000000000..4fb1fdbe3df9
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mld/mlo.h
@@ -0,0 +1,167 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2024-2025 Intel Corporation
+ */
+#ifndef __iwl_mld_mlo_h__
+#define __iwl_mld_mlo_h__
+
+#include <linux/ieee80211.h>
+#include <linux/types.h>
+#include <net/mac80211.h>
+#include "iwl-config.h"
+#include "iwl-trans.h"
+#include "iface.h"
+#include "phy.h"
+
+struct iwl_mld;
+
+void iwl_mld_emlsr_prevent_done_wk(struct wiphy *wiphy, struct wiphy_work *wk);
+void iwl_mld_emlsr_tmp_non_bss_done_wk(struct wiphy *wiphy,
+ struct wiphy_work *wk);
+
+static inline bool iwl_mld_emlsr_active(struct ieee80211_vif *vif)
+{
+ /* Set on phy context activation, so should be a good proxy */
+ return !!(vif->driver_flags & IEEE80211_VIF_EML_ACTIVE);
+}
+
+static inline bool iwl_mld_vif_has_emlsr_cap(struct ieee80211_vif *vif)
+{
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+
+ /* We only track/permit EMLSR state once authorized */
+ if (!mld_vif->authorized)
+ return false;
+
+ /* No EMLSR on dual radio devices */
+ return ieee80211_vif_type_p2p(vif) == NL80211_IFTYPE_STATION &&
+ ieee80211_vif_is_mld(vif) &&
+ vif->cfg.eml_cap & IEEE80211_EML_CAP_EMLSR_SUPP &&
+ !CSR_HW_RFID_IS_CDB(mld_vif->mld->trans->hw_rf_id);
+}
+
+static inline int
+iwl_mld_max_active_links(struct iwl_mld *mld, struct ieee80211_vif *vif)
+{
+ if (vif->type == NL80211_IFTYPE_AP)
+ return mld->fw->ucode_capa.num_beacons;
+
+ if (ieee80211_vif_type_p2p(vif) == NL80211_IFTYPE_STATION)
+ return IWL_FW_MAX_ACTIVE_LINKS_NUM;
+
+ /* For now, do not accept more links on other interface types */
+ return 1;
+}
+
+static inline int
+iwl_mld_count_active_links(struct iwl_mld *mld, struct ieee80211_vif *vif)
+{
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+ struct iwl_mld_link *mld_link;
+ int n_active = 0;
+
+ for_each_mld_vif_valid_link(mld_vif, mld_link) {
+ if (rcu_access_pointer(mld_link->chan_ctx))
+ n_active++;
+ }
+
+ return n_active;
+}
+
+static inline u8 iwl_mld_get_primary_link(struct ieee80211_vif *vif)
+{
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+
+ lockdep_assert_wiphy(mld_vif->mld->wiphy);
+
+ if (!ieee80211_vif_is_mld(vif) || WARN_ON(!vif->active_links))
+ return 0;
+
+ /* In AP mode, there is no primary link */
+ if (vif->type == NL80211_IFTYPE_AP)
+ return __ffs(vif->active_links);
+
+ if (iwl_mld_emlsr_active(vif) &&
+ !WARN_ON(!(BIT(mld_vif->emlsr.primary) & vif->active_links)))
+ return mld_vif->emlsr.primary;
+
+ return __ffs(vif->active_links);
+}
+
+/*
+ * For non-MLO/single link, this will return the deflink/single active link,
+ * respectively
+ */
+static inline u8 iwl_mld_get_other_link(struct ieee80211_vif *vif, u8 link_id)
+{
+ switch (hweight16(vif->active_links)) {
+ case 0:
+ return 0;
+ default:
+ WARN_ON(1);
+ fallthrough;
+ case 1:
+ return __ffs(vif->active_links);
+ case 2:
+ return __ffs(vif->active_links & ~BIT(link_id));
+ }
+}
+
+s8 iwl_mld_get_emlsr_rssi_thresh(struct iwl_mld *mld,
+ const struct cfg80211_chan_def *chandef,
+ bool low);
+
+/* EMLSR block/unblock and exit */
+void iwl_mld_block_emlsr(struct iwl_mld *mld, struct ieee80211_vif *vif,
+ enum iwl_mld_emlsr_blocked reason, u8 link_to_keep);
+int iwl_mld_block_emlsr_sync(struct iwl_mld *mld, struct ieee80211_vif *vif,
+ enum iwl_mld_emlsr_blocked reason, u8 link_to_keep);
+void iwl_mld_unblock_emlsr(struct iwl_mld *mld, struct ieee80211_vif *vif,
+ enum iwl_mld_emlsr_blocked reason);
+void iwl_mld_exit_emlsr(struct iwl_mld *mld, struct ieee80211_vif *vif,
+ enum iwl_mld_emlsr_exit exit, u8 link_to_keep);
+
+int iwl_mld_emlsr_check_non_bss_block(struct iwl_mld *mld,
+ int pending_link_changes);
+
+void iwl_mld_handle_emlsr_mode_notif(struct iwl_mld *mld,
+ struct iwl_rx_packet *pkt);
+void iwl_mld_handle_emlsr_trans_fail_notif(struct iwl_mld *mld,
+ struct iwl_rx_packet *pkt);
+
+void iwl_mld_emlsr_check_tpt(struct wiphy *wiphy, struct wiphy_work *wk);
+void iwl_mld_emlsr_unblock_tpt_wk(struct wiphy *wiphy, struct wiphy_work *wk);
+
+void iwl_mld_select_links(struct iwl_mld *mld);
+
+void iwl_mld_emlsr_check_bt(struct iwl_mld *mld);
+
+void iwl_mld_emlsr_check_chan_load(struct ieee80211_hw *hw,
+ struct iwl_mld_phy *phy,
+ u32 prev_chan_load_not_by_us);
+
+/**
+ * iwl_mld_retry_emlsr - Retry entering EMLSR
+ * @mld: MLD context
+ * @vif: VIF to retry EMLSR on
+ *
+ * Retry entering EMLSR on the given VIF.
+ * Use this if one of the parameters that can prevent EMLSR has changed.
+ */
+void iwl_mld_retry_emlsr(struct iwl_mld *mld, struct ieee80211_vif *vif);
+
+struct iwl_mld_link_sel_data {
+ u8 link_id;
+ const struct cfg80211_chan_def *chandef;
+ s32 signal;
+ u16 grade;
+};
+
+#if IS_ENABLED(CONFIG_IWLWIFI_KUNIT_TESTS)
+bool iwl_mld_channel_load_allows_emlsr(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ const struct iwl_mld_link_sel_data *a,
+ const struct iwl_mld_link_sel_data *b);
+#endif
+
+#endif /* __iwl_mld_mlo_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/notif.c b/drivers/net/wireless/intel/iwlwifi/mld/notif.c
new file mode 100644
index 000000000000..fc18cba8aaa8
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mld/notif.c
@@ -0,0 +1,759 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2024-2025 Intel Corporation
+ */
+
+#include "mld.h"
+#include "notif.h"
+#include "scan.h"
+#include "iface.h"
+#include "mlo.h"
+#include "iwl-trans.h"
+#include "fw/file.h"
+#include "fw/dbg.h"
+#include "fw/api/cmdhdr.h"
+#include "fw/api/mac-cfg.h"
+#include "session-protect.h"
+#include "fw/api/time-event.h"
+#include "fw/api/tx.h"
+#include "fw/api/rs.h"
+#include "fw/api/offload.h"
+#include "fw/api/stats.h"
+#include "fw/api/rfi.h"
+#include "fw/api/coex.h"
+
+#include "mcc.h"
+#include "link.h"
+#include "tx.h"
+#include "rx.h"
+#include "tlc.h"
+#include "agg.h"
+#include "mac80211.h"
+#include "thermal.h"
+#include "roc.h"
+#include "stats.h"
+#include "coex.h"
+#include "time_sync.h"
+#include "ftm-initiator.h"
+
+/* Please use this in an increasing order of the versions */
+#define CMD_VER_ENTRY(_ver, _struct) \
+ { .size = sizeof(struct _struct), .ver = _ver },
+#define CMD_VERSIONS(name, ...) \
+ static const struct iwl_notif_struct_size \
+ iwl_notif_struct_sizes_##name[] = { __VA_ARGS__ };
+
+#define RX_HANDLER_NO_OBJECT(_grp, _cmd, _name, _context) \
+ {.cmd_id = WIDE_ID(_grp, _cmd), \
+ .context = _context, \
+ .fn = iwl_mld_handle_##_name, \
+ .sizes = iwl_notif_struct_sizes_##_name, \
+ .n_sizes = ARRAY_SIZE(iwl_notif_struct_sizes_##_name), \
+ },
+
+/* Use this for Rx handlers that do not need notification validation */
+#define RX_HANDLER_NO_VAL(_grp, _cmd, _name, _context) \
+ {.cmd_id = WIDE_ID(_grp, _cmd), \
+ .context = _context, \
+ .fn = iwl_mld_handle_##_name, \
+ },
+
+#define RX_HANDLER_VAL_FN(_grp, _cmd, _name, _context) \
+ { .cmd_id = WIDE_ID(_grp, _cmd), \
+ .context = _context, \
+ .fn = iwl_mld_handle_##_name, \
+ .val_fn = iwl_mld_validate_##_name, \
+ },
+
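+/* Create a trivial cancellation function that compares the notification's
+ * id_member (handling __le32, __le16 and u8 members via _Generic()) against
+ * the given object id.
+ */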
+#define DEFINE_SIMPLE_CANCELLATION(name, notif_struct, id_member) \
+static bool iwl_mld_cancel_##name##_notif(struct iwl_mld *mld, \
+ struct iwl_rx_packet *pkt, \
+ u32 obj_id) \
+{ \
+ const struct notif_struct *notif = (const void *)pkt->data; \
+ \
+ return obj_id == _Generic((notif)->id_member, \
+ __le32: le32_to_cpu((notif)->id_member), \
+ __le16: le16_to_cpu((notif)->id_member), \
+ u8: (notif)->id_member); \
+}
+
+static bool iwl_mld_always_cancel(struct iwl_mld *mld,
+ struct iwl_rx_packet *pkt,
+ u32 obj_id)
+{
+ return true;
+}
+
+/* Currently only defined for handlers that declare notification sizes. Use
+ * this for notifications that belong to a specific object, and that should
+ * be canceled when the object is removed.
+ */
+#define RX_HANDLER_OF_OBJ(_grp, _cmd, _name, _obj_type) \
+ {.cmd_id = WIDE_ID(_grp, _cmd), \
+ /* Only async handlers can be canceled */ \
+ .context = RX_HANDLER_ASYNC, \
+ .fn = iwl_mld_handle_##_name, \
+ .sizes = iwl_notif_struct_sizes_##_name, \
+ .n_sizes = ARRAY_SIZE(iwl_notif_struct_sizes_##_name), \
+ .obj_type = IWL_MLD_OBJECT_TYPE_##_obj_type, \
+ .cancel = iwl_mld_cancel_##_name, \
+ },
+
+#define RX_HANDLER_OF_LINK(_grp, _cmd, _name) \
+ RX_HANDLER_OF_OBJ(_grp, _cmd, _name, LINK) \
+
+#define RX_HANDLER_OF_VIF(_grp, _cmd, _name) \
+ RX_HANDLER_OF_OBJ(_grp, _cmd, _name, VIF) \
+
+#define RX_HANDLER_OF_STA(_grp, _cmd, _name) \
+ RX_HANDLER_OF_OBJ(_grp, _cmd, _name, STA) \
+
+#define RX_HANDLER_OF_ROC(_grp, _cmd, _name) \
+ RX_HANDLER_OF_OBJ(_grp, _cmd, _name, ROC)
+
+#define RX_HANDLER_OF_SCAN(_grp, _cmd, _name) \
+ RX_HANDLER_OF_OBJ(_grp, _cmd, _name, SCAN)
+
+#define RX_HANDLER_OF_FTM_REQ(_grp, _cmd, _name) \
+ RX_HANDLER_OF_OBJ(_grp, _cmd, _name, FTM_REQ)
+
+static void iwl_mld_handle_mfuart_notif(struct iwl_mld *mld,
+ struct iwl_rx_packet *pkt)
+{
+ struct iwl_mfuart_load_notif *mfuart_notif = (void *)pkt->data;
+
+ IWL_DEBUG_INFO(mld,
+ "MFUART: installed ver: 0x%08x, external ver: 0x%08x\n",
+ le32_to_cpu(mfuart_notif->installed_ver),
+ le32_to_cpu(mfuart_notif->external_ver));
+ IWL_DEBUG_INFO(mld,
+ "MFUART: status: 0x%08x, duration: 0x%08x image size: 0x%08x\n",
+ le32_to_cpu(mfuart_notif->status),
+ le32_to_cpu(mfuart_notif->duration),
+ le32_to_cpu(mfuart_notif->image_size));
+}
+
+static void iwl_mld_mu_mimo_iface_iterator(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
+ unsigned int link_id = 0;
+
+ if (WARN(hweight16(vif->active_links) > 1,
+ "no support for this notif while in EMLSR 0x%x\n",
+ vif->active_links))
+ return;
+
+ if (ieee80211_vif_is_mld(vif)) {
+ link_id = __ffs(vif->active_links);
+ bss_conf = link_conf_dereference_check(vif, link_id);
+ }
+
+ if (!WARN_ON(!bss_conf) && bss_conf->mu_mimo_owner) {
+ const struct iwl_mu_group_mgmt_notif *notif = _data;
+
+ BUILD_BUG_ON(sizeof(notif->membership_status) !=
+ WLAN_MEMBERSHIP_LEN);
+ BUILD_BUG_ON(sizeof(notif->user_position) !=
+ WLAN_USER_POSITION_LEN);
+
+ /* MU-MIMO Group Id action frame is little endian. We treat
+ * the data received from firmware as if it came from the
+ * action frame, so no conversion is needed.
+ */
+ ieee80211_update_mu_groups(vif, link_id,
+ (u8 *)&notif->membership_status,
+ (u8 *)&notif->user_position);
+ }
+}
+
+/* This handler is called in SYNC mode because it needs to be serialized with
+ * Rx as specified in ieee80211_update_mu_groups()'s documentation.
+ */
+static void iwl_mld_handle_mu_mimo_grp_notif(struct iwl_mld *mld,
+ struct iwl_rx_packet *pkt)
+{
+ struct iwl_mu_group_mgmt_notif *notif = (void *)pkt->data;
+
+ ieee80211_iterate_active_interfaces_atomic(mld->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mld_mu_mimo_iface_iterator,
+ notif);
+}
+
+static void
+iwl_mld_handle_stored_beacon_notif(struct iwl_mld *mld,
+ struct iwl_rx_packet *pkt)
+{
+ unsigned int pkt_len = iwl_rx_packet_payload_len(pkt);
+ struct iwl_stored_beacon_notif *sb = (void *)pkt->data;
+ struct ieee80211_rx_status rx_status = {};
+ struct sk_buff *skb;
+ u32 size = le32_to_cpu(sb->common.byte_count);
+
+ if (size == 0)
+ return;
+
+ if (pkt_len < struct_size(sb, data, size))
+ return;
+
+ skb = alloc_skb(size, GFP_ATOMIC);
+ if (!skb) {
+ IWL_ERR(mld, "alloc_skb failed\n");
+ return;
+ }
+
+ /* update rx_status according to the notification's metadata */
+ rx_status.mactime = le64_to_cpu(sb->common.tsf);
+ /* TSF as indicated by the firmware is at INA time */
+ rx_status.flag |= RX_FLAG_MACTIME_PLCP_START;
+ rx_status.device_timestamp = le32_to_cpu(sb->common.system_time);
+ rx_status.band =
+ iwl_mld_phy_band_to_nl80211(le16_to_cpu(sb->common.band));
+ rx_status.freq =
+ ieee80211_channel_to_frequency(le16_to_cpu(sb->common.channel),
+ rx_status.band);
+
+ /* copy the data */
+ skb_put_data(skb, sb->data, size);
+ memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status));
+
+ /* pass it as regular rx to mac80211 */
+ ieee80211_rx_napi(mld->hw, NULL, skb, NULL);
+}
+
+static void
+iwl_mld_handle_channel_switch_start_notif(struct iwl_mld *mld,
+ struct iwl_rx_packet *pkt)
+{
+ struct iwl_channel_switch_start_notif *notif = (void *)pkt->data;
+ u32 link_id = le32_to_cpu(notif->link_id);
+ struct ieee80211_bss_conf *link_conf =
+ iwl_mld_fw_id_to_link_conf(mld, link_id);
+ struct ieee80211_vif *vif;
+
+ if (WARN_ON(!link_conf))
+ return;
+
+ vif = link_conf->vif;
+
+ IWL_DEBUG_INFO(mld,
+ "CSA Start Notification with vif type: %d, link_id: %d\n",
+ vif->type,
+ link_conf->link_id);
+
+ switch (vif->type) {
+ case NL80211_IFTYPE_AP:
+ /* We don't support canceling a CSA as it was advertised
+ * by the AP itself
+ */
+ if (!link_conf->csa_active)
+ return;
+
+ ieee80211_csa_finish(vif, link_conf->link_id);
+ break;
+ case NL80211_IFTYPE_STATION:
+ if (!link_conf->csa_active) {
+ /* Either unexpected cs notif or mac80211 chose to
+ * ignore, for example in channel switch to same channel
+ */
+ struct iwl_cancel_channel_switch_cmd cmd = {
+ .id = cpu_to_le32(link_id),
+ };
+
+ if (iwl_mld_send_cmd_pdu(mld,
+ WIDE_ID(MAC_CONF_GROUP,
+ CANCEL_CHANNEL_SWITCH_CMD),
+ &cmd))
+ IWL_ERR(mld,
+ "Failed to cancel the channel switch\n");
+ return;
+ }
+
+ ieee80211_chswitch_done(vif, true, link_conf->link_id);
+ break;
+
+ default:
+ WARN(1, "CSA on invalid vif type: %d", vif->type);
+ }
+}
+
+static void
+iwl_mld_handle_channel_switch_error_notif(struct iwl_mld *mld,
+ struct iwl_rx_packet *pkt)
+{
+ struct iwl_channel_switch_error_notif *notif = (void *)pkt->data;
+ struct ieee80211_bss_conf *link_conf;
+ struct ieee80211_vif *vif;
+ u32 link_id = le32_to_cpu(notif->link_id);
+ u32 csa_err_mask = le32_to_cpu(notif->csa_err_mask);
+
+ link_conf = iwl_mld_fw_id_to_link_conf(mld, link_id);
+ if (WARN_ON(!link_conf))
+ return;
+
+ vif = link_conf->vif;
+
+ IWL_DEBUG_INFO(mld, "FW reports CSA error: id=%u, csa_err_mask=%u\n",
+ link_id, csa_err_mask);
+
+ if (csa_err_mask & (CS_ERR_COUNT_ERROR |
+ CS_ERR_LONG_DELAY_AFTER_CS |
+ CS_ERR_TX_BLOCK_TIMER_EXPIRED))
+ ieee80211_channel_switch_disconnect(vif);
+}
+
+static void iwl_mld_handle_beacon_notification(struct iwl_mld *mld,
+ struct iwl_rx_packet *pkt)
+{
+ struct iwl_extended_beacon_notif *beacon = (void *)pkt->data;
+
+ mld->ibss_manager = !!beacon->ibss_mgr_status;
+}
+
+/**
+ * DOC: Notification versioning
+ *
+ * The firmware's notifications change from time to time. In order to
+ * differentiate between different versions of the same notification, the
+ * firmware advertises the version of each notification.
+ * Here are listed all the notifications that are supported. Several versions
+ * of the same notification can be allowed at the same time:
+ *
+ * CMD_VERSIONS(my_multi_version_notif,
+ * CMD_VER_ENTRY(1, iwl_my_multi_version_notif_ver1)
+ * CMD_VER_ENTRY(2, iwl_my_multi_version_notif_ver2)
+ *
+ * etc...
+ *
+ * The driver will enforce that the notification coming from the firmware
+ * has its version listed here and it'll also enforce that the firmware sent
+ * at least enough bytes to cover the structure listed in the CMD_VER_ENTRY.
+ */
+
+CMD_VERSIONS(scan_complete_notif,
+ CMD_VER_ENTRY(1, iwl_umac_scan_complete))
+CMD_VERSIONS(scan_iter_complete_notif,
+ CMD_VER_ENTRY(2, iwl_umac_scan_iter_complete_notif))
+CMD_VERSIONS(mfuart_notif,
+ CMD_VER_ENTRY(2, iwl_mfuart_load_notif))
+CMD_VERSIONS(update_mcc,
+ CMD_VER_ENTRY(1, iwl_mcc_chub_notif))
+CMD_VERSIONS(session_prot_notif,
+ CMD_VER_ENTRY(3, iwl_session_prot_notif))
+CMD_VERSIONS(missed_beacon_notif,
+ CMD_VER_ENTRY(5, iwl_missed_beacons_notif))
+CMD_VERSIONS(tx_resp_notif,
+ CMD_VER_ENTRY(8, iwl_tx_resp))
+CMD_VERSIONS(compressed_ba_notif,
+ CMD_VER_ENTRY(5, iwl_compressed_ba_notif)
+ CMD_VER_ENTRY(6, iwl_compressed_ba_notif))
+CMD_VERSIONS(tlc_notif,
+ CMD_VER_ENTRY(3, iwl_tlc_update_notif))
+CMD_VERSIONS(mu_mimo_grp_notif,
+ CMD_VER_ENTRY(1, iwl_mu_group_mgmt_notif))
+CMD_VERSIONS(channel_switch_start_notif,
+ CMD_VER_ENTRY(3, iwl_channel_switch_start_notif))
+CMD_VERSIONS(channel_switch_error_notif,
+ CMD_VER_ENTRY(2, iwl_channel_switch_error_notif))
+CMD_VERSIONS(ct_kill_notif,
+ CMD_VER_ENTRY(2, ct_kill_notif))
+CMD_VERSIONS(temp_notif,
+ CMD_VER_ENTRY(2, iwl_dts_measurement_notif))
+CMD_VERSIONS(stored_beacon_notif,
+ CMD_VER_ENTRY(4, iwl_stored_beacon_notif))
+CMD_VERSIONS(roc_notif,
+ CMD_VER_ENTRY(1, iwl_roc_notif))
+CMD_VERSIONS(probe_resp_data_notif,
+ CMD_VER_ENTRY(1, iwl_probe_resp_data_notif))
+CMD_VERSIONS(datapath_monitor_notif,
+ CMD_VER_ENTRY(1, iwl_datapath_monitor_notif))
+CMD_VERSIONS(stats_oper_notif,
+ CMD_VER_ENTRY(3, iwl_system_statistics_notif_oper))
+CMD_VERSIONS(stats_oper_part1_notif,
+ CMD_VER_ENTRY(4, iwl_system_statistics_part1_notif_oper))
+CMD_VERSIONS(bt_coex_notif,
+ CMD_VER_ENTRY(1, iwl_bt_coex_profile_notif))
+CMD_VERSIONS(beacon_notification,
+ CMD_VER_ENTRY(6, iwl_extended_beacon_notif))
+CMD_VERSIONS(emlsr_mode_notif,
+ CMD_VER_ENTRY(1, iwl_esr_mode_notif))
+CMD_VERSIONS(emlsr_trans_fail_notif,
+ CMD_VER_ENTRY(1, iwl_esr_trans_fail_notif))
+CMD_VERSIONS(uapsd_misbehaving_ap_notif,
+ CMD_VER_ENTRY(1, iwl_uapsd_misbehaving_ap_notif))
+CMD_VERSIONS(time_msmt_notif,
+ CMD_VER_ENTRY(1, iwl_time_msmt_notify))
+CMD_VERSIONS(time_sync_confirm_notif,
+ CMD_VER_ENTRY(1, iwl_time_msmt_cfm_notify))
+CMD_VERSIONS(omi_status_notif,
+ CMD_VER_ENTRY(1, iwl_omi_send_status_notif))
+CMD_VERSIONS(ftm_resp_notif, CMD_VER_ENTRY(9, iwl_tof_range_rsp_ntfy))
+
+DEFINE_SIMPLE_CANCELLATION(session_prot, iwl_session_prot_notif, mac_link_id)
+DEFINE_SIMPLE_CANCELLATION(tlc, iwl_tlc_update_notif, sta_id)
+DEFINE_SIMPLE_CANCELLATION(channel_switch_start,
+ iwl_channel_switch_start_notif, link_id)
+DEFINE_SIMPLE_CANCELLATION(channel_switch_error,
+ iwl_channel_switch_error_notif, link_id)
+DEFINE_SIMPLE_CANCELLATION(datapath_monitor, iwl_datapath_monitor_notif,
+ link_id)
+DEFINE_SIMPLE_CANCELLATION(roc, iwl_roc_notif, activity)
+DEFINE_SIMPLE_CANCELLATION(scan_complete, iwl_umac_scan_complete, uid)
+DEFINE_SIMPLE_CANCELLATION(probe_resp_data, iwl_probe_resp_data_notif,
+ mac_id)
+DEFINE_SIMPLE_CANCELLATION(uapsd_misbehaving_ap, iwl_uapsd_misbehaving_ap_notif,
+ mac_id)
+#define iwl_mld_cancel_omi_status_notif iwl_mld_always_cancel
+DEFINE_SIMPLE_CANCELLATION(ftm_resp, iwl_tof_range_rsp_ntfy, request_id)
+
+/**
+ * DOC: Handlers for fw notifications
+ *
+ * Here are listed the notifications IDs (including the group ID), the handler
+ * of the notification and how it should be called:
+ *
+ * - RX_HANDLER_SYNC: will be called as part of the Rx path
+ * - RX_HANDLER_ASYNC: will be handled in a wiphy work with the wiphy_lock held
+ *
+ * This means that if the firmware sends two notifications A and B in that
+ * order and notification A is RX_HANDLER_ASYNC and notification B is
+ * RX_HANDLER_SYNC, the handler of B will likely be called before the handler
+ * of A.
+ *
+ * This list should be in order of frequency for performance purposes.
+ * The handler can run in one of two contexts, see &iwl_rx_handler_context
+ *
+ * A handler can declare that it relies on a specific object in which case it
+ * can be cancelled in case the object is deleted. In order to use this
+ * mechanism, a cancellation function is needed. The cancellation function must
+ * receive an object id (the index of that object in the firmware) and a
+ * notification payload. It'll return true if that specific notification should
+ * be cancelled upon the removal of that specific instance of the object.
+ *
+ * DEFINE_SIMPLE_CANCELLATION makes it easy to create a cancellation function
+ * that simply returns true if a given object id matches the object id in
+ * the firmware notification.
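+ *
+ * For illustration only (not an actual notification in this driver), a
+ * notification tied to a link object, with a matching iwl_mld_handle_my_notif()
+ * handler, could be wired up roughly like this:
+ *
+ *	CMD_VERSIONS(my_notif, CMD_VER_ENTRY(1, iwl_my_notif))
+ *	DEFINE_SIMPLE_CANCELLATION(my, iwl_my_notif, link_id)
+ *	RX_HANDLER_OF_LINK(MY_GROUP, MY_NOTIF, my_notif)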
+ */
+
+VISIBLE_IF_IWLWIFI_KUNIT
+const struct iwl_rx_handler iwl_mld_rx_handlers[] = {
+ RX_HANDLER_NO_OBJECT(LEGACY_GROUP, TX_CMD, tx_resp_notif,
+ RX_HANDLER_SYNC)
+ RX_HANDLER_NO_OBJECT(LEGACY_GROUP, BA_NOTIF, compressed_ba_notif,
+ RX_HANDLER_SYNC)
+ RX_HANDLER_OF_SCAN(LEGACY_GROUP, SCAN_COMPLETE_UMAC,
+ scan_complete_notif)
+ RX_HANDLER_NO_OBJECT(LEGACY_GROUP, SCAN_ITERATION_COMPLETE_UMAC,
+ scan_iter_complete_notif,
+ RX_HANDLER_SYNC)
+ RX_HANDLER_NO_VAL(LEGACY_GROUP, MATCH_FOUND_NOTIFICATION,
+ match_found_notif, RX_HANDLER_SYNC)
+
+ RX_HANDLER_NO_OBJECT(STATISTICS_GROUP, STATISTICS_OPER_NOTIF,
+ stats_oper_notif, RX_HANDLER_ASYNC)
+ RX_HANDLER_NO_OBJECT(STATISTICS_GROUP, STATISTICS_OPER_PART1_NOTIF,
+ stats_oper_part1_notif, RX_HANDLER_ASYNC)
+
+ RX_HANDLER_NO_OBJECT(LEGACY_GROUP, MFUART_LOAD_NOTIFICATION,
+ mfuart_notif, RX_HANDLER_SYNC)
+
+ RX_HANDLER_NO_OBJECT(PHY_OPS_GROUP, DTS_MEASUREMENT_NOTIF_WIDE,
+ temp_notif, RX_HANDLER_ASYNC)
+ RX_HANDLER_OF_LINK(MAC_CONF_GROUP, SESSION_PROTECTION_NOTIF,
+ session_prot_notif)
+ RX_HANDLER_OF_LINK(MAC_CONF_GROUP, MISSED_BEACONS_NOTIF,
+ missed_beacon_notif)
+ RX_HANDLER_OF_STA(DATA_PATH_GROUP, TLC_MNG_UPDATE_NOTIF, tlc_notif)
+ RX_HANDLER_OF_LINK(MAC_CONF_GROUP, CHANNEL_SWITCH_START_NOTIF,
+ channel_switch_start_notif)
+ RX_HANDLER_OF_LINK(MAC_CONF_GROUP, CHANNEL_SWITCH_ERROR_NOTIF,
+ channel_switch_error_notif)
+ RX_HANDLER_OF_ROC(MAC_CONF_GROUP, ROC_NOTIF, roc_notif)
+ RX_HANDLER_NO_OBJECT(DATA_PATH_GROUP, MU_GROUP_MGMT_NOTIF,
+ mu_mimo_grp_notif, RX_HANDLER_SYNC)
+ RX_HANDLER_NO_OBJECT(PROT_OFFLOAD_GROUP, STORED_BEACON_NTF,
+ stored_beacon_notif, RX_HANDLER_SYNC)
+ RX_HANDLER_OF_VIF(MAC_CONF_GROUP, PROBE_RESPONSE_DATA_NOTIF,
+ probe_resp_data_notif)
+ RX_HANDLER_NO_OBJECT(PHY_OPS_GROUP, CT_KILL_NOTIFICATION,
+ ct_kill_notif, RX_HANDLER_ASYNC)
+ RX_HANDLER_OF_LINK(DATA_PATH_GROUP, MONITOR_NOTIF,
+ datapath_monitor_notif)
+ RX_HANDLER_NO_OBJECT(LEGACY_GROUP, MCC_CHUB_UPDATE_CMD, update_mcc,
+ RX_HANDLER_ASYNC)
+ RX_HANDLER_NO_OBJECT(BT_COEX_GROUP, PROFILE_NOTIF,
+ bt_coex_notif, RX_HANDLER_ASYNC)
+ RX_HANDLER_NO_OBJECT(LEGACY_GROUP, BEACON_NOTIFICATION,
+ beacon_notification, RX_HANDLER_ASYNC)
+ RX_HANDLER_NO_OBJECT(DATA_PATH_GROUP, ESR_MODE_NOTIF,
+ emlsr_mode_notif, RX_HANDLER_ASYNC)
+ RX_HANDLER_NO_OBJECT(MAC_CONF_GROUP, EMLSR_TRANS_FAIL_NOTIF,
+ emlsr_trans_fail_notif, RX_HANDLER_ASYNC)
+ RX_HANDLER_OF_VIF(LEGACY_GROUP, PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION,
+ uapsd_misbehaving_ap_notif)
+ RX_HANDLER_NO_OBJECT(LEGACY_GROUP,
+ WNM_80211V_TIMING_MEASUREMENT_NOTIFICATION,
+ time_msmt_notif, RX_HANDLER_SYNC)
+ RX_HANDLER_NO_OBJECT(LEGACY_GROUP,
+ WNM_80211V_TIMING_MEASUREMENT_CONFIRM_NOTIFICATION,
+ time_sync_confirm_notif, RX_HANDLER_ASYNC)
+ RX_HANDLER_OF_LINK(DATA_PATH_GROUP, OMI_SEND_STATUS_NOTIF,
+ omi_status_notif)
+ RX_HANDLER_OF_FTM_REQ(LOCATION_GROUP, TOF_RANGE_RESPONSE_NOTIF,
+ ftm_resp_notif)
+};
+EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_mld_rx_handlers);
+
+#if IS_ENABLED(CONFIG_IWLWIFI_KUNIT_TESTS)
+const unsigned int iwl_mld_rx_handlers_num = ARRAY_SIZE(iwl_mld_rx_handlers);
+EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_mld_rx_handlers_num);
+#endif
+
+static bool
+iwl_mld_notif_is_valid(struct iwl_mld *mld, struct iwl_rx_packet *pkt,
+ const struct iwl_rx_handler *handler)
+{
+ unsigned int size = iwl_rx_packet_payload_len(pkt);
+ size_t notif_ver;
+
+ /* If n_sizes == 0, it indicates that a validation function may be used
+ * or that no validation is required.
+ */
+ if (!handler->n_sizes) {
+ if (handler->val_fn)
+ return handler->val_fn(mld, pkt);
+ return true;
+ }
+
+ notif_ver = iwl_fw_lookup_notif_ver(mld->fw,
+ iwl_cmd_groupid(handler->cmd_id),
+ iwl_cmd_opcode(handler->cmd_id),
+ IWL_FW_CMD_VER_UNKNOWN);
+
+ for (int i = 0; i < handler->n_sizes; i++) {
+ if (handler->sizes[i].ver != notif_ver)
+ continue;
+
+ if (IWL_FW_CHECK(mld, size < handler->sizes[i].size,
+ "unexpected notification 0x%04x size %d, need %d\n",
+ handler->cmd_id, size, handler->sizes[i].size))
+ return false;
+ return true;
+ }
+
+ IWL_FW_CHECK_FAILED(mld,
+ "notif 0x%04x ver %zu missing expected size, use version %u size\n",
+ handler->cmd_id, notif_ver,
+ handler->sizes[handler->n_sizes - 1].ver);
+
+ return size < handler->sizes[handler->n_sizes - 1].size;
+}
+
+struct iwl_async_handler_entry {
+ struct list_head list;
+ struct iwl_rx_cmd_buffer rxb;
+ const struct iwl_rx_handler *rx_h;
+};
+
+static void
+iwl_mld_log_async_handler_op(struct iwl_mld *mld, const char *op,
+ struct iwl_rx_cmd_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+
+ IWL_DEBUG_HC(mld,
+ "%s async handler for notif %s (%.2x.%2x, seq 0x%x)\n",
+ op, iwl_get_cmd_string(mld->trans,
+ WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd)),
+ pkt->hdr.group_id, pkt->hdr.cmd,
+ le16_to_cpu(pkt->hdr.sequence));
+}
+
+static void iwl_mld_rx_notif(struct iwl_mld *mld,
+ struct iwl_rx_cmd_buffer *rxb,
+ struct iwl_rx_packet *pkt)
+{
+ for (int i = 0; i < ARRAY_SIZE(iwl_mld_rx_handlers); i++) {
+ const struct iwl_rx_handler *rx_h = &iwl_mld_rx_handlers[i];
+ struct iwl_async_handler_entry *entry;
+
+ if (rx_h->cmd_id != WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd))
+ continue;
+
+ if (!iwl_mld_notif_is_valid(mld, pkt, rx_h))
+ return;
+
+ if (rx_h->context == RX_HANDLER_SYNC) {
+ rx_h->fn(mld, pkt);
+ break;
+ }
+
+ entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
+ /* we can't do much... */
+ if (!entry)
+ return;
+
+ /* Set the async handler entry */
+ entry->rxb._page = rxb_steal_page(rxb);
+ entry->rxb._offset = rxb->_offset;
+ entry->rxb._rx_page_order = rxb->_rx_page_order;
+
+ entry->rx_h = rx_h;
+
+ /* Add it to the list and queue the work */
+ spin_lock(&mld->async_handlers_lock);
+ list_add_tail(&entry->list, &mld->async_handlers_list);
+ spin_unlock(&mld->async_handlers_lock);
+
+ wiphy_work_queue(mld->hw->wiphy,
+ &mld->async_handlers_wk);
+
+ iwl_mld_log_async_handler_op(mld, "Queued", rxb);
+ break;
+ }
+
+ iwl_notification_wait_notify(&mld->notif_wait, pkt);
+}
+
+void iwl_mld_rx(struct iwl_op_mode *op_mode, struct napi_struct *napi,
+ struct iwl_rx_cmd_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_mld *mld = IWL_OP_MODE_GET_MLD(op_mode);
+ u16 cmd_id = WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd);
+
+ if (likely(cmd_id == WIDE_ID(LEGACY_GROUP, REPLY_RX_MPDU_CMD)))
+ iwl_mld_rx_mpdu(mld, napi, rxb, 0);
+ else if (cmd_id == WIDE_ID(LEGACY_GROUP, FRAME_RELEASE))
+ iwl_mld_handle_frame_release_notif(mld, napi, pkt, 0);
+ else if (cmd_id == WIDE_ID(LEGACY_GROUP, BAR_FRAME_RELEASE))
+ iwl_mld_handle_bar_frame_release_notif(mld, napi, pkt, 0);
+ else if (unlikely(cmd_id == WIDE_ID(DATA_PATH_GROUP,
+ RX_QUEUES_NOTIFICATION)))
+ iwl_mld_handle_rx_queues_sync_notif(mld, napi, pkt, 0);
+ else if (cmd_id == WIDE_ID(DATA_PATH_GROUP, RX_NO_DATA_NOTIF))
+ iwl_mld_rx_monitor_no_data(mld, napi, pkt, 0);
+ else
+ iwl_mld_rx_notif(mld, rxb, pkt);
+}
+
+void iwl_mld_rx_rss(struct iwl_op_mode *op_mode, struct napi_struct *napi,
+ struct iwl_rx_cmd_buffer *rxb, unsigned int queue)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_mld *mld = IWL_OP_MODE_GET_MLD(op_mode);
+ u16 cmd_id = WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd);
+
+ if (unlikely(queue >= mld->trans->num_rx_queues))
+ return;
+
+ if (likely(cmd_id == WIDE_ID(LEGACY_GROUP, REPLY_RX_MPDU_CMD)))
+ iwl_mld_rx_mpdu(mld, napi, rxb, queue);
+ else if (unlikely(cmd_id == WIDE_ID(DATA_PATH_GROUP,
+ RX_QUEUES_NOTIFICATION)))
+ iwl_mld_handle_rx_queues_sync_notif(mld, napi, pkt, queue);
+ else if (unlikely(cmd_id == WIDE_ID(LEGACY_GROUP, FRAME_RELEASE)))
+ iwl_mld_handle_frame_release_notif(mld, napi, pkt, queue);
+}
+
+void iwl_mld_delete_handlers(struct iwl_mld *mld, const u16 *cmds, int n_cmds)
+{
+ struct iwl_async_handler_entry *entry, *tmp;
+
+ spin_lock_bh(&mld->async_handlers_lock);
+ list_for_each_entry_safe(entry, tmp, &mld->async_handlers_list, list) {
+ bool match = false;
+
+ for (int i = 0; i < n_cmds; i++) {
+ if (entry->rx_h->cmd_id == cmds[i]) {
+ match = true;
+ break;
+ }
+ }
+
+ if (!match)
+ continue;
+
+ iwl_mld_log_async_handler_op(mld, "Delete", &entry->rxb);
+ iwl_free_rxb(&entry->rxb);
+ list_del(&entry->list);
+ kfree(entry);
+ }
+ spin_unlock_bh(&mld->async_handlers_lock);
+}
+
+void iwl_mld_async_handlers_wk(struct wiphy *wiphy, struct wiphy_work *wk)
+{
+ struct iwl_mld *mld =
+ container_of(wk, struct iwl_mld, async_handlers_wk);
+ struct iwl_async_handler_entry *entry, *tmp;
+ LIST_HEAD(local_list);
+
+ /* Sync with Rx path with a lock. Remove all the entries from this
+ * list, add them to a local one (lock free), and then handle them.
+ */
+ spin_lock_bh(&mld->async_handlers_lock);
+ list_splice_init(&mld->async_handlers_list, &local_list);
+ spin_unlock_bh(&mld->async_handlers_lock);
+
+ list_for_each_entry_safe(entry, tmp, &local_list, list) {
+ iwl_mld_log_async_handler_op(mld, "Handle", &entry->rxb);
+ entry->rx_h->fn(mld, rxb_addr(&entry->rxb));
+ iwl_free_rxb(&entry->rxb);
+ list_del(&entry->list);
+ kfree(entry);
+ }
+}
+
+void iwl_mld_purge_async_handlers_list(struct iwl_mld *mld)
+{
+ struct iwl_async_handler_entry *entry, *tmp;
+
+ spin_lock_bh(&mld->async_handlers_lock);
+ list_for_each_entry_safe(entry, tmp, &mld->async_handlers_list, list) {
+ iwl_mld_log_async_handler_op(mld, "Purged", &entry->rxb);
+ iwl_free_rxb(&entry->rxb);
+ list_del(&entry->list);
+ kfree(entry);
+ }
+ spin_unlock_bh(&mld->async_handlers_lock);
+}
+
+void iwl_mld_cancel_notifications_of_object(struct iwl_mld *mld,
+ enum iwl_mld_object_type obj_type,
+ u32 obj_id)
+{
+ struct iwl_async_handler_entry *entry, *tmp;
+ LIST_HEAD(cancel_list);
+
+ lockdep_assert_wiphy(mld->wiphy);
+
+ if (WARN_ON(obj_type == IWL_MLD_OBJECT_TYPE_NONE))
+ return;
+
+ /* Sync with RX path and remove matching entries from the async list */
+ spin_lock_bh(&mld->async_handlers_lock);
+ list_for_each_entry_safe(entry, tmp, &mld->async_handlers_list, list) {
+ const struct iwl_rx_handler *rx_h = entry->rx_h;
+
+ if (rx_h->obj_type != obj_type || WARN_ON(!rx_h->cancel))
+ continue;
+
+ if (rx_h->cancel(mld, rxb_addr(&entry->rxb), obj_id)) {
+ iwl_mld_log_async_handler_op(mld, "Cancel", &entry->rxb);
+ list_del(&entry->list);
+ list_add_tail(&entry->list, &cancel_list);
+ }
+ }
+
+ spin_unlock_bh(&mld->async_handlers_lock);
+
+ /* Free the matching entries outside of the spinlock */
+ list_for_each_entry_safe(entry, tmp, &cancel_list, list) {
+ iwl_free_rxb(&entry->rxb);
+ list_del(&entry->list);
+ kfree(entry);
+ }
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/notif.h b/drivers/net/wireless/intel/iwlwifi/mld/notif.h
new file mode 100644
index 000000000000..2eaa1d4e138e
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mld/notif.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2024-2025 Intel Corporation
+ */
+#ifndef __iwl_mld_notif_h__
+#define __iwl_mld_notif_h__
+
+struct iwl_mld;
+
+void iwl_mld_rx(struct iwl_op_mode *op_mode, struct napi_struct *napi,
+ struct iwl_rx_cmd_buffer *rxb);
+
+void iwl_mld_rx_rss(struct iwl_op_mode *op_mode, struct napi_struct *napi,
+ struct iwl_rx_cmd_buffer *rxb, unsigned int queue);
+
+void iwl_mld_async_handlers_wk(struct wiphy *wiphy, struct wiphy_work *wk);
+
+void iwl_mld_purge_async_handlers_list(struct iwl_mld *mld);
+
+enum iwl_mld_object_type {
+ IWL_MLD_OBJECT_TYPE_NONE,
+ IWL_MLD_OBJECT_TYPE_LINK,
+ IWL_MLD_OBJECT_TYPE_STA,
+ IWL_MLD_OBJECT_TYPE_VIF,
+ IWL_MLD_OBJECT_TYPE_ROC,
+ IWL_MLD_OBJECT_TYPE_SCAN,
+ IWL_MLD_OBJECT_TYPE_FTM_REQ,
+};
+
+void iwl_mld_cancel_notifications_of_object(struct iwl_mld *mld,
+ enum iwl_mld_object_type obj_type,
+ u32 obj_id);
+void iwl_mld_delete_handlers(struct iwl_mld *mld, const u16 *cmds, int n_cmds);
+
+#endif /* __iwl_mld_notif_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/phy.c b/drivers/net/wireless/intel/iwlwifi/mld/phy.c
new file mode 100644
index 000000000000..2fbc8090088b
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mld/phy.c
@@ -0,0 +1,155 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2024-2025 Intel Corporation
+ */
+#include <net/mac80211.h>
+
+#include "phy.h"
+#include "hcmd.h"
+#include "fw/api/phy-ctxt.h"
+
+int iwl_mld_allocate_fw_phy_id(struct iwl_mld *mld)
+{
+ int id;
+ unsigned long used = mld->used_phy_ids;
+
+ for_each_clear_bit(id, &used, NUM_PHY_CTX) {
+ mld->used_phy_ids |= BIT(id);
+ return id;
+ }
+
+ return -ENOSPC;
+}
+EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_mld_allocate_fw_phy_id);
+
+struct iwl_mld_chanctx_usage_data {
+ struct iwl_mld *mld;
+ struct ieee80211_chanctx_conf *ctx;
+ bool use_def;
+};
+
+static bool iwl_mld_chanctx_fils_enabled(struct ieee80211_vif *vif,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ if (vif->type != NL80211_IFTYPE_AP)
+ return false;
+
+ return cfg80211_channel_is_psc(ctx->def.chan) ||
+ (ctx->def.chan->band == NL80211_BAND_6GHZ &&
+ ctx->def.width >= NL80211_CHAN_WIDTH_80);
+}
+
+static void iwl_mld_chanctx_usage_iter(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mld_chanctx_usage_data *data = _data;
+ struct ieee80211_bss_conf *link_conf;
+ int link_id;
+
+ for_each_vif_active_link(vif, link_conf, link_id) {
+ if (rcu_access_pointer(link_conf->chanctx_conf) != data->ctx)
+ continue;
+
+ if (iwl_mld_chanctx_fils_enabled(vif, data->ctx))
+ data->use_def = true;
+ }
+}
+
+struct cfg80211_chan_def *
+iwl_mld_get_chandef_from_chanctx(struct iwl_mld *mld,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ struct iwl_mld_chanctx_usage_data data = {
+ .mld = mld,
+ .ctx = ctx,
+ };
+
+ ieee80211_iterate_active_interfaces_mtx(mld->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mld_chanctx_usage_iter,
+ &data);
+
+ return data.use_def ? &ctx->def : &ctx->min_def;
+}
+
+static u8
+iwl_mld_nl80211_width_to_fw(enum nl80211_chan_width width)
+{
+ switch (width) {
+ case NL80211_CHAN_WIDTH_20_NOHT:
+ case NL80211_CHAN_WIDTH_20:
+ return IWL_PHY_CHANNEL_MODE20;
+ case NL80211_CHAN_WIDTH_40:
+ return IWL_PHY_CHANNEL_MODE40;
+ case NL80211_CHAN_WIDTH_80:
+ return IWL_PHY_CHANNEL_MODE80;
+ case NL80211_CHAN_WIDTH_160:
+ return IWL_PHY_CHANNEL_MODE160;
+ case NL80211_CHAN_WIDTH_320:
+ return IWL_PHY_CHANNEL_MODE320;
+ default:
+ WARN(1, "Invalid channel width=%u", width);
+ return IWL_PHY_CHANNEL_MODE20;
+ }
+}
+
+/* Maps the driver specific control channel position (relative to the center
+ * freq) definitions to the fw values
+ */
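+/* A rough worked example (assuming IWL_PHY_CTRL_POS_OFFS_MSK covers the two
+ * low bits, as the comments in the body imply): a control channel 30 MHz
+ * above the center frequency gives offs = 30, (30 - 10) / 20 = 1, and adding
+ * IWL_PHY_CTRL_POS_ABOVE yields 0b0101.
+ */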
+u8 iwl_mld_get_fw_ctrl_pos(const struct cfg80211_chan_def *chandef)
+{
+ int offs = chandef->chan->center_freq - chandef->center_freq1;
+ int abs_offs = abs(offs);
+ u8 ret;
+
+ if (offs == 0) {
+ /* The FW is expected to check the control channel position only
+ * when in HT/VHT and the channel width is not 20MHz. Return
+ * this value as the default one.
+ */
+ return 0;
+ }
+
+ /* this results in a value 0-7, i.e. fitting into 0b0111 */
+ ret = (abs_offs - 10) / 20;
+ /* But we need the value to be in 0b1011 because 0b0100 is
+ * IWL_PHY_CTRL_POS_ABOVE, so shift bit 2 up to land in
+ * IWL_PHY_CTRL_POS_OFFS_EXT (0b1000)
+ */
+ ret = (ret & IWL_PHY_CTRL_POS_OFFS_MSK) |
+ ((ret & BIT(2)) << 1);
+ /* and add the above bit */
+ ret |= (offs > 0) * IWL_PHY_CTRL_POS_ABOVE;
+
+ return ret;
+}
+
+int iwl_mld_phy_fw_action(struct iwl_mld *mld,
+ struct ieee80211_chanctx_conf *ctx, u32 action)
+{
+ struct iwl_mld_phy *phy = iwl_mld_phy_from_mac80211(ctx);
+ struct cfg80211_chan_def *chandef = &phy->chandef;
+ struct iwl_phy_context_cmd cmd = {
+ .id_and_color = cpu_to_le32(phy->fw_id),
+ .action = cpu_to_le32(action),
+ .puncture_mask = cpu_to_le16(chandef->punctured),
+ /* Channel info */
+ .ci.channel = cpu_to_le32(chandef->chan->hw_value),
+ .ci.band = iwl_mld_nl80211_band_to_fw(chandef->chan->band),
+ .ci.width = iwl_mld_nl80211_width_to_fw(chandef->width),
+ .ci.ctrl_pos = iwl_mld_get_fw_ctrl_pos(chandef),
+ };
+ int ret;
+
+ if (ctx->ap.chan) {
+ cmd.sbb_bandwidth =
+ iwl_mld_nl80211_width_to_fw(ctx->ap.width);
+ cmd.sbb_ctrl_channel_loc = iwl_mld_get_fw_ctrl_pos(&ctx->ap);
+ }
+
+ ret = iwl_mld_send_cmd_pdu(mld, PHY_CONTEXT_CMD, &cmd);
+ if (ret)
+ IWL_ERR(mld, "Failed to send PHY_CONTEXT_CMD ret = %d\n", ret);
+
+ return ret;
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/phy.h b/drivers/net/wireless/intel/iwlwifi/mld/phy.h
new file mode 100644
index 000000000000..2212a89321b7
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mld/phy.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2024-2025 Intel Corporation
+ */
+#ifndef __iwl_mld_phy_h__
+#define __iwl_mld_phy_h__
+
+#include "mld.h"
+
+/**
+ * struct iwl_mld_phy - PHY configuration parameters
+ *
+ * @fw_id: fw id of the phy.
+ * @chandef: the last chandef that mac80211 configured the driver
+ * with. Used to detect a no-op when the chanctx changes.
+ * @channel_load_by_us: channel load on this channel caused by
+ * the NIC itself, as indicated by firmware
+ * @avg_channel_load_not_by_us: averaged channel load on this channel caused by
+ * others. This value is invalid when in EMLSR (due to FW limitations)
+ * @mld: pointer to the MLD context
+ */
+struct iwl_mld_phy {
+ /* Add here fields that need cleanup on hw restart */
+ struct_group(zeroed_on_hw_restart,
+ u8 fw_id;
+ struct cfg80211_chan_def chandef;
+ );
+ /* And here fields that survive a hw restart */
+ u32 channel_load_by_us;
+ u32 avg_channel_load_not_by_us;
+ struct iwl_mld *mld;
+};
+
+static inline struct iwl_mld_phy *
+iwl_mld_phy_from_mac80211(struct ieee80211_chanctx_conf *channel)
+{
+ return (void *)channel->drv_priv;
+}
+
+/* Cleanup function for struct iwl_mld_phy, will be called in restart */
+static inline void
+iwl_mld_cleanup_phy(struct iwl_mld *mld, struct iwl_mld_phy *phy)
+{
+ CLEANUP_STRUCT(phy);
+}
+
+int iwl_mld_allocate_fw_phy_id(struct iwl_mld *mld);
+int iwl_mld_phy_fw_action(struct iwl_mld *mld,
+ struct ieee80211_chanctx_conf *ctx, u32 action);
+struct cfg80211_chan_def *
+iwl_mld_get_chandef_from_chanctx(struct iwl_mld *mld,
+ struct ieee80211_chanctx_conf *ctx);
+u8 iwl_mld_get_fw_ctrl_pos(const struct cfg80211_chan_def *chandef);
+
+#endif /* __iwl_mld_phy_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/power.c b/drivers/net/wireless/intel/iwlwifi/mld/power.c
new file mode 100644
index 000000000000..2f16c174b57e
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mld/power.c
@@ -0,0 +1,396 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2024-2025 Intel Corporation
+ */
+#include <net/mac80211.h>
+
+#include "mld.h"
+#include "hcmd.h"
+#include "power.h"
+#include "iface.h"
+#include "link.h"
+#include "constants.h"
+
+static void iwl_mld_vif_ps_iterator(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ bool *ps_enable = (bool *)data;
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+
+ if (vif->type != NL80211_IFTYPE_STATION)
+ return;
+
+ *ps_enable &= !mld_vif->ps_disabled;
+}
+
+int iwl_mld_update_device_power(struct iwl_mld *mld, bool d3)
+{
+ struct iwl_device_power_cmd cmd = {};
+ bool enable_ps = false;
+
+ if (iwlmld_mod_params.power_scheme != IWL_POWER_SCHEME_CAM) {
+ enable_ps = true;
+
+ /* Disable power save if any STA interface has
+ * power save turned off
+ */
+ ieee80211_iterate_active_interfaces_mtx(mld->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mld_vif_ps_iterator,
+ &enable_ps);
+ }
+
+ if (enable_ps)
+ cmd.flags |=
+ cpu_to_le16(DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK);
+
+ if (d3)
+ cmd.flags |=
+ cpu_to_le16(DEVICE_POWER_FLAGS_NO_SLEEP_TILL_D3_MSK);
+
+ IWL_DEBUG_POWER(mld,
+ "Sending device power command with flags = 0x%X\n",
+ cmd.flags);
+
+ return iwl_mld_send_cmd_pdu(mld, POWER_TABLE_CMD, &cmd);
+}
+
+int iwl_mld_enable_beacon_filter(struct iwl_mld *mld,
+ const struct ieee80211_bss_conf *link_conf,
+ bool d3)
+{
+ struct iwl_beacon_filter_cmd cmd = {
+ IWL_BF_CMD_CONFIG_DEFAULTS,
+ .bf_enable_beacon_filter = cpu_to_le32(1),
+ .ba_enable_beacon_abort = cpu_to_le32(1),
+ };
+
+ if (ieee80211_vif_type_p2p(link_conf->vif) != NL80211_IFTYPE_STATION)
+ return 0;
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ if (iwl_mld_vif_from_mac80211(link_conf->vif)->disable_bf)
+ return 0;
+#endif
+
+ if (link_conf->cqm_rssi_thold) {
+ cmd.bf_energy_delta =
+ cpu_to_le32(link_conf->cqm_rssi_hyst);
+ /* fw uses an absolute value for this */
+ cmd.bf_roaming_state =
+ cpu_to_le32(-link_conf->cqm_rssi_thold);
+ }
+
+ if (d3)
+ cmd.ba_escape_timer = cpu_to_le32(IWL_BA_ESCAPE_TIMER_D3);
+
+ return iwl_mld_send_cmd_pdu(mld, REPLY_BEACON_FILTERING_CMD,
+ &cmd);
+}
+
+int iwl_mld_disable_beacon_filter(struct iwl_mld *mld,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_beacon_filter_cmd cmd = {};
+
+ if (ieee80211_vif_type_p2p(vif) != NL80211_IFTYPE_STATION)
+ return 0;
+
+ return iwl_mld_send_cmd_pdu(mld, REPLY_BEACON_FILTERING_CMD,
+ &cmd);
+}
+
+static bool iwl_mld_power_is_radar(struct iwl_mld *mld,
+ const struct ieee80211_bss_conf *link_conf)
+{
+ const struct ieee80211_chanctx_conf *chanctx_conf;
+
+ chanctx_conf = wiphy_dereference(mld->wiphy, link_conf->chanctx_conf);
+
+ if (WARN_ON(!chanctx_conf))
+ return false;
+
+ return chanctx_conf->def.chan->flags & IEEE80211_CHAN_RADAR;
+}
+
+static void iwl_mld_power_configure_uapsd(struct iwl_mld *mld,
+ struct iwl_mld_link *link,
+ struct iwl_mac_power_cmd *cmd,
+ bool ps_poll)
+{
+ bool tid_found = false;
+
+ cmd->rx_data_timeout_uapsd =
+ cpu_to_le32(IWL_MLD_UAPSD_RX_DATA_TIMEOUT);
+ cmd->tx_data_timeout_uapsd =
+ cpu_to_le32(IWL_MLD_UAPSD_TX_DATA_TIMEOUT);
+
+ /* set advanced pm flag with no uapsd ACs to enable ps-poll */
+ if (ps_poll) {
+ cmd->flags |= cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK);
+ return;
+ }
+
+ for (enum ieee80211_ac_numbers ac = IEEE80211_AC_VO;
+ ac <= IEEE80211_AC_BK;
+ ac++) {
+ if (!link->queue_params[ac].uapsd)
+ continue;
+
+ cmd->flags |=
+ cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK |
+ POWER_FLAGS_UAPSD_MISBEHAVING_ENA_MSK);
+
+ cmd->uapsd_ac_flags |= BIT(ac);
+
+ /* QNDP TID - the highest TID with no admission control */
+ if (!tid_found && !link->queue_params[ac].acm) {
+ tid_found = true;
+ switch (ac) {
+ case IEEE80211_AC_VO:
+ cmd->qndp_tid = 6;
+ break;
+ case IEEE80211_AC_VI:
+ cmd->qndp_tid = 5;
+ break;
+ case IEEE80211_AC_BE:
+ cmd->qndp_tid = 0;
+ break;
+ case IEEE80211_AC_BK:
+ cmd->qndp_tid = 1;
+ break;
+ }
+ }
+ }
+
+ if (cmd->uapsd_ac_flags == (BIT(IEEE80211_AC_VO) |
+ BIT(IEEE80211_AC_VI) |
+ BIT(IEEE80211_AC_BE) |
+ BIT(IEEE80211_AC_BK))) {
+ cmd->flags |= cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK);
+ cmd->snooze_interval = cpu_to_le16(IWL_MLD_PS_SNOOZE_INTERVAL);
+ cmd->snooze_window = cpu_to_le16(IWL_MLD_PS_SNOOZE_WINDOW);
+ }
+
+ cmd->uapsd_max_sp = mld->hw->uapsd_max_sp_len;
+}
+
+static void
+iwl_mld_power_config_skip_dtim(struct iwl_mld *mld,
+ const struct ieee80211_bss_conf *link_conf,
+ struct iwl_mac_power_cmd *cmd)
+{
+ unsigned int dtimper_tu;
+ unsigned int dtimper;
+ unsigned int skip;
+
+ dtimper = link_conf->dtim_period ?: 1;
+ dtimper_tu = dtimper * link_conf->beacon_int;
+
+ if (dtimper >= 10 || iwl_mld_power_is_radar(mld, link_conf))
+ return;
+
+ if (WARN_ON(!dtimper_tu))
+ return;
+
+ /* configure skip over dtim up to 900 TU DTIM interval */
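+ /* e.g. a DTIM period of 2 with a 100 TU beacon interval gives
+ * dtimper_tu = 200 and skip = 4, i.e. wake on every 4th DTIM
+ */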
+ skip = max_t(int, 1, 900 / dtimper_tu);
+
+ cmd->skip_dtim_periods = skip;
+ cmd->flags |= cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK);
+}
+
+#define POWER_KEEP_ALIVE_PERIOD_SEC 25
+static void iwl_mld_power_build_cmd(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ struct iwl_mac_power_cmd *cmd,
+ bool d3)
+{
+ int dtimper, bi;
+ int keep_alive;
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+ struct ieee80211_bss_conf *link_conf = &vif->bss_conf;
+ struct iwl_mld_link *link = &mld_vif->deflink;
+ bool ps_poll = false;
+
+ cmd->id_and_color = cpu_to_le32(mld_vif->fw_id);
+
+ if (ieee80211_vif_is_mld(vif)) {
+ int link_id;
+
+ if (WARN_ON(!vif->active_links))
+ return;
+
+ /* The firmware consumes one single configuration for the vif
+ * and can't differentiate between links, so just pick the lowest
+ * link_id's configuration and use that.
+ */
+ link_id = __ffs(vif->active_links);
+ link_conf = link_conf_dereference_check(vif, link_id);
+ link = iwl_mld_link_dereference_check(mld_vif, link_id);
+
+ if (WARN_ON(!link_conf || !link))
+ return;
+ }
+ dtimper = link_conf->dtim_period;
+ bi = link_conf->beacon_int;
+
+ /* Regardless of the power management state, the driver must set the
+ * keep alive period. The FW will use it for sending keep alive NDPs
+ * immediately after association. Check that the keep alive period
+ * is at least 3 * DTIM
+ */
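+ /* As a rough worked example: with a DTIM period of 2 and a 100 TU
+ * beacon interval, 3 * 2 * 100 TU is ~0.6 s, so the 25 s
+ * POWER_KEEP_ALIVE_PERIOD_SEC floor is used instead.
+ */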
+ keep_alive = DIV_ROUND_UP(ieee80211_tu_to_usec(3 * dtimper * bi),
+ USEC_PER_SEC);
+ keep_alive = max(keep_alive, POWER_KEEP_ALIVE_PERIOD_SEC);
+ cmd->keep_alive_seconds = cpu_to_le16(keep_alive);
+
+ if (iwlmld_mod_params.power_scheme != IWL_POWER_SCHEME_CAM)
+ cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK);
+
+ if (!vif->cfg.ps || iwl_mld_tdls_sta_count(mld) > 0)
+ return;
+
+ cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK);
+
+ /* firmware supports LPRX for beacons at rate 1 Mbps or 6 Mbps only */
+ if (link_conf->beacon_rate &&
+ (link_conf->beacon_rate->bitrate == 10 ||
+ link_conf->beacon_rate->bitrate == 60)) {
+ cmd->flags |= cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK);
+ cmd->lprx_rssi_threshold = POWER_LPRX_RSSI_THRESHOLD;
+ }
+
+ if (d3) {
+ iwl_mld_power_config_skip_dtim(mld, link_conf, cmd);
+ cmd->rx_data_timeout =
+ cpu_to_le32(IWL_MLD_WOWLAN_PS_RX_DATA_TIMEOUT);
+ cmd->tx_data_timeout =
+ cpu_to_le32(IWL_MLD_WOWLAN_PS_TX_DATA_TIMEOUT);
+ } else if (iwl_mld_vif_low_latency(mld_vif) && vif->p2p) {
+ cmd->tx_data_timeout =
+ cpu_to_le32(IWL_MLD_SHORT_PS_TX_DATA_TIMEOUT);
+ cmd->rx_data_timeout =
+ cpu_to_le32(IWL_MLD_SHORT_PS_RX_DATA_TIMEOUT);
+ } else {
+ cmd->rx_data_timeout =
+ cpu_to_le32(IWL_MLD_DEFAULT_PS_RX_DATA_TIMEOUT);
+ cmd->tx_data_timeout =
+ cpu_to_le32(IWL_MLD_DEFAULT_PS_TX_DATA_TIMEOUT);
+ }
+
+ /* uAPSD is only enabled for specific certifications. For those cases,
+ * mac80211 will allow uAPSD. Always call iwl_mld_power_configure_uapsd
+ * which will look at what mac80211 is saying.
+ */
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ ps_poll = mld_vif->use_ps_poll;
+#endif
+ iwl_mld_power_configure_uapsd(mld, link, cmd, ps_poll);
+}
+
+int iwl_mld_update_mac_power(struct iwl_mld *mld, struct ieee80211_vif *vif,
+ bool d3)
+{
+ struct iwl_mac_power_cmd cmd = {};
+
+ iwl_mld_power_build_cmd(mld, vif, &cmd, d3);
+
+ return iwl_mld_send_cmd_pdu(mld, MAC_PM_POWER_TABLE, &cmd);
+}
+
+static void
+iwl_mld_tpe_sta_cmd_data(struct iwl_txpower_constraints_cmd *cmd,
+ const struct ieee80211_bss_conf *link)
+{
+ u8 i;
+
+ /* NOTE: the 0 here is IEEE80211_TPE_CAT_6GHZ_DEFAULT;
+ * we fully ignore IEEE80211_TPE_CAT_6GHZ_SUBORDINATE
+ */
+
+ BUILD_BUG_ON(ARRAY_SIZE(cmd->psd_pwr) !=
+ ARRAY_SIZE(link->tpe.psd_local[0].power));
+
+ /* if not valid, mac80211 puts default (max value) */
+ for (i = 0; i < ARRAY_SIZE(cmd->psd_pwr); i++)
+ cmd->psd_pwr[i] = min(link->tpe.psd_local[0].power[i],
+ link->tpe.psd_reg_client[0].power[i]);
+
+ BUILD_BUG_ON(ARRAY_SIZE(cmd->eirp_pwr) !=
+ ARRAY_SIZE(link->tpe.max_local[0].power));
+
+ for (i = 0; i < ARRAY_SIZE(cmd->eirp_pwr); i++)
+ cmd->eirp_pwr[i] = min(link->tpe.max_local[0].power[i],
+ link->tpe.max_reg_client[0].power[i]);
+}
+
+void
+iwl_mld_send_ap_tx_power_constraint_cmd(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link)
+{
+ struct iwl_txpower_constraints_cmd cmd = {};
+ struct iwl_mld_link *mld_link = iwl_mld_link_from_mac80211(link);
+ int ret;
+
+ lockdep_assert_wiphy(mld->wiphy);
+
+ if (!mld_link->active)
+ return;
+
+ if (link->chanreq.oper.chan->band != NL80211_BAND_6GHZ)
+ return;
+
+ cmd.link_id = cpu_to_le16(mld_link->fw_id);
+ memset(cmd.psd_pwr, DEFAULT_TPE_TX_POWER, sizeof(cmd.psd_pwr));
+ memset(cmd.eirp_pwr, DEFAULT_TPE_TX_POWER, sizeof(cmd.eirp_pwr));
+
+ if (vif->type == NL80211_IFTYPE_AP) {
+ cmd.ap_type = cpu_to_le16(IWL_6GHZ_AP_TYPE_VLP);
+ } else if (link->power_type == IEEE80211_REG_UNSET_AP) {
+ return;
+ } else {
+ cmd.ap_type = cpu_to_le16(link->power_type - 1);
+ iwl_mld_tpe_sta_cmd_data(&cmd, link);
+ }
+
+ ret = iwl_mld_send_cmd_pdu(mld,
+ WIDE_ID(PHY_OPS_GROUP,
+ AP_TX_POWER_CONSTRAINTS_CMD),
+ &cmd);
+ if (ret)
+ IWL_ERR(mld,
+ "failed to send AP_TX_POWER_CONSTRAINTS_CMD (%d)\n",
+ ret);
+}
+
+int iwl_mld_set_tx_power(struct iwl_mld *mld,
+ struct ieee80211_bss_conf *link_conf,
+ s16 tx_power)
+{
+ u32 cmd_id = REDUCE_TX_POWER_CMD;
+ struct iwl_mld_link *mld_link = iwl_mld_link_from_mac80211(link_conf);
+ u16 u_tx_power = tx_power == IWL_DEFAULT_MAX_TX_POWER ?
+ IWL_DEV_MAX_TX_POWER : 8 * tx_power;
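+ /* Note: the 8 * tx_power scaling above suggests the FW expects
+ * pwr_restriction in units of 1/8 dBm, with IWL_DEV_MAX_TX_POWER
+ * meaning no restriction.
+ */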
+ struct iwl_dev_tx_power_cmd cmd = {
+ /* Those fields sit on the same place for v9 and v10 */
+ .common.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_LINK),
+ .common.pwr_restriction = cpu_to_le16(u_tx_power),
+ };
+ u8 cmd_ver = iwl_fw_lookup_cmd_ver(mld->fw, cmd_id,
+ IWL_FW_CMD_VER_UNKNOWN);
+ int len = sizeof(cmd.common);
+
+ if (WARN_ON(!mld_link))
+ return -ENODEV;
+
+ cmd.common.link_id = cpu_to_le32(mld_link->fw_id);
+
+ if (cmd_ver == 10)
+ len += sizeof(cmd.v10);
+ else if (cmd_ver == 9)
+ len += sizeof(cmd.v9);
+
+ return iwl_mld_send_cmd_pdu(mld, cmd_id, &cmd, len);
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/power.h b/drivers/net/wireless/intel/iwlwifi/mld/power.h
new file mode 100644
index 000000000000..05ce27bef106
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mld/power.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2024 Intel Corporation
+ */
+#ifndef __iwl_mld_power_h__
+#define __iwl_mld_power_h__
+
+#include <net/mac80211.h>
+
+#include "mld.h"
+
+int iwl_mld_update_device_power(struct iwl_mld *mld, bool d3);
+
+int iwl_mld_enable_beacon_filter(struct iwl_mld *mld,
+ const struct ieee80211_bss_conf *link_conf,
+ bool d3);
+
+int iwl_mld_disable_beacon_filter(struct iwl_mld *mld,
+ struct ieee80211_vif *vif);
+
+int iwl_mld_update_mac_power(struct iwl_mld *mld, struct ieee80211_vif *vif,
+ bool d3);
+
+void
+iwl_mld_send_ap_tx_power_constraint_cmd(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link);
+
+int iwl_mld_set_tx_power(struct iwl_mld *mld,
+ struct ieee80211_bss_conf *link_conf,
+ s16 tx_power);
+
+#endif /* __iwl_mld_power_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/ptp.c b/drivers/net/wireless/intel/iwlwifi/mld/ptp.c
new file mode 100644
index 000000000000..d5c3f853d96c
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mld/ptp.c
@@ -0,0 +1,321 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2025 Intel Corporation
+ */
+
+#include "mld.h"
+#include "iwl-debug.h"
+#include "hcmd.h"
+#include "ptp.h"
+#include <linux/timekeeping.h>
+
+/* The scaled_ppm parameter is ppm (parts per million) with a 16-bit fractional
+ * part, which means that a value of 1 there actually means 2^-16 ppm,
+ * and 2^16 = 65536 is 1 ppm.
+ */
+#define PTP_SCALE_FACTOR 65536000000ULL
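+/* For example: scaled_ppm = 65536 encodes 1 ppm, so adjfine() stores
+ * scaled_freq = PTP_SCALE_FACTOR + 65536, and a raw interval of 1 s is later
+ * scaled to 1 s + 1 us by mul_u64_u64_div_u64(diff, scaled_freq,
+ * PTP_SCALE_FACTOR).
+ */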
+
+#define IWL_PTP_GP2_WRAP 0x100000000ULL
+#define IWL_PTP_WRAP_TIME (3600 * HZ)
+#define IWL_PTP_WRAP_THRESHOLD_USEC (5000)
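+/* Note: GP2 is a 32-bit usec counter, so it wraps roughly every 71.6 minutes;
+ * the hourly delayed work and the 5 ms threshold above are sized around that.
+ */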
+
+static int iwl_mld_get_systime(struct iwl_mld *mld, u32 *gp2)
+{
+ *gp2 = iwl_read_prph(mld->trans, mld->trans->cfg->gp2_reg_addr);
+
+ if (*gp2 == 0x5a5a5a5a)
+ return -EINVAL;
+
+ return 0;
+}
+
+static void iwl_mld_ptp_update_new_read(struct iwl_mld *mld, u32 gp2)
+{
+ IWL_DEBUG_PTP(mld, "PTP: last_gp2=%u, new gp2 read=%u\n",
+ mld->ptp_data.last_gp2, gp2);
+
+ /* If the difference is above the threshold, assume it's a wraparound.
+ * Otherwise assume it's an old read and ignore it.
+ */
+ if (gp2 < mld->ptp_data.last_gp2) {
+ if (mld->ptp_data.last_gp2 - gp2 <
+ IWL_PTP_WRAP_THRESHOLD_USEC) {
+ IWL_DEBUG_PTP(mld,
+ "PTP: ignore old read (gp2=%u, last_gp2=%u)\n",
+ gp2, mld->ptp_data.last_gp2);
+ return;
+ }
+
+ mld->ptp_data.wrap_counter++;
+ IWL_DEBUG_PTP(mld,
+ "PTP: wraparound detected (new counter=%u)\n",
+ mld->ptp_data.wrap_counter);
+ }
+
+ mld->ptp_data.last_gp2 = gp2;
+ schedule_delayed_work(&mld->ptp_data.dwork, IWL_PTP_WRAP_TIME);
+}
+
+u64 iwl_mld_ptp_get_adj_time(struct iwl_mld *mld, u64 base_time_ns)
+{
+ struct ptp_data *data = &mld->ptp_data;
+ u64 scale_time_gp2_ns = mld->ptp_data.scale_update_gp2 * NSEC_PER_USEC;
+ u64 res;
+ u64 diff;
+ s64 scaled_diff;
+
+ lockdep_assert_held(&data->lock);
+
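+ /* In short, the math below computes:
+ * res = scale_update_adj_time_ns + delta +
+ * (base_time_ns - scale_update_gp2_ns) * scaled_freq / PTP_SCALE_FACTOR,
+ * after base_time_ns is first extended by the GP2 wraparound counter.
+ */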
+ iwl_mld_ptp_update_new_read(mld,
+ div64_u64(base_time_ns, NSEC_PER_USEC));
+
+ base_time_ns = base_time_ns +
+ (data->wrap_counter * IWL_PTP_GP2_WRAP * NSEC_PER_USEC);
+
+ /* It is possible that a GP2 timestamp was received from fw before the
+ * last scale update.
+ */
+ if (base_time_ns < scale_time_gp2_ns) {
+ diff = scale_time_gp2_ns - base_time_ns;
+ scaled_diff = -mul_u64_u64_div_u64(diff,
+ data->scaled_freq,
+ PTP_SCALE_FACTOR);
+ } else {
+ diff = base_time_ns - scale_time_gp2_ns;
+ scaled_diff = mul_u64_u64_div_u64(diff,
+ data->scaled_freq,
+ PTP_SCALE_FACTOR);
+ }
+
+ IWL_DEBUG_PTP(mld, "base_time=%llu diff ns=%llu scaled_diff_ns=%lld\n",
+ (unsigned long long)base_time_ns,
+ (unsigned long long)diff, (long long)scaled_diff);
+
+ res = data->scale_update_adj_time_ns + data->delta + scaled_diff;
+
+ IWL_DEBUG_PTP(mld, "scale_update_ns=%llu delta=%lld adj=%llu\n",
+ (unsigned long long)data->scale_update_adj_time_ns,
+ (long long)data->delta, (unsigned long long)res);
+ return res;
+}
+
+static int iwl_mld_ptp_gettime(struct ptp_clock_info *ptp,
+ struct timespec64 *ts)
+{
+ struct iwl_mld *mld = container_of(ptp, struct iwl_mld,
+ ptp_data.ptp_clock_info);
+ struct ptp_data *data = &mld->ptp_data;
+ u32 gp2;
+ u64 ns;
+
+ if (iwl_mld_get_systime(mld, &gp2)) {
+ IWL_DEBUG_PTP(mld, "PTP: gettime: failed to read systime\n");
+ return -EIO;
+ }
+
+ spin_lock_bh(&data->lock);
+ ns = iwl_mld_ptp_get_adj_time(mld, (u64)gp2 * NSEC_PER_USEC);
+ spin_unlock_bh(&data->lock);
+
+ *ts = ns_to_timespec64(ns);
+ return 0;
+}
+
+static int iwl_mld_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+ struct iwl_mld *mld = container_of(ptp, struct iwl_mld,
+ ptp_data.ptp_clock_info);
+ struct ptp_data *data = &mld->ptp_data;
+
+ spin_lock_bh(&data->lock);
+ data->delta += delta;
+ IWL_DEBUG_PTP(mld, "delta=%lld, new delta=%lld\n", (long long)delta,
+ (long long)data->delta);
+ spin_unlock_bh(&data->lock);
+ return 0;
+}
+
+static int iwl_mld_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
+{
+ struct iwl_mld *mld = container_of(ptp, struct iwl_mld,
+ ptp_data.ptp_clock_info);
+ struct ptp_data *data = &mld->ptp_data;
+ u32 gp2;
+
+ /* Must call iwl_mld_ptp_get_adj_time() before updating
+ * data->scale_update_gp2 or data->scaled_freq since
+ * scale_update_adj_time_ns should reflect the previous scaled_freq.
+ */
+ if (iwl_mld_get_systime(mld, &gp2)) {
+ IWL_DEBUG_PTP(mld, "adjfine: failed to read systime\n");
+ return -EBUSY;
+ }
+
+ spin_lock_bh(&data->lock);
+ data->scale_update_adj_time_ns =
+ iwl_mld_ptp_get_adj_time(mld, gp2 * NSEC_PER_USEC);
+ data->scale_update_gp2 = gp2;
+
+ /* scale_update_adj_time_ns now reflects the configured delta, the
+ * wrap_counter and the previous scaled frequency. Thus delta and
+ * wrap_counter should be reset, and the scaled frequency is updated
+ * to the new frequency.
+ */
+ data->delta = 0;
+ data->wrap_counter = 0;
+ data->scaled_freq = PTP_SCALE_FACTOR + scaled_ppm;
+ IWL_DEBUG_PTP(mld, "adjfine: scaled_ppm=%ld new=%llu\n",
+ scaled_ppm, (unsigned long long)data->scaled_freq);
+ spin_unlock_bh(&data->lock);
+ return 0;
+}
+
+static void iwl_mld_ptp_work(struct work_struct *wk)
+{
+ struct iwl_mld *mld = container_of(wk, struct iwl_mld,
+ ptp_data.dwork.work);
+ struct ptp_data *data = &mld->ptp_data;
+ u32 gp2;
+
+ spin_lock_bh(&data->lock);
+ if (!iwl_mld_get_systime(mld, &gp2))
+ iwl_mld_ptp_update_new_read(mld, gp2);
+ else
+ IWL_DEBUG_PTP(mld, "PTP work: failed to read GP2\n");
+ spin_unlock_bh(&data->lock);
+}
+
+static int
+iwl_mld_get_crosstimestamp_fw(struct iwl_mld *mld, u32 *gp2, u64 *sys_time)
+{
+ struct iwl_synced_time_cmd synced_time_cmd = {
+ .operation = cpu_to_le32(IWL_SYNCED_TIME_OPERATION_READ_BOTH)
+ };
+ struct iwl_host_cmd cmd = {
+ .id = WIDE_ID(DATA_PATH_GROUP, WNM_PLATFORM_PTM_REQUEST_CMD),
+ .flags = CMD_WANT_SKB,
+ .data[0] = &synced_time_cmd,
+ .len[0] = sizeof(synced_time_cmd),
+ };
+ struct iwl_synced_time_rsp *resp;
+ struct iwl_rx_packet *pkt;
+ int ret;
+ u64 gp2_10ns;
+
+ wiphy_lock(mld->wiphy);
+ ret = iwl_mld_send_cmd(mld, &cmd);
+ wiphy_unlock(mld->wiphy);
+ if (ret)
+ return ret;
+
+ pkt = cmd.resp_pkt;
+
+ if (iwl_rx_packet_payload_len(pkt) != sizeof(*resp)) {
+ IWL_DEBUG_PTP(mld, "PTP: Invalid PTM command response\n");
+ iwl_free_resp(&cmd);
+ return -EIO;
+ }
+
+ resp = (void *)pkt->data;
+
+ gp2_10ns = (u64)le32_to_cpu(resp->gp2_timestamp_hi) << 32 |
+ le32_to_cpu(resp->gp2_timestamp_lo);
+ *gp2 = div_u64(gp2_10ns, 100);
+
+ *sys_time = (u64)le32_to_cpu(resp->platform_timestamp_hi) << 32 |
+ le32_to_cpu(resp->platform_timestamp_lo);
+
+ iwl_free_resp(&cmd);
+ return ret;
+}
+
+static int
+iwl_mld_phc_get_crosstimestamp(struct ptp_clock_info *ptp,
+ struct system_device_crosststamp *xtstamp)
+{
+ struct iwl_mld *mld = container_of(ptp, struct iwl_mld,
+ ptp_data.ptp_clock_info);
+ struct ptp_data *data = &mld->ptp_data;
+ int ret = 0;
+ /* Raw value read from GP2 register in usec */
+ u32 gp2;
+ /* GP2 value in ns */
+ s64 gp2_ns;
+ /* System (wall) time */
+ ktime_t sys_time;
+
+ memset(xtstamp, 0, sizeof(struct system_device_crosststamp));
+
+ ret = iwl_mld_get_crosstimestamp_fw(mld, &gp2, &sys_time);
+ if (ret) {
+ IWL_DEBUG_PTP(mld,
+ "PTP: fw get_crosstimestamp failed (ret=%d)\n",
+ ret);
+ return ret;
+ }
+
+ spin_lock_bh(&data->lock);
+ gp2_ns = iwl_mld_ptp_get_adj_time(mld, (u64)gp2 * NSEC_PER_USEC);
+ spin_unlock_bh(&data->lock);
+
+ IWL_DEBUG_PTP(mld,
+ "Got Sync Time: GP2:%u, last_GP2: %u, GP2_ns: %lld, sys_time: %lld\n",
+ gp2, mld->ptp_data.last_gp2, gp2_ns, (s64)sys_time);
+
+ /* System monotonic raw time is not used */
+ xtstamp->device = ns_to_ktime(gp2_ns);
+ xtstamp->sys_realtime = sys_time;
+
+ return ret;
+}
+
+void iwl_mld_ptp_init(struct iwl_mld *mld)
+{
+ if (WARN_ON(mld->ptp_data.ptp_clock))
+ return;
+
+ spin_lock_init(&mld->ptp_data.lock);
+ INIT_DELAYED_WORK(&mld->ptp_data.dwork, iwl_mld_ptp_work);
+
+ mld->ptp_data.ptp_clock_info.owner = THIS_MODULE;
+ mld->ptp_data.ptp_clock_info.gettime64 = iwl_mld_ptp_gettime;
+ mld->ptp_data.ptp_clock_info.max_adj = 0x7fffffff;
+ mld->ptp_data.ptp_clock_info.adjtime = iwl_mld_ptp_adjtime;
+ mld->ptp_data.ptp_clock_info.adjfine = iwl_mld_ptp_adjfine;
+ mld->ptp_data.scaled_freq = PTP_SCALE_FACTOR;
+ mld->ptp_data.ptp_clock_info.getcrosststamp =
+ iwl_mld_phc_get_crosstimestamp;
+
+ /* Give a short 'friendly name' to identify the PHC clock */
+ snprintf(mld->ptp_data.ptp_clock_info.name,
+ sizeof(mld->ptp_data.ptp_clock_info.name),
+ "%s", "iwlwifi-PTP");
+
+ mld->ptp_data.ptp_clock =
+ ptp_clock_register(&mld->ptp_data.ptp_clock_info, mld->dev);
+
+ if (IS_ERR_OR_NULL(mld->ptp_data.ptp_clock)) {
+ IWL_ERR(mld, "Failed to register PHC clock (%ld)\n",
+ PTR_ERR(mld->ptp_data.ptp_clock));
+ mld->ptp_data.ptp_clock = NULL;
+ } else {
+ IWL_INFO(mld, "Registered PHC clock: %s, with index: %d\n",
+ mld->ptp_data.ptp_clock_info.name,
+ ptp_clock_index(mld->ptp_data.ptp_clock));
+ }
+}
+
+void iwl_mld_ptp_remove(struct iwl_mld *mld)
+{
+ if (mld->ptp_data.ptp_clock) {
+ IWL_INFO(mld, "Unregistering PHC clock: %s, with index: %d\n",
+ mld->ptp_data.ptp_clock_info.name,
+ ptp_clock_index(mld->ptp_data.ptp_clock));
+
+ ptp_clock_unregister(mld->ptp_data.ptp_clock);
+ mld->ptp_data.ptp_clock = NULL;
+ mld->ptp_data.last_gp2 = 0;
+ mld->ptp_data.wrap_counter = 0;
+ cancel_delayed_work_sync(&mld->ptp_data.dwork);
+ }
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/ptp.h b/drivers/net/wireless/intel/iwlwifi/mld/ptp.h
new file mode 100644
index 000000000000..f3d18dd304e5
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mld/ptp.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2025 Intel Corporation
+ */
+#ifndef __iwl_mld_ptp_h__
+#define __iwl_mld_ptp_h__
+
+#include <linux/ptp_clock_kernel.h>
+
+/**
+ * struct ptp_data - PTP hardware clock data
+ *
+ * @ptp_clock: struct ptp_clock pointer returned by the ptp_clock_register()
+ * function.
+ * @ptp_clock_info: struct ptp_clock_info that describes a PTP hardware clock
+ * @lock: protects the time adjustments data
+ * @delta: delta between hardware clock and ptp clock in nanoseconds
+ * @scale_update_gp2: GP2 time when the scale was last updated
+ * @scale_update_adj_time_ns: adjusted time when the scale was last updated,
+ * in nanoseconds
+ * @scaled_freq: clock frequency ratio, scaled by PTP_SCALE_FACTOR (65536000000)
+ * @last_gp2: the last GP2 reading from the hardware, used for tracking GP2
+ * wraparounds
+ * @wrap_counter: number of wraparounds since scale_update_adj_time_ns
+ * @dwork: worker scheduled every 1 hour to detect GP2 wraparounds
+ */
+struct ptp_data {
+ struct ptp_clock *ptp_clock;
+ struct ptp_clock_info ptp_clock_info;
+
+ spinlock_t lock;
+ s64 delta;
+ u32 scale_update_gp2;
+ u64 scale_update_adj_time_ns;
+ u64 scaled_freq;
+ u32 last_gp2;
+ u32 wrap_counter;
+ struct delayed_work dwork;
+};
+
+void iwl_mld_ptp_init(struct iwl_mld *mld);
+void iwl_mld_ptp_remove(struct iwl_mld *mld);
+u64 iwl_mld_ptp_get_adj_time(struct iwl_mld *mld, u64 base_time_ns);
+
+#endif /* __iwl_mld_ptp_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/regulatory.c b/drivers/net/wireless/intel/iwlwifi/mld/regulatory.c
new file mode 100644
index 000000000000..a75af8c1e8ab
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mld/regulatory.c
@@ -0,0 +1,393 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2024-2025 Intel Corporation
+ */
+
+#include <linux/dmi.h>
+
+#include "fw/regulatory.h"
+#include "fw/acpi.h"
+#include "fw/uefi.h"
+
+#include "regulatory.h"
+#include "mld.h"
+#include "hcmd.h"
+
+void iwl_mld_get_bios_tables(struct iwl_mld *mld)
+{
+ int ret;
+
+ iwl_acpi_get_guid_lock_status(&mld->fwrt);
+
+ ret = iwl_bios_get_ppag_table(&mld->fwrt);
+ if (ret < 0) {
+ IWL_DEBUG_RADIO(mld,
+ "PPAG BIOS table invalid or unavailable. (%d)\n",
+ ret);
+ }
+
+ ret = iwl_bios_get_wrds_table(&mld->fwrt);
+ if (ret < 0) {
+ IWL_DEBUG_RADIO(mld,
+ "WRDS SAR BIOS table invalid or unavailable. (%d)\n",
+ ret);
+
+ /* If not available, don't fail and don't bother with EWRD and
+ * WGDS
+ */
+
+ if (!iwl_bios_get_wgds_table(&mld->fwrt)) {
+ /* If basic SAR is not available, we check for WGDS,
+ * which should *not* be available either. If it is
+ * available, issue an error, because we can't use SAR
+ * Geo without basic SAR.
+ */
+ IWL_ERR(mld, "BIOS contains WGDS but no WRDS\n");
+ }
+
+ } else {
+ ret = iwl_bios_get_ewrd_table(&mld->fwrt);
+ /* If EWRD is not available, we can still use
+ * WRDS, so don't fail.
+ */
+ if (ret < 0)
+ IWL_DEBUG_RADIO(mld,
+ "EWRD SAR BIOS table invalid or unavailable. (%d)\n",
+ ret);
+
+ ret = iwl_bios_get_wgds_table(&mld->fwrt);
+ if (ret < 0)
+ IWL_DEBUG_RADIO(mld,
+ "Geo SAR BIOS table invalid or unavailable. (%d)\n",
+ ret);
+ /* we don't fail if the table is not available */
+ }
+
+ ret = iwl_uefi_get_uats_table(mld->trans, &mld->fwrt);
+ if (ret)
+ IWL_DEBUG_RADIO(mld, "failed to read UATS table (%d)\n", ret);
+}
+
+static int iwl_mld_geo_sar_init(struct iwl_mld *mld)
+{
+ u32 cmd_id = WIDE_ID(PHY_OPS_GROUP, PER_CHAIN_LIMIT_OFFSET_CMD);
+ union iwl_geo_tx_power_profiles_cmd cmd;
+ u16 len;
+ u32 n_bands;
+ __le32 sk = cpu_to_le32(0);
+ int ret;
+ u8 cmd_ver = iwl_fw_lookup_cmd_ver(mld->fw, cmd_id,
+ IWL_FW_CMD_VER_UNKNOWN);
+
+ BUILD_BUG_ON(offsetof(struct iwl_geo_tx_power_profiles_cmd_v4, ops) !=
+ offsetof(struct iwl_geo_tx_power_profiles_cmd_v5, ops));
+
+ cmd.v4.ops = cpu_to_le32(IWL_PER_CHAIN_OFFSET_SET_TABLES);
+
+ /* Only set to South Korea if the table revision is 1 */
+ if (mld->fwrt.geo_rev == 1)
+ sk = cpu_to_le32(1);
+
+ if (cmd_ver == 5) {
+ len = sizeof(cmd.v5);
+ n_bands = ARRAY_SIZE(cmd.v5.table[0]);
+ cmd.v5.table_revision = sk;
+ } else if (cmd_ver == 4) {
+ len = sizeof(cmd.v4);
+ n_bands = ARRAY_SIZE(cmd.v4.table[0]);
+ cmd.v4.table_revision = sk;
+ } else {
+ return -EOPNOTSUPP;
+ }
+
+ BUILD_BUG_ON(offsetof(struct iwl_geo_tx_power_profiles_cmd_v4, table) !=
+ offsetof(struct iwl_geo_tx_power_profiles_cmd_v5, table));
+ /* the table is at the same position for all versions, so just use v4 */
+ ret = iwl_sar_geo_fill_table(&mld->fwrt, &cmd.v4.table[0][0],
+ n_bands, BIOS_GEO_MAX_PROFILE_NUM);
+
+ /* It is a valid scenario to not support SAR or to be missing the
+ * WGDS table; in that case there is no need to send the command.
+ */
+ if (ret)
+ return 0;
+
+ return iwl_mld_send_cmd_pdu(mld, cmd_id, &cmd, len);
+}
+
+int iwl_mld_config_sar_profile(struct iwl_mld *mld, int prof_a, int prof_b)
+{
+ u32 cmd_id = REDUCE_TX_POWER_CMD;
+ struct iwl_dev_tx_power_cmd cmd = {
+ .common.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_CHAINS),
+ };
+ __le16 *per_chain;
+ int ret;
+ u16 len = sizeof(cmd.common);
+ u32 n_subbands;
+ u8 cmd_ver = iwl_fw_lookup_cmd_ver(mld->fw, cmd_id,
+ IWL_FW_CMD_VER_UNKNOWN);
+
+ if (cmd_ver == 10) {
+ len += sizeof(cmd.v10);
+ n_subbands = IWL_NUM_SUB_BANDS_V2;
+ per_chain = &cmd.v10.per_chain[0][0][0];
+ cmd.v10.flags =
+ cpu_to_le32(mld->fwrt.reduced_power_flags);
+ } else if (cmd_ver == 9) {
+ len += sizeof(cmd.v9);
+ n_subbands = IWL_NUM_SUB_BANDS_V1;
+ per_chain = &cmd.v9.per_chain[0][0];
+ } else {
+ return -EOPNOTSUPP;
+ }
+
+ /* TODO: CDB - support IWL_NUM_CHAIN_TABLES_V2 */
+ ret = iwl_sar_fill_profile(&mld->fwrt, per_chain,
+ IWL_NUM_CHAIN_TABLES,
+ n_subbands, prof_a, prof_b);
+ /* return on error or if the profile is disabled (positive number) */
+ if (ret)
+ return ret;
+
+ return iwl_mld_send_cmd_pdu(mld, cmd_id, &cmd, len);
+}
+
+int iwl_mld_init_sar(struct iwl_mld *mld)
+{
+ int chain_a_prof = 1;
+ int chain_b_prof = 1;
+ int ret;
+
+ /* If no profile was chosen by the user yet, choose profile 1 (WRDS) as
+ * default for both chains
+ */
+ if (mld->fwrt.sar_chain_a_profile && mld->fwrt.sar_chain_b_profile) {
+ chain_a_prof = mld->fwrt.sar_chain_a_profile;
+ chain_b_prof = mld->fwrt.sar_chain_b_profile;
+ }
+
+ ret = iwl_mld_config_sar_profile(mld, chain_a_prof, chain_b_prof);
+ if (ret < 0)
+ return ret;
+
+ if (ret)
+ return 0;
+
+ return iwl_mld_geo_sar_init(mld);
+}
+
+int iwl_mld_init_sgom(struct iwl_mld *mld)
+{
+ int ret;
+ struct iwl_host_cmd cmd = {
+ .id = WIDE_ID(REGULATORY_AND_NVM_GROUP,
+ SAR_OFFSET_MAPPING_TABLE_CMD),
+ .data[0] = &mld->fwrt.sgom_table,
+ .len[0] = sizeof(mld->fwrt.sgom_table),
+ .dataflags[0] = IWL_HCMD_DFL_NOCOPY,
+ };
+
+ if (!mld->fwrt.sgom_enabled) {
+ IWL_DEBUG_RADIO(mld, "SGOM table is disabled\n");
+ return 0;
+ }
+
+ ret = iwl_mld_send_cmd(mld, &cmd);
+ if (ret)
+ IWL_ERR(mld,
+ "failed to send SAR_OFFSET_MAPPING_CMD (%d)\n", ret);
+
+ return ret;
+}
+
+static int iwl_mld_ppag_send_cmd(struct iwl_mld *mld)
+{
+ union iwl_ppag_table_cmd cmd = {};
+ int ret, len;
+
+ ret = iwl_fill_ppag_table(&mld->fwrt, &cmd, &len);
+ /* Not supporting PPAG table is a valid scenario */
+ if (ret < 0)
+ return 0;
+
+ IWL_DEBUG_RADIO(mld, "Sending PER_PLATFORM_ANT_GAIN_CMD\n");
+ ret = iwl_mld_send_cmd_pdu(mld, WIDE_ID(PHY_OPS_GROUP,
+ PER_PLATFORM_ANT_GAIN_CMD),
+ &cmd, len);
+ if (ret < 0)
+ IWL_ERR(mld, "failed to send PER_PLATFORM_ANT_GAIN_CMD (%d)\n",
+ ret);
+
+ return ret;
+}
+
+int iwl_mld_init_ppag(struct iwl_mld *mld)
+{
+ /* no need to read the table, done in INIT stage */
+
+ if (!(iwl_is_ppag_approved(&mld->fwrt)))
+ return 0;
+
+ return iwl_mld_ppag_send_cmd(mld);
+}
+
+void iwl_mld_configure_lari(struct iwl_mld *mld)
+{
+ struct iwl_fw_runtime *fwrt = &mld->fwrt;
+ struct iwl_lari_config_change_cmd cmd = {
+ .config_bitmap = iwl_get_lari_config_bitmap(fwrt),
+ };
+ int ret;
+ u32 value;
+
+ ret = iwl_bios_get_dsm(fwrt, DSM_FUNC_11AX_ENABLEMENT, &value);
+ if (!ret)
+ cmd.oem_11ax_allow_bitmap = cpu_to_le32(value);
+
+ ret = iwl_bios_get_dsm(fwrt, DSM_FUNC_ENABLE_UNII4_CHAN, &value);
+ if (!ret)
+ cmd.oem_unii4_allow_bitmap =
+ cpu_to_le32(value &= DSM_UNII4_ALLOW_BITMAP);
+
+ ret = iwl_bios_get_dsm(fwrt, DSM_FUNC_ACTIVATE_CHANNEL, &value);
+ if (!ret)
+ cmd.chan_state_active_bitmap = cpu_to_le32(value);
+
+ ret = iwl_bios_get_dsm(fwrt, DSM_FUNC_ENABLE_6E, &value);
+ if (!ret)
+ cmd.oem_uhb_allow_bitmap = cpu_to_le32(value);
+
+ ret = iwl_bios_get_dsm(fwrt, DSM_FUNC_FORCE_DISABLE_CHANNELS, &value);
+ if (!ret)
+ cmd.force_disable_channels_bitmap = cpu_to_le32(value);
+
+ ret = iwl_bios_get_dsm(fwrt, DSM_FUNC_ENERGY_DETECTION_THRESHOLD,
+ &value);
+ if (!ret)
+ cmd.edt_bitmap = cpu_to_le32(value);
+
+ ret = iwl_bios_get_wbem(fwrt, &value);
+ if (!ret)
+ cmd.oem_320mhz_allow_bitmap = cpu_to_le32(value);
+
+ ret = iwl_bios_get_dsm(fwrt, DSM_FUNC_ENABLE_11BE, &value);
+ if (!ret)
+ cmd.oem_11be_allow_bitmap = cpu_to_le32(value);
+
+ if (!cmd.config_bitmap &&
+ !cmd.oem_uhb_allow_bitmap &&
+ !cmd.oem_11ax_allow_bitmap &&
+ !cmd.oem_unii4_allow_bitmap &&
+ !cmd.chan_state_active_bitmap &&
+ !cmd.force_disable_channels_bitmap &&
+ !cmd.edt_bitmap &&
+ !cmd.oem_320mhz_allow_bitmap &&
+ !cmd.oem_11be_allow_bitmap)
+ return;
+
+ IWL_DEBUG_RADIO(mld,
+ "sending LARI_CONFIG_CHANGE, config_bitmap=0x%x, oem_11ax_allow_bitmap=0x%x\n",
+ le32_to_cpu(cmd.config_bitmap),
+ le32_to_cpu(cmd.oem_11ax_allow_bitmap));
+ IWL_DEBUG_RADIO(mld,
+ "sending LARI_CONFIG_CHANGE, oem_unii4_allow_bitmap=0x%x, chan_state_active_bitmap=0x%x\n",
+ le32_to_cpu(cmd.oem_unii4_allow_bitmap),
+ le32_to_cpu(cmd.chan_state_active_bitmap));
+ IWL_DEBUG_RADIO(mld,
+ "sending LARI_CONFIG_CHANGE, oem_uhb_allow_bitmap=0x%x, force_disable_channels_bitmap=0x%x\n",
+ le32_to_cpu(cmd.oem_uhb_allow_bitmap),
+ le32_to_cpu(cmd.force_disable_channels_bitmap));
+ IWL_DEBUG_RADIO(mld,
+ "sending LARI_CONFIG_CHANGE, edt_bitmap=0x%x, oem_320mhz_allow_bitmap=0x%x\n",
+ le32_to_cpu(cmd.edt_bitmap),
+ le32_to_cpu(cmd.oem_320mhz_allow_bitmap));
+ IWL_DEBUG_RADIO(mld,
+ "sending LARI_CONFIG_CHANGE, oem_11be_allow_bitmap=0x%x\n",
+ le32_to_cpu(cmd.oem_11be_allow_bitmap));
+
+ ret = iwl_mld_send_cmd_pdu(mld, WIDE_ID(REGULATORY_AND_NVM_GROUP,
+ LARI_CONFIG_CHANGE), &cmd);
+ if (ret)
+ IWL_DEBUG_RADIO(mld,
+ "Failed to send LARI_CONFIG_CHANGE (%d)\n",
+ ret);
+}
+
+void iwl_mld_init_uats(struct iwl_mld *mld)
+{
+ int ret;
+ struct iwl_host_cmd cmd = {
+ .id = WIDE_ID(REGULATORY_AND_NVM_GROUP,
+ MCC_ALLOWED_AP_TYPE_CMD),
+ .data[0] = &mld->fwrt.uats_table,
+ .len[0] = sizeof(mld->fwrt.uats_table),
+ .dataflags[0] = IWL_HCMD_DFL_NOCOPY,
+ };
+
+ if (!mld->fwrt.uats_valid)
+ return;
+
+ ret = iwl_mld_send_cmd(mld, &cmd);
+ if (ret)
+ IWL_ERR(mld, "failed to send MCC_ALLOWED_AP_TYPE_CMD (%d)\n",
+ ret);
+}
+
+void iwl_mld_init_tas(struct iwl_mld *mld)
+{
+ int ret;
+ struct iwl_tas_data data = {};
+ struct iwl_tas_config_cmd cmd = {};
+ u32 cmd_id = WIDE_ID(REGULATORY_AND_NVM_GROUP, TAS_CONFIG);
+
+ BUILD_BUG_ON(ARRAY_SIZE(data.block_list_array) !=
+ IWL_WTAS_BLACK_LIST_MAX);
+ BUILD_BUG_ON(ARRAY_SIZE(cmd.block_list_array) !=
+ IWL_WTAS_BLACK_LIST_MAX);
+
+ if (!fw_has_capa(&mld->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TAS_CFG)) {
+ IWL_DEBUG_RADIO(mld, "TAS not enabled in FW\n");
+ return;
+ }
+
+ ret = iwl_bios_get_tas_table(&mld->fwrt, &data);
+ if (ret < 0) {
+ IWL_DEBUG_RADIO(mld,
+ "TAS table invalid or unavailable. (%d)\n",
+ ret);
+ return;
+ }
+
+ if (!iwl_is_tas_approved()) {
+ IWL_DEBUG_RADIO(mld,
+ "System vendor '%s' is not in the approved list, disabling TAS in US and Canada.\n",
+ dmi_get_system_info(DMI_SYS_VENDOR) ?: "<unknown>");
+ if ((!iwl_add_mcc_to_tas_block_list(data.block_list_array,
+ &data.block_list_size,
+ IWL_MCC_US)) ||
+ (!iwl_add_mcc_to_tas_block_list(data.block_list_array,
+ &data.block_list_size,
+ IWL_MCC_CANADA))) {
+ IWL_DEBUG_RADIO(mld,
+ "Unable to add US/Canada to TAS block list, disabling TAS\n");
+ return;
+ }
+ } else {
+ IWL_DEBUG_RADIO(mld,
+ "System vendor '%s' is in the approved list.\n",
+ dmi_get_system_info(DMI_SYS_VENDOR) ?: "<unknown>");
+ }
+
+ cmd.block_list_size = cpu_to_le16(data.block_list_size);
+ for (u8 i = 0; i < data.block_list_size; i++)
+ cmd.block_list_array[i] =
+ cpu_to_le16(data.block_list_array[i]);
+ cmd.tas_config_info.table_source = data.table_source;
+ cmd.tas_config_info.table_revision = data.table_revision;
+ cmd.tas_config_info.value = cpu_to_le32(data.tas_selection);
+
+ ret = iwl_mld_send_cmd_pdu(mld, cmd_id, &cmd);
+ if (ret)
+ IWL_DEBUG_RADIO(mld, "failed to send TAS_CONFIG (%d)\n", ret);
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/regulatory.h b/drivers/net/wireless/intel/iwlwifi/mld/regulatory.h
new file mode 100644
index 000000000000..3b01c645adda
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mld/regulatory.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2024-2025 Intel Corporation
+ */
+#ifndef __iwl_mld_regulatory_h__
+#define __iwl_mld_regulatory_h__
+
+#include "mld.h"
+
+void iwl_mld_get_bios_tables(struct iwl_mld *mld);
+void iwl_mld_configure_lari(struct iwl_mld *mld);
+void iwl_mld_init_uats(struct iwl_mld *mld);
+void iwl_mld_init_tas(struct iwl_mld *mld);
+
+int iwl_mld_init_ppag(struct iwl_mld *mld);
+
+int iwl_mld_init_sgom(struct iwl_mld *mld);
+
+int iwl_mld_init_sar(struct iwl_mld *mld);
+
+int iwl_mld_config_sar_profile(struct iwl_mld *mld, int prof_a, int prof_b);
+
+#endif /* __iwl_mld_regulatory_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/roc.c b/drivers/net/wireless/intel/iwlwifi/mld/roc.c
new file mode 100644
index 000000000000..b87faca23ceb
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mld/roc.c
@@ -0,0 +1,224 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2024 - 2025 Intel Corporation
+ */
+#include <net/cfg80211.h>
+#include <net/mac80211.h>
+
+#include "mld.h"
+#include "roc.h"
+#include "hcmd.h"
+#include "iface.h"
+#include "sta.h"
+#include "mlo.h"
+
+#include "fw/api/context.h"
+#include "fw/api/time-event.h"
+
+#define AUX_ROC_MAX_DELAY MSEC_TO_TU(200)
+
+static void
+iwl_mld_vif_iter_emlsr_block_roc(void *data, u8 *mac, struct ieee80211_vif *vif)
+{
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+ int *result = data;
+ int ret;
+
+ ret = iwl_mld_block_emlsr_sync(mld_vif->mld, vif,
+ IWL_MLD_EMLSR_BLOCKED_ROC,
+ iwl_mld_get_primary_link(vif));
+ if (ret)
+ *result = ret;
+}
+
+int iwl_mld_start_roc(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_channel *channel, int duration,
+ enum ieee80211_roc_type type)
+{
+ struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+ struct iwl_mld_int_sta *aux_sta;
+ struct iwl_roc_req cmd = {
+ .action = cpu_to_le32(FW_CTXT_ACTION_ADD),
+ };
+ u8 ver = iwl_fw_lookup_cmd_ver(mld->fw,
+ WIDE_ID(MAC_CONF_GROUP, ROC_CMD), 0);
+ u16 cmd_len = ver < 6 ? sizeof(struct iwl_roc_req_v5) : sizeof(cmd);
+ enum iwl_roc_activity activity;
+ int ret = 0;
+
+ lockdep_assert_wiphy(mld->wiphy);
+
+ ieee80211_iterate_active_interfaces_mtx(mld->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mld_vif_iter_emlsr_block_roc,
+ &ret);
+ if (ret)
+ return ret;
+
+ /* TODO: task=Hotspot 2.0 */
+ if (vif->type != NL80211_IFTYPE_P2P_DEVICE) {
+ IWL_ERR(mld, "NOT SUPPORTED: ROC on vif->type %d\n",
+ vif->type);
+
+ return -EOPNOTSUPP;
+ }
+
+ switch (type) {
+ case IEEE80211_ROC_TYPE_NORMAL:
+ activity = ROC_ACTIVITY_P2P_DISC;
+ break;
+ case IEEE80211_ROC_TYPE_MGMT_TX:
+ activity = ROC_ACTIVITY_P2P_NEG;
+ break;
+ default:
+ WARN_ONCE(1, "Got an invalid P2P ROC type\n");
+ return -EINVAL;
+ }
+
+ if (WARN_ON(mld_vif->roc_activity != ROC_NUM_ACTIVITIES))
+ return -EBUSY;
+
+ /* No MLO on P2P device */
+ aux_sta = &mld_vif->deflink.aux_sta;
+
+ ret = iwl_mld_add_aux_sta(mld, aux_sta);
+ if (ret)
+ return ret;
+
+ cmd.activity = cpu_to_le32(activity);
+ cmd.sta_id = cpu_to_le32(aux_sta->sta_id);
+ cmd.channel_info.channel = cpu_to_le32(channel->hw_value);
+ cmd.channel_info.band = iwl_mld_nl80211_band_to_fw(channel->band);
+ cmd.channel_info.width = IWL_PHY_CHANNEL_MODE20;
+ /* TODO: task=Hotspot 2.0, revisit those parameters when we add an ROC
+ * on the BSS vif
+ */
+ cmd.max_delay = cpu_to_le32(AUX_ROC_MAX_DELAY);
+ cmd.duration = cpu_to_le32(MSEC_TO_TU(duration));
+
+ memcpy(cmd.node_addr, vif->addr, ETH_ALEN);
+
+ ret = iwl_mld_send_cmd_pdu(mld, WIDE_ID(MAC_CONF_GROUP, ROC_CMD),
+ &cmd, cmd_len);
+ if (ret) {
+ IWL_ERR(mld, "Couldn't send the ROC_CMD\n");
+ return ret;
+ }
+ mld_vif->roc_activity = activity;
+
+ return 0;
+}
+
+static void
+iwl_mld_vif_iter_emlsr_unblock_roc(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+
+ iwl_mld_unblock_emlsr(mld_vif->mld, vif, IWL_MLD_EMLSR_BLOCKED_ROC);
+}
+
+static void iwl_mld_destroy_roc(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ struct iwl_mld_vif *mld_vif)
+{
+ mld_vif->roc_activity = ROC_NUM_ACTIVITIES;
+
+ ieee80211_iterate_active_interfaces_mtx(mld->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mld_vif_iter_emlsr_unblock_roc,
+ NULL);
+
+ /* wait until every tx has seen that roc_activity has been reset */
+ synchronize_net();
+ /* from here, no new tx will be added
+ * we can flush the Tx on the queues
+ */
+
+ iwl_mld_flush_link_sta_txqs(mld, mld_vif->deflink.aux_sta.sta_id);
+
+ iwl_mld_remove_aux_sta(mld, vif, &vif->bss_conf);
+}
+
+int iwl_mld_cancel_roc(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+ struct iwl_roc_req cmd = {
+ .action = cpu_to_le32(FW_CTXT_ACTION_REMOVE),
+ };
+ u8 ver = iwl_fw_lookup_cmd_ver(mld->fw,
+ WIDE_ID(MAC_CONF_GROUP, ROC_CMD), 0);
+ u16 cmd_len = ver < 6 ? sizeof(struct iwl_roc_req_v5) : sizeof(cmd);
+ int ret;
+
+ lockdep_assert_wiphy(mld->wiphy);
+
+ /* TODO: task=Hotspot 2.0 */
+ if (WARN_ON(vif->type != NL80211_IFTYPE_P2P_DEVICE))
+ return -EOPNOTSUPP;
+
+ /* No ROC activity running; it's probably already done */
+ if (mld_vif->roc_activity == ROC_NUM_ACTIVITIES)
+ return 0;
+
+ cmd.activity = cpu_to_le32(mld_vif->roc_activity);
+
+ ret = iwl_mld_send_cmd_pdu(mld, WIDE_ID(MAC_CONF_GROUP, ROC_CMD),
+ &cmd, cmd_len);
+ if (ret)
+ IWL_ERR(mld, "Couldn't send the command to cancel the ROC\n");
+
+ /* We may have raced with the firmware expiring the ROC instance at
+ * this very moment. In that case, we can have a notification in the
+ * async processing queue. However, none can arrive _after_ this as
+ * ROC_CMD was sent synchronously, i.e. we waited for a response and
+ * the firmware cannot refer to this ROC after the response. Thus,
+ * if we just cancel the notification (if there's one) we'll be at a
+ * clean state for any possible next ROC.
+ */
+ iwl_mld_cancel_notifications_of_object(mld, IWL_MLD_OBJECT_TYPE_ROC,
+ mld_vif->roc_activity);
+
+ iwl_mld_destroy_roc(mld, vif, mld_vif);
+
+ return 0;
+}
+
+void iwl_mld_handle_roc_notif(struct iwl_mld *mld,
+ struct iwl_rx_packet *pkt)
+{
+ const struct iwl_roc_notif *notif = (void *)pkt->data;
+ u32 activity = le32_to_cpu(notif->activity);
+ /* TODO: task=Hotspot 2.0 - roc can run on BSS */
+ struct ieee80211_vif *vif = mld->p2p_device_vif;
+ struct iwl_mld_vif *mld_vif;
+
+ if (WARN_ON(!vif))
+ return;
+
+ mld_vif = iwl_mld_vif_from_mac80211(vif);
+ /* It is possible that the ROC was canceled
+ * but the notification was already fired.
+ */
+ if (mld_vif->roc_activity != activity)
+ return;
+
+ if (le32_to_cpu(notif->success) &&
+ le32_to_cpu(notif->started)) {
+ /* We had a successful start */
+ ieee80211_ready_on_channel(mld->hw);
+ } else {
+ /* ROC was not successful, tell the firmware to remove it */
+ if (le32_to_cpu(notif->started))
+ iwl_mld_cancel_roc(mld->hw, vif);
+ else
+ iwl_mld_destroy_roc(mld, vif, mld_vif);
+ /* we need to let mac80211 know about the end OR
+ * an unsuccessful start
+ */
+ ieee80211_remain_on_channel_expired(mld->hw);
+ }
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/roc.h b/drivers/net/wireless/intel/iwlwifi/mld/roc.h
new file mode 100644
index 000000000000..985d7f20a3d7
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mld/roc.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2024 Intel Corporation
+ */
+#ifndef __iwl_mld_roc_h__
+#define __iwl_mld_roc_h__
+
+#include <net/mac80211.h>
+
+int iwl_mld_start_roc(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_channel *channel, int duration,
+ enum ieee80211_roc_type type);
+
+int iwl_mld_cancel_roc(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif);
+
+void iwl_mld_handle_roc_notif(struct iwl_mld *mld,
+ struct iwl_rx_packet *pkt);
+
+#endif /* __iwl_mld_roc_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/rx.c b/drivers/net/wireless/intel/iwlwifi/mld/rx.c
new file mode 100644
index 000000000000..c4f189bcece2
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mld/rx.c
@@ -0,0 +1,2060 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2024-2025 Intel Corporation
+ */
+
+#include <net/mac80211.h>
+#include <kunit/static_stub.h>
+
+#include "mld.h"
+#include "sta.h"
+#include "agg.h"
+#include "rx.h"
+#include "hcmd.h"
+#include "iface.h"
+#include "time_sync.h"
+#include "fw/dbg.h"
+#include "fw/api/rx.h"
+
+/* stores relevant PHY data fields extracted from iwl_rx_mpdu_desc */
+struct iwl_mld_rx_phy_data {
+ enum iwl_rx_phy_info_type info_type;
+ __le32 data0;
+ __le32 data1;
+ __le32 data2;
+ __le32 data3;
+ __le32 eht_data4;
+ __le32 data5;
+ __le16 data4;
+ bool first_subframe;
+ bool with_data;
+ __le32 rx_vec[4];
+ u32 rate_n_flags;
+ u32 gp2_on_air_rise;
+ u16 phy_info;
+ u8 energy_a, energy_b;
+ u8 channel;
+};
+
+static void
+iwl_mld_fill_phy_data(struct iwl_rx_mpdu_desc *desc,
+ struct iwl_mld_rx_phy_data *phy_data)
+{
+ phy_data->phy_info = le16_to_cpu(desc->phy_info);
+ phy_data->rate_n_flags = le32_to_cpu(desc->v3.rate_n_flags);
+ phy_data->gp2_on_air_rise = le32_to_cpu(desc->v3.gp2_on_air_rise);
+ phy_data->channel = desc->v3.channel;
+ phy_data->energy_a = desc->v3.energy_a;
+ phy_data->energy_b = desc->v3.energy_b;
+ phy_data->data0 = desc->v3.phy_data0;
+ phy_data->data1 = desc->v3.phy_data1;
+ phy_data->data2 = desc->v3.phy_data2;
+ phy_data->data3 = desc->v3.phy_data3;
+ phy_data->data4 = desc->phy_data4;
+ phy_data->eht_data4 = desc->phy_eht_data4;
+ phy_data->data5 = desc->v3.phy_data5;
+ phy_data->with_data = true;
+}
+
+static inline int iwl_mld_check_pn(struct iwl_mld *mld, struct sk_buff *skb,
+ int queue, struct ieee80211_sta *sta)
+{
+ struct ieee80211_hdr *hdr = (void *)skb_mac_header(skb);
+ struct ieee80211_rx_status *stats = IEEE80211_SKB_RXCB(skb);
+ struct iwl_mld_sta *mld_sta;
+ struct iwl_mld_ptk_pn *ptk_pn;
+ int res;
+ u8 tid, keyidx;
+ u8 pn[IEEE80211_CCMP_PN_LEN];
+ u8 *extiv;
+
+ /* multicast and non-data frames only arrive on the default queue;
+ * avoid checking on the default queue - we don't want to replicate
+ * all the logic that's necessary for checking the PN on fragmented
+ * frames, so leave that to mac80211
+ */
+ if (queue == 0 || !ieee80211_is_data(hdr->frame_control) ||
+ is_multicast_ether_addr(hdr->addr1))
+ return 0;
+
+ if (!(stats->flag & RX_FLAG_DECRYPTED))
+ return 0;
+
+ /* if we are here - this for sure is either CCMP or GCMP */
+ if (!sta) {
+ IWL_DEBUG_DROP(mld,
+ "expected hw-decrypted unicast frame for station\n");
+ return -1;
+ }
+
+ mld_sta = iwl_mld_sta_from_mac80211(sta);
+
+ extiv = (u8 *)hdr + ieee80211_hdrlen(hdr->frame_control);
+ keyidx = extiv[3] >> 6;
+
+ ptk_pn = rcu_dereference(mld_sta->ptk_pn[keyidx]);
+ if (!ptk_pn)
+ return -1;
+
+ if (ieee80211_is_data_qos(hdr->frame_control))
+ tid = ieee80211_get_tid(hdr);
+ else
+ tid = 0;
+
+ /* we don't use HCCA/802.11 QoS TSPECs, so drop such frames */
+ if (tid >= IWL_MAX_TID_COUNT)
+ return -1;
+
+ /* load pn */
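+ /* The CCMP/GCMP header carries PN0..PN5 in IV bytes 0, 1, 4, 5, 6, 7
+ * (least-significant byte first); reverse it here so that the memcmp()
+ * below compares the most-significant byte first.
+ */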
+ pn[0] = extiv[7];
+ pn[1] = extiv[6];
+ pn[2] = extiv[5];
+ pn[3] = extiv[4];
+ pn[4] = extiv[1];
+ pn[5] = extiv[0];
+
+ res = memcmp(pn, ptk_pn->q[queue].pn[tid], IEEE80211_CCMP_PN_LEN);
+ if (res < 0)
+ return -1;
+ if (!res && !(stats->flag & RX_FLAG_ALLOW_SAME_PN))
+ return -1;
+
+ memcpy(ptk_pn->q[queue].pn[tid], pn, IEEE80211_CCMP_PN_LEN);
+ stats->flag |= RX_FLAG_PN_VALIDATED;
+
+ return 0;
+}
+
+/* iwl_mld_pass_packet_to_mac80211 - passes the packet to mac80211 */
+void iwl_mld_pass_packet_to_mac80211(struct iwl_mld *mld,
+ struct napi_struct *napi,
+ struct sk_buff *skb, int queue,
+ struct ieee80211_sta *sta)
+{
+ KUNIT_STATIC_STUB_REDIRECT(iwl_mld_pass_packet_to_mac80211,
+ mld, napi, skb, queue, sta);
+
+ if (unlikely(iwl_mld_check_pn(mld, skb, queue, sta))) {
+ kfree_skb(skb);
+ return;
+ }
+
+ ieee80211_rx_napi(mld->hw, sta, skb, napi);
+}
+EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_mld_pass_packet_to_mac80211);
+
+static void iwl_mld_fill_signal(struct iwl_mld *mld,
+ struct ieee80211_rx_status *rx_status,
+ struct iwl_mld_rx_phy_data *phy_data)
+{
+ u32 rate_n_flags = phy_data->rate_n_flags;
+ int energy_a = phy_data->energy_a;
+ int energy_b = phy_data->energy_b;
+ int max_energy;
+
+ energy_a = energy_a ? -energy_a : S8_MIN;
+ energy_b = energy_b ? -energy_b : S8_MIN;
+ max_energy = max(energy_a, energy_b);
+
+ IWL_DEBUG_STATS(mld, "energy in A %d B %d, and max %d\n",
+ energy_a, energy_b, max_energy);
+
+ rx_status->signal = max_energy;
+ rx_status->chains =
+ (rate_n_flags & RATE_MCS_ANT_AB_MSK) >> RATE_MCS_ANT_POS;
+ rx_status->chain_signal[0] = energy_a;
+ rx_status->chain_signal[1] = energy_b;
+}
+
+static void
+iwl_mld_decode_he_phy_ru_alloc(struct iwl_mld_rx_phy_data *phy_data,
+ struct ieee80211_radiotap_he *he,
+ struct ieee80211_radiotap_he_mu *he_mu,
+ struct ieee80211_rx_status *rx_status)
+{
+ /* Unfortunately, we have to leave the mac80211 data
+ * incorrect for the case that we receive an HE-MU
+ * transmission and *don't* have the HE phy data (due
+ * to the bits being used for TSF). This shouldn't
+ * happen though as management frames where we need
+ * the TSF/timers are not transmitted in HE-MU.
+ */
+ u8 ru = le32_get_bits(phy_data->data1, IWL_RX_PHY_DATA1_HE_RU_ALLOC_MASK);
+ u32 rate_n_flags = phy_data->rate_n_flags;
+ u32 he_type = rate_n_flags & RATE_MCS_HE_TYPE_MSK;
+ u8 offs = 0;
+
+ rx_status->bw = RATE_INFO_BW_HE_RU;
+
+ he->data1 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN);
+
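+ /* map the firmware RU index to an RU size and the offset of the RU
+ * within that size: indices 0-36 are 26-tone RUs, 37-52 are 52-tone,
+ * 53-60 are 106-tone, 61-64 are 242-tone, 65-66 are 484-tone,
+ * 67 is 996-tone and 68 is 2x996-tone
+ */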
+ switch (ru) {
+ case 0 ... 36:
+ rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_26;
+ offs = ru;
+ break;
+ case 37 ... 52:
+ rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_52;
+ offs = ru - 37;
+ break;
+ case 53 ... 60:
+ rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_106;
+ offs = ru - 53;
+ break;
+ case 61 ... 64:
+ rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_242;
+ offs = ru - 61;
+ break;
+ case 65 ... 66:
+ rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_484;
+ offs = ru - 65;
+ break;
+ case 67:
+ rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_996;
+ break;
+ case 68:
+ rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_2x996;
+ break;
+ }
+ he->data2 |= le16_encode_bits(offs,
+ IEEE80211_RADIOTAP_HE_DATA2_RU_OFFSET);
+ he->data2 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_PRISEC_80_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA2_RU_OFFSET_KNOWN);
+ if (phy_data->data1 & cpu_to_le32(IWL_RX_PHY_DATA1_HE_RU_ALLOC_SEC80))
+ he->data2 |=
+ cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_PRISEC_80_SEC);
+
+#define CHECK_BW(bw) \
+ BUILD_BUG_ON(IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW_ ## bw ## MHZ != \
+ RATE_MCS_CHAN_WIDTH_##bw >> RATE_MCS_CHAN_WIDTH_POS); \
+ BUILD_BUG_ON(IEEE80211_RADIOTAP_HE_DATA6_TB_PPDU_BW_ ## bw ## MHZ != \
+ RATE_MCS_CHAN_WIDTH_##bw >> RATE_MCS_CHAN_WIDTH_POS)
+ CHECK_BW(20);
+ CHECK_BW(40);
+ CHECK_BW(80);
+ CHECK_BW(160);
+
+ if (he_mu)
+ he_mu->flags2 |=
+ le16_encode_bits(u32_get_bits(rate_n_flags,
+ RATE_MCS_CHAN_WIDTH_MSK),
+ IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW);
+ else if (he_type == RATE_MCS_HE_TYPE_TRIG)
+ he->data6 |=
+ cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA6_TB_PPDU_BW_KNOWN) |
+ le16_encode_bits(u32_get_bits(rate_n_flags,
+ RATE_MCS_CHAN_WIDTH_MSK),
+ IEEE80211_RADIOTAP_HE_DATA6_TB_PPDU_BW);
+}
+
+static void
+iwl_mld_decode_he_mu_ext(struct iwl_mld_rx_phy_data *phy_data,
+ struct ieee80211_radiotap_he_mu *he_mu)
+{
+ u32 phy_data2 = le32_to_cpu(phy_data->data2);
+ u32 phy_data3 = le32_to_cpu(phy_data->data3);
+ u16 phy_data4 = le16_to_cpu(phy_data->data4);
+ u32 rate_n_flags = phy_data->rate_n_flags;
+
+ if (u32_get_bits(phy_data4, IWL_RX_PHY_DATA4_HE_MU_EXT_CH1_CRC_OK)) {
+ he_mu->flags1 |=
+ cpu_to_le16(IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH1_RU_KNOWN |
+ IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH1_CTR_26T_RU_KNOWN);
+
+ he_mu->flags1 |=
+ le16_encode_bits(u32_get_bits(phy_data4,
+ IWL_RX_PHY_DATA4_HE_MU_EXT_CH1_CTR_RU),
+ IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH1_CTR_26T_RU);
+
+ he_mu->ru_ch1[0] = u32_get_bits(phy_data2,
+ IWL_RX_PHY_DATA2_HE_MU_EXT_CH1_RU0);
+ he_mu->ru_ch1[1] = u32_get_bits(phy_data3,
+ IWL_RX_PHY_DATA3_HE_MU_EXT_CH1_RU1);
+ he_mu->ru_ch1[2] = u32_get_bits(phy_data2,
+ IWL_RX_PHY_DATA2_HE_MU_EXT_CH1_RU2);
+ he_mu->ru_ch1[3] = u32_get_bits(phy_data3,
+ IWL_RX_PHY_DATA3_HE_MU_EXT_CH1_RU3);
+ }
+
+ if (u32_get_bits(phy_data4, IWL_RX_PHY_DATA4_HE_MU_EXT_CH2_CRC_OK) &&
+ (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) != RATE_MCS_CHAN_WIDTH_20) {
+ he_mu->flags1 |=
+ cpu_to_le16(IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH2_RU_KNOWN |
+ IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH2_CTR_26T_RU_KNOWN);
+
+ he_mu->flags2 |=
+ le16_encode_bits(u32_get_bits(phy_data4,
+ IWL_RX_PHY_DATA4_HE_MU_EXT_CH2_CTR_RU),
+ IEEE80211_RADIOTAP_HE_MU_FLAGS2_CH2_CTR_26T_RU);
+
+ he_mu->ru_ch2[0] = u32_get_bits(phy_data2,
+ IWL_RX_PHY_DATA2_HE_MU_EXT_CH2_RU0);
+ he_mu->ru_ch2[1] = u32_get_bits(phy_data3,
+ IWL_RX_PHY_DATA3_HE_MU_EXT_CH2_RU1);
+ he_mu->ru_ch2[2] = u32_get_bits(phy_data2,
+ IWL_RX_PHY_DATA2_HE_MU_EXT_CH2_RU2);
+ he_mu->ru_ch2[3] = u32_get_bits(phy_data3,
+ IWL_RX_PHY_DATA3_HE_MU_EXT_CH2_RU3);
+ }
+}
+
+static void
+iwl_mld_decode_he_phy_data(struct iwl_mld_rx_phy_data *phy_data,
+ struct ieee80211_radiotap_he *he,
+ struct ieee80211_radiotap_he_mu *he_mu,
+ struct ieee80211_rx_status *rx_status,
+ int queue)
+{
+ switch (phy_data->info_type) {
+ case IWL_RX_PHY_INFO_TYPE_NONE:
+ case IWL_RX_PHY_INFO_TYPE_CCK:
+ case IWL_RX_PHY_INFO_TYPE_OFDM_LGCY:
+ case IWL_RX_PHY_INFO_TYPE_HT:
+ case IWL_RX_PHY_INFO_TYPE_VHT_SU:
+ case IWL_RX_PHY_INFO_TYPE_VHT_MU:
+ case IWL_RX_PHY_INFO_TYPE_EHT_MU:
+ case IWL_RX_PHY_INFO_TYPE_EHT_TB:
+ case IWL_RX_PHY_INFO_TYPE_EHT_MU_EXT:
+ case IWL_RX_PHY_INFO_TYPE_EHT_TB_EXT:
+ return;
+ case IWL_RX_PHY_INFO_TYPE_HE_TB_EXT:
+ he->data1 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_SPTL_REUSE_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA1_SPTL_REUSE2_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA1_SPTL_REUSE3_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA1_SPTL_REUSE4_KNOWN);
+ he->data4 |= le16_encode_bits(le32_get_bits(phy_data->data2,
+ IWL_RX_PHY_DATA2_HE_TB_EXT_SPTL_REUSE1),
+ IEEE80211_RADIOTAP_HE_DATA4_TB_SPTL_REUSE1);
+ he->data4 |= le16_encode_bits(le32_get_bits(phy_data->data2,
+ IWL_RX_PHY_DATA2_HE_TB_EXT_SPTL_REUSE2),
+ IEEE80211_RADIOTAP_HE_DATA4_TB_SPTL_REUSE2);
+ he->data4 |= le16_encode_bits(le32_get_bits(phy_data->data2,
+ IWL_RX_PHY_DATA2_HE_TB_EXT_SPTL_REUSE3),
+ IEEE80211_RADIOTAP_HE_DATA4_TB_SPTL_REUSE3);
+ he->data4 |= le16_encode_bits(le32_get_bits(phy_data->data2,
+ IWL_RX_PHY_DATA2_HE_TB_EXT_SPTL_REUSE4),
+ IEEE80211_RADIOTAP_HE_DATA4_TB_SPTL_REUSE4);
+ fallthrough;
+ case IWL_RX_PHY_INFO_TYPE_HE_SU:
+ case IWL_RX_PHY_INFO_TYPE_HE_MU:
+ case IWL_RX_PHY_INFO_TYPE_HE_MU_EXT:
+ case IWL_RX_PHY_INFO_TYPE_HE_TB:
+ /* HE common */
+ he->data1 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_LDPC_XSYMSEG_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA1_DOPPLER_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA1_BSS_COLOR_KNOWN);
+ he->data2 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_PRE_FEC_PAD_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA2_PE_DISAMBIG_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA2_TXOP_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA2_NUM_LTF_SYMS_KNOWN);
+ he->data3 |= le16_encode_bits(le32_get_bits(phy_data->data0,
+ IWL_RX_PHY_DATA0_HE_BSS_COLOR_MASK),
+ IEEE80211_RADIOTAP_HE_DATA3_BSS_COLOR);
+ if (phy_data->info_type != IWL_RX_PHY_INFO_TYPE_HE_TB &&
+ phy_data->info_type != IWL_RX_PHY_INFO_TYPE_HE_TB_EXT) {
+ he->data1 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_UL_DL_KNOWN);
+ he->data3 |= le16_encode_bits(le32_get_bits(phy_data->data0,
+ IWL_RX_PHY_DATA0_HE_UPLINK),
+ IEEE80211_RADIOTAP_HE_DATA3_UL_DL);
+ }
+ he->data3 |= le16_encode_bits(le32_get_bits(phy_data->data0,
+ IWL_RX_PHY_DATA0_HE_LDPC_EXT_SYM),
+ IEEE80211_RADIOTAP_HE_DATA3_LDPC_XSYMSEG);
+ he->data5 |= le16_encode_bits(le32_get_bits(phy_data->data0,
+ IWL_RX_PHY_DATA0_HE_PRE_FEC_PAD_MASK),
+ IEEE80211_RADIOTAP_HE_DATA5_PRE_FEC_PAD);
+ he->data5 |= le16_encode_bits(le32_get_bits(phy_data->data0,
+ IWL_RX_PHY_DATA0_HE_PE_DISAMBIG),
+ IEEE80211_RADIOTAP_HE_DATA5_PE_DISAMBIG);
+ he->data5 |= le16_encode_bits(le32_get_bits(phy_data->data1,
+ IWL_RX_PHY_DATA1_HE_LTF_NUM_MASK),
+ IEEE80211_RADIOTAP_HE_DATA5_NUM_LTF_SYMS);
+ he->data6 |= le16_encode_bits(le32_get_bits(phy_data->data0,
+ IWL_RX_PHY_DATA0_HE_TXOP_DUR_MASK),
+ IEEE80211_RADIOTAP_HE_DATA6_TXOP);
+ he->data6 |= le16_encode_bits(le32_get_bits(phy_data->data0,
+ IWL_RX_PHY_DATA0_HE_DOPPLER),
+ IEEE80211_RADIOTAP_HE_DATA6_DOPPLER);
+ break;
+ }
+
+ switch (phy_data->info_type) {
+ case IWL_RX_PHY_INFO_TYPE_HE_MU_EXT:
+ case IWL_RX_PHY_INFO_TYPE_HE_MU:
+ case IWL_RX_PHY_INFO_TYPE_HE_SU:
+ he->data1 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_SPTL_REUSE_KNOWN);
+ he->data4 |= le16_encode_bits(le32_get_bits(phy_data->data0,
+ IWL_RX_PHY_DATA0_HE_SPATIAL_REUSE_MASK),
+ IEEE80211_RADIOTAP_HE_DATA4_SU_MU_SPTL_REUSE);
+ break;
+ default:
+ /* nothing here */
+ break;
+ }
+
+ switch (phy_data->info_type) {
+ case IWL_RX_PHY_INFO_TYPE_HE_MU_EXT:
+ he_mu->flags1 |=
+ le16_encode_bits(le16_get_bits(phy_data->data4,
+ IWL_RX_PHY_DATA4_HE_MU_EXT_SIGB_DCM),
+ IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_DCM);
+ he_mu->flags1 |=
+ le16_encode_bits(le16_get_bits(phy_data->data4,
+ IWL_RX_PHY_DATA4_HE_MU_EXT_SIGB_MCS_MASK),
+ IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_MCS);
+ he_mu->flags2 |=
+ le16_encode_bits(le16_get_bits(phy_data->data4,
+ IWL_RX_PHY_DATA4_HE_MU_EXT_PREAMBLE_PUNC_TYPE_MASK),
+ IEEE80211_RADIOTAP_HE_MU_FLAGS2_PUNC_FROM_SIG_A_BW);
+ iwl_mld_decode_he_mu_ext(phy_data, he_mu);
+ fallthrough;
+ case IWL_RX_PHY_INFO_TYPE_HE_MU:
+ he_mu->flags2 |=
+ le16_encode_bits(le32_get_bits(phy_data->data1,
+ IWL_RX_PHY_DATA1_HE_MU_SIBG_SYM_OR_USER_NUM_MASK),
+ IEEE80211_RADIOTAP_HE_MU_FLAGS2_SIG_B_SYMS_USERS);
+ he_mu->flags2 |=
+ le16_encode_bits(le32_get_bits(phy_data->data1,
+ IWL_RX_PHY_DATA1_HE_MU_SIGB_COMPRESSION),
+ IEEE80211_RADIOTAP_HE_MU_FLAGS2_SIG_B_COMP);
+ fallthrough;
+ case IWL_RX_PHY_INFO_TYPE_HE_TB:
+ case IWL_RX_PHY_INFO_TYPE_HE_TB_EXT:
+ iwl_mld_decode_he_phy_ru_alloc(phy_data, he, he_mu, rx_status);
+ break;
+ case IWL_RX_PHY_INFO_TYPE_HE_SU:
+ he->data1 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_BEAM_CHANGE_KNOWN);
+ he->data3 |= le16_encode_bits(le32_get_bits(phy_data->data0,
+ IWL_RX_PHY_DATA0_HE_BEAM_CHNG),
+ IEEE80211_RADIOTAP_HE_DATA3_BEAM_CHANGE);
+ break;
+ default:
+ /* nothing */
+ break;
+ }
+}
+
+static void iwl_mld_rx_he(struct iwl_mld *mld, struct sk_buff *skb,
+ struct iwl_mld_rx_phy_data *phy_data,
+ int queue)
+{
+ struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb);
+ struct ieee80211_radiotap_he *he = NULL;
+ struct ieee80211_radiotap_he_mu *he_mu = NULL;
+ u32 rate_n_flags = phy_data->rate_n_flags;
+ u32 he_type = rate_n_flags & RATE_MCS_HE_TYPE_MSK;
+ u8 ltf;
+ static const struct ieee80211_radiotap_he known = {
+ .data1 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA1_DATA_DCM_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA1_STBC_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA1_CODING_KNOWN),
+ .data2 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_GI_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA2_TXBF_KNOWN),
+ };
+ static const struct ieee80211_radiotap_he_mu mu_known = {
+ .flags1 = cpu_to_le16(IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_MCS_KNOWN |
+ IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_DCM_KNOWN |
+ IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_SYMS_USERS_KNOWN |
+ IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_COMP_KNOWN),
+ .flags2 = cpu_to_le16(IEEE80211_RADIOTAP_HE_MU_FLAGS2_PUNC_FROM_SIG_A_BW_KNOWN |
+ IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW_KNOWN),
+ };
+ u16 phy_info = phy_data->phy_info;
+
+ he = skb_put_data(skb, &known, sizeof(known));
+ rx_status->flag |= RX_FLAG_RADIOTAP_HE;
+
+ if (phy_data->info_type == IWL_RX_PHY_INFO_TYPE_HE_MU ||
+ phy_data->info_type == IWL_RX_PHY_INFO_TYPE_HE_MU_EXT) {
+ he_mu = skb_put_data(skb, &mu_known, sizeof(mu_known));
+ rx_status->flag |= RX_FLAG_RADIOTAP_HE_MU;
+ }
+
+ /* report the AMPDU-EOF bit on single frames */
+ if (!queue && !(phy_info & IWL_RX_MPDU_PHY_AMPDU)) {
+ rx_status->flag |= RX_FLAG_AMPDU_DETAILS;
+ rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT_KNOWN;
+ if (phy_data->data0 & cpu_to_le32(IWL_RX_PHY_DATA0_HE_DELIM_EOF))
+ rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT;
+ }
+
+ if (phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD)
+ iwl_mld_decode_he_phy_data(phy_data, he, he_mu, rx_status,
+ queue);
+
+ /* update aggregation data for the monitor's sake on the default queue */
+ if (!queue && (phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD) &&
+ (phy_info & IWL_RX_MPDU_PHY_AMPDU) && phy_data->first_subframe) {
+ rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT_KNOWN;
+ if (phy_data->data0 & cpu_to_le32(IWL_RX_PHY_DATA0_EHT_DELIM_EOF))
+ rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT;
+ }
+
+ if (he_type == RATE_MCS_HE_TYPE_EXT_SU &&
+ rate_n_flags & RATE_MCS_HE_106T_MSK) {
+ rx_status->bw = RATE_INFO_BW_HE_RU;
+ rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_106;
+ }
+
+ /* the actual BW/RU allocation data is filled in by mac80211 */
+ if (he_type == RATE_MCS_HE_TYPE_SU ||
+ he_type == RATE_MCS_HE_TYPE_EXT_SU)
+ he->data1 |=
+ cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN);
+
+#define CHECK_TYPE(F) \
+ BUILD_BUG_ON(IEEE80211_RADIOTAP_HE_DATA1_FORMAT_ ## F != \
+ (RATE_MCS_HE_TYPE_ ## F >> RATE_MCS_HE_TYPE_POS))
+
+ CHECK_TYPE(SU);
+ CHECK_TYPE(EXT_SU);
+ CHECK_TYPE(MU);
+ CHECK_TYPE(TRIG);
+
+ he->data1 |= cpu_to_le16(he_type >> RATE_MCS_HE_TYPE_POS);
+
+ if (rate_n_flags & RATE_MCS_BF_MSK)
+ he->data5 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA5_TXBF);
+
+ switch ((rate_n_flags & RATE_MCS_HE_GI_LTF_MSK) >>
+ RATE_MCS_HE_GI_LTF_POS) {
+ case 0:
+ if (he_type == RATE_MCS_HE_TYPE_TRIG)
+ rx_status->he_gi = NL80211_RATE_INFO_HE_GI_1_6;
+ else
+ rx_status->he_gi = NL80211_RATE_INFO_HE_GI_0_8;
+ if (he_type == RATE_MCS_HE_TYPE_MU)
+ ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_4X;
+ else
+ ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_1X;
+ break;
+ case 1:
+ if (he_type == RATE_MCS_HE_TYPE_TRIG)
+ rx_status->he_gi = NL80211_RATE_INFO_HE_GI_1_6;
+ else
+ rx_status->he_gi = NL80211_RATE_INFO_HE_GI_0_8;
+ ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_2X;
+ break;
+ case 2:
+ if (he_type == RATE_MCS_HE_TYPE_TRIG) {
+ rx_status->he_gi = NL80211_RATE_INFO_HE_GI_3_2;
+ ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_4X;
+ } else {
+ rx_status->he_gi = NL80211_RATE_INFO_HE_GI_1_6;
+ ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_2X;
+ }
+ break;
+ case 3:
+ rx_status->he_gi = NL80211_RATE_INFO_HE_GI_3_2;
+ ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_4X;
+ break;
+ case 4:
+ rx_status->he_gi = NL80211_RATE_INFO_HE_GI_0_8;
+ ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_4X;
+ break;
+ default:
+ ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_UNKNOWN;
+ }
+
+ he->data5 |= le16_encode_bits(ltf,
+ IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE);
+}
+
+static void iwl_mld_decode_lsig(struct sk_buff *skb,
+ struct iwl_mld_rx_phy_data *phy_data)
+{
+ struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb);
+ struct ieee80211_radiotap_lsig *lsig;
+
+ switch (phy_data->info_type) {
+ case IWL_RX_PHY_INFO_TYPE_HT:
+ case IWL_RX_PHY_INFO_TYPE_VHT_SU:
+ case IWL_RX_PHY_INFO_TYPE_VHT_MU:
+ case IWL_RX_PHY_INFO_TYPE_HE_TB_EXT:
+ case IWL_RX_PHY_INFO_TYPE_HE_SU:
+ case IWL_RX_PHY_INFO_TYPE_HE_MU:
+ case IWL_RX_PHY_INFO_TYPE_HE_MU_EXT:
+ case IWL_RX_PHY_INFO_TYPE_HE_TB:
+ case IWL_RX_PHY_INFO_TYPE_EHT_MU:
+ case IWL_RX_PHY_INFO_TYPE_EHT_TB:
+ case IWL_RX_PHY_INFO_TYPE_EHT_MU_EXT:
+ case IWL_RX_PHY_INFO_TYPE_EHT_TB_EXT:
+ lsig = skb_put(skb, sizeof(*lsig));
+ lsig->data1 = cpu_to_le16(IEEE80211_RADIOTAP_LSIG_DATA1_LENGTH_KNOWN);
+ lsig->data2 = le16_encode_bits(le32_get_bits(phy_data->data1,
+ IWL_RX_PHY_DATA1_LSIG_LEN_MASK),
+ IEEE80211_RADIOTAP_LSIG_DATA2_LENGTH);
+ rx_status->flag |= RX_FLAG_RADIOTAP_LSIG;
+ break;
+ default:
+ break;
+ }
+}
+
+/* Put a TLV on the skb and return a pointer to its data
+ *
+ * Also pad the length to a multiple of 4 and zero out the data part
+ */
+static void *
+iwl_mld_radiotap_put_tlv(struct sk_buff *skb, u16 type, u16 len)
+{
+ struct ieee80211_radiotap_tlv *tlv;
+
+ tlv = skb_put(skb, sizeof(*tlv));
+ tlv->type = cpu_to_le16(type);
+ tlv->len = cpu_to_le16(len);
+ return skb_put_zero(skb, ALIGN(len, 4));
+}
+
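+/* decode a field from a firmware __le32 value and re-encode it into the
+ * corresponding radiotap field
+ */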
+#define LE32_DEC_ENC(value, dec_bits, enc_bits) \
+ le32_encode_bits(le32_get_bits(value, dec_bits), enc_bits)
+
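+/* set both the 'known' mask bit(s) and the decoded value of a U-SIG
+ * radiotap field
+ */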
+#define IWL_MLD_ENC_USIG_VALUE_MASK(usig, in_value, dec_bits, enc_bits) do { \
+ typeof(enc_bits) _enc_bits = enc_bits; \
+ typeof(usig) _usig = usig; \
+ (_usig)->mask |= cpu_to_le32(_enc_bits); \
+ (_usig)->value |= LE32_DEC_ENC(in_value, dec_bits, _enc_bits); \
+} while (0)
+
+#define __IWL_MLD_ENC_EHT_RU(rt_data, rt_ru, fw_data, fw_ru) \
+ eht->data[(rt_data)] |= \
+ (cpu_to_le32 \
+ (IEEE80211_RADIOTAP_EHT_DATA ## rt_data ## _RU_ALLOC_CC_ ## rt_ru ## _KNOWN) | \
+ LE32_DEC_ENC(data ## fw_data, \
+ IWL_RX_PHY_DATA ## fw_data ## _EHT_MU_EXT_RU_ALLOC_ ## fw_ru, \
+ IEEE80211_RADIOTAP_EHT_DATA ## rt_data ## _RU_ALLOC_CC_ ## rt_ru))
+
+#define _IWL_MLD_ENC_EHT_RU(rt_data, rt_ru, fw_data, fw_ru) \
+ __IWL_MLD_ENC_EHT_RU(rt_data, rt_ru, fw_data, fw_ru)
+
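+/* radiotap EHT data word that carries each content-channel/RU-index pair */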
+#define IEEE80211_RADIOTAP_RU_DATA_1_1_1 1
+#define IEEE80211_RADIOTAP_RU_DATA_2_1_1 2
+#define IEEE80211_RADIOTAP_RU_DATA_1_1_2 2
+#define IEEE80211_RADIOTAP_RU_DATA_2_1_2 2
+#define IEEE80211_RADIOTAP_RU_DATA_1_2_1 3
+#define IEEE80211_RADIOTAP_RU_DATA_2_2_1 3
+#define IEEE80211_RADIOTAP_RU_DATA_1_2_2 3
+#define IEEE80211_RADIOTAP_RU_DATA_2_2_2 4
+
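+/* firmware phy data word that carries each RU allocation field */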
+#define IWL_RX_RU_DATA_A1 2
+#define IWL_RX_RU_DATA_A2 2
+#define IWL_RX_RU_DATA_B1 2
+#define IWL_RX_RU_DATA_B2 4
+#define IWL_RX_RU_DATA_C1 3
+#define IWL_RX_RU_DATA_C2 3
+#define IWL_RX_RU_DATA_D1 4
+#define IWL_RX_RU_DATA_D2 4
+
+#define IWL_MLD_ENC_EHT_RU(rt_ru, fw_ru) \
+ _IWL_MLD_ENC_EHT_RU(IEEE80211_RADIOTAP_RU_DATA_ ## rt_ru, \
+ rt_ru, \
+ IWL_RX_RU_DATA_ ## fw_ru, \
+ fw_ru)
+
+static void iwl_mld_decode_eht_ext_mu(struct iwl_mld *mld,
+ struct iwl_mld_rx_phy_data *phy_data,
+ struct ieee80211_rx_status *rx_status,
+ struct ieee80211_radiotap_eht *eht,
+ struct ieee80211_radiotap_eht_usig *usig)
+{
+ if (phy_data->with_data) {
+ __le32 data1 = phy_data->data1;
+ __le32 data2 = phy_data->data2;
+ __le32 data3 = phy_data->data3;
+ __le32 data4 = phy_data->eht_data4;
+ __le32 data5 = phy_data->data5;
+ u32 phy_bw = phy_data->rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK;
+
+ IWL_MLD_ENC_USIG_VALUE_MASK(usig, data5,
+ IWL_RX_PHY_DATA5_EHT_TYPE_AND_COMP,
+ IEEE80211_RADIOTAP_EHT_USIG2_MU_B0_B1_PPDU_TYPE);
+ IWL_MLD_ENC_USIG_VALUE_MASK(usig, data5,
+ IWL_RX_PHY_DATA5_EHT_MU_PUNC_CH_CODE,
+ IEEE80211_RADIOTAP_EHT_USIG2_MU_B3_B7_PUNCTURED_INFO);
+ IWL_MLD_ENC_USIG_VALUE_MASK(usig, data4,
+ IWL_RX_PHY_DATA4_EHT_MU_EXT_SIGB_MCS,
+ IEEE80211_RADIOTAP_EHT_USIG2_MU_B9_B10_SIG_MCS);
+ IWL_MLD_ENC_USIG_VALUE_MASK
+ (usig, data1, IWL_RX_PHY_DATA1_EHT_MU_NUM_SIG_SYM_USIGA2,
+ IEEE80211_RADIOTAP_EHT_USIG2_MU_B11_B15_EHT_SIG_SYMBOLS);
+
+ eht->user_info[0] |=
+ cpu_to_le32(IEEE80211_RADIOTAP_EHT_USER_INFO_STA_ID_KNOWN) |
+ LE32_DEC_ENC(data5, IWL_RX_PHY_DATA5_EHT_MU_STA_ID_USR,
+ IEEE80211_RADIOTAP_EHT_USER_INFO_STA_ID);
+
+ eht->known |= cpu_to_le32(IEEE80211_RADIOTAP_EHT_KNOWN_NR_NON_OFDMA_USERS_M);
+ eht->data[7] |= LE32_DEC_ENC
+ (data5, IWL_RX_PHY_DATA5_EHT_MU_NUM_USR_NON_OFDMA,
+ IEEE80211_RADIOTAP_EHT_DATA7_NUM_OF_NON_OFDMA_USERS);
+
+ /*
+ * Hardware labels the content channels/RU allocation values
+ * as follows:
+ * Content Channel 1 Content Channel 2
+ * 20 MHz: A1
+ * 40 MHz: A1 B1
+ * 80 MHz: A1 C1 B1 D1
+ * 160 MHz: A1 C1 A2 C2 B1 D1 B2 D2
+ * 320 MHz: A1 C1 A2 C2 A3 C3 A4 C4 B1 D1 B2 D2 B3 D3 B4 D4
+ *
+ * However firmware can only give us A1-D2, so the higher
+ * frequencies are missing.
+ */
+
+ switch (phy_bw) {
+ case RATE_MCS_CHAN_WIDTH_320:
+ /* additional values are missing in RX metadata */
+ fallthrough;
+ case RATE_MCS_CHAN_WIDTH_160:
+ /* content channel 1 */
+ IWL_MLD_ENC_EHT_RU(1_2_1, A2);
+ IWL_MLD_ENC_EHT_RU(1_2_2, C2);
+ /* content channel 2 */
+ IWL_MLD_ENC_EHT_RU(2_2_1, B2);
+ IWL_MLD_ENC_EHT_RU(2_2_2, D2);
+ fallthrough;
+ case RATE_MCS_CHAN_WIDTH_80:
+ /* content channel 1 */
+ IWL_MLD_ENC_EHT_RU(1_1_2, C1);
+ /* content channel 2 */
+ IWL_MLD_ENC_EHT_RU(2_1_2, D1);
+ fallthrough;
+ case RATE_MCS_CHAN_WIDTH_40:
+ /* content channel 2 */
+ IWL_MLD_ENC_EHT_RU(2_1_1, B1);
+ fallthrough;
+ case RATE_MCS_CHAN_WIDTH_20:
+ IWL_MLD_ENC_EHT_RU(1_1_1, A1);
+ break;
+ }
+ } else {
+ __le32 usig_a1 = phy_data->rx_vec[0];
+ __le32 usig_a2 = phy_data->rx_vec[1];
+
+ IWL_MLD_ENC_USIG_VALUE_MASK(usig, usig_a1,
+ IWL_RX_USIG_A1_DISREGARD,
+ IEEE80211_RADIOTAP_EHT_USIG1_MU_B20_B24_DISREGARD);
+ IWL_MLD_ENC_USIG_VALUE_MASK(usig, usig_a1,
+ IWL_RX_USIG_A1_VALIDATE,
+ IEEE80211_RADIOTAP_EHT_USIG1_MU_B25_VALIDATE);
+ IWL_MLD_ENC_USIG_VALUE_MASK(usig, usig_a2,
+ IWL_RX_USIG_A2_EHT_PPDU_TYPE,
+ IEEE80211_RADIOTAP_EHT_USIG2_MU_B0_B1_PPDU_TYPE);
+ IWL_MLD_ENC_USIG_VALUE_MASK(usig, usig_a2,
+ IWL_RX_USIG_A2_EHT_USIG2_VALIDATE_B2,
+ IEEE80211_RADIOTAP_EHT_USIG2_MU_B2_VALIDATE);
+ IWL_MLD_ENC_USIG_VALUE_MASK(usig, usig_a2,
+ IWL_RX_USIG_A2_EHT_PUNC_CHANNEL,
+ IEEE80211_RADIOTAP_EHT_USIG2_MU_B3_B7_PUNCTURED_INFO);
+ IWL_MLD_ENC_USIG_VALUE_MASK(usig, usig_a2,
+ IWL_RX_USIG_A2_EHT_USIG2_VALIDATE_B8,
+ IEEE80211_RADIOTAP_EHT_USIG2_MU_B8_VALIDATE);
+ IWL_MLD_ENC_USIG_VALUE_MASK(usig, usig_a2,
+ IWL_RX_USIG_A2_EHT_SIG_MCS,
+ IEEE80211_RADIOTAP_EHT_USIG2_MU_B9_B10_SIG_MCS);
+ IWL_MLD_ENC_USIG_VALUE_MASK
+ (usig, usig_a2, IWL_RX_USIG_A2_EHT_SIG_SYM_NUM,
+ IEEE80211_RADIOTAP_EHT_USIG2_MU_B11_B15_EHT_SIG_SYMBOLS);
+ IWL_MLD_ENC_USIG_VALUE_MASK(usig, usig_a2,
+ IWL_RX_USIG_A2_EHT_CRC_OK,
+ IEEE80211_RADIOTAP_EHT_USIG2_MU_B16_B19_CRC);
+ }
+}
+
+static void iwl_mld_decode_eht_ext_tb(struct iwl_mld *mld,
+ struct iwl_mld_rx_phy_data *phy_data,
+ struct ieee80211_rx_status *rx_status,
+ struct ieee80211_radiotap_eht *eht,
+ struct ieee80211_radiotap_eht_usig *usig)
+{
+ if (phy_data->with_data) {
+ __le32 data5 = phy_data->data5;
+
+ IWL_MLD_ENC_USIG_VALUE_MASK(usig, data5,
+ IWL_RX_PHY_DATA5_EHT_TYPE_AND_COMP,
+ IEEE80211_RADIOTAP_EHT_USIG2_TB_B0_B1_PPDU_TYPE);
+ IWL_MLD_ENC_USIG_VALUE_MASK(usig, data5,
+ IWL_RX_PHY_DATA5_EHT_TB_SPATIAL_REUSE1,
+ IEEE80211_RADIOTAP_EHT_USIG2_TB_B3_B6_SPATIAL_REUSE_1);
+
+ IWL_MLD_ENC_USIG_VALUE_MASK(usig, data5,
+ IWL_RX_PHY_DATA5_EHT_TB_SPATIAL_REUSE2,
+ IEEE80211_RADIOTAP_EHT_USIG2_TB_B7_B10_SPATIAL_REUSE_2);
+ } else {
+ __le32 usig_a1 = phy_data->rx_vec[0];
+ __le32 usig_a2 = phy_data->rx_vec[1];
+
+ IWL_MLD_ENC_USIG_VALUE_MASK(usig, usig_a1,
+ IWL_RX_USIG_A1_DISREGARD,
+ IEEE80211_RADIOTAP_EHT_USIG1_TB_B20_B25_DISREGARD);
+ IWL_MLD_ENC_USIG_VALUE_MASK(usig, usig_a2,
+ IWL_RX_USIG_A2_EHT_PPDU_TYPE,
+ IEEE80211_RADIOTAP_EHT_USIG2_TB_B0_B1_PPDU_TYPE);
+ IWL_MLD_ENC_USIG_VALUE_MASK(usig, usig_a2,
+ IWL_RX_USIG_A2_EHT_USIG2_VALIDATE_B2,
+ IEEE80211_RADIOTAP_EHT_USIG2_TB_B2_VALIDATE);
+ IWL_MLD_ENC_USIG_VALUE_MASK(usig, usig_a2,
+ IWL_RX_USIG_A2_EHT_TRIG_SPATIAL_REUSE_1,
+ IEEE80211_RADIOTAP_EHT_USIG2_TB_B3_B6_SPATIAL_REUSE_1);
+ IWL_MLD_ENC_USIG_VALUE_MASK(usig, usig_a2,
+ IWL_RX_USIG_A2_EHT_TRIG_SPATIAL_REUSE_2,
+ IEEE80211_RADIOTAP_EHT_USIG2_TB_B7_B10_SPATIAL_REUSE_2);
+ IWL_MLD_ENC_USIG_VALUE_MASK(usig, usig_a2,
+ IWL_RX_USIG_A2_EHT_TRIG_USIG2_DISREGARD,
+ IEEE80211_RADIOTAP_EHT_USIG2_TB_B11_B15_DISREGARD);
+ IWL_MLD_ENC_USIG_VALUE_MASK(usig, usig_a2,
+ IWL_RX_USIG_A2_EHT_CRC_OK,
+ IEEE80211_RADIOTAP_EHT_USIG2_TB_B16_B19_CRC);
+ }
+}
+
+static void iwl_mld_decode_eht_ru(struct iwl_mld *mld,
+ struct ieee80211_rx_status *rx_status,
+ struct ieee80211_radiotap_eht *eht)
+{
+ u32 ru = le32_get_bits(eht->data[8],
+ IEEE80211_RADIOTAP_EHT_DATA8_RU_ALLOC_TB_FMT_B7_B1);
+ enum nl80211_eht_ru_alloc nl_ru;
+
+ /* Using 802.11be D1.5, Table 9-53a - Encoding of PS160 and RU Allocation
+ * subfields in an EHT variant User Info field
+ */
+
+ switch (ru) {
+ case 0 ... 36:
+ nl_ru = NL80211_RATE_INFO_EHT_RU_ALLOC_26;
+ break;
+ case 37 ... 52:
+ nl_ru = NL80211_RATE_INFO_EHT_RU_ALLOC_52;
+ break;
+ case 53 ... 60:
+ nl_ru = NL80211_RATE_INFO_EHT_RU_ALLOC_106;
+ break;
+ case 61 ... 64:
+ nl_ru = NL80211_RATE_INFO_EHT_RU_ALLOC_242;
+ break;
+ case 65 ... 66:
+ nl_ru = NL80211_RATE_INFO_EHT_RU_ALLOC_484;
+ break;
+ case 67:
+ nl_ru = NL80211_RATE_INFO_EHT_RU_ALLOC_996;
+ break;
+ case 68:
+ nl_ru = NL80211_RATE_INFO_EHT_RU_ALLOC_2x996;
+ break;
+ case 69:
+ nl_ru = NL80211_RATE_INFO_EHT_RU_ALLOC_4x996;
+ break;
+ case 70 ... 81:
+ nl_ru = NL80211_RATE_INFO_EHT_RU_ALLOC_52P26;
+ break;
+ case 82 ... 89:
+ nl_ru = NL80211_RATE_INFO_EHT_RU_ALLOC_106P26;
+ break;
+ case 90 ... 93:
+ nl_ru = NL80211_RATE_INFO_EHT_RU_ALLOC_484P242;
+ break;
+ case 94 ... 95:
+ nl_ru = NL80211_RATE_INFO_EHT_RU_ALLOC_996P484;
+ break;
+ case 96 ... 99:
+ nl_ru = NL80211_RATE_INFO_EHT_RU_ALLOC_996P484P242;
+ break;
+ case 100 ... 103:
+ nl_ru = NL80211_RATE_INFO_EHT_RU_ALLOC_2x996P484;
+ break;
+ case 104:
+ nl_ru = NL80211_RATE_INFO_EHT_RU_ALLOC_3x996;
+ break;
+ case 105 ... 106:
+ nl_ru = NL80211_RATE_INFO_EHT_RU_ALLOC_3x996P484;
+ break;
+ default:
+ return;
+ }
+
+ rx_status->bw = RATE_INFO_BW_EHT_RU;
+ rx_status->eht.ru = nl_ru;
+}
+
+static void iwl_mld_decode_eht_phy_data(struct iwl_mld *mld,
+ struct iwl_mld_rx_phy_data *phy_data,
+ struct ieee80211_rx_status *rx_status,
+ struct ieee80211_radiotap_eht *eht,
+ struct ieee80211_radiotap_eht_usig *usig)
+
+{
+ __le32 data0 = phy_data->data0;
+ __le32 data1 = phy_data->data1;
+ __le32 usig_a1 = phy_data->rx_vec[0];
+ u8 info_type = phy_data->info_type;
+
+ /* Not in EHT range */
+ if (info_type < IWL_RX_PHY_INFO_TYPE_EHT_MU ||
+ info_type > IWL_RX_PHY_INFO_TYPE_EHT_TB_EXT)
+ return;
+
+ usig->common |= cpu_to_le32
+ (IEEE80211_RADIOTAP_EHT_USIG_COMMON_UL_DL_KNOWN |
+ IEEE80211_RADIOTAP_EHT_USIG_COMMON_BSS_COLOR_KNOWN);
+ if (phy_data->with_data) {
+ usig->common |= LE32_DEC_ENC(data0,
+ IWL_RX_PHY_DATA0_EHT_UPLINK,
+ IEEE80211_RADIOTAP_EHT_USIG_COMMON_UL_DL);
+ usig->common |= LE32_DEC_ENC(data0,
+ IWL_RX_PHY_DATA0_EHT_BSS_COLOR_MASK,
+ IEEE80211_RADIOTAP_EHT_USIG_COMMON_BSS_COLOR);
+ } else {
+ usig->common |= LE32_DEC_ENC(usig_a1,
+ IWL_RX_USIG_A1_UL_FLAG,
+ IEEE80211_RADIOTAP_EHT_USIG_COMMON_UL_DL);
+ usig->common |= LE32_DEC_ENC(usig_a1,
+ IWL_RX_USIG_A1_BSS_COLOR,
+ IEEE80211_RADIOTAP_EHT_USIG_COMMON_BSS_COLOR);
+ }
+
+ usig->common |=
+ cpu_to_le32(IEEE80211_RADIOTAP_EHT_USIG_COMMON_VALIDATE_BITS_CHECKED);
+ usig->common |=
+ LE32_DEC_ENC(data0, IWL_RX_PHY_DATA0_EHT_VALIDATE,
+ IEEE80211_RADIOTAP_EHT_USIG_COMMON_VALIDATE_BITS_OK);
+
+ eht->known |= cpu_to_le32(IEEE80211_RADIOTAP_EHT_KNOWN_SPATIAL_REUSE);
+ eht->data[0] |= LE32_DEC_ENC(data0,
+ IWL_RX_PHY_DATA0_ETH_SPATIAL_REUSE_MASK,
+ IEEE80211_RADIOTAP_EHT_DATA0_SPATIAL_REUSE);
+
+ /* All RU allocation size/index values are in TB format */
+ eht->known |= cpu_to_le32(IEEE80211_RADIOTAP_EHT_KNOWN_RU_ALLOC_TB_FMT);
+ eht->data[8] |= LE32_DEC_ENC(data0, IWL_RX_PHY_DATA0_EHT_PS160,
+ IEEE80211_RADIOTAP_EHT_DATA8_RU_ALLOC_TB_FMT_PS_160);
+ eht->data[8] |= LE32_DEC_ENC(data1, IWL_RX_PHY_DATA1_EHT_RU_ALLOC_B0,
+ IEEE80211_RADIOTAP_EHT_DATA8_RU_ALLOC_TB_FMT_B0);
+ eht->data[8] |= LE32_DEC_ENC(data1, IWL_RX_PHY_DATA1_EHT_RU_ALLOC_B1_B7,
+ IEEE80211_RADIOTAP_EHT_DATA8_RU_ALLOC_TB_FMT_B7_B1);
+
+ iwl_mld_decode_eht_ru(mld, rx_status, eht);
+
+ /* We only get here if IWL_RX_MPDU_PHY_TSF_OVERLOAD is set, and that
+ * only happens in monitor mode, so there is no need to check for
+ * monitor mode here.
+ */
+ eht->known |= cpu_to_le32(IEEE80211_RADIOTAP_EHT_KNOWN_PRIMARY_80);
+ eht->data[1] |=
+ le32_encode_bits(mld->monitor.p80,
+ IEEE80211_RADIOTAP_EHT_DATA1_PRIMARY_80);
+
+ usig->common |= cpu_to_le32(IEEE80211_RADIOTAP_EHT_USIG_COMMON_TXOP_KNOWN);
+ if (phy_data->with_data)
+ usig->common |= LE32_DEC_ENC(data0, IWL_RX_PHY_DATA0_EHT_TXOP_DUR_MASK,
+ IEEE80211_RADIOTAP_EHT_USIG_COMMON_TXOP);
+ else
+ usig->common |= LE32_DEC_ENC(usig_a1, IWL_RX_USIG_A1_TXOP_DURATION,
+ IEEE80211_RADIOTAP_EHT_USIG_COMMON_TXOP);
+
+ eht->known |= cpu_to_le32(IEEE80211_RADIOTAP_EHT_KNOWN_LDPC_EXTRA_SYM_OM);
+ eht->data[0] |= LE32_DEC_ENC(data0, IWL_RX_PHY_DATA0_EHT_LDPC_EXT_SYM,
+ IEEE80211_RADIOTAP_EHT_DATA0_LDPC_EXTRA_SYM_OM);
+
+ eht->known |= cpu_to_le32(IEEE80211_RADIOTAP_EHT_KNOWN_PRE_PADD_FACOR_OM);
+ eht->data[0] |= LE32_DEC_ENC(data0, IWL_RX_PHY_DATA0_EHT_PRE_FEC_PAD_MASK,
+ IEEE80211_RADIOTAP_EHT_DATA0_PRE_PADD_FACOR_OM);
+
+ eht->known |= cpu_to_le32(IEEE80211_RADIOTAP_EHT_KNOWN_PE_DISAMBIGUITY_OM);
+ eht->data[0] |= LE32_DEC_ENC(data0, IWL_RX_PHY_DATA0_EHT_PE_DISAMBIG,
+ IEEE80211_RADIOTAP_EHT_DATA0_PE_DISAMBIGUITY_OM);
+
+ /* TODO: what about IWL_RX_PHY_DATA0_EHT_BW320_SLOT */
+
+ if (!le32_get_bits(data0, IWL_RX_PHY_DATA0_EHT_SIGA_CRC_OK))
+ usig->common |= cpu_to_le32(IEEE80211_RADIOTAP_EHT_USIG_COMMON_BAD_USIG_CRC);
+
+ usig->common |= cpu_to_le32(IEEE80211_RADIOTAP_EHT_USIG_COMMON_PHY_VER_KNOWN);
+ usig->common |= LE32_DEC_ENC(data0, IWL_RX_PHY_DATA0_EHT_PHY_VER,
+ IEEE80211_RADIOTAP_EHT_USIG_COMMON_PHY_VER);
+
+ /*
+ * TODO: what about TB - IWL_RX_PHY_DATA1_EHT_TB_PILOT_TYPE,
+ * IWL_RX_PHY_DATA1_EHT_TB_LOW_SS
+ */
+
+ eht->known |= cpu_to_le32(IEEE80211_RADIOTAP_EHT_KNOWN_EHT_LTF);
+ eht->data[0] |= LE32_DEC_ENC(data1, IWL_RX_PHY_DATA1_EHT_SIG_LTF_NUM,
+ IEEE80211_RADIOTAP_EHT_DATA0_EHT_LTF);
+
+ if (info_type == IWL_RX_PHY_INFO_TYPE_EHT_TB_EXT ||
+ info_type == IWL_RX_PHY_INFO_TYPE_EHT_TB)
+ iwl_mld_decode_eht_ext_tb(mld, phy_data, rx_status, eht, usig);
+
+ if (info_type == IWL_RX_PHY_INFO_TYPE_EHT_MU_EXT ||
+ info_type == IWL_RX_PHY_INFO_TYPE_EHT_MU)
+ iwl_mld_decode_eht_ext_mu(mld, phy_data, rx_status, eht, usig);
+}
+
+static void iwl_mld_rx_eht(struct iwl_mld *mld, struct sk_buff *skb,
+ struct iwl_mld_rx_phy_data *phy_data,
+ int queue)
+{
+ struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb);
+ struct ieee80211_radiotap_eht *eht;
+ struct ieee80211_radiotap_eht_usig *usig;
+ size_t eht_len = sizeof(*eht);
+
+ u32 rate_n_flags = phy_data->rate_n_flags;
+ u32 he_type = rate_n_flags & RATE_MCS_HE_TYPE_MSK;
+ /* EHT and HE have the same values for LTF */
+ u8 ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_UNKNOWN;
+ u16 phy_info = phy_data->phy_info;
+ u32 bw;
+
+ /* u32 for 1 user_info */
+ if (phy_data->with_data)
+ eht_len += sizeof(u32);
+
+ eht = iwl_mld_radiotap_put_tlv(skb, IEEE80211_RADIOTAP_EHT, eht_len);
+
+ usig = iwl_mld_radiotap_put_tlv(skb, IEEE80211_RADIOTAP_EHT_USIG,
+ sizeof(*usig));
+ rx_status->flag |= RX_FLAG_RADIOTAP_TLV_AT_END;
+ usig->common |=
+ cpu_to_le32(IEEE80211_RADIOTAP_EHT_USIG_COMMON_BW_KNOWN);
+
+ /* specific handling for 320 MHz: the BW320 slot bit distinguishes
+ * 320MHz-1 from 320MHz-2 in the U-SIG BW field
+ */
+ bw = u32_get_bits(rate_n_flags, RATE_MCS_CHAN_WIDTH_MSK);
+ if (bw == RATE_MCS_CHAN_WIDTH_320_VAL)
+ bw += le32_get_bits(phy_data->data0,
+ IWL_RX_PHY_DATA0_EHT_BW320_SLOT);
+
+ usig->common |= cpu_to_le32
+ (FIELD_PREP(IEEE80211_RADIOTAP_EHT_USIG_COMMON_BW, bw));
+
+ /* report the AMPDU-EOF bit on single frames */
+ if (!queue && !(phy_info & IWL_RX_MPDU_PHY_AMPDU)) {
+ rx_status->flag |= RX_FLAG_AMPDU_DETAILS;
+ rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT_KNOWN;
+ if (phy_data->data0 &
+ cpu_to_le32(IWL_RX_PHY_DATA0_EHT_DELIM_EOF))
+ rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT;
+ }
+
+ if (phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD)
+ iwl_mld_decode_eht_phy_data(mld, phy_data, rx_status, eht, usig);
+
+#define CHECK_TYPE(F) \
+ BUILD_BUG_ON(IEEE80211_RADIOTAP_HE_DATA1_FORMAT_ ## F != \
+ (RATE_MCS_HE_TYPE_ ## F >> RATE_MCS_HE_TYPE_POS))
+
+ CHECK_TYPE(SU);
+ CHECK_TYPE(EXT_SU);
+ CHECK_TYPE(MU);
+ CHECK_TYPE(TRIG);
+
+ switch (u32_get_bits(rate_n_flags, RATE_MCS_HE_GI_LTF_MSK)) {
+ case 0:
+ if (he_type == RATE_MCS_HE_TYPE_TRIG) {
+ rx_status->eht.gi = NL80211_RATE_INFO_EHT_GI_1_6;
+ ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_1X;
+ } else {
+ rx_status->eht.gi = NL80211_RATE_INFO_EHT_GI_0_8;
+ ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_2X;
+ }
+ break;
+ case 1:
+ rx_status->eht.gi = NL80211_RATE_INFO_EHT_GI_1_6;
+ ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_2X;
+ break;
+ case 2:
+ ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_4X;
+ if (he_type == RATE_MCS_HE_TYPE_TRIG)
+ rx_status->eht.gi = NL80211_RATE_INFO_EHT_GI_3_2;
+ else
+ rx_status->eht.gi = NL80211_RATE_INFO_EHT_GI_0_8;
+ break;
+ case 3:
+ if (he_type != RATE_MCS_HE_TYPE_TRIG) {
+ ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_4X;
+ rx_status->eht.gi = NL80211_RATE_INFO_EHT_GI_3_2;
+ }
+ break;
+ default:
+ /* nothing here */
+ break;
+ }
+
+ if (ltf != IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_UNKNOWN) {
+ eht->known |= cpu_to_le32(IEEE80211_RADIOTAP_EHT_KNOWN_GI);
+ eht->data[0] |= cpu_to_le32
+ (FIELD_PREP(IEEE80211_RADIOTAP_EHT_DATA0_LTF,
+ ltf) |
+ FIELD_PREP(IEEE80211_RADIOTAP_EHT_DATA0_GI,
+ rx_status->eht.gi));
+ }
+
+ if (!phy_data->with_data) {
+ eht->known |= cpu_to_le32(IEEE80211_RADIOTAP_EHT_KNOWN_NSS_S |
+ IEEE80211_RADIOTAP_EHT_KNOWN_BEAMFORMED_S);
+ eht->data[7] |=
+ le32_encode_bits(le32_get_bits(phy_data->rx_vec[2],
+ RX_NO_DATA_RX_VEC2_EHT_NSTS_MSK),
+ IEEE80211_RADIOTAP_EHT_DATA7_NSS_S);
+ if (rate_n_flags & RATE_MCS_BF_MSK)
+ eht->data[7] |=
+ cpu_to_le32(IEEE80211_RADIOTAP_EHT_DATA7_BEAMFORMED_S);
+ } else {
+ eht->user_info[0] |=
+ cpu_to_le32(IEEE80211_RADIOTAP_EHT_USER_INFO_MCS_KNOWN |
+ IEEE80211_RADIOTAP_EHT_USER_INFO_CODING_KNOWN |
+ IEEE80211_RADIOTAP_EHT_USER_INFO_NSS_KNOWN_O |
+ IEEE80211_RADIOTAP_EHT_USER_INFO_BEAMFORMING_KNOWN_O |
+ IEEE80211_RADIOTAP_EHT_USER_INFO_DATA_FOR_USER);
+
+ if (rate_n_flags & RATE_MCS_BF_MSK)
+ eht->user_info[0] |=
+ cpu_to_le32(IEEE80211_RADIOTAP_EHT_USER_INFO_BEAMFORMING_O);
+
+ if (rate_n_flags & RATE_MCS_LDPC_MSK)
+ eht->user_info[0] |=
+ cpu_to_le32(IEEE80211_RADIOTAP_EHT_USER_INFO_CODING);
+
+ eht->user_info[0] |= cpu_to_le32
+ (FIELD_PREP(IEEE80211_RADIOTAP_EHT_USER_INFO_MCS,
+ u32_get_bits(rate_n_flags,
+ RATE_VHT_MCS_RATE_CODE_MSK)) |
+ FIELD_PREP(IEEE80211_RADIOTAP_EHT_USER_INFO_NSS_O,
+ u32_get_bits(rate_n_flags,
+ RATE_MCS_NSS_MSK)));
+ }
+}
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+static void iwl_mld_add_rtap_sniffer_config(struct iwl_mld *mld,
+ struct sk_buff *skb)
+{
+ struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb);
+ struct ieee80211_radiotap_vendor_content *radiotap;
+ const u16 vendor_data_len = sizeof(mld->monitor.cur_aid);
+
+ if (!mld->monitor.cur_aid)
+ return;
+
+ radiotap =
+ iwl_mld_radiotap_put_tlv(skb,
+ IEEE80211_RADIOTAP_VENDOR_NAMESPACE,
+ sizeof(*radiotap) + vendor_data_len);
+
+ /* Intel OUI */
+ radiotap->oui[0] = 0xf6;
+ radiotap->oui[1] = 0x54;
+ radiotap->oui[2] = 0x25;
+ /* radiotap sniffer config sub-namespace */
+ radiotap->oui_subtype = 1;
+ radiotap->vendor_type = 0;
+
+ /* fill the data now */
+ memcpy(radiotap->data, &mld->monitor.cur_aid,
+ sizeof(mld->monitor.cur_aid));
+
+ rx_status->flag |= RX_FLAG_RADIOTAP_TLV_AT_END;
+}
+#endif
+
+static void iwl_mld_rx_fill_status(struct iwl_mld *mld, struct sk_buff *skb,
+ struct iwl_mld_rx_phy_data *phy_data,
+ struct iwl_rx_mpdu_desc *mpdu_desc,
+ struct ieee80211_hdr *hdr,
+ int queue)
+{
+ struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb);
+ u32 format = phy_data->rate_n_flags & RATE_MCS_MOD_TYPE_MSK;
+ u32 rate_n_flags = phy_data->rate_n_flags;
+ u8 stbc = u32_get_bits(rate_n_flags, RATE_MCS_STBC_MSK);
+ bool is_sgi = rate_n_flags & RATE_MCS_SGI_MSK;
+
+ if (WARN_ON_ONCE(phy_data->with_data && (!mpdu_desc || !hdr)))
+ return;
+
+ /* Keep packets with CRC errors (and with overrun) for monitor mode
+ * (otherwise the firmware discards them) but mark them as bad.
+ */
+ if (phy_data->with_data &&
+ (!(mpdu_desc->status & cpu_to_le32(IWL_RX_MPDU_STATUS_CRC_OK)) ||
+ !(mpdu_desc->status & cpu_to_le32(IWL_RX_MPDU_STATUS_OVERRUN_OK)))) {
+ IWL_DEBUG_RX(mld, "Bad CRC or FIFO: 0x%08X.\n",
+ le32_to_cpu(mpdu_desc->status));
+ rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
+ }
+
+ phy_data->info_type = IWL_RX_PHY_INFO_TYPE_NONE;
+
+ if (phy_data->with_data &&
+ likely(!(phy_data->phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD))) {
+ rx_status->mactime =
+ le64_to_cpu(mpdu_desc->v3.tsf_on_air_rise);
+
+ /* TSF as indicated by the firmware is at INA time */
+ rx_status->flag |= RX_FLAG_MACTIME_PLCP_START;
+ } else {
+ phy_data->info_type =
+ le32_get_bits(phy_data->data1,
+ IWL_RX_PHY_DATA1_INFO_TYPE_MASK);
+ }
+
+ /* management stuff on default queue */
+ if (!queue && phy_data->with_data &&
+ unlikely(ieee80211_is_beacon(hdr->frame_control) ||
+ ieee80211_is_probe_resp(hdr->frame_control))) {
+ rx_status->boottime_ns = ktime_get_boottime_ns();
+
+ if (mld->scan.pass_all_sched_res == SCHED_SCAN_PASS_ALL_STATE_ENABLED)
+ mld->scan.pass_all_sched_res = SCHED_SCAN_PASS_ALL_STATE_FOUND;
+ }
+
+ /* set the preamble flag if appropriate */
+ if (format == RATE_MCS_CCK_MSK &&
+ phy_data->phy_info & IWL_RX_MPDU_PHY_SHORT_PREAMBLE)
+ rx_status->enc_flags |= RX_ENC_FLAG_SHORTPRE;
+
+ iwl_mld_fill_signal(mld, rx_status, phy_data);
+
+ /* This may be overridden by iwl_mld_rx_he() to HE_RU */
+ switch (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) {
+ case RATE_MCS_CHAN_WIDTH_20:
+ break;
+ case RATE_MCS_CHAN_WIDTH_40:
+ rx_status->bw = RATE_INFO_BW_40;
+ break;
+ case RATE_MCS_CHAN_WIDTH_80:
+ rx_status->bw = RATE_INFO_BW_80;
+ break;
+ case RATE_MCS_CHAN_WIDTH_160:
+ rx_status->bw = RATE_INFO_BW_160;
+ break;
+ case RATE_MCS_CHAN_WIDTH_320:
+ rx_status->bw = RATE_INFO_BW_320;
+ break;
+ }
+
+ /* must be before L-SIG data */
+ if (format == RATE_MCS_HE_MSK)
+ iwl_mld_rx_he(mld, skb, phy_data, queue);
+
+ iwl_mld_decode_lsig(skb, phy_data);
+
+ rx_status->device_timestamp = phy_data->gp2_on_air_rise;
+
+ /* EHT data uses the TLV format and must come after all fixed-length fields */
+ if (format == RATE_MCS_EHT_MSK)
+ iwl_mld_rx_eht(mld, skb, phy_data, queue);
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ if (unlikely(mld->monitor.on))
+ iwl_mld_add_rtap_sniffer_config(mld, skb);
+#endif
+
+ if (format != RATE_MCS_CCK_MSK && is_sgi)
+ rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
+
+ if (rate_n_flags & RATE_MCS_LDPC_MSK)
+ rx_status->enc_flags |= RX_ENC_FLAG_LDPC;
+
+ switch (format) {
+ case RATE_MCS_HT_MSK:
+ rx_status->encoding = RX_ENC_HT;
+ rx_status->rate_idx = RATE_HT_MCS_INDEX(rate_n_flags);
+ rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT;
+ break;
+ case RATE_MCS_VHT_MSK:
+ case RATE_MCS_HE_MSK:
+ case RATE_MCS_EHT_MSK:
+ if (format == RATE_MCS_VHT_MSK) {
+ rx_status->encoding = RX_ENC_VHT;
+ } else if (format == RATE_MCS_HE_MSK) {
+ rx_status->encoding = RX_ENC_HE;
+ rx_status->he_dcm =
+ !!(rate_n_flags & RATE_HE_DUAL_CARRIER_MODE_MSK);
+ } else if (format == RATE_MCS_EHT_MSK) {
+ rx_status->encoding = RX_ENC_EHT;
+ }
+
+ rx_status->nss = u32_get_bits(rate_n_flags, RATE_MCS_NSS_MSK) + 1;
+ rx_status->rate_idx = rate_n_flags & RATE_MCS_CODE_MSK;
+ rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT;
+ break;
+ default: {
+ int rate =
+ iwl_mld_legacy_hw_idx_to_mac80211_idx(rate_n_flags,
+ rx_status->band);
+
+ /* valid rate */
+ if (rate >= 0 && rate <= 0xFF) {
+ rx_status->rate_idx = rate;
+ break;
+ }
+
+ /* invalid rate */
+ rx_status->rate_idx = 0;
+
+ if (net_ratelimit())
+ IWL_ERR(mld, "invalid rate_n_flags=0x%x, band=%d\n",
+ rate_n_flags, rx_status->band);
+ break;
+ }
+ }
+}
+
+/* iwl_mld_build_rx_skb adds the rxb to a new skb */
+static int iwl_mld_build_rx_skb(struct iwl_mld *mld, struct sk_buff *skb,
+ struct ieee80211_hdr *hdr, u16 len,
+ u8 crypt_len, struct iwl_rx_cmd_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_rx_mpdu_desc *desc = (void *)pkt->data;
+ unsigned int headlen, fraglen, pad_len = 0;
+ unsigned int hdrlen = ieee80211_hdrlen(hdr->frame_control);
+ u8 mic_crc_len = u8_get_bits(desc->mac_flags1,
+ IWL_RX_MPDU_MFLG1_MIC_CRC_LEN_MASK) << 1;
+
+ if (desc->mac_flags2 & IWL_RX_MPDU_MFLG2_PAD) {
+ len -= 2;
+ pad_len = 2;
+ }
+
+ /* For a non-monitor interface, strip the bytes the RADA might not have
+ * removed (it might be disabled, e.g. for mgmt frames). Since a monitor
+ * interface cannot exist alongside other interfaces, this removal is
+ * safe and sufficient; in monitor mode there's no decryption being done
+ * anyway.
+ */
+ if (len > mic_crc_len && !ieee80211_hw_check(mld->hw, RX_INCLUDES_FCS))
+ len -= mic_crc_len;
+
+ /* If frame is small enough to fit in skb->head, pull it completely.
+ * If not, only pull ieee80211_hdr (including crypto if present, and
+ * an additional 8 bytes for SNAP/ethertype, see below) so that
+ * splice() or TCP coalesce are more efficient.
+ *
+ * Since, in addition, ieee80211_data_to_8023() always pulls in at
+ * least 8 bytes (possibly more for mesh) we can do the same here
+ * to save the cost of doing it later. That still doesn't pull in
+ * the actual IP header since the typical case has a SNAP header.
+ * If the latter changes (there are efforts in the standards group
+ * to do so) we should revisit this and ieee80211_data_to_8023().
+ */
+ headlen = (len <= skb_tailroom(skb)) ? len : hdrlen + crypt_len + 8;
+
+ /* The firmware may align the packet to DWORD.
+ * The padding is inserted after the IV.
+ * After copying the header + IV skip the padding if
+ * present before copying packet data.
+ */
+ hdrlen += crypt_len;
+
+ if (unlikely(headlen < hdrlen))
+ return -EINVAL;
+
+ /* skb_put_data() doesn't move the existing data when adding to the
+ * skb, and that is the only way we add data here, so data + len is
+ * the place where hdr will be put.
+ */
+ skb_set_mac_header(skb, skb->len);
+ skb_put_data(skb, hdr, hdrlen);
+ skb_put_data(skb, (u8 *)hdr + hdrlen + pad_len, headlen - hdrlen);
+
+ if (skb->ip_summed == CHECKSUM_COMPLETE) {
+ struct {
+ u8 hdr[6];
+ __be16 type;
+ } __packed *shdr = (void *)((u8 *)hdr + hdrlen + pad_len);
+
+ if (unlikely(headlen - hdrlen < sizeof(*shdr) ||
+ !ether_addr_equal(shdr->hdr, rfc1042_header) ||
+ (shdr->type != htons(ETH_P_IP) &&
+ shdr->type != htons(ETH_P_ARP) &&
+ shdr->type != htons(ETH_P_IPV6) &&
+ shdr->type != htons(ETH_P_8021Q) &&
+ shdr->type != htons(ETH_P_PAE) &&
+ shdr->type != htons(ETH_P_TDLS))))
+ skb->ip_summed = CHECKSUM_NONE;
+ }
+
+ fraglen = len - headlen;
+
+ if (fraglen) {
+ int offset = (u8 *)hdr + headlen + pad_len -
+ (u8 *)rxb_addr(rxb) + rxb_offset(rxb);
+
+ skb_add_rx_frag(skb, 0, rxb_steal_page(rxb), offset,
+ fraglen, rxb->truesize);
+ }
+
+ return 0;
+}
+
+/* Returns true if a packet is a duplicate or has an invalid TID and
+ * should be dropped. Updates A-MSDU PN tracking info.
+ */
+VISIBLE_IF_IWLWIFI_KUNIT
+bool
+iwl_mld_is_dup(struct iwl_mld *mld, struct ieee80211_sta *sta,
+ struct ieee80211_hdr *hdr,
+ const struct iwl_rx_mpdu_desc *mpdu_desc,
+ struct ieee80211_rx_status *rx_status, int queue)
+{
+ struct iwl_mld_sta *mld_sta;
+ struct iwl_mld_rxq_dup_data *dup_data;
+ u8 tid, sub_frame_idx;
+
+ if (WARN_ON(!sta))
+ return false;
+
+ mld_sta = iwl_mld_sta_from_mac80211(sta);
+
+ if (WARN_ON_ONCE(!mld_sta->dup_data))
+ return false;
+
+ dup_data = &mld_sta->dup_data[queue];
+
+ /* Drop duplicate 802.11 retransmissions
+ * (IEEE 802.11-2020: 10.3.2.14 "Duplicate detection and recovery")
+ */
+ if (ieee80211_is_ctl(hdr->frame_control) ||
+ ieee80211_is_any_nullfunc(hdr->frame_control) ||
+ is_multicast_ether_addr(hdr->addr1))
+ return false;
+
+ if (ieee80211_is_data_qos(hdr->frame_control)) {
+ /* frame has qos control */
+ tid = ieee80211_get_tid(hdr);
+ if (tid >= IWL_MAX_TID_COUNT)
+ return true;
+ } else {
+ tid = IWL_MAX_TID_COUNT;
+ }
+
+ /* If this wasn't a part of an A-MSDU the sub-frame index will be 0 */
+ sub_frame_idx = mpdu_desc->amsdu_info &
+ IWL_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK;
+
+ if (IWL_FW_CHECK(mld,
+ sub_frame_idx > 0 &&
+ !(mpdu_desc->mac_flags2 & IWL_RX_MPDU_MFLG2_AMSDU),
+ "got sub_frame_idx=%d but A-MSDU flag is not set\n",
+ sub_frame_idx))
+ return true;
+
+ if (unlikely(ieee80211_has_retry(hdr->frame_control) &&
+ dup_data->last_seq[tid] == hdr->seq_ctrl &&
+ dup_data->last_sub_frame_idx[tid] >= sub_frame_idx))
+ return true;
+
+ /* Allow same PN as the first subframe for following sub frames */
+ if (dup_data->last_seq[tid] == hdr->seq_ctrl &&
+ sub_frame_idx > dup_data->last_sub_frame_idx[tid])
+ rx_status->flag |= RX_FLAG_ALLOW_SAME_PN;
+
+ dup_data->last_seq[tid] = hdr->seq_ctrl;
+ dup_data->last_sub_frame_idx[tid] = sub_frame_idx;
+
+ rx_status->flag |= RX_FLAG_DUP_VALIDATED;
+
+ return false;
+}
+EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_mld_is_dup);
+
+static void iwl_mld_update_last_rx_timestamp(struct iwl_mld *mld, u8 baid)
+{
+ unsigned long now = jiffies;
+ unsigned long timeout;
+ struct iwl_mld_baid_data *ba_data;
+
+ ba_data = rcu_dereference(mld->fw_id_to_ba[baid]);
+ if (!ba_data) {
+ IWL_DEBUG_HT(mld, "BAID %d not found in map\n", baid);
+ return;
+ }
+
+ if (!ba_data->timeout)
+ return;
+
+ /* To minimize cache bouncing between RX queues, avoid frequent updates
+ * to last_rx_timestamp. Update it only when the timeout period has
+ * passed. The worst-case scenario is the session expiring after
+ * approximately 2 * timeout, which is negligible (the update is
+ * atomic).
+ */
+ timeout = TU_TO_JIFFIES(ba_data->timeout);
+ if (time_is_before_jiffies(ba_data->last_rx_timestamp + timeout))
+ ba_data->last_rx_timestamp = now;
+}
+
+/* Processes received packets for a station.
+ * Sets *drop to true if the packet should be dropped.
+ * Returns the station if found, or NULL otherwise.
+ */
+static struct ieee80211_sta *
+iwl_mld_rx_with_sta(struct iwl_mld *mld, struct ieee80211_hdr *hdr,
+ struct sk_buff *skb,
+ const struct iwl_rx_mpdu_desc *mpdu_desc,
+ const struct iwl_rx_packet *pkt, int queue, bool *drop)
+{
+ struct ieee80211_sta *sta = NULL;
+ struct ieee80211_link_sta *link_sta = NULL;
+ struct ieee80211_rx_status *rx_status;
+ u8 baid;
+
+ if (mpdu_desc->status & cpu_to_le32(IWL_RX_MPDU_STATUS_SRC_STA_FOUND)) {
+ u8 sta_id = le32_get_bits(mpdu_desc->status,
+ IWL_RX_MPDU_STATUS_STA_ID);
+
+ if (IWL_FW_CHECK(mld,
+ sta_id >= mld->fw->ucode_capa.num_stations,
+ "rx_mpdu: invalid sta_id %d\n", sta_id))
+ return NULL;
+
+ link_sta = rcu_dereference(mld->fw_id_to_link_sta[sta_id]);
+ if (!IS_ERR_OR_NULL(link_sta))
+ sta = link_sta->sta;
+ } else if (!is_multicast_ether_addr(hdr->addr2)) {
+ /* Passing NULL is fine since we prevent two stations with the
+ * same address from being added.
+ */
+ sta = ieee80211_find_sta_by_ifaddr(mld->hw, hdr->addr2, NULL);
+ }
+
+ /* we may not have any station yet */
+ if (!sta)
+ return NULL;
+
+ rx_status = IEEE80211_SKB_RXCB(skb);
+
+ if (link_sta && sta->valid_links) {
+ rx_status->link_valid = true;
+ rx_status->link_id = link_sta->link_id;
+ }
+
+ /* fill checksum */
+ if (ieee80211_is_data(hdr->frame_control) &&
+ pkt->len_n_flags & cpu_to_le32(FH_RSCSR_RPA_EN)) {
+ u16 hwsum = be16_to_cpu(mpdu_desc->v3.raw_xsum);
+
+ skb->ip_summed = CHECKSUM_COMPLETE;
+ skb->csum = csum_unfold(~(__force __sum16)hwsum);
+ }
+
+ if (iwl_mld_is_dup(mld, sta, hdr, mpdu_desc, rx_status, queue)) {
+ IWL_DEBUG_DROP(mld, "Dropping duplicate packet 0x%x\n",
+ le16_to_cpu(hdr->seq_ctrl));
+ *drop = true;
+ return NULL;
+ }
+
+ baid = le32_get_bits(mpdu_desc->reorder_data,
+ IWL_RX_MPDU_REORDER_BAID_MASK);
+ if (baid != IWL_RX_REORDER_DATA_INVALID_BAID)
+ iwl_mld_update_last_rx_timestamp(mld, baid);
+
+ if (link_sta && ieee80211_is_data(hdr->frame_control)) {
+ u8 sub_frame_idx = mpdu_desc->amsdu_info &
+ IWL_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK;
+
+ /* 0 means not an A-MSDU, and 1 means a new A-MSDU */
+ if (!sub_frame_idx || sub_frame_idx == 1)
+ iwl_mld_count_mpdu_rx(link_sta, queue, 1);
+
+ if (!is_multicast_ether_addr(hdr->addr1))
+ iwl_mld_low_latency_update_counters(mld, hdr, sta,
+ queue);
+ }
+
+ return sta;
+}
+
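+/* length in bytes of the key ID field inside the beacon's management MIC element */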
+#define KEY_IDX_LEN 2
+
+static int iwl_mld_rx_mgmt_prot(struct ieee80211_sta *sta,
+ struct ieee80211_hdr *hdr,
+ struct ieee80211_rx_status *rx_status,
+ u32 mpdu_status,
+ u32 mpdu_len)
+{
+ struct wireless_dev *wdev;
+ struct iwl_mld_sta *mld_sta;
+ struct iwl_mld_vif *mld_vif;
+ u8 keyidx;
+ struct ieee80211_key_conf *key;
+ const u8 *frame = (void *)hdr;
+
+ if ((mpdu_status & IWL_RX_MPDU_STATUS_SEC_MASK) ==
+ IWL_RX_MPDU_STATUS_SEC_NONE)
+ return 0;
+
+ /* For non-beacon frames, we don't really care. But beacons may
+ * be filtered out, and we thus need the firmware's replay
+ * detection, otherwise beacons the firmware previously
+ * filtered could be replayed, or something like that, and
+ * it can filter a lot - though usually only if nothing has
+ * changed.
+ */
+ if (!ieee80211_is_beacon(hdr->frame_control))
+ return 0;
+
+ if (!sta)
+ return -1;
+
+ mld_sta = iwl_mld_sta_from_mac80211(sta);
+ mld_vif = iwl_mld_vif_from_mac80211(mld_sta->vif);
+
+ /* key mismatch - will also report !MIC_OK but we shouldn't count it */
+ if (!(mpdu_status & IWL_RX_MPDU_STATUS_KEY_VALID))
+ goto report;
+
+ /* good cases */
+ if (likely(mpdu_status & IWL_RX_MPDU_STATUS_MIC_OK &&
+ !(mpdu_status & IWL_RX_MPDU_STATUS_REPLAY_ERROR))) {
+ rx_status->flag |= RX_FLAG_DECRYPTED;
+ return 0;
+ }
+
+ /* both keys will have the same cipher and MIC length, use
+ * whichever one is available
+ */
+ key = rcu_dereference(mld_vif->bigtks[0]);
+ if (!key) {
+ key = rcu_dereference(mld_vif->bigtks[1]);
+ if (!key)
+ goto report;
+ }
+
+ if (mpdu_len < key->icv_len + IEEE80211_GMAC_PN_LEN + KEY_IDX_LEN)
+ goto report;
+
+ /* get the real key ID */
+ keyidx = frame[mpdu_len - key->icv_len - IEEE80211_GMAC_PN_LEN - KEY_IDX_LEN];
+ /* and if that's the other key, look it up */
+ if (keyidx != key->keyidx) {
+ /* shouldn't happen since firmware checked, but be safe
+ * in case the MIC length is wrong too, for example
+ */
+ if (keyidx != 6 && keyidx != 7)
+ return -1;
+
+ key = rcu_dereference(mld_vif->bigtks[keyidx - 6]);
+ if (!key)
+ goto report;
+ }
+
+ /* Report status to mac80211 */
+ if (!(mpdu_status & IWL_RX_MPDU_STATUS_MIC_OK))
+ ieee80211_key_mic_failure(key);
+ else if (mpdu_status & IWL_RX_MPDU_STATUS_REPLAY_ERROR)
+ ieee80211_key_replay(key);
+report:
+ wdev = ieee80211_vif_to_wdev(mld_sta->vif);
+ if (wdev->netdev)
+ cfg80211_rx_unprot_mlme_mgmt(wdev->netdev, (void *)hdr,
+ mpdu_len);
+
+ return -1;
+}
+
+static int iwl_mld_rx_crypto(struct iwl_mld *mld,
+ struct ieee80211_sta *sta,
+ struct ieee80211_hdr *hdr,
+ struct ieee80211_rx_status *rx_status,
+ struct iwl_rx_mpdu_desc *desc, int queue,
+ u32 pkt_flags, u8 *crypto_len)
+{
+ u32 status = le32_to_cpu(desc->status);
+
+ if (unlikely(ieee80211_is_mgmt(hdr->frame_control) &&
+ !ieee80211_has_protected(hdr->frame_control)))
+ return iwl_mld_rx_mgmt_prot(sta, hdr, rx_status, status,
+ le16_to_cpu(desc->mpdu_len));
+
+ if (!ieee80211_has_protected(hdr->frame_control) ||
+ (status & IWL_RX_MPDU_STATUS_SEC_MASK) ==
+ IWL_RX_MPDU_STATUS_SEC_NONE)
+ return 0;
+
+ switch (status & IWL_RX_MPDU_STATUS_SEC_MASK) {
+ case IWL_RX_MPDU_STATUS_SEC_CCM:
+ case IWL_RX_MPDU_STATUS_SEC_GCM:
+ BUILD_BUG_ON(IEEE80211_CCMP_PN_LEN != IEEE80211_GCMP_PN_LEN);
+ if (!(status & IWL_RX_MPDU_STATUS_MIC_OK)) {
+ IWL_DEBUG_DROP(mld,
+ "Dropping packet, bad MIC (CCM/GCM)\n");
+ return -1;
+ }
+
+ rx_status->flag |= RX_FLAG_DECRYPTED | RX_FLAG_MIC_STRIPPED;
+ *crypto_len = IEEE80211_CCMP_HDR_LEN;
+ return 0;
+ case IWL_RX_MPDU_STATUS_SEC_TKIP:
+ if (!(status & IWL_RX_MPDU_STATUS_ICV_OK))
+ return -1;
+
+ if (!(status & RX_MPDU_RES_STATUS_MIC_OK))
+ rx_status->flag |= RX_FLAG_MMIC_ERROR;
+
+ if (pkt_flags & FH_RSCSR_RADA_EN) {
+ rx_status->flag |= RX_FLAG_ICV_STRIPPED;
+ rx_status->flag |= RX_FLAG_MMIC_STRIPPED;
+ }
+
+ *crypto_len = IEEE80211_TKIP_IV_LEN;
+ rx_status->flag |= RX_FLAG_DECRYPTED;
+ return 0;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static void iwl_mld_rx_update_ampdu_ref(struct iwl_mld *mld,
+ struct iwl_mld_rx_phy_data *phy_data,
+ struct ieee80211_rx_status *rx_status)
+{
+ bool toggle_bit =
+ phy_data->phy_info & IWL_RX_MPDU_PHY_AMPDU_TOGGLE;
+
+ rx_status->flag |= RX_FLAG_AMPDU_DETAILS;
+ /* Toggle is switched whenever new aggregation starts. Make
+ * sure ampdu_reference is never 0 so we can later use it to
+ * see if the frame was really part of an A-MPDU or not.
+ */
+ if (toggle_bit != mld->monitor.ampdu_toggle) {
+ mld->monitor.ampdu_ref++;
+ if (mld->monitor.ampdu_ref == 0)
+ mld->monitor.ampdu_ref++;
+ mld->monitor.ampdu_toggle = toggle_bit;
+ phy_data->first_subframe = true;
+ }
+ rx_status->ampdu_reference = mld->monitor.ampdu_ref;
+}
+
+static void
+iwl_mld_fill_rx_status_band_freq(struct iwl_mld_rx_phy_data *phy_data,
+ struct iwl_rx_mpdu_desc *mpdu_desc,
+ struct ieee80211_rx_status *rx_status)
+{
+ enum nl80211_band band;
+
+ band = BAND_IN_RX_STATUS(mpdu_desc->mac_phy_idx);
+ rx_status->band = iwl_mld_phy_band_to_nl80211(band);
+ rx_status->freq = ieee80211_channel_to_frequency(phy_data->channel,
+ rx_status->band);
+}
+
+void iwl_mld_rx_mpdu(struct iwl_mld *mld, struct napi_struct *napi,
+ struct iwl_rx_cmd_buffer *rxb, int queue)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_mld_rx_phy_data phy_data = {};
+ struct iwl_rx_mpdu_desc *mpdu_desc = (void *)pkt->data;
+ struct ieee80211_sta *sta;
+ struct ieee80211_hdr *hdr;
+ struct sk_buff *skb;
+ size_t mpdu_desc_size = sizeof(*mpdu_desc);
+ bool drop = false;
+ u8 crypto_len = 0;
+ u32 pkt_len = iwl_rx_packet_payload_len(pkt);
+ u32 mpdu_len;
+ enum iwl_mld_reorder_result reorder_res;
+ struct ieee80211_rx_status *rx_status;
+
+ if (unlikely(mld->fw_status.in_hw_restart))
+ return;
+
+ if (IWL_FW_CHECK(mld, pkt_len < mpdu_desc_size,
+ "Bad REPLY_RX_MPDU_CMD size (%d)\n", pkt_len))
+ return;
+
+ mpdu_len = le16_to_cpu(mpdu_desc->mpdu_len);
+
+ if (IWL_FW_CHECK(mld, mpdu_len + mpdu_desc_size > pkt_len,
+ "FW lied about packet len (%d)\n", pkt_len))
+ return;
+
+ /* Don't use dev_alloc_skb(), we'll have enough headroom once
+ * the ieee80211_hdr is pulled.
+ */
+ skb = alloc_skb(128, GFP_ATOMIC);
+ if (!skb) {
+ IWL_ERR(mld, "alloc_skb failed\n");
+ return;
+ }
+
+ hdr = (void *)(pkt->data + mpdu_desc_size);
+
+ iwl_mld_fill_phy_data(mpdu_desc, &phy_data);
+
+ if (mpdu_desc->mac_flags2 & IWL_RX_MPDU_MFLG2_PAD) {
+ /* If the device inserted padding it means that (it thought)
+ * the 802.11 header wasn't a multiple of 4 bytes long. In
+ * this case, reserve two bytes at the start of the SKB to
+ * align the payload properly in case we end up copying it.
+ */
+ skb_reserve(skb, 2);
+ }
+
+ rx_status = IEEE80211_SKB_RXCB(skb);
+
+ /* this is needed early */
+ iwl_mld_fill_rx_status_band_freq(&phy_data, mpdu_desc, rx_status);
+
+ rcu_read_lock();
+
+ sta = iwl_mld_rx_with_sta(mld, hdr, skb, mpdu_desc, pkt, queue, &drop);
+ if (drop)
+ goto drop;
+
+ /* update aggregation data for the monitor's sake on the default queue */
+ if (!queue && (phy_data.phy_info & IWL_RX_MPDU_PHY_AMPDU))
+ iwl_mld_rx_update_ampdu_ref(mld, &phy_data, rx_status);
+
+ iwl_mld_rx_fill_status(mld, skb, &phy_data, mpdu_desc, hdr, queue);
+
+ if (iwl_mld_rx_crypto(mld, sta, hdr, rx_status, mpdu_desc, queue,
+ le32_to_cpu(pkt->len_n_flags), &crypto_len))
+ goto drop;
+
+ if (iwl_mld_build_rx_skb(mld, skb, hdr, mpdu_len, crypto_len, rxb))
+ goto drop;
+
+ /* time sync frame is saved and will be released later when the
+ * notification with the timestamps arrives.
+ */
+ if (iwl_mld_time_sync_frame(mld, skb, hdr->addr2))
+ goto out;
+
+ reorder_res = iwl_mld_reorder(mld, napi, queue, sta, skb, mpdu_desc);
+ switch (reorder_res) {
+ case IWL_MLD_PASS_SKB:
+ break;
+ case IWL_MLD_DROP_SKB:
+ goto drop;
+ case IWL_MLD_BUFFERED_SKB:
+ goto out;
+ default:
+ WARN_ON(1);
+ goto drop;
+ }
+
+ iwl_mld_pass_packet_to_mac80211(mld, napi, skb, queue, sta);
+
+ goto out;
+
+drop:
+ kfree_skb(skb);
+out:
+ rcu_read_unlock();
+}
+
+#define SYNC_RX_QUEUE_TIMEOUT (HZ)
+void iwl_mld_sync_rx_queues(struct iwl_mld *mld,
+ enum iwl_mld_internal_rxq_notif_type type,
+ const void *notif_payload, u32 notif_payload_size)
+{
+ u8 num_rx_queues = mld->trans->num_rx_queues;
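+ /* the internal notification is appended as the payload of the RXQ
+ * sync command; the firmware echoes it back on all RX queues so each
+ * queue can process it in iwl_mld_handle_rx_queues_sync_notif()
+ */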
+ struct {
+ struct iwl_rxq_sync_cmd sync_cmd;
+ struct iwl_mld_internal_rxq_notif notif;
+ } __packed cmd = {
+ .sync_cmd.rxq_mask = cpu_to_le32(BIT(num_rx_queues) - 1),
+ .sync_cmd.count =
+ cpu_to_le32(sizeof(struct iwl_mld_internal_rxq_notif) +
+ notif_payload_size),
+ .notif.type = type,
+ .notif.cookie = mld->rxq_sync.cookie,
+ };
+ struct iwl_host_cmd hcmd = {
+ .id = WIDE_ID(DATA_PATH_GROUP, TRIGGER_RX_QUEUES_NOTIF_CMD),
+ .data[0] = &cmd,
+ .len[0] = sizeof(cmd),
+ .data[1] = notif_payload,
+ .len[1] = notif_payload_size,
+ };
+ int ret;
+
+ /* size must be a multiple of DWORD */
+ if (WARN_ON(cmd.sync_cmd.count & cpu_to_le32(3)))
+ return;
+
+ mld->rxq_sync.state = (1 << num_rx_queues) - 1;
+
+ ret = iwl_mld_send_cmd(mld, &hcmd);
+ if (ret) {
+ IWL_ERR(mld, "Failed to trigger RX queues sync (%d)\n", ret);
+ goto out;
+ }
+
+ ret = wait_event_timeout(mld->rxq_sync.waitq,
+ READ_ONCE(mld->rxq_sync.state) == 0,
+ SYNC_RX_QUEUE_TIMEOUT);
+ WARN_ONCE(!ret, "RXQ sync failed: state=0x%lx, cookie=%d\n",
+ mld->rxq_sync.state, mld->rxq_sync.cookie);
+
+out:
+ mld->rxq_sync.state = 0;
+ mld->rxq_sync.cookie++;
+}
+
+void iwl_mld_handle_rx_queues_sync_notif(struct iwl_mld *mld,
+ struct napi_struct *napi,
+ struct iwl_rx_packet *pkt, int queue)
+{
+ struct iwl_rxq_sync_notification *notif;
+ struct iwl_mld_internal_rxq_notif *internal_notif;
+ u32 len = iwl_rx_packet_payload_len(pkt);
+ size_t combined_notif_len = sizeof(*notif) + sizeof(*internal_notif);
+
+ notif = (void *)pkt->data;
+ internal_notif = (void *)notif->payload;
+
+ if (IWL_FW_CHECK(mld, len < combined_notif_len,
+ "invalid notification size %u (%zu)\n",
+ len, combined_notif_len))
+ return;
+
+ len -= combined_notif_len;
+
+ if (IWL_FW_CHECK(mld, mld->rxq_sync.cookie != internal_notif->cookie,
+ "received expired RX queue sync message (cookie=%d expected=%d q[%d])\n",
+ internal_notif->cookie, mld->rxq_sync.cookie, queue))
+ return;
+
+ switch (internal_notif->type) {
+ case IWL_MLD_RXQ_EMPTY:
+ IWL_FW_CHECK(mld, len,
+ "invalid empty notification size %d\n", len);
+ break;
+ case IWL_MLD_RXQ_NOTIF_DEL_BA:
+ if (IWL_FW_CHECK(mld, len != sizeof(struct iwl_mld_delba_data),
+ "invalid delba notification size %u (%zu)\n",
+ len, sizeof(struct iwl_mld_delba_data)))
+ break;
+ iwl_mld_del_ba(mld, queue, (void *)internal_notif->payload);
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ }
+
+ IWL_FW_CHECK(mld, !test_and_clear_bit(queue, &mld->rxq_sync.state),
+ "RXQ sync: queue %d responded a second time!\n", queue);
+
+ if (READ_ONCE(mld->rxq_sync.state) == 0)
+ wake_up(&mld->rxq_sync.waitq);
+}
+
+void iwl_mld_rx_monitor_no_data(struct iwl_mld *mld, struct napi_struct *napi,
+ struct iwl_rx_packet *pkt, int queue)
+{
+ struct iwl_rx_no_data_ver_3 *desc;
+ struct iwl_mld_rx_phy_data phy_data;
+ struct ieee80211_rx_status *rx_status;
+ struct sk_buff *skb;
+ u32 format, rssi;
+
+ if (unlikely(mld->fw_status.in_hw_restart))
+ return;
+
+ if (IWL_FW_CHECK(mld, iwl_rx_packet_payload_len(pkt) < sizeof(*desc),
+ "Bad RX_NO_DATA_NOTIF size (%d)\n",
+ iwl_rx_packet_payload_len(pkt)))
+ return;
+
+ desc = (void *)pkt->data;
+
+ rssi = le32_to_cpu(desc->rssi);
+ phy_data.energy_a = u32_get_bits(rssi, RX_NO_DATA_CHAIN_A_MSK);
+ phy_data.energy_b = u32_get_bits(rssi, RX_NO_DATA_CHAIN_B_MSK);
+ phy_data.channel = u32_get_bits(rssi, RX_NO_DATA_CHANNEL_MSK);
+ phy_data.data0 = desc->phy_info[0];
+ phy_data.data1 = desc->phy_info[1];
+ phy_data.phy_info = IWL_RX_MPDU_PHY_TSF_OVERLOAD;
+ phy_data.gp2_on_air_rise = le32_to_cpu(desc->on_air_rise_time);
+ phy_data.rate_n_flags = le32_to_cpu(desc->rate);
+ phy_data.with_data = false;
+
+ BUILD_BUG_ON(sizeof(phy_data.rx_vec) != sizeof(desc->rx_vec));
+ memcpy(phy_data.rx_vec, desc->rx_vec, sizeof(phy_data.rx_vec));
+
+ format = phy_data.rate_n_flags & RATE_MCS_MOD_TYPE_MSK;
+
+ /* Don't use dev_alloc_skb(), we'll have enough headroom once
+ * ieee80211_hdr pulled.
+ */
+ skb = alloc_skb(128, GFP_ATOMIC);
+ if (!skb) {
+ IWL_ERR(mld, "alloc_skb failed\n");
+ return;
+ }
+
+ rx_status = IEEE80211_SKB_RXCB(skb);
+
+ /* 0-length PSDU */
+ rx_status->flag |= RX_FLAG_NO_PSDU;
+
+ /* mark as failed PLCP on any errors to skip checks in mac80211 */
+ if (le32_get_bits(desc->info, RX_NO_DATA_INFO_ERR_MSK) !=
+ RX_NO_DATA_INFO_ERR_NONE)
+ rx_status->flag |= RX_FLAG_FAILED_PLCP_CRC;
+
+ switch (le32_get_bits(desc->info, RX_NO_DATA_INFO_TYPE_MSK)) {
+ case RX_NO_DATA_INFO_TYPE_NDP:
+ rx_status->zero_length_psdu_type =
+ IEEE80211_RADIOTAP_ZERO_LEN_PSDU_SOUNDING;
+ break;
+ case RX_NO_DATA_INFO_TYPE_MU_UNMATCHED:
+ case RX_NO_DATA_INFO_TYPE_TB_UNMATCHED:
+ rx_status->zero_length_psdu_type =
+ IEEE80211_RADIOTAP_ZERO_LEN_PSDU_NOT_CAPTURED;
+ break;
+ default:
+ rx_status->zero_length_psdu_type =
+ IEEE80211_RADIOTAP_ZERO_LEN_PSDU_VENDOR;
+ break;
+ }
+
+ rx_status->band = phy_data.channel > 14 ? NL80211_BAND_5GHZ :
+ NL80211_BAND_2GHZ;
+
+ rx_status->freq = ieee80211_channel_to_frequency(phy_data.channel,
+ rx_status->band);
+
+ iwl_mld_rx_fill_status(mld, skb, &phy_data, NULL, NULL, queue);
+
+ /* No more radiotap info should be added after this point.
+ * Mark it as mac header for upper layers to know where
+ * the radiotap header ends.
+ */
+ skb_set_mac_header(skb, skb->len);
+
+ /* Override the nss from the rx_vec, since rate_n_flags has only
+ * 1 bit for the nss, which gives a max of 2 ss, but there may be
+ * up to 8 spatial streams.
+ */
+ switch (format) {
+ case RATE_MCS_VHT_MSK:
+ rx_status->nss =
+ le32_get_bits(desc->rx_vec[0],
+ RX_NO_DATA_RX_VEC0_VHT_NSTS_MSK) + 1;
+ break;
+ case RATE_MCS_HE_MSK:
+ rx_status->nss =
+ le32_get_bits(desc->rx_vec[0],
+ RX_NO_DATA_RX_VEC0_HE_NSTS_MSK) + 1;
+ break;
+ case RATE_MCS_EHT_MSK:
+ rx_status->nss =
+ le32_get_bits(desc->rx_vec[2],
+ RX_NO_DATA_RX_VEC2_EHT_NSTS_MSK) + 1;
+ }
+
+ /* pass the packet to mac80211 */
+ rcu_read_lock();
+ ieee80211_rx_napi(mld->hw, NULL, skb, napi);
+ rcu_read_unlock();
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/rx.h b/drivers/net/wireless/intel/iwlwifi/mld/rx.h
new file mode 100644
index 000000000000..2beabd7e70b1
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mld/rx.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2024-2025 Intel Corporation
+ */
+#ifndef __iwl_mld_rx_h__
+#define __iwl_mld_rx_h__
+
+#include "mld.h"
+
+/**
+ * enum iwl_mld_internal_rxq_notif_type - RX queue sync notif types
+ *
+ * @IWL_MLD_RXQ_EMPTY: empty sync notification
+ * @IWL_MLD_RXQ_NOTIF_DEL_BA: notify RSS queues of delBA
+ */
+enum iwl_mld_internal_rxq_notif_type {
+ IWL_MLD_RXQ_EMPTY,
+ IWL_MLD_RXQ_NOTIF_DEL_BA,
+};
+
+/**
+ * struct iwl_mld_internal_rxq_notif - &struct iwl_rxq_sync_cmd internal data.
+ * This data is echoed by the firmware to all RSS queues and should be DWORD
+ * aligned. FW is agnostic to the data, so there are no endianness requirements
+ *
+ * @type: one of &enum iwl_mld_internal_rxq_notif_type
+ * @cookie: unique internal cookie to identify old notifications
+ * @reserved: reserved for alignment
+ * @payload: data to send to RX queues based on the type (may be empty)
+ */
+struct iwl_mld_internal_rxq_notif {
+ u8 type;
+ u8 reserved[3];
+ u32 cookie;
+ u8 payload[];
+} __packed;
+
+/**
+ * struct iwl_mld_rx_queues_sync - RX queues sync data
+ *
+ * @waitq: wait queue for RX queues sync completion
+ * @cookie: unique id to correlate sync requests with responses
+ * @state: bitmask representing the sync state of the RX queues;
+ * all RX queue bits are set before sending the command, and each
+ * queue's bit is cleared upon handling its notification
+ */
+struct iwl_mld_rx_queues_sync {
+ wait_queue_head_t waitq;
+ u32 cookie;
+ unsigned long state;
+};
+
+void iwl_mld_rx_mpdu(struct iwl_mld *mld, struct napi_struct *napi,
+ struct iwl_rx_cmd_buffer *rxb, int queue);
+
+void iwl_mld_sync_rx_queues(struct iwl_mld *mld,
+ enum iwl_mld_internal_rxq_notif_type type,
+ const void *notif_payload, u32 notif_payload_size);
+
+void iwl_mld_handle_rx_queues_sync_notif(struct iwl_mld *mld,
+ struct napi_struct *napi,
+ struct iwl_rx_packet *pkt, int queue);
+
+void iwl_mld_pass_packet_to_mac80211(struct iwl_mld *mld,
+ struct napi_struct *napi,
+ struct sk_buff *skb, int queue,
+ struct ieee80211_sta *sta);
+
+void iwl_mld_rx_monitor_no_data(struct iwl_mld *mld, struct napi_struct *napi,
+ struct iwl_rx_packet *pkt, int queue);
+
+#endif /* __iwl_mld_rx_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/scan.c b/drivers/net/wireless/intel/iwlwifi/mld/scan.c
new file mode 100644
index 000000000000..7ec04318ec2f
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mld/scan.c
@@ -0,0 +1,2008 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2024-2025 Intel Corporation
+ */
+#include <linux/crc32.h>
+
+#include "mld.h"
+#include "scan.h"
+#include "hcmd.h"
+#include "iface.h"
+#include "phy.h"
+#include "mlo.h"
+
+#include "fw/api/scan.h"
+#include "fw/dbg.h"
+
+#define IWL_SCAN_DWELL_ACTIVE 10
+#define IWL_SCAN_DWELL_PASSIVE 110
+#define IWL_SCAN_NUM_OF_FRAGS 3
+
+/* adaptive dwell max budget time [TU] for full scan */
+#define IWL_SCAN_ADWELL_MAX_BUDGET_FULL_SCAN 300
+
+/* adaptive dwell max budget time [TU] for directed scan */
+#define IWL_SCAN_ADWELL_MAX_BUDGET_DIRECTED_SCAN 100
+
+/* adaptive dwell default high band APs number */
+#define IWL_SCAN_ADWELL_DEFAULT_HB_N_APS 8
+
+/* adaptive dwell default low band APs number */
+#define IWL_SCAN_ADWELL_DEFAULT_LB_N_APS 2
+
+/* adaptive dwell default APs number for P2P social channels (1, 6, 11) */
+#define IWL_SCAN_ADWELL_DEFAULT_N_APS_SOCIAL 10
+
+/* adaptive dwell number of APs override for P2P friendly GO channels */
+#define IWL_SCAN_ADWELL_N_APS_GO_FRIENDLY 10
+
+/* adaptive dwell number of APs override for P2P social channels */
+#define IWL_SCAN_ADWELL_N_APS_SOCIAL_CHS 2
+
+/* adaptive dwell number of APs override mask for p2p friendly GO */
+#define IWL_SCAN_ADWELL_N_APS_GO_FRIENDLY_BIT BIT(20)
+
+/* adaptive dwell number of APs override mask for social channels */
+#define IWL_SCAN_ADWELL_N_APS_SOCIAL_CHS_BIT BIT(21)
+
+#define SCAN_TIMEOUT_MSEC (30000 * HZ)
+
+/* minimal number of 2GHz and 5GHz channels in the regular scan request */
+#define IWL_MLD_6GHZ_PASSIVE_SCAN_MIN_CHANS 4
+
+enum iwl_mld_scan_type {
+ IWL_SCAN_TYPE_NOT_SET,
+ IWL_SCAN_TYPE_UNASSOC,
+ IWL_SCAN_TYPE_WILD,
+ IWL_SCAN_TYPE_MILD,
+ IWL_SCAN_TYPE_FRAGMENTED,
+ IWL_SCAN_TYPE_FAST_BALANCE,
+};
+
+struct iwl_mld_scan_timing_params {
+ u32 suspend_time;
+ u32 max_out_time;
+};
+
+static const struct iwl_mld_scan_timing_params scan_timing[] = {
+ [IWL_SCAN_TYPE_UNASSOC] = {
+ .suspend_time = 0,
+ .max_out_time = 0,
+ },
+ [IWL_SCAN_TYPE_WILD] = {
+ .suspend_time = 30,
+ .max_out_time = 120,
+ },
+ [IWL_SCAN_TYPE_MILD] = {
+ .suspend_time = 120,
+ .max_out_time = 120,
+ },
+ [IWL_SCAN_TYPE_FRAGMENTED] = {
+ .suspend_time = 95,
+ .max_out_time = 44,
+ },
+ [IWL_SCAN_TYPE_FAST_BALANCE] = {
+ .suspend_time = 30,
+ .max_out_time = 37,
+ },
+};
+
+struct iwl_mld_scan_params {
+ enum iwl_mld_scan_type type;
+ u32 n_channels;
+ u16 delay;
+ int n_ssids;
+ struct cfg80211_ssid *ssids;
+ struct ieee80211_channel **channels;
+ u32 flags;
+ u8 *mac_addr;
+ u8 *mac_addr_mask;
+ bool no_cck;
+ bool pass_all;
+ int n_match_sets;
+ struct iwl_scan_probe_req preq;
+ struct cfg80211_match_set *match_sets;
+ int n_scan_plans;
+ struct cfg80211_sched_scan_plan *scan_plans;
+ bool iter_notif;
+ bool respect_p2p_go;
+ u8 fw_link_id;
+ struct cfg80211_scan_6ghz_params *scan_6ghz_params;
+ u32 n_6ghz_params;
+ bool scan_6ghz;
+ bool enable_6ghz_passive;
+ u8 bssid[ETH_ALEN] __aligned(2);
+};
+
+struct iwl_mld_scan_respect_p2p_go_iter_data {
+ struct ieee80211_vif *current_vif;
+ bool p2p_go;
+};
+
+static void iwl_mld_scan_respect_p2p_go_iter(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mld_scan_respect_p2p_go_iter_data *data = _data;
+
+ /* exclude the given vif */
+ if (vif == data->current_vif)
+ return;
+
+ /* TODO: CDB check the band of the GO */
+ if (ieee80211_vif_type_p2p(vif) == NL80211_IFTYPE_P2P_GO &&
+ iwl_mld_vif_from_mac80211(vif)->ap_ibss_active)
+ data->p2p_go = true;
+}
+
+static bool iwl_mld_get_respect_p2p_go(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ bool low_latency)
+{
+ struct iwl_mld_scan_respect_p2p_go_iter_data data = {
+ .current_vif = vif,
+ .p2p_go = false,
+ };
+
+ if (!low_latency)
+ return false;
+
+ ieee80211_iterate_active_interfaces_mtx(mld->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mld_scan_respect_p2p_go_iter,
+ &data);
+
+ return data.p2p_go;
+}
+
+struct iwl_mld_scan_iter_data {
+ struct ieee80211_vif *current_vif;
+ bool active_vif;
+ bool is_dcm_with_p2p_go;
+ bool global_low_latency;
+};
+
+static void iwl_mld_scan_iterator(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mld_scan_iter_data *data = _data;
+ struct ieee80211_vif *curr_vif = data->current_vif;
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+ struct iwl_mld_vif *curr_mld_vif;
+ unsigned long curr_vif_active_links;
+ u16 link_id;
+
+ data->global_low_latency |= iwl_mld_vif_low_latency(mld_vif);
+
+ if ((ieee80211_vif_is_mld(vif) && vif->active_links) ||
+ (vif->type != NL80211_IFTYPE_P2P_DEVICE &&
+ mld_vif->deflink.active))
+ data->active_vif = true;
+
+ if (vif == curr_vif)
+ return;
+
+ if (ieee80211_vif_type_p2p(vif) != NL80211_IFTYPE_P2P_GO)
+ return;
+
+ /* Currently P2P GO can't be AP MLD so the logic below assumes that */
+ WARN_ON_ONCE(ieee80211_vif_is_mld(vif));
+
+ curr_vif_active_links =
+ ieee80211_vif_is_mld(curr_vif) ? curr_vif->active_links : 1;
+
+ curr_mld_vif = iwl_mld_vif_from_mac80211(curr_vif);
+
+ for_each_set_bit(link_id, &curr_vif_active_links,
+ IEEE80211_MLD_MAX_NUM_LINKS) {
+ struct iwl_mld_link *curr_mld_link =
+ iwl_mld_link_dereference_check(curr_mld_vif, link_id);
+
+ if (WARN_ON(!curr_mld_link))
+ return;
+
+ if (rcu_access_pointer(curr_mld_link->chan_ctx) &&
+ rcu_access_pointer(mld_vif->deflink.chan_ctx) !=
+ rcu_access_pointer(curr_mld_link->chan_ctx)) {
+ data->is_dcm_with_p2p_go = true;
+ return;
+ }
+ }
+}
+
+static enum
+iwl_mld_scan_type iwl_mld_get_scan_type(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ struct iwl_mld_scan_iter_data *data)
+{
+ enum iwl_mld_traffic_load load = mld->scan.traffic_load.status;
+
+ /* A scanning AP interface probably wants to generate a survey to do
+ * ACS (automatic channel selection).
+ * Force a non-fragmented scan in that case.
+ */
+ if (ieee80211_vif_type_p2p(vif) == NL80211_IFTYPE_AP)
+ return IWL_SCAN_TYPE_WILD;
+
+ if (!data->active_vif)
+ return IWL_SCAN_TYPE_UNASSOC;
+
+ if ((load == IWL_MLD_TRAFFIC_HIGH || data->global_low_latency) &&
+ vif->type != NL80211_IFTYPE_P2P_DEVICE)
+ return IWL_SCAN_TYPE_FRAGMENTED;
+
+ /* In case of DCM with P2P GO set all scan requests as
+ * fast-balance scan
+ */
+ if (vif->type == NL80211_IFTYPE_STATION &&
+ data->is_dcm_with_p2p_go)
+ return IWL_SCAN_TYPE_FAST_BALANCE;
+
+ if (load >= IWL_MLD_TRAFFIC_MEDIUM || data->global_low_latency)
+ return IWL_SCAN_TYPE_MILD;
+
+ return IWL_SCAN_TYPE_WILD;
+}
+
+static u8 *
+iwl_mld_scan_add_2ghz_elems(struct iwl_mld *mld, const u8 *ies,
+ size_t len, u8 *const pos)
+{
+ static const u8 before_ds_params[] = {
+ WLAN_EID_SSID,
+ WLAN_EID_SUPP_RATES,
+ WLAN_EID_REQUEST,
+ WLAN_EID_EXT_SUPP_RATES,
+ };
+ size_t offs;
+ u8 *newpos = pos;
+
+ offs = ieee80211_ie_split(ies, len,
+ before_ds_params,
+ ARRAY_SIZE(before_ds_params),
+ 0);
+
+ memcpy(newpos, ies, offs);
+ newpos += offs;
+
+ /* Add a placeholder for DS Parameter Set element */
+ *newpos++ = WLAN_EID_DS_PARAMS;
+ *newpos++ = 1;
+ *newpos++ = 0;
+
+ memcpy(newpos, ies + offs, len - offs);
+ newpos += len - offs;
+
+ return newpos;
+}
+
+static void
+iwl_mld_scan_add_tpc_report_elem(u8 *pos)
+{
+ pos[0] = WLAN_EID_VENDOR_SPECIFIC;
+ pos[1] = WFA_TPC_IE_LEN - 2;
+ pos[2] = (WLAN_OUI_MICROSOFT >> 16) & 0xff;
+ pos[3] = (WLAN_OUI_MICROSOFT >> 8) & 0xff;
+ pos[4] = WLAN_OUI_MICROSOFT & 0xff;
+ pos[5] = WLAN_OUI_TYPE_MICROSOFT_TPC;
+ pos[6] = 0;
+ /* pos[7] - tx power will be inserted by the FW */
+ pos[7] = 0;
+ pos[8] = 0;
+}
+
+static u32
+iwl_mld_scan_ooc_priority(enum iwl_mld_scan_status scan_status)
+{
+ if (scan_status == IWL_MLD_SCAN_REGULAR)
+ return IWL_SCAN_PRIORITY_EXT_6;
+ if (scan_status == IWL_MLD_SCAN_INT_MLO)
+ return IWL_SCAN_PRIORITY_EXT_4;
+
+ return IWL_SCAN_PRIORITY_EXT_2;
+}
+
+static bool
+iwl_mld_scan_is_regular(struct iwl_mld_scan_params *params)
+{
+ return params->n_scan_plans == 1 &&
+ params->scan_plans[0].iterations == 1;
+}
+
+static bool
+iwl_mld_scan_is_fragmented(enum iwl_mld_scan_type type)
+{
+ return (type == IWL_SCAN_TYPE_FRAGMENTED ||
+ type == IWL_SCAN_TYPE_FAST_BALANCE);
+}
+
+static int
+iwl_mld_scan_uid_by_status(struct iwl_mld *mld, int status)
+{
+ for (int i = 0; i < ARRAY_SIZE(mld->scan.uid_status); i++)
+ if (mld->scan.uid_status[i] == status)
+ return i;
+
+ return -ENOENT;
+}
+
+static const char *
+iwl_mld_scan_ebs_status_str(enum iwl_scan_ebs_status status)
+{
+ switch (status) {
+ case IWL_SCAN_EBS_SUCCESS:
+ return "successful";
+ case IWL_SCAN_EBS_INACTIVE:
+ return "inactive";
+ case IWL_SCAN_EBS_FAILED:
+ case IWL_SCAN_EBS_CHAN_NOT_FOUND:
+ default:
+ return "failed";
+ }
+}
+
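+/* Returns the index of the given SSID in @ssid_list, or -1 if it is not
+ * present; an empty entry terminates the list.
+ */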
+static int
+iwl_mld_scan_ssid_exist(u8 *ssid, u8 ssid_len, struct iwl_ssid_ie *ssid_list)
+{
+ for (int i = 0; i < PROBE_OPTION_MAX; i++) {
+ if (!ssid_list[i].len)
+ return -1;
+ if (ssid_list[i].len == ssid_len &&
+ !memcmp(ssid_list[i].ssid, ssid, ssid_len))
+ return i;
+ }
+
+ return -1;
+}
+
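+/* Check that the request fits the FW limits: number of SSIDs, number of
+ * channels and the total probe request template size.
+ */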
+static bool
+iwl_mld_scan_fits(struct iwl_mld *mld, int n_ssids,
+ struct ieee80211_scan_ies *ies, int n_channels)
+{
+ return ((n_ssids <= PROBE_OPTION_MAX) &&
+ (n_channels <= mld->fw->ucode_capa.n_scan_channels) &&
+ (ies->common_ie_len + ies->len[NL80211_BAND_2GHZ] +
+ ies->len[NL80211_BAND_5GHZ] + ies->len[NL80211_BAND_6GHZ] <=
+ iwl_mld_scan_max_template_size()));
+}
+
+static void
+iwl_mld_scan_build_probe_req(struct iwl_mld *mld, struct ieee80211_vif *vif,
+ struct ieee80211_scan_ies *ies,
+ struct iwl_mld_scan_params *params)
+{
+ struct ieee80211_mgmt *frame = (void *)params->preq.buf;
+ u8 *pos, *newpos;
+ const u8 *mac_addr = params->flags & NL80211_SCAN_FLAG_RANDOM_ADDR ?
+ params->mac_addr : NULL;
+
+ if (mac_addr)
+ get_random_mask_addr(frame->sa, mac_addr,
+ params->mac_addr_mask);
+ else
+ memcpy(frame->sa, vif->addr, ETH_ALEN);
+
+ frame->frame_control = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ);
+ eth_broadcast_addr(frame->da);
+ ether_addr_copy(frame->bssid, params->bssid);
+ frame->seq_ctrl = 0;
+
+ pos = frame->u.probe_req.variable;
+ *pos++ = WLAN_EID_SSID;
+ *pos++ = 0;
+
+ params->preq.mac_header.offset = 0;
+ params->preq.mac_header.len = cpu_to_le16(24 + 2);
+
+ /* Insert DS parameter set element on 2.4 GHz band */
+ newpos = iwl_mld_scan_add_2ghz_elems(mld,
+ ies->ies[NL80211_BAND_2GHZ],
+ ies->len[NL80211_BAND_2GHZ],
+ pos);
+ params->preq.band_data[0].offset = cpu_to_le16(pos - params->preq.buf);
+ params->preq.band_data[0].len = cpu_to_le16(newpos - pos);
+ pos = newpos;
+
+ memcpy(pos, ies->ies[NL80211_BAND_5GHZ],
+ ies->len[NL80211_BAND_5GHZ]);
+ params->preq.band_data[1].offset = cpu_to_le16(pos - params->preq.buf);
+ params->preq.band_data[1].len =
+ cpu_to_le16(ies->len[NL80211_BAND_5GHZ]);
+ pos += ies->len[NL80211_BAND_5GHZ];
+
+ memcpy(pos, ies->ies[NL80211_BAND_6GHZ],
+ ies->len[NL80211_BAND_6GHZ]);
+ params->preq.band_data[2].offset = cpu_to_le16(pos - params->preq.buf);
+ params->preq.band_data[2].len =
+ cpu_to_le16(ies->len[NL80211_BAND_6GHZ]);
+ pos += ies->len[NL80211_BAND_6GHZ];
+
+ memcpy(pos, ies->common_ies, ies->common_ie_len);
+ params->preq.common_data.offset = cpu_to_le16(pos - params->preq.buf);
+
+ iwl_mld_scan_add_tpc_report_elem(pos + ies->common_ie_len);
+ params->preq.common_data.len = cpu_to_le16(ies->common_ie_len +
+ WFA_TPC_IE_LEN);
+}
+
+static u16
+iwl_mld_scan_get_cmd_gen_flags(struct iwl_mld *mld,
+ struct iwl_mld_scan_params *params,
+ struct ieee80211_vif *vif,
+ enum iwl_mld_scan_status scan_status)
+{
+ u16 flags = 0;
+
+ /* If no direct SSIDs are provided perform a passive scan. Otherwise,
+ * if there is a single SSID which is not the broadcast SSID, assume
+ * that the scan is intended for roaming purposes and thus enable Rx on
+ * all chains to improve chances of hearing the beacons/probe responses.
+ */
+ if (params->n_ssids == 0)
+ flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_FORCE_PASSIVE;
+ else if (params->n_ssids == 1 && params->ssids[0].ssid_len)
+ flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_USE_ALL_RX_CHAINS;
+
+ if (params->pass_all)
+ flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_PASS_ALL;
+ else
+ flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_MATCH;
+
+ if (iwl_mld_scan_is_fragmented(params->type))
+ flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_FRAGMENTED_LMAC1;
+
+ if (!iwl_mld_scan_is_regular(params))
+ flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_PERIODIC;
+
+ if (params->iter_notif ||
+ mld->scan.pass_all_sched_res == SCHED_SCAN_PASS_ALL_STATE_ENABLED)
+ flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_NTFY_ITER_COMPLETE;
+
+ if (scan_status == IWL_MLD_SCAN_SCHED ||
+ scan_status == IWL_MLD_SCAN_NETDETECT)
+ flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_PREEMPTIVE;
+
+ if (params->flags & (NL80211_SCAN_FLAG_ACCEPT_BCAST_PROBE_RESP |
+ NL80211_SCAN_FLAG_OCE_PROBE_REQ_HIGH_TX_RATE |
+ NL80211_SCAN_FLAG_FILS_MAX_CHANNEL_TIME))
+ flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_OCE;
+
+ if ((scan_status == IWL_MLD_SCAN_SCHED ||
+ scan_status == IWL_MLD_SCAN_NETDETECT) &&
+ params->flags & NL80211_SCAN_FLAG_COLOCATED_6GHZ)
+ flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_TRIGGER_UHB_SCAN;
+
+ if (params->enable_6ghz_passive)
+ flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_6GHZ_PASSIVE_SCAN;
+
+ flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_ADAPTIVE_DWELL;
+
+ return flags;
+}
+
+static u8
+iwl_mld_scan_get_cmd_gen_flags2(struct iwl_mld *mld,
+ struct iwl_mld_scan_params *params,
+ struct ieee80211_vif *vif, u16 gen_flags)
+{
+ u8 flags = 0;
+
+ /* TODO: CDB */
+ if (params->respect_p2p_go)
+ flags |= IWL_UMAC_SCAN_GEN_PARAMS_FLAGS2_RESPECT_P2P_GO_LB |
+ IWL_UMAC_SCAN_GEN_PARAMS_FLAGS2_RESPECT_P2P_GO_HB;
+
+ if (params->scan_6ghz)
+ flags |= IWL_UMAC_SCAN_GEN_PARAMS_FLAGS2_DONT_TOGGLE_ANT;
+
+ return flags;
+}
+
+static void
+iwl_mld_scan_cmd_set_dwell(struct iwl_mld *mld,
+ struct iwl_scan_general_params_v11 *gp,
+ struct iwl_mld_scan_params *params)
+{
+ const struct iwl_mld_scan_timing_params *timing =
+ &scan_timing[params->type];
+
+ gp->adwell_default_social_chn =
+ IWL_SCAN_ADWELL_DEFAULT_N_APS_SOCIAL;
+ gp->adwell_default_2g = IWL_SCAN_ADWELL_DEFAULT_LB_N_APS;
+ gp->adwell_default_5g = IWL_SCAN_ADWELL_DEFAULT_HB_N_APS;
+
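+ /* A directed scan (a specific SSID is given) gets a smaller adaptive
+ * dwell budget than a full wildcard scan.
+ */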
+ if (params->n_ssids && params->ssids[0].ssid_len)
+ gp->adwell_max_budget =
+ cpu_to_le16(IWL_SCAN_ADWELL_MAX_BUDGET_DIRECTED_SCAN);
+ else
+ gp->adwell_max_budget =
+ cpu_to_le16(IWL_SCAN_ADWELL_MAX_BUDGET_FULL_SCAN);
+
+ gp->scan_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6);
+
+ gp->max_out_of_time[SCAN_LB_LMAC_IDX] = cpu_to_le32(timing->max_out_time);
+ gp->suspend_time[SCAN_LB_LMAC_IDX] = cpu_to_le32(timing->suspend_time);
+
+ gp->active_dwell[SCAN_LB_LMAC_IDX] = IWL_SCAN_DWELL_ACTIVE;
+ gp->passive_dwell[SCAN_LB_LMAC_IDX] = IWL_SCAN_DWELL_PASSIVE;
+ gp->active_dwell[SCAN_HB_LMAC_IDX] = IWL_SCAN_DWELL_ACTIVE;
+ gp->passive_dwell[SCAN_HB_LMAC_IDX] = IWL_SCAN_DWELL_PASSIVE;
+
+ IWL_DEBUG_SCAN(mld,
+ "Scan: adwell_max_budget=%d max_out_of_time=%d suspend_time=%d\n",
+ gp->adwell_max_budget,
+ gp->max_out_of_time[SCAN_LB_LMAC_IDX],
+ gp->suspend_time[SCAN_LB_LMAC_IDX]);
+}
+
+static void
+iwl_mld_scan_cmd_set_gen_params(struct iwl_mld *mld,
+ struct iwl_mld_scan_params *params,
+ struct ieee80211_vif *vif,
+ struct iwl_scan_general_params_v11 *gp,
+ enum iwl_mld_scan_status scan_status)
+{
+ u16 gen_flags = iwl_mld_scan_get_cmd_gen_flags(mld, params, vif,
+ scan_status);
+ u8 gen_flags2 = iwl_mld_scan_get_cmd_gen_flags2(mld, params, vif,
+ gen_flags);
+
+ IWL_DEBUG_SCAN(mld, "General: flags=0x%x, flags2=0x%x\n",
+ gen_flags, gen_flags2);
+
+ gp->flags = cpu_to_le16(gen_flags);
+ gp->flags2 = gen_flags2;
+
+ iwl_mld_scan_cmd_set_dwell(mld, gp, params);
+
+ if (gen_flags & IWL_UMAC_SCAN_GEN_FLAGS_V2_FRAGMENTED_LMAC1)
+ gp->num_of_fragments[SCAN_LB_LMAC_IDX] = IWL_SCAN_NUM_OF_FRAGS;
+
+ if (params->fw_link_id != IWL_MLD_INVALID_FW_ID)
+ gp->scan_start_mac_or_link_id = params->fw_link_id;
+}
+
+static int
+iwl_mld_scan_cmd_set_sched_params(struct iwl_mld_scan_params *params,
+ struct iwl_scan_umac_schedule *schedule,
+ __le16 *delay)
+{
+ if (WARN_ON(!params->n_scan_plans ||
+ params->n_scan_plans > IWL_MAX_SCHED_SCAN_PLANS))
+ return -EINVAL;
+
+ for (int i = 0; i < params->n_scan_plans; i++) {
+ struct cfg80211_sched_scan_plan *scan_plan =
+ &params->scan_plans[i];
+
+ schedule[i].iter_count = scan_plan->iterations;
+ schedule[i].interval =
+ cpu_to_le16(scan_plan->interval);
+ }
+
+ /* If the number of iterations of the last scan plan is set to zero,
+ * it should run infinitely. However, this is not always the case.
+ * For example, when regular scan is requested the driver sets one scan
+ * plan with one iteration.
+ */
+ if (!schedule[params->n_scan_plans - 1].iter_count)
+ schedule[params->n_scan_plans - 1].iter_count = 0xff;
+
+ *delay = cpu_to_le16(params->delay);
+
+ return 0;
+}
+
+/* We insert the SSIDs in an inverted order, because the FW will
+ * invert it back.
+ */
+static void
+iwl_mld_scan_cmd_build_ssids(struct iwl_mld_scan_params *params,
+ struct iwl_ssid_ie *ssids, u32 *ssid_bitmap)
+{
+ int i, j;
+ int index;
+ u32 tmp_bitmap = 0;
+
+ /* copy SSIDs from the match list. iwl_mld_config_sched_scan_profiles()
+ * uses the order of these SSIDs to configure the match list.
+ */
+ for (i = 0, j = params->n_match_sets - 1;
+ j >= 0 && i < PROBE_OPTION_MAX;
+ i++, j--) {
+ /* skip empty SSID match_sets */
+ if (!params->match_sets[j].ssid.ssid_len)
+ continue;
+
+ ssids[i].id = WLAN_EID_SSID;
+ ssids[i].len = params->match_sets[j].ssid.ssid_len;
+ memcpy(ssids[i].ssid, params->match_sets[j].ssid.ssid,
+ ssids[i].len);
+ }
+
+ /* add SSIDs from scan SSID list */
+ for (j = params->n_ssids - 1;
+ j >= 0 && i < PROBE_OPTION_MAX;
+ i++, j--) {
+ index = iwl_mld_scan_ssid_exist(params->ssids[j].ssid,
+ params->ssids[j].ssid_len,
+ ssids);
+ if (index < 0) {
+ ssids[i].id = WLAN_EID_SSID;
+ ssids[i].len = params->ssids[j].ssid_len;
+ memcpy(ssids[i].ssid, params->ssids[j].ssid,
+ ssids[i].len);
+ tmp_bitmap |= BIT(i);
+ } else {
+ tmp_bitmap |= BIT(index);
+ }
+ }
+
+ if (ssid_bitmap)
+ *ssid_bitmap = tmp_bitmap;
+}
+
+static void
+iwl_mld_scan_fill_6g_chan_list(struct iwl_mld_scan_params *params,
+ struct iwl_scan_probe_params_v4 *pp)
+{
+ int j, idex_s = 0, idex_b = 0;
+ struct cfg80211_scan_6ghz_params *scan_6ghz_params =
+ params->scan_6ghz_params;
+
+ for (j = 0;
+ j < params->n_ssids && idex_s < SCAN_SHORT_SSID_MAX_SIZE;
+ j++) {
+ if (!params->ssids[j].ssid_len)
+ continue;
+
+ pp->short_ssid[idex_s] =
+ cpu_to_le32(~crc32_le(~0, params->ssids[j].ssid,
+ params->ssids[j].ssid_len));
+
+ /* hidden 6ghz scan */
+ pp->direct_scan[idex_s].id = WLAN_EID_SSID;
+ pp->direct_scan[idex_s].len = params->ssids[j].ssid_len;
+ memcpy(pp->direct_scan[idex_s].ssid, params->ssids[j].ssid,
+ params->ssids[j].ssid_len);
+ idex_s++;
+ }
+
+ /* Populate the arrays of the short SSIDs and the BSSIDs using the 6GHz
+ * collocated parameters. This might not be optimal, as this processing
+ * does not (yet) correspond to the actual channels, so it is possible
+ * that some entries would be left out.
+ */
+ for (j = 0; j < params->n_6ghz_params; j++) {
+ int k;
+
+ /* First, try to place the short SSID */
+ if (scan_6ghz_params[j].short_ssid_valid) {
+ for (k = 0; k < idex_s; k++) {
+ if (pp->short_ssid[k] ==
+ cpu_to_le32(scan_6ghz_params[j].short_ssid))
+ break;
+ }
+
+ if (k == idex_s && idex_s < SCAN_SHORT_SSID_MAX_SIZE) {
+ pp->short_ssid[idex_s++] =
+ cpu_to_le32(scan_6ghz_params[j].short_ssid);
+ }
+ }
+
+ /* try to place BSSID for the same entry */
+ for (k = 0; k < idex_b; k++) {
+ if (!memcmp(&pp->bssid_array[k],
+ scan_6ghz_params[j].bssid, ETH_ALEN))
+ break;
+ }
+
+ if (k == idex_b && idex_b < SCAN_BSSID_MAX_SIZE &&
+ !WARN_ONCE(!is_valid_ether_addr(scan_6ghz_params[j].bssid),
+ "scan: invalid BSSID at index %u, index_b=%u\n",
+ j, idex_b)) {
+ memcpy(&pp->bssid_array[idex_b++],
+ scan_6ghz_params[j].bssid, ETH_ALEN);
+ }
+ }
+
+ pp->short_ssid_num = idex_s;
+ pp->bssid_num = idex_b;
+}
+
+static void
+iwl_mld_scan_cmd_set_probe_params(struct iwl_mld_scan_params *params,
+ struct iwl_scan_probe_params_v4 *pp,
+ u32 *bitmap_ssid)
+{
+ pp->preq = params->preq;
+
+ if (params->scan_6ghz) {
+ iwl_mld_scan_fill_6g_chan_list(params, pp);
+ return;
+ }
+
+ /* relevant only for 2.4 GHz/5 GHz scan */
+ iwl_mld_scan_cmd_build_ssids(params, pp->direct_scan, bitmap_ssid);
+}
+
+static bool
+iwl_mld_scan_use_ebs(struct iwl_mld *mld, struct ieee80211_vif *vif,
+ bool low_latency)
+{
+ const struct iwl_ucode_capabilities *capa = &mld->fw->ucode_capa;
+
+ /* We can only use EBS if:
+ * 1. the feature is supported.
+ * 2. the last EBS was successful.
+ * 3. it's not a p2p find operation.
+ * 4. we are not in low latency mode,
+ * or if fragmented ebs is supported by the FW
+ * 5. the VIF is not an AP interface (scan wants survey results)
+ */
+ return ((capa->flags & IWL_UCODE_TLV_FLAGS_EBS_SUPPORT) &&
+ !mld->scan.last_ebs_failed &&
+ vif->type != NL80211_IFTYPE_P2P_DEVICE &&
+ (!low_latency || fw_has_api(capa, IWL_UCODE_TLV_API_FRAG_EBS)) &&
+ ieee80211_vif_type_p2p(vif) != NL80211_IFTYPE_AP);
+}
+
+static u8
+iwl_mld_scan_cmd_set_chan_flags(struct iwl_mld *mld,
+ struct iwl_mld_scan_params *params,
+ struct ieee80211_vif *vif,
+ bool low_latency)
+{
+ u8 flags = 0;
+
+ flags |= IWL_SCAN_CHANNEL_FLAG_ENABLE_CHAN_ORDER;
+
+ if (iwl_mld_scan_use_ebs(mld, vif, low_latency))
+ flags |= IWL_SCAN_CHANNEL_FLAG_EBS |
+ IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
+ IWL_SCAN_CHANNEL_FLAG_CACHE_ADD;
+
+ /* set fragmented ebs for fragmented scan */
+ if (iwl_mld_scan_is_fragmented(params->type))
+ flags |= IWL_SCAN_CHANNEL_FLAG_EBS_FRAG;
+
+ /* Force EBS in case the scan is fragmented and P2P GO operation
+ * needs to be taken into consideration during the scan.
+ */
+ /* TODO: CDB */
+ if (iwl_mld_scan_is_fragmented(params->type) &&
+ params->respect_p2p_go) {
+ IWL_DEBUG_SCAN(mld, "Respect P2P GO. Force EBS\n");
+ flags |= IWL_SCAN_CHANNEL_FLAG_FORCE_EBS;
+ }
+
+ return flags;
+}
+
+static const u8 p2p_go_friendly_chs[] = {
+ 36, 40, 44, 48, 149, 153, 157, 161, 165,
+};
+
+static const u8 social_chs[] = {
+ 1, 6, 11
+};
+
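+/* For P2P device scans, flag GO-friendly and social channels so the FW can
+ * apply the matching n_aps_override[] adaptive dwell value for them.
+ */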
+static u32 iwl_mld_scan_ch_n_aps_flag(enum nl80211_iftype vif_type, u8 ch_id)
+{
+ if (vif_type != NL80211_IFTYPE_P2P_DEVICE)
+ return 0;
+
+ for (int i = 0; i < ARRAY_SIZE(p2p_go_friendly_chs); i++) {
+ if (ch_id == p2p_go_friendly_chs[i])
+ return IWL_SCAN_ADWELL_N_APS_GO_FRIENDLY_BIT;
+ }
+
+ for (int i = 0; i < ARRAY_SIZE(social_chs); i++) {
+ if (ch_id == social_chs[i])
+ return IWL_SCAN_ADWELL_N_APS_SOCIAL_CHS_BIT;
+ }
+
+ return 0;
+}
+
+static void
+iwl_mld_scan_cmd_set_channels(struct iwl_mld *mld,
+ struct ieee80211_channel **channels,
+ struct iwl_scan_channel_params_v7 *cp,
+ int n_channels, u32 flags,
+ enum nl80211_iftype vif_type)
+{
+ for (int i = 0; i < n_channels; i++) {
+ enum nl80211_band band = channels[i]->band;
+ struct iwl_scan_channel_cfg_umac *cfg = &cp->channel_config[i];
+ u8 iwl_band = iwl_mld_nl80211_band_to_fw(band);
+ u32 n_aps_flag =
+ iwl_mld_scan_ch_n_aps_flag(vif_type,
+ channels[i]->hw_value);
+
+ if (IWL_MLD_ADAPTIVE_DWELL_NUM_APS_OVERRIDE)
+ n_aps_flag = IWL_SCAN_ADWELL_N_APS_GO_FRIENDLY_BIT;
+
+ cfg->flags = cpu_to_le32(flags | n_aps_flag);
+ cfg->channel_num = channels[i]->hw_value;
+ if (cfg80211_channel_is_psc(channels[i]))
+ cfg->flags = 0;
+
+ if (band == NL80211_BAND_6GHZ) {
+ /* 6 GHz channels should only appear in a scan request
+ * that has scan_6ghz set. The only exception is MLO
+ * scan, which has to be passive.
+ */
+ WARN_ON_ONCE(cfg->flags != 0);
+ cfg->flags =
+ cpu_to_le32(IWL_UHB_CHAN_CFG_FLAG_FORCE_PASSIVE);
+ }
+
+ cfg->v2.iter_count = 1;
+ cfg->v2.iter_interval = 0;
+ cfg->flags |= cpu_to_le32(iwl_band <<
+ IWL_CHAN_CFG_FLAGS_BAND_POS);
+ }
+}
+
+static u8
+iwl_mld_scan_cfg_channels_6g(struct iwl_mld *mld,
+ struct iwl_mld_scan_params *params,
+ u32 n_channels,
+ struct iwl_scan_probe_params_v4 *pp,
+ struct iwl_scan_channel_params_v7 *cp,
+ enum nl80211_iftype vif_type)
+{
+ struct cfg80211_scan_6ghz_params *scan_6ghz_params =
+ params->scan_6ghz_params;
+ u32 i;
+ u8 ch_cnt;
+
+ for (i = 0, ch_cnt = 0; i < params->n_channels; i++) {
+ struct iwl_scan_channel_cfg_umac *cfg =
+ &cp->channel_config[ch_cnt];
+
+ u32 s_ssid_bitmap = 0, bssid_bitmap = 0, flags = 0;
+ u8 k, n_s_ssids = 0, n_bssids = 0;
+ u8 max_s_ssids, max_bssids;
+ bool force_passive = false, found = false, allow_passive = true,
+ unsolicited_probe_on_chan = false, psc_no_listen = false;
+ s8 psd_20 = IEEE80211_RNR_TBTT_PARAMS_PSD_RESERVED;
+
+ /* Avoid performing passive scan on non PSC channels unless the
+ * scan is specifically a passive scan, i.e., no SSIDs
+ * configured in the scan command.
+ */
+ if (!cfg80211_channel_is_psc(params->channels[i]) &&
+ !params->n_6ghz_params && params->n_ssids)
+ continue;
+
+ cfg->channel_num = params->channels[i]->hw_value;
+ cfg->flags |=
+ cpu_to_le32(PHY_BAND_6 << IWL_CHAN_CFG_FLAGS_BAND_POS);
+
+ cfg->v5.iter_count = 1;
+ cfg->v5.iter_interval = 0;
+
+ for (u32 j = 0; j < params->n_6ghz_params; j++) {
+ s8 tmp_psd_20;
+
+ if (scan_6ghz_params[j].channel_idx != i)
+ continue;
+
+ unsolicited_probe_on_chan |=
+ scan_6ghz_params[j].unsolicited_probe;
+
+ /* Use the highest PSD value allowed as advertised by
+ * APs for this channel
+ */
+ tmp_psd_20 = scan_6ghz_params[j].psd_20;
+ if (tmp_psd_20 !=
+ IEEE80211_RNR_TBTT_PARAMS_PSD_RESERVED &&
+ (psd_20 ==
+ IEEE80211_RNR_TBTT_PARAMS_PSD_RESERVED ||
+ psd_20 < tmp_psd_20))
+ psd_20 = tmp_psd_20;
+
+ psc_no_listen |= scan_6ghz_params[j].psc_no_listen;
+ }
+
+ /* In the following cases apply passive scan:
+ * 1. Non fragmented scan:
+ * - PSC channel with NO_LISTEN_FLAG on should be treated
+ * like non PSC channel
+ * - Non PSC channel with more than 3 short SSIDs or more
+ * than 9 BSSIDs.
+ * - Non PSC Channel with unsolicited probe response and
+ * more than 2 short SSIDs or more than 6 BSSIDs.
+ * - PSC channel with more than 2 short SSIDs or more than
+ * 6 BSSIDs.
+ * 2. Fragmented scan:
+ * - PSC channel with more than 1 SSID or 3 BSSIDs.
+ * - Non PSC channel with more than 2 SSIDs or 6 BSSIDs.
+ * - Non PSC channel with unsolicited probe response and
+ * more than 1 SSID or more than 3 BSSIDs.
+ */
+ if (!iwl_mld_scan_is_fragmented(params->type)) {
+ if (!cfg80211_channel_is_psc(params->channels[i]) ||
+ psc_no_listen) {
+ if (unsolicited_probe_on_chan) {
+ max_s_ssids = 2;
+ max_bssids = 6;
+ } else {
+ max_s_ssids = 3;
+ max_bssids = 9;
+ }
+ } else {
+ max_s_ssids = 2;
+ max_bssids = 6;
+ }
+ } else if (cfg80211_channel_is_psc(params->channels[i])) {
+ max_s_ssids = 1;
+ max_bssids = 3;
+ } else {
+ if (unsolicited_probe_on_chan) {
+ max_s_ssids = 1;
+ max_bssids = 3;
+ } else {
+ max_s_ssids = 2;
+ max_bssids = 6;
+ }
+ }
+
+ /* To optimize the scan time, i.e., reduce the scan dwell time
+ * on each channel, the below logic tries to set 3 direct BSSID
+ * probe requests for each broadcast probe request with a short
+ * SSID.
+ */
+ for (u32 j = 0; j < params->n_6ghz_params; j++) {
+ if (scan_6ghz_params[j].channel_idx != i)
+ continue;
+
+ found = false;
+
+ for (k = 0;
+ k < pp->short_ssid_num && n_s_ssids < max_s_ssids;
+ k++) {
+ if (!scan_6ghz_params[j].unsolicited_probe &&
+ le32_to_cpu(pp->short_ssid[k]) ==
+ scan_6ghz_params[j].short_ssid) {
+ /* Relevant short SSID bit set */
+ if (s_ssid_bitmap & BIT(k)) {
+ found = true;
+ break;
+ }
+
+ /* Prefer creating BSSID entries unless
+ * the short SSID probe can be done in
+ * the same channel dwell iteration.
+ *
+ * We also need to create a short SSID
+ * entry for any hidden AP.
+ */
+ if (3 * n_s_ssids > n_bssids &&
+ !pp->direct_scan[k].len)
+ break;
+
+ /* Hidden AP, cannot do passive scan */
+ if (pp->direct_scan[k].len)
+ allow_passive = false;
+
+ s_ssid_bitmap |= BIT(k);
+ n_s_ssids++;
+ found = true;
+ break;
+ }
+ }
+
+ if (found)
+ continue;
+
+ for (k = 0; k < pp->bssid_num; k++) {
+ if (memcmp(&pp->bssid_array[k],
+ scan_6ghz_params[j].bssid,
+ ETH_ALEN))
+ continue;
+
+ if (bssid_bitmap & BIT(k))
+ break;
+
+ if (n_bssids < max_bssids) {
+ bssid_bitmap |= BIT(k);
+ n_bssids++;
+ } else {
+ force_passive = true;
+ }
+
+ break;
+ }
+ }
+
+ if (cfg80211_channel_is_psc(params->channels[i]) &&
+ psc_no_listen)
+ flags |= IWL_UHB_CHAN_CFG_FLAG_PSC_CHAN_NO_LISTEN;
+
+ if (unsolicited_probe_on_chan)
+ flags |= IWL_UHB_CHAN_CFG_FLAG_UNSOLICITED_PROBE_RES;
+
+ if ((allow_passive && force_passive) ||
+ (!(bssid_bitmap | s_ssid_bitmap) &&
+ !cfg80211_channel_is_psc(params->channels[i])))
+ flags |= IWL_UHB_CHAN_CFG_FLAG_FORCE_PASSIVE;
+ else
+ flags |= bssid_bitmap | (s_ssid_bitmap << 16);
+
+ cfg->flags |= cpu_to_le32(flags);
+ cfg->v5.psd_20 = psd_20;
+
+ ch_cnt++;
+ }
+
+ if (params->n_channels > ch_cnt)
+ IWL_DEBUG_SCAN(mld,
+ "6GHz: reducing number channels: (%u->%u)\n",
+ params->n_channels, ch_cnt);
+
+ return ch_cnt;
+}
+
+static int
+iwl_mld_scan_cmd_set_6ghz_chan_params(struct iwl_mld *mld,
+ struct iwl_mld_scan_params *params,
+ struct ieee80211_vif *vif,
+ struct iwl_scan_req_params_v17 *scan_p,
+ enum iwl_mld_scan_status scan_status)
+{
+ struct iwl_scan_channel_params_v7 *chan_p = &scan_p->channel_params;
+ struct iwl_scan_probe_params_v4 *probe_p = &scan_p->probe_params;
+
+ chan_p->flags = iwl_mld_scan_get_cmd_gen_flags(mld, params, vif,
+ scan_status);
+ chan_p->count = iwl_mld_scan_cfg_channels_6g(mld, params,
+ params->n_channels,
+ probe_p, chan_p,
+ vif->type);
+ if (!chan_p->count)
+ return -EINVAL;
+
+ if (!params->n_ssids ||
+ (params->n_ssids == 1 && !params->ssids[0].ssid_len))
+ chan_p->flags |= IWL_SCAN_CHANNEL_FLAG_6G_PSC_NO_FILTER;
+
+ return 0;
+}
+
+static int
+iwl_mld_scan_cmd_set_chan_params(struct iwl_mld *mld,
+ struct iwl_mld_scan_params *params,
+ struct ieee80211_vif *vif,
+ struct iwl_scan_req_params_v17 *scan_p,
+ bool low_latency,
+ enum iwl_mld_scan_status scan_status,
+ u32 channel_cfg_flags)
+{
+ struct iwl_scan_channel_params_v7 *cp = &scan_p->channel_params;
+ struct ieee80211_supported_band *sband =
+ &mld->nvm_data->bands[NL80211_BAND_6GHZ];
+
+ cp->n_aps_override[0] = IWL_SCAN_ADWELL_N_APS_GO_FRIENDLY;
+ cp->n_aps_override[1] = IWL_SCAN_ADWELL_N_APS_SOCIAL_CHS;
+
+ if (IWL_MLD_ADAPTIVE_DWELL_NUM_APS_OVERRIDE)
+ cp->n_aps_override[0] = IWL_MLD_ADAPTIVE_DWELL_NUM_APS_OVERRIDE;
+
+ if (params->scan_6ghz)
+ return iwl_mld_scan_cmd_set_6ghz_chan_params(mld, params,
+ vif, scan_p,
+ scan_status);
+
+ /* relevant only for 2.4 GHz/5 GHz scan */
+ cp->flags = iwl_mld_scan_cmd_set_chan_flags(mld, params, vif,
+ low_latency);
+ cp->count = params->n_channels;
+
+ iwl_mld_scan_cmd_set_channels(mld, params->channels, cp,
+ params->n_channels, channel_cfg_flags,
+ vif->type);
+
+ if (!params->enable_6ghz_passive)
+ return 0;
+
+ /* fill 6 GHz passive scan cfg */
+ for (int i = 0; i < sband->n_channels; i++) {
+ struct ieee80211_channel *channel =
+ &sband->channels[i];
+ struct iwl_scan_channel_cfg_umac *cfg =
+ &cp->channel_config[cp->count];
+
+ if (!cfg80211_channel_is_psc(channel))
+ continue;
+
+ cfg->channel_num = channel->hw_value;
+ cfg->v5.iter_count = 1;
+ cfg->v5.iter_interval = 0;
+ cfg->v5.psd_20 =
+ IEEE80211_RNR_TBTT_PARAMS_PSD_RESERVED;
+ cfg->flags = cpu_to_le32(PHY_BAND_6 <<
+ IWL_CHAN_CFG_FLAGS_BAND_POS);
+ cp->count++;
+ }
+
+ return 0;
+}
+
+static int
+iwl_mld_scan_build_cmd(struct iwl_mld *mld, struct ieee80211_vif *vif,
+ struct iwl_mld_scan_params *params,
+ enum iwl_mld_scan_status scan_status,
+ bool low_latency)
+{
+ struct iwl_scan_req_umac_v17 *cmd = mld->scan.cmd;
+ struct iwl_scan_req_params_v17 *scan_p = &cmd->scan_params;
+ u32 bitmap_ssid = 0;
+ int uid, ret;
+
+ memset(mld->scan.cmd, 0, mld->scan.cmd_size);
+
+ /* find a free UID entry */
+ uid = iwl_mld_scan_uid_by_status(mld, IWL_MLD_SCAN_NONE);
+ if (uid < 0)
+ return uid;
+
+ cmd->uid = cpu_to_le32(uid);
+ cmd->ooc_priority =
+ cpu_to_le32(iwl_mld_scan_ooc_priority(scan_status));
+
+ iwl_mld_scan_cmd_set_gen_params(mld, params, vif,
+ &scan_p->general_params, scan_status);
+
+ ret = iwl_mld_scan_cmd_set_sched_params(params,
+ scan_p->periodic_params.schedule,
+ &scan_p->periodic_params.delay);
+ if (ret)
+ return ret;
+
+ iwl_mld_scan_cmd_set_probe_params(params, &scan_p->probe_params,
+ &bitmap_ssid);
+
+ ret = iwl_mld_scan_cmd_set_chan_params(mld, params, vif, scan_p,
+ low_latency, scan_status,
+ bitmap_ssid);
+ if (ret)
+ return ret;
+
+ return uid;
+}
+
+static bool
+iwl_mld_scan_pass_all(struct iwl_mld *mld,
+ struct cfg80211_sched_scan_request *req)
+{
+ if (req->n_match_sets && req->match_sets[0].ssid.ssid_len) {
+ IWL_DEBUG_SCAN(mld,
+ "Sending scheduled scan with filtering, n_match_sets %d\n",
+ req->n_match_sets);
+ mld->scan.pass_all_sched_res = SCHED_SCAN_PASS_ALL_STATE_DISABLED;
+ return false;
+ }
+
+ IWL_DEBUG_SCAN(mld, "Sending Scheduled scan without filtering\n");
+ mld->scan.pass_all_sched_res = SCHED_SCAN_PASS_ALL_STATE_ENABLED;
+
+ return true;
+}
+
+static int
+iwl_mld_config_sched_scan_profiles(struct iwl_mld *mld,
+ struct cfg80211_sched_scan_request *req)
+{
+ struct iwl_host_cmd hcmd = {
+ .id = SCAN_OFFLOAD_UPDATE_PROFILES_CMD,
+ .dataflags[0] = IWL_HCMD_DFL_NOCOPY,
+ };
+ struct iwl_scan_offload_profile *profile;
+ struct iwl_scan_offload_profile_cfg_data *cfg_data;
+ struct iwl_scan_offload_profile_cfg *profile_cfg;
+ struct iwl_scan_offload_blocklist *blocklist;
+ u32 blocklist_size = IWL_SCAN_MAX_BLACKLIST_LEN * sizeof(*blocklist);
+ u32 cmd_size = blocklist_size + sizeof(*profile_cfg);
+ u8 *cmd;
+ int ret;
+
+ if (WARN_ON(req->n_match_sets > IWL_SCAN_MAX_PROFILES_V2))
+ return -EIO;
+
+ cmd = kzalloc(cmd_size, GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ hcmd.data[0] = cmd;
+ hcmd.len[0] = cmd_size;
+
+ blocklist = (struct iwl_scan_offload_blocklist *)cmd;
+ profile_cfg = (struct iwl_scan_offload_profile_cfg *)(cmd + blocklist_size);
+
+ /* No blocklist configuration */
+ cfg_data = &profile_cfg->data;
+ cfg_data->num_profiles = req->n_match_sets;
+ cfg_data->active_clients = SCAN_CLIENT_SCHED_SCAN;
+ cfg_data->pass_match = SCAN_CLIENT_SCHED_SCAN;
+ cfg_data->match_notify = SCAN_CLIENT_SCHED_SCAN;
+
+ if (!req->n_match_sets || !req->match_sets[0].ssid.ssid_len)
+ cfg_data->any_beacon_notify = SCAN_CLIENT_SCHED_SCAN;
+
+ for (int i = 0; i < req->n_match_sets; i++) {
+ profile = &profile_cfg->profiles[i];
+
+ /* Support any cipher and auth algorithm */
+ profile->unicast_cipher = 0xff;
+ profile->auth_alg = IWL_AUTH_ALGO_UNSUPPORTED |
+ IWL_AUTH_ALGO_NONE | IWL_AUTH_ALGO_PSK |
+ IWL_AUTH_ALGO_8021X | IWL_AUTH_ALGO_SAE |
+ IWL_AUTH_ALGO_8021X_SHA384 | IWL_AUTH_ALGO_OWE;
+ profile->network_type = IWL_NETWORK_TYPE_ANY;
+ profile->band_selection = IWL_SCAN_OFFLOAD_SELECT_ANY;
+ profile->client_bitmap = SCAN_CLIENT_SCHED_SCAN;
+ profile->ssid_index = i;
+ }
+
+ IWL_DEBUG_SCAN(mld,
+ "Sending scheduled scan profile config (n_match_sets=%u)\n",
+ req->n_match_sets);
+
+ ret = iwl_mld_send_cmd(mld, &hcmd);
+
+ kfree(cmd);
+
+ return ret;
+}
+
+static int
+iwl_mld_sched_scan_handle_non_psc_channels(struct iwl_mld_scan_params *params,
+ bool *non_psc_included)
+{
+ int i, j;
+
+ *non_psc_included = false;
+ /* for the 6 GHz band, only PSC channels need to be added */
+ for (i = 0; i < params->n_channels; i++) {
+ struct ieee80211_channel *channel = params->channels[i];
+
+ if (channel->band == NL80211_BAND_6GHZ &&
+ !cfg80211_channel_is_psc(channel)) {
+ *non_psc_included = true;
+ break;
+ }
+ }
+
+ if (!*non_psc_included)
+ return 0;
+
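+ /* Duplicate the channel list so the non-PSC 6 GHz entries can be
+ * dropped without modifying the original request.
+ */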
+ params->channels =
+ kmemdup(params->channels,
+ sizeof(params->channels[0]) * params->n_channels,
+ GFP_KERNEL);
+ if (!params->channels)
+ return -ENOMEM;
+
+ for (i = j = 0; i < params->n_channels; i++) {
+ if (params->channels[i]->band == NL80211_BAND_6GHZ &&
+ !cfg80211_channel_is_psc(params->channels[i]))
+ continue;
+ params->channels[j++] = params->channels[i];
+ }
+
+ params->n_channels = j;
+
+ return 0;
+}
+
+static void
+iwl_mld_scan_6ghz_passive_scan(struct iwl_mld *mld,
+ struct iwl_mld_scan_params *params,
+ struct ieee80211_vif *vif)
+{
+ struct ieee80211_supported_band *sband =
+ &mld->nvm_data->bands[NL80211_BAND_6GHZ];
+ u32 n_disabled, i;
+
+ params->enable_6ghz_passive = false;
+
+ /* 6 GHz passive scan may be enabled in the first 2.4 GHz/5 GHz scan
+ * phase to discover geo location if no APs are found. Skip it when
+ * we're in the 6 GHz scan phase.
+ */
+ if (params->scan_6ghz)
+ return;
+
+ /* 6 GHz passive scan allowed only on station interface */
+ if (vif->type != NL80211_IFTYPE_STATION) {
+ IWL_DEBUG_SCAN(mld,
+ "6GHz passive scan: not station interface\n");
+ return;
+ }
+
+ /* 6 GHz passive scan is allowed in a defined time interval following
+ * HW reset or resume flow, or while not associated and a large
+ * interval has passed since the last 6 GHz passive scan.
+ */
+ if ((vif->cfg.assoc ||
+ time_after(mld->scan.last_6ghz_passive_jiffies +
+ (IWL_MLD_6GHZ_PASSIVE_SCAN_TIMEOUT * HZ), jiffies)) &&
+ (time_before(mld->scan.last_start_time_jiffies +
+ (IWL_MLD_6GHZ_PASSIVE_SCAN_ASSOC_TIMEOUT * HZ),
+ jiffies))) {
+ IWL_DEBUG_SCAN(mld, "6GHz passive scan: %s\n",
+ vif->cfg.assoc ? "associated" :
+ "timeout did not expire");
+ return;
+ }
+
+ /* not enough channels in the regular scan request */
+ if (params->n_channels < IWL_MLD_6GHZ_PASSIVE_SCAN_MIN_CHANS) {
+ IWL_DEBUG_SCAN(mld,
+ "6GHz passive scan: not enough channels %d\n",
+ params->n_channels);
+ return;
+ }
+
+ for (i = 0; i < params->n_ssids; i++) {
+ if (!params->ssids[i].ssid_len)
+ break;
+ }
+
+ /* not a wildcard scan, so cannot enable passive 6 GHz scan */
+ if (i == params->n_ssids) {
+ IWL_DEBUG_SCAN(mld,
+ "6GHz passive scan: no wildcard SSID\n");
+ return;
+ }
+
+ if (!sband || !sband->n_channels) {
+ IWL_DEBUG_SCAN(mld,
+ "6GHz passive scan: no 6GHz channels\n");
+ return;
+ }
+
+ for (i = 0, n_disabled = 0; i < sband->n_channels; i++) {
+ if (sband->channels[i].flags & (IEEE80211_CHAN_DISABLED))
+ n_disabled++;
+ }
+
+ /* Not all the 6 GHz channels are disabled, so no need for 6 GHz
+ * passive scan
+ */
+ if (n_disabled != sband->n_channels) {
+ IWL_DEBUG_SCAN(mld,
+ "6GHz passive scan: 6GHz channels enabled\n");
+ return;
+ }
+
+ /* all conditions to enable 6 GHz passive scan are satisfied */
+ IWL_DEBUG_SCAN(mld, "6GHz passive scan: can be enabled\n");
+ params->enable_6ghz_passive = true;
+}
+
+static void
+iwl_mld_scan_set_link_id(struct iwl_mld *mld, struct ieee80211_vif *vif,
+ struct iwl_mld_scan_params *params,
+ s8 tsf_report_link_id,
+ enum iwl_mld_scan_status scan_status)
+{
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+ struct iwl_mld_link *link;
+
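+ /* if no link was specified for TSF reporting, fall back to the first
+ * active link (or the default link when there are no active links)
+ */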
+ if (tsf_report_link_id < 0) {
+ if (vif->active_links)
+ tsf_report_link_id = __ffs(vif->active_links);
+ else
+ tsf_report_link_id = 0;
+ }
+
+ link = iwl_mld_link_dereference_check(mld_vif, tsf_report_link_id);
+ if (!WARN_ON(!link)) {
+ params->fw_link_id = link->fw_id;
+ /* we need to store fw_link_id only for a regular scan,
+ * and use it in the scan complete notification
+ */
+ if (scan_status == IWL_MLD_SCAN_REGULAR)
+ mld->scan.fw_link_id = link->fw_id;
+ } else {
+ mld->scan.fw_link_id = IWL_MLD_INVALID_FW_ID;
+ params->fw_link_id = IWL_MLD_INVALID_FW_ID;
+ }
+}
+
+static int
+_iwl_mld_single_scan_start(struct iwl_mld *mld, struct ieee80211_vif *vif,
+ struct cfg80211_scan_request *req,
+ struct ieee80211_scan_ies *ies,
+ enum iwl_mld_scan_status scan_status)
+{
+ struct iwl_host_cmd hcmd = {
+ .id = WIDE_ID(LONG_GROUP, SCAN_REQ_UMAC),
+ .len = { mld->scan.cmd_size, },
+ .data = { mld->scan.cmd, },
+ .dataflags = { IWL_HCMD_DFL_NOCOPY, },
+ };
+ struct iwl_mld_scan_iter_data scan_iter_data = {
+ .current_vif = vif,
+ };
+ struct cfg80211_sched_scan_plan scan_plan = {.iterations = 1};
+ struct iwl_mld_scan_params params = {};
+ int ret, uid;
+
+ /* we should have failed registration if scan_cmd was NULL */
+ if (WARN_ON(!mld->scan.cmd))
+ return -ENOMEM;
+
+ if (!iwl_mld_scan_fits(mld, req->n_ssids, ies, req->n_channels))
+ return -ENOBUFS;
+
+ ieee80211_iterate_active_interfaces_mtx(mld->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mld_scan_iterator,
+ &scan_iter_data);
+
+ params.type = iwl_mld_get_scan_type(mld, vif, &scan_iter_data);
+ params.n_ssids = req->n_ssids;
+ params.flags = req->flags;
+ params.n_channels = req->n_channels;
+ params.delay = 0;
+ params.ssids = req->ssids;
+ params.channels = req->channels;
+ params.mac_addr = req->mac_addr;
+ params.mac_addr_mask = req->mac_addr_mask;
+ params.no_cck = req->no_cck;
+ params.pass_all = true;
+ params.n_match_sets = 0;
+ params.match_sets = NULL;
+ params.scan_plans = &scan_plan;
+ params.n_scan_plans = 1;
+
+ params.n_6ghz_params = req->n_6ghz_params;
+ params.scan_6ghz_params = req->scan_6ghz_params;
+ params.scan_6ghz = req->scan_6ghz;
+
+ ether_addr_copy(params.bssid, req->bssid);
+ /* TODO: CDB - per-band flag */
+ params.respect_p2p_go =
+ iwl_mld_get_respect_p2p_go(mld, vif,
+ scan_iter_data.global_low_latency);
+
+ if (req->duration)
+ params.iter_notif = true;
+
+ iwl_mld_scan_set_link_id(mld, vif, &params, req->tsf_report_link_id,
+ scan_status);
+
+ iwl_mld_scan_build_probe_req(mld, vif, ies, &params);
+
+ iwl_mld_scan_6ghz_passive_scan(mld, &params, vif);
+
+ uid = iwl_mld_scan_build_cmd(mld, vif, &params, scan_status,
+ scan_iter_data.global_low_latency);
+ if (uid < 0)
+ return uid;
+
+ ret = iwl_mld_send_cmd(mld, &hcmd);
+ if (ret) {
+ IWL_ERR(mld, "Scan failed! ret %d\n", ret);
+ return ret;
+ }
+
+ IWL_DEBUG_SCAN(mld, "Scan request send success: status=%u, uid=%u\n",
+ scan_status, uid);
+
+ mld->scan.uid_status[uid] = scan_status;
+ mld->scan.status |= scan_status;
+
+ if (params.enable_6ghz_passive)
+ mld->scan.last_6ghz_passive_jiffies = jiffies;
+
+ return 0;
+}
+
+static int
+iwl_mld_scan_send_abort_cmd_status(struct iwl_mld *mld, int uid, u32 *status)
+{
+ struct iwl_umac_scan_abort abort_cmd = {
+ .uid = cpu_to_le32(uid),
+ };
+ struct iwl_host_cmd cmd = {
+ .id = WIDE_ID(LONG_GROUP, SCAN_ABORT_UMAC),
+ .flags = CMD_WANT_SKB,
+ .data = { &abort_cmd },
+ .len[0] = sizeof(abort_cmd),
+ };
+ struct iwl_rx_packet *pkt;
+ struct iwl_cmd_response *resp;
+ u32 resp_len;
+ int ret;
+
+ ret = iwl_mld_send_cmd(mld, &cmd);
+ if (ret)
+ return ret;
+
+ pkt = cmd.resp_pkt;
+
+ resp_len = iwl_rx_packet_payload_len(pkt);
+ if (IWL_FW_CHECK(mld, resp_len != sizeof(*resp),
+ "Scan Abort: unexpected response length %d\n",
+ resp_len)) {
+ ret = -EIO;
+ goto out;
+ }
+
+ resp = (void *)pkt->data;
+ *status = le32_to_cpu(resp->status);
+
+out:
+ iwl_free_resp(&cmd);
+ return ret;
+}
+
+static int
+iwl_mld_scan_abort(struct iwl_mld *mld, int type, int uid, bool *wait)
+{
+ enum iwl_umac_scan_abort_status status;
+ int ret;
+
+ *wait = true;
+
+ IWL_DEBUG_SCAN(mld, "Sending scan abort, uid %u\n", uid);
+
+ ret = iwl_mld_scan_send_abort_cmd_status(mld, uid, &status);
+
+ IWL_DEBUG_SCAN(mld, "Scan abort: ret=%d status=%u\n", ret, status);
+
+ /* We don't need to wait to scan complete in the following cases:
+ * 1. Driver failed to send the scan abort cmd.
+ * 2. The FW is no longer familiar with the scan that needs to be
+ * stopped. It is expected that the scan complete notification was
+ * already received but not yet processed.
+ *
+ * In both cases the flow should continue similar to the case that the
+ * scan was really aborted.
+ */
+ if (ret || status == IWL_UMAC_SCAN_ABORT_STATUS_NOT_FOUND)
+ *wait = false;
+
+ return ret;
+}
+
+static int
+iwl_mld_scan_stop_wait(struct iwl_mld *mld, int type, int uid)
+{
+ struct iwl_notification_wait wait_scan_done;
+ static const u16 scan_comp_notif[] = { SCAN_COMPLETE_UMAC };
+ bool wait = true;
+ int ret;
+
+ iwl_init_notification_wait(&mld->notif_wait, &wait_scan_done,
+ scan_comp_notif,
+ ARRAY_SIZE(scan_comp_notif),
+ NULL, NULL);
+
+ IWL_DEBUG_SCAN(mld, "Preparing to stop scan, type=%x\n", type);
+
+ ret = iwl_mld_scan_abort(mld, type, uid, &wait);
+ if (ret) {
+ IWL_DEBUG_SCAN(mld, "couldn't stop scan type=%d\n", type);
+ goto return_no_wait;
+ }
+
+ if (!wait) {
+ IWL_DEBUG_SCAN(mld, "no need to wait for scan type=%d\n", type);
+ goto return_no_wait;
+ }
+
+ return iwl_wait_notification(&mld->notif_wait, &wait_scan_done, HZ);
+
+return_no_wait:
+ iwl_remove_notification(&mld->notif_wait, &wait_scan_done);
+ return ret;
+}
+
+int iwl_mld_sched_scan_start(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ struct cfg80211_sched_scan_request *req,
+ struct ieee80211_scan_ies *ies,
+ int type)
+{
+ struct iwl_host_cmd hcmd = {
+ .id = WIDE_ID(LONG_GROUP, SCAN_REQ_UMAC),
+ .len = { mld->scan.cmd_size, },
+ .data = { mld->scan.cmd, },
+ .dataflags = { IWL_HCMD_DFL_NOCOPY, },
+ };
+ struct iwl_mld_scan_params params = {};
+ struct iwl_mld_scan_iter_data scan_iter_data = {
+ .current_vif = vif,
+ };
+ bool non_psc_included = false;
+ int ret, uid;
+
+ /* we should have failed registration if scan_cmd was NULL */
+ if (WARN_ON(!mld->scan.cmd))
+ return -ENOMEM;
+
+ /* FW supports only a single periodic scan */
+ if (mld->scan.status & (IWL_MLD_SCAN_SCHED | IWL_MLD_SCAN_NETDETECT))
+ return -EBUSY;
+
+ ieee80211_iterate_active_interfaces_mtx(mld->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mld_scan_iterator,
+ &scan_iter_data);
+
+ params.type = iwl_mld_get_scan_type(mld, vif, &scan_iter_data);
+ params.flags = req->flags;
+ params.n_ssids = req->n_ssids;
+ params.ssids = req->ssids;
+ params.n_channels = req->n_channels;
+ params.channels = req->channels;
+ params.mac_addr = req->mac_addr;
+ params.mac_addr_mask = req->mac_addr_mask;
+ params.no_cck = false;
+ params.pass_all = iwl_mld_scan_pass_all(mld, req);
+ params.n_match_sets = req->n_match_sets;
+ params.match_sets = req->match_sets;
+ params.n_scan_plans = req->n_scan_plans;
+ params.scan_plans = req->scan_plans;
+ /* TODO: CDB - per-band flag */
+ params.respect_p2p_go =
+ iwl_mld_get_respect_p2p_go(mld, vif,
+ scan_iter_data.global_low_latency);
+
+ /* UMAC scan supports up to 16-bit delays, trim it down to 16-bits */
+ params.delay = req->delay > U16_MAX ? U16_MAX : req->delay;
+
+ eth_broadcast_addr(params.bssid);
+
+ ret = iwl_mld_config_sched_scan_profiles(mld, req);
+ if (ret)
+ return ret;
+
+ iwl_mld_scan_build_probe_req(mld, vif, ies, &params);
+
+ ret = iwl_mld_sched_scan_handle_non_psc_channels(&params,
+ &non_psc_included);
+ if (ret)
+ goto out;
+
+ if (!iwl_mld_scan_fits(mld, req->n_ssids, ies, params.n_channels)) {
+ ret = -ENOBUFS;
+ goto out;
+ }
+
+ uid = iwl_mld_scan_build_cmd(mld, vif, &params, type,
+ scan_iter_data.global_low_latency);
+ if (uid < 0) {
+ ret = uid;
+ goto out;
+ }
+
+ ret = iwl_mld_send_cmd(mld, &hcmd);
+ if (!ret) {
+ IWL_DEBUG_SCAN(mld,
+ "Sched scan request send success: type=%u, uid=%u\n",
+ type, uid);
+ mld->scan.uid_status[uid] = type;
+ mld->scan.status |= type;
+ } else {
+ IWL_ERR(mld, "Sched scan failed! ret %d\n", ret);
+ mld->scan.pass_all_sched_res = SCHED_SCAN_PASS_ALL_STATE_DISABLED;
+ }
+
+out:
+ if (non_psc_included)
+ kfree(params.channels);
+ return ret;
+}
+
+int iwl_mld_scan_stop(struct iwl_mld *mld, int type, bool notify)
+{
+ int uid, ret;
+
+ IWL_DEBUG_SCAN(mld,
+ "Request to stop scan: type=0x%x, status=0x%x\n",
+ type, mld->scan.status);
+
+ if (!(mld->scan.status & type))
+ return 0;
+
+ uid = iwl_mld_scan_uid_by_status(mld, type);
+ /* must be valid, we just checked it's running */
+ if (WARN_ON_ONCE(uid < 0))
+ return uid;
+
+ ret = iwl_mld_scan_stop_wait(mld, type, uid);
+ if (ret)
+ IWL_DEBUG_SCAN(mld, "Failed to stop scan\n");
+
+ /* Clear the scan status so the next scan requests will
+ * succeed and mark the scan as stopping, so that the Rx
+ * handler doesn't do anything, as the scan was stopped from
+ * above. Also remove the handler to not notify mac80211
+ * erroneously after a new scan starts, for example.
+ */
+ mld->scan.status &= ~type;
+ mld->scan.uid_status[uid] = IWL_MLD_SCAN_NONE;
+ iwl_mld_cancel_notifications_of_object(mld, IWL_MLD_OBJECT_TYPE_SCAN,
+ uid);
+
+ if (type == IWL_MLD_SCAN_REGULAR) {
+ if (notify) {
+ struct cfg80211_scan_info info = {
+ .aborted = true,
+ };
+
+ ieee80211_scan_completed(mld->hw, &info);
+ }
+ } else if (notify) {
+ ieee80211_sched_scan_stopped(mld->hw);
+ mld->scan.pass_all_sched_res = SCHED_SCAN_PASS_ALL_STATE_DISABLED;
+ }
+
+ return ret;
+}
+
+int iwl_mld_regular_scan_start(struct iwl_mld *mld, struct ieee80211_vif *vif,
+ struct cfg80211_scan_request *req,
+ struct ieee80211_scan_ies *ies)
+{
+ return _iwl_mld_single_scan_start(mld, vif, req, ies,
+ IWL_MLD_SCAN_REGULAR);
+}
+
+static void iwl_mld_int_mlo_scan_start(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ struct ieee80211_channel **channels,
+ size_t n_channels)
+{
+ struct cfg80211_scan_request *req __free(kfree) = NULL;
+ struct ieee80211_scan_ies ies = {};
+ size_t size;
+ int ret;
+
+ IWL_DEBUG_SCAN(mld, "Starting Internal MLO scan: n_channels=%zu\n",
+ n_channels);
+
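+ /* Build a minimal synthetic scan request that covers only the given
+ * channels; no SSIDs or extra IEs are added.
+ */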
+ size = struct_size(req, channels, n_channels);
+ req = kzalloc(size, GFP_KERNEL);
+ if (!req)
+ return;
+
+ /* set the requested channels */
+ for (int i = 0; i < n_channels; i++)
+ req->channels[i] = channels[i];
+
+ req->n_channels = n_channels;
+
+ /* set the rates */
+ for (int i = 0; i < NUM_NL80211_BANDS; i++)
+ if (mld->wiphy->bands[i])
+ req->rates[i] =
+ (1 << mld->wiphy->bands[i]->n_bitrates) - 1;
+
+ req->wdev = ieee80211_vif_to_wdev(vif);
+ req->wiphy = mld->wiphy;
+ req->scan_start = jiffies;
+ req->tsf_report_link_id = -1;
+
+ ret = _iwl_mld_single_scan_start(mld, vif, req, &ies,
+ IWL_MLD_SCAN_INT_MLO);
+
+ if (!ret)
+ mld->scan.last_mlo_scan_time = ktime_get_boottime_ns();
+
+ IWL_DEBUG_SCAN(mld, "Internal MLO scan: ret=%d\n", ret);
+}
+
+void iwl_mld_int_mlo_scan(struct iwl_mld *mld, struct ieee80211_vif *vif)
+{
+ struct ieee80211_channel *channels[IEEE80211_MLD_MAX_NUM_LINKS];
+ unsigned long usable_links = ieee80211_vif_usable_links(vif);
+ size_t n_channels = 0;
+ u8 link_id;
+
+ lockdep_assert_wiphy(mld->wiphy);
+
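+ /* an internal MLO scan is only meaningful for an associated MLD vif
+ * with more than one valid link
+ */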
+ if (!vif->cfg.assoc || !ieee80211_vif_is_mld(vif) ||
+ hweight16(vif->valid_links) == 1)
+ return;
+
+ if (mld->scan.status & IWL_MLD_SCAN_INT_MLO) {
+ IWL_DEBUG_SCAN(mld, "Internal MLO scan is already running\n");
+ return;
+ }
+
+ for_each_set_bit(link_id, &usable_links, IEEE80211_MLD_MAX_NUM_LINKS) {
+ struct ieee80211_bss_conf *link_conf =
+ link_conf_dereference_check(vif, link_id);
+
+ if (WARN_ON_ONCE(!link_conf))
+ continue;
+
+ channels[n_channels++] = link_conf->chanreq.oper.chan;
+ }
+
+ if (!n_channels)
+ return;
+
+ iwl_mld_int_mlo_scan_start(mld, vif, channels, n_channels);
+}
+
+void iwl_mld_handle_scan_iter_complete_notif(struct iwl_mld *mld,
+ struct iwl_rx_packet *pkt)
+{
+ struct iwl_umac_scan_iter_complete_notif *notif = (void *)pkt->data;
+ u32 uid = __le32_to_cpu(notif->uid);
+
+ if (IWL_FW_CHECK(mld, uid >= ARRAY_SIZE(mld->scan.uid_status),
+ "FW reports out-of-range scan UID %d\n", uid))
+ return;
+
+ if (mld->scan.uid_status[uid] == IWL_MLD_SCAN_REGULAR)
+ mld->scan.start_tsf = le64_to_cpu(notif->start_tsf);
+
+ IWL_DEBUG_SCAN(mld,
+ "UMAC Scan iteration complete: status=0x%x scanned_channels=%d\n",
+ notif->status, notif->scanned_channels);
+
+ if (mld->scan.pass_all_sched_res == SCHED_SCAN_PASS_ALL_STATE_FOUND) {
+ IWL_DEBUG_SCAN(mld, "Pass all scheduled scan results found\n");
+ ieee80211_sched_scan_results(mld->hw);
+ mld->scan.pass_all_sched_res = SCHED_SCAN_PASS_ALL_STATE_ENABLED;
+ }
+
+ IWL_DEBUG_SCAN(mld,
+ "UMAC Scan iteration complete: scan started at %llu (TSF)\n",
+ le64_to_cpu(notif->start_tsf));
+}
+
+void iwl_mld_handle_match_found_notif(struct iwl_mld *mld,
+ struct iwl_rx_packet *pkt)
+{
+ IWL_DEBUG_SCAN(mld, "Scheduled scan results\n");
+ ieee80211_sched_scan_results(mld->hw);
+}
+
+void iwl_mld_handle_scan_complete_notif(struct iwl_mld *mld,
+ struct iwl_rx_packet *pkt)
+{
+ struct iwl_umac_scan_complete *notif = (void *)pkt->data;
+ bool aborted = (notif->status == IWL_SCAN_OFFLOAD_ABORTED);
+ u32 uid = __le32_to_cpu(notif->uid);
+
+ if (IWL_FW_CHECK(mld, uid >= ARRAY_SIZE(mld->scan.uid_status),
+ "FW reports out-of-range scan UID %d\n", uid))
+ return;
+
+ IWL_DEBUG_SCAN(mld,
+ "Scan completed: uid=%u type=%u, status=%s, EBS=%s\n",
+ uid, mld->scan.uid_status[uid],
+ notif->status == IWL_SCAN_OFFLOAD_COMPLETED ?
+ "completed" : "aborted",
+ iwl_mld_scan_ebs_status_str(notif->ebs_status));
+ IWL_DEBUG_SCAN(mld, "Scan completed: scan_status=0x%x\n",
+ mld->scan.status);
+ IWL_DEBUG_SCAN(mld,
+ "Scan completed: line=%u, iter=%u, elapsed time=%u\n",
+ notif->last_schedule, notif->last_iter,
+ __le32_to_cpu(notif->time_from_last_iter));
+
+ if (IWL_FW_CHECK(mld, !(mld->scan.uid_status[uid] & mld->scan.status),
+ "FW reports scan UID %d we didn't trigger\n", uid))
+ return;
+
+ /* if the scan is already stopping, we don't need to notify mac80211 */
+ if (mld->scan.uid_status[uid] == IWL_MLD_SCAN_REGULAR) {
+ struct cfg80211_scan_info info = {
+ .aborted = aborted,
+ .scan_start_tsf = mld->scan.start_tsf,
+ };
+ int fw_link_id = mld->scan.fw_link_id;
+ struct ieee80211_bss_conf *link_conf = NULL;
+
+ if (fw_link_id != IWL_MLD_INVALID_FW_ID)
+ link_conf =
+ wiphy_dereference(mld->wiphy,
+ mld->fw_id_to_bss_conf[fw_link_id]);
+
+ /* It is possible that by the time the scan is complete the
+ * link was already removed and is not valid.
+ */
+ if (link_conf)
+ ether_addr_copy(info.tsf_bssid, link_conf->bssid);
+ else
+ IWL_DEBUG_SCAN(mld, "Scan link is no longer valid\n");
+
+ ieee80211_scan_completed(mld->hw, &info);
+ } else if (mld->scan.uid_status[uid] == IWL_MLD_SCAN_SCHED) {
+ ieee80211_sched_scan_stopped(mld->hw);
+ mld->scan.pass_all_sched_res = SCHED_SCAN_PASS_ALL_STATE_DISABLED;
+ } else if (mld->scan.uid_status[uid] == IWL_MLD_SCAN_INT_MLO) {
+ IWL_DEBUG_SCAN(mld, "Internal MLO scan completed\n");
+
+ /*
+ * We limit link selection to internal MLO scans as otherwise
+ * we do not know whether all channels were covered.
+ */
+ iwl_mld_select_links(mld);
+ }
+
+ mld->scan.status &= ~mld->scan.uid_status[uid];
+
+ IWL_DEBUG_SCAN(mld, "Scan completed: after update: scan_status=0x%x\n",
+ mld->scan.status);
+
+ mld->scan.uid_status[uid] = IWL_MLD_SCAN_NONE;
+
+ if (notif->ebs_status != IWL_SCAN_EBS_SUCCESS &&
+ notif->ebs_status != IWL_SCAN_EBS_INACTIVE)
+ mld->scan.last_ebs_failed = true;
+}
+
+/* This function is used in the NIC restart flow, to inform mac80211 about
+ * scans that were aborted by the restart flow or by an assert.
+ */
+void iwl_mld_report_scan_aborted(struct iwl_mld *mld)
+{
+ int uid;
+
+ uid = iwl_mld_scan_uid_by_status(mld, IWL_MLD_SCAN_REGULAR);
+ if (uid >= 0) {
+ struct cfg80211_scan_info info = {
+ .aborted = true,
+ };
+
+ ieee80211_scan_completed(mld->hw, &info);
+ mld->scan.uid_status[uid] = IWL_MLD_SCAN_NONE;
+ }
+
+ uid = iwl_mld_scan_uid_by_status(mld, IWL_MLD_SCAN_SCHED);
+ if (uid >= 0) {
+ mld->scan.pass_all_sched_res = SCHED_SCAN_PASS_ALL_STATE_DISABLED;
+ mld->scan.uid_status[uid] = IWL_MLD_SCAN_NONE;
+
+ /* sched scan will be restarted by mac80211 in reconfig.
+ * report to mac80211 that sched scan stopped only if we won't
+ * restart the firmware.
+ */
+ if (!iwlwifi_mod_params.fw_restart)
+ ieee80211_sched_scan_stopped(mld->hw);
+ }
+
+ uid = iwl_mld_scan_uid_by_status(mld, IWL_MLD_SCAN_INT_MLO);
+ if (uid >= 0) {
+ IWL_DEBUG_SCAN(mld, "Internal MLO scan aborted\n");
+ mld->scan.uid_status[uid] = IWL_MLD_SCAN_NONE;
+ }
+
+ BUILD_BUG_ON(IWL_MLD_SCAN_NONE != 0);
+ memset(mld->scan.uid_status, 0, sizeof(mld->scan.uid_status));
+}
+
+int iwl_mld_alloc_scan_cmd(struct iwl_mld *mld)
+{
+ u8 scan_cmd_ver = iwl_fw_lookup_cmd_ver(mld->fw, SCAN_REQ_UMAC,
+ IWL_FW_CMD_VER_UNKNOWN);
+ size_t scan_cmd_size;
+
+ if (scan_cmd_ver == 17) {
+ scan_cmd_size = sizeof(struct iwl_scan_req_umac_v17);
+ } else {
+ IWL_ERR(mld, "Unexpected scan cmd version %d\n", scan_cmd_ver);
+ return -EINVAL;
+ }
+
+ mld->scan.cmd = kmalloc(scan_cmd_size, GFP_KERNEL);
+ if (!mld->scan.cmd)
+ return -ENOMEM;
+
+ mld->scan.cmd_size = scan_cmd_size;
+
+ return 0;
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/scan.h b/drivers/net/wireless/intel/iwlwifi/mld/scan.h
new file mode 100644
index 000000000000..3ae940d55065
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mld/scan.h
@@ -0,0 +1,136 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2024-2025 Intel Corporation
+ */
+#ifndef __iwl_mld_scan_h__
+#define __iwl_mld_scan_h__
+
+int iwl_mld_alloc_scan_cmd(struct iwl_mld *mld);
+
+int iwl_mld_regular_scan_start(struct iwl_mld *mld, struct ieee80211_vif *vif,
+ struct cfg80211_scan_request *req,
+ struct ieee80211_scan_ies *ies);
+
+void iwl_mld_int_mlo_scan(struct iwl_mld *mld, struct ieee80211_vif *vif);
+
+void iwl_mld_handle_scan_iter_complete_notif(struct iwl_mld *mld,
+ struct iwl_rx_packet *pkt);
+
+int iwl_mld_scan_stop(struct iwl_mld *mld, int type, bool notify);
+
+int iwl_mld_sched_scan_start(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ struct cfg80211_sched_scan_request *req,
+ struct ieee80211_scan_ies *ies,
+ int type);
+
+void iwl_mld_handle_match_found_notif(struct iwl_mld *mld,
+ struct iwl_rx_packet *pkt);
+
+void iwl_mld_handle_scan_complete_notif(struct iwl_mld *mld,
+ struct iwl_rx_packet *pkt);
+
+#define WFA_TPC_IE_LEN 9
+
+static inline int iwl_mld_scan_max_template_size(void)
+{
+#define MAC_HDR_LEN 24
+#define DS_IE_LEN 3
+#define SSID_IE_LEN 2
+
+/* the driver creates the 802.11 header, WFA TPC IE, DS parameter and SSID IE */
+#define DRIVER_TOTAL_IES_LEN \
+ (MAC_HDR_LEN + WFA_TPC_IE_LEN + DS_IE_LEN + SSID_IE_LEN)
+
+ BUILD_BUG_ON(SCAN_OFFLOAD_PROBE_REQ_SIZE < DRIVER_TOTAL_IES_LEN);
+
+ return SCAN_OFFLOAD_PROBE_REQ_SIZE - DRIVER_TOTAL_IES_LEN;
+}
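+
+/* Worked example for the helper above (illustrative only: the real
+ * SCAN_OFFLOAD_PROBE_REQ_SIZE value comes from the fw/api scan definitions,
+ * 512 is used here purely for illustration): if the probe request template
+ * buffer were 512 bytes, the space left for the per-band IEs supplied by
+ * mac80211 would be
+ *	512 - (MAC_HDR_LEN + WFA_TPC_IE_LEN + DS_IE_LEN + SSID_IE_LEN)
+ *	= 512 - (24 + 9 + 3 + 2) = 474 bytes.
+ */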
+
+void iwl_mld_report_scan_aborted(struct iwl_mld *mld);
+
+enum iwl_mld_scan_status {
+ IWL_MLD_SCAN_NONE = 0,
+ IWL_MLD_SCAN_REGULAR = BIT(0),
+ IWL_MLD_SCAN_SCHED = BIT(1),
+ IWL_MLD_SCAN_NETDETECT = BIT(2),
+ IWL_MLD_SCAN_INT_MLO = BIT(3),
+};
+
+/* enum iwl_mld_pass_all_sched_results_states - Defines the states for
+ * handling/passing scheduled scan results to mac80211
+ * @SCHED_SCAN_PASS_ALL_STATE_DISABLED: Don't pass all scan results, only
+ *	when a match is found.
+ * @SCHED_SCAN_PASS_ALL_STATE_ENABLED: Passing all scan results is enabled
+ *	(no filtering).
+ * @SCHED_SCAN_PASS_ALL_STATE_FOUND: A scan result was found; pass it on the
+ *	next scan iteration complete notification.
+ */
+enum iwl_mld_pass_all_sched_results_states {
+ SCHED_SCAN_PASS_ALL_STATE_DISABLED,
+ SCHED_SCAN_PASS_ALL_STATE_ENABLED,
+ SCHED_SCAN_PASS_ALL_STATE_FOUND,
+};
+
+/**
+ * enum iwl_mld_traffic_load - Levels of traffic load
+ *
+ * @IWL_MLD_TRAFFIC_LOW: low traffic load
+ * @IWL_MLD_TRAFFIC_MEDIUM: medium traffic load
+ * @IWL_MLD_TRAFFIC_HIGH: high traffic load
+ */
+enum iwl_mld_traffic_load {
+ IWL_MLD_TRAFFIC_LOW,
+ IWL_MLD_TRAFFIC_MEDIUM,
+ IWL_MLD_TRAFFIC_HIGH,
+};
+
+/**
+ * struct iwl_mld_scan - Scan data
+ * @status: scan status, a combination of %enum iwl_mld_scan_status,
+ * reflects the %scan.uid_status array.
+ * @uid_status: array to track the scan status per uid.
+ * @start_tsf: start time of last scan in TSF of the link that requested
+ * the scan.
+ * @last_ebs_failed: true if the last EBS (Energy Based Scan) failed.
+ * @pass_all_sched_res: see %enum iwl_mld_pass_all_sched_results_states.
+ * @fw_link_id: the current (regular) scan fw link id, used by scan
+ * complete notif.
+ * @traffic_load: traffic load related data
+ * @traffic_load.last_stats_ts_usec: The timestamp of the last statistics
+ * notification, used to calculate the elapsed time between two
+ * notifications and determine the traffic load
+ * @traffic_load.status: The current traffic load status, see
+ * &enum iwl_mld_traffic_load
+ * @cmd_size: size of %cmd.
+ * @cmd: pointer to scan cmd buffer (allocated once in op mode start).
+ * @last_6ghz_passive_jiffies: stores the last 6GHz passive scan time
+ * in jiffies.
+ * @last_start_time_jiffies: stores the last start time in jiffies
+ * (interface up/reset/resume).
+ * @last_mlo_scan_time: start time of the last MLO scan in nanoseconds since
+ * boot.
+ */
+struct iwl_mld_scan {
+ /* Add here fields that need clean up on restart */
+ struct_group(zeroed_on_hw_restart,
+ unsigned int status;
+ u32 uid_status[IWL_MAX_UMAC_SCANS];
+ u64 start_tsf;
+ bool last_ebs_failed;
+ enum iwl_mld_pass_all_sched_results_states pass_all_sched_res;
+ u8 fw_link_id;
+ struct {
+ u32 last_stats_ts_usec;
+ enum iwl_mld_traffic_load status;
+ } traffic_load;
+ );
+ /* And here fields that survive a fw restart */
+ size_t cmd_size;
+ void *cmd;
+ unsigned long last_6ghz_passive_jiffies;
+ unsigned long last_start_time_jiffies;
+ unsigned long last_mlo_scan_time;
+};
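+
+/* A minimal sketch of how the zeroed_on_hw_restart group above is meant to
+ * be used (assuming the usual struct_group() pattern; the actual restart
+ * code lives elsewhere in the driver): struct_group() exposes the wrapped
+ * fields as a named member, so the restart path can wipe exactly the state
+ * that must not survive a firmware restart, e.g.:
+ *
+ *	memset(&mld->scan.zeroed_on_hw_restart, 0,
+ *	       sizeof(mld->scan.zeroed_on_hw_restart));
+ *
+ * while @cmd, @cmd_size and the time stamps below the group are preserved.
+ */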
+
+#endif /* __iwl_mld_scan_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/session-protect.c b/drivers/net/wireless/intel/iwlwifi/mld/session-protect.c
new file mode 100644
index 000000000000..dbb5615dc3f6
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mld/session-protect.c
@@ -0,0 +1,222 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2024 Intel Corporation
+ */
+#include "session-protect.h"
+#include "fw/api/time-event.h"
+#include "fw/api/context.h"
+#include "iface.h"
+#include <net/mac80211.h>
+
+void iwl_mld_handle_session_prot_notif(struct iwl_mld *mld,
+ struct iwl_rx_packet *pkt)
+{
+ struct iwl_session_prot_notif *notif = (void *)pkt->data;
+ int fw_link_id = le32_to_cpu(notif->mac_link_id);
+ struct ieee80211_bss_conf *link_conf =
+ iwl_mld_fw_id_to_link_conf(mld, fw_link_id);
+ struct ieee80211_vif *vif;
+ struct iwl_mld_vif *mld_vif;
+ struct iwl_mld_session_protect *session_protect;
+
+ if (WARN_ON(!link_conf))
+ return;
+
+ vif = link_conf->vif;
+ mld_vif = iwl_mld_vif_from_mac80211(vif);
+ session_protect = &mld_vif->session_protect;
+
+ if (!le32_to_cpu(notif->status)) {
+ memset(session_protect, 0, sizeof(*session_protect));
+ } else if (le32_to_cpu(notif->start)) {
+ /* End_jiffies indicates an active session */
+ session_protect->session_requested = false;
+ session_protect->end_jiffies =
+ TU_TO_EXP_TIME(session_protect->duration);
+ /* !session_protect->end_jiffies means inactive session */
+ if (!session_protect->end_jiffies)
+ session_protect->end_jiffies = 1;
+ } else {
+ memset(session_protect, 0, sizeof(*session_protect));
+ }
+}
+
+static int _iwl_mld_schedule_session_protection(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ u32 duration, u32 min_duration,
+ int link_id)
+{
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+ struct iwl_mld_link *link =
+ iwl_mld_link_dereference_check(mld_vif, link_id);
+ struct iwl_mld_session_protect *session_protect =
+ &mld_vif->session_protect;
+ struct iwl_session_prot_cmd cmd = {
+ .id_and_color = cpu_to_le32(link->fw_id),
+ .action = cpu_to_le32(FW_CTXT_ACTION_ADD),
+ .conf_id = cpu_to_le32(SESSION_PROTECT_CONF_ASSOC),
+ .duration_tu = cpu_to_le32(MSEC_TO_TU(duration)),
+ };
+ int ret;
+
+ lockdep_assert_wiphy(mld->wiphy);
+
+ WARN(hweight16(vif->active_links) > 1,
+ "Session protection isn't allowed with more than one active link");
+
+ if (session_protect->end_jiffies &&
+ time_after(session_protect->end_jiffies,
+ TU_TO_EXP_TIME(min_duration))) {
+		IWL_DEBUG_TE(mld, "We have enough time in the current session: %u\n",
+ jiffies_to_msecs(session_protect->end_jiffies -
+ jiffies));
+ return -EALREADY;
+ }
+
+ IWL_DEBUG_TE(mld, "Add a new session protection, duration %d TU\n",
+ le32_to_cpu(cmd.duration_tu));
+
+ ret = iwl_mld_send_cmd_pdu(mld, WIDE_ID(MAC_CONF_GROUP,
+ SESSION_PROTECTION_CMD), &cmd);
+
+ if (ret)
+ return ret;
+
+ /* end_jiffies will be updated when handling session_prot_notif */
+ session_protect->end_jiffies = 0;
+ session_protect->duration = duration;
+ session_protect->session_requested = true;
+
+ return 0;
+}
+
+void iwl_mld_schedule_session_protection(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ u32 duration, u32 min_duration,
+ int link_id)
+{
+ int ret;
+
+ ret = _iwl_mld_schedule_session_protection(mld, vif, duration,
+ min_duration, link_id);
+ if (ret && ret != -EALREADY)
+ IWL_ERR(mld,
+ "Couldn't send the SESSION_PROTECTION_CMD (%d)\n",
+ ret);
+}
+
+struct iwl_mld_session_start_data {
+ struct iwl_mld *mld;
+ struct ieee80211_bss_conf *link_conf;
+ bool success;
+};
+
+static bool iwl_mld_session_start_fn(struct iwl_notif_wait_data *notif_wait,
+ struct iwl_rx_packet *pkt, void *_data)
+{
+ struct iwl_session_prot_notif *notif = (void *)pkt->data;
+ unsigned int pkt_len = iwl_rx_packet_payload_len(pkt);
+ struct iwl_mld_session_start_data *data = _data;
+ struct ieee80211_bss_conf *link_conf;
+ struct iwl_mld *mld = data->mld;
+ int fw_link_id;
+
+ if (IWL_FW_CHECK(mld, pkt_len < sizeof(*notif),
+ "short session prot notif (%d)\n",
+ pkt_len))
+ return false;
+
+ fw_link_id = le32_to_cpu(notif->mac_link_id);
+ link_conf = iwl_mld_fw_id_to_link_conf(mld, fw_link_id);
+
+ if (link_conf != data->link_conf)
+ return false;
+
+ if (!le32_to_cpu(notif->status))
+ return true;
+
+ if (notif->start) {
+ data->success = true;
+ return true;
+ }
+
+ return false;
+}
+
+int iwl_mld_start_session_protection(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ u32 duration, u32 min_duration,
+ int link_id, unsigned long timeout)
+{
+ static const u16 start_notif[] = { SESSION_PROTECTION_NOTIF };
+ struct iwl_notification_wait start_wait;
+ struct iwl_mld_session_start_data data = {
+ .mld = mld,
+ .link_conf = wiphy_dereference(mld->wiphy,
+ vif->link_conf[link_id]),
+ };
+ int ret;
+
+ if (WARN_ON(!data.link_conf))
+ return -EINVAL;
+
+ iwl_init_notification_wait(&mld->notif_wait, &start_wait,
+ start_notif, ARRAY_SIZE(start_notif),
+ iwl_mld_session_start_fn, &data);
+
+ ret = _iwl_mld_schedule_session_protection(mld, vif, duration,
+ min_duration, link_id);
+
+ if (ret) {
+ iwl_remove_notification(&mld->notif_wait, &start_wait);
+ return ret == -EALREADY ? 0 : ret;
+ }
+
+ ret = iwl_wait_notification(&mld->notif_wait, &start_wait, timeout);
+ if (ret)
+ return ret;
+ return data.success ? 0 : -EIO;
+}
+
+int iwl_mld_cancel_session_protection(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ int link_id)
+{
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+ struct iwl_mld_link *link =
+ iwl_mld_link_dereference_check(mld_vif, link_id);
+ struct iwl_mld_session_protect *session_protect =
+ &mld_vif->session_protect;
+ struct iwl_session_prot_cmd cmd = {
+ .action = cpu_to_le32(FW_CTXT_ACTION_REMOVE),
+ .conf_id = cpu_to_le32(SESSION_PROTECT_CONF_ASSOC),
+ };
+ int ret;
+
+ lockdep_assert_wiphy(mld->wiphy);
+
+ /* If there isn't an active session or a requested one for this
+ * link do nothing
+ */
+ if (!session_protect->session_requested &&
+ !session_protect->end_jiffies)
+ return 0;
+
+ if (WARN_ON(!link))
+ return -EINVAL;
+
+ cmd.id_and_color = cpu_to_le32(link->fw_id);
+
+ ret = iwl_mld_send_cmd_pdu(mld,
+ WIDE_ID(MAC_CONF_GROUP,
+ SESSION_PROTECTION_CMD), &cmd);
+ if (ret) {
+ IWL_ERR(mld,
+ "Couldn't send the SESSION_PROTECTION_CMD\n");
+ return ret;
+ }
+
+ memset(session_protect, 0, sizeof(*session_protect));
+
+ return 0;
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/session-protect.h b/drivers/net/wireless/intel/iwlwifi/mld/session-protect.h
new file mode 100644
index 000000000000..642bec8451a1
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mld/session-protect.h
@@ -0,0 +1,102 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2024-2025 Intel Corporation
+ */
+
+#ifndef __session_protect_h__
+#define __session_protect_h__
+
+#include "mld.h"
+#include "hcmd.h"
+#include <net/mac80211.h>
+#include "fw/api/mac-cfg.h"
+
+/**
+ * DOC: session protection
+ *
+ * Session protection is a firmware API that allows the driver to request
+ * time on the medium. This is needed before association, when we must be on
+ * the medium for the association frame exchange. Once we configure the
+ * firmware as 'associated', the firmware will allocate time on the medium
+ * without needing a session protection.
+ *
+ * TDLS discovery uses this API as well, even after association, to ensure
+ * that other activities internal to the firmware will not interrupt our
+ * presence on the medium.
+ */
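+
+/* A minimal usage sketch (illustrative only; assumes a valid mld/vif and an
+ * active link_id): before the association frame exchange, a caller would
+ * typically request protection with
+ *
+ *	iwl_mld_schedule_session_protection(mld, vif,
+ *					    IWL_MLD_SESSION_PROTECTION_ASSOC_TIME_MS,
+ *					    IWL_MLD_SESSION_PROTECTION_MIN_TIME_MS,
+ *					    link_id);
+ *
+ * and, once the exchange is done (or abandoned), release the air time with
+ *
+ *	iwl_mld_cancel_session_protection(mld, vif, link_id);
+ */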
+
+/**
+ * struct iwl_mld_session_protect - session protection parameters
+ * @end_jiffies: expected end_jiffies of current session protection.
+ * 0 if not active
+ * @duration: the duration, in TU, of the current session
+ * @session_requested: A session protection command was sent and wasn't yet
+ * answered
+ */
+struct iwl_mld_session_protect {
+ unsigned long end_jiffies;
+ u32 duration;
+ bool session_requested;
+};
+
+#define IWL_MLD_SESSION_PROTECTION_ASSOC_TIME_MS 900
+#define IWL_MLD_SESSION_PROTECTION_MIN_TIME_MS 400
+
+/**
+ * iwl_mld_handle_session_prot_notif - handles %SESSION_PROTECTION_NOTIF
+ * @mld: the mld component
+ * @pkt: the RX packet containing the notification
+ */
+void iwl_mld_handle_session_prot_notif(struct iwl_mld *mld,
+ struct iwl_rx_packet *pkt);
+
+/**
+ * iwl_mld_schedule_session_protection - schedule a session protection
+ * @mld: the mld component
+ * @vif: the virtual interface for which the protection is issued
+ * @duration: the requested duration of the protection
+ * @min_duration: the minimum duration of the protection
+ * @link_id: The link to schedule a session protection for
+ */
+void iwl_mld_schedule_session_protection(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ u32 duration, u32 min_duration,
+ int link_id);
+
+/**
+ * iwl_mld_start_session_protection - start a session protection
+ * @mld: the mld component
+ * @vif: the virtual interface for which the protection is issued
+ * @duration: the requested duration of the protection
+ * @min_duration: the minimum duration of the protection
+ * @link_id: The link to schedule a session protection for
+ * @timeout: timeout for waiting
+ *
+ * This schedules the session protection, and waits for it to start
+ * (with timeout)
+ *
+ * Returns: 0 if successful, error code otherwise
+ */
+int iwl_mld_start_session_protection(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ u32 duration, u32 min_duration,
+ int link_id, unsigned long timeout);
+
+/**
+ * iwl_mld_cancel_session_protection - cancel the session protection.
+ * @mld: the mld component
+ * @vif: the virtual interface for which the session is issued
+ * @link_id: cancel the session protection for given link
+ *
+ * This function cancels the session protection, which is an act of good
+ * citizenship: if the protection is not needed any more it should be
+ * canceled, because the other MAC contexts wait for the medium during that
+ * time.
+ *
+ * Returns: 0 if successful, error code otherwise
+ *
+ */
+int iwl_mld_cancel_session_protection(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ int link_id);
+
+#endif /* __session_protect_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/sta.c b/drivers/net/wireless/intel/iwlwifi/mld/sta.c
new file mode 100644
index 000000000000..332a7aecec2d
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mld/sta.c
@@ -0,0 +1,1289 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2024-2025 Intel Corporation
+ */
+
+#include <linux/ieee80211.h>
+#include <kunit/static_stub.h>
+
+#include "sta.h"
+#include "hcmd.h"
+#include "iface.h"
+#include "mlo.h"
+#include "key.h"
+#include "agg.h"
+#include "tlc.h"
+#include "fw/api/sta.h"
+#include "fw/api/mac.h"
+#include "fw/api/rx.h"
+
+int iwl_mld_fw_sta_id_from_link_sta(struct iwl_mld *mld,
+ struct ieee80211_link_sta *link_sta)
+{
+ struct iwl_mld_link_sta *mld_link_sta;
+
+	/* This function should only be used with the wiphy lock held.
+	 * In other cases, it is not guaranteed that the link_sta will exist
+	 * in the driver too, and it is checked here.
+	 */
+ lockdep_assert_wiphy(mld->wiphy);
+
+ /* This is not meant to be called with a NULL pointer */
+ if (WARN_ON(!link_sta))
+ return -ENOENT;
+
+ mld_link_sta = iwl_mld_link_sta_from_mac80211(link_sta);
+ if (!mld_link_sta) {
+ WARN_ON(!iwl_mld_error_before_recovery(mld));
+ return -ENOENT;
+ }
+
+ return mld_link_sta->fw_id;
+}
+
+static void
+iwl_mld_fill_ampdu_size_and_dens(struct ieee80211_link_sta *link_sta,
+ struct ieee80211_bss_conf *link,
+ __le32 *tx_ampdu_max_size,
+ __le32 *tx_ampdu_spacing)
+{
+ u32 agg_size = 0, mpdu_dens = 0;
+
+ if (WARN_ON(!link_sta || !link))
+ return;
+
+	/* Note that we always use only legacy & the highest supported PPDUs,
+	 * so of the rows in Draft P802.11be D3.0 Table 10-12a ("Fields used
+	 * for calculating the maximum A-MPDU size of various PPDU types in
+	 * different bands"), we only need to worry about the highest
+	 * supported PPDU type here.
+	 */
+
+ if (link_sta->ht_cap.ht_supported) {
+ agg_size = link_sta->ht_cap.ampdu_factor;
+ mpdu_dens = link_sta->ht_cap.ampdu_density;
+ }
+
+ if (link->chanreq.oper.chan->band == NL80211_BAND_6GHZ) {
+ /* overwrite HT values on 6 GHz */
+ mpdu_dens =
+ le16_get_bits(link_sta->he_6ghz_capa.capa,
+ IEEE80211_HE_6GHZ_CAP_MIN_MPDU_START);
+ agg_size =
+ le16_get_bits(link_sta->he_6ghz_capa.capa,
+ IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP);
+ } else if (link_sta->vht_cap.vht_supported) {
+ /* if VHT supported overwrite HT value */
+ agg_size =
+ u32_get_bits(link_sta->vht_cap.cap,
+ IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK);
+ }
+
+ /* D6.0 10.12.2 A-MPDU length limit rules
+ * A STA indicates the maximum length of the A-MPDU preEOF padding
+ * that it can receive in an HE PPDU in the Maximum A-MPDU Length
+ * Exponent field in its HT Capabilities, VHT Capabilities,
+ * and HE 6 GHz Band Capabilities elements (if present) and the
+ * Maximum AMPDU Length Exponent Extension field in its HE
+ * Capabilities element
+ */
+ if (link_sta->he_cap.has_he)
+ agg_size +=
+ u8_get_bits(link_sta->he_cap.he_cap_elem.mac_cap_info[3],
+ IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_MASK);
+
+ if (link_sta->eht_cap.has_eht)
+ agg_size +=
+ u8_get_bits(link_sta->eht_cap.eht_cap_elem.mac_cap_info[1],
+ IEEE80211_EHT_MAC_CAP1_MAX_AMPDU_LEN_MASK);
+
+ /* Limit to max A-MPDU supported by FW */
+ agg_size = min_t(u32, agg_size,
+ STA_FLG_MAX_AGG_SIZE_4M >> STA_FLG_MAX_AGG_SIZE_SHIFT);
+
+ *tx_ampdu_max_size = cpu_to_le32(agg_size);
+ *tx_ampdu_spacing = cpu_to_le32(mpdu_dens);
+}
+
+static u8 iwl_mld_get_uapsd_acs(struct ieee80211_sta *sta)
+{
+ u8 uapsd_acs = 0;
+
+ if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK)
+ uapsd_acs |= BIT(AC_BK);
+ if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
+ uapsd_acs |= BIT(AC_BE);
+ if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI)
+ uapsd_acs |= BIT(AC_VI);
+ if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
+ uapsd_acs |= BIT(AC_VO);
+
+ return uapsd_acs | uapsd_acs << 4;
+}
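+
+/* Worked example for the helper above (assuming AC_BK..AC_VO map to bits
+ * 0..3, as in the FW API enum): a STA advertising U-APSD for BE and VO only
+ * gives uapsd_acs = BIT(AC_BE) | BIT(AC_VO) = 0b1010, and the returned value
+ * duplicates that nibble into both halves of the byte: 0b10101010 = 0xaa.
+ */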
+
+static u8 iwl_mld_he_get_ppe_val(u8 *ppe, u8 ppe_pos_bit)
+{
+ u8 byte_num = ppe_pos_bit / 8;
+ u8 bit_num = ppe_pos_bit % 8;
+ u8 residue_bits;
+ u8 res;
+
+ if (bit_num <= 5)
+ return (ppe[byte_num] >> bit_num) &
+ (BIT(IEEE80211_PPE_THRES_INFO_PPET_SIZE) - 1);
+
+	/* If bit_num > 5, we have to combine bits with the next byte.
+	 * Calculate how many bits we need to take from the current byte
+	 * (called "residue_bits" here), and add them to the bits from the
+	 * next byte.
+	 */
+
+ residue_bits = 8 - bit_num;
+
+ res = (ppe[byte_num + 1] &
+ (BIT(IEEE80211_PPE_THRES_INFO_PPET_SIZE - residue_bits) - 1)) <<
+ residue_bits;
+ res += (ppe[byte_num] >> bit_num) & (BIT(residue_bits) - 1);
+
+ return res;
+}
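+
+/* Worked example for the helper above (IEEE80211_PPE_THRES_INFO_PPET_SIZE is
+ * 3 bits): ppe_pos_bit = 14 gives byte_num = 1, bit_num = 6, so the field
+ * straddles a byte boundary. residue_bits = 2 and the result combines bits
+ * 7:6 of ppe[1] (the low two bits of the field) with bit 0 of ppe[2] (the
+ * top bit), i.e. res = ((ppe[2] & 0x1) << 2) | (ppe[1] >> 6).
+ */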
+
+static void iwl_mld_parse_ppe(struct iwl_mld *mld,
+ struct iwl_he_pkt_ext_v2 *pkt_ext, u8 nss,
+ u8 ru_index_bitmap, u8 *ppe, u8 ppe_pos_bit,
+ bool inheritance)
+{
+ /* FW currently supports only nss == MAX_HE_SUPP_NSS
+ *
+ * If nss > MAX: we can ignore values we don't support
+ * If nss < MAX: we can set zeros in other streams
+ */
+ if (nss > MAX_HE_SUPP_NSS) {
+ IWL_DEBUG_INFO(mld, "Got NSS = %d - trimming to %d\n", nss,
+ MAX_HE_SUPP_NSS);
+ nss = MAX_HE_SUPP_NSS;
+ }
+
+ for (int i = 0; i < nss; i++) {
+ u8 ru_index_tmp = ru_index_bitmap << 1;
+ u8 low_th = IWL_HE_PKT_EXT_NONE, high_th = IWL_HE_PKT_EXT_NONE;
+
+ for (u8 bw = 0;
+ bw < ARRAY_SIZE(pkt_ext->pkt_ext_qam_th[i]);
+ bw++) {
+ ru_index_tmp >>= 1;
+
+			/* According to the 11be spec, if the PPE Thresholds
+			 * for a specific BW aren't present, they should be
+			 * inherited from the last BW for which we had PPE
+			 * Thresholds. In 11ax, though, there is no such
+			 * inheritance - just continue in that case.
+			 */
+ if (!(ru_index_tmp & 1)) {
+ if (inheritance)
+ goto set_thresholds;
+ else
+ continue;
+ }
+
+ high_th = iwl_mld_he_get_ppe_val(ppe, ppe_pos_bit);
+ ppe_pos_bit += IEEE80211_PPE_THRES_INFO_PPET_SIZE;
+ low_th = iwl_mld_he_get_ppe_val(ppe, ppe_pos_bit);
+ ppe_pos_bit += IEEE80211_PPE_THRES_INFO_PPET_SIZE;
+
+set_thresholds:
+ pkt_ext->pkt_ext_qam_th[i][bw][0] = low_th;
+ pkt_ext->pkt_ext_qam_th[i][bw][1] = high_th;
+ }
+ }
+}
+
+static void iwl_mld_set_pkt_ext_from_he_ppe(struct iwl_mld *mld,
+ struct ieee80211_link_sta *link_sta,
+ struct iwl_he_pkt_ext_v2 *pkt_ext,
+ bool inheritance)
+{
+ u8 nss = (link_sta->he_cap.ppe_thres[0] &
+ IEEE80211_PPE_THRES_NSS_MASK) + 1;
+ u8 *ppe = &link_sta->he_cap.ppe_thres[0];
+ u8 ru_index_bitmap =
+ u8_get_bits(*ppe,
+ IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK);
+ /* Starting after PPE header */
+ u8 ppe_pos_bit = IEEE80211_HE_PPE_THRES_INFO_HEADER_SIZE;
+
+ iwl_mld_parse_ppe(mld, pkt_ext, nss, ru_index_bitmap, ppe, ppe_pos_bit,
+ inheritance);
+}
+
+static int
+iwl_mld_set_pkt_ext_from_nominal_padding(struct iwl_he_pkt_ext_v2 *pkt_ext,
+ u8 nominal_padding)
+{
+ int low_th = -1;
+ int high_th = -1;
+
+ /* all the macros are the same for EHT and HE */
+ switch (nominal_padding) {
+ case IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_0US:
+ low_th = IWL_HE_PKT_EXT_NONE;
+ high_th = IWL_HE_PKT_EXT_NONE;
+ break;
+ case IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_8US:
+ low_th = IWL_HE_PKT_EXT_BPSK;
+ high_th = IWL_HE_PKT_EXT_NONE;
+ break;
+ case IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_16US:
+ case IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_20US:
+ low_th = IWL_HE_PKT_EXT_NONE;
+ high_th = IWL_HE_PKT_EXT_BPSK;
+ break;
+ }
+
+ if (low_th < 0 || high_th < 0)
+ return -EINVAL;
+
+ /* Set the PPE thresholds accordingly */
+ for (int i = 0; i < MAX_HE_SUPP_NSS; i++) {
+ for (u8 bw = 0;
+ bw < ARRAY_SIZE(pkt_ext->pkt_ext_qam_th[i]);
+ bw++) {
+ pkt_ext->pkt_ext_qam_th[i][bw][0] = low_th;
+ pkt_ext->pkt_ext_qam_th[i][bw][1] = high_th;
+ }
+ }
+
+ return 0;
+}
+
+static void iwl_mld_get_optimal_ppe_info(struct iwl_he_pkt_ext_v2 *pkt_ext,
+ u8 nominal_padding)
+{
+ for (int i = 0; i < MAX_HE_SUPP_NSS; i++) {
+ for (u8 bw = 0; bw < ARRAY_SIZE(pkt_ext->pkt_ext_qam_th[i]);
+ bw++) {
+ u8 *qam_th = &pkt_ext->pkt_ext_qam_th[i][bw][0];
+
+ if (nominal_padding >
+ IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_8US &&
+ qam_th[1] == IWL_HE_PKT_EXT_NONE)
+ qam_th[1] = IWL_HE_PKT_EXT_4096QAM;
+ else if (nominal_padding ==
+ IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_8US &&
+ qam_th[0] == IWL_HE_PKT_EXT_NONE &&
+ qam_th[1] == IWL_HE_PKT_EXT_NONE)
+ qam_th[0] = IWL_HE_PKT_EXT_4096QAM;
+ }
+ }
+}
+
+static void iwl_mld_fill_pkt_ext(struct iwl_mld *mld,
+ struct ieee80211_link_sta *link_sta,
+ struct iwl_he_pkt_ext_v2 *pkt_ext)
+{
+ if (WARN_ON(!link_sta))
+ return;
+
+	/* Initialize the PPE thresholds to "None" (7), as described in Table
+	 * 9-262ac of 802.11ax/D3.0.
+	 */
+ memset(pkt_ext, IWL_HE_PKT_EXT_NONE, sizeof(*pkt_ext));
+
+ if (link_sta->eht_cap.has_eht) {
+ u8 nominal_padding =
+ u8_get_bits(link_sta->eht_cap.eht_cap_elem.phy_cap_info[5],
+ IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_MASK);
+
+		/* If PPE Thresholds exist, parse them into a FW-familiar
+		 * format.
+		 */
+ if (link_sta->eht_cap.eht_cap_elem.phy_cap_info[5] &
+ IEEE80211_EHT_PHY_CAP5_PPE_THRESHOLD_PRESENT) {
+ u8 nss = (link_sta->eht_cap.eht_ppe_thres[0] &
+ IEEE80211_EHT_PPE_THRES_NSS_MASK) + 1;
+ u8 *ppe = &link_sta->eht_cap.eht_ppe_thres[0];
+ u8 ru_index_bitmap =
+ u16_get_bits(*ppe,
+ IEEE80211_EHT_PPE_THRES_RU_INDEX_BITMASK_MASK);
+ /* Starting after PPE header */
+ u8 ppe_pos_bit = IEEE80211_EHT_PPE_THRES_INFO_HEADER_SIZE;
+
+ iwl_mld_parse_ppe(mld, pkt_ext, nss, ru_index_bitmap,
+ ppe, ppe_pos_bit, true);
+			/* EHT PPE Thresholds don't exist - set the API
+			 * according to the HE PPE Thresholds
+			 */
+ } else if (link_sta->he_cap.he_cap_elem.phy_cap_info[6] &
+ IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT) {
+			/* Even though the HE Capabilities IE doesn't contain
+			 * PPE Thresholds for 320 MHz BW, the thresholds for
+			 * this BW will be filled in with the same values as
+			 * for 160 MHz, due to the inheritance, as required.
+			 */
+ iwl_mld_set_pkt_ext_from_he_ppe(mld, link_sta, pkt_ext,
+ true);
+
+ /* According to the requirements, for MCSs 12-13 the
+ * maximum value between HE PPE Threshold and Common
+ * Nominal Packet Padding needs to be taken
+ */
+ iwl_mld_get_optimal_ppe_info(pkt_ext, nominal_padding);
+
+		/* If PPE Thresholds aren't present in either the EHT IE or
+		 * the HE IE - take the thresholds from the Common Nominal
+		 * Packet Padding field
+		 */
+ } else {
+ iwl_mld_set_pkt_ext_from_nominal_padding(pkt_ext,
+ nominal_padding);
+ }
+ } else if (link_sta->he_cap.has_he) {
+ /* If PPE Thresholds exist, parse them into a FW-familiar format. */
+ if (link_sta->he_cap.he_cap_elem.phy_cap_info[6] &
+ IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT) {
+ iwl_mld_set_pkt_ext_from_he_ppe(mld, link_sta, pkt_ext,
+ false);
+		/* PPE Thresholds don't exist - set the API PPE values
+		 * according to the Common Nominal Packet Padding field.
+		 */
+ } else {
+ u8 nominal_padding =
+ u8_get_bits(link_sta->he_cap.he_cap_elem.phy_cap_info[9],
+ IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_MASK);
+ if (nominal_padding != IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_RESERVED)
+ iwl_mld_set_pkt_ext_from_nominal_padding(pkt_ext,
+ nominal_padding);
+ }
+ }
+
+ for (int i = 0; i < MAX_HE_SUPP_NSS; i++) {
+ for (int bw = 0;
+ bw < ARRAY_SIZE(*pkt_ext->pkt_ext_qam_th[i]);
+ bw++) {
+ u8 *qam_th =
+ &pkt_ext->pkt_ext_qam_th[i][bw][0];
+
+ IWL_DEBUG_HT(mld,
+ "PPE table: nss[%d] bw[%d] PPET8 = %d, PPET16 = %d\n",
+ i, bw, qam_th[0], qam_th[1]);
+ }
+ }
+}
+
+static u32 iwl_mld_get_htc_flags(struct ieee80211_link_sta *link_sta)
+{
+ u8 *mac_cap_info =
+ &link_sta->he_cap.he_cap_elem.mac_cap_info[0];
+ u32 htc_flags = 0;
+
+ if (mac_cap_info[0] & IEEE80211_HE_MAC_CAP0_HTC_HE)
+ htc_flags |= IWL_HE_HTC_SUPPORT;
+ if ((mac_cap_info[1] & IEEE80211_HE_MAC_CAP1_LINK_ADAPTATION) ||
+ (mac_cap_info[2] & IEEE80211_HE_MAC_CAP2_LINK_ADAPTATION)) {
+ u8 link_adap =
+ ((mac_cap_info[2] &
+ IEEE80211_HE_MAC_CAP2_LINK_ADAPTATION) << 1) +
+ (mac_cap_info[1] &
+ IEEE80211_HE_MAC_CAP1_LINK_ADAPTATION);
+
+ if (link_adap == 2)
+ htc_flags |=
+ IWL_HE_HTC_LINK_ADAP_UNSOLICITED;
+ else if (link_adap == 3)
+ htc_flags |= IWL_HE_HTC_LINK_ADAP_BOTH;
+ }
+ if (mac_cap_info[2] & IEEE80211_HE_MAC_CAP2_BSR)
+ htc_flags |= IWL_HE_HTC_BSR_SUPP;
+ if (mac_cap_info[3] & IEEE80211_HE_MAC_CAP3_OMI_CONTROL)
+ htc_flags |= IWL_HE_HTC_OMI_SUPP;
+ if (mac_cap_info[4] & IEEE80211_HE_MAC_CAP4_BQR)
+ htc_flags |= IWL_HE_HTC_BQR_SUPP;
+
+ return htc_flags;
+}
+
+static int iwl_mld_send_sta_cmd(struct iwl_mld *mld,
+ const struct iwl_sta_cfg_cmd *cmd)
+{
+ int ret = iwl_mld_send_cmd_pdu(mld,
+ WIDE_ID(MAC_CONF_GROUP, STA_CONFIG_CMD),
+ cmd);
+ if (ret)
+ IWL_ERR(mld, "STA_CONFIG_CMD send failed, ret=0x%x\n", ret);
+ return ret;
+}
+
+static int
+iwl_mld_add_modify_sta_cmd(struct iwl_mld *mld,
+ struct ieee80211_link_sta *link_sta)
+{
+ struct ieee80211_sta *sta = link_sta->sta;
+ struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(sta);
+ struct ieee80211_bss_conf *link;
+ struct iwl_mld_link *mld_link;
+ struct iwl_sta_cfg_cmd cmd = {};
+ int fw_id = iwl_mld_fw_sta_id_from_link_sta(mld, link_sta);
+
+ lockdep_assert_wiphy(mld->wiphy);
+
+ link = link_conf_dereference_protected(mld_sta->vif,
+ link_sta->link_id);
+
+ mld_link = iwl_mld_link_from_mac80211(link);
+
+ if (WARN_ON(!link || !mld_link) || fw_id < 0)
+ return -EINVAL;
+
+ cmd.sta_id = cpu_to_le32(fw_id);
+ cmd.station_type = cpu_to_le32(mld_sta->sta_type);
+ cmd.link_id = cpu_to_le32(mld_link->fw_id);
+
+ memcpy(&cmd.peer_mld_address, sta->addr, ETH_ALEN);
+ memcpy(&cmd.peer_link_address, link_sta->addr, ETH_ALEN);
+
+ if (mld_sta->sta_state >= IEEE80211_STA_ASSOC)
+ cmd.assoc_id = cpu_to_le32(sta->aid);
+
+ if (sta->mfp || mld_sta->sta_state < IEEE80211_STA_AUTHORIZED)
+ cmd.mfp = cpu_to_le32(1);
+
+ switch (link_sta->rx_nss) {
+ case 1:
+ cmd.mimo = cpu_to_le32(0);
+ break;
+ case 2 ... 8:
+ cmd.mimo = cpu_to_le32(1);
+ break;
+ }
+
+ switch (link_sta->smps_mode) {
+ case IEEE80211_SMPS_AUTOMATIC:
+ case IEEE80211_SMPS_NUM_MODES:
+ WARN_ON(1);
+ break;
+ case IEEE80211_SMPS_STATIC:
+ /* override NSS */
+ cmd.mimo = cpu_to_le32(0);
+ break;
+ case IEEE80211_SMPS_DYNAMIC:
+ cmd.mimo_protection = cpu_to_le32(1);
+ break;
+ case IEEE80211_SMPS_OFF:
+ /* nothing */
+ break;
+ }
+
+ iwl_mld_fill_ampdu_size_and_dens(link_sta, link,
+ &cmd.tx_ampdu_max_size,
+ &cmd.tx_ampdu_spacing);
+
+ if (sta->wme) {
+ cmd.sp_length =
+ cpu_to_le32(sta->max_sp ? sta->max_sp * 2 : 128);
+ cmd.uapsd_acs = cpu_to_le32(iwl_mld_get_uapsd_acs(sta));
+ }
+
+ if (link_sta->he_cap.has_he) {
+ cmd.trig_rnd_alloc =
+ cpu_to_le32(link->uora_exists ? 1 : 0);
+
+ /* PPE Thresholds */
+ iwl_mld_fill_pkt_ext(mld, link_sta, &cmd.pkt_ext);
+
+ /* HTC flags */
+ cmd.htc_flags =
+ cpu_to_le32(iwl_mld_get_htc_flags(link_sta));
+
+ if (link_sta->he_cap.he_cap_elem.mac_cap_info[2] &
+ IEEE80211_HE_MAC_CAP2_ACK_EN)
+ cmd.ack_enabled = cpu_to_le32(1);
+ }
+
+ return iwl_mld_send_sta_cmd(mld, &cmd);
+}
+
+IWL_MLD_ALLOC_FN(link_sta, link_sta)
+
+static int
+iwl_mld_add_link_sta(struct iwl_mld *mld, struct ieee80211_link_sta *link_sta)
+{
+ struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(link_sta->sta);
+ struct iwl_mld_link_sta *mld_link_sta;
+ int ret;
+ u8 fw_id;
+
+ lockdep_assert_wiphy(mld->wiphy);
+
+ /* We will fail to add it to the FW anyway */
+ if (iwl_mld_error_before_recovery(mld))
+ return -ENODEV;
+
+ mld_link_sta = iwl_mld_link_sta_from_mac80211(link_sta);
+
+	/* We need to preserve the fw sta ids during a restart, since the fw
+	 * will recover SN/PN for them; this is why the mld_link_sta exists.
+	 */
+ if (mld_link_sta) {
+ /* But if we are not restarting, this is not OK */
+ WARN_ON(!mld->fw_status.in_hw_restart);
+
+		/* Don't add a STA that is already in the FW, to avoid an assert */
+ if (WARN_ON(mld_link_sta->in_fw))
+ return -EINVAL;
+
+ fw_id = mld_link_sta->fw_id;
+ goto add_to_fw;
+ }
+
+ /* Allocate a fw id and map it to the link_sta */
+ ret = iwl_mld_allocate_link_sta_fw_id(mld, &fw_id, link_sta);
+ if (ret)
+ return ret;
+
+ if (link_sta == &link_sta->sta->deflink) {
+ mld_link_sta = &mld_sta->deflink;
+ } else {
+ mld_link_sta = kzalloc(sizeof(*mld_link_sta), GFP_KERNEL);
+ if (!mld_link_sta)
+ return -ENOMEM;
+ }
+
+ mld_link_sta->fw_id = fw_id;
+ rcu_assign_pointer(mld_sta->link[link_sta->link_id], mld_link_sta);
+
+add_to_fw:
+ ret = iwl_mld_add_modify_sta_cmd(mld, link_sta);
+ if (ret) {
+ RCU_INIT_POINTER(mld->fw_id_to_link_sta[fw_id], NULL);
+ RCU_INIT_POINTER(mld_sta->link[link_sta->link_id], NULL);
+ if (link_sta != &link_sta->sta->deflink)
+ kfree(mld_link_sta);
+ return ret;
+ }
+ mld_link_sta->in_fw = true;
+
+ return 0;
+}
+
+static int iwl_mld_rm_sta_from_fw(struct iwl_mld *mld, u8 fw_sta_id)
+{
+ struct iwl_remove_sta_cmd cmd = {
+ .sta_id = cpu_to_le32(fw_sta_id),
+ };
+ int ret;
+
+ ret = iwl_mld_send_cmd_pdu(mld,
+ WIDE_ID(MAC_CONF_GROUP, STA_REMOVE_CMD),
+ &cmd);
+ if (ret)
+ IWL_ERR(mld, "Failed to remove station. Id=%d\n", fw_sta_id);
+
+ return ret;
+}
+
+static void
+iwl_mld_remove_link_sta(struct iwl_mld *mld,
+ struct ieee80211_link_sta *link_sta)
+{
+ struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(link_sta->sta);
+ struct iwl_mld_link_sta *mld_link_sta =
+ iwl_mld_link_sta_from_mac80211(link_sta);
+
+ if (WARN_ON(!mld_link_sta))
+ return;
+
+ iwl_mld_rm_sta_from_fw(mld, mld_link_sta->fw_id);
+ mld_link_sta->in_fw = false;
+
+ /* Now that the STA doesn't exist in FW, we don't expect any new
+ * notifications for it. Cancel the ones that are already pending
+ */
+ iwl_mld_cancel_notifications_of_object(mld, IWL_MLD_OBJECT_TYPE_STA,
+ mld_link_sta->fw_id);
+
+	/* This will not be done upon reconfig, so do it also when removing
+	 * the STA from the fw failed
+	 */
+ RCU_INIT_POINTER(mld->fw_id_to_link_sta[mld_link_sta->fw_id], NULL);
+ RCU_INIT_POINTER(mld_sta->link[link_sta->link_id], NULL);
+ if (mld_link_sta != &mld_sta->deflink)
+ kfree_rcu(mld_link_sta, rcu_head);
+}
+
+static void iwl_mld_set_max_amsdu_len(struct iwl_mld *mld,
+ struct ieee80211_link_sta *link_sta)
+{
+ const struct ieee80211_sta_ht_cap *ht_cap = &link_sta->ht_cap;
+
+	/* For EHT, HE and VHT we can use the value as it was calculated by
+	 * mac80211. For HT, mac80211 doesn't enforce the 4095-byte limit, so
+	 * force it here
+	 */
+ if (link_sta->eht_cap.has_eht || link_sta->he_cap.has_he ||
+ link_sta->vht_cap.vht_supported ||
+ !ht_cap->ht_supported ||
+ !(ht_cap->cap & IEEE80211_HT_CAP_MAX_AMSDU))
+ return;
+
+ link_sta->agg.max_amsdu_len = IEEE80211_MAX_MPDU_LEN_HT_BA;
+ ieee80211_sta_recalc_aggregates(link_sta->sta);
+}
+
+int iwl_mld_update_all_link_stations(struct iwl_mld *mld,
+ struct ieee80211_sta *sta)
+{
+ struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(sta);
+ struct ieee80211_link_sta *link_sta;
+ int link_id;
+
+ for_each_sta_active_link(mld_sta->vif, sta, link_sta, link_id) {
+ int ret = iwl_mld_add_modify_sta_cmd(mld, link_sta);
+
+ if (ret)
+ return ret;
+
+ if (mld_sta->sta_state == IEEE80211_STA_ASSOC)
+ iwl_mld_set_max_amsdu_len(mld, link_sta);
+ }
+ return 0;
+}
+
+static void iwl_mld_destroy_sta(struct ieee80211_sta *sta)
+{
+ struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(sta);
+
+ kfree(mld_sta->dup_data);
+ kfree(mld_sta->mpdu_counters);
+}
+
+static int
+iwl_mld_alloc_dup_data(struct iwl_mld *mld, struct iwl_mld_sta *mld_sta)
+{
+ struct iwl_mld_rxq_dup_data *dup_data;
+
+ if (mld->fw_status.in_hw_restart)
+ return 0;
+
+ dup_data = kcalloc(mld->trans->num_rx_queues, sizeof(*dup_data),
+ GFP_KERNEL);
+ if (!dup_data)
+ return -ENOMEM;
+
+ /* Initialize all the last_seq values to 0xffff which can never
+ * compare equal to the frame's seq_ctrl in the check in
+ * iwl_mld_is_dup() since the lower 4 bits are the fragment
+ * number and fragmented packets don't reach that function.
+ *
+ * This thus allows receiving a packet with seqno 0 and the
+ * retry bit set as the very first packet on a new TID.
+ */
+ for (int q = 0; q < mld->trans->num_rx_queues; q++)
+ memset(dup_data[q].last_seq, 0xff,
+ sizeof(dup_data[q].last_seq));
+ mld_sta->dup_data = dup_data;
+
+ return 0;
+}
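+
+/* Illustrative note on the 0xffff sentinel above: the 802.11 sequence
+ * control field carries the fragment number in its 4 LSBs, and only
+ * non-fragmented frames reach the duplicate check, so their seq_ctrl always
+ * has a zero low nibble. E.g. the very first MPDU of a TID with sequence
+ * number 0 and the retry bit set has seq_ctrl == 0x0000 != 0xffff, so it is
+ * correctly not dropped as a duplicate.
+ */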
+
+static void iwl_mld_alloc_mpdu_counters(struct iwl_mld *mld,
+ struct ieee80211_sta *sta)
+{
+ struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(sta);
+ struct ieee80211_vif *vif = mld_sta->vif;
+
+ if (mld->fw_status.in_hw_restart)
+ return;
+
+ /* MPDUs are counted only when EMLSR is possible */
+ if (ieee80211_vif_type_p2p(vif) != NL80211_IFTYPE_STATION ||
+ sta->tdls || !ieee80211_vif_is_mld(vif))
+ return;
+
+ mld_sta->mpdu_counters = kcalloc(mld->trans->num_rx_queues,
+ sizeof(*mld_sta->mpdu_counters),
+ GFP_KERNEL);
+ if (!mld_sta->mpdu_counters)
+ return;
+
+ for (int q = 0; q < mld->trans->num_rx_queues; q++)
+ spin_lock_init(&mld_sta->mpdu_counters[q].lock);
+}
+
+static int
+iwl_mld_init_sta(struct iwl_mld *mld, struct ieee80211_sta *sta,
+ struct ieee80211_vif *vif, enum iwl_fw_sta_type type)
+{
+ struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(sta);
+
+ mld_sta->vif = vif;
+ mld_sta->sta_type = type;
+ mld_sta->mld = mld;
+
+ if (!mld->fw_status.in_hw_restart)
+ for (int i = 0; i < ARRAY_SIZE(sta->txq); i++)
+ iwl_mld_init_txq(iwl_mld_txq_from_mac80211(sta->txq[i]));
+
+ iwl_mld_alloc_mpdu_counters(mld, sta);
+
+ iwl_mld_toggle_tx_ant(mld, &mld_sta->data_tx_ant);
+
+ return iwl_mld_alloc_dup_data(mld, mld_sta);
+}
+
+int iwl_mld_add_sta(struct iwl_mld *mld, struct ieee80211_sta *sta,
+ struct ieee80211_vif *vif, enum iwl_fw_sta_type type)
+{
+ struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(sta);
+ struct ieee80211_link_sta *link_sta;
+ int link_id;
+ int ret;
+
+ ret = iwl_mld_init_sta(mld, sta, vif, type);
+ if (ret)
+ return ret;
+
+	/* We could have added only the deflink link_sta, but that would not
+	 * work in the restart case if the single link that is active during
+	 * reconfig is not the deflink one.
+	 */
+ for_each_sta_active_link(mld_sta->vif, sta, link_sta, link_id) {
+ ret = iwl_mld_add_link_sta(mld, link_sta);
+ if (ret)
+ goto destroy_sta;
+ }
+
+ return 0;
+
+destroy_sta:
+ iwl_mld_destroy_sta(sta);
+
+ return ret;
+}
+
+void iwl_mld_flush_sta_txqs(struct iwl_mld *mld, struct ieee80211_sta *sta)
+{
+ struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(sta);
+ struct ieee80211_link_sta *link_sta;
+ int link_id;
+
+ for_each_sta_active_link(mld_sta->vif, sta, link_sta, link_id) {
+ int fw_sta_id = iwl_mld_fw_sta_id_from_link_sta(mld, link_sta);
+
+ if (fw_sta_id < 0)
+ continue;
+
+ iwl_mld_flush_link_sta_txqs(mld, fw_sta_id);
+ }
+}
+
+void iwl_mld_wait_sta_txqs_empty(struct iwl_mld *mld, struct ieee80211_sta *sta)
+{
+	/* Avoid a warning in iwl_trans_wait_txq_empty if we are anyway on
+	 * the way to a restart.
+	 */
+ if (iwl_mld_error_before_recovery(mld))
+ return;
+
+ for (int i = 0; i < ARRAY_SIZE(sta->txq); i++) {
+ struct iwl_mld_txq *mld_txq =
+ iwl_mld_txq_from_mac80211(sta->txq[i]);
+
+ if (!mld_txq->status.allocated)
+ continue;
+
+ iwl_trans_wait_txq_empty(mld->trans, mld_txq->fw_id);
+ }
+}
+
+void iwl_mld_remove_sta(struct iwl_mld *mld, struct ieee80211_sta *sta)
+{
+ struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(sta);
+ struct ieee80211_vif *vif = mld_sta->vif;
+ struct ieee80211_link_sta *link_sta;
+ u8 link_id;
+
+ lockdep_assert_wiphy(mld->wiphy);
+
+ /* Tell the HW to flush the queues */
+ iwl_mld_flush_sta_txqs(mld, sta);
+
+ /* Wait for trans to empty its queues */
+ iwl_mld_wait_sta_txqs_empty(mld, sta);
+
+ /* Now we can remove the queues */
+ for (int i = 0; i < ARRAY_SIZE(sta->txq); i++)
+ iwl_mld_remove_txq(mld, sta->txq[i]);
+
+ for_each_sta_active_link(vif, sta, link_sta, link_id) {
+		/* mac80211 will remove the groupwise keys after the sta is
+		 * removed, but FW expects all the keys to be removed before
+		 * the STA is, so remove them all here.
+		 */
+ if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls)
+ iwl_mld_remove_ap_keys(mld, vif, sta, link_id);
+
+ /* Remove the link_sta */
+ iwl_mld_remove_link_sta(mld, link_sta);
+ }
+
+ iwl_mld_destroy_sta(sta);
+}
+
+u32 iwl_mld_fw_sta_id_mask(struct iwl_mld *mld, struct ieee80211_sta *sta)
+{
+ struct ieee80211_vif *vif = iwl_mld_sta_from_mac80211(sta)->vif;
+ struct ieee80211_link_sta *link_sta;
+ unsigned int link_id;
+ u32 result = 0;
+
+ KUNIT_STATIC_STUB_REDIRECT(iwl_mld_fw_sta_id_mask, mld, sta);
+
+	/* This function should only be used with the wiphy lock held.
+ * In other cases, it is not guaranteed that the link_sta will exist
+ * in the driver too, and it is checked in
+ * iwl_mld_fw_sta_id_from_link_sta.
+ */
+ lockdep_assert_wiphy(mld->wiphy);
+
+ for_each_sta_active_link(vif, sta, link_sta, link_id) {
+ int fw_id = iwl_mld_fw_sta_id_from_link_sta(mld, link_sta);
+
+		if (fw_id >= 0)
+ result |= BIT(fw_id);
+ }
+
+ return result;
+}
+EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_mld_fw_sta_id_mask);
+
+static void iwl_mld_count_mpdu(struct ieee80211_link_sta *link_sta, int queue,
+ u32 count, bool tx)
+{
+ struct iwl_mld_per_q_mpdu_counter *queue_counter;
+ struct iwl_mld_per_link_mpdu_counter *link_counter;
+ struct iwl_mld_vif *mld_vif;
+ struct iwl_mld_sta *mld_sta;
+ struct iwl_mld_link *mld_link;
+ struct iwl_mld *mld;
+ int total_mpdus = 0;
+
+ if (WARN_ON(!link_sta))
+ return;
+
+ mld_sta = iwl_mld_sta_from_mac80211(link_sta->sta);
+ if (!mld_sta->mpdu_counters)
+ return;
+
+ mld_vif = iwl_mld_vif_from_mac80211(mld_sta->vif);
+ mld_link = iwl_mld_link_dereference_check(mld_vif, link_sta->link_id);
+
+ if (WARN_ON_ONCE(!mld_link))
+ return;
+
+ queue_counter = &mld_sta->mpdu_counters[queue];
+
+ mld = mld_vif->mld;
+
+	/* If the window is over, first clear the counters.
+	 * When we are not blocked by TPT, the window is managed by
+	 * check_tpt_wk
+	 */
+ if ((mld_vif->emlsr.blocked_reasons & IWL_MLD_EMLSR_BLOCKED_TPT) &&
+ time_is_before_jiffies(queue_counter->window_start_time +
+ IWL_MLD_TPT_COUNT_WINDOW)) {
+ memset(queue_counter->per_link, 0,
+ sizeof(queue_counter->per_link));
+ queue_counter->window_start_time = jiffies;
+
+ IWL_DEBUG_INFO(mld, "MPDU counters are cleared\n");
+ }
+
+ link_counter = &queue_counter->per_link[mld_link->fw_id];
+
+ spin_lock_bh(&queue_counter->lock);
+
+ /* Update the statistics for this TPT measurement window */
+ if (tx)
+ link_counter->tx += count;
+ else
+ link_counter->rx += count;
+
+	/*
+	 * Next, evaluate whether we should queue an unblock;
+	 * skip this if we are not blocked due to low throughput.
+	 */
+ if (!(mld_vif->emlsr.blocked_reasons & IWL_MLD_EMLSR_BLOCKED_TPT))
+ goto unlock;
+
+ for (int i = 0; i <= IWL_FW_MAX_LINK_ID; i++)
+ total_mpdus += tx ? queue_counter->per_link[i].tx :
+ queue_counter->per_link[i].rx;
+
+ /* Unblock is already queued if the threshold was reached before */
+ if (total_mpdus - count >= IWL_MLD_ENTER_EMLSR_TPT_THRESH)
+ goto unlock;
+
+ if (total_mpdus >= IWL_MLD_ENTER_EMLSR_TPT_THRESH)
+ wiphy_work_queue(mld->wiphy, &mld_vif->emlsr.unblock_tpt_wk);
+
+unlock:
+ spin_unlock_bh(&queue_counter->lock);
+}
+
+/* must be called under rcu_read_lock() */
+void iwl_mld_count_mpdu_rx(struct ieee80211_link_sta *link_sta, int queue,
+ u32 count)
+{
+ iwl_mld_count_mpdu(link_sta, queue, count, false);
+}
+
+/* must be called under rcu_read_lock() */
+void iwl_mld_count_mpdu_tx(struct ieee80211_link_sta *link_sta, u32 count)
+{
+ /* use queue 0 for all TX */
+ iwl_mld_count_mpdu(link_sta, 0, count, true);
+}
+
+static int iwl_mld_allocate_internal_txq(struct iwl_mld *mld,
+ struct iwl_mld_int_sta *internal_sta,
+ u8 tid)
+{
+ u32 sta_mask = BIT(internal_sta->sta_id);
+ int queue, size;
+
+ size = max_t(u32, IWL_MGMT_QUEUE_SIZE,
+ mld->trans->cfg->min_txq_size);
+
+ queue = iwl_trans_txq_alloc(mld->trans, 0, sta_mask, tid, size,
+ IWL_WATCHDOG_DISABLED);
+
+ if (queue >= 0)
+ IWL_DEBUG_TX_QUEUES(mld,
+ "Enabling TXQ #%d for sta mask 0x%x tid %d\n",
+ queue, sta_mask, tid);
+ return queue;
+}
+
+static int iwl_mld_send_aux_sta_cmd(struct iwl_mld *mld,
+ const struct iwl_mld_int_sta *internal_sta)
+{
+ struct iwl_aux_sta_cmd cmd = {
+ .sta_id = cpu_to_le32(internal_sta->sta_id),
+ /* TODO: CDB - properly set the lmac_id */
+ .lmac_id = cpu_to_le32(IWL_LMAC_24G_INDEX),
+ };
+
+ return iwl_mld_send_cmd_pdu(mld, WIDE_ID(MAC_CONF_GROUP, AUX_STA_CMD),
+ &cmd);
+}
+
+static int
+iwl_mld_add_internal_sta_to_fw(struct iwl_mld *mld,
+ const struct iwl_mld_int_sta *internal_sta,
+ u8 fw_link_id,
+ const u8 *addr)
+{
+ struct iwl_sta_cfg_cmd cmd = {};
+
+ if (internal_sta->sta_type == STATION_TYPE_AUX)
+ return iwl_mld_send_aux_sta_cmd(mld, internal_sta);
+
+ cmd.sta_id = cpu_to_le32((u8)internal_sta->sta_id);
+ cmd.link_id = cpu_to_le32(fw_link_id);
+ cmd.station_type = cpu_to_le32(internal_sta->sta_type);
+
+	/* FW doesn't allow adding an IGTK/BIGTK if the sta isn't marked as MFP.
+ * On the other hand, FW will never check this flag during RX since
+ * an AP/GO doesn't receive protected broadcast management frames.
+ * So, we can set it unconditionally.
+ */
+ if (internal_sta->sta_type == STATION_TYPE_BCAST_MGMT)
+ cmd.mfp = cpu_to_le32(1);
+
+ if (addr) {
+ memcpy(cmd.peer_mld_address, addr, ETH_ALEN);
+ memcpy(cmd.peer_link_address, addr, ETH_ALEN);
+ }
+
+ return iwl_mld_send_sta_cmd(mld, &cmd);
+}
+
+static int iwl_mld_add_internal_sta(struct iwl_mld *mld,
+ struct iwl_mld_int_sta *internal_sta,
+ enum iwl_fw_sta_type sta_type,
+ u8 fw_link_id, const u8 *addr, u8 tid)
+{
+ int ret, queue_id;
+
+ ret = iwl_mld_allocate_link_sta_fw_id(mld,
+ &internal_sta->sta_id,
+ ERR_PTR(-EINVAL));
+ if (ret)
+ return ret;
+
+ internal_sta->sta_type = sta_type;
+
+ ret = iwl_mld_add_internal_sta_to_fw(mld, internal_sta, fw_link_id,
+ addr);
+ if (ret)
+ goto err;
+
+ queue_id = iwl_mld_allocate_internal_txq(mld, internal_sta, tid);
+ if (queue_id < 0) {
+ iwl_mld_rm_sta_from_fw(mld, internal_sta->sta_id);
+ ret = queue_id;
+ goto err;
+ }
+
+ internal_sta->queue_id = queue_id;
+
+ return 0;
+err:
+ iwl_mld_free_internal_sta(mld, internal_sta);
+ return ret;
+}
+
+int iwl_mld_add_bcast_sta(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link)
+{
+ struct iwl_mld_link *mld_link = iwl_mld_link_from_mac80211(link);
+ const u8 bcast_addr[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
+ const u8 *addr;
+
+ if (WARN_ON(!mld_link))
+ return -EINVAL;
+
+ if (WARN_ON(vif->type != NL80211_IFTYPE_AP &&
+ vif->type != NL80211_IFTYPE_ADHOC))
+ return -EINVAL;
+
+ addr = vif->type == NL80211_IFTYPE_ADHOC ? link->bssid : bcast_addr;
+
+ return iwl_mld_add_internal_sta(mld, &mld_link->bcast_sta,
+ STATION_TYPE_BCAST_MGMT,
+ mld_link->fw_id, addr,
+ IWL_MGMT_TID);
+}
+
+int iwl_mld_add_mcast_sta(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link)
+{
+ struct iwl_mld_link *mld_link = iwl_mld_link_from_mac80211(link);
+ const u8 mcast_addr[] = {0x03, 0x00, 0x00, 0x00, 0x00, 0x00};
+
+ if (WARN_ON(!mld_link))
+ return -EINVAL;
+
+ if (WARN_ON(vif->type != NL80211_IFTYPE_AP &&
+ vif->type != NL80211_IFTYPE_ADHOC))
+ return -EINVAL;
+
+ return iwl_mld_add_internal_sta(mld, &mld_link->mcast_sta,
+ STATION_TYPE_MCAST,
+ mld_link->fw_id, mcast_addr, 0);
+}
+
+int iwl_mld_add_aux_sta(struct iwl_mld *mld,
+ struct iwl_mld_int_sta *internal_sta)
+{
+ return iwl_mld_add_internal_sta(mld, internal_sta, STATION_TYPE_AUX,
+ 0, NULL, IWL_MAX_TID_COUNT);
+}
+
+static void iwl_mld_remove_internal_sta(struct iwl_mld *mld,
+ struct iwl_mld_int_sta *internal_sta,
+ bool flush, u8 tid)
+{
+ if (WARN_ON_ONCE(internal_sta->sta_id == IWL_INVALID_STA ||
+ internal_sta->queue_id == IWL_MLD_INVALID_QUEUE))
+ return;
+
+ if (flush)
+ iwl_mld_flush_link_sta_txqs(mld, internal_sta->sta_id);
+
+ iwl_mld_free_txq(mld, BIT(internal_sta->sta_id),
+ tid, internal_sta->queue_id);
+
+ iwl_mld_rm_sta_from_fw(mld, internal_sta->sta_id);
+
+ iwl_mld_free_internal_sta(mld, internal_sta);
+}
+
+void iwl_mld_remove_bcast_sta(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link)
+{
+ struct iwl_mld_link *mld_link = iwl_mld_link_from_mac80211(link);
+
+ if (WARN_ON(!mld_link))
+ return;
+
+ if (WARN_ON(vif->type != NL80211_IFTYPE_AP &&
+ vif->type != NL80211_IFTYPE_ADHOC))
+ return;
+
+ iwl_mld_remove_internal_sta(mld, &mld_link->bcast_sta, true,
+ IWL_MGMT_TID);
+}
+
+void iwl_mld_remove_mcast_sta(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link)
+{
+ struct iwl_mld_link *mld_link = iwl_mld_link_from_mac80211(link);
+
+ if (WARN_ON(!mld_link))
+ return;
+
+ if (WARN_ON(vif->type != NL80211_IFTYPE_AP &&
+ vif->type != NL80211_IFTYPE_ADHOC))
+ return;
+
+ iwl_mld_remove_internal_sta(mld, &mld_link->mcast_sta, true, 0);
+}
+
+void iwl_mld_remove_aux_sta(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link)
+{
+ struct iwl_mld_link *mld_link = iwl_mld_link_from_mac80211(link);
+
+ if (WARN_ON(!mld_link))
+ return;
+
+ /* TODO: Hotspot 2.0 */
+ if (WARN_ON(vif->type != NL80211_IFTYPE_P2P_DEVICE))
+ return;
+
+ iwl_mld_remove_internal_sta(mld, &mld_link->aux_sta, false,
+ IWL_MAX_TID_COUNT);
+}
+
+static int iwl_mld_update_sta_resources(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ u32 old_sta_mask,
+ u32 new_sta_mask)
+{
+ int ret;
+
+ ret = iwl_mld_update_sta_txqs(mld, sta, old_sta_mask, new_sta_mask);
+ if (ret)
+ return ret;
+
+ ret = iwl_mld_update_sta_keys(mld, vif, sta, old_sta_mask, new_sta_mask);
+ if (ret)
+ return ret;
+
+ return iwl_mld_update_sta_baids(mld, old_sta_mask, new_sta_mask);
+}
+
+int iwl_mld_update_link_stas(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ u16 old_links, u16 new_links)
+{
+ struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(sta);
+ struct iwl_mld_link_sta *mld_link_sta;
+ unsigned long links_to_add = ~old_links & new_links;
+ unsigned long links_to_rem = old_links & ~new_links;
+ unsigned long old_links_long = old_links;
+ unsigned long sta_mask_added = 0;
+ u32 current_sta_mask = 0, sta_mask_to_rem = 0;
+ unsigned int link_id, sta_id;
+ int ret;
+
+ lockdep_assert_wiphy(mld->wiphy);
+
+ for_each_set_bit(link_id, &old_links_long,
+ IEEE80211_MLD_MAX_NUM_LINKS) {
+ mld_link_sta =
+ iwl_mld_link_sta_dereference_check(mld_sta, link_id);
+
+ if (WARN_ON(!mld_link_sta))
+ return -EINVAL;
+
+ current_sta_mask |= BIT(mld_link_sta->fw_id);
+ if (links_to_rem & BIT(link_id))
+ sta_mask_to_rem |= BIT(mld_link_sta->fw_id);
+ }
+
+ if (sta_mask_to_rem) {
+ ret = iwl_mld_update_sta_resources(mld, vif, sta,
+ current_sta_mask,
+ current_sta_mask &
+ ~sta_mask_to_rem);
+ if (ret)
+ return ret;
+
+ current_sta_mask &= ~sta_mask_to_rem;
+ }
+
+ for_each_set_bit(link_id, &links_to_rem, IEEE80211_MLD_MAX_NUM_LINKS) {
+ struct ieee80211_link_sta *link_sta =
+ link_sta_dereference_protected(sta, link_id);
+
+ if (WARN_ON(!link_sta))
+ return -EINVAL;
+
+ iwl_mld_remove_link_sta(mld, link_sta);
+ }
+
+ for_each_set_bit(link_id, &links_to_add, IEEE80211_MLD_MAX_NUM_LINKS) {
+ struct ieee80211_link_sta *link_sta =
+ link_sta_dereference_protected(sta, link_id);
+ struct ieee80211_bss_conf *link;
+
+ if (WARN_ON(!link_sta))
+ return -EINVAL;
+
+ ret = iwl_mld_add_link_sta(mld, link_sta);
+ if (ret)
+ goto remove_added_link_stas;
+
+ mld_link_sta =
+ iwl_mld_link_sta_dereference_check(mld_sta,
+ link_id);
+
+ link = link_conf_dereference_protected(mld_sta->vif,
+ link_sta->link_id);
+
+ iwl_mld_set_max_amsdu_len(mld, link_sta);
+ iwl_mld_config_tlc_link(mld, vif, link, link_sta);
+
+ sta_mask_added |= BIT(mld_link_sta->fw_id);
+ }
+
+ if (sta_mask_added) {
+ ret = iwl_mld_update_sta_resources(mld, vif, sta,
+ current_sta_mask,
+ current_sta_mask |
+ sta_mask_added);
+ if (ret)
+ goto remove_added_link_stas;
+ }
+
+	/* We couldn't activate the links before they had a STA. Now we can */
+ for_each_set_bit(link_id, &links_to_add, IEEE80211_MLD_MAX_NUM_LINKS) {
+ struct ieee80211_bss_conf *link =
+ link_conf_dereference_protected(mld_sta->vif, link_id);
+
+ if (WARN_ON(!link))
+ continue;
+
+ iwl_mld_activate_link(mld, link);
+ }
+
+ return 0;
+
+remove_added_link_stas:
+ for_each_set_bit(sta_id, &sta_mask_added, mld->fw->ucode_capa.num_stations) {
+ struct ieee80211_link_sta *link_sta =
+ wiphy_dereference(mld->wiphy,
+ mld->fw_id_to_link_sta[sta_id]);
+
+ if (WARN_ON(!link_sta))
+ continue;
+
+ iwl_mld_remove_link_sta(mld, link_sta);
+ }
+
+ return ret;
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/sta.h b/drivers/net/wireless/intel/iwlwifi/mld/sta.h
new file mode 100644
index 000000000000..ddcffd7b9fde
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mld/sta.h
@@ -0,0 +1,266 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2024-2025 Intel Corporation
+ */
+
+#ifndef __iwl_mld_sta_h__
+#define __iwl_mld_sta_h__
+
+#include <net/mac80211.h>
+
+#include "mld.h"
+#include "tx.h"
+
+/**
+ * struct iwl_mld_rxq_dup_data - Duplication detection data, per STA & Rx queue
+ * @last_seq: last sequence per tid.
+ * @last_sub_frame_idx: the index of the last subframe in an A-MSDU. This value
+ * will be zero if the packet is not part of an A-MSDU.
+ */
+struct iwl_mld_rxq_dup_data {
+ __le16 last_seq[IWL_MAX_TID_COUNT + 1];
+ u8 last_sub_frame_idx[IWL_MAX_TID_COUNT + 1];
+} ____cacheline_aligned_in_smp;
+
+/**
+ * struct iwl_mld_link_sta - link-level station
+ *
+ * This represents the link-level sta - the driver-level equivalent of
+ * ieee80211_link_sta
+ *
+ * @last_rate_n_flags: rate_n_flags from the last &iwl_tlc_update_notif
+ * @signal_avg: the signal average coming from the firmware
+ * @in_fw: whether the link STA is uploaded to the FW (false during restart)
+ * @rcu_head: RCU head for freeing this object
+ * @fw_id: the FW id of this link sta.
+ */
+struct iwl_mld_link_sta {
+ /* Add here fields that need cleanup on restart */
+ struct_group(zeroed_on_hw_restart,
+ u32 last_rate_n_flags;
+ bool in_fw;
+ s8 signal_avg;
+ );
+ /* And here fields that survive a fw restart */
+ struct rcu_head rcu_head;
+ u32 fw_id;
+};
+
+#define iwl_mld_link_sta_dereference_check(mld_sta, link_id) \
+ rcu_dereference_check((mld_sta)->link[link_id], \
+ lockdep_is_held(&mld_sta->mld->wiphy->mtx))
+
+#define for_each_mld_link_sta(mld_sta, link_sta, link_id) \
+ for (link_id = 0; link_id < ARRAY_SIZE((mld_sta)->link); \
+ link_id++) \
+ if ((link_sta = \
+ iwl_mld_link_sta_dereference_check(mld_sta, link_id)))
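+
+/* A minimal usage sketch of the iterator (illustrative only): build the FW
+ * STA id mask of all currently-set link STAs. The same pattern is used by
+ * the fake_iwl_mld_fw_sta_id_mask() stub in tests/agg.c.
+ *
+ * u32 sta_mask = 0;
+ *
+ * for_each_mld_link_sta(mld_sta, mld_link_sta, link_id)
+ * sta_mask |= BIT(mld_link_sta->fw_id);
+ */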
+
+#define IWL_NUM_DEFAULT_KEYS 4
+
+/* struct iwl_mld_ptk_pn - Holds Packet Number (PN) per TID.
+ * @rcu_head: RCU head for freeing this data.
+ * @pn: Array storing PN for each TID.
+ */
+struct iwl_mld_ptk_pn {
+ struct rcu_head rcu_head;
+ struct {
+ u8 pn[IWL_MAX_TID_COUNT][IEEE80211_CCMP_PN_LEN];
+ } ____cacheline_aligned_in_smp q[];
+};
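+
+/* The flexible q[] array is expected to be sized at allocation time with one
+ * entry per Rx queue, so PN replay checks can run per queue (an assumption
+ * based on the per-queue layout of the dup_data above; the allocation itself
+ * lives in the key handling code).
+ */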
+
+/**
+ * struct iwl_mld_per_link_mpdu_counter - per-link TX/RX MPDU counters
+ *
+ * @tx: Number of TX MPDUs.
+ * @rx: Number of RX MPDUs.
+ */
+struct iwl_mld_per_link_mpdu_counter {
+ u32 tx;
+ u32 rx;
+};
+
+/**
+ * struct iwl_mld_per_q_mpdu_counter - per-queue MPDU counter
+ *
+ * @lock: protects the counters when they are modified from the statistics flow.
+ * @per_link: per-link counters.
+ * @window_start_time: timestamp of the counting-window start
+ */
+struct iwl_mld_per_q_mpdu_counter {
+ spinlock_t lock;
+ struct iwl_mld_per_link_mpdu_counter per_link[IWL_FW_MAX_LINK_ID + 1];
+ unsigned long window_start_time;
+} ____cacheline_aligned_in_smp;
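+
+/* Sketch of how a counter update is expected to look (illustrative only; the
+ * real update is done by iwl_mld_count_mpdu_tx()/iwl_mld_count_mpdu_rx() in
+ * sta.c and may differ in detail). fw_link_id stands for the FW link id:
+ *
+ * spin_lock_bh(&counter->lock);
+ * counter->per_link[fw_link_id].tx += count;
+ * spin_unlock_bh(&counter->lock);
+ */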
+
+/**
+ * struct iwl_mld_sta - representation of a station in the driver.
+ *
+ * This represents the MLD-level sta and will not be added to the FW.
+ * Embedded in ieee80211_sta.
+ *
+ * @vif: pointer to the vif object.
+ * @sta_state: station state according to &enum ieee80211_sta_state
+ * @sta_type: type of this station. See &enum iwl_fw_sta_type
+ * @mld: a pointer to the iwl_mld object
+ * @dup_data: per queue duplicate packet detection data
+ * @data_tx_ant: stores the last TX antenna index; used for setting
+ * TX rate_n_flags for injected data frames (toggles on every TX failure).
+ * @tid_to_baid: a simple map of TID to Block-Ack fw id
+ * @deflink: the default link STA information. For a non-MLO STA, all
+ * link-specific STA information is accessed through @deflink or through
+ * link[0], which points to @deflink. For an MLO STA, the first added
+ * link STA points to @deflink.
+ * @link: array of link STA entries. For a non-MLO STA, all entries except
+ * link[0] are NULL by default, and link information is accessed via
+ * @deflink or link[0]. For an MLO STA, the first added link STA points
+ * to @deflink; the remaining link STAs are allocated and assigned to
+ * link[link_id], where link_id is the id assigned by the AP.
+ * @ptk_pn: Array of pointers to PTK PN data, used to track the Packet Number
+ * per key index and per queue (TID).
+ * @mpdu_counters: RX/TX MPDUs counters for each queue.
+ */
+struct iwl_mld_sta {
+ /* Add here fields that need cleanup on restart */
+ struct_group(zeroed_on_hw_restart,
+ enum ieee80211_sta_state sta_state;
+ enum iwl_fw_sta_type sta_type;
+ );
+ /* And here fields that survive a fw restart */
+ struct iwl_mld *mld;
+ struct ieee80211_vif *vif;
+ struct iwl_mld_rxq_dup_data *dup_data;
+ u8 tid_to_baid[IWL_MAX_TID_COUNT];
+ u8 data_tx_ant;
+
+ struct iwl_mld_link_sta deflink;
+ struct iwl_mld_link_sta __rcu *link[IEEE80211_MLD_MAX_NUM_LINKS];
+ struct iwl_mld_ptk_pn __rcu *ptk_pn[IWL_NUM_DEFAULT_KEYS];
+ struct iwl_mld_per_q_mpdu_counter *mpdu_counters;
+};
+
+static inline struct iwl_mld_sta *
+iwl_mld_sta_from_mac80211(struct ieee80211_sta *sta)
+{
+ return (void *)sta->drv_priv;
+}
+
+static inline void
+iwl_mld_cleanup_sta(void *data, struct ieee80211_sta *sta)
+{
+ struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(sta);
+ struct iwl_mld_link_sta *mld_link_sta;
+ u8 link_id;
+
+ for (int i = 0; i < ARRAY_SIZE(sta->txq); i++)
+ CLEANUP_STRUCT(iwl_mld_txq_from_mac80211(sta->txq[i]));
+
+ for_each_mld_link_sta(mld_sta, mld_link_sta, link_id) {
+ CLEANUP_STRUCT(mld_link_sta);
+
+ if (!ieee80211_vif_is_mld(mld_sta->vif)) {
+ /* not an MLD STA; only has the deflink with ID zero */
+ WARN_ON(link_id);
+ continue;
+ }
+
+ if (mld_sta->vif->active_links & BIT(link_id))
+ continue;
+
+ /* Should not happen as link removal should always succeed */
+ WARN_ON(1);
+ RCU_INIT_POINTER(mld_sta->link[link_id], NULL);
+ RCU_INIT_POINTER(mld_sta->mld->fw_id_to_link_sta[mld_link_sta->fw_id],
+ NULL);
+ if (mld_link_sta != &mld_sta->deflink)
+ kfree_rcu(mld_link_sta, rcu_head);
+ }
+
+ CLEANUP_STRUCT(mld_sta);
+}
+
+static inline struct iwl_mld_link_sta *
+iwl_mld_link_sta_from_mac80211(struct ieee80211_link_sta *link_sta)
+{
+ struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(link_sta->sta);
+
+ return iwl_mld_link_sta_dereference_check(mld_sta, link_sta->link_id);
+}
+
+int iwl_mld_add_sta(struct iwl_mld *mld, struct ieee80211_sta *sta,
+ struct ieee80211_vif *vif, enum iwl_fw_sta_type type);
+void iwl_mld_remove_sta(struct iwl_mld *mld, struct ieee80211_sta *sta);
+int iwl_mld_fw_sta_id_from_link_sta(struct iwl_mld *mld,
+ struct ieee80211_link_sta *link_sta);
+u32 iwl_mld_fw_sta_id_mask(struct iwl_mld *mld, struct ieee80211_sta *sta);
+int iwl_mld_update_all_link_stations(struct iwl_mld *mld,
+ struct ieee80211_sta *sta);
+void iwl_mld_flush_sta_txqs(struct iwl_mld *mld, struct ieee80211_sta *sta);
+void iwl_mld_wait_sta_txqs_empty(struct iwl_mld *mld,
+ struct ieee80211_sta *sta);
+void iwl_mld_count_mpdu_rx(struct ieee80211_link_sta *link_sta, int queue,
+ u32 count);
+void iwl_mld_count_mpdu_tx(struct ieee80211_link_sta *link_sta, u32 count);
+
+/**
+ * struct iwl_mld_int_sta - representation of an internal station
+ * (a station that exists in the FW and in the driver, but not in mac80211)
+ *
+ * @sta_id: the index of the station in the fw
+ * @queue_id: the id of the queue used by the station
+ * @sta_type: station type. One of &iwl_fw_sta_type
+ */
+struct iwl_mld_int_sta {
+ u8 sta_id;
+ u32 queue_id;
+ enum iwl_fw_sta_type sta_type;
+};
+
+static inline void
+iwl_mld_init_internal_sta(struct iwl_mld_int_sta *internal_sta)
+{
+ internal_sta->sta_id = IWL_INVALID_STA;
+ internal_sta->queue_id = IWL_MLD_INVALID_QUEUE;
+}
+
+static inline void
+iwl_mld_free_internal_sta(struct iwl_mld *mld,
+ struct iwl_mld_int_sta *internal_sta)
+{
+ if (WARN_ON(internal_sta->sta_id == IWL_INVALID_STA))
+ return;
+
+ RCU_INIT_POINTER(mld->fw_id_to_link_sta[internal_sta->sta_id], NULL);
+ iwl_mld_init_internal_sta(internal_sta);
+}
+
+int iwl_mld_add_bcast_sta(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link);
+
+int iwl_mld_add_mcast_sta(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link);
+
+int iwl_mld_add_aux_sta(struct iwl_mld *mld,
+ struct iwl_mld_int_sta *internal_sta);
+
+void iwl_mld_remove_bcast_sta(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link);
+
+void iwl_mld_remove_mcast_sta(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link);
+
+void iwl_mld_remove_aux_sta(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link);
+
+int iwl_mld_update_link_stas(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ u16 old_links, u16 new_links);
+#endif /* __iwl_mld_sta_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/stats.c b/drivers/net/wireless/intel/iwlwifi/mld/stats.c
new file mode 100644
index 000000000000..0715bbc31031
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mld/stats.c
@@ -0,0 +1,513 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2024-2025 Intel Corporation
+ */
+
+#include "mld.h"
+#include "stats.h"
+#include "sta.h"
+#include "mlo.h"
+#include "hcmd.h"
+#include "iface.h"
+#include "scan.h"
+#include "phy.h"
+#include "fw/api/stats.h"
+
+static int iwl_mld_send_fw_stats_cmd(struct iwl_mld *mld, u32 cfg_mask,
+ u32 cfg_time, u32 type_mask)
+{
+ u32 cmd_id = WIDE_ID(SYSTEM_GROUP, SYSTEM_STATISTICS_CMD);
+ struct iwl_system_statistics_cmd stats_cmd = {
+ .cfg_mask = cpu_to_le32(cfg_mask),
+ .config_time_sec = cpu_to_le32(cfg_time),
+ .type_id_mask = cpu_to_le32(type_mask),
+ };
+
+ return iwl_mld_send_cmd_pdu(mld, cmd_id, &stats_cmd);
+}
+
+int iwl_mld_clear_stats_in_fw(struct iwl_mld *mld)
+{
+ u32 cfg_mask = IWL_STATS_CFG_FLG_ON_DEMAND_NTFY_MSK;
+ u32 type_mask = IWL_STATS_NTFY_TYPE_ID_OPER |
+ IWL_STATS_NTFY_TYPE_ID_OPER_PART1;
+
+ return iwl_mld_send_fw_stats_cmd(mld, cfg_mask, 0, type_mask);
+}
+
+static void
+iwl_mld_fill_stats_from_oper_notif(struct iwl_mld *mld,
+ struct iwl_rx_packet *pkt,
+ u8 fw_sta_id, struct station_info *sinfo)
+{
+ const struct iwl_system_statistics_notif_oper *notif =
+ (void *)&pkt->data;
+ const struct iwl_stats_ntfy_per_sta *per_sta =
+ &notif->per_sta[fw_sta_id];
+ struct ieee80211_link_sta *link_sta;
+ struct iwl_mld_link_sta *mld_link_sta;
+
+ /* The FW may report 0, which isn't a valid value. In that case,
+ * keep reporting the latest non-zero value we stored.
+ */
+ rcu_read_lock();
+
+ link_sta = rcu_dereference(mld->fw_id_to_link_sta[fw_sta_id]);
+ if (IS_ERR_OR_NULL(link_sta))
+ goto unlock;
+
+ mld_link_sta = iwl_mld_link_sta_from_mac80211(link_sta);
+ if (WARN_ON(!mld_link_sta))
+ goto unlock;
+
+ if (per_sta->average_energy)
+ mld_link_sta->signal_avg =
+ -(s8)le32_to_cpu(per_sta->average_energy);
+
+ sinfo->signal_avg = mld_link_sta->signal_avg;
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL_AVG);
+
+unlock:
+ rcu_read_unlock();
+}
+
+struct iwl_mld_stats_data {
+ u8 fw_sta_id;
+ struct station_info *sinfo;
+ struct iwl_mld *mld;
+};
+
+static bool iwl_mld_wait_stats_handler(struct iwl_notif_wait_data *notif_data,
+ struct iwl_rx_packet *pkt, void *data)
+{
+ struct iwl_mld_stats_data *stats_data = data;
+ u16 cmd = WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd);
+
+ switch (cmd) {
+ case WIDE_ID(STATISTICS_GROUP, STATISTICS_OPER_NOTIF):
+ iwl_mld_fill_stats_from_oper_notif(stats_data->mld, pkt,
+ stats_data->fw_sta_id,
+ stats_data->sinfo);
+ break;
+ case WIDE_ID(STATISTICS_GROUP, STATISTICS_OPER_PART1_NOTIF):
+ break;
+ case WIDE_ID(SYSTEM_GROUP, SYSTEM_STATISTICS_END_NOTIF):
+ return true;
+ }
+
+ return false;
+}
+
+static int
+iwl_mld_fw_stats_to_mac80211(struct iwl_mld *mld, struct iwl_mld_sta *mld_sta,
+ struct station_info *sinfo)
+{
+ u32 cfg_mask = IWL_STATS_CFG_FLG_ON_DEMAND_NTFY_MSK |
+ IWL_STATS_CFG_FLG_RESET_MSK;
+ u32 type_mask = IWL_STATS_NTFY_TYPE_ID_OPER |
+ IWL_STATS_NTFY_TYPE_ID_OPER_PART1;
+ static const u16 notifications[] = {
+ WIDE_ID(STATISTICS_GROUP, STATISTICS_OPER_NOTIF),
+ WIDE_ID(STATISTICS_GROUP, STATISTICS_OPER_PART1_NOTIF),
+ WIDE_ID(SYSTEM_GROUP, SYSTEM_STATISTICS_END_NOTIF),
+ };
+ struct iwl_mld_stats_data wait_stats_data = {
+ /* We don't support drv_sta_statistics in EMLSR */
+ .fw_sta_id = mld_sta->deflink.fw_id,
+ .sinfo = sinfo,
+ .mld = mld,
+ };
+ struct iwl_notification_wait stats_wait;
+ int ret;
+
+ iwl_init_notification_wait(&mld->notif_wait, &stats_wait,
+ notifications, ARRAY_SIZE(notifications),
+ iwl_mld_wait_stats_handler,
+ &wait_stats_data);
+
+ ret = iwl_mld_send_fw_stats_cmd(mld, cfg_mask, 0, type_mask);
+ if (ret) {
+ iwl_remove_notification(&mld->notif_wait, &stats_wait);
+ return ret;
+ }
+
+ /* Wait 500ms for OPERATIONAL, PART1, and END notifications,
+ * which should be sufficient for the firmware to gather data
+ * from all LMACs and send notifications to the host.
+ */
+ ret = iwl_wait_notification(&mld->notif_wait, &stats_wait, HZ / 2);
+ if (ret)
+ return ret;
+
+ /* When periodic statistics are sent, the FW clears its statistics DB.
+ * If this request happens shortly afterwards, the response only covers
+ * a short time interval and is therefore incomplete. Delete these
+ * notifications from the handlers list so the general statistics
+ * processing won't consume them.
+ */
+ iwl_mld_delete_handlers(mld, notifications, ARRAY_SIZE(notifications));
+
+ return 0;
+}
+
+#define PERIODIC_STATS_SECONDS 5
+
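+/* Enabling requests OPER and OPER_PART1 notifications every
+ * PERIODIC_STATS_SECONDS seconds; disabling clears the type mask and sets the
+ * disable flag. A minimal caller sketch (illustrative only):
+ *
+ * ret = iwl_mld_request_periodic_fw_stats(mld, true);
+ * if (ret)
+ * return ret;
+ */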
+int iwl_mld_request_periodic_fw_stats(struct iwl_mld *mld, bool enable)
+{
+ u32 cfg_mask = enable ? 0 : IWL_STATS_CFG_FLG_DISABLE_NTFY_MSK;
+ u32 type_mask = enable ? (IWL_STATS_NTFY_TYPE_ID_OPER |
+ IWL_STATS_NTFY_TYPE_ID_OPER_PART1) : 0;
+ u32 cfg_time = enable ? PERIODIC_STATS_SECONDS : 0;
+
+ return iwl_mld_send_fw_stats_cmd(mld, cfg_mask, cfg_time, type_mask);
+}
+
+static void iwl_mld_sta_stats_fill_txrate(struct iwl_mld_sta *mld_sta,
+ struct station_info *sinfo)
+{
+ struct rate_info *rinfo = &sinfo->txrate;
+ u32 rate_n_flags = mld_sta->deflink.last_rate_n_flags;
+ u32 format = rate_n_flags & RATE_MCS_MOD_TYPE_MSK;
+ u32 gi_ltf;
+
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE);
+
+ switch (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) {
+ case RATE_MCS_CHAN_WIDTH_20:
+ rinfo->bw = RATE_INFO_BW_20;
+ break;
+ case RATE_MCS_CHAN_WIDTH_40:
+ rinfo->bw = RATE_INFO_BW_40;
+ break;
+ case RATE_MCS_CHAN_WIDTH_80:
+ rinfo->bw = RATE_INFO_BW_80;
+ break;
+ case RATE_MCS_CHAN_WIDTH_160:
+ rinfo->bw = RATE_INFO_BW_160;
+ break;
+ case RATE_MCS_CHAN_WIDTH_320:
+ rinfo->bw = RATE_INFO_BW_320;
+ break;
+ }
+
+ if (format == RATE_MCS_CCK_MSK || format == RATE_MCS_LEGACY_OFDM_MSK) {
+ int rate = u32_get_bits(rate_n_flags, RATE_LEGACY_RATE_MSK);
+
+ /* add the offset needed to get to the legacy ofdm indices */
+ if (format == RATE_MCS_LEGACY_OFDM_MSK)
+ rate += IWL_FIRST_OFDM_RATE;
+
+ switch (rate) {
+ case IWL_RATE_1M_INDEX:
+ rinfo->legacy = 10;
+ break;
+ case IWL_RATE_2M_INDEX:
+ rinfo->legacy = 20;
+ break;
+ case IWL_RATE_5M_INDEX:
+ rinfo->legacy = 55;
+ break;
+ case IWL_RATE_11M_INDEX:
+ rinfo->legacy = 110;
+ break;
+ case IWL_RATE_6M_INDEX:
+ rinfo->legacy = 60;
+ break;
+ case IWL_RATE_9M_INDEX:
+ rinfo->legacy = 90;
+ break;
+ case IWL_RATE_12M_INDEX:
+ rinfo->legacy = 120;
+ break;
+ case IWL_RATE_18M_INDEX:
+ rinfo->legacy = 180;
+ break;
+ case IWL_RATE_24M_INDEX:
+ rinfo->legacy = 240;
+ break;
+ case IWL_RATE_36M_INDEX:
+ rinfo->legacy = 360;
+ break;
+ case IWL_RATE_48M_INDEX:
+ rinfo->legacy = 480;
+ break;
+ case IWL_RATE_54M_INDEX:
+ rinfo->legacy = 540;
+ }
+ return;
+ }
+
+ rinfo->nss = u32_get_bits(rate_n_flags, RATE_MCS_NSS_MSK) + 1;
+
+ if (format == RATE_MCS_HT_MSK)
+ rinfo->mcs = RATE_HT_MCS_INDEX(rate_n_flags);
+ else
+ rinfo->mcs = u32_get_bits(rate_n_flags, RATE_MCS_CODE_MSK);
+
+ if (rate_n_flags & RATE_MCS_SGI_MSK)
+ rinfo->flags |= RATE_INFO_FLAGS_SHORT_GI;
+
+ switch (format) {
+ case RATE_MCS_EHT_MSK:
+ rinfo->flags |= RATE_INFO_FLAGS_EHT_MCS;
+ break;
+ case RATE_MCS_HE_MSK:
+ gi_ltf = u32_get_bits(rate_n_flags, RATE_MCS_HE_GI_LTF_MSK);
+
+ rinfo->flags |= RATE_INFO_FLAGS_HE_MCS;
+
+ if (rate_n_flags & RATE_MCS_HE_106T_MSK) {
+ rinfo->bw = RATE_INFO_BW_HE_RU;
+ rinfo->he_ru_alloc = NL80211_RATE_INFO_HE_RU_ALLOC_106;
+ }
+
+ switch (rate_n_flags & RATE_MCS_HE_TYPE_MSK) {
+ case RATE_MCS_HE_TYPE_SU:
+ case RATE_MCS_HE_TYPE_EXT_SU:
+ if (gi_ltf == 0 || gi_ltf == 1)
+ rinfo->he_gi = NL80211_RATE_INFO_HE_GI_0_8;
+ else if (gi_ltf == 2)
+ rinfo->he_gi = NL80211_RATE_INFO_HE_GI_1_6;
+ else if (gi_ltf == 3)
+ rinfo->he_gi = NL80211_RATE_INFO_HE_GI_3_2;
+ else
+ rinfo->he_gi = NL80211_RATE_INFO_HE_GI_0_8;
+ break;
+ case RATE_MCS_HE_TYPE_MU:
+ if (gi_ltf == 0 || gi_ltf == 1)
+ rinfo->he_gi = NL80211_RATE_INFO_HE_GI_0_8;
+ else if (gi_ltf == 2)
+ rinfo->he_gi = NL80211_RATE_INFO_HE_GI_1_6;
+ else
+ rinfo->he_gi = NL80211_RATE_INFO_HE_GI_3_2;
+ break;
+ case RATE_MCS_HE_TYPE_TRIG:
+ if (gi_ltf == 0 || gi_ltf == 1)
+ rinfo->he_gi = NL80211_RATE_INFO_HE_GI_1_6;
+ else
+ rinfo->he_gi = NL80211_RATE_INFO_HE_GI_3_2;
+ break;
+ }
+
+ if (rate_n_flags & RATE_HE_DUAL_CARRIER_MODE_MSK)
+ rinfo->he_dcm = 1;
+ break;
+ case RATE_MCS_HT_MSK:
+ rinfo->flags |= RATE_INFO_FLAGS_MCS;
+ break;
+ case RATE_MCS_VHT_MSK:
+ rinfo->flags |= RATE_INFO_FLAGS_VHT_MCS;
+ break;
+ }
+}
+
+void iwl_mld_mac80211_sta_statistics(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct station_info *sinfo)
+{
+ struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(sta);
+
+ /* This API is not EMLSR ready, so we cannot provide complete
+ * information if EMLSR is active
+ */
+ if (hweight16(vif->active_links) > 1)
+ return;
+
+ if (iwl_mld_fw_stats_to_mac80211(mld_sta->mld, mld_sta, sinfo))
+ return;
+
+ iwl_mld_sta_stats_fill_txrate(mld_sta, sinfo);
+
+ /* TODO: NL80211_STA_INFO_BEACON_RX */
+
+ /* TODO: NL80211_STA_INFO_BEACON_SIGNAL_AVG */
+}
+
+#define IWL_MLD_TRAFFIC_LOAD_MEDIUM_THRESH 10 /* percentage */
+#define IWL_MLD_TRAFFIC_LOAD_HIGH_THRESH 50 /* percentage */
+#define IWL_MLD_TRAFFIC_LOAD_MIN_WINDOW_USEC (500 * 1000)
+
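+/* Worked example: with an elapsed window of 1,000,000 usec and a total
+ * airtime of 120,000 usec the load is 12%, which is above the MEDIUM
+ * threshold (10%) but below the HIGH threshold (50%), so the traffic load
+ * status becomes IWL_MLD_TRAFFIC_MEDIUM. Windows shorter than
+ * IWL_MLD_TRAFFIC_LOAD_MIN_WINDOW_USEC are reported as 0% load.
+ */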
+static u8 iwl_mld_stats_load_percentage(u32 last_ts_usec, u32 curr_ts_usec,
+ u32 total_airtime_usec)
+{
+ u32 elapsed_usec = curr_ts_usec - last_ts_usec;
+
+ if (elapsed_usec < IWL_MLD_TRAFFIC_LOAD_MIN_WINDOW_USEC)
+ return 0;
+
+ return (100 * total_airtime_usec / elapsed_usec);
+}
+
+static void iwl_mld_stats_recalc_traffic_load(struct iwl_mld *mld,
+ u32 total_airtime_usec,
+ u32 curr_ts_usec)
+{
+ u32 last_ts_usec = mld->scan.traffic_load.last_stats_ts_usec;
+ u8 load_prec;
+
+ /* Skip the calculation as this is the first notification received */
+ if (!last_ts_usec)
+ goto out;
+
+ load_prec = iwl_mld_stats_load_percentage(last_ts_usec, curr_ts_usec,
+ total_airtime_usec);
+
+ if (load_prec > IWL_MLD_TRAFFIC_LOAD_HIGH_THRESH)
+ mld->scan.traffic_load.status = IWL_MLD_TRAFFIC_HIGH;
+ else if (load_prec > IWL_MLD_TRAFFIC_LOAD_MEDIUM_THRESH)
+ mld->scan.traffic_load.status = IWL_MLD_TRAFFIC_MEDIUM;
+ else
+ mld->scan.traffic_load.status = IWL_MLD_TRAFFIC_LOW;
+
+out:
+ mld->scan.traffic_load.last_stats_ts_usec = curr_ts_usec;
+}
+
+static void iwl_mld_update_link_sig(struct ieee80211_vif *vif, int sig,
+ struct ieee80211_bss_conf *bss_conf)
+{
+ struct iwl_mld *mld = iwl_mld_vif_from_mac80211(vif)->mld;
+ int exit_emlsr_thresh;
+
+ if (sig == 0) {
+ IWL_DEBUG_RX(mld, "RSSI is 0 - skip signal based decision\n");
+ return;
+ }
+
+ /* TODO: task=statistics handle CQM notifications */
+
+ if (sig < IWL_MLD_LOW_RSSI_MLO_SCAN_THRESH)
+ iwl_mld_int_mlo_scan(mld, vif);
+
+ if (!iwl_mld_emlsr_active(vif))
+ return;
+
+ /* We are in EMLSR, check if we need to exit */
+ exit_emlsr_thresh =
+ iwl_mld_get_emlsr_rssi_thresh(mld, &bss_conf->chanreq.oper,
+ true);
+
+ if (sig < exit_emlsr_thresh)
+ iwl_mld_exit_emlsr(mld, vif, IWL_MLD_EMLSR_EXIT_LOW_RSSI,
+ iwl_mld_get_other_link(vif,
+ bss_conf->link_id));
+}
+
+static void
+iwl_mld_process_per_link_stats(struct iwl_mld *mld,
+ const struct iwl_stats_ntfy_per_link *per_link,
+ u32 curr_ts_usec)
+{
+ u32 total_airtime_usec = 0;
+
+ for (u32 fw_id = 0;
+ fw_id < ARRAY_SIZE(mld->fw_id_to_bss_conf);
+ fw_id++) {
+ const struct iwl_stats_ntfy_per_link *link_stats;
+ struct ieee80211_bss_conf *bss_conf;
+ int sig;
+
+ bss_conf = wiphy_dereference(mld->wiphy,
+ mld->fw_id_to_bss_conf[fw_id]);
+ if (!bss_conf || bss_conf->vif->type != NL80211_IFTYPE_STATION)
+ continue;
+
+ link_stats = &per_link[fw_id];
+
+ total_airtime_usec += le32_to_cpu(link_stats->air_time);
+
+ sig = -le32_to_cpu(link_stats->beacon_filter_average_energy);
+ iwl_mld_update_link_sig(bss_conf->vif, sig, bss_conf);
+
+ /* TODO: parse more fields here (task=statistics)*/
+ }
+
+ iwl_mld_stats_recalc_traffic_load(mld, total_airtime_usec,
+ curr_ts_usec);
+}
+
+static void
+iwl_mld_process_per_sta_stats(struct iwl_mld *mld,
+ const struct iwl_stats_ntfy_per_sta *per_sta)
+{
+ for (int i = 0; i < mld->fw->ucode_capa.num_stations; i++) {
+ struct ieee80211_link_sta *link_sta =
+ wiphy_dereference(mld->wiphy,
+ mld->fw_id_to_link_sta[i]);
+ struct iwl_mld_link_sta *mld_link_sta;
+ s8 avg_energy =
+ -(s8)le32_to_cpu(per_sta[i].average_energy);
+
+ if (IS_ERR_OR_NULL(link_sta) || !avg_energy)
+ continue;
+
+ mld_link_sta = iwl_mld_link_sta_from_mac80211(link_sta);
+ if (WARN_ON(!mld_link_sta))
+ continue;
+
+ mld_link_sta->signal_avg = avg_energy;
+ }
+}
+
+static void iwl_mld_fill_chanctx_stats(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *ctx,
+ void *data)
+{
+ struct iwl_mld_phy *phy = iwl_mld_phy_from_mac80211(ctx);
+ const struct iwl_stats_ntfy_per_phy *per_phy = data;
+ u32 new_load, old_load;
+
+ if (WARN_ON(phy->fw_id >= IWL_STATS_MAX_PHY_OPERATIONAL))
+ return;
+
+ phy->channel_load_by_us =
+ le32_to_cpu(per_phy[phy->fw_id].channel_load_by_us);
+
+ old_load = phy->avg_channel_load_not_by_us;
+ new_load = le32_to_cpu(per_phy[phy->fw_id].channel_load_not_by_us);
+ if (IWL_FW_CHECK(phy->mld, new_load > 100, "Invalid channel load %u\n",
+ new_load))
+ return;
+
+ /* give a weight of 0.5 for the old value */
+ phy->avg_channel_load_not_by_us = (new_load >> 1) + (old_load >> 1);
+
+ iwl_mld_emlsr_check_chan_load(hw, phy, old_load);
+}
+
+static void
+iwl_mld_process_per_phy_stats(struct iwl_mld *mld,
+ const struct iwl_stats_ntfy_per_phy *per_phy)
+{
+ ieee80211_iter_chan_contexts_mtx(mld->hw,
+ iwl_mld_fill_chanctx_stats,
+ (void *)(uintptr_t)per_phy);
+}
+
+void iwl_mld_handle_stats_oper_notif(struct iwl_mld *mld,
+ struct iwl_rx_packet *pkt)
+{
+ const struct iwl_system_statistics_notif_oper *stats =
+ (void *)&pkt->data;
+ u32 curr_ts_usec = le32_to_cpu(stats->time_stamp);
+
+ BUILD_BUG_ON(ARRAY_SIZE(stats->per_sta) != IWL_STATION_COUNT_MAX);
+ BUILD_BUG_ON(ARRAY_SIZE(stats->per_link) <
+ ARRAY_SIZE(mld->fw_id_to_bss_conf));
+
+ iwl_mld_process_per_link_stats(mld, stats->per_link, curr_ts_usec);
+ iwl_mld_process_per_sta_stats(mld, stats->per_sta);
+ iwl_mld_process_per_phy_stats(mld, stats->per_phy);
+
+ iwl_mld_check_omi_bw_reduction(mld);
+}
+
+void iwl_mld_handle_stats_oper_part1_notif(struct iwl_mld *mld,
+ struct iwl_rx_packet *pkt)
+{
+ /* TODO */
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/stats.h b/drivers/net/wireless/intel/iwlwifi/mld/stats.h
new file mode 100644
index 000000000000..de434e66d555
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mld/stats.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2024 Intel Corporation
+ */
+#ifndef __iwl_mld_stats_h__
+#define __iwl_mld_stats_h__
+
+int iwl_mld_request_periodic_fw_stats(struct iwl_mld *mld, bool enable);
+
+void iwl_mld_mac80211_sta_statistics(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct station_info *sinfo);
+
+void iwl_mld_handle_stats_oper_notif(struct iwl_mld *mld,
+ struct iwl_rx_packet *pkt);
+void iwl_mld_handle_stats_oper_part1_notif(struct iwl_mld *mld,
+ struct iwl_rx_packet *pkt);
+
+int iwl_mld_clear_stats_in_fw(struct iwl_mld *mld);
+
+#endif /* __iwl_mld_stats_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/tests/Makefile b/drivers/net/wireless/intel/iwlwifi/mld/tests/Makefile
new file mode 100644
index 000000000000..36317feb923b
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mld/tests/Makefile
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+iwlmld-tests-y += module.o hcmd.o utils.o link.o rx.o agg.o link-selection.o
+
+ccflags-y += -I$(src)/../
+obj-$(CONFIG_IWLWIFI_KUNIT_TESTS) += iwlmld-tests.o
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/tests/agg.c b/drivers/net/wireless/intel/iwlwifi/mld/tests/agg.c
new file mode 100644
index 000000000000..1fd664be1a7c
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mld/tests/agg.c
@@ -0,0 +1,663 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * KUnit tests for the Rx reorder buffer functions
+ *
+ * Copyright (C) 2024 Intel Corporation
+ */
+#include <kunit/test.h>
+#include <kunit/static_stub.h>
+#include <kunit/skbuff.h>
+
+#include "utils.h"
+#include "mld.h"
+#include "sta.h"
+#include "agg.h"
+#include "rx.h"
+
+#define FC_QOS_DATA (IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_DATA)
+#define BA_WINDOW_SIZE 64
+#define QUEUE 0
+
+static const struct reorder_buffer_case {
+ const char *desc;
+ struct {
+ /* ieee80211_hdr fields */
+ u16 fc;
+ u8 tid;
+ bool multicast;
+ /* iwl_rx_mpdu_desc fields */
+ u16 nssn;
+ /* used also for setting hdr::seq_ctrl */
+ u16 sn;
+ u8 baid;
+ bool amsdu;
+ bool last_subframe;
+ bool old_sn;
+ bool dup;
+ } rx_pkt;
+ struct {
+ bool valid;
+ u16 head_sn;
+ u8 baid;
+ u16 num_entries;
+ /* The test prepares the reorder buffer with fake skbs based
+ * on the sequence numbers provided in @entries array.
+ */
+ struct {
+ u16 sn;
+ /* Set add_subframes > 0 to simulate an A-MSDU by
+ * queueing additional @add_subframes skbs in the
+ * appropriate reorder buffer entry (based on the @sn)
+ */
+ u8 add_subframes;
+ } entries[BA_WINDOW_SIZE];
+ } reorder_buf_state;
+ struct {
+ enum iwl_mld_reorder_result reorder_res;
+ u16 head_sn;
+ u16 num_stored;
+ u16 skb_release_order[BA_WINDOW_SIZE];
+ u16 skb_release_order_count;
+ } expected;
+} reorder_buffer_cases[] = {
+ {
+ .desc = "RX packet with invalid BAID",
+ .rx_pkt = {
+ .fc = FC_QOS_DATA,
+ .baid = IWL_RX_REORDER_DATA_INVALID_BAID,
+ },
+ .reorder_buf_state = {
+ .valid = true,
+ },
+ .expected = {
+ /* Invalid BAID should not be buffered. The frame is
+ * passed to the network stack immediately.
+ */
+ .reorder_res = IWL_MLD_PASS_SKB,
+ .num_stored = 0,
+ },
+ },
+ {
+ .desc = "RX Multicast packet",
+ .rx_pkt = {
+ .fc = FC_QOS_DATA,
+ .multicast = true,
+ },
+ .reorder_buf_state = {
+ .valid = true,
+ },
+ .expected = {
+ /* Multicast packets are not buffered. The packet is
+ * passed to the network stack immediately.
+ */
+ .reorder_res = IWL_MLD_PASS_SKB,
+ .num_stored = 0,
+ },
+ },
+ {
+ .desc = "RX non-QoS data",
+ .rx_pkt = {
+ .fc = IEEE80211_FTYPE_DATA,
+ },
+ .reorder_buf_state = {
+ .valid = true,
+ },
+ .expected = {
+ /* non-QoS data frames do not require reordering.
+ * The packet is passed to the network stack
+ * immediately.
+ */
+ .reorder_res = IWL_MLD_PASS_SKB,
+ },
+ },
+ {
+ .desc = "RX old SN packet, reorder buffer is not yet valid",
+ .rx_pkt = {
+ .fc = FC_QOS_DATA,
+ .old_sn = true,
+ },
+ .reorder_buf_state = {
+ .valid = false,
+ },
+ .expected = {
+ /* The buffer is invalid and the RX packet has an old
+ * SN. The packet is passed to the network stack
+ * immediately.
+ */
+ .reorder_res = IWL_MLD_PASS_SKB,
+ },
+ },
+ {
+ .desc = "RX old SN packet, reorder buffer valid",
+ .rx_pkt = {
+ .fc = FC_QOS_DATA,
+ .old_sn = true,
+ },
+ .reorder_buf_state = {
+ .valid = true,
+ .head_sn = 100,
+ },
+ .expected = {
+ /* The buffer is valid and the RX packet has an old SN.
+ * The packet should be dropped.
+ */
+ .reorder_res = IWL_MLD_DROP_SKB,
+ .num_stored = 0,
+ .head_sn = 100,
+ },
+ },
+ {
+ .desc = "RX duplicate packet",
+ .rx_pkt = {
+ .fc = FC_QOS_DATA,
+ .dup = true,
+ },
+ .reorder_buf_state = {
+ .valid = true,
+ .head_sn = 100,
+ },
+ .expected = {
+ /* Duplicate packets should be dropped */
+ .reorder_res = IWL_MLD_DROP_SKB,
+ .num_stored = 0,
+ .head_sn = 100,
+ },
+ },
+ {
+ .desc = "RX In-order packet, sn < nssn",
+ .rx_pkt = {
+ .fc = FC_QOS_DATA,
+ .sn = 100,
+ .nssn = 101,
+ },
+ .reorder_buf_state = {
+ .valid = true,
+ .head_sn = 100,
+ },
+ .expected = {
+ /* 1. Reorder buffer is empty.
+ * 2. RX packet SN is in order and less than NSSN.
+ * Packet is released to the network stack immediately
+ * and buffer->head_sn is updated to NSSN.
+ */
+ .reorder_res = IWL_MLD_PASS_SKB,
+ .num_stored = 0,
+ .head_sn = 101,
+ },
+ },
+ {
+ .desc = "RX In-order packet, sn == head_sn",
+ .rx_pkt = {
+ .fc = FC_QOS_DATA,
+ .sn = 101,
+ .nssn = 100,
+ },
+ .reorder_buf_state = {
+ .valid = true,
+ .head_sn = 101,
+ },
+ .expected = {
+ /* 1. Reorder buffer is empty.
+ * 2. RX packet SN is equal to buffer->head_sn.
+ * Packet is released to the network stack immediately
+ * and buffer->head_sn is incremented.
+ */
+ .reorder_res = IWL_MLD_PASS_SKB,
+ .num_stored = 0,
+ .head_sn = 102,
+ },
+ },
+ {
+ .desc = "RX In-order packet, IEEE80211_MAX_SN wrap around",
+ .rx_pkt = {
+ .fc = FC_QOS_DATA,
+ .sn = IEEE80211_MAX_SN,
+ .nssn = IEEE80211_MAX_SN - 1,
+ },
+ .reorder_buf_state = {
+ .valid = true,
+ .head_sn = IEEE80211_MAX_SN,
+ },
+ .expected = {
+ /* 1. Reorder buffer is empty.
+ * 2. RX SN == buffer->head_sn == IEEE80211_MAX_SN
+ * Packet is released to the network stack immediately
+ * and buffer->head_sn is incremented correctly (wraps
+ * around to 0).
+ */
+ .reorder_res = IWL_MLD_PASS_SKB,
+ .num_stored = 0,
+ .head_sn = 0,
+ },
+ },
+ {
+ .desc = "RX Out-of-order packet, pending packet in buffer",
+ .rx_pkt = {
+ .fc = FC_QOS_DATA,
+ .sn = 100,
+ .nssn = 102,
+ },
+ .reorder_buf_state = {
+ .valid = true,
+ .head_sn = 100,
+ .num_entries = 1,
+ .entries[0].sn = 101,
+ },
+ .expected = {
+ /* 1. Reorder buffer contains one packet with SN=101.
+ * 2. RX packet SN = buffer->head_sn.
+ * Both packets are released (in order) to the network
+ * stack as there are no gaps.
+ */
+ .reorder_res = IWL_MLD_BUFFERED_SKB,
+ .num_stored = 0,
+ .head_sn = 102,
+ .skb_release_order = {100, 101},
+ .skb_release_order_count = 2,
+ },
+ },
+ {
+ .desc = "RX Out-of-order packet, pending packet in buffer (wrap around)",
+ .rx_pkt = {
+ .fc = FC_QOS_DATA,
+ .sn = 0,
+ .nssn = 1,
+ },
+ .reorder_buf_state = {
+ .valid = true,
+ .head_sn = IEEE80211_MAX_SN - 1,
+ .num_entries = 1,
+ .entries[0].sn = IEEE80211_MAX_SN,
+ },
+ .expected = {
+ /* 1. Reorder buffer contains one packet with
+ * SN=IEEE80211_MAX_SN.
+ * 2. RX Packet SN = 0 (after wrap around)
+ * Both packets are released (in order) to the network
+ * stack as there are no gaps.
+ */
+ .reorder_res = IWL_MLD_BUFFERED_SKB,
+ .num_stored = 0,
+ .head_sn = 1,
+ .skb_release_order = { 4095, 0 },
+ .skb_release_order_count = 2,
+ },
+ },
+ {
+ .desc = "RX Out-of-order packet, filling 1/2 holes in buffer, release RX packet",
+ .rx_pkt = {
+ .fc = FC_QOS_DATA,
+ .sn = 100,
+ .nssn = 101,
+ },
+ .reorder_buf_state = {
+ .valid = true,
+ .head_sn = 100,
+ .num_entries = 1,
+ .entries[0].sn = 102,
+ },
+ .expected = {
+ /* 1. Reorder buffer contains one packet with SN=102.
+ * 2. There are 2 holes at SN={100, 101}.
+ * Only the RX packet (SN=100) is released, there is
+ * still a hole at 101.
+ */
+ .reorder_res = IWL_MLD_BUFFERED_SKB,
+ .num_stored = 1,
+ .head_sn = 101,
+ .skb_release_order = {100},
+ .skb_release_order_count = 1,
+ },
+ },
+ {
+ .desc = "RX Out-of-order packet, filling 1/2 holes, release 2 packets",
+ .rx_pkt = {
+ .fc = FC_QOS_DATA,
+ .sn = 102,
+ .nssn = 103,
+ },
+ .reorder_buf_state = {
+ .valid = true,
+ .head_sn = 100,
+ .num_entries = 3,
+ .entries[0].sn = 101,
+ .entries[1].sn = 104,
+ .entries[2].sn = 105,
+ },
+ .expected = {
+ /* 1. Reorder buffer contains three packets.
+ * 2. RX packet fills one of two holes (at SN=102).
+ * Two packets are released (until the next hole at
+ * SN=103).
+ */
+ .reorder_res = IWL_MLD_BUFFERED_SKB,
+ .num_stored = 2,
+ .head_sn = 103,
+ .skb_release_order = {101, 102},
+ .skb_release_order_count = 2,
+ },
+ },
+ {
+ .desc = "RX Out-of-order packet, filling 1/1 holes, no packets released",
+ .rx_pkt = {
+ .fc = FC_QOS_DATA,
+ .sn = 102,
+ .nssn = 100,
+ },
+ .reorder_buf_state = {
+ .valid = true,
+ .head_sn = 100,
+ .num_entries = 3,
+ .entries[0].sn = 101,
+ .entries[1].sn = 103,
+ .entries[2].sn = 104,
+ },
+ .expected = {
+ /* 1. Reorder buffer contains three packets:
+ * SN={101, 103, 104}.
+ * 2. RX packet fills a hole (SN=102), but NSSN is
+ * smaller than buffered frames.
+ * No packets can be released yet and buffer->head_sn
+ * is not updated.
+ */
+ .reorder_res = IWL_MLD_BUFFERED_SKB,
+ .num_stored = 4,
+ .head_sn = 100,
+ },
+ },
+ {
+ .desc = "RX In-order A-MSDU, last subframe",
+ .rx_pkt = {
+ .fc = FC_QOS_DATA,
+ .sn = 100,
+ .nssn = 101,
+ .amsdu = true,
+ .last_subframe = true,
+ },
+ .reorder_buf_state = {
+ .valid = true,
+ .head_sn = 100,
+ .num_entries = 1,
+ .entries[0] = {
+ .sn = 100,
+ .add_subframes = 1,
+ },
+ },
+ .expected = {
+ /* 1. Reorder buffer contains a 2-subframe A-MSDU at SN=100.
+ * 2. RX packet is the last SN=100 A-MSDU subframe.
+ * All packets are released in order (3 x SN=100).
+ */
+ .reorder_res = IWL_MLD_BUFFERED_SKB,
+ .num_stored = 0,
+ .head_sn = 101,
+ .skb_release_order = {100, 100, 100},
+ .skb_release_order_count = 3,
+ },
+ },
+ {
+ .desc = "RX In-order A-MSDU, not the last subframe",
+ .rx_pkt = {
+ .fc = FC_QOS_DATA,
+ .sn = 100,
+ .nssn = 101,
+ .amsdu = true,
+ .last_subframe = false,
+ },
+ .reorder_buf_state = {
+ .valid = true,
+ .head_sn = 100,
+ .num_entries = 1,
+ .entries[0] = {
+ .sn = 100,
+ .add_subframes = 1,
+ },
+ },
+ .expected = {
+ /* 1. Reorder buffer contains a 2-subframe A-MSDU at SN=100.
+ * 2. RX packet is an additional SN=100 A-MSDU subframe, but not
+ * the last one.
+ * No packets are released and head_sn is not updated.
+ */
+ .reorder_res = IWL_MLD_BUFFERED_SKB,
+ .num_stored = 3,
+ .head_sn = 100,
+ },
+ },
+};
+
+KUNIT_ARRAY_PARAM_DESC(test_reorder_buffer, reorder_buffer_cases, desc);
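+
+/* KUNIT_ARRAY_PARAM_DESC() generates test_reorder_buffer_gen_params(), which
+ * feeds each reorder_buffer_cases[] entry (named by its .desc string) to the
+ * parameterized test case registered below.
+ */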
+
+static struct sk_buff_head g_released_skbs;
+static u16 g_num_released_skbs;
+
+/* Add released skbs from the reorder buffer to a global list; this allows us
+ * to verify the correct release order of packets after they pass through the
+ * simulated reorder logic.
+ */
+static void
+fake_iwl_mld_pass_packet_to_mac80211(struct iwl_mld *mld,
+ struct napi_struct *napi,
+ struct sk_buff *skb, int queue,
+ struct ieee80211_sta *sta)
+{
+ __skb_queue_tail(&g_released_skbs, skb);
+ g_num_released_skbs++;
+}
+
+static u32
+fake_iwl_mld_fw_sta_id_mask(struct iwl_mld *mld, struct ieee80211_sta *sta)
+{
+ struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(sta);
+ struct iwl_mld_link_sta *mld_link_sta;
+ u8 link_id;
+ u32 sta_mask = 0;
+
+ /* This is the expectation in the real function */
+ lockdep_assert_wiphy(mld->wiphy);
+
+ /* We can't use for_each_sta_active_link */
+ for_each_mld_link_sta(mld_sta, mld_link_sta, link_id)
+ sta_mask |= BIT(mld_link_sta->fw_id);
+ return sta_mask;
+}
+
+static struct iwl_rx_mpdu_desc *setup_mpdu_desc(void)
+{
+ struct kunit *test = kunit_get_current_test();
+ const struct reorder_buffer_case *param =
+ (const void *)(test->param_value);
+ struct iwl_rx_mpdu_desc *mpdu_desc;
+
+ KUNIT_ALLOC_AND_ASSERT(test, mpdu_desc);
+
+ mpdu_desc->reorder_data |=
+ cpu_to_le32(FIELD_PREP(IWL_RX_MPDU_REORDER_BAID_MASK,
+ param->rx_pkt.baid));
+ mpdu_desc->reorder_data |=
+ cpu_to_le32(FIELD_PREP(IWL_RX_MPDU_REORDER_SN_MASK,
+ param->rx_pkt.sn));
+ mpdu_desc->reorder_data |=
+ cpu_to_le32(FIELD_PREP(IWL_RX_MPDU_REORDER_NSSN_MASK,
+ param->rx_pkt.nssn));
+ if (param->rx_pkt.old_sn)
+ mpdu_desc->reorder_data |=
+ cpu_to_le32(IWL_RX_MPDU_REORDER_BA_OLD_SN);
+
+ if (param->rx_pkt.dup)
+ mpdu_desc->status |= cpu_to_le32(IWL_RX_MPDU_STATUS_DUPLICATE);
+
+ if (param->rx_pkt.amsdu) {
+ mpdu_desc->mac_flags2 |= IWL_RX_MPDU_MFLG2_AMSDU;
+ if (param->rx_pkt.last_subframe)
+ mpdu_desc->amsdu_info |=
+ IWL_RX_MPDU_AMSDU_LAST_SUBFRAME;
+ }
+
+ return mpdu_desc;
+}
+
+static struct sk_buff *alloc_and_setup_skb(u16 fc, u16 seq_ctrl, u8 tid,
+ bool mcast)
+{
+ struct kunit *test = kunit_get_current_test();
+ struct ieee80211_hdr hdr = {
+ .frame_control = cpu_to_le16(fc),
+ .seq_ctrl = cpu_to_le16(seq_ctrl),
+ };
+ struct sk_buff *skb;
+
+ skb = kunit_zalloc_skb(test, 128, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, skb);
+
+ if (ieee80211_is_data_qos(hdr.frame_control)) {
+ u8 *qc = ieee80211_get_qos_ctl(&hdr);
+
+ qc[0] = tid & IEEE80211_QOS_CTL_TID_MASK;
+ }
+
+ if (mcast)
+ hdr.addr1[0] = 0x1;
+
+ skb_set_mac_header(skb, skb->len);
+ skb_put_data(skb, &hdr, ieee80211_hdrlen(hdr.frame_control));
+
+ return skb;
+}
+
+static struct iwl_mld_reorder_buffer *
+setup_reorder_buffer(struct iwl_mld_baid_data *baid_data)
+{
+ struct kunit *test = kunit_get_current_test();
+ const struct reorder_buffer_case *param =
+ (const void *)(test->param_value);
+ struct iwl_mld_reorder_buffer *buffer = baid_data->reorder_buf;
+ struct iwl_mld_reorder_buf_entry *entries = baid_data->entries;
+ struct sk_buff *fake_skb;
+
+ buffer->valid = param->reorder_buf_state.valid;
+ buffer->head_sn = param->reorder_buf_state.head_sn;
+ buffer->queue = QUEUE;
+
+ for (int i = 0; i < baid_data->buf_size; i++)
+ __skb_queue_head_init(&entries[i].frames);
+
+ for (int i = 0; i < param->reorder_buf_state.num_entries; i++) {
+ u16 sn = param->reorder_buf_state.entries[i].sn;
+ int index = sn % baid_data->buf_size;
+ u8 add_subframes =
+ param->reorder_buf_state.entries[i].add_subframes;
+ /* create one skb per entry, plus the requested number of
+ * additional subframe skbs
+ */
+ u8 num_skbs = 1 + add_subframes;
+
+ for (int j = 0; j < num_skbs; j++) {
+ fake_skb = alloc_and_setup_skb(FC_QOS_DATA, sn, 0,
+ false);
+ __skb_queue_tail(&entries[index].frames, fake_skb);
+ buffer->num_stored++;
+ }
+ }
+
+ return buffer;
+}
+
+static struct iwl_mld_reorder_buffer *setup_ba_data(struct ieee80211_sta *sta)
+{
+ struct kunit *test = kunit_get_current_test();
+ struct iwl_mld *mld = test->priv;
+ const struct reorder_buffer_case *param =
+ (const void *)(test->param_value);
+ struct iwl_mld_baid_data *baid_data = NULL;
+ struct iwl_mld_reorder_buffer *buffer;
+ u32 reorder_buf_size = BA_WINDOW_SIZE * sizeof(baid_data->entries[0]);
+ u8 baid = param->reorder_buf_state.baid;
+
+ /* Assuming only 1 RXQ */
+ KUNIT_ALLOC_AND_ASSERT_SIZE(test, baid_data,
+ sizeof(*baid_data) + reorder_buf_size);
+
+ baid_data->baid = baid;
+ baid_data->tid = param->rx_pkt.tid;
+ baid_data->buf_size = BA_WINDOW_SIZE;
+
+ wiphy_lock(mld->wiphy);
+ baid_data->sta_mask = iwl_mld_fw_sta_id_mask(mld, sta);
+ wiphy_unlock(mld->wiphy);
+
+ baid_data->entries_per_queue = BA_WINDOW_SIZE;
+
+ buffer = setup_reorder_buffer(baid_data);
+
+ KUNIT_EXPECT_NULL(test, rcu_access_pointer(mld->fw_id_to_ba[baid]));
+ rcu_assign_pointer(mld->fw_id_to_ba[baid], baid_data);
+
+ return buffer;
+}
+
+static void test_reorder_buffer(struct kunit *test)
+{
+ struct iwl_mld *mld = test->priv;
+ const struct reorder_buffer_case *param =
+ (const void *)(test->param_value);
+ struct iwl_rx_mpdu_desc *mpdu_desc;
+ struct ieee80211_vif *vif;
+ struct ieee80211_sta *sta;
+ struct sk_buff *skb;
+ struct iwl_mld_reorder_buffer *buffer;
+ enum iwl_mld_reorder_result reorder_res;
+ u16 skb_release_order_count = param->expected.skb_release_order_count;
+ u16 skb_idx = 0;
+
+ /* Init globals and activate stubs */
+ __skb_queue_head_init(&g_released_skbs);
+ g_num_released_skbs = 0;
+ kunit_activate_static_stub(test, iwl_mld_fw_sta_id_mask,
+ fake_iwl_mld_fw_sta_id_mask);
+ kunit_activate_static_stub(test, iwl_mld_pass_packet_to_mac80211,
+ fake_iwl_mld_pass_packet_to_mac80211);
+
+ vif = iwlmld_kunit_add_vif(false, NL80211_IFTYPE_STATION);
+ sta = iwlmld_kunit_setup_sta(vif, IEEE80211_STA_AUTHORIZED, -1);
+
+ /* Prepare skb, mpdu_desc, BA data and the reorder buffer */
+ skb = alloc_and_setup_skb(param->rx_pkt.fc, param->rx_pkt.sn,
+ param->rx_pkt.tid, param->rx_pkt.multicast);
+ buffer = setup_ba_data(sta);
+ mpdu_desc = setup_mpdu_desc();
+
+ rcu_read_lock();
+ reorder_res = iwl_mld_reorder(mld, NULL, QUEUE, sta, skb, mpdu_desc);
+ rcu_read_unlock();
+
+ KUNIT_ASSERT_EQ(test, reorder_res, param->expected.reorder_res);
+ KUNIT_ASSERT_EQ(test, buffer->num_stored, param->expected.num_stored);
+ KUNIT_ASSERT_EQ(test, buffer->head_sn, param->expected.head_sn);
+
+ /* Verify skbs release order */
+ KUNIT_ASSERT_EQ(test, skb_release_order_count, g_num_released_skbs);
+ while ((skb = __skb_dequeue(&g_released_skbs))) {
+ struct ieee80211_hdr *hdr = (void *)skb_mac_header(skb);
+
+ KUNIT_ASSERT_EQ(test, le16_to_cpu(hdr->seq_ctrl),
+ param->expected.skb_release_order[skb_idx]);
+ skb_idx++;
+ }
+ KUNIT_ASSERT_EQ(test, skb_idx, skb_release_order_count);
+}
+
+static struct kunit_case reorder_buffer_test_cases[] = {
+ KUNIT_CASE_PARAM(test_reorder_buffer, test_reorder_buffer_gen_params),
+ {},
+};
+
+static struct kunit_suite reorder_buffer = {
+ .name = "iwlmld-reorder-buffer",
+ .test_cases = reorder_buffer_test_cases,
+ .init = iwlmld_kunit_test_init,
+};
+
+kunit_test_suite(reorder_buffer);
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/tests/hcmd.c b/drivers/net/wireless/intel/iwlwifi/mld/tests/hcmd.c
new file mode 100644
index 000000000000..4e189bf8b3fb
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mld/tests/hcmd.c
@@ -0,0 +1,62 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * KUnit tests for the host command (hcmd) name tables
+ *
+ * Copyright (C) 2024 Intel Corporation
+ */
+#include <kunit/test.h>
+
+#include <iwl-trans.h>
+#include "mld.h"
+
+MODULE_IMPORT_NS("EXPORTED_FOR_KUNIT_TESTING");
+
+static void test_hcmd_names_sorted(struct kunit *test)
+{
+ int i;
+
+ for (i = 0; i < global_iwl_mld_goups_size; i++) {
+ const struct iwl_hcmd_arr *arr = &iwl_mld_groups[i];
+ int j;
+
+ if (!arr->arr)
+ continue;
+ for (j = 0; j < arr->size - 1; j++)
+ KUNIT_EXPECT_LE(test, arr->arr[j].cmd_id,
+ arr->arr[j + 1].cmd_id);
+ }
+}
+
+static void test_hcmd_names_for_rx(struct kunit *test)
+{
+ static struct iwl_trans t = {
+ .command_groups = iwl_mld_groups,
+ };
+
+ t.command_groups_size = global_iwl_mld_goups_size;
+
+ for (unsigned int i = 0; i < iwl_mld_rx_handlers_num; i++) {
+ const struct iwl_rx_handler *rxh;
+ const char *name;
+
+ rxh = &iwl_mld_rx_handlers[i];
+
+ name = iwl_get_cmd_string(&t, rxh->cmd_id);
+ KUNIT_EXPECT_NOT_NULL(test, name);
+ KUNIT_EXPECT_NE_MSG(test, strcmp(name, "UNKNOWN"), 0,
+ "ID 0x%04x is UNKNOWN", rxh->cmd_id);
+ }
+}
+
+static struct kunit_case hcmd_names_cases[] = {
+ KUNIT_CASE(test_hcmd_names_sorted),
+ KUNIT_CASE(test_hcmd_names_for_rx),
+ {},
+};
+
+static struct kunit_suite hcmd_names = {
+ .name = "iwlmld-hcmd-names",
+ .test_cases = hcmd_names_cases,
+};
+
+kunit_test_suite(hcmd_names);
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/tests/link-selection.c b/drivers/net/wireless/intel/iwlwifi/mld/tests/link-selection.c
new file mode 100644
index 000000000000..295dcfd3f85d
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mld/tests/link-selection.c
@@ -0,0 +1,303 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * KUnit tests for link selection functions
+ *
+ * Copyright (C) 2025 Intel Corporation
+ */
+#include <kunit/static_stub.h>
+
+#include "utils.h"
+#include "mld.h"
+#include "link.h"
+#include "iface.h"
+#include "phy.h"
+#include "mlo.h"
+
+static const struct link_grading_test_case {
+ const char *desc;
+ struct {
+ struct {
+ u8 link_id;
+ const struct cfg80211_chan_def *chandef;
+ bool active;
+ s32 signal;
+ bool has_chan_util_elem;
+ u8 chan_util; /* 0-255, used only if has_chan_util_elem is true */
+ u8 chan_load_by_us; /* 0-100, used only if active is true */
+ } link;
+ } input;
+ unsigned int expected_grade;
+} link_grading_cases[] = {
+ {
+ .desc = "channel util of 128 (50%)",
+ .input.link = {
+ .link_id = 0,
+ .chandef = &chandef_2ghz,
+ .active = false,
+ .has_chan_util_elem = true,
+ .chan_util = 128,
+ },
+ .expected_grade = 86,
+ },
+ {
+ .desc = "channel util of 180 (70%)",
+ .input.link = {
+ .link_id = 0,
+ .chandef = &chandef_2ghz,
+ .active = false,
+ .has_chan_util_elem = true,
+ .chan_util = 180,
+ },
+ .expected_grade = 51,
+ },
+ {
+ .desc = "channel util of 180 (70%), channel load by us of 10%",
+ .input.link = {
+ .link_id = 0,
+ .chandef = &chandef_2ghz,
+ .has_chan_util_elem = true,
+ .chan_util = 180,
+ .active = true,
+ .chan_load_by_us = 10,
+ },
+ .expected_grade = 67,
+ },
+ {
+ .desc = "no channel util element",
+ .input.link = {
+ .link_id = 0,
+ .chandef = &chandef_2ghz,
+ .active = true,
+ },
+ .expected_grade = 120,
+ },
+};
+
+KUNIT_ARRAY_PARAM_DESC(link_grading, link_grading_cases, desc);
+
+static void setup_link(struct ieee80211_bss_conf *link)
+{
+ struct kunit *test = kunit_get_current_test();
+ struct iwl_mld *mld = test->priv;
+ const struct link_grading_test_case *test_param =
+ (const void *)(test->param_value);
+
+ KUNIT_ALLOC_AND_ASSERT(test, link->bss);
+
+ link->bss->signal = DBM_TO_MBM(test_param->input.link.signal);
+
+ link->chanreq.oper = *test_param->input.link.chandef;
+
+ if (test_param->input.link.has_chan_util_elem) {
+ struct cfg80211_bss_ies *ies;
+ struct ieee80211_bss_load_elem bss_load = {
+ .channel_util = test_param->input.link.chan_util,
+ };
+ struct element *elem =
+ iwlmld_kunit_gen_element(WLAN_EID_QBSS_LOAD,
+ &bss_load,
+ sizeof(bss_load));
+ unsigned int elem_len = sizeof(*elem) + sizeof(bss_load);
+
+ KUNIT_ALLOC_AND_ASSERT_SIZE(test, ies, sizeof(*ies) + elem_len);
+ memcpy(ies->data, elem, elem_len);
+ ies->len = elem_len;
+ rcu_assign_pointer(link->bss->beacon_ies, ies);
+ rcu_assign_pointer(link->bss->ies, ies);
+ }
+
+ if (test_param->input.link.active) {
+ struct ieee80211_chanctx_conf *chan_ctx =
+ wiphy_dereference(mld->wiphy, link->chanctx_conf);
+ struct iwl_mld_phy *phy;
+
+ KUNIT_ASSERT_NOT_NULL(test, chan_ctx);
+
+ phy = iwl_mld_phy_from_mac80211(chan_ctx);
+
+ phy->channel_load_by_us = test_param->input.link.chan_load_by_us;
+ }
+}
+
+static void test_link_grading(struct kunit *test)
+{
+ struct iwl_mld *mld = test->priv;
+ const struct link_grading_test_case *test_param =
+ (const void *)(test->param_value);
+ struct ieee80211_vif *vif;
+ struct ieee80211_bss_conf *link;
+ unsigned int actual_grade;
+ /* Extract test case parameters */
+ u8 link_id = test_param->input.link.link_id;
+ bool active = test_param->input.link.active;
+ u16 valid_links;
+ struct iwl_mld_kunit_link assoc_link = {
+ .band = test_param->input.link.chandef->chan->band,
+ };
+
+ /* If the link is not active, use a different link as the assoc link */
+ if (active) {
+ assoc_link.id = link_id;
+ valid_links = BIT(link_id);
+ } else {
+ assoc_link.id = BIT(ffz(BIT(link_id)));
+ valid_links = BIT(assoc_link.id) | BIT(link_id);
+ }
+
+ vif = iwlmld_kunit_setup_mlo_assoc(valid_links, &assoc_link);
+
+ wiphy_lock(mld->wiphy);
+ link = wiphy_dereference(mld->wiphy, vif->link_conf[link_id]);
+ KUNIT_ASSERT_NOT_NULL(test, link);
+
+ setup_link(link);
+
+ actual_grade = iwl_mld_get_link_grade(mld, link);
+ wiphy_unlock(mld->wiphy);
+
+ /* Assert that the returned grade matches the expected grade */
+ KUNIT_EXPECT_EQ(test, actual_grade, test_param->expected_grade);
+}
+
+static struct kunit_case link_selection_cases[] = {
+ KUNIT_CASE_PARAM(test_link_grading, link_grading_gen_params),
+ {},
+};
+
+static struct kunit_suite link_selection = {
+ .name = "iwlmld-link-selection-tests",
+ .test_cases = link_selection_cases,
+ .init = iwlmld_kunit_test_init,
+};
+
+kunit_test_suite(link_selection);
+
+static const struct channel_load_case {
+ const char *desc;
+ bool low_latency_vif;
+ u32 chan_load_not_by_us;
+ enum nl80211_chan_width bw_a;
+ enum nl80211_chan_width bw_b;
+ bool primary_link_active;
+ bool expected_result;
+} channel_load_cases[] = {
+ {
+ .desc = "Unequal bandwidth, primary link inactive, EMLSR not allowed",
+ .low_latency_vif = false,
+ .primary_link_active = false,
+ .bw_a = NL80211_CHAN_WIDTH_40,
+ .bw_b = NL80211_CHAN_WIDTH_20,
+ .expected_result = false,
+ },
+ {
+ .desc = "Equal bandwidths, sufficient channel load, EMLSR allowed",
+ .low_latency_vif = false,
+ .primary_link_active = true,
+ .chan_load_not_by_us = 11,
+ .bw_a = NL80211_CHAN_WIDTH_40,
+ .bw_b = NL80211_CHAN_WIDTH_40,
+ .expected_result = true,
+ },
+ {
+ .desc = "Equal bandwidths, insufficient channel load, EMLSR not allowed",
+ .low_latency_vif = false,
+ .primary_link_active = true,
+ .chan_load_not_by_us = 6,
+ .bw_a = NL80211_CHAN_WIDTH_80,
+ .bw_b = NL80211_CHAN_WIDTH_80,
+ .expected_result = false,
+ },
+ {
+ .desc = "Low latency VIF, sufficient channel load, EMLSR allowed",
+ .low_latency_vif = true,
+ .primary_link_active = true,
+ .chan_load_not_by_us = 6,
+ .bw_a = NL80211_CHAN_WIDTH_160,
+ .bw_b = NL80211_CHAN_WIDTH_160,
+ .expected_result = true,
+ },
+ {
+ .desc = "Different bandwidths (2x ratio), primary link load permits EMLSR",
+ .low_latency_vif = false,
+ .primary_link_active = true,
+ .chan_load_not_by_us = 30,
+ .bw_a = NL80211_CHAN_WIDTH_40,
+ .bw_b = NL80211_CHAN_WIDTH_20,
+ .expected_result = true,
+ },
+ {
+ .desc = "Different bandwidths (4x ratio), primary link load permits EMLSR",
+ .low_latency_vif = false,
+ .primary_link_active = true,
+ .chan_load_not_by_us = 45,
+ .bw_a = NL80211_CHAN_WIDTH_80,
+ .bw_b = NL80211_CHAN_WIDTH_20,
+ .expected_result = true,
+ },
+ {
+ .desc = "Different bandwidths (16x ratio), primary link load insufficient",
+ .low_latency_vif = false,
+ .primary_link_active = true,
+ .chan_load_not_by_us = 45,
+ .bw_a = NL80211_CHAN_WIDTH_320,
+ .bw_b = NL80211_CHAN_WIDTH_20,
+ .expected_result = false,
+ },
+};
+
+KUNIT_ARRAY_PARAM_DESC(channel_load, channel_load_cases, desc);
+
+static void test_iwl_mld_channel_load_allows_emlsr(struct kunit *test)
+{
+ const struct channel_load_case *params = test->param_value;
+ struct iwl_mld *mld = test->priv;
+ struct ieee80211_vif *vif;
+ struct cfg80211_chan_def chandef_a, chandef_b;
+ struct iwl_mld_link_sel_data a = {.chandef = &chandef_a,
+ .link_id = 4};
+ struct iwl_mld_link_sel_data b = {.chandef = &chandef_b,
+ .link_id = 5};
+ struct iwl_mld_kunit_link assoc_link = {
+ .id = params->primary_link_active ? a.link_id : b.link_id,
+ .bandwidth = params->primary_link_active ? params->bw_a : params->bw_b,
+ };
+ bool result;
+
+ vif = iwlmld_kunit_setup_mlo_assoc(BIT(a.link_id) | BIT(b.link_id),
+ &assoc_link);
+
+ chandef_a.width = params->bw_a;
+ chandef_b.width = params->bw_b;
+
+ if (params->low_latency_vif)
+ iwl_mld_vif_from_mac80211(vif)->low_latency_causes = 1;
+
+ wiphy_lock(mld->wiphy);
+
+ /* Simulate channel load */
+ if (params->primary_link_active) {
+ struct iwl_mld_phy *phy =
+ iwlmld_kunit_get_phy_of_link(vif, a.link_id);
+
+ phy->avg_channel_load_not_by_us = params->chan_load_not_by_us;
+ }
+
+ result = iwl_mld_channel_load_allows_emlsr(mld, vif, &a, &b);
+
+ wiphy_unlock(mld->wiphy);
+
+ KUNIT_EXPECT_EQ(test, result, params->expected_result);
+}
+
+static struct kunit_case channel_load_criteria_test_cases[] = {
+ KUNIT_CASE_PARAM(test_iwl_mld_channel_load_allows_emlsr, channel_load_gen_params),
+ {}
+};
+
+static struct kunit_suite channel_load_criteria_tests = {
+ .name = "iwlmld_channel_load_allows_emlsr",
+ .test_cases = channel_load_criteria_test_cases,
+ .init = iwlmld_kunit_test_init,
+};
+
+kunit_test_suite(channel_load_criteria_tests);
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/tests/link.c b/drivers/net/wireless/intel/iwlwifi/mld/tests/link.c
new file mode 100644
index 000000000000..4a4eaa134bd3
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mld/tests/link.c
@@ -0,0 +1,110 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * KUnit tests for link functions
+ *
+ * Copyright (C) 2024-2025 Intel Corporation
+ */
+#include <kunit/static_stub.h>
+
+#include "utils.h"
+#include "mld.h"
+#include "link.h"
+#include "iface.h"
+#include "fw/api/mac-cfg.h"
+
+static const struct missed_beacon_test_case {
+ const char *desc;
+ struct {
+ struct iwl_missed_beacons_notif notif;
+ bool emlsr;
+ } input;
+ struct {
+ bool disconnected;
+ bool emlsr;
+ } output;
+} missed_beacon_cases[] = {
+ {
+ .desc = "no EMLSR, no disconnect",
+ .input.notif = {
+ .consec_missed_beacons = cpu_to_le32(4),
+ },
+ },
+ {
+ .desc = "no EMLSR, no beacon loss since Rx, no disconnect",
+ .input.notif = {
+ .consec_missed_beacons = cpu_to_le32(20),
+ },
+ },
+ {
+ .desc = "no EMLSR, beacon loss since Rx, disconnect",
+ .input.notif = {
+ .consec_missed_beacons = cpu_to_le32(20),
+ .consec_missed_beacons_since_last_rx =
+ cpu_to_le32(10),
+ },
+ .output.disconnected = true,
+ },
+};
+
+KUNIT_ARRAY_PARAM_DESC(test_missed_beacon, missed_beacon_cases, desc);
+
+static void fake_ieee80211_connection_loss(struct ieee80211_vif *vif)
+{
+ vif->cfg.assoc = false;
+}
+
+static void test_missed_beacon(struct kunit *test)
+{
+ struct iwl_mld *mld = test->priv;
+ struct iwl_missed_beacons_notif *notif;
+ const struct missed_beacon_test_case *test_param =
+ (const void *)(test->param_value);
+ struct ieee80211_vif *vif;
+ struct iwl_rx_packet *pkt;
+ struct iwl_mld_kunit_link link1 = {
+ .id = 0,
+ .band = NL80211_BAND_6GHZ,
+ };
+ struct iwl_mld_kunit_link link2 = {
+ .id = 1,
+ .band = NL80211_BAND_5GHZ,
+ };
+
+ kunit_activate_static_stub(test, ieee80211_connection_loss,
+ fake_ieee80211_connection_loss);
+ pkt = iwl_mld_kunit_create_pkt(test_param->input.notif);
+ notif = (void *)pkt->data;
+
+ if (test_param->input.emlsr) {
+ vif = iwlmld_kunit_assoc_emlsr(&link1, &link2);
+ } else {
+ struct iwl_mld_vif *mld_vif;
+
+ vif = iwlmld_kunit_setup_non_mlo_assoc(&link1);
+ mld_vif = iwl_mld_vif_from_mac80211(vif);
+ notif->link_id = cpu_to_le32(mld_vif->deflink.fw_id);
+ }
+
+ wiphy_lock(mld->wiphy);
+
+ iwl_mld_handle_missed_beacon_notif(mld, pkt);
+
+ wiphy_unlock(mld->wiphy);
+
+ KUNIT_ASSERT_NE(test, vif->cfg.assoc, test_param->output.disconnected);
+
+ /* TODO: add test cases for EMLSR */
+}
+
+static struct kunit_case link_cases[] = {
+ KUNIT_CASE_PARAM(test_missed_beacon, test_missed_beacon_gen_params),
+ {},
+};
+
+static struct kunit_suite link = {
+ .name = "iwlmld-link",
+ .test_cases = link_cases,
+ .init = iwlmld_kunit_test_init,
+};
+
+kunit_test_suite(link);
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/tests/module.c b/drivers/net/wireless/intel/iwlwifi/mld/tests/module.c
new file mode 100644
index 000000000000..5d9818587b23
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mld/tests/module.c
@@ -0,0 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * This is just module boilerplate for the iwlmld kunit module.
+ *
+ * Copyright (C) 2024 Intel Corporation
+ */
+#include <linux/module.h>
+
+MODULE_IMPORT_NS("IWLWIFI");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("kunit tests for iwlmld");
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/tests/rx.c b/drivers/net/wireless/intel/iwlwifi/mld/tests/rx.c
new file mode 100644
index 000000000000..20cb4e03ab41
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mld/tests/rx.c
@@ -0,0 +1,353 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * KUnit tests for Rx duplicate detection functions
+ *
+ * Copyright (C) 2024 Intel Corporation
+ */
+#include <kunit/test.h>
+#include "utils.h"
+#include "iwl-trans.h"
+#include "mld.h"
+#include "sta.h"
+
+static const struct is_dup_case {
+ const char *desc;
+ struct {
+ /* ieee80211_hdr fields */
+ __le16 fc;
+ __le16 seq;
+ u8 tid;
+ bool multicast;
+ /* iwl_rx_mpdu_desc fields */
+ bool is_amsdu;
+ u8 sub_frame_idx;
+ } rx_pkt;
+ struct {
+ __le16 last_seq;
+ u8 last_sub_frame_idx;
+ u8 tid;
+ } dup_data_state;
+ struct {
+ bool is_dup;
+ u32 rx_status_flag;
+ } result;
+} is_dup_cases[] = {
+ {
+ .desc = "Control frame",
+ .rx_pkt = {
+ .fc = __cpu_to_le16(IEEE80211_FTYPE_CTL),
+ },
+ .result = {
+ .is_dup = false,
+ .rx_status_flag = 0,
+ }
+ },
+ {
+ .desc = "Null func frame",
+ .rx_pkt = {
+ .fc = __cpu_to_le16(IEEE80211_FTYPE_DATA |
+ IEEE80211_STYPE_NULLFUNC),
+ },
+ .result = {
+ .is_dup = false,
+ .rx_status_flag = 0,
+ }
+ },
+ {
+ .desc = "Multicast data",
+ .rx_pkt = {
+ .fc = __cpu_to_le16(IEEE80211_FTYPE_DATA),
+ .multicast = true,
+ },
+ .result = {
+ .is_dup = false,
+ .rx_status_flag = 0,
+ }
+ },
+ {
+ .desc = "QoS null func frame",
+ .rx_pkt = {
+ .fc = __cpu_to_le16(IEEE80211_FTYPE_DATA |
+ IEEE80211_STYPE_QOS_NULLFUNC),
+ },
+ .result = {
+ .is_dup = false,
+ .rx_status_flag = 0,
+ }
+ },
+ {
+ .desc = "QoS data new sequence",
+ .rx_pkt = {
+ .fc = __cpu_to_le16(IEEE80211_FTYPE_DATA |
+ IEEE80211_STYPE_QOS_DATA),
+ .seq = __cpu_to_le16(0x101),
+ },
+ .dup_data_state = {
+ .last_seq = __cpu_to_le16(0x100),
+ .last_sub_frame_idx = 0,
+ },
+ .result = {
+ .is_dup = false,
+ .rx_status_flag = RX_FLAG_DUP_VALIDATED,
+ },
+ },
+ {
+ .desc = "QoS data same sequence, no retry",
+ .rx_pkt = {
+ .fc = __cpu_to_le16(IEEE80211_FTYPE_DATA |
+ IEEE80211_STYPE_QOS_DATA),
+ .seq = __cpu_to_le16(0x100),
+ },
+ .dup_data_state = {
+ .last_seq = __cpu_to_le16(0x100),
+ .last_sub_frame_idx = 0,
+ },
+ .result = {
+ .is_dup = false,
+ .rx_status_flag = RX_FLAG_DUP_VALIDATED,
+ },
+ },
+ {
+ .desc = "QoS data same sequence, has retry",
+ .rx_pkt = {
+ .fc = __cpu_to_le16(IEEE80211_FTYPE_DATA |
+ IEEE80211_STYPE_QOS_DATA |
+ IEEE80211_FCTL_RETRY),
+ .seq = __cpu_to_le16(0x100),
+ },
+ .dup_data_state = {
+ .last_seq = __cpu_to_le16(0x100),
+ .last_sub_frame_idx = 0,
+ },
+ .result = {
+ .is_dup = true,
+ .rx_status_flag = 0,
+ },
+ },
+ {
+ .desc = "QoS data invalid tid",
+ .rx_pkt = {
+ .fc = __cpu_to_le16(IEEE80211_FTYPE_DATA |
+ IEEE80211_STYPE_QOS_DATA),
+ .seq = __cpu_to_le16(0x100),
+ .tid = IWL_MAX_TID_COUNT + 1,
+ },
+ .result = {
+ .is_dup = true,
+ .rx_status_flag = 0,
+ },
+ },
+ {
+ .desc = "non-QoS data, same sequence, same tid, no retry",
+ .rx_pkt = {
+ /* Driver will use tid = IWL_MAX_TID_COUNT */
+ .fc = __cpu_to_le16(IEEE80211_FTYPE_DATA),
+ .seq = __cpu_to_le16(0x100),
+ },
+ .dup_data_state = {
+ .tid = IWL_MAX_TID_COUNT,
+ .last_seq = __cpu_to_le16(0x100),
+ .last_sub_frame_idx = 0,
+ },
+ .result = {
+ .is_dup = false,
+ .rx_status_flag = RX_FLAG_DUP_VALIDATED,
+ },
+ },
+ {
+ .desc = "non-QoS data, same sequence, same tid, has retry",
+ .rx_pkt = {
+ /* Driver will use tid = IWL_MAX_TID_COUNT */
+ .fc = __cpu_to_le16(IEEE80211_FTYPE_DATA |
+ IEEE80211_FCTL_RETRY),
+ .seq = __cpu_to_le16(0x100),
+ },
+ .dup_data_state = {
+ .tid = IWL_MAX_TID_COUNT,
+ .last_seq = __cpu_to_le16(0x100),
+ .last_sub_frame_idx = 0,
+ },
+ .result = {
+ .is_dup = true,
+ .rx_status_flag = 0,
+ },
+ },
+ {
+		.desc = "non-QoS data, same sequence on different TIDs",
+ .rx_pkt = {
+ /* Driver will use tid = IWL_MAX_TID_COUNT */
+ .fc = __cpu_to_le16(IEEE80211_FTYPE_DATA),
+ .seq = __cpu_to_le16(0x100),
+ },
+ .dup_data_state = {
+ .tid = 7,
+ .last_seq = __cpu_to_le16(0x100),
+ .last_sub_frame_idx = 0,
+ },
+ .result = {
+ .is_dup = false,
+ .rx_status_flag = RX_FLAG_DUP_VALIDATED,
+ },
+ },
+ {
+ .desc = "A-MSDU new subframe, allow same PN",
+ .rx_pkt = {
+ .fc = __cpu_to_le16(IEEE80211_FTYPE_DATA |
+ IEEE80211_STYPE_QOS_DATA),
+ .seq = __cpu_to_le16(0x100),
+ .is_amsdu = true,
+ .sub_frame_idx = 1,
+ },
+ .dup_data_state = {
+ .last_seq = __cpu_to_le16(0x100),
+ .last_sub_frame_idx = 0,
+ },
+ .result = {
+ .is_dup = false,
+ .rx_status_flag = RX_FLAG_ALLOW_SAME_PN |
+ RX_FLAG_DUP_VALIDATED,
+ },
+ },
+ {
+ .desc = "A-MSDU subframe with smaller idx, disallow same PN",
+ .rx_pkt = {
+ .fc = __cpu_to_le16(IEEE80211_FTYPE_DATA |
+ IEEE80211_STYPE_QOS_DATA),
+ .seq = __cpu_to_le16(0x100),
+ .is_amsdu = true,
+ .sub_frame_idx = 1,
+ },
+ .dup_data_state = {
+ .last_seq = __cpu_to_le16(0x100),
+ .last_sub_frame_idx = 2,
+ },
+ .result = {
+ .is_dup = false,
+ .rx_status_flag = RX_FLAG_DUP_VALIDATED,
+ },
+ },
+ {
+ .desc = "A-MSDU same subframe, no retry, disallow same PN",
+ .rx_pkt = {
+ .fc = __cpu_to_le16(IEEE80211_FTYPE_DATA |
+ IEEE80211_STYPE_QOS_DATA),
+ .seq = __cpu_to_le16(0x100),
+ .is_amsdu = true,
+ .sub_frame_idx = 0,
+ },
+ .dup_data_state = {
+ .last_seq = __cpu_to_le16(0x100),
+ .last_sub_frame_idx = 0,
+ },
+ .result = {
+ .is_dup = false,
+ .rx_status_flag = RX_FLAG_DUP_VALIDATED,
+ },
+ },
+ {
+ .desc = "A-MSDU same subframe, has retry",
+ .rx_pkt = {
+ .fc = __cpu_to_le16(IEEE80211_FTYPE_DATA |
+ IEEE80211_STYPE_QOS_DATA |
+ IEEE80211_FCTL_RETRY),
+ .seq = __cpu_to_le16(0x100),
+ .is_amsdu = true,
+ .sub_frame_idx = 0,
+ },
+ .dup_data_state = {
+ .last_seq = __cpu_to_le16(0x100),
+ .last_sub_frame_idx = 0,
+ },
+ .result = {
+ .is_dup = true,
+ .rx_status_flag = 0,
+ },
+ },
+};
+
+KUNIT_ARRAY_PARAM_DESC(test_is_dup, is_dup_cases, desc);
+
+static void
+setup_dup_data_state(struct ieee80211_sta *sta)
+{
+ struct kunit *test = kunit_get_current_test();
+ const struct is_dup_case *param = (const void *)(test->param_value);
+ struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(sta);
+ u8 tid = param->dup_data_state.tid;
+ struct iwl_mld_rxq_dup_data *dup_data;
+
+ /* Allocate dup_data only for 1 queue */
+ KUNIT_ALLOC_AND_ASSERT(test, dup_data);
+
+ /* Initialize dup data, see iwl_mld_alloc_dup_data */
+ memset(dup_data->last_seq, 0xff, sizeof(dup_data->last_seq));
+
+ dup_data->last_seq[tid] = param->dup_data_state.last_seq;
+ dup_data->last_sub_frame_idx[tid] =
+ param->dup_data_state.last_sub_frame_idx;
+
+ mld_sta->dup_data = dup_data;
+}
+
+static void setup_rx_pkt(const struct is_dup_case *param,
+ struct ieee80211_hdr *hdr,
+ struct iwl_rx_mpdu_desc *mpdu_desc)
+{
+ u8 tid = param->rx_pkt.tid;
+
+ /* Set "new rx packet" header */
+ hdr->frame_control = param->rx_pkt.fc;
+ hdr->seq_ctrl = param->rx_pkt.seq;
+
+ if (ieee80211_is_data_qos(hdr->frame_control)) {
+ u8 *qc = ieee80211_get_qos_ctl(hdr);
+
+ qc[0] = tid & IEEE80211_QOS_CTL_TID_MASK;
+ }
+
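+	/* Group-addressed frames are detected via the lowest bit of addr1 */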
+ if (param->rx_pkt.multicast)
+ hdr->addr1[0] = 0x1;
+
+ /* Set mpdu_desc */
+ mpdu_desc->amsdu_info = param->rx_pkt.sub_frame_idx &
+ IWL_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK;
+ if (param->rx_pkt.is_amsdu)
+ mpdu_desc->mac_flags2 |= IWL_RX_MPDU_MFLG2_AMSDU;
+}
+
+static void test_is_dup(struct kunit *test)
+{
+ const struct is_dup_case *param = (const void *)(test->param_value);
+ struct iwl_mld *mld = test->priv;
+ struct iwl_rx_mpdu_desc mpdu_desc = { };
+ struct ieee80211_rx_status rx_status = { };
+ struct ieee80211_vif *vif;
+ struct ieee80211_sta *sta;
+	struct ieee80211_hdr hdr = {};
+
+ vif = iwlmld_kunit_add_vif(false, NL80211_IFTYPE_STATION);
+ sta = iwlmld_kunit_setup_sta(vif, IEEE80211_STA_AUTHORIZED, -1);
+
+ /* Prepare test case state */
+ setup_dup_data_state(sta);
+ setup_rx_pkt(param, &hdr, &mpdu_desc);
+
+ KUNIT_EXPECT_EQ(test,
+ iwl_mld_is_dup(mld, sta, &hdr, &mpdu_desc, &rx_status,
+ 0), /* assuming only 1 queue */
+ param->result.is_dup);
+ KUNIT_EXPECT_EQ(test, rx_status.flag, param->result.rx_status_flag);
+}
+
+static struct kunit_case is_dup_test_cases[] = {
+ KUNIT_CASE_PARAM(test_is_dup, test_is_dup_gen_params),
+ {},
+};
+
+static struct kunit_suite is_dup = {
+ .name = "iwlmld-rx-is-dup",
+ .test_cases = is_dup_test_cases,
+ .init = iwlmld_kunit_test_init,
+};
+
+kunit_test_suite(is_dup);
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/tests/utils.c b/drivers/net/wireless/intel/iwlwifi/mld/tests/utils.c
new file mode 100644
index 000000000000..9712ee696509
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mld/tests/utils.c
@@ -0,0 +1,474 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Shared utilities for iwlmld KUnit tests
+ *
+ * Copyright (C) 2024-2025 Intel Corporation
+ */
+#include <kunit/test.h>
+#include <kunit/test-bug.h>
+
+#include "utils.h"
+
+#include <linux/device.h>
+
+#include "fw/api/scan.h"
+#include "fw/api/mac-cfg.h"
+#include "iwl-trans.h"
+#include "mld.h"
+#include "iface.h"
+#include "link.h"
+#include "phy.h"
+#include "sta.h"
+
+int iwlmld_kunit_test_init(struct kunit *test)
+{
+ struct iwl_mld *mld;
+ struct iwl_trans *trans;
+ const struct iwl_cfg *cfg;
+ struct iwl_fw *fw;
+ struct ieee80211_hw *hw;
+
+ KUNIT_ALLOC_AND_ASSERT(test, trans);
+ KUNIT_ALLOC_AND_ASSERT(test, trans->dev);
+ KUNIT_ALLOC_AND_ASSERT(test, cfg);
+ KUNIT_ALLOC_AND_ASSERT(test, fw);
+ KUNIT_ALLOC_AND_ASSERT(test, hw);
+ KUNIT_ALLOC_AND_ASSERT(test, hw->wiphy);
+
+ mutex_init(&hw->wiphy->mtx);
+
+ /* Allocate and initialize the mld structure */
+ KUNIT_ALLOC_AND_ASSERT(test, mld);
+ iwl_construct_mld(mld, trans, cfg, fw, hw, NULL);
+
+ fw->ucode_capa.num_stations = IWL_STATION_COUNT_MAX;
+ fw->ucode_capa.num_links = IWL_FW_MAX_LINK_ID + 1;
+
+ mld->fwrt.trans = trans;
+ mld->fwrt.fw = fw;
+ mld->fwrt.dev = trans->dev;
+
+ /* TODO: add priv_size to hw allocation and setup hw->priv to enable
+ * testing mac80211 callbacks
+ */
+
+ KUNIT_ALLOC_AND_ASSERT(test, mld->nvm_data);
+ KUNIT_ALLOC_AND_ASSERT_SIZE(test, mld->scan.cmd,
+ sizeof(struct iwl_scan_req_umac_v17));
+ mld->scan.cmd_size = sizeof(struct iwl_scan_req_umac_v17);
+
+ /* This is not the state at the end of the regular opmode_start,
+	 * but it is the state most tests need. Explicitly undo this if needed.
+ */
+ mld->trans->state = IWL_TRANS_FW_ALIVE;
+ mld->fw_status.running = true;
+
+ /* Avoid passing mld struct around */
+ test->priv = mld;
+ return 0;
+}
+
+IWL_MLD_ALLOC_FN(link, bss_conf)
+
+static void iwlmld_kunit_init_link(struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link,
+ struct iwl_mld_link *mld_link, int link_id)
+{
+ struct kunit *test = kunit_get_current_test();
+ struct iwl_mld *mld = test->priv;
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+ int ret;
+
+ /* setup mac80211 link */
+ rcu_assign_pointer(vif->link_conf[link_id], link);
+ link->link_id = link_id;
+ link->vif = vif;
+ link->beacon_int = 100;
+ link->dtim_period = 3;
+ link->qos = true;
+
+ /* and mld_link */
+ ret = iwl_mld_allocate_link_fw_id(mld, &mld_link->fw_id, link);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+ rcu_assign_pointer(mld_vif->link[link_id], mld_link);
+}
+
+IWL_MLD_ALLOC_FN(vif, vif)
+
+/* Helper function to add and initialize a VIF for KUnit tests */
+struct ieee80211_vif *iwlmld_kunit_add_vif(bool mlo, enum nl80211_iftype type)
+{
+ struct kunit *test = kunit_get_current_test();
+ struct iwl_mld *mld = test->priv;
+ struct ieee80211_vif *vif;
+ struct iwl_mld_vif *mld_vif;
+ int ret;
+
+ /* TODO: support more types */
+ KUNIT_ASSERT_EQ(test, type, NL80211_IFTYPE_STATION);
+
+ KUNIT_ALLOC_AND_ASSERT_SIZE(test, vif,
+ sizeof(*vif) + sizeof(*mld_vif));
+
+ vif->type = type;
+ mld_vif = iwl_mld_vif_from_mac80211(vif);
+ mld_vif->mld = mld;
+
+ ret = iwl_mld_allocate_vif_fw_id(mld, &mld_vif->fw_id, vif);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ /* TODO: revisit (task=EHT) */
+ if (mlo)
+ return vif;
+
+ /* Initialize the default link */
+ iwlmld_kunit_init_link(vif, &vif->bss_conf, &mld_vif->deflink, 0);
+
+ return vif;
+}
+
+/* Use only for MLO vif */
+struct ieee80211_bss_conf *
+iwlmld_kunit_add_link(struct ieee80211_vif *vif, int link_id)
+{
+ struct kunit *test = kunit_get_current_test();
+ struct ieee80211_bss_conf *link;
+ struct iwl_mld_link *mld_link;
+
+ KUNIT_ALLOC_AND_ASSERT(test, link);
+ KUNIT_ALLOC_AND_ASSERT(test, mld_link);
+
+ iwlmld_kunit_init_link(vif, link, mld_link, link_id);
+ vif->valid_links |= BIT(link_id);
+
+ return link;
+}
+
+struct ieee80211_chanctx_conf *
+iwlmld_kunit_add_chanctx_from_def(struct cfg80211_chan_def *def)
+{
+ struct kunit *test = kunit_get_current_test();
+ struct iwl_mld *mld = test->priv;
+ struct ieee80211_chanctx_conf *ctx;
+ struct iwl_mld_phy *phy;
+ int fw_id;
+
+ KUNIT_ALLOC_AND_ASSERT_SIZE(test, ctx, sizeof(*ctx) + sizeof(*phy));
+
+ /* Setup the chanctx conf */
+ ctx->def = *def;
+ ctx->min_def = *def;
+ ctx->ap = *def;
+
+ /* and the iwl_mld_phy */
+ phy = iwl_mld_phy_from_mac80211(ctx);
+
+ fw_id = iwl_mld_allocate_fw_phy_id(mld);
+ KUNIT_ASSERT_GE(test, fw_id, 0);
+
+ phy->fw_id = fw_id;
+ phy->mld = mld;
+ phy->chandef = *def;
+
+ return ctx;
+}
+
+void iwlmld_kunit_assign_chanctx_to_link(struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ struct kunit *test = kunit_get_current_test();
+ struct iwl_mld *mld = test->priv;
+ struct iwl_mld_link *mld_link;
+
+ KUNIT_EXPECT_NULL(test, rcu_access_pointer(link->chanctx_conf));
+ rcu_assign_pointer(link->chanctx_conf, ctx);
+
+ lockdep_assert_wiphy(mld->wiphy);
+
+ mld_link = iwl_mld_link_from_mac80211(link);
+
+ KUNIT_EXPECT_NULL(test, rcu_access_pointer(mld_link->chan_ctx));
+ KUNIT_EXPECT_FALSE(test, mld_link->active);
+
+ rcu_assign_pointer(mld_link->chan_ctx, ctx);
+ mld_link->active = true;
+
+ if (ieee80211_vif_is_mld(vif))
+ vif->active_links |= BIT(link->link_id);
+}
+
+IWL_MLD_ALLOC_FN(link_sta, link_sta)
+
+static void iwlmld_kunit_add_link_sta(struct ieee80211_sta *sta,
+ struct ieee80211_link_sta *link_sta,
+ struct iwl_mld_link_sta *mld_link_sta,
+ u8 link_id)
+{
+ struct kunit *test = kunit_get_current_test();
+ struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(sta);
+ struct iwl_mld *mld = test->priv;
+ u8 fw_id;
+ int ret;
+
+ /* initialize mac80211's link_sta */
+ link_sta->link_id = link_id;
+ rcu_assign_pointer(sta->link[link_id], link_sta);
+
+ link_sta->sta = sta;
+
+ /* and the iwl_mld_link_sta */
+ ret = iwl_mld_allocate_link_sta_fw_id(mld, &fw_id, link_sta);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+ mld_link_sta->fw_id = fw_id;
+
+ rcu_assign_pointer(mld_sta->link[link_id], mld_link_sta);
+}
+
+static struct ieee80211_link_sta *
+iwlmld_kunit_alloc_link_sta(struct ieee80211_sta *sta, int link_id)
+{
+ struct kunit *test = kunit_get_current_test();
+ struct ieee80211_link_sta *link_sta;
+ struct iwl_mld_link_sta *mld_link_sta;
+
+ /* Only valid for MLO */
+ KUNIT_ASSERT_TRUE(test, sta->valid_links);
+
+ KUNIT_ALLOC_AND_ASSERT(test, link_sta);
+ KUNIT_ALLOC_AND_ASSERT(test, mld_link_sta);
+
+ iwlmld_kunit_add_link_sta(sta, link_sta, mld_link_sta, link_id);
+
+ sta->valid_links |= BIT(link_id);
+
+ return link_sta;
+}
+
+/* Allocate and initialize a STA with the first link_sta */
+static struct ieee80211_sta *
+iwlmld_kunit_add_sta(struct ieee80211_vif *vif, int link_id)
+{
+ struct kunit *test = kunit_get_current_test();
+ struct ieee80211_sta *sta;
+ struct iwl_mld_sta *mld_sta;
+
+ /* Allocate memory for ieee80211_sta with embedded iwl_mld_sta */
+ KUNIT_ALLOC_AND_ASSERT_SIZE(test, sta, sizeof(*sta) + sizeof(*mld_sta));
+
+ /* TODO: allocate and initialize the TXQs ? */
+
+ mld_sta = iwl_mld_sta_from_mac80211(sta);
+ mld_sta->vif = vif;
+ mld_sta->mld = test->priv;
+
+ /* TODO: adjust for internal stations */
+ mld_sta->sta_type = STATION_TYPE_PEER;
+
+ if (link_id >= 0) {
+ iwlmld_kunit_add_link_sta(sta, &sta->deflink,
+ &mld_sta->deflink, link_id);
+ sta->valid_links = BIT(link_id);
+ } else {
+ iwlmld_kunit_add_link_sta(sta, &sta->deflink,
+ &mld_sta->deflink, 0);
+ }
+ return sta;
+}
+
+/* Move a STA to the given state */
+static void iwlmld_kunit_move_sta_state(struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ enum ieee80211_sta_state state)
+{
+ struct kunit *test = kunit_get_current_test();
+ struct iwl_mld_sta *mld_sta;
+ struct iwl_mld_vif *mld_vif;
+
+ /* The sta will be removed automatically at the end of the test */
+ KUNIT_ASSERT_NE(test, state, IEEE80211_STA_NOTEXIST);
+
+ mld_sta = iwl_mld_sta_from_mac80211(sta);
+ mld_sta->sta_state = state;
+
+ mld_vif = iwl_mld_vif_from_mac80211(mld_sta->vif);
+ mld_vif->authorized = state == IEEE80211_STA_AUTHORIZED;
+
+ if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls)
+ mld_vif->ap_sta = sta;
+}
+
+struct ieee80211_sta *iwlmld_kunit_setup_sta(struct ieee80211_vif *vif,
+ enum ieee80211_sta_state state,
+ int link_id)
+{
+ struct kunit *test = kunit_get_current_test();
+ struct ieee80211_sta *sta;
+
+ /* The sta will be removed automatically at the end of the test */
+ KUNIT_ASSERT_NE(test, state, IEEE80211_STA_NOTEXIST);
+
+ /* First - allocate and init the STA */
+ sta = iwlmld_kunit_add_sta(vif, link_id);
+
+ /* Now move it all the way to the wanted state */
+ for (enum ieee80211_sta_state _state = IEEE80211_STA_NONE;
+ _state <= state; _state++)
+		iwlmld_kunit_move_sta_state(vif, sta, _state);
+
+ return sta;
+}
+
+static void iwlmld_kunit_set_vif_associated(struct ieee80211_vif *vif)
+{
+ /* TODO: setup chanreq */
+ /* TODO setup capabilities */
+
+ vif->cfg.assoc = 1;
+}
+
+static struct ieee80211_vif *
+iwlmld_kunit_setup_assoc(bool mlo, struct iwl_mld_kunit_link *assoc_link)
+{
+ struct kunit *test = kunit_get_current_test();
+ struct iwl_mld *mld = test->priv;
+ struct ieee80211_vif *vif;
+ struct ieee80211_bss_conf *link;
+ struct ieee80211_chanctx_conf *chan_ctx;
+
+ KUNIT_ASSERT_TRUE(test, mlo || assoc_link->id == 0);
+
+ vif = iwlmld_kunit_add_vif(mlo, NL80211_IFTYPE_STATION);
+
+ if (mlo)
+ link = iwlmld_kunit_add_link(vif, assoc_link->id);
+ else
+ link = &vif->bss_conf;
+
+ chan_ctx = iwlmld_kunit_add_chanctx(assoc_link->band,
+ assoc_link->bandwidth);
+
+ wiphy_lock(mld->wiphy);
+ iwlmld_kunit_assign_chanctx_to_link(vif, link, chan_ctx);
+ wiphy_unlock(mld->wiphy);
+
+	/* The AP sta will now be pointed to by mld_vif->ap_sta */
+ iwlmld_kunit_setup_sta(vif, IEEE80211_STA_AUTHORIZED, assoc_link->id);
+
+ iwlmld_kunit_set_vif_associated(vif);
+
+ return vif;
+}
+
+struct ieee80211_vif *
+iwlmld_kunit_setup_mlo_assoc(u16 valid_links,
+ struct iwl_mld_kunit_link *assoc_link)
+{
+ struct kunit *test = kunit_get_current_test();
+ struct ieee80211_vif *vif;
+
+ KUNIT_ASSERT_TRUE(test,
+ hweight16(valid_links) == 1 ||
+ hweight16(valid_links) == 2);
+ KUNIT_ASSERT_TRUE(test, valid_links & BIT(assoc_link->id));
+
+ vif = iwlmld_kunit_setup_assoc(true, assoc_link);
+
+ /* Add the other link, if applicable */
+ if (hweight16(valid_links) > 1) {
+ u8 other_link_id = ffs(valid_links & ~BIT(assoc_link->id)) - 1;
+
+ iwlmld_kunit_add_link(vif, other_link_id);
+ }
+
+ return vif;
+}
+
+struct ieee80211_vif *
+iwlmld_kunit_setup_non_mlo_assoc(struct iwl_mld_kunit_link *assoc_link)
+{
+ return iwlmld_kunit_setup_assoc(false, assoc_link);
+}
+
+struct iwl_rx_packet *
+_iwl_mld_kunit_create_pkt(const void *notif, size_t notif_sz)
+{
+ struct kunit *test = kunit_get_current_test();
+ struct iwl_rx_packet *pkt;
+
+	KUNIT_ALLOC_AND_ASSERT_SIZE(test, pkt, sizeof(*pkt) + notif_sz);
+
+ memcpy(pkt->data, notif, notif_sz);
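+	/* len_n_flags covers the command header plus the notification payload */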
+ pkt->len_n_flags = cpu_to_le32(sizeof(pkt->hdr) + notif_sz);
+
+ return pkt;
+}
+
+struct ieee80211_vif *iwlmld_kunit_assoc_emlsr(struct iwl_mld_kunit_link *link1,
+ struct iwl_mld_kunit_link *link2)
+{
+ struct kunit *test = kunit_get_current_test();
+ struct iwl_mld *mld = test->priv;
+ struct ieee80211_vif *vif;
+ struct ieee80211_bss_conf *link;
+ struct ieee80211_chanctx_conf *chan_ctx;
+ struct ieee80211_sta *sta;
+ struct iwl_mld_vif *mld_vif;
+ u16 valid_links = BIT(link1->id) | BIT(link2->id);
+
+ KUNIT_ASSERT_TRUE(test, hweight16(valid_links) == 2);
+
+ vif = iwlmld_kunit_setup_mlo_assoc(valid_links, link1);
+ mld_vif = iwl_mld_vif_from_mac80211(vif);
+
+ /* Activate second link */
+ wiphy_lock(mld->wiphy);
+
+ link = wiphy_dereference(mld->wiphy, vif->link_conf[link2->id]);
+ KUNIT_EXPECT_NOT_NULL(test, link);
+
+ chan_ctx = iwlmld_kunit_add_chanctx(link2->band, link2->bandwidth);
+ iwlmld_kunit_assign_chanctx_to_link(vif, link, chan_ctx);
+
+ wiphy_unlock(mld->wiphy);
+
+ /* And other link sta */
+ sta = mld_vif->ap_sta;
+ KUNIT_EXPECT_NOT_NULL(test, sta);
+
+ iwlmld_kunit_alloc_link_sta(sta, link2->id);
+
+ return vif;
+}
+
+struct element *iwlmld_kunit_gen_element(u8 id, const void *data, size_t len)
+{
+ struct kunit *test = kunit_get_current_test();
+ struct element *elem;
+
+ KUNIT_ALLOC_AND_ASSERT_SIZE(test, elem, sizeof(*elem) + len);
+
+ elem->id = id;
+ elem->datalen = len;
+ memcpy(elem->data, data, len);
+
+ return elem;
+}
+
+struct iwl_mld_phy *iwlmld_kunit_get_phy_of_link(struct ieee80211_vif *vif,
+ u8 link_id)
+{
+ struct kunit *test = kunit_get_current_test();
+ struct iwl_mld *mld = test->priv;
+ struct ieee80211_chanctx_conf *chanctx;
+ struct ieee80211_bss_conf *link =
+ wiphy_dereference(mld->wiphy, vif->link_conf[link_id]);
+
+ KUNIT_EXPECT_NOT_NULL(test, link);
+
+ chanctx = wiphy_dereference(mld->wiphy, link->chanctx_conf);
+ KUNIT_EXPECT_NOT_NULL(test, chanctx);
+
+ return iwl_mld_phy_from_mac80211(chanctx);
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/tests/utils.h b/drivers/net/wireless/intel/iwlwifi/mld/tests/utils.h
new file mode 100644
index 000000000000..d3723653cf1b
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mld/tests/utils.h
@@ -0,0 +1,134 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2024-2025 Intel Corporation
+ */
+
+#ifndef __iwl_mld_kunit_utils_h__
+#define __iwl_mld_kunit_utils_h__
+
+#include <net/mac80211.h>
+#include <kunit/test-bug.h>
+
+struct iwl_mld;
+
+int iwlmld_kunit_test_init(struct kunit *test);
+
+struct iwl_mld_kunit_link {
+ u8 id;
+ enum nl80211_band band;
+ enum nl80211_chan_width bandwidth;
+};
+
+enum nl80211_iftype;
+
+struct ieee80211_vif *iwlmld_kunit_add_vif(bool mlo, enum nl80211_iftype type);
+
+struct ieee80211_bss_conf *
+iwlmld_kunit_add_link(struct ieee80211_vif *vif, int link_id);
+
+#define KUNIT_ALLOC_AND_ASSERT_SIZE(test, ptr, size) \
+do { \
+ (ptr) = kunit_kzalloc((test), (size), GFP_KERNEL); \
+ KUNIT_ASSERT_NOT_NULL((test), (ptr)); \
+} while (0)
+
+#define KUNIT_ALLOC_AND_ASSERT(test, ptr) \
+ KUNIT_ALLOC_AND_ASSERT_SIZE(test, ptr, sizeof(*(ptr)))
+
+#define CHANNEL(_name, _band, _freq) \
+static struct ieee80211_channel _name = { \
+ .band = (_band), \
+ .center_freq = (_freq), \
+ .hw_value = (_freq), \
+}
+
+#define CHANDEF(_name, _channel, _freq1, _width) \
+__maybe_unused static struct cfg80211_chan_def _name = { \
+ .chan = &(_channel), \
+ .center_freq1 = (_freq1), \
+ .width = (_width), \
+}
+
+CHANNEL(chan_2ghz, NL80211_BAND_2GHZ, 2412);
+CHANNEL(chan_5ghz, NL80211_BAND_5GHZ, 5200);
+CHANNEL(chan_6ghz, NL80211_BAND_6GHZ, 6115);
+/* Feel free to add more */
+
+CHANDEF(chandef_2ghz, chan_2ghz, 2412, NL80211_CHAN_WIDTH_20);
+CHANDEF(chandef_5ghz, chan_5ghz, 5200, NL80211_CHAN_WIDTH_40);
+CHANDEF(chandef_6ghz, chan_6ghz, 6115, NL80211_CHAN_WIDTH_160);
+/* Feel free to add more */
+
+struct ieee80211_chanctx_conf *
+iwlmld_kunit_add_chanctx_from_def(struct cfg80211_chan_def *def);
+
+static inline struct ieee80211_chanctx_conf *
+iwlmld_kunit_add_chanctx(enum nl80211_band band, enum nl80211_chan_width width)
+{
+ struct cfg80211_chan_def chandef;
+
+ switch (band) {
+ case NL80211_BAND_2GHZ:
+ chandef = chandef_2ghz;
+ break;
+ case NL80211_BAND_5GHZ:
+ chandef = chandef_5ghz;
+ break;
+ default:
+ case NL80211_BAND_6GHZ:
+ chandef = chandef_6ghz;
+ break;
+ }
+
+ chandef.width = width;
+
+ return iwlmld_kunit_add_chanctx_from_def(&chandef);
+}
+
+void iwlmld_kunit_assign_chanctx_to_link(struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link,
+ struct ieee80211_chanctx_conf *ctx);
+
+/* Allocate a sta, initialize it and move it to the wanted state */
+struct ieee80211_sta *iwlmld_kunit_setup_sta(struct ieee80211_vif *vif,
+ enum ieee80211_sta_state state,
+ int link_id);
+
+struct ieee80211_vif *
+iwlmld_kunit_setup_mlo_assoc(u16 valid_links,
+ struct iwl_mld_kunit_link *assoc_link);
+
+struct ieee80211_vif *
+iwlmld_kunit_setup_non_mlo_assoc(struct iwl_mld_kunit_link *assoc_link);
+
+struct iwl_rx_packet *
+_iwl_mld_kunit_create_pkt(const void *notif, size_t notif_sz);
+
+#define iwl_mld_kunit_create_pkt(_notif) \
+ _iwl_mld_kunit_create_pkt(&(_notif), sizeof(_notif))
+
+struct ieee80211_vif *
+iwlmld_kunit_assoc_emlsr(struct iwl_mld_kunit_link *link1,
+ struct iwl_mld_kunit_link *link2);
+
+struct element *iwlmld_kunit_gen_element(u8 id, const void *data, size_t len);
+
+/**
+ * iwlmld_kunit_get_phy_of_link - Get the phy of a link
+ *
+ * @vif: The vif to get the phy from.
+ * @link_id: The id of the link to get the phy for.
+ *
+ * Given a vif and link id, return the phy pointer of that link.
+ * This assumes that the link exists, and that it had a chanctx
+ * assigned.
+ * If this is not the case, the test will fail.
+ *
+ * Return: phy pointer.
+ */
+struct iwl_mld_phy *iwlmld_kunit_get_phy_of_link(struct ieee80211_vif *vif,
+ u8 link_id);
+
+#endif /* __iwl_mld_kunit_utils_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/thermal.c b/drivers/net/wireless/intel/iwlwifi/mld/thermal.c
new file mode 100644
index 000000000000..1909953a9be9
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mld/thermal.c
@@ -0,0 +1,438 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2024-2025 Intel Corporation
+ */
+#ifdef CONFIG_THERMAL
+#include <linux/sort.h>
+#include <linux/thermal.h>
+#endif
+
+#include "fw/api/phy.h"
+
+#include "thermal.h"
+#include "mld.h"
+#include "hcmd.h"
+
+#define IWL_MLD_CT_KILL_DURATION (5 * HZ)
+
+void iwl_mld_handle_ct_kill_notif(struct iwl_mld *mld,
+ struct iwl_rx_packet *pkt)
+{
+ const struct ct_kill_notif *notif = (const void *)pkt->data;
+
+ IWL_ERR(mld,
+ "CT Kill notification: temp = %d, DTS = 0x%x, Scheme 0x%x - Enter CT Kill\n",
+ le16_to_cpu(notif->temperature), notif->dts,
+ notif->scheme);
+
+ iwl_mld_set_ctkill(mld, true);
+
+ wiphy_delayed_work_queue(mld->wiphy, &mld->ct_kill_exit_wk,
+ round_jiffies_relative(IWL_MLD_CT_KILL_DURATION));
+}
+
+static void iwl_mld_exit_ctkill(struct wiphy *wiphy, struct wiphy_work *wk)
+{
+ struct iwl_mld *mld;
+
+ mld = container_of(wk, struct iwl_mld, ct_kill_exit_wk.work);
+
+ IWL_ERR(mld, "Exit CT Kill\n");
+ iwl_mld_set_ctkill(mld, false);
+}
+
+void iwl_mld_handle_temp_notif(struct iwl_mld *mld, struct iwl_rx_packet *pkt)
+{
+ const struct iwl_dts_measurement_notif *notif =
+ (const void *)pkt->data;
+ int temp;
+ u32 ths_crossed;
+
+ temp = le32_to_cpu(notif->temp);
+
+ /* shouldn't be negative, but since it's s32, make sure it isn't */
+ if (IWL_FW_CHECK(mld, temp < 0, "negative temperature %d\n", temp))
+ return;
+
+ ths_crossed = le32_to_cpu(notif->threshold_idx);
+
+ /* 0xFF in ths_crossed means the notification is not related
+ * to a trip, so we can ignore it here.
+ */
+ if (ths_crossed == 0xFF)
+ return;
+
+ IWL_DEBUG_TEMP(mld, "Temp = %d Threshold crossed = %d\n",
+ temp, ths_crossed);
+
+ if (IWL_FW_CHECK(mld, ths_crossed >= IWL_MAX_DTS_TRIPS,
+ "bad threshold: %d\n", ths_crossed))
+ return;
+
+#ifdef CONFIG_THERMAL
+ if (mld->tzone)
+ thermal_zone_device_update(mld->tzone, THERMAL_TRIP_VIOLATED);
+#endif /* CONFIG_THERMAL */
+}
+
+#ifdef CONFIG_THERMAL
+static int iwl_mld_get_temp(struct iwl_mld *mld, s32 *temp)
+{
+ struct iwl_host_cmd cmd = {
+ .id = WIDE_ID(PHY_OPS_GROUP, CMD_DTS_MEASUREMENT_TRIGGER_WIDE),
+ .flags = CMD_WANT_SKB,
+ };
+ const struct iwl_dts_measurement_resp *resp;
+ int ret;
+
+ lockdep_assert_wiphy(mld->wiphy);
+
+ ret = iwl_mld_send_cmd(mld, &cmd);
+ if (ret) {
+ IWL_ERR(mld,
+ "Failed to send the temperature measurement command (err=%d)\n",
+ ret);
+ return ret;
+ }
+
+ if (iwl_rx_packet_payload_len(cmd.resp_pkt) < sizeof(*resp)) {
+ IWL_ERR(mld,
+ "Failed to get a valid response to DTS measurement\n");
+ ret = -EIO;
+ goto free_resp;
+ }
+
+ resp = (const void *)cmd.resp_pkt->data;
+ *temp = le32_to_cpu(resp->temp);
+
+ IWL_DEBUG_TEMP(mld,
+ "Got temperature measurement response: temp=%d\n",
+ *temp);
+
+free_resp:
+ iwl_free_resp(&cmd);
+ return ret;
+}
+
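+/* sort() comparator: ascending order of __le16 temperature thresholds */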
+static int compare_temps(const void *a, const void *b)
+{
+ return ((s16)le16_to_cpu(*(__le16 *)a) -
+ (s16)le16_to_cpu(*(__le16 *)b));
+}
+
+struct iwl_trip_walk_data {
+ __le16 *thresholds;
+ int count;
+};
+
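+/* for_each_thermal_trip() callback: collect valid trip temperatures in degrees C */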
+static int iwl_trip_temp_iter(struct thermal_trip *trip, void *arg)
+{
+ struct iwl_trip_walk_data *twd = arg;
+
+ if (trip->temperature == THERMAL_TEMP_INVALID)
+ return 0;
+
+ twd->thresholds[twd->count++] =
+ cpu_to_le16((s16)(trip->temperature / 1000));
+ return 0;
+}
+#endif
+
+int iwl_mld_config_temp_report_ths(struct iwl_mld *mld)
+{
+ struct temp_report_ths_cmd cmd = {0};
+ int ret;
+#ifdef CONFIG_THERMAL
+ struct iwl_trip_walk_data twd = {
+ .thresholds = cmd.thresholds,
+ .count = 0
+ };
+
+ if (!mld->tzone)
+ goto send;
+
+ /* The thermal core holds an array of temperature trips that are
+ * unsorted and uncompressed, the FW should get it compressed and
+ * sorted.
+ */
+
+	/* compress trips to cmd array, remove uninitialized values */
+ for_each_thermal_trip(mld->tzone, iwl_trip_temp_iter, &twd);
+
+ cmd.num_temps = cpu_to_le32(twd.count);
+ if (twd.count)
+ sort(cmd.thresholds, twd.count, sizeof(s16),
+ compare_temps, NULL);
+
+send:
+#endif
+ lockdep_assert_wiphy(mld->wiphy);
+
+ ret = iwl_mld_send_cmd_pdu(mld, WIDE_ID(PHY_OPS_GROUP,
+ TEMP_REPORTING_THRESHOLDS_CMD),
+ &cmd);
+ if (ret)
+ IWL_ERR(mld, "TEMP_REPORT_THS_CMD command failed (err=%d)\n",
+ ret);
+
+ return ret;
+}
+
+#ifdef CONFIG_THERMAL
+static int iwl_mld_tzone_get_temp(struct thermal_zone_device *device,
+ int *temperature)
+{
+ struct iwl_mld *mld = thermal_zone_device_priv(device);
+ int temp;
+ int ret = 0;
+
+ wiphy_lock(mld->wiphy);
+
+ if (!mld->fw_status.running) {
+ /* Tell the core that there is no valid temperature value to
+ * return, but it need not worry about this.
+ */
+ *temperature = THERMAL_TEMP_INVALID;
+ goto unlock;
+ }
+
+ ret = iwl_mld_get_temp(mld, &temp);
+ if (ret)
+ goto unlock;
+
+ *temperature = temp * 1000;
+unlock:
+ wiphy_unlock(mld->wiphy);
+ return ret;
+}
+
+static int iwl_mld_tzone_set_trip_temp(struct thermal_zone_device *device,
+ const struct thermal_trip *trip,
+ int temp)
+{
+ struct iwl_mld *mld = thermal_zone_device_priv(device);
+ int ret;
+
+ wiphy_lock(mld->wiphy);
+
+ if (!mld->fw_status.running) {
+ ret = -EIO;
+ goto unlock;
+ }
+
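+	/* The firmware expects trip temperatures as s16 degrees Celsius */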
+ if ((temp / 1000) > S16_MAX) {
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ ret = iwl_mld_config_temp_report_ths(mld);
+unlock:
+ wiphy_unlock(mld->wiphy);
+ return ret;
+}
+
+static struct thermal_zone_device_ops tzone_ops = {
+ .get_temp = iwl_mld_tzone_get_temp,
+ .set_trip_temp = iwl_mld_tzone_set_trip_temp,
+};
+
+static void iwl_mld_thermal_zone_register(struct iwl_mld *mld)
+{
+ int ret;
+ char name[16];
+ static atomic_t counter = ATOMIC_INIT(0);
+ struct thermal_trip trips[IWL_MAX_DTS_TRIPS] = {
+ [0 ... IWL_MAX_DTS_TRIPS - 1] = {
+ .temperature = THERMAL_TEMP_INVALID,
+ .type = THERMAL_TRIP_PASSIVE,
+ .flags = THERMAL_TRIP_FLAG_RW_TEMP,
+ },
+ };
+
+ BUILD_BUG_ON(ARRAY_SIZE(name) >= THERMAL_NAME_LENGTH);
+
+ sprintf(name, "iwlwifi_%u", atomic_inc_return(&counter) & 0xFF);
+ mld->tzone =
+ thermal_zone_device_register_with_trips(name, trips,
+ IWL_MAX_DTS_TRIPS,
+ mld, &tzone_ops,
+ NULL, 0, 0);
+ if (IS_ERR(mld->tzone)) {
+ IWL_DEBUG_TEMP(mld,
+ "Failed to register to thermal zone (err = %ld)\n",
+ PTR_ERR(mld->tzone));
+ mld->tzone = NULL;
+ return;
+ }
+
+ ret = thermal_zone_device_enable(mld->tzone);
+ if (ret) {
+ IWL_DEBUG_TEMP(mld, "Failed to enable thermal zone\n");
+ thermal_zone_device_unregister(mld->tzone);
+ }
+}
+
+/* budget in mWatt */
+static const u32 iwl_mld_cdev_budgets[] = {
+ 2400, /* cooling state 0 */
+ 2000, /* cooling state 1 */
+ 1800, /* cooling state 2 */
+ 1600, /* cooling state 3 */
+ 1400, /* cooling state 4 */
+ 1200, /* cooling state 5 */
+ 1000, /* cooling state 6 */
+ 900, /* cooling state 7 */
+ 800, /* cooling state 8 */
+ 700, /* cooling state 9 */
+ 650, /* cooling state 10 */
+ 600, /* cooling state 11 */
+ 550, /* cooling state 12 */
+ 500, /* cooling state 13 */
+ 450, /* cooling state 14 */
+ 400, /* cooling state 15 */
+ 350, /* cooling state 16 */
+ 300, /* cooling state 17 */
+ 250, /* cooling state 18 */
+ 200, /* cooling state 19 */
+ 150, /* cooling state 20 */
+};
+
+int iwl_mld_config_ctdp(struct iwl_mld *mld, u32 state,
+ enum iwl_ctdp_cmd_operation op)
+{
+ struct iwl_ctdp_cmd cmd = {
+ .operation = cpu_to_le32(op),
+ .budget = cpu_to_le32(iwl_mld_cdev_budgets[state]),
+ .window_size = 0,
+ };
+ int ret;
+
+ lockdep_assert_wiphy(mld->wiphy);
+
+ ret = iwl_mld_send_cmd_pdu(mld, WIDE_ID(PHY_OPS_GROUP, CTDP_CONFIG_CMD),
+ &cmd);
+
+ if (ret) {
+ IWL_ERR(mld, "cTDP command failed (err=%d)\n", ret);
+ return ret;
+ }
+
+ if (op == CTDP_CMD_OPERATION_START)
+ mld->cooling_dev.cur_state = state;
+
+ return 0;
+}
+
+static int iwl_mld_tcool_get_max_state(struct thermal_cooling_device *cdev,
+ unsigned long *state)
+{
+ *state = ARRAY_SIZE(iwl_mld_cdev_budgets) - 1;
+
+ return 0;
+}
+
+static int iwl_mld_tcool_get_cur_state(struct thermal_cooling_device *cdev,
+ unsigned long *state)
+{
+ struct iwl_mld *mld = (struct iwl_mld *)(cdev->devdata);
+
+ *state = mld->cooling_dev.cur_state;
+
+ return 0;
+}
+
+static int iwl_mld_tcool_set_cur_state(struct thermal_cooling_device *cdev,
+ unsigned long new_state)
+{
+ struct iwl_mld *mld = (struct iwl_mld *)(cdev->devdata);
+ int ret;
+
+ wiphy_lock(mld->wiphy);
+
+ if (!mld->fw_status.running) {
+ ret = -EIO;
+ goto unlock;
+ }
+
+ if (new_state >= ARRAY_SIZE(iwl_mld_cdev_budgets)) {
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ ret = iwl_mld_config_ctdp(mld, new_state, CTDP_CMD_OPERATION_START);
+
+unlock:
+ wiphy_unlock(mld->wiphy);
+ return ret;
+}
+
+static const struct thermal_cooling_device_ops tcooling_ops = {
+ .get_max_state = iwl_mld_tcool_get_max_state,
+ .get_cur_state = iwl_mld_tcool_get_cur_state,
+ .set_cur_state = iwl_mld_tcool_set_cur_state,
+};
+
+static void iwl_mld_cooling_device_register(struct iwl_mld *mld)
+{
+ char name[] = "iwlwifi";
+
+ BUILD_BUG_ON(ARRAY_SIZE(name) >= THERMAL_NAME_LENGTH);
+
+ mld->cooling_dev.cdev =
+ thermal_cooling_device_register(name,
+ mld,
+ &tcooling_ops);
+
+ if (IS_ERR(mld->cooling_dev.cdev)) {
+ IWL_DEBUG_TEMP(mld,
+ "Failed to register to cooling device (err = %ld)\n",
+ PTR_ERR(mld->cooling_dev.cdev));
+ mld->cooling_dev.cdev = NULL;
+ return;
+ }
+}
+
+static void iwl_mld_thermal_zone_unregister(struct iwl_mld *mld)
+{
+ if (!mld->tzone)
+ return;
+
+ IWL_DEBUG_TEMP(mld, "Thermal zone device unregister\n");
+	thermal_zone_device_unregister(mld->tzone);
+	mld->tzone = NULL;
+}
+
+static void iwl_mld_cooling_device_unregister(struct iwl_mld *mld)
+{
+ if (!mld->cooling_dev.cdev)
+ return;
+
+ IWL_DEBUG_TEMP(mld, "Cooling device unregister\n");
+	thermal_cooling_device_unregister(mld->cooling_dev.cdev);
+	mld->cooling_dev.cdev = NULL;
+}
+#endif /* CONFIG_THERMAL */
+
+void iwl_mld_thermal_initialize(struct iwl_mld *mld)
+{
+ wiphy_delayed_work_init(&mld->ct_kill_exit_wk, iwl_mld_exit_ctkill);
+
+#ifdef CONFIG_THERMAL
+ iwl_mld_cooling_device_register(mld);
+ iwl_mld_thermal_zone_register(mld);
+#endif
+}
+
+void iwl_mld_thermal_exit(struct iwl_mld *mld)
+{
+ wiphy_delayed_work_cancel(mld->wiphy, &mld->ct_kill_exit_wk);
+
+#ifdef CONFIG_THERMAL
+ iwl_mld_cooling_device_unregister(mld);
+ iwl_mld_thermal_zone_unregister(mld);
+#endif
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/thermal.h b/drivers/net/wireless/intel/iwlwifi/mld/thermal.h
new file mode 100644
index 000000000000..8c8893331b05
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mld/thermal.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2024 Intel Corporation
+ */
+#ifndef __iwl_mld_thermal_h__
+#define __iwl_mld_thermal_h__
+
+#include "iwl-trans.h"
+
+struct iwl_mld;
+
+#ifdef CONFIG_THERMAL
+#include <linux/thermal.h>
+
+/*
+ * struct iwl_mld_cooling_device
+ * @cur_state: current state
+ * @cdev: struct thermal cooling device
+ */
+struct iwl_mld_cooling_device {
+ u32 cur_state;
+ struct thermal_cooling_device *cdev;
+};
+
+int iwl_mld_config_ctdp(struct iwl_mld *mld, u32 state,
+ enum iwl_ctdp_cmd_operation op);
+#endif
+
+void iwl_mld_handle_temp_notif(struct iwl_mld *mld, struct iwl_rx_packet *pkt);
+void iwl_mld_handle_ct_kill_notif(struct iwl_mld *mld,
+ struct iwl_rx_packet *pkt);
+int iwl_mld_config_temp_report_ths(struct iwl_mld *mld);
+void iwl_mld_thermal_initialize(struct iwl_mld *mld);
+void iwl_mld_thermal_exit(struct iwl_mld *mld);
+
+#endif /* __iwl_mld_thermal_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/time_sync.c b/drivers/net/wireless/intel/iwlwifi/mld/time_sync.c
new file mode 100644
index 000000000000..50799f9bfccb
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mld/time_sync.c
@@ -0,0 +1,240 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2025 Intel Corporation
+ */
+
+#include "mld.h"
+#include "hcmd.h"
+#include "ptp.h"
+#include "time_sync.h"
+#include <linux/ieee80211.h>
+
+static int iwl_mld_init_time_sync(struct iwl_mld *mld, u32 protocols,
+ const u8 *addr)
+{
+ struct iwl_mld_time_sync_data *time_sync = kzalloc(sizeof(*time_sync),
+ GFP_KERNEL);
+
+ if (!time_sync)
+ return -ENOMEM;
+
+ time_sync->active_protocols = protocols;
+ ether_addr_copy(time_sync->peer_addr, addr);
+ skb_queue_head_init(&time_sync->frame_list);
+ rcu_assign_pointer(mld->time_sync, time_sync);
+
+ return 0;
+}
+
+int iwl_mld_time_sync_fw_config(struct iwl_mld *mld)
+{
+ struct iwl_time_sync_cfg_cmd cmd = {};
+ struct iwl_mld_time_sync_data *time_sync;
+ int err;
+
+ time_sync = wiphy_dereference(mld->wiphy, mld->time_sync);
+ if (!time_sync)
+ return -EINVAL;
+
+ cmd.protocols = cpu_to_le32(time_sync->active_protocols);
+ ether_addr_copy(cmd.peer_addr, time_sync->peer_addr);
+
+ err = iwl_mld_send_cmd_pdu(mld,
+ WIDE_ID(DATA_PATH_GROUP,
+ WNM_80211V_TIMING_MEASUREMENT_CONFIG_CMD),
+ &cmd);
+ if (err)
+ IWL_ERR(mld, "Failed to send time sync cfg cmd: %d\n", err);
+
+ return err;
+}
+
+int iwl_mld_time_sync_config(struct iwl_mld *mld, const u8 *addr, u32 protocols)
+{
+ struct iwl_mld_time_sync_data *time_sync;
+ int err;
+
+ time_sync = wiphy_dereference(mld->wiphy, mld->time_sync);
+
+ /* The fw only supports one peer. We do allow reconfiguration of the
+ * same peer for cases of fw reset etc.
+ */
+ if (time_sync && time_sync->active_protocols &&
+ !ether_addr_equal(addr, time_sync->peer_addr)) {
+ IWL_DEBUG_INFO(mld, "Time sync: reject config for peer: %pM\n",
+ addr);
+ return -ENOBUFS;
+ }
+
+ if (protocols & ~(IWL_TIME_SYNC_PROTOCOL_TM |
+ IWL_TIME_SYNC_PROTOCOL_FTM))
+ return -EINVAL;
+
+ IWL_DEBUG_INFO(mld, "Time sync: set peer addr=%pM\n", addr);
+
+ iwl_mld_deinit_time_sync(mld);
+ err = iwl_mld_init_time_sync(mld, protocols, addr);
+ if (err)
+ return err;
+
+ err = iwl_mld_time_sync_fw_config(mld);
+ return err;
+}
+
+void iwl_mld_deinit_time_sync(struct iwl_mld *mld)
+{
+ struct iwl_mld_time_sync_data *time_sync =
+ wiphy_dereference(mld->wiphy, mld->time_sync);
+
+ if (!time_sync)
+ return;
+
+ RCU_INIT_POINTER(mld->time_sync, NULL);
+ skb_queue_purge(&time_sync->frame_list);
+ kfree_rcu(time_sync, rcu_head);
+}
+
+bool iwl_mld_time_sync_frame(struct iwl_mld *mld, struct sk_buff *skb, u8 *addr)
+{
+ struct iwl_mld_time_sync_data *time_sync;
+
+ rcu_read_lock();
+ time_sync = rcu_dereference(mld->time_sync);
+ if (time_sync && ether_addr_equal(time_sync->peer_addr, addr) &&
+ (ieee80211_is_timing_measurement(skb) || ieee80211_is_ftm(skb))) {
+ skb_queue_tail(&time_sync->frame_list, skb);
+ rcu_read_unlock();
+ return true;
+ }
+ rcu_read_unlock();
+
+ return false;
+}
+
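+/* Match a queued frame to a notification by peer address and dialog token */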
+static bool iwl_mld_is_skb_match(struct sk_buff *skb, u8 *addr, u8 dialog_token)
+{
+ struct ieee80211_mgmt *mgmt = (void *)skb->data;
+ u8 skb_dialog_token;
+
+ if (ieee80211_is_timing_measurement(skb))
+ skb_dialog_token = mgmt->u.action.u.wnm_timing_msr.dialog_token;
+ else
+ skb_dialog_token = mgmt->u.action.u.ftm.dialog_token;
+
+ if ((ether_addr_equal(mgmt->sa, addr) ||
+ ether_addr_equal(mgmt->da, addr)) &&
+ skb_dialog_token == dialog_token)
+ return true;
+
+ return false;
+}
+
+static struct sk_buff *iwl_mld_time_sync_find_skb(struct iwl_mld *mld, u8 *addr,
+ u8 dialog_token)
+{
+ struct iwl_mld_time_sync_data *time_sync;
+ struct sk_buff *skb;
+
+ rcu_read_lock();
+
+ time_sync = rcu_dereference(mld->time_sync);
+ if (IWL_FW_CHECK(mld, !time_sync,
+ "Time sync notification but time sync is not initialized\n")) {
+ rcu_read_unlock();
+ return NULL;
+ }
+
+ /* The notifications are expected to arrive in the same order of the
+ * frames. If the incoming notification doesn't match the first SKB
+ * in the queue, it means there was no time sync notification for this
+ * SKB and it can be dropped.
+ */
+ while ((skb = skb_dequeue(&time_sync->frame_list))) {
+ if (iwl_mld_is_skb_match(skb, addr, dialog_token))
+ break;
+
+ kfree_skb(skb);
+ skb = NULL;
+ IWL_DEBUG_DROP(mld,
+ "Time sync: drop SKB without matching notification\n");
+ }
+ rcu_read_unlock();
+
+ return skb;
+}
+
+static u64 iwl_mld_get_64_bit(__le32 high, __le32 low)
+{
+ return ((u64)le32_to_cpu(high) << 32) | le32_to_cpu(low);
+}
+
+void iwl_mld_handle_time_msmt_notif(struct iwl_mld *mld,
+ struct iwl_rx_packet *pkt)
+{
+ struct ptp_data *data = &mld->ptp_data;
+ struct iwl_time_msmt_notify *notif = (void *)pkt->data;
+ struct ieee80211_rx_status *rx_status;
+ struct skb_shared_hwtstamps *shwt;
+ u64 ts_10ns;
+ struct sk_buff *skb =
+ iwl_mld_time_sync_find_skb(mld, notif->peer_addr,
+ le32_to_cpu(notif->dialog_token));
+ u64 adj_time;
+
+ if (IWL_FW_CHECK(mld, !skb, "Time sync event but no pending skb\n"))
+ return;
+
+ spin_lock_bh(&data->lock);
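+	/* t2/t3 arrive in 10 ns units; scale to ns before the PTP adjustment */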
+ ts_10ns = iwl_mld_get_64_bit(notif->t2_hi, notif->t2_lo);
+ adj_time = iwl_mld_ptp_get_adj_time(mld, ts_10ns * 10);
+ shwt = skb_hwtstamps(skb);
+ shwt->hwtstamp = ktime_set(0, adj_time);
+
+ ts_10ns = iwl_mld_get_64_bit(notif->t3_hi, notif->t3_lo);
+ adj_time = iwl_mld_ptp_get_adj_time(mld, ts_10ns * 10);
+ rx_status = IEEE80211_SKB_RXCB(skb);
+ rx_status->ack_tx_hwtstamp = ktime_set(0, adj_time);
+ spin_unlock_bh(&data->lock);
+
+ IWL_DEBUG_INFO(mld,
+ "Time sync: RX event - report frame t2=%llu t3=%llu\n",
+ ktime_to_ns(shwt->hwtstamp),
+ ktime_to_ns(rx_status->ack_tx_hwtstamp));
+ ieee80211_rx_napi(mld->hw, NULL, skb, NULL);
+}
+
+void iwl_mld_handle_time_sync_confirm_notif(struct iwl_mld *mld,
+ struct iwl_rx_packet *pkt)
+{
+ struct ptp_data *data = &mld->ptp_data;
+ struct iwl_time_msmt_cfm_notify *notif = (void *)pkt->data;
+ struct ieee80211_tx_status status = {};
+ struct skb_shared_hwtstamps *shwt;
+ u64 ts_10ns, adj_time;
+
+ status.skb =
+ iwl_mld_time_sync_find_skb(mld, notif->peer_addr,
+ le32_to_cpu(notif->dialog_token));
+
+ if (IWL_FW_CHECK(mld, !status.skb,
+ "Time sync confirm but no pending skb\n"))
+ return;
+
+ spin_lock_bh(&data->lock);
+ ts_10ns = iwl_mld_get_64_bit(notif->t1_hi, notif->t1_lo);
+ adj_time = iwl_mld_ptp_get_adj_time(mld, ts_10ns * 10);
+ shwt = skb_hwtstamps(status.skb);
+ shwt->hwtstamp = ktime_set(0, adj_time);
+
+ ts_10ns = iwl_mld_get_64_bit(notif->t4_hi, notif->t4_lo);
+ adj_time = iwl_mld_ptp_get_adj_time(mld, ts_10ns * 10);
+ status.info = IEEE80211_SKB_CB(status.skb);
+ status.ack_hwtstamp = ktime_set(0, adj_time);
+ spin_unlock_bh(&data->lock);
+
+ IWL_DEBUG_INFO(mld,
+ "Time sync: TX event - report frame t1=%llu t4=%llu\n",
+ ktime_to_ns(shwt->hwtstamp),
+ ktime_to_ns(status.ack_hwtstamp));
+ ieee80211_tx_status_ext(mld->hw, &status);
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/time_sync.h b/drivers/net/wireless/intel/iwlwifi/mld/time_sync.h
new file mode 100644
index 000000000000..2d4c5512e961
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mld/time_sync.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2025 Intel Corporation
+ */
+#ifndef __iwl_mld_time_sync_h__
+#define __iwl_mld_time_sync_h__
+
+struct iwl_mld_time_sync_data {
+ struct rcu_head rcu_head;
+ u8 peer_addr[ETH_ALEN];
+ u32 active_protocols;
+ struct sk_buff_head frame_list;
+};
+
+int iwl_mld_time_sync_config(struct iwl_mld *mld, const u8 *addr,
+ u32 protocols);
+int iwl_mld_time_sync_fw_config(struct iwl_mld *mld);
+void iwl_mld_deinit_time_sync(struct iwl_mld *mld);
+void iwl_mld_handle_time_msmt_notif(struct iwl_mld *mld,
+ struct iwl_rx_packet *pkt);
+bool iwl_mld_time_sync_frame(struct iwl_mld *mld, struct sk_buff *skb,
+ u8 *addr);
+void iwl_mld_handle_time_sync_confirm_notif(struct iwl_mld *mld,
+ struct iwl_rx_packet *pkt);
+
+#endif /* __iwl_mld_time_sync_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/tlc.c b/drivers/net/wireless/intel/iwlwifi/mld/tlc.c
new file mode 100644
index 000000000000..f054cc921d9d
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mld/tlc.c
@@ -0,0 +1,700 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2024-2025 Intel Corporation
+ */
+
+#include <net/mac80211.h>
+
+#include "tlc.h"
+#include "hcmd.h"
+#include "sta.h"
+
+#include "fw/api/rs.h"
+#include "fw/api/context.h"
+#include "fw/api/dhc.h"
+
+static u8 iwl_mld_fw_bw_from_sta_bw(const struct ieee80211_link_sta *link_sta)
+{
+ switch (link_sta->bandwidth) {
+ case IEEE80211_STA_RX_BW_320:
+ return IWL_TLC_MNG_CH_WIDTH_320MHZ;
+ case IEEE80211_STA_RX_BW_160:
+ return IWL_TLC_MNG_CH_WIDTH_160MHZ;
+ case IEEE80211_STA_RX_BW_80:
+ return IWL_TLC_MNG_CH_WIDTH_80MHZ;
+ case IEEE80211_STA_RX_BW_40:
+ return IWL_TLC_MNG_CH_WIDTH_40MHZ;
+ case IEEE80211_STA_RX_BW_20:
+ default:
+ return IWL_TLC_MNG_CH_WIDTH_20MHZ;
+ }
+}
+
+static __le16
+iwl_mld_get_tlc_cmd_flags(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ struct ieee80211_link_sta *link_sta,
+ const struct ieee80211_sta_he_cap *own_he_cap,
+ const struct ieee80211_sta_eht_cap *own_eht_cap)
+{
+ struct ieee80211_sta_ht_cap *ht_cap = &link_sta->ht_cap;
+ struct ieee80211_sta_vht_cap *vht_cap = &link_sta->vht_cap;
+ struct ieee80211_sta_he_cap *he_cap = &link_sta->he_cap;
+ bool has_vht = vht_cap->vht_supported;
+ u16 flags = 0;
+
+ /* STBC flags */
+ if (mld->cfg->ht_params->stbc &&
+ (hweight8(iwl_mld_get_valid_tx_ant(mld)) > 1)) {
+ if (he_cap->has_he && he_cap->he_cap_elem.phy_cap_info[2] &
+ IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ)
+ flags |= IWL_TLC_MNG_CFG_FLAGS_STBC_MSK;
+ else if (vht_cap->cap & IEEE80211_VHT_CAP_RXSTBC_MASK)
+ flags |= IWL_TLC_MNG_CFG_FLAGS_STBC_MSK;
+ else if (ht_cap->cap & IEEE80211_HT_CAP_RX_STBC)
+ flags |= IWL_TLC_MNG_CFG_FLAGS_STBC_MSK;
+ }
+
+ /* LDPC */
+ if (mld->cfg->ht_params->ldpc &&
+ ((ht_cap->cap & IEEE80211_HT_CAP_LDPC_CODING) ||
+ (has_vht && (vht_cap->cap & IEEE80211_VHT_CAP_RXLDPC))))
+ flags |= IWL_TLC_MNG_CFG_FLAGS_LDPC_MSK;
+
+ if (he_cap->has_he && (he_cap->he_cap_elem.phy_cap_info[1] &
+ IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD))
+ flags |= IWL_TLC_MNG_CFG_FLAGS_LDPC_MSK;
+
+ if (own_he_cap &&
+ !(own_he_cap->he_cap_elem.phy_cap_info[1] &
+ IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD))
+ flags &= ~IWL_TLC_MNG_CFG_FLAGS_LDPC_MSK;
+
+ /* DCM */
+ if (he_cap->has_he &&
+ (he_cap->he_cap_elem.phy_cap_info[3] &
+ IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_MASK &&
+ own_he_cap &&
+ own_he_cap->he_cap_elem.phy_cap_info[3] &
+ IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_MASK))
+ flags |= IWL_TLC_MNG_CFG_FLAGS_HE_DCM_NSS_1_MSK;
+
+ /* Extra EHT LTF */
+ if (own_eht_cap &&
+ own_eht_cap->eht_cap_elem.phy_cap_info[5] &
+ IEEE80211_EHT_PHY_CAP5_SUPP_EXTRA_EHT_LTF &&
+ link_sta->eht_cap.has_eht &&
+ link_sta->eht_cap.eht_cap_elem.phy_cap_info[5] &
+ IEEE80211_EHT_PHY_CAP5_SUPP_EXTRA_EHT_LTF) {
+ flags |= IWL_TLC_MNG_CFG_FLAGS_EHT_EXTRA_LTF_MSK;
+ }
+
+ return cpu_to_le16(flags);
+}
+
+static u8 iwl_mld_get_fw_chains(struct iwl_mld *mld)
+{
+ u8 chains = iwl_mld_get_valid_tx_ant(mld);
+ u8 fw_chains = 0;
+
+ if (chains & ANT_A)
+ fw_chains |= IWL_TLC_MNG_CHAIN_A_MSK;
+ if (chains & ANT_B)
+ fw_chains |= IWL_TLC_MNG_CHAIN_B_MSK;
+
+ return fw_chains;
+}
+
+static u8 iwl_mld_get_fw_sgi(struct ieee80211_link_sta *link_sta)
+{
+ struct ieee80211_sta_ht_cap *ht_cap = &link_sta->ht_cap;
+ struct ieee80211_sta_vht_cap *vht_cap = &link_sta->vht_cap;
+ struct ieee80211_sta_he_cap *he_cap = &link_sta->he_cap;
+ u8 sgi_chwidths = 0;
+
+ /* If the association supports HE, HT/VHT rates will never be used for
+	 * Tx and therefore there's no need to set the
+ * sgi-per-channel-width-support bits
+ */
+ if (he_cap->has_he)
+ return 0;
+
+ if (ht_cap->cap & IEEE80211_HT_CAP_SGI_20)
+ sgi_chwidths |= BIT(IWL_TLC_MNG_CH_WIDTH_20MHZ);
+ if (ht_cap->cap & IEEE80211_HT_CAP_SGI_40)
+ sgi_chwidths |= BIT(IWL_TLC_MNG_CH_WIDTH_40MHZ);
+ if (vht_cap->cap & IEEE80211_VHT_CAP_SHORT_GI_80)
+ sgi_chwidths |= BIT(IWL_TLC_MNG_CH_WIDTH_80MHZ);
+ if (vht_cap->cap & IEEE80211_VHT_CAP_SHORT_GI_160)
+ sgi_chwidths |= BIT(IWL_TLC_MNG_CH_WIDTH_160MHZ);
+
+ return sgi_chwidths;
+}
+
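+/* Map the 2-bit per-NSS VHT RX MCS map entry to the highest FW rate */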
+static int
+iwl_mld_get_highest_fw_mcs(const struct ieee80211_sta_vht_cap *vht_cap,
+ int nss)
+{
+ u16 rx_mcs = le16_to_cpu(vht_cap->vht_mcs.rx_mcs_map) &
+ (0x3 << (2 * (nss - 1)));
+ rx_mcs >>= (2 * (nss - 1));
+
+ switch (rx_mcs) {
+ case IEEE80211_VHT_MCS_SUPPORT_0_7:
+ return IWL_TLC_MNG_HT_RATE_MCS7;
+ case IEEE80211_VHT_MCS_SUPPORT_0_8:
+ return IWL_TLC_MNG_HT_RATE_MCS8;
+ case IEEE80211_VHT_MCS_SUPPORT_0_9:
+ return IWL_TLC_MNG_HT_RATE_MCS9;
+ default:
+ WARN_ON_ONCE(1);
+ break;
+ }
+
+ return 0;
+}
+
+static void
+iwl_mld_fill_vht_rates(const struct ieee80211_link_sta *link_sta,
+ const struct ieee80211_sta_vht_cap *vht_cap,
+ struct iwl_tlc_config_cmd_v4 *cmd)
+{
+ u16 supp;
+ int i, highest_mcs;
+ u8 max_nss = link_sta->rx_nss;
+ struct ieee80211_vht_cap ieee_vht_cap = {
+ .vht_cap_info = cpu_to_le32(vht_cap->cap),
+ .supp_mcs = vht_cap->vht_mcs,
+ };
+
+	/* the station supports only a single receive chain */
+ if (link_sta->smps_mode == IEEE80211_SMPS_STATIC)
+ max_nss = 1;
+
+ for (i = 0; i < max_nss && i < IWL_TLC_NSS_MAX; i++) {
+ int nss = i + 1;
+
+ highest_mcs = iwl_mld_get_highest_fw_mcs(vht_cap, nss);
+ if (!highest_mcs)
+ continue;
+
+ supp = BIT(highest_mcs + 1) - 1;
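+		/* VHT MCS 9 is generally not valid in 20 MHz, drop it */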
+ if (link_sta->bandwidth == IEEE80211_STA_RX_BW_20)
+ supp &= ~BIT(IWL_TLC_MNG_HT_RATE_MCS9);
+
+ cmd->ht_rates[i][IWL_TLC_MCS_PER_BW_80] = cpu_to_le16(supp);
+ /* Check if VHT extended NSS indicates that the bandwidth/NSS
+ * configuration is supported - only for MCS 0 since we already
+ * decoded the MCS bits anyway ourselves.
+ */
+ if (link_sta->bandwidth == IEEE80211_STA_RX_BW_160 &&
+ ieee80211_get_vht_max_nss(&ieee_vht_cap,
+ IEEE80211_VHT_CHANWIDTH_160MHZ,
+ 0, true, nss) >= nss)
+ cmd->ht_rates[i][IWL_TLC_MCS_PER_BW_160] =
+ cmd->ht_rates[i][IWL_TLC_MCS_PER_BW_80];
+ }
+}
+
+static u16 iwl_mld_he_mac80211_mcs_to_fw_mcs(u16 mcs)
+{
+ switch (mcs) {
+ case IEEE80211_HE_MCS_SUPPORT_0_7:
+ return BIT(IWL_TLC_MNG_HT_RATE_MCS7 + 1) - 1;
+ case IEEE80211_HE_MCS_SUPPORT_0_9:
+ return BIT(IWL_TLC_MNG_HT_RATE_MCS9 + 1) - 1;
+ case IEEE80211_HE_MCS_SUPPORT_0_11:
+ return BIT(IWL_TLC_MNG_HT_RATE_MCS11 + 1) - 1;
+ case IEEE80211_HE_MCS_NOT_SUPPORTED:
+ return 0;
+ }
+
+ WARN(1, "invalid HE MCS %d\n", mcs);
+ return 0;
+}
+
+static void
+iwl_mld_fill_he_rates(const struct ieee80211_link_sta *link_sta,
+ const struct ieee80211_sta_he_cap *own_he_cap,
+ struct iwl_tlc_config_cmd_v4 *cmd)
+{
+ const struct ieee80211_sta_he_cap *he_cap = &link_sta->he_cap;
+ u16 mcs_160 = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_160);
+ u16 mcs_80 = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_80);
+ u16 tx_mcs_80 = le16_to_cpu(own_he_cap->he_mcs_nss_supp.tx_mcs_80);
+ u16 tx_mcs_160 = le16_to_cpu(own_he_cap->he_mcs_nss_supp.tx_mcs_160);
+ int i;
+ u8 nss = link_sta->rx_nss;
+
+	/* the station supports only a single receive chain */
+ if (link_sta->smps_mode == IEEE80211_SMPS_STATIC)
+ nss = 1;
+
+ for (i = 0; i < nss && i < IWL_TLC_NSS_MAX; i++) {
+ u16 _mcs_160 = (mcs_160 >> (2 * i)) & 0x3;
+ u16 _mcs_80 = (mcs_80 >> (2 * i)) & 0x3;
+ u16 _tx_mcs_160 = (tx_mcs_160 >> (2 * i)) & 0x3;
+ u16 _tx_mcs_80 = (tx_mcs_80 >> (2 * i)) & 0x3;
+
+ /* If one side doesn't support - mark both as not supporting */
+ if (_mcs_80 == IEEE80211_HE_MCS_NOT_SUPPORTED ||
+ _tx_mcs_80 == IEEE80211_HE_MCS_NOT_SUPPORTED) {
+ _mcs_80 = IEEE80211_HE_MCS_NOT_SUPPORTED;
+ _tx_mcs_80 = IEEE80211_HE_MCS_NOT_SUPPORTED;
+ }
+ if (_mcs_80 > _tx_mcs_80)
+ _mcs_80 = _tx_mcs_80;
+ cmd->ht_rates[i][IWL_TLC_MCS_PER_BW_80] =
+ cpu_to_le16(iwl_mld_he_mac80211_mcs_to_fw_mcs(_mcs_80));
+
+ /* If one side doesn't support - mark both as not supporting */
+ if (_mcs_160 == IEEE80211_HE_MCS_NOT_SUPPORTED ||
+ _tx_mcs_160 == IEEE80211_HE_MCS_NOT_SUPPORTED) {
+ _mcs_160 = IEEE80211_HE_MCS_NOT_SUPPORTED;
+ _tx_mcs_160 = IEEE80211_HE_MCS_NOT_SUPPORTED;
+ }
+ if (_mcs_160 > _tx_mcs_160)
+ _mcs_160 = _tx_mcs_160;
+ cmd->ht_rates[i][IWL_TLC_MCS_PER_BW_160] =
+ cpu_to_le16(iwl_mld_he_mac80211_mcs_to_fw_mcs(_mcs_160));
+ }
+}
+
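+/* Enable the given MCS mask for every NSS up to max_nss */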
+static void iwl_mld_set_eht_mcs(__le16 ht_rates[][3],
+ enum IWL_TLC_MCS_PER_BW bw,
+ u8 max_nss, u16 mcs_msk)
+{
+ if (max_nss >= 2)
+ ht_rates[IWL_TLC_NSS_2][bw] |= cpu_to_le16(mcs_msk);
+
+ if (max_nss >= 1)
+ ht_rates[IWL_TLC_NSS_1][bw] |= cpu_to_le16(mcs_msk);
+}
+
+static const
+struct ieee80211_eht_mcs_nss_supp_bw *
+iwl_mld_get_eht_mcs_of_bw(enum IWL_TLC_MCS_PER_BW bw,
+ const struct ieee80211_eht_mcs_nss_supp *eht_mcs)
+{
+ switch (bw) {
+ case IWL_TLC_MCS_PER_BW_80:
+ return &eht_mcs->bw._80;
+ case IWL_TLC_MCS_PER_BW_160:
+ return &eht_mcs->bw._160;
+ case IWL_TLC_MCS_PER_BW_320:
+ return &eht_mcs->bw._320;
+ default:
+ return NULL;
+ }
+}
+
+static u8 iwl_mld_get_eht_max_nss(u8 rx_nss, u8 tx_nss)
+{
+ u8 tx = u8_get_bits(tx_nss, IEEE80211_EHT_MCS_NSS_TX);
+ u8 rx = u8_get_bits(rx_nss, IEEE80211_EHT_MCS_NSS_RX);
+	/* The max NSS that can be used is the min of our TX capability and
+	 * the peer's RX capability.
+	 */
+ return min(tx, rx);
+}
+
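+/* Max NSS for an MCS group: min of the peer's RX map and our TX map */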
+#define MAX_NSS_MCS(mcs_num, rx, tx) \
+ iwl_mld_get_eht_max_nss((rx)->rx_tx_mcs ##mcs_num## _max_nss, \
+ (tx)->rx_tx_mcs ##mcs_num## _max_nss)
+
+static void
+iwl_mld_fill_eht_rates(struct ieee80211_vif *vif,
+ const struct ieee80211_link_sta *link_sta,
+ const struct ieee80211_sta_he_cap *own_he_cap,
+ const struct ieee80211_sta_eht_cap *own_eht_cap,
+ struct iwl_tlc_config_cmd_v4 *cmd)
+{
+ /* peer RX mcs capa */
+ const struct ieee80211_eht_mcs_nss_supp *eht_rx_mcs =
+ &link_sta->eht_cap.eht_mcs_nss_supp;
+ /* our TX mcs capa */
+ const struct ieee80211_eht_mcs_nss_supp *eht_tx_mcs =
+ &own_eht_cap->eht_mcs_nss_supp;
+
+ enum IWL_TLC_MCS_PER_BW bw;
+ struct ieee80211_eht_mcs_nss_supp_20mhz_only mcs_rx_20;
+ struct ieee80211_eht_mcs_nss_supp_20mhz_only mcs_tx_20;
+
+ /* peer is 20 MHz only */
+ if (vif->type == NL80211_IFTYPE_AP &&
+ !(link_sta->he_cap.he_cap_elem.phy_cap_info[0] &
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_MASK_ALL)) {
+ mcs_rx_20 = eht_rx_mcs->only_20mhz;
+ } else {
+ mcs_rx_20.rx_tx_mcs7_max_nss =
+ eht_rx_mcs->bw._80.rx_tx_mcs9_max_nss;
+ mcs_rx_20.rx_tx_mcs9_max_nss =
+ eht_rx_mcs->bw._80.rx_tx_mcs9_max_nss;
+ mcs_rx_20.rx_tx_mcs11_max_nss =
+ eht_rx_mcs->bw._80.rx_tx_mcs11_max_nss;
+ mcs_rx_20.rx_tx_mcs13_max_nss =
+ eht_rx_mcs->bw._80.rx_tx_mcs13_max_nss;
+ }
+
+ /* NIC is capable of 20 MHz only */
+ if (!(own_he_cap->he_cap_elem.phy_cap_info[0] &
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_MASK_ALL)) {
+ mcs_tx_20 = eht_tx_mcs->only_20mhz;
+ } else {
+ mcs_tx_20.rx_tx_mcs7_max_nss =
+ eht_tx_mcs->bw._80.rx_tx_mcs9_max_nss;
+ mcs_tx_20.rx_tx_mcs9_max_nss =
+ eht_tx_mcs->bw._80.rx_tx_mcs9_max_nss;
+ mcs_tx_20.rx_tx_mcs11_max_nss =
+ eht_tx_mcs->bw._80.rx_tx_mcs11_max_nss;
+ mcs_tx_20.rx_tx_mcs13_max_nss =
+ eht_tx_mcs->bw._80.rx_tx_mcs13_max_nss;
+ }
+
+ /* rates for 20/40/80 MHz */
+ bw = IWL_TLC_MCS_PER_BW_80;
+ iwl_mld_set_eht_mcs(cmd->ht_rates, bw,
+ MAX_NSS_MCS(7, &mcs_rx_20, &mcs_tx_20),
+ GENMASK(7, 0));
+ iwl_mld_set_eht_mcs(cmd->ht_rates, bw,
+ MAX_NSS_MCS(9, &mcs_rx_20, &mcs_tx_20),
+ GENMASK(9, 8));
+ iwl_mld_set_eht_mcs(cmd->ht_rates, bw,
+ MAX_NSS_MCS(11, &mcs_rx_20, &mcs_tx_20),
+ GENMASK(11, 10));
+ iwl_mld_set_eht_mcs(cmd->ht_rates, bw,
+ MAX_NSS_MCS(13, &mcs_rx_20, &mcs_tx_20),
+ GENMASK(13, 12));
+
+ /* rates for 160/320 MHz */
+ for (bw = IWL_TLC_MCS_PER_BW_160; bw <= IWL_TLC_MCS_PER_BW_320; bw++) {
+ const struct ieee80211_eht_mcs_nss_supp_bw *mcs_rx =
+ iwl_mld_get_eht_mcs_of_bw(bw, eht_rx_mcs);
+ const struct ieee80211_eht_mcs_nss_supp_bw *mcs_tx =
+ iwl_mld_get_eht_mcs_of_bw(bw, eht_tx_mcs);
+
+ /* got unsupported index for bw */
+ if (!mcs_rx || !mcs_tx)
+ continue;
+
+ /* break out if we don't support the bandwidth */
+ if (cmd->max_ch_width < (bw + IWL_TLC_MNG_CH_WIDTH_80MHZ))
+ break;
+
+ iwl_mld_set_eht_mcs(cmd->ht_rates, bw,
+ MAX_NSS_MCS(9, mcs_rx, mcs_tx),
+ GENMASK(9, 0));
+ iwl_mld_set_eht_mcs(cmd->ht_rates, bw,
+ MAX_NSS_MCS(11, mcs_rx, mcs_tx),
+ GENMASK(11, 10));
+ iwl_mld_set_eht_mcs(cmd->ht_rates, bw,
+ MAX_NSS_MCS(13, mcs_rx, mcs_tx),
+ GENMASK(13, 12));
+ }
+
+	/* the station supports only a single receive chain */
+ if (link_sta->smps_mode == IEEE80211_SMPS_STATIC ||
+ link_sta->rx_nss < 2)
+ memset(cmd->ht_rates[IWL_TLC_NSS_2], 0,
+ sizeof(cmd->ht_rates[IWL_TLC_NSS_2]));
+}
+
+static void
+iwl_mld_fill_supp_rates(struct iwl_mld *mld, struct ieee80211_vif *vif,
+ struct ieee80211_link_sta *link_sta,
+ struct ieee80211_supported_band *sband,
+ const struct ieee80211_sta_he_cap *own_he_cap,
+ const struct ieee80211_sta_eht_cap *own_eht_cap,
+ struct iwl_tlc_config_cmd_v4 *cmd)
+{
+ int i;
+ u16 non_ht_rates = 0;
+ unsigned long rates_bitmap;
+ const struct ieee80211_sta_ht_cap *ht_cap = &link_sta->ht_cap;
+ const struct ieee80211_sta_vht_cap *vht_cap = &link_sta->vht_cap;
+ const struct ieee80211_sta_he_cap *he_cap = &link_sta->he_cap;
+
+ /* non HT rates */
+ rates_bitmap = link_sta->supp_rates[sband->band];
+ for_each_set_bit(i, &rates_bitmap, BITS_PER_LONG)
+ non_ht_rates |= BIT(sband->bitrates[i].hw_value);
+
+ cmd->non_ht_rates = cpu_to_le16(non_ht_rates);
+ cmd->mode = IWL_TLC_MNG_MODE_NON_HT;
+
+ if (link_sta->eht_cap.has_eht && own_he_cap && own_eht_cap) {
+ cmd->mode = IWL_TLC_MNG_MODE_EHT;
+ iwl_mld_fill_eht_rates(vif, link_sta, own_he_cap,
+ own_eht_cap, cmd);
+ } else if (he_cap->has_he && own_he_cap) {
+ cmd->mode = IWL_TLC_MNG_MODE_HE;
+ iwl_mld_fill_he_rates(link_sta, own_he_cap, cmd);
+ } else if (vht_cap->vht_supported) {
+ cmd->mode = IWL_TLC_MNG_MODE_VHT;
+ iwl_mld_fill_vht_rates(link_sta, vht_cap, cmd);
+ } else if (ht_cap->ht_supported) {
+ cmd->mode = IWL_TLC_MNG_MODE_HT;
+ cmd->ht_rates[IWL_TLC_NSS_1][IWL_TLC_MCS_PER_BW_80] =
+ cpu_to_le16(ht_cap->mcs.rx_mask[0]);
+
+ /* the station supports only a single receive chain */
+ if (link_sta->smps_mode == IEEE80211_SMPS_STATIC)
+ cmd->ht_rates[IWL_TLC_NSS_2][IWL_TLC_MCS_PER_BW_80] =
+ 0;
+ else
+ cmd->ht_rates[IWL_TLC_NSS_2][IWL_TLC_MCS_PER_BW_80] =
+ cpu_to_le16(ht_cap->mcs.rx_mask[1]);
+ }
+}
+
+static void iwl_mld_send_tlc_cmd(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ struct ieee80211_link_sta *link_sta,
+ enum nl80211_band band)
+{
+ struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(link_sta->sta);
+ struct ieee80211_supported_band *sband = mld->hw->wiphy->bands[band];
+ const struct ieee80211_sta_he_cap *own_he_cap =
+ ieee80211_get_he_iftype_cap_vif(sband, vif);
+ const struct ieee80211_sta_eht_cap *own_eht_cap =
+ ieee80211_get_eht_iftype_cap_vif(sband, vif);
+ struct iwl_tlc_config_cmd_v4 cmd = {
+ /* For AP mode, use 20 MHz until the STA is authorized */
+ .max_ch_width = mld_sta->sta_state > IEEE80211_STA_ASSOC ?
+ iwl_mld_fw_bw_from_sta_bw(link_sta) :
+ IWL_TLC_MNG_CH_WIDTH_20MHZ,
+ .flags = iwl_mld_get_tlc_cmd_flags(mld, vif, link_sta,
+ own_he_cap, own_eht_cap),
+ .chains = iwl_mld_get_fw_chains(mld),
+ .sgi_ch_width_supp = iwl_mld_get_fw_sgi(link_sta),
+ .max_mpdu_len = cpu_to_le16(link_sta->agg.max_amsdu_len),
+ };
+ int fw_sta_id = iwl_mld_fw_sta_id_from_link_sta(mld, link_sta);
+ int ret;
+
+ if (fw_sta_id < 0)
+ return;
+
+ cmd.sta_id = fw_sta_id;
+
+ iwl_mld_fill_supp_rates(mld, vif, link_sta, sband,
+ own_he_cap, own_eht_cap,
+ &cmd);
+
+ IWL_DEBUG_RATE(mld,
+ "TLC CONFIG CMD, sta_id=%d, max_ch_width=%d, mode=%d\n",
+ cmd.sta_id, cmd.max_ch_width, cmd.mode);
+
+ /* Send async since this can be called within an RCU read section */
+ ret = iwl_mld_send_cmd_with_flags_pdu(mld, WIDE_ID(DATA_PATH_GROUP,
+ TLC_MNG_CONFIG_CMD),
+ CMD_ASYNC, &cmd);
+ if (ret)
+ IWL_ERR(mld, "Failed to send TLC cmd (%d)\n", ret);
+}
+
+int iwl_mld_send_tlc_dhc(struct iwl_mld *mld, u8 sta_id, u32 type, u32 data)
+{
+ struct {
+ struct iwl_dhc_cmd dhc;
+ struct iwl_dhc_tlc_cmd tlc;
+ } __packed cmd = {
+ .tlc.sta_id = sta_id,
+ .tlc.type = cpu_to_le32(type),
+ .tlc.data[0] = cpu_to_le32(data),
+ .dhc.length = cpu_to_le32(sizeof(cmd.tlc) >> 2),
+ .dhc.index_and_mask =
+ cpu_to_le32(DHC_TABLE_INTEGRATION | DHC_TARGET_UMAC |
+ DHC_INTEGRATION_TLC_DEBUG_CONFIG),
+ };
+ int ret;
+
+ ret = iwl_mld_send_cmd_with_flags_pdu(mld,
+ WIDE_ID(IWL_ALWAYS_LONG_GROUP,
+ DEBUG_HOST_COMMAND),
+ CMD_ASYNC, &cmd);
+ IWL_DEBUG_RATE(mld, "sta_id %d, type: 0x%X, value: 0x%X, ret%d\n",
+ sta_id, type, data, ret);
+ return ret;
+}
+
+void iwl_mld_config_tlc_link(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf,
+ struct ieee80211_link_sta *link_sta)
+{
+ struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(link_sta->sta);
+ enum nl80211_band band;
+
+ if (WARN_ON_ONCE(!link_conf->chanreq.oper.chan))
+ return;
+
+ /* Before we have information about a station, configure the A-MSDU RC
+ * limit such that iwlmld and mac80211 are not allowed to build
+ * A-MSDUs.
+ */
+ if (mld_sta->sta_state < IEEE80211_STA_ASSOC) {
+ link_sta->agg.max_rc_amsdu_len = 1;
+ ieee80211_sta_recalc_aggregates(link_sta->sta);
+ }
+
+ band = link_conf->chanreq.oper.chan->band;
+ iwl_mld_send_tlc_cmd(mld, vif, link_sta, band);
+}
+
+void iwl_mld_config_tlc(struct iwl_mld *mld, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct ieee80211_bss_conf *link;
+ int link_id;
+
+ lockdep_assert_wiphy(mld->wiphy);
+
+ for_each_vif_active_link(vif, link, link_id) {
+ struct ieee80211_link_sta *link_sta =
+ link_sta_dereference_check(sta, link_id);
+
+ if (!link || !link_sta)
+ continue;
+
+ iwl_mld_config_tlc_link(mld, vif, link, link_sta);
+ }
+}
+
+static u16
+iwl_mld_get_amsdu_size_of_tid(struct iwl_mld *mld,
+ struct ieee80211_link_sta *link_sta,
+ unsigned int tid)
+{
+ struct ieee80211_sta *sta = link_sta->sta;
+ struct ieee80211_vif *vif = iwl_mld_sta_from_mac80211(sta)->vif;
+ const u8 tid_to_mac80211_ac[] = {
+ IEEE80211_AC_BE,
+ IEEE80211_AC_BK,
+ IEEE80211_AC_BK,
+ IEEE80211_AC_BE,
+ IEEE80211_AC_VI,
+ IEEE80211_AC_VI,
+ IEEE80211_AC_VO,
+ IEEE80211_AC_VO,
+ };
+ unsigned int result = link_sta->agg.max_rc_amsdu_len;
+ u8 ac, txf, lmac;
+
+ lockdep_assert_wiphy(mld->wiphy);
+
+ /* Don't send an AMSDU that will be longer than the TXF.
+ * Add a security margin of 256 for the TX command + headers.
+ * We also want to have the start of the next packet inside the
+ * fifo to be able to send bursts.
+ */
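+ /* For example, a hypothetical 8192-byte TX FIFO would cap the A-MSDU
+ * at 8192 - 256 = 7936 bytes.
+ */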
+
+ if (WARN_ON(tid >= ARRAY_SIZE(tid_to_mac80211_ac)))
+ return 0;
+
+ ac = tid_to_mac80211_ac[tid];
+
+ /* For HE, redirect to the trigger-based FIFOs */
+ if (link_sta->he_cap.has_he)
+ ac += 4;
+
+ txf = iwl_mld_mac80211_ac_to_fw_tx_fifo(ac);
+
+ /* Only one link: take the lmac according to the band */
+ if (hweight16(sta->valid_links) <= 1) {
+ enum nl80211_band band;
+ struct ieee80211_bss_conf *link =
+ wiphy_dereference(mld->wiphy,
+ vif->link_conf[link_sta->link_id]);
+
+ if (WARN_ON(!link || !link->chanreq.oper.chan))
+ band = NL80211_BAND_2GHZ;
+ else
+ band = link->chanreq.oper.chan->band;
+ lmac = iwl_mld_get_lmac_id(mld, band);
+
+ /* More than one link but with 2 lmacs: take the minimum */
+ } else if (fw_has_capa(&mld->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_CDB_SUPPORT)) {
+ lmac = IWL_LMAC_5G_INDEX;
+ result = min_t(unsigned int, result,
+ mld->fwrt.smem_cfg.lmac[lmac].txfifo_size[txf] - 256);
+ lmac = IWL_LMAC_24G_INDEX;
+ /* More than one link but only one lmac */
+ } else {
+ lmac = IWL_LMAC_24G_INDEX;
+ }
+
+ return min_t(unsigned int, result,
+ mld->fwrt.smem_cfg.lmac[lmac].txfifo_size[txf] - 256);
+}
+
+void iwl_mld_handle_tlc_notif(struct iwl_mld *mld,
+ struct iwl_rx_packet *pkt)
+{
+ struct iwl_tlc_update_notif *notif = (void *)pkt->data;
+ struct ieee80211_link_sta *link_sta;
+ u32 flags = le32_to_cpu(notif->flags);
+ u32 enabled;
+ u16 size;
+
+ if (IWL_FW_CHECK(mld, notif->sta_id >= mld->fw->ucode_capa.num_stations,
+ "Invalid sta id (%d) in TLC notification\n",
+ notif->sta_id))
+ return;
+
+ link_sta = wiphy_dereference(mld->wiphy,
+ mld->fw_id_to_link_sta[notif->sta_id]);
+
+ if (WARN(IS_ERR_OR_NULL(link_sta),
+ "link_sta of sta id (%d) doesn't exist\n", notif->sta_id))
+ return;
+
+ if (flags & IWL_TLC_NOTIF_FLAG_RATE) {
+ struct iwl_mld_link_sta *mld_link_sta =
+ iwl_mld_link_sta_from_mac80211(link_sta);
+ char pretty_rate[100];
+
+ if (WARN_ON(!mld_link_sta))
+ return;
+
+ mld_link_sta->last_rate_n_flags = le32_to_cpu(notif->rate);
+
+ rs_pretty_print_rate(pretty_rate, sizeof(pretty_rate),
+ mld_link_sta->last_rate_n_flags);
+ IWL_DEBUG_RATE(mld, "TLC notif: new rate = %s\n", pretty_rate);
+ }
+
+ /* We are done processing the notif */
+ if (!(flags & IWL_TLC_NOTIF_FLAG_AMSDU))
+ return;
+
+ enabled = le32_to_cpu(notif->amsdu_enabled);
+ size = le32_to_cpu(notif->amsdu_size);
+
+ if (size < 2000) {
+ size = 0;
+ enabled = 0;
+ }
+
+ if (IWL_FW_CHECK(mld, size > link_sta->agg.max_amsdu_len,
+ "Invalid AMSDU len in TLC notif: %d (Max AMSDU len: %d)\n",
+ size, link_sta->agg.max_amsdu_len))
+ return;
+
+ link_sta->agg.max_rc_amsdu_len = size;
+
+ for (int i = 0; i < IWL_MAX_TID_COUNT; i++) {
+ if (enabled & BIT(i))
+ link_sta->agg.max_tid_amsdu_len[i] =
+ iwl_mld_get_amsdu_size_of_tid(mld, link_sta, i);
+ else
+ link_sta->agg.max_tid_amsdu_len[i] = 1;
+ }
+
+ ieee80211_sta_recalc_aggregates(link_sta->sta);
+
+ IWL_DEBUG_RATE(mld,
+ "AMSDU update. AMSDU size: %d, AMSDU selected size: %d, AMSDU TID bitmap 0x%X\n",
+ le32_to_cpu(notif->amsdu_size), size, enabled);
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/tlc.h b/drivers/net/wireless/intel/iwlwifi/mld/tlc.h
new file mode 100644
index 000000000000..c32f42e8840b
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mld/tlc.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2024 Intel Corporation
+ */
+#ifndef __iwl_mld_tlc_h__
+#define __iwl_mld_tlc_h__
+
+#include "mld.h"
+
+void iwl_mld_config_tlc_link(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf,
+ struct ieee80211_link_sta *link_sta);
+
+void iwl_mld_config_tlc(struct iwl_mld *mld, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
+
+void iwl_mld_handle_tlc_notif(struct iwl_mld *mld,
+ struct iwl_rx_packet *pkt);
+
+int iwl_mld_send_tlc_dhc(struct iwl_mld *mld, u8 sta_id, u32 type, u32 data);
+
+#endif /* __iwl_mld_tlc_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/tx.c b/drivers/net/wireless/intel/iwlwifi/mld/tx.c
new file mode 100644
index 000000000000..543abe72e465
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mld/tx.c
@@ -0,0 +1,1374 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2024 - 2025 Intel Corporation
+ */
+#include <net/ip.h>
+
+#include "tx.h"
+#include "sta.h"
+#include "hcmd.h"
+#include "iwl-utils.h"
+#include "iface.h"
+
+#include "fw/dbg.h"
+
+#include "fw/api/tx.h"
+#include "fw/api/rs.h"
+#include "fw/api/txq.h"
+#include "fw/api/datapath.h"
+#include "fw/api/time-event.h"
+
+#define MAX_ANT_NUM 2
+
+/* Toggles between TX antennas. Receives the bitmask of valid TX antennas and
+ * the *index* used for the last TX, and returns the next valid *index* to use.
+ * In order to set it in the tx_cmd, the caller must use BIT(idx).
+ */
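+/* Illustrative example (values assumed for the sake of the comment, not
+ * taken from the firmware API): with valid = 0x3 and last_idx = 0 the next
+ * index is 1, and a later call with last_idx = 1 returns 0 again, so
+ * consecutive frames alternate antennas; with valid = 0x1 index 0 is always
+ * returned.
+ */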
+static u8 iwl_mld_next_ant(u8 valid, u8 last_idx)
+{
+ u8 index = last_idx;
+
+ for (int i = 0; i < MAX_ANT_NUM; i++) {
+ index = (index + 1) % MAX_ANT_NUM;
+ if (valid & BIT(index))
+ return index;
+ }
+
+ WARN_ONCE(1, "Failed to toggle between antennas 0x%x", valid);
+
+ return last_idx;
+}
+
+void iwl_mld_toggle_tx_ant(struct iwl_mld *mld, u8 *ant)
+{
+ *ant = iwl_mld_next_ant(iwl_mld_get_valid_tx_ant(mld), *ant);
+}
+
+static int
+iwl_mld_get_queue_size(struct iwl_mld *mld, struct ieee80211_txq *txq)
+{
+ struct ieee80211_sta *sta = txq->sta;
+ struct ieee80211_link_sta *link_sta;
+ unsigned int link_id;
+ int max_size = IWL_DEFAULT_QUEUE_SIZE;
+
+ lockdep_assert_wiphy(mld->wiphy);
+
+ for_each_sta_active_link(txq->vif, sta, link_sta, link_id) {
+ if (link_sta->eht_cap.has_eht) {
+ max_size = IWL_DEFAULT_QUEUE_SIZE_EHT;
+ break;
+ }
+
+ if (link_sta->he_cap.has_he)
+ max_size = IWL_DEFAULT_QUEUE_SIZE_HE;
+ }
+
+ return max_size;
+}
+
+static int iwl_mld_allocate_txq(struct iwl_mld *mld, struct ieee80211_txq *txq)
+{
+ u8 tid = txq->tid == IEEE80211_NUM_TIDS ? IWL_MGMT_TID : txq->tid;
+ u32 fw_sta_mask = iwl_mld_fw_sta_id_mask(mld, txq->sta);
+ /* We can't know when the station is asleep or awake, so we
+ * must disable the queue hang detection.
+ */
+ unsigned int watchdog_timeout = txq->vif->type == NL80211_IFTYPE_AP ?
+ IWL_WATCHDOG_DISABLED :
+ mld->trans->trans_cfg->base_params->wd_timeout;
+ int queue, size;
+
+ lockdep_assert_wiphy(mld->wiphy);
+
+ if (tid == IWL_MGMT_TID)
+ size = max_t(u32, IWL_MGMT_QUEUE_SIZE,
+ mld->trans->cfg->min_txq_size);
+ else
+ size = iwl_mld_get_queue_size(mld, txq);
+
+ queue = iwl_trans_txq_alloc(mld->trans, 0, fw_sta_mask, tid, size,
+ watchdog_timeout);
+
+ if (queue >= 0)
+ IWL_DEBUG_TX_QUEUES(mld,
+ "Enabling TXQ #%d for sta mask 0x%x tid %d\n",
+ queue, fw_sta_mask, tid);
+ return queue;
+}
+
+static int iwl_mld_add_txq(struct iwl_mld *mld, struct ieee80211_txq *txq)
+{
+ struct iwl_mld_txq *mld_txq = iwl_mld_txq_from_mac80211(txq);
+ int id;
+
+ lockdep_assert_wiphy(mld->wiphy);
+
+ /* This will also send the SCD_QUEUE_CONFIG_CMD */
+ id = iwl_mld_allocate_txq(mld, txq);
+ if (id < 0)
+ return id;
+
+ mld_txq->fw_id = id;
+ mld_txq->status.allocated = true;
+
+ rcu_assign_pointer(mld->fw_id_to_txq[id], txq);
+
+ return 0;
+}
+
+void iwl_mld_add_txq_list(struct iwl_mld *mld)
+{
+ lockdep_assert_wiphy(mld->wiphy);
+
+ while (!list_empty(&mld->txqs_to_add)) {
+ struct ieee80211_txq *txq;
+ struct iwl_mld_txq *mld_txq =
+ list_first_entry(&mld->txqs_to_add, struct iwl_mld_txq,
+ list);
+ int failed;
+
+ txq = container_of((void *)mld_txq, struct ieee80211_txq,
+ drv_priv);
+
+ failed = iwl_mld_add_txq(mld, txq);
+
+ local_bh_disable();
+ spin_lock(&mld->add_txqs_lock);
+ list_del_init(&mld_txq->list);
+ spin_unlock(&mld->add_txqs_lock);
+ /* If the queue allocation failed, we can't transmit. Leave the
+ * frames on the txq; maybe the next attempt to allocate the
+ * queue will succeed.
+ */
+ if (!failed)
+ iwl_mld_tx_from_txq(mld, txq);
+ local_bh_enable();
+ }
+}
+
+void iwl_mld_add_txqs_wk(struct wiphy *wiphy, struct wiphy_work *wk)
+{
+ struct iwl_mld *mld = container_of(wk, struct iwl_mld,
+ add_txqs_wk);
+
+ /* will reschedule to run after restart */
+ if (mld->fw_status.in_hw_restart)
+ return;
+
+ iwl_mld_add_txq_list(mld);
+}
+
+void
+iwl_mld_free_txq(struct iwl_mld *mld, u32 fw_sta_mask, u32 tid, u32 queue_id)
+{
+ struct iwl_scd_queue_cfg_cmd remove_cmd = {
+ .operation = cpu_to_le32(IWL_SCD_QUEUE_REMOVE),
+ .u.remove.tid = cpu_to_le32(tid),
+ .u.remove.sta_mask = cpu_to_le32(fw_sta_mask),
+ };
+
+ iwl_mld_send_cmd_pdu(mld,
+ WIDE_ID(DATA_PATH_GROUP, SCD_QUEUE_CONFIG_CMD),
+ &remove_cmd);
+
+ iwl_trans_txq_free(mld->trans, queue_id);
+}
+
+void iwl_mld_remove_txq(struct iwl_mld *mld, struct ieee80211_txq *txq)
+{
+ struct iwl_mld_txq *mld_txq = iwl_mld_txq_from_mac80211(txq);
+ u32 sta_msk, tid;
+
+ lockdep_assert_wiphy(mld->wiphy);
+
+ spin_lock_bh(&mld->add_txqs_lock);
+ if (!list_empty(&mld_txq->list))
+ list_del_init(&mld_txq->list);
+ spin_unlock_bh(&mld->add_txqs_lock);
+
+ if (!mld_txq->status.allocated ||
+ WARN_ON(mld_txq->fw_id >= ARRAY_SIZE(mld->fw_id_to_txq)))
+ return;
+
+ sta_msk = iwl_mld_fw_sta_id_mask(mld, txq->sta);
+
+ tid = txq->tid == IEEE80211_NUM_TIDS ? IWL_MGMT_TID :
+ txq->tid;
+
+ iwl_mld_free_txq(mld, sta_msk, tid, mld_txq->fw_id);
+
+ RCU_INIT_POINTER(mld->fw_id_to_txq[mld_txq->fw_id], NULL);
+ mld_txq->status.allocated = false;
+}
+
+#define OPT_HDR(type, skb, off) \
+ (type *)(skb_network_header(skb) + (off))
+
+static __le32
+iwl_mld_get_offload_assist(struct sk_buff *skb, bool amsdu)
+{
+ struct ieee80211_hdr *hdr = (void *)skb->data;
+ u16 mh_len = ieee80211_hdrlen(hdr->frame_control);
+ u16 offload_assist = 0;
+#if IS_ENABLED(CONFIG_INET)
+ u8 protocol = 0;
+
+ /* Do not compute checksum if already computed */
+ if (skb->ip_summed != CHECKSUM_PARTIAL)
+ goto out;
+
+ /* We do not expect to be requested to csum stuff we do not support */
+
+ /* TBD: do we also need to check
+ * !(mvm->hw->netdev_features & IWL_TX_CSUM_NETIF_FLAGS) now that all
+ * the devices we support have these flags?
+ */
+ if (WARN_ONCE(skb->protocol != htons(ETH_P_IP) &&
+ skb->protocol != htons(ETH_P_IPV6),
+ "No support for requested checksum\n")) {
+ skb_checksum_help(skb);
+ goto out;
+ }
+
+ if (skb->protocol == htons(ETH_P_IP)) {
+ protocol = ip_hdr(skb)->protocol;
+ } else {
+#if IS_ENABLED(CONFIG_IPV6)
+ struct ipv6hdr *ipv6h =
+ (struct ipv6hdr *)skb_network_header(skb);
+ unsigned int off = sizeof(*ipv6h);
+
+ protocol = ipv6h->nexthdr;
+ while (protocol != NEXTHDR_NONE && ipv6_ext_hdr(protocol)) {
+ struct ipv6_opt_hdr *hp;
+
+ /* only supported extension headers */
+ if (protocol != NEXTHDR_ROUTING &&
+ protocol != NEXTHDR_HOP &&
+ protocol != NEXTHDR_DEST) {
+ skb_checksum_help(skb);
+ goto out;
+ }
+
+ hp = OPT_HDR(struct ipv6_opt_hdr, skb, off);
+ protocol = hp->nexthdr;
+ off += ipv6_optlen(hp);
+ }
+ /* if we get here, protocol should now be TCP or UDP */
+#endif
+ }
+
+ if (protocol != IPPROTO_TCP && protocol != IPPROTO_UDP) {
+ WARN_ON_ONCE(1);
+ skb_checksum_help(skb);
+ goto out;
+ }
+
+ /* enable L4 csum */
+ offload_assist |= BIT(TX_CMD_OFFLD_L4_EN);
+
+ /* Set offset to IP header (snap).
+ * We don't support tunneling so no need to take care of inner header.
+ * Size is in words.
+ */
+ offload_assist |= (4 << TX_CMD_OFFLD_IP_HDR);
+
+ /* Do IPv4 csum for AMSDU only (no IP csum for IPv6) */
+ if (skb->protocol == htons(ETH_P_IP) && amsdu) {
+ ip_hdr(skb)->check = 0;
+ offload_assist |= BIT(TX_CMD_OFFLD_L3_EN);
+ }
+
+ /* reset UDP/TCP header csum */
+ if (protocol == IPPROTO_TCP)
+ tcp_hdr(skb)->check = 0;
+ else
+ udp_hdr(skb)->check = 0;
+
+out:
+#endif
+ mh_len /= 2;
+ offload_assist |= mh_len << TX_CMD_OFFLD_MH_SIZE;
+
+ if (amsdu)
+ offload_assist |= BIT(TX_CMD_OFFLD_AMSDU);
+ else if (ieee80211_hdrlen(hdr->frame_control) % 4)
+ /* padding is inserted later in transport */
+ offload_assist |= BIT(TX_CMD_OFFLD_PAD);
+
+ return cpu_to_le32(offload_assist);
+}
+
+static void iwl_mld_get_basic_rates_and_band(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ struct ieee80211_tx_info *info,
+ unsigned long *basic_rates,
+ u8 *band)
+{
+ u32 link_id = u32_get_bits(info->control.flags,
+ IEEE80211_TX_CTRL_MLO_LINK);
+
+ *basic_rates = vif->bss_conf.basic_rates;
+ *band = info->band;
+
+ if (link_id == IEEE80211_LINK_UNSPECIFIED &&
+ ieee80211_vif_is_mld(vif)) {
+ /* shouldn't do this when >1 link is active */
+ WARN_ON(hweight16(vif->active_links) != 1);
+ link_id = __ffs(vif->active_links);
+ }
+
+ if (link_id < IEEE80211_LINK_UNSPECIFIED) {
+ struct ieee80211_bss_conf *link_conf;
+
+ rcu_read_lock();
+ link_conf = rcu_dereference(vif->link_conf[link_id]);
+ if (link_conf) {
+ *basic_rates = link_conf->basic_rates;
+ if (link_conf->chanreq.oper.chan)
+ *band = link_conf->chanreq.oper.chan->band;
+ }
+ rcu_read_unlock();
+ }
+}
+
+u8 iwl_mld_get_lowest_rate(struct iwl_mld *mld,
+ struct ieee80211_tx_info *info,
+ struct ieee80211_vif *vif)
+{
+ struct ieee80211_supported_band *sband;
+ u16 lowest_cck = IWL_RATE_COUNT, lowest_ofdm = IWL_RATE_COUNT;
+ unsigned long basic_rates;
+ u8 band, rate;
+ u32 i;
+
+ iwl_mld_get_basic_rates_and_band(mld, vif, info, &basic_rates, &band);
+
+ sband = mld->hw->wiphy->bands[band];
+ for_each_set_bit(i, &basic_rates, BITS_PER_LONG) {
+ u16 hw = sband->bitrates[i].hw_value;
+
+ if (hw >= IWL_FIRST_OFDM_RATE) {
+ if (lowest_ofdm > hw)
+ lowest_ofdm = hw;
+ } else if (lowest_cck > hw) {
+ lowest_cck = hw;
+ }
+ }
+
+ if (band == NL80211_BAND_2GHZ && !vif->p2p &&
+ vif->type != NL80211_IFTYPE_P2P_DEVICE &&
+ !(info->flags & IEEE80211_TX_CTL_NO_CCK_RATE)) {
+ if (lowest_cck != IWL_RATE_COUNT)
+ rate = lowest_cck;
+ else if (lowest_ofdm != IWL_RATE_COUNT)
+ rate = lowest_ofdm;
+ else
+ rate = IWL_FIRST_CCK_RATE;
+ } else if (lowest_ofdm != IWL_RATE_COUNT) {
+ rate = lowest_ofdm;
+ } else {
+ rate = IWL_FIRST_OFDM_RATE;
+ }
+
+ return rate;
+}
+
+static u32 iwl_mld_mac80211_rate_idx_to_fw(struct iwl_mld *mld,
+ struct ieee80211_tx_info *info,
+ int rate_idx)
+{
+ u32 rate_flags = 0;
+ u8 rate_plcp;
+
+ /* if the rate isn't a well known legacy rate, take the lowest one */
+ if (rate_idx < 0 || rate_idx >= IWL_RATE_COUNT_LEGACY)
+ rate_idx = iwl_mld_get_lowest_rate(mld, info,
+ info->control.vif);
+
+ WARN_ON_ONCE(rate_idx < 0);
+
+ /* Set CCK or OFDM flag */
+ if (rate_idx <= IWL_LAST_CCK_RATE)
+ rate_flags |= RATE_MCS_CCK_MSK;
+ else
+ rate_flags |= RATE_MCS_LEGACY_OFDM_MSK;
+
+ /* Legacy rates are indexed:
+ * 0 - 3 for CCK and 0 - 7 for OFDM
+ */
+ rate_plcp = (rate_idx >= IWL_FIRST_OFDM_RATE ?
+ rate_idx - IWL_FIRST_OFDM_RATE : rate_idx);
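+ /* For example, assuming the usual legacy table (CCK 1/2/5.5/11 Mbps at
+ * indices 0-3 and OFDM 6-54 Mbps starting at IWL_FIRST_OFDM_RATE), the
+ * 12 Mbps OFDM rate at index 6 maps to PLCP index 2, while CCK indices
+ * pass through unchanged.
+ */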
+
+ return (u32)rate_plcp | rate_flags;
+}
+
+static u32 iwl_mld_get_tx_ant(struct iwl_mld *mld,
+ struct ieee80211_tx_info *info,
+ struct ieee80211_sta *sta, __le16 fc)
+{
+ if (sta && ieee80211_is_data(fc)) {
+ struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(sta);
+
+ return BIT(mld_sta->data_tx_ant) << RATE_MCS_ANT_POS;
+ }
+
+ return BIT(mld->mgmt_tx_ant) << RATE_MCS_ANT_POS;
+}
+
+static u32 iwl_mld_get_inject_tx_rate(struct iwl_mld *mld,
+ struct ieee80211_tx_info *info,
+ struct ieee80211_sta *sta,
+ __le16 fc)
+{
+ struct ieee80211_tx_rate *rate = &info->control.rates[0];
+ u32 result;
+
+ /* we only care about legacy/HT/VHT so far, so we can
+ * build in v1 and use iwl_new_rate_from_v1()
+ * FIXME: in newer devices we only support the new rates, build
+ * the rate_n_flags in the new format here instead of using v1 and
+ * converting it.
+ */
+
+ if (rate->flags & IEEE80211_TX_RC_VHT_MCS) {
+ u8 mcs = ieee80211_rate_get_vht_mcs(rate);
+ u8 nss = ieee80211_rate_get_vht_nss(rate);
+
+ result = RATE_MCS_VHT_MSK_V1;
+ result |= u32_encode_bits(mcs, RATE_VHT_MCS_RATE_CODE_MSK);
+ result |= u32_encode_bits(nss, RATE_MCS_NSS_MSK);
+
+ if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
+ result |= RATE_MCS_SGI_MSK_V1;
+
+ if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
+ result |= u32_encode_bits(1, RATE_MCS_CHAN_WIDTH_MSK_V1);
+ else if (rate->flags & IEEE80211_TX_RC_80_MHZ_WIDTH)
+ result |= u32_encode_bits(2, RATE_MCS_CHAN_WIDTH_MSK_V1);
+ else if (rate->flags & IEEE80211_TX_RC_160_MHZ_WIDTH)
+ result |= u32_encode_bits(3, RATE_MCS_CHAN_WIDTH_MSK_V1);
+
+ result = iwl_new_rate_from_v1(result);
+ } else if (rate->flags & IEEE80211_TX_RC_MCS) {
+ result = RATE_MCS_HT_MSK_V1;
+ result |= u32_encode_bits(rate->idx,
+ RATE_HT_MCS_RATE_CODE_MSK_V1 |
+ RATE_HT_MCS_NSS_MSK_V1);
+ if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
+ result |= RATE_MCS_SGI_MSK_V1;
+ if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
+ result |= u32_encode_bits(1, RATE_MCS_CHAN_WIDTH_MSK_V1);
+ if (info->flags & IEEE80211_TX_CTL_LDPC)
+ result |= RATE_MCS_LDPC_MSK_V1;
+ if (u32_get_bits(info->flags, IEEE80211_TX_CTL_STBC))
+ result |= RATE_MCS_STBC_MSK;
+
+ result = iwl_new_rate_from_v1(result);
+ } else {
+ result = iwl_mld_mac80211_rate_idx_to_fw(mld, info, rate->idx);
+ }
+
+ if (info->control.antennas)
+ result |= u32_encode_bits(info->control.antennas,
+ RATE_MCS_ANT_AB_MSK);
+ else
+ result |= iwl_mld_get_tx_ant(mld, info, sta, fc);
+
+ return result;
+}
+
+static u32 iwl_mld_get_tx_rate_n_flags(struct iwl_mld *mld,
+ struct ieee80211_tx_info *info,
+ struct ieee80211_sta *sta, __le16 fc)
+{
+ if (unlikely(info->control.flags & IEEE80211_TX_CTRL_RATE_INJECT))
+ return iwl_mld_get_inject_tx_rate(mld, info, sta, fc);
+
+ return iwl_mld_mac80211_rate_idx_to_fw(mld, info, -1) |
+ iwl_mld_get_tx_ant(mld, info, sta, fc);
+}
+
+static void
+iwl_mld_fill_tx_cmd_hdr(struct iwl_tx_cmd_gen3 *tx_cmd,
+ struct sk_buff *skb, bool amsdu)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_hdr *hdr = (void *)skb->data;
+ struct ieee80211_vif *vif;
+
+ /* Copy MAC header from skb into command buffer */
+ memcpy(tx_cmd->hdr, hdr, ieee80211_hdrlen(hdr->frame_control));
+
+ if (!amsdu || !skb_is_gso(skb))
+ return;
+
+ /* As described in IEEE Std 802.11-2020, table 9-30 (Address
+ * field contents), A-MSDU address 3 should contain the BSSID
+ * address.
+ *
+ * In TSO, the skb header address 3 contains the original address 3 to
+ * correctly create all the A-MSDU subframes headers from it.
+ * Override now the address 3 in the command header with the BSSID.
+ *
+ * Note: we fill in the MLD address, but the firmware will do the
+ * necessary translation to link address after encryption.
+ */
+ vif = info->control.vif;
+ switch (vif->type) {
+ case NL80211_IFTYPE_STATION:
+ ether_addr_copy(tx_cmd->hdr->addr3, vif->cfg.ap_addr);
+ break;
+ case NL80211_IFTYPE_AP:
+ ether_addr_copy(tx_cmd->hdr->addr3, vif->addr);
+ break;
+ default:
+ break;
+ }
+}
+
+static void
+iwl_mld_fill_tx_cmd(struct iwl_mld *mld, struct sk_buff *skb,
+ struct iwl_device_tx_cmd *dev_tx_cmd,
+ struct ieee80211_sta *sta)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_hdr *hdr = (void *)skb->data;
+ struct iwl_mld_sta *mld_sta = sta ? iwl_mld_sta_from_mac80211(sta) :
+ NULL;
+ struct iwl_tx_cmd_gen3 *tx_cmd;
+ bool amsdu = ieee80211_is_data_qos(hdr->frame_control) &&
+ (*ieee80211_get_qos_ctl(hdr) &
+ IEEE80211_QOS_CTL_A_MSDU_PRESENT);
+ u32 rate_n_flags = 0;
+ u16 flags = 0;
+
+ dev_tx_cmd->hdr.cmd = TX_CMD;
+
+ if (!info->control.hw_key)
+ flags |= IWL_TX_FLAGS_ENCRYPT_DIS;
+
+ /* For data and mgmt packets rate info comes from the fw.
+ * Only set rate/antenna for injected frames with fixed rate, or
+ * when no sta is given.
+ */
+ if (unlikely(!sta ||
+ info->control.flags & IEEE80211_TX_CTRL_RATE_INJECT)) {
+ flags |= IWL_TX_FLAGS_CMD_RATE;
+ rate_n_flags = iwl_mld_get_tx_rate_n_flags(mld, info, sta,
+ hdr->frame_control);
+ } else if (!ieee80211_is_data(hdr->frame_control) ||
+ (mld_sta &&
+ mld_sta->sta_state < IEEE80211_STA_AUTHORIZED)) {
+ /* These are important frames */
+ flags |= IWL_TX_FLAGS_HIGH_PRI;
+ }
+
+ tx_cmd = (void *)dev_tx_cmd->payload;
+
+ iwl_mld_fill_tx_cmd_hdr(tx_cmd, skb, amsdu);
+
+ tx_cmd->offload_assist = iwl_mld_get_offload_assist(skb, amsdu);
+
+ /* Total # bytes to be transmitted */
+ tx_cmd->len = cpu_to_le16((u16)skb->len);
+
+ tx_cmd->flags = cpu_to_le16(flags);
+
+ tx_cmd->rate_n_flags = cpu_to_le32(rate_n_flags);
+}
+
+/* Callers of this need to check that info->control.vif is not NULL */
+static struct iwl_mld_link *
+iwl_mld_get_link_from_tx_info(struct ieee80211_tx_info *info)
+{
+ struct iwl_mld_vif *mld_vif =
+ iwl_mld_vif_from_mac80211(info->control.vif);
+ u32 link_id = u32_get_bits(info->control.flags,
+ IEEE80211_TX_CTRL_MLO_LINK);
+
+ if (link_id == IEEE80211_LINK_UNSPECIFIED) {
+ if (info->control.vif->active_links)
+ link_id = ffs(info->control.vif->active_links) - 1;
+ else
+ link_id = 0;
+ }
+
+ return rcu_dereference(mld_vif->link[link_id]);
+}
+
+static int
+iwl_mld_get_tx_queue_id(struct iwl_mld *mld, struct ieee80211_txq *txq,
+ struct sk_buff *skb)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_hdr *hdr = (void *)skb->data;
+ __le16 fc = hdr->frame_control;
+ struct iwl_mld_vif *mld_vif;
+ struct iwl_mld_link *link;
+
+ if (txq && txq->sta)
+ return iwl_mld_txq_from_mac80211(txq)->fw_id;
+
+ if (!info->control.vif)
+ return IWL_MLD_INVALID_QUEUE;
+
+ switch (info->control.vif->type) {
+ case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_ADHOC:
+ link = iwl_mld_get_link_from_tx_info(info);
+
+ if (WARN_ON(!link))
+ break;
+
+ /* ucast disassociate/deauth frames without a station might
+ * happen, especially with reason 7 ("Class 3 frame received
+ * from nonassociated STA").
+ */
+ if (ieee80211_is_mgmt(fc) &&
+ (!ieee80211_is_bufferable_mmpdu(skb) ||
+ ieee80211_is_deauth(fc) || ieee80211_is_disassoc(fc)))
+ return link->bcast_sta.queue_id;
+
+ if (is_multicast_ether_addr(hdr->addr1) &&
+ !ieee80211_has_order(fc))
+ return link->mcast_sta.queue_id;
+
+ WARN_ONCE(info->control.vif->type != NL80211_IFTYPE_ADHOC,
+ "Couldn't find a TXQ. fc=0x%02x", le16_to_cpu(fc));
+ return link->bcast_sta.queue_id;
+ case NL80211_IFTYPE_P2P_DEVICE:
+ mld_vif = iwl_mld_vif_from_mac80211(info->control.vif);
+
+ if (mld_vif->roc_activity == ROC_NUM_ACTIVITIES) {
+ IWL_DEBUG_DROP(mld, "Drop tx outside ROC\n");
+ return IWL_MLD_INVALID_DROP_TX;
+ }
+
+ WARN_ON(!ieee80211_is_mgmt(fc));
+
+ return mld_vif->deflink.aux_sta.queue_id;
+ default:
+ /* TODO: consider monitor (task=monitor) */
+ WARN_ONCE(1, "Unsupported vif type\n");
+ break;
+ }
+
+ return IWL_MLD_INVALID_QUEUE;
+}
+
+static void iwl_mld_probe_resp_set_noa(struct iwl_mld *mld,
+ struct sk_buff *skb)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct iwl_mld_link *mld_link =
+ &iwl_mld_vif_from_mac80211(info->control.vif)->deflink;
+ struct iwl_probe_resp_data *resp_data;
+ u8 *pos;
+
+ if (!info->control.vif->p2p)
+ return;
+
+ rcu_read_lock();
+
+ resp_data = rcu_dereference(mld_link->probe_resp_data);
+ if (!resp_data)
+ goto out;
+
+ if (!resp_data->notif.noa_active)
+ goto out;
+
+ if (skb_tailroom(skb) < resp_data->noa_len) {
+ if (pskb_expand_head(skb, 0, resp_data->noa_len, GFP_ATOMIC)) {
+ IWL_ERR(mld,
+ "Failed to reallocate probe resp\n");
+ goto out;
+ }
+ }
+
+ pos = skb_put(skb, resp_data->noa_len);
+
+ *pos++ = WLAN_EID_VENDOR_SPECIFIC;
+ /* Set length of IE body (not including ID and length itself) */
+ *pos++ = resp_data->noa_len - 2;
+ *pos++ = (WLAN_OUI_WFA >> 16) & 0xff;
+ *pos++ = (WLAN_OUI_WFA >> 8) & 0xff;
+ *pos++ = WLAN_OUI_WFA & 0xff;
+ *pos++ = WLAN_OUI_TYPE_WFA_P2P;
+
+ memcpy(pos, &resp_data->notif.noa_attr,
+ resp_data->noa_len - sizeof(struct ieee80211_vendor_ie));
+
+out:
+ rcu_read_unlock();
+}
+
+/* This function must be called with BHs disabled */
+static int iwl_mld_tx_mpdu(struct iwl_mld *mld, struct sk_buff *skb,
+ struct ieee80211_txq *txq)
+{
+ struct ieee80211_hdr *hdr = (void *)skb->data;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_sta *sta = txq ? txq->sta : NULL;
+ struct iwl_device_tx_cmd *dev_tx_cmd;
+ int queue = iwl_mld_get_tx_queue_id(mld, txq, skb);
+ u8 tid = IWL_MAX_TID_COUNT;
+
+ if (WARN_ONCE(queue == IWL_MLD_INVALID_QUEUE, "Invalid TX Queue id") ||
+ queue == IWL_MLD_INVALID_DROP_TX)
+ return -1;
+
+ if (unlikely(ieee80211_is_any_nullfunc(hdr->frame_control)))
+ return -1;
+
+ dev_tx_cmd = iwl_trans_alloc_tx_cmd(mld->trans);
+ if (unlikely(!dev_tx_cmd))
+ return -1;
+
+ if (unlikely(ieee80211_is_probe_resp(hdr->frame_control))) {
+ /* free the already-allocated TX command instead of leaking it */
+ if (IWL_MLD_NON_TRANSMITTING_AP)
+ goto err;
+
+ iwl_mld_probe_resp_set_noa(mld, skb);
+ }
+
+ iwl_mld_fill_tx_cmd(mld, skb, dev_tx_cmd, sta);
+
+ if (ieee80211_is_data(hdr->frame_control)) {
+ if (ieee80211_is_data_qos(hdr->frame_control))
+ tid = ieee80211_get_tid(hdr);
+ else
+ tid = IWL_TID_NON_QOS;
+ }
+
+ IWL_DEBUG_TX(mld, "TX TID:%d from Q:%d len %d\n",
+ tid, queue, skb->len);
+
+ /* From now on, we cannot access info->control */
+ memset(&info->status, 0, sizeof(info->status));
+ memset(info->driver_data, 0, sizeof(info->driver_data));
+
+ info->driver_data[1] = dev_tx_cmd;
+
+ if (iwl_trans_tx(mld->trans, skb, dev_tx_cmd, queue))
+ goto err;
+
+ /* Update the low-latency counter when a packet is queued instead
+ * of after TX; this allows earlier low-latency detection.
+ */
+ if (sta)
+ iwl_mld_low_latency_update_counters(mld, hdr, sta, 0);
+
+ return 0;
+
+err:
+ iwl_trans_free_tx_cmd(mld->trans, dev_tx_cmd);
+ IWL_DEBUG_TX(mld, "TX from Q:%d dropped\n", queue);
+ return -1;
+}
+
+#ifdef CONFIG_INET
+
+/* This function handles the segmentation of a large TSO packet into multiple
+ * MPDUs, ensuring that the resulting segments conform to AMSDU limits and
+ * constraints.
+ */
+static int iwl_mld_tx_tso_segment(struct iwl_mld *mld, struct sk_buff *skb,
+ struct ieee80211_sta *sta,
+ struct sk_buff_head *mpdus_skbs)
+{
+ struct ieee80211_hdr *hdr = (void *)skb->data;
+ netdev_features_t netdev_flags = NETIF_F_CSUM_MASK | NETIF_F_SG;
+ unsigned int mss = skb_shinfo(skb)->gso_size;
+ unsigned int num_subframes, tcp_payload_len, subf_len;
+ u16 snap_ip_tcp, pad, max_tid_amsdu_len;
+ u8 tid;
+
+ snap_ip_tcp = 8 + skb_network_header_len(skb) + tcp_hdrlen(skb);
+
+ if (!ieee80211_is_data_qos(hdr->frame_control) ||
+ !sta->cur->max_rc_amsdu_len)
+ return iwl_tx_tso_segment(skb, 1, netdev_flags, mpdus_skbs);
+
+ /* Do not build AMSDU for IPv6 with extension headers.
+ * Ask stack to segment and checksum the generated MPDUs for us.
+ */
+ if (skb->protocol == htons(ETH_P_IPV6) &&
+ ((struct ipv6hdr *)skb_network_header(skb))->nexthdr !=
+ IPPROTO_TCP) {
+ netdev_flags &= ~NETIF_F_CSUM_MASK;
+ return iwl_tx_tso_segment(skb, 1, netdev_flags, mpdus_skbs);
+ }
+
+ tid = ieee80211_get_tid(hdr);
+ if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
+ return -EINVAL;
+
+ max_tid_amsdu_len = sta->cur->max_tid_amsdu_len[tid];
+ if (!max_tid_amsdu_len)
+ return iwl_tx_tso_segment(skb, 1, netdev_flags, mpdus_skbs);
+
+ /* Sub frame header + SNAP + IP header + TCP header + MSS */
+ subf_len = sizeof(struct ethhdr) + snap_ip_tcp + mss;
+ pad = (4 - subf_len) & 0x3;
+
+ /* If we have N subframes in the A-MSDU, then the A-MSDU's size is
+ * N * subf_len + (N - 1) * pad.
+ */
+ num_subframes = (max_tid_amsdu_len + pad) / (subf_len + pad);
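+ /* Worked example with assumed numbers: mss = 1460,
+ * snap_ip_tcp = 8 + 20 + 32 = 60, so subf_len = 14 + 60 + 1460 = 1534
+ * and pad = 2; a max_tid_amsdu_len of 7935 then gives
+ * (7935 + 2) / (1534 + 2) = 5 subframes per A-MSDU.
+ */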
+
+ if (sta->max_amsdu_subframes &&
+ num_subframes > sta->max_amsdu_subframes)
+ num_subframes = sta->max_amsdu_subframes;
+
+ tcp_payload_len = skb_tail_pointer(skb) - skb_transport_header(skb) -
+ tcp_hdrlen(skb) + skb->data_len;
+
+ /* Make sure we have enough TBs for the A-MSDU:
+ * 2 for each subframe
+ * 1 more for each fragment
+ * 1 more for the potential data in the header
+ */
+ if ((num_subframes * 2 + skb_shinfo(skb)->nr_frags + 1) >
+ mld->trans->max_skb_frags)
+ num_subframes = 1;
+
+ if (num_subframes > 1)
+ *ieee80211_get_qos_ctl(hdr) |= IEEE80211_QOS_CTL_A_MSDU_PRESENT;
+
+ /* This skb fits in one single A-MSDU */
+ if (tcp_payload_len <= num_subframes * mss) {
+ __skb_queue_tail(mpdus_skbs, skb);
+ return 0;
+ }
+
+ /* Trick the segmentation function to make it create SKBs that can fit
+ * into one A-MSDU.
+ */
+ return iwl_tx_tso_segment(skb, num_subframes, netdev_flags, mpdus_skbs);
+}
+
+/* Manages TSO (TCP Segmentation Offload) packet transmission by segmenting
+ * large packets when necessary and transmitting each segment as an MPDU.
+ */
+static int iwl_mld_tx_tso(struct iwl_mld *mld, struct sk_buff *skb,
+ struct ieee80211_txq *txq)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct sk_buff *orig_skb = skb;
+ struct sk_buff_head mpdus_skbs;
+ unsigned int payload_len;
+ int ret;
+
+ if (WARN_ON(!txq || !txq->sta))
+ return -1;
+
+ payload_len = skb_tail_pointer(skb) - skb_transport_header(skb) -
+ tcp_hdrlen(skb) + skb->data_len;
+
+ if (payload_len <= skb_shinfo(skb)->gso_size)
+ return iwl_mld_tx_mpdu(mld, skb, txq);
+
+ if (!info->control.vif)
+ return -1;
+
+ __skb_queue_head_init(&mpdus_skbs);
+
+ ret = iwl_mld_tx_tso_segment(mld, skb, txq->sta, &mpdus_skbs);
+ if (ret)
+ return ret;
+
+ WARN_ON(skb_queue_empty(&mpdus_skbs));
+
+ while (!skb_queue_empty(&mpdus_skbs)) {
+ skb = __skb_dequeue(&mpdus_skbs);
+
+ ret = iwl_mld_tx_mpdu(mld, skb, txq);
+ if (!ret)
+ continue;
+
+ /* Free skbs created as part of TSO logic that have not yet
+ * been dequeued
+ */
+ __skb_queue_purge(&mpdus_skbs);
+
+ /* skb here is not necessarily the same skb that entered
+ * this function, so free it explicitly.
+ */
+ if (skb == orig_skb)
+ ieee80211_free_txskb(mld->hw, skb);
+ else
+ kfree_skb(skb);
+
+ /* there was an error, but we consumed the skb one way or
+ * another, so return 0
+ */
+ return 0;
+ }
+
+ return 0;
+}
+#else
+static int iwl_mld_tx_tso(struct iwl_mld *mld, struct sk_buff *skb,
+ struct ieee80211_txq *txq)
+{
+ /* Impossible to get TSO without CONFIG_INET */
+ WARN_ON(1);
+
+ return -1;
+}
+#endif /* CONFIG_INET */
+
+void iwl_mld_tx_skb(struct iwl_mld *mld, struct sk_buff *skb,
+ struct ieee80211_txq *txq)
+{
+ if (skb_is_gso(skb)) {
+ if (!iwl_mld_tx_tso(mld, skb, txq))
+ return;
+ goto err;
+ }
+
+ if (likely(!iwl_mld_tx_mpdu(mld, skb, txq)))
+ return;
+
+err:
+ ieee80211_free_txskb(mld->hw, skb);
+}
+
+void iwl_mld_tx_from_txq(struct iwl_mld *mld, struct ieee80211_txq *txq)
+{
+ struct iwl_mld_txq *mld_txq = iwl_mld_txq_from_mac80211(txq);
+ struct sk_buff *skb = NULL;
+ u8 zero_addr[ETH_ALEN] = {};
+
+ /*
+ * No need for threads to wait here; they can leave all the work to
+ * the first taker.
+ *
+ * mld_txq->tx_request logic:
+ *
+ * If 0, no one is currently TXing, set to 1 to indicate current thread
+ * will now start TX and other threads should quit.
+ *
+ * If 1, another thread is currently TXing, set to 2 to indicate to
+ * that thread that there was another request. Since that request may
+ * have raced with the check whether the queue is empty, the TXing
+ * thread should check the queue's status one more time before leaving.
+ * This check is done in order to not leave any TX hanging in the queue
+ * until the next TX invocation (which may not even happen).
+ *
+ * If 2, another thread is currently TXing, and it will already double
+ * check the queue, so do nothing.
+ */
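+ /* Sketch of the resulting behaviour (relying on the standard
+ * atomic_fetch_add_unless() semantics): the call below returns the old
+ * value and only increments it when it is not already 2, so the first
+ * caller reads 0 and proceeds to TX while concurrent callers read a
+ * non-zero value and return here.
+ */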
+ if (atomic_fetch_add_unless(&mld_txq->tx_request, 1, 2))
+ return;
+
+ rcu_read_lock();
+ do {
+ while (likely(!mld_txq->status.stop_full) &&
+ (skb = ieee80211_tx_dequeue(mld->hw, txq)))
+ iwl_mld_tx_skb(mld, skb, txq);
+ } while (atomic_dec_return(&mld_txq->tx_request));
+
+ IWL_DEBUG_TX(mld, "TXQ of sta %pM tid %d is now empty\n",
+ txq->sta ? txq->sta->addr : zero_addr, txq->tid);
+
+ rcu_read_unlock();
+}
+
+static void iwl_mld_hwrate_to_tx_rate(u32 rate_n_flags,
+ struct ieee80211_tx_info *info)
+{
+ enum nl80211_band band = info->band;
+ struct ieee80211_tx_rate *tx_rate = &info->status.rates[0];
+ u32 sgi = rate_n_flags & RATE_MCS_SGI_MSK;
+ u32 chan_width = rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK;
+ u32 format = rate_n_flags & RATE_MCS_MOD_TYPE_MSK;
+
+ if (sgi)
+ tx_rate->flags |= IEEE80211_TX_RC_SHORT_GI;
+
+ switch (chan_width) {
+ case RATE_MCS_CHAN_WIDTH_20:
+ break;
+ case RATE_MCS_CHAN_WIDTH_40:
+ tx_rate->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
+ break;
+ case RATE_MCS_CHAN_WIDTH_80:
+ tx_rate->flags |= IEEE80211_TX_RC_80_MHZ_WIDTH;
+ break;
+ case RATE_MCS_CHAN_WIDTH_160:
+ tx_rate->flags |= IEEE80211_TX_RC_160_MHZ_WIDTH;
+ break;
+ default:
+ break;
+ }
+
+ switch (format) {
+ case RATE_MCS_HT_MSK:
+ tx_rate->flags |= IEEE80211_TX_RC_MCS;
+ tx_rate->idx = RATE_HT_MCS_INDEX(rate_n_flags);
+ break;
+ case RATE_MCS_VHT_MSK:
+ ieee80211_rate_set_vht(tx_rate,
+ rate_n_flags & RATE_MCS_CODE_MSK,
+ FIELD_GET(RATE_MCS_NSS_MSK,
+ rate_n_flags) + 1);
+ tx_rate->flags |= IEEE80211_TX_RC_VHT_MCS;
+ break;
+ case RATE_MCS_HE_MSK:
+ /* mac80211 cannot do this without ieee80211_tx_status_ext()
+ * but it only matters for radiotap
+ */
+ tx_rate->idx = 0;
+ break;
+ default:
+ tx_rate->idx =
+ iwl_mld_legacy_hw_idx_to_mac80211_idx(rate_n_flags,
+ band);
+ break;
+ }
+}
+
+void iwl_mld_handle_tx_resp_notif(struct iwl_mld *mld,
+ struct iwl_rx_packet *pkt)
+{
+ struct iwl_tx_resp *tx_resp = (void *)pkt->data;
+ int txq_id = le16_to_cpu(tx_resp->tx_queue);
+ struct agg_tx_status *agg_status = &tx_resp->status;
+ u32 status = le16_to_cpu(agg_status->status);
+ u32 pkt_len = iwl_rx_packet_payload_len(pkt);
+ size_t notif_size = sizeof(*tx_resp) + sizeof(u32);
+ int sta_id = IWL_TX_RES_GET_RA(tx_resp->ra_tid);
+ int tid = IWL_TX_RES_GET_TID(tx_resp->ra_tid);
+ struct ieee80211_link_sta *link_sta;
+ struct iwl_mld_sta *mld_sta;
+ u16 ssn;
+ struct sk_buff_head skbs;
+ u8 skb_freed = 0;
+ bool mgmt = false;
+ bool tx_failure = (status & TX_STATUS_MSK) != TX_STATUS_SUCCESS;
+
+ if (IWL_FW_CHECK(mld, tx_resp->frame_count != 1,
+ "Invalid tx_resp notif frame_count (%d)\n",
+ tx_resp->frame_count))
+ return;
+
+ /* validate the size of the variable part of the notif */
+ if (IWL_FW_CHECK(mld, notif_size != pkt_len,
+ "Invalid tx_resp notif size (expected=%zu got=%u)\n",
+ notif_size, pkt_len))
+ return;
+
+ ssn = le32_to_cpup((__le32 *)agg_status +
+ tx_resp->frame_count) & 0xFFFF;
+
+ __skb_queue_head_init(&skbs);
+
+ /* we can free until ssn % q.n_bd not inclusive */
+ iwl_trans_reclaim(mld->trans, txq_id, ssn, &skbs, false);
+
+ while (!skb_queue_empty(&skbs)) {
+ struct sk_buff *skb = __skb_dequeue(&skbs);
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_hdr *hdr = (void *)skb->data;
+
+ skb_freed++;
+
+ iwl_trans_free_tx_cmd(mld->trans, info->driver_data[1]);
+
+ memset(&info->status, 0, sizeof(info->status));
+
+ info->flags &= ~(IEEE80211_TX_STAT_ACK | IEEE80211_TX_STAT_TX_FILTERED);
+
+ /* inform mac80211 about what happened with the frame */
+ switch (status & TX_STATUS_MSK) {
+ case TX_STATUS_SUCCESS:
+ case TX_STATUS_DIRECT_DONE:
+ info->flags |= IEEE80211_TX_STAT_ACK;
+ break;
+ default:
+ break;
+ }
+
+ /* If we are freeing multiple frames, mark all the frames
+ * but the first one as acked, since they were acknowledged
+ * before
+ */
+ if (skb_freed > 1)
+ info->flags |= IEEE80211_TX_STAT_ACK;
+
+ if (tx_failure) {
+ enum iwl_fw_ini_time_point tp =
+ IWL_FW_INI_TIME_POINT_TX_FAILED;
+
+ if (ieee80211_is_action(hdr->frame_control))
+ tp = IWL_FW_INI_TIME_POINT_TX_WFD_ACTION_FRAME_FAILED;
+ else if (ieee80211_is_mgmt(hdr->frame_control))
+ mgmt = true;
+
+ iwl_dbg_tlv_time_point(&mld->fwrt, tp, NULL);
+ }
+
+ iwl_mld_hwrate_to_tx_rate(le32_to_cpu(tx_resp->initial_rate),
+ info);
+
+ if (likely(!iwl_mld_time_sync_frame(mld, skb, hdr->addr1)))
+ ieee80211_tx_status_skb(mld->hw, skb);
+ }
+
+ IWL_DEBUG_TX_REPLY(mld,
+ "TXQ %d status 0x%08x ssn=%d initial_rate 0x%x retries %d\n",
+ txq_id, status, ssn, le32_to_cpu(tx_resp->initial_rate),
+ tx_resp->failure_frame);
+
+ if (tx_failure && mgmt)
+ iwl_mld_toggle_tx_ant(mld, &mld->mgmt_tx_ant);
+
+ if (IWL_FW_CHECK(mld, sta_id >= mld->fw->ucode_capa.num_stations,
+ "Got invalid sta_id (%d)\n", sta_id))
+ return;
+
+ rcu_read_lock();
+
+ link_sta = rcu_dereference(mld->fw_id_to_link_sta[sta_id]);
+ if (!link_sta) {
+ /* This can happen if the TX cmd was sent before pre_rcu_remove
+ * but the TX response was received after
+ */
+ IWL_DEBUG_TX_REPLY(mld,
+ "Got valid sta_id (%d) but sta is NULL\n",
+ sta_id);
+ goto out;
+ }
+
+ if (IS_ERR(link_sta))
+ goto out;
+
+ mld_sta = iwl_mld_sta_from_mac80211(link_sta->sta);
+
+ if (tx_failure && mld_sta->sta_state < IEEE80211_STA_AUTHORIZED)
+ iwl_mld_toggle_tx_ant(mld, &mld_sta->data_tx_ant);
+
+ if (tid < IWL_MAX_TID_COUNT)
+ iwl_mld_count_mpdu_tx(link_sta, 1);
+
+out:
+ rcu_read_unlock();
+}
+
+static void iwl_mld_tx_reclaim_txq(struct iwl_mld *mld, int txq, int index,
+ bool in_flush)
+{
+ struct sk_buff_head reclaimed_skbs;
+
+ __skb_queue_head_init(&reclaimed_skbs);
+
+ iwl_trans_reclaim(mld->trans, txq, index, &reclaimed_skbs, in_flush);
+
+ while (!skb_queue_empty(&reclaimed_skbs)) {
+ struct sk_buff *skb = __skb_dequeue(&reclaimed_skbs);
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+
+ iwl_trans_free_tx_cmd(mld->trans, info->driver_data[1]);
+
+ memset(&info->status, 0, sizeof(info->status));
+
+ /* Packet was transmitted successfully; failures come as single
+ * frames because, before failing a frame, the firmware transmits
+ * it without aggregation at least once.
+ */
+ if (!in_flush)
+ info->flags |= IEEE80211_TX_STAT_ACK;
+ else
+ info->flags &= ~IEEE80211_TX_STAT_ACK;
+
+ ieee80211_tx_status_skb(mld->hw, skb);
+ }
+}
+
+int iwl_mld_flush_link_sta_txqs(struct iwl_mld *mld, u32 fw_sta_id)
+{
+ struct iwl_tx_path_flush_cmd_rsp *rsp;
+ struct iwl_tx_path_flush_cmd flush_cmd = {
+ .sta_id = cpu_to_le32(fw_sta_id),
+ .tid_mask = cpu_to_le16(0xffff),
+ };
+ struct iwl_host_cmd cmd = {
+ .id = TXPATH_FLUSH,
+ .len = { sizeof(flush_cmd), },
+ .data = { &flush_cmd, },
+ .flags = CMD_WANT_SKB,
+ };
+ int ret, num_flushed_queues;
+ u32 resp_len;
+
+ IWL_DEBUG_TX_QUEUES(mld, "flush for sta id %d tid mask 0x%x\n",
+ fw_sta_id, 0xffff);
+
+ ret = iwl_mld_send_cmd(mld, &cmd);
+ if (ret) {
+ IWL_ERR(mld, "Failed to send flush command (%d)\n", ret);
+ return ret;
+ }
+
+ resp_len = iwl_rx_packet_payload_len(cmd.resp_pkt);
+ if (IWL_FW_CHECK(mld, resp_len != sizeof(*rsp),
+ "Invalid TXPATH_FLUSH response len: %d\n",
+ resp_len)) {
+ ret = -EIO;
+ goto free_rsp;
+ }
+
+ rsp = (void *)cmd.resp_pkt->data;
+
+ if (IWL_FW_CHECK(mld, le16_to_cpu(rsp->sta_id) != fw_sta_id,
+ "sta_id %d != rsp_sta_id %d\n", fw_sta_id,
+ le16_to_cpu(rsp->sta_id))) {
+ ret = -EIO;
+ goto free_rsp;
+ }
+
+ num_flushed_queues = le16_to_cpu(rsp->num_flushed_queues);
+ if (IWL_FW_CHECK(mld, num_flushed_queues > IWL_TX_FLUSH_QUEUE_RSP,
+ "num_flushed_queues %d\n", num_flushed_queues)) {
+ ret = -EIO;
+ goto free_rsp;
+ }
+
+ for (int i = 0; i < num_flushed_queues; i++) {
+ struct iwl_flush_queue_info *queue_info = &rsp->queues[i];
+ int read_after = le16_to_cpu(queue_info->read_after_flush);
+ int txq_id = le16_to_cpu(queue_info->queue_num);
+
+ if (IWL_FW_CHECK(mld,
+ txq_id >= ARRAY_SIZE(mld->fw_id_to_txq),
+ "Invalid txq id %d\n", txq_id))
+ continue;
+
+ IWL_DEBUG_TX_QUEUES(mld,
+ "tid %d txq_id %d read-before %d read-after %d\n",
+ le16_to_cpu(queue_info->tid), txq_id,
+ le16_to_cpu(queue_info->read_before_flush),
+ read_after);
+
+ iwl_mld_tx_reclaim_txq(mld, txq_id, read_after, true);
+ }
+
+free_rsp:
+ iwl_free_resp(&cmd);
+ return ret;
+}
+
+int iwl_mld_ensure_queue(struct iwl_mld *mld, struct ieee80211_txq *txq)
+{
+ struct iwl_mld_txq *mld_txq = iwl_mld_txq_from_mac80211(txq);
+ int ret;
+
+ lockdep_assert_wiphy(mld->wiphy);
+
+ if (likely(mld_txq->status.allocated))
+ return 0;
+
+ ret = iwl_mld_add_txq(mld, txq);
+
+ spin_lock_bh(&mld->add_txqs_lock);
+ if (!list_empty(&mld_txq->list))
+ list_del_init(&mld_txq->list);
+ spin_unlock_bh(&mld->add_txqs_lock);
+
+ return ret;
+}
+
+int iwl_mld_update_sta_txqs(struct iwl_mld *mld,
+ struct ieee80211_sta *sta,
+ u32 old_sta_mask, u32 new_sta_mask)
+{
+ struct iwl_scd_queue_cfg_cmd cmd = {
+ .operation = cpu_to_le32(IWL_SCD_QUEUE_MODIFY),
+ .u.modify.old_sta_mask = cpu_to_le32(old_sta_mask),
+ .u.modify.new_sta_mask = cpu_to_le32(new_sta_mask),
+ };
+
+ lockdep_assert_wiphy(mld->wiphy);
+
+ for (int tid = 0; tid <= IWL_MAX_TID_COUNT; tid++) {
+ struct ieee80211_txq *txq =
+ sta->txq[tid != IWL_MAX_TID_COUNT ?
+ tid : IEEE80211_NUM_TIDS];
+ struct iwl_mld_txq *mld_txq =
+ iwl_mld_txq_from_mac80211(txq);
+ int ret;
+
+ if (!mld_txq->status.allocated)
+ continue;
+
+ if (tid == IWL_MAX_TID_COUNT)
+ cmd.u.modify.tid = cpu_to_le32(IWL_MGMT_TID);
+ else
+ cmd.u.modify.tid = cpu_to_le32(tid);
+
+ ret = iwl_mld_send_cmd_pdu(mld,
+ WIDE_ID(DATA_PATH_GROUP,
+ SCD_QUEUE_CONFIG_CMD),
+ &cmd);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+void iwl_mld_handle_compressed_ba_notif(struct iwl_mld *mld,
+ struct iwl_rx_packet *pkt)
+{
+ struct iwl_compressed_ba_notif *ba_res = (void *)pkt->data;
+ u32 pkt_len = iwl_rx_packet_payload_len(pkt);
+ u16 tfd_cnt = le16_to_cpu(ba_res->tfd_cnt);
+ u8 sta_id = ba_res->sta_id;
+ struct ieee80211_link_sta *link_sta;
+
+ if (!tfd_cnt)
+ return;
+
+ if (IWL_FW_CHECK(mld, struct_size(ba_res, tfd, tfd_cnt) > pkt_len,
+ "Short BA notif (tfd_cnt=%d, size:0x%x)\n",
+ tfd_cnt, pkt_len))
+ return;
+
+ IWL_DEBUG_TX_REPLY(mld,
+ "BA notif received from sta_id=%d, flags=0x%x, sent:%d, acked:%d\n",
+ sta_id, le32_to_cpu(ba_res->flags),
+ le16_to_cpu(ba_res->txed),
+ le16_to_cpu(ba_res->done));
+
+ for (int i = 0; i < tfd_cnt; i++) {
+ struct iwl_compressed_ba_tfd *ba_tfd = &ba_res->tfd[i];
+ int txq_id = le16_to_cpu(ba_tfd->q_num);
+ int index = le16_to_cpu(ba_tfd->tfd_index);
+
+ if (IWL_FW_CHECK(mld,
+ txq_id >= ARRAY_SIZE(mld->fw_id_to_txq),
+ "Invalid txq id %d\n", txq_id))
+ continue;
+
+ iwl_mld_tx_reclaim_txq(mld, txq_id, index, false);
+ }
+
+ if (IWL_FW_CHECK(mld, sta_id >= mld->fw->ucode_capa.num_stations,
+ "Got invalid sta_id (%d)\n", sta_id))
+ return;
+
+ rcu_read_lock();
+
+ link_sta = rcu_dereference(mld->fw_id_to_link_sta[sta_id]);
+ if (IWL_FW_CHECK(mld, IS_ERR_OR_NULL(link_sta),
+ "Got valid sta_id (%d) but link_sta is NULL\n",
+ sta_id))
+ goto out;
+
+ iwl_mld_count_mpdu_tx(link_sta, le16_to_cpu(ba_res->txed));
+out:
+ rcu_read_unlock();
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/tx.h b/drivers/net/wireless/intel/iwlwifi/mld/tx.h
new file mode 100644
index 000000000000..520f15f9d33c
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mld/tx.h
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2024 Intel Corporation
+ */
+#ifndef __iwl_mld_tx_h__
+#define __iwl_mld_tx_h__
+
+#include "mld.h"
+
+#define IWL_MLD_INVALID_QUEUE 0xFFFF
+#define IWL_MLD_INVALID_DROP_TX 0xFFFE
+
+/**
+ * struct iwl_mld_txq - TX Queue data
+ *
+ * @fw_id: the fw id of this txq. Only valid when &status.allocated is true.
+ * @status: bitmap of the txq status
+ * @status.allocated: Indicates that the queue was allocated.
+ * @status.stop_full: Indicates that the queue is full and should stop TXing.
+ * @list: list pointer, for &mld::txqs_to_add
+ * @tx_request: makes sure that if there are multiple threads that want to tx
+ * from this txq, only one of them will do all the TXing.
+ * This is needed to avoid spinning the trans txq lock, which is expensive
+ */
+struct iwl_mld_txq {
+ /* Add here fields that need cleanup on restart */
+ struct_group(zeroed_on_hw_restart,
+ u16 fw_id;
+ struct {
+ u8 allocated:1;
+ u8 stop_full:1;
+ } status;
+ );
+ struct list_head list;
+ atomic_t tx_request;
+ /* And here fields that survive a fw restart */
+};
+
+static inline void iwl_mld_init_txq(struct iwl_mld_txq *mld_txq)
+{
+ INIT_LIST_HEAD(&mld_txq->list);
+ atomic_set(&mld_txq->tx_request, 0);
+}
+
+static inline struct iwl_mld_txq *
+iwl_mld_txq_from_mac80211(struct ieee80211_txq *txq)
+{
+ return (void *)txq->drv_priv;
+}
+
+void iwl_mld_add_txqs_wk(struct wiphy *wiphy, struct wiphy_work *wk);
+void iwl_mld_remove_txq(struct iwl_mld *mld, struct ieee80211_txq *txq);
+void iwl_mld_add_txq_list(struct iwl_mld *mld);
+void
+iwl_mld_free_txq(struct iwl_mld *mld, u32 fw_sta_mask, u32 tid, u32 queue_id);
+void iwl_mld_tx_from_txq(struct iwl_mld *mld, struct ieee80211_txq *txq);
+void iwl_mld_handle_tx_resp_notif(struct iwl_mld *mld,
+ struct iwl_rx_packet *pkt);
+int iwl_mld_flush_link_sta_txqs(struct iwl_mld *mld, u32 fw_sta_id);
+int iwl_mld_ensure_queue(struct iwl_mld *mld, struct ieee80211_txq *txq);
+
+int iwl_mld_update_sta_txqs(struct iwl_mld *mld,
+ struct ieee80211_sta *sta,
+ u32 old_sta_mask, u32 new_sta_mask);
+
+void iwl_mld_handle_compressed_ba_notif(struct iwl_mld *mld,
+ struct iwl_rx_packet *pkt);
+void iwl_mld_toggle_tx_ant(struct iwl_mld *mld, u8 *ant);
+
+u8 iwl_mld_get_lowest_rate(struct iwl_mld *mld,
+ struct ieee80211_tx_info *info,
+ struct ieee80211_vif *vif);
+
+void iwl_mld_tx_skb(struct iwl_mld *mld, struct sk_buff *skb,
+ struct ieee80211_txq *txq);
+
+#endif /* __iwl_mld_tx_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
index 129b6bdf9ef9..3e8b7168af01 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
@@ -3092,8 +3092,14 @@ static void iwl_mvm_d3_disconnect_iter(void *data, u8 *mac,
ieee80211_resume_disconnect(vif);
}
-static bool iwl_mvm_check_rt_status(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif)
+enum rt_status {
+ FW_ALIVE,
+ FW_NEEDS_RESET,
+ FW_ERROR,
+};
+
+static enum rt_status iwl_mvm_check_rt_status(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif)
{
u32 err_id;
@@ -3101,29 +3107,35 @@ static bool iwl_mvm_check_rt_status(struct iwl_mvm *mvm,
if (iwl_fwrt_read_err_table(mvm->trans,
mvm->trans->dbg.lmac_error_event_table[0],
&err_id)) {
- if (err_id == RF_KILL_INDICATOR_FOR_WOWLAN && vif) {
- struct cfg80211_wowlan_wakeup wakeup = {
- .rfkill_release = true,
- };
- ieee80211_report_wowlan_wakeup(vif, &wakeup,
- GFP_KERNEL);
+ if (err_id == RF_KILL_INDICATOR_FOR_WOWLAN) {
+ IWL_WARN(mvm, "Rfkill was toggled during suspend\n");
+ if (vif) {
+ struct cfg80211_wowlan_wakeup wakeup = {
+ .rfkill_release = true,
+ };
+
+ ieee80211_report_wowlan_wakeup(vif, &wakeup,
+ GFP_KERNEL);
+ }
+
+ return FW_NEEDS_RESET;
}
- return true;
+ return FW_ERROR;
}
/* check if we have lmac2 set and check for error */
if (iwl_fwrt_read_err_table(mvm->trans,
mvm->trans->dbg.lmac_error_event_table[1],
NULL))
- return true;
+ return FW_ERROR;
/* check for umac error */
if (iwl_fwrt_read_err_table(mvm->trans,
mvm->trans->dbg.umac_error_event_table,
NULL))
- return true;
+ return FW_ERROR;
- return false;
+ return FW_ALIVE;
}
/*
@@ -3376,7 +3388,7 @@ static bool iwl_mvm_wait_d3_notif(struct iwl_notif_wait_data *notif_wait,
break;
}
case WIDE_ID(PROT_OFFLOAD_GROUP, D3_END_NOTIFICATION): {
- struct iwl_mvm_d3_end_notif *notif = (void *)pkt->data;
+ struct iwl_d3_end_notif *notif = (void *)pkt->data;
d3_data->d3_end_flags = __le32_to_cpu(notif->flags);
d3_data->notif_received |= IWL_D3_NOTIF_D3_END_NOTIF;
@@ -3492,6 +3504,7 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
bool d0i3_first = fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_D0I3_END_FIRST);
bool resume_notif_based = iwl_mvm_d3_resume_notif_based(mvm);
+ enum rt_status rt_status;
bool keep = false;
mutex_lock(&mvm->mutex);
@@ -3515,14 +3528,19 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
iwl_fw_dbg_read_d3_debug_data(&mvm->fwrt);
- if (iwl_mvm_check_rt_status(mvm, vif)) {
- IWL_ERR(mvm, "FW Error occurred during suspend. Restarting.\n");
+ rt_status = iwl_mvm_check_rt_status(mvm, vif);
+ if (rt_status != FW_ALIVE) {
set_bit(STATUS_FW_ERROR, &mvm->trans->status);
- iwl_mvm_dump_nic_error_log(mvm);
- iwl_dbg_tlv_time_point(&mvm->fwrt,
- IWL_FW_INI_TIME_POINT_FW_ASSERT, NULL);
- iwl_fw_dbg_collect_desc(&mvm->fwrt, &iwl_dump_desc_assert,
- false, 0);
+ if (rt_status == FW_ERROR) {
+ IWL_ERR(mvm, "FW Error occurred during suspend. Restarting.\n");
+ iwl_mvm_dump_nic_error_log(mvm);
+ iwl_dbg_tlv_time_point(&mvm->fwrt,
+ IWL_FW_INI_TIME_POINT_FW_ASSERT,
+ NULL);
+ iwl_fw_dbg_collect_desc(&mvm->fwrt,
+ &iwl_dump_desc_assert,
+ false, 0);
+ }
ret = 1;
goto err;
}
@@ -3679,6 +3697,7 @@ int iwl_mvm_fast_resume(struct iwl_mvm *mvm)
.notif_expected =
IWL_D3_NOTIF_D3_END_NOTIF,
};
+ enum rt_status rt_status;
int ret;
lockdep_assert_held(&mvm->mutex);
@@ -3688,14 +3707,20 @@ int iwl_mvm_fast_resume(struct iwl_mvm *mvm)
mvm->last_reset_or_resume_time_jiffies = jiffies;
iwl_fw_dbg_read_d3_debug_data(&mvm->fwrt);
- if (iwl_mvm_check_rt_status(mvm, NULL)) {
- IWL_ERR(mvm, "FW Error occurred during suspend. Restarting.\n");
+ rt_status = iwl_mvm_check_rt_status(mvm, NULL);
+ if (rt_status != FW_ALIVE) {
set_bit(STATUS_FW_ERROR, &mvm->trans->status);
- iwl_mvm_dump_nic_error_log(mvm);
- iwl_dbg_tlv_time_point(&mvm->fwrt,
- IWL_FW_INI_TIME_POINT_FW_ASSERT, NULL);
- iwl_fw_dbg_collect_desc(&mvm->fwrt, &iwl_dump_desc_assert,
- false, 0);
+ if (rt_status == FW_ERROR) {
+ IWL_ERR(mvm,
+ "iwl_mvm_check_rt_status failed, device is gone during suspend\n");
+ iwl_mvm_dump_nic_error_log(mvm);
+ iwl_dbg_tlv_time_point(&mvm->fwrt,
+ IWL_FW_INI_TIME_POINT_FW_ASSERT,
+ NULL);
+ iwl_fw_dbg_collect_desc(&mvm->fwrt,
+ &iwl_dump_desc_assert,
+ false, 0);
+ }
mvm->trans->state = IWL_TRANS_NO_FW;
ret = -ENODEV;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
index 83e3c1160362..671d3f8d79c1 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018-2023 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2023, 2025 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -542,7 +542,7 @@ static ssize_t iwl_dbgfs_tas_get_status_read(struct file *file,
size_t count, loff_t *ppos)
{
struct iwl_mvm *mvm = file->private_data;
- struct iwl_mvm_tas_status_resp *rsp = NULL;
+ struct iwl_tas_status_resp *rsp = NULL;
static const size_t bufsz = 1024;
char *buff, *pos, *endpos;
const char * const tas_dis_reason[TAS_DISABLED_REASON_MAX] = {
@@ -552,6 +552,8 @@ static ssize_t iwl_dbgfs_tas_get_status_read(struct file *file,
"Due To SAR Limit Less Than 6 dBm",
[TAS_DISABLED_REASON_INVALID] =
"N/A",
+ [TAS_DISABLED_DUE_TO_TABLE_SOURCE_INVALID] =
+ "Due to table source invalid",
};
const char * const tas_current_status[TAS_DYNA_STATUS_MAX] = {
[TAS_DYNA_INACTIVE] = "INACTIVE",
@@ -598,23 +600,19 @@ static ssize_t iwl_dbgfs_tas_get_status_read(struct file *file,
pos += scnprintf(pos, endpos - pos, "TAS Conclusion:\n");
for (i = 0; i < rsp->in_dual_radio + 1; i++) {
- if (rsp->tas_status_mac[i].band != TAS_LMAC_BAND_INVALID &&
- rsp->tas_status_mac[i].dynamic_status & BIT(TAS_DYNA_ACTIVE)) {
+ if (rsp->tas_status_mac[i].dynamic_status &
+ BIT(TAS_DYNA_ACTIVE)) {
pos += scnprintf(pos, endpos - pos, "\tON for ");
switch (rsp->tas_status_mac[i].band) {
- case TAS_LMAC_BAND_HB:
+ case PHY_BAND_5:
pos += scnprintf(pos, endpos - pos, "HB\n");
break;
- case TAS_LMAC_BAND_LB:
+ case PHY_BAND_24:
pos += scnprintf(pos, endpos - pos, "LB\n");
break;
- case TAS_LMAC_BAND_UHB:
+ case PHY_BAND_6:
pos += scnprintf(pos, endpos - pos, "UHB\n");
break;
- case TAS_LMAC_BAND_INVALID:
- pos += scnprintf(pos, endpos - pos,
- "INVALID BAND\n");
- break;
default:
pos += scnprintf(pos, endpos - pos,
"Unsupported band (%d)\n",
@@ -668,20 +666,16 @@ static ssize_t iwl_dbgfs_tas_get_status_read(struct file *file,
pos += scnprintf(pos, endpos - pos, "TAS status for ");
switch (rsp->tas_status_mac[i].band) {
- case TAS_LMAC_BAND_HB:
+ case PHY_BAND_5:
pos += scnprintf(pos, endpos - pos, "High band\n");
break;
- case TAS_LMAC_BAND_LB:
+ case PHY_BAND_24:
pos += scnprintf(pos, endpos - pos, "Low band\n");
break;
- case TAS_LMAC_BAND_UHB:
+ case PHY_BAND_6:
pos += scnprintf(pos, endpos - pos,
"Ultra high band\n");
break;
- case TAS_LMAC_BAND_INVALID:
- pos += scnprintf(pos, endpos - pos,
- "INVALID band\n");
- break;
default:
pos += scnprintf(pos, endpos - pos,
"Unsupported band (%d)\n",
@@ -704,11 +698,9 @@ static ssize_t iwl_dbgfs_tas_get_status_read(struct file *file,
pos += scnprintf(pos, endpos - pos, "Dynamic status:\n");
dyn_status = (rsp->tas_status_mac[i].dynamic_status);
- for_each_set_bit(tmp, &dyn_status, sizeof(dyn_status)) {
- if (tmp >= 0 && tmp < TAS_DYNA_STATUS_MAX)
- pos += scnprintf(pos, endpos - pos,
- "\t%s (%d)\n",
- tas_current_status[tmp], tmp);
+ for_each_set_bit(tmp, &dyn_status, TAS_DYNA_STATUS_MAX) {
+ pos += scnprintf(pos, endpos - pos, "\t%s (%d)\n",
+ tas_current_status[tmp], tmp);
}
pos += scnprintf(pos, endpos - pos,
@@ -1479,6 +1471,13 @@ static ssize_t iwl_dbgfs_fw_dbg_clear_write(struct iwl_mvm *mvm,
if (mvm->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_9000)
return -EOPNOTSUPP;
+ /*
+ * If the firmware is not running, silently succeed since there is
+ * no data to clear.
+ */
+ if (!iwl_mvm_firmware_running(mvm))
+ return count;
+
mutex_lock(&mvm->mutex);
iwl_fw_dbg_clear_monitor_buf(&mvm->fwrt);
mutex_unlock(&mvm->mutex);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
index b26141c30c61..a493ef6bedc3 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2015-2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2024 Intel Corporation
+ * Copyright (C) 2018-2025 Intel Corporation
*/
#include <linux/etherdevice.h>
#include <linux/math64.h>
@@ -46,107 +46,6 @@ struct iwl_mvm_ftm_iter_data {
u8 *tk;
};
-int iwl_mvm_ftm_add_pasn_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
- u8 *addr, u32 cipher, u8 *tk, u32 tk_len,
- u8 *hltk, u32 hltk_len)
-{
- struct iwl_mvm_ftm_pasn_entry *pasn = kzalloc(sizeof(*pasn),
- GFP_KERNEL);
- u32 expected_tk_len;
-
- lockdep_assert_held(&mvm->mutex);
-
- if (!pasn)
- return -ENOBUFS;
-
- iwl_mvm_ftm_remove_pasn_sta(mvm, addr);
-
- pasn->cipher = iwl_mvm_cipher_to_location_cipher(cipher);
-
- switch (pasn->cipher) {
- case IWL_LOCATION_CIPHER_CCMP_128:
- case IWL_LOCATION_CIPHER_GCMP_128:
- expected_tk_len = WLAN_KEY_LEN_CCMP;
- break;
- case IWL_LOCATION_CIPHER_GCMP_256:
- expected_tk_len = WLAN_KEY_LEN_GCMP_256;
- break;
- default:
- goto out;
- }
-
- /*
- * If associated to this AP and already have security context,
- * the TK is already configured for this station, so it
- * shouldn't be set again here.
- */
- if (vif->cfg.assoc) {
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- struct ieee80211_bss_conf *link_conf;
- unsigned int link_id;
- struct ieee80211_sta *sta;
- u8 sta_id;
-
- rcu_read_lock();
- for_each_vif_active_link(vif, link_conf, link_id) {
- if (memcmp(addr, link_conf->bssid, ETH_ALEN))
- continue;
-
- sta_id = mvmvif->link[link_id]->ap_sta_id;
- sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
- if (!IS_ERR_OR_NULL(sta) && sta->mfp)
- expected_tk_len = 0;
- break;
- }
- rcu_read_unlock();
- }
-
- if (tk_len != expected_tk_len ||
- (hltk_len && hltk_len != sizeof(pasn->hltk))) {
- IWL_ERR(mvm, "Invalid key length: tk_len=%u hltk_len=%u\n",
- tk_len, hltk_len);
- goto out;
- }
-
- if (!expected_tk_len && !hltk_len) {
- IWL_ERR(mvm, "TK and HLTK not set\n");
- goto out;
- }
-
- memcpy(pasn->addr, addr, sizeof(pasn->addr));
-
- if (hltk_len) {
- memcpy(pasn->hltk, hltk, sizeof(pasn->hltk));
- pasn->flags |= IWL_MVM_PASN_FLAG_HAS_HLTK;
- }
-
- if (tk && tk_len)
- memcpy(pasn->tk, tk, sizeof(pasn->tk));
-
- list_add_tail(&pasn->list, &mvm->ftm_initiator.pasn_list);
- return 0;
-out:
- kfree(pasn);
- return -EINVAL;
-}
-
-void iwl_mvm_ftm_remove_pasn_sta(struct iwl_mvm *mvm, u8 *addr)
-{
- struct iwl_mvm_ftm_pasn_entry *entry, *prev;
-
- lockdep_assert_held(&mvm->mutex);
-
- list_for_each_entry_safe(entry, prev, &mvm->ftm_initiator.pasn_list,
- list) {
- if (memcmp(entry->addr, addr, sizeof(entry->addr)))
- continue;
-
- list_del(&entry->list);
- kfree(entry);
- return;
- }
-}
-
static void iwl_mvm_ftm_reset(struct iwl_mvm *mvm)
{
struct iwl_mvm_loc_entry *e, *t;
@@ -773,7 +672,11 @@ iwl_mvm_ftm_set_secured_ranging(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
target.bssid = bssid;
target.cipher = cipher;
+ target.tk = NULL;
ieee80211_iter_keys(mvm->hw, vif, iter, &target);
+
+ if (!WARN_ON(!target.tk))
+ memcpy(tk, target.tk, TK_11AZ_LEN);
} else {
memcpy(tk, entry->tk, sizeof(entry->tk));
}
@@ -949,7 +852,7 @@ static int iwl_mvm_ftm_start_v13(struct iwl_mvm *mvm,
static int
iwl_mvm_ftm_put_target_v10(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct cfg80211_pmsr_request_peer *peer,
- struct iwl_tof_range_req_ap_entry_v10 *target)
+ struct iwl_tof_range_req_ap_entry *target)
{
u32 i2r_max_sts, flags;
int ret;
@@ -1021,7 +924,7 @@ static int iwl_mvm_ftm_start_v14(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct cfg80211_pmsr_request *req)
{
- struct iwl_tof_range_req_cmd_v14 cmd;
+ struct iwl_tof_range_req_cmd cmd;
struct iwl_host_cmd hcmd = {
.id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
.dataflags[0] = IWL_HCMD_DFL_DUP,
@@ -1035,7 +938,7 @@ static int iwl_mvm_ftm_start_v14(struct iwl_mvm *mvm,
for (i = 0; i < cmd.num_of_ap; i++) {
struct cfg80211_pmsr_request_peer *peer = &req->peers[i];
- struct iwl_tof_range_req_ap_entry_v10 *target = &cmd.ap[i];
+ struct iwl_tof_range_req_ap_entry *target = &cmd.ap[i];
err = iwl_mvm_ftm_put_target_v10(mvm, vif, peer, target);
if (err)
@@ -1301,7 +1204,7 @@ static void iwl_mvm_debug_range_resp(struct iwl_mvm *mvm, u8 index,
static void
iwl_mvm_ftm_pasn_update_pn(struct iwl_mvm *mvm,
- struct iwl_tof_range_rsp_ap_entry_ntfy_v6 *fw_ap)
+ struct iwl_tof_range_rsp_ap_entry_ntfy *fw_ap)
{
struct iwl_mvm_ftm_pasn_entry *entry;
@@ -1339,7 +1242,7 @@ static bool iwl_mvm_ftm_resp_size_validation(u8 ver, unsigned int pkt_len)
switch (ver) {
case 9:
case 8:
- return pkt_len == sizeof(struct iwl_tof_range_rsp_ntfy_v8);
+ return pkt_len == sizeof(struct iwl_tof_range_rsp_ntfy);
case 7:
return pkt_len == sizeof(struct iwl_tof_range_rsp_ntfy_v7);
case 6:
@@ -1359,7 +1262,7 @@ void iwl_mvm_ftm_range_resp(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
struct iwl_tof_range_rsp_ntfy_v5 *fw_resp_v5 = (void *)pkt->data;
struct iwl_tof_range_rsp_ntfy_v6 *fw_resp_v6 = (void *)pkt->data;
struct iwl_tof_range_rsp_ntfy_v7 *fw_resp_v7 = (void *)pkt->data;
- struct iwl_tof_range_rsp_ntfy_v8 *fw_resp_v8 = (void *)pkt->data;
+ struct iwl_tof_range_rsp_ntfy *fw_resp_v8 = (void *)pkt->data;
int i;
bool new_api = fw_has_api(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_API_FTM_NEW_RANGE_REQ);
@@ -1395,9 +1298,9 @@ void iwl_mvm_ftm_range_resp(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
IWL_DEBUG_INFO(mvm, "request id: %lld, num of entries: %u\n",
mvm->ftm_initiator.req->cookie, num_of_aps);
- for (i = 0; i < num_of_aps && i < IWL_MVM_TOF_MAX_APS; i++) {
+ for (i = 0; i < num_of_aps && i < IWL_TOF_MAX_APS; i++) {
struct cfg80211_pmsr_result result = {};
- struct iwl_tof_range_rsp_ap_entry_ntfy_v6 *fw_ap;
+ struct iwl_tof_range_rsp_ap_entry_ntfy *fw_ap;
int peer_idx;
if (new_api) {
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c
index e6e468e81ab3..83f6e508a094 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c
@@ -324,92 +324,6 @@ static void iwl_mvm_resp_del_pasn_sta(struct iwl_mvm *mvm,
kfree(sta);
}
-int iwl_mvm_ftm_respoder_add_pasn_sta(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif,
- u8 *addr, u32 cipher, u8 *tk, u32 tk_len,
- u8 *hltk, u32 hltk_len)
-{
- int ret;
- struct iwl_mvm_pasn_sta *sta = NULL;
- struct iwl_mvm_pasn_hltk_data hltk_data = {
- .addr = addr,
- .hltk = hltk,
- };
- struct iwl_mvm_pasn_hltk_data *hltk_data_ptr = NULL;
-
- u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw,
- WIDE_ID(LOCATION_GROUP, TOF_RESPONDER_DYN_CONFIG_CMD),
- 2);
-
- lockdep_assert_held(&mvm->mutex);
-
- if (cmd_ver < 3) {
- IWL_ERR(mvm, "Adding PASN station not supported by FW\n");
- return -EOPNOTSUPP;
- }
-
- if ((!hltk || !hltk_len) && (!tk || !tk_len)) {
- IWL_ERR(mvm, "TK and HLTK not set\n");
- return -EINVAL;
- }
-
- if (hltk && hltk_len) {
- if (!fw_has_capa(&mvm->fw->ucode_capa,
- IWL_UCODE_TLV_CAPA_SECURE_LTF_SUPPORT)) {
- IWL_ERR(mvm, "No support for secure LTF measurement\n");
- return -EINVAL;
- }
-
- hltk_data.cipher = iwl_mvm_cipher_to_location_cipher(cipher);
- if (hltk_data.cipher == IWL_LOCATION_CIPHER_INVALID) {
- IWL_ERR(mvm, "invalid cipher: %u\n", cipher);
- return -EINVAL;
- }
-
- hltk_data_ptr = &hltk_data;
- }
-
- if (tk && tk_len) {
- sta = kzalloc(sizeof(*sta) + tk_len, GFP_KERNEL);
- if (!sta)
- return -ENOBUFS;
-
- ret = iwl_mvm_add_pasn_sta(mvm, vif, &sta->int_sta, addr,
- cipher, tk, tk_len, &sta->keyconf);
- if (ret) {
- kfree(sta);
- return ret;
- }
-
- memcpy(sta->addr, addr, ETH_ALEN);
- list_add_tail(&sta->list, &mvm->resp_pasn_list);
- }
-
- ret = iwl_mvm_ftm_responder_dyn_cfg_v3(mvm, vif, NULL, hltk_data_ptr);
- if (ret && sta)
- iwl_mvm_resp_del_pasn_sta(mvm, vif, sta);
-
- return ret;
-}
-
-int iwl_mvm_ftm_resp_remove_pasn_sta(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif, u8 *addr)
-{
- struct iwl_mvm_pasn_sta *sta, *prev;
-
- lockdep_assert_held(&mvm->mutex);
-
- list_for_each_entry_safe(sta, prev, &mvm->resp_pasn_list, list) {
- if (!memcmp(sta->addr, addr, ETH_ALEN)) {
- iwl_mvm_resp_del_pasn_sta(mvm, vif, sta);
- return 0;
- }
- }
-
- IWL_ERR(mvm, "FTM: PASN station %pM not found\n", addr);
- return -EINVAL;
-}
-
int iwl_mvm_ftm_start_responder(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct ieee80211_bss_conf *bss_conf)
{
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
index df49dd2e2026..2b5a62604fc4 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018-2024 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2025 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -422,6 +422,8 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
/* if reached this point, Alive notification was received */
iwl_mei_alive_notif(true);
+ iwl_trans_fw_alive(mvm->trans, alive_data.scd_base_addr);
+
ret = iwl_pnvm_load(mvm->trans, &mvm->notif_wait,
&mvm->fw->ucode_capa);
if (ret) {
@@ -430,8 +432,6 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
return ret;
}
- iwl_trans_fw_alive(mvm->trans, alive_data.scd_base_addr);
-
/*
* Note: all the queues are enabled as part of the interface
* initialization, but in firmware restart scenarios they
@@ -1094,22 +1094,6 @@ static int iwl_mvm_ppag_init(struct iwl_mvm *mvm)
return iwl_mvm_ppag_send_cmd(mvm);
}
-static bool
-iwl_mvm_add_to_tas_block_list(u16 *list, u8 *size, u16 mcc)
-{
- /* Verify that there is room for another country */
- if (*size >= IWL_WTAS_BLACK_LIST_MAX)
- return false;
-
- for (u8 i = 0; i < *size; i++) {
- if (list[i] == mcc)
- return true;
- }
-
- list[*size++] = mcc;
- return true;
-}
-
static void iwl_mvm_tas_init(struct iwl_mvm *mvm)
{
u32 cmd_id = WIDE_ID(REGULATORY_AND_NVM_GROUP, TAS_CONFIG);
@@ -1150,10 +1134,10 @@ static void iwl_mvm_tas_init(struct iwl_mvm *mvm)
IWL_DEBUG_RADIO(mvm,
"System vendor '%s' is not in the approved list, disabling TAS in US and Canada.\n",
dmi_get_system_info(DMI_SYS_VENDOR) ?: "<unknown>");
- if ((!iwl_mvm_add_to_tas_block_list(data.block_list_array,
+ if ((!iwl_add_mcc_to_tas_block_list(data.block_list_array,
&data.block_list_size,
IWL_MCC_US)) ||
- (!iwl_mvm_add_to_tas_block_list(data.block_list_array,
+ (!iwl_add_mcc_to_tas_block_list(data.block_list_array,
&data.block_list_size,
IWL_MCC_CANADA))) {
IWL_DEBUG_RADIO(mvm,
@@ -1213,38 +1197,6 @@ static void iwl_mvm_tas_init(struct iwl_mvm *mvm)
IWL_DEBUG_RADIO(mvm, "failed to send TAS_CONFIG (%d)\n", ret);
}
-static bool iwl_mvm_eval_dsm_rfi(struct iwl_mvm *mvm)
-{
- u32 value = 0;
- /* default behaviour is disabled */
- bool bios_enable_rfi = false;
- int ret = iwl_bios_get_dsm(&mvm->fwrt, DSM_FUNC_RFI_CONFIG, &value);
-
-
- if (ret < 0) {
- IWL_DEBUG_RADIO(mvm, "Failed to get DSM RFI, ret=%d\n", ret);
- return bios_enable_rfi;
- }
-
- value &= DSM_VALUE_RFI_DISABLE;
- /* RFI BIOS CONFIG value can be 0 or 3 only.
- * i.e 0 means DDR and DLVR enabled. 3 means DDR and DLVR disabled.
- * 1 and 2 are invalid BIOS configurations, So, it's not possible to
- * disable ddr/dlvr separately.
- */
- if (!value) {
- IWL_DEBUG_RADIO(mvm, "DSM RFI is evaluated to enable\n");
- bios_enable_rfi = true;
- } else if (value == DSM_VALUE_RFI_DISABLE) {
- IWL_DEBUG_RADIO(mvm, "DSM RFI is evaluated to disable\n");
- } else {
- IWL_DEBUG_RADIO(mvm,
- "DSM RFI got invalid value, value=%d\n", value);
- }
-
- return bios_enable_rfi;
-}
-
static void iwl_mvm_lari_cfg(struct iwl_mvm *mvm)
{
struct iwl_lari_config_change_cmd cmd;
@@ -1631,7 +1583,7 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
iwl_mvm_uats_init(mvm);
if (iwl_rfi_supported(mvm)) {
- if (iwl_mvm_eval_dsm_rfi(mvm))
+ if (iwl_rfi_is_enabled_in_bios(&mvm->fwrt))
iwl_rfi_send_config_cmd(mvm, NULL);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
index 6b06732441c3..bec18d197f31 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
@@ -1960,26 +1960,3 @@ void iwl_mvm_channel_switch_error_notif(struct iwl_mvm *mvm,
ieee80211_channel_switch_disconnect(vif);
rcu_read_unlock();
}
-
-void iwl_mvm_rx_missed_vap_notif(struct iwl_mvm *mvm,
- struct iwl_rx_cmd_buffer *rxb)
-{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct iwl_missed_vap_notif *mb = (void *)pkt->data;
- struct ieee80211_vif *vif;
- u32 id = le32_to_cpu(mb->mac_id);
-
- IWL_DEBUG_INFO(mvm,
- "missed_vap notify mac_id=%u, num_beacon_intervals_elapsed=%u, profile_periodicity=%u\n",
- le32_to_cpu(mb->mac_id),
- mb->num_beacon_intervals_elapsed,
- mb->profile_periodicity);
-
- rcu_read_lock();
-
- vif = iwl_mvm_rcu_dereference_vif_id(mvm, id, true);
- if (vif)
- iwl_mvm_connection_loss(mvm, vif, "missed vap beacon");
-
- rcu_read_unlock();
-}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index af6644b7e95f..1e916a0ce082 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018-2024 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2025 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -70,7 +70,7 @@ static const struct ieee80211_iface_combination iwl_mvm_iface_combinations[] = {
};
static const struct cfg80211_pmsr_capabilities iwl_mvm_pmsr_capa = {
- .max_peers = IWL_MVM_TOF_MAX_APS,
+ .max_peers = IWL_TOF_MAX_APS,
.report_ap_tsf = 1,
.randomize_mac_addr = 1,
@@ -272,9 +272,10 @@ static const u8 tm_if_types_ext_capa_sta[] = {
__bf_shf(IEEE80211_EML_CAP_EMLSR_PADDING_DELAY) | \
IEEE80211_EML_CAP_EMLSR_TRANSITION_DELAY_64US << \
__bf_shf(IEEE80211_EML_CAP_EMLSR_TRANSITION_DELAY))
-#define IWL_MVM_MLD_CAPA_OPS FIELD_PREP_CONST( \
+#define IWL_MVM_MLD_CAPA_OPS (FIELD_PREP_CONST( \
IEEE80211_MLD_CAP_OP_TID_TO_LINK_MAP_NEG_SUPP, \
- IEEE80211_MLD_CAP_OP_TID_TO_LINK_MAP_NEG_SUPP_SAME)
+ IEEE80211_MLD_CAP_OP_TID_TO_LINK_MAP_NEG_SUPP_SAME) | \
+ IEEE80211_MLD_CAP_OP_LINK_RECONF_SUPPORT)
static const struct wiphy_iftype_ext_capab add_iftypes_ext_capa[] = {
{
@@ -4099,6 +4100,20 @@ iwl_mvm_sta_state_authorized_to_assoc(struct iwl_mvm *mvm,
return 0;
}
+void iwl_mvm_smps_workaround(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ bool update)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ if (!iwl_mvm_has_rlc_offload(mvm))
+ return;
+
+ mvmvif->ps_disabled = !vif->cfg.ps;
+
+ if (update)
+ iwl_mvm_power_update_mac(mvm);
+}
+
/* Common part for MLD and non-MLD modes */
int iwl_mvm_mac_sta_state_common(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
@@ -4191,6 +4206,7 @@ int iwl_mvm_mac_sta_state_common(struct ieee80211_hw *hw,
new_state == IEEE80211_STA_AUTHORIZED) {
ret = iwl_mvm_sta_state_assoc_to_authorized(mvm, vif, sta,
callbacks);
+ iwl_mvm_smps_workaround(mvm, vif, true);
} else if (old_state == IEEE80211_STA_AUTHORIZED &&
new_state == IEEE80211_STA_ASSOC) {
ret = iwl_mvm_sta_state_authorized_to_assoc(mvm, vif, sta,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c
index 341a2a7a49ec..78d7153a0cfc 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2022-2024 Intel Corporation
+ * Copyright (C) 2022-2025 Intel Corporation
*/
#include "mvm.h"
@@ -887,6 +887,7 @@ static void iwl_mvm_mld_vif_cfg_changed_station(struct iwl_mvm *mvm,
}
if (changes & BSS_CHANGED_PS) {
+ iwl_mvm_smps_workaround(mvm, vif, false);
ret = iwl_mvm_power_update_mac(mvm);
if (ret)
IWL_ERR(mvm, "failed to update power mode\n");
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c
index 2f159024eeb8..9dd670041137 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c
@@ -121,7 +121,7 @@ static int iwl_mvm_add_aux_sta_to_fw(struct iwl_mvm *mvm,
{
int ret;
- struct iwl_mvm_aux_sta_cmd cmd = {
+ struct iwl_aux_sta_cmd cmd = {
.sta_id = cpu_to_le32(sta->sta_id),
.lmac_id = cpu_to_le32(lmac_id),
};
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
index ee769da72e68..f6391c7a3e29 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2012-2014, 2018-2024 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2025 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -1310,7 +1310,7 @@ struct iwl_mvm {
struct cfg80211_pmsr_request *req;
struct wireless_dev *req_wdev;
struct list_head loc_list;
- int responses[IWL_MVM_TOF_MAX_APS];
+ int responses[IWL_TOF_MAX_APS];
struct {
struct list_head resp;
} smooth;
@@ -2095,8 +2095,6 @@ void iwl_mvm_mac_ctxt_recalc_tsf_id(struct iwl_mvm *mvm,
struct ieee80211_vif *vif);
void iwl_mvm_probe_resp_data_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb);
-void iwl_mvm_rx_missed_vap_notif(struct iwl_mvm *mvm,
- struct iwl_rx_cmd_buffer *rxb);
void iwl_mvm_channel_switch_start_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb);
void iwl_mvm_channel_switch_error_notif(struct iwl_mvm *mvm,
@@ -2520,12 +2518,6 @@ void iwl_mvm_ftm_restart_responder(struct iwl_mvm *mvm,
struct ieee80211_bss_conf *bss_conf);
void iwl_mvm_ftm_responder_stats(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb);
-int iwl_mvm_ftm_resp_remove_pasn_sta(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif, u8 *addr);
-int iwl_mvm_ftm_respoder_add_pasn_sta(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif,
- u8 *addr, u32 cipher, u8 *tk, u32 tk_len,
- u8 *hltk, u32 hltk_len);
void iwl_mvm_ftm_responder_clear(struct iwl_mvm *mvm,
struct ieee80211_vif *vif);
@@ -2540,10 +2532,6 @@ int iwl_mvm_ftm_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
void iwl_mvm_ftm_abort(struct iwl_mvm *mvm, struct cfg80211_pmsr_request *req);
void iwl_mvm_ftm_initiator_smooth_config(struct iwl_mvm *mvm);
void iwl_mvm_ftm_initiator_smooth_stop(struct iwl_mvm *mvm);
-int iwl_mvm_ftm_add_pasn_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
- u8 *addr, u32 cipher, u8 *tk, u32 tk_len,
- u8 *hltk, u32 hltk_len);
-void iwl_mvm_ftm_remove_pasn_sta(struct iwl_mvm *mvm, u8 *addr);
/* TDLS */
@@ -3043,4 +3031,7 @@ iwl_mvm_send_ap_tx_power_constraint_cmd(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *bss_conf,
bool is_ap);
+
+void iwl_mvm_smps_workaround(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ bool update);
#endif /* __IWL_MVM_H__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
index 984f407f7027..76603ef02704 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018-2024 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2025 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -145,7 +145,7 @@ static void iwl_mvm_rx_esr_mode_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct iwl_mvm_esr_mode_notif *notif = (void *)pkt->data;
+ struct iwl_esr_mode_notif *notif = (void *)pkt->data;
struct ieee80211_vif *vif = iwl_mvm_get_bss_vif(mvm);
/* FW recommendations is only for entering EMLSR */
@@ -495,7 +495,7 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
RX_HANDLER_GRP(DATA_PATH_GROUP, ESR_MODE_NOTIF,
iwl_mvm_rx_esr_mode_notif,
RX_HANDLER_ASYNC_LOCKED_WIPHY,
- struct iwl_mvm_esr_mode_notif),
+ struct iwl_esr_mode_notif),
RX_HANDLER_GRP(DATA_PATH_GROUP, MONITOR_NOTIF,
iwl_mvm_rx_monitor_notif, RX_HANDLER_ASYNC_LOCKED,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
index a8c4e354e2ce..068c58e9c1eb 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
@@ -1783,7 +1783,7 @@ static enum rs_action rs_get_rate_action(struct iwl_mvm *mvm,
if ((high_tpt != IWL_INVALID_VALUE) &&
(high_tpt > current_tpt)) {
IWL_DEBUG_RATE(mvm,
- "Higher rate is better. Increate rate\n");
+ "Higher rate is better. Increase rate\n");
return RS_ACTION_UPSCALE;
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
index 09fd8752046e..14ea89f931bb 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
@@ -995,7 +995,7 @@ iwl_mvm_decode_he_phy_ru_alloc(struct iwl_mvm_rx_phy_data *phy_data,
*/
u8 ru = le32_get_bits(phy_data->d1, IWL_RX_PHY_DATA1_HE_RU_ALLOC_MASK);
u32 rate_n_flags = phy_data->rate_n_flags;
- u32 he_type = rate_n_flags & RATE_MCS_HE_TYPE_MSK_V1;
+ u32 he_type = rate_n_flags & RATE_MCS_HE_TYPE_MSK;
u8 offs = 0;
rx_status->bw = RATE_INFO_BW_HE_RU;
@@ -1050,13 +1050,13 @@ iwl_mvm_decode_he_phy_ru_alloc(struct iwl_mvm_rx_phy_data *phy_data,
if (he_mu)
he_mu->flags2 |=
- le16_encode_bits(FIELD_GET(RATE_MCS_CHAN_WIDTH_MSK_V1,
+ le16_encode_bits(FIELD_GET(RATE_MCS_CHAN_WIDTH_MSK,
rate_n_flags),
IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW);
- else if (he_type == RATE_MCS_HE_TYPE_TRIG_V1)
+ else if (he_type == RATE_MCS_HE_TYPE_TRIG)
he->data6 |=
cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA6_TB_PPDU_BW_KNOWN) |
- le16_encode_bits(FIELD_GET(RATE_MCS_CHAN_WIDTH_MSK_V1,
+ le16_encode_bits(FIELD_GET(RATE_MCS_CHAN_WIDTH_MSK,
rate_n_flags),
IEEE80211_RADIOTAP_HE_DATA6_TB_PPDU_BW);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
index 7a4844ec3c10..78fd7faaed97 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
@@ -4311,67 +4311,6 @@ u16 iwl_mvm_tid_queued(struct iwl_mvm *mvm, struct iwl_mvm_tid_data *tid_data)
return ieee80211_sn_sub(sn, tid_data->next_reclaimed);
}
-int iwl_mvm_add_pasn_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
- struct iwl_mvm_int_sta *sta, u8 *addr, u32 cipher,
- u8 *key, u32 key_len,
- struct ieee80211_key_conf *keyconf)
-{
- int ret;
- u16 queue;
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- unsigned int wdg_timeout =
- iwl_mvm_get_wd_timeout(mvm, vif);
- bool mld = iwl_mvm_has_mld_api(mvm->fw);
- u32 type = IWL_STA_LINK;
-
- if (mld)
- type = STATION_TYPE_PEER;
-
- ret = iwl_mvm_allocate_int_sta(mvm, sta, 0,
- NL80211_IFTYPE_UNSPECIFIED, type);
- if (ret)
- return ret;
-
- if (mld)
- ret = iwl_mvm_mld_add_int_sta_with_queue(mvm, sta, addr,
- mvmvif->deflink.fw_link_id,
- &queue,
- IWL_MAX_TID_COUNT,
- &wdg_timeout);
- else
- ret = iwl_mvm_add_int_sta_with_queue(mvm, mvmvif->id,
- mvmvif->color, addr, sta,
- &queue,
- IWL_MVM_TX_FIFO_BE);
- if (ret)
- goto out;
-
- keyconf->cipher = cipher;
- memcpy(keyconf->key, key, key_len);
- keyconf->keylen = key_len;
- keyconf->flags = IEEE80211_KEY_FLAG_PAIRWISE;
-
- if (mld) {
- /* The MFP flag is set according to the station mfp field. Since
- * we don't have a station, set it manually.
- */
- u32 key_flags =
- iwl_mvm_get_sec_flags(mvm, vif, NULL, keyconf) |
- IWL_SEC_KEY_FLAG_MFP;
- u32 sta_mask = BIT(sta->sta_id);
-
- ret = iwl_mvm_mld_send_key(mvm, sta_mask, key_flags, keyconf);
- } else {
- ret = iwl_mvm_send_sta_key(mvm, sta->sta_id, keyconf, false,
- 0, NULL, 0, 0, true);
- }
-
-out:
- if (ret)
- iwl_mvm_dealloc_int_sta(mvm, sta);
- return ret;
-}
-
void iwl_mvm_cancel_channel_switch(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
u32 id)
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
index 6856f7440ef3..19c905b641e2 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
@@ -597,10 +597,6 @@ void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm,
void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
int iwl_mvm_sta_ensure_queue(struct iwl_mvm *mvm, struct ieee80211_txq *txq);
void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk);
-int iwl_mvm_add_pasn_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
- struct iwl_mvm_int_sta *sta, u8 *addr, u32 cipher,
- u8 *key, u32 key_len,
- struct ieee80211_key_conf *key_conf_out);
void iwl_mvm_cancel_channel_switch(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
u32 id);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
index 9216c43a35c4..1a30bb1ff8ca 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018-2024 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2025 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2017 Intel Deutschland GmbH
*/
@@ -771,7 +771,7 @@ static void iwl_mvm_cancel_session_protection(struct iwl_mvm *mvm,
static void iwl_mvm_roc_rm_cmd(struct iwl_mvm *mvm, u32 activity)
{
- struct iwl_roc_req roc_cmd = {
+ struct iwl_roc_req_v5 roc_cmd = {
.action = cpu_to_le32(FW_CTXT_ACTION_REMOVE),
.activity = cpu_to_le32(activity),
};
@@ -1030,6 +1030,8 @@ void iwl_mvm_rx_session_protect_notif(struct iwl_mvm *mvm,
/* End TE, notify mac80211 */
mvmvif->time_event_data.id = SESSION_PROTECT_CONF_MAX_ID;
mvmvif->time_event_data.link_id = -1;
+ /* set the bit so the ROC cleanup will actually clean up */
+ set_bit(IWL_MVM_STATUS_ROC_P2P_RUNNING, &mvm->status);
iwl_mvm_roc_finished(mvm);
ieee80211_remain_on_channel_expired(mvm->hw);
} else if (le32_to_cpu(notif->start)) {
@@ -1100,7 +1102,7 @@ int iwl_mvm_roc_add_cmd(struct iwl_mvm *mvm,
{
int res;
u32 duration_tu, delay;
- struct iwl_roc_req roc_req = {
+ struct iwl_roc_req_v5 roc_req = {
.action = cpu_to_le32(FW_CTXT_ACTION_ADD),
.activity = cpu_to_le32(activity),
.sta_id = cpu_to_le32(mvm->aux_sta.sta_id),
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
index d92470960b38..c851290e75a2 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
@@ -105,7 +105,7 @@ static bool iwl_mvm_temp_notif_wait(struct iwl_notif_wait_data *notif_wait,
void iwl_mvm_temp_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct iwl_dts_measurement_notif_v2 *notif_v2;
+ struct iwl_dts_measurement_notif *notif_v2;
int len = iwl_rx_packet_payload_len(pkt);
int temp;
u32 ths_crossed;
@@ -506,7 +506,7 @@ static const u32 iwl_mvm_cdev_budgets[] = {
int iwl_mvm_ctdp_command(struct iwl_mvm *mvm, u32 op, u32 state)
{
- struct iwl_mvm_ctdp_cmd cmd = {
+ struct iwl_ctdp_cmd cmd = {
.operation = cpu_to_le32(op),
.budget = cpu_to_le32(iwl_mvm_cdev_budgets[state]),
.window_size = 0,
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
index 838c426db7f0..8aa7c455bdee 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2018-2024 Intel Corporation
+ * Copyright (C) 2018-2025 Intel Corporation
*/
#include <linux/dmi.h>
#include "iwl-trans.h"
@@ -137,6 +137,9 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
if (trans->dsbr_urm_permanent)
control_flags_ext |= IWL_PRPH_SCRATCH_EXT_URM_PERM;
+ if (trans->ext_32khz_clock_valid)
+ control_flags_ext |= IWL_PRPH_SCRATCH_EXT_32KHZ_CLK_VALID;
+
/* Allocate prph scratch */
prph_scratch = dma_alloc_coherent(trans->dev, sizeof(*prph_scratch),
&trans_pcie->prph_scratch_dma_addr,
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
index e0b657b2f74b..93446c374008 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2005-2014, 2018-2024 Intel Corporation
+ * Copyright (C) 2005-2014, 2018-2025 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -498,7 +498,8 @@ VISIBLE_IF_IWLWIFI_KUNIT const struct pci_device_id iwl_hw_card_ids[] = {
/* Ma devices */
{IWL_PCI_DEVICE(0x2729, PCI_ANY_ID, iwl_ma_trans_cfg)},
{IWL_PCI_DEVICE(0x7E40, PCI_ANY_ID, iwl_ma_trans_cfg)},
-
+#endif /* CONFIG_IWLMVM */
+#if IS_ENABLED(CONFIG_IWLMLD)
/* Bz devices */
{IWL_PCI_DEVICE(0x272b, PCI_ANY_ID, iwl_gl_trans_cfg)},
{IWL_PCI_DEVICE(0xA840, 0x0000, iwl_bz_trans_cfg)},
@@ -543,7 +544,7 @@ VISIBLE_IF_IWLWIFI_KUNIT const struct pci_device_id iwl_hw_card_ids[] = {
/* Dr devices */
{IWL_PCI_DEVICE(0x272F, PCI_ANY_ID, iwl_dr_trans_cfg)},
-#endif /* CONFIG_IWLMVM */
+#endif /* CONFIG_IWLMLD */
{0}
};
@@ -551,16 +552,17 @@ MODULE_DEVICE_TABLE(pci, iwl_hw_card_ids);
EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_hw_card_ids);
#define _IWL_DEV_INFO(_device, _subdevice, _mac_type, _mac_step, _rf_type, \
- _rf_id, _rf_step, _no_160, _cores, _cdb, _cfg, _name) \
+ _rf_id, _rf_step, _bw_limit, _cores, _cdb, _cfg, _name) \
{ .device = (_device), .subdevice = (_subdevice), .cfg = &(_cfg), \
.name = _name, .mac_type = _mac_type, .rf_type = _rf_type, .rf_step = _rf_step, \
- .no_160 = _no_160, .cores = _cores, .rf_id = _rf_id, \
+ .bw_limit = _bw_limit, .cores = _cores, .rf_id = _rf_id, \
.mac_step = _mac_step, .cdb = _cdb, .jacket = IWL_CFG_ANY }
#define IWL_DEV_INFO(_device, _subdevice, _cfg, _name) \
_IWL_DEV_INFO(_device, _subdevice, IWL_CFG_ANY, IWL_CFG_ANY, \
- IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, \
- IWL_CFG_ANY, _cfg, _name)
+ IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, \
+ IWL_CFG_BW_NO_LIM, IWL_CFG_ANY, IWL_CFG_ANY, \
+ _cfg, _name)
VISIBLE_IF_IWLWIFI_KUNIT const struct iwl_dev_info iwl_dev_info_table[] = {
#if IS_ENABLED(CONFIG_IWLMVM)
@@ -723,66 +725,66 @@ VISIBLE_IF_IWLWIFI_KUNIT const struct iwl_dev_info iwl_dev_info_table[] = {
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_PU, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_ANY,
- IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ IWL_CFG_BW_NO_LIM, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwl9560_2ac_cfg_soc, iwl9461_160_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_PU, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_ANY,
- IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwl9560_2ac_cfg_soc, iwl9461_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_PU, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_ANY,
- IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ IWL_CFG_BW_NO_LIM, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwl9560_2ac_cfg_soc, iwl9462_160_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_PU, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_ANY,
- IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwl9560_2ac_cfg_soc, iwl9462_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_PU, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY,
- IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ IWL_CFG_BW_NO_LIM, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwl9560_2ac_cfg_soc, iwl9560_160_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_PU, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY,
- IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwl9560_2ac_cfg_soc, iwl9560_name),
_IWL_DEV_INFO(0x2526, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_TH, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_TH, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_160, IWL_CFG_CORES_BT_GNSS, IWL_CFG_NO_CDB,
+ IWL_CFG_BW_NO_LIM, IWL_CFG_CORES_BT_GNSS, IWL_CFG_NO_CDB,
iwl9260_2ac_cfg, iwl9270_160_name),
_IWL_DEV_INFO(0x2526, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_TH, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_TH, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_NO_160, IWL_CFG_CORES_BT_GNSS, IWL_CFG_NO_CDB,
+ 80, IWL_CFG_CORES_BT_GNSS, IWL_CFG_NO_CDB,
iwl9260_2ac_cfg, iwl9270_name),
_IWL_DEV_INFO(0x271B, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_TH, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_TH1, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ IWL_CFG_BW_NO_LIM, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwl9260_2ac_cfg, iwl9162_160_name),
_IWL_DEV_INFO(0x271B, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_TH, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_TH1, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwl9260_2ac_cfg, iwl9162_name),
_IWL_DEV_INFO(0x2526, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_TH, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_TH, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ IWL_CFG_BW_NO_LIM, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwl9260_2ac_cfg, iwl9260_160_name),
_IWL_DEV_INFO(0x2526, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_TH, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_TH, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwl9260_2ac_cfg, iwl9260_name),
/* Qu with Jf */
@@ -790,132 +792,132 @@ VISIBLE_IF_IWLWIFI_KUNIT const struct iwl_dev_info iwl_dev_info_table[] = {
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_ANY,
- IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ IWL_CFG_BW_NO_LIM, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwl9560_qu_b0_jf_b0_cfg, iwl9461_160_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_ANY,
- IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwl9560_qu_b0_jf_b0_cfg, iwl9461_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_ANY,
- IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ IWL_CFG_BW_NO_LIM, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwl9560_qu_b0_jf_b0_cfg, iwl9462_160_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_ANY,
- IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwl9560_qu_b0_jf_b0_cfg, iwl9462_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP,
IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY,
- IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ IWL_CFG_BW_NO_LIM, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwl9560_qu_b0_jf_b0_cfg, iwl9560_160_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP,
IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY,
- IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwl9560_qu_b0_jf_b0_cfg, iwl9560_name),
_IWL_DEV_INFO(IWL_CFG_ANY, 0x1551,
IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP,
IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY,
- IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwl9560_qu_b0_jf_b0_cfg, iwl9560_killer_1550s_name),
_IWL_DEV_INFO(IWL_CFG_ANY, 0x1552,
IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP,
IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY,
- IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwl9560_qu_b0_jf_b0_cfg, iwl9560_killer_1550i_name),
/* Qu C step */
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_ANY,
- IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ IWL_CFG_BW_NO_LIM, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwl9560_qu_c0_jf_b0_cfg, iwl9461_160_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_ANY,
- IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwl9560_qu_c0_jf_b0_cfg, iwl9461_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_ANY,
- IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ IWL_CFG_BW_NO_LIM, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwl9560_qu_c0_jf_b0_cfg, iwl9462_160_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_ANY,
- IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwl9560_qu_c0_jf_b0_cfg, iwl9462_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP,
IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY,
- IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ IWL_CFG_BW_NO_LIM, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwl9560_qu_c0_jf_b0_cfg, iwl9560_160_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP,
IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY,
- IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwl9560_qu_c0_jf_b0_cfg, iwl9560_name),
_IWL_DEV_INFO(IWL_CFG_ANY, 0x1551,
IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP,
IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY,
- IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ IWL_CFG_BW_NO_LIM, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwl9560_qu_c0_jf_b0_cfg, iwl9560_killer_1550s_name),
_IWL_DEV_INFO(IWL_CFG_ANY, 0x1552,
IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP,
IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY,
- IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwl9560_qu_c0_jf_b0_cfg, iwl9560_killer_1550i_name),
/* QuZ */
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_ANY,
- IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ IWL_CFG_BW_NO_LIM, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwl9560_quz_a0_jf_b0_cfg, iwl9461_160_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_ANY,
- IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwl9560_quz_a0_jf_b0_cfg, iwl9461_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_ANY,
- IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ IWL_CFG_BW_NO_LIM, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwl9560_quz_a0_jf_b0_cfg, iwl9462_160_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_ANY,
- IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwl9560_quz_a0_jf_b0_cfg, iwl9462_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY,
- IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ IWL_CFG_BW_NO_LIM, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwl9560_quz_a0_jf_b0_cfg, iwl9560_160_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY,
- IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwl9560_quz_a0_jf_b0_cfg, iwl9560_name),
_IWL_DEV_INFO(IWL_CFG_ANY, 0x1551,
IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY,
- IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ IWL_CFG_BW_NO_LIM, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwl9560_quz_a0_jf_b0_cfg, iwl9560_killer_1550s_name),
_IWL_DEV_INFO(IWL_CFG_ANY, 0x1552,
IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY,
- IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwl9560_quz_a0_jf_b0_cfg, iwl9560_killer_1550i_name),
/* Qu with Hr */
@@ -923,202 +925,204 @@ VISIBLE_IF_IWLWIFI_KUNIT const struct iwl_dev_info iwl_dev_info_table[] = {
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP,
IWL_CFG_RF_TYPE_HR1, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB,
+ IWL_CFG_BW_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB,
iwl_qu_b0_hr1_b0, iwl_ax101_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP,
IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB,
+ 80, IWL_CFG_ANY, IWL_CFG_NO_CDB,
iwl_qu_b0_hr_b0, iwl_ax203_name),
/* Qu C step */
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP,
IWL_CFG_RF_TYPE_HR1, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB,
+ IWL_CFG_BW_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB,
iwl_qu_c0_hr1_b0, iwl_ax101_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP,
IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB,
+ 80, IWL_CFG_ANY, IWL_CFG_NO_CDB,
iwl_qu_c0_hr_b0, iwl_ax203_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP,
IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB,
+ IWL_CFG_BW_NO_LIM, IWL_CFG_ANY, IWL_CFG_NO_CDB,
iwl_qu_c0_hr_b0, iwl_ax201_name),
/* QuZ */
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_HR1, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB,
+ IWL_CFG_BW_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB,
iwl_quz_a0_hr1_b0, iwl_ax101_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QUZ, SILICON_B_STEP,
IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB,
+ 80, IWL_CFG_ANY, IWL_CFG_NO_CDB,
iwl_cfg_quz_a0_hr_b0, iwl_ax203_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QUZ, SILICON_B_STEP,
IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB,
+ IWL_CFG_BW_NO_LIM, IWL_CFG_ANY, IWL_CFG_NO_CDB,
iwl_cfg_quz_a0_hr_b0, iwl_ax201_name),
/* Ma */
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_MA, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB,
+ IWL_CFG_BW_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB,
iwl_cfg_ma, iwl_ax201_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_MA, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_BW_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
iwl_cfg_ma, iwl_ax211_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_MA, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB,
+ IWL_CFG_BW_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB,
iwl_cfg_ma, iwl_ax231_name),
/* So with Hr */
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB,
+ 80, IWL_CFG_ANY, IWL_CFG_NO_CDB,
iwl_cfg_so_a0_hr_a0, iwl_ax203_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_HR1, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB,
+ 80, IWL_CFG_ANY, IWL_CFG_NO_CDB,
iwl_cfg_so_a0_hr_a0, iwl_ax101_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB,
+ IWL_CFG_BW_NO_LIM, IWL_CFG_ANY, IWL_CFG_NO_CDB,
iwl_cfg_so_a0_hr_a0, iwl_ax201_name),
/* So-F with Hr */
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB,
+ 80, IWL_CFG_ANY, IWL_CFG_NO_CDB,
iwl_cfg_so_a0_hr_a0, iwl_ax203_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_HR1, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB,
+ 80, IWL_CFG_ANY, IWL_CFG_NO_CDB,
iwl_cfg_so_a0_hr_a0, iwl_ax101_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB,
+ IWL_CFG_BW_NO_LIM, IWL_CFG_ANY, IWL_CFG_NO_CDB,
iwl_cfg_so_a0_hr_a0, iwl_ax201_name),
/* So-F with Gf */
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB,
+ IWL_CFG_BW_NO_LIM, IWL_CFG_ANY, IWL_CFG_NO_CDB,
iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_CDB,
+ IWL_CFG_BW_NO_LIM, IWL_CFG_ANY, IWL_CFG_CDB,
iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_name),
/* SoF with JF2 */
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY,
- IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ IWL_CFG_BW_NO_LIM, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwlax210_2ax_cfg_so_jf_b0, iwl9560_160_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY,
- IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwlax210_2ax_cfg_so_jf_b0, iwl9560_name),
/* SoF with JF */
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_ANY,
- IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ IWL_CFG_BW_NO_LIM, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwlax210_2ax_cfg_so_jf_b0, iwl9461_160_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_ANY,
- IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ IWL_CFG_BW_NO_LIM, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwlax210_2ax_cfg_so_jf_b0, iwl9462_160_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_ANY,
- IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwlax210_2ax_cfg_so_jf_b0, iwl9461_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_ANY,
- IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwlax210_2ax_cfg_so_jf_b0, iwl9462_name),
/* So with GF */
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB,
+ IWL_CFG_BW_NO_LIM, IWL_CFG_ANY, IWL_CFG_NO_CDB,
iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_CDB,
+ IWL_CFG_BW_NO_LIM, IWL_CFG_ANY, IWL_CFG_CDB,
iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_name),
/* So with JF2 */
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY,
- IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ IWL_CFG_BW_NO_LIM, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwlax210_2ax_cfg_so_jf_b0, iwl9560_160_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY,
- IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwlax210_2ax_cfg_so_jf_b0, iwl9560_name),
/* So with JF */
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_ANY,
- IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ IWL_CFG_BW_NO_LIM, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwlax210_2ax_cfg_so_jf_b0, iwl9461_160_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_ANY,
- IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ IWL_CFG_BW_NO_LIM, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwlax210_2ax_cfg_so_jf_b0, iwl9462_160_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_ANY,
- IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwlax210_2ax_cfg_so_jf_b0, iwl9461_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_ANY,
- IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwlax210_2ax_cfg_so_jf_b0, iwl9462_name),
+#endif /* CONFIG_IWLMVM */
+#if IS_ENABLED(CONFIG_IWLMLD)
/* Bz */
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_BZ, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_BW_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
iwl_cfg_bz, iwl_ax201_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_BZ, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_BW_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
iwl_cfg_bz, iwl_ax211_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
@@ -1130,75 +1134,121 @@ VISIBLE_IF_IWLWIFI_KUNIT const struct iwl_dev_info iwl_dev_info_table[] = {
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_BZ, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_WH, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_BW_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
iwl_cfg_bz, iwl_wh_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_BZ_W, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_BW_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
iwl_cfg_bz, iwl_ax201_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_BZ_W, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_BW_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
iwl_cfg_bz, iwl_ax211_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_BZ_W, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_BW_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
iwl_cfg_bz, iwl_fm_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_BZ_W, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_WH, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_BW_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
iwl_cfg_bz, iwl_wh_name),
/* Ga (Gl) */
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_GL, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_320, IWL_CFG_ANY, IWL_CFG_NO_CDB,
+ IWL_CFG_BW_NO_LIM, IWL_CFG_ANY, IWL_CFG_NO_CDB,
iwl_cfg_gl, iwl_gl_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_GL, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_NO_320, IWL_CFG_ANY, IWL_CFG_NO_CDB,
+ 160, IWL_CFG_ANY, IWL_CFG_NO_CDB,
iwl_cfg_gl, iwl_mtp_name),
/* Sc */
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SC, IWL_CFG_ANY,
- IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
- iwl_cfg_sc, iwl_sc_name),
+ IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_BW_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
+ iwl_cfg_sc, iwl_ax211_name),
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_SC, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_BW_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
+ iwl_cfg_sc, iwl_fm_name),
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_SC, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_WH, IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_BW_NO_LIM, IWL_CFG_ANY, IWL_CFG_ANY,
+ iwl_cfg_sc, iwl_wh_name),
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_SC, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_WH, IWL_CFG_ANY, IWL_CFG_ANY,
+ 160, IWL_CFG_ANY, IWL_CFG_ANY,
+ iwl_cfg_sc, iwl_sp_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SC2, IWL_CFG_ANY,
- IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
- iwl_cfg_sc2, iwl_sc2_name),
+ IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_BW_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
+ iwl_cfg_sc2, iwl_ax211_name),
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_SC2, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_BW_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
+ iwl_cfg_sc2, iwl_fm_name),
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_SC2, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_WH, IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_BW_NO_LIM, IWL_CFG_ANY, IWL_CFG_ANY,
+ iwl_cfg_sc2, iwl_wh_name),
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_SC2, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_WH, IWL_CFG_ANY, IWL_CFG_ANY,
+ 160, IWL_CFG_ANY, IWL_CFG_ANY,
+ iwl_cfg_sc2, iwl_sp_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SC2F, IWL_CFG_ANY,
- IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
- iwl_cfg_sc2f, iwl_sc2f_name),
+ IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_BW_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
+ iwl_cfg_sc2f, iwl_ax211_name),
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_SC2F, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_BW_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
+ iwl_cfg_sc2f, iwl_fm_name),
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_SC2F, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_WH, IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_BW_NO_LIM, IWL_CFG_ANY, IWL_CFG_ANY,
+ iwl_cfg_sc2f, iwl_wh_name),
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_SC2F, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_WH, IWL_CFG_ANY, IWL_CFG_ANY,
+ 160, IWL_CFG_ANY, IWL_CFG_ANY,
+ iwl_cfg_sc2f, iwl_sp_name),
+
/* Dr */
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_DR, IWL_CFG_ANY,
IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_BW_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
iwl_cfg_dr, iwl_dr_name),
/* Br */
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_BR, IWL_CFG_ANY,
IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_BW_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
iwl_cfg_br, iwl_br_name),
-#endif /* CONFIG_IWLMVM */
+#endif /* CONFIG_IWLMLD */
};
EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_dev_info_table);
@@ -1348,7 +1398,7 @@ out:
VISIBLE_IF_IWLWIFI_KUNIT const struct iwl_dev_info *
iwl_pci_find_dev_info(u16 device, u16 subsystem_device,
u16 mac_type, u8 mac_step, u16 rf_type, u8 cdb,
- u8 jacket, u8 rf_id, u8 no_160, u8 cores, u8 rf_step)
+ u8 jacket, u8 rf_id, u8 bw_limit, u8 cores, u8 rf_step)
{
int num_devices = ARRAY_SIZE(iwl_dev_info_table);
int i;
@@ -1391,8 +1441,15 @@ iwl_pci_find_dev_info(u16 device, u16 subsystem_device,
dev_info->rf_id != rf_id)
continue;
- if (dev_info->no_160 != (u8)IWL_CFG_ANY &&
- dev_info->no_160 != no_160)
+ /*
+	 * Check that bw_limit has the same "boolean" value since
+ * IWL_SUBDEVICE_BW_LIM can only return a boolean value and
+ * dev_info->bw_limit encodes a non-boolean value.
+ * dev_info->bw_limit == IWL_CFG_BW_NO_LIM must be equal to
+ * !bw_limit to have a match.
+ */
+ if (dev_info->bw_limit != IWL_CFG_BW_ANY &&
+ (dev_info->bw_limit == IWL_CFG_BW_NO_LIM) == !!bw_limit)
continue;
if (dev_info->cores != (u8)IWL_CFG_ANY &&
@@ -1530,13 +1587,13 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
CSR_HW_RFID_IS_CDB(iwl_trans->hw_rf_id),
CSR_HW_RFID_IS_JACKET(iwl_trans->hw_rf_id),
IWL_SUBDEVICE_RF_ID(pdev->subsystem_device),
- IWL_SUBDEVICE_NO_160(pdev->subsystem_device),
+ IWL_SUBDEVICE_BW_LIM(pdev->subsystem_device),
IWL_SUBDEVICE_CORES(pdev->subsystem_device),
CSR_HW_RFID_STEP(iwl_trans->hw_rf_id));
if (dev_info) {
iwl_trans->cfg = dev_info->cfg;
iwl_trans->name = dev_info->name;
- iwl_trans->no_160 = dev_info->no_160 == IWL_CFG_NO_160;
+ iwl_trans->bw_limit = dev_info->bw_limit;
}
#if IS_ENABLED(CONFIG_IWLMVM)
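
The hunk above changes device matching so that a table entry's bw_limit field (a concrete bandwidth such as 160, IWL_CFG_BW_NO_LIM, or IWL_CFG_BW_ANY) is compared against the single IWL_SUBDEVICE_BW_LIM bit taken from the PCI subsystem ID. The following standalone sketch is not driver code; the constant values are assumptions made only to show the matching rule in isolation:

/* Illustrative sketch, not driver code: how a device-info entry's
 * bw_limit field is matched against the boolean IWL_SUBDEVICE_BW_LIM
 * bit.  The constant values are assumptions made for this example. */
#include <stdbool.h>
#include <stdio.h>

#define CFG_BW_ANY	0xffff	/* entry does not care about bandwidth */
#define CFG_BW_NO_LIM	0xfffe	/* entry requires an unlimited device  */

static bool bw_limit_matches(unsigned int entry_bw_limit, bool dev_bw_limited)
{
	if (entry_bw_limit == CFG_BW_ANY)
		return true;
	/* "no limit" entries match only unlimited devices, and a concrete
	 * limit (e.g. 160) matches only devices with the limit bit set */
	return (entry_bw_limit == CFG_BW_NO_LIM) == !dev_bw_limited;
}

int main(void)
{
	printf("%d\n", bw_limit_matches(CFG_BW_NO_LIM, false));	/* 1: match */
	printf("%d\n", bw_limit_matches(160, true));			/* 1: match */
	printf("%d\n", bw_limit_matches(160, false));			/* 0: skip  */
	return 0;
}

This mirrors the driver check above, where a "continue" skips an entry exactly when the two sides disagree.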
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
index 856b7e9f717d..45460f93d24a 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2003-2015, 2018-2024 Intel Corporation
+ * Copyright (C) 2003-2015, 2018-2025 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -646,7 +646,8 @@ dma_addr_t iwl_pcie_get_sgt_tb_phys(struct sg_table *sgt, unsigned int offset,
unsigned int len);
struct sg_table *iwl_pcie_prep_tso(struct iwl_trans *trans, struct sk_buff *skb,
struct iwl_cmd_meta *cmd_meta,
- u8 **hdr, unsigned int hdr_room);
+ u8 **hdr, unsigned int hdr_room,
+ unsigned int offset);
void iwl_pcie_free_tso_pages(struct iwl_trans *trans, struct sk_buff *skb,
struct iwl_cmd_meta *cmd_meta);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
index 793514a1852a..3ece34e30d58 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2024 Intel Corporation
+ * Copyright (C) 2018-2025 Intel Corporation
*/
#include "iwl-trans.h"
#include "iwl-prph.h"
@@ -95,7 +95,7 @@ static void iwl_pcie_gen2_apm_stop(struct iwl_trans *trans, bool op_mode_leave)
CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
}
-static void iwl_trans_pcie_fw_reset_handshake(struct iwl_trans *trans)
+void iwl_trans_pcie_fw_reset_handshake(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int ret;
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
index 1f483f15c238..401919f9fe88 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2020, 2023-2024 Intel Corporation
+ * Copyright (C) 2018-2020, 2023-2025 Intel Corporation
*/
#include <net/tso.h>
#include <linux/tcp.h>
@@ -188,7 +188,8 @@ static int iwl_txq_gen2_build_amsdu(struct iwl_trans *trans,
(3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr));
/* Our device supports 9 segments at most, it will fit in 1 page */
- sgt = iwl_pcie_prep_tso(trans, skb, out_meta, &start_hdr, hdr_room);
+ sgt = iwl_pcie_prep_tso(trans, skb, out_meta, &start_hdr, hdr_room,
+ snap_ip_tcp_hdrlen + hdr_len);
if (!sgt)
return -ENOMEM;
@@ -347,6 +348,7 @@ iwl_tfh_tfd *iwl_txq_gen2_build_tx_amsdu(struct iwl_trans *trans,
return tfd;
out_err:
+ iwl_pcie_free_tso_pages(trans, skb, out_meta);
iwl_txq_gen2_tfd_unmap(trans, out_meta, tfd);
return NULL;
}
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
index 334ebd4c12fa..7c1dd5cc084a 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2003-2014, 2018-2021, 2023-2024 Intel Corporation
+ * Copyright (C) 2003-2014, 2018-2021, 2023-2025 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -1855,6 +1855,7 @@ dma_addr_t iwl_pcie_get_sgt_tb_phys(struct sg_table *sgt, unsigned int offset,
* @cmd_meta: command meta to store the scatter list information for unmapping
* @hdr: output argument for TSO headers
* @hdr_room: requested length for TSO headers
+ * @offset: offset into the data from which mapping should start
*
* Allocate space for a scatter gather list and TSO headers and map the SKB
* using the scatter gather list. The SKB is unmapped again when the page is
@@ -1864,9 +1865,12 @@ dma_addr_t iwl_pcie_get_sgt_tb_phys(struct sg_table *sgt, unsigned int offset,
*/
struct sg_table *iwl_pcie_prep_tso(struct iwl_trans *trans, struct sk_buff *skb,
struct iwl_cmd_meta *cmd_meta,
- u8 **hdr, unsigned int hdr_room)
+ u8 **hdr, unsigned int hdr_room,
+ unsigned int offset)
{
struct sg_table *sgt;
+ unsigned int n_segments = skb_shinfo(skb)->nr_frags + 1;
+ int orig_nents;
if (WARN_ON_ONCE(skb_has_frag_list(skb)))
return NULL;
@@ -1874,8 +1878,7 @@ struct sg_table *iwl_pcie_prep_tso(struct iwl_trans *trans, struct sk_buff *skb,
*hdr = iwl_pcie_get_page_hdr(trans,
hdr_room + __alignof__(struct sg_table) +
sizeof(struct sg_table) +
- (skb_shinfo(skb)->nr_frags + 1) *
- sizeof(struct scatterlist),
+ n_segments * sizeof(struct scatterlist),
skb);
if (!*hdr)
return NULL;
@@ -1883,14 +1886,15 @@ struct sg_table *iwl_pcie_prep_tso(struct iwl_trans *trans, struct sk_buff *skb,
sgt = (void *)PTR_ALIGN(*hdr + hdr_room, __alignof__(struct sg_table));
sgt->sgl = (void *)(sgt + 1);
- sg_init_table(sgt->sgl, skb_shinfo(skb)->nr_frags + 1);
+ sg_init_table(sgt->sgl, n_segments);
/* Only map the data, not the header (it is copied to the TSO page) */
- sgt->orig_nents = skb_to_sgvec(skb, sgt->sgl, skb_headlen(skb),
- skb->data_len);
- if (WARN_ON_ONCE(sgt->orig_nents <= 0))
+ orig_nents = skb_to_sgvec(skb, sgt->sgl, offset, skb->len - offset);
+ if (WARN_ON_ONCE(orig_nents <= 0))
return NULL;
+ sgt->orig_nents = orig_nents;
+
/* And map the entire SKB */
if (dma_map_sgtable(trans->dev, sgt, DMA_TO_DEVICE, 0) < 0)
return NULL;
@@ -1939,7 +1943,8 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
(3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr)) + iv_len;
/* Our device supports 9 segments at most, it will fit in 1 page */
- sgt = iwl_pcie_prep_tso(trans, skb, out_meta, &start_hdr, hdr_room);
+ sgt = iwl_pcie_prep_tso(trans, skb, out_meta, &start_hdr, hdr_room,
+ snap_ip_tcp_hdrlen + hdr_len + iv_len);
if (!sgt)
return -ENOMEM;
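
With the new @offset argument, iwl_pcie_prep_tso() maps only the part of the skb that follows the headers already copied into the TSO page (the SNAP/IP/TCP header length plus the 802.11 header, and additionally the IV length in the tx.c caller), instead of starting from skb_headlen(). A minimal arithmetic sketch, using made-up lengths, of how much data ends up DMA-mapped:

/* Illustrative sketch, not driver code: with the new @offset argument,
 * only the payload after the copied TSO headers is DMA-mapped.
 * All lengths are made-up example values. */
#include <stdio.h>

int main(void)
{
	unsigned int skb_len = 4096;			/* total frame length */
	unsigned int hdr_len = 26;			/* 802.11 header      */
	unsigned int snap_ip_tcp = 8 + 20 + 20;		/* SNAP + IPv4 + TCP  */
	unsigned int offset = snap_ip_tcp + hdr_len;	/* copied to TSO page */

	/* old code mapped from skb_headlen(); now skb->len - offset bytes */
	printf("bytes mapped for DMA: %u\n", skb_len - offset);
	return 0;
}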
diff --git a/drivers/net/wireless/intel/iwlwifi/tests/devinfo.c b/drivers/net/wireless/intel/iwlwifi/tests/devinfo.c
index d0bda23c628a..7ef5e89c6af2 100644
--- a/drivers/net/wireless/intel/iwlwifi/tests/devinfo.c
+++ b/drivers/net/wireless/intel/iwlwifi/tests/devinfo.c
@@ -2,7 +2,7 @@
/*
* KUnit tests for the iwlwifi device info table
*
- * Copyright (C) 2023-2024 Intel Corporation
+ * Copyright (C) 2023-2025 Intel Corporation
*/
#include <kunit/test.h>
#include <linux/pci.h>
@@ -13,9 +13,9 @@ MODULE_IMPORT_NS("EXPORTED_FOR_KUNIT_TESTING");
static void iwl_pci_print_dev_info(const char *pfx, const struct iwl_dev_info *di)
{
- printk(KERN_DEBUG "%sdev=%.4x,subdev=%.4x,mac_type=%.4x,mac_step=%.4x,rf_type=%.4x,cdb=%d,jacket=%d,rf_id=%.2x,no_160=%d,cores=%.2x\n",
+ printk(KERN_DEBUG "%sdev=%.4x,subdev=%.4x,mac_type=%.4x,mac_step=%.4x,rf_type=%.4x,cdb=%d,jacket=%d,rf_id=%.2x,bw_limit=%d,cores=%.2x\n",
pfx, di->device, di->subdevice, di->mac_type, di->mac_step,
- di->rf_type, di->cdb, di->jacket, di->rf_id, di->no_160,
+ di->rf_type, di->cdb, di->jacket, di->rf_id, di->bw_limit,
di->cores);
}
@@ -31,8 +31,13 @@ static void devinfo_table_order(struct kunit *test)
di->mac_type, di->mac_step,
di->rf_type, di->cdb,
di->jacket, di->rf_id,
- di->no_160, di->cores, di->rf_step);
- if (ret != di) {
+ di->bw_limit != IWL_CFG_BW_NO_LIM,
+ di->cores, di->rf_step);
+ if (!ret) {
+ iwl_pci_print_dev_info("No entry found for: ", di);
+ KUNIT_FAIL(test,
+ "No entry found for entry at index %d\n", idx);
+ } else if (ret != di) {
iwl_pci_print_dev_info("searched: ", di);
iwl_pci_print_dev_info("found: ", ret);
KUNIT_FAIL(test,
diff --git a/drivers/net/wireless/marvell/libertas/cmd.c b/drivers/net/wireless/marvell/libertas/cmd.c
index 5a525da434c2..21fde876bb0d 100644
--- a/drivers/net/wireless/marvell/libertas/cmd.c
+++ b/drivers/net/wireless/marvell/libertas/cmd.c
@@ -453,46 +453,6 @@ out:
}
/**
- * lbs_get_snmp_mib - Get an SNMP MIB value
- *
- * @priv: A pointer to &struct lbs_private structure
- * @oid: The OID to retrieve from the firmware
- * @out_val: Location for the returned value
- *
- * returns: 0 on success, error on failure
- */
-int lbs_get_snmp_mib(struct lbs_private *priv, u32 oid, u16 *out_val)
-{
- struct cmd_ds_802_11_snmp_mib cmd;
- int ret;
-
- memset(&cmd, 0, sizeof (cmd));
- cmd.hdr.size = cpu_to_le16(sizeof(cmd));
- cmd.action = cpu_to_le16(CMD_ACT_GET);
- cmd.oid = cpu_to_le16(oid);
-
- ret = lbs_cmd_with_response(priv, CMD_802_11_SNMP_MIB, &cmd);
- if (ret)
- goto out;
-
- switch (le16_to_cpu(cmd.bufsize)) {
- case sizeof(u8):
- *out_val = cmd.value[0];
- break;
- case sizeof(u16):
- *out_val = le16_to_cpu(*((__le16 *)(&cmd.value)));
- break;
- default:
- lbs_deb_cmd("SNMP_CMD: (get) unhandled OID 0x%x size %d\n",
- oid, le16_to_cpu(cmd.bufsize));
- break;
- }
-
-out:
- return ret;
-}
-
-/**
* lbs_get_tx_power - Get the min, max, and current TX power
*
* @priv: A pointer to &struct lbs_private structure
@@ -525,31 +485,6 @@ int lbs_get_tx_power(struct lbs_private *priv, s16 *curlevel, s16 *minlevel,
}
/**
- * lbs_set_tx_power - Set the TX power
- *
- * @priv: A pointer to &struct lbs_private structure
- * @dbm: The desired power level in dBm
- *
- * returns: 0 on success, error on failure
- */
-int lbs_set_tx_power(struct lbs_private *priv, s16 dbm)
-{
- struct cmd_ds_802_11_rf_tx_power cmd;
- int ret;
-
- memset(&cmd, 0, sizeof(cmd));
- cmd.hdr.size = cpu_to_le16(sizeof(cmd));
- cmd.action = cpu_to_le16(CMD_ACT_SET);
- cmd.curlevel = cpu_to_le16(dbm);
-
- lbs_deb_cmd("SET_RF_TX_POWER: %d dBm\n", dbm);
-
- ret = lbs_cmd_with_response(priv, CMD_802_11_RF_TX_POWER, &cmd);
-
- return ret;
-}
-
-/**
* lbs_set_monitor_mode - Enable or disable monitor mode
* (only implemented on OLPC usb8388 FW)
*
@@ -968,10 +903,6 @@ static void lbs_submit_command(struct lbs_private *priv,
}
if (command == CMD_802_11_DEEP_SLEEP) {
- if (priv->is_auto_deep_sleep_enabled) {
- priv->wakeup_dev_required = 1;
- priv->dnld_sent = 0;
- }
priv->is_deep_sleep = 1;
lbs_complete_command(priv, cmdnode, 0);
} else {
@@ -1440,70 +1371,6 @@ void lbs_ps_confirm_sleep(struct lbs_private *priv)
}
-/**
- * lbs_set_tpc_cfg - Configures the transmission power control functionality
- *
- * @priv: A pointer to &struct lbs_private structure
- * @enable: Transmission power control enable
- * @p0: Power level when link quality is good (dBm).
- * @p1: Power level when link quality is fair (dBm).
- * @p2: Power level when link quality is poor (dBm).
- * @usesnr: Use Signal to Noise Ratio in TPC
- *
- * returns: 0 on success
- */
-int lbs_set_tpc_cfg(struct lbs_private *priv, int enable, int8_t p0, int8_t p1,
- int8_t p2, int usesnr)
-{
- struct cmd_ds_802_11_tpc_cfg cmd;
- int ret;
-
- memset(&cmd, 0, sizeof(cmd));
- cmd.hdr.size = cpu_to_le16(sizeof(cmd));
- cmd.action = cpu_to_le16(CMD_ACT_SET);
- cmd.enable = !!enable;
- cmd.usesnr = !!usesnr;
- cmd.P0 = p0;
- cmd.P1 = p1;
- cmd.P2 = p2;
-
- ret = lbs_cmd_with_response(priv, CMD_802_11_TPC_CFG, &cmd);
-
- return ret;
-}
-
-/**
- * lbs_set_power_adapt_cfg - Configures the power adaptation settings
- *
- * @priv: A pointer to &struct lbs_private structure
- * @enable: Power adaptation enable
- * @p0: Power level for 1, 2, 5.5 and 11 Mbps (dBm).
- * @p1: Power level for 6, 9, 12, 18, 22, 24 and 36 Mbps (dBm).
- * @p2: Power level for 48 and 54 Mbps (dBm).
- *
- * returns: 0 on Success
- */
-
-int lbs_set_power_adapt_cfg(struct lbs_private *priv, int enable, int8_t p0,
- int8_t p1, int8_t p2)
-{
- struct cmd_ds_802_11_pa_cfg cmd;
- int ret;
-
- memset(&cmd, 0, sizeof(cmd));
- cmd.hdr.size = cpu_to_le16(sizeof(cmd));
- cmd.action = cpu_to_le16(CMD_ACT_SET);
- cmd.enable = !!enable;
- cmd.P0 = p0;
- cmd.P1 = p1;
- cmd.P2 = p2;
-
- ret = lbs_cmd_with_response(priv, CMD_802_11_PA_CFG , &cmd);
-
- return ret;
-}
-
-
struct cmd_ctrl_node *__lbs_cmd_async(struct lbs_private *priv,
uint16_t command, struct cmd_header *in_cmd, int in_cmd_size,
int (*callback)(struct lbs_private *, unsigned long, struct cmd_header *),
@@ -1520,12 +1387,10 @@ struct cmd_ctrl_node *__lbs_cmd_async(struct lbs_private *priv,
/* No commands are allowed in Deep Sleep until we toggle the GPIO
* to wake up the card and it has signaled that it's ready.
*/
- if (!priv->is_auto_deep_sleep_enabled) {
- if (priv->is_deep_sleep) {
- lbs_deb_cmd("command not allowed in deep sleep\n");
- cmdnode = ERR_PTR(-EBUSY);
- goto done;
- }
+ if (priv->is_deep_sleep) {
+ lbs_deb_cmd("command not allowed in deep sleep\n");
+ cmdnode = ERR_PTR(-EBUSY);
+ goto done;
}
cmdnode = lbs_get_free_cmd_node(priv);
diff --git a/drivers/net/wireless/marvell/libertas/cmd.h b/drivers/net/wireless/marvell/libertas/cmd.h
index d7be232f5739..a95c2651e67f 100644
--- a/drivers/net/wireless/marvell/libertas/cmd.h
+++ b/drivers/net/wireless/marvell/libertas/cmd.h
@@ -105,19 +105,9 @@ int lbs_get_tx_power(struct lbs_private *priv, s16 *curlevel, s16 *minlevel,
int lbs_set_snmp_mib(struct lbs_private *priv, u32 oid, u16 val);
-int lbs_get_snmp_mib(struct lbs_private *priv, u32 oid, u16 *out_val);
-
/* Commands only used in wext.c, assoc. and scan.c */
-int lbs_set_power_adapt_cfg(struct lbs_private *priv, int enable, int8_t p0,
- int8_t p1, int8_t p2);
-
-int lbs_set_tpc_cfg(struct lbs_private *priv, int enable, int8_t p0, int8_t p1,
- int8_t p2, int usesnr);
-
-int lbs_set_tx_power(struct lbs_private *priv, s16 dbm);
-
int lbs_set_deep_sleep(struct lbs_private *priv, int deep_sleep);
int lbs_set_host_sleep(struct lbs_private *priv, int host_sleep);
diff --git a/drivers/net/wireless/marvell/libertas/cmdresp.c b/drivers/net/wireless/marvell/libertas/cmdresp.c
index f2aa659e7714..8393f396eebe 100644
--- a/drivers/net/wireless/marvell/libertas/cmdresp.c
+++ b/drivers/net/wireless/marvell/libertas/cmdresp.c
@@ -279,7 +279,6 @@ void lbs_process_event(struct lbs_private *priv, u32 event)
priv->reset_deep_sleep_wakeup(priv);
lbs_deb_cmd("EVENT: ds awake\n");
priv->is_deep_sleep = 0;
- priv->wakeup_dev_required = 0;
wake_up_interruptible(&priv->ds_awake_q);
break;
diff --git a/drivers/net/wireless/marvell/libertas/decl.h b/drivers/net/wireless/marvell/libertas/decl.h
index c1e0388ef01d..ea69007a2958 100644
--- a/drivers/net/wireless/marvell/libertas/decl.h
+++ b/drivers/net/wireless/marvell/libertas/decl.h
@@ -64,11 +64,7 @@ int lbs_resume(struct lbs_private *priv);
void lbs_queue_event(struct lbs_private *priv, u32 event);
void lbs_notify_command_response(struct lbs_private *priv, u8 resp_idx);
-int lbs_enter_auto_deep_sleep(struct lbs_private *priv);
-int lbs_exit_auto_deep_sleep(struct lbs_private *priv);
-
u32 lbs_fw_index_to_data_rate(u8 index);
-u8 lbs_data_rate_to_fw_index(u32 rate);
int lbs_get_firmware(struct device *dev, u32 card_model,
const struct lbs_fw_table *fw_table,
diff --git a/drivers/net/wireless/marvell/libertas/dev.h b/drivers/net/wireless/marvell/libertas/dev.h
index 4b6e05a8e5d5..c4708ce4eb83 100644
--- a/drivers/net/wireless/marvell/libertas/dev.h
+++ b/drivers/net/wireless/marvell/libertas/dev.h
@@ -83,12 +83,8 @@ struct lbs_private {
/* Deep sleep */
int is_deep_sleep;
int deep_sleep_required;
- int is_auto_deep_sleep_enabled;
- int wakeup_dev_required;
int is_activity_detected;
- int auto_deep_sleep_timeout; /* in ms */
wait_queue_head_t ds_awake_q;
- struct timer_list auto_deepsleep_timer;
/* Host sleep*/
int is_host_sleep_configured;
diff --git a/drivers/net/wireless/marvell/libertas/main.c b/drivers/net/wireless/marvell/libertas/main.c
index 78e8b5aecec0..017e5c6bbade 100644
--- a/drivers/net/wireless/marvell/libertas/main.c
+++ b/drivers/net/wireless/marvell/libertas/main.c
@@ -79,26 +79,6 @@ u32 lbs_fw_index_to_data_rate(u8 idx)
return fw_data_rates[idx];
}
-/**
- * lbs_data_rate_to_fw_index - use rate to get the index
- *
- * @rate: data rate
- * returns: index or 0
- */
-u8 lbs_data_rate_to_fw_index(u32 rate)
-{
- u8 i;
-
- if (!rate)
- return 0;
-
- for (i = 0; i < sizeof(fw_data_rates); i++) {
- if (rate == fw_data_rates[i])
- return i;
- }
- return 0;
-}
-
int lbs_set_iface_type(struct lbs_private *priv, enum nl80211_iftype type)
{
int ret = 0;
@@ -276,8 +256,7 @@ void lbs_host_to_card_done(struct lbs_private *priv)
/* Wake main thread if commands are pending */
if (!priv->cur_cmd || priv->tx_pending_len > 0) {
- if (!priv->wakeup_dev_required)
- wake_up(&priv->waitq);
+ wake_up(&priv->waitq);
}
spin_unlock_irqrestore(&priv->driver_lock, flags);
@@ -468,8 +447,7 @@ static int lbs_thread(void *data)
shouldsleep = 0; /* We have a command response */
else if (priv->cur_cmd)
shouldsleep = 1; /* Can't send a command; one already running */
- else if (!list_empty(&priv->cmdpendingq) &&
- !(priv->wakeup_dev_required))
+ else if (!list_empty(&priv->cmdpendingq))
shouldsleep = 0; /* We have a command to send */
else if (kfifo_len(&priv->event_fifo))
shouldsleep = 0; /* We have an event to process */
@@ -536,14 +514,6 @@ static int lbs_thread(void *data)
}
spin_unlock_irq(&priv->driver_lock);
- if (priv->wakeup_dev_required) {
- lbs_deb_thread("Waking up device...\n");
- /* Wake up device */
- if (priv->exit_deep_sleep(priv))
- lbs_deb_thread("Wakeup device failed\n");
- continue;
- }
-
/* command timeout stuff */
if (priv->cmd_timed_out && priv->cur_cmd) {
struct cmd_ctrl_node *cmdnode = priv->cur_cmd;
@@ -626,7 +596,6 @@ static int lbs_thread(void *data)
del_timer(&priv->command_timer);
del_timer(&priv->tx_lockup_timer);
- del_timer(&priv->auto_deepsleep_timer);
return 0;
}
@@ -773,55 +742,6 @@ static void lbs_tx_lockup_handler(struct timer_list *t)
spin_unlock_irqrestore(&priv->driver_lock, flags);
}
-/**
- * auto_deepsleep_timer_fn - put the device back to deep sleep mode when
- * timer expires and no activity (command, event, data etc.) is detected.
- * @t: Context from which to retrieve a &struct lbs_private pointer
- * returns: N/A
- */
-static void auto_deepsleep_timer_fn(struct timer_list *t)
-{
- struct lbs_private *priv = from_timer(priv, t, auto_deepsleep_timer);
-
- if (priv->is_activity_detected) {
- priv->is_activity_detected = 0;
- } else {
- if (priv->is_auto_deep_sleep_enabled &&
- (!priv->wakeup_dev_required) &&
- (priv->connect_status != LBS_CONNECTED)) {
- struct cmd_header cmd;
-
- lbs_deb_main("Entering auto deep sleep mode...\n");
- memset(&cmd, 0, sizeof(cmd));
- cmd.size = cpu_to_le16(sizeof(cmd));
- lbs_cmd_async(priv, CMD_802_11_DEEP_SLEEP, &cmd,
- sizeof(cmd));
- }
- }
- mod_timer(&priv->auto_deepsleep_timer , jiffies +
- (priv->auto_deep_sleep_timeout * HZ)/1000);
-}
-
-int lbs_enter_auto_deep_sleep(struct lbs_private *priv)
-{
- priv->is_auto_deep_sleep_enabled = 1;
- if (priv->is_deep_sleep)
- priv->wakeup_dev_required = 1;
- mod_timer(&priv->auto_deepsleep_timer ,
- jiffies + (priv->auto_deep_sleep_timeout * HZ)/1000);
-
- return 0;
-}
-
-int lbs_exit_auto_deep_sleep(struct lbs_private *priv)
-{
- priv->is_auto_deep_sleep_enabled = 0;
- priv->auto_deep_sleep_timeout = 0;
- del_timer(&priv->auto_deepsleep_timer);
-
- return 0;
-}
-
static int lbs_init_adapter(struct lbs_private *priv)
{
int ret;
@@ -835,9 +755,7 @@ static int lbs_init_adapter(struct lbs_private *priv)
priv->psmode = LBS802_11POWERMODECAM;
priv->psstate = PS_STATE_FULL_POWER;
priv->is_deep_sleep = 0;
- priv->is_auto_deep_sleep_enabled = 0;
priv->deep_sleep_required = 0;
- priv->wakeup_dev_required = 0;
init_waitqueue_head(&priv->ds_awake_q);
init_waitqueue_head(&priv->scan_q);
priv->authtype_auto = 1;
@@ -849,7 +767,6 @@ static int lbs_init_adapter(struct lbs_private *priv)
timer_setup(&priv->command_timer, lbs_cmd_timeout_handler, 0);
timer_setup(&priv->tx_lockup_timer, lbs_tx_lockup_handler, 0);
- timer_setup(&priv->auto_deepsleep_timer, auto_deepsleep_timer_fn, 0);
INIT_LIST_HEAD(&priv->cmdfreeq);
INIT_LIST_HEAD(&priv->cmdpendingq);
@@ -883,7 +800,6 @@ static void lbs_free_adapter(struct lbs_private *priv)
kfifo_free(&priv->event_fifo);
del_timer(&priv->command_timer);
del_timer(&priv->tx_lockup_timer);
- del_timer(&priv->auto_deepsleep_timer);
}
static const struct net_device_ops lbs_netdev_ops = {
diff --git a/drivers/net/wireless/marvell/mwifiex/11n.c b/drivers/net/wireless/marvell/mwifiex/11n.c
index 66f0f5377ac1..738bafc3749b 100644
--- a/drivers/net/wireless/marvell/mwifiex/11n.c
+++ b/drivers/net/wireless/marvell/mwifiex/11n.c
@@ -403,12 +403,14 @@ mwifiex_cmd_append_11n_tlv(struct mwifiex_private *priv,
if (sband->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40 &&
bss_desc->bcn_ht_oper->ht_param &
- IEEE80211_HT_PARAM_CHAN_WIDTH_ANY)
+ IEEE80211_HT_PARAM_CHAN_WIDTH_ANY) {
+ chan_list->chan_scan_param[0].radio_type |=
+ CHAN_BW_40MHZ << 2;
SET_SECONDARYCHAN(chan_list->chan_scan_param[0].
radio_type,
(bss_desc->bcn_ht_oper->ht_param &
IEEE80211_HT_PARAM_CHA_SEC_OFFSET));
-
+ }
*buffer += struct_size(chan_list, chan_scan_param, 1);
ret_len += struct_size(chan_list, chan_scan_param, 1);
}
diff --git a/drivers/net/wireless/marvell/mwifiex/cfp.c b/drivers/net/wireless/marvell/mwifiex/cfp.c
index d39092b99212..d7fd79214bcf 100644
--- a/drivers/net/wireless/marvell/mwifiex/cfp.c
+++ b/drivers/net/wireless/marvell/mwifiex/cfp.c
@@ -150,7 +150,7 @@ static const u16 ac_mcs_rate_nss2[8][10] = {
struct region_code_mapping {
u8 code;
- u8 region[IEEE80211_COUNTRY_STRING_LEN];
+ u8 region[IEEE80211_COUNTRY_STRING_LEN] __nonstring;
};
static struct region_code_mapping region_code_mapping_t[] = {
diff --git a/drivers/net/wireless/marvell/mwifiex/fw.h b/drivers/net/wireless/marvell/mwifiex/fw.h
index 4a96281792cc..91458f3bd14a 100644
--- a/drivers/net/wireless/marvell/mwifiex/fw.h
+++ b/drivers/net/wireless/marvell/mwifiex/fw.h
@@ -454,6 +454,11 @@ enum mwifiex_channel_flags {
#define HostCmd_RET_BIT 0x8000
#define HostCmd_ACT_GEN_GET 0x0000
#define HostCmd_ACT_GEN_SET 0x0001
+#define HOST_CMD_ACT_GEN_SET 0x0001
+/* Non-CamelCase variant added to satisfy checkpatch; it is intended to
+ * eventually replace the existing CamelCase-style macros for consistency.
+ */
#define HostCmd_ACT_GEN_REMOVE 0x0004
#define HostCmd_ACT_BITWISE_SET 0x0002
#define HostCmd_ACT_BITWISE_CLR 0x0003
@@ -2352,6 +2357,14 @@ struct host_cmd_ds_add_station {
u8 tlv[];
} __packed;
+#define MWIFIEX_CFG_TYPE_CAL 0x2
+
+struct host_cmd_ds_802_11_cfg_data {
+ __le16 action;
+ __le16 type;
+ __le16 data_len;
+} __packed;
+
struct host_cmd_ds_command {
__le16 command;
__le16 size;
@@ -2431,6 +2444,7 @@ struct host_cmd_ds_command {
struct host_cmd_ds_pkt_aggr_ctrl pkt_aggr_ctrl;
struct host_cmd_ds_sta_configure sta_cfg;
struct host_cmd_ds_add_station sta_info;
+ struct host_cmd_ds_802_11_cfg_data cfg_data;
} params;
} __packed;
diff --git a/drivers/net/wireless/marvell/mwifiex/main.c b/drivers/net/wireless/marvell/mwifiex/main.c
index 855019fe5485..b07cb302a00c 100644
--- a/drivers/net/wireless/marvell/mwifiex/main.c
+++ b/drivers/net/wireless/marvell/mwifiex/main.c
@@ -54,7 +54,7 @@ const u16 mwifiex_1d_to_wmm_queue[8] = { 1, 0, 0, 1, 2, 2, 3, 3 };
* proper cleanup before exiting.
*/
static int mwifiex_register(void *card, struct device *dev,
- struct mwifiex_if_ops *if_ops, void **padapter)
+ const struct mwifiex_if_ops *if_ops, void **padapter)
{
struct mwifiex_adapter *adapter;
int i;
@@ -691,10 +691,6 @@ err_dnld_fw:
init_failed = true;
done:
- if (adapter->cal_data) {
- release_firmware(adapter->cal_data);
- adapter->cal_data = NULL;
- }
if (adapter->firmware) {
release_firmware(adapter->firmware);
adapter->firmware = NULL;
@@ -1713,7 +1709,7 @@ err_exit:
*/
int
mwifiex_add_card(void *card, struct completion *fw_done,
- struct mwifiex_if_ops *if_ops, u8 iface_type,
+ const struct mwifiex_if_ops *if_ops, u8 iface_type,
struct device *dev)
{
struct mwifiex_adapter *adapter;
diff --git a/drivers/net/wireless/marvell/mwifiex/main.h b/drivers/net/wireless/marvell/mwifiex/main.h
index 0674dcf7a537..63f1c900e096 100644
--- a/drivers/net/wireless/marvell/mwifiex/main.h
+++ b/drivers/net/wireless/marvell/mwifiex/main.h
@@ -1470,7 +1470,7 @@ int mwifiex_init_shutdown_fw(struct mwifiex_private *priv,
u32 func_init_shutdown);
int mwifiex_add_card(void *card, struct completion *fw_done,
- struct mwifiex_if_ops *if_ops, u8 iface_type,
+ const struct mwifiex_if_ops *if_ops, u8 iface_type,
struct device *dev);
int mwifiex_remove_card(struct mwifiex_adapter *adapter);
@@ -1571,8 +1571,6 @@ void mwifiex_uap_set_channel(struct mwifiex_private *priv,
struct cfg80211_chan_def chandef);
int mwifiex_config_start_uap(struct mwifiex_private *priv,
struct mwifiex_uap_bss_param *bss_cfg);
-void mwifiex_uap_del_sta_data(struct mwifiex_private *priv,
- struct mwifiex_sta_node *node);
void mwifiex_config_uap_11d(struct mwifiex_private *priv,
struct cfg80211_beacon_data *beacon_data);
diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c
index 5f997becdbaa..e11458fd4d50 100644
--- a/drivers/net/wireless/marvell/mwifiex/pcie.c
+++ b/drivers/net/wireless/marvell/mwifiex/pcie.c
@@ -21,7 +21,7 @@
#define PCIE_VERSION "1.0"
#define DRV_NAME "Marvell mwifiex PCIe"
-static struct mwifiex_if_ops pcie_ops;
+static const struct mwifiex_if_ops pcie_ops;
static const struct mwifiex_pcie_card_reg mwifiex_reg_8766 = {
.cmd_addr_lo = PCIE_SCRATCH_0_REG,
@@ -3240,7 +3240,7 @@ static void mwifiex_pcie_down_dev(struct mwifiex_adapter *adapter)
mwifiex_pcie_free_buffers(adapter);
}
-static struct mwifiex_if_ops pcie_ops = {
+static const struct mwifiex_if_ops pcie_ops = {
.init_if = mwifiex_init_pcie,
.cleanup_if = mwifiex_cleanup_pcie,
.check_fw_status = mwifiex_check_fw_status,
diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.c b/drivers/net/wireless/marvell/mwifiex/sdio.c
index 490ffd981164..c1fe48448839 100644
--- a/drivers/net/wireless/marvell/mwifiex/sdio.c
+++ b/drivers/net/wireless/marvell/mwifiex/sdio.c
@@ -21,7 +21,7 @@
static void mwifiex_sdio_work(struct work_struct *work);
-static struct mwifiex_if_ops sdio_ops;
+static const struct mwifiex_if_ops sdio_ops;
static const struct mwifiex_sdio_card_reg mwifiex_reg_sd87xx = {
.start_rd_port = 1,
@@ -3167,7 +3167,7 @@ static void mwifiex_sdio_up_dev(struct mwifiex_adapter *adapter)
dev_err(&card->func->dev, "error enabling SDIO port\n");
}
-static struct mwifiex_if_ops sdio_ops = {
+static const struct mwifiex_if_ops sdio_ops = {
.init_if = mwifiex_init_sdio,
.cleanup_if = mwifiex_cleanup_sdio,
.check_fw_status = mwifiex_check_fw_status,
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_cmd.c b/drivers/net/wireless/marvell/mwifiex/sta_cmd.c
index e2800a831c8e..c4689f5a1acc 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_cmd.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_cmd.c
@@ -1507,6 +1507,7 @@ static int mwifiex_cmd_cfg_data(struct mwifiex_private *priv,
u32 len;
u8 *data = (u8 *)cmd + S_DS_GEN;
int ret;
+ struct host_cmd_ds_802_11_cfg_data *pcfg_data;
if (prop) {
len = prop->length;
@@ -1514,12 +1515,20 @@ static int mwifiex_cmd_cfg_data(struct mwifiex_private *priv,
data, len);
if (ret)
return ret;
+
+ cmd->size = cpu_to_le16(S_DS_GEN + len);
mwifiex_dbg(adapter, INFO,
"download cfg_data from device tree: %s\n",
prop->name);
} else if (adapter->cal_data->data && adapter->cal_data->size > 0) {
len = mwifiex_parse_cal_cfg((u8 *)adapter->cal_data->data,
- adapter->cal_data->size, data);
+ adapter->cal_data->size,
+ data + sizeof(*pcfg_data));
+ pcfg_data = &cmd->params.cfg_data;
+ pcfg_data->action = cpu_to_le16(HOST_CMD_ACT_GEN_SET);
+ pcfg_data->type = cpu_to_le16(MWIFIEX_CFG_TYPE_CAL);
+ pcfg_data->data_len = cpu_to_le16(len);
+ cmd->size = cpu_to_le16(S_DS_GEN + sizeof(*pcfg_data) + len);
mwifiex_dbg(adapter, INFO,
"download cfg_data from config file\n");
} else {
@@ -1527,7 +1536,6 @@ static int mwifiex_cmd_cfg_data(struct mwifiex_private *priv,
}
cmd->command = cpu_to_le16(HostCmd_CMD_CFG_DATA);
- cmd->size = cpu_to_le16(S_DS_GEN + len);
return 0;
}
@@ -2293,9 +2301,13 @@ int mwifiex_sta_init_cmd(struct mwifiex_private *priv, u8 first_sta, bool init)
"marvell,caldata");
}
- if (adapter->cal_data)
+ if (adapter->cal_data) {
mwifiex_send_cmd(priv, HostCmd_CMD_CFG_DATA,
HostCmd_ACT_GEN_SET, 0, NULL, true);
+ release_firmware(adapter->cal_data);
+ adapter->cal_data = NULL;
+ }
+
/* Read MAC address from HW */
ret = mwifiex_send_cmd(priv, HostCmd_CMD_GET_HW_SPEC,
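
The CFG_DATA change above prepends a fixed host_cmd_ds_802_11_cfg_data header (action, type, data length, all little-endian 16-bit fields) in front of the calibration blob and sizes the command accordingly. A standalone sketch of the resulting layout follows; S_DS_GEN, the blob contents, and the buffer size are assumptions made for illustration, and the real driver converts the 16-bit fields with cpu_to_le16():

/* Illustrative sketch, not driver code: layout of the CFG_DATA command
 * after the change above. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define S_DS_GEN	8	/* assumed generic command-header length */
#define CFG_TYPE_CAL	0x2
#define ACT_GEN_SET	0x0001

struct cfg_data_hdr {
	uint16_t action;
	uint16_t type;
	uint16_t data_len;
} __attribute__((packed));

int main(void)
{
	uint8_t cal_blob[32] = { 0 };	/* stand-in calibration data */
	uint8_t cmd[256] = { 0 };
	struct cfg_data_hdr hdr = {
		.action = ACT_GEN_SET,
		.type = CFG_TYPE_CAL,
		.data_len = sizeof(cal_blob),
	};

	/* fixed header goes right after the generic header, blob follows */
	memcpy(cmd + S_DS_GEN, &hdr, sizeof(hdr));
	memcpy(cmd + S_DS_GEN + sizeof(hdr), cal_blob, sizeof(cal_blob));

	printf("cmd->size = %zu\n",
	       (size_t)(S_DS_GEN + sizeof(hdr) + sizeof(cal_blob)));
	return 0;
}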
diff --git a/drivers/net/wireless/marvell/mwifiex/uap_event.c b/drivers/net/wireless/marvell/mwifiex/uap_event.c
index 58ef5020a46a..245cb99a3daa 100644
--- a/drivers/net/wireless/marvell/mwifiex/uap_event.c
+++ b/drivers/net/wireless/marvell/mwifiex/uap_event.c
@@ -325,19 +325,3 @@ int mwifiex_process_uap_event(struct mwifiex_private *priv)
return 0;
}
-
-/* This function deletes station entry from associated station list.
- * Also if both AP and STA are 11n enabled, RxReorder tables and TxBA stream
- * tables created for this station are deleted.
- */
-void mwifiex_uap_del_sta_data(struct mwifiex_private *priv,
- struct mwifiex_sta_node *node)
-{
- if (priv->ap_11n_enabled && node->is_11n_enabled) {
- mwifiex_11n_del_rx_reorder_tbl_by_ta(priv, node->mac_addr);
- mwifiex_del_tx_ba_stream_tbl_by_ra(priv, node->mac_addr);
- }
- mwifiex_del_sta_entry(priv, node->mac_addr);
-
- return;
-}
diff --git a/drivers/net/wireless/marvell/mwifiex/usb.c b/drivers/net/wireless/marvell/mwifiex/usb.c
index 6085cd50970d..3034c4405cb5 100644
--- a/drivers/net/wireless/marvell/mwifiex/usb.c
+++ b/drivers/net/wireless/marvell/mwifiex/usb.c
@@ -10,7 +10,7 @@
#define USB_VERSION "1.0"
-static struct mwifiex_if_ops usb_ops;
+static const struct mwifiex_if_ops usb_ops;
static const struct usb_device_id mwifiex_usb_table[] = {
/* 8766 */
@@ -1585,7 +1585,7 @@ mwifiex_pm_wakeup_card_complete(struct mwifiex_adapter *adapter)
return 0;
}
-static struct mwifiex_if_ops usb_ops = {
+static const struct mwifiex_if_ops usb_ops = {
.register_dev = mwifiex_register_dev,
.unregister_dev = mwifiex_unregister_dev,
.wakeup = mwifiex_pm_wakeup_card,
diff --git a/drivers/net/wireless/mediatek/mt76/channel.c b/drivers/net/wireless/mediatek/mt76/channel.c
index 6a35c6ebd823..e7b839e74290 100644
--- a/drivers/net/wireless/mediatek/mt76/channel.c
+++ b/drivers/net/wireless/mediatek/mt76/channel.c
@@ -293,6 +293,7 @@ struct mt76_vif_link *mt76_get_vif_phy_link(struct mt76_phy *phy,
kfree(mlink);
return ERR_PTR(ret);
}
+ rcu_assign_pointer(mvif->offchannel_link, mlink);
return mlink;
}
@@ -301,10 +302,12 @@ void mt76_put_vif_phy_link(struct mt76_phy *phy, struct ieee80211_vif *vif,
struct mt76_vif_link *mlink)
{
struct mt76_dev *dev = phy->dev;
+ struct mt76_vif_data *mvif = mlink->mvif;
if (IS_ERR_OR_NULL(mlink) || !mlink->offchannel)
return;
+ rcu_assign_pointer(mvif->offchannel_link, NULL);
dev->drv->vif_link_remove(phy, vif, &vif->bss_conf, mlink);
kfree(mlink);
}
diff --git a/drivers/net/wireless/mediatek/mt76/eeprom.c b/drivers/net/wireless/mediatek/mt76/eeprom.c
index 0bc66cc19acd..443517d06c9f 100644
--- a/drivers/net/wireless/mediatek/mt76/eeprom.c
+++ b/drivers/net/wireless/mediatek/mt76/eeprom.c
@@ -95,6 +95,10 @@ int mt76_get_of_data_from_mtd(struct mt76_dev *dev, void *eep, int offset, int l
#ifdef CONFIG_NL80211_TESTMODE
dev->test_mtd.name = devm_kstrdup(dev->dev, part, GFP_KERNEL);
+ if (!dev->test_mtd.name) {
+ ret = -ENOMEM;
+ goto out_put_node;
+ }
dev->test_mtd.offset = offset;
#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c
index 508b472408c2..b88d7e10742e 100644
--- a/drivers/net/wireless/mediatek/mt76/mac80211.c
+++ b/drivers/net/wireless/mediatek/mt76/mac80211.c
@@ -816,8 +816,8 @@ void mt76_free_device(struct mt76_dev *dev)
}
EXPORT_SYMBOL_GPL(mt76_free_device);
-static struct mt76_phy *
-mt76_vif_phy(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+struct mt76_phy *mt76_vif_phy(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
{
struct mt76_vif_link *mlink = (struct mt76_vif_link *)vif->drv_priv;
struct mt76_chanctx *ctx;
@@ -831,6 +831,7 @@ mt76_vif_phy(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
ctx = (struct mt76_chanctx *)mlink->ctx->drv_priv;
return ctx->phy;
}
+EXPORT_SYMBOL_GPL(mt76_vif_phy);
static void mt76_rx_release_amsdu(struct mt76_phy *phy, enum mt76_rxq_id q)
{
@@ -1697,6 +1698,17 @@ void mt76_wcid_add_poll(struct mt76_dev *dev, struct mt76_wcid *wcid)
}
EXPORT_SYMBOL_GPL(mt76_wcid_add_poll);
+s8 mt76_get_power_bound(struct mt76_phy *phy, s8 txpower)
+{
+ int n_chains = hweight16(phy->chainmask);
+
+ txpower = mt76_get_sar_power(phy, phy->chandef.chan, txpower * 2);
+ txpower -= mt76_tx_power_nss_delta(n_chains);
+
+ return txpower;
+}
+EXPORT_SYMBOL_GPL(mt76_get_power_bound);
+
int mt76_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
unsigned int link_id, int *dbm)
{
diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h
index 132148f7b107..d7cd467b812f 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76.h
@@ -351,6 +351,7 @@ struct mt76_wcid {
u8 hw_key_idx;
u8 hw_key_idx2;
+ u8 offchannel:1;
u8 sta:1;
u8 sta_disabled:1;
u8 amsdu:1;
@@ -491,6 +492,7 @@ struct mt76_hw_cap {
#define MT_DRV_RX_DMA_HDR BIT(3)
#define MT_DRV_HW_MGMT_TXQ BIT(4)
#define MT_DRV_AMSDU_OFFLOAD BIT(5)
+#define MT_DRV_IGNORE_TXS_FAILED BIT(6)
struct mt76_driver_ops {
u32 drv_flags;
@@ -769,6 +771,7 @@ struct mt76_testmode_data {
struct mt76_vif_link {
u8 idx;
+ u8 link_idx;
u8 omac_idx;
u8 band_idx;
u8 wmm_idx;
@@ -786,6 +789,7 @@ struct mt76_vif_link {
struct mt76_vif_data {
struct mt76_vif_link __rcu *link[IEEE80211_MLD_MAX_NUM_LINKS];
+ struct mt76_vif_link __rcu *offchannel_link;
struct mt76_phy *roc_phy;
u16 valid_links;
@@ -1224,6 +1228,8 @@ struct mt76_phy *mt76_alloc_phy(struct mt76_dev *dev, unsigned int size,
u8 band_idx);
int mt76_register_phy(struct mt76_phy *phy, bool vht,
struct ieee80211_rate *rates, int n_rates);
+struct mt76_phy *mt76_vif_phy(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif);
struct dentry *mt76_register_debugfs_fops(struct mt76_phy *phy,
const struct file_operations *ops);
@@ -1482,6 +1488,8 @@ void mt76_sta_pre_rcu_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
int mt76_get_min_avg_rssi(struct mt76_dev *dev, u8 phy_idx);
+s8 mt76_get_power_bound(struct mt76_phy *phy, s8 txpower);
+
int mt76_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
unsigned int link_id, int *dbm);
int mt76_init_sar_power(struct ieee80211_hw *hw,
diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.h b/drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.h
index db0c29e65185..487ad716f872 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.h
@@ -314,6 +314,9 @@ enum tx_frag_idx {
#define MT_TXFREE_INFO_COUNT GENMASK(27, 24)
#define MT_TXFREE_INFO_STAT GENMASK(29, 28)
+#define MT_TXS_HDR_SIZE 4 /* Unit: DW */
+#define MT_TXS_SIZE 12 /* Unit: DW */
+
#define MT_TXS0_BW GENMASK(31, 29)
#define MT_TXS0_TID GENMASK(28, 26)
#define MT_TXS0_AMPDU BIT(25)
diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
index f30cf9e71610..bafcf5a279e2 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
@@ -391,7 +391,7 @@ void mt76_connac_mcu_sta_basic_tlv(struct mt76_dev *dev, struct sk_buff *skb,
basic->conn_type = cpu_to_le32(CONNECTION_INFRA_BC);
if (vif->type == NL80211_IFTYPE_STATION &&
- !is_zero_ether_addr(link_conf->bssid)) {
+ link_conf && !is_zero_ether_addr(link_conf->bssid)) {
memcpy(basic->peer_addr, link_conf->bssid, ETH_ALEN);
basic->aid = cpu_to_le16(vif->cfg.aid);
} else {
@@ -1168,7 +1168,7 @@ int mt76_connac_mcu_uni_add_dev(struct mt76_phy *phy,
.tag = cpu_to_le16(DEV_INFO_ACTIVE),
.len = cpu_to_le16(sizeof(struct req_tlv)),
.active = enable,
- .link_idx = mvif->idx,
+ .link_idx = mvif->link_idx,
},
};
struct {
@@ -1191,7 +1191,7 @@ int mt76_connac_mcu_uni_add_dev(struct mt76_phy *phy,
.bmc_tx_wlan_idx = cpu_to_le16(wcid->idx),
.sta_idx = cpu_to_le16(wcid->idx),
.conn_state = 1,
- .link_idx = mvif->idx,
+ .link_idx = mvif->link_idx,
},
};
int err, idx, cmd, len;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c b/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c
index b456ccd00d58..11c16d1fc70f 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c
@@ -156,7 +156,8 @@ mt76x0e_probe(struct pci_dev *pdev, const struct pci_device_id *id)
static const struct mt76_driver_ops drv_ops = {
.txwi_size = sizeof(struct mt76x02_txwi),
.drv_flags = MT_DRV_TX_ALIGNED4_SKBS |
- MT_DRV_SW_RX_AIRTIME,
+ MT_DRV_SW_RX_AIRTIME |
+ MT_DRV_IGNORE_TXS_FAILED,
.survey_flags = SURVEY_INFO_TIME_TX,
.update_survey = mt76x02_update_channel,
.set_channel = mt76x0_set_channel,
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
index b031c500b741..90e5666c0857 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
@@ -214,7 +214,8 @@ static int mt76x0u_probe(struct usb_interface *usb_intf,
const struct usb_device_id *id)
{
static const struct mt76_driver_ops drv_ops = {
- .drv_flags = MT_DRV_SW_RX_AIRTIME,
+ .drv_flags = MT_DRV_SW_RX_AIRTIME |
+ MT_DRV_IGNORE_TXS_FAILED,
.survey_flags = SURVEY_INFO_TIME_TX,
.update_survey = mt76x02_update_channel,
.set_channel = mt76x0_set_channel,
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c
index 0e1ede9314d8..4840d0b500b3 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c
@@ -264,8 +264,8 @@ void mt76x02u_init_beacon_config(struct mt76x02_dev *dev)
};
dev->beacon_ops = &beacon_ops;
- hrtimer_init(&dev->pre_tbtt_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- dev->pre_tbtt_timer.function = mt76x02u_pre_tbtt_interrupt;
+ hrtimer_setup(&dev->pre_tbtt_timer, mt76x02u_pre_tbtt_interrupt, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
INIT_WORK(&dev->pre_tbtt_work, mt76x02u_pre_tbtt_work);
mt76x02_init_beacon_config(dev);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c
index 727bfdd00b40..2303019670e2 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c
@@ -22,7 +22,8 @@ mt76x2e_probe(struct pci_dev *pdev, const struct pci_device_id *id)
static const struct mt76_driver_ops drv_ops = {
.txwi_size = sizeof(struct mt76x02_txwi),
.drv_flags = MT_DRV_TX_ALIGNED4_SKBS |
- MT_DRV_SW_RX_AIRTIME,
+ MT_DRV_SW_RX_AIRTIME |
+ MT_DRV_IGNORE_TXS_FAILED,
.survey_flags = SURVEY_INFO_TIME_TX,
.update_survey = mt76x02_update_channel,
.set_channel = mt76x2e_set_channel,
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c
index e832ad53e239..84ef80ab4afb 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c
@@ -22,6 +22,7 @@ static const struct usb_device_id mt76x2u_device_table[] = {
{ USB_DEVICE(0x0846, 0x9053) }, /* Netgear A6210 */
{ USB_DEVICE(0x045e, 0x02e6) }, /* XBox One Wireless Adapter */
{ USB_DEVICE(0x045e, 0x02fe) }, /* XBox One Wireless Adapter */
+ { USB_DEVICE(0x2357, 0x0137) }, /* TP-Link TL-WDN6200 */
{ },
};
@@ -29,7 +30,8 @@ static int mt76x2u_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
static const struct mt76_driver_ops drv_ops = {
- .drv_flags = MT_DRV_SW_RX_AIRTIME,
+ .drv_flags = MT_DRV_SW_RX_AIRTIME |
+ MT_DRV_IGNORE_TXS_FAILED,
.survey_flags = SURVEY_INFO_TIME_TX,
.update_survey = mt76x02_update_channel,
.set_channel = mt76x2u_set_channel,
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c
index 578013884e43..192e8eff970b 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c
@@ -303,9 +303,9 @@ static int mt7915_muru_stats_show(struct seq_file *file, void *data)
phy->mib.dl_vht_3mu_cnt,
phy->mib.dl_vht_4mu_cnt);
- sub_total_cnt = phy->mib.dl_vht_2mu_cnt +
- phy->mib.dl_vht_3mu_cnt +
- phy->mib.dl_vht_4mu_cnt;
+ sub_total_cnt = (u64)phy->mib.dl_vht_2mu_cnt +
+ phy->mib.dl_vht_3mu_cnt +
+ phy->mib.dl_vht_4mu_cnt;
seq_printf(file, "\nTotal non-HE MU-MIMO DL PPDU count: %lld",
sub_total_cnt);
@@ -353,26 +353,27 @@ static int mt7915_muru_stats_show(struct seq_file *file, void *data)
phy->mib.dl_he_9to16ru_cnt,
phy->mib.dl_he_gtr16ru_cnt);
- sub_total_cnt = phy->mib.dl_he_2mu_cnt +
- phy->mib.dl_he_3mu_cnt +
- phy->mib.dl_he_4mu_cnt;
+ sub_total_cnt = (u64)phy->mib.dl_he_2mu_cnt +
+ phy->mib.dl_he_3mu_cnt +
+ phy->mib.dl_he_4mu_cnt;
total_ppdu_cnt = sub_total_cnt;
seq_printf(file, "\nTotal HE MU-MIMO DL PPDU count: %lld",
sub_total_cnt);
- sub_total_cnt = phy->mib.dl_he_2ru_cnt +
- phy->mib.dl_he_3ru_cnt +
- phy->mib.dl_he_4ru_cnt +
- phy->mib.dl_he_5to8ru_cnt +
- phy->mib.dl_he_9to16ru_cnt +
- phy->mib.dl_he_gtr16ru_cnt;
+ sub_total_cnt = (u64)phy->mib.dl_he_2ru_cnt +
+ phy->mib.dl_he_3ru_cnt +
+ phy->mib.dl_he_4ru_cnt +
+ phy->mib.dl_he_5to8ru_cnt +
+ phy->mib.dl_he_9to16ru_cnt +
+ phy->mib.dl_he_gtr16ru_cnt;
total_ppdu_cnt += sub_total_cnt;
seq_printf(file, "\nTotal HE OFDMA DL PPDU count: %lld",
sub_total_cnt);
- total_ppdu_cnt += phy->mib.dl_he_su_cnt + phy->mib.dl_he_ext_su_cnt;
+ total_ppdu_cnt += (u64)phy->mib.dl_he_su_cnt +
+ phy->mib.dl_he_ext_su_cnt;
seq_printf(file, "\nAll HE DL PPDU count: %lld", total_ppdu_cnt);
@@ -404,20 +405,20 @@ static int mt7915_muru_stats_show(struct seq_file *file, void *data)
phy->mib.ul_hetrig_9to16ru_cnt,
phy->mib.ul_hetrig_gtr16ru_cnt);
- sub_total_cnt = phy->mib.ul_hetrig_2mu_cnt +
- phy->mib.ul_hetrig_3mu_cnt +
- phy->mib.ul_hetrig_4mu_cnt;
+ sub_total_cnt = (u64)phy->mib.ul_hetrig_2mu_cnt +
+ phy->mib.ul_hetrig_3mu_cnt +
+ phy->mib.ul_hetrig_4mu_cnt;
total_ppdu_cnt = sub_total_cnt;
seq_printf(file, "\nTotal HE MU-MIMO UL TB PPDU count: %lld",
sub_total_cnt);
- sub_total_cnt = phy->mib.ul_hetrig_2ru_cnt +
- phy->mib.ul_hetrig_3ru_cnt +
- phy->mib.ul_hetrig_4ru_cnt +
- phy->mib.ul_hetrig_5to8ru_cnt +
- phy->mib.ul_hetrig_9to16ru_cnt +
- phy->mib.ul_hetrig_gtr16ru_cnt;
+ sub_total_cnt = (u64)phy->mib.ul_hetrig_2ru_cnt +
+ phy->mib.ul_hetrig_3ru_cnt +
+ phy->mib.ul_hetrig_4ru_cnt +
+ phy->mib.ul_hetrig_5to8ru_cnt +
+ phy->mib.ul_hetrig_9to16ru_cnt +
+ phy->mib.ul_hetrig_gtr16ru_cnt;
total_ppdu_cnt += sub_total_cnt;
seq_printf(file, "\nTotal HE OFDMA UL TB PPDU count: %lld",
@@ -1084,13 +1085,13 @@ mt7915_rate_txpower_set(struct file *file, const char __user *user_buf,
return -EINVAL;
if (pwr160)
- pwr160 = mt7915_get_power_bound(phy, pwr160);
+ pwr160 = mt76_get_power_bound(mphy, pwr160);
if (pwr80)
- pwr80 = mt7915_get_power_bound(phy, pwr80);
+ pwr80 = mt76_get_power_bound(mphy, pwr80);
if (pwr40)
- pwr40 = mt7915_get_power_bound(phy, pwr40);
+ pwr40 = mt76_get_power_bound(mphy, pwr40);
if (pwr20)
- pwr20 = mt7915_get_power_bound(phy, pwr20);
+ pwr20 = mt76_get_power_bound(mphy, pwr20);
if (pwr160 < 0 || pwr80 < 0 || pwr40 < 0 || pwr20 < 0)
return -EINVAL;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
index 9d790f234e82..3643c72bb68d 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
@@ -3323,7 +3323,7 @@ int mt7915_mcu_set_txpower_frame(struct mt7915_phy *phy,
if (ret)
return ret;
- txpower = mt7915_get_power_bound(phy, txpower);
+ txpower = mt76_get_power_bound(mphy, txpower);
if (txpower > mphy->txpower_cur || txpower < 0)
return -EINVAL;
@@ -3373,7 +3373,7 @@ int mt7915_mcu_set_txpower_sku(struct mt7915_phy *phy)
int i, idx;
int tx_power;
- tx_power = mt7915_get_power_bound(phy, hw->conf.power_level);
+ tx_power = mt76_get_power_bound(mphy, hw->conf.power_level);
tx_power = mt76_get_rate_power_limits(mphy, mphy->chandef.chan,
&limits_array, tx_power);
mphy->txpower_cur = tx_power;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h
index 49476a4182fd..092ed504a8f2 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h
@@ -515,16 +515,4 @@ enum {
sizeof(struct bss_info_bmc_rate) +\
sizeof(struct bss_info_ext_bss))
-static inline s8
-mt7915_get_power_bound(struct mt7915_phy *phy, s8 txpower)
-{
- struct mt76_phy *mphy = phy->mt76;
- int n_chains = hweight16(mphy->chainmask);
-
- txpower = mt76_get_sar_power(mphy, mphy->chandef.chan, txpower * 2);
- txpower -= mt76_tx_power_nss_delta(n_chains);
-
- return txpower;
-}
-
#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/main.c b/drivers/net/wireless/mediatek/mt76/mt7921/main.c
index 13e58c328aff..78b77a54d195 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/main.c
@@ -811,6 +811,7 @@ int mt7921_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
msta->deflink.wcid.phy_idx = mvif->bss_conf.mt76.band_idx;
msta->deflink.wcid.tx_info |= MT_WCID_TX_INFO_SET;
msta->deflink.last_txs = jiffies;
+ msta->deflink.sta = msta;
ret = mt76_connac_pm_wake(&dev->mphy, &dev->pm);
if (ret)
diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/init.c b/drivers/net/wireless/mediatek/mt76/mt7925/init.c
index f41ca4248497..63cb08f4d87c 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7925/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7925/init.c
@@ -58,15 +58,110 @@ static int mt7925_thermal_init(struct mt792x_phy *phy)
return PTR_ERR_OR_ZERO(hwmon);
}
+void mt7925_regd_be_ctrl(struct mt792x_dev *dev, u8 *alpha2)
+{
+ struct mt792x_phy *phy = &dev->phy;
+ struct mt7925_clc_rule_v2 *rule;
+ struct mt7925_clc *clc;
+ bool old = dev->has_eht, new = true;
+ u32 mtcl_conf = mt792x_acpi_get_mtcl_conf(&dev->phy, alpha2);
+ u8 *pos;
+
+ if (mtcl_conf != MT792X_ACPI_MTCL_INVALID &&
+ (((mtcl_conf >> 4) & 0x3) == 0)) {
+ new = false;
+ goto out;
+ }
+
+ if (!phy->clc[MT792x_CLC_BE_CTRL])
+ goto out;
+
+ clc = (struct mt7925_clc *)phy->clc[MT792x_CLC_BE_CTRL];
+ pos = clc->data;
+
+ while (1) {
+ rule = (struct mt7925_clc_rule_v2 *)pos;
+
+ if (rule->alpha2[0] == alpha2[0] &&
+ rule->alpha2[1] == alpha2[1]) {
+ new = false;
+ break;
+ }
+
+ /* Check the last one */
+ if (rule->flag & BIT(0))
+ break;
+
+ pos += sizeof(*rule);
+ }
+
+out:
+ if (old == new)
+ return;
+
+ dev->has_eht = new;
+ mt7925_set_stream_he_eht_caps(phy);
+}
+
+static void
+mt7925_regd_channel_update(struct wiphy *wiphy, struct mt792x_dev *dev)
+{
+#define IS_UNII_INVALID(idx, sfreq, efreq, cfreq) \
+ (!(dev->phy.clc_chan_conf & BIT(idx)) && (cfreq) >= (sfreq) && (cfreq) <= (efreq))
+#define MT7925_UNII_59G_IS_VALID 0x1
+#define MT7925_UNII_6G_IS_VALID 0x1e
+ struct ieee80211_supported_band *sband;
+ struct mt76_dev *mdev = &dev->mt76;
+ struct ieee80211_channel *ch;
+ u32 mtcl_conf = mt792x_acpi_get_mtcl_conf(&dev->phy, mdev->alpha2);
+ int i;
+
+ if (mtcl_conf != MT792X_ACPI_MTCL_INVALID) {
+ if ((mtcl_conf & 0x3) == 0)
+ dev->phy.clc_chan_conf &= ~MT7925_UNII_59G_IS_VALID;
+ if (((mtcl_conf >> 2) & 0x3) == 0)
+ dev->phy.clc_chan_conf &= ~MT7925_UNII_6G_IS_VALID;
+ }
+
+ sband = wiphy->bands[NL80211_BAND_5GHZ];
+ if (!sband)
+ return;
+
+ for (i = 0; i < sband->n_channels; i++) {
+ ch = &sband->channels[i];
+
+ /* UNII-4 */
+ if (IS_UNII_INVALID(0, 5845, 5925, ch->center_freq))
+ ch->flags |= IEEE80211_CHAN_DISABLED;
+ }
+
+ sband = wiphy->bands[NL80211_BAND_6GHZ];
+ if (!sband)
+ return;
+
+ for (i = 0; i < sband->n_channels; i++) {
+ ch = &sband->channels[i];
+
+ /* UNII-5/6/7/8 */
+ if (IS_UNII_INVALID(1, 5925, 6425, ch->center_freq) ||
+ IS_UNII_INVALID(2, 6425, 6525, ch->center_freq) ||
+ IS_UNII_INVALID(3, 6525, 6875, ch->center_freq) ||
+ IS_UNII_INVALID(4, 6875, 7125, ch->center_freq))
+ ch->flags |= IEEE80211_CHAN_DISABLED;
+ }
+}
+
void mt7925_regd_update(struct mt792x_dev *dev)
{
struct mt76_dev *mdev = &dev->mt76;
struct ieee80211_hw *hw = mdev->hw;
+ struct wiphy *wiphy = hw->wiphy;
if (!dev->regd_change)
return;
mt7925_mcu_set_clc(dev, mdev->alpha2, dev->country_ie_env);
+ mt7925_regd_channel_update(wiphy, dev);
mt7925_mcu_set_channel_domain(hw->priv);
mt7925_set_tx_sar_pwr(hw, NULL);
dev->regd_change = false;
@@ -244,6 +339,7 @@ int mt7925_register_device(struct mt792x_dev *dev)
dev->mt76.tx_worker.fn = mt792x_tx_worker;
INIT_DELAYED_WORK(&dev->pm.ps_work, mt792x_pm_power_save_work);
+ INIT_DELAYED_WORK(&dev->mlo_pm_work, mt7925_mlo_pm_work);
INIT_WORK(&dev->pm.wake_work, mt792x_pm_wake_work);
spin_lock_init(&dev->pm.wake.lock);
mutex_init(&dev->pm.mutex);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/main.c b/drivers/net/wireless/mediatek/mt76/mt7925/main.c
index 98daf80ac131..e79364ac129e 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7925/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7925/main.c
@@ -251,12 +251,12 @@ int mt7925_init_mlo_caps(struct mt792x_phy *phy)
},
};
- if (!(phy->chip_cap & MT792x_CHIP_CAP_MLO_EVT_EN))
+ if (!(phy->chip_cap & MT792x_CHIP_CAP_MLO_EN))
return 0;
ext_capab[0].eml_capabilities = phy->eml_cap;
ext_capab[0].mld_capa_and_ops =
- u16_encode_bits(1, IEEE80211_MLD_CAP_OP_MAX_SIMUL_LINKS);
+ u16_encode_bits(0, IEEE80211_MLD_CAP_OP_MAX_SIMUL_LINKS);
wiphy->flags |= WIPHY_FLAG_SUPPORTS_MLO;
wiphy->iftype_ext_capab = ext_capab;
@@ -360,10 +360,15 @@ static int mt7925_mac_link_bss_add(struct mt792x_dev *dev,
struct mt76_txq *mtxq;
int idx, ret = 0;
- mconf->mt76.idx = __ffs64(~dev->mt76.vif_mask);
- if (mconf->mt76.idx >= MT792x_MAX_INTERFACES) {
- ret = -ENOSPC;
- goto out;
+ if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
+ mconf->mt76.idx = MT792x_MAX_INTERFACES;
+ } else {
+ mconf->mt76.idx = __ffs64(~dev->mt76.vif_mask);
+
+ if (mconf->mt76.idx >= MT792x_MAX_INTERFACES) {
+ ret = -ENOSPC;
+ goto out;
+ }
}
mconf->mt76.omac_idx = ieee80211_vif_is_mld(vif) ?
@@ -371,6 +376,7 @@ static int mt7925_mac_link_bss_add(struct mt792x_dev *dev,
mconf->mt76.band_idx = 0xff;
mconf->mt76.wmm_idx = ieee80211_vif_is_mld(vif) ?
0 : mconf->mt76.idx % MT76_CONNAC_MAX_WMM_SETS;
+ mconf->mt76.link_idx = hweight16(mvif->valid_links);
if (mvif->phy->mt76->chandef.chan->band != NL80211_BAND_2GHZ)
mconf->mt76.basic_rates_idx = MT792x_BASIC_RATES_TBL + 4;
@@ -421,6 +427,7 @@ mt7925_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
mvif->bss_conf.vif = mvif;
mvif->sta.vif = mvif;
mvif->deflink_id = IEEE80211_LINK_UNSPECIFIED;
+ mvif->mlo_pm_state = MT792x_MLO_LINK_DISASSOC;
ret = mt7925_mac_link_bss_add(dev, &vif->bss_conf, &mvif->sta.deflink);
if (ret < 0)
@@ -1149,7 +1156,12 @@ static void mt7925_mac_link_sta_remove(struct mt76_dev *mdev,
struct mt792x_bss_conf *mconf;
mconf = mt792x_link_conf_to_mconf(link_conf);
- mt792x_mac_link_bss_remove(dev, mconf, mlink);
+
+ if (ieee80211_vif_is_mld(vif))
+ mt792x_mac_link_bss_remove(dev, mconf, mlink);
+ else
+ mt7925_mcu_add_bss_info(&dev->phy, mconf->mt76.ctx, link_conf,
+ link_sta, false);
}
spin_lock_bh(&mdev->sta_poll_lock);
@@ -1169,6 +1181,31 @@ mt7925_mac_sta_remove_links(struct mt792x_dev *dev, struct ieee80211_vif *vif,
struct mt76_wcid *wcid;
unsigned int link_id;
+ /* clean up bss before starec */
+ for_each_set_bit(link_id, &old_links, IEEE80211_MLD_MAX_NUM_LINKS) {
+ struct ieee80211_link_sta *link_sta;
+ struct ieee80211_bss_conf *link_conf;
+ struct mt792x_bss_conf *mconf;
+ struct mt792x_link_sta *mlink;
+
+ link_sta = mt792x_sta_to_link_sta(vif, sta, link_id);
+ if (!link_sta)
+ continue;
+
+ mlink = mt792x_sta_to_link(msta, link_id);
+ if (!mlink)
+ continue;
+
+ link_conf = mt792x_vif_to_bss_conf(vif, link_id);
+ if (!link_conf)
+ continue;
+
+ mconf = mt792x_link_conf_to_mconf(link_conf);
+
+ mt7925_mcu_add_bss_info(&dev->phy, mconf->mt76.ctx, link_conf,
+ link_sta, false);
+ }
+
for_each_set_bit(link_id, &old_links, IEEE80211_MLD_MAX_NUM_LINKS) {
struct ieee80211_link_sta *link_sta;
struct mt792x_link_sta *mlink;
@@ -1206,51 +1243,22 @@ void mt7925_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
{
struct mt792x_dev *dev = container_of(mdev, struct mt792x_dev, mt76);
struct mt792x_sta *msta = (struct mt792x_sta *)sta->drv_priv;
- struct {
- struct {
- u8 omac_idx;
- u8 band_idx;
- __le16 pad;
- } __packed hdr;
- struct req_tlv {
- __le16 tag;
- __le16 len;
- u8 active;
- u8 link_idx; /* hw link idx */
- u8 omac_addr[ETH_ALEN];
- } __packed tlv;
- } dev_req = {
- .hdr = {
- .omac_idx = 0,
- .band_idx = 0,
- },
- .tlv = {
- .tag = cpu_to_le16(DEV_INFO_ACTIVE),
- .len = cpu_to_le16(sizeof(struct req_tlv)),
- .active = true,
- },
- };
+ struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
unsigned long rem;
rem = ieee80211_vif_is_mld(vif) ? msta->valid_links : BIT(0);
mt7925_mac_sta_remove_links(dev, vif, sta, rem);
- if (ieee80211_vif_is_mld(vif)) {
- mt7925_mcu_set_dbdc(&dev->mphy, false);
-
- /* recovery omac address for the legacy interface */
- memcpy(dev_req.tlv.omac_addr, vif->addr, ETH_ALEN);
- mt76_mcu_send_msg(mdev, MCU_UNI_CMD(DEV_INFO_UPDATE),
- &dev_req, sizeof(dev_req), true);
- }
+ if (ieee80211_vif_is_mld(vif))
+ mt7925_mcu_del_dev(mdev, vif);
if (vif->type == NL80211_IFTYPE_STATION) {
- struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
-
mvif->wep_sta = NULL;
ewma_rssi_init(&mvif->bss_conf.rssi);
}
+
+ mvif->mlo_pm_state = MT792x_MLO_LINK_DISASSOC;
}
EXPORT_SYMBOL_GPL(mt7925_mac_sta_remove);
@@ -1289,22 +1297,22 @@ mt7925_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
case IEEE80211_AMPDU_RX_START:
mt76_rx_aggr_start(&dev->mt76, &msta->deflink.wcid, tid, ssn,
params->buf_size);
- mt7925_mcu_uni_rx_ba(dev, vif, params, true);
+ mt7925_mcu_uni_rx_ba(dev, params, true);
break;
case IEEE80211_AMPDU_RX_STOP:
mt76_rx_aggr_stop(&dev->mt76, &msta->deflink.wcid, tid);
- mt7925_mcu_uni_rx_ba(dev, vif, params, false);
+ mt7925_mcu_uni_rx_ba(dev, params, false);
break;
case IEEE80211_AMPDU_TX_OPERATIONAL:
mtxq->aggr = true;
mtxq->send_bar = false;
- mt7925_mcu_uni_tx_ba(dev, vif, params, true);
+ mt7925_mcu_uni_tx_ba(dev, params, true);
break;
case IEEE80211_AMPDU_TX_STOP_FLUSH:
case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
mtxq->aggr = false;
clear_bit(tid, &msta->deflink.wcid.ampdu_state);
- mt7925_mcu_uni_tx_ba(dev, vif, params, false);
+ mt7925_mcu_uni_tx_ba(dev, params, false);
break;
case IEEE80211_AMPDU_TX_START:
set_bit(tid, &msta->deflink.wcid.ampdu_state);
@@ -1313,7 +1321,7 @@ mt7925_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
case IEEE80211_AMPDU_TX_STOP_CONT:
mtxq->aggr = false;
clear_bit(tid, &msta->deflink.wcid.ampdu_state);
- mt7925_mcu_uni_tx_ba(dev, vif, params, false);
+ mt7925_mcu_uni_tx_ba(dev, params, false);
ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
break;
}
@@ -1322,6 +1330,38 @@ mt7925_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
return ret;
}
+static void
+mt7925_mlo_pm_iter(void *priv, u8 *mac, struct ieee80211_vif *vif)
+{
+ struct mt792x_dev *dev = priv;
+ struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
+ unsigned long valid = ieee80211_vif_is_mld(vif) ?
+ mvif->valid_links : BIT(0);
+ struct ieee80211_bss_conf *bss_conf;
+ int i;
+
+ if (mvif->mlo_pm_state != MT792x_MLO_CHANGED_PS)
+ return;
+
+ mt792x_mutex_acquire(dev);
+ for_each_set_bit(i, &valid, IEEE80211_MLD_MAX_NUM_LINKS) {
+ bss_conf = mt792x_vif_to_bss_conf(vif, i);
+ mt7925_mcu_uni_bss_ps(dev, bss_conf);
+ }
+ mt792x_mutex_release(dev);
+}
+
+void mt7925_mlo_pm_work(struct work_struct *work)
+{
+ struct mt792x_dev *dev = container_of(work, struct mt792x_dev,
+ mlo_pm_work.work);
+ struct ieee80211_hw *hw = mt76_hw(dev);
+
+ ieee80211_iterate_active_interfaces(hw,
+ IEEE80211_IFACE_ITER_RESUME_ALL,
+ mt7925_mlo_pm_iter, dev);
+}
+
static bool is_valid_alpha2(const char *alpha2)
{
if (!alpha2)
@@ -1378,6 +1418,8 @@ void mt7925_scan_work(struct work_struct *work)
if (!is_valid_alpha2(evt->alpha2))
break;
+ mt7925_regd_be_ctrl(phy->dev, evt->alpha2);
+
if (mdev->alpha2[0] != '0' && mdev->alpha2[1] != '0')
break;
@@ -1871,6 +1913,9 @@ static void mt7925_vif_cfg_changed(struct ieee80211_hw *hw,
mt7925_mcu_sta_update(dev, NULL, vif, true,
MT76_STA_INFO_STATE_ASSOC);
mt7925_mcu_set_beacon_filter(dev, vif, vif->cfg.assoc);
+
+ if (ieee80211_vif_is_mld(vif))
+ mvif->mlo_pm_state = MT792x_MLO_LINK_ASSOC;
}
if (changed & BSS_CHANGED_ARP_FILTER) {
@@ -1881,9 +1926,19 @@ static void mt7925_vif_cfg_changed(struct ieee80211_hw *hw,
}
if (changed & BSS_CHANGED_PS) {
- for_each_set_bit(i, &valid, IEEE80211_MLD_MAX_NUM_LINKS) {
- bss_conf = mt792x_vif_to_bss_conf(vif, i);
+ if (hweight16(mvif->valid_links) < 2) {
+ /* legacy */
+ bss_conf = &vif->bss_conf;
mt7925_mcu_uni_bss_ps(dev, bss_conf);
+ } else {
+ if (mvif->mlo_pm_state == MT792x_MLO_LINK_ASSOC) {
+ mvif->mlo_pm_state = MT792x_MLO_CHANGED_PS_PENDING;
+ } else if (mvif->mlo_pm_state == MT792x_MLO_CHANGED_PS) {
+ for_each_set_bit(i, &valid, IEEE80211_MLD_MAX_NUM_LINKS) {
+ bss_conf = mt792x_vif_to_bss_conf(vif, i);
+ mt7925_mcu_uni_bss_ps(dev, bss_conf);
+ }
+ }
}
}
@@ -1934,11 +1989,12 @@ static void mt7925_link_info_changed(struct ieee80211_hw *hw,
if (changed & (BSS_CHANGED_QOS | BSS_CHANGED_BEACON_ENABLED))
mt7925_mcu_set_tx(dev, info);
- if (changed & BSS_CHANGED_BSSID) {
- if (ieee80211_vif_is_mld(vif) &&
- hweight16(mvif->valid_links) == 2)
- /* Indicate the secondary setup done */
- mt7925_mcu_uni_bss_bcnft(dev, info, true);
+ if (mvif->mlo_pm_state == MT792x_MLO_CHANGED_PS_PENDING) {
+ /* Indicate the secondary setup is done */
+ mt7925_mcu_uni_bss_bcnft(dev, info, true);
+
+ ieee80211_queue_delayed_work(hw, &dev->mlo_pm_work, 5 * HZ);
+ mvif->mlo_pm_state = MT792x_MLO_CHANGED_PS;
}
mt792x_mutex_release(dev);
@@ -2022,8 +2078,6 @@ mt7925_change_vif_links(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
goto free;
if (mconf != &mvif->bss_conf) {
- mt7925_mcu_set_bss_pm(dev, link_conf, true);
-
err = mt7925_set_mlo_roc(phy, &mvif->bss_conf,
vif->active_links);
if (err < 0)
diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c
index 15815ad84713..e61da76b2097 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c
@@ -348,14 +348,10 @@ mt7925_mcu_handle_hif_ctrl_basic(struct mt792x_dev *dev, struct tlv *tlv)
basic = (struct mt7925_mcu_hif_ctrl_basic_tlv *)tlv;
if (basic->hifsuspend) {
- if (basic->hif_tx_traffic_status == HIF_TRAFFIC_IDLE &&
- basic->hif_rx_traffic_status == HIF_TRAFFIC_IDLE)
- /* success */
- dev->hif_idle = true;
- else
- /* busy */
- /* invalid */
- dev->hif_idle = false;
+ dev->hif_idle = true;
+ if (!(basic->hif_tx_traffic_status == HIF_TRAFFIC_IDLE &&
+ basic->hif_rx_traffic_status == HIF_TRAFFIC_IDLE))
+ dev_info(dev->mt76.dev, "Hif traffic not idle.\n");
} else {
dev->hif_resumed = true;
}
@@ -576,10 +572,10 @@ void mt7925_mcu_rx_event(struct mt792x_dev *dev, struct sk_buff *skb)
static int
mt7925_mcu_sta_ba(struct mt76_dev *dev, struct mt76_vif_link *mvif,
- struct mt76_wcid *wcid,
struct ieee80211_ampdu_params *params,
bool enable, bool tx)
{
+ struct mt76_wcid *wcid = (struct mt76_wcid *)params->sta->drv_priv;
struct sta_rec_ba_uni *ba;
struct sk_buff *skb;
struct tlv *tlv;
@@ -607,60 +603,76 @@ mt7925_mcu_sta_ba(struct mt76_dev *dev, struct mt76_vif_link *mvif,
/** starec & wtbl **/
int mt7925_mcu_uni_tx_ba(struct mt792x_dev *dev,
- struct ieee80211_vif *vif,
struct ieee80211_ampdu_params *params,
bool enable)
{
struct mt792x_sta *msta = (struct mt792x_sta *)params->sta->drv_priv;
- struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
- struct mt792x_link_sta *mlink;
- struct mt792x_bss_conf *mconf;
- unsigned long usable_links = ieee80211_vif_usable_links(vif);
- struct mt76_wcid *wcid;
- u8 link_id, ret;
-
- for_each_set_bit(link_id, &usable_links, IEEE80211_MLD_MAX_NUM_LINKS) {
- mconf = mt792x_vif_to_link(mvif, link_id);
- mlink = mt792x_sta_to_link(msta, link_id);
- wcid = &mlink->wcid;
-
- if (enable && !params->amsdu)
- mlink->wcid.amsdu = false;
+ struct mt792x_vif *mvif = msta->vif;
- ret = mt7925_mcu_sta_ba(&dev->mt76, &mconf->mt76, wcid, params,
- enable, true);
- if (ret < 0)
- break;
- }
+ if (enable && !params->amsdu)
+ msta->deflink.wcid.amsdu = false;
- return ret;
+ return mt7925_mcu_sta_ba(&dev->mt76, &mvif->bss_conf.mt76, params,
+ enable, true);
}
int mt7925_mcu_uni_rx_ba(struct mt792x_dev *dev,
- struct ieee80211_vif *vif,
struct ieee80211_ampdu_params *params,
bool enable)
{
struct mt792x_sta *msta = (struct mt792x_sta *)params->sta->drv_priv;
- struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
- struct mt792x_link_sta *mlink;
- struct mt792x_bss_conf *mconf;
- unsigned long usable_links = ieee80211_vif_usable_links(vif);
- struct mt76_wcid *wcid;
- u8 link_id, ret;
+ struct mt792x_vif *mvif = msta->vif;
- for_each_set_bit(link_id, &usable_links, IEEE80211_MLD_MAX_NUM_LINKS) {
- mconf = mt792x_vif_to_link(mvif, link_id);
- mlink = mt792x_sta_to_link(msta, link_id);
- wcid = &mlink->wcid;
+ return mt7925_mcu_sta_ba(&dev->mt76, &mvif->bss_conf.mt76, params,
+ enable, false);
+}
- ret = mt7925_mcu_sta_ba(&dev->mt76, &mconf->mt76, wcid, params,
- enable, false);
- if (ret < 0)
- break;
- }
+static int mt7925_mcu_read_eeprom(struct mt792x_dev *dev, u32 offset, u8 *val)
+{
+ struct {
+ u8 rsv[4];
- return ret;
+ __le16 tag;
+ __le16 len;
+
+ __le32 addr;
+ __le32 valid;
+ u8 data[MT7925_EEPROM_BLOCK_SIZE];
+ } __packed req = {
+ .tag = cpu_to_le16(1),
+ .len = cpu_to_le16(sizeof(req) - 4),
+ .addr = cpu_to_le32(round_down(offset,
+ MT7925_EEPROM_BLOCK_SIZE)),
+ };
+ struct evt {
+ u8 rsv[4];
+
+ __le16 tag;
+ __le16 len;
+
+ __le32 ver;
+ __le32 addr;
+ __le32 valid;
+ __le32 size;
+ __le32 magic_num;
+ __le32 type;
+ __le32 rsv1[4];
+ u8 data[32];
+ } __packed *res;
+ struct sk_buff *skb;
+ int ret;
+
+ ret = mt76_mcu_send_and_get_msg(&dev->mt76, MCU_WM_UNI_CMD_QUERY(EFUSE_CTRL),
+ &req, sizeof(req), true, &skb);
+ if (ret)
+ return ret;
+
+ res = (struct evt *)skb->data;
+ *val = res->data[offset % MT7925_EEPROM_BLOCK_SIZE];
+
+ dev_kfree_skb(skb);
+
+ return 0;
}
static int mt7925_load_clc(struct mt792x_dev *dev, const char *fw_name)
@@ -671,13 +683,21 @@ static int mt7925_load_clc(struct mt792x_dev *dev, const char *fw_name)
struct mt76_dev *mdev = &dev->mt76;
struct mt792x_phy *phy = &dev->phy;
const struct firmware *fw;
+ u8 *clc_base = NULL, hw_encap = 0;
int ret, i, len, offset = 0;
- u8 *clc_base = NULL;
+ dev->phy.clc_chan_conf = 0xff;
if (mt7925_disable_clc ||
mt76_is_usb(&dev->mt76))
return 0;
+ if (mt76_is_mmio(&dev->mt76)) {
+ ret = mt7925_mcu_read_eeprom(dev, MT_EE_HW_TYPE, &hw_encap);
+ if (ret)
+ return ret;
+ hw_encap = u8_get_bits(hw_encap, MT_EE_HW_TYPE_ENCAP);
+ }
+
ret = request_firmware(&fw, fw_name, mdev->dev);
if (ret)
return ret;
@@ -722,6 +742,12 @@ static int mt7925_load_clc(struct mt792x_dev *dev, const char *fw_name)
if (phy->clc[clc->idx])
continue;
+ /* header content sanity */
+ if ((clc->idx == MT792x_CLC_BE_CTRL &&
+ u8_get_bits(clc->t2.type, MT_EE_HW_TYPE_ENCAP) != hw_encap) ||
+ u8_get_bits(clc->t0.type, MT_EE_HW_TYPE_ENCAP) != hw_encap)
+ continue;
+
phy->clc[clc->idx] = devm_kmemdup(mdev->dev, clc,
le32_to_cpu(clc->len),
GFP_KERNEL);
@@ -828,7 +854,6 @@ mt7925_mcu_parse_phy_cap(struct mt792x_dev *dev, char *data)
mdev->phy.chainmask = mdev->phy.antenna_mask;
mdev->phy.cap.has_2ghz = cap->hw_path & BIT(WF0_24G);
mdev->phy.cap.has_5ghz = cap->hw_path & BIT(WF0_5G);
- dev->has_eht = cap->eht;
}
static void
@@ -1850,49 +1875,6 @@ mt7925_mcu_sta_mld_tlv(struct sk_buff *skb,
}
}
-static int
-mt7925_mcu_sta_cmd(struct mt76_phy *phy,
- struct mt76_sta_cmd_info *info)
-{
- struct mt76_vif_link *mvif = (struct mt76_vif_link *)info->vif->drv_priv;
- struct mt76_dev *dev = phy->dev;
- struct sk_buff *skb;
- int conn_state;
-
- skb = __mt76_connac_mcu_alloc_sta_req(dev, mvif, info->wcid,
- MT7925_STA_UPDATE_MAX_SIZE);
- if (IS_ERR(skb))
- return PTR_ERR(skb);
-
- conn_state = info->enable ? CONN_STATE_PORT_SECURE :
- CONN_STATE_DISCONNECT;
- if (info->link_sta)
- mt76_connac_mcu_sta_basic_tlv(dev, skb, info->link_conf,
- info->link_sta,
- conn_state, info->newly);
- if (info->link_sta && info->enable) {
- mt7925_mcu_sta_phy_tlv(skb, info->vif, info->link_sta);
- mt7925_mcu_sta_ht_tlv(skb, info->link_sta);
- mt7925_mcu_sta_vht_tlv(skb, info->link_sta);
- mt76_connac_mcu_sta_uapsd(skb, info->vif, info->link_sta->sta);
- mt7925_mcu_sta_amsdu_tlv(skb, info->vif, info->link_sta);
- mt7925_mcu_sta_he_tlv(skb, info->link_sta);
- mt7925_mcu_sta_he_6g_tlv(skb, info->link_sta);
- mt7925_mcu_sta_eht_tlv(skb, info->link_sta);
- mt7925_mcu_sta_rate_ctrl_tlv(skb, info->vif,
- info->link_sta);
- mt7925_mcu_sta_state_v2_tlv(phy, skb, info->link_sta,
- info->vif, info->rcpi,
- info->state);
- mt7925_mcu_sta_mld_tlv(skb, info->vif, info->link_sta->sta);
- }
-
- if (info->enable)
- mt7925_mcu_sta_hdr_trans_tlv(skb, info->vif, info->link_sta);
-
- return mt76_mcu_skb_send_msg(dev, skb, info->cmd, true);
-}
-
static void
mt7925_mcu_sta_remove_tlv(struct sk_buff *skb)
{
@@ -1905,8 +1887,8 @@ mt7925_mcu_sta_remove_tlv(struct sk_buff *skb)
}
static int
-mt7925_mcu_mlo_sta_cmd(struct mt76_phy *phy,
- struct mt76_sta_cmd_info *info)
+mt7925_mcu_sta_cmd(struct mt76_phy *phy,
+ struct mt76_sta_cmd_info *info)
{
struct mt792x_vif *mvif = (struct mt792x_vif *)info->vif->drv_priv;
struct mt76_dev *dev = phy->dev;
@@ -1920,12 +1902,10 @@ mt7925_mcu_mlo_sta_cmd(struct mt76_phy *phy,
if (IS_ERR(skb))
return PTR_ERR(skb);
- if (info->enable)
+ if (info->enable && info->link_sta) {
mt76_connac_mcu_sta_basic_tlv(dev, skb, info->link_conf,
info->link_sta,
info->enable, info->newly);
-
- if (info->enable && info->link_sta) {
mt7925_mcu_sta_phy_tlv(skb, info->vif, info->link_sta);
mt7925_mcu_sta_ht_tlv(skb, info->link_sta);
mt7925_mcu_sta_vht_tlv(skb, info->link_sta);
@@ -1976,25 +1956,15 @@ int mt7925_mcu_sta_update(struct mt792x_dev *dev,
};
struct mt792x_sta *msta;
struct mt792x_link_sta *mlink;
- int err;
if (link_sta) {
msta = (struct mt792x_sta *)link_sta->sta->drv_priv;
mlink = mt792x_sta_to_link(msta, link_sta->link_id);
}
info.wcid = link_sta ? &mlink->wcid : &mvif->sta.deflink.wcid;
+ info.newly = state != MT76_STA_INFO_STATE_ASSOC;
- if (link_sta)
- info.newly = state != MT76_STA_INFO_STATE_ASSOC;
- else
- info.newly = state == MT76_STA_INFO_STATE_ASSOC ? false : true;
-
- if (ieee80211_vif_is_mld(vif))
- err = mt7925_mcu_mlo_sta_cmd(&dev->mphy, &info);
- else
- err = mt7925_mcu_sta_cmd(&dev->mphy, &info);
-
- return err;
+ return mt7925_mcu_sta_cmd(&dev->mphy, &info);
}
int mt7925_mcu_set_beacon_filter(struct mt792x_dev *dev,
@@ -2559,6 +2529,7 @@ mt7925_mcu_bss_mld_tlv(struct sk_buff *skb,
struct ieee80211_vif *vif = link_conf->vif;
struct mt792x_bss_conf *mconf = mt792x_link_conf_to_mconf(link_conf);
struct mt792x_vif *mvif = (struct mt792x_vif *)link_conf->vif->drv_priv;
+ struct mt792x_phy *phy = mvif->phy;
struct bss_mld_tlv *mld;
struct tlv *tlv;
bool is_mld;
@@ -2574,8 +2545,13 @@ mt7925_mcu_bss_mld_tlv(struct sk_buff *skb,
mld->group_mld_id = is_mld ? mvif->bss_conf.mt76.idx : 0xff;
mld->own_mld_id = mconf->mt76.idx + 32;
mld->remap_idx = 0xff;
- mld->eml_enable = !!(link_conf->vif->cfg.eml_cap &
- IEEE80211_EML_CAP_EMLSR_SUPP);
+
+ if (phy->chip_cap & MT792x_CHIP_CAP_MLO_EML_EN) {
+ mld->eml_enable = !!(link_conf->vif->cfg.eml_cap &
+ IEEE80211_EML_CAP_EMLSR_SUPP);
+ } else {
+ mld->eml_enable = 0;
+ }
memcpy(mld->mac_addr, vif->addr, ETH_ALEN);
}
@@ -2668,6 +2644,62 @@ int mt7925_mcu_set_timing(struct mt792x_phy *phy,
MCU_UNI_CMD(BSS_INFO_UPDATE), true);
}
+void mt7925_mcu_del_dev(struct mt76_dev *mdev,
+ struct ieee80211_vif *vif)
+{
+ struct mt76_vif_link *mvif = (struct mt76_vif_link *)vif->drv_priv;
+ struct {
+ struct {
+ u8 omac_idx;
+ u8 band_idx;
+ __le16 pad;
+ } __packed hdr;
+ struct req_tlv {
+ __le16 tag;
+ __le16 len;
+ u8 active;
+ u8 link_idx; /* hw link idx */
+ u8 omac_addr[ETH_ALEN];
+ } __packed tlv;
+ } dev_req = {
+ .tlv = {
+ .tag = cpu_to_le16(DEV_INFO_ACTIVE),
+ .len = cpu_to_le16(sizeof(struct req_tlv)),
+ .active = true,
+ },
+ };
+ struct {
+ struct {
+ u8 bss_idx;
+ u8 pad[3];
+ } __packed hdr;
+ struct mt76_connac_bss_basic_tlv basic;
+ } basic_req = {
+ .basic = {
+ .tag = cpu_to_le16(UNI_BSS_INFO_BASIC),
+ .len = cpu_to_le16(sizeof(struct mt76_connac_bss_basic_tlv)),
+ .active = true,
+ .conn_state = 1,
+ },
+ };
+
+ dev_req.hdr.omac_idx = mvif->omac_idx;
+ dev_req.hdr.band_idx = mvif->band_idx;
+
+ basic_req.hdr.bss_idx = mvif->idx;
+ basic_req.basic.omac_idx = mvif->omac_idx;
+ basic_req.basic.band_idx = mvif->band_idx;
+ basic_req.basic.link_idx = mvif->link_idx;
+
+ mt76_mcu_send_msg(mdev, MCU_UNI_CMD(BSS_INFO_UPDATE),
+ &basic_req, sizeof(basic_req), true);
+
+ /* recover the omac address for the legacy interface */
+ memcpy(dev_req.tlv.omac_addr, vif->addr, ETH_ALEN);
+ mt76_mcu_send_msg(mdev, MCU_UNI_CMD(DEV_INFO_UPDATE),
+ &dev_req, sizeof(dev_req), true);
+}
+
int mt7925_mcu_add_bss_info(struct mt792x_phy *phy,
struct ieee80211_chanctx_conf *ctx,
struct ieee80211_bss_conf *link_conf,
@@ -3155,16 +3187,17 @@ __mt7925_mcu_set_clc(struct mt792x_dev *dev, u8 *alpha2,
.idx = idx,
.env = env_cap,
- .acpi_conf = mt792x_acpi_get_flags(&dev->phy),
};
int ret, valid_cnt = 0;
- u8 i, *pos;
+ u8 *pos, *last_pos;
if (!clc)
return 0;
- pos = clc->data + sizeof(*seg) * clc->nr_seg;
- for (i = 0; i < clc->nr_country; i++) {
+ req.ver = clc->ver;
+ pos = clc->data + sizeof(*seg) * clc->t0.nr_seg;
+ last_pos = clc->data + le32_to_cpu(*(__le32 *)(clc->data + 4));
+ while (pos < last_pos) {
struct mt7925_clc_rule *rule = (struct mt7925_clc_rule *)pos;
pos += sizeof(*rule);
@@ -3179,6 +3212,7 @@ __mt7925_mcu_set_clc(struct mt792x_dev *dev, u8 *alpha2,
memcpy(req.type, rule->type, 2);
req.size = cpu_to_le16(seg->len);
+ dev->phy.clc_chan_conf = clc->ver == 1 ? 0xff : rule->flag;
skb = __mt76_mcu_msg_alloc(&dev->mt76, &req,
le16_to_cpu(req.size) + sizeof(req),
sizeof(req), GFP_KERNEL);
@@ -3194,8 +3228,10 @@ __mt7925_mcu_set_clc(struct mt792x_dev *dev, u8 *alpha2,
valid_cnt++;
}
- if (!valid_cnt)
+ if (!valid_cnt) {
+ dev->phy.clc_chan_conf = 0xff;
return -ENOENT;
+ }
return 0;
}
@@ -3208,6 +3244,9 @@ int mt7925_mcu_set_clc(struct mt792x_dev *dev, u8 *alpha2,
/* submit all clc config */
for (i = 0; i < ARRAY_SIZE(phy->clc); i++) {
+ if (i == MT792x_CLC_BE_CTRL)
+ continue;
+
ret = __mt7925_mcu_set_clc(dev, alpha2, env_cap,
phy->clc[i], i);
@@ -3266,6 +3305,9 @@ int mt7925_mcu_fill_message(struct mt76_dev *mdev, struct sk_buff *skb,
else
uni_txd->option = MCU_CMD_UNI_EXT_ACK;
+ if (cmd == MCU_UNI_CMD(HIF_CTRL))
+ uni_txd->option &= ~MCU_CMD_ACK;
+
goto exit;
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.h
index 1e47d2c61b54..8ac43feb26d6 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7925/mcu.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.h
@@ -566,8 +566,8 @@ struct mt7925_wow_pattern_tlv {
u8 offset;
u8 mask[MT76_CONNAC_WOW_MASK_MAX_LEN];
u8 pattern[MT76_CONNAC_WOW_PATTEN_MAX_LEN];
- u8 rsv[7];
-} __packed;
+ u8 rsv[4];
+};
struct roc_acquire_tlv {
__le16 tag;
@@ -627,6 +627,8 @@ int mt7925_mcu_sched_scan_req(struct mt76_phy *phy,
int mt7925_mcu_sched_scan_enable(struct mt76_phy *phy,
struct ieee80211_vif *vif,
bool enable);
+void mt7925_mcu_del_dev(struct mt76_dev *mdev,
+ struct ieee80211_vif *vif);
int mt7925_mcu_add_bss_info(struct mt792x_phy *phy,
struct ieee80211_chanctx_conf *ctx,
struct ieee80211_bss_conf *link_conf,
diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h b/drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h
index 8707b5d04743..4e50f2597ccd 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h
@@ -137,11 +137,18 @@ enum {
MT7925_CLC_MAX_NUM,
};
+struct mt7925_clc_rule_v2 {
+ u32 flag;
+ u8 alpha2[2];
+ u8 rsv[10];
+} __packed;
+
struct mt7925_clc_rule {
u8 alpha2[2];
u8 type[2];
u8 seg_idx;
- u8 rsv[3];
+ u8 flag; /* UNII4~8 ctrl flag */
+ u8 rsv[2];
} __packed;
struct mt7925_clc_segment {
@@ -152,14 +159,26 @@ struct mt7925_clc_segment {
u8 rsv2[4];
} __packed;
-struct mt7925_clc {
- __le32 len;
- u8 idx;
- u8 ver;
+struct mt7925_clc_type0 {
u8 nr_country;
u8 type;
u8 nr_seg;
u8 rsv[7];
+} __packed;
+
+struct mt7925_clc_type2 {
+ u8 type;
+ u8 rsv[9];
+} __packed;
+
+struct mt7925_clc {
+ __le32 len;
+ u8 idx;
+ u8 ver;
+ union {
+ struct mt7925_clc_type0 t0;
+ struct mt7925_clc_type2 t2;
+ };
u8 data[];
} __packed;
@@ -167,9 +186,12 @@ enum mt7925_eeprom_field {
MT_EE_CHIP_ID = 0x000,
MT_EE_VERSION = 0x002,
MT_EE_MAC_ADDR = 0x004,
+ MT_EE_HW_TYPE = 0xa71,
__MT_EE_MAX = 0x9ff
};
+#define MT_EE_HW_TYPE_ENCAP GENMASK(1, 0)
+
enum {
TXPWR_USER,
TXPWR_EEPROM,
@@ -235,6 +257,7 @@ int mt7925_mcu_chip_config(struct mt792x_dev *dev, const char *cmd);
int mt7925_mcu_set_rxfilter(struct mt792x_dev *dev, u32 fif,
u8 bit_op, u32 bit_map);
+void mt7925_regd_be_ctrl(struct mt792x_dev *dev, u8 *alpha2);
void mt7925_regd_update(struct mt792x_dev *dev);
int mt7925_mac_init(struct mt792x_dev *dev);
int mt7925_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
@@ -263,13 +286,12 @@ int mt7925_mcu_set_beacon_filter(struct mt792x_dev *dev,
struct ieee80211_vif *vif,
bool enable);
int mt7925_mcu_uni_tx_ba(struct mt792x_dev *dev,
- struct ieee80211_vif *vif,
struct ieee80211_ampdu_params *params,
bool enable);
int mt7925_mcu_uni_rx_ba(struct mt792x_dev *dev,
- struct ieee80211_vif *vif,
struct ieee80211_ampdu_params *params,
bool enable);
+void mt7925_mlo_pm_work(struct work_struct *work);
void mt7925_scan_work(struct work_struct *work);
void mt7925_roc_work(struct work_struct *work);
int mt7925_mcu_uni_bss_ps(struct mt792x_dev *dev,
diff --git a/drivers/net/wireless/mediatek/mt76/mt792x.h b/drivers/net/wireless/mediatek/mt76/mt792x.h
index 32ed01a96bf7..e0359d431eca 100644
--- a/drivers/net/wireless/mediatek/mt76/mt792x.h
+++ b/drivers/net/wireless/mediatek/mt76/mt792x.h
@@ -27,8 +27,9 @@
#define MT792x_CHIP_CAP_CLC_EVT_EN BIT(0)
#define MT792x_CHIP_CAP_RSSI_NOTIFY_EVT_EN BIT(1)
-#define MT792x_CHIP_CAP_MLO_EVT_EN BIT(2)
#define MT792x_CHIP_CAP_WF_RF_PIN_CTRL_EVT_EN BIT(3)
+#define MT792x_CHIP_CAP_MLO_EN BIT(8)
+#define MT792x_CHIP_CAP_MLO_EML_EN BIT(9)
/* NOTE: used to map mt76_rates. idx may change if firmware expands table */
#define MT792x_BASIC_RATES_TBL 11
@@ -70,6 +71,7 @@ struct mt792x_fw_features {
enum {
MT792x_CLC_POWER,
MT792x_CLC_POWER_EXT,
+ MT792x_CLC_BE_CTRL,
MT792x_CLC_MAX_NUM,
};
@@ -81,6 +83,13 @@ enum mt792x_reg_power_type {
MT_AP_VLP,
};
+enum mt792x_mlo_pm_state {
+ MT792x_MLO_LINK_DISASSOC,
+ MT792x_MLO_LINK_ASSOC,
+ MT792x_MLO_CHANGED_PS_PENDING,
+ MT792x_MLO_CHANGED_PS,
+};
+
DECLARE_EWMA(avg_signal, 10, 8)
struct mt792x_link_sta {
@@ -134,6 +143,7 @@ struct mt792x_vif {
struct mt792x_phy *phy;
u16 valid_links;
u8 deflink_id;
+ enum mt792x_mlo_pm_state mlo_pm_state;
struct work_struct csa_work;
struct timer_list csa_timer;
@@ -239,6 +249,7 @@ struct mt792x_dev {
const struct mt792x_irq_map *irq_map;
struct work_struct ipv6_ns_work;
+ struct delayed_work mlo_pm_work;
/* IPv6 addresses for WoWLAN */
struct sk_buff_head ipv6_ns_list;
@@ -497,7 +508,7 @@ int mt792xe_mcu_fw_pmctrl(struct mt792x_dev *dev);
int mt792x_init_acpi_sar(struct mt792x_dev *dev);
int mt792x_init_acpi_sar_power(struct mt792x_phy *phy, bool set_default);
u8 mt792x_acpi_get_flags(struct mt792x_phy *phy);
-u8 mt792x_acpi_get_mtcl_conf(struct mt792x_phy *phy, char *alpha2);
+u32 mt792x_acpi_get_mtcl_conf(struct mt792x_phy *phy, char *alpha2);
#else
static inline int mt792x_init_acpi_sar(struct mt792x_dev *dev)
{
@@ -515,9 +526,9 @@ static inline u8 mt792x_acpi_get_flags(struct mt792x_phy *phy)
return 0;
}
-static inline u8 mt792x_acpi_get_mtcl_conf(struct mt792x_phy *phy, char *alpha2)
+static inline u32 mt792x_acpi_get_mtcl_conf(struct mt792x_phy *phy, char *alpha2)
{
- return 0xf;
+ return MT792X_ACPI_MTCL_INVALID;
}
#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt792x_acpi_sar.c b/drivers/net/wireless/mediatek/mt76/mt792x_acpi_sar.c
index 9317f8ff2070..d1aebadd50aa 100644
--- a/drivers/net/wireless/mediatek/mt76/mt792x_acpi_sar.c
+++ b/drivers/net/wireless/mediatek/mt76/mt792x_acpi_sar.c
@@ -4,6 +4,28 @@
#include <linux/acpi.h>
#include "mt792x.h"
+static const char * const cc_list_all[] = {
+ "00", "EU", "AR", "AU", "AZ", "BY", "BO", "BR",
+ "CA", "CL", "CN", "ID", "JP", "MY", "MX", "ME",
+ "MA", "NZ", "NG", "PH", "RU", "RS", "SG", "KR",
+ "TW", "TH", "UA", "GB", "US", "VN", "KH", "PY",
+};
+
+static const char * const cc_list_eu[] = {
+ "AD", "AT", "BE", "BG", "CY", "CZ", "HR", "DK",
+ "EE", "FI", "FR", "DE", "GR", "HU", "IS", "IE",
+ "IT", "LV", "LI", "LT", "LU", "MC", "MT", "NL",
+ "NO", "PL", "PT", "RO", "SK", "SI", "ES", "SE",
+ "CH",
+};
+
+static const char * const cc_list_be[] = {
+ "AR", "BR", "BY", "CL", "IQ", "MX", "OM", "RU",
+ "RW", "VN", "KR", "UA", "", "", "", "",
+ "EU", "AT", "CN", "CA", "TW", "NZ", "PH", "UK",
+ "US",
+};
+
static int
mt792x_acpi_read(struct mt792x_dev *dev, u8 *method, u8 **tbl, u32 *len)
{
@@ -66,13 +88,22 @@ free:
}
/* MTCL : Country List Table for 6G band */
+/* MTCL : Country List Table for 6G band and 11BE */
static int
mt792x_asar_acpi_read_mtcl(struct mt792x_dev *dev, u8 **table, u8 *version)
{
- int ret;
+ int len, ret;
- *version = ((ret = mt792x_acpi_read(dev, MT792x_ACPI_MTCL, table, NULL)) < 0)
- ? 1 : 2;
+ ret = mt792x_acpi_read(dev, MT792x_ACPI_MTCL, table, &len);
+ if (ret)
+ return ret;
+
+ if (len == sizeof(struct mt792x_asar_cl))
+ *version = ((struct mt792x_asar_cl *)*table)->version;
+ else if (len == sizeof(struct mt792x_asar_cl_v3))
+ *version = ((struct mt792x_asar_cl_v3 *)*table)->version;
+ else
+ return -EINVAL;
return ret;
}
@@ -351,10 +382,24 @@ u8 mt792x_acpi_get_flags(struct mt792x_phy *phy)
}
EXPORT_SYMBOL_GPL(mt792x_acpi_get_flags);
-static u8
+static u32
+mt792x_acpi_get_mtcl_map_v3(int row, int column, struct mt792x_asar_cl_v3 *cl)
+{
+ u32 config = 0;
+ u8 mode_be = 0;
+
+ mode_be = (cl->mode_be > 0x02) ? 0 : cl->mode_be;
+
+ if (cl->version > 2 && cl->clbe[row] & BIT(column))
+ config |= (mode_be & 0x3) << 4;
+
+ return config;
+}
+
+static u32
mt792x_acpi_get_mtcl_map(int row, int column, struct mt792x_asar_cl *cl)
{
- u8 config = 0;
+ u32 config = 0;
u8 mode_6g, mode_5g9;
mode_6g = (cl->mode_6g > 0x02) ? 0 : cl->mode_6g;
@@ -368,30 +413,44 @@ mt792x_acpi_get_mtcl_map(int row, int column, struct mt792x_asar_cl *cl)
return config;
}
-u8 mt792x_acpi_get_mtcl_conf(struct mt792x_phy *phy, char *alpha2)
+static u32
+mt792x_acpi_parse_mtcl_tbl_v3(struct mt792x_phy *phy, char *alpha2)
{
- static const char * const cc_list_all[] = {
- "00", "EU", "AR", "AU", "AZ", "BY", "BO", "BR",
- "CA", "CL", "CN", "ID", "JP", "MY", "MX", "ME",
- "MA", "NZ", "NG", "PH", "RU", "RS", "SG", "KR",
- "TW", "TH", "UA", "GB", "US", "VN", "KH", "PY",
- };
- static const char * const cc_list_eu[] = {
- "AT", "BE", "BG", "CY", "CZ", "HR", "DK", "EE",
- "FI", "FR", "DE", "GR", "HU", "IS", "IE", "IT",
- "LV", "LI", "LT", "LU", "MT", "NL", "NO", "PL",
- "PT", "RO", "SK", "SI", "ES", "SE", "CH",
- };
struct mt792x_acpi_sar *sar = phy->acpisar;
- struct mt792x_asar_cl *cl;
+ struct mt792x_asar_cl_v3 *cl = sar->countrylist_v3;
int col, row, i;
- if (!sar)
- return 0xf;
+ if (sar->ver != 3)
+ goto out;
- cl = sar->countrylist;
if (!cl)
- return 0xc;
+ return MT792X_ACPI_MTCL_INVALID;
+
+ for (i = 0; i < ARRAY_SIZE(cc_list_be); i++) {
+ col = 7 - i % 8;
+ row = i / 8;
+ if (!memcmp(cc_list_be[i], alpha2, 2))
+ return mt792x_acpi_get_mtcl_map_v3(row, col, cl);
+ }
+ for (i = 0; i < ARRAY_SIZE(cc_list_eu); i++) {
+ if (!memcmp(cc_list_eu[i], alpha2, 2))
+ return mt792x_acpi_get_mtcl_map_v3(3, 7, cl);
+ }
+
+out:
+ /* Depends on driver */
+ return 0x20;
+}
+
+static u32
+mt792x_acpi_parse_mtcl_tbl(struct mt792x_phy *phy, char *alpha2)
+{
+ struct mt792x_acpi_sar *sar = phy->acpisar;
+ struct mt792x_asar_cl *cl = sar->countrylist;
+ int col, row, i;
+
+ if (!cl)
+ return MT792X_ACPI_MTCL_INVALID;
for (i = 0; i < ARRAY_SIZE(cc_list_all); i++) {
col = 7 - i % 8;
@@ -406,4 +465,22 @@ u8 mt792x_acpi_get_mtcl_conf(struct mt792x_phy *phy, char *alpha2)
return mt792x_acpi_get_mtcl_map(0, 7, cl);
}
+
+u32 mt792x_acpi_get_mtcl_conf(struct mt792x_phy *phy, char *alpha2)
+{
+ struct mt792x_acpi_sar *sar = phy->acpisar;
+ u32 config = 0;
+
+ if (!sar)
+ return MT792X_ACPI_MTCL_INVALID;
+
+ config = mt792x_acpi_parse_mtcl_tbl_v3(phy, alpha2);
+
+ if (config == MT792X_ACPI_MTCL_INVALID)
+ return MT792X_ACPI_MTCL_INVALID;
+
+ config |= mt792x_acpi_parse_mtcl_tbl(phy, alpha2);
+
+ return config;
+}
EXPORT_SYMBOL_GPL(mt792x_acpi_get_mtcl_conf);
diff --git a/drivers/net/wireless/mediatek/mt76/mt792x_acpi_sar.h b/drivers/net/wireless/mediatek/mt76/mt792x_acpi_sar.h
index 2298983b6342..e45dcd7fbdb1 100644
--- a/drivers/net/wireless/mediatek/mt76/mt792x_acpi_sar.h
+++ b/drivers/net/wireless/mediatek/mt76/mt792x_acpi_sar.h
@@ -15,6 +15,8 @@
#define MT792x_ACPI_MTGS "MTGS"
#define MT792x_ACPI_MTFG "MTFG"
+#define MT792X_ACPI_MTCL_INVALID 0xffffffff
+
struct mt792x_asar_dyn_limit {
u8 idx;
u8 frp[5];
@@ -72,6 +74,17 @@ struct mt792x_asar_geo_v2 {
DECLARE_FLEX_ARRAY(struct mt792x_asar_geo_limit_v2, tbl);
} __packed;
+struct mt792x_asar_cl_v3 {
+ u8 names[4];
+ u8 version;
+ u8 mode_6g;
+ u8 cl6g[6];
+ u8 mode_5g9;
+ u8 cl5g9[6];
+ u8 mode_be;
+ u8 clbe[6];
+} __packed;
+
struct mt792x_asar_cl {
u8 names[4];
u8 version;
@@ -100,7 +113,10 @@ struct mt792x_acpi_sar {
struct mt792x_asar_geo *geo;
struct mt792x_asar_geo_v2 *geo_v2;
};
- struct mt792x_asar_cl *countrylist;
+ union {
+ struct mt792x_asar_cl *countrylist;
+ struct mt792x_asar_cl_v3 *countrylist_v3;
+ };
struct mt792x_asar_fg *fg;
};
diff --git a/drivers/net/wireless/mediatek/mt76/mt792x_core.c b/drivers/net/wireless/mediatek/mt76/mt792x_core.c
index 8799627f6292..0f7806f6338d 100644
--- a/drivers/net/wireless/mediatek/mt76/mt792x_core.c
+++ b/drivers/net/wireless/mediatek/mt76/mt792x_core.c
@@ -665,7 +665,8 @@ int mt792x_init_wiphy(struct ieee80211_hw *hw)
ieee80211_hw_set(hw, SUPPORTS_DYNAMIC_PS);
ieee80211_hw_set(hw, SUPPORTS_VHT_EXT_NSS_BW);
ieee80211_hw_set(hw, CONNECTION_MONITOR);
- ieee80211_hw_set(hw, CHANCTX_STA_CSA);
+ if (is_mt7921(&dev->mt76))
+ ieee80211_hw_set(hw, CHANCTX_STA_CSA);
if (dev->pm.enable)
ieee80211_hw_set(hw, CONNECTION_MONITOR);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt7996/debugfs.c
index 7b2bb72b407d..4a28db17a287 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/debugfs.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/debugfs.c
@@ -616,28 +616,51 @@ static void
mt7996_sta_hw_queue_read(void *data, struct ieee80211_sta *sta)
{
struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
- struct mt7996_dev *dev = msta->vif->deflink.phy->dev;
+ struct mt7996_vif *mvif = msta->vif;
+ struct mt7996_dev *dev = mvif->deflink.phy->dev;
+ struct ieee80211_link_sta *link_sta;
struct seq_file *s = data;
- u8 ac;
+ struct ieee80211_vif *vif;
+ unsigned int link_id;
+
+ vif = container_of((void *)mvif, struct ieee80211_vif, drv_priv);
+
+ rcu_read_lock();
- for (ac = 0; ac < 4; ac++) {
- u32 qlen, ctrl, val;
- u32 idx = msta->wcid.idx >> 5;
- u8 offs = msta->wcid.idx & GENMASK(4, 0);
+ for_each_sta_active_link(vif, sta, link_sta, link_id) {
+ struct mt7996_sta_link *msta_link;
+ struct mt76_vif_link *mlink;
+ u8 ac;
- ctrl = BIT(31) | BIT(11) | (ac << 24);
- val = mt76_rr(dev, MT_PLE_AC_QEMPTY(ac, idx));
+ mlink = rcu_dereference(mvif->mt76.link[link_id]);
+ if (!mlink)
+ continue;
- if (val & BIT(offs))
+ msta_link = rcu_dereference(msta->link[link_id]);
+ if (!msta_link)
continue;
- mt76_wr(dev, MT_FL_Q0_CTRL, ctrl | msta->wcid.idx);
- qlen = mt76_get_field(dev, MT_FL_Q3_CTRL,
- GENMASK(11, 0));
- seq_printf(s, "\tSTA %pM wcid %d: AC%d%d queued:%d\n",
- sta->addr, msta->wcid.idx,
- msta->vif->deflink.mt76.wmm_idx, ac, qlen);
+ for (ac = 0; ac < 4; ac++) {
+ u32 idx = msta_link->wcid.idx >> 5, qlen, ctrl, val;
+ u8 offs = msta_link->wcid.idx & GENMASK(4, 0);
+
+ ctrl = BIT(31) | BIT(11) | (ac << 24);
+ val = mt76_rr(dev, MT_PLE_AC_QEMPTY(ac, idx));
+
+ if (val & BIT(offs))
+ continue;
+
+ mt76_wr(dev,
+ MT_FL_Q0_CTRL, ctrl | msta_link->wcid.idx);
+ qlen = mt76_get_field(dev, MT_FL_Q3_CTRL,
+ GENMASK(11, 0));
+ seq_printf(s, "\tSTA %pM wcid %d: AC%d%d queued:%d\n",
+ sta->addr, msta_link->wcid.idx,
+ mlink->wmm_idx, ac, qlen);
+ }
}
+
+ rcu_read_unlock();
}
static int
@@ -930,6 +953,7 @@ static ssize_t mt7996_sta_fixed_rate_set(struct file *file,
struct ieee80211_sta *sta = file->private_data;
struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
struct mt7996_dev *dev = msta->vif->deflink.phy->dev;
+ struct mt7996_sta_link *msta_link = &msta->deflink;
struct ra_rate phy = {};
char buf[100];
int ret;
@@ -964,7 +988,7 @@ static ssize_t mt7996_sta_fixed_rate_set(struct file *file,
goto out;
}
- phy.wlan_idx = cpu_to_le16(msta->wcid.idx);
+ phy.wlan_idx = cpu_to_le16(msta_link->wcid.idx);
phy.gi = cpu_to_le16(gi);
phy.ltf = cpu_to_le16(ltf);
phy.ldpc = phy.ldpc ? 7 : 0;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mac.c b/drivers/net/wireless/mediatek/mt76/mt7996/mac.c
index 019c925ae600..d89c06f47997 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/mac.c
@@ -53,26 +53,48 @@ static const struct mt7996_dfs_radar_spec jp_radar_specs = {
};
static struct mt76_wcid *mt7996_rx_get_wcid(struct mt7996_dev *dev,
- u16 idx, bool unicast)
+ u16 idx, u8 band_idx)
{
- struct mt7996_sta *sta;
+ struct mt7996_sta_link *msta_link;
+ struct mt7996_sta *msta;
+ struct mt7996_vif *mvif;
struct mt76_wcid *wcid;
+ int i;
if (idx >= ARRAY_SIZE(dev->mt76.wcid))
return NULL;
wcid = rcu_dereference(dev->mt76.wcid[idx]);
- if (unicast || !wcid)
- return wcid;
+ if (!wcid)
+ return NULL;
- if (!wcid->sta)
+ if (!mt7996_band_valid(dev, band_idx))
return NULL;
- sta = container_of(wcid, struct mt7996_sta, wcid);
- if (!sta->vif)
+ if (wcid->phy_idx == band_idx)
+ return wcid;
+
+ msta_link = container_of(wcid, struct mt7996_sta_link, wcid);
+ msta = msta_link->sta;
+ if (!msta || !msta->vif)
return NULL;
- return &sta->vif->deflink.sta.wcid;
+ mvif = msta->vif;
+ for (i = 0; i < ARRAY_SIZE(mvif->mt76.link); i++) {
+ struct mt76_vif_link *mlink;
+
+ mlink = rcu_dereference(mvif->mt76.link[i]);
+ if (!mlink)
+ continue;
+
+ if (mlink->band_idx != band_idx)
+ continue;
+
+ msta_link = rcu_dereference(msta->link[i]);
+ break;
+ }
+
+ return &msta_link->wcid;
}
bool mt7996_mac_wtbl_update(struct mt7996_dev *dev, int idx, u32 mask)
@@ -100,10 +122,13 @@ static void mt7996_mac_sta_poll(struct mt7996_dev *dev)
[IEEE80211_AC_VI] = 4,
[IEEE80211_AC_VO] = 6
};
+ struct mt7996_sta_link *msta_link;
+ struct mt76_vif_link *mlink;
struct ieee80211_sta *sta;
struct mt7996_sta *msta;
u32 tx_time[IEEE80211_NUM_ACS], rx_time[IEEE80211_NUM_ACS];
LIST_HEAD(sta_poll_list);
+ struct mt76_wcid *wcid;
int i;
spin_lock_bh(&dev->mt76.sta_poll_lock);
@@ -123,25 +148,28 @@ static void mt7996_mac_sta_poll(struct mt7996_dev *dev)
spin_unlock_bh(&dev->mt76.sta_poll_lock);
break;
}
- msta = list_first_entry(&sta_poll_list,
- struct mt7996_sta, wcid.poll_list);
- list_del_init(&msta->wcid.poll_list);
+ msta_link = list_first_entry(&sta_poll_list,
+ struct mt7996_sta_link,
+ wcid.poll_list);
+ msta = msta_link->sta;
+ wcid = &msta_link->wcid;
+ list_del_init(&wcid->poll_list);
spin_unlock_bh(&dev->mt76.sta_poll_lock);
- idx = msta->wcid.idx;
+ idx = wcid->idx;
/* refresh peer's airtime reporting */
addr = mt7996_mac_wtbl_lmac_addr(dev, idx, 20);
for (i = 0; i < IEEE80211_NUM_ACS; i++) {
- u32 tx_last = msta->airtime_ac[i];
- u32 rx_last = msta->airtime_ac[i + 4];
+ u32 tx_last = msta_link->airtime_ac[i];
+ u32 rx_last = msta_link->airtime_ac[i + 4];
- msta->airtime_ac[i] = mt76_rr(dev, addr);
- msta->airtime_ac[i + 4] = mt76_rr(dev, addr + 4);
+ msta_link->airtime_ac[i] = mt76_rr(dev, addr);
+ msta_link->airtime_ac[i + 4] = mt76_rr(dev, addr + 4);
- tx_time[i] = msta->airtime_ac[i] - tx_last;
- rx_time[i] = msta->airtime_ac[i + 4] - rx_last;
+ tx_time[i] = msta_link->airtime_ac[i] - tx_last;
+ rx_time[i] = msta_link->airtime_ac[i + 4] - rx_last;
if ((tx_last | rx_last) & BIT(30))
clear = true;
@@ -152,10 +180,11 @@ static void mt7996_mac_sta_poll(struct mt7996_dev *dev)
if (clear) {
mt7996_mac_wtbl_update(dev, idx,
MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
- memset(msta->airtime_ac, 0, sizeof(msta->airtime_ac));
+ memset(msta_link->airtime_ac, 0,
+ sizeof(msta_link->airtime_ac));
}
- if (!msta->wcid.sta)
+ if (!wcid->sta)
continue;
sta = container_of((void *)msta, struct ieee80211_sta,
@@ -181,28 +210,23 @@ static void mt7996_mac_sta_poll(struct mt7996_dev *dev)
rssi[2] = to_rssi(GENMASK(23, 16), val);
rssi[3] = to_rssi(GENMASK(31, 14), val);
- msta->ack_signal =
- mt76_rx_signal(msta->vif->deflink.phy->mt76->antenna_mask, rssi);
+ mlink = rcu_dereference(msta->vif->mt76.link[wcid->link_id]);
+ if (mlink) {
+ struct mt76_phy *mphy = mt76_vif_link_phy(mlink);
+
+ if (mphy)
+ msta_link->ack_signal =
+ mt76_rx_signal(mphy->antenna_mask,
+ rssi);
+ }
- ewma_avg_signal_add(&msta->avg_ack_signal, -msta->ack_signal);
+ ewma_avg_signal_add(&msta_link->avg_ack_signal,
+ -msta_link->ack_signal);
}
rcu_read_unlock();
}
-void mt7996_mac_enable_rtscts(struct mt7996_dev *dev,
- struct ieee80211_vif *vif, bool enable)
-{
- struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
- u32 addr;
-
- addr = mt7996_mac_wtbl_lmac_addr(dev, mvif->deflink.sta.wcid.idx, 5);
- if (enable)
- mt76_set(dev, addr, BIT(5));
- else
- mt76_clear(dev, addr, BIT(5));
-}
-
/* The HW does not translate the mac header to 802.3 for mesh point */
static int mt7996_reverse_frag0_hdr_trans(struct sk_buff *skb, u16 hdr_gap)
{
@@ -474,11 +498,15 @@ mt7996_mac_fill_rx(struct mt7996_dev *dev, enum mt76_rxq_id q,
unicast = FIELD_GET(MT_RXD3_NORMAL_ADDR_TYPE, rxd3) == MT_RXD3_NORMAL_U2M;
idx = FIELD_GET(MT_RXD1_NORMAL_WLAN_IDX, rxd1);
- status->wcid = mt7996_rx_get_wcid(dev, idx, unicast);
+ status->wcid = mt7996_rx_get_wcid(dev, idx, band_idx);
if (status->wcid) {
- msta = container_of(status->wcid, struct mt7996_sta, wcid);
- mt76_wcid_add_poll(&dev->mt76, &msta->wcid);
+ struct mt7996_sta_link *msta_link;
+
+ msta_link = container_of(status->wcid, struct mt7996_sta_link,
+ wcid);
+ msta = msta_link->sta;
+ mt76_wcid_add_poll(&dev->mt76, &msta_link->wcid);
}
status->freq = mphy->chandef.chan->center_freq;
@@ -717,9 +745,8 @@ mt7996_mac_write_txwi_8023(struct mt7996_dev *dev, __le32 *txwi,
u32 val;
if (wcid->sta) {
- struct ieee80211_sta *sta;
+ struct ieee80211_sta *sta = wcid_to_sta(wcid);
- sta = container_of((void *)wcid, struct ieee80211_sta, drv_priv);
wmm = sta->wme;
}
@@ -746,7 +773,9 @@ mt7996_mac_write_txwi_8023(struct mt7996_dev *dev, __le32 *txwi,
static void
mt7996_mac_write_txwi_80211(struct mt7996_dev *dev, __le32 *txwi,
- struct sk_buff *skb, struct ieee80211_key_conf *key)
+ struct sk_buff *skb,
+ struct ieee80211_key_conf *key,
+ struct mt76_wcid *wcid)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;
@@ -754,6 +783,7 @@ mt7996_mac_write_txwi_80211(struct mt7996_dev *dev, __le32 *txwi,
bool multicast = is_multicast_ether_addr(hdr->addr1);
u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
__le16 fc = hdr->frame_control, sc = hdr->seq_ctrl;
+ u16 seqno = le16_to_cpu(sc);
u8 fc_type, fc_stype;
u32 val;
@@ -773,8 +803,7 @@ mt7996_mac_write_txwi_80211(struct mt7996_dev *dev, __le32 *txwi,
info->flags & IEEE80211_TX_CTL_USE_MINRATE)
val |= MT_TXD1_FIXED_RATE;
- if (key && multicast && ieee80211_is_robust_mgmt_frame(skb) &&
- key->cipher == WLAN_CIPHER_SUITE_AES_CMAC) {
+ if (key && multicast && ieee80211_is_robust_mgmt_frame(skb)) {
val |= MT_TXD1_BIP;
txwi[3] &= ~cpu_to_le32(MT_TXD3_PROTECT_FRAME);
}
@@ -804,9 +833,13 @@ mt7996_mac_write_txwi_80211(struct mt7996_dev *dev, __le32 *txwi,
txwi[3] |= cpu_to_le32(MT_TXD3_REM_TX_COUNT);
}
- if (info->flags & IEEE80211_TX_CTL_INJECTED) {
- u16 seqno = le16_to_cpu(sc);
+ if (multicast && ieee80211_vif_is_mld(info->control.vif)) {
+ val = MT_TXD3_SN_VALID |
+ FIELD_PREP(MT_TXD3_SEQ, IEEE80211_SEQ_TO_SN(seqno));
+ txwi[3] |= cpu_to_le32(val);
+ }
+ if (info->flags & IEEE80211_TX_CTL_INJECTED) {
if (ieee80211_is_back_req(hdr->frame_control)) {
struct ieee80211_bar *bar;
@@ -819,6 +852,19 @@ mt7996_mac_write_txwi_80211(struct mt7996_dev *dev, __le32 *txwi,
txwi[3] |= cpu_to_le32(val);
txwi[3] &= ~cpu_to_le32(MT_TXD3_HW_AMSDU);
}
+
+ if (ieee80211_vif_is_mld(info->control.vif) &&
+ (multicast || unlikely(skb->protocol == cpu_to_be16(ETH_P_PAE))))
+ txwi[5] |= cpu_to_le32(MT_TXD5_FL);
+
+ if (ieee80211_is_nullfunc(fc) && ieee80211_has_a4(fc) &&
+ ieee80211_vif_is_mld(info->control.vif)) {
+ txwi[5] |= cpu_to_le32(MT_TXD5_FL);
+ txwi[6] |= cpu_to_le32(MT_TXD6_DIS_MAT);
+ }
+
+ if (!wcid->sta && ieee80211_is_mgmt(fc))
+ txwi[6] |= cpu_to_le32(MT_TXD6_DIS_MAT);
}
void mt7996_mac_write_txwi(struct mt7996_dev *dev, __le32 *txwi,
@@ -832,7 +878,9 @@ void mt7996_mac_write_txwi(struct mt7996_dev *dev, __le32 *txwi,
u8 band_idx = (info->hw_queue & MT_TX_HW_QUEUE_PHY) >> 2;
u8 p_fmt, q_idx, omac_idx = 0, wmm_idx = 0;
bool is_8023 = info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP;
- struct mt76_vif_link *mvif;
+ struct mt76_vif_link *mlink = NULL;
+ struct mt7996_vif *mvif;
+ unsigned int link_id;
u16 tx_count = 15;
u32 val;
bool inband_disc = !!(changed & (BSS_CHANGED_UNSOL_BCAST_PROBE_RESP |
@@ -840,11 +888,20 @@ void mt7996_mac_write_txwi(struct mt7996_dev *dev, __le32 *txwi,
bool beacon = !!(changed & (BSS_CHANGED_BEACON |
BSS_CHANGED_BEACON_ENABLED)) && (!inband_disc);
- mvif = vif ? (struct mt76_vif_link *)vif->drv_priv : NULL;
- if (mvif) {
- omac_idx = mvif->omac_idx;
- wmm_idx = mvif->wmm_idx;
- band_idx = mvif->band_idx;
+ if (wcid != &dev->mt76.global_wcid)
+ link_id = wcid->link_id;
+ else
+ link_id = u32_get_bits(info->control.flags,
+ IEEE80211_TX_CTRL_MLO_LINK);
+
+ mvif = vif ? (struct mt7996_vif *)vif->drv_priv : NULL;
+ if (mvif)
+ mlink = rcu_dereference(mvif->mt76.link[link_id]);
+
+ if (mlink) {
+ omac_idx = mlink->omac_idx;
+ wmm_idx = mlink->wmm_idx;
+ band_idx = mlink->band_idx;
}
if (inband_disc) {
@@ -891,7 +948,10 @@ void mt7996_mac_write_txwi(struct mt7996_dev *dev, __le32 *txwi,
val |= MT_TXD5_TX_STATUS_HOST;
txwi[5] = cpu_to_le32(val);
- val = MT_TXD6_DIS_MAT | MT_TXD6_DAS;
+ val = MT_TXD6_DAS;
+ if (q_idx >= MT_LMAC_ALTX0 && q_idx <= MT_LMAC_BCN0)
+ val |= MT_TXD6_DIS_MAT;
+
if (is_mt7996(&dev->mt76))
val |= FIELD_PREP(MT_TXD6_MSDU_CNT, 1);
else if (is_8023 || !ieee80211_is_mgmt(hdr->frame_control))
@@ -903,23 +963,25 @@ void mt7996_mac_write_txwi(struct mt7996_dev *dev, __le32 *txwi,
if (is_8023)
mt7996_mac_write_txwi_8023(dev, txwi, skb, wcid);
else
- mt7996_mac_write_txwi_80211(dev, txwi, skb, key);
+ mt7996_mac_write_txwi_80211(dev, txwi, skb, key, wcid);
if (txwi[1] & cpu_to_le32(MT_TXD1_FIXED_RATE)) {
bool mcast = ieee80211_is_data(hdr->frame_control) &&
is_multicast_ether_addr(hdr->addr1);
u8 idx = MT7996_BASIC_RATES_TBL;
- if (mvif) {
- if (mcast && mvif->mcast_rates_idx)
- idx = mvif->mcast_rates_idx;
- else if (beacon && mvif->beacon_rates_idx)
- idx = mvif->beacon_rates_idx;
+ if (mlink) {
+ if (mcast && mlink->mcast_rates_idx)
+ idx = mlink->mcast_rates_idx;
+ else if (beacon && mlink->beacon_rates_idx)
+ idx = mlink->beacon_rates_idx;
else
- idx = mvif->basic_rates_idx;
+ idx = mlink->basic_rates_idx;
}
val = FIELD_PREP(MT_TXD6_TX_RATE, idx) | MT_TXD6_FIXED_BW;
+ if (mcast)
+ val |= MT_TXD6_DIS_MAT;
txwi[6] |= cpu_to_le32(val);
txwi[3] |= cpu_to_le32(MT_TXD3_BA_DISABLE);
}
@@ -984,8 +1046,14 @@ int mt7996_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
if (vif) {
struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
+ struct mt76_vif_link *mlink = NULL;
+
+ if (wcid->offchannel)
+ mlink = rcu_dereference(mvif->mt76.offchannel_link);
+ if (!mlink)
+ mlink = &mvif->deflink.mt76;
- txp->fw.bss_idx = mvif->deflink.mt76.idx;
+ txp->fw.bss_idx = mlink->idx;
}
txp->fw.token = cpu_to_le16(id);
@@ -1027,9 +1095,10 @@ u32 mt7996_wed_init_buf(void *ptr, dma_addr_t phys, int token_id)
static void
mt7996_tx_check_aggr(struct ieee80211_sta *sta, struct sk_buff *skb)
{
- struct mt7996_sta *msta;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
bool is_8023 = info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP;
+ struct mt7996_sta_link *msta_link;
+ struct mt7996_sta *msta;
u16 fc, tid;
if (!sta || !(sta->deflink.ht_cap.ht_supported || sta->deflink.he_cap.has_he))
@@ -1058,7 +1127,9 @@ mt7996_tx_check_aggr(struct ieee80211_sta *sta, struct sk_buff *skb)
return;
msta = (struct mt7996_sta *)sta->drv_priv;
- if (!test_and_set_bit(tid, &msta->wcid.ampdu_state))
+ msta_link = &msta->deflink;
+
+ if (!test_and_set_bit(tid, &msta_link->wcid.ampdu_state))
ieee80211_start_tx_ba_session(sta, tid, 0);
}
@@ -1136,7 +1207,7 @@ mt7996_mac_tx_free(struct mt7996_dev *dev, void *data, int len)
*/
info = le32_to_cpu(*cur_info);
if (info & MT_TXFREE_INFO_PAIR) {
- struct mt7996_sta *msta;
+ struct mt7996_sta_link *msta_link;
u16 idx;
idx = FIELD_GET(MT_TXFREE_INFO_WLAN_ID, info);
@@ -1145,8 +1216,9 @@ mt7996_mac_tx_free(struct mt7996_dev *dev, void *data, int len)
if (!sta)
continue;
- msta = container_of(wcid, struct mt7996_sta, wcid);
- mt76_wcid_add_poll(&dev->mt76, &msta->wcid);
+ msta_link = container_of(wcid, struct mt7996_sta_link,
+ wcid);
+ mt76_wcid_add_poll(&dev->mt76, &msta_link->wcid);
continue;
} else if (info & MT_TXFREE_INFO_HEADER) {
u32 tx_retries = 0, tx_failed = 0;
@@ -1230,7 +1302,7 @@ mt7996_mac_add_txs_skb(struct mt7996_dev *dev, struct mt76_wcid *wcid,
struct ieee80211_sta *sta;
u8 tid;
- sta = container_of((void *)wcid, struct ieee80211_sta, drv_priv);
+ sta = wcid_to_sta(wcid);
tid = FIELD_GET(MT_TXS0_TID, txs);
ieee80211_refresh_tx_agg_session_timer(sta, tid);
}
@@ -1344,7 +1416,7 @@ out:
static void mt7996_mac_add_txs(struct mt7996_dev *dev, void *data)
{
- struct mt7996_sta *msta = NULL;
+ struct mt7996_sta_link *msta_link;
struct mt76_wcid *wcid;
__le32 *txs_data = data;
u16 wcidx;
@@ -1365,14 +1437,13 @@ static void mt7996_mac_add_txs(struct mt7996_dev *dev, void *data)
if (!wcid)
goto out;
- msta = container_of(wcid, struct mt7996_sta, wcid);
-
mt7996_mac_add_txs_skb(dev, wcid, pid, txs_data);
if (!wcid->sta)
goto out;
- mt76_wcid_add_poll(&dev->mt76, &msta->wcid);
+ msta_link = container_of(wcid, struct mt7996_sta_link, wcid);
+ mt76_wcid_add_poll(&dev->mt76, &msta_link->wcid);
out:
rcu_read_unlock();
@@ -1399,7 +1470,7 @@ bool mt7996_rx_check(struct mt76_dev *mdev, void *data, int len)
mt7996_mac_tx_free(dev, data, len);
return false;
case PKT_TYPE_TXS:
- for (rxd += 4; rxd + 8 <= end; rxd += 8)
+ for (rxd += MT_TXS_HDR_SIZE; rxd + MT_TXS_SIZE <= end; rxd += MT_TXS_SIZE)
mt7996_mac_add_txs(dev, rxd);
return false;
case PKT_TYPE_RX_FW_MONITOR:
@@ -1442,7 +1513,7 @@ void mt7996_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
mt7996_mcu_rx_event(dev, skb);
break;
case PKT_TYPE_TXS:
- for (rxd += 4; rxd + 8 <= end; rxd += 8)
+ for (rxd += MT_TXS_HDR_SIZE; rxd + MT_TXS_SIZE <= end; rxd += MT_TXS_SIZE)
mt7996_mac_add_txs(dev, rxd);
dev_kfree_skb(skb);
break;
@@ -2239,38 +2310,70 @@ void mt7996_mac_update_stats(struct mt7996_phy *phy)
void mt7996_mac_sta_rc_work(struct work_struct *work)
{
struct mt7996_dev *dev = container_of(work, struct mt7996_dev, rc_work);
+ struct ieee80211_bss_conf *link_conf;
+ struct ieee80211_link_sta *link_sta;
+ struct mt7996_sta_link *msta_link;
+ struct mt7996_vif_link *link;
+ struct mt76_vif_link *mlink;
struct ieee80211_sta *sta;
struct ieee80211_vif *vif;
struct mt7996_sta *msta;
- u32 changed;
+ struct mt7996_vif *mvif;
LIST_HEAD(list);
+ u32 changed;
+ u8 link_id;
+ rcu_read_lock();
spin_lock_bh(&dev->mt76.sta_poll_lock);
list_splice_init(&dev->sta_rc_list, &list);
while (!list_empty(&list)) {
- msta = list_first_entry(&list, struct mt7996_sta, rc_list);
- list_del_init(&msta->rc_list);
- changed = msta->changed;
- msta->changed = 0;
+ msta_link = list_first_entry(&list, struct mt7996_sta_link,
+ rc_list);
+ list_del_init(&msta_link->rc_list);
+
+ changed = msta_link->changed;
+ msta_link->changed = 0;
+
+ sta = wcid_to_sta(&msta_link->wcid);
+ link_id = msta_link->wcid.link_id;
+ msta = msta_link->sta;
+ mvif = msta->vif;
+ vif = container_of((void *)mvif, struct ieee80211_vif, drv_priv);
+
+ mlink = rcu_dereference(mvif->mt76.link[link_id]);
+ if (!mlink)
+ continue;
+
+ link_sta = rcu_dereference(sta->link[link_id]);
+ if (!link_sta)
+ continue;
+
+ link_conf = rcu_dereference(vif->link_conf[link_id]);
+ if (!link_conf)
+ continue;
+
spin_unlock_bh(&dev->mt76.sta_poll_lock);
- sta = container_of((void *)msta, struct ieee80211_sta, drv_priv);
- vif = container_of((void *)msta->vif, struct ieee80211_vif, drv_priv);
+ link = (struct mt7996_vif_link *)mlink;
if (changed & (IEEE80211_RC_SUPP_RATES_CHANGED |
IEEE80211_RC_NSS_CHANGED |
IEEE80211_RC_BW_CHANGED))
- mt7996_mcu_add_rate_ctrl(dev, vif, sta, true);
+ mt7996_mcu_add_rate_ctrl(dev, vif, link_conf,
+ link_sta, link, msta_link,
+ true);
if (changed & IEEE80211_RC_SMPS_CHANGED)
- mt7996_mcu_set_fixed_field(dev, vif, sta, NULL,
+ mt7996_mcu_set_fixed_field(dev, link_sta, link,
+ msta_link, NULL,
RATE_PARAM_MMPS_UPDATE);
spin_lock_bh(&dev->mt76.sta_poll_lock);
}
spin_unlock_bh(&dev->mt76.sta_poll_lock);
+ rcu_read_unlock();
}
void mt7996_mac_work(struct work_struct *work)
@@ -2538,7 +2641,7 @@ static int mt7996_mac_check_twt_req(struct ieee80211_twt_setup *twt)
}
static bool
-mt7996_mac_twt_param_equal(struct mt7996_sta *msta,
+mt7996_mac_twt_param_equal(struct mt7996_sta_link *msta_link,
struct ieee80211_twt_params *twt_agrt)
{
u16 type = le16_to_cpu(twt_agrt->req_type);
@@ -2549,10 +2652,10 @@ mt7996_mac_twt_param_equal(struct mt7996_sta *msta,
for (i = 0; i < MT7996_MAX_STA_TWT_AGRT; i++) {
struct mt7996_twt_flow *f;
- if (!(msta->twt.flowid_mask & BIT(i)))
+ if (!(msta_link->twt.flowid_mask & BIT(i)))
continue;
- f = &msta->twt.flow[i];
+ f = &msta_link->twt.flow[i];
if (f->duration == twt_agrt->min_twt_dur &&
f->mantissa == twt_agrt->mantissa &&
f->exp == exp &&
@@ -2572,6 +2675,7 @@ void mt7996_mac_add_twt_setup(struct ieee80211_hw *hw,
enum ieee80211_twt_setup_cmd setup_cmd = TWT_SETUP_CMD_REJECT;
struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
struct ieee80211_twt_params *twt_agrt = (void *)twt->params;
+ struct mt7996_sta_link *msta_link = &msta->deflink;
u16 req_type = le16_to_cpu(twt_agrt->req_type);
enum ieee80211_twt_setup_cmd sta_setup_cmd;
struct mt7996_dev *dev = mt7996_hw_dev(hw);
@@ -2586,7 +2690,8 @@ void mt7996_mac_add_twt_setup(struct ieee80211_hw *hw,
if (dev->twt.n_agrt == MT7996_MAX_TWT_AGRT)
goto unlock;
- if (hweight8(msta->twt.flowid_mask) == ARRAY_SIZE(msta->twt.flow))
+ if (hweight8(msta_link->twt.flowid_mask) ==
+ ARRAY_SIZE(msta_link->twt.flow))
goto unlock;
if (twt_agrt->min_twt_dur < MT7996_MIN_TWT_DUR) {
@@ -2595,10 +2700,10 @@ void mt7996_mac_add_twt_setup(struct ieee80211_hw *hw,
goto unlock;
}
- if (mt7996_mac_twt_param_equal(msta, twt_agrt))
+ if (mt7996_mac_twt_param_equal(msta_link, twt_agrt))
goto unlock;
- flowid = ffs(~msta->twt.flowid_mask) - 1;
+ flowid = ffs(~msta_link->twt.flowid_mask) - 1;
twt_agrt->req_type &= ~cpu_to_le16(IEEE80211_TWT_REQTYPE_FLOWID);
twt_agrt->req_type |= le16_encode_bits(flowid,
IEEE80211_TWT_REQTYPE_FLOWID);
@@ -2607,10 +2712,10 @@ void mt7996_mac_add_twt_setup(struct ieee80211_hw *hw,
exp = FIELD_GET(IEEE80211_TWT_REQTYPE_WAKE_INT_EXP, req_type);
sta_setup_cmd = FIELD_GET(IEEE80211_TWT_REQTYPE_SETUP_CMD, req_type);
- flow = &msta->twt.flow[flowid];
+ flow = &msta_link->twt.flow[flowid];
memset(flow, 0, sizeof(*flow));
INIT_LIST_HEAD(&flow->list);
- flow->wcid = msta->wcid.idx;
+ flow->wcid = msta_link->wcid.idx;
flow->table_id = table_id;
flow->id = flowid;
flow->duration = twt_agrt->min_twt_dur;
@@ -2628,7 +2733,7 @@ void mt7996_mac_add_twt_setup(struct ieee80211_hw *hw,
flow->sched = true;
flow->start_tsf = mt7996_mac_twt_sched_list_add(dev, flow);
- curr_tsf = __mt7996_get_tsf(hw, msta->vif);
+ curr_tsf = __mt7996_get_tsf(hw, &msta->vif->deflink);
div_u64_rem(curr_tsf - flow->start_tsf, interval, &rem);
flow_tsf = curr_tsf + interval - rem;
twt_agrt->twt = cpu_to_le64(flow_tsf);
@@ -2637,12 +2742,13 @@ void mt7996_mac_add_twt_setup(struct ieee80211_hw *hw,
}
flow->tsf = le64_to_cpu(twt_agrt->twt);
- if (mt7996_mcu_twt_agrt_update(dev, msta->vif, flow, MCU_TWT_AGRT_ADD))
+ if (mt7996_mcu_twt_agrt_update(dev, &msta->vif->deflink, flow,
+ MCU_TWT_AGRT_ADD))
goto unlock;
setup_cmd = TWT_SETUP_CMD_ACCEPT;
dev->twt.table_mask |= BIT(table_id);
- msta->twt.flowid_mask |= BIT(flowid);
+ msta_link->twt.flowid_mask |= BIT(flowid);
dev->twt.n_agrt++;
unlock:
@@ -2655,26 +2761,26 @@ out:
}
void mt7996_mac_twt_teardown_flow(struct mt7996_dev *dev,
- struct mt7996_sta *msta,
+ struct mt7996_vif_link *link,
+ struct mt7996_sta_link *msta_link,
u8 flowid)
{
struct mt7996_twt_flow *flow;
lockdep_assert_held(&dev->mt76.mutex);
- if (flowid >= ARRAY_SIZE(msta->twt.flow))
+ if (flowid >= ARRAY_SIZE(msta_link->twt.flow))
return;
- if (!(msta->twt.flowid_mask & BIT(flowid)))
+ if (!(msta_link->twt.flowid_mask & BIT(flowid)))
return;
- flow = &msta->twt.flow[flowid];
- if (mt7996_mcu_twt_agrt_update(dev, msta->vif, flow,
- MCU_TWT_AGRT_DELETE))
+ flow = &msta_link->twt.flow[flowid];
+ if (mt7996_mcu_twt_agrt_update(dev, link, flow, MCU_TWT_AGRT_DELETE))
return;
list_del_init(&flow->list);
- msta->twt.flowid_mask &= ~BIT(flowid);
+ msta_link->twt.flowid_mask &= ~BIT(flowid);
dev->twt.table_mask &= ~BIT(flow->table_id);
dev->twt.n_agrt--;
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/main.c b/drivers/net/wireless/mediatek/mt76/mt7996/main.c
index 69dd565d8319..91c64e3a0860 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/main.c
@@ -158,58 +158,101 @@ mt7996_init_bitrate_mask(struct ieee80211_vif *vif, struct mt7996_vif_link *mlin
static int
mt7996_set_hw_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
struct ieee80211_vif *vif, struct ieee80211_sta *sta,
- struct mt7996_vif_link *mlink, struct ieee80211_key_conf *key)
+ struct ieee80211_key_conf *key)
{
struct mt7996_dev *dev = mt7996_hw_dev(hw);
- struct mt7996_sta *msta = sta ? (struct mt7996_sta *)sta->drv_priv :
- &mlink->sta;
- struct mt76_wcid *wcid = &msta->wcid;
- u8 *wcid_keyidx = &wcid->hw_key_idx;
- struct mt7996_phy *phy;
int idx = key->keyidx;
+ unsigned int link_id;
+ unsigned long links;
+
+ if (key->link_id >= 0)
+ links = BIT(key->link_id);
+ else if (sta && sta->valid_links)
+ links = sta->valid_links;
+ else if (vif->valid_links)
+ links = vif->valid_links;
+ else
+ links = BIT(0);
- phy = mt7996_vif_link_phy(mlink);
- if (!phy)
- return -EINVAL;
+ for_each_set_bit(link_id, &links, IEEE80211_MLD_MAX_NUM_LINKS) {
+ struct mt7996_sta_link *msta_link;
+ struct mt7996_vif_link *link;
+ u8 *wcid_keyidx;
+ int err;
- if (sta && !wcid->sta)
- return -EOPNOTSUPP;
+ link = mt7996_vif_link(dev, vif, link_id);
+ if (!link)
+ continue;
- switch (key->cipher) {
- case WLAN_CIPHER_SUITE_AES_CMAC:
- case WLAN_CIPHER_SUITE_BIP_CMAC_256:
- case WLAN_CIPHER_SUITE_BIP_GMAC_128:
- case WLAN_CIPHER_SUITE_BIP_GMAC_256:
- if (key->keyidx == 6 || key->keyidx == 7) {
- wcid_keyidx = &wcid->hw_key_idx2;
- key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIE;
+ if (sta) {
+ struct mt7996_sta *msta;
+
+ msta = (struct mt7996_sta *)sta->drv_priv;
+ msta_link = mt76_dereference(msta->link[link_id],
+ &dev->mt76);
+ if (!msta_link)
+ continue;
+
+ if (!msta_link->wcid.sta)
+ return -EOPNOTSUPP;
+ } else {
+ msta_link = &link->msta_link;
+ }
+ wcid_keyidx = &msta_link->wcid.hw_key_idx;
+
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_AES_CMAC:
+ case WLAN_CIPHER_SUITE_BIP_CMAC_256:
+ case WLAN_CIPHER_SUITE_BIP_GMAC_128:
+ case WLAN_CIPHER_SUITE_BIP_GMAC_256:
+ if (key->keyidx == 6 || key->keyidx == 7) {
+ wcid_keyidx = &msta_link->wcid.hw_key_idx2;
+ key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIE;
+ }
+ break;
+ default:
+ break;
}
- break;
- default:
- break;
- }
- if (cmd == SET_KEY && !sta && !mlink->mt76.cipher) {
- mlink->mt76.cipher = mt76_connac_mcu_get_cipher(key->cipher);
- mt7996_mcu_add_bss_info(phy, vif, &vif->bss_conf, &mlink->mt76, true);
- }
+ if (cmd == SET_KEY && !sta && !link->mt76.cipher) {
+ struct ieee80211_bss_conf *link_conf;
- if (cmd == SET_KEY) {
- *wcid_keyidx = idx;
- } else {
- if (idx == *wcid_keyidx)
- *wcid_keyidx = -1;
- return 0;
- }
+ link_conf = link_conf_dereference_protected(vif,
+ link_id);
+ if (!link_conf)
+ link_conf = &vif->bss_conf;
+
+ link->mt76.cipher =
+ mt76_connac_mcu_get_cipher(key->cipher);
+ mt7996_mcu_add_bss_info(link->phy, vif, link_conf,
+ &link->mt76, msta_link, true);
+ }
- mt76_wcid_key_setup(&dev->mt76, wcid, key);
+ if (cmd == SET_KEY) {
+ *wcid_keyidx = idx;
+ } else {
+ if (idx == *wcid_keyidx)
+ *wcid_keyidx = -1;
+ continue;
+ }
- if (key->keyidx == 6 || key->keyidx == 7)
- return mt7996_mcu_bcn_prot_enable(dev, vif, key);
+ mt76_wcid_key_setup(&dev->mt76, &msta_link->wcid, key);
+
+ if (key->keyidx == 6 || key->keyidx == 7) {
+ err = mt7996_mcu_bcn_prot_enable(dev, link,
+ msta_link, key);
+ if (err)
+ return err;
+ }
+
+ err = mt7996_mcu_add_key(&dev->mt76, vif, key,
+ MCU_WMWA_UNI_CMD(STA_REC_UPDATE),
+ &msta_link->wcid, cmd);
+ if (err)
+ return err;
+ }
- return mt7996_mcu_add_key(&dev->mt76, vif, key,
- MCU_WMWA_UNI_CMD(STA_REC_UPDATE),
- &msta->wcid, cmd);
+ return 0;
}
static void
@@ -217,12 +260,10 @@ mt7996_key_iter(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, struct ieee80211_key_conf *key,
void *data)
{
- struct mt7996_vif_link *mlink = data;
-
if (sta)
return;
- WARN_ON(mt7996_set_hw_key(hw, SET_KEY, vif, NULL, mlink, key));
+ WARN_ON(mt7996_set_hw_key(hw, SET_KEY, vif, NULL, key));
}
int mt7996_vif_link_add(struct mt76_phy *mphy, struct ieee80211_vif *vif,
@@ -230,6 +271,8 @@ int mt7996_vif_link_add(struct mt76_phy *mphy, struct ieee80211_vif *vif,
struct mt76_vif_link *mlink)
{
struct mt7996_vif_link *link = container_of(mlink, struct mt7996_vif_link, mt76);
+ struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
+ struct mt7996_sta_link *msta_link = &link->msta_link;
struct mt7996_phy *phy = mphy->priv;
struct mt7996_dev *dev = phy->dev;
u8 band_idx = phy->mt76->band_idx;
@@ -248,7 +291,8 @@ int mt7996_vif_link_add(struct mt76_phy *mphy, struct ieee80211_vif *vif,
mlink->omac_idx = idx;
mlink->band_idx = band_idx;
mlink->wmm_idx = vif->type == NL80211_IFTYPE_AP ? 0 : 3;
- mlink->wcid = &link->sta.wcid;
+ mlink->wcid = &msta_link->wcid;
+ mlink->wcid->offchannel = mlink->offchannel;
ret = mt7996_mcu_add_dev_info(phy, vif, link_conf, mlink, true);
if (ret)
@@ -259,10 +303,11 @@ int mt7996_vif_link_add(struct mt76_phy *mphy, struct ieee80211_vif *vif,
idx = MT7996_WTBL_RESERVED - mlink->idx;
- INIT_LIST_HEAD(&link->sta.rc_list);
- link->sta.wcid.idx = idx;
- link->sta.wcid.tx_info |= MT_WCID_TX_INFO_SET;
- mt76_wcid_init(&link->sta.wcid, band_idx);
+ INIT_LIST_HEAD(&msta_link->rc_list);
+ msta_link->wcid.idx = idx;
+ msta_link->wcid.link_id = link_conf->link_id;
+ msta_link->wcid.tx_info |= MT_WCID_TX_INFO_SET;
+ mt76_wcid_init(&msta_link->wcid, band_idx);
mt7996_mac_wtbl_update(dev, idx,
MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
@@ -283,15 +328,19 @@ int mt7996_vif_link_add(struct mt76_phy *mphy, struct ieee80211_vif *vif,
mt7996_init_bitrate_mask(vif, link);
- mt7996_mcu_add_bss_info(phy, vif, link_conf, mlink, true);
+ mt7996_mcu_add_bss_info(phy, vif, link_conf, mlink, msta_link, true);
/* defer the first STA_REC of BMC entry to BSS_CHANGED_BSSID for STA
* interface, since firmware only records BSSID when the entry is new
*/
if (vif->type != NL80211_IFTYPE_STATION)
- mt7996_mcu_add_sta(dev, vif, mlink, NULL, CONN_STATE_PORT_SECURE, true);
- rcu_assign_pointer(dev->mt76.wcid[idx], &link->sta.wcid);
+ mt7996_mcu_add_sta(dev, link_conf, NULL, link, NULL,
+ CONN_STATE_PORT_SECURE, true);
+ rcu_assign_pointer(dev->mt76.wcid[idx], &msta_link->wcid);
- ieee80211_iter_keys(mphy->hw, vif, mt7996_key_iter, link);
+ ieee80211_iter_keys(mphy->hw, vif, mt7996_key_iter, NULL);
+
+ if (mvif->mt76.deflink_id == IEEE80211_LINK_UNSPECIFIED)
+ mvif->mt76.deflink_id = link_conf->link_id;
return 0;
}
@@ -301,29 +350,42 @@ void mt7996_vif_link_remove(struct mt76_phy *mphy, struct ieee80211_vif *vif,
struct mt76_vif_link *mlink)
{
struct mt7996_vif_link *link = container_of(mlink, struct mt7996_vif_link, mt76);
+ struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
+ struct mt7996_sta_link *msta_link = &link->msta_link;
struct mt7996_phy *phy = mphy->priv;
struct mt7996_dev *dev = phy->dev;
- struct mt7996_sta *msta;
- int idx;
+ int idx = msta_link->wcid.idx;
- msta = &link->sta;
- idx = msta->wcid.idx;
- mt7996_mcu_add_sta(dev, vif, mlink, NULL, CONN_STATE_DISCONNECT, false);
- mt7996_mcu_add_bss_info(phy, vif, link_conf, mlink, false);
+ mt7996_mcu_add_sta(dev, link_conf, NULL, link, NULL,
+ CONN_STATE_DISCONNECT, false);
+ mt7996_mcu_add_bss_info(phy, vif, link_conf, mlink, msta_link, false);
mt7996_mcu_add_dev_info(phy, vif, link_conf, mlink, false);
rcu_assign_pointer(dev->mt76.wcid[idx], NULL);
+ if (mvif->mt76.deflink_id == link_conf->link_id) {
+ struct ieee80211_bss_conf *iter;
+ unsigned int link_id;
+
+ mvif->mt76.deflink_id = IEEE80211_LINK_UNSPECIFIED;
+ for_each_vif_active_link(vif, iter, link_id) {
+ if (link_id != IEEE80211_LINK_UNSPECIFIED) {
+ mvif->mt76.deflink_id = link_id;
+ break;
+ }
+ }
+ }
+
dev->mt76.vif_mask &= ~BIT_ULL(mlink->idx);
phy->omac_mask &= ~BIT_ULL(mlink->omac_idx);
spin_lock_bh(&dev->mt76.sta_poll_lock);
- if (!list_empty(&msta->wcid.poll_list))
- list_del_init(&msta->wcid.poll_list);
+ if (!list_empty(&msta_link->wcid.poll_list))
+ list_del_init(&msta_link->wcid.poll_list);
spin_unlock_bh(&dev->mt76.sta_poll_lock);
- mt76_wcid_cleanup(&dev->mt76, &msta->wcid);
+ mt76_wcid_cleanup(&dev->mt76, &msta_link->wcid);
}
static void mt7996_phy_set_rxfilter(struct mt7996_phy *phy)
@@ -399,6 +461,7 @@ static int mt7996_add_interface(struct ieee80211_hw *hw,
mt76_vif_init(vif, &mvif->mt76);
vif->offload_flags |= IEEE80211_OFFLOAD_ENCAP_4ADDR;
+ mvif->mt76.deflink_id = IEEE80211_LINK_UNSPECIFIED;
out:
mutex_unlock(&dev->mt76.mutex);
@@ -480,7 +543,6 @@ static int mt7996_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
{
struct mt7996_dev *dev = mt7996_hw_dev(hw);
struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
- struct mt7996_vif_link *mlink = &mvif->deflink;
int err;
/* The hardware does not support per-STA RX GTK, fallback
@@ -515,11 +577,11 @@ static int mt7996_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
return -EOPNOTSUPP;
}
- if (!mt7996_vif_link_phy(mlink))
- return 0; /* defer until after link add */
+ if (!mt7996_vif_link_phy(&mvif->deflink))
+ return 0; /* defer until after link add */
mutex_lock(&dev->mt76.mutex);
- err = mt7996_set_hw_key(hw, cmd, vif, sta, mlink, key);
+ err = mt7996_set_hw_key(hw, cmd, vif, sta, key);
mutex_unlock(&dev->mt76.mutex);
return err;
@@ -601,6 +663,33 @@ static void mt7996_configure_filter(struct ieee80211_hw *hw,
mutex_unlock(&dev->mt76.mutex);
}
+static int
+mt7996_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ unsigned int link_id, int *dbm)
+{
+ struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
+ struct mt7996_phy *phy = mt7996_vif_link_phy(&mvif->deflink);
+ struct mt7996_dev *dev = mt7996_hw_dev(hw);
+ struct wireless_dev *wdev;
+ int n_chains, delta, i;
+
+ if (!phy) {
+ wdev = ieee80211_vif_to_wdev(vif);
+ for (i = 0; i < hw->wiphy->n_radio; i++)
+ if (wdev->radio_mask & BIT(i))
+ phy = dev->radio_phy[i];
+
+ if (!phy)
+ return -EINVAL;
+ }
+
+ n_chains = hweight16(phy->mt76->chainmask);
+ delta = mt76_tx_power_nss_delta(n_chains);
+ *dbm = DIV_ROUND_UP(phy->mt76->txpower_cur + delta, 2);
+
+ return 0;
+}
+
static u8
mt7996_get_rates_table(struct mt7996_phy *phy, struct ieee80211_bss_conf *conf,
bool beacon, bool mcast)
@@ -630,12 +719,11 @@ mt7996_get_rates_table(struct mt7996_phy *phy, struct ieee80211_bss_conf *conf,
}
static void
-mt7996_update_mu_group(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+mt7996_update_mu_group(struct ieee80211_hw *hw, struct mt7996_vif_link *link,
struct ieee80211_bss_conf *info)
{
- struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
struct mt7996_dev *dev = mt7996_hw_dev(hw);
- u8 band = mvif->deflink.mt76.band_idx;
+ u8 band = link->mt76.band_idx;
u32 *mu;
mu = (u32 *)info->mu_group.membership;
@@ -649,23 +737,56 @@ mt7996_update_mu_group(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
mt76_wr(dev, MT_WF_PHYRX_BAND_GID_TAB_POS3(band), mu[3]);
}
-static void mt7996_bss_info_changed(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_bss_conf *info,
- u64 changed)
+static void
+mt7996_vif_cfg_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ u64 changed)
{
struct mt7996_dev *dev = mt7996_hw_dev(hw);
- struct mt76_vif_link *mvif;
+
+ mutex_lock(&dev->mt76.mutex);
+
+ if ((changed & BSS_CHANGED_ASSOC) && vif->cfg.assoc) {
+ struct ieee80211_bss_conf *link_conf;
+ unsigned long link_id;
+
+ for_each_vif_active_link(vif, link_conf, link_id) {
+ struct mt7996_vif_link *link;
+
+ link = mt7996_vif_link(dev, vif, link_id);
+ if (!link)
+ continue;
+
+ if (!link->phy)
+ continue;
+
+ mt7996_mcu_add_bss_info(link->phy, vif, link_conf,
+ &link->mt76, &link->msta_link,
+ true);
+ mt7996_mcu_add_sta(dev, link_conf, NULL, link, NULL,
+ CONN_STATE_PORT_SECURE,
+ !!(changed & BSS_CHANGED_BSSID));
+ }
+ }
+
+ mutex_unlock(&dev->mt76.mutex);
+}
+
+static void
+mt7996_link_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *info, u64 changed)
+{
+ struct mt7996_dev *dev = mt7996_hw_dev(hw);
+ struct mt7996_vif_link *link;
struct mt7996_phy *phy;
struct mt76_phy *mphy;
mutex_lock(&dev->mt76.mutex);
- mvif = mt76_vif_conf_link(&dev->mt76, vif, info);
- if (!mvif)
+ link = mt7996_vif_conf_link(dev, vif, info);
+ if (!link)
goto out;
- mphy = mt76_vif_link_phy(mvif);
+ mphy = mt76_vif_link_phy(&link->mt76);
if (!mphy)
goto out;
@@ -675,16 +796,14 @@ static void mt7996_bss_info_changed(struct ieee80211_hw *hw,
* and then peer references bss_info_rfch to set bandwidth cap.
*/
if ((changed & BSS_CHANGED_BSSID && !is_zero_ether_addr(info->bssid)) ||
- (changed & BSS_CHANGED_ASSOC && vif->cfg.assoc) ||
(changed & BSS_CHANGED_BEACON_ENABLED && info->enable_beacon)) {
- mt7996_mcu_add_bss_info(phy, vif, info, mvif, true);
- mt7996_mcu_add_sta(dev, vif, mvif, NULL, CONN_STATE_PORT_SECURE,
+ mt7996_mcu_add_bss_info(phy, vif, info, &link->mt76,
+ &link->msta_link, true);
+ mt7996_mcu_add_sta(dev, info, NULL, link, NULL,
+ CONN_STATE_PORT_SECURE,
!!(changed & BSS_CHANGED_BSSID));
}
- if (changed & BSS_CHANGED_ERP_CTS_PROT)
- mt7996_mac_enable_rtscts(dev, vif, info->use_cts_prot);
-
if (changed & BSS_CHANGED_ERP_SLOT) {
int slottime = info->use_short_slot ? 9 : 20;
@@ -695,11 +814,11 @@ static void mt7996_bss_info_changed(struct ieee80211_hw *hw,
}
if (changed & BSS_CHANGED_MCAST_RATE)
- mvif->mcast_rates_idx =
+ link->mt76.mcast_rates_idx =
mt7996_get_rates_table(phy, info, false, true);
if (changed & BSS_CHANGED_BASIC_RATES)
- mvif->basic_rates_idx =
+ link->mt76.basic_rates_idx =
mt7996_get_rates_table(phy, info, false, false);
/* ensure that txcmd_mode is enabled after bss_info */
@@ -707,19 +826,19 @@ static void mt7996_bss_info_changed(struct ieee80211_hw *hw,
mt7996_mcu_set_tx(dev, vif, info);
if (changed & BSS_CHANGED_HE_OBSS_PD)
- mt7996_mcu_add_obss_spr(phy, vif, &info->he_obss_pd);
+ mt7996_mcu_add_obss_spr(phy, link, &info->he_obss_pd);
if (changed & BSS_CHANGED_HE_BSS_COLOR) {
if ((vif->type == NL80211_IFTYPE_AP &&
- mvif->omac_idx <= HW_BSSID_MAX) ||
+ link->mt76.omac_idx <= HW_BSSID_MAX) ||
vif->type == NL80211_IFTYPE_STATION)
- mt7996_mcu_update_bss_color(dev, mvif,
+ mt7996_mcu_update_bss_color(dev, &link->mt76,
&info->he_bss_color);
}
if (changed & (BSS_CHANGED_BEACON |
BSS_CHANGED_BEACON_ENABLED)) {
- mvif->beacon_rates_idx =
+ link->mt76.beacon_rates_idx =
mt7996_get_rates_table(phy, info, true, false);
mt7996_mcu_add_beacon(hw, vif, info);
@@ -727,10 +846,10 @@ static void mt7996_bss_info_changed(struct ieee80211_hw *hw,
if (changed & (BSS_CHANGED_UNSOL_BCAST_PROBE_RESP |
BSS_CHANGED_FILS_DISCOVERY))
- mt7996_mcu_beacon_inband_discov(dev, vif, changed);
+ mt7996_mcu_beacon_inband_discov(dev, info, link, changed);
if (changed & BSS_CHANGED_MU_GROUPS)
- mt7996_update_mu_group(hw, vif, info);
+ mt7996_update_mu_group(hw, link, info);
if (changed & BSS_CHANGED_TXPOWER &&
info->txpower != phy->txpower) {
@@ -754,96 +873,322 @@ mt7996_channel_switch_beacon(struct ieee80211_hw *hw,
mutex_unlock(&dev->mt76.mutex);
}
-int mt7996_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
+static int
+mt7996_mac_sta_init_link(struct mt7996_dev *dev,
+ struct ieee80211_bss_conf *link_conf,
+ struct ieee80211_link_sta *link_sta,
+ struct mt7996_vif_link *link, unsigned int link_id)
{
- struct mt7996_dev *dev = container_of(mdev, struct mt7996_dev, mt76);
+ struct ieee80211_sta *sta = link_sta->sta;
struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
- struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
- struct mt7996_vif_link *link = &mvif->deflink;
- u8 band_idx = link->phy->mt76->band_idx;
+ struct mt7996_phy *phy = link->phy;
+ struct mt7996_sta_link *msta_link;
int idx;
idx = mt76_wcid_alloc(dev->mt76.wcid_mask, MT7996_WTBL_STA);
if (idx < 0)
return -ENOSPC;
- INIT_LIST_HEAD(&msta->rc_list);
- INIT_LIST_HEAD(&msta->wcid.poll_list);
- msta->vif = mvif;
- msta->wcid.sta = 1;
- msta->wcid.idx = idx;
- msta->wcid.phy_idx = band_idx;
+ if (msta->deflink_id == IEEE80211_LINK_UNSPECIFIED) {
+ int i;
- ewma_avg_signal_init(&msta->avg_ack_signal);
+ msta_link = &msta->deflink;
+ msta->deflink_id = link_id;
- mt7996_mac_wtbl_update(dev, idx,
+ for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
+ struct mt76_txq *mtxq;
+
+ if (!sta->txq[i])
+ continue;
+
+ mtxq = (struct mt76_txq *)sta->txq[i]->drv_priv;
+ mtxq->wcid = idx;
+ }
+ } else {
+ msta_link = kzalloc(sizeof(*msta_link), GFP_KERNEL);
+ if (!msta_link)
+ return -ENOMEM;
+ }
+
+ INIT_LIST_HEAD(&msta_link->rc_list);
+ INIT_LIST_HEAD(&msta_link->wcid.poll_list);
+ msta_link->sta = msta;
+ msta_link->wcid.sta = 1;
+ msta_link->wcid.idx = idx;
+ msta_link->wcid.link_id = link_id;
+
+ ewma_avg_signal_init(&msta_link->avg_ack_signal);
+ ewma_signal_init(&msta_link->wcid.rssi);
+
+ rcu_assign_pointer(msta->link[link_id], msta_link);
+
+ mt7996_mac_wtbl_update(dev, idx, MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
+ mt7996_mcu_add_sta(dev, link_conf, link_sta, link, msta_link,
+ CONN_STATE_DISCONNECT, true);
+
+ rcu_assign_pointer(dev->mt76.wcid[idx], &msta_link->wcid);
+ mt76_wcid_init(&msta_link->wcid, phy->mt76->band_idx);
+
+ return 0;
+}
+
+static void
+mt7996_mac_sta_deinit_link(struct mt7996_dev *dev,
+ struct mt7996_sta_link *msta_link)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(msta_link->wcid.aggr); i++)
+ mt76_rx_aggr_stop(&dev->mt76, &msta_link->wcid, i);
+
+ mt7996_mac_wtbl_update(dev, msta_link->wcid.idx,
MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
- mt7996_mcu_add_sta(dev, vif, &link->mt76, sta, CONN_STATE_DISCONNECT,
- true);
+
+ spin_lock_bh(&dev->mt76.sta_poll_lock);
+ if (!list_empty(&msta_link->wcid.poll_list))
+ list_del_init(&msta_link->wcid.poll_list);
+ if (!list_empty(&msta_link->rc_list))
+ list_del_init(&msta_link->rc_list);
+ spin_unlock_bh(&dev->mt76.sta_poll_lock);
+
+ mt76_wcid_cleanup(&dev->mt76, &msta_link->wcid);
+ mt76_wcid_mask_clear(dev->mt76.wcid_mask, msta_link->wcid.idx);
+}
+
+static void
+mt7996_mac_sta_remove_links(struct mt7996_dev *dev, struct ieee80211_sta *sta,
+ unsigned long links)
+{
+ struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
+ struct mt76_dev *mdev = &dev->mt76;
+ unsigned int link_id;
+
+ for_each_set_bit(link_id, &links, IEEE80211_MLD_MAX_NUM_LINKS) {
+ struct mt7996_sta_link *msta_link = NULL;
+
+ msta_link = rcu_replace_pointer(msta->link[link_id], msta_link,
+ lockdep_is_held(&mdev->mutex));
+ if (!msta_link)
+ continue;
+
+ mt7996_mac_sta_deinit_link(dev, msta_link);
+ if (msta->deflink_id == link_id) {
+ msta->deflink_id = IEEE80211_LINK_UNSPECIFIED;
+ continue;
+ }
+
+ kfree_rcu(msta_link, rcu_head);
+ }
+}
+
+static int
+mt7996_mac_sta_add_links(struct mt7996_dev *dev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, unsigned long new_links)
+{
+ struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
+ unsigned int link_id;
+ int err;
+
+ for_each_set_bit(link_id, &new_links, IEEE80211_MLD_MAX_NUM_LINKS) {
+ struct ieee80211_bss_conf *link_conf;
+ struct ieee80211_link_sta *link_sta;
+ struct mt7996_vif_link *link;
+
+ if (rcu_access_pointer(msta->link[link_id]))
+ continue;
+
+ link_conf = link_conf_dereference_protected(vif, link_id);
+ if (!link_conf)
+ goto error_unlink;
+
+ link = mt7996_vif_link(dev, vif, link_id);
+ if (!link)
+ goto error_unlink;
+
+ link_sta = link_sta_dereference_protected(sta, link_id);
+ if (!link_sta)
+ goto error_unlink;
+
+ err = mt7996_mac_sta_init_link(dev, link_conf, link_sta, link,
+ link_id);
+ if (err)
+ goto error_unlink;
+ }
return 0;
+
+error_unlink:
+ mt7996_mac_sta_remove_links(dev, sta, new_links);
+
+ return err;
+}
+
+static int
+mt7996_mac_sta_change_links(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, u16 old_links,
+ u16 new_links)
+{
+ struct mt7996_dev *dev = mt7996_hw_dev(hw);
+ unsigned long add = new_links & ~old_links;
+ unsigned long rem = old_links & ~new_links;
+ int ret;
+
+ mutex_lock(&dev->mt76.mutex);
+
+ mt7996_mac_sta_remove_links(dev, sta, rem);
+ ret = mt7996_mac_sta_add_links(dev, vif, sta, add);
+
+ mutex_unlock(&dev->mt76.mutex);
+
+ return ret;
}
-int mt7996_mac_sta_event(struct mt76_dev *mdev, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta, enum mt76_sta_event ev)
+static int
+mt7996_mac_sta_add(struct mt76_phy *mphy, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
{
+ struct mt76_dev *mdev = mphy->dev;
struct mt7996_dev *dev = container_of(mdev, struct mt7996_dev, mt76);
struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
- struct mt7996_vif_link *link = &mvif->deflink;
- int i, ret;
+ unsigned long links = sta->mlo ? sta->valid_links : BIT(0);
+ int err;
- switch (ev) {
- case MT76_STA_EVENT_ASSOC:
- ret = mt7996_mcu_add_sta(dev, vif, &link->mt76, sta,
- CONN_STATE_CONNECT, true);
- if (ret)
- return ret;
+ mutex_lock(&mdev->mutex);
- ret = mt7996_mcu_add_rate_ctrl(dev, vif, sta, false);
- if (ret)
- return ret;
+ msta->deflink_id = IEEE80211_LINK_UNSPECIFIED;
+ msta->vif = mvif;
+ err = mt7996_mac_sta_add_links(dev, vif, sta, links);
+ if (!err)
+ mphy->num_sta++;
- msta->wcid.tx_info |= MT_WCID_TX_INFO_SET;
- msta->wcid.sta = 1;
+ mutex_unlock(&mdev->mutex);
- return 0;
+ return err;
+}
- case MT76_STA_EVENT_AUTHORIZE:
- return mt7996_mcu_add_sta(dev, vif, &link->mt76, sta,
- CONN_STATE_PORT_SECURE, false);
+static int
+mt7996_mac_sta_event(struct mt7996_dev *dev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, enum mt76_sta_event ev)
+{
+ struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
+ unsigned long links = sta->valid_links;
+ struct ieee80211_link_sta *link_sta;
+ unsigned int link_id;
+
+ for_each_sta_active_link(vif, sta, link_sta, link_id) {
+ struct ieee80211_bss_conf *link_conf;
+ struct mt7996_sta_link *msta_link;
+ struct mt7996_vif_link *link;
+ int i, err;
+
+ link_conf = link_conf_dereference_protected(vif, link_id);
+ if (!link_conf)
+ continue;
- case MT76_STA_EVENT_DISASSOC:
- for (i = 0; i < ARRAY_SIZE(msta->twt.flow); i++)
- mt7996_mac_twt_teardown_flow(dev, msta, i);
+ link = mt7996_vif_link(dev, vif, link_id);
+ if (!link)
+ continue;
- mt7996_mcu_add_sta(dev, vif, &link->mt76, sta,
- CONN_STATE_DISCONNECT, false);
- msta->wcid.sta_disabled = 1;
- msta->wcid.sta = 0;
+ msta_link = mt76_dereference(msta->link[link_id], &dev->mt76);
+ if (!msta_link)
+ continue;
- return 0;
+ switch (ev) {
+ case MT76_STA_EVENT_ASSOC:
+ err = mt7996_mcu_add_sta(dev, link_conf, link_sta,
+ link, msta_link,
+ CONN_STATE_CONNECT, true);
+ if (err)
+ return err;
+
+ err = mt7996_mcu_add_rate_ctrl(dev, vif, link_conf,
+ link_sta, link,
+ msta_link, false);
+ if (err)
+ return err;
+
+ msta_link->wcid.tx_info |= MT_WCID_TX_INFO_SET;
+ msta_link->wcid.sta = 1;
+ break;
+ case MT76_STA_EVENT_AUTHORIZE:
+ err = mt7996_mcu_add_sta(dev, link_conf, link_sta,
+ link, msta_link,
+ CONN_STATE_PORT_SECURE, false);
+ if (err)
+ return err;
+ break;
+ case MT76_STA_EVENT_DISASSOC:
+ for (i = 0; i < ARRAY_SIZE(msta_link->twt.flow); i++)
+ mt7996_mac_twt_teardown_flow(dev, link,
+ msta_link, i);
+
+ if (sta->mlo && links == BIT(link_id)) /* last link */
+ mt7996_mcu_teardown_mld_sta(dev, link,
+ msta_link);
+ else
+ mt7996_mcu_add_sta(dev, link_conf, link_sta,
+ link, msta_link,
+ CONN_STATE_DISCONNECT, false);
+ msta_link->wcid.sta_disabled = 1;
+ msta_link->wcid.sta = 0;
+ links = links & ~BIT(link_id);
+ break;
+ }
}
return 0;
}
-void mt7996_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
+static void
+mt7996_mac_sta_remove(struct mt76_phy *mphy, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
{
+ struct mt76_dev *mdev = mphy->dev;
struct mt7996_dev *dev = container_of(mdev, struct mt7996_dev, mt76);
- struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
+ unsigned long links = sta->mlo ? sta->valid_links : BIT(0);
- mt7996_mac_wtbl_update(dev, msta->wcid.idx,
- MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
+ mutex_lock(&mdev->mutex);
+
+ mt7996_mac_sta_remove_links(dev, sta, links);
+ mphy->num_sta--;
+
+ mutex_unlock(&mdev->mutex);
+}
+
+static int
+mt7996_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, enum ieee80211_sta_state old_state,
+ enum ieee80211_sta_state new_state)
+{
+ struct mt76_phy *mphy = mt76_vif_phy(hw, vif);
+ struct mt7996_dev *dev = mt7996_hw_dev(hw);
+ enum mt76_sta_event ev;
- spin_lock_bh(&mdev->sta_poll_lock);
- if (!list_empty(&msta->wcid.poll_list))
- list_del_init(&msta->wcid.poll_list);
- if (!list_empty(&msta->rc_list))
- list_del_init(&msta->rc_list);
- spin_unlock_bh(&mdev->sta_poll_lock);
+ if (!mphy)
+ return -EINVAL;
+
+ if (old_state == IEEE80211_STA_NOTEXIST &&
+ new_state == IEEE80211_STA_NONE)
+ return mt7996_mac_sta_add(mphy, vif, sta);
+
+ if (old_state == IEEE80211_STA_NONE &&
+ new_state == IEEE80211_STA_NOTEXIST)
+ mt7996_mac_sta_remove(mphy, vif, sta);
+
+ if (old_state == IEEE80211_STA_AUTH &&
+ new_state == IEEE80211_STA_ASSOC)
+ ev = MT76_STA_EVENT_ASSOC;
+ else if (old_state == IEEE80211_STA_ASSOC &&
+ new_state == IEEE80211_STA_AUTHORIZED)
+ ev = MT76_STA_EVENT_AUTHORIZE;
+ else if (old_state == IEEE80211_STA_ASSOC &&
+ new_state == IEEE80211_STA_AUTH)
+ ev = MT76_STA_EVENT_DISASSOC;
+ else
+ return 0;
+
+ return mt7996_mac_sta_event(dev, vif, sta, ev);
}
static void mt7996_tx(struct ieee80211_hw *hw,
@@ -855,12 +1200,18 @@ static void mt7996_tx(struct ieee80211_hw *hw,
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_vif *vif = info->control.vif;
struct mt76_wcid *wcid = &dev->mt76.global_wcid;
+ u8 link_id = u32_get_bits(info->control.flags,
+ IEEE80211_TX_CTRL_MLO_LINK);
+
+ rcu_read_lock();
if (vif) {
- struct mt7996_vif *mvif;
+ struct mt7996_vif *mvif = (void *)vif->drv_priv;
+ struct mt76_vif_link *mlink;
- mvif = (struct mt7996_vif *)vif->drv_priv;
- wcid = &mvif->deflink.sta.wcid;
+ mlink = rcu_dereference(mvif->mt76.link[link_id]);
+ if (mlink && mlink->wcid)
+ wcid = mlink->wcid;
if (mvif->mt76.roc_phy &&
(info->flags & IEEE80211_TX_CTL_TX_OFFCHAN)) {
@@ -872,19 +1223,22 @@ static void mt7996_tx(struct ieee80211_hw *hw,
}
}
- if (control->sta) {
- struct mt7996_sta *sta;
-
- sta = (struct mt7996_sta *)control->sta->drv_priv;
- wcid = &sta->wcid;
- }
-
if (!mphy) {
ieee80211_free_txskb(hw, skb);
- return;
+ goto unlock;
}
+ if (control->sta) {
+ struct mt7996_sta *msta = (void *)control->sta->drv_priv;
+ struct mt7996_sta_link *msta_link;
+
+ msta_link = rcu_dereference(msta->link[link_id]);
+ if (msta_link)
+ wcid = &msta_link->wcid;
+ }
mt76_tx(mphy, control->sta, wcid, skb);
+unlock:
+ rcu_read_unlock();
}
static int mt7996_set_rts_threshold(struct ieee80211_hw *hw, u32 val)
@@ -914,11 +1268,13 @@ mt7996_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
enum ieee80211_ampdu_mlme_action action = params->action;
struct mt7996_dev *dev = mt7996_hw_dev(hw);
struct ieee80211_sta *sta = params->sta;
- struct ieee80211_txq *txq = sta->txq[params->tid];
struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
+ struct ieee80211_txq *txq = sta->txq[params->tid];
+ struct ieee80211_link_sta *link_sta;
u16 tid = params->tid;
u16 ssn = params->ssn;
struct mt76_txq *mtxq;
+ unsigned int link_id;
int ret = 0;
if (!txq)
@@ -927,38 +1283,61 @@ mt7996_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
mtxq = (struct mt76_txq *)txq->drv_priv;
mutex_lock(&dev->mt76.mutex);
- switch (action) {
- case IEEE80211_AMPDU_RX_START:
- mt76_rx_aggr_start(&dev->mt76, &msta->wcid, tid, ssn,
- params->buf_size);
- ret = mt7996_mcu_add_rx_ba(dev, params, true);
- break;
- case IEEE80211_AMPDU_RX_STOP:
- mt76_rx_aggr_stop(&dev->mt76, &msta->wcid, tid);
- ret = mt7996_mcu_add_rx_ba(dev, params, false);
- break;
- case IEEE80211_AMPDU_TX_OPERATIONAL:
- mtxq->aggr = true;
- mtxq->send_bar = false;
- ret = mt7996_mcu_add_tx_ba(dev, params, true);
- break;
- case IEEE80211_AMPDU_TX_STOP_FLUSH:
- case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
- mtxq->aggr = false;
- clear_bit(tid, &msta->wcid.ampdu_state);
- ret = mt7996_mcu_add_tx_ba(dev, params, false);
- break;
- case IEEE80211_AMPDU_TX_START:
- set_bit(tid, &msta->wcid.ampdu_state);
- ret = IEEE80211_AMPDU_TX_START_IMMEDIATE;
- break;
- case IEEE80211_AMPDU_TX_STOP_CONT:
- mtxq->aggr = false;
- clear_bit(tid, &msta->wcid.ampdu_state);
- ret = mt7996_mcu_add_tx_ba(dev, params, false);
- ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
- break;
+
+ for_each_sta_active_link(vif, sta, link_sta, link_id) {
+ struct mt7996_sta_link *msta_link;
+ struct mt7996_vif_link *link;
+
+ msta_link = mt76_dereference(msta->link[link_id], &dev->mt76);
+ if (!msta_link)
+ continue;
+
+ link = mt7996_vif_link(dev, vif, link_id);
+ if (!link)
+ continue;
+
+ switch (action) {
+ case IEEE80211_AMPDU_RX_START:
+ mt76_rx_aggr_start(&dev->mt76, &msta_link->wcid, tid,
+ ssn, params->buf_size);
+ ret = mt7996_mcu_add_rx_ba(dev, params, link, true);
+ break;
+ case IEEE80211_AMPDU_RX_STOP:
+ mt76_rx_aggr_stop(&dev->mt76, &msta_link->wcid, tid);
+ ret = mt7996_mcu_add_rx_ba(dev, params, link, false);
+ break;
+ case IEEE80211_AMPDU_TX_OPERATIONAL:
+ mtxq->aggr = true;
+ mtxq->send_bar = false;
+ ret = mt7996_mcu_add_tx_ba(dev, params, link,
+ msta_link, true);
+ break;
+ case IEEE80211_AMPDU_TX_STOP_FLUSH:
+ case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
+ mtxq->aggr = false;
+ clear_bit(tid, &msta_link->wcid.ampdu_state);
+ ret = mt7996_mcu_add_tx_ba(dev, params, link,
+ msta_link, false);
+ break;
+ case IEEE80211_AMPDU_TX_START:
+ set_bit(tid, &msta_link->wcid.ampdu_state);
+ ret = IEEE80211_AMPDU_TX_START_IMMEDIATE;
+ break;
+ case IEEE80211_AMPDU_TX_STOP_CONT:
+ mtxq->aggr = false;
+ clear_bit(tid, &msta_link->wcid.ampdu_state);
+ ret = mt7996_mcu_add_tx_ba(dev, params, link,
+ msta_link, false);
+ break;
+ }
+
+ if (ret)
+ break;
}
+
+ if (action == IEEE80211_AMPDU_TX_STOP_CONT)
+ ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+
mutex_unlock(&dev->mt76.mutex);
return ret;
@@ -989,10 +1368,10 @@ mt7996_get_stats(struct ieee80211_hw *hw,
return 0;
}
-u64 __mt7996_get_tsf(struct ieee80211_hw *hw, struct mt7996_vif *mvif)
+u64 __mt7996_get_tsf(struct ieee80211_hw *hw, struct mt7996_vif_link *link)
{
struct mt7996_dev *dev = mt7996_hw_dev(hw);
- struct mt7996_phy *phy = mt7996_vif_link_phy(&mvif->deflink);
+ struct mt7996_phy *phy = link->phy;
union {
u64 t64;
u32 t32[2];
@@ -1004,8 +1383,8 @@ u64 __mt7996_get_tsf(struct ieee80211_hw *hw, struct mt7996_vif *mvif)
lockdep_assert_held(&dev->mt76.mutex);
- n = mvif->deflink.mt76.omac_idx > HW_BSSID_MAX ? HW_BSSID_0
- : mvif->deflink.mt76.omac_idx;
+ n = link->mt76.omac_idx > HW_BSSID_MAX ? HW_BSSID_0
+ : link->mt76.omac_idx;
/* TSF software read */
mt76_rmw(dev, MT_LPON_TCR(phy->mt76->band_idx, n), MT_LPON_TCR_SW_MODE,
MT_LPON_TCR_SW_READ);
@@ -1023,7 +1402,7 @@ mt7996_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
u64 ret;
mutex_lock(&dev->mt76.mutex);
- ret = __mt7996_get_tsf(hw, mvif);
+ ret = __mt7996_get_tsf(hw, &mvif->deflink);
mutex_unlock(&dev->mt76.mutex);
return ret;
@@ -1035,26 +1414,33 @@ mt7996_set_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
{
struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
struct mt7996_dev *dev = mt7996_hw_dev(hw);
- struct mt7996_phy *phy = mt7996_vif_link_phy(&mvif->deflink);
+ struct mt7996_vif_link *link;
+ struct mt7996_phy *phy;
union {
u64 t64;
u32 t32[2];
} tsf = { .t64 = timestamp, };
u16 n;
- if (!phy)
- return;
-
mutex_lock(&dev->mt76.mutex);
- n = mvif->deflink.mt76.omac_idx > HW_BSSID_MAX ? HW_BSSID_0
- : mvif->deflink.mt76.omac_idx;
+ link = mt7996_vif_link(dev, vif, mvif->mt76.deflink_id);
+ if (!link)
+ goto unlock;
+
+ n = link->mt76.omac_idx > HW_BSSID_MAX ? HW_BSSID_0
+ : link->mt76.omac_idx;
+ phy = link->phy;
+ if (!phy)
+ goto unlock;
+
mt76_wr(dev, MT_LPON_UTTR0(phy->mt76->band_idx), tsf.t32[0]);
mt76_wr(dev, MT_LPON_UTTR1(phy->mt76->band_idx), tsf.t32[1]);
/* TSF software overwrite */
mt76_rmw(dev, MT_LPON_TCR(phy->mt76->band_idx, n), MT_LPON_TCR_SW_MODE,
MT_LPON_TCR_SW_WRITE);
+unlock:
mutex_unlock(&dev->mt76.mutex);
}
@@ -1064,26 +1450,33 @@ mt7996_offset_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
{
struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
struct mt7996_dev *dev = mt7996_hw_dev(hw);
- struct mt7996_phy *phy = mt7996_vif_link_phy(&mvif->deflink);
+ struct mt7996_vif_link *link;
+ struct mt7996_phy *phy;
union {
u64 t64;
u32 t32[2];
} tsf = { .t64 = timestamp, };
u16 n;
- if (!phy)
- return;
-
mutex_lock(&dev->mt76.mutex);
- n = mvif->deflink.mt76.omac_idx > HW_BSSID_MAX ? HW_BSSID_0
- : mvif->deflink.mt76.omac_idx;
+ link = mt7996_vif_link(dev, vif, mvif->mt76.deflink_id);
+ if (!link)
+ goto unlock;
+
+ phy = link->phy;
+ if (!phy)
+ goto unlock;
+
+ n = link->mt76.omac_idx > HW_BSSID_MAX ? HW_BSSID_0
+ : link->mt76.omac_idx;
mt76_wr(dev, MT_LPON_UTTR0(phy->mt76->band_idx), tsf.t32[0]);
mt76_wr(dev, MT_LPON_UTTR1(phy->mt76->band_idx), tsf.t32[1]);
/* TSF software adjust */
mt76_rmw(dev, MT_LPON_TCR(phy->mt76->band_idx, n), MT_LPON_TCR_SW_MODE,
MT_LPON_TCR_SW_ADJUST);
+unlock:
mutex_unlock(&dev->mt76.mutex);
}
@@ -1145,7 +1538,8 @@ static void mt7996_sta_statistics(struct ieee80211_hw *hw,
{
struct mt7996_dev *dev = mt7996_hw_dev(hw);
struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
- struct rate_info *txrate = &msta->wcid.rate;
+ struct mt7996_sta_link *msta_link = &msta->deflink;
+ struct rate_info *txrate = &msta_link->wcid.rate;
if (txrate->legacy || txrate->flags) {
if (txrate->legacy) {
@@ -1165,55 +1559,67 @@ static void mt7996_sta_statistics(struct ieee80211_hw *hw,
sinfo->txrate.flags = txrate->flags;
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE);
- sinfo->tx_failed = msta->wcid.stats.tx_failed;
+ sinfo->tx_failed = msta_link->wcid.stats.tx_failed;
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_FAILED);
- sinfo->tx_retries = msta->wcid.stats.tx_retries;
+ sinfo->tx_retries = msta_link->wcid.stats.tx_retries;
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_RETRIES);
- sinfo->ack_signal = (s8)msta->ack_signal;
+ sinfo->ack_signal = (s8)msta_link->ack_signal;
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_ACK_SIGNAL);
- sinfo->avg_ack_signal = -(s8)ewma_avg_signal_read(&msta->avg_ack_signal);
+ sinfo->avg_ack_signal =
+ -(s8)ewma_avg_signal_read(&msta_link->avg_ack_signal);
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_ACK_SIGNAL_AVG);
if (mtk_wed_device_active(&dev->mt76.mmio.wed)) {
- sinfo->tx_bytes = msta->wcid.stats.tx_bytes;
+ sinfo->tx_bytes = msta_link->wcid.stats.tx_bytes;
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BYTES64);
- sinfo->rx_bytes = msta->wcid.stats.rx_bytes;
+ sinfo->rx_bytes = msta_link->wcid.stats.rx_bytes;
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_BYTES64);
- sinfo->tx_packets = msta->wcid.stats.tx_packets;
+ sinfo->tx_packets = msta_link->wcid.stats.tx_packets;
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_PACKETS);
- sinfo->rx_packets = msta->wcid.stats.rx_packets;
+ sinfo->rx_packets = msta_link->wcid.stats.rx_packets;
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_PACKETS);
}
}
-static void mt7996_sta_rc_work(void *data, struct ieee80211_sta *sta)
+static void mt7996_link_rate_ctrl_update(void *data, struct ieee80211_sta *sta)
{
struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
struct mt7996_dev *dev = msta->vif->deflink.phy->dev;
+ struct mt7996_sta_link *msta_link;
u32 *changed = data;
+ rcu_read_lock();
+
+ msta_link = rcu_dereference(msta->link[msta->deflink_id]);
+ if (!msta_link)
+ goto out;
+
spin_lock_bh(&dev->mt76.sta_poll_lock);
- msta->changed |= *changed;
- if (list_empty(&msta->rc_list))
- list_add_tail(&msta->rc_list, &dev->sta_rc_list);
+
+ msta_link->changed |= *changed;
+ if (list_empty(&msta_link->rc_list))
+ list_add_tail(&msta_link->rc_list, &dev->sta_rc_list);
+
spin_unlock_bh(&dev->mt76.sta_poll_lock);
+out:
+ rcu_read_unlock();
}
-static void mt7996_sta_rc_update(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_link_sta *link_sta,
- u32 changed)
+static void mt7996_link_sta_rc_update(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_link_sta *link_sta,
+ u32 changed)
{
struct mt7996_dev *dev = mt7996_hw_dev(hw);
struct ieee80211_sta *sta = link_sta->sta;
- mt7996_sta_rc_work(&changed, sta);
+ mt7996_link_rate_ctrl_update(&changed, sta);
ieee80211_queue_work(hw, &dev->rc_work);
}
@@ -1235,7 +1641,8 @@ mt7996_set_bitrate_mask(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
* - multiple rates: if it's not in range format, i.e. 0-{7,8,9} for VHT,
* then setting multiple MCS values (MCS 4,5,6) is not supported.
*/
- ieee80211_iterate_stations_atomic(hw, mt7996_sta_rc_work, &changed);
+ ieee80211_iterate_stations_atomic(hw, mt7996_link_rate_ctrl_update,
+ &changed);
ieee80211_queue_work(hw, &dev->rc_work);
return 0;
@@ -1246,18 +1653,37 @@ static void mt7996_sta_set_4addr(struct ieee80211_hw *hw,
struct ieee80211_sta *sta,
bool enabled)
{
- struct mt7996_dev *dev = mt7996_hw_dev(hw);
struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
+ struct mt7996_dev *dev = mt7996_hw_dev(hw);
+ struct ieee80211_link_sta *link_sta;
+ unsigned int link_id;
- if (enabled)
- set_bit(MT_WCID_FLAG_4ADDR, &msta->wcid.flags);
- else
- clear_bit(MT_WCID_FLAG_4ADDR, &msta->wcid.flags);
+ mutex_lock(&dev->mt76.mutex);
- if (!msta->wcid.sta)
- return;
+ for_each_sta_active_link(vif, sta, link_sta, link_id) {
+ struct mt7996_sta_link *msta_link;
+ struct mt7996_vif_link *link;
+
+ link = mt7996_vif_link(dev, vif, link_id);
+ if (!link)
+ continue;
- mt7996_mcu_wtbl_update_hdr_trans(dev, vif, sta);
+ msta_link = mt76_dereference(msta->link[link_id], &dev->mt76);
+ if (!msta_link)
+ continue;
+
+ if (enabled)
+ set_bit(MT_WCID_FLAG_4ADDR, &msta_link->wcid.flags);
+ else
+ clear_bit(MT_WCID_FLAG_4ADDR, &msta_link->wcid.flags);
+
+ if (!msta_link->wcid.sta)
+ continue;
+
+ mt7996_mcu_wtbl_update_hdr_trans(dev, vif, link, msta_link);
+ }
+
+ mutex_unlock(&dev->mt76.mutex);
}
static void mt7996_sta_set_decap_offload(struct ieee80211_hw *hw,
@@ -1265,18 +1691,39 @@ static void mt7996_sta_set_decap_offload(struct ieee80211_hw *hw,
struct ieee80211_sta *sta,
bool enabled)
{
- struct mt7996_dev *dev = mt7996_hw_dev(hw);
struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
+ struct mt7996_dev *dev = mt7996_hw_dev(hw);
+ struct ieee80211_link_sta *link_sta;
+ unsigned int link_id;
- if (enabled)
- set_bit(MT_WCID_FLAG_HDR_TRANS, &msta->wcid.flags);
- else
- clear_bit(MT_WCID_FLAG_HDR_TRANS, &msta->wcid.flags);
+ mutex_lock(&dev->mt76.mutex);
- if (!msta->wcid.sta)
- return;
+ for_each_sta_active_link(vif, sta, link_sta, link_id) {
+ struct mt7996_sta_link *msta_link;
+ struct mt7996_vif_link *link;
+
+ link = mt7996_vif_link(dev, vif, link_id);
+ if (!link)
+ continue;
+
+ msta_link = mt76_dereference(msta->link[link_id], &dev->mt76);
+ if (!msta_link)
+ continue;
+
+ if (enabled)
+ set_bit(MT_WCID_FLAG_HDR_TRANS,
+ &msta_link->wcid.flags);
+ else
+ clear_bit(MT_WCID_FLAG_HDR_TRANS,
+ &msta_link->wcid.flags);
- mt7996_mcu_wtbl_update_hdr_trans(dev, vif, sta);
+ if (!msta_link->wcid.sta)
+ continue;
+
+ mt7996_mcu_wtbl_update_hdr_trans(dev, vif, link, msta_link);
+ }
+
+ mutex_unlock(&dev->mt76.mutex);
}
static const char mt7996_gstrings_stats[][ETH_GSTRING_LEN] = {
@@ -1408,11 +1855,12 @@ static void mt7996_ethtool_worker(void *wi_data, struct ieee80211_sta *sta)
{
struct mt76_ethtool_worker_info *wi = wi_data;
struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
+ struct mt7996_sta_link *msta_link = &msta->deflink;
if (msta->vif->deflink.mt76.idx != wi->idx)
return;
- mt76_ethtool_worker(wi, &msta->wcid.stats, true);
+ mt76_ethtool_worker(wi, &msta_link->wcid.stats, true);
}
static
@@ -1516,10 +1964,12 @@ mt7996_twt_teardown_request(struct ieee80211_hw *hw,
u8 flowid)
{
struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
+ struct mt7996_sta_link *msta_link = &msta->deflink;
+ struct mt7996_vif_link *link = &msta->vif->deflink;
struct mt7996_dev *dev = mt7996_hw_dev(hw);
mutex_lock(&dev->mt76.mutex);
- mt7996_mac_twt_teardown_flow(dev, msta, flowid);
+ mt7996_mac_twt_teardown_flow(dev, link, msta_link, flowid);
mutex_unlock(&dev->mt76.mutex);
}
@@ -1589,12 +2039,26 @@ mt7996_net_fill_forward_path(struct ieee80211_hw *hw,
{
struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
- struct mt7996_vif_link *mlink = &mvif->deflink;
struct mt7996_dev *dev = mt7996_hw_dev(hw);
struct mtk_wed_device *wed = &dev->mt76.mmio.wed;
+ struct mt7996_sta_link *msta_link;
+ struct mt7996_vif_link *link;
+ struct mt76_vif_link *mlink;
struct mt7996_phy *phy;
- phy = mt7996_vif_link_phy(mlink);
+ mlink = rcu_dereference(mvif->mt76.link[msta->deflink_id]);
+ if (!mlink)
+ return -EIO;
+
+ msta_link = rcu_dereference(msta->link[msta->deflink_id]);
+ if (!msta_link)
+ return -EIO;
+
+ if (!msta_link->wcid.sta || msta_link->wcid.idx > MT7996_WTBL_STA)
+ return -EIO;
+
+ link = (struct mt7996_vif_link *)mlink;
+ phy = mt7996_vif_link_phy(link);
if (!phy)
return -ENODEV;
@@ -1604,15 +2068,12 @@ mt7996_net_fill_forward_path(struct ieee80211_hw *hw,
if (!mtk_wed_device_active(wed))
return -ENODEV;
- if (!msta->wcid.sta || msta->wcid.idx > MT7996_WTBL_STA)
- return -EIO;
-
path->type = DEV_PATH_MTK_WDMA;
path->dev = ctx->dev;
path->mtk_wdma.wdma_idx = wed->wdma_idx;
- path->mtk_wdma.bss = mvif->deflink.mt76.idx;
+ path->mtk_wdma.bss = mlink->idx;
path->mtk_wdma.queue = 0;
- path->mtk_wdma.wcid = msta->wcid.idx;
+ path->mtk_wdma.wcid = msta_link->wcid.idx;
path->mtk_wdma.amsdu = mtk_wed_is_amsdu_supported(wed);
ctx->dev = NULL;
@@ -1622,6 +2083,14 @@ mt7996_net_fill_forward_path(struct ieee80211_hw *hw,
#endif
+static int
+mt7996_change_vif_links(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ u16 old_links, u16 new_links,
+ struct ieee80211_bss_conf *old[IEEE80211_MLD_MAX_NUM_LINKS])
+{
+ return 0;
+}
+
const struct ieee80211_ops mt7996_ops = {
.add_chanctx = mt76_add_chanctx,
.remove_chanctx = mt76_remove_chanctx,
@@ -1637,10 +2106,11 @@ const struct ieee80211_ops mt7996_ops = {
.config = mt7996_config,
.conf_tx = mt7996_conf_tx,
.configure_filter = mt7996_configure_filter,
- .bss_info_changed = mt7996_bss_info_changed,
- .sta_state = mt76_sta_state,
+ .vif_cfg_changed = mt7996_vif_cfg_changed,
+ .link_info_changed = mt7996_link_info_changed,
+ .sta_state = mt7996_sta_state,
.sta_pre_rcu_remove = mt76_sta_pre_rcu_remove,
- .link_sta_rc_update = mt7996_sta_rc_update,
+ .link_sta_rc_update = mt7996_link_sta_rc_update,
.set_key = mt7996_set_key,
.ampdu_action = mt7996_ampdu_action,
.set_rts_threshold = mt7996_set_rts_threshold,
@@ -1650,7 +2120,7 @@ const struct ieee80211_ops mt7996_ops = {
.remain_on_channel = mt76_remain_on_channel,
.cancel_remain_on_channel = mt76_cancel_remain_on_channel,
.release_buffered_frames = mt76_release_buffered_frames,
- .get_txpower = mt76_get_txpower,
+ .get_txpower = mt7996_get_txpower,
.channel_switch_beacon = mt7996_channel_switch_beacon,
.get_stats = mt7996_get_stats,
.get_et_sset_count = mt7996_get_et_sset_count,
@@ -1677,4 +2147,6 @@ const struct ieee80211_ops mt7996_ops = {
.net_fill_forward_path = mt7996_net_fill_forward_path,
.net_setup_tc = mt76_wed_net_setup_tc,
#endif
+ .change_vif_links = mt7996_change_vif_links,
+ .change_sta_links = mt7996_mac_sta_change_links,
};
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c
index e4569d032221..ddd555942c73 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c
@@ -118,13 +118,13 @@ mt7996_mcu_get_sta_nss(u16 mcs_map)
}
static void
-mt7996_mcu_set_sta_he_mcs(struct ieee80211_sta *sta, __le16 *he_mcs,
- u16 mcs_map)
+mt7996_mcu_set_sta_he_mcs(struct ieee80211_link_sta *link_sta,
+ struct mt7996_vif_link *link,
+ __le16 *he_mcs, u16 mcs_map)
{
- struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
- enum nl80211_band band = msta->vif->deflink.phy->mt76->chandef.chan->band;
- const u16 *mask = msta->vif->deflink.bitrate_mask.control[band].he_mcs;
- int nss, max_nss = sta->deflink.rx_nss > 3 ? 4 : sta->deflink.rx_nss;
+ int nss, max_nss = link_sta->rx_nss > 3 ? 4 : link_sta->rx_nss;
+ enum nl80211_band band = link->phy->mt76->chandef.chan->band;
+ const u16 *mask = link->bitrate_mask.control[band].he_mcs;
for (nss = 0; nss < max_nss; nss++) {
int mcs;
@@ -167,11 +167,11 @@ mt7996_mcu_set_sta_he_mcs(struct ieee80211_sta *sta, __le16 *he_mcs,
}
static void
-mt7996_mcu_set_sta_vht_mcs(struct ieee80211_sta *sta, __le16 *vht_mcs,
- const u16 *mask)
+mt7996_mcu_set_sta_vht_mcs(struct ieee80211_link_sta *link_sta,
+ __le16 *vht_mcs, const u16 *mask)
{
- u16 mcs, mcs_map = le16_to_cpu(sta->deflink.vht_cap.vht_mcs.rx_mcs_map);
- int nss, max_nss = sta->deflink.rx_nss > 3 ? 4 : sta->deflink.rx_nss;
+ u16 mcs, mcs_map = le16_to_cpu(link_sta->vht_cap.vht_mcs.rx_mcs_map);
+ int nss, max_nss = link_sta->rx_nss > 3 ? 4 : link_sta->rx_nss;
for (nss = 0; nss < max_nss; nss++, mcs_map >>= 2) {
switch (mcs_map & 0x3) {
@@ -193,13 +193,13 @@ mt7996_mcu_set_sta_vht_mcs(struct ieee80211_sta *sta, __le16 *vht_mcs,
}
static void
-mt7996_mcu_set_sta_ht_mcs(struct ieee80211_sta *sta, u8 *ht_mcs,
- const u8 *mask)
+mt7996_mcu_set_sta_ht_mcs(struct ieee80211_link_sta *link_sta,
+ u8 *ht_mcs, const u8 *mask)
{
- int nss, max_nss = sta->deflink.rx_nss > 3 ? 4 : sta->deflink.rx_nss;
+ int nss, max_nss = link_sta->rx_nss > 3 ? 4 : link_sta->rx_nss;
for (nss = 0; nss < max_nss; nss++)
- ht_mcs[nss] = sta->deflink.ht_cap.mcs.rx_mask[nss] & mask[nss];
+ ht_mcs[nss] = link_sta->ht_cap.mcs.rx_mask[nss] & mask[nss];
}
static int
@@ -1064,7 +1064,8 @@ __mt7996_mcu_alloc_bss_req(struct mt76_dev *dev, struct mt76_vif_link *mvif, int
int mt7996_mcu_add_bss_info(struct mt7996_phy *phy, struct ieee80211_vif *vif,
struct ieee80211_bss_conf *link_conf,
- struct mt76_vif_link *mlink, int enable)
+ struct mt76_vif_link *mlink,
+ struct mt7996_sta_link *msta_link, int enable)
{
struct mt7996_dev *dev = phy->dev;
struct sk_buff *skb;
@@ -1081,7 +1082,7 @@ int mt7996_mcu_add_bss_info(struct mt7996_phy *phy, struct ieee80211_vif *vif,
/* bss_basic must be first */
mt7996_mcu_bss_basic_tlv(skb, vif, link_conf, mlink, phy->mt76,
- mlink->wcid->idx, enable);
+ msta_link->wcid.idx, enable);
mt7996_mcu_bss_sec_tlv(skb, mlink);
if (vif->type == NL80211_IFTYPE_MONITOR)
@@ -1159,37 +1160,34 @@ mt7996_mcu_sta_ba(struct mt7996_dev *dev, struct mt76_vif_link *mvif,
/** starec & wtbl **/
int mt7996_mcu_add_tx_ba(struct mt7996_dev *dev,
struct ieee80211_ampdu_params *params,
- bool enable)
+ struct mt7996_vif_link *link,
+ struct mt7996_sta_link *msta_link, bool enable)
{
- struct mt7996_sta *msta = (struct mt7996_sta *)params->sta->drv_priv;
- struct mt7996_vif *mvif = msta->vif;
-
if (enable && !params->amsdu)
- msta->wcid.amsdu = false;
+ msta_link->wcid.amsdu = false;
- return mt7996_mcu_sta_ba(dev, &mvif->deflink.mt76, params, enable, true);
+ return mt7996_mcu_sta_ba(dev, &link->mt76, params, enable, true);
}
int mt7996_mcu_add_rx_ba(struct mt7996_dev *dev,
struct ieee80211_ampdu_params *params,
- bool enable)
+ struct mt7996_vif_link *link, bool enable)
{
- struct mt7996_sta *msta = (struct mt7996_sta *)params->sta->drv_priv;
- struct mt7996_vif *mvif = msta->vif;
-
- return mt7996_mcu_sta_ba(dev, &mvif->deflink.mt76, params, enable, false);
+ return mt7996_mcu_sta_ba(dev, &link->mt76, params, enable, false);
}
static void
-mt7996_mcu_sta_he_tlv(struct sk_buff *skb, struct ieee80211_sta *sta)
+mt7996_mcu_sta_he_tlv(struct sk_buff *skb,
+ struct ieee80211_link_sta *link_sta,
+ struct mt7996_vif_link *link)
{
- struct ieee80211_he_cap_elem *elem = &sta->deflink.he_cap.he_cap_elem;
+ struct ieee80211_he_cap_elem *elem = &link_sta->he_cap.he_cap_elem;
struct ieee80211_he_mcs_nss_supp mcs_map;
struct sta_rec_he_v2 *he;
struct tlv *tlv;
int i = 0;
- if (!sta->deflink.he_cap.has_he)
+ if (!link_sta->he_cap.has_he)
return;
tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_HE_V2, sizeof(*he));
@@ -1201,21 +1199,21 @@ mt7996_mcu_sta_he_tlv(struct sk_buff *skb, struct ieee80211_sta *sta)
he->he_phy_cap[i] = elem->phy_cap_info[i];
}
- mcs_map = sta->deflink.he_cap.he_mcs_nss_supp;
- switch (sta->deflink.bandwidth) {
+ mcs_map = link_sta->he_cap.he_mcs_nss_supp;
+ switch (link_sta->bandwidth) {
case IEEE80211_STA_RX_BW_160:
if (elem->phy_cap_info[0] &
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G)
- mt7996_mcu_set_sta_he_mcs(sta,
+ mt7996_mcu_set_sta_he_mcs(link_sta, link,
&he->max_nss_mcs[CMD_HE_MCS_BW8080],
le16_to_cpu(mcs_map.rx_mcs_80p80));
- mt7996_mcu_set_sta_he_mcs(sta,
+ mt7996_mcu_set_sta_he_mcs(link_sta, link,
&he->max_nss_mcs[CMD_HE_MCS_BW160],
le16_to_cpu(mcs_map.rx_mcs_160));
fallthrough;
default:
- mt7996_mcu_set_sta_he_mcs(sta,
+ mt7996_mcu_set_sta_he_mcs(link_sta, link,
&he->max_nss_mcs[CMD_HE_MCS_BW80],
le16_to_cpu(mcs_map.rx_mcs_80));
break;
@@ -1225,24 +1223,26 @@ mt7996_mcu_sta_he_tlv(struct sk_buff *skb, struct ieee80211_sta *sta)
}
static void
-mt7996_mcu_sta_he_6g_tlv(struct sk_buff *skb, struct ieee80211_sta *sta)
+mt7996_mcu_sta_he_6g_tlv(struct sk_buff *skb,
+ struct ieee80211_link_sta *link_sta)
{
struct sta_rec_he_6g_capa *he_6g;
struct tlv *tlv;
- if (!sta->deflink.he_6ghz_capa.capa)
+ if (!link_sta->he_6ghz_capa.capa)
return;
tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_HE_6G, sizeof(*he_6g));
he_6g = (struct sta_rec_he_6g_capa *)tlv;
- he_6g->capa = sta->deflink.he_6ghz_capa.capa;
+ he_6g->capa = link_sta->he_6ghz_capa.capa;
}
static void
-mt7996_mcu_sta_eht_tlv(struct sk_buff *skb, struct ieee80211_sta *sta)
+mt7996_mcu_sta_eht_tlv(struct sk_buff *skb,
+ struct ieee80211_link_sta *link_sta)
{
- struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
+ struct mt7996_sta *msta = (struct mt7996_sta *)link_sta->sta->drv_priv;
struct ieee80211_vif *vif = container_of((void *)msta->vif,
struct ieee80211_vif, drv_priv);
struct ieee80211_eht_mcs_nss_supp *mcs_map;
@@ -1250,11 +1250,11 @@ mt7996_mcu_sta_eht_tlv(struct sk_buff *skb, struct ieee80211_sta *sta)
struct sta_rec_eht *eht;
struct tlv *tlv;
- if (!sta->deflink.eht_cap.has_eht)
+ if (!link_sta->eht_cap.has_eht)
return;
- mcs_map = &sta->deflink.eht_cap.eht_mcs_nss_supp;
- elem = &sta->deflink.eht_cap.eht_cap_elem;
+ mcs_map = &link_sta->eht_cap.eht_mcs_nss_supp;
+ elem = &link_sta->eht_cap.eht_cap_elem;
tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_EHT, sizeof(*eht));
@@ -1265,7 +1265,7 @@ mt7996_mcu_sta_eht_tlv(struct sk_buff *skb, struct ieee80211_sta *sta)
eht->phy_cap_ext = cpu_to_le64(elem->phy_cap_info[8]);
if (vif->type != NL80211_IFTYPE_STATION &&
- (sta->deflink.he_cap.he_cap_elem.phy_cap_info[0] &
+ (link_sta->he_cap.he_cap_elem.phy_cap_info[0] &
(IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G |
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G |
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G |
@@ -1281,47 +1281,48 @@ mt7996_mcu_sta_eht_tlv(struct sk_buff *skb, struct ieee80211_sta *sta)
}
static void
-mt7996_mcu_sta_ht_tlv(struct sk_buff *skb, struct ieee80211_sta *sta)
+mt7996_mcu_sta_ht_tlv(struct sk_buff *skb, struct ieee80211_link_sta *link_sta)
{
struct sta_rec_ht_uni *ht;
struct tlv *tlv;
- if (!sta->deflink.ht_cap.ht_supported)
+ if (!link_sta->ht_cap.ht_supported)
return;
tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_HT, sizeof(*ht));
ht = (struct sta_rec_ht_uni *)tlv;
- ht->ht_cap = cpu_to_le16(sta->deflink.ht_cap.cap);
- ht->ampdu_param = u8_encode_bits(sta->deflink.ht_cap.ampdu_factor,
+ ht->ht_cap = cpu_to_le16(link_sta->ht_cap.cap);
+ ht->ampdu_param = u8_encode_bits(link_sta->ht_cap.ampdu_factor,
IEEE80211_HT_AMPDU_PARM_FACTOR) |
- u8_encode_bits(sta->deflink.ht_cap.ampdu_density,
+ u8_encode_bits(link_sta->ht_cap.ampdu_density,
IEEE80211_HT_AMPDU_PARM_DENSITY);
}
static void
-mt7996_mcu_sta_vht_tlv(struct sk_buff *skb, struct ieee80211_sta *sta)
+mt7996_mcu_sta_vht_tlv(struct sk_buff *skb, struct ieee80211_link_sta *link_sta)
{
struct sta_rec_vht *vht;
struct tlv *tlv;
/* For 6G band, this tlv is necessary to let hw work normally */
- if (!sta->deflink.he_6ghz_capa.capa && !sta->deflink.vht_cap.vht_supported)
+ if (!link_sta->he_6ghz_capa.capa && !link_sta->vht_cap.vht_supported)
return;
tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_VHT, sizeof(*vht));
vht = (struct sta_rec_vht *)tlv;
- vht->vht_cap = cpu_to_le32(sta->deflink.vht_cap.cap);
- vht->vht_rx_mcs_map = sta->deflink.vht_cap.vht_mcs.rx_mcs_map;
- vht->vht_tx_mcs_map = sta->deflink.vht_cap.vht_mcs.tx_mcs_map;
+ vht->vht_cap = cpu_to_le32(link_sta->vht_cap.cap);
+ vht->vht_rx_mcs_map = link_sta->vht_cap.vht_mcs.rx_mcs_map;
+ vht->vht_tx_mcs_map = link_sta->vht_cap.vht_mcs.tx_mcs_map;
}
static void
mt7996_mcu_sta_amsdu_tlv(struct mt7996_dev *dev, struct sk_buff *skb,
- struct ieee80211_vif *vif, struct ieee80211_sta *sta)
+ struct ieee80211_vif *vif,
+ struct ieee80211_link_sta *link_sta,
+ struct mt7996_sta_link *msta_link)
{
- struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
struct sta_rec_amsdu *amsdu;
struct tlv *tlv;
@@ -1330,16 +1331,16 @@ mt7996_mcu_sta_amsdu_tlv(struct mt7996_dev *dev, struct sk_buff *skb,
vif->type != NL80211_IFTYPE_AP)
return;
- if (!sta->deflink.agg.max_amsdu_len)
+ if (!link_sta->agg.max_amsdu_len)
return;
tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_HW_AMSDU, sizeof(*amsdu));
amsdu = (struct sta_rec_amsdu *)tlv;
amsdu->max_amsdu_num = 8;
amsdu->amsdu_en = true;
- msta->wcid.amsdu = true;
+ msta_link->wcid.amsdu = true;
- switch (sta->deflink.agg.max_amsdu_len) {
+ switch (link_sta->agg.max_amsdu_len) {
case IEEE80211_MAX_MPDU_LEN_VHT_11454:
amsdu->max_mpdu_size =
IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454;
@@ -1356,30 +1357,31 @@ mt7996_mcu_sta_amsdu_tlv(struct mt7996_dev *dev, struct sk_buff *skb,
static void
mt7996_mcu_sta_muru_tlv(struct mt7996_dev *dev, struct sk_buff *skb,
- struct ieee80211_vif *vif, struct ieee80211_sta *sta)
+ struct ieee80211_bss_conf *link_conf,
+ struct ieee80211_link_sta *link_sta)
{
- struct ieee80211_he_cap_elem *elem = &sta->deflink.he_cap.he_cap_elem;
+ struct ieee80211_he_cap_elem *elem = &link_sta->he_cap.he_cap_elem;
struct sta_rec_muru *muru;
struct tlv *tlv;
- if (vif->type != NL80211_IFTYPE_STATION &&
- vif->type != NL80211_IFTYPE_AP)
+ if (link_conf->vif->type != NL80211_IFTYPE_STATION &&
+ link_conf->vif->type != NL80211_IFTYPE_AP)
return;
tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_MURU, sizeof(*muru));
muru = (struct sta_rec_muru *)tlv;
- muru->cfg.mimo_dl_en = vif->bss_conf.eht_mu_beamformer ||
- vif->bss_conf.he_mu_beamformer ||
- vif->bss_conf.vht_mu_beamformer ||
- vif->bss_conf.vht_mu_beamformee;
+ muru->cfg.mimo_dl_en = link_conf->eht_mu_beamformer ||
+ link_conf->he_mu_beamformer ||
+ link_conf->vht_mu_beamformer ||
+ link_conf->vht_mu_beamformee;
muru->cfg.ofdma_dl_en = true;
- if (sta->deflink.vht_cap.vht_supported)
+ if (link_sta->vht_cap.vht_supported)
muru->mimo_dl.vht_mu_bfee =
- !!(sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE);
+ !!(link_sta->vht_cap.cap & IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE);
- if (!sta->deflink.he_cap.has_he)
+ if (!link_sta->he_cap.has_he)
return;
muru->mimo_dl.partial_bw_dl_mimo =
@@ -1410,49 +1412,50 @@ mt7996_mcu_sta_muru_tlv(struct mt7996_dev *dev, struct sk_buff *skb,
}
static inline bool
-mt7996_is_ebf_supported(struct mt7996_phy *phy, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta, bool bfee)
+mt7996_is_ebf_supported(struct mt7996_phy *phy,
+ struct ieee80211_bss_conf *link_conf,
+ struct ieee80211_link_sta *link_sta, bool bfee)
{
int sts = hweight16(phy->mt76->chainmask);
- if (vif->type != NL80211_IFTYPE_STATION &&
- vif->type != NL80211_IFTYPE_AP)
+ if (link_conf->vif->type != NL80211_IFTYPE_STATION &&
+ link_conf->vif->type != NL80211_IFTYPE_AP)
return false;
if (!bfee && sts < 2)
return false;
- if (sta->deflink.eht_cap.has_eht) {
- struct ieee80211_sta_eht_cap *pc = &sta->deflink.eht_cap;
+ if (link_sta->eht_cap.has_eht) {
+ struct ieee80211_sta_eht_cap *pc = &link_sta->eht_cap;
struct ieee80211_eht_cap_elem_fixed *pe = &pc->eht_cap_elem;
if (bfee)
- return vif->bss_conf.eht_su_beamformee &&
+ return link_conf->eht_su_beamformee &&
EHT_PHY(CAP0_SU_BEAMFORMER, pe->phy_cap_info[0]);
else
- return vif->bss_conf.eht_su_beamformer &&
+ return link_conf->eht_su_beamformer &&
EHT_PHY(CAP0_SU_BEAMFORMEE, pe->phy_cap_info[0]);
}
- if (sta->deflink.he_cap.has_he) {
- struct ieee80211_he_cap_elem *pe = &sta->deflink.he_cap.he_cap_elem;
+ if (link_sta->he_cap.has_he) {
+ struct ieee80211_he_cap_elem *pe = &link_sta->he_cap.he_cap_elem;
if (bfee)
- return vif->bss_conf.he_su_beamformee &&
+ return link_conf->he_su_beamformee &&
HE_PHY(CAP3_SU_BEAMFORMER, pe->phy_cap_info[3]);
else
- return vif->bss_conf.he_su_beamformer &&
+ return link_conf->he_su_beamformer &&
HE_PHY(CAP4_SU_BEAMFORMEE, pe->phy_cap_info[4]);
}
- if (sta->deflink.vht_cap.vht_supported) {
- u32 cap = sta->deflink.vht_cap.cap;
+ if (link_sta->vht_cap.vht_supported) {
+ u32 cap = link_sta->vht_cap.cap;
if (bfee)
- return vif->bss_conf.vht_su_beamformee &&
+ return link_conf->vht_su_beamformee &&
(cap & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE);
else
- return vif->bss_conf.vht_su_beamformer &&
+ return link_conf->vht_su_beamformer &&
(cap & IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE);
}
@@ -1473,10 +1476,11 @@ mt7996_mcu_sta_sounding_rate(struct sta_rec_bf *bf, struct mt7996_phy *phy)
}
static void
-mt7996_mcu_sta_bfer_ht(struct ieee80211_sta *sta, struct mt7996_phy *phy,
- struct sta_rec_bf *bf, bool explicit)
+mt7996_mcu_sta_bfer_ht(struct ieee80211_link_sta *link_sta,
+ struct mt7996_phy *phy, struct sta_rec_bf *bf,
+ bool explicit)
{
- struct ieee80211_mcs_info *mcs = &sta->deflink.ht_cap.mcs;
+ struct ieee80211_mcs_info *mcs = &link_sta->ht_cap.mcs;
u8 n = 0;
bf->tx_mode = MT_PHY_TYPE_HT;
@@ -1499,10 +1503,11 @@ mt7996_mcu_sta_bfer_ht(struct ieee80211_sta *sta, struct mt7996_phy *phy,
}
static void
-mt7996_mcu_sta_bfer_vht(struct ieee80211_sta *sta, struct mt7996_phy *phy,
- struct sta_rec_bf *bf, bool explicit)
+mt7996_mcu_sta_bfer_vht(struct ieee80211_link_sta *link_sta,
+ struct mt7996_phy *phy, struct sta_rec_bf *bf,
+ bool explicit)
{
- struct ieee80211_sta_vht_cap *pc = &sta->deflink.vht_cap;
+ struct ieee80211_sta_vht_cap *pc = &link_sta->vht_cap;
struct ieee80211_sta_vht_cap *vc = &phy->mt76->sband_5g.sband.vht_cap;
u16 mcs_map = le16_to_cpu(pc->vht_mcs.rx_mcs_map);
u8 nss_mcs = mt7996_mcu_get_sta_nss(mcs_map);
@@ -1523,24 +1528,24 @@ mt7996_mcu_sta_bfer_vht(struct ieee80211_sta *sta, struct mt7996_phy *phy,
bf->ncol = min_t(u8, nss_mcs, bf->nrow);
bf->ibf_ncol = min_t(u8, MT7996_IBF_MAX_NC, bf->ncol);
- if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_160)
+ if (link_sta->bandwidth == IEEE80211_STA_RX_BW_160)
bf->nrow = 1;
} else {
bf->nrow = tx_ant;
bf->ncol = min_t(u8, nss_mcs, bf->nrow);
bf->ibf_ncol = min_t(u8, MT7996_IBF_MAX_NC, nss_mcs);
- if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_160)
+ if (link_sta->bandwidth == IEEE80211_STA_RX_BW_160)
bf->ibf_nrow = 1;
}
}
static void
-mt7996_mcu_sta_bfer_he(struct ieee80211_sta *sta, struct ieee80211_vif *vif,
- struct mt7996_phy *phy, struct sta_rec_bf *bf,
- bool explicit)
+mt7996_mcu_sta_bfer_he(struct ieee80211_link_sta *link_sta,
+ struct ieee80211_vif *vif, struct mt7996_phy *phy,
+ struct sta_rec_bf *bf, bool explicit)
{
- struct ieee80211_sta_he_cap *pc = &sta->deflink.he_cap;
+ struct ieee80211_sta_he_cap *pc = &link_sta->he_cap;
struct ieee80211_he_cap_elem *pe = &pc->he_cap_elem;
const struct ieee80211_sta_he_cap *vc =
mt76_connac_get_he_phy_cap(phy->mt76, vif);
@@ -1569,7 +1574,7 @@ mt7996_mcu_sta_bfer_he(struct ieee80211_sta *sta, struct ieee80211_vif *vif,
bf->ibf_ncol = explicit ? min_t(u8, MT7996_IBF_MAX_NC, bf->ncol) :
min_t(u8, MT7996_IBF_MAX_NC, nss_mcs);
- if (sta->deflink.bandwidth != IEEE80211_STA_RX_BW_160)
+ if (link_sta->bandwidth != IEEE80211_STA_RX_BW_160)
return;
/* go over for 160MHz and 80p80 */
@@ -1601,11 +1606,11 @@ mt7996_mcu_sta_bfer_he(struct ieee80211_sta *sta, struct ieee80211_vif *vif,
}
static void
-mt7996_mcu_sta_bfer_eht(struct ieee80211_sta *sta, struct ieee80211_vif *vif,
- struct mt7996_phy *phy, struct sta_rec_bf *bf,
- bool explicit)
+mt7996_mcu_sta_bfer_eht(struct ieee80211_link_sta *link_sta,
+ struct ieee80211_vif *vif, struct mt7996_phy *phy,
+ struct sta_rec_bf *bf, bool explicit)
{
- struct ieee80211_sta_eht_cap *pc = &sta->deflink.eht_cap;
+ struct ieee80211_sta_eht_cap *pc = &link_sta->eht_cap;
struct ieee80211_eht_cap_elem_fixed *pe = &pc->eht_cap_elem;
struct ieee80211_eht_mcs_nss_supp *eht_nss = &pc->eht_mcs_nss_supp;
const struct ieee80211_sta_eht_cap *vc =
@@ -1629,10 +1634,10 @@ mt7996_mcu_sta_bfer_eht(struct ieee80211_sta *sta, struct ieee80211_vif *vif,
bf->ibf_ncol = explicit ? min_t(u8, MT7996_IBF_MAX_NC, bf->ncol) :
min_t(u8, MT7996_IBF_MAX_NC, nss_mcs);
- if (sta->deflink.bandwidth < IEEE80211_STA_RX_BW_160)
+ if (link_sta->bandwidth < IEEE80211_STA_RX_BW_160)
return;
- switch (sta->deflink.bandwidth) {
+ switch (link_sta->bandwidth) {
case IEEE80211_STA_RX_BW_160:
snd_dim = EHT_PHY(CAP2_SOUNDING_DIM_160MHZ_MASK, ve->phy_cap_info[2]);
sts = EHT_PHY(CAP1_BEAMFORMEE_SS_160MHZ_MASK, pe->phy_cap_info[1]);
@@ -1660,13 +1665,15 @@ mt7996_mcu_sta_bfer_eht(struct ieee80211_sta *sta, struct ieee80211_vif *vif,
static void
mt7996_mcu_sta_bfer_tlv(struct mt7996_dev *dev, struct sk_buff *skb,
- struct ieee80211_vif *vif, struct ieee80211_sta *sta)
+ struct ieee80211_bss_conf *link_conf,
+ struct ieee80211_link_sta *link_sta,
+ struct mt7996_vif_link *link)
{
#define EBF_MODE BIT(0)
#define IBF_MODE BIT(1)
#define BF_MAT_ORDER 4
- struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
- struct mt7996_phy *phy = mvif->deflink.phy;
+ struct ieee80211_vif *vif = link_conf->vif;
+ struct mt7996_phy *phy = link->phy;
int tx_ant = hweight16(phy->mt76->chainmask) - 1;
struct sta_rec_bf *bf;
struct tlv *tlv;
@@ -1678,10 +1685,10 @@ mt7996_mcu_sta_bfer_tlv(struct mt7996_dev *dev, struct sk_buff *skb,
};
bool ebf;
- if (!(sta->deflink.ht_cap.ht_supported || sta->deflink.he_cap.has_he))
+ if (!(link_sta->ht_cap.ht_supported || link_sta->he_cap.has_he))
return;
- ebf = mt7996_is_ebf_supported(phy, vif, sta, false);
+ ebf = mt7996_is_ebf_supported(phy, link_conf, link_sta, false);
if (!ebf && !dev->ibf)
return;
@@ -1692,28 +1699,29 @@ mt7996_mcu_sta_bfer_tlv(struct mt7996_dev *dev, struct sk_buff *skb,
* vht: support eBF and iBF
* ht: iBF only, since mac80211 lacks eBF support
*/
- if (sta->deflink.eht_cap.has_eht)
- mt7996_mcu_sta_bfer_eht(sta, vif, phy, bf, ebf);
- else if (sta->deflink.he_cap.has_he)
- mt7996_mcu_sta_bfer_he(sta, vif, phy, bf, ebf);
- else if (sta->deflink.vht_cap.vht_supported)
- mt7996_mcu_sta_bfer_vht(sta, phy, bf, ebf);
- else if (sta->deflink.ht_cap.ht_supported)
- mt7996_mcu_sta_bfer_ht(sta, phy, bf, ebf);
+ if (link_sta->eht_cap.has_eht)
+ mt7996_mcu_sta_bfer_eht(link_sta, vif, link->phy, bf, ebf);
+ else if (link_sta->he_cap.has_he)
+ mt7996_mcu_sta_bfer_he(link_sta, vif, link->phy, bf, ebf);
+ else if (link_sta->vht_cap.vht_supported)
+ mt7996_mcu_sta_bfer_vht(link_sta, link->phy, bf, ebf);
+ else if (link_sta->ht_cap.ht_supported)
+ mt7996_mcu_sta_bfer_ht(link_sta, link->phy, bf, ebf);
else
return;
bf->bf_cap = ebf ? EBF_MODE : (dev->ibf ? IBF_MODE : 0);
if (is_mt7992(&dev->mt76) && tx_ant == 4)
bf->bf_cap |= IBF_MODE;
- bf->bw = sta->deflink.bandwidth;
- bf->ibf_dbw = sta->deflink.bandwidth;
+
+ bf->bw = link_sta->bandwidth;
+ bf->ibf_dbw = link_sta->bandwidth;
bf->ibf_nrow = tx_ant;
- if (sta->deflink.eht_cap.has_eht || sta->deflink.he_cap.has_he)
+ if (link_sta->eht_cap.has_eht || link_sta->he_cap.has_he)
bf->ibf_timeout = is_mt7996(&dev->mt76) ? MT7996_IBF_TIMEOUT :
MT7992_IBF_TIMEOUT;
- else if (!ebf && sta->deflink.bandwidth <= IEEE80211_STA_RX_BW_40 && !bf->ncol)
+ else if (!ebf && link_sta->bandwidth <= IEEE80211_STA_RX_BW_40 && !bf->ncol)
bf->ibf_timeout = MT7996_IBF_TIMEOUT_LEGACY;
else
bf->ibf_timeout = MT7996_IBF_TIMEOUT;
@@ -1727,7 +1735,7 @@ mt7996_mcu_sta_bfer_tlv(struct mt7996_dev *dev, struct sk_buff *skb,
matrix[bf->nrow][bf->ncol] : 0;
}
- switch (sta->deflink.bandwidth) {
+ switch (link_sta->bandwidth) {
case IEEE80211_STA_RX_BW_160:
case IEEE80211_STA_RX_BW_80:
bf->mem_total = bf->mem_20m * 2;
@@ -1743,31 +1751,32 @@ mt7996_mcu_sta_bfer_tlv(struct mt7996_dev *dev, struct sk_buff *skb,
static void
mt7996_mcu_sta_bfee_tlv(struct mt7996_dev *dev, struct sk_buff *skb,
- struct ieee80211_vif *vif, struct ieee80211_sta *sta)
+ struct ieee80211_bss_conf *link_conf,
+ struct ieee80211_link_sta *link_sta,
+ struct mt7996_vif_link *link)
{
- struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
- struct mt7996_phy *phy = mvif->deflink.phy;
+ struct mt7996_phy *phy = link->phy;
int tx_ant = hweight8(phy->mt76->antenna_mask) - 1;
struct sta_rec_bfee *bfee;
struct tlv *tlv;
u8 nrow = 0;
- if (!(sta->deflink.vht_cap.vht_supported || sta->deflink.he_cap.has_he))
+ if (!(link_sta->vht_cap.vht_supported || link_sta->he_cap.has_he))
return;
- if (!mt7996_is_ebf_supported(phy, vif, sta, true))
+ if (!mt7996_is_ebf_supported(phy, link_conf, link_sta, true))
return;
tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_BFEE, sizeof(*bfee));
bfee = (struct sta_rec_bfee *)tlv;
- if (sta->deflink.he_cap.has_he) {
- struct ieee80211_he_cap_elem *pe = &sta->deflink.he_cap.he_cap_elem;
+ if (link_sta->he_cap.has_he) {
+ struct ieee80211_he_cap_elem *pe = &link_sta->he_cap.he_cap_elem;
nrow = HE_PHY(CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_MASK,
pe->phy_cap_info[5]);
- } else if (sta->deflink.vht_cap.vht_supported) {
- struct ieee80211_sta_vht_cap *pc = &sta->deflink.vht_cap;
+ } else if (link_sta->vht_cap.vht_supported) {
+ struct ieee80211_sta_vht_cap *pc = &link_sta->vht_cap;
nrow = FIELD_GET(IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK,
pc->cap);
@@ -1874,18 +1883,19 @@ int mt7996_mcu_set_fixed_rate_ctrl(struct mt7996_dev *dev,
MCU_WM_UNI_CMD(RA), true);
}
-int mt7996_mcu_set_fixed_field(struct mt7996_dev *dev, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta, void *data, u32 field)
+int mt7996_mcu_set_fixed_field(struct mt7996_dev *dev,
+ struct ieee80211_link_sta *link_sta,
+ struct mt7996_vif_link *link,
+ struct mt7996_sta_link *msta_link,
+ void *data, u32 field)
{
- struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
- struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
struct sta_phy_uni *phy = data;
struct sta_rec_ra_fixed_uni *ra;
struct sk_buff *skb;
struct tlv *tlv;
- skb = __mt76_connac_mcu_alloc_sta_req(&dev->mt76, &mvif->deflink.mt76,
- &msta->wcid,
+ skb = __mt76_connac_mcu_alloc_sta_req(&dev->mt76, &link->mt76,
+ &msta_link->wcid,
MT7996_STA_UPDATE_MAX_SIZE);
if (IS_ERR(skb))
return PTR_ERR(skb);
@@ -1904,7 +1914,7 @@ int mt7996_mcu_set_fixed_field(struct mt7996_dev *dev, struct ieee80211_vif *vif
ra->phy = *phy;
break;
case RATE_PARAM_MMPS_UPDATE:
- ra->mmps_mode = mt7996_mcu_get_mmps_mode(sta->deflink.smps_mode);
+ ra->mmps_mode = mt7996_mcu_get_mmps_mode(link_sta->smps_mode);
break;
default:
break;
@@ -1916,12 +1926,13 @@ int mt7996_mcu_set_fixed_field(struct mt7996_dev *dev, struct ieee80211_vif *vif
}
static int
-mt7996_mcu_add_rate_ctrl_fixed(struct mt7996_dev *dev, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
+mt7996_mcu_add_rate_ctrl_fixed(struct mt7996_dev *dev,
+ struct ieee80211_link_sta *link_sta,
+ struct mt7996_vif_link *link,
+ struct mt7996_sta_link *msta_link)
{
- struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
- struct cfg80211_chan_def *chandef = &mvif->deflink.phy->mt76->chandef;
- struct cfg80211_bitrate_mask *mask = &mvif->deflink.bitrate_mask;
+ struct cfg80211_chan_def *chandef = &link->phy->mt76->chandef;
+ struct cfg80211_bitrate_mask *mask = &link->bitrate_mask;
enum nl80211_band band = chandef->chan->band;
struct sta_phy_uni phy = {};
int ret, nrates = 0;
@@ -1942,11 +1953,11 @@ mt7996_mcu_add_rate_ctrl_fixed(struct mt7996_dev *dev, struct ieee80211_vif *vif
} \
} while (0)
- if (sta->deflink.he_cap.has_he) {
+ if (link_sta->he_cap.has_he) {
__sta_phy_bitrate_mask_check(he_mcs, he_gi, 0, 1);
- } else if (sta->deflink.vht_cap.vht_supported) {
+ } else if (link_sta->vht_cap.vht_supported) {
__sta_phy_bitrate_mask_check(vht_mcs, gi, 0, 0);
- } else if (sta->deflink.ht_cap.ht_supported) {
+ } else if (link_sta->ht_cap.ht_supported) {
__sta_phy_bitrate_mask_check(ht_mcs, gi, 1, 0);
} else {
nrates = hweight32(mask->control[band].legacy);
@@ -1963,7 +1974,8 @@ mt7996_mcu_add_rate_ctrl_fixed(struct mt7996_dev *dev, struct ieee80211_vif *vif
/* fixed single rate */
if (nrates == 1) {
- ret = mt7996_mcu_set_fixed_field(dev, vif, sta, &phy,
+ ret = mt7996_mcu_set_fixed_field(dev, link_sta, link,
+ msta_link, &phy,
RATE_PARAM_FIXED_MCS);
if (ret)
return ret;
@@ -1972,20 +1984,20 @@ mt7996_mcu_add_rate_ctrl_fixed(struct mt7996_dev *dev, struct ieee80211_vif *vif
/* fixed GI */
if (mask->control[band].gi != NL80211_TXRATE_DEFAULT_GI ||
mask->control[band].he_gi != GENMASK(7, 0)) {
- struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
u32 addr;
/* firmware updates only TXCMD but doesn't take WTBL into
* account, so driver should update here to reflect the
* actual txrate hardware sends out.
*/
- addr = mt7996_mac_wtbl_lmac_addr(dev, msta->wcid.idx, 7);
- if (sta->deflink.he_cap.has_he)
+ addr = mt7996_mac_wtbl_lmac_addr(dev, msta_link->wcid.idx, 7);
+ if (link_sta->he_cap.has_he)
mt76_rmw_field(dev, addr, GENMASK(31, 24), phy.sgi);
else
mt76_rmw_field(dev, addr, GENMASK(15, 12), phy.sgi);
- ret = mt7996_mcu_set_fixed_field(dev, vif, sta, &phy,
+ ret = mt7996_mcu_set_fixed_field(dev, link_sta, link,
+ msta_link, &phy,
RATE_PARAM_FIXED_GI);
if (ret)
return ret;
@@ -1993,7 +2005,8 @@ mt7996_mcu_add_rate_ctrl_fixed(struct mt7996_dev *dev, struct ieee80211_vif *vif
/* fixed HE_LTF */
if (mask->control[band].he_ltf != GENMASK(7, 0)) {
- ret = mt7996_mcu_set_fixed_field(dev, vif, sta, &phy,
+ ret = mt7996_mcu_set_fixed_field(dev, link_sta, link,
+ msta_link, &phy,
RATE_PARAM_FIXED_HE_LTF);
if (ret)
return ret;
@@ -2004,30 +2017,32 @@ mt7996_mcu_add_rate_ctrl_fixed(struct mt7996_dev *dev, struct ieee80211_vif *vif
static void
mt7996_mcu_sta_rate_ctrl_tlv(struct sk_buff *skb, struct mt7996_dev *dev,
- struct ieee80211_vif *vif, struct ieee80211_sta *sta)
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf,
+ struct ieee80211_link_sta *link_sta,
+ struct mt7996_vif_link *link)
{
#define INIT_RCPI 180
- struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
- struct mt76_phy *mphy = mvif->deflink.phy->mt76;
+ struct mt76_phy *mphy = link->phy->mt76;
struct cfg80211_chan_def *chandef = &mphy->chandef;
- struct cfg80211_bitrate_mask *mask = &mvif->deflink.bitrate_mask;
+ struct cfg80211_bitrate_mask *mask = &link->bitrate_mask;
+ u32 cap = link_sta->sta->wme ? STA_CAP_WMM : 0;
enum nl80211_band band = chandef->chan->band;
struct sta_rec_ra_uni *ra;
struct tlv *tlv;
- u32 supp_rate = sta->deflink.supp_rates[band];
- u32 cap = sta->wme ? STA_CAP_WMM : 0;
+ u32 supp_rate = link_sta->supp_rates[band];
tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_RA, sizeof(*ra));
ra = (struct sta_rec_ra_uni *)tlv;
ra->valid = true;
ra->auto_rate = true;
- ra->phy_mode = mt76_connac_get_phy_mode(mphy, vif, band, &sta->deflink);
+ ra->phy_mode = mt76_connac_get_phy_mode(mphy, vif, band, link_sta);
ra->channel = chandef->chan->hw_value;
- ra->bw = (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_320) ?
- CMD_CBW_320MHZ : sta->deflink.bandwidth;
+ ra->bw = (link_sta->bandwidth == IEEE80211_STA_RX_BW_320) ?
+ CMD_CBW_320MHZ : link_sta->bandwidth;
ra->phy.bw = ra->bw;
- ra->mmps_mode = mt7996_mcu_get_mmps_mode(sta->deflink.smps_mode);
+ ra->mmps_mode = mt7996_mcu_get_mmps_mode(link_sta->smps_mode);
if (supp_rate) {
supp_rate &= mask->control[band].legacy;
@@ -2047,60 +2062,60 @@ mt7996_mcu_sta_rate_ctrl_tlv(struct sk_buff *skb, struct mt7996_dev *dev,
}
}
- if (sta->deflink.ht_cap.ht_supported) {
+ if (link_sta->ht_cap.ht_supported) {
ra->supp_mode |= MODE_HT;
- ra->af = sta->deflink.ht_cap.ampdu_factor;
- ra->ht_gf = !!(sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_GRN_FLD);
+ ra->af = link_sta->ht_cap.ampdu_factor;
+ ra->ht_gf = !!(link_sta->ht_cap.cap & IEEE80211_HT_CAP_GRN_FLD);
cap |= STA_CAP_HT;
- if (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_20)
+ if (link_sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20)
cap |= STA_CAP_SGI_20;
- if (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_40)
+ if (link_sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40)
cap |= STA_CAP_SGI_40;
- if (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_TX_STBC)
+ if (link_sta->ht_cap.cap & IEEE80211_HT_CAP_TX_STBC)
cap |= STA_CAP_TX_STBC;
- if (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_RX_STBC)
+ if (link_sta->ht_cap.cap & IEEE80211_HT_CAP_RX_STBC)
cap |= STA_CAP_RX_STBC;
- if (vif->bss_conf.ht_ldpc &&
- (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_LDPC_CODING))
+ if (link_conf->ht_ldpc &&
+ (link_sta->ht_cap.cap & IEEE80211_HT_CAP_LDPC_CODING))
cap |= STA_CAP_LDPC;
- mt7996_mcu_set_sta_ht_mcs(sta, ra->ht_mcs,
+ mt7996_mcu_set_sta_ht_mcs(link_sta, ra->ht_mcs,
mask->control[band].ht_mcs);
ra->supp_ht_mcs = *(__le32 *)ra->ht_mcs;
}
- if (sta->deflink.vht_cap.vht_supported) {
+ if (link_sta->vht_cap.vht_supported) {
u8 af;
ra->supp_mode |= MODE_VHT;
af = FIELD_GET(IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK,
- sta->deflink.vht_cap.cap);
+ link_sta->vht_cap.cap);
ra->af = max_t(u8, ra->af, af);
cap |= STA_CAP_VHT;
- if (sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_80)
+ if (link_sta->vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_80)
cap |= STA_CAP_VHT_SGI_80;
- if (sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_160)
+ if (link_sta->vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_160)
cap |= STA_CAP_VHT_SGI_160;
- if (sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_TXSTBC)
+ if (link_sta->vht_cap.cap & IEEE80211_VHT_CAP_TXSTBC)
cap |= STA_CAP_VHT_TX_STBC;
- if (sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_RXSTBC_1)
+ if (link_sta->vht_cap.cap & IEEE80211_VHT_CAP_RXSTBC_1)
cap |= STA_CAP_VHT_RX_STBC;
- if ((vif->type != NL80211_IFTYPE_AP || vif->bss_conf.vht_ldpc) &&
- (sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_RXLDPC))
+ if ((vif->type != NL80211_IFTYPE_AP || link_conf->vht_ldpc) &&
+ (link_sta->vht_cap.cap & IEEE80211_VHT_CAP_RXLDPC))
cap |= STA_CAP_VHT_LDPC;
- mt7996_mcu_set_sta_vht_mcs(sta, ra->supp_vht_mcs,
+ mt7996_mcu_set_sta_vht_mcs(link_sta, ra->supp_vht_mcs,
mask->control[band].vht_mcs);
}
- if (sta->deflink.he_cap.has_he) {
+ if (link_sta->he_cap.has_he) {
ra->supp_mode |= MODE_HE;
cap |= STA_CAP_HE;
- if (sta->deflink.he_6ghz_capa.capa)
- ra->af = le16_get_bits(sta->deflink.he_6ghz_capa.capa,
+ if (link_sta->he_6ghz_capa.capa)
+ ra->af = le16_get_bits(link_sta->he_6ghz_capa.capa,
IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP);
}
ra->sta_cap = cpu_to_le32(cap);
@@ -2108,16 +2123,18 @@ mt7996_mcu_sta_rate_ctrl_tlv(struct sk_buff *skb, struct mt7996_dev *dev,
memset(ra->rx_rcpi, INIT_RCPI, sizeof(ra->rx_rcpi));
}
-int mt7996_mcu_add_rate_ctrl(struct mt7996_dev *dev, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta, bool changed)
+int mt7996_mcu_add_rate_ctrl(struct mt7996_dev *dev,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf,
+ struct ieee80211_link_sta *link_sta,
+ struct mt7996_vif_link *link,
+ struct mt7996_sta_link *msta_link, bool changed)
{
- struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
- struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
struct sk_buff *skb;
int ret;
- skb = __mt76_connac_mcu_alloc_sta_req(&dev->mt76, &mvif->deflink.mt76,
- &msta->wcid,
+ skb = __mt76_connac_mcu_alloc_sta_req(&dev->mt76, &link->mt76,
+ &msta_link->wcid,
MT7996_STA_UPDATE_MAX_SIZE);
if (IS_ERR(skb))
return PTR_ERR(skb);
@@ -2127,19 +2144,19 @@ int mt7996_mcu_add_rate_ctrl(struct mt7996_dev *dev, struct ieee80211_vif *vif,
* update sta_rec_he here.
*/
if (changed)
- mt7996_mcu_sta_he_tlv(skb, sta);
+ mt7996_mcu_sta_he_tlv(skb, link_sta, link);
/* sta_rec_ra accommodates BW, NSS and only MCS range format
* i.e 0-{7,8,9} for VHT.
*/
- mt7996_mcu_sta_rate_ctrl_tlv(skb, dev, vif, sta);
+ mt7996_mcu_sta_rate_ctrl_tlv(skb, dev, vif, link_conf, link_sta, link);
ret = mt76_mcu_skb_send_msg(&dev->mt76, skb,
MCU_WMWA_UNI_CMD(STA_REC_UPDATE), true);
if (ret)
return ret;
- return mt7996_mcu_add_rate_ctrl_fixed(dev, vif, sta);
+ return mt7996_mcu_add_rate_ctrl_fixed(dev, link_sta, link, msta_link);
}
static int
@@ -2148,6 +2165,7 @@ mt7996_mcu_add_group(struct mt7996_dev *dev, struct ieee80211_vif *vif,
{
#define MT_STA_BSS_GROUP 1
struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
+ struct mt7996_sta_link *msta_link;
struct mt7996_sta *msta;
struct {
u8 __rsv1[4];
@@ -2166,73 +2184,154 @@ mt7996_mcu_add_group(struct mt7996_dev *dev, struct ieee80211_vif *vif,
.val = cpu_to_le32(mvif->deflink.mt76.idx % 16),
};
- msta = sta ? (struct mt7996_sta *)sta->drv_priv : &mvif->deflink.sta;
- req.wlan_idx = cpu_to_le16(msta->wcid.idx);
+ msta = sta ? (struct mt7996_sta *)sta->drv_priv : NULL;
+ msta_link = msta ? &msta->deflink : &mvif->deflink.msta_link;
+ req.wlan_idx = cpu_to_le16(msta_link->wcid.idx);
return mt76_mcu_send_msg(&dev->mt76, MCU_WM_UNI_CMD(VOW), &req,
sizeof(req), true);
}
-int mt7996_mcu_add_sta(struct mt7996_dev *dev, struct ieee80211_vif *vif,
- struct mt76_vif_link *mlink,
- struct ieee80211_sta *sta, int conn_state, bool newly)
+static void
+mt7996_mcu_sta_mld_setup_tlv(struct mt7996_dev *dev, struct sk_buff *skb,
+ struct ieee80211_sta *sta)
{
- struct ieee80211_link_sta *link_sta = NULL;
- struct mt76_wcid *wcid = mlink->wcid;
- struct sk_buff *skb;
- int ret;
+ struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
+ unsigned long links = sta->valid_links;
+ unsigned int nlinks = hweight16(links);
+ struct mld_setup_link *mld_setup_link;
+ struct sta_rec_mld_setup *mld_setup;
+ struct mt7996_sta_link *msta_link;
+ struct ieee80211_vif *vif;
+ unsigned int link_id;
+ struct tlv *tlv;
+
+ msta_link = mt76_dereference(msta->link[msta->deflink_id], &dev->mt76);
+ if (!msta_link)
+ return;
+
+ tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_MLD,
+ sizeof(struct sta_rec_mld_setup) +
+ sizeof(struct mld_setup_link) * nlinks);
+
+ mld_setup = (struct sta_rec_mld_setup *)tlv;
+ memcpy(mld_setup->mld_addr, sta->addr, ETH_ALEN);
+ mld_setup->setup_wcid = cpu_to_le16(msta_link->wcid.idx);
+ mld_setup->primary_id = cpu_to_le16(msta_link->wcid.idx);
+
+ if (nlinks > 1) {
+ link_id = __ffs(links & ~BIT(msta->deflink_id));
+ msta_link = mt76_dereference(msta->link[link_id],
+ &dev->mt76);
+ if (!msta_link)
+ return;
+ }
+ mld_setup->seconed_id = cpu_to_le16(msta_link->wcid.idx);
+ mld_setup->link_num = nlinks;
+
+ vif = container_of((void *)msta->vif, struct ieee80211_vif, drv_priv);
+ mld_setup_link = (struct mld_setup_link *)mld_setup->link_info;
+ for_each_set_bit(link_id, &links, IEEE80211_MLD_MAX_NUM_LINKS) {
+ struct mt7996_vif_link *link;
+
+ msta_link = mt76_dereference(msta->link[link_id], &dev->mt76);
+ if (!msta_link)
+ continue;
- if (sta) {
- struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
+ link = mt7996_vif_link(dev, vif, link_id);
+ if (!link)
+ continue;
- wcid = &msta->wcid;
- link_sta = &sta->deflink;
+ if (!msta_link)
+ continue;
+
+ mld_setup_link->wcid = cpu_to_le16(msta_link->wcid.idx);
+ mld_setup_link->bss_idx = link->mt76.idx;
+ mld_setup_link++;
}
+}
+
+static void
+mt7996_mcu_sta_eht_mld_tlv(struct mt7996_dev *dev, struct sk_buff *skb,
+ struct ieee80211_sta *sta)
+{
+ struct sta_rec_eht_mld *eht_mld;
+ struct tlv *tlv;
+ int i;
+
+ tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_EHT_MLD, sizeof(*eht_mld));
+ eht_mld = (struct sta_rec_eht_mld *)tlv;
+
+ for (i = 0; i < ARRAY_SIZE(eht_mld->str_cap); i++)
+ eht_mld->str_cap[i] = 0x7;
+}
- skb = __mt76_connac_mcu_alloc_sta_req(&dev->mt76, mlink, wcid,
+int mt7996_mcu_add_sta(struct mt7996_dev *dev,
+ struct ieee80211_bss_conf *link_conf,
+ struct ieee80211_link_sta *link_sta,
+ struct mt7996_vif_link *link,
+ struct mt7996_sta_link *msta_link,
+ int conn_state, bool newly)
+{
+ struct mt76_wcid *wcid = msta_link ? &msta_link->wcid : link->mt76.wcid;
+ struct ieee80211_sta *sta = link_sta ? link_sta->sta : NULL;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = __mt76_connac_mcu_alloc_sta_req(&dev->mt76, &link->mt76, wcid,
MT7996_STA_UPDATE_MAX_SIZE);
if (IS_ERR(skb))
return PTR_ERR(skb);
/* starec basic */
- mt76_connac_mcu_sta_basic_tlv(&dev->mt76, skb, &vif->bss_conf, link_sta,
+ mt76_connac_mcu_sta_basic_tlv(&dev->mt76, skb, link_conf, link_sta,
conn_state, newly);
if (conn_state == CONN_STATE_DISCONNECT)
goto out;
/* starec hdr trans */
- mt7996_mcu_sta_hdr_trans_tlv(dev, skb, vif, wcid);
+ mt7996_mcu_sta_hdr_trans_tlv(dev, skb, link_conf->vif, wcid);
/* starec tx proc */
mt7996_mcu_sta_tx_proc_tlv(skb);
/* tag order is in accordance with firmware dependency. */
- if (sta) {
+ if (link_sta) {
/* starec hdrt mode */
mt7996_mcu_sta_hdrt_tlv(dev, skb);
- /* starec bfer */
- mt7996_mcu_sta_bfer_tlv(dev, skb, vif, sta);
+ if (conn_state == CONN_STATE_CONNECT) {
+ /* starec bfer */
+ mt7996_mcu_sta_bfer_tlv(dev, skb, link_conf, link_sta,
+ link);
+ /* starec bfee */
+ mt7996_mcu_sta_bfee_tlv(dev, skb, link_conf, link_sta,
+ link);
+ }
/* starec ht */
- mt7996_mcu_sta_ht_tlv(skb, sta);
+ mt7996_mcu_sta_ht_tlv(skb, link_sta);
/* starec vht */
- mt7996_mcu_sta_vht_tlv(skb, sta);
+ mt7996_mcu_sta_vht_tlv(skb, link_sta);
/* starec uapsd */
- mt76_connac_mcu_sta_uapsd(skb, vif, sta);
+ mt76_connac_mcu_sta_uapsd(skb, link_conf->vif, sta);
/* starec amsdu */
- mt7996_mcu_sta_amsdu_tlv(dev, skb, vif, sta);
+ mt7996_mcu_sta_amsdu_tlv(dev, skb, link_conf->vif, link_sta,
+ msta_link);
/* starec he */
- mt7996_mcu_sta_he_tlv(skb, sta);
+ mt7996_mcu_sta_he_tlv(skb, link_sta, link);
/* starec he 6g*/
- mt7996_mcu_sta_he_6g_tlv(skb, sta);
+ mt7996_mcu_sta_he_6g_tlv(skb, link_sta);
/* starec eht */
- mt7996_mcu_sta_eht_tlv(skb, sta);
+ mt7996_mcu_sta_eht_tlv(skb, link_sta);
/* starec muru */
- mt7996_mcu_sta_muru_tlv(dev, skb, vif, sta);
- /* starec bfee */
- mt7996_mcu_sta_bfee_tlv(dev, skb, vif, sta);
+ mt7996_mcu_sta_muru_tlv(dev, skb, link_conf, link_sta);
+
+ if (sta->mlo) {
+ mt7996_mcu_sta_mld_setup_tlv(dev, skb, sta);
+ mt7996_mcu_sta_eht_mld_tlv(dev, skb, sta);
+ }
}
- ret = mt7996_mcu_add_group(dev, vif, sta);
+ ret = mt7996_mcu_add_group(dev, link_conf->vif, sta);
if (ret) {
dev_kfree_skb(skb);
return ret;
@@ -2242,6 +2341,24 @@ out:
MCU_WMWA_UNI_CMD(STA_REC_UPDATE), true);
}
+int mt7996_mcu_teardown_mld_sta(struct mt7996_dev *dev,
+ struct mt7996_vif_link *link,
+ struct mt7996_sta_link *msta_link)
+{
+ struct sk_buff *skb;
+
+ skb = __mt76_connac_mcu_alloc_sta_req(&dev->mt76, &link->mt76,
+ &msta_link->wcid,
+ MT7996_STA_UPDATE_MAX_SIZE);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ mt76_connac_mcu_add_tlv(skb, STA_REC_MLD_OFF, sizeof(struct tlv));
+
+ return mt76_mcu_skb_send_msg(&dev->mt76, skb,
+ MCU_WMWA_UNI_CMD(STA_REC_UPDATE), true);
+}
+
static int
mt7996_mcu_sta_key_tlv(struct mt76_wcid *wcid,
struct sk_buff *skb,
@@ -2307,17 +2424,18 @@ int mt7996_mcu_add_key(struct mt76_dev *dev, struct ieee80211_vif *vif,
return mt76_mcu_skb_send_msg(dev, skb, mcu_cmd, true);
}
-static int mt7996_mcu_get_pn(struct mt7996_dev *dev, struct ieee80211_vif *vif,
- u8 *pn)
+static int mt7996_mcu_get_pn(struct mt7996_dev *dev,
+ struct mt7996_vif_link *link,
+ struct mt7996_sta_link *msta_link, u8 *pn)
{
#define TSC_TYPE_BIGTK_PN 2
- struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
struct sta_rec_pn_info *pn_info;
struct sk_buff *skb, *rskb;
struct tlv *tlv;
int ret;
- skb = mt76_connac_mcu_alloc_sta_req(&dev->mt76, &mvif->deflink.mt76, &mvif->deflink.sta.wcid);
+ skb = mt76_connac_mcu_alloc_sta_req(&dev->mt76, &link->mt76,
+ &msta_link->wcid);
if (IS_ERR(skb))
return PTR_ERR(skb);
@@ -2341,10 +2459,11 @@ static int mt7996_mcu_get_pn(struct mt7996_dev *dev, struct ieee80211_vif *vif,
return 0;
}
-int mt7996_mcu_bcn_prot_enable(struct mt7996_dev *dev, struct ieee80211_vif *vif,
+int mt7996_mcu_bcn_prot_enable(struct mt7996_dev *dev,
+ struct mt7996_vif_link *link,
+ struct mt7996_sta_link *msta_link,
struct ieee80211_key_conf *key)
{
- struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
struct mt7996_mcu_bcn_prot_tlv *bcn_prot;
struct sk_buff *skb;
struct tlv *tlv;
@@ -2353,7 +2472,7 @@ int mt7996_mcu_bcn_prot_enable(struct mt7996_dev *dev, struct ieee80211_vif *vif
sizeof(struct mt7996_mcu_bcn_prot_tlv);
int ret;
- skb = __mt7996_mcu_alloc_bss_req(&dev->mt76, &mvif->deflink.mt76, len);
+ skb = __mt7996_mcu_alloc_bss_req(&dev->mt76, &link->mt76, len);
if (IS_ERR(skb))
return PTR_ERR(skb);
@@ -2361,7 +2480,7 @@ int mt7996_mcu_bcn_prot_enable(struct mt7996_dev *dev, struct ieee80211_vif *vif
bcn_prot = (struct mt7996_mcu_bcn_prot_tlv *)tlv;
- ret = mt7996_mcu_get_pn(dev, vif, pn);
+ ret = mt7996_mcu_get_pn(dev, link, msta_link, pn);
if (ret) {
dev_kfree_skb(skb);
return ret;
@@ -2592,13 +2711,14 @@ out:
}
int mt7996_mcu_beacon_inband_discov(struct mt7996_dev *dev,
- struct ieee80211_vif *vif, u32 changed)
+ struct ieee80211_bss_conf *link_conf,
+ struct mt7996_vif_link *link, u32 changed)
{
#define OFFLOAD_TX_MODE_SU BIT(0)
#define OFFLOAD_TX_MODE_MU BIT(1)
+ struct ieee80211_vif *vif = link_conf->vif;
struct ieee80211_hw *hw = mt76_hw(dev);
- struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
- struct mt7996_phy *phy = mt7996_vif_link_phy(&mvif->deflink);
+ struct mt7996_phy *phy = link->phy;
struct mt76_wcid *wcid = &dev->mt76.global_wcid;
struct bss_inband_discovery_tlv *discov;
struct ieee80211_tx_info *info;
@@ -2615,21 +2735,21 @@ int mt7996_mcu_beacon_inband_discov(struct mt7996_dev *dev,
chandef = &phy->mt76->chandef;
band = chandef->chan->band;
- if (vif->bss_conf.nontransmitted)
+ if (link_conf->nontransmitted)
return 0;
- rskb = __mt7996_mcu_alloc_bss_req(&dev->mt76, &mvif->deflink.mt76,
+ rskb = __mt7996_mcu_alloc_bss_req(&dev->mt76, &link->mt76,
MT7996_MAX_BSS_OFFLOAD_SIZE);
if (IS_ERR(rskb))
return PTR_ERR(rskb);
if (changed & BSS_CHANGED_FILS_DISCOVERY &&
- vif->bss_conf.fils_discovery.max_interval) {
- interval = vif->bss_conf.fils_discovery.max_interval;
+ link_conf->fils_discovery.max_interval) {
+ interval = link_conf->fils_discovery.max_interval;
skb = ieee80211_get_fils_discovery_tmpl(hw, vif);
} else if (changed & BSS_CHANGED_UNSOL_BCAST_PROBE_RESP &&
- vif->bss_conf.unsol_bcast_probe_resp_interval) {
- interval = vif->bss_conf.unsol_bcast_probe_resp_interval;
+ link_conf->unsol_bcast_probe_resp_interval) {
+ interval = link_conf->unsol_bcast_probe_resp_interval;
skb = ieee80211_get_unsol_bcast_probe_resp_tmpl(hw, vif);
}
@@ -4068,12 +4188,12 @@ mt7996_mcu_set_obss_spr_pd(struct mt7996_phy *phy,
}
static int
-mt7996_mcu_set_obss_spr_siga(struct mt7996_phy *phy, struct ieee80211_vif *vif,
+mt7996_mcu_set_obss_spr_siga(struct mt7996_phy *phy,
+ struct mt7996_vif_link *link,
struct ieee80211_he_obss_pd *he_obss_pd)
{
- struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
struct mt7996_dev *dev = phy->dev;
- u8 omac = mvif->deflink.mt76.omac_idx;
+ u8 omac = link->mt76.omac_idx;
struct {
u8 band_idx;
u8 __rsv[3];
@@ -4145,7 +4265,8 @@ mt7996_mcu_set_obss_spr_bitmap(struct mt7996_phy *phy,
sizeof(req), true);
}
-int mt7996_mcu_add_obss_spr(struct mt7996_phy *phy, struct ieee80211_vif *vif,
+int mt7996_mcu_add_obss_spr(struct mt7996_phy *phy,
+ struct mt7996_vif_link *link,
struct ieee80211_he_obss_pd *he_obss_pd)
{
int ret;
@@ -4179,7 +4300,7 @@ int mt7996_mcu_add_obss_spr(struct mt7996_phy *phy, struct ieee80211_vif *vif,
return ret;
/* Set SR prohibit */
- ret = mt7996_mcu_set_obss_spr_siga(phy, vif, he_obss_pd);
+ ret = mt7996_mcu_set_obss_spr_siga(phy, link, he_obss_pd);
if (ret)
return ret;
@@ -4215,7 +4336,7 @@ int mt7996_mcu_update_bss_color(struct mt7996_dev *dev,
#define TWT_AGRT_PROTECT BIT(2)
int mt7996_mcu_twt_agrt_update(struct mt7996_dev *dev,
- struct mt7996_vif *mvif,
+ struct mt7996_vif_link *link,
struct mt7996_twt_flow *flow,
int cmd)
{
@@ -4246,12 +4367,12 @@ int mt7996_mcu_twt_agrt_update(struct mt7996_dev *dev,
.len = cpu_to_le16(sizeof(req) - 4),
.tbl_idx = flow->table_id,
.cmd = cmd,
- .own_mac_idx = mvif->deflink.mt76.omac_idx,
+ .own_mac_idx = link->mt76.omac_idx,
.flowid = flow->id,
.peer_id = cpu_to_le16(flow->wcid),
.duration = flow->duration,
- .bss = mvif->deflink.mt76.idx,
- .bss_idx = mvif->deflink.mt76.idx,
+ .bss = link->mt76.idx,
+ .bss_idx = link->mt76.idx,
.start_tsf = cpu_to_le64(flow->tsf),
.mantissa = flow->mantissa,
.exponent = flow->exp,
@@ -4341,22 +4462,19 @@ int mt7996_mcu_rdd_cmd(struct mt7996_dev *dev, int cmd, u8 index,
int mt7996_mcu_wtbl_update_hdr_trans(struct mt7996_dev *dev,
struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
+ struct mt7996_vif_link *link,
+ struct mt7996_sta_link *msta_link)
{
- struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
- struct mt7996_sta *msta;
struct sk_buff *skb;
- msta = sta ? (struct mt7996_sta *)sta->drv_priv : &mvif->deflink.sta;
-
- skb = __mt76_connac_mcu_alloc_sta_req(&dev->mt76, &mvif->deflink.mt76,
- &msta->wcid,
+ skb = __mt76_connac_mcu_alloc_sta_req(&dev->mt76, &link->mt76,
+ &msta_link->wcid,
MT7996_STA_UPDATE_MAX_SIZE);
if (IS_ERR(skb))
return PTR_ERR(skb);
/* starec hdr trans */
- mt7996_mcu_sta_hdr_trans_tlv(dev, skb, vif, &msta->wcid);
+ mt7996_mcu_sta_hdr_trans_tlv(dev, skb, vif, &msta_link->wcid);
return mt76_mcu_skb_send_msg(&dev->mt76, skb,
MCU_WMWA_UNI_CMD(STA_REC_UPDATE), true);
}
@@ -4579,7 +4697,7 @@ int mt7996_mcu_set_txpower_sku(struct mt7996_phy *phy)
struct sk_buff *skb;
int i, tx_power;
- tx_power = mt7996_get_power_bound(phy, phy->txpower);
+ tx_power = mt76_get_power_bound(mphy, phy->txpower);
tx_power = mt76_get_rate_power_limits(mphy, mphy->chandef.chan,
&la, tx_power);
mphy->txpower_cur = tx_power;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.h
index 43468bcaffc6..2ab6a53bee86 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.h
@@ -625,6 +625,35 @@ struct sta_rec_hdr_trans {
u8 mesh;
} __packed;
+struct sta_rec_mld_setup {
+ __le16 tag;
+ __le16 len;
+ u8 mld_addr[ETH_ALEN];
+ __le16 primary_id;
+ __le16 seconed_id;
+ __le16 setup_wcid;
+ u8 link_num;
+ u8 info;
+ u8 __rsv[2];
+ u8 link_info[];
+} __packed;
+
+struct sta_rec_eht_mld {
+ __le16 tag;
+ __le16 len;
+ u8 nsep;
+ u8 __rsv1[2];
+ u8 str_cap[__MT_MAX_BAND];
+ __le16 eml_cap;
+ u8 __rsv2[4];
+} __packed;
+
+struct mld_setup_link {
+ __le16 wcid;
+ u8 bss_idx;
+ u8 __rsv;
+} __packed;
+
struct hdr_trans_en {
__le16 tag;
__le16 len;
@@ -798,6 +827,9 @@ enum {
sizeof(struct sta_rec_eht) + \
sizeof(struct sta_rec_hdrt) + \
sizeof(struct sta_rec_hdr_trans) + \
+ sizeof(struct sta_rec_mld_setup) + \
+ sizeof(struct mld_setup_link) * 3 + \
+ sizeof(struct sta_rec_eht_mld) + \
sizeof(struct tlv))
#define MT7996_MAX_BEACON_SIZE 1338
@@ -809,18 +841,6 @@ enum {
#define MT7996_MAX_BSS_OFFLOAD_SIZE (MT7996_MAX_BEACON_SIZE + \
MT7996_BEACON_UPDATE_SIZE)
-static inline s8
-mt7996_get_power_bound(struct mt7996_phy *phy, s8 txpower)
-{
- struct mt76_phy *mphy = phy->mt76;
- int n_chains = hweight16(mphy->chainmask);
-
- txpower = mt76_get_sar_power(mphy, mphy->chandef.chan, txpower * 2);
- txpower -= mt76_tx_power_nss_delta(n_chains);
-
- return txpower;
-}
-
enum {
UNI_BAND_CONFIG_RADIO_ENABLE,
UNI_BAND_CONFIG_RTS_THRESHOLD = 0x08,
@@ -908,7 +928,8 @@ enum {
UNI_CMD_SER_SET_RECOVER_L3_TX_DISABLE,
UNI_CMD_SER_SET_RECOVER_L3_BF,
UNI_CMD_SER_SET_RECOVER_L4_MDP,
- UNI_CMD_SER_SET_RECOVER_FULL,
+ UNI_CMD_SER_SET_RECOVER_FROM_ETH,
+ UNI_CMD_SER_SET_RECOVER_FULL = 8,
UNI_CMD_SER_SET_SYSTEM_ASSERT,
/* action */
UNI_CMD_SER_ENABLE = 1,
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c b/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c
index 7a8ee6c75cf2..13b188e281bd 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c
@@ -281,7 +281,7 @@ static int mt7996_mmio_wed_reset(struct mtk_wed_device *wed)
if (test_and_set_bit(MT76_STATE_WED_RESET, &mphy->state))
return -EBUSY;
- ret = mt7996_mcu_set_ser(dev, UNI_CMD_SER_TRIGGER, UNI_CMD_SER_SET_RECOVER_L1,
+ ret = mt7996_mcu_set_ser(dev, UNI_CMD_SER_TRIGGER, UNI_CMD_SER_SET_RECOVER_FROM_ETH,
mphy->band_idx);
if (ret)
goto out;
@@ -618,9 +618,6 @@ struct mt7996_dev *mt7996_mmio_probe(struct device *pdev,
.rx_skb = mt7996_queue_rx_skb,
.rx_check = mt7996_rx_check,
.rx_poll_complete = mt7996_rx_poll_complete,
- .sta_add = mt7996_mac_sta_add,
- .sta_event = mt7996_mac_sta_event,
- .sta_remove = mt7996_mac_sta_remove,
.update_survey = mt7996_update_channel,
.set_channel = mt7996_set_channel,
.vif_link_add = mt7996_vif_link_add,
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h b/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h
index 29fabb9b04ae..43e646ed6094 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h
@@ -185,10 +185,10 @@ struct mt7996_twt_flow {
DECLARE_EWMA(avg_signal, 10, 8)
-struct mt7996_sta {
+struct mt7996_sta_link {
struct mt76_wcid wcid; /* must be first */
- struct mt7996_vif *vif;
+ struct mt7996_sta *sta;
struct list_head rc_list;
u32 airtime_ac[8];
@@ -204,12 +204,22 @@ struct mt7996_sta {
u8 flowid_mask;
struct mt7996_twt_flow flow[MT7996_MAX_STA_TWT_AGRT];
} twt;
+
+ struct rcu_head rcu_head;
+};
+
+struct mt7996_sta {
+ struct mt7996_sta_link deflink; /* must be first */
+ struct mt7996_sta_link __rcu *link[IEEE80211_MLD_MAX_NUM_LINKS];
+ u8 deflink_id;
+
+ struct mt7996_vif *vif;
};
struct mt7996_vif_link {
struct mt76_vif_link mt76; /* must be first */
- struct mt7996_sta sta;
+ struct mt7996_sta_link msta_link;
struct mt7996_phy *phy;
struct ieee80211_tx_queue_params queue_params[IEEE80211_NUM_ACS];
@@ -525,7 +535,7 @@ struct mt7996_dev *mt7996_mmio_probe(struct device *pdev,
void __iomem *mem_base, u32 device_id);
void mt7996_wfsys_reset(struct mt7996_dev *dev);
irqreturn_t mt7996_irq_handler(int irq, void *dev_instance);
-u64 __mt7996_get_tsf(struct ieee80211_hw *hw, struct mt7996_vif *mvif);
+u64 __mt7996_get_tsf(struct ieee80211_hw *hw, struct mt7996_vif_link *link);
int mt7996_register_device(struct mt7996_dev *dev);
void mt7996_unregister_device(struct mt7996_dev *dev);
int mt7996_vif_link_add(struct mt76_phy *mphy, struct ieee80211_vif *vif,
@@ -553,7 +563,7 @@ int mt7996_run(struct mt7996_phy *phy);
int mt7996_mcu_init(struct mt7996_dev *dev);
int mt7996_mcu_init_firmware(struct mt7996_dev *dev);
int mt7996_mcu_twt_agrt_update(struct mt7996_dev *dev,
- struct mt7996_vif *mvif,
+ struct mt7996_vif_link *link,
struct mt7996_twt_flow *flow,
int cmd);
int mt7996_mcu_add_dev_info(struct mt7996_phy *phy, struct ieee80211_vif *vif,
@@ -561,35 +571,52 @@ int mt7996_mcu_add_dev_info(struct mt7996_phy *phy, struct ieee80211_vif *vif,
struct mt76_vif_link *mlink, bool enable);
int mt7996_mcu_add_bss_info(struct mt7996_phy *phy, struct ieee80211_vif *vif,
struct ieee80211_bss_conf *link_conf,
- struct mt76_vif_link *mlink, int enable);
-int mt7996_mcu_add_sta(struct mt7996_dev *dev, struct ieee80211_vif *vif,
- struct mt76_vif_link *mlink,
- struct ieee80211_sta *sta, int conn_state, bool newly);
+ struct mt76_vif_link *mlink,
+ struct mt7996_sta_link *msta_link, int enable);
+int mt7996_mcu_add_sta(struct mt7996_dev *dev,
+ struct ieee80211_bss_conf *link_conf,
+ struct ieee80211_link_sta *link_sta,
+ struct mt7996_vif_link *link,
+ struct mt7996_sta_link *msta_link,
+ int conn_state, bool newly);
+int mt7996_mcu_teardown_mld_sta(struct mt7996_dev *dev,
+ struct mt7996_vif_link *link,
+ struct mt7996_sta_link *msta_link);
int mt7996_mcu_add_tx_ba(struct mt7996_dev *dev,
struct ieee80211_ampdu_params *params,
- bool add);
+ struct mt7996_vif_link *link,
+ struct mt7996_sta_link *msta_link, bool enable);
int mt7996_mcu_add_rx_ba(struct mt7996_dev *dev,
struct ieee80211_ampdu_params *params,
- bool add);
+ struct mt7996_vif_link *link, bool enable);
int mt7996_mcu_update_bss_color(struct mt7996_dev *dev,
struct mt76_vif_link *mlink,
struct cfg80211_he_bss_color *he_bss_color);
int mt7996_mcu_add_beacon(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_bss_conf *link_conf);
int mt7996_mcu_beacon_inband_discov(struct mt7996_dev *dev,
- struct ieee80211_vif *vif, u32 changed);
-int mt7996_mcu_add_obss_spr(struct mt7996_phy *phy, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf,
+ struct mt7996_vif_link *link, u32 changed);
+int mt7996_mcu_add_obss_spr(struct mt7996_phy *phy,
+ struct mt7996_vif_link *link,
struct ieee80211_he_obss_pd *he_obss_pd);
-int mt7996_mcu_add_rate_ctrl(struct mt7996_dev *dev, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta, bool changed);
+int mt7996_mcu_add_rate_ctrl(struct mt7996_dev *dev,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf,
+ struct ieee80211_link_sta *link_sta,
+ struct mt7996_vif_link *link,
+ struct mt7996_sta_link *msta_link, bool changed);
int mt7996_set_channel(struct mt76_phy *mphy);
int mt7996_mcu_set_chan_info(struct mt7996_phy *phy, u16 tag);
int mt7996_mcu_set_tx(struct mt7996_dev *dev, struct ieee80211_vif *vif,
struct ieee80211_bss_conf *link_conf);
int mt7996_mcu_set_fixed_rate_ctrl(struct mt7996_dev *dev,
void *data, u16 version);
-int mt7996_mcu_set_fixed_field(struct mt7996_dev *dev, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta, void *data, u32 field);
+int mt7996_mcu_set_fixed_field(struct mt7996_dev *dev,
+ struct ieee80211_link_sta *link_sta,
+ struct mt7996_vif_link *link,
+ struct mt7996_sta_link *msta_link,
+ void *data, u32 field);
int mt7996_mcu_set_eeprom(struct mt7996_dev *dev);
int mt7996_mcu_get_eeprom(struct mt7996_dev *dev, u32 offset, u8 *buf, u32 buf_len);
int mt7996_mcu_get_eeprom_free_block(struct mt7996_dev *dev, u8 *block_num);
@@ -683,26 +710,19 @@ bool mt7996_mac_wtbl_update(struct mt7996_dev *dev, int idx, u32 mask);
void mt7996_mac_reset_counters(struct mt7996_phy *phy);
void mt7996_mac_cca_stats_reset(struct mt7996_phy *phy);
void mt7996_mac_enable_nf(struct mt7996_dev *dev, u8 band);
-void mt7996_mac_enable_rtscts(struct mt7996_dev *dev,
- struct ieee80211_vif *vif, bool enable);
void mt7996_mac_write_txwi(struct mt7996_dev *dev, __le32 *txwi,
struct sk_buff *skb, struct mt76_wcid *wcid,
struct ieee80211_key_conf *key, int pid,
enum mt76_txq_id qid, u32 changed);
void mt7996_mac_set_coverage_class(struct mt7996_phy *phy);
-int mt7996_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta);
-int mt7996_mac_sta_event(struct mt76_dev *mdev, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta, enum mt76_sta_event ev);
-void mt7996_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta);
void mt7996_mac_work(struct work_struct *work);
void mt7996_mac_reset_work(struct work_struct *work);
void mt7996_mac_dump_work(struct work_struct *work);
void mt7996_mac_sta_rc_work(struct work_struct *work);
void mt7996_mac_update_stats(struct mt7996_phy *phy);
void mt7996_mac_twt_teardown_flow(struct mt7996_dev *dev,
- struct mt7996_sta *msta,
+ struct mt7996_vif_link *link,
+ struct mt7996_sta_link *msta_link,
u8 flowid);
void mt7996_mac_add_twt_setup(struct ieee80211_hw *hw,
struct ieee80211_sta *sta,
@@ -727,11 +747,14 @@ bool mt7996_debugfs_rx_log(struct mt7996_dev *dev, const void *data, int len);
int mt7996_mcu_add_key(struct mt76_dev *dev, struct ieee80211_vif *vif,
struct ieee80211_key_conf *key, int mcu_cmd,
struct mt76_wcid *wcid, enum set_key_cmd cmd);
-int mt7996_mcu_bcn_prot_enable(struct mt7996_dev *dev, struct ieee80211_vif *vif,
+int mt7996_mcu_bcn_prot_enable(struct mt7996_dev *dev,
+ struct mt7996_vif_link *link,
+ struct mt7996_sta_link *msta_link,
struct ieee80211_key_conf *key);
int mt7996_mcu_wtbl_update_hdr_trans(struct mt7996_dev *dev,
struct ieee80211_vif *vif,
- struct ieee80211_sta *sta);
+ struct mt7996_vif_link *link,
+ struct mt7996_sta_link *msta_link);
int mt7996_mcu_cp_support(struct mt7996_dev *dev, u8 mode);
#ifdef CONFIG_MAC80211_DEBUGFS
void mt7996_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
diff --git a/drivers/net/wireless/mediatek/mt76/scan.c b/drivers/net/wireless/mediatek/mt76/scan.c
index 1c4f9deaaada..9b20ccbeb8cf 100644
--- a/drivers/net/wireless/mediatek/mt76/scan.c
+++ b/drivers/net/wireless/mediatek/mt76/scan.c
@@ -52,11 +52,6 @@ mt76_scan_send_probe(struct mt76_dev *dev, struct cfg80211_ssid *ssid)
ether_addr_copy(hdr->addr3, req->bssid);
}
- info = IEEE80211_SKB_CB(skb);
- if (req->no_cck)
- info->flags |= IEEE80211_TX_CTL_NO_CCK_RATE;
- info->control.flags |= IEEE80211_TX_CTRL_DONT_USE_RATE_MASK;
-
if (req->ie_len)
skb_put_data(skb, req->ie, req->ie_len);
@@ -64,10 +59,20 @@ mt76_scan_send_probe(struct mt76_dev *dev, struct cfg80211_ssid *ssid)
skb_set_queue_mapping(skb, IEEE80211_AC_VO);
rcu_read_lock();
- if (ieee80211_tx_prepare_skb(phy->hw, vif, skb, band, NULL))
- mt76_tx(phy, NULL, mvif->wcid, skb);
- else
+
+ if (!ieee80211_tx_prepare_skb(phy->hw, vif, skb, band, NULL)) {
ieee80211_free_txskb(phy->hw, skb);
+ goto out;
+ }
+
+ info = IEEE80211_SKB_CB(skb);
+ if (req->no_cck)
+ info->flags |= IEEE80211_TX_CTL_NO_CCK_RATE;
+ info->control.flags |= IEEE80211_TX_CTRL_DONT_USE_RATE_MASK;
+
+ mt76_tx(phy, NULL, mvif->wcid, skb);
+
+out:
rcu_read_unlock();
}
diff --git a/drivers/net/wireless/mediatek/mt76/tx.c b/drivers/net/wireless/mediatek/mt76/tx.c
index af0c50c983ec..513916469ca2 100644
--- a/drivers/net/wireless/mediatek/mt76/tx.c
+++ b/drivers/net/wireless/mediatek/mt76/tx.c
@@ -100,7 +100,8 @@ __mt76_tx_status_skb_done(struct mt76_dev *dev, struct sk_buff *skb, u8 flags,
return;
/* Tx status can be unreliable. if it fails, mark the frame as ACKed */
- if (flags & MT_TX_CB_TXS_FAILED) {
+ if (flags & MT_TX_CB_TXS_FAILED &&
+ (dev->drv->drv_flags & MT_DRV_IGNORE_TXS_FAILED)) {
info->status.rates[0].count = 0;
info->status.rates[0].idx = -1;
info->flags |= IEEE80211_TX_STAT_ACK;
diff --git a/drivers/net/wireless/quantenna/qtnfmac/qlink.h b/drivers/net/wireless/quantenna/qtnfmac/qlink.h
index 674461fa7fb3..eae35b678952 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/qlink.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/qlink.h
@@ -1510,10 +1510,15 @@ enum qlink_tlv_id {
};
struct qlink_tlv_hdr {
- __le16 type;
- __le16 len;
+ /* New members MUST be added within the struct_group() macro below. */
+ __struct_group(qlink_tlv_hdr_fixed, __hdr, __packed,
+ __le16 type;
+ __le16 len;
+ );
u8 val[];
} __packed;
+static_assert(offsetof(struct qlink_tlv_hdr, val) == sizeof(struct qlink_tlv_hdr_fixed),
+ "struct member likely outside of __struct_group()");
struct qlink_iface_limit {
__le16 max_num;
@@ -1567,7 +1572,7 @@ enum qlink_reg_rule_flags {
* @dfs_cac_ms: DFS CAC period.
*/
struct qlink_tlv_reg_rule {
- struct qlink_tlv_hdr hdr;
+ struct qlink_tlv_hdr_fixed hdr;
__le32 start_freq_khz;
__le32 end_freq_khz;
__le32 max_bandwidth_khz;
@@ -1606,7 +1611,7 @@ enum qlink_dfs_state {
* @channel: ieee80211 channel settings.
*/
struct qlink_tlv_channel {
- struct qlink_tlv_hdr hdr;
+ struct qlink_tlv_hdr_fixed hdr;
struct qlink_channel chan;
} __packed;
@@ -1618,7 +1623,7 @@ struct qlink_tlv_channel {
* @chan: channel definition data.
*/
struct qlink_tlv_chandef {
- struct qlink_tlv_hdr hdr;
+ struct qlink_tlv_hdr_fixed hdr;
struct qlink_chandef chdef;
} __packed;
@@ -1643,7 +1648,7 @@ enum qlink_ie_set_type {
* @ie_data: IEs data.
*/
struct qlink_tlv_ie_set {
- struct qlink_tlv_hdr hdr;
+ struct qlink_tlv_hdr_fixed hdr;
u8 type;
u8 flags;
u8 rsvd[2];
@@ -1657,7 +1662,7 @@ struct qlink_tlv_ie_set {
* @ie_data: IEs data.
*/
struct qlink_tlv_ext_ie {
- struct qlink_tlv_hdr hdr;
+ struct qlink_tlv_hdr_fixed hdr;
u8 eid_ext;
u8 rsvd[3];
u8 ie_data[];
@@ -1678,7 +1683,7 @@ struct qlink_sband_iftype_data {
* @iftype_data: interface type data entries.
*/
struct qlink_tlv_iftype_data {
- struct qlink_tlv_hdr hdr;
+ struct qlink_tlv_hdr_fixed hdr;
u8 n_iftype_data;
u8 rsvd[3];
struct qlink_sband_iftype_data iftype_data[];
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800mmio.c b/drivers/net/wireless/ralink/rt2x00/rt2800mmio.c
index 5323acff962a..45775ecdf221 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800mmio.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800mmio.c
@@ -842,7 +842,7 @@ int rt2800mmio_probe_hw(struct rt2x00_dev *rt2x00dev)
/*
* Set txstatus timer function.
*/
- rt2x00dev->txstatus_timer.function = rt2800mmio_tx_sta_fifo_timeout;
+ hrtimer_update_function(&rt2x00dev->txstatus_timer, rt2800mmio_tx_sta_fifo_timeout);
/*
* Overwrite TX done handler
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800usb.c b/drivers/net/wireless/ralink/rt2x00/rt2800usb.c
index 160bef79acdb..b51a23300ba2 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800usb.c
@@ -618,7 +618,7 @@ static int rt2800usb_probe_hw(struct rt2x00_dev *rt2x00dev)
/*
* Set txstatus timer function.
*/
- rt2x00dev->txstatus_timer.function = rt2800usb_tx_sta_fifo_timeout;
+ hrtimer_update_function(&rt2x00dev->txstatus_timer, rt2800usb_tx_sta_fifo_timeout);
/*
* Overwrite TX done handler
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c b/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c
index 9e7d9dbe954c..432ddfac2c33 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c
@@ -1391,8 +1391,8 @@ int rt2x00lib_probe_dev(struct rt2x00_dev *rt2x00dev)
mutex_init(&rt2x00dev->conf_mutex);
INIT_LIST_HEAD(&rt2x00dev->bar_list);
spin_lock_init(&rt2x00dev->bar_list_lock);
- hrtimer_init(&rt2x00dev->txstatus_timer, CLOCK_MONOTONIC,
- HRTIMER_MODE_REL);
+ hrtimer_setup(&rt2x00dev->txstatus_timer, hrtimer_dummy_timeout, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
set_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags);
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/8192c.c b/drivers/net/wireless/realtek/rtl8xxxu/8192c.c
index 0abb1b092bc2..73034e7e41d1 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/8192c.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/8192c.c
@@ -644,6 +644,8 @@ struct rtl8xxxu_fileops rtl8192cu_fops = {
.rx_agg_buf_size = 16000,
.tx_desc_size = sizeof(struct rtl8xxxu_txdesc32),
.rx_desc_size = sizeof(struct rtl8xxxu_rxdesc16),
+ .supports_ap = 1,
+ .max_macid_num = 32,
.max_sec_cam_num = 32,
.adda_1t_init = 0x0b1b25a0,
.adda_1t_path_on = 0x0bdb25a0,
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/core.c b/drivers/net/wireless/realtek/rtl8xxxu/core.c
index 4ce0c05c5129..569856ca677f 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/core.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/core.c
@@ -860,9 +860,10 @@ rtl8xxxu_writeN(struct rtl8xxxu_priv *priv, u16 addr, u8 *buf, u16 len)
return len;
write_error:
- dev_info(&udev->dev,
- "%s: Failed to write block at addr: %04x size: %04x\n",
- __func__, addr, blocksize);
+ if (rtl8xxxu_debug & RTL8XXXU_DEBUG_REG_WRITE)
+ dev_info(&udev->dev,
+ "%s: Failed to write block at addr: %04x size: %04x\n",
+ __func__, addr, blocksize);
return -EAGAIN;
}
@@ -4064,8 +4065,14 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw)
*/
rtl8xxxu_write16(priv, REG_TRXFF_BNDY + 2, fops->trxff_boundary);
- ret = rtl8xxxu_download_firmware(priv);
- dev_dbg(dev, "%s: download_firmware %i\n", __func__, ret);
+ for (int retry = 5; retry >= 0 ; retry--) {
+ ret = rtl8xxxu_download_firmware(priv);
+ dev_dbg(dev, "%s: download_firmware %i\n", __func__, ret);
+ if (ret != -EAGAIN)
+ break;
+ if (retry)
+ dev_dbg(dev, "%s: retry firmware download\n", __func__);
+ }
if (ret)
goto exit;
ret = rtl8xxxu_start_firmware(priv);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c
index d429560009bb..e07402e73ba3 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c
@@ -484,7 +484,7 @@ bool rtl92d_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
* pathA or mac1 has to set phy0&phy1 pathA */
if ((content == radiob_txt) && (rfpath == RF90_PATH_A)) {
rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
- " ===> althougth Path A, we load radiob.txt\n");
+ " ===> although Path A, we load radiob.txt\n");
radioa_arraylen = radiob_arraylen;
radioa_array_table = radiob_array_table;
}
@@ -750,7 +750,7 @@ static void _rtl92d_phy_switch_rf_setting(struct ieee80211_hw *hw, u8 channel)
&& rtlhal->interfaceindex == 1) {
need_pwr_down = rtl92d_phy_enable_anotherphy(hw, false);
rtlhal->during_mac1init_radioa = true;
- /* asume no this case */
+ /* assume no this case */
if (need_pwr_down)
rtl92d_phy_enable_rf_env(hw, path,
&u4regvalue);
@@ -1885,7 +1885,7 @@ static void _rtl92d_phy_reload_lck_setting(struct ieee80211_hw *hw,
bneed_powerdown_radio =
rtl92d_phy_enable_anotherphy(hw, false);
rtlpriv->rtlhal.during_mac1init_radioa = true;
- /* asume no this case */
+ /* assume no this case */
if (bneed_powerdown_radio)
rtl92d_phy_enable_rf_env(hw, erfpath,
&u4regvalue);
diff --git a/drivers/net/wireless/realtek/rtw88/Kconfig b/drivers/net/wireless/realtek/rtw88/Kconfig
index ab21b8059e0b..3736f290bd42 100644
--- a/drivers/net/wireless/realtek/rtw88/Kconfig
+++ b/drivers/net/wireless/realtek/rtw88/Kconfig
@@ -54,6 +54,9 @@ config RTW88_8812A
tristate
select RTW88_88XXA
+config RTW88_8814A
+ tristate
+
config RTW88_8822BE
tristate "Realtek 8822BE PCI wireless network adapter"
depends on PCI
@@ -222,6 +225,28 @@ config RTW88_8812AU
802.11ac USB wireless network adapter
+config RTW88_8814AE
+ tristate "Realtek 8814AE PCI wireless network adapter"
+ depends on PCI
+ select RTW88_CORE
+ select RTW88_PCI
+ select RTW88_8814A
+ help
+ Select this option will enable support for 8814AE chipset
+
+ 802.11ac PCIe wireless network adapter
+
+config RTW88_8814AU
+ tristate "Realtek 8814AU USB wireless network adapter"
+ depends on USB
+ select RTW88_CORE
+ select RTW88_USB
+ select RTW88_8814A
+ help
+ Select this option will enable support for 8814AU chipset
+
+ 802.11ac USB wireless network adapter
+
config RTW88_DEBUG
bool "Realtek rtw88 debug support"
depends on RTW88_CORE
diff --git a/drivers/net/wireless/realtek/rtw88/Makefile b/drivers/net/wireless/realtek/rtw88/Makefile
index 0cbbb366e393..0b3da05a2938 100644
--- a/drivers/net/wireless/realtek/rtw88/Makefile
+++ b/drivers/net/wireless/realtek/rtw88/Makefile
@@ -94,6 +94,15 @@ rtw88_8821au-objs := rtw8821au.o
obj-$(CONFIG_RTW88_8812AU) += rtw88_8812au.o
rtw88_8812au-objs := rtw8812au.o
+obj-$(CONFIG_RTW88_8814A) += rtw88_8814a.o
+rtw88_8814a-objs := rtw8814a.o rtw8814a_table.o
+
+obj-$(CONFIG_RTW88_8814AE) += rtw88_8814ae.o
+rtw88_8814ae-objs := rtw8814ae.o
+
+obj-$(CONFIG_RTW88_8814AU) += rtw88_8814au.o
+rtw88_8814au-objs := rtw8814au.o
+
obj-$(CONFIG_RTW88_PCI) += rtw88_pci.o
rtw88_pci-objs := pci.o
diff --git a/drivers/net/wireless/realtek/rtw88/debug.c b/drivers/net/wireless/realtek/rtw88/debug.c
index 364ec0436d0f..b67d69b01f87 100644
--- a/drivers/net/wireless/realtek/rtw88/debug.c
+++ b/drivers/net/wireless/realtek/rtw88/debug.c
@@ -654,10 +654,10 @@ static void rtw_print_rate(struct seq_file *m, u8 rate)
case DESC_RATE6M...DESC_RATE54M:
rtw_print_ofdm_rate_txt(m, rate);
break;
- case DESC_RATEMCS0...DESC_RATEMCS15:
+ case DESC_RATEMCS0...DESC_RATEMCS31:
rtw_print_ht_rate_txt(m, rate);
break;
- case DESC_RATEVHT1SS_MCS0...DESC_RATEVHT2SS_MCS9:
+ case DESC_RATEVHT1SS_MCS0...DESC_RATEVHT4SS_MCS9:
rtw_print_vht_rate_txt(m, rate);
break;
default:
@@ -692,9 +692,11 @@ static int rtw_debugfs_get_tx_pwr_tbl(struct seq_file *m, void *v)
{
struct rtw_debugfs_priv *debugfs_priv = m->private;
struct rtw_dev *rtwdev = debugfs_priv->rtwdev;
+ struct rtw_power_params pwr_param = {0};
struct rtw_hal *hal = &rtwdev->hal;
+ u8 nss = rtwdev->efuse.hw_cap.nss;
u8 path, rate, bw, ch, regd;
- struct rtw_power_params pwr_param = {0};
+ u8 max_ht_rate, max_rate;
mutex_lock(&rtwdev->mutex);
bw = hal->current_band_width;
@@ -707,19 +709,23 @@ static int rtw_debugfs_get_tx_pwr_tbl(struct seq_file *m, void *v)
seq_printf(m, "%-4s %-10s %-9s %-9s (%-4s %-4s %-4s) %-4s\n",
"path", "rate", "pwr", "base", "byr", "lmt", "sar", "rem");
+ max_ht_rate = DESC_RATEMCS0 + nss * 8 - 1;
+
+ if (rtwdev->chip->vht_supported)
+ max_rate = DESC_RATEVHT1SS_MCS0 + nss * 10 - 1;
+ else
+ max_rate = max_ht_rate;
+
mutex_lock(&hal->tx_power_mutex);
- for (path = RF_PATH_A; path <= RF_PATH_B; path++) {
+ for (path = RF_PATH_A; path < hal->rf_path_num; path++) {
/* there is no CCK rates used in 5G */
if (hal->current_band_type == RTW_BAND_5G)
rate = DESC_RATE6M;
else
rate = DESC_RATE1M;
- /* now, not support vht 3ss and vht 4ss*/
- for (; rate <= DESC_RATEVHT2SS_MCS9; rate++) {
- /* now, not support ht 3ss and ht 4ss*/
- if (rate > DESC_RATEMCS15 &&
- rate < DESC_RATEVHT1SS_MCS0)
+ for (; rate <= max_rate; rate++) {
+ if (rate > max_ht_rate && rate <= DESC_RATEMCS31)
continue;
rtw_get_tx_power_params(rtwdev, path, rate, bw,
@@ -849,20 +855,28 @@ static int rtw_debugfs_get_phy_info(struct seq_file *m, void *v)
last_cnt->num_qry_pkt[rate_id + 9]);
}
- seq_printf(m, "[RSSI(dBm)] = {%d, %d}\n",
+ seq_printf(m, "[RSSI(dBm)] = {%d, %d, %d, %d}\n",
dm_info->rssi[RF_PATH_A] - 100,
- dm_info->rssi[RF_PATH_B] - 100);
- seq_printf(m, "[Rx EVM(dB)] = {-%d, -%d}\n",
+ dm_info->rssi[RF_PATH_B] - 100,
+ dm_info->rssi[RF_PATH_C] - 100,
+ dm_info->rssi[RF_PATH_D] - 100);
+ seq_printf(m, "[Rx EVM(dB)] = {-%d, -%d, -%d, -%d}\n",
dm_info->rx_evm_dbm[RF_PATH_A],
- dm_info->rx_evm_dbm[RF_PATH_B]);
- seq_printf(m, "[Rx SNR] = {%d, %d}\n",
+ dm_info->rx_evm_dbm[RF_PATH_B],
+ dm_info->rx_evm_dbm[RF_PATH_C],
+ dm_info->rx_evm_dbm[RF_PATH_D]);
+ seq_printf(m, "[Rx SNR] = {%d, %d, %d, %d}\n",
dm_info->rx_snr[RF_PATH_A],
- dm_info->rx_snr[RF_PATH_B]);
- seq_printf(m, "[CFO_tail(KHz)] = {%d, %d}\n",
+ dm_info->rx_snr[RF_PATH_B],
+ dm_info->rx_snr[RF_PATH_C],
+ dm_info->rx_snr[RF_PATH_D]);
+ seq_printf(m, "[CFO_tail(KHz)] = {%d, %d, %d, %d}\n",
dm_info->cfo_tail[RF_PATH_A],
- dm_info->cfo_tail[RF_PATH_B]);
+ dm_info->cfo_tail[RF_PATH_B],
+ dm_info->cfo_tail[RF_PATH_C],
+ dm_info->cfo_tail[RF_PATH_D]);
- if (dm_info->curr_rx_rate >= DESC_RATE11M) {
+ if (dm_info->curr_rx_rate >= DESC_RATE6M) {
seq_puts(m, "[Rx Average Status]:\n");
seq_printf(m, " * OFDM, EVM: {-%d}, SNR: {%d}\n",
(u8)ewma_evm_read(&ewma_evm[RTW_EVM_OFDM]),
@@ -875,6 +889,13 @@ static int rtw_debugfs_get_phy_info(struct seq_file *m, void *v)
(u8)ewma_evm_read(&ewma_evm[RTW_EVM_2SS_B]),
(u8)ewma_snr_read(&ewma_snr[RTW_SNR_2SS_A]),
(u8)ewma_snr_read(&ewma_snr[RTW_SNR_2SS_B]));
+ seq_printf(m, " * 3SS, EVM: {-%d, -%d, -%d}, SNR: {%d, %d, %d}\n",
+ (u8)ewma_evm_read(&ewma_evm[RTW_EVM_3SS_A]),
+ (u8)ewma_evm_read(&ewma_evm[RTW_EVM_3SS_B]),
+ (u8)ewma_evm_read(&ewma_evm[RTW_EVM_3SS_C]),
+ (u8)ewma_snr_read(&ewma_snr[RTW_SNR_3SS_A]),
+ (u8)ewma_snr_read(&ewma_snr[RTW_SNR_3SS_B]),
+ (u8)ewma_snr_read(&ewma_snr[RTW_SNR_3SS_C]));
}
seq_puts(m, "[Rx Counter]:\n");
diff --git a/drivers/net/wireless/realtek/rtw88/fw.c b/drivers/net/wireless/realtek/rtw88/fw.c
index 02389b7c6876..6b563ac489a7 100644
--- a/drivers/net/wireless/realtek/rtw88/fw.c
+++ b/drivers/net/wireless/realtek/rtw88/fw.c
@@ -735,6 +735,7 @@ void rtw_fw_send_ra_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si,
{
u8 h2c_pkt[H2C_PKT_SIZE] = {0};
bool disable_pt = true;
+ u32 mask_hi;
SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_RA_INFO);
@@ -755,6 +756,20 @@ void rtw_fw_send_ra_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si,
si->init_ra_lv = 0;
rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
+
+ if (rtwdev->chip->id != RTW_CHIP_TYPE_8814A)
+ return;
+
+ SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_RA_INFO_HI);
+
+ mask_hi = si->ra_mask >> 32;
+
+ SET_RA_INFO_RA_MASK0(h2c_pkt, (mask_hi & 0xff));
+ SET_RA_INFO_RA_MASK1(h2c_pkt, (mask_hi & 0xff00) >> 8);
+ SET_RA_INFO_RA_MASK2(h2c_pkt, (mask_hi & 0xff0000) >> 16);
+ SET_RA_INFO_RA_MASK3(h2c_pkt, (mask_hi & 0xff000000) >> 24);
+
+ rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}
void rtw_fw_media_status_report(struct rtw_dev *rtwdev, u8 mac_id, bool connect)
diff --git a/drivers/net/wireless/realtek/rtw88/fw.h b/drivers/net/wireless/realtek/rtw88/fw.h
index 404de1b0c407..48ad9ceab6ea 100644
--- a/drivers/net/wireless/realtek/rtw88/fw.h
+++ b/drivers/net/wireless/realtek/rtw88/fw.h
@@ -557,6 +557,7 @@ static inline void rtw_h2c_pkt_set_header(u8 *h2c_pkt, u8 sub_id)
#define H2C_CMD_DEFAULT_PORT 0x2c
#define H2C_CMD_RA_INFO 0x40
#define H2C_CMD_RSSI_MONITOR 0x42
+#define H2C_CMD_RA_INFO_HI 0x46
#define H2C_CMD_BCN_FILTER_OFFLOAD_P0 0x56
#define H2C_CMD_BCN_FILTER_OFFLOAD_P1 0x57
#define H2C_CMD_WL_PHY_INFO 0x58
diff --git a/drivers/net/wireless/realtek/rtw88/mac.c b/drivers/net/wireless/realtek/rtw88/mac.c
index cae9cca6dca3..0491f501c138 100644
--- a/drivers/net/wireless/realtek/rtw88/mac.c
+++ b/drivers/net/wireless/realtek/rtw88/mac.c
@@ -291,6 +291,7 @@ static int rtw_mac_power_switch(struct rtw_dev *rtwdev, bool pwr_on)
if (rtw_read8(rtwdev, REG_CR) == 0xea)
cur_pwr = false;
else if (rtw_hci_type(rtwdev) == RTW_HCI_TYPE_USB &&
+ chip->id != RTW_CHIP_TYPE_8814A &&
(rtw_read8(rtwdev, REG_SYS_STATUS1 + 1) & BIT(0)))
cur_pwr = false;
else
@@ -784,7 +785,8 @@ static int __rtw_download_firmware(struct rtw_dev *rtwdev,
if (!check_firmware_size(data, size))
return -EINVAL;
- if (!ltecoex_read_reg(rtwdev, 0x38, &ltecoex_bckp))
+ if (rtwdev->chip->ltecoex_addr &&
+ !ltecoex_read_reg(rtwdev, 0x38, &ltecoex_bckp))
return -EBUSY;
wlan_cpu_enable(rtwdev, false);
@@ -802,7 +804,8 @@ static int __rtw_download_firmware(struct rtw_dev *rtwdev,
wlan_cpu_enable(rtwdev, true);
- if (!ltecoex_reg_write(rtwdev, 0x38, ltecoex_bckp)) {
+ if (rtwdev->chip->ltecoex_addr &&
+ !ltecoex_reg_write(rtwdev, 0x38, ltecoex_bckp)) {
ret = -EBUSY;
goto dlfw_fail;
}
diff --git a/drivers/net/wireless/realtek/rtw88/main.c b/drivers/net/wireless/realtek/rtw88/main.c
index 0cee0fd8c0ef..959f56a3cc1a 100644
--- a/drivers/net/wireless/realtek/rtw88/main.c
+++ b/drivers/net/wireless/realtek/rtw88/main.c
@@ -136,7 +136,7 @@ u16 rtw_desc_to_bitrate(u8 desc_rate)
return rate.bitrate;
}
-static struct ieee80211_supported_band rtw_band_2ghz = {
+static const struct ieee80211_supported_band rtw_band_2ghz = {
.band = NL80211_BAND_2GHZ,
.channels = rtw_channeltable_2g,
@@ -149,7 +149,7 @@ static struct ieee80211_supported_band rtw_band_2ghz = {
.vht_cap = {0},
};
-static struct ieee80211_supported_band rtw_band_5ghz = {
+static const struct ieee80211_supported_band rtw_band_5ghz = {
.band = NL80211_BAND_5GHZ,
.channels = rtw_channeltable_5g,
@@ -1234,7 +1234,9 @@ void rtw_update_sta_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si,
if (sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_RXLDPC)
ldpc_en = VHT_LDPC_EN;
} else if (sta->deflink.ht_cap.ht_supported) {
- ra_mask |= (sta->deflink.ht_cap.mcs.rx_mask[1] << 20) |
+ ra_mask |= ((u64)sta->deflink.ht_cap.mcs.rx_mask[3] << 36) |
+ ((u64)sta->deflink.ht_cap.mcs.rx_mask[2] << 28) |
+ (sta->deflink.ht_cap.mcs.rx_mask[1] << 20) |
(sta->deflink.ht_cap.mcs.rx_mask[0] << 12);
if (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_RX_STBC)
stbc_en = HT_STBC_EN;
@@ -1244,6 +1246,9 @@ void rtw_update_sta_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si,
if (efuse->hw_cap.nss == 1 || rtwdev->hal.txrx_1ss)
ra_mask &= RA_MASK_VHT_RATES_1SS | RA_MASK_HT_RATES_1SS;
+ else if (efuse->hw_cap.nss == 2)
+ ra_mask &= RA_MASK_VHT_RATES_2SS | RA_MASK_HT_RATES_2SS |
+ RA_MASK_VHT_RATES_1SS | RA_MASK_HT_RATES_1SS;
if (hal->current_band_type == RTW_BAND_5G) {
ra_mask |= (u64)sta->deflink.supp_rates[NL80211_BAND_5GHZ] << 4;
@@ -1302,10 +1307,9 @@ void rtw_update_sta_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si,
break;
}
- if (sta->deflink.vht_cap.vht_supported && ra_mask & 0xffc00000)
- tx_num = 2;
- else if (sta->deflink.ht_cap.ht_supported && ra_mask & 0xfff00000)
- tx_num = 2;
+ if (sta->deflink.vht_cap.vht_supported ||
+ sta->deflink.ht_cap.ht_supported)
+ tx_num = efuse->hw_cap.nss;
rate_id = get_rate_id(wireless_set, bw_mode, tx_num);
@@ -1561,6 +1565,7 @@ static void rtw_init_ht_cap(struct rtw_dev *rtwdev,
{
const struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_efuse *efuse = &rtwdev->efuse;
+ int i;
ht_cap->ht_supported = true;
ht_cap->cap = 0;
@@ -1580,25 +1585,20 @@ static void rtw_init_ht_cap(struct rtw_dev *rtwdev,
ht_cap->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
ht_cap->ampdu_density = chip->ampdu_density;
ht_cap->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
- if (efuse->hw_cap.nss > 1) {
- ht_cap->mcs.rx_mask[0] = 0xFF;
- ht_cap->mcs.rx_mask[1] = 0xFF;
- ht_cap->mcs.rx_mask[4] = 0x01;
- ht_cap->mcs.rx_highest = cpu_to_le16(300);
- } else {
- ht_cap->mcs.rx_mask[0] = 0xFF;
- ht_cap->mcs.rx_mask[1] = 0x00;
- ht_cap->mcs.rx_mask[4] = 0x01;
- ht_cap->mcs.rx_highest = cpu_to_le16(150);
- }
+
+ for (i = 0; i < efuse->hw_cap.nss; i++)
+ ht_cap->mcs.rx_mask[i] = 0xFF;
+ ht_cap->mcs.rx_mask[4] = 0x01;
+ ht_cap->mcs.rx_highest = cpu_to_le16(150 * efuse->hw_cap.nss);
}
static void rtw_init_vht_cap(struct rtw_dev *rtwdev,
struct ieee80211_sta_vht_cap *vht_cap)
{
struct rtw_efuse *efuse = &rtwdev->efuse;
- u16 mcs_map;
+ u16 mcs_map = 0;
__le16 highest;
+ int i;
if (efuse->hw_cap.ptcl != EFUSE_HW_CAP_IGNORE &&
efuse->hw_cap.ptcl != EFUSE_HW_CAP_PTCL_VHT)
@@ -1621,21 +1621,15 @@ static void rtw_init_vht_cap(struct rtw_dev *rtwdev,
if (rtw_chip_has_rx_ldpc(rtwdev))
vht_cap->cap |= IEEE80211_VHT_CAP_RXLDPC;
- mcs_map = IEEE80211_VHT_MCS_SUPPORT_0_9 << 0 |
- IEEE80211_VHT_MCS_NOT_SUPPORTED << 4 |
- IEEE80211_VHT_MCS_NOT_SUPPORTED << 6 |
- IEEE80211_VHT_MCS_NOT_SUPPORTED << 8 |
- IEEE80211_VHT_MCS_NOT_SUPPORTED << 10 |
- IEEE80211_VHT_MCS_NOT_SUPPORTED << 12 |
- IEEE80211_VHT_MCS_NOT_SUPPORTED << 14;
- if (efuse->hw_cap.nss > 1) {
- highest = cpu_to_le16(780);
- mcs_map |= IEEE80211_VHT_MCS_SUPPORT_0_9 << 2;
- } else {
- highest = cpu_to_le16(390);
- mcs_map |= IEEE80211_VHT_MCS_NOT_SUPPORTED << 2;
+ for (i = 0; i < 8; i++) {
+ if (i < efuse->hw_cap.nss)
+ mcs_map |= IEEE80211_VHT_MCS_SUPPORT_0_9 << (i * 2);
+ else
+ mcs_map |= IEEE80211_VHT_MCS_NOT_SUPPORTED << (i * 2);
}
+ highest = cpu_to_le16(390 * efuse->hw_cap.nss);
+
vht_cap->vht_mcs.rx_mcs_map = cpu_to_le16(mcs_map);
vht_cap->vht_mcs.tx_mcs_map = cpu_to_le16(mcs_map);
vht_cap->vht_mcs.rx_highest = highest;
diff --git a/drivers/net/wireless/realtek/rtw88/main.h b/drivers/net/wireless/realtek/rtw88/main.h
index 62cd4c526301..02343e059fd9 100644
--- a/drivers/net/wireless/realtek/rtw88/main.h
+++ b/drivers/net/wireless/realtek/rtw88/main.h
@@ -61,7 +61,7 @@ enum rtw_hci_type {
};
struct rtw_hci {
- struct rtw_hci_ops *ops;
+ const struct rtw_hci_ops *ops;
enum rtw_hci_type type;
u32 rpwm_addr;
@@ -166,9 +166,14 @@ enum rtw_rate_section {
RTW_RATE_SECTION_HT_2S,
RTW_RATE_SECTION_VHT_1S,
RTW_RATE_SECTION_VHT_2S,
+ __RTW_RATE_SECTION_2SS_MAX = RTW_RATE_SECTION_VHT_2S,
+ RTW_RATE_SECTION_HT_3S,
+ RTW_RATE_SECTION_HT_4S,
+ RTW_RATE_SECTION_VHT_3S,
+ RTW_RATE_SECTION_VHT_4S,
/* keep last */
- RTW_RATE_SECTION_MAX,
+ RTW_RATE_SECTION_NUM,
};
enum rtw_wireless_set {
@@ -191,6 +196,7 @@ enum rtw_chip_type {
RTW_CHIP_TYPE_8703B,
RTW_CHIP_TYPE_8821A,
RTW_CHIP_TYPE_8812A,
+ RTW_CHIP_TYPE_8814A,
};
enum rtw_tx_queue_type {
@@ -380,6 +386,9 @@ enum rtw_evm {
RTW_EVM_1SS,
RTW_EVM_2SS_A,
RTW_EVM_2SS_B,
+ RTW_EVM_3SS_A,
+ RTW_EVM_3SS_B,
+ RTW_EVM_3SS_C,
/* keep it last */
RTW_EVM_NUM
};
@@ -397,6 +406,10 @@ enum rtw_snr {
RTW_SNR_2SS_B,
RTW_SNR_2SS_C,
RTW_SNR_2SS_D,
+ RTW_SNR_3SS_A,
+ RTW_SNR_3SS_B,
+ RTW_SNR_3SS_C,
+ RTW_SNR_3SS_D,
/* keep it last */
RTW_SNR_NUM
};
@@ -822,7 +835,7 @@ struct rtw_vif {
};
struct rtw_regulatory {
- char alpha2[2];
+ char alpha2[2] __nonstring;
u8 txpwr_regd_2g;
u8 txpwr_regd_5g;
};
@@ -1130,14 +1143,26 @@ struct rtw_rfe_def {
* For 2G there are cck rate and ofdm rate with different settings.
*/
struct rtw_pwr_track_tbl {
+ const u8 *pwrtrk_5gd_n[RTW_PWR_TRK_5G_NUM];
+ const u8 *pwrtrk_5gd_p[RTW_PWR_TRK_5G_NUM];
+ const u8 *pwrtrk_5gc_n[RTW_PWR_TRK_5G_NUM];
+ const u8 *pwrtrk_5gc_p[RTW_PWR_TRK_5G_NUM];
const u8 *pwrtrk_5gb_n[RTW_PWR_TRK_5G_NUM];
const u8 *pwrtrk_5gb_p[RTW_PWR_TRK_5G_NUM];
const u8 *pwrtrk_5ga_n[RTW_PWR_TRK_5G_NUM];
const u8 *pwrtrk_5ga_p[RTW_PWR_TRK_5G_NUM];
+ const u8 *pwrtrk_2gd_n;
+ const u8 *pwrtrk_2gd_p;
+ const u8 *pwrtrk_2gc_n;
+ const u8 *pwrtrk_2gc_p;
const u8 *pwrtrk_2gb_n;
const u8 *pwrtrk_2gb_p;
const u8 *pwrtrk_2ga_n;
const u8 *pwrtrk_2ga_p;
+ const u8 *pwrtrk_2g_cckd_n;
+ const u8 *pwrtrk_2g_cckd_p;
+ const u8 *pwrtrk_2g_cckc_n;
+ const u8 *pwrtrk_2g_cckc_p;
const u8 *pwrtrk_2g_cckb_n;
const u8 *pwrtrk_2g_cckb_p;
const u8 *pwrtrk_2g_ccka_n;
@@ -1227,8 +1252,8 @@ struct rtw_chip_info {
const struct rtw_hw_reg *dig;
const struct rtw_hw_reg *dig_cck;
- u32 rf_base_addr[2];
- u32 rf_sipi_addr[2];
+ u32 rf_base_addr[RTW_RF_PATH_MAX];
+ u32 rf_sipi_addr[RTW_RF_PATH_MAX];
const struct rtw_rf_sipi_addr *rf_sipi_read_addr;
u8 fix_rf_phy_num;
const struct rtw_ltecoex_addr *ltecoex_addr;
@@ -1924,7 +1949,7 @@ union rtw_sar_cfg {
struct rtw_sar {
enum rtw_sar_sources src;
- union rtw_sar_cfg cfg[RTW_RF_PATH_MAX][RTW_RATE_SECTION_MAX];
+ union rtw_sar_cfg cfg[RTW_RF_PATH_MAX][RTW_RATE_SECTION_NUM];
};
struct rtw_hal {
@@ -1968,16 +1993,16 @@ struct rtw_hal {
s8 tx_pwr_by_rate_offset_5g[RTW_RF_PATH_MAX]
[DESC_RATE_MAX];
s8 tx_pwr_by_rate_base_2g[RTW_RF_PATH_MAX]
- [RTW_RATE_SECTION_MAX];
+ [RTW_RATE_SECTION_NUM];
s8 tx_pwr_by_rate_base_5g[RTW_RF_PATH_MAX]
- [RTW_RATE_SECTION_MAX];
+ [RTW_RATE_SECTION_NUM];
s8 tx_pwr_limit_2g[RTW_REGD_MAX]
[RTW_CHANNEL_WIDTH_MAX]
- [RTW_RATE_SECTION_MAX]
+ [RTW_RATE_SECTION_NUM]
[RTW_MAX_CHANNEL_NUM_2G];
s8 tx_pwr_limit_5g[RTW_REGD_MAX]
[RTW_CHANNEL_WIDTH_MAX]
- [RTW_RATE_SECTION_MAX]
+ [RTW_RATE_SECTION_NUM]
[RTW_MAX_CHANNEL_NUM_5G];
s8 tx_pwr_tbl[RTW_RF_PATH_MAX]
[DESC_RATE_MAX];
diff --git a/drivers/net/wireless/realtek/rtw88/pci.c b/drivers/net/wireless/realtek/rtw88/pci.c
index 0ecaefc4c83d..bb4c4ccb31d4 100644
--- a/drivers/net/wireless/realtek/rtw88/pci.c
+++ b/drivers/net/wireless/realtek/rtw88/pci.c
@@ -20,7 +20,7 @@ module_param_named(disable_aspm, rtw_pci_disable_aspm, bool, 0644);
MODULE_PARM_DESC(disable_msi, "Set Y to disable MSI interrupt support");
MODULE_PARM_DESC(disable_aspm, "Set Y to disable PCI ASPM support");
-static u32 rtw_pci_tx_queue_idx_addr[] = {
+static const u32 rtw_pci_tx_queue_idx_addr[] = {
[RTW_TX_QUEUE_BK] = RTK_PCI_TXBD_IDX_BKQ,
[RTW_TX_QUEUE_BE] = RTK_PCI_TXBD_IDX_BEQ,
[RTW_TX_QUEUE_VI] = RTK_PCI_TXBD_IDX_VIQ,
@@ -1591,7 +1591,7 @@ static void rtw_pci_destroy(struct rtw_dev *rtwdev, struct pci_dev *pdev)
rtw_pci_io_unmapping(rtwdev, pdev);
}
-static struct rtw_hci_ops rtw_pci_ops = {
+static const struct rtw_hci_ops rtw_pci_ops = {
.tx_write = rtw_pci_tx_write,
.tx_kick_off = rtw_pci_tx_kick_off,
.flush_queues = rtw_pci_flush_queues,
diff --git a/drivers/net/wireless/realtek/rtw88/phy.c b/drivers/net/wireless/realtek/rtw88/phy.c
index 8ed20c89d216..55be0d8e0c28 100644
--- a/drivers/net/wireless/realtek/rtw88/phy.c
+++ b/drivers/net/wireless/realtek/rtw88/phy.c
@@ -52,60 +52,93 @@ static const u32 db_invert_table[12][8] = {
1995262315, 2511886432U, 3162277660U, 3981071706U}
};
-u8 rtw_cck_rates[] = { DESC_RATE1M, DESC_RATE2M, DESC_RATE5_5M, DESC_RATE11M };
-u8 rtw_ofdm_rates[] = {
+const u8 rtw_cck_rates[] = { DESC_RATE1M, DESC_RATE2M, DESC_RATE5_5M, DESC_RATE11M };
+
+const u8 rtw_ofdm_rates[] = {
DESC_RATE6M, DESC_RATE9M, DESC_RATE12M,
DESC_RATE18M, DESC_RATE24M, DESC_RATE36M,
DESC_RATE48M, DESC_RATE54M
};
-u8 rtw_ht_1s_rates[] = {
+
+const u8 rtw_ht_1s_rates[] = {
DESC_RATEMCS0, DESC_RATEMCS1, DESC_RATEMCS2,
DESC_RATEMCS3, DESC_RATEMCS4, DESC_RATEMCS5,
DESC_RATEMCS6, DESC_RATEMCS7
};
-u8 rtw_ht_2s_rates[] = {
+
+const u8 rtw_ht_2s_rates[] = {
DESC_RATEMCS8, DESC_RATEMCS9, DESC_RATEMCS10,
DESC_RATEMCS11, DESC_RATEMCS12, DESC_RATEMCS13,
DESC_RATEMCS14, DESC_RATEMCS15
};
-u8 rtw_vht_1s_rates[] = {
+
+const u8 rtw_vht_1s_rates[] = {
DESC_RATEVHT1SS_MCS0, DESC_RATEVHT1SS_MCS1,
DESC_RATEVHT1SS_MCS2, DESC_RATEVHT1SS_MCS3,
DESC_RATEVHT1SS_MCS4, DESC_RATEVHT1SS_MCS5,
DESC_RATEVHT1SS_MCS6, DESC_RATEVHT1SS_MCS7,
DESC_RATEVHT1SS_MCS8, DESC_RATEVHT1SS_MCS9
};
-u8 rtw_vht_2s_rates[] = {
+
+const u8 rtw_vht_2s_rates[] = {
DESC_RATEVHT2SS_MCS0, DESC_RATEVHT2SS_MCS1,
DESC_RATEVHT2SS_MCS2, DESC_RATEVHT2SS_MCS3,
DESC_RATEVHT2SS_MCS4, DESC_RATEVHT2SS_MCS5,
DESC_RATEVHT2SS_MCS6, DESC_RATEVHT2SS_MCS7,
DESC_RATEVHT2SS_MCS8, DESC_RATEVHT2SS_MCS9
};
-u8 *rtw_rate_section[RTW_RATE_SECTION_MAX] = {
+
+const u8 rtw_ht_3s_rates[] = {
+ DESC_RATEMCS16, DESC_RATEMCS17, DESC_RATEMCS18,
+ DESC_RATEMCS19, DESC_RATEMCS20, DESC_RATEMCS21,
+ DESC_RATEMCS22, DESC_RATEMCS23
+};
+
+const u8 rtw_ht_4s_rates[] = {
+ DESC_RATEMCS24, DESC_RATEMCS25, DESC_RATEMCS26,
+ DESC_RATEMCS27, DESC_RATEMCS28, DESC_RATEMCS29,
+ DESC_RATEMCS30, DESC_RATEMCS31
+};
+
+const u8 rtw_vht_3s_rates[] = {
+ DESC_RATEVHT3SS_MCS0, DESC_RATEVHT3SS_MCS1,
+ DESC_RATEVHT3SS_MCS2, DESC_RATEVHT3SS_MCS3,
+ DESC_RATEVHT3SS_MCS4, DESC_RATEVHT3SS_MCS5,
+ DESC_RATEVHT3SS_MCS6, DESC_RATEVHT3SS_MCS7,
+ DESC_RATEVHT3SS_MCS8, DESC_RATEVHT3SS_MCS9
+};
+
+const u8 rtw_vht_4s_rates[] = {
+ DESC_RATEVHT4SS_MCS0, DESC_RATEVHT4SS_MCS1,
+ DESC_RATEVHT4SS_MCS2, DESC_RATEVHT4SS_MCS3,
+ DESC_RATEVHT4SS_MCS4, DESC_RATEVHT4SS_MCS5,
+ DESC_RATEVHT4SS_MCS6, DESC_RATEVHT4SS_MCS7,
+ DESC_RATEVHT4SS_MCS8, DESC_RATEVHT4SS_MCS9
+};
+
+const u8 * const rtw_rate_section[RTW_RATE_SECTION_NUM] = {
rtw_cck_rates, rtw_ofdm_rates,
rtw_ht_1s_rates, rtw_ht_2s_rates,
- rtw_vht_1s_rates, rtw_vht_2s_rates
+ rtw_vht_1s_rates, rtw_vht_2s_rates,
+ rtw_ht_3s_rates, rtw_ht_4s_rates,
+ rtw_vht_3s_rates, rtw_vht_4s_rates
};
EXPORT_SYMBOL(rtw_rate_section);
-u8 rtw_rate_size[RTW_RATE_SECTION_MAX] = {
+const u8 rtw_rate_size[RTW_RATE_SECTION_NUM] = {
ARRAY_SIZE(rtw_cck_rates),
ARRAY_SIZE(rtw_ofdm_rates),
ARRAY_SIZE(rtw_ht_1s_rates),
ARRAY_SIZE(rtw_ht_2s_rates),
ARRAY_SIZE(rtw_vht_1s_rates),
- ARRAY_SIZE(rtw_vht_2s_rates)
+ ARRAY_SIZE(rtw_vht_2s_rates),
+ ARRAY_SIZE(rtw_ht_3s_rates),
+ ARRAY_SIZE(rtw_ht_4s_rates),
+ ARRAY_SIZE(rtw_vht_3s_rates),
+ ARRAY_SIZE(rtw_vht_4s_rates)
};
EXPORT_SYMBOL(rtw_rate_size);
-static const u8 rtw_cck_size = ARRAY_SIZE(rtw_cck_rates);
-static const u8 rtw_ofdm_size = ARRAY_SIZE(rtw_ofdm_rates);
-static const u8 rtw_ht_1s_size = ARRAY_SIZE(rtw_ht_1s_rates);
-static const u8 rtw_ht_2s_size = ARRAY_SIZE(rtw_ht_2s_rates);
-static const u8 rtw_vht_1s_size = ARRAY_SIZE(rtw_vht_1s_rates);
-static const u8 rtw_vht_2s_size = ARRAY_SIZE(rtw_vht_2s_rates);
-
enum rtw_phy_band_type {
PHY_BAND_2G = 0,
PHY_BAND_5G = 1,
@@ -1590,7 +1623,7 @@ static void rtw_phy_set_tx_power_limit(struct rtw_dev *rtwdev, u8 regd, u8 band,
ch_idx = rtw_channel_to_idx(band, ch);
if (regd >= RTW_REGD_MAX || bw >= RTW_CHANNEL_WIDTH_MAX ||
- rs >= RTW_RATE_SECTION_MAX || ch_idx < 0) {
+ rs >= RTW_RATE_SECTION_NUM || ch_idx < 0) {
WARN(1,
"wrong txpwr_lmt regd=%u, band=%u bw=%u, rs=%u, ch_idx=%u, pwr_limit=%d\n",
regd, band, bw, rs, ch_idx, pwr_limit);
@@ -1634,11 +1667,15 @@ rtw_xref_5g_txpwr_lmt(struct rtw_dev *rtwdev, u8 regd,
static void
rtw_xref_txpwr_lmt_by_rs(struct rtw_dev *rtwdev, u8 regd, u8 bw, u8 ch_idx)
{
+ static const u8 rs_cmp[4][2] = {
+ {RTW_RATE_SECTION_HT_1S, RTW_RATE_SECTION_VHT_1S},
+ {RTW_RATE_SECTION_HT_2S, RTW_RATE_SECTION_VHT_2S},
+ {RTW_RATE_SECTION_HT_3S, RTW_RATE_SECTION_VHT_3S},
+ {RTW_RATE_SECTION_HT_4S, RTW_RATE_SECTION_VHT_4S}
+ };
u8 rs_idx, rs_ht, rs_vht;
- u8 rs_cmp[2][2] = {{RTW_RATE_SECTION_HT_1S, RTW_RATE_SECTION_VHT_1S},
- {RTW_RATE_SECTION_HT_2S, RTW_RATE_SECTION_VHT_2S} };
- for (rs_idx = 0; rs_idx < 2; rs_idx++) {
+ for (rs_idx = 0; rs_idx < 4; rs_idx++) {
rs_ht = rs_cmp[rs_idx][0];
rs_vht = rs_cmp[rs_idx][1];
@@ -1695,7 +1732,7 @@ rtw_cfg_txpwr_lmt_by_alt(struct rtw_dev *rtwdev, u8 regd, u8 regd_alt)
u8 bw, rs;
for (bw = 0; bw < RTW_CHANNEL_WIDTH_MAX; bw++)
- for (rs = 0; rs < RTW_RATE_SECTION_MAX; rs++)
+ for (rs = 0; rs < RTW_RATE_SECTION_NUM; rs++)
__cfg_txpwr_lmt_by_alt(&rtwdev->hal, regd, regd_alt,
bw, rs);
}
@@ -1959,10 +1996,10 @@ static u8 rtw_phy_get_2g_tx_power_index(struct rtw_dev *rtwdev,
u8 rate, u8 group)
{
const struct rtw_chip_info *chip = rtwdev->chip;
- u8 tx_power;
- bool mcs_rate;
- bool above_2ss;
+ bool above_2ss, above_3ss, above_4ss;
u8 factor = chip->txgi_factor;
+ bool mcs_rate;
+ u8 tx_power;
if (rate <= DESC_RATE11M)
tx_power = pwr_idx_2g->cck_base[group];
@@ -1972,11 +2009,15 @@ static u8 rtw_phy_get_2g_tx_power_index(struct rtw_dev *rtwdev,
if (rate >= DESC_RATE6M && rate <= DESC_RATE54M)
tx_power += pwr_idx_2g->ht_1s_diff.ofdm * factor;
- mcs_rate = (rate >= DESC_RATEMCS0 && rate <= DESC_RATEMCS15) ||
+ mcs_rate = (rate >= DESC_RATEMCS0 && rate <= DESC_RATEMCS31) ||
(rate >= DESC_RATEVHT1SS_MCS0 &&
- rate <= DESC_RATEVHT2SS_MCS9);
- above_2ss = (rate >= DESC_RATEMCS8 && rate <= DESC_RATEMCS15) ||
+ rate <= DESC_RATEVHT4SS_MCS9);
+ above_2ss = (rate >= DESC_RATEMCS8 && rate <= DESC_RATEMCS31) ||
(rate >= DESC_RATEVHT2SS_MCS0);
+ above_3ss = (rate >= DESC_RATEMCS16 && rate <= DESC_RATEMCS31) ||
+ (rate >= DESC_RATEVHT3SS_MCS0);
+ above_4ss = (rate >= DESC_RATEMCS24 && rate <= DESC_RATEMCS31) ||
+ (rate >= DESC_RATEVHT4SS_MCS0);
if (!mcs_rate)
return tx_power;
@@ -1989,11 +2030,19 @@ static u8 rtw_phy_get_2g_tx_power_index(struct rtw_dev *rtwdev,
tx_power += pwr_idx_2g->ht_1s_diff.bw20 * factor;
if (above_2ss)
tx_power += pwr_idx_2g->ht_2s_diff.bw20 * factor;
+ if (above_3ss)
+ tx_power += pwr_idx_2g->ht_3s_diff.bw20 * factor;
+ if (above_4ss)
+ tx_power += pwr_idx_2g->ht_4s_diff.bw20 * factor;
break;
case RTW_CHANNEL_WIDTH_40:
/* bw40 is the base power */
if (above_2ss)
tx_power += pwr_idx_2g->ht_2s_diff.bw40 * factor;
+ if (above_3ss)
+ tx_power += pwr_idx_2g->ht_3s_diff.bw40 * factor;
+ if (above_4ss)
+ tx_power += pwr_idx_2g->ht_4s_diff.bw40 * factor;
break;
}
@@ -2006,19 +2055,23 @@ static u8 rtw_phy_get_5g_tx_power_index(struct rtw_dev *rtwdev,
u8 rate, u8 group)
{
const struct rtw_chip_info *chip = rtwdev->chip;
- u8 tx_power;
+ bool above_2ss, above_3ss, above_4ss;
+ u8 factor = chip->txgi_factor;
u8 upper, lower;
bool mcs_rate;
- bool above_2ss;
- u8 factor = chip->txgi_factor;
+ u8 tx_power;
tx_power = pwr_idx_5g->bw40_base[group];
- mcs_rate = (rate >= DESC_RATEMCS0 && rate <= DESC_RATEMCS15) ||
+ mcs_rate = (rate >= DESC_RATEMCS0 && rate <= DESC_RATEMCS31) ||
(rate >= DESC_RATEVHT1SS_MCS0 &&
- rate <= DESC_RATEVHT2SS_MCS9);
- above_2ss = (rate >= DESC_RATEMCS8 && rate <= DESC_RATEMCS15) ||
+ rate <= DESC_RATEVHT4SS_MCS9);
+ above_2ss = (rate >= DESC_RATEMCS8 && rate <= DESC_RATEMCS31) ||
(rate >= DESC_RATEVHT2SS_MCS0);
+ above_3ss = (rate >= DESC_RATEMCS16 && rate <= DESC_RATEMCS31) ||
+ (rate >= DESC_RATEVHT3SS_MCS0);
+ above_4ss = (rate >= DESC_RATEMCS24 && rate <= DESC_RATEMCS31) ||
+ (rate >= DESC_RATEVHT4SS_MCS0);
if (!mcs_rate) {
tx_power += pwr_idx_5g->ht_1s_diff.ofdm * factor;
@@ -2033,11 +2086,19 @@ static u8 rtw_phy_get_5g_tx_power_index(struct rtw_dev *rtwdev,
tx_power += pwr_idx_5g->ht_1s_diff.bw20 * factor;
if (above_2ss)
tx_power += pwr_idx_5g->ht_2s_diff.bw20 * factor;
+ if (above_3ss)
+ tx_power += pwr_idx_5g->ht_3s_diff.bw20 * factor;
+ if (above_4ss)
+ tx_power += pwr_idx_5g->ht_4s_diff.bw20 * factor;
break;
case RTW_CHANNEL_WIDTH_40:
/* bw40 is the base power */
if (above_2ss)
tx_power += pwr_idx_5g->ht_2s_diff.bw40 * factor;
+ if (above_3ss)
+ tx_power += pwr_idx_5g->ht_3s_diff.bw40 * factor;
+ if (above_4ss)
+ tx_power += pwr_idx_5g->ht_4s_diff.bw40 * factor;
break;
case RTW_CHANNEL_WIDTH_80:
/* the base idx of bw80 is the average of bw40+/bw40- */
@@ -2048,13 +2109,17 @@ static u8 rtw_phy_get_5g_tx_power_index(struct rtw_dev *rtwdev,
tx_power += pwr_idx_5g->vht_1s_diff.bw80 * factor;
if (above_2ss)
tx_power += pwr_idx_5g->vht_2s_diff.bw80 * factor;
+ if (above_3ss)
+ tx_power += pwr_idx_5g->vht_3s_diff.bw80 * factor;
+ if (above_4ss)
+ tx_power += pwr_idx_5g->vht_4s_diff.bw80 * factor;
break;
}
return tx_power;
}
-/* return RTW_RATE_SECTION_MAX to indicate rate is invalid */
+/* return RTW_RATE_SECTION_NUM to indicate rate is invalid */
static u8 rtw_phy_rate_to_rate_section(u8 rate)
{
if (rate >= DESC_RATE1M && rate <= DESC_RATE11M)
@@ -2065,12 +2130,20 @@ static u8 rtw_phy_rate_to_rate_section(u8 rate)
return RTW_RATE_SECTION_HT_1S;
else if (rate >= DESC_RATEMCS8 && rate <= DESC_RATEMCS15)
return RTW_RATE_SECTION_HT_2S;
+ else if (rate >= DESC_RATEMCS16 && rate <= DESC_RATEMCS23)
+ return RTW_RATE_SECTION_HT_3S;
+ else if (rate >= DESC_RATEMCS24 && rate <= DESC_RATEMCS31)
+ return RTW_RATE_SECTION_HT_4S;
else if (rate >= DESC_RATEVHT1SS_MCS0 && rate <= DESC_RATEVHT1SS_MCS9)
return RTW_RATE_SECTION_VHT_1S;
else if (rate >= DESC_RATEVHT2SS_MCS0 && rate <= DESC_RATEVHT2SS_MCS9)
return RTW_RATE_SECTION_VHT_2S;
+ else if (rate >= DESC_RATEVHT3SS_MCS0 && rate <= DESC_RATEVHT3SS_MCS9)
+ return RTW_RATE_SECTION_VHT_3S;
+ else if (rate >= DESC_RATEVHT4SS_MCS0 && rate <= DESC_RATEVHT4SS_MCS9)
+ return RTW_RATE_SECTION_VHT_4S;
else
- return RTW_RATE_SECTION_MAX;
+ return RTW_RATE_SECTION_NUM;
}
static s8 rtw_phy_get_tx_power_limit(struct rtw_dev *rtwdev, u8 band,
@@ -2088,7 +2161,7 @@ static s8 rtw_phy_get_tx_power_limit(struct rtw_dev *rtwdev, u8 band,
if (regd > RTW_REGD_WW)
return power_limit;
- if (rs == RTW_RATE_SECTION_MAX)
+ if (rs == RTW_RATE_SECTION_NUM)
goto err;
/* only 20M BW with cck and ofdm */
@@ -2096,7 +2169,7 @@ static s8 rtw_phy_get_tx_power_limit(struct rtw_dev *rtwdev, u8 band,
bw = RTW_CHANNEL_WIDTH_20;
/* only 20/40M BW with ht */
- if (rs == RTW_RATE_SECTION_HT_1S || rs == RTW_RATE_SECTION_HT_2S)
+ if (rate >= DESC_RATEMCS0 && rate <= DESC_RATEMCS31)
bw = min_t(u8, bw, RTW_CHANNEL_WIDTH_40);
/* select min power limit among [20M BW ~ current BW] */
@@ -2132,7 +2205,7 @@ static s8 rtw_phy_get_tx_power_sar(struct rtw_dev *rtwdev, u8 sar_band,
.rs = rs,
};
- if (rs == RTW_RATE_SECTION_MAX)
+ if (rs == RTW_RATE_SECTION_NUM)
goto err;
return rtw_query_sar(rtwdev, &arg);
@@ -2214,14 +2287,14 @@ static void rtw_phy_set_tx_power_index_by_rs(struct rtw_dev *rtwdev,
{
struct rtw_hal *hal = &rtwdev->hal;
u8 regd = rtw_regd_get(rtwdev);
- u8 *rates;
+ const u8 *rates;
u8 size;
u8 rate;
u8 pwr_idx;
u8 bw;
int i;
- if (rs >= RTW_RATE_SECTION_MAX)
+ if (rs >= RTW_RATE_SECTION_NUM)
return;
rates = rtw_rate_section[rs];
@@ -2252,7 +2325,7 @@ static void rtw_phy_set_tx_power_level_by_path(struct rtw_dev *rtwdev,
else
rs = RTW_RATE_SECTION_OFDM;
- for (; rs < RTW_RATE_SECTION_MAX; rs++)
+ for (; rs < RTW_RATE_SECTION_NUM; rs++)
rtw_phy_set_tx_power_index_by_rs(rtwdev, ch, path, rs);
}
@@ -2274,13 +2347,13 @@ EXPORT_SYMBOL(rtw_phy_set_tx_power_level);
static void
rtw_phy_tx_power_by_rate_config_by_path(struct rtw_hal *hal, u8 path,
- u8 rs, u8 size, u8 *rates)
+ u8 rs, u8 size, const u8 *rates)
{
u8 rate;
u8 base_idx, rate_idx;
s8 base_2g, base_5g;
- if (rs >= RTW_RATE_SECTION_VHT_1S)
+ if (size == 10) /* VHT rates */
base_idx = rates[size - 3];
else
base_idx = rates[size - 1];
@@ -2297,28 +2370,12 @@ rtw_phy_tx_power_by_rate_config_by_path(struct rtw_hal *hal, u8 path,
void rtw_phy_tx_power_by_rate_config(struct rtw_hal *hal)
{
- u8 path;
+ u8 path, rs;
- for (path = 0; path < RTW_RF_PATH_MAX; path++) {
- rtw_phy_tx_power_by_rate_config_by_path(hal, path,
- RTW_RATE_SECTION_CCK,
- rtw_cck_size, rtw_cck_rates);
- rtw_phy_tx_power_by_rate_config_by_path(hal, path,
- RTW_RATE_SECTION_OFDM,
- rtw_ofdm_size, rtw_ofdm_rates);
- rtw_phy_tx_power_by_rate_config_by_path(hal, path,
- RTW_RATE_SECTION_HT_1S,
- rtw_ht_1s_size, rtw_ht_1s_rates);
- rtw_phy_tx_power_by_rate_config_by_path(hal, path,
- RTW_RATE_SECTION_HT_2S,
- rtw_ht_2s_size, rtw_ht_2s_rates);
- rtw_phy_tx_power_by_rate_config_by_path(hal, path,
- RTW_RATE_SECTION_VHT_1S,
- rtw_vht_1s_size, rtw_vht_1s_rates);
- rtw_phy_tx_power_by_rate_config_by_path(hal, path,
- RTW_RATE_SECTION_VHT_2S,
- rtw_vht_2s_size, rtw_vht_2s_rates);
- }
+ for (path = 0; path < RTW_RF_PATH_MAX; path++)
+ for (rs = 0; rs < RTW_RATE_SECTION_NUM; rs++)
+ rtw_phy_tx_power_by_rate_config_by_path(hal, path, rs,
+ rtw_rate_size[rs], rtw_rate_section[rs]);
}
static void
@@ -2347,7 +2404,7 @@ void rtw_phy_tx_power_limit_config(struct rtw_hal *hal)
for (regd = 0; regd < RTW_REGD_MAX; regd++)
for (bw = 0; bw < RTW_CHANNEL_WIDTH_MAX; bw++)
- for (rs = 0; rs < RTW_RATE_SECTION_MAX; rs++)
+ for (rs = 0; rs < RTW_RATE_SECTION_NUM; rs++)
__rtw_phy_tx_power_limit_config(hal, regd, bw, rs);
}
@@ -2383,7 +2440,7 @@ void rtw_phy_init_tx_power(struct rtw_dev *rtwdev)
/* init tx power limit */
for (regd = 0; regd < RTW_REGD_MAX; regd++)
for (bw = 0; bw < RTW_CHANNEL_WIDTH_MAX; bw++)
- for (rs = 0; rs < RTW_RATE_SECTION_MAX; rs++)
+ for (rs = 0; rs < RTW_RATE_SECTION_NUM; rs++)
rtw_phy_init_tx_power_limit(rtwdev, regd, bw,
rs);
}
@@ -2401,32 +2458,56 @@ void rtw_phy_config_swing_table(struct rtw_dev *rtwdev,
swing_table->n[RF_PATH_A] = tbl->pwrtrk_2g_ccka_n;
swing_table->p[RF_PATH_B] = tbl->pwrtrk_2g_cckb_p;
swing_table->n[RF_PATH_B] = tbl->pwrtrk_2g_cckb_n;
+ swing_table->p[RF_PATH_C] = tbl->pwrtrk_2g_cckc_p;
+ swing_table->n[RF_PATH_C] = tbl->pwrtrk_2g_cckc_n;
+ swing_table->p[RF_PATH_D] = tbl->pwrtrk_2g_cckd_p;
+ swing_table->n[RF_PATH_D] = tbl->pwrtrk_2g_cckd_n;
} else {
swing_table->p[RF_PATH_A] = tbl->pwrtrk_2ga_p;
swing_table->n[RF_PATH_A] = tbl->pwrtrk_2ga_n;
swing_table->p[RF_PATH_B] = tbl->pwrtrk_2gb_p;
swing_table->n[RF_PATH_B] = tbl->pwrtrk_2gb_n;
+ swing_table->p[RF_PATH_C] = tbl->pwrtrk_2gc_p;
+ swing_table->n[RF_PATH_C] = tbl->pwrtrk_2gc_n;
+ swing_table->p[RF_PATH_D] = tbl->pwrtrk_2gd_p;
+ swing_table->n[RF_PATH_D] = tbl->pwrtrk_2gd_n;
}
} else if (IS_CH_5G_BAND_1(channel) || IS_CH_5G_BAND_2(channel)) {
swing_table->p[RF_PATH_A] = tbl->pwrtrk_5ga_p[RTW_PWR_TRK_5G_1];
swing_table->n[RF_PATH_A] = tbl->pwrtrk_5ga_n[RTW_PWR_TRK_5G_1];
swing_table->p[RF_PATH_B] = tbl->pwrtrk_5gb_p[RTW_PWR_TRK_5G_1];
swing_table->n[RF_PATH_B] = tbl->pwrtrk_5gb_n[RTW_PWR_TRK_5G_1];
+ swing_table->p[RF_PATH_C] = tbl->pwrtrk_5gc_p[RTW_PWR_TRK_5G_1];
+ swing_table->n[RF_PATH_C] = tbl->pwrtrk_5gc_n[RTW_PWR_TRK_5G_1];
+ swing_table->p[RF_PATH_D] = tbl->pwrtrk_5gd_p[RTW_PWR_TRK_5G_1];
+ swing_table->n[RF_PATH_D] = tbl->pwrtrk_5gd_n[RTW_PWR_TRK_5G_1];
} else if (IS_CH_5G_BAND_3(channel)) {
swing_table->p[RF_PATH_A] = tbl->pwrtrk_5ga_p[RTW_PWR_TRK_5G_2];
swing_table->n[RF_PATH_A] = tbl->pwrtrk_5ga_n[RTW_PWR_TRK_5G_2];
swing_table->p[RF_PATH_B] = tbl->pwrtrk_5gb_p[RTW_PWR_TRK_5G_2];
swing_table->n[RF_PATH_B] = tbl->pwrtrk_5gb_n[RTW_PWR_TRK_5G_2];
+ swing_table->p[RF_PATH_C] = tbl->pwrtrk_5gc_p[RTW_PWR_TRK_5G_2];
+ swing_table->n[RF_PATH_C] = tbl->pwrtrk_5gc_n[RTW_PWR_TRK_5G_2];
+ swing_table->p[RF_PATH_D] = tbl->pwrtrk_5gd_p[RTW_PWR_TRK_5G_2];
+ swing_table->n[RF_PATH_D] = tbl->pwrtrk_5gd_n[RTW_PWR_TRK_5G_2];
} else if (IS_CH_5G_BAND_4(channel)) {
swing_table->p[RF_PATH_A] = tbl->pwrtrk_5ga_p[RTW_PWR_TRK_5G_3];
swing_table->n[RF_PATH_A] = tbl->pwrtrk_5ga_n[RTW_PWR_TRK_5G_3];
swing_table->p[RF_PATH_B] = tbl->pwrtrk_5gb_p[RTW_PWR_TRK_5G_3];
swing_table->n[RF_PATH_B] = tbl->pwrtrk_5gb_n[RTW_PWR_TRK_5G_3];
+ swing_table->p[RF_PATH_C] = tbl->pwrtrk_5gc_p[RTW_PWR_TRK_5G_3];
+ swing_table->n[RF_PATH_C] = tbl->pwrtrk_5gc_n[RTW_PWR_TRK_5G_3];
+ swing_table->p[RF_PATH_D] = tbl->pwrtrk_5gd_p[RTW_PWR_TRK_5G_3];
+ swing_table->n[RF_PATH_D] = tbl->pwrtrk_5gd_n[RTW_PWR_TRK_5G_3];
} else {
swing_table->p[RF_PATH_A] = tbl->pwrtrk_2ga_p;
swing_table->n[RF_PATH_A] = tbl->pwrtrk_2ga_n;
swing_table->p[RF_PATH_B] = tbl->pwrtrk_2gb_p;
swing_table->n[RF_PATH_B] = tbl->pwrtrk_2gb_n;
+ swing_table->p[RF_PATH_C] = tbl->pwrtrk_2gc_p;
+ swing_table->n[RF_PATH_C] = tbl->pwrtrk_2gc_n;
+ swing_table->p[RF_PATH_D] = tbl->pwrtrk_2gd_p;
+ swing_table->n[RF_PATH_D] = tbl->pwrtrk_2gd_n;
}
}
EXPORT_SYMBOL(rtw_phy_config_swing_table);
diff --git a/drivers/net/wireless/realtek/rtw88/phy.h b/drivers/net/wireless/realtek/rtw88/phy.h
index ccfcbd3ced03..c9e6b869661d 100644
--- a/drivers/net/wireless/realtek/rtw88/phy.h
+++ b/drivers/net/wireless/realtek/rtw88/phy.h
@@ -7,14 +7,18 @@
#include "debug.h"
-extern u8 rtw_cck_rates[];
-extern u8 rtw_ofdm_rates[];
-extern u8 rtw_ht_1s_rates[];
-extern u8 rtw_ht_2s_rates[];
-extern u8 rtw_vht_1s_rates[];
-extern u8 rtw_vht_2s_rates[];
-extern u8 *rtw_rate_section[];
-extern u8 rtw_rate_size[];
+extern const u8 rtw_cck_rates[];
+extern const u8 rtw_ofdm_rates[];
+extern const u8 rtw_ht_1s_rates[];
+extern const u8 rtw_ht_2s_rates[];
+extern const u8 rtw_vht_1s_rates[];
+extern const u8 rtw_vht_2s_rates[];
+extern const u8 rtw_ht_3s_rates[];
+extern const u8 rtw_ht_4s_rates[];
+extern const u8 rtw_vht_3s_rates[];
+extern const u8 rtw_vht_4s_rates[];
+extern const u8 * const rtw_rate_section[];
+extern const u8 rtw_rate_size[];
void rtw_phy_init(struct rtw_dev *rtwdev);
void rtw_phy_dynamic_mechanism(struct rtw_dev *rtwdev);
diff --git a/drivers/net/wireless/realtek/rtw88/reg.h b/drivers/net/wireless/realtek/rtw88/reg.h
index e438405fba56..08e9494977e0 100644
--- a/drivers/net/wireless/realtek/rtw88/reg.h
+++ b/drivers/net/wireless/realtek/rtw88/reg.h
@@ -8,6 +8,7 @@
#define REG_SYS_FUNC_EN 0x0002
#define BIT_FEN_EN_25_1 BIT(13)
#define BIT_FEN_ELDR BIT(12)
+#define BIT_FEN_PCIEA BIT(6)
#define BIT_FEN_CPUEN BIT(2)
#define BIT_FEN_USBA BIT(2)
#define BIT_FEN_BB_GLB_RST BIT(1)
@@ -39,6 +40,9 @@
#define BIT_RF_RSTB BIT(1)
#define BIT_RF_EN BIT(0)
+#define REG_RF_CTRL1 0x0020
+#define REG_RF_CTRL2 0x0021
+
#define REG_AFE_CTRL1 0x0024
#define BIT_MAC_CLK_SEL (BIT(20) | BIT(21))
#define REG_EFUSE_CTRL 0x0030
@@ -73,6 +77,8 @@
#define BIT_BT_PTA_EN BIT(5)
#define BIT_WLRFE_4_5_EN BIT(2)
+#define REG_GPIO_PIN_CTRL 0x0044
+
#define REG_LED_CFG 0x004C
#define BIT_LNAON_SEL_EN BIT(26)
#define BIT_PAPE_SEL_EN BIT(25)
@@ -110,6 +116,7 @@
#define BIT_SDIO_PAD_E5 BIT(18)
#define REG_RF_B_CTRL 0x76
+#define REG_RF_CTRL3 0x0076
#define REG_AFE_CTRL_4 0x0078
#define BIT_CK320M_AFE_EN BIT(4)
@@ -130,6 +137,7 @@
#define BIT_SHIFT_ROM_PGE 16
#define BIT_FW_INIT_RDY BIT(15)
#define BIT_FW_DW_RDY BIT(14)
+#define BIT_CPU_CLK_SEL (BIT(12) | BIT(13))
#define BIT_RPWM_TOGGLE BIT(7)
#define BIT_RAM_DL_SEL BIT(7) /* legacy only */
#define BIT_DMEM_CHKSUM_OK BIT(6)
@@ -147,7 +155,7 @@
BIT_CHECK_SUM_OK)
#define FW_READY_LEGACY (BIT_MCUFWDL_RDY | BIT_FWDL_CHK_RPT | \
BIT_WINTINI_RDY | BIT_RAM_DL_SEL)
-#define FW_READY_MASK 0xffff
+#define FW_READY_MASK (0xffff & ~BIT_CPU_CLK_SEL)
#define REG_MCU_TST_CFG 0x84
#define VAL_FW_TRIGGER 0x1
@@ -602,15 +610,25 @@
#define REG_CCA2ND 0x0838
#define REG_L1PKTH 0x0848
#define REG_CLKTRK 0x0860
+#define REG_CSI_MASK_SETTING1 0x0874
+#define REG_NBI_SETTING 0x087c
+#define BIT_NBI_ENABLE BIT(13)
+#define REG_CSI_FIX_MASK0 0x0880
+#define REG_CSI_FIX_MASK1 0x0884
+#define REG_CSI_FIX_MASK6 0x0898
+#define REG_CSI_FIX_MASK7 0x089c
#define REG_ADCCLK 0x08AC
#define REG_HSSI_READ 0x08B0
#define REG_FPGA0_XCD_RF_PARA 0x08B4
#define REG_RX_MCS_LIMIT 0x08BC
#define REG_ADC160 0x08C4
+#define REG_DBGSEL 0x08fc
#define REG_ANTSEL_SW 0x0900
#define REG_DAC_RSTB 0x090c
+#define REG_PSD 0x0910
+#define BIT_PSD_INI GENMASK(23, 22)
#define REG_SINGLE_TONE_CONT_TX 0x0914
-
+#define REG_AGC_TABLE 0x0958
#define REG_RFE_CTRL_E 0x0974
#define REG_2ND_CCA_CTRL 0x0976
#define REG_IQK_COM00 0x0978
@@ -620,10 +638,18 @@
#define REG_FAS 0x09a4
#define REG_RXSB 0x0a00
+#define BIT_RXSB_ANA_DIV BIT(15)
#define REG_CCK_RX 0x0a04
#define REG_CCK_PD_TH 0x0a0a
-
-#define REG_CCK0_FAREPORT 0xa2c
+#define REG_PRECTRL 0x0a14
+#define BIT_DIS_CO_PATHSEL BIT(7)
+#define BIT_IQ_WGT GENMASK(9, 8)
+#define REG_CCA_MF 0x0a20
+#define BIT_MBC_WIN GENMASK(5, 4)
+#define REG_CCK0_TX_FILTER1 0x0a20
+#define REG_CCK0_TX_FILTER2 0x0a24
+#define REG_CCK0_DEBUG_PORT 0x0a28
+#define REG_CCK0_FAREPORT 0x0a2c
#define BIT_CCK0_2RX BIT(18)
#define BIT_CCK0_MRC BIT(22)
#define REG_FA_CCK 0x0a5c
@@ -642,10 +668,18 @@
#define DIS_DPD_RATEVHT2SS_MCS1 BIT(9)
#define DIS_DPD_RATEALL GENMASK(9, 0)
+#define REG_CCA 0x0a70
+#define BIT_CCA_CO BIT(7)
+#define REG_ANTSEL 0x0a74
+#define BIT_ANT_BYCO BIT(8)
+#define REG_CCKTX 0x0a84
+#define BIT_CMB_CCA_2R BIT(28)
+
#define REG_CNTRST 0x0b58
#define REG_3WIRE_SWA 0x0c00
#define REG_RX_IQC_AB_A 0x0c10
+#define REG_RX_IQC_CD_A 0x0c14
#define REG_TXSCALE_A 0x0c1c
#define BB_SWING_MASK GENMASK(31, 21)
#define REG_TX_AGC_A_CCK_11_CCK_1 0xc20
@@ -673,7 +707,7 @@
#define REG_LSSI_WRITE_A 0x0c90
#define REG_PREDISTA 0x0c90
#define REG_TXAGCIDX 0x0c94
-
+#define REG_TX_AGC_A 0x0c94
#define REG_RFE_PINMUX_A 0x0cb0
#define REG_RFE_INV_A 0x0cb4
#define REG_RFE_CTRL8 0x0cb4
@@ -682,6 +716,7 @@
#define DPDT_CTRL_PIN 0x77
#define RFE_INV_MASK 0x3ff00000
#define REG_RFECTL_A 0x0cb8
+#define REG_RFE_INV0 0x0cbc
#define REG_RFE_INV8 0x0cbd
#define BIT_MASK_RFE_INV89 GENMASK(1, 0)
#define REG_RFE_INV16 0x0cbe
@@ -702,6 +737,7 @@
#define REG_3WIRE_SWB 0x0e00
#define REG_RX_IQC_AB_B 0x0e10
+#define REG_RX_IQC_CD_B 0x0e14
#define REG_TXSCALE_B 0x0e1c
#define REG_TX_AGC_B_CCK_11_CCK_1 0xe20
#define REG_TX_AGC_B_OFDM18_OFDM6 0xe24
@@ -728,6 +764,7 @@
#define REG_LSSI_WRITE_B 0x0e90
#define REG_PREDISTB 0x0e90
#define REG_INIDLYB 0x0e94
+#define REG_TX_AGC_B 0x0e94
#define REG_RFE_PINMUX_B 0x0eb0
#define REG_RFE_INV_B 0x0eb4
#define REG_RFECTL_B 0x0eb8
@@ -743,8 +780,11 @@
#define REG_CRC_HT 0x0f10
#define REG_CRC_OFDM 0x0f14
#define REG_FA_OFDM 0x0f48
+#define REG_DBGRPT 0x0fa0
#define REG_CCA_CCK 0x0fcc
+#define REG_SYS_CFG3_8814A 0x1000
+
#define REG_ANAPARSW_MAC_0 0x1010
#define BIT_CF_L_V2 GENMASK(29, 28)
@@ -862,9 +902,27 @@
#define LTECOEX_WRITE_DATA REG_WL2LTECOEX_INDIRECT_ACCESS_WRITE_DATA_V1
#define LTECOEX_READ_DATA REG_WL2LTECOEX_INDIRECT_ACCESS_READ_DATA_V1
+#define REG_RX_IQC_AB_C 0x1810
+#define REG_RX_IQC_CD_C 0x1814
+#define REG_TXSCALE_C 0x181c
+#define REG_CK_MONHC 0x185c
+#define REG_AFE_PWR1_C 0x1860
#define REG_IGN_GNT_BT1 0x1860
+#define REG_TX_AGC_C 0x1894
+#define REG_RFE_PINMUX_C 0x18b4
#define REG_RFESEL_CTRL 0x1990
+#define REG_AGC_TBL 0x1998
+
+#define REG_RX_IQC_AB_D 0x1a10
+#define REG_RX_IQC_CD_D 0x1a14
+#define REG_TXSCALE_D 0x1a1c
+#define REG_CK_MONHD 0x1a5c
+#define REG_AFE_PWR1_D 0x1a60
+#define REG_TX_AGC_D 0x1a94
+#define REG_RFE_PINMUX_D 0x1ab4
+#define REG_RFE_INVSEL_D 0x1abc
+#define BIT_RFE_SELSW0_D GENMASK(27, 20)
#define REG_NOMASK_TXBT 0x1ca7
#define REG_ANAPAR 0x1c30
@@ -905,6 +963,7 @@
#define RF18_BAND_MASK (BIT(16) | BIT(9) | BIT(8))
#define RF18_CHANNEL_MASK (MASKBYTE0)
#define RF18_RFSI_MASK (BIT(18) | BIT(17))
+#define RF_RCK1_V1 0x1c
#define RF_RCK 0x1d
#define RF_MODE_TABLE_ADDR 0x30
#define RF_MODE_TABLE_DATA0 0x31
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8723d.c b/drivers/net/wireless/realtek/rtw88/rtw8723d.c
index eeca31bf71f1..87715bd54860 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8723d.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8723d.c
@@ -444,7 +444,7 @@ static u8 rtw8723d_iqk_check_tx_failed(struct rtw_dev *rtwdev,
rtw_read32(rtwdev, REG_IQK_RES_TX),
rtw_read32(rtwdev, REG_IQK_RES_TY));
rtw_dbg(rtwdev, RTW_DBG_RFK,
- "[IQK] 0xe90(before IQK)= 0x%x, 0xe98(afer IQK) = 0x%x\n",
+ "[IQK] 0xe90(before IQK)= 0x%x, 0xe98(after IQK) = 0x%x\n",
rtw_read32(rtwdev, 0xe90),
rtw_read32(rtwdev, 0xe98));
@@ -472,7 +472,7 @@ static u8 rtw8723d_iqk_check_rx_failed(struct rtw_dev *rtwdev,
rtw_read32(rtwdev, REG_IQK_RES_RY));
rtw_dbg(rtwdev, RTW_DBG_RFK,
- "[IQK] 0xea0(before IQK)= 0x%x, 0xea8(afer IQK) = 0x%x\n",
+ "[IQK] 0xea0(before IQK)= 0x%x, 0xea8(after IQK) = 0x%x\n",
rtw_read32(rtwdev, 0xea0),
rtw_read32(rtwdev, 0xea8));
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8814a.c b/drivers/net/wireless/realtek/rtw88/rtw8814a.c
new file mode 100644
index 000000000000..cfd35d40d46e
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw88/rtw8814a.c
@@ -0,0 +1,2257 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright(c) 2025 Realtek Corporation
+ */
+
+#include <linux/usb.h>
+#include "main.h"
+#include "coex.h"
+#include "tx.h"
+#include "phy.h"
+#include "rtw8814a.h"
+#include "rtw8814a_table.h"
+#include "rtw88xxa.h"
+#include "reg.h"
+#include "debug.h"
+#include "efuse.h"
+#include "regd.h"
+#include "usb.h"
+
+static void rtw8814a_efuse_grant(struct rtw_dev *rtwdev, bool on)
+{
+ if (on) {
+ rtw_write8(rtwdev, REG_EFUSE_ACCESS, EFUSE_ACCESS_ON);
+
+ rtw_write16_set(rtwdev, REG_SYS_FUNC_EN, BIT_FEN_ELDR);
+ rtw_write16_set(rtwdev, REG_SYS_CLKR,
+ BIT_LOADER_CLK_EN | BIT_ANA8M);
+ } else {
+ rtw_write8(rtwdev, REG_EFUSE_ACCESS, EFUSE_ACCESS_OFF);
+ }
+}
+
+static void rtw8814a_read_rfe_type(struct rtw_dev *rtwdev)
+{
+ struct rtw_efuse *efuse = &rtwdev->efuse;
+
+ if (!(efuse->rfe_option & BIT(7)))
+ return;
+
+ if (rtw_hci_type(rtwdev) == RTW_HCI_TYPE_PCIE)
+ efuse->rfe_option = 0;
+ else if (rtw_hci_type(rtwdev) == RTW_HCI_TYPE_USB)
+ efuse->rfe_option = 1;
+}
+
+static void rtw8814a_read_amplifier_type(struct rtw_dev *rtwdev)
+{
+ struct rtw_efuse *efuse = &rtwdev->efuse;
+
+ switch (efuse->rfe_option) {
+ case 1:
+ /* Internal 2G */
+ efuse->pa_type_2g = 0;
+ efuse->lna_type_2g = 0;
+ /* External 5G */
+ efuse->pa_type_5g = BIT(0);
+ efuse->lna_type_5g = BIT(3);
+ break;
+ case 2 ... 5:
+ /* External everything */
+ efuse->pa_type_2g = BIT(4);
+ efuse->lna_type_2g = BIT(3);
+ efuse->pa_type_5g = BIT(0);
+ efuse->lna_type_5g = BIT(3);
+ break;
+ case 6:
+ efuse->lna_type_5g = BIT(3);
+ break;
+ default:
+ break;
+ }
+}
+
+static void rtw8814a_read_rf_type(struct rtw_dev *rtwdev,
+ struct rtw8814a_efuse *map)
+{
+ struct rtw_usb *rtwusb = rtw_get_usb_priv(rtwdev);
+ struct rtw_hal *hal = &rtwdev->hal;
+
+ switch (map->trx_antenna_option) {
+ case 0xff: /* 4T4R */
+ case 0xee: /* 3T3R */
+ if (rtw_hci_type(rtwdev) == RTW_HCI_TYPE_USB &&
+ rtwusb->udev->speed != USB_SPEED_SUPER)
+ hal->rf_type = RF_2T2R;
+ else
+ hal->rf_type = RF_3T3R;
+
+ break;
+ case 0x66: /* 2T2R */
+ case 0x6f: /* 2T4R */
+ default:
+ hal->rf_type = RF_2T2R;
+ break;
+ }
+
+ hal->rf_path_num = 4;
+ hal->rf_phy_num = 4;
+
+ if (hal->rf_type == RF_3T3R) {
+ hal->antenna_rx = BB_PATH_ABC;
+ hal->antenna_tx = BB_PATH_ABC;
+ } else {
+ hal->antenna_rx = BB_PATH_AB;
+ hal->antenna_tx = BB_PATH_AB;
+ }
+}
+
+static void rtw8814a_init_hwcap(struct rtw_dev *rtwdev)
+{
+ struct rtw_efuse *efuse = &rtwdev->efuse;
+ struct rtw_hal *hal = &rtwdev->hal;
+
+ efuse->hw_cap.bw = BIT(RTW_CHANNEL_WIDTH_20) |
+ BIT(RTW_CHANNEL_WIDTH_40) |
+ BIT(RTW_CHANNEL_WIDTH_80);
+ efuse->hw_cap.ptcl = EFUSE_HW_CAP_PTCL_VHT;
+
+ if (hal->rf_type == RF_3T3R)
+ efuse->hw_cap.nss = 3;
+ else
+ efuse->hw_cap.nss = 2;
+
+ rtw_dbg(rtwdev, RTW_DBG_EFUSE,
+ "hw cap: hci=0x%02x, bw=0x%02x, ptcl=0x%02x, ant_num=%d, nss=%d\n",
+ efuse->hw_cap.hci, efuse->hw_cap.bw, efuse->hw_cap.ptcl,
+ efuse->hw_cap.ant_num, efuse->hw_cap.nss);
+}
+
+static int rtw8814a_read_efuse(struct rtw_dev *rtwdev, u8 *log_map)
+{
+ struct rtw_efuse *efuse = &rtwdev->efuse;
+ struct rtw8814a_efuse *map;
+ int i;
+
+ if (rtw_dbg_is_enabled(rtwdev, RTW_DBG_EFUSE))
+ print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 1,
+ log_map, rtwdev->chip->log_efuse_size, true);
+
+ map = (struct rtw8814a_efuse *)log_map;
+
+ efuse->usb_mode_switch = u8_get_bits(map->usb_mode, BIT(4));
+ efuse->rfe_option = map->rfe_option;
+ efuse->rf_board_option = map->rf_board_option;
+ efuse->crystal_cap = map->xtal_k;
+ efuse->channel_plan = map->channel_plan;
+ efuse->country_code[0] = map->country_code[0];
+ efuse->country_code[1] = map->country_code[1];
+ efuse->bt_setting = map->rf_bt_setting;
+ efuse->regd = map->rf_board_option & 0x7;
+ efuse->thermal_meter[RF_PATH_A] = map->thermal_meter;
+ efuse->thermal_meter_k = map->thermal_meter;
+ efuse->tx_bb_swing_setting_2g = map->tx_bb_swing_setting_2g;
+ efuse->tx_bb_swing_setting_5g = map->tx_bb_swing_setting_5g;
+
+ rtw8814a_read_rfe_type(rtwdev);
+ rtw8814a_read_amplifier_type(rtwdev);
+
+ /* Override rtw_chip_parameter_setup() */
+ rtw8814a_read_rf_type(rtwdev, map);
+
+ rtw8814a_init_hwcap(rtwdev);
+
+ for (i = 0; i < 4; i++)
+ efuse->txpwr_idx_table[i] = map->txpwr_idx_table[i];
+
+ switch (rtw_hci_type(rtwdev)) {
+ case RTW_HCI_TYPE_USB:
+ ether_addr_copy(efuse->addr, map->u.mac_addr);
+ break;
+ case RTW_HCI_TYPE_PCIE:
+ ether_addr_copy(efuse->addr, map->e.mac_addr);
+ break;
+ case RTW_HCI_TYPE_SDIO:
+ default:
+ /* unsupported now */
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static void rtw8814a_init_rfe_reg(struct rtw_dev *rtwdev)
+{
+ u8 rfe_option = rtwdev->efuse.rfe_option;
+
+ if (rfe_option == 2 || rfe_option == 1) {
+ rtw_write32_mask(rtwdev, 0x1994, 0xf, 0xf);
+ rtw_write8_set(rtwdev, REG_GPIO_MUXCFG + 2, 0xf0);
+ } else if (rfe_option == 0) {
+ rtw_write32_mask(rtwdev, 0x1994, 0xf, 0xf);
+ rtw_write8_set(rtwdev, REG_GPIO_MUXCFG + 2, 0xc0);
+ }
+}
+
+#define RTW_TXSCALE_SIZE 37
+static const u32 rtw8814a_txscale_tbl[RTW_TXSCALE_SIZE] = {
+ 0x081, 0x088, 0x090, 0x099, 0x0a2, 0x0ac, 0x0b6, 0x0c0, 0x0cc, 0x0d8,
+ 0x0e5, 0x0f2, 0x101, 0x110, 0x120, 0x131, 0x143, 0x156, 0x16a, 0x180,
+ 0x197, 0x1af, 0x1c8, 0x1e3, 0x200, 0x21e, 0x23e, 0x261, 0x285, 0x2ab,
+ 0x2d3, 0x2fe, 0x32b, 0x35c, 0x38e, 0x3c4, 0x3fe
+};
+
+static u32 rtw8814a_get_bb_swing(struct rtw_dev *rtwdev, u8 band, u8 rf_path)
+{
+ static const u32 swing2setting[4] = {0x200, 0x16a, 0x101, 0x0b6};
+ struct rtw_efuse *efuse = &rtwdev->efuse;
+ u8 tx_bb_swing;
+
+ if (band == RTW_BAND_2G)
+ tx_bb_swing = efuse->tx_bb_swing_setting_2g;
+ else
+ tx_bb_swing = efuse->tx_bb_swing_setting_5g;
+
+ tx_bb_swing >>= 2 * rf_path;
+ tx_bb_swing &= 0x3;
+
+ return swing2setting[tx_bb_swing];
+}
+
+static u8 rtw8814a_get_swing_index(struct rtw_dev *rtwdev)
+{
+ u32 swing, table_value;
+ u8 i;
+
+ swing = rtw8814a_get_bb_swing(rtwdev, rtwdev->hal.current_band_type,
+ RF_PATH_A);
+
+ for (i = 0; i < ARRAY_SIZE(rtw8814a_txscale_tbl); i++) {
+ table_value = rtw8814a_txscale_tbl[i];
+ if (swing == table_value)
+ return i;
+ }
+
+ return 24;
+}
+
+static void rtw8814a_pwrtrack_init(struct rtw_dev *rtwdev)
+{
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+ u8 path;
+
+ dm_info->default_ofdm_index = rtw8814a_get_swing_index(rtwdev);
+
+ for (path = RF_PATH_A; path < rtwdev->hal.rf_path_num; path++) {
+ ewma_thermal_init(&dm_info->avg_thermal[path]);
+ dm_info->delta_power_index[path] = 0;
+ dm_info->delta_power_index_last[path] = 0;
+ }
+ dm_info->pwr_trk_triggered = false;
+ dm_info->pwr_trk_init_trigger = true;
+ dm_info->thermal_meter_k = rtwdev->efuse.thermal_meter_k;
+}
+
+static void rtw8814a_config_trx_path(struct rtw_dev *rtwdev)
+{
+ /* RX CCK disable 2R CCA */
+ rtw_write32_clr(rtwdev, REG_CCK0_FAREPORT,
+ BIT_CCK0_2RX | BIT_CCK0_MRC);
+ /* pathB tx on, path A/C/D tx off */
+ rtw_write32_mask(rtwdev, REG_CCK_RX, 0xf0000000, 0x4);
+ /* pathB rx */
+ rtw_write32_mask(rtwdev, REG_CCK_RX, 0x0f000000, 0x5);
+}
+
+static void rtw8814a_config_cck_rx_antenna_init(struct rtw_dev *rtwdev)
+{
+ /* CCK 2R CCA parameters */
+
+ /* Disable Ant diversity */
+ rtw_write32_mask(rtwdev, REG_RXSB, BIT_RXSB_ANA_DIV, 0x0);
+ /* Concurrent CCA at LSB & USB */
+ rtw_write32_mask(rtwdev, REG_CCA, BIT_CCA_CO, 0);
+ /* RX path diversity enable */
+ rtw_write32_mask(rtwdev, REG_ANTSEL, BIT_ANT_BYCO, 0);
+ /* r_en_mrc_antsel */
+ rtw_write32_mask(rtwdev, REG_PRECTRL, BIT_DIS_CO_PATHSEL, 0);
+ /* MBC weighting */
+ rtw_write32_mask(rtwdev, REG_CCA_MF, BIT_MBC_WIN, 1);
+ /* 2R CCA only */
+ rtw_write32_mask(rtwdev, REG_CCKTX, BIT_CMB_CCA_2R, 1);
+}
+
+static void rtw8814a_phy_set_param(struct rtw_dev *rtwdev)
+{
+ u32 crystal_cap, val32;
+ u8 val8, rf_path;
+
+ /* power on BB/RF domain */
+ if (rtw_hci_type(rtwdev) == RTW_HCI_TYPE_USB)
+ rtw_write8_set(rtwdev, REG_SYS_FUNC_EN, BIT_FEN_USBA);
+ else if (rtw_hci_type(rtwdev) == RTW_HCI_TYPE_PCIE)
+ rtw_write8_set(rtwdev, REG_SYS_FUNC_EN, BIT_FEN_PCIEA);
+
+ rtw_write8_set(rtwdev, REG_SYS_CFG3_8814A + 2,
+ BIT_FEN_BB_GLB_RST | BIT_FEN_BB_RSTB);
+
+ /* Power on RF paths A..D */
+ val8 = BIT_RF_EN | BIT_RF_RSTB | BIT_RF_SDM_RSTB;
+ rtw_write8(rtwdev, REG_RF_CTRL, val8);
+ rtw_write8(rtwdev, REG_RF_CTRL1, val8);
+ rtw_write8(rtwdev, REG_RF_CTRL2, val8);
+ rtw_write8(rtwdev, REG_RF_CTRL3, val8);
+
+ rtw_load_table(rtwdev, rtwdev->chip->bb_tbl);
+ rtw_load_table(rtwdev, rtwdev->chip->agc_tbl);
+
+ crystal_cap = rtwdev->efuse.crystal_cap & 0x3F;
+ crystal_cap |= crystal_cap << 6;
+ rtw_write32_mask(rtwdev, REG_AFE_CTRL3, 0x07ff8000, crystal_cap);
+
+ rtw8814a_config_trx_path(rtwdev);
+
+ for (rf_path = 0; rf_path < rtwdev->hal.rf_path_num; rf_path++)
+ rtw_load_table(rtwdev, rtwdev->chip->rf_tbl[rf_path]);
+
+ val32 = rtw_read_rf(rtwdev, RF_PATH_A, RF_RCK1_V1, RFREG_MASK);
+ rtw_write_rf(rtwdev, RF_PATH_B, RF_RCK1_V1, RFREG_MASK, val32);
+ rtw_write_rf(rtwdev, RF_PATH_C, RF_RCK1_V1, RFREG_MASK, val32);
+ rtw_write_rf(rtwdev, RF_PATH_D, RF_RCK1_V1, RFREG_MASK, val32);
+
+ rtw_write32_set(rtwdev, REG_RXPSEL, BIT_RX_PSEL_RST);
+
+ rtw_write8(rtwdev, REG_HWSEQ_CTRL, 0xFF);
+
+ rtw_write32(rtwdev, REG_BAR_MODE_CTRL, 0x0201ffff);
+
+ rtw_write8(rtwdev, REG_MISC_CTRL, BIT_DIS_SECOND_CCA);
+
+ rtw_write8(rtwdev, REG_NAV_CTRL + 2, 0);
+
+ rtw_write8_clr(rtwdev, REG_GPIO_MUXCFG, BIT(5));
+
+ rtw8814a_config_cck_rx_antenna_init(rtwdev);
+
+ rtw_phy_init(rtwdev);
+ rtw8814a_pwrtrack_init(rtwdev);
+
+ rtw8814a_init_rfe_reg(rtwdev);
+
+ rtw_write8_clr(rtwdev, REG_QUEUE_CTRL, BIT(3));
+
+ rtw_write8(rtwdev, REG_NAV_CTRL + 2, 235);
+
+ /* enable Tx report. */
+ rtw_write8(rtwdev, REG_FWHW_TXQ_CTRL + 1, 0x1F);
+
+ if (rtw_hci_type(rtwdev) == RTW_HCI_TYPE_USB) {
+ /* Reset USB mode switch setting */
+ rtw_write8(rtwdev, REG_SYS_SDIO_CTRL, 0x0);
+ rtw_write8(rtwdev, REG_ACLK_MON, 0x0);
+ }
+}
+
+static void rtw8814ae_enable_rf_1_2v(struct rtw_dev *rtwdev)
+{
+	/* This is for the full-size card, where GPIO7 is floating.
+	 * Pull GPIO7 high to enable the RF 1.2V switch power supply.
+	 */
+
+ /* 1. set 0x40[1:0] to 0, BIT_GPIOSEL=0, select pin as GPIO */
+ rtw_write8_clr(rtwdev, REG_GPIO_MUXCFG, BIT(1) | BIT(0));
+
+ /* 2. set 0x44[31] to 0
+ * mode=0: data port;
+ * mode=1 and BIT_GPIO_IO_SEL=0: interrupt mode;
+ */
+ rtw_write8_clr(rtwdev, REG_GPIO_PIN_CTRL + 3, BIT(7));
+
+ /* 3. data mode
+ * 3.1 set 0x44[23] to 1
+ * sel=0: input;
+ * sel=1: output;
+ */
+ rtw_write8_set(rtwdev, REG_GPIO_PIN_CTRL + 2, BIT(7));
+
+ /* 3.2 set 0x44[15] to 1
+ * output high value;
+ */
+ rtw_write8_set(rtwdev, REG_GPIO_PIN_CTRL + 1, BIT(7));
+}
+
+static int rtw8814a_mac_init(struct rtw_dev *rtwdev)
+{
+ struct rtw_usb *rtwusb = rtw_get_usb_priv(rtwdev);
+
+ rtw_write16(rtwdev, REG_CR,
+ MAC_TRX_ENABLE | BIT_MAC_SEC_EN | BIT_32K_CAL_TMR_EN);
+
+ rtw_load_table(rtwdev, rtwdev->chip->mac_tbl);
+
+ if (rtw_hci_type(rtwdev) == RTW_HCI_TYPE_USB)
+ rtw_write8(rtwdev, REG_AUTO_LLT_V1 + 3,
+ rtwdev->chip->usb_tx_agg_desc_num << 1);
+
+ rtw_write32(rtwdev, REG_HIMR0, 0);
+ rtw_write32(rtwdev, REG_HIMR1, 0);
+
+ rtw_write32_mask(rtwdev, REG_RRSR, 0xfffff, 0xfffff);
+
+ rtw_write16(rtwdev, REG_RETRY_LIMIT, 0x3030);
+
+ rtw_write16(rtwdev, REG_RXFLTMAP0, 0xffff);
+ rtw_write16(rtwdev, REG_RXFLTMAP1, 0x0400);
+ rtw_write16(rtwdev, REG_RXFLTMAP2, 0xffff);
+
+ rtw_write8(rtwdev, REG_MAX_AGGR_NUM, 0x36);
+ rtw_write8(rtwdev, REG_MAX_AGGR_NUM + 1, 0x36);
+
+ /* Set Spec SIFS (used in NAV) */
+ rtw_write16(rtwdev, REG_SPEC_SIFS, 0x100a);
+ rtw_write16(rtwdev, REG_MAC_SPEC_SIFS, 0x100a);
+
+ /* Set SIFS for CCK */
+ rtw_write16(rtwdev, REG_SIFS, 0x100a);
+
+ /* Set SIFS for OFDM */
+ rtw_write16(rtwdev, REG_SIFS + 2, 0x100a);
+
+ /* TXOP */
+ rtw_write32(rtwdev, REG_EDCA_BE_PARAM, 0x005EA42B);
+ rtw_write32(rtwdev, REG_EDCA_BK_PARAM, 0x0000A44F);
+ rtw_write32(rtwdev, REG_EDCA_VI_PARAM, 0x005EA324);
+ rtw_write32(rtwdev, REG_EDCA_VO_PARAM, 0x002FA226);
+
+ rtw_write8_set(rtwdev, REG_FWHW_TXQ_CTRL, BIT(7));
+
+ rtw_write8(rtwdev, REG_ACKTO, 0x80);
+
+ rtw_write16(rtwdev, REG_BCN_CTRL,
+ BIT_DIS_TSF_UDT | (BIT_DIS_TSF_UDT << 8));
+ rtw_write32_mask(rtwdev, REG_TBTT_PROHIBIT, 0xfffff, WLAN_TBTT_TIME);
+ rtw_write8(rtwdev, REG_DRVERLYINT, 0x05);
+ rtw_write8(rtwdev, REG_BCNDMATIM, WLAN_BCN_DMA_TIME);
+ rtw_write16(rtwdev, REG_BCNTCFG, 0x4413);
+ rtw_write8(rtwdev, REG_BCN_MAX_ERR, 0xFF);
+
+ rtw_write32(rtwdev, REG_FAST_EDCA_VOVI_SETTING, 0x08070807);
+ rtw_write32(rtwdev, REG_FAST_EDCA_BEBK_SETTING, 0x08070807);
+
+ if (rtw_hci_type(rtwdev) == RTW_HCI_TYPE_USB &&
+ rtwusb->udev->speed == USB_SPEED_SUPER) {
+ /* Disable U1/U2 Mode to avoid 2.5G spur in USB3.0. */
+ rtw_write8_clr(rtwdev, REG_USB_MOD, BIT(4) | BIT(3));
+		/* Avoid USB 3.0 H2C failure. */
+ rtw_write16(rtwdev, 0xf002, 0);
+
+ rtw_write8_clr(rtwdev, REG_SW_AMPDU_BURST_MODE_CTRL,
+ BIT_PRE_TX_CMD);
+ } else if (rtw_hci_type(rtwdev) == RTW_HCI_TYPE_PCIE) {
+ rtw8814ae_enable_rf_1_2v(rtwdev);
+
+ /* Force the antenna b to wifi. */
+ rtw_write8_set(rtwdev, REG_PAD_CTRL1, BIT(2));
+ rtw_write8_set(rtwdev, REG_PAD_CTRL1 + 1, BIT(0));
+ rtw_write8_set(rtwdev, REG_LED_CFG + 3,
+ (BIT(27) | BIT_DPDT_WL_SEL) >> 24);
+ }
+
+ return 0;
+}
+
+static void rtw8814a_set_rfe_reg_24g(struct rtw_dev *rtwdev)
+{
+ switch (rtwdev->efuse.rfe_option) {
+ case 2:
+ rtw_write32(rtwdev, REG_RFE_PINMUX_A, 0x72707270);
+ rtw_write32(rtwdev, REG_RFE_PINMUX_B, 0x72707270);
+ rtw_write32(rtwdev, REG_RFE_PINMUX_C, 0x72707270);
+ rtw_write32(rtwdev, REG_RFE_PINMUX_D, 0x77707770);
+
+ rtw_write32_mask(rtwdev, REG_RFE_INVSEL_D,
+ BIT_RFE_SELSW0_D, 0x72);
+
+ break;
+ case 1:
+ rtw_write32(rtwdev, REG_RFE_PINMUX_A, 0x77777777);
+ rtw_write32(rtwdev, REG_RFE_PINMUX_B, 0x77777777);
+ rtw_write32(rtwdev, REG_RFE_PINMUX_C, 0x77777777);
+ rtw_write32(rtwdev, REG_RFE_PINMUX_D, 0x77777777);
+
+ rtw_write32_mask(rtwdev, REG_RFE_INVSEL_D,
+ BIT_RFE_SELSW0_D, 0x77);
+
+ break;
+ case 0:
+ default:
+ rtw_write32(rtwdev, REG_RFE_PINMUX_A, 0x77777777);
+ rtw_write32(rtwdev, REG_RFE_PINMUX_B, 0x77777777);
+ rtw_write32(rtwdev, REG_RFE_PINMUX_C, 0x77777777);
+ /* Is it not necessary to set REG_RFE_PINMUX_D ? */
+
+ rtw_write32_mask(rtwdev, REG_RFE_INVSEL_D,
+ BIT_RFE_SELSW0_D, 0x77);
+
+ break;
+ }
+}
+
+static void rtw8814a_set_rfe_reg_5g(struct rtw_dev *rtwdev)
+{
+ switch (rtwdev->efuse.rfe_option) {
+ case 2:
+ rtw_write32(rtwdev, REG_RFE_PINMUX_A, 0x37173717);
+ rtw_write32(rtwdev, REG_RFE_PINMUX_B, 0x37173717);
+ rtw_write32(rtwdev, REG_RFE_PINMUX_C, 0x37173717);
+ rtw_write32(rtwdev, REG_RFE_PINMUX_D, 0x77177717);
+
+ rtw_write32_mask(rtwdev, REG_RFE_INVSEL_D,
+ BIT_RFE_SELSW0_D, 0x37);
+
+ break;
+ case 1:
+ rtw_write32(rtwdev, REG_RFE_PINMUX_A, 0x33173317);
+ rtw_write32(rtwdev, REG_RFE_PINMUX_B, 0x33173317);
+ rtw_write32(rtwdev, REG_RFE_PINMUX_C, 0x33173317);
+ rtw_write32(rtwdev, REG_RFE_PINMUX_D, 0x77177717);
+
+ rtw_write32_mask(rtwdev, REG_RFE_INVSEL_D,
+ BIT_RFE_SELSW0_D, 0x33);
+
+ break;
+ case 0:
+ default:
+ rtw_write32(rtwdev, REG_RFE_PINMUX_A, 0x54775477);
+ rtw_write32(rtwdev, REG_RFE_PINMUX_B, 0x54775477);
+ rtw_write32(rtwdev, REG_RFE_PINMUX_C, 0x54775477);
+ rtw_write32(rtwdev, REG_RFE_PINMUX_D, 0x54775477);
+
+ rtw_write32_mask(rtwdev, REG_RFE_INVSEL_D,
+ BIT_RFE_SELSW0_D, 0x54);
+
+ break;
+ }
+}
+
+static void rtw8814a_set_channel_bb_swing(struct rtw_dev *rtwdev, u8 band)
+{
+ rtw_write32_mask(rtwdev, REG_TXSCALE_A, BB_SWING_MASK,
+ rtw8814a_get_bb_swing(rtwdev, band, RF_PATH_A));
+ rtw_write32_mask(rtwdev, REG_TXSCALE_B, BB_SWING_MASK,
+ rtw8814a_get_bb_swing(rtwdev, band, RF_PATH_B));
+ rtw_write32_mask(rtwdev, REG_TXSCALE_C, BB_SWING_MASK,
+ rtw8814a_get_bb_swing(rtwdev, band, RF_PATH_C));
+ rtw_write32_mask(rtwdev, REG_TXSCALE_D, BB_SWING_MASK,
+ rtw8814a_get_bb_swing(rtwdev, band, RF_PATH_D));
+ rtw8814a_pwrtrack_init(rtwdev);
+}
+
+static void rtw8814a_set_bw_reg_adc(struct rtw_dev *rtwdev, u8 bw)
+{
+ u32 adc = 0;
+
+ if (bw == RTW_CHANNEL_WIDTH_20)
+ adc = 0;
+ else if (bw == RTW_CHANNEL_WIDTH_40)
+ adc = 1;
+ else if (bw == RTW_CHANNEL_WIDTH_80)
+ adc = 2;
+
+ rtw_write32_mask(rtwdev, REG_ADCCLK, BIT(1) | BIT(0), adc);
+}
+
+static void rtw8814a_set_bw_reg_agc(struct rtw_dev *rtwdev, u8 new_band, u8 bw)
+{
+ u32 agc = 7;
+
+ if (bw == RTW_CHANNEL_WIDTH_20) {
+ agc = 6;
+ } else if (bw == RTW_CHANNEL_WIDTH_40) {
+ if (new_band == RTW_BAND_5G)
+ agc = 8;
+ else
+ agc = 7;
+ } else if (bw == RTW_CHANNEL_WIDTH_80) {
+ agc = 3;
+ }
+
+ rtw_write32_mask(rtwdev, REG_CCASEL, 0xf000, agc);
+}
+
+static void rtw8814a_switch_band(struct rtw_dev *rtwdev, u8 new_band, u8 bw)
+{
+	/* Clear 0x1000[16]. When this bit is set to 0, CCK and OFDM
+	 * are disabled and their clocks are gated. Otherwise, CCK and
+	 * OFDM are enabled.
+	 */
+ rtw_write8_clr(rtwdev, REG_SYS_CFG3_8814A + 2, BIT_FEN_BB_RSTB);
+
+ if (new_band == RTW_BAND_2G) {
+ rtw_write32_mask(rtwdev, REG_AGC_TABLE, 0x1f, 0);
+
+ rtw8814a_set_rfe_reg_24g(rtwdev);
+
+ rtw_write32_mask(rtwdev, REG_TXPSEL, 0xf0, 0x2);
+ rtw_write32_mask(rtwdev, REG_CCK_RX, 0x0f000000, 0x5);
+
+ rtw_write32_mask(rtwdev, REG_RXPSEL, BIT_RX_PSEL_RST, 0x3);
+
+ rtw_write8(rtwdev, REG_CCK_CHECK, 0);
+
+ rtw_write32_mask(rtwdev, 0xa80, BIT(18), 0);
+ } else {
+ rtw_write8(rtwdev, REG_CCK_CHECK, BIT_CHECK_CCK_EN);
+
+ /* Enable CCK Tx function, even when CCK is off */
+ rtw_write32_mask(rtwdev, 0xa80, BIT(18), 1);
+
+ rtw8814a_set_rfe_reg_5g(rtwdev);
+
+ rtw_write32_mask(rtwdev, REG_TXPSEL, 0xf0, 0x0);
+ rtw_write32_mask(rtwdev, REG_CCK_RX, 0x0f000000, 0xf);
+
+ rtw_write32_mask(rtwdev, REG_RXPSEL, BIT_RX_PSEL_RST, 0x2);
+ }
+
+ rtw8814a_set_channel_bb_swing(rtwdev, new_band);
+
+ rtw8814a_set_bw_reg_adc(rtwdev, bw);
+ rtw8814a_set_bw_reg_agc(rtwdev, new_band, bw);
+
+ rtw_write8_set(rtwdev, REG_SYS_CFG3_8814A + 2, BIT_FEN_BB_RSTB);
+}
+
+static void rtw8814a_switch_channel(struct rtw_dev *rtwdev, u8 channel)
+{
+ struct rtw_hal *hal = &rtwdev->hal;
+ u32 fc_area, rf_mod_ag, cfgch;
+ u8 path;
+
+ switch (channel) {
+ case 36 ... 48:
+ fc_area = 0x494;
+ break;
+ case 50 ... 64:
+ fc_area = 0x453;
+ break;
+ case 100 ... 116:
+ fc_area = 0x452;
+ break;
+ default:
+ if (channel >= 118)
+ fc_area = 0x412;
+ else
+ fc_area = 0x96a;
+ break;
+ }
+
+ rtw_write32_mask(rtwdev, REG_CLKTRK, 0x1ffe0000, fc_area);
+
+ for (path = 0; path < hal->rf_path_num; path++) {
+ switch (channel) {
+ case 36 ... 64:
+ rf_mod_ag = 0x101;
+ break;
+ case 100 ... 140:
+ rf_mod_ag = 0x301;
+ break;
+ default:
+ if (channel > 140)
+ rf_mod_ag = 0x501;
+ else
+ rf_mod_ag = 0x000;
+ break;
+ }
+
+ cfgch = (rf_mod_ag << 8) | channel;
+
+ rtw_write_rf(rtwdev, path, RF_CFGCH,
+ RF18_RFSI_MASK | RF18_BAND_MASK | RF18_CHANNEL_MASK, cfgch);
+ }
+
+ switch (channel) {
+ case 36 ... 64:
+ rtw_write32_mask(rtwdev, REG_AGC_TABLE, 0x1f, 1);
+ break;
+ case 100 ... 144:
+ rtw_write32_mask(rtwdev, REG_AGC_TABLE, 0x1f, 2);
+ break;
+ default:
+ if (channel >= 149)
+ rtw_write32_mask(rtwdev, REG_AGC_TABLE, 0x1f, 3);
+
+ break;
+ }
+}
+
+static void rtw8814a_24g_cck_tx_dfir(struct rtw_dev *rtwdev, u8 channel)
+{
+ if (channel >= 1 && channel <= 11) {
+ rtw_write32(rtwdev, REG_CCK0_TX_FILTER1, 0x1a1b0030);
+ rtw_write32(rtwdev, REG_CCK0_TX_FILTER2, 0x090e1317);
+ rtw_write32(rtwdev, REG_CCK0_DEBUG_PORT, 0x00000204);
+ } else if (channel >= 12 && channel <= 13) {
+ rtw_write32(rtwdev, REG_CCK0_TX_FILTER1, 0x1a1b0030);
+ rtw_write32(rtwdev, REG_CCK0_TX_FILTER2, 0x090e1217);
+ rtw_write32(rtwdev, REG_CCK0_DEBUG_PORT, 0x00000305);
+ } else if (channel == 14) {
+ rtw_write32(rtwdev, REG_CCK0_TX_FILTER1, 0x1a1b0030);
+ rtw_write32(rtwdev, REG_CCK0_TX_FILTER2, 0x00000E17);
+ rtw_write32(rtwdev, REG_CCK0_DEBUG_PORT, 0x00000000);
+ }
+}
+
+static void rtw8814a_set_bw_reg_mac(struct rtw_dev *rtwdev, u8 bw)
+{
+ u16 val16 = rtw_read16(rtwdev, REG_WMAC_TRXPTCL_CTL);
+
+ val16 &= ~BIT_RFMOD;
+ if (bw == RTW_CHANNEL_WIDTH_80)
+ val16 |= BIT_RFMOD_80M;
+ else if (bw == RTW_CHANNEL_WIDTH_40)
+ val16 |= BIT_RFMOD_40M;
+
+ rtw_write16(rtwdev, REG_WMAC_TRXPTCL_CTL, val16);
+}
+
+static void rtw8814a_set_bw_rf(struct rtw_dev *rtwdev, u8 bw)
+{
+ u8 path;
+
+ for (path = RF_PATH_A; path < rtwdev->hal.rf_path_num; path++) {
+ switch (bw) {
+ case RTW_CHANNEL_WIDTH_5:
+ case RTW_CHANNEL_WIDTH_10:
+ case RTW_CHANNEL_WIDTH_20:
+ default:
+ rtw_write_rf(rtwdev, path, RF_CFGCH, RF18_BW_MASK, 3);
+ break;
+ case RTW_CHANNEL_WIDTH_40:
+ rtw_write_rf(rtwdev, path, RF_CFGCH, RF18_BW_MASK, 1);
+ break;
+ case RTW_CHANNEL_WIDTH_80:
+ rtw_write_rf(rtwdev, path, RF_CFGCH, RF18_BW_MASK, 0);
+ break;
+ }
+ }
+}
+
+static void rtw8814a_adc_clk(struct rtw_dev *rtwdev)
+{
+ static const u32 rxiqc_reg[2][4] = {
+ { REG_RX_IQC_AB_A, REG_RX_IQC_AB_B,
+ REG_RX_IQC_AB_C, REG_RX_IQC_AB_D },
+ { REG_RX_IQC_CD_A, REG_RX_IQC_CD_B,
+ REG_RX_IQC_CD_C, REG_RX_IQC_CD_D }
+ };
+ u32 bb_reg_8fc, bb_reg_808, rxiqc[4];
+ u32 i = 0, mac_active = 1;
+ u8 mac_reg_522;
+
+ if (rtwdev->hal.cut_version != RTW_CHIP_VER_CUT_A)
+ return;
+
+	/* 1 Step 1. MAC TX pause */
+ mac_reg_522 = rtw_read8(rtwdev, REG_TXPAUSE);
+ bb_reg_8fc = rtw_read32(rtwdev, REG_DBGSEL);
+ bb_reg_808 = rtw_read32(rtwdev, REG_RXPSEL);
+ rtw_write8(rtwdev, REG_TXPAUSE, 0x3f);
+
+ /* 1 Step 2. Backup rxiqc & rxiqc = 0 */
+ for (i = 0; i < 4; i++) {
+ rxiqc[i] = rtw_read32(rtwdev, rxiqc_reg[0][i]);
+ rtw_write32(rtwdev, rxiqc_reg[0][i], 0x0);
+ rtw_write32(rtwdev, rxiqc_reg[1][i], 0x0);
+ }
+ rtw_write32_mask(rtwdev, REG_PRECTRL, BIT_IQ_WGT, 0x3);
+ i = 0;
+
+ /* 1 Step 3. Monitor MAC IDLE */
+ rtw_write32(rtwdev, REG_DBGSEL, 0x0);
+ while (mac_active) {
+ mac_active = rtw_read32(rtwdev, REG_DBGRPT) & 0x803e0008;
+ i++;
+ if (i > 1000)
+ break;
+ }
+
+ /* 1 Step 4. ADC clk flow */
+ rtw_write8(rtwdev, REG_RXPSEL, 0x11);
+ rtw_write32_mask(rtwdev, REG_DAC_RSTB, BIT(13), 0x1);
+ rtw_write8_mask(rtwdev, REG_GNT_BT, BIT(2) | BIT(1), 0x3);
+ rtw_write32_mask(rtwdev, REG_CCK_RPT_FORMAT, BIT(2), 0x1);
+
+ /* 0xc1c/0xe1c/0x181c/0x1a1c[4] must=1 to ensure table can be
+ * written when bbrstb=0
+ * 0xc60/0xe60/0x1860/0x1a60[15] always = 1 after this line
+ * 0xc60/0xe60/0x1860/0x1a60[14] always = 0 bcz its error in A-cut
+ */
+
+ /* power_off/clk_off @ anapar_state=idle mode */
+ rtw_write32(rtwdev, REG_AFE_PWR1_A, 0x15800002);
+ rtw_write32(rtwdev, REG_AFE_PWR1_A, 0x01808003);
+ rtw_write32(rtwdev, REG_AFE_PWR1_B, 0x15800002);
+ rtw_write32(rtwdev, REG_AFE_PWR1_B, 0x01808003);
+ rtw_write32(rtwdev, REG_AFE_PWR1_C, 0x15800002);
+ rtw_write32(rtwdev, REG_AFE_PWR1_C, 0x01808003);
+ rtw_write32(rtwdev, REG_AFE_PWR1_D, 0x15800002);
+ rtw_write32(rtwdev, REG_AFE_PWR1_D, 0x01808003);
+
+ rtw_write8_mask(rtwdev, REG_GNT_BT, BIT(2), 0x0);
+ rtw_write32_mask(rtwdev, REG_CCK_RPT_FORMAT, BIT(2), 0x0);
+ /* [19] = 1 to turn off ADC */
+ rtw_write32(rtwdev, REG_CK_MONHA, 0x0D080058);
+ rtw_write32(rtwdev, REG_CK_MONHB, 0x0D080058);
+ rtw_write32(rtwdev, REG_CK_MONHC, 0x0D080058);
+ rtw_write32(rtwdev, REG_CK_MONHD, 0x0D080058);
+
+ /* power_on/clk_off */
+ /* [19] = 0 to turn on ADC */
+ rtw_write32(rtwdev, REG_CK_MONHA, 0x0D000058);
+ rtw_write32(rtwdev, REG_CK_MONHB, 0x0D000058);
+ rtw_write32(rtwdev, REG_CK_MONHC, 0x0D000058);
+ rtw_write32(rtwdev, REG_CK_MONHD, 0x0D000058);
+
+ /* power_on/clk_on @ anapar_state=BT mode */
+ rtw_write32(rtwdev, REG_AFE_PWR1_A, 0x05808032);
+ rtw_write32(rtwdev, REG_AFE_PWR1_B, 0x05808032);
+ rtw_write32(rtwdev, REG_AFE_PWR1_C, 0x05808032);
+ rtw_write32(rtwdev, REG_AFE_PWR1_D, 0x05808032);
+ rtw_write8_mask(rtwdev, REG_GNT_BT, BIT(2), 0x1);
+ rtw_write32_mask(rtwdev, REG_CCK_RPT_FORMAT, BIT(2), 0x1);
+
+ /* recover original setting @ anapar_state=BT mode */
+ rtw_write32(rtwdev, REG_AFE_PWR1_A, 0x05808032);
+ rtw_write32(rtwdev, REG_AFE_PWR1_B, 0x05808032);
+ rtw_write32(rtwdev, REG_AFE_PWR1_C, 0x05808032);
+ rtw_write32(rtwdev, REG_AFE_PWR1_D, 0x05808032);
+
+ rtw_write32(rtwdev, REG_AFE_PWR1_A, 0x05800002);
+ rtw_write32(rtwdev, REG_AFE_PWR1_A, 0x07808003);
+ rtw_write32(rtwdev, REG_AFE_PWR1_B, 0x05800002);
+ rtw_write32(rtwdev, REG_AFE_PWR1_B, 0x07808003);
+ rtw_write32(rtwdev, REG_AFE_PWR1_C, 0x05800002);
+ rtw_write32(rtwdev, REG_AFE_PWR1_C, 0x07808003);
+ rtw_write32(rtwdev, REG_AFE_PWR1_D, 0x05800002);
+ rtw_write32(rtwdev, REG_AFE_PWR1_D, 0x07808003);
+
+ rtw_write8_mask(rtwdev, REG_GNT_BT, BIT(2) | BIT(1), 0x0);
+ rtw_write32_mask(rtwdev, REG_CCK_RPT_FORMAT, BIT(2), 0x0);
+ rtw_write32_mask(rtwdev, REG_DAC_RSTB, BIT(13), 0x0);
+
+ /* 1 Step 5. Recover MAC TX & IQC */
+ rtw_write8(rtwdev, REG_TXPAUSE, mac_reg_522);
+ rtw_write32(rtwdev, REG_DBGSEL, bb_reg_8fc);
+ rtw_write32(rtwdev, REG_RXPSEL, bb_reg_808);
+ for (i = 0; i < 4; i++) {
+ rtw_write32(rtwdev, rxiqc_reg[0][i], rxiqc[i]);
+ rtw_write32(rtwdev, rxiqc_reg[1][i], 0x01000000);
+ }
+ rtw_write32_mask(rtwdev, REG_PRECTRL, BIT_IQ_WGT, 0x0);
+}
+
+static void rtw8814a_spur_calibration_ch140(struct rtw_dev *rtwdev, u8 channel)
+{
+ struct rtw_hal *hal = &rtwdev->hal;
+
+ /* Add for 8814AE module ch140 MP Rx */
+ if (channel == 140) {
+ if (hal->ch_param[0] == 0)
+ hal->ch_param[0] = rtw_read32(rtwdev, REG_CCASEL);
+ if (hal->ch_param[1] == 0)
+ hal->ch_param[1] = rtw_read32(rtwdev, REG_PDMFTH);
+
+ rtw_write32(rtwdev, REG_CCASEL, 0x75438170);
+ rtw_write32(rtwdev, REG_PDMFTH, 0x79a18a0a);
+ } else {
+ if (rtw_read32(rtwdev, REG_CCASEL) == 0x75438170 &&
+ hal->ch_param[0] != 0)
+ rtw_write32(rtwdev, REG_CCASEL, hal->ch_param[0]);
+
+ if (rtw_read32(rtwdev, REG_PDMFTH) == 0x79a18a0a &&
+ hal->ch_param[1] != 0)
+ rtw_write32(rtwdev, REG_PDMFTH, hal->ch_param[1]);
+
+ hal->ch_param[0] = rtw_read32(rtwdev, REG_CCASEL);
+ hal->ch_param[1] = rtw_read32(rtwdev, REG_PDMFTH);
+ }
+}
+
+static void rtw8814a_set_nbi_reg(struct rtw_dev *rtwdev, u32 tone_idx)
+{
+ /* tone_idx * 10 */
+ static const u32 nbi_128[] = {
+ 25, 55, 85, 115, 135,
+ 155, 185, 205, 225, 245,
+ 265, 285, 305, 335, 355,
+ 375, 395, 415, 435, 455,
+ 485, 505, 525, 555, 585, 615, 635
+ };
+ u32 reg_idx = 0;
+ u32 i;
+
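+ /* Use the 1-based index of the first table entry above tone_idx,
+  * or leave 0 if no entry covers it.
+  */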
+ for (i = 0; i < ARRAY_SIZE(nbi_128); i++) {
+ if (tone_idx < nbi_128[i]) {
+ reg_idx = i + 1;
+ break;
+ }
+ }
+
+ rtw_write32_mask(rtwdev, REG_NBI_SETTING, 0xfc000, reg_idx);
+}
+
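+/* Configure the narrow-band interference (NBI) notch for an interferer
+ * at f_intf MHz on 2.4 GHz channel ch.
+ */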
+static void rtw8814a_nbi_setting(struct rtw_dev *rtwdev, u32 ch, u32 f_intf)
+{
+ u32 fc, int_distance, tone_idx;
+
+ fc = 2412 + (ch - 1) * 5;
+ int_distance = abs_diff(fc, f_intf);
+
+ /* 10 * (int_distance / 0.3125) */
+ tone_idx = int_distance << 5;
+
+ rtw8814a_set_nbi_reg(rtwdev, tone_idx);
+
+ rtw_write32_mask(rtwdev, REG_NBI_SETTING, BIT_NBI_ENABLE, 1);
+}
+
+static void rtw8814a_spur_nbi_setting(struct rtw_dev *rtwdev)
+{
+ u8 primary_channel = rtwdev->hal.primary_channel;
+ u8 rfe_type = rtwdev->efuse.rfe_option;
+
+ if (rfe_type != 0 && rfe_type != 1 && rfe_type != 6 && rfe_type != 7)
+ return;
+
+ if (primary_channel == 14)
+ rtw8814a_nbi_setting(rtwdev, primary_channel, 2480);
+ else if (primary_channel >= 4 && primary_channel <= 8)
+ rtw8814a_nbi_setting(rtwdev, primary_channel, 2440);
+ else
+ rtw_write32_mask(rtwdev, REG_NBI_SETTING, BIT_NBI_ENABLE, 0);
+}
+
+/* A workaround to eliminate the 5280 MHz & 5600 MHz & 5760 MHz spur of 8814A */
+static void rtw8814a_spur_calibration(struct rtw_dev *rtwdev, u8 channel, u8 bw)
+{
+ u8 rfe_type = rtwdev->efuse.rfe_option;
+ bool reset_nbi_csi = true;
+
+ if (rfe_type == 0) {
+ switch (bw) {
+ case RTW_CHANNEL_WIDTH_40:
+ if (channel == 54 || channel == 118) {
+ rtw_write32_mask(rtwdev, REG_NBI_SETTING,
+ 0x000fe000, 0x3e >> 1);
+ rtw_write32_mask(rtwdev, REG_CSI_MASK_SETTING1,
+ BIT(0), 1);
+ rtw_write32(rtwdev, REG_CSI_FIX_MASK0, 0);
+ rtw_write32_mask(rtwdev, REG_CSI_FIX_MASK1,
+ BIT(0), 1);
+ rtw_write32(rtwdev, REG_CSI_FIX_MASK6, 0);
+ rtw_write32(rtwdev, REG_CSI_FIX_MASK7, 0);
+
+ reset_nbi_csi = false;
+ } else if (channel == 151) {
+ rtw_write32_mask(rtwdev, REG_NBI_SETTING,
+ 0x000fe000, 0x1e >> 1);
+ rtw_write32_mask(rtwdev, REG_CSI_MASK_SETTING1,
+ BIT(0), 1);
+ rtw_write32_mask(rtwdev, REG_CSI_FIX_MASK0,
+ BIT(16), 1);
+ rtw_write32(rtwdev, REG_CSI_FIX_MASK1, 0);
+ rtw_write32(rtwdev, REG_CSI_FIX_MASK6, 0);
+ rtw_write32(rtwdev, REG_CSI_FIX_MASK7, 0);
+
+ reset_nbi_csi = false;
+ }
+ break;
+ case RTW_CHANNEL_WIDTH_80:
+ if (channel == 58 || channel == 122) {
+ rtw_write32_mask(rtwdev, REG_NBI_SETTING,
+ 0x000fe000, 0x3a >> 1);
+ rtw_write32_mask(rtwdev, REG_CSI_MASK_SETTING1,
+ BIT(0), 1);
+ rtw_write32(rtwdev, REG_CSI_FIX_MASK0, 0);
+ rtw_write32(rtwdev, REG_CSI_FIX_MASK1, 0);
+ rtw_write32(rtwdev, REG_CSI_FIX_MASK6, 0);
+ rtw_write32_mask(rtwdev, REG_CSI_FIX_MASK7,
+ BIT(0), 1);
+
+ reset_nbi_csi = false;
+ } else if (channel == 155) {
+ rtw_write32_mask(rtwdev, REG_NBI_SETTING,
+ 0x000fe000, 0x5a >> 1);
+ rtw_write32_mask(rtwdev, REG_CSI_MASK_SETTING1,
+ BIT(0), 1);
+ rtw_write32(rtwdev, REG_CSI_FIX_MASK0, 0);
+ rtw_write32(rtwdev, REG_CSI_FIX_MASK1, 0);
+ rtw_write32_mask(rtwdev, REG_CSI_FIX_MASK6,
+ BIT(16), 1);
+ rtw_write32(rtwdev, REG_CSI_FIX_MASK7, 0);
+
+ reset_nbi_csi = false;
+ }
+ break;
+ case RTW_CHANNEL_WIDTH_20:
+ if (channel == 153) {
+ rtw_write32_mask(rtwdev, REG_NBI_SETTING,
+ 0x000fe000, 0x1e >> 1);
+ rtw_write32_mask(rtwdev, REG_CSI_MASK_SETTING1,
+ BIT(0), 1);
+ rtw_write32(rtwdev, REG_CSI_FIX_MASK0, 0);
+ rtw_write32(rtwdev, REG_CSI_FIX_MASK1, 0);
+ rtw_write32(rtwdev, REG_CSI_FIX_MASK6, 0);
+ rtw_write32_mask(rtwdev, REG_CSI_FIX_MASK7,
+ BIT(16), 1);
+
+ reset_nbi_csi = false;
+ }
+
+ rtw8814a_spur_calibration_ch140(rtwdev, channel);
+ break;
+ default:
+ break;
+ }
+ } else if (rfe_type == 1 || rfe_type == 2) {
+ switch (bw) {
+ case RTW_CHANNEL_WIDTH_20:
+ if (channel == 153) {
+ rtw_write32_mask(rtwdev, REG_NBI_SETTING,
+ 0x000fe000, 0x1E >> 1);
+ rtw_write32_mask(rtwdev, REG_CSI_MASK_SETTING1,
+ BIT(0), 1);
+ rtw_write32(rtwdev, REG_CSI_FIX_MASK0, 0);
+ rtw_write32(rtwdev, REG_CSI_FIX_MASK1, 0);
+ rtw_write32(rtwdev, REG_CSI_FIX_MASK6, 0);
+ rtw_write32_mask(rtwdev, REG_CSI_FIX_MASK7,
+ BIT(16), 1);
+
+ reset_nbi_csi = false;
+ }
+ break;
+ case RTW_CHANNEL_WIDTH_40:
+ if (channel == 151) {
+ rtw_write32_mask(rtwdev, REG_NBI_SETTING,
+ 0x000fe000, 0x1e >> 1);
+ rtw_write32_mask(rtwdev, REG_CSI_MASK_SETTING1,
+ BIT(0), 1);
+ rtw_write32_mask(rtwdev, REG_CSI_FIX_MASK0,
+ BIT(16), 1);
+ rtw_write32(rtwdev, REG_CSI_FIX_MASK1, 0);
+ rtw_write32(rtwdev, REG_CSI_FIX_MASK6, 0);
+ rtw_write32(rtwdev, REG_CSI_FIX_MASK7, 0);
+
+ reset_nbi_csi = false;
+ }
+ break;
+ case RTW_CHANNEL_WIDTH_80:
+ if (channel == 155) {
+ rtw_write32_mask(rtwdev, REG_NBI_SETTING,
+ 0x000fe000, 0x5a >> 1);
+ rtw_write32_mask(rtwdev, REG_CSI_MASK_SETTING1,
+ BIT(0), 1);
+ rtw_write32(rtwdev, REG_CSI_FIX_MASK0, 0);
+ rtw_write32(rtwdev, REG_CSI_FIX_MASK1, 0);
+ rtw_write32_mask(rtwdev, REG_CSI_FIX_MASK6,
+ BIT(16), 1);
+ rtw_write32(rtwdev, REG_CSI_FIX_MASK7, 0);
+
+ reset_nbi_csi = false;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+
+ if (reset_nbi_csi) {
+ rtw_write32_mask(rtwdev, REG_NBI_SETTING,
+ 0x000fe000, 0xfc >> 1);
+ rtw_write32_mask(rtwdev, REG_CSI_MASK_SETTING1, BIT(0), 0);
+ rtw_write32(rtwdev, REG_CSI_FIX_MASK0, 0);
+ rtw_write32(rtwdev, REG_CSI_FIX_MASK1, 0);
+ rtw_write32(rtwdev, REG_CSI_FIX_MASK6, 0);
+ rtw_write32(rtwdev, REG_CSI_FIX_MASK7, 0);
+ }
+
+ rtw8814a_spur_nbi_setting(rtwdev);
+}
+
+static void rtw8814a_set_bw_mode(struct rtw_dev *rtwdev, u8 new_band,
+ u8 channel, u8 bw, u8 primary_chan_idx)
+{
+ u8 txsc40 = 0, txsc20, txsc;
+
+ rtw8814a_set_bw_reg_mac(rtwdev, bw);
+
+ txsc20 = primary_chan_idx;
+ if (bw == RTW_CHANNEL_WIDTH_80) {
+ if (txsc20 == RTW_SC_20_UPPER || txsc20 == RTW_SC_20_UPMOST)
+ txsc40 = RTW_SC_40_UPPER;
+ else
+ txsc40 = RTW_SC_40_LOWER;
+ }
+
+ txsc = BIT_TXSC_20M(txsc20) | BIT_TXSC_40M(txsc40);
+ rtw_write8(rtwdev, REG_DATA_SC, txsc);
+
+ rtw8814a_set_bw_reg_adc(rtwdev, bw);
+ rtw8814a_set_bw_reg_agc(rtwdev, new_band, bw);
+
+ if (bw == RTW_CHANNEL_WIDTH_80) {
+ rtw_write32_mask(rtwdev, REG_ADCCLK, 0x3c, txsc);
+ } else if (bw == RTW_CHANNEL_WIDTH_40) {
+ rtw_write32_mask(rtwdev, REG_ADCCLK, 0x3c, txsc);
+
+ if (txsc == RTW_SC_20_UPPER)
+ rtw_write32_set(rtwdev, REG_RXSB, BIT(4));
+ else
+ rtw_write32_clr(rtwdev, REG_RXSB, BIT(4));
+ }
+
+ rtw8814a_set_bw_rf(rtwdev, bw);
+
+ rtw8814a_adc_clk(rtwdev);
+
+ rtw8814a_spur_calibration(rtwdev, channel, bw);
+}
+
+static void rtw8814a_set_channel(struct rtw_dev *rtwdev, u8 channel, u8 bw,
+ u8 primary_chan_idx)
+{
+ u8 old_band, new_band;
+
+ if (rtw_read8(rtwdev, REG_CCK_CHECK) & BIT_CHECK_CCK_EN)
+ old_band = RTW_BAND_5G;
+ else
+ old_band = RTW_BAND_2G;
+
+ if (channel > 14)
+ new_band = RTW_BAND_5G;
+ else
+ new_band = RTW_BAND_2G;
+
+ if (new_band != old_band)
+ rtw8814a_switch_band(rtwdev, new_band, bw);
+
+ rtw8814a_switch_channel(rtwdev, channel);
+
+ rtw8814a_24g_cck_tx_dfir(rtwdev, channel);
+
+ rtw8814a_set_bw_mode(rtwdev, new_band, channel, bw, primary_chan_idx);
+}
+
+static s8 rtw8814a_cck_rx_pwr(u8 lna_idx, u8 vga_idx)
+{
+ s8 rx_pwr_all = 0;
+
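+ /* Map the CCK AGC report (LNA/VGA gain indices) to Rx power in dBm */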
+ switch (lna_idx) {
+ case 7:
+ rx_pwr_all = -38 - 2 * vga_idx;
+ break;
+ case 5:
+ rx_pwr_all = -28 - 2 * vga_idx;
+ break;
+ case 3:
+ rx_pwr_all = -8 - 2 * vga_idx;
+ break;
+ case 2:
+ rx_pwr_all = -1 - 2 * vga_idx;
+ break;
+ default:
+ break;
+ }
+
+ return rx_pwr_all;
+}
+
+static void rtw8814a_query_phy_status(struct rtw_dev *rtwdev, u8 *phy_status,
+ struct rtw_rx_pkt_stat *pkt_stat)
+{
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+ struct rtw_jaguar_phy_status_rpt *rpt;
+ u8 gain[RTW_RF_PATH_MAX], rssi, i;
+ s8 rx_pwr_db, middle1, middle2;
+ s8 snr[RTW_RF_PATH_MAX];
+ s8 evm[RTW_RF_PATH_MAX];
+ u8 rfmode, subchannel;
+ u8 lna, vga;
+ s8 cfo[2];
+
+ rpt = (struct rtw_jaguar_phy_status_rpt *)phy_status;
+
+ pkt_stat->bw = RTW_CHANNEL_WIDTH_20;
+
+ if (pkt_stat->rate <= DESC_RATE11M) {
+ lna = le32_get_bits(rpt->w1, RTW_JGRPHY_W1_AGC_RPT_LNA_IDX);
+ vga = le32_get_bits(rpt->w1, RTW_JGRPHY_W1_AGC_RPT_VGA_IDX);
+
+ rx_pwr_db = rtw8814a_cck_rx_pwr(lna, vga);
+
+ pkt_stat->rx_power[RF_PATH_A] = rx_pwr_db;
+ pkt_stat->rssi = rtw_phy_rf_power_2_rssi(pkt_stat->rx_power, 1);
+ dm_info->rssi[RF_PATH_A] = pkt_stat->rssi;
+ pkt_stat->signal_power = rx_pwr_db;
+ } else { /* OFDM rate */
+ gain[RF_PATH_A] = le32_get_bits(rpt->w0, RTW_JGRPHY_W0_GAIN_A);
+ gain[RF_PATH_B] = le32_get_bits(rpt->w0, RTW_JGRPHY_W0_GAIN_B);
+ gain[RF_PATH_C] = le32_get_bits(rpt->w5, RTW_JGRPHY_W5_GAIN_C);
+ gain[RF_PATH_D] = le32_get_bits(rpt->w6, RTW_JGRPHY_W6_GAIN_D);
+
+ snr[RF_PATH_A] = le32_get_bits(rpt->w3, RTW_JGRPHY_W3_RXSNR_A);
+ snr[RF_PATH_B] = le32_get_bits(rpt->w4, RTW_JGRPHY_W4_RXSNR_B);
+ snr[RF_PATH_C] = le32_get_bits(rpt->w5, RTW_JGRPHY_W5_RXSNR_C);
+ snr[RF_PATH_D] = le32_get_bits(rpt->w5, RTW_JGRPHY_W5_RXSNR_D);
+
+ evm[RF_PATH_A] = le32_get_bits(rpt->w3, RTW_JGRPHY_W3_RXEVM_1);
+ evm[RF_PATH_B] = le32_get_bits(rpt->w3, RTW_JGRPHY_W3_RXEVM_2);
+ evm[RF_PATH_C] = le32_get_bits(rpt->w4, RTW_JGRPHY_W4_RXEVM_3);
+ evm[RF_PATH_D] = le32_get_bits(rpt->w5, RTW_JGRPHY_W5_RXEVM_4);
+
+ if (pkt_stat->rate <= DESC_RATE54M)
+ evm[RF_PATH_A] = le32_get_bits(rpt->w6,
+ RTW_JGRPHY_W6_SIGEVM);
+
+ for (i = RF_PATH_A; i < RTW_RF_PATH_MAX; i++) {
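+ /* gain is reported as an offset from -110 dBm */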
+ pkt_stat->rx_power[i] = gain[i] - 110;
+
+ rssi = rtw_phy_rf_power_2_rssi(&pkt_stat->rx_power[i], 1);
+ dm_info->rssi[i] = rssi;
+
+ pkt_stat->rx_snr[i] = snr[i];
+ dm_info->rx_snr[i] = snr[i] >> 1;
+
+ pkt_stat->rx_evm[i] = evm[i];
+ evm[i] = max_t(s8, -127, evm[i]);
+ dm_info->rx_evm_dbm[i] = abs(evm[i]) >> 1;
+ }
+
+ rssi = rtw_phy_rf_power_2_rssi(pkt_stat->rx_power,
+ RTW_RF_PATH_MAX);
+ pkt_stat->rssi = rssi;
+
+ /* When power saving is enabled the hardware sometimes
+ * reports unbelievably high gain for paths A and C
+ * (e.g. one frame 64 68 68 72, the next frame 106 66 88 72,
+ * the next 66 66 68 72), so use the second lowest gain
+ * instead of the highest.
+ */
+ middle1 = max(min(gain[RF_PATH_A], gain[RF_PATH_B]),
+ min(gain[RF_PATH_C], gain[RF_PATH_D]));
+ middle2 = min(max(gain[RF_PATH_A], gain[RF_PATH_B]),
+ max(gain[RF_PATH_C], gain[RF_PATH_D]));
+ rx_pwr_db = min(middle1, middle2);
+ rx_pwr_db -= 110;
+ pkt_stat->signal_power = rx_pwr_db;
+
+ rfmode = le32_get_bits(rpt->w0, RTW_JGRPHY_W0_R_RFMOD);
+ subchannel = le32_get_bits(rpt->w0, RTW_JGRPHY_W0_SUB_CHNL);
+
+ if (rfmode == 1 && subchannel == 0) {
+ pkt_stat->bw = RTW_CHANNEL_WIDTH_40;
+ } else if (rfmode == 2) {
+ if (subchannel == 0)
+ pkt_stat->bw = RTW_CHANNEL_WIDTH_80;
+ else if (subchannel == 9 || subchannel == 10)
+ pkt_stat->bw = RTW_CHANNEL_WIDTH_40;
+ }
+
+ cfo[RF_PATH_A] = le32_get_bits(rpt->w2, RTW_JGRPHY_W2_CFO_TAIL_A);
+ cfo[RF_PATH_B] = le32_get_bits(rpt->w2, RTW_JGRPHY_W2_CFO_TAIL_B);
+
+ for (i = RF_PATH_A; i < 2; i++) {
+ pkt_stat->cfo_tail[i] = cfo[i];
+ dm_info->cfo_tail[i] = (cfo[i] * 5) >> 1;
+ }
+ }
+}
+
+static void
+rtw8814a_set_tx_power_index_by_rate(struct rtw_dev *rtwdev, u8 path, u8 rs)
+{
+ struct rtw_hal *hal = &rtwdev->hal;
+ u32 txagc_table_wd;
+ u8 rate, pwr_index;
+ int j;
+
+ for (j = 0; j < rtw_rate_size[rs]; j++) {
+ rate = rtw_rate_section[rs][j];
+
+ pwr_index = hal->tx_pwr_tbl[path][rate] + 2;
+ if (pwr_index > rtwdev->chip->max_power_index)
+ pwr_index = rtwdev->chip->max_power_index;
+
+ txagc_table_wd = 0x00801000;
+ txagc_table_wd |= (pwr_index << 24) | (path << 8) | rate;
+
+ rtw_write32(rtwdev, REG_AGC_TBL, txagc_table_wd);
+
+ /* The first write only turns on the txagc table;
+  * a second write is needed to program address 0.
+  */
+ if (rate == DESC_RATE1M)
+ rtw_write32(rtwdev, REG_AGC_TBL, txagc_table_wd);
+ }
+}
+
+static void rtw8814a_set_tx_power_index(struct rtw_dev *rtwdev)
+{
+ struct rtw_hal *hal = &rtwdev->hal;
+ int path;
+
+ for (path = 0; path < hal->rf_path_num; path++) {
+ if (hal->current_band_type == RTW_BAND_2G)
+ rtw8814a_set_tx_power_index_by_rate(rtwdev, path,
+ RTW_RATE_SECTION_CCK);
+
+ rtw8814a_set_tx_power_index_by_rate(rtwdev, path,
+ RTW_RATE_SECTION_OFDM);
+
+ if (test_bit(RTW_FLAG_SCANNING, rtwdev->flags))
+ continue;
+
+ rtw8814a_set_tx_power_index_by_rate(rtwdev, path,
+ RTW_RATE_SECTION_HT_1S);
+ rtw8814a_set_tx_power_index_by_rate(rtwdev, path,
+ RTW_RATE_SECTION_VHT_1S);
+
+ rtw8814a_set_tx_power_index_by_rate(rtwdev, path,
+ RTW_RATE_SECTION_HT_2S);
+ rtw8814a_set_tx_power_index_by_rate(rtwdev, path,
+ RTW_RATE_SECTION_VHT_2S);
+
+ rtw8814a_set_tx_power_index_by_rate(rtwdev, path,
+ RTW_RATE_SECTION_HT_3S);
+ rtw8814a_set_tx_power_index_by_rate(rtwdev, path,
+ RTW_RATE_SECTION_VHT_3S);
+ }
+}
+
+static void rtw8814a_cfg_ldo25(struct rtw_dev *rtwdev, bool enable)
+{
+}
+
+static void rtw8814a_false_alarm_statistics(struct rtw_dev *rtwdev)
+{
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+ u32 cck_fa_cnt, ofdm_fa_cnt;
+ u32 crc32_cnt, cca32_cnt;
+ u32 cck_enable;
+
+ cck_enable = rtw_read32(rtwdev, REG_RXPSEL) & BIT(28);
+ cck_fa_cnt = rtw_read16(rtwdev, REG_FA_CCK);
+ ofdm_fa_cnt = rtw_read16(rtwdev, REG_FA_OFDM);
+
+ dm_info->cck_fa_cnt = cck_fa_cnt;
+ dm_info->ofdm_fa_cnt = ofdm_fa_cnt;
+ dm_info->total_fa_cnt = ofdm_fa_cnt;
+ if (cck_enable)
+ dm_info->total_fa_cnt += cck_fa_cnt;
+
+ crc32_cnt = rtw_read32(rtwdev, REG_CRC_CCK);
+ dm_info->cck_ok_cnt = u32_get_bits(crc32_cnt, MASKLWORD);
+ dm_info->cck_err_cnt = u32_get_bits(crc32_cnt, MASKHWORD);
+
+ crc32_cnt = rtw_read32(rtwdev, REG_CRC_OFDM);
+ dm_info->ofdm_ok_cnt = u32_get_bits(crc32_cnt, MASKLWORD);
+ dm_info->ofdm_err_cnt = u32_get_bits(crc32_cnt, MASKHWORD);
+
+ crc32_cnt = rtw_read32(rtwdev, REG_CRC_HT);
+ dm_info->ht_ok_cnt = u32_get_bits(crc32_cnt, MASKLWORD);
+ dm_info->ht_err_cnt = u32_get_bits(crc32_cnt, MASKHWORD);
+
+ crc32_cnt = rtw_read32(rtwdev, REG_CRC_VHT);
+ dm_info->vht_ok_cnt = u32_get_bits(crc32_cnt, MASKLWORD);
+ dm_info->vht_err_cnt = u32_get_bits(crc32_cnt, MASKHWORD);
+
+ cca32_cnt = rtw_read32(rtwdev, REG_CCA_OFDM);
+ dm_info->ofdm_cca_cnt = u32_get_bits(cca32_cnt, MASKHWORD);
+ dm_info->total_cca_cnt = dm_info->ofdm_cca_cnt;
+ if (cck_enable) {
+ cca32_cnt = rtw_read32(rtwdev, REG_CCA_CCK);
+ dm_info->cck_cca_cnt = u32_get_bits(cca32_cnt, MASKLWORD);
+ dm_info->total_cca_cnt += dm_info->cck_cca_cnt;
+ }
+
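+ /* Reset the counters */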
+ rtw_write32_set(rtwdev, REG_FAS, BIT(17));
+ rtw_write32_clr(rtwdev, REG_FAS, BIT(17));
+ rtw_write32_clr(rtwdev, REG_CCK0_FAREPORT, BIT(15));
+ rtw_write32_set(rtwdev, REG_CCK0_FAREPORT, BIT(15));
+ rtw_write32_set(rtwdev, REG_CNTRST, BIT(0));
+ rtw_write32_clr(rtwdev, REG_CNTRST, BIT(0));
+}
+
+#define MAC_REG_NUM_8814 2
+#define BB_REG_NUM_8814 14
+#define RF_REG_NUM_8814 1
+
+static void rtw8814a_iqk_backup_mac_bb(struct rtw_dev *rtwdev,
+ u32 *mac_backup, u32 *bb_backup,
+ const u32 *mac_regs,
+ const u32 *bb_regs)
+{
+ u32 i;
+
+ /* save MACBB default value */
+ for (i = 0; i < MAC_REG_NUM_8814; i++)
+ mac_backup[i] = rtw_read32(rtwdev, mac_regs[i]);
+
+ for (i = 0; i < BB_REG_NUM_8814; i++)
+ bb_backup[i] = rtw_read32(rtwdev, bb_regs[i]);
+}
+
+static void rtw8814a_iqk_backup_rf(struct rtw_dev *rtwdev,
+ u32 rf_backup[][4], const u32 *rf_regs)
+{
+ u32 i;
+
+ /* Save RF Parameters */
+ for (i = 0; i < RF_REG_NUM_8814; i++) {
+ rf_backup[i][RF_PATH_A] = rtw_read_rf(rtwdev, RF_PATH_A,
+ rf_regs[i], RFREG_MASK);
+ rf_backup[i][RF_PATH_B] = rtw_read_rf(rtwdev, RF_PATH_B,
+ rf_regs[i], RFREG_MASK);
+ rf_backup[i][RF_PATH_C] = rtw_read_rf(rtwdev, RF_PATH_C,
+ rf_regs[i], RFREG_MASK);
+ rf_backup[i][RF_PATH_D] = rtw_read_rf(rtwdev, RF_PATH_D,
+ rf_regs[i], RFREG_MASK);
+ }
+}
+
+static void rtw8814a_iqk_afe_setting(struct rtw_dev *rtwdev, bool do_iqk)
+{
+ if (do_iqk) {
+ /* IQK AFE setting RX_WAIT_CCA mode */
+ rtw_write32(rtwdev, REG_AFE_PWR1_A, 0x0e808003);
+ rtw_write32(rtwdev, REG_AFE_PWR1_B, 0x0e808003);
+ rtw_write32(rtwdev, REG_AFE_PWR1_C, 0x0e808003);
+ rtw_write32(rtwdev, REG_AFE_PWR1_D, 0x0e808003);
+ } else {
+ rtw_write32(rtwdev, REG_AFE_PWR1_A, 0x07808003);
+ rtw_write32(rtwdev, REG_AFE_PWR1_B, 0x07808003);
+ rtw_write32(rtwdev, REG_AFE_PWR1_C, 0x07808003);
+ rtw_write32(rtwdev, REG_AFE_PWR1_D, 0x07808003);
+ }
+
+ rtw_write32_mask(rtwdev, REG_DAC_RSTB, BIT(13), 0x1);
+
+ rtw_write8_set(rtwdev, REG_GNT_BT, BIT(2) | BIT(1));
+ rtw_write8_clr(rtwdev, REG_GNT_BT, BIT(2) | BIT(1));
+
+ rtw_write32_set(rtwdev, REG_CCK_RPT_FORMAT, BIT(2));
+ rtw_write32_clr(rtwdev, REG_CCK_RPT_FORMAT, BIT(2));
+}
+
+static void rtw8814a_iqk_restore_mac_bb(struct rtw_dev *rtwdev,
+ u32 *mac_backup, u32 *bb_backup,
+ const u32 *mac_regs,
+ const u32 *bb_regs)
+{
+ u32 i;
+
+ /* Reload MacBB Parameters */
+ for (i = 0; i < MAC_REG_NUM_8814; i++)
+ rtw_write32(rtwdev, mac_regs[i], mac_backup[i]);
+
+ for (i = 0; i < BB_REG_NUM_8814; i++)
+ rtw_write32(rtwdev, bb_regs[i], bb_backup[i]);
+}
+
+static void rtw8814a_iqk_restore_rf(struct rtw_dev *rtwdev,
+ const u32 rf_backup[][4],
+ const u32 *rf_regs)
+{
+ u32 i;
+
+ rtw_write_rf(rtwdev, RF_PATH_A, RF_LUTWE, RFREG_MASK, 0x0);
+ rtw_write_rf(rtwdev, RF_PATH_B, RF_LUTWE, RFREG_MASK, 0x0);
+ rtw_write_rf(rtwdev, RF_PATH_C, RF_LUTWE, RFREG_MASK, 0x0);
+ rtw_write_rf(rtwdev, RF_PATH_D, RF_LUTWE, RFREG_MASK, 0x0);
+
+ rtw_write_rf(rtwdev, RF_PATH_A, RF_RXBB2, RFREG_MASK, 0x88001);
+ rtw_write_rf(rtwdev, RF_PATH_B, RF_RXBB2, RFREG_MASK, 0x88001);
+ rtw_write_rf(rtwdev, RF_PATH_C, RF_RXBB2, RFREG_MASK, 0x88001);
+ rtw_write_rf(rtwdev, RF_PATH_D, RF_RXBB2, RFREG_MASK, 0x88001);
+
+ for (i = 0; i < RF_REG_NUM_8814; i++) {
+ rtw_write_rf(rtwdev, RF_PATH_A, rf_regs[i],
+ RFREG_MASK, rf_backup[i][RF_PATH_A]);
+ rtw_write_rf(rtwdev, RF_PATH_B, rf_regs[i],
+ RFREG_MASK, rf_backup[i][RF_PATH_B]);
+ rtw_write_rf(rtwdev, RF_PATH_C, rf_regs[i],
+ RFREG_MASK, rf_backup[i][RF_PATH_C]);
+ rtw_write_rf(rtwdev, RF_PATH_D, rf_regs[i],
+ RFREG_MASK, rf_backup[i][RF_PATH_D]);
+ }
+}
+
+static void rtw8814a_iqk_reset_nctl(struct rtw_dev *rtwdev)
+{
+ rtw_write32(rtwdev, 0x1b00, 0xf8000000);
+ rtw_write32(rtwdev, 0x1b80, 0x00000006);
+
+ rtw_write32(rtwdev, 0x1b00, 0xf8000000);
+ rtw_write32(rtwdev, 0x1b80, 0x00000002);
+}
+
+static void rtw8814a_iqk_configure_mac(struct rtw_dev *rtwdev)
+{
+ rtw_write8(rtwdev, REG_TXPAUSE, 0x3f);
+ rtw_write32_clr(rtwdev, REG_BCN_CTRL,
+ (BIT_EN_BCN_FUNCTION << 8) | BIT_EN_BCN_FUNCTION);
+
+ /* RX antenna off */
+ rtw_write8(rtwdev, REG_RXPSEL, 0x00);
+ /* CCA off */
+ rtw_write32_mask(rtwdev, REG_CCA2ND, 0xf, 0xe);
+ /* CCK RX path off */
+ rtw_write32_set(rtwdev, REG_PRECTRL, BIT_IQ_WGT);
+ rtw_write32(rtwdev, REG_RFE_PINMUX_A, 0x77777777);
+ rtw_write32(rtwdev, REG_RFE_PINMUX_B, 0x77777777);
+ rtw_write32(rtwdev, REG_RFE_PINMUX_C, 0x77777777);
+ rtw_write32(rtwdev, REG_RFE_PINMUX_D, 0x77777777);
+ rtw_write32_mask(rtwdev, REG_RFE_INVSEL_D, BIT_RFE_SELSW0_D, 0x77);
+ rtw_write32_mask(rtwdev, REG_PSD, BIT_PSD_INI, 0x0);
+
+ rtw_write32_mask(rtwdev, REG_RFE_INV0, 0xf, 0x0);
+}
+
+static void rtw8814a_lok_one_shot(struct rtw_dev *rtwdev, u8 path)
+{
+ u32 lok_temp1, lok_temp2;
+ bool lok_ready;
+ u8 ii;
+
+ /* ADC Clock source */
+ rtw_write32_mask(rtwdev, REG_FAS, BIT(21) | BIT(20), path);
+ /* LOK: CMD ID = 0
+ * {0xf8000011, 0xf8000021, 0xf8000041, 0xf8000081}
+ */
+ rtw_write32(rtwdev, 0x1b00, 0xf8000001 | (BIT(path) << 4));
+
+ usleep_range(1000, 1100);
+
+ if (read_poll_timeout(!rtw_read32_mask, lok_ready, lok_ready,
+ 1000, 10000, false,
+ rtwdev, 0x1b00, BIT(0))) {
+ rtw_dbg(rtwdev, RTW_DBG_RFK, "==>S%d LOK timed out\n", path);
+
+ rtw8814a_iqk_reset_nctl(rtwdev);
+
+ rtw_write_rf(rtwdev, path, RF_DTXLOK, RFREG_MASK, 0x08400);
+
+ return;
+ }
+
+ rtw_write32(rtwdev, 0x1b00, 0xf8000000 | (path << 1));
+ rtw_write32(rtwdev, 0x1bd4, 0x003f0001);
+
+ lok_temp2 = rtw_read32_mask(rtwdev, 0x1bfc, 0x003e0000);
+ lok_temp2 = (lok_temp2 + 0x10) & 0x1f;
+
+ lok_temp1 = rtw_read32_mask(rtwdev, 0x1bfc, 0x0000003e);
+ lok_temp1 = (lok_temp1 + 0x10) & 0x1f;
+
+ for (ii = 1; ii < 5; ii++) {
+ lok_temp1 += (lok_temp1 & BIT(4 - ii)) << (ii * 2);
+ lok_temp2 += (lok_temp2 & BIT(4 - ii)) << (ii * 2);
+ }
+
+ rtw_dbg(rtwdev, RTW_DBG_RFK,
+ "path %d lok_temp1 = %#x, lok_temp2 = %#x\n",
+ path, lok_temp1 >> 4, lok_temp2 >> 4);
+
+ rtw_write_rf(rtwdev, path, RF_DTXLOK, 0x07c00, lok_temp1 >> 4);
+ rtw_write_rf(rtwdev, path, RF_DTXLOK, 0xf8000, lok_temp2 >> 4);
+}
+
+static void rtw8814a_iqk_tx_one_shot(struct rtw_dev *rtwdev, u8 path,
+ u32 *tx_matrix, bool *tx_ok)
+{
+ u8 bw = rtwdev->hal.current_band_width;
+ u8 cal_retry;
+ u32 iqk_cmd;
+
+ for (cal_retry = 0; cal_retry < 4; cal_retry++) {
+ rtw_write32_mask(rtwdev, REG_FAS, BIT(21) | BIT(20), path);
+
+ iqk_cmd = 0xf8000001 | ((bw + 3) << 8) | (BIT(path) << 4);
+
+ rtw_dbg(rtwdev, RTW_DBG_RFK, "TXK_Trigger = %#x\n", iqk_cmd);
+
+ rtw_write32(rtwdev, 0x1b00, iqk_cmd);
+
+ usleep_range(10000, 11000);
+
+ if (read_poll_timeout(!rtw_read32_mask, *tx_ok, *tx_ok,
+ 1000, 20000, false,
+ rtwdev, 0x1b00, BIT(0))) {
+ rtw_dbg(rtwdev, RTW_DBG_RFK,
+ "tx iqk S%d timed out\n", path);
+
+ rtw8814a_iqk_reset_nctl(rtwdev);
+ } else {
+ *tx_ok = !rtw_read32_mask(rtwdev, 0x1b08, BIT(26));
+
+ if (*tx_ok)
+ break;
+ }
+ }
+
+ rtw_dbg(rtwdev, RTW_DBG_RFK, "S%d tx ==> 0x1b00 = 0x%x\n",
+ path, rtw_read32(rtwdev, 0x1b00));
+ rtw_dbg(rtwdev, RTW_DBG_RFK, "S%d tx ==> 0x1b08 = 0x%x\n",
+ path, rtw_read32(rtwdev, 0x1b08));
+ rtw_dbg(rtwdev, RTW_DBG_RFK, "S%d tx ==> cal_retry = %x\n",
+ path, cal_retry);
+
+ rtw_write32(rtwdev, 0x1b00, 0xf8000000 | (path << 1));
+
+ if (*tx_ok) {
+ *tx_matrix = rtw_read32(rtwdev, 0x1b38);
+
+ rtw_dbg(rtwdev, RTW_DBG_RFK, "S%d_IQC = 0x%x\n",
+ path, *tx_matrix);
+ }
+}
+
+static void rtw8814a_iqk_rx_one_shot(struct rtw_dev *rtwdev, u8 path,
+ u32 *tx_matrix, bool *tx_ok)
+{
+ static const u16 iqk_apply[RTW_RF_PATH_MAX] = {
+ REG_TXAGCIDX, REG_TX_AGC_B, REG_TX_AGC_C, REG_TX_AGC_D
+ };
+ u8 band = rtwdev->hal.current_band_type;
+ u8 bw = rtwdev->hal.current_band_width;
+ u32 rx_matrix;
+ u8 cal_retry;
+ u32 iqk_cmd;
+ bool rx_ok;
+
+ for (cal_retry = 0; cal_retry < 4; cal_retry++) {
+ rtw_write32_mask(rtwdev, REG_FAS, BIT(21) | BIT(20), path);
+
+ if (band == RTW_BAND_2G) {
+ rtw_write_rf(rtwdev, path, RF_LUTDBG, BIT(11), 0x1);
+ rtw_write_rf(rtwdev, path, RF_GAINTX, 0xfffff, 0x51ce1);
+
+ switch (path) {
+ case 0:
+ case 1:
+ rtw_write32(rtwdev, REG_RFE_PINMUX_B,
+ 0x54775477);
+ break;
+ case 2:
+ rtw_write32(rtwdev, REG_RFE_PINMUX_C,
+ 0x54775477);
+ break;
+ case 3:
+ rtw_write32(rtwdev, REG_RFE_INVSEL_D, 0x75400000);
+ rtw_write32(rtwdev, REG_RFE_PINMUX_D,
+ 0x77777777);
+ break;
+ }
+ }
+
+ iqk_cmd = 0xf8000001 | ((9 - bw) << 8) | (BIT(path) << 4);
+
+ rtw_dbg(rtwdev, RTW_DBG_RFK, "RXK_Trigger = 0x%x\n", iqk_cmd);
+
+ rtw_write32(rtwdev, 0x1b00, iqk_cmd);
+
+ usleep_range(10000, 11000);
+
+ if (read_poll_timeout(!rtw_read32_mask, rx_ok, rx_ok,
+ 1000, 20000, false,
+ rtwdev, 0x1b00, BIT(0))) {
+ rtw_dbg(rtwdev, RTW_DBG_RFK,
+ "rx iqk S%d timed out\n", path);
+
+ rtw8814a_iqk_reset_nctl(rtwdev);
+ } else {
+ rx_ok = !rtw_read32_mask(rtwdev, 0x1b08, BIT(26));
+
+ if (rx_ok)
+ break;
+ }
+ }
+
+ rtw_dbg(rtwdev, RTW_DBG_RFK, "S%d rx ==> 0x1b00 = 0x%x\n",
+ path, rtw_read32(rtwdev, 0x1b00));
+ rtw_dbg(rtwdev, RTW_DBG_RFK, "S%d rx ==> 0x1b08 = 0x%x\n",
+ path, rtw_read32(rtwdev, 0x1b08));
+ rtw_dbg(rtwdev, RTW_DBG_RFK, "S%d rx ==> cal_retry = %x\n",
+ path, cal_retry);
+
+ rtw_write32(rtwdev, 0x1b00, 0xf8000000 | (path << 1));
+
+ if (rx_ok) {
+ rtw_write32(rtwdev, 0x1b3c, 0x20000000);
+ rx_matrix = rtw_read32(rtwdev, 0x1b3c);
+
+ rtw_dbg(rtwdev, RTW_DBG_RFK, "S%d_IQC = 0x%x\n",
+ path, rx_matrix);
+ }
+
+ if (*tx_ok)
+ rtw_write32(rtwdev, 0x1b38, *tx_matrix);
+ else
+ rtw_write32_mask(rtwdev, iqk_apply[path], BIT(0), 0x0);
+
+ if (!rx_ok)
+ rtw_write32_mask(rtwdev, iqk_apply[path],
+ BIT(11) | BIT(10), 0x0);
+
+ if (band == RTW_BAND_2G)
+ rtw_write_rf(rtwdev, path, RF_LUTDBG, BIT(11), 0x0);
+}
+
+static void rtw8814a_iqk(struct rtw_dev *rtwdev)
+{
+ u8 band = rtwdev->hal.current_band_type;
+ u8 bw = rtwdev->hal.current_band_width;
+ u32 tx_matrix[RTW_RF_PATH_MAX];
+ bool tx_ok[RTW_RF_PATH_MAX];
+ u8 path;
+
+ rtw_dbg(rtwdev, RTW_DBG_RFK, "IQK band = %d GHz bw = %d MHz\n",
+ band == RTW_BAND_2G ? 2 : 5, (1 << (bw + 1)) * 10);
+
+ rtw_write_rf(rtwdev, RF_PATH_A, RF_TXMOD, BIT(19), 0x1);
+ rtw_write_rf(rtwdev, RF_PATH_B, RF_TXMOD, BIT(19), 0x1);
+ rtw_write_rf(rtwdev, RF_PATH_C, RF_TXMOD, BIT(19), 0x1);
+ rtw_write_rf(rtwdev, RF_PATH_D, RF_TXMOD, BIT(19), 0x1);
+
+ rtw_write32_mask(rtwdev, REG_TXAGCIDX,
+ (BIT(11) | BIT(10) | BIT(0)), 0x401);
+ rtw_write32_mask(rtwdev, REG_TX_AGC_B,
+ (BIT(11) | BIT(10) | BIT(0)), 0x401);
+ rtw_write32_mask(rtwdev, REG_TX_AGC_C,
+ (BIT(11) | BIT(10) | BIT(0)), 0x401);
+ rtw_write32_mask(rtwdev, REG_TX_AGC_D,
+ (BIT(11) | BIT(10) | BIT(0)), 0x401);
+
+ if (band == RTW_BAND_5G)
+ rtw_write32(rtwdev, 0x1b00, 0xf8000ff1);
+ else
+ rtw_write32(rtwdev, 0x1b00, 0xf8000ef1);
+
+ usleep_range(1000, 1100);
+
+ rtw_write32(rtwdev, 0x810, 0x20101063);
+ rtw_write32(rtwdev, REG_DAC_RSTB, 0x0B00C000);
+
+ for (path = RF_PATH_A; path < RTW_RF_PATH_MAX; path++)
+ rtw8814a_lok_one_shot(rtwdev, path);
+
+ for (path = RF_PATH_A; path < RTW_RF_PATH_MAX; path++)
+ rtw8814a_iqk_tx_one_shot(rtwdev, path,
+ &tx_matrix[path], &tx_ok[path]);
+
+ for (path = RF_PATH_A; path < RTW_RF_PATH_MAX; path++)
+ rtw8814a_iqk_rx_one_shot(rtwdev, path,
+ &tx_matrix[path], &tx_ok[path]);
+}
+
+static void rtw8814a_do_iqk(struct rtw_dev *rtwdev)
+{
+ static const u32 backup_mac_reg[MAC_REG_NUM_8814] = {0x520, 0x550};
+ static const u32 backup_bb_reg[BB_REG_NUM_8814] = {
+ 0xa14, 0x808, 0x838, 0x90c, 0x810, 0xcb0, 0xeb0,
+ 0x18b4, 0x1ab4, 0x1abc, 0x9a4, 0x764, 0xcbc, 0x910
+ };
+ static const u32 backup_rf_reg[RF_REG_NUM_8814] = {0x0};
+ u32 rf_backup[RF_REG_NUM_8814][RTW_RF_PATH_MAX];
+ u32 mac_backup[MAC_REG_NUM_8814];
+ u32 bb_backup[BB_REG_NUM_8814];
+
+ rtw8814a_iqk_backup_mac_bb(rtwdev, mac_backup, bb_backup,
+ backup_mac_reg, backup_bb_reg);
+ rtw8814a_iqk_afe_setting(rtwdev, true);
+ rtw8814a_iqk_backup_rf(rtwdev, rf_backup, backup_rf_reg);
+ rtw8814a_iqk_configure_mac(rtwdev);
+ rtw8814a_iqk(rtwdev);
+ rtw8814a_iqk_reset_nctl(rtwdev); /* for 3-wire to BB use */
+ rtw8814a_iqk_afe_setting(rtwdev, false);
+ rtw8814a_iqk_restore_mac_bb(rtwdev, mac_backup, bb_backup,
+ backup_mac_reg, backup_bb_reg);
+ rtw8814a_iqk_restore_rf(rtwdev, rf_backup, backup_rf_reg);
+}
+
+static void rtw8814a_phy_calibration(struct rtw_dev *rtwdev)
+{
+ rtw8814a_do_iqk(rtwdev);
+}
+
+static void rtw8814a_coex_cfg_init(struct rtw_dev *rtwdev)
+{
+}
+
+static void rtw8814a_coex_cfg_ant_switch(struct rtw_dev *rtwdev, u8 ctrl_type,
+ u8 pos_type)
+{
+ /* Override rtw_coex_coex_ctrl_owner(). RF path C does not
+ * function when BIT_LTE_MUX_CTRL_PATH is set.
+ */
+ rtw_write8_clr(rtwdev, REG_SYS_SDIO_CTRL + 3,
+ BIT_LTE_MUX_CTRL_PATH >> 24);
+}
+
+static void rtw8814a_coex_cfg_gnt_fix(struct rtw_dev *rtwdev)
+{
+}
+
+static void rtw8814a_coex_cfg_gnt_debug(struct rtw_dev *rtwdev)
+{
+}
+
+static void rtw8814a_coex_cfg_rfe_type(struct rtw_dev *rtwdev)
+{
+ struct rtw_coex *coex = &rtwdev->coex;
+ struct rtw_coex_rfe *coex_rfe = &coex->rfe;
+
+ /* Only needed to make rtw8814a_coex_cfg_ant_switch() run. */
+ coex_rfe->ant_switch_exist = true;
+}
+
+static void rtw8814a_coex_cfg_wl_tx_power(struct rtw_dev *rtwdev, u8 wl_pwr)
+{
+}
+
+static void rtw8814a_coex_cfg_wl_rx_gain(struct rtw_dev *rtwdev, bool low_gain)
+{
+}
+
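+/* Split this path's power tracking delta into a TX AGC index and
+ * a BB swing table index.
+ */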
+static void rtw8814a_txagc_swing_offset(struct rtw_dev *rtwdev, u8 path,
+ u8 tx_pwr_idx_offset,
+ s8 *txagc_idx, u8 *swing_idx)
+{
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+ u8 swing_upper_bound = dm_info->default_ofdm_index + 10;
+ s8 delta_pwr_idx = dm_info->delta_power_index[path];
+ u8 swing_index = dm_info->default_ofdm_index;
+ u8 max_tx_pwr_idx_offset = 0xf;
+ u8 swing_lower_bound = 0;
+ s8 agc_index = 0;
+
+ tx_pwr_idx_offset = min_t(u8, tx_pwr_idx_offset, max_tx_pwr_idx_offset);
+
+ if (delta_pwr_idx >= 0) {
+ if (delta_pwr_idx <= tx_pwr_idx_offset) {
+ agc_index = delta_pwr_idx;
+ swing_index = dm_info->default_ofdm_index;
+ } else if (delta_pwr_idx > tx_pwr_idx_offset) {
+ agc_index = tx_pwr_idx_offset;
+ swing_index = dm_info->default_ofdm_index +
+ delta_pwr_idx - tx_pwr_idx_offset;
+ swing_index = min_t(u8, swing_index, swing_upper_bound);
+ }
+ } else {
+ if (dm_info->default_ofdm_index > abs(delta_pwr_idx))
+ swing_index =
+ dm_info->default_ofdm_index + delta_pwr_idx;
+ else
+ swing_index = swing_lower_bound;
+ swing_index = max_t(u8, swing_index, swing_lower_bound);
+
+ agc_index = 0;
+ }
+
+ if (swing_index >= RTW_TXSCALE_SIZE) {
+ rtw_warn(rtwdev, "swing index overflow\n");
+ swing_index = RTW_TXSCALE_SIZE - 1;
+ }
+ *txagc_idx = agc_index;
+ *swing_idx = swing_index;
+}
+
+static void rtw8814a_pwrtrack_set_pwr(struct rtw_dev *rtwdev, u8 path,
+ u8 pwr_idx_offset)
+{
+ static const u32 txagc_reg[RTW_RF_PATH_MAX] = {
+ REG_TX_AGC_A, REG_TX_AGC_B, REG_TX_AGC_C, REG_TX_AGC_D
+ };
+ static const u32 txscale_reg[RTW_RF_PATH_MAX] = {
+ REG_TXSCALE_A, REG_TXSCALE_B, REG_TXSCALE_C, REG_TXSCALE_D
+ };
+ s8 txagc_idx;
+ u8 swing_idx;
+
+ rtw8814a_txagc_swing_offset(rtwdev, path, pwr_idx_offset,
+ &txagc_idx, &swing_idx);
+ rtw_write32_mask(rtwdev, txagc_reg[path], GENMASK(29, 25),
+ txagc_idx);
+ rtw_write32_mask(rtwdev, txscale_reg[path], BB_SWING_MASK,
+ rtw8814a_txscale_tbl[swing_idx]);
+}
+
+static void rtw8814a_pwrtrack_set(struct rtw_dev *rtwdev, u8 path)
+{
+ u8 max_pwr_idx = rtwdev->chip->max_power_index;
+ u8 band_width = rtwdev->hal.current_band_width;
+ u8 channel = rtwdev->hal.current_channel;
+ u8 tx_rate = rtwdev->dm_info.tx_rate;
+ u8 regd = rtw_regd_get(rtwdev);
+ u8 pwr_idx_offset, tx_pwr_idx;
+
+ tx_pwr_idx = rtw_phy_get_tx_power_index(rtwdev, path, tx_rate,
+ band_width, channel, regd);
+
+ tx_pwr_idx = min_t(u8, tx_pwr_idx, max_pwr_idx);
+
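+ /* Remaining headroom below the chip's maximum power index */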
+ pwr_idx_offset = max_pwr_idx - tx_pwr_idx;
+
+ rtw8814a_pwrtrack_set_pwr(rtwdev, path, pwr_idx_offset);
+}
+
+static void rtw8814a_phy_pwrtrack_path(struct rtw_dev *rtwdev,
+ struct rtw_swing_table *swing_table,
+ u8 path)
+{
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+ u8 power_idx_cur, power_idx_last;
+ u8 delta;
+
+ /* 8814A only has one thermal meter at PATH A */
+ delta = rtw_phy_pwrtrack_get_delta(rtwdev, RF_PATH_A);
+
+ power_idx_last = dm_info->delta_power_index[path];
+ power_idx_cur = rtw_phy_pwrtrack_get_pwridx(rtwdev, swing_table,
+ path, RF_PATH_A, delta);
+
+ /* if the delta of the power indexes is unchanged, just skip */
+ if (power_idx_cur == power_idx_last)
+ return;
+
+ dm_info->delta_power_index[path] = power_idx_cur;
+ rtw8814a_pwrtrack_set(rtwdev, path);
+}
+
+static void rtw8814a_phy_pwrtrack(struct rtw_dev *rtwdev)
+{
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+ struct rtw_swing_table swing_table;
+ u8 thermal_value, path;
+
+ rtw_phy_config_swing_table(rtwdev, &swing_table);
+
+ if (rtwdev->efuse.thermal_meter[RF_PATH_A] == 0xff)
+ return;
+
+ thermal_value = rtw_read_rf(rtwdev, RF_PATH_A, RF_T_METER, 0xfc00);
+
+ rtw_phy_pwrtrack_avg(rtwdev, thermal_value, RF_PATH_A);
+
+ if (dm_info->pwr_trk_init_trigger)
+ dm_info->pwr_trk_init_trigger = false;
+ else if (!rtw_phy_pwrtrack_thermal_changed(rtwdev, thermal_value,
+ RF_PATH_A))
+ goto iqk;
+
+ for (path = RF_PATH_A; path < rtwdev->hal.rf_path_num; path++)
+ rtw8814a_phy_pwrtrack_path(rtwdev, &swing_table, path);
+
+iqk:
+ if (rtw_phy_pwrtrack_need_iqk(rtwdev))
+ rtw8814a_do_iqk(rtwdev);
+}
+
+static void rtw8814a_pwr_track(struct rtw_dev *rtwdev)
+{
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+
+ if (!dm_info->pwr_trk_triggered) {
+ rtw_write_rf(rtwdev, RF_PATH_A, RF_T_METER,
+ GENMASK(17, 16), 0x03);
+ dm_info->pwr_trk_triggered = true;
+ return;
+ }
+
+ rtw8814a_phy_pwrtrack(rtwdev);
+ dm_info->pwr_trk_triggered = false;
+}
+
+static void rtw8814a_phy_cck_pd_set(struct rtw_dev *rtwdev, u8 new_lvl)
+{
+ static const u8 pd[CCK_PD_LV_MAX] = {0x40, 0x83, 0xcd, 0xdd, 0xed};
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+
+ /* Override rtw_phy_cck_pd_lv_link(). It implements something
+ * like type 2/3/4. We need type 1 here.
+ */
+ if (rtw_is_assoc(rtwdev)) {
+ if (dm_info->min_rssi > 60) {
+ new_lvl = CCK_PD_LV3;
+ } else if (dm_info->min_rssi > 35) {
+ new_lvl = CCK_PD_LV2;
+ } else if (dm_info->min_rssi > 20) {
+ if (dm_info->cck_fa_avg > 500)
+ new_lvl = CCK_PD_LV2;
+ else if (dm_info->cck_fa_avg < 250)
+ new_lvl = CCK_PD_LV1;
+ else
+ return;
+ } else {
+ new_lvl = CCK_PD_LV1;
+ }
+ }
+
+ rtw_dbg(rtwdev, RTW_DBG_PHY, "lv: (%d) -> (%d)\n",
+ dm_info->cck_pd_lv[RTW_CHANNEL_WIDTH_20][RF_PATH_A], new_lvl);
+
+ if (dm_info->cck_pd_lv[RTW_CHANNEL_WIDTH_20][RF_PATH_A] == new_lvl)
+ return;
+
+ dm_info->cck_fa_avg = CCK_FA_AVG_RESET;
+ dm_info->cck_pd_lv[RTW_CHANNEL_WIDTH_20][RF_PATH_A] = new_lvl;
+
+ rtw_write8(rtwdev, REG_CCK_PD_TH, pd[new_lvl]);
+}
+
+static void rtw8814a_led_set(struct led_classdev *led,
+ enum led_brightness brightness)
+{
+ struct rtw_dev *rtwdev = container_of(led, struct rtw_dev, led_cdev);
+ u32 led_gpio_cfg;
+
+ led_gpio_cfg = rtw_read32(rtwdev, REG_GPIO_PIN_CTRL_2);
+ led_gpio_cfg |= BIT(16) | BIT(17) | BIT(21) | BIT(22);
+
+ if (brightness == LED_OFF) {
+ led_gpio_cfg |= BIT(8) | BIT(9) | BIT(13) | BIT(14);
+ } else {
+ led_gpio_cfg &= ~(BIT(8) | BIT(9) | BIT(13) | BIT(14));
+ led_gpio_cfg &= ~(BIT(0) | BIT(1) | BIT(5) | BIT(6));
+ }
+
+ rtw_write32(rtwdev, REG_GPIO_PIN_CTRL_2, led_gpio_cfg);
+}
+
+static void rtw8814a_fill_txdesc_checksum(struct rtw_dev *rtwdev,
+ struct rtw_tx_pkt_info *pkt_info,
+ u8 *txdesc)
+{
+ size_t words = 32 / 2; /* the checksum covers the first 32 bytes (16 words) */
+
+ fill_txdesc_checksum_common(txdesc, words);
+}
+
+static const struct rtw_chip_ops rtw8814a_ops = {
+ .power_on = rtw_power_on,
+ .power_off = rtw_power_off,
+ .phy_set_param = rtw8814a_phy_set_param,
+ .read_efuse = rtw8814a_read_efuse,
+ .query_phy_status = rtw8814a_query_phy_status,
+ .set_channel = rtw8814a_set_channel,
+ .mac_init = rtw8814a_mac_init,
+ .read_rf = rtw_phy_read_rf,
+ .write_rf = rtw_phy_write_rf_reg_sipi,
+ .set_tx_power_index = rtw8814a_set_tx_power_index,
+ .set_antenna = NULL,
+ .cfg_ldo25 = rtw8814a_cfg_ldo25,
+ .efuse_grant = rtw8814a_efuse_grant,
+ .false_alarm_statistics = rtw8814a_false_alarm_statistics,
+ .phy_calibration = rtw8814a_phy_calibration,
+ .cck_pd_set = rtw8814a_phy_cck_pd_set,
+ .pwr_track = rtw8814a_pwr_track,
+ .config_bfee = NULL,
+ .set_gid_table = NULL,
+ .cfg_csi_rate = NULL,
+ .led_set = rtw8814a_led_set,
+ .fill_txdesc_checksum = rtw8814a_fill_txdesc_checksum,
+
+ .coex_set_init = rtw8814a_coex_cfg_init,
+ .coex_set_ant_switch = rtw8814a_coex_cfg_ant_switch,
+ .coex_set_gnt_fix = rtw8814a_coex_cfg_gnt_fix,
+ .coex_set_gnt_debug = rtw8814a_coex_cfg_gnt_debug,
+ .coex_set_rfe_type = rtw8814a_coex_cfg_rfe_type,
+ .coex_set_wl_tx_power = rtw8814a_coex_cfg_wl_tx_power,
+ .coex_set_wl_rx_gain = rtw8814a_coex_cfg_wl_rx_gain,
+};
+
+static const struct rtw_rqpn rqpn_table_8814a[] = {
+ /* SDIO */
+ {RTW_DMA_MAPPING_NORMAL, RTW_DMA_MAPPING_NORMAL, /* vo vi */
+ RTW_DMA_MAPPING_LOW, RTW_DMA_MAPPING_LOW, /* be bk */
+ RTW_DMA_MAPPING_EXTRA, RTW_DMA_MAPPING_HIGH}, /* mg hi */
+ /* PCIE */
+ {RTW_DMA_MAPPING_HIGH, RTW_DMA_MAPPING_NORMAL,
+ RTW_DMA_MAPPING_LOW, RTW_DMA_MAPPING_LOW,
+ RTW_DMA_MAPPING_HIGH, RTW_DMA_MAPPING_HIGH},
+ /* USB, 2 bulk out */
+ {RTW_DMA_MAPPING_HIGH, RTW_DMA_MAPPING_HIGH,
+ RTW_DMA_MAPPING_NORMAL, RTW_DMA_MAPPING_NORMAL,
+ RTW_DMA_MAPPING_HIGH, RTW_DMA_MAPPING_HIGH},
+ /* USB, 3 bulk out */
+ {RTW_DMA_MAPPING_HIGH, RTW_DMA_MAPPING_NORMAL,
+ RTW_DMA_MAPPING_LOW, RTW_DMA_MAPPING_LOW,
+ RTW_DMA_MAPPING_HIGH, RTW_DMA_MAPPING_HIGH},
+ /* USB, 4 bulk out */
+ {RTW_DMA_MAPPING_HIGH, RTW_DMA_MAPPING_NORMAL,
+ RTW_DMA_MAPPING_LOW, RTW_DMA_MAPPING_LOW,
+ RTW_DMA_MAPPING_HIGH, RTW_DMA_MAPPING_HIGH},
+};
+
+static const struct rtw_prioq_addrs prioq_addrs_8814a = {
+ .prio[RTW_DMA_MAPPING_EXTRA] = {
+ .rsvd = REG_FIFOPAGE_INFO_4, .avail = REG_FIFOPAGE_INFO_4 + 2,
+ },
+ .prio[RTW_DMA_MAPPING_LOW] = {
+ .rsvd = REG_FIFOPAGE_INFO_2, .avail = REG_FIFOPAGE_INFO_2 + 2,
+ },
+ .prio[RTW_DMA_MAPPING_NORMAL] = {
+ .rsvd = REG_FIFOPAGE_INFO_3, .avail = REG_FIFOPAGE_INFO_3 + 2,
+ },
+ .prio[RTW_DMA_MAPPING_HIGH] = {
+ .rsvd = REG_FIFOPAGE_INFO_1, .avail = REG_FIFOPAGE_INFO_1 + 2,
+ },
+ .wsize = true,
+};
+
+static const struct rtw_page_table page_table_8814a[] = {
+ /* SDIO */
+ {0, 0, 0, 0, 0}, /* hq nq lq exq gapq */
+ /* PCIE */
+ {32, 32, 32, 32, 0},
+ /* USB, 2 bulk out */
+ {32, 32, 32, 32, 0},
+ /* USB, 3 bulk out */
+ {32, 32, 32, 32, 0},
+ /* USB, 4 bulk out */
+ {32, 32, 32, 32, 0},
+};
+
+static const struct rtw_intf_phy_para_table phy_para_table_8814a = {};
+
+static const struct rtw_hw_reg rtw8814a_dig[] = {
+ [0] = { .addr = 0xc50, .mask = 0x7f },
+ [1] = { .addr = 0xe50, .mask = 0x7f },
+ [2] = { .addr = 0x1850, .mask = 0x7f },
+ [3] = { .addr = 0x1a50, .mask = 0x7f },
+};
+
+static const struct rtw_rfe_def rtw8814a_rfe_defs[] = {
+ [0] = { .phy_pg_tbl = &rtw8814a_bb_pg_type0_tbl,
+ .txpwr_lmt_tbl = &rtw8814a_txpwr_lmt_type0_tbl,
+ .pwr_track_tbl = &rtw8814a_rtw_pwrtrk_type0_tbl },
+ [1] = { .phy_pg_tbl = &rtw8814a_bb_pg_tbl,
+ .txpwr_lmt_tbl = &rtw8814a_txpwr_lmt_type1_tbl,
+ .pwr_track_tbl = &rtw8814a_rtw_pwrtrk_tbl },
+};
+
+/* RSSI in percent (dBm = % - 100) */
+static const u8 wl_rssi_step_8814a[] = {60, 50, 44, 30};
+static const u8 bt_rssi_step_8814a[] = {30, 30, 30, 30};
+
+/* wl_tx_dec_power, bt_tx_dec_power, wl_rx_gain, bt_rx_lna_constrain */
+static const struct coex_rf_para rf_para_tx_8814a[] = {
+ {0, 0, false, 7}, /* for normal */
+ {0, 16, false, 7}, /* for WL-CPT */
+ {4, 0, true, 1},
+ {3, 6, true, 1},
+ {2, 9, true, 1},
+ {1, 13, true, 1}
+};
+
+static const struct coex_rf_para rf_para_rx_8814a[] = {
+ {0, 0, false, 7}, /* for normal */
+ {0, 16, false, 7}, /* for WL-CPT */
+ {4, 0, true, 1},
+ {3, 6, true, 1},
+ {2, 9, true, 1},
+ {1, 13, true, 1}
+};
+
+static_assert(ARRAY_SIZE(rf_para_tx_8814a) == ARRAY_SIZE(rf_para_rx_8814a));
+
+const struct rtw_chip_info rtw8814a_hw_spec = {
+ .ops = &rtw8814a_ops,
+ .id = RTW_CHIP_TYPE_8814A,
+ .fw_name = "rtw88/rtw8814a_fw.bin",
+ .wlan_cpu = RTW_WCPU_11AC,
+ .tx_pkt_desc_sz = 40,
+ .tx_buf_desc_sz = 16,
+ .rx_pkt_desc_sz = 24,
+ .rx_buf_desc_sz = 8,
+ .phy_efuse_size = 1024,
+ .log_efuse_size = 512,
+ .ptct_efuse_size = 0,
+ .txff_size = (2048 - 10) * TX_PAGE_SIZE,
+ .rxff_size = 23552,
+ .rsvd_drv_pg_num = 8,
+ .band = RTW_BAND_2G | RTW_BAND_5G,
+ .page_size = TX_PAGE_SIZE,
+ .csi_buf_pg_num = 0,
+ .dig_min = 0x1c,
+ .txgi_factor = 1,
+ .is_pwr_by_rate_dec = true,
+ .rx_ldpc = true,
+ .max_power_index = 0x3f,
+ .ampdu_density = IEEE80211_HT_MPDU_DENSITY_2,
+ .usb_tx_agg_desc_num = 3,
+ .hw_feature_report = false,
+ .c2h_ra_report_size = 6,
+ .old_datarate_fb_limit = false,
+ .ht_supported = true,
+ .vht_supported = true,
+ .lps_deep_mode_supported = BIT(LPS_DEEP_MODE_LCLK),
+ .sys_func_en = 0xDC,
+ .pwr_on_seq = card_enable_flow_8814a,
+ .pwr_off_seq = card_disable_flow_8814a,
+ .rqpn_table = rqpn_table_8814a,
+ .prioq_addrs = &prioq_addrs_8814a,
+ .page_table = page_table_8814a,
+ .intf_table = &phy_para_table_8814a,
+ .dig = rtw8814a_dig,
+ .dig_cck = NULL,
+ .rf_base_addr = {0x2800, 0x2c00, 0x3800, 0x3c00},
+ .rf_sipi_addr = {0xc90, 0xe90, 0x1890, 0x1a90},
+ .ltecoex_addr = NULL,
+ .mac_tbl = &rtw8814a_mac_tbl,
+ .agc_tbl = &rtw8814a_agc_tbl,
+ .bb_tbl = &rtw8814a_bb_tbl,
+ .rf_tbl = {&rtw8814a_rf_a_tbl, &rtw8814a_rf_b_tbl,
+ &rtw8814a_rf_c_tbl, &rtw8814a_rf_d_tbl},
+ .rfe_defs = rtw8814a_rfe_defs,
+ .rfe_defs_size = ARRAY_SIZE(rtw8814a_rfe_defs),
+ .iqk_threshold = 8,
+ .max_scan_ie_len = IEEE80211_MAX_DATA_LEN,
+
+ .coex_para_ver = 0,
+ .bt_desired_ver = 0,
+ .scbd_support = false,
+ .new_scbd10_def = false,
+ .ble_hid_profile_support = false,
+ .wl_mimo_ps_support = false,
+ .pstdma_type = COEX_PSTDMA_FORCE_LPSOFF,
+ .bt_rssi_type = COEX_BTRSSI_RATIO,
+ .ant_isolation = 15,
+ .rssi_tolerance = 2,
+ .wl_rssi_step = wl_rssi_step_8814a,
+ .bt_rssi_step = bt_rssi_step_8814a,
+ .table_sant_num = 0,
+ .table_sant = NULL,
+ .table_nsant_num = 0,
+ .table_nsant = NULL,
+ .tdma_sant_num = 0,
+ .tdma_sant = NULL,
+ .tdma_nsant_num = 0,
+ .tdma_nsant = NULL,
+ .wl_rf_para_num = ARRAY_SIZE(rf_para_tx_8814a),
+ .wl_rf_para_tx = rf_para_tx_8814a,
+ .wl_rf_para_rx = rf_para_rx_8814a,
+ .bt_afh_span_bw20 = 0x24,
+ .bt_afh_span_bw40 = 0x36,
+ .afh_5g_num = 0,
+ .afh_5g = NULL,
+ .coex_info_hw_regs_num = 0,
+ .coex_info_hw_regs = NULL,
+};
+EXPORT_SYMBOL(rtw8814a_hw_spec);
+
+MODULE_FIRMWARE("rtw88/rtw8814a_fw.bin");
+
+MODULE_AUTHOR("Bitterblue Smith <rtl8821cerfe2@gmail.com>");
+MODULE_DESCRIPTION("Realtek 802.11ac wireless 8814a driver");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8814a.h b/drivers/net/wireless/realtek/rtw88/rtw8814a.h
new file mode 100644
index 000000000000..c57c7c8f915e
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw88/rtw8814a.h
@@ -0,0 +1,62 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/* Copyright(c) 2025 Realtek Corporation
+ */
+
+#ifndef __RTW8814A_H__
+#define __RTW8814A_H__
+
+struct rtw8814au_efuse {
+ u8 vid[2]; /* 0xd0 */
+ u8 pid[2]; /* 0xd2 */
+ u8 res[4]; /* 0xd4 */
+ u8 mac_addr[ETH_ALEN]; /* 0xd8 */
+} __packed;
+
+struct rtw8814ae_efuse {
+ u8 mac_addr[ETH_ALEN]; /* 0xd0 */
+ u8 vid[2]; /* 0xd6 */
+ u8 did[2]; /* 0xd8 */
+ u8 svid[2]; /* 0xda */
+ u8 smid[2]; /* 0xdc */
+} __packed;
+
+struct rtw8814a_efuse {
+ __le16 rtl_id;
+ u8 res0[0x0c];
+ u8 usb_mode; /* 0x0e */
+ u8 res1;
+
+ /* power index for four RF paths */
+ struct rtw_txpwr_idx txpwr_idx_table[4];
+
+ u8 channel_plan; /* 0xb8 */
+ u8 xtal_k; /* 0xb9 */
+ u8 thermal_meter; /* 0xba */
+ u8 iqk_lck; /* 0xbb */
+ u8 pa_type; /* 0xbc */
+ u8 lna_type_2g[2]; /* 0xbd */
+ u8 lna_type_5g[2]; /* 0xbf */
+ u8 rf_board_option; /* 0xc1 */
+ u8 res2;
+ u8 rf_bt_setting; /* 0xc3 */
+ u8 eeprom_version; /* 0xc4 */
+ u8 eeprom_customer_id; /* 0xc5 */
+ u8 tx_bb_swing_setting_2g; /* 0xc6 */
+ u8 tx_bb_swing_setting_5g; /* 0xc7 */
+ u8 res3;
+ u8 trx_antenna_option; /* 0xc9 */
+ u8 rfe_option; /* 0xca */
+ u8 country_code[2]; /* 0xcb */
+ u8 res4[3];
+ union {
+ struct rtw8814au_efuse u;
+ struct rtw8814ae_efuse e;
+ };
+ u8 res5[0x122]; /* 0xde */
+} __packed;
+
+static_assert(sizeof(struct rtw8814a_efuse) == 512);
+
+extern const struct rtw_chip_info rtw8814a_hw_spec;
+
+#endif
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8814a_table.c b/drivers/net/wireless/realtek/rtw88/rtw8814a_table.c
new file mode 100644
index 000000000000..b9ce51a09fc8
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw88/rtw8814a_table.c
@@ -0,0 +1,23930 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright(c) 2025 Realtek Corporation
+ */
+
+#include "main.h"
+#include "phy.h"
+#include "rtw8814a_table.h"
+
+static const u32 rtw8814a_mac[] = {
+ 0x010, 0x0000007C,
+ 0x014, 0x000000DB,
+ 0x016, 0x00000002,
+ 0x073, 0x00000010,
+ 0x420, 0x00000080,
+ 0x421, 0x0000000F,
+ 0x428, 0x0000000A,
+ 0x429, 0x00000010,
+ 0x430, 0x00000000,
+ 0x431, 0x00000000,
+ 0x432, 0x00000000,
+ 0x433, 0x00000001,
+ 0x434, 0x00000004,
+ 0x435, 0x00000005,
+ 0x436, 0x00000007,
+ 0x437, 0x00000008,
+ 0x43C, 0x00000004,
+ 0x43D, 0x00000005,
+ 0x43E, 0x00000007,
+ 0x43F, 0x00000008,
+ 0x440, 0x0000005D,
+ 0x441, 0x00000001,
+ 0x442, 0x00000000,
+ 0x444, 0x00000010,
+ 0x445, 0x000000F0,
+ 0x446, 0x00000001,
+ 0x447, 0x000000FE,
+ 0x448, 0x00000000,
+ 0x449, 0x00000000,
+ 0x44A, 0x00000000,
+ 0x44B, 0x00000040,
+ 0x44C, 0x00000010,
+ 0x44D, 0x000000F0,
+ 0x44E, 0x0000003F,
+ 0x44F, 0x00000000,
+ 0x450, 0x00000000,
+ 0x451, 0x00000000,
+ 0x452, 0x00000000,
+ 0x453, 0x00000040,
+ 0x45E, 0x00000004,
+ 0x49C, 0x00000010,
+ 0x49D, 0x000000F0,
+ 0x49E, 0x00000000,
+ 0x49F, 0x00000006,
+ 0x4A0, 0x000000E0,
+ 0x4A1, 0x00000003,
+ 0x4A2, 0x00000000,
+ 0x4A3, 0x00000040,
+ 0x4A4, 0x00000015,
+ 0x4A5, 0x000000F0,
+ 0x4A6, 0x00000000,
+ 0x4A7, 0x00000006,
+ 0x4A8, 0x000000E0,
+ 0x4A9, 0x00000000,
+ 0x4AA, 0x00000000,
+ 0x4AB, 0x00000000,
+ 0x7DA, 0x00000008,
+ 0x1448, 0x00000006,
+ 0x144A, 0x00000006,
+ 0x144C, 0x00000006,
+ 0x144E, 0x00000006,
+ 0x4C8, 0x000000FF,
+ 0x4C9, 0x00000008,
+ 0x4CA, 0x0000003C,
+ 0x4CB, 0x0000003C,
+ 0x4CC, 0x000000FF,
+ 0x4CD, 0x000000FF,
+ 0x4CE, 0x00000001,
+ 0x4CF, 0x00000008,
+ 0x500, 0x00000026,
+ 0x501, 0x000000A2,
+ 0x502, 0x0000002F,
+ 0x503, 0x00000000,
+ 0x504, 0x00000028,
+ 0x505, 0x000000A3,
+ 0x506, 0x0000005E,
+ 0x507, 0x00000000,
+ 0x508, 0x0000002B,
+ 0x509, 0x000000A4,
+ 0x50A, 0x0000005E,
+ 0x50B, 0x00000000,
+ 0x50C, 0x0000004F,
+ 0x50D, 0x000000A4,
+ 0x50E, 0x00000000,
+ 0x50F, 0x00000000,
+ 0x512, 0x0000001C,
+ 0x514, 0x0000000A,
+ 0x516, 0x0000000A,
+ 0x521, 0x0000002F,
+ 0x525, 0x00000047,
+ 0x550, 0x00000010,
+ 0x551, 0x00000010,
+ 0x559, 0x00000002,
+ 0x55C, 0x00000064,
+ 0x55D, 0x000000FF,
+ 0x577, 0x00000003,
+ 0x5BE, 0x00000064,
+ 0x604, 0x00000001,
+ 0x605, 0x00000030,
+ 0x607, 0x00000001,
+ 0x608, 0x0000000E,
+ 0x609, 0x0000002A,
+ 0x60A, 0x00000000,
+ 0x60C, 0x00000018,
+ 0x60D, 0x00000050,
+ 0x6A0, 0x000000FF,
+ 0x6A1, 0x000000FF,
+ 0x6A2, 0x000000FF,
+ 0x6A3, 0x000000FF,
+ 0x6A4, 0x000000FF,
+ 0x6A5, 0x000000FF,
+ 0x6DE, 0x00000084,
+ 0x620, 0x000000FF,
+ 0x621, 0x000000FF,
+ 0x622, 0x000000FF,
+ 0x623, 0x000000FF,
+ 0x624, 0x000000FF,
+ 0x625, 0x000000FF,
+ 0x626, 0x000000FF,
+ 0x627, 0x000000FF,
+ 0x638, 0x00000064,
+ 0x63C, 0x0000000A,
+ 0x63D, 0x0000000A,
+ 0x63E, 0x0000000E,
+ 0x63F, 0x0000000E,
+ 0x640, 0x00000040,
+ 0x642, 0x00000040,
+ 0x643, 0x00000000,
+ 0x652, 0x000000C8,
+ 0x66E, 0x00000005,
+ 0x700, 0x00000021,
+ 0x701, 0x00000043,
+ 0x702, 0x00000065,
+ 0x703, 0x00000087,
+ 0x708, 0x00000021,
+ 0x709, 0x00000043,
+ 0x70A, 0x00000065,
+ 0x70B, 0x00000087,
+ 0x718, 0x00000040,
+ 0x7D5, 0x000000BC,
+ 0x7D8, 0x00000028,
+ 0x7D9, 0x00000000,
+ 0x7DA, 0x0000000B,
+};
+
+RTW_DECL_TABLE_PHY_COND(rtw8814a_mac, rtw_phy_cfg_mac);
+
+static const u32 rtw8814a_agc[] = {
+ 0x80000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFE000003,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFE000003,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFE000003,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFE000003,
+ 0x90000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFF000003,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFE000003,
+ 0x90000009, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFF000003,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFE000003,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFE000003,
+ 0xA0000000, 0x00000000,
+ 0x81C, 0xFF000003,
+ 0xB0000000, 0x00000000,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFE000003,
+ 0x81C, 0xFD020003,
+ 0x81C, 0xFC040003,
+ 0x81C, 0xFB060003,
+ 0x81C, 0xFA080003,
+ 0x81C, 0xF90A0003,
+ 0x81C, 0xF80C0003,
+ 0x81C, 0xF70E0003,
+ 0x81C, 0xF6100003,
+ 0x81C, 0xF5120003,
+ 0x81C, 0xF4140003,
+ 0x81C, 0xF3160003,
+ 0x81C, 0xF2180003,
+ 0x81C, 0xF11A0003,
+ 0x81C, 0xF01C0003,
+ 0x81C, 0xEF1E0003,
+ 0x81C, 0xEE200003,
+ 0x81C, 0xED220003,
+ 0x81C, 0xCF240003,
+ 0x81C, 0xCE260003,
+ 0x81C, 0xCD280003,
+ 0x81C, 0xCC2A0003,
+ 0x81C, 0xCB2C0003,
+ 0x81C, 0xCA2E0003,
+ 0x81C, 0xC9300003,
+ 0x81C, 0xC8320003,
+ 0x81C, 0xC7340003,
+ 0x81C, 0xC6360003,
+ 0x81C, 0xC5380003,
+ 0x81C, 0xC43A0003,
+ 0x81C, 0xA63C0003,
+ 0x81C, 0xA53E0003,
+ 0x81C, 0xA4400003,
+ 0x81C, 0xA3420003,
+ 0x81C, 0xA2440003,
+ 0x81C, 0xA1460003,
+ 0x81C, 0x86480003,
+ 0x81C, 0x854A0003,
+ 0x81C, 0x844C0003,
+ 0x81C, 0x834E0003,
+ 0x81C, 0x66500003,
+ 0x81C, 0x65520003,
+ 0x81C, 0x64540003,
+ 0x81C, 0x63560003,
+ 0x81C, 0x62580003,
+ 0x81C, 0x615A0003,
+ 0x81C, 0x435C0003,
+ 0x81C, 0x425E0003,
+ 0x81C, 0x41600003,
+ 0x81C, 0x27620003,
+ 0x81C, 0x26640003,
+ 0x81C, 0x25660003,
+ 0x81C, 0x24680003,
+ 0x81C, 0x236A0003,
+ 0x81C, 0x226C0003,
+ 0x81C, 0x216E0003,
+ 0x81C, 0x21700003,
+ 0x81C, 0x21720003,
+ 0x81C, 0x21740003,
+ 0x81C, 0x21760003,
+ 0x81C, 0x21780003,
+ 0x81C, 0x217A0003,
+ 0x81C, 0x217C0003,
+ 0x81C, 0x217E0003,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFF000003,
+ 0x81C, 0xFE020003,
+ 0x81C, 0xFD040003,
+ 0x81C, 0xFC060003,
+ 0x81C, 0xFB080003,
+ 0x81C, 0xFA0A0003,
+ 0x81C, 0xF90C0003,
+ 0x81C, 0xF80E0003,
+ 0x81C, 0xF7100003,
+ 0x81C, 0xF6120003,
+ 0x81C, 0xF5140003,
+ 0x81C, 0xF4160003,
+ 0x81C, 0xF3180003,
+ 0x81C, 0xF21A0003,
+ 0x81C, 0xF11C0003,
+ 0x81C, 0xF01E0003,
+ 0x81C, 0xEF200003,
+ 0x81C, 0xEE220003,
+ 0x81C, 0xED240003,
+ 0x81C, 0xEC260003,
+ 0x81C, 0xEB280003,
+ 0x81C, 0xEA2A0003,
+ 0x81C, 0xE92C0003,
+ 0x81C, 0xE82E0003,
+ 0x81C, 0xE7300003,
+ 0x81C, 0xE6320003,
+ 0x81C, 0xE5340003,
+ 0x81C, 0xE4360003,
+ 0x81C, 0xE3380003,
+ 0x81C, 0xC53A0003,
+ 0x81C, 0xC43C0003,
+ 0x81C, 0xC33E0003,
+ 0x81C, 0xC2400003,
+ 0x81C, 0xC1420003,
+ 0x81C, 0xA8440003,
+ 0x81C, 0xA7460003,
+ 0x81C, 0xA6480003,
+ 0x81C, 0xA54A0003,
+ 0x81C, 0xA44C0003,
+ 0x81C, 0xA34E0003,
+ 0x81C, 0xA2500003,
+ 0x81C, 0x65520003,
+ 0x81C, 0x64540003,
+ 0x81C, 0x63560003,
+ 0x81C, 0x62580003,
+ 0x81C, 0x615A0003,
+ 0x81C, 0x475C0003,
+ 0x81C, 0x465E0003,
+ 0x81C, 0x45600003,
+ 0x81C, 0x44620003,
+ 0x81C, 0x43640003,
+ 0x81C, 0x42660003,
+ 0x81C, 0x41680003,
+ 0x81C, 0x416A0003,
+ 0x81C, 0x416C0003,
+ 0x81C, 0x416E0003,
+ 0x81C, 0x41700003,
+ 0x81C, 0x41720003,
+ 0x81C, 0x41740003,
+ 0x81C, 0x41760003,
+ 0x81C, 0x41780003,
+ 0x81C, 0x417A0003,
+ 0x81C, 0x417C0003,
+ 0x81C, 0x417E0003,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFE000003,
+ 0x81C, 0xFD020003,
+ 0x81C, 0xFC040003,
+ 0x81C, 0xFB060003,
+ 0x81C, 0xFA080003,
+ 0x81C, 0xF90A0003,
+ 0x81C, 0xF80C0003,
+ 0x81C, 0xF70E0003,
+ 0x81C, 0xF6100003,
+ 0x81C, 0xF5120003,
+ 0x81C, 0xF4140003,
+ 0x81C, 0xF3160003,
+ 0x81C, 0xF2180003,
+ 0x81C, 0xF11A0003,
+ 0x81C, 0xF01C0003,
+ 0x81C, 0xEF1E0003,
+ 0x81C, 0xEE200003,
+ 0x81C, 0xED220003,
+ 0x81C, 0xEC240003,
+ 0x81C, 0xEB260003,
+ 0x81C, 0xEA280003,
+ 0x81C, 0xE92A0003,
+ 0x81C, 0xE82C0003,
+ 0x81C, 0xE72E0003,
+ 0x81C, 0xE6300003,
+ 0x81C, 0xE5320003,
+ 0x81C, 0xE4340003,
+ 0x81C, 0xE3360003,
+ 0x81C, 0xC6380003,
+ 0x81C, 0xC53A0003,
+ 0x81C, 0xC43C0003,
+ 0x81C, 0xC33E0003,
+ 0x81C, 0xC2400003,
+ 0x81C, 0xA9420003,
+ 0x81C, 0xA8440003,
+ 0x81C, 0xA7460003,
+ 0x81C, 0xA6480003,
+ 0x81C, 0xA54A0003,
+ 0x81C, 0xA44C0003,
+ 0x81C, 0xA34E0003,
+ 0x81C, 0x66500003,
+ 0x81C, 0x65520003,
+ 0x81C, 0x64540003,
+ 0x81C, 0x63560003,
+ 0x81C, 0x49580003,
+ 0x81C, 0x485A0003,
+ 0x81C, 0x475C0003,
+ 0x81C, 0x465E0003,
+ 0x81C, 0x45600003,
+ 0x81C, 0x44620003,
+ 0x81C, 0x43640003,
+ 0x81C, 0x42660003,
+ 0x81C, 0x41680003,
+ 0x81C, 0x416A0003,
+ 0x81C, 0x416C0003,
+ 0x81C, 0x416E0003,
+ 0x81C, 0x41700003,
+ 0x81C, 0x41720003,
+ 0x81C, 0x41740003,
+ 0x81C, 0x41760003,
+ 0x81C, 0x41780003,
+ 0x81C, 0x417A0003,
+ 0x81C, 0x417C0003,
+ 0x81C, 0x417E0003,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFF000003,
+ 0x81C, 0xFE020003,
+ 0x81C, 0xFD040003,
+ 0x81C, 0xFC060003,
+ 0x81C, 0xFB080003,
+ 0x81C, 0xFA0A0003,
+ 0x81C, 0xF90C0003,
+ 0x81C, 0xF80E0003,
+ 0x81C, 0xF7100003,
+ 0x81C, 0xF6120003,
+ 0x81C, 0xF5140003,
+ 0x81C, 0xF4160003,
+ 0x81C, 0xF3180003,
+ 0x81C, 0xF21A0003,
+ 0x81C, 0xF11C0003,
+ 0x81C, 0xF01E0003,
+ 0x81C, 0xEF200003,
+ 0x81C, 0xEE220003,
+ 0x81C, 0xED240003,
+ 0x81C, 0xEC260003,
+ 0x81C, 0xEB280003,
+ 0x81C, 0xEA2A0003,
+ 0x81C, 0xE92C0003,
+ 0x81C, 0xE82E0003,
+ 0x81C, 0xE7300003,
+ 0x81C, 0xE6320003,
+ 0x81C, 0xE5340003,
+ 0x81C, 0xE4360003,
+ 0x81C, 0xE3380003,
+ 0x81C, 0xC53A0003,
+ 0x81C, 0xC43C0003,
+ 0x81C, 0xC33E0003,
+ 0x81C, 0xC2400003,
+ 0x81C, 0xC1420003,
+ 0x81C, 0xA8440003,
+ 0x81C, 0xA7460003,
+ 0x81C, 0xA6480003,
+ 0x81C, 0xA54A0003,
+ 0x81C, 0xA44C0003,
+ 0x81C, 0xA34E0003,
+ 0x81C, 0xA2500003,
+ 0x81C, 0x65520003,
+ 0x81C, 0x64540003,
+ 0x81C, 0x63560003,
+ 0x81C, 0x62580003,
+ 0x81C, 0x615A0003,
+ 0x81C, 0x475C0003,
+ 0x81C, 0x465E0003,
+ 0x81C, 0x45600003,
+ 0x81C, 0x44620003,
+ 0x81C, 0x43640003,
+ 0x81C, 0x42660003,
+ 0x81C, 0x41680003,
+ 0x81C, 0x416A0003,
+ 0x81C, 0x416C0003,
+ 0x81C, 0x416E0003,
+ 0x81C, 0x41700003,
+ 0x81C, 0x41720003,
+ 0x81C, 0x41740003,
+ 0x81C, 0x41760003,
+ 0x81C, 0x41780003,
+ 0x81C, 0x417A0003,
+ 0x81C, 0x417C0003,
+ 0x81C, 0x417E0003,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFE000003,
+ 0x81C, 0xFD020003,
+ 0x81C, 0xFC040003,
+ 0x81C, 0xFB060003,
+ 0x81C, 0xFA080003,
+ 0x81C, 0xF90A0003,
+ 0x81C, 0xF80C0003,
+ 0x81C, 0xF70E0003,
+ 0x81C, 0xF6100003,
+ 0x81C, 0xF5120003,
+ 0x81C, 0xF4140003,
+ 0x81C, 0xF3160003,
+ 0x81C, 0xF2180003,
+ 0x81C, 0xF11A0003,
+ 0x81C, 0xF01C0003,
+ 0x81C, 0xEF1E0003,
+ 0x81C, 0xEE200003,
+ 0x81C, 0xED220003,
+ 0x81C, 0xEC240003,
+ 0x81C, 0xEB260003,
+ 0x81C, 0xEA280003,
+ 0x81C, 0xE92A0003,
+ 0x81C, 0xE82C0003,
+ 0x81C, 0xE72E0003,
+ 0x81C, 0xE6300003,
+ 0x81C, 0xE5320003,
+ 0x81C, 0xE4340003,
+ 0x81C, 0xE3360003,
+ 0x81C, 0xC6380003,
+ 0x81C, 0xC53A0003,
+ 0x81C, 0xC43C0003,
+ 0x81C, 0xC33E0003,
+ 0x81C, 0xC2400003,
+ 0x81C, 0xA9420003,
+ 0x81C, 0xA8440003,
+ 0x81C, 0xA7460003,
+ 0x81C, 0xA6480003,
+ 0x81C, 0xA54A0003,
+ 0x81C, 0xA44C0003,
+ 0x81C, 0xA34E0003,
+ 0x81C, 0x66500003,
+ 0x81C, 0x65520003,
+ 0x81C, 0x64540003,
+ 0x81C, 0x63560003,
+ 0x81C, 0x49580003,
+ 0x81C, 0x485A0003,
+ 0x81C, 0x475C0003,
+ 0x81C, 0x465E0003,
+ 0x81C, 0x45600003,
+ 0x81C, 0x44620003,
+ 0x81C, 0x43640003,
+ 0x81C, 0x42660003,
+ 0x81C, 0x41680003,
+ 0x81C, 0x416A0003,
+ 0x81C, 0x416C0003,
+ 0x81C, 0x416E0003,
+ 0x81C, 0x41700003,
+ 0x81C, 0x41720003,
+ 0x81C, 0x41740003,
+ 0x81C, 0x41760003,
+ 0x81C, 0x41780003,
+ 0x81C, 0x417A0003,
+ 0x81C, 0x417C0003,
+ 0x81C, 0x417E0003,
+ 0x90000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xDF000003,
+ 0x81C, 0xDF020003,
+ 0x81C, 0xDF040003,
+ 0x81C, 0xDE060003,
+ 0x81C, 0xDD080003,
+ 0x81C, 0xDC0A0003,
+ 0x81C, 0xDB0C0003,
+ 0x81C, 0xDA0E0003,
+ 0x81C, 0xD9100003,
+ 0x81C, 0xD8120003,
+ 0x81C, 0xD7140003,
+ 0x81C, 0xD6160003,
+ 0x81C, 0xD5180003,
+ 0x81C, 0xD41A0003,
+ 0x81C, 0xD31C0003,
+ 0x81C, 0xD21E0003,
+ 0x81C, 0xD1200003,
+ 0x81C, 0xD0220003,
+ 0x81C, 0xCF240003,
+ 0x81C, 0xCE260003,
+ 0x81C, 0xCD280003,
+ 0x81C, 0xCC2A0003,
+ 0x81C, 0xCB2C0003,
+ 0x81C, 0xCA2E0003,
+ 0x81C, 0xC9300003,
+ 0x81C, 0xC8320003,
+ 0x81C, 0xC7340003,
+ 0x81C, 0xC6360003,
+ 0x81C, 0xC5380003,
+ 0x81C, 0xA73A0003,
+ 0x81C, 0xA63C0003,
+ 0x81C, 0xA53E0003,
+ 0x81C, 0xA4400003,
+ 0x81C, 0xA3420003,
+ 0x81C, 0xA2440003,
+ 0x81C, 0x87460003,
+ 0x81C, 0x86480003,
+ 0x81C, 0x854A0003,
+ 0x81C, 0x844C0003,
+ 0x81C, 0x834E0003,
+ 0x81C, 0x82500003,
+ 0x81C, 0x81520003,
+ 0x81C, 0x64540003,
+ 0x81C, 0x63560003,
+ 0x81C, 0x62580003,
+ 0x81C, 0x615A0003,
+ 0x81C, 0x445C0003,
+ 0x81C, 0x435E0003,
+ 0x81C, 0x42600003,
+ 0x81C, 0x41620003,
+ 0x81C, 0x27640003,
+ 0x81C, 0x26660003,
+ 0x81C, 0x25680003,
+ 0x81C, 0x246A0003,
+ 0x81C, 0x236C0003,
+ 0x81C, 0x226E0003,
+ 0x81C, 0x21700003,
+ 0x81C, 0x21720003,
+ 0x81C, 0x21740003,
+ 0x81C, 0x21760003,
+ 0x81C, 0x21780003,
+ 0x81C, 0x217A0003,
+ 0x81C, 0x217C0003,
+ 0x81C, 0x217E0003,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFF000003,
+ 0x81C, 0xFE020003,
+ 0x81C, 0xFD040003,
+ 0x81C, 0xFC060003,
+ 0x81C, 0xFB080003,
+ 0x81C, 0xFA0A0003,
+ 0x81C, 0xF90C0003,
+ 0x81C, 0xF80E0003,
+ 0x81C, 0xF7100003,
+ 0x81C, 0xF6120003,
+ 0x81C, 0xF5140003,
+ 0x81C, 0xF4160003,
+ 0x81C, 0xF3180003,
+ 0x81C, 0xF21A0003,
+ 0x81C, 0xF11C0003,
+ 0x81C, 0xF01E0003,
+ 0x81C, 0xEF200003,
+ 0x81C, 0xEE220003,
+ 0x81C, 0xED240003,
+ 0x81C, 0xEC260003,
+ 0x81C, 0xEB280003,
+ 0x81C, 0xEA2A0003,
+ 0x81C, 0xE92C0003,
+ 0x81C, 0xE82E0003,
+ 0x81C, 0xE7300003,
+ 0x81C, 0xE6320003,
+ 0x81C, 0xE5340003,
+ 0x81C, 0xE4360003,
+ 0x81C, 0xE3380003,
+ 0x81C, 0xC53A0003,
+ 0x81C, 0xC43C0003,
+ 0x81C, 0xC33E0003,
+ 0x81C, 0xC2400003,
+ 0x81C, 0xC1420003,
+ 0x81C, 0xA8440003,
+ 0x81C, 0xA7460003,
+ 0x81C, 0xA6480003,
+ 0x81C, 0xA54A0003,
+ 0x81C, 0xA44C0003,
+ 0x81C, 0xA34E0003,
+ 0x81C, 0xA2500003,
+ 0x81C, 0x65520003,
+ 0x81C, 0x64540003,
+ 0x81C, 0x63560003,
+ 0x81C, 0x62580003,
+ 0x81C, 0x615A0003,
+ 0x81C, 0x475C0003,
+ 0x81C, 0x465E0003,
+ 0x81C, 0x45600003,
+ 0x81C, 0x44620003,
+ 0x81C, 0x43640003,
+ 0x81C, 0x42660003,
+ 0x81C, 0x41680003,
+ 0x81C, 0x416A0003,
+ 0x81C, 0x416C0003,
+ 0x81C, 0x416E0003,
+ 0x81C, 0x41700003,
+ 0x81C, 0x41720003,
+ 0x81C, 0x41740003,
+ 0x81C, 0x41760003,
+ 0x81C, 0x41780003,
+ 0x81C, 0x417A0003,
+ 0x81C, 0x417C0003,
+ 0x81C, 0x417E0003,
+ 0x90000009, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xDF000003,
+ 0x81C, 0xDF020003,
+ 0x81C, 0xDF040003,
+ 0x81C, 0xDE060003,
+ 0x81C, 0xDD080003,
+ 0x81C, 0xDC0A0003,
+ 0x81C, 0xDB0C0003,
+ 0x81C, 0xDA0E0003,
+ 0x81C, 0xD9100003,
+ 0x81C, 0xD8120003,
+ 0x81C, 0xD7140003,
+ 0x81C, 0xD6160003,
+ 0x81C, 0xD5180003,
+ 0x81C, 0xD41A0003,
+ 0x81C, 0xD31C0003,
+ 0x81C, 0xD21E0003,
+ 0x81C, 0xD1200003,
+ 0x81C, 0xD0220003,
+ 0x81C, 0xCF240003,
+ 0x81C, 0xCE260003,
+ 0x81C, 0xCD280003,
+ 0x81C, 0xCC2A0003,
+ 0x81C, 0xCB2C0003,
+ 0x81C, 0xCA2E0003,
+ 0x81C, 0xC9300003,
+ 0x81C, 0xC8320003,
+ 0x81C, 0xC7340003,
+ 0x81C, 0xC6360003,
+ 0x81C, 0xC5380003,
+ 0x81C, 0xA73A0003,
+ 0x81C, 0xA63C0003,
+ 0x81C, 0xA53E0003,
+ 0x81C, 0xA4400003,
+ 0x81C, 0xA3420003,
+ 0x81C, 0xA2440003,
+ 0x81C, 0x87460003,
+ 0x81C, 0x86480003,
+ 0x81C, 0x854A0003,
+ 0x81C, 0x844C0003,
+ 0x81C, 0x834E0003,
+ 0x81C, 0x82500003,
+ 0x81C, 0x81520003,
+ 0x81C, 0x64540003,
+ 0x81C, 0x63560003,
+ 0x81C, 0x62580003,
+ 0x81C, 0x615A0003,
+ 0x81C, 0x445C0003,
+ 0x81C, 0x435E0003,
+ 0x81C, 0x42600003,
+ 0x81C, 0x41620003,
+ 0x81C, 0x27640003,
+ 0x81C, 0x26660003,
+ 0x81C, 0x25680003,
+ 0x81C, 0x246A0003,
+ 0x81C, 0x236C0003,
+ 0x81C, 0x226E0003,
+ 0x81C, 0x21700003,
+ 0x81C, 0x21720003,
+ 0x81C, 0x21740003,
+ 0x81C, 0x21760003,
+ 0x81C, 0x21780003,
+ 0x81C, 0x217A0003,
+ 0x81C, 0x217C0003,
+ 0x81C, 0x217E0003,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFE000003,
+ 0x81C, 0xFE020003,
+ 0x81C, 0xFD040003,
+ 0x81C, 0xFC060003,
+ 0x81C, 0xFB080003,
+ 0x81C, 0xFA0A0003,
+ 0x81C, 0xF90C0003,
+ 0x81C, 0xF80E0003,
+ 0x81C, 0xF7100003,
+ 0x81C, 0xF6120003,
+ 0x81C, 0xF5140003,
+ 0x81C, 0xF4160003,
+ 0x81C, 0xF3180003,
+ 0x81C, 0xF21A0003,
+ 0x81C, 0xF11C0003,
+ 0x81C, 0xF01E0003,
+ 0x81C, 0xEF200003,
+ 0x81C, 0xEE220003,
+ 0x81C, 0xED240003,
+ 0x81C, 0x0F260003,
+ 0x81C, 0x0E280003,
+ 0x81C, 0x0D2A0003,
+ 0x81C, 0x0C2C0003,
+ 0x81C, 0x0B2E0003,
+ 0x81C, 0x0A300003,
+ 0x81C, 0x09320003,
+ 0x81C, 0x08340003,
+ 0x81C, 0x07360003,
+ 0x81C, 0x06380003,
+ 0x81C, 0x053A0003,
+ 0x81C, 0x043C0003,
+ 0x81C, 0x033E0003,
+ 0x81C, 0x23400003,
+ 0x81C, 0x22420003,
+ 0x81C, 0xA8440003,
+ 0x81C, 0xA7460003,
+ 0x81C, 0xA6480003,
+ 0x81C, 0xA54A0003,
+ 0x81C, 0xA44C0003,
+ 0x81C, 0x684E0003,
+ 0x81C, 0x67500003,
+ 0x81C, 0x66520003,
+ 0x81C, 0x65540003,
+ 0x81C, 0x64560003,
+ 0x81C, 0x63580003,
+ 0x81C, 0x625A0003,
+ 0x81C, 0x615C0003,
+ 0x81C, 0x475E0003,
+ 0x81C, 0x46600003,
+ 0x81C, 0x45620003,
+ 0x81C, 0x44640003,
+ 0x81C, 0x43660003,
+ 0x81C, 0x42680003,
+ 0x81C, 0x416A0003,
+ 0x81C, 0x416C0003,
+ 0x81C, 0x416E0003,
+ 0x81C, 0x41700003,
+ 0x81C, 0x41720003,
+ 0x81C, 0x41740003,
+ 0x81C, 0x41760003,
+ 0x81C, 0x41780003,
+ 0x81C, 0x417A0003,
+ 0x81C, 0x417C0003,
+ 0x81C, 0x417E0003,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFE000003,
+ 0x81C, 0xFE020003,
+ 0x81C, 0xFD040003,
+ 0x81C, 0xFC060003,
+ 0x81C, 0xFB080003,
+ 0x81C, 0xFA0A0003,
+ 0x81C, 0xF90C0003,
+ 0x81C, 0xF80E0003,
+ 0x81C, 0xF7100003,
+ 0x81C, 0xF6120003,
+ 0x81C, 0xF5140003,
+ 0x81C, 0xF4160003,
+ 0x81C, 0xF3180003,
+ 0x81C, 0xF21A0003,
+ 0x81C, 0xF11C0003,
+ 0x81C, 0xF01E0003,
+ 0x81C, 0xEF200003,
+ 0x81C, 0xEE220003,
+ 0x81C, 0xED240003,
+ 0x81C, 0xEC260003,
+ 0x81C, 0xEB280003,
+ 0x81C, 0xEA2A0003,
+ 0x81C, 0xE92C0003,
+ 0x81C, 0xE72E0003,
+ 0x81C, 0xE6300003,
+ 0x81C, 0xE5320003,
+ 0x81C, 0x08340003,
+ 0x81C, 0x07360003,
+ 0x81C, 0x06380003,
+ 0x81C, 0x053A0003,
+ 0x81C, 0x043C0003,
+ 0x81C, 0x033E0003,
+ 0x81C, 0x02400003,
+ 0x81C, 0xA9420003,
+ 0x81C, 0xA8440003,
+ 0x81C, 0xA7460003,
+ 0x81C, 0xA6480003,
+ 0x81C, 0xA54A0003,
+ 0x81C, 0x684C0003,
+ 0x81C, 0x674E0003,
+ 0x81C, 0x66500003,
+ 0x81C, 0x65520003,
+ 0x81C, 0x64540003,
+ 0x81C, 0x63560003,
+ 0x81C, 0x62580003,
+ 0x81C, 0x615A0003,
+ 0x81C, 0x475C0003,
+ 0x81C, 0x465E0003,
+ 0x81C, 0x45600003,
+ 0x81C, 0x44620003,
+ 0x81C, 0x43640003,
+ 0x81C, 0x42660003,
+ 0x81C, 0x41680003,
+ 0x81C, 0x416A0003,
+ 0x81C, 0x416C0003,
+ 0x81C, 0x416E0003,
+ 0x81C, 0x41700003,
+ 0x81C, 0x41720003,
+ 0x81C, 0x41740003,
+ 0x81C, 0x41760003,
+ 0x81C, 0x41780003,
+ 0x81C, 0x417A0003,
+ 0x81C, 0x417C0003,
+ 0x81C, 0x417E0003,
+ 0xA0000000, 0x00000000,
+ 0x81C, 0xFE000003,
+ 0x81C, 0xFD020003,
+ 0x81C, 0xFC040003,
+ 0x81C, 0xFB060003,
+ 0x81C, 0xFA080003,
+ 0x81C, 0xF90A0003,
+ 0x81C, 0xF80C0003,
+ 0x81C, 0xF70E0003,
+ 0x81C, 0xF6100003,
+ 0x81C, 0xF5120003,
+ 0x81C, 0xF4140003,
+ 0x81C, 0xF3160003,
+ 0x81C, 0xF2180003,
+ 0x81C, 0xF11A0003,
+ 0x81C, 0xF01C0003,
+ 0x81C, 0xEF1E0003,
+ 0x81C, 0xEE200003,
+ 0x81C, 0xED220003,
+ 0x81C, 0xCF240003,
+ 0x81C, 0xCE260003,
+ 0x81C, 0xCD280003,
+ 0x81C, 0xCC2A0003,
+ 0x81C, 0xCB2C0003,
+ 0x81C, 0xCA2E0003,
+ 0x81C, 0xC9300003,
+ 0x81C, 0xC8320003,
+ 0x81C, 0xC7340003,
+ 0x81C, 0xC6360003,
+ 0x81C, 0xC5380003,
+ 0x81C, 0xC43A0003,
+ 0x81C, 0xA63C0003,
+ 0x81C, 0xA53E0003,
+ 0x81C, 0xA4400003,
+ 0x81C, 0xA3420003,
+ 0x81C, 0xA2440003,
+ 0x81C, 0xA1460003,
+ 0x81C, 0x86480003,
+ 0x81C, 0x854A0003,
+ 0x81C, 0x844C0003,
+ 0x81C, 0x834E0003,
+ 0x81C, 0x66500003,
+ 0x81C, 0x65520003,
+ 0x81C, 0x64540003,
+ 0x81C, 0x63560003,
+ 0x81C, 0x62580003,
+ 0x81C, 0x615A0003,
+ 0x81C, 0x435C0003,
+ 0x81C, 0x425E0003,
+ 0x81C, 0x41600003,
+ 0x81C, 0x27620003,
+ 0x81C, 0x26640003,
+ 0x81C, 0x25660003,
+ 0x81C, 0x24680003,
+ 0x81C, 0x236A0003,
+ 0x81C, 0x226C0003,
+ 0x81C, 0x216E0003,
+ 0x81C, 0x21700003,
+ 0x81C, 0x21720003,
+ 0x81C, 0x21740003,
+ 0x81C, 0x21760003,
+ 0x81C, 0x21780003,
+ 0x81C, 0x217A0003,
+ 0x81C, 0x217C0003,
+ 0x81C, 0x217E0003,
+ 0xB0000000, 0x00000000,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xF9000103,
+ 0x81C, 0xF8020103,
+ 0x81C, 0xF7040103,
+ 0x81C, 0xF6060103,
+ 0x81C, 0xF5080103,
+ 0x81C, 0xF40A0103,
+ 0x81C, 0xF30C0103,
+ 0x81C, 0xF20E0103,
+ 0x81C, 0xF1100103,
+ 0x81C, 0xF0120103,
+ 0x81C, 0xEF140103,
+ 0x81C, 0xEE160103,
+ 0x81C, 0xED180103,
+ 0x81C, 0xEC1A0103,
+ 0x81C, 0xEB1C0103,
+ 0x81C, 0xEA1E0103,
+ 0x81C, 0xE9200103,
+ 0x81C, 0xE8220103,
+ 0x81C, 0xE7240103,
+ 0x81C, 0xE6260103,
+ 0x81C, 0xE5280103,
+ 0x81C, 0xE42A0103,
+ 0x81C, 0xE32C0103,
+ 0x81C, 0xC32E0103,
+ 0x81C, 0xC2300103,
+ 0x81C, 0xC1320103,
+ 0x81C, 0xA5340103,
+ 0x81C, 0xA4360103,
+ 0x81C, 0xA3380103,
+ 0x81C, 0xA23A0103,
+ 0x81C, 0xA13C0103,
+ 0x81C, 0x843E0103,
+ 0x81C, 0x83400103,
+ 0x81C, 0x82420103,
+ 0x81C, 0x81440103,
+ 0x81C, 0x64460103,
+ 0x81C, 0x63480103,
+ 0x81C, 0x624A0103,
+ 0x81C, 0x614C0103,
+ 0x81C, 0x444E0103,
+ 0x81C, 0x43500103,
+ 0x81C, 0x42520103,
+ 0x81C, 0x41540103,
+ 0x81C, 0x25560103,
+ 0x81C, 0x24580103,
+ 0x81C, 0x235A0103,
+ 0x81C, 0x065C0103,
+ 0x81C, 0x055E0103,
+ 0x81C, 0x04600103,
+ 0x81C, 0x03620103,
+ 0x81C, 0x02640103,
+ 0x81C, 0x01660103,
+ 0x81C, 0x01680103,
+ 0x81C, 0x016A0103,
+ 0x81C, 0x016C0103,
+ 0x81C, 0x016E0103,
+ 0x81C, 0x01700103,
+ 0x81C, 0x01720103,
+ 0x81C, 0x01740103,
+ 0x81C, 0x01760103,
+ 0x81C, 0x01780103,
+ 0x81C, 0x017A0103,
+ 0x81C, 0x017C0103,
+ 0x81C, 0x017E0103,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xF8000103,
+ 0x81C, 0xF7020103,
+ 0x81C, 0xF6040103,
+ 0x81C, 0xF5060103,
+ 0x81C, 0xF4080103,
+ 0x81C, 0xF30A0103,
+ 0x81C, 0xF20C0103,
+ 0x81C, 0xF10E0103,
+ 0x81C, 0xF0100103,
+ 0x81C, 0xEF120103,
+ 0x81C, 0xEE140103,
+ 0x81C, 0xED160103,
+ 0x81C, 0xEC180103,
+ 0x81C, 0xEB1A0103,
+ 0x81C, 0xEA1C0103,
+ 0x81C, 0xE91E0103,
+ 0x81C, 0xE8200103,
+ 0x81C, 0xE7220103,
+ 0x81C, 0xE6240103,
+ 0x81C, 0xE5260103,
+ 0x81C, 0xE4280103,
+ 0x81C, 0xE32A0103,
+ 0x81C, 0xE22C0103,
+ 0x81C, 0xE12E0103,
+ 0x81C, 0xA5300103,
+ 0x81C, 0xA4320103,
+ 0x81C, 0xA3340103,
+ 0x81C, 0xA2360103,
+ 0x81C, 0xA1380103,
+ 0x81C, 0x843A0103,
+ 0x81C, 0x833C0103,
+ 0x81C, 0x823E0103,
+ 0x81C, 0x81400103,
+ 0x81C, 0x64420103,
+ 0x81C, 0x63440103,
+ 0x81C, 0x62460103,
+ 0x81C, 0x61480103,
+ 0x81C, 0x454A0103,
+ 0x81C, 0x444C0103,
+ 0x81C, 0x434E0103,
+ 0x81C, 0x42500103,
+ 0x81C, 0x25520103,
+ 0x81C, 0x24540103,
+ 0x81C, 0x23560103,
+ 0x81C, 0x06580103,
+ 0x81C, 0x055A0103,
+ 0x81C, 0x045C0103,
+ 0x81C, 0x035E0103,
+ 0x81C, 0x02600103,
+ 0x81C, 0x01620103,
+ 0x81C, 0x01640103,
+ 0x81C, 0x01660103,
+ 0x81C, 0x01680103,
+ 0x81C, 0x016A0103,
+ 0x81C, 0x016C0103,
+ 0x81C, 0x016E0103,
+ 0x81C, 0x01700103,
+ 0x81C, 0x01720103,
+ 0x81C, 0x01740103,
+ 0x81C, 0x01760103,
+ 0x81C, 0x01780103,
+ 0x81C, 0x017A0103,
+ 0x81C, 0x017C0103,
+ 0x81C, 0x017E0103,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFC000103,
+ 0x81C, 0xFB020103,
+ 0x81C, 0xFA040103,
+ 0x81C, 0xF9060103,
+ 0x81C, 0xF8080103,
+ 0x81C, 0xF70A0103,
+ 0x81C, 0xF60C0103,
+ 0x81C, 0xF50E0103,
+ 0x81C, 0xF4100103,
+ 0x81C, 0xF3120103,
+ 0x81C, 0xF2140103,
+ 0x81C, 0xF1160103,
+ 0x81C, 0xF0180103,
+ 0x81C, 0xEF1A0103,
+ 0x81C, 0xEE1C0103,
+ 0x81C, 0xED1E0103,
+ 0x81C, 0xEC200103,
+ 0x81C, 0xEB220103,
+ 0x81C, 0xEA240103,
+ 0x81C, 0xE9260103,
+ 0x81C, 0xE8280103,
+ 0x81C, 0xE72A0103,
+ 0x81C, 0xE62C0103,
+ 0x81C, 0xE52E0103,
+ 0x81C, 0xE4300103,
+ 0x81C, 0xE3320103,
+ 0x81C, 0xE2340103,
+ 0x81C, 0xE1360103,
+ 0x81C, 0x87380103,
+ 0x81C, 0x863A0103,
+ 0x81C, 0x853C0103,
+ 0x81C, 0x843E0103,
+ 0x81C, 0x83400103,
+ 0x81C, 0x82420103,
+ 0x81C, 0x81440103,
+ 0x81C, 0x64460103,
+ 0x81C, 0x63480103,
+ 0x81C, 0x624A0103,
+ 0x81C, 0x464C0103,
+ 0x81C, 0x454E0103,
+ 0x81C, 0x44500103,
+ 0x81C, 0x43520103,
+ 0x81C, 0x26540103,
+ 0x81C, 0x25560103,
+ 0x81C, 0x24580103,
+ 0x81C, 0x075A0103,
+ 0x81C, 0x065C0103,
+ 0x81C, 0x055E0103,
+ 0x81C, 0x04600103,
+ 0x81C, 0x03620103,
+ 0x81C, 0x02640103,
+ 0x81C, 0x01660103,
+ 0x81C, 0x01680103,
+ 0x81C, 0x016A0103,
+ 0x81C, 0x016C0103,
+ 0x81C, 0x016E0103,
+ 0x81C, 0x01700103,
+ 0x81C, 0x01720103,
+ 0x81C, 0x01740103,
+ 0x81C, 0x01760103,
+ 0x81C, 0x01780103,
+ 0x81C, 0x017A0103,
+ 0x81C, 0x017C0103,
+ 0x81C, 0x017E0103,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xF9000103,
+ 0x81C, 0xF8020103,
+ 0x81C, 0xF7040103,
+ 0x81C, 0xF6060103,
+ 0x81C, 0xF5080103,
+ 0x81C, 0xF40A0103,
+ 0x81C, 0xF30C0103,
+ 0x81C, 0xF20E0103,
+ 0x81C, 0xF1100103,
+ 0x81C, 0xF0120103,
+ 0x81C, 0xEF140103,
+ 0x81C, 0xEE160103,
+ 0x81C, 0xED180103,
+ 0x81C, 0xEC1A0103,
+ 0x81C, 0xEB1C0103,
+ 0x81C, 0xEA1E0103,
+ 0x81C, 0xE9200103,
+ 0x81C, 0xE8220103,
+ 0x81C, 0xE7240103,
+ 0x81C, 0xE6260103,
+ 0x81C, 0xE5280103,
+ 0x81C, 0xE42A0103,
+ 0x81C, 0xE32C0103,
+ 0x81C, 0xE22E0103,
+ 0x81C, 0xA6300103,
+ 0x81C, 0xA5320103,
+ 0x81C, 0xA4340103,
+ 0x81C, 0xA3360103,
+ 0x81C, 0xA2380103,
+ 0x81C, 0xA13A0103,
+ 0x81C, 0x843C0103,
+ 0x81C, 0x833E0103,
+ 0x81C, 0x82400103,
+ 0x81C, 0x81420103,
+ 0x81C, 0x64440103,
+ 0x81C, 0x63460103,
+ 0x81C, 0x62480103,
+ 0x81C, 0x614A0103,
+ 0x81C, 0x444C0103,
+ 0x81C, 0x434E0103,
+ 0x81C, 0x42500103,
+ 0x81C, 0x41520103,
+ 0x81C, 0x25540103,
+ 0x81C, 0x24560103,
+ 0x81C, 0x23580103,
+ 0x81C, 0x225A0103,
+ 0x81C, 0x055C0103,
+ 0x81C, 0x045E0103,
+ 0x81C, 0x03600103,
+ 0x81C, 0x02620103,
+ 0x81C, 0x01640103,
+ 0x81C, 0x01660103,
+ 0x81C, 0x01680103,
+ 0x81C, 0x016A0103,
+ 0x81C, 0x016C0103,
+ 0x81C, 0x016E0103,
+ 0x81C, 0x01700103,
+ 0x81C, 0x01720103,
+ 0x81C, 0x01740103,
+ 0x81C, 0x01760103,
+ 0x81C, 0x01780103,
+ 0x81C, 0x017A0103,
+ 0x81C, 0x017C0103,
+ 0x81C, 0x017E0103,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFD000103,
+ 0x81C, 0xFC020103,
+ 0x81C, 0xFB040103,
+ 0x81C, 0xFA060103,
+ 0x81C, 0xF9080103,
+ 0x81C, 0xF80A0103,
+ 0x81C, 0xF70C0103,
+ 0x81C, 0xF60E0103,
+ 0x81C, 0xF5100103,
+ 0x81C, 0xF4120103,
+ 0x81C, 0xF3140103,
+ 0x81C, 0xF2160103,
+ 0x81C, 0xF1180103,
+ 0x81C, 0xF01A0103,
+ 0x81C, 0xEF1C0103,
+ 0x81C, 0xEE1E0103,
+ 0x81C, 0xED200103,
+ 0x81C, 0xEC220103,
+ 0x81C, 0xEB240103,
+ 0x81C, 0xEA260103,
+ 0x81C, 0xE9280103,
+ 0x81C, 0xE82A0103,
+ 0x81C, 0xE72C0103,
+ 0x81C, 0xE62E0103,
+ 0x81C, 0xE5300103,
+ 0x81C, 0xE4320103,
+ 0x81C, 0xE3340103,
+ 0x81C, 0xE2360103,
+ 0x81C, 0xE1380103,
+ 0x81C, 0xA33A0103,
+ 0x81C, 0xA23C0103,
+ 0x81C, 0xA13E0103,
+ 0x81C, 0x84400103,
+ 0x81C, 0x83420103,
+ 0x81C, 0x82440103,
+ 0x81C, 0x81460103,
+ 0x81C, 0x64480103,
+ 0x81C, 0x634A0103,
+ 0x81C, 0x624C0103,
+ 0x81C, 0x614E0103,
+ 0x81C, 0x45500103,
+ 0x81C, 0x44520103,
+ 0x81C, 0x43540103,
+ 0x81C, 0x42560103,
+ 0x81C, 0x25580103,
+ 0x81C, 0x245A0103,
+ 0x81C, 0x235C0103,
+ 0x81C, 0x065E0103,
+ 0x81C, 0x05600103,
+ 0x81C, 0x04620103,
+ 0x81C, 0x03640103,
+ 0x81C, 0x02660103,
+ 0x81C, 0x01680103,
+ 0x81C, 0x016A0103,
+ 0x81C, 0x016C0103,
+ 0x81C, 0x016E0103,
+ 0x81C, 0x01700103,
+ 0x81C, 0x01720103,
+ 0x81C, 0x01740103,
+ 0x81C, 0x01760103,
+ 0x81C, 0x01780103,
+ 0x81C, 0x017A0103,
+ 0x81C, 0x017C0103,
+ 0x81C, 0x017E0103,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFA000103,
+ 0x81C, 0xF9020103,
+ 0x81C, 0xF8040103,
+ 0x81C, 0xF7060103,
+ 0x81C, 0xF6080103,
+ 0x81C, 0xF50A0103,
+ 0x81C, 0xF40C0103,
+ 0x81C, 0xF30E0103,
+ 0x81C, 0xF2100103,
+ 0x81C, 0xF1120103,
+ 0x81C, 0xF0140103,
+ 0x81C, 0xEF160103,
+ 0x81C, 0xEE180103,
+ 0x81C, 0xED1A0103,
+ 0x81C, 0xEC1C0103,
+ 0x81C, 0xEB1E0103,
+ 0x81C, 0xEA200103,
+ 0x81C, 0xE9220103,
+ 0x81C, 0xE8240103,
+ 0x81C, 0xE7260103,
+ 0x81C, 0xE6280103,
+ 0x81C, 0xE52A0103,
+ 0x81C, 0xE42C0103,
+ 0x81C, 0xE32E0103,
+ 0x81C, 0xE2300103,
+ 0x81C, 0xE1320103,
+ 0x81C, 0xA5340103,
+ 0x81C, 0xA4360103,
+ 0x81C, 0xA3380103,
+ 0x81C, 0xA23A0103,
+ 0x81C, 0xA13C0103,
+ 0x81C, 0x843E0103,
+ 0x81C, 0x83400103,
+ 0x81C, 0x82420103,
+ 0x81C, 0x81440103,
+ 0x81C, 0x64460103,
+ 0x81C, 0x63480103,
+ 0x81C, 0x624A0103,
+ 0x81C, 0x614C0103,
+ 0x81C, 0x454E0103,
+ 0x81C, 0x44500103,
+ 0x81C, 0x43520103,
+ 0x81C, 0x42540103,
+ 0x81C, 0x41560103,
+ 0x81C, 0x24580103,
+ 0x81C, 0x235A0103,
+ 0x81C, 0x225C0103,
+ 0x81C, 0x055E0103,
+ 0x81C, 0x04600103,
+ 0x81C, 0x03620103,
+ 0x81C, 0x02640103,
+ 0x81C, 0x01660103,
+ 0x81C, 0x01680103,
+ 0x81C, 0x016A0103,
+ 0x81C, 0x016C0103,
+ 0x81C, 0x016E0103,
+ 0x81C, 0x01700103,
+ 0x81C, 0x01720103,
+ 0x81C, 0x01740103,
+ 0x81C, 0x01760103,
+ 0x81C, 0x01780103,
+ 0x81C, 0x017A0103,
+ 0x81C, 0x017C0103,
+ 0x81C, 0x017E0103,
+ 0x90000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFF000103,
+ 0x81C, 0xFF020103,
+ 0x81C, 0xFE040103,
+ 0x81C, 0xFD060103,
+ 0x81C, 0xFC080103,
+ 0x81C, 0xFB0A0103,
+ 0x81C, 0xFA0C0103,
+ 0x81C, 0xF90E0103,
+ 0x81C, 0xF8100103,
+ 0x81C, 0xF7120103,
+ 0x81C, 0xF6140103,
+ 0x81C, 0xF5160103,
+ 0x81C, 0xF4180103,
+ 0x81C, 0xF31A0103,
+ 0x81C, 0xF21C0103,
+ 0x81C, 0xF11E0103,
+ 0x81C, 0xF0200103,
+ 0x81C, 0xEF220103,
+ 0x81C, 0xEE240103,
+ 0x81C, 0xED260103,
+ 0x81C, 0xEC280103,
+ 0x81C, 0xEB2A0103,
+ 0x81C, 0xEA2C0103,
+ 0x81C, 0xE92E0103,
+ 0x81C, 0xE8300103,
+ 0x81C, 0xE7320103,
+ 0x81C, 0xE6340103,
+ 0x81C, 0xE5360103,
+ 0x81C, 0xE4380103,
+ 0x81C, 0xE33A0103,
+ 0x81C, 0xA53C0103,
+ 0x81C, 0xA43E0103,
+ 0x81C, 0xA3400103,
+ 0x81C, 0xA2420103,
+ 0x81C, 0xA1440103,
+ 0x81C, 0x85460103,
+ 0x81C, 0x84480103,
+ 0x81C, 0x834A0103,
+ 0x81C, 0x824C0103,
+ 0x81C, 0x814E0103,
+ 0x81C, 0x64500103,
+ 0x81C, 0x63520103,
+ 0x81C, 0x62540103,
+ 0x81C, 0x44560103,
+ 0x81C, 0x43580103,
+ 0x81C, 0x425A0103,
+ 0x81C, 0x265C0103,
+ 0x81C, 0x255E0103,
+ 0x81C, 0x24600103,
+ 0x81C, 0x07620103,
+ 0x81C, 0x06640103,
+ 0x81C, 0x05660103,
+ 0x81C, 0x04680103,
+ 0x81C, 0x036A0103,
+ 0x81C, 0x026C0103,
+ 0x81C, 0x016E0103,
+ 0x81C, 0x01700103,
+ 0x81C, 0x01720103,
+ 0x81C, 0x01740103,
+ 0x81C, 0x01760103,
+ 0x81C, 0x01780103,
+ 0x81C, 0x017A0103,
+ 0x81C, 0x017C0103,
+ 0x81C, 0x017E0103,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xF8000103,
+ 0x81C, 0xF7020103,
+ 0x81C, 0xF6040103,
+ 0x81C, 0xF5060103,
+ 0x81C, 0xF4080103,
+ 0x81C, 0xF30A0103,
+ 0x81C, 0xF20C0103,
+ 0x81C, 0xF10E0103,
+ 0x81C, 0xF0100103,
+ 0x81C, 0xEF120103,
+ 0x81C, 0xEE140103,
+ 0x81C, 0xED160103,
+ 0x81C, 0xEC180103,
+ 0x81C, 0xEB1A0103,
+ 0x81C, 0xEA1C0103,
+ 0x81C, 0xE91E0103,
+ 0x81C, 0xE8200103,
+ 0x81C, 0xE7220103,
+ 0x81C, 0xE6240103,
+ 0x81C, 0xE5260103,
+ 0x81C, 0xE4280103,
+ 0x81C, 0xE32A0103,
+ 0x81C, 0xE22C0103,
+ 0x81C, 0xE12E0103,
+ 0x81C, 0xA4300103,
+ 0x81C, 0xA3320103,
+ 0x81C, 0xA2340103,
+ 0x81C, 0xA1360103,
+ 0x81C, 0x85380103,
+ 0x81C, 0x843A0103,
+ 0x81C, 0x833C0103,
+ 0x81C, 0x823E0103,
+ 0x81C, 0x65400103,
+ 0x81C, 0x64420103,
+ 0x81C, 0x63440103,
+ 0x81C, 0x62460103,
+ 0x81C, 0x45480103,
+ 0x81C, 0x444A0103,
+ 0x81C, 0x434C0103,
+ 0x81C, 0x264E0103,
+ 0x81C, 0x25500103,
+ 0x81C, 0x24520103,
+ 0x81C, 0x08540103,
+ 0x81C, 0x07560103,
+ 0x81C, 0x06580103,
+ 0x81C, 0x055A0103,
+ 0x81C, 0x045C0103,
+ 0x81C, 0x035E0103,
+ 0x81C, 0x02600103,
+ 0x81C, 0x01620103,
+ 0x81C, 0x01640103,
+ 0x81C, 0x01660103,
+ 0x81C, 0x01680103,
+ 0x81C, 0x016A0103,
+ 0x81C, 0x016C0103,
+ 0x81C, 0x016E0103,
+ 0x81C, 0x01700103,
+ 0x81C, 0x01720103,
+ 0x81C, 0x01740103,
+ 0x81C, 0x01760103,
+ 0x81C, 0x01780103,
+ 0x81C, 0x017A0103,
+ 0x81C, 0x017C0103,
+ 0x81C, 0x017E0103,
+ 0x90000009, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFF000103,
+ 0x81C, 0xFF020103,
+ 0x81C, 0xFE040103,
+ 0x81C, 0xFD060103,
+ 0x81C, 0xFC080103,
+ 0x81C, 0xFB0A0103,
+ 0x81C, 0xFA0C0103,
+ 0x81C, 0xF90E0103,
+ 0x81C, 0xF8100103,
+ 0x81C, 0xF7120103,
+ 0x81C, 0xF6140103,
+ 0x81C, 0xF5160103,
+ 0x81C, 0xF4180103,
+ 0x81C, 0xF31A0103,
+ 0x81C, 0xF21C0103,
+ 0x81C, 0xF11E0103,
+ 0x81C, 0xF0200103,
+ 0x81C, 0xEF220103,
+ 0x81C, 0xEE240103,
+ 0x81C, 0xED260103,
+ 0x81C, 0xEC280103,
+ 0x81C, 0xEB2A0103,
+ 0x81C, 0xEA2C0103,
+ 0x81C, 0xE92E0103,
+ 0x81C, 0xE8300103,
+ 0x81C, 0xE7320103,
+ 0x81C, 0xE6340103,
+ 0x81C, 0xE5360103,
+ 0x81C, 0xE4380103,
+ 0x81C, 0xE33A0103,
+ 0x81C, 0xA53C0103,
+ 0x81C, 0xA43E0103,
+ 0x81C, 0xA3400103,
+ 0x81C, 0xA2420103,
+ 0x81C, 0xA1440103,
+ 0x81C, 0x85460103,
+ 0x81C, 0x84480103,
+ 0x81C, 0x834A0103,
+ 0x81C, 0x824C0103,
+ 0x81C, 0x814E0103,
+ 0x81C, 0x64500103,
+ 0x81C, 0x63520103,
+ 0x81C, 0x62540103,
+ 0x81C, 0x44560103,
+ 0x81C, 0x43580103,
+ 0x81C, 0x425A0103,
+ 0x81C, 0x265C0103,
+ 0x81C, 0x255E0103,
+ 0x81C, 0x24600103,
+ 0x81C, 0x07620103,
+ 0x81C, 0x06640103,
+ 0x81C, 0x05660103,
+ 0x81C, 0x04680103,
+ 0x81C, 0x036A0103,
+ 0x81C, 0x026C0103,
+ 0x81C, 0x016E0103,
+ 0x81C, 0x01700103,
+ 0x81C, 0x01720103,
+ 0x81C, 0x01740103,
+ 0x81C, 0x01760103,
+ 0x81C, 0x01780103,
+ 0x81C, 0x017A0103,
+ 0x81C, 0x017C0103,
+ 0x81C, 0x017E0103,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xF9000103,
+ 0x81C, 0xF8020103,
+ 0x81C, 0xF7040103,
+ 0x81C, 0xF6060103,
+ 0x81C, 0xF5080103,
+ 0x81C, 0xF40A0103,
+ 0x81C, 0xF30C0103,
+ 0x81C, 0xF20E0103,
+ 0x81C, 0xF1100103,
+ 0x81C, 0xF0120103,
+ 0x81C, 0xEF140103,
+ 0x81C, 0xEE160103,
+ 0x81C, 0xED180103,
+ 0x81C, 0xEC1A0103,
+ 0x81C, 0xEB1C0103,
+ 0x81C, 0xEA1E0103,
+ 0x81C, 0xE9200103,
+ 0x81C, 0xE8220103,
+ 0x81C, 0xE7240103,
+ 0x81C, 0xE6260103,
+ 0x81C, 0xE5280103,
+ 0x81C, 0xE42A0103,
+ 0x81C, 0xE32C0103,
+ 0x81C, 0xE22E0103,
+ 0x81C, 0xA6300103,
+ 0x81C, 0xA5320103,
+ 0x81C, 0xA4340103,
+ 0x81C, 0xA3360103,
+ 0x81C, 0xA2380103,
+ 0x81C, 0xA13A0103,
+ 0x81C, 0x843C0103,
+ 0x81C, 0x833E0103,
+ 0x81C, 0x82400103,
+ 0x81C, 0x81420103,
+ 0x81C, 0x64440103,
+ 0x81C, 0x63460103,
+ 0x81C, 0x62480103,
+ 0x81C, 0x614A0103,
+ 0x81C, 0x444C0103,
+ 0x81C, 0x434E0103,
+ 0x81C, 0x42500103,
+ 0x81C, 0x41520103,
+ 0x81C, 0x25540103,
+ 0x81C, 0x24560103,
+ 0x81C, 0x23580103,
+ 0x81C, 0x225A0103,
+ 0x81C, 0x055C0103,
+ 0x81C, 0x045E0103,
+ 0x81C, 0x03600103,
+ 0x81C, 0x02620103,
+ 0x81C, 0x01640103,
+ 0x81C, 0x01660103,
+ 0x81C, 0x01680103,
+ 0x81C, 0x016A0103,
+ 0x81C, 0x016C0103,
+ 0x81C, 0x016E0103,
+ 0x81C, 0x01700103,
+ 0x81C, 0x01720103,
+ 0x81C, 0x01740103,
+ 0x81C, 0x01760103,
+ 0x81C, 0x01780103,
+ 0x81C, 0x017A0103,
+ 0x81C, 0x017C0103,
+ 0x81C, 0x017E0103,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFA000103,
+ 0x81C, 0xF9020103,
+ 0x81C, 0xF8040103,
+ 0x81C, 0xF7060103,
+ 0x81C, 0xF6080103,
+ 0x81C, 0xF50A0103,
+ 0x81C, 0xF40C0103,
+ 0x81C, 0xF30E0103,
+ 0x81C, 0xF2100103,
+ 0x81C, 0xF1120103,
+ 0x81C, 0xF0140103,
+ 0x81C, 0xEF160103,
+ 0x81C, 0xEE180103,
+ 0x81C, 0xED1A0103,
+ 0x81C, 0xCC1C0103,
+ 0x81C, 0xCB1E0103,
+ 0x81C, 0xCA200103,
+ 0x81C, 0xE9220103,
+ 0x81C, 0xE8240103,
+ 0x81C, 0xE7260103,
+ 0x81C, 0xE6280103,
+ 0x81C, 0xE42A0103,
+ 0x81C, 0xE32C0103,
+ 0x81C, 0xE22E0103,
+ 0x81C, 0xA7300103,
+ 0x81C, 0xA6320103,
+ 0x81C, 0xA5340103,
+ 0x81C, 0xA4360103,
+ 0x81C, 0xA3380103,
+ 0x81C, 0xA23A0103,
+ 0x81C, 0xA13C0103,
+ 0x81C, 0x843E0103,
+ 0x81C, 0x83400103,
+ 0x81C, 0x82420103,
+ 0x81C, 0x65440103,
+ 0x81C, 0x64460103,
+ 0x81C, 0x63480103,
+ 0x81C, 0x624A0103,
+ 0x81C, 0x614C0103,
+ 0x81C, 0x444E0103,
+ 0x81C, 0x43500103,
+ 0x81C, 0x42520103,
+ 0x81C, 0x41540103,
+ 0x81C, 0x24560103,
+ 0x81C, 0x23580103,
+ 0x81C, 0x055A0103,
+ 0x81C, 0x045C0103,
+ 0x81C, 0x035E0103,
+ 0x81C, 0x02600103,
+ 0x81C, 0x01620103,
+ 0x81C, 0x01640103,
+ 0x81C, 0x01660103,
+ 0x81C, 0x01680103,
+ 0x81C, 0x016A0103,
+ 0x81C, 0x016C0103,
+ 0x81C, 0x016E0103,
+ 0x81C, 0x01700103,
+ 0x81C, 0x01720103,
+ 0x81C, 0x01740103,
+ 0x81C, 0x01760103,
+ 0x81C, 0x01780103,
+ 0x81C, 0x017A0103,
+ 0x81C, 0x017C0103,
+ 0x81C, 0x017E0103,
+ 0xA0000000, 0x00000000,
+ 0x81C, 0xFF000103,
+ 0x81C, 0xFE020103,
+ 0x81C, 0xFD040103,
+ 0x81C, 0xFC060103,
+ 0x81C, 0xFB080103,
+ 0x81C, 0xFA0A0103,
+ 0x81C, 0xF90C0103,
+ 0x81C, 0xF80E0103,
+ 0x81C, 0xF7100103,
+ 0x81C, 0xF6120103,
+ 0x81C, 0xF5140103,
+ 0x81C, 0xF4160103,
+ 0x81C, 0xF3180103,
+ 0x81C, 0xF21A0103,
+ 0x81C, 0xF11C0103,
+ 0x81C, 0xF01E0103,
+ 0x81C, 0xEF200103,
+ 0x81C, 0xEE220103,
+ 0x81C, 0xED240103,
+ 0x81C, 0xEC260103,
+ 0x81C, 0xEB280103,
+ 0x81C, 0xEA2A0103,
+ 0x81C, 0xE92C0103,
+ 0x81C, 0xE82E0103,
+ 0x81C, 0xE7300103,
+ 0x81C, 0xE6320103,
+ 0x81C, 0xE5340103,
+ 0x81C, 0xE4360103,
+ 0x81C, 0xE3380103,
+ 0x81C, 0xE23A0103,
+ 0x81C, 0xE13C0103,
+ 0x81C, 0xA43E0103,
+ 0x81C, 0xA3400103,
+ 0x81C, 0xA2420103,
+ 0x81C, 0xA1440103,
+ 0x81C, 0x86460103,
+ 0x81C, 0x85480103,
+ 0x81C, 0x844A0103,
+ 0x81C, 0x834C0103,
+ 0x81C, 0x824E0103,
+ 0x81C, 0x81500103,
+ 0x81C, 0x64520103,
+ 0x81C, 0x63540103,
+ 0x81C, 0x62560103,
+ 0x81C, 0x61580103,
+ 0x81C, 0x435A0103,
+ 0x81C, 0x425C0103,
+ 0x81C, 0x415E0103,
+ 0x81C, 0x25600103,
+ 0x81C, 0x24620103,
+ 0x81C, 0x06640103,
+ 0x81C, 0x05660103,
+ 0x81C, 0x04680103,
+ 0x81C, 0x036A0103,
+ 0x81C, 0x026C0103,
+ 0x81C, 0x016E0103,
+ 0x81C, 0x01700103,
+ 0x81C, 0x01720103,
+ 0x81C, 0x01740103,
+ 0x81C, 0x01760103,
+ 0x81C, 0x01780103,
+ 0x81C, 0x017A0103,
+ 0x81C, 0x017C0103,
+ 0x81C, 0x017E0103,
+ 0xB0000000, 0x00000000,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFA000203,
+ 0x81C, 0xF9020203,
+ 0x81C, 0xF8040203,
+ 0x81C, 0xF7060203,
+ 0x81C, 0xF6080203,
+ 0x81C, 0xF50A0203,
+ 0x81C, 0xF40C0203,
+ 0x81C, 0xF30E0203,
+ 0x81C, 0xF2100203,
+ 0x81C, 0xF1120203,
+ 0x81C, 0xF0140203,
+ 0x81C, 0xEF160203,
+ 0x81C, 0xEE180203,
+ 0x81C, 0xED1A0203,
+ 0x81C, 0xEC1C0203,
+ 0x81C, 0xEB1E0203,
+ 0x81C, 0xEA200203,
+ 0x81C, 0xE9220203,
+ 0x81C, 0xE8240203,
+ 0x81C, 0xE7260203,
+ 0x81C, 0xE6280203,
+ 0x81C, 0xE52A0203,
+ 0x81C, 0xE42C0203,
+ 0x81C, 0xE32E0203,
+ 0x81C, 0xE2300203,
+ 0x81C, 0xE1320203,
+ 0x81C, 0xA5340203,
+ 0x81C, 0xA4360203,
+ 0x81C, 0xA3380203,
+ 0x81C, 0xA23A0203,
+ 0x81C, 0xA13C0203,
+ 0x81C, 0x843E0203,
+ 0x81C, 0x83400203,
+ 0x81C, 0x82420203,
+ 0x81C, 0x81440203,
+ 0x81C, 0x63460203,
+ 0x81C, 0x62480203,
+ 0x81C, 0x614A0203,
+ 0x81C, 0x464C0203,
+ 0x81C, 0x454E0203,
+ 0x81C, 0x44500203,
+ 0x81C, 0x43520203,
+ 0x81C, 0x42540203,
+ 0x81C, 0x41560203,
+ 0x81C, 0x24580203,
+ 0x81C, 0x235A0203,
+ 0x81C, 0x065C0203,
+ 0x81C, 0x055E0203,
+ 0x81C, 0x04600203,
+ 0x81C, 0x03620203,
+ 0x81C, 0x02640203,
+ 0x81C, 0x01660203,
+ 0x81C, 0x01680203,
+ 0x81C, 0x016A0203,
+ 0x81C, 0x016C0203,
+ 0x81C, 0x016E0203,
+ 0x81C, 0x01700203,
+ 0x81C, 0x01720203,
+ 0x81C, 0x01740203,
+ 0x81C, 0x01760203,
+ 0x81C, 0x01780203,
+ 0x81C, 0x017A0203,
+ 0x81C, 0x017C0203,
+ 0x81C, 0x017E0203,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xF8000203,
+ 0x81C, 0xF7020203,
+ 0x81C, 0xF6040203,
+ 0x81C, 0xF5060203,
+ 0x81C, 0xF4080203,
+ 0x81C, 0xF30A0203,
+ 0x81C, 0xF20C0203,
+ 0x81C, 0xF10E0203,
+ 0x81C, 0xF0100203,
+ 0x81C, 0xEF120203,
+ 0x81C, 0xEE140203,
+ 0x81C, 0xED160203,
+ 0x81C, 0xEC180203,
+ 0x81C, 0xEB1A0203,
+ 0x81C, 0xEA1C0203,
+ 0x81C, 0xE91E0203,
+ 0x81C, 0xE8200203,
+ 0x81C, 0xE7220203,
+ 0x81C, 0xE6240203,
+ 0x81C, 0xE5260203,
+ 0x81C, 0xE4280203,
+ 0x81C, 0xE32A0203,
+ 0x81C, 0xE22C0203,
+ 0x81C, 0xE12E0203,
+ 0x81C, 0xA6300203,
+ 0x81C, 0xA5320203,
+ 0x81C, 0xA4340203,
+ 0x81C, 0xA3360203,
+ 0x81C, 0xA2380203,
+ 0x81C, 0x853A0203,
+ 0x81C, 0x843C0203,
+ 0x81C, 0x833E0203,
+ 0x81C, 0x82400203,
+ 0x81C, 0x81420203,
+ 0x81C, 0x64440203,
+ 0x81C, 0x63460203,
+ 0x81C, 0x62480203,
+ 0x81C, 0x614A0203,
+ 0x81C, 0x444C0203,
+ 0x81C, 0x434E0203,
+ 0x81C, 0x42500203,
+ 0x81C, 0x25520203,
+ 0x81C, 0x24540203,
+ 0x81C, 0x23560203,
+ 0x81C, 0x06580203,
+ 0x81C, 0x055A0203,
+ 0x81C, 0x045C0203,
+ 0x81C, 0x035E0203,
+ 0x81C, 0x02600203,
+ 0x81C, 0x01620203,
+ 0x81C, 0x01640203,
+ 0x81C, 0x01660203,
+ 0x81C, 0x01680203,
+ 0x81C, 0x016A0203,
+ 0x81C, 0x016C0203,
+ 0x81C, 0x016E0203,
+ 0x81C, 0x01700203,
+ 0x81C, 0x01720203,
+ 0x81C, 0x01740203,
+ 0x81C, 0x01760203,
+ 0x81C, 0x01780203,
+ 0x81C, 0x017A0203,
+ 0x81C, 0x017C0203,
+ 0x81C, 0x017E0203,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFC000203,
+ 0x81C, 0xFB020203,
+ 0x81C, 0xFA040203,
+ 0x81C, 0xF9060203,
+ 0x81C, 0xF8080203,
+ 0x81C, 0xF70A0203,
+ 0x81C, 0xF60C0203,
+ 0x81C, 0xF50E0203,
+ 0x81C, 0xF4100203,
+ 0x81C, 0xF3120203,
+ 0x81C, 0xF2140203,
+ 0x81C, 0xF1160203,
+ 0x81C, 0xF0180203,
+ 0x81C, 0xEF1A0203,
+ 0x81C, 0xEE1C0203,
+ 0x81C, 0xED1E0203,
+ 0x81C, 0xEC200203,
+ 0x81C, 0xEB220203,
+ 0x81C, 0xEA240203,
+ 0x81C, 0xE9260203,
+ 0x81C, 0xE8280203,
+ 0x81C, 0xE72A0203,
+ 0x81C, 0xE62C0203,
+ 0x81C, 0xE52E0203,
+ 0x81C, 0xE4300203,
+ 0x81C, 0xE3320203,
+ 0x81C, 0xE2340203,
+ 0x81C, 0xE1360203,
+ 0x81C, 0x87380203,
+ 0x81C, 0x863A0203,
+ 0x81C, 0x853C0203,
+ 0x81C, 0x843E0203,
+ 0x81C, 0x83400203,
+ 0x81C, 0x82420203,
+ 0x81C, 0x81440203,
+ 0x81C, 0x64460203,
+ 0x81C, 0x63480203,
+ 0x81C, 0x624A0203,
+ 0x81C, 0x474C0203,
+ 0x81C, 0x464E0203,
+ 0x81C, 0x45500203,
+ 0x81C, 0x44520203,
+ 0x81C, 0x43540203,
+ 0x81C, 0x42560203,
+ 0x81C, 0x24580203,
+ 0x81C, 0x235A0203,
+ 0x81C, 0x075C0203,
+ 0x81C, 0x065E0203,
+ 0x81C, 0x05600203,
+ 0x81C, 0x04620203,
+ 0x81C, 0x03640203,
+ 0x81C, 0x02660203,
+ 0x81C, 0x01680203,
+ 0x81C, 0x016A0203,
+ 0x81C, 0x016C0203,
+ 0x81C, 0x016E0203,
+ 0x81C, 0x01700203,
+ 0x81C, 0x01720203,
+ 0x81C, 0x01740203,
+ 0x81C, 0x01760203,
+ 0x81C, 0x01780203,
+ 0x81C, 0x017A0203,
+ 0x81C, 0x017C0203,
+ 0x81C, 0x017E0203,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xF8000203,
+ 0x81C, 0xF7020203,
+ 0x81C, 0xF6040203,
+ 0x81C, 0xF5060203,
+ 0x81C, 0xF4080203,
+ 0x81C, 0xF30A0203,
+ 0x81C, 0xF20C0203,
+ 0x81C, 0xF10E0203,
+ 0x81C, 0xF0100203,
+ 0x81C, 0xEF120203,
+ 0x81C, 0xEE140203,
+ 0x81C, 0xED160203,
+ 0x81C, 0xEC180203,
+ 0x81C, 0xEB1A0203,
+ 0x81C, 0xEA1C0203,
+ 0x81C, 0xE91E0203,
+ 0x81C, 0xE8200203,
+ 0x81C, 0xE7220203,
+ 0x81C, 0xE6240203,
+ 0x81C, 0xE5260203,
+ 0x81C, 0xE4280203,
+ 0x81C, 0xE32A0203,
+ 0x81C, 0xE22C0203,
+ 0x81C, 0xE12E0203,
+ 0x81C, 0xA6300203,
+ 0x81C, 0xA5320203,
+ 0x81C, 0xA4340203,
+ 0x81C, 0xA3360203,
+ 0x81C, 0xA2380203,
+ 0x81C, 0xA13A0203,
+ 0x81C, 0x843C0203,
+ 0x81C, 0x833E0203,
+ 0x81C, 0x82400203,
+ 0x81C, 0x81420203,
+ 0x81C, 0x64440203,
+ 0x81C, 0x63460203,
+ 0x81C, 0x62480203,
+ 0x81C, 0x614A0203,
+ 0x81C, 0x444C0203,
+ 0x81C, 0x434E0203,
+ 0x81C, 0x42500203,
+ 0x81C, 0x41520203,
+ 0x81C, 0x25540203,
+ 0x81C, 0x24560203,
+ 0x81C, 0x23580203,
+ 0x81C, 0x065A0203,
+ 0x81C, 0x055C0203,
+ 0x81C, 0x045E0203,
+ 0x81C, 0x03600203,
+ 0x81C, 0x02620203,
+ 0x81C, 0x01640203,
+ 0x81C, 0x01660203,
+ 0x81C, 0x01680203,
+ 0x81C, 0x016A0203,
+ 0x81C, 0x016C0203,
+ 0x81C, 0x016E0203,
+ 0x81C, 0x01700203,
+ 0x81C, 0x01720203,
+ 0x81C, 0x01740203,
+ 0x81C, 0x01760203,
+ 0x81C, 0x01780203,
+ 0x81C, 0x017A0203,
+ 0x81C, 0x017C0203,
+ 0x81C, 0x017E0203,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFB000203,
+ 0x81C, 0xFA020203,
+ 0x81C, 0xF9040203,
+ 0x81C, 0xF8060203,
+ 0x81C, 0xF7080203,
+ 0x81C, 0xF60A0203,
+ 0x81C, 0xF50C0203,
+ 0x81C, 0xF40E0203,
+ 0x81C, 0xF3100203,
+ 0x81C, 0xF2120203,
+ 0x81C, 0xF1140203,
+ 0x81C, 0xF0160203,
+ 0x81C, 0xEF180203,
+ 0x81C, 0xEE1A0203,
+ 0x81C, 0xED1C0203,
+ 0x81C, 0xEC1E0203,
+ 0x81C, 0xEB200203,
+ 0x81C, 0xEA220203,
+ 0x81C, 0xE9240203,
+ 0x81C, 0xE8260203,
+ 0x81C, 0xE7280203,
+ 0x81C, 0xE62A0203,
+ 0x81C, 0xE52C0203,
+ 0x81C, 0xE42E0203,
+ 0x81C, 0xE3300203,
+ 0x81C, 0xE2320203,
+ 0x81C, 0xE1340203,
+ 0x81C, 0xA5360203,
+ 0x81C, 0xA4380203,
+ 0x81C, 0xA33A0203,
+ 0x81C, 0xA23C0203,
+ 0x81C, 0x843E0203,
+ 0x81C, 0x83400203,
+ 0x81C, 0x82420203,
+ 0x81C, 0x81440203,
+ 0x81C, 0x64460203,
+ 0x81C, 0x63480203,
+ 0x81C, 0x624A0203,
+ 0x81C, 0x614C0203,
+ 0x81C, 0x474E0203,
+ 0x81C, 0x46500203,
+ 0x81C, 0x45520203,
+ 0x81C, 0x44540203,
+ 0x81C, 0x43560203,
+ 0x81C, 0x25580203,
+ 0x81C, 0x245A0203,
+ 0x81C, 0x235C0203,
+ 0x81C, 0x075E0203,
+ 0x81C, 0x06600203,
+ 0x81C, 0x05620203,
+ 0x81C, 0x04640203,
+ 0x81C, 0x03660203,
+ 0x81C, 0x02680203,
+ 0x81C, 0x016A0203,
+ 0x81C, 0x016C0203,
+ 0x81C, 0x016E0203,
+ 0x81C, 0x01700203,
+ 0x81C, 0x01720203,
+ 0x81C, 0x01740203,
+ 0x81C, 0x01760203,
+ 0x81C, 0x01780203,
+ 0x81C, 0x017A0203,
+ 0x81C, 0x017C0203,
+ 0x81C, 0x017E0203,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFC000203,
+ 0x81C, 0xFB020203,
+ 0x81C, 0xFA040203,
+ 0x81C, 0xF9060203,
+ 0x81C, 0xF8080203,
+ 0x81C, 0xF70A0203,
+ 0x81C, 0xF60C0203,
+ 0x81C, 0xF50E0203,
+ 0x81C, 0xF4100203,
+ 0x81C, 0xF3120203,
+ 0x81C, 0xF2140203,
+ 0x81C, 0xF1160203,
+ 0x81C, 0xF0180203,
+ 0x81C, 0xEF1A0203,
+ 0x81C, 0xEE1C0203,
+ 0x81C, 0xED1E0203,
+ 0x81C, 0xEC200203,
+ 0x81C, 0xEB220203,
+ 0x81C, 0xEA240203,
+ 0x81C, 0xE9260203,
+ 0x81C, 0xE8280203,
+ 0x81C, 0xE72A0203,
+ 0x81C, 0xE62C0203,
+ 0x81C, 0xE52E0203,
+ 0x81C, 0xE4300203,
+ 0x81C, 0xE3320203,
+ 0x81C, 0xE2340203,
+ 0x81C, 0xE1360203,
+ 0x81C, 0xA5380203,
+ 0x81C, 0xA43A0203,
+ 0x81C, 0xA33C0203,
+ 0x81C, 0x853E0203,
+ 0x81C, 0x84400203,
+ 0x81C, 0x83420203,
+ 0x81C, 0x82440203,
+ 0x81C, 0x81460203,
+ 0x81C, 0x64480203,
+ 0x81C, 0x634A0203,
+ 0x81C, 0x624C0203,
+ 0x81C, 0x614E0203,
+ 0x81C, 0x46500203,
+ 0x81C, 0x45520203,
+ 0x81C, 0x44540203,
+ 0x81C, 0x43560203,
+ 0x81C, 0x25580203,
+ 0x81C, 0x245A0203,
+ 0x81C, 0x235C0203,
+ 0x81C, 0x075E0203,
+ 0x81C, 0x06600203,
+ 0x81C, 0x05620203,
+ 0x81C, 0x04640203,
+ 0x81C, 0x03660203,
+ 0x81C, 0x02680203,
+ 0x81C, 0x016A0203,
+ 0x81C, 0x016C0203,
+ 0x81C, 0x016E0203,
+ 0x81C, 0x01700203,
+ 0x81C, 0x01720203,
+ 0x81C, 0x01740203,
+ 0x81C, 0x01760203,
+ 0x81C, 0x01780203,
+ 0x81C, 0x017A0203,
+ 0x81C, 0x017C0203,
+ 0x81C, 0x017E0203,
+ 0x90000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFF000203,
+ 0x81C, 0xFF020203,
+ 0x81C, 0xFE040203,
+ 0x81C, 0xFD060203,
+ 0x81C, 0xFC080203,
+ 0x81C, 0xFB0A0203,
+ 0x81C, 0xFA0C0203,
+ 0x81C, 0xF90E0203,
+ 0x81C, 0xF8100203,
+ 0x81C, 0xF7120203,
+ 0x81C, 0xF6140203,
+ 0x81C, 0xF5160203,
+ 0x81C, 0xF4180203,
+ 0x81C, 0xF31A0203,
+ 0x81C, 0xF21C0203,
+ 0x81C, 0xF11E0203,
+ 0x81C, 0xF0200203,
+ 0x81C, 0xEF220203,
+ 0x81C, 0xEE240203,
+ 0x81C, 0xED260203,
+ 0x81C, 0xEC280203,
+ 0x81C, 0xEB2A0203,
+ 0x81C, 0xEA2C0203,
+ 0x81C, 0xE92E0203,
+ 0x81C, 0xE8300203,
+ 0x81C, 0xE7320203,
+ 0x81C, 0xE6340203,
+ 0x81C, 0xE5360203,
+ 0x81C, 0xE4380203,
+ 0x81C, 0xE33A0203,
+ 0x81C, 0xE23C0203,
+ 0x81C, 0xE13E0203,
+ 0x81C, 0xA4400203,
+ 0x81C, 0xA3420203,
+ 0x81C, 0xA2440203,
+ 0x81C, 0xA1460203,
+ 0x81C, 0x84480203,
+ 0x81C, 0x834A0203,
+ 0x81C, 0x824C0203,
+ 0x81C, 0x814E0203,
+ 0x81C, 0x64500203,
+ 0x81C, 0x63520203,
+ 0x81C, 0x62540203,
+ 0x81C, 0x61560203,
+ 0x81C, 0x45580203,
+ 0x81C, 0x445A0203,
+ 0x81C, 0x435C0203,
+ 0x81C, 0x425E0203,
+ 0x81C, 0x24600203,
+ 0x81C, 0x23620203,
+ 0x81C, 0x07640203,
+ 0x81C, 0x06660203,
+ 0x81C, 0x05680203,
+ 0x81C, 0x046A0203,
+ 0x81C, 0x036C0203,
+ 0x81C, 0x026E0203,
+ 0x81C, 0x01700203,
+ 0x81C, 0x01720203,
+ 0x81C, 0x01740203,
+ 0x81C, 0x01760203,
+ 0x81C, 0x01780203,
+ 0x81C, 0x017A0203,
+ 0x81C, 0x017C0203,
+ 0x81C, 0x017E0203,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xF7000203,
+ 0x81C, 0xF6020203,
+ 0x81C, 0xF5040203,
+ 0x81C, 0xF4060203,
+ 0x81C, 0xF3080203,
+ 0x81C, 0xF20A0203,
+ 0x81C, 0xF10C0203,
+ 0x81C, 0xF00E0203,
+ 0x81C, 0xEF100203,
+ 0x81C, 0xEE120203,
+ 0x81C, 0xED140203,
+ 0x81C, 0xEC160203,
+ 0x81C, 0xEB180203,
+ 0x81C, 0xEA1A0203,
+ 0x81C, 0xE91C0203,
+ 0x81C, 0xE81E0203,
+ 0x81C, 0xE7200203,
+ 0x81C, 0xE6220203,
+ 0x81C, 0xE5240203,
+ 0x81C, 0xE4260203,
+ 0x81C, 0xE3280203,
+ 0x81C, 0xE22A0203,
+ 0x81C, 0xA62C0203,
+ 0x81C, 0xA52E0203,
+ 0x81C, 0xA4300203,
+ 0x81C, 0xA3320203,
+ 0x81C, 0xA2340203,
+ 0x81C, 0xA1360203,
+ 0x81C, 0x86380203,
+ 0x81C, 0x853A0203,
+ 0x81C, 0x843C0203,
+ 0x81C, 0x833E0203,
+ 0x81C, 0x65400203,
+ 0x81C, 0x64420203,
+ 0x81C, 0x63440203,
+ 0x81C, 0x46460203,
+ 0x81C, 0x45480203,
+ 0x81C, 0x444A0203,
+ 0x81C, 0x434C0203,
+ 0x81C, 0x264E0203,
+ 0x81C, 0x25500203,
+ 0x81C, 0x24520203,
+ 0x81C, 0x08540203,
+ 0x81C, 0x07560203,
+ 0x81C, 0x06580203,
+ 0x81C, 0x055A0203,
+ 0x81C, 0x045C0203,
+ 0x81C, 0x035E0203,
+ 0x81C, 0x02600203,
+ 0x81C, 0x01620203,
+ 0x81C, 0x01640203,
+ 0x81C, 0x01660203,
+ 0x81C, 0x01680203,
+ 0x81C, 0x016A0203,
+ 0x81C, 0x016C0203,
+ 0x81C, 0x016E0203,
+ 0x81C, 0x01700203,
+ 0x81C, 0x01720203,
+ 0x81C, 0x01740203,
+ 0x81C, 0x01760203,
+ 0x81C, 0x01780203,
+ 0x81C, 0x017A0203,
+ 0x81C, 0x017C0203,
+ 0x81C, 0x017E0203,
+ 0x90000009, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFF000203,
+ 0x81C, 0xFF020203,
+ 0x81C, 0xFE040203,
+ 0x81C, 0xFD060203,
+ 0x81C, 0xFC080203,
+ 0x81C, 0xFB0A0203,
+ 0x81C, 0xFA0C0203,
+ 0x81C, 0xF90E0203,
+ 0x81C, 0xF8100203,
+ 0x81C, 0xF7120203,
+ 0x81C, 0xF6140203,
+ 0x81C, 0xF5160203,
+ 0x81C, 0xF4180203,
+ 0x81C, 0xF31A0203,
+ 0x81C, 0xF21C0203,
+ 0x81C, 0xF11E0203,
+ 0x81C, 0xF0200203,
+ 0x81C, 0xEF220203,
+ 0x81C, 0xEE240203,
+ 0x81C, 0xED260203,
+ 0x81C, 0xEC280203,
+ 0x81C, 0xEB2A0203,
+ 0x81C, 0xEA2C0203,
+ 0x81C, 0xE92E0203,
+ 0x81C, 0xE8300203,
+ 0x81C, 0xE7320203,
+ 0x81C, 0xE6340203,
+ 0x81C, 0xE5360203,
+ 0x81C, 0xE4380203,
+ 0x81C, 0xE33A0203,
+ 0x81C, 0xE23C0203,
+ 0x81C, 0xE13E0203,
+ 0x81C, 0xA4400203,
+ 0x81C, 0xA3420203,
+ 0x81C, 0xA2440203,
+ 0x81C, 0xA1460203,
+ 0x81C, 0x84480203,
+ 0x81C, 0x834A0203,
+ 0x81C, 0x824C0203,
+ 0x81C, 0x814E0203,
+ 0x81C, 0x64500203,
+ 0x81C, 0x63520203,
+ 0x81C, 0x62540203,
+ 0x81C, 0x61560203,
+ 0x81C, 0x45580203,
+ 0x81C, 0x445A0203,
+ 0x81C, 0x435C0203,
+ 0x81C, 0x425E0203,
+ 0x81C, 0x24600203,
+ 0x81C, 0x23620203,
+ 0x81C, 0x07640203,
+ 0x81C, 0x06660203,
+ 0x81C, 0x05680203,
+ 0x81C, 0x046A0203,
+ 0x81C, 0x036C0203,
+ 0x81C, 0x026E0203,
+ 0x81C, 0x01700203,
+ 0x81C, 0x01720203,
+ 0x81C, 0x01740203,
+ 0x81C, 0x01760203,
+ 0x81C, 0x01780203,
+ 0x81C, 0x017A0203,
+ 0x81C, 0x017C0203,
+ 0x81C, 0x017E0203,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xF8000203,
+ 0x81C, 0xF7020203,
+ 0x81C, 0xF6040203,
+ 0x81C, 0xF5060203,
+ 0x81C, 0xF4080203,
+ 0x81C, 0xF30A0203,
+ 0x81C, 0xF20C0203,
+ 0x81C, 0xF10E0203,
+ 0x81C, 0xF0100203,
+ 0x81C, 0xEF120203,
+ 0x81C, 0xEE140203,
+ 0x81C, 0xED160203,
+ 0x81C, 0xEC180203,
+ 0x81C, 0xEB1A0203,
+ 0x81C, 0xEA1C0203,
+ 0x81C, 0xE91E0203,
+ 0x81C, 0xE8200203,
+ 0x81C, 0xE7220203,
+ 0x81C, 0xE6240203,
+ 0x81C, 0xE5260203,
+ 0x81C, 0xE4280203,
+ 0x81C, 0xE32A0203,
+ 0x81C, 0xE22C0203,
+ 0x81C, 0xE12E0203,
+ 0x81C, 0xA6300203,
+ 0x81C, 0xA5320203,
+ 0x81C, 0xA4340203,
+ 0x81C, 0xA3360203,
+ 0x81C, 0xA2380203,
+ 0x81C, 0xA13A0203,
+ 0x81C, 0x843C0203,
+ 0x81C, 0x833E0203,
+ 0x81C, 0x82400203,
+ 0x81C, 0x81420203,
+ 0x81C, 0x64440203,
+ 0x81C, 0x63460203,
+ 0x81C, 0x62480203,
+ 0x81C, 0x614A0203,
+ 0x81C, 0x444C0203,
+ 0x81C, 0x434E0203,
+ 0x81C, 0x42500203,
+ 0x81C, 0x41520203,
+ 0x81C, 0x25540203,
+ 0x81C, 0x24560203,
+ 0x81C, 0x23580203,
+ 0x81C, 0x065A0203,
+ 0x81C, 0x055C0203,
+ 0x81C, 0x045E0203,
+ 0x81C, 0x03600203,
+ 0x81C, 0x02620203,
+ 0x81C, 0x01640203,
+ 0x81C, 0x01660203,
+ 0x81C, 0x01680203,
+ 0x81C, 0x016A0203,
+ 0x81C, 0x016C0203,
+ 0x81C, 0x016E0203,
+ 0x81C, 0x01700203,
+ 0x81C, 0x01720203,
+ 0x81C, 0x01740203,
+ 0x81C, 0x01760203,
+ 0x81C, 0x01780203,
+ 0x81C, 0x017A0203,
+ 0x81C, 0x017C0203,
+ 0x81C, 0x017E0203,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xF9000203,
+ 0x81C, 0xF8020203,
+ 0x81C, 0xF7040203,
+ 0x81C, 0xF6060203,
+ 0x81C, 0xF5080203,
+ 0x81C, 0xF40A0203,
+ 0x81C, 0xF30C0203,
+ 0x81C, 0xF20E0203,
+ 0x81C, 0xF1100203,
+ 0x81C, 0xF0120203,
+ 0x81C, 0xEF140203,
+ 0x81C, 0xCE160203,
+ 0x81C, 0xCD180203,
+ 0x81C, 0xCC1A0203,
+ 0x81C, 0xCB1C0203,
+ 0x81C, 0xCA1E0203,
+ 0x81C, 0xC9200203,
+ 0x81C, 0xC8220203,
+ 0x81C, 0xC7240203,
+ 0x81C, 0xC6260203,
+ 0x81C, 0xC5280203,
+ 0x81C, 0xC42A0203,
+ 0x81C, 0xC32C0203,
+ 0x81C, 0xC22E0203,
+ 0x81C, 0xC1300203,
+ 0x81C, 0xA5320203,
+ 0x81C, 0xA4340203,
+ 0x81C, 0xA3360203,
+ 0x81C, 0xA2380203,
+ 0x81C, 0xA13A0203,
+ 0x81C, 0x853C0203,
+ 0x81C, 0x843E0203,
+ 0x81C, 0x83400203,
+ 0x81C, 0x82420203,
+ 0x81C, 0x81440203,
+ 0x81C, 0x64460203,
+ 0x81C, 0x63480203,
+ 0x81C, 0x624A0203,
+ 0x81C, 0x614C0203,
+ 0x81C, 0x444E0203,
+ 0x81C, 0x43500203,
+ 0x81C, 0x42520203,
+ 0x81C, 0x41540203,
+ 0x81C, 0x24560203,
+ 0x81C, 0x23580203,
+ 0x81C, 0x075A0203,
+ 0x81C, 0x065C0203,
+ 0x81C, 0x055E0203,
+ 0x81C, 0x04600203,
+ 0x81C, 0x03620203,
+ 0x81C, 0x02640203,
+ 0x81C, 0x01660203,
+ 0x81C, 0x01680203,
+ 0x81C, 0x016A0203,
+ 0x81C, 0x016C0203,
+ 0x81C, 0x016E0203,
+ 0x81C, 0x01700203,
+ 0x81C, 0x01720203,
+ 0x81C, 0x01740203,
+ 0x81C, 0x01760203,
+ 0x81C, 0x01780203,
+ 0x81C, 0x017A0203,
+ 0x81C, 0x017C0203,
+ 0x81C, 0x017E0203,
+ 0xA0000000, 0x00000000,
+ 0x81C, 0xFF000203,
+ 0x81C, 0xFF020203,
+ 0x81C, 0xFE040203,
+ 0x81C, 0xFD060203,
+ 0x81C, 0xFC080203,
+ 0x81C, 0xFB0A0203,
+ 0x81C, 0xFA0C0203,
+ 0x81C, 0xF90E0203,
+ 0x81C, 0xF8100203,
+ 0x81C, 0xF7120203,
+ 0x81C, 0xF6140203,
+ 0x81C, 0xF5160203,
+ 0x81C, 0xF4180203,
+ 0x81C, 0xF31A0203,
+ 0x81C, 0xF21C0203,
+ 0x81C, 0xF11E0203,
+ 0x81C, 0xF0200203,
+ 0x81C, 0xEF220203,
+ 0x81C, 0xEE240203,
+ 0x81C, 0xED260203,
+ 0x81C, 0xEC280203,
+ 0x81C, 0xEB2A0203,
+ 0x81C, 0xEA2C0203,
+ 0x81C, 0xE92E0203,
+ 0x81C, 0xE8300203,
+ 0x81C, 0xE7320203,
+ 0x81C, 0xE6340203,
+ 0x81C, 0xE5360203,
+ 0x81C, 0xE4380203,
+ 0x81C, 0xE33A0203,
+ 0x81C, 0xE23C0203,
+ 0x81C, 0xE13E0203,
+ 0x81C, 0xA4400203,
+ 0x81C, 0xA3420203,
+ 0x81C, 0xA2440203,
+ 0x81C, 0xA1460203,
+ 0x81C, 0x85480203,
+ 0x81C, 0x844A0203,
+ 0x81C, 0x834C0203,
+ 0x81C, 0x824E0203,
+ 0x81C, 0x81500203,
+ 0x81C, 0x64520203,
+ 0x81C, 0x63540203,
+ 0x81C, 0x62560203,
+ 0x81C, 0x61580203,
+ 0x81C, 0x445A0203,
+ 0x81C, 0x435C0203,
+ 0x81C, 0x425E0203,
+ 0x81C, 0x25600203,
+ 0x81C, 0x24620203,
+ 0x81C, 0x06640203,
+ 0x81C, 0x05660203,
+ 0x81C, 0x04680203,
+ 0x81C, 0x036A0203,
+ 0x81C, 0x026C0203,
+ 0x81C, 0x016E0203,
+ 0x81C, 0x01700203,
+ 0x81C, 0x01720203,
+ 0x81C, 0x01740203,
+ 0x81C, 0x01760203,
+ 0x81C, 0x01780203,
+ 0x81C, 0x017A0203,
+ 0x81C, 0x017C0203,
+ 0x81C, 0x017E0203,
+ 0xB0000000, 0x00000000,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xF8000303,
+ 0x81C, 0xF7020303,
+ 0x81C, 0xF6040303,
+ 0x81C, 0xF5060303,
+ 0x81C, 0xF4080303,
+ 0x81C, 0xF30A0303,
+ 0x81C, 0xF20C0303,
+ 0x81C, 0xF10E0303,
+ 0x81C, 0xF0100303,
+ 0x81C, 0xEF120303,
+ 0x81C, 0xEE140303,
+ 0x81C, 0xED160303,
+ 0x81C, 0xEC180303,
+ 0x81C, 0xEB1A0303,
+ 0x81C, 0xEA1C0303,
+ 0x81C, 0xE91E0303,
+ 0x81C, 0xE8200303,
+ 0x81C, 0xE7220303,
+ 0x81C, 0xE6240303,
+ 0x81C, 0xE5260303,
+ 0x81C, 0xE4280303,
+ 0x81C, 0xE32A0303,
+ 0x81C, 0xE22C0303,
+ 0x81C, 0xE12E0303,
+ 0x81C, 0xA6300303,
+ 0x81C, 0xA5320303,
+ 0x81C, 0xA4340303,
+ 0x81C, 0xA3360303,
+ 0x81C, 0xA2380303,
+ 0x81C, 0xA13A0303,
+ 0x81C, 0x843C0303,
+ 0x81C, 0x833E0303,
+ 0x81C, 0x82400303,
+ 0x81C, 0x81420303,
+ 0x81C, 0x64440303,
+ 0x81C, 0x63460303,
+ 0x81C, 0x62480303,
+ 0x81C, 0x614A0303,
+ 0x81C, 0x454C0303,
+ 0x81C, 0x444E0303,
+ 0x81C, 0x43500303,
+ 0x81C, 0x42520303,
+ 0x81C, 0x41540303,
+ 0x81C, 0x24560303,
+ 0x81C, 0x23580303,
+ 0x81C, 0x065A0303,
+ 0x81C, 0x055C0303,
+ 0x81C, 0x045E0303,
+ 0x81C, 0x03600303,
+ 0x81C, 0x02620303,
+ 0x81C, 0x01640303,
+ 0x81C, 0x01660303,
+ 0x81C, 0x01680303,
+ 0x81C, 0x016A0303,
+ 0x81C, 0x016C0303,
+ 0x81C, 0x016E0303,
+ 0x81C, 0x01700303,
+ 0x81C, 0x01720303,
+ 0x81C, 0x01740303,
+ 0x81C, 0x01760303,
+ 0x81C, 0x01780303,
+ 0x81C, 0x017A0303,
+ 0x81C, 0x017C0303,
+ 0x81C, 0x017E0303,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xF7000303,
+ 0x81C, 0xF6020303,
+ 0x81C, 0xF5040303,
+ 0x81C, 0xF4060303,
+ 0x81C, 0xF3080303,
+ 0x81C, 0xF20A0303,
+ 0x81C, 0xF10C0303,
+ 0x81C, 0xF00E0303,
+ 0x81C, 0xEF100303,
+ 0x81C, 0xEE120303,
+ 0x81C, 0xED140303,
+ 0x81C, 0xEC160303,
+ 0x81C, 0xEB180303,
+ 0x81C, 0xEA1A0303,
+ 0x81C, 0xE91C0303,
+ 0x81C, 0xE81E0303,
+ 0x81C, 0xE7200303,
+ 0x81C, 0xE6220303,
+ 0x81C, 0xE5240303,
+ 0x81C, 0xE4260303,
+ 0x81C, 0xE3280303,
+ 0x81C, 0xC32A0303,
+ 0x81C, 0xC22C0303,
+ 0x81C, 0xC12E0303,
+ 0x81C, 0xA5300303,
+ 0x81C, 0xA4320303,
+ 0x81C, 0xA3340303,
+ 0x81C, 0xA2360303,
+ 0x81C, 0xA1380303,
+ 0x81C, 0x853A0303,
+ 0x81C, 0x843C0303,
+ 0x81C, 0x833E0303,
+ 0x81C, 0x82400303,
+ 0x81C, 0x81420303,
+ 0x81C, 0x64440303,
+ 0x81C, 0x63460303,
+ 0x81C, 0x62480303,
+ 0x81C, 0x614A0303,
+ 0x81C, 0x454C0303,
+ 0x81C, 0x444E0303,
+ 0x81C, 0x43500303,
+ 0x81C, 0x25520303,
+ 0x81C, 0x24540303,
+ 0x81C, 0x23560303,
+ 0x81C, 0x06580303,
+ 0x81C, 0x055A0303,
+ 0x81C, 0x045C0303,
+ 0x81C, 0x035E0303,
+ 0x81C, 0x02600303,
+ 0x81C, 0x01620303,
+ 0x81C, 0x01640303,
+ 0x81C, 0x01660303,
+ 0x81C, 0x01680303,
+ 0x81C, 0x016A0303,
+ 0x81C, 0x016C0303,
+ 0x81C, 0x016E0303,
+ 0x81C, 0x01700303,
+ 0x81C, 0x01720303,
+ 0x81C, 0x01740303,
+ 0x81C, 0x01760303,
+ 0x81C, 0x01780303,
+ 0x81C, 0x017A0303,
+ 0x81C, 0x017C0303,
+ 0x81C, 0x017E0303,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xF9000303,
+ 0x81C, 0xF8020303,
+ 0x81C, 0xF7040303,
+ 0x81C, 0xF6060303,
+ 0x81C, 0xF5080303,
+ 0x81C, 0xF40A0303,
+ 0x81C, 0xF30C0303,
+ 0x81C, 0xF20E0303,
+ 0x81C, 0xF1100303,
+ 0x81C, 0xF0120303,
+ 0x81C, 0xEF140303,
+ 0x81C, 0xEE160303,
+ 0x81C, 0xED180303,
+ 0x81C, 0xEC1A0303,
+ 0x81C, 0xEB1C0303,
+ 0x81C, 0xEA1E0303,
+ 0x81C, 0xE9200303,
+ 0x81C, 0xE8220303,
+ 0x81C, 0xE7240303,
+ 0x81C, 0xE6260303,
+ 0x81C, 0xE5280303,
+ 0x81C, 0xE42A0303,
+ 0x81C, 0xE32C0303,
+ 0x81C, 0xE22E0303,
+ 0x81C, 0xE1300303,
+ 0x81C, 0xA4320303,
+ 0x81C, 0xA3340303,
+ 0x81C, 0xA2360303,
+ 0x81C, 0xA1380303,
+ 0x81C, 0x853A0303,
+ 0x81C, 0x843C0303,
+ 0x81C, 0x833E0303,
+ 0x81C, 0x82400303,
+ 0x81C, 0x81420303,
+ 0x81C, 0x64440303,
+ 0x81C, 0x63460303,
+ 0x81C, 0x62480303,
+ 0x81C, 0x614A0303,
+ 0x81C, 0x444C0303,
+ 0x81C, 0x434E0303,
+ 0x81C, 0x42500303,
+ 0x81C, 0x25520303,
+ 0x81C, 0x24540303,
+ 0x81C, 0x23560303,
+ 0x81C, 0x07580303,
+ 0x81C, 0x065A0303,
+ 0x81C, 0x055C0303,
+ 0x81C, 0x045E0303,
+ 0x81C, 0x03600303,
+ 0x81C, 0x02620303,
+ 0x81C, 0x01640303,
+ 0x81C, 0x01660303,
+ 0x81C, 0x01680303,
+ 0x81C, 0x016A0303,
+ 0x81C, 0x016C0303,
+ 0x81C, 0x016E0303,
+ 0x81C, 0x01700303,
+ 0x81C, 0x01720303,
+ 0x81C, 0x01740303,
+ 0x81C, 0x01760303,
+ 0x81C, 0x01780303,
+ 0x81C, 0x017A0303,
+ 0x81C, 0x017C0303,
+ 0x81C, 0x017E0303,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xF7000303,
+ 0x81C, 0xF6020303,
+ 0x81C, 0xF5040303,
+ 0x81C, 0xF4060303,
+ 0x81C, 0xF3080303,
+ 0x81C, 0xF20A0303,
+ 0x81C, 0xF10C0303,
+ 0x81C, 0xF00E0303,
+ 0x81C, 0xEF100303,
+ 0x81C, 0xEE120303,
+ 0x81C, 0xED140303,
+ 0x81C, 0xEC160303,
+ 0x81C, 0xEB180303,
+ 0x81C, 0xEA1A0303,
+ 0x81C, 0xE91C0303,
+ 0x81C, 0xE81E0303,
+ 0x81C, 0xE7200303,
+ 0x81C, 0xE6220303,
+ 0x81C, 0xE5240303,
+ 0x81C, 0xE4260303,
+ 0x81C, 0xE3280303,
+ 0x81C, 0xE22A0303,
+ 0x81C, 0xE12C0303,
+ 0x81C, 0xA72E0303,
+ 0x81C, 0xA6300303,
+ 0x81C, 0xA5320303,
+ 0x81C, 0xA4340303,
+ 0x81C, 0xA3360303,
+ 0x81C, 0xA2380303,
+ 0x81C, 0xA13A0303,
+ 0x81C, 0x843C0303,
+ 0x81C, 0x833E0303,
+ 0x81C, 0x82400303,
+ 0x81C, 0x81420303,
+ 0x81C, 0x64440303,
+ 0x81C, 0x63460303,
+ 0x81C, 0x62480303,
+ 0x81C, 0x614A0303,
+ 0x81C, 0x454C0303,
+ 0x81C, 0x444E0303,
+ 0x81C, 0x43500303,
+ 0x81C, 0x42520303,
+ 0x81C, 0x41540303,
+ 0x81C, 0x24560303,
+ 0x81C, 0x23580303,
+ 0x81C, 0x065A0303,
+ 0x81C, 0x055C0303,
+ 0x81C, 0x045E0303,
+ 0x81C, 0x03600303,
+ 0x81C, 0x02620303,
+ 0x81C, 0x01640303,
+ 0x81C, 0x01660303,
+ 0x81C, 0x01680303,
+ 0x81C, 0x016A0303,
+ 0x81C, 0x016C0303,
+ 0x81C, 0x016E0303,
+ 0x81C, 0x01700303,
+ 0x81C, 0x01720303,
+ 0x81C, 0x01740303,
+ 0x81C, 0x01760303,
+ 0x81C, 0x01780303,
+ 0x81C, 0x017A0303,
+ 0x81C, 0x017C0303,
+ 0x81C, 0x017E0303,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFB000303,
+ 0x81C, 0xFA020303,
+ 0x81C, 0xF9040303,
+ 0x81C, 0xF8060303,
+ 0x81C, 0xF7080303,
+ 0x81C, 0xF60A0303,
+ 0x81C, 0xF50C0303,
+ 0x81C, 0xF40E0303,
+ 0x81C, 0xF3100303,
+ 0x81C, 0xF2120303,
+ 0x81C, 0xF1140303,
+ 0x81C, 0xF0160303,
+ 0x81C, 0xEF180303,
+ 0x81C, 0xEE1A0303,
+ 0x81C, 0xED1C0303,
+ 0x81C, 0xEC1E0303,
+ 0x81C, 0xEB200303,
+ 0x81C, 0xEA220303,
+ 0x81C, 0xE9240303,
+ 0x81C, 0xE8260303,
+ 0x81C, 0xE7280303,
+ 0x81C, 0xE62A0303,
+ 0x81C, 0xE52C0303,
+ 0x81C, 0xE42E0303,
+ 0x81C, 0xE3300303,
+ 0x81C, 0xE2320303,
+ 0x81C, 0xE1340303,
+ 0x81C, 0xC2360303,
+ 0x81C, 0xC1380303,
+ 0x81C, 0xA33A0303,
+ 0x81C, 0xA23C0303,
+ 0x81C, 0x853E0303,
+ 0x81C, 0x84400303,
+ 0x81C, 0x83420303,
+ 0x81C, 0x66440303,
+ 0x81C, 0x65460303,
+ 0x81C, 0x64480303,
+ 0x81C, 0x634A0303,
+ 0x81C, 0x624C0303,
+ 0x81C, 0x614E0303,
+ 0x81C, 0x45500303,
+ 0x81C, 0x44520303,
+ 0x81C, 0x43540303,
+ 0x81C, 0x42560303,
+ 0x81C, 0x25580303,
+ 0x81C, 0x245A0303,
+ 0x81C, 0x235C0303,
+ 0x81C, 0x065E0303,
+ 0x81C, 0x05600303,
+ 0x81C, 0x04620303,
+ 0x81C, 0x03640303,
+ 0x81C, 0x02660303,
+ 0x81C, 0x01680303,
+ 0x81C, 0x016A0303,
+ 0x81C, 0x016C0303,
+ 0x81C, 0x016E0303,
+ 0x81C, 0x01700303,
+ 0x81C, 0x01720303,
+ 0x81C, 0x01740303,
+ 0x81C, 0x01760303,
+ 0x81C, 0x01780303,
+ 0x81C, 0x017A0303,
+ 0x81C, 0x017C0303,
+ 0x81C, 0x017E0303,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xF9000303,
+ 0x81C, 0xF8020303,
+ 0x81C, 0xF7040303,
+ 0x81C, 0xF6060303,
+ 0x81C, 0xF5080303,
+ 0x81C, 0xF40A0303,
+ 0x81C, 0xF30C0303,
+ 0x81C, 0xF20E0303,
+ 0x81C, 0xF1100303,
+ 0x81C, 0xF0120303,
+ 0x81C, 0xEF140303,
+ 0x81C, 0xEE160303,
+ 0x81C, 0xED180303,
+ 0x81C, 0xEC1A0303,
+ 0x81C, 0xEB1C0303,
+ 0x81C, 0xEA1E0303,
+ 0x81C, 0xE9200303,
+ 0x81C, 0xE8220303,
+ 0x81C, 0xE7240303,
+ 0x81C, 0xE6260303,
+ 0x81C, 0xE5280303,
+ 0x81C, 0xE42A0303,
+ 0x81C, 0xE32C0303,
+ 0x81C, 0xE22E0303,
+ 0x81C, 0xE1300303,
+ 0x81C, 0xA6320303,
+ 0x81C, 0xA5340303,
+ 0x81C, 0xA4360303,
+ 0x81C, 0xA3380303,
+ 0x81C, 0xA23A0303,
+ 0x81C, 0xA13C0303,
+ 0x81C, 0x853E0303,
+ 0x81C, 0x84400303,
+ 0x81C, 0x83420303,
+ 0x81C, 0x82440303,
+ 0x81C, 0x81460303,
+ 0x81C, 0x64480303,
+ 0x81C, 0x634A0303,
+ 0x81C, 0x624C0303,
+ 0x81C, 0x614E0303,
+ 0x81C, 0x44500303,
+ 0x81C, 0x43520303,
+ 0x81C, 0x42540303,
+ 0x81C, 0x41560303,
+ 0x81C, 0x25580303,
+ 0x81C, 0x245A0303,
+ 0x81C, 0x235C0303,
+ 0x81C, 0x055E0303,
+ 0x81C, 0x04600303,
+ 0x81C, 0x03620303,
+ 0x81C, 0x02640303,
+ 0x81C, 0x01660303,
+ 0x81C, 0x01680303,
+ 0x81C, 0x016A0303,
+ 0x81C, 0x016C0303,
+ 0x81C, 0x016E0303,
+ 0x81C, 0x01700303,
+ 0x81C, 0x01720303,
+ 0x81C, 0x01740303,
+ 0x81C, 0x01760303,
+ 0x81C, 0x01780303,
+ 0x81C, 0x017A0303,
+ 0x81C, 0x017C0303,
+ 0x81C, 0x017E0303,
+ 0x90000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFE000303,
+ 0x81C, 0xFD020303,
+ 0x81C, 0xFC040303,
+ 0x81C, 0xFB060303,
+ 0x81C, 0xFA080303,
+ 0x81C, 0xF90A0303,
+ 0x81C, 0xF80C0303,
+ 0x81C, 0xF70E0303,
+ 0x81C, 0xF6100303,
+ 0x81C, 0xF5120303,
+ 0x81C, 0xF4140303,
+ 0x81C, 0xF3160303,
+ 0x81C, 0xF2180303,
+ 0x81C, 0xF11A0303,
+ 0x81C, 0xF01C0303,
+ 0x81C, 0xEF1E0303,
+ 0x81C, 0xEE200303,
+ 0x81C, 0xED220303,
+ 0x81C, 0xEC240303,
+ 0x81C, 0xEB260303,
+ 0x81C, 0xEA280303,
+ 0x81C, 0xE92A0303,
+ 0x81C, 0xE82C0303,
+ 0x81C, 0xE72E0303,
+ 0x81C, 0xE6300303,
+ 0x81C, 0xE5320303,
+ 0x81C, 0xE4340303,
+ 0x81C, 0xE3360303,
+ 0x81C, 0xC3380303,
+ 0x81C, 0xC23A0303,
+ 0x81C, 0xC13C0303,
+ 0x81C, 0xA43E0303,
+ 0x81C, 0xA3400303,
+ 0x81C, 0xA2420303,
+ 0x81C, 0xA1440303,
+ 0x81C, 0x85460303,
+ 0x81C, 0x84480303,
+ 0x81C, 0x834A0303,
+ 0x81C, 0x824C0303,
+ 0x81C, 0x814E0303,
+ 0x81C, 0x64500303,
+ 0x81C, 0x63520303,
+ 0x81C, 0x62540303,
+ 0x81C, 0x61560303,
+ 0x81C, 0x44580303,
+ 0x81C, 0x435A0303,
+ 0x81C, 0x425C0303,
+ 0x81C, 0x265E0303,
+ 0x81C, 0x25600303,
+ 0x81C, 0x24620303,
+ 0x81C, 0x06640303,
+ 0x81C, 0x05660303,
+ 0x81C, 0x04680303,
+ 0x81C, 0x036A0303,
+ 0x81C, 0x026C0303,
+ 0x81C, 0x016E0303,
+ 0x81C, 0x01700303,
+ 0x81C, 0x01720303,
+ 0x81C, 0x01740303,
+ 0x81C, 0x01760303,
+ 0x81C, 0x01780303,
+ 0x81C, 0x017A0303,
+ 0x81C, 0x017C0303,
+ 0x81C, 0x017E0303,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xF7000303,
+ 0x81C, 0xF6020303,
+ 0x81C, 0xF5040303,
+ 0x81C, 0xF4060303,
+ 0x81C, 0xF3080303,
+ 0x81C, 0xF20A0303,
+ 0x81C, 0xF10C0303,
+ 0x81C, 0xF00E0303,
+ 0x81C, 0xEF100303,
+ 0x81C, 0xEE120303,
+ 0x81C, 0xED140303,
+ 0x81C, 0xEC160303,
+ 0x81C, 0xEB180303,
+ 0x81C, 0xEA1A0303,
+ 0x81C, 0xE91C0303,
+ 0x81C, 0xE81E0303,
+ 0x81C, 0xE7200303,
+ 0x81C, 0xE6220303,
+ 0x81C, 0xE5240303,
+ 0x81C, 0xE4260303,
+ 0x81C, 0xE3280303,
+ 0x81C, 0xE22A0303,
+ 0x81C, 0xA62C0303,
+ 0x81C, 0xA52E0303,
+ 0x81C, 0xA4300303,
+ 0x81C, 0xA3320303,
+ 0x81C, 0xA2340303,
+ 0x81C, 0x87360303,
+ 0x81C, 0x86380303,
+ 0x81C, 0x853A0303,
+ 0x81C, 0x843C0303,
+ 0x81C, 0x833E0303,
+ 0x81C, 0x66400303,
+ 0x81C, 0x65420303,
+ 0x81C, 0x64440303,
+ 0x81C, 0x45460303,
+ 0x81C, 0x44480303,
+ 0x81C, 0x434A0303,
+ 0x81C, 0x274C0303,
+ 0x81C, 0x264E0303,
+ 0x81C, 0x25500303,
+ 0x81C, 0x24520303,
+ 0x81C, 0x23540303,
+ 0x81C, 0x08560303,
+ 0x81C, 0x07580303,
+ 0x81C, 0x065A0303,
+ 0x81C, 0x055C0303,
+ 0x81C, 0x045E0303,
+ 0x81C, 0x03600303,
+ 0x81C, 0x02620303,
+ 0x81C, 0x01640303,
+ 0x81C, 0x01660303,
+ 0x81C, 0x01680303,
+ 0x81C, 0x016A0303,
+ 0x81C, 0x016C0303,
+ 0x81C, 0x016E0303,
+ 0x81C, 0x01700303,
+ 0x81C, 0x01720303,
+ 0x81C, 0x01740303,
+ 0x81C, 0x01760303,
+ 0x81C, 0x01780303,
+ 0x81C, 0x017A0303,
+ 0x81C, 0x017C0303,
+ 0x81C, 0x017E0303,
+ 0x90000009, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFE000303,
+ 0x81C, 0xFD020303,
+ 0x81C, 0xFC040303,
+ 0x81C, 0xFB060303,
+ 0x81C, 0xFA080303,
+ 0x81C, 0xF90A0303,
+ 0x81C, 0xF80C0303,
+ 0x81C, 0xF70E0303,
+ 0x81C, 0xF6100303,
+ 0x81C, 0xF5120303,
+ 0x81C, 0xF4140303,
+ 0x81C, 0xF3160303,
+ 0x81C, 0xF2180303,
+ 0x81C, 0xF11A0303,
+ 0x81C, 0xF01C0303,
+ 0x81C, 0xEF1E0303,
+ 0x81C, 0xEE200303,
+ 0x81C, 0xED220303,
+ 0x81C, 0xEC240303,
+ 0x81C, 0xEB260303,
+ 0x81C, 0xEA280303,
+ 0x81C, 0xE92A0303,
+ 0x81C, 0xE82C0303,
+ 0x81C, 0xE72E0303,
+ 0x81C, 0xE6300303,
+ 0x81C, 0xE5320303,
+ 0x81C, 0xE4340303,
+ 0x81C, 0xE3360303,
+ 0x81C, 0xC3380303,
+ 0x81C, 0xC23A0303,
+ 0x81C, 0xC13C0303,
+ 0x81C, 0xA43E0303,
+ 0x81C, 0xA3400303,
+ 0x81C, 0xA2420303,
+ 0x81C, 0xA1440303,
+ 0x81C, 0x85460303,
+ 0x81C, 0x84480303,
+ 0x81C, 0x834A0303,
+ 0x81C, 0x824C0303,
+ 0x81C, 0x814E0303,
+ 0x81C, 0x64500303,
+ 0x81C, 0x63520303,
+ 0x81C, 0x62540303,
+ 0x81C, 0x61560303,
+ 0x81C, 0x44580303,
+ 0x81C, 0x435A0303,
+ 0x81C, 0x425C0303,
+ 0x81C, 0x265E0303,
+ 0x81C, 0x25600303,
+ 0x81C, 0x24620303,
+ 0x81C, 0x06640303,
+ 0x81C, 0x05660303,
+ 0x81C, 0x04680303,
+ 0x81C, 0x036A0303,
+ 0x81C, 0x026C0303,
+ 0x81C, 0x016E0303,
+ 0x81C, 0x01700303,
+ 0x81C, 0x01720303,
+ 0x81C, 0x01740303,
+ 0x81C, 0x01760303,
+ 0x81C, 0x01780303,
+ 0x81C, 0x017A0303,
+ 0x81C, 0x017C0303,
+ 0x81C, 0x017E0303,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xF7000303,
+ 0x81C, 0xF6020303,
+ 0x81C, 0xF5040303,
+ 0x81C, 0xF4060303,
+ 0x81C, 0xF3080303,
+ 0x81C, 0xF20A0303,
+ 0x81C, 0xF10C0303,
+ 0x81C, 0xF00E0303,
+ 0x81C, 0xEF100303,
+ 0x81C, 0xEE120303,
+ 0x81C, 0xED140303,
+ 0x81C, 0xEC160303,
+ 0x81C, 0xEB180303,
+ 0x81C, 0xEA1A0303,
+ 0x81C, 0xE91C0303,
+ 0x81C, 0xE81E0303,
+ 0x81C, 0xE7200303,
+ 0x81C, 0xE6220303,
+ 0x81C, 0xE5240303,
+ 0x81C, 0xE4260303,
+ 0x81C, 0xE3280303,
+ 0x81C, 0xE22A0303,
+ 0x81C, 0xE12C0303,
+ 0x81C, 0xA72E0303,
+ 0x81C, 0xA6300303,
+ 0x81C, 0xA5320303,
+ 0x81C, 0xA4340303,
+ 0x81C, 0xA3360303,
+ 0x81C, 0xA2380303,
+ 0x81C, 0xA13A0303,
+ 0x81C, 0x843C0303,
+ 0x81C, 0x833E0303,
+ 0x81C, 0x82400303,
+ 0x81C, 0x81420303,
+ 0x81C, 0x64440303,
+ 0x81C, 0x63460303,
+ 0x81C, 0x62480303,
+ 0x81C, 0x614A0303,
+ 0x81C, 0x454C0303,
+ 0x81C, 0x444E0303,
+ 0x81C, 0x43500303,
+ 0x81C, 0x42520303,
+ 0x81C, 0x41540303,
+ 0x81C, 0x24560303,
+ 0x81C, 0x23580303,
+ 0x81C, 0x065A0303,
+ 0x81C, 0x055C0303,
+ 0x81C, 0x045E0303,
+ 0x81C, 0x03600303,
+ 0x81C, 0x02620303,
+ 0x81C, 0x01640303,
+ 0x81C, 0x01660303,
+ 0x81C, 0x01680303,
+ 0x81C, 0x016A0303,
+ 0x81C, 0x016C0303,
+ 0x81C, 0x016E0303,
+ 0x81C, 0x01700303,
+ 0x81C, 0x01720303,
+ 0x81C, 0x01740303,
+ 0x81C, 0x01760303,
+ 0x81C, 0x01780303,
+ 0x81C, 0x017A0303,
+ 0x81C, 0x017C0303,
+ 0x81C, 0x017E0303,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xF7000303,
+ 0x81C, 0xF6020303,
+ 0x81C, 0xF5040303,
+ 0x81C, 0xF4060303,
+ 0x81C, 0xF3080303,
+ 0x81C, 0xF20A0303,
+ 0x81C, 0xF10C0303,
+ 0x81C, 0xF00E0303,
+ 0x81C, 0xEF100303,
+ 0x81C, 0xEE120303,
+ 0x81C, 0xED140303,
+ 0x81C, 0xEC160303,
+ 0x81C, 0xEB180303,
+ 0x81C, 0xEA1A0303,
+ 0x81C, 0xAF1C0303,
+ 0x81C, 0xAE1E0303,
+ 0x81C, 0xAD200303,
+ 0x81C, 0xAC220303,
+ 0x81C, 0xAB240303,
+ 0x81C, 0xAA260303,
+ 0x81C, 0xC5280303,
+ 0x81C, 0xC42A0303,
+ 0x81C, 0xC32C0303,
+ 0x81C, 0xC22E0303,
+ 0x81C, 0xA5300303,
+ 0x81C, 0xA4320303,
+ 0x81C, 0xA3340303,
+ 0x81C, 0xA2360303,
+ 0x81C, 0xA1380303,
+ 0x81C, 0x853A0303,
+ 0x81C, 0x843C0303,
+ 0x81C, 0x833E0303,
+ 0x81C, 0x82400303,
+ 0x81C, 0x81420303,
+ 0x81C, 0x64440303,
+ 0x81C, 0x63460303,
+ 0x81C, 0x62480303,
+ 0x81C, 0x614A0303,
+ 0x81C, 0x444C0303,
+ 0x81C, 0x434E0303,
+ 0x81C, 0x42500303,
+ 0x81C, 0x41520303,
+ 0x81C, 0x25540303,
+ 0x81C, 0x24560303,
+ 0x81C, 0x06580303,
+ 0x81C, 0x055A0303,
+ 0x81C, 0x045C0303,
+ 0x81C, 0x035E0303,
+ 0x81C, 0x02600303,
+ 0x81C, 0x01620303,
+ 0x81C, 0x01640303,
+ 0x81C, 0x01660303,
+ 0x81C, 0x01680303,
+ 0x81C, 0x016A0303,
+ 0x81C, 0x016C0303,
+ 0x81C, 0x016E0303,
+ 0x81C, 0x01700303,
+ 0x81C, 0x01720303,
+ 0x81C, 0x01740303,
+ 0x81C, 0x01760303,
+ 0x81C, 0x01780303,
+ 0x81C, 0x017A0303,
+ 0x81C, 0x017C0303,
+ 0x81C, 0x017E0303,
+ 0xA0000000, 0x00000000,
+ 0x81C, 0xFD000303,
+ 0x81C, 0xFC020303,
+ 0x81C, 0xFB040303,
+ 0x81C, 0xFA060303,
+ 0x81C, 0xF9080303,
+ 0x81C, 0xF80A0303,
+ 0x81C, 0xF70C0303,
+ 0x81C, 0xF60E0303,
+ 0x81C, 0xF5100303,
+ 0x81C, 0xF4120303,
+ 0x81C, 0xF3140303,
+ 0x81C, 0xF2160303,
+ 0x81C, 0xF1180303,
+ 0x81C, 0xF01A0303,
+ 0x81C, 0xEF1C0303,
+ 0x81C, 0xEE1E0303,
+ 0x81C, 0xED200303,
+ 0x81C, 0xEC220303,
+ 0x81C, 0xEB240303,
+ 0x81C, 0xEA260303,
+ 0x81C, 0xE9280303,
+ 0x81C, 0xE82A0303,
+ 0x81C, 0xE72C0303,
+ 0x81C, 0xE62E0303,
+ 0x81C, 0xE5300303,
+ 0x81C, 0xE4320303,
+ 0x81C, 0xE3340303,
+ 0x81C, 0xE2360303,
+ 0x81C, 0xE1380303,
+ 0x81C, 0xA53A0303,
+ 0x81C, 0xA43C0303,
+ 0x81C, 0xA33E0303,
+ 0x81C, 0xA2400303,
+ 0x81C, 0xA1420303,
+ 0x81C, 0x87440303,
+ 0x81C, 0x86460303,
+ 0x81C, 0x85480303,
+ 0x81C, 0x844A0303,
+ 0x81C, 0x834C0303,
+ 0x81C, 0x824E0303,
+ 0x81C, 0x81500303,
+ 0x81C, 0x64520303,
+ 0x81C, 0x63540303,
+ 0x81C, 0x62560303,
+ 0x81C, 0x61580303,
+ 0x81C, 0x435A0303,
+ 0x81C, 0x425C0303,
+ 0x81C, 0x415E0303,
+ 0x81C, 0x07600303,
+ 0x81C, 0x06620303,
+ 0x81C, 0x05640303,
+ 0x81C, 0x04660303,
+ 0x81C, 0x03680303,
+ 0x81C, 0x026A0303,
+ 0x81C, 0x016C0303,
+ 0x81C, 0x016E0303,
+ 0x81C, 0x01700303,
+ 0x81C, 0x01720303,
+ 0x81C, 0x01740303,
+ 0x81C, 0x01760303,
+ 0x81C, 0x01780303,
+ 0x81C, 0x017A0303,
+ 0x81C, 0x017C0303,
+ 0x81C, 0x017E0303,
+ 0xB0000000, 0x00000000,
+ 0xC50, 0x00000022,
+ 0xC50, 0x00000020,
+ 0xE50, 0x00000022,
+ 0xE50, 0x00000020,
+ 0x1850, 0x00000022,
+ 0x1850, 0x00000020,
+ 0x1A50, 0x00000022,
+ 0x1A50, 0x00000020,
+};
+
+RTW_DECL_TABLE_PHY_COND(rtw8814a_agc, rtw_phy_cfg_agc);
+
+static const u32 rtw8814a_bb[] = {
+ 0x800, 0x9020D010,
+ 0x804, 0x08011280,
+ 0x808, 0x0E0282FF,
+ 0x80C, 0x1000002F,
+ 0x80000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x810, 0x33303265,
+ 0xA0000000, 0x00000000,
+ 0x810, 0x33303265,
+ 0xB0000000, 0x00000000,
+ 0x814, 0x020C3D10,
+ 0x818, 0x04A10385,
+ 0x820, 0x00000000,
+ 0x824, 0x00033E40,
+ 0x828, 0x00000000,
+ 0x82C, 0x73985170,
+ 0x830, 0x79A0EA08,
+ 0x834, 0x042E708A,
+ 0x80000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x838, 0x86667640,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x838, 0x86667641,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x838, 0x86667641,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x838, 0x86667641,
+ 0xA0000000, 0x00000000,
+ 0x838, 0x86667640,
+ 0xB0000000, 0x00000000,
+ 0x83C, 0x9798B9B9,
+ 0x840, 0x17578F60,
+ 0x844, 0x4BBDFCDE,
+ 0x848, 0x5CD07F8B,
+ 0x84C, 0x6CFBF7B5,
+ 0x850, 0x28834706,
+ 0x854, 0x0001520C,
+ 0x858, 0x4060C000,
+ 0x85C, 0x74210368,
+ 0x860, 0x6929C321,
+ 0x864, 0x79727432,
+ 0x868, 0x8CA7A314,
+ 0x86C, 0x438C2878,
+ 0x870, 0x44444444,
+ 0x874, 0x21612C2E,
+ 0x878, 0x00003152,
+ 0x87C, 0x000FC000,
+ 0x8A0, 0x00000013,
+ 0x8A4, 0x7F7F7F7F,
+ 0x8A8, 0xA202033E,
+ 0x8AC, 0xF40F550A,
+ 0x8B0, 0x00000600,
+ 0x8B4, 0x000FC080,
+ 0x8B8, 0xEC0057FF,
+ 0x8BC, 0x8CA520C3,
+ 0x8C0, 0x3FF00020,
+ 0x8C4, 0x44C00000,
+ 0x80000009, 0x00000000, 0x40000000, 0x00000000,
+ 0x8C8, 0x80025969,
+ 0xA0000000, 0x00000000,
+ 0x8C8, 0x80025167,
+ 0xB0000000, 0x00000000,
+ 0x8CC, 0x08250492,
+ 0x8D0, 0x0000B800,
+ 0x8D4, 0x940008A0,
+ 0x8D8, 0x290B5612,
+ 0x8DC, 0x00000000,
+ 0x8E0, 0x32316407,
+ 0x8E4, 0x4A092925,
+ 0x8E8, 0xFFFFC42C,
+ 0x8EC, 0x99999999,
+ 0x8F0, 0x00009999,
+ 0x8F4, 0x00F80FA1,
+ 0x8F8, 0x400082C0,
+ 0x8FC, 0x00000000,
+ 0x900, 0x00400700,
+ 0x90C, 0x09004000,
+ 0x910, 0x0000FC00,
+ 0x914, 0xD6400404,
+ 0x918, 0x1C1028C0,
+ 0x91C, 0x64B11A1C,
+ 0x920, 0xE0767233,
+ 0x924, 0x055AA500,
+ 0x928, 0x4AB0E4E4,
+ 0x92C, 0xFFFE0000,
+ 0x930, 0xFFFFFFFE,
+ 0x934, 0x001FFFFF,
+ 0x938, 0x00008400,
+ 0x93C, 0x932C0642,
+ 0x940, 0x093E9360,
+ 0x944, 0x08000000,
+ 0x948, 0x04000000,
+ 0x950, 0x02010080,
+ 0x954, 0x86510080,
+ 0x960, 0x00000000,
+ 0x964, 0x00000000,
+ 0x968, 0x00000000,
+ 0x96C, 0x00000000,
+ 0x970, 0x801FFFFF,
+ 0x978, 0x00000000,
+ 0x97C, 0x00000000,
+ 0x980, 0x00000000,
+ 0x984, 0x00000000,
+ 0x988, 0x00000000,
+ 0x98C, 0x03440000,
+ 0x990, 0x27100000,
+ 0x994, 0xFFFF0100,
+ 0x998, 0xFFFFFF5C,
+ 0x99C, 0xFFFFFFFF,
+ 0x9A0, 0x000000FF,
+ 0x9A4, 0x00080080,
+ 0x9A8, 0x0C2F0000,
+ 0x9AC, 0x00560000,
+ 0x9B0, 0x81081008,
+ 0x9B4, 0x00000000,
+ 0x9B8, 0x01081008,
+ 0x9BC, 0x01081008,
+ 0x9D0, 0x00000000,
+ 0x9D4, 0x00000000,
+ 0x9D8, 0x00000000,
+ 0x9DC, 0x00000000,
+ 0x9E4, 0x00000002,
+ 0x9E8, 0x000022D5,
+ 0x9FC, 0xEFFFF7FF,
+ 0xB00, 0xE3100000,
+ 0xB04, 0x0000B000,
+ 0xB0C, 0x31EAA006,
+ 0xB5C, 0x41CFFFFF,
+ 0xC00, 0x00000007,
+ 0xC04, 0x00042020,
+ 0xC08, 0x80410231,
+ 0xC0C, 0x00000000,
+ 0xC10, 0x00000100,
+ 0xC14, 0x01000000,
+ 0xC1C, 0x40000053,
+ 0xC50, 0x00000020,
+ 0xC54, 0x00000000,
+ 0x80000002, 0x00000000, 0x40000000, 0x00000000,
+ 0xC58, 0x3C0A0C14,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0xC58, 0x3C0A0C14,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0xC58, 0x3C0A0C14,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0xC58, 0x3C0A0C14,
+ 0xA0000000, 0x00000000,
+ 0xC58, 0x3C020C14,
+ 0xB0000000, 0x00000000,
+ 0xC5C, 0x0D000058,
+ 0xC60, 0x1B800000,
+ 0xC60, 0x0B800001,
+ 0xC60, 0x05800002,
+ 0xC60, 0x07800003,
+ 0xC60, 0x1A800004,
+ 0xC60, 0x0B800005,
+ 0xC60, 0x05800006,
+ 0xC60, 0x0E800007,
+ 0xC60, 0x1A800008,
+ 0xC60, 0x0B800009,
+ 0xC60, 0x1580000A,
+ 0xC60, 0x0880000B,
+ 0xC60, 0x1A80000C,
+ 0xC60, 0x0B80000D,
+ 0xC60, 0x0580000E,
+ 0xC60, 0x0E80000F,
+ 0xC60, 0x1A800010,
+ 0xC60, 0x0B800011,
+ 0xC60, 0x15800012,
+ 0xC60, 0x08800013,
+ 0xC60, 0x1A800014,
+ 0xC60, 0x0B800015,
+ 0xC60, 0x05800016,
+ 0xC60, 0x07800017,
+ 0xC60, 0x1A800018,
+ 0xC60, 0x0B800019,
+ 0xC60, 0x1580001A,
+ 0xC60, 0x0880001B,
+ 0xC60, 0x1B80001C,
+ 0xC60, 0x0B80001D,
+ 0xC60, 0x0580001E,
+ 0xC60, 0x0780001F,
+ 0xC60, 0x1B800020,
+ 0xC60, 0x0B800021,
+ 0xC60, 0x05800022,
+ 0xC60, 0x07800023,
+ 0xC60, 0x1B800024,
+ 0xC60, 0x0B800025,
+ 0xC60, 0x05800026,
+ 0xC60, 0x07800027,
+ 0xC60, 0x1B800028,
+ 0xC60, 0x0B800029,
+ 0xC60, 0x0580002A,
+ 0xC60, 0x0780002B,
+ 0xC60, 0x1B800030,
+ 0xC60, 0x0B800031,
+ 0xC60, 0x05800032,
+ 0xC60, 0x00800033,
+ 0xC60, 0x1B800034,
+ 0xC60, 0x0B800035,
+ 0xC60, 0x05800036,
+ 0xC60, 0x00800037,
+ 0xC60, 0x1B800038,
+ 0xC60, 0x0B800039,
+ 0xC60, 0x0580003A,
+ 0xC60, 0x0E80803B,
+ 0xC94, 0x01000401,
+ 0xC98, 0x00188000,
+ 0xCA0, 0x00002929,
+ 0xCA4, 0x08040201,
+ 0xCA8, 0x80402010,
+ 0xCAC, 0x77777000,
+ 0xCB0, 0x54775477,
+ 0xCB4, 0x54775477,
+ 0xCB8, 0x00500000,
+ 0xCBC, 0x77700000,
+ 0xCC0, 0x00000010,
+ 0xCC8, 0x00000010,
+ 0xE00, 0x00000007,
+ 0xE04, 0x00042020,
+ 0xE08, 0x80410231,
+ 0xE0C, 0x00000000,
+ 0xE10, 0x00000100,
+ 0xE14, 0x01000000,
+ 0xE1C, 0x40000053,
+ 0xE50, 0x00000020,
+ 0xE54, 0x00000000,
+ 0x80000002, 0x00000000, 0x40000000, 0x00000000,
+ 0xE58, 0x3C0A0C14,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0xE58, 0x3C0A0C14,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0xE58, 0x3C0A0C14,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0xE58, 0x3C0A0C14,
+ 0xA0000000, 0x00000000,
+ 0xE58, 0x3C020C14,
+ 0xB0000000, 0x00000000,
+ 0xE5C, 0x0D000058,
+ 0xE60, 0x1B800000,
+ 0xE60, 0x0B800001,
+ 0xE60, 0x05800002,
+ 0xE60, 0x07800003,
+ 0xE60, 0x1A800004,
+ 0xE60, 0x0B800005,
+ 0xE60, 0x05800006,
+ 0xE60, 0x0E800007,
+ 0xE60, 0x1A800008,
+ 0xE60, 0x0B800009,
+ 0xE60, 0x1580000A,
+ 0xE60, 0x0880000B,
+ 0xE60, 0x1A80000C,
+ 0xE60, 0x0B80000D,
+ 0xE60, 0x0580000E,
+ 0xE60, 0x0E80000F,
+ 0xE60, 0x1A800010,
+ 0xE60, 0x0B800011,
+ 0xE60, 0x15800012,
+ 0xE60, 0x08800013,
+ 0xE60, 0x1A800014,
+ 0xE60, 0x0B800015,
+ 0xE60, 0x05800016,
+ 0xE60, 0x07800017,
+ 0xE60, 0x1A800018,
+ 0xE60, 0x0B800019,
+ 0xE60, 0x1580001A,
+ 0xE60, 0x0880001B,
+ 0xE60, 0x1B80001C,
+ 0xE60, 0x0B80001D,
+ 0xE60, 0x0580001E,
+ 0xE60, 0x0780001F,
+ 0xE60, 0x1B800020,
+ 0xE60, 0x0B800021,
+ 0xE60, 0x05800022,
+ 0xE60, 0x07800023,
+ 0xE60, 0x1B800024,
+ 0xE60, 0x0B800025,
+ 0xE60, 0x05800026,
+ 0xE60, 0x07800027,
+ 0xE60, 0x1B800028,
+ 0xE60, 0x0B800029,
+ 0xE60, 0x0580002A,
+ 0xE60, 0x0780002B,
+ 0xE60, 0x1B800030,
+ 0xE60, 0x0B800031,
+ 0xE60, 0x05800032,
+ 0xE60, 0x00800033,
+ 0xE60, 0x1B800034,
+ 0xE60, 0x0B800035,
+ 0xE60, 0x05800036,
+ 0xE60, 0x00800037,
+ 0xE60, 0x1B800038,
+ 0xE60, 0x0B800039,
+ 0xE60, 0x0580003A,
+ 0xE60, 0x0E80803B,
+ 0xE94, 0x01000401,
+ 0xE98, 0x00188000,
+ 0xEA0, 0x00002929,
+ 0xEA4, 0x08040201,
+ 0xEA8, 0x80402010,
+ 0xEAC, 0x77777000,
+ 0xEB0, 0x54775477,
+ 0xEB4, 0x54775477,
+ 0xEB8, 0x00500000,
+ 0xEBC, 0x77700000,
+ 0x1800, 0x00000007,
+ 0x1804, 0x00042020,
+ 0x1808, 0x80410231,
+ 0x180C, 0x00000000,
+ 0x1810, 0x00000100,
+ 0x1814, 0x01000000,
+ 0x181C, 0x40000053,
+ 0x1850, 0x00000020,
+ 0x1854, 0x00000000,
+ 0x80000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x1858, 0x3C0A0C14,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x1858, 0x3C0A0C14,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x1858, 0x3C0A0C14,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x1858, 0x3C0A0C14,
+ 0xA0000000, 0x00000000,
+ 0x1858, 0x3C020C14,
+ 0xB0000000, 0x00000000,
+ 0x185C, 0x0D000058,
+ 0x1860, 0x1B800000,
+ 0x1860, 0x0B800001,
+ 0x1860, 0x05800002,
+ 0x1860, 0x07800003,
+ 0x1860, 0x1A800004,
+ 0x1860, 0x0B800005,
+ 0x1860, 0x05800006,
+ 0x1860, 0x0E800007,
+ 0x1860, 0x1A800008,
+ 0x1860, 0x0B800009,
+ 0x1860, 0x1580000A,
+ 0x1860, 0x0880000B,
+ 0x1860, 0x1A80000C,
+ 0x1860, 0x0B80000D,
+ 0x1860, 0x0580000E,
+ 0x1860, 0x0E80000F,
+ 0x1860, 0x1A800010,
+ 0x1860, 0x0B800011,
+ 0x1860, 0x15800012,
+ 0x1860, 0x08800013,
+ 0x1860, 0x1A800014,
+ 0x1860, 0x0B800015,
+ 0x1860, 0x05800016,
+ 0x1860, 0x07800017,
+ 0x1860, 0x1A800018,
+ 0x1860, 0x0B800019,
+ 0x1860, 0x1580001A,
+ 0x1860, 0x0880001B,
+ 0x1860, 0x1B80001C,
+ 0x1860, 0x0B80001D,
+ 0x1860, 0x0580001E,
+ 0x1860, 0x0780001F,
+ 0x1860, 0x1B800020,
+ 0x1860, 0x0B800021,
+ 0x1860, 0x05800022,
+ 0x1860, 0x07800023,
+ 0x1860, 0x1B800024,
+ 0x1860, 0x0B800025,
+ 0x1860, 0x05800026,
+ 0x1860, 0x07800027,
+ 0x1860, 0x1B800028,
+ 0x1860, 0x0B800029,
+ 0x1860, 0x0580002A,
+ 0x1860, 0x0780002B,
+ 0x1860, 0x1B800030,
+ 0x1860, 0x0B800031,
+ 0x1860, 0x05800032,
+ 0x1860, 0x00800033,
+ 0x1860, 0x1B800034,
+ 0x1860, 0x0B800035,
+ 0x1860, 0x05800036,
+ 0x1860, 0x00800037,
+ 0x1860, 0x1B800038,
+ 0x1860, 0x0B800039,
+ 0x1860, 0x0580003A,
+ 0x1860, 0x0E80803B,
+ 0x1894, 0x01000401,
+ 0x1898, 0x00188000,
+ 0x18A0, 0x00002929,
+ 0x18A4, 0x08040201,
+ 0x18A8, 0x80402010,
+ 0x18AC, 0x77777000,
+ 0x18B0, 0x54775477,
+ 0x18B4, 0x54775477,
+ 0x18B8, 0x00500000,
+ 0x18BC, 0x77700000,
+ 0x1A00, 0x00000007,
+ 0x1A04, 0x00042020,
+ 0x1A08, 0x80410231,
+ 0x1A0C, 0x00000000,
+ 0x1A10, 0x00000100,
+ 0x1A14, 0x01000000,
+ 0x1A1C, 0x40000053,
+ 0x1A50, 0x00000020,
+ 0x1A54, 0x00000000,
+ 0x80000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x1A58, 0x3C0A0C14,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x1A58, 0x3C0A0C14,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x1A58, 0x3C0A0C14,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x1A58, 0x3C0A0C14,
+ 0xA0000000, 0x00000000,
+ 0x1A58, 0x3C020C14,
+ 0xB0000000, 0x00000000,
+ 0x1A5C, 0x0D000058,
+ 0x1A60, 0x1B800000,
+ 0x1A60, 0x0B800001,
+ 0x1A60, 0x05800002,
+ 0x1A60, 0x07800003,
+ 0x1A60, 0x1A800004,
+ 0x1A60, 0x0B800005,
+ 0x1A60, 0x05800006,
+ 0x1A60, 0x0E800007,
+ 0x1A60, 0x1A800008,
+ 0x1A60, 0x0B800009,
+ 0x1A60, 0x1580000A,
+ 0x1A60, 0x0880000B,
+ 0x1A60, 0x1A80000C,
+ 0x1A60, 0x0B80000D,
+ 0x1A60, 0x0580000E,
+ 0x1A60, 0x0E80000F,
+ 0x1A60, 0x1A800010,
+ 0x1A60, 0x0B800011,
+ 0x1A60, 0x15800012,
+ 0x1A60, 0x08800013,
+ 0x1A60, 0x1A800014,
+ 0x1A60, 0x0B800015,
+ 0x1A60, 0x05800016,
+ 0x1A60, 0x07800017,
+ 0x1A60, 0x1A800018,
+ 0x1A60, 0x0B800019,
+ 0x1A60, 0x1580001A,
+ 0x1A60, 0x0880001B,
+ 0x1A60, 0x1B80001C,
+ 0x1A60, 0x0B80001D,
+ 0x1A60, 0x0580001E,
+ 0x1A60, 0x0780001F,
+ 0x1A60, 0x1B800020,
+ 0x1A60, 0x0B800021,
+ 0x1A60, 0x05800022,
+ 0x1A60, 0x07800023,
+ 0x1A60, 0x1B800024,
+ 0x1A60, 0x0B800025,
+ 0x1A60, 0x05800026,
+ 0x1A60, 0x07800027,
+ 0x1A60, 0x1B800028,
+ 0x1A60, 0x0B800029,
+ 0x1A60, 0x0580002A,
+ 0x1A60, 0x0780002B,
+ 0x1A60, 0x1B800030,
+ 0x1A60, 0x0B800031,
+ 0x1A60, 0x05800032,
+ 0x1A60, 0x00800033,
+ 0x1A60, 0x1B800034,
+ 0x1A60, 0x0B800035,
+ 0x1A60, 0x05800036,
+ 0x1A60, 0x00800037,
+ 0x1A60, 0x1B800038,
+ 0x1A60, 0x0B800039,
+ 0x1A60, 0x0580003A,
+ 0x1A60, 0x0E80803B,
+ 0x1A94, 0x01000401,
+ 0x1A98, 0x00188000,
+ 0x1AA0, 0x00002929,
+ 0x1AA4, 0x08040201,
+ 0x1AA8, 0x80402010,
+ 0x1AAC, 0x77777000,
+ 0x1AB0, 0x54775477,
+ 0x1AB4, 0x54775477,
+ 0x1AB8, 0x00500000,
+ 0x1ABC, 0x77700000,
+ 0x1904, 0x00030000,
+ 0x1914, 0x00030000,
+ 0x1984, 0x03000000,
+ 0x1988, 0x00000087,
+ 0x198C, 0x00000007,
+ 0x1990, 0xFFAA5500,
+ 0x1994, 0x00000077,
+ 0x1998, 0x12801000,
+ 0x1998, 0x12801000,
+ 0x1998, 0x12801001,
+ 0x1998, 0x12801002,
+ 0x1998, 0x12801003,
+ 0x1998, 0x12801004,
+ 0x1998, 0x12801005,
+ 0x1998, 0x12801006,
+ 0x1998, 0x12801007,
+ 0x1998, 0x12801008,
+ 0x1998, 0x12801009,
+ 0x1998, 0x1280100A,
+ 0x1998, 0x1280100B,
+ 0x1998, 0x1280100C,
+ 0x1998, 0x1280100D,
+ 0x1998, 0x1280100E,
+ 0x1998, 0x1280100F,
+ 0x1998, 0x12801010,
+ 0x1998, 0x12801011,
+ 0x1998, 0x12801012,
+ 0x1998, 0x12801013,
+ 0x1998, 0x12801014,
+ 0x1998, 0x12801015,
+ 0x1998, 0x12801016,
+ 0x1998, 0x12801017,
+ 0x1998, 0x12801018,
+ 0x1998, 0x12801019,
+ 0x1998, 0x1280101A,
+ 0x1998, 0x1280101B,
+ 0x1998, 0x1280101C,
+ 0x1998, 0x1280101D,
+ 0x1998, 0x1280101E,
+ 0x1998, 0x1280101F,
+ 0x1998, 0x12801020,
+ 0x1998, 0x12801021,
+ 0x1998, 0x12801022,
+ 0x1998, 0x12801023,
+ 0x1998, 0x1280102C,
+ 0x1998, 0x1280102D,
+ 0x1998, 0x1280102E,
+ 0x1998, 0x1280102F,
+ 0x1998, 0x12801030,
+ 0x1998, 0x12801031,
+ 0x1998, 0x12801032,
+ 0x1998, 0x12801033,
+ 0x1998, 0x12801034,
+ 0x1998, 0x12801035,
+ 0x1998, 0x12801036,
+ 0x1998, 0x12801037,
+ 0x1998, 0x12801038,
+ 0x1998, 0x12801039,
+ 0x1998, 0x1280103A,
+ 0x1998, 0x1280103B,
+ 0x1998, 0x1280103C,
+ 0x1998, 0x1280103D,
+ 0x1998, 0x1280103E,
+ 0x1998, 0x1280103F,
+ 0x1998, 0x12801040,
+ 0x1998, 0x12801041,
+ 0x1998, 0x12801042,
+ 0x1998, 0x12801043,
+ 0x1998, 0x12801044,
+ 0x1998, 0x12801045,
+ 0x1998, 0x12801046,
+ 0x1998, 0x12801047,
+ 0x1998, 0x12801048,
+ 0x1998, 0x12801049,
+ 0x1998, 0x12801100,
+ 0x1998, 0x12801101,
+ 0x1998, 0x12801102,
+ 0x1998, 0x12801103,
+ 0x1998, 0x12801104,
+ 0x1998, 0x12801105,
+ 0x1998, 0x12801106,
+ 0x1998, 0x12801107,
+ 0x1998, 0x12801108,
+ 0x1998, 0x12801109,
+ 0x1998, 0x1280110A,
+ 0x1998, 0x1280110B,
+ 0x1998, 0x1280110C,
+ 0x1998, 0x1280110D,
+ 0x1998, 0x1280110E,
+ 0x1998, 0x1280110F,
+ 0x1998, 0x12801110,
+ 0x1998, 0x12801111,
+ 0x1998, 0x12801112,
+ 0x1998, 0x12801113,
+ 0x1998, 0x12801114,
+ 0x1998, 0x12801115,
+ 0x1998, 0x12801116,
+ 0x1998, 0x12801117,
+ 0x1998, 0x12801118,
+ 0x1998, 0x12801119,
+ 0x1998, 0x1280111A,
+ 0x1998, 0x1280111B,
+ 0x1998, 0x1280111C,
+ 0x1998, 0x1280111D,
+ 0x1998, 0x1280111E,
+ 0x1998, 0x1280111F,
+ 0x1998, 0x12801120,
+ 0x1998, 0x12801121,
+ 0x1998, 0x12801122,
+ 0x1998, 0x12801123,
+ 0x1998, 0x1280112C,
+ 0x1998, 0x1280112D,
+ 0x1998, 0x1280112E,
+ 0x1998, 0x1280112F,
+ 0x1998, 0x12801130,
+ 0x1998, 0x12801131,
+ 0x1998, 0x12801132,
+ 0x1998, 0x12801133,
+ 0x1998, 0x12801134,
+ 0x1998, 0x12801135,
+ 0x1998, 0x12801136,
+ 0x1998, 0x12801137,
+ 0x1998, 0x12801138,
+ 0x1998, 0x12801139,
+ 0x1998, 0x1280113A,
+ 0x1998, 0x1280113B,
+ 0x1998, 0x1280113C,
+ 0x1998, 0x1280113D,
+ 0x1998, 0x1280113E,
+ 0x1998, 0x1280113F,
+ 0x1998, 0x12801140,
+ 0x1998, 0x12801141,
+ 0x1998, 0x12801142,
+ 0x1998, 0x12801143,
+ 0x1998, 0x12801144,
+ 0x1998, 0x12801145,
+ 0x1998, 0x12801146,
+ 0x1998, 0x12801147,
+ 0x1998, 0x12801148,
+ 0x1998, 0x12801149,
+ 0x1998, 0x12801200,
+ 0x1998, 0x12801201,
+ 0x1998, 0x12801202,
+ 0x1998, 0x12801203,
+ 0x1998, 0x12801204,
+ 0x1998, 0x12801205,
+ 0x1998, 0x12801206,
+ 0x1998, 0x12801207,
+ 0x1998, 0x12801208,
+ 0x1998, 0x12801209,
+ 0x1998, 0x1280120A,
+ 0x1998, 0x1280120B,
+ 0x1998, 0x1280120C,
+ 0x1998, 0x1280120D,
+ 0x1998, 0x1280120E,
+ 0x1998, 0x1280120F,
+ 0x1998, 0x12801210,
+ 0x1998, 0x12801211,
+ 0x1998, 0x12801212,
+ 0x1998, 0x12801213,
+ 0x1998, 0x12801214,
+ 0x1998, 0x12801215,
+ 0x1998, 0x12801216,
+ 0x1998, 0x12801217,
+ 0x1998, 0x12801218,
+ 0x1998, 0x12801219,
+ 0x1998, 0x1280121A,
+ 0x1998, 0x1280121B,
+ 0x1998, 0x1280121C,
+ 0x1998, 0x1280121D,
+ 0x1998, 0x1280121E,
+ 0x1998, 0x1280121F,
+ 0x1998, 0x12801220,
+ 0x1998, 0x12801221,
+ 0x1998, 0x12801222,
+ 0x1998, 0x12801223,
+ 0x1998, 0x1280122C,
+ 0x1998, 0x1280122D,
+ 0x1998, 0x1280122E,
+ 0x1998, 0x1280122F,
+ 0x1998, 0x12801230,
+ 0x1998, 0x12801231,
+ 0x1998, 0x12801232,
+ 0x1998, 0x12801233,
+ 0x1998, 0x12801234,
+ 0x1998, 0x12801235,
+ 0x1998, 0x12801236,
+ 0x1998, 0x12801237,
+ 0x1998, 0x12801238,
+ 0x1998, 0x12801239,
+ 0x1998, 0x1280123A,
+ 0x1998, 0x1280123B,
+ 0x1998, 0x1280123C,
+ 0x1998, 0x1280123D,
+ 0x1998, 0x1280123E,
+ 0x1998, 0x1280123F,
+ 0x1998, 0x12801240,
+ 0x1998, 0x12801241,
+ 0x1998, 0x12801242,
+ 0x1998, 0x12801243,
+ 0x1998, 0x12801244,
+ 0x1998, 0x12801245,
+ 0x1998, 0x12801246,
+ 0x1998, 0x12801247,
+ 0x1998, 0x12801248,
+ 0x1998, 0x12801249,
+ 0x1998, 0x12801300,
+ 0x1998, 0x12801301,
+ 0x1998, 0x12801302,
+ 0x1998, 0x12801303,
+ 0x1998, 0x12801304,
+ 0x1998, 0x12801305,
+ 0x1998, 0x12801306,
+ 0x1998, 0x12801307,
+ 0x1998, 0x12801308,
+ 0x1998, 0x12801309,
+ 0x1998, 0x1280130A,
+ 0x1998, 0x1280130B,
+ 0x1998, 0x1280130C,
+ 0x1998, 0x1280130D,
+ 0x1998, 0x1280130E,
+ 0x1998, 0x1280130F,
+ 0x1998, 0x12801310,
+ 0x1998, 0x12801311,
+ 0x1998, 0x12801312,
+ 0x1998, 0x12801313,
+ 0x1998, 0x12801314,
+ 0x1998, 0x12801315,
+ 0x1998, 0x12801316,
+ 0x1998, 0x12801317,
+ 0x1998, 0x12801318,
+ 0x1998, 0x12801319,
+ 0x1998, 0x1280131A,
+ 0x1998, 0x1280131B,
+ 0x1998, 0x1280131C,
+ 0x1998, 0x1280131D,
+ 0x1998, 0x1280131E,
+ 0x1998, 0x1280131F,
+ 0x1998, 0x12801320,
+ 0x1998, 0x12801321,
+ 0x1998, 0x12801322,
+ 0x1998, 0x12801323,
+ 0x1998, 0x1280132C,
+ 0x1998, 0x1280132D,
+ 0x1998, 0x1280132E,
+ 0x1998, 0x1280132F,
+ 0x1998, 0x12801330,
+ 0x1998, 0x12801331,
+ 0x1998, 0x12801332,
+ 0x1998, 0x12801333,
+ 0x1998, 0x12801334,
+ 0x1998, 0x12801335,
+ 0x1998, 0x12801336,
+ 0x1998, 0x12801337,
+ 0x1998, 0x12801338,
+ 0x1998, 0x12801339,
+ 0x1998, 0x1280133A,
+ 0x1998, 0x1280133B,
+ 0x1998, 0x1280133C,
+ 0x1998, 0x1280133D,
+ 0x1998, 0x1280133E,
+ 0x1998, 0x1280133F,
+ 0x1998, 0x12801340,
+ 0x1998, 0x12801341,
+ 0x1998, 0x12801342,
+ 0x1998, 0x12801343,
+ 0x1998, 0x12801344,
+ 0x1998, 0x12801345,
+ 0x1998, 0x12801346,
+ 0x1998, 0x12801347,
+ 0x1998, 0x12801348,
+ 0x1998, 0x12801349,
+ 0x19D4, 0x88888888,
+ 0x19D8, 0x00000888,
+ 0xB00, 0xE3100100,
+ 0xB00, 0xE7100100,
+ 0xC60, 0x15808002,
+ 0xC60, 0x01808003,
+ 0xE60, 0x15808002,
+ 0xE60, 0x01808003,
+ 0x1860, 0x15808002,
+ 0x1860, 0x01808003,
+ 0x1A60, 0x15808002,
+ 0x1A60, 0x01808003,
+ 0xB00, 0xE3100100,
+ 0xC5C, 0x0D080058,
+ 0xE5C, 0x0D080058,
+ 0x185C, 0x0D080058,
+ 0x1A5C, 0x0D080058,
+ 0xC5C, 0x0D000058,
+ 0xE5C, 0x0D000058,
+ 0x185C, 0x0D000058,
+ 0x1A5C, 0x0D000058,
+ 0xC60, 0x05808002,
+ 0xC60, 0x0E808003,
+ 0xE60, 0x05808002,
+ 0xE60, 0x0E808003,
+ 0x1860, 0x05808002,
+ 0x1860, 0x0E808003,
+ 0x1A60, 0x05808002,
+ 0x1A60, 0x0E808003,
+ 0xB00, 0xE7100100,
+ 0xB00, 0xE3100100,
+ 0xB00, 0xE3100000,
+ 0x1C38, 0x00000002,
+ 0xA00, 0x00D047C8,
+ 0xA04, 0x46FF800C,
+ 0xA08, 0x8C838300,
+ 0xA0C, 0x2E7E000F,
+ 0xA10, 0x9500BB78,
+ 0xA14, 0x11144028,
+ 0xA18, 0x00881117,
+ 0xA1C, 0x89140F00,
+ 0xA20, 0x1A1B0030,
+ 0xA24, 0x090E1317,
+ 0xA28, 0x00000204,
+ 0xA2C, 0x00900000,
+ 0xA70, 0x101FFF00,
+ 0xA74, 0x00000128,
+ 0xA78, 0x00000900,
+ 0xA7C, 0x225B0606,
+ 0xA80, 0x218075B2,
+ 0xA84, 0x9C1F8C00,
+ 0x1B04, 0xE24628D2,
+ 0x1B10, 0x88010D46,
+ 0x1B14, 0x00000000,
+ 0x1B18, 0x00292903,
+ 0x1B00, 0xF8000000,
+ 0x1B00, 0xF800D000,
+ 0x1B00, 0xF801F000,
+ 0x1B1C, 0xA2123DB2,
+ 0x1B20, 0x07040001,
+ 0x1B24, 0x07060807,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B28, 0xC0060324,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B28, 0xC0060324,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B28, 0xC0060324,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B28, 0xC0060324,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B28, 0xC0060324,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B28, 0xC0060324,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B28, 0xC0060324,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B28, 0xC0060324,
+ 0xA0000000, 0x00000000,
+ 0x1B28, 0xC0060348,
+ 0xB0000000, 0x00000000,
+ 0x1B2C, 0x20000003,
+ 0x1B30, 0x20000000,
+ 0x1B38, 0x20000000,
+ 0x1B3C, 0x20000000,
+ 0x1BD4, 0x00000001,
+ 0x1B94, 0x80000000,
+ 0x1B34, 0x00000000,
+ 0x1B34, 0x00000002,
+ 0x1B34, 0x00000000,
+ 0x1B00, 0xF8000002,
+ 0x1B00, 0xF800D002,
+ 0x1B00, 0xF801F002,
+ 0x1B1C, 0xA2123DB2,
+ 0x1B20, 0x07040001,
+ 0x1B24, 0x07060807,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B28, 0xC0060324,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B28, 0xC0060324,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B28, 0xC0060324,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B28, 0xC0060324,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B28, 0xC0060324,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B28, 0xC0060324,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B28, 0xC0060324,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B28, 0xC0060324,
+ 0xA0000000, 0x00000000,
+ 0x1B28, 0xC0060348,
+ 0xB0000000, 0x00000000,
+ 0x1B2C, 0x20000003,
+ 0x1B30, 0x20000000,
+ 0x1B38, 0x20000000,
+ 0x1B3C, 0x20000000,
+ 0x1BD4, 0x00000001,
+ 0x1B94, 0x80000000,
+ 0x1B34, 0x00000000,
+ 0x1B34, 0x00000002,
+ 0x1B34, 0x00000000,
+ 0x1B00, 0xF8000004,
+ 0x1B00, 0xF800D004,
+ 0x1B00, 0xF801F004,
+ 0x1B1C, 0xA2123DB2,
+ 0x1B20, 0x07040001,
+ 0x1B24, 0x07060807,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B28, 0xC0060324,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B28, 0xC0060324,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B28, 0xC0060324,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B28, 0xC0060324,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B28, 0xC0060324,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B28, 0xC0060324,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B28, 0xC0060324,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B28, 0xC0060324,
+ 0xA0000000, 0x00000000,
+ 0x1B28, 0xC0060348,
+ 0xB0000000, 0x00000000,
+ 0x1B2C, 0x20000003,
+ 0x1B30, 0x20000000,
+ 0x1B38, 0x20000000,
+ 0x1B3C, 0x20000000,
+ 0x1BD4, 0x00000001,
+ 0x1B94, 0x80000000,
+ 0x1B34, 0x00000000,
+ 0x1B34, 0x00000002,
+ 0x1B34, 0x00000000,
+ 0x1B00, 0xF8000006,
+ 0x1B00, 0xF800D006,
+ 0x1B00, 0xF801F006,
+ 0x1B1C, 0xA2123DB2,
+ 0x1B20, 0x07040001,
+ 0x1B24, 0x07060807,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B28, 0xC0060324,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B28, 0xC0060324,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B28, 0xC0060324,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B28, 0xC0060324,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B28, 0xC0060324,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B28, 0xC0060324,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B28, 0xC0060324,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B28, 0xC0060324,
+ 0xA0000000, 0x00000000,
+ 0x1B28, 0xC0060348,
+ 0xB0000000, 0x00000000,
+ 0x1B2C, 0x20000003,
+ 0x1B30, 0x20000000,
+ 0x1B38, 0x20000000,
+ 0x1B3C, 0x20000000,
+ 0x1BD4, 0x00000001,
+ 0x1B94, 0x80000000,
+ 0x1B34, 0x00000000,
+ 0x1B34, 0x00000002,
+ 0x1B34, 0x00000000,
+ 0x1B00, 0xF8000000,
+ 0x1B80, 0x00000007,
+ 0x1B80, 0x09060005,
+ 0x1B80, 0x09060007,
+ 0x1B80, 0x0FFE0015,
+ 0x1B80, 0x0FFE0017,
+ 0x1B80, 0x00240025,
+ 0x1B80, 0x00240027,
+ 0x1B80, 0x00040035,
+ 0x1B80, 0x00040037,
+ 0x1B80, 0x05C00045,
+ 0x1B80, 0x05C00047,
+ 0x1B80, 0x00070055,
+ 0x1B80, 0x00070057,
+ 0x1B80, 0x64000065,
+ 0x1B80, 0x64000067,
+ 0x1B80, 0x00020075,
+ 0x1B80, 0x00020077,
+ 0x1B80, 0x00080085,
+ 0x1B80, 0x00080087,
+ 0x1B80, 0x80000095,
+ 0x1B80, 0x80000097,
+ 0x1B80, 0x090100A5,
+ 0x1B80, 0x090100A7,
+ 0x1B80, 0x0F0200B5,
+ 0x1B80, 0x0F0200B7,
+ 0x1B80, 0x002400C5,
+ 0x1B80, 0x002400C7,
+ 0x1B80, 0x000400D5,
+ 0x1B80, 0x000400D7,
+ 0x1B80, 0x05C000E5,
+ 0x1B80, 0x05C000E7,
+ 0x1B80, 0x000700F5,
+ 0x1B80, 0x000700F7,
+ 0x1B80, 0x64020105,
+ 0x1B80, 0x64020107,
+ 0x1B80, 0x00020115,
+ 0x1B80, 0x00020117,
+ 0x1B80, 0x00040125,
+ 0x1B80, 0x00040127,
+ 0x1B80, 0x4A000135,
+ 0x1B80, 0x4A000137,
+ 0x1B80, 0x4B040145,
+ 0x1B80, 0x4B040147,
+ 0x1B80, 0x85030155,
+ 0x1B80, 0x85030157,
+ 0x1B80, 0x40010165,
+ 0x1B80, 0x40010167,
+ 0x1B80, 0xE0290175,
+ 0x1B80, 0xE0290177,
+ 0x1B80, 0x00040185,
+ 0x1B80, 0x00040187,
+ 0x1B80, 0x4B050195,
+ 0x1B80, 0x4B050197,
+ 0x1B80, 0x860301A5,
+ 0x1B80, 0x860301A7,
+ 0x1B80, 0x400301B5,
+ 0x1B80, 0x400301B7,
+ 0x1B80, 0xE02901C5,
+ 0x1B80, 0xE02901C7,
+ 0x1B80, 0x000401D5,
+ 0x1B80, 0x000401D7,
+ 0x1B80, 0x4B0601E5,
+ 0x1B80, 0x4B0601E7,
+ 0x1B80, 0x870301F5,
+ 0x1B80, 0x870301F7,
+ 0x1B80, 0x40050205,
+ 0x1B80, 0x40050207,
+ 0x1B80, 0xE0290215,
+ 0x1B80, 0xE0290217,
+ 0x1B80, 0x00040225,
+ 0x1B80, 0x00040227,
+ 0x1B80, 0x4B070235,
+ 0x1B80, 0x4B070237,
+ 0x1B80, 0x88030245,
+ 0x1B80, 0x88030247,
+ 0x1B80, 0x40070255,
+ 0x1B80, 0x40070257,
+ 0x1B80, 0xE0290265,
+ 0x1B80, 0xE0290267,
+ 0x1B80, 0x4B000275,
+ 0x1B80, 0x4B000277,
+ 0x1B80, 0x30000285,
+ 0x1B80, 0x30000287,
+ 0x1B80, 0xFE100295,
+ 0x1B80, 0xFE100297,
+ 0x1B80, 0xFF1002A5,
+ 0x1B80, 0xFF1002A7,
+ 0x1B80, 0xE18602B5,
+ 0x1B80, 0xE18602B7,
+ 0x1B80, 0xF00A02C5,
+ 0x1B80, 0xF00A02C7,
+ 0x1B80, 0xF10A02D5,
+ 0x1B80, 0xF10A02D7,
+ 0x1B80, 0xF20A02E5,
+ 0x1B80, 0xF20A02E7,
+ 0x1B80, 0xF30802F5,
+ 0x1B80, 0xF30802F7,
+ 0x1B80, 0xF4070305,
+ 0x1B80, 0xF4070307,
+ 0x1B80, 0xF5060315,
+ 0x1B80, 0xF5060317,
+ 0x1B80, 0xF7060325,
+ 0x1B80, 0xF7060327,
+ 0x1B80, 0xF8050335,
+ 0x1B80, 0xF8050337,
+ 0x1B80, 0xF9040345,
+ 0x1B80, 0xF9040347,
+ 0x1B80, 0x00010355,
+ 0x1B80, 0x00010357,
+ 0x1B80, 0x303B0365,
+ 0x1B80, 0x303B0367,
+ 0x1B80, 0x30500375,
+ 0x1B80, 0x30500377,
+ 0x1B80, 0x305C0385,
+ 0x1B80, 0x305C0387,
+ 0x1B80, 0x31D50395,
+ 0x1B80, 0x31D50397,
+ 0x1B80, 0x31C503A5,
+ 0x1B80, 0x31C503A7,
+ 0x1B80, 0x4D0403B5,
+ 0x1B80, 0x4D0403B7,
+ 0x1B80, 0x2EF003C5,
+ 0x1B80, 0x2EF003C7,
+ 0x1B80, 0x000203D5,
+ 0x1B80, 0x000203D7,
+ 0x1B80, 0x208003E5,
+ 0x1B80, 0x208003E7,
+ 0x1B80, 0x000003F5,
+ 0x1B80, 0x000003F7,
+ 0x1B80, 0x4D000405,
+ 0x1B80, 0x4D000407,
+ 0x1B80, 0x55070415,
+ 0x1B80, 0x55070417,
+ 0x1B80, 0xE1230425,
+ 0x1B80, 0xE1230427,
+ 0x1B80, 0xE1230435,
+ 0x1B80, 0xE1230437,
+ 0x1B80, 0x4D040445,
+ 0x1B80, 0x4D040447,
+ 0x1B80, 0x20800455,
+ 0x1B80, 0x20800457,
+ 0x1B80, 0x84000465,
+ 0x1B80, 0x84000467,
+ 0x1B80, 0x4D000475,
+ 0x1B80, 0x4D000477,
+ 0x1B80, 0x550F0485,
+ 0x1B80, 0x550F0487,
+ 0x1B80, 0xE1230495,
+ 0x1B80, 0xE1230497,
+ 0x1B80, 0x4F0204A5,
+ 0x1B80, 0x4F0204A7,
+ 0x1B80, 0x4E0004B5,
+ 0x1B80, 0x4E0004B7,
+ 0x1B80, 0x530204C5,
+ 0x1B80, 0x530204C7,
+ 0x1B80, 0x520104D5,
+ 0x1B80, 0x520104D7,
+ 0x1B80, 0xE12704E5,
+ 0x1B80, 0xE12704E7,
+ 0x1B80, 0x000104F5,
+ 0x1B80, 0x000104F7,
+ 0x1B80, 0x5C720505,
+ 0x1B80, 0x5C720507,
+ 0x1B80, 0xE1320515,
+ 0x1B80, 0xE1320517,
+ 0x1B80, 0x54E50525,
+ 0x1B80, 0x54E50527,
+ 0x1B80, 0x54BF0535,
+ 0x1B80, 0x54BF0537,
+ 0x1B80, 0x54C50545,
+ 0x1B80, 0x54C50547,
+ 0x1B80, 0x54BE0555,
+ 0x1B80, 0x54BE0557,
+ 0x1B80, 0x54DF0565,
+ 0x1B80, 0x54DF0567,
+ 0x1B80, 0x0BA60575,
+ 0x1B80, 0x0BA60577,
+ 0x1B80, 0xF3130585,
+ 0x1B80, 0xF3130587,
+ 0x1B80, 0xF41E0595,
+ 0x1B80, 0xF41E0597,
+ 0x1B80, 0xF53C05A5,
+ 0x1B80, 0xF53C05A7,
+ 0x1B80, 0x000105B5,
+ 0x1B80, 0x000105B7,
+ 0x1B80, 0x620605C5,
+ 0x1B80, 0x620605C7,
+ 0x1B80, 0x600605D5,
+ 0x1B80, 0x600605D7,
+ 0x1B80, 0xE1A905E5,
+ 0x1B80, 0xE1A905E7,
+ 0x1B80, 0x0C0005F5,
+ 0x1B80, 0x0C0005F7,
+ 0x1B80, 0x5C720605,
+ 0x1B80, 0x5C720607,
+ 0x1B80, 0xE1320615,
+ 0x1B80, 0xE1320617,
+ 0x1B80, 0x5CF10625,
+ 0x1B80, 0x5CF10627,
+ 0x1B80, 0x0C010635,
+ 0x1B80, 0x0C010637,
+ 0x1B80, 0xF2020645,
+ 0x1B80, 0xF2020647,
+ 0x1B80, 0x30D60655,
+ 0x1B80, 0x30D60657,
+ 0x1B80, 0x0AC60665,
+ 0x1B80, 0x0AC60667,
+ 0x1B80, 0xE1B60675,
+ 0x1B80, 0xE1B60677,
+ 0x1B80, 0xE1580685,
+ 0x1B80, 0xE1580687,
+ 0x1B80, 0x54E50695,
+ 0x1B80, 0x54E50697,
+ 0x1B80, 0x000106A5,
+ 0x1B80, 0x000106A7,
+ 0x1B80, 0x560106B5,
+ 0x1B80, 0x560106B7,
+ 0x1B80, 0x5CE206C5,
+ 0x1B80, 0x5CE206C7,
+ 0x1B80, 0x0AE106D5,
+ 0x1B80, 0x0AE106D7,
+ 0x1B80, 0x630C06E5,
+ 0x1B80, 0x630C06E7,
+ 0x1B80, 0xE13F06F5,
+ 0x1B80, 0xE13F06F7,
+ 0x1B80, 0x00270705,
+ 0x1B80, 0x00270707,
+ 0x1B80, 0xE16C0715,
+ 0x1B80, 0xE16C0717,
+ 0x1B80, 0x00020725,
+ 0x1B80, 0x00020727,
+ 0x1B80, 0x002A0735,
+ 0x1B80, 0x002A0737,
+ 0x1B80, 0x07140745,
+ 0x1B80, 0x07140747,
+ 0x1B80, 0x00020755,
+ 0x1B80, 0x00020757,
+ 0x1B80, 0x30C30765,
+ 0x1B80, 0x30C30767,
+ 0x1B80, 0x56010775,
+ 0x1B80, 0x56010777,
+ 0x1B80, 0x5CE20785,
+ 0x1B80, 0x5CE20787,
+ 0x1B80, 0x0AE10795,
+ 0x1B80, 0x0AE10797,
+ 0x1B80, 0x631707A5,
+ 0x1B80, 0x631707A7,
+ 0x1B80, 0xE13F07B5,
+ 0x1B80, 0xE13F07B7,
+ 0x1B80, 0x002507C5,
+ 0x1B80, 0x002507C7,
+ 0x1B80, 0xE16C07D5,
+ 0x1B80, 0xE16C07D7,
+ 0x1B80, 0x000207E5,
+ 0x1B80, 0x000207E7,
+ 0x1B80, 0x630F07F5,
+ 0x1B80, 0x630F07F7,
+ 0x1B80, 0xE13F0805,
+ 0x1B80, 0xE13F0807,
+ 0x1B80, 0x63070815,
+ 0x1B80, 0x63070817,
+ 0x1B80, 0xE13F0825,
+ 0x1B80, 0xE13F0827,
+ 0x1B80, 0x07140835,
+ 0x1B80, 0x07140837,
+ 0x1B80, 0x56000845,
+ 0x1B80, 0x56000847,
+ 0x1B80, 0x5CF20855,
+ 0x1B80, 0x5CF20857,
+ 0x1B80, 0x0AF10865,
+ 0x1B80, 0x0AF10867,
+ 0x1B80, 0x07140875,
+ 0x1B80, 0x07140877,
+ 0x1B80, 0x07140885,
+ 0x1B80, 0x07140887,
+ 0x1B80, 0x630F0895,
+ 0x1B80, 0x630F0897,
+ 0x1B80, 0xE13F08A5,
+ 0x1B80, 0xE13F08A7,
+ 0x1B80, 0x631708B5,
+ 0x1B80, 0x631708B7,
+ 0x1B80, 0xE13F08C5,
+ 0x1B80, 0xE13F08C7,
+ 0x1B80, 0x002508D5,
+ 0x1B80, 0x002508D7,
+ 0x1B80, 0xE16C08E5,
+ 0x1B80, 0xE16C08E7,
+ 0x1B80, 0x000208F5,
+ 0x1B80, 0x000208F7,
+ 0x1B80, 0x30C30905,
+ 0x1B80, 0x30C30907,
+ 0x1B80, 0xE1A90915,
+ 0x1B80, 0xE1A90917,
+ 0x1B80, 0x62060925,
+ 0x1B80, 0x62060927,
+ 0x1B80, 0x60060935,
+ 0x1B80, 0x60060937,
+ 0x1B80, 0xE1160945,
+ 0x1B80, 0xE1160947,
+ 0x1B80, 0x54BE0955,
+ 0x1B80, 0x54BE0957,
+ 0x1B80, 0x56010965,
+ 0x1B80, 0x56010967,
+ 0x1B80, 0x5CE20975,
+ 0x1B80, 0x5CE20977,
+ 0x1B80, 0x0AE10985,
+ 0x1B80, 0x0AE10987,
+ 0x1B80, 0x633A0995,
+ 0x1B80, 0x633A0997,
+ 0x1B80, 0xE13F09A5,
+ 0x1B80, 0xE13F09A7,
+ 0x1B80, 0x633709B5,
+ 0x1B80, 0x633709B7,
+ 0x1B80, 0xE13F09C5,
+ 0x1B80, 0xE13F09C7,
+ 0x1B80, 0x632F09D5,
+ 0x1B80, 0x632F09D7,
+ 0x1B80, 0xE13F09E5,
+ 0x1B80, 0xE13F09E7,
+ 0x1B80, 0x632709F5,
+ 0x1B80, 0x632709F7,
+ 0x1B80, 0xE13F0A05,
+ 0x1B80, 0xE13F0A07,
+ 0x1B80, 0x631F0A15,
+ 0x1B80, 0x631F0A17,
+ 0x1B80, 0xE13F0A25,
+ 0x1B80, 0xE13F0A27,
+ 0x1B80, 0x63170A35,
+ 0x1B80, 0x63170A37,
+ 0x1B80, 0xE13F0A45,
+ 0x1B80, 0xE13F0A47,
+ 0x1B80, 0x630F0A55,
+ 0x1B80, 0x630F0A57,
+ 0x1B80, 0xE13F0A65,
+ 0x1B80, 0xE13F0A67,
+ 0x1B80, 0x63070A75,
+ 0x1B80, 0x63070A77,
+ 0x1B80, 0xE13F0A85,
+ 0x1B80, 0xE13F0A87,
+ 0x1B80, 0xE16C0A95,
+ 0x1B80, 0xE16C0A97,
+ 0x1B80, 0x56000AA5,
+ 0x1B80, 0x56000AA7,
+ 0x1B80, 0x5CF20AB5,
+ 0x1B80, 0x5CF20AB7,
+ 0x1B80, 0x0AF10AC5,
+ 0x1B80, 0x0AF10AC7,
+ 0x1B80, 0xF5040AD5,
+ 0x1B80, 0xF5040AD7,
+ 0x1B80, 0xE13F0AE5,
+ 0x1B80, 0xE13F0AE7,
+ 0x1B80, 0xE16C0AF5,
+ 0x1B80, 0xE16C0AF7,
+ 0x1B80, 0x30B30B05,
+ 0x1B80, 0x30B30B07,
+ 0x1B80, 0x07140B15,
+ 0x1B80, 0x07140B17,
+ 0x1B80, 0x07140B25,
+ 0x1B80, 0x07140B27,
+ 0x1B80, 0x630F0B35,
+ 0x1B80, 0x630F0B37,
+ 0x1B80, 0xE13F0B45,
+ 0x1B80, 0xE13F0B47,
+ 0x1B80, 0x63170B55,
+ 0x1B80, 0x63170B57,
+ 0x1B80, 0xE13F0B65,
+ 0x1B80, 0xE13F0B67,
+ 0x1B80, 0x631F0B75,
+ 0x1B80, 0x631F0B77,
+ 0x1B80, 0xE13F0B85,
+ 0x1B80, 0xE13F0B87,
+ 0x1B80, 0x63270B95,
+ 0x1B80, 0x63270B97,
+ 0x1B80, 0xE13F0BA5,
+ 0x1B80, 0xE13F0BA7,
+ 0x1B80, 0x632F0BB5,
+ 0x1B80, 0x632F0BB7,
+ 0x1B80, 0xE13F0BC5,
+ 0x1B80, 0xE13F0BC7,
+ 0x1B80, 0x63370BD5,
+ 0x1B80, 0x63370BD7,
+ 0x1B80, 0xE13F0BE5,
+ 0x1B80, 0xE13F0BE7,
+ 0x1B80, 0x633A0BF5,
+ 0x1B80, 0x633A0BF7,
+ 0x1B80, 0xE13F0C05,
+ 0x1B80, 0xE13F0C07,
+ 0x1B80, 0xF60B0C15,
+ 0x1B80, 0xF60B0C17,
+ 0x1B80, 0xF7170C25,
+ 0x1B80, 0xF7170C27,
+ 0x1B80, 0x4D300C35,
+ 0x1B80, 0x4D300C37,
+ 0x1B80, 0x57040C45,
+ 0x1B80, 0x57040C47,
+ 0x1B80, 0x57000C55,
+ 0x1B80, 0x57000C57,
+ 0x1B80, 0x96000C65,
+ 0x1B80, 0x96000C67,
+ 0x1B80, 0x57080C75,
+ 0x1B80, 0x57080C77,
+ 0x1B80, 0x57000C85,
+ 0x1B80, 0x57000C87,
+ 0x1B80, 0x95000C95,
+ 0x1B80, 0x95000C97,
+ 0x1B80, 0x4D000CA5,
+ 0x1B80, 0x4D000CA7,
+ 0x1B80, 0x6C070CB5,
+ 0x1B80, 0x6C070CB7,
+ 0x1B80, 0x00010CC5,
+ 0x1B80, 0x00010CC7,
+ 0x1B80, 0x00220CD5,
+ 0x1B80, 0x00220CD7,
+ 0x1B80, 0x06140CE5,
+ 0x1B80, 0x06140CE7,
+ 0x1B80, 0xE16C0CF5,
+ 0x1B80, 0xE16C0CF7,
+ 0x1B80, 0x00020D05,
+ 0x1B80, 0x00020D07,
+ 0x1B80, 0x00250D15,
+ 0x1B80, 0x00250D17,
+ 0x1B80, 0x06140D25,
+ 0x1B80, 0x06140D27,
+ 0x1B80, 0xE16C0D35,
+ 0x1B80, 0xE16C0D37,
+ 0x1B80, 0x00020D45,
+ 0x1B80, 0x00020D47,
+ 0x1B80, 0x00010D55,
+ 0x1B80, 0x00010D57,
+ 0x1B80, 0x00320D65,
+ 0x1B80, 0x00320D67,
+ 0x1B80, 0xE16C0D75,
+ 0x1B80, 0xE16C0D77,
+ 0x1B80, 0x00020D85,
+ 0x1B80, 0x00020D87,
+ 0x1B80, 0xE1860D95,
+ 0x1B80, 0xE1860D97,
+ 0x1B80, 0xE1B60DA5,
+ 0x1B80, 0xE1B60DA7,
+ 0x1B80, 0x5CD10DB5,
+ 0x1B80, 0x5CD10DB7,
+ 0x1B80, 0x673A0DC5,
+ 0x1B80, 0x673A0DC7,
+ 0x1B80, 0xE1230DD5,
+ 0x1B80, 0xE1230DD7,
+ 0x1B80, 0xF80B0DE5,
+ 0x1B80, 0xF80B0DE7,
+ 0x1B80, 0xF9110DF5,
+ 0x1B80, 0xF9110DF7,
+ 0x1B80, 0xE1580E05,
+ 0x1B80, 0xE1580E07,
+ 0x1B80, 0x67370E15,
+ 0x1B80, 0x67370E17,
+ 0x1B80, 0xE1580E25,
+ 0x1B80, 0xE1580E27,
+ 0x1B80, 0x672F0E35,
+ 0x1B80, 0x672F0E37,
+ 0x1B80, 0xE1580E45,
+ 0x1B80, 0xE1580E47,
+ 0x1B80, 0x67270E55,
+ 0x1B80, 0x67270E57,
+ 0x1B80, 0xE1580E65,
+ 0x1B80, 0xE1580E67,
+ 0x1B80, 0x671F0E75,
+ 0x1B80, 0x671F0E77,
+ 0x1B80, 0xE1580E85,
+ 0x1B80, 0xE1580E87,
+ 0x1B80, 0x67170E95,
+ 0x1B80, 0x67170E97,
+ 0x1B80, 0xE1580EA5,
+ 0x1B80, 0xE1580EA7,
+ 0x1B80, 0xF8020EB5,
+ 0x1B80, 0xF8020EB7,
+ 0x1B80, 0x30EE0EC5,
+ 0x1B80, 0x30EE0EC7,
+ 0x1B80, 0xE0D10ED5,
+ 0x1B80, 0xE0D10ED7,
+ 0x1B80, 0x670F0EE5,
+ 0x1B80, 0x670F0EE7,
+ 0x1B80, 0xE1580EF5,
+ 0x1B80, 0xE1580EF7,
+ 0x1B80, 0x67070F05,
+ 0x1B80, 0x67070F07,
+ 0x1B80, 0xE1580F15,
+ 0x1B80, 0xE1580F17,
+ 0x1B80, 0xF9020F25,
+ 0x1B80, 0xF9020F27,
+ 0x1B80, 0x30F50F35,
+ 0x1B80, 0x30F50F37,
+ 0x1B80, 0xE0CD0F45,
+ 0x1B80, 0xE0CD0F47,
+ 0x1B80, 0x06140F55,
+ 0x1B80, 0x06140F57,
+ 0x1B80, 0xE16C0F65,
+ 0x1B80, 0xE16C0F67,
+ 0x1B80, 0x5CF10F75,
+ 0x1B80, 0x5CF10F77,
+ 0x1B80, 0xE1580F85,
+ 0x1B80, 0xE1580F87,
+ 0x1B80, 0x06140F95,
+ 0x1B80, 0x06140F97,
+ 0x1B80, 0xE16C0FA5,
+ 0x1B80, 0xE16C0FA7,
+ 0x1B80, 0xF9020FB5,
+ 0x1B80, 0xF9020FB7,
+ 0x1B80, 0x30FF0FC5,
+ 0x1B80, 0x30FF0FC7,
+ 0x1B80, 0xE0CD0FD5,
+ 0x1B80, 0xE0CD0FD7,
+ 0x1B80, 0x31130FE5,
+ 0x1B80, 0x31130FE7,
+ 0x1B80, 0x670F0FF5,
+ 0x1B80, 0x670F0FF7,
+ 0x1B80, 0xE1581005,
+ 0x1B80, 0xE1581007,
+ 0x1B80, 0x67171015,
+ 0x1B80, 0x67171017,
+ 0x1B80, 0xE1581025,
+ 0x1B80, 0xE1581027,
+ 0x1B80, 0xF8021035,
+ 0x1B80, 0xF8021037,
+ 0x1B80, 0x31071045,
+ 0x1B80, 0x31071047,
+ 0x1B80, 0xE0D11055,
+ 0x1B80, 0xE0D11057,
+ 0x1B80, 0x31131065,
+ 0x1B80, 0x31131067,
+ 0x1B80, 0x670F1075,
+ 0x1B80, 0x670F1077,
+ 0x1B80, 0xE1581085,
+ 0x1B80, 0xE1581087,
+ 0x1B80, 0x671F1095,
+ 0x1B80, 0x671F1097,
+ 0x1B80, 0xE15810A5,
+ 0x1B80, 0xE15810A7,
+ 0x1B80, 0x672710B5,
+ 0x1B80, 0x672710B7,
+ 0x1B80, 0xE15810C5,
+ 0x1B80, 0xE15810C7,
+ 0x1B80, 0x672F10D5,
+ 0x1B80, 0x672F10D7,
+ 0x1B80, 0xE15810E5,
+ 0x1B80, 0xE15810E7,
+ 0x1B80, 0x673710F5,
+ 0x1B80, 0x673710F7,
+ 0x1B80, 0xE1581105,
+ 0x1B80, 0xE1581107,
+ 0x1B80, 0x673A1115,
+ 0x1B80, 0x673A1117,
+ 0x1B80, 0xE1581125,
+ 0x1B80, 0xE1581127,
+ 0x1B80, 0x4D101135,
+ 0x1B80, 0x4D101137,
+ 0x1B80, 0x30C41145,
+ 0x1B80, 0x30C41147,
+ 0x1B80, 0x00011155,
+ 0x1B80, 0x00011157,
+ 0x1B80, 0x6F241165,
+ 0x1B80, 0x6F241167,
+ 0x1B80, 0x6E401175,
+ 0x1B80, 0x6E401177,
+ 0x1B80, 0x6D001185,
+ 0x1B80, 0x6D001187,
+ 0x1B80, 0x55031195,
+ 0x1B80, 0x55031197,
+ 0x1B80, 0x312311A5,
+ 0x1B80, 0x312311A7,
+ 0x1B80, 0x6F1C11B5,
+ 0x1B80, 0x6F1C11B7,
+ 0x1B80, 0x6E4011C5,
+ 0x1B80, 0x6E4011C7,
+ 0x1B80, 0x550B11D5,
+ 0x1B80, 0x550B11D7,
+ 0x1B80, 0x312311E5,
+ 0x1B80, 0x312311E7,
+ 0x1B80, 0x061C11F5,
+ 0x1B80, 0x061C11F7,
+ 0x1B80, 0x54DE1205,
+ 0x1B80, 0x54DE1207,
+ 0x1B80, 0x06DC1215,
+ 0x1B80, 0x06DC1217,
+ 0x1B80, 0x55131225,
+ 0x1B80, 0x55131227,
+ 0x1B80, 0x74011235,
+ 0x1B80, 0x74011237,
+ 0x1B80, 0x74001245,
+ 0x1B80, 0x74001247,
+ 0x1B80, 0x8E001255,
+ 0x1B80, 0x8E001257,
+ 0x1B80, 0x00011265,
+ 0x1B80, 0x00011267,
+ 0x1B80, 0x57021275,
+ 0x1B80, 0x57021277,
+ 0x1B80, 0x57001285,
+ 0x1B80, 0x57001287,
+ 0x1B80, 0x97001295,
+ 0x1B80, 0x97001297,
+ 0x1B80, 0x000112A5,
+ 0x1B80, 0x000112A7,
+ 0x1B80, 0x54BF12B5,
+ 0x1B80, 0x54BF12B7,
+ 0x1B80, 0x54C112C5,
+ 0x1B80, 0x54C112C7,
+ 0x1B80, 0x54A212D5,
+ 0x1B80, 0x54A212D7,
+ 0x1B80, 0x54C012E5,
+ 0x1B80, 0x54C012E7,
+ 0x1B80, 0x54A112F5,
+ 0x1B80, 0x54A112F7,
+ 0x1B80, 0x54DF1305,
+ 0x1B80, 0x54DF1307,
+ 0x1B80, 0x00011315,
+ 0x1B80, 0x00011317,
+ 0x1B80, 0x55001325,
+ 0x1B80, 0x55001327,
+ 0x1B80, 0xE1231335,
+ 0x1B80, 0xE1231337,
+ 0x1B80, 0x54811345,
+ 0x1B80, 0x54811347,
+ 0x1B80, 0xE1231355,
+ 0x1B80, 0xE1231357,
+ 0x1B80, 0x54801365,
+ 0x1B80, 0x54801367,
+ 0x1B80, 0x002A1375,
+ 0x1B80, 0x002A1377,
+ 0x1B80, 0xE12B1385,
+ 0x1B80, 0xE12B1387,
+ 0x1B80, 0xE1231395,
+ 0x1B80, 0xE1231397,
+ 0x1B80, 0x548013A5,
+ 0x1B80, 0x548013A7,
+ 0x1B80, 0xE17213B5,
+ 0x1B80, 0xE17213B7,
+ 0x1B80, 0xBF3013C5,
+ 0x1B80, 0xBF3013C7,
+ 0x1B80, 0x000213D5,
+ 0x1B80, 0x000213D7,
+ 0x1B80, 0x302813E5,
+ 0x1B80, 0x302813E7,
+ 0x1B80, 0x4F7813F5,
+ 0x1B80, 0x4F7813F7,
+ 0x1B80, 0x4E001405,
+ 0x1B80, 0x4E001407,
+ 0x1B80, 0x53871415,
+ 0x1B80, 0x53871417,
+ 0x1B80, 0x52F11425,
+ 0x1B80, 0x52F11427,
+ 0x1B80, 0xE1161435,
+ 0x1B80, 0xE1161437,
+ 0x1B80, 0xE11B1445,
+ 0x1B80, 0xE11B1447,
+ 0x1B80, 0xE11F1455,
+ 0x1B80, 0xE11F1457,
+ 0x1B80, 0xE1271465,
+ 0x1B80, 0xE1271467,
+ 0x1B80, 0x54811475,
+ 0x1B80, 0x54811477,
+ 0x1B80, 0xE1161485,
+ 0x1B80, 0xE1161487,
+ 0x1B80, 0xE11B1495,
+ 0x1B80, 0xE11B1497,
+ 0x1B80, 0xE11F14A5,
+ 0x1B80, 0xE11F14A7,
+ 0x1B80, 0xE12714B5,
+ 0x1B80, 0xE12714B7,
+ 0x1B80, 0x548014C5,
+ 0x1B80, 0x548014C7,
+ 0x1B80, 0x002A14D5,
+ 0x1B80, 0x002A14D7,
+ 0x1B80, 0xE12B14E5,
+ 0x1B80, 0xE12B14E7,
+ 0x1B80, 0xE11614F5,
+ 0x1B80, 0xE11614F7,
+ 0x1B80, 0xE11B1505,
+ 0x1B80, 0xE11B1507,
+ 0x1B80, 0xE11F1515,
+ 0x1B80, 0xE11F1517,
+ 0x1B80, 0xE1271525,
+ 0x1B80, 0xE1271527,
+ 0x1B80, 0x54801535,
+ 0x1B80, 0x54801537,
+ 0x1B80, 0xE1721545,
+ 0x1B80, 0xE1721547,
+ 0x1B80, 0xBF171555,
+ 0x1B80, 0xBF171557,
+ 0x1B80, 0x00021565,
+ 0x1B80, 0x00021567,
+ 0x1B80, 0x30281575,
+ 0x1B80, 0x30281577,
+ 0x1B80, 0x06141585,
+ 0x1B80, 0x06141587,
+ 0x1B80, 0x73201595,
+ 0x1B80, 0x73201597,
+ 0x1B80, 0x720015A5,
+ 0x1B80, 0x720015A7,
+ 0x1B80, 0x710015B5,
+ 0x1B80, 0x710015B7,
+ 0x1B80, 0x550115C5,
+ 0x1B80, 0x550115C7,
+ 0x1B80, 0xE12315D5,
+ 0x1B80, 0xE12315D7,
+ 0x1B80, 0xE12715E5,
+ 0x1B80, 0xE12715E7,
+ 0x1B80, 0x548115F5,
+ 0x1B80, 0x548115F7,
+ 0x1B80, 0xE1231605,
+ 0x1B80, 0xE1231607,
+ 0x1B80, 0xE1271615,
+ 0x1B80, 0xE1271617,
+ 0x1B80, 0x54801625,
+ 0x1B80, 0x54801627,
+ 0x1B80, 0x002A1635,
+ 0x1B80, 0x002A1637,
+ 0x1B80, 0xE12B1645,
+ 0x1B80, 0xE12B1647,
+ 0x1B80, 0xE1231655,
+ 0x1B80, 0xE1231657,
+ 0x1B80, 0xE1271665,
+ 0x1B80, 0xE1271667,
+ 0x1B80, 0x54801675,
+ 0x1B80, 0x54801677,
+ 0x1B80, 0xE1721685,
+ 0x1B80, 0xE1721687,
+ 0x1B80, 0xBF031695,
+ 0x1B80, 0xBF031697,
+ 0x1B80, 0x000216A5,
+ 0x1B80, 0x000216A7,
+ 0x1B80, 0x302816B5,
+ 0x1B80, 0x302816B7,
+ 0x1B80, 0x54BF16C5,
+ 0x1B80, 0x54BF16C7,
+ 0x1B80, 0x54C516D5,
+ 0x1B80, 0x54C516D7,
+ 0x1B80, 0x050A16E5,
+ 0x1B80, 0x050A16E7,
+ 0x1B80, 0x071416F5,
+ 0x1B80, 0x071416F7,
+ 0x1B80, 0x54DF1705,
+ 0x1B80, 0x54DF1707,
+ 0x1B80, 0x00011715,
+ 0x1B80, 0x00011717,
+ 0x1B80, 0x54BF1725,
+ 0x1B80, 0x54BF1727,
+ 0x1B80, 0x54C01735,
+ 0x1B80, 0x54C01737,
+ 0x1B80, 0x54A31745,
+ 0x1B80, 0x54A31747,
+ 0x1B80, 0x54C11755,
+ 0x1B80, 0x54C11757,
+ 0x1B80, 0x54A41765,
+ 0x1B80, 0x54A41767,
+ 0x1B80, 0x4C831775,
+ 0x1B80, 0x4C831777,
+ 0x1B80, 0x4C031785,
+ 0x1B80, 0x4C031787,
+ 0x1B80, 0xBF0B1795,
+ 0x1B80, 0xBF0B1797,
+ 0x1B80, 0x54C217A5,
+ 0x1B80, 0x54C217A7,
+ 0x1B80, 0x54A417B5,
+ 0x1B80, 0x54A417B7,
+ 0x1B80, 0x4C8517C5,
+ 0x1B80, 0x4C8517C7,
+ 0x1B80, 0x4C0517D5,
+ 0x1B80, 0x4C0517D7,
+ 0x1B80, 0xBF0617E5,
+ 0x1B80, 0xBF0617E7,
+ 0x1B80, 0x54C117F5,
+ 0x1B80, 0x54C117F7,
+ 0x1B80, 0x54A31805,
+ 0x1B80, 0x54A31807,
+ 0x1B80, 0x4C861815,
+ 0x1B80, 0x4C861817,
+ 0x1B80, 0x4C061825,
+ 0x1B80, 0x4C061827,
+ 0x1B80, 0xBF011835,
+ 0x1B80, 0xBF011837,
+ 0x1B80, 0x54DF1845,
+ 0x1B80, 0x54DF1847,
+ 0x1B80, 0x00011855,
+ 0x1B80, 0x00011857,
+ 0x1B80, 0x00071865,
+ 0x1B80, 0x00071867,
+ 0x1B80, 0x54011875,
+ 0x1B80, 0x54011877,
+ 0x1B80, 0x00041885,
+ 0x1B80, 0x00041887,
+ 0x1B80, 0x56001895,
+ 0x1B80, 0x56001897,
+ 0x1B80, 0x5CF218A5,
+ 0x1B80, 0x5CF218A7,
+ 0x1B80, 0x630718B5,
+ 0x1B80, 0x630718B7,
+ 0x1B80, 0x620418C5,
+ 0x1B80, 0x620418C7,
+ 0x1B80, 0x610018D5,
+ 0x1B80, 0x610018D7,
+ 0x1B80, 0x670718E5,
+ 0x1B80, 0x670718E7,
+ 0x1B80, 0x660618F5,
+ 0x1B80, 0x660618F7,
+ 0x1B80, 0x6F201905,
+ 0x1B80, 0x6F201907,
+ 0x1B80, 0x6E001915,
+ 0x1B80, 0x6E001917,
+ 0x1B80, 0x6D001925,
+ 0x1B80, 0x6D001927,
+ 0x1B80, 0x6C031935,
+ 0x1B80, 0x6C031937,
+ 0x1B80, 0x73201945,
+ 0x1B80, 0x73201947,
+ 0x1B80, 0x72001955,
+ 0x1B80, 0x72001957,
+ 0x1B80, 0x71001965,
+ 0x1B80, 0x71001967,
+ 0x1B80, 0x7B201975,
+ 0x1B80, 0x7B201977,
+ 0x1B80, 0x7A001985,
+ 0x1B80, 0x7A001987,
+ 0x1B80, 0x79001995,
+ 0x1B80, 0x79001997,
+ 0x1B80, 0x7F2019A5,
+ 0x1B80, 0x7F2019A7,
+ 0x1B80, 0x7E0019B5,
+ 0x1B80, 0x7E0019B7,
+ 0x1B80, 0x7D0019C5,
+ 0x1B80, 0x7D0019C7,
+ 0x1B80, 0x090119D5,
+ 0x1B80, 0x090119D7,
+ 0x1B80, 0x0AC619E5,
+ 0x1B80, 0x0AC619E7,
+ 0x1B80, 0x0BA619F5,
+ 0x1B80, 0x0BA619F7,
+ 0x1B80, 0x0C011A05,
+ 0x1B80, 0x0C011A07,
+ 0x1B80, 0x0D021A15,
+ 0x1B80, 0x0D021A17,
+ 0x1B80, 0x0E041A25,
+ 0x1B80, 0x0E041A27,
+ 0x1B80, 0x0FFF1A35,
+ 0x1B80, 0x0FFF1A37,
+ 0x1B80, 0x4D041A45,
+ 0x1B80, 0x4D041A47,
+ 0x1B80, 0x28F81A55,
+ 0x1B80, 0x28F81A57,
+ 0x1B80, 0xE0001A65,
+ 0x1B80, 0xE0001A67,
+ 0x1B80, 0x4D001A75,
+ 0x1B80, 0x4D001A77,
+ 0x1B80, 0x00011A85,
+ 0x1B80, 0x00011A87,
+ 0x1B80, 0x4D041A95,
+ 0x1B80, 0x4D041A97,
+ 0x1B80, 0x2EF81AA5,
+ 0x1B80, 0x2EF81AA7,
+ 0x1B80, 0x00021AB5,
+ 0x1B80, 0x00021AB7,
+ 0x1B80, 0x23031AC5,
+ 0x1B80, 0x23031AC7,
+ 0x1B80, 0x00001AD5,
+ 0x1B80, 0x00001AD7,
+ 0x1B80, 0x23131AE5,
+ 0x1B80, 0x23131AE7,
+ 0x1B80, 0xE77F1AF5,
+ 0x1B80, 0xE77F1AF7,
+ 0x1B80, 0x232F1B05,
+ 0x1B80, 0x232F1B07,
+ 0x1B80, 0xEFBF1B15,
+ 0x1B80, 0xEFBF1B17,
+ 0x1B80, 0x2EF01B25,
+ 0x1B80, 0x2EF01B27,
+ 0x1B80, 0x00021B35,
+ 0x1B80, 0x00021B37,
+ 0x1B80, 0x4D001B45,
+ 0x1B80, 0x4D001B47,
+ 0x1B80, 0x00011B55,
+ 0x1B80, 0x00011B57,
+ 0x1B80, 0x4D041B65,
+ 0x1B80, 0x4D041B67,
+ 0x1B80, 0x2EF81B75,
+ 0x1B80, 0x2EF81B77,
+ 0x1B80, 0x00021B85,
+ 0x1B80, 0x00021B87,
+ 0x1B80, 0x23031B95,
+ 0x1B80, 0x23031B97,
+ 0x1B80, 0x00001BA5,
+ 0x1B80, 0x00001BA7,
+ 0x1B80, 0x23131BB5,
+ 0x1B80, 0x23131BB7,
+ 0x1B80, 0xE77F1BC5,
+ 0x1B80, 0xE77F1BC7,
+ 0x1B80, 0x232F1BD5,
+ 0x1B80, 0x232F1BD7,
+ 0x1B80, 0xE79F1BE5,
+ 0x1B80, 0xE79F1BE7,
+ 0x1B80, 0x2EF01BF5,
+ 0x1B80, 0x2EF01BF7,
+ 0x1B80, 0x00021C05,
+ 0x1B80, 0x00021C07,
+ 0x1B80, 0x28F81C15,
+ 0x1B80, 0x28F81C17,
+ 0x1B80, 0x80001C25,
+ 0x1B80, 0x80001C27,
+ 0x1B80, 0x4D001C35,
+ 0x1B80, 0x4D001C37,
+ 0x1B80, 0x00011C45,
+ 0x1B80, 0x00011C47,
+ 0x1B80, 0x00041C55,
+ 0x1B80, 0x00041C57,
+ 0x1B80, 0x6BC01C65,
+ 0x1B80, 0x6BC01C67,
+ 0x1B80, 0x4D041C75,
+ 0x1B80, 0x4D041C77,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B80, 0x68241C85,
+ 0x1B80, 0x68241C87,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B80, 0x68241C85,
+ 0x1B80, 0x68241C87,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B80, 0x68241C85,
+ 0x1B80, 0x68241C87,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B80, 0x68241C85,
+ 0x1B80, 0x68241C87,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B80, 0x68241C85,
+ 0x1B80, 0x68241C87,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B80, 0x68241C85,
+ 0x1B80, 0x68241C87,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B80, 0x68241C85,
+ 0x1B80, 0x68241C87,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B80, 0x68241C85,
+ 0x1B80, 0x68241C87,
+ 0xA0000000, 0x00000000,
+ 0x1B80, 0x68481C85,
+ 0x1B80, 0x68481C87,
+ 0xB0000000, 0x00000000,
+ 0x1B80, 0x66061C95,
+ 0x1B80, 0x66061C97,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B80, 0x650C1CA5,
+ 0x1B80, 0x650C1CA7,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B80, 0x650C1CA5,
+ 0x1B80, 0x650C1CA7,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B80, 0x650C1CA5,
+ 0x1B80, 0x650C1CA7,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B80, 0x650C1CA5,
+ 0x1B80, 0x650C1CA7,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B80, 0x650C1CA5,
+ 0x1B80, 0x650C1CA7,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B80, 0x650C1CA5,
+ 0x1B80, 0x650C1CA7,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B80, 0x650C1CA5,
+ 0x1B80, 0x650C1CA7,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B80, 0x650C1CA5,
+ 0x1B80, 0x650C1CA7,
+ 0xA0000000, 0x00000000,
+ 0x1B80, 0x65041CA5,
+ 0x1B80, 0x65041CA7,
+ 0xB0000000, 0x00000000,
+ 0x1B80, 0x64471CB5,
+ 0x1B80, 0x64471CB7,
+ 0x1B80, 0x23411CC5,
+ 0x1B80, 0x23411CC7,
+ 0x1B80, 0x100E1CD5,
+ 0x1B80, 0x100E1CD7,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B80, 0x60101CE5,
+ 0x1B80, 0x60101CE7,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B80, 0x60101CE5,
+ 0x1B80, 0x60101CE7,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B80, 0x60101CE5,
+ 0x1B80, 0x60101CE7,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B80, 0x60101CE5,
+ 0x1B80, 0x60101CE7,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B80, 0x60101CE5,
+ 0x1B80, 0x60101CE7,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B80, 0x60101CE5,
+ 0x1B80, 0x60101CE7,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B80, 0x60101CE5,
+ 0x1B80, 0x60101CE7,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B80, 0x60101CE5,
+ 0x1B80, 0x60101CE7,
+ 0xA0000000, 0x00000000,
+ 0x1B80, 0x60011CE5,
+ 0x1B80, 0x60011CE7,
+ 0xB0000000, 0x00000000,
+ 0x1B80, 0x23411CF5,
+ 0x1B80, 0x23411CF7,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B80, 0x60811D05,
+ 0x1B80, 0x60811D07,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B80, 0x60811D05,
+ 0x1B80, 0x60811D07,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B80, 0x60811D05,
+ 0x1B80, 0x60811D07,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B80, 0x60811D05,
+ 0x1B80, 0x60811D07,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B80, 0x60811D05,
+ 0x1B80, 0x60811D07,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B80, 0x60811D05,
+ 0x1B80, 0x60811D07,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B80, 0x60811D05,
+ 0x1B80, 0x60811D07,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B80, 0x60811D05,
+ 0x1B80, 0x60811D07,
+ 0xA0000000, 0x00000000,
+ 0x1B80, 0x60611D05,
+ 0x1B80, 0x60611D07,
+ 0xB0000000, 0x00000000,
+ 0x1B80, 0x23411D15,
+ 0x1B80, 0x23411D17,
+ 0x1B80, 0x70E11D25,
+ 0x1B80, 0x70E11D27,
+ 0x1B80, 0x4D001D35,
+ 0x1B80, 0x4D001D37,
+ 0x1B80, 0x00011D45,
+ 0x1B80, 0x00011D47,
+ 0x1B80, 0x00041D55,
+ 0x1B80, 0x00041D57,
+ 0x1B80, 0x6B401D65,
+ 0x1B80, 0x6B401D67,
+ 0x1B80, 0x4D041D75,
+ 0x1B80, 0x4D041D77,
+ 0x80000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B80, 0x68241D85,
+ 0x1B80, 0x68241D87,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B80, 0x68241D85,
+ 0x1B80, 0x68241D87,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B80, 0x68241D85,
+ 0x1B80, 0x68241D87,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B80, 0x68241D85,
+ 0x1B80, 0x68241D87,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B80, 0x68241D85,
+ 0x1B80, 0x68241D87,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B80, 0x68241D85,
+ 0x1B80, 0x68241D87,
+ 0xA0000000, 0x00000000,
+ 0x1B80, 0x68481D85,
+ 0x1B80, 0x68481D87,
+ 0xB0000000, 0x00000000,
+ 0x1B80, 0x66061D95,
+ 0x1B80, 0x66061D97,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B80, 0x65081DA5,
+ 0x1B80, 0x65081DA7,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B80, 0x65181DA5,
+ 0x1B80, 0x65181DA7,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B80, 0x65181DA5,
+ 0x1B80, 0x65181DA7,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B80, 0x65181DA5,
+ 0x1B80, 0x65181DA7,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B80, 0x65181DA5,
+ 0x1B80, 0x65181DA7,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B80, 0x65081DA5,
+ 0x1B80, 0x65081DA7,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B80, 0x65181DA5,
+ 0x1B80, 0x65181DA7,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B80, 0x65181DA5,
+ 0x1B80, 0x65181DA7,
+ 0xA0000000, 0x00000000,
+ 0x1B80, 0x65081DA5,
+ 0x1B80, 0x65081DA7,
+ 0xB0000000, 0x00000000,
+ 0x80000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B80, 0x64481DB5,
+ 0x1B80, 0x64481DB7,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B80, 0x64481DB5,
+ 0x1B80, 0x64481DB7,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B80, 0x64481DB5,
+ 0x1B80, 0x64481DB7,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B80, 0x64481DB5,
+ 0x1B80, 0x64481DB7,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B80, 0x64481DB5,
+ 0x1B80, 0x64481DB7,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B80, 0x64481DB5,
+ 0x1B80, 0x64481DB7,
+ 0xA0000000, 0x00000000,
+ 0x1B80, 0x64471DB5,
+ 0x1B80, 0x64471DB7,
+ 0xB0000000, 0x00000000,
+ 0x1B80, 0x23411DC5,
+ 0x1B80, 0x23411DC7,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B80, 0x11E41DD5,
+ 0x1B80, 0x11E41DD7,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B80, 0x11E81DD5,
+ 0x1B80, 0x11E81DD7,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B80, 0x11E81DD5,
+ 0x1B80, 0x11E81DD7,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B80, 0x11E81DD5,
+ 0x1B80, 0x11E81DD7,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B80, 0x11E81DD5,
+ 0x1B80, 0x11E81DD7,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B80, 0x11E41DD5,
+ 0x1B80, 0x11E41DD7,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B80, 0x11E81DD5,
+ 0x1B80, 0x11E81DD7,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B80, 0x11E81DD5,
+ 0x1B80, 0x11E81DD7,
+ 0xA0000000, 0x00000000,
+ 0x1B80, 0x11E41DD5,
+ 0x1B80, 0x11E41DD7,
+ 0xB0000000, 0x00000000,
+ 0x1B80, 0x60011DE5,
+ 0x1B80, 0x60011DE7,
+ 0x1B80, 0x23411DF5,
+ 0x1B80, 0x23411DF7,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B80, 0x60E11E05,
+ 0x1B80, 0x60E11E07,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B80, 0x61E11E05,
+ 0x1B80, 0x61E11E07,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B80, 0x61E11E05,
+ 0x1B80, 0x61E11E07,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B80, 0x61E11E05,
+ 0x1B80, 0x61E11E07,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B80, 0x61E11E05,
+ 0x1B80, 0x61E11E07,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B80, 0x60E11E05,
+ 0x1B80, 0x60E11E07,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B80, 0x61E11E05,
+ 0x1B80, 0x61E11E07,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x1B80, 0x61E11E05,
+ 0x1B80, 0x61E11E07,
+ 0xA0000000, 0x00000000,
+ 0x1B80, 0x60E11E05,
+ 0x1B80, 0x60E11E07,
+ 0xB0000000, 0x00000000,
+ 0x1B80, 0x23411E15,
+ 0x1B80, 0x23411E17,
+ 0x1B80, 0x70611E25,
+ 0x1B80, 0x70611E27,
+ 0x1B80, 0x4D001E35,
+ 0x1B80, 0x4D001E37,
+ 0x1B80, 0x00011E45,
+ 0x1B80, 0x00011E47,
+ 0x1B80, 0x00001E55,
+ 0x1B80, 0x00001E57,
+ 0x1B80, 0x00001E65,
+ 0x1B80, 0x00001E67,
+ 0x1B80, 0x00001E75,
+ 0x1B80, 0x00001E77,
+ 0x1B80, 0x00001E85,
+ 0x1B80, 0x00001E87,
+ 0x1B80, 0x00001E95,
+ 0x1B80, 0x00001E97,
+ 0x1B80, 0x00001EA5,
+ 0x1B80, 0x00001EA7,
+ 0x1B80, 0x00001EB5,
+ 0x1B80, 0x00001EB7,
+ 0x1B80, 0x00001EC5,
+ 0x1B80, 0x00001EC7,
+ 0x1B80, 0x00001ED5,
+ 0x1B80, 0x00001ED7,
+ 0x1B80, 0x00001EE5,
+ 0x1B80, 0x00001EE7,
+ 0x1B80, 0x00001EF5,
+ 0x1B80, 0x00001EF7,
+ 0x1B80, 0x00001F05,
+ 0x1B80, 0x00001F07,
+ 0x1B80, 0x00001F15,
+ 0x1B80, 0x00001F17,
+ 0x1B80, 0x00001F25,
+ 0x1B80, 0x00001F27,
+ 0x1B80, 0x00001F35,
+ 0x1B80, 0x00001F37,
+ 0x1B80, 0x00001F45,
+ 0x1B80, 0x00001F47,
+ 0x1B80, 0x00001F55,
+ 0x1B80, 0x00001F57,
+ 0x1B80, 0x00001F65,
+ 0x1B80, 0x00001F67,
+ 0x1B80, 0x00001F75,
+ 0x1B80, 0x00001F77,
+ 0x1B80, 0x00001F85,
+ 0x1B80, 0x00001F87,
+ 0x1B80, 0x00001F95,
+ 0x1B80, 0x00001F97,
+ 0x1B80, 0x00001FA5,
+ 0x1B80, 0x00001FA7,
+ 0x1B80, 0x00001FB5,
+ 0x1B80, 0x00001FB7,
+ 0x1B80, 0x00001FC5,
+ 0x1B80, 0x00001FC7,
+ 0x1B80, 0x00001FD5,
+ 0x1B80, 0x00001FD7,
+ 0x1B80, 0x00001FE5,
+ 0x1B80, 0x00001FE7,
+ 0x1B80, 0x00001FF5,
+ 0x1B80, 0x00001FF7,
+ 0x1B80, 0x00000006,
+ 0x1B80, 0x00000002,
+};
+
+RTW_DECL_TABLE_PHY_COND(rtw8814a_bb, rtw_phy_cfg_bb);
+
+static const struct rtw_phy_pg_cfg_pair rtw8814a_bb_pg[] = {
+ { 0, 0, 0, 0x00000c20, 0xffffffff, 0x34343434, },
+ { 0, 0, 0, 0x00000c24, 0xffffffff, 0x34343434, },
+ { 0, 0, 0, 0x00000c28, 0xffffffff, 0x30323434, },
+ { 0, 0, 0, 0x00000c2c, 0xffffffff, 0x34343434, },
+ { 0, 0, 0, 0x00000c30, 0xffffffff, 0x28303234, },
+ { 0, 0, 1, 0x00000c34, 0xffffffff, 0x32323232, },
+ { 0, 0, 1, 0x00000c38, 0xffffffff, 0x26283032, },
+ { 0, 0, 2, 0x00000cd8, 0xffffffff, 0x30303030, },
+ { 0, 0, 2, 0x00000cdc, 0xffffffff, 0x24262830, },
+ { 0, 0, 0, 0x00000c3c, 0xffffffff, 0x34343434, },
+ { 0, 0, 0, 0x00000c40, 0xffffffff, 0x28303234, },
+ { 0, 0, 0, 0x00000c44, 0xffffffff, 0x32322426, },
+ { 0, 0, 1, 0x00000c48, 0xffffffff, 0x30323232, },
+ { 0, 0, 1, 0x00000c4c, 0xffffffff, 0x22242628, },
+ { 0, 0, 2, 0x00000ce0, 0xffffffff, 0x30303030, },
+ { 0, 0, 2, 0x00000ce4, 0xffffffff, 0x24262830, },
+ { 0, 0, 2, 0x00000ce8, 0x0000ffff, 0x20222222, },
+ { 0, 1, 0, 0x00000e20, 0xffffffff, 0x34343434, },
+ { 0, 1, 0, 0x00000e24, 0xffffffff, 0x34343434, },
+ { 0, 1, 0, 0x00000e28, 0xffffffff, 0x30323434, },
+ { 0, 1, 0, 0x00000e2c, 0xffffffff, 0x34343434, },
+ { 0, 1, 0, 0x00000e30, 0xffffffff, 0x28303234, },
+ { 0, 1, 1, 0x00000e34, 0xffffffff, 0x32323232, },
+ { 0, 1, 1, 0x00000e38, 0xffffffff, 0x26283032, },
+ { 0, 1, 2, 0x00000ed8, 0xffffffff, 0x30303030, },
+ { 0, 1, 2, 0x00000edc, 0xffffffff, 0x24262830, },
+ { 0, 1, 0, 0x00000e3c, 0xffffffff, 0x34343434, },
+ { 0, 1, 0, 0x00000e40, 0xffffffff, 0x28303234, },
+ { 0, 1, 0, 0x00000e44, 0xffffffff, 0x32322426, },
+ { 0, 1, 1, 0x00000e48, 0xffffffff, 0x30323232, },
+ { 0, 1, 1, 0x00000e4c, 0xffffffff, 0x22242628, },
+ { 0, 1, 2, 0x00000ee0, 0xffffffff, 0x30303030, },
+ { 0, 1, 2, 0x00000ee4, 0xffffffff, 0x24262830, },
+ { 0, 1, 2, 0x00000ee8, 0x0000ffff, 0x20222222, },
+ { 0, 2, 0, 0x00001820, 0xffffffff, 0x34343434, },
+ { 0, 2, 0, 0x00001824, 0xffffffff, 0x34343434, },
+ { 0, 2, 0, 0x00001828, 0xffffffff, 0x30323434, },
+ { 0, 2, 0, 0x0000182c, 0xffffffff, 0x34343434, },
+ { 0, 2, 0, 0x00001830, 0xffffffff, 0x28303234, },
+ { 0, 2, 1, 0x00001834, 0xffffffff, 0x32323232, },
+ { 0, 2, 1, 0x00001838, 0xffffffff, 0x26283032, },
+ { 0, 2, 2, 0x000018d8, 0xffffffff, 0x30303030, },
+ { 0, 2, 2, 0x000018dc, 0xffffffff, 0x24262830, },
+ { 0, 2, 0, 0x0000183c, 0xffffffff, 0x34343434, },
+ { 0, 2, 0, 0x00001840, 0xffffffff, 0x28303234, },
+ { 0, 2, 0, 0x00001844, 0xffffffff, 0x32322426, },
+ { 0, 2, 1, 0x00001848, 0xffffffff, 0x30323232, },
+ { 0, 2, 1, 0x0000184c, 0xffffffff, 0x22242628, },
+ { 0, 2, 2, 0x000018e0, 0xffffffff, 0x30303030, },
+ { 0, 2, 2, 0x000018e4, 0xffffffff, 0x24262830, },
+ { 0, 2, 2, 0x000018e8, 0x0000ffff, 0x20222222, },
+ { 0, 3, 0, 0x00001a20, 0xffffffff, 0x34343434, },
+ { 0, 3, 0, 0x00001a24, 0xffffffff, 0x34343434, },
+ { 0, 3, 0, 0x00001a28, 0xffffffff, 0x30323434, },
+ { 0, 3, 0, 0x00001a2c, 0xffffffff, 0x34343434, },
+ { 0, 3, 0, 0x00001a30, 0xffffffff, 0x28303234, },
+ { 0, 3, 1, 0x00001a34, 0xffffffff, 0x32323232, },
+ { 0, 3, 1, 0x00001a38, 0xffffffff, 0x26283032, },
+ { 0, 3, 2, 0x00001ad8, 0xffffffff, 0x30303030, },
+ { 0, 3, 2, 0x00001adc, 0xffffffff, 0x24262830, },
+ { 0, 3, 0, 0x00001a3c, 0xffffffff, 0x34343434, },
+ { 0, 3, 0, 0x00001a40, 0xffffffff, 0x28303234, },
+ { 0, 3, 0, 0x00001a44, 0xffffffff, 0x32322426, },
+ { 0, 3, 1, 0x00001a48, 0xffffffff, 0x30323232, },
+ { 0, 3, 1, 0x00001a4c, 0xffffffff, 0x22242628, },
+ { 0, 3, 2, 0x00001ae0, 0xffffffff, 0x30303030, },
+ { 0, 3, 2, 0x00001ae4, 0xffffffff, 0x24262830, },
+ { 0, 3, 2, 0x00001ae8, 0x0000ffff, 0x20222222, },
+ { 1, 0, 0, 0x00000c24, 0xffffffff, 0x34343434, },
+ { 1, 0, 0, 0x00000c28, 0xffffffff, 0x30323434, },
+ { 1, 0, 0, 0x00000c2c, 0xffffffff, 0x34343434, },
+ { 1, 0, 0, 0x00000c30, 0xffffffff, 0x28303234, },
+ { 1, 0, 1, 0x00000c34, 0xffffffff, 0x32323232, },
+ { 1, 0, 1, 0x00000c38, 0xffffffff, 0x26283032, },
+ { 1, 0, 2, 0x00000cd8, 0xffffffff, 0x30303030, },
+ { 1, 0, 2, 0x00000cdc, 0xffffffff, 0x24262830, },
+ { 1, 0, 0, 0x00000c3c, 0xffffffff, 0x34343434, },
+ { 1, 0, 0, 0x00000c40, 0xffffffff, 0x28303234, },
+ { 1, 0, 0, 0x00000c44, 0xffffffff, 0x32322426, },
+ { 1, 0, 1, 0x00000c48, 0xffffffff, 0x30323232, },
+ { 1, 0, 1, 0x00000c4c, 0xffffffff, 0x22242628, },
+ { 1, 0, 2, 0x00000ce0, 0xffffffff, 0x30303030, },
+ { 1, 0, 2, 0x00000ce4, 0xffffffff, 0x24262830, },
+ { 1, 0, 2, 0x00000ce8, 0x0000ffff, 0x20222222, },
+ { 1, 1, 0, 0x00000e24, 0xffffffff, 0x34343434, },
+ { 1, 1, 0, 0x00000e28, 0xffffffff, 0x30323434, },
+ { 1, 1, 0, 0x00000e2c, 0xffffffff, 0x34343434, },
+ { 1, 1, 0, 0x00000e30, 0xffffffff, 0x28303234, },
+ { 1, 1, 1, 0x00000e34, 0xffffffff, 0x32323232, },
+ { 1, 1, 1, 0x00000e38, 0xffffffff, 0x26283032, },
+ { 1, 1, 2, 0x00000ed8, 0xffffffff, 0x30303030, },
+ { 1, 1, 2, 0x00000edc, 0xffffffff, 0x24262830, },
+ { 1, 1, 0, 0x00000e3c, 0xffffffff, 0x34343434, },
+ { 1, 1, 0, 0x00000e40, 0xffffffff, 0x28303234, },
+ { 1, 1, 0, 0x00000e44, 0xffffffff, 0x32322426, },
+ { 1, 1, 1, 0x00000e48, 0xffffffff, 0x30323232, },
+ { 1, 1, 1, 0x00000e4c, 0xffffffff, 0x22242628, },
+ { 1, 1, 2, 0x00000ee0, 0xffffffff, 0x30303030, },
+ { 1, 1, 2, 0x00000ee4, 0xffffffff, 0x24262830, },
+ { 1, 1, 2, 0x00000ee8, 0x0000ffff, 0x20222222, },
+ { 1, 2, 0, 0x00001824, 0xffffffff, 0x34343434, },
+ { 1, 2, 0, 0x00001828, 0xffffffff, 0x30323434, },
+ { 1, 2, 0, 0x0000182c, 0xffffffff, 0x34343434, },
+ { 1, 2, 0, 0x00001830, 0xffffffff, 0x28303234, },
+ { 1, 2, 1, 0x00001834, 0xffffffff, 0x32323232, },
+ { 1, 2, 1, 0x00001838, 0xffffffff, 0x26283032, },
+ { 1, 2, 2, 0x000018d8, 0xffffffff, 0x30303030, },
+ { 1, 2, 2, 0x000018dc, 0xffffffff, 0x24262830, },
+ { 1, 2, 0, 0x0000183c, 0xffffffff, 0x34343434, },
+ { 1, 2, 0, 0x00001840, 0xffffffff, 0x28303234, },
+ { 1, 2, 0, 0x00001844, 0xffffffff, 0x32322426, },
+ { 1, 2, 1, 0x00001848, 0xffffffff, 0x30323232, },
+ { 1, 2, 1, 0x0000184c, 0xffffffff, 0x22242628, },
+ { 1, 2, 2, 0x000018e0, 0xffffffff, 0x30303030, },
+ { 1, 2, 2, 0x000018e4, 0xffffffff, 0x24262830, },
+ { 1, 2, 2, 0x000018e8, 0x0000ffff, 0x20222222, },
+ { 1, 3, 0, 0x00001a24, 0xffffffff, 0x34343434, },
+ { 1, 3, 0, 0x00001a28, 0xffffffff, 0x30323434, },
+ { 1, 3, 0, 0x00001a2c, 0xffffffff, 0x34343434, },
+ { 1, 3, 0, 0x00001a30, 0xffffffff, 0x28303234, },
+ { 1, 3, 1, 0x00001a34, 0xffffffff, 0x32323232, },
+ { 1, 3, 1, 0x00001a38, 0xffffffff, 0x26283032, },
+ { 1, 3, 2, 0x00001ad8, 0xffffffff, 0x30303030, },
+ { 1, 3, 2, 0x00001adc, 0xffffffff, 0x24262830, },
+ { 1, 3, 0, 0x00001a3c, 0xffffffff, 0x34343434, },
+ { 1, 3, 0, 0x00001a40, 0xffffffff, 0x28303234, },
+ { 1, 3, 0, 0x00001a44, 0xffffffff, 0x32322426, },
+ { 1, 3, 1, 0x00001a48, 0xffffffff, 0x30323232, },
+ { 1, 3, 1, 0x00001a4c, 0xffffffff, 0x22242628, },
+ { 1, 3, 2, 0x00001ae0, 0xffffffff, 0x30303030, },
+ { 1, 3, 2, 0x00001ae4, 0xffffffff, 0x24262830, },
+ { 1, 3, 2, 0x00001ae8, 0x0000ffff, 0x20222222, },
+};
+
+RTW_DECL_TABLE_BB_PG(rtw8814a_bb_pg);
+
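+/* TX power-by-rate tables. Each entry follows the field order of
+ * struct rtw_phy_pg_cfg_pair in the rtw88 driver:
+ * { band, rf_path, tx_num, addr, bitmask, data }, where band 0 is 2.4 GHz,
+ * band 1 is 5 GHz, and each byte of data is a per-rate TX power value.
+ */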
+static const struct rtw_phy_pg_cfg_pair rtw8814a_bb_pg_type0[] = {
+ { 0, 0, 0, 0x00000c20, 0xffffffff, 0x32323232, },
+ { 0, 0, 0, 0x00000c24, 0xffffffff, 0x32323232, },
+ { 0, 0, 0, 0x00000c28, 0xffffffff, 0x28303232, },
+ { 0, 0, 0, 0x00000c2c, 0xffffffff, 0x32323232, },
+ { 0, 0, 0, 0x00000c30, 0xffffffff, 0x26283032, },
+ { 0, 0, 1, 0x00000c34, 0xffffffff, 0x30303030, },
+ { 0, 0, 1, 0x00000c38, 0xffffffff, 0x24262830, },
+ { 0, 0, 2, 0x00000cd8, 0xffffffff, 0x28282828, },
+ { 0, 0, 2, 0x00000cdc, 0xffffffff, 0x22242628, },
+ { 0, 0, 0, 0x00000c3c, 0xffffffff, 0x32323232, },
+ { 0, 0, 0, 0x00000c40, 0xffffffff, 0x26283032, },
+ { 0, 0, 0, 0x00000c44, 0xffffffff, 0x30302224, },
+ { 0, 0, 1, 0x00000c48, 0xffffffff, 0x28303030, },
+ { 0, 0, 1, 0x00000c4c, 0xffffffff, 0x20222426, },
+ { 0, 0, 2, 0x00000ce0, 0xffffffff, 0x28282828, },
+ { 0, 0, 2, 0x00000ce4, 0xffffffff, 0x22242628, },
+ { 0, 0, 2, 0x00000ce8, 0x0000ffff, 0x18202020, },
+ { 0, 1, 0, 0x00000e20, 0xffffffff, 0x32323232, },
+ { 0, 1, 0, 0x00000e24, 0xffffffff, 0x32323232, },
+ { 0, 1, 0, 0x00000e28, 0xffffffff, 0x28303232, },
+ { 0, 1, 0, 0x00000e2c, 0xffffffff, 0x32323232, },
+ { 0, 1, 0, 0x00000e30, 0xffffffff, 0x26283032, },
+ { 0, 1, 1, 0x00000e34, 0xffffffff, 0x30303030, },
+ { 0, 1, 1, 0x00000e38, 0xffffffff, 0x24262830, },
+ { 0, 1, 2, 0x00000ed8, 0xffffffff, 0x28282828, },
+ { 0, 1, 2, 0x00000edc, 0xffffffff, 0x22242628, },
+ { 0, 1, 0, 0x00000e3c, 0xffffffff, 0x32323232, },
+ { 0, 1, 0, 0x00000e40, 0xffffffff, 0x26283032, },
+ { 0, 1, 0, 0x00000e44, 0xffffffff, 0x30302224, },
+ { 0, 1, 1, 0x00000e48, 0xffffffff, 0x28303030, },
+ { 0, 1, 1, 0x00000e4c, 0xffffffff, 0x20222426, },
+ { 0, 1, 2, 0x00000ee0, 0xffffffff, 0x28282828, },
+ { 0, 1, 2, 0x00000ee4, 0xffffffff, 0x22242628, },
+ { 0, 1, 2, 0x00000ee8, 0x0000ffff, 0x18202020, },
+ { 0, 2, 0, 0x00001820, 0xffffffff, 0x32323232, },
+ { 0, 2, 0, 0x00001824, 0xffffffff, 0x32323232, },
+ { 0, 2, 0, 0x00001828, 0xffffffff, 0x28303232, },
+ { 0, 2, 0, 0x0000182c, 0xffffffff, 0x32323232, },
+ { 0, 2, 0, 0x00001830, 0xffffffff, 0x26283032, },
+ { 0, 2, 1, 0x00001834, 0xffffffff, 0x30303030, },
+ { 0, 2, 1, 0x00001838, 0xffffffff, 0x24262830, },
+ { 0, 2, 2, 0x000018d8, 0xffffffff, 0x28282828, },
+ { 0, 2, 2, 0x000018dc, 0xffffffff, 0x22242628, },
+ { 0, 2, 0, 0x0000183c, 0xffffffff, 0x32323232, },
+ { 0, 2, 0, 0x00001840, 0xffffffff, 0x26283032, },
+ { 0, 2, 0, 0x00001844, 0xffffffff, 0x30302224, },
+ { 0, 2, 1, 0x00001848, 0xffffffff, 0x28303030, },
+ { 0, 2, 1, 0x0000184c, 0xffffffff, 0x20222426, },
+ { 0, 2, 2, 0x000018e0, 0xffffffff, 0x28282828, },
+ { 0, 2, 2, 0x000018e4, 0xffffffff, 0x22242628, },
+ { 0, 2, 2, 0x000018e8, 0x0000ffff, 0x18202020, },
+ { 0, 3, 0, 0x00001a20, 0xffffffff, 0x32323232, },
+ { 0, 3, 0, 0x00001a24, 0xffffffff, 0x32323232, },
+ { 0, 3, 0, 0x00001a28, 0xffffffff, 0x28303232, },
+ { 0, 3, 0, 0x00001a2c, 0xffffffff, 0x32323232, },
+ { 0, 3, 0, 0x00001a30, 0xffffffff, 0x26283032, },
+ { 0, 3, 1, 0x00001a34, 0xffffffff, 0x30303030, },
+ { 0, 3, 1, 0x00001a38, 0xffffffff, 0x24262830, },
+ { 0, 3, 2, 0x00001ad8, 0xffffffff, 0x28282828, },
+ { 0, 3, 2, 0x00001adc, 0xffffffff, 0x22242628, },
+ { 0, 3, 0, 0x00001a3c, 0xffffffff, 0x32323232, },
+ { 0, 3, 0, 0x00001a40, 0xffffffff, 0x26283032, },
+ { 0, 3, 0, 0x00001a44, 0xffffffff, 0x30302224, },
+ { 0, 3, 1, 0x00001a48, 0xffffffff, 0x28303030, },
+ { 0, 3, 1, 0x00001a4c, 0xffffffff, 0x20222426, },
+ { 0, 3, 2, 0x00001ae0, 0xffffffff, 0x28282828, },
+ { 0, 3, 2, 0x00001ae4, 0xffffffff, 0x22242628, },
+ { 0, 3, 2, 0x00001ae8, 0x0000ffff, 0x18202020, },
+ { 1, 0, 0, 0x00000c24, 0xffffffff, 0x32323232, },
+ { 1, 0, 0, 0x00000c28, 0xffffffff, 0x28303232, },
+ { 1, 0, 0, 0x00000c2c, 0xffffffff, 0x32323232, },
+ { 1, 0, 0, 0x00000c30, 0xffffffff, 0x26283032, },
+ { 1, 0, 1, 0x00000c34, 0xffffffff, 0x30303030, },
+ { 1, 0, 1, 0x00000c38, 0xffffffff, 0x24262830, },
+ { 1, 0, 2, 0x00000cd8, 0xffffffff, 0x28282828, },
+ { 1, 0, 2, 0x00000cdc, 0xffffffff, 0x22242628, },
+ { 1, 0, 0, 0x00000c3c, 0xffffffff, 0x32323232, },
+ { 1, 0, 0, 0x00000c40, 0xffffffff, 0x26283032, },
+ { 1, 0, 0, 0x00000c44, 0xffffffff, 0x30302224, },
+ { 1, 0, 1, 0x00000c48, 0xffffffff, 0x28303030, },
+ { 1, 0, 1, 0x00000c4c, 0xffffffff, 0x20222426, },
+ { 1, 0, 2, 0x00000ce0, 0xffffffff, 0x28282828, },
+ { 1, 0, 2, 0x00000ce4, 0xffffffff, 0x22242628, },
+ { 1, 0, 2, 0x00000ce8, 0x0000ffff, 0x18202020, },
+ { 1, 1, 0, 0x00000e24, 0xffffffff, 0x32323232, },
+ { 1, 1, 0, 0x00000e28, 0xffffffff, 0x28303232, },
+ { 1, 1, 0, 0x00000e2c, 0xffffffff, 0x32323232, },
+ { 1, 1, 0, 0x00000e30, 0xffffffff, 0x26283032, },
+ { 1, 1, 1, 0x00000e34, 0xffffffff, 0x30303030, },
+ { 1, 1, 1, 0x00000e38, 0xffffffff, 0x24262830, },
+ { 1, 1, 2, 0x00000ed8, 0xffffffff, 0x28282828, },
+ { 1, 1, 2, 0x00000edc, 0xffffffff, 0x22242628, },
+ { 1, 1, 0, 0x00000e3c, 0xffffffff, 0x32323232, },
+ { 1, 1, 0, 0x00000e40, 0xffffffff, 0x26283032, },
+ { 1, 1, 0, 0x00000e44, 0xffffffff, 0x30302224, },
+ { 1, 1, 1, 0x00000e48, 0xffffffff, 0x28303030, },
+ { 1, 1, 1, 0x00000e4c, 0xffffffff, 0x20222426, },
+ { 1, 1, 2, 0x00000ee0, 0xffffffff, 0x28282828, },
+ { 1, 1, 2, 0x00000ee4, 0xffffffff, 0x22242628, },
+ { 1, 1, 2, 0x00000ee8, 0x0000ffff, 0x18202020, },
+ { 1, 2, 0, 0x00001824, 0xffffffff, 0x32323232, },
+ { 1, 2, 0, 0x00001828, 0xffffffff, 0x28303232, },
+ { 1, 2, 0, 0x0000182c, 0xffffffff, 0x32323232, },
+ { 1, 2, 0, 0x00001830, 0xffffffff, 0x26283032, },
+ { 1, 2, 1, 0x00001834, 0xffffffff, 0x30303030, },
+ { 1, 2, 1, 0x00001838, 0xffffffff, 0x24262830, },
+ { 1, 2, 2, 0x000018d8, 0xffffffff, 0x28282828, },
+ { 1, 2, 2, 0x000018dc, 0xffffffff, 0x22242628, },
+ { 1, 2, 0, 0x0000183c, 0xffffffff, 0x32323232, },
+ { 1, 2, 0, 0x00001840, 0xffffffff, 0x26283032, },
+ { 1, 2, 0, 0x00001844, 0xffffffff, 0x30302224, },
+ { 1, 2, 1, 0x00001848, 0xffffffff, 0x28303030, },
+ { 1, 2, 1, 0x0000184c, 0xffffffff, 0x20222426, },
+ { 1, 2, 2, 0x000018e0, 0xffffffff, 0x28282828, },
+ { 1, 2, 2, 0x000018e4, 0xffffffff, 0x22242628, },
+ { 1, 2, 2, 0x000018e8, 0x0000ffff, 0x18202020, },
+ { 1, 3, 0, 0x00001a24, 0xffffffff, 0x32323232, },
+ { 1, 3, 0, 0x00001a28, 0xffffffff, 0x28303232, },
+ { 1, 3, 0, 0x00001a2c, 0xffffffff, 0x32323232, },
+ { 1, 3, 0, 0x00001a30, 0xffffffff, 0x26283032, },
+ { 1, 3, 1, 0x00001a34, 0xffffffff, 0x30303030, },
+ { 1, 3, 1, 0x00001a38, 0xffffffff, 0x24262830, },
+ { 1, 3, 2, 0x00001ad8, 0xffffffff, 0x28282828, },
+ { 1, 3, 2, 0x00001adc, 0xffffffff, 0x22242628, },
+ { 1, 3, 0, 0x00001a3c, 0xffffffff, 0x32323232, },
+ { 1, 3, 0, 0x00001a40, 0xffffffff, 0x26283032, },
+ { 1, 3, 0, 0x00001a44, 0xffffffff, 0x30302224, },
+ { 1, 3, 1, 0x00001a48, 0xffffffff, 0x28303030, },
+ { 1, 3, 1, 0x00001a4c, 0xffffffff, 0x20222426, },
+ { 1, 3, 2, 0x00001ae0, 0xffffffff, 0x28282828, },
+ { 1, 3, 2, 0x00001ae4, 0xffffffff, 0x22242628, },
+ { 1, 3, 2, 0x00001ae8, 0x0000ffff, 0x18202020, },
+};
+
+RTW_DECL_TABLE_BB_PG(rtw8814a_bb_pg_type0);
+
+static const struct rtw_phy_pg_cfg_pair rtw8814a_bb_pg_type2[] = {
+ { 0, 0, 0, 0x00000c20, 0xffffffff, 0x34343434, },
+ { 0, 0, 0, 0x00000c24, 0xffffffff, 0x34343434, },
+ { 0, 0, 0, 0x00000c28, 0xffffffff, 0x30323434, },
+ { 0, 0, 0, 0x00000c2c, 0xffffffff, 0x34343434, },
+ { 0, 0, 0, 0x00000c30, 0xffffffff, 0x28303234, },
+ { 0, 0, 1, 0x00000c34, 0xffffffff, 0x32323232, },
+ { 0, 0, 1, 0x00000c38, 0xffffffff, 0x26283032, },
+ { 0, 0, 2, 0x00000cd8, 0xffffffff, 0x30303030, },
+ { 0, 0, 2, 0x00000cdc, 0xffffffff, 0x24262830, },
+ { 0, 0, 0, 0x00000c3c, 0xffffffff, 0x34343434, },
+ { 0, 0, 0, 0x00000c40, 0xffffffff, 0x28303234, },
+ { 0, 0, 0, 0x00000c44, 0xffffffff, 0x32322426, },
+ { 0, 0, 1, 0x00000c48, 0xffffffff, 0x30323232, },
+ { 0, 0, 1, 0x00000c4c, 0xffffffff, 0x22242628, },
+ { 0, 0, 2, 0x00000ce0, 0xffffffff, 0x30303030, },
+ { 0, 0, 2, 0x00000ce4, 0xffffffff, 0x24262830, },
+ { 0, 0, 2, 0x00000ce8, 0x0000ffff, 0x20222222, },
+ { 0, 1, 0, 0x00000e20, 0xffffffff, 0x34343434, },
+ { 0, 1, 0, 0x00000e24, 0xffffffff, 0x34343434, },
+ { 0, 1, 0, 0x00000e28, 0xffffffff, 0x30323434, },
+ { 0, 1, 0, 0x00000e2c, 0xffffffff, 0x34343434, },
+ { 0, 1, 0, 0x00000e30, 0xffffffff, 0x28303234, },
+ { 0, 1, 1, 0x00000e34, 0xffffffff, 0x32323232, },
+ { 0, 1, 1, 0x00000e38, 0xffffffff, 0x26283032, },
+ { 0, 1, 2, 0x00000ed8, 0xffffffff, 0x30303030, },
+ { 0, 1, 2, 0x00000edc, 0xffffffff, 0x24262830, },
+ { 0, 1, 0, 0x00000e3c, 0xffffffff, 0x34343434, },
+ { 0, 1, 0, 0x00000e40, 0xffffffff, 0x28303234, },
+ { 0, 1, 0, 0x00000e44, 0xffffffff, 0x32322426, },
+ { 0, 1, 1, 0x00000e48, 0xffffffff, 0x30323232, },
+ { 0, 1, 1, 0x00000e4c, 0xffffffff, 0x22242628, },
+ { 0, 1, 2, 0x00000ee0, 0xffffffff, 0x30303030, },
+ { 0, 1, 2, 0x00000ee4, 0xffffffff, 0x24262830, },
+ { 0, 1, 2, 0x00000ee8, 0x0000ffff, 0x20222222, },
+ { 0, 2, 0, 0x00001820, 0xffffffff, 0x34343434, },
+ { 0, 2, 0, 0x00001824, 0xffffffff, 0x34343434, },
+ { 0, 2, 0, 0x00001828, 0xffffffff, 0x30323434, },
+ { 0, 2, 0, 0x0000182c, 0xffffffff, 0x34343434, },
+ { 0, 2, 0, 0x00001830, 0xffffffff, 0x28303234, },
+ { 0, 2, 1, 0x00001834, 0xffffffff, 0x32323232, },
+ { 0, 2, 1, 0x00001838, 0xffffffff, 0x26283032, },
+ { 0, 2, 2, 0x000018d8, 0xffffffff, 0x30303030, },
+ { 0, 2, 2, 0x000018dc, 0xffffffff, 0x24262830, },
+ { 0, 2, 0, 0x0000183c, 0xffffffff, 0x34343434, },
+ { 0, 2, 0, 0x00001840, 0xffffffff, 0x28303234, },
+ { 0, 2, 0, 0x00001844, 0xffffffff, 0x32322426, },
+ { 0, 2, 1, 0x00001848, 0xffffffff, 0x30323232, },
+ { 0, 2, 1, 0x0000184c, 0xffffffff, 0x22242628, },
+ { 0, 2, 2, 0x000018e0, 0xffffffff, 0x30303030, },
+ { 0, 2, 2, 0x000018e4, 0xffffffff, 0x24262830, },
+ { 0, 2, 2, 0x000018e8, 0x0000ffff, 0x20222222, },
+ { 0, 3, 0, 0x00001a20, 0xffffffff, 0x34343434, },
+ { 0, 3, 0, 0x00001a24, 0xffffffff, 0x34343434, },
+ { 0, 3, 0, 0x00001a28, 0xffffffff, 0x30323434, },
+ { 0, 3, 0, 0x00001a2c, 0xffffffff, 0x34343434, },
+ { 0, 3, 0, 0x00001a30, 0xffffffff, 0x28303234, },
+ { 0, 3, 1, 0x00001a34, 0xffffffff, 0x32323232, },
+ { 0, 3, 1, 0x00001a38, 0xffffffff, 0x26283032, },
+ { 0, 3, 2, 0x00001ad8, 0xffffffff, 0x30303030, },
+ { 0, 3, 2, 0x00001adc, 0xffffffff, 0x24262830, },
+ { 0, 3, 0, 0x00001a3c, 0xffffffff, 0x34343434, },
+ { 0, 3, 0, 0x00001a40, 0xffffffff, 0x28303234, },
+ { 0, 3, 0, 0x00001a44, 0xffffffff, 0x32322426, },
+ { 0, 3, 1, 0x00001a48, 0xffffffff, 0x30323232, },
+ { 0, 3, 1, 0x00001a4c, 0xffffffff, 0x22242628, },
+ { 0, 3, 2, 0x00001ae0, 0xffffffff, 0x30303030, },
+ { 0, 3, 2, 0x00001ae4, 0xffffffff, 0x24262830, },
+ { 0, 3, 2, 0x00001ae8, 0x0000ffff, 0x20222222, },
+ { 1, 0, 0, 0x00000c24, 0xffffffff, 0x34343434, },
+ { 1, 0, 0, 0x00000c28, 0xffffffff, 0x30323434, },
+ { 1, 0, 0, 0x00000c2c, 0xffffffff, 0x34343434, },
+ { 1, 0, 0, 0x00000c30, 0xffffffff, 0x28303234, },
+ { 1, 0, 1, 0x00000c34, 0xffffffff, 0x32323232, },
+ { 1, 0, 1, 0x00000c38, 0xffffffff, 0x26283032, },
+ { 1, 0, 2, 0x00000cd8, 0xffffffff, 0x30303030, },
+ { 1, 0, 2, 0x00000cdc, 0xffffffff, 0x24262830, },
+ { 1, 0, 0, 0x00000c3c, 0xffffffff, 0x34343434, },
+ { 1, 0, 0, 0x00000c40, 0xffffffff, 0x28303234, },
+ { 1, 0, 0, 0x00000c44, 0xffffffff, 0x32322426, },
+ { 1, 0, 1, 0x00000c48, 0xffffffff, 0x30323232, },
+ { 1, 0, 1, 0x00000c4c, 0xffffffff, 0x22242628, },
+ { 1, 0, 2, 0x00000ce0, 0xffffffff, 0x30303030, },
+ { 1, 0, 2, 0x00000ce4, 0xffffffff, 0x24262830, },
+ { 1, 0, 2, 0x00000ce8, 0x0000ffff, 0x20222222, },
+ { 1, 1, 0, 0x00000e24, 0xffffffff, 0x34343434, },
+ { 1, 1, 0, 0x00000e28, 0xffffffff, 0x30323434, },
+ { 1, 1, 0, 0x00000e2c, 0xffffffff, 0x34343434, },
+ { 1, 1, 0, 0x00000e30, 0xffffffff, 0x28303234, },
+ { 1, 1, 1, 0x00000e34, 0xffffffff, 0x32323232, },
+ { 1, 1, 1, 0x00000e38, 0xffffffff, 0x26283032, },
+ { 1, 1, 2, 0x00000ed8, 0xffffffff, 0x30303030, },
+ { 1, 1, 2, 0x00000edc, 0xffffffff, 0x24262830, },
+ { 1, 1, 0, 0x00000e3c, 0xffffffff, 0x34343434, },
+ { 1, 1, 0, 0x00000e40, 0xffffffff, 0x28303234, },
+ { 1, 1, 0, 0x00000e44, 0xffffffff, 0x32322426, },
+ { 1, 1, 1, 0x00000e48, 0xffffffff, 0x30323232, },
+ { 1, 1, 1, 0x00000e4c, 0xffffffff, 0x22242628, },
+ { 1, 1, 2, 0x00000ee0, 0xffffffff, 0x30303030, },
+ { 1, 1, 2, 0x00000ee4, 0xffffffff, 0x24262830, },
+ { 1, 1, 2, 0x00000ee8, 0x0000ffff, 0x20222222, },
+ { 1, 2, 0, 0x00001824, 0xffffffff, 0x34343434, },
+ { 1, 2, 0, 0x00001828, 0xffffffff, 0x30323434, },
+ { 1, 2, 0, 0x0000182c, 0xffffffff, 0x34343434, },
+ { 1, 2, 0, 0x00001830, 0xffffffff, 0x28303234, },
+ { 1, 2, 1, 0x00001834, 0xffffffff, 0x32323232, },
+ { 1, 2, 1, 0x00001838, 0xffffffff, 0x26283032, },
+ { 1, 2, 2, 0x000018d8, 0xffffffff, 0x30303030, },
+ { 1, 2, 2, 0x000018dc, 0xffffffff, 0x24262830, },
+ { 1, 2, 0, 0x0000183c, 0xffffffff, 0x34343434, },
+ { 1, 2, 0, 0x00001840, 0xffffffff, 0x28303234, },
+ { 1, 2, 0, 0x00001844, 0xffffffff, 0x32322426, },
+ { 1, 2, 1, 0x00001848, 0xffffffff, 0x30323232, },
+ { 1, 2, 1, 0x0000184c, 0xffffffff, 0x22242628, },
+ { 1, 2, 2, 0x000018e0, 0xffffffff, 0x30303030, },
+ { 1, 2, 2, 0x000018e4, 0xffffffff, 0x24262830, },
+ { 1, 2, 2, 0x000018e8, 0x0000ffff, 0x20222222, },
+ { 1, 3, 0, 0x00001a24, 0xffffffff, 0x34343434, },
+ { 1, 3, 0, 0x00001a28, 0xffffffff, 0x30323434, },
+ { 1, 3, 0, 0x00001a2c, 0xffffffff, 0x34343434, },
+ { 1, 3, 0, 0x00001a30, 0xffffffff, 0x28303234, },
+ { 1, 3, 1, 0x00001a34, 0xffffffff, 0x32323232, },
+ { 1, 3, 1, 0x00001a38, 0xffffffff, 0x26283032, },
+ { 1, 3, 2, 0x00001ad8, 0xffffffff, 0x30303030, },
+ { 1, 3, 2, 0x00001adc, 0xffffffff, 0x24262830, },
+ { 1, 3, 0, 0x00001a3c, 0xffffffff, 0x34343434, },
+ { 1, 3, 0, 0x00001a40, 0xffffffff, 0x28303234, },
+ { 1, 3, 0, 0x00001a44, 0xffffffff, 0x32322426, },
+ { 1, 3, 1, 0x00001a48, 0xffffffff, 0x30323232, },
+ { 1, 3, 1, 0x00001a4c, 0xffffffff, 0x22242628, },
+ { 1, 3, 2, 0x00001ae0, 0xffffffff, 0x30303030, },
+ { 1, 3, 2, 0x00001ae4, 0xffffffff, 0x24262830, },
+ { 1, 3, 2, 0x00001ae8, 0x0000ffff, 0x20222222, },
+};
+
+RTW_DECL_TABLE_BB_PG(rtw8814a_bb_pg_type2);
+
+static const struct rtw_phy_pg_cfg_pair rtw8814a_bb_pg_type3[] = {
+ { 0, 0, 0, 0x00000c20, 0xffffffff, 0x48484848, },
+ { 0, 0, 0, 0x00000c24, 0xffffffff, 0x46464646, },
+ { 0, 0, 0, 0x00000c28, 0xffffffff, 0x44464646, },
+ { 0, 0, 0, 0x00000c2c, 0xffffffff, 0x46464646, },
+ { 0, 0, 0, 0x00000c30, 0xffffffff, 0x42444646, },
+ { 0, 0, 1, 0x00000c34, 0xffffffff, 0x46464646, },
+ { 0, 0, 1, 0x00000c38, 0xffffffff, 0x42444646, },
+ { 0, 0, 2, 0x00000cd8, 0xffffffff, 0x46464646, },
+ { 0, 0, 2, 0x00000cdc, 0xffffffff, 0x42444646, },
+ { 0, 0, 0, 0x00000c3c, 0xffffffff, 0x46464646, },
+ { 0, 0, 0, 0x00000c40, 0xffffffff, 0x42444646, },
+ { 0, 0, 0, 0x00000c44, 0xffffffff, 0x46463840, },
+ { 0, 0, 1, 0x00000c48, 0xffffffff, 0x46464646, },
+ { 0, 0, 1, 0x00000c4c, 0xffffffff, 0x38404244, },
+ { 0, 0, 2, 0x00000ce0, 0xffffffff, 0x46464646, },
+ { 0, 0, 2, 0x00000ce4, 0xffffffff, 0x42444646, },
+ { 0, 0, 2, 0x00000ce8, 0x0000ffff, 0x38383840, },
+ { 0, 1, 0, 0x00000e20, 0xffffffff, 0x48484848, },
+ { 0, 1, 0, 0x00000e24, 0xffffffff, 0x46464646, },
+ { 0, 1, 0, 0x00000e28, 0xffffffff, 0x44464646, },
+ { 0, 1, 0, 0x00000e2c, 0xffffffff, 0x46464646, },
+ { 0, 1, 0, 0x00000e30, 0xffffffff, 0x42444646, },
+ { 0, 1, 1, 0x00000e34, 0xffffffff, 0x46464646, },
+ { 0, 1, 1, 0x00000e38, 0xffffffff, 0x42444646, },
+ { 0, 1, 2, 0x00000ed8, 0xffffffff, 0x46464646, },
+ { 0, 1, 2, 0x00000edc, 0xffffffff, 0x42444646, },
+ { 0, 1, 0, 0x00000e3c, 0xffffffff, 0x46464646, },
+ { 0, 1, 0, 0x00000e40, 0xffffffff, 0x42444646, },
+ { 0, 1, 0, 0x00000e44, 0xffffffff, 0x46463840, },
+ { 0, 1, 1, 0x00000e48, 0xffffffff, 0x46464646, },
+ { 0, 1, 1, 0x00000e4c, 0xffffffff, 0x38404244, },
+ { 0, 1, 2, 0x00000ee0, 0xffffffff, 0x46464646, },
+ { 0, 1, 2, 0x00000ee4, 0xffffffff, 0x42444646, },
+ { 0, 1, 2, 0x00000ee8, 0x0000ffff, 0x38383840, },
+ { 0, 2, 0, 0x00001820, 0xffffffff, 0x48484848, },
+ { 0, 2, 0, 0x00001824, 0xffffffff, 0x46464646, },
+ { 0, 2, 0, 0x00001828, 0xffffffff, 0x44464646, },
+ { 0, 2, 0, 0x0000182c, 0xffffffff, 0x46464646, },
+ { 0, 2, 0, 0x00001830, 0xffffffff, 0x42444646, },
+ { 0, 2, 1, 0x00001834, 0xffffffff, 0x46464646, },
+ { 0, 2, 1, 0x00001838, 0xffffffff, 0x42444646, },
+ { 0, 2, 2, 0x000018d8, 0xffffffff, 0x46464646, },
+ { 0, 2, 2, 0x000018dc, 0xffffffff, 0x42444646, },
+ { 0, 2, 0, 0x0000183c, 0xffffffff, 0x46464646, },
+ { 0, 2, 0, 0x00001840, 0xffffffff, 0x42444646, },
+ { 0, 2, 0, 0x00001844, 0xffffffff, 0x46463840, },
+ { 0, 2, 1, 0x00001848, 0xffffffff, 0x46464646, },
+ { 0, 2, 1, 0x0000184c, 0xffffffff, 0x38404244, },
+ { 0, 2, 2, 0x000018e0, 0xffffffff, 0x46464646, },
+ { 0, 2, 2, 0x000018e4, 0xffffffff, 0x42444646, },
+ { 0, 2, 2, 0x000018e8, 0x0000ffff, 0x38383840, },
+ { 0, 3, 0, 0x00001a20, 0xffffffff, 0x48484848, },
+ { 0, 3, 0, 0x00001a24, 0xffffffff, 0x46464646, },
+ { 0, 3, 0, 0x00001a28, 0xffffffff, 0x44464646, },
+ { 0, 3, 0, 0x00001a2c, 0xffffffff, 0x46464646, },
+ { 0, 3, 0, 0x00001a30, 0xffffffff, 0x42444646, },
+ { 0, 3, 1, 0x00001a34, 0xffffffff, 0x46464646, },
+ { 0, 3, 1, 0x00001a38, 0xffffffff, 0x42444646, },
+ { 0, 3, 2, 0x00001ad8, 0xffffffff, 0x46464646, },
+ { 0, 3, 2, 0x00001adc, 0xffffffff, 0x42444646, },
+ { 0, 3, 0, 0x00001a3c, 0xffffffff, 0x46464646, },
+ { 0, 3, 0, 0x00001a40, 0xffffffff, 0x42444646, },
+ { 0, 3, 0, 0x00001a44, 0xffffffff, 0x46463840, },
+ { 0, 3, 1, 0x00001a48, 0xffffffff, 0x46464646, },
+ { 0, 3, 1, 0x00001a4c, 0xffffffff, 0x38404244, },
+ { 0, 3, 2, 0x00001ae0, 0xffffffff, 0x46464646, },
+ { 0, 3, 2, 0x00001ae4, 0xffffffff, 0x42444646, },
+ { 0, 3, 2, 0x00001ae8, 0x0000ffff, 0x38383840, },
+ { 1, 0, 0, 0x00000c24, 0xffffffff, 0x46464646, },
+ { 1, 0, 0, 0x00000c28, 0xffffffff, 0x44464646, },
+ { 1, 0, 0, 0x00000c2c, 0xffffffff, 0x46464646, },
+ { 1, 0, 0, 0x00000c30, 0xffffffff, 0x42444646, },
+ { 1, 0, 1, 0x00000c34, 0xffffffff, 0x46464646, },
+ { 1, 0, 1, 0x00000c38, 0xffffffff, 0x42444646, },
+ { 1, 0, 2, 0x00000cd8, 0xffffffff, 0x46464646, },
+ { 1, 0, 2, 0x00000cdc, 0xffffffff, 0x42444646, },
+ { 1, 0, 0, 0x00000c3c, 0xffffffff, 0x46464646, },
+ { 1, 0, 0, 0x00000c40, 0xffffffff, 0x42444646, },
+ { 1, 0, 0, 0x00000c44, 0xffffffff, 0x46463840, },
+ { 1, 0, 1, 0x00000c48, 0xffffffff, 0x46464646, },
+ { 1, 0, 1, 0x00000c4c, 0xffffffff, 0x38404244, },
+ { 1, 0, 2, 0x00000ce0, 0xffffffff, 0x46464646, },
+ { 1, 0, 2, 0x00000ce4, 0xffffffff, 0x42444646, },
+ { 1, 0, 2, 0x00000ce8, 0x0000ffff, 0x38383840, },
+ { 1, 1, 0, 0x00000e24, 0xffffffff, 0x46464646, },
+ { 1, 1, 0, 0x00000e28, 0xffffffff, 0x44464646, },
+ { 1, 1, 0, 0x00000e2c, 0xffffffff, 0x46464646, },
+ { 1, 1, 0, 0x00000e30, 0xffffffff, 0x42444646, },
+ { 1, 1, 1, 0x00000e34, 0xffffffff, 0x46464646, },
+ { 1, 1, 1, 0x00000e38, 0xffffffff, 0x42444646, },
+ { 1, 1, 2, 0x00000ed8, 0xffffffff, 0x46464646, },
+ { 1, 1, 2, 0x00000edc, 0xffffffff, 0x42444646, },
+ { 1, 1, 0, 0x00000e3c, 0xffffffff, 0x46464646, },
+ { 1, 1, 0, 0x00000e40, 0xffffffff, 0x42444646, },
+ { 1, 1, 0, 0x00000e44, 0xffffffff, 0x46463840, },
+ { 1, 1, 1, 0x00000e48, 0xffffffff, 0x46464646, },
+ { 1, 1, 1, 0x00000e4c, 0xffffffff, 0x38404244, },
+ { 1, 1, 2, 0x00000ee0, 0xffffffff, 0x46464646, },
+ { 1, 1, 2, 0x00000ee4, 0xffffffff, 0x42444646, },
+ { 1, 1, 2, 0x00000ee8, 0x0000ffff, 0x38383840, },
+ { 1, 2, 0, 0x00001824, 0xffffffff, 0x46464646, },
+ { 1, 2, 0, 0x00001828, 0xffffffff, 0x44464646, },
+ { 1, 2, 0, 0x0000182c, 0xffffffff, 0x46464646, },
+ { 1, 2, 0, 0x00001830, 0xffffffff, 0x42444646, },
+ { 1, 2, 1, 0x00001834, 0xffffffff, 0x46464646, },
+ { 1, 2, 1, 0x00001838, 0xffffffff, 0x42444646, },
+ { 1, 2, 2, 0x000018d8, 0xffffffff, 0x46464646, },
+ { 1, 2, 2, 0x000018dc, 0xffffffff, 0x42444646, },
+ { 1, 2, 0, 0x0000183c, 0xffffffff, 0x46464646, },
+ { 1, 2, 0, 0x00001840, 0xffffffff, 0x42444646, },
+ { 1, 2, 0, 0x00001844, 0xffffffff, 0x46463840, },
+ { 1, 2, 1, 0x00001848, 0xffffffff, 0x46464646, },
+ { 1, 2, 1, 0x0000184c, 0xffffffff, 0x38404244, },
+ { 1, 2, 2, 0x000018e0, 0xffffffff, 0x46464646, },
+ { 1, 2, 2, 0x000018e4, 0xffffffff, 0x42444646, },
+ { 1, 2, 2, 0x000018e8, 0x0000ffff, 0x38383840, },
+ { 1, 3, 0, 0x00001a24, 0xffffffff, 0x46464646, },
+ { 1, 3, 0, 0x00001a28, 0xffffffff, 0x44464646, },
+ { 1, 3, 0, 0x00001a2c, 0xffffffff, 0x46464646, },
+ { 1, 3, 0, 0x00001a30, 0xffffffff, 0x42444646, },
+ { 1, 3, 1, 0x00001a34, 0xffffffff, 0x46464646, },
+ { 1, 3, 1, 0x00001a38, 0xffffffff, 0x42444646, },
+ { 1, 3, 2, 0x00001ad8, 0xffffffff, 0x46464646, },
+ { 1, 3, 2, 0x00001adc, 0xffffffff, 0x42444646, },
+ { 1, 3, 0, 0x00001a3c, 0xffffffff, 0x46464646, },
+ { 1, 3, 0, 0x00001a40, 0xffffffff, 0x42444646, },
+ { 1, 3, 0, 0x00001a44, 0xffffffff, 0x46463840, },
+ { 1, 3, 1, 0x00001a48, 0xffffffff, 0x46464646, },
+ { 1, 3, 1, 0x00001a4c, 0xffffffff, 0x38404244, },
+ { 1, 3, 2, 0x00001ae0, 0xffffffff, 0x46464646, },
+ { 1, 3, 2, 0x00001ae4, 0xffffffff, 0x42444646, },
+ { 1, 3, 2, 0x00001ae8, 0x0000ffff, 0x38383840, },
+};
+
+RTW_DECL_TABLE_BB_PG(rtw8814a_bb_pg_type3);
+
+static const struct rtw_phy_pg_cfg_pair rtw8814a_bb_pg_type4[] = {
+ { 0, 0, 0, 0x00000c20, 0xffffffff, 0x42424242, },
+ { 0, 0, 0, 0x00000c24, 0xffffffff, 0x42424242, },
+ { 0, 0, 0, 0x00000c28, 0xffffffff, 0x36384042, },
+ { 0, 0, 0, 0x00000c2c, 0xffffffff, 0x42424242, },
+ { 0, 0, 0, 0x00000c30, 0xffffffff, 0x34363840, },
+ { 0, 0, 1, 0x00000c34, 0xffffffff, 0x42424242, },
+ { 0, 0, 1, 0x00000c38, 0xffffffff, 0x34363840, },
+ { 0, 0, 2, 0x00000cd8, 0xffffffff, 0x42424242, },
+ { 0, 0, 2, 0x00000cdc, 0xffffffff, 0x34363840, },
+ { 0, 0, 0, 0x00000c3c, 0xffffffff, 0x42424242, },
+ { 0, 0, 0, 0x00000c40, 0xffffffff, 0x34363840, },
+ { 0, 0, 0, 0x00000c44, 0xffffffff, 0x42423032, },
+ { 0, 0, 1, 0x00000c48, 0xffffffff, 0x38404242, },
+ { 0, 0, 1, 0x00000c4c, 0xffffffff, 0x30323436, },
+ { 0, 0, 2, 0x00000ce0, 0xffffffff, 0x42424242, },
+ { 0, 0, 2, 0x00000ce4, 0xffffffff, 0x34363840, },
+ { 0, 0, 2, 0x00000ce8, 0x0000ffff, 0x30303032, },
+ { 0, 1, 0, 0x00000e20, 0xffffffff, 0x42424242, },
+ { 0, 1, 0, 0x00000e24, 0xffffffff, 0x42424242, },
+ { 0, 1, 0, 0x00000e28, 0xffffffff, 0x36384042, },
+ { 0, 1, 0, 0x00000e2c, 0xffffffff, 0x42424242, },
+ { 0, 1, 0, 0x00000e30, 0xffffffff, 0x34363840, },
+ { 0, 1, 1, 0x00000e34, 0xffffffff, 0x42424242, },
+ { 0, 1, 1, 0x00000e38, 0xffffffff, 0x34363840, },
+ { 0, 1, 2, 0x00000ed8, 0xffffffff, 0x42424242, },
+ { 0, 1, 2, 0x00000edc, 0xffffffff, 0x34363840, },
+ { 0, 1, 0, 0x00000e3c, 0xffffffff, 0x42424242, },
+ { 0, 1, 0, 0x00000e40, 0xffffffff, 0x34363840, },
+ { 0, 1, 0, 0x00000e44, 0xffffffff, 0x42423032, },
+ { 0, 1, 1, 0x00000e48, 0xffffffff, 0x38404242, },
+ { 0, 1, 1, 0x00000e4c, 0xffffffff, 0x30323436, },
+ { 0, 1, 2, 0x00000ee0, 0xffffffff, 0x42424242, },
+ { 0, 1, 2, 0x00000ee4, 0xffffffff, 0x34363840, },
+ { 0, 1, 2, 0x00000ee8, 0x0000ffff, 0x30303032, },
+ { 0, 2, 0, 0x00001820, 0xffffffff, 0x42424242, },
+ { 0, 2, 0, 0x00001824, 0xffffffff, 0x42424242, },
+ { 0, 2, 0, 0x00001828, 0xffffffff, 0x36384042, },
+ { 0, 2, 0, 0x0000182c, 0xffffffff, 0x42424242, },
+ { 0, 2, 0, 0x00001830, 0xffffffff, 0x34363840, },
+ { 0, 2, 1, 0x00001834, 0xffffffff, 0x42424242, },
+ { 0, 2, 1, 0x00001838, 0xffffffff, 0x34363840, },
+ { 0, 2, 2, 0x000018d8, 0xffffffff, 0x42424242, },
+ { 0, 2, 2, 0x000018dc, 0xffffffff, 0x34363840, },
+ { 0, 2, 0, 0x0000183c, 0xffffffff, 0x42424242, },
+ { 0, 2, 0, 0x00001840, 0xffffffff, 0x34363840, },
+ { 0, 2, 0, 0x00001844, 0xffffffff, 0x42423032, },
+ { 0, 2, 1, 0x00001848, 0xffffffff, 0x38404242, },
+ { 0, 2, 1, 0x0000184c, 0xffffffff, 0x30323436, },
+ { 0, 2, 2, 0x000018e0, 0xffffffff, 0x42424242, },
+ { 0, 2, 2, 0x000018e4, 0xffffffff, 0x34363840, },
+ { 0, 2, 2, 0x000018e8, 0x0000ffff, 0x30303032, },
+ { 0, 3, 0, 0x00001a20, 0xffffffff, 0x42424242, },
+ { 0, 3, 0, 0x00001a24, 0xffffffff, 0x42424242, },
+ { 0, 3, 0, 0x00001a28, 0xffffffff, 0x36384042, },
+ { 0, 3, 0, 0x00001a2c, 0xffffffff, 0x42424242, },
+ { 0, 3, 0, 0x00001a30, 0xffffffff, 0x34363840, },
+ { 0, 3, 1, 0x00001a34, 0xffffffff, 0x42424242, },
+ { 0, 3, 1, 0x00001a38, 0xffffffff, 0x34363840, },
+ { 0, 3, 2, 0x00001ad8, 0xffffffff, 0x42424242, },
+ { 0, 3, 2, 0x00001adc, 0xffffffff, 0x34363840, },
+ { 0, 3, 0, 0x00001a3c, 0xffffffff, 0x42424242, },
+ { 0, 3, 0, 0x00001a40, 0xffffffff, 0x34363840, },
+ { 0, 3, 0, 0x00001a44, 0xffffffff, 0x42423032, },
+ { 0, 3, 1, 0x00001a48, 0xffffffff, 0x38404242, },
+ { 0, 3, 1, 0x00001a4c, 0xffffffff, 0x30323436, },
+ { 0, 3, 2, 0x00001ae0, 0xffffffff, 0x42424242, },
+ { 0, 3, 2, 0x00001ae4, 0xffffffff, 0x34363840, },
+ { 0, 3, 2, 0x00001ae8, 0x0000ffff, 0x30303032, },
+ { 1, 0, 0, 0x00000c24, 0xffffffff, 0x42424242, },
+ { 1, 0, 0, 0x00000c28, 0xffffffff, 0x36384042, },
+ { 1, 0, 0, 0x00000c2c, 0xffffffff, 0x42424242, },
+ { 1, 0, 0, 0x00000c30, 0xffffffff, 0x34363840, },
+ { 1, 0, 1, 0x00000c34, 0xffffffff, 0x42424242, },
+ { 1, 0, 1, 0x00000c38, 0xffffffff, 0x34363840, },
+ { 1, 0, 2, 0x00000cd8, 0xffffffff, 0x42424242, },
+ { 1, 0, 2, 0x00000cdc, 0xffffffff, 0x34363840, },
+ { 1, 0, 0, 0x00000c3c, 0xffffffff, 0x42424242, },
+ { 1, 0, 0, 0x00000c40, 0xffffffff, 0x34363840, },
+ { 1, 0, 0, 0x00000c44, 0xffffffff, 0x42423032, },
+ { 1, 0, 1, 0x00000c48, 0xffffffff, 0x38404242, },
+ { 1, 0, 1, 0x00000c4c, 0xffffffff, 0x30323436, },
+ { 1, 0, 2, 0x00000ce0, 0xffffffff, 0x42424242, },
+ { 1, 0, 2, 0x00000ce4, 0xffffffff, 0x34363840, },
+ { 1, 0, 2, 0x00000ce8, 0x0000ffff, 0x30303032, },
+ { 1, 1, 0, 0x00000e24, 0xffffffff, 0x42424242, },
+ { 1, 1, 0, 0x00000e28, 0xffffffff, 0x36384042, },
+ { 1, 1, 0, 0x00000e2c, 0xffffffff, 0x42424242, },
+ { 1, 1, 0, 0x00000e30, 0xffffffff, 0x34363840, },
+ { 1, 1, 1, 0x00000e34, 0xffffffff, 0x42424242, },
+ { 1, 1, 1, 0x00000e38, 0xffffffff, 0x34363840, },
+ { 1, 1, 2, 0x00000ed8, 0xffffffff, 0x42424242, },
+ { 1, 1, 2, 0x00000edc, 0xffffffff, 0x34363840, },
+ { 1, 1, 0, 0x00000e3c, 0xffffffff, 0x42424242, },
+ { 1, 1, 0, 0x00000e40, 0xffffffff, 0x34363840, },
+ { 1, 1, 0, 0x00000e44, 0xffffffff, 0x42423032, },
+ { 1, 1, 1, 0x00000e48, 0xffffffff, 0x38404242, },
+ { 1, 1, 1, 0x00000e4c, 0xffffffff, 0x30323436, },
+ { 1, 1, 2, 0x00000ee0, 0xffffffff, 0x42424242, },
+ { 1, 1, 2, 0x00000ee4, 0xffffffff, 0x34363840, },
+ { 1, 1, 2, 0x00000ee8, 0x0000ffff, 0x30303032, },
+ { 1, 2, 0, 0x00001824, 0xffffffff, 0x42424242, },
+ { 1, 2, 0, 0x00001828, 0xffffffff, 0x36384042, },
+ { 1, 2, 0, 0x0000182c, 0xffffffff, 0x42424242, },
+ { 1, 2, 0, 0x00001830, 0xffffffff, 0x34363840, },
+ { 1, 2, 1, 0x00001834, 0xffffffff, 0x42424242, },
+ { 1, 2, 1, 0x00001838, 0xffffffff, 0x34363840, },
+ { 1, 2, 2, 0x000018d8, 0xffffffff, 0x42424242, },
+ { 1, 2, 2, 0x000018dc, 0xffffffff, 0x34363840, },
+ { 1, 2, 0, 0x0000183c, 0xffffffff, 0x42424242, },
+ { 1, 2, 0, 0x00001840, 0xffffffff, 0x34363840, },
+ { 1, 2, 0, 0x00001844, 0xffffffff, 0x42423032, },
+ { 1, 2, 1, 0x00001848, 0xffffffff, 0x38404242, },
+ { 1, 2, 1, 0x0000184c, 0xffffffff, 0x30323436, },
+ { 1, 2, 2, 0x000018e0, 0xffffffff, 0x42424242, },
+ { 1, 2, 2, 0x000018e4, 0xffffffff, 0x34363840, },
+ { 1, 2, 2, 0x000018e8, 0x0000ffff, 0x30303032, },
+ { 1, 3, 0, 0x00001a24, 0xffffffff, 0x42424242, },
+ { 1, 3, 0, 0x00001a28, 0xffffffff, 0x36384042, },
+ { 1, 3, 0, 0x00001a2c, 0xffffffff, 0x42424242, },
+ { 1, 3, 0, 0x00001a30, 0xffffffff, 0x34363840, },
+ { 1, 3, 1, 0x00001a34, 0xffffffff, 0x42424242, },
+ { 1, 3, 1, 0x00001a38, 0xffffffff, 0x34363840, },
+ { 1, 3, 2, 0x00001ad8, 0xffffffff, 0x42424242, },
+ { 1, 3, 2, 0x00001adc, 0xffffffff, 0x34363840, },
+ { 1, 3, 0, 0x00001a3c, 0xffffffff, 0x42424242, },
+ { 1, 3, 0, 0x00001a40, 0xffffffff, 0x34363840, },
+ { 1, 3, 0, 0x00001a44, 0xffffffff, 0x42423032, },
+ { 1, 3, 1, 0x00001a48, 0xffffffff, 0x38404242, },
+ { 1, 3, 1, 0x00001a4c, 0xffffffff, 0x30323436, },
+ { 1, 3, 2, 0x00001ae0, 0xffffffff, 0x42424242, },
+ { 1, 3, 2, 0x00001ae4, 0xffffffff, 0x34363840, },
+ { 1, 3, 2, 0x00001ae8, 0x0000ffff, 0x30303032, },
+};
+
+RTW_DECL_TABLE_BB_PG(rtw8814a_bb_pg_type4);
+
+static const struct rtw_phy_pg_cfg_pair rtw8814a_bb_pg_type5[] = {
+ { 0, 0, 0, 0x00000c20, 0xffffffff, 0x48484848, },
+ { 0, 0, 0, 0x00000c24, 0xffffffff, 0x46464646, },
+ { 0, 0, 0, 0x00000c28, 0xffffffff, 0x44464646, },
+ { 0, 0, 0, 0x00000c2c, 0xffffffff, 0x46464646, },
+ { 0, 0, 0, 0x00000c30, 0xffffffff, 0x42444646, },
+ { 0, 0, 1, 0x00000c34, 0xffffffff, 0x44444444, },
+ { 0, 0, 1, 0x00000c38, 0xffffffff, 0x40424444, },
+ { 0, 0, 2, 0x00000cd8, 0xffffffff, 0x42424242, },
+ { 0, 0, 2, 0x00000cdc, 0xffffffff, 0x38404242, },
+ { 0, 0, 0, 0x00000c3c, 0xffffffff, 0x46464646, },
+ { 0, 0, 0, 0x00000c40, 0xffffffff, 0x42444646, },
+ { 0, 0, 0, 0x00000c44, 0xffffffff, 0x44444040, },
+ { 0, 0, 1, 0x00000c48, 0xffffffff, 0x44444444, },
+ { 0, 0, 1, 0x00000c4c, 0xffffffff, 0x38384042, },
+ { 0, 0, 2, 0x00000ce0, 0xffffffff, 0x42424242, },
+ { 0, 0, 2, 0x00000ce4, 0xffffffff, 0x38404242, },
+ { 0, 0, 2, 0x00000ce8, 0x0000ffff, 0x20203636, },
+ { 0, 1, 0, 0x00000e20, 0xffffffff, 0x48484848, },
+ { 0, 1, 0, 0x00000e24, 0xffffffff, 0x46464646, },
+ { 0, 1, 0, 0x00000e28, 0xffffffff, 0x44464646, },
+ { 0, 1, 0, 0x00000e2c, 0xffffffff, 0x46464646, },
+ { 0, 1, 0, 0x00000e30, 0xffffffff, 0x42444646, },
+ { 0, 1, 1, 0x00000e34, 0xffffffff, 0x44444444, },
+ { 0, 1, 1, 0x00000e38, 0xffffffff, 0x40424444, },
+ { 0, 1, 2, 0x00000ed8, 0xffffffff, 0x42424242, },
+ { 0, 1, 2, 0x00000edc, 0xffffffff, 0x38404242, },
+ { 0, 1, 0, 0x00000e3c, 0xffffffff, 0x46464646, },
+ { 0, 1, 0, 0x00000e40, 0xffffffff, 0x42444646, },
+ { 0, 1, 0, 0x00000e44, 0xffffffff, 0x44444040, },
+ { 0, 1, 1, 0x00000e48, 0xffffffff, 0x44444444, },
+ { 0, 1, 1, 0x00000e4c, 0xffffffff, 0x38384042, },
+ { 0, 1, 2, 0x00000ee0, 0xffffffff, 0x42424242, },
+ { 0, 1, 2, 0x00000ee4, 0xffffffff, 0x38404242, },
+ { 0, 1, 2, 0x00000ee8, 0x0000ffff, 0x20203636, },
+ { 0, 2, 0, 0x00001820, 0xffffffff, 0x48484848, },
+ { 0, 2, 0, 0x00001824, 0xffffffff, 0x46464646, },
+ { 0, 2, 0, 0x00001828, 0xffffffff, 0x44464646, },
+ { 0, 2, 0, 0x0000182c, 0xffffffff, 0x46464646, },
+ { 0, 2, 0, 0x00001830, 0xffffffff, 0x42444646, },
+ { 0, 2, 1, 0x00001834, 0xffffffff, 0x44444444, },
+ { 0, 2, 1, 0x00001838, 0xffffffff, 0x40424444, },
+ { 0, 2, 2, 0x000018d8, 0xffffffff, 0x42424242, },
+ { 0, 2, 2, 0x000018dc, 0xffffffff, 0x38404242, },
+ { 0, 2, 0, 0x0000183c, 0xffffffff, 0x46464646, },
+ { 0, 2, 0, 0x00001840, 0xffffffff, 0x42444646, },
+ { 0, 2, 0, 0x00001844, 0xffffffff, 0x44444040, },
+ { 0, 2, 1, 0x00001848, 0xffffffff, 0x44444444, },
+ { 0, 2, 1, 0x0000184c, 0xffffffff, 0x38384042, },
+ { 0, 2, 2, 0x000018e0, 0xffffffff, 0x42424242, },
+ { 0, 2, 2, 0x000018e4, 0xffffffff, 0x38404242, },
+ { 0, 2, 2, 0x000018e8, 0x0000ffff, 0x20203636, },
+ { 0, 3, 0, 0x00001a20, 0xffffffff, 0x48484848, },
+ { 0, 3, 0, 0x00001a24, 0xffffffff, 0x46464646, },
+ { 0, 3, 0, 0x00001a28, 0xffffffff, 0x44464646, },
+ { 0, 3, 0, 0x00001a2c, 0xffffffff, 0x46464646, },
+ { 0, 3, 0, 0x00001a30, 0xffffffff, 0x42444646, },
+ { 0, 3, 1, 0x00001a34, 0xffffffff, 0x44444444, },
+ { 0, 3, 1, 0x00001a38, 0xffffffff, 0x40424444, },
+ { 0, 3, 2, 0x00001ad8, 0xffffffff, 0x42424242, },
+ { 0, 3, 2, 0x00001adc, 0xffffffff, 0x38404242, },
+ { 0, 3, 0, 0x00001a3c, 0xffffffff, 0x46464646, },
+ { 0, 3, 0, 0x00001a40, 0xffffffff, 0x42444646, },
+ { 0, 3, 0, 0x00001a44, 0xffffffff, 0x44444040, },
+ { 0, 3, 1, 0x00001a48, 0xffffffff, 0x44444444, },
+ { 0, 3, 1, 0x00001a4c, 0xffffffff, 0x38384042, },
+ { 0, 3, 2, 0x00001ae0, 0xffffffff, 0x42424242, },
+ { 0, 3, 2, 0x00001ae4, 0xffffffff, 0x38404242, },
+ { 0, 3, 2, 0x00001ae8, 0x0000ffff, 0x20203636, },
+ { 1, 0, 0, 0x00000c24, 0xffffffff, 0x46464646, },
+ { 1, 0, 0, 0x00000c28, 0xffffffff, 0x44464646, },
+ { 1, 0, 0, 0x00000c2c, 0xffffffff, 0x46464646, },
+ { 1, 0, 0, 0x00000c30, 0xffffffff, 0x42444646, },
+ { 1, 0, 1, 0x00000c34, 0xffffffff, 0x44444444, },
+ { 1, 0, 1, 0x00000c38, 0xffffffff, 0x40424444, },
+ { 1, 0, 2, 0x00000cd8, 0xffffffff, 0x42424242, },
+ { 1, 0, 2, 0x00000cdc, 0xffffffff, 0x38404242, },
+ { 1, 0, 0, 0x00000c3c, 0xffffffff, 0x46464646, },
+ { 1, 0, 0, 0x00000c40, 0xffffffff, 0x42444646, },
+ { 1, 0, 0, 0x00000c44, 0xffffffff, 0x44443840, },
+ { 1, 0, 1, 0x00000c48, 0xffffffff, 0x44444444, },
+ { 1, 0, 1, 0x00000c4c, 0xffffffff, 0x36384042, },
+ { 1, 0, 2, 0x00000ce0, 0xffffffff, 0x42424242, },
+ { 1, 0, 2, 0x00000ce4, 0xffffffff, 0x38404242, },
+ { 1, 0, 2, 0x00000ce8, 0x0000ffff, 0x20203436, },
+ { 1, 1, 0, 0x00000e24, 0xffffffff, 0x46464646, },
+ { 1, 1, 0, 0x00000e28, 0xffffffff, 0x44464646, },
+ { 1, 1, 0, 0x00000e2c, 0xffffffff, 0x46464646, },
+ { 1, 1, 0, 0x00000e30, 0xffffffff, 0x42444646, },
+ { 1, 1, 1, 0x00000e34, 0xffffffff, 0x44444444, },
+ { 1, 1, 1, 0x00000e38, 0xffffffff, 0x40424444, },
+ { 1, 1, 2, 0x00000ed8, 0xffffffff, 0x42424242, },
+ { 1, 1, 2, 0x00000edc, 0xffffffff, 0x38404242, },
+ { 1, 1, 0, 0x00000e3c, 0xffffffff, 0x46464646, },
+ { 1, 1, 0, 0x00000e40, 0xffffffff, 0x42444646, },
+ { 1, 1, 0, 0x00000e44, 0xffffffff, 0x44443840, },
+ { 1, 1, 1, 0x00000e48, 0xffffffff, 0x44444444, },
+ { 1, 1, 1, 0x00000e4c, 0xffffffff, 0x36384042, },
+ { 1, 1, 2, 0x00000ee0, 0xffffffff, 0x42424242, },
+ { 1, 1, 2, 0x00000ee4, 0xffffffff, 0x38404242, },
+ { 1, 1, 2, 0x00000ee8, 0x0000ffff, 0x20203436, },
+ { 1, 2, 0, 0x00001824, 0xffffffff, 0x46464646, },
+ { 1, 2, 0, 0x00001828, 0xffffffff, 0x44464646, },
+ { 1, 2, 0, 0x0000182c, 0xffffffff, 0x46464646, },
+ { 1, 2, 0, 0x00001830, 0xffffffff, 0x42444646, },
+ { 1, 2, 1, 0x00001834, 0xffffffff, 0x44444444, },
+ { 1, 2, 1, 0x00001838, 0xffffffff, 0x40424444, },
+ { 1, 2, 2, 0x000018d8, 0xffffffff, 0x42424242, },
+ { 1, 2, 2, 0x000018dc, 0xffffffff, 0x38404242, },
+ { 1, 2, 0, 0x0000183c, 0xffffffff, 0x46464646, },
+ { 1, 2, 0, 0x00001840, 0xffffffff, 0x42444646, },
+ { 1, 2, 0, 0x00001844, 0xffffffff, 0x44443840, },
+ { 1, 2, 1, 0x00001848, 0xffffffff, 0x44444444, },
+ { 1, 2, 1, 0x0000184c, 0xffffffff, 0x36384042, },
+ { 1, 2, 2, 0x000018e0, 0xffffffff, 0x42424242, },
+ { 1, 2, 2, 0x000018e4, 0xffffffff, 0x38404242, },
+ { 1, 2, 2, 0x000018e8, 0x0000ffff, 0x20203436, },
+ { 1, 3, 0, 0x00001a24, 0xffffffff, 0x46464646, },
+ { 1, 3, 0, 0x00001a28, 0xffffffff, 0x44464646, },
+ { 1, 3, 0, 0x00001a2c, 0xffffffff, 0x46464646, },
+ { 1, 3, 0, 0x00001a30, 0xffffffff, 0x42444646, },
+ { 1, 3, 1, 0x00001a34, 0xffffffff, 0x44444444, },
+ { 1, 3, 1, 0x00001a38, 0xffffffff, 0x40424444, },
+ { 1, 3, 2, 0x00001ad8, 0xffffffff, 0x42424242, },
+ { 1, 3, 2, 0x00001adc, 0xffffffff, 0x38404242, },
+ { 1, 3, 0, 0x00001a3c, 0xffffffff, 0x46464646, },
+ { 1, 3, 0, 0x00001a40, 0xffffffff, 0x42444646, },
+ { 1, 3, 0, 0x00001a44, 0xffffffff, 0x44443840, },
+ { 1, 3, 1, 0x00001a48, 0xffffffff, 0x44444444, },
+ { 1, 3, 1, 0x00001a4c, 0xffffffff, 0x36384042, },
+ { 1, 3, 2, 0x00001ae0, 0xffffffff, 0x42424242, },
+ { 1, 3, 2, 0x00001ae4, 0xffffffff, 0x38404242, },
+ { 1, 3, 2, 0x00001ae8, 0x0000ffff, 0x20203436, },
+};
+
+RTW_DECL_TABLE_BB_PG(rtw8814a_bb_pg_type5);
+
+static const struct rtw_phy_pg_cfg_pair rtw8814a_bb_pg_type7[] = {
+ { 0, 0, 0, 0x00000c20, 0xffffffff, 0x34343434, },
+ { 0, 0, 0, 0x00000c24, 0xffffffff, 0x34343434, },
+ { 0, 0, 0, 0x00000c28, 0xffffffff, 0x30323434, },
+ { 0, 0, 0, 0x00000c2c, 0xffffffff, 0x34343434, },
+ { 0, 0, 0, 0x00000c30, 0xffffffff, 0x28303234, },
+ { 0, 0, 1, 0x00000c34, 0xffffffff, 0x34343434, },
+ { 0, 0, 1, 0x00000c38, 0xffffffff, 0x28303234, },
+ { 0, 0, 2, 0x00000cd8, 0xffffffff, 0x34343434, },
+ { 0, 0, 2, 0x00000cdc, 0xffffffff, 0x28303234, },
+ { 0, 0, 0, 0x00000c3c, 0xffffffff, 0x34343434, },
+ { 0, 0, 0, 0x00000c40, 0xffffffff, 0x28303234, },
+ { 0, 0, 0, 0x00000c44, 0xffffffff, 0x34342426, },
+ { 0, 0, 1, 0x00000c48, 0xffffffff, 0x32343434, },
+ { 0, 0, 1, 0x00000c4c, 0xffffffff, 0x24262830, },
+ { 0, 0, 2, 0x00000ce0, 0xffffffff, 0x34343434, },
+ { 0, 0, 2, 0x00000ce4, 0xffffffff, 0x28303234, },
+ { 0, 0, 2, 0x00000ce8, 0x0000ffff, 0x24263434, },
+ { 0, 1, 0, 0x00000e20, 0xffffffff, 0x34343434, },
+ { 0, 1, 0, 0x00000e24, 0xffffffff, 0x34343434, },
+ { 0, 1, 0, 0x00000e28, 0xffffffff, 0x30323434, },
+ { 0, 1, 0, 0x00000e2c, 0xffffffff, 0x34343434, },
+ { 0, 1, 0, 0x00000e30, 0xffffffff, 0x28303234, },
+ { 0, 1, 1, 0x00000e34, 0xffffffff, 0x34343434, },
+ { 0, 1, 1, 0x00000e38, 0xffffffff, 0x28303234, },
+ { 0, 1, 2, 0x00000ed8, 0xffffffff, 0x34343434, },
+ { 0, 1, 2, 0x00000edc, 0xffffffff, 0x28303234, },
+ { 0, 1, 0, 0x00000e3c, 0xffffffff, 0x34343434, },
+ { 0, 1, 0, 0x00000e40, 0xffffffff, 0x28303234, },
+ { 0, 1, 0, 0x00000e44, 0xffffffff, 0x34342426, },
+ { 0, 1, 1, 0x00000e48, 0xffffffff, 0x32343434, },
+ { 0, 1, 1, 0x00000e4c, 0xffffffff, 0x24262830, },
+ { 0, 1, 2, 0x00000ee0, 0xffffffff, 0x34343434, },
+ { 0, 1, 2, 0x00000ee4, 0xffffffff, 0x28303234, },
+ { 0, 1, 2, 0x00000ee8, 0x0000ffff, 0x24263434, },
+ { 0, 2, 0, 0x00001820, 0xffffffff, 0x34343434, },
+ { 0, 2, 0, 0x00001824, 0xffffffff, 0x34343434, },
+ { 0, 2, 0, 0x00001828, 0xffffffff, 0x30323434, },
+ { 0, 2, 0, 0x0000182c, 0xffffffff, 0x34343434, },
+ { 0, 2, 0, 0x00001830, 0xffffffff, 0x28303234, },
+ { 0, 2, 1, 0x00001834, 0xffffffff, 0x34343434, },
+ { 0, 2, 1, 0x00001838, 0xffffffff, 0x28303234, },
+ { 0, 2, 2, 0x000018d8, 0xffffffff, 0x34343434, },
+ { 0, 2, 2, 0x000018dc, 0xffffffff, 0x28303234, },
+ { 0, 2, 0, 0x0000183c, 0xffffffff, 0x34343434, },
+ { 0, 2, 0, 0x00001840, 0xffffffff, 0x28303234, },
+ { 0, 2, 0, 0x00001844, 0xffffffff, 0x34342426, },
+ { 0, 2, 1, 0x00001848, 0xffffffff, 0x32343434, },
+ { 0, 2, 1, 0x0000184c, 0xffffffff, 0x24262830, },
+ { 0, 2, 2, 0x000018e0, 0xffffffff, 0x34343434, },
+ { 0, 2, 2, 0x000018e4, 0xffffffff, 0x28303234, },
+ { 0, 2, 2, 0x000018e8, 0x0000ffff, 0x24263434, },
+ { 0, 3, 0, 0x00001a20, 0xffffffff, 0x34343434, },
+ { 0, 3, 0, 0x00001a24, 0xffffffff, 0x34343434, },
+ { 0, 3, 0, 0x00001a28, 0xffffffff, 0x30323434, },
+ { 0, 3, 0, 0x00001a2c, 0xffffffff, 0x34343434, },
+ { 0, 3, 0, 0x00001a30, 0xffffffff, 0x28303234, },
+ { 0, 3, 1, 0x00001a34, 0xffffffff, 0x34343434, },
+ { 0, 3, 1, 0x00001a38, 0xffffffff, 0x28303234, },
+ { 0, 3, 2, 0x00001ad8, 0xffffffff, 0x34343434, },
+ { 0, 3, 2, 0x00001adc, 0xffffffff, 0x28303234, },
+ { 0, 3, 0, 0x00001a3c, 0xffffffff, 0x34343434, },
+ { 0, 3, 0, 0x00001a40, 0xffffffff, 0x28303234, },
+ { 0, 3, 0, 0x00001a44, 0xffffffff, 0x34342426, },
+ { 0, 3, 1, 0x00001a48, 0xffffffff, 0x32343434, },
+ { 0, 3, 1, 0x00001a4c, 0xffffffff, 0x24262830, },
+ { 0, 3, 2, 0x00001ae0, 0xffffffff, 0x34343434, },
+ { 0, 3, 2, 0x00001ae4, 0xffffffff, 0x28303234, },
+ { 0, 3, 2, 0x00001ae8, 0x0000ffff, 0x24263434, },
+ { 1, 0, 0, 0x00000c24, 0xffffffff, 0x34343434, },
+ { 1, 0, 0, 0x00000c28, 0xffffffff, 0x30323434, },
+ { 1, 0, 0, 0x00000c2c, 0xffffffff, 0x34343434, },
+ { 1, 0, 0, 0x00000c30, 0xffffffff, 0x28303234, },
+ { 1, 0, 1, 0x00000c34, 0xffffffff, 0x34343434, },
+ { 1, 0, 1, 0x00000c38, 0xffffffff, 0x28303234, },
+ { 1, 0, 2, 0x00000cd8, 0xffffffff, 0x34343434, },
+ { 1, 0, 2, 0x00000cdc, 0xffffffff, 0x28303234, },
+ { 1, 0, 0, 0x00000c3c, 0xffffffff, 0x34343434, },
+ { 1, 0, 0, 0x00000c40, 0xffffffff, 0x28303234, },
+ { 1, 0, 0, 0x00000c44, 0xffffffff, 0x34342426, },
+ { 1, 0, 1, 0x00000c48, 0xffffffff, 0x32343434, },
+ { 1, 0, 1, 0x00000c4c, 0xffffffff, 0x24262830, },
+ { 1, 0, 2, 0x00000ce0, 0xffffffff, 0x34343434, },
+ { 1, 0, 2, 0x00000ce4, 0xffffffff, 0x28303234, },
+ { 1, 0, 2, 0x00000ce8, 0x0000ffff, 0x24263434, },
+ { 1, 1, 0, 0x00000e24, 0xffffffff, 0x34343434, },
+ { 1, 1, 0, 0x00000e28, 0xffffffff, 0x30323434, },
+ { 1, 1, 0, 0x00000e2c, 0xffffffff, 0x34343434, },
+ { 1, 1, 0, 0x00000e30, 0xffffffff, 0x28303234, },
+ { 1, 1, 1, 0x00000e34, 0xffffffff, 0x34343434, },
+ { 1, 1, 1, 0x00000e38, 0xffffffff, 0x28303234, },
+ { 1, 1, 2, 0x00000ed8, 0xffffffff, 0x34343434, },
+ { 1, 1, 2, 0x00000edc, 0xffffffff, 0x28303234, },
+ { 1, 1, 0, 0x00000e3c, 0xffffffff, 0x34343434, },
+ { 1, 1, 0, 0x00000e40, 0xffffffff, 0x28303234, },
+ { 1, 1, 0, 0x00000e44, 0xffffffff, 0x34342426, },
+ { 1, 1, 1, 0x00000e48, 0xffffffff, 0x32343434, },
+ { 1, 1, 1, 0x00000e4c, 0xffffffff, 0x24262830, },
+ { 1, 1, 2, 0x00000ee0, 0xffffffff, 0x34343434, },
+ { 1, 1, 2, 0x00000ee4, 0xffffffff, 0x28303234, },
+ { 1, 1, 2, 0x00000ee8, 0x0000ffff, 0x24263434, },
+ { 1, 2, 0, 0x00001824, 0xffffffff, 0x34343434, },
+ { 1, 2, 0, 0x00001828, 0xffffffff, 0x30323434, },
+ { 1, 2, 0, 0x0000182c, 0xffffffff, 0x34343434, },
+ { 1, 2, 0, 0x00001830, 0xffffffff, 0x28303234, },
+ { 1, 2, 1, 0x00001834, 0xffffffff, 0x34343434, },
+ { 1, 2, 1, 0x00001838, 0xffffffff, 0x28303234, },
+ { 1, 2, 2, 0x000018d8, 0xffffffff, 0x34343434, },
+ { 1, 2, 2, 0x000018dc, 0xffffffff, 0x28303234, },
+ { 1, 2, 0, 0x0000183c, 0xffffffff, 0x34343434, },
+ { 1, 2, 0, 0x00001840, 0xffffffff, 0x28303234, },
+ { 1, 2, 0, 0x00001844, 0xffffffff, 0x34342426, },
+ { 1, 2, 1, 0x00001848, 0xffffffff, 0x32343434, },
+ { 1, 2, 1, 0x0000184c, 0xffffffff, 0x24262830, },
+ { 1, 2, 2, 0x000018e0, 0xffffffff, 0x34343434, },
+ { 1, 2, 2, 0x000018e4, 0xffffffff, 0x28303234, },
+ { 1, 2, 2, 0x000018e8, 0x0000ffff, 0x24263434, },
+ { 1, 3, 0, 0x00001a24, 0xffffffff, 0x34343434, },
+ { 1, 3, 0, 0x00001a28, 0xffffffff, 0x30323434, },
+ { 1, 3, 0, 0x00001a2c, 0xffffffff, 0x34343434, },
+ { 1, 3, 0, 0x00001a30, 0xffffffff, 0x28303234, },
+ { 1, 3, 1, 0x00001a34, 0xffffffff, 0x34343434, },
+ { 1, 3, 1, 0x00001a38, 0xffffffff, 0x28303234, },
+ { 1, 3, 2, 0x00001ad8, 0xffffffff, 0x34343434, },
+ { 1, 3, 2, 0x00001adc, 0xffffffff, 0x28303234, },
+ { 1, 3, 0, 0x00001a3c, 0xffffffff, 0x34343434, },
+ { 1, 3, 0, 0x00001a40, 0xffffffff, 0x28303234, },
+ { 1, 3, 0, 0x00001a44, 0xffffffff, 0x34342426, },
+ { 1, 3, 1, 0x00001a48, 0xffffffff, 0x32343434, },
+ { 1, 3, 1, 0x00001a4c, 0xffffffff, 0x24262830, },
+ { 1, 3, 2, 0x00001ae0, 0xffffffff, 0x34343434, },
+ { 1, 3, 2, 0x00001ae4, 0xffffffff, 0x28303234, },
+ { 1, 3, 2, 0x00001ae8, 0x0000ffff, 0x24263434, },
+};
+
+RTW_DECL_TABLE_BB_PG(rtw8814a_bb_pg_type7);
+
+static const struct rtw_phy_pg_cfg_pair rtw8814a_bb_pg_type8[] = {
+ { 0, 0, 0, 0x00000c20, 0xffffffff, 0x43434343, },
+ { 0, 0, 0, 0x00000c24, 0xffffffff, 0x43434343, },
+ { 0, 0, 0, 0x00000c28, 0xffffffff, 0x35373941, },
+ { 0, 0, 0, 0x00000c2c, 0xffffffff, 0x43434343, },
+ { 0, 0, 0, 0x00000c30, 0xffffffff, 0x33353739, },
+ { 0, 0, 1, 0x00000c34, 0xffffffff, 0x43434343, },
+ { 0, 0, 1, 0x00000c38, 0xffffffff, 0x31333537, },
+ { 0, 0, 2, 0x00000cd8, 0xffffffff, 0x43434343, },
+ { 0, 0, 2, 0x00000cdc, 0xffffffff, 0x29313335, },
+ { 0, 0, 0, 0x00000c3c, 0xffffffff, 0x34343434, },
+ { 0, 0, 0, 0x00000c40, 0xffffffff, 0x28303234, },
+ { 0, 0, 0, 0x00000c44, 0xffffffff, 0x32322426, },
+ { 0, 0, 1, 0x00000c48, 0xffffffff, 0x30323232, },
+ { 0, 0, 1, 0x00000c4c, 0xffffffff, 0x22242628, },
+ { 0, 0, 2, 0x00000ce0, 0xffffffff, 0x30303030, },
+ { 0, 0, 2, 0x00000ce4, 0xffffffff, 0x24262830, },
+ { 0, 0, 2, 0x00000ce8, 0x0000ffff, 0x20222222, },
+ { 0, 1, 0, 0x00000e20, 0xffffffff, 0x43434343, },
+ { 0, 1, 0, 0x00000e24, 0xffffffff, 0x43434343, },
+ { 0, 1, 0, 0x00000e28, 0xffffffff, 0x35373941, },
+ { 0, 1, 0, 0x00000e2c, 0xffffffff, 0x41434343, },
+ { 0, 1, 0, 0x00000e30, 0xffffffff, 0x33353739, },
+ { 0, 1, 1, 0x00000e34, 0xffffffff, 0x39414141, },
+ { 0, 1, 1, 0x00000e38, 0xffffffff, 0x31333537, },
+ { 0, 1, 2, 0x00000ed8, 0xffffffff, 0x37393939, },
+ { 0, 1, 2, 0x00000edc, 0xffffffff, 0x29313335, },
+ { 0, 1, 0, 0x00000e3c, 0xffffffff, 0x34343434, },
+ { 0, 1, 0, 0x00000e40, 0xffffffff, 0x28303234, },
+ { 0, 1, 0, 0x00000e44, 0xffffffff, 0x32322426, },
+ { 0, 1, 1, 0x00000e48, 0xffffffff, 0x30323232, },
+ { 0, 1, 1, 0x00000e4c, 0xffffffff, 0x22242628, },
+ { 0, 1, 2, 0x00000ee0, 0xffffffff, 0x30303030, },
+ { 0, 1, 2, 0x00000ee4, 0xffffffff, 0x24262830, },
+ { 0, 1, 2, 0x00000ee8, 0x0000ffff, 0x20222222, },
+ { 0, 2, 0, 0x00001820, 0xffffffff, 0x43434343, },
+ { 0, 2, 0, 0x00001824, 0xffffffff, 0x43434343, },
+ { 0, 2, 0, 0x00001828, 0xffffffff, 0x35373941, },
+ { 0, 2, 0, 0x0000182c, 0xffffffff, 0x41434343, },
+ { 0, 2, 0, 0x00001830, 0xffffffff, 0x33353739, },
+ { 0, 2, 1, 0x00001834, 0xffffffff, 0x39414141, },
+ { 0, 2, 1, 0x00001838, 0xffffffff, 0x31333537, },
+ { 0, 2, 2, 0x000018d8, 0xffffffff, 0x37393939, },
+ { 0, 2, 2, 0x000018dc, 0xffffffff, 0x29313335, },
+ { 0, 2, 0, 0x0000183c, 0xffffffff, 0x34343434, },
+ { 0, 2, 0, 0x00001840, 0xffffffff, 0x28303234, },
+ { 0, 2, 0, 0x00001844, 0xffffffff, 0x32322426, },
+ { 0, 2, 1, 0x00001848, 0xffffffff, 0x30323232, },
+ { 0, 2, 1, 0x0000184c, 0xffffffff, 0x22242628, },
+ { 0, 2, 2, 0x000018e0, 0xffffffff, 0x30303030, },
+ { 0, 2, 2, 0x000018e4, 0xffffffff, 0x24262830, },
+ { 0, 2, 2, 0x000018e8, 0x0000ffff, 0x20222222, },
+ { 0, 3, 0, 0x00001a20, 0xffffffff, 0x43434343, },
+ { 0, 3, 0, 0x00001a24, 0xffffffff, 0x43434343, },
+ { 0, 3, 0, 0x00001a28, 0xffffffff, 0x35373941, },
+ { 0, 3, 0, 0x00001a2c, 0xffffffff, 0x41434343, },
+ { 0, 3, 0, 0x00001a30, 0xffffffff, 0x33353739, },
+ { 0, 3, 1, 0x00001a34, 0xffffffff, 0x39414141, },
+ { 0, 3, 1, 0x00001a38, 0xffffffff, 0x31333537, },
+ { 0, 3, 2, 0x00001ad8, 0xffffffff, 0x37393939, },
+ { 0, 3, 2, 0x00001adc, 0xffffffff, 0x29313335, },
+ { 0, 3, 0, 0x00001a3c, 0xffffffff, 0x34343434, },
+ { 0, 3, 0, 0x00001a40, 0xffffffff, 0x28303234, },
+ { 0, 3, 0, 0x00001a44, 0xffffffff, 0x32322426, },
+ { 0, 3, 1, 0x00001a48, 0xffffffff, 0x30323232, },
+ { 0, 3, 1, 0x00001a4c, 0xffffffff, 0x22242628, },
+ { 0, 3, 2, 0x00001ae0, 0xffffffff, 0x30303030, },
+ { 0, 3, 2, 0x00001ae4, 0xffffffff, 0x24262830, },
+ { 0, 3, 2, 0x00001ae8, 0x0000ffff, 0x20222222, },
+ { 1, 0, 0, 0x00000c24, 0xffffffff, 0x46464646, },
+ { 1, 0, 0, 0x00000c28, 0xffffffff, 0x39414345, },
+ { 1, 0, 0, 0x00000c2c, 0xffffffff, 0x46464646, },
+ { 1, 0, 0, 0x00000c30, 0xffffffff, 0x38404244, },
+ { 1, 0, 1, 0x00000c34, 0xffffffff, 0x46464646, },
+ { 1, 0, 1, 0x00000c38, 0xffffffff, 0x36384042, },
+ { 1, 0, 2, 0x00000cd8, 0xffffffff, 0x46464646, },
+ { 1, 0, 2, 0x00000cdc, 0xffffffff, 0x34363840, },
+ { 1, 0, 0, 0x00000c3c, 0xffffffff, 0x46464646, },
+ { 1, 0, 0, 0x00000c40, 0xffffffff, 0x38404244, },
+ { 1, 0, 0, 0x00000c44, 0xffffffff, 0x46463738, },
+ { 1, 0, 1, 0x00000c48, 0xffffffff, 0x42444646, },
+ { 1, 0, 1, 0x00000c4c, 0xffffffff, 0x35373840, },
+ { 1, 0, 2, 0x00000ce0, 0xffffffff, 0x46464646, },
+ { 1, 0, 2, 0x00000ce4, 0xffffffff, 0x37394143, },
+ { 1, 0, 2, 0x00000ce8, 0x0000ffff, 0x33333335, },
+ { 1, 1, 0, 0x00000e24, 0xffffffff, 0x46464646, },
+ { 1, 1, 0, 0x00000e28, 0xffffffff, 0x39414345, },
+ { 1, 1, 0, 0x00000e2c, 0xffffffff, 0x46464646, },
+ { 1, 1, 0, 0x00000e30, 0xffffffff, 0x38404244, },
+ { 1, 1, 1, 0x00000e34, 0xffffffff, 0x46464646, },
+ { 1, 1, 1, 0x00000e38, 0xffffffff, 0x36384042, },
+ { 1, 1, 2, 0x00000ed8, 0xffffffff, 0x46464646, },
+ { 1, 1, 2, 0x00000edc, 0xffffffff, 0x34363840, },
+ { 1, 1, 0, 0x00000e3c, 0xffffffff, 0x46464646, },
+ { 1, 1, 0, 0x00000e40, 0xffffffff, 0x38404244, },
+ { 1, 1, 0, 0x00000e44, 0xffffffff, 0x46463738, },
+ { 1, 1, 1, 0x00000e48, 0xffffffff, 0x42444646, },
+ { 1, 1, 1, 0x00000e4c, 0xffffffff, 0x35373840, },
+ { 1, 1, 2, 0x00000ee0, 0xffffffff, 0x46464646, },
+ { 1, 1, 2, 0x00000ee4, 0xffffffff, 0x37394143, },
+ { 1, 1, 2, 0x00000ee8, 0x0000ffff, 0x33333335, },
+ { 1, 2, 0, 0x00001824, 0xffffffff, 0x46464646, },
+ { 1, 2, 0, 0x00001828, 0xffffffff, 0x39414345, },
+ { 1, 2, 0, 0x0000182c, 0xffffffff, 0x46464646, },
+ { 1, 2, 0, 0x00001830, 0xffffffff, 0x38404244, },
+ { 1, 2, 1, 0x00001834, 0xffffffff, 0x46464646, },
+ { 1, 2, 1, 0x00001838, 0xffffffff, 0x36384042, },
+ { 1, 2, 2, 0x000018d8, 0xffffffff, 0x46464646, },
+ { 1, 2, 2, 0x000018dc, 0xffffffff, 0x34363840, },
+ { 1, 2, 0, 0x0000183c, 0xffffffff, 0x46464646, },
+ { 1, 2, 0, 0x00001840, 0xffffffff, 0x38404244, },
+ { 1, 2, 0, 0x00001844, 0xffffffff, 0x46463738, },
+ { 1, 2, 1, 0x00001848, 0xffffffff, 0x42444646, },
+ { 1, 2, 1, 0x0000184c, 0xffffffff, 0x35373840, },
+ { 1, 2, 2, 0x000018e0, 0xffffffff, 0x46464646, },
+ { 1, 2, 2, 0x000018e4, 0xffffffff, 0x37394143, },
+ { 1, 2, 2, 0x000018e8, 0x0000ffff, 0x33333335, },
+ { 1, 3, 0, 0x00001a24, 0xffffffff, 0x46464646, },
+ { 1, 3, 0, 0x00001a28, 0xffffffff, 0x39414345, },
+ { 1, 3, 0, 0x00001a2c, 0xffffffff, 0x46464646, },
+ { 1, 3, 0, 0x00001a30, 0xffffffff, 0x38404244, },
+ { 1, 3, 1, 0x00001a34, 0xffffffff, 0x46464646, },
+ { 1, 3, 1, 0x00001a38, 0xffffffff, 0x36384042, },
+ { 1, 3, 2, 0x00001ad8, 0xffffffff, 0x46464646, },
+ { 1, 3, 2, 0x00001adc, 0xffffffff, 0x34363840, },
+ { 1, 3, 0, 0x00001a3c, 0xffffffff, 0x46464646, },
+ { 1, 3, 0, 0x00001a40, 0xffffffff, 0x38404244, },
+ { 1, 3, 0, 0x00001a44, 0xffffffff, 0x46463738, },
+ { 1, 3, 1, 0x00001a48, 0xffffffff, 0x42444646, },
+ { 1, 3, 1, 0x00001a4c, 0xffffffff, 0x35373840, },
+ { 1, 3, 2, 0x00001ae0, 0xffffffff, 0x46464646, },
+ { 1, 3, 2, 0x00001ae4, 0xffffffff, 0x37394143, },
+ { 1, 3, 2, 0x00001ae8, 0x0000ffff, 0x33333335, },
+};
+
+RTW_DECL_TABLE_BB_PG(rtw8814a_bb_pg_type8);
+
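+/* RF register init table for path A: a flat list of { register, value }
+ * pairs. Entries whose first word starts with 0x8/0x9/0xA/0xB are condition
+ * markers (if / else-if / else / endif) rather than register writes; the
+ * rtw88 conditional table parser uses them to select the writes that apply
+ * to a given chip cut/configuration.
+ */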
+static const u32 rtw8814a_rf_a[] = {
+ 0x018, 0x00013124,
+ 0x040, 0x00000C00,
+ 0x058, 0x00000F98,
+ 0x07F, 0x00068004,
+ 0x0B0, 0x000FFFFE,
+ 0x0B1, 0x0003FF48,
+ 0x0B2, 0x0006AA3F,
+ 0x0B3, 0x000FFC9A,
+ 0x0B4, 0x0000A78F,
+ 0x0B5, 0x00000A3F,
+ 0x0B6, 0x0000C09C,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B7, 0x00030008,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B7, 0x00030008,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B7, 0x00030008,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B7, 0x00030008,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B7, 0x00030008,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B7, 0x00030008,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B7, 0x00030008,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B7, 0x00030008,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B7, 0x00030008,
+ 0xA0000000, 0x00000000,
+ 0x0B7, 0x0003000C,
+ 0xB0000000, 0x00000000,
+ 0x0B8, 0x0007400E,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B9, 0x000FBF50,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B9, 0x000FBF50,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B9, 0x000FBF50,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B9, 0x000FBF50,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B9, 0x000FBF50,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B9, 0x000FBF50,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B9, 0x000FBF50,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B9, 0x000FBF50,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B9, 0x000FBF50,
+ 0xA0000000, 0x00000000,
+ 0x0B9, 0x000FBF50,
+ 0xB0000000, 0x00000000,
+ 0x0BA, 0x00050780,
+ 0x0BB, 0x00000000,
+ 0x0BC, 0x00040009,
+ 0x0BD, 0x00000000,
+ 0x0BE, 0x00000000,
+ 0x0BF, 0x00000000,
+ 0x0EF, 0x00020000,
+ 0x03E, 0x00000000,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030000,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030000,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00030000,
+ 0xB0000000, 0x00000000,
+ 0x03E, 0x00020000,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00040000,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00040000,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00040000,
+ 0xB0000000, 0x00000000,
+ 0x03E, 0x00040000,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030000,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030000,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00030000,
+ 0xB0000000, 0x00000000,
+ 0x03E, 0x00060000,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030000,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030000,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00030000,
+ 0xB0000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x0EF, 0x00010000,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00006800,
+ 0x03E, 0x00000080,
+ 0x03F, 0x00006000,
+ 0x03E, 0x00000100,
+ 0x03F, 0x00004800,
+ 0x03E, 0x00000180,
+ 0x03F, 0x00004000,
+ 0x03E, 0x00000200,
+ 0x03F, 0x00004000,
+ 0x03E, 0x00000280,
+ 0x03F, 0x00002800,
+ 0x03E, 0x00000300,
+ 0x03F, 0x00002800,
+ 0x03E, 0x00000380,
+ 0x03F, 0x00002000,
+ 0x0EF, 0x00000000,
+ 0x0EF, 0x00040000,
+ 0x03E, 0x00000000,
+ 0x03F, 0x000000BC,
+ 0x03E, 0x00000040,
+ 0x03F, 0x00000053,
+ 0x03E, 0x00000050,
+ 0x03F, 0x00000050,
+ 0x03E, 0x00000060,
+ 0x03F, 0x00000050,
+ 0x0EF, 0x00000000,
+ 0x0EF, 0x00000400,
+ 0x03E, 0x00000006,
+ 0x041, 0x000EE080,
+ 0x03E, 0x00000008,
+ 0x041, 0x000EE0C0,
+ 0x03E, 0x0000000A,
+ 0x041, 0x000EE100,
+ 0x03E, 0x0000000C,
+ 0x041, 0x000EE100,
+ 0x0EF, 0x00000000,
+ 0x018, 0x00000006,
+ 0x80000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x086, 0x000E335A,
+ 0x087, 0x00079F80,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x086, 0x000E335A,
+ 0x087, 0x00079F80,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x086, 0x000E335A,
+ 0x087, 0x00079F80,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x086, 0x000E335A,
+ 0x087, 0x00079F80,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x086, 0x000E335A,
+ 0x087, 0x00079F80,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x086, 0x000E335A,
+ 0x087, 0x00079F80,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x086, 0x000E335A,
+ 0x087, 0x00079F80,
+ 0xA0000000, 0x00000000,
+ 0x086, 0x000E4B58,
+ 0x087, 0x00049F80,
+ 0xB0000000, 0x00000000,
+ 0x0DF, 0x00000008,
+ 0x0EF, 0x00002000,
+ 0x80000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03B, 0x0003F19B,
+ 0x03B, 0x00037A5B,
+ 0x03B, 0x0002A433,
+ 0x03B, 0x00027BD3,
+ 0x03B, 0x0001F80B,
+ 0x03B, 0x000179C3,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03B, 0x0003F19B,
+ 0x03B, 0x00037A5B,
+ 0x03B, 0x0002A433,
+ 0x03B, 0x00027BD3,
+ 0x03B, 0x0001F80B,
+ 0x03B, 0x000179C3,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03B, 0x0003F19B,
+ 0x03B, 0x00037A5B,
+ 0x03B, 0x0002A433,
+ 0x03B, 0x00027BD3,
+ 0x03B, 0x0001F80B,
+ 0x03B, 0x000179C3,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03B, 0x0003F19B,
+ 0x03B, 0x00037A5B,
+ 0x03B, 0x0002A433,
+ 0x03B, 0x00027BD3,
+ 0x03B, 0x0001F80B,
+ 0x03B, 0x000179C3,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x03B, 0x0003F19B,
+ 0x03B, 0x00037A5B,
+ 0x03B, 0x0002A433,
+ 0x03B, 0x00027BD3,
+ 0x03B, 0x0001F80B,
+ 0x03B, 0x000179C3,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x03B, 0x0003F19B,
+ 0x03B, 0x00037A5B,
+ 0x03B, 0x0002A433,
+ 0x03B, 0x00027BD3,
+ 0x03B, 0x0001F80B,
+ 0x03B, 0x000179C3,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x03B, 0x0003F19B,
+ 0x03B, 0x00037A5B,
+ 0x03B, 0x0002A433,
+ 0x03B, 0x00027BD3,
+ 0x03B, 0x0001F80B,
+ 0x03B, 0x000179C3,
+ 0xA0000000, 0x00000000,
+ 0x03B, 0x0003F258,
+ 0x03B, 0x00030A58,
+ 0x03B, 0x0002FA58,
+ 0x03B, 0x00022590,
+ 0x03B, 0x0001FA50,
+ 0x03B, 0x00010248,
+ 0x03B, 0x00008240,
+ 0xB0000000, 0x00000000,
+ 0x0EF, 0x00000100,
+ 0x80000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A0D0,
+ 0x034, 0x000090CD,
+ 0x034, 0x000080CA,
+ 0x034, 0x0000704D,
+ 0x034, 0x0000604A,
+ 0x034, 0x00005047,
+ 0x034, 0x0000400A,
+ 0x034, 0x00003007,
+ 0x034, 0x00002004,
+ 0x034, 0x00001001,
+ 0x034, 0x00000001,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A0D0,
+ 0x034, 0x000090CD,
+ 0x034, 0x000080CA,
+ 0x034, 0x0000704D,
+ 0x034, 0x0000604A,
+ 0x034, 0x00005047,
+ 0x034, 0x0000400A,
+ 0x034, 0x00003007,
+ 0x034, 0x00002004,
+ 0x034, 0x00001001,
+ 0x034, 0x00000001,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A0D0,
+ 0x034, 0x000090CD,
+ 0x034, 0x000080CA,
+ 0x034, 0x0000704D,
+ 0x034, 0x0000604A,
+ 0x034, 0x00005047,
+ 0x034, 0x0000400A,
+ 0x034, 0x00003007,
+ 0x034, 0x00002004,
+ 0x034, 0x00001001,
+ 0x034, 0x00000001,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A0D0,
+ 0x034, 0x000090CD,
+ 0x034, 0x000080CA,
+ 0x034, 0x0000704D,
+ 0x034, 0x0000604A,
+ 0x034, 0x00005047,
+ 0x034, 0x0000400A,
+ 0x034, 0x00003007,
+ 0x034, 0x00002004,
+ 0x034, 0x00001001,
+ 0x034, 0x00000001,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A0D0,
+ 0x034, 0x000090CD,
+ 0x034, 0x000080CA,
+ 0x034, 0x0000704D,
+ 0x034, 0x0000604A,
+ 0x034, 0x00005047,
+ 0x034, 0x0000400A,
+ 0x034, 0x00003007,
+ 0x034, 0x00002004,
+ 0x034, 0x00001001,
+ 0x034, 0x00000001,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A0D0,
+ 0x034, 0x000090CD,
+ 0x034, 0x000080CA,
+ 0x034, 0x0000704D,
+ 0x034, 0x0000604A,
+ 0x034, 0x00005047,
+ 0x034, 0x0000400A,
+ 0x034, 0x00003007,
+ 0x034, 0x00002004,
+ 0x034, 0x00001001,
+ 0x034, 0x00000001,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A0D0,
+ 0x034, 0x000090CD,
+ 0x034, 0x000080CA,
+ 0x034, 0x0000704D,
+ 0x034, 0x0000604A,
+ 0x034, 0x00005047,
+ 0x034, 0x0000400A,
+ 0x034, 0x00003007,
+ 0x034, 0x00002004,
+ 0x034, 0x00001001,
+ 0x034, 0x00000001,
+ 0xA0000000, 0x00000000,
+ 0x034, 0x0000ADF6,
+ 0x034, 0x00009DF3,
+ 0x034, 0x00008DF0,
+ 0x034, 0x00007DED,
+ 0x034, 0x00006DEA,
+ 0x034, 0x00005CED,
+ 0x034, 0x00004CEA,
+ 0x034, 0x000034EA,
+ 0x034, 0x000024E7,
+ 0x034, 0x0000146A,
+ 0x034, 0x0000006B,
+ 0xB0000000, 0x00000000,
+ 0x80000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A0D0,
+ 0x034, 0x000090CD,
+ 0x034, 0x000080CA,
+ 0x034, 0x0000704D,
+ 0x034, 0x0000604A,
+ 0x034, 0x00005047,
+ 0x034, 0x0000400A,
+ 0x034, 0x00003007,
+ 0x034, 0x00002004,
+ 0x034, 0x00001001,
+ 0x034, 0x00000001,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A0D0,
+ 0x034, 0x000090CD,
+ 0x034, 0x000080CA,
+ 0x034, 0x0000704D,
+ 0x034, 0x0000604A,
+ 0x034, 0x00005047,
+ 0x034, 0x0000400A,
+ 0x034, 0x00003007,
+ 0x034, 0x00002004,
+ 0x034, 0x00001001,
+ 0x034, 0x00000001,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A0D0,
+ 0x034, 0x000090CD,
+ 0x034, 0x000080CA,
+ 0x034, 0x0000704D,
+ 0x034, 0x0000604A,
+ 0x034, 0x00005047,
+ 0x034, 0x0000400A,
+ 0x034, 0x00003007,
+ 0x034, 0x00002004,
+ 0x034, 0x00001001,
+ 0x034, 0x00000001,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A0D0,
+ 0x034, 0x000090CD,
+ 0x034, 0x000080CA,
+ 0x034, 0x0000704D,
+ 0x034, 0x0000604A,
+ 0x034, 0x00005047,
+ 0x034, 0x0000400A,
+ 0x034, 0x00003007,
+ 0x034, 0x00002004,
+ 0x034, 0x00001001,
+ 0x034, 0x00000001,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A0D0,
+ 0x034, 0x000090CD,
+ 0x034, 0x000080CA,
+ 0x034, 0x0000704D,
+ 0x034, 0x0000604A,
+ 0x034, 0x00005047,
+ 0x034, 0x0000400A,
+ 0x034, 0x00003007,
+ 0x034, 0x00002004,
+ 0x034, 0x00001001,
+ 0x034, 0x00000001,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A0D0,
+ 0x034, 0x000090CD,
+ 0x034, 0x000080CA,
+ 0x034, 0x0000704D,
+ 0x034, 0x0000604A,
+ 0x034, 0x00005047,
+ 0x034, 0x0000400A,
+ 0x034, 0x00003007,
+ 0x034, 0x00002004,
+ 0x034, 0x00001001,
+ 0x034, 0x00000001,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A0D0,
+ 0x034, 0x000090CD,
+ 0x034, 0x000080CA,
+ 0x034, 0x0000704D,
+ 0x034, 0x0000604A,
+ 0x034, 0x00005047,
+ 0x034, 0x0000400A,
+ 0x034, 0x00003007,
+ 0x034, 0x00002004,
+ 0x034, 0x00001001,
+ 0x034, 0x00000001,
+ 0xA0000000, 0x00000000,
+ 0x034, 0x0008ADF6,
+ 0x034, 0x00089DF3,
+ 0x034, 0x00088DF0,
+ 0x034, 0x00087DED,
+ 0x034, 0x00086DEA,
+ 0x034, 0x00085CED,
+ 0x034, 0x00084CEA,
+ 0x034, 0x000834EA,
+ 0x034, 0x000824E7,
+ 0x034, 0x0008146A,
+ 0x034, 0x0008006B,
+ 0xB0000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x0EF, 0x000020A2,
+ 0x0DF, 0x00000080,
+ 0x035, 0x00000192,
+ 0x035, 0x00008192,
+ 0x035, 0x00010192,
+ 0x036, 0x00000024,
+ 0x036, 0x00008024,
+ 0x036, 0x00010024,
+ 0x036, 0x00018024,
+ 0x0EF, 0x00000000,
+ 0x051, 0x00000C21,
+ 0x052, 0x000006D9,
+ 0x053, 0x000FC649,
+ 0x054, 0x0000017E,
+ 0x018, 0x0001012A,
+ 0x081, 0x0007FC00,
+ 0x089, 0x00050110,
+ 0x08A, 0x00043E50,
+ 0x08B, 0x0002E180,
+ 0x08C, 0x00093C3C,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x085, 0x000F8000,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x085, 0x000F8000,
+ 0xA0000000, 0x00000000,
+ 0x085, 0x000F8000,
+ 0xB0000000, 0x00000000,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x08D, 0x000FFFF0,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x08D, 0x000FFFF0,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x08D, 0x000FFFF0,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x08D, 0x000FFFF0,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x08D, 0x000FFFF0,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x08D, 0x000FFFF0,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x08D, 0x000FFFF0,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x08D, 0x000FFFF0,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x08D, 0x000FFFF0,
+ 0xA0000000, 0x00000000,
+ 0x08D, 0x000FFFF0,
+ 0xB0000000, 0x00000000,
+ 0x0EF, 0x00001000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00038023,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00024000,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00024000,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00000000,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00088000,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00000000,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00024000,
+ 0x90000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00000000,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00084000,
+ 0x90000009, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00000000,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00088000,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00088000,
+ 0xA0000000, 0x00000000,
+ 0x03C, 0x00028000,
+ 0xB0000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00030023,
+ 0x03C, 0x00048000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00028623,
+ 0x03C, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00021633,
+ 0x03C, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x0001C633,
+ 0x03C, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00010293,
+ 0x03C, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00009593,
+ 0x03C, 0x00000000,
+ 0x03A, 0x00000148,
+ 0x80000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x03B, 0x0000118B,
+ 0xA0000000, 0x00000000,
+ 0x03B, 0x0000078B,
+ 0xB0000000, 0x00000000,
+ 0x03C, 0x00000000,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00078023,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00078023,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00078023,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00078023,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00078023,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00078023,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00078023,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00078023,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00078023,
+ 0xA0000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00078023,
+ 0xB0000000, 0x00000000,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00024000,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x000AC000,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00024000,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00088000,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00024000,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00024000,
+ 0x90000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00000000,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00040000,
+ 0x90000009, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00000000,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00088000,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00088000,
+ 0xA0000000, 0x00000000,
+ 0x03C, 0x0004C000,
+ 0xB0000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00070023,
+ 0x03C, 0x00048000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00068623,
+ 0x03C, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00061633,
+ 0x03C, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x0005C633,
+ 0x03C, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00050293,
+ 0x03C, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00049593,
+ 0x03C, 0x00000000,
+ 0x03A, 0x00000148,
+ 0x80000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x03B, 0x0004138B,
+ 0xA0000000, 0x00000000,
+ 0x03B, 0x0004078B,
+ 0xB0000000, 0x00000000,
+ 0x03C, 0x00000000,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x000B8023,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x000B8023,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x000B8023,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x000B8023,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x000B8023,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x000B8023,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x000B8023,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x000B8023,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x000B8023,
+ 0xA0000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x000B8023,
+ 0xB0000000, 0x00000000,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00084000,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x0008C000,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00000000,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00084000,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00000000,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00084000,
+ 0x90000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00020000,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00060000,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00084000,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00084000,
+ 0xA0000000, 0x00000000,
+ 0x03C, 0x00004000,
+ 0xB0000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x000B0023,
+ 0x80000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00020000,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00020000,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00020000,
+ 0xA0000000, 0x00000000,
+ 0x03C, 0x00020000,
+ 0xB0000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x000A8623,
+ 0x03C, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x000A1633,
+ 0x03C, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x0009C633,
+ 0x03C, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00090293,
+ 0x03C, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00089593,
+ 0x03C, 0x00000000,
+ 0x03A, 0x00000148,
+ 0x80000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x03B, 0x0008118B,
+ 0xA0000000, 0x00000000,
+ 0x03B, 0x0008078B,
+ 0xB0000000, 0x00000000,
+ 0x03C, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x0EF, 0x00000800,
+ 0x03B, 0x00000000,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x00000803,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x00000801,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x00000803,
+ 0x90000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x00001003,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x00001003,
+ 0x90000009, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x00001003,
+ 0xA0000000, 0x00000000,
+ 0x03A, 0x00000803,
+ 0xB0000000, 0x00000000,
+ 0x03B, 0x00040000,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x00001000,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x00001801,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x00000003,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x00000003,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x00001000,
+ 0x90000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x00000000,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x00001001,
+ 0x90000009, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x00000000,
+ 0xA0000000, 0x00000000,
+ 0x03A, 0x00001000,
+ 0xB0000000, 0x00000000,
+ 0x03B, 0x00080000,
+ 0x80000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x00001802,
+ 0x90000009, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x00000800,
+ 0xA0000000, 0x00000000,
+ 0x03A, 0x00001002,
+ 0xB0000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0xA0000000, 0x00000000,
+ 0xB0000000, 0x00000000,
+ 0x018, 0x00013124,
+ 0x0EF, 0x00000100,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0004A1AD,
+ 0x034, 0x000491AA,
+ 0x034, 0x000481A7,
+ 0x034, 0x000470AA,
+ 0x034, 0x000460A7,
+ 0x034, 0x00045049,
+ 0x034, 0x00044046,
+ 0x034, 0x00043026,
+ 0x034, 0x00042009,
+ 0x034, 0x00041006,
+ 0x034, 0x00040003,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0004A3EF,
+ 0x034, 0x000493AF,
+ 0x034, 0x000483AB,
+ 0x034, 0x0004718C,
+ 0x034, 0x00046189,
+ 0x034, 0x0004506D,
+ 0x034, 0x0004406A,
+ 0x034, 0x0004302C,
+ 0x034, 0x00042029,
+ 0x034, 0x00041026,
+ 0x034, 0x00040023,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0004A3EF,
+ 0x034, 0x000493AF,
+ 0x034, 0x000483AB,
+ 0x034, 0x0004718C,
+ 0x034, 0x00046189,
+ 0x034, 0x0004506D,
+ 0x034, 0x0004406A,
+ 0x034, 0x0004302C,
+ 0x034, 0x00042029,
+ 0x034, 0x00041026,
+ 0x034, 0x00040023,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0004A3EF,
+ 0x034, 0x000493AF,
+ 0x034, 0x000483AB,
+ 0x034, 0x0004718C,
+ 0x034, 0x00046189,
+ 0x034, 0x0004506D,
+ 0x034, 0x0004406A,
+ 0x034, 0x0004302C,
+ 0x034, 0x00042029,
+ 0x034, 0x00041026,
+ 0x034, 0x00040023,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0004A3EF,
+ 0x034, 0x000493AF,
+ 0x034, 0x000483AB,
+ 0x034, 0x0004718C,
+ 0x034, 0x00046189,
+ 0x034, 0x0004506D,
+ 0x034, 0x0004406A,
+ 0x034, 0x0004302C,
+ 0x034, 0x00042029,
+ 0x034, 0x00041026,
+ 0x034, 0x00040023,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0004A3F5,
+ 0x034, 0x000493F2,
+ 0x034, 0x000483B0,
+ 0x034, 0x00047370,
+ 0x034, 0x0004636D,
+ 0x034, 0x0004536A,
+ 0x034, 0x00044349,
+ 0x034, 0x0004316A,
+ 0x034, 0x00042167,
+ 0x034, 0x00041129,
+ 0x034, 0x00040049,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0004A3EF,
+ 0x034, 0x000493AF,
+ 0x034, 0x000483AB,
+ 0x034, 0x0004718C,
+ 0x034, 0x00046189,
+ 0x034, 0x0004506D,
+ 0x034, 0x0004406A,
+ 0x034, 0x0004302C,
+ 0x034, 0x00042029,
+ 0x034, 0x00041026,
+ 0x034, 0x00040023,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0004A3EF,
+ 0x034, 0x000493AF,
+ 0x034, 0x000483AB,
+ 0x034, 0x0004718C,
+ 0x034, 0x00046189,
+ 0x034, 0x0004506D,
+ 0x034, 0x0004406A,
+ 0x034, 0x0004302C,
+ 0x034, 0x00042029,
+ 0x034, 0x00041026,
+ 0x034, 0x00040023,
+ 0xA0000000, 0x00000000,
+ 0x034, 0x0004AFF1,
+ 0x034, 0x00049FEE,
+ 0x034, 0x00048FEB,
+ 0x034, 0x00047FE8,
+ 0x034, 0x00046DEA,
+ 0x034, 0x00045DE7,
+ 0x034, 0x00044CEA,
+ 0x034, 0x00043CE7,
+ 0x034, 0x00042C69,
+ 0x034, 0x00041C66,
+ 0x034, 0x00040C28,
+ 0xB0000000, 0x00000000,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0002A1AD,
+ 0x034, 0x000291AA,
+ 0x034, 0x000281A7,
+ 0x034, 0x000270AA,
+ 0x034, 0x000260A7,
+ 0x034, 0x00025049,
+ 0x034, 0x00024046,
+ 0x034, 0x00023026,
+ 0x034, 0x00022009,
+ 0x034, 0x00021006,
+ 0x034, 0x00020003,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0002A3EF,
+ 0x034, 0x000293AC,
+ 0x034, 0x0002838A,
+ 0x034, 0x0002718C,
+ 0x034, 0x00026189,
+ 0x034, 0x0002506D,
+ 0x034, 0x0002406A,
+ 0x034, 0x0002302C,
+ 0x034, 0x00022029,
+ 0x034, 0x00021026,
+ 0x034, 0x00020023,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0002A3EF,
+ 0x034, 0x000293AC,
+ 0x034, 0x0002838A,
+ 0x034, 0x0002718C,
+ 0x034, 0x00026189,
+ 0x034, 0x0002506D,
+ 0x034, 0x0002406A,
+ 0x034, 0x0002302C,
+ 0x034, 0x00022029,
+ 0x034, 0x00021026,
+ 0x034, 0x00020023,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0002A3EF,
+ 0x034, 0x000293AC,
+ 0x034, 0x0002838A,
+ 0x034, 0x0002718C,
+ 0x034, 0x00026189,
+ 0x034, 0x0002506D,
+ 0x034, 0x0002406A,
+ 0x034, 0x0002302C,
+ 0x034, 0x00022029,
+ 0x034, 0x00021026,
+ 0x034, 0x00020023,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0002A3EF,
+ 0x034, 0x000293AC,
+ 0x034, 0x0002838A,
+ 0x034, 0x0002718C,
+ 0x034, 0x00026189,
+ 0x034, 0x0002506D,
+ 0x034, 0x0002406A,
+ 0x034, 0x0002302C,
+ 0x034, 0x00022029,
+ 0x034, 0x00021026,
+ 0x034, 0x00020023,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0002A3F5,
+ 0x034, 0x000293F2,
+ 0x034, 0x000282F1,
+ 0x034, 0x000272B0,
+ 0x034, 0x000262AD,
+ 0x034, 0x000252AA,
+ 0x034, 0x000242A7,
+ 0x034, 0x000230EC,
+ 0x034, 0x000220E9,
+ 0x034, 0x0002106A,
+ 0x034, 0x00020067,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0002A3EF,
+ 0x034, 0x000293AC,
+ 0x034, 0x0002838A,
+ 0x034, 0x0002718C,
+ 0x034, 0x00026189,
+ 0x034, 0x0002506D,
+ 0x034, 0x0002406A,
+ 0x034, 0x0002302C,
+ 0x034, 0x00022029,
+ 0x034, 0x00021026,
+ 0x034, 0x00020023,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0002A3EF,
+ 0x034, 0x000293AC,
+ 0x034, 0x0002838A,
+ 0x034, 0x0002718C,
+ 0x034, 0x00026189,
+ 0x034, 0x0002506D,
+ 0x034, 0x0002406A,
+ 0x034, 0x0002302C,
+ 0x034, 0x00022029,
+ 0x034, 0x00021026,
+ 0x034, 0x00020023,
+ 0xA0000000, 0x00000000,
+ 0x034, 0x0002AFF1,
+ 0x034, 0x00029FEE,
+ 0x034, 0x00028FEB,
+ 0x034, 0x00027FE8,
+ 0x034, 0x00026DEA,
+ 0x034, 0x00025DE7,
+ 0x034, 0x00024CEA,
+ 0x034, 0x00023CE7,
+ 0x034, 0x00022C69,
+ 0x034, 0x00021C66,
+ 0x034, 0x00020C28,
+ 0xB0000000, 0x00000000,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A3EC,
+ 0x034, 0x0000938C,
+ 0x034, 0x000081AD,
+ 0x034, 0x000071AA,
+ 0x034, 0x000061A7,
+ 0x034, 0x000050AA,
+ 0x034, 0x000040A7,
+ 0x034, 0x0000302C,
+ 0x034, 0x00002029,
+ 0x034, 0x0000100C,
+ 0x034, 0x00000009,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A3EE,
+ 0x034, 0x000093AC,
+ 0x034, 0x0000838A,
+ 0x034, 0x0000718C,
+ 0x034, 0x00006189,
+ 0x034, 0x0000506D,
+ 0x034, 0x0000406A,
+ 0x034, 0x0000302C,
+ 0x034, 0x00002029,
+ 0x034, 0x00001026,
+ 0x034, 0x00000023,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A3EE,
+ 0x034, 0x000093AC,
+ 0x034, 0x0000838A,
+ 0x034, 0x0000718C,
+ 0x034, 0x00006189,
+ 0x034, 0x0000506D,
+ 0x034, 0x0000406A,
+ 0x034, 0x0000302C,
+ 0x034, 0x00002029,
+ 0x034, 0x00001026,
+ 0x034, 0x00000023,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A3EE,
+ 0x034, 0x000093AC,
+ 0x034, 0x0000838A,
+ 0x034, 0x0000718C,
+ 0x034, 0x00006189,
+ 0x034, 0x0000506D,
+ 0x034, 0x0000406A,
+ 0x034, 0x0000302C,
+ 0x034, 0x00002029,
+ 0x034, 0x00001026,
+ 0x034, 0x00000023,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A3EE,
+ 0x034, 0x000093AC,
+ 0x034, 0x0000838A,
+ 0x034, 0x0000718C,
+ 0x034, 0x00006189,
+ 0x034, 0x0000506D,
+ 0x034, 0x0000406A,
+ 0x034, 0x0000302C,
+ 0x034, 0x00002029,
+ 0x034, 0x00001026,
+ 0x034, 0x00000023,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A3F4,
+ 0x034, 0x000093F1,
+ 0x034, 0x000082B1,
+ 0x034, 0x000071D1,
+ 0x034, 0x000061CE,
+ 0x034, 0x000051CB,
+ 0x034, 0x000041C8,
+ 0x034, 0x000030CB,
+ 0x034, 0x000020C8,
+ 0x034, 0x00001087,
+ 0x034, 0x00000084,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A3EE,
+ 0x034, 0x000093AC,
+ 0x034, 0x0000838A,
+ 0x034, 0x0000718C,
+ 0x034, 0x00006189,
+ 0x034, 0x0000506D,
+ 0x034, 0x0000406A,
+ 0x034, 0x0000302C,
+ 0x034, 0x00002029,
+ 0x034, 0x00001026,
+ 0x034, 0x00000023,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A3EE,
+ 0x034, 0x000093AC,
+ 0x034, 0x0000838A,
+ 0x034, 0x0000718C,
+ 0x034, 0x00006189,
+ 0x034, 0x0000506D,
+ 0x034, 0x0000406A,
+ 0x034, 0x0000302C,
+ 0x034, 0x00002029,
+ 0x034, 0x00001026,
+ 0x034, 0x00000023,
+ 0xA0000000, 0x00000000,
+ 0x034, 0x0000AFF1,
+ 0x034, 0x00009FEE,
+ 0x034, 0x00008FEB,
+ 0x034, 0x00007FE8,
+ 0x034, 0x00006DEA,
+ 0x034, 0x00005DE7,
+ 0x034, 0x00004CEA,
+ 0x034, 0x00003CE7,
+ 0x034, 0x00002C69,
+ 0x034, 0x00001C66,
+ 0x034, 0x00000C28,
+ 0xB0000000, 0x00000000,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x000CA1AD,
+ 0x034, 0x000C91AA,
+ 0x034, 0x000C81A7,
+ 0x034, 0x000C70AA,
+ 0x034, 0x000C60A7,
+ 0x034, 0x000C5049,
+ 0x034, 0x000C4046,
+ 0x034, 0x000C3026,
+ 0x034, 0x000C2009,
+ 0x034, 0x000C1006,
+ 0x034, 0x000C0003,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x000CA3EF,
+ 0x034, 0x000C93AF,
+ 0x034, 0x000C83AB,
+ 0x034, 0x000C718C,
+ 0x034, 0x000C6189,
+ 0x034, 0x000C506D,
+ 0x034, 0x000C406A,
+ 0x034, 0x000C302C,
+ 0x034, 0x000C2029,
+ 0x034, 0x000C1026,
+ 0x034, 0x000C0023,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x000CA3EF,
+ 0x034, 0x000C93AF,
+ 0x034, 0x000C83AB,
+ 0x034, 0x000C718C,
+ 0x034, 0x000C6189,
+ 0x034, 0x000C506D,
+ 0x034, 0x000C406A,
+ 0x034, 0x000C302C,
+ 0x034, 0x000C2029,
+ 0x034, 0x000C1026,
+ 0x034, 0x000C0023,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x000CA3EF,
+ 0x034, 0x000C93AF,
+ 0x034, 0x000C83AB,
+ 0x034, 0x000C718C,
+ 0x034, 0x000C6189,
+ 0x034, 0x000C506D,
+ 0x034, 0x000C406A,
+ 0x034, 0x000C302C,
+ 0x034, 0x000C2029,
+ 0x034, 0x000C1026,
+ 0x034, 0x000C0023,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x000CA3EF,
+ 0x034, 0x000C93AF,
+ 0x034, 0x000C83AB,
+ 0x034, 0x000C718C,
+ 0x034, 0x000C6189,
+ 0x034, 0x000C506D,
+ 0x034, 0x000C406A,
+ 0x034, 0x000C302C,
+ 0x034, 0x000C2029,
+ 0x034, 0x000C1026,
+ 0x034, 0x000C0023,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x000CA3F5,
+ 0x034, 0x000C93F2,
+ 0x034, 0x000C83B0,
+ 0x034, 0x000C7370,
+ 0x034, 0x000C636D,
+ 0x034, 0x000C536A,
+ 0x034, 0x000C4349,
+ 0x034, 0x000C316A,
+ 0x034, 0x000C2167,
+ 0x034, 0x000C1129,
+ 0x034, 0x000C0049,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x000CA3EF,
+ 0x034, 0x000C93AF,
+ 0x034, 0x000C83AB,
+ 0x034, 0x000C718C,
+ 0x034, 0x000C6189,
+ 0x034, 0x000C506D,
+ 0x034, 0x000C406A,
+ 0x034, 0x000C302C,
+ 0x034, 0x000C2029,
+ 0x034, 0x000C1026,
+ 0x034, 0x000C0023,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x000CA3EF,
+ 0x034, 0x000C93AF,
+ 0x034, 0x000C83AB,
+ 0x034, 0x000C718C,
+ 0x034, 0x000C6189,
+ 0x034, 0x000C506D,
+ 0x034, 0x000C406A,
+ 0x034, 0x000C302C,
+ 0x034, 0x000C2029,
+ 0x034, 0x000C1026,
+ 0x034, 0x000C0023,
+ 0xA0000000, 0x00000000,
+ 0x034, 0x000CA794,
+ 0x034, 0x000C9791,
+ 0x034, 0x000C878E,
+ 0x034, 0x000C778B,
+ 0x034, 0x000C658D,
+ 0x034, 0x000C558A,
+ 0x034, 0x000C448D,
+ 0x034, 0x000C348A,
+ 0x034, 0x000C244C,
+ 0x034, 0x000C1449,
+ 0x034, 0x000C042B,
+ 0xB0000000, 0x00000000,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x000AA1AD,
+ 0x034, 0x000A91AA,
+ 0x034, 0x000A81A7,
+ 0x034, 0x000A70AA,
+ 0x034, 0x000A60A7,
+ 0x034, 0x000A5049,
+ 0x034, 0x000A4046,
+ 0x034, 0x000A3026,
+ 0x034, 0x000A2009,
+ 0x034, 0x000A1006,
+ 0x034, 0x000A0003,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x000AA3EF,
+ 0x034, 0x000A93AC,
+ 0x034, 0x000A838A,
+ 0x034, 0x000A718C,
+ 0x034, 0x000A6189,
+ 0x034, 0x000A506D,
+ 0x034, 0x000A406A,
+ 0x034, 0x000A302C,
+ 0x034, 0x000A2029,
+ 0x034, 0x000A1026,
+ 0x034, 0x000A0023,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x000AA3EF,
+ 0x034, 0x000A93AC,
+ 0x034, 0x000A838A,
+ 0x034, 0x000A718C,
+ 0x034, 0x000A6189,
+ 0x034, 0x000A506D,
+ 0x034, 0x000A406A,
+ 0x034, 0x000A302C,
+ 0x034, 0x000A2029,
+ 0x034, 0x000A1026,
+ 0x034, 0x000A0023,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x000AA3EF,
+ 0x034, 0x000A93AC,
+ 0x034, 0x000A838A,
+ 0x034, 0x000A718C,
+ 0x034, 0x000A6189,
+ 0x034, 0x000A506D,
+ 0x034, 0x000A406A,
+ 0x034, 0x000A302C,
+ 0x034, 0x000A2029,
+ 0x034, 0x000A1026,
+ 0x034, 0x000A0023,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x000AA3EF,
+ 0x034, 0x000A93AC,
+ 0x034, 0x000A838A,
+ 0x034, 0x000A718C,
+ 0x034, 0x000A6189,
+ 0x034, 0x000A506D,
+ 0x034, 0x000A406A,
+ 0x034, 0x000A302C,
+ 0x034, 0x000A2029,
+ 0x034, 0x000A1026,
+ 0x034, 0x000A0023,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x000AA3F5,
+ 0x034, 0x000A93F2,
+ 0x034, 0x000A82F1,
+ 0x034, 0x000A72B0,
+ 0x034, 0x000A62AD,
+ 0x034, 0x000A52AA,
+ 0x034, 0x000A42A7,
+ 0x034, 0x000A30EC,
+ 0x034, 0x000A20E9,
+ 0x034, 0x000A106A,
+ 0x034, 0x000A0067,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x000AA3EF,
+ 0x034, 0x000A93AC,
+ 0x034, 0x000A838A,
+ 0x034, 0x000A718C,
+ 0x034, 0x000A6189,
+ 0x034, 0x000A506D,
+ 0x034, 0x000A406A,
+ 0x034, 0x000A302C,
+ 0x034, 0x000A2029,
+ 0x034, 0x000A1026,
+ 0x034, 0x000A0023,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x000AA3EF,
+ 0x034, 0x000A93AC,
+ 0x034, 0x000A838A,
+ 0x034, 0x000A718C,
+ 0x034, 0x000A6189,
+ 0x034, 0x000A506D,
+ 0x034, 0x000A406A,
+ 0x034, 0x000A302C,
+ 0x034, 0x000A2029,
+ 0x034, 0x000A1026,
+ 0x034, 0x000A0023,
+ 0xA0000000, 0x00000000,
+ 0x034, 0x000AA794,
+ 0x034, 0x000A9791,
+ 0x034, 0x000A878E,
+ 0x034, 0x000A778B,
+ 0x034, 0x000A658D,
+ 0x034, 0x000A558A,
+ 0x034, 0x000A448D,
+ 0x034, 0x000A348A,
+ 0x034, 0x000A244C,
+ 0x034, 0x000A1449,
+ 0x034, 0x000A042B,
+ 0xB0000000, 0x00000000,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0008A3EC,
+ 0x034, 0x0008938C,
+ 0x034, 0x000881AD,
+ 0x034, 0x000871AA,
+ 0x034, 0x000861A7,
+ 0x034, 0x000850AA,
+ 0x034, 0x000840A7,
+ 0x034, 0x0008302C,
+ 0x034, 0x00082029,
+ 0x034, 0x0008100C,
+ 0x034, 0x00080009,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0008A3EE,
+ 0x034, 0x000893AC,
+ 0x034, 0x0008838A,
+ 0x034, 0x0008718C,
+ 0x034, 0x00086189,
+ 0x034, 0x0008506D,
+ 0x034, 0x0008406A,
+ 0x034, 0x0008302C,
+ 0x034, 0x00082029,
+ 0x034, 0x00081026,
+ 0x034, 0x00080023,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0008A3EE,
+ 0x034, 0x000893AC,
+ 0x034, 0x0008838A,
+ 0x034, 0x0008718C,
+ 0x034, 0x00086189,
+ 0x034, 0x0008506D,
+ 0x034, 0x0008406A,
+ 0x034, 0x0008302C,
+ 0x034, 0x00082029,
+ 0x034, 0x00081026,
+ 0x034, 0x00080023,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0008A3EE,
+ 0x034, 0x000893AC,
+ 0x034, 0x0008838A,
+ 0x034, 0x0008718C,
+ 0x034, 0x00086189,
+ 0x034, 0x0008506D,
+ 0x034, 0x0008406A,
+ 0x034, 0x0008302C,
+ 0x034, 0x00082029,
+ 0x034, 0x00081026,
+ 0x034, 0x00080023,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0008A3EE,
+ 0x034, 0x000893AC,
+ 0x034, 0x0008838A,
+ 0x034, 0x0008718C,
+ 0x034, 0x00086189,
+ 0x034, 0x0008506D,
+ 0x034, 0x0008406A,
+ 0x034, 0x0008302C,
+ 0x034, 0x00082029,
+ 0x034, 0x00081026,
+ 0x034, 0x00080023,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0008A3F4,
+ 0x034, 0x000893F1,
+ 0x034, 0x000882B1,
+ 0x034, 0x000871D1,
+ 0x034, 0x000861CE,
+ 0x034, 0x000851CB,
+ 0x034, 0x000841C8,
+ 0x034, 0x000830CB,
+ 0x034, 0x000820C8,
+ 0x034, 0x00081087,
+ 0x034, 0x00080084,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0008A3EE,
+ 0x034, 0x000893AC,
+ 0x034, 0x0008838A,
+ 0x034, 0x0008718C,
+ 0x034, 0x00086189,
+ 0x034, 0x0008506D,
+ 0x034, 0x0008406A,
+ 0x034, 0x0008302C,
+ 0x034, 0x00082029,
+ 0x034, 0x00081026,
+ 0x034, 0x00080023,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0008A3EE,
+ 0x034, 0x000893AC,
+ 0x034, 0x0008838A,
+ 0x034, 0x0008718C,
+ 0x034, 0x00086189,
+ 0x034, 0x0008506D,
+ 0x034, 0x0008406A,
+ 0x034, 0x0008302C,
+ 0x034, 0x00082029,
+ 0x034, 0x00081026,
+ 0x034, 0x00080023,
+ 0xA0000000, 0x00000000,
+ 0x034, 0x0008A794,
+ 0x034, 0x00089791,
+ 0x034, 0x0008878E,
+ 0x034, 0x0008778B,
+ 0x034, 0x0008658D,
+ 0x034, 0x0008558A,
+ 0x034, 0x0008448D,
+ 0x034, 0x0008348A,
+ 0x034, 0x0008244C,
+ 0x034, 0x00081449,
+ 0x034, 0x0008042B,
+ 0xB0000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x0DF, 0x00000001,
+ 0x018, 0x0001712A,
+ 0x0EF, 0x00000040,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x035, 0x000006CC,
+ 0x035, 0x000086CC,
+ 0x035, 0x000106CC,
+ 0x035, 0x000206CC,
+ 0x035, 0x000286CC,
+ 0x035, 0x000306CC,
+ 0x035, 0x000406CC,
+ 0x035, 0x000486CC,
+ 0x035, 0x000506CC,
+ 0x035, 0x000806CC,
+ 0x035, 0x000886CC,
+ 0x035, 0x000906CC,
+ 0x035, 0x000A06CC,
+ 0x035, 0x000A86CC,
+ 0x035, 0x000B06CC,
+ 0x035, 0x000C06CC,
+ 0x035, 0x000C86CC,
+ 0x035, 0x000D06CC,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x035, 0x000006CC,
+ 0x035, 0x000086CC,
+ 0x035, 0x000106CC,
+ 0x035, 0x000206CC,
+ 0x035, 0x000286CC,
+ 0x035, 0x000306CC,
+ 0x035, 0x000406CC,
+ 0x035, 0x000486CC,
+ 0x035, 0x000506CC,
+ 0x035, 0x000806CC,
+ 0x035, 0x000886CC,
+ 0x035, 0x000906CC,
+ 0x035, 0x000A06CC,
+ 0x035, 0x000A86CC,
+ 0x035, 0x000B06CC,
+ 0x035, 0x000C06CC,
+ 0x035, 0x000C86CC,
+ 0x035, 0x000D06CC,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x035, 0x000006CC,
+ 0x035, 0x000086CC,
+ 0x035, 0x000106CC,
+ 0x035, 0x000206CC,
+ 0x035, 0x000286CC,
+ 0x035, 0x000306CC,
+ 0x035, 0x000406CC,
+ 0x035, 0x000486CC,
+ 0x035, 0x000506CC,
+ 0x035, 0x000806CC,
+ 0x035, 0x000886CC,
+ 0x035, 0x000906CC,
+ 0x035, 0x000A06CC,
+ 0x035, 0x000A86CC,
+ 0x035, 0x000B06CC,
+ 0x035, 0x000C06CC,
+ 0x035, 0x000C86CC,
+ 0x035, 0x000D06CC,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x035, 0x000006CC,
+ 0x035, 0x000086CC,
+ 0x035, 0x000106CC,
+ 0x035, 0x000206CC,
+ 0x035, 0x000286CC,
+ 0x035, 0x000306CC,
+ 0x035, 0x000406CC,
+ 0x035, 0x000486CC,
+ 0x035, 0x000506CC,
+ 0x035, 0x000806CC,
+ 0x035, 0x000886CC,
+ 0x035, 0x000906CC,
+ 0x035, 0x000A06CC,
+ 0x035, 0x000A86CC,
+ 0x035, 0x000B06CC,
+ 0x035, 0x000C06CC,
+ 0x035, 0x000C86CC,
+ 0x035, 0x000D06CC,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x035, 0x000006CC,
+ 0x035, 0x000086CC,
+ 0x035, 0x000106CC,
+ 0x035, 0x000206CC,
+ 0x035, 0x000286CC,
+ 0x035, 0x000306CC,
+ 0x035, 0x000406CC,
+ 0x035, 0x000486CC,
+ 0x035, 0x000506CC,
+ 0x035, 0x000806CC,
+ 0x035, 0x000886CC,
+ 0x035, 0x000906CC,
+ 0x035, 0x000A06CC,
+ 0x035, 0x000A86CC,
+ 0x035, 0x000B06CC,
+ 0x035, 0x000C06CC,
+ 0x035, 0x000C86CC,
+ 0x035, 0x000D06CC,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x035, 0x000006CC,
+ 0x035, 0x000086CC,
+ 0x035, 0x000106CC,
+ 0x035, 0x000206CC,
+ 0x035, 0x000286CC,
+ 0x035, 0x000306CC,
+ 0x035, 0x000406CC,
+ 0x035, 0x000486CC,
+ 0x035, 0x000506CC,
+ 0x035, 0x000806CC,
+ 0x035, 0x000886CC,
+ 0x035, 0x000906CC,
+ 0x035, 0x000A06CC,
+ 0x035, 0x000A86CC,
+ 0x035, 0x000B06CC,
+ 0x035, 0x000C06CC,
+ 0x035, 0x000C86CC,
+ 0x035, 0x000D06CC,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x035, 0x000006CC,
+ 0x035, 0x000086CC,
+ 0x035, 0x000106CC,
+ 0x035, 0x000206CC,
+ 0x035, 0x000286CC,
+ 0x035, 0x000306CC,
+ 0x035, 0x000406CC,
+ 0x035, 0x000486CC,
+ 0x035, 0x000506CC,
+ 0x035, 0x000806CC,
+ 0x035, 0x000886CC,
+ 0x035, 0x000906CC,
+ 0x035, 0x000A06CC,
+ 0x035, 0x000A86CC,
+ 0x035, 0x000B06CC,
+ 0x035, 0x000C06CC,
+ 0x035, 0x000C86CC,
+ 0x035, 0x000D06CC,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x035, 0x000006CC,
+ 0x035, 0x000086CC,
+ 0x035, 0x000106CC,
+ 0x035, 0x000206CC,
+ 0x035, 0x000286CC,
+ 0x035, 0x000306CC,
+ 0x035, 0x000406CC,
+ 0x035, 0x000486CC,
+ 0x035, 0x000506CC,
+ 0x035, 0x000806CC,
+ 0x035, 0x000886CC,
+ 0x035, 0x000906CC,
+ 0x035, 0x000A06CC,
+ 0x035, 0x000A86CC,
+ 0x035, 0x000B06CC,
+ 0x035, 0x000C06CC,
+ 0x035, 0x000C86CC,
+ 0x035, 0x000D06CC,
+ 0xA0000000, 0x00000000,
+ 0x035, 0x00000747,
+ 0x035, 0x00008747,
+ 0x035, 0x00010747,
+ 0x035, 0x00020747,
+ 0x035, 0x00028747,
+ 0x035, 0x00030747,
+ 0x035, 0x00040747,
+ 0x035, 0x00048747,
+ 0x035, 0x00050747,
+ 0x035, 0x000805FB,
+ 0x035, 0x000885FB,
+ 0x035, 0x000905FB,
+ 0x035, 0x000A05FB,
+ 0x035, 0x000A85FB,
+ 0x035, 0x000B05FB,
+ 0x035, 0x000C05FB,
+ 0x035, 0x000C85FB,
+ 0x035, 0x000D05FB,
+ 0xB0000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x0DF, 0x00000001,
+ 0x018, 0x0001712A,
+ 0x0EF, 0x00000010,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x036, 0x00000473,
+ 0x036, 0x00008473,
+ 0x036, 0x00010473,
+ 0x036, 0x00020473,
+ 0x036, 0x00028473,
+ 0x036, 0x00030473,
+ 0x036, 0x00040473,
+ 0x036, 0x00048473,
+ 0x036, 0x00050473,
+ 0x036, 0x00080473,
+ 0x036, 0x00088473,
+ 0x036, 0x00090473,
+ 0x036, 0x000A0473,
+ 0x036, 0x000A8473,
+ 0x036, 0x000B0473,
+ 0x036, 0x000C0473,
+ 0x036, 0x000C8473,
+ 0x036, 0x000D0473,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x036, 0x00000475,
+ 0x036, 0x00008475,
+ 0x036, 0x00010475,
+ 0x036, 0x00020475,
+ 0x036, 0x00028475,
+ 0x036, 0x00030475,
+ 0x036, 0x00040475,
+ 0x036, 0x00048475,
+ 0x036, 0x00050475,
+ 0x036, 0x00080475,
+ 0x036, 0x00088475,
+ 0x036, 0x00090475,
+ 0x036, 0x000A0475,
+ 0x036, 0x000A8475,
+ 0x036, 0x000B0475,
+ 0x036, 0x000C0475,
+ 0x036, 0x000C8475,
+ 0x036, 0x000D0475,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x036, 0x00000475,
+ 0x036, 0x00008475,
+ 0x036, 0x00010475,
+ 0x036, 0x00020475,
+ 0x036, 0x00028475,
+ 0x036, 0x00030475,
+ 0x036, 0x00040475,
+ 0x036, 0x00048475,
+ 0x036, 0x00050475,
+ 0x036, 0x00080475,
+ 0x036, 0x00088475,
+ 0x036, 0x00090475,
+ 0x036, 0x000A0475,
+ 0x036, 0x000A8475,
+ 0x036, 0x000B0475,
+ 0x036, 0x000C0475,
+ 0x036, 0x000C8475,
+ 0x036, 0x000D0475,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x036, 0x00000475,
+ 0x036, 0x00008475,
+ 0x036, 0x00010475,
+ 0x036, 0x00020475,
+ 0x036, 0x00028475,
+ 0x036, 0x00030475,
+ 0x036, 0x00040475,
+ 0x036, 0x00048475,
+ 0x036, 0x00050475,
+ 0x036, 0x00080475,
+ 0x036, 0x00088475,
+ 0x036, 0x00090475,
+ 0x036, 0x000A0475,
+ 0x036, 0x000A8475,
+ 0x036, 0x000B0475,
+ 0x036, 0x000C0475,
+ 0x036, 0x000C8475,
+ 0x036, 0x000D0475,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x036, 0x00000475,
+ 0x036, 0x00008475,
+ 0x036, 0x00010475,
+ 0x036, 0x00020475,
+ 0x036, 0x00028475,
+ 0x036, 0x00030475,
+ 0x036, 0x00040475,
+ 0x036, 0x00048475,
+ 0x036, 0x00050475,
+ 0x036, 0x00080475,
+ 0x036, 0x00088475,
+ 0x036, 0x00090475,
+ 0x036, 0x000A0475,
+ 0x036, 0x000A8475,
+ 0x036, 0x000B0475,
+ 0x036, 0x000C0475,
+ 0x036, 0x000C8475,
+ 0x036, 0x000D0475,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x036, 0x00000475,
+ 0x036, 0x00008475,
+ 0x036, 0x00010475,
+ 0x036, 0x00020475,
+ 0x036, 0x00028475,
+ 0x036, 0x00030475,
+ 0x036, 0x00040475,
+ 0x036, 0x00048475,
+ 0x036, 0x00050475,
+ 0x036, 0x00080475,
+ 0x036, 0x00088475,
+ 0x036, 0x00090475,
+ 0x036, 0x000A0475,
+ 0x036, 0x000A8475,
+ 0x036, 0x000B0475,
+ 0x036, 0x000C0475,
+ 0x036, 0x000C8475,
+ 0x036, 0x000D0475,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x036, 0x00000475,
+ 0x036, 0x00008475,
+ 0x036, 0x00010475,
+ 0x036, 0x00020475,
+ 0x036, 0x00028475,
+ 0x036, 0x00030475,
+ 0x036, 0x00040475,
+ 0x036, 0x00048475,
+ 0x036, 0x00050475,
+ 0x036, 0x00080475,
+ 0x036, 0x00088475,
+ 0x036, 0x00090475,
+ 0x036, 0x000A0475,
+ 0x036, 0x000A8475,
+ 0x036, 0x000B0475,
+ 0x036, 0x000C0475,
+ 0x036, 0x000C8475,
+ 0x036, 0x000D0475,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x036, 0x00000475,
+ 0x036, 0x00008475,
+ 0x036, 0x00010475,
+ 0x036, 0x00020475,
+ 0x036, 0x00028475,
+ 0x036, 0x00030475,
+ 0x036, 0x00040475,
+ 0x036, 0x00048475,
+ 0x036, 0x00050475,
+ 0x036, 0x00080475,
+ 0x036, 0x00088475,
+ 0x036, 0x00090475,
+ 0x036, 0x000A0475,
+ 0x036, 0x000A8475,
+ 0x036, 0x000B0475,
+ 0x036, 0x000C0475,
+ 0x036, 0x000C8475,
+ 0x036, 0x000D0475,
+ 0xA0000000, 0x00000000,
+ 0x036, 0x00000473,
+ 0x036, 0x00008473,
+ 0x036, 0x00010473,
+ 0x036, 0x00020473,
+ 0x036, 0x00028473,
+ 0x036, 0x00030473,
+ 0x036, 0x00040473,
+ 0x036, 0x00048473,
+ 0x036, 0x00050473,
+ 0x036, 0x00080473,
+ 0x036, 0x00088473,
+ 0x036, 0x00090473,
+ 0x036, 0x000A0473,
+ 0x036, 0x000A8473,
+ 0x036, 0x000B0473,
+ 0x036, 0x000C0473,
+ 0x036, 0x000C8473,
+ 0x036, 0x000D0473,
+ 0xB0000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0xA0000000, 0x00000000,
+ 0x0EF, 0x00000004,
+ 0x037, 0x00000000,
+ 0x038, 0x00005146,
+ 0x037, 0x00004000,
+ 0x038, 0x00005146,
+ 0x037, 0x00008000,
+ 0x038, 0x00005146,
+ 0x037, 0x00010000,
+ 0x038, 0x00005146,
+ 0x037, 0x00014000,
+ 0x038, 0x00005146,
+ 0x037, 0x00018000,
+ 0x038, 0x00004D4E,
+ 0x037, 0x0001C000,
+ 0x038, 0x00004D4E,
+ 0x037, 0x00020000,
+ 0x038, 0x00004D4E,
+ 0x037, 0x00024000,
+ 0x038, 0x000071C6,
+ 0x037, 0x00028000,
+ 0x038, 0x000071C6,
+ 0x037, 0x0002C000,
+ 0x038, 0x000071C6,
+ 0x037, 0x00030000,
+ 0x038, 0x000071CE,
+ 0x037, 0x00034000,
+ 0x038, 0x000071CE,
+ 0x037, 0x00038000,
+ 0x038, 0x00005126,
+ 0x037, 0x0003C000,
+ 0x038, 0x00005126,
+ 0x037, 0x00040000,
+ 0x038, 0x00005126,
+ 0x037, 0x00044000,
+ 0x038, 0x00005126,
+ 0x037, 0x00048000,
+ 0x038, 0x00005126,
+ 0x037, 0x00080000,
+ 0x038, 0x00005ECE,
+ 0x037, 0x00084000,
+ 0x038, 0x00005ECE,
+ 0x037, 0x00088000,
+ 0x038, 0x00005ECE,
+ 0x037, 0x00090000,
+ 0x038, 0x00005ECE,
+ 0x037, 0x00094000,
+ 0x038, 0x00005ECE,
+ 0x037, 0x00098000,
+ 0x038, 0x00005ECE,
+ 0x037, 0x0009C000,
+ 0x038, 0x00005ECE,
+ 0x037, 0x000A0000,
+ 0x038, 0x00005ECE,
+ 0x037, 0x000A4000,
+ 0x038, 0x00005ECE,
+ 0x037, 0x000A8000,
+ 0x038, 0x00005ECE,
+ 0x037, 0x000AC000,
+ 0x038, 0x00005ECE,
+ 0x037, 0x000B0000,
+ 0x038, 0x00005ECE,
+ 0x037, 0x000B4000,
+ 0x038, 0x00005ECE,
+ 0x037, 0x000B8000,
+ 0x038, 0x00005ECE,
+ 0x037, 0x000BC000,
+ 0x038, 0x00005ECE,
+ 0x037, 0x000C0000,
+ 0x038, 0x00005ECE,
+ 0x037, 0x000C4000,
+ 0x038, 0x00005ECE,
+ 0x037, 0x000C8000,
+ 0x038, 0x00005ECE,
+ 0x0EF, 0x00000000,
+ 0xB0000000, 0x00000000,
+ 0x0EF, 0x00000008,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0xA0000000, 0x00000000,
+ 0x03C, 0x0000007D,
+ 0x03C, 0x0000047D,
+ 0x03C, 0x0000087D,
+ 0x03C, 0x0000107D,
+ 0x03C, 0x0000147D,
+ 0x03C, 0x0000187D,
+ 0xB0000000, 0x00000000,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x0000027D,
+ 0x03C, 0x0000054A,
+ 0x03C, 0x00000821,
+ 0x03C, 0x0000127D,
+ 0x03C, 0x0000154A,
+ 0x03C, 0x00001821,
+ 0x03C, 0x0000227D,
+ 0x03C, 0x0000254A,
+ 0x03C, 0x00002821,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x0000027D,
+ 0x03C, 0x0000054A,
+ 0x03C, 0x00000821,
+ 0x03C, 0x0000127D,
+ 0x03C, 0x0000154A,
+ 0x03C, 0x00001821,
+ 0x03C, 0x0000227D,
+ 0x03C, 0x0000254A,
+ 0x03C, 0x00002821,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x0000027D,
+ 0x03C, 0x0000054A,
+ 0x03C, 0x00000821,
+ 0x03C, 0x0000127D,
+ 0x03C, 0x0000154A,
+ 0x03C, 0x00001821,
+ 0x03C, 0x0000227D,
+ 0x03C, 0x0000254A,
+ 0x03C, 0x00002821,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x0000027D,
+ 0x03C, 0x0000054A,
+ 0x03C, 0x00000821,
+ 0x03C, 0x0000127D,
+ 0x03C, 0x0000154A,
+ 0x03C, 0x00001821,
+ 0x03C, 0x0000227D,
+ 0x03C, 0x0000254A,
+ 0x03C, 0x00002821,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x0000027D,
+ 0x03C, 0x0000054A,
+ 0x03C, 0x00000821,
+ 0x03C, 0x0000127D,
+ 0x03C, 0x0000154A,
+ 0x03C, 0x00001821,
+ 0x03C, 0x0000227D,
+ 0x03C, 0x0000254A,
+ 0x03C, 0x00002821,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x0000027D,
+ 0x03C, 0x0000054A,
+ 0x03C, 0x00000821,
+ 0x03C, 0x0000127D,
+ 0x03C, 0x0000154A,
+ 0x03C, 0x00001821,
+ 0x03C, 0x0000227D,
+ 0x03C, 0x0000254A,
+ 0x03C, 0x00002821,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x0000027D,
+ 0x03C, 0x0000054A,
+ 0x03C, 0x00000821,
+ 0x03C, 0x0000127D,
+ 0x03C, 0x0000154A,
+ 0x03C, 0x00001821,
+ 0x03C, 0x0000227D,
+ 0x03C, 0x0000254A,
+ 0x03C, 0x00002821,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x0000027D,
+ 0x03C, 0x0000054A,
+ 0x03C, 0x00000821,
+ 0x03C, 0x0000127D,
+ 0x03C, 0x0000154A,
+ 0x03C, 0x00001821,
+ 0x03C, 0x0000227D,
+ 0x03C, 0x0000254A,
+ 0x03C, 0x00002821,
+ 0xA0000000, 0x00000000,
+ 0x03C, 0x0000037E,
+ 0x03C, 0x00000575,
+ 0x03C, 0x00000971,
+ 0x03C, 0x0000127E,
+ 0x03C, 0x00001575,
+ 0x03C, 0x00001871,
+ 0x03C, 0x0000217E,
+ 0x03C, 0x00002575,
+ 0x03C, 0x00002871,
+ 0xB0000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x061, 0x000C0D47,
+ 0x062, 0x0000133C,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x063, 0x000750E7,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x063, 0x000750E7,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x063, 0x000750E7,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x063, 0x000750E7,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x063, 0x000750E7,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x063, 0x000750E7,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x063, 0x000750E7,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x063, 0x000750E7,
+ 0xA0000000, 0x00000000,
+ 0x063, 0x0007D0E7,
+ 0xB0000000, 0x00000000,
+ 0x064, 0x00014FEC,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x065, 0x000920D0,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x065, 0x000920D0,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x065, 0x000920D0,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x065, 0x000920D0,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x065, 0x000920D0,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x065, 0x000920D0,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x065, 0x000920D0,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x065, 0x000920D0,
+ 0xA0000000, 0x00000000,
+ 0x065, 0x000933FF,
+ 0xB0000000, 0x00000000,
+ 0x066, 0x00000040,
+ 0x057, 0x00050000,
+ 0x056, 0x00051DF0,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x055, 0x00082061,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x055, 0x00082061,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x055, 0x00082061,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x055, 0x00082061,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x055, 0x00082061,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x055, 0x00082061,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x055, 0x00082061,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x055, 0x00082061,
+ 0xA0000000, 0x00000000,
+ 0x055, 0x00082060,
+ 0xB0000000, 0x00000000,
+ 0x01C, 0x000739D2,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x01F, 0x0002255C,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x01F, 0x0002255C,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x01F, 0x0002255C,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x01F, 0x0002255C,
+ 0xA0000000, 0x00000000,
+ 0x01F, 0x0002255C,
+ 0xB0000000, 0x00000000,
+ 0x0B1, 0x0007FF48,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x0C4, 0x00081700,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x0C4, 0x00081700,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x0C4, 0x00081700,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x0C4, 0x00081700,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x0C4, 0x00081700,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x0C4, 0x00081700,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x0C4, 0x00081700,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x0C4, 0x00081700,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x0C4, 0x00081700,
+ 0xA0000000, 0x00000000,
+ 0x0C4, 0x00083F00,
+ 0xB0000000, 0x00000000,
+ 0x018, 0x0001B126,
+ 0xFFE, 0x00000000,
+ 0xFFE, 0x00000000,
+ 0xFFE, 0x00000000,
+ 0x018, 0x00013126,
+ 0x018, 0x00013124,
+};
+
+RTW_DECL_TABLE_RF_RADIO(rtw8814a_rf_a, A);
+
+static const u32 rtw8814a_rf_b[] = {
+ 0x018, 0x00013124,
+ 0x040, 0x00000C00,
+ 0x058, 0x00000F98,
+ 0x07F, 0x00068004,
+ 0x018, 0x00000006,
+ 0x80000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x086, 0x000E335A,
+ 0x087, 0x00079F80,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x086, 0x000E335A,
+ 0x087, 0x00079F80,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x086, 0x000E335A,
+ 0x087, 0x00079F80,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x086, 0x000E335A,
+ 0x087, 0x00079F80,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x086, 0x000E335A,
+ 0x087, 0x00079F80,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x086, 0x000E335A,
+ 0x087, 0x00079F80,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x086, 0x000E335A,
+ 0x087, 0x00079F80,
+ 0xA0000000, 0x00000000,
+ 0x086, 0x000E4B58,
+ 0x087, 0x00049F80,
+ 0xB0000000, 0x00000000,
+ 0x0DF, 0x00000008,
+ 0x0EF, 0x00002000,
+ 0x80000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03B, 0x0003F19B,
+ 0x03B, 0x00037A5B,
+ 0x03B, 0x0002A433,
+ 0x03B, 0x00027BD3,
+ 0x03B, 0x0001F80B,
+ 0x03B, 0x00017BC3,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03B, 0x0003F39B,
+ 0x03B, 0x00037A5B,
+ 0x03B, 0x0002A433,
+ 0x03B, 0x00027BD3,
+ 0x03B, 0x0001F80B,
+ 0x03B, 0x00017BC3,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03B, 0x0003F19B,
+ 0x03B, 0x00037A5B,
+ 0x03B, 0x0002A433,
+ 0x03B, 0x00027BD3,
+ 0x03B, 0x0001F80B,
+ 0x03B, 0x00017BC3,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03B, 0x0003F39B,
+ 0x03B, 0x00037A5B,
+ 0x03B, 0x0002A433,
+ 0x03B, 0x00027BD3,
+ 0x03B, 0x0001F80B,
+ 0x03B, 0x00017BC3,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x03B, 0x0003F19B,
+ 0x03B, 0x00037A5B,
+ 0x03B, 0x0002A433,
+ 0x03B, 0x00027BD3,
+ 0x03B, 0x0001F80B,
+ 0x03B, 0x00017BC3,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x03B, 0x0003F19B,
+ 0x03B, 0x00037A5B,
+ 0x03B, 0x0002A433,
+ 0x03B, 0x00027BD3,
+ 0x03B, 0x0001F80B,
+ 0x03B, 0x00017BC3,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x03B, 0x0003F19B,
+ 0x03B, 0x00037A5B,
+ 0x03B, 0x0002A433,
+ 0x03B, 0x00027BD3,
+ 0x03B, 0x0001F80B,
+ 0x03B, 0x00017BC3,
+ 0xA0000000, 0x00000000,
+ 0x03B, 0x0003F258,
+ 0x03B, 0x00030A58,
+ 0x03B, 0x0002FA58,
+ 0x03B, 0x00022590,
+ 0x03B, 0x0001FA50,
+ 0x03B, 0x00010248,
+ 0x03B, 0x00008240,
+ 0xB0000000, 0x00000000,
+ 0x0EF, 0x00000100,
+ 0x80000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A0D0,
+ 0x034, 0x000090CD,
+ 0x034, 0x000080CA,
+ 0x034, 0x0000704D,
+ 0x034, 0x0000604A,
+ 0x034, 0x00005047,
+ 0x034, 0x0000400A,
+ 0x034, 0x00003007,
+ 0x034, 0x00002004,
+ 0x034, 0x00001001,
+ 0x034, 0x00000001,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A0D0,
+ 0x034, 0x000090CD,
+ 0x034, 0x000080CA,
+ 0x034, 0x0000704D,
+ 0x034, 0x0000604A,
+ 0x034, 0x00005047,
+ 0x034, 0x0000400A,
+ 0x034, 0x00003007,
+ 0x034, 0x00002004,
+ 0x034, 0x00001001,
+ 0x034, 0x00000001,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A0D0,
+ 0x034, 0x000090CD,
+ 0x034, 0x000080CA,
+ 0x034, 0x0000704D,
+ 0x034, 0x0000604A,
+ 0x034, 0x00005047,
+ 0x034, 0x0000400A,
+ 0x034, 0x00003007,
+ 0x034, 0x00002004,
+ 0x034, 0x00001001,
+ 0x034, 0x00000001,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A0D0,
+ 0x034, 0x000090CD,
+ 0x034, 0x000080CA,
+ 0x034, 0x0000704D,
+ 0x034, 0x0000604A,
+ 0x034, 0x00005047,
+ 0x034, 0x0000400A,
+ 0x034, 0x00003007,
+ 0x034, 0x00002004,
+ 0x034, 0x00001001,
+ 0x034, 0x00000001,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A0D0,
+ 0x034, 0x000090CD,
+ 0x034, 0x000080CA,
+ 0x034, 0x0000704D,
+ 0x034, 0x0000604A,
+ 0x034, 0x00005047,
+ 0x034, 0x0000400A,
+ 0x034, 0x00003007,
+ 0x034, 0x00002004,
+ 0x034, 0x00001001,
+ 0x034, 0x00000001,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A0D0,
+ 0x034, 0x000090CD,
+ 0x034, 0x000080CA,
+ 0x034, 0x0000704D,
+ 0x034, 0x0000604A,
+ 0x034, 0x00005047,
+ 0x034, 0x0000400A,
+ 0x034, 0x00003007,
+ 0x034, 0x00002004,
+ 0x034, 0x00001001,
+ 0x034, 0x00000001,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A0D0,
+ 0x034, 0x000090CD,
+ 0x034, 0x000080CA,
+ 0x034, 0x0000704D,
+ 0x034, 0x0000604A,
+ 0x034, 0x00005047,
+ 0x034, 0x0000400A,
+ 0x034, 0x00003007,
+ 0x034, 0x00002004,
+ 0x034, 0x00001001,
+ 0x034, 0x00000001,
+ 0xA0000000, 0x00000000,
+ 0x034, 0x0000ADF6,
+ 0x034, 0x00009DF3,
+ 0x034, 0x00008DF0,
+ 0x034, 0x00007DED,
+ 0x034, 0x00006DEA,
+ 0x034, 0x00005CED,
+ 0x034, 0x00004CEA,
+ 0x034, 0x000034EA,
+ 0x034, 0x000024E7,
+ 0x034, 0x0000146A,
+ 0x034, 0x0000006B,
+ 0xB0000000, 0x00000000,
+ 0x80000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A0D0,
+ 0x034, 0x000090CD,
+ 0x034, 0x000080CA,
+ 0x034, 0x0000704D,
+ 0x034, 0x0000604A,
+ 0x034, 0x00005047,
+ 0x034, 0x0000400A,
+ 0x034, 0x00003007,
+ 0x034, 0x00002004,
+ 0x034, 0x00001001,
+ 0x034, 0x00000001,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A0D0,
+ 0x034, 0x000090CD,
+ 0x034, 0x000080CA,
+ 0x034, 0x0000704D,
+ 0x034, 0x0000604A,
+ 0x034, 0x00005047,
+ 0x034, 0x0000400A,
+ 0x034, 0x00003007,
+ 0x034, 0x00002004,
+ 0x034, 0x00001001,
+ 0x034, 0x00000001,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A0D0,
+ 0x034, 0x000090CD,
+ 0x034, 0x000080CA,
+ 0x034, 0x0000704D,
+ 0x034, 0x0000604A,
+ 0x034, 0x00005047,
+ 0x034, 0x0000400A,
+ 0x034, 0x00003007,
+ 0x034, 0x00002004,
+ 0x034, 0x00001001,
+ 0x034, 0x00000001,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A0D0,
+ 0x034, 0x000090CD,
+ 0x034, 0x000080CA,
+ 0x034, 0x0000704D,
+ 0x034, 0x0000604A,
+ 0x034, 0x00005047,
+ 0x034, 0x0000400A,
+ 0x034, 0x00003007,
+ 0x034, 0x00002004,
+ 0x034, 0x00001001,
+ 0x034, 0x00000001,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A0D0,
+ 0x034, 0x000090CD,
+ 0x034, 0x000080CA,
+ 0x034, 0x0000704D,
+ 0x034, 0x0000604A,
+ 0x034, 0x00005047,
+ 0x034, 0x0000400A,
+ 0x034, 0x00003007,
+ 0x034, 0x00002004,
+ 0x034, 0x00001001,
+ 0x034, 0x00000001,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A0D0,
+ 0x034, 0x000090CD,
+ 0x034, 0x000080CA,
+ 0x034, 0x0000704D,
+ 0x034, 0x0000604A,
+ 0x034, 0x00005047,
+ 0x034, 0x0000400A,
+ 0x034, 0x00003007,
+ 0x034, 0x00002004,
+ 0x034, 0x00001001,
+ 0x034, 0x00000001,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A0D0,
+ 0x034, 0x000090CD,
+ 0x034, 0x000080CA,
+ 0x034, 0x0000704D,
+ 0x034, 0x0000604A,
+ 0x034, 0x00005047,
+ 0x034, 0x0000400A,
+ 0x034, 0x00003007,
+ 0x034, 0x00002004,
+ 0x034, 0x00001001,
+ 0x034, 0x00000001,
+ 0xA0000000, 0x00000000,
+ 0x034, 0x0008ADF6,
+ 0x034, 0x00089DF3,
+ 0x034, 0x00088DF0,
+ 0x034, 0x00087DED,
+ 0x034, 0x00086DEA,
+ 0x034, 0x00085CED,
+ 0x034, 0x00084CEA,
+ 0x034, 0x000834EA,
+ 0x034, 0x000824E7,
+ 0x034, 0x0008146A,
+ 0x034, 0x0008006B,
+ 0xB0000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x0EF, 0x000020A2,
+ 0x0DF, 0x00000080,
+ 0x035, 0x00000192,
+ 0x035, 0x00008192,
+ 0x035, 0x00010192,
+ 0x036, 0x00000024,
+ 0x036, 0x00008024,
+ 0x036, 0x00010024,
+ 0x036, 0x00018024,
+ 0x0EF, 0x00000000,
+ 0x051, 0x00000C21,
+ 0x052, 0x000006D9,
+ 0x053, 0x000FC649,
+ 0x054, 0x0000017E,
+ 0x018, 0x0001012A,
+ 0x081, 0x0007FC00,
+ 0x089, 0x00050110,
+ 0x08A, 0x00043E50,
+ 0x08B, 0x0002E180,
+ 0x08C, 0x00093C3C,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x085, 0x000F8000,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x085, 0x000F8000,
+ 0xA0000000, 0x00000000,
+ 0x085, 0x000F8000,
+ 0xB0000000, 0x00000000,
+ 0x08D, 0x000FFFF0,
+ 0x0EF, 0x00001000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00038023,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00088000,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00084000,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00000000,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00088000,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00000000,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00088000,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00088000,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00088000,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00088000,
+ 0xA0000000, 0x00000000,
+ 0x03C, 0x00040000,
+ 0xB0000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00030023,
+ 0x03C, 0x00048000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00028623,
+ 0x03C, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00021633,
+ 0x03C, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x0001C633,
+ 0x03C, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00010293,
+ 0x03C, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00009593,
+ 0x03C, 0x00000000,
+ 0x03A, 0x00000148,
+ 0x80000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x03B, 0x00000F8B,
+ 0xA0000000, 0x00000000,
+ 0x03B, 0x0000078B,
+ 0xB0000000, 0x00000000,
+ 0x03C, 0x00000000,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00078023,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00078023,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00078023,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00078023,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00078023,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00078023,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00078023,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00078023,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00078023,
+ 0xA0000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00078023,
+ 0xB0000000, 0x00000000,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00020000,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00060000,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00000000,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00048000,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00048000,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00020000,
+ 0x90000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00000000,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00088000,
+ 0x90000009, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00000000,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00048000,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00048000,
+ 0xA0000000, 0x00000000,
+ 0x03C, 0x00020000,
+ 0xB0000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00070023,
+ 0x03C, 0x00048000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00068623,
+ 0x03C, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00061633,
+ 0x03C, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x0005C633,
+ 0x03C, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00050293,
+ 0x03C, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00049593,
+ 0x03C, 0x00000000,
+ 0x03A, 0x00000148,
+ 0x03B, 0x0004078B,
+ 0x03C, 0x00000000,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x000B8023,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x000B8023,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x000B8023,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x000B8023,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x000B8023,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x000B8023,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x000B8023,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x000B8023,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x000B8023,
+ 0xA0000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x000B8023,
+ 0xB0000000, 0x00000000,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00048000,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00060000,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x0004C000,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00044000,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x0004C000,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00048000,
+ 0x90000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00000000,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00020000,
+ 0x90000009, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00004000,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00044000,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00044000,
+ 0xA0000000, 0x00000000,
+ 0x03C, 0x00020000,
+ 0xB0000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x000B0023,
+ 0x80000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00020000,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00020000,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00020000,
+ 0xA0000000, 0x00000000,
+ 0x03C, 0x00020000,
+ 0xB0000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x000A8623,
+ 0x03C, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x000A1633,
+ 0x03C, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x0009C633,
+ 0x03C, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00090293,
+ 0x03C, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00089593,
+ 0x03C, 0x00000000,
+ 0x03A, 0x00000148,
+ 0x80000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x03B, 0x0008138B,
+ 0xA0000000, 0x00000000,
+ 0x03B, 0x0008078B,
+ 0xB0000000, 0x00000000,
+ 0x03C, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x0EF, 0x00000800,
+ 0x03B, 0x00000000,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x00000803,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x00000803,
+ 0x90000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x00001003,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x00001003,
+ 0x90000009, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x00001003,
+ 0xA0000000, 0x00000000,
+ 0x03A, 0x00000803,
+ 0xB0000000, 0x00000000,
+ 0x03B, 0x00040000,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x00001000,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x00001001,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x00000803,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x00001003,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x00001000,
+ 0x90000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x00000000,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x00000800,
+ 0x90000009, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x00000800,
+ 0xA0000000, 0x00000000,
+ 0x03A, 0x00001000,
+ 0xB0000000, 0x00000000,
+ 0x03B, 0x00080000,
+ 0x80000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x00000000,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x00000000,
+ 0x90000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x00001802,
+ 0x90000009, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x00001802,
+ 0xA0000000, 0x00000000,
+ 0x03A, 0x00001002,
+ 0xB0000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0xA0000000, 0x00000000,
+ 0xB0000000, 0x00000000,
+ 0x018, 0x00013124,
+ 0x0EF, 0x00000100,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0004A38C,
+ 0x034, 0x000491AD,
+ 0x034, 0x000481AA,
+ 0x034, 0x000471A7,
+ 0x034, 0x000460AA,
+ 0x034, 0x000450A7,
+ 0x034, 0x0004402C,
+ 0x034, 0x00043029,
+ 0x034, 0x0004200C,
+ 0x034, 0x00041009,
+ 0x034, 0x00040006,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0004A38C,
+ 0x034, 0x00049389,
+ 0x034, 0x0004816D,
+ 0x034, 0x0004716A,
+ 0x034, 0x0004606D,
+ 0x034, 0x0004506A,
+ 0x034, 0x0004402C,
+ 0x034, 0x00043029,
+ 0x034, 0x00042026,
+ 0x034, 0x00041009,
+ 0x034, 0x00040006,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0004A38B,
+ 0x034, 0x00049388,
+ 0x034, 0x0004818B,
+ 0x034, 0x00047188,
+ 0x034, 0x0004606D,
+ 0x034, 0x0004506A,
+ 0x034, 0x0004402C,
+ 0x034, 0x00043029,
+ 0x034, 0x00042026,
+ 0x034, 0x00041009,
+ 0x034, 0x00040006,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0004A38C,
+ 0x034, 0x00049389,
+ 0x034, 0x0004816D,
+ 0x034, 0x0004716A,
+ 0x034, 0x0004606D,
+ 0x034, 0x0004506A,
+ 0x034, 0x0004402C,
+ 0x034, 0x00043029,
+ 0x034, 0x00042026,
+ 0x034, 0x00041009,
+ 0x034, 0x00040006,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0004A38B,
+ 0x034, 0x00049388,
+ 0x034, 0x0004818B,
+ 0x034, 0x00047188,
+ 0x034, 0x0004606D,
+ 0x034, 0x0004506A,
+ 0x034, 0x0004402C,
+ 0x034, 0x00043029,
+ 0x034, 0x00042026,
+ 0x034, 0x00041009,
+ 0x034, 0x00040006,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0004A3F5,
+ 0x034, 0x000493F3,
+ 0x034, 0x000483B2,
+ 0x034, 0x00047390,
+ 0x034, 0x0004638D,
+ 0x034, 0x0004538A,
+ 0x034, 0x00044387,
+ 0x034, 0x0004324A,
+ 0x034, 0x00042247,
+ 0x034, 0x0004104D,
+ 0x034, 0x0004004A,
+ 0x90000009, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0004AFF7,
+ 0x034, 0x00049FF6,
+ 0x034, 0x00048FF3,
+ 0x034, 0x00047FF0,
+ 0x034, 0x00046FED,
+ 0x034, 0x00045FEA,
+ 0x034, 0x00044FE7,
+ 0x034, 0x00043DEA,
+ 0x034, 0x00042DE7,
+ 0x034, 0x00041DE4,
+ 0x034, 0x00040CE7,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0004A38C,
+ 0x034, 0x00049389,
+ 0x034, 0x0004816D,
+ 0x034, 0x0004716A,
+ 0x034, 0x0004606D,
+ 0x034, 0x0004506A,
+ 0x034, 0x0004402C,
+ 0x034, 0x00043029,
+ 0x034, 0x00042026,
+ 0x034, 0x00041009,
+ 0x034, 0x00040006,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0004A38C,
+ 0x034, 0x00049389,
+ 0x034, 0x0004816D,
+ 0x034, 0x0004716A,
+ 0x034, 0x0004606D,
+ 0x034, 0x0004506A,
+ 0x034, 0x0004402C,
+ 0x034, 0x00043029,
+ 0x034, 0x00042026,
+ 0x034, 0x00041009,
+ 0x034, 0x00040006,
+ 0xA0000000, 0x00000000,
+ 0x034, 0x0004AFF4,
+ 0x034, 0x00049FF1,
+ 0x034, 0x00048FEE,
+ 0x034, 0x00047FEB,
+ 0x034, 0x00046FE8,
+ 0x034, 0x00045DEA,
+ 0x034, 0x00044CED,
+ 0x034, 0x00043CEA,
+ 0x034, 0x00042C6C,
+ 0x034, 0x00041C69,
+ 0x034, 0x00040C2B,
+ 0xB0000000, 0x00000000,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0002A38C,
+ 0x034, 0x000291AD,
+ 0x034, 0x000281AA,
+ 0x034, 0x000271A7,
+ 0x034, 0x000260AA,
+ 0x034, 0x000250A7,
+ 0x034, 0x0002402C,
+ 0x034, 0x00023029,
+ 0x034, 0x0002200C,
+ 0x034, 0x00021009,
+ 0x034, 0x00020006,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0002A3EE,
+ 0x034, 0x000293AC,
+ 0x034, 0x00028389,
+ 0x034, 0x0002716D,
+ 0x034, 0x0002616A,
+ 0x034, 0x0002506D,
+ 0x034, 0x0002406A,
+ 0x034, 0x0002302C,
+ 0x034, 0x00022029,
+ 0x034, 0x00021026,
+ 0x034, 0x00020023,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0002A3EF,
+ 0x034, 0x000293AD,
+ 0x034, 0x0002838A,
+ 0x034, 0x0002718C,
+ 0x034, 0x00026189,
+ 0x034, 0x0002506D,
+ 0x034, 0x0002406A,
+ 0x034, 0x0002302C,
+ 0x034, 0x00022029,
+ 0x034, 0x00021026,
+ 0x034, 0x00020023,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0002A3EE,
+ 0x034, 0x000293AC,
+ 0x034, 0x00028389,
+ 0x034, 0x0002716D,
+ 0x034, 0x0002616A,
+ 0x034, 0x0002506D,
+ 0x034, 0x0002406A,
+ 0x034, 0x0002302C,
+ 0x034, 0x00022029,
+ 0x034, 0x00021026,
+ 0x034, 0x00020023,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0002A3EF,
+ 0x034, 0x000293AD,
+ 0x034, 0x0002838A,
+ 0x034, 0x0002718C,
+ 0x034, 0x00026189,
+ 0x034, 0x0002506D,
+ 0x034, 0x0002406A,
+ 0x034, 0x0002302C,
+ 0x034, 0x00022029,
+ 0x034, 0x00021026,
+ 0x034, 0x00020023,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0002A3F5,
+ 0x034, 0x000293F3,
+ 0x034, 0x000283D0,
+ 0x034, 0x00027371,
+ 0x034, 0x0002636E,
+ 0x034, 0x0002536B,
+ 0x034, 0x00024368,
+ 0x034, 0x0002332A,
+ 0x034, 0x00022327,
+ 0x034, 0x0002104C,
+ 0x034, 0x00020049,
+ 0x90000009, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0002AFF7,
+ 0x034, 0x00029FF6,
+ 0x034, 0x00028FF3,
+ 0x034, 0x00027FF0,
+ 0x034, 0x00026FED,
+ 0x034, 0x00025FEA,
+ 0x034, 0x00024FE7,
+ 0x034, 0x00023DEA,
+ 0x034, 0x00022DE7,
+ 0x034, 0x00021DE4,
+ 0x034, 0x00020F25,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0002A3EE,
+ 0x034, 0x000293AC,
+ 0x034, 0x00028389,
+ 0x034, 0x0002716D,
+ 0x034, 0x0002616A,
+ 0x034, 0x0002506D,
+ 0x034, 0x0002406A,
+ 0x034, 0x0002302C,
+ 0x034, 0x00022029,
+ 0x034, 0x00021026,
+ 0x034, 0x00020023,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0002A3EE,
+ 0x034, 0x000293AC,
+ 0x034, 0x00028389,
+ 0x034, 0x0002716D,
+ 0x034, 0x0002616A,
+ 0x034, 0x0002506D,
+ 0x034, 0x0002406A,
+ 0x034, 0x0002302C,
+ 0x034, 0x00022029,
+ 0x034, 0x00021026,
+ 0x034, 0x00020023,
+ 0xA0000000, 0x00000000,
+ 0x034, 0x0002AFF4,
+ 0x034, 0x00029FF1,
+ 0x034, 0x00028FEE,
+ 0x034, 0x00027FEB,
+ 0x034, 0x00026FE8,
+ 0x034, 0x00025DEA,
+ 0x034, 0x00024CED,
+ 0x034, 0x00023CEA,
+ 0x034, 0x00022C6C,
+ 0x034, 0x00021C69,
+ 0x034, 0x00020C2B,
+ 0xB0000000, 0x00000000,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A38C,
+ 0x034, 0x000091AD,
+ 0x034, 0x000081AA,
+ 0x034, 0x000071A7,
+ 0x034, 0x000060AA,
+ 0x034, 0x000050A7,
+ 0x034, 0x0000402C,
+ 0x034, 0x00003029,
+ 0x034, 0x00002026,
+ 0x034, 0x00001009,
+ 0x034, 0x00000006,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A3EC,
+ 0x034, 0x000093AC,
+ 0x034, 0x000081EC,
+ 0x034, 0x0000716D,
+ 0x034, 0x0000616A,
+ 0x034, 0x0000506D,
+ 0x034, 0x0000404C,
+ 0x034, 0x0000302C,
+ 0x034, 0x00002029,
+ 0x034, 0x00001026,
+ 0x034, 0x00000023,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A3EF,
+ 0x034, 0x000093AD,
+ 0x034, 0x0000838A,
+ 0x034, 0x0000718C,
+ 0x034, 0x00006189,
+ 0x034, 0x0000506D,
+ 0x034, 0x0000406A,
+ 0x034, 0x0000302C,
+ 0x034, 0x00002029,
+ 0x034, 0x00001026,
+ 0x034, 0x00000023,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A3EC,
+ 0x034, 0x000093AC,
+ 0x034, 0x000081EC,
+ 0x034, 0x0000716D,
+ 0x034, 0x0000616A,
+ 0x034, 0x0000506D,
+ 0x034, 0x0000404C,
+ 0x034, 0x0000302C,
+ 0x034, 0x00002029,
+ 0x034, 0x00001026,
+ 0x034, 0x00000023,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A3EF,
+ 0x034, 0x000093AD,
+ 0x034, 0x0000838A,
+ 0x034, 0x0000718C,
+ 0x034, 0x00006189,
+ 0x034, 0x0000506D,
+ 0x034, 0x0000406A,
+ 0x034, 0x0000302C,
+ 0x034, 0x00002029,
+ 0x034, 0x00001026,
+ 0x034, 0x00000023,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A3F4,
+ 0x034, 0x000093F0,
+ 0x034, 0x000083AE,
+ 0x034, 0x00007350,
+ 0x034, 0x0000634D,
+ 0x034, 0x0000534A,
+ 0x034, 0x00004347,
+ 0x034, 0x0000312D,
+ 0x034, 0x0000212A,
+ 0x034, 0x00001127,
+ 0x034, 0x0000002A,
+ 0x90000009, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000AFF7,
+ 0x034, 0x00009FF4,
+ 0x034, 0x00008FF1,
+ 0x034, 0x00007FEE,
+ 0x034, 0x00006FEB,
+ 0x034, 0x00005FE8,
+ 0x034, 0x00004DEB,
+ 0x034, 0x00003DE8,
+ 0x034, 0x00002DE5,
+ 0x034, 0x00001C8B,
+ 0x034, 0x00000C88,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A3EC,
+ 0x034, 0x000093AC,
+ 0x034, 0x000081EC,
+ 0x034, 0x0000716D,
+ 0x034, 0x0000616A,
+ 0x034, 0x0000506D,
+ 0x034, 0x0000404C,
+ 0x034, 0x0000302C,
+ 0x034, 0x00002029,
+ 0x034, 0x00001026,
+ 0x034, 0x00000023,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A3EC,
+ 0x034, 0x000093AC,
+ 0x034, 0x000081EC,
+ 0x034, 0x0000716D,
+ 0x034, 0x0000616A,
+ 0x034, 0x0000506D,
+ 0x034, 0x0000404C,
+ 0x034, 0x0000302C,
+ 0x034, 0x00002029,
+ 0x034, 0x00001026,
+ 0x034, 0x00000023,
+ 0xA0000000, 0x00000000,
+ 0x034, 0x0000AFF4,
+ 0x034, 0x00009FF1,
+ 0x034, 0x00008FEE,
+ 0x034, 0x00007FEB,
+ 0x034, 0x00006FE8,
+ 0x034, 0x00005DEA,
+ 0x034, 0x00004CED,
+ 0x034, 0x00003CEA,
+ 0x034, 0x00002C6C,
+ 0x034, 0x00001C69,
+ 0x034, 0x00000C2B,
+ 0xB0000000, 0x00000000,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x000CA38C,
+ 0x034, 0x000C91AD,
+ 0x034, 0x000C81AA,
+ 0x034, 0x000C71A7,
+ 0x034, 0x000C60AA,
+ 0x034, 0x000C50A7,
+ 0x034, 0x000C402C,
+ 0x034, 0x000C3029,
+ 0x034, 0x000C200C,
+ 0x034, 0x000C1009,
+ 0x034, 0x000C0006,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x000CA38C,
+ 0x034, 0x000C9389,
+ 0x034, 0x000C816D,
+ 0x034, 0x000C716A,
+ 0x034, 0x000C606D,
+ 0x034, 0x000C506A,
+ 0x034, 0x000C402C,
+ 0x034, 0x000C3029,
+ 0x034, 0x000C2026,
+ 0x034, 0x000C1009,
+ 0x034, 0x000C0006,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x000CA38B,
+ 0x034, 0x000C9388,
+ 0x034, 0x000C818B,
+ 0x034, 0x000C7188,
+ 0x034, 0x000C606D,
+ 0x034, 0x000C506A,
+ 0x034, 0x000C402C,
+ 0x034, 0x000C3029,
+ 0x034, 0x000C2026,
+ 0x034, 0x000C1009,
+ 0x034, 0x000C0006,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x000CA38C,
+ 0x034, 0x000C9389,
+ 0x034, 0x000C816D,
+ 0x034, 0x000C716A,
+ 0x034, 0x000C606D,
+ 0x034, 0x000C506A,
+ 0x034, 0x000C402C,
+ 0x034, 0x000C3029,
+ 0x034, 0x000C2026,
+ 0x034, 0x000C1009,
+ 0x034, 0x000C0006,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x000CA38B,
+ 0x034, 0x000C9388,
+ 0x034, 0x000C818B,
+ 0x034, 0x000C7188,
+ 0x034, 0x000C606D,
+ 0x034, 0x000C506A,
+ 0x034, 0x000C402C,
+ 0x034, 0x000C3029,
+ 0x034, 0x000C2026,
+ 0x034, 0x000C1009,
+ 0x034, 0x000C0006,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x000CA3F5,
+ 0x034, 0x000C93F3,
+ 0x034, 0x000C83B2,
+ 0x034, 0x000C7390,
+ 0x034, 0x000C638D,
+ 0x034, 0x000C538A,
+ 0x034, 0x000C4387,
+ 0x034, 0x000C324A,
+ 0x034, 0x000C2247,
+ 0x034, 0x000C104D,
+ 0x034, 0x000C004A,
+ 0x90000009, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x000CAFF7,
+ 0x034, 0x000C9FF6,
+ 0x034, 0x000C8FF3,
+ 0x034, 0x000C7FF0,
+ 0x034, 0x000C6FED,
+ 0x034, 0x000C5FEA,
+ 0x034, 0x000C4FE7,
+ 0x034, 0x000C3DEA,
+ 0x034, 0x000C2DE7,
+ 0x034, 0x000C1DE4,
+ 0x034, 0x000C0CE7,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x000CA38C,
+ 0x034, 0x000C9389,
+ 0x034, 0x000C816D,
+ 0x034, 0x000C716A,
+ 0x034, 0x000C606D,
+ 0x034, 0x000C506A,
+ 0x034, 0x000C402C,
+ 0x034, 0x000C3029,
+ 0x034, 0x000C2026,
+ 0x034, 0x000C1009,
+ 0x034, 0x000C0006,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x000CA38C,
+ 0x034, 0x000C9389,
+ 0x034, 0x000C816D,
+ 0x034, 0x000C716A,
+ 0x034, 0x000C606D,
+ 0x034, 0x000C506A,
+ 0x034, 0x000C402C,
+ 0x034, 0x000C3029,
+ 0x034, 0x000C2026,
+ 0x034, 0x000C1009,
+ 0x034, 0x000C0006,
+ 0xA0000000, 0x00000000,
+ 0x034, 0x000CA794,
+ 0x034, 0x000C9791,
+ 0x034, 0x000C878E,
+ 0x034, 0x000C778B,
+ 0x034, 0x000C658D,
+ 0x034, 0x000C558A,
+ 0x034, 0x000C448D,
+ 0x034, 0x000C348A,
+ 0x034, 0x000C244C,
+ 0x034, 0x000C1449,
+ 0x034, 0x000C042B,
+ 0xB0000000, 0x00000000,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x000AA38C,
+ 0x034, 0x000A91AD,
+ 0x034, 0x000A81AA,
+ 0x034, 0x000A71A7,
+ 0x034, 0x000A60AA,
+ 0x034, 0x000A50A7,
+ 0x034, 0x000A402C,
+ 0x034, 0x000A3029,
+ 0x034, 0x000A200C,
+ 0x034, 0x000A1009,
+ 0x034, 0x000A0006,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x000AA3EE,
+ 0x034, 0x000A93AC,
+ 0x034, 0x000A8389,
+ 0x034, 0x000A716D,
+ 0x034, 0x000A616A,
+ 0x034, 0x000A506D,
+ 0x034, 0x000A406A,
+ 0x034, 0x000A302C,
+ 0x034, 0x000A2029,
+ 0x034, 0x000A1026,
+ 0x034, 0x000A0023,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x000AA3EF,
+ 0x034, 0x000A93AD,
+ 0x034, 0x000A838A,
+ 0x034, 0x000A718C,
+ 0x034, 0x000A6189,
+ 0x034, 0x000A506D,
+ 0x034, 0x000A406A,
+ 0x034, 0x000A302C,
+ 0x034, 0x000A2029,
+ 0x034, 0x000A1026,
+ 0x034, 0x000A0023,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x000AA3EE,
+ 0x034, 0x000A93AC,
+ 0x034, 0x000A8389,
+ 0x034, 0x000A716D,
+ 0x034, 0x000A616A,
+ 0x034, 0x000A506D,
+ 0x034, 0x000A406A,
+ 0x034, 0x000A302C,
+ 0x034, 0x000A2029,
+ 0x034, 0x000A1026,
+ 0x034, 0x000A0023,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x000AA3EF,
+ 0x034, 0x000A93AD,
+ 0x034, 0x000A838A,
+ 0x034, 0x000A718C,
+ 0x034, 0x000A6189,
+ 0x034, 0x000A506D,
+ 0x034, 0x000A406A,
+ 0x034, 0x000A302C,
+ 0x034, 0x000A2029,
+ 0x034, 0x000A1026,
+ 0x034, 0x000A0023,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x000AA3F5,
+ 0x034, 0x000A93F3,
+ 0x034, 0x000A83D0,
+ 0x034, 0x000A7371,
+ 0x034, 0x000A636E,
+ 0x034, 0x000A536B,
+ 0x034, 0x000A4368,
+ 0x034, 0x000A332A,
+ 0x034, 0x000A2327,
+ 0x034, 0x000A104C,
+ 0x034, 0x000A0049,
+ 0x90000009, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x000AAFF7,
+ 0x034, 0x000A9FF6,
+ 0x034, 0x000A8FF3,
+ 0x034, 0x000A7FF0,
+ 0x034, 0x000A6FED,
+ 0x034, 0x000A5FEA,
+ 0x034, 0x000A4FE7,
+ 0x034, 0x000A3DEA,
+ 0x034, 0x000A2DE7,
+ 0x034, 0x000A1DE4,
+ 0x034, 0x000A0F25,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x000AA3EE,
+ 0x034, 0x000A93AC,
+ 0x034, 0x000A8389,
+ 0x034, 0x000A716D,
+ 0x034, 0x000A616A,
+ 0x034, 0x000A506D,
+ 0x034, 0x000A406A,
+ 0x034, 0x000A302C,
+ 0x034, 0x000A2029,
+ 0x034, 0x000A1026,
+ 0x034, 0x000A0023,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x000AA3EE,
+ 0x034, 0x000A93AC,
+ 0x034, 0x000A8389,
+ 0x034, 0x000A716D,
+ 0x034, 0x000A616A,
+ 0x034, 0x000A506D,
+ 0x034, 0x000A406A,
+ 0x034, 0x000A302C,
+ 0x034, 0x000A2029,
+ 0x034, 0x000A1026,
+ 0x034, 0x000A0023,
+ 0xA0000000, 0x00000000,
+ 0x034, 0x000AA794,
+ 0x034, 0x000A9791,
+ 0x034, 0x000A878E,
+ 0x034, 0x000A778B,
+ 0x034, 0x000A658D,
+ 0x034, 0x000A558A,
+ 0x034, 0x000A448D,
+ 0x034, 0x000A348A,
+ 0x034, 0x000A244C,
+ 0x034, 0x000A1449,
+ 0x034, 0x000A042B,
+ 0xB0000000, 0x00000000,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0008A38C,
+ 0x034, 0x000891AD,
+ 0x034, 0x000881AA,
+ 0x034, 0x000871A7,
+ 0x034, 0x000860AA,
+ 0x034, 0x000850A7,
+ 0x034, 0x0008402C,
+ 0x034, 0x00083029,
+ 0x034, 0x00082026,
+ 0x034, 0x00081009,
+ 0x034, 0x00080006,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0008A3EC,
+ 0x034, 0x000893AC,
+ 0x034, 0x000881EC,
+ 0x034, 0x0008716D,
+ 0x034, 0x0008616A,
+ 0x034, 0x0008506D,
+ 0x034, 0x0008404C,
+ 0x034, 0x0008302C,
+ 0x034, 0x00082029,
+ 0x034, 0x00081026,
+ 0x034, 0x00080023,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0008A3EF,
+ 0x034, 0x000893AD,
+ 0x034, 0x0008838A,
+ 0x034, 0x0008718C,
+ 0x034, 0x00086189,
+ 0x034, 0x0008506D,
+ 0x034, 0x0008406A,
+ 0x034, 0x0008302C,
+ 0x034, 0x00082029,
+ 0x034, 0x00081026,
+ 0x034, 0x00080023,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0008A3EC,
+ 0x034, 0x000893AC,
+ 0x034, 0x000881EC,
+ 0x034, 0x0008716D,
+ 0x034, 0x0008616A,
+ 0x034, 0x0008506D,
+ 0x034, 0x0008404C,
+ 0x034, 0x0008302C,
+ 0x034, 0x00082029,
+ 0x034, 0x00081026,
+ 0x034, 0x00080023,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0008A3EF,
+ 0x034, 0x000893AD,
+ 0x034, 0x0008838A,
+ 0x034, 0x0008718C,
+ 0x034, 0x00086189,
+ 0x034, 0x0008506D,
+ 0x034, 0x0008406A,
+ 0x034, 0x0008302C,
+ 0x034, 0x00082029,
+ 0x034, 0x00081026,
+ 0x034, 0x00080023,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0008A3F4,
+ 0x034, 0x000893F0,
+ 0x034, 0x000883AE,
+ 0x034, 0x00087350,
+ 0x034, 0x0008634D,
+ 0x034, 0x0008534A,
+ 0x034, 0x00084347,
+ 0x034, 0x0008312D,
+ 0x034, 0x0008212A,
+ 0x034, 0x00081127,
+ 0x034, 0x0008002A,
+ 0x90000009, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0008AFF7,
+ 0x034, 0x00089FF4,
+ 0x034, 0x00088FF1,
+ 0x034, 0x00087FEE,
+ 0x034, 0x00086FEB,
+ 0x034, 0x00085FE8,
+ 0x034, 0x00084DEB,
+ 0x034, 0x00083DE8,
+ 0x034, 0x00082DE5,
+ 0x034, 0x00081C8B,
+ 0x034, 0x00080C88,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0008A3EC,
+ 0x034, 0x000893AC,
+ 0x034, 0x000881EC,
+ 0x034, 0x0008716D,
+ 0x034, 0x0008616A,
+ 0x034, 0x0008506D,
+ 0x034, 0x0008404C,
+ 0x034, 0x0008302C,
+ 0x034, 0x00082029,
+ 0x034, 0x00081026,
+ 0x034, 0x00080023,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0008A3EC,
+ 0x034, 0x000893AC,
+ 0x034, 0x000881EC,
+ 0x034, 0x0008716D,
+ 0x034, 0x0008616A,
+ 0x034, 0x0008506D,
+ 0x034, 0x0008404C,
+ 0x034, 0x0008302C,
+ 0x034, 0x00082029,
+ 0x034, 0x00081026,
+ 0x034, 0x00080023,
+ 0xA0000000, 0x00000000,
+ 0x034, 0x0008A794,
+ 0x034, 0x00089791,
+ 0x034, 0x0008878E,
+ 0x034, 0x0008778B,
+ 0x034, 0x0008658D,
+ 0x034, 0x0008558A,
+ 0x034, 0x0008448D,
+ 0x034, 0x0008348A,
+ 0x034, 0x0008244C,
+ 0x034, 0x00081449,
+ 0x034, 0x0008042B,
+ 0xB0000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x0DF, 0x00000001,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x0DF, 0x00000001,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x0DF, 0x00000001,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x0DF, 0x00000001,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x0DF, 0x00000001,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x0DF, 0x00000001,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x0DF, 0x00000001,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x0DF, 0x00000001,
+ 0xA0000000, 0x00000000,
+ 0x0DF, 0x00000000,
+ 0xB0000000, 0x00000000,
+ 0x018, 0x0001712A,
+ 0x0EF, 0x00000040,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x035, 0x000006CC,
+ 0x035, 0x000086CC,
+ 0x035, 0x000106CC,
+ 0x035, 0x000206CC,
+ 0x035, 0x000286CC,
+ 0x035, 0x000306CC,
+ 0x035, 0x000406CC,
+ 0x035, 0x000486CC,
+ 0x035, 0x000506CC,
+ 0x035, 0x000806CC,
+ 0x035, 0x000886CC,
+ 0x035, 0x000906CC,
+ 0x035, 0x000A06CC,
+ 0x035, 0x000A86CC,
+ 0x035, 0x000B06CC,
+ 0x035, 0x000C06CC,
+ 0x035, 0x000C86CC,
+ 0x035, 0x000D06CC,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x035, 0x000006CC,
+ 0x035, 0x000086CC,
+ 0x035, 0x000106CC,
+ 0x035, 0x000206CC,
+ 0x035, 0x000286CC,
+ 0x035, 0x000306CC,
+ 0x035, 0x000406CC,
+ 0x035, 0x000486CC,
+ 0x035, 0x000506CC,
+ 0x035, 0x000806CC,
+ 0x035, 0x000886CC,
+ 0x035, 0x000906CC,
+ 0x035, 0x000A06CC,
+ 0x035, 0x000A86CC,
+ 0x035, 0x000B06CC,
+ 0x035, 0x000C06CC,
+ 0x035, 0x000C86CC,
+ 0x035, 0x000D06CC,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x035, 0x000006CC,
+ 0x035, 0x000086CC,
+ 0x035, 0x000106CC,
+ 0x035, 0x000206CC,
+ 0x035, 0x000286CC,
+ 0x035, 0x000306CC,
+ 0x035, 0x000406CC,
+ 0x035, 0x000486CC,
+ 0x035, 0x000506CC,
+ 0x035, 0x000806CC,
+ 0x035, 0x000886CC,
+ 0x035, 0x000906CC,
+ 0x035, 0x000A06CC,
+ 0x035, 0x000A86CC,
+ 0x035, 0x000B06CC,
+ 0x035, 0x000C06CC,
+ 0x035, 0x000C86CC,
+ 0x035, 0x000D06CC,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x035, 0x000006CC,
+ 0x035, 0x000086CC,
+ 0x035, 0x000106CC,
+ 0x035, 0x000206CC,
+ 0x035, 0x000286CC,
+ 0x035, 0x000306CC,
+ 0x035, 0x000406CC,
+ 0x035, 0x000486CC,
+ 0x035, 0x000506CC,
+ 0x035, 0x000806CC,
+ 0x035, 0x000886CC,
+ 0x035, 0x000906CC,
+ 0x035, 0x000A06CC,
+ 0x035, 0x000A86CC,
+ 0x035, 0x000B06CC,
+ 0x035, 0x000C06CC,
+ 0x035, 0x000C86CC,
+ 0x035, 0x000D06CC,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x035, 0x000006CC,
+ 0x035, 0x000086CC,
+ 0x035, 0x000106CC,
+ 0x035, 0x000206CC,
+ 0x035, 0x000286CC,
+ 0x035, 0x000306CC,
+ 0x035, 0x000406CC,
+ 0x035, 0x000486CC,
+ 0x035, 0x000506CC,
+ 0x035, 0x000806CC,
+ 0x035, 0x000886CC,
+ 0x035, 0x000906CC,
+ 0x035, 0x000A06CC,
+ 0x035, 0x000A86CC,
+ 0x035, 0x000B06CC,
+ 0x035, 0x000C06CC,
+ 0x035, 0x000C86CC,
+ 0x035, 0x000D06CC,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x035, 0x000006CC,
+ 0x035, 0x000086CC,
+ 0x035, 0x000106CC,
+ 0x035, 0x000206CC,
+ 0x035, 0x000286CC,
+ 0x035, 0x000306CC,
+ 0x035, 0x000406CC,
+ 0x035, 0x000486CC,
+ 0x035, 0x000506CC,
+ 0x035, 0x000806CC,
+ 0x035, 0x000886CC,
+ 0x035, 0x000906CC,
+ 0x035, 0x000A06CC,
+ 0x035, 0x000A86CC,
+ 0x035, 0x000B06CC,
+ 0x035, 0x000C06CC,
+ 0x035, 0x000C86CC,
+ 0x035, 0x000D06CC,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x035, 0x000006CC,
+ 0x035, 0x000086CC,
+ 0x035, 0x000106CC,
+ 0x035, 0x000206CC,
+ 0x035, 0x000286CC,
+ 0x035, 0x000306CC,
+ 0x035, 0x000406CC,
+ 0x035, 0x000486CC,
+ 0x035, 0x000506CC,
+ 0x035, 0x000806CC,
+ 0x035, 0x000886CC,
+ 0x035, 0x000906CC,
+ 0x035, 0x000A06CC,
+ 0x035, 0x000A86CC,
+ 0x035, 0x000B06CC,
+ 0x035, 0x000C06CC,
+ 0x035, 0x000C86CC,
+ 0x035, 0x000D06CC,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x035, 0x000006CC,
+ 0x035, 0x000086CC,
+ 0x035, 0x000106CC,
+ 0x035, 0x000206CC,
+ 0x035, 0x000286CC,
+ 0x035, 0x000306CC,
+ 0x035, 0x000406CC,
+ 0x035, 0x000486CC,
+ 0x035, 0x000506CC,
+ 0x035, 0x000806CC,
+ 0x035, 0x000886CC,
+ 0x035, 0x000906CC,
+ 0x035, 0x000A06CC,
+ 0x035, 0x000A86CC,
+ 0x035, 0x000B06CC,
+ 0x035, 0x000C06CC,
+ 0x035, 0x000C86CC,
+ 0x035, 0x000D06CC,
+ 0xA0000000, 0x00000000,
+ 0x035, 0x00000484,
+ 0x035, 0x00008484,
+ 0x035, 0x00010484,
+ 0x035, 0x00020584,
+ 0x035, 0x00028584,
+ 0x035, 0x00030584,
+ 0x035, 0x00040584,
+ 0x035, 0x00048584,
+ 0x035, 0x00050584,
+ 0x035, 0x000805FB,
+ 0x035, 0x000885FB,
+ 0x035, 0x000905FB,
+ 0x035, 0x000A05FB,
+ 0x035, 0x000A85FB,
+ 0x035, 0x000B05FB,
+ 0x035, 0x000C05FB,
+ 0x035, 0x000C85FB,
+ 0x035, 0x000D05FB,
+ 0xB0000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x0DF, 0x00000001,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x0DF, 0x00000001,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x0DF, 0x00000001,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x0DF, 0x00000001,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x0DF, 0x00000001,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x0DF, 0x00000001,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x0DF, 0x00000001,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x0DF, 0x00000001,
+ 0xA0000000, 0x00000000,
+ 0x0DF, 0x00000000,
+ 0xB0000000, 0x00000000,
+ 0x018, 0x0001712A,
+ 0x0EF, 0x00000010,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x036, 0x00000473,
+ 0x036, 0x00008473,
+ 0x036, 0x00010473,
+ 0x036, 0x00020473,
+ 0x036, 0x00028473,
+ 0x036, 0x00030473,
+ 0x036, 0x00040473,
+ 0x036, 0x00048473,
+ 0x036, 0x00050473,
+ 0x036, 0x00080473,
+ 0x036, 0x00088473,
+ 0x036, 0x00090473,
+ 0x036, 0x000A0473,
+ 0x036, 0x000A8473,
+ 0x036, 0x000B0473,
+ 0x036, 0x000C0473,
+ 0x036, 0x000C8473,
+ 0x036, 0x000D0473,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x036, 0x00000475,
+ 0x036, 0x00008475,
+ 0x036, 0x00010475,
+ 0x036, 0x00020475,
+ 0x036, 0x00028475,
+ 0x036, 0x00030475,
+ 0x036, 0x00040475,
+ 0x036, 0x00048475,
+ 0x036, 0x00050475,
+ 0x036, 0x00080475,
+ 0x036, 0x00088475,
+ 0x036, 0x00090475,
+ 0x036, 0x000A0475,
+ 0x036, 0x000A8475,
+ 0x036, 0x000B0475,
+ 0x036, 0x000C0475,
+ 0x036, 0x000C8475,
+ 0x036, 0x000D0475,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x036, 0x00000475,
+ 0x036, 0x00008475,
+ 0x036, 0x00010475,
+ 0x036, 0x00020475,
+ 0x036, 0x00028475,
+ 0x036, 0x00030475,
+ 0x036, 0x00040475,
+ 0x036, 0x00048475,
+ 0x036, 0x00050475,
+ 0x036, 0x00080475,
+ 0x036, 0x00088475,
+ 0x036, 0x00090475,
+ 0x036, 0x000A0475,
+ 0x036, 0x000A8475,
+ 0x036, 0x000B0475,
+ 0x036, 0x000C0475,
+ 0x036, 0x000C8475,
+ 0x036, 0x000D0475,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x036, 0x00000475,
+ 0x036, 0x00008475,
+ 0x036, 0x00010475,
+ 0x036, 0x00020475,
+ 0x036, 0x00028475,
+ 0x036, 0x00030475,
+ 0x036, 0x00040475,
+ 0x036, 0x00048475,
+ 0x036, 0x00050475,
+ 0x036, 0x00080475,
+ 0x036, 0x00088475,
+ 0x036, 0x00090475,
+ 0x036, 0x000A0475,
+ 0x036, 0x000A8475,
+ 0x036, 0x000B0475,
+ 0x036, 0x000C0475,
+ 0x036, 0x000C8475,
+ 0x036, 0x000D0475,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x036, 0x00000475,
+ 0x036, 0x00008475,
+ 0x036, 0x00010475,
+ 0x036, 0x00020475,
+ 0x036, 0x00028475,
+ 0x036, 0x00030475,
+ 0x036, 0x00040475,
+ 0x036, 0x00048475,
+ 0x036, 0x00050475,
+ 0x036, 0x00080475,
+ 0x036, 0x00088475,
+ 0x036, 0x00090475,
+ 0x036, 0x000A0475,
+ 0x036, 0x000A8475,
+ 0x036, 0x000B0475,
+ 0x036, 0x000C0475,
+ 0x036, 0x000C8475,
+ 0x036, 0x000D0475,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x036, 0x00000475,
+ 0x036, 0x00008475,
+ 0x036, 0x00010475,
+ 0x036, 0x00020475,
+ 0x036, 0x00028475,
+ 0x036, 0x00030475,
+ 0x036, 0x00040475,
+ 0x036, 0x00048475,
+ 0x036, 0x00050475,
+ 0x036, 0x00080475,
+ 0x036, 0x00088475,
+ 0x036, 0x00090475,
+ 0x036, 0x000A0475,
+ 0x036, 0x000A8475,
+ 0x036, 0x000B0475,
+ 0x036, 0x000C0475,
+ 0x036, 0x000C8475,
+ 0x036, 0x000D0475,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x036, 0x00000475,
+ 0x036, 0x00008475,
+ 0x036, 0x00010475,
+ 0x036, 0x00020475,
+ 0x036, 0x00028475,
+ 0x036, 0x00030475,
+ 0x036, 0x00040475,
+ 0x036, 0x00048475,
+ 0x036, 0x00050475,
+ 0x036, 0x00080475,
+ 0x036, 0x00088475,
+ 0x036, 0x00090475,
+ 0x036, 0x000A0475,
+ 0x036, 0x000A8475,
+ 0x036, 0x000B0475,
+ 0x036, 0x000C0475,
+ 0x036, 0x000C8475,
+ 0x036, 0x000D0475,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x036, 0x00000475,
+ 0x036, 0x00008475,
+ 0x036, 0x00010475,
+ 0x036, 0x00020475,
+ 0x036, 0x00028475,
+ 0x036, 0x00030475,
+ 0x036, 0x00040475,
+ 0x036, 0x00048475,
+ 0x036, 0x00050475,
+ 0x036, 0x00080475,
+ 0x036, 0x00088475,
+ 0x036, 0x00090475,
+ 0x036, 0x000A0475,
+ 0x036, 0x000A8475,
+ 0x036, 0x000B0475,
+ 0x036, 0x000C0475,
+ 0x036, 0x000C8475,
+ 0x036, 0x000D0475,
+ 0xA0000000, 0x00000000,
+ 0x036, 0x00000474,
+ 0x036, 0x00008474,
+ 0x036, 0x00010474,
+ 0x036, 0x00020474,
+ 0x036, 0x00028474,
+ 0x036, 0x00030474,
+ 0x036, 0x00040474,
+ 0x036, 0x00048474,
+ 0x036, 0x00050474,
+ 0x036, 0x00080474,
+ 0x036, 0x00088474,
+ 0x036, 0x00090474,
+ 0x036, 0x000A0474,
+ 0x036, 0x000A8474,
+ 0x036, 0x000B0474,
+ 0x036, 0x000C0474,
+ 0x036, 0x000C8474,
+ 0x036, 0x000D0474,
+ 0xB0000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0xA0000000, 0x00000000,
+ 0x0EF, 0x00000004,
+ 0x037, 0x00000000,
+ 0x038, 0x0000514E,
+ 0x037, 0x00004000,
+ 0x038, 0x0000514E,
+ 0x037, 0x00008000,
+ 0x038, 0x0000514E,
+ 0x037, 0x00010000,
+ 0x038, 0x0000514E,
+ 0x037, 0x00014000,
+ 0x038, 0x0000514E,
+ 0x037, 0x00018000,
+ 0x038, 0x0000514E,
+ 0x037, 0x0001C000,
+ 0x038, 0x0000514E,
+ 0x037, 0x00020000,
+ 0x038, 0x0000514E,
+ 0x037, 0x00024000,
+ 0x038, 0x0000514E,
+ 0x037, 0x00028000,
+ 0x038, 0x0000514E,
+ 0x037, 0x0002C000,
+ 0x038, 0x0000714E,
+ 0x037, 0x00030000,
+ 0x038, 0x0000514E,
+ 0x037, 0x00034000,
+ 0x038, 0x0000514E,
+ 0x037, 0x00038000,
+ 0x038, 0x0000514E,
+ 0x037, 0x0003C000,
+ 0x038, 0x0000514E,
+ 0x037, 0x00040000,
+ 0x038, 0x0000514E,
+ 0x037, 0x00044000,
+ 0x038, 0x0000514E,
+ 0x037, 0x00048000,
+ 0x038, 0x0000514E,
+ 0x037, 0x00080000,
+ 0x038, 0x00005ECE,
+ 0x037, 0x00084000,
+ 0x038, 0x00005ECE,
+ 0x037, 0x00088000,
+ 0x038, 0x00005ECE,
+ 0x037, 0x00090000,
+ 0x038, 0x00005ECE,
+ 0x037, 0x00094000,
+ 0x038, 0x00005ECE,
+ 0x037, 0x00098000,
+ 0x038, 0x00005ECE,
+ 0x037, 0x0009C000,
+ 0x038, 0x00005ECE,
+ 0x037, 0x000A0000,
+ 0x038, 0x00005ECE,
+ 0x037, 0x000A4000,
+ 0x038, 0x00005ECE,
+ 0x037, 0x000A8000,
+ 0x038, 0x00005ECE,
+ 0x037, 0x000AC000,
+ 0x038, 0x00005ECE,
+ 0x037, 0x000B0000,
+ 0x038, 0x00005ECE,
+ 0x037, 0x000B4000,
+ 0x038, 0x00005ECE,
+ 0x037, 0x000B8000,
+ 0x038, 0x00005ECE,
+ 0x037, 0x000BC000,
+ 0x038, 0x00005ECE,
+ 0x037, 0x000C0000,
+ 0x038, 0x00005ECE,
+ 0x037, 0x000C4000,
+ 0x038, 0x00005ECE,
+ 0x037, 0x000C8000,
+ 0x038, 0x00005ECE,
+ 0x0EF, 0x00000000,
+ 0xB0000000, 0x00000000,
+ 0x0EF, 0x00000008,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0xA0000000, 0x00000000,
+ 0x03C, 0x0000007D,
+ 0x03C, 0x0000047D,
+ 0x03C, 0x0000087D,
+ 0x03C, 0x0000107D,
+ 0x03C, 0x0000147D,
+ 0x03C, 0x0000187D,
+ 0xB0000000, 0x00000000,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x0000027E,
+ 0x03C, 0x00000546,
+ 0x03C, 0x00000821,
+ 0x03C, 0x0000127E,
+ 0x03C, 0x00001546,
+ 0x03C, 0x00001821,
+ 0x03C, 0x0000227E,
+ 0x03C, 0x00002546,
+ 0x03C, 0x00002821,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x0000027E,
+ 0x03C, 0x00000546,
+ 0x03C, 0x00000821,
+ 0x03C, 0x0000127E,
+ 0x03C, 0x00001546,
+ 0x03C, 0x00001821,
+ 0x03C, 0x0000227E,
+ 0x03C, 0x00002546,
+ 0x03C, 0x00002821,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x0000027E,
+ 0x03C, 0x00000546,
+ 0x03C, 0x00000821,
+ 0x03C, 0x0000127E,
+ 0x03C, 0x00001546,
+ 0x03C, 0x00001821,
+ 0x03C, 0x0000227E,
+ 0x03C, 0x00002546,
+ 0x03C, 0x00002821,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x0000027E,
+ 0x03C, 0x00000546,
+ 0x03C, 0x00000821,
+ 0x03C, 0x0000127E,
+ 0x03C, 0x00001546,
+ 0x03C, 0x00001821,
+ 0x03C, 0x0000227E,
+ 0x03C, 0x00002546,
+ 0x03C, 0x00002821,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x0000027E,
+ 0x03C, 0x00000546,
+ 0x03C, 0x00000821,
+ 0x03C, 0x0000127E,
+ 0x03C, 0x00001546,
+ 0x03C, 0x00001821,
+ 0x03C, 0x0000227E,
+ 0x03C, 0x00002546,
+ 0x03C, 0x00002821,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x0000027E,
+ 0x03C, 0x00000546,
+ 0x03C, 0x00000821,
+ 0x03C, 0x0000127E,
+ 0x03C, 0x00001546,
+ 0x03C, 0x00001821,
+ 0x03C, 0x0000227E,
+ 0x03C, 0x00002546,
+ 0x03C, 0x00002821,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x0000027E,
+ 0x03C, 0x00000546,
+ 0x03C, 0x00000821,
+ 0x03C, 0x0000127E,
+ 0x03C, 0x00001546,
+ 0x03C, 0x00001821,
+ 0x03C, 0x0000227E,
+ 0x03C, 0x00002546,
+ 0x03C, 0x00002821,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x0000027E,
+ 0x03C, 0x00000546,
+ 0x03C, 0x00000821,
+ 0x03C, 0x0000127E,
+ 0x03C, 0x00001546,
+ 0x03C, 0x00001821,
+ 0x03C, 0x0000227E,
+ 0x03C, 0x00002546,
+ 0x03C, 0x00002821,
+ 0xA0000000, 0x00000000,
+ 0x03C, 0x0000037E,
+ 0x03C, 0x00000575,
+ 0x03C, 0x00000971,
+ 0x03C, 0x0000127E,
+ 0x03C, 0x00001575,
+ 0x03C, 0x00001871,
+ 0x03C, 0x0000217E,
+ 0x03C, 0x00002575,
+ 0x03C, 0x00002871,
+ 0xB0000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x061, 0x000C0D47,
+ 0x062, 0x0000133C,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x063, 0x000750E7,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x063, 0x000750E7,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x063, 0x000750E7,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x063, 0x000750E7,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x063, 0x000750E7,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x063, 0x000750E7,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x063, 0x000750E7,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x063, 0x000750E7,
+ 0xA0000000, 0x00000000,
+ 0x063, 0x0007D0E7,
+ 0xB0000000, 0x00000000,
+ 0x064, 0x00014FEC,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x065, 0x000920D0,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x065, 0x000920D0,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x065, 0x000920D0,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x065, 0x000920D0,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x065, 0x000920D0,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x065, 0x000920D0,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x065, 0x000920D0,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x065, 0x000920D0,
+ 0xA0000000, 0x00000000,
+ 0x065, 0x000923FF,
+ 0xB0000000, 0x00000000,
+ 0x066, 0x00000040,
+ 0x057, 0x00050000,
+ 0x056, 0x00051DF0,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0xA0000000, 0x00000000,
+ 0x055, 0x00082060,
+ 0xB0000000, 0x00000000,
+};
+
+RTW_DECL_TABLE_RF_RADIO(rtw8814a_rf_b, B);
+
+static const u32 rtw8814a_rf_c[] = {
+ 0x018, 0x00013124,
+ 0x040, 0x00000C00,
+ 0x058, 0x00000F98,
+ 0x07F, 0x00068004,
+ 0x018, 0x00000006,
+ 0x80000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x086, 0x000E335A,
+ 0x087, 0x00079F80,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x086, 0x000E335A,
+ 0x087, 0x00079F80,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x086, 0x000E335A,
+ 0x087, 0x00079F80,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x086, 0x000E335A,
+ 0x087, 0x00079F80,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x086, 0x000E335A,
+ 0x087, 0x00079F80,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x086, 0x000E335A,
+ 0x087, 0x00079F80,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x086, 0x000E335A,
+ 0x087, 0x00079F80,
+ 0xA0000000, 0x00000000,
+ 0x086, 0x000E4B58,
+ 0x087, 0x00049F80,
+ 0xB0000000, 0x00000000,
+ 0x0DF, 0x00000008,
+ 0x0EF, 0x00002000,
+ 0x80000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03B, 0x0003F19B,
+ 0x03B, 0x00037A5B,
+ 0x03B, 0x0002A433,
+ 0x03B, 0x00027BD3,
+ 0x03B, 0x0001F80B,
+ 0x03B, 0x00017823,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03B, 0x0003F19B,
+ 0x03B, 0x00037A5B,
+ 0x03B, 0x0002A433,
+ 0x03B, 0x00027BD3,
+ 0x03B, 0x0001F80B,
+ 0x03B, 0x00017823,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03B, 0x0003F19B,
+ 0x03B, 0x00037A5B,
+ 0x03B, 0x0002A433,
+ 0x03B, 0x00027BD3,
+ 0x03B, 0x0001F80B,
+ 0x03B, 0x00017823,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03B, 0x0003F19B,
+ 0x03B, 0x00037A5B,
+ 0x03B, 0x0002A433,
+ 0x03B, 0x00027BD3,
+ 0x03B, 0x0001F80B,
+ 0x03B, 0x00017823,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x03B, 0x0003F19B,
+ 0x03B, 0x00037A5B,
+ 0x03B, 0x0002A433,
+ 0x03B, 0x00027BD3,
+ 0x03B, 0x0001F80B,
+ 0x03B, 0x00017823,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x03B, 0x0003F19B,
+ 0x03B, 0x00037A5B,
+ 0x03B, 0x0002A433,
+ 0x03B, 0x00027BD3,
+ 0x03B, 0x0001F80B,
+ 0x03B, 0x00017823,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x03B, 0x0003F19B,
+ 0x03B, 0x00037A5B,
+ 0x03B, 0x0002A433,
+ 0x03B, 0x00027BD3,
+ 0x03B, 0x0001F80B,
+ 0x03B, 0x00017823,
+ 0xA0000000, 0x00000000,
+ 0x03B, 0x0003F258,
+ 0x03B, 0x00030A58,
+ 0x03B, 0x0002FA58,
+ 0x03B, 0x00022590,
+ 0x03B, 0x0001FA50,
+ 0x03B, 0x00010248,
+ 0x03B, 0x00008240,
+ 0xB0000000, 0x00000000,
+ 0x0EF, 0x00000100,
+ 0x80000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A0D0,
+ 0x034, 0x000090CD,
+ 0x034, 0x000080CA,
+ 0x034, 0x0000704D,
+ 0x034, 0x0000604A,
+ 0x034, 0x00005047,
+ 0x034, 0x0000400A,
+ 0x034, 0x00003007,
+ 0x034, 0x00002004,
+ 0x034, 0x00001001,
+ 0x034, 0x00000001,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A0D0,
+ 0x034, 0x000090CD,
+ 0x034, 0x000080CA,
+ 0x034, 0x0000704D,
+ 0x034, 0x0000604A,
+ 0x034, 0x00005047,
+ 0x034, 0x0000400A,
+ 0x034, 0x00003007,
+ 0x034, 0x00002004,
+ 0x034, 0x00001001,
+ 0x034, 0x00000001,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A0D0,
+ 0x034, 0x000090CD,
+ 0x034, 0x000080CA,
+ 0x034, 0x0000704D,
+ 0x034, 0x0000604A,
+ 0x034, 0x00005047,
+ 0x034, 0x0000400A,
+ 0x034, 0x00003007,
+ 0x034, 0x00002004,
+ 0x034, 0x00001001,
+ 0x034, 0x00000001,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A0D0,
+ 0x034, 0x000090CD,
+ 0x034, 0x000080CA,
+ 0x034, 0x0000704D,
+ 0x034, 0x0000604A,
+ 0x034, 0x00005047,
+ 0x034, 0x0000400A,
+ 0x034, 0x00003007,
+ 0x034, 0x00002004,
+ 0x034, 0x00001001,
+ 0x034, 0x00000001,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A0D0,
+ 0x034, 0x000090CD,
+ 0x034, 0x000080CA,
+ 0x034, 0x0000704D,
+ 0x034, 0x0000604A,
+ 0x034, 0x00005047,
+ 0x034, 0x0000400A,
+ 0x034, 0x00003007,
+ 0x034, 0x00002004,
+ 0x034, 0x00001001,
+ 0x034, 0x00000001,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A0D0,
+ 0x034, 0x000090CD,
+ 0x034, 0x000080CA,
+ 0x034, 0x0000704D,
+ 0x034, 0x0000604A,
+ 0x034, 0x00005047,
+ 0x034, 0x0000400A,
+ 0x034, 0x00003007,
+ 0x034, 0x00002004,
+ 0x034, 0x00001001,
+ 0x034, 0x00000001,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A0D0,
+ 0x034, 0x000090CD,
+ 0x034, 0x000080CA,
+ 0x034, 0x0000704D,
+ 0x034, 0x0000604A,
+ 0x034, 0x00005047,
+ 0x034, 0x0000400A,
+ 0x034, 0x00003007,
+ 0x034, 0x00002004,
+ 0x034, 0x00001001,
+ 0x034, 0x00000001,
+ 0xA0000000, 0x00000000,
+ 0x034, 0x0000ADF6,
+ 0x034, 0x00009DF3,
+ 0x034, 0x00008DF0,
+ 0x034, 0x00007DED,
+ 0x034, 0x00006DEA,
+ 0x034, 0x00005CED,
+ 0x034, 0x00004CEA,
+ 0x034, 0x000034EA,
+ 0x034, 0x000024E7,
+ 0x034, 0x0000146A,
+ 0x034, 0x0000006B,
+ 0xB0000000, 0x00000000,
+ 0x80000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A0D0,
+ 0x034, 0x000090CD,
+ 0x034, 0x000080CA,
+ 0x034, 0x0000704D,
+ 0x034, 0x0000604A,
+ 0x034, 0x00005047,
+ 0x034, 0x0000400A,
+ 0x034, 0x00003007,
+ 0x034, 0x00002004,
+ 0x034, 0x00001001,
+ 0x034, 0x00000001,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A0D0,
+ 0x034, 0x000090CD,
+ 0x034, 0x000080CA,
+ 0x034, 0x0000704D,
+ 0x034, 0x0000604A,
+ 0x034, 0x00005047,
+ 0x034, 0x0000400A,
+ 0x034, 0x00003007,
+ 0x034, 0x00002004,
+ 0x034, 0x00001001,
+ 0x034, 0x00000001,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A0D0,
+ 0x034, 0x000090CD,
+ 0x034, 0x000080CA,
+ 0x034, 0x0000704D,
+ 0x034, 0x0000604A,
+ 0x034, 0x00005047,
+ 0x034, 0x0000400A,
+ 0x034, 0x00003007,
+ 0x034, 0x00002004,
+ 0x034, 0x00001001,
+ 0x034, 0x00000001,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A0D0,
+ 0x034, 0x000090CD,
+ 0x034, 0x000080CA,
+ 0x034, 0x0000704D,
+ 0x034, 0x0000604A,
+ 0x034, 0x00005047,
+ 0x034, 0x0000400A,
+ 0x034, 0x00003007,
+ 0x034, 0x00002004,
+ 0x034, 0x00001001,
+ 0x034, 0x00000001,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A0D0,
+ 0x034, 0x000090CD,
+ 0x034, 0x000080CA,
+ 0x034, 0x0000704D,
+ 0x034, 0x0000604A,
+ 0x034, 0x00005047,
+ 0x034, 0x0000400A,
+ 0x034, 0x00003007,
+ 0x034, 0x00002004,
+ 0x034, 0x00001001,
+ 0x034, 0x00000001,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A0D0,
+ 0x034, 0x000090CD,
+ 0x034, 0x000080CA,
+ 0x034, 0x0000704D,
+ 0x034, 0x0000604A,
+ 0x034, 0x00005047,
+ 0x034, 0x0000400A,
+ 0x034, 0x00003007,
+ 0x034, 0x00002004,
+ 0x034, 0x00001001,
+ 0x034, 0x00000001,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A0D0,
+ 0x034, 0x000090CD,
+ 0x034, 0x000080CA,
+ 0x034, 0x0000704D,
+ 0x034, 0x0000604A,
+ 0x034, 0x00005047,
+ 0x034, 0x0000400A,
+ 0x034, 0x00003007,
+ 0x034, 0x00002004,
+ 0x034, 0x00001001,
+ 0x034, 0x00000001,
+ 0xA0000000, 0x00000000,
+ 0x034, 0x0008ADF6,
+ 0x034, 0x00089DF3,
+ 0x034, 0x00088DF0,
+ 0x034, 0x00087DED,
+ 0x034, 0x00086DEA,
+ 0x034, 0x00085CED,
+ 0x034, 0x00084CEA,
+ 0x034, 0x000834EA,
+ 0x034, 0x000824E7,
+ 0x034, 0x0008146A,
+ 0x034, 0x0008006B,
+ 0xB0000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x0EF, 0x000020A2,
+ 0x0DF, 0x00000080,
+ 0x035, 0x00000192,
+ 0x035, 0x00008192,
+ 0x035, 0x00010192,
+ 0x036, 0x00000024,
+ 0x036, 0x00008024,
+ 0x036, 0x00010024,
+ 0x036, 0x00018024,
+ 0x0EF, 0x00000000,
+ 0x051, 0x00000C21,
+ 0x052, 0x000006D9,
+ 0x053, 0x000FC649,
+ 0x054, 0x0000017E,
+ 0x018, 0x0001012A,
+ 0x081, 0x0007FC00,
+ 0x089, 0x00050110,
+ 0x08A, 0x00043E50,
+ 0x08B, 0x0002E180,
+ 0x08C, 0x00093C3C,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x085, 0x000F8000,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x085, 0x000F8000,
+ 0xA0000000, 0x00000000,
+ 0x085, 0x000F8000,
+ 0xB0000000, 0x00000000,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x08D, 0x000FFFF0,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x08D, 0x000FFFF0,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x08D, 0x000FFFF0,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x08D, 0x000FFFF0,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x08D, 0x000FFFF0,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x08D, 0x000FFFF0,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x08D, 0x000FFFF0,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x08D, 0x000FFFF0,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x08D, 0x000FFFF0,
+ 0xA0000000, 0x00000000,
+ 0x08D, 0x000FFFF0,
+ 0xB0000000, 0x00000000,
+ 0x0EF, 0x00001000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00038023,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x0006C000,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x000D4000,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00080000,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00088000,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00000000,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x0006C000,
+ 0x90000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00000000,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x0008C000,
+ 0x90000009, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00004000,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00088000,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00088000,
+ 0xA0000000, 0x00000000,
+ 0x03C, 0x000A0000,
+ 0xB0000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00030023,
+ 0x03C, 0x00048000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00028623,
+ 0x03C, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00021633,
+ 0x03C, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x0001C633,
+ 0x03C, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00010293,
+ 0x03C, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00009593,
+ 0x03C, 0x00000000,
+ 0x03A, 0x00000148,
+ 0x80000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x03B, 0x0000118B,
+ 0xA0000000, 0x00000000,
+ 0x03B, 0x0000078B,
+ 0xB0000000, 0x00000000,
+ 0x03C, 0x00000000,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00078023,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00078023,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00078023,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00078023,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00078023,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00078023,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00078023,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00078023,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00078023,
+ 0xA0000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00078023,
+ 0xB0000000, 0x00000000,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x0004C000,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00084000,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00000000,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00080000,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00000000,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x0004C000,
+ 0x90000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00000000,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x000D0000,
+ 0x90000009, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00000000,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00080000,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00080000,
+ 0xA0000000, 0x00000000,
+ 0x03C, 0x00028000,
+ 0xB0000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00070023,
+ 0x03C, 0x00048000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00068623,
+ 0x03C, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00061633,
+ 0x03C, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x0005C633,
+ 0x03C, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00050293,
+ 0x03C, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00049593,
+ 0x03C, 0x00000000,
+ 0x03A, 0x00000148,
+ 0x03B, 0x0004078B,
+ 0x03C, 0x00000000,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x000B8023,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x000B8023,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x000B8023,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x000B8023,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x000B8023,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x000B8023,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x000B8023,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x000B8023,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x000B8023,
+ 0xA0000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x000B8023,
+ 0xB0000000, 0x00000000,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00024000,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00060000,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00080000,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00024000,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00000000,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00024000,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00020000,
+ 0x90000009, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00000000,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00024000,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00024000,
+ 0xA0000000, 0x00000000,
+ 0x03C, 0x00020000,
+ 0xB0000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x000B0023,
+ 0x80000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00020000,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00020000,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00020000,
+ 0xA0000000, 0x00000000,
+ 0x03C, 0x00020000,
+ 0xB0000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x000A8623,
+ 0x03C, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x000A1633,
+ 0x03C, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x0009C633,
+ 0x03C, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00090293,
+ 0x03C, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00089593,
+ 0x03C, 0x00000000,
+ 0x03A, 0x00000148,
+ 0x80000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x03B, 0x0008128B,
+ 0xA0000000, 0x00000000,
+ 0x03B, 0x0008078B,
+ 0xB0000000, 0x00000000,
+ 0x03C, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x0EF, 0x00000800,
+ 0x03B, 0x00000000,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x00000803,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x00000000,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x00001803,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x00001803,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x00000803,
+ 0x90000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x00001003,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x00001003,
+ 0x90000009, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x00001003,
+ 0xA0000000, 0x00000000,
+ 0x03A, 0x00000803,
+ 0xB0000000, 0x00000000,
+ 0x03B, 0x00040000,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x00001000,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x00000800,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x00000803,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x00000803,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x00001000,
+ 0x90000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x00000000,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x00000000,
+ 0x90000009, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x00000000,
+ 0xA0000000, 0x00000000,
+ 0x03A, 0x00001000,
+ 0xB0000000, 0x00000000,
+ 0x03B, 0x00080000,
+ 0x80000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x00000000,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x00000000,
+ 0x90000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x00001802,
+ 0x90000009, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x00001802,
+ 0xA0000000, 0x00000000,
+ 0x03A, 0x00001002,
+ 0xB0000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0xA0000000, 0x00000000,
+ 0xB0000000, 0x00000000,
+ 0x80000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x018, 0x00013124,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x018, 0x00013124,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x018, 0x00013124,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x018, 0x00013124,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x018, 0x00013124,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x018, 0x00013124,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x018, 0x00013124,
+ 0xA0000000, 0x00000000,
+ 0x018, 0x00013124,
+ 0xB0000000, 0x00000000,
+ 0x0EF, 0x00000100,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0004A38C,
+ 0x034, 0x000491AD,
+ 0x034, 0x000481AA,
+ 0x034, 0x000471A7,
+ 0x034, 0x000460AA,
+ 0x034, 0x000450A7,
+ 0x034, 0x0004402C,
+ 0x034, 0x00043029,
+ 0x034, 0x0004200C,
+ 0x034, 0x00041009,
+ 0x034, 0x00040006,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0004A3EF,
+ 0x034, 0x000493AD,
+ 0x034, 0x0004838A,
+ 0x034, 0x0004718C,
+ 0x034, 0x00046189,
+ 0x034, 0x0004506D,
+ 0x034, 0x0004404C,
+ 0x034, 0x0004302C,
+ 0x034, 0x00042029,
+ 0x034, 0x00041026,
+ 0x034, 0x00040023,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0004A3EF,
+ 0x034, 0x000493AD,
+ 0x034, 0x0004838A,
+ 0x034, 0x0004718C,
+ 0x034, 0x00046189,
+ 0x034, 0x0004506D,
+ 0x034, 0x0004404C,
+ 0x034, 0x0004302C,
+ 0x034, 0x00042029,
+ 0x034, 0x00041026,
+ 0x034, 0x00040023,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0004A3EF,
+ 0x034, 0x000493AD,
+ 0x034, 0x0004838A,
+ 0x034, 0x0004718C,
+ 0x034, 0x00046189,
+ 0x034, 0x0004506D,
+ 0x034, 0x0004404C,
+ 0x034, 0x0004302C,
+ 0x034, 0x00042029,
+ 0x034, 0x00041026,
+ 0x034, 0x00040023,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0004A3EF,
+ 0x034, 0x000493AD,
+ 0x034, 0x0004838A,
+ 0x034, 0x0004718C,
+ 0x034, 0x00046189,
+ 0x034, 0x0004506D,
+ 0x034, 0x0004404C,
+ 0x034, 0x0004302C,
+ 0x034, 0x00042029,
+ 0x034, 0x00041026,
+ 0x034, 0x00040023,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0004A3F5,
+ 0x034, 0x000493F3,
+ 0x034, 0x00048393,
+ 0x034, 0x00047390,
+ 0x034, 0x0004638D,
+ 0x034, 0x0004538A,
+ 0x034, 0x00044387,
+ 0x034, 0x000430ED,
+ 0x034, 0x000420EA,
+ 0x034, 0x000410E7,
+ 0x034, 0x0004002D,
+ 0x90000009, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0004AFF7,
+ 0x034, 0x00049FF6,
+ 0x034, 0x00048FF3,
+ 0x034, 0x00047FF0,
+ 0x034, 0x00046FED,
+ 0x034, 0x00045FEA,
+ 0x034, 0x00044FE7,
+ 0x034, 0x00043CD0,
+ 0x034, 0x00042CCD,
+ 0x034, 0x00041CCA,
+ 0x034, 0x00040CC7,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0004A3EF,
+ 0x034, 0x000493AD,
+ 0x034, 0x0004838A,
+ 0x034, 0x0004718C,
+ 0x034, 0x00046189,
+ 0x034, 0x0004506D,
+ 0x034, 0x0004404C,
+ 0x034, 0x0004302C,
+ 0x034, 0x00042029,
+ 0x034, 0x00041026,
+ 0x034, 0x00040023,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0004A3EF,
+ 0x034, 0x000493AD,
+ 0x034, 0x0004838A,
+ 0x034, 0x0004718C,
+ 0x034, 0x00046189,
+ 0x034, 0x0004506D,
+ 0x034, 0x0004404C,
+ 0x034, 0x0004302C,
+ 0x034, 0x00042029,
+ 0x034, 0x00041026,
+ 0x034, 0x00040023,
+ 0xA0000000, 0x00000000,
+ 0x034, 0x0004AFF4,
+ 0x034, 0x00049FF1,
+ 0x034, 0x00048FEE,
+ 0x034, 0x00047FEB,
+ 0x034, 0x00046FE8,
+ 0x034, 0x00045DEA,
+ 0x034, 0x00044CED,
+ 0x034, 0x00043CEA,
+ 0x034, 0x00042C6C,
+ 0x034, 0x00041C69,
+ 0x034, 0x00040C2B,
+ 0xB0000000, 0x00000000,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0002A3EC,
+ 0x034, 0x0002938C,
+ 0x034, 0x000281AD,
+ 0x034, 0x000271AA,
+ 0x034, 0x000261A7,
+ 0x034, 0x000250AA,
+ 0x034, 0x000240A7,
+ 0x034, 0x0002302C,
+ 0x034, 0x00022029,
+ 0x034, 0x0002100C,
+ 0x034, 0x00020009,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0002A3EC,
+ 0x034, 0x0002936D,
+ 0x034, 0x0002836A,
+ 0x034, 0x0002716D,
+ 0x034, 0x0002616A,
+ 0x034, 0x0002506D,
+ 0x034, 0x0002406A,
+ 0x034, 0x0002302C,
+ 0x034, 0x00022029,
+ 0x034, 0x00021026,
+ 0x034, 0x00020023,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0002A3EC,
+ 0x034, 0x000293AC,
+ 0x034, 0x0002838A,
+ 0x034, 0x0002718C,
+ 0x034, 0x00026189,
+ 0x034, 0x0002506D,
+ 0x034, 0x0002406A,
+ 0x034, 0x0002302C,
+ 0x034, 0x00022029,
+ 0x034, 0x00021026,
+ 0x034, 0x00020023,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0002A3EC,
+ 0x034, 0x0002936D,
+ 0x034, 0x0002836A,
+ 0x034, 0x0002716D,
+ 0x034, 0x0002616A,
+ 0x034, 0x0002506D,
+ 0x034, 0x0002406A,
+ 0x034, 0x0002302C,
+ 0x034, 0x00022029,
+ 0x034, 0x00021026,
+ 0x034, 0x00020023,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0002A3EC,
+ 0x034, 0x000293AC,
+ 0x034, 0x0002838A,
+ 0x034, 0x0002718C,
+ 0x034, 0x00026189,
+ 0x034, 0x0002506D,
+ 0x034, 0x0002406A,
+ 0x034, 0x0002302C,
+ 0x034, 0x00022029,
+ 0x034, 0x00021026,
+ 0x034, 0x00020023,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0002A3F5,
+ 0x034, 0x000293F3,
+ 0x034, 0x000282F2,
+ 0x034, 0x000272D0,
+ 0x034, 0x000262CD,
+ 0x034, 0x000252CA,
+ 0x034, 0x000242C7,
+ 0x034, 0x000230CD,
+ 0x034, 0x000220CA,
+ 0x034, 0x000210C7,
+ 0x034, 0x00020086,
+ 0x90000009, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0002AFF7,
+ 0x034, 0x00029FF6,
+ 0x034, 0x00028FF3,
+ 0x034, 0x00027FF0,
+ 0x034, 0x00026FED,
+ 0x034, 0x00025FEA,
+ 0x034, 0x00024FE7,
+ 0x034, 0x00023DEA,
+ 0x034, 0x00022DE7,
+ 0x034, 0x00021DE4,
+ 0x034, 0x00020E44,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0002A3EC,
+ 0x034, 0x0002936D,
+ 0x034, 0x0002836A,
+ 0x034, 0x0002716D,
+ 0x034, 0x0002616A,
+ 0x034, 0x0002506D,
+ 0x034, 0x0002406A,
+ 0x034, 0x0002302C,
+ 0x034, 0x00022029,
+ 0x034, 0x00021026,
+ 0x034, 0x00020023,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0002A3EC,
+ 0x034, 0x0002936D,
+ 0x034, 0x0002836A,
+ 0x034, 0x0002716D,
+ 0x034, 0x0002616A,
+ 0x034, 0x0002506D,
+ 0x034, 0x0002406A,
+ 0x034, 0x0002302C,
+ 0x034, 0x00022029,
+ 0x034, 0x00021026,
+ 0x034, 0x00020023,
+ 0xA0000000, 0x00000000,
+ 0x034, 0x0002AFF4,
+ 0x034, 0x00029FF1,
+ 0x034, 0x00028FEE,
+ 0x034, 0x00027FEB,
+ 0x034, 0x00026FE8,
+ 0x034, 0x00025DEA,
+ 0x034, 0x00024CED,
+ 0x034, 0x00023CEA,
+ 0x034, 0x00022C6C,
+ 0x034, 0x00021C69,
+ 0x034, 0x00020C2B,
+ 0xB0000000, 0x00000000,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A38C,
+ 0x034, 0x000091AD,
+ 0x034, 0x000081AA,
+ 0x034, 0x000071A7,
+ 0x034, 0x000060AA,
+ 0x034, 0x000050A7,
+ 0x034, 0x0000402C,
+ 0x034, 0x00003029,
+ 0x034, 0x0000200C,
+ 0x034, 0x00001009,
+ 0x034, 0x00000006,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A3EE,
+ 0x034, 0x000093AB,
+ 0x034, 0x00008389,
+ 0x034, 0x0000718C,
+ 0x034, 0x00006189,
+ 0x034, 0x0000506D,
+ 0x034, 0x0000406A,
+ 0x034, 0x0000302C,
+ 0x034, 0x00002029,
+ 0x034, 0x00001026,
+ 0x034, 0x00000023,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A3EE,
+ 0x034, 0x000093AB,
+ 0x034, 0x00008389,
+ 0x034, 0x0000718C,
+ 0x034, 0x00006189,
+ 0x034, 0x0000506D,
+ 0x034, 0x0000406A,
+ 0x034, 0x0000302C,
+ 0x034, 0x00002029,
+ 0x034, 0x00001026,
+ 0x034, 0x00000023,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A3EE,
+ 0x034, 0x000093AB,
+ 0x034, 0x00008389,
+ 0x034, 0x0000718C,
+ 0x034, 0x00006189,
+ 0x034, 0x0000506D,
+ 0x034, 0x0000406A,
+ 0x034, 0x0000302C,
+ 0x034, 0x00002029,
+ 0x034, 0x00001026,
+ 0x034, 0x00000023,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A3EE,
+ 0x034, 0x000093AB,
+ 0x034, 0x00008389,
+ 0x034, 0x0000718C,
+ 0x034, 0x00006189,
+ 0x034, 0x0000506D,
+ 0x034, 0x0000406A,
+ 0x034, 0x0000302C,
+ 0x034, 0x00002029,
+ 0x034, 0x00001026,
+ 0x034, 0x00000023,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A3F5,
+ 0x034, 0x000093F1,
+ 0x034, 0x000083B0,
+ 0x034, 0x00007370,
+ 0x034, 0x0000636D,
+ 0x034, 0x0000536A,
+ 0x034, 0x00004367,
+ 0x034, 0x0000308E,
+ 0x034, 0x0000208B,
+ 0x034, 0x00001088,
+ 0x034, 0x00000085,
+ 0x90000009, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000AFF7,
+ 0x034, 0x00009FF5,
+ 0x034, 0x00008FF2,
+ 0x034, 0x00007FEF,
+ 0x034, 0x00006FEC,
+ 0x034, 0x00005FE9,
+ 0x034, 0x00004EAA,
+ 0x034, 0x00003EA7,
+ 0x034, 0x00002C70,
+ 0x034, 0x00001C6D,
+ 0x034, 0x00000C6A,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A3EE,
+ 0x034, 0x000093AB,
+ 0x034, 0x00008389,
+ 0x034, 0x0000718C,
+ 0x034, 0x00006189,
+ 0x034, 0x0000506D,
+ 0x034, 0x0000406A,
+ 0x034, 0x0000302C,
+ 0x034, 0x00002029,
+ 0x034, 0x00001026,
+ 0x034, 0x00000023,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A3EE,
+ 0x034, 0x000093AB,
+ 0x034, 0x00008389,
+ 0x034, 0x0000718C,
+ 0x034, 0x00006189,
+ 0x034, 0x0000506D,
+ 0x034, 0x0000406A,
+ 0x034, 0x0000302C,
+ 0x034, 0x00002029,
+ 0x034, 0x00001026,
+ 0x034, 0x00000023,
+ 0xA0000000, 0x00000000,
+ 0x034, 0x0000AFF4,
+ 0x034, 0x00009FF1,
+ 0x034, 0x00008FEE,
+ 0x034, 0x00007FEB,
+ 0x034, 0x00006FE8,
+ 0x034, 0x00005DEA,
+ 0x034, 0x00004CED,
+ 0x034, 0x00003CEA,
+ 0x034, 0x00002C6C,
+ 0x034, 0x00001C69,
+ 0x034, 0x00000C2B,
+ 0xB0000000, 0x00000000,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x000CA38C,
+ 0x034, 0x000C91AD,
+ 0x034, 0x000C81AA,
+ 0x034, 0x000C71A7,
+ 0x034, 0x000C60AA,
+ 0x034, 0x000C50A7,
+ 0x034, 0x000C402C,
+ 0x034, 0x000C3029,
+ 0x034, 0x000C200C,
+ 0x034, 0x000C1009,
+ 0x034, 0x000C0006,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x000CA3EF,
+ 0x034, 0x000C93AD,
+ 0x034, 0x000C838A,
+ 0x034, 0x000C718C,
+ 0x034, 0x000C6189,
+ 0x034, 0x000C506D,
+ 0x034, 0x000C404C,
+ 0x034, 0x000C302C,
+ 0x034, 0x000C2029,
+ 0x034, 0x000C1026,
+ 0x034, 0x000C0023,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x000CA3EF,
+ 0x034, 0x000C93AD,
+ 0x034, 0x000C838A,
+ 0x034, 0x000C718C,
+ 0x034, 0x000C6189,
+ 0x034, 0x000C506D,
+ 0x034, 0x000C404C,
+ 0x034, 0x000C302C,
+ 0x034, 0x000C2029,
+ 0x034, 0x000C1026,
+ 0x034, 0x000C0023,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x000CA3EF,
+ 0x034, 0x000C93AD,
+ 0x034, 0x000C838A,
+ 0x034, 0x000C718C,
+ 0x034, 0x000C6189,
+ 0x034, 0x000C506D,
+ 0x034, 0x000C404C,
+ 0x034, 0x000C302C,
+ 0x034, 0x000C2029,
+ 0x034, 0x000C1026,
+ 0x034, 0x000C0023,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x000CA3EF,
+ 0x034, 0x000C93AD,
+ 0x034, 0x000C838A,
+ 0x034, 0x000C718C,
+ 0x034, 0x000C6189,
+ 0x034, 0x000C506D,
+ 0x034, 0x000C404C,
+ 0x034, 0x000C302C,
+ 0x034, 0x000C2029,
+ 0x034, 0x000C1026,
+ 0x034, 0x000C0023,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x000CA3F5,
+ 0x034, 0x000C93F3,
+ 0x034, 0x000C8393,
+ 0x034, 0x000C7390,
+ 0x034, 0x000C638D,
+ 0x034, 0x000C538A,
+ 0x034, 0x000C4387,
+ 0x034, 0x000C30ED,
+ 0x034, 0x000C20EA,
+ 0x034, 0x000C10E7,
+ 0x034, 0x000C002D,
+ 0x90000009, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x000CAFF7,
+ 0x034, 0x000C9FF6,
+ 0x034, 0x000C8FF3,
+ 0x034, 0x000C7FF0,
+ 0x034, 0x000C6FED,
+ 0x034, 0x000C5FEA,
+ 0x034, 0x000C4FE7,
+ 0x034, 0x000C3CD0,
+ 0x034, 0x000C2CCD,
+ 0x034, 0x000C1CCA,
+ 0x034, 0x000C0CC7,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x000CA3EF,
+ 0x034, 0x000C93AD,
+ 0x034, 0x000C838A,
+ 0x034, 0x000C718C,
+ 0x034, 0x000C6189,
+ 0x034, 0x000C506D,
+ 0x034, 0x000C404C,
+ 0x034, 0x000C302C,
+ 0x034, 0x000C2029,
+ 0x034, 0x000C1026,
+ 0x034, 0x000C0023,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x000CA3EF,
+ 0x034, 0x000C93AD,
+ 0x034, 0x000C838A,
+ 0x034, 0x000C718C,
+ 0x034, 0x000C6189,
+ 0x034, 0x000C506D,
+ 0x034, 0x000C404C,
+ 0x034, 0x000C302C,
+ 0x034, 0x000C2029,
+ 0x034, 0x000C1026,
+ 0x034, 0x000C0023,
+ 0xA0000000, 0x00000000,
+ 0x034, 0x000CA794,
+ 0x034, 0x000C9791,
+ 0x034, 0x000C878E,
+ 0x034, 0x000C778B,
+ 0x034, 0x000C658D,
+ 0x034, 0x000C558A,
+ 0x034, 0x000C448D,
+ 0x034, 0x000C348A,
+ 0x034, 0x000C244C,
+ 0x034, 0x000C1449,
+ 0x034, 0x000C042B,
+ 0xB0000000, 0x00000000,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x000AA3EC,
+ 0x034, 0x000A938C,
+ 0x034, 0x000A81AD,
+ 0x034, 0x000A71AA,
+ 0x034, 0x000A61A7,
+ 0x034, 0x000A50AA,
+ 0x034, 0x000A40A7,
+ 0x034, 0x000A302C,
+ 0x034, 0x000A2029,
+ 0x034, 0x000A100C,
+ 0x034, 0x000A0009,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x000AA3EC,
+ 0x034, 0x000A936D,
+ 0x034, 0x000A836A,
+ 0x034, 0x000A716D,
+ 0x034, 0x000A616A,
+ 0x034, 0x000A506D,
+ 0x034, 0x000A406A,
+ 0x034, 0x000A302C,
+ 0x034, 0x000A2029,
+ 0x034, 0x000A1026,
+ 0x034, 0x000A0023,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x000AA3EC,
+ 0x034, 0x000A93AC,
+ 0x034, 0x000A838A,
+ 0x034, 0x000A718C,
+ 0x034, 0x000A6189,
+ 0x034, 0x000A506D,
+ 0x034, 0x000A406A,
+ 0x034, 0x000A302C,
+ 0x034, 0x000A2029,
+ 0x034, 0x000A1026,
+ 0x034, 0x000A0023,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x000AA3EC,
+ 0x034, 0x000A936D,
+ 0x034, 0x000A836A,
+ 0x034, 0x000A716D,
+ 0x034, 0x000A616A,
+ 0x034, 0x000A506D,
+ 0x034, 0x000A406A,
+ 0x034, 0x000A302C,
+ 0x034, 0x000A2029,
+ 0x034, 0x000A1026,
+ 0x034, 0x000A0023,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x000AA3EC,
+ 0x034, 0x000A93AC,
+ 0x034, 0x000A838A,
+ 0x034, 0x000A718C,
+ 0x034, 0x000A6189,
+ 0x034, 0x000A506D,
+ 0x034, 0x000A406A,
+ 0x034, 0x000A302C,
+ 0x034, 0x000A2029,
+ 0x034, 0x000A1026,
+ 0x034, 0x000A0023,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x000AA3F5,
+ 0x034, 0x000A93F3,
+ 0x034, 0x000A82F2,
+ 0x034, 0x000A72D0,
+ 0x034, 0x000A62CD,
+ 0x034, 0x000A52CA,
+ 0x034, 0x000A42C7,
+ 0x034, 0x000A30CD,
+ 0x034, 0x000A20CA,
+ 0x034, 0x000A10C7,
+ 0x034, 0x000A0086,
+ 0x90000009, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x000AAFF7,
+ 0x034, 0x000A9FF6,
+ 0x034, 0x000A8FF3,
+ 0x034, 0x000A7FF0,
+ 0x034, 0x000A6FED,
+ 0x034, 0x000A5FEA,
+ 0x034, 0x000A4FE7,
+ 0x034, 0x000A3DEA,
+ 0x034, 0x000A2DE7,
+ 0x034, 0x000A1DE4,
+ 0x034, 0x000A0E44,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x000AA3EC,
+ 0x034, 0x000A936D,
+ 0x034, 0x000A836A,
+ 0x034, 0x000A716D,
+ 0x034, 0x000A616A,
+ 0x034, 0x000A506D,
+ 0x034, 0x000A406A,
+ 0x034, 0x000A302C,
+ 0x034, 0x000A2029,
+ 0x034, 0x000A1026,
+ 0x034, 0x000A0023,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x000AA3EC,
+ 0x034, 0x000A936D,
+ 0x034, 0x000A836A,
+ 0x034, 0x000A716D,
+ 0x034, 0x000A616A,
+ 0x034, 0x000A506D,
+ 0x034, 0x000A406A,
+ 0x034, 0x000A302C,
+ 0x034, 0x000A2029,
+ 0x034, 0x000A1026,
+ 0x034, 0x000A0023,
+ 0xA0000000, 0x00000000,
+ 0x034, 0x000AA794,
+ 0x034, 0x000A9791,
+ 0x034, 0x000A878E,
+ 0x034, 0x000A778B,
+ 0x034, 0x000A658D,
+ 0x034, 0x000A558A,
+ 0x034, 0x000A448D,
+ 0x034, 0x000A348A,
+ 0x034, 0x000A244C,
+ 0x034, 0x000A1449,
+ 0x034, 0x000A042B,
+ 0xB0000000, 0x00000000,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0008A38C,
+ 0x034, 0x000891AD,
+ 0x034, 0x000881AA,
+ 0x034, 0x000871A7,
+ 0x034, 0x000860AA,
+ 0x034, 0x000850A7,
+ 0x034, 0x0008402C,
+ 0x034, 0x00083029,
+ 0x034, 0x0008200C,
+ 0x034, 0x00081009,
+ 0x034, 0x00000006,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0008A3EE,
+ 0x034, 0x000893AB,
+ 0x034, 0x00088389,
+ 0x034, 0x0008718C,
+ 0x034, 0x00086189,
+ 0x034, 0x0008506D,
+ 0x034, 0x0008406A,
+ 0x034, 0x0008302C,
+ 0x034, 0x00082029,
+ 0x034, 0x00081026,
+ 0x034, 0x00080023,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0008A3EE,
+ 0x034, 0x000893AB,
+ 0x034, 0x00088389,
+ 0x034, 0x0008718C,
+ 0x034, 0x00086189,
+ 0x034, 0x0008506D,
+ 0x034, 0x0008406A,
+ 0x034, 0x0008302C,
+ 0x034, 0x00082029,
+ 0x034, 0x00081026,
+ 0x034, 0x00080023,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0008A3EE,
+ 0x034, 0x000893AB,
+ 0x034, 0x00088389,
+ 0x034, 0x0008718C,
+ 0x034, 0x00086189,
+ 0x034, 0x0008506D,
+ 0x034, 0x0008406A,
+ 0x034, 0x0008302C,
+ 0x034, 0x00082029,
+ 0x034, 0x00081026,
+ 0x034, 0x00080023,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0008A3EE,
+ 0x034, 0x000893AB,
+ 0x034, 0x00088389,
+ 0x034, 0x0008718C,
+ 0x034, 0x00086189,
+ 0x034, 0x0008506D,
+ 0x034, 0x0008406A,
+ 0x034, 0x0008302C,
+ 0x034, 0x00082029,
+ 0x034, 0x00081026,
+ 0x034, 0x00080023,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0008A3F5,
+ 0x034, 0x000893F1,
+ 0x034, 0x000883B0,
+ 0x034, 0x00087370,
+ 0x034, 0x0008636D,
+ 0x034, 0x0008536A,
+ 0x034, 0x00084367,
+ 0x034, 0x0008308E,
+ 0x034, 0x0008208B,
+ 0x034, 0x00081088,
+ 0x034, 0x00080085,
+ 0x90000009, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0008AFF7,
+ 0x034, 0x00089FF5,
+ 0x034, 0x00088FF2,
+ 0x034, 0x00087FEF,
+ 0x034, 0x00086FEC,
+ 0x034, 0x00085FE9,
+ 0x034, 0x00084EAA,
+ 0x034, 0x00083EA7,
+ 0x034, 0x00082C70,
+ 0x034, 0x00081C6D,
+ 0x034, 0x00080C6A,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0008A3EE,
+ 0x034, 0x000893AB,
+ 0x034, 0x00088389,
+ 0x034, 0x0008718C,
+ 0x034, 0x00086189,
+ 0x034, 0x0008506D,
+ 0x034, 0x0008406A,
+ 0x034, 0x0008302C,
+ 0x034, 0x00082029,
+ 0x034, 0x00081026,
+ 0x034, 0x00080023,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0008A3EE,
+ 0x034, 0x000893AB,
+ 0x034, 0x00088389,
+ 0x034, 0x0008718C,
+ 0x034, 0x00086189,
+ 0x034, 0x0008506D,
+ 0x034, 0x0008406A,
+ 0x034, 0x0008302C,
+ 0x034, 0x00082029,
+ 0x034, 0x00081026,
+ 0x034, 0x00080023,
+ 0xA0000000, 0x00000000,
+ 0x034, 0x0008A794,
+ 0x034, 0x00089791,
+ 0x034, 0x0008878E,
+ 0x034, 0x0008778B,
+ 0x034, 0x0008658D,
+ 0x034, 0x0008558A,
+ 0x034, 0x0008448D,
+ 0x034, 0x0008348A,
+ 0x034, 0x0008244C,
+ 0x034, 0x00081449,
+ 0x034, 0x0008042B,
+ 0xB0000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x0DF, 0x00000001,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x0DF, 0x00000001,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x0DF, 0x00000001,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x0DF, 0x00000001,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x0DF, 0x00000001,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x0DF, 0x00000001,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x0DF, 0x00000001,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x0DF, 0x00000001,
+ 0xA0000000, 0x00000000,
+ 0x0DF, 0x00000000,
+ 0xB0000000, 0x00000000,
+ 0x018, 0x0001712A,
+ 0x0EF, 0x00000040,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x035, 0x000006CC,
+ 0x035, 0x000086CC,
+ 0x035, 0x000106CC,
+ 0x035, 0x000206CC,
+ 0x035, 0x000286CC,
+ 0x035, 0x000306CC,
+ 0x035, 0x000406CC,
+ 0x035, 0x000486CC,
+ 0x035, 0x000506CC,
+ 0x035, 0x000806CC,
+ 0x035, 0x000886CC,
+ 0x035, 0x000906CC,
+ 0x035, 0x000A06CC,
+ 0x035, 0x000A86CC,
+ 0x035, 0x000B06CC,
+ 0x035, 0x000C06CC,
+ 0x035, 0x000C86CC,
+ 0x035, 0x000D06CC,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x035, 0x000006CC,
+ 0x035, 0x000086CC,
+ 0x035, 0x000106CC,
+ 0x035, 0x000206CC,
+ 0x035, 0x000286CC,
+ 0x035, 0x000306CC,
+ 0x035, 0x000406CC,
+ 0x035, 0x000486CC,
+ 0x035, 0x000506CC,
+ 0x035, 0x000806CC,
+ 0x035, 0x000886CC,
+ 0x035, 0x000906CC,
+ 0x035, 0x000A06CC,
+ 0x035, 0x000A86CC,
+ 0x035, 0x000B06CC,
+ 0x035, 0x000C06CC,
+ 0x035, 0x000C86CC,
+ 0x035, 0x000D06CC,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x035, 0x000006CC,
+ 0x035, 0x000086CC,
+ 0x035, 0x000106CC,
+ 0x035, 0x000206CC,
+ 0x035, 0x000286CC,
+ 0x035, 0x000306CC,
+ 0x035, 0x000406CC,
+ 0x035, 0x000486CC,
+ 0x035, 0x000506CC,
+ 0x035, 0x000806CC,
+ 0x035, 0x000886CC,
+ 0x035, 0x000906CC,
+ 0x035, 0x000A06CC,
+ 0x035, 0x000A86CC,
+ 0x035, 0x000B06CC,
+ 0x035, 0x000C06CC,
+ 0x035, 0x000C86CC,
+ 0x035, 0x000D06CC,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x035, 0x000006CC,
+ 0x035, 0x000086CC,
+ 0x035, 0x000106CC,
+ 0x035, 0x000206CC,
+ 0x035, 0x000286CC,
+ 0x035, 0x000306CC,
+ 0x035, 0x000406CC,
+ 0x035, 0x000486CC,
+ 0x035, 0x000506CC,
+ 0x035, 0x000806CC,
+ 0x035, 0x000886CC,
+ 0x035, 0x000906CC,
+ 0x035, 0x000A06CC,
+ 0x035, 0x000A86CC,
+ 0x035, 0x000B06CC,
+ 0x035, 0x000C06CC,
+ 0x035, 0x000C86CC,
+ 0x035, 0x000D06CC,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x035, 0x000006CC,
+ 0x035, 0x000086CC,
+ 0x035, 0x000106CC,
+ 0x035, 0x000206CC,
+ 0x035, 0x000286CC,
+ 0x035, 0x000306CC,
+ 0x035, 0x000406CC,
+ 0x035, 0x000486CC,
+ 0x035, 0x000506CC,
+ 0x035, 0x000806CC,
+ 0x035, 0x000886CC,
+ 0x035, 0x000906CC,
+ 0x035, 0x000A06CC,
+ 0x035, 0x000A86CC,
+ 0x035, 0x000B06CC,
+ 0x035, 0x000C06CC,
+ 0x035, 0x000C86CC,
+ 0x035, 0x000D06CC,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x035, 0x000006CC,
+ 0x035, 0x000086CC,
+ 0x035, 0x000106CC,
+ 0x035, 0x000206CC,
+ 0x035, 0x000286CC,
+ 0x035, 0x000306CC,
+ 0x035, 0x000406CC,
+ 0x035, 0x000486CC,
+ 0x035, 0x000506CC,
+ 0x035, 0x000806CC,
+ 0x035, 0x000886CC,
+ 0x035, 0x000906CC,
+ 0x035, 0x000A06CC,
+ 0x035, 0x000A86CC,
+ 0x035, 0x000B06CC,
+ 0x035, 0x000C06CC,
+ 0x035, 0x000C86CC,
+ 0x035, 0x000D06CC,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x035, 0x000006CC,
+ 0x035, 0x000086CC,
+ 0x035, 0x000106CC,
+ 0x035, 0x000206CC,
+ 0x035, 0x000286CC,
+ 0x035, 0x000306CC,
+ 0x035, 0x000406CC,
+ 0x035, 0x000486CC,
+ 0x035, 0x000506CC,
+ 0x035, 0x000806CC,
+ 0x035, 0x000886CC,
+ 0x035, 0x000906CC,
+ 0x035, 0x000A06CC,
+ 0x035, 0x000A86CC,
+ 0x035, 0x000B06CC,
+ 0x035, 0x000C06CC,
+ 0x035, 0x000C86CC,
+ 0x035, 0x000D06CC,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x035, 0x000006CC,
+ 0x035, 0x000086CC,
+ 0x035, 0x000106CC,
+ 0x035, 0x000206CC,
+ 0x035, 0x000286CC,
+ 0x035, 0x000306CC,
+ 0x035, 0x000406CC,
+ 0x035, 0x000486CC,
+ 0x035, 0x000506CC,
+ 0x035, 0x000806CC,
+ 0x035, 0x000886CC,
+ 0x035, 0x000906CC,
+ 0x035, 0x000A06CC,
+ 0x035, 0x000A86CC,
+ 0x035, 0x000B06CC,
+ 0x035, 0x000C06CC,
+ 0x035, 0x000C86CC,
+ 0x035, 0x000D06CC,
+ 0xA0000000, 0x00000000,
+ 0x035, 0x00000484,
+ 0x035, 0x00008484,
+ 0x035, 0x00010484,
+ 0x035, 0x00020584,
+ 0x035, 0x00028584,
+ 0x035, 0x00030584,
+ 0x035, 0x00040584,
+ 0x035, 0x00048584,
+ 0x035, 0x00050584,
+ 0x035, 0x000805FB,
+ 0x035, 0x000885FB,
+ 0x035, 0x000905FB,
+ 0x035, 0x000A05FB,
+ 0x035, 0x000A85FB,
+ 0x035, 0x000B05FB,
+ 0x035, 0x000C05FB,
+ 0x035, 0x000C85FB,
+ 0x035, 0x000D05FB,
+ 0xB0000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x0DF, 0x00000001,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x0DF, 0x00000001,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x0DF, 0x00000001,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x0DF, 0x00000001,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x0DF, 0x00000001,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x0DF, 0x00000001,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x0DF, 0x00000001,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x0DF, 0x00000001,
+ 0xA0000000, 0x00000000,
+ 0x0DF, 0x00000000,
+ 0xB0000000, 0x00000000,
+ 0x018, 0x0001712A,
+ 0x0EF, 0x00000010,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x036, 0x00000473,
+ 0x036, 0x00008473,
+ 0x036, 0x00010473,
+ 0x036, 0x00020473,
+ 0x036, 0x00028473,
+ 0x036, 0x00030473,
+ 0x036, 0x00040473,
+ 0x036, 0x00048473,
+ 0x036, 0x00050473,
+ 0x036, 0x00080473,
+ 0x036, 0x00088473,
+ 0x036, 0x00090473,
+ 0x036, 0x000A0473,
+ 0x036, 0x000A8473,
+ 0x036, 0x000B0473,
+ 0x036, 0x000C0473,
+ 0x036, 0x000C8473,
+ 0x036, 0x000D0473,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x036, 0x00000475,
+ 0x036, 0x00008475,
+ 0x036, 0x00010475,
+ 0x036, 0x00020475,
+ 0x036, 0x00028475,
+ 0x036, 0x00030475,
+ 0x036, 0x00040475,
+ 0x036, 0x00048475,
+ 0x036, 0x00050475,
+ 0x036, 0x00080475,
+ 0x036, 0x00088475,
+ 0x036, 0x00090475,
+ 0x036, 0x000A0475,
+ 0x036, 0x000A8475,
+ 0x036, 0x000B0475,
+ 0x036, 0x000C0475,
+ 0x036, 0x000C8475,
+ 0x036, 0x000D0475,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x036, 0x00000475,
+ 0x036, 0x00008475,
+ 0x036, 0x00010475,
+ 0x036, 0x00020475,
+ 0x036, 0x00028475,
+ 0x036, 0x00030475,
+ 0x036, 0x00040475,
+ 0x036, 0x00048475,
+ 0x036, 0x00050475,
+ 0x036, 0x00080475,
+ 0x036, 0x00088475,
+ 0x036, 0x00090475,
+ 0x036, 0x000A0475,
+ 0x036, 0x000A8475,
+ 0x036, 0x000B0475,
+ 0x036, 0x000C0475,
+ 0x036, 0x000C8475,
+ 0x036, 0x000D0475,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x036, 0x00000475,
+ 0x036, 0x00008475,
+ 0x036, 0x00010475,
+ 0x036, 0x00020475,
+ 0x036, 0x00028475,
+ 0x036, 0x00030475,
+ 0x036, 0x00040475,
+ 0x036, 0x00048475,
+ 0x036, 0x00050475,
+ 0x036, 0x00080475,
+ 0x036, 0x00088475,
+ 0x036, 0x00090475,
+ 0x036, 0x000A0475,
+ 0x036, 0x000A8475,
+ 0x036, 0x000B0475,
+ 0x036, 0x000C0475,
+ 0x036, 0x000C8475,
+ 0x036, 0x000D0475,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x036, 0x00000475,
+ 0x036, 0x00008475,
+ 0x036, 0x00010475,
+ 0x036, 0x00020475,
+ 0x036, 0x00028475,
+ 0x036, 0x00030475,
+ 0x036, 0x00040475,
+ 0x036, 0x00048475,
+ 0x036, 0x00050475,
+ 0x036, 0x00080475,
+ 0x036, 0x00088475,
+ 0x036, 0x00090475,
+ 0x036, 0x000A0475,
+ 0x036, 0x000A8475,
+ 0x036, 0x000B0475,
+ 0x036, 0x000C0475,
+ 0x036, 0x000C8475,
+ 0x036, 0x000D0475,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x036, 0x00000475,
+ 0x036, 0x00008475,
+ 0x036, 0x00010475,
+ 0x036, 0x00020475,
+ 0x036, 0x00028475,
+ 0x036, 0x00030475,
+ 0x036, 0x00040475,
+ 0x036, 0x00048475,
+ 0x036, 0x00050475,
+ 0x036, 0x00080475,
+ 0x036, 0x00088475,
+ 0x036, 0x00090475,
+ 0x036, 0x000A0475,
+ 0x036, 0x000A8475,
+ 0x036, 0x000B0475,
+ 0x036, 0x000C0475,
+ 0x036, 0x000C8475,
+ 0x036, 0x000D0475,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x036, 0x00000475,
+ 0x036, 0x00008475,
+ 0x036, 0x00010475,
+ 0x036, 0x00020475,
+ 0x036, 0x00028475,
+ 0x036, 0x00030475,
+ 0x036, 0x00040475,
+ 0x036, 0x00048475,
+ 0x036, 0x00050475,
+ 0x036, 0x00080475,
+ 0x036, 0x00088475,
+ 0x036, 0x00090475,
+ 0x036, 0x000A0475,
+ 0x036, 0x000A8475,
+ 0x036, 0x000B0475,
+ 0x036, 0x000C0475,
+ 0x036, 0x000C8475,
+ 0x036, 0x000D0475,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x036, 0x00000475,
+ 0x036, 0x00008475,
+ 0x036, 0x00010475,
+ 0x036, 0x00020475,
+ 0x036, 0x00028475,
+ 0x036, 0x00030475,
+ 0x036, 0x00040475,
+ 0x036, 0x00048475,
+ 0x036, 0x00050475,
+ 0x036, 0x00080475,
+ 0x036, 0x00088475,
+ 0x036, 0x00090475,
+ 0x036, 0x000A0475,
+ 0x036, 0x000A8475,
+ 0x036, 0x000B0475,
+ 0x036, 0x000C0475,
+ 0x036, 0x000C8475,
+ 0x036, 0x000D0475,
+ 0xA0000000, 0x00000000,
+ 0x036, 0x00000474,
+ 0x036, 0x00008474,
+ 0x036, 0x00010474,
+ 0x036, 0x00020474,
+ 0x036, 0x00028474,
+ 0x036, 0x00030474,
+ 0x036, 0x00040474,
+ 0x036, 0x00048474,
+ 0x036, 0x00050474,
+ 0x036, 0x00080474,
+ 0x036, 0x00088474,
+ 0x036, 0x00090474,
+ 0x036, 0x000A0474,
+ 0x036, 0x000A8474,
+ 0x036, 0x000B0474,
+ 0x036, 0x000C0474,
+ 0x036, 0x000C8474,
+ 0x036, 0x000D0474,
+ 0xB0000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0xA0000000, 0x00000000,
+ 0x0EF, 0x00000004,
+ 0x037, 0x00000000,
+ 0x038, 0x0000514E,
+ 0x037, 0x00004000,
+ 0x038, 0x0000514E,
+ 0x037, 0x00008000,
+ 0x038, 0x0000514E,
+ 0x037, 0x00010000,
+ 0x038, 0x0000514E,
+ 0x037, 0x00014000,
+ 0x038, 0x0000514E,
+ 0x037, 0x00018000,
+ 0x038, 0x0000514E,
+ 0x037, 0x0001C000,
+ 0x038, 0x0000514E,
+ 0x037, 0x00020000,
+ 0x038, 0x0000514E,
+ 0x037, 0x00024000,
+ 0x038, 0x0000514E,
+ 0x037, 0x00028000,
+ 0x038, 0x0000514E,
+ 0x037, 0x0002C000,
+ 0x038, 0x0000714E,
+ 0x037, 0x00030000,
+ 0x038, 0x0000514E,
+ 0x037, 0x00034000,
+ 0x038, 0x0000514E,
+ 0x037, 0x00038000,
+ 0x038, 0x0000514E,
+ 0x037, 0x0003C000,
+ 0x038, 0x0000514E,
+ 0x037, 0x00040000,
+ 0x038, 0x0000514E,
+ 0x037, 0x00044000,
+ 0x038, 0x0000514E,
+ 0x037, 0x00048000,
+ 0x038, 0x0000514E,
+ 0x037, 0x00080000,
+ 0x038, 0x00005ECE,
+ 0x037, 0x00084000,
+ 0x038, 0x00005ECE,
+ 0x037, 0x00088000,
+ 0x038, 0x00005ECE,
+ 0x037, 0x00090000,
+ 0x038, 0x00005ECE,
+ 0x037, 0x00094000,
+ 0x038, 0x00005ECE,
+ 0x037, 0x00098000,
+ 0x038, 0x00005ECE,
+ 0x037, 0x0009C000,
+ 0x038, 0x00005ECE,
+ 0x037, 0x000A0000,
+ 0x038, 0x00005ECE,
+ 0x037, 0x000A4000,
+ 0x038, 0x00005ECE,
+ 0x037, 0x000A8000,
+ 0x038, 0x00005ECE,
+ 0x037, 0x000AC000,
+ 0x038, 0x00005ECE,
+ 0x037, 0x000B0000,
+ 0x038, 0x00005ECE,
+ 0x037, 0x000B4000,
+ 0x038, 0x00005ECE,
+ 0x037, 0x000B8000,
+ 0x038, 0x00005ECE,
+ 0x037, 0x000BC000,
+ 0x038, 0x00005ECE,
+ 0x037, 0x000C0000,
+ 0x038, 0x00005ECE,
+ 0x037, 0x000C4000,
+ 0x038, 0x00005ECE,
+ 0x037, 0x000C8000,
+ 0x038, 0x00005ECE,
+ 0x0EF, 0x00000000,
+ 0xB0000000, 0x00000000,
+ 0x0EF, 0x00000008,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0xA0000000, 0x00000000,
+ 0x03C, 0x0000007D,
+ 0x03C, 0x0000047D,
+ 0x03C, 0x0000087D,
+ 0x03C, 0x0000107D,
+ 0x03C, 0x0000147D,
+ 0x03C, 0x0000187D,
+ 0xB0000000, 0x00000000,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x0000027D,
+ 0x03C, 0x00000541,
+ 0x03C, 0x00000821,
+ 0x03C, 0x0000127D,
+ 0x03C, 0x00001541,
+ 0x03C, 0x00001821,
+ 0x03C, 0x0000227D,
+ 0x03C, 0x00002541,
+ 0x03C, 0x00002821,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x0000027D,
+ 0x03C, 0x00000546,
+ 0x03C, 0x00000821,
+ 0x03C, 0x0000127D,
+ 0x03C, 0x00001546,
+ 0x03C, 0x00001821,
+ 0x03C, 0x0000227D,
+ 0x03C, 0x00002546,
+ 0x03C, 0x00002821,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x0000027D,
+ 0x03C, 0x00000546,
+ 0x03C, 0x00000821,
+ 0x03C, 0x0000127D,
+ 0x03C, 0x00001546,
+ 0x03C, 0x00001821,
+ 0x03C, 0x0000227D,
+ 0x03C, 0x00002546,
+ 0x03C, 0x00002821,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x0000027D,
+ 0x03C, 0x00000546,
+ 0x03C, 0x00000821,
+ 0x03C, 0x0000127D,
+ 0x03C, 0x00001546,
+ 0x03C, 0x00001821,
+ 0x03C, 0x0000227D,
+ 0x03C, 0x00002546,
+ 0x03C, 0x00002821,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x0000027D,
+ 0x03C, 0x00000546,
+ 0x03C, 0x00000821,
+ 0x03C, 0x0000127D,
+ 0x03C, 0x00001546,
+ 0x03C, 0x00001821,
+ 0x03C, 0x0000227D,
+ 0x03C, 0x00002546,
+ 0x03C, 0x00002821,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x0000027D,
+ 0x03C, 0x00000546,
+ 0x03C, 0x00000821,
+ 0x03C, 0x0000127D,
+ 0x03C, 0x00001546,
+ 0x03C, 0x00001821,
+ 0x03C, 0x0000227D,
+ 0x03C, 0x00002546,
+ 0x03C, 0x00002821,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x0000027D,
+ 0x03C, 0x00000546,
+ 0x03C, 0x00000821,
+ 0x03C, 0x0000127D,
+ 0x03C, 0x00001546,
+ 0x03C, 0x00001821,
+ 0x03C, 0x0000227D,
+ 0x03C, 0x00002546,
+ 0x03C, 0x00002821,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x0000027D,
+ 0x03C, 0x00000546,
+ 0x03C, 0x00000821,
+ 0x03C, 0x0000127D,
+ 0x03C, 0x00001546,
+ 0x03C, 0x00001821,
+ 0x03C, 0x0000227D,
+ 0x03C, 0x00002546,
+ 0x03C, 0x00002821,
+ 0xA0000000, 0x00000000,
+ 0x03C, 0x0000037E,
+ 0x03C, 0x00000575,
+ 0x03C, 0x00000971,
+ 0x03C, 0x0000127E,
+ 0x03C, 0x00001575,
+ 0x03C, 0x00001871,
+ 0x03C, 0x0000217E,
+ 0x03C, 0x00002575,
+ 0x03C, 0x00002871,
+ 0xB0000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x061, 0x000C0D47,
+ 0x062, 0x0000133C,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x063, 0x000750E7,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x063, 0x000750E7,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x063, 0x000750E7,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x063, 0x000750E7,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x063, 0x000750E7,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x063, 0x000750E7,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x063, 0x000750E7,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x063, 0x000750E7,
+ 0xA0000000, 0x00000000,
+ 0x063, 0x0007D0E7,
+ 0xB0000000, 0x00000000,
+ 0x064, 0x00014FEC,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x065, 0x000920D0,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x065, 0x000920D0,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x065, 0x000920D0,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x065, 0x000920D0,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x065, 0x000920D0,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x065, 0x000920D0,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x065, 0x000920D0,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x065, 0x000920D0,
+ 0xA0000000, 0x00000000,
+ 0x065, 0x000923FF,
+ 0xB0000000, 0x00000000,
+ 0x066, 0x00000040,
+ 0x057, 0x00050000,
+ 0x056, 0x00051DF0,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0xA0000000, 0x00000000,
+ 0x055, 0x00082060,
+ 0xB0000000, 0x00000000,
+};
+
+RTW_DECL_TABLE_RF_RADIO(rtw8814a_rf_c, C);
+
+static const u32 rtw8814a_rf_d[] = {
+ 0x018, 0x00013124,
+ 0x040, 0x00000C00,
+ 0x058, 0x00000F98,
+ 0x07F, 0x00068004,
+ 0x018, 0x00000006,
+ 0x80000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x086, 0x000E335A,
+ 0x087, 0x00079F80,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x086, 0x000E335A,
+ 0x087, 0x00079F80,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x086, 0x000E335A,
+ 0x087, 0x00079F80,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x086, 0x000E335A,
+ 0x087, 0x00079F80,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x086, 0x000E335A,
+ 0x087, 0x00079F80,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x086, 0x000E335A,
+ 0x087, 0x00079F80,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x086, 0x000E335A,
+ 0x087, 0x00079F80,
+ 0xA0000000, 0x00000000,
+ 0x086, 0x000E4B58,
+ 0x087, 0x00049F80,
+ 0xB0000000, 0x00000000,
+ 0x0DF, 0x00000008,
+ 0x0EF, 0x00002000,
+ 0x80000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03B, 0x0003F19B,
+ 0x03B, 0x00037A5B,
+ 0x03B, 0x0002A433,
+ 0x03B, 0x00027BD3,
+ 0x03B, 0x0001F80B,
+ 0x03B, 0x00017803,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03B, 0x0003F09B,
+ 0x03B, 0x00037A5B,
+ 0x03B, 0x0002A433,
+ 0x03B, 0x00027BD3,
+ 0x03B, 0x0001F80B,
+ 0x03B, 0x00017803,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03B, 0x0003F19B,
+ 0x03B, 0x00037A5B,
+ 0x03B, 0x0002A433,
+ 0x03B, 0x00027BD3,
+ 0x03B, 0x0001F80B,
+ 0x03B, 0x00017803,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03B, 0x0003F09B,
+ 0x03B, 0x00037A5B,
+ 0x03B, 0x0002A433,
+ 0x03B, 0x00027BD3,
+ 0x03B, 0x0001F80B,
+ 0x03B, 0x00017803,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x03B, 0x0003F19B,
+ 0x03B, 0x00037A5B,
+ 0x03B, 0x0002A433,
+ 0x03B, 0x00027BD3,
+ 0x03B, 0x0001F80B,
+ 0x03B, 0x00017803,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x03B, 0x0003F19B,
+ 0x03B, 0x00037A5B,
+ 0x03B, 0x0002A433,
+ 0x03B, 0x00027BD3,
+ 0x03B, 0x0001F80B,
+ 0x03B, 0x00017803,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x03B, 0x0003F19B,
+ 0x03B, 0x00037A5B,
+ 0x03B, 0x0002A433,
+ 0x03B, 0x00027BD3,
+ 0x03B, 0x0001F80B,
+ 0x03B, 0x00017803,
+ 0xA0000000, 0x00000000,
+ 0x03B, 0x0003F258,
+ 0x03B, 0x00030A58,
+ 0x03B, 0x0002FA58,
+ 0x03B, 0x00022590,
+ 0x03B, 0x0001FA50,
+ 0x03B, 0x00010248,
+ 0x03B, 0x00008240,
+ 0xB0000000, 0x00000000,
+ 0x0EF, 0x00000100,
+ 0x80000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A0D0,
+ 0x034, 0x000090CD,
+ 0x034, 0x000080CA,
+ 0x034, 0x0000704D,
+ 0x034, 0x0000604A,
+ 0x034, 0x00005047,
+ 0x034, 0x0000400A,
+ 0x034, 0x00003007,
+ 0x034, 0x00002004,
+ 0x034, 0x00001001,
+ 0x034, 0x00000001,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A0D0,
+ 0x034, 0x000090CD,
+ 0x034, 0x000080CA,
+ 0x034, 0x0000704D,
+ 0x034, 0x0000604A,
+ 0x034, 0x00005047,
+ 0x034, 0x0000400A,
+ 0x034, 0x00003007,
+ 0x034, 0x00002004,
+ 0x034, 0x00001001,
+ 0x034, 0x00000001,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A0D0,
+ 0x034, 0x000090CD,
+ 0x034, 0x000080CA,
+ 0x034, 0x0000704D,
+ 0x034, 0x0000604A,
+ 0x034, 0x00005047,
+ 0x034, 0x0000400A,
+ 0x034, 0x00003007,
+ 0x034, 0x00002004,
+ 0x034, 0x00001001,
+ 0x034, 0x00000001,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A0D0,
+ 0x034, 0x000090CD,
+ 0x034, 0x000080CA,
+ 0x034, 0x0000704D,
+ 0x034, 0x0000604A,
+ 0x034, 0x00005047,
+ 0x034, 0x0000400A,
+ 0x034, 0x00003007,
+ 0x034, 0x00002004,
+ 0x034, 0x00001001,
+ 0x034, 0x00000001,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A0D0,
+ 0x034, 0x000090CD,
+ 0x034, 0x000080CA,
+ 0x034, 0x0000704D,
+ 0x034, 0x0000604A,
+ 0x034, 0x00005047,
+ 0x034, 0x0000400A,
+ 0x034, 0x00003007,
+ 0x034, 0x00002004,
+ 0x034, 0x00001001,
+ 0x034, 0x00000001,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A0D0,
+ 0x034, 0x000090CD,
+ 0x034, 0x000080CA,
+ 0x034, 0x0000704D,
+ 0x034, 0x0000604A,
+ 0x034, 0x00005047,
+ 0x034, 0x0000400A,
+ 0x034, 0x00003007,
+ 0x034, 0x00002004,
+ 0x034, 0x00001001,
+ 0x034, 0x00000001,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A0D0,
+ 0x034, 0x000090CD,
+ 0x034, 0x000080CA,
+ 0x034, 0x0000704D,
+ 0x034, 0x0000604A,
+ 0x034, 0x00005047,
+ 0x034, 0x0000400A,
+ 0x034, 0x00003007,
+ 0x034, 0x00002004,
+ 0x034, 0x00001001,
+ 0x034, 0x00000001,
+ 0xA0000000, 0x00000000,
+ 0x034, 0x0000ADF6,
+ 0x034, 0x00009DF3,
+ 0x034, 0x00008DF0,
+ 0x034, 0x00007DED,
+ 0x034, 0x00006DEA,
+ 0x034, 0x00005CED,
+ 0x034, 0x00004CEA,
+ 0x034, 0x000034EA,
+ 0x034, 0x000024E7,
+ 0x034, 0x0000146A,
+ 0x034, 0x0000006B,
+ 0xB0000000, 0x00000000,
+ 0x80000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A0D0,
+ 0x034, 0x000090CD,
+ 0x034, 0x000080CA,
+ 0x034, 0x0000704D,
+ 0x034, 0x0000604A,
+ 0x034, 0x00005047,
+ 0x034, 0x0000400A,
+ 0x034, 0x00003007,
+ 0x034, 0x00002004,
+ 0x034, 0x00001001,
+ 0x034, 0x00000001,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A0D0,
+ 0x034, 0x000090CD,
+ 0x034, 0x000080CA,
+ 0x034, 0x0000704D,
+ 0x034, 0x0000604A,
+ 0x034, 0x00005047,
+ 0x034, 0x0000400A,
+ 0x034, 0x00003007,
+ 0x034, 0x00002004,
+ 0x034, 0x00001001,
+ 0x034, 0x00000001,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A0D0,
+ 0x034, 0x000090CD,
+ 0x034, 0x000080CA,
+ 0x034, 0x0000704D,
+ 0x034, 0x0000604A,
+ 0x034, 0x00005047,
+ 0x034, 0x0000400A,
+ 0x034, 0x00003007,
+ 0x034, 0x00002004,
+ 0x034, 0x00001001,
+ 0x034, 0x00000001,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A0D0,
+ 0x034, 0x000090CD,
+ 0x034, 0x000080CA,
+ 0x034, 0x0000704D,
+ 0x034, 0x0000604A,
+ 0x034, 0x00005047,
+ 0x034, 0x0000400A,
+ 0x034, 0x00003007,
+ 0x034, 0x00002004,
+ 0x034, 0x00001001,
+ 0x034, 0x00000001,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A0D0,
+ 0x034, 0x000090CD,
+ 0x034, 0x000080CA,
+ 0x034, 0x0000704D,
+ 0x034, 0x0000604A,
+ 0x034, 0x00005047,
+ 0x034, 0x0000400A,
+ 0x034, 0x00003007,
+ 0x034, 0x00002004,
+ 0x034, 0x00001001,
+ 0x034, 0x00000001,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A0D0,
+ 0x034, 0x000090CD,
+ 0x034, 0x000080CA,
+ 0x034, 0x0000704D,
+ 0x034, 0x0000604A,
+ 0x034, 0x00005047,
+ 0x034, 0x0000400A,
+ 0x034, 0x00003007,
+ 0x034, 0x00002004,
+ 0x034, 0x00001001,
+ 0x034, 0x00000001,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A0D0,
+ 0x034, 0x000090CD,
+ 0x034, 0x000080CA,
+ 0x034, 0x0000704D,
+ 0x034, 0x0000604A,
+ 0x034, 0x00005047,
+ 0x034, 0x0000400A,
+ 0x034, 0x00003007,
+ 0x034, 0x00002004,
+ 0x034, 0x00001001,
+ 0x034, 0x00000001,
+ 0xA0000000, 0x00000000,
+ 0x034, 0x0008ADF6,
+ 0x034, 0x00089DF3,
+ 0x034, 0x00088DF0,
+ 0x034, 0x00087DED,
+ 0x034, 0x00086DEA,
+ 0x034, 0x00085CED,
+ 0x034, 0x00084CEA,
+ 0x034, 0x000834EA,
+ 0x034, 0x000824E7,
+ 0x034, 0x0008146A,
+ 0x034, 0x0008006B,
+ 0xB0000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x0EF, 0x000020A2,
+ 0x0DF, 0x00000080,
+ 0x035, 0x00000192,
+ 0x035, 0x00008192,
+ 0x035, 0x00010192,
+ 0x036, 0x00000024,
+ 0x036, 0x00008024,
+ 0x036, 0x00010024,
+ 0x036, 0x00018024,
+ 0x0EF, 0x00000000,
+ 0x051, 0x00000C21,
+ 0x052, 0x000006D9,
+ 0x053, 0x000FC649,
+ 0x054, 0x0000017E,
+ 0x018, 0x0001012A,
+ 0x081, 0x0007FC00,
+ 0x089, 0x00050110,
+ 0x08A, 0x00043E50,
+ 0x08B, 0x0002E180,
+ 0x08C, 0x00093C3C,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x085, 0x000F8000,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x085, 0x000F8000,
+ 0xA0000000, 0x00000000,
+ 0x085, 0x000F8000,
+ 0xB0000000, 0x00000000,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x08D, 0x000FFFF0,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x08D, 0x000FFFF0,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x08D, 0x000FFFF0,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x08D, 0x000FFFF0,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x08D, 0x000FFFF0,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x08D, 0x000FFFF0,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x08D, 0x000FFFF0,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x08D, 0x000FFFF0,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x08D, 0x000FFFF0,
+ 0xA0000000, 0x00000000,
+ 0x08D, 0x000FFFF0,
+ 0xB0000000, 0x00000000,
+ 0x0EF, 0x00001000,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00038023,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00038023,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00038023,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00038023,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00038023,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00038023,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00038023,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00038023,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00038023,
+ 0xA0000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00038023,
+ 0xB0000000, 0x00000000,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00044000,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00048000,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00000000,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00088000,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00000000,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00044000,
+ 0x90000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00000000,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00088000,
+ 0x90000009, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00040000,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00088000,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00088000,
+ 0xA0000000, 0x00000000,
+ 0x03C, 0x00048000,
+ 0xB0000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00030023,
+ 0x03C, 0x00048000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00028623,
+ 0x03C, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00021633,
+ 0x03C, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x0001C633,
+ 0x03C, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00010293,
+ 0x03C, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00009593,
+ 0x03C, 0x00000000,
+ 0x03A, 0x00000148,
+ 0x80000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x03B, 0x00000F8B,
+ 0xA0000000, 0x00000000,
+ 0x03B, 0x0000078B,
+ 0xB0000000, 0x00000000,
+ 0x03C, 0x00000000,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00078023,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00078023,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00078023,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00078023,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00078023,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00078023,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00078023,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00078023,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00078023,
+ 0xA0000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00078023,
+ 0xB0000000, 0x00000000,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00020000,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00020000,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00000000,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00044000,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00000000,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00020000,
+ 0x90000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00000000,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00020000,
+ 0x90000009, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00000000,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00044000,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00044000,
+ 0xA0000000, 0x00000000,
+ 0x03C, 0x00024000,
+ 0xB0000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00070023,
+ 0x03C, 0x00048000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00068623,
+ 0x03C, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00061633,
+ 0x03C, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x0005C633,
+ 0x03C, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00050293,
+ 0x03C, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00049593,
+ 0x03C, 0x00000000,
+ 0x03A, 0x00000148,
+ 0x80000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x03B, 0x00040F8B,
+ 0xA0000000, 0x00000000,
+ 0x03B, 0x0004078B,
+ 0xB0000000, 0x00000000,
+ 0x03C, 0x00000000,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x000B8023,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x000B8023,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x000B8023,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x000B8023,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x000B8023,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x000B8023,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x000B8023,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x000B8023,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x000B8023,
+ 0xA0000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x000B8023,
+ 0xB0000000, 0x00000000,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00004000,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00060000,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00000000,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00024000,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00000000,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00004000,
+ 0x90000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00000000,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00060000,
+ 0x90000009, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00020000,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00024000,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00024000,
+ 0xA0000000, 0x00000000,
+ 0x03C, 0x00004000,
+ 0xB0000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x000B0023,
+ 0x80000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00020000,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00020000,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00020000,
+ 0xA0000000, 0x00000000,
+ 0x03C, 0x00020000,
+ 0xB0000000, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x000A8623,
+ 0x03C, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x000A1633,
+ 0x03C, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x0009C633,
+ 0x03C, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00090293,
+ 0x03C, 0x00000000,
+ 0x03A, 0x0000013C,
+ 0x03B, 0x00089593,
+ 0x03C, 0x00000000,
+ 0x03A, 0x00000148,
+ 0x80000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x03B, 0x0008138B,
+ 0xA0000000, 0x00000000,
+ 0x03B, 0x0008078B,
+ 0xB0000000, 0x00000000,
+ 0x03C, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x0EF, 0x00000800,
+ 0x03B, 0x00000000,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x00000803,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x00000803,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x00000803,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x00000803,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x00000803,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x00000803,
+ 0x90000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x00001003,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x00001003,
+ 0x90000009, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x00001803,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x00000803,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x00000803,
+ 0xA0000000, 0x00000000,
+ 0x03A, 0x00000803,
+ 0xB0000000, 0x00000000,
+ 0x03B, 0x00040000,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x00001000,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x00001002,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x00000000,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x00001000,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x00000001,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x00001000,
+ 0x90000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x00000000,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x00000802,
+ 0x90000009, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x00001803,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x00001000,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x00001000,
+ 0xA0000000, 0x00000000,
+ 0x03A, 0x00001000,
+ 0xB0000000, 0x00000000,
+ 0x03B, 0x00080000,
+ 0x80000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x00001802,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x00001000,
+ 0x90000009, 0x00000000, 0x40000000, 0x00000000,
+ 0x03A, 0x00000802,
+ 0xA0000000, 0x00000000,
+ 0x03A, 0x00001002,
+ 0xB0000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0xA0000000, 0x00000000,
+ 0xB0000000, 0x00000000,
+ 0x018, 0x00013124,
+ 0x0EF, 0x00000100,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0004A3EB,
+ 0x034, 0x0004938B,
+ 0x034, 0x000481AC,
+ 0x034, 0x000471A9,
+ 0x034, 0x000460AC,
+ 0x034, 0x000450A9,
+ 0x034, 0x0004402E,
+ 0x034, 0x0004302B,
+ 0x034, 0x00042028,
+ 0x034, 0x0004100B,
+ 0x034, 0x00040008,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0004A3AD,
+ 0x034, 0x0004938A,
+ 0x034, 0x0004818C,
+ 0x034, 0x00047189,
+ 0x034, 0x0004606D,
+ 0x034, 0x0004506A,
+ 0x034, 0x0004402C,
+ 0x034, 0x00043029,
+ 0x034, 0x00042026,
+ 0x034, 0x00041009,
+ 0x034, 0x00040006,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0004A3AD,
+ 0x034, 0x0004938A,
+ 0x034, 0x0004818C,
+ 0x034, 0x00047189,
+ 0x034, 0x0004606D,
+ 0x034, 0x0004506A,
+ 0x034, 0x0004402C,
+ 0x034, 0x00043029,
+ 0x034, 0x00042026,
+ 0x034, 0x00041009,
+ 0x034, 0x00040006,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0004A3AD,
+ 0x034, 0x0004938A,
+ 0x034, 0x0004818C,
+ 0x034, 0x00047189,
+ 0x034, 0x0004606D,
+ 0x034, 0x0004506A,
+ 0x034, 0x0004402C,
+ 0x034, 0x00043029,
+ 0x034, 0x00042026,
+ 0x034, 0x00041009,
+ 0x034, 0x00040006,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0004A3AD,
+ 0x034, 0x0004938A,
+ 0x034, 0x0004818C,
+ 0x034, 0x00047189,
+ 0x034, 0x0004606D,
+ 0x034, 0x0004506A,
+ 0x034, 0x0004402C,
+ 0x034, 0x00043029,
+ 0x034, 0x00042026,
+ 0x034, 0x00041009,
+ 0x034, 0x00040006,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0004A3F4,
+ 0x034, 0x000493D2,
+ 0x034, 0x000482D1,
+ 0x034, 0x000471F1,
+ 0x034, 0x000461EE,
+ 0x034, 0x000451EB,
+ 0x034, 0x000441E8,
+ 0x034, 0x0004314B,
+ 0x034, 0x00042148,
+ 0x034, 0x0004104B,
+ 0x034, 0x00040048,
+ 0x90000009, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0004AFF7,
+ 0x034, 0x00049FF6,
+ 0x034, 0x00048FF3,
+ 0x034, 0x00047FF0,
+ 0x034, 0x00046FED,
+ 0x034, 0x00045FEA,
+ 0x034, 0x00044FE7,
+ 0x034, 0x00043CB1,
+ 0x034, 0x00042CAE,
+ 0x034, 0x00041CAB,
+ 0x034, 0x00040CA8,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0004A3AD,
+ 0x034, 0x0004938A,
+ 0x034, 0x0004818C,
+ 0x034, 0x00047189,
+ 0x034, 0x0004606D,
+ 0x034, 0x0004506A,
+ 0x034, 0x0004402C,
+ 0x034, 0x00043029,
+ 0x034, 0x00042026,
+ 0x034, 0x00041009,
+ 0x034, 0x00040006,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0004A3AD,
+ 0x034, 0x0004938A,
+ 0x034, 0x0004818C,
+ 0x034, 0x00047189,
+ 0x034, 0x0004606D,
+ 0x034, 0x0004506A,
+ 0x034, 0x0004402C,
+ 0x034, 0x00043029,
+ 0x034, 0x00042026,
+ 0x034, 0x00041009,
+ 0x034, 0x00040006,
+ 0xA0000000, 0x00000000,
+ 0x034, 0x0004AFF4,
+ 0x034, 0x00049FF1,
+ 0x034, 0x00048FEE,
+ 0x034, 0x00047FEB,
+ 0x034, 0x00046FE8,
+ 0x034, 0x00045DEA,
+ 0x034, 0x00044CED,
+ 0x034, 0x00043CEA,
+ 0x034, 0x00042C6C,
+ 0x034, 0x00041C69,
+ 0x034, 0x00040C2B,
+ 0xB0000000, 0x00000000,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0002A3EE,
+ 0x034, 0x000293EB,
+ 0x034, 0x0002838B,
+ 0x034, 0x000271AC,
+ 0x034, 0x000261A9,
+ 0x034, 0x000250AC,
+ 0x034, 0x000240A9,
+ 0x034, 0x000230A6,
+ 0x034, 0x0002202C,
+ 0x034, 0x00021029,
+ 0x034, 0x00020026,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0002A3AD,
+ 0x034, 0x0002938A,
+ 0x034, 0x0002818C,
+ 0x034, 0x00027189,
+ 0x034, 0x0002606D,
+ 0x034, 0x0002504C,
+ 0x034, 0x0002402C,
+ 0x034, 0x00023029,
+ 0x034, 0x00022026,
+ 0x034, 0x00021023,
+ 0x034, 0x00020006,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0002A3AD,
+ 0x034, 0x0002938A,
+ 0x034, 0x0002818C,
+ 0x034, 0x00027189,
+ 0x034, 0x0002606D,
+ 0x034, 0x0002504C,
+ 0x034, 0x0002402C,
+ 0x034, 0x00023029,
+ 0x034, 0x00022026,
+ 0x034, 0x00021023,
+ 0x034, 0x00020006,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0002A3AD,
+ 0x034, 0x0002938A,
+ 0x034, 0x0002818C,
+ 0x034, 0x00027189,
+ 0x034, 0x0002606D,
+ 0x034, 0x0002504C,
+ 0x034, 0x0002402C,
+ 0x034, 0x00023029,
+ 0x034, 0x00022026,
+ 0x034, 0x00021023,
+ 0x034, 0x00020006,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0002A3AD,
+ 0x034, 0x0002938A,
+ 0x034, 0x0002818C,
+ 0x034, 0x00027189,
+ 0x034, 0x0002606D,
+ 0x034, 0x0002504C,
+ 0x034, 0x0002402C,
+ 0x034, 0x00023029,
+ 0x034, 0x00022026,
+ 0x034, 0x00021023,
+ 0x034, 0x00020006,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0002A3F5,
+ 0x034, 0x000293D2,
+ 0x034, 0x000283CE,
+ 0x034, 0x00027290,
+ 0x034, 0x0002628D,
+ 0x034, 0x0002528A,
+ 0x034, 0x00024287,
+ 0x034, 0x0002308D,
+ 0x034, 0x0002208A,
+ 0x034, 0x00021087,
+ 0x034, 0x00020048,
+ 0x90000009, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0002AFF7,
+ 0x034, 0x00029FF6,
+ 0x034, 0x00028FF3,
+ 0x034, 0x00027FF0,
+ 0x034, 0x00026FED,
+ 0x034, 0x00025FEA,
+ 0x034, 0x00024FE7,
+ 0x034, 0x00023DEA,
+ 0x034, 0x00022DE7,
+ 0x034, 0x00021DE4,
+ 0x034, 0x00020D48,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0002A3AD,
+ 0x034, 0x0002938A,
+ 0x034, 0x0002818C,
+ 0x034, 0x00027189,
+ 0x034, 0x0002606D,
+ 0x034, 0x0002504C,
+ 0x034, 0x0002402C,
+ 0x034, 0x00023029,
+ 0x034, 0x00022026,
+ 0x034, 0x00021023,
+ 0x034, 0x00020006,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0002A3AD,
+ 0x034, 0x0002938A,
+ 0x034, 0x0002818C,
+ 0x034, 0x00027189,
+ 0x034, 0x0002606D,
+ 0x034, 0x0002504C,
+ 0x034, 0x0002402C,
+ 0x034, 0x00023029,
+ 0x034, 0x00022026,
+ 0x034, 0x00021023,
+ 0x034, 0x00020006,
+ 0xA0000000, 0x00000000,
+ 0x034, 0x0002AFF4,
+ 0x034, 0x00029FF1,
+ 0x034, 0x00028FEE,
+ 0x034, 0x00027FEB,
+ 0x034, 0x00026FE8,
+ 0x034, 0x00025DEA,
+ 0x034, 0x00024CED,
+ 0x034, 0x00023CEA,
+ 0x034, 0x00022C6C,
+ 0x034, 0x00021C69,
+ 0x034, 0x00020C2B,
+ 0xB0000000, 0x00000000,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A3EF,
+ 0x034, 0x000093EC,
+ 0x034, 0x0000838C,
+ 0x034, 0x000071AD,
+ 0x034, 0x000061AA,
+ 0x034, 0x000050AD,
+ 0x034, 0x000040AA,
+ 0x034, 0x0000306A,
+ 0x034, 0x0000202D,
+ 0x034, 0x0000102A,
+ 0x034, 0x00000027,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A3EE,
+ 0x034, 0x000093AC,
+ 0x034, 0x0000838A,
+ 0x034, 0x0000718C,
+ 0x034, 0x00006189,
+ 0x034, 0x0000506D,
+ 0x034, 0x0000406A,
+ 0x034, 0x0000302C,
+ 0x034, 0x00002029,
+ 0x034, 0x00001026,
+ 0x034, 0x00000023,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A3EE,
+ 0x034, 0x000093AC,
+ 0x034, 0x0000838A,
+ 0x034, 0x0000718C,
+ 0x034, 0x00006189,
+ 0x034, 0x0000506D,
+ 0x034, 0x0000406A,
+ 0x034, 0x0000302C,
+ 0x034, 0x00002029,
+ 0x034, 0x00001026,
+ 0x034, 0x00000023,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A3EE,
+ 0x034, 0x000093AC,
+ 0x034, 0x0000838A,
+ 0x034, 0x0000718C,
+ 0x034, 0x00006189,
+ 0x034, 0x0000506D,
+ 0x034, 0x0000406A,
+ 0x034, 0x0000302C,
+ 0x034, 0x00002029,
+ 0x034, 0x00001026,
+ 0x034, 0x00000023,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A3EE,
+ 0x034, 0x000093AC,
+ 0x034, 0x0000838A,
+ 0x034, 0x0000718C,
+ 0x034, 0x00006189,
+ 0x034, 0x0000506D,
+ 0x034, 0x0000406A,
+ 0x034, 0x0000302C,
+ 0x034, 0x00002029,
+ 0x034, 0x00001026,
+ 0x034, 0x00000023,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A3F1,
+ 0x034, 0x000092B1,
+ 0x034, 0x000081CF,
+ 0x034, 0x00007170,
+ 0x034, 0x0000616D,
+ 0x034, 0x0000516A,
+ 0x034, 0x00004167,
+ 0x034, 0x0000302F,
+ 0x034, 0x0000202C,
+ 0x034, 0x00001029,
+ 0x034, 0x00000026,
+ 0x90000009, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000AFF7,
+ 0x034, 0x00009FF6,
+ 0x034, 0x00008FF3,
+ 0x034, 0x00007FF0,
+ 0x034, 0x00006FED,
+ 0x034, 0x00005FEA,
+ 0x034, 0x00004FE7,
+ 0x034, 0x00003EC7,
+ 0x034, 0x00002EC4,
+ 0x034, 0x00001D4B,
+ 0x034, 0x00000D48,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A3EE,
+ 0x034, 0x000093AC,
+ 0x034, 0x0000838A,
+ 0x034, 0x0000718C,
+ 0x034, 0x00006189,
+ 0x034, 0x0000506D,
+ 0x034, 0x0000406A,
+ 0x034, 0x0000302C,
+ 0x034, 0x00002029,
+ 0x034, 0x00001026,
+ 0x034, 0x00000023,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0000A3EE,
+ 0x034, 0x000093AC,
+ 0x034, 0x0000838A,
+ 0x034, 0x0000718C,
+ 0x034, 0x00006189,
+ 0x034, 0x0000506D,
+ 0x034, 0x0000406A,
+ 0x034, 0x0000302C,
+ 0x034, 0x00002029,
+ 0x034, 0x00001026,
+ 0x034, 0x00000023,
+ 0xA0000000, 0x00000000,
+ 0x034, 0x0000AFF4,
+ 0x034, 0x00009FF1,
+ 0x034, 0x00008FEE,
+ 0x034, 0x00007FEB,
+ 0x034, 0x00006FE8,
+ 0x034, 0x00005DEA,
+ 0x034, 0x00004CED,
+ 0x034, 0x00003CEA,
+ 0x034, 0x00002C6C,
+ 0x034, 0x00001C69,
+ 0x034, 0x00000C2B,
+ 0xB0000000, 0x00000000,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x000CA3EB,
+ 0x034, 0x000C938B,
+ 0x034, 0x000C81AC,
+ 0x034, 0x000C71A9,
+ 0x034, 0x000C60AC,
+ 0x034, 0x000C50A9,
+ 0x034, 0x000C402E,
+ 0x034, 0x000C302B,
+ 0x034, 0x000C2028,
+ 0x034, 0x000C100B,
+ 0x034, 0x000C0008,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x000CA3AD,
+ 0x034, 0x000C938A,
+ 0x034, 0x000C818C,
+ 0x034, 0x000C7189,
+ 0x034, 0x000C606D,
+ 0x034, 0x000C506A,
+ 0x034, 0x000C402C,
+ 0x034, 0x000C3029,
+ 0x034, 0x000C2026,
+ 0x034, 0x000C1009,
+ 0x034, 0x000C0006,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x000CA3AD,
+ 0x034, 0x000C938A,
+ 0x034, 0x000C818C,
+ 0x034, 0x000C7189,
+ 0x034, 0x000C606D,
+ 0x034, 0x000C506A,
+ 0x034, 0x000C402C,
+ 0x034, 0x000C3029,
+ 0x034, 0x000C2026,
+ 0x034, 0x000C1009,
+ 0x034, 0x000C0006,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x000CA3AD,
+ 0x034, 0x000C938A,
+ 0x034, 0x000C818C,
+ 0x034, 0x000C7189,
+ 0x034, 0x000C606D,
+ 0x034, 0x000C506A,
+ 0x034, 0x000C402C,
+ 0x034, 0x000C3029,
+ 0x034, 0x000C2026,
+ 0x034, 0x000C1009,
+ 0x034, 0x000C0006,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x000CA3AD,
+ 0x034, 0x000C938A,
+ 0x034, 0x000C818C,
+ 0x034, 0x000C7189,
+ 0x034, 0x000C606D,
+ 0x034, 0x000C506A,
+ 0x034, 0x000C402C,
+ 0x034, 0x000C3029,
+ 0x034, 0x000C2026,
+ 0x034, 0x000C1009,
+ 0x034, 0x000C0006,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x000CA3F4,
+ 0x034, 0x000C93D2,
+ 0x034, 0x000C82D1,
+ 0x034, 0x000C71F1,
+ 0x034, 0x000C61EE,
+ 0x034, 0x000C51EB,
+ 0x034, 0x000C41E8,
+ 0x034, 0x000C314B,
+ 0x034, 0x000C2148,
+ 0x034, 0x000C104B,
+ 0x034, 0x000C0048,
+ 0x90000009, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x000CAFF7,
+ 0x034, 0x000C9FF6,
+ 0x034, 0x000C8FF3,
+ 0x034, 0x000C7FF0,
+ 0x034, 0x000C6FED,
+ 0x034, 0x000C5FEA,
+ 0x034, 0x000C4FE7,
+ 0x034, 0x000C3CB1,
+ 0x034, 0x000C2CAE,
+ 0x034, 0x000C1CAB,
+ 0x034, 0x000C0CA8,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x000CA3AD,
+ 0x034, 0x000C938A,
+ 0x034, 0x000C818C,
+ 0x034, 0x000C7189,
+ 0x034, 0x000C606D,
+ 0x034, 0x000C506A,
+ 0x034, 0x000C402C,
+ 0x034, 0x000C3029,
+ 0x034, 0x000C2026,
+ 0x034, 0x000C1009,
+ 0x034, 0x000C0006,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x000CA3AD,
+ 0x034, 0x000C938A,
+ 0x034, 0x000C818C,
+ 0x034, 0x000C7189,
+ 0x034, 0x000C606D,
+ 0x034, 0x000C506A,
+ 0x034, 0x000C402C,
+ 0x034, 0x000C3029,
+ 0x034, 0x000C2026,
+ 0x034, 0x000C1009,
+ 0x034, 0x000C0006,
+ 0xA0000000, 0x00000000,
+ 0x034, 0x000CA794,
+ 0x034, 0x000C9791,
+ 0x034, 0x000C878E,
+ 0x034, 0x000C778B,
+ 0x034, 0x000C658D,
+ 0x034, 0x000C558A,
+ 0x034, 0x000C448D,
+ 0x034, 0x000C348A,
+ 0x034, 0x000C244C,
+ 0x034, 0x000C1449,
+ 0x034, 0x000C042B,
+ 0xB0000000, 0x00000000,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x000AA3EE,
+ 0x034, 0x000A93EB,
+ 0x034, 0x000A838B,
+ 0x034, 0x000A71AC,
+ 0x034, 0x000A61A9,
+ 0x034, 0x000A50AC,
+ 0x034, 0x000A40A9,
+ 0x034, 0x000A30A6,
+ 0x034, 0x000A202C,
+ 0x034, 0x000A1029,
+ 0x034, 0x000A0026,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x000AA3AD,
+ 0x034, 0x000A938A,
+ 0x034, 0x000A818C,
+ 0x034, 0x000A7189,
+ 0x034, 0x000A606D,
+ 0x034, 0x000A504C,
+ 0x034, 0x000A402C,
+ 0x034, 0x000A3029,
+ 0x034, 0x000A2026,
+ 0x034, 0x000A1023,
+ 0x034, 0x000A0006,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x000AA3AD,
+ 0x034, 0x000A938A,
+ 0x034, 0x000A818C,
+ 0x034, 0x000A7189,
+ 0x034, 0x000A606D,
+ 0x034, 0x000A504C,
+ 0x034, 0x000A402C,
+ 0x034, 0x000A3029,
+ 0x034, 0x000A2026,
+ 0x034, 0x000A1023,
+ 0x034, 0x000A0006,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x000AA3AD,
+ 0x034, 0x000A938A,
+ 0x034, 0x000A818C,
+ 0x034, 0x000A7189,
+ 0x034, 0x000A606D,
+ 0x034, 0x000A504C,
+ 0x034, 0x000A402C,
+ 0x034, 0x000A3029,
+ 0x034, 0x000A2026,
+ 0x034, 0x000A1023,
+ 0x034, 0x000A0006,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x000AA3AD,
+ 0x034, 0x000A938A,
+ 0x034, 0x000A818C,
+ 0x034, 0x000A7189,
+ 0x034, 0x000A606D,
+ 0x034, 0x000A504C,
+ 0x034, 0x000A402C,
+ 0x034, 0x000A3029,
+ 0x034, 0x000A2026,
+ 0x034, 0x000A1023,
+ 0x034, 0x000A0006,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x000AA3F5,
+ 0x034, 0x000A93D2,
+ 0x034, 0x000A83CE,
+ 0x034, 0x000A7290,
+ 0x034, 0x000A628D,
+ 0x034, 0x000A528A,
+ 0x034, 0x000A4287,
+ 0x034, 0x000A308D,
+ 0x034, 0x000A208A,
+ 0x034, 0x000A1087,
+ 0x034, 0x000A0048,
+ 0x90000009, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x000AAFF7,
+ 0x034, 0x000A9FF6,
+ 0x034, 0x000A8FF3,
+ 0x034, 0x000A7FF0,
+ 0x034, 0x000A6FED,
+ 0x034, 0x000A5FEA,
+ 0x034, 0x000A4FE7,
+ 0x034, 0x000A3DEA,
+ 0x034, 0x000A2DE7,
+ 0x034, 0x000A1DE4,
+ 0x034, 0x000A0D48,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x000AA3AD,
+ 0x034, 0x000A938A,
+ 0x034, 0x000A818C,
+ 0x034, 0x000A7189,
+ 0x034, 0x000A606D,
+ 0x034, 0x000A504C,
+ 0x034, 0x000A402C,
+ 0x034, 0x000A3029,
+ 0x034, 0x000A2026,
+ 0x034, 0x000A1023,
+ 0x034, 0x000A0006,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x000AA3AD,
+ 0x034, 0x000A938A,
+ 0x034, 0x000A818C,
+ 0x034, 0x000A7189,
+ 0x034, 0x000A606D,
+ 0x034, 0x000A504C,
+ 0x034, 0x000A402C,
+ 0x034, 0x000A3029,
+ 0x034, 0x000A2026,
+ 0x034, 0x000A1023,
+ 0x034, 0x000A0006,
+ 0xA0000000, 0x00000000,
+ 0x034, 0x000AA794,
+ 0x034, 0x000A9791,
+ 0x034, 0x000A878E,
+ 0x034, 0x000A778B,
+ 0x034, 0x000A658D,
+ 0x034, 0x000A558A,
+ 0x034, 0x000A448D,
+ 0x034, 0x000A348A,
+ 0x034, 0x000A244C,
+ 0x034, 0x000A1449,
+ 0x034, 0x000A042B,
+ 0xB0000000, 0x00000000,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0008A3EF,
+ 0x034, 0x000893EC,
+ 0x034, 0x0008838C,
+ 0x034, 0x000871AD,
+ 0x034, 0x000861AA,
+ 0x034, 0x000850AD,
+ 0x034, 0x000840AA,
+ 0x034, 0x0008306A,
+ 0x034, 0x0008202D,
+ 0x034, 0x0008102A,
+ 0x034, 0x00080027,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0008A3EE,
+ 0x034, 0x000893AC,
+ 0x034, 0x0008838A,
+ 0x034, 0x0008718C,
+ 0x034, 0x00086189,
+ 0x034, 0x0008506D,
+ 0x034, 0x0008406A,
+ 0x034, 0x0008302C,
+ 0x034, 0x00082029,
+ 0x034, 0x00081026,
+ 0x034, 0x00080023,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0008A3EE,
+ 0x034, 0x000893AC,
+ 0x034, 0x0008838A,
+ 0x034, 0x0008718C,
+ 0x034, 0x00086189,
+ 0x034, 0x0008506D,
+ 0x034, 0x0008406A,
+ 0x034, 0x0008302C,
+ 0x034, 0x00082029,
+ 0x034, 0x00081026,
+ 0x034, 0x00080023,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0008A3EE,
+ 0x034, 0x000893AC,
+ 0x034, 0x0008838A,
+ 0x034, 0x0008718C,
+ 0x034, 0x00086189,
+ 0x034, 0x0008506D,
+ 0x034, 0x0008406A,
+ 0x034, 0x0008302C,
+ 0x034, 0x00082029,
+ 0x034, 0x00081026,
+ 0x034, 0x00080023,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0008A3EE,
+ 0x034, 0x000893AC,
+ 0x034, 0x0008838A,
+ 0x034, 0x0008718C,
+ 0x034, 0x00086189,
+ 0x034, 0x0008506D,
+ 0x034, 0x0008406A,
+ 0x034, 0x0008302C,
+ 0x034, 0x00082029,
+ 0x034, 0x00081026,
+ 0x034, 0x00080023,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0008A3F1,
+ 0x034, 0x000892B1,
+ 0x034, 0x000881CF,
+ 0x034, 0x00087170,
+ 0x034, 0x0008616D,
+ 0x034, 0x0008516A,
+ 0x034, 0x00084167,
+ 0x034, 0x0008302F,
+ 0x034, 0x0008202C,
+ 0x034, 0x00081029,
+ 0x034, 0x00080026,
+ 0x90000009, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0008AFF7,
+ 0x034, 0x00089FF6,
+ 0x034, 0x00088FF3,
+ 0x034, 0x00087FF0,
+ 0x034, 0x00086FED,
+ 0x034, 0x00085FEA,
+ 0x034, 0x00084FE7,
+ 0x034, 0x00083EC7,
+ 0x034, 0x00082EC4,
+ 0x034, 0x00081D4B,
+ 0x034, 0x00080D48,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0008A3EE,
+ 0x034, 0x000893AC,
+ 0x034, 0x0008838A,
+ 0x034, 0x0008718C,
+ 0x034, 0x00086189,
+ 0x034, 0x0008506D,
+ 0x034, 0x0008406A,
+ 0x034, 0x0008302C,
+ 0x034, 0x00082029,
+ 0x034, 0x00081026,
+ 0x034, 0x00080023,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x034, 0x0008A3EE,
+ 0x034, 0x000893AC,
+ 0x034, 0x0008838A,
+ 0x034, 0x0008718C,
+ 0x034, 0x00086189,
+ 0x034, 0x0008506D,
+ 0x034, 0x0008406A,
+ 0x034, 0x0008302C,
+ 0x034, 0x00082029,
+ 0x034, 0x00081026,
+ 0x034, 0x00080023,
+ 0xA0000000, 0x00000000,
+ 0x034, 0x0008A794,
+ 0x034, 0x00089791,
+ 0x034, 0x0008878E,
+ 0x034, 0x0008778B,
+ 0x034, 0x0008658D,
+ 0x034, 0x0008558A,
+ 0x034, 0x0008448D,
+ 0x034, 0x0008348A,
+ 0x034, 0x0008244C,
+ 0x034, 0x00081449,
+ 0x034, 0x0008042B,
+ 0xB0000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x0DF, 0x00000001,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x0DF, 0x00000001,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x0DF, 0x00000001,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x0DF, 0x00000001,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x0DF, 0x00000001,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x0DF, 0x00000001,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x0DF, 0x00000001,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x0DF, 0x00000001,
+ 0xA0000000, 0x00000000,
+ 0x0DF, 0x00000000,
+ 0xB0000000, 0x00000000,
+ 0x018, 0x0001712A,
+ 0x0EF, 0x00000040,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x035, 0x000006CC,
+ 0x035, 0x000086CC,
+ 0x035, 0x000106CC,
+ 0x035, 0x000206CC,
+ 0x035, 0x000286CC,
+ 0x035, 0x000306CC,
+ 0x035, 0x000406CC,
+ 0x035, 0x000486CC,
+ 0x035, 0x000506CC,
+ 0x035, 0x000806CC,
+ 0x035, 0x000886CC,
+ 0x035, 0x000906CC,
+ 0x035, 0x000A06CC,
+ 0x035, 0x000A86CC,
+ 0x035, 0x000B06CC,
+ 0x035, 0x000C06CC,
+ 0x035, 0x000C86CC,
+ 0x035, 0x000D06CC,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x035, 0x000006CC,
+ 0x035, 0x000086CC,
+ 0x035, 0x000106CC,
+ 0x035, 0x000206CC,
+ 0x035, 0x000286CC,
+ 0x035, 0x000306CC,
+ 0x035, 0x000406CC,
+ 0x035, 0x000486CC,
+ 0x035, 0x000506CC,
+ 0x035, 0x000806CC,
+ 0x035, 0x000886CC,
+ 0x035, 0x000906CC,
+ 0x035, 0x000A06CC,
+ 0x035, 0x000A86CC,
+ 0x035, 0x000B06CC,
+ 0x035, 0x000C06CC,
+ 0x035, 0x000C86CC,
+ 0x035, 0x000D06CC,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x035, 0x000006CC,
+ 0x035, 0x000086CC,
+ 0x035, 0x000106CC,
+ 0x035, 0x000206CC,
+ 0x035, 0x000286CC,
+ 0x035, 0x000306CC,
+ 0x035, 0x000406CC,
+ 0x035, 0x000486CC,
+ 0x035, 0x000506CC,
+ 0x035, 0x000806CC,
+ 0x035, 0x000886CC,
+ 0x035, 0x000906CC,
+ 0x035, 0x000A06CC,
+ 0x035, 0x000A86CC,
+ 0x035, 0x000B06CC,
+ 0x035, 0x000C06CC,
+ 0x035, 0x000C86CC,
+ 0x035, 0x000D06CC,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x035, 0x000006CC,
+ 0x035, 0x000086CC,
+ 0x035, 0x000106CC,
+ 0x035, 0x000206CC,
+ 0x035, 0x000286CC,
+ 0x035, 0x000306CC,
+ 0x035, 0x000406CC,
+ 0x035, 0x000486CC,
+ 0x035, 0x000506CC,
+ 0x035, 0x000806CC,
+ 0x035, 0x000886CC,
+ 0x035, 0x000906CC,
+ 0x035, 0x000A06CC,
+ 0x035, 0x000A86CC,
+ 0x035, 0x000B06CC,
+ 0x035, 0x000C06CC,
+ 0x035, 0x000C86CC,
+ 0x035, 0x000D06CC,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x035, 0x000006CC,
+ 0x035, 0x000086CC,
+ 0x035, 0x000106CC,
+ 0x035, 0x000206CC,
+ 0x035, 0x000286CC,
+ 0x035, 0x000306CC,
+ 0x035, 0x000406CC,
+ 0x035, 0x000486CC,
+ 0x035, 0x000506CC,
+ 0x035, 0x000806CC,
+ 0x035, 0x000886CC,
+ 0x035, 0x000906CC,
+ 0x035, 0x000A06CC,
+ 0x035, 0x000A86CC,
+ 0x035, 0x000B06CC,
+ 0x035, 0x000C06CC,
+ 0x035, 0x000C86CC,
+ 0x035, 0x000D06CC,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x035, 0x000006CC,
+ 0x035, 0x000086CC,
+ 0x035, 0x000106CC,
+ 0x035, 0x000206CC,
+ 0x035, 0x000286CC,
+ 0x035, 0x000306CC,
+ 0x035, 0x000406CC,
+ 0x035, 0x000486CC,
+ 0x035, 0x000506CC,
+ 0x035, 0x000806CC,
+ 0x035, 0x000886CC,
+ 0x035, 0x000906CC,
+ 0x035, 0x000A06CC,
+ 0x035, 0x000A86CC,
+ 0x035, 0x000B06CC,
+ 0x035, 0x000C06CC,
+ 0x035, 0x000C86CC,
+ 0x035, 0x000D06CC,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x035, 0x000006CC,
+ 0x035, 0x000086CC,
+ 0x035, 0x000106CC,
+ 0x035, 0x000206CC,
+ 0x035, 0x000286CC,
+ 0x035, 0x000306CC,
+ 0x035, 0x000406CC,
+ 0x035, 0x000486CC,
+ 0x035, 0x000506CC,
+ 0x035, 0x000806CC,
+ 0x035, 0x000886CC,
+ 0x035, 0x000906CC,
+ 0x035, 0x000A06CC,
+ 0x035, 0x000A86CC,
+ 0x035, 0x000B06CC,
+ 0x035, 0x000C06CC,
+ 0x035, 0x000C86CC,
+ 0x035, 0x000D06CC,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x035, 0x000006CC,
+ 0x035, 0x000086CC,
+ 0x035, 0x000106CC,
+ 0x035, 0x000206CC,
+ 0x035, 0x000286CC,
+ 0x035, 0x000306CC,
+ 0x035, 0x000406CC,
+ 0x035, 0x000486CC,
+ 0x035, 0x000506CC,
+ 0x035, 0x000806CC,
+ 0x035, 0x000886CC,
+ 0x035, 0x000906CC,
+ 0x035, 0x000A06CC,
+ 0x035, 0x000A86CC,
+ 0x035, 0x000B06CC,
+ 0x035, 0x000C06CC,
+ 0x035, 0x000C86CC,
+ 0x035, 0x000D06CC,
+ 0xA0000000, 0x00000000,
+ 0x035, 0x00000484,
+ 0x035, 0x00008484,
+ 0x035, 0x00010484,
+ 0x035, 0x00020584,
+ 0x035, 0x00028584,
+ 0x035, 0x00030584,
+ 0x035, 0x00040584,
+ 0x035, 0x00048584,
+ 0x035, 0x00050584,
+ 0x035, 0x000805FB,
+ 0x035, 0x000885FB,
+ 0x035, 0x000905FB,
+ 0x035, 0x000A05FB,
+ 0x035, 0x000A85FB,
+ 0x035, 0x000B05FB,
+ 0x035, 0x000C05FB,
+ 0x035, 0x000C85FB,
+ 0x035, 0x000D05FB,
+ 0xB0000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x0DF, 0x00000001,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x0DF, 0x00000001,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x0DF, 0x00000001,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x0DF, 0x00000001,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x0DF, 0x00000001,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x0DF, 0x00000001,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x0DF, 0x00000001,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x0DF, 0x00000001,
+ 0xA0000000, 0x00000000,
+ 0x0DF, 0x00000000,
+ 0xB0000000, 0x00000000,
+ 0x018, 0x0001712A,
+ 0x0EF, 0x00000010,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x036, 0x00000473,
+ 0x036, 0x00008473,
+ 0x036, 0x00010473,
+ 0x036, 0x00020473,
+ 0x036, 0x00028473,
+ 0x036, 0x00030473,
+ 0x036, 0x00040473,
+ 0x036, 0x00048473,
+ 0x036, 0x00050473,
+ 0x036, 0x00080473,
+ 0x036, 0x00088473,
+ 0x036, 0x00090473,
+ 0x036, 0x000A0473,
+ 0x036, 0x000A8473,
+ 0x036, 0x000B0473,
+ 0x036, 0x000C0473,
+ 0x036, 0x000C8473,
+ 0x036, 0x000D0473,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x036, 0x00000475,
+ 0x036, 0x00008475,
+ 0x036, 0x00010475,
+ 0x036, 0x00020475,
+ 0x036, 0x00028475,
+ 0x036, 0x00030475,
+ 0x036, 0x00040475,
+ 0x036, 0x00048475,
+ 0x036, 0x00050475,
+ 0x036, 0x00080475,
+ 0x036, 0x00088475,
+ 0x036, 0x00090475,
+ 0x036, 0x000A0475,
+ 0x036, 0x000A8475,
+ 0x036, 0x000B0475,
+ 0x036, 0x000C0475,
+ 0x036, 0x000C8475,
+ 0x036, 0x000D0475,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x036, 0x00000475,
+ 0x036, 0x00008475,
+ 0x036, 0x00010475,
+ 0x036, 0x00020475,
+ 0x036, 0x00028475,
+ 0x036, 0x00030475,
+ 0x036, 0x00040475,
+ 0x036, 0x00048475,
+ 0x036, 0x00050475,
+ 0x036, 0x00080475,
+ 0x036, 0x00088475,
+ 0x036, 0x00090475,
+ 0x036, 0x000A0475,
+ 0x036, 0x000A8475,
+ 0x036, 0x000B0475,
+ 0x036, 0x000C0475,
+ 0x036, 0x000C8475,
+ 0x036, 0x000D0475,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x036, 0x00000475,
+ 0x036, 0x00008475,
+ 0x036, 0x00010475,
+ 0x036, 0x00020475,
+ 0x036, 0x00028475,
+ 0x036, 0x00030475,
+ 0x036, 0x00040475,
+ 0x036, 0x00048475,
+ 0x036, 0x00050475,
+ 0x036, 0x00080475,
+ 0x036, 0x00088475,
+ 0x036, 0x00090475,
+ 0x036, 0x000A0475,
+ 0x036, 0x000A8475,
+ 0x036, 0x000B0475,
+ 0x036, 0x000C0475,
+ 0x036, 0x000C8475,
+ 0x036, 0x000D0475,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x036, 0x00000475,
+ 0x036, 0x00008475,
+ 0x036, 0x00010475,
+ 0x036, 0x00020475,
+ 0x036, 0x00028475,
+ 0x036, 0x00030475,
+ 0x036, 0x00040475,
+ 0x036, 0x00048475,
+ 0x036, 0x00050475,
+ 0x036, 0x00080475,
+ 0x036, 0x00088475,
+ 0x036, 0x00090475,
+ 0x036, 0x000A0475,
+ 0x036, 0x000A8475,
+ 0x036, 0x000B0475,
+ 0x036, 0x000C0475,
+ 0x036, 0x000C8475,
+ 0x036, 0x000D0475,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x036, 0x00000475,
+ 0x036, 0x00008475,
+ 0x036, 0x00010475,
+ 0x036, 0x00020475,
+ 0x036, 0x00028475,
+ 0x036, 0x00030475,
+ 0x036, 0x00040475,
+ 0x036, 0x00048475,
+ 0x036, 0x00050475,
+ 0x036, 0x00080475,
+ 0x036, 0x00088475,
+ 0x036, 0x00090475,
+ 0x036, 0x000A0475,
+ 0x036, 0x000A8475,
+ 0x036, 0x000B0475,
+ 0x036, 0x000C0475,
+ 0x036, 0x000C8475,
+ 0x036, 0x000D0475,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x036, 0x00000475,
+ 0x036, 0x00008475,
+ 0x036, 0x00010475,
+ 0x036, 0x00020475,
+ 0x036, 0x00028475,
+ 0x036, 0x00030475,
+ 0x036, 0x00040475,
+ 0x036, 0x00048475,
+ 0x036, 0x00050475,
+ 0x036, 0x00080475,
+ 0x036, 0x00088475,
+ 0x036, 0x00090475,
+ 0x036, 0x000A0475,
+ 0x036, 0x000A8475,
+ 0x036, 0x000B0475,
+ 0x036, 0x000C0475,
+ 0x036, 0x000C8475,
+ 0x036, 0x000D0475,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x036, 0x00000475,
+ 0x036, 0x00008475,
+ 0x036, 0x00010475,
+ 0x036, 0x00020475,
+ 0x036, 0x00028475,
+ 0x036, 0x00030475,
+ 0x036, 0x00040475,
+ 0x036, 0x00048475,
+ 0x036, 0x00050475,
+ 0x036, 0x00080475,
+ 0x036, 0x00088475,
+ 0x036, 0x00090475,
+ 0x036, 0x000A0475,
+ 0x036, 0x000A8475,
+ 0x036, 0x000B0475,
+ 0x036, 0x000C0475,
+ 0x036, 0x000C8475,
+ 0x036, 0x000D0475,
+ 0xA0000000, 0x00000000,
+ 0x036, 0x00000474,
+ 0x036, 0x00008474,
+ 0x036, 0x00010474,
+ 0x036, 0x00020474,
+ 0x036, 0x00028474,
+ 0x036, 0x00030474,
+ 0x036, 0x00040474,
+ 0x036, 0x00048474,
+ 0x036, 0x00050474,
+ 0x036, 0x00080474,
+ 0x036, 0x00088474,
+ 0x036, 0x00090474,
+ 0x036, 0x000A0474,
+ 0x036, 0x000A8474,
+ 0x036, 0x000B0474,
+ 0x036, 0x000C0474,
+ 0x036, 0x000C8474,
+ 0x036, 0x000D0474,
+ 0xB0000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0xA0000000, 0x00000000,
+ 0x0EF, 0x00000004,
+ 0x037, 0x00000000,
+ 0x038, 0x0000514E,
+ 0x037, 0x00004000,
+ 0x038, 0x0000514E,
+ 0x037, 0x00008000,
+ 0x038, 0x0000514E,
+ 0x037, 0x00010000,
+ 0x038, 0x0000514E,
+ 0x037, 0x00014000,
+ 0x038, 0x0000514E,
+ 0x037, 0x00018000,
+ 0x038, 0x0000514E,
+ 0x037, 0x0001C000,
+ 0x038, 0x0000514E,
+ 0x037, 0x00020000,
+ 0x038, 0x0000514E,
+ 0x037, 0x00024000,
+ 0x038, 0x0000514E,
+ 0x037, 0x00028000,
+ 0x038, 0x0000514E,
+ 0x037, 0x0002C000,
+ 0x038, 0x0000714E,
+ 0x037, 0x00030000,
+ 0x038, 0x0000514E,
+ 0x037, 0x00034000,
+ 0x038, 0x0000514E,
+ 0x037, 0x00038000,
+ 0x038, 0x0000514E,
+ 0x037, 0x0003C000,
+ 0x038, 0x0000514E,
+ 0x037, 0x00040000,
+ 0x038, 0x0000514E,
+ 0x037, 0x00044000,
+ 0x038, 0x0000514E,
+ 0x037, 0x00048000,
+ 0x038, 0x0000514E,
+ 0x037, 0x00080000,
+ 0x038, 0x00005ECE,
+ 0x037, 0x00084000,
+ 0x038, 0x00005ECE,
+ 0x037, 0x00088000,
+ 0x038, 0x00005ECE,
+ 0x037, 0x00090000,
+ 0x038, 0x00005ECE,
+ 0x037, 0x00094000,
+ 0x038, 0x00005ECE,
+ 0x037, 0x00098000,
+ 0x038, 0x00005ECE,
+ 0x037, 0x0009C000,
+ 0x038, 0x00005ECE,
+ 0x037, 0x000A0000,
+ 0x038, 0x00005ECE,
+ 0x037, 0x000A4000,
+ 0x038, 0x00005ECE,
+ 0x037, 0x000A8000,
+ 0x038, 0x00005ECE,
+ 0x037, 0x000AC000,
+ 0x038, 0x00005ECE,
+ 0x037, 0x000B0000,
+ 0x038, 0x00005ECE,
+ 0x037, 0x000B4000,
+ 0x038, 0x00005ECE,
+ 0x037, 0x000B8000,
+ 0x038, 0x00005ECE,
+ 0x037, 0x000BC000,
+ 0x038, 0x00005ECE,
+ 0x037, 0x000C0000,
+ 0x038, 0x00005ECE,
+ 0x037, 0x000C4000,
+ 0x038, 0x00005ECE,
+ 0x037, 0x000C8000,
+ 0x038, 0x00005ECE,
+ 0x0EF, 0x00000000,
+ 0xB0000000, 0x00000000,
+ 0x0EF, 0x00000008,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0xA0000000, 0x00000000,
+ 0x03C, 0x0000007D,
+ 0x03C, 0x0000047D,
+ 0x03C, 0x0000087D,
+ 0x03C, 0x0000107D,
+ 0x03C, 0x0000147D,
+ 0x03C, 0x0000187D,
+ 0xB0000000, 0x00000000,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x00000275,
+ 0x03C, 0x00000542,
+ 0x03C, 0x00000821,
+ 0x03C, 0x00001275,
+ 0x03C, 0x00001542,
+ 0x03C, 0x00001821,
+ 0x03C, 0x00002275,
+ 0x03C, 0x00002542,
+ 0x03C, 0x00002821,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x0000027F,
+ 0x03C, 0x00000542,
+ 0x03C, 0x00000821,
+ 0x03C, 0x0000127F,
+ 0x03C, 0x00001542,
+ 0x03C, 0x00001821,
+ 0x03C, 0x0000227F,
+ 0x03C, 0x00002542,
+ 0x03C, 0x00002821,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x0000027F,
+ 0x03C, 0x00000542,
+ 0x03C, 0x00000821,
+ 0x03C, 0x0000127F,
+ 0x03C, 0x00001542,
+ 0x03C, 0x00001821,
+ 0x03C, 0x0000227F,
+ 0x03C, 0x00002542,
+ 0x03C, 0x00002821,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x0000027F,
+ 0x03C, 0x00000542,
+ 0x03C, 0x00000821,
+ 0x03C, 0x0000127F,
+ 0x03C, 0x00001542,
+ 0x03C, 0x00001821,
+ 0x03C, 0x0000227F,
+ 0x03C, 0x00002542,
+ 0x03C, 0x00002821,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x0000027F,
+ 0x03C, 0x00000542,
+ 0x03C, 0x00000821,
+ 0x03C, 0x0000127F,
+ 0x03C, 0x00001542,
+ 0x03C, 0x00001821,
+ 0x03C, 0x0000227F,
+ 0x03C, 0x00002542,
+ 0x03C, 0x00002821,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x0000027F,
+ 0x03C, 0x00000542,
+ 0x03C, 0x00000821,
+ 0x03C, 0x0000127F,
+ 0x03C, 0x00001542,
+ 0x03C, 0x00001821,
+ 0x03C, 0x0000227F,
+ 0x03C, 0x00002542,
+ 0x03C, 0x00002821,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x0000027F,
+ 0x03C, 0x00000542,
+ 0x03C, 0x00000821,
+ 0x03C, 0x0000127F,
+ 0x03C, 0x00001542,
+ 0x03C, 0x00001821,
+ 0x03C, 0x0000227F,
+ 0x03C, 0x00002542,
+ 0x03C, 0x00002821,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x03C, 0x0000027F,
+ 0x03C, 0x00000542,
+ 0x03C, 0x00000821,
+ 0x03C, 0x0000127F,
+ 0x03C, 0x00001542,
+ 0x03C, 0x00001821,
+ 0x03C, 0x0000227F,
+ 0x03C, 0x00002542,
+ 0x03C, 0x00002821,
+ 0xA0000000, 0x00000000,
+ 0x03C, 0x0000037E,
+ 0x03C, 0x00000575,
+ 0x03C, 0x00000971,
+ 0x03C, 0x0000127E,
+ 0x03C, 0x00001575,
+ 0x03C, 0x00001871,
+ 0x03C, 0x0000217E,
+ 0x03C, 0x00002575,
+ 0x03C, 0x00002871,
+ 0xB0000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x061, 0x000C0D47,
+ 0x062, 0x0000133C,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x063, 0x000750E7,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x063, 0x000750E7,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x063, 0x000750E7,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x063, 0x000750E7,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x063, 0x000750E7,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x063, 0x000750E7,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x063, 0x000750E7,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x063, 0x000750E7,
+ 0xA0000000, 0x00000000,
+ 0x063, 0x0007D0E7,
+ 0xB0000000, 0x00000000,
+ 0x064, 0x00014FEC,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x065, 0x000920D0,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x065, 0x000920D0,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x065, 0x000920D0,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x065, 0x000920D0,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x065, 0x000920D0,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x065, 0x000920D0,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x065, 0x000920D0,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x065, 0x000920D0,
+ 0xA0000000, 0x00000000,
+ 0x065, 0x000923FF,
+ 0xB0000000, 0x00000000,
+ 0x066, 0x00000040,
+ 0x057, 0x00050000,
+ 0x056, 0x00051DF0,
+ 0x80000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0xA0000000, 0x00000000,
+ 0x055, 0x00082060,
+ 0xB0000000, 0x00000000,
+};
+
+RTW_DECL_TABLE_RF_RADIO(rtw8814a_rf_d, D);
+
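+/*
+ * Entry layout follows struct rtw_txpwr_lmt_cfg_pair:
+ * { regd, band, bw, rs, ch, txpwr_lmt } (field order assumed from the
+ * rtw88 core definition; noted here for readability only).
+ */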
+static const struct rtw_txpwr_lmt_cfg_pair rtw8814a_txpwr_lmt[] = {
+ { 0, 0, 0, 0, 1, 36, },
+ { 2, 0, 0, 0, 1, 32, },
+ { 1, 0, 0, 0, 1, 32, },
+ { 0, 0, 0, 0, 2, 36, },
+ { 2, 0, 0, 0, 2, 32, },
+ { 1, 0, 0, 0, 2, 32, },
+ { 0, 0, 0, 0, 3, 36, },
+ { 2, 0, 0, 0, 3, 32, },
+ { 1, 0, 0, 0, 3, 32, },
+ { 0, 0, 0, 0, 4, 36, },
+ { 2, 0, 0, 0, 4, 32, },
+ { 1, 0, 0, 0, 4, 32, },
+ { 0, 0, 0, 0, 5, 36, },
+ { 2, 0, 0, 0, 5, 32, },
+ { 1, 0, 0, 0, 5, 32, },
+ { 0, 0, 0, 0, 6, 36, },
+ { 2, 0, 0, 0, 6, 32, },
+ { 1, 0, 0, 0, 6, 32, },
+ { 0, 0, 0, 0, 7, 36, },
+ { 2, 0, 0, 0, 7, 32, },
+ { 1, 0, 0, 0, 7, 32, },
+ { 0, 0, 0, 0, 8, 36, },
+ { 2, 0, 0, 0, 8, 32, },
+ { 1, 0, 0, 0, 8, 32, },
+ { 0, 0, 0, 0, 9, 36, },
+ { 2, 0, 0, 0, 9, 32, },
+ { 1, 0, 0, 0, 9, 32, },
+ { 0, 0, 0, 0, 10, 36, },
+ { 2, 0, 0, 0, 10, 32, },
+ { 1, 0, 0, 0, 10, 32, },
+ { 0, 0, 0, 0, 11, 36, },
+ { 2, 0, 0, 0, 11, 32, },
+ { 1, 0, 0, 0, 11, 32, },
+ { 0, 0, 0, 0, 12, 63, },
+ { 2, 0, 0, 0, 12, 32, },
+ { 1, 0, 0, 0, 12, 32, },
+ { 0, 0, 0, 0, 13, 63, },
+ { 2, 0, 0, 0, 13, 32, },
+ { 1, 0, 0, 0, 13, 32, },
+ { 0, 0, 0, 0, 14, 63, },
+ { 2, 0, 0, 0, 14, 63, },
+ { 1, 0, 0, 0, 14, 32, },
+ { 0, 0, 0, 1, 1, 34, },
+ { 2, 0, 0, 1, 1, 32, },
+ { 1, 0, 0, 1, 1, 32, },
+ { 0, 0, 0, 1, 2, 36, },
+ { 2, 0, 0, 1, 2, 32, },
+ { 1, 0, 0, 1, 2, 32, },
+ { 0, 0, 0, 1, 3, 36, },
+ { 2, 0, 0, 1, 3, 32, },
+ { 1, 0, 0, 1, 3, 32, },
+ { 0, 0, 0, 1, 4, 36, },
+ { 2, 0, 0, 1, 4, 32, },
+ { 1, 0, 0, 1, 4, 32, },
+ { 0, 0, 0, 1, 5, 36, },
+ { 2, 0, 0, 1, 5, 32, },
+ { 1, 0, 0, 1, 5, 32, },
+ { 0, 0, 0, 1, 6, 36, },
+ { 2, 0, 0, 1, 6, 32, },
+ { 1, 0, 0, 1, 6, 32, },
+ { 0, 0, 0, 1, 7, 36, },
+ { 2, 0, 0, 1, 7, 32, },
+ { 1, 0, 0, 1, 7, 32, },
+ { 0, 0, 0, 1, 8, 36, },
+ { 2, 0, 0, 1, 8, 32, },
+ { 1, 0, 0, 1, 8, 32, },
+ { 0, 0, 0, 1, 9, 36, },
+ { 2, 0, 0, 1, 9, 32, },
+ { 1, 0, 0, 1, 9, 32, },
+ { 0, 0, 0, 1, 10, 36, },
+ { 2, 0, 0, 1, 10, 32, },
+ { 1, 0, 0, 1, 10, 32, },
+ { 0, 0, 0, 1, 11, 32, },
+ { 2, 0, 0, 1, 11, 32, },
+ { 1, 0, 0, 1, 11, 32, },
+ { 0, 0, 0, 1, 12, 63, },
+ { 2, 0, 0, 1, 12, 32, },
+ { 1, 0, 0, 1, 12, 32, },
+ { 0, 0, 0, 1, 13, 63, },
+ { 2, 0, 0, 1, 13, 32, },
+ { 1, 0, 0, 1, 13, 32, },
+ { 0, 0, 0, 1, 14, 63, },
+ { 2, 0, 0, 1, 14, 63, },
+ { 1, 0, 0, 1, 14, 63, },
+ { 0, 0, 0, 2, 1, 34, },
+ { 2, 0, 0, 2, 1, 32, },
+ { 1, 0, 0, 2, 1, 32, },
+ { 0, 0, 0, 2, 2, 36, },
+ { 2, 0, 0, 2, 2, 32, },
+ { 1, 0, 0, 2, 2, 32, },
+ { 0, 0, 0, 2, 3, 36, },
+ { 2, 0, 0, 2, 3, 32, },
+ { 1, 0, 0, 2, 3, 32, },
+ { 0, 0, 0, 2, 4, 36, },
+ { 2, 0, 0, 2, 4, 32, },
+ { 1, 0, 0, 2, 4, 32, },
+ { 0, 0, 0, 2, 5, 36, },
+ { 2, 0, 0, 2, 5, 32, },
+ { 1, 0, 0, 2, 5, 32, },
+ { 0, 0, 0, 2, 6, 36, },
+ { 2, 0, 0, 2, 6, 32, },
+ { 1, 0, 0, 2, 6, 32, },
+ { 0, 0, 0, 2, 7, 36, },
+ { 2, 0, 0, 2, 7, 32, },
+ { 1, 0, 0, 2, 7, 32, },
+ { 0, 0, 0, 2, 8, 36, },
+ { 2, 0, 0, 2, 8, 32, },
+ { 1, 0, 0, 2, 8, 32, },
+ { 0, 0, 0, 2, 9, 36, },
+ { 2, 0, 0, 2, 9, 32, },
+ { 1, 0, 0, 2, 9, 32, },
+ { 0, 0, 0, 2, 10, 36, },
+ { 2, 0, 0, 2, 10, 32, },
+ { 1, 0, 0, 2, 10, 32, },
+ { 0, 0, 0, 2, 11, 32, },
+ { 2, 0, 0, 2, 11, 32, },
+ { 1, 0, 0, 2, 11, 32, },
+ { 0, 0, 0, 2, 12, 63, },
+ { 2, 0, 0, 2, 12, 32, },
+ { 1, 0, 0, 2, 12, 32, },
+ { 0, 0, 0, 2, 13, 63, },
+ { 2, 0, 0, 2, 13, 32, },
+ { 1, 0, 0, 2, 13, 32, },
+ { 0, 0, 0, 2, 14, 63, },
+ { 2, 0, 0, 2, 14, 63, },
+ { 1, 0, 0, 2, 14, 63, },
+ { 0, 0, 0, 3, 1, 32, },
+ { 2, 0, 0, 3, 1, 30, },
+ { 1, 0, 0, 3, 1, 30, },
+ { 0, 0, 0, 3, 2, 34, },
+ { 2, 0, 0, 3, 2, 30, },
+ { 1, 0, 0, 3, 2, 30, },
+ { 0, 0, 0, 3, 3, 34, },
+ { 2, 0, 0, 3, 3, 30, },
+ { 1, 0, 0, 3, 3, 30, },
+ { 0, 0, 0, 3, 4, 34, },
+ { 2, 0, 0, 3, 4, 30, },
+ { 1, 0, 0, 3, 4, 30, },
+ { 0, 0, 0, 3, 5, 34, },
+ { 2, 0, 0, 3, 5, 30, },
+ { 1, 0, 0, 3, 5, 30, },
+ { 0, 0, 0, 3, 6, 34, },
+ { 2, 0, 0, 3, 6, 30, },
+ { 1, 0, 0, 3, 6, 30, },
+ { 0, 0, 0, 3, 7, 34, },
+ { 2, 0, 0, 3, 7, 30, },
+ { 1, 0, 0, 3, 7, 30, },
+ { 0, 0, 0, 3, 8, 34, },
+ { 2, 0, 0, 3, 8, 30, },
+ { 1, 0, 0, 3, 8, 30, },
+ { 0, 0, 0, 3, 9, 34, },
+ { 2, 0, 0, 3, 9, 30, },
+ { 1, 0, 0, 3, 9, 30, },
+ { 0, 0, 0, 3, 10, 34, },
+ { 2, 0, 0, 3, 10, 30, },
+ { 1, 0, 0, 3, 10, 30, },
+ { 0, 0, 0, 3, 11, 30, },
+ { 2, 0, 0, 3, 11, 30, },
+ { 1, 0, 0, 3, 11, 30, },
+ { 0, 0, 0, 3, 12, 63, },
+ { 2, 0, 0, 3, 12, 30, },
+ { 1, 0, 0, 3, 12, 30, },
+ { 0, 0, 0, 3, 13, 63, },
+ { 2, 0, 0, 3, 13, 30, },
+ { 1, 0, 0, 3, 13, 30, },
+ { 0, 0, 0, 3, 14, 63, },
+ { 2, 0, 0, 3, 14, 63, },
+ { 1, 0, 0, 3, 14, 63, },
+ { 0, 0, 0, 6, 1, 30, },
+ { 2, 0, 0, 6, 1, 28, },
+ { 1, 0, 0, 6, 1, 28, },
+ { 0, 0, 0, 6, 2, 32, },
+ { 2, 0, 0, 6, 2, 28, },
+ { 1, 0, 0, 6, 2, 28, },
+ { 0, 0, 0, 6, 3, 32, },
+ { 2, 0, 0, 6, 3, 28, },
+ { 1, 0, 0, 6, 3, 28, },
+ { 0, 0, 0, 6, 4, 32, },
+ { 2, 0, 0, 6, 4, 28, },
+ { 1, 0, 0, 6, 4, 28, },
+ { 0, 0, 0, 6, 5, 32, },
+ { 2, 0, 0, 6, 5, 28, },
+ { 1, 0, 0, 6, 5, 28, },
+ { 0, 0, 0, 6, 6, 32, },
+ { 2, 0, 0, 6, 6, 28, },
+ { 1, 0, 0, 6, 6, 28, },
+ { 0, 0, 0, 6, 7, 32, },
+ { 2, 0, 0, 6, 7, 28, },
+ { 1, 0, 0, 6, 7, 28, },
+ { 0, 0, 0, 6, 8, 32, },
+ { 2, 0, 0, 6, 8, 28, },
+ { 1, 0, 0, 6, 8, 28, },
+ { 0, 0, 0, 6, 9, 32, },
+ { 2, 0, 0, 6, 9, 28, },
+ { 1, 0, 0, 6, 9, 28, },
+ { 0, 0, 0, 6, 10, 32, },
+ { 2, 0, 0, 6, 10, 28, },
+ { 1, 0, 0, 6, 10, 28, },
+ { 0, 0, 0, 6, 11, 28, },
+ { 2, 0, 0, 6, 11, 28, },
+ { 1, 0, 0, 6, 11, 28, },
+ { 0, 0, 0, 6, 12, 63, },
+ { 2, 0, 0, 6, 12, 28, },
+ { 1, 0, 0, 6, 12, 28, },
+ { 0, 0, 0, 6, 13, 63, },
+ { 2, 0, 0, 6, 13, 28, },
+ { 1, 0, 0, 6, 13, 28, },
+ { 0, 0, 0, 6, 14, 63, },
+ { 2, 0, 0, 6, 14, 63, },
+ { 1, 0, 0, 6, 14, 63, },
+ { 0, 0, 0, 7, 1, 28, },
+ { 2, 0, 0, 7, 1, 26, },
+ { 1, 0, 0, 7, 1, 26, },
+ { 0, 0, 0, 7, 2, 30, },
+ { 2, 0, 0, 7, 2, 26, },
+ { 1, 0, 0, 7, 2, 26, },
+ { 0, 0, 0, 7, 3, 30, },
+ { 2, 0, 0, 7, 3, 26, },
+ { 1, 0, 0, 7, 3, 26, },
+ { 0, 0, 0, 7, 4, 30, },
+ { 2, 0, 0, 7, 4, 26, },
+ { 1, 0, 0, 7, 4, 26, },
+ { 0, 0, 0, 7, 5, 30, },
+ { 2, 0, 0, 7, 5, 26, },
+ { 1, 0, 0, 7, 5, 26, },
+ { 0, 0, 0, 7, 6, 30, },
+ { 2, 0, 0, 7, 6, 26, },
+ { 1, 0, 0, 7, 6, 26, },
+ { 0, 0, 0, 7, 7, 30, },
+ { 2, 0, 0, 7, 7, 26, },
+ { 1, 0, 0, 7, 7, 26, },
+ { 0, 0, 0, 7, 8, 30, },
+ { 2, 0, 0, 7, 8, 26, },
+ { 1, 0, 0, 7, 8, 26, },
+ { 0, 0, 0, 7, 9, 30, },
+ { 2, 0, 0, 7, 9, 26, },
+ { 1, 0, 0, 7, 9, 26, },
+ { 0, 0, 0, 7, 10, 30, },
+ { 2, 0, 0, 7, 10, 26, },
+ { 1, 0, 0, 7, 10, 26, },
+ { 0, 0, 0, 7, 11, 26, },
+ { 2, 0, 0, 7, 11, 26, },
+ { 1, 0, 0, 7, 11, 26, },
+ { 0, 0, 0, 7, 12, 63, },
+ { 2, 0, 0, 7, 12, 26, },
+ { 1, 0, 0, 7, 12, 26, },
+ { 0, 0, 0, 7, 13, 63, },
+ { 2, 0, 0, 7, 13, 26, },
+ { 1, 0, 0, 7, 13, 26, },
+ { 0, 0, 0, 7, 14, 63, },
+ { 2, 0, 0, 7, 14, 63, },
+ { 1, 0, 0, 7, 14, 63, },
+ { 0, 0, 1, 2, 1, 63, },
+ { 2, 0, 1, 2, 1, 63, },
+ { 1, 0, 1, 2, 1, 63, },
+ { 0, 0, 1, 2, 2, 63, },
+ { 2, 0, 1, 2, 2, 63, },
+ { 1, 0, 1, 2, 2, 63, },
+ { 0, 0, 1, 2, 3, 32, },
+ { 2, 0, 1, 2, 3, 32, },
+ { 1, 0, 1, 2, 3, 32, },
+ { 0, 0, 1, 2, 4, 36, },
+ { 2, 0, 1, 2, 4, 32, },
+ { 1, 0, 1, 2, 4, 32, },
+ { 0, 0, 1, 2, 5, 36, },
+ { 2, 0, 1, 2, 5, 32, },
+ { 1, 0, 1, 2, 5, 32, },
+ { 0, 0, 1, 2, 6, 36, },
+ { 2, 0, 1, 2, 6, 32, },
+ { 1, 0, 1, 2, 6, 32, },
+ { 0, 0, 1, 2, 7, 36, },
+ { 2, 0, 1, 2, 7, 32, },
+ { 1, 0, 1, 2, 7, 32, },
+ { 0, 0, 1, 2, 8, 36, },
+ { 2, 0, 1, 2, 8, 32, },
+ { 1, 0, 1, 2, 8, 32, },
+ { 0, 0, 1, 2, 9, 36, },
+ { 2, 0, 1, 2, 9, 32, },
+ { 1, 0, 1, 2, 9, 32, },
+ { 0, 0, 1, 2, 10, 36, },
+ { 2, 0, 1, 2, 10, 32, },
+ { 1, 0, 1, 2, 10, 32, },
+ { 0, 0, 1, 2, 11, 32, },
+ { 2, 0, 1, 2, 11, 32, },
+ { 1, 0, 1, 2, 11, 32, },
+ { 0, 0, 1, 2, 12, 63, },
+ { 2, 0, 1, 2, 12, 32, },
+ { 1, 0, 1, 2, 12, 32, },
+ { 0, 0, 1, 2, 13, 63, },
+ { 2, 0, 1, 2, 13, 32, },
+ { 1, 0, 1, 2, 13, 32, },
+ { 0, 0, 1, 2, 14, 63, },
+ { 2, 0, 1, 2, 14, 63, },
+ { 1, 0, 1, 2, 14, 63, },
+ { 0, 0, 1, 3, 1, 63, },
+ { 2, 0, 1, 3, 1, 63, },
+ { 1, 0, 1, 3, 1, 63, },
+ { 0, 0, 1, 3, 2, 63, },
+ { 2, 0, 1, 3, 2, 63, },
+ { 1, 0, 1, 3, 2, 63, },
+ { 0, 0, 1, 3, 3, 30, },
+ { 2, 0, 1, 3, 3, 30, },
+ { 1, 0, 1, 3, 3, 30, },
+ { 0, 0, 1, 3, 4, 34, },
+ { 2, 0, 1, 3, 4, 30, },
+ { 1, 0, 1, 3, 4, 30, },
+ { 0, 0, 1, 3, 5, 34, },
+ { 2, 0, 1, 3, 5, 30, },
+ { 1, 0, 1, 3, 5, 30, },
+ { 0, 0, 1, 3, 6, 34, },
+ { 2, 0, 1, 3, 6, 30, },
+ { 1, 0, 1, 3, 6, 30, },
+ { 0, 0, 1, 3, 7, 34, },
+ { 2, 0, 1, 3, 7, 30, },
+ { 1, 0, 1, 3, 7, 30, },
+ { 0, 0, 1, 3, 8, 34, },
+ { 2, 0, 1, 3, 8, 30, },
+ { 1, 0, 1, 3, 8, 30, },
+ { 0, 0, 1, 3, 9, 34, },
+ { 2, 0, 1, 3, 9, 30, },
+ { 1, 0, 1, 3, 9, 30, },
+ { 0, 0, 1, 3, 10, 34, },
+ { 2, 0, 1, 3, 10, 30, },
+ { 1, 0, 1, 3, 10, 30, },
+ { 0, 0, 1, 3, 11, 30, },
+ { 2, 0, 1, 3, 11, 30, },
+ { 1, 0, 1, 3, 11, 30, },
+ { 0, 0, 1, 3, 12, 63, },
+ { 2, 0, 1, 3, 12, 30, },
+ { 1, 0, 1, 3, 12, 30, },
+ { 0, 0, 1, 3, 13, 63, },
+ { 2, 0, 1, 3, 13, 30, },
+ { 1, 0, 1, 3, 13, 30, },
+ { 0, 0, 1, 3, 14, 63, },
+ { 2, 0, 1, 3, 14, 63, },
+ { 1, 0, 1, 3, 14, 63, },
+ { 0, 0, 1, 6, 1, 63, },
+ { 2, 0, 1, 6, 1, 63, },
+ { 1, 0, 1, 6, 1, 63, },
+ { 0, 0, 1, 6, 2, 63, },
+ { 2, 0, 1, 6, 2, 63, },
+ { 1, 0, 1, 6, 2, 63, },
+ { 0, 0, 1, 6, 3, 28, },
+ { 2, 0, 1, 6, 3, 28, },
+ { 1, 0, 1, 6, 3, 28, },
+ { 0, 0, 1, 6, 4, 32, },
+ { 2, 0, 1, 6, 4, 28, },
+ { 1, 0, 1, 6, 4, 28, },
+ { 0, 0, 1, 6, 5, 32, },
+ { 2, 0, 1, 6, 5, 28, },
+ { 1, 0, 1, 6, 5, 28, },
+ { 0, 0, 1, 6, 6, 32, },
+ { 2, 0, 1, 6, 6, 28, },
+ { 1, 0, 1, 6, 6, 28, },
+ { 0, 0, 1, 6, 7, 32, },
+ { 2, 0, 1, 6, 7, 28, },
+ { 1, 0, 1, 6, 7, 28, },
+ { 0, 0, 1, 6, 8, 32, },
+ { 2, 0, 1, 6, 8, 28, },
+ { 1, 0, 1, 6, 8, 28, },
+ { 0, 0, 1, 6, 9, 32, },
+ { 2, 0, 1, 6, 9, 28, },
+ { 1, 0, 1, 6, 9, 28, },
+ { 0, 0, 1, 6, 10, 32, },
+ { 2, 0, 1, 6, 10, 28, },
+ { 1, 0, 1, 6, 10, 28, },
+ { 0, 0, 1, 6, 11, 28, },
+ { 2, 0, 1, 6, 11, 28, },
+ { 1, 0, 1, 6, 11, 28, },
+ { 0, 0, 1, 6, 12, 63, },
+ { 2, 0, 1, 6, 12, 28, },
+ { 1, 0, 1, 6, 12, 28, },
+ { 0, 0, 1, 6, 13, 63, },
+ { 2, 0, 1, 6, 13, 28, },
+ { 1, 0, 1, 6, 13, 28, },
+ { 0, 0, 1, 6, 14, 63, },
+ { 2, 0, 1, 6, 14, 63, },
+ { 1, 0, 1, 6, 14, 63, },
+ { 0, 0, 1, 7, 1, 63, },
+ { 2, 0, 1, 7, 1, 63, },
+ { 1, 0, 1, 7, 1, 63, },
+ { 0, 0, 1, 7, 2, 63, },
+ { 2, 0, 1, 7, 2, 63, },
+ { 1, 0, 1, 7, 2, 63, },
+ { 0, 0, 1, 7, 3, 26, },
+ { 2, 0, 1, 7, 3, 26, },
+ { 1, 0, 1, 7, 3, 26, },
+ { 0, 0, 1, 7, 4, 30, },
+ { 2, 0, 1, 7, 4, 26, },
+ { 1, 0, 1, 7, 4, 26, },
+ { 0, 0, 1, 7, 5, 30, },
+ { 2, 0, 1, 7, 5, 26, },
+ { 1, 0, 1, 7, 5, 26, },
+ { 0, 0, 1, 7, 6, 30, },
+ { 2, 0, 1, 7, 6, 26, },
+ { 1, 0, 1, 7, 6, 26, },
+ { 0, 0, 1, 7, 7, 30, },
+ { 2, 0, 1, 7, 7, 26, },
+ { 1, 0, 1, 7, 7, 26, },
+ { 0, 0, 1, 7, 8, 30, },
+ { 2, 0, 1, 7, 8, 26, },
+ { 1, 0, 1, 7, 8, 26, },
+ { 0, 0, 1, 7, 9, 30, },
+ { 2, 0, 1, 7, 9, 26, },
+ { 1, 0, 1, 7, 9, 26, },
+ { 0, 0, 1, 7, 10, 30, },
+ { 2, 0, 1, 7, 10, 26, },
+ { 1, 0, 1, 7, 10, 26, },
+ { 0, 0, 1, 7, 11, 26, },
+ { 2, 0, 1, 7, 11, 26, },
+ { 1, 0, 1, 7, 11, 26, },
+ { 0, 0, 1, 7, 12, 63, },
+ { 2, 0, 1, 7, 12, 26, },
+ { 1, 0, 1, 7, 12, 26, },
+ { 0, 0, 1, 7, 13, 63, },
+ { 2, 0, 1, 7, 13, 26, },
+ { 1, 0, 1, 7, 13, 26, },
+ { 0, 0, 1, 7, 14, 63, },
+ { 2, 0, 1, 7, 14, 63, },
+ { 1, 0, 1, 7, 14, 63, },
+ { 0, 1, 0, 1, 36, 30, },
+ { 2, 1, 0, 1, 36, 32, },
+ { 1, 1, 0, 1, 36, 32, },
+ { 0, 1, 0, 1, 40, 30, },
+ { 2, 1, 0, 1, 40, 32, },
+ { 1, 1, 0, 1, 40, 32, },
+ { 0, 1, 0, 1, 44, 30, },
+ { 2, 1, 0, 1, 44, 32, },
+ { 1, 1, 0, 1, 44, 32, },
+ { 0, 1, 0, 1, 48, 30, },
+ { 2, 1, 0, 1, 48, 32, },
+ { 1, 1, 0, 1, 48, 32, },
+ { 0, 1, 0, 1, 52, 36, },
+ { 2, 1, 0, 1, 52, 32, },
+ { 1, 1, 0, 1, 52, 32, },
+ { 0, 1, 0, 1, 56, 34, },
+ { 2, 1, 0, 1, 56, 32, },
+ { 1, 1, 0, 1, 56, 32, },
+ { 0, 1, 0, 1, 60, 32, },
+ { 2, 1, 0, 1, 60, 32, },
+ { 1, 1, 0, 1, 60, 32, },
+ { 0, 1, 0, 1, 64, 28, },
+ { 2, 1, 0, 1, 64, 32, },
+ { 1, 1, 0, 1, 64, 32, },
+ { 0, 1, 0, 1, 100, 30, },
+ { 2, 1, 0, 1, 100, 32, },
+ { 1, 1, 0, 1, 100, 32, },
+ { 0, 1, 0, 1, 104, 30, },
+ { 2, 1, 0, 1, 104, 32, },
+ { 1, 1, 0, 1, 104, 32, },
+ { 0, 1, 0, 1, 108, 32, },
+ { 2, 1, 0, 1, 108, 32, },
+ { 1, 1, 0, 1, 108, 32, },
+ { 0, 1, 0, 1, 112, 34, },
+ { 2, 1, 0, 1, 112, 32, },
+ { 1, 1, 0, 1, 112, 32, },
+ { 0, 1, 0, 1, 116, 34, },
+ { 2, 1, 0, 1, 116, 32, },
+ { 1, 1, 0, 1, 116, 32, },
+ { 0, 1, 0, 1, 120, 36, },
+ { 2, 1, 0, 1, 120, 32, },
+ { 1, 1, 0, 1, 120, 32, },
+ { 0, 1, 0, 1, 124, 34, },
+ { 2, 1, 0, 1, 124, 32, },
+ { 1, 1, 0, 1, 124, 32, },
+ { 0, 1, 0, 1, 128, 32, },
+ { 2, 1, 0, 1, 128, 32, },
+ { 1, 1, 0, 1, 128, 32, },
+ { 0, 1, 0, 1, 132, 30, },
+ { 2, 1, 0, 1, 132, 32, },
+ { 1, 1, 0, 1, 132, 32, },
+ { 0, 1, 0, 1, 136, 30, },
+ { 2, 1, 0, 1, 136, 32, },
+ { 1, 1, 0, 1, 136, 32, },
+ { 0, 1, 0, 1, 140, 28, },
+ { 2, 1, 0, 1, 140, 32, },
+ { 1, 1, 0, 1, 140, 32, },
+ { 0, 1, 0, 1, 149, 36, },
+ { 2, 1, 0, 1, 149, 32, },
+ { 1, 1, 0, 1, 149, 63, },
+ { 0, 1, 0, 1, 153, 36, },
+ { 2, 1, 0, 1, 153, 32, },
+ { 1, 1, 0, 1, 153, 63, },
+ { 0, 1, 0, 1, 157, 36, },
+ { 2, 1, 0, 1, 157, 32, },
+ { 1, 1, 0, 1, 157, 63, },
+ { 0, 1, 0, 1, 161, 36, },
+ { 2, 1, 0, 1, 161, 32, },
+ { 1, 1, 0, 1, 161, 63, },
+ { 0, 1, 0, 1, 165, 36, },
+ { 2, 1, 0, 1, 165, 32, },
+ { 1, 1, 0, 1, 165, 63, },
+ { 0, 1, 0, 2, 36, 30, },
+ { 2, 1, 0, 2, 36, 32, },
+ { 1, 1, 0, 2, 36, 32, },
+ { 0, 1, 0, 2, 40, 30, },
+ { 2, 1, 0, 2, 40, 32, },
+ { 1, 1, 0, 2, 40, 32, },
+ { 0, 1, 0, 2, 44, 30, },
+ { 2, 1, 0, 2, 44, 32, },
+ { 1, 1, 0, 2, 44, 32, },
+ { 0, 1, 0, 2, 48, 30, },
+ { 2, 1, 0, 2, 48, 32, },
+ { 1, 1, 0, 2, 48, 32, },
+ { 0, 1, 0, 2, 52, 36, },
+ { 2, 1, 0, 2, 52, 32, },
+ { 1, 1, 0, 2, 52, 32, },
+ { 0, 1, 0, 2, 56, 34, },
+ { 2, 1, 0, 2, 56, 32, },
+ { 1, 1, 0, 2, 56, 32, },
+ { 0, 1, 0, 2, 60, 32, },
+ { 2, 1, 0, 2, 60, 32, },
+ { 1, 1, 0, 2, 60, 32, },
+ { 0, 1, 0, 2, 64, 28, },
+ { 2, 1, 0, 2, 64, 32, },
+ { 1, 1, 0, 2, 64, 32, },
+ { 0, 1, 0, 2, 100, 30, },
+ { 2, 1, 0, 2, 100, 32, },
+ { 1, 1, 0, 2, 100, 32, },
+ { 0, 1, 0, 2, 104, 30, },
+ { 2, 1, 0, 2, 104, 32, },
+ { 1, 1, 0, 2, 104, 32, },
+ { 0, 1, 0, 2, 108, 32, },
+ { 2, 1, 0, 2, 108, 32, },
+ { 1, 1, 0, 2, 108, 32, },
+ { 0, 1, 0, 2, 112, 34, },
+ { 2, 1, 0, 2, 112, 32, },
+ { 1, 1, 0, 2, 112, 32, },
+ { 0, 1, 0, 2, 116, 34, },
+ { 2, 1, 0, 2, 116, 32, },
+ { 1, 1, 0, 2, 116, 32, },
+ { 0, 1, 0, 2, 120, 36, },
+ { 2, 1, 0, 2, 120, 32, },
+ { 1, 1, 0, 2, 120, 32, },
+ { 0, 1, 0, 2, 124, 34, },
+ { 2, 1, 0, 2, 124, 32, },
+ { 1, 1, 0, 2, 124, 32, },
+ { 0, 1, 0, 2, 128, 32, },
+ { 2, 1, 0, 2, 128, 32, },
+ { 1, 1, 0, 2, 128, 32, },
+ { 0, 1, 0, 2, 132, 30, },
+ { 2, 1, 0, 2, 132, 32, },
+ { 1, 1, 0, 2, 132, 32, },
+ { 0, 1, 0, 2, 136, 30, },
+ { 2, 1, 0, 2, 136, 32, },
+ { 1, 1, 0, 2, 136, 32, },
+ { 0, 1, 0, 2, 140, 28, },
+ { 2, 1, 0, 2, 140, 32, },
+ { 1, 1, 0, 2, 140, 32, },
+ { 0, 1, 0, 2, 149, 36, },
+ { 2, 1, 0, 2, 149, 32, },
+ { 1, 1, 0, 2, 149, 63, },
+ { 0, 1, 0, 2, 153, 36, },
+ { 2, 1, 0, 2, 153, 32, },
+ { 1, 1, 0, 2, 153, 63, },
+ { 0, 1, 0, 2, 157, 36, },
+ { 2, 1, 0, 2, 157, 32, },
+ { 1, 1, 0, 2, 157, 63, },
+ { 0, 1, 0, 2, 161, 36, },
+ { 2, 1, 0, 2, 161, 32, },
+ { 1, 1, 0, 2, 161, 63, },
+ { 0, 1, 0, 2, 165, 36, },
+ { 2, 1, 0, 2, 165, 32, },
+ { 1, 1, 0, 2, 165, 63, },
+ { 0, 1, 0, 3, 36, 28, },
+ { 2, 1, 0, 3, 36, 30, },
+ { 1, 1, 0, 3, 36, 30, },
+ { 0, 1, 0, 3, 40, 28, },
+ { 2, 1, 0, 3, 40, 30, },
+ { 1, 1, 0, 3, 40, 30, },
+ { 0, 1, 0, 3, 44, 28, },
+ { 2, 1, 0, 3, 44, 30, },
+ { 1, 1, 0, 3, 44, 30, },
+ { 0, 1, 0, 3, 48, 28, },
+ { 2, 1, 0, 3, 48, 30, },
+ { 1, 1, 0, 3, 48, 30, },
+ { 0, 1, 0, 3, 52, 34, },
+ { 2, 1, 0, 3, 52, 30, },
+ { 1, 1, 0, 3, 52, 30, },
+ { 0, 1, 0, 3, 56, 32, },
+ { 2, 1, 0, 3, 56, 30, },
+ { 1, 1, 0, 3, 56, 30, },
+ { 0, 1, 0, 3, 60, 30, },
+ { 2, 1, 0, 3, 60, 30, },
+ { 1, 1, 0, 3, 60, 30, },
+ { 0, 1, 0, 3, 64, 26, },
+ { 2, 1, 0, 3, 64, 30, },
+ { 1, 1, 0, 3, 64, 30, },
+ { 0, 1, 0, 3, 100, 28, },
+ { 2, 1, 0, 3, 100, 30, },
+ { 1, 1, 0, 3, 100, 30, },
+ { 0, 1, 0, 3, 104, 28, },
+ { 2, 1, 0, 3, 104, 30, },
+ { 1, 1, 0, 3, 104, 30, },
+ { 0, 1, 0, 3, 108, 30, },
+ { 2, 1, 0, 3, 108, 30, },
+ { 1, 1, 0, 3, 108, 30, },
+ { 0, 1, 0, 3, 112, 32, },
+ { 2, 1, 0, 3, 112, 30, },
+ { 1, 1, 0, 3, 112, 30, },
+ { 0, 1, 0, 3, 116, 32, },
+ { 2, 1, 0, 3, 116, 30, },
+ { 1, 1, 0, 3, 116, 30, },
+ { 0, 1, 0, 3, 120, 34, },
+ { 2, 1, 0, 3, 120, 30, },
+ { 1, 1, 0, 3, 120, 30, },
+ { 0, 1, 0, 3, 124, 32, },
+ { 2, 1, 0, 3, 124, 30, },
+ { 1, 1, 0, 3, 124, 30, },
+ { 0, 1, 0, 3, 128, 30, },
+ { 2, 1, 0, 3, 128, 30, },
+ { 1, 1, 0, 3, 128, 30, },
+ { 0, 1, 0, 3, 132, 28, },
+ { 2, 1, 0, 3, 132, 30, },
+ { 1, 1, 0, 3, 132, 30, },
+ { 0, 1, 0, 3, 136, 28, },
+ { 2, 1, 0, 3, 136, 30, },
+ { 1, 1, 0, 3, 136, 30, },
+ { 0, 1, 0, 3, 140, 26, },
+ { 2, 1, 0, 3, 140, 30, },
+ { 1, 1, 0, 3, 140, 30, },
+ { 0, 1, 0, 3, 149, 34, },
+ { 2, 1, 0, 3, 149, 30, },
+ { 1, 1, 0, 3, 149, 63, },
+ { 0, 1, 0, 3, 153, 34, },
+ { 2, 1, 0, 3, 153, 30, },
+ { 1, 1, 0, 3, 153, 63, },
+ { 0, 1, 0, 3, 157, 34, },
+ { 2, 1, 0, 3, 157, 30, },
+ { 1, 1, 0, 3, 157, 63, },
+ { 0, 1, 0, 3, 161, 34, },
+ { 2, 1, 0, 3, 161, 30, },
+ { 1, 1, 0, 3, 161, 63, },
+ { 0, 1, 0, 3, 165, 34, },
+ { 2, 1, 0, 3, 165, 30, },
+ { 1, 1, 0, 3, 165, 63, },
+ { 0, 1, 0, 6, 36, 26, },
+ { 2, 1, 0, 6, 36, 28, },
+ { 1, 1, 0, 6, 36, 28, },
+ { 0, 1, 0, 6, 40, 26, },
+ { 2, 1, 0, 6, 40, 28, },
+ { 1, 1, 0, 6, 40, 28, },
+ { 0, 1, 0, 6, 44, 26, },
+ { 2, 1, 0, 6, 44, 28, },
+ { 1, 1, 0, 6, 44, 28, },
+ { 0, 1, 0, 6, 48, 26, },
+ { 2, 1, 0, 6, 48, 28, },
+ { 1, 1, 0, 6, 48, 28, },
+ { 0, 1, 0, 6, 52, 32, },
+ { 2, 1, 0, 6, 52, 28, },
+ { 1, 1, 0, 6, 52, 28, },
+ { 0, 1, 0, 6, 56, 30, },
+ { 2, 1, 0, 6, 56, 28, },
+ { 1, 1, 0, 6, 56, 28, },
+ { 0, 1, 0, 6, 60, 28, },
+ { 2, 1, 0, 6, 60, 28, },
+ { 1, 1, 0, 6, 60, 28, },
+ { 0, 1, 0, 6, 64, 24, },
+ { 2, 1, 0, 6, 64, 28, },
+ { 1, 1, 0, 6, 64, 28, },
+ { 0, 1, 0, 6, 100, 26, },
+ { 2, 1, 0, 6, 100, 28, },
+ { 1, 1, 0, 6, 100, 28, },
+ { 0, 1, 0, 6, 104, 26, },
+ { 2, 1, 0, 6, 104, 28, },
+ { 1, 1, 0, 6, 104, 28, },
+ { 0, 1, 0, 6, 108, 28, },
+ { 2, 1, 0, 6, 108, 28, },
+ { 1, 1, 0, 6, 108, 28, },
+ { 0, 1, 0, 6, 112, 30, },
+ { 2, 1, 0, 6, 112, 28, },
+ { 1, 1, 0, 6, 112, 28, },
+ { 0, 1, 0, 6, 116, 30, },
+ { 2, 1, 0, 6, 116, 28, },
+ { 1, 1, 0, 6, 116, 28, },
+ { 0, 1, 0, 6, 120, 32, },
+ { 2, 1, 0, 6, 120, 28, },
+ { 1, 1, 0, 6, 120, 28, },
+ { 0, 1, 0, 6, 124, 30, },
+ { 2, 1, 0, 6, 124, 28, },
+ { 1, 1, 0, 6, 124, 28, },
+ { 0, 1, 0, 6, 128, 28, },
+ { 2, 1, 0, 6, 128, 28, },
+ { 1, 1, 0, 6, 128, 28, },
+ { 0, 1, 0, 6, 132, 26, },
+ { 2, 1, 0, 6, 132, 28, },
+ { 1, 1, 0, 6, 132, 28, },
+ { 0, 1, 0, 6, 136, 26, },
+ { 2, 1, 0, 6, 136, 28, },
+ { 1, 1, 0, 6, 136, 28, },
+ { 0, 1, 0, 6, 140, 24, },
+ { 2, 1, 0, 6, 140, 28, },
+ { 1, 1, 0, 6, 140, 28, },
+ { 0, 1, 0, 6, 149, 32, },
+ { 2, 1, 0, 6, 149, 28, },
+ { 1, 1, 0, 6, 149, 63, },
+ { 0, 1, 0, 6, 153, 32, },
+ { 2, 1, 0, 6, 153, 28, },
+ { 1, 1, 0, 6, 153, 63, },
+ { 0, 1, 0, 6, 157, 32, },
+ { 2, 1, 0, 6, 157, 28, },
+ { 1, 1, 0, 6, 157, 63, },
+ { 0, 1, 0, 6, 161, 32, },
+ { 2, 1, 0, 6, 161, 28, },
+ { 1, 1, 0, 6, 161, 63, },
+ { 0, 1, 0, 6, 165, 32, },
+ { 2, 1, 0, 6, 165, 28, },
+ { 1, 1, 0, 6, 165, 63, },
+ { 0, 1, 0, 7, 36, 24, },
+ { 2, 1, 0, 7, 36, 26, },
+ { 1, 1, 0, 7, 36, 26, },
+ { 0, 1, 0, 7, 40, 24, },
+ { 2, 1, 0, 7, 40, 26, },
+ { 1, 1, 0, 7, 40, 26, },
+ { 0, 1, 0, 7, 44, 24, },
+ { 2, 1, 0, 7, 44, 26, },
+ { 1, 1, 0, 7, 44, 26, },
+ { 0, 1, 0, 7, 48, 24, },
+ { 2, 1, 0, 7, 48, 26, },
+ { 1, 1, 0, 7, 48, 26, },
+ { 0, 1, 0, 7, 52, 30, },
+ { 2, 1, 0, 7, 52, 26, },
+ { 1, 1, 0, 7, 52, 26, },
+ { 0, 1, 0, 7, 56, 28, },
+ { 2, 1, 0, 7, 56, 26, },
+ { 1, 1, 0, 7, 56, 26, },
+ { 0, 1, 0, 7, 60, 26, },
+ { 2, 1, 0, 7, 60, 26, },
+ { 1, 1, 0, 7, 60, 26, },
+ { 0, 1, 0, 7, 64, 22, },
+ { 2, 1, 0, 7, 64, 26, },
+ { 1, 1, 0, 7, 64, 26, },
+ { 0, 1, 0, 7, 100, 24, },
+ { 2, 1, 0, 7, 100, 26, },
+ { 1, 1, 0, 7, 100, 26, },
+ { 0, 1, 0, 7, 104, 24, },
+ { 2, 1, 0, 7, 104, 26, },
+ { 1, 1, 0, 7, 104, 26, },
+ { 0, 1, 0, 7, 108, 26, },
+ { 2, 1, 0, 7, 108, 26, },
+ { 1, 1, 0, 7, 108, 26, },
+ { 0, 1, 0, 7, 112, 28, },
+ { 2, 1, 0, 7, 112, 26, },
+ { 1, 1, 0, 7, 112, 26, },
+ { 0, 1, 0, 7, 116, 28, },
+ { 2, 1, 0, 7, 116, 26, },
+ { 1, 1, 0, 7, 116, 26, },
+ { 0, 1, 0, 7, 120, 30, },
+ { 2, 1, 0, 7, 120, 26, },
+ { 1, 1, 0, 7, 120, 26, },
+ { 0, 1, 0, 7, 124, 28, },
+ { 2, 1, 0, 7, 124, 26, },
+ { 1, 1, 0, 7, 124, 26, },
+ { 0, 1, 0, 7, 128, 26, },
+ { 2, 1, 0, 7, 128, 26, },
+ { 1, 1, 0, 7, 128, 26, },
+ { 0, 1, 0, 7, 132, 24, },
+ { 2, 1, 0, 7, 132, 26, },
+ { 1, 1, 0, 7, 132, 26, },
+ { 0, 1, 0, 7, 136, 24, },
+ { 2, 1, 0, 7, 136, 26, },
+ { 1, 1, 0, 7, 136, 26, },
+ { 0, 1, 0, 7, 140, 22, },
+ { 2, 1, 0, 7, 140, 26, },
+ { 1, 1, 0, 7, 140, 26, },
+ { 0, 1, 0, 7, 149, 30, },
+ { 2, 1, 0, 7, 149, 26, },
+ { 1, 1, 0, 7, 149, 63, },
+ { 0, 1, 0, 7, 153, 30, },
+ { 2, 1, 0, 7, 153, 26, },
+ { 1, 1, 0, 7, 153, 63, },
+ { 0, 1, 0, 7, 157, 30, },
+ { 2, 1, 0, 7, 157, 26, },
+ { 1, 1, 0, 7, 157, 63, },
+ { 0, 1, 0, 7, 161, 30, },
+ { 2, 1, 0, 7, 161, 26, },
+ { 1, 1, 0, 7, 161, 63, },
+ { 0, 1, 0, 7, 165, 30, },
+ { 2, 1, 0, 7, 165, 26, },
+ { 1, 1, 0, 7, 165, 63, },
+ { 0, 1, 1, 2, 38, 30, },
+ { 2, 1, 1, 2, 38, 32, },
+ { 1, 1, 1, 2, 38, 32, },
+ { 0, 1, 1, 2, 46, 30, },
+ { 2, 1, 1, 2, 46, 32, },
+ { 1, 1, 1, 2, 46, 32, },
+ { 0, 1, 1, 2, 54, 32, },
+ { 2, 1, 1, 2, 54, 32, },
+ { 1, 1, 1, 2, 54, 32, },
+ { 0, 1, 1, 2, 62, 32, },
+ { 2, 1, 1, 2, 62, 32, },
+ { 1, 1, 1, 2, 62, 32, },
+ { 0, 1, 1, 2, 102, 28, },
+ { 2, 1, 1, 2, 102, 32, },
+ { 1, 1, 1, 2, 102, 32, },
+ { 0, 1, 1, 2, 110, 32, },
+ { 2, 1, 1, 2, 110, 32, },
+ { 1, 1, 1, 2, 110, 32, },
+ { 0, 1, 1, 2, 118, 36, },
+ { 2, 1, 1, 2, 118, 32, },
+ { 1, 1, 1, 2, 118, 32, },
+ { 0, 1, 1, 2, 126, 34, },
+ { 2, 1, 1, 2, 126, 32, },
+ { 1, 1, 1, 2, 126, 32, },
+ { 0, 1, 1, 2, 134, 32, },
+ { 2, 1, 1, 2, 134, 32, },
+ { 1, 1, 1, 2, 134, 32, },
+ { 0, 1, 1, 2, 151, 36, },
+ { 2, 1, 1, 2, 151, 32, },
+ { 1, 1, 1, 2, 151, 63, },
+ { 0, 1, 1, 2, 159, 36, },
+ { 2, 1, 1, 2, 159, 32, },
+ { 1, 1, 1, 2, 159, 63, },
+ { 0, 1, 1, 3, 38, 28, },
+ { 2, 1, 1, 3, 38, 30, },
+ { 1, 1, 1, 3, 38, 30, },
+ { 0, 1, 1, 3, 46, 28, },
+ { 2, 1, 1, 3, 46, 30, },
+ { 1, 1, 1, 3, 46, 30, },
+ { 0, 1, 1, 3, 54, 30, },
+ { 2, 1, 1, 3, 54, 30, },
+ { 1, 1, 1, 3, 54, 30, },
+ { 0, 1, 1, 3, 62, 30, },
+ { 2, 1, 1, 3, 62, 30, },
+ { 1, 1, 1, 3, 62, 30, },
+ { 0, 1, 1, 3, 102, 26, },
+ { 2, 1, 1, 3, 102, 30, },
+ { 1, 1, 1, 3, 102, 30, },
+ { 0, 1, 1, 3, 110, 30, },
+ { 2, 1, 1, 3, 110, 30, },
+ { 1, 1, 1, 3, 110, 30, },
+ { 0, 1, 1, 3, 118, 34, },
+ { 2, 1, 1, 3, 118, 30, },
+ { 1, 1, 1, 3, 118, 30, },
+ { 0, 1, 1, 3, 126, 32, },
+ { 2, 1, 1, 3, 126, 30, },
+ { 1, 1, 1, 3, 126, 30, },
+ { 0, 1, 1, 3, 134, 30, },
+ { 2, 1, 1, 3, 134, 30, },
+ { 1, 1, 1, 3, 134, 30, },
+ { 0, 1, 1, 3, 151, 34, },
+ { 2, 1, 1, 3, 151, 30, },
+ { 1, 1, 1, 3, 151, 63, },
+ { 0, 1, 1, 3, 159, 34, },
+ { 2, 1, 1, 3, 159, 30, },
+ { 1, 1, 1, 3, 159, 63, },
+ { 0, 1, 1, 6, 38, 26, },
+ { 2, 1, 1, 6, 38, 28, },
+ { 1, 1, 1, 6, 38, 28, },
+ { 0, 1, 1, 6, 46, 26, },
+ { 2, 1, 1, 6, 46, 28, },
+ { 1, 1, 1, 6, 46, 28, },
+ { 0, 1, 1, 6, 54, 28, },
+ { 2, 1, 1, 6, 54, 28, },
+ { 1, 1, 1, 6, 54, 28, },
+ { 0, 1, 1, 6, 62, 28, },
+ { 2, 1, 1, 6, 62, 28, },
+ { 1, 1, 1, 6, 62, 28, },
+ { 0, 1, 1, 6, 102, 24, },
+ { 2, 1, 1, 6, 102, 28, },
+ { 1, 1, 1, 6, 102, 28, },
+ { 0, 1, 1, 6, 110, 28, },
+ { 2, 1, 1, 6, 110, 28, },
+ { 1, 1, 1, 6, 110, 28, },
+ { 0, 1, 1, 6, 118, 32, },
+ { 2, 1, 1, 6, 118, 28, },
+ { 1, 1, 1, 6, 118, 28, },
+ { 0, 1, 1, 6, 126, 30, },
+ { 2, 1, 1, 6, 126, 28, },
+ { 1, 1, 1, 6, 126, 28, },
+ { 0, 1, 1, 6, 134, 28, },
+ { 2, 1, 1, 6, 134, 28, },
+ { 1, 1, 1, 6, 134, 28, },
+ { 0, 1, 1, 6, 151, 32, },
+ { 2, 1, 1, 6, 151, 28, },
+ { 1, 1, 1, 6, 151, 63, },
+ { 0, 1, 1, 6, 159, 32, },
+ { 2, 1, 1, 6, 159, 28, },
+ { 1, 1, 1, 6, 159, 63, },
+ { 0, 1, 1, 7, 38, 24, },
+ { 2, 1, 1, 7, 38, 26, },
+ { 1, 1, 1, 7, 38, 26, },
+ { 0, 1, 1, 7, 46, 24, },
+ { 2, 1, 1, 7, 46, 26, },
+ { 1, 1, 1, 7, 46, 26, },
+ { 0, 1, 1, 7, 54, 26, },
+ { 2, 1, 1, 7, 54, 26, },
+ { 1, 1, 1, 7, 54, 26, },
+ { 0, 1, 1, 7, 62, 26, },
+ { 2, 1, 1, 7, 62, 26, },
+ { 1, 1, 1, 7, 62, 26, },
+ { 0, 1, 1, 7, 102, 22, },
+ { 2, 1, 1, 7, 102, 26, },
+ { 1, 1, 1, 7, 102, 26, },
+ { 0, 1, 1, 7, 110, 26, },
+ { 2, 1, 1, 7, 110, 26, },
+ { 1, 1, 1, 7, 110, 26, },
+ { 0, 1, 1, 7, 118, 30, },
+ { 2, 1, 1, 7, 118, 26, },
+ { 1, 1, 1, 7, 118, 26, },
+ { 0, 1, 1, 7, 126, 28, },
+ { 2, 1, 1, 7, 126, 26, },
+ { 1, 1, 1, 7, 126, 26, },
+ { 0, 1, 1, 7, 134, 26, },
+ { 2, 1, 1, 7, 134, 26, },
+ { 1, 1, 1, 7, 134, 26, },
+ { 0, 1, 1, 7, 151, 30, },
+ { 2, 1, 1, 7, 151, 26, },
+ { 1, 1, 1, 7, 151, 63, },
+ { 0, 1, 1, 7, 159, 30, },
+ { 2, 1, 1, 7, 159, 26, },
+ { 1, 1, 1, 7, 159, 63, },
+ { 0, 1, 2, 4, 42, 30, },
+ { 2, 1, 2, 4, 42, 32, },
+ { 1, 1, 2, 4, 42, 32, },
+ { 0, 1, 2, 4, 58, 28, },
+ { 2, 1, 2, 4, 58, 32, },
+ { 1, 1, 2, 4, 58, 32, },
+ { 0, 1, 2, 4, 106, 30, },
+ { 2, 1, 2, 4, 106, 32, },
+ { 1, 1, 2, 4, 106, 32, },
+ { 0, 1, 2, 4, 122, 34, },
+ { 2, 1, 2, 4, 122, 32, },
+ { 1, 1, 2, 4, 122, 32, },
+ { 0, 1, 2, 4, 155, 36, },
+ { 2, 1, 2, 4, 155, 32, },
+ { 1, 1, 2, 4, 155, 63, },
+ { 0, 1, 2, 5, 42, 28, },
+ { 2, 1, 2, 5, 42, 30, },
+ { 1, 1, 2, 5, 42, 30, },
+ { 0, 1, 2, 5, 58, 26, },
+ { 2, 1, 2, 5, 58, 30, },
+ { 1, 1, 2, 5, 58, 30, },
+ { 0, 1, 2, 5, 106, 28, },
+ { 2, 1, 2, 5, 106, 30, },
+ { 1, 1, 2, 5, 106, 30, },
+ { 0, 1, 2, 5, 122, 32, },
+ { 2, 1, 2, 5, 122, 30, },
+ { 1, 1, 2, 5, 122, 30, },
+ { 0, 1, 2, 5, 155, 34, },
+ { 2, 1, 2, 5, 155, 30, },
+ { 1, 1, 2, 5, 155, 63, },
+ { 0, 1, 2, 8, 42, 26, },
+ { 2, 1, 2, 8, 42, 28, },
+ { 1, 1, 2, 8, 42, 28, },
+ { 0, 1, 2, 8, 58, 24, },
+ { 2, 1, 2, 8, 58, 28, },
+ { 1, 1, 2, 8, 58, 28, },
+ { 0, 1, 2, 8, 106, 26, },
+ { 2, 1, 2, 8, 106, 28, },
+ { 1, 1, 2, 8, 106, 28, },
+ { 0, 1, 2, 8, 122, 30, },
+ { 2, 1, 2, 8, 122, 28, },
+ { 1, 1, 2, 8, 122, 28, },
+ { 0, 1, 2, 8, 155, 32, },
+ { 2, 1, 2, 8, 155, 28, },
+ { 1, 1, 2, 8, 155, 63, },
+ { 0, 1, 2, 9, 42, 24, },
+ { 2, 1, 2, 9, 42, 26, },
+ { 1, 1, 2, 9, 42, 26, },
+ { 0, 1, 2, 9, 58, 22, },
+ { 2, 1, 2, 9, 58, 26, },
+ { 1, 1, 2, 9, 58, 26, },
+ { 0, 1, 2, 9, 106, 24, },
+ { 2, 1, 2, 9, 106, 26, },
+ { 1, 1, 2, 9, 106, 26, },
+ { 0, 1, 2, 9, 122, 28, },
+ { 2, 1, 2, 9, 122, 26, },
+ { 1, 1, 2, 9, 122, 26, },
+ { 0, 1, 2, 9, 155, 30, },
+ { 2, 1, 2, 9, 155, 26, },
+ { 1, 1, 2, 9, 155, 63, },
+};
+
+RTW_DECL_TABLE_TXPWR_LMT(rtw8814a_txpwr_lmt);
+
+static const struct rtw_txpwr_lmt_cfg_pair rtw8814a_txpwr_lmt_type0[] = {
+ { 0, 0, 0, 0, 1, 32, },
+ { 2, 0, 0, 0, 1, 32, },
+ { 1, 0, 0, 0, 1, 32, },
+ { 0, 0, 0, 0, 2, 32, },
+ { 2, 0, 0, 0, 2, 32, },
+ { 1, 0, 0, 0, 2, 32, },
+ { 0, 0, 0, 0, 3, 32, },
+ { 2, 0, 0, 0, 3, 32, },
+ { 1, 0, 0, 0, 3, 32, },
+ { 0, 0, 0, 0, 4, 32, },
+ { 2, 0, 0, 0, 4, 32, },
+ { 1, 0, 0, 0, 4, 32, },
+ { 0, 0, 0, 0, 5, 32, },
+ { 2, 0, 0, 0, 5, 32, },
+ { 1, 0, 0, 0, 5, 32, },
+ { 0, 0, 0, 0, 6, 32, },
+ { 2, 0, 0, 0, 6, 32, },
+ { 1, 0, 0, 0, 6, 32, },
+ { 0, 0, 0, 0, 7, 32, },
+ { 2, 0, 0, 0, 7, 32, },
+ { 1, 0, 0, 0, 7, 32, },
+ { 0, 0, 0, 0, 8, 32, },
+ { 2, 0, 0, 0, 8, 32, },
+ { 1, 0, 0, 0, 8, 32, },
+ { 0, 0, 0, 0, 9, 32, },
+ { 2, 0, 0, 0, 9, 32, },
+ { 1, 0, 0, 0, 9, 32, },
+ { 0, 0, 0, 0, 10, 32, },
+ { 2, 0, 0, 0, 10, 32, },
+ { 1, 0, 0, 0, 10, 32, },
+ { 0, 0, 0, 0, 11, 32, },
+ { 2, 0, 0, 0, 11, 32, },
+ { 1, 0, 0, 0, 11, 32, },
+ { 0, 0, 0, 0, 12, 24, },
+ { 2, 0, 0, 0, 12, 32, },
+ { 1, 0, 0, 0, 12, 32, },
+ { 0, 0, 0, 0, 13, 16, },
+ { 2, 0, 0, 0, 13, 32, },
+ { 1, 0, 0, 0, 13, 32, },
+ { 0, 0, 0, 0, 14, 63, },
+ { 2, 0, 0, 0, 14, 63, },
+ { 1, 0, 0, 0, 14, 32, },
+ { 0, 0, 0, 1, 1, 28, },
+ { 2, 0, 0, 1, 1, 32, },
+ { 1, 0, 0, 1, 1, 32, },
+ { 0, 0, 0, 1, 2, 32, },
+ { 2, 0, 0, 1, 2, 32, },
+ { 1, 0, 0, 1, 2, 32, },
+ { 0, 0, 0, 1, 3, 32, },
+ { 2, 0, 0, 1, 3, 32, },
+ { 1, 0, 0, 1, 3, 32, },
+ { 0, 0, 0, 1, 4, 32, },
+ { 2, 0, 0, 1, 4, 32, },
+ { 1, 0, 0, 1, 4, 32, },
+ { 0, 0, 0, 1, 5, 32, },
+ { 2, 0, 0, 1, 5, 32, },
+ { 1, 0, 0, 1, 5, 32, },
+ { 0, 0, 0, 1, 6, 32, },
+ { 2, 0, 0, 1, 6, 32, },
+ { 1, 0, 0, 1, 6, 32, },
+ { 0, 0, 0, 1, 7, 32, },
+ { 2, 0, 0, 1, 7, 32, },
+ { 1, 0, 0, 1, 7, 32, },
+ { 0, 0, 0, 1, 8, 32, },
+ { 2, 0, 0, 1, 8, 32, },
+ { 1, 0, 0, 1, 8, 32, },
+ { 0, 0, 0, 1, 9, 32, },
+ { 2, 0, 0, 1, 9, 32, },
+ { 1, 0, 0, 1, 9, 32, },
+ { 0, 0, 0, 1, 10, 32, },
+ { 2, 0, 0, 1, 10, 32, },
+ { 1, 0, 0, 1, 10, 32, },
+ { 0, 0, 0, 1, 11, 28, },
+ { 2, 0, 0, 1, 11, 32, },
+ { 1, 0, 0, 1, 11, 32, },
+ { 0, 0, 0, 1, 12, 18, },
+ { 2, 0, 0, 1, 12, 32, },
+ { 1, 0, 0, 1, 12, 32, },
+ { 0, 0, 0, 1, 13, 8, },
+ { 2, 0, 0, 1, 13, 32, },
+ { 1, 0, 0, 1, 13, 32, },
+ { 0, 0, 0, 1, 14, 63, },
+ { 2, 0, 0, 1, 14, 63, },
+ { 1, 0, 0, 1, 14, 63, },
+ { 0, 0, 0, 2, 1, 26, },
+ { 2, 0, 0, 2, 1, 32, },
+ { 1, 0, 0, 2, 1, 32, },
+ { 0, 0, 0, 2, 2, 32, },
+ { 2, 0, 0, 2, 2, 32, },
+ { 1, 0, 0, 2, 2, 32, },
+ { 0, 0, 0, 2, 3, 32, },
+ { 2, 0, 0, 2, 3, 32, },
+ { 1, 0, 0, 2, 3, 32, },
+ { 0, 0, 0, 2, 4, 32, },
+ { 2, 0, 0, 2, 4, 32, },
+ { 1, 0, 0, 2, 4, 32, },
+ { 0, 0, 0, 2, 5, 32, },
+ { 2, 0, 0, 2, 5, 32, },
+ { 1, 0, 0, 2, 5, 32, },
+ { 0, 0, 0, 2, 6, 32, },
+ { 2, 0, 0, 2, 6, 32, },
+ { 1, 0, 0, 2, 6, 32, },
+ { 0, 0, 0, 2, 7, 32, },
+ { 2, 0, 0, 2, 7, 32, },
+ { 1, 0, 0, 2, 7, 32, },
+ { 0, 0, 0, 2, 8, 32, },
+ { 2, 0, 0, 2, 8, 32, },
+ { 1, 0, 0, 2, 8, 32, },
+ { 0, 0, 0, 2, 9, 32, },
+ { 2, 0, 0, 2, 9, 32, },
+ { 1, 0, 0, 2, 9, 32, },
+ { 0, 0, 0, 2, 10, 32, },
+ { 2, 0, 0, 2, 10, 32, },
+ { 1, 0, 0, 2, 10, 32, },
+ { 0, 0, 0, 2, 11, 26, },
+ { 2, 0, 0, 2, 11, 32, },
+ { 1, 0, 0, 2, 11, 32, },
+ { 0, 0, 0, 2, 12, 16, },
+ { 2, 0, 0, 2, 12, 32, },
+ { 1, 0, 0, 2, 12, 32, },
+ { 0, 0, 0, 2, 13, 6, },
+ { 2, 0, 0, 2, 13, 32, },
+ { 1, 0, 0, 2, 13, 32, },
+ { 0, 0, 0, 2, 14, 63, },
+ { 2, 0, 0, 2, 14, 63, },
+ { 1, 0, 0, 2, 14, 63, },
+ { 0, 0, 0, 3, 1, 24, },
+ { 2, 0, 0, 3, 1, 30, },
+ { 1, 0, 0, 3, 1, 30, },
+ { 0, 0, 0, 3, 2, 30, },
+ { 2, 0, 0, 3, 2, 30, },
+ { 1, 0, 0, 3, 2, 30, },
+ { 0, 0, 0, 3, 3, 30, },
+ { 2, 0, 0, 3, 3, 30, },
+ { 1, 0, 0, 3, 3, 30, },
+ { 0, 0, 0, 3, 4, 30, },
+ { 2, 0, 0, 3, 4, 30, },
+ { 1, 0, 0, 3, 4, 30, },
+ { 0, 0, 0, 3, 5, 30, },
+ { 2, 0, 0, 3, 5, 30, },
+ { 1, 0, 0, 3, 5, 30, },
+ { 0, 0, 0, 3, 6, 30, },
+ { 2, 0, 0, 3, 6, 30, },
+ { 1, 0, 0, 3, 6, 30, },
+ { 0, 0, 0, 3, 7, 30, },
+ { 2, 0, 0, 3, 7, 30, },
+ { 1, 0, 0, 3, 7, 30, },
+ { 0, 0, 0, 3, 8, 30, },
+ { 2, 0, 0, 3, 8, 30, },
+ { 1, 0, 0, 3, 8, 30, },
+ { 0, 0, 0, 3, 9, 30, },
+ { 2, 0, 0, 3, 9, 30, },
+ { 1, 0, 0, 3, 9, 30, },
+ { 0, 0, 0, 3, 10, 30, },
+ { 2, 0, 0, 3, 10, 30, },
+ { 1, 0, 0, 3, 10, 30, },
+ { 0, 0, 0, 3, 11, 24, },
+ { 2, 0, 0, 3, 11, 30, },
+ { 1, 0, 0, 3, 11, 30, },
+ { 0, 0, 0, 3, 12, 14, },
+ { 2, 0, 0, 3, 12, 30, },
+ { 1, 0, 0, 3, 12, 30, },
+ { 0, 0, 0, 3, 13, 4, },
+ { 2, 0, 0, 3, 13, 30, },
+ { 1, 0, 0, 3, 13, 30, },
+ { 0, 0, 0, 3, 14, 63, },
+ { 2, 0, 0, 3, 14, 63, },
+ { 1, 0, 0, 3, 14, 63, },
+ { 0, 0, 0, 6, 1, 22, },
+ { 2, 0, 0, 6, 1, 28, },
+ { 1, 0, 0, 6, 1, 28, },
+ { 0, 0, 0, 6, 2, 28, },
+ { 2, 0, 0, 6, 2, 28, },
+ { 1, 0, 0, 6, 2, 28, },
+ { 0, 0, 0, 6, 3, 28, },
+ { 2, 0, 0, 6, 3, 28, },
+ { 1, 0, 0, 6, 3, 28, },
+ { 0, 0, 0, 6, 4, 28, },
+ { 2, 0, 0, 6, 4, 28, },
+ { 1, 0, 0, 6, 4, 28, },
+ { 0, 0, 0, 6, 5, 28, },
+ { 2, 0, 0, 6, 5, 28, },
+ { 1, 0, 0, 6, 5, 28, },
+ { 0, 0, 0, 6, 6, 28, },
+ { 2, 0, 0, 6, 6, 28, },
+ { 1, 0, 0, 6, 6, 28, },
+ { 0, 0, 0, 6, 7, 28, },
+ { 2, 0, 0, 6, 7, 28, },
+ { 1, 0, 0, 6, 7, 28, },
+ { 0, 0, 0, 6, 8, 28, },
+ { 2, 0, 0, 6, 8, 28, },
+ { 1, 0, 0, 6, 8, 28, },
+ { 0, 0, 0, 6, 9, 28, },
+ { 2, 0, 0, 6, 9, 28, },
+ { 1, 0, 0, 6, 9, 28, },
+ { 0, 0, 0, 6, 10, 28, },
+ { 2, 0, 0, 6, 10, 28, },
+ { 1, 0, 0, 6, 10, 28, },
+ { 0, 0, 0, 6, 11, 22, },
+ { 2, 0, 0, 6, 11, 28, },
+ { 1, 0, 0, 6, 11, 28, },
+ { 0, 0, 0, 6, 12, 14, },
+ { 2, 0, 0, 6, 12, 28, },
+ { 1, 0, 0, 6, 12, 28, },
+ { 0, 0, 0, 6, 13, 4, },
+ { 2, 0, 0, 6, 13, 28, },
+ { 1, 0, 0, 6, 13, 28, },
+ { 0, 0, 0, 6, 14, 63, },
+ { 2, 0, 0, 6, 14, 63, },
+ { 1, 0, 0, 6, 14, 63, },
+ { 0, 0, 0, 7, 1, 20, },
+ { 2, 0, 0, 7, 1, 26, },
+ { 1, 0, 0, 7, 1, 26, },
+ { 0, 0, 0, 7, 2, 26, },
+ { 2, 0, 0, 7, 2, 26, },
+ { 1, 0, 0, 7, 2, 26, },
+ { 0, 0, 0, 7, 3, 26, },
+ { 2, 0, 0, 7, 3, 26, },
+ { 1, 0, 0, 7, 3, 26, },
+ { 0, 0, 0, 7, 4, 26, },
+ { 2, 0, 0, 7, 4, 26, },
+ { 1, 0, 0, 7, 4, 26, },
+ { 0, 0, 0, 7, 5, 26, },
+ { 2, 0, 0, 7, 5, 26, },
+ { 1, 0, 0, 7, 5, 26, },
+ { 0, 0, 0, 7, 6, 26, },
+ { 2, 0, 0, 7, 6, 26, },
+ { 1, 0, 0, 7, 6, 26, },
+ { 0, 0, 0, 7, 7, 26, },
+ { 2, 0, 0, 7, 7, 26, },
+ { 1, 0, 0, 7, 7, 26, },
+ { 0, 0, 0, 7, 8, 26, },
+ { 2, 0, 0, 7, 8, 26, },
+ { 1, 0, 0, 7, 8, 26, },
+ { 0, 0, 0, 7, 9, 26, },
+ { 2, 0, 0, 7, 9, 26, },
+ { 1, 0, 0, 7, 9, 26, },
+ { 0, 0, 0, 7, 10, 26, },
+ { 2, 0, 0, 7, 10, 26, },
+ { 1, 0, 0, 7, 10, 26, },
+ { 0, 0, 0, 7, 11, 20, },
+ { 2, 0, 0, 7, 11, 26, },
+ { 1, 0, 0, 7, 11, 26, },
+ { 0, 0, 0, 7, 12, 14, },
+ { 2, 0, 0, 7, 12, 26, },
+ { 1, 0, 0, 7, 12, 26, },
+ { 0, 0, 0, 7, 13, 4, },
+ { 2, 0, 0, 7, 13, 26, },
+ { 1, 0, 0, 7, 13, 26, },
+ { 0, 0, 0, 7, 14, 63, },
+ { 2, 0, 0, 7, 14, 63, },
+ { 1, 0, 0, 7, 14, 63, },
+ { 0, 0, 1, 2, 1, 63, },
+ { 2, 0, 1, 2, 1, 63, },
+ { 1, 0, 1, 2, 1, 63, },
+ { 0, 0, 1, 2, 2, 63, },
+ { 2, 0, 1, 2, 2, 63, },
+ { 1, 0, 1, 2, 2, 63, },
+ { 0, 0, 1, 2, 3, 26, },
+ { 2, 0, 1, 2, 3, 32, },
+ { 1, 0, 1, 2, 3, 32, },
+ { 0, 0, 1, 2, 4, 32, },
+ { 2, 0, 1, 2, 4, 32, },
+ { 1, 0, 1, 2, 4, 32, },
+ { 0, 0, 1, 2, 5, 32, },
+ { 2, 0, 1, 2, 5, 32, },
+ { 1, 0, 1, 2, 5, 32, },
+ { 0, 0, 1, 2, 6, 32, },
+ { 2, 0, 1, 2, 6, 32, },
+ { 1, 0, 1, 2, 6, 32, },
+ { 0, 0, 1, 2, 7, 32, },
+ { 2, 0, 1, 2, 7, 32, },
+ { 1, 0, 1, 2, 7, 32, },
+ { 0, 0, 1, 2, 8, 32, },
+ { 2, 0, 1, 2, 8, 32, },
+ { 1, 0, 1, 2, 8, 32, },
+ { 0, 0, 1, 2, 9, 32, },
+ { 2, 0, 1, 2, 9, 32, },
+ { 1, 0, 1, 2, 9, 32, },
+ { 0, 0, 1, 2, 10, 32, },
+ { 2, 0, 1, 2, 10, 32, },
+ { 1, 0, 1, 2, 10, 32, },
+ { 0, 0, 1, 2, 11, 26, },
+ { 2, 0, 1, 2, 11, 32, },
+ { 1, 0, 1, 2, 11, 32, },
+ { 0, 0, 1, 2, 12, 16, },
+ { 2, 0, 1, 2, 12, 32, },
+ { 1, 0, 1, 2, 12, 32, },
+ { 0, 0, 1, 2, 13, 10, },
+ { 2, 0, 1, 2, 13, 32, },
+ { 1, 0, 1, 2, 13, 32, },
+ { 0, 0, 1, 2, 14, 63, },
+ { 2, 0, 1, 2, 14, 63, },
+ { 1, 0, 1, 2, 14, 63, },
+ { 0, 0, 1, 3, 1, 63, },
+ { 2, 0, 1, 3, 1, 63, },
+ { 1, 0, 1, 3, 1, 63, },
+ { 0, 0, 1, 3, 2, 63, },
+ { 2, 0, 1, 3, 2, 63, },
+ { 1, 0, 1, 3, 2, 63, },
+ { 0, 0, 1, 3, 3, 24, },
+ { 2, 0, 1, 3, 3, 30, },
+ { 1, 0, 1, 3, 3, 30, },
+ { 0, 0, 1, 3, 4, 30, },
+ { 2, 0, 1, 3, 4, 30, },
+ { 1, 0, 1, 3, 4, 30, },
+ { 0, 0, 1, 3, 5, 30, },
+ { 2, 0, 1, 3, 5, 30, },
+ { 1, 0, 1, 3, 5, 30, },
+ { 0, 0, 1, 3, 6, 30, },
+ { 2, 0, 1, 3, 6, 30, },
+ { 1, 0, 1, 3, 6, 30, },
+ { 0, 0, 1, 3, 7, 30, },
+ { 2, 0, 1, 3, 7, 30, },
+ { 1, 0, 1, 3, 7, 30, },
+ { 0, 0, 1, 3, 8, 30, },
+ { 2, 0, 1, 3, 8, 30, },
+ { 1, 0, 1, 3, 8, 30, },
+ { 0, 0, 1, 3, 9, 30, },
+ { 2, 0, 1, 3, 9, 30, },
+ { 1, 0, 1, 3, 9, 30, },
+ { 0, 0, 1, 3, 10, 30, },
+ { 2, 0, 1, 3, 10, 30, },
+ { 1, 0, 1, 3, 10, 30, },
+ { 0, 0, 1, 3, 11, 24, },
+ { 2, 0, 1, 3, 11, 30, },
+ { 1, 0, 1, 3, 11, 30, },
+ { 0, 0, 1, 3, 12, 14, },
+ { 2, 0, 1, 3, 12, 30, },
+ { 1, 0, 1, 3, 12, 30, },
+ { 0, 0, 1, 3, 13, 8, },
+ { 2, 0, 1, 3, 13, 30, },
+ { 1, 0, 1, 3, 13, 30, },
+ { 0, 0, 1, 3, 14, 63, },
+ { 2, 0, 1, 3, 14, 63, },
+ { 1, 0, 1, 3, 14, 63, },
+ { 0, 0, 1, 6, 1, 63, },
+ { 2, 0, 1, 6, 1, 63, },
+ { 1, 0, 1, 6, 1, 63, },
+ { 0, 0, 1, 6, 2, 63, },
+ { 2, 0, 1, 6, 2, 63, },
+ { 1, 0, 1, 6, 2, 63, },
+ { 0, 0, 1, 6, 3, 22, },
+ { 2, 0, 1, 6, 3, 28, },
+ { 1, 0, 1, 6, 3, 28, },
+ { 0, 0, 1, 6, 4, 28, },
+ { 2, 0, 1, 6, 4, 28, },
+ { 1, 0, 1, 6, 4, 28, },
+ { 0, 0, 1, 6, 5, 28, },
+ { 2, 0, 1, 6, 5, 28, },
+ { 1, 0, 1, 6, 5, 28, },
+ { 0, 0, 1, 6, 6, 28, },
+ { 2, 0, 1, 6, 6, 28, },
+ { 1, 0, 1, 6, 6, 28, },
+ { 0, 0, 1, 6, 7, 28, },
+ { 2, 0, 1, 6, 7, 28, },
+ { 1, 0, 1, 6, 7, 28, },
+ { 0, 0, 1, 6, 8, 28, },
+ { 2, 0, 1, 6, 8, 28, },
+ { 1, 0, 1, 6, 8, 28, },
+ { 0, 0, 1, 6, 9, 28, },
+ { 2, 0, 1, 6, 9, 28, },
+ { 1, 0, 1, 6, 9, 28, },
+ { 0, 0, 1, 6, 10, 28, },
+ { 2, 0, 1, 6, 10, 28, },
+ { 1, 0, 1, 6, 10, 28, },
+ { 0, 0, 1, 6, 11, 22, },
+ { 2, 0, 1, 6, 11, 28, },
+ { 1, 0, 1, 6, 11, 28, },
+ { 0, 0, 1, 6, 12, 14, },
+ { 2, 0, 1, 6, 12, 28, },
+ { 1, 0, 1, 6, 12, 28, },
+ { 0, 0, 1, 6, 13, 8, },
+ { 2, 0, 1, 6, 13, 28, },
+ { 1, 0, 1, 6, 13, 28, },
+ { 0, 0, 1, 6, 14, 63, },
+ { 2, 0, 1, 6, 14, 63, },
+ { 1, 0, 1, 6, 14, 63, },
+ { 0, 0, 1, 7, 1, 63, },
+ { 2, 0, 1, 7, 1, 63, },
+ { 1, 0, 1, 7, 1, 63, },
+ { 0, 0, 1, 7, 2, 63, },
+ { 2, 0, 1, 7, 2, 63, },
+ { 1, 0, 1, 7, 2, 63, },
+ { 0, 0, 1, 7, 3, 20, },
+ { 2, 0, 1, 7, 3, 26, },
+ { 1, 0, 1, 7, 3, 26, },
+ { 0, 0, 1, 7, 4, 26, },
+ { 2, 0, 1, 7, 4, 26, },
+ { 1, 0, 1, 7, 4, 26, },
+ { 0, 0, 1, 7, 5, 26, },
+ { 2, 0, 1, 7, 5, 26, },
+ { 1, 0, 1, 7, 5, 26, },
+ { 0, 0, 1, 7, 6, 26, },
+ { 2, 0, 1, 7, 6, 26, },
+ { 1, 0, 1, 7, 6, 26, },
+ { 0, 0, 1, 7, 7, 26, },
+ { 2, 0, 1, 7, 7, 26, },
+ { 1, 0, 1, 7, 7, 26, },
+ { 0, 0, 1, 7, 8, 26, },
+ { 2, 0, 1, 7, 8, 26, },
+ { 1, 0, 1, 7, 8, 26, },
+ { 0, 0, 1, 7, 9, 26, },
+ { 2, 0, 1, 7, 9, 26, },
+ { 1, 0, 1, 7, 9, 26, },
+ { 0, 0, 1, 7, 10, 26, },
+ { 2, 0, 1, 7, 10, 26, },
+ { 1, 0, 1, 7, 10, 26, },
+ { 0, 0, 1, 7, 11, 20, },
+ { 2, 0, 1, 7, 11, 26, },
+ { 1, 0, 1, 7, 11, 26, },
+ { 0, 0, 1, 7, 12, 14, },
+ { 2, 0, 1, 7, 12, 26, },
+ { 1, 0, 1, 7, 12, 26, },
+ { 0, 0, 1, 7, 13, 8, },
+ { 2, 0, 1, 7, 13, 26, },
+ { 1, 0, 1, 7, 13, 26, },
+ { 0, 0, 1, 7, 14, 63, },
+ { 2, 0, 1, 7, 14, 63, },
+ { 1, 0, 1, 7, 14, 63, },
+ { 0, 1, 0, 1, 36, 28, },
+ { 2, 1, 0, 1, 36, 32, },
+ { 1, 1, 0, 1, 36, 32, },
+ { 0, 1, 0, 1, 40, 32, },
+ { 2, 1, 0, 1, 40, 32, },
+ { 1, 1, 0, 1, 40, 32, },
+ { 0, 1, 0, 1, 44, 32, },
+ { 2, 1, 0, 1, 44, 32, },
+ { 1, 1, 0, 1, 44, 32, },
+ { 0, 1, 0, 1, 48, 32, },
+ { 2, 1, 0, 1, 48, 32, },
+ { 1, 1, 0, 1, 48, 32, },
+ { 0, 1, 0, 1, 52, 32, },
+ { 2, 1, 0, 1, 52, 32, },
+ { 1, 1, 0, 1, 52, 32, },
+ { 0, 1, 0, 1, 56, 32, },
+ { 2, 1, 0, 1, 56, 32, },
+ { 1, 1, 0, 1, 56, 32, },
+ { 0, 1, 0, 1, 60, 32, },
+ { 2, 1, 0, 1, 60, 32, },
+ { 1, 1, 0, 1, 60, 32, },
+ { 0, 1, 0, 1, 64, 28, },
+ { 2, 1, 0, 1, 64, 32, },
+ { 1, 1, 0, 1, 64, 32, },
+ { 0, 1, 0, 1, 100, 28, },
+ { 2, 1, 0, 1, 100, 32, },
+ { 1, 1, 0, 1, 100, 32, },
+ { 0, 1, 0, 1, 104, 32, },
+ { 2, 1, 0, 1, 104, 32, },
+ { 1, 1, 0, 1, 104, 32, },
+ { 0, 1, 0, 1, 108, 32, },
+ { 2, 1, 0, 1, 108, 32, },
+ { 1, 1, 0, 1, 108, 32, },
+ { 0, 1, 0, 1, 112, 32, },
+ { 2, 1, 0, 1, 112, 32, },
+ { 1, 1, 0, 1, 112, 32, },
+ { 0, 1, 0, 1, 116, 32, },
+ { 2, 1, 0, 1, 116, 32, },
+ { 1, 1, 0, 1, 116, 32, },
+ { 0, 1, 0, 1, 120, 32, },
+ { 2, 1, 0, 1, 120, 32, },
+ { 1, 1, 0, 1, 120, 32, },
+ { 0, 1, 0, 1, 124, 32, },
+ { 2, 1, 0, 1, 124, 32, },
+ { 1, 1, 0, 1, 124, 32, },
+ { 0, 1, 0, 1, 128, 32, },
+ { 2, 1, 0, 1, 128, 32, },
+ { 1, 1, 0, 1, 128, 32, },
+ { 0, 1, 0, 1, 132, 32, },
+ { 2, 1, 0, 1, 132, 32, },
+ { 1, 1, 0, 1, 132, 32, },
+ { 0, 1, 0, 1, 136, 32, },
+ { 2, 1, 0, 1, 136, 32, },
+ { 1, 1, 0, 1, 136, 32, },
+ { 0, 1, 0, 1, 140, 28, },
+ { 2, 1, 0, 1, 140, 32, },
+ { 1, 1, 0, 1, 140, 32, },
+ { 0, 1, 0, 1, 149, 28, },
+ { 2, 1, 0, 1, 149, 32, },
+ { 1, 1, 0, 1, 149, 63, },
+ { 0, 1, 0, 1, 153, 32, },
+ { 2, 1, 0, 1, 153, 32, },
+ { 1, 1, 0, 1, 153, 63, },
+ { 0, 1, 0, 1, 157, 32, },
+ { 2, 1, 0, 1, 157, 32, },
+ { 1, 1, 0, 1, 157, 63, },
+ { 0, 1, 0, 1, 161, 32, },
+ { 2, 1, 0, 1, 161, 32, },
+ { 1, 1, 0, 1, 161, 63, },
+ { 0, 1, 0, 1, 165, 32, },
+ { 2, 1, 0, 1, 165, 32, },
+ { 1, 1, 0, 1, 165, 63, },
+ { 0, 1, 0, 2, 36, 26, },
+ { 2, 1, 0, 2, 36, 32, },
+ { 1, 1, 0, 2, 36, 32, },
+ { 0, 1, 0, 2, 40, 32, },
+ { 2, 1, 0, 2, 40, 32, },
+ { 1, 1, 0, 2, 40, 32, },
+ { 0, 1, 0, 2, 44, 32, },
+ { 2, 1, 0, 2, 44, 32, },
+ { 1, 1, 0, 2, 44, 32, },
+ { 0, 1, 0, 2, 48, 32, },
+ { 2, 1, 0, 2, 48, 32, },
+ { 1, 1, 0, 2, 48, 32, },
+ { 0, 1, 0, 2, 52, 32, },
+ { 2, 1, 0, 2, 52, 32, },
+ { 1, 1, 0, 2, 52, 32, },
+ { 0, 1, 0, 2, 56, 32, },
+ { 2, 1, 0, 2, 56, 32, },
+ { 1, 1, 0, 2, 56, 32, },
+ { 0, 1, 0, 2, 60, 32, },
+ { 2, 1, 0, 2, 60, 32, },
+ { 1, 1, 0, 2, 60, 32, },
+ { 0, 1, 0, 2, 64, 26, },
+ { 2, 1, 0, 2, 64, 32, },
+ { 1, 1, 0, 2, 64, 32, },
+ { 0, 1, 0, 2, 100, 26, },
+ { 2, 1, 0, 2, 100, 32, },
+ { 1, 1, 0, 2, 100, 32, },
+ { 0, 1, 0, 2, 104, 32, },
+ { 2, 1, 0, 2, 104, 32, },
+ { 1, 1, 0, 2, 104, 32, },
+ { 0, 1, 0, 2, 108, 32, },
+ { 2, 1, 0, 2, 108, 32, },
+ { 1, 1, 0, 2, 108, 32, },
+ { 0, 1, 0, 2, 112, 32, },
+ { 2, 1, 0, 2, 112, 32, },
+ { 1, 1, 0, 2, 112, 32, },
+ { 0, 1, 0, 2, 116, 32, },
+ { 2, 1, 0, 2, 116, 32, },
+ { 1, 1, 0, 2, 116, 32, },
+ { 0, 1, 0, 2, 120, 32, },
+ { 2, 1, 0, 2, 120, 32, },
+ { 1, 1, 0, 2, 120, 32, },
+ { 0, 1, 0, 2, 124, 32, },
+ { 2, 1, 0, 2, 124, 32, },
+ { 1, 1, 0, 2, 124, 32, },
+ { 0, 1, 0, 2, 128, 32, },
+ { 2, 1, 0, 2, 128, 32, },
+ { 1, 1, 0, 2, 128, 32, },
+ { 0, 1, 0, 2, 132, 32, },
+ { 2, 1, 0, 2, 132, 32, },
+ { 1, 1, 0, 2, 132, 32, },
+ { 0, 1, 0, 2, 136, 32, },
+ { 2, 1, 0, 2, 136, 32, },
+ { 1, 1, 0, 2, 136, 32, },
+ { 0, 1, 0, 2, 140, 26, },
+ { 2, 1, 0, 2, 140, 32, },
+ { 1, 1, 0, 2, 140, 32, },
+ { 0, 1, 0, 2, 149, 26, },
+ { 2, 1, 0, 2, 149, 32, },
+ { 1, 1, 0, 2, 149, 63, },
+ { 0, 1, 0, 2, 153, 32, },
+ { 2, 1, 0, 2, 153, 32, },
+ { 1, 1, 0, 2, 153, 63, },
+ { 0, 1, 0, 2, 157, 32, },
+ { 2, 1, 0, 2, 157, 32, },
+ { 1, 1, 0, 2, 157, 63, },
+ { 0, 1, 0, 2, 161, 32, },
+ { 2, 1, 0, 2, 161, 32, },
+ { 1, 1, 0, 2, 161, 63, },
+ { 0, 1, 0, 2, 165, 32, },
+ { 2, 1, 0, 2, 165, 32, },
+ { 1, 1, 0, 2, 165, 63, },
+ { 0, 1, 0, 3, 36, 24, },
+ { 2, 1, 0, 3, 36, 28, },
+ { 1, 1, 0, 3, 36, 28, },
+ { 0, 1, 0, 3, 40, 28, },
+ { 2, 1, 0, 3, 40, 28, },
+ { 1, 1, 0, 3, 40, 28, },
+ { 0, 1, 0, 3, 44, 28, },
+ { 2, 1, 0, 3, 44, 28, },
+ { 1, 1, 0, 3, 44, 28, },
+ { 0, 1, 0, 3, 48, 28, },
+ { 2, 1, 0, 3, 48, 28, },
+ { 1, 1, 0, 3, 48, 28, },
+ { 0, 1, 0, 3, 52, 28, },
+ { 2, 1, 0, 3, 52, 28, },
+ { 1, 1, 0, 3, 52, 28, },
+ { 0, 1, 0, 3, 56, 28, },
+ { 2, 1, 0, 3, 56, 28, },
+ { 1, 1, 0, 3, 56, 28, },
+ { 0, 1, 0, 3, 60, 28, },
+ { 2, 1, 0, 3, 60, 28, },
+ { 1, 1, 0, 3, 60, 28, },
+ { 0, 1, 0, 3, 64, 24, },
+ { 2, 1, 0, 3, 64, 28, },
+ { 1, 1, 0, 3, 64, 28, },
+ { 0, 1, 0, 3, 100, 24, },
+ { 2, 1, 0, 3, 100, 28, },
+ { 1, 1, 0, 3, 100, 28, },
+ { 0, 1, 0, 3, 104, 28, },
+ { 2, 1, 0, 3, 104, 28, },
+ { 1, 1, 0, 3, 104, 28, },
+ { 0, 1, 0, 3, 108, 28, },
+ { 2, 1, 0, 3, 108, 28, },
+ { 1, 1, 0, 3, 108, 28, },
+ { 0, 1, 0, 3, 112, 28, },
+ { 2, 1, 0, 3, 112, 28, },
+ { 1, 1, 0, 3, 112, 28, },
+ { 0, 1, 0, 3, 116, 28, },
+ { 2, 1, 0, 3, 116, 28, },
+ { 1, 1, 0, 3, 116, 28, },
+ { 0, 1, 0, 3, 120, 28, },
+ { 2, 1, 0, 3, 120, 28, },
+ { 1, 1, 0, 3, 120, 28, },
+ { 0, 1, 0, 3, 124, 28, },
+ { 2, 1, 0, 3, 124, 28, },
+ { 1, 1, 0, 3, 124, 28, },
+ { 0, 1, 0, 3, 128, 28, },
+ { 2, 1, 0, 3, 128, 28, },
+ { 1, 1, 0, 3, 128, 28, },
+ { 0, 1, 0, 3, 132, 28, },
+ { 2, 1, 0, 3, 132, 28, },
+ { 1, 1, 0, 3, 132, 28, },
+ { 0, 1, 0, 3, 136, 28, },
+ { 2, 1, 0, 3, 136, 28, },
+ { 1, 1, 0, 3, 136, 28, },
+ { 0, 1, 0, 3, 140, 24, },
+ { 2, 1, 0, 3, 140, 28, },
+ { 1, 1, 0, 3, 140, 28, },
+ { 0, 1, 0, 3, 149, 24, },
+ { 2, 1, 0, 3, 149, 28, },
+ { 1, 1, 0, 3, 149, 63, },
+ { 0, 1, 0, 3, 153, 28, },
+ { 2, 1, 0, 3, 153, 28, },
+ { 1, 1, 0, 3, 153, 63, },
+ { 0, 1, 0, 3, 157, 28, },
+ { 2, 1, 0, 3, 157, 28, },
+ { 1, 1, 0, 3, 157, 63, },
+ { 0, 1, 0, 3, 161, 28, },
+ { 2, 1, 0, 3, 161, 28, },
+ { 1, 1, 0, 3, 161, 63, },
+ { 0, 1, 0, 3, 165, 28, },
+ { 2, 1, 0, 3, 165, 28, },
+ { 1, 1, 0, 3, 165, 63, },
+ { 0, 1, 0, 6, 36, 22, },
+ { 2, 1, 0, 6, 36, 26, },
+ { 1, 1, 0, 6, 36, 26, },
+ { 0, 1, 0, 6, 40, 26, },
+ { 2, 1, 0, 6, 40, 26, },
+ { 1, 1, 0, 6, 40, 26, },
+ { 0, 1, 0, 6, 44, 26, },
+ { 2, 1, 0, 6, 44, 26, },
+ { 1, 1, 0, 6, 44, 26, },
+ { 0, 1, 0, 6, 48, 26, },
+ { 2, 1, 0, 6, 48, 26, },
+ { 1, 1, 0, 6, 48, 26, },
+ { 0, 1, 0, 6, 52, 26, },
+ { 2, 1, 0, 6, 52, 26, },
+ { 1, 1, 0, 6, 52, 26, },
+ { 0, 1, 0, 6, 56, 26, },
+ { 2, 1, 0, 6, 56, 26, },
+ { 1, 1, 0, 6, 56, 26, },
+ { 0, 1, 0, 6, 60, 26, },
+ { 2, 1, 0, 6, 60, 26, },
+ { 1, 1, 0, 6, 60, 26, },
+ { 0, 1, 0, 6, 64, 22, },
+ { 2, 1, 0, 6, 64, 26, },
+ { 1, 1, 0, 6, 64, 26, },
+ { 0, 1, 0, 6, 100, 22, },
+ { 2, 1, 0, 6, 100, 26, },
+ { 1, 1, 0, 6, 100, 26, },
+ { 0, 1, 0, 6, 104, 26, },
+ { 2, 1, 0, 6, 104, 26, },
+ { 1, 1, 0, 6, 104, 26, },
+ { 0, 1, 0, 6, 108, 26, },
+ { 2, 1, 0, 6, 108, 26, },
+ { 1, 1, 0, 6, 108, 26, },
+ { 0, 1, 0, 6, 112, 26, },
+ { 2, 1, 0, 6, 112, 26, },
+ { 1, 1, 0, 6, 112, 26, },
+ { 0, 1, 0, 6, 116, 26, },
+ { 2, 1, 0, 6, 116, 26, },
+ { 1, 1, 0, 6, 116, 26, },
+ { 0, 1, 0, 6, 120, 26, },
+ { 2, 1, 0, 6, 120, 26, },
+ { 1, 1, 0, 6, 120, 26, },
+ { 0, 1, 0, 6, 124, 26, },
+ { 2, 1, 0, 6, 124, 26, },
+ { 1, 1, 0, 6, 124, 26, },
+ { 0, 1, 0, 6, 128, 26, },
+ { 2, 1, 0, 6, 128, 26, },
+ { 1, 1, 0, 6, 128, 26, },
+ { 0, 1, 0, 6, 132, 26, },
+ { 2, 1, 0, 6, 132, 26, },
+ { 1, 1, 0, 6, 132, 26, },
+ { 0, 1, 0, 6, 136, 26, },
+ { 2, 1, 0, 6, 136, 26, },
+ { 1, 1, 0, 6, 136, 26, },
+ { 0, 1, 0, 6, 140, 22, },
+ { 2, 1, 0, 6, 140, 26, },
+ { 1, 1, 0, 6, 140, 26, },
+ { 0, 1, 0, 6, 149, 22, },
+ { 2, 1, 0, 6, 149, 26, },
+ { 1, 1, 0, 6, 149, 63, },
+ { 0, 1, 0, 6, 153, 26, },
+ { 2, 1, 0, 6, 153, 26, },
+ { 1, 1, 0, 6, 153, 63, },
+ { 0, 1, 0, 6, 157, 26, },
+ { 2, 1, 0, 6, 157, 26, },
+ { 1, 1, 0, 6, 157, 63, },
+ { 0, 1, 0, 6, 161, 26, },
+ { 2, 1, 0, 6, 161, 26, },
+ { 1, 1, 0, 6, 161, 63, },
+ { 0, 1, 0, 6, 165, 26, },
+ { 2, 1, 0, 6, 165, 26, },
+ { 1, 1, 0, 6, 165, 63, },
+ { 0, 1, 0, 7, 36, 20, },
+ { 2, 1, 0, 7, 36, 24, },
+ { 1, 1, 0, 7, 36, 24, },
+ { 0, 1, 0, 7, 40, 24, },
+ { 2, 1, 0, 7, 40, 24, },
+ { 1, 1, 0, 7, 40, 24, },
+ { 0, 1, 0, 7, 44, 24, },
+ { 2, 1, 0, 7, 44, 24, },
+ { 1, 1, 0, 7, 44, 24, },
+ { 0, 1, 0, 7, 48, 24, },
+ { 2, 1, 0, 7, 48, 24, },
+ { 1, 1, 0, 7, 48, 24, },
+ { 0, 1, 0, 7, 52, 24, },
+ { 2, 1, 0, 7, 52, 24, },
+ { 1, 1, 0, 7, 52, 24, },
+ { 0, 1, 0, 7, 56, 24, },
+ { 2, 1, 0, 7, 56, 24, },
+ { 1, 1, 0, 7, 56, 24, },
+ { 0, 1, 0, 7, 60, 24, },
+ { 2, 1, 0, 7, 60, 24, },
+ { 1, 1, 0, 7, 60, 24, },
+ { 0, 1, 0, 7, 64, 20, },
+ { 2, 1, 0, 7, 64, 24, },
+ { 1, 1, 0, 7, 64, 24, },
+ { 0, 1, 0, 7, 100, 20, },
+ { 2, 1, 0, 7, 100, 24, },
+ { 1, 1, 0, 7, 100, 24, },
+ { 0, 1, 0, 7, 104, 24, },
+ { 2, 1, 0, 7, 104, 24, },
+ { 1, 1, 0, 7, 104, 24, },
+ { 0, 1, 0, 7, 108, 24, },
+ { 2, 1, 0, 7, 108, 24, },
+ { 1, 1, 0, 7, 108, 24, },
+ { 0, 1, 0, 7, 112, 24, },
+ { 2, 1, 0, 7, 112, 24, },
+ { 1, 1, 0, 7, 112, 24, },
+ { 0, 1, 0, 7, 116, 24, },
+ { 2, 1, 0, 7, 116, 24, },
+ { 1, 1, 0, 7, 116, 24, },
+ { 0, 1, 0, 7, 120, 24, },
+ { 2, 1, 0, 7, 120, 24, },
+ { 1, 1, 0, 7, 120, 24, },
+ { 0, 1, 0, 7, 124, 24, },
+ { 2, 1, 0, 7, 124, 24, },
+ { 1, 1, 0, 7, 124, 24, },
+ { 0, 1, 0, 7, 128, 24, },
+ { 2, 1, 0, 7, 128, 24, },
+ { 1, 1, 0, 7, 128, 24, },
+ { 0, 1, 0, 7, 132, 24, },
+ { 2, 1, 0, 7, 132, 24, },
+ { 1, 1, 0, 7, 132, 24, },
+ { 0, 1, 0, 7, 136, 24, },
+ { 2, 1, 0, 7, 136, 24, },
+ { 1, 1, 0, 7, 136, 24, },
+ { 0, 1, 0, 7, 140, 20, },
+ { 2, 1, 0, 7, 140, 24, },
+ { 1, 1, 0, 7, 140, 24, },
+ { 0, 1, 0, 7, 149, 20, },
+ { 2, 1, 0, 7, 149, 24, },
+ { 1, 1, 0, 7, 149, 63, },
+ { 0, 1, 0, 7, 153, 24, },
+ { 2, 1, 0, 7, 153, 24, },
+ { 1, 1, 0, 7, 153, 63, },
+ { 0, 1, 0, 7, 157, 24, },
+ { 2, 1, 0, 7, 157, 24, },
+ { 1, 1, 0, 7, 157, 63, },
+ { 0, 1, 0, 7, 161, 24, },
+ { 2, 1, 0, 7, 161, 24, },
+ { 1, 1, 0, 7, 161, 63, },
+ { 0, 1, 0, 7, 165, 24, },
+ { 2, 1, 0, 7, 165, 24, },
+ { 1, 1, 0, 7, 165, 63, },
+ { 0, 1, 1, 2, 38, 26, },
+ { 2, 1, 1, 2, 38, 32, },
+ { 1, 1, 1, 2, 38, 32, },
+ { 0, 1, 1, 2, 46, 32, },
+ { 2, 1, 1, 2, 46, 32, },
+ { 1, 1, 1, 2, 46, 32, },
+ { 0, 1, 1, 2, 54, 32, },
+ { 2, 1, 1, 2, 54, 32, },
+ { 1, 1, 1, 2, 54, 32, },
+ { 0, 1, 1, 2, 62, 26, },
+ { 2, 1, 1, 2, 62, 32, },
+ { 1, 1, 1, 2, 62, 32, },
+ { 0, 1, 1, 2, 102, 26, },
+ { 2, 1, 1, 2, 102, 32, },
+ { 1, 1, 1, 2, 102, 32, },
+ { 0, 1, 1, 2, 110, 32, },
+ { 2, 1, 1, 2, 110, 32, },
+ { 1, 1, 1, 2, 110, 32, },
+ { 0, 1, 1, 2, 118, 32, },
+ { 2, 1, 1, 2, 118, 32, },
+ { 1, 1, 1, 2, 118, 32, },
+ { 0, 1, 1, 2, 126, 32, },
+ { 2, 1, 1, 2, 126, 32, },
+ { 1, 1, 1, 2, 126, 32, },
+ { 0, 1, 1, 2, 134, 32, },
+ { 2, 1, 1, 2, 134, 32, },
+ { 1, 1, 1, 2, 134, 32, },
+ { 0, 1, 1, 2, 151, 26, },
+ { 2, 1, 1, 2, 151, 32, },
+ { 1, 1, 1, 2, 151, 63, },
+ { 0, 1, 1, 2, 159, 32, },
+ { 2, 1, 1, 2, 159, 32, },
+ { 1, 1, 1, 2, 159, 63, },
+ { 0, 1, 1, 3, 38, 24, },
+ { 2, 1, 1, 3, 38, 28, },
+ { 1, 1, 1, 3, 38, 28, },
+ { 0, 1, 1, 3, 46, 28, },
+ { 2, 1, 1, 3, 46, 28, },
+ { 1, 1, 1, 3, 46, 28, },
+ { 0, 1, 1, 3, 54, 28, },
+ { 2, 1, 1, 3, 54, 28, },
+ { 1, 1, 1, 3, 54, 28, },
+ { 0, 1, 1, 3, 62, 24, },
+ { 2, 1, 1, 3, 62, 28, },
+ { 1, 1, 1, 3, 62, 28, },
+ { 0, 1, 1, 3, 102, 24, },
+ { 2, 1, 1, 3, 102, 28, },
+ { 1, 1, 1, 3, 102, 28, },
+ { 0, 1, 1, 3, 110, 28, },
+ { 2, 1, 1, 3, 110, 28, },
+ { 1, 1, 1, 3, 110, 28, },
+ { 0, 1, 1, 3, 118, 28, },
+ { 2, 1, 1, 3, 118, 28, },
+ { 1, 1, 1, 3, 118, 28, },
+ { 0, 1, 1, 3, 126, 28, },
+ { 2, 1, 1, 3, 126, 28, },
+ { 1, 1, 1, 3, 126, 28, },
+ { 0, 1, 1, 3, 134, 28, },
+ { 2, 1, 1, 3, 134, 28, },
+ { 1, 1, 1, 3, 134, 28, },
+ { 0, 1, 1, 3, 151, 24, },
+ { 2, 1, 1, 3, 151, 28, },
+ { 1, 1, 1, 3, 151, 63, },
+ { 0, 1, 1, 3, 159, 28, },
+ { 2, 1, 1, 3, 159, 28, },
+ { 1, 1, 1, 3, 159, 63, },
+ { 0, 1, 1, 6, 38, 20, },
+ { 2, 1, 1, 6, 38, 26, },
+ { 1, 1, 1, 6, 38, 26, },
+ { 0, 1, 1, 6, 46, 26, },
+ { 2, 1, 1, 6, 46, 26, },
+ { 1, 1, 1, 6, 46, 26, },
+ { 0, 1, 1, 6, 54, 26, },
+ { 2, 1, 1, 6, 54, 26, },
+ { 1, 1, 1, 6, 54, 26, },
+ { 0, 1, 1, 6, 62, 20, },
+ { 2, 1, 1, 6, 62, 26, },
+ { 1, 1, 1, 6, 62, 26, },
+ { 0, 1, 1, 6, 102, 20, },
+ { 2, 1, 1, 6, 102, 26, },
+ { 1, 1, 1, 6, 102, 26, },
+ { 0, 1, 1, 6, 110, 26, },
+ { 2, 1, 1, 6, 110, 26, },
+ { 1, 1, 1, 6, 110, 26, },
+ { 0, 1, 1, 6, 118, 26, },
+ { 2, 1, 1, 6, 118, 26, },
+ { 1, 1, 1, 6, 118, 26, },
+ { 0, 1, 1, 6, 126, 26, },
+ { 2, 1, 1, 6, 126, 26, },
+ { 1, 1, 1, 6, 126, 26, },
+ { 0, 1, 1, 6, 134, 26, },
+ { 2, 1, 1, 6, 134, 26, },
+ { 1, 1, 1, 6, 134, 26, },
+ { 0, 1, 1, 6, 151, 20, },
+ { 2, 1, 1, 6, 151, 26, },
+ { 1, 1, 1, 6, 151, 63, },
+ { 0, 1, 1, 6, 159, 26, },
+ { 2, 1, 1, 6, 159, 26, },
+ { 1, 1, 1, 6, 159, 63, },
+ { 0, 1, 1, 7, 38, 18, },
+ { 2, 1, 1, 7, 38, 24, },
+ { 1, 1, 1, 7, 38, 24, },
+ { 0, 1, 1, 7, 46, 24, },
+ { 2, 1, 1, 7, 46, 24, },
+ { 1, 1, 1, 7, 46, 24, },
+ { 0, 1, 1, 7, 54, 24, },
+ { 2, 1, 1, 7, 54, 24, },
+ { 1, 1, 1, 7, 54, 24, },
+ { 0, 1, 1, 7, 62, 18, },
+ { 2, 1, 1, 7, 62, 24, },
+ { 1, 1, 1, 7, 62, 24, },
+ { 0, 1, 1, 7, 102, 18, },
+ { 2, 1, 1, 7, 102, 24, },
+ { 1, 1, 1, 7, 102, 24, },
+ { 0, 1, 1, 7, 110, 24, },
+ { 2, 1, 1, 7, 110, 24, },
+ { 1, 1, 1, 7, 110, 24, },
+ { 0, 1, 1, 7, 118, 24, },
+ { 2, 1, 1, 7, 118, 24, },
+ { 1, 1, 1, 7, 118, 24, },
+ { 0, 1, 1, 7, 126, 24, },
+ { 2, 1, 1, 7, 126, 24, },
+ { 1, 1, 1, 7, 126, 24, },
+ { 0, 1, 1, 7, 134, 24, },
+ { 2, 1, 1, 7, 134, 24, },
+ { 1, 1, 1, 7, 134, 24, },
+ { 0, 1, 1, 7, 151, 18, },
+ { 2, 1, 1, 7, 151, 24, },
+ { 1, 1, 1, 7, 151, 63, },
+ { 0, 1, 1, 7, 159, 24, },
+ { 2, 1, 1, 7, 159, 24, },
+ { 1, 1, 1, 7, 159, 63, },
+ { 0, 1, 2, 4, 42, 22, },
+ { 2, 1, 2, 4, 42, 30, },
+ { 1, 1, 2, 4, 42, 30, },
+ { 0, 1, 2, 4, 58, 22, },
+ { 2, 1, 2, 4, 58, 30, },
+ { 1, 1, 2, 4, 58, 30, },
+ { 0, 1, 2, 4, 106, 22, },
+ { 2, 1, 2, 4, 106, 30, },
+ { 1, 1, 2, 4, 106, 30, },
+ { 0, 1, 2, 4, 122, 30, },
+ { 2, 1, 2, 4, 122, 30, },
+ { 1, 1, 2, 4, 122, 30, },
+ { 0, 1, 2, 4, 155, 22, },
+ { 2, 1, 2, 4, 155, 30, },
+ { 1, 1, 2, 4, 155, 63, },
+ { 0, 1, 2, 5, 42, 20, },
+ { 2, 1, 2, 5, 42, 28, },
+ { 1, 1, 2, 5, 42, 28, },
+ { 0, 1, 2, 5, 58, 20, },
+ { 2, 1, 2, 5, 58, 28, },
+ { 1, 1, 2, 5, 58, 28, },
+ { 0, 1, 2, 5, 106, 20, },
+ { 2, 1, 2, 5, 106, 28, },
+ { 1, 1, 2, 5, 106, 28, },
+ { 0, 1, 2, 5, 122, 28, },
+ { 2, 1, 2, 5, 122, 28, },
+ { 1, 1, 2, 5, 122, 28, },
+ { 0, 1, 2, 5, 155, 20, },
+ { 2, 1, 2, 5, 155, 28, },
+ { 1, 1, 2, 5, 155, 63, },
+ { 0, 1, 2, 8, 42, 18, },
+ { 2, 1, 2, 8, 42, 26, },
+ { 1, 1, 2, 8, 42, 26, },
+ { 0, 1, 2, 8, 58, 18, },
+ { 2, 1, 2, 8, 58, 26, },
+ { 1, 1, 2, 8, 58, 26, },
+ { 0, 1, 2, 8, 106, 18, },
+ { 2, 1, 2, 8, 106, 26, },
+ { 1, 1, 2, 8, 106, 26, },
+ { 0, 1, 2, 8, 122, 26, },
+ { 2, 1, 2, 8, 122, 26, },
+ { 1, 1, 2, 8, 122, 26, },
+ { 0, 1, 2, 8, 155, 18, },
+ { 2, 1, 2, 8, 155, 26, },
+ { 1, 1, 2, 8, 155, 63, },
+ { 0, 1, 2, 9, 42, 16, },
+ { 2, 1, 2, 9, 42, 24, },
+ { 1, 1, 2, 9, 42, 24, },
+ { 0, 1, 2, 9, 58, 16, },
+ { 2, 1, 2, 9, 58, 24, },
+ { 1, 1, 2, 9, 58, 24, },
+ { 0, 1, 2, 9, 106, 16, },
+ { 2, 1, 2, 9, 106, 24, },
+ { 1, 1, 2, 9, 106, 24, },
+ { 0, 1, 2, 9, 122, 24, },
+ { 2, 1, 2, 9, 122, 24, },
+ { 1, 1, 2, 9, 122, 24, },
+ { 0, 1, 2, 9, 155, 16, },
+ { 2, 1, 2, 9, 155, 24, },
+ { 1, 1, 2, 9, 155, 63, },
+};
+
+RTW_DECL_TABLE_TXPWR_LMT(rtw8814a_txpwr_lmt_type0);
+
+static const struct rtw_txpwr_lmt_cfg_pair rtw8814a_txpwr_lmt_type1[] = {
+ { 0, 0, 0, 0, 1, 34, },
+ { 2, 0, 0, 0, 1, 32, },
+ { 1, 0, 0, 0, 1, 32, },
+ { 0, 0, 0, 0, 2, 34, },
+ { 2, 0, 0, 0, 2, 32, },
+ { 1, 0, 0, 0, 2, 32, },
+ { 0, 0, 0, 0, 3, 34, },
+ { 2, 0, 0, 0, 3, 32, },
+ { 1, 0, 0, 0, 3, 32, },
+ { 0, 0, 0, 0, 4, 34, },
+ { 2, 0, 0, 0, 4, 32, },
+ { 1, 0, 0, 0, 4, 32, },
+ { 0, 0, 0, 0, 5, 34, },
+ { 2, 0, 0, 0, 5, 32, },
+ { 1, 0, 0, 0, 5, 32, },
+ { 0, 0, 0, 0, 6, 34, },
+ { 2, 0, 0, 0, 6, 32, },
+ { 1, 0, 0, 0, 6, 32, },
+ { 0, 0, 0, 0, 7, 34, },
+ { 2, 0, 0, 0, 7, 32, },
+ { 1, 0, 0, 0, 7, 32, },
+ { 0, 0, 0, 0, 8, 34, },
+ { 2, 0, 0, 0, 8, 32, },
+ { 1, 0, 0, 0, 8, 32, },
+ { 0, 0, 0, 0, 9, 34, },
+ { 2, 0, 0, 0, 9, 32, },
+ { 1, 0, 0, 0, 9, 32, },
+ { 0, 0, 0, 0, 10, 34, },
+ { 2, 0, 0, 0, 10, 32, },
+ { 1, 0, 0, 0, 10, 32, },
+ { 0, 0, 0, 0, 11, 34, },
+ { 2, 0, 0, 0, 11, 32, },
+ { 1, 0, 0, 0, 11, 32, },
+ { 0, 0, 0, 0, 12, 24, },
+ { 2, 0, 0, 0, 12, 32, },
+ { 1, 0, 0, 0, 12, 32, },
+ { 0, 0, 0, 0, 13, 16, },
+ { 2, 0, 0, 0, 13, 32, },
+ { 1, 0, 0, 0, 13, 32, },
+ { 0, 0, 0, 0, 14, 63, },
+ { 2, 0, 0, 0, 14, 63, },
+ { 1, 0, 0, 0, 14, 32, },
+ { 0, 0, 0, 1, 1, 30, },
+ { 2, 0, 0, 1, 1, 32, },
+ { 1, 0, 0, 1, 1, 32, },
+ { 0, 0, 0, 1, 2, 32, },
+ { 2, 0, 0, 1, 2, 32, },
+ { 1, 0, 0, 1, 2, 32, },
+ { 0, 0, 0, 1, 3, 32, },
+ { 2, 0, 0, 1, 3, 32, },
+ { 1, 0, 0, 1, 3, 32, },
+ { 0, 0, 0, 1, 4, 32, },
+ { 2, 0, 0, 1, 4, 32, },
+ { 1, 0, 0, 1, 4, 32, },
+ { 0, 0, 0, 1, 5, 32, },
+ { 2, 0, 0, 1, 5, 32, },
+ { 1, 0, 0, 1, 5, 32, },
+ { 0, 0, 0, 1, 6, 32, },
+ { 2, 0, 0, 1, 6, 32, },
+ { 1, 0, 0, 1, 6, 32, },
+ { 0, 0, 0, 1, 7, 32, },
+ { 2, 0, 0, 1, 7, 32, },
+ { 1, 0, 0, 1, 7, 32, },
+ { 0, 0, 0, 1, 8, 32, },
+ { 2, 0, 0, 1, 8, 32, },
+ { 1, 0, 0, 1, 8, 32, },
+ { 0, 0, 0, 1, 9, 32, },
+ { 2, 0, 0, 1, 9, 32, },
+ { 1, 0, 0, 1, 9, 32, },
+ { 0, 0, 0, 1, 10, 32, },
+ { 2, 0, 0, 1, 10, 32, },
+ { 1, 0, 0, 1, 10, 32, },
+ { 0, 0, 0, 1, 11, 30, },
+ { 2, 0, 0, 1, 11, 32, },
+ { 1, 0, 0, 1, 11, 32, },
+ { 0, 0, 0, 1, 12, 18, },
+ { 2, 0, 0, 1, 12, 32, },
+ { 1, 0, 0, 1, 12, 32, },
+ { 0, 0, 0, 1, 13, 8, },
+ { 2, 0, 0, 1, 13, 32, },
+ { 1, 0, 0, 1, 13, 32, },
+ { 0, 0, 0, 1, 14, 63, },
+ { 2, 0, 0, 1, 14, 63, },
+ { 1, 0, 0, 1, 14, 63, },
+ { 0, 0, 0, 2, 1, 28, },
+ { 2, 0, 0, 2, 1, 32, },
+ { 1, 0, 0, 2, 1, 32, },
+ { 0, 0, 0, 2, 2, 32, },
+ { 2, 0, 0, 2, 2, 32, },
+ { 1, 0, 0, 2, 2, 32, },
+ { 0, 0, 0, 2, 3, 32, },
+ { 2, 0, 0, 2, 3, 32, },
+ { 1, 0, 0, 2, 3, 32, },
+ { 0, 0, 0, 2, 4, 32, },
+ { 2, 0, 0, 2, 4, 32, },
+ { 1, 0, 0, 2, 4, 32, },
+ { 0, 0, 0, 2, 5, 32, },
+ { 2, 0, 0, 2, 5, 32, },
+ { 1, 0, 0, 2, 5, 32, },
+ { 0, 0, 0, 2, 6, 32, },
+ { 2, 0, 0, 2, 6, 32, },
+ { 1, 0, 0, 2, 6, 32, },
+ { 0, 0, 0, 2, 7, 32, },
+ { 2, 0, 0, 2, 7, 32, },
+ { 1, 0, 0, 2, 7, 32, },
+ { 0, 0, 0, 2, 8, 32, },
+ { 2, 0, 0, 2, 8, 32, },
+ { 1, 0, 0, 2, 8, 32, },
+ { 0, 0, 0, 2, 9, 32, },
+ { 2, 0, 0, 2, 9, 32, },
+ { 1, 0, 0, 2, 9, 32, },
+ { 0, 0, 0, 2, 10, 32, },
+ { 2, 0, 0, 2, 10, 32, },
+ { 1, 0, 0, 2, 10, 32, },
+ { 0, 0, 0, 2, 11, 28, },
+ { 2, 0, 0, 2, 11, 32, },
+ { 1, 0, 0, 2, 11, 32, },
+ { 0, 0, 0, 2, 12, 16, },
+ { 2, 0, 0, 2, 12, 32, },
+ { 1, 0, 0, 2, 12, 32, },
+ { 0, 0, 0, 2, 13, 6, },
+ { 2, 0, 0, 2, 13, 32, },
+ { 1, 0, 0, 2, 13, 32, },
+ { 0, 0, 0, 2, 14, 63, },
+ { 2, 0, 0, 2, 14, 63, },
+ { 1, 0, 0, 2, 14, 63, },
+ { 0, 0, 0, 3, 1, 26, },
+ { 2, 0, 0, 3, 1, 30, },
+ { 1, 0, 0, 3, 1, 30, },
+ { 0, 0, 0, 3, 2, 30, },
+ { 2, 0, 0, 3, 2, 30, },
+ { 1, 0, 0, 3, 2, 30, },
+ { 0, 0, 0, 3, 3, 30, },
+ { 2, 0, 0, 3, 3, 30, },
+ { 1, 0, 0, 3, 3, 30, },
+ { 0, 0, 0, 3, 4, 30, },
+ { 2, 0, 0, 3, 4, 30, },
+ { 1, 0, 0, 3, 4, 30, },
+ { 0, 0, 0, 3, 5, 30, },
+ { 2, 0, 0, 3, 5, 30, },
+ { 1, 0, 0, 3, 5, 30, },
+ { 0, 0, 0, 3, 6, 30, },
+ { 2, 0, 0, 3, 6, 30, },
+ { 1, 0, 0, 3, 6, 30, },
+ { 0, 0, 0, 3, 7, 30, },
+ { 2, 0, 0, 3, 7, 30, },
+ { 1, 0, 0, 3, 7, 30, },
+ { 0, 0, 0, 3, 8, 30, },
+ { 2, 0, 0, 3, 8, 30, },
+ { 1, 0, 0, 3, 8, 30, },
+ { 0, 0, 0, 3, 9, 30, },
+ { 2, 0, 0, 3, 9, 30, },
+ { 1, 0, 0, 3, 9, 30, },
+ { 0, 0, 0, 3, 10, 30, },
+ { 2, 0, 0, 3, 10, 30, },
+ { 1, 0, 0, 3, 10, 30, },
+ { 0, 0, 0, 3, 11, 26, },
+ { 2, 0, 0, 3, 11, 30, },
+ { 1, 0, 0, 3, 11, 30, },
+ { 0, 0, 0, 3, 12, 14, },
+ { 2, 0, 0, 3, 12, 30, },
+ { 1, 0, 0, 3, 12, 30, },
+ { 0, 0, 0, 3, 13, 4, },
+ { 2, 0, 0, 3, 13, 30, },
+ { 1, 0, 0, 3, 13, 30, },
+ { 0, 0, 0, 3, 14, 63, },
+ { 2, 0, 0, 3, 14, 63, },
+ { 1, 0, 0, 3, 14, 63, },
+ { 0, 0, 0, 6, 1, 24, },
+ { 2, 0, 0, 6, 1, 28, },
+ { 1, 0, 0, 6, 1, 28, },
+ { 0, 0, 0, 6, 2, 28, },
+ { 2, 0, 0, 6, 2, 28, },
+ { 1, 0, 0, 6, 2, 28, },
+ { 0, 0, 0, 6, 3, 28, },
+ { 2, 0, 0, 6, 3, 28, },
+ { 1, 0, 0, 6, 3, 28, },
+ { 0, 0, 0, 6, 4, 28, },
+ { 2, 0, 0, 6, 4, 28, },
+ { 1, 0, 0, 6, 4, 28, },
+ { 0, 0, 0, 6, 5, 28, },
+ { 2, 0, 0, 6, 5, 28, },
+ { 1, 0, 0, 6, 5, 28, },
+ { 0, 0, 0, 6, 6, 28, },
+ { 2, 0, 0, 6, 6, 28, },
+ { 1, 0, 0, 6, 6, 28, },
+ { 0, 0, 0, 6, 7, 28, },
+ { 2, 0, 0, 6, 7, 28, },
+ { 1, 0, 0, 6, 7, 28, },
+ { 0, 0, 0, 6, 8, 28, },
+ { 2, 0, 0, 6, 8, 28, },
+ { 1, 0, 0, 6, 8, 28, },
+ { 0, 0, 0, 6, 9, 28, },
+ { 2, 0, 0, 6, 9, 28, },
+ { 1, 0, 0, 6, 9, 28, },
+ { 0, 0, 0, 6, 10, 28, },
+ { 2, 0, 0, 6, 10, 28, },
+ { 1, 0, 0, 6, 10, 28, },
+ { 0, 0, 0, 6, 11, 24, },
+ { 2, 0, 0, 6, 11, 28, },
+ { 1, 0, 0, 6, 11, 28, },
+ { 0, 0, 0, 6, 12, 14, },
+ { 2, 0, 0, 6, 12, 28, },
+ { 1, 0, 0, 6, 12, 28, },
+ { 0, 0, 0, 6, 13, 4, },
+ { 2, 0, 0, 6, 13, 28, },
+ { 1, 0, 0, 6, 13, 28, },
+ { 0, 0, 0, 6, 14, 63, },
+ { 2, 0, 0, 6, 14, 63, },
+ { 1, 0, 0, 6, 14, 63, },
+ { 0, 0, 0, 7, 1, 22, },
+ { 2, 0, 0, 7, 1, 26, },
+ { 1, 0, 0, 7, 1, 26, },
+ { 0, 0, 0, 7, 2, 26, },
+ { 2, 0, 0, 7, 2, 26, },
+ { 1, 0, 0, 7, 2, 26, },
+ { 0, 0, 0, 7, 3, 26, },
+ { 2, 0, 0, 7, 3, 26, },
+ { 1, 0, 0, 7, 3, 26, },
+ { 0, 0, 0, 7, 4, 26, },
+ { 2, 0, 0, 7, 4, 26, },
+ { 1, 0, 0, 7, 4, 26, },
+ { 0, 0, 0, 7, 5, 26, },
+ { 2, 0, 0, 7, 5, 26, },
+ { 1, 0, 0, 7, 5, 26, },
+ { 0, 0, 0, 7, 6, 26, },
+ { 2, 0, 0, 7, 6, 26, },
+ { 1, 0, 0, 7, 6, 26, },
+ { 0, 0, 0, 7, 7, 26, },
+ { 2, 0, 0, 7, 7, 26, },
+ { 1, 0, 0, 7, 7, 26, },
+ { 0, 0, 0, 7, 8, 26, },
+ { 2, 0, 0, 7, 8, 26, },
+ { 1, 0, 0, 7, 8, 26, },
+ { 0, 0, 0, 7, 9, 26, },
+ { 2, 0, 0, 7, 9, 26, },
+ { 1, 0, 0, 7, 9, 26, },
+ { 0, 0, 0, 7, 10, 26, },
+ { 2, 0, 0, 7, 10, 26, },
+ { 1, 0, 0, 7, 10, 26, },
+ { 0, 0, 0, 7, 11, 22, },
+ { 2, 0, 0, 7, 11, 26, },
+ { 1, 0, 0, 7, 11, 26, },
+ { 0, 0, 0, 7, 12, 14, },
+ { 2, 0, 0, 7, 12, 26, },
+ { 1, 0, 0, 7, 12, 26, },
+ { 0, 0, 0, 7, 13, 4, },
+ { 2, 0, 0, 7, 13, 26, },
+ { 1, 0, 0, 7, 13, 26, },
+ { 0, 0, 0, 7, 14, 63, },
+ { 2, 0, 0, 7, 14, 63, },
+ { 1, 0, 0, 7, 14, 63, },
+ { 0, 0, 1, 2, 1, 63, },
+ { 2, 0, 1, 2, 1, 63, },
+ { 1, 0, 1, 2, 1, 63, },
+ { 0, 0, 1, 2, 2, 63, },
+ { 2, 0, 1, 2, 2, 63, },
+ { 1, 0, 1, 2, 2, 63, },
+ { 0, 0, 1, 2, 3, 28, },
+ { 2, 0, 1, 2, 3, 32, },
+ { 1, 0, 1, 2, 3, 32, },
+ { 0, 0, 1, 2, 4, 32, },
+ { 2, 0, 1, 2, 4, 32, },
+ { 1, 0, 1, 2, 4, 32, },
+ { 0, 0, 1, 2, 5, 32, },
+ { 2, 0, 1, 2, 5, 32, },
+ { 1, 0, 1, 2, 5, 32, },
+ { 0, 0, 1, 2, 6, 32, },
+ { 2, 0, 1, 2, 6, 32, },
+ { 1, 0, 1, 2, 6, 32, },
+ { 0, 0, 1, 2, 7, 32, },
+ { 2, 0, 1, 2, 7, 32, },
+ { 1, 0, 1, 2, 7, 32, },
+ { 0, 0, 1, 2, 8, 32, },
+ { 2, 0, 1, 2, 8, 32, },
+ { 1, 0, 1, 2, 8, 32, },
+ { 0, 0, 1, 2, 9, 32, },
+ { 2, 0, 1, 2, 9, 32, },
+ { 1, 0, 1, 2, 9, 32, },
+ { 0, 0, 1, 2, 10, 32, },
+ { 2, 0, 1, 2, 10, 32, },
+ { 1, 0, 1, 2, 10, 32, },
+ { 0, 0, 1, 2, 11, 28, },
+ { 2, 0, 1, 2, 11, 32, },
+ { 1, 0, 1, 2, 11, 32, },
+ { 0, 0, 1, 2, 12, 16, },
+ { 2, 0, 1, 2, 12, 32, },
+ { 1, 0, 1, 2, 12, 32, },
+ { 0, 0, 1, 2, 13, 10, },
+ { 2, 0, 1, 2, 13, 32, },
+ { 1, 0, 1, 2, 13, 32, },
+ { 0, 0, 1, 2, 14, 63, },
+ { 2, 0, 1, 2, 14, 63, },
+ { 1, 0, 1, 2, 14, 63, },
+ { 0, 0, 1, 3, 1, 63, },
+ { 2, 0, 1, 3, 1, 63, },
+ { 1, 0, 1, 3, 1, 63, },
+ { 0, 0, 1, 3, 2, 63, },
+ { 2, 0, 1, 3, 2, 63, },
+ { 1, 0, 1, 3, 2, 63, },
+ { 0, 0, 1, 3, 3, 26, },
+ { 2, 0, 1, 3, 3, 30, },
+ { 1, 0, 1, 3, 3, 30, },
+ { 0, 0, 1, 3, 4, 30, },
+ { 2, 0, 1, 3, 4, 30, },
+ { 1, 0, 1, 3, 4, 30, },
+ { 0, 0, 1, 3, 5, 30, },
+ { 2, 0, 1, 3, 5, 30, },
+ { 1, 0, 1, 3, 5, 30, },
+ { 0, 0, 1, 3, 6, 30, },
+ { 2, 0, 1, 3, 6, 30, },
+ { 1, 0, 1, 3, 6, 30, },
+ { 0, 0, 1, 3, 7, 30, },
+ { 2, 0, 1, 3, 7, 30, },
+ { 1, 0, 1, 3, 7, 30, },
+ { 0, 0, 1, 3, 8, 30, },
+ { 2, 0, 1, 3, 8, 30, },
+ { 1, 0, 1, 3, 8, 30, },
+ { 0, 0, 1, 3, 9, 30, },
+ { 2, 0, 1, 3, 9, 30, },
+ { 1, 0, 1, 3, 9, 30, },
+ { 0, 0, 1, 3, 10, 30, },
+ { 2, 0, 1, 3, 10, 30, },
+ { 1, 0, 1, 3, 10, 30, },
+ { 0, 0, 1, 3, 11, 26, },
+ { 2, 0, 1, 3, 11, 30, },
+ { 1, 0, 1, 3, 11, 30, },
+ { 0, 0, 1, 3, 12, 14, },
+ { 2, 0, 1, 3, 12, 30, },
+ { 1, 0, 1, 3, 12, 30, },
+ { 0, 0, 1, 3, 13, 8, },
+ { 2, 0, 1, 3, 13, 30, },
+ { 1, 0, 1, 3, 13, 30, },
+ { 0, 0, 1, 3, 14, 63, },
+ { 2, 0, 1, 3, 14, 63, },
+ { 1, 0, 1, 3, 14, 63, },
+ { 0, 0, 1, 6, 1, 63, },
+ { 2, 0, 1, 6, 1, 63, },
+ { 1, 0, 1, 6, 1, 63, },
+ { 0, 0, 1, 6, 2, 63, },
+ { 2, 0, 1, 6, 2, 63, },
+ { 1, 0, 1, 6, 2, 63, },
+ { 0, 0, 1, 6, 3, 24, },
+ { 2, 0, 1, 6, 3, 28, },
+ { 1, 0, 1, 6, 3, 28, },
+ { 0, 0, 1, 6, 4, 28, },
+ { 2, 0, 1, 6, 4, 28, },
+ { 1, 0, 1, 6, 4, 28, },
+ { 0, 0, 1, 6, 5, 28, },
+ { 2, 0, 1, 6, 5, 28, },
+ { 1, 0, 1, 6, 5, 28, },
+ { 0, 0, 1, 6, 6, 28, },
+ { 2, 0, 1, 6, 6, 28, },
+ { 1, 0, 1, 6, 6, 28, },
+ { 0, 0, 1, 6, 7, 28, },
+ { 2, 0, 1, 6, 7, 28, },
+ { 1, 0, 1, 6, 7, 28, },
+ { 0, 0, 1, 6, 8, 28, },
+ { 2, 0, 1, 6, 8, 28, },
+ { 1, 0, 1, 6, 8, 28, },
+ { 0, 0, 1, 6, 9, 28, },
+ { 2, 0, 1, 6, 9, 28, },
+ { 1, 0, 1, 6, 9, 28, },
+ { 0, 0, 1, 6, 10, 28, },
+ { 2, 0, 1, 6, 10, 28, },
+ { 1, 0, 1, 6, 10, 28, },
+ { 0, 0, 1, 6, 11, 24, },
+ { 2, 0, 1, 6, 11, 28, },
+ { 1, 0, 1, 6, 11, 28, },
+ { 0, 0, 1, 6, 12, 14, },
+ { 2, 0, 1, 6, 12, 28, },
+ { 1, 0, 1, 6, 12, 28, },
+ { 0, 0, 1, 6, 13, 8, },
+ { 2, 0, 1, 6, 13, 28, },
+ { 1, 0, 1, 6, 13, 28, },
+ { 0, 0, 1, 6, 14, 63, },
+ { 2, 0, 1, 6, 14, 63, },
+ { 1, 0, 1, 6, 14, 63, },
+ { 0, 0, 1, 7, 1, 63, },
+ { 2, 0, 1, 7, 1, 63, },
+ { 1, 0, 1, 7, 1, 63, },
+ { 0, 0, 1, 7, 2, 63, },
+ { 2, 0, 1, 7, 2, 63, },
+ { 1, 0, 1, 7, 2, 63, },
+ { 0, 0, 1, 7, 3, 22, },
+ { 2, 0, 1, 7, 3, 26, },
+ { 1, 0, 1, 7, 3, 26, },
+ { 0, 0, 1, 7, 4, 26, },
+ { 2, 0, 1, 7, 4, 26, },
+ { 1, 0, 1, 7, 4, 26, },
+ { 0, 0, 1, 7, 5, 26, },
+ { 2, 0, 1, 7, 5, 26, },
+ { 1, 0, 1, 7, 5, 26, },
+ { 0, 0, 1, 7, 6, 26, },
+ { 2, 0, 1, 7, 6, 26, },
+ { 1, 0, 1, 7, 6, 26, },
+ { 0, 0, 1, 7, 7, 26, },
+ { 2, 0, 1, 7, 7, 26, },
+ { 1, 0, 1, 7, 7, 26, },
+ { 0, 0, 1, 7, 8, 26, },
+ { 2, 0, 1, 7, 8, 26, },
+ { 1, 0, 1, 7, 8, 26, },
+ { 0, 0, 1, 7, 9, 26, },
+ { 2, 0, 1, 7, 9, 26, },
+ { 1, 0, 1, 7, 9, 26, },
+ { 0, 0, 1, 7, 10, 26, },
+ { 2, 0, 1, 7, 10, 26, },
+ { 1, 0, 1, 7, 10, 26, },
+ { 0, 0, 1, 7, 11, 22, },
+ { 2, 0, 1, 7, 11, 26, },
+ { 1, 0, 1, 7, 11, 26, },
+ { 0, 0, 1, 7, 12, 14, },
+ { 2, 0, 1, 7, 12, 26, },
+ { 1, 0, 1, 7, 12, 26, },
+ { 0, 0, 1, 7, 13, 8, },
+ { 2, 0, 1, 7, 13, 26, },
+ { 1, 0, 1, 7, 13, 26, },
+ { 0, 0, 1, 7, 14, 63, },
+ { 2, 0, 1, 7, 14, 63, },
+ { 1, 0, 1, 7, 14, 63, },
+ { 0, 1, 0, 1, 36, 28, },
+ { 2, 1, 0, 1, 36, 32, },
+ { 1, 1, 0, 1, 36, 32, },
+ { 0, 1, 0, 1, 40, 32, },
+ { 2, 1, 0, 1, 40, 32, },
+ { 1, 1, 0, 1, 40, 32, },
+ { 0, 1, 0, 1, 44, 32, },
+ { 2, 1, 0, 1, 44, 32, },
+ { 1, 1, 0, 1, 44, 32, },
+ { 0, 1, 0, 1, 48, 32, },
+ { 2, 1, 0, 1, 48, 32, },
+ { 1, 1, 0, 1, 48, 32, },
+ { 0, 1, 0, 1, 52, 32, },
+ { 2, 1, 0, 1, 52, 32, },
+ { 1, 1, 0, 1, 52, 32, },
+ { 0, 1, 0, 1, 56, 32, },
+ { 2, 1, 0, 1, 56, 32, },
+ { 1, 1, 0, 1, 56, 32, },
+ { 0, 1, 0, 1, 60, 32, },
+ { 2, 1, 0, 1, 60, 32, },
+ { 1, 1, 0, 1, 60, 32, },
+ { 0, 1, 0, 1, 64, 30, },
+ { 2, 1, 0, 1, 64, 32, },
+ { 1, 1, 0, 1, 64, 32, },
+ { 0, 1, 0, 1, 100, 28, },
+ { 2, 1, 0, 1, 100, 32, },
+ { 1, 1, 0, 1, 100, 32, },
+ { 0, 1, 0, 1, 104, 32, },
+ { 2, 1, 0, 1, 104, 32, },
+ { 1, 1, 0, 1, 104, 32, },
+ { 0, 1, 0, 1, 108, 32, },
+ { 2, 1, 0, 1, 108, 32, },
+ { 1, 1, 0, 1, 108, 32, },
+ { 0, 1, 0, 1, 112, 32, },
+ { 2, 1, 0, 1, 112, 32, },
+ { 1, 1, 0, 1, 112, 32, },
+ { 0, 1, 0, 1, 116, 32, },
+ { 2, 1, 0, 1, 116, 32, },
+ { 1, 1, 0, 1, 116, 32, },
+ { 0, 1, 0, 1, 120, 32, },
+ { 2, 1, 0, 1, 120, 32, },
+ { 1, 1, 0, 1, 120, 32, },
+ { 0, 1, 0, 1, 124, 32, },
+ { 2, 1, 0, 1, 124, 32, },
+ { 1, 1, 0, 1, 124, 32, },
+ { 0, 1, 0, 1, 128, 32, },
+ { 2, 1, 0, 1, 128, 32, },
+ { 1, 1, 0, 1, 128, 32, },
+ { 0, 1, 0, 1, 132, 32, },
+ { 2, 1, 0, 1, 132, 32, },
+ { 1, 1, 0, 1, 132, 32, },
+ { 0, 1, 0, 1, 136, 32, },
+ { 2, 1, 0, 1, 136, 32, },
+ { 1, 1, 0, 1, 136, 32, },
+ { 0, 1, 0, 1, 140, 28, },
+ { 2, 1, 0, 1, 140, 32, },
+ { 1, 1, 0, 1, 140, 32, },
+ { 0, 1, 0, 1, 149, 28, },
+ { 2, 1, 0, 1, 149, 32, },
+ { 1, 1, 0, 1, 149, 63, },
+ { 0, 1, 0, 1, 153, 32, },
+ { 2, 1, 0, 1, 153, 32, },
+ { 1, 1, 0, 1, 153, 63, },
+ { 0, 1, 0, 1, 157, 32, },
+ { 2, 1, 0, 1, 157, 32, },
+ { 1, 1, 0, 1, 157, 63, },
+ { 0, 1, 0, 1, 161, 32, },
+ { 2, 1, 0, 1, 161, 32, },
+ { 1, 1, 0, 1, 161, 63, },
+ { 0, 1, 0, 1, 165, 32, },
+ { 2, 1, 0, 1, 165, 32, },
+ { 1, 1, 0, 1, 165, 63, },
+ { 0, 1, 0, 2, 36, 28, },
+ { 2, 1, 0, 2, 36, 32, },
+ { 1, 1, 0, 2, 36, 32, },
+ { 0, 1, 0, 2, 40, 32, },
+ { 2, 1, 0, 2, 40, 32, },
+ { 1, 1, 0, 2, 40, 32, },
+ { 0, 1, 0, 2, 44, 32, },
+ { 2, 1, 0, 2, 44, 32, },
+ { 1, 1, 0, 2, 44, 32, },
+ { 0, 1, 0, 2, 48, 32, },
+ { 2, 1, 0, 2, 48, 32, },
+ { 1, 1, 0, 2, 48, 32, },
+ { 0, 1, 0, 2, 52, 32, },
+ { 2, 1, 0, 2, 52, 32, },
+ { 1, 1, 0, 2, 52, 32, },
+ { 0, 1, 0, 2, 56, 32, },
+ { 2, 1, 0, 2, 56, 32, },
+ { 1, 1, 0, 2, 56, 32, },
+ { 0, 1, 0, 2, 60, 32, },
+ { 2, 1, 0, 2, 60, 32, },
+ { 1, 1, 0, 2, 60, 32, },
+ { 0, 1, 0, 2, 64, 30, },
+ { 2, 1, 0, 2, 64, 32, },
+ { 1, 1, 0, 2, 64, 32, },
+ { 0, 1, 0, 2, 100, 28, },
+ { 2, 1, 0, 2, 100, 32, },
+ { 1, 1, 0, 2, 100, 32, },
+ { 0, 1, 0, 2, 104, 32, },
+ { 2, 1, 0, 2, 104, 32, },
+ { 1, 1, 0, 2, 104, 32, },
+ { 0, 1, 0, 2, 108, 32, },
+ { 2, 1, 0, 2, 108, 32, },
+ { 1, 1, 0, 2, 108, 32, },
+ { 0, 1, 0, 2, 112, 32, },
+ { 2, 1, 0, 2, 112, 32, },
+ { 1, 1, 0, 2, 112, 32, },
+ { 0, 1, 0, 2, 116, 32, },
+ { 2, 1, 0, 2, 116, 32, },
+ { 1, 1, 0, 2, 116, 32, },
+ { 0, 1, 0, 2, 120, 32, },
+ { 2, 1, 0, 2, 120, 32, },
+ { 1, 1, 0, 2, 120, 32, },
+ { 0, 1, 0, 2, 124, 32, },
+ { 2, 1, 0, 2, 124, 32, },
+ { 1, 1, 0, 2, 124, 32, },
+ { 0, 1, 0, 2, 128, 32, },
+ { 2, 1, 0, 2, 128, 32, },
+ { 1, 1, 0, 2, 128, 32, },
+ { 0, 1, 0, 2, 132, 32, },
+ { 2, 1, 0, 2, 132, 32, },
+ { 1, 1, 0, 2, 132, 32, },
+ { 0, 1, 0, 2, 136, 32, },
+ { 2, 1, 0, 2, 136, 32, },
+ { 1, 1, 0, 2, 136, 32, },
+ { 0, 1, 0, 2, 140, 28, },
+ { 2, 1, 0, 2, 140, 32, },
+ { 1, 1, 0, 2, 140, 32, },
+ { 0, 1, 0, 2, 149, 28, },
+ { 2, 1, 0, 2, 149, 32, },
+ { 1, 1, 0, 2, 149, 63, },
+ { 0, 1, 0, 2, 153, 32, },
+ { 2, 1, 0, 2, 153, 32, },
+ { 1, 1, 0, 2, 153, 63, },
+ { 0, 1, 0, 2, 157, 32, },
+ { 2, 1, 0, 2, 157, 32, },
+ { 1, 1, 0, 2, 157, 63, },
+ { 0, 1, 0, 2, 161, 32, },
+ { 2, 1, 0, 2, 161, 32, },
+ { 1, 1, 0, 2, 161, 63, },
+ { 0, 1, 0, 2, 165, 32, },
+ { 2, 1, 0, 2, 165, 32, },
+ { 1, 1, 0, 2, 165, 63, },
+ { 0, 1, 0, 3, 36, 26, },
+ { 2, 1, 0, 3, 36, 30, },
+ { 1, 1, 0, 3, 36, 30, },
+ { 0, 1, 0, 3, 40, 30, },
+ { 2, 1, 0, 3, 40, 30, },
+ { 1, 1, 0, 3, 40, 30, },
+ { 0, 1, 0, 3, 44, 30, },
+ { 2, 1, 0, 3, 44, 30, },
+ { 1, 1, 0, 3, 44, 30, },
+ { 0, 1, 0, 3, 48, 30, },
+ { 2, 1, 0, 3, 48, 30, },
+ { 1, 1, 0, 3, 48, 30, },
+ { 0, 1, 0, 3, 52, 30, },
+ { 2, 1, 0, 3, 52, 30, },
+ { 1, 1, 0, 3, 52, 30, },
+ { 0, 1, 0, 3, 56, 30, },
+ { 2, 1, 0, 3, 56, 30, },
+ { 1, 1, 0, 3, 56, 30, },
+ { 0, 1, 0, 3, 60, 30, },
+ { 2, 1, 0, 3, 60, 30, },
+ { 1, 1, 0, 3, 60, 30, },
+ { 0, 1, 0, 3, 64, 28, },
+ { 2, 1, 0, 3, 64, 30, },
+ { 1, 1, 0, 3, 64, 30, },
+ { 0, 1, 0, 3, 100, 28, },
+ { 2, 1, 0, 3, 100, 30, },
+ { 1, 1, 0, 3, 100, 30, },
+ { 0, 1, 0, 3, 104, 30, },
+ { 2, 1, 0, 3, 104, 30, },
+ { 1, 1, 0, 3, 104, 30, },
+ { 0, 1, 0, 3, 108, 30, },
+ { 2, 1, 0, 3, 108, 30, },
+ { 1, 1, 0, 3, 108, 30, },
+ { 0, 1, 0, 3, 112, 30, },
+ { 2, 1, 0, 3, 112, 30, },
+ { 1, 1, 0, 3, 112, 30, },
+ { 0, 1, 0, 3, 116, 30, },
+ { 2, 1, 0, 3, 116, 30, },
+ { 1, 1, 0, 3, 116, 30, },
+ { 0, 1, 0, 3, 120, 30, },
+ { 2, 1, 0, 3, 120, 30, },
+ { 1, 1, 0, 3, 120, 30, },
+ { 0, 1, 0, 3, 124, 30, },
+ { 2, 1, 0, 3, 124, 30, },
+ { 1, 1, 0, 3, 124, 30, },
+ { 0, 1, 0, 3, 128, 30, },
+ { 2, 1, 0, 3, 128, 30, },
+ { 1, 1, 0, 3, 128, 30, },
+ { 0, 1, 0, 3, 132, 30, },
+ { 2, 1, 0, 3, 132, 30, },
+ { 1, 1, 0, 3, 132, 30, },
+ { 0, 1, 0, 3, 136, 30, },
+ { 2, 1, 0, 3, 136, 30, },
+ { 1, 1, 0, 3, 136, 30, },
+ { 0, 1, 0, 3, 140, 26, },
+ { 2, 1, 0, 3, 140, 30, },
+ { 1, 1, 0, 3, 140, 30, },
+ { 0, 1, 0, 3, 149, 26, },
+ { 2, 1, 0, 3, 149, 30, },
+ { 1, 1, 0, 3, 149, 63, },
+ { 0, 1, 0, 3, 153, 30, },
+ { 2, 1, 0, 3, 153, 30, },
+ { 1, 1, 0, 3, 153, 63, },
+ { 0, 1, 0, 3, 157, 30, },
+ { 2, 1, 0, 3, 157, 30, },
+ { 1, 1, 0, 3, 157, 63, },
+ { 0, 1, 0, 3, 161, 30, },
+ { 2, 1, 0, 3, 161, 30, },
+ { 1, 1, 0, 3, 161, 63, },
+ { 0, 1, 0, 3, 165, 30, },
+ { 2, 1, 0, 3, 165, 30, },
+ { 1, 1, 0, 3, 165, 63, },
+ { 0, 1, 0, 6, 36, 24, },
+ { 2, 1, 0, 6, 36, 28, },
+ { 1, 1, 0, 6, 36, 28, },
+ { 0, 1, 0, 6, 40, 28, },
+ { 2, 1, 0, 6, 40, 28, },
+ { 1, 1, 0, 6, 40, 28, },
+ { 0, 1, 0, 6, 44, 28, },
+ { 2, 1, 0, 6, 44, 28, },
+ { 1, 1, 0, 6, 44, 28, },
+ { 0, 1, 0, 6, 48, 28, },
+ { 2, 1, 0, 6, 48, 28, },
+ { 1, 1, 0, 6, 48, 28, },
+ { 0, 1, 0, 6, 52, 28, },
+ { 2, 1, 0, 6, 52, 28, },
+ { 1, 1, 0, 6, 52, 28, },
+ { 0, 1, 0, 6, 56, 28, },
+ { 2, 1, 0, 6, 56, 28, },
+ { 1, 1, 0, 6, 56, 28, },
+ { 0, 1, 0, 6, 60, 28, },
+ { 2, 1, 0, 6, 60, 28, },
+ { 1, 1, 0, 6, 60, 28, },
+ { 0, 1, 0, 6, 64, 26, },
+ { 2, 1, 0, 6, 64, 28, },
+ { 1, 1, 0, 6, 64, 28, },
+ { 0, 1, 0, 6, 100, 24, },
+ { 2, 1, 0, 6, 100, 28, },
+ { 1, 1, 0, 6, 100, 28, },
+ { 0, 1, 0, 6, 104, 28, },
+ { 2, 1, 0, 6, 104, 28, },
+ { 1, 1, 0, 6, 104, 28, },
+ { 0, 1, 0, 6, 108, 28, },
+ { 2, 1, 0, 6, 108, 28, },
+ { 1, 1, 0, 6, 108, 28, },
+ { 0, 1, 0, 6, 112, 28, },
+ { 2, 1, 0, 6, 112, 28, },
+ { 1, 1, 0, 6, 112, 28, },
+ { 0, 1, 0, 6, 116, 28, },
+ { 2, 1, 0, 6, 116, 28, },
+ { 1, 1, 0, 6, 116, 28, },
+ { 0, 1, 0, 6, 120, 28, },
+ { 2, 1, 0, 6, 120, 28, },
+ { 1, 1, 0, 6, 120, 28, },
+ { 0, 1, 0, 6, 124, 28, },
+ { 2, 1, 0, 6, 124, 28, },
+ { 1, 1, 0, 6, 124, 28, },
+ { 0, 1, 0, 6, 128, 28, },
+ { 2, 1, 0, 6, 128, 28, },
+ { 1, 1, 0, 6, 128, 28, },
+ { 0, 1, 0, 6, 132, 28, },
+ { 2, 1, 0, 6, 132, 28, },
+ { 1, 1, 0, 6, 132, 28, },
+ { 0, 1, 0, 6, 136, 28, },
+ { 2, 1, 0, 6, 136, 28, },
+ { 1, 1, 0, 6, 136, 28, },
+ { 0, 1, 0, 6, 140, 24, },
+ { 2, 1, 0, 6, 140, 28, },
+ { 1, 1, 0, 6, 140, 28, },
+ { 0, 1, 0, 6, 149, 24, },
+ { 2, 1, 0, 6, 149, 28, },
+ { 1, 1, 0, 6, 149, 63, },
+ { 0, 1, 0, 6, 153, 28, },
+ { 2, 1, 0, 6, 153, 28, },
+ { 1, 1, 0, 6, 153, 63, },
+ { 0, 1, 0, 6, 157, 28, },
+ { 2, 1, 0, 6, 157, 28, },
+ { 1, 1, 0, 6, 157, 63, },
+ { 0, 1, 0, 6, 161, 28, },
+ { 2, 1, 0, 6, 161, 28, },
+ { 1, 1, 0, 6, 161, 63, },
+ { 0, 1, 0, 6, 165, 28, },
+ { 2, 1, 0, 6, 165, 28, },
+ { 1, 1, 0, 6, 165, 63, },
+ { 0, 1, 0, 7, 36, 22, },
+ { 2, 1, 0, 7, 36, 26, },
+ { 1, 1, 0, 7, 36, 26, },
+ { 0, 1, 0, 7, 40, 26, },
+ { 2, 1, 0, 7, 40, 26, },
+ { 1, 1, 0, 7, 40, 26, },
+ { 0, 1, 0, 7, 44, 26, },
+ { 2, 1, 0, 7, 44, 26, },
+ { 1, 1, 0, 7, 44, 26, },
+ { 0, 1, 0, 7, 48, 26, },
+ { 2, 1, 0, 7, 48, 26, },
+ { 1, 1, 0, 7, 48, 26, },
+ { 0, 1, 0, 7, 52, 26, },
+ { 2, 1, 0, 7, 52, 26, },
+ { 1, 1, 0, 7, 52, 26, },
+ { 0, 1, 0, 7, 56, 26, },
+ { 2, 1, 0, 7, 56, 26, },
+ { 1, 1, 0, 7, 56, 26, },
+ { 0, 1, 0, 7, 60, 26, },
+ { 2, 1, 0, 7, 60, 26, },
+ { 1, 1, 0, 7, 60, 26, },
+ { 0, 1, 0, 7, 64, 24, },
+ { 2, 1, 0, 7, 64, 26, },
+ { 1, 1, 0, 7, 64, 26, },
+ { 0, 1, 0, 7, 100, 22, },
+ { 2, 1, 0, 7, 100, 26, },
+ { 1, 1, 0, 7, 100, 26, },
+ { 0, 1, 0, 7, 104, 26, },
+ { 2, 1, 0, 7, 104, 26, },
+ { 1, 1, 0, 7, 104, 26, },
+ { 0, 1, 0, 7, 108, 26, },
+ { 2, 1, 0, 7, 108, 26, },
+ { 1, 1, 0, 7, 108, 26, },
+ { 0, 1, 0, 7, 112, 26, },
+ { 2, 1, 0, 7, 112, 26, },
+ { 1, 1, 0, 7, 112, 26, },
+ { 0, 1, 0, 7, 116, 26, },
+ { 2, 1, 0, 7, 116, 26, },
+ { 1, 1, 0, 7, 116, 26, },
+ { 0, 1, 0, 7, 120, 26, },
+ { 2, 1, 0, 7, 120, 26, },
+ { 1, 1, 0, 7, 120, 26, },
+ { 0, 1, 0, 7, 124, 26, },
+ { 2, 1, 0, 7, 124, 26, },
+ { 1, 1, 0, 7, 124, 26, },
+ { 0, 1, 0, 7, 128, 26, },
+ { 2, 1, 0, 7, 128, 26, },
+ { 1, 1, 0, 7, 128, 26, },
+ { 0, 1, 0, 7, 132, 26, },
+ { 2, 1, 0, 7, 132, 26, },
+ { 1, 1, 0, 7, 132, 26, },
+ { 0, 1, 0, 7, 136, 26, },
+ { 2, 1, 0, 7, 136, 26, },
+ { 1, 1, 0, 7, 136, 26, },
+ { 0, 1, 0, 7, 140, 22, },
+ { 2, 1, 0, 7, 140, 26, },
+ { 1, 1, 0, 7, 140, 26, },
+ { 0, 1, 0, 7, 149, 22, },
+ { 2, 1, 0, 7, 149, 26, },
+ { 1, 1, 0, 7, 149, 63, },
+ { 0, 1, 0, 7, 153, 26, },
+ { 2, 1, 0, 7, 153, 26, },
+ { 1, 1, 0, 7, 153, 63, },
+ { 0, 1, 0, 7, 157, 26, },
+ { 2, 1, 0, 7, 157, 26, },
+ { 1, 1, 0, 7, 157, 63, },
+ { 0, 1, 0, 7, 161, 26, },
+ { 2, 1, 0, 7, 161, 26, },
+ { 1, 1, 0, 7, 161, 63, },
+ { 0, 1, 0, 7, 165, 26, },
+ { 2, 1, 0, 7, 165, 26, },
+ { 1, 1, 0, 7, 165, 63, },
+ { 0, 1, 1, 2, 38, 28, },
+ { 2, 1, 1, 2, 38, 32, },
+ { 1, 1, 1, 2, 38, 32, },
+ { 0, 1, 1, 2, 46, 32, },
+ { 2, 1, 1, 2, 46, 32, },
+ { 1, 1, 1, 2, 46, 32, },
+ { 0, 1, 1, 2, 54, 32, },
+ { 2, 1, 1, 2, 54, 32, },
+ { 1, 1, 1, 2, 54, 32, },
+ { 0, 1, 1, 2, 62, 28, },
+ { 2, 1, 1, 2, 62, 32, },
+ { 1, 1, 1, 2, 62, 32, },
+ { 0, 1, 1, 2, 102, 28, },
+ { 2, 1, 1, 2, 102, 32, },
+ { 1, 1, 1, 2, 102, 32, },
+ { 0, 1, 1, 2, 110, 32, },
+ { 2, 1, 1, 2, 110, 32, },
+ { 1, 1, 1, 2, 110, 32, },
+ { 0, 1, 1, 2, 118, 32, },
+ { 2, 1, 1, 2, 118, 32, },
+ { 1, 1, 1, 2, 118, 32, },
+ { 0, 1, 1, 2, 126, 32, },
+ { 2, 1, 1, 2, 126, 32, },
+ { 1, 1, 1, 2, 126, 32, },
+ { 0, 1, 1, 2, 134, 30, },
+ { 2, 1, 1, 2, 134, 32, },
+ { 1, 1, 1, 2, 134, 32, },
+ { 0, 1, 1, 2, 151, 28, },
+ { 2, 1, 1, 2, 151, 32, },
+ { 1, 1, 1, 2, 151, 63, },
+ { 0, 1, 1, 2, 159, 32, },
+ { 2, 1, 1, 2, 159, 32, },
+ { 1, 1, 1, 2, 159, 63, },
+ { 0, 1, 1, 3, 38, 26, },
+ { 2, 1, 1, 3, 38, 30, },
+ { 1, 1, 1, 3, 38, 30, },
+ { 0, 1, 1, 3, 46, 30, },
+ { 2, 1, 1, 3, 46, 30, },
+ { 1, 1, 1, 3, 46, 30, },
+ { 0, 1, 1, 3, 54, 30, },
+ { 2, 1, 1, 3, 54, 30, },
+ { 1, 1, 1, 3, 54, 30, },
+ { 0, 1, 1, 3, 62, 26, },
+ { 2, 1, 1, 3, 62, 30, },
+ { 1, 1, 1, 3, 62, 30, },
+ { 0, 1, 1, 3, 102, 26, },
+ { 2, 1, 1, 3, 102, 30, },
+ { 1, 1, 1, 3, 102, 30, },
+ { 0, 1, 1, 3, 110, 30, },
+ { 2, 1, 1, 3, 110, 30, },
+ { 1, 1, 1, 3, 110, 30, },
+ { 0, 1, 1, 3, 118, 30, },
+ { 2, 1, 1, 3, 118, 30, },
+ { 1, 1, 1, 3, 118, 30, },
+ { 0, 1, 1, 3, 126, 30, },
+ { 2, 1, 1, 3, 126, 30, },
+ { 1, 1, 1, 3, 126, 30, },
+ { 0, 1, 1, 3, 134, 28, },
+ { 2, 1, 1, 3, 134, 30, },
+ { 1, 1, 1, 3, 134, 30, },
+ { 0, 1, 1, 3, 151, 26, },
+ { 2, 1, 1, 3, 151, 30, },
+ { 1, 1, 1, 3, 151, 63, },
+ { 0, 1, 1, 3, 159, 30, },
+ { 2, 1, 1, 3, 159, 30, },
+ { 1, 1, 1, 3, 159, 63, },
+ { 0, 1, 1, 6, 38, 20, },
+ { 2, 1, 1, 6, 38, 28, },
+ { 1, 1, 1, 6, 38, 28, },
+ { 0, 1, 1, 6, 46, 28, },
+ { 2, 1, 1, 6, 46, 28, },
+ { 1, 1, 1, 6, 46, 28, },
+ { 0, 1, 1, 6, 54, 28, },
+ { 2, 1, 1, 6, 54, 28, },
+ { 1, 1, 1, 6, 54, 28, },
+ { 0, 1, 1, 6, 62, 20, },
+ { 2, 1, 1, 6, 62, 28, },
+ { 1, 1, 1, 6, 62, 28, },
+ { 0, 1, 1, 6, 102, 22, },
+ { 2, 1, 1, 6, 102, 28, },
+ { 1, 1, 1, 6, 102, 28, },
+ { 0, 1, 1, 6, 110, 28, },
+ { 2, 1, 1, 6, 110, 28, },
+ { 1, 1, 1, 6, 110, 28, },
+ { 0, 1, 1, 6, 118, 28, },
+ { 2, 1, 1, 6, 118, 28, },
+ { 1, 1, 1, 6, 118, 28, },
+ { 0, 1, 1, 6, 126, 28, },
+ { 2, 1, 1, 6, 126, 28, },
+ { 1, 1, 1, 6, 126, 28, },
+ { 0, 1, 1, 6, 134, 26, },
+ { 2, 1, 1, 6, 134, 28, },
+ { 1, 1, 1, 6, 134, 28, },
+ { 0, 1, 1, 6, 151, 22, },
+ { 2, 1, 1, 6, 151, 28, },
+ { 1, 1, 1, 6, 151, 63, },
+ { 0, 1, 1, 6, 159, 28, },
+ { 2, 1, 1, 6, 159, 28, },
+ { 1, 1, 1, 6, 159, 63, },
+ { 0, 1, 1, 7, 38, 18, },
+ { 2, 1, 1, 7, 38, 26, },
+ { 1, 1, 1, 7, 38, 26, },
+ { 0, 1, 1, 7, 46, 26, },
+ { 2, 1, 1, 7, 46, 26, },
+ { 1, 1, 1, 7, 46, 26, },
+ { 0, 1, 1, 7, 54, 26, },
+ { 2, 1, 1, 7, 54, 26, },
+ { 1, 1, 1, 7, 54, 26, },
+ { 0, 1, 1, 7, 62, 18, },
+ { 2, 1, 1, 7, 62, 26, },
+ { 1, 1, 1, 7, 62, 26, },
+ { 0, 1, 1, 7, 102, 20, },
+ { 2, 1, 1, 7, 102, 26, },
+ { 1, 1, 1, 7, 102, 26, },
+ { 0, 1, 1, 7, 110, 26, },
+ { 2, 1, 1, 7, 110, 26, },
+ { 1, 1, 1, 7, 110, 26, },
+ { 0, 1, 1, 7, 118, 26, },
+ { 2, 1, 1, 7, 118, 26, },
+ { 1, 1, 1, 7, 118, 26, },
+ { 0, 1, 1, 7, 126, 26, },
+ { 2, 1, 1, 7, 126, 26, },
+ { 1, 1, 1, 7, 126, 26, },
+ { 0, 1, 1, 7, 134, 24, },
+ { 2, 1, 1, 7, 134, 26, },
+ { 1, 1, 1, 7, 134, 26, },
+ { 0, 1, 1, 7, 151, 20, },
+ { 2, 1, 1, 7, 151, 26, },
+ { 1, 1, 1, 7, 151, 63, },
+ { 0, 1, 1, 7, 159, 26, },
+ { 2, 1, 1, 7, 159, 26, },
+ { 1, 1, 1, 7, 159, 63, },
+ { 0, 1, 2, 4, 42, 24, },
+ { 2, 1, 2, 4, 42, 32, },
+ { 1, 1, 2, 4, 42, 32, },
+ { 0, 1, 2, 4, 58, 24, },
+ { 2, 1, 2, 4, 58, 32, },
+ { 1, 1, 2, 4, 58, 32, },
+ { 0, 1, 2, 4, 106, 24, },
+ { 2, 1, 2, 4, 106, 32, },
+ { 1, 1, 2, 4, 106, 32, },
+ { 0, 1, 2, 4, 122, 32, },
+ { 2, 1, 2, 4, 122, 32, },
+ { 1, 1, 2, 4, 122, 32, },
+ { 0, 1, 2, 4, 155, 26, },
+ { 2, 1, 2, 4, 155, 32, },
+ { 1, 1, 2, 4, 155, 63, },
+ { 0, 1, 2, 5, 42, 22, },
+ { 2, 1, 2, 5, 42, 30, },
+ { 1, 1, 2, 5, 42, 30, },
+ { 0, 1, 2, 5, 58, 22, },
+ { 2, 1, 2, 5, 58, 30, },
+ { 1, 1, 2, 5, 58, 30, },
+ { 0, 1, 2, 5, 106, 22, },
+ { 2, 1, 2, 5, 106, 30, },
+ { 1, 1, 2, 5, 106, 30, },
+ { 0, 1, 2, 5, 122, 30, },
+ { 2, 1, 2, 5, 122, 30, },
+ { 1, 1, 2, 5, 122, 30, },
+ { 0, 1, 2, 5, 155, 24, },
+ { 2, 1, 2, 5, 155, 30, },
+ { 1, 1, 2, 5, 155, 63, },
+ { 0, 1, 2, 8, 42, 20, },
+ { 2, 1, 2, 8, 42, 28, },
+ { 1, 1, 2, 8, 42, 28, },
+ { 0, 1, 2, 8, 58, 20, },
+ { 2, 1, 2, 8, 58, 28, },
+ { 1, 1, 2, 8, 58, 28, },
+ { 0, 1, 2, 8, 106, 20, },
+ { 2, 1, 2, 8, 106, 28, },
+ { 1, 1, 2, 8, 106, 28, },
+ { 0, 1, 2, 8, 122, 28, },
+ { 2, 1, 2, 8, 122, 28, },
+ { 1, 1, 2, 8, 122, 28, },
+ { 0, 1, 2, 8, 155, 20, },
+ { 2, 1, 2, 8, 155, 28, },
+ { 1, 1, 2, 8, 155, 63, },
+ { 0, 1, 2, 9, 42, 18, },
+ { 2, 1, 2, 9, 42, 26, },
+ { 1, 1, 2, 9, 42, 26, },
+ { 0, 1, 2, 9, 58, 18, },
+ { 2, 1, 2, 9, 58, 26, },
+ { 1, 1, 2, 9, 58, 26, },
+ { 0, 1, 2, 9, 106, 18, },
+ { 2, 1, 2, 9, 106, 26, },
+ { 1, 1, 2, 9, 106, 26, },
+ { 0, 1, 2, 9, 122, 26, },
+ { 2, 1, 2, 9, 122, 26, },
+ { 1, 1, 2, 9, 122, 26, },
+ { 0, 1, 2, 9, 155, 18, },
+ { 2, 1, 2, 9, 155, 26, },
+ { 1, 1, 2, 9, 155, 63, },
+};
+
+RTW_DECL_TABLE_TXPWR_LMT(rtw8814a_txpwr_lmt_type1);
+
+static const struct rtw_txpwr_lmt_cfg_pair rtw8814a_txpwr_lmt_type2[] = {
+ { 0, 0, 0, 0, 1, 42, },
+ { 2, 0, 0, 0, 1, 42, },
+ { 1, 0, 0, 0, 1, 42, },
+ { 0, 0, 0, 0, 2, 50, },
+ { 2, 0, 0, 0, 2, 42, },
+ { 1, 0, 0, 0, 2, 42, },
+ { 0, 0, 0, 0, 3, 50, },
+ { 2, 0, 0, 0, 3, 42, },
+ { 1, 0, 0, 0, 3, 42, },
+ { 0, 0, 0, 0, 4, 50, },
+ { 2, 0, 0, 0, 4, 42, },
+ { 1, 0, 0, 0, 4, 42, },
+ { 0, 0, 0, 0, 5, 50, },
+ { 2, 0, 0, 0, 5, 42, },
+ { 1, 0, 0, 0, 5, 42, },
+ { 0, 0, 0, 0, 6, 50, },
+ { 2, 0, 0, 0, 6, 42, },
+ { 1, 0, 0, 0, 6, 42, },
+ { 0, 0, 0, 0, 7, 50, },
+ { 2, 0, 0, 0, 7, 42, },
+ { 1, 0, 0, 0, 7, 42, },
+ { 0, 0, 0, 0, 8, 50, },
+ { 2, 0, 0, 0, 8, 42, },
+ { 1, 0, 0, 0, 8, 42, },
+ { 0, 0, 0, 0, 9, 50, },
+ { 2, 0, 0, 0, 9, 42, },
+ { 1, 0, 0, 0, 9, 42, },
+ { 0, 0, 0, 0, 10, 50, },
+ { 2, 0, 0, 0, 10, 42, },
+ { 1, 0, 0, 0, 10, 42, },
+ { 0, 0, 0, 0, 11, 44, },
+ { 2, 0, 0, 0, 11, 42, },
+ { 1, 0, 0, 0, 11, 42, },
+ { 0, 0, 0, 0, 12, 63, },
+ { 2, 0, 0, 0, 12, 42, },
+ { 1, 0, 0, 0, 12, 42, },
+ { 0, 0, 0, 0, 13, 63, },
+ { 2, 0, 0, 0, 13, 42, },
+ { 1, 0, 0, 0, 13, 42, },
+ { 0, 0, 0, 0, 14, 63, },
+ { 2, 0, 0, 0, 14, 63, },
+ { 1, 0, 0, 0, 14, 42, },
+ { 0, 0, 0, 1, 1, 32, },
+ { 2, 0, 0, 1, 1, 42, },
+ { 1, 0, 0, 1, 1, 42, },
+ { 0, 0, 0, 1, 2, 42, },
+ { 2, 0, 0, 1, 2, 42, },
+ { 1, 0, 0, 1, 2, 42, },
+ { 0, 0, 0, 1, 3, 42, },
+ { 2, 0, 0, 1, 3, 42, },
+ { 1, 0, 0, 1, 3, 42, },
+ { 0, 0, 0, 1, 4, 42, },
+ { 2, 0, 0, 1, 4, 42, },
+ { 1, 0, 0, 1, 4, 42, },
+ { 0, 0, 0, 1, 5, 42, },
+ { 2, 0, 0, 1, 5, 42, },
+ { 1, 0, 0, 1, 5, 42, },
+ { 0, 0, 0, 1, 6, 42, },
+ { 2, 0, 0, 1, 6, 42, },
+ { 1, 0, 0, 1, 6, 42, },
+ { 0, 0, 0, 1, 7, 42, },
+ { 2, 0, 0, 1, 7, 42, },
+ { 1, 0, 0, 1, 7, 42, },
+ { 0, 0, 0, 1, 8, 42, },
+ { 2, 0, 0, 1, 8, 42, },
+ { 1, 0, 0, 1, 8, 42, },
+ { 0, 0, 0, 1, 9, 42, },
+ { 2, 0, 0, 1, 9, 42, },
+ { 1, 0, 0, 1, 9, 42, },
+ { 0, 0, 0, 1, 10, 42, },
+ { 2, 0, 0, 1, 10, 42, },
+ { 1, 0, 0, 1, 10, 42, },
+ { 0, 0, 0, 1, 11, 32, },
+ { 2, 0, 0, 1, 11, 42, },
+ { 1, 0, 0, 1, 11, 42, },
+ { 0, 0, 0, 1, 12, 63, },
+ { 2, 0, 0, 1, 12, 42, },
+ { 1, 0, 0, 1, 12, 42, },
+ { 0, 0, 0, 1, 13, 63, },
+ { 2, 0, 0, 1, 13, 42, },
+ { 1, 0, 0, 1, 13, 42, },
+ { 0, 0, 0, 1, 14, 63, },
+ { 2, 0, 0, 1, 14, 63, },
+ { 1, 0, 0, 1, 14, 63, },
+ { 0, 0, 0, 2, 1, 32, },
+ { 2, 0, 0, 2, 1, 42, },
+ { 1, 0, 0, 2, 1, 42, },
+ { 0, 0, 0, 2, 2, 40, },
+ { 2, 0, 0, 2, 2, 42, },
+ { 1, 0, 0, 2, 2, 42, },
+ { 0, 0, 0, 2, 3, 40, },
+ { 2, 0, 0, 2, 3, 42, },
+ { 1, 0, 0, 2, 3, 42, },
+ { 0, 0, 0, 2, 4, 40, },
+ { 2, 0, 0, 2, 4, 42, },
+ { 1, 0, 0, 2, 4, 42, },
+ { 0, 0, 0, 2, 5, 40, },
+ { 2, 0, 0, 2, 5, 42, },
+ { 1, 0, 0, 2, 5, 42, },
+ { 0, 0, 0, 2, 6, 40, },
+ { 2, 0, 0, 2, 6, 42, },
+ { 1, 0, 0, 2, 6, 42, },
+ { 0, 0, 0, 2, 7, 40, },
+ { 2, 0, 0, 2, 7, 42, },
+ { 1, 0, 0, 2, 7, 42, },
+ { 0, 0, 0, 2, 8, 40, },
+ { 2, 0, 0, 2, 8, 42, },
+ { 1, 0, 0, 2, 8, 42, },
+ { 0, 0, 0, 2, 9, 40, },
+ { 2, 0, 0, 2, 9, 42, },
+ { 1, 0, 0, 2, 9, 42, },
+ { 0, 0, 0, 2, 10, 40, },
+ { 2, 0, 0, 2, 10, 42, },
+ { 1, 0, 0, 2, 10, 42, },
+ { 0, 0, 0, 2, 11, 28, },
+ { 2, 0, 0, 2, 11, 42, },
+ { 1, 0, 0, 2, 11, 42, },
+ { 0, 0, 0, 2, 12, 63, },
+ { 2, 0, 0, 2, 12, 42, },
+ { 1, 0, 0, 2, 12, 42, },
+ { 0, 0, 0, 2, 13, 63, },
+ { 2, 0, 0, 2, 13, 42, },
+ { 1, 0, 0, 2, 13, 42, },
+ { 0, 0, 0, 2, 14, 63, },
+ { 2, 0, 0, 2, 14, 63, },
+ { 1, 0, 0, 2, 14, 63, },
+ { 0, 0, 0, 3, 1, 32, },
+ { 2, 0, 0, 3, 1, 40, },
+ { 1, 0, 0, 3, 1, 40, },
+ { 0, 0, 0, 3, 2, 40, },
+ { 2, 0, 0, 3, 2, 40, },
+ { 1, 0, 0, 3, 2, 40, },
+ { 0, 0, 0, 3, 3, 40, },
+ { 2, 0, 0, 3, 3, 40, },
+ { 1, 0, 0, 3, 3, 40, },
+ { 0, 0, 0, 3, 4, 40, },
+ { 2, 0, 0, 3, 4, 40, },
+ { 1, 0, 0, 3, 4, 40, },
+ { 0, 0, 0, 3, 5, 40, },
+ { 2, 0, 0, 3, 5, 40, },
+ { 1, 0, 0, 3, 5, 40, },
+ { 0, 0, 0, 3, 6, 40, },
+ { 2, 0, 0, 3, 6, 40, },
+ { 1, 0, 0, 3, 6, 40, },
+ { 0, 0, 0, 3, 7, 40, },
+ { 2, 0, 0, 3, 7, 40, },
+ { 1, 0, 0, 3, 7, 40, },
+ { 0, 0, 0, 3, 8, 40, },
+ { 2, 0, 0, 3, 8, 40, },
+ { 1, 0, 0, 3, 8, 40, },
+ { 0, 0, 0, 3, 9, 40, },
+ { 2, 0, 0, 3, 9, 40, },
+ { 1, 0, 0, 3, 9, 40, },
+ { 0, 0, 0, 3, 10, 40, },
+ { 2, 0, 0, 3, 10, 40, },
+ { 1, 0, 0, 3, 10, 40, },
+ { 0, 0, 0, 3, 11, 28, },
+ { 2, 0, 0, 3, 11, 40, },
+ { 1, 0, 0, 3, 11, 40, },
+ { 0, 0, 0, 3, 12, 63, },
+ { 2, 0, 0, 3, 12, 40, },
+ { 1, 0, 0, 3, 12, 40, },
+ { 0, 0, 0, 3, 13, 63, },
+ { 2, 0, 0, 3, 13, 40, },
+ { 1, 0, 0, 3, 13, 40, },
+ { 0, 0, 0, 3, 14, 63, },
+ { 2, 0, 0, 3, 14, 63, },
+ { 1, 0, 0, 3, 14, 63, },
+ { 0, 0, 0, 6, 1, 32, },
+ { 2, 0, 0, 6, 1, 40, },
+ { 1, 0, 0, 6, 1, 40, },
+ { 0, 0, 0, 6, 2, 40, },
+ { 2, 0, 0, 6, 2, 40, },
+ { 1, 0, 0, 6, 2, 40, },
+ { 0, 0, 0, 6, 3, 40, },
+ { 2, 0, 0, 6, 3, 40, },
+ { 1, 0, 0, 6, 3, 40, },
+ { 0, 0, 0, 6, 4, 40, },
+ { 2, 0, 0, 6, 4, 40, },
+ { 1, 0, 0, 6, 4, 40, },
+ { 0, 0, 0, 6, 5, 40, },
+ { 2, 0, 0, 6, 5, 40, },
+ { 1, 0, 0, 6, 5, 40, },
+ { 0, 0, 0, 6, 6, 40, },
+ { 2, 0, 0, 6, 6, 40, },
+ { 1, 0, 0, 6, 6, 40, },
+ { 0, 0, 0, 6, 7, 40, },
+ { 2, 0, 0, 6, 7, 40, },
+ { 1, 0, 0, 6, 7, 40, },
+ { 0, 0, 0, 6, 8, 40, },
+ { 2, 0, 0, 6, 8, 40, },
+ { 1, 0, 0, 6, 8, 40, },
+ { 0, 0, 0, 6, 9, 40, },
+ { 2, 0, 0, 6, 9, 40, },
+ { 1, 0, 0, 6, 9, 40, },
+ { 0, 0, 0, 6, 10, 40, },
+ { 2, 0, 0, 6, 10, 40, },
+ { 1, 0, 0, 6, 10, 40, },
+ { 0, 0, 0, 6, 11, 28, },
+ { 2, 0, 0, 6, 11, 40, },
+ { 1, 0, 0, 6, 11, 40, },
+ { 0, 0, 0, 6, 12, 63, },
+ { 2, 0, 0, 6, 12, 40, },
+ { 1, 0, 0, 6, 12, 40, },
+ { 0, 0, 0, 6, 13, 63, },
+ { 2, 0, 0, 6, 13, 40, },
+ { 1, 0, 0, 6, 13, 40, },
+ { 0, 0, 0, 6, 14, 63, },
+ { 2, 0, 0, 6, 14, 63, },
+ { 1, 0, 0, 6, 14, 63, },
+ { 0, 0, 0, 7, 1, 32, },
+ { 2, 0, 0, 7, 1, 40, },
+ { 1, 0, 0, 7, 1, 40, },
+ { 0, 0, 0, 7, 2, 40, },
+ { 2, 0, 0, 7, 2, 40, },
+ { 1, 0, 0, 7, 2, 40, },
+ { 0, 0, 0, 7, 3, 40, },
+ { 2, 0, 0, 7, 3, 40, },
+ { 1, 0, 0, 7, 3, 40, },
+ { 0, 0, 0, 7, 4, 40, },
+ { 2, 0, 0, 7, 4, 40, },
+ { 1, 0, 0, 7, 4, 40, },
+ { 0, 0, 0, 7, 5, 40, },
+ { 2, 0, 0, 7, 5, 40, },
+ { 1, 0, 0, 7, 5, 40, },
+ { 0, 0, 0, 7, 6, 40, },
+ { 2, 0, 0, 7, 6, 40, },
+ { 1, 0, 0, 7, 6, 40, },
+ { 0, 0, 0, 7, 7, 40, },
+ { 2, 0, 0, 7, 7, 40, },
+ { 1, 0, 0, 7, 7, 40, },
+ { 0, 0, 0, 7, 8, 40, },
+ { 2, 0, 0, 7, 8, 40, },
+ { 1, 0, 0, 7, 8, 40, },
+ { 0, 0, 0, 7, 9, 40, },
+ { 2, 0, 0, 7, 9, 40, },
+ { 1, 0, 0, 7, 9, 40, },
+ { 0, 0, 0, 7, 10, 40, },
+ { 2, 0, 0, 7, 10, 40, },
+ { 1, 0, 0, 7, 10, 40, },
+ { 0, 0, 0, 7, 11, 28, },
+ { 2, 0, 0, 7, 11, 40, },
+ { 1, 0, 0, 7, 11, 40, },
+ { 0, 0, 0, 7, 12, 63, },
+ { 2, 0, 0, 7, 12, 40, },
+ { 1, 0, 0, 7, 12, 40, },
+ { 0, 0, 0, 7, 13, 63, },
+ { 2, 0, 0, 7, 13, 40, },
+ { 1, 0, 0, 7, 13, 40, },
+ { 0, 0, 0, 7, 14, 63, },
+ { 2, 0, 0, 7, 14, 63, },
+ { 1, 0, 0, 7, 14, 63, },
+ { 0, 0, 1, 2, 1, 63, },
+ { 2, 0, 1, 2, 1, 63, },
+ { 1, 0, 1, 2, 1, 63, },
+ { 0, 0, 1, 2, 2, 63, },
+ { 2, 0, 1, 2, 2, 63, },
+ { 1, 0, 1, 2, 2, 63, },
+ { 0, 0, 1, 2, 3, 30, },
+ { 2, 0, 1, 2, 3, 34, },
+ { 1, 0, 1, 2, 3, 34, },
+ { 0, 0, 1, 2, 4, 34, },
+ { 2, 0, 1, 2, 4, 34, },
+ { 1, 0, 1, 2, 4, 34, },
+ { 0, 0, 1, 2, 5, 34, },
+ { 2, 0, 1, 2, 5, 34, },
+ { 1, 0, 1, 2, 5, 34, },
+ { 0, 0, 1, 2, 6, 34, },
+ { 2, 0, 1, 2, 6, 34, },
+ { 1, 0, 1, 2, 6, 34, },
+ { 0, 0, 1, 2, 7, 34, },
+ { 2, 0, 1, 2, 7, 34, },
+ { 1, 0, 1, 2, 7, 34, },
+ { 0, 0, 1, 2, 8, 34, },
+ { 2, 0, 1, 2, 8, 34, },
+ { 1, 0, 1, 2, 8, 34, },
+ { 0, 0, 1, 2, 9, 34, },
+ { 2, 0, 1, 2, 9, 34, },
+ { 1, 0, 1, 2, 9, 34, },
+ { 0, 0, 1, 2, 10, 34, },
+ { 2, 0, 1, 2, 10, 34, },
+ { 1, 0, 1, 2, 10, 34, },
+ { 0, 0, 1, 2, 11, 28, },
+ { 2, 0, 1, 2, 11, 34, },
+ { 1, 0, 1, 2, 11, 34, },
+ { 0, 0, 1, 2, 12, 63, },
+ { 2, 0, 1, 2, 12, 34, },
+ { 1, 0, 1, 2, 12, 34, },
+ { 0, 0, 1, 2, 13, 63, },
+ { 2, 0, 1, 2, 13, 34, },
+ { 1, 0, 1, 2, 13, 34, },
+ { 0, 0, 1, 2, 14, 63, },
+ { 2, 0, 1, 2, 14, 63, },
+ { 1, 0, 1, 2, 14, 63, },
+ { 0, 0, 1, 3, 1, 63, },
+ { 2, 0, 1, 3, 1, 63, },
+ { 1, 0, 1, 3, 1, 63, },
+ { 0, 0, 1, 3, 2, 63, },
+ { 2, 0, 1, 3, 2, 63, },
+ { 1, 0, 1, 3, 2, 63, },
+ { 0, 0, 1, 3, 3, 30, },
+ { 2, 0, 1, 3, 3, 34, },
+ { 1, 0, 1, 3, 3, 34, },
+ { 0, 0, 1, 3, 4, 34, },
+ { 2, 0, 1, 3, 4, 34, },
+ { 1, 0, 1, 3, 4, 34, },
+ { 0, 0, 1, 3, 5, 34, },
+ { 2, 0, 1, 3, 5, 34, },
+ { 1, 0, 1, 3, 5, 34, },
+ { 0, 0, 1, 3, 6, 34, },
+ { 2, 0, 1, 3, 6, 34, },
+ { 1, 0, 1, 3, 6, 34, },
+ { 0, 0, 1, 3, 7, 34, },
+ { 2, 0, 1, 3, 7, 34, },
+ { 1, 0, 1, 3, 7, 34, },
+ { 0, 0, 1, 3, 8, 34, },
+ { 2, 0, 1, 3, 8, 34, },
+ { 1, 0, 1, 3, 8, 34, },
+ { 0, 0, 1, 3, 9, 34, },
+ { 2, 0, 1, 3, 9, 34, },
+ { 1, 0, 1, 3, 9, 34, },
+ { 0, 0, 1, 3, 10, 34, },
+ { 2, 0, 1, 3, 10, 34, },
+ { 1, 0, 1, 3, 10, 34, },
+ { 0, 0, 1, 3, 11, 28, },
+ { 2, 0, 1, 3, 11, 34, },
+ { 1, 0, 1, 3, 11, 34, },
+ { 0, 0, 1, 3, 12, 63, },
+ { 2, 0, 1, 3, 12, 34, },
+ { 1, 0, 1, 3, 12, 34, },
+ { 0, 0, 1, 3, 13, 63, },
+ { 2, 0, 1, 3, 13, 34, },
+ { 1, 0, 1, 3, 13, 34, },
+ { 0, 0, 1, 3, 14, 63, },
+ { 2, 0, 1, 3, 14, 63, },
+ { 1, 0, 1, 3, 14, 63, },
+ { 0, 0, 1, 6, 1, 63, },
+ { 2, 0, 1, 6, 1, 63, },
+ { 1, 0, 1, 6, 1, 63, },
+ { 0, 0, 1, 6, 2, 63, },
+ { 2, 0, 1, 6, 2, 63, },
+ { 1, 0, 1, 6, 2, 63, },
+ { 0, 0, 1, 6, 3, 30, },
+ { 2, 0, 1, 6, 3, 34, },
+ { 1, 0, 1, 6, 3, 34, },
+ { 0, 0, 1, 6, 4, 34, },
+ { 2, 0, 1, 6, 4, 34, },
+ { 1, 0, 1, 6, 4, 34, },
+ { 0, 0, 1, 6, 5, 34, },
+ { 2, 0, 1, 6, 5, 34, },
+ { 1, 0, 1, 6, 5, 34, },
+ { 0, 0, 1, 6, 6, 34, },
+ { 2, 0, 1, 6, 6, 34, },
+ { 1, 0, 1, 6, 6, 34, },
+ { 0, 0, 1, 6, 7, 34, },
+ { 2, 0, 1, 6, 7, 34, },
+ { 1, 0, 1, 6, 7, 34, },
+ { 0, 0, 1, 6, 8, 34, },
+ { 2, 0, 1, 6, 8, 34, },
+ { 1, 0, 1, 6, 8, 34, },
+ { 0, 0, 1, 6, 9, 34, },
+ { 2, 0, 1, 6, 9, 34, },
+ { 1, 0, 1, 6, 9, 34, },
+ { 0, 0, 1, 6, 10, 34, },
+ { 2, 0, 1, 6, 10, 34, },
+ { 1, 0, 1, 6, 10, 34, },
+ { 0, 0, 1, 6, 11, 28, },
+ { 2, 0, 1, 6, 11, 34, },
+ { 1, 0, 1, 6, 11, 34, },
+ { 0, 0, 1, 6, 12, 63, },
+ { 2, 0, 1, 6, 12, 34, },
+ { 1, 0, 1, 6, 12, 34, },
+ { 0, 0, 1, 6, 13, 63, },
+ { 2, 0, 1, 6, 13, 34, },
+ { 1, 0, 1, 6, 13, 34, },
+ { 0, 0, 1, 6, 14, 63, },
+ { 2, 0, 1, 6, 14, 63, },
+ { 1, 0, 1, 6, 14, 63, },
+ { 0, 0, 1, 7, 1, 63, },
+ { 2, 0, 1, 7, 1, 63, },
+ { 1, 0, 1, 7, 1, 63, },
+ { 0, 0, 1, 7, 2, 63, },
+ { 2, 0, 1, 7, 2, 63, },
+ { 1, 0, 1, 7, 2, 63, },
+ { 0, 0, 1, 7, 3, 30, },
+ { 2, 0, 1, 7, 3, 34, },
+ { 1, 0, 1, 7, 3, 34, },
+ { 0, 0, 1, 7, 4, 34, },
+ { 2, 0, 1, 7, 4, 34, },
+ { 1, 0, 1, 7, 4, 34, },
+ { 0, 0, 1, 7, 5, 34, },
+ { 2, 0, 1, 7, 5, 34, },
+ { 1, 0, 1, 7, 5, 34, },
+ { 0, 0, 1, 7, 6, 34, },
+ { 2, 0, 1, 7, 6, 34, },
+ { 1, 0, 1, 7, 6, 34, },
+ { 0, 0, 1, 7, 7, 34, },
+ { 2, 0, 1, 7, 7, 34, },
+ { 1, 0, 1, 7, 7, 34, },
+ { 0, 0, 1, 7, 8, 34, },
+ { 2, 0, 1, 7, 8, 34, },
+ { 1, 0, 1, 7, 8, 34, },
+ { 0, 0, 1, 7, 9, 34, },
+ { 2, 0, 1, 7, 9, 34, },
+ { 1, 0, 1, 7, 9, 34, },
+ { 0, 0, 1, 7, 10, 34, },
+ { 2, 0, 1, 7, 10, 34, },
+ { 1, 0, 1, 7, 10, 34, },
+ { 0, 0, 1, 7, 11, 28, },
+ { 2, 0, 1, 7, 11, 34, },
+ { 1, 0, 1, 7, 11, 34, },
+ { 0, 0, 1, 7, 12, 63, },
+ { 2, 0, 1, 7, 12, 34, },
+ { 1, 0, 1, 7, 12, 34, },
+ { 0, 0, 1, 7, 13, 63, },
+ { 2, 0, 1, 7, 13, 34, },
+ { 1, 0, 1, 7, 13, 34, },
+ { 0, 0, 1, 7, 14, 63, },
+ { 2, 0, 1, 7, 14, 63, },
+ { 1, 0, 1, 7, 14, 63, },
+ { 0, 1, 0, 1, 36, 42, },
+ { 2, 1, 0, 1, 36, 42, },
+ { 1, 1, 0, 1, 36, 42, },
+ { 0, 1, 0, 1, 40, 42, },
+ { 2, 1, 0, 1, 40, 42, },
+ { 1, 1, 0, 1, 40, 42, },
+ { 0, 1, 0, 1, 44, 42, },
+ { 2, 1, 0, 1, 44, 42, },
+ { 1, 1, 0, 1, 44, 42, },
+ { 0, 1, 0, 1, 48, 42, },
+ { 2, 1, 0, 1, 48, 42, },
+ { 1, 1, 0, 1, 48, 42, },
+ { 0, 1, 0, 1, 52, 42, },
+ { 2, 1, 0, 1, 52, 42, },
+ { 1, 1, 0, 1, 52, 42, },
+ { 0, 1, 0, 1, 56, 42, },
+ { 2, 1, 0, 1, 56, 42, },
+ { 1, 1, 0, 1, 56, 42, },
+ { 0, 1, 0, 1, 60, 42, },
+ { 2, 1, 0, 1, 60, 42, },
+ { 1, 1, 0, 1, 60, 42, },
+ { 0, 1, 0, 1, 64, 42, },
+ { 2, 1, 0, 1, 64, 42, },
+ { 1, 1, 0, 1, 64, 42, },
+ { 0, 1, 0, 1, 100, 42, },
+ { 2, 1, 0, 1, 100, 42, },
+ { 1, 1, 0, 1, 100, 42, },
+ { 0, 1, 0, 1, 104, 42, },
+ { 2, 1, 0, 1, 104, 42, },
+ { 1, 1, 0, 1, 104, 42, },
+ { 0, 1, 0, 1, 108, 42, },
+ { 2, 1, 0, 1, 108, 42, },
+ { 1, 1, 0, 1, 108, 42, },
+ { 0, 1, 0, 1, 112, 42, },
+ { 2, 1, 0, 1, 112, 42, },
+ { 1, 1, 0, 1, 112, 42, },
+ { 0, 1, 0, 1, 116, 42, },
+ { 2, 1, 0, 1, 116, 42, },
+ { 1, 1, 0, 1, 116, 42, },
+ { 0, 1, 0, 1, 120, 42, },
+ { 2, 1, 0, 1, 120, 42, },
+ { 1, 1, 0, 1, 120, 42, },
+ { 0, 1, 0, 1, 124, 42, },
+ { 2, 1, 0, 1, 124, 42, },
+ { 1, 1, 0, 1, 124, 42, },
+ { 0, 1, 0, 1, 128, 42, },
+ { 2, 1, 0, 1, 128, 42, },
+ { 1, 1, 0, 1, 128, 42, },
+ { 0, 1, 0, 1, 132, 42, },
+ { 2, 1, 0, 1, 132, 42, },
+ { 1, 1, 0, 1, 132, 42, },
+ { 0, 1, 0, 1, 136, 42, },
+ { 2, 1, 0, 1, 136, 42, },
+ { 1, 1, 0, 1, 136, 42, },
+ { 0, 1, 0, 1, 140, 40, },
+ { 2, 1, 0, 1, 140, 40, },
+ { 1, 1, 0, 1, 140, 40, },
+ { 0, 1, 0, 1, 149, 44, },
+ { 2, 1, 0, 1, 149, 44, },
+ { 1, 1, 0, 1, 149, 63, },
+ { 0, 1, 0, 1, 153, 44, },
+ { 2, 1, 0, 1, 153, 44, },
+ { 1, 1, 0, 1, 153, 63, },
+ { 0, 1, 0, 1, 157, 44, },
+ { 2, 1, 0, 1, 157, 44, },
+ { 1, 1, 0, 1, 157, 63, },
+ { 0, 1, 0, 1, 161, 44, },
+ { 2, 1, 0, 1, 161, 44, },
+ { 1, 1, 0, 1, 161, 63, },
+ { 0, 1, 0, 1, 165, 44, },
+ { 2, 1, 0, 1, 165, 44, },
+ { 1, 1, 0, 1, 165, 63, },
+ { 0, 1, 0, 2, 36, 32, },
+ { 2, 1, 0, 2, 36, 32, },
+ { 1, 1, 0, 2, 36, 32, },
+ { 0, 1, 0, 2, 40, 32, },
+ { 2, 1, 0, 2, 40, 32, },
+ { 1, 1, 0, 2, 40, 32, },
+ { 0, 1, 0, 2, 44, 32, },
+ { 2, 1, 0, 2, 44, 32, },
+ { 1, 1, 0, 2, 44, 32, },
+ { 0, 1, 0, 2, 48, 36, },
+ { 2, 1, 0, 2, 48, 36, },
+ { 1, 1, 0, 2, 48, 36, },
+ { 0, 1, 0, 2, 52, 36, },
+ { 2, 1, 0, 2, 52, 36, },
+ { 1, 1, 0, 2, 52, 36, },
+ { 0, 1, 0, 2, 56, 32, },
+ { 2, 1, 0, 2, 56, 32, },
+ { 1, 1, 0, 2, 56, 32, },
+ { 0, 1, 0, 2, 60, 32, },
+ { 2, 1, 0, 2, 60, 32, },
+ { 1, 1, 0, 2, 60, 32, },
+ { 0, 1, 0, 2, 64, 32, },
+ { 2, 1, 0, 2, 64, 32, },
+ { 1, 1, 0, 2, 64, 32, },
+ { 0, 1, 0, 2, 100, 32, },
+ { 2, 1, 0, 2, 100, 32, },
+ { 1, 1, 0, 2, 100, 32, },
+ { 0, 1, 0, 2, 104, 32, },
+ { 2, 1, 0, 2, 104, 32, },
+ { 1, 1, 0, 2, 104, 32, },
+ { 0, 1, 0, 2, 108, 32, },
+ { 2, 1, 0, 2, 108, 32, },
+ { 1, 1, 0, 2, 108, 32, },
+ { 0, 1, 0, 2, 112, 32, },
+ { 2, 1, 0, 2, 112, 32, },
+ { 1, 1, 0, 2, 112, 32, },
+ { 0, 1, 0, 2, 116, 32, },
+ { 2, 1, 0, 2, 116, 32, },
+ { 1, 1, 0, 2, 116, 32, },
+ { 0, 1, 0, 2, 120, 32, },
+ { 2, 1, 0, 2, 120, 32, },
+ { 1, 1, 0, 2, 120, 32, },
+ { 0, 1, 0, 2, 124, 32, },
+ { 2, 1, 0, 2, 124, 32, },
+ { 1, 1, 0, 2, 124, 32, },
+ { 0, 1, 0, 2, 128, 32, },
+ { 2, 1, 0, 2, 128, 32, },
+ { 1, 1, 0, 2, 128, 32, },
+ { 0, 1, 0, 2, 132, 32, },
+ { 2, 1, 0, 2, 132, 32, },
+ { 1, 1, 0, 2, 132, 32, },
+ { 0, 1, 0, 2, 136, 32, },
+ { 2, 1, 0, 2, 136, 32, },
+ { 1, 1, 0, 2, 136, 32, },
+ { 0, 1, 0, 2, 140, 30, },
+ { 2, 1, 0, 2, 140, 30, },
+ { 1, 1, 0, 2, 140, 30, },
+ { 0, 1, 0, 2, 149, 40, },
+ { 2, 1, 0, 2, 149, 40, },
+ { 1, 1, 0, 2, 149, 63, },
+ { 0, 1, 0, 2, 153, 40, },
+ { 2, 1, 0, 2, 153, 40, },
+ { 1, 1, 0, 2, 153, 63, },
+ { 0, 1, 0, 2, 157, 40, },
+ { 2, 1, 0, 2, 157, 40, },
+ { 1, 1, 0, 2, 157, 63, },
+ { 0, 1, 0, 2, 161, 40, },
+ { 2, 1, 0, 2, 161, 40, },
+ { 1, 1, 0, 2, 161, 63, },
+ { 0, 1, 0, 2, 165, 42, },
+ { 2, 1, 0, 2, 165, 42, },
+ { 1, 1, 0, 2, 165, 63, },
+ { 0, 1, 0, 3, 36, 32, },
+ { 2, 1, 0, 3, 36, 32, },
+ { 1, 1, 0, 3, 36, 32, },
+ { 0, 1, 0, 3, 40, 32, },
+ { 2, 1, 0, 3, 40, 32, },
+ { 1, 1, 0, 3, 40, 32, },
+ { 0, 1, 0, 3, 44, 32, },
+ { 2, 1, 0, 3, 44, 32, },
+ { 1, 1, 0, 3, 44, 32, },
+ { 0, 1, 0, 3, 48, 36, },
+ { 2, 1, 0, 3, 48, 36, },
+ { 1, 1, 0, 3, 48, 36, },
+ { 0, 1, 0, 3, 52, 36, },
+ { 2, 1, 0, 3, 52, 36, },
+ { 1, 1, 0, 3, 52, 36, },
+ { 0, 1, 0, 3, 56, 32, },
+ { 2, 1, 0, 3, 56, 32, },
+ { 1, 1, 0, 3, 56, 32, },
+ { 0, 1, 0, 3, 60, 32, },
+ { 2, 1, 0, 3, 60, 32, },
+ { 1, 1, 0, 3, 60, 32, },
+ { 0, 1, 0, 3, 64, 32, },
+ { 2, 1, 0, 3, 64, 32, },
+ { 1, 1, 0, 3, 64, 32, },
+ { 0, 1, 0, 3, 100, 32, },
+ { 2, 1, 0, 3, 100, 32, },
+ { 1, 1, 0, 3, 100, 32, },
+ { 0, 1, 0, 3, 104, 32, },
+ { 2, 1, 0, 3, 104, 32, },
+ { 1, 1, 0, 3, 104, 32, },
+ { 0, 1, 0, 3, 108, 32, },
+ { 2, 1, 0, 3, 108, 32, },
+ { 1, 1, 0, 3, 108, 32, },
+ { 0, 1, 0, 3, 112, 32, },
+ { 2, 1, 0, 3, 112, 32, },
+ { 1, 1, 0, 3, 112, 32, },
+ { 0, 1, 0, 3, 116, 32, },
+ { 2, 1, 0, 3, 116, 32, },
+ { 1, 1, 0, 3, 116, 32, },
+ { 0, 1, 0, 3, 120, 32, },
+ { 2, 1, 0, 3, 120, 32, },
+ { 1, 1, 0, 3, 120, 32, },
+ { 0, 1, 0, 3, 124, 32, },
+ { 2, 1, 0, 3, 124, 32, },
+ { 1, 1, 0, 3, 124, 32, },
+ { 0, 1, 0, 3, 128, 32, },
+ { 2, 1, 0, 3, 128, 32, },
+ { 1, 1, 0, 3, 128, 32, },
+ { 0, 1, 0, 3, 132, 32, },
+ { 2, 1, 0, 3, 132, 32, },
+ { 1, 1, 0, 3, 132, 32, },
+ { 0, 1, 0, 3, 136, 32, },
+ { 2, 1, 0, 3, 136, 32, },
+ { 1, 1, 0, 3, 136, 32, },
+ { 0, 1, 0, 3, 140, 30, },
+ { 2, 1, 0, 3, 140, 30, },
+ { 1, 1, 0, 3, 140, 30, },
+ { 0, 1, 0, 3, 149, 40, },
+ { 2, 1, 0, 3, 149, 40, },
+ { 1, 1, 0, 3, 149, 63, },
+ { 0, 1, 0, 3, 153, 40, },
+ { 2, 1, 0, 3, 153, 40, },
+ { 1, 1, 0, 3, 153, 63, },
+ { 0, 1, 0, 3, 157, 40, },
+ { 2, 1, 0, 3, 157, 40, },
+ { 1, 1, 0, 3, 157, 63, },
+ { 0, 1, 0, 3, 161, 40, },
+ { 2, 1, 0, 3, 161, 40, },
+ { 1, 1, 0, 3, 161, 63, },
+ { 0, 1, 0, 3, 165, 42, },
+ { 2, 1, 0, 3, 165, 42, },
+ { 1, 1, 0, 3, 165, 63, },
+ { 0, 1, 0, 6, 36, 32, },
+ { 2, 1, 0, 6, 36, 32, },
+ { 1, 1, 0, 6, 36, 32, },
+ { 0, 1, 0, 6, 40, 32, },
+ { 2, 1, 0, 6, 40, 32, },
+ { 1, 1, 0, 6, 40, 32, },
+ { 0, 1, 0, 6, 44, 32, },
+ { 2, 1, 0, 6, 44, 32, },
+ { 1, 1, 0, 6, 44, 32, },
+ { 0, 1, 0, 6, 48, 36, },
+ { 2, 1, 0, 6, 48, 36, },
+ { 1, 1, 0, 6, 48, 36, },
+ { 0, 1, 0, 6, 52, 36, },
+ { 2, 1, 0, 6, 52, 36, },
+ { 1, 1, 0, 6, 52, 36, },
+ { 0, 1, 0, 6, 56, 32, },
+ { 2, 1, 0, 6, 56, 32, },
+ { 1, 1, 0, 6, 56, 32, },
+ { 0, 1, 0, 6, 60, 32, },
+ { 2, 1, 0, 6, 60, 32, },
+ { 1, 1, 0, 6, 60, 32, },
+ { 0, 1, 0, 6, 64, 32, },
+ { 2, 1, 0, 6, 64, 32, },
+ { 1, 1, 0, 6, 64, 32, },
+ { 0, 1, 0, 6, 100, 32, },
+ { 2, 1, 0, 6, 100, 32, },
+ { 1, 1, 0, 6, 100, 32, },
+ { 0, 1, 0, 6, 104, 32, },
+ { 2, 1, 0, 6, 104, 32, },
+ { 1, 1, 0, 6, 104, 32, },
+ { 0, 1, 0, 6, 108, 32, },
+ { 2, 1, 0, 6, 108, 32, },
+ { 1, 1, 0, 6, 108, 32, },
+ { 0, 1, 0, 6, 112, 32, },
+ { 2, 1, 0, 6, 112, 32, },
+ { 1, 1, 0, 6, 112, 32, },
+ { 0, 1, 0, 6, 116, 32, },
+ { 2, 1, 0, 6, 116, 32, },
+ { 1, 1, 0, 6, 116, 32, },
+ { 0, 1, 0, 6, 120, 32, },
+ { 2, 1, 0, 6, 120, 32, },
+ { 1, 1, 0, 6, 120, 32, },
+ { 0, 1, 0, 6, 124, 32, },
+ { 2, 1, 0, 6, 124, 32, },
+ { 1, 1, 0, 6, 124, 32, },
+ { 0, 1, 0, 6, 128, 32, },
+ { 2, 1, 0, 6, 128, 32, },
+ { 1, 1, 0, 6, 128, 32, },
+ { 0, 1, 0, 6, 132, 32, },
+ { 2, 1, 0, 6, 132, 32, },
+ { 1, 1, 0, 6, 132, 32, },
+ { 0, 1, 0, 6, 136, 32, },
+ { 2, 1, 0, 6, 136, 32, },
+ { 1, 1, 0, 6, 136, 32, },
+ { 0, 1, 0, 6, 140, 30, },
+ { 2, 1, 0, 6, 140, 30, },
+ { 1, 1, 0, 6, 140, 30, },
+ { 0, 1, 0, 6, 149, 40, },
+ { 2, 1, 0, 6, 149, 40, },
+ { 1, 1, 0, 6, 149, 63, },
+ { 0, 1, 0, 6, 153, 40, },
+ { 2, 1, 0, 6, 153, 40, },
+ { 1, 1, 0, 6, 153, 63, },
+ { 0, 1, 0, 6, 157, 40, },
+ { 2, 1, 0, 6, 157, 40, },
+ { 1, 1, 0, 6, 157, 63, },
+ { 0, 1, 0, 6, 161, 40, },
+ { 2, 1, 0, 6, 161, 40, },
+ { 1, 1, 0, 6, 161, 63, },
+ { 0, 1, 0, 6, 165, 42, },
+ { 2, 1, 0, 6, 165, 42, },
+ { 1, 1, 0, 6, 165, 63, },
+ { 0, 1, 0, 7, 36, 32, },
+ { 2, 1, 0, 7, 36, 32, },
+ { 1, 1, 0, 7, 36, 32, },
+ { 0, 1, 0, 7, 40, 32, },
+ { 2, 1, 0, 7, 40, 32, },
+ { 1, 1, 0, 7, 40, 32, },
+ { 0, 1, 0, 7, 44, 32, },
+ { 2, 1, 0, 7, 44, 32, },
+ { 1, 1, 0, 7, 44, 32, },
+ { 0, 1, 0, 7, 48, 36, },
+ { 2, 1, 0, 7, 48, 36, },
+ { 1, 1, 0, 7, 48, 36, },
+ { 0, 1, 0, 7, 52, 36, },
+ { 2, 1, 0, 7, 52, 36, },
+ { 1, 1, 0, 7, 52, 36, },
+ { 0, 1, 0, 7, 56, 32, },
+ { 2, 1, 0, 7, 56, 32, },
+ { 1, 1, 0, 7, 56, 32, },
+ { 0, 1, 0, 7, 60, 32, },
+ { 2, 1, 0, 7, 60, 32, },
+ { 1, 1, 0, 7, 60, 32, },
+ { 0, 1, 0, 7, 64, 32, },
+ { 2, 1, 0, 7, 64, 32, },
+ { 1, 1, 0, 7, 64, 32, },
+ { 0, 1, 0, 7, 100, 32, },
+ { 2, 1, 0, 7, 100, 32, },
+ { 1, 1, 0, 7, 100, 32, },
+ { 0, 1, 0, 7, 104, 32, },
+ { 2, 1, 0, 7, 104, 32, },
+ { 1, 1, 0, 7, 104, 32, },
+ { 0, 1, 0, 7, 108, 32, },
+ { 2, 1, 0, 7, 108, 32, },
+ { 1, 1, 0, 7, 108, 32, },
+ { 0, 1, 0, 7, 112, 32, },
+ { 2, 1, 0, 7, 112, 32, },
+ { 1, 1, 0, 7, 112, 32, },
+ { 0, 1, 0, 7, 116, 32, },
+ { 2, 1, 0, 7, 116, 32, },
+ { 1, 1, 0, 7, 116, 32, },
+ { 0, 1, 0, 7, 120, 32, },
+ { 2, 1, 0, 7, 120, 32, },
+ { 1, 1, 0, 7, 120, 32, },
+ { 0, 1, 0, 7, 124, 32, },
+ { 2, 1, 0, 7, 124, 32, },
+ { 1, 1, 0, 7, 124, 32, },
+ { 0, 1, 0, 7, 128, 32, },
+ { 2, 1, 0, 7, 128, 32, },
+ { 1, 1, 0, 7, 128, 32, },
+ { 0, 1, 0, 7, 132, 32, },
+ { 2, 1, 0, 7, 132, 32, },
+ { 1, 1, 0, 7, 132, 32, },
+ { 0, 1, 0, 7, 136, 32, },
+ { 2, 1, 0, 7, 136, 32, },
+ { 1, 1, 0, 7, 136, 32, },
+ { 0, 1, 0, 7, 140, 30, },
+ { 2, 1, 0, 7, 140, 30, },
+ { 1, 1, 0, 7, 140, 30, },
+ { 0, 1, 0, 7, 149, 40, },
+ { 2, 1, 0, 7, 149, 40, },
+ { 1, 1, 0, 7, 149, 63, },
+ { 0, 1, 0, 7, 153, 40, },
+ { 2, 1, 0, 7, 153, 40, },
+ { 1, 1, 0, 7, 153, 63, },
+ { 0, 1, 0, 7, 157, 40, },
+ { 2, 1, 0, 7, 157, 40, },
+ { 1, 1, 0, 7, 157, 63, },
+ { 0, 1, 0, 7, 161, 40, },
+ { 2, 1, 0, 7, 161, 40, },
+ { 1, 1, 0, 7, 161, 63, },
+ { 0, 1, 0, 7, 165, 42, },
+ { 2, 1, 0, 7, 165, 42, },
+ { 1, 1, 0, 7, 165, 63, },
+ { 0, 1, 1, 2, 38, 32, },
+ { 2, 1, 1, 2, 38, 32, },
+ { 1, 1, 1, 2, 38, 32, },
+ { 0, 1, 1, 2, 46, 36, },
+ { 2, 1, 1, 2, 46, 36, },
+ { 1, 1, 1, 2, 46, 36, },
+ { 0, 1, 1, 2, 54, 36, },
+ { 2, 1, 1, 2, 54, 36, },
+ { 1, 1, 1, 2, 54, 36, },
+ { 0, 1, 1, 2, 62, 32, },
+ { 2, 1, 1, 2, 62, 32, },
+ { 1, 1, 1, 2, 62, 32, },
+ { 0, 1, 1, 2, 102, 30, },
+ { 2, 1, 1, 2, 102, 30, },
+ { 1, 1, 1, 2, 102, 30, },
+ { 0, 1, 1, 2, 110, 32, },
+ { 2, 1, 1, 2, 110, 32, },
+ { 1, 1, 1, 2, 110, 32, },
+ { 0, 1, 1, 2, 118, 32, },
+ { 2, 1, 1, 2, 118, 32, },
+ { 1, 1, 1, 2, 118, 32, },
+ { 0, 1, 1, 2, 126, 32, },
+ { 2, 1, 1, 2, 126, 32, },
+ { 1, 1, 1, 2, 126, 32, },
+ { 0, 1, 1, 2, 134, 36, },
+ { 2, 1, 1, 2, 134, 36, },
+ { 1, 1, 1, 2, 134, 36, },
+ { 0, 1, 1, 2, 151, 36, },
+ { 2, 1, 1, 2, 151, 36, },
+ { 1, 1, 1, 2, 151, 63, },
+ { 0, 1, 1, 2, 159, 40, },
+ { 2, 1, 1, 2, 159, 40, },
+ { 1, 1, 1, 2, 159, 63, },
+ { 0, 1, 1, 3, 38, 32, },
+ { 2, 1, 1, 3, 38, 32, },
+ { 1, 1, 1, 3, 38, 32, },
+ { 0, 1, 1, 3, 46, 36, },
+ { 2, 1, 1, 3, 46, 36, },
+ { 1, 1, 1, 3, 46, 36, },
+ { 0, 1, 1, 3, 54, 36, },
+ { 2, 1, 1, 3, 54, 36, },
+ { 1, 1, 1, 3, 54, 36, },
+ { 0, 1, 1, 3, 62, 32, },
+ { 2, 1, 1, 3, 62, 32, },
+ { 1, 1, 1, 3, 62, 32, },
+ { 0, 1, 1, 3, 102, 30, },
+ { 2, 1, 1, 3, 102, 30, },
+ { 1, 1, 1, 3, 102, 30, },
+ { 0, 1, 1, 3, 110, 32, },
+ { 2, 1, 1, 3, 110, 32, },
+ { 1, 1, 1, 3, 110, 32, },
+ { 0, 1, 1, 3, 118, 32, },
+ { 2, 1, 1, 3, 118, 32, },
+ { 1, 1, 1, 3, 118, 32, },
+ { 0, 1, 1, 3, 126, 32, },
+ { 2, 1, 1, 3, 126, 32, },
+ { 1, 1, 1, 3, 126, 32, },
+ { 0, 1, 1, 3, 134, 36, },
+ { 2, 1, 1, 3, 134, 36, },
+ { 1, 1, 1, 3, 134, 36, },
+ { 0, 1, 1, 3, 151, 36, },
+ { 2, 1, 1, 3, 151, 36, },
+ { 1, 1, 1, 3, 151, 63, },
+ { 0, 1, 1, 3, 159, 40, },
+ { 2, 1, 1, 3, 159, 40, },
+ { 1, 1, 1, 3, 159, 63, },
+ { 0, 1, 1, 6, 38, 32, },
+ { 2, 1, 1, 6, 38, 32, },
+ { 1, 1, 1, 6, 38, 32, },
+ { 0, 1, 1, 6, 46, 36, },
+ { 2, 1, 1, 6, 46, 36, },
+ { 1, 1, 1, 6, 46, 36, },
+ { 0, 1, 1, 6, 54, 36, },
+ { 2, 1, 1, 6, 54, 36, },
+ { 1, 1, 1, 6, 54, 36, },
+ { 0, 1, 1, 6, 62, 32, },
+ { 2, 1, 1, 6, 62, 32, },
+ { 1, 1, 1, 6, 62, 32, },
+ { 0, 1, 1, 6, 102, 30, },
+ { 2, 1, 1, 6, 102, 30, },
+ { 1, 1, 1, 6, 102, 30, },
+ { 0, 1, 1, 6, 110, 32, },
+ { 2, 1, 1, 6, 110, 32, },
+ { 1, 1, 1, 6, 110, 32, },
+ { 0, 1, 1, 6, 118, 32, },
+ { 2, 1, 1, 6, 118, 32, },
+ { 1, 1, 1, 6, 118, 32, },
+ { 0, 1, 1, 6, 126, 32, },
+ { 2, 1, 1, 6, 126, 32, },
+ { 1, 1, 1, 6, 126, 32, },
+ { 0, 1, 1, 6, 134, 36, },
+ { 2, 1, 1, 6, 134, 36, },
+ { 1, 1, 1, 6, 134, 36, },
+ { 0, 1, 1, 6, 151, 36, },
+ { 2, 1, 1, 6, 151, 36, },
+ { 1, 1, 1, 6, 151, 63, },
+ { 0, 1, 1, 6, 159, 40, },
+ { 2, 1, 1, 6, 159, 40, },
+ { 1, 1, 1, 6, 159, 63, },
+ { 0, 1, 1, 7, 38, 32, },
+ { 2, 1, 1, 7, 38, 32, },
+ { 1, 1, 1, 7, 38, 32, },
+ { 0, 1, 1, 7, 46, 36, },
+ { 2, 1, 1, 7, 46, 36, },
+ { 1, 1, 1, 7, 46, 36, },
+ { 0, 1, 1, 7, 54, 36, },
+ { 2, 1, 1, 7, 54, 36, },
+ { 1, 1, 1, 7, 54, 36, },
+ { 0, 1, 1, 7, 62, 32, },
+ { 2, 1, 1, 7, 62, 32, },
+ { 1, 1, 1, 7, 62, 32, },
+ { 0, 1, 1, 7, 102, 30, },
+ { 2, 1, 1, 7, 102, 30, },
+ { 1, 1, 1, 7, 102, 30, },
+ { 0, 1, 1, 7, 110, 32, },
+ { 2, 1, 1, 7, 110, 32, },
+ { 1, 1, 1, 7, 110, 32, },
+ { 0, 1, 1, 7, 118, 32, },
+ { 2, 1, 1, 7, 118, 32, },
+ { 1, 1, 1, 7, 118, 32, },
+ { 0, 1, 1, 7, 126, 32, },
+ { 2, 1, 1, 7, 126, 32, },
+ { 1, 1, 1, 7, 126, 32, },
+ { 0, 1, 1, 7, 134, 36, },
+ { 2, 1, 1, 7, 134, 36, },
+ { 1, 1, 1, 7, 134, 36, },
+ { 0, 1, 1, 7, 151, 36, },
+ { 2, 1, 1, 7, 151, 36, },
+ { 1, 1, 1, 7, 151, 63, },
+ { 0, 1, 1, 7, 159, 40, },
+ { 2, 1, 1, 7, 159, 40, },
+ { 1, 1, 1, 7, 159, 63, },
+ { 0, 1, 2, 4, 42, 34, },
+ { 2, 1, 2, 4, 42, 34, },
+ { 1, 1, 2, 4, 42, 34, },
+ { 0, 1, 2, 4, 58, 34, },
+ { 2, 1, 2, 4, 58, 34, },
+ { 1, 1, 2, 4, 58, 34, },
+ { 0, 1, 2, 4, 106, 32, },
+ { 2, 1, 2, 4, 106, 32, },
+ { 1, 1, 2, 4, 106, 32, },
+ { 0, 1, 2, 4, 122, 34, },
+ { 2, 1, 2, 4, 122, 34, },
+ { 1, 1, 2, 4, 122, 34, },
+ { 0, 1, 2, 4, 155, 34, },
+ { 2, 1, 2, 4, 155, 34, },
+ { 1, 1, 2, 4, 155, 63, },
+ { 0, 1, 2, 5, 42, 34, },
+ { 2, 1, 2, 5, 42, 34, },
+ { 1, 1, 2, 5, 42, 34, },
+ { 0, 1, 2, 5, 58, 34, },
+ { 2, 1, 2, 5, 58, 34, },
+ { 1, 1, 2, 5, 58, 34, },
+ { 0, 1, 2, 5, 106, 32, },
+ { 2, 1, 2, 5, 106, 32, },
+ { 1, 1, 2, 5, 106, 32, },
+ { 0, 1, 2, 5, 122, 34, },
+ { 2, 1, 2, 5, 122, 34, },
+ { 1, 1, 2, 5, 122, 34, },
+ { 0, 1, 2, 5, 155, 34, },
+ { 2, 1, 2, 5, 155, 34, },
+ { 1, 1, 2, 5, 155, 63, },
+ { 0, 1, 2, 8, 42, 34, },
+ { 2, 1, 2, 8, 42, 34, },
+ { 1, 1, 2, 8, 42, 34, },
+ { 0, 1, 2, 8, 58, 34, },
+ { 2, 1, 2, 8, 58, 34, },
+ { 1, 1, 2, 8, 58, 34, },
+ { 0, 1, 2, 8, 106, 32, },
+ { 2, 1, 2, 8, 106, 32, },
+ { 1, 1, 2, 8, 106, 32, },
+ { 0, 1, 2, 8, 122, 34, },
+ { 2, 1, 2, 8, 122, 34, },
+ { 1, 1, 2, 8, 122, 34, },
+ { 0, 1, 2, 8, 155, 34, },
+ { 2, 1, 2, 8, 155, 34, },
+ { 1, 1, 2, 8, 155, 63, },
+ { 0, 1, 2, 9, 42, 34, },
+ { 2, 1, 2, 9, 42, 34, },
+ { 1, 1, 2, 9, 42, 34, },
+ { 0, 1, 2, 9, 58, 34, },
+ { 2, 1, 2, 9, 58, 34, },
+ { 1, 1, 2, 9, 58, 34, },
+ { 0, 1, 2, 9, 106, 32, },
+ { 2, 1, 2, 9, 106, 32, },
+ { 1, 1, 2, 9, 106, 32, },
+ { 0, 1, 2, 9, 122, 34, },
+ { 2, 1, 2, 9, 122, 34, },
+ { 1, 1, 2, 9, 122, 34, },
+ { 0, 1, 2, 9, 155, 34, },
+ { 2, 1, 2, 9, 155, 34, },
+ { 1, 1, 2, 9, 155, 63, },
+};
+
+RTW_DECL_TABLE_TXPWR_LMT(rtw8814a_txpwr_lmt_type2);
+
+static const struct rtw_txpwr_lmt_cfg_pair rtw8814a_txpwr_lmt_type3[] = {
+ { 0, 0, 0, 0, 1, 46, },
+ { 2, 0, 0, 0, 1, 40, },
+ { 1, 0, 0, 0, 1, 40, },
+ { 0, 0, 0, 0, 2, 46, },
+ { 2, 0, 0, 0, 2, 40, },
+ { 1, 0, 0, 0, 2, 40, },
+ { 0, 0, 0, 0, 3, 46, },
+ { 2, 0, 0, 0, 3, 40, },
+ { 1, 0, 0, 0, 3, 40, },
+ { 0, 0, 0, 0, 4, 46, },
+ { 2, 0, 0, 0, 4, 40, },
+ { 1, 0, 0, 0, 4, 40, },
+ { 0, 0, 0, 0, 5, 46, },
+ { 2, 0, 0, 0, 5, 40, },
+ { 1, 0, 0, 0, 5, 40, },
+ { 0, 0, 0, 0, 6, 46, },
+ { 2, 0, 0, 0, 6, 40, },
+ { 1, 0, 0, 0, 6, 40, },
+ { 0, 0, 0, 0, 7, 46, },
+ { 2, 0, 0, 0, 7, 40, },
+ { 1, 0, 0, 0, 7, 40, },
+ { 0, 0, 0, 0, 8, 46, },
+ { 2, 0, 0, 0, 8, 40, },
+ { 1, 0, 0, 0, 8, 40, },
+ { 0, 0, 0, 0, 9, 46, },
+ { 2, 0, 0, 0, 9, 40, },
+ { 1, 0, 0, 0, 9, 40, },
+ { 0, 0, 0, 0, 10, 46, },
+ { 2, 0, 0, 0, 10, 40, },
+ { 1, 0, 0, 0, 10, 40, },
+ { 0, 0, 0, 0, 11, 46, },
+ { 2, 0, 0, 0, 11, 40, },
+ { 1, 0, 0, 0, 11, 40, },
+ { 0, 0, 0, 0, 12, 63, },
+ { 2, 0, 0, 0, 12, 40, },
+ { 1, 0, 0, 0, 12, 40, },
+ { 0, 0, 0, 0, 13, 63, },
+ { 2, 0, 0, 0, 13, 40, },
+ { 1, 0, 0, 0, 13, 40, },
+ { 0, 0, 0, 0, 14, 63, },
+ { 2, 0, 0, 0, 14, 63, },
+ { 1, 0, 0, 0, 14, 40, },
+ { 0, 0, 0, 1, 1, 46, },
+ { 2, 0, 0, 1, 1, 40, },
+ { 1, 0, 0, 1, 1, 40, },
+ { 0, 0, 0, 1, 2, 46, },
+ { 2, 0, 0, 1, 2, 40, },
+ { 1, 0, 0, 1, 2, 40, },
+ { 0, 0, 0, 1, 3, 46, },
+ { 2, 0, 0, 1, 3, 40, },
+ { 1, 0, 0, 1, 3, 40, },
+ { 0, 0, 0, 1, 4, 46, },
+ { 2, 0, 0, 1, 4, 40, },
+ { 1, 0, 0, 1, 4, 40, },
+ { 0, 0, 0, 1, 5, 46, },
+ { 2, 0, 0, 1, 5, 40, },
+ { 1, 0, 0, 1, 5, 40, },
+ { 0, 0, 0, 1, 6, 46, },
+ { 2, 0, 0, 1, 6, 40, },
+ { 1, 0, 0, 1, 6, 40, },
+ { 0, 0, 0, 1, 7, 46, },
+ { 2, 0, 0, 1, 7, 40, },
+ { 1, 0, 0, 1, 7, 40, },
+ { 0, 0, 0, 1, 8, 46, },
+ { 2, 0, 0, 1, 8, 40, },
+ { 1, 0, 0, 1, 8, 40, },
+ { 0, 0, 0, 1, 9, 46, },
+ { 2, 0, 0, 1, 9, 40, },
+ { 1, 0, 0, 1, 9, 40, },
+ { 0, 0, 0, 1, 10, 46, },
+ { 2, 0, 0, 1, 10, 40, },
+ { 1, 0, 0, 1, 10, 40, },
+ { 0, 0, 0, 1, 11, 46, },
+ { 2, 0, 0, 1, 11, 40, },
+ { 1, 0, 0, 1, 11, 40, },
+ { 0, 0, 0, 1, 12, 63, },
+ { 2, 0, 0, 1, 12, 40, },
+ { 1, 0, 0, 1, 12, 40, },
+ { 0, 0, 0, 1, 13, 63, },
+ { 2, 0, 0, 1, 13, 40, },
+ { 1, 0, 0, 1, 13, 40, },
+ { 0, 0, 0, 1, 14, 63, },
+ { 2, 0, 0, 1, 14, 63, },
+ { 1, 0, 0, 1, 14, 63, },
+ { 0, 0, 0, 2, 1, 46, },
+ { 2, 0, 0, 2, 1, 40, },
+ { 1, 0, 0, 2, 1, 40, },
+ { 0, 0, 0, 2, 2, 46, },
+ { 2, 0, 0, 2, 2, 40, },
+ { 1, 0, 0, 2, 2, 40, },
+ { 0, 0, 0, 2, 3, 46, },
+ { 2, 0, 0, 2, 3, 40, },
+ { 1, 0, 0, 2, 3, 40, },
+ { 0, 0, 0, 2, 4, 46, },
+ { 2, 0, 0, 2, 4, 40, },
+ { 1, 0, 0, 2, 4, 40, },
+ { 0, 0, 0, 2, 5, 46, },
+ { 2, 0, 0, 2, 5, 40, },
+ { 1, 0, 0, 2, 5, 40, },
+ { 0, 0, 0, 2, 6, 46, },
+ { 2, 0, 0, 2, 6, 40, },
+ { 1, 0, 0, 2, 6, 40, },
+ { 0, 0, 0, 2, 7, 46, },
+ { 2, 0, 0, 2, 7, 40, },
+ { 1, 0, 0, 2, 7, 40, },
+ { 0, 0, 0, 2, 8, 46, },
+ { 2, 0, 0, 2, 8, 40, },
+ { 1, 0, 0, 2, 8, 40, },
+ { 0, 0, 0, 2, 9, 46, },
+ { 2, 0, 0, 2, 9, 40, },
+ { 1, 0, 0, 2, 9, 40, },
+ { 0, 0, 0, 2, 10, 46, },
+ { 2, 0, 0, 2, 10, 40, },
+ { 1, 0, 0, 2, 10, 40, },
+ { 0, 0, 0, 2, 11, 46, },
+ { 2, 0, 0, 2, 11, 40, },
+ { 1, 0, 0, 2, 11, 40, },
+ { 0, 0, 0, 2, 12, 63, },
+ { 2, 0, 0, 2, 12, 40, },
+ { 1, 0, 0, 2, 12, 40, },
+ { 0, 0, 0, 2, 13, 63, },
+ { 2, 0, 0, 2, 13, 40, },
+ { 1, 0, 0, 2, 13, 40, },
+ { 0, 0, 0, 2, 14, 63, },
+ { 2, 0, 0, 2, 14, 63, },
+ { 1, 0, 0, 2, 14, 63, },
+ { 0, 0, 0, 3, 1, 46, },
+ { 2, 0, 0, 3, 1, 40, },
+ { 1, 0, 0, 3, 1, 40, },
+ { 0, 0, 0, 3, 2, 46, },
+ { 2, 0, 0, 3, 2, 40, },
+ { 1, 0, 0, 3, 2, 40, },
+ { 0, 0, 0, 3, 3, 46, },
+ { 2, 0, 0, 3, 3, 40, },
+ { 1, 0, 0, 3, 3, 40, },
+ { 0, 0, 0, 3, 4, 46, },
+ { 2, 0, 0, 3, 4, 40, },
+ { 1, 0, 0, 3, 4, 40, },
+ { 0, 0, 0, 3, 5, 46, },
+ { 2, 0, 0, 3, 5, 40, },
+ { 1, 0, 0, 3, 5, 40, },
+ { 0, 0, 0, 3, 6, 46, },
+ { 2, 0, 0, 3, 6, 40, },
+ { 1, 0, 0, 3, 6, 40, },
+ { 0, 0, 0, 3, 7, 46, },
+ { 2, 0, 0, 3, 7, 40, },
+ { 1, 0, 0, 3, 7, 40, },
+ { 0, 0, 0, 3, 8, 46, },
+ { 2, 0, 0, 3, 8, 40, },
+ { 1, 0, 0, 3, 8, 40, },
+ { 0, 0, 0, 3, 9, 46, },
+ { 2, 0, 0, 3, 9, 40, },
+ { 1, 0, 0, 3, 9, 40, },
+ { 0, 0, 0, 3, 10, 46, },
+ { 2, 0, 0, 3, 10, 40, },
+ { 1, 0, 0, 3, 10, 40, },
+ { 0, 0, 0, 3, 11, 46, },
+ { 2, 0, 0, 3, 11, 40, },
+ { 1, 0, 0, 3, 11, 40, },
+ { 0, 0, 0, 3, 12, 63, },
+ { 2, 0, 0, 3, 12, 40, },
+ { 1, 0, 0, 3, 12, 40, },
+ { 0, 0, 0, 3, 13, 63, },
+ { 2, 0, 0, 3, 13, 40, },
+ { 1, 0, 0, 3, 13, 40, },
+ { 0, 0, 0, 3, 14, 63, },
+ { 2, 0, 0, 3, 14, 63, },
+ { 1, 0, 0, 3, 14, 63, },
+ { 0, 0, 0, 6, 1, 46, },
+ { 2, 0, 0, 6, 1, 40, },
+ { 1, 0, 0, 6, 1, 40, },
+ { 0, 0, 0, 6, 2, 46, },
+ { 2, 0, 0, 6, 2, 40, },
+ { 1, 0, 0, 6, 2, 40, },
+ { 0, 0, 0, 6, 3, 46, },
+ { 2, 0, 0, 6, 3, 40, },
+ { 1, 0, 0, 6, 3, 40, },
+ { 0, 0, 0, 6, 4, 46, },
+ { 2, 0, 0, 6, 4, 40, },
+ { 1, 0, 0, 6, 4, 40, },
+ { 0, 0, 0, 6, 5, 46, },
+ { 2, 0, 0, 6, 5, 40, },
+ { 1, 0, 0, 6, 5, 40, },
+ { 0, 0, 0, 6, 6, 46, },
+ { 2, 0, 0, 6, 6, 40, },
+ { 1, 0, 0, 6, 6, 40, },
+ { 0, 0, 0, 6, 7, 46, },
+ { 2, 0, 0, 6, 7, 40, },
+ { 1, 0, 0, 6, 7, 40, },
+ { 0, 0, 0, 6, 8, 46, },
+ { 2, 0, 0, 6, 8, 40, },
+ { 1, 0, 0, 6, 8, 40, },
+ { 0, 0, 0, 6, 9, 46, },
+ { 2, 0, 0, 6, 9, 40, },
+ { 1, 0, 0, 6, 9, 40, },
+ { 0, 0, 0, 6, 10, 46, },
+ { 2, 0, 0, 6, 10, 40, },
+ { 1, 0, 0, 6, 10, 40, },
+ { 0, 0, 0, 6, 11, 46, },
+ { 2, 0, 0, 6, 11, 40, },
+ { 1, 0, 0, 6, 11, 40, },
+ { 0, 0, 0, 6, 12, 63, },
+ { 2, 0, 0, 6, 12, 40, },
+ { 1, 0, 0, 6, 12, 40, },
+ { 0, 0, 0, 6, 13, 63, },
+ { 2, 0, 0, 6, 13, 40, },
+ { 1, 0, 0, 6, 13, 40, },
+ { 0, 0, 0, 6, 14, 63, },
+ { 2, 0, 0, 6, 14, 63, },
+ { 1, 0, 0, 6, 14, 63, },
+ { 0, 0, 0, 7, 1, 46, },
+ { 2, 0, 0, 7, 1, 40, },
+ { 1, 0, 0, 7, 1, 40, },
+ { 0, 0, 0, 7, 2, 46, },
+ { 2, 0, 0, 7, 2, 40, },
+ { 1, 0, 0, 7, 2, 40, },
+ { 0, 0, 0, 7, 3, 46, },
+ { 2, 0, 0, 7, 3, 40, },
+ { 1, 0, 0, 7, 3, 40, },
+ { 0, 0, 0, 7, 4, 46, },
+ { 2, 0, 0, 7, 4, 40, },
+ { 1, 0, 0, 7, 4, 40, },
+ { 0, 0, 0, 7, 5, 46, },
+ { 2, 0, 0, 7, 5, 40, },
+ { 1, 0, 0, 7, 5, 40, },
+ { 0, 0, 0, 7, 6, 46, },
+ { 2, 0, 0, 7, 6, 40, },
+ { 1, 0, 0, 7, 6, 40, },
+ { 0, 0, 0, 7, 7, 46, },
+ { 2, 0, 0, 7, 7, 40, },
+ { 1, 0, 0, 7, 7, 40, },
+ { 0, 0, 0, 7, 8, 46, },
+ { 2, 0, 0, 7, 8, 40, },
+ { 1, 0, 0, 7, 8, 40, },
+ { 0, 0, 0, 7, 9, 46, },
+ { 2, 0, 0, 7, 9, 40, },
+ { 1, 0, 0, 7, 9, 40, },
+ { 0, 0, 0, 7, 10, 46, },
+ { 2, 0, 0, 7, 10, 40, },
+ { 1, 0, 0, 7, 10, 40, },
+ { 0, 0, 0, 7, 11, 46, },
+ { 2, 0, 0, 7, 11, 40, },
+ { 1, 0, 0, 7, 11, 40, },
+ { 0, 0, 0, 7, 12, 63, },
+ { 2, 0, 0, 7, 12, 40, },
+ { 1, 0, 0, 7, 12, 40, },
+ { 0, 0, 0, 7, 13, 63, },
+ { 2, 0, 0, 7, 13, 40, },
+ { 1, 0, 0, 7, 13, 40, },
+ { 0, 0, 0, 7, 14, 63, },
+ { 2, 0, 0, 7, 14, 63, },
+ { 1, 0, 0, 7, 14, 63, },
+ { 0, 0, 1, 2, 1, 63, },
+ { 2, 0, 1, 2, 1, 63, },
+ { 1, 0, 1, 2, 1, 63, },
+ { 0, 0, 1, 2, 2, 63, },
+ { 2, 0, 1, 2, 2, 63, },
+ { 1, 0, 1, 2, 2, 63, },
+ { 0, 0, 1, 2, 3, 46, },
+ { 2, 0, 1, 2, 3, 40, },
+ { 1, 0, 1, 2, 3, 40, },
+ { 0, 0, 1, 2, 4, 46, },
+ { 2, 0, 1, 2, 4, 40, },
+ { 1, 0, 1, 2, 4, 40, },
+ { 0, 0, 1, 2, 5, 46, },
+ { 2, 0, 1, 2, 5, 40, },
+ { 1, 0, 1, 2, 5, 40, },
+ { 0, 0, 1, 2, 6, 46, },
+ { 2, 0, 1, 2, 6, 40, },
+ { 1, 0, 1, 2, 6, 40, },
+ { 0, 0, 1, 2, 7, 46, },
+ { 2, 0, 1, 2, 7, 40, },
+ { 1, 0, 1, 2, 7, 40, },
+ { 0, 0, 1, 2, 8, 46, },
+ { 2, 0, 1, 2, 8, 40, },
+ { 1, 0, 1, 2, 8, 40, },
+ { 0, 0, 1, 2, 9, 46, },
+ { 2, 0, 1, 2, 9, 40, },
+ { 1, 0, 1, 2, 9, 40, },
+ { 0, 0, 1, 2, 10, 46, },
+ { 2, 0, 1, 2, 10, 40, },
+ { 1, 0, 1, 2, 10, 40, },
+ { 0, 0, 1, 2, 11, 46, },
+ { 2, 0, 1, 2, 11, 40, },
+ { 1, 0, 1, 2, 11, 40, },
+ { 0, 0, 1, 2, 12, 63, },
+ { 2, 0, 1, 2, 12, 40, },
+ { 1, 0, 1, 2, 12, 40, },
+ { 0, 0, 1, 2, 13, 63, },
+ { 2, 0, 1, 2, 13, 40, },
+ { 1, 0, 1, 2, 13, 40, },
+ { 0, 0, 1, 2, 14, 63, },
+ { 2, 0, 1, 2, 14, 63, },
+ { 1, 0, 1, 2, 14, 63, },
+ { 0, 0, 1, 3, 1, 63, },
+ { 2, 0, 1, 3, 1, 63, },
+ { 1, 0, 1, 3, 1, 63, },
+ { 0, 0, 1, 3, 2, 63, },
+ { 2, 0, 1, 3, 2, 63, },
+ { 1, 0, 1, 3, 2, 63, },
+ { 0, 0, 1, 3, 3, 46, },
+ { 2, 0, 1, 3, 3, 40, },
+ { 1, 0, 1, 3, 3, 40, },
+ { 0, 0, 1, 3, 4, 46, },
+ { 2, 0, 1, 3, 4, 40, },
+ { 1, 0, 1, 3, 4, 40, },
+ { 0, 0, 1, 3, 5, 46, },
+ { 2, 0, 1, 3, 5, 40, },
+ { 1, 0, 1, 3, 5, 40, },
+ { 0, 0, 1, 3, 6, 46, },
+ { 2, 0, 1, 3, 6, 40, },
+ { 1, 0, 1, 3, 6, 40, },
+ { 0, 0, 1, 3, 7, 46, },
+ { 2, 0, 1, 3, 7, 40, },
+ { 1, 0, 1, 3, 7, 40, },
+ { 0, 0, 1, 3, 8, 46, },
+ { 2, 0, 1, 3, 8, 40, },
+ { 1, 0, 1, 3, 8, 40, },
+ { 0, 0, 1, 3, 9, 46, },
+ { 2, 0, 1, 3, 9, 40, },
+ { 1, 0, 1, 3, 9, 40, },
+ { 0, 0, 1, 3, 10, 46, },
+ { 2, 0, 1, 3, 10, 40, },
+ { 1, 0, 1, 3, 10, 40, },
+ { 0, 0, 1, 3, 11, 46, },
+ { 2, 0, 1, 3, 11, 40, },
+ { 1, 0, 1, 3, 11, 40, },
+ { 0, 0, 1, 3, 12, 63, },
+ { 2, 0, 1, 3, 12, 40, },
+ { 1, 0, 1, 3, 12, 40, },
+ { 0, 0, 1, 3, 13, 63, },
+ { 2, 0, 1, 3, 13, 40, },
+ { 1, 0, 1, 3, 13, 40, },
+ { 0, 0, 1, 3, 14, 63, },
+ { 2, 0, 1, 3, 14, 63, },
+ { 1, 0, 1, 3, 14, 63, },
+ { 0, 0, 1, 6, 1, 63, },
+ { 2, 0, 1, 6, 1, 63, },
+ { 1, 0, 1, 6, 1, 63, },
+ { 0, 0, 1, 6, 2, 63, },
+ { 2, 0, 1, 6, 2, 63, },
+ { 1, 0, 1, 6, 2, 63, },
+ { 0, 0, 1, 6, 3, 46, },
+ { 2, 0, 1, 6, 3, 40, },
+ { 1, 0, 1, 6, 3, 40, },
+ { 0, 0, 1, 6, 4, 46, },
+ { 2, 0, 1, 6, 4, 40, },
+ { 1, 0, 1, 6, 4, 40, },
+ { 0, 0, 1, 6, 5, 46, },
+ { 2, 0, 1, 6, 5, 40, },
+ { 1, 0, 1, 6, 5, 40, },
+ { 0, 0, 1, 6, 6, 46, },
+ { 2, 0, 1, 6, 6, 40, },
+ { 1, 0, 1, 6, 6, 40, },
+ { 0, 0, 1, 6, 7, 46, },
+ { 2, 0, 1, 6, 7, 40, },
+ { 1, 0, 1, 6, 7, 40, },
+ { 0, 0, 1, 6, 8, 46, },
+ { 2, 0, 1, 6, 8, 40, },
+ { 1, 0, 1, 6, 8, 40, },
+ { 0, 0, 1, 6, 9, 46, },
+ { 2, 0, 1, 6, 9, 40, },
+ { 1, 0, 1, 6, 9, 40, },
+ { 0, 0, 1, 6, 10, 46, },
+ { 2, 0, 1, 6, 10, 40, },
+ { 1, 0, 1, 6, 10, 40, },
+ { 0, 0, 1, 6, 11, 46, },
+ { 2, 0, 1, 6, 11, 40, },
+ { 1, 0, 1, 6, 11, 40, },
+ { 0, 0, 1, 6, 12, 63, },
+ { 2, 0, 1, 6, 12, 40, },
+ { 1, 0, 1, 6, 12, 40, },
+ { 0, 0, 1, 6, 13, 63, },
+ { 2, 0, 1, 6, 13, 40, },
+ { 1, 0, 1, 6, 13, 40, },
+ { 0, 0, 1, 6, 14, 63, },
+ { 2, 0, 1, 6, 14, 63, },
+ { 1, 0, 1, 6, 14, 63, },
+ { 0, 0, 1, 7, 1, 63, },
+ { 2, 0, 1, 7, 1, 63, },
+ { 1, 0, 1, 7, 1, 63, },
+ { 0, 0, 1, 7, 2, 63, },
+ { 2, 0, 1, 7, 2, 63, },
+ { 1, 0, 1, 7, 2, 63, },
+ { 0, 0, 1, 7, 3, 46, },
+ { 2, 0, 1, 7, 3, 40, },
+ { 1, 0, 1, 7, 3, 40, },
+ { 0, 0, 1, 7, 4, 46, },
+ { 2, 0, 1, 7, 4, 40, },
+ { 1, 0, 1, 7, 4, 40, },
+ { 0, 0, 1, 7, 5, 46, },
+ { 2, 0, 1, 7, 5, 40, },
+ { 1, 0, 1, 7, 5, 40, },
+ { 0, 0, 1, 7, 6, 46, },
+ { 2, 0, 1, 7, 6, 40, },
+ { 1, 0, 1, 7, 6, 40, },
+ { 0, 0, 1, 7, 7, 46, },
+ { 2, 0, 1, 7, 7, 40, },
+ { 1, 0, 1, 7, 7, 40, },
+ { 0, 0, 1, 7, 8, 46, },
+ { 2, 0, 1, 7, 8, 40, },
+ { 1, 0, 1, 7, 8, 40, },
+ { 0, 0, 1, 7, 9, 46, },
+ { 2, 0, 1, 7, 9, 40, },
+ { 1, 0, 1, 7, 9, 40, },
+ { 0, 0, 1, 7, 10, 46, },
+ { 2, 0, 1, 7, 10, 40, },
+ { 1, 0, 1, 7, 10, 40, },
+ { 0, 0, 1, 7, 11, 46, },
+ { 2, 0, 1, 7, 11, 40, },
+ { 1, 0, 1, 7, 11, 40, },
+ { 0, 0, 1, 7, 12, 63, },
+ { 2, 0, 1, 7, 12, 40, },
+ { 1, 0, 1, 7, 12, 40, },
+ { 0, 0, 1, 7, 13, 63, },
+ { 2, 0, 1, 7, 13, 40, },
+ { 1, 0, 1, 7, 13, 40, },
+ { 0, 0, 1, 7, 14, 63, },
+ { 2, 0, 1, 7, 14, 63, },
+ { 1, 0, 1, 7, 14, 63, },
+ { 0, 1, 0, 1, 36, 46, },
+ { 2, 1, 0, 1, 36, 40, },
+ { 1, 1, 0, 1, 36, 40, },
+ { 0, 1, 0, 1, 40, 46, },
+ { 2, 1, 0, 1, 40, 40, },
+ { 1, 1, 0, 1, 40, 40, },
+ { 0, 1, 0, 1, 44, 46, },
+ { 2, 1, 0, 1, 44, 40, },
+ { 1, 1, 0, 1, 44, 40, },
+ { 0, 1, 0, 1, 48, 46, },
+ { 2, 1, 0, 1, 48, 40, },
+ { 1, 1, 0, 1, 48, 40, },
+ { 0, 1, 0, 1, 52, 46, },
+ { 2, 1, 0, 1, 52, 40, },
+ { 1, 1, 0, 1, 52, 40, },
+ { 0, 1, 0, 1, 56, 46, },
+ { 2, 1, 0, 1, 56, 40, },
+ { 1, 1, 0, 1, 56, 40, },
+ { 0, 1, 0, 1, 60, 46, },
+ { 2, 1, 0, 1, 60, 40, },
+ { 1, 1, 0, 1, 60, 40, },
+ { 0, 1, 0, 1, 64, 46, },
+ { 2, 1, 0, 1, 64, 40, },
+ { 1, 1, 0, 1, 64, 40, },
+ { 0, 1, 0, 1, 100, 46, },
+ { 2, 1, 0, 1, 100, 40, },
+ { 1, 1, 0, 1, 100, 40, },
+ { 0, 1, 0, 1, 104, 46, },
+ { 2, 1, 0, 1, 104, 40, },
+ { 1, 1, 0, 1, 104, 40, },
+ { 0, 1, 0, 1, 108, 46, },
+ { 2, 1, 0, 1, 108, 40, },
+ { 1, 1, 0, 1, 108, 40, },
+ { 0, 1, 0, 1, 112, 46, },
+ { 2, 1, 0, 1, 112, 40, },
+ { 1, 1, 0, 1, 112, 40, },
+ { 0, 1, 0, 1, 116, 46, },
+ { 2, 1, 0, 1, 116, 40, },
+ { 1, 1, 0, 1, 116, 40, },
+ { 0, 1, 0, 1, 120, 46, },
+ { 2, 1, 0, 1, 120, 40, },
+ { 1, 1, 0, 1, 120, 40, },
+ { 0, 1, 0, 1, 124, 46, },
+ { 2, 1, 0, 1, 124, 40, },
+ { 1, 1, 0, 1, 124, 40, },
+ { 0, 1, 0, 1, 128, 46, },
+ { 2, 1, 0, 1, 128, 40, },
+ { 1, 1, 0, 1, 128, 40, },
+ { 0, 1, 0, 1, 132, 46, },
+ { 2, 1, 0, 1, 132, 40, },
+ { 1, 1, 0, 1, 132, 40, },
+ { 0, 1, 0, 1, 136, 46, },
+ { 2, 1, 0, 1, 136, 40, },
+ { 1, 1, 0, 1, 136, 40, },
+ { 0, 1, 0, 1, 140, 46, },
+ { 2, 1, 0, 1, 140, 40, },
+ { 1, 1, 0, 1, 140, 40, },
+ { 0, 1, 0, 1, 149, 46, },
+ { 2, 1, 0, 1, 149, 40, },
+ { 1, 1, 0, 1, 149, 63, },
+ { 0, 1, 0, 1, 153, 46, },
+ { 2, 1, 0, 1, 153, 40, },
+ { 1, 1, 0, 1, 153, 63, },
+ { 0, 1, 0, 1, 157, 46, },
+ { 2, 1, 0, 1, 157, 40, },
+ { 1, 1, 0, 1, 157, 63, },
+ { 0, 1, 0, 1, 161, 46, },
+ { 2, 1, 0, 1, 161, 40, },
+ { 1, 1, 0, 1, 161, 63, },
+ { 0, 1, 0, 1, 165, 46, },
+ { 2, 1, 0, 1, 165, 40, },
+ { 1, 1, 0, 1, 165, 63, },
+ { 0, 1, 0, 2, 36, 46, },
+ { 2, 1, 0, 2, 36, 40, },
+ { 1, 1, 0, 2, 36, 40, },
+ { 0, 1, 0, 2, 40, 46, },
+ { 2, 1, 0, 2, 40, 40, },
+ { 1, 1, 0, 2, 40, 40, },
+ { 0, 1, 0, 2, 44, 46, },
+ { 2, 1, 0, 2, 44, 40, },
+ { 1, 1, 0, 2, 44, 40, },
+ { 0, 1, 0, 2, 48, 46, },
+ { 2, 1, 0, 2, 48, 40, },
+ { 1, 1, 0, 2, 48, 40, },
+ { 0, 1, 0, 2, 52, 46, },
+ { 2, 1, 0, 2, 52, 40, },
+ { 1, 1, 0, 2, 52, 40, },
+ { 0, 1, 0, 2, 56, 46, },
+ { 2, 1, 0, 2, 56, 40, },
+ { 1, 1, 0, 2, 56, 40, },
+ { 0, 1, 0, 2, 60, 46, },
+ { 2, 1, 0, 2, 60, 40, },
+ { 1, 1, 0, 2, 60, 40, },
+ { 0, 1, 0, 2, 64, 46, },
+ { 2, 1, 0, 2, 64, 40, },
+ { 1, 1, 0, 2, 64, 40, },
+ { 0, 1, 0, 2, 100, 46, },
+ { 2, 1, 0, 2, 100, 40, },
+ { 1, 1, 0, 2, 100, 40, },
+ { 0, 1, 0, 2, 104, 46, },
+ { 2, 1, 0, 2, 104, 40, },
+ { 1, 1, 0, 2, 104, 40, },
+ { 0, 1, 0, 2, 108, 46, },
+ { 2, 1, 0, 2, 108, 40, },
+ { 1, 1, 0, 2, 108, 40, },
+ { 0, 1, 0, 2, 112, 46, },
+ { 2, 1, 0, 2, 112, 40, },
+ { 1, 1, 0, 2, 112, 40, },
+ { 0, 1, 0, 2, 116, 46, },
+ { 2, 1, 0, 2, 116, 40, },
+ { 1, 1, 0, 2, 116, 40, },
+ { 0, 1, 0, 2, 120, 46, },
+ { 2, 1, 0, 2, 120, 40, },
+ { 1, 1, 0, 2, 120, 40, },
+ { 0, 1, 0, 2, 124, 46, },
+ { 2, 1, 0, 2, 124, 40, },
+ { 1, 1, 0, 2, 124, 40, },
+ { 0, 1, 0, 2, 128, 46, },
+ { 2, 1, 0, 2, 128, 40, },
+ { 1, 1, 0, 2, 128, 40, },
+ { 0, 1, 0, 2, 132, 46, },
+ { 2, 1, 0, 2, 132, 40, },
+ { 1, 1, 0, 2, 132, 40, },
+ { 0, 1, 0, 2, 136, 46, },
+ { 2, 1, 0, 2, 136, 40, },
+ { 1, 1, 0, 2, 136, 40, },
+ { 0, 1, 0, 2, 140, 46, },
+ { 2, 1, 0, 2, 140, 40, },
+ { 1, 1, 0, 2, 140, 40, },
+ { 0, 1, 0, 2, 149, 46, },
+ { 2, 1, 0, 2, 149, 40, },
+ { 1, 1, 0, 2, 149, 63, },
+ { 0, 1, 0, 2, 153, 46, },
+ { 2, 1, 0, 2, 153, 40, },
+ { 1, 1, 0, 2, 153, 63, },
+ { 0, 1, 0, 2, 157, 46, },
+ { 2, 1, 0, 2, 157, 40, },
+ { 1, 1, 0, 2, 157, 63, },
+ { 0, 1, 0, 2, 161, 46, },
+ { 2, 1, 0, 2, 161, 40, },
+ { 1, 1, 0, 2, 161, 63, },
+ { 0, 1, 0, 2, 165, 46, },
+ { 2, 1, 0, 2, 165, 40, },
+ { 1, 1, 0, 2, 165, 63, },
+ { 0, 1, 0, 3, 36, 46, },
+ { 2, 1, 0, 3, 36, 40, },
+ { 1, 1, 0, 3, 36, 40, },
+ { 0, 1, 0, 3, 40, 46, },
+ { 2, 1, 0, 3, 40, 40, },
+ { 1, 1, 0, 3, 40, 40, },
+ { 0, 1, 0, 3, 44, 46, },
+ { 2, 1, 0, 3, 44, 40, },
+ { 1, 1, 0, 3, 44, 40, },
+ { 0, 1, 0, 3, 48, 46, },
+ { 2, 1, 0, 3, 48, 40, },
+ { 1, 1, 0, 3, 48, 40, },
+ { 0, 1, 0, 3, 52, 46, },
+ { 2, 1, 0, 3, 52, 40, },
+ { 1, 1, 0, 3, 52, 40, },
+ { 0, 1, 0, 3, 56, 46, },
+ { 2, 1, 0, 3, 56, 40, },
+ { 1, 1, 0, 3, 56, 40, },
+ { 0, 1, 0, 3, 60, 46, },
+ { 2, 1, 0, 3, 60, 40, },
+ { 1, 1, 0, 3, 60, 40, },
+ { 0, 1, 0, 3, 64, 46, },
+ { 2, 1, 0, 3, 64, 40, },
+ { 1, 1, 0, 3, 64, 40, },
+ { 0, 1, 0, 3, 100, 46, },
+ { 2, 1, 0, 3, 100, 40, },
+ { 1, 1, 0, 3, 100, 40, },
+ { 0, 1, 0, 3, 104, 46, },
+ { 2, 1, 0, 3, 104, 40, },
+ { 1, 1, 0, 3, 104, 40, },
+ { 0, 1, 0, 3, 108, 46, },
+ { 2, 1, 0, 3, 108, 40, },
+ { 1, 1, 0, 3, 108, 40, },
+ { 0, 1, 0, 3, 112, 46, },
+ { 2, 1, 0, 3, 112, 40, },
+ { 1, 1, 0, 3, 112, 40, },
+ { 0, 1, 0, 3, 116, 46, },
+ { 2, 1, 0, 3, 116, 40, },
+ { 1, 1, 0, 3, 116, 40, },
+ { 0, 1, 0, 3, 120, 46, },
+ { 2, 1, 0, 3, 120, 40, },
+ { 1, 1, 0, 3, 120, 40, },
+ { 0, 1, 0, 3, 124, 46, },
+ { 2, 1, 0, 3, 124, 40, },
+ { 1, 1, 0, 3, 124, 40, },
+ { 0, 1, 0, 3, 128, 46, },
+ { 2, 1, 0, 3, 128, 40, },
+ { 1, 1, 0, 3, 128, 40, },
+ { 0, 1, 0, 3, 132, 46, },
+ { 2, 1, 0, 3, 132, 40, },
+ { 1, 1, 0, 3, 132, 40, },
+ { 0, 1, 0, 3, 136, 46, },
+ { 2, 1, 0, 3, 136, 40, },
+ { 1, 1, 0, 3, 136, 40, },
+ { 0, 1, 0, 3, 140, 46, },
+ { 2, 1, 0, 3, 140, 40, },
+ { 1, 1, 0, 3, 140, 40, },
+ { 0, 1, 0, 3, 149, 46, },
+ { 2, 1, 0, 3, 149, 40, },
+ { 1, 1, 0, 3, 149, 63, },
+ { 0, 1, 0, 3, 153, 46, },
+ { 2, 1, 0, 3, 153, 40, },
+ { 1, 1, 0, 3, 153, 63, },
+ { 0, 1, 0, 3, 157, 46, },
+ { 2, 1, 0, 3, 157, 40, },
+ { 1, 1, 0, 3, 157, 63, },
+ { 0, 1, 0, 3, 161, 46, },
+ { 2, 1, 0, 3, 161, 40, },
+ { 1, 1, 0, 3, 161, 63, },
+ { 0, 1, 0, 3, 165, 46, },
+ { 2, 1, 0, 3, 165, 40, },
+ { 1, 1, 0, 3, 165, 63, },
+ { 0, 1, 0, 6, 36, 46, },
+ { 2, 1, 0, 6, 36, 40, },
+ { 1, 1, 0, 6, 36, 40, },
+ { 0, 1, 0, 6, 40, 46, },
+ { 2, 1, 0, 6, 40, 40, },
+ { 1, 1, 0, 6, 40, 40, },
+ { 0, 1, 0, 6, 44, 46, },
+ { 2, 1, 0, 6, 44, 40, },
+ { 1, 1, 0, 6, 44, 40, },
+ { 0, 1, 0, 6, 48, 46, },
+ { 2, 1, 0, 6, 48, 40, },
+ { 1, 1, 0, 6, 48, 40, },
+ { 0, 1, 0, 6, 52, 46, },
+ { 2, 1, 0, 6, 52, 40, },
+ { 1, 1, 0, 6, 52, 40, },
+ { 0, 1, 0, 6, 56, 46, },
+ { 2, 1, 0, 6, 56, 40, },
+ { 1, 1, 0, 6, 56, 40, },
+ { 0, 1, 0, 6, 60, 46, },
+ { 2, 1, 0, 6, 60, 40, },
+ { 1, 1, 0, 6, 60, 40, },
+ { 0, 1, 0, 6, 64, 46, },
+ { 2, 1, 0, 6, 64, 40, },
+ { 1, 1, 0, 6, 64, 40, },
+ { 0, 1, 0, 6, 100, 46, },
+ { 2, 1, 0, 6, 100, 40, },
+ { 1, 1, 0, 6, 100, 40, },
+ { 0, 1, 0, 6, 104, 46, },
+ { 2, 1, 0, 6, 104, 40, },
+ { 1, 1, 0, 6, 104, 40, },
+ { 0, 1, 0, 6, 108, 46, },
+ { 2, 1, 0, 6, 108, 40, },
+ { 1, 1, 0, 6, 108, 40, },
+ { 0, 1, 0, 6, 112, 46, },
+ { 2, 1, 0, 6, 112, 40, },
+ { 1, 1, 0, 6, 112, 40, },
+ { 0, 1, 0, 6, 116, 46, },
+ { 2, 1, 0, 6, 116, 40, },
+ { 1, 1, 0, 6, 116, 40, },
+ { 0, 1, 0, 6, 120, 46, },
+ { 2, 1, 0, 6, 120, 40, },
+ { 1, 1, 0, 6, 120, 40, },
+ { 0, 1, 0, 6, 124, 46, },
+ { 2, 1, 0, 6, 124, 40, },
+ { 1, 1, 0, 6, 124, 40, },
+ { 0, 1, 0, 6, 128, 46, },
+ { 2, 1, 0, 6, 128, 40, },
+ { 1, 1, 0, 6, 128, 40, },
+ { 0, 1, 0, 6, 132, 46, },
+ { 2, 1, 0, 6, 132, 40, },
+ { 1, 1, 0, 6, 132, 40, },
+ { 0, 1, 0, 6, 136, 46, },
+ { 2, 1, 0, 6, 136, 40, },
+ { 1, 1, 0, 6, 136, 40, },
+ { 0, 1, 0, 6, 140, 46, },
+ { 2, 1, 0, 6, 140, 40, },
+ { 1, 1, 0, 6, 140, 40, },
+ { 0, 1, 0, 6, 149, 46, },
+ { 2, 1, 0, 6, 149, 40, },
+ { 1, 1, 0, 6, 149, 63, },
+ { 0, 1, 0, 6, 153, 46, },
+ { 2, 1, 0, 6, 153, 40, },
+ { 1, 1, 0, 6, 153, 63, },
+ { 0, 1, 0, 6, 157, 46, },
+ { 2, 1, 0, 6, 157, 40, },
+ { 1, 1, 0, 6, 157, 63, },
+ { 0, 1, 0, 6, 161, 46, },
+ { 2, 1, 0, 6, 161, 40, },
+ { 1, 1, 0, 6, 161, 63, },
+ { 0, 1, 0, 6, 165, 46, },
+ { 2, 1, 0, 6, 165, 40, },
+ { 1, 1, 0, 6, 165, 63, },
+ { 0, 1, 0, 7, 36, 46, },
+ { 2, 1, 0, 7, 36, 40, },
+ { 1, 1, 0, 7, 36, 40, },
+ { 0, 1, 0, 7, 40, 46, },
+ { 2, 1, 0, 7, 40, 40, },
+ { 1, 1, 0, 7, 40, 40, },
+ { 0, 1, 0, 7, 44, 46, },
+ { 2, 1, 0, 7, 44, 40, },
+ { 1, 1, 0, 7, 44, 40, },
+ { 0, 1, 0, 7, 48, 46, },
+ { 2, 1, 0, 7, 48, 40, },
+ { 1, 1, 0, 7, 48, 40, },
+ { 0, 1, 0, 7, 52, 46, },
+ { 2, 1, 0, 7, 52, 40, },
+ { 1, 1, 0, 7, 52, 40, },
+ { 0, 1, 0, 7, 56, 46, },
+ { 2, 1, 0, 7, 56, 40, },
+ { 1, 1, 0, 7, 56, 40, },
+ { 0, 1, 0, 7, 60, 46, },
+ { 2, 1, 0, 7, 60, 40, },
+ { 1, 1, 0, 7, 60, 40, },
+ { 0, 1, 0, 7, 64, 46, },
+ { 2, 1, 0, 7, 64, 40, },
+ { 1, 1, 0, 7, 64, 40, },
+ { 0, 1, 0, 7, 100, 46, },
+ { 2, 1, 0, 7, 100, 40, },
+ { 1, 1, 0, 7, 100, 40, },
+ { 0, 1, 0, 7, 104, 46, },
+ { 2, 1, 0, 7, 104, 40, },
+ { 1, 1, 0, 7, 104, 40, },
+ { 0, 1, 0, 7, 108, 46, },
+ { 2, 1, 0, 7, 108, 40, },
+ { 1, 1, 0, 7, 108, 40, },
+ { 0, 1, 0, 7, 112, 46, },
+ { 2, 1, 0, 7, 112, 40, },
+ { 1, 1, 0, 7, 112, 40, },
+ { 0, 1, 0, 7, 116, 46, },
+ { 2, 1, 0, 7, 116, 40, },
+ { 1, 1, 0, 7, 116, 40, },
+ { 0, 1, 0, 7, 120, 46, },
+ { 2, 1, 0, 7, 120, 40, },
+ { 1, 1, 0, 7, 120, 40, },
+ { 0, 1, 0, 7, 124, 46, },
+ { 2, 1, 0, 7, 124, 40, },
+ { 1, 1, 0, 7, 124, 40, },
+ { 0, 1, 0, 7, 128, 46, },
+ { 2, 1, 0, 7, 128, 40, },
+ { 1, 1, 0, 7, 128, 40, },
+ { 0, 1, 0, 7, 132, 46, },
+ { 2, 1, 0, 7, 132, 40, },
+ { 1, 1, 0, 7, 132, 40, },
+ { 0, 1, 0, 7, 136, 46, },
+ { 2, 1, 0, 7, 136, 40, },
+ { 1, 1, 0, 7, 136, 40, },
+ { 0, 1, 0, 7, 140, 46, },
+ { 2, 1, 0, 7, 140, 40, },
+ { 1, 1, 0, 7, 140, 40, },
+ { 0, 1, 0, 7, 149, 46, },
+ { 2, 1, 0, 7, 149, 40, },
+ { 1, 1, 0, 7, 149, 63, },
+ { 0, 1, 0, 7, 153, 46, },
+ { 2, 1, 0, 7, 153, 40, },
+ { 1, 1, 0, 7, 153, 63, },
+ { 0, 1, 0, 7, 157, 46, },
+ { 2, 1, 0, 7, 157, 40, },
+ { 1, 1, 0, 7, 157, 63, },
+ { 0, 1, 0, 7, 161, 46, },
+ { 2, 1, 0, 7, 161, 40, },
+ { 1, 1, 0, 7, 161, 63, },
+ { 0, 1, 0, 7, 165, 46, },
+ { 2, 1, 0, 7, 165, 40, },
+ { 1, 1, 0, 7, 165, 63, },
+ { 0, 1, 1, 2, 38, 46, },
+ { 2, 1, 1, 2, 38, 40, },
+ { 1, 1, 1, 2, 38, 40, },
+ { 0, 1, 1, 2, 46, 46, },
+ { 2, 1, 1, 2, 46, 40, },
+ { 1, 1, 1, 2, 46, 40, },
+ { 0, 1, 1, 2, 54, 46, },
+ { 2, 1, 1, 2, 54, 40, },
+ { 1, 1, 1, 2, 54, 40, },
+ { 0, 1, 1, 2, 62, 46, },
+ { 2, 1, 1, 2, 62, 40, },
+ { 1, 1, 1, 2, 62, 40, },
+ { 0, 1, 1, 2, 102, 46, },
+ { 2, 1, 1, 2, 102, 40, },
+ { 1, 1, 1, 2, 102, 40, },
+ { 0, 1, 1, 2, 110, 46, },
+ { 2, 1, 1, 2, 110, 40, },
+ { 1, 1, 1, 2, 110, 40, },
+ { 0, 1, 1, 2, 118, 46, },
+ { 2, 1, 1, 2, 118, 40, },
+ { 1, 1, 1, 2, 118, 40, },
+ { 0, 1, 1, 2, 126, 46, },
+ { 2, 1, 1, 2, 126, 40, },
+ { 1, 1, 1, 2, 126, 40, },
+ { 0, 1, 1, 2, 134, 46, },
+ { 2, 1, 1, 2, 134, 40, },
+ { 1, 1, 1, 2, 134, 40, },
+ { 0, 1, 1, 2, 151, 46, },
+ { 2, 1, 1, 2, 151, 40, },
+ { 1, 1, 1, 2, 151, 63, },
+ { 0, 1, 1, 2, 159, 46, },
+ { 2, 1, 1, 2, 159, 40, },
+ { 1, 1, 1, 2, 159, 63, },
+ { 0, 1, 1, 3, 38, 46, },
+ { 2, 1, 1, 3, 38, 40, },
+ { 1, 1, 1, 3, 38, 40, },
+ { 0, 1, 1, 3, 46, 46, },
+ { 2, 1, 1, 3, 46, 40, },
+ { 1, 1, 1, 3, 46, 40, },
+ { 0, 1, 1, 3, 54, 46, },
+ { 2, 1, 1, 3, 54, 40, },
+ { 1, 1, 1, 3, 54, 40, },
+ { 0, 1, 1, 3, 62, 46, },
+ { 2, 1, 1, 3, 62, 40, },
+ { 1, 1, 1, 3, 62, 40, },
+ { 0, 1, 1, 3, 102, 46, },
+ { 2, 1, 1, 3, 102, 40, },
+ { 1, 1, 1, 3, 102, 40, },
+ { 0, 1, 1, 3, 110, 46, },
+ { 2, 1, 1, 3, 110, 40, },
+ { 1, 1, 1, 3, 110, 40, },
+ { 0, 1, 1, 3, 118, 46, },
+ { 2, 1, 1, 3, 118, 40, },
+ { 1, 1, 1, 3, 118, 40, },
+ { 0, 1, 1, 3, 126, 46, },
+ { 2, 1, 1, 3, 126, 40, },
+ { 1, 1, 1, 3, 126, 40, },
+ { 0, 1, 1, 3, 134, 46, },
+ { 2, 1, 1, 3, 134, 40, },
+ { 1, 1, 1, 3, 134, 40, },
+ { 0, 1, 1, 3, 151, 46, },
+ { 2, 1, 1, 3, 151, 40, },
+ { 1, 1, 1, 3, 151, 63, },
+ { 0, 1, 1, 3, 159, 46, },
+ { 2, 1, 1, 3, 159, 40, },
+ { 1, 1, 1, 3, 159, 63, },
+ { 0, 1, 1, 6, 38, 46, },
+ { 2, 1, 1, 6, 38, 40, },
+ { 1, 1, 1, 6, 38, 40, },
+ { 0, 1, 1, 6, 46, 46, },
+ { 2, 1, 1, 6, 46, 40, },
+ { 1, 1, 1, 6, 46, 40, },
+ { 0, 1, 1, 6, 54, 46, },
+ { 2, 1, 1, 6, 54, 40, },
+ { 1, 1, 1, 6, 54, 40, },
+ { 0, 1, 1, 6, 62, 46, },
+ { 2, 1, 1, 6, 62, 40, },
+ { 1, 1, 1, 6, 62, 40, },
+ { 0, 1, 1, 6, 102, 46, },
+ { 2, 1, 1, 6, 102, 40, },
+ { 1, 1, 1, 6, 102, 40, },
+ { 0, 1, 1, 6, 110, 46, },
+ { 2, 1, 1, 6, 110, 40, },
+ { 1, 1, 1, 6, 110, 40, },
+ { 0, 1, 1, 6, 118, 46, },
+ { 2, 1, 1, 6, 118, 40, },
+ { 1, 1, 1, 6, 118, 40, },
+ { 0, 1, 1, 6, 126, 46, },
+ { 2, 1, 1, 6, 126, 40, },
+ { 1, 1, 1, 6, 126, 40, },
+ { 0, 1, 1, 6, 134, 46, },
+ { 2, 1, 1, 6, 134, 40, },
+ { 1, 1, 1, 6, 134, 40, },
+ { 0, 1, 1, 6, 151, 46, },
+ { 2, 1, 1, 6, 151, 40, },
+ { 1, 1, 1, 6, 151, 63, },
+ { 0, 1, 1, 6, 159, 46, },
+ { 2, 1, 1, 6, 159, 40, },
+ { 1, 1, 1, 6, 159, 63, },
+ { 0, 1, 1, 7, 38, 46, },
+ { 2, 1, 1, 7, 38, 40, },
+ { 1, 1, 1, 7, 38, 40, },
+ { 0, 1, 1, 7, 46, 46, },
+ { 2, 1, 1, 7, 46, 40, },
+ { 1, 1, 1, 7, 46, 40, },
+ { 0, 1, 1, 7, 54, 46, },
+ { 2, 1, 1, 7, 54, 40, },
+ { 1, 1, 1, 7, 54, 40, },
+ { 0, 1, 1, 7, 62, 46, },
+ { 2, 1, 1, 7, 62, 40, },
+ { 1, 1, 1, 7, 62, 40, },
+ { 0, 1, 1, 7, 102, 46, },
+ { 2, 1, 1, 7, 102, 40, },
+ { 1, 1, 1, 7, 102, 40, },
+ { 0, 1, 1, 7, 110, 46, },
+ { 2, 1, 1, 7, 110, 40, },
+ { 1, 1, 1, 7, 110, 40, },
+ { 0, 1, 1, 7, 118, 46, },
+ { 2, 1, 1, 7, 118, 40, },
+ { 1, 1, 1, 7, 118, 40, },
+ { 0, 1, 1, 7, 126, 46, },
+ { 2, 1, 1, 7, 126, 40, },
+ { 1, 1, 1, 7, 126, 40, },
+ { 0, 1, 1, 7, 134, 46, },
+ { 2, 1, 1, 7, 134, 40, },
+ { 1, 1, 1, 7, 134, 40, },
+ { 0, 1, 1, 7, 151, 46, },
+ { 2, 1, 1, 7, 151, 40, },
+ { 1, 1, 1, 7, 151, 63, },
+ { 0, 1, 1, 7, 159, 46, },
+ { 2, 1, 1, 7, 159, 40, },
+ { 1, 1, 1, 7, 159, 63, },
+ { 0, 1, 2, 4, 42, 46, },
+ { 2, 1, 2, 4, 42, 40, },
+ { 1, 1, 2, 4, 42, 40, },
+ { 0, 1, 2, 4, 58, 46, },
+ { 2, 1, 2, 4, 58, 40, },
+ { 1, 1, 2, 4, 58, 40, },
+ { 0, 1, 2, 4, 106, 46, },
+ { 2, 1, 2, 4, 106, 40, },
+ { 1, 1, 2, 4, 106, 40, },
+ { 0, 1, 2, 4, 122, 46, },
+ { 2, 1, 2, 4, 122, 40, },
+ { 1, 1, 2, 4, 122, 40, },
+ { 0, 1, 2, 4, 155, 46, },
+ { 2, 1, 2, 4, 155, 40, },
+ { 1, 1, 2, 4, 155, 63, },
+ { 0, 1, 2, 5, 42, 46, },
+ { 2, 1, 2, 5, 42, 40, },
+ { 1, 1, 2, 5, 42, 40, },
+ { 0, 1, 2, 5, 58, 46, },
+ { 2, 1, 2, 5, 58, 40, },
+ { 1, 1, 2, 5, 58, 40, },
+ { 0, 1, 2, 5, 106, 46, },
+ { 2, 1, 2, 5, 106, 40, },
+ { 1, 1, 2, 5, 106, 40, },
+ { 0, 1, 2, 5, 122, 46, },
+ { 2, 1, 2, 5, 122, 40, },
+ { 1, 1, 2, 5, 122, 40, },
+ { 0, 1, 2, 5, 155, 46, },
+ { 2, 1, 2, 5, 155, 40, },
+ { 1, 1, 2, 5, 155, 63, },
+ { 0, 1, 2, 8, 42, 46, },
+ { 2, 1, 2, 8, 42, 40, },
+ { 1, 1, 2, 8, 42, 40, },
+ { 0, 1, 2, 8, 58, 46, },
+ { 2, 1, 2, 8, 58, 40, },
+ { 1, 1, 2, 8, 58, 40, },
+ { 0, 1, 2, 8, 106, 46, },
+ { 2, 1, 2, 8, 106, 40, },
+ { 1, 1, 2, 8, 106, 40, },
+ { 0, 1, 2, 8, 122, 46, },
+ { 2, 1, 2, 8, 122, 40, },
+ { 1, 1, 2, 8, 122, 40, },
+ { 0, 1, 2, 8, 155, 46, },
+ { 2, 1, 2, 8, 155, 40, },
+ { 1, 1, 2, 8, 155, 63, },
+ { 0, 1, 2, 9, 42, 46, },
+ { 2, 1, 2, 9, 42, 40, },
+ { 1, 1, 2, 9, 42, 40, },
+ { 0, 1, 2, 9, 58, 46, },
+ { 2, 1, 2, 9, 58, 40, },
+ { 1, 1, 2, 9, 58, 40, },
+ { 0, 1, 2, 9, 106, 46, },
+ { 2, 1, 2, 9, 106, 40, },
+ { 1, 1, 2, 9, 106, 40, },
+ { 0, 1, 2, 9, 122, 46, },
+ { 2, 1, 2, 9, 122, 40, },
+ { 1, 1, 2, 9, 122, 40, },
+ { 0, 1, 2, 9, 155, 46, },
+ { 2, 1, 2, 9, 155, 40, },
+ { 1, 1, 2, 9, 155, 63, },
+};
+
+RTW_DECL_TABLE_TXPWR_LMT(rtw8814a_txpwr_lmt_type3);
+
+static const struct rtw_txpwr_lmt_cfg_pair rtw8814a_txpwr_lmt_type5[] = {
+ { 0, 0, 0, 0, 1, 46, },
+ { 2, 0, 0, 0, 1, 40, },
+ { 1, 0, 0, 0, 1, 40, },
+ { 0, 0, 0, 0, 2, 46, },
+ { 2, 0, 0, 0, 2, 40, },
+ { 1, 0, 0, 0, 2, 40, },
+ { 0, 0, 0, 0, 3, 46, },
+ { 2, 0, 0, 0, 3, 40, },
+ { 1, 0, 0, 0, 3, 40, },
+ { 0, 0, 0, 0, 4, 46, },
+ { 2, 0, 0, 0, 4, 40, },
+ { 1, 0, 0, 0, 4, 40, },
+ { 0, 0, 0, 0, 5, 46, },
+ { 2, 0, 0, 0, 5, 40, },
+ { 1, 0, 0, 0, 5, 40, },
+ { 0, 0, 0, 0, 6, 46, },
+ { 2, 0, 0, 0, 6, 40, },
+ { 1, 0, 0, 0, 6, 40, },
+ { 0, 0, 0, 0, 7, 46, },
+ { 2, 0, 0, 0, 7, 40, },
+ { 1, 0, 0, 0, 7, 40, },
+ { 0, 0, 0, 0, 8, 46, },
+ { 2, 0, 0, 0, 8, 40, },
+ { 1, 0, 0, 0, 8, 40, },
+ { 0, 0, 0, 0, 9, 46, },
+ { 2, 0, 0, 0, 9, 40, },
+ { 1, 0, 0, 0, 9, 40, },
+ { 0, 0, 0, 0, 10, 46, },
+ { 2, 0, 0, 0, 10, 40, },
+ { 1, 0, 0, 0, 10, 40, },
+ { 0, 0, 0, 0, 11, 46, },
+ { 2, 0, 0, 0, 11, 40, },
+ { 1, 0, 0, 0, 11, 40, },
+ { 0, 0, 0, 0, 12, 63, },
+ { 2, 0, 0, 0, 12, 40, },
+ { 1, 0, 0, 0, 12, 40, },
+ { 0, 0, 0, 0, 13, 63, },
+ { 2, 0, 0, 0, 13, 40, },
+ { 1, 0, 0, 0, 13, 40, },
+ { 0, 0, 0, 0, 14, 63, },
+ { 2, 0, 0, 0, 14, 63, },
+ { 1, 0, 0, 0, 14, 40, },
+ { 0, 0, 0, 1, 1, 46, },
+ { 2, 0, 0, 1, 1, 40, },
+ { 1, 0, 0, 1, 1, 40, },
+ { 0, 0, 0, 1, 2, 46, },
+ { 2, 0, 0, 1, 2, 40, },
+ { 1, 0, 0, 1, 2, 40, },
+ { 0, 0, 0, 1, 3, 46, },
+ { 2, 0, 0, 1, 3, 40, },
+ { 1, 0, 0, 1, 3, 40, },
+ { 0, 0, 0, 1, 4, 46, },
+ { 2, 0, 0, 1, 4, 40, },
+ { 1, 0, 0, 1, 4, 40, },
+ { 0, 0, 0, 1, 5, 46, },
+ { 2, 0, 0, 1, 5, 40, },
+ { 1, 0, 0, 1, 5, 40, },
+ { 0, 0, 0, 1, 6, 46, },
+ { 2, 0, 0, 1, 6, 40, },
+ { 1, 0, 0, 1, 6, 40, },
+ { 0, 0, 0, 1, 7, 46, },
+ { 2, 0, 0, 1, 7, 40, },
+ { 1, 0, 0, 1, 7, 40, },
+ { 0, 0, 0, 1, 8, 46, },
+ { 2, 0, 0, 1, 8, 40, },
+ { 1, 0, 0, 1, 8, 40, },
+ { 0, 0, 0, 1, 9, 46, },
+ { 2, 0, 0, 1, 9, 40, },
+ { 1, 0, 0, 1, 9, 40, },
+ { 0, 0, 0, 1, 10, 46, },
+ { 2, 0, 0, 1, 10, 40, },
+ { 1, 0, 0, 1, 10, 40, },
+ { 0, 0, 0, 1, 11, 46, },
+ { 2, 0, 0, 1, 11, 40, },
+ { 1, 0, 0, 1, 11, 40, },
+ { 0, 0, 0, 1, 12, 63, },
+ { 2, 0, 0, 1, 12, 40, },
+ { 1, 0, 0, 1, 12, 40, },
+ { 0, 0, 0, 1, 13, 63, },
+ { 2, 0, 0, 1, 13, 40, },
+ { 1, 0, 0, 1, 13, 40, },
+ { 0, 0, 0, 1, 14, 63, },
+ { 2, 0, 0, 1, 14, 63, },
+ { 1, 0, 0, 1, 14, 63, },
+ { 0, 0, 0, 2, 1, 46, },
+ { 2, 0, 0, 2, 1, 40, },
+ { 1, 0, 0, 2, 1, 40, },
+ { 0, 0, 0, 2, 2, 46, },
+ { 2, 0, 0, 2, 2, 40, },
+ { 1, 0, 0, 2, 2, 40, },
+ { 0, 0, 0, 2, 3, 46, },
+ { 2, 0, 0, 2, 3, 40, },
+ { 1, 0, 0, 2, 3, 40, },
+ { 0, 0, 0, 2, 4, 46, },
+ { 2, 0, 0, 2, 4, 40, },
+ { 1, 0, 0, 2, 4, 40, },
+ { 0, 0, 0, 2, 5, 46, },
+ { 2, 0, 0, 2, 5, 40, },
+ { 1, 0, 0, 2, 5, 40, },
+ { 0, 0, 0, 2, 6, 46, },
+ { 2, 0, 0, 2, 6, 40, },
+ { 1, 0, 0, 2, 6, 40, },
+ { 0, 0, 0, 2, 7, 46, },
+ { 2, 0, 0, 2, 7, 40, },
+ { 1, 0, 0, 2, 7, 40, },
+ { 0, 0, 0, 2, 8, 46, },
+ { 2, 0, 0, 2, 8, 40, },
+ { 1, 0, 0, 2, 8, 40, },
+ { 0, 0, 0, 2, 9, 46, },
+ { 2, 0, 0, 2, 9, 40, },
+ { 1, 0, 0, 2, 9, 40, },
+ { 0, 0, 0, 2, 10, 46, },
+ { 2, 0, 0, 2, 10, 40, },
+ { 1, 0, 0, 2, 10, 40, },
+ { 0, 0, 0, 2, 11, 46, },
+ { 2, 0, 0, 2, 11, 40, },
+ { 1, 0, 0, 2, 11, 40, },
+ { 0, 0, 0, 2, 12, 63, },
+ { 2, 0, 0, 2, 12, 40, },
+ { 1, 0, 0, 2, 12, 40, },
+ { 0, 0, 0, 2, 13, 63, },
+ { 2, 0, 0, 2, 13, 40, },
+ { 1, 0, 0, 2, 13, 40, },
+ { 0, 0, 0, 2, 14, 63, },
+ { 2, 0, 0, 2, 14, 63, },
+ { 1, 0, 0, 2, 14, 63, },
+ { 0, 0, 0, 3, 1, 46, },
+ { 2, 0, 0, 3, 1, 40, },
+ { 1, 0, 0, 3, 1, 40, },
+ { 0, 0, 0, 3, 2, 46, },
+ { 2, 0, 0, 3, 2, 40, },
+ { 1, 0, 0, 3, 2, 40, },
+ { 0, 0, 0, 3, 3, 46, },
+ { 2, 0, 0, 3, 3, 40, },
+ { 1, 0, 0, 3, 3, 40, },
+ { 0, 0, 0, 3, 4, 46, },
+ { 2, 0, 0, 3, 4, 40, },
+ { 1, 0, 0, 3, 4, 40, },
+ { 0, 0, 0, 3, 5, 46, },
+ { 2, 0, 0, 3, 5, 40, },
+ { 1, 0, 0, 3, 5, 40, },
+ { 0, 0, 0, 3, 6, 46, },
+ { 2, 0, 0, 3, 6, 40, },
+ { 1, 0, 0, 3, 6, 40, },
+ { 0, 0, 0, 3, 7, 46, },
+ { 2, 0, 0, 3, 7, 40, },
+ { 1, 0, 0, 3, 7, 40, },
+ { 0, 0, 0, 3, 8, 46, },
+ { 2, 0, 0, 3, 8, 40, },
+ { 1, 0, 0, 3, 8, 40, },
+ { 0, 0, 0, 3, 9, 46, },
+ { 2, 0, 0, 3, 9, 40, },
+ { 1, 0, 0, 3, 9, 40, },
+ { 0, 0, 0, 3, 10, 46, },
+ { 2, 0, 0, 3, 10, 40, },
+ { 1, 0, 0, 3, 10, 40, },
+ { 0, 0, 0, 3, 11, 46, },
+ { 2, 0, 0, 3, 11, 40, },
+ { 1, 0, 0, 3, 11, 40, },
+ { 0, 0, 0, 3, 12, 63, },
+ { 2, 0, 0, 3, 12, 40, },
+ { 1, 0, 0, 3, 12, 40, },
+ { 0, 0, 0, 3, 13, 63, },
+ { 2, 0, 0, 3, 13, 40, },
+ { 1, 0, 0, 3, 13, 40, },
+ { 0, 0, 0, 3, 14, 63, },
+ { 2, 0, 0, 3, 14, 63, },
+ { 1, 0, 0, 3, 14, 63, },
+ { 0, 0, 0, 6, 1, 46, },
+ { 2, 0, 0, 6, 1, 40, },
+ { 1, 0, 0, 6, 1, 40, },
+ { 0, 0, 0, 6, 2, 46, },
+ { 2, 0, 0, 6, 2, 40, },
+ { 1, 0, 0, 6, 2, 40, },
+ { 0, 0, 0, 6, 3, 46, },
+ { 2, 0, 0, 6, 3, 40, },
+ { 1, 0, 0, 6, 3, 40, },
+ { 0, 0, 0, 6, 4, 46, },
+ { 2, 0, 0, 6, 4, 40, },
+ { 1, 0, 0, 6, 4, 40, },
+ { 0, 0, 0, 6, 5, 46, },
+ { 2, 0, 0, 6, 5, 40, },
+ { 1, 0, 0, 6, 5, 40, },
+ { 0, 0, 0, 6, 6, 46, },
+ { 2, 0, 0, 6, 6, 40, },
+ { 1, 0, 0, 6, 6, 40, },
+ { 0, 0, 0, 6, 7, 46, },
+ { 2, 0, 0, 6, 7, 40, },
+ { 1, 0, 0, 6, 7, 40, },
+ { 0, 0, 0, 6, 8, 46, },
+ { 2, 0, 0, 6, 8, 40, },
+ { 1, 0, 0, 6, 8, 40, },
+ { 0, 0, 0, 6, 9, 46, },
+ { 2, 0, 0, 6, 9, 40, },
+ { 1, 0, 0, 6, 9, 40, },
+ { 0, 0, 0, 6, 10, 46, },
+ { 2, 0, 0, 6, 10, 40, },
+ { 1, 0, 0, 6, 10, 40, },
+ { 0, 0, 0, 6, 11, 46, },
+ { 2, 0, 0, 6, 11, 40, },
+ { 1, 0, 0, 6, 11, 40, },
+ { 0, 0, 0, 6, 12, 63, },
+ { 2, 0, 0, 6, 12, 40, },
+ { 1, 0, 0, 6, 12, 40, },
+ { 0, 0, 0, 6, 13, 63, },
+ { 2, 0, 0, 6, 13, 40, },
+ { 1, 0, 0, 6, 13, 40, },
+ { 0, 0, 0, 6, 14, 63, },
+ { 2, 0, 0, 6, 14, 63, },
+ { 1, 0, 0, 6, 14, 63, },
+ { 0, 0, 0, 7, 1, 46, },
+ { 2, 0, 0, 7, 1, 40, },
+ { 1, 0, 0, 7, 1, 40, },
+ { 0, 0, 0, 7, 2, 46, },
+ { 2, 0, 0, 7, 2, 40, },
+ { 1, 0, 0, 7, 2, 40, },
+ { 0, 0, 0, 7, 3, 46, },
+ { 2, 0, 0, 7, 3, 40, },
+ { 1, 0, 0, 7, 3, 40, },
+ { 0, 0, 0, 7, 4, 46, },
+ { 2, 0, 0, 7, 4, 40, },
+ { 1, 0, 0, 7, 4, 40, },
+ { 0, 0, 0, 7, 5, 46, },
+ { 2, 0, 0, 7, 5, 40, },
+ { 1, 0, 0, 7, 5, 40, },
+ { 0, 0, 0, 7, 6, 46, },
+ { 2, 0, 0, 7, 6, 40, },
+ { 1, 0, 0, 7, 6, 40, },
+ { 0, 0, 0, 7, 7, 46, },
+ { 2, 0, 0, 7, 7, 40, },
+ { 1, 0, 0, 7, 7, 40, },
+ { 0, 0, 0, 7, 8, 46, },
+ { 2, 0, 0, 7, 8, 40, },
+ { 1, 0, 0, 7, 8, 40, },
+ { 0, 0, 0, 7, 9, 46, },
+ { 2, 0, 0, 7, 9, 40, },
+ { 1, 0, 0, 7, 9, 40, },
+ { 0, 0, 0, 7, 10, 46, },
+ { 2, 0, 0, 7, 10, 40, },
+ { 1, 0, 0, 7, 10, 40, },
+ { 0, 0, 0, 7, 11, 46, },
+ { 2, 0, 0, 7, 11, 40, },
+ { 1, 0, 0, 7, 11, 40, },
+ { 0, 0, 0, 7, 12, 63, },
+ { 2, 0, 0, 7, 12, 40, },
+ { 1, 0, 0, 7, 12, 40, },
+ { 0, 0, 0, 7, 13, 63, },
+ { 2, 0, 0, 7, 13, 40, },
+ { 1, 0, 0, 7, 13, 40, },
+ { 0, 0, 0, 7, 14, 63, },
+ { 2, 0, 0, 7, 14, 63, },
+ { 1, 0, 0, 7, 14, 63, },
+ { 0, 0, 1, 2, 1, 63, },
+ { 2, 0, 1, 2, 1, 63, },
+ { 1, 0, 1, 2, 1, 63, },
+ { 0, 0, 1, 2, 2, 63, },
+ { 2, 0, 1, 2, 2, 63, },
+ { 1, 0, 1, 2, 2, 63, },
+ { 0, 0, 1, 2, 3, 46, },
+ { 2, 0, 1, 2, 3, 40, },
+ { 1, 0, 1, 2, 3, 40, },
+ { 0, 0, 1, 2, 4, 46, },
+ { 2, 0, 1, 2, 4, 40, },
+ { 1, 0, 1, 2, 4, 40, },
+ { 0, 0, 1, 2, 5, 46, },
+ { 2, 0, 1, 2, 5, 40, },
+ { 1, 0, 1, 2, 5, 40, },
+ { 0, 0, 1, 2, 6, 46, },
+ { 2, 0, 1, 2, 6, 40, },
+ { 1, 0, 1, 2, 6, 40, },
+ { 0, 0, 1, 2, 7, 46, },
+ { 2, 0, 1, 2, 7, 40, },
+ { 1, 0, 1, 2, 7, 40, },
+ { 0, 0, 1, 2, 8, 46, },
+ { 2, 0, 1, 2, 8, 40, },
+ { 1, 0, 1, 2, 8, 40, },
+ { 0, 0, 1, 2, 9, 46, },
+ { 2, 0, 1, 2, 9, 40, },
+ { 1, 0, 1, 2, 9, 40, },
+ { 0, 0, 1, 2, 10, 46, },
+ { 2, 0, 1, 2, 10, 40, },
+ { 1, 0, 1, 2, 10, 40, },
+ { 0, 0, 1, 2, 11, 46, },
+ { 2, 0, 1, 2, 11, 40, },
+ { 1, 0, 1, 2, 11, 40, },
+ { 0, 0, 1, 2, 12, 63, },
+ { 2, 0, 1, 2, 12, 40, },
+ { 1, 0, 1, 2, 12, 40, },
+ { 0, 0, 1, 2, 13, 63, },
+ { 2, 0, 1, 2, 13, 40, },
+ { 1, 0, 1, 2, 13, 40, },
+ { 0, 0, 1, 2, 14, 63, },
+ { 2, 0, 1, 2, 14, 63, },
+ { 1, 0, 1, 2, 14, 63, },
+ { 0, 0, 1, 3, 1, 63, },
+ { 2, 0, 1, 3, 1, 63, },
+ { 1, 0, 1, 3, 1, 63, },
+ { 0, 0, 1, 3, 2, 63, },
+ { 2, 0, 1, 3, 2, 63, },
+ { 1, 0, 1, 3, 2, 63, },
+ { 0, 0, 1, 3, 3, 46, },
+ { 2, 0, 1, 3, 3, 40, },
+ { 1, 0, 1, 3, 3, 40, },
+ { 0, 0, 1, 3, 4, 46, },
+ { 2, 0, 1, 3, 4, 40, },
+ { 1, 0, 1, 3, 4, 40, },
+ { 0, 0, 1, 3, 5, 46, },
+ { 2, 0, 1, 3, 5, 40, },
+ { 1, 0, 1, 3, 5, 40, },
+ { 0, 0, 1, 3, 6, 46, },
+ { 2, 0, 1, 3, 6, 40, },
+ { 1, 0, 1, 3, 6, 40, },
+ { 0, 0, 1, 3, 7, 46, },
+ { 2, 0, 1, 3, 7, 40, },
+ { 1, 0, 1, 3, 7, 40, },
+ { 0, 0, 1, 3, 8, 46, },
+ { 2, 0, 1, 3, 8, 40, },
+ { 1, 0, 1, 3, 8, 40, },
+ { 0, 0, 1, 3, 9, 46, },
+ { 2, 0, 1, 3, 9, 40, },
+ { 1, 0, 1, 3, 9, 40, },
+ { 0, 0, 1, 3, 10, 46, },
+ { 2, 0, 1, 3, 10, 40, },
+ { 1, 0, 1, 3, 10, 40, },
+ { 0, 0, 1, 3, 11, 46, },
+ { 2, 0, 1, 3, 11, 40, },
+ { 1, 0, 1, 3, 11, 40, },
+ { 0, 0, 1, 3, 12, 63, },
+ { 2, 0, 1, 3, 12, 40, },
+ { 1, 0, 1, 3, 12, 40, },
+ { 0, 0, 1, 3, 13, 63, },
+ { 2, 0, 1, 3, 13, 40, },
+ { 1, 0, 1, 3, 13, 40, },
+ { 0, 0, 1, 3, 14, 63, },
+ { 2, 0, 1, 3, 14, 63, },
+ { 1, 0, 1, 3, 14, 63, },
+ { 0, 0, 1, 6, 1, 63, },
+ { 2, 0, 1, 6, 1, 63, },
+ { 1, 0, 1, 6, 1, 63, },
+ { 0, 0, 1, 6, 2, 63, },
+ { 2, 0, 1, 6, 2, 63, },
+ { 1, 0, 1, 6, 2, 63, },
+ { 0, 0, 1, 6, 3, 46, },
+ { 2, 0, 1, 6, 3, 40, },
+ { 1, 0, 1, 6, 3, 40, },
+ { 0, 0, 1, 6, 4, 46, },
+ { 2, 0, 1, 6, 4, 40, },
+ { 1, 0, 1, 6, 4, 40, },
+ { 0, 0, 1, 6, 5, 46, },
+ { 2, 0, 1, 6, 5, 40, },
+ { 1, 0, 1, 6, 5, 40, },
+ { 0, 0, 1, 6, 6, 46, },
+ { 2, 0, 1, 6, 6, 40, },
+ { 1, 0, 1, 6, 6, 40, },
+ { 0, 0, 1, 6, 7, 46, },
+ { 2, 0, 1, 6, 7, 40, },
+ { 1, 0, 1, 6, 7, 40, },
+ { 0, 0, 1, 6, 8, 46, },
+ { 2, 0, 1, 6, 8, 40, },
+ { 1, 0, 1, 6, 8, 40, },
+ { 0, 0, 1, 6, 9, 46, },
+ { 2, 0, 1, 6, 9, 40, },
+ { 1, 0, 1, 6, 9, 40, },
+ { 0, 0, 1, 6, 10, 46, },
+ { 2, 0, 1, 6, 10, 40, },
+ { 1, 0, 1, 6, 10, 40, },
+ { 0, 0, 1, 6, 11, 46, },
+ { 2, 0, 1, 6, 11, 40, },
+ { 1, 0, 1, 6, 11, 40, },
+ { 0, 0, 1, 6, 12, 63, },
+ { 2, 0, 1, 6, 12, 40, },
+ { 1, 0, 1, 6, 12, 40, },
+ { 0, 0, 1, 6, 13, 63, },
+ { 2, 0, 1, 6, 13, 40, },
+ { 1, 0, 1, 6, 13, 40, },
+ { 0, 0, 1, 6, 14, 63, },
+ { 2, 0, 1, 6, 14, 63, },
+ { 1, 0, 1, 6, 14, 63, },
+ { 0, 0, 1, 7, 1, 63, },
+ { 2, 0, 1, 7, 1, 63, },
+ { 1, 0, 1, 7, 1, 63, },
+ { 0, 0, 1, 7, 2, 63, },
+ { 2, 0, 1, 7, 2, 63, },
+ { 1, 0, 1, 7, 2, 63, },
+ { 0, 0, 1, 7, 3, 46, },
+ { 2, 0, 1, 7, 3, 40, },
+ { 1, 0, 1, 7, 3, 40, },
+ { 0, 0, 1, 7, 4, 46, },
+ { 2, 0, 1, 7, 4, 40, },
+ { 1, 0, 1, 7, 4, 40, },
+ { 0, 0, 1, 7, 5, 46, },
+ { 2, 0, 1, 7, 5, 40, },
+ { 1, 0, 1, 7, 5, 40, },
+ { 0, 0, 1, 7, 6, 46, },
+ { 2, 0, 1, 7, 6, 40, },
+ { 1, 0, 1, 7, 6, 40, },
+ { 0, 0, 1, 7, 7, 46, },
+ { 2, 0, 1, 7, 7, 40, },
+ { 1, 0, 1, 7, 7, 40, },
+ { 0, 0, 1, 7, 8, 46, },
+ { 2, 0, 1, 7, 8, 40, },
+ { 1, 0, 1, 7, 8, 40, },
+ { 0, 0, 1, 7, 9, 46, },
+ { 2, 0, 1, 7, 9, 40, },
+ { 1, 0, 1, 7, 9, 40, },
+ { 0, 0, 1, 7, 10, 46, },
+ { 2, 0, 1, 7, 10, 40, },
+ { 1, 0, 1, 7, 10, 40, },
+ { 0, 0, 1, 7, 11, 46, },
+ { 2, 0, 1, 7, 11, 40, },
+ { 1, 0, 1, 7, 11, 40, },
+ { 0, 0, 1, 7, 12, 63, },
+ { 2, 0, 1, 7, 12, 40, },
+ { 1, 0, 1, 7, 12, 40, },
+ { 0, 0, 1, 7, 13, 63, },
+ { 2, 0, 1, 7, 13, 40, },
+ { 1, 0, 1, 7, 13, 40, },
+ { 0, 0, 1, 7, 14, 63, },
+ { 2, 0, 1, 7, 14, 63, },
+ { 1, 0, 1, 7, 14, 63, },
+ { 0, 1, 0, 1, 36, 46, },
+ { 2, 1, 0, 1, 36, 40, },
+ { 1, 1, 0, 1, 36, 40, },
+ { 0, 1, 0, 1, 40, 46, },
+ { 2, 1, 0, 1, 40, 40, },
+ { 1, 1, 0, 1, 40, 40, },
+ { 0, 1, 0, 1, 44, 46, },
+ { 2, 1, 0, 1, 44, 40, },
+ { 1, 1, 0, 1, 44, 40, },
+ { 0, 1, 0, 1, 48, 46, },
+ { 2, 1, 0, 1, 48, 40, },
+ { 1, 1, 0, 1, 48, 40, },
+ { 0, 1, 0, 1, 52, 46, },
+ { 2, 1, 0, 1, 52, 40, },
+ { 1, 1, 0, 1, 52, 40, },
+ { 0, 1, 0, 1, 56, 46, },
+ { 2, 1, 0, 1, 56, 40, },
+ { 1, 1, 0, 1, 56, 40, },
+ { 0, 1, 0, 1, 60, 46, },
+ { 2, 1, 0, 1, 60, 40, },
+ { 1, 1, 0, 1, 60, 40, },
+ { 0, 1, 0, 1, 64, 46, },
+ { 2, 1, 0, 1, 64, 40, },
+ { 1, 1, 0, 1, 64, 40, },
+ { 0, 1, 0, 1, 100, 46, },
+ { 2, 1, 0, 1, 100, 40, },
+ { 1, 1, 0, 1, 100, 40, },
+ { 0, 1, 0, 1, 104, 46, },
+ { 2, 1, 0, 1, 104, 40, },
+ { 1, 1, 0, 1, 104, 40, },
+ { 0, 1, 0, 1, 108, 46, },
+ { 2, 1, 0, 1, 108, 40, },
+ { 1, 1, 0, 1, 108, 40, },
+ { 0, 1, 0, 1, 112, 46, },
+ { 2, 1, 0, 1, 112, 40, },
+ { 1, 1, 0, 1, 112, 40, },
+ { 0, 1, 0, 1, 116, 46, },
+ { 2, 1, 0, 1, 116, 40, },
+ { 1, 1, 0, 1, 116, 40, },
+ { 0, 1, 0, 1, 120, 46, },
+ { 2, 1, 0, 1, 120, 40, },
+ { 1, 1, 0, 1, 120, 40, },
+ { 0, 1, 0, 1, 124, 46, },
+ { 2, 1, 0, 1, 124, 40, },
+ { 1, 1, 0, 1, 124, 40, },
+ { 0, 1, 0, 1, 128, 46, },
+ { 2, 1, 0, 1, 128, 40, },
+ { 1, 1, 0, 1, 128, 40, },
+ { 0, 1, 0, 1, 132, 46, },
+ { 2, 1, 0, 1, 132, 40, },
+ { 1, 1, 0, 1, 132, 40, },
+ { 0, 1, 0, 1, 136, 46, },
+ { 2, 1, 0, 1, 136, 40, },
+ { 1, 1, 0, 1, 136, 40, },
+ { 0, 1, 0, 1, 140, 46, },
+ { 2, 1, 0, 1, 140, 40, },
+ { 1, 1, 0, 1, 140, 40, },
+ { 0, 1, 0, 1, 149, 46, },
+ { 2, 1, 0, 1, 149, 40, },
+ { 1, 1, 0, 1, 149, 63, },
+ { 0, 1, 0, 1, 153, 46, },
+ { 2, 1, 0, 1, 153, 40, },
+ { 1, 1, 0, 1, 153, 63, },
+ { 0, 1, 0, 1, 157, 46, },
+ { 2, 1, 0, 1, 157, 40, },
+ { 1, 1, 0, 1, 157, 63, },
+ { 0, 1, 0, 1, 161, 46, },
+ { 2, 1, 0, 1, 161, 40, },
+ { 1, 1, 0, 1, 161, 63, },
+ { 0, 1, 0, 1, 165, 46, },
+ { 2, 1, 0, 1, 165, 40, },
+ { 1, 1, 0, 1, 165, 63, },
+ { 0, 1, 0, 2, 36, 46, },
+ { 2, 1, 0, 2, 36, 40, },
+ { 1, 1, 0, 2, 36, 40, },
+ { 0, 1, 0, 2, 40, 46, },
+ { 2, 1, 0, 2, 40, 40, },
+ { 1, 1, 0, 2, 40, 40, },
+ { 0, 1, 0, 2, 44, 46, },
+ { 2, 1, 0, 2, 44, 40, },
+ { 1, 1, 0, 2, 44, 40, },
+ { 0, 1, 0, 2, 48, 46, },
+ { 2, 1, 0, 2, 48, 40, },
+ { 1, 1, 0, 2, 48, 40, },
+ { 0, 1, 0, 2, 52, 46, },
+ { 2, 1, 0, 2, 52, 40, },
+ { 1, 1, 0, 2, 52, 40, },
+ { 0, 1, 0, 2, 56, 46, },
+ { 2, 1, 0, 2, 56, 40, },
+ { 1, 1, 0, 2, 56, 40, },
+ { 0, 1, 0, 2, 60, 46, },
+ { 2, 1, 0, 2, 60, 40, },
+ { 1, 1, 0, 2, 60, 40, },
+ { 0, 1, 0, 2, 64, 46, },
+ { 2, 1, 0, 2, 64, 40, },
+ { 1, 1, 0, 2, 64, 40, },
+ { 0, 1, 0, 2, 100, 46, },
+ { 2, 1, 0, 2, 100, 40, },
+ { 1, 1, 0, 2, 100, 40, },
+ { 0, 1, 0, 2, 104, 46, },
+ { 2, 1, 0, 2, 104, 40, },
+ { 1, 1, 0, 2, 104, 40, },
+ { 0, 1, 0, 2, 108, 46, },
+ { 2, 1, 0, 2, 108, 40, },
+ { 1, 1, 0, 2, 108, 40, },
+ { 0, 1, 0, 2, 112, 46, },
+ { 2, 1, 0, 2, 112, 40, },
+ { 1, 1, 0, 2, 112, 40, },
+ { 0, 1, 0, 2, 116, 46, },
+ { 2, 1, 0, 2, 116, 40, },
+ { 1, 1, 0, 2, 116, 40, },
+ { 0, 1, 0, 2, 120, 46, },
+ { 2, 1, 0, 2, 120, 40, },
+ { 1, 1, 0, 2, 120, 40, },
+ { 0, 1, 0, 2, 124, 46, },
+ { 2, 1, 0, 2, 124, 40, },
+ { 1, 1, 0, 2, 124, 40, },
+ { 0, 1, 0, 2, 128, 46, },
+ { 2, 1, 0, 2, 128, 40, },
+ { 1, 1, 0, 2, 128, 40, },
+ { 0, 1, 0, 2, 132, 46, },
+ { 2, 1, 0, 2, 132, 40, },
+ { 1, 1, 0, 2, 132, 40, },
+ { 0, 1, 0, 2, 136, 46, },
+ { 2, 1, 0, 2, 136, 40, },
+ { 1, 1, 0, 2, 136, 40, },
+ { 0, 1, 0, 2, 140, 46, },
+ { 2, 1, 0, 2, 140, 40, },
+ { 1, 1, 0, 2, 140, 40, },
+ { 0, 1, 0, 2, 149, 46, },
+ { 2, 1, 0, 2, 149, 40, },
+ { 1, 1, 0, 2, 149, 63, },
+ { 0, 1, 0, 2, 153, 46, },
+ { 2, 1, 0, 2, 153, 40, },
+ { 1, 1, 0, 2, 153, 63, },
+ { 0, 1, 0, 2, 157, 46, },
+ { 2, 1, 0, 2, 157, 40, },
+ { 1, 1, 0, 2, 157, 63, },
+ { 0, 1, 0, 2, 161, 46, },
+ { 2, 1, 0, 2, 161, 40, },
+ { 1, 1, 0, 2, 161, 63, },
+ { 0, 1, 0, 2, 165, 46, },
+ { 2, 1, 0, 2, 165, 40, },
+ { 1, 1, 0, 2, 165, 63, },
+ { 0, 1, 0, 3, 36, 46, },
+ { 2, 1, 0, 3, 36, 40, },
+ { 1, 1, 0, 3, 36, 40, },
+ { 0, 1, 0, 3, 40, 46, },
+ { 2, 1, 0, 3, 40, 40, },
+ { 1, 1, 0, 3, 40, 40, },
+ { 0, 1, 0, 3, 44, 46, },
+ { 2, 1, 0, 3, 44, 40, },
+ { 1, 1, 0, 3, 44, 40, },
+ { 0, 1, 0, 3, 48, 46, },
+ { 2, 1, 0, 3, 48, 40, },
+ { 1, 1, 0, 3, 48, 40, },
+ { 0, 1, 0, 3, 52, 46, },
+ { 2, 1, 0, 3, 52, 40, },
+ { 1, 1, 0, 3, 52, 40, },
+ { 0, 1, 0, 3, 56, 46, },
+ { 2, 1, 0, 3, 56, 40, },
+ { 1, 1, 0, 3, 56, 40, },
+ { 0, 1, 0, 3, 60, 46, },
+ { 2, 1, 0, 3, 60, 40, },
+ { 1, 1, 0, 3, 60, 40, },
+ { 0, 1, 0, 3, 64, 46, },
+ { 2, 1, 0, 3, 64, 40, },
+ { 1, 1, 0, 3, 64, 40, },
+ { 0, 1, 0, 3, 100, 46, },
+ { 2, 1, 0, 3, 100, 40, },
+ { 1, 1, 0, 3, 100, 40, },
+ { 0, 1, 0, 3, 104, 46, },
+ { 2, 1, 0, 3, 104, 40, },
+ { 1, 1, 0, 3, 104, 40, },
+ { 0, 1, 0, 3, 108, 46, },
+ { 2, 1, 0, 3, 108, 40, },
+ { 1, 1, 0, 3, 108, 40, },
+ { 0, 1, 0, 3, 112, 46, },
+ { 2, 1, 0, 3, 112, 40, },
+ { 1, 1, 0, 3, 112, 40, },
+ { 0, 1, 0, 3, 116, 46, },
+ { 2, 1, 0, 3, 116, 40, },
+ { 1, 1, 0, 3, 116, 40, },
+ { 0, 1, 0, 3, 120, 46, },
+ { 2, 1, 0, 3, 120, 40, },
+ { 1, 1, 0, 3, 120, 40, },
+ { 0, 1, 0, 3, 124, 46, },
+ { 2, 1, 0, 3, 124, 40, },
+ { 1, 1, 0, 3, 124, 40, },
+ { 0, 1, 0, 3, 128, 46, },
+ { 2, 1, 0, 3, 128, 40, },
+ { 1, 1, 0, 3, 128, 40, },
+ { 0, 1, 0, 3, 132, 46, },
+ { 2, 1, 0, 3, 132, 40, },
+ { 1, 1, 0, 3, 132, 40, },
+ { 0, 1, 0, 3, 136, 46, },
+ { 2, 1, 0, 3, 136, 40, },
+ { 1, 1, 0, 3, 136, 40, },
+ { 0, 1, 0, 3, 140, 46, },
+ { 2, 1, 0, 3, 140, 40, },
+ { 1, 1, 0, 3, 140, 40, },
+ { 0, 1, 0, 3, 149, 46, },
+ { 2, 1, 0, 3, 149, 40, },
+ { 1, 1, 0, 3, 149, 63, },
+ { 0, 1, 0, 3, 153, 46, },
+ { 2, 1, 0, 3, 153, 40, },
+ { 1, 1, 0, 3, 153, 63, },
+ { 0, 1, 0, 3, 157, 46, },
+ { 2, 1, 0, 3, 157, 40, },
+ { 1, 1, 0, 3, 157, 63, },
+ { 0, 1, 0, 3, 161, 46, },
+ { 2, 1, 0, 3, 161, 40, },
+ { 1, 1, 0, 3, 161, 63, },
+ { 0, 1, 0, 3, 165, 46, },
+ { 2, 1, 0, 3, 165, 40, },
+ { 1, 1, 0, 3, 165, 63, },
+ { 0, 1, 0, 6, 36, 46, },
+ { 2, 1, 0, 6, 36, 40, },
+ { 1, 1, 0, 6, 36, 40, },
+ { 0, 1, 0, 6, 40, 46, },
+ { 2, 1, 0, 6, 40, 40, },
+ { 1, 1, 0, 6, 40, 40, },
+ { 0, 1, 0, 6, 44, 46, },
+ { 2, 1, 0, 6, 44, 40, },
+ { 1, 1, 0, 6, 44, 40, },
+ { 0, 1, 0, 6, 48, 46, },
+ { 2, 1, 0, 6, 48, 40, },
+ { 1, 1, 0, 6, 48, 40, },
+ { 0, 1, 0, 6, 52, 46, },
+ { 2, 1, 0, 6, 52, 40, },
+ { 1, 1, 0, 6, 52, 40, },
+ { 0, 1, 0, 6, 56, 46, },
+ { 2, 1, 0, 6, 56, 40, },
+ { 1, 1, 0, 6, 56, 40, },
+ { 0, 1, 0, 6, 60, 46, },
+ { 2, 1, 0, 6, 60, 40, },
+ { 1, 1, 0, 6, 60, 40, },
+ { 0, 1, 0, 6, 64, 46, },
+ { 2, 1, 0, 6, 64, 40, },
+ { 1, 1, 0, 6, 64, 40, },
+ { 0, 1, 0, 6, 100, 46, },
+ { 2, 1, 0, 6, 100, 40, },
+ { 1, 1, 0, 6, 100, 40, },
+ { 0, 1, 0, 6, 104, 46, },
+ { 2, 1, 0, 6, 104, 40, },
+ { 1, 1, 0, 6, 104, 40, },
+ { 0, 1, 0, 6, 108, 46, },
+ { 2, 1, 0, 6, 108, 40, },
+ { 1, 1, 0, 6, 108, 40, },
+ { 0, 1, 0, 6, 112, 46, },
+ { 2, 1, 0, 6, 112, 40, },
+ { 1, 1, 0, 6, 112, 40, },
+ { 0, 1, 0, 6, 116, 46, },
+ { 2, 1, 0, 6, 116, 40, },
+ { 1, 1, 0, 6, 116, 40, },
+ { 0, 1, 0, 6, 120, 46, },
+ { 2, 1, 0, 6, 120, 40, },
+ { 1, 1, 0, 6, 120, 40, },
+ { 0, 1, 0, 6, 124, 46, },
+ { 2, 1, 0, 6, 124, 40, },
+ { 1, 1, 0, 6, 124, 40, },
+ { 0, 1, 0, 6, 128, 46, },
+ { 2, 1, 0, 6, 128, 40, },
+ { 1, 1, 0, 6, 128, 40, },
+ { 0, 1, 0, 6, 132, 46, },
+ { 2, 1, 0, 6, 132, 40, },
+ { 1, 1, 0, 6, 132, 40, },
+ { 0, 1, 0, 6, 136, 46, },
+ { 2, 1, 0, 6, 136, 40, },
+ { 1, 1, 0, 6, 136, 40, },
+ { 0, 1, 0, 6, 140, 46, },
+ { 2, 1, 0, 6, 140, 40, },
+ { 1, 1, 0, 6, 140, 40, },
+ { 0, 1, 0, 6, 149, 46, },
+ { 2, 1, 0, 6, 149, 40, },
+ { 1, 1, 0, 6, 149, 63, },
+ { 0, 1, 0, 6, 153, 46, },
+ { 2, 1, 0, 6, 153, 40, },
+ { 1, 1, 0, 6, 153, 63, },
+ { 0, 1, 0, 6, 157, 46, },
+ { 2, 1, 0, 6, 157, 40, },
+ { 1, 1, 0, 6, 157, 63, },
+ { 0, 1, 0, 6, 161, 46, },
+ { 2, 1, 0, 6, 161, 40, },
+ { 1, 1, 0, 6, 161, 63, },
+ { 0, 1, 0, 6, 165, 46, },
+ { 2, 1, 0, 6, 165, 40, },
+ { 1, 1, 0, 6, 165, 63, },
+ { 0, 1, 0, 7, 36, 46, },
+ { 2, 1, 0, 7, 36, 40, },
+ { 1, 1, 0, 7, 36, 40, },
+ { 0, 1, 0, 7, 40, 46, },
+ { 2, 1, 0, 7, 40, 40, },
+ { 1, 1, 0, 7, 40, 40, },
+ { 0, 1, 0, 7, 44, 46, },
+ { 2, 1, 0, 7, 44, 40, },
+ { 1, 1, 0, 7, 44, 40, },
+ { 0, 1, 0, 7, 48, 46, },
+ { 2, 1, 0, 7, 48, 40, },
+ { 1, 1, 0, 7, 48, 40, },
+ { 0, 1, 0, 7, 52, 46, },
+ { 2, 1, 0, 7, 52, 40, },
+ { 1, 1, 0, 7, 52, 40, },
+ { 0, 1, 0, 7, 56, 46, },
+ { 2, 1, 0, 7, 56, 40, },
+ { 1, 1, 0, 7, 56, 40, },
+ { 0, 1, 0, 7, 60, 46, },
+ { 2, 1, 0, 7, 60, 40, },
+ { 1, 1, 0, 7, 60, 40, },
+ { 0, 1, 0, 7, 64, 46, },
+ { 2, 1, 0, 7, 64, 40, },
+ { 1, 1, 0, 7, 64, 40, },
+ { 0, 1, 0, 7, 100, 46, },
+ { 2, 1, 0, 7, 100, 40, },
+ { 1, 1, 0, 7, 100, 40, },
+ { 0, 1, 0, 7, 104, 46, },
+ { 2, 1, 0, 7, 104, 40, },
+ { 1, 1, 0, 7, 104, 40, },
+ { 0, 1, 0, 7, 108, 46, },
+ { 2, 1, 0, 7, 108, 40, },
+ { 1, 1, 0, 7, 108, 40, },
+ { 0, 1, 0, 7, 112, 46, },
+ { 2, 1, 0, 7, 112, 40, },
+ { 1, 1, 0, 7, 112, 40, },
+ { 0, 1, 0, 7, 116, 46, },
+ { 2, 1, 0, 7, 116, 40, },
+ { 1, 1, 0, 7, 116, 40, },
+ { 0, 1, 0, 7, 120, 46, },
+ { 2, 1, 0, 7, 120, 40, },
+ { 1, 1, 0, 7, 120, 40, },
+ { 0, 1, 0, 7, 124, 46, },
+ { 2, 1, 0, 7, 124, 40, },
+ { 1, 1, 0, 7, 124, 40, },
+ { 0, 1, 0, 7, 128, 46, },
+ { 2, 1, 0, 7, 128, 40, },
+ { 1, 1, 0, 7, 128, 40, },
+ { 0, 1, 0, 7, 132, 46, },
+ { 2, 1, 0, 7, 132, 40, },
+ { 1, 1, 0, 7, 132, 40, },
+ { 0, 1, 0, 7, 136, 46, },
+ { 2, 1, 0, 7, 136, 40, },
+ { 1, 1, 0, 7, 136, 40, },
+ { 0, 1, 0, 7, 140, 46, },
+ { 2, 1, 0, 7, 140, 40, },
+ { 1, 1, 0, 7, 140, 40, },
+ { 0, 1, 0, 7, 149, 46, },
+ { 2, 1, 0, 7, 149, 40, },
+ { 1, 1, 0, 7, 149, 63, },
+ { 0, 1, 0, 7, 153, 46, },
+ { 2, 1, 0, 7, 153, 40, },
+ { 1, 1, 0, 7, 153, 63, },
+ { 0, 1, 0, 7, 157, 46, },
+ { 2, 1, 0, 7, 157, 40, },
+ { 1, 1, 0, 7, 157, 63, },
+ { 0, 1, 0, 7, 161, 46, },
+ { 2, 1, 0, 7, 161, 40, },
+ { 1, 1, 0, 7, 161, 63, },
+ { 0, 1, 0, 7, 165, 46, },
+ { 2, 1, 0, 7, 165, 40, },
+ { 1, 1, 0, 7, 165, 63, },
+ { 0, 1, 1, 2, 38, 46, },
+ { 2, 1, 1, 2, 38, 40, },
+ { 1, 1, 1, 2, 38, 40, },
+ { 0, 1, 1, 2, 46, 46, },
+ { 2, 1, 1, 2, 46, 40, },
+ { 1, 1, 1, 2, 46, 40, },
+ { 0, 1, 1, 2, 54, 46, },
+ { 2, 1, 1, 2, 54, 40, },
+ { 1, 1, 1, 2, 54, 40, },
+ { 0, 1, 1, 2, 62, 46, },
+ { 2, 1, 1, 2, 62, 40, },
+ { 1, 1, 1, 2, 62, 40, },
+ { 0, 1, 1, 2, 102, 46, },
+ { 2, 1, 1, 2, 102, 40, },
+ { 1, 1, 1, 2, 102, 40, },
+ { 0, 1, 1, 2, 110, 46, },
+ { 2, 1, 1, 2, 110, 40, },
+ { 1, 1, 1, 2, 110, 40, },
+ { 0, 1, 1, 2, 118, 46, },
+ { 2, 1, 1, 2, 118, 40, },
+ { 1, 1, 1, 2, 118, 40, },
+ { 0, 1, 1, 2, 126, 46, },
+ { 2, 1, 1, 2, 126, 40, },
+ { 1, 1, 1, 2, 126, 40, },
+ { 0, 1, 1, 2, 134, 46, },
+ { 2, 1, 1, 2, 134, 40, },
+ { 1, 1, 1, 2, 134, 40, },
+ { 0, 1, 1, 2, 151, 46, },
+ { 2, 1, 1, 2, 151, 40, },
+ { 1, 1, 1, 2, 151, 63, },
+ { 0, 1, 1, 2, 159, 46, },
+ { 2, 1, 1, 2, 159, 40, },
+ { 1, 1, 1, 2, 159, 63, },
+ { 0, 1, 1, 3, 38, 46, },
+ { 2, 1, 1, 3, 38, 40, },
+ { 1, 1, 1, 3, 38, 40, },
+ { 0, 1, 1, 3, 46, 46, },
+ { 2, 1, 1, 3, 46, 40, },
+ { 1, 1, 1, 3, 46, 40, },
+ { 0, 1, 1, 3, 54, 46, },
+ { 2, 1, 1, 3, 54, 40, },
+ { 1, 1, 1, 3, 54, 40, },
+ { 0, 1, 1, 3, 62, 46, },
+ { 2, 1, 1, 3, 62, 40, },
+ { 1, 1, 1, 3, 62, 40, },
+ { 0, 1, 1, 3, 102, 46, },
+ { 2, 1, 1, 3, 102, 40, },
+ { 1, 1, 1, 3, 102, 40, },
+ { 0, 1, 1, 3, 110, 46, },
+ { 2, 1, 1, 3, 110, 40, },
+ { 1, 1, 1, 3, 110, 40, },
+ { 0, 1, 1, 3, 118, 46, },
+ { 2, 1, 1, 3, 118, 40, },
+ { 1, 1, 1, 3, 118, 40, },
+ { 0, 1, 1, 3, 126, 46, },
+ { 2, 1, 1, 3, 126, 40, },
+ { 1, 1, 1, 3, 126, 40, },
+ { 0, 1, 1, 3, 134, 46, },
+ { 2, 1, 1, 3, 134, 40, },
+ { 1, 1, 1, 3, 134, 40, },
+ { 0, 1, 1, 3, 151, 46, },
+ { 2, 1, 1, 3, 151, 40, },
+ { 1, 1, 1, 3, 151, 63, },
+ { 0, 1, 1, 3, 159, 46, },
+ { 2, 1, 1, 3, 159, 40, },
+ { 1, 1, 1, 3, 159, 63, },
+ { 0, 1, 1, 6, 38, 46, },
+ { 2, 1, 1, 6, 38, 40, },
+ { 1, 1, 1, 6, 38, 40, },
+ { 0, 1, 1, 6, 46, 46, },
+ { 2, 1, 1, 6, 46, 40, },
+ { 1, 1, 1, 6, 46, 40, },
+ { 0, 1, 1, 6, 54, 46, },
+ { 2, 1, 1, 6, 54, 40, },
+ { 1, 1, 1, 6, 54, 40, },
+ { 0, 1, 1, 6, 62, 46, },
+ { 2, 1, 1, 6, 62, 40, },
+ { 1, 1, 1, 6, 62, 40, },
+ { 0, 1, 1, 6, 102, 46, },
+ { 2, 1, 1, 6, 102, 40, },
+ { 1, 1, 1, 6, 102, 40, },
+ { 0, 1, 1, 6, 110, 46, },
+ { 2, 1, 1, 6, 110, 40, },
+ { 1, 1, 1, 6, 110, 40, },
+ { 0, 1, 1, 6, 118, 46, },
+ { 2, 1, 1, 6, 118, 40, },
+ { 1, 1, 1, 6, 118, 40, },
+ { 0, 1, 1, 6, 126, 46, },
+ { 2, 1, 1, 6, 126, 40, },
+ { 1, 1, 1, 6, 126, 40, },
+ { 0, 1, 1, 6, 134, 46, },
+ { 2, 1, 1, 6, 134, 40, },
+ { 1, 1, 1, 6, 134, 40, },
+ { 0, 1, 1, 6, 151, 46, },
+ { 2, 1, 1, 6, 151, 40, },
+ { 1, 1, 1, 6, 151, 63, },
+ { 0, 1, 1, 6, 159, 46, },
+ { 2, 1, 1, 6, 159, 40, },
+ { 1, 1, 1, 6, 159, 63, },
+ { 0, 1, 1, 7, 38, 46, },
+ { 2, 1, 1, 7, 38, 40, },
+ { 1, 1, 1, 7, 38, 40, },
+ { 0, 1, 1, 7, 46, 46, },
+ { 2, 1, 1, 7, 46, 40, },
+ { 1, 1, 1, 7, 46, 40, },
+ { 0, 1, 1, 7, 54, 46, },
+ { 2, 1, 1, 7, 54, 40, },
+ { 1, 1, 1, 7, 54, 40, },
+ { 0, 1, 1, 7, 62, 46, },
+ { 2, 1, 1, 7, 62, 40, },
+ { 1, 1, 1, 7, 62, 40, },
+ { 0, 1, 1, 7, 102, 46, },
+ { 2, 1, 1, 7, 102, 40, },
+ { 1, 1, 1, 7, 102, 40, },
+ { 0, 1, 1, 7, 110, 46, },
+ { 2, 1, 1, 7, 110, 40, },
+ { 1, 1, 1, 7, 110, 40, },
+ { 0, 1, 1, 7, 118, 46, },
+ { 2, 1, 1, 7, 118, 40, },
+ { 1, 1, 1, 7, 118, 40, },
+ { 0, 1, 1, 7, 126, 46, },
+ { 2, 1, 1, 7, 126, 40, },
+ { 1, 1, 1, 7, 126, 40, },
+ { 0, 1, 1, 7, 134, 46, },
+ { 2, 1, 1, 7, 134, 40, },
+ { 1, 1, 1, 7, 134, 40, },
+ { 0, 1, 1, 7, 151, 46, },
+ { 2, 1, 1, 7, 151, 40, },
+ { 1, 1, 1, 7, 151, 63, },
+ { 0, 1, 1, 7, 159, 46, },
+ { 2, 1, 1, 7, 159, 40, },
+ { 1, 1, 1, 7, 159, 63, },
+ { 0, 1, 2, 4, 42, 46, },
+ { 2, 1, 2, 4, 42, 40, },
+ { 1, 1, 2, 4, 42, 40, },
+ { 0, 1, 2, 4, 58, 46, },
+ { 2, 1, 2, 4, 58, 40, },
+ { 1, 1, 2, 4, 58, 40, },
+ { 0, 1, 2, 4, 106, 46, },
+ { 2, 1, 2, 4, 106, 40, },
+ { 1, 1, 2, 4, 106, 40, },
+ { 0, 1, 2, 4, 122, 46, },
+ { 2, 1, 2, 4, 122, 40, },
+ { 1, 1, 2, 4, 122, 40, },
+ { 0, 1, 2, 4, 155, 46, },
+ { 2, 1, 2, 4, 155, 40, },
+ { 1, 1, 2, 4, 155, 63, },
+ { 0, 1, 2, 5, 42, 46, },
+ { 2, 1, 2, 5, 42, 40, },
+ { 1, 1, 2, 5, 42, 40, },
+ { 0, 1, 2, 5, 58, 46, },
+ { 2, 1, 2, 5, 58, 40, },
+ { 1, 1, 2, 5, 58, 40, },
+ { 0, 1, 2, 5, 106, 46, },
+ { 2, 1, 2, 5, 106, 40, },
+ { 1, 1, 2, 5, 106, 40, },
+ { 0, 1, 2, 5, 122, 46, },
+ { 2, 1, 2, 5, 122, 40, },
+ { 1, 1, 2, 5, 122, 40, },
+ { 0, 1, 2, 5, 155, 46, },
+ { 2, 1, 2, 5, 155, 40, },
+ { 1, 1, 2, 5, 155, 63, },
+ { 0, 1, 2, 8, 42, 46, },
+ { 2, 1, 2, 8, 42, 40, },
+ { 1, 1, 2, 8, 42, 40, },
+ { 0, 1, 2, 8, 58, 46, },
+ { 2, 1, 2, 8, 58, 40, },
+ { 1, 1, 2, 8, 58, 40, },
+ { 0, 1, 2, 8, 106, 46, },
+ { 2, 1, 2, 8, 106, 40, },
+ { 1, 1, 2, 8, 106, 40, },
+ { 0, 1, 2, 8, 122, 46, },
+ { 2, 1, 2, 8, 122, 40, },
+ { 1, 1, 2, 8, 122, 40, },
+ { 0, 1, 2, 8, 155, 46, },
+ { 2, 1, 2, 8, 155, 40, },
+ { 1, 1, 2, 8, 155, 63, },
+ { 0, 1, 2, 9, 42, 46, },
+ { 2, 1, 2, 9, 42, 40, },
+ { 1, 1, 2, 9, 42, 40, },
+ { 0, 1, 2, 9, 58, 46, },
+ { 2, 1, 2, 9, 58, 40, },
+ { 1, 1, 2, 9, 58, 40, },
+ { 0, 1, 2, 9, 106, 46, },
+ { 2, 1, 2, 9, 106, 40, },
+ { 1, 1, 2, 9, 106, 40, },
+ { 0, 1, 2, 9, 122, 46, },
+ { 2, 1, 2, 9, 122, 40, },
+ { 1, 1, 2, 9, 122, 40, },
+ { 0, 1, 2, 9, 155, 46, },
+ { 2, 1, 2, 9, 155, 40, },
+ { 1, 1, 2, 9, 155, 63, },
+};
+
+RTW_DECL_TABLE_TXPWR_LMT(rtw8814a_txpwr_lmt_type5);
+
+static const struct rtw_txpwr_lmt_cfg_pair rtw8814a_txpwr_lmt_type7[] = {
+ { 0, 0, 0, 0, 1, 44, },
+ { 2, 0, 0, 0, 1, 32, },
+ { 1, 0, 0, 0, 1, 32, },
+ { 0, 0, 0, 0, 2, 52, },
+ { 2, 0, 0, 0, 2, 32, },
+ { 1, 0, 0, 0, 2, 32, },
+ { 0, 0, 0, 0, 3, 52, },
+ { 2, 0, 0, 0, 3, 32, },
+ { 1, 0, 0, 0, 3, 32, },
+ { 0, 0, 0, 0, 4, 52, },
+ { 2, 0, 0, 0, 4, 32, },
+ { 1, 0, 0, 0, 4, 32, },
+ { 0, 0, 0, 0, 5, 52, },
+ { 2, 0, 0, 0, 5, 32, },
+ { 1, 0, 0, 0, 5, 32, },
+ { 0, 0, 0, 0, 6, 52, },
+ { 2, 0, 0, 0, 6, 32, },
+ { 1, 0, 0, 0, 6, 32, },
+ { 0, 0, 0, 0, 7, 52, },
+ { 2, 0, 0, 0, 7, 32, },
+ { 1, 0, 0, 0, 7, 32, },
+ { 0, 0, 0, 0, 8, 52, },
+ { 2, 0, 0, 0, 8, 32, },
+ { 1, 0, 0, 0, 8, 32, },
+ { 0, 0, 0, 0, 9, 52, },
+ { 2, 0, 0, 0, 9, 32, },
+ { 1, 0, 0, 0, 9, 32, },
+ { 0, 0, 0, 0, 10, 52, },
+ { 2, 0, 0, 0, 10, 32, },
+ { 1, 0, 0, 0, 10, 32, },
+ { 0, 0, 0, 0, 11, 44, },
+ { 2, 0, 0, 0, 11, 32, },
+ { 1, 0, 0, 0, 11, 32, },
+ { 0, 0, 0, 0, 12, 63, },
+ { 2, 0, 0, 0, 12, 32, },
+ { 1, 0, 0, 0, 12, 32, },
+ { 0, 0, 0, 0, 13, 63, },
+ { 2, 0, 0, 0, 13, 32, },
+ { 1, 0, 0, 0, 13, 32, },
+ { 0, 0, 0, 0, 14, 63, },
+ { 2, 0, 0, 0, 14, 63, },
+ { 1, 0, 0, 0, 14, 32, },
+ { 0, 0, 0, 1, 1, 38, },
+ { 2, 0, 0, 1, 1, 32, },
+ { 1, 0, 0, 1, 1, 32, },
+ { 0, 0, 0, 1, 2, 46, },
+ { 2, 0, 0, 1, 2, 32, },
+ { 1, 0, 0, 1, 2, 32, },
+ { 0, 0, 0, 1, 3, 46, },
+ { 2, 0, 0, 1, 3, 32, },
+ { 1, 0, 0, 1, 3, 32, },
+ { 0, 0, 0, 1, 4, 46, },
+ { 2, 0, 0, 1, 4, 32, },
+ { 1, 0, 0, 1, 4, 32, },
+ { 0, 0, 0, 1, 5, 46, },
+ { 2, 0, 0, 1, 5, 32, },
+ { 1, 0, 0, 1, 5, 32, },
+ { 0, 0, 0, 1, 6, 46, },
+ { 2, 0, 0, 1, 6, 32, },
+ { 1, 0, 0, 1, 6, 32, },
+ { 0, 0, 0, 1, 7, 46, },
+ { 2, 0, 0, 1, 7, 32, },
+ { 1, 0, 0, 1, 7, 32, },
+ { 0, 0, 0, 1, 8, 46, },
+ { 2, 0, 0, 1, 8, 32, },
+ { 1, 0, 0, 1, 8, 32, },
+ { 0, 0, 0, 1, 9, 46, },
+ { 2, 0, 0, 1, 9, 32, },
+ { 1, 0, 0, 1, 9, 32, },
+ { 0, 0, 0, 1, 10, 46, },
+ { 2, 0, 0, 1, 10, 32, },
+ { 1, 0, 0, 1, 10, 32, },
+ { 0, 0, 0, 1, 11, 38, },
+ { 2, 0, 0, 1, 11, 32, },
+ { 1, 0, 0, 1, 11, 32, },
+ { 0, 0, 0, 1, 12, 63, },
+ { 2, 0, 0, 1, 12, 32, },
+ { 1, 0, 0, 1, 12, 32, },
+ { 0, 0, 0, 1, 13, 63, },
+ { 2, 0, 0, 1, 13, 32, },
+ { 1, 0, 0, 1, 13, 32, },
+ { 0, 0, 0, 1, 14, 63, },
+ { 2, 0, 0, 1, 14, 63, },
+ { 1, 0, 0, 1, 14, 63, },
+ { 0, 0, 0, 2, 1, 34, },
+ { 2, 0, 0, 2, 1, 32, },
+ { 1, 0, 0, 2, 1, 32, },
+ { 0, 0, 0, 2, 2, 46, },
+ { 2, 0, 0, 2, 2, 32, },
+ { 1, 0, 0, 2, 2, 32, },
+ { 0, 0, 0, 2, 3, 46, },
+ { 2, 0, 0, 2, 3, 32, },
+ { 1, 0, 0, 2, 3, 32, },
+ { 0, 0, 0, 2, 4, 46, },
+ { 2, 0, 0, 2, 4, 32, },
+ { 1, 0, 0, 2, 4, 32, },
+ { 0, 0, 0, 2, 5, 46, },
+ { 2, 0, 0, 2, 5, 32, },
+ { 1, 0, 0, 2, 5, 32, },
+ { 0, 0, 0, 2, 6, 46, },
+ { 2, 0, 0, 2, 6, 32, },
+ { 1, 0, 0, 2, 6, 32, },
+ { 0, 0, 0, 2, 7, 46, },
+ { 2, 0, 0, 2, 7, 32, },
+ { 1, 0, 0, 2, 7, 32, },
+ { 0, 0, 0, 2, 8, 46, },
+ { 2, 0, 0, 2, 8, 32, },
+ { 1, 0, 0, 2, 8, 32, },
+ { 0, 0, 0, 2, 9, 46, },
+ { 2, 0, 0, 2, 9, 32, },
+ { 1, 0, 0, 2, 9, 32, },
+ { 0, 0, 0, 2, 10, 46, },
+ { 2, 0, 0, 2, 10, 32, },
+ { 1, 0, 0, 2, 10, 32, },
+ { 0, 0, 0, 2, 11, 34, },
+ { 2, 0, 0, 2, 11, 32, },
+ { 1, 0, 0, 2, 11, 32, },
+ { 0, 0, 0, 2, 12, 63, },
+ { 2, 0, 0, 2, 12, 32, },
+ { 1, 0, 0, 2, 12, 32, },
+ { 0, 0, 0, 2, 13, 63, },
+ { 2, 0, 0, 2, 13, 32, },
+ { 1, 0, 0, 2, 13, 32, },
+ { 0, 0, 0, 2, 14, 63, },
+ { 2, 0, 0, 2, 14, 63, },
+ { 1, 0, 0, 2, 14, 63, },
+ { 0, 0, 0, 3, 1, 32, },
+ { 2, 0, 0, 3, 1, 30, },
+ { 1, 0, 0, 3, 1, 30, },
+ { 0, 0, 0, 3, 2, 44, },
+ { 2, 0, 0, 3, 2, 30, },
+ { 1, 0, 0, 3, 2, 30, },
+ { 0, 0, 0, 3, 3, 44, },
+ { 2, 0, 0, 3, 3, 30, },
+ { 1, 0, 0, 3, 3, 30, },
+ { 0, 0, 0, 3, 4, 44, },
+ { 2, 0, 0, 3, 4, 30, },
+ { 1, 0, 0, 3, 4, 30, },
+ { 0, 0, 0, 3, 5, 44, },
+ { 2, 0, 0, 3, 5, 30, },
+ { 1, 0, 0, 3, 5, 30, },
+ { 0, 0, 0, 3, 6, 44, },
+ { 2, 0, 0, 3, 6, 30, },
+ { 1, 0, 0, 3, 6, 30, },
+ { 0, 0, 0, 3, 7, 44, },
+ { 2, 0, 0, 3, 7, 30, },
+ { 1, 0, 0, 3, 7, 30, },
+ { 0, 0, 0, 3, 8, 44, },
+ { 2, 0, 0, 3, 8, 30, },
+ { 1, 0, 0, 3, 8, 30, },
+ { 0, 0, 0, 3, 9, 44, },
+ { 2, 0, 0, 3, 9, 30, },
+ { 1, 0, 0, 3, 9, 30, },
+ { 0, 0, 0, 3, 10, 44, },
+ { 2, 0, 0, 3, 10, 30, },
+ { 1, 0, 0, 3, 10, 30, },
+ { 0, 0, 0, 3, 11, 32, },
+ { 2, 0, 0, 3, 11, 30, },
+ { 1, 0, 0, 3, 11, 30, },
+ { 0, 0, 0, 3, 12, 63, },
+ { 2, 0, 0, 3, 12, 30, },
+ { 1, 0, 0, 3, 12, 30, },
+ { 0, 0, 0, 3, 13, 63, },
+ { 2, 0, 0, 3, 13, 30, },
+ { 1, 0, 0, 3, 13, 30, },
+ { 0, 0, 0, 3, 14, 63, },
+ { 2, 0, 0, 3, 14, 63, },
+ { 1, 0, 0, 3, 14, 63, },
+ { 0, 0, 0, 6, 1, 30, },
+ { 2, 0, 0, 6, 1, 28, },
+ { 1, 0, 0, 6, 1, 28, },
+ { 0, 0, 0, 6, 2, 42, },
+ { 2, 0, 0, 6, 2, 28, },
+ { 1, 0, 0, 6, 2, 28, },
+ { 0, 0, 0, 6, 3, 42, },
+ { 2, 0, 0, 6, 3, 28, },
+ { 1, 0, 0, 6, 3, 28, },
+ { 0, 0, 0, 6, 4, 42, },
+ { 2, 0, 0, 6, 4, 28, },
+ { 1, 0, 0, 6, 4, 28, },
+ { 0, 0, 0, 6, 5, 42, },
+ { 2, 0, 0, 6, 5, 28, },
+ { 1, 0, 0, 6, 5, 28, },
+ { 0, 0, 0, 6, 6, 42, },
+ { 2, 0, 0, 6, 6, 28, },
+ { 1, 0, 0, 6, 6, 28, },
+ { 0, 0, 0, 6, 7, 42, },
+ { 2, 0, 0, 6, 7, 28, },
+ { 1, 0, 0, 6, 7, 28, },
+ { 0, 0, 0, 6, 8, 42, },
+ { 2, 0, 0, 6, 8, 28, },
+ { 1, 0, 0, 6, 8, 28, },
+ { 0, 0, 0, 6, 9, 42, },
+ { 2, 0, 0, 6, 9, 28, },
+ { 1, 0, 0, 6, 9, 28, },
+ { 0, 0, 0, 6, 10, 42, },
+ { 2, 0, 0, 6, 10, 28, },
+ { 1, 0, 0, 6, 10, 28, },
+ { 0, 0, 0, 6, 11, 30, },
+ { 2, 0, 0, 6, 11, 28, },
+ { 1, 0, 0, 6, 11, 28, },
+ { 0, 0, 0, 6, 12, 63, },
+ { 2, 0, 0, 6, 12, 28, },
+ { 1, 0, 0, 6, 12, 28, },
+ { 0, 0, 0, 6, 13, 63, },
+ { 2, 0, 0, 6, 13, 28, },
+ { 1, 0, 0, 6, 13, 28, },
+ { 0, 0, 0, 6, 14, 63, },
+ { 2, 0, 0, 6, 14, 63, },
+ { 1, 0, 0, 6, 14, 63, },
+ { 0, 0, 0, 7, 1, 28, },
+ { 2, 0, 0, 7, 1, 26, },
+ { 1, 0, 0, 7, 1, 26, },
+ { 0, 0, 0, 7, 2, 40, },
+ { 2, 0, 0, 7, 2, 26, },
+ { 1, 0, 0, 7, 2, 26, },
+ { 0, 0, 0, 7, 3, 40, },
+ { 2, 0, 0, 7, 3, 26, },
+ { 1, 0, 0, 7, 3, 26, },
+ { 0, 0, 0, 7, 4, 40, },
+ { 2, 0, 0, 7, 4, 26, },
+ { 1, 0, 0, 7, 4, 26, },
+ { 0, 0, 0, 7, 5, 40, },
+ { 2, 0, 0, 7, 5, 26, },
+ { 1, 0, 0, 7, 5, 26, },
+ { 0, 0, 0, 7, 6, 40, },
+ { 2, 0, 0, 7, 6, 26, },
+ { 1, 0, 0, 7, 6, 26, },
+ { 0, 0, 0, 7, 7, 40, },
+ { 2, 0, 0, 7, 7, 26, },
+ { 1, 0, 0, 7, 7, 26, },
+ { 0, 0, 0, 7, 8, 40, },
+ { 2, 0, 0, 7, 8, 26, },
+ { 1, 0, 0, 7, 8, 26, },
+ { 0, 0, 0, 7, 9, 40, },
+ { 2, 0, 0, 7, 9, 26, },
+ { 1, 0, 0, 7, 9, 26, },
+ { 0, 0, 0, 7, 10, 40, },
+ { 2, 0, 0, 7, 10, 26, },
+ { 1, 0, 0, 7, 10, 26, },
+ { 0, 0, 0, 7, 11, 28, },
+ { 2, 0, 0, 7, 11, 26, },
+ { 1, 0, 0, 7, 11, 26, },
+ { 0, 0, 0, 7, 12, 63, },
+ { 2, 0, 0, 7, 12, 26, },
+ { 1, 0, 0, 7, 12, 26, },
+ { 0, 0, 0, 7, 13, 63, },
+ { 2, 0, 0, 7, 13, 26, },
+ { 1, 0, 0, 7, 13, 26, },
+ { 0, 0, 0, 7, 14, 63, },
+ { 2, 0, 0, 7, 14, 63, },
+ { 1, 0, 0, 7, 14, 63, },
+ { 0, 0, 1, 2, 1, 63, },
+ { 2, 0, 1, 2, 1, 63, },
+ { 1, 0, 1, 2, 1, 63, },
+ { 0, 0, 1, 2, 2, 63, },
+ { 2, 0, 1, 2, 2, 63, },
+ { 1, 0, 1, 2, 2, 63, },
+ { 0, 0, 1, 2, 3, 36, },
+ { 2, 0, 1, 2, 3, 32, },
+ { 1, 0, 1, 2, 3, 32, },
+ { 0, 0, 1, 2, 4, 40, },
+ { 2, 0, 1, 2, 4, 32, },
+ { 1, 0, 1, 2, 4, 32, },
+ { 0, 0, 1, 2, 5, 40, },
+ { 2, 0, 1, 2, 5, 32, },
+ { 1, 0, 1, 2, 5, 32, },
+ { 0, 0, 1, 2, 6, 40, },
+ { 2, 0, 1, 2, 6, 32, },
+ { 1, 0, 1, 2, 6, 32, },
+ { 0, 0, 1, 2, 7, 40, },
+ { 2, 0, 1, 2, 7, 32, },
+ { 1, 0, 1, 2, 7, 32, },
+ { 0, 0, 1, 2, 8, 40, },
+ { 2, 0, 1, 2, 8, 32, },
+ { 1, 0, 1, 2, 8, 32, },
+ { 0, 0, 1, 2, 9, 40, },
+ { 2, 0, 1, 2, 9, 32, },
+ { 1, 0, 1, 2, 9, 32, },
+ { 0, 0, 1, 2, 10, 40, },
+ { 2, 0, 1, 2, 10, 32, },
+ { 1, 0, 1, 2, 10, 32, },
+ { 0, 0, 1, 2, 11, 34, },
+ { 2, 0, 1, 2, 11, 32, },
+ { 1, 0, 1, 2, 11, 32, },
+ { 0, 0, 1, 2, 12, 63, },
+ { 2, 0, 1, 2, 12, 32, },
+ { 1, 0, 1, 2, 12, 32, },
+ { 0, 0, 1, 2, 13, 63, },
+ { 2, 0, 1, 2, 13, 32, },
+ { 1, 0, 1, 2, 13, 32, },
+ { 0, 0, 1, 2, 14, 63, },
+ { 2, 0, 1, 2, 14, 63, },
+ { 1, 0, 1, 2, 14, 63, },
+ { 0, 0, 1, 3, 1, 63, },
+ { 2, 0, 1, 3, 1, 63, },
+ { 1, 0, 1, 3, 1, 63, },
+ { 0, 0, 1, 3, 2, 63, },
+ { 2, 0, 1, 3, 2, 63, },
+ { 1, 0, 1, 3, 2, 63, },
+ { 0, 0, 1, 3, 3, 34, },
+ { 2, 0, 1, 3, 3, 30, },
+ { 1, 0, 1, 3, 3, 30, },
+ { 0, 0, 1, 3, 4, 38, },
+ { 2, 0, 1, 3, 4, 30, },
+ { 1, 0, 1, 3, 4, 30, },
+ { 0, 0, 1, 3, 5, 38, },
+ { 2, 0, 1, 3, 5, 30, },
+ { 1, 0, 1, 3, 5, 30, },
+ { 0, 0, 1, 3, 6, 38, },
+ { 2, 0, 1, 3, 6, 30, },
+ { 1, 0, 1, 3, 6, 30, },
+ { 0, 0, 1, 3, 7, 38, },
+ { 2, 0, 1, 3, 7, 30, },
+ { 1, 0, 1, 3, 7, 30, },
+ { 0, 0, 1, 3, 8, 38, },
+ { 2, 0, 1, 3, 8, 30, },
+ { 1, 0, 1, 3, 8, 30, },
+ { 0, 0, 1, 3, 9, 38, },
+ { 2, 0, 1, 3, 9, 30, },
+ { 1, 0, 1, 3, 9, 30, },
+ { 0, 0, 1, 3, 10, 38, },
+ { 2, 0, 1, 3, 10, 30, },
+ { 1, 0, 1, 3, 10, 30, },
+ { 0, 0, 1, 3, 11, 32, },
+ { 2, 0, 1, 3, 11, 30, },
+ { 1, 0, 1, 3, 11, 30, },
+ { 0, 0, 1, 3, 12, 63, },
+ { 2, 0, 1, 3, 12, 30, },
+ { 1, 0, 1, 3, 12, 30, },
+ { 0, 0, 1, 3, 13, 63, },
+ { 2, 0, 1, 3, 13, 30, },
+ { 1, 0, 1, 3, 13, 30, },
+ { 0, 0, 1, 3, 14, 63, },
+ { 2, 0, 1, 3, 14, 63, },
+ { 1, 0, 1, 3, 14, 63, },
+ { 0, 0, 1, 6, 1, 63, },
+ { 2, 0, 1, 6, 1, 63, },
+ { 1, 0, 1, 6, 1, 63, },
+ { 0, 0, 1, 6, 2, 63, },
+ { 2, 0, 1, 6, 2, 63, },
+ { 1, 0, 1, 6, 2, 63, },
+ { 0, 0, 1, 6, 3, 32, },
+ { 2, 0, 1, 6, 3, 28, },
+ { 1, 0, 1, 6, 3, 28, },
+ { 0, 0, 1, 6, 4, 36, },
+ { 2, 0, 1, 6, 4, 28, },
+ { 1, 0, 1, 6, 4, 28, },
+ { 0, 0, 1, 6, 5, 36, },
+ { 2, 0, 1, 6, 5, 28, },
+ { 1, 0, 1, 6, 5, 28, },
+ { 0, 0, 1, 6, 6, 36, },
+ { 2, 0, 1, 6, 6, 28, },
+ { 1, 0, 1, 6, 6, 28, },
+ { 0, 0, 1, 6, 7, 36, },
+ { 2, 0, 1, 6, 7, 28, },
+ { 1, 0, 1, 6, 7, 28, },
+ { 0, 0, 1, 6, 8, 36, },
+ { 2, 0, 1, 6, 8, 28, },
+ { 1, 0, 1, 6, 8, 28, },
+ { 0, 0, 1, 6, 9, 36, },
+ { 2, 0, 1, 6, 9, 28, },
+ { 1, 0, 1, 6, 9, 28, },
+ { 0, 0, 1, 6, 10, 36, },
+ { 2, 0, 1, 6, 10, 28, },
+ { 1, 0, 1, 6, 10, 28, },
+ { 0, 0, 1, 6, 11, 30, },
+ { 2, 0, 1, 6, 11, 28, },
+ { 1, 0, 1, 6, 11, 28, },
+ { 0, 0, 1, 6, 12, 63, },
+ { 2, 0, 1, 6, 12, 28, },
+ { 1, 0, 1, 6, 12, 28, },
+ { 0, 0, 1, 6, 13, 63, },
+ { 2, 0, 1, 6, 13, 28, },
+ { 1, 0, 1, 6, 13, 28, },
+ { 0, 0, 1, 6, 14, 63, },
+ { 2, 0, 1, 6, 14, 63, },
+ { 1, 0, 1, 6, 14, 63, },
+ { 0, 0, 1, 7, 1, 63, },
+ { 2, 0, 1, 7, 1, 63, },
+ { 1, 0, 1, 7, 1, 63, },
+ { 0, 0, 1, 7, 2, 63, },
+ { 2, 0, 1, 7, 2, 63, },
+ { 1, 0, 1, 7, 2, 63, },
+ { 0, 0, 1, 7, 3, 32, },
+ { 2, 0, 1, 7, 3, 26, },
+ { 1, 0, 1, 7, 3, 26, },
+ { 0, 0, 1, 7, 4, 36, },
+ { 2, 0, 1, 7, 4, 26, },
+ { 1, 0, 1, 7, 4, 26, },
+ { 0, 0, 1, 7, 5, 36, },
+ { 2, 0, 1, 7, 5, 26, },
+ { 1, 0, 1, 7, 5, 26, },
+ { 0, 0, 1, 7, 6, 36, },
+ { 2, 0, 1, 7, 6, 26, },
+ { 1, 0, 1, 7, 6, 26, },
+ { 0, 0, 1, 7, 7, 36, },
+ { 2, 0, 1, 7, 7, 26, },
+ { 1, 0, 1, 7, 7, 26, },
+ { 0, 0, 1, 7, 8, 36, },
+ { 2, 0, 1, 7, 8, 26, },
+ { 1, 0, 1, 7, 8, 26, },
+ { 0, 0, 1, 7, 9, 36, },
+ { 2, 0, 1, 7, 9, 26, },
+ { 1, 0, 1, 7, 9, 26, },
+ { 0, 0, 1, 7, 10, 36, },
+ { 2, 0, 1, 7, 10, 26, },
+ { 1, 0, 1, 7, 10, 26, },
+ { 0, 0, 1, 7, 11, 30, },
+ { 2, 0, 1, 7, 11, 26, },
+ { 1, 0, 1, 7, 11, 26, },
+ { 0, 0, 1, 7, 12, 63, },
+ { 2, 0, 1, 7, 12, 26, },
+ { 1, 0, 1, 7, 12, 26, },
+ { 0, 0, 1, 7, 13, 63, },
+ { 2, 0, 1, 7, 13, 26, },
+ { 1, 0, 1, 7, 13, 26, },
+ { 0, 0, 1, 7, 14, 63, },
+ { 2, 0, 1, 7, 14, 63, },
+ { 1, 0, 1, 7, 14, 63, },
+ { 0, 1, 0, 1, 36, 38, },
+ { 2, 1, 0, 1, 36, 32, },
+ { 1, 1, 0, 1, 36, 32, },
+ { 0, 1, 0, 1, 40, 38, },
+ { 2, 1, 0, 1, 40, 32, },
+ { 1, 1, 0, 1, 40, 32, },
+ { 0, 1, 0, 1, 44, 38, },
+ { 2, 1, 0, 1, 44, 32, },
+ { 1, 1, 0, 1, 44, 32, },
+ { 0, 1, 0, 1, 48, 38, },
+ { 2, 1, 0, 1, 48, 32, },
+ { 1, 1, 0, 1, 48, 32, },
+ { 0, 1, 0, 1, 52, 38, },
+ { 2, 1, 0, 1, 52, 32, },
+ { 1, 1, 0, 1, 52, 32, },
+ { 0, 1, 0, 1, 56, 38, },
+ { 2, 1, 0, 1, 56, 32, },
+ { 1, 1, 0, 1, 56, 32, },
+ { 0, 1, 0, 1, 60, 38, },
+ { 2, 1, 0, 1, 60, 32, },
+ { 1, 1, 0, 1, 60, 32, },
+ { 0, 1, 0, 1, 64, 38, },
+ { 2, 1, 0, 1, 64, 32, },
+ { 1, 1, 0, 1, 64, 32, },
+ { 0, 1, 0, 1, 100, 36, },
+ { 2, 1, 0, 1, 100, 32, },
+ { 1, 1, 0, 1, 100, 32, },
+ { 0, 1, 0, 1, 104, 36, },
+ { 2, 1, 0, 1, 104, 32, },
+ { 1, 1, 0, 1, 104, 32, },
+ { 0, 1, 0, 1, 108, 36, },
+ { 2, 1, 0, 1, 108, 32, },
+ { 1, 1, 0, 1, 108, 32, },
+ { 0, 1, 0, 1, 112, 36, },
+ { 2, 1, 0, 1, 112, 32, },
+ { 1, 1, 0, 1, 112, 32, },
+ { 0, 1, 0, 1, 116, 36, },
+ { 2, 1, 0, 1, 116, 32, },
+ { 1, 1, 0, 1, 116, 32, },
+ { 0, 1, 0, 1, 120, 36, },
+ { 2, 1, 0, 1, 120, 32, },
+ { 1, 1, 0, 1, 120, 32, },
+ { 0, 1, 0, 1, 124, 36, },
+ { 2, 1, 0, 1, 124, 32, },
+ { 1, 1, 0, 1, 124, 32, },
+ { 0, 1, 0, 1, 128, 36, },
+ { 2, 1, 0, 1, 128, 32, },
+ { 1, 1, 0, 1, 128, 32, },
+ { 0, 1, 0, 1, 132, 36, },
+ { 2, 1, 0, 1, 132, 32, },
+ { 1, 1, 0, 1, 132, 32, },
+ { 0, 1, 0, 1, 136, 36, },
+ { 2, 1, 0, 1, 136, 32, },
+ { 1, 1, 0, 1, 136, 32, },
+ { 0, 1, 0, 1, 140, 36, },
+ { 2, 1, 0, 1, 140, 32, },
+ { 1, 1, 0, 1, 140, 32, },
+ { 0, 1, 0, 1, 149, 36, },
+ { 2, 1, 0, 1, 149, 32, },
+ { 1, 1, 0, 1, 149, 63, },
+ { 0, 1, 0, 1, 153, 36, },
+ { 2, 1, 0, 1, 153, 32, },
+ { 1, 1, 0, 1, 153, 63, },
+ { 0, 1, 0, 1, 157, 36, },
+ { 2, 1, 0, 1, 157, 32, },
+ { 1, 1, 0, 1, 157, 63, },
+ { 0, 1, 0, 1, 161, 36, },
+ { 2, 1, 0, 1, 161, 32, },
+ { 1, 1, 0, 1, 161, 63, },
+ { 0, 1, 0, 1, 165, 36, },
+ { 2, 1, 0, 1, 165, 32, },
+ { 1, 1, 0, 1, 165, 63, },
+ { 0, 1, 0, 2, 36, 36, },
+ { 2, 1, 0, 2, 36, 32, },
+ { 1, 1, 0, 2, 36, 32, },
+ { 0, 1, 0, 2, 40, 36, },
+ { 2, 1, 0, 2, 40, 32, },
+ { 1, 1, 0, 2, 40, 32, },
+ { 0, 1, 0, 2, 44, 36, },
+ { 2, 1, 0, 2, 44, 32, },
+ { 1, 1, 0, 2, 44, 32, },
+ { 0, 1, 0, 2, 48, 36, },
+ { 2, 1, 0, 2, 48, 32, },
+ { 1, 1, 0, 2, 48, 32, },
+ { 0, 1, 0, 2, 52, 36, },
+ { 2, 1, 0, 2, 52, 32, },
+ { 1, 1, 0, 2, 52, 32, },
+ { 0, 1, 0, 2, 56, 36, },
+ { 2, 1, 0, 2, 56, 32, },
+ { 1, 1, 0, 2, 56, 32, },
+ { 0, 1, 0, 2, 60, 36, },
+ { 2, 1, 0, 2, 60, 32, },
+ { 1, 1, 0, 2, 60, 32, },
+ { 0, 1, 0, 2, 64, 36, },
+ { 2, 1, 0, 2, 64, 32, },
+ { 1, 1, 0, 2, 64, 32, },
+ { 0, 1, 0, 2, 100, 36, },
+ { 2, 1, 0, 2, 100, 32, },
+ { 1, 1, 0, 2, 100, 32, },
+ { 0, 1, 0, 2, 104, 36, },
+ { 2, 1, 0, 2, 104, 32, },
+ { 1, 1, 0, 2, 104, 32, },
+ { 0, 1, 0, 2, 108, 36, },
+ { 2, 1, 0, 2, 108, 32, },
+ { 1, 1, 0, 2, 108, 32, },
+ { 0, 1, 0, 2, 112, 36, },
+ { 2, 1, 0, 2, 112, 32, },
+ { 1, 1, 0, 2, 112, 32, },
+ { 0, 1, 0, 2, 116, 36, },
+ { 2, 1, 0, 2, 116, 32, },
+ { 1, 1, 0, 2, 116, 32, },
+ { 0, 1, 0, 2, 120, 36, },
+ { 2, 1, 0, 2, 120, 32, },
+ { 1, 1, 0, 2, 120, 32, },
+ { 0, 1, 0, 2, 124, 36, },
+ { 2, 1, 0, 2, 124, 32, },
+ { 1, 1, 0, 2, 124, 32, },
+ { 0, 1, 0, 2, 128, 36, },
+ { 2, 1, 0, 2, 128, 32, },
+ { 1, 1, 0, 2, 128, 32, },
+ { 0, 1, 0, 2, 132, 36, },
+ { 2, 1, 0, 2, 132, 32, },
+ { 1, 1, 0, 2, 132, 32, },
+ { 0, 1, 0, 2, 136, 36, },
+ { 2, 1, 0, 2, 136, 32, },
+ { 1, 1, 0, 2, 136, 32, },
+ { 0, 1, 0, 2, 140, 34, },
+ { 2, 1, 0, 2, 140, 32, },
+ { 1, 1, 0, 2, 140, 32, },
+ { 0, 1, 0, 2, 149, 32, },
+ { 2, 1, 0, 2, 149, 32, },
+ { 1, 1, 0, 2, 149, 63, },
+ { 0, 1, 0, 2, 153, 38, },
+ { 2, 1, 0, 2, 153, 32, },
+ { 1, 1, 0, 2, 153, 63, },
+ { 0, 1, 0, 2, 157, 38, },
+ { 2, 1, 0, 2, 157, 32, },
+ { 1, 1, 0, 2, 157, 63, },
+ { 0, 1, 0, 2, 161, 38, },
+ { 2, 1, 0, 2, 161, 32, },
+ { 1, 1, 0, 2, 161, 63, },
+ { 0, 1, 0, 2, 165, 38, },
+ { 2, 1, 0, 2, 165, 32, },
+ { 1, 1, 0, 2, 165, 63, },
+ { 0, 1, 0, 3, 36, 34, },
+ { 2, 1, 0, 3, 36, 30, },
+ { 1, 1, 0, 3, 36, 30, },
+ { 0, 1, 0, 3, 40, 34, },
+ { 2, 1, 0, 3, 40, 30, },
+ { 1, 1, 0, 3, 40, 30, },
+ { 0, 1, 0, 3, 44, 34, },
+ { 2, 1, 0, 3, 44, 30, },
+ { 1, 1, 0, 3, 44, 30, },
+ { 0, 1, 0, 3, 48, 34, },
+ { 2, 1, 0, 3, 48, 30, },
+ { 1, 1, 0, 3, 48, 30, },
+ { 0, 1, 0, 3, 52, 34, },
+ { 2, 1, 0, 3, 52, 30, },
+ { 1, 1, 0, 3, 52, 30, },
+ { 0, 1, 0, 3, 56, 34, },
+ { 2, 1, 0, 3, 56, 30, },
+ { 1, 1, 0, 3, 56, 30, },
+ { 0, 1, 0, 3, 60, 34, },
+ { 2, 1, 0, 3, 60, 30, },
+ { 1, 1, 0, 3, 60, 30, },
+ { 0, 1, 0, 3, 64, 34, },
+ { 2, 1, 0, 3, 64, 30, },
+ { 1, 1, 0, 3, 64, 30, },
+ { 0, 1, 0, 3, 100, 34, },
+ { 2, 1, 0, 3, 100, 30, },
+ { 1, 1, 0, 3, 100, 30, },
+ { 0, 1, 0, 3, 104, 34, },
+ { 2, 1, 0, 3, 104, 30, },
+ { 1, 1, 0, 3, 104, 30, },
+ { 0, 1, 0, 3, 108, 34, },
+ { 2, 1, 0, 3, 108, 30, },
+ { 1, 1, 0, 3, 108, 30, },
+ { 0, 1, 0, 3, 112, 34, },
+ { 2, 1, 0, 3, 112, 30, },
+ { 1, 1, 0, 3, 112, 30, },
+ { 0, 1, 0, 3, 116, 34, },
+ { 2, 1, 0, 3, 116, 30, },
+ { 1, 1, 0, 3, 116, 30, },
+ { 0, 1, 0, 3, 120, 34, },
+ { 2, 1, 0, 3, 120, 30, },
+ { 1, 1, 0, 3, 120, 30, },
+ { 0, 1, 0, 3, 124, 34, },
+ { 2, 1, 0, 3, 124, 30, },
+ { 1, 1, 0, 3, 124, 30, },
+ { 0, 1, 0, 3, 128, 34, },
+ { 2, 1, 0, 3, 128, 30, },
+ { 1, 1, 0, 3, 128, 30, },
+ { 0, 1, 0, 3, 132, 34, },
+ { 2, 1, 0, 3, 132, 30, },
+ { 1, 1, 0, 3, 132, 30, },
+ { 0, 1, 0, 3, 136, 34, },
+ { 2, 1, 0, 3, 136, 30, },
+ { 1, 1, 0, 3, 136, 30, },
+ { 0, 1, 0, 3, 140, 32, },
+ { 2, 1, 0, 3, 140, 30, },
+ { 1, 1, 0, 3, 140, 30, },
+ { 0, 1, 0, 3, 149, 30, },
+ { 2, 1, 0, 3, 149, 30, },
+ { 1, 1, 0, 3, 149, 63, },
+ { 0, 1, 0, 3, 153, 36, },
+ { 2, 1, 0, 3, 153, 30, },
+ { 1, 1, 0, 3, 153, 63, },
+ { 0, 1, 0, 3, 157, 36, },
+ { 2, 1, 0, 3, 157, 30, },
+ { 1, 1, 0, 3, 157, 63, },
+ { 0, 1, 0, 3, 161, 36, },
+ { 2, 1, 0, 3, 161, 30, },
+ { 1, 1, 0, 3, 161, 63, },
+ { 0, 1, 0, 3, 165, 36, },
+ { 2, 1, 0, 3, 165, 30, },
+ { 1, 1, 0, 3, 165, 63, },
+ { 0, 1, 0, 6, 36, 32, },
+ { 2, 1, 0, 6, 36, 28, },
+ { 1, 1, 0, 6, 36, 28, },
+ { 0, 1, 0, 6, 40, 32, },
+ { 2, 1, 0, 6, 40, 28, },
+ { 1, 1, 0, 6, 40, 28, },
+ { 0, 1, 0, 6, 44, 32, },
+ { 2, 1, 0, 6, 44, 28, },
+ { 1, 1, 0, 6, 44, 28, },
+ { 0, 1, 0, 6, 48, 32, },
+ { 2, 1, 0, 6, 48, 28, },
+ { 1, 1, 0, 6, 48, 28, },
+ { 0, 1, 0, 6, 52, 32, },
+ { 2, 1, 0, 6, 52, 28, },
+ { 1, 1, 0, 6, 52, 28, },
+ { 0, 1, 0, 6, 56, 32, },
+ { 2, 1, 0, 6, 56, 28, },
+ { 1, 1, 0, 6, 56, 28, },
+ { 0, 1, 0, 6, 60, 32, },
+ { 2, 1, 0, 6, 60, 28, },
+ { 1, 1, 0, 6, 60, 28, },
+ { 0, 1, 0, 6, 64, 32, },
+ { 2, 1, 0, 6, 64, 28, },
+ { 1, 1, 0, 6, 64, 28, },
+ { 0, 1, 0, 6, 100, 32, },
+ { 2, 1, 0, 6, 100, 28, },
+ { 1, 1, 0, 6, 100, 28, },
+ { 0, 1, 0, 6, 104, 32, },
+ { 2, 1, 0, 6, 104, 28, },
+ { 1, 1, 0, 6, 104, 28, },
+ { 0, 1, 0, 6, 108, 32, },
+ { 2, 1, 0, 6, 108, 28, },
+ { 1, 1, 0, 6, 108, 28, },
+ { 0, 1, 0, 6, 112, 32, },
+ { 2, 1, 0, 6, 112, 28, },
+ { 1, 1, 0, 6, 112, 28, },
+ { 0, 1, 0, 6, 116, 32, },
+ { 2, 1, 0, 6, 116, 28, },
+ { 1, 1, 0, 6, 116, 28, },
+ { 0, 1, 0, 6, 120, 32, },
+ { 2, 1, 0, 6, 120, 28, },
+ { 1, 1, 0, 6, 120, 28, },
+ { 0, 1, 0, 6, 124, 32, },
+ { 2, 1, 0, 6, 124, 28, },
+ { 1, 1, 0, 6, 124, 28, },
+ { 0, 1, 0, 6, 128, 32, },
+ { 2, 1, 0, 6, 128, 28, },
+ { 1, 1, 0, 6, 128, 28, },
+ { 0, 1, 0, 6, 132, 32, },
+ { 2, 1, 0, 6, 132, 28, },
+ { 1, 1, 0, 6, 132, 28, },
+ { 0, 1, 0, 6, 136, 32, },
+ { 2, 1, 0, 6, 136, 28, },
+ { 1, 1, 0, 6, 136, 28, },
+ { 0, 1, 0, 6, 140, 30, },
+ { 2, 1, 0, 6, 140, 28, },
+ { 1, 1, 0, 6, 140, 28, },
+ { 0, 1, 0, 6, 149, 28, },
+ { 2, 1, 0, 6, 149, 28, },
+ { 1, 1, 0, 6, 149, 63, },
+ { 0, 1, 0, 6, 153, 34, },
+ { 2, 1, 0, 6, 153, 28, },
+ { 1, 1, 0, 6, 153, 63, },
+ { 0, 1, 0, 6, 157, 34, },
+ { 2, 1, 0, 6, 157, 28, },
+ { 1, 1, 0, 6, 157, 63, },
+ { 0, 1, 0, 6, 161, 34, },
+ { 2, 1, 0, 6, 161, 28, },
+ { 1, 1, 0, 6, 161, 63, },
+ { 0, 1, 0, 6, 165, 34, },
+ { 2, 1, 0, 6, 165, 28, },
+ { 1, 1, 0, 6, 165, 63, },
+ { 0, 1, 0, 7, 36, 30, },
+ { 2, 1, 0, 7, 36, 26, },
+ { 1, 1, 0, 7, 36, 26, },
+ { 0, 1, 0, 7, 40, 30, },
+ { 2, 1, 0, 7, 40, 26, },
+ { 1, 1, 0, 7, 40, 26, },
+ { 0, 1, 0, 7, 44, 30, },
+ { 2, 1, 0, 7, 44, 26, },
+ { 1, 1, 0, 7, 44, 26, },
+ { 0, 1, 0, 7, 48, 30, },
+ { 2, 1, 0, 7, 48, 26, },
+ { 1, 1, 0, 7, 48, 26, },
+ { 0, 1, 0, 7, 52, 30, },
+ { 2, 1, 0, 7, 52, 26, },
+ { 1, 1, 0, 7, 52, 26, },
+ { 0, 1, 0, 7, 56, 30, },
+ { 2, 1, 0, 7, 56, 26, },
+ { 1, 1, 0, 7, 56, 26, },
+ { 0, 1, 0, 7, 60, 30, },
+ { 2, 1, 0, 7, 60, 26, },
+ { 1, 1, 0, 7, 60, 26, },
+ { 0, 1, 0, 7, 64, 30, },
+ { 2, 1, 0, 7, 64, 26, },
+ { 1, 1, 0, 7, 64, 26, },
+ { 0, 1, 0, 7, 100, 30, },
+ { 2, 1, 0, 7, 100, 26, },
+ { 1, 1, 0, 7, 100, 26, },
+ { 0, 1, 0, 7, 104, 30, },
+ { 2, 1, 0, 7, 104, 26, },
+ { 1, 1, 0, 7, 104, 26, },
+ { 0, 1, 0, 7, 108, 30, },
+ { 2, 1, 0, 7, 108, 26, },
+ { 1, 1, 0, 7, 108, 26, },
+ { 0, 1, 0, 7, 112, 30, },
+ { 2, 1, 0, 7, 112, 26, },
+ { 1, 1, 0, 7, 112, 26, },
+ { 0, 1, 0, 7, 116, 30, },
+ { 2, 1, 0, 7, 116, 26, },
+ { 1, 1, 0, 7, 116, 26, },
+ { 0, 1, 0, 7, 120, 30, },
+ { 2, 1, 0, 7, 120, 26, },
+ { 1, 1, 0, 7, 120, 26, },
+ { 0, 1, 0, 7, 124, 30, },
+ { 2, 1, 0, 7, 124, 26, },
+ { 1, 1, 0, 7, 124, 26, },
+ { 0, 1, 0, 7, 128, 30, },
+ { 2, 1, 0, 7, 128, 26, },
+ { 1, 1, 0, 7, 128, 26, },
+ { 0, 1, 0, 7, 132, 30, },
+ { 2, 1, 0, 7, 132, 26, },
+ { 1, 1, 0, 7, 132, 26, },
+ { 0, 1, 0, 7, 136, 30, },
+ { 2, 1, 0, 7, 136, 26, },
+ { 1, 1, 0, 7, 136, 26, },
+ { 0, 1, 0, 7, 140, 28, },
+ { 2, 1, 0, 7, 140, 26, },
+ { 1, 1, 0, 7, 140, 26, },
+ { 0, 1, 0, 7, 149, 26, },
+ { 2, 1, 0, 7, 149, 26, },
+ { 1, 1, 0, 7, 149, 63, },
+ { 0, 1, 0, 7, 153, 32, },
+ { 2, 1, 0, 7, 153, 26, },
+ { 1, 1, 0, 7, 153, 63, },
+ { 0, 1, 0, 7, 157, 32, },
+ { 2, 1, 0, 7, 157, 26, },
+ { 1, 1, 0, 7, 157, 63, },
+ { 0, 1, 0, 7, 161, 32, },
+ { 2, 1, 0, 7, 161, 26, },
+ { 1, 1, 0, 7, 161, 63, },
+ { 0, 1, 0, 7, 165, 32, },
+ { 2, 1, 0, 7, 165, 26, },
+ { 1, 1, 0, 7, 165, 63, },
+ { 0, 1, 1, 2, 38, 32, },
+ { 2, 1, 1, 2, 38, 32, },
+ { 1, 1, 1, 2, 38, 32, },
+ { 0, 1, 1, 2, 46, 32, },
+ { 2, 1, 1, 2, 46, 32, },
+ { 1, 1, 1, 2, 46, 32, },
+ { 0, 1, 1, 2, 54, 32, },
+ { 2, 1, 1, 2, 54, 32, },
+ { 1, 1, 1, 2, 54, 32, },
+ { 0, 1, 1, 2, 62, 30, },
+ { 2, 1, 1, 2, 62, 32, },
+ { 1, 1, 1, 2, 62, 32, },
+ { 0, 1, 1, 2, 102, 30, },
+ { 2, 1, 1, 2, 102, 32, },
+ { 1, 1, 1, 2, 102, 32, },
+ { 0, 1, 1, 2, 110, 38, },
+ { 2, 1, 1, 2, 110, 32, },
+ { 1, 1, 1, 2, 110, 32, },
+ { 0, 1, 1, 2, 118, 38, },
+ { 2, 1, 1, 2, 118, 32, },
+ { 1, 1, 1, 2, 118, 32, },
+ { 0, 1, 1, 2, 126, 38, },
+ { 2, 1, 1, 2, 126, 32, },
+ { 1, 1, 1, 2, 126, 32, },
+ { 0, 1, 1, 2, 134, 38, },
+ { 2, 1, 1, 2, 134, 32, },
+ { 1, 1, 1, 2, 134, 32, },
+ { 0, 1, 1, 2, 151, 32, },
+ { 2, 1, 1, 2, 151, 32, },
+ { 1, 1, 1, 2, 151, 63, },
+ { 0, 1, 1, 2, 159, 32, },
+ { 2, 1, 1, 2, 159, 32, },
+ { 1, 1, 1, 2, 159, 63, },
+ { 0, 1, 1, 3, 38, 30, },
+ { 2, 1, 1, 3, 38, 30, },
+ { 1, 1, 1, 3, 38, 30, },
+ { 0, 1, 1, 3, 46, 30, },
+ { 2, 1, 1, 3, 46, 30, },
+ { 1, 1, 1, 3, 46, 30, },
+ { 0, 1, 1, 3, 54, 30, },
+ { 2, 1, 1, 3, 54, 30, },
+ { 1, 1, 1, 3, 54, 30, },
+ { 0, 1, 1, 3, 62, 28, },
+ { 2, 1, 1, 3, 62, 30, },
+ { 1, 1, 1, 3, 62, 30, },
+ { 0, 1, 1, 3, 102, 28, },
+ { 2, 1, 1, 3, 102, 30, },
+ { 1, 1, 1, 3, 102, 30, },
+ { 0, 1, 1, 3, 110, 36, },
+ { 2, 1, 1, 3, 110, 30, },
+ { 1, 1, 1, 3, 110, 30, },
+ { 0, 1, 1, 3, 118, 36, },
+ { 2, 1, 1, 3, 118, 30, },
+ { 1, 1, 1, 3, 118, 30, },
+ { 0, 1, 1, 3, 126, 36, },
+ { 2, 1, 1, 3, 126, 30, },
+ { 1, 1, 1, 3, 126, 30, },
+ { 0, 1, 1, 3, 134, 36, },
+ { 2, 1, 1, 3, 134, 30, },
+ { 1, 1, 1, 3, 134, 30, },
+ { 0, 1, 1, 3, 151, 30, },
+ { 2, 1, 1, 3, 151, 30, },
+ { 1, 1, 1, 3, 151, 63, },
+ { 0, 1, 1, 3, 159, 30, },
+ { 2, 1, 1, 3, 159, 30, },
+ { 1, 1, 1, 3, 159, 63, },
+ { 0, 1, 1, 6, 38, 28, },
+ { 2, 1, 1, 6, 38, 28, },
+ { 1, 1, 1, 6, 38, 28, },
+ { 0, 1, 1, 6, 46, 28, },
+ { 2, 1, 1, 6, 46, 28, },
+ { 1, 1, 1, 6, 46, 28, },
+ { 0, 1, 1, 6, 54, 28, },
+ { 2, 1, 1, 6, 54, 28, },
+ { 1, 1, 1, 6, 54, 28, },
+ { 0, 1, 1, 6, 62, 26, },
+ { 2, 1, 1, 6, 62, 28, },
+ { 1, 1, 1, 6, 62, 28, },
+ { 0, 1, 1, 6, 102, 26, },
+ { 2, 1, 1, 6, 102, 28, },
+ { 1, 1, 1, 6, 102, 28, },
+ { 0, 1, 1, 6, 110, 34, },
+ { 2, 1, 1, 6, 110, 28, },
+ { 1, 1, 1, 6, 110, 28, },
+ { 0, 1, 1, 6, 118, 34, },
+ { 2, 1, 1, 6, 118, 28, },
+ { 1, 1, 1, 6, 118, 28, },
+ { 0, 1, 1, 6, 126, 34, },
+ { 2, 1, 1, 6, 126, 28, },
+ { 1, 1, 1, 6, 126, 28, },
+ { 0, 1, 1, 6, 134, 34, },
+ { 2, 1, 1, 6, 134, 28, },
+ { 1, 1, 1, 6, 134, 28, },
+ { 0, 1, 1, 6, 151, 28, },
+ { 2, 1, 1, 6, 151, 28, },
+ { 1, 1, 1, 6, 151, 63, },
+ { 0, 1, 1, 6, 159, 28, },
+ { 2, 1, 1, 6, 159, 28, },
+ { 1, 1, 1, 6, 159, 63, },
+ { 0, 1, 1, 7, 38, 26, },
+ { 2, 1, 1, 7, 38, 26, },
+ { 1, 1, 1, 7, 38, 26, },
+ { 0, 1, 1, 7, 46, 26, },
+ { 2, 1, 1, 7, 46, 26, },
+ { 1, 1, 1, 7, 46, 26, },
+ { 0, 1, 1, 7, 54, 26, },
+ { 2, 1, 1, 7, 54, 26, },
+ { 1, 1, 1, 7, 54, 26, },
+ { 0, 1, 1, 7, 62, 24, },
+ { 2, 1, 1, 7, 62, 26, },
+ { 1, 1, 1, 7, 62, 26, },
+ { 0, 1, 1, 7, 102, 24, },
+ { 2, 1, 1, 7, 102, 26, },
+ { 1, 1, 1, 7, 102, 26, },
+ { 0, 1, 1, 7, 110, 32, },
+ { 2, 1, 1, 7, 110, 26, },
+ { 1, 1, 1, 7, 110, 26, },
+ { 0, 1, 1, 7, 118, 32, },
+ { 2, 1, 1, 7, 118, 26, },
+ { 1, 1, 1, 7, 118, 26, },
+ { 0, 1, 1, 7, 126, 32, },
+ { 2, 1, 1, 7, 126, 26, },
+ { 1, 1, 1, 7, 126, 26, },
+ { 0, 1, 1, 7, 134, 32, },
+ { 2, 1, 1, 7, 134, 26, },
+ { 1, 1, 1, 7, 134, 26, },
+ { 0, 1, 1, 7, 151, 26, },
+ { 2, 1, 1, 7, 151, 26, },
+ { 1, 1, 1, 7, 151, 63, },
+ { 0, 1, 1, 7, 159, 26, },
+ { 2, 1, 1, 7, 159, 26, },
+ { 1, 1, 1, 7, 159, 63, },
+ { 0, 1, 2, 4, 42, 26, },
+ { 2, 1, 2, 4, 42, 32, },
+ { 1, 1, 2, 4, 42, 32, },
+ { 0, 1, 2, 4, 58, 26, },
+ { 2, 1, 2, 4, 58, 32, },
+ { 1, 1, 2, 4, 58, 32, },
+ { 0, 1, 2, 4, 106, 28, },
+ { 2, 1, 2, 4, 106, 32, },
+ { 1, 1, 2, 4, 106, 32, },
+ { 0, 1, 2, 4, 122, 28, },
+ { 2, 1, 2, 4, 122, 32, },
+ { 1, 1, 2, 4, 122, 32, },
+ { 0, 1, 2, 4, 155, 28, },
+ { 2, 1, 2, 4, 155, 32, },
+ { 1, 1, 2, 4, 155, 63, },
+ { 0, 1, 2, 5, 42, 24, },
+ { 2, 1, 2, 5, 42, 30, },
+ { 1, 1, 2, 5, 42, 30, },
+ { 0, 1, 2, 5, 58, 24, },
+ { 2, 1, 2, 5, 58, 30, },
+ { 1, 1, 2, 5, 58, 30, },
+ { 0, 1, 2, 5, 106, 26, },
+ { 2, 1, 2, 5, 106, 30, },
+ { 1, 1, 2, 5, 106, 30, },
+ { 0, 1, 2, 5, 122, 26, },
+ { 2, 1, 2, 5, 122, 30, },
+ { 1, 1, 2, 5, 122, 30, },
+ { 0, 1, 2, 5, 155, 26, },
+ { 2, 1, 2, 5, 155, 30, },
+ { 1, 1, 2, 5, 155, 63, },
+ { 0, 1, 2, 8, 42, 22, },
+ { 2, 1, 2, 8, 42, 28, },
+ { 1, 1, 2, 8, 42, 28, },
+ { 0, 1, 2, 8, 58, 22, },
+ { 2, 1, 2, 8, 58, 28, },
+ { 1, 1, 2, 8, 58, 28, },
+ { 0, 1, 2, 8, 106, 24, },
+ { 2, 1, 2, 8, 106, 28, },
+ { 1, 1, 2, 8, 106, 28, },
+ { 0, 1, 2, 8, 122, 24, },
+ { 2, 1, 2, 8, 122, 28, },
+ { 1, 1, 2, 8, 122, 28, },
+ { 0, 1, 2, 8, 155, 24, },
+ { 2, 1, 2, 8, 155, 28, },
+ { 1, 1, 2, 8, 155, 63, },
+ { 0, 1, 2, 9, 42, 20, },
+ { 2, 1, 2, 9, 42, 26, },
+ { 1, 1, 2, 9, 42, 26, },
+ { 0, 1, 2, 9, 58, 20, },
+ { 2, 1, 2, 9, 58, 26, },
+ { 1, 1, 2, 9, 58, 26, },
+ { 0, 1, 2, 9, 106, 22, },
+ { 2, 1, 2, 9, 106, 26, },
+ { 1, 1, 2, 9, 106, 26, },
+ { 0, 1, 2, 9, 122, 22, },
+ { 2, 1, 2, 9, 122, 26, },
+ { 1, 1, 2, 9, 122, 26, },
+ { 0, 1, 2, 9, 155, 22, },
+ { 2, 1, 2, 9, 155, 26, },
+ { 1, 1, 2, 9, 155, 63, },
+};
+
+RTW_DECL_TABLE_TXPWR_LMT(rtw8814a_txpwr_lmt_type7);
+
+static const struct rtw_txpwr_lmt_cfg_pair rtw8814a_txpwr_lmt_type8[] = {
+ { 0, 0, 0, 0, 1, 46, },
+ { 2, 0, 0, 0, 1, 46, },
+ { 1, 0, 0, 0, 1, 46, },
+ { 0, 0, 0, 0, 2, 46, },
+ { 2, 0, 0, 0, 2, 46, },
+ { 1, 0, 0, 0, 2, 46, },
+ { 0, 0, 0, 0, 3, 46, },
+ { 2, 0, 0, 0, 3, 46, },
+ { 1, 0, 0, 0, 3, 46, },
+ { 0, 0, 0, 0, 4, 46, },
+ { 2, 0, 0, 0, 4, 46, },
+ { 1, 0, 0, 0, 4, 46, },
+ { 0, 0, 0, 0, 5, 46, },
+ { 2, 0, 0, 0, 5, 46, },
+ { 1, 0, 0, 0, 5, 46, },
+ { 0, 0, 0, 0, 6, 46, },
+ { 2, 0, 0, 0, 6, 46, },
+ { 1, 0, 0, 0, 6, 46, },
+ { 0, 0, 0, 0, 7, 46, },
+ { 2, 0, 0, 0, 7, 46, },
+ { 1, 0, 0, 0, 7, 46, },
+ { 0, 0, 0, 0, 8, 46, },
+ { 2, 0, 0, 0, 8, 46, },
+ { 1, 0, 0, 0, 8, 46, },
+ { 0, 0, 0, 0, 9, 46, },
+ { 2, 0, 0, 0, 9, 46, },
+ { 1, 0, 0, 0, 9, 46, },
+ { 0, 0, 0, 0, 10, 46, },
+ { 2, 0, 0, 0, 10, 46, },
+ { 1, 0, 0, 0, 10, 46, },
+ { 0, 0, 0, 0, 11, 46, },
+ { 2, 0, 0, 0, 11, 46, },
+ { 1, 0, 0, 0, 11, 46, },
+ { 0, 0, 0, 0, 12, 63, },
+ { 2, 0, 0, 0, 12, 46, },
+ { 1, 0, 0, 0, 12, 46, },
+ { 0, 0, 0, 0, 13, 63, },
+ { 2, 0, 0, 0, 13, 46, },
+ { 1, 0, 0, 0, 13, 46, },
+ { 0, 0, 0, 0, 14, 63, },
+ { 2, 0, 0, 0, 14, 63, },
+ { 1, 0, 0, 0, 14, 46, },
+ { 0, 0, 0, 1, 1, 46, },
+ { 2, 0, 0, 1, 1, 46, },
+ { 1, 0, 0, 1, 1, 46, },
+ { 0, 0, 0, 1, 2, 46, },
+ { 2, 0, 0, 1, 2, 46, },
+ { 1, 0, 0, 1, 2, 46, },
+ { 0, 0, 0, 1, 3, 46, },
+ { 2, 0, 0, 1, 3, 46, },
+ { 1, 0, 0, 1, 3, 46, },
+ { 0, 0, 0, 1, 4, 46, },
+ { 2, 0, 0, 1, 4, 46, },
+ { 1, 0, 0, 1, 4, 46, },
+ { 0, 0, 0, 1, 5, 46, },
+ { 2, 0, 0, 1, 5, 46, },
+ { 1, 0, 0, 1, 5, 46, },
+ { 0, 0, 0, 1, 6, 46, },
+ { 2, 0, 0, 1, 6, 46, },
+ { 1, 0, 0, 1, 6, 46, },
+ { 0, 0, 0, 1, 7, 46, },
+ { 2, 0, 0, 1, 7, 46, },
+ { 1, 0, 0, 1, 7, 46, },
+ { 0, 0, 0, 1, 8, 46, },
+ { 2, 0, 0, 1, 8, 46, },
+ { 1, 0, 0, 1, 8, 46, },
+ { 0, 0, 0, 1, 9, 46, },
+ { 2, 0, 0, 1, 9, 46, },
+ { 1, 0, 0, 1, 9, 46, },
+ { 0, 0, 0, 1, 10, 46, },
+ { 2, 0, 0, 1, 10, 46, },
+ { 1, 0, 0, 1, 10, 46, },
+ { 0, 0, 0, 1, 11, 46, },
+ { 2, 0, 0, 1, 11, 46, },
+ { 1, 0, 0, 1, 11, 46, },
+ { 0, 0, 0, 1, 12, 63, },
+ { 2, 0, 0, 1, 12, 46, },
+ { 1, 0, 0, 1, 12, 46, },
+ { 0, 0, 0, 1, 13, 63, },
+ { 2, 0, 0, 1, 13, 46, },
+ { 1, 0, 0, 1, 13, 46, },
+ { 0, 0, 0, 1, 14, 63, },
+ { 2, 0, 0, 1, 14, 63, },
+ { 1, 0, 0, 1, 14, 46, },
+ { 0, 0, 0, 2, 1, 46, },
+ { 2, 0, 0, 2, 1, 46, },
+ { 1, 0, 0, 2, 1, 46, },
+ { 0, 0, 0, 2, 2, 46, },
+ { 2, 0, 0, 2, 2, 46, },
+ { 1, 0, 0, 2, 2, 46, },
+ { 0, 0, 0, 2, 3, 46, },
+ { 2, 0, 0, 2, 3, 46, },
+ { 1, 0, 0, 2, 3, 46, },
+ { 0, 0, 0, 2, 4, 46, },
+ { 2, 0, 0, 2, 4, 46, },
+ { 1, 0, 0, 2, 4, 46, },
+ { 0, 0, 0, 2, 5, 46, },
+ { 2, 0, 0, 2, 5, 46, },
+ { 1, 0, 0, 2, 5, 46, },
+ { 0, 0, 0, 2, 6, 46, },
+ { 2, 0, 0, 2, 6, 46, },
+ { 1, 0, 0, 2, 6, 46, },
+ { 0, 0, 0, 2, 7, 46, },
+ { 2, 0, 0, 2, 7, 46, },
+ { 1, 0, 0, 2, 7, 46, },
+ { 0, 0, 0, 2, 8, 46, },
+ { 2, 0, 0, 2, 8, 46, },
+ { 1, 0, 0, 2, 8, 46, },
+ { 0, 0, 0, 2, 9, 46, },
+ { 2, 0, 0, 2, 9, 46, },
+ { 1, 0, 0, 2, 9, 46, },
+ { 0, 0, 0, 2, 10, 46, },
+ { 2, 0, 0, 2, 10, 46, },
+ { 1, 0, 0, 2, 10, 46, },
+ { 0, 0, 0, 2, 11, 46, },
+ { 2, 0, 0, 2, 11, 46, },
+ { 1, 0, 0, 2, 11, 46, },
+ { 0, 0, 0, 2, 12, 63, },
+ { 2, 0, 0, 2, 12, 46, },
+ { 1, 0, 0, 2, 12, 46, },
+ { 0, 0, 0, 2, 13, 63, },
+ { 2, 0, 0, 2, 13, 46, },
+ { 1, 0, 0, 2, 13, 46, },
+ { 0, 0, 0, 2, 14, 63, },
+ { 2, 0, 0, 2, 14, 63, },
+ { 1, 0, 0, 2, 14, 46, },
+ { 0, 0, 0, 3, 1, 46, },
+ { 2, 0, 0, 3, 1, 46, },
+ { 1, 0, 0, 3, 1, 46, },
+ { 0, 0, 0, 3, 2, 46, },
+ { 2, 0, 0, 3, 2, 46, },
+ { 1, 0, 0, 3, 2, 46, },
+ { 0, 0, 0, 3, 3, 46, },
+ { 2, 0, 0, 3, 3, 46, },
+ { 1, 0, 0, 3, 3, 46, },
+ { 0, 0, 0, 3, 4, 46, },
+ { 2, 0, 0, 3, 4, 46, },
+ { 1, 0, 0, 3, 4, 46, },
+ { 0, 0, 0, 3, 5, 46, },
+ { 2, 0, 0, 3, 5, 46, },
+ { 1, 0, 0, 3, 5, 46, },
+ { 0, 0, 0, 3, 6, 46, },
+ { 2, 0, 0, 3, 6, 46, },
+ { 1, 0, 0, 3, 6, 46, },
+ { 0, 0, 0, 3, 7, 46, },
+ { 2, 0, 0, 3, 7, 46, },
+ { 1, 0, 0, 3, 7, 46, },
+ { 0, 0, 0, 3, 8, 46, },
+ { 2, 0, 0, 3, 8, 46, },
+ { 1, 0, 0, 3, 8, 46, },
+ { 0, 0, 0, 3, 9, 46, },
+ { 2, 0, 0, 3, 9, 46, },
+ { 1, 0, 0, 3, 9, 46, },
+ { 0, 0, 0, 3, 10, 46, },
+ { 2, 0, 0, 3, 10, 46, },
+ { 1, 0, 0, 3, 10, 46, },
+ { 0, 0, 0, 3, 11, 46, },
+ { 2, 0, 0, 3, 11, 46, },
+ { 1, 0, 0, 3, 11, 46, },
+ { 0, 0, 0, 3, 12, 63, },
+ { 2, 0, 0, 3, 12, 46, },
+ { 1, 0, 0, 3, 12, 46, },
+ { 0, 0, 0, 3, 13, 63, },
+ { 2, 0, 0, 3, 13, 46, },
+ { 1, 0, 0, 3, 13, 46, },
+ { 0, 0, 0, 3, 14, 63, },
+ { 2, 0, 0, 3, 14, 63, },
+ { 1, 0, 0, 3, 14, 46, },
+ { 0, 0, 0, 6, 1, 46, },
+ { 2, 0, 0, 6, 1, 46, },
+ { 1, 0, 0, 6, 1, 46, },
+ { 0, 0, 0, 6, 2, 46, },
+ { 2, 0, 0, 6, 2, 46, },
+ { 1, 0, 0, 6, 2, 46, },
+ { 0, 0, 0, 6, 3, 46, },
+ { 2, 0, 0, 6, 3, 46, },
+ { 1, 0, 0, 6, 3, 46, },
+ { 0, 0, 0, 6, 4, 46, },
+ { 2, 0, 0, 6, 4, 46, },
+ { 1, 0, 0, 6, 4, 46, },
+ { 0, 0, 0, 6, 5, 46, },
+ { 2, 0, 0, 6, 5, 46, },
+ { 1, 0, 0, 6, 5, 46, },
+ { 0, 0, 0, 6, 6, 46, },
+ { 2, 0, 0, 6, 6, 46, },
+ { 1, 0, 0, 6, 6, 46, },
+ { 0, 0, 0, 6, 7, 46, },
+ { 2, 0, 0, 6, 7, 46, },
+ { 1, 0, 0, 6, 7, 46, },
+ { 0, 0, 0, 6, 8, 46, },
+ { 2, 0, 0, 6, 8, 46, },
+ { 1, 0, 0, 6, 8, 46, },
+ { 0, 0, 0, 6, 9, 46, },
+ { 2, 0, 0, 6, 9, 46, },
+ { 1, 0, 0, 6, 9, 46, },
+ { 0, 0, 0, 6, 10, 46, },
+ { 2, 0, 0, 6, 10, 46, },
+ { 1, 0, 0, 6, 10, 46, },
+ { 0, 0, 0, 6, 11, 46, },
+ { 2, 0, 0, 6, 11, 46, },
+ { 1, 0, 0, 6, 11, 46, },
+ { 0, 0, 0, 6, 12, 63, },
+ { 2, 0, 0, 6, 12, 46, },
+ { 1, 0, 0, 6, 12, 46, },
+ { 0, 0, 0, 6, 13, 63, },
+ { 2, 0, 0, 6, 13, 46, },
+ { 1, 0, 0, 6, 13, 46, },
+ { 0, 0, 0, 6, 14, 63, },
+ { 2, 0, 0, 6, 14, 63, },
+ { 1, 0, 0, 6, 14, 46, },
+ { 0, 0, 0, 7, 1, 46, },
+ { 2, 0, 0, 7, 1, 46, },
+ { 1, 0, 0, 7, 1, 46, },
+ { 0, 0, 0, 7, 2, 46, },
+ { 2, 0, 0, 7, 2, 46, },
+ { 1, 0, 0, 7, 2, 46, },
+ { 0, 0, 0, 7, 3, 46, },
+ { 2, 0, 0, 7, 3, 46, },
+ { 1, 0, 0, 7, 3, 46, },
+ { 0, 0, 0, 7, 4, 46, },
+ { 2, 0, 0, 7, 4, 46, },
+ { 1, 0, 0, 7, 4, 46, },
+ { 0, 0, 0, 7, 5, 46, },
+ { 2, 0, 0, 7, 5, 46, },
+ { 1, 0, 0, 7, 5, 46, },
+ { 0, 0, 0, 7, 6, 46, },
+ { 2, 0, 0, 7, 6, 46, },
+ { 1, 0, 0, 7, 6, 46, },
+ { 0, 0, 0, 7, 7, 46, },
+ { 2, 0, 0, 7, 7, 46, },
+ { 1, 0, 0, 7, 7, 46, },
+ { 0, 0, 0, 7, 8, 46, },
+ { 2, 0, 0, 7, 8, 46, },
+ { 1, 0, 0, 7, 8, 46, },
+ { 0, 0, 0, 7, 9, 46, },
+ { 2, 0, 0, 7, 9, 46, },
+ { 1, 0, 0, 7, 9, 46, },
+ { 0, 0, 0, 7, 10, 46, },
+ { 2, 0, 0, 7, 10, 46, },
+ { 1, 0, 0, 7, 10, 46, },
+ { 0, 0, 0, 7, 11, 46, },
+ { 2, 0, 0, 7, 11, 46, },
+ { 1, 0, 0, 7, 11, 46, },
+ { 0, 0, 0, 7, 12, 63, },
+ { 2, 0, 0, 7, 12, 46, },
+ { 1, 0, 0, 7, 12, 46, },
+ { 0, 0, 0, 7, 13, 63, },
+ { 2, 0, 0, 7, 13, 46, },
+ { 1, 0, 0, 7, 13, 46, },
+ { 0, 0, 0, 7, 14, 63, },
+ { 2, 0, 0, 7, 14, 63, },
+ { 1, 0, 0, 7, 14, 46, },
+ { 0, 0, 1, 2, 1, 63, },
+ { 2, 0, 1, 2, 1, 63, },
+ { 1, 0, 1, 2, 1, 63, },
+ { 0, 0, 1, 2, 2, 63, },
+ { 2, 0, 1, 2, 2, 63, },
+ { 1, 0, 1, 2, 2, 63, },
+ { 0, 0, 1, 2, 3, 30, },
+ { 2, 0, 1, 2, 3, 34, },
+ { 1, 0, 1, 2, 3, 34, },
+ { 0, 0, 1, 2, 4, 34, },
+ { 2, 0, 1, 2, 4, 34, },
+ { 1, 0, 1, 2, 4, 34, },
+ { 0, 0, 1, 2, 5, 34, },
+ { 2, 0, 1, 2, 5, 34, },
+ { 1, 0, 1, 2, 5, 34, },
+ { 0, 0, 1, 2, 6, 34, },
+ { 2, 0, 1, 2, 6, 34, },
+ { 1, 0, 1, 2, 6, 34, },
+ { 0, 0, 1, 2, 7, 34, },
+ { 2, 0, 1, 2, 7, 34, },
+ { 1, 0, 1, 2, 7, 34, },
+ { 0, 0, 1, 2, 8, 34, },
+ { 2, 0, 1, 2, 8, 34, },
+ { 1, 0, 1, 2, 8, 34, },
+ { 0, 0, 1, 2, 9, 34, },
+ { 2, 0, 1, 2, 9, 34, },
+ { 1, 0, 1, 2, 9, 34, },
+ { 0, 0, 1, 2, 10, 34, },
+ { 2, 0, 1, 2, 10, 34, },
+ { 1, 0, 1, 2, 10, 34, },
+ { 0, 0, 1, 2, 11, 28, },
+ { 2, 0, 1, 2, 11, 34, },
+ { 1, 0, 1, 2, 11, 34, },
+ { 0, 0, 1, 2, 12, 63, },
+ { 2, 0, 1, 2, 12, 34, },
+ { 1, 0, 1, 2, 12, 34, },
+ { 0, 0, 1, 2, 13, 63, },
+ { 2, 0, 1, 2, 13, 34, },
+ { 1, 0, 1, 2, 13, 34, },
+ { 0, 0, 1, 2, 14, 63, },
+ { 2, 0, 1, 2, 14, 63, },
+ { 1, 0, 1, 2, 14, 63, },
+ { 0, 0, 1, 3, 1, 63, },
+ { 2, 0, 1, 3, 1, 63, },
+ { 1, 0, 1, 3, 1, 63, },
+ { 0, 0, 1, 3, 2, 63, },
+ { 2, 0, 1, 3, 2, 63, },
+ { 1, 0, 1, 3, 2, 63, },
+ { 0, 0, 1, 3, 3, 30, },
+ { 2, 0, 1, 3, 3, 34, },
+ { 1, 0, 1, 3, 3, 34, },
+ { 0, 0, 1, 3, 4, 34, },
+ { 2, 0, 1, 3, 4, 34, },
+ { 1, 0, 1, 3, 4, 34, },
+ { 0, 0, 1, 3, 5, 34, },
+ { 2, 0, 1, 3, 5, 34, },
+ { 1, 0, 1, 3, 5, 34, },
+ { 0, 0, 1, 3, 6, 34, },
+ { 2, 0, 1, 3, 6, 34, },
+ { 1, 0, 1, 3, 6, 34, },
+ { 0, 0, 1, 3, 7, 34, },
+ { 2, 0, 1, 3, 7, 34, },
+ { 1, 0, 1, 3, 7, 34, },
+ { 0, 0, 1, 3, 8, 34, },
+ { 2, 0, 1, 3, 8, 34, },
+ { 1, 0, 1, 3, 8, 34, },
+ { 0, 0, 1, 3, 9, 34, },
+ { 2, 0, 1, 3, 9, 34, },
+ { 1, 0, 1, 3, 9, 34, },
+ { 0, 0, 1, 3, 10, 34, },
+ { 2, 0, 1, 3, 10, 34, },
+ { 1, 0, 1, 3, 10, 34, },
+ { 0, 0, 1, 3, 11, 28, },
+ { 2, 0, 1, 3, 11, 34, },
+ { 1, 0, 1, 3, 11, 34, },
+ { 0, 0, 1, 3, 12, 63, },
+ { 2, 0, 1, 3, 12, 34, },
+ { 1, 0, 1, 3, 12, 34, },
+ { 0, 0, 1, 3, 13, 63, },
+ { 2, 0, 1, 3, 13, 34, },
+ { 1, 0, 1, 3, 13, 34, },
+ { 0, 0, 1, 3, 14, 63, },
+ { 2, 0, 1, 3, 14, 63, },
+ { 1, 0, 1, 3, 14, 63, },
+ { 0, 0, 1, 6, 1, 63, },
+ { 2, 0, 1, 6, 1, 63, },
+ { 1, 0, 1, 6, 1, 63, },
+ { 0, 0, 1, 6, 2, 63, },
+ { 2, 0, 1, 6, 2, 63, },
+ { 1, 0, 1, 6, 2, 63, },
+ { 0, 0, 1, 6, 3, 30, },
+ { 2, 0, 1, 6, 3, 34, },
+ { 1, 0, 1, 6, 3, 34, },
+ { 0, 0, 1, 6, 4, 34, },
+ { 2, 0, 1, 6, 4, 34, },
+ { 1, 0, 1, 6, 4, 34, },
+ { 0, 0, 1, 6, 5, 34, },
+ { 2, 0, 1, 6, 5, 34, },
+ { 1, 0, 1, 6, 5, 34, },
+ { 0, 0, 1, 6, 6, 34, },
+ { 2, 0, 1, 6, 6, 34, },
+ { 1, 0, 1, 6, 6, 34, },
+ { 0, 0, 1, 6, 7, 34, },
+ { 2, 0, 1, 6, 7, 34, },
+ { 1, 0, 1, 6, 7, 34, },
+ { 0, 0, 1, 6, 8, 34, },
+ { 2, 0, 1, 6, 8, 34, },
+ { 1, 0, 1, 6, 8, 34, },
+ { 0, 0, 1, 6, 9, 34, },
+ { 2, 0, 1, 6, 9, 34, },
+ { 1, 0, 1, 6, 9, 34, },
+ { 0, 0, 1, 6, 10, 34, },
+ { 2, 0, 1, 6, 10, 34, },
+ { 1, 0, 1, 6, 10, 34, },
+ { 0, 0, 1, 6, 11, 28, },
+ { 2, 0, 1, 6, 11, 34, },
+ { 1, 0, 1, 6, 11, 34, },
+ { 0, 0, 1, 6, 12, 63, },
+ { 2, 0, 1, 6, 12, 34, },
+ { 1, 0, 1, 6, 12, 34, },
+ { 0, 0, 1, 6, 13, 63, },
+ { 2, 0, 1, 6, 13, 34, },
+ { 1, 0, 1, 6, 13, 34, },
+ { 0, 0, 1, 6, 14, 63, },
+ { 2, 0, 1, 6, 14, 63, },
+ { 1, 0, 1, 6, 14, 63, },
+ { 0, 0, 1, 7, 1, 63, },
+ { 2, 0, 1, 7, 1, 63, },
+ { 1, 0, 1, 7, 1, 63, },
+ { 0, 0, 1, 7, 2, 63, },
+ { 2, 0, 1, 7, 2, 63, },
+ { 1, 0, 1, 7, 2, 63, },
+ { 0, 0, 1, 7, 3, 30, },
+ { 2, 0, 1, 7, 3, 34, },
+ { 1, 0, 1, 7, 3, 34, },
+ { 0, 0, 1, 7, 4, 34, },
+ { 2, 0, 1, 7, 4, 34, },
+ { 1, 0, 1, 7, 4, 34, },
+ { 0, 0, 1, 7, 5, 34, },
+ { 2, 0, 1, 7, 5, 34, },
+ { 1, 0, 1, 7, 5, 34, },
+ { 0, 0, 1, 7, 6, 34, },
+ { 2, 0, 1, 7, 6, 34, },
+ { 1, 0, 1, 7, 6, 34, },
+ { 0, 0, 1, 7, 7, 34, },
+ { 2, 0, 1, 7, 7, 34, },
+ { 1, 0, 1, 7, 7, 34, },
+ { 0, 0, 1, 7, 8, 34, },
+ { 2, 0, 1, 7, 8, 34, },
+ { 1, 0, 1, 7, 8, 34, },
+ { 0, 0, 1, 7, 9, 34, },
+ { 2, 0, 1, 7, 9, 34, },
+ { 1, 0, 1, 7, 9, 34, },
+ { 0, 0, 1, 7, 10, 34, },
+ { 2, 0, 1, 7, 10, 34, },
+ { 1, 0, 1, 7, 10, 34, },
+ { 0, 0, 1, 7, 11, 28, },
+ { 2, 0, 1, 7, 11, 34, },
+ { 1, 0, 1, 7, 11, 34, },
+ { 0, 0, 1, 7, 12, 63, },
+ { 2, 0, 1, 7, 12, 34, },
+ { 1, 0, 1, 7, 12, 34, },
+ { 0, 0, 1, 7, 13, 63, },
+ { 2, 0, 1, 7, 13, 34, },
+ { 1, 0, 1, 7, 13, 34, },
+ { 0, 0, 1, 7, 14, 63, },
+ { 2, 0, 1, 7, 14, 63, },
+ { 1, 0, 1, 7, 14, 63, },
+ { 0, 1, 0, 1, 36, 46, },
+ { 2, 1, 0, 1, 36, 46, },
+ { 1, 1, 0, 1, 36, 46, },
+ { 0, 1, 0, 1, 40, 46, },
+ { 2, 1, 0, 1, 40, 46, },
+ { 1, 1, 0, 1, 40, 46, },
+ { 0, 1, 0, 1, 44, 46, },
+ { 2, 1, 0, 1, 44, 46, },
+ { 1, 1, 0, 1, 44, 46, },
+ { 0, 1, 0, 1, 48, 46, },
+ { 2, 1, 0, 1, 48, 46, },
+ { 1, 1, 0, 1, 48, 46, },
+ { 0, 1, 0, 1, 52, 46, },
+ { 2, 1, 0, 1, 52, 46, },
+ { 1, 1, 0, 1, 52, 46, },
+ { 0, 1, 0, 1, 56, 46, },
+ { 2, 1, 0, 1, 56, 46, },
+ { 1, 1, 0, 1, 56, 46, },
+ { 0, 1, 0, 1, 60, 46, },
+ { 2, 1, 0, 1, 60, 46, },
+ { 1, 1, 0, 1, 60, 46, },
+ { 0, 1, 0, 1, 64, 46, },
+ { 2, 1, 0, 1, 64, 46, },
+ { 1, 1, 0, 1, 64, 46, },
+ { 0, 1, 0, 1, 100, 46, },
+ { 2, 1, 0, 1, 100, 46, },
+ { 1, 1, 0, 1, 100, 46, },
+ { 0, 1, 0, 1, 104, 46, },
+ { 2, 1, 0, 1, 104, 46, },
+ { 1, 1, 0, 1, 104, 46, },
+ { 0, 1, 0, 1, 108, 46, },
+ { 2, 1, 0, 1, 108, 46, },
+ { 1, 1, 0, 1, 108, 46, },
+ { 0, 1, 0, 1, 112, 46, },
+ { 2, 1, 0, 1, 112, 46, },
+ { 1, 1, 0, 1, 112, 46, },
+ { 0, 1, 0, 1, 116, 46, },
+ { 2, 1, 0, 1, 116, 46, },
+ { 1, 1, 0, 1, 116, 46, },
+ { 0, 1, 0, 1, 120, 46, },
+ { 2, 1, 0, 1, 120, 46, },
+ { 1, 1, 0, 1, 120, 46, },
+ { 0, 1, 0, 1, 124, 46, },
+ { 2, 1, 0, 1, 124, 46, },
+ { 1, 1, 0, 1, 124, 46, },
+ { 0, 1, 0, 1, 128, 46, },
+ { 2, 1, 0, 1, 128, 46, },
+ { 1, 1, 0, 1, 128, 46, },
+ { 0, 1, 0, 1, 132, 46, },
+ { 2, 1, 0, 1, 132, 46, },
+ { 1, 1, 0, 1, 132, 46, },
+ { 0, 1, 0, 1, 136, 46, },
+ { 2, 1, 0, 1, 136, 46, },
+ { 1, 1, 0, 1, 136, 46, },
+ { 0, 1, 0, 1, 140, 46, },
+ { 2, 1, 0, 1, 140, 46, },
+ { 1, 1, 0, 1, 140, 46, },
+ { 0, 1, 0, 1, 149, 46, },
+ { 2, 1, 0, 1, 149, 46, },
+ { 1, 1, 0, 1, 149, 63, },
+ { 0, 1, 0, 1, 153, 46, },
+ { 2, 1, 0, 1, 153, 46, },
+ { 1, 1, 0, 1, 153, 63, },
+ { 0, 1, 0, 1, 157, 46, },
+ { 2, 1, 0, 1, 157, 46, },
+ { 1, 1, 0, 1, 157, 63, },
+ { 0, 1, 0, 1, 161, 46, },
+ { 2, 1, 0, 1, 161, 46, },
+ { 1, 1, 0, 1, 161, 63, },
+ { 0, 1, 0, 1, 165, 46, },
+ { 2, 1, 0, 1, 165, 46, },
+ { 1, 1, 0, 1, 165, 63, },
+ { 0, 1, 0, 2, 36, 46, },
+ { 2, 1, 0, 2, 36, 46, },
+ { 1, 1, 0, 2, 36, 46, },
+ { 0, 1, 0, 2, 40, 46, },
+ { 2, 1, 0, 2, 40, 46, },
+ { 1, 1, 0, 2, 40, 46, },
+ { 0, 1, 0, 2, 44, 46, },
+ { 2, 1, 0, 2, 44, 46, },
+ { 1, 1, 0, 2, 44, 46, },
+ { 0, 1, 0, 2, 48, 46, },
+ { 2, 1, 0, 2, 48, 46, },
+ { 1, 1, 0, 2, 48, 46, },
+ { 0, 1, 0, 2, 52, 46, },
+ { 2, 1, 0, 2, 52, 46, },
+ { 1, 1, 0, 2, 52, 46, },
+ { 0, 1, 0, 2, 56, 46, },
+ { 2, 1, 0, 2, 56, 46, },
+ { 1, 1, 0, 2, 56, 46, },
+ { 0, 1, 0, 2, 60, 46, },
+ { 2, 1, 0, 2, 60, 46, },
+ { 1, 1, 0, 2, 60, 46, },
+ { 0, 1, 0, 2, 64, 46, },
+ { 2, 1, 0, 2, 64, 46, },
+ { 1, 1, 0, 2, 64, 46, },
+ { 0, 1, 0, 2, 100, 46, },
+ { 2, 1, 0, 2, 100, 46, },
+ { 1, 1, 0, 2, 100, 46, },
+ { 0, 1, 0, 2, 104, 46, },
+ { 2, 1, 0, 2, 104, 46, },
+ { 1, 1, 0, 2, 104, 46, },
+ { 0, 1, 0, 2, 108, 46, },
+ { 2, 1, 0, 2, 108, 46, },
+ { 1, 1, 0, 2, 108, 46, },
+ { 0, 1, 0, 2, 112, 46, },
+ { 2, 1, 0, 2, 112, 46, },
+ { 1, 1, 0, 2, 112, 46, },
+ { 0, 1, 0, 2, 116, 46, },
+ { 2, 1, 0, 2, 116, 46, },
+ { 1, 1, 0, 2, 116, 46, },
+ { 0, 1, 0, 2, 120, 46, },
+ { 2, 1, 0, 2, 120, 46, },
+ { 1, 1, 0, 2, 120, 46, },
+ { 0, 1, 0, 2, 124, 46, },
+ { 2, 1, 0, 2, 124, 46, },
+ { 1, 1, 0, 2, 124, 46, },
+ { 0, 1, 0, 2, 128, 46, },
+ { 2, 1, 0, 2, 128, 46, },
+ { 1, 1, 0, 2, 128, 46, },
+ { 0, 1, 0, 2, 132, 46, },
+ { 2, 1, 0, 2, 132, 46, },
+ { 1, 1, 0, 2, 132, 46, },
+ { 0, 1, 0, 2, 136, 46, },
+ { 2, 1, 0, 2, 136, 46, },
+ { 1, 1, 0, 2, 136, 46, },
+ { 0, 1, 0, 2, 140, 46, },
+ { 2, 1, 0, 2, 140, 46, },
+ { 1, 1, 0, 2, 140, 46, },
+ { 0, 1, 0, 2, 149, 46, },
+ { 2, 1, 0, 2, 149, 46, },
+ { 1, 1, 0, 2, 149, 63, },
+ { 0, 1, 0, 2, 153, 46, },
+ { 2, 1, 0, 2, 153, 46, },
+ { 1, 1, 0, 2, 153, 63, },
+ { 0, 1, 0, 2, 157, 46, },
+ { 2, 1, 0, 2, 157, 46, },
+ { 1, 1, 0, 2, 157, 63, },
+ { 0, 1, 0, 2, 161, 46, },
+ { 2, 1, 0, 2, 161, 46, },
+ { 1, 1, 0, 2, 161, 63, },
+ { 0, 1, 0, 2, 165, 46, },
+ { 2, 1, 0, 2, 165, 46, },
+ { 1, 1, 0, 2, 165, 63, },
+ { 0, 1, 0, 3, 36, 46, },
+ { 2, 1, 0, 3, 36, 46, },
+ { 1, 1, 0, 3, 36, 46, },
+ { 0, 1, 0, 3, 40, 46, },
+ { 2, 1, 0, 3, 40, 46, },
+ { 1, 1, 0, 3, 40, 46, },
+ { 0, 1, 0, 3, 44, 46, },
+ { 2, 1, 0, 3, 44, 46, },
+ { 1, 1, 0, 3, 44, 46, },
+ { 0, 1, 0, 3, 48, 46, },
+ { 2, 1, 0, 3, 48, 46, },
+ { 1, 1, 0, 3, 48, 46, },
+ { 0, 1, 0, 3, 52, 46, },
+ { 2, 1, 0, 3, 52, 46, },
+ { 1, 1, 0, 3, 52, 46, },
+ { 0, 1, 0, 3, 56, 46, },
+ { 2, 1, 0, 3, 56, 46, },
+ { 1, 1, 0, 3, 56, 46, },
+ { 0, 1, 0, 3, 60, 46, },
+ { 2, 1, 0, 3, 60, 46, },
+ { 1, 1, 0, 3, 60, 46, },
+ { 0, 1, 0, 3, 64, 46, },
+ { 2, 1, 0, 3, 64, 46, },
+ { 1, 1, 0, 3, 64, 46, },
+ { 0, 1, 0, 3, 100, 46, },
+ { 2, 1, 0, 3, 100, 46, },
+ { 1, 1, 0, 3, 100, 46, },
+ { 0, 1, 0, 3, 104, 46, },
+ { 2, 1, 0, 3, 104, 46, },
+ { 1, 1, 0, 3, 104, 46, },
+ { 0, 1, 0, 3, 108, 46, },
+ { 2, 1, 0, 3, 108, 46, },
+ { 1, 1, 0, 3, 108, 46, },
+ { 0, 1, 0, 3, 112, 46, },
+ { 2, 1, 0, 3, 112, 46, },
+ { 1, 1, 0, 3, 112, 46, },
+ { 0, 1, 0, 3, 116, 46, },
+ { 2, 1, 0, 3, 116, 46, },
+ { 1, 1, 0, 3, 116, 46, },
+ { 0, 1, 0, 3, 120, 46, },
+ { 2, 1, 0, 3, 120, 46, },
+ { 1, 1, 0, 3, 120, 46, },
+ { 0, 1, 0, 3, 124, 46, },
+ { 2, 1, 0, 3, 124, 46, },
+ { 1, 1, 0, 3, 124, 46, },
+ { 0, 1, 0, 3, 128, 46, },
+ { 2, 1, 0, 3, 128, 46, },
+ { 1, 1, 0, 3, 128, 46, },
+ { 0, 1, 0, 3, 132, 46, },
+ { 2, 1, 0, 3, 132, 46, },
+ { 1, 1, 0, 3, 132, 46, },
+ { 0, 1, 0, 3, 136, 46, },
+ { 2, 1, 0, 3, 136, 46, },
+ { 1, 1, 0, 3, 136, 46, },
+ { 0, 1, 0, 3, 140, 46, },
+ { 2, 1, 0, 3, 140, 46, },
+ { 1, 1, 0, 3, 140, 46, },
+ { 0, 1, 0, 3, 149, 46, },
+ { 2, 1, 0, 3, 149, 46, },
+ { 1, 1, 0, 3, 149, 63, },
+ { 0, 1, 0, 3, 153, 46, },
+ { 2, 1, 0, 3, 153, 46, },
+ { 1, 1, 0, 3, 153, 63, },
+ { 0, 1, 0, 3, 157, 46, },
+ { 2, 1, 0, 3, 157, 46, },
+ { 1, 1, 0, 3, 157, 63, },
+ { 0, 1, 0, 3, 161, 46, },
+ { 2, 1, 0, 3, 161, 46, },
+ { 1, 1, 0, 3, 161, 63, },
+ { 0, 1, 0, 3, 165, 46, },
+ { 2, 1, 0, 3, 165, 46, },
+ { 1, 1, 0, 3, 165, 63, },
+ { 0, 1, 0, 6, 36, 46, },
+ { 2, 1, 0, 6, 36, 46, },
+ { 1, 1, 0, 6, 36, 46, },
+ { 0, 1, 0, 6, 40, 46, },
+ { 2, 1, 0, 6, 40, 46, },
+ { 1, 1, 0, 6, 40, 46, },
+ { 0, 1, 0, 6, 44, 46, },
+ { 2, 1, 0, 6, 44, 46, },
+ { 1, 1, 0, 6, 44, 46, },
+ { 0, 1, 0, 6, 48, 46, },
+ { 2, 1, 0, 6, 48, 46, },
+ { 1, 1, 0, 6, 48, 46, },
+ { 0, 1, 0, 6, 52, 46, },
+ { 2, 1, 0, 6, 52, 46, },
+ { 1, 1, 0, 6, 52, 46, },
+ { 0, 1, 0, 6, 56, 46, },
+ { 2, 1, 0, 6, 56, 46, },
+ { 1, 1, 0, 6, 56, 46, },
+ { 0, 1, 0, 6, 60, 46, },
+ { 2, 1, 0, 6, 60, 46, },
+ { 1, 1, 0, 6, 60, 46, },
+ { 0, 1, 0, 6, 64, 46, },
+ { 2, 1, 0, 6, 64, 46, },
+ { 1, 1, 0, 6, 64, 46, },
+ { 0, 1, 0, 6, 100, 46, },
+ { 2, 1, 0, 6, 100, 46, },
+ { 1, 1, 0, 6, 100, 46, },
+ { 0, 1, 0, 6, 104, 46, },
+ { 2, 1, 0, 6, 104, 46, },
+ { 1, 1, 0, 6, 104, 46, },
+ { 0, 1, 0, 6, 108, 46, },
+ { 2, 1, 0, 6, 108, 46, },
+ { 1, 1, 0, 6, 108, 46, },
+ { 0, 1, 0, 6, 112, 46, },
+ { 2, 1, 0, 6, 112, 46, },
+ { 1, 1, 0, 6, 112, 46, },
+ { 0, 1, 0, 6, 116, 46, },
+ { 2, 1, 0, 6, 116, 46, },
+ { 1, 1, 0, 6, 116, 46, },
+ { 0, 1, 0, 6, 120, 46, },
+ { 2, 1, 0, 6, 120, 46, },
+ { 1, 1, 0, 6, 120, 46, },
+ { 0, 1, 0, 6, 124, 46, },
+ { 2, 1, 0, 6, 124, 46, },
+ { 1, 1, 0, 6, 124, 46, },
+ { 0, 1, 0, 6, 128, 46, },
+ { 2, 1, 0, 6, 128, 46, },
+ { 1, 1, 0, 6, 128, 46, },
+ { 0, 1, 0, 6, 132, 46, },
+ { 2, 1, 0, 6, 132, 46, },
+ { 1, 1, 0, 6, 132, 46, },
+ { 0, 1, 0, 6, 136, 46, },
+ { 2, 1, 0, 6, 136, 46, },
+ { 1, 1, 0, 6, 136, 46, },
+ { 0, 1, 0, 6, 140, 46, },
+ { 2, 1, 0, 6, 140, 46, },
+ { 1, 1, 0, 6, 140, 46, },
+ { 0, 1, 0, 6, 149, 46, },
+ { 2, 1, 0, 6, 149, 46, },
+ { 1, 1, 0, 6, 149, 63, },
+ { 0, 1, 0, 6, 153, 46, },
+ { 2, 1, 0, 6, 153, 46, },
+ { 1, 1, 0, 6, 153, 63, },
+ { 0, 1, 0, 6, 157, 46, },
+ { 2, 1, 0, 6, 157, 46, },
+ { 1, 1, 0, 6, 157, 63, },
+ { 0, 1, 0, 6, 161, 46, },
+ { 2, 1, 0, 6, 161, 46, },
+ { 1, 1, 0, 6, 161, 63, },
+ { 0, 1, 0, 6, 165, 46, },
+ { 2, 1, 0, 6, 165, 46, },
+ { 1, 1, 0, 6, 165, 63, },
+ { 0, 1, 0, 7, 36, 46, },
+ { 2, 1, 0, 7, 36, 46, },
+ { 1, 1, 0, 7, 36, 46, },
+ { 0, 1, 0, 7, 40, 46, },
+ { 2, 1, 0, 7, 40, 46, },
+ { 1, 1, 0, 7, 40, 46, },
+ { 0, 1, 0, 7, 44, 46, },
+ { 2, 1, 0, 7, 44, 46, },
+ { 1, 1, 0, 7, 44, 46, },
+ { 0, 1, 0, 7, 48, 46, },
+ { 2, 1, 0, 7, 48, 46, },
+ { 1, 1, 0, 7, 48, 46, },
+ { 0, 1, 0, 7, 52, 46, },
+ { 2, 1, 0, 7, 52, 46, },
+ { 1, 1, 0, 7, 52, 46, },
+ { 0, 1, 0, 7, 56, 46, },
+ { 2, 1, 0, 7, 56, 46, },
+ { 1, 1, 0, 7, 56, 46, },
+ { 0, 1, 0, 7, 60, 46, },
+ { 2, 1, 0, 7, 60, 46, },
+ { 1, 1, 0, 7, 60, 46, },
+ { 0, 1, 0, 7, 64, 46, },
+ { 2, 1, 0, 7, 64, 46, },
+ { 1, 1, 0, 7, 64, 46, },
+ { 0, 1, 0, 7, 100, 46, },
+ { 2, 1, 0, 7, 100, 46, },
+ { 1, 1, 0, 7, 100, 46, },
+ { 0, 1, 0, 7, 104, 46, },
+ { 2, 1, 0, 7, 104, 46, },
+ { 1, 1, 0, 7, 104, 46, },
+ { 0, 1, 0, 7, 108, 46, },
+ { 2, 1, 0, 7, 108, 46, },
+ { 1, 1, 0, 7, 108, 46, },
+ { 0, 1, 0, 7, 112, 46, },
+ { 2, 1, 0, 7, 112, 46, },
+ { 1, 1, 0, 7, 112, 46, },
+ { 0, 1, 0, 7, 116, 46, },
+ { 2, 1, 0, 7, 116, 46, },
+ { 1, 1, 0, 7, 116, 46, },
+ { 0, 1, 0, 7, 120, 46, },
+ { 2, 1, 0, 7, 120, 46, },
+ { 1, 1, 0, 7, 120, 46, },
+ { 0, 1, 0, 7, 124, 46, },
+ { 2, 1, 0, 7, 124, 46, },
+ { 1, 1, 0, 7, 124, 46, },
+ { 0, 1, 0, 7, 128, 46, },
+ { 2, 1, 0, 7, 128, 46, },
+ { 1, 1, 0, 7, 128, 46, },
+ { 0, 1, 0, 7, 132, 46, },
+ { 2, 1, 0, 7, 132, 46, },
+ { 1, 1, 0, 7, 132, 46, },
+ { 0, 1, 0, 7, 136, 46, },
+ { 2, 1, 0, 7, 136, 46, },
+ { 1, 1, 0, 7, 136, 46, },
+ { 0, 1, 0, 7, 140, 46, },
+ { 2, 1, 0, 7, 140, 46, },
+ { 1, 1, 0, 7, 140, 46, },
+ { 0, 1, 0, 7, 149, 46, },
+ { 2, 1, 0, 7, 149, 46, },
+ { 1, 1, 0, 7, 149, 63, },
+ { 0, 1, 0, 7, 153, 46, },
+ { 2, 1, 0, 7, 153, 46, },
+ { 1, 1, 0, 7, 153, 63, },
+ { 0, 1, 0, 7, 157, 46, },
+ { 2, 1, 0, 7, 157, 46, },
+ { 1, 1, 0, 7, 157, 63, },
+ { 0, 1, 0, 7, 161, 46, },
+ { 2, 1, 0, 7, 161, 46, },
+ { 1, 1, 0, 7, 161, 63, },
+ { 0, 1, 0, 7, 165, 46, },
+ { 2, 1, 0, 7, 165, 46, },
+ { 1, 1, 0, 7, 165, 63, },
+ { 0, 1, 1, 2, 38, 46, },
+ { 2, 1, 1, 2, 38, 46, },
+ { 1, 1, 1, 2, 38, 46, },
+ { 0, 1, 1, 2, 46, 46, },
+ { 2, 1, 1, 2, 46, 46, },
+ { 1, 1, 1, 2, 46, 46, },
+ { 0, 1, 1, 2, 54, 46, },
+ { 2, 1, 1, 2, 54, 46, },
+ { 1, 1, 1, 2, 54, 46, },
+ { 0, 1, 1, 2, 62, 46, },
+ { 2, 1, 1, 2, 62, 46, },
+ { 1, 1, 1, 2, 62, 46, },
+ { 0, 1, 1, 2, 102, 46, },
+ { 2, 1, 1, 2, 102, 46, },
+ { 1, 1, 1, 2, 102, 46, },
+ { 0, 1, 1, 2, 110, 46, },
+ { 2, 1, 1, 2, 110, 46, },
+ { 1, 1, 1, 2, 110, 46, },
+ { 0, 1, 1, 2, 118, 46, },
+ { 2, 1, 1, 2, 118, 46, },
+ { 1, 1, 1, 2, 118, 46, },
+ { 0, 1, 1, 2, 126, 46, },
+ { 2, 1, 1, 2, 126, 46, },
+ { 1, 1, 1, 2, 126, 46, },
+ { 0, 1, 1, 2, 134, 46, },
+ { 2, 1, 1, 2, 134, 46, },
+ { 1, 1, 1, 2, 134, 46, },
+ { 0, 1, 1, 2, 151, 46, },
+ { 2, 1, 1, 2, 151, 46, },
+ { 1, 1, 1, 2, 151, 63, },
+ { 0, 1, 1, 2, 159, 46, },
+ { 2, 1, 1, 2, 159, 46, },
+ { 1, 1, 1, 2, 159, 63, },
+ { 0, 1, 1, 3, 38, 46, },
+ { 2, 1, 1, 3, 38, 46, },
+ { 1, 1, 1, 3, 38, 46, },
+ { 0, 1, 1, 3, 46, 46, },
+ { 2, 1, 1, 3, 46, 46, },
+ { 1, 1, 1, 3, 46, 46, },
+ { 0, 1, 1, 3, 54, 46, },
+ { 2, 1, 1, 3, 54, 46, },
+ { 1, 1, 1, 3, 54, 46, },
+ { 0, 1, 1, 3, 62, 46, },
+ { 2, 1, 1, 3, 62, 46, },
+ { 1, 1, 1, 3, 62, 46, },
+ { 0, 1, 1, 3, 102, 46, },
+ { 2, 1, 1, 3, 102, 46, },
+ { 1, 1, 1, 3, 102, 46, },
+ { 0, 1, 1, 3, 110, 46, },
+ { 2, 1, 1, 3, 110, 46, },
+ { 1, 1, 1, 3, 110, 46, },
+ { 0, 1, 1, 3, 118, 46, },
+ { 2, 1, 1, 3, 118, 46, },
+ { 1, 1, 1, 3, 118, 46, },
+ { 0, 1, 1, 3, 126, 46, },
+ { 2, 1, 1, 3, 126, 46, },
+ { 1, 1, 1, 3, 126, 46, },
+ { 0, 1, 1, 3, 134, 46, },
+ { 2, 1, 1, 3, 134, 46, },
+ { 1, 1, 1, 3, 134, 46, },
+ { 0, 1, 1, 3, 151, 46, },
+ { 2, 1, 1, 3, 151, 46, },
+ { 1, 1, 1, 3, 151, 63, },
+ { 0, 1, 1, 3, 159, 46, },
+ { 2, 1, 1, 3, 159, 46, },
+ { 1, 1, 1, 3, 159, 63, },
+ { 0, 1, 1, 6, 38, 46, },
+ { 2, 1, 1, 6, 38, 46, },
+ { 1, 1, 1, 6, 38, 46, },
+ { 0, 1, 1, 6, 46, 46, },
+ { 2, 1, 1, 6, 46, 46, },
+ { 1, 1, 1, 6, 46, 46, },
+ { 0, 1, 1, 6, 54, 46, },
+ { 2, 1, 1, 6, 54, 46, },
+ { 1, 1, 1, 6, 54, 46, },
+ { 0, 1, 1, 6, 62, 46, },
+ { 2, 1, 1, 6, 62, 46, },
+ { 1, 1, 1, 6, 62, 46, },
+ { 0, 1, 1, 6, 102, 46, },
+ { 2, 1, 1, 6, 102, 46, },
+ { 1, 1, 1, 6, 102, 46, },
+ { 0, 1, 1, 6, 110, 46, },
+ { 2, 1, 1, 6, 110, 46, },
+ { 1, 1, 1, 6, 110, 46, },
+ { 0, 1, 1, 6, 118, 46, },
+ { 2, 1, 1, 6, 118, 46, },
+ { 1, 1, 1, 6, 118, 46, },
+ { 0, 1, 1, 6, 126, 46, },
+ { 2, 1, 1, 6, 126, 46, },
+ { 1, 1, 1, 6, 126, 46, },
+ { 0, 1, 1, 6, 134, 46, },
+ { 2, 1, 1, 6, 134, 46, },
+ { 1, 1, 1, 6, 134, 46, },
+ { 0, 1, 1, 6, 151, 46, },
+ { 2, 1, 1, 6, 151, 46, },
+ { 1, 1, 1, 6, 151, 63, },
+ { 0, 1, 1, 6, 159, 46, },
+ { 2, 1, 1, 6, 159, 46, },
+ { 1, 1, 1, 6, 159, 63, },
+ { 0, 1, 1, 7, 38, 46, },
+ { 2, 1, 1, 7, 38, 46, },
+ { 1, 1, 1, 7, 38, 46, },
+ { 0, 1, 1, 7, 46, 46, },
+ { 2, 1, 1, 7, 46, 46, },
+ { 1, 1, 1, 7, 46, 46, },
+ { 0, 1, 1, 7, 54, 46, },
+ { 2, 1, 1, 7, 54, 46, },
+ { 1, 1, 1, 7, 54, 46, },
+ { 0, 1, 1, 7, 62, 46, },
+ { 2, 1, 1, 7, 62, 46, },
+ { 1, 1, 1, 7, 62, 46, },
+ { 0, 1, 1, 7, 102, 46, },
+ { 2, 1, 1, 7, 102, 46, },
+ { 1, 1, 1, 7, 102, 46, },
+ { 0, 1, 1, 7, 110, 46, },
+ { 2, 1, 1, 7, 110, 46, },
+ { 1, 1, 1, 7, 110, 46, },
+ { 0, 1, 1, 7, 118, 46, },
+ { 2, 1, 1, 7, 118, 46, },
+ { 1, 1, 1, 7, 118, 46, },
+ { 0, 1, 1, 7, 126, 46, },
+ { 2, 1, 1, 7, 126, 46, },
+ { 1, 1, 1, 7, 126, 46, },
+ { 0, 1, 1, 7, 134, 46, },
+ { 2, 1, 1, 7, 134, 46, },
+ { 1, 1, 1, 7, 134, 46, },
+ { 0, 1, 1, 7, 151, 46, },
+ { 2, 1, 1, 7, 151, 46, },
+ { 1, 1, 1, 7, 151, 63, },
+ { 0, 1, 1, 7, 159, 46, },
+ { 2, 1, 1, 7, 159, 46, },
+ { 1, 1, 1, 7, 159, 63, },
+ { 0, 1, 2, 4, 42, 46, },
+ { 2, 1, 2, 4, 42, 46, },
+ { 1, 1, 2, 4, 42, 46, },
+ { 0, 1, 2, 4, 58, 46, },
+ { 2, 1, 2, 4, 58, 46, },
+ { 1, 1, 2, 4, 58, 46, },
+ { 0, 1, 2, 4, 106, 46, },
+ { 2, 1, 2, 4, 106, 46, },
+ { 1, 1, 2, 4, 106, 46, },
+ { 0, 1, 2, 4, 122, 46, },
+ { 2, 1, 2, 4, 122, 46, },
+ { 1, 1, 2, 4, 122, 46, },
+ { 0, 1, 2, 4, 155, 46, },
+ { 2, 1, 2, 4, 155, 46, },
+ { 1, 1, 2, 4, 155, 63, },
+ { 0, 1, 2, 5, 42, 46, },
+ { 2, 1, 2, 5, 42, 46, },
+ { 1, 1, 2, 5, 42, 46, },
+ { 0, 1, 2, 5, 58, 46, },
+ { 2, 1, 2, 5, 58, 46, },
+ { 1, 1, 2, 5, 58, 46, },
+ { 0, 1, 2, 5, 106, 46, },
+ { 2, 1, 2, 5, 106, 46, },
+ { 1, 1, 2, 5, 106, 46, },
+ { 0, 1, 2, 5, 122, 46, },
+ { 2, 1, 2, 5, 122, 46, },
+ { 1, 1, 2, 5, 122, 46, },
+ { 0, 1, 2, 5, 155, 46, },
+ { 2, 1, 2, 5, 155, 46, },
+ { 1, 1, 2, 5, 155, 63, },
+ { 0, 1, 2, 8, 42, 46, },
+ { 2, 1, 2, 8, 42, 46, },
+ { 1, 1, 2, 8, 42, 46, },
+ { 0, 1, 2, 8, 58, 46, },
+ { 2, 1, 2, 8, 58, 46, },
+ { 1, 1, 2, 8, 58, 46, },
+ { 0, 1, 2, 8, 106, 46, },
+ { 2, 1, 2, 8, 106, 46, },
+ { 1, 1, 2, 8, 106, 46, },
+ { 0, 1, 2, 8, 122, 46, },
+ { 2, 1, 2, 8, 122, 46, },
+ { 1, 1, 2, 8, 122, 46, },
+ { 0, 1, 2, 8, 155, 46, },
+ { 2, 1, 2, 8, 155, 46, },
+ { 1, 1, 2, 8, 155, 63, },
+ { 0, 1, 2, 9, 42, 46, },
+ { 2, 1, 2, 9, 42, 46, },
+ { 1, 1, 2, 9, 42, 46, },
+ { 0, 1, 2, 9, 58, 46, },
+ { 2, 1, 2, 9, 58, 46, },
+ { 1, 1, 2, 9, 58, 46, },
+ { 0, 1, 2, 9, 106, 46, },
+ { 2, 1, 2, 9, 106, 46, },
+ { 1, 1, 2, 9, 106, 46, },
+ { 0, 1, 2, 9, 122, 46, },
+ { 2, 1, 2, 9, 122, 46, },
+ { 1, 1, 2, 9, 122, 46, },
+ { 0, 1, 2, 9, 155, 46, },
+ { 2, 1, 2, 9, 155, 46, },
+ { 1, 1, 2, 9, 155, 63, },
+};
+
+RTW_DECL_TABLE_TXPWR_LMT(rtw8814a_txpwr_lmt_type8);
+
+static const u8
+rtw8814a_pwrtrk_5gd_n[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = {
+ {0, 1, 1, 2, 3, 3, 4, 5, 5, 6, 7, 7, 8, 9, 9, 10, 11,
+ 11, 12, 13, 13, 14, 15, 15, 16, 17, 17, 18, 19, 19},
+ {0, 1, 1, 2, 2, 3, 4, 4, 5, 6, 6, 7, 7, 8, 9, 9, 10, 10,
+ 11, 12, 12, 13, 13, 14, 15, 15, 16, 17, 17, 18},
+ {0, 1, 1, 2, 3, 3, 4, 5, 5, 6, 6, 7, 8, 8, 9, 10, 10,
+ 11, 12, 12, 13, 14, 14, 15, 16, 16, 17, 17, 18, 19},
+};
+
+static const u8
+rtw8814a_pwrtrk_5gd_p[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = {
+ {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
+ 17, 18, 19, 20, 21, 22, 23, 24, 25, 25, 25, 25, 25},
+ {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
+ 17, 17, 18, 19, 20, 21, 22, 23, 24, 25, 25, 25, 25},
+ {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
+ 17, 17, 18, 19, 20, 21, 22, 23, 24, 25, 25, 25, 25},
+};
+
+static const u8
+rtw8814a_pwrtrk_5gc_n[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = {
+ {0, 1, 1, 2, 2, 3, 4, 4, 5, 5, 6, 7, 7, 8, 8, 9, 10, 10,
+ 11, 12, 13, 14, 15, 15, 15, 15, 16, 16, 17, 18},
+ {0, 1, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 8, 9, 10, 10, 11,
+ 12, 12, 13, 14, 15, 15, 16, 17, 17, 18, 19, 19, 20},
+ {0, 1, 1, 2, 3, 4, 4, 5, 6, 6, 7, 8, 8, 9, 10, 11, 11,
+ 12, 13, 13, 14, 14, 15, 16, 17, 18, 18, 19, 20, 20},
+};
+
+static const u8
+rtw8814a_pwrtrk_5gc_p[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = {
+ {0, 1, 2, 3, 4, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
+ 16, 16, 17, 18, 19, 20, 21, 21, 22, 23, 24, 25, 25},
+ {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
+ 17, 17, 18, 19, 20, 21, 22, 23, 24, 24, 25, 25, 25},
+ {0, 1, 2, 3, 4, 5, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
+ 16, 16, 17, 18, 19, 20, 21, 22, 23, 23, 24, 25, 25},
+};
+
+static const u8
+rtw8814a_pwrtrk_5gb_n[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = {
+ {0, 1, 1, 2, 2, 3, 4, 4, 5, 5, 6, 7, 7, 8, 8, 9, 10, 10,
+ 11, 11, 12, 13, 13, 14, 14, 15, 15, 16, 17, 17},
+ {0, 1, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 8, 9, 10, 10, 11,
+ 12, 12, 13, 14, 15, 15, 16, 17, 17, 18, 19, 19, 20},
+ {0, 1, 1, 2, 3, 4, 4, 5, 6, 6, 7, 8, 8, 9, 10, 11, 11,
+ 12, 13, 13, 14, 14, 15, 16, 17, 18, 18, 19, 20, 20},
+};
+
+static const u8
+rtw8814a_pwrtrk_5gb_p[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = {
+ {0, 1, 2, 3, 3, 4, 5, 6, 7, 8, 8, 9, 10, 11, 12, 13, 14,
+ 15, 15, 16, 17, 18, 18, 19, 20, 21, 22, 23, 23, 24},
+ {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
+ 17, 18, 18, 19, 20, 21, 22, 23, 24, 25, 25, 25, 25},
+ {0, 1, 2, 3, 4, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
+ 16, 16, 17, 18, 19, 20, 20, 21, 22, 23, 24, 25, 25},
+};
+
+static const u8
+rtw8814a_pwrtrk_5ga_n[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = {
+ {0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8,
+ 8, 9, 9, 10, 11, 11, 11, 11, 12, 12, 13, 13, 14},
+ {0, 1, 1, 2, 3, 4, 4, 5, 6, 6, 7, 8, 9, 9, 10, 11, 11,
+ 12, 13, 14, 14, 15, 16, 16, 17, 18, 19, 19, 20, 21},
+ {0, 1, 1, 2, 3, 3, 4, 5, 5, 6, 6, 7, 8, 8, 9, 10, 10,
+ 11, 12, 12, 13, 14, 14, 15, 16, 16, 17, 17, 18, 19},
+};
+
+static const u8
+rtw8814a_pwrtrk_5ga_p[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = {
+ {0, 1, 2, 2, 3, 4, 5, 6, 7, 7, 8, 9, 10, 11, 12, 12, 13,
+ 14, 15, 16, 16, 17, 18, 19, 20, 21, 21, 22, 23, 24},
+ {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
+ 17, 18, 19, 20, 21, 22, 23, 23, 24, 25, 25, 25, 25},
+ {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
+ 17, 17, 18, 19, 20, 21, 22, 23, 24, 25, 25, 25, 25},
+};
+
+static const u8 rtw8814a_pwrtrk_2gd_n[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 0, 1, 1, 2, 2, 3, 3, 3, 4, 4, 5, 5, 6, 6, 6, 7,
+ 7, 8, 8, 9, 9, 9, 10, 10, 11, 11, 12, 12, 12
+};
+
+static const u8 rtw8814a_pwrtrk_2gd_p[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 4, 5, 5, 6, 6, 7, 7,
+ 8, 8, 9, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13
+};
+
+static const u8 rtw8814a_pwrtrk_2gc_n[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 0, 1, 1, 2, 2, 2, 3, 3, 4, 4, 5, 5, 5, 6, 6, 7,
+ 7, 7, 8, 8, 9, 9, 10, 10, 10, 11, 11, 12, 12
+};
+
+static const u8 rtw8814a_pwrtrk_2gc_p[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 4, 5, 5, 6, 6, 7, 7,
+ 8, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 12, 13
+};
+
+static const u8 rtw8814a_pwrtrk_2gb_n[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 0, 1, 1, 2, 2, 3, 3, 3, 4, 4, 5, 5, 5, 6, 6, 7,
+ 7, 8, 8, 8, 9, 9, 10, 10, 11, 11, 11, 12, 12
+};
+
+static const u8 rtw8814a_pwrtrk_2gb_p[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 4, 5, 5, 6, 6, 7, 7,
+ 8, 8, 9, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13
+};
+
+static const u8 rtw8814a_pwrtrk_2ga_n[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 4, 5, 5, 6, 6, 7, 7,
+ 7, 8, 8, 9, 9, 10, 10, 11, 11, 11, 12, 12, 13
+};
+
+static const u8 rtw8814a_pwrtrk_2ga_p[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8,
+ 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, 14, 14
+};
+
+static const u8 rtw8814a_pwrtrk_2g_cck_d_n[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 0, 1, 1, 2, 2, 3, 3, 3, 4, 4, 5, 5, 5, 6, 6, 7,
+ 7, 8, 8, 8, 9, 9, 10, 10, 10, 11, 11, 12, 12
+};
+
+static const u8 rtw8814a_pwrtrk_2g_cck_d_p[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 4, 5, 5, 6, 6, 7, 7,
+ 8, 8, 9, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13
+};
+
+static const u8 rtw8814a_pwrtrk_2g_cck_c_n[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 0, 1, 1, 2, 2, 2, 3, 3, 4, 4, 4, 5, 5, 6, 6, 6,
+ 7, 7, 8, 8, 8, 9, 9, 10, 10, 10, 11, 11, 12
+};
+
+static const u8 rtw8814a_pwrtrk_2g_cck_c_p[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 4, 5, 5, 6, 6, 7, 7,
+ 7, 8, 8, 9, 9, 10, 10, 11, 11, 11, 12, 12, 13
+};
+
+static const u8 rtw8814a_pwrtrk_2g_cck_b_n[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 0, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 5, 5, 5, 6,
+ 6, 6, 7, 7, 8, 8, 8, 9, 9, 10, 10, 10, 11, 11
+};
+
+static const u8 rtw8814a_pwrtrk_2g_cck_b_p[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 0, 1, 1, 2, 2, 3, 3, 3, 4, 4, 5, 5, 6, 6, 6, 7,
+ 7, 8, 8, 9, 9, 9, 10, 10, 11, 11, 12, 12, 12
+};
+
+static const u8 rtw8814a_pwrtrk_2g_cck_a_n[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 0, 1, 1, 2, 2, 3, 3, 3, 4, 4, 5, 5, 5, 6, 6, 7,
+ 7, 8, 8, 8, 9, 9, 10, 10, 11, 11, 11, 12, 12
+};
+
+static const u8 rtw8814a_pwrtrk_2g_cck_a_p[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 7,
+ 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, 14
+};
+
+const struct rtw_pwr_track_tbl rtw8814a_rtw_pwrtrk_tbl = {
+ .pwrtrk_5gd_n[RTW_PWR_TRK_5G_1] = rtw8814a_pwrtrk_5gd_n[RTW_PWR_TRK_5G_1],
+ .pwrtrk_5gd_n[RTW_PWR_TRK_5G_2] = rtw8814a_pwrtrk_5gd_n[RTW_PWR_TRK_5G_2],
+ .pwrtrk_5gd_n[RTW_PWR_TRK_5G_3] = rtw8814a_pwrtrk_5gd_n[RTW_PWR_TRK_5G_3],
+ .pwrtrk_5gd_p[RTW_PWR_TRK_5G_1] = rtw8814a_pwrtrk_5gd_p[RTW_PWR_TRK_5G_1],
+ .pwrtrk_5gd_p[RTW_PWR_TRK_5G_2] = rtw8814a_pwrtrk_5gd_p[RTW_PWR_TRK_5G_2],
+ .pwrtrk_5gd_p[RTW_PWR_TRK_5G_3] = rtw8814a_pwrtrk_5gd_p[RTW_PWR_TRK_5G_3],
+ .pwrtrk_5gc_n[RTW_PWR_TRK_5G_1] = rtw8814a_pwrtrk_5gc_n[RTW_PWR_TRK_5G_1],
+ .pwrtrk_5gc_n[RTW_PWR_TRK_5G_2] = rtw8814a_pwrtrk_5gc_n[RTW_PWR_TRK_5G_2],
+ .pwrtrk_5gc_n[RTW_PWR_TRK_5G_3] = rtw8814a_pwrtrk_5gc_n[RTW_PWR_TRK_5G_3],
+ .pwrtrk_5gc_p[RTW_PWR_TRK_5G_1] = rtw8814a_pwrtrk_5gc_p[RTW_PWR_TRK_5G_1],
+ .pwrtrk_5gc_p[RTW_PWR_TRK_5G_2] = rtw8814a_pwrtrk_5gc_p[RTW_PWR_TRK_5G_2],
+ .pwrtrk_5gc_p[RTW_PWR_TRK_5G_3] = rtw8814a_pwrtrk_5gc_p[RTW_PWR_TRK_5G_3],
+ .pwrtrk_5gb_n[RTW_PWR_TRK_5G_1] = rtw8814a_pwrtrk_5gb_n[RTW_PWR_TRK_5G_1],
+ .pwrtrk_5gb_n[RTW_PWR_TRK_5G_2] = rtw8814a_pwrtrk_5gb_n[RTW_PWR_TRK_5G_2],
+ .pwrtrk_5gb_n[RTW_PWR_TRK_5G_3] = rtw8814a_pwrtrk_5gb_n[RTW_PWR_TRK_5G_3],
+ .pwrtrk_5gb_p[RTW_PWR_TRK_5G_1] = rtw8814a_pwrtrk_5gb_p[RTW_PWR_TRK_5G_1],
+ .pwrtrk_5gb_p[RTW_PWR_TRK_5G_2] = rtw8814a_pwrtrk_5gb_p[RTW_PWR_TRK_5G_2],
+ .pwrtrk_5gb_p[RTW_PWR_TRK_5G_3] = rtw8814a_pwrtrk_5gb_p[RTW_PWR_TRK_5G_3],
+ .pwrtrk_5ga_n[RTW_PWR_TRK_5G_1] = rtw8814a_pwrtrk_5ga_n[RTW_PWR_TRK_5G_1],
+ .pwrtrk_5ga_n[RTW_PWR_TRK_5G_2] = rtw8814a_pwrtrk_5ga_n[RTW_PWR_TRK_5G_2],
+ .pwrtrk_5ga_n[RTW_PWR_TRK_5G_3] = rtw8814a_pwrtrk_5ga_n[RTW_PWR_TRK_5G_3],
+ .pwrtrk_5ga_p[RTW_PWR_TRK_5G_1] = rtw8814a_pwrtrk_5ga_p[RTW_PWR_TRK_5G_1],
+ .pwrtrk_5ga_p[RTW_PWR_TRK_5G_2] = rtw8814a_pwrtrk_5ga_p[RTW_PWR_TRK_5G_2],
+ .pwrtrk_5ga_p[RTW_PWR_TRK_5G_3] = rtw8814a_pwrtrk_5ga_p[RTW_PWR_TRK_5G_3],
+ .pwrtrk_2gd_n = rtw8814a_pwrtrk_2gd_n,
+ .pwrtrk_2gd_p = rtw8814a_pwrtrk_2gd_p,
+ .pwrtrk_2gc_n = rtw8814a_pwrtrk_2gc_n,
+ .pwrtrk_2gc_p = rtw8814a_pwrtrk_2gc_p,
+ .pwrtrk_2gb_n = rtw8814a_pwrtrk_2gb_n,
+ .pwrtrk_2gb_p = rtw8814a_pwrtrk_2gb_p,
+ .pwrtrk_2ga_n = rtw8814a_pwrtrk_2ga_n,
+ .pwrtrk_2ga_p = rtw8814a_pwrtrk_2ga_p,
+ .pwrtrk_2g_cckd_n = rtw8814a_pwrtrk_2g_cck_d_n,
+ .pwrtrk_2g_cckd_p = rtw8814a_pwrtrk_2g_cck_d_p,
+ .pwrtrk_2g_cckc_n = rtw8814a_pwrtrk_2g_cck_c_n,
+ .pwrtrk_2g_cckc_p = rtw8814a_pwrtrk_2g_cck_c_p,
+ .pwrtrk_2g_cckb_n = rtw8814a_pwrtrk_2g_cck_b_n,
+ .pwrtrk_2g_cckb_p = rtw8814a_pwrtrk_2g_cck_b_p,
+ .pwrtrk_2g_ccka_n = rtw8814a_pwrtrk_2g_cck_a_n,
+ .pwrtrk_2g_ccka_p = rtw8814a_pwrtrk_2g_cck_a_p,
+};
+
+static const u8
+rtw8814a_pwrtrk_type0_5gd_n[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = {
+ {0, 0, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8,
+ 8, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10},
+ {0, 0, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8,
+ 8, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10},
+ {0, 0, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8,
+ 8, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10},
+};
+
+static const u8
+rtw8814a_pwrtrk_type0_5gd_p[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = {
+ {0, 0, 1, 1, 2, 3, 3, 4, 5, 5, 6, 6, 7, 8, 8, 9, 10, 10,
+ 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12},
+ {0, 0, 1, 1, 2, 3, 3, 4, 5, 5, 6, 6, 7, 8, 8, 9, 10, 10,
+ 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12},
+ {0, 0, 1, 1, 2, 3, 3, 4, 5, 5, 6, 6, 7, 8, 8, 9, 10, 10,
+ 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12},
+};
+
+static const u8
+rtw8814a_pwrtrk_type0_5gc_n[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = {
+ {0, 0, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8,
+ 8, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10},
+ {0, 0, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8,
+ 8, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10},
+ {0, 0, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8,
+ 8, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10},
+};
+
+static const u8
+rtw8814a_pwrtrk_type0_5gc_p[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = {
+ {0, 0, 1, 1, 2, 3, 3, 4, 5, 5, 6, 6, 7, 8, 8, 9, 10, 10,
+ 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12},
+ {0, 0, 1, 1, 2, 3, 3, 4, 5, 5, 6, 6, 7, 8, 8, 9, 10, 10,
+ 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12},
+ {0, 0, 1, 1, 2, 3, 3, 4, 5, 5, 6, 6, 7, 8, 8, 9, 10, 10,
+ 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12},
+};
+
+static const u8
+rtw8814a_pwrtrk_type0_5gb_n[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = {
+ {0, 0, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8,
+ 8, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10},
+ {0, 0, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8,
+ 8, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10},
+ {0, 0, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8,
+ 8, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10},
+};
+
+static const u8
+rtw8814a_pwrtrk_type0_5gb_p[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = {
+ {0, 0, 1, 1, 2, 3, 3, 4, 5, 5, 6, 6, 7, 8, 8, 9, 10, 10,
+ 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12},
+ {0, 0, 1, 1, 2, 3, 3, 4, 5, 5, 6, 6, 7, 8, 8, 9, 10, 10,
+ 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12},
+ {0, 0, 1, 1, 2, 3, 3, 4, 5, 5, 6, 6, 7, 8, 8, 9, 10, 10,
+ 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12},
+};
+
+static const u8
+rtw8814a_pwrtrk_type0_5ga_n[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = {
+ {0, 0, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8,
+ 8, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10},
+ {0, 0, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8,
+ 8, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10},
+ {0, 0, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8,
+ 8, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10},
+};
+
+static const u8
+rtw8814a_pwrtrk_type0_5ga_p[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = {
+ {0, 0, 1, 1, 2, 3, 3, 4, 5, 5, 6, 6, 7, 8, 8, 9, 10, 10,
+ 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12},
+ {0, 0, 1, 1, 2, 3, 3, 4, 5, 5, 6, 6, 7, 8, 8, 9, 10, 10,
+ 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12},
+ {0, 0, 1, 1, 2, 3, 3, 4, 5, 5, 6, 6, 7, 8, 8, 9, 10, 10,
+ 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12},
+};
+
+static const u8 rtw8814a_pwrtrk_type0_2gd_n[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 5, 6, 7, 7,
+ 7, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9
+};
+
+static const u8 rtw8814a_pwrtrk_type0_2gd_p[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7,
+ 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9
+};
+
+static const u8 rtw8814a_pwrtrk_type0_2gc_n[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 5, 6, 7, 7,
+ 7, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9
+};
+
+static const u8 rtw8814a_pwrtrk_type0_2gc_p[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7,
+ 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9
+};
+
+static const u8 rtw8814a_pwrtrk_type0_2gb_n[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 5, 6, 7, 7,
+ 7, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9
+};
+
+static const u8 rtw8814a_pwrtrk_type0_2gb_p[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7,
+ 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9
+};
+
+static const u8 rtw8814a_pwrtrk_type0_2ga_n[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 5, 6, 7, 7,
+ 7, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9
+};
+
+static const u8 rtw8814a_pwrtrk_type0_2ga_p[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7,
+ 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9
+};
+
+static const u8 rtw8814a_pwrtrk_type0_2g_cck_d_n[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 5, 6, 7, 7,
+ 7, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9
+};
+
+static const u8 rtw8814a_pwrtrk_type0_2g_cck_d_p[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7,
+ 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9
+};
+
+static const u8 rtw8814a_pwrtrk_type0_2g_cck_c_n[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 5, 6, 7, 7,
+ 7, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9
+};
+
+static const u8 rtw8814a_pwrtrk_type0_2g_cck_c_p[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7,
+ 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9
+};
+
+static const u8 rtw8814a_pwrtrk_type0_2g_cck_b_n[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 5, 6, 7, 7,
+ 7, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9
+};
+
+static const u8 rtw8814a_pwrtrk_type0_2g_cck_b_p[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7,
+ 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9
+};
+
+static const u8 rtw8814a_pwrtrk_type0_2g_cck_a_n[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 5, 6, 7, 7,
+ 7, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9
+};
+
+static const u8 rtw8814a_pwrtrk_type0_2g_cck_a_p[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7,
+ 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9
+};
+
+const struct rtw_pwr_track_tbl rtw8814a_rtw_pwrtrk_type0_tbl = {
+ .pwrtrk_5gd_n[RTW_PWR_TRK_5G_1] = rtw8814a_pwrtrk_type0_5gd_n[RTW_PWR_TRK_5G_1],
+ .pwrtrk_5gd_n[RTW_PWR_TRK_5G_2] = rtw8814a_pwrtrk_type0_5gd_n[RTW_PWR_TRK_5G_2],
+ .pwrtrk_5gd_n[RTW_PWR_TRK_5G_3] = rtw8814a_pwrtrk_type0_5gd_n[RTW_PWR_TRK_5G_3],
+ .pwrtrk_5gd_p[RTW_PWR_TRK_5G_1] = rtw8814a_pwrtrk_type0_5gd_p[RTW_PWR_TRK_5G_1],
+ .pwrtrk_5gd_p[RTW_PWR_TRK_5G_2] = rtw8814a_pwrtrk_type0_5gd_p[RTW_PWR_TRK_5G_2],
+ .pwrtrk_5gd_p[RTW_PWR_TRK_5G_3] = rtw8814a_pwrtrk_type0_5gd_p[RTW_PWR_TRK_5G_3],
+ .pwrtrk_5gc_n[RTW_PWR_TRK_5G_1] = rtw8814a_pwrtrk_type0_5gc_n[RTW_PWR_TRK_5G_1],
+ .pwrtrk_5gc_n[RTW_PWR_TRK_5G_2] = rtw8814a_pwrtrk_type0_5gc_n[RTW_PWR_TRK_5G_2],
+ .pwrtrk_5gc_n[RTW_PWR_TRK_5G_3] = rtw8814a_pwrtrk_type0_5gc_n[RTW_PWR_TRK_5G_3],
+ .pwrtrk_5gc_p[RTW_PWR_TRK_5G_1] = rtw8814a_pwrtrk_type0_5gc_p[RTW_PWR_TRK_5G_1],
+ .pwrtrk_5gc_p[RTW_PWR_TRK_5G_2] = rtw8814a_pwrtrk_type0_5gc_p[RTW_PWR_TRK_5G_2],
+ .pwrtrk_5gc_p[RTW_PWR_TRK_5G_3] = rtw8814a_pwrtrk_type0_5gc_p[RTW_PWR_TRK_5G_3],
+ .pwrtrk_5gb_n[RTW_PWR_TRK_5G_1] = rtw8814a_pwrtrk_type0_5gb_n[RTW_PWR_TRK_5G_1],
+ .pwrtrk_5gb_n[RTW_PWR_TRK_5G_2] = rtw8814a_pwrtrk_type0_5gb_n[RTW_PWR_TRK_5G_2],
+ .pwrtrk_5gb_n[RTW_PWR_TRK_5G_3] = rtw8814a_pwrtrk_type0_5gb_n[RTW_PWR_TRK_5G_3],
+ .pwrtrk_5gb_p[RTW_PWR_TRK_5G_1] = rtw8814a_pwrtrk_type0_5gb_p[RTW_PWR_TRK_5G_1],
+ .pwrtrk_5gb_p[RTW_PWR_TRK_5G_2] = rtw8814a_pwrtrk_type0_5gb_p[RTW_PWR_TRK_5G_2],
+ .pwrtrk_5gb_p[RTW_PWR_TRK_5G_3] = rtw8814a_pwrtrk_type0_5gb_p[RTW_PWR_TRK_5G_3],
+ .pwrtrk_5ga_n[RTW_PWR_TRK_5G_1] = rtw8814a_pwrtrk_type0_5ga_n[RTW_PWR_TRK_5G_1],
+ .pwrtrk_5ga_n[RTW_PWR_TRK_5G_2] = rtw8814a_pwrtrk_type0_5ga_n[RTW_PWR_TRK_5G_2],
+ .pwrtrk_5ga_n[RTW_PWR_TRK_5G_3] = rtw8814a_pwrtrk_type0_5ga_n[RTW_PWR_TRK_5G_3],
+ .pwrtrk_5ga_p[RTW_PWR_TRK_5G_1] = rtw8814a_pwrtrk_type0_5ga_p[RTW_PWR_TRK_5G_1],
+ .pwrtrk_5ga_p[RTW_PWR_TRK_5G_2] = rtw8814a_pwrtrk_type0_5ga_p[RTW_PWR_TRK_5G_2],
+ .pwrtrk_5ga_p[RTW_PWR_TRK_5G_3] = rtw8814a_pwrtrk_type0_5ga_p[RTW_PWR_TRK_5G_3],
+ .pwrtrk_2gd_n = rtw8814a_pwrtrk_type0_2gd_n,
+ .pwrtrk_2gd_p = rtw8814a_pwrtrk_type0_2gd_p,
+ .pwrtrk_2gc_n = rtw8814a_pwrtrk_type0_2gc_n,
+ .pwrtrk_2gc_p = rtw8814a_pwrtrk_type0_2gc_p,
+ .pwrtrk_2gb_n = rtw8814a_pwrtrk_type0_2gb_n,
+ .pwrtrk_2gb_p = rtw8814a_pwrtrk_type0_2gb_p,
+ .pwrtrk_2ga_n = rtw8814a_pwrtrk_type0_2ga_n,
+ .pwrtrk_2ga_p = rtw8814a_pwrtrk_type0_2ga_p,
+ .pwrtrk_2g_cckd_n = rtw8814a_pwrtrk_type0_2g_cck_d_n,
+ .pwrtrk_2g_cckd_p = rtw8814a_pwrtrk_type0_2g_cck_d_p,
+ .pwrtrk_2g_cckc_n = rtw8814a_pwrtrk_type0_2g_cck_c_n,
+ .pwrtrk_2g_cckc_p = rtw8814a_pwrtrk_type0_2g_cck_c_p,
+ .pwrtrk_2g_cckb_n = rtw8814a_pwrtrk_type0_2g_cck_b_n,
+ .pwrtrk_2g_cckb_p = rtw8814a_pwrtrk_type0_2g_cck_b_p,
+ .pwrtrk_2g_ccka_n = rtw8814a_pwrtrk_type0_2g_cck_a_n,
+ .pwrtrk_2g_ccka_p = rtw8814a_pwrtrk_type0_2g_cck_a_p,
+};
+
+static const u8
+rtw8814a_pwrtrk_type2_5gd_n[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = {
+ {0, 1, 2, 3, 4, 5, 6, 7, 7, 8, 9, 10, 10, 11, 11, 12, 12,
+ 12, 13, 14, 15, 15, 16, 16, 16, 16, 16, 16, 16, 16},
+ {0, 1, 2, 3, 4, 5, 5, 6, 6, 7, 8, 9, 9, 10, 10, 10, 10,
+ 11, 11, 12, 12, 13, 14, 15, 16, 16, 16, 16, 16, 16},
+ {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 9, 10, 10, 11, 11, 11, 12,
+ 12, 13, 14, 14, 15, 15, 15, 15, 15, 15, 15, 15, 15},
+};
+
+static const u8
+rtw8814a_pwrtrk_type2_5gd_p[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = {
+ {0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 8, 9, 10, 11, 12, 13, 14,
+ 15, 16, 17, 18, 19, 20, 21, 22, 22, 22, 22, 22, 22},
+ {0, 1, 2, 3, 4, 4, 5, 6, 7, 7, 8, 9, 10, 11, 12, 13, 14,
+ 15, 16, 17, 18, 19, 20, 21, 22, 22, 22, 22, 22, 22},
+ {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
+ 17, 18, 19, 20, 21, 22, 23, 23, 23, 23, 23, 23, 23},
+};
+
+static const u8
+rtw8814a_pwrtrk_type2_5gc_n[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = {
+ {0, 1, 2, 3, 3, 4, 5, 6, 7, 7, 8, 9, 10, 10, 11, 11, 12,
+ 12, 13, 14, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15},
+ {0, 1, 2, 3, 4, 5, 5, 6, 7, 8, 9, 9, 10, 10, 11, 11, 12,
+ 13, 13, 14, 14, 15, 15, 15, 15, 15, 15, 15, 15, 15},
+ {0, 1, 2, 3, 4, 4, 5, 6, 6, 7, 8, 8, 9, 9, 10, 11, 11,
+ 12, 12, 13, 13, 13, 14, 14, 14, 14, 14, 14, 14, 14},
+};
+
+static const u8
+rtw8814a_pwrtrk_type2_5gc_p[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = {
+ {0, 1, 2, 3, 4, 5, 6, 6, 7, 8, 9, 10, 11, 11, 12, 13, 14,
+ 15, 16, 17, 18, 19, 20, 21, 21, 21, 21, 21, 21, 21},
+ {0, 1, 2, 3, 4, 5, 6, 6, 7, 8, 8, 9, 10, 11, 12, 13, 14,
+ 15, 16, 17, 18, 19, 20, 20, 20, 21, 21, 21, 21, 21},
+ {0, 1, 2, 3, 3, 4, 5, 6, 7, 7, 8, 9, 10, 11, 12, 13, 14,
+ 15, 16, 17, 18, 19, 20, 21, 21, 21, 21, 21, 21, 21},
+};
+
+static const u8
+rtw8814a_pwrtrk_type2_5gb_n[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = {
+ {0, 1, 2, 2, 3, 4, 5, 6, 7, 8, 9, 10, 10, 10, 11, 12, 13,
+ 13, 13, 14, 14, 14, 15, 15, 15, 15, 15, 15, 15, 15},
+ {0, 1, 2, 3, 4, 5, 5, 6, 7, 8, 9, 9, 10, 10, 11, 11, 12,
+ 13, 13, 14, 14, 15, 15, 15, 15, 15, 15, 15, 15, 15},
+ {0, 1, 2, 3, 4, 4, 5, 6, 6, 7, 8, 8, 9, 9, 10, 11, 11,
+ 12, 12, 13, 13, 13, 14, 14, 14, 14, 14, 14, 14, 14},
+};
+
+static const u8
+rtw8814a_pwrtrk_type2_5gb_p[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = {
+ {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 11, 12, 13, 14, 15,
+ 16, 17, 18, 19, 20, 21, 21, 21, 21, 21, 21, 21, 21},
+ {0, 0, 1, 2, 3, 4, 5, 6, 6, 7, 8, 9, 10, 10, 11, 12, 13,
+ 14, 15, 16, 17, 18, 19, 20, 20, 20, 20, 20, 20, 20},
+ {0, 1, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 11, 12, 13, 14,
+ 15, 16, 17, 18, 19, 20, 21, 21, 21, 21, 21, 21, 21},
+};
+
+static const u8
+rtw8814a_pwrtrk_type2_5ga_n[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = {
+ {0, 1, 2, 3, 4, 5, 5, 6, 7, 7, 8, 9, 10, 10, 11, 11, 11,
+ 12, 13, 13, 13, 13, 14, 15, 15, 15, 15, 15, 15, 15},
+ {0, 1, 2, 3, 3, 4, 5, 6, 7, 8, 8, 9, 10, 10, 11, 12, 12,
+ 12, 13, 13, 13, 14, 14, 14, 14, 14, 14, 14, 14, 14},
+ {0, 1, 2, 3, 4, 4, 5, 6, 7, 7, 8, 9, 10, 11, 11, 11, 12,
+ 12, 12, 12, 12, 13, 13, 13, 13, 13, 13, 13, 13, 13},
+};
+
+static const u8
+rtw8814a_pwrtrk_type2_5ga_p[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = {
+ {0, 1, 2, 3, 4, 5, 5, 6, 7, 8, 9, 10, 10, 11, 12, 13, 14,
+ 15, 15, 16, 17, 18, 18, 19, 19, 20, 20, 20, 20, 20},
+ {0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 10, 11, 12, 13, 14,
+ 15, 16, 16, 17, 18, 19, 20, 20, 20, 20, 20, 20, 20},
+ {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 11, 12, 13, 14, 15,
+ 15, 16, 17, 18, 19, 19, 20, 20, 20, 20, 20, 20, 20},
+};
+
+static const u8 rtw8814a_pwrtrk_type2_2gd_n[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5,
+ 6, 6, 7, 8, 9, 10, 11, 11, 11, 11, 11, 11, 11
+};
+
+static const u8 rtw8814a_pwrtrk_type2_2gd_p[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 6, 6, 7, 7, 8, 9, 9, 10,
+ 10, 11, 12, 12, 13, 14, 14, 14, 14, 14, 14, 14
+};
+
+static const u8 rtw8814a_pwrtrk_type2_2gc_n[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 1, 1, 2, 2, 3, 3, 4, 5, 6, 6, 6, 7, 7, 8, 8, 9, 10,
+ 10, 11, 12, 12, 13, 13, 13, 13, 14, 14, 14, 14
+};
+
+static const u8 rtw8814a_pwrtrk_type2_2gc_p[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 1, 1, 2, 3, 3, 4, 4, 4, 5, 5, 6, 7, 8, 8, 9, 10, 10,
+ 11, 12, 13, 13, 14, 14, 14, 14, 14, 14, 14, 14
+};
+
+static const u8 rtw8814a_pwrtrk_type2_2gb_n[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 1, 1, 2, 2, 3, 3, 4, 5, 5, 6, 6, 7, 7, 8, 8, 8,
+ 9, 9, 10, 10, 11, 11, 12, 12, 12, 13, 14, 14, 14
+};
+
+static const u8 rtw8814a_pwrtrk_type2_2gb_p[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 6, 6, 7, 8, 8, 9, 9, 10,
+ 10, 11, 12, 12, 13, 13, 13, 13, 13, 14, 14, 14
+};
+
+static const u8 rtw8814a_pwrtrk_type2_2ga_n[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 0, 1, 2, 2, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 9, 9, 9,
+ 10, 11, 11, 12, 12, 13, 13, 13, 13, 13, 13, 13
+};
+
+static const u8 rtw8814a_pwrtrk_type2_2ga_p[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 1, 1, 2, 3, 3, 4, 4, 4, 5, 6, 6, 7, 7, 8, 8, 9, 10,
+ 11, 12, 12, 13, 13, 14, 14, 14, 14, 14, 14, 14
+};
+
+static const u8 rtw8814a_pwrtrk_type2_2g_cck_d_n[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 1, 1, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 7, 8,
+ 9, 9, 9, 9, 9, 9, 10, 10, 11, 11, 12, 12, 12
+};
+
+static const u8 rtw8814a_pwrtrk_type2_2g_cck_d_p[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 1, 1, 1, 2, 3, 3, 4, 4, 5, 6, 6, 7, 8, 8, 9, 10, 10,
+ 10, 11, 12, 12, 13, 14, 14, 14, 14, 14, 14, 14
+};
+
+static const u8 rtw8814a_pwrtrk_type2_2g_cck_c_n[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 0, 1, 2, 2, 3, 4, 5, 5, 6, 6, 6, 6, 7, 8, 9, 9, 10,
+ 10, 11, 11, 11, 12, 13, 13, 13, 13, 13, 13, 13
+};
+
+static const u8 rtw8814a_pwrtrk_type2_2g_cck_c_p[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 0, 1, 1, 2, 2, 3, 3, 4, 5, 5, 6, 7, 7, 8, 9, 9, 10,
+ 10, 11, 12, 12, 13, 13, 13, 13, 13, 13, 13, 13
+};
+
+static const u8 rtw8814a_pwrtrk_type2_2g_cck_b_n[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 1, 1, 2, 2, 2, 3, 3, 4, 5, 6, 6, 7, 7, 8, 8, 9, 10,
+ 10, 10, 11, 11, 12, 12, 12, 12, 12, 12, 12, 12
+};
+
+static const u8 rtw8814a_pwrtrk_type2_2g_cck_b_p[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 1, 1, 1, 2, 3, 3, 4, 4, 5, 5, 6, 7, 8, 8, 9, 9, 10,
+ 10, 11, 12, 12, 13, 13, 13, 13, 13, 13, 13, 13
+};
+
+static const u8 rtw8814a_pwrtrk_type2_2g_cck_a_n[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 1, 2, 2, 3, 4, 4, 4, 5, 5, 6, 6, 7, 8, 9, 9, 9, 9,
+ 10, 10, 11, 11, 11, 12, 12, 12, 12, 12, 12, 12
+};
+
+static const u8 rtw8814a_pwrtrk_type2_2g_cck_a_p[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 0, 1, 2, 2, 3, 3, 4, 5, 5, 6, 7, 8, 9, 9, 10, 10,
+ 11, 11, 12, 12, 12, 13, 13, 13, 13, 13, 13, 13, 13
+};
+
+const struct rtw_pwr_track_tbl rtw8814a_rtw_pwrtrk_type2_tbl = {
+ .pwrtrk_5gd_n[RTW_PWR_TRK_5G_1] = rtw8814a_pwrtrk_type2_5gd_n[RTW_PWR_TRK_5G_1],
+ .pwrtrk_5gd_n[RTW_PWR_TRK_5G_2] = rtw8814a_pwrtrk_type2_5gd_n[RTW_PWR_TRK_5G_2],
+ .pwrtrk_5gd_n[RTW_PWR_TRK_5G_3] = rtw8814a_pwrtrk_type2_5gd_n[RTW_PWR_TRK_5G_3],
+ .pwrtrk_5gd_p[RTW_PWR_TRK_5G_1] = rtw8814a_pwrtrk_type2_5gd_p[RTW_PWR_TRK_5G_1],
+ .pwrtrk_5gd_p[RTW_PWR_TRK_5G_2] = rtw8814a_pwrtrk_type2_5gd_p[RTW_PWR_TRK_5G_2],
+ .pwrtrk_5gd_p[RTW_PWR_TRK_5G_3] = rtw8814a_pwrtrk_type2_5gd_p[RTW_PWR_TRK_5G_3],
+ .pwrtrk_5gc_n[RTW_PWR_TRK_5G_1] = rtw8814a_pwrtrk_type2_5gc_n[RTW_PWR_TRK_5G_1],
+ .pwrtrk_5gc_n[RTW_PWR_TRK_5G_2] = rtw8814a_pwrtrk_type2_5gc_n[RTW_PWR_TRK_5G_2],
+ .pwrtrk_5gc_n[RTW_PWR_TRK_5G_3] = rtw8814a_pwrtrk_type2_5gc_n[RTW_PWR_TRK_5G_3],
+ .pwrtrk_5gc_p[RTW_PWR_TRK_5G_1] = rtw8814a_pwrtrk_type2_5gc_p[RTW_PWR_TRK_5G_1],
+ .pwrtrk_5gc_p[RTW_PWR_TRK_5G_2] = rtw8814a_pwrtrk_type2_5gc_p[RTW_PWR_TRK_5G_2],
+ .pwrtrk_5gc_p[RTW_PWR_TRK_5G_3] = rtw8814a_pwrtrk_type2_5gc_p[RTW_PWR_TRK_5G_3],
+ .pwrtrk_5gb_n[RTW_PWR_TRK_5G_1] = rtw8814a_pwrtrk_type2_5gb_n[RTW_PWR_TRK_5G_1],
+ .pwrtrk_5gb_n[RTW_PWR_TRK_5G_2] = rtw8814a_pwrtrk_type2_5gb_n[RTW_PWR_TRK_5G_2],
+ .pwrtrk_5gb_n[RTW_PWR_TRK_5G_3] = rtw8814a_pwrtrk_type2_5gb_n[RTW_PWR_TRK_5G_3],
+ .pwrtrk_5gb_p[RTW_PWR_TRK_5G_1] = rtw8814a_pwrtrk_type2_5gb_p[RTW_PWR_TRK_5G_1],
+ .pwrtrk_5gb_p[RTW_PWR_TRK_5G_2] = rtw8814a_pwrtrk_type2_5gb_p[RTW_PWR_TRK_5G_2],
+ .pwrtrk_5gb_p[RTW_PWR_TRK_5G_3] = rtw8814a_pwrtrk_type2_5gb_p[RTW_PWR_TRK_5G_3],
+ .pwrtrk_5ga_n[RTW_PWR_TRK_5G_1] = rtw8814a_pwrtrk_type2_5ga_n[RTW_PWR_TRK_5G_1],
+ .pwrtrk_5ga_n[RTW_PWR_TRK_5G_2] = rtw8814a_pwrtrk_type2_5ga_n[RTW_PWR_TRK_5G_2],
+ .pwrtrk_5ga_n[RTW_PWR_TRK_5G_3] = rtw8814a_pwrtrk_type2_5ga_n[RTW_PWR_TRK_5G_3],
+ .pwrtrk_5ga_p[RTW_PWR_TRK_5G_1] = rtw8814a_pwrtrk_type2_5ga_p[RTW_PWR_TRK_5G_1],
+ .pwrtrk_5ga_p[RTW_PWR_TRK_5G_2] = rtw8814a_pwrtrk_type2_5ga_p[RTW_PWR_TRK_5G_2],
+ .pwrtrk_5ga_p[RTW_PWR_TRK_5G_3] = rtw8814a_pwrtrk_type2_5ga_p[RTW_PWR_TRK_5G_3],
+ .pwrtrk_2gd_n = rtw8814a_pwrtrk_type2_2gd_n,
+ .pwrtrk_2gd_p = rtw8814a_pwrtrk_type2_2gd_p,
+ .pwrtrk_2gc_n = rtw8814a_pwrtrk_type2_2gc_n,
+ .pwrtrk_2gc_p = rtw8814a_pwrtrk_type2_2gc_p,
+ .pwrtrk_2gb_n = rtw8814a_pwrtrk_type2_2gb_n,
+ .pwrtrk_2gb_p = rtw8814a_pwrtrk_type2_2gb_p,
+ .pwrtrk_2ga_n = rtw8814a_pwrtrk_type2_2ga_n,
+ .pwrtrk_2ga_p = rtw8814a_pwrtrk_type2_2ga_p,
+ .pwrtrk_2g_cckd_n = rtw8814a_pwrtrk_type2_2g_cck_d_n,
+ .pwrtrk_2g_cckd_p = rtw8814a_pwrtrk_type2_2g_cck_d_p,
+ .pwrtrk_2g_cckc_n = rtw8814a_pwrtrk_type2_2g_cck_c_n,
+ .pwrtrk_2g_cckc_p = rtw8814a_pwrtrk_type2_2g_cck_c_p,
+ .pwrtrk_2g_cckb_n = rtw8814a_pwrtrk_type2_2g_cck_b_n,
+ .pwrtrk_2g_cckb_p = rtw8814a_pwrtrk_type2_2g_cck_b_p,
+ .pwrtrk_2g_ccka_n = rtw8814a_pwrtrk_type2_2g_cck_a_n,
+ .pwrtrk_2g_ccka_p = rtw8814a_pwrtrk_type2_2g_cck_a_p,
+};
+
+static const u8
+rtw8814a_pwrtrk_type5_5gd_n[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = {
+ {0, 3, 3, 3, 4, 6, 6, 7, 7, 8, 9, 10, 11, 12, 13, 13, 14,
+ 14, 14, 14, 14, 15, 15, 15, 15, 15, 15, 15, 15, 15},
+ {0, 4, 5, 6, 7, 7, 8, 7, 8, 10, 11, 12, 12, 13, 13, 14, 14,
+ 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15},
+ {0, 5, 5, 6, 7, 8, 9, 9, 10, 11, 12, 13, 13, 14, 15, 15, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16},
+};
+
+static const u8
+rtw8814a_pwrtrk_type5_5gd_p[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = {
+ {0, 0, 0, 1, 2, 2, 3, 4, 5, 5, 6, 7, 9, 10, 10, 11, 12,
+ 12, 12, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13},
+ {0, 0, 0, 1, 1, 1, 2, 3, 4, 5, 6, 7, 8, 8, 9, 11, 12,
+ 12, 13, 13, 13, 13, 13, 14, 14, 14, 14, 14, 14, 14},
+ {0, 0, 0, 1, 1, 2, 2, 3, 5, 7, 8, 9, 10, 11, 12, 13, 13,
+ 13, 13, 13, 13, 14, 14, 14, 14, 14, 14, 14, 14, 14},
+};
+
+static const u8
+rtw8814a_pwrtrk_type5_5gc_n[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = {
+ {0, 1, 1, 2, 3, 3, 4, 6, 7, 7, 8, 9, 9, 9, 10, 10, 10,
+ 10, 11, 11, 11, 11, 11, 12, 12, 12, 12, 12, 12, 12},
+ {0, 1, 2, 3, 3, 7, 7, 8, 8, 9, 11, 12, 12, 13, 14, 14, 15,
+ 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15},
+ {0, 0, 1, 2, 3, 4, 5, 7, 8, 8, 10, 11, 12, 12, 13, 13, 14,
+ 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14},
+};
+
+static const u8
+rtw8814a_pwrtrk_type5_5gc_p[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = {
+ {0, 0, 0, 1, 1, 2, 2, 3, 4, 5, 6, 6, 7, 8, 9, 11, 11,
+ 11, 11, 11, 11, 12, 12, 12, 12, 12, 13, 13, 13, 13},
+ {0, 0, 1, 2, 3, 3, 5, 5, 6, 8, 8, 9, 10, 11, 13, 13, 13,
+ 13, 13, 13, 13, 14, 14, 14, 14, 14, 14, 14, 14, 14},
+ {0, 0, 1, 2, 3, 4, 4, 5, 7, 8, 9, 9, 10, 11, 12, 12, 12,
+ 12, 13, 13, 13, 13, 13, 13, 14, 14, 14, 14, 14, 14},
+};
+
+static const u8
+rtw8814a_pwrtrk_type5_5gb_n[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = {
+ {0, 1, 1, 2, 2, 2, 3, 4, 5, 6, 7, 9, 10, 10, 10, 10, 10,
+ 10, 11, 11, 11, 11, 11, 12, 12, 12, 12, 12, 12, 12},
+ {0, 1, 2, 3, 3, 7, 7, 8, 8, 9, 11, 12, 12, 13, 14, 14, 15,
+ 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15},
+ {0, 0, 1, 2, 3, 4, 5, 7, 8, 8, 10, 11, 12, 12, 13, 13, 14,
+ 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14},
+};
+
+static const u8
+rtw8814a_pwrtrk_type5_5gb_p[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = {
+ {0, 1, 1, 1, 2, 2, 4, 5, 6, 6, 7, 8, 9, 10, 11, 11, 11,
+ 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12},
+ {0, 0, 0, 2, 3, 4, 5, 6, 8, 8, 9, 9, 11, 12, 13, 13, 13,
+ 13, 13, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14},
+ {0, 0, 0, 1, 2, 3, 3, 4, 6, 7, 8, 9, 10, 11, 12, 12, 12,
+ 12, 13, 13, 13, 13, 13, 13, 14, 14, 14, 14, 14, 14},
+};
+
+static const u8
+rtw8814a_pwrtrk_type5_5ga_n[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = {
+ {0, 0, 0, 1, 2, 3, 3, 3, 4, 5, 6, 6, 7, 8, 8, 9, 10, 10,
+ 10, 11, 11, 11, 11, 11, 11, 12, 12, 12, 12, 12},
+ {0, 2, 3, 4, 5, 7, 7, 8, 9, 10, 12, 13, 14, 15, 16, 17, 17,
+ 17, 17, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18},
+ {0, 1, 2, 3, 3, 4, 6, 7, 8, 8, 10, 11, 11, 12, 13, 13, 13,
+ 13, 13, 13, 13, 14, 14, 14, 14, 14, 14, 14, 14, 14},
+};
+
+static const u8
+rtw8814a_pwrtrk_type5_5ga_p[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = {
+ {0, 1, 1, 3, 3, 3, 5, 5, 6, 6, 8, 8, 9, 10, 11, 11, 11,
+ 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12},
+ {0, 1, 2, 3, 4, 4, 5, 6, 7, 8, 8, 9, 11, 12, 13, 14, 15,
+ 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15},
+ {0, 0, 1, 3, 3, 4, 5, 5, 6, 7, 7, 8, 10, 10, 11, 11, 11,
+ 11, 12, 12, 12, 12, 12, 13, 13, 13, 13, 13, 13, 13},
+};
+
+static const u8 rtw8814a_pwrtrk_type5_2gd_n[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 1, 2, 2, 3, 4, 4, 5, 6, 6, 7, 7, 7, 8, 9, 9, 9,
+ 9, 9, 10, 10, 10, 10, 10, 10, 11, 11, 11, 11, 11
+};
+
+static const u8 rtw8814a_pwrtrk_type5_2gd_p[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 1, 1, 2, 2, 2, 3, 4, 4, 5, 6, 6, 7, 7, 8, 8,
+ 8, 8, 8, 9, 9, 9, 9, 9, 9, 10, 10, 10, 10, 10
+};
+
+static const u8 rtw8814a_pwrtrk_type5_2gc_n[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 1, 2, 2, 3, 4, 4, 5, 6, 6, 7, 8, 8, 9, 10, 10, 10,
+ 10, 10, 10, 11, 11, 11, 11, 11, 12, 12, 12, 12, 12
+};
+
+static const u8 rtw8814a_pwrtrk_type5_2gc_p[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 8, 8, 9, 9, 9,
+ 9, 9, 10, 10, 10, 10, 10, 11, 11, 11, 11, 11, 11
+};
+
+static const u8 rtw8814a_pwrtrk_type5_2gb_n[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 1, 2, 2, 3, 4, 4, 5, 6, 6, 7, 7, 8, 9, 10, 10, 10,
+ 10, 10, 11, 11, 11, 11, 11, 12, 12, 12, 12, 12, 12
+};
+
+static const u8 rtw8814a_pwrtrk_type5_2gb_p[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 8, 9, 9, 9, 9,
+ 9, 9, 10, 10, 10, 10, 10, 11, 11, 11, 11, 11, 11
+};
+
+static const u8 rtw8814a_pwrtrk_type5_2ga_n[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 1, 2, 2, 3, 4, 5, 5, 6, 7, 7, 8, 8, 9, 10, 10, 10,
+ 10, 11, 11, 11, 11, 11, 12, 12, 12, 12, 12, 12, 12
+};
+
+static const u8 rtw8814a_pwrtrk_type5_2ga_p[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 6, 7, 8, 8, 9, 9, 9,
+ 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 11, 11, 11
+};
+
+static const u8 rtw8814a_pwrtrk_type5_2g_cck_d_n[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 1, 2, 2, 3, 4, 5, 5, 6, 7, 7, 8, 8, 8, 9, 9, 9, 9,
+ 10, 10, 10, 10, 10, 11, 11, 10, 11, 11, 11, 11
+};
+
+static const u8 rtw8814a_pwrtrk_type5_2g_cck_d_p[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 7, 7, 8, 8, 8,
+ 9, 9, 9, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10
+};
+
+static const u8 rtw8814a_pwrtrk_type5_2g_cck_c_n[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 2, 3, 4, 4, 5, 6, 6, 7, 8, 8, 8, 8, 9, 10, 10, 10,
+ 10, 11, 11, 11, 11, 11, 11, 12, 12, 12, 12, 12, 12
+};
+
+static const u8 rtw8814a_pwrtrk_type5_2g_cck_c_p[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 0, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 7, 7, 8, 8, 8,
+ 9, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10
+};
+
+static const u8 rtw8814a_pwrtrk_type5_2g_cck_b_n[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 2, 3, 4, 4, 5, 6, 6, 6, 7, 7, 8, 8, 9, 10, 10, 10,
+ 10, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11
+};
+
+static const u8 rtw8814a_pwrtrk_type5_2g_cck_b_p[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 7, 7, 8, 8, 8,
+ 9, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10
+};
+
+static const u8 rtw8814a_pwrtrk_type5_2g_cck_a_n[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 1, 2, 2, 3, 4, 5, 5, 6, 7, 7, 8, 8, 9, 9, 9, 9, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10
+};
+
+static const u8 rtw8814a_pwrtrk_type5_2g_cck_a_p[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 1, 1, 2, 2, 2, 3, 4, 4, 5, 6, 7, 8, 8, 9, 9, 9,
+ 9, 9, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 10
+};
+
+const struct rtw_pwr_track_tbl rtw8814a_rtw_pwrtrk_type5_tbl = {
+ .pwrtrk_5gd_n[RTW_PWR_TRK_5G_1] = rtw8814a_pwrtrk_type5_5gd_n[RTW_PWR_TRK_5G_1],
+ .pwrtrk_5gd_n[RTW_PWR_TRK_5G_2] = rtw8814a_pwrtrk_type5_5gd_n[RTW_PWR_TRK_5G_2],
+ .pwrtrk_5gd_n[RTW_PWR_TRK_5G_3] = rtw8814a_pwrtrk_type5_5gd_n[RTW_PWR_TRK_5G_3],
+ .pwrtrk_5gd_p[RTW_PWR_TRK_5G_1] = rtw8814a_pwrtrk_type5_5gd_p[RTW_PWR_TRK_5G_1],
+ .pwrtrk_5gd_p[RTW_PWR_TRK_5G_2] = rtw8814a_pwrtrk_type5_5gd_p[RTW_PWR_TRK_5G_2],
+ .pwrtrk_5gd_p[RTW_PWR_TRK_5G_3] = rtw8814a_pwrtrk_type5_5gd_p[RTW_PWR_TRK_5G_3],
+ .pwrtrk_5gc_n[RTW_PWR_TRK_5G_1] = rtw8814a_pwrtrk_type5_5gc_n[RTW_PWR_TRK_5G_1],
+ .pwrtrk_5gc_n[RTW_PWR_TRK_5G_2] = rtw8814a_pwrtrk_type5_5gc_n[RTW_PWR_TRK_5G_2],
+ .pwrtrk_5gc_n[RTW_PWR_TRK_5G_3] = rtw8814a_pwrtrk_type5_5gc_n[RTW_PWR_TRK_5G_3],
+ .pwrtrk_5gc_p[RTW_PWR_TRK_5G_1] = rtw8814a_pwrtrk_type5_5gc_p[RTW_PWR_TRK_5G_1],
+ .pwrtrk_5gc_p[RTW_PWR_TRK_5G_2] = rtw8814a_pwrtrk_type5_5gc_p[RTW_PWR_TRK_5G_2],
+ .pwrtrk_5gc_p[RTW_PWR_TRK_5G_3] = rtw8814a_pwrtrk_type5_5gc_p[RTW_PWR_TRK_5G_3],
+ .pwrtrk_5gb_n[RTW_PWR_TRK_5G_1] = rtw8814a_pwrtrk_type5_5gb_n[RTW_PWR_TRK_5G_1],
+ .pwrtrk_5gb_n[RTW_PWR_TRK_5G_2] = rtw8814a_pwrtrk_type5_5gb_n[RTW_PWR_TRK_5G_2],
+ .pwrtrk_5gb_n[RTW_PWR_TRK_5G_3] = rtw8814a_pwrtrk_type5_5gb_n[RTW_PWR_TRK_5G_3],
+ .pwrtrk_5gb_p[RTW_PWR_TRK_5G_1] = rtw8814a_pwrtrk_type5_5gb_p[RTW_PWR_TRK_5G_1],
+ .pwrtrk_5gb_p[RTW_PWR_TRK_5G_2] = rtw8814a_pwrtrk_type5_5gb_p[RTW_PWR_TRK_5G_2],
+ .pwrtrk_5gb_p[RTW_PWR_TRK_5G_3] = rtw8814a_pwrtrk_type5_5gb_p[RTW_PWR_TRK_5G_3],
+ .pwrtrk_5ga_n[RTW_PWR_TRK_5G_1] = rtw8814a_pwrtrk_type5_5ga_n[RTW_PWR_TRK_5G_1],
+ .pwrtrk_5ga_n[RTW_PWR_TRK_5G_2] = rtw8814a_pwrtrk_type5_5ga_n[RTW_PWR_TRK_5G_2],
+ .pwrtrk_5ga_n[RTW_PWR_TRK_5G_3] = rtw8814a_pwrtrk_type5_5ga_n[RTW_PWR_TRK_5G_3],
+ .pwrtrk_5ga_p[RTW_PWR_TRK_5G_1] = rtw8814a_pwrtrk_type5_5ga_p[RTW_PWR_TRK_5G_1],
+ .pwrtrk_5ga_p[RTW_PWR_TRK_5G_2] = rtw8814a_pwrtrk_type5_5ga_p[RTW_PWR_TRK_5G_2],
+ .pwrtrk_5ga_p[RTW_PWR_TRK_5G_3] = rtw8814a_pwrtrk_type5_5ga_p[RTW_PWR_TRK_5G_3],
+ .pwrtrk_2gd_n = rtw8814a_pwrtrk_type5_2gd_n,
+ .pwrtrk_2gd_p = rtw8814a_pwrtrk_type5_2gd_p,
+ .pwrtrk_2gc_n = rtw8814a_pwrtrk_type5_2gc_n,
+ .pwrtrk_2gc_p = rtw8814a_pwrtrk_type5_2gc_p,
+ .pwrtrk_2gb_n = rtw8814a_pwrtrk_type5_2gb_n,
+ .pwrtrk_2gb_p = rtw8814a_pwrtrk_type5_2gb_p,
+ .pwrtrk_2ga_n = rtw8814a_pwrtrk_type5_2ga_n,
+ .pwrtrk_2ga_p = rtw8814a_pwrtrk_type5_2ga_p,
+ .pwrtrk_2g_cckd_n = rtw8814a_pwrtrk_type5_2g_cck_d_n,
+ .pwrtrk_2g_cckd_p = rtw8814a_pwrtrk_type5_2g_cck_d_p,
+ .pwrtrk_2g_cckc_n = rtw8814a_pwrtrk_type5_2g_cck_c_n,
+ .pwrtrk_2g_cckc_p = rtw8814a_pwrtrk_type5_2g_cck_c_p,
+ .pwrtrk_2g_cckb_n = rtw8814a_pwrtrk_type5_2g_cck_b_n,
+ .pwrtrk_2g_cckb_p = rtw8814a_pwrtrk_type5_2g_cck_b_p,
+ .pwrtrk_2g_ccka_n = rtw8814a_pwrtrk_type5_2g_cck_a_n,
+ .pwrtrk_2g_ccka_p = rtw8814a_pwrtrk_type5_2g_cck_a_p,
+};
+
+static const u8
+rtw8814a_pwrtrk_type7_5gd_n[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = {
+ {0, 0, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8,
+ 8, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10},
+ {0, 0, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8,
+ 8, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10},
+ {0, 0, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8,
+ 8, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10},
+};
+
+static const u8
+rtw8814a_pwrtrk_type7_5gd_p[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = {
+ {0, 0, 1, 1, 2, 3, 3, 4, 5, 5, 6, 6, 7, 8, 8, 9, 10, 10,
+ 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12},
+ {0, 0, 1, 1, 2, 3, 3, 4, 5, 5, 6, 6, 7, 8, 8, 9, 10, 10,
+ 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12},
+ {0, 0, 1, 1, 2, 3, 3, 4, 5, 5, 6, 6, 7, 8, 8, 9, 10, 10,
+ 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12},
+};
+
+static const u8
+rtw8814a_pwrtrk_type7_5gc_n[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = {
+ {0, 0, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8,
+ 8, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10},
+ {0, 0, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8,
+ 8, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10},
+ {0, 0, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8,
+ 8, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10},
+};
+
+static const u8
+rtw8814a_pwrtrk_type7_5gc_p[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = {
+ {0, 0, 1, 1, 2, 3, 3, 4, 5, 5, 6, 6, 7, 8, 8, 9, 10, 10,
+ 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12},
+ {0, 0, 1, 1, 2, 3, 3, 4, 5, 5, 6, 6, 7, 8, 8, 9, 10, 10,
+ 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12},
+ {0, 0, 1, 1, 2, 3, 3, 4, 5, 5, 6, 6, 7, 8, 8, 9, 10, 10,
+ 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12},
+};
+
+static const u8
+rtw8814a_pwrtrk_type7_5gb_n[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = {
+ {0, 0, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8,
+ 8, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10},
+ {0, 0, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8,
+ 8, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10},
+ {0, 0, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8,
+ 8, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10},
+};
+
+static const u8
+rtw8814a_pwrtrk_type7_5gb_p[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = {
+ {0, 0, 1, 1, 2, 3, 3, 4, 5, 5, 6, 6, 7, 8, 8, 9, 10, 10,
+ 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12},
+ {0, 0, 1, 1, 2, 3, 3, 4, 5, 5, 6, 6, 7, 8, 8, 9, 10, 10,
+ 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12},
+ {0, 0, 1, 1, 2, 3, 3, 4, 5, 5, 6, 6, 7, 8, 8, 9, 10, 10,
+ 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12},
+};
+
+static const u8
+rtw8814a_pwrtrk_type7_5ga_n[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = {
+ {0, 0, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8,
+ 8, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10},
+ {0, 0, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8,
+ 8, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10},
+ {0, 0, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8,
+ 8, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10},
+};
+
+static const u8
+rtw8814a_pwrtrk_type7_5ga_p[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = {
+ {0, 0, 1, 1, 2, 3, 3, 4, 5, 5, 6, 6, 7, 8, 8, 9, 10, 10,
+ 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12},
+ {0, 0, 1, 1, 2, 3, 3, 4, 5, 5, 6, 6, 7, 8, 8, 9, 10, 10,
+ 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12},
+ {0, 0, 1, 1, 2, 3, 3, 4, 5, 5, 6, 6, 7, 8, 8, 9, 10, 10,
+ 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12},
+};
+
+static const u8 rtw8814a_pwrtrk_type7_2gd_n[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 5, 6, 7, 7,
+ 7, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9
+};
+
+static const u8 rtw8814a_pwrtrk_type7_2gd_p[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7,
+ 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9
+};
+
+static const u8 rtw8814a_pwrtrk_type7_2gc_n[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 5, 6, 7, 7,
+ 7, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9
+};
+
+static const u8 rtw8814a_pwrtrk_type7_2gc_p[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7,
+ 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9
+};
+
+static const u8 rtw8814a_pwrtrk_type7_2gb_n[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 5, 6, 7, 7,
+ 7, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9
+};
+
+static const u8 rtw8814a_pwrtrk_type7_2gb_p[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7,
+ 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9
+};
+
+static const u8 rtw8814a_pwrtrk_type7_2ga_n[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 5, 6, 7, 7,
+ 7, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9
+};
+
+static const u8 rtw8814a_pwrtrk_type7_2ga_p[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7,
+ 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9
+};
+
+static const u8 rtw8814a_pwrtrk_type7_2g_cck_d_n[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 5, 6, 7, 7,
+ 7, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9
+};
+
+static const u8 rtw8814a_pwrtrk_type7_2g_cck_d_p[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7,
+ 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9
+};
+
+static const u8 rtw8814a_pwrtrk_type7_2g_cck_c_n[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 5, 6, 7, 7,
+ 7, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9
+};
+
+static const u8 rtw8814a_pwrtrk_type7_2g_cck_c_p[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7,
+ 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9
+};
+
+static const u8 rtw8814a_pwrtrk_type7_2g_cck_b_n[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 5, 6, 7, 7,
+ 7, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9
+};
+
+static const u8 rtw8814a_pwrtrk_type7_2g_cck_b_p[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7,
+ 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9
+};
+
+static const u8 rtw8814a_pwrtrk_type7_2g_cck_a_n[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 5, 6, 7, 7,
+ 7, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9
+};
+
+static const u8 rtw8814a_pwrtrk_type7_2g_cck_a_p[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7,
+ 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9
+};
+
+const struct rtw_pwr_track_tbl rtw8814a_rtw_pwrtrk_type7_tbl = {
+ .pwrtrk_5gd_n[RTW_PWR_TRK_5G_1] = rtw8814a_pwrtrk_type7_5gd_n[RTW_PWR_TRK_5G_1],
+ .pwrtrk_5gd_n[RTW_PWR_TRK_5G_2] = rtw8814a_pwrtrk_type7_5gd_n[RTW_PWR_TRK_5G_2],
+ .pwrtrk_5gd_n[RTW_PWR_TRK_5G_3] = rtw8814a_pwrtrk_type7_5gd_n[RTW_PWR_TRK_5G_3],
+ .pwrtrk_5gd_p[RTW_PWR_TRK_5G_1] = rtw8814a_pwrtrk_type7_5gd_p[RTW_PWR_TRK_5G_1],
+ .pwrtrk_5gd_p[RTW_PWR_TRK_5G_2] = rtw8814a_pwrtrk_type7_5gd_p[RTW_PWR_TRK_5G_2],
+ .pwrtrk_5gd_p[RTW_PWR_TRK_5G_3] = rtw8814a_pwrtrk_type7_5gd_p[RTW_PWR_TRK_5G_3],
+ .pwrtrk_5gc_n[RTW_PWR_TRK_5G_1] = rtw8814a_pwrtrk_type7_5gc_n[RTW_PWR_TRK_5G_1],
+ .pwrtrk_5gc_n[RTW_PWR_TRK_5G_2] = rtw8814a_pwrtrk_type7_5gc_n[RTW_PWR_TRK_5G_2],
+ .pwrtrk_5gc_n[RTW_PWR_TRK_5G_3] = rtw8814a_pwrtrk_type7_5gc_n[RTW_PWR_TRK_5G_3],
+ .pwrtrk_5gc_p[RTW_PWR_TRK_5G_1] = rtw8814a_pwrtrk_type7_5gc_p[RTW_PWR_TRK_5G_1],
+ .pwrtrk_5gc_p[RTW_PWR_TRK_5G_2] = rtw8814a_pwrtrk_type7_5gc_p[RTW_PWR_TRK_5G_2],
+ .pwrtrk_5gc_p[RTW_PWR_TRK_5G_3] = rtw8814a_pwrtrk_type7_5gc_p[RTW_PWR_TRK_5G_3],
+ .pwrtrk_5gb_n[RTW_PWR_TRK_5G_1] = rtw8814a_pwrtrk_type7_5gb_n[RTW_PWR_TRK_5G_1],
+ .pwrtrk_5gb_n[RTW_PWR_TRK_5G_2] = rtw8814a_pwrtrk_type7_5gb_n[RTW_PWR_TRK_5G_2],
+ .pwrtrk_5gb_n[RTW_PWR_TRK_5G_3] = rtw8814a_pwrtrk_type7_5gb_n[RTW_PWR_TRK_5G_3],
+ .pwrtrk_5gb_p[RTW_PWR_TRK_5G_1] = rtw8814a_pwrtrk_type7_5gb_p[RTW_PWR_TRK_5G_1],
+ .pwrtrk_5gb_p[RTW_PWR_TRK_5G_2] = rtw8814a_pwrtrk_type7_5gb_p[RTW_PWR_TRK_5G_2],
+ .pwrtrk_5gb_p[RTW_PWR_TRK_5G_3] = rtw8814a_pwrtrk_type7_5gb_p[RTW_PWR_TRK_5G_3],
+ .pwrtrk_5ga_n[RTW_PWR_TRK_5G_1] = rtw8814a_pwrtrk_type7_5ga_n[RTW_PWR_TRK_5G_1],
+ .pwrtrk_5ga_n[RTW_PWR_TRK_5G_2] = rtw8814a_pwrtrk_type7_5ga_n[RTW_PWR_TRK_5G_2],
+ .pwrtrk_5ga_n[RTW_PWR_TRK_5G_3] = rtw8814a_pwrtrk_type7_5ga_n[RTW_PWR_TRK_5G_3],
+ .pwrtrk_5ga_p[RTW_PWR_TRK_5G_1] = rtw8814a_pwrtrk_type7_5ga_p[RTW_PWR_TRK_5G_1],
+ .pwrtrk_5ga_p[RTW_PWR_TRK_5G_2] = rtw8814a_pwrtrk_type7_5ga_p[RTW_PWR_TRK_5G_2],
+ .pwrtrk_5ga_p[RTW_PWR_TRK_5G_3] = rtw8814a_pwrtrk_type7_5ga_p[RTW_PWR_TRK_5G_3],
+ .pwrtrk_2gd_n = rtw8814a_pwrtrk_type7_2gd_n,
+ .pwrtrk_2gd_p = rtw8814a_pwrtrk_type7_2gd_p,
+ .pwrtrk_2gc_n = rtw8814a_pwrtrk_type7_2gc_n,
+ .pwrtrk_2gc_p = rtw8814a_pwrtrk_type7_2gc_p,
+ .pwrtrk_2gb_n = rtw8814a_pwrtrk_type7_2gb_n,
+ .pwrtrk_2gb_p = rtw8814a_pwrtrk_type7_2gb_p,
+ .pwrtrk_2ga_n = rtw8814a_pwrtrk_type7_2ga_n,
+ .pwrtrk_2ga_p = rtw8814a_pwrtrk_type7_2ga_p,
+ .pwrtrk_2g_cckd_n = rtw8814a_pwrtrk_type7_2g_cck_d_n,
+ .pwrtrk_2g_cckd_p = rtw8814a_pwrtrk_type7_2g_cck_d_p,
+ .pwrtrk_2g_cckc_n = rtw8814a_pwrtrk_type7_2g_cck_c_n,
+ .pwrtrk_2g_cckc_p = rtw8814a_pwrtrk_type7_2g_cck_c_p,
+ .pwrtrk_2g_cckb_n = rtw8814a_pwrtrk_type7_2g_cck_b_n,
+ .pwrtrk_2g_cckb_p = rtw8814a_pwrtrk_type7_2g_cck_b_p,
+ .pwrtrk_2g_ccka_n = rtw8814a_pwrtrk_type7_2g_cck_a_n,
+ .pwrtrk_2g_ccka_p = rtw8814a_pwrtrk_type7_2g_cck_a_p,
+};
+
+static const u8
+rtw8814a_pwrtrk_type8_5gd_n[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = {
+ {0, 1, 2, 3, 4, 4, 6, 7, 7, 8, 8, 9, 10, 10, 11, 11, 12,
+ 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13},
+ {0, 1, 2, 3, 4, 4, 6, 7, 7, 8, 9, 9, 10, 10, 11, 11, 12,
+ 12, 12, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13},
+ {0, 1, 2, 3, 4, 4, 5, 6, 6, 7, 7, 8, 8, 8, 9, 10, 10,
+ 10, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11},
+};
+
+static const u8
+rtw8814a_pwrtrk_type8_5gd_p[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = {
+ {0, 0, 1, 2, 4, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 14,
+ 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15},
+ {0, 0, 1, 2, 3, 5, 5, 6, 6, 7, 8, 9, 10, 11, 12, 13, 14,
+ 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14},
+ {0, 0, 1, 2, 3, 4, 5, 6, 6, 7, 8, 9, 10, 11, 12, 13, 14,
+ 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14},
+};
+
+static const u8
+rtw8814a_pwrtrk_type8_5gc_n[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = {
+ {0, 1, 2, 4, 4, 5, 6, 7, 7, 8, 8, 9, 9, 10, 11, 11, 12,
+ 12, 12, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13},
+ {0, 1, 2, 3, 4, 5, 6, 7, 7, 8, 9, 9, 10, 10, 10, 11, 11,
+ 12, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13},
+ {0, 1, 2, 3, 4, 4, 5, 6, 7, 7, 8, 8, 9, 10, 10, 11, 11,
+ 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12},
+};
+
+static const u8
+rtw8814a_pwrtrk_type8_5gc_p[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = {
+ {0, 0, 1, 2, 4, 4, 5, 6, 7, 7, 8, 9, 10, 11, 12, 13, 14,
+ 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14},
+ {0, 0, 1, 3, 4, 4, 5, 6, 7, 7, 8, 10, 11, 12, 13, 14,
+ 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15},
+ {0, 0, 1, 2, 4, 5, 5, 6, 6, 7, 7, 9, 10, 11, 12, 13, 14,
+ 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14},
+};
+
+static const u8
+rtw8814a_pwrtrk_type8_5gb_n[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = {
+ {0, 1, 2, 3, 4, 4, 5, 6, 7, 7, 8, 8, 9, 9, 10, 11, 11,
+ 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12},
+ {0, 1, 2, 3, 4, 5, 6, 7, 7, 8, 9, 9, 10, 10, 10, 11, 11,
+ 12, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13},
+ {0, 1, 2, 3, 4, 4, 5, 6, 7, 7, 8, 8, 9, 10, 10, 11, 11,
+ 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12},
+};
+
+static const u8
+rtw8814a_pwrtrk_type8_5gb_p[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = {
+ {0, 0, 1, 2, 3, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
+ 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14},
+ {0, 0, 1, 2, 3, 4, 5, 5, 6, 7, 8, 10, 11, 11, 13, 13,
+ 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14},
+ {0, 0, 1, 2, 3, 4, 4, 5, 6, 7, 8, 10, 10, 11, 13, 13,
+ 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14},
+};
+
+static const u8
+rtw8814a_pwrtrk_type8_5ga_n[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = {
+ {0, 1, 2, 3, 4, 4, 5, 6, 6, 7, 8, 8, 9, 9, 10, 11, 11,
+ 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12},
+ {0, 1, 2, 3, 4, 5, 5, 6, 7, 8, 9, 9, 10, 10, 10, 11, 11,
+ 12, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13},
+ {0, 1, 2, 3, 4, 4, 5, 6, 7, 7, 8, 9, 10, 10, 10, 11, 12,
+ 12, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13},
+};
+
+static const u8
+rtw8814a_pwrtrk_type8_5ga_p[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = {
+ {0, 0, 1, 2, 3, 3, 4, 5, 6, 7, 7, 9, 10, 11, 12, 13, 14,
+ 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14},
+ {0, 0, 1, 2, 3, 4, 5, 5, 6, 7, 8, 9, 10, 11, 12, 13, 13,
+ 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14},
+ {0, 0, 1, 2, 3, 3, 4, 4, 6, 7, 7, 9, 10, 11, 12, 13, 14,
+ 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14},
+};
+
+static const u8 rtw8814a_pwrtrk_type8_2gd_n[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5,
+ 6, 6, 7, 8, 9, 10, 11, 11, 11, 11, 11, 11, 11
+};
+
+static const u8 rtw8814a_pwrtrk_type8_2gd_p[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 6, 6, 7, 7, 8, 9, 9, 10,
+ 10, 11, 12, 12, 13, 14, 14, 14, 14, 14, 14, 14
+};
+
+static const u8 rtw8814a_pwrtrk_type8_2gc_n[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 1, 1, 2, 2, 3, 3, 4, 5, 6, 6, 6, 7, 7, 8, 8, 9, 10,
+ 10, 11, 12, 12, 13, 13, 13, 13, 14, 14, 14, 14
+};
+
+static const u8 rtw8814a_pwrtrk_type8_2gc_p[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 1, 1, 2, 3, 3, 4, 4, 4, 5, 5, 6, 7, 8, 8, 9, 10, 10,
+ 11, 12, 13, 13, 14, 14, 14, 14, 14, 14, 14, 14
+};
+
+static const u8 rtw8814a_pwrtrk_type8_2gb_n[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 1, 1, 2, 2, 3, 3, 4, 5, 5, 6, 6, 7, 7, 8, 8, 8,
+ 9, 9, 10, 10, 11, 11, 12, 12, 12, 13, 14, 14, 14
+};
+
+static const u8 rtw8814a_pwrtrk_type8_2gb_p[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 6, 6, 7, 8, 8, 9, 9, 10,
+ 10, 11, 12, 12, 13, 13, 13, 13, 13, 14, 14, 14
+};
+
+static const u8 rtw8814a_pwrtrk_type8_2ga_n[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 0, 1, 2, 2, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 9, 9, 9,
+ 10, 11, 11, 12, 12, 13, 13, 13, 13, 13, 13, 13
+};
+
+static const u8 rtw8814a_pwrtrk_type8_2ga_p[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 1, 1, 2, 3, 3, 4, 4, 4, 5, 6, 6, 7, 7, 8, 8, 9, 10,
+ 11, 12, 12, 13, 13, 14, 14, 14, 14, 14, 14, 14
+};
+
+static const u8 rtw8814a_pwrtrk_type8_2g_cck_d_n[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 1, 1, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 7, 8,
+ 9, 9, 9, 9, 9, 9, 10, 10, 11, 11, 12, 12, 12
+};
+
+static const u8 rtw8814a_pwrtrk_type8_2g_cck_d_p[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 1, 1, 1, 2, 3, 3, 4, 4, 5, 6, 6, 7, 8, 8, 9, 10, 10,
+ 10, 11, 12, 12, 13, 14, 14, 14, 14, 14, 14, 14
+};
+
+static const u8 rtw8814a_pwrtrk_type8_2g_cck_c_n[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 0, 1, 2, 2, 3, 4, 5, 5, 6, 6, 6, 6, 7, 8, 9, 9, 10,
+ 10, 11, 11, 11, 12, 13, 13, 13, 13, 13, 13, 13
+};
+
+static const u8 rtw8814a_pwrtrk_type8_2g_cck_c_p[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 0, 1, 1, 2, 2, 3, 3, 4, 5, 5, 6, 7, 7, 8, 9, 9, 10,
+ 10, 11, 12, 12, 13, 13, 13, 13, 13, 13, 13, 13
+};
+
+static const u8 rtw8814a_pwrtrk_type8_2g_cck_b_n[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 1, 1, 2, 2, 2, 3, 3, 4, 5, 6, 6, 7, 7, 8, 8, 9, 10,
+ 10, 10, 11, 11, 12, 12, 12, 12, 12, 12, 12, 12
+};
+
+static const u8 rtw8814a_pwrtrk_type8_2g_cck_b_p[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 1, 1, 1, 2, 3, 3, 4, 4, 5, 5, 6, 7, 8, 8, 9, 9, 10,
+ 10, 11, 12, 12, 13, 13, 13, 13, 13, 13, 13, 13
+};
+
+static const u8 rtw8814a_pwrtrk_type8_2g_cck_a_n[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 1, 2, 2, 3, 4, 4, 4, 5, 5, 6, 6, 7, 8, 9, 9, 9, 9,
+ 10, 10, 11, 11, 11, 12, 12, 12, 12, 12, 12, 12
+};
+
+static const u8 rtw8814a_pwrtrk_type8_2g_cck_a_p[RTW_PWR_TRK_TBL_SZ] = {
+ 0, 0, 1, 2, 2, 3, 3, 4, 5, 5, 6, 7, 8, 9, 9, 10, 10,
+ 11, 11, 12, 12, 12, 13, 13, 13, 13, 13, 13, 13, 13
+};
+
+const struct rtw_pwr_track_tbl rtw8814a_rtw_pwrtrk_type8_tbl = {
+ .pwrtrk_5gd_n[RTW_PWR_TRK_5G_1] = rtw8814a_pwrtrk_type8_5gd_n[RTW_PWR_TRK_5G_1],
+ .pwrtrk_5gd_n[RTW_PWR_TRK_5G_2] = rtw8814a_pwrtrk_type8_5gd_n[RTW_PWR_TRK_5G_2],
+ .pwrtrk_5gd_n[RTW_PWR_TRK_5G_3] = rtw8814a_pwrtrk_type8_5gd_n[RTW_PWR_TRK_5G_3],
+ .pwrtrk_5gd_p[RTW_PWR_TRK_5G_1] = rtw8814a_pwrtrk_type8_5gd_p[RTW_PWR_TRK_5G_1],
+ .pwrtrk_5gd_p[RTW_PWR_TRK_5G_2] = rtw8814a_pwrtrk_type8_5gd_p[RTW_PWR_TRK_5G_2],
+ .pwrtrk_5gd_p[RTW_PWR_TRK_5G_3] = rtw8814a_pwrtrk_type8_5gd_p[RTW_PWR_TRK_5G_3],
+ .pwrtrk_5gc_n[RTW_PWR_TRK_5G_1] = rtw8814a_pwrtrk_type8_5gc_n[RTW_PWR_TRK_5G_1],
+ .pwrtrk_5gc_n[RTW_PWR_TRK_5G_2] = rtw8814a_pwrtrk_type8_5gc_n[RTW_PWR_TRK_5G_2],
+ .pwrtrk_5gc_n[RTW_PWR_TRK_5G_3] = rtw8814a_pwrtrk_type8_5gc_n[RTW_PWR_TRK_5G_3],
+ .pwrtrk_5gc_p[RTW_PWR_TRK_5G_1] = rtw8814a_pwrtrk_type8_5gc_p[RTW_PWR_TRK_5G_1],
+ .pwrtrk_5gc_p[RTW_PWR_TRK_5G_2] = rtw8814a_pwrtrk_type8_5gc_p[RTW_PWR_TRK_5G_2],
+ .pwrtrk_5gc_p[RTW_PWR_TRK_5G_3] = rtw8814a_pwrtrk_type8_5gc_p[RTW_PWR_TRK_5G_3],
+ .pwrtrk_5gb_n[RTW_PWR_TRK_5G_1] = rtw8814a_pwrtrk_type8_5gb_n[RTW_PWR_TRK_5G_1],
+ .pwrtrk_5gb_n[RTW_PWR_TRK_5G_2] = rtw8814a_pwrtrk_type8_5gb_n[RTW_PWR_TRK_5G_2],
+ .pwrtrk_5gb_n[RTW_PWR_TRK_5G_3] = rtw8814a_pwrtrk_type8_5gb_n[RTW_PWR_TRK_5G_3],
+ .pwrtrk_5gb_p[RTW_PWR_TRK_5G_1] = rtw8814a_pwrtrk_type8_5gb_p[RTW_PWR_TRK_5G_1],
+ .pwrtrk_5gb_p[RTW_PWR_TRK_5G_2] = rtw8814a_pwrtrk_type8_5gb_p[RTW_PWR_TRK_5G_2],
+ .pwrtrk_5gb_p[RTW_PWR_TRK_5G_3] = rtw8814a_pwrtrk_type8_5gb_p[RTW_PWR_TRK_5G_3],
+ .pwrtrk_5ga_n[RTW_PWR_TRK_5G_1] = rtw8814a_pwrtrk_type8_5ga_n[RTW_PWR_TRK_5G_1],
+ .pwrtrk_5ga_n[RTW_PWR_TRK_5G_2] = rtw8814a_pwrtrk_type8_5ga_n[RTW_PWR_TRK_5G_2],
+ .pwrtrk_5ga_n[RTW_PWR_TRK_5G_3] = rtw8814a_pwrtrk_type8_5ga_n[RTW_PWR_TRK_5G_3],
+ .pwrtrk_5ga_p[RTW_PWR_TRK_5G_1] = rtw8814a_pwrtrk_type8_5ga_p[RTW_PWR_TRK_5G_1],
+ .pwrtrk_5ga_p[RTW_PWR_TRK_5G_2] = rtw8814a_pwrtrk_type8_5ga_p[RTW_PWR_TRK_5G_2],
+ .pwrtrk_5ga_p[RTW_PWR_TRK_5G_3] = rtw8814a_pwrtrk_type8_5ga_p[RTW_PWR_TRK_5G_3],
+ .pwrtrk_2gd_n = rtw8814a_pwrtrk_type8_2gd_n,
+ .pwrtrk_2gd_p = rtw8814a_pwrtrk_type8_2gd_p,
+ .pwrtrk_2gc_n = rtw8814a_pwrtrk_type8_2gc_n,
+ .pwrtrk_2gc_p = rtw8814a_pwrtrk_type8_2gc_p,
+ .pwrtrk_2gb_n = rtw8814a_pwrtrk_type8_2gb_n,
+ .pwrtrk_2gb_p = rtw8814a_pwrtrk_type8_2gb_p,
+ .pwrtrk_2ga_n = rtw8814a_pwrtrk_type8_2ga_n,
+ .pwrtrk_2ga_p = rtw8814a_pwrtrk_type8_2ga_p,
+ .pwrtrk_2g_cckd_n = rtw8814a_pwrtrk_type8_2g_cck_d_n,
+ .pwrtrk_2g_cckd_p = rtw8814a_pwrtrk_type8_2g_cck_d_p,
+ .pwrtrk_2g_cckc_n = rtw8814a_pwrtrk_type8_2g_cck_c_n,
+ .pwrtrk_2g_cckc_p = rtw8814a_pwrtrk_type8_2g_cck_c_p,
+ .pwrtrk_2g_cckb_n = rtw8814a_pwrtrk_type8_2g_cck_b_n,
+ .pwrtrk_2g_cckb_p = rtw8814a_pwrtrk_type8_2g_cck_b_p,
+ .pwrtrk_2g_ccka_n = rtw8814a_pwrtrk_type8_2g_cck_a_n,
+ .pwrtrk_2g_ccka_p = rtw8814a_pwrtrk_type8_2g_cck_a_p,
+};
+
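The _n/_p tables above give one TX power compensation step per thermal-delta index for each RF path, split into 2G OFDM, 2G CCK and three 5G band groups. As a rough, self-contained illustration of how such a pair of tables is typically consulted (standalone C with a hypothetical helper name, not rtw88 driver code): the current thermistor reading is compared with the efuse calibration value, the absolute delta is clamped to the table length, and the sign of the delta selects the positive (_p) or negative (_n) table.

/* Hypothetical standalone helper (not rtw88 code): pick a power
 * compensation step from a _p/_n table pair for a thermal delta.
 */
#include <stdint.h>
#include <stdlib.h>

#define PWR_TRK_TBL_SZ 30	/* length of the tables above (RTW_PWR_TRK_TBL_SZ) */

static int pwrtrk_lookup(const uint8_t *tbl_p, const uint8_t *tbl_n,
			 uint8_t thermal_now, uint8_t thermal_efuse)
{
	int delta = (int)thermal_now - (int)thermal_efuse;
	unsigned int idx = abs(delta);

	if (idx >= PWR_TRK_TBL_SZ)
		idx = PWR_TRK_TBL_SZ - 1;

	/* hotter than calibration -> _p table, cooler -> negated _n entry */
	return delta >= 0 ? (int)tbl_p[idx] : -(int)tbl_n[idx];
}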
+static const struct rtw_pwr_seq_cmd init_power_on_8814a[] = {
+ {0x10c2,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_USB_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(1), BIT(1)},
+ {0xFFFF,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ 0,
+ RTW_PWR_CMD_END, 0, 0},
+};
+
+static const struct rtw_pwr_seq_cmd trans_carddis_to_cardemu_8814a[] = {
+ {0x0012,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(6), BIT(6)},
+ {0x0015,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(5), 0},
+ {0x0015,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(6), 0},
+ {0x0023,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(4), 0},
+ {0x0046,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, 0xFF, 0x00},
+ {0x0062,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, 0xFF, 0x00},
+ {0x0005,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_PCI_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(2), 0},
+ {0x0005,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(3), 0},
+ {0x0301,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_PCI_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, 0xFF, 0},
+ {0x0071,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_PCI_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(2), 0},
+ {0xFFFF,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ 0,
+ RTW_PWR_CMD_END, 0, 0},
+};
+
+static const struct rtw_pwr_seq_cmd trans_cardemu_to_act_8814a[] = {
+ {0x0005,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(2), 0},
+ {0x0006,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_POLLING, BIT(1), BIT(1)},
+ {0x0005,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(3), 0},
+ {0x00F0,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(7), 0},
+ {0x0081,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, 0x30, 0x20},
+ {0x0005,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(0), BIT(0)},
+ {0x0005,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_POLLING, BIT(0), 0},
+ {0xFFFF,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ 0,
+ RTW_PWR_CMD_END, 0, 0},
+};
+
+static const struct rtw_pwr_seq_cmd trans_act_to_cardemu_8814a[] = {
+ {0x0c00,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, 0xFF, 0x04},
+ {0x0e00,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, 0xFF, 0x04},
+ {0x1002,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(0), 0},
+ {0x0002,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_DELAY, 1, RTW_PWR_DELAY_US},
+ {0x1002,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_PCI_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(1), 0},
+ {0x001F,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, 0xFF, 0},
+ {0x0007,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, 0xFF, 0x28},
+ {0x0008,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_USB_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, 0x02, 0},
+ {0x0066,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_USB_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(7), 0},
+ {0x0041,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_USB_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(4), 0},
+ {0x0042,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_USB_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(1), 0},
+ {0x004e,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_USB_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(5), BIT(5)},
+ {0x0041,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_USB_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(0), 0},
+ {0x0005,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(1), BIT(1)},
+ {0x0005,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_POLLING, BIT(1), 0},
+ {0xFFFF,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ 0,
+ RTW_PWR_CMD_END, 0, 0},
+};
+
+static const struct rtw_pwr_seq_cmd trans_cardemu_to_carddis_8814a[] = {
+ {0x0003,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(2), 0},
+ {0x0080,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, 0xFF, 0x01},
+ {0x0081,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, 0xFF, 0x30},
+ {0x0045,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, 0xFF, 0x00},
+ {0x0046,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, 0xFF, 0xff},
+ {0x0047,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, 0xFF, 0},
+ {0x0015,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(6), BIT(6)},
+ {0x0015,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(5), BIT(5)},
+ {0x0012,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(6), 0},
+ {0x0023,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(4), BIT(4)},
+ {0x0008,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(1), 0},
+ {0x0007,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, 0xFF, 0x20},
+ {0x001f,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(1), 0},
+ {0x0020,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(1), 0},
+ {0x0021,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(1), 0},
+ {0x0076,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(1), 0},
+ {0x0091,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, 0xA0, 0xA0},
+ {0x0070,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(3), BIT(3)},
+ {0x0005,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(3), BIT(3)},
+ {0xFFFF,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ 0,
+ RTW_PWR_CMD_END, 0, 0},
+};
+
+const struct rtw_pwr_seq_cmd * const card_enable_flow_8814a[] = {
+ init_power_on_8814a,
+ trans_carddis_to_cardemu_8814a,
+ trans_cardemu_to_act_8814a,
+ NULL
+};
+
+const struct rtw_pwr_seq_cmd * const card_disable_flow_8814a[] = {
+ trans_act_to_cardemu_8814a,
+ trans_cardemu_to_carddis_8814a,
+ NULL
+};
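Each rtw_pwr_seq_cmd entry above packs {offset, cut mask, interface mask, address space, command, mask, value}; a 0xFFFF/RTW_PWR_CMD_END entry terminates each sequence, and the enable/disable flows are NULL-terminated lists of such sequences. A minimal standalone sketch of how one sequence can be walked (illustrative only; the struct layout and the printf stand-ins are assumptions, not the rtw88 core parser):

/* Standalone sketch of interpreting a power-sequence table shaped
 * like the ones above; accessors are printf stand-ins, not rtw88 APIs.
 */
#include <stdint.h>
#include <stdio.h>

enum { CMD_WRITE, CMD_POLLING, CMD_DELAY, CMD_END };

struct pwr_cmd {
	uint16_t offset;	/* register offset (ignored for DELAY) */
	uint8_t cut_mask;	/* chip cuts this entry applies to */
	uint8_t intf_mask;	/* PCI/USB/SDIO interface filter */
	uint8_t base;		/* address space, e.g. MAC */
	uint8_t cmd;		/* WRITE, POLLING, DELAY or END */
	uint8_t mask;
	uint8_t value;
};

static void walk_pwr_seq(const struct pwr_cmd *seq,
			 uint8_t my_cut, uint8_t my_intf)
{
	for (; seq->cmd != CMD_END; seq++) {
		/* skip entries meant for other chip cuts or bus types */
		if (!(seq->cut_mask & my_cut) || !(seq->intf_mask & my_intf))
			continue;

		switch (seq->cmd) {
		case CMD_WRITE:
			printf("rmw 0x%04x: keep ~0x%02x, set 0x%02x\n",
			       seq->offset, seq->mask, seq->value & seq->mask);
			break;
		case CMD_POLLING:
			printf("poll 0x%04x until (val & 0x%02x) == 0x%02x\n",
			       seq->offset, seq->mask, seq->value & seq->mask);
			break;
		case CMD_DELAY:
			/* in the DELAY entry above, the mask slot carries the
			 * count and the value slot the unit (RTW_PWR_DELAY_US)
			 */
			printf("delay %u (unit code %u)\n",
			       seq->mask, seq->value);
			break;
		}
	}
}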
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8814a_table.h b/drivers/net/wireless/realtek/rtw88/rtw8814a_table.h
new file mode 100644
index 000000000000..69fb87e36c48
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw88/rtw8814a_table.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/* Copyright(c) 2025 Realtek Corporation
+ */
+
+#ifndef __RTW8814A_TABLE_H__
+#define __RTW8814A_TABLE_H__
+
+extern const struct rtw_table rtw8814a_mac_tbl;
+extern const struct rtw_table rtw8814a_agc_tbl;
+extern const struct rtw_table rtw8814a_bb_tbl;
+extern const struct rtw_table rtw8814a_bb_pg_tbl;
+extern const struct rtw_table rtw8814a_bb_pg_type0_tbl;
+extern const struct rtw_table rtw8814a_bb_pg_type2_tbl;
+extern const struct rtw_table rtw8814a_bb_pg_type3_tbl;
+extern const struct rtw_table rtw8814a_bb_pg_type4_tbl;
+extern const struct rtw_table rtw8814a_bb_pg_type5_tbl;
+extern const struct rtw_table rtw8814a_bb_pg_type7_tbl;
+extern const struct rtw_table rtw8814a_bb_pg_type8_tbl;
+extern const struct rtw_table rtw8814a_rf_a_tbl;
+extern const struct rtw_table rtw8814a_rf_b_tbl;
+extern const struct rtw_table rtw8814a_rf_c_tbl;
+extern const struct rtw_table rtw8814a_rf_d_tbl;
+extern const struct rtw_table rtw8814a_txpwr_lmt_tbl;
+extern const struct rtw_table rtw8814a_txpwr_lmt_type0_tbl;
+extern const struct rtw_table rtw8814a_txpwr_lmt_type1_tbl;
+extern const struct rtw_table rtw8814a_txpwr_lmt_type2_tbl;
+extern const struct rtw_table rtw8814a_txpwr_lmt_type3_tbl;
+extern const struct rtw_table rtw8814a_txpwr_lmt_type5_tbl;
+extern const struct rtw_table rtw8814a_txpwr_lmt_type7_tbl;
+extern const struct rtw_table rtw8814a_txpwr_lmt_type8_tbl;
+extern const struct rtw_pwr_track_tbl rtw8814a_rtw_pwrtrk_tbl;
+extern const struct rtw_pwr_track_tbl rtw8814a_rtw_pwrtrk_type0_tbl;
+extern const struct rtw_pwr_track_tbl rtw8814a_rtw_pwrtrk_type2_tbl;
+extern const struct rtw_pwr_track_tbl rtw8814a_rtw_pwrtrk_type5_tbl;
+extern const struct rtw_pwr_track_tbl rtw8814a_rtw_pwrtrk_type7_tbl;
+extern const struct rtw_pwr_track_tbl rtw8814a_rtw_pwrtrk_type8_tbl;
+extern const struct rtw_pwr_seq_cmd * const card_disable_flow_8814a[];
+extern const struct rtw_pwr_seq_cmd * const card_enable_flow_8814a[];
+
+#endif
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8814ae.c b/drivers/net/wireless/realtek/rtw88/rtw8814ae.c
new file mode 100644
index 000000000000..54d2e20a7764
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw88/rtw8814ae.c
@@ -0,0 +1,31 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright(c) 2025 Realtek Corporation
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include "pci.h"
+#include "rtw8814a.h"
+
+static const struct pci_device_id rtw_8814ae_id_table[] = {
+ {
+ PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8813),
+ .driver_data = (kernel_ulong_t)&rtw8814a_hw_spec
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(pci, rtw_8814ae_id_table);
+
+static struct pci_driver rtw_8814ae_driver = {
+ .name = "rtw_8814ae",
+ .id_table = rtw_8814ae_id_table,
+ .probe = rtw_pci_probe,
+ .remove = rtw_pci_remove,
+ .driver.pm = &rtw_pm_ops,
+ .shutdown = rtw_pci_shutdown,
+};
+module_pci_driver(rtw_8814ae_driver);
+
+MODULE_AUTHOR("Bitterblue Smith <rtl8821cerfe2@gmail.com>");
+MODULE_DESCRIPTION("Realtek 802.11ac wireless 8814ae driver");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8814au.c b/drivers/net/wireless/realtek/rtw88/rtw8814au.c
new file mode 100644
index 000000000000..afe045fb84de
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw88/rtw8814au.c
@@ -0,0 +1,54 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright(c) 2025 Realtek Corporation
+ */
+
+#include <linux/module.h>
+#include <linux/usb.h>
+#include "main.h"
+#include "rtw8814a.h"
+#include "usb.h"
+
+static const struct usb_device_id rtw_8814au_id_table[] = {
+ { USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0x8813, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8814a_hw_spec) },
+ { USB_DEVICE_AND_INTERFACE_INFO(0x056e, 0x400b, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8814a_hw_spec) },
+ { USB_DEVICE_AND_INTERFACE_INFO(0x056e, 0x400d, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8814a_hw_spec) },
+ { USB_DEVICE_AND_INTERFACE_INFO(0x0846, 0x9054, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8814a_hw_spec) },
+ { USB_DEVICE_AND_INTERFACE_INFO(0x0b05, 0x1817, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8814a_hw_spec) },
+ { USB_DEVICE_AND_INTERFACE_INFO(0x0b05, 0x1852, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8814a_hw_spec) },
+ { USB_DEVICE_AND_INTERFACE_INFO(0x0b05, 0x1853, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8814a_hw_spec) },
+ { USB_DEVICE_AND_INTERFACE_INFO(0x0e66, 0x0026, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8814a_hw_spec) },
+ { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x331a, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8814a_hw_spec) },
+ { USB_DEVICE_AND_INTERFACE_INFO(0x20f4, 0x809a, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8814a_hw_spec) },
+ { USB_DEVICE_AND_INTERFACE_INFO(0x20f4, 0x809b, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8814a_hw_spec) },
+ { USB_DEVICE_AND_INTERFACE_INFO(0x2357, 0x0106, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8814a_hw_spec) },
+ { USB_DEVICE_AND_INTERFACE_INFO(0x7392, 0xa834, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8814a_hw_spec) },
+ { USB_DEVICE_AND_INTERFACE_INFO(0x7392, 0xa833, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8814a_hw_spec) },
+ {},
+};
+MODULE_DEVICE_TABLE(usb, rtw_8814au_id_table);
+
+static struct usb_driver rtw_8814au_driver = {
+ .name = "rtw_8814au",
+ .id_table = rtw_8814au_id_table,
+ .probe = rtw_usb_probe,
+ .disconnect = rtw_usb_disconnect,
+};
+module_usb_driver(rtw_8814au_driver);
+
+MODULE_AUTHOR("Bitterblue Smith <rtl8821cerfe2@gmail.com>");
+MODULE_DESCRIPTION("Realtek 802.11ac wireless 8814au driver");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8821c.c b/drivers/net/wireless/realtek/rtw88/rtw8821c.c
index eb7e34c545d0..0ade7f11cbd2 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8821c.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8821c.c
@@ -680,11 +680,11 @@ static void query_phy_status(struct rtw_dev *rtwdev, u8 *phy_status,
}
static void
-rtw8821c_set_tx_power_index_by_rate(struct rtw_dev *rtwdev, u8 path, u8 rs)
+rtw8821c_set_tx_power_index_by_rate(struct rtw_dev *rtwdev, u8 path,
+ u8 rs, u32 *phy_pwr_idx)
{
struct rtw_hal *hal = &rtwdev->hal;
static const u32 offset_txagc[2] = {0x1d00, 0x1d80};
- static u32 phy_pwr_idx;
u8 rate, rate_idx, pwr_index, shift;
int j;
@@ -692,12 +692,12 @@ rtw8821c_set_tx_power_index_by_rate(struct rtw_dev *rtwdev, u8 path, u8 rs)
rate = rtw_rate_section[rs][j];
pwr_index = hal->tx_pwr_tbl[path][rate];
shift = rate & 0x3;
- phy_pwr_idx |= ((u32)pwr_index << (shift * 8));
+ *phy_pwr_idx |= ((u32)pwr_index << (shift * 8));
if (shift == 0x3 || rate == DESC_RATEVHT1SS_MCS9) {
rate_idx = rate & 0xfc;
rtw_write32(rtwdev, offset_txagc[path] + rate_idx,
- phy_pwr_idx);
- phy_pwr_idx = 0;
+ *phy_pwr_idx);
+ *phy_pwr_idx = 0;
}
}
}
@@ -705,14 +705,16 @@ rtw8821c_set_tx_power_index_by_rate(struct rtw_dev *rtwdev, u8 path, u8 rs)
static void rtw8821c_set_tx_power_index(struct rtw_dev *rtwdev)
{
struct rtw_hal *hal = &rtwdev->hal;
+ u32 phy_pwr_idx = 0;
int rs, path;
for (path = 0; path < hal->rf_path_num; path++) {
- for (rs = 0; rs < RTW_RATE_SECTION_MAX; rs++) {
+ for (rs = 0; rs <= __RTW_RATE_SECTION_2SS_MAX; rs++) {
if (rs == RTW_RATE_SECTION_HT_2S ||
rs == RTW_RATE_SECTION_VHT_2S)
continue;
- rtw8821c_set_tx_power_index_by_rate(rtwdev, path, rs);
+ rtw8821c_set_tx_power_index_by_rate(rtwdev, path, rs,
+ &phy_pwr_idx);
}
}
}
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822b.c b/drivers/net/wireless/realtek/rtw88/rtw8822b.c
index 7f03903ddf4b..b4934da88e33 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8822b.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822b.c
@@ -935,11 +935,11 @@ static void query_phy_status(struct rtw_dev *rtwdev, u8 *phy_status,
}
static void
-rtw8822b_set_tx_power_index_by_rate(struct rtw_dev *rtwdev, u8 path, u8 rs)
+rtw8822b_set_tx_power_index_by_rate(struct rtw_dev *rtwdev, u8 path,
+ u8 rs, u32 *phy_pwr_idx)
{
struct rtw_hal *hal = &rtwdev->hal;
static const u32 offset_txagc[2] = {0x1d00, 0x1d80};
- static u32 phy_pwr_idx;
u8 rate, rate_idx, pwr_index, shift;
int j;
@@ -947,12 +947,12 @@ rtw8822b_set_tx_power_index_by_rate(struct rtw_dev *rtwdev, u8 path, u8 rs)
rate = rtw_rate_section[rs][j];
pwr_index = hal->tx_pwr_tbl[path][rate];
shift = rate & 0x3;
- phy_pwr_idx |= ((u32)pwr_index << (shift * 8));
+ *phy_pwr_idx |= ((u32)pwr_index << (shift * 8));
if (shift == 0x3) {
rate_idx = rate & 0xfc;
rtw_write32(rtwdev, offset_txagc[path] + rate_idx,
- phy_pwr_idx);
- phy_pwr_idx = 0;
+ *phy_pwr_idx);
+ *phy_pwr_idx = 0;
}
}
}
@@ -960,11 +960,13 @@ rtw8822b_set_tx_power_index_by_rate(struct rtw_dev *rtwdev, u8 path, u8 rs)
static void rtw8822b_set_tx_power_index(struct rtw_dev *rtwdev)
{
struct rtw_hal *hal = &rtwdev->hal;
+ u32 phy_pwr_idx = 0;
int rs, path;
for (path = 0; path < hal->rf_path_num; path++) {
- for (rs = 0; rs < RTW_RATE_SECTION_MAX; rs++)
- rtw8822b_set_tx_power_index_by_rate(rtwdev, path, rs);
+ for (rs = 0; rs <= __RTW_RATE_SECTION_2SS_MAX; rs++)
+ rtw8822b_set_tx_power_index_by_rate(rtwdev, path, rs,
+ &phy_pwr_idx);
}
}
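The rtw8821c and rtw8822b hunks above turn the function-local `static u32 phy_pwr_idx` into an accumulator owned by the caller, so a partially packed word is no longer shared between calls (and between devices driven by the same module). The packing itself is unchanged: four consecutive per-rate 8-bit power indexes are collected into one 32-bit word and flushed with a single 32-bit register write when the low two bits of the rate index reach 3 (rtw8821c additionally flushes at DESC_RATEVHT1SS_MCS9). A standalone sketch of that packing, not driver code:

/* Standalone illustration of packing four 8-bit TX power indexes
 * into one 32-bit TXAGC word, as in the loops patched above.
 */
#include <stdint.h>
#include <stdio.h>

static void write_txagc_block(const uint8_t pwr[], unsigned int nrates)
{
	uint32_t word = 0;	/* caller-scoped accumulator, starts at 0 */
	unsigned int rate;

	for (rate = 0; rate < nrates; rate++) {
		unsigned int shift = rate & 0x3;	/* byte lane 0..3 */

		word |= (uint32_t)pwr[rate] << (shift * 8);
		if (shift == 0x3) {
			/* one 32-bit write covers four consecutive rates */
			printf("write32 +0x%02x = 0x%08x\n",
			       rate & 0xfc, word);
			word = 0;
		}
	}
}

int main(void)
{
	const uint8_t pwr[8] = { 0x20, 0x22, 0x24, 0x26,
				 0x28, 0x2a, 0x2c, 0x2e };

	write_txagc_block(pwr, 8);
	return 0;
}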
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822bu.c b/drivers/net/wireless/realtek/rtw88/rtw8822bu.c
index 8883300fc6ad..572d1f31832e 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8822bu.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822bu.c
@@ -73,6 +73,10 @@ static const struct usb_device_id rtw_8822bu_id_table[] = {
.driver_info = (kernel_ulong_t)&(rtw8822b_hw_spec) }, /* ELECOM WDB-867DU3S */
{ USB_DEVICE_AND_INTERFACE_INFO(0x2c4e, 0x0107, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)&(rtw8822b_hw_spec) }, /* Mercusys MA30H */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x2c4e, 0x010a, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8822b_hw_spec) }, /* Mercusys MA30N */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x3322, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8822b_hw_spec) }, /* D-Link DWA-T185 rev. A1 */
{},
};
MODULE_DEVICE_TABLE(usb, rtw_8822bu_id_table);
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822c.c b/drivers/net/wireless/realtek/rtw88/rtw8822c.c
index ec362a817f5f..5e53e0db177e 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8822c.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822c.c
@@ -2746,7 +2746,7 @@ static void rtw8822c_set_tx_power_index(struct rtw_dev *rtwdev)
s8 diff_idx[4];
rtw8822c_set_write_tx_power_ref(rtwdev, pwr_ref_cck, pwr_ref_ofdm);
- for (rs = 0; rs < RTW_RATE_SECTION_MAX; rs++) {
+ for (rs = 0; rs <= __RTW_RATE_SECTION_2SS_MAX; rs++) {
for (j = 0; j < rtw_rate_size[rs]; j++) {
rate = rtw_rate_section[rs][j];
pwr_a = hal->tx_pwr_tbl[RF_PATH_A][rate];
diff --git a/drivers/net/wireless/realtek/rtw88/rtw88xxa.c b/drivers/net/wireless/realtek/rtw88/rtw88xxa.c
index 71e61b9c0bec..0fa943271fb6 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw88xxa.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw88xxa.c
@@ -1637,7 +1637,7 @@ void rtw88xxa_set_tx_power_index(struct rtw_dev *rtwdev)
int rs, path;
for (path = 0; path < hal->rf_path_num; path++) {
- for (rs = 0; rs < RTW_RATE_SECTION_MAX; rs++) {
+ for (rs = 0; rs <= __RTW_RATE_SECTION_2SS_MAX; rs++) {
if (hal->rf_path_num == 1 &&
(rs == RTW_RATE_SECTION_HT_2S ||
rs == RTW_RATE_SECTION_VHT_2S))
diff --git a/drivers/net/wireless/realtek/rtw88/rx.c b/drivers/net/wireless/realtek/rtw88/rx.c
index 90fc8a5fa89e..8b0afaaffaa0 100644
--- a/drivers/net/wireless/realtek/rtw88/rx.c
+++ b/drivers/net/wireless/realtek/rtw88/rx.c
@@ -73,6 +73,12 @@ static void rtw_rx_phy_stat(struct rtw_dev *rtwdev,
rate_ss_evm = 2;
evm_id = RTW_EVM_2SS_A;
break;
+ case DESC_RATEMCS16...DESC_RATEMCS23:
+ case DESC_RATEVHT3SS_MCS0...DESC_RATEVHT3SS_MCS9:
+ rate_ss = 3;
+ rate_ss_evm = 3;
+ evm_id = RTW_EVM_3SS_A;
+ break;
default:
rtw_warn(rtwdev, "unknown pkt rate = %d\n", pkt_stat->rate);
return;
diff --git a/drivers/net/wireless/realtek/rtw88/sar.c b/drivers/net/wireless/realtek/rtw88/sar.c
index c472f1502b82..50b9c2412bb1 100644
--- a/drivers/net/wireless/realtek/rtw88/sar.c
+++ b/drivers/net/wireless/realtek/rtw88/sar.c
@@ -97,7 +97,7 @@ int rtw_set_sar_specs(struct rtw_dev *rtwdev,
power, BIT(RTW_COMMON_SAR_FCT));
for (j = 0; j < RTW_RF_PATH_MAX; j++) {
- for (k = 0; k < RTW_RATE_SECTION_MAX; k++) {
+ for (k = 0; k < RTW_RATE_SECTION_NUM; k++) {
arg = (struct rtw_sar_arg){
.sar_band = idx,
.path = j,
diff --git a/drivers/net/wireless/realtek/rtw88/sdio.c b/drivers/net/wireless/realtek/rtw88/sdio.c
index e024061bdbf7..6209a49312f1 100644
--- a/drivers/net/wireless/realtek/rtw88/sdio.c
+++ b/drivers/net/wireless/realtek/rtw88/sdio.c
@@ -1147,7 +1147,7 @@ static void rtw_sdio_declaim(struct rtw_dev *rtwdev,
sdio_release_host(sdio_func);
}
-static struct rtw_hci_ops rtw_sdio_ops = {
+static const struct rtw_hci_ops rtw_sdio_ops = {
.tx_write = rtw_sdio_tx_write,
.tx_kick_off = rtw_sdio_tx_kick_off,
.setup = rtw_sdio_setup,
diff --git a/drivers/net/wireless/realtek/rtw88/usb.c b/drivers/net/wireless/realtek/rtw88/usb.c
index c4908db4ff0e..c8092fa0d9f1 100644
--- a/drivers/net/wireless/realtek/rtw88/usb.c
+++ b/drivers/net/wireless/realtek/rtw88/usb.c
@@ -881,7 +881,7 @@ static void rtw_usb_dynamic_rx_agg(struct rtw_dev *rtwdev, bool enable)
}
}
-static struct rtw_hci_ops rtw_usb_ops = {
+static const struct rtw_hci_ops rtw_usb_ops = {
.tx_write = rtw_usb_tx_write,
.tx_kick_off = rtw_usb_tx_kick_off,
.setup = rtw_usb_setup,
diff --git a/drivers/net/wireless/realtek/rtw88/util.c b/drivers/net/wireless/realtek/rtw88/util.c
index e222d3c01a77..66819f694405 100644
--- a/drivers/net/wireless/realtek/rtw88/util.c
+++ b/drivers/net/wireless/realtek/rtw88/util.c
@@ -101,7 +101,8 @@ void rtw_desc_to_mcsrate(u16 rate, u8 *mcs, u8 *nss)
*nss = 4;
*mcs = rate - DESC_RATEVHT4SS_MCS0;
} else if (rate >= DESC_RATEMCS0 &&
- rate <= DESC_RATEMCS15) {
+ rate <= DESC_RATEMCS31) {
+ *nss = 0;
*mcs = rate - DESC_RATEMCS0;
}
}
diff --git a/drivers/net/wireless/realtek/rtw89/Kconfig b/drivers/net/wireless/realtek/rtw89/Kconfig
index b1c86cdd9c0e..205d7ecca7d7 100644
--- a/drivers/net/wireless/realtek/rtw89/Kconfig
+++ b/drivers/net/wireless/realtek/rtw89/Kconfig
@@ -123,7 +123,7 @@ config RTW89_DEBUGMSG
config RTW89_DEBUGFS
bool "Realtek rtw89 debugfs support"
- depends on RTW89_CORE
+ depends on RTW89_CORE && CFG80211_DEBUGFS
select RTW89_DEBUG
help
Enable debugfs support
diff --git a/drivers/net/wireless/realtek/rtw89/cam.c b/drivers/net/wireless/realtek/rtw89/cam.c
index 8fa1e6c1ce13..eca3d767ff60 100644
--- a/drivers/net/wireless/realtek/rtw89/cam.c
+++ b/drivers/net/wireless/realtek/rtw89/cam.c
@@ -476,6 +476,12 @@ int rtw89_cam_sec_key_add(struct rtw89_dev *rtwdev,
case WLAN_CIPHER_SUITE_WEP104:
hw_key_type = RTW89_SEC_KEY_TYPE_WEP104;
break;
+ case WLAN_CIPHER_SUITE_TKIP:
+ if (!chip->hw_tkip_crypto)
+ return -EOPNOTSUPP;
+ hw_key_type = RTW89_SEC_KEY_TYPE_TKIP;
+ key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
+ break;
case WLAN_CIPHER_SUITE_CCMP:
hw_key_type = RTW89_SEC_KEY_TYPE_CCMP128;
if (!chip->hw_mgmt_tx_encrypt)
diff --git a/drivers/net/wireless/realtek/rtw89/chan.c b/drivers/net/wireless/realtek/rtw89/chan.c
index 4df4e04c3e67..f60e93870b09 100644
--- a/drivers/net/wireless/realtek/rtw89/chan.c
+++ b/drivers/net/wireless/realtek/rtw89/chan.c
@@ -8,6 +8,7 @@
#include "fw.h"
#include "mac.h"
#include "ps.h"
+#include "sar.h"
#include "util.h"
static void rtw89_swap_chanctx(struct rtw89_dev *rtwdev,
@@ -155,7 +156,7 @@ int rtw89_iterate_entity_chan(struct rtw89_dev *rtwdev,
int ret;
u8 idx;
- lockdep_assert_held(&rtwdev->mutex);
+ lockdep_assert_wiphy(rtwdev->hw->wiphy);
for_each_set_bit(idx, hal->entity_map, NUM_OF_RTW89_CHANCTX) {
chan = rtw89_chan_get(rtwdev, idx);
@@ -310,7 +311,7 @@ const struct rtw89_chan *__rtw89_mgnt_chan_get(struct rtw89_dev *rtwdev,
enum rtw89_entity_mode mode;
u8 role_index;
- lockdep_assert_held(&rtwdev->mutex);
+ lockdep_assert_wiphy(rtwdev->hw->wiphy);
if (unlikely(link_index >= __RTW89_MLD_MAX_LINK_NUM)) {
WARN(1, "link index %u is invalid (max link inst num: %d)\n",
@@ -366,7 +367,7 @@ static void rtw89_entity_recalc_mgnt_roles(struct rtw89_dev *rtwdev)
u8 pos = 0;
int i, j;
- lockdep_assert_held(&rtwdev->mutex);
+ lockdep_assert_wiphy(rtwdev->hw->wiphy);
for (i = 0; i < RTW89_MAX_INTERFACE_NUM; i++)
mgnt->active_roles[i] = NULL;
@@ -427,7 +428,7 @@ enum rtw89_entity_mode rtw89_entity_recalc(struct rtw89_dev *rtwdev)
struct rtw89_chan chan;
u8 idx;
- lockdep_assert_held(&rtwdev->mutex);
+ lockdep_assert_wiphy(rtwdev->hw->wiphy);
bitmap_copy(recalc_map, hal->entity_map, NUM_OF_RTW89_CHANCTX);
@@ -2390,7 +2391,7 @@ static void rtw89_mcc_update_limit(struct rtw89_dev *rtwdev)
rtw89_iterate_mcc_roles(rtwdev, rtw89_mcc_upd_lmt_iterator, NULL);
}
-void rtw89_chanctx_work(struct work_struct *work)
+void rtw89_chanctx_work(struct wiphy *wiphy, struct wiphy_work *work)
{
struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev,
chanctx_work.work);
@@ -2401,12 +2402,10 @@ void rtw89_chanctx_work(struct work_struct *work)
int ret;
int i;
- mutex_lock(&rtwdev->mutex);
+ lockdep_assert_wiphy(wiphy);
- if (hal->entity_pause) {
- mutex_unlock(&rtwdev->mutex);
+ if (hal->entity_pause)
return;
- }
for (i = 0; i < NUM_OF_RTW89_CHANCTX_CHANGES; i++) {
if (test_and_clear_bit(i, hal->changes))
@@ -2445,8 +2444,6 @@ void rtw89_chanctx_work(struct work_struct *work)
default:
break;
}
-
- mutex_unlock(&rtwdev->mutex);
}
void rtw89_queue_chanctx_change(struct rtw89_dev *rtwdev,
@@ -2477,8 +2474,8 @@ void rtw89_queue_chanctx_change(struct rtw89_dev *rtwdev,
rtw89_debug(rtwdev, RTW89_DBG_CHAN,
"queue chanctx work for mode %d with delay %d us\n",
mode, delay);
- ieee80211_queue_delayed_work(rtwdev->hw, &rtwdev->chanctx_work,
- usecs_to_jiffies(delay));
+ wiphy_delayed_work_queue(rtwdev->hw->wiphy, &rtwdev->chanctx_work,
+ usecs_to_jiffies(delay));
}
void rtw89_queue_chanctx_work(struct rtw89_dev *rtwdev)
@@ -2491,7 +2488,7 @@ void rtw89_chanctx_track(struct rtw89_dev *rtwdev)
struct rtw89_hal *hal = &rtwdev->hal;
enum rtw89_entity_mode mode;
- lockdep_assert_held(&rtwdev->mutex);
+ lockdep_assert_wiphy(rtwdev->hw->wiphy);
if (hal->entity_pause)
return;
@@ -2512,7 +2509,7 @@ void rtw89_chanctx_pause(struct rtw89_dev *rtwdev,
struct rtw89_hal *hal = &rtwdev->hal;
enum rtw89_entity_mode mode;
- lockdep_assert_held(&rtwdev->mutex);
+ lockdep_assert_wiphy(rtwdev->hw->wiphy);
if (hal->entity_pause)
return;
@@ -2555,7 +2552,7 @@ void rtw89_chanctx_proceed(struct rtw89_dev *rtwdev,
enum rtw89_entity_mode mode;
int ret;
- lockdep_assert_held(&rtwdev->mutex);
+ lockdep_assert_wiphy(rtwdev->hw->wiphy);
if (unlikely(!hal->entity_pause)) {
rtw89_chanctx_proceed_cb(rtwdev, cb_parm);
@@ -2677,6 +2674,7 @@ int rtw89_chanctx_ops_assign_vif(struct rtw89_dev *rtwdev,
struct rtw89_hal *hal = &rtwdev->hal;
struct rtw89_entity_mgnt *mgnt = &hal->entity_mgnt;
struct rtw89_entity_weight w = {};
+ int ret;
rtwvif_link->chanctx_idx = cfg->idx;
rtwvif_link->chanctx_assigned = true;
@@ -2696,7 +2694,13 @@ int rtw89_chanctx_ops_assign_vif(struct rtw89_dev *rtwdev,
rtw89_swap_chanctx(rtwdev, cfg->idx, RTW89_CHANCTX_0);
out:
- return rtw89_set_channel(rtwdev);
+ ret = rtw89_set_channel(rtwdev);
+ if (ret)
+ return ret;
+
+ rtw89_tas_reset(rtwdev, true);
+
+ return 0;
}
void rtw89_chanctx_ops_unassign_vif(struct rtw89_dev *rtwdev,
diff --git a/drivers/net/wireless/realtek/rtw89/chan.h b/drivers/net/wireless/realtek/rtw89/chan.h
index 092a6f676894..e6391f6f2aa7 100644
--- a/drivers/net/wireless/realtek/rtw89/chan.h
+++ b/drivers/net/wireless/realtek/rtw89/chan.h
@@ -99,7 +99,7 @@ void rtw89_config_roc_chandef(struct rtw89_dev *rtwdev,
const struct cfg80211_chan_def *chandef);
void rtw89_entity_init(struct rtw89_dev *rtwdev);
enum rtw89_entity_mode rtw89_entity_recalc(struct rtw89_dev *rtwdev);
-void rtw89_chanctx_work(struct work_struct *work);
+void rtw89_chanctx_work(struct wiphy *wiphy, struct wiphy_work *work);
void rtw89_queue_chanctx_work(struct rtw89_dev *rtwdev);
void rtw89_queue_chanctx_change(struct rtw89_dev *rtwdev,
enum rtw89_chanctx_changes change);
diff --git a/drivers/net/wireless/realtek/rtw89/coex.c b/drivers/net/wireless/realtek/rtw89/coex.c
index 68316d44b204..5ccf0cbaed2f 100644
--- a/drivers/net/wireless/realtek/rtw89/coex.c
+++ b/drivers/net/wireless/realtek/rtw89/coex.c
@@ -10,7 +10,7 @@
#include "ps.h"
#include "reg.h"
-#define RTW89_COEX_VERSION 0x07000113
+#define RTW89_COEX_VERSION 0x07000413
#define FCXDEF_STEP 50 /* MUST <= FCXMAX_STEP and match with wl fw*/
#define BTC_E2G_LIMIT_DEF 80
@@ -89,10 +89,10 @@ static const struct rtw89_btc_fbtc_slot s_def[] = {
[CXST_B4] = __DEF_FBTC_SLOT(50, 0xe5555555, SLOT_MIX),
[CXST_LK] = __DEF_FBTC_SLOT(20, 0xea5a5a5a, SLOT_ISO),
[CXST_BLK] = __DEF_FBTC_SLOT(500, 0x55555555, SLOT_MIX),
- [CXST_E2G] = __DEF_FBTC_SLOT(0, 0xea5a5a5a, SLOT_MIX),
- [CXST_E5G] = __DEF_FBTC_SLOT(0, 0xffffffff, SLOT_ISO),
+ [CXST_E2G] = __DEF_FBTC_SLOT(5, 0xea5a5a5a, SLOT_MIX),
+ [CXST_E5G] = __DEF_FBTC_SLOT(5, 0xffffffff, SLOT_ISO),
[CXST_EBT] = __DEF_FBTC_SLOT(5, 0xe5555555, SLOT_MIX),
- [CXST_ENULL] = __DEF_FBTC_SLOT(0, 0xaaaaaaaa, SLOT_ISO),
+ [CXST_ENULL] = __DEF_FBTC_SLOT(5, 0xaaaaaaaa, SLOT_ISO),
[CXST_WLK] = __DEF_FBTC_SLOT(250, 0xea5a5a5a, SLOT_MIX),
[CXST_W1FDD] = __DEF_FBTC_SLOT(50, 0xffffffff, SLOT_ISO),
[CXST_B1FDD] = __DEF_FBTC_SLOT(50, 0xffffdfff, SLOT_ISO),
@@ -132,6 +132,14 @@ static const u32 cxtbl[] = {
static const struct rtw89_btc_ver rtw89_btc_ver_defs[] = {
/* firmware version must be in decreasing order for each chip */
+ {RTL8852BT, RTW89_FW_VER_CODE(0, 29, 122, 0),
+ .fcxbtcrpt = 8, .fcxtdma = 7, .fcxslots = 7, .fcxcysta = 7,
+ .fcxstep = 7, .fcxnullsta = 7, .fcxmreg = 7, .fcxgpiodbg = 7,
+ .fcxbtver = 7, .fcxbtscan = 7, .fcxbtafh = 7, .fcxbtdevinfo = 7,
+ .fwlrole = 7, .frptmap = 3, .fcxctrl = 7, .fcxinit = 7,
+ .fwevntrptl = 1, .fwc2hfunc = 2, .drvinfo_type = 1, .info_buf = 1800,
+ .max_role_num = 6,
+ },
{RTL8852BT, RTW89_FW_VER_CODE(0, 29, 90, 0),
.fcxbtcrpt = 7, .fcxtdma = 7, .fcxslots = 7, .fcxcysta = 7,
.fcxstep = 7, .fcxnullsta = 7, .fcxmreg = 7, .fcxgpiodbg = 7,
@@ -1372,11 +1380,9 @@ static u32 _chk_btc_report(struct rtw89_dev *rtwdev,
} else if (ver->fcxbtcrpt == 8) {
pfinfo = &pfwinfo->rpt_ctrl.finfo.v8;
pcinfo->req_len = sizeof(pfwinfo->rpt_ctrl.finfo.v8);
- break;
} else if (ver->fcxbtcrpt == 7) {
pfinfo = &pfwinfo->rpt_ctrl.finfo.v7;
pcinfo->req_len = sizeof(pfwinfo->rpt_ctrl.finfo.v7);
- break;
} else {
goto err;
}
@@ -1534,6 +1540,9 @@ static u32 _chk_btc_report(struct rtw89_dev *rtwdev,
} else if (ver->fcxbtafh == 2) {
pfinfo = &pfwinfo->rpt_fbtc_btafh.finfo.v2;
pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_btafh.finfo.v2);
+ } else if (ver->fcxbtafh == 7) {
+ pfinfo = &pfwinfo->rpt_fbtc_btafh.finfo.v7;
+ pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_btafh.finfo.v7);
} else {
goto err;
}
@@ -1602,7 +1611,7 @@ static u32 _chk_btc_report(struct rtw89_dev *rtwdev,
wl->ver_info.fw = le32_to_cpu(prpt->v4.wl_fw_info.fw_ver);
dm->wl_fw_cx_offload = !!le32_to_cpu(prpt->v4.wl_fw_info.cx_offload);
- for (i = RTW89_PHY_0; i < RTW89_PHY_MAX; i++)
+ for (i = RTW89_PHY_0; i < RTW89_PHY_NUM; i++)
memcpy(&dm->gnt.band[i], &prpt->v4.gnt_val[i],
sizeof(dm->gnt.band[i]));
@@ -1634,7 +1643,7 @@ static u32 _chk_btc_report(struct rtw89_dev *rtwdev,
wl->ver_info.fw = le32_to_cpu(prpt->v5.rpt_info.fw_ver);
dm->wl_fw_cx_offload = 0;
- for (i = RTW89_PHY_0; i < RTW89_PHY_MAX; i++)
+ for (i = RTW89_PHY_0; i < RTW89_PHY_NUM; i++)
memcpy(&dm->gnt.band[i], &prpt->v5.gnt_val[i][0],
sizeof(dm->gnt.band[i]));
@@ -1661,7 +1670,7 @@ static u32 _chk_btc_report(struct rtw89_dev *rtwdev,
wl->ver_info.fw = le32_to_cpu(prpt->v105.rpt_info.fw_ver);
dm->wl_fw_cx_offload = 0;
- for (i = RTW89_PHY_0; i < RTW89_PHY_MAX; i++)
+ for (i = RTW89_PHY_0; i < RTW89_PHY_NUM; i++)
memcpy(&dm->gnt.band[i], &prpt->v105.gnt_val[i][0],
sizeof(dm->gnt.band[i]));
@@ -1687,7 +1696,7 @@ static u32 _chk_btc_report(struct rtw89_dev *rtwdev,
wl->ver_info.fw_coex = le32_to_cpu(prpt->v7.rpt_info.cx_ver);
wl->ver_info.fw = le32_to_cpu(prpt->v7.rpt_info.fw_ver);
- for (i = RTW89_PHY_0; i < RTW89_PHY_MAX; i++)
+ for (i = RTW89_PHY_0; i < RTW89_PHY_NUM; i++)
memcpy(&dm->gnt.band[i], &prpt->v7.gnt_val[i][0],
sizeof(dm->gnt.band[i]));
@@ -1719,7 +1728,7 @@ static u32 _chk_btc_report(struct rtw89_dev *rtwdev,
wl->ver_info.fw_coex = le32_to_cpu(prpt->v8.rpt_info.cx_ver);
wl->ver_info.fw = le32_to_cpu(prpt->v8.rpt_info.fw_ver);
- for (i = RTW89_PHY_0; i < RTW89_PHY_MAX; i++)
+ for (i = RTW89_PHY_0; i < RTW89_PHY_NUM; i++)
memcpy(&dm->gnt.band[i], &prpt->v8.gnt_val[i][0],
sizeof(dm->gnt.band[i]));
@@ -2706,7 +2715,7 @@ static void _set_gnt(struct rtw89_dev *rtwdev, u8 phy_map, u8 wl_state, u8 bt_st
if (phy_map > BTC_PHY_ALL)
return;
- for (i = 0; i < RTW89_PHY_MAX; i++) {
+ for (i = 0; i < RTW89_PHY_NUM; i++) {
if (!(phy_map & BIT(i)))
continue;
@@ -2755,7 +2764,7 @@ static void _set_gnt_v1(struct rtw89_dev *rtwdev, u8 phy_map,
if (phy_map > BTC_PHY_ALL)
return;
- for (i = 0; i < RTW89_PHY_MAX; i++) {
+ for (i = 0; i < RTW89_PHY_NUM; i++) {
if (!(phy_map & BIT(i)))
continue;
@@ -2955,7 +2964,7 @@ static void _set_rf_trx_para(struct rtw89_dev *rtwdev)
if (ver->fwlrole == 0) {
link_mode = wl->role_info.link_mode;
- for (i = 0; i < RTW89_PHY_MAX; i++) {
+ for (i = 0; i < RTW89_PHY_NUM; i++) {
if (wl->dbcc_info.real_band[i] == RTW89_BAND_2G)
dbcc_2g_phy = i;
}
@@ -4240,7 +4249,7 @@ static void _set_ant_v0(struct rtw89_dev *rtwdev, bool force_exec,
case BTC_ANT_W2G:
rtw89_chip_cfg_ctrl_path(rtwdev, BTC_CTRL_BY_WL);
if (rtwdev->dbcc_en) {
- for (i = 0; i < RTW89_PHY_MAX; i++) {
+ for (i = 0; i < RTW89_PHY_NUM; i++) {
b2g = (wl_dinfo->real_band[i] == RTW89_BAND_2G);
gnt_wl_ctrl = b2g ? BTC_GNT_HW : BTC_GNT_SW_HI;
@@ -4583,23 +4592,16 @@ static void _action_bt_hid(struct rtw89_dev *rtwdev)
static void _action_bt_a2dp(struct rtw89_dev *rtwdev)
{
struct rtw89_btc *btc = &rtwdev->btc;
- struct rtw89_btc_bt_link_info *bt_linfo = &btc->cx.bt.link_info;
- struct rtw89_btc_bt_a2dp_desc a2dp = bt_linfo->a2dp_desc;
struct rtw89_btc_dm *dm = &btc->dm;
_set_ant(rtwdev, NM_EXEC, BTC_PHY_ALL, BTC_ANT_W2G);
+ dm->slot_dur[CXST_W1] = 20;
+ dm->slot_dur[CXST_B1] = BTC_B1_MAX;
+
switch (btc->cx.state_map) {
case BTC_WBUSY_BNOSCAN: /* wl-busy + bt-A2DP */
- if (a2dp.vendor_id == 0x4c || dm->leak_ap) {
- dm->slot_dur[CXST_W1] = 40;
- dm->slot_dur[CXST_B1] = 200;
- _set_policy(rtwdev,
- BTC_CXP_PAUTO_TDW1B1, BTC_ACT_BT_A2DP);
- } else {
- _set_policy(rtwdev,
- BTC_CXP_PAUTO_TD50B1, BTC_ACT_BT_A2DP);
- }
+ _set_policy(rtwdev, BTC_CXP_PAUTO_TDW1B1, BTC_ACT_BT_A2DP);
break;
case BTC_WBUSY_BSCAN: /* wl-busy + bt-inq + bt-A2DP */
_set_policy(rtwdev, BTC_CXP_PAUTO2_TD3050, BTC_ACT_BT_A2DP);
@@ -4609,15 +4611,10 @@ static void _action_bt_a2dp(struct rtw89_dev *rtwdev)
break;
case BTC_WSCAN_BNOSCAN: /* wl-scan + bt-A2DP */
case BTC_WLINKING: /* wl-connecting + bt-A2DP */
- if (a2dp.vendor_id == 0x4c || dm->leak_ap) {
- dm->slot_dur[CXST_W1] = 40;
- dm->slot_dur[CXST_B1] = 200;
- _set_policy(rtwdev, BTC_CXP_AUTO_TDW1B1,
- BTC_ACT_BT_A2DP);
- } else {
- _set_policy(rtwdev, BTC_CXP_AUTO_TD50B1,
- BTC_ACT_BT_A2DP);
- }
+ if (btc->cx.wl.rfk_info.con_rfk)
+ _set_policy(rtwdev, BTC_CXP_OFF_BT, BTC_ACT_BT_A2DP);
+ else
+ _set_policy(rtwdev, BTC_CXP_AUTO_TDW1B1, BTC_ACT_BT_A2DP);
break;
case BTC_WIDLE: /* wl-idle + bt-A2DP */
_set_policy(rtwdev, BTC_CXP_AUTO_TD20B1, BTC_ACT_BT_A2DP);
@@ -4645,7 +4642,10 @@ static void _action_bt_a2dpsink(struct rtw89_dev *rtwdev)
_set_policy(rtwdev, BTC_CXP_FIX_TD2060, BTC_ACT_BT_A2DPSINK);
break;
case BTC_WLINKING: /* wl-connecting + bt-A2dp_Sink */
- _set_policy(rtwdev, BTC_CXP_FIX_TD3030, BTC_ACT_BT_A2DPSINK);
+ if (btc->cx.wl.rfk_info.con_rfk)
+ _set_policy(rtwdev, BTC_CXP_OFF_BT, BTC_ACT_BT_A2DPSINK);
+ else
+ _set_policy(rtwdev, BTC_CXP_FIX_TD3030, BTC_ACT_BT_A2DPSINK);
break;
case BTC_WIDLE: /* wl-idle + bt-A2dp_Sink */
_set_policy(rtwdev, BTC_CXP_FIX_TD2080, BTC_ACT_BT_A2DPSINK);
@@ -4693,27 +4693,20 @@ static void _action_bt_pan(struct rtw89_dev *rtwdev)
static void _action_bt_a2dp_hid(struct rtw89_dev *rtwdev)
{
struct rtw89_btc *btc = &rtwdev->btc;
- struct rtw89_btc_bt_link_info *bt_linfo = &btc->cx.bt.link_info;
- struct rtw89_btc_bt_a2dp_desc a2dp = bt_linfo->a2dp_desc;
struct rtw89_btc_dm *dm = &btc->dm;
_set_ant(rtwdev, NM_EXEC, BTC_PHY_ALL, BTC_ANT_W2G);
+ dm->slot_dur[CXST_W1] = 20;
+ dm->slot_dur[CXST_B1] = BTC_B1_MAX;
+
switch (btc->cx.state_map) {
case BTC_WBUSY_BNOSCAN: /* wl-busy + bt-A2DP+HID */
case BTC_WIDLE: /* wl-idle + bt-A2DP */
- if (a2dp.vendor_id == 0x4c || dm->leak_ap) {
- dm->slot_dur[CXST_W1] = 40;
- dm->slot_dur[CXST_B1] = 200;
- _set_policy(rtwdev,
- BTC_CXP_PAUTO_TDW1B1, BTC_ACT_BT_A2DP_HID);
- } else {
- _set_policy(rtwdev,
- BTC_CXP_PAUTO_TD50B1, BTC_ACT_BT_A2DP_HID);
- }
+ _set_policy(rtwdev, BTC_CXP_PAUTO_TDW1B1, BTC_ACT_BT_A2DP_HID);
break;
case BTC_WBUSY_BSCAN: /* wl-busy + bt-inq + bt-A2DP+HID */
- _set_policy(rtwdev, BTC_CXP_PAUTO2_TD3050, BTC_ACT_BT_A2DP_HID);
+ _set_policy(rtwdev, BTC_CXP_PAUTO2_TD3070, BTC_ACT_BT_A2DP_HID);
break;
case BTC_WSCAN_BSCAN: /* wl-scan + bt-inq + bt-A2DP+HID */
@@ -4721,15 +4714,10 @@ static void _action_bt_a2dp_hid(struct rtw89_dev *rtwdev)
break;
case BTC_WSCAN_BNOSCAN: /* wl-scan + bt-A2DP+HID */
case BTC_WLINKING: /* wl-connecting + bt-A2DP+HID */
- if (a2dp.vendor_id == 0x4c || dm->leak_ap) {
- dm->slot_dur[CXST_W1] = 40;
- dm->slot_dur[CXST_B1] = 200;
- _set_policy(rtwdev, BTC_CXP_AUTO_TDW1B1,
- BTC_ACT_BT_A2DP_HID);
- } else {
- _set_policy(rtwdev, BTC_CXP_AUTO_TD50B1,
- BTC_ACT_BT_A2DP_HID);
- }
+ if (btc->cx.wl.rfk_info.con_rfk)
+ _set_policy(rtwdev, BTC_CXP_OFF_BT, BTC_ACT_BT_A2DP_HID);
+ else
+ _set_policy(rtwdev, BTC_CXP_AUTO_TDW1B1, BTC_ACT_BT_A2DP_HID);
break;
}
}
@@ -4905,9 +4893,9 @@ static void _set_btg_ctrl(struct rtw89_dev *rtwdev)
if (rtwdev->dbcc_en) {
if (ver->fwlrole == 0) {
- wl_rinfo.dbcc_2g_phy = RTW89_PHY_MAX;
+ wl_rinfo.dbcc_2g_phy = RTW89_PHY_NUM;
- for (i = 0; i < RTW89_PHY_MAX; i++) {
+ for (i = 0; i < RTW89_PHY_NUM; i++) {
if (wl_dinfo->real_band[i] == RTW89_BAND_2G)
wl_rinfo.dbcc_2g_phy = i;
}
@@ -5408,7 +5396,8 @@ static void _action_wl_scan(struct rtw89_dev *rtwdev)
struct rtw89_btc_wl_info *wl = &btc->cx.wl;
struct rtw89_btc_wl_dbcc_info *wl_dinfo = &wl->dbcc_info;
- if (RTW89_CHK_FW_FEATURE(SCAN_OFFLOAD, &rtwdev->fw)) {
+ if (btc->cx.state_map != BTC_WLINKING &&
+ RTW89_CHK_FW_FEATURE(SCAN_OFFLOAD, &rtwdev->fw)) {
_action_wl_25g_mcc(rtwdev);
rtw89_debug(rtwdev, RTW89_DBG_BTC, "[BTC], Scan offload!\n");
} else if (rtwdev->dbcc_en) {
@@ -5798,7 +5787,7 @@ static void _update_wl_info(struct rtw89_dev *rtwdev)
phy = wl_linfo[i].phy;
/* check dbcc role */
- if (rtwdev->dbcc_en && phy < RTW89_PHY_MAX) {
+ if (rtwdev->dbcc_en && phy < RTW89_PHY_NUM) {
wl_dinfo->role[phy] = wl_linfo[i].role;
wl_dinfo->op_band[phy] = wl_linfo[i].band;
_update_dbcc_band(rtwdev, phy);
@@ -5948,7 +5937,7 @@ static void _update_wl_info_v1(struct rtw89_dev *rtwdev)
phy = wl_linfo[i].phy;
- if (rtwdev->dbcc_en && phy < RTW89_PHY_MAX) {
+ if (rtwdev->dbcc_en && phy < RTW89_PHY_NUM) {
wl_dinfo->role[phy] = wl_linfo[i].role;
wl_dinfo->op_band[phy] = wl_linfo[i].band;
_update_dbcc_band(rtwdev, phy);
@@ -6098,7 +6087,7 @@ static void _update_wl_info_v2(struct rtw89_dev *rtwdev)
phy = wl_linfo[i].phy;
- if (rtwdev->dbcc_en && phy < RTW89_PHY_MAX) {
+ if (rtwdev->dbcc_en && phy < RTW89_PHY_NUM) {
wl_dinfo->role[phy] = wl_linfo[i].role;
wl_dinfo->op_band[phy] = wl_linfo[i].band;
_update_dbcc_band(rtwdev, phy);
@@ -6389,7 +6378,7 @@ static void _update_wl_info_v7(struct rtw89_dev *rtwdev, u8 rid)
struct rtw89_btc_wl_dbcc_info *wl_dinfo = &wl->dbcc_info;
struct rtw89_btc_wl_link_info *wl_linfo = wl->link_info;
struct rtw89_btc_wl_active_role_v7 *act_role = NULL;
- u8 i, mode, cnt = 0, cnt_2g = 0, cnt_5g = 0, phy_now = RTW89_PHY_MAX, phy_dbcc;
+ u8 i, mode, cnt = 0, cnt_2g = 0, cnt_5g = 0, phy_now = RTW89_PHY_NUM, phy_dbcc;
bool b2g = false, b5g = false, client_joined = false, client_inc_2g = false;
u8 client_cnt_last[RTW89_BE_BTC_WL_MAX_ROLE_NUMBER] = {};
u8 cid_role[RTW89_BE_BTC_WL_MAX_ROLE_NUMBER] = {};
@@ -6400,7 +6389,7 @@ static void _update_wl_info_v7(struct rtw89_dev *rtwdev, u8 rid)
memset(wl_rinfo, 0, sizeof(*wl_rinfo));
for (i = 0; i < RTW89_PORT_NUM; i++) {
- if (!wl_linfo[i].active || wl_linfo[i].phy >= RTW89_PHY_MAX)
+ if (!wl_linfo[i].active || wl_linfo[i].phy >= RTW89_PHY_NUM)
continue;
act_role = &wl_rinfo->active_role[i];
@@ -6494,7 +6483,7 @@ static void _update_wl_info_v7(struct rtw89_dev *rtwdev, u8 rid)
mode = _chk_dbcc(rtwdev, cid_ch, cid_phy, cid_role, &dbcc_2g_phy);
/* correct 2G-located PHY band for gnt ctrl */
- if (dbcc_2g_phy < RTW89_PHY_MAX)
+ if (dbcc_2g_phy < RTW89_PHY_NUM)
wl_dinfo->op_band[dbcc_2g_phy] = RTW89_BAND_2G;
} else if (b2g && b5g && cnt == 2) {
mode = BTC_WLINK_25G_MCC;
@@ -6720,7 +6709,7 @@ static void _update_wl_info_v8(struct rtw89_dev *rtwdev, u8 role_id, u8 rlink_id
_fw_set_drv_info(rtwdev, CXDRVINFO_ROLE);
}
-void rtw89_coex_act1_work(struct work_struct *work)
+void rtw89_coex_act1_work(struct wiphy *wiphy, struct wiphy_work *work)
{
struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev,
coex_act1_work.work);
@@ -6729,7 +6718,8 @@ void rtw89_coex_act1_work(struct work_struct *work)
struct rtw89_btc_cx *cx = &btc->cx;
struct rtw89_btc_wl_info *wl = &cx->wl;
- mutex_lock(&rtwdev->mutex);
+ lockdep_assert_wiphy(wiphy);
+
rtw89_debug(rtwdev, RTW89_DBG_BTC, "[BTC], %s(): enter\n", __func__);
dm->cnt_notify[BTC_NCNT_TIMER]++;
if (wl->status.map._4way)
@@ -6738,10 +6728,9 @@ void rtw89_coex_act1_work(struct work_struct *work)
wl->status.map.connecting = false;
_run_coex(rtwdev, BTC_RSN_ACT1_WORK);
- mutex_unlock(&rtwdev->mutex);
}
-void rtw89_coex_bt_devinfo_work(struct work_struct *work)
+void rtw89_coex_bt_devinfo_work(struct wiphy *wiphy, struct wiphy_work *work)
{
struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev,
coex_bt_devinfo_work.work);
@@ -6749,15 +6738,15 @@ void rtw89_coex_bt_devinfo_work(struct work_struct *work)
struct rtw89_btc_dm *dm = &rtwdev->btc.dm;
struct rtw89_btc_bt_a2dp_desc *a2dp = &btc->cx.bt.link_info.a2dp_desc;
- mutex_lock(&rtwdev->mutex);
+ lockdep_assert_wiphy(wiphy);
+
rtw89_debug(rtwdev, RTW89_DBG_BTC, "[BTC], %s(): enter\n", __func__);
dm->cnt_notify[BTC_NCNT_TIMER]++;
a2dp->play_latency = 0;
_run_coex(rtwdev, BTC_RSN_BT_DEVINFO_WORK);
- mutex_unlock(&rtwdev->mutex);
}
-void rtw89_coex_rfk_chk_work(struct work_struct *work)
+void rtw89_coex_rfk_chk_work(struct wiphy *wiphy, struct wiphy_work *work)
{
struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev,
coex_rfk_chk_work.work);
@@ -6766,7 +6755,8 @@ void rtw89_coex_rfk_chk_work(struct work_struct *work)
struct rtw89_btc_cx *cx = &btc->cx;
struct rtw89_btc_wl_info *wl = &cx->wl;
- mutex_lock(&rtwdev->mutex);
+ lockdep_assert_wiphy(wiphy);
+
rtw89_debug(rtwdev, RTW89_DBG_BTC, "[BTC], %s(): enter\n", __func__);
dm->cnt_notify[BTC_NCNT_TIMER]++;
if (wl->rfk_info.state != BTC_WRFK_STOP) {
@@ -6778,7 +6768,6 @@ void rtw89_coex_rfk_chk_work(struct work_struct *work)
_write_scbd(rtwdev, BTC_WSCB_WLRFK, false);
_run_coex(rtwdev, BTC_RSN_RFK_CHK_WORK);
}
- mutex_unlock(&rtwdev->mutex);
}
static void _update_bt_scbd(struct rtw89_dev *rtwdev, bool only_update)
@@ -6882,7 +6871,7 @@ void _run_coex(struct rtw89_dev *rtwdev, enum btc_reason_and_action reason)
struct rtw89_btc_wl_role_info_v8 *wl_rinfo_v8 = &wl->role_info_v8;
u8 mode, igno_bt, always_freerun;
- lockdep_assert_held(&rtwdev->mutex);
+ lockdep_assert_wiphy(rtwdev->hw->wiphy);
dm->run_reason = reason;
_update_dm_step(rtwdev, reason);
@@ -7002,7 +6991,7 @@ void _run_coex(struct rtw89_dev *rtwdev, enum btc_reason_and_action reason)
goto exit;
}
- if (wl->status.val & btc_scanning_map.val) {
+ if (wl->status.val & btc_scanning_map.val && !wl->rfk_info.con_rfk) {
_action_wl_scan(rtwdev);
bt->scan_rx_low_pri = true;
goto exit;
@@ -7187,7 +7176,7 @@ void rtw89_btc_ntfy_scan_start(struct rtw89_dev *rtwdev, u8 phy_idx, u8 band)
"[BTC], %s(): phy_idx=%d, band=%d\n",
__func__, phy_idx, band);
- if (phy_idx >= RTW89_PHY_MAX)
+ if (phy_idx >= RTW89_PHY_NUM)
return;
btc->dm.cnt_notify[BTC_NCNT_SCAN_START]++;
@@ -7223,6 +7212,8 @@ void rtw89_btc_ntfy_scan_finish(struct rtw89_dev *rtwdev, u8 phy_idx)
_fw_set_drv_info(rtwdev, CXDRVINFO_DBCC);
}
+ btc->dm.tdma_instant_excute = 1;
+
_run_coex(rtwdev, BTC_RSN_NTFY_SCAN_FINISH);
}
@@ -7235,7 +7226,7 @@ void rtw89_btc_ntfy_switch_band(struct rtw89_dev *rtwdev, u8 phy_idx, u8 band)
"[BTC], %s(): phy_idx=%d, band=%d\n",
__func__, phy_idx, band);
- if (phy_idx >= RTW89_PHY_MAX)
+ if (phy_idx >= RTW89_PHY_NUM)
return;
btc->dm.cnt_notify[BTC_NCNT_SWITCH_BAND]++;
@@ -7284,7 +7275,7 @@ void rtw89_btc_ntfy_specific_packet(struct rtw89_dev *rtwdev,
"[BTC], %s(): EAPOL_End cnt=%d\n",
__func__, cnt);
wl->status.map._4way = false;
- cancel_delayed_work(&rtwdev->coex_act1_work);
+ wiphy_delayed_work_cancel(rtwdev->hw->wiphy, &rtwdev->coex_act1_work);
break;
case PACKET_ARP:
cnt = ++cx->cnt_wl[BTC_WCNT_ARP];
@@ -7303,56 +7294,56 @@ void rtw89_btc_ntfy_specific_packet(struct rtw89_dev *rtwdev,
}
if (delay_work) {
- cancel_delayed_work(&rtwdev->coex_act1_work);
- ieee80211_queue_delayed_work(rtwdev->hw,
- &rtwdev->coex_act1_work, delay);
+ wiphy_delayed_work_cancel(rtwdev->hw->wiphy, &rtwdev->coex_act1_work);
+ wiphy_delayed_work_queue(rtwdev->hw->wiphy,
+ &rtwdev->coex_act1_work, delay);
}
btc->dm.cnt_notify[BTC_NCNT_SPECIAL_PACKET]++;
_run_coex(rtwdev, BTC_RSN_NTFY_SPECIFIC_PACKET);
}
-void rtw89_btc_ntfy_eapol_packet_work(struct work_struct *work)
+void rtw89_btc_ntfy_eapol_packet_work(struct wiphy *wiphy, struct wiphy_work *work)
{
struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev,
btc.eapol_notify_work);
- mutex_lock(&rtwdev->mutex);
+ lockdep_assert_wiphy(wiphy);
+
rtw89_leave_ps_mode(rtwdev);
rtw89_btc_ntfy_specific_packet(rtwdev, PACKET_EAPOL);
- mutex_unlock(&rtwdev->mutex);
}
-void rtw89_btc_ntfy_arp_packet_work(struct work_struct *work)
+void rtw89_btc_ntfy_arp_packet_work(struct wiphy *wiphy, struct wiphy_work *work)
{
struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev,
btc.arp_notify_work);
- mutex_lock(&rtwdev->mutex);
+ lockdep_assert_wiphy(wiphy);
+
rtw89_btc_ntfy_specific_packet(rtwdev, PACKET_ARP);
- mutex_unlock(&rtwdev->mutex);
}
-void rtw89_btc_ntfy_dhcp_packet_work(struct work_struct *work)
+void rtw89_btc_ntfy_dhcp_packet_work(struct wiphy *wiphy, struct wiphy_work *work)
{
struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev,
btc.dhcp_notify_work);
- mutex_lock(&rtwdev->mutex);
+ lockdep_assert_wiphy(wiphy);
+
rtw89_leave_ps_mode(rtwdev);
rtw89_btc_ntfy_specific_packet(rtwdev, PACKET_DHCP);
- mutex_unlock(&rtwdev->mutex);
}
-void rtw89_btc_ntfy_icmp_packet_work(struct work_struct *work)
+void rtw89_btc_ntfy_icmp_packet_work(struct wiphy *wiphy, struct wiphy_work *work)
{
struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev,
btc.icmp_notify_work);
- mutex_lock(&rtwdev->mutex);
+ lockdep_assert_wiphy(wiphy);
+
rtw89_leave_ps_mode(rtwdev);
rtw89_btc_ntfy_specific_packet(rtwdev, PACKET_ICMP);
- mutex_unlock(&rtwdev->mutex);
}
static u8 _update_bt_rssi_level(struct rtw89_dev *rtwdev, u8 rssi)
@@ -7531,9 +7522,9 @@ static void _update_bt_info(struct rtw89_dev *rtwdev, u8 *buf, u32 len)
a2dp->vendor_id = 0;
a2dp->flush_time = 0;
a2dp->play_latency = 1;
- ieee80211_queue_delayed_work(rtwdev->hw,
- &rtwdev->coex_bt_devinfo_work,
- RTW89_COEX_BT_DEVINFO_WORK_PERIOD);
+ wiphy_delayed_work_queue(rtwdev->hw->wiphy,
+ &rtwdev->coex_bt_devinfo_work,
+ RTW89_COEX_BT_DEVINFO_WORK_PERIOD);
}
_run_coex(rtwdev, BTC_RSN_UPDATE_BT_INFO);
@@ -7671,7 +7662,8 @@ void rtw89_btc_ntfy_role_info(struct rtw89_dev *rtwdev,
else
wl->status.map.connecting = 0;
- if (state == BTC_ROLE_MSTS_STA_DIS_CONN)
+ if (state == BTC_ROLE_MSTS_STA_DIS_CONN ||
+ state == BTC_ROLE_MSTS_STA_CONN_END)
wl->status.map._4way = false;
_run_coex(rtwdev, BTC_RSN_NTFY_ROLE_INFO);
@@ -7781,7 +7773,7 @@ static bool _ntfy_wl_rfk(struct rtw89_dev *rtwdev, u8 phy_path,
wl->rfk_info.state = BTC_WRFK_STOP;
_write_scbd(rtwdev, BTC_WSCB_WLRFK, false);
- cancel_delayed_work(&rtwdev->coex_rfk_chk_work);
+ wiphy_delayed_work_cancel(rtwdev->hw->wiphy, &rtwdev->coex_rfk_chk_work);
break;
default:
rtw89_debug(rtwdev, RTW89_DBG_BTC,
@@ -7795,9 +7787,9 @@ static bool _ntfy_wl_rfk(struct rtw89_dev *rtwdev, u8 phy_path,
_run_coex(rtwdev, BTC_RSN_NTFY_WL_RFK);
if (wl->rfk_info.state == BTC_WRFK_START)
- ieee80211_queue_delayed_work(rtwdev->hw,
- &rtwdev->coex_rfk_chk_work,
- RTW89_COEX_RFK_CHK_WORK_PERIOD);
+ wiphy_delayed_work_queue(rtwdev->hw->wiphy,
+ &rtwdev->coex_rfk_chk_work,
+ RTW89_COEX_RFK_CHK_WORK_PERIOD);
}
rtw89_debug(rtwdev, RTW89_DBG_BTC,
@@ -7815,6 +7807,8 @@ void rtw89_btc_ntfy_wl_rfk(struct rtw89_dev *rtwdev, u8 phy_map,
bool allow;
int ret;
+ lockdep_assert_wiphy(rtwdev->hw->wiphy);
+
band = FIELD_GET(BTC_RFK_BAND_MAP, phy_map);
rtw89_debug(rtwdev, RTW89_DBG_RFK,
@@ -8115,6 +8109,7 @@ void rtw89_btc_c2h_handle(struct rtw89_dev *rtwdev, struct sk_buff *skb,
return;
func = rtw89_btc_c2h_get_index_by_ver(rtwdev, func);
+ pfwinfo->cnt_c2h++;
switch (func) {
case BTF_EVNT_BUF_OVERFLOW:
@@ -8156,7 +8151,7 @@ void rtw89_btc_c2h_handle(struct rtw89_dev *rtwdev, struct sk_buff *skb,
#define BTC_CX_FW_OFFLOAD 0
-static void _show_cx_info(struct rtw89_dev *rtwdev, struct seq_file *m)
+static int _show_cx_info(struct rtw89_dev *rtwdev, char *buf, size_t bufsz)
{
union rtw89_btc_module_info *md = &rtwdev->btc.mdinfo;
const struct rtw89_chip_info *chip = rtwdev->chip;
@@ -8168,40 +8163,43 @@ static void _show_cx_info(struct rtw89_dev *rtwdev, struct seq_file *m)
struct rtw89_btc_wl_info *wl = &btc->cx.wl;
u32 ver_main = 0, ver_sub = 0, ver_hotfix = 0, id_branch = 0;
u8 cv, rfe, iso, ant_num, ant_single_pos;
+ char *p = buf, *end = buf + bufsz;
if (!(dm->coex_info_map & BTC_COEX_INFO_CX))
- return;
+ return 0;
dm->cnt_notify[BTC_NCNT_SHOW_COEX_INFO]++;
- seq_printf(m, "========== [BTC COEX INFO (%d)] ==========\n",
- chip->chip_id);
+ p += scnprintf(p, end - p,
+ "========== [BTC COEX INFO (%d)] ==========\n",
+ chip->chip_id);
ver_main = FIELD_GET(GENMASK(31, 24), RTW89_COEX_VERSION);
ver_sub = FIELD_GET(GENMASK(23, 16), RTW89_COEX_VERSION);
ver_hotfix = FIELD_GET(GENMASK(15, 8), RTW89_COEX_VERSION);
id_branch = FIELD_GET(GENMASK(7, 0), RTW89_COEX_VERSION);
- seq_printf(m, " %-15s : Coex:%d.%d.%d(branch:%d), ",
- "[coex_version]", ver_main, ver_sub, ver_hotfix, id_branch);
+ p += scnprintf(p, end - p, " %-15s : Coex:%d.%d.%d(branch:%d), ",
+ "[coex_version]", ver_main, ver_sub, ver_hotfix,
+ id_branch);
ver_main = FIELD_GET(GENMASK(31, 24), wl->ver_info.fw_coex);
ver_sub = FIELD_GET(GENMASK(23, 16), wl->ver_info.fw_coex);
ver_hotfix = FIELD_GET(GENMASK(15, 8), wl->ver_info.fw_coex);
id_branch = FIELD_GET(GENMASK(7, 0), wl->ver_info.fw_coex);
- seq_printf(m, "WL_FW_coex:%d.%d.%d(branch:%d)",
- ver_main, ver_sub, ver_hotfix, id_branch);
+ p += scnprintf(p, end - p, "WL_FW_coex:%d.%d.%d(branch:%d)",
+ ver_main, ver_sub, ver_hotfix, id_branch);
ver_main = FIELD_GET(GENMASK(31, 24), chip->wlcx_desired);
ver_sub = FIELD_GET(GENMASK(23, 16), chip->wlcx_desired);
ver_hotfix = FIELD_GET(GENMASK(15, 8), chip->wlcx_desired);
- seq_printf(m, "(%s, desired:%d.%d.%d), ",
- (wl->ver_info.fw_coex >= chip->wlcx_desired ?
- "Match" : "Mismatch"), ver_main, ver_sub, ver_hotfix);
+ p += scnprintf(p, end - p, "(%s, desired:%d.%d.%d), ",
+ (wl->ver_info.fw_coex >= chip->wlcx_desired ?
+ "Match" : "Mismatch"), ver_main, ver_sub, ver_hotfix);
- seq_printf(m, "BT_FW_coex:%d(%s, desired:%d)\n",
- bt->ver_info.fw_coex,
- (bt->ver_info.fw_coex >= chip->btcx_desired ?
- "Match" : "Mismatch"), chip->btcx_desired);
+ p += scnprintf(p, end - p, "BT_FW_coex:%d(%s, desired:%d)\n",
+ bt->ver_info.fw_coex,
+ (bt->ver_info.fw_coex >= chip->btcx_desired ?
+ "Match" : "Mismatch"), chip->btcx_desired);
if (bt->enable.now && bt->ver_info.fw == 0)
rtw89_btc_fw_en_rpt(rtwdev, RPT_EN_BT_VER_INFO, true);
@@ -8212,10 +8210,11 @@ static void _show_cx_info(struct rtw89_dev *rtwdev, struct seq_file *m)
ver_sub = FIELD_GET(GENMASK(23, 16), wl->ver_info.fw);
ver_hotfix = FIELD_GET(GENMASK(15, 8), wl->ver_info.fw);
id_branch = FIELD_GET(GENMASK(7, 0), wl->ver_info.fw);
- seq_printf(m, " %-15s : WL_FW:%d.%d.%d.%d, BT_FW:0x%x(%s)\n",
- "[sub_module]",
- ver_main, ver_sub, ver_hotfix, id_branch,
- bt->ver_info.fw, bt->run_patch_code ? "patch" : "ROM");
+ p += scnprintf(p, end - p,
+ " %-15s : WL_FW:%d.%d.%d.%d, BT_FW:0x%x(%s)\n",
+ "[sub_module]",
+ ver_main, ver_sub, ver_hotfix, id_branch,
+ bt->ver_info.fw, bt->run_patch_code ? "patch" : "ROM");
if (ver->fcxinit == 7) {
cv = md->md_v7.kt_ver;
@@ -8231,36 +8230,41 @@ static void _show_cx_info(struct rtw89_dev *rtwdev, struct seq_file *m)
ant_single_pos = md->md.ant.single_pos;
}
- seq_printf(m, " %-15s : cv:%x, rfe_type:0x%x, ant_iso:%d, ant_pg:%d, %s",
- "[hw_info]", cv, rfe, iso, ant_num,
- ant_num > 1 ? "" :
- ant_single_pos ? "1Ant_Pos:S1, " : "1Ant_Pos:S0, ");
+ p += scnprintf(p, end - p,
+ " %-15s : cv:%x, rfe_type:0x%x, ant_iso:%d, ant_pg:%d, %s",
+ "[hw_info]", cv, rfe, iso, ant_num,
+ ant_num > 1 ? "" :
+ ant_single_pos ? "1Ant_Pos:S1, " : "1Ant_Pos:S0, ");
- seq_printf(m, "3rd_coex:%d, dbcc:%d, tx_num:%d, rx_num:%d\n",
- btc->cx.other.type, rtwdev->dbcc_en, hal->tx_nss,
- hal->rx_nss);
+ p += scnprintf(p, end - p,
+ "3rd_coex:%d, dbcc:%d, tx_num:%d, rx_num:%d\n",
+ btc->cx.other.type, rtwdev->dbcc_en, hal->tx_nss,
+ hal->rx_nss);
+
+ return p - buf;
}
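
The hunks above and below all apply the same mechanical conversion: each debug printer now takes a caller-supplied buffer, advances a cursor with scnprintf(), and returns the number of bytes it produced so that a parent printer can chain sub-printers with the remaining space. A minimal standalone sketch of that pattern follows; the function names example_show_sub/example_show_all are hypothetical and illustrative only, not part of this patch.

#include <linux/kernel.h>	/* scnprintf() */

/* Illustrative sketch only, not part of the patch.
 * Sub-printer: fills at most bufsz bytes and reports how many it wrote.
 */
static int example_show_sub(char *buf, size_t bufsz, int rssi)
{
	char *p = buf, *end = buf + bufsz;

	/* scnprintf() never writes past the given size and returns the
	 * number of characters actually stored (excluding the NUL), so
	 * advancing 'p' by its return value stays within [buf, end] even
	 * when the buffer fills up.
	 */
	p += scnprintf(p, end - p, " rssi:%ddBm\n", rssi);

	return p - buf;
}

/* Parent printer: chains sub-printers by passing down the remaining space. */
static int example_show_all(char *buf, size_t bufsz, int chan, int rssi)
{
	char *p = buf, *end = buf + bufsz;

	p += scnprintf(p, end - p, "channel:%d\n", chan);
	p += example_show_sub(p, end - p, rssi);

	return p - buf;
}

Each converted _show_*() function in this diff has exactly this shape: the old seq_file cursor is replaced by the (buf, bufsz) pair, early exits return 0 or jump to an out label, and the final return is p - buf.
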
-static void _show_wl_role_info(struct rtw89_dev *rtwdev, struct seq_file *m)
+static int _show_wl_role_info(struct rtw89_dev *rtwdev, char *buf, size_t bufsz)
{
struct rtw89_btc *btc = &rtwdev->btc;
struct rtw89_btc_wl_link_info *plink = NULL;
struct rtw89_btc_wl_info *wl = &btc->cx.wl;
struct rtw89_btc_wl_dbcc_info *wl_dinfo = &wl->dbcc_info;
struct rtw89_traffic_stats *t;
+ char *p = buf, *end = buf + bufsz;
u8 i;
if (rtwdev->dbcc_en) {
- seq_printf(m,
- " %-15s : PHY0_band(op:%d/scan:%d/real:%d), ",
- "[dbcc_info]", wl_dinfo->op_band[RTW89_PHY_0],
- wl_dinfo->scan_band[RTW89_PHY_0],
- wl_dinfo->real_band[RTW89_PHY_0]);
- seq_printf(m,
- "PHY1_band(op:%d/scan:%d/real:%d)\n",
- wl_dinfo->op_band[RTW89_PHY_1],
- wl_dinfo->scan_band[RTW89_PHY_1],
- wl_dinfo->real_band[RTW89_PHY_1]);
+ p += scnprintf(p, end - p,
+ " %-15s : PHY0_band(op:%d/scan:%d/real:%d), ",
+ "[dbcc_info]", wl_dinfo->op_band[RTW89_PHY_0],
+ wl_dinfo->scan_band[RTW89_PHY_0],
+ wl_dinfo->real_band[RTW89_PHY_0]);
+ p += scnprintf(p, end - p,
+ "PHY1_band(op:%d/scan:%d/real:%d)\n",
+ wl_dinfo->op_band[RTW89_PHY_1],
+ wl_dinfo->scan_band[RTW89_PHY_1],
+ wl_dinfo->real_band[RTW89_PHY_1]);
}
for (i = 0; i < RTW89_PORT_NUM; i++) {
@@ -8272,38 +8276,41 @@ static void _show_wl_role_info(struct rtw89_dev *rtwdev, struct seq_file *m)
if (!plink->active)
continue;
- seq_printf(m,
- " [port_%d] : role=%d(phy-%d), connect=%d(client_cnt=%d), mode=%d, center_ch=%d, bw=%d",
- plink->pid, (u32)plink->role, plink->phy,
- (u32)plink->connected, plink->client_cnt - 1,
- (u32)plink->mode, plink->ch, (u32)plink->bw);
+ p += scnprintf(p, end - p,
+ " [port_%d] : role=%d(phy-%d), connect=%d(client_cnt=%d), mode=%d, center_ch=%d, bw=%d",
+ plink->pid, (u32)plink->role, plink->phy,
+ (u32)plink->connected, plink->client_cnt - 1,
+ (u32)plink->mode, plink->ch, (u32)plink->bw);
if (plink->connected == MLME_NO_LINK)
continue;
- seq_printf(m,
- ", mac_id=%d, max_tx_time=%dus, max_tx_retry=%d\n",
- plink->mac_id, plink->tx_time, plink->tx_retry);
+ p += scnprintf(p, end - p,
+ ", mac_id=%d, max_tx_time=%dus, max_tx_retry=%d\n",
+ plink->mac_id, plink->tx_time, plink->tx_retry);
- seq_printf(m,
- " [port_%d] : rssi=-%ddBm(%d), busy=%d, dir=%s, ",
- plink->pid, 110 - plink->stat.rssi,
- plink->stat.rssi, plink->busy,
- plink->dir == RTW89_TFC_UL ? "UL" : "DL");
+ p += scnprintf(p, end - p,
+ " [port_%d] : rssi=-%ddBm(%d), busy=%d, dir=%s, ",
+ plink->pid, 110 - plink->stat.rssi,
+ plink->stat.rssi, plink->busy,
+ plink->dir == RTW89_TFC_UL ? "UL" : "DL");
t = &plink->stat.traffic;
- seq_printf(m,
- "tx[rate:%d/busy_level:%d], ",
- (u32)t->tx_rate, t->tx_tfc_lv);
+ p += scnprintf(p, end - p,
+ "tx[rate:%d/busy_level:%d], ",
+ (u32)t->tx_rate, t->tx_tfc_lv);
- seq_printf(m, "rx[rate:%d/busy_level:%d/drop:%d]\n",
- (u32)t->rx_rate,
- t->rx_tfc_lv, plink->rx_rate_drop_cnt);
+ p += scnprintf(p, end - p,
+ "rx[rate:%d/busy_level:%d/drop:%d]\n",
+ (u32)t->rx_rate,
+ t->rx_tfc_lv, plink->rx_rate_drop_cnt);
}
+
+ return p - buf;
}
-static void _show_wl_info(struct rtw89_dev *rtwdev, struct seq_file *m)
+static int _show_wl_info(struct rtw89_dev *rtwdev, char *buf, size_t bufsz)
{
struct rtw89_btc *btc = &rtwdev->btc;
const struct rtw89_btc_ver *ver = btc->ver;
@@ -8314,12 +8321,13 @@ static void _show_wl_info(struct rtw89_dev *rtwdev, struct seq_file *m)
struct rtw89_btc_wl_role_info_v2 *wl_rinfo_v2 = &wl->role_info_v2;
struct rtw89_btc_wl_role_info_v7 *wl_rinfo_v7 = &wl->role_info_v7;
struct rtw89_btc_wl_role_info_v8 *wl_rinfo_v8 = &wl->role_info_v8;
+ char *p = buf, *end = buf + bufsz;
u8 mode;
if (!(btc->dm.coex_info_map & BTC_COEX_INFO_WL))
- return;
+ return 0;
- seq_puts(m, "========== [WL Status] ==========\n");
+ p += scnprintf(p, end - p, "========== [WL Status] ==========\n");
if (ver->fwlrole == 0)
mode = wl_rinfo->link_mode;
@@ -8332,24 +8340,28 @@ static void _show_wl_info(struct rtw89_dev *rtwdev, struct seq_file *m)
else if (ver->fwlrole == 8)
mode = wl_rinfo_v8->link_mode;
else
- return;
+ goto out;
- seq_printf(m, " %-15s : link_mode:%d, ", "[status]", mode);
+ p += scnprintf(p, end - p, " %-15s : link_mode:%d, ", "[status]",
+ mode);
- seq_printf(m,
- "rf_off:%d, power_save:%d, scan:%s(band:%d/phy_map:0x%x), ",
- wl->status.map.rf_off, wl->status.map.lps,
- wl->status.map.scan ? "Y" : "N",
- wl->scan_info.band[RTW89_PHY_0], wl->scan_info.phy_map);
+ p += scnprintf(p, end - p,
+ "rf_off:%d, power_save:%d, scan:%s(band:%d/phy_map:0x%x), ",
+ wl->status.map.rf_off, wl->status.map.lps,
+ wl->status.map.scan ? "Y" : "N",
+ wl->scan_info.band[RTW89_PHY_0], wl->scan_info.phy_map);
- seq_printf(m,
- "connecting:%s, roam:%s, 4way:%s, init_ok:%s\n",
- wl->status.map.connecting ? "Y" : "N",
- wl->status.map.roaming ? "Y" : "N",
- wl->status.map._4way ? "Y" : "N",
- wl->status.map.init_ok ? "Y" : "N");
+ p += scnprintf(p, end - p,
+ "connecting:%s, roam:%s, 4way:%s, init_ok:%s\n",
+ wl->status.map.connecting ? "Y" : "N",
+ wl->status.map.roaming ? "Y" : "N",
+ wl->status.map._4way ? "Y" : "N",
+ wl->status.map.init_ok ? "Y" : "N");
- _show_wl_role_info(rtwdev, m);
+ p += _show_wl_role_info(rtwdev, p, end - p);
+
+out:
+ return p - buf;
}
enum btc_bt_a2dp_type {
@@ -8358,7 +8370,7 @@ enum btc_bt_a2dp_type {
BTC_A2DP_TWS_RELAY = 2,
};
-static void _show_bt_profile_info(struct rtw89_dev *rtwdev, struct seq_file *m)
+static int _show_bt_profile_info(struct rtw89_dev *rtwdev, char *buf, size_t bufsz)
{
struct rtw89_btc *btc = &rtwdev->btc;
struct rtw89_btc_bt_link_info *bt_linfo = &btc->cx.bt.link_info;
@@ -8366,50 +8378,55 @@ static void _show_bt_profile_info(struct rtw89_dev *rtwdev, struct seq_file *m)
struct rtw89_btc_bt_hid_desc hid = bt_linfo->hid_desc;
struct rtw89_btc_bt_a2dp_desc a2dp = bt_linfo->a2dp_desc;
struct rtw89_btc_bt_pan_desc pan = bt_linfo->pan_desc;
+ char *p = buf, *end = buf + bufsz;
if (hfp.exist) {
- seq_printf(m, " %-15s : type:%s, sut_pwr:%d, golden-rx:%d",
- "[HFP]", (hfp.type == 0 ? "SCO" : "eSCO"),
- bt_linfo->sut_pwr_level[0],
- bt_linfo->golden_rx_shift[0]);
+ p += scnprintf(p, end - p,
+ " %-15s : type:%s, sut_pwr:%d, golden-rx:%d",
+ "[HFP]", (hfp.type == 0 ? "SCO" : "eSCO"),
+ bt_linfo->sut_pwr_level[0],
+ bt_linfo->golden_rx_shift[0]);
}
if (hid.exist) {
- seq_printf(m,
- "\n\r %-15s : type:%s%s%s%s%s pair-cnt:%d, sut_pwr:%d, golden-rx:%d\n",
- "[HID]",
- hid.type & BTC_HID_218 ? "2/18," : "",
- hid.type & BTC_HID_418 ? "4/18," : "",
- hid.type & BTC_HID_BLE ? "BLE," : "",
- hid.type & BTC_HID_RCU ? "RCU," : "",
- hid.type & BTC_HID_RCU_VOICE ? "RCU-Voice," : "",
- hid.pair_cnt, bt_linfo->sut_pwr_level[1],
- bt_linfo->golden_rx_shift[1]);
+ p += scnprintf(p, end - p,
+ "\n\r %-15s : type:%s%s%s%s%s pair-cnt:%d, sut_pwr:%d, golden-rx:%d\n",
+ "[HID]",
+ hid.type & BTC_HID_218 ? "2/18," : "",
+ hid.type & BTC_HID_418 ? "4/18," : "",
+ hid.type & BTC_HID_BLE ? "BLE," : "",
+ hid.type & BTC_HID_RCU ? "RCU," : "",
+ hid.type & BTC_HID_RCU_VOICE ? "RCU-Voice," : "",
+ hid.pair_cnt, bt_linfo->sut_pwr_level[1],
+ bt_linfo->golden_rx_shift[1]);
}
if (a2dp.exist) {
- seq_printf(m,
- " %-15s : type:%s, bit-pool:%d, flush-time:%d, ",
- "[A2DP]",
- a2dp.type == BTC_A2DP_LEGACY ? "Legacy" : "TWS",
- a2dp.bitpool, a2dp.flush_time);
+ p += scnprintf(p, end - p,
+ " %-15s : type:%s, bit-pool:%d, flush-time:%d, ",
+ "[A2DP]",
+ a2dp.type == BTC_A2DP_LEGACY ? "Legacy" : "TWS",
+ a2dp.bitpool, a2dp.flush_time);
- seq_printf(m,
- "vid:0x%x, Dev-name:0x%x, sut_pwr:%d, golden-rx:%d\n",
- a2dp.vendor_id, a2dp.device_name,
- bt_linfo->sut_pwr_level[2],
- bt_linfo->golden_rx_shift[2]);
+ p += scnprintf(p, end - p,
+ "vid:0x%x, Dev-name:0x%x, sut_pwr:%d, golden-rx:%d\n",
+ a2dp.vendor_id, a2dp.device_name,
+ bt_linfo->sut_pwr_level[2],
+ bt_linfo->golden_rx_shift[2]);
}
if (pan.exist) {
- seq_printf(m, " %-15s : sut_pwr:%d, golden-rx:%d\n",
- "[PAN]",
- bt_linfo->sut_pwr_level[3],
- bt_linfo->golden_rx_shift[3]);
+ p += scnprintf(p, end - p,
+ " %-15s : sut_pwr:%d, golden-rx:%d\n",
+ "[PAN]",
+ bt_linfo->sut_pwr_level[3],
+ bt_linfo->golden_rx_shift[3]);
}
+
+ return p - buf;
}
-static void _show_bt_info(struct rtw89_dev *rtwdev, struct seq_file *m)
+static int _show_bt_info(struct rtw89_dev *rtwdev, char *buf, size_t bufsz)
{
struct rtw89_btc *btc = &rtwdev->btc;
const struct rtw89_btc_ver *ver = btc->ver;
@@ -8418,129 +8435,136 @@ static void _show_bt_info(struct rtw89_dev *rtwdev, struct seq_file *m)
struct rtw89_btc_wl_info *wl = &cx->wl;
struct rtw89_btc_bt_link_info *bt_linfo = &bt->link_info;
union rtw89_btc_module_info *md = &btc->mdinfo;
+ char *p = buf, *end = buf + bufsz;
u8 *afh = bt_linfo->afh_map;
u8 *afh_le = bt_linfo->afh_map_le;
u8 bt_pos;
if (!(btc->dm.coex_info_map & BTC_COEX_INFO_BT))
- return;
+ return 0;
if (ver->fcxinit == 7)
bt_pos = md->md_v7.bt_pos;
else
bt_pos = md->md.bt_pos;
- seq_puts(m, "========== [BT Status] ==========\n");
-
- seq_printf(m, " %-15s : enable:%s, btg:%s%s, connect:%s, ",
- "[status]", bt->enable.now ? "Y" : "N",
- bt->btg_type ? "Y" : "N",
- (bt->enable.now && (bt->btg_type != bt_pos) ?
- "(efuse-mismatch!!)" : ""),
- (bt_linfo->status.map.connect ? "Y" : "N"));
-
- seq_printf(m, "igno_wl:%s, mailbox_avl:%s, rfk_state:0x%x\n",
- bt->igno_wl ? "Y" : "N",
- bt->mbx_avl ? "Y" : "N", bt->rfk_info.val);
-
- seq_printf(m, " %-15s : profile:%s%s%s%s%s ",
- "[profile]",
- (bt_linfo->profile_cnt.now == 0) ? "None," : "",
- bt_linfo->hfp_desc.exist ? "HFP," : "",
- bt_linfo->hid_desc.exist ? "HID," : "",
- bt_linfo->a2dp_desc.exist ?
- (bt_linfo->a2dp_desc.sink ? "A2DP_sink," : "A2DP,") : "",
- bt_linfo->pan_desc.exist ? "PAN," : "");
-
- seq_printf(m,
- "multi-link:%s, role:%s, ble-connect:%s, CQDDR:%s, A2DP_active:%s, PAN_active:%s\n",
- bt_linfo->multi_link.now ? "Y" : "N",
- bt_linfo->slave_role ? "Slave" : "Master",
- bt_linfo->status.map.ble_connect ? "Y" : "N",
- bt_linfo->cqddr ? "Y" : "N",
- bt_linfo->a2dp_desc.active ? "Y" : "N",
- bt_linfo->pan_desc.active ? "Y" : "N");
-
- seq_printf(m,
- " %-15s : rssi:%ddBm(lvl:%d), tx_rate:%dM, %s%s%s",
- "[link]", bt_linfo->rssi - 100,
- bt->rssi_level,
- bt_linfo->tx_3m ? 3 : 2,
- bt_linfo->status.map.inq_pag ? " inq-page!!" : "",
- bt_linfo->status.map.acl_busy ? " acl_busy!!" : "",
- bt_linfo->status.map.mesh_busy ? " mesh_busy!!" : "");
-
- seq_printf(m,
- "%s afh_map[%02x%02x_%02x%02x_%02x%02x_%02x%02x_%02x%02x], ",
- bt_linfo->relink.now ? " ReLink!!" : "",
- afh[0], afh[1], afh[2], afh[3], afh[4],
- afh[5], afh[6], afh[7], afh[8], afh[9]);
+ p += scnprintf(p, end - p, "========== [BT Status] ==========\n");
+
+ p += scnprintf(p, end - p,
+ " %-15s : enable:%s, btg:%s%s, connect:%s, ",
+ "[status]", bt->enable.now ? "Y" : "N",
+ bt->btg_type ? "Y" : "N",
+ (bt->enable.now && (bt->btg_type != bt_pos) ?
+ "(efuse-mismatch!!)" : ""),
+ (bt_linfo->status.map.connect ? "Y" : "N"));
+
+ p += scnprintf(p, end - p,
+ "igno_wl:%s, mailbox_avl:%s, rfk_state:0x%x\n",
+ bt->igno_wl ? "Y" : "N",
+ bt->mbx_avl ? "Y" : "N", bt->rfk_info.val);
+
+ p += scnprintf(p, end - p, " %-15s : profile:%s%s%s%s%s ",
+ "[profile]",
+ (bt_linfo->profile_cnt.now == 0) ? "None," : "",
+ bt_linfo->hfp_desc.exist ? "HFP," : "",
+ bt_linfo->hid_desc.exist ? "HID," : "",
+ bt_linfo->a2dp_desc.exist ?
+ (bt_linfo->a2dp_desc.sink ? "A2DP_sink," : "A2DP,") : "",
+ bt_linfo->pan_desc.exist ? "PAN," : "");
+
+ p += scnprintf(p, end - p,
+ "multi-link:%s, role:%s, ble-connect:%s, CQDDR:%s, A2DP_active:%s, PAN_active:%s\n",
+ bt_linfo->multi_link.now ? "Y" : "N",
+ bt_linfo->slave_role ? "Slave" : "Master",
+ bt_linfo->status.map.ble_connect ? "Y" : "N",
+ bt_linfo->cqddr ? "Y" : "N",
+ bt_linfo->a2dp_desc.active ? "Y" : "N",
+ bt_linfo->pan_desc.active ? "Y" : "N");
+
+ p += scnprintf(p, end - p,
+ " %-15s : rssi:%ddBm(lvl:%d), tx_rate:%dM, %s%s%s",
+ "[link]", bt_linfo->rssi - 100,
+ bt->rssi_level,
+ bt_linfo->tx_3m ? 3 : 2,
+ bt_linfo->status.map.inq_pag ? " inq-page!!" : "",
+ bt_linfo->status.map.acl_busy ? " acl_busy!!" : "",
+ bt_linfo->status.map.mesh_busy ? " mesh_busy!!" : "");
+
+ p += scnprintf(p, end - p,
+ "%s afh_map[%02x%02x_%02x%02x_%02x%02x_%02x%02x_%02x%02x], ",
+ bt_linfo->relink.now ? " ReLink!!" : "",
+ afh[0], afh[1], afh[2], afh[3], afh[4],
+ afh[5], afh[6], afh[7], afh[8], afh[9]);
if (ver->fcxbtafh == 2 && bt_linfo->status.map.ble_connect)
- seq_printf(m,
- "LE[%02x%02x_%02x_%02x%02x]",
- afh_le[0], afh_le[1], afh_le[2],
- afh_le[3], afh_le[4]);
-
- seq_printf(m, "wl_ch_map[en:%d/ch:%d/bw:%d]\n",
- wl->afh_info.en, wl->afh_info.ch, wl->afh_info.bw);
-
- seq_printf(m,
- " %-15s : retry:%d, relink:%d, rate_chg:%d, reinit:%d, reenable:%d, ",
- "[stat_cnt]", cx->cnt_bt[BTC_BCNT_RETRY],
- cx->cnt_bt[BTC_BCNT_RELINK], cx->cnt_bt[BTC_BCNT_RATECHG],
- cx->cnt_bt[BTC_BCNT_REINIT], cx->cnt_bt[BTC_BCNT_REENABLE]);
-
- seq_printf(m,
- "role-switch:%d, afh:%d, inq_page:%d(inq:%d/page:%d), igno_wl:%d\n",
- cx->cnt_bt[BTC_BCNT_ROLESW], cx->cnt_bt[BTC_BCNT_AFH],
- cx->cnt_bt[BTC_BCNT_INQPAG], cx->cnt_bt[BTC_BCNT_INQ],
- cx->cnt_bt[BTC_BCNT_PAGE], cx->cnt_bt[BTC_BCNT_IGNOWL]);
-
- _show_bt_profile_info(rtwdev, m);
-
- seq_printf(m,
- " %-15s : raw_data[%02x %02x %02x %02x %02x %02x] (type:%s/cnt:%d/same:%d)\n",
- "[bt_info]", bt->raw_info[2], bt->raw_info[3],
- bt->raw_info[4], bt->raw_info[5], bt->raw_info[6],
- bt->raw_info[7],
- bt->raw_info[0] == BTC_BTINFO_AUTO ? "auto" : "reply",
- cx->cnt_bt[BTC_BCNT_INFOUPDATE],
- cx->cnt_bt[BTC_BCNT_INFOSAME]);
-
- seq_printf(m,
- " %-15s : Hi-rx = %d, Hi-tx = %d, Lo-rx = %d, Lo-tx = %d (bt_polut_wl_tx = %d)",
- "[trx_req_cnt]", cx->cnt_bt[BTC_BCNT_HIPRI_RX],
- cx->cnt_bt[BTC_BCNT_HIPRI_TX], cx->cnt_bt[BTC_BCNT_LOPRI_RX],
- cx->cnt_bt[BTC_BCNT_LOPRI_TX], cx->cnt_bt[BTC_BCNT_POLUT]);
+ p += scnprintf(p, end - p,
+ "LE[%02x%02x_%02x_%02x%02x]",
+ afh_le[0], afh_le[1], afh_le[2],
+ afh_le[3], afh_le[4]);
+
+ p += scnprintf(p, end - p, "wl_ch_map[en:%d/ch:%d/bw:%d]\n",
+ wl->afh_info.en, wl->afh_info.ch, wl->afh_info.bw);
+
+ p += scnprintf(p, end - p,
+ " %-15s : retry:%d, relink:%d, rate_chg:%d, reinit:%d, reenable:%d, ",
+ "[stat_cnt]", cx->cnt_bt[BTC_BCNT_RETRY],
+ cx->cnt_bt[BTC_BCNT_RELINK],
+ cx->cnt_bt[BTC_BCNT_RATECHG],
+ cx->cnt_bt[BTC_BCNT_REINIT],
+ cx->cnt_bt[BTC_BCNT_REENABLE]);
+
+ p += scnprintf(p, end - p,
+ "role-switch:%d, afh:%d, inq_page:%d(inq:%d/page:%d), igno_wl:%d\n",
+ cx->cnt_bt[BTC_BCNT_ROLESW], cx->cnt_bt[BTC_BCNT_AFH],
+ cx->cnt_bt[BTC_BCNT_INQPAG], cx->cnt_bt[BTC_BCNT_INQ],
+ cx->cnt_bt[BTC_BCNT_PAGE], cx->cnt_bt[BTC_BCNT_IGNOWL]);
+
+ p += _show_bt_profile_info(rtwdev, p, end - p);
+
+ p += scnprintf(p, end - p,
+ " %-15s : raw_data[%02x %02x %02x %02x %02x %02x] (type:%s/cnt:%d/same:%d)\n",
+ "[bt_info]", bt->raw_info[2], bt->raw_info[3],
+ bt->raw_info[4], bt->raw_info[5], bt->raw_info[6],
+ bt->raw_info[7],
+ bt->raw_info[0] == BTC_BTINFO_AUTO ? "auto" : "reply",
+ cx->cnt_bt[BTC_BCNT_INFOUPDATE],
+ cx->cnt_bt[BTC_BCNT_INFOSAME]);
+
+ p += scnprintf(p, end - p,
+ " %-15s : Hi-rx = %d, Hi-tx = %d, Lo-rx = %d, Lo-tx = %d (bt_polut_wl_tx = %d)",
+ "[trx_req_cnt]", cx->cnt_bt[BTC_BCNT_HIPRI_RX],
+ cx->cnt_bt[BTC_BCNT_HIPRI_TX],
+ cx->cnt_bt[BTC_BCNT_LOPRI_RX],
+ cx->cnt_bt[BTC_BCNT_LOPRI_TX],
+ cx->cnt_bt[BTC_BCNT_POLUT]);
if (!bt->scan_info_update) {
rtw89_btc_fw_en_rpt(rtwdev, RPT_EN_BT_SCAN_INFO, true);
- seq_puts(m, "\n");
+ p += scnprintf(p, end - p, "\n");
} else {
rtw89_btc_fw_en_rpt(rtwdev, RPT_EN_BT_SCAN_INFO, false);
if (ver->fcxbtscan == 1) {
- seq_printf(m,
- "(INQ:%d-%d/PAGE:%d-%d/LE:%d-%d/INIT:%d-%d)",
- le16_to_cpu(bt->scan_info_v1[BTC_SCAN_INQ].win),
- le16_to_cpu(bt->scan_info_v1[BTC_SCAN_INQ].intvl),
- le16_to_cpu(bt->scan_info_v1[BTC_SCAN_PAGE].win),
- le16_to_cpu(bt->scan_info_v1[BTC_SCAN_PAGE].intvl),
- le16_to_cpu(bt->scan_info_v1[BTC_SCAN_BLE].win),
- le16_to_cpu(bt->scan_info_v1[BTC_SCAN_BLE].intvl),
- le16_to_cpu(bt->scan_info_v1[BTC_SCAN_INIT].win),
- le16_to_cpu(bt->scan_info_v1[BTC_SCAN_INIT].intvl));
+ p += scnprintf(p, end - p,
+ "(INQ:%d-%d/PAGE:%d-%d/LE:%d-%d/INIT:%d-%d)",
+ le16_to_cpu(bt->scan_info_v1[BTC_SCAN_INQ].win),
+ le16_to_cpu(bt->scan_info_v1[BTC_SCAN_INQ].intvl),
+ le16_to_cpu(bt->scan_info_v1[BTC_SCAN_PAGE].win),
+ le16_to_cpu(bt->scan_info_v1[BTC_SCAN_PAGE].intvl),
+ le16_to_cpu(bt->scan_info_v1[BTC_SCAN_BLE].win),
+ le16_to_cpu(bt->scan_info_v1[BTC_SCAN_BLE].intvl),
+ le16_to_cpu(bt->scan_info_v1[BTC_SCAN_INIT].win),
+ le16_to_cpu(bt->scan_info_v1[BTC_SCAN_INIT].intvl));
} else if (ver->fcxbtscan == 2) {
- seq_printf(m,
- "(BG:%d-%d/INIT:%d-%d/LE:%d-%d)",
- le16_to_cpu(bt->scan_info_v2[CXSCAN_BG].win),
- le16_to_cpu(bt->scan_info_v2[CXSCAN_BG].intvl),
- le16_to_cpu(bt->scan_info_v2[CXSCAN_INIT].win),
- le16_to_cpu(bt->scan_info_v2[CXSCAN_INIT].intvl),
- le16_to_cpu(bt->scan_info_v2[CXSCAN_LE].win),
- le16_to_cpu(bt->scan_info_v2[CXSCAN_LE].intvl));
+ p += scnprintf(p, end - p,
+ "(BG:%d-%d/INIT:%d-%d/LE:%d-%d)",
+ le16_to_cpu(bt->scan_info_v2[CXSCAN_BG].win),
+ le16_to_cpu(bt->scan_info_v2[CXSCAN_BG].intvl),
+ le16_to_cpu(bt->scan_info_v2[CXSCAN_INIT].win),
+ le16_to_cpu(bt->scan_info_v2[CXSCAN_INIT].intvl),
+ le16_to_cpu(bt->scan_info_v2[CXSCAN_LE].win),
+ le16_to_cpu(bt->scan_info_v2[CXSCAN_LE].intvl));
}
- seq_puts(m, "\n");
+ p += scnprintf(p, end - p, "\n");
}
if (bt_linfo->profile_cnt.now || bt_linfo->status.map.ble_connect)
@@ -8560,6 +8584,8 @@ static void _show_bt_info(struct rtw89_dev *rtwdev, struct seq_file *m)
rtw89_btc_fw_en_rpt(rtwdev, RPT_EN_BT_DEVICE_INFO, true);
else
rtw89_btc_fw_en_rpt(rtwdev, RPT_EN_BT_DEVICE_INFO, false);
+
+ return p - buf;
}
#define CASE_BTC_RSN_STR(e) case BTC_RSN_ ## e: return #e
@@ -8853,114 +8879,132 @@ static const char *id_to_ant(u32 id)
}
static
-void seq_print_segment(struct seq_file *m, const char *prefix, u16 *data,
- u8 len, u8 seg_len, u8 start_idx, u8 ring_len)
+int scnprintf_segment(char *buf, size_t bufsz, const char *prefix, const u16 *data,
+ u8 len, u8 seg_len, u8 start_idx, u8 ring_len)
{
- u8 i;
+ char *p = buf, *end = buf + bufsz;
u8 cur_index;
+ u8 i;
for (i = 0; i < len ; i++) {
if ((i % seg_len) == 0)
- seq_printf(m, " %-15s : ", prefix);
+ p += scnprintf(p, end - p, " %-15s : ", prefix);
cur_index = (start_idx + i) % ring_len;
if (i % 3 == 0)
- seq_printf(m, "-> %-20s",
- steps_to_str(*(data + cur_index)));
+ p += scnprintf(p, end - p, "-> %-20s",
+ steps_to_str(*(data + cur_index)));
else if (i % 3 == 1)
- seq_printf(m, "-> %-15s",
- steps_to_str(*(data + cur_index)));
+ p += scnprintf(p, end - p, "-> %-15s",
+ steps_to_str(*(data + cur_index)));
else
- seq_printf(m, "-> %-13s",
- steps_to_str(*(data + cur_index)));
+ p += scnprintf(p, end - p, "-> %-13s",
+ steps_to_str(*(data + cur_index)));
if (i == (len - 1) || (i % seg_len) == (seg_len - 1))
- seq_puts(m, "\n");
+ p += scnprintf(p, end - p, "\n");
}
+
+ return p - buf;
}
-static void _show_dm_step(struct rtw89_dev *rtwdev, struct seq_file *m)
+static int _show_dm_step(struct rtw89_dev *rtwdev, char *buf, size_t bufsz)
{
struct rtw89_btc *btc = &rtwdev->btc;
struct rtw89_btc_dm *dm = &btc->dm;
+ char *p = buf, *end = buf + bufsz;
u8 start_idx;
u8 len;
len = dm->dm_step.step_ov ? RTW89_BTC_DM_MAXSTEP : dm->dm_step.step_pos;
start_idx = dm->dm_step.step_ov ? dm->dm_step.step_pos : 0;
- seq_print_segment(m, "[dm_steps]", dm->dm_step.step, len, 6, start_idx,
- ARRAY_SIZE(dm->dm_step.step));
+ p += scnprintf_segment(p, end - p, "[dm_steps]", dm->dm_step.step, len,
+ 6, start_idx, ARRAY_SIZE(dm->dm_step.step));
+
+ return p - buf;
}
-static void _show_dm_info(struct rtw89_dev *rtwdev, struct seq_file *m)
+static int _show_dm_info(struct rtw89_dev *rtwdev, char *buf, size_t bufsz)
{
struct rtw89_btc *btc = &rtwdev->btc;
const struct rtw89_btc_ver *ver = btc->ver;
struct rtw89_btc_dm *dm = &btc->dm;
struct rtw89_btc_wl_info *wl = &btc->cx.wl;
struct rtw89_btc_bt_info *bt = &btc->cx.bt;
+ char *p = buf, *end = buf + bufsz;
u8 igno_bt;
if (!(dm->coex_info_map & BTC_COEX_INFO_DM))
- return;
+ return 0;
- seq_printf(m, "========== [Mechanism Status %s] ==========\n",
- (btc->manual_ctrl ? "(Manual)" : "(Auto)"));
+ p += scnprintf(p, end - p,
+ "========== [Mechanism Status %s] ==========\n",
+ (btc->manual_ctrl ? "(Manual)" : "(Auto)"));
- seq_printf(m,
- " %-15s : type:%s, reason:%s(), action:%s(), ant_path:%s, init_mode:%s, run_cnt:%d\n",
- "[status]",
- btc->ant_type == BTC_ANT_SHARED ? "shared" : "dedicated",
- steps_to_str(dm->run_reason),
- steps_to_str(dm->run_action | BTC_ACT_EXT_BIT),
- id_to_ant(FIELD_GET(GENMASK(7, 0), dm->set_ant_path)),
- id_to_mode(wl->coex_mode),
- dm->cnt_dm[BTC_DCNT_RUN]);
+ p += scnprintf(p, end - p,
+ " %-15s : type:%s, reason:%s(), action:%s(), ant_path:%s, init_mode:%s, run_cnt:%d\n",
+ "[status]",
+ btc->ant_type == BTC_ANT_SHARED ? "shared" : "dedicated",
+ steps_to_str(dm->run_reason),
+ steps_to_str(dm->run_action | BTC_ACT_EXT_BIT),
+ id_to_ant(FIELD_GET(GENMASK(7, 0), dm->set_ant_path)),
+ id_to_mode(wl->coex_mode),
+ dm->cnt_dm[BTC_DCNT_RUN]);
- _show_dm_step(rtwdev, m);
+ p += _show_dm_step(rtwdev, p, end - p);
if (ver->fcxctrl == 7)
igno_bt = btc->ctrl.ctrl_v7.igno_bt;
else
igno_bt = btc->ctrl.ctrl.igno_bt;
- seq_printf(m, " %-15s : wl_only:%d, bt_only:%d, igno_bt:%d, free_run:%d, wl_ps_ctrl:%d, wl_mimo_ps:%d, ",
- "[dm_flag]", dm->wl_only, dm->bt_only, igno_bt,
- dm->freerun, btc->lps, dm->wl_mimo_ps);
+ p += scnprintf(p, end - p,
+ " %-15s : wl_only:%d, bt_only:%d, igno_bt:%d, free_run:%d, wl_ps_ctrl:%d, wl_mimo_ps:%d, ",
+ "[dm_flag]", dm->wl_only, dm->bt_only, igno_bt,
+ dm->freerun, btc->lps, dm->wl_mimo_ps);
- seq_printf(m, "leak_ap:%d, fw_offload:%s%s\n", dm->leak_ap,
- (BTC_CX_FW_OFFLOAD ? "Y" : "N"),
- (dm->wl_fw_cx_offload == BTC_CX_FW_OFFLOAD ?
- "" : "(Mismatch!!)"));
+ p += scnprintf(p, end - p, "leak_ap:%d, fw_offload:%s%s\n",
+ dm->leak_ap,
+ (BTC_CX_FW_OFFLOAD ? "Y" : "N"),
+ (dm->wl_fw_cx_offload == BTC_CX_FW_OFFLOAD ?
+ "" : "(Mismatch!!)"));
if (dm->rf_trx_para.wl_tx_power == 0xff)
- seq_printf(m,
- " %-15s : wl_rssi_lvl:%d, para_lvl:%d, wl_tx_pwr:orig, ",
- "[trx_ctrl]", wl->rssi_level, dm->trx_para_level);
+ p += scnprintf(p, end - p,
+ " %-15s : wl_rssi_lvl:%d, para_lvl:%d, wl_tx_pwr:orig, ",
+ "[trx_ctrl]", wl->rssi_level,
+ dm->trx_para_level);
else
- seq_printf(m,
- " %-15s : wl_rssi_lvl:%d, para_lvl:%d, wl_tx_pwr:%d, ",
- "[trx_ctrl]", wl->rssi_level, dm->trx_para_level,
- dm->rf_trx_para.wl_tx_power);
+ p += scnprintf(p, end - p,
+ " %-15s : wl_rssi_lvl:%d, para_lvl:%d, wl_tx_pwr:%d, ",
+ "[trx_ctrl]", wl->rssi_level,
+ dm->trx_para_level,
+ dm->rf_trx_para.wl_tx_power);
- seq_printf(m,
- "wl_rx_lvl:%d, bt_tx_pwr_dec:%d, bt_rx_lna:%d(%s-tbl), wl_btg_rx:%d\n",
- dm->rf_trx_para.wl_rx_gain, dm->rf_trx_para.bt_tx_power,
- dm->rf_trx_para.bt_rx_gain,
- (bt->hi_lna_rx ? "Hi" : "Ori"), dm->wl_btg_rx);
+ p += scnprintf(p, end - p,
+ "wl_rx_lvl:%d, bt_tx_pwr_dec:%d, bt_rx_lna:%d(%s-tbl), wl_btg_rx:%d\n",
+ dm->rf_trx_para.wl_rx_gain,
+ dm->rf_trx_para.bt_tx_power,
+ dm->rf_trx_para.bt_rx_gain,
+ (bt->hi_lna_rx ? "Hi" : "Ori"), dm->wl_btg_rx);
- seq_printf(m,
- " %-15s : wl_tx_limit[en:%d/max_t:%dus/max_retry:%d], bt_slot_reg:%d-TU, bt_scan_rx_low_pri:%d\n",
- "[dm_ctrl]", dm->wl_tx_limit.enable, dm->wl_tx_limit.tx_time,
- dm->wl_tx_limit.tx_retry, btc->bt_req_len, bt->scan_rx_low_pri);
+ p += scnprintf(p, end - p,
+ " %-15s : wl_tx_limit[en:%d/max_t:%dus/max_retry:%d], bt_slot_reg:%d-TU, bt_scan_rx_low_pri:%d\n",
+ "[dm_ctrl]", dm->wl_tx_limit.enable,
+ dm->wl_tx_limit.tx_time,
+ dm->wl_tx_limit.tx_retry, btc->bt_req_len,
+ bt->scan_rx_low_pri);
+
+ return p - buf;
}
-static void _show_error(struct rtw89_dev *rtwdev, struct seq_file *m)
+static int _show_error(struct rtw89_dev *rtwdev, char *buf, size_t bufsz)
{
struct rtw89_btc *btc = &rtwdev->btc;
const struct rtw89_btc_ver *ver = btc->ver;
struct rtw89_btc_btf_fwinfo *pfwinfo = &btc->fwinfo;
union rtw89_btc_fbtc_cysta_info *pcysta;
+ char *p = buf, *end = buf + bufsz;
u32 except_cnt, exception_map;
pcysta = &pfwinfo->rpt_fbtc_cysta.finfo;
@@ -8985,81 +9029,87 @@ static void _show_error(struct rtw89_dev *rtwdev, struct seq_file *m)
except_cnt = pcysta->v7.except_cnt;
exception_map = le32_to_cpu(pcysta->v7.except_map);
} else {
- return;
+ return 0;
}
if (pfwinfo->event[BTF_EVNT_BUF_OVERFLOW] == 0 && except_cnt == 0 &&
!pfwinfo->len_mismch && !pfwinfo->fver_mismch)
- return;
+ return 0;
- seq_printf(m, " %-15s : ", "[error]");
+ p += scnprintf(p, end - p, " %-15s : ", "[error]");
if (pfwinfo->event[BTF_EVNT_BUF_OVERFLOW]) {
- seq_printf(m,
- "overflow-cnt: %d, ",
- pfwinfo->event[BTF_EVNT_BUF_OVERFLOW]);
+ p += scnprintf(p, end - p,
+ "overflow-cnt: %d, ",
+ pfwinfo->event[BTF_EVNT_BUF_OVERFLOW]);
}
if (pfwinfo->len_mismch) {
- seq_printf(m,
- "len-mismatch: 0x%x, ",
- pfwinfo->len_mismch);
+ p += scnprintf(p, end - p,
+ "len-mismatch: 0x%x, ",
+ pfwinfo->len_mismch);
}
if (pfwinfo->fver_mismch) {
- seq_printf(m,
- "fver-mismatch: 0x%x, ",
- pfwinfo->fver_mismch);
+ p += scnprintf(p, end - p,
+ "fver-mismatch: 0x%x, ",
+ pfwinfo->fver_mismch);
}
/* cycle statistics exceptions */
if (exception_map || except_cnt) {
- seq_printf(m,
- "exception-type: 0x%x, exception-cnt = %d",
- exception_map, except_cnt);
+ p += scnprintf(p, end - p,
+ "exception-type: 0x%x, exception-cnt = %d",
+ exception_map, except_cnt);
}
- seq_puts(m, "\n");
+ p += scnprintf(p, end - p, "\n");
+
+ return p - buf;
}
-static void _show_fbtc_tdma(struct rtw89_dev *rtwdev, struct seq_file *m)
+static int _show_fbtc_tdma(struct rtw89_dev *rtwdev, char *buf, size_t bufsz)
{
struct rtw89_btc *btc = &rtwdev->btc;
const struct rtw89_btc_ver *ver = btc->ver;
struct rtw89_btc_btf_fwinfo *pfwinfo = &btc->fwinfo;
struct rtw89_btc_rpt_cmn_info *pcinfo = NULL;
struct rtw89_btc_fbtc_tdma *t = NULL;
+ char *p = buf, *end = buf + bufsz;
pcinfo = &pfwinfo->rpt_fbtc_tdma.cinfo;
if (!pcinfo->valid)
- return;
+ return 0;
if (ver->fcxtdma == 1)
t = &pfwinfo->rpt_fbtc_tdma.finfo.v1;
else
t = &pfwinfo->rpt_fbtc_tdma.finfo.v3.tdma;
- seq_printf(m,
- " %-15s : ", "[tdma_policy]");
- seq_printf(m,
- "type:%d, rx_flow_ctrl:%d, tx_pause:%d, ",
- (u32)t->type,
- t->rxflctrl, t->txpause);
+ p += scnprintf(p, end - p,
+ " %-15s : ", "[tdma_policy]");
+ p += scnprintf(p, end - p,
+ "type:%d, rx_flow_ctrl:%d, tx_pause:%d, ",
+ (u32)t->type,
+ t->rxflctrl, t->txpause);
- seq_printf(m,
- "wl_toggle_n:%d, leak_n:%d, ext_ctrl:%d, ",
- t->wtgle_n, t->leak_n, t->ext_ctrl);
+ p += scnprintf(p, end - p,
+ "wl_toggle_n:%d, leak_n:%d, ext_ctrl:%d, ",
+ t->wtgle_n, t->leak_n, t->ext_ctrl);
- seq_printf(m,
- "policy_type:%d",
- (u32)btc->policy_type);
+ p += scnprintf(p, end - p,
+ "policy_type:%d",
+ (u32)btc->policy_type);
- seq_puts(m, "\n");
+ p += scnprintf(p, end - p, "\n");
+
+ return p - buf;
}
-static void _show_fbtc_slots(struct rtw89_dev *rtwdev, struct seq_file *m)
+static int _show_fbtc_slots(struct rtw89_dev *rtwdev, char *buf, size_t bufsz)
{
struct rtw89_btc *btc = &rtwdev->btc;
struct rtw89_btc_dm *dm = &btc->dm;
+ char *p = buf, *end = buf + bufsz;
u16 dur, cxtype;
u32 tbl;
u8 i = 0;
@@ -9074,28 +9124,30 @@ static void _show_fbtc_slots(struct rtw89_dev *rtwdev, struct seq_file *m)
tbl = le32_to_cpu(dm->slot_now.v7[i].cxtbl);
cxtype = le16_to_cpu(dm->slot_now.v7[i].cxtype);
} else {
- return;
+ return 0;
}
if (i % 5 == 0)
- seq_printf(m,
- " %-15s : %5s[%03d/0x%x/%d]",
- "[slot_list]",
- id_to_slot((u32)i),
- dur, tbl, cxtype);
+ p += scnprintf(p, end - p,
+ " %-15s : %5s[%03d/0x%x/%d]",
+ "[slot_list]",
+ id_to_slot((u32)i),
+ dur, tbl, cxtype);
else
- seq_printf(m,
- ", %5s[%03d/0x%x/%d]",
- id_to_slot((u32)i),
- dur, tbl, cxtype);
+ p += scnprintf(p, end - p,
+ ", %5s[%03d/0x%x/%d]",
+ id_to_slot((u32)i),
+ dur, tbl, cxtype);
if (i % 5 == 4)
- seq_puts(m, "\n");
+ p += scnprintf(p, end - p, "\n");
}
- seq_puts(m, "\n");
+ p += scnprintf(p, end - p, "\n");
+
+ return p - buf;
}
-static void _show_fbtc_cysta_v2(struct rtw89_dev *rtwdev, struct seq_file *m)
+static int _show_fbtc_cysta_v2(struct rtw89_dev *rtwdev, char *buf, size_t bufsz)
{
struct rtw89_btc *btc = &rtwdev->btc;
struct rtw89_btc_btf_fwinfo *pfwinfo = &btc->fwinfo;
@@ -9104,63 +9156,64 @@ static void _show_fbtc_cysta_v2(struct rtw89_dev *rtwdev, struct seq_file *m)
struct rtw89_btc_rpt_cmn_info *pcinfo = NULL;
struct rtw89_btc_fbtc_cysta_v2 *pcysta_le32 = NULL;
union rtw89_btc_fbtc_rxflct r;
- u8 i, cnt = 0, slot_pair;
u16 cycle, c_begin, c_end, store_index;
+ char *p = buf, *end = buf + bufsz;
+ u8 i, cnt = 0, slot_pair;
pcinfo = &pfwinfo->rpt_fbtc_cysta.cinfo;
if (!pcinfo->valid)
- return;
+ return 0;
pcysta_le32 = &pfwinfo->rpt_fbtc_cysta.finfo.v2;
- seq_printf(m,
- " %-15s : cycle:%d, bcn[all:%d/all_ok:%d/bt:%d/bt_ok:%d]",
- "[cycle_cnt]",
- le16_to_cpu(pcysta_le32->cycles),
- le32_to_cpu(pcysta_le32->bcn_cnt[CXBCN_ALL]),
- le32_to_cpu(pcysta_le32->bcn_cnt[CXBCN_ALL_OK]),
- le32_to_cpu(pcysta_le32->bcn_cnt[CXBCN_BT_SLOT]),
- le32_to_cpu(pcysta_le32->bcn_cnt[CXBCN_BT_OK]));
+ p += scnprintf(p, end - p,
+ " %-15s : cycle:%d, bcn[all:%d/all_ok:%d/bt:%d/bt_ok:%d]",
+ "[cycle_cnt]",
+ le16_to_cpu(pcysta_le32->cycles),
+ le32_to_cpu(pcysta_le32->bcn_cnt[CXBCN_ALL]),
+ le32_to_cpu(pcysta_le32->bcn_cnt[CXBCN_ALL_OK]),
+ le32_to_cpu(pcysta_le32->bcn_cnt[CXBCN_BT_SLOT]),
+ le32_to_cpu(pcysta_le32->bcn_cnt[CXBCN_BT_OK]));
for (i = 0; i < CXST_MAX; i++) {
if (!le32_to_cpu(pcysta_le32->slot_cnt[i]))
continue;
- seq_printf(m, ", %s:%d", id_to_slot((u32)i),
- le32_to_cpu(pcysta_le32->slot_cnt[i]));
+ p += scnprintf(p, end - p, ", %s:%d", id_to_slot((u32)i),
+ le32_to_cpu(pcysta_le32->slot_cnt[i]));
}
if (dm->tdma_now.rxflctrl) {
- seq_printf(m, ", leak_rx:%d",
- le32_to_cpu(pcysta_le32->leakrx_cnt));
+ p += scnprintf(p, end - p, ", leak_rx:%d",
+ le32_to_cpu(pcysta_le32->leakrx_cnt));
}
if (le32_to_cpu(pcysta_le32->collision_cnt)) {
- seq_printf(m, ", collision:%d",
- le32_to_cpu(pcysta_le32->collision_cnt));
+ p += scnprintf(p, end - p, ", collision:%d",
+ le32_to_cpu(pcysta_le32->collision_cnt));
}
if (le32_to_cpu(pcysta_le32->skip_cnt)) {
- seq_printf(m, ", skip:%d",
- le32_to_cpu(pcysta_le32->skip_cnt));
- }
- seq_puts(m, "\n");
-
- seq_printf(m, " %-15s : avg_t[wl:%d/bt:%d/lk:%d.%03d]",
- "[cycle_time]",
- le16_to_cpu(pcysta_le32->tavg_cycle[CXT_WL]),
- le16_to_cpu(pcysta_le32->tavg_cycle[CXT_BT]),
- le16_to_cpu(pcysta_le32->tavg_lk) / 1000,
- le16_to_cpu(pcysta_le32->tavg_lk) % 1000);
- seq_printf(m, ", max_t[wl:%d/bt:%d/lk:%d.%03d]",
- le16_to_cpu(pcysta_le32->tmax_cycle[CXT_WL]),
- le16_to_cpu(pcysta_le32->tmax_cycle[CXT_BT]),
- le16_to_cpu(pcysta_le32->tmax_lk) / 1000,
- le16_to_cpu(pcysta_le32->tmax_lk) % 1000);
- seq_printf(m, ", maxdiff_t[wl:%d/bt:%d]\n",
- le16_to_cpu(pcysta_le32->tmaxdiff_cycle[CXT_WL]),
- le16_to_cpu(pcysta_le32->tmaxdiff_cycle[CXT_BT]));
+ p += scnprintf(p, end - p, ", skip:%d",
+ le32_to_cpu(pcysta_le32->skip_cnt));
+ }
+ p += scnprintf(p, end - p, "\n");
+
+ p += scnprintf(p, end - p, " %-15s : avg_t[wl:%d/bt:%d/lk:%d.%03d]",
+ "[cycle_time]",
+ le16_to_cpu(pcysta_le32->tavg_cycle[CXT_WL]),
+ le16_to_cpu(pcysta_le32->tavg_cycle[CXT_BT]),
+ le16_to_cpu(pcysta_le32->tavg_lk) / 1000,
+ le16_to_cpu(pcysta_le32->tavg_lk) % 1000);
+ p += scnprintf(p, end - p, ", max_t[wl:%d/bt:%d/lk:%d.%03d]",
+ le16_to_cpu(pcysta_le32->tmax_cycle[CXT_WL]),
+ le16_to_cpu(pcysta_le32->tmax_cycle[CXT_BT]),
+ le16_to_cpu(pcysta_le32->tmax_lk) / 1000,
+ le16_to_cpu(pcysta_le32->tmax_lk) % 1000);
+ p += scnprintf(p, end - p, ", maxdiff_t[wl:%d/bt:%d]\n",
+ le16_to_cpu(pcysta_le32->tmaxdiff_cycle[CXT_WL]),
+ le16_to_cpu(pcysta_le32->tmaxdiff_cycle[CXT_BT]));
if (le16_to_cpu(pcysta_le32->cycles) <= 1)
- return;
+ goto out;
/* 1 cycle record 1 wl-slot and 1 bt-slot */
slot_pair = BTC_CYCLE_SLOT_MAX / 2;
@@ -9177,53 +9230,57 @@ static void _show_fbtc_cysta_v2(struct rtw89_dev *rtwdev, struct seq_file *m)
store_index = ((cycle - 1) % slot_pair) * 2;
if (cnt % (BTC_CYCLE_SLOT_MAX / 4) == 1)
- seq_printf(m,
- " %-15s : ->b%02d->w%02d", "[cycle_step]",
- le16_to_cpu(pcysta_le32->tslot_cycle[store_index]),
- le16_to_cpu(pcysta_le32->tslot_cycle[store_index + 1]));
+ p += scnprintf(p, end - p,
+ " %-15s : ->b%02d->w%02d",
+ "[cycle_step]",
+ le16_to_cpu(pcysta_le32->tslot_cycle[store_index]),
+ le16_to_cpu(pcysta_le32->tslot_cycle[store_index + 1]));
else
- seq_printf(m,
- "->b%02d->w%02d",
- le16_to_cpu(pcysta_le32->tslot_cycle[store_index]),
- le16_to_cpu(pcysta_le32->tslot_cycle[store_index + 1]));
+ p += scnprintf(p, end - p,
+ "->b%02d->w%02d",
+ le16_to_cpu(pcysta_le32->tslot_cycle[store_index]),
+ le16_to_cpu(pcysta_le32->tslot_cycle[store_index + 1]));
if (cnt % (BTC_CYCLE_SLOT_MAX / 4) == 0 || cnt == c_end)
- seq_puts(m, "\n");
+ p += scnprintf(p, end - p, "\n");
}
if (a2dp->exist) {
- seq_printf(m,
- " %-15s : a2dp_ept:%d, a2dp_late:%d",
- "[a2dp_t_sta]",
- le16_to_cpu(pcysta_le32->a2dpept),
- le16_to_cpu(pcysta_le32->a2dpeptto));
-
- seq_printf(m,
- ", avg_t:%d, max_t:%d",
- le16_to_cpu(pcysta_le32->tavg_a2dpept),
- le16_to_cpu(pcysta_le32->tmax_a2dpept));
+ p += scnprintf(p, end - p,
+ " %-15s : a2dp_ept:%d, a2dp_late:%d",
+ "[a2dp_t_sta]",
+ le16_to_cpu(pcysta_le32->a2dpept),
+ le16_to_cpu(pcysta_le32->a2dpeptto));
+
+ p += scnprintf(p, end - p,
+ ", avg_t:%d, max_t:%d",
+ le16_to_cpu(pcysta_le32->tavg_a2dpept),
+ le16_to_cpu(pcysta_le32->tmax_a2dpept));
r.val = dm->tdma_now.rxflctrl;
if (r.type && r.tgln_n) {
- seq_printf(m,
- ", cycle[PSTDMA:%d/TDMA:%d], ",
- le16_to_cpu(pcysta_le32->cycles_a2dp[CXT_FLCTRL_ON]),
- le16_to_cpu(pcysta_le32->cycles_a2dp[CXT_FLCTRL_OFF]));
-
- seq_printf(m,
- "avg_t[PSTDMA:%d/TDMA:%d], ",
- le16_to_cpu(pcysta_le32->tavg_a2dp[CXT_FLCTRL_ON]),
- le16_to_cpu(pcysta_le32->tavg_a2dp[CXT_FLCTRL_OFF]));
-
- seq_printf(m,
- "max_t[PSTDMA:%d/TDMA:%d]",
- le16_to_cpu(pcysta_le32->tmax_a2dp[CXT_FLCTRL_ON]),
- le16_to_cpu(pcysta_le32->tmax_a2dp[CXT_FLCTRL_OFF]));
+ p += scnprintf(p, end - p,
+ ", cycle[PSTDMA:%d/TDMA:%d], ",
+ le16_to_cpu(pcysta_le32->cycles_a2dp[CXT_FLCTRL_ON]),
+ le16_to_cpu(pcysta_le32->cycles_a2dp[CXT_FLCTRL_OFF]));
+
+ p += scnprintf(p, end - p,
+ "avg_t[PSTDMA:%d/TDMA:%d], ",
+ le16_to_cpu(pcysta_le32->tavg_a2dp[CXT_FLCTRL_ON]),
+ le16_to_cpu(pcysta_le32->tavg_a2dp[CXT_FLCTRL_OFF]));
+
+ p += scnprintf(p, end - p,
+ "max_t[PSTDMA:%d/TDMA:%d]",
+ le16_to_cpu(pcysta_le32->tmax_a2dp[CXT_FLCTRL_ON]),
+ le16_to_cpu(pcysta_le32->tmax_a2dp[CXT_FLCTRL_OFF]));
}
- seq_puts(m, "\n");
+ p += scnprintf(p, end - p, "\n");
}
+
+out:
+ return p - buf;
}
-static void _show_fbtc_cysta_v3(struct rtw89_dev *rtwdev, struct seq_file *m)
+static int _show_fbtc_cysta_v3(struct rtw89_dev *rtwdev, char *buf, size_t bufsz)
{
struct rtw89_btc *btc = &rtwdev->btc;
struct rtw89_btc_bt_a2dp_desc *a2dp = &btc->cx.bt.link_info.a2dp_desc;
@@ -9234,60 +9291,64 @@ static void _show_fbtc_cysta_v3(struct rtw89_dev *rtwdev, struct seq_file *m)
struct rtw89_btc_rpt_cmn_info *pcinfo;
u8 i, cnt = 0, slot_pair, divide_cnt;
u16 cycle, c_begin, c_end, store_index;
+ char *p = buf, *end = buf + bufsz;
pcinfo = &pfwinfo->rpt_fbtc_cysta.cinfo;
if (!pcinfo->valid)
- return;
+ return 0;
pcysta = &pfwinfo->rpt_fbtc_cysta.finfo.v3;
- seq_printf(m,
- " %-15s : cycle:%d, bcn[all:%d/all_ok:%d/bt:%d/bt_ok:%d]",
- "[cycle_cnt]",
- le16_to_cpu(pcysta->cycles),
- le32_to_cpu(pcysta->bcn_cnt[CXBCN_ALL]),
- le32_to_cpu(pcysta->bcn_cnt[CXBCN_ALL_OK]),
- le32_to_cpu(pcysta->bcn_cnt[CXBCN_BT_SLOT]),
- le32_to_cpu(pcysta->bcn_cnt[CXBCN_BT_OK]));
+ p += scnprintf(p, end - p,
+ " %-15s : cycle:%d, bcn[all:%d/all_ok:%d/bt:%d/bt_ok:%d]",
+ "[cycle_cnt]",
+ le16_to_cpu(pcysta->cycles),
+ le32_to_cpu(pcysta->bcn_cnt[CXBCN_ALL]),
+ le32_to_cpu(pcysta->bcn_cnt[CXBCN_ALL_OK]),
+ le32_to_cpu(pcysta->bcn_cnt[CXBCN_BT_SLOT]),
+ le32_to_cpu(pcysta->bcn_cnt[CXBCN_BT_OK]));
for (i = 0; i < CXST_MAX; i++) {
if (!le32_to_cpu(pcysta->slot_cnt[i]))
continue;
- seq_printf(m, ", %s:%d", id_to_slot(i),
- le32_to_cpu(pcysta->slot_cnt[i]));
+ p += scnprintf(p, end - p, ", %s:%d", id_to_slot(i),
+ le32_to_cpu(pcysta->slot_cnt[i]));
}
if (dm->tdma_now.rxflctrl)
- seq_printf(m, ", leak_rx:%d", le32_to_cpu(pcysta->leak_slot.cnt_rximr));
+ p += scnprintf(p, end - p, ", leak_rx:%d",
+ le32_to_cpu(pcysta->leak_slot.cnt_rximr));
if (le32_to_cpu(pcysta->collision_cnt))
- seq_printf(m, ", collision:%d", le32_to_cpu(pcysta->collision_cnt));
+ p += scnprintf(p, end - p, ", collision:%d",
+ le32_to_cpu(pcysta->collision_cnt));
if (le32_to_cpu(pcysta->skip_cnt))
- seq_printf(m, ", skip:%d", le32_to_cpu(pcysta->skip_cnt));
-
- seq_puts(m, "\n");
-
- seq_printf(m, " %-15s : avg_t[wl:%d/bt:%d/lk:%d.%03d]",
- "[cycle_time]",
- le16_to_cpu(pcysta->cycle_time.tavg[CXT_WL]),
- le16_to_cpu(pcysta->cycle_time.tavg[CXT_BT]),
- le16_to_cpu(pcysta->leak_slot.tavg) / 1000,
- le16_to_cpu(pcysta->leak_slot.tavg) % 1000);
- seq_printf(m,
- ", max_t[wl:%d/bt:%d/lk:%d.%03d]",
- le16_to_cpu(pcysta->cycle_time.tmax[CXT_WL]),
- le16_to_cpu(pcysta->cycle_time.tmax[CXT_BT]),
- le16_to_cpu(pcysta->leak_slot.tmax) / 1000,
- le16_to_cpu(pcysta->leak_slot.tmax) % 1000);
- seq_printf(m,
- ", maxdiff_t[wl:%d/bt:%d]\n",
- le16_to_cpu(pcysta->cycle_time.tmaxdiff[CXT_WL]),
- le16_to_cpu(pcysta->cycle_time.tmaxdiff[CXT_BT]));
+ p += scnprintf(p, end - p, ", skip:%d",
+ le32_to_cpu(pcysta->skip_cnt));
+
+ p += scnprintf(p, end - p, "\n");
+
+ p += scnprintf(p, end - p, " %-15s : avg_t[wl:%d/bt:%d/lk:%d.%03d]",
+ "[cycle_time]",
+ le16_to_cpu(pcysta->cycle_time.tavg[CXT_WL]),
+ le16_to_cpu(pcysta->cycle_time.tavg[CXT_BT]),
+ le16_to_cpu(pcysta->leak_slot.tavg) / 1000,
+ le16_to_cpu(pcysta->leak_slot.tavg) % 1000);
+ p += scnprintf(p, end - p,
+ ", max_t[wl:%d/bt:%d/lk:%d.%03d]",
+ le16_to_cpu(pcysta->cycle_time.tmax[CXT_WL]),
+ le16_to_cpu(pcysta->cycle_time.tmax[CXT_BT]),
+ le16_to_cpu(pcysta->leak_slot.tmax) / 1000,
+ le16_to_cpu(pcysta->leak_slot.tmax) % 1000);
+ p += scnprintf(p, end - p,
+ ", maxdiff_t[wl:%d/bt:%d]\n",
+ le16_to_cpu(pcysta->cycle_time.tmaxdiff[CXT_WL]),
+ le16_to_cpu(pcysta->cycle_time.tmaxdiff[CXT_BT]));
cycle = le16_to_cpu(pcysta->cycles);
if (cycle <= 1)
- return;
+ goto out;
/* 1 cycle record 1 wl-slot and 1 bt-slot */
slot_pair = BTC_CYCLE_SLOT_MAX / 2;
@@ -9309,51 +9370,56 @@ static void _show_fbtc_cysta_v3(struct rtw89_dev *rtwdev, struct seq_file *m)
store_index = ((cycle - 1) % slot_pair) * 2;
if (cnt % divide_cnt == 1)
- seq_printf(m, " %-15s : ", "[cycle_step]");
+ p += scnprintf(p, end - p, " %-15s : ",
+ "[cycle_step]");
- seq_printf(m, "->b%02d",
- le16_to_cpu(pcysta->slot_step_time[store_index]));
+ p += scnprintf(p, end - p, "->b%02d",
+ le16_to_cpu(pcysta->slot_step_time[store_index]));
if (a2dp->exist) {
a2dp_trx = &pcysta->a2dp_trx[store_index];
- seq_printf(m, "(%d/%d/%dM/%d/%d/%d)",
- a2dp_trx->empty_cnt,
- a2dp_trx->retry_cnt,
- a2dp_trx->tx_rate ? 3 : 2,
- a2dp_trx->tx_cnt,
- a2dp_trx->ack_cnt,
- a2dp_trx->nack_cnt);
+ p += scnprintf(p, end - p, "(%d/%d/%dM/%d/%d/%d)",
+ a2dp_trx->empty_cnt,
+ a2dp_trx->retry_cnt,
+ a2dp_trx->tx_rate ? 3 : 2,
+ a2dp_trx->tx_cnt,
+ a2dp_trx->ack_cnt,
+ a2dp_trx->nack_cnt);
}
- seq_printf(m, "->w%02d",
- le16_to_cpu(pcysta->slot_step_time[store_index + 1]));
+ p += scnprintf(p, end - p, "->w%02d",
+ le16_to_cpu(pcysta->slot_step_time[store_index + 1]));
if (a2dp->exist) {
a2dp_trx = &pcysta->a2dp_trx[store_index + 1];
- seq_printf(m, "(%d/%d/%dM/%d/%d/%d)",
- a2dp_trx->empty_cnt,
- a2dp_trx->retry_cnt,
- a2dp_trx->tx_rate ? 3 : 2,
- a2dp_trx->tx_cnt,
- a2dp_trx->ack_cnt,
- a2dp_trx->nack_cnt);
+ p += scnprintf(p, end - p, "(%d/%d/%dM/%d/%d/%d)",
+ a2dp_trx->empty_cnt,
+ a2dp_trx->retry_cnt,
+ a2dp_trx->tx_rate ? 3 : 2,
+ a2dp_trx->tx_cnt,
+ a2dp_trx->ack_cnt,
+ a2dp_trx->nack_cnt);
}
if (cnt % divide_cnt == 0 || cnt == c_end)
- seq_puts(m, "\n");
+ p += scnprintf(p, end - p, "\n");
}
if (a2dp->exist) {
- seq_printf(m, " %-15s : a2dp_ept:%d, a2dp_late:%d",
- "[a2dp_t_sta]",
- le16_to_cpu(pcysta->a2dp_ept.cnt),
- le16_to_cpu(pcysta->a2dp_ept.cnt_timeout));
+ p += scnprintf(p, end - p,
+ " %-15s : a2dp_ept:%d, a2dp_late:%d",
+ "[a2dp_t_sta]",
+ le16_to_cpu(pcysta->a2dp_ept.cnt),
+ le16_to_cpu(pcysta->a2dp_ept.cnt_timeout));
- seq_printf(m, ", avg_t:%d, max_t:%d",
- le16_to_cpu(pcysta->a2dp_ept.tavg),
- le16_to_cpu(pcysta->a2dp_ept.tmax));
+ p += scnprintf(p, end - p, ", avg_t:%d, max_t:%d",
+ le16_to_cpu(pcysta->a2dp_ept.tavg),
+ le16_to_cpu(pcysta->a2dp_ept.tmax));
- seq_puts(m, "\n");
+ p += scnprintf(p, end - p, "\n");
}
+
+out:
+ return p - buf;
}
-static void _show_fbtc_cysta_v4(struct rtw89_dev *rtwdev, struct seq_file *m)
+static int _show_fbtc_cysta_v4(struct rtw89_dev *rtwdev, char *buf, size_t bufsz)
{
struct rtw89_btc *btc = &rtwdev->btc;
struct rtw89_btc_bt_a2dp_desc *a2dp = &btc->cx.bt.link_info.a2dp_desc;
@@ -9364,62 +9430,64 @@ static void _show_fbtc_cysta_v4(struct rtw89_dev *rtwdev, struct seq_file *m)
struct rtw89_btc_rpt_cmn_info *pcinfo;
u8 i, cnt = 0, slot_pair, divide_cnt;
u16 cycle, c_begin, c_end, store_index;
+ char *p = buf, *end = buf + bufsz;
pcinfo = &pfwinfo->rpt_fbtc_cysta.cinfo;
if (!pcinfo->valid)
- return;
+ return 0;
pcysta = &pfwinfo->rpt_fbtc_cysta.finfo.v4;
- seq_printf(m,
- " %-15s : cycle:%d, bcn[all:%d/all_ok:%d/bt:%d/bt_ok:%d]",
- "[cycle_cnt]",
- le16_to_cpu(pcysta->cycles),
- le16_to_cpu(pcysta->bcn_cnt[CXBCN_ALL]),
- le16_to_cpu(pcysta->bcn_cnt[CXBCN_ALL_OK]),
- le16_to_cpu(pcysta->bcn_cnt[CXBCN_BT_SLOT]),
- le16_to_cpu(pcysta->bcn_cnt[CXBCN_BT_OK]));
+ p += scnprintf(p, end - p,
+ " %-15s : cycle:%d, bcn[all:%d/all_ok:%d/bt:%d/bt_ok:%d]",
+ "[cycle_cnt]",
+ le16_to_cpu(pcysta->cycles),
+ le16_to_cpu(pcysta->bcn_cnt[CXBCN_ALL]),
+ le16_to_cpu(pcysta->bcn_cnt[CXBCN_ALL_OK]),
+ le16_to_cpu(pcysta->bcn_cnt[CXBCN_BT_SLOT]),
+ le16_to_cpu(pcysta->bcn_cnt[CXBCN_BT_OK]));
for (i = 0; i < CXST_MAX; i++) {
if (!le16_to_cpu(pcysta->slot_cnt[i]))
continue;
- seq_printf(m, ", %s:%d", id_to_slot(i),
- le16_to_cpu(pcysta->slot_cnt[i]));
+ p += scnprintf(p, end - p, ", %s:%d", id_to_slot(i),
+ le16_to_cpu(pcysta->slot_cnt[i]));
}
if (dm->tdma_now.rxflctrl)
- seq_printf(m, ", leak_rx:%d",
- le32_to_cpu(pcysta->leak_slot.cnt_rximr));
+ p += scnprintf(p, end - p, ", leak_rx:%d",
+ le32_to_cpu(pcysta->leak_slot.cnt_rximr));
if (pcysta->collision_cnt)
- seq_printf(m, ", collision:%d", pcysta->collision_cnt);
+ p += scnprintf(p, end - p, ", collision:%d",
+ pcysta->collision_cnt);
if (le16_to_cpu(pcysta->skip_cnt))
- seq_printf(m, ", skip:%d",
- le16_to_cpu(pcysta->skip_cnt));
-
- seq_puts(m, "\n");
-
- seq_printf(m, " %-15s : avg_t[wl:%d/bt:%d/lk:%d.%03d]",
- "[cycle_time]",
- le16_to_cpu(pcysta->cycle_time.tavg[CXT_WL]),
- le16_to_cpu(pcysta->cycle_time.tavg[CXT_BT]),
- le16_to_cpu(pcysta->leak_slot.tavg) / 1000,
- le16_to_cpu(pcysta->leak_slot.tavg) % 1000);
- seq_printf(m,
- ", max_t[wl:%d/bt:%d/lk:%d.%03d]",
- le16_to_cpu(pcysta->cycle_time.tmax[CXT_WL]),
- le16_to_cpu(pcysta->cycle_time.tmax[CXT_BT]),
- le16_to_cpu(pcysta->leak_slot.tmax) / 1000,
- le16_to_cpu(pcysta->leak_slot.tmax) % 1000);
- seq_printf(m,
- ", maxdiff_t[wl:%d/bt:%d]\n",
- le16_to_cpu(pcysta->cycle_time.tmaxdiff[CXT_WL]),
- le16_to_cpu(pcysta->cycle_time.tmaxdiff[CXT_BT]));
+ p += scnprintf(p, end - p, ", skip:%d",
+ le16_to_cpu(pcysta->skip_cnt));
+
+ p += scnprintf(p, end - p, "\n");
+
+ p += scnprintf(p, end - p, " %-15s : avg_t[wl:%d/bt:%d/lk:%d.%03d]",
+ "[cycle_time]",
+ le16_to_cpu(pcysta->cycle_time.tavg[CXT_WL]),
+ le16_to_cpu(pcysta->cycle_time.tavg[CXT_BT]),
+ le16_to_cpu(pcysta->leak_slot.tavg) / 1000,
+ le16_to_cpu(pcysta->leak_slot.tavg) % 1000);
+ p += scnprintf(p, end - p,
+ ", max_t[wl:%d/bt:%d/lk:%d.%03d]",
+ le16_to_cpu(pcysta->cycle_time.tmax[CXT_WL]),
+ le16_to_cpu(pcysta->cycle_time.tmax[CXT_BT]),
+ le16_to_cpu(pcysta->leak_slot.tmax) / 1000,
+ le16_to_cpu(pcysta->leak_slot.tmax) % 1000);
+ p += scnprintf(p, end - p,
+ ", maxdiff_t[wl:%d/bt:%d]\n",
+ le16_to_cpu(pcysta->cycle_time.tmaxdiff[CXT_WL]),
+ le16_to_cpu(pcysta->cycle_time.tmaxdiff[CXT_BT]));
cycle = le16_to_cpu(pcysta->cycles);
if (cycle <= 1)
- return;
+ goto out;
/* 1 cycle record 1 wl-slot and 1 bt-slot */
slot_pair = BTC_CYCLE_SLOT_MAX / 2;
@@ -9441,51 +9509,56 @@ static void _show_fbtc_cysta_v4(struct rtw89_dev *rtwdev, struct seq_file *m)
store_index = ((cycle - 1) % slot_pair) * 2;
if (cnt % divide_cnt == 1)
- seq_printf(m, " %-15s : ", "[cycle_step]");
+ p += scnprintf(p, end - p, " %-15s : ",
+ "[cycle_step]");
- seq_printf(m, "->b%02d",
- le16_to_cpu(pcysta->slot_step_time[store_index]));
+ p += scnprintf(p, end - p, "->b%02d",
+ le16_to_cpu(pcysta->slot_step_time[store_index]));
if (a2dp->exist) {
a2dp_trx = &pcysta->a2dp_trx[store_index];
- seq_printf(m, "(%d/%d/%dM/%d/%d/%d)",
- a2dp_trx->empty_cnt,
- a2dp_trx->retry_cnt,
- a2dp_trx->tx_rate ? 3 : 2,
- a2dp_trx->tx_cnt,
- a2dp_trx->ack_cnt,
- a2dp_trx->nack_cnt);
+ p += scnprintf(p, end - p, "(%d/%d/%dM/%d/%d/%d)",
+ a2dp_trx->empty_cnt,
+ a2dp_trx->retry_cnt,
+ a2dp_trx->tx_rate ? 3 : 2,
+ a2dp_trx->tx_cnt,
+ a2dp_trx->ack_cnt,
+ a2dp_trx->nack_cnt);
}
- seq_printf(m, "->w%02d",
- le16_to_cpu(pcysta->slot_step_time[store_index + 1]));
+ p += scnprintf(p, end - p, "->w%02d",
+ le16_to_cpu(pcysta->slot_step_time[store_index + 1]));
if (a2dp->exist) {
a2dp_trx = &pcysta->a2dp_trx[store_index + 1];
- seq_printf(m, "(%d/%d/%dM/%d/%d/%d)",
- a2dp_trx->empty_cnt,
- a2dp_trx->retry_cnt,
- a2dp_trx->tx_rate ? 3 : 2,
- a2dp_trx->tx_cnt,
- a2dp_trx->ack_cnt,
- a2dp_trx->nack_cnt);
+ p += scnprintf(p, end - p, "(%d/%d/%dM/%d/%d/%d)",
+ a2dp_trx->empty_cnt,
+ a2dp_trx->retry_cnt,
+ a2dp_trx->tx_rate ? 3 : 2,
+ a2dp_trx->tx_cnt,
+ a2dp_trx->ack_cnt,
+ a2dp_trx->nack_cnt);
}
if (cnt % divide_cnt == 0 || cnt == c_end)
- seq_puts(m, "\n");
+ p += scnprintf(p, end - p, "\n");
}
if (a2dp->exist) {
- seq_printf(m, " %-15s : a2dp_ept:%d, a2dp_late:%d",
- "[a2dp_t_sta]",
- le16_to_cpu(pcysta->a2dp_ept.cnt),
- le16_to_cpu(pcysta->a2dp_ept.cnt_timeout));
+ p += scnprintf(p, end - p,
+ " %-15s : a2dp_ept:%d, a2dp_late:%d",
+ "[a2dp_t_sta]",
+ le16_to_cpu(pcysta->a2dp_ept.cnt),
+ le16_to_cpu(pcysta->a2dp_ept.cnt_timeout));
- seq_printf(m, ", avg_t:%d, max_t:%d",
- le16_to_cpu(pcysta->a2dp_ept.tavg),
- le16_to_cpu(pcysta->a2dp_ept.tmax));
+ p += scnprintf(p, end - p, ", avg_t:%d, max_t:%d",
+ le16_to_cpu(pcysta->a2dp_ept.tavg),
+ le16_to_cpu(pcysta->a2dp_ept.tmax));
- seq_puts(m, "\n");
+ p += scnprintf(p, end - p, "\n");
}
+
+out:
+ return p - buf;
}
-static void _show_fbtc_cysta_v5(struct rtw89_dev *rtwdev, struct seq_file *m)
+static int _show_fbtc_cysta_v5(struct rtw89_dev *rtwdev, char *buf, size_t bufsz)
{
struct rtw89_btc *btc = &rtwdev->btc;
struct rtw89_btc_bt_a2dp_desc *a2dp = &btc->cx.bt.link_info.a2dp_desc;
@@ -9496,58 +9569,60 @@ static void _show_fbtc_cysta_v5(struct rtw89_dev *rtwdev, struct seq_file *m)
struct rtw89_btc_rpt_cmn_info *pcinfo;
u8 i, cnt = 0, slot_pair, divide_cnt;
u16 cycle, c_begin, c_end, store_index;
+ char *p = buf, *end = buf + bufsz;
pcinfo = &pfwinfo->rpt_fbtc_cysta.cinfo;
if (!pcinfo->valid)
- return;
+ return 0;
pcysta = &pfwinfo->rpt_fbtc_cysta.finfo.v5;
- seq_printf(m,
- " %-15s : cycle:%d, bcn[all:%d/all_ok:%d/bt:%d/bt_ok:%d]",
- "[cycle_cnt]",
- le16_to_cpu(pcysta->cycles),
- le16_to_cpu(pcysta->bcn_cnt[CXBCN_ALL]),
- le16_to_cpu(pcysta->bcn_cnt[CXBCN_ALL_OK]),
- le16_to_cpu(pcysta->bcn_cnt[CXBCN_BT_SLOT]),
- le16_to_cpu(pcysta->bcn_cnt[CXBCN_BT_OK]));
+ p += scnprintf(p, end - p,
+ " %-15s : cycle:%d, bcn[all:%d/all_ok:%d/bt:%d/bt_ok:%d]",
+ "[cycle_cnt]",
+ le16_to_cpu(pcysta->cycles),
+ le16_to_cpu(pcysta->bcn_cnt[CXBCN_ALL]),
+ le16_to_cpu(pcysta->bcn_cnt[CXBCN_ALL_OK]),
+ le16_to_cpu(pcysta->bcn_cnt[CXBCN_BT_SLOT]),
+ le16_to_cpu(pcysta->bcn_cnt[CXBCN_BT_OK]));
for (i = 0; i < CXST_MAX; i++) {
if (!le16_to_cpu(pcysta->slot_cnt[i]))
continue;
- seq_printf(m, ", %s:%d", id_to_slot(i),
- le16_to_cpu(pcysta->slot_cnt[i]));
+ p += scnprintf(p, end - p, ", %s:%d", id_to_slot(i),
+ le16_to_cpu(pcysta->slot_cnt[i]));
}
if (dm->tdma_now.rxflctrl)
- seq_printf(m, ", leak_rx:%d",
- le32_to_cpu(pcysta->leak_slot.cnt_rximr));
+ p += scnprintf(p, end - p, ", leak_rx:%d",
+ le32_to_cpu(pcysta->leak_slot.cnt_rximr));
if (pcysta->collision_cnt)
- seq_printf(m, ", collision:%d", pcysta->collision_cnt);
+ p += scnprintf(p, end - p, ", collision:%d",
+ pcysta->collision_cnt);
if (le16_to_cpu(pcysta->skip_cnt))
- seq_printf(m, ", skip:%d",
- le16_to_cpu(pcysta->skip_cnt));
-
- seq_puts(m, "\n");
-
- seq_printf(m, " %-15s : avg_t[wl:%d/bt:%d/lk:%d.%03d]",
- "[cycle_time]",
- le16_to_cpu(pcysta->cycle_time.tavg[CXT_WL]),
- le16_to_cpu(pcysta->cycle_time.tavg[CXT_BT]),
- le16_to_cpu(pcysta->leak_slot.tavg) / 1000,
- le16_to_cpu(pcysta->leak_slot.tavg) % 1000);
- seq_printf(m,
- ", max_t[wl:%d/bt:%d/lk:%d.%03d]\n",
- le16_to_cpu(pcysta->cycle_time.tmax[CXT_WL]),
- le16_to_cpu(pcysta->cycle_time.tmax[CXT_BT]),
- le16_to_cpu(pcysta->leak_slot.tmax) / 1000,
- le16_to_cpu(pcysta->leak_slot.tmax) % 1000);
+ p += scnprintf(p, end - p, ", skip:%d",
+ le16_to_cpu(pcysta->skip_cnt));
+
+ p += scnprintf(p, end - p, "\n");
+
+ p += scnprintf(p, end - p, " %-15s : avg_t[wl:%d/bt:%d/lk:%d.%03d]",
+ "[cycle_time]",
+ le16_to_cpu(pcysta->cycle_time.tavg[CXT_WL]),
+ le16_to_cpu(pcysta->cycle_time.tavg[CXT_BT]),
+ le16_to_cpu(pcysta->leak_slot.tavg) / 1000,
+ le16_to_cpu(pcysta->leak_slot.tavg) % 1000);
+ p += scnprintf(p, end - p,
+ ", max_t[wl:%d/bt:%d/lk:%d.%03d]\n",
+ le16_to_cpu(pcysta->cycle_time.tmax[CXT_WL]),
+ le16_to_cpu(pcysta->cycle_time.tmax[CXT_BT]),
+ le16_to_cpu(pcysta->leak_slot.tmax) / 1000,
+ le16_to_cpu(pcysta->leak_slot.tmax) % 1000);
cycle = le16_to_cpu(pcysta->cycles);
if (cycle <= 1)
- return;
+ goto out;
/* 1 cycle record 1 wl-slot and 1 bt-slot */
slot_pair = BTC_CYCLE_SLOT_MAX / 2;
@@ -9565,58 +9640,63 @@ static void _show_fbtc_cysta_v5(struct rtw89_dev *rtwdev, struct seq_file *m)
divide_cnt = BTC_CYCLE_SLOT_MAX / 4;
if (c_begin > c_end)
- return;
+ goto out;
for (cycle = c_begin; cycle <= c_end; cycle++) {
cnt++;
store_index = ((cycle - 1) % slot_pair) * 2;
if (cnt % divide_cnt == 1)
- seq_printf(m, " %-15s : ", "[cycle_step]");
+ p += scnprintf(p, end - p, " %-15s : ",
+ "[cycle_step]");
- seq_printf(m, "->b%02d",
- le16_to_cpu(pcysta->slot_step_time[store_index]));
+ p += scnprintf(p, end - p, "->b%02d",
+ le16_to_cpu(pcysta->slot_step_time[store_index]));
if (a2dp->exist) {
a2dp_trx = &pcysta->a2dp_trx[store_index];
- seq_printf(m, "(%d/%d/%dM/%d/%d/%d)",
- a2dp_trx->empty_cnt,
- a2dp_trx->retry_cnt,
- a2dp_trx->tx_rate ? 3 : 2,
- a2dp_trx->tx_cnt,
- a2dp_trx->ack_cnt,
- a2dp_trx->nack_cnt);
+ p += scnprintf(p, end - p, "(%d/%d/%dM/%d/%d/%d)",
+ a2dp_trx->empty_cnt,
+ a2dp_trx->retry_cnt,
+ a2dp_trx->tx_rate ? 3 : 2,
+ a2dp_trx->tx_cnt,
+ a2dp_trx->ack_cnt,
+ a2dp_trx->nack_cnt);
}
- seq_printf(m, "->w%02d",
- le16_to_cpu(pcysta->slot_step_time[store_index + 1]));
+ p += scnprintf(p, end - p, "->w%02d",
+ le16_to_cpu(pcysta->slot_step_time[store_index + 1]));
if (a2dp->exist) {
a2dp_trx = &pcysta->a2dp_trx[store_index + 1];
- seq_printf(m, "(%d/%d/%dM/%d/%d/%d)",
- a2dp_trx->empty_cnt,
- a2dp_trx->retry_cnt,
- a2dp_trx->tx_rate ? 3 : 2,
- a2dp_trx->tx_cnt,
- a2dp_trx->ack_cnt,
- a2dp_trx->nack_cnt);
+ p += scnprintf(p, end - p, "(%d/%d/%dM/%d/%d/%d)",
+ a2dp_trx->empty_cnt,
+ a2dp_trx->retry_cnt,
+ a2dp_trx->tx_rate ? 3 : 2,
+ a2dp_trx->tx_cnt,
+ a2dp_trx->ack_cnt,
+ a2dp_trx->nack_cnt);
}
if (cnt % divide_cnt == 0 || cnt == c_end)
- seq_puts(m, "\n");
+ p += scnprintf(p, end - p, "\n");
}
if (a2dp->exist) {
- seq_printf(m, " %-15s : a2dp_ept:%d, a2dp_late:%d",
- "[a2dp_t_sta]",
- le16_to_cpu(pcysta->a2dp_ept.cnt),
- le16_to_cpu(pcysta->a2dp_ept.cnt_timeout));
+ p += scnprintf(p, end - p,
+ " %-15s : a2dp_ept:%d, a2dp_late:%d",
+ "[a2dp_t_sta]",
+ le16_to_cpu(pcysta->a2dp_ept.cnt),
+ le16_to_cpu(pcysta->a2dp_ept.cnt_timeout));
- seq_printf(m, ", avg_t:%d, max_t:%d",
- le16_to_cpu(pcysta->a2dp_ept.tavg),
- le16_to_cpu(pcysta->a2dp_ept.tmax));
+ p += scnprintf(p, end - p, ", avg_t:%d, max_t:%d",
+ le16_to_cpu(pcysta->a2dp_ept.tavg),
+ le16_to_cpu(pcysta->a2dp_ept.tmax));
- seq_puts(m, "\n");
+ p += scnprintf(p, end - p, "\n");
}
+
+out:
+ return p - buf;
}
-static void _show_fbtc_cysta_v7(struct rtw89_dev *rtwdev, struct seq_file *m)
+static int _show_fbtc_cysta_v7(struct rtw89_dev *rtwdev, char *buf, size_t bufsz)
{
struct rtw89_btc_bt_info *bt = &rtwdev->btc.cx.bt;
struct rtw89_btc_bt_a2dp_desc *a2dp = &bt->link_info.a2dp_desc;
@@ -9624,68 +9704,75 @@ static void _show_fbtc_cysta_v7(struct rtw89_dev *rtwdev, struct seq_file *m)
struct rtw89_btc_fbtc_cysta_v7 *pcysta = NULL;
struct rtw89_btc_dm *dm = &rtwdev->btc.dm;
struct rtw89_btc_rpt_cmn_info *pcinfo;
+ char *p = buf, *end = buf + bufsz;
u16 cycle, c_begin, c_end, s_id;
u8 i, cnt = 0, divide_cnt;
u8 slot_pair;
pcinfo = &pfwinfo->rpt_fbtc_cysta.cinfo;
if (!pcinfo->valid)
- return;
+ return 0;
pcysta = &pfwinfo->rpt_fbtc_cysta.finfo.v7;
- seq_printf(m, "\n\r %-15s : cycle:%d", "[slot_stat]",
- le16_to_cpu(pcysta->cycles));
+ p += scnprintf(p, end - p, "\n\r %-15s : cycle:%d", "[slot_stat]",
+ le16_to_cpu(pcysta->cycles));
for (i = 0; i < CXST_MAX; i++) {
if (!le16_to_cpu(pcysta->slot_cnt[i]))
continue;
- seq_printf(m, ", %s:%d",
- id_to_slot(i), le16_to_cpu(pcysta->slot_cnt[i]));
+ p += scnprintf(p, end - p, ", %s:%d",
+ id_to_slot(i),
+ le16_to_cpu(pcysta->slot_cnt[i]));
}
if (dm->tdma_now.rxflctrl)
- seq_printf(m, ", leak_rx:%d",
- le32_to_cpu(pcysta->leak_slot.cnt_rximr));
+ p += scnprintf(p, end - p, ", leak_rx:%d",
+ le32_to_cpu(pcysta->leak_slot.cnt_rximr));
if (pcysta->collision_cnt)
- seq_printf(m, ", collision:%d", pcysta->collision_cnt);
+ p += scnprintf(p, end - p, ", collision:%d",
+ pcysta->collision_cnt);
if (pcysta->skip_cnt)
- seq_printf(m, ", skip:%d", le16_to_cpu(pcysta->skip_cnt));
-
- seq_printf(m, "\n\r %-15s : avg_t[wl:%d/bt:%d/lk:%d.%03d]",
- "[cycle_stat]",
- le16_to_cpu(pcysta->cycle_time.tavg[CXT_WL]),
- le16_to_cpu(pcysta->cycle_time.tavg[CXT_BT]),
- le16_to_cpu(pcysta->leak_slot.tavg) / 1000,
- le16_to_cpu(pcysta->leak_slot.tavg) % 1000);
- seq_printf(m, ", max_t[wl:%d/bt:%d(>%dms:%d)/lk:%d.%03d]",
- le16_to_cpu(pcysta->cycle_time.tmax[CXT_WL]),
- le16_to_cpu(pcysta->cycle_time.tmax[CXT_BT]),
- dm->bt_slot_flood, dm->cnt_dm[BTC_DCNT_BT_SLOT_FLOOD],
- le16_to_cpu(pcysta->leak_slot.tamx) / 1000,
- le16_to_cpu(pcysta->leak_slot.tamx) % 1000);
- seq_printf(m, ", bcn[all:%d/ok:%d/in_bt:%d/in_bt_ok:%d]",
- le16_to_cpu(pcysta->bcn_cnt[CXBCN_ALL]),
- le16_to_cpu(pcysta->bcn_cnt[CXBCN_ALL_OK]),
- le16_to_cpu(pcysta->bcn_cnt[CXBCN_BT_SLOT]),
- le16_to_cpu(pcysta->bcn_cnt[CXBCN_BT_OK]));
+ p += scnprintf(p, end - p, ", skip:%d",
+ le16_to_cpu(pcysta->skip_cnt));
+
+ p += scnprintf(p, end - p,
+ "\n\r %-15s : avg_t[wl:%d/bt:%d/lk:%d.%03d]",
+ "[cycle_stat]",
+ le16_to_cpu(pcysta->cycle_time.tavg[CXT_WL]),
+ le16_to_cpu(pcysta->cycle_time.tavg[CXT_BT]),
+ le16_to_cpu(pcysta->leak_slot.tavg) / 1000,
+ le16_to_cpu(pcysta->leak_slot.tavg) % 1000);
+ p += scnprintf(p, end - p,
+ ", max_t[wl:%d/bt:%d(>%dms:%d)/lk:%d.%03d]",
+ le16_to_cpu(pcysta->cycle_time.tmax[CXT_WL]),
+ le16_to_cpu(pcysta->cycle_time.tmax[CXT_BT]),
+ dm->bt_slot_flood, dm->cnt_dm[BTC_DCNT_BT_SLOT_FLOOD],
+ le16_to_cpu(pcysta->leak_slot.tamx) / 1000,
+ le16_to_cpu(pcysta->leak_slot.tamx) % 1000);
+ p += scnprintf(p, end - p, ", bcn[all:%d/ok:%d/in_bt:%d/in_bt_ok:%d]",
+ le16_to_cpu(pcysta->bcn_cnt[CXBCN_ALL]),
+ le16_to_cpu(pcysta->bcn_cnt[CXBCN_ALL_OK]),
+ le16_to_cpu(pcysta->bcn_cnt[CXBCN_BT_SLOT]),
+ le16_to_cpu(pcysta->bcn_cnt[CXBCN_BT_OK]));
if (a2dp->exist) {
- seq_printf(m,
- "\n\r %-15s : a2dp_ept:%d, a2dp_late:%d(streak 2S:%d/max:%d)",
- "[a2dp_stat]",
- le16_to_cpu(pcysta->a2dp_ept.cnt),
- le16_to_cpu(pcysta->a2dp_ept.cnt_timeout),
- a2dp->no_empty_streak_2s, a2dp->no_empty_streak_max);
+ p += scnprintf(p, end - p,
+ "\n\r %-15s : a2dp_ept:%d, a2dp_late:%d(streak 2S:%d/max:%d)",
+ "[a2dp_stat]",
+ le16_to_cpu(pcysta->a2dp_ept.cnt),
+ le16_to_cpu(pcysta->a2dp_ept.cnt_timeout),
+ a2dp->no_empty_streak_2s,
+ a2dp->no_empty_streak_max);
- seq_printf(m, ", avg_t:%d, max_t:%d",
- le16_to_cpu(pcysta->a2dp_ept.tavg),
- le16_to_cpu(pcysta->a2dp_ept.tmax));
+ p += scnprintf(p, end - p, ", avg_t:%d, max_t:%d",
+ le16_to_cpu(pcysta->a2dp_ept.tavg),
+ le16_to_cpu(pcysta->a2dp_ept.tmax));
}
if (le16_to_cpu(pcysta->cycles) <= 1)
- return;
+ goto out;
/* 1 cycle = 1 wl-slot + 1 bt-slot */
slot_pair = BTC_CYCLE_SLOT_MAX / 2;
@@ -9703,7 +9790,7 @@ static void _show_fbtc_cysta_v7(struct rtw89_dev *rtwdev, struct seq_file *m)
divide_cnt = 6;
if (c_begin > c_end)
- return;
+ goto out;
for (cycle = c_begin; cycle <= c_end; cycle++) {
cnt++;
@@ -9711,129 +9798,142 @@ static void _show_fbtc_cysta_v7(struct rtw89_dev *rtwdev, struct seq_file *m)
if (cnt % divide_cnt == 1) {
if (a2dp->exist)
- seq_printf(m, "\n\r %-15s : ", "[slotT_wermtan]");
+ p += scnprintf(p, end - p, "\n\r %-15s : ",
+ "[slotT_wermtan]");
else
- seq_printf(m, "\n\r %-15s : ", "[slotT_rxerr]");
+ p += scnprintf(p, end - p, "\n\r %-15s : ",
+ "[slotT_rxerr]");
}
- seq_printf(m, "->b%d", le16_to_cpu(pcysta->slot_step_time[s_id]));
+ p += scnprintf(p, end - p, "->b%d",
+ le16_to_cpu(pcysta->slot_step_time[s_id]));
if (a2dp->exist)
- seq_printf(m, "(%d/%d/%d/%dM/%d/%d/%d)",
- pcysta->wl_rx_err_ratio[s_id],
- pcysta->a2dp_trx[s_id].empty_cnt,
- pcysta->a2dp_trx[s_id].retry_cnt,
- (pcysta->a2dp_trx[s_id].tx_rate ? 3 : 2),
- pcysta->a2dp_trx[s_id].tx_cnt,
- pcysta->a2dp_trx[s_id].ack_cnt,
- pcysta->a2dp_trx[s_id].nack_cnt);
+ p += scnprintf(p, end - p, "(%d/%d/%d/%dM/%d/%d/%d)",
+ pcysta->wl_rx_err_ratio[s_id],
+ pcysta->a2dp_trx[s_id].empty_cnt,
+ pcysta->a2dp_trx[s_id].retry_cnt,
+ (pcysta->a2dp_trx[s_id].tx_rate ? 3 : 2),
+ pcysta->a2dp_trx[s_id].tx_cnt,
+ pcysta->a2dp_trx[s_id].ack_cnt,
+ pcysta->a2dp_trx[s_id].nack_cnt);
else
- seq_printf(m, "(%d)", pcysta->wl_rx_err_ratio[s_id]);
+ p += scnprintf(p, end - p, "(%d)",
+ pcysta->wl_rx_err_ratio[s_id]);
- seq_printf(m, "->w%d", le16_to_cpu(pcysta->slot_step_time[s_id + 1]));
+ p += scnprintf(p, end - p, "->w%d",
+ le16_to_cpu(pcysta->slot_step_time[s_id + 1]));
if (a2dp->exist)
- seq_printf(m, "(%d/%d/%d/%dM/%d/%d/%d)",
- pcysta->wl_rx_err_ratio[s_id + 1],
- pcysta->a2dp_trx[s_id + 1].empty_cnt,
- pcysta->a2dp_trx[s_id + 1].retry_cnt,
- (pcysta->a2dp_trx[s_id + 1].tx_rate ? 3 : 2),
- pcysta->a2dp_trx[s_id + 1].tx_cnt,
- pcysta->a2dp_trx[s_id + 1].ack_cnt,
- pcysta->a2dp_trx[s_id + 1].nack_cnt);
+ p += scnprintf(p, end - p, "(%d/%d/%d/%dM/%d/%d/%d)",
+ pcysta->wl_rx_err_ratio[s_id + 1],
+ pcysta->a2dp_trx[s_id + 1].empty_cnt,
+ pcysta->a2dp_trx[s_id + 1].retry_cnt,
+ (pcysta->a2dp_trx[s_id + 1].tx_rate ? 3 : 2),
+ pcysta->a2dp_trx[s_id + 1].tx_cnt,
+ pcysta->a2dp_trx[s_id + 1].ack_cnt,
+ pcysta->a2dp_trx[s_id + 1].nack_cnt);
else
- seq_printf(m, "(%d)", pcysta->wl_rx_err_ratio[s_id + 1]);
+ p += scnprintf(p, end - p, "(%d)",
+ pcysta->wl_rx_err_ratio[s_id + 1]);
}
+
+out:
+ return p - buf;
}
-static void _show_fbtc_nullsta(struct rtw89_dev *rtwdev, struct seq_file *m)
+static int _show_fbtc_nullsta(struct rtw89_dev *rtwdev, char *buf, size_t bufsz)
{
struct rtw89_btc *btc = &rtwdev->btc;
const struct rtw89_btc_ver *ver = btc->ver;
struct rtw89_btc_btf_fwinfo *pfwinfo = &btc->fwinfo;
struct rtw89_btc_rpt_cmn_info *pcinfo;
union rtw89_btc_fbtc_cynullsta_info *ns;
+ char *p = buf, *end = buf + bufsz;
u8 i = 0;
if (!btc->dm.tdma_now.rxflctrl)
- return;
+ return 0;
pcinfo = &pfwinfo->rpt_fbtc_nullsta.cinfo;
if (!pcinfo->valid)
- return;
+ return 0;
ns = &pfwinfo->rpt_fbtc_nullsta.finfo;
if (ver->fcxnullsta == 1) {
for (i = 0; i < 2; i++) {
- seq_printf(m, " %-15s : ", "[NULL-STA]");
- seq_printf(m, "null-%d", i);
- seq_printf(m, "[ok:%d/",
- le32_to_cpu(ns->v1.result[i][1]));
- seq_printf(m, "fail:%d/",
- le32_to_cpu(ns->v1.result[i][0]));
- seq_printf(m, "on_time:%d/",
- le32_to_cpu(ns->v1.result[i][2]));
- seq_printf(m, "retry:%d/",
- le32_to_cpu(ns->v1.result[i][3]));
- seq_printf(m, "avg_t:%d.%03d/",
- le32_to_cpu(ns->v1.avg_t[i]) / 1000,
- le32_to_cpu(ns->v1.avg_t[i]) % 1000);
- seq_printf(m, "max_t:%d.%03d]\n",
- le32_to_cpu(ns->v1.max_t[i]) / 1000,
- le32_to_cpu(ns->v1.max_t[i]) % 1000);
+ p += scnprintf(p, end - p, " %-15s : ", "[NULL-STA]");
+ p += scnprintf(p, end - p, "null-%d", i);
+ p += scnprintf(p, end - p, "[ok:%d/",
+ le32_to_cpu(ns->v1.result[i][1]));
+ p += scnprintf(p, end - p, "fail:%d/",
+ le32_to_cpu(ns->v1.result[i][0]));
+ p += scnprintf(p, end - p, "on_time:%d/",
+ le32_to_cpu(ns->v1.result[i][2]));
+ p += scnprintf(p, end - p, "retry:%d/",
+ le32_to_cpu(ns->v1.result[i][3]));
+ p += scnprintf(p, end - p, "avg_t:%d.%03d/",
+ le32_to_cpu(ns->v1.avg_t[i]) / 1000,
+ le32_to_cpu(ns->v1.avg_t[i]) % 1000);
+ p += scnprintf(p, end - p, "max_t:%d.%03d]\n",
+ le32_to_cpu(ns->v1.max_t[i]) / 1000,
+ le32_to_cpu(ns->v1.max_t[i]) % 1000);
}
} else if (ver->fcxnullsta == 7) {
for (i = 0; i < 2; i++) {
- seq_printf(m, " %-15s : ", "[NULL-STA]");
- seq_printf(m, "null-%d", i);
- seq_printf(m, "[Tx:%d/",
- le32_to_cpu(ns->v7.result[i][4]));
- seq_printf(m, "[ok:%d/",
- le32_to_cpu(ns->v7.result[i][1]));
- seq_printf(m, "fail:%d/",
- le32_to_cpu(ns->v7.result[i][0]));
- seq_printf(m, "on_time:%d/",
- le32_to_cpu(ns->v7.result[i][2]));
- seq_printf(m, "retry:%d/",
- le32_to_cpu(ns->v7.result[i][3]));
- seq_printf(m, "avg_t:%d.%03d/",
- le32_to_cpu(ns->v7.tavg[i]) / 1000,
- le32_to_cpu(ns->v7.tavg[i]) % 1000);
- seq_printf(m, "max_t:%d.%03d]\n",
- le32_to_cpu(ns->v7.tmax[i]) / 1000,
- le32_to_cpu(ns->v7.tmax[i]) % 1000);
+ p += scnprintf(p, end - p, " %-15s : ", "[NULL-STA]");
+ p += scnprintf(p, end - p, "null-%d", i);
+ p += scnprintf(p, end - p, "[Tx:%d/",
+ le32_to_cpu(ns->v7.result[i][4]));
+ p += scnprintf(p, end - p, "[ok:%d/",
+ le32_to_cpu(ns->v7.result[i][1]));
+ p += scnprintf(p, end - p, "fail:%d/",
+ le32_to_cpu(ns->v7.result[i][0]));
+ p += scnprintf(p, end - p, "on_time:%d/",
+ le32_to_cpu(ns->v7.result[i][2]));
+ p += scnprintf(p, end - p, "retry:%d/",
+ le32_to_cpu(ns->v7.result[i][3]));
+ p += scnprintf(p, end - p, "avg_t:%d.%03d/",
+ le32_to_cpu(ns->v7.tavg[i]) / 1000,
+ le32_to_cpu(ns->v7.tavg[i]) % 1000);
+ p += scnprintf(p, end - p, "max_t:%d.%03d]\n",
+ le32_to_cpu(ns->v7.tmax[i]) / 1000,
+ le32_to_cpu(ns->v7.tmax[i]) % 1000);
}
} else {
for (i = 0; i < 2; i++) {
- seq_printf(m, " %-15s : ", "[NULL-STA]");
- seq_printf(m, "null-%d", i);
- seq_printf(m, "[Tx:%d/",
- le32_to_cpu(ns->v2.result[i][4]));
- seq_printf(m, "[ok:%d/",
- le32_to_cpu(ns->v2.result[i][1]));
- seq_printf(m, "fail:%d/",
- le32_to_cpu(ns->v2.result[i][0]));
- seq_printf(m, "on_time:%d/",
- le32_to_cpu(ns->v2.result[i][2]));
- seq_printf(m, "retry:%d/",
- le32_to_cpu(ns->v2.result[i][3]));
- seq_printf(m, "avg_t:%d.%03d/",
- le32_to_cpu(ns->v2.avg_t[i]) / 1000,
- le32_to_cpu(ns->v2.avg_t[i]) % 1000);
- seq_printf(m, "max_t:%d.%03d]\n",
- le32_to_cpu(ns->v2.max_t[i]) / 1000,
- le32_to_cpu(ns->v2.max_t[i]) % 1000);
+ p += scnprintf(p, end - p, " %-15s : ", "[NULL-STA]");
+ p += scnprintf(p, end - p, "null-%d", i);
+ p += scnprintf(p, end - p, "[Tx:%d/",
+ le32_to_cpu(ns->v2.result[i][4]));
+ p += scnprintf(p, end - p, "[ok:%d/",
+ le32_to_cpu(ns->v2.result[i][1]));
+ p += scnprintf(p, end - p, "fail:%d/",
+ le32_to_cpu(ns->v2.result[i][0]));
+ p += scnprintf(p, end - p, "on_time:%d/",
+ le32_to_cpu(ns->v2.result[i][2]));
+ p += scnprintf(p, end - p, "retry:%d/",
+ le32_to_cpu(ns->v2.result[i][3]));
+ p += scnprintf(p, end - p, "avg_t:%d.%03d/",
+ le32_to_cpu(ns->v2.avg_t[i]) / 1000,
+ le32_to_cpu(ns->v2.avg_t[i]) % 1000);
+ p += scnprintf(p, end - p, "max_t:%d.%03d]\n",
+ le32_to_cpu(ns->v2.max_t[i]) / 1000,
+ le32_to_cpu(ns->v2.max_t[i]) % 1000);
}
}
+
+ return p - buf;
}
-static void _show_fbtc_step_v2(struct rtw89_dev *rtwdev, struct seq_file *m)
+static int _show_fbtc_step_v2(struct rtw89_dev *rtwdev, char *buf, size_t bufsz)
{
struct rtw89_btc *btc = &rtwdev->btc;
struct rtw89_btc_btf_fwinfo *pfwinfo = &btc->fwinfo;
struct rtw89_btc_rpt_cmn_info *pcinfo = NULL;
struct rtw89_btc_fbtc_steps_v2 *pstep = NULL;
const struct rtw89_btc_ver *ver = btc->ver;
+ char *p = buf, *end = buf + bufsz;
u8 type, val, cnt = 0, state = 0;
bool outloop = false;
u16 i, diff_t, n_start = 0, n_stop = 0;
@@ -9841,14 +9941,14 @@ static void _show_fbtc_step_v2(struct rtw89_dev *rtwdev, struct seq_file *m)
pcinfo = &pfwinfo->rpt_fbtc_step.cinfo;
if (!pcinfo->valid)
- return;
+ return 0;
pstep = &pfwinfo->rpt_fbtc_step.finfo.v2;
pos_old = le16_to_cpu(pstep->pos_old);
pos_new = le16_to_cpu(pstep->pos_new);
if (pcinfo->req_fver != pstep->fver)
- return;
+ return 0;
/* store step info by using ring instead of FIFO*/
do {
@@ -9877,13 +9977,15 @@ static void _show_fbtc_step_v2(struct rtw89_dev *rtwdev, struct seq_file *m)
continue;
if (cnt % 10 == 0)
- seq_printf(m, " %-15s : ", "[steps]");
+ p += scnprintf(p, end - p,
+ " %-15s : ", "[steps]");
- seq_printf(m, "-> %s(%02d)(%02d)",
- (type == CXSTEP_SLOT ? "SLT" :
- "EVT"), (u32)val, diff_t);
+ p += scnprintf(p, end - p,
+ "-> %s(%02d)(%02d)",
+ (type == CXSTEP_SLOT ? "SLT" :
+ "EVT"), (u32)val, diff_t);
if (cnt % 10 == 9)
- seq_puts(m, "\n");
+ p += scnprintf(p, end - p, "\n");
cnt++;
}
@@ -9900,29 +10002,32 @@ static void _show_fbtc_step_v2(struct rtw89_dev *rtwdev, struct seq_file *m)
break;
}
} while (!outloop);
+
+ return p - buf;
}
-static void _show_fbtc_step_v3(struct rtw89_dev *rtwdev, struct seq_file *m)
+static int _show_fbtc_step_v3(struct rtw89_dev *rtwdev, char *buf, size_t bufsz)
{
struct rtw89_btc *btc = &rtwdev->btc;
struct rtw89_btc_btf_fwinfo *pfwinfo = &btc->fwinfo;
struct rtw89_btc_rpt_cmn_info *pcinfo;
struct rtw89_btc_fbtc_steps_v3 *pstep;
u32 i, n_begin, n_end, array_idx, cnt = 0;
+ char *p = buf, *end = buf + bufsz;
u8 type, val;
u16 diff_t;
if ((pfwinfo->rpt_en_map &
rtw89_btc_fw_rpt_ver(rtwdev, RPT_EN_FW_STEP_INFO)) == 0)
- return;
+ return 0;
pcinfo = &pfwinfo->rpt_fbtc_step.cinfo;
if (!pcinfo->valid)
- return;
+ return 0;
pstep = &pfwinfo->rpt_fbtc_step.finfo.v3;
if (pcinfo->req_fver != pstep->fver)
- return;
+ return 0;
if (le32_to_cpu(pstep->cnt) <= FCXDEF_STEP)
n_begin = 1;
@@ -9932,7 +10037,7 @@ static void _show_fbtc_step_v3(struct rtw89_dev *rtwdev, struct seq_file *m)
n_end = le32_to_cpu(pstep->cnt);
if (n_begin > n_end)
- return;
+ return 0;
/* restore step info by using ring instead of FIFO */
for (i = n_begin; i <= n_end; i++) {
@@ -9945,50 +10050,55 @@ static void _show_fbtc_step_v3(struct rtw89_dev *rtwdev, struct seq_file *m)
continue;
if (cnt % 10 == 0)
- seq_printf(m, " %-15s : ", "[steps]");
+ p += scnprintf(p, end - p, " %-15s : ", "[steps]");
- seq_printf(m, "-> %s(%02d)",
- (type == CXSTEP_SLOT ?
- id_to_slot((u32)val) :
- id_to_evt((u32)val)), diff_t);
+ p += scnprintf(p, end - p, "-> %s(%02d)",
+ (type == CXSTEP_SLOT ?
+ id_to_slot((u32)val) :
+ id_to_evt((u32)val)), diff_t);
if (cnt % 10 == 9)
- seq_puts(m, "\n");
+ p += scnprintf(p, end - p, "\n");
cnt++;
}
+
+ return p - buf;
}
-static void _show_fw_dm_msg(struct rtw89_dev *rtwdev, struct seq_file *m)
+static int _show_fw_dm_msg(struct rtw89_dev *rtwdev, char *buf, size_t bufsz)
{
struct rtw89_btc *btc = &rtwdev->btc;
const struct rtw89_btc_ver *ver = btc->ver;
+ char *p = buf, *end = buf + bufsz;
if (!(btc->dm.coex_info_map & BTC_COEX_INFO_DM))
- return;
+ goto out;
- _show_error(rtwdev, m);
- _show_fbtc_tdma(rtwdev, m);
- _show_fbtc_slots(rtwdev, m);
+ p += _show_error(rtwdev, p, end - p);
+ p += _show_fbtc_tdma(rtwdev, p, end - p);
+ p += _show_fbtc_slots(rtwdev, p, end - p);
if (ver->fcxcysta == 2)
- _show_fbtc_cysta_v2(rtwdev, m);
+ p += _show_fbtc_cysta_v2(rtwdev, p, end - p);
else if (ver->fcxcysta == 3)
- _show_fbtc_cysta_v3(rtwdev, m);
+ p += _show_fbtc_cysta_v3(rtwdev, p, end - p);
else if (ver->fcxcysta == 4)
- _show_fbtc_cysta_v4(rtwdev, m);
+ p += _show_fbtc_cysta_v4(rtwdev, p, end - p);
else if (ver->fcxcysta == 5)
- _show_fbtc_cysta_v5(rtwdev, m);
+ p += _show_fbtc_cysta_v5(rtwdev, p, end - p);
else if (ver->fcxcysta == 7)
- _show_fbtc_cysta_v7(rtwdev, m);
+ p += _show_fbtc_cysta_v7(rtwdev, p, end - p);
- _show_fbtc_nullsta(rtwdev, m);
+ p += _show_fbtc_nullsta(rtwdev, p, end - p);
if (ver->fcxstep == 2)
- _show_fbtc_step_v2(rtwdev, m);
+ p += _show_fbtc_step_v2(rtwdev, p, end - p);
else if (ver->fcxstep == 3)
- _show_fbtc_step_v3(rtwdev, m);
+ p += _show_fbtc_step_v3(rtwdev, p, end - p);
+out:
+ return p - buf;
}
static void _get_gnt(struct rtw89_dev *rtwdev, struct rtw89_mac_ax_coex_gnt *gnt_cfg)
@@ -10033,12 +10143,13 @@ static void _get_gnt(struct rtw89_dev *rtwdev, struct rtw89_mac_ax_coex_gnt *gnt
}
}
-static void _show_gpio_dbg(struct rtw89_dev *rtwdev, struct seq_file *m)
+static int _show_gpio_dbg(struct rtw89_dev *rtwdev, char *buf, size_t bufsz)
{
struct rtw89_btc_btf_fwinfo *pfwinfo = &rtwdev->btc.fwinfo;
const struct rtw89_btc_ver *ver = rtwdev->btc.ver;
struct rtw89_btc_rpt_cmn_info *pcinfo = NULL;
union rtw89_btc_fbtc_gpio_dbg *gdbg = NULL;
+ char *p = buf, *end = buf + bufsz;
u8 *gpio_map, i;
u32 en_map;
@@ -10048,8 +10159,8 @@ static void _show_gpio_dbg(struct rtw89_dev *rtwdev, struct seq_file *m)
rtw89_debug(rtwdev, RTW89_DBG_BTC,
"[BTC], %s(): stop due rpt_fbtc_gpio_dbg.cinfo\n",
__func__);
- seq_puts(m, "\n");
- return;
+ p += scnprintf(p, end - p, "\n");
+ goto out;
}
if (ver->fcxgpiodbg == 7) {
@@ -10061,20 +10172,24 @@ static void _show_gpio_dbg(struct rtw89_dev *rtwdev, struct seq_file *m)
}
if (!en_map)
- return;
+ goto out;
- seq_printf(m, " %-15s : enable_map:0x%08x",
- "[gpio_dbg]", en_map);
+ p += scnprintf(p, end - p, " %-15s : enable_map:0x%08x",
+ "[gpio_dbg]", en_map);
for (i = 0; i < BTC_DBG_MAX1; i++) {
if (!(en_map & BIT(i)))
continue;
- seq_printf(m, ", %s->GPIO%d", id_to_gdbg(i), gpio_map[i]);
+ p += scnprintf(p, end - p, ", %s->GPIO%d", id_to_gdbg(i),
+ gpio_map[i]);
}
- seq_puts(m, "\n");
+ p += scnprintf(p, end - p, "\n");
+
+out:
+ return p - buf;
}
-static void _show_mreg_v1(struct rtw89_dev *rtwdev, struct seq_file *m)
+static int _show_mreg_v1(struct rtw89_dev *rtwdev, char *buf, size_t bufsz)
{
const struct rtw89_chip_info *chip = rtwdev->chip;
struct rtw89_btc *btc = &rtwdev->btc;
@@ -10086,45 +10201,47 @@ static void _show_mreg_v1(struct rtw89_dev *rtwdev, struct seq_file *m)
struct rtw89_btc_bt_info *bt = &btc->cx.bt;
struct rtw89_mac_ax_coex_gnt gnt_cfg = {};
struct rtw89_mac_ax_gnt gnt;
+ char *p = buf, *end = buf + bufsz;
u8 i = 0, type = 0, cnt = 0;
u32 val, offset;
if (!(btc->dm.coex_info_map & BTC_COEX_INFO_MREG))
- return;
+ return 0;
- seq_puts(m, "========== [HW Status] ==========\n");
+ p += scnprintf(p, end - p, "========== [HW Status] ==========\n");
- seq_printf(m,
- " %-15s : WL->BT:0x%08x(cnt:%d), BT->WL:0x%08x(total:%d, bt_update:%d)\n",
- "[scoreboard]", wl->scbd, cx->cnt_wl[BTC_WCNT_SCBDUPDATE],
- bt->scbd, cx->cnt_bt[BTC_BCNT_SCBDREAD],
- cx->cnt_bt[BTC_BCNT_SCBDUPDATE]);
+ p += scnprintf(p, end - p,
+ " %-15s : WL->BT:0x%08x(cnt:%d), BT->WL:0x%08x(total:%d, bt_update:%d)\n",
+ "[scoreboard]", wl->scbd,
+ cx->cnt_wl[BTC_WCNT_SCBDUPDATE],
+ bt->scbd, cx->cnt_bt[BTC_BCNT_SCBDREAD],
+ cx->cnt_bt[BTC_BCNT_SCBDUPDATE]);
btc->dm.pta_owner = rtw89_mac_get_ctrl_path(rtwdev);
_get_gnt(rtwdev, &gnt_cfg);
gnt = gnt_cfg.band[0];
- seq_printf(m,
- " %-15s : pta_owner:%s, phy-0[gnt_wl:%s-%d/gnt_bt:%s-%d], ",
- "[gnt_status]",
- chip->chip_id == RTL8852C ? "HW" :
- btc->dm.pta_owner == BTC_CTRL_BY_WL ? "WL" : "BT",
- gnt.gnt_wl_sw_en ? "SW" : "HW", gnt.gnt_wl,
- gnt.gnt_bt_sw_en ? "SW" : "HW", gnt.gnt_bt);
+ p += scnprintf(p, end - p,
+ " %-15s : pta_owner:%s, phy-0[gnt_wl:%s-%d/gnt_bt:%s-%d], ",
+ "[gnt_status]",
+ chip->chip_id == RTL8852C ? "HW" :
+ btc->dm.pta_owner == BTC_CTRL_BY_WL ? "WL" : "BT",
+ gnt.gnt_wl_sw_en ? "SW" : "HW", gnt.gnt_wl,
+ gnt.gnt_bt_sw_en ? "SW" : "HW", gnt.gnt_bt);
gnt = gnt_cfg.band[1];
- seq_printf(m, "phy-1[gnt_wl:%s-%d/gnt_bt:%s-%d]\n",
- gnt.gnt_wl_sw_en ? "SW" : "HW",
- gnt.gnt_wl,
- gnt.gnt_bt_sw_en ? "SW" : "HW",
- gnt.gnt_bt);
+ p += scnprintf(p, end - p, "phy-1[gnt_wl:%s-%d/gnt_bt:%s-%d]\n",
+ gnt.gnt_wl_sw_en ? "SW" : "HW",
+ gnt.gnt_wl,
+ gnt.gnt_bt_sw_en ? "SW" : "HW",
+ gnt.gnt_bt);
pcinfo = &pfwinfo->rpt_fbtc_mregval.cinfo;
if (!pcinfo->valid) {
rtw89_debug(rtwdev, RTW89_DBG_BTC,
"[BTC], %s(): stop due rpt_fbtc_mregval.cinfo\n",
__func__);
- return;
+ goto out;
}
pmreg = &pfwinfo->rpt_fbtc_mregval.finfo.v1;
@@ -10138,21 +10255,26 @@ static void _show_mreg_v1(struct rtw89_dev *rtwdev, struct seq_file *m)
val = le32_to_cpu(pmreg->mreg_val[i]);
if (cnt % 6 == 0)
- seq_printf(m, " %-15s : %d_0x%04x=0x%08x",
- "[reg]", (u32)type, offset, val);
+ p += scnprintf(p, end - p,
+ " %-15s : %d_0x%04x=0x%08x",
+ "[reg]", (u32)type, offset, val);
else
- seq_printf(m, ", %d_0x%04x=0x%08x", (u32)type,
- offset, val);
+ p += scnprintf(p, end - p, ", %d_0x%04x=0x%08x",
+ (u32)type,
+ offset, val);
if (cnt % 6 == 5)
- seq_puts(m, "\n");
+ p += scnprintf(p, end - p, "\n");
cnt++;
if (i >= pmreg->reg_num)
- seq_puts(m, "\n");
+ p += scnprintf(p, end - p, "\n");
}
+
+out:
+ return p - buf;
}
-static void _show_mreg_v2(struct rtw89_dev *rtwdev, struct seq_file *m)
+static int _show_mreg_v2(struct rtw89_dev *rtwdev, char *buf, size_t bufsz)
{
const struct rtw89_chip_info *chip = rtwdev->chip;
struct rtw89_btc *btc = &rtwdev->btc;
@@ -10164,46 +10286,48 @@ static void _show_mreg_v2(struct rtw89_dev *rtwdev, struct seq_file *m)
struct rtw89_btc_bt_info *bt = &btc->cx.bt;
struct rtw89_mac_ax_coex_gnt gnt_cfg = {};
struct rtw89_mac_ax_gnt gnt;
+ char *p = buf, *end = buf + bufsz;
u8 i = 0, type = 0, cnt = 0;
u32 val, offset;
if (!(btc->dm.coex_info_map & BTC_COEX_INFO_MREG))
- return;
+ return 0;
- seq_puts(m, "========== [HW Status] ==========\n");
+ p += scnprintf(p, end - p, "========== [HW Status] ==========\n");
- seq_printf(m,
- " %-15s : WL->BT:0x%08x(cnt:%d), BT->WL:0x%08x(total:%d, bt_update:%d)\n",
- "[scoreboard]", wl->scbd, cx->cnt_wl[BTC_WCNT_SCBDUPDATE],
- bt->scbd, cx->cnt_bt[BTC_BCNT_SCBDREAD],
- cx->cnt_bt[BTC_BCNT_SCBDUPDATE]);
+ p += scnprintf(p, end - p,
+ " %-15s : WL->BT:0x%08x(cnt:%d), BT->WL:0x%08x(total:%d, bt_update:%d)\n",
+ "[scoreboard]", wl->scbd,
+ cx->cnt_wl[BTC_WCNT_SCBDUPDATE],
+ bt->scbd, cx->cnt_bt[BTC_BCNT_SCBDREAD],
+ cx->cnt_bt[BTC_BCNT_SCBDUPDATE]);
btc->dm.pta_owner = rtw89_mac_get_ctrl_path(rtwdev);
_get_gnt(rtwdev, &gnt_cfg);
gnt = gnt_cfg.band[0];
- seq_printf(m,
- " %-15s : pta_owner:%s, phy-0[gnt_wl:%s-%d/gnt_bt:%s-%d], polut_type:%s",
- "[gnt_status]",
- chip->chip_id == RTL8852C ? "HW" :
- btc->dm.pta_owner == BTC_CTRL_BY_WL ? "WL" : "BT",
- gnt.gnt_wl_sw_en ? "SW" : "HW", gnt.gnt_wl,
- gnt.gnt_bt_sw_en ? "SW" : "HW", gnt.gnt_bt,
- id_to_polut(wl->bt_polut_type[wl->pta_req_mac]));
+ p += scnprintf(p, end - p,
+ " %-15s : pta_owner:%s, phy-0[gnt_wl:%s-%d/gnt_bt:%s-%d], polut_type:%s",
+ "[gnt_status]",
+ chip->chip_id == RTL8852C ? "HW" :
+ btc->dm.pta_owner == BTC_CTRL_BY_WL ? "WL" : "BT",
+ gnt.gnt_wl_sw_en ? "SW" : "HW", gnt.gnt_wl,
+ gnt.gnt_bt_sw_en ? "SW" : "HW", gnt.gnt_bt,
+ id_to_polut(wl->bt_polut_type[wl->pta_req_mac]));
gnt = gnt_cfg.band[1];
- seq_printf(m, "phy-1[gnt_wl:%s-%d/gnt_bt:%s-%d]\n",
- gnt.gnt_wl_sw_en ? "SW" : "HW",
- gnt.gnt_wl,
- gnt.gnt_bt_sw_en ? "SW" : "HW",
- gnt.gnt_bt);
+ p += scnprintf(p, end - p, "phy-1[gnt_wl:%s-%d/gnt_bt:%s-%d]\n",
+ gnt.gnt_wl_sw_en ? "SW" : "HW",
+ gnt.gnt_wl,
+ gnt.gnt_bt_sw_en ? "SW" : "HW",
+ gnt.gnt_bt);
pcinfo = &pfwinfo->rpt_fbtc_mregval.cinfo;
if (!pcinfo->valid) {
rtw89_debug(rtwdev, RTW89_DBG_BTC,
"[BTC], %s(): stop due rpt_fbtc_mregval.cinfo\n",
__func__);
- return;
+ goto out;
}
pmreg = &pfwinfo->rpt_fbtc_mregval.finfo.v2;
@@ -10217,21 +10341,26 @@ static void _show_mreg_v2(struct rtw89_dev *rtwdev, struct seq_file *m)
val = le32_to_cpu(pmreg->mreg_val[i]);
if (cnt % 6 == 0)
- seq_printf(m, " %-15s : %d_0x%04x=0x%08x",
- "[reg]", (u32)type, offset, val);
+ p += scnprintf(p, end - p,
+ " %-15s : %d_0x%04x=0x%08x",
+ "[reg]", (u32)type, offset, val);
else
- seq_printf(m, ", %d_0x%04x=0x%08x", (u32)type,
- offset, val);
+ p += scnprintf(p, end - p, ", %d_0x%04x=0x%08x",
+ (u32)type,
+ offset, val);
if (cnt % 6 == 5)
- seq_puts(m, "\n");
+ p += scnprintf(p, end - p, "\n");
cnt++;
if (i >= pmreg->reg_num)
- seq_puts(m, "\n");
+ p += scnprintf(p, end - p, "\n");
}
+
+out:
+ return p - buf;
}
-static void _show_mreg_v7(struct rtw89_dev *rtwdev, struct seq_file *m)
+static int _show_mreg_v7(struct rtw89_dev *rtwdev, char *buf, size_t bufsz)
{
struct rtw89_btc *btc = &rtwdev->btc;
struct rtw89_btc_btf_fwinfo *pfwinfo = &btc->fwinfo;
@@ -10242,46 +10371,50 @@ static void _show_mreg_v7(struct rtw89_dev *rtwdev, struct seq_file *m)
struct rtw89_btc_bt_info *bt = &cx->bt;
struct rtw89_mac_ax_gnt *gnt = NULL;
struct rtw89_btc_dm *dm = &btc->dm;
+ char *p = buf, *end = buf + bufsz;
u8 i, type, cnt = 0;
u32 val, offset;
if (!(dm->coex_info_map & BTC_COEX_INFO_MREG))
- return;
+ return 0;
- seq_puts(m, "\n\r========== [HW Status] ==========");
+ p += scnprintf(p, end - p, "\n\r========== [HW Status] ==========");
- seq_printf(m,
- "\n\r %-15s : WL->BT:0x%08x(cnt:%d), BT->WL:0x%08x(total:%d, bt_update:%d)",
- "[scoreboard]", wl->scbd, cx->cnt_wl[BTC_WCNT_SCBDUPDATE],
- bt->scbd, cx->cnt_bt[BTC_BCNT_SCBDREAD],
- cx->cnt_bt[BTC_BCNT_SCBDUPDATE]);
+ p += scnprintf(p, end - p,
+ "\n\r %-15s : WL->BT:0x%08x(cnt:%d), BT->WL:0x%08x(total:%d, bt_update:%d)",
+ "[scoreboard]", wl->scbd,
+ cx->cnt_wl[BTC_WCNT_SCBDUPDATE],
+ bt->scbd, cx->cnt_bt[BTC_BCNT_SCBDREAD],
+ cx->cnt_bt[BTC_BCNT_SCBDUPDATE]);
/* To avoid I/O if WL LPS or power-off */
dm->pta_owner = rtw89_mac_get_ctrl_path(rtwdev);
- seq_printf(m,
- "\n\r %-15s : pta_owner:%s, pta_req_mac:MAC%d, rf_gnt_source: polut_type:%s",
- "[gnt_status]",
- rtwdev->chip->para_ver & BTC_FEAT_PTA_ONOFF_CTRL ? "HW" :
- dm->pta_owner == BTC_CTRL_BY_WL ? "WL" : "BT",
- wl->pta_req_mac, id_to_polut(wl->bt_polut_type[wl->pta_req_mac]));
+ p += scnprintf(p, end - p,
+ "\n\r %-15s : pta_owner:%s, pta_req_mac:MAC%d, rf_gnt_source: polut_type:%s",
+ "[gnt_status]",
+ rtwdev->chip->para_ver & BTC_FEAT_PTA_ONOFF_CTRL ? "HW" :
+ dm->pta_owner == BTC_CTRL_BY_WL ? "WL" : "BT",
+ wl->pta_req_mac,
+ id_to_polut(wl->bt_polut_type[wl->pta_req_mac]));
gnt = &dm->gnt.band[RTW89_PHY_0];
- seq_printf(m, ", phy-0[gnt_wl:%s-%d/gnt_bt:%s-%d]",
- gnt->gnt_wl_sw_en ? "SW" : "HW", gnt->gnt_wl,
- gnt->gnt_bt_sw_en ? "SW" : "HW", gnt->gnt_bt);
+ p += scnprintf(p, end - p, ", phy-0[gnt_wl:%s-%d/gnt_bt:%s-%d]",
+ gnt->gnt_wl_sw_en ? "SW" : "HW", gnt->gnt_wl,
+ gnt->gnt_bt_sw_en ? "SW" : "HW", gnt->gnt_bt);
if (rtwdev->dbcc_en) {
gnt = &dm->gnt.band[RTW89_PHY_1];
- seq_printf(m, ", phy-1[gnt_wl:%s-%d/gnt_bt:%s-%d]",
- gnt->gnt_wl_sw_en ? "SW" : "HW", gnt->gnt_wl,
- gnt->gnt_bt_sw_en ? "SW" : "HW", gnt->gnt_bt);
+ p += scnprintf(p, end - p,
+ ", phy-1[gnt_wl:%s-%d/gnt_bt:%s-%d]",
+ gnt->gnt_wl_sw_en ? "SW" : "HW", gnt->gnt_wl,
+ gnt->gnt_bt_sw_en ? "SW" : "HW", gnt->gnt_bt);
}
pcinfo = &pfwinfo->rpt_fbtc_mregval.cinfo;
if (!pcinfo->valid)
- return;
+ goto out;
pmreg = &pfwinfo->rpt_fbtc_mregval.finfo.v7;
@@ -10291,17 +10424,21 @@ static void _show_mreg_v7(struct rtw89_dev *rtwdev, struct seq_file *m)
val = le32_to_cpu(pmreg->mreg_val[i]);
if (cnt % 6 == 0)
- seq_printf(m, "\n\r %-15s : %s_0x%x=0x%x", "[reg]",
- id_to_regtype(type), offset, val);
+ p += scnprintf(p, end - p,
+ "\n\r %-15s : %s_0x%x=0x%x", "[reg]",
+ id_to_regtype(type), offset, val);
else
- seq_printf(m, ", %s_0x%x=0x%x",
- id_to_regtype(type), offset, val);
+ p += scnprintf(p, end - p, ", %s_0x%x=0x%x",
+ id_to_regtype(type), offset, val);
cnt++;
}
- seq_puts(m, "\n");
+ p += scnprintf(p, end - p, "\n");
+
+out:
+ return p - buf;
}
-static void _show_summary_v1(struct rtw89_dev *rtwdev, struct seq_file *m)
+static int _show_summary_v1(struct rtw89_dev *rtwdev, char *buf, size_t bufsz)
{
struct rtw89_btc *btc = &rtwdev->btc;
struct rtw89_btc_btf_fwinfo *pfwinfo = &btc->fwinfo;
@@ -10312,56 +10449,59 @@ static void _show_summary_v1(struct rtw89_dev *rtwdev, struct seq_file *m)
struct rtw89_btc_wl_info *wl = &cx->wl;
struct rtw89_btc_bt_info *bt = &cx->bt;
u32 cnt_sum = 0, *cnt = btc->dm.cnt_notify;
+ char *p = buf, *end = buf + bufsz;
u8 i;
if (!(dm->coex_info_map & BTC_COEX_INFO_SUMMARY))
- return;
+ return 0;
- seq_puts(m, "========== [Statistics] ==========\n");
+ p += scnprintf(p, end - p, "========== [Statistics] ==========\n");
pcinfo = &pfwinfo->rpt_ctrl.cinfo;
if (pcinfo->valid && !wl->status.map.lps && !wl->status.map.rf_off) {
prptctrl = &pfwinfo->rpt_ctrl.finfo.v1;
- seq_printf(m,
- " %-15s : h2c_cnt=%d(fail:%d, fw_recv:%d), c2h_cnt=%d(fw_send:%d), ",
- "[summary]", pfwinfo->cnt_h2c,
- pfwinfo->cnt_h2c_fail, prptctrl->h2c_cnt,
- pfwinfo->cnt_c2h, prptctrl->c2h_cnt);
+ p += scnprintf(p, end - p,
+ " %-15s : h2c_cnt=%d(fail:%d, fw_recv:%d), c2h_cnt=%d(fw_send:%d), ",
+ "[summary]", pfwinfo->cnt_h2c,
+ pfwinfo->cnt_h2c_fail, prptctrl->h2c_cnt,
+ pfwinfo->cnt_c2h, prptctrl->c2h_cnt);
- seq_printf(m,
- "rpt_cnt=%d(fw_send:%d), rpt_map=0x%x, dm_error_map:0x%x",
- pfwinfo->event[BTF_EVNT_RPT], prptctrl->rpt_cnt,
- prptctrl->rpt_enable, dm->error.val);
+ p += scnprintf(p, end - p,
+ "rpt_cnt=%d(fw_send:%d), rpt_map=0x%x, dm_error_map:0x%x",
+ pfwinfo->event[BTF_EVNT_RPT],
+ prptctrl->rpt_cnt,
+ prptctrl->rpt_enable, dm->error.val);
if (dm->error.map.wl_fw_hang)
- seq_puts(m, " (WL FW Hang!!)");
- seq_puts(m, "\n");
- seq_printf(m,
- " %-15s : send_ok:%d, send_fail:%d, recv:%d",
- "[mailbox]", prptctrl->mb_send_ok_cnt,
- prptctrl->mb_send_fail_cnt, prptctrl->mb_recv_cnt);
-
- seq_printf(m,
- "(A2DP_empty:%d, A2DP_flowstop:%d, A2DP_full:%d)\n",
- prptctrl->mb_a2dp_empty_cnt,
- prptctrl->mb_a2dp_flct_cnt,
- prptctrl->mb_a2dp_full_cnt);
-
- seq_printf(m,
- " %-15s : wl_rfk[req:%d/go:%d/reject:%d/timeout:%d]",
- "[RFK]", cx->cnt_wl[BTC_WCNT_RFK_REQ],
- cx->cnt_wl[BTC_WCNT_RFK_GO],
- cx->cnt_wl[BTC_WCNT_RFK_REJECT],
- cx->cnt_wl[BTC_WCNT_RFK_TIMEOUT]);
-
- seq_printf(m,
- ", bt_rfk[req:%d/go:%d/reject:%d/timeout:%d/fail:%d]\n",
- prptctrl->bt_rfk_cnt[BTC_BCNT_RFK_REQ],
- prptctrl->bt_rfk_cnt[BTC_BCNT_RFK_GO],
- prptctrl->bt_rfk_cnt[BTC_BCNT_RFK_REJECT],
- prptctrl->bt_rfk_cnt[BTC_BCNT_RFK_TIMEOUT],
- prptctrl->bt_rfk_cnt[BTC_BCNT_RFK_FAIL]);
+ p += scnprintf(p, end - p, " (WL FW Hang!!)");
+ p += scnprintf(p, end - p, "\n");
+ p += scnprintf(p, end - p,
+ " %-15s : send_ok:%d, send_fail:%d, recv:%d",
+ "[mailbox]", prptctrl->mb_send_ok_cnt,
+ prptctrl->mb_send_fail_cnt,
+ prptctrl->mb_recv_cnt);
+
+ p += scnprintf(p, end - p,
+ "(A2DP_empty:%d, A2DP_flowstop:%d, A2DP_full:%d)\n",
+ prptctrl->mb_a2dp_empty_cnt,
+ prptctrl->mb_a2dp_flct_cnt,
+ prptctrl->mb_a2dp_full_cnt);
+
+ p += scnprintf(p, end - p,
+ " %-15s : wl_rfk[req:%d/go:%d/reject:%d/timeout:%d]",
+ "[RFK]", cx->cnt_wl[BTC_WCNT_RFK_REQ],
+ cx->cnt_wl[BTC_WCNT_RFK_GO],
+ cx->cnt_wl[BTC_WCNT_RFK_REJECT],
+ cx->cnt_wl[BTC_WCNT_RFK_TIMEOUT]);
+
+ p += scnprintf(p, end - p,
+ ", bt_rfk[req:%d/go:%d/reject:%d/timeout:%d/fail:%d]\n",
+ prptctrl->bt_rfk_cnt[BTC_BCNT_RFK_REQ],
+ prptctrl->bt_rfk_cnt[BTC_BCNT_RFK_GO],
+ prptctrl->bt_rfk_cnt[BTC_BCNT_RFK_REJECT],
+ prptctrl->bt_rfk_cnt[BTC_BCNT_RFK_TIMEOUT],
+ prptctrl->bt_rfk_cnt[BTC_BCNT_RFK_FAIL]);
if (prptctrl->bt_rfk_cnt[BTC_BCNT_RFK_TIMEOUT] > 0)
bt->rfk_info.map.timeout = 1;
@@ -10370,42 +10510,44 @@ static void _show_summary_v1(struct rtw89_dev *rtwdev, struct seq_file *m)
dm->error.map.wl_rfk_timeout = bt->rfk_info.map.timeout;
} else {
- seq_printf(m,
- " %-15s : h2c_cnt=%d(fail:%d), c2h_cnt=%d, rpt_cnt=%d, rpt_map=0x%x",
- "[summary]", pfwinfo->cnt_h2c,
- pfwinfo->cnt_h2c_fail, pfwinfo->cnt_c2h,
- pfwinfo->event[BTF_EVNT_RPT],
- btc->fwinfo.rpt_en_map);
- seq_puts(m, " (WL FW report invalid!!)\n");
+ p += scnprintf(p, end - p,
+ " %-15s : h2c_cnt=%d(fail:%d), c2h_cnt=%d, rpt_cnt=%d, rpt_map=0x%x",
+ "[summary]", pfwinfo->cnt_h2c,
+ pfwinfo->cnt_h2c_fail, pfwinfo->cnt_c2h,
+ pfwinfo->event[BTF_EVNT_RPT],
+ btc->fwinfo.rpt_en_map);
+ p += scnprintf(p, end - p, " (WL FW report invalid!!)\n");
}
for (i = 0; i < BTC_NCNT_NUM; i++)
cnt_sum += dm->cnt_notify[i];
- seq_printf(m,
- " %-15s : total=%d, show_coex_info=%d, power_on=%d, init_coex=%d, ",
- "[notify_cnt]", cnt_sum, cnt[BTC_NCNT_SHOW_COEX_INFO],
- cnt[BTC_NCNT_POWER_ON], cnt[BTC_NCNT_INIT_COEX]);
+ p += scnprintf(p, end - p,
+ " %-15s : total=%d, show_coex_info=%d, power_on=%d, init_coex=%d, ",
+ "[notify_cnt]", cnt_sum, cnt[BTC_NCNT_SHOW_COEX_INFO],
+ cnt[BTC_NCNT_POWER_ON], cnt[BTC_NCNT_INIT_COEX]);
+
+ p += scnprintf(p, end - p,
+ "power_off=%d, radio_state=%d, role_info=%d, wl_rfk=%d, wl_sta=%d\n",
+ cnt[BTC_NCNT_POWER_OFF], cnt[BTC_NCNT_RADIO_STATE],
+ cnt[BTC_NCNT_ROLE_INFO], cnt[BTC_NCNT_WL_RFK],
+ cnt[BTC_NCNT_WL_STA]);
- seq_printf(m,
- "power_off=%d, radio_state=%d, role_info=%d, wl_rfk=%d, wl_sta=%d\n",
- cnt[BTC_NCNT_POWER_OFF], cnt[BTC_NCNT_RADIO_STATE],
- cnt[BTC_NCNT_ROLE_INFO], cnt[BTC_NCNT_WL_RFK],
- cnt[BTC_NCNT_WL_STA]);
+ p += scnprintf(p, end - p,
+ " %-15s : scan_start=%d, scan_finish=%d, switch_band=%d, special_pkt=%d, ",
+ "[notify_cnt]", cnt[BTC_NCNT_SCAN_START],
+ cnt[BTC_NCNT_SCAN_FINISH], cnt[BTC_NCNT_SWITCH_BAND],
+ cnt[BTC_NCNT_SPECIAL_PACKET]);
- seq_printf(m,
- " %-15s : scan_start=%d, scan_finish=%d, switch_band=%d, special_pkt=%d, ",
- "[notify_cnt]", cnt[BTC_NCNT_SCAN_START],
- cnt[BTC_NCNT_SCAN_FINISH], cnt[BTC_NCNT_SWITCH_BAND],
- cnt[BTC_NCNT_SPECIAL_PACKET]);
+ p += scnprintf(p, end - p,
+ "timer=%d, control=%d, customerize=%d\n",
+ cnt[BTC_NCNT_TIMER], cnt[BTC_NCNT_CONTROL],
+ cnt[BTC_NCNT_CUSTOMERIZE]);
- seq_printf(m,
- "timer=%d, control=%d, customerize=%d\n",
- cnt[BTC_NCNT_TIMER], cnt[BTC_NCNT_CONTROL],
- cnt[BTC_NCNT_CUSTOMERIZE]);
+ return p - buf;
}
-static void _show_summary_v4(struct rtw89_dev *rtwdev, struct seq_file *m)
+static int _show_summary_v4(struct rtw89_dev *rtwdev, char *buf, size_t bufsz)
{
struct rtw89_btc *btc = &rtwdev->btc;
struct rtw89_btc_btf_fwinfo *pfwinfo = &btc->fwinfo;
@@ -10416,64 +10558,65 @@ static void _show_summary_v4(struct rtw89_dev *rtwdev, struct seq_file *m)
struct rtw89_btc_wl_info *wl = &cx->wl;
struct rtw89_btc_bt_info *bt = &cx->bt;
u32 cnt_sum = 0, *cnt = btc->dm.cnt_notify;
+ char *p = buf, *end = buf + bufsz;
u8 i;
if (!(dm->coex_info_map & BTC_COEX_INFO_SUMMARY))
- return;
+ return 0;
- seq_puts(m, "========== [Statistics] ==========\n");
+ p += scnprintf(p, end - p, "========== [Statistics] ==========\n");
pcinfo = &pfwinfo->rpt_ctrl.cinfo;
if (pcinfo->valid && !wl->status.map.lps && !wl->status.map.rf_off) {
prptctrl = &pfwinfo->rpt_ctrl.finfo.v4;
- seq_printf(m,
- " %-15s : h2c_cnt=%d(fail:%d, fw_recv:%d), c2h_cnt=%d(fw_send:%d), ",
- "[summary]", pfwinfo->cnt_h2c,
- pfwinfo->cnt_h2c_fail,
- le32_to_cpu(prptctrl->rpt_info.cnt_h2c),
- pfwinfo->cnt_c2h,
- le32_to_cpu(prptctrl->rpt_info.cnt_c2h));
-
- seq_printf(m,
- "rpt_cnt=%d(fw_send:%d), rpt_map=0x%x, dm_error_map:0x%x",
- pfwinfo->event[BTF_EVNT_RPT],
- le32_to_cpu(prptctrl->rpt_info.cnt),
- le32_to_cpu(prptctrl->rpt_info.en),
- dm->error.val);
+ p += scnprintf(p, end - p,
+ " %-15s : h2c_cnt=%d(fail:%d, fw_recv:%d), c2h_cnt=%d(fw_send:%d), ",
+ "[summary]", pfwinfo->cnt_h2c,
+ pfwinfo->cnt_h2c_fail,
+ le32_to_cpu(prptctrl->rpt_info.cnt_h2c),
+ pfwinfo->cnt_c2h,
+ le32_to_cpu(prptctrl->rpt_info.cnt_c2h));
+
+ p += scnprintf(p, end - p,
+ "rpt_cnt=%d(fw_send:%d), rpt_map=0x%x, dm_error_map:0x%x",
+ pfwinfo->event[BTF_EVNT_RPT],
+ le32_to_cpu(prptctrl->rpt_info.cnt),
+ le32_to_cpu(prptctrl->rpt_info.en),
+ dm->error.val);
if (dm->error.map.wl_fw_hang)
- seq_puts(m, " (WL FW Hang!!)");
- seq_puts(m, "\n");
- seq_printf(m,
- " %-15s : send_ok:%d, send_fail:%d, recv:%d, ",
- "[mailbox]",
- le32_to_cpu(prptctrl->bt_mbx_info.cnt_send_ok),
- le32_to_cpu(prptctrl->bt_mbx_info.cnt_send_fail),
- le32_to_cpu(prptctrl->bt_mbx_info.cnt_recv));
-
- seq_printf(m,
- "A2DP_empty:%d(stop:%d, tx:%d, ack:%d, nack:%d)\n",
- le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_empty),
- le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_flowctrl),
- le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_tx),
- le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_ack),
- le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_nack));
-
- seq_printf(m,
- " %-15s : wl_rfk[req:%d/go:%d/reject:%d/timeout:%d]",
- "[RFK]", cx->cnt_wl[BTC_WCNT_RFK_REQ],
- cx->cnt_wl[BTC_WCNT_RFK_GO],
- cx->cnt_wl[BTC_WCNT_RFK_REJECT],
- cx->cnt_wl[BTC_WCNT_RFK_TIMEOUT]);
-
- seq_printf(m,
- ", bt_rfk[req:%d/go:%d/reject:%d/timeout:%d/fail:%d]\n",
- le32_to_cpu(prptctrl->bt_cnt[BTC_BCNT_RFK_REQ]),
- le32_to_cpu(prptctrl->bt_cnt[BTC_BCNT_RFK_GO]),
- le32_to_cpu(prptctrl->bt_cnt[BTC_BCNT_RFK_REJECT]),
- le32_to_cpu(prptctrl->bt_cnt[BTC_BCNT_RFK_TIMEOUT]),
- le32_to_cpu(prptctrl->bt_cnt[BTC_BCNT_RFK_FAIL]));
+ p += scnprintf(p, end - p, " (WL FW Hang!!)");
+ p += scnprintf(p, end - p, "\n");
+ p += scnprintf(p, end - p,
+ " %-15s : send_ok:%d, send_fail:%d, recv:%d, ",
+ "[mailbox]",
+ le32_to_cpu(prptctrl->bt_mbx_info.cnt_send_ok),
+ le32_to_cpu(prptctrl->bt_mbx_info.cnt_send_fail),
+ le32_to_cpu(prptctrl->bt_mbx_info.cnt_recv));
+
+ p += scnprintf(p, end - p,
+ "A2DP_empty:%d(stop:%d, tx:%d, ack:%d, nack:%d)\n",
+ le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_empty),
+ le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_flowctrl),
+ le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_tx),
+ le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_ack),
+ le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_nack));
+
+ p += scnprintf(p, end - p,
+ " %-15s : wl_rfk[req:%d/go:%d/reject:%d/timeout:%d]",
+ "[RFK]", cx->cnt_wl[BTC_WCNT_RFK_REQ],
+ cx->cnt_wl[BTC_WCNT_RFK_GO],
+ cx->cnt_wl[BTC_WCNT_RFK_REJECT],
+ cx->cnt_wl[BTC_WCNT_RFK_TIMEOUT]);
+
+ p += scnprintf(p, end - p,
+ ", bt_rfk[req:%d/go:%d/reject:%d/timeout:%d/fail:%d]\n",
+ le32_to_cpu(prptctrl->bt_cnt[BTC_BCNT_RFK_REQ]),
+ le32_to_cpu(prptctrl->bt_cnt[BTC_BCNT_RFK_GO]),
+ le32_to_cpu(prptctrl->bt_cnt[BTC_BCNT_RFK_REJECT]),
+ le32_to_cpu(prptctrl->bt_cnt[BTC_BCNT_RFK_TIMEOUT]),
+ le32_to_cpu(prptctrl->bt_cnt[BTC_BCNT_RFK_FAIL]));
if (le32_to_cpu(prptctrl->bt_cnt[BTC_BCNT_RFK_TIMEOUT]) > 0)
bt->rfk_info.map.timeout = 1;
@@ -10482,42 +10625,44 @@ static void _show_summary_v4(struct rtw89_dev *rtwdev, struct seq_file *m)
dm->error.map.wl_rfk_timeout = bt->rfk_info.map.timeout;
} else {
- seq_printf(m,
- " %-15s : h2c_cnt=%d(fail:%d), c2h_cnt=%d, rpt_cnt=%d, rpt_map=0x%x",
- "[summary]", pfwinfo->cnt_h2c,
- pfwinfo->cnt_h2c_fail, pfwinfo->cnt_c2h,
- pfwinfo->event[BTF_EVNT_RPT],
- btc->fwinfo.rpt_en_map);
- seq_puts(m, " (WL FW report invalid!!)\n");
+ p += scnprintf(p, end - p,
+ " %-15s : h2c_cnt=%d(fail:%d), c2h_cnt=%d, rpt_cnt=%d, rpt_map=0x%x",
+ "[summary]", pfwinfo->cnt_h2c,
+ pfwinfo->cnt_h2c_fail, pfwinfo->cnt_c2h,
+ pfwinfo->event[BTF_EVNT_RPT],
+ btc->fwinfo.rpt_en_map);
+ p += scnprintf(p, end - p, " (WL FW report invalid!!)\n");
}
for (i = 0; i < BTC_NCNT_NUM; i++)
cnt_sum += dm->cnt_notify[i];
- seq_printf(m,
- " %-15s : total=%d, show_coex_info=%d, power_on=%d, init_coex=%d, ",
- "[notify_cnt]", cnt_sum, cnt[BTC_NCNT_SHOW_COEX_INFO],
- cnt[BTC_NCNT_POWER_ON], cnt[BTC_NCNT_INIT_COEX]);
+ p += scnprintf(p, end - p,
+ " %-15s : total=%d, show_coex_info=%d, power_on=%d, init_coex=%d, ",
+ "[notify_cnt]", cnt_sum, cnt[BTC_NCNT_SHOW_COEX_INFO],
+ cnt[BTC_NCNT_POWER_ON], cnt[BTC_NCNT_INIT_COEX]);
+
+ p += scnprintf(p, end - p,
+ "power_off=%d, radio_state=%d, role_info=%d, wl_rfk=%d, wl_sta=%d\n",
+ cnt[BTC_NCNT_POWER_OFF], cnt[BTC_NCNT_RADIO_STATE],
+ cnt[BTC_NCNT_ROLE_INFO], cnt[BTC_NCNT_WL_RFK],
+ cnt[BTC_NCNT_WL_STA]);
- seq_printf(m,
- "power_off=%d, radio_state=%d, role_info=%d, wl_rfk=%d, wl_sta=%d\n",
- cnt[BTC_NCNT_POWER_OFF], cnt[BTC_NCNT_RADIO_STATE],
- cnt[BTC_NCNT_ROLE_INFO], cnt[BTC_NCNT_WL_RFK],
- cnt[BTC_NCNT_WL_STA]);
+ p += scnprintf(p, end - p,
+ " %-15s : scan_start=%d, scan_finish=%d, switch_band=%d, special_pkt=%d, ",
+ "[notify_cnt]", cnt[BTC_NCNT_SCAN_START],
+ cnt[BTC_NCNT_SCAN_FINISH], cnt[BTC_NCNT_SWITCH_BAND],
+ cnt[BTC_NCNT_SPECIAL_PACKET]);
- seq_printf(m,
- " %-15s : scan_start=%d, scan_finish=%d, switch_band=%d, special_pkt=%d, ",
- "[notify_cnt]", cnt[BTC_NCNT_SCAN_START],
- cnt[BTC_NCNT_SCAN_FINISH], cnt[BTC_NCNT_SWITCH_BAND],
- cnt[BTC_NCNT_SPECIAL_PACKET]);
+ p += scnprintf(p, end - p,
+ "timer=%d, control=%d, customerize=%d\n",
+ cnt[BTC_NCNT_TIMER], cnt[BTC_NCNT_CONTROL],
+ cnt[BTC_NCNT_CUSTOMERIZE]);
- seq_printf(m,
- "timer=%d, control=%d, customerize=%d\n",
- cnt[BTC_NCNT_TIMER], cnt[BTC_NCNT_CONTROL],
- cnt[BTC_NCNT_CUSTOMERIZE]);
+ return p - buf;
}
-static void _show_summary_v5(struct rtw89_dev *rtwdev, struct seq_file *m)
+static int _show_summary_v5(struct rtw89_dev *rtwdev, char *buf, size_t bufsz)
{
struct rtw89_btc *btc = &rtwdev->btc;
struct rtw89_btc_btf_fwinfo *pfwinfo = &btc->fwinfo;
@@ -10527,112 +10672,118 @@ static void _show_summary_v5(struct rtw89_dev *rtwdev, struct seq_file *m)
struct rtw89_btc_dm *dm = &btc->dm;
struct rtw89_btc_wl_info *wl = &cx->wl;
u32 cnt_sum = 0, *cnt = btc->dm.cnt_notify;
+ char *p = buf, *end = buf + bufsz;
u8 i;
if (!(dm->coex_info_map & BTC_COEX_INFO_SUMMARY))
- return;
+ return 0;
- seq_puts(m, "========== [Statistics] ==========\n");
+ p += scnprintf(p, end - p, "========== [Statistics] ==========\n");
pcinfo = &pfwinfo->rpt_ctrl.cinfo;
if (pcinfo->valid && !wl->status.map.lps && !wl->status.map.rf_off) {
prptctrl = &pfwinfo->rpt_ctrl.finfo.v5;
- seq_printf(m,
- " %-15s : h2c_cnt=%d(fail:%d, fw_recv:%d), c2h_cnt=%d(fw_send:%d, len:%d), ",
- "[summary]", pfwinfo->cnt_h2c, pfwinfo->cnt_h2c_fail,
- le16_to_cpu(prptctrl->rpt_info.cnt_h2c),
- pfwinfo->cnt_c2h,
- le16_to_cpu(prptctrl->rpt_info.cnt_c2h),
- le16_to_cpu(prptctrl->rpt_info.len_c2h));
-
- seq_printf(m,
- "rpt_cnt=%d(fw_send:%d), rpt_map=0x%x",
- pfwinfo->event[BTF_EVNT_RPT],
- le16_to_cpu(prptctrl->rpt_info.cnt),
- le32_to_cpu(prptctrl->rpt_info.en));
+ p += scnprintf(p, end - p,
+ " %-15s : h2c_cnt=%d(fail:%d, fw_recv:%d), c2h_cnt=%d(fw_send:%d, len:%d), ",
+ "[summary]", pfwinfo->cnt_h2c,
+ pfwinfo->cnt_h2c_fail,
+ le16_to_cpu(prptctrl->rpt_info.cnt_h2c),
+ pfwinfo->cnt_c2h,
+ le16_to_cpu(prptctrl->rpt_info.cnt_c2h),
+ le16_to_cpu(prptctrl->rpt_info.len_c2h));
+
+ p += scnprintf(p, end - p,
+ "rpt_cnt=%d(fw_send:%d), rpt_map=0x%x",
+ pfwinfo->event[BTF_EVNT_RPT],
+ le16_to_cpu(prptctrl->rpt_info.cnt),
+ le32_to_cpu(prptctrl->rpt_info.en));
if (dm->error.map.wl_fw_hang)
- seq_puts(m, " (WL FW Hang!!)");
- seq_puts(m, "\n");
- seq_printf(m,
- " %-15s : send_ok:%d, send_fail:%d, recv:%d, ",
- "[mailbox]",
- le32_to_cpu(prptctrl->bt_mbx_info.cnt_send_ok),
- le32_to_cpu(prptctrl->bt_mbx_info.cnt_send_fail),
- le32_to_cpu(prptctrl->bt_mbx_info.cnt_recv));
-
- seq_printf(m,
- "A2DP_empty:%d(stop:%d, tx:%d, ack:%d, nack:%d)\n",
- le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_empty),
- le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_flowctrl),
- le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_tx),
- le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_ack),
- le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_nack));
-
- seq_printf(m,
- " %-15s : wl_rfk[req:%d/go:%d/reject:%d/tout:%d]",
- "[RFK/LPS]", cx->cnt_wl[BTC_WCNT_RFK_REQ],
- cx->cnt_wl[BTC_WCNT_RFK_GO],
- cx->cnt_wl[BTC_WCNT_RFK_REJECT],
- cx->cnt_wl[BTC_WCNT_RFK_TIMEOUT]);
-
- seq_printf(m,
- ", bt_rfk[req:%d]",
- le16_to_cpu(prptctrl->bt_cnt[BTC_BCNT_RFK_REQ]));
-
- seq_printf(m,
- ", AOAC[RF_on:%d/RF_off:%d]",
- le16_to_cpu(prptctrl->rpt_info.cnt_aoac_rf_on),
- le16_to_cpu(prptctrl->rpt_info.cnt_aoac_rf_off));
+ p += scnprintf(p, end - p, " (WL FW Hang!!)");
+ p += scnprintf(p, end - p, "\n");
+ p += scnprintf(p, end - p,
+ " %-15s : send_ok:%d, send_fail:%d, recv:%d, ",
+ "[mailbox]",
+ le32_to_cpu(prptctrl->bt_mbx_info.cnt_send_ok),
+ le32_to_cpu(prptctrl->bt_mbx_info.cnt_send_fail),
+ le32_to_cpu(prptctrl->bt_mbx_info.cnt_recv));
+
+ p += scnprintf(p, end - p,
+ "A2DP_empty:%d(stop:%d, tx:%d, ack:%d, nack:%d)\n",
+ le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_empty),
+ le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_flowctrl),
+ le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_tx),
+ le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_ack),
+ le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_nack));
+
+ p += scnprintf(p, end - p,
+ " %-15s : wl_rfk[req:%d/go:%d/reject:%d/tout:%d]",
+ "[RFK/LPS]", cx->cnt_wl[BTC_WCNT_RFK_REQ],
+ cx->cnt_wl[BTC_WCNT_RFK_GO],
+ cx->cnt_wl[BTC_WCNT_RFK_REJECT],
+ cx->cnt_wl[BTC_WCNT_RFK_TIMEOUT]);
+
+ p += scnprintf(p, end - p,
+ ", bt_rfk[req:%d]",
+ le16_to_cpu(prptctrl->bt_cnt[BTC_BCNT_RFK_REQ]));
+
+ p += scnprintf(p, end - p,
+ ", AOAC[RF_on:%d/RF_off:%d]",
+ le16_to_cpu(prptctrl->rpt_info.cnt_aoac_rf_on),
+ le16_to_cpu(prptctrl->rpt_info.cnt_aoac_rf_off));
} else {
- seq_printf(m,
- " %-15s : h2c_cnt=%d(fail:%d), c2h_cnt=%d",
- "[summary]", pfwinfo->cnt_h2c,
- pfwinfo->cnt_h2c_fail, pfwinfo->cnt_c2h);
+ p += scnprintf(p, end - p,
+ " %-15s : h2c_cnt=%d(fail:%d), c2h_cnt=%d",
+ "[summary]", pfwinfo->cnt_h2c,
+ pfwinfo->cnt_h2c_fail, pfwinfo->cnt_c2h);
}
if (!pcinfo->valid || pfwinfo->len_mismch || pfwinfo->fver_mismch ||
pfwinfo->err[BTFRE_EXCEPTION]) {
- seq_puts(m, "\n");
- seq_printf(m,
- " %-15s : WL FW rpt error!![rpt_ctrl_valid:%d/len:"
- "0x%x/ver:0x%x/ex:%d/lps=%d/rf_off=%d]",
- "[ERROR]", pcinfo->valid, pfwinfo->len_mismch,
- pfwinfo->fver_mismch, pfwinfo->err[BTFRE_EXCEPTION],
- wl->status.map.lps, wl->status.map.rf_off);
+ p += scnprintf(p, end - p, "\n");
+ p += scnprintf(p, end - p,
+ " %-15s : WL FW rpt error!![rpt_ctrl_valid:%d/len:"
+ "0x%x/ver:0x%x/ex:%d/lps=%d/rf_off=%d]",
+ "[ERROR]", pcinfo->valid, pfwinfo->len_mismch,
+ pfwinfo->fver_mismch,
+ pfwinfo->err[BTFRE_EXCEPTION],
+ wl->status.map.lps, wl->status.map.rf_off);
}
for (i = 0; i < BTC_NCNT_NUM; i++)
cnt_sum += dm->cnt_notify[i];
- seq_puts(m, "\n");
- seq_printf(m,
- " %-15s : total=%d, show_coex_info=%d, power_on=%d, init_coex=%d, ",
- "[notify_cnt]",
- cnt_sum, cnt[BTC_NCNT_SHOW_COEX_INFO],
- cnt[BTC_NCNT_POWER_ON], cnt[BTC_NCNT_INIT_COEX]);
+ p += scnprintf(p, end - p, "\n");
+ p += scnprintf(p, end - p,
+ " %-15s : total=%d, show_coex_info=%d, power_on=%d, init_coex=%d, ",
+ "[notify_cnt]",
+ cnt_sum, cnt[BTC_NCNT_SHOW_COEX_INFO],
+ cnt[BTC_NCNT_POWER_ON], cnt[BTC_NCNT_INIT_COEX]);
+
+ p += scnprintf(p, end - p,
+ "power_off=%d, radio_state=%d, role_info=%d, wl_rfk=%d, wl_sta=%d",
+ cnt[BTC_NCNT_POWER_OFF], cnt[BTC_NCNT_RADIO_STATE],
+ cnt[BTC_NCNT_ROLE_INFO], cnt[BTC_NCNT_WL_RFK],
+ cnt[BTC_NCNT_WL_STA]);
- seq_printf(m,
- "power_off=%d, radio_state=%d, role_info=%d, wl_rfk=%d, wl_sta=%d",
- cnt[BTC_NCNT_POWER_OFF], cnt[BTC_NCNT_RADIO_STATE],
- cnt[BTC_NCNT_ROLE_INFO], cnt[BTC_NCNT_WL_RFK],
- cnt[BTC_NCNT_WL_STA]);
+ p += scnprintf(p, end - p, "\n");
+ p += scnprintf(p, end - p,
+ " %-15s : scan_start=%d, scan_finish=%d, switch_band=%d, special_pkt=%d, ",
+ "[notify_cnt]",
+ cnt[BTC_NCNT_SCAN_START], cnt[BTC_NCNT_SCAN_FINISH],
+ cnt[BTC_NCNT_SWITCH_BAND],
+ cnt[BTC_NCNT_SPECIAL_PACKET]);
- seq_puts(m, "\n");
- seq_printf(m,
- " %-15s : scan_start=%d, scan_finish=%d, switch_band=%d, special_pkt=%d, ",
- "[notify_cnt]",
- cnt[BTC_NCNT_SCAN_START], cnt[BTC_NCNT_SCAN_FINISH],
- cnt[BTC_NCNT_SWITCH_BAND], cnt[BTC_NCNT_SPECIAL_PACKET]);
+ p += scnprintf(p, end - p,
+ "timer=%d, control=%d, customerize=%d",
+ cnt[BTC_NCNT_TIMER], cnt[BTC_NCNT_CONTROL],
+ cnt[BTC_NCNT_CUSTOMERIZE]);
- seq_printf(m,
- "timer=%d, control=%d, customerize=%d",
- cnt[BTC_NCNT_TIMER], cnt[BTC_NCNT_CONTROL],
- cnt[BTC_NCNT_CUSTOMERIZE]);
+ return p - buf;
}
-static void _show_summary_v105(struct rtw89_dev *rtwdev, struct seq_file *m)
+static int _show_summary_v105(struct rtw89_dev *rtwdev, char *buf, size_t bufsz)
{
struct rtw89_btc *btc = &rtwdev->btc;
struct rtw89_btc_btf_fwinfo *pfwinfo = &btc->fwinfo;
@@ -10642,112 +10793,118 @@ static void _show_summary_v105(struct rtw89_dev *rtwdev, struct seq_file *m)
struct rtw89_btc_dm *dm = &btc->dm;
struct rtw89_btc_wl_info *wl = &cx->wl;
u32 cnt_sum = 0, *cnt = btc->dm.cnt_notify;
+ char *p = buf, *end = buf + bufsz;
u8 i;
if (!(dm->coex_info_map & BTC_COEX_INFO_SUMMARY))
- return;
+ return 0;
- seq_puts(m, "========== [Statistics] ==========\n");
+ p += scnprintf(p, end - p, "========== [Statistics] ==========\n");
pcinfo = &pfwinfo->rpt_ctrl.cinfo;
if (pcinfo->valid && !wl->status.map.lps && !wl->status.map.rf_off) {
prptctrl = &pfwinfo->rpt_ctrl.finfo.v105;
- seq_printf(m,
- " %-15s : h2c_cnt=%d(fail:%d, fw_recv:%d), c2h_cnt=%d(fw_send:%d, len:%d), ",
- "[summary]", pfwinfo->cnt_h2c, pfwinfo->cnt_h2c_fail,
- le16_to_cpu(prptctrl->rpt_info.cnt_h2c),
- pfwinfo->cnt_c2h,
- le16_to_cpu(prptctrl->rpt_info.cnt_c2h),
- le16_to_cpu(prptctrl->rpt_info.len_c2h));
-
- seq_printf(m,
- "rpt_cnt=%d(fw_send:%d), rpt_map=0x%x",
- pfwinfo->event[BTF_EVNT_RPT],
- le16_to_cpu(prptctrl->rpt_info.cnt),
- le32_to_cpu(prptctrl->rpt_info.en));
+ p += scnprintf(p, end - p,
+ " %-15s : h2c_cnt=%d(fail:%d, fw_recv:%d), c2h_cnt=%d(fw_send:%d, len:%d), ",
+ "[summary]", pfwinfo->cnt_h2c,
+ pfwinfo->cnt_h2c_fail,
+ le16_to_cpu(prptctrl->rpt_info.cnt_h2c),
+ pfwinfo->cnt_c2h,
+ le16_to_cpu(prptctrl->rpt_info.cnt_c2h),
+ le16_to_cpu(prptctrl->rpt_info.len_c2h));
+
+ p += scnprintf(p, end - p,
+ "rpt_cnt=%d(fw_send:%d), rpt_map=0x%x",
+ pfwinfo->event[BTF_EVNT_RPT],
+ le16_to_cpu(prptctrl->rpt_info.cnt),
+ le32_to_cpu(prptctrl->rpt_info.en));
if (dm->error.map.wl_fw_hang)
- seq_puts(m, " (WL FW Hang!!)");
- seq_puts(m, "\n");
- seq_printf(m,
- " %-15s : send_ok:%d, send_fail:%d, recv:%d, ",
- "[mailbox]",
- le32_to_cpu(prptctrl->bt_mbx_info.cnt_send_ok),
- le32_to_cpu(prptctrl->bt_mbx_info.cnt_send_fail),
- le32_to_cpu(prptctrl->bt_mbx_info.cnt_recv));
-
- seq_printf(m,
- "A2DP_empty:%d(stop:%d, tx:%d, ack:%d, nack:%d)\n",
- le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_empty),
- le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_flowctrl),
- le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_tx),
- le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_ack),
- le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_nack));
-
- seq_printf(m,
- " %-15s : wl_rfk[req:%d/go:%d/reject:%d/tout:%d]",
- "[RFK/LPS]", cx->cnt_wl[BTC_WCNT_RFK_REQ],
- cx->cnt_wl[BTC_WCNT_RFK_GO],
- cx->cnt_wl[BTC_WCNT_RFK_REJECT],
- cx->cnt_wl[BTC_WCNT_RFK_TIMEOUT]);
-
- seq_printf(m,
- ", bt_rfk[req:%d]",
- le16_to_cpu(prptctrl->bt_cnt[BTC_BCNT_RFK_REQ]));
-
- seq_printf(m,
- ", AOAC[RF_on:%d/RF_off:%d]",
- le16_to_cpu(prptctrl->rpt_info.cnt_aoac_rf_on),
- le16_to_cpu(prptctrl->rpt_info.cnt_aoac_rf_off));
+ p += scnprintf(p, end - p, " (WL FW Hang!!)");
+ p += scnprintf(p, end - p, "\n");
+ p += scnprintf(p, end - p,
+ " %-15s : send_ok:%d, send_fail:%d, recv:%d, ",
+ "[mailbox]",
+ le32_to_cpu(prptctrl->bt_mbx_info.cnt_send_ok),
+ le32_to_cpu(prptctrl->bt_mbx_info.cnt_send_fail),
+ le32_to_cpu(prptctrl->bt_mbx_info.cnt_recv));
+
+ p += scnprintf(p, end - p,
+ "A2DP_empty:%d(stop:%d, tx:%d, ack:%d, nack:%d)\n",
+ le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_empty),
+ le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_flowctrl),
+ le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_tx),
+ le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_ack),
+ le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_nack));
+
+ p += scnprintf(p, end - p,
+ " %-15s : wl_rfk[req:%d/go:%d/reject:%d/tout:%d]",
+ "[RFK/LPS]", cx->cnt_wl[BTC_WCNT_RFK_REQ],
+ cx->cnt_wl[BTC_WCNT_RFK_GO],
+ cx->cnt_wl[BTC_WCNT_RFK_REJECT],
+ cx->cnt_wl[BTC_WCNT_RFK_TIMEOUT]);
+
+ p += scnprintf(p, end - p,
+ ", bt_rfk[req:%d]",
+ le16_to_cpu(prptctrl->bt_cnt[BTC_BCNT_RFK_REQ]));
+
+ p += scnprintf(p, end - p,
+ ", AOAC[RF_on:%d/RF_off:%d]",
+ le16_to_cpu(prptctrl->rpt_info.cnt_aoac_rf_on),
+ le16_to_cpu(prptctrl->rpt_info.cnt_aoac_rf_off));
} else {
- seq_printf(m,
- " %-15s : h2c_cnt=%d(fail:%d), c2h_cnt=%d",
- "[summary]", pfwinfo->cnt_h2c,
- pfwinfo->cnt_h2c_fail, pfwinfo->cnt_c2h);
+ p += scnprintf(p, end - p,
+ " %-15s : h2c_cnt=%d(fail:%d), c2h_cnt=%d",
+ "[summary]", pfwinfo->cnt_h2c,
+ pfwinfo->cnt_h2c_fail, pfwinfo->cnt_c2h);
}
if (!pcinfo->valid || pfwinfo->len_mismch || pfwinfo->fver_mismch ||
pfwinfo->err[BTFRE_EXCEPTION]) {
- seq_puts(m, "\n");
- seq_printf(m,
- " %-15s : WL FW rpt error!![rpt_ctrl_valid:%d/len:"
- "0x%x/ver:0x%x/ex:%d/lps=%d/rf_off=%d]",
- "[ERROR]", pcinfo->valid, pfwinfo->len_mismch,
- pfwinfo->fver_mismch, pfwinfo->err[BTFRE_EXCEPTION],
- wl->status.map.lps, wl->status.map.rf_off);
+ p += scnprintf(p, end - p, "\n");
+ p += scnprintf(p, end - p,
+ " %-15s : WL FW rpt error!![rpt_ctrl_valid:%d/len:"
+ "0x%x/ver:0x%x/ex:%d/lps=%d/rf_off=%d]",
+ "[ERROR]", pcinfo->valid, pfwinfo->len_mismch,
+ pfwinfo->fver_mismch,
+ pfwinfo->err[BTFRE_EXCEPTION],
+ wl->status.map.lps, wl->status.map.rf_off);
}
for (i = 0; i < BTC_NCNT_NUM; i++)
cnt_sum += dm->cnt_notify[i];
- seq_puts(m, "\n");
- seq_printf(m,
- " %-15s : total=%d, show_coex_info=%d, power_on=%d, init_coex=%d, ",
- "[notify_cnt]",
- cnt_sum, cnt[BTC_NCNT_SHOW_COEX_INFO],
- cnt[BTC_NCNT_POWER_ON], cnt[BTC_NCNT_INIT_COEX]);
+ p += scnprintf(p, end - p, "\n");
+ p += scnprintf(p, end - p,
+ " %-15s : total=%d, show_coex_info=%d, power_on=%d, init_coex=%d, ",
+ "[notify_cnt]",
+ cnt_sum, cnt[BTC_NCNT_SHOW_COEX_INFO],
+ cnt[BTC_NCNT_POWER_ON], cnt[BTC_NCNT_INIT_COEX]);
- seq_printf(m,
- "power_off=%d, radio_state=%d, role_info=%d, wl_rfk=%d, wl_sta=%d",
- cnt[BTC_NCNT_POWER_OFF], cnt[BTC_NCNT_RADIO_STATE],
- cnt[BTC_NCNT_ROLE_INFO], cnt[BTC_NCNT_WL_RFK],
- cnt[BTC_NCNT_WL_STA]);
+ p += scnprintf(p, end - p,
+ "power_off=%d, radio_state=%d, role_info=%d, wl_rfk=%d, wl_sta=%d",
+ cnt[BTC_NCNT_POWER_OFF], cnt[BTC_NCNT_RADIO_STATE],
+ cnt[BTC_NCNT_ROLE_INFO], cnt[BTC_NCNT_WL_RFK],
+ cnt[BTC_NCNT_WL_STA]);
- seq_puts(m, "\n");
- seq_printf(m,
- " %-15s : scan_start=%d, scan_finish=%d, switch_band=%d, special_pkt=%d, ",
- "[notify_cnt]",
- cnt[BTC_NCNT_SCAN_START], cnt[BTC_NCNT_SCAN_FINISH],
- cnt[BTC_NCNT_SWITCH_BAND], cnt[BTC_NCNT_SPECIAL_PACKET]);
+ p += scnprintf(p, end - p, "\n");
+ p += scnprintf(p, end - p,
+ " %-15s : scan_start=%d, scan_finish=%d, switch_band=%d, special_pkt=%d, ",
+ "[notify_cnt]",
+ cnt[BTC_NCNT_SCAN_START], cnt[BTC_NCNT_SCAN_FINISH],
+ cnt[BTC_NCNT_SWITCH_BAND],
+ cnt[BTC_NCNT_SPECIAL_PACKET]);
- seq_printf(m,
- "timer=%d, control=%d, customerize=%d",
- cnt[BTC_NCNT_TIMER], cnt[BTC_NCNT_CONTROL],
- cnt[BTC_NCNT_CUSTOMERIZE]);
+ p += scnprintf(p, end - p,
+ "timer=%d, control=%d, customerize=%d",
+ cnt[BTC_NCNT_TIMER], cnt[BTC_NCNT_CONTROL],
+ cnt[BTC_NCNT_CUSTOMERIZE]);
+
+ return p - buf;
}
-static void _show_summary_v7(struct rtw89_dev *rtwdev, struct seq_file *m)
+static int _show_summary_v7(struct rtw89_dev *rtwdev, char *buf, size_t bufsz)
{
struct rtw89_btc_btf_fwinfo *pfwinfo = &rtwdev->btc.fwinfo;
struct rtw89_btc_fbtc_rpt_ctrl_v7 *prptctrl = NULL;
@@ -10756,100 +10913,111 @@ static void _show_summary_v7(struct rtw89_dev *rtwdev, struct seq_file *m)
struct rtw89_btc_dm *dm = &rtwdev->btc.dm;
struct rtw89_btc_wl_info *wl = &cx->wl;
u32 *cnt = rtwdev->btc.dm.cnt_notify;
+ char *p = buf, *end = buf + bufsz;
u32 cnt_sum = 0;
u8 i;
if (!(dm->coex_info_map & BTC_COEX_INFO_SUMMARY))
- return;
+ return 0;
- seq_printf(m, "%s", "\n\r========== [Statistics] ==========");
+ p += scnprintf(p, end - p, "%s",
+ "\n\r========== [Statistics] ==========");
pcinfo = &pfwinfo->rpt_ctrl.cinfo;
if (pcinfo->valid && wl->status.map.lps != BTC_LPS_RF_OFF &&
!wl->status.map.rf_off) {
prptctrl = &pfwinfo->rpt_ctrl.finfo.v7;
- seq_printf(m,
- "\n\r %-15s : h2c_cnt=%d(fail:%d, fw_recv:%d),"
- "c2h_cnt=%d(fw_send:%d, len:%d, max:%d), ",
- "[summary]", pfwinfo->cnt_h2c, pfwinfo->cnt_h2c_fail,
- le16_to_cpu(prptctrl->rpt_info.cnt_h2c), pfwinfo->cnt_c2h,
- le16_to_cpu(prptctrl->rpt_info.cnt_c2h),
- le16_to_cpu(prptctrl->rpt_info.len_c2h),
- rtwdev->btc.ver->info_buf);
-
- seq_printf(m, "rpt_cnt=%d(fw_send:%d), rpt_map=0x%x",
- pfwinfo->event[BTF_EVNT_RPT],
- le16_to_cpu(prptctrl->rpt_info.cnt),
- le32_to_cpu(prptctrl->rpt_info.en));
+ p += scnprintf(p, end - p,
+ "\n\r %-15s : h2c_cnt=%d(fail:%d, fw_recv:%d),"
+ "c2h_cnt=%d(fw_send:%d, len:%d, max:%d), ",
+ "[summary]", pfwinfo->cnt_h2c,
+ pfwinfo->cnt_h2c_fail,
+ le16_to_cpu(prptctrl->rpt_info.cnt_h2c),
+ pfwinfo->cnt_c2h,
+ le16_to_cpu(prptctrl->rpt_info.cnt_c2h),
+ le16_to_cpu(prptctrl->rpt_info.len_c2h),
+ rtwdev->btc.ver->info_buf);
+
+ p += scnprintf(p, end - p,
+ "rpt_cnt=%d(fw_send:%d), rpt_map=0x%x",
+ pfwinfo->event[BTF_EVNT_RPT],
+ le16_to_cpu(prptctrl->rpt_info.cnt),
+ le32_to_cpu(prptctrl->rpt_info.en));
if (dm->error.map.wl_fw_hang)
- seq_puts(m, " (WL FW Hang!!)");
-
- seq_printf(m, "\n\r %-15s : send_ok:%d, send_fail:%d, recv:%d, ",
- "[mailbox]", le32_to_cpu(prptctrl->bt_mbx_info.cnt_send_ok),
- le32_to_cpu(prptctrl->bt_mbx_info.cnt_send_fail),
- le32_to_cpu(prptctrl->bt_mbx_info.cnt_recv));
-
- seq_printf(m, "A2DP_empty:%d(stop:%d/tx:%d/ack:%d/nack:%d)",
- le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_empty),
- le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_flowctrl),
- le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_tx),
- le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_ack),
- le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_nack));
-
- seq_printf(m,
- "\n\r %-15s : wl_rfk[req:%d/go:%d/reject:%d/tout:%d/time:%dms]",
- "[RFK/LPS]", cx->cnt_wl[BTC_WCNT_RFK_REQ],
- cx->cnt_wl[BTC_WCNT_RFK_GO],
- cx->cnt_wl[BTC_WCNT_RFK_REJECT],
- cx->cnt_wl[BTC_WCNT_RFK_TIMEOUT],
- wl->rfk_info.proc_time);
-
- seq_printf(m, ", bt_rfk[req:%d]",
- le16_to_cpu(prptctrl->bt_cnt[BTC_BCNT_RFK_REQ]));
-
- seq_printf(m, ", AOAC[RF_on:%d/RF_off:%d]",
- le16_to_cpu(prptctrl->rpt_info.cnt_aoac_rf_on),
- le16_to_cpu(prptctrl->rpt_info.cnt_aoac_rf_off));
+ p += scnprintf(p, end - p, " (WL FW Hang!!)");
+
+ p += scnprintf(p, end - p,
+ "\n\r %-15s : send_ok:%d, send_fail:%d, recv:%d, ",
+ "[mailbox]",
+ le32_to_cpu(prptctrl->bt_mbx_info.cnt_send_ok),
+ le32_to_cpu(prptctrl->bt_mbx_info.cnt_send_fail),
+ le32_to_cpu(prptctrl->bt_mbx_info.cnt_recv));
+
+ p += scnprintf(p, end - p,
+ "A2DP_empty:%d(stop:%d/tx:%d/ack:%d/nack:%d)",
+ le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_empty),
+ le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_flowctrl),
+ le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_tx),
+ le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_ack),
+ le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_nack));
+
+ p += scnprintf(p, end - p,
+ "\n\r %-15s : wl_rfk[req:%d/go:%d/reject:%d/tout:%d/time:%dms]",
+ "[RFK/LPS]", cx->cnt_wl[BTC_WCNT_RFK_REQ],
+ cx->cnt_wl[BTC_WCNT_RFK_GO],
+ cx->cnt_wl[BTC_WCNT_RFK_REJECT],
+ cx->cnt_wl[BTC_WCNT_RFK_TIMEOUT],
+ wl->rfk_info.proc_time);
+
+ p += scnprintf(p, end - p, ", bt_rfk[req:%d]",
+ le16_to_cpu(prptctrl->bt_cnt[BTC_BCNT_RFK_REQ]));
+
+ p += scnprintf(p, end - p, ", AOAC[RF_on:%d/RF_off:%d]",
+ le16_to_cpu(prptctrl->rpt_info.cnt_aoac_rf_on),
+ le16_to_cpu(prptctrl->rpt_info.cnt_aoac_rf_off));
} else {
- seq_printf(m,
- "\n\r %-15s : h2c_cnt=%d(fail:%d), c2h_cnt=%d (lps=%d/rf_off=%d)",
- "[summary]",
- pfwinfo->cnt_h2c, pfwinfo->cnt_h2c_fail,
- pfwinfo->cnt_c2h,
- wl->status.map.lps, wl->status.map.rf_off);
+ p += scnprintf(p, end - p,
+ "\n\r %-15s : h2c_cnt=%d(fail:%d), c2h_cnt=%d (lps=%d/rf_off=%d)",
+ "[summary]",
+ pfwinfo->cnt_h2c, pfwinfo->cnt_h2c_fail,
+ pfwinfo->cnt_c2h,
+ wl->status.map.lps, wl->status.map.rf_off);
}
for (i = 0; i < BTC_NCNT_NUM; i++)
cnt_sum += dm->cnt_notify[i];
- seq_printf(m,
- "\n\r %-15s : total=%d, show_coex_info=%d, power_on=%d, init_coex=%d, ",
- "[notify_cnt]",
- cnt_sum, cnt[BTC_NCNT_SHOW_COEX_INFO],
- cnt[BTC_NCNT_POWER_ON], cnt[BTC_NCNT_INIT_COEX]);
+ p += scnprintf(p, end - p,
+ "\n\r %-15s : total=%d, show_coex_info=%d, power_on=%d, init_coex=%d, ",
+ "[notify_cnt]",
+ cnt_sum, cnt[BTC_NCNT_SHOW_COEX_INFO],
+ cnt[BTC_NCNT_POWER_ON], cnt[BTC_NCNT_INIT_COEX]);
+
+ p += scnprintf(p, end - p,
+ "power_off=%d, radio_state=%d, role_info=%d, wl_rfk=%d, wl_sta=%d",
+ cnt[BTC_NCNT_POWER_OFF], cnt[BTC_NCNT_RADIO_STATE],
+ cnt[BTC_NCNT_ROLE_INFO], cnt[BTC_NCNT_WL_RFK],
+ cnt[BTC_NCNT_WL_STA]);
- seq_printf(m,
- "power_off=%d, radio_state=%d, role_info=%d, wl_rfk=%d, wl_sta=%d",
- cnt[BTC_NCNT_POWER_OFF], cnt[BTC_NCNT_RADIO_STATE],
- cnt[BTC_NCNT_ROLE_INFO], cnt[BTC_NCNT_WL_RFK],
- cnt[BTC_NCNT_WL_STA]);
+ p += scnprintf(p, end - p,
+ "\n\r %-15s : scan_start=%d, scan_finish=%d, switch_band=%d, switch_chbw=%d, special_pkt=%d, ",
+ "[notify_cnt]",
+ cnt[BTC_NCNT_SCAN_START], cnt[BTC_NCNT_SCAN_FINISH],
+ cnt[BTC_NCNT_SWITCH_BAND], cnt[BTC_NCNT_SWITCH_CHBW],
+ cnt[BTC_NCNT_SPECIAL_PACKET]);
- seq_printf(m,
- "\n\r %-15s : scan_start=%d, scan_finish=%d, switch_band=%d, switch_chbw=%d, special_pkt=%d, ",
- "[notify_cnt]",
- cnt[BTC_NCNT_SCAN_START], cnt[BTC_NCNT_SCAN_FINISH],
- cnt[BTC_NCNT_SWITCH_BAND], cnt[BTC_NCNT_SWITCH_CHBW],
- cnt[BTC_NCNT_SPECIAL_PACKET]);
+ p += scnprintf(p, end - p,
+ "timer=%d, customerize=%d, hub_msg=%d, chg_fw=%d, send_cc=%d",
+ cnt[BTC_NCNT_TIMER], cnt[BTC_NCNT_CUSTOMERIZE],
+ rtwdev->btc.hubmsg_cnt, cnt[BTC_NCNT_RESUME_DL_FW],
+ cnt[BTC_NCNT_COUNTRYCODE]);
- seq_printf(m, "timer=%d, customerize=%d, hub_msg=%d, chg_fw=%d, send_cc=%d",
- cnt[BTC_NCNT_TIMER], cnt[BTC_NCNT_CUSTOMERIZE],
- rtwdev->btc.hubmsg_cnt, cnt[BTC_NCNT_RESUME_DL_FW],
- cnt[BTC_NCNT_COUNTRYCODE]);
+ return p - buf;
}
-static void _show_summary_v8(struct rtw89_dev *rtwdev, struct seq_file *m)
+static int _show_summary_v8(struct rtw89_dev *rtwdev, char *buf, size_t bufsz)
{
struct rtw89_btc_btf_fwinfo *pfwinfo = &rtwdev->btc.fwinfo;
struct rtw89_btc_rpt_cmn_info *pcinfo = NULL;
@@ -10858,153 +11026,173 @@ static void _show_summary_v8(struct rtw89_dev *rtwdev, struct seq_file *m)
struct rtw89_btc_dm *dm = &rtwdev->btc.dm;
struct rtw89_btc_wl_info *wl = &cx->wl;
u32 *cnt = rtwdev->btc.dm.cnt_notify;
+ char *p = buf, *end = buf + bufsz;
u32 cnt_sum = 0;
u8 i;
if (!(dm->coex_info_map & BTC_COEX_INFO_SUMMARY))
- return;
+ return 0;
- seq_printf(m, "%s", "\n\r========== [Statistics] ==========");
+ p += scnprintf(p, end - p, "%s",
+ "\n\r========== [Statistics] ==========");
pcinfo = &pfwinfo->rpt_ctrl.cinfo;
if (pcinfo->valid && wl->status.map.lps != BTC_LPS_RF_OFF &&
!wl->status.map.rf_off) {
prptctrl = &pfwinfo->rpt_ctrl.finfo.v8;
- seq_printf(m,
- "\n\r %-15s : h2c_cnt=%d(fail:%d, fw_recv:%d), c2h_cnt=%d(fw_send:%d, len:%d, max:fw-%d/drv-%d), ",
- "[summary]", pfwinfo->cnt_h2c, pfwinfo->cnt_h2c_fail,
- le16_to_cpu(prptctrl->rpt_info.cnt_h2c), pfwinfo->cnt_c2h,
- le16_to_cpu(prptctrl->rpt_info.cnt_c2h),
- le16_to_cpu(prptctrl->rpt_info.len_c2h),
- (prptctrl->rpt_len_max_h << 8) + prptctrl->rpt_len_max_l,
- rtwdev->btc.ver->info_buf);
-
- seq_printf(m, "rpt_cnt=%d(fw_send:%d), rpt_map=0x%x",
- pfwinfo->event[BTF_EVNT_RPT],
- le16_to_cpu(prptctrl->rpt_info.cnt),
- le32_to_cpu(prptctrl->rpt_info.en));
+ p += scnprintf(p, end - p,
+ "\n\r %-15s : h2c_cnt=%d(fail:%d, fw_recv:%d), c2h_cnt=%d(fw_send:%d, len:%d, max:fw-%d/drv-%d), ",
+ "[summary]", pfwinfo->cnt_h2c,
+ pfwinfo->cnt_h2c_fail,
+ le16_to_cpu(prptctrl->rpt_info.cnt_h2c),
+ pfwinfo->cnt_c2h,
+ le16_to_cpu(prptctrl->rpt_info.cnt_c2h),
+ le16_to_cpu(prptctrl->rpt_info.len_c2h),
+ (prptctrl->rpt_len_max_h << 8) + prptctrl->rpt_len_max_l,
+ rtwdev->btc.ver->info_buf);
+
+ p += scnprintf(p, end - p,
+ "rpt_cnt=%d(fw_send:%d), rpt_map=0x%x",
+ pfwinfo->event[BTF_EVNT_RPT],
+ le16_to_cpu(prptctrl->rpt_info.cnt),
+ le32_to_cpu(prptctrl->rpt_info.en));
if (dm->error.map.wl_fw_hang)
- seq_puts(m, " (WL FW Hang!!)");
-
- seq_printf(m, "\n\r %-15s : send_ok:%d, send_fail:%d, recv:%d, ",
- "[mailbox]", le32_to_cpu(prptctrl->bt_mbx_info.cnt_send_ok),
- le32_to_cpu(prptctrl->bt_mbx_info.cnt_send_fail),
- le32_to_cpu(prptctrl->bt_mbx_info.cnt_recv));
-
- seq_printf(m, "A2DP_empty:%d(stop:%d/tx:%d/ack:%d/nack:%d)",
- le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_empty),
- le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_flowctrl),
- le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_tx),
- le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_ack),
- le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_nack));
-
- seq_printf(m,
- "\n\r %-15s : wl_rfk[req:%d/go:%d/reject:%d/tout:%d/time:%dms]",
- "[RFK/LPS]", cx->cnt_wl[BTC_WCNT_RFK_REQ],
- cx->cnt_wl[BTC_WCNT_RFK_GO],
- cx->cnt_wl[BTC_WCNT_RFK_REJECT],
- cx->cnt_wl[BTC_WCNT_RFK_TIMEOUT],
- wl->rfk_info.proc_time);
-
- seq_printf(m, ", bt_rfk[req:%d]",
- le16_to_cpu(prptctrl->bt_cnt[BTC_BCNT_RFK_REQ]));
-
- seq_printf(m, ", AOAC[RF_on:%d/RF_off:%d]",
- le16_to_cpu(prptctrl->rpt_info.cnt_aoac_rf_on),
- le16_to_cpu(prptctrl->rpt_info.cnt_aoac_rf_off));
+ p += scnprintf(p, end - p, " (WL FW Hang!!)");
+
+ p += scnprintf(p, end - p,
+ "\n\r %-15s : send_ok:%d, send_fail:%d, recv:%d, ",
+ "[mailbox]",
+ le32_to_cpu(prptctrl->bt_mbx_info.cnt_send_ok),
+ le32_to_cpu(prptctrl->bt_mbx_info.cnt_send_fail),
+ le32_to_cpu(prptctrl->bt_mbx_info.cnt_recv));
+
+ p += scnprintf(p, end - p,
+ "A2DP_empty:%d(stop:%d/tx:%d/ack:%d/nack:%d)",
+ le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_empty),
+ le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_flowctrl),
+ le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_tx),
+ le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_ack),
+ le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_nack));
+
+ p += scnprintf(p, end - p,
+ "\n\r %-15s : wl_rfk[req:%d/go:%d/reject:%d/tout:%d/time:%dms]",
+ "[RFK/LPS]", cx->cnt_wl[BTC_WCNT_RFK_REQ],
+ cx->cnt_wl[BTC_WCNT_RFK_GO],
+ cx->cnt_wl[BTC_WCNT_RFK_REJECT],
+ cx->cnt_wl[BTC_WCNT_RFK_TIMEOUT],
+ wl->rfk_info.proc_time);
+
+ p += scnprintf(p, end - p, ", bt_rfk[req:%d]",
+ le16_to_cpu(prptctrl->bt_cnt[BTC_BCNT_RFK_REQ]));
+
+ p += scnprintf(p, end - p, ", AOAC[RF_on:%d/RF_off:%d]",
+ le16_to_cpu(prptctrl->rpt_info.cnt_aoac_rf_on),
+ le16_to_cpu(prptctrl->rpt_info.cnt_aoac_rf_off));
} else {
- seq_printf(m,
- "\n\r %-15s : h2c_cnt=%d(fail:%d), c2h_cnt=%d (lps=%d/rf_off=%d)",
- "[summary]",
- pfwinfo->cnt_h2c, pfwinfo->cnt_h2c_fail,
- pfwinfo->cnt_c2h,
- wl->status.map.lps, wl->status.map.rf_off);
+ p += scnprintf(p, end - p,
+ "\n\r %-15s : h2c_cnt=%d(fail:%d), c2h_cnt=%d (lps=%d/rf_off=%d)",
+ "[summary]",
+ pfwinfo->cnt_h2c, pfwinfo->cnt_h2c_fail,
+ pfwinfo->cnt_c2h,
+ wl->status.map.lps, wl->status.map.rf_off);
}
for (i = 0; i < BTC_NCNT_NUM; i++)
cnt_sum += dm->cnt_notify[i];
- seq_printf(m,
- "\n\r %-15s : total=%d, show_coex_info=%d, power_on=%d, init_coex=%d, ",
- "[notify_cnt]",
- cnt_sum, cnt[BTC_NCNT_SHOW_COEX_INFO],
- cnt[BTC_NCNT_POWER_ON], cnt[BTC_NCNT_INIT_COEX]);
+ p += scnprintf(p, end - p,
+ "\n\r %-15s : total=%d, show_coex_info=%d, power_on=%d, init_coex=%d, ",
+ "[notify_cnt]",
+ cnt_sum, cnt[BTC_NCNT_SHOW_COEX_INFO],
+ cnt[BTC_NCNT_POWER_ON], cnt[BTC_NCNT_INIT_COEX]);
- seq_printf(m,
- "power_off=%d, radio_state=%d, role_info=%d, wl_rfk=%d, wl_sta=%d",
- cnt[BTC_NCNT_POWER_OFF], cnt[BTC_NCNT_RADIO_STATE],
- cnt[BTC_NCNT_ROLE_INFO], cnt[BTC_NCNT_WL_RFK],
- cnt[BTC_NCNT_WL_STA]);
+ p += scnprintf(p, end - p,
+ "power_off=%d, radio_state=%d, role_info=%d, wl_rfk=%d, wl_sta=%d",
+ cnt[BTC_NCNT_POWER_OFF], cnt[BTC_NCNT_RADIO_STATE],
+ cnt[BTC_NCNT_ROLE_INFO], cnt[BTC_NCNT_WL_RFK],
+ cnt[BTC_NCNT_WL_STA]);
- seq_printf(m,
- "\n\r %-15s : scan_start=%d, scan_finish=%d, switch_band=%d, switch_chbw=%d, special_pkt=%d, ",
- "[notify_cnt]",
- cnt[BTC_NCNT_SCAN_START], cnt[BTC_NCNT_SCAN_FINISH],
- cnt[BTC_NCNT_SWITCH_BAND], cnt[BTC_NCNT_SWITCH_CHBW],
- cnt[BTC_NCNT_SPECIAL_PACKET]);
+ p += scnprintf(p, end - p,
+ "\n\r %-15s : scan_start=%d, scan_finish=%d, switch_band=%d, switch_chbw=%d, special_pkt=%d, ",
+ "[notify_cnt]",
+ cnt[BTC_NCNT_SCAN_START], cnt[BTC_NCNT_SCAN_FINISH],
+ cnt[BTC_NCNT_SWITCH_BAND], cnt[BTC_NCNT_SWITCH_CHBW],
+ cnt[BTC_NCNT_SPECIAL_PACKET]);
- seq_printf(m, "timer=%d, customerize=%d, hub_msg=%d, chg_fw=%d, send_cc=%d",
- cnt[BTC_NCNT_TIMER], cnt[BTC_NCNT_CUSTOMERIZE],
- rtwdev->btc.hubmsg_cnt, cnt[BTC_NCNT_RESUME_DL_FW],
- cnt[BTC_NCNT_COUNTRYCODE]);
+ p += scnprintf(p, end - p,
+ "timer=%d, customerize=%d, hub_msg=%d, chg_fw=%d, send_cc=%d",
+ cnt[BTC_NCNT_TIMER], cnt[BTC_NCNT_CUSTOMERIZE],
+ rtwdev->btc.hubmsg_cnt, cnt[BTC_NCNT_RESUME_DL_FW],
+ cnt[BTC_NCNT_COUNTRYCODE]);
+
+ return p - buf;
}
-void rtw89_btc_dump_info(struct rtw89_dev *rtwdev, struct seq_file *m)
+ssize_t rtw89_btc_dump_info(struct rtw89_dev *rtwdev, char *buf, size_t bufsz)
{
struct rtw89_fw_suit *fw_suit = &rtwdev->fw.normal;
struct rtw89_btc *btc = &rtwdev->btc;
const struct rtw89_btc_ver *ver = btc->ver;
struct rtw89_btc_cx *cx = &btc->cx;
struct rtw89_btc_bt_info *bt = &cx->bt;
-
- seq_puts(m, "=========================================\n");
- seq_printf(m, "WL FW / BT FW %d.%d.%d.%d / NA\n",
- fw_suit->major_ver, fw_suit->minor_ver,
- fw_suit->sub_ver, fw_suit->sub_idex);
- seq_printf(m, "manual %d\n", btc->manual_ctrl);
-
- seq_puts(m, "=========================================\n");
-
- seq_printf(m, "\n\r %-15s : raw_data[%02x %02x %02x %02x %02x %02x] (type:%s/cnt:%d/same:%d)",
- "[bt_info]",
- bt->raw_info[2], bt->raw_info[3],
- bt->raw_info[4], bt->raw_info[5],
- bt->raw_info[6], bt->raw_info[7],
- bt->raw_info[0] == BTC_BTINFO_AUTO ? "auto" : "reply",
- cx->cnt_bt[BTC_BCNT_INFOUPDATE],
- cx->cnt_bt[BTC_BCNT_INFOSAME]);
-
- seq_puts(m, "\n=========================================\n");
-
- _show_cx_info(rtwdev, m);
- _show_wl_info(rtwdev, m);
- _show_bt_info(rtwdev, m);
- _show_dm_info(rtwdev, m);
- _show_fw_dm_msg(rtwdev, m);
+ char *p = buf, *end = buf + bufsz;
+
+ p += scnprintf(p, end - p,
+ "=========================================\n");
+ p += scnprintf(p, end - p,
+ "WL FW / BT FW %d.%d.%d.%d / NA\n",
+ fw_suit->major_ver, fw_suit->minor_ver,
+ fw_suit->sub_ver, fw_suit->sub_idex);
+ p += scnprintf(p, end - p, "manual %d\n",
+ btc->manual_ctrl);
+
+ p += scnprintf(p, end - p,
+ "=========================================\n");
+
+ p += scnprintf(p, end - p,
+ "\n\r %-15s : raw_data[%02x %02x %02x %02x %02x %02x] (type:%s/cnt:%d/same:%d)",
+ "[bt_info]",
+ bt->raw_info[2], bt->raw_info[3],
+ bt->raw_info[4], bt->raw_info[5],
+ bt->raw_info[6], bt->raw_info[7],
+ bt->raw_info[0] == BTC_BTINFO_AUTO ? "auto" : "reply",
+ cx->cnt_bt[BTC_BCNT_INFOUPDATE],
+ cx->cnt_bt[BTC_BCNT_INFOSAME]);
+
+ p += scnprintf(p, end - p,
+ "\n=========================================\n");
+
+ p += _show_cx_info(rtwdev, p, end - p);
+ p += _show_wl_info(rtwdev, p, end - p);
+ p += _show_bt_info(rtwdev, p, end - p);
+ p += _show_dm_info(rtwdev, p, end - p);
+ p += _show_fw_dm_msg(rtwdev, p, end - p);
if (ver->fcxmreg == 1)
- _show_mreg_v1(rtwdev, m);
+ p += _show_mreg_v1(rtwdev, p, end - p);
else if (ver->fcxmreg == 2)
- _show_mreg_v2(rtwdev, m);
+ p += _show_mreg_v2(rtwdev, p, end - p);
else if (ver->fcxmreg == 7)
- _show_mreg_v7(rtwdev, m);
+ p += _show_mreg_v7(rtwdev, p, end - p);
- _show_gpio_dbg(rtwdev, m);
+ p += _show_gpio_dbg(rtwdev, p, end - p);
if (ver->fcxbtcrpt == 1)
- _show_summary_v1(rtwdev, m);
+ p += _show_summary_v1(rtwdev, p, end - p);
else if (ver->fcxbtcrpt == 4)
- _show_summary_v4(rtwdev, m);
+ p += _show_summary_v4(rtwdev, p, end - p);
else if (ver->fcxbtcrpt == 5)
- _show_summary_v5(rtwdev, m);
+ p += _show_summary_v5(rtwdev, p, end - p);
else if (ver->fcxbtcrpt == 105)
- _show_summary_v105(rtwdev, m);
+ p += _show_summary_v105(rtwdev, p, end - p);
else if (ver->fcxbtcrpt == 7)
- _show_summary_v7(rtwdev, m);
+ p += _show_summary_v7(rtwdev, p, end - p);
else if (ver->fcxbtcrpt == 8)
- _show_summary_v8(rtwdev, m);
+ p += _show_summary_v8(rtwdev, p, end - p);
+
+ return p - buf;
}
void rtw89_coex_recognize_ver(struct rtw89_dev *rtwdev)
@@ -11037,3 +11225,24 @@ out:
rtw89_debug(rtwdev, RTW89_DBG_BTC, "[BTC] use version def[%d] = 0x%08x\n",
(int)(btc->ver - rtw89_btc_ver_defs), btc->ver->fw_ver_code);
}
+
+void rtw89_btc_ntfy_preserve_bt_time(struct rtw89_dev *rtwdev, u32 ms)
+{
+ struct rtw89_btc_bt_link_info *bt_linfo = &rtwdev->btc.cx.bt.link_info;
+ struct rtw89_btc_bt_a2dp_desc a2dp = bt_linfo->a2dp_desc;
+
+ if (test_bit(RTW89_FLAG_SER_HANDLING, rtwdev->flags))
+ return;
+
+ if (!a2dp.exist)
+ return;
+
+ fsleep(ms * 1000);
+}
+EXPORT_SYMBOL(rtw89_btc_ntfy_preserve_bt_time);
+
+void rtw89_btc_ntfy_conn_rfk(struct rtw89_dev *rtwdev, bool state)
+{
+ rtwdev->btc.cx.wl.rfk_info.con_rfk = state;
+}
+EXPORT_SYMBOL(rtw89_btc_ntfy_conn_rfk);
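The dump helpers converted above no longer print through a seq_file; each writes into a caller-supplied buffer and returns the number of bytes it produced, so callers can chain sub-dumpers on the remaining space. A minimal sketch of that accumulation pattern follows; demo_sub() and demo_dump() are hypothetical names introduced only for illustration, and scnprintf() is the real kernel helper the patch relies on.

/* Sketch only: scnprintf() returns the bytes actually written (bounded by
 * its size argument), so 'p' can never advance past 'end'.
 */
static int demo_sub(char *buf, size_t bufsz)
{
	char *p = buf, *end = buf + bufsz;

	p += scnprintf(p, end - p, "sub section\n");

	return p - buf;
}

static ssize_t demo_dump(char *buf, size_t bufsz)
{
	char *p = buf, *end = buf + bufsz;

	p += scnprintf(p, end - p, "header\n");
	p += demo_sub(p, end - p);	/* chain on the remaining space */

	return p - buf;			/* total bytes written */
}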
diff --git a/drivers/net/wireless/realtek/rtw89/coex.h b/drivers/net/wireless/realtek/rtw89/coex.h
index dbdb56e063ef..e3a1fcd79620 100644
--- a/drivers/net/wireless/realtek/rtw89/coex.h
+++ b/drivers/net/wireless/realtek/rtw89/coex.h
@@ -267,10 +267,10 @@ void rtw89_btc_ntfy_scan_finish(struct rtw89_dev *rtwdev, u8 phy_idx);
void rtw89_btc_ntfy_switch_band(struct rtw89_dev *rtwdev, u8 phy_idx, u8 band);
void rtw89_btc_ntfy_specific_packet(struct rtw89_dev *rtwdev,
enum btc_pkt_type pkt_type);
-void rtw89_btc_ntfy_eapol_packet_work(struct work_struct *work);
-void rtw89_btc_ntfy_arp_packet_work(struct work_struct *work);
-void rtw89_btc_ntfy_dhcp_packet_work(struct work_struct *work);
-void rtw89_btc_ntfy_icmp_packet_work(struct work_struct *work);
+void rtw89_btc_ntfy_eapol_packet_work(struct wiphy *wiphy, struct wiphy_work *work);
+void rtw89_btc_ntfy_arp_packet_work(struct wiphy *wiphy, struct wiphy_work *work);
+void rtw89_btc_ntfy_dhcp_packet_work(struct wiphy *wiphy, struct wiphy_work *work);
+void rtw89_btc_ntfy_icmp_packet_work(struct wiphy *wiphy, struct wiphy_work *work);
void rtw89_btc_ntfy_role_info(struct rtw89_dev *rtwdev,
struct rtw89_vif_link *rtwvif_link,
struct rtw89_sta_link *rtwsta_link,
@@ -282,14 +282,16 @@ void rtw89_btc_ntfy_wl_rfk(struct rtw89_dev *rtwdev, u8 phy_map,
void rtw89_btc_ntfy_wl_sta(struct rtw89_dev *rtwdev);
void rtw89_btc_c2h_handle(struct rtw89_dev *rtwdev, struct sk_buff *skb,
u32 len, u8 class, u8 func);
-void rtw89_btc_dump_info(struct rtw89_dev *rtwdev, struct seq_file *m);
-void rtw89_coex_act1_work(struct work_struct *work);
-void rtw89_coex_bt_devinfo_work(struct work_struct *work);
-void rtw89_coex_rfk_chk_work(struct work_struct *work);
+ssize_t rtw89_btc_dump_info(struct rtw89_dev *rtwdev, char *buf, size_t bufsz);
+void rtw89_coex_act1_work(struct wiphy *wiphy, struct wiphy_work *work);
+void rtw89_coex_bt_devinfo_work(struct wiphy *wiphy, struct wiphy_work *work);
+void rtw89_coex_rfk_chk_work(struct wiphy *wiphy, struct wiphy_work *work);
void rtw89_coex_power_on(struct rtw89_dev *rtwdev);
void rtw89_btc_set_policy(struct rtw89_dev *rtwdev, u16 policy_type);
void rtw89_btc_set_policy_v1(struct rtw89_dev *rtwdev, u16 policy_type);
void rtw89_coex_recognize_ver(struct rtw89_dev *rtwdev);
+void rtw89_btc_ntfy_preserve_bt_time(struct rtw89_dev *rtwdev, u32 ms);
+void rtw89_btc_ntfy_conn_rfk(struct rtw89_dev *rtwdev, bool state);
static inline u8 rtw89_btc_phymap(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy_idx,
diff --git a/drivers/net/wireless/realtek/rtw89/core.c b/drivers/net/wireless/realtek/rtw89/core.c
index 85f739f1173d..cc9b014457ac 100644
--- a/drivers/net/wireless/realtek/rtw89/core.c
+++ b/drivers/net/wireless/realtek/rtw89/core.c
@@ -913,16 +913,17 @@ static enum btc_pkt_type
rtw89_core_tx_btc_spec_pkt_notify(struct rtw89_dev *rtwdev,
struct rtw89_core_tx_request *tx_req)
{
+ struct wiphy *wiphy = rtwdev->hw->wiphy;
struct sk_buff *skb = tx_req->skb;
struct udphdr *udphdr;
if (IEEE80211_SKB_CB(skb)->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO) {
- ieee80211_queue_work(rtwdev->hw, &rtwdev->btc.eapol_notify_work);
+ wiphy_work_queue(wiphy, &rtwdev->btc.eapol_notify_work);
return PACKET_EAPOL;
}
if (skb->protocol == htons(ETH_P_ARP)) {
- ieee80211_queue_work(rtwdev->hw, &rtwdev->btc.arp_notify_work);
+ wiphy_work_queue(wiphy, &rtwdev->btc.arp_notify_work);
return PACKET_ARP;
}
@@ -932,14 +933,14 @@ rtw89_core_tx_btc_spec_pkt_notify(struct rtw89_dev *rtwdev,
if (((udphdr->source == htons(67) && udphdr->dest == htons(68)) ||
(udphdr->source == htons(68) && udphdr->dest == htons(67))) &&
skb->len > 282) {
- ieee80211_queue_work(rtwdev->hw, &rtwdev->btc.dhcp_notify_work);
+ wiphy_work_queue(wiphy, &rtwdev->btc.dhcp_notify_work);
return PACKET_DHCP;
}
}
if (skb->protocol == htons(ETH_P_IP) &&
ip_hdr(skb)->protocol == IPPROTO_ICMP) {
- ieee80211_queue_work(rtwdev->hw, &rtwdev->btc.icmp_notify_work);
+ wiphy_work_queue(wiphy, &rtwdev->btc.icmp_notify_work);
return PACKET_ICMP;
}
@@ -2071,17 +2072,17 @@ static void rtw89_stats_trigger_frame(struct rtw89_dev *rtwdev,
}
}
-static void rtw89_cancel_6ghz_probe_work(struct work_struct *work)
+static void rtw89_cancel_6ghz_probe_work(struct wiphy *wiphy, struct wiphy_work *work)
{
struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev,
cancel_6ghz_probe_work);
struct list_head *pkt_list = rtwdev->scan_info.pkt_list;
struct rtw89_pktofld_info *info;
- mutex_lock(&rtwdev->mutex);
+ lockdep_assert_wiphy(wiphy);
if (!rtwdev->scanning)
- goto out;
+ return;
list_for_each_entry(info, &pkt_list[NL80211_BAND_6GHZ], list) {
if (!info->cancel || !test_bit(info->id, rtwdev->pkt_offload))
@@ -2094,9 +2095,6 @@ static void rtw89_cancel_6ghz_probe_work(struct work_struct *work)
* since if during scanning, pkt_list is accessed in bottom half.
*/
}
-
-out:
- mutex_unlock(&rtwdev->mutex);
}
static void rtw89_core_cancel_6ghz_probe_tx(struct rtw89_dev *rtwdev,
@@ -2131,7 +2129,7 @@ static void rtw89_core_cancel_6ghz_probe_tx(struct rtw89_dev *rtwdev,
}
if (queue_work)
- ieee80211_queue_work(rtwdev->hw, &rtwdev->cancel_6ghz_probe_work);
+ wiphy_work_queue(rtwdev->hw->wiphy, &rtwdev->cancel_6ghz_probe_work);
}
static void rtw89_vif_sync_bcn_tsf(struct rtw89_vif_link *rtwvif_link,
@@ -2194,8 +2192,11 @@ static void rtw89_vif_rx_stats_iter(void *data, u8 *mac,
}
pkt_stat->beacon_nr++;
- if (phy_ppdu)
+ if (phy_ppdu) {
ewma_rssi_add(&rtwdev->phystat.bcn_rssi, phy_ppdu->rssi_avg);
+ if (!test_bit(RTW89_FLAG_LOW_POWER_MODE, rtwdev->flags))
+ rtwvif_link->bcn_bw_idx = phy_ppdu->bw_idx;
+ }
pkt_stat->beacon_rate = desc_info->data_rate;
}
@@ -2381,6 +2382,49 @@ static void rtw89_core_validate_rx_signal(struct ieee80211_rx_status *rx_status)
rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL;
}
+static void rtw89_core_update_rx_freq_from_ie(struct rtw89_dev *rtwdev,
+ struct sk_buff *skb,
+ struct ieee80211_rx_status *rx_status)
+{
+ struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;
+ size_t hdr_len, ielen;
+ u8 *variable;
+ int chan;
+
+ if (!rtwdev->chip->rx_freq_frome_ie)
+ return;
+
+ if (!rtwdev->scanning)
+ return;
+
+ if (ieee80211_is_beacon(mgmt->frame_control)) {
+ variable = mgmt->u.beacon.variable;
+ hdr_len = offsetof(struct ieee80211_mgmt,
+ u.beacon.variable);
+ } else if (ieee80211_is_probe_resp(mgmt->frame_control)) {
+ variable = mgmt->u.probe_resp.variable;
+ hdr_len = offsetof(struct ieee80211_mgmt,
+ u.probe_resp.variable);
+ } else {
+ return;
+ }
+
+ if (skb->len > hdr_len)
+ ielen = skb->len - hdr_len;
+ else
+ return;
+
+ /* The parsing code for both 2GHz and 5GHz bands is the same in this
+ * function.
+ */
+ chan = cfg80211_get_ies_channel_number(variable, ielen, NL80211_BAND_2GHZ);
+ if (chan == -1)
+ return;
+
+ rx_status->band = chan > 14 ? RTW89_BAND_5G : RTW89_BAND_2G;
+ rx_status->freq = ieee80211_channel_to_frequency(chan, rx_status->band);
+}
+
static void rtw89_core_rx_to_mac80211(struct rtw89_dev *rtwdev,
struct rtw89_rx_phy_ppdu *phy_ppdu,
struct rtw89_rx_desc_info *desc_info,
@@ -2398,6 +2442,7 @@ static void rtw89_core_rx_to_mac80211(struct rtw89_dev *rtwdev,
rtw89_core_update_rx_status_by_ppdu(rtwdev, rx_status, phy_ppdu);
rtw89_core_update_radiotap(rtwdev, skb_ppdu, rx_status);
rtw89_core_validate_rx_signal(rx_status);
+ rtw89_core_update_rx_freq_from_ie(rtwdev, skb_ppdu, rx_status);
/* In low power mode, it does RX in thread context. */
local_bh_disable();
@@ -3143,13 +3188,14 @@ static void rtw89_core_txq_schedule(struct rtw89_dev *rtwdev, u8 ac, bool *reinv
ieee80211_txq_schedule_end(hw, ac);
}
-static void rtw89_ips_work(struct work_struct *work)
+static void rtw89_ips_work(struct wiphy *wiphy, struct wiphy_work *work)
{
struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev,
ips_work);
- mutex_lock(&rtwdev->mutex);
+
+ lockdep_assert_wiphy(wiphy);
+
rtw89_enter_ips_by_hwflags(rtwdev);
- mutex_unlock(&rtwdev->mutex);
}
static void rtw89_core_txq_work(struct work_struct *w)
@@ -3291,7 +3337,7 @@ void rtw89_roc_start(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
u32 reg;
int ret;
- lockdep_assert_held(&rtwdev->mutex);
+ lockdep_assert_wiphy(hw->wiphy);
rtw89_leave_ips_by_hwflags(rtwdev);
rtw89_leave_lps(rtwdev);
@@ -3330,9 +3376,9 @@ void rtw89_roc_start(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
rtw89_write32_clr(rtwdev, reg, B_AX_A_UC_CAM_MATCH | B_AX_A_BC_CAM_MATCH);
ieee80211_ready_on_channel(hw);
- cancel_delayed_work(&rtwvif->roc.roc_work);
- ieee80211_queue_delayed_work(hw, &rtwvif->roc.roc_work,
- msecs_to_jiffies(rtwvif->roc.duration));
+ wiphy_delayed_work_cancel(hw->wiphy, &rtwvif->roc.roc_work);
+ wiphy_delayed_work_queue(hw->wiphy, &rtwvif->roc.roc_work,
+ msecs_to_jiffies(rtwvif->roc.duration));
}
void rtw89_roc_end(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
@@ -3345,7 +3391,7 @@ void rtw89_roc_end(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
u32 reg;
int ret;
- lockdep_assert_held(&rtwdev->mutex);
+ lockdep_assert_wiphy(hw->wiphy);
ieee80211_remain_on_channel_expired(hw);
@@ -3377,18 +3423,18 @@ void rtw89_roc_end(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
queue_work(rtwdev->txq_wq, &rtwdev->txq_work);
if (hw->conf.flags & IEEE80211_CONF_IDLE)
- ieee80211_queue_delayed_work(hw, &roc->roc_work,
- msecs_to_jiffies(RTW89_ROC_IDLE_TIMEOUT));
+ wiphy_delayed_work_queue(hw->wiphy, &roc->roc_work,
+ msecs_to_jiffies(RTW89_ROC_IDLE_TIMEOUT));
}
-void rtw89_roc_work(struct work_struct *work)
+void rtw89_roc_work(struct wiphy *wiphy, struct wiphy_work *work)
{
struct rtw89_vif *rtwvif = container_of(work, struct rtw89_vif,
roc.roc_work.work);
struct rtw89_dev *rtwdev = rtwvif->rtwdev;
struct rtw89_roc *roc = &rtwvif->roc;
- mutex_lock(&rtwdev->mutex);
+ lockdep_assert_wiphy(wiphy);
switch (roc->state) {
case RTW89_ROC_IDLE:
@@ -3401,8 +3447,6 @@ void rtw89_roc_work(struct work_struct *work)
default:
break;
}
-
- mutex_unlock(&rtwdev->mutex);
}
static enum rtw89_tfc_lv rtw89_get_traffic_level(struct rtw89_dev *rtwdev,
@@ -3533,26 +3577,26 @@ void rtw89_traffic_stats_init(struct rtw89_dev *rtwdev,
ewma_tp_init(&stats->rx_ewma_tp);
}
-static void rtw89_track_work(struct work_struct *work)
+static void rtw89_track_work(struct wiphy *wiphy, struct wiphy_work *work)
{
struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev,
track_work.work);
bool tfc_changed;
+ lockdep_assert_wiphy(wiphy);
+
if (test_bit(RTW89_FLAG_FORBIDDEN_TRACK_WROK, rtwdev->flags))
return;
- mutex_lock(&rtwdev->mutex);
-
if (!test_bit(RTW89_FLAG_RUNNING, rtwdev->flags))
- goto out;
+ return;
- ieee80211_queue_delayed_work(rtwdev->hw, &rtwdev->track_work,
- RTW89_TRACK_WORK_PERIOD);
+ wiphy_delayed_work_queue(wiphy, &rtwdev->track_work,
+ RTW89_TRACK_WORK_PERIOD);
tfc_changed = rtw89_traffic_stats_track(rtwdev);
if (rtwdev->scanning)
- goto out;
+ return;
rtw89_leave_lps(rtwdev);
@@ -3577,9 +3621,6 @@ static void rtw89_track_work(struct work_struct *work)
if (rtwdev->lps_enabled && !rtwdev->btc.lps)
rtw89_enter_lps_track(rtwdev);
-
-out:
- mutex_unlock(&rtwdev->mutex);
}
u8 rtw89_core_acquire_bit_map(unsigned long *addr, unsigned long size)
@@ -3613,7 +3654,7 @@ int rtw89_core_acquire_sta_ba_entry(struct rtw89_dev *rtwdev,
u8 idx;
int i;
- lockdep_assert_held(&rtwdev->mutex);
+ lockdep_assert_wiphy(rtwdev->hw->wiphy);
idx = rtw89_core_acquire_bit_map(cam_info->ba_cam_map, chip->bacam_num);
if (idx == chip->bacam_num) {
@@ -3657,7 +3698,7 @@ int rtw89_core_release_sta_ba_entry(struct rtw89_dev *rtwdev,
struct rtw89_ba_cam_entry *entry = NULL, *tmp;
u8 idx;
- lockdep_assert_held(&rtwdev->mutex);
+ lockdep_assert_wiphy(rtwdev->hw->wiphy);
list_for_each_entry_safe(entry, tmp, &rtwsta_link->ba_cam_list, list) {
if (entry->tid != tid)
@@ -4432,26 +4473,26 @@ static void rtw89_core_ppdu_sts_init(struct rtw89_dev *rtwdev)
{
int i;
- for (i = 0; i < RTW89_PHY_MAX; i++)
+ for (i = 0; i < RTW89_PHY_NUM; i++)
skb_queue_head_init(&rtwdev->ppdu_sts.rx_queue[i]);
- for (i = 0; i < RTW89_PHY_MAX; i++)
+ for (i = 0; i < RTW89_PHY_NUM; i++)
rtwdev->ppdu_sts.curr_rx_ppdu_cnt[i] = U8_MAX;
}
-void rtw89_core_update_beacon_work(struct work_struct *work)
+void rtw89_core_update_beacon_work(struct wiphy *wiphy, struct wiphy_work *work)
{
struct rtw89_dev *rtwdev;
struct rtw89_vif_link *rtwvif_link = container_of(work, struct rtw89_vif_link,
update_beacon_work);
+ lockdep_assert_wiphy(wiphy);
+
if (rtwvif_link->net_type != RTW89_NET_TYPE_AP_MODE)
return;
rtwdev = rtwvif_link->rtwvif->rtwdev;
- mutex_lock(&rtwdev->mutex);
rtw89_chip_h2c_update_beacon(rtwdev, rtwvif_link);
- mutex_unlock(&rtwdev->mutex);
}
int rtw89_wait_for_cond(struct rtw89_wait_info *wait, unsigned int cond)
@@ -4560,16 +4601,14 @@ int rtw89_core_start(struct rtw89_dev *rtwdev)
rtw89_mac_cfg_phy_rpt_bands(rtwdev, true);
rtw89_mac_update_rts_threshold(rtwdev);
- rtw89_tas_reset(rtwdev);
-
ret = rtw89_hci_start(rtwdev);
if (ret) {
rtw89_err(rtwdev, "failed to start hci\n");
return ret;
}
- ieee80211_queue_delayed_work(rtwdev->hw, &rtwdev->track_work,
- RTW89_TRACK_WORK_PERIOD);
+ wiphy_delayed_work_queue(rtwdev->hw->wiphy, &rtwdev->track_work,
+ RTW89_TRACK_WORK_PERIOD);
set_bit(RTW89_FLAG_RUNNING, rtwdev->flags);
@@ -4583,8 +4622,11 @@ int rtw89_core_start(struct rtw89_dev *rtwdev)
void rtw89_core_stop(struct rtw89_dev *rtwdev)
{
+ struct wiphy *wiphy = rtwdev->hw->wiphy;
struct rtw89_btc *btc = &rtwdev->btc;
+ lockdep_assert_wiphy(wiphy);
+
	/* Prevent stopping twice; enter_ips and ops_stop */
if (!test_bit(RTW89_FLAG_RUNNING, rtwdev->flags))
return;
@@ -4593,25 +4635,21 @@ void rtw89_core_stop(struct rtw89_dev *rtwdev)
clear_bit(RTW89_FLAG_RUNNING, rtwdev->flags);
- mutex_unlock(&rtwdev->mutex);
-
- cancel_work_sync(&rtwdev->c2h_work);
- cancel_work_sync(&rtwdev->cancel_6ghz_probe_work);
- cancel_work_sync(&btc->eapol_notify_work);
- cancel_work_sync(&btc->arp_notify_work);
- cancel_work_sync(&btc->dhcp_notify_work);
- cancel_work_sync(&btc->icmp_notify_work);
+ wiphy_work_cancel(wiphy, &rtwdev->c2h_work);
+ wiphy_work_cancel(wiphy, &rtwdev->cancel_6ghz_probe_work);
+ wiphy_work_cancel(wiphy, &btc->eapol_notify_work);
+ wiphy_work_cancel(wiphy, &btc->arp_notify_work);
+ wiphy_work_cancel(wiphy, &btc->dhcp_notify_work);
+ wiphy_work_cancel(wiphy, &btc->icmp_notify_work);
cancel_delayed_work_sync(&rtwdev->txq_reinvoke_work);
- cancel_delayed_work_sync(&rtwdev->track_work);
- cancel_delayed_work_sync(&rtwdev->chanctx_work);
- cancel_delayed_work_sync(&rtwdev->coex_act1_work);
- cancel_delayed_work_sync(&rtwdev->coex_bt_devinfo_work);
- cancel_delayed_work_sync(&rtwdev->coex_rfk_chk_work);
- cancel_delayed_work_sync(&rtwdev->cfo_track_work);
+ wiphy_delayed_work_cancel(wiphy, &rtwdev->track_work);
+ wiphy_delayed_work_cancel(wiphy, &rtwdev->chanctx_work);
+ wiphy_delayed_work_cancel(wiphy, &rtwdev->coex_act1_work);
+ wiphy_delayed_work_cancel(wiphy, &rtwdev->coex_bt_devinfo_work);
+ wiphy_delayed_work_cancel(wiphy, &rtwdev->coex_rfk_chk_work);
+ wiphy_delayed_work_cancel(wiphy, &rtwdev->cfo_track_work);
cancel_delayed_work_sync(&rtwdev->forbid_ba_work);
- cancel_delayed_work_sync(&rtwdev->antdiv_work);
-
- mutex_lock(&rtwdev->mutex);
+ wiphy_delayed_work_cancel(wiphy, &rtwdev->antdiv_work);
rtw89_btc_ntfy_poweroff(rtwdev);
rtw89_hci_flush_queues(rtwdev, BIT(rtwdev->hw->queues) - 1, true);
@@ -4825,20 +4863,19 @@ int rtw89_core_init(struct rtw89_dev *rtwdev)
INIT_WORK(&rtwdev->ba_work, rtw89_core_ba_work);
INIT_WORK(&rtwdev->txq_work, rtw89_core_txq_work);
INIT_DELAYED_WORK(&rtwdev->txq_reinvoke_work, rtw89_core_txq_reinvoke_work);
- INIT_DELAYED_WORK(&rtwdev->track_work, rtw89_track_work);
- INIT_DELAYED_WORK(&rtwdev->chanctx_work, rtw89_chanctx_work);
- INIT_DELAYED_WORK(&rtwdev->coex_act1_work, rtw89_coex_act1_work);
- INIT_DELAYED_WORK(&rtwdev->coex_bt_devinfo_work, rtw89_coex_bt_devinfo_work);
- INIT_DELAYED_WORK(&rtwdev->coex_rfk_chk_work, rtw89_coex_rfk_chk_work);
- INIT_DELAYED_WORK(&rtwdev->cfo_track_work, rtw89_phy_cfo_track_work);
+ wiphy_delayed_work_init(&rtwdev->track_work, rtw89_track_work);
+ wiphy_delayed_work_init(&rtwdev->chanctx_work, rtw89_chanctx_work);
+ wiphy_delayed_work_init(&rtwdev->coex_act1_work, rtw89_coex_act1_work);
+ wiphy_delayed_work_init(&rtwdev->coex_bt_devinfo_work, rtw89_coex_bt_devinfo_work);
+ wiphy_delayed_work_init(&rtwdev->coex_rfk_chk_work, rtw89_coex_rfk_chk_work);
+ wiphy_delayed_work_init(&rtwdev->cfo_track_work, rtw89_phy_cfo_track_work);
INIT_DELAYED_WORK(&rtwdev->forbid_ba_work, rtw89_forbid_ba_work);
- INIT_DELAYED_WORK(&rtwdev->antdiv_work, rtw89_phy_antdiv_work);
+ wiphy_delayed_work_init(&rtwdev->antdiv_work, rtw89_phy_antdiv_work);
rtwdev->txq_wq = alloc_workqueue("rtw89_tx_wq", WQ_UNBOUND | WQ_HIGHPRI, 0);
if (!rtwdev->txq_wq)
return -ENOMEM;
spin_lock_init(&rtwdev->ba_lock);
spin_lock_init(&rtwdev->rpwm_lock);
- mutex_init(&rtwdev->mutex);
mutex_init(&rtwdev->rf_mutex);
rtwdev->total_sta_assoc = 0;
@@ -4847,10 +4884,10 @@ int rtw89_core_init(struct rtw89_dev *rtwdev)
rtw89_init_wait(&rtwdev->wow.wait);
rtw89_init_wait(&rtwdev->mac.ps_wait);
- INIT_WORK(&rtwdev->c2h_work, rtw89_fw_c2h_work);
- INIT_WORK(&rtwdev->ips_work, rtw89_ips_work);
+ wiphy_work_init(&rtwdev->c2h_work, rtw89_fw_c2h_work);
+ wiphy_work_init(&rtwdev->ips_work, rtw89_ips_work);
+ wiphy_work_init(&rtwdev->cancel_6ghz_probe_work, rtw89_cancel_6ghz_probe_work);
INIT_WORK(&rtwdev->load_firmware_work, rtw89_load_firmware_work);
- INIT_WORK(&rtwdev->cancel_6ghz_probe_work, rtw89_cancel_6ghz_probe_work);
skb_queue_head_init(&rtwdev->c2h_queue);
rtw89_core_ppdu_sts_init(rtwdev);
@@ -4867,10 +4904,13 @@ int rtw89_core_init(struct rtw89_dev *rtwdev)
rtwdev->mlo_dbcc_mode = MLO_2_PLUS_0_1RF;
}
- INIT_WORK(&btc->eapol_notify_work, rtw89_btc_ntfy_eapol_packet_work);
- INIT_WORK(&btc->arp_notify_work, rtw89_btc_ntfy_arp_packet_work);
- INIT_WORK(&btc->dhcp_notify_work, rtw89_btc_ntfy_dhcp_packet_work);
- INIT_WORK(&btc->icmp_notify_work, rtw89_btc_ntfy_icmp_packet_work);
+ rtwdev->bbs[RTW89_PHY_0].phy_idx = RTW89_PHY_0;
+ rtwdev->bbs[RTW89_PHY_1].phy_idx = RTW89_PHY_1;
+
+ wiphy_work_init(&btc->eapol_notify_work, rtw89_btc_ntfy_eapol_packet_work);
+ wiphy_work_init(&btc->arp_notify_work, rtw89_btc_ntfy_arp_packet_work);
+ wiphy_work_init(&btc->dhcp_notify_work, rtw89_btc_ntfy_dhcp_packet_work);
+ wiphy_work_init(&btc->icmp_notify_work, rtw89_btc_ntfy_icmp_packet_work);
init_completion(&rtwdev->fw.req.completion);
init_completion(&rtwdev->rfk_wait.completion);
@@ -4890,11 +4930,10 @@ void rtw89_core_deinit(struct rtw89_dev *rtwdev)
{
rtw89_ser_deinit(rtwdev);
rtw89_unload_firmware(rtwdev);
- rtw89_fw_free_all_early_h2c(rtwdev);
+ __rtw89_fw_free_all_early_h2c(rtwdev);
destroy_workqueue(rtwdev->txq_wq);
mutex_destroy(&rtwdev->rf_mutex);
- mutex_destroy(&rtwdev->mutex);
}
EXPORT_SYMBOL(rtw89_core_deinit);
@@ -4903,6 +4942,7 @@ void rtw89_core_scan_start(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwv
{
const struct rtw89_chan *chan = rtw89_chan_get(rtwdev,
rtwvif_link->chanctx_idx);
+ struct rtw89_bb_ctx *bb = rtw89_get_bb_ctx(rtwdev, rtwvif_link->phy_idx);
rtwdev->scanning = true;
rtw89_leave_lps(rtwdev);
@@ -4913,7 +4953,8 @@ void rtw89_core_scan_start(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwv
rtw89_btc_ntfy_scan_start(rtwdev, rtwvif_link->phy_idx, chan->band_type);
rtw89_chip_rfk_scan(rtwdev, rtwvif_link, true);
rtw89_hci_recalc_int_mit(rtwdev);
- rtw89_phy_config_edcca(rtwdev, true);
+ rtw89_phy_config_edcca(rtwdev, bb, true);
+ rtw89_tas_scan(rtwdev, true);
rtw89_fw_h2c_cam(rtwdev, rtwvif_link, NULL, mac_addr);
}
@@ -4922,6 +4963,7 @@ void rtw89_core_scan_complete(struct rtw89_dev *rtwdev,
struct rtw89_vif_link *rtwvif_link, bool hw_scan)
{
struct ieee80211_bss_conf *bss_conf;
+ struct rtw89_bb_ctx *bb;
if (!rtwvif_link)
return;
@@ -4937,12 +4979,15 @@ void rtw89_core_scan_complete(struct rtw89_dev *rtwdev,
rtw89_chip_rfk_scan(rtwdev, rtwvif_link, false);
rtw89_btc_ntfy_scan_finish(rtwdev, rtwvif_link->phy_idx);
- rtw89_phy_config_edcca(rtwdev, false);
+ bb = rtw89_get_bb_ctx(rtwdev, rtwvif_link->phy_idx);
+ rtw89_phy_config_edcca(rtwdev, bb, false);
+ rtw89_tas_scan(rtwdev, false);
rtwdev->scanning = false;
- rtwdev->dig.bypass_dig = true;
+ rtw89_for_each_active_bb(rtwdev, bb)
+ bb->dig.bypass_dig = true;
if (hw_scan && (rtwdev->hw->conf.flags & IEEE80211_CONF_IDLE))
- ieee80211_queue_work(rtwdev->hw, &rtwdev->ips_work);
+ wiphy_work_queue(rtwdev->hw->wiphy, &rtwdev->ips_work);
}
static void rtw89_read_chip_ver(struct rtw89_dev *rtwdev)
@@ -5042,8 +5087,6 @@ static int rtw89_chip_efuse_info_setup(struct rtw89_dev *rtwdev)
rtw89_hci_mac_pre_deinit(rtwdev);
- rtw89_mac_pwr_off(rtwdev);
-
return 0;
}
@@ -5124,36 +5167,45 @@ int rtw89_chip_info_setup(struct rtw89_dev *rtwdev)
rtw89_read_chip_ver(rtwdev);
+ ret = rtw89_mac_pwr_on(rtwdev);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to power on\n");
+ return ret;
+ }
+
ret = rtw89_wait_firmware_completion(rtwdev);
if (ret) {
rtw89_err(rtwdev, "failed to wait firmware completion\n");
- return ret;
+ goto out;
}
ret = rtw89_fw_recognize(rtwdev);
if (ret) {
rtw89_err(rtwdev, "failed to recognize firmware\n");
- return ret;
+ goto out;
}
ret = rtw89_chip_efuse_info_setup(rtwdev);
if (ret)
- return ret;
+ goto out;
ret = rtw89_fw_recognize_elements(rtwdev);
if (ret) {
rtw89_err(rtwdev, "failed to recognize firmware elements\n");
- return ret;
+ goto out;
}
ret = rtw89_chip_board_info_setup(rtwdev);
if (ret)
- return ret;
+ goto out;
rtw89_core_setup_rfe_parms(rtwdev);
rtwdev->ps_mode = rtw89_update_ps_mode(rtwdev);
- return 0;
+out:
+ rtw89_mac_pwr_off(rtwdev);
+
+ return ret;
}
EXPORT_SYMBOL(rtw89_chip_info_setup);
@@ -5296,7 +5348,7 @@ static int rtw89_core_register_hw(struct rtw89_dev *rtwdev)
goto err_free_supported_band;
}
- ret = rtw89_regd_init(rtwdev, rtw89_regd_notifier);
+ ret = rtw89_regd_init_hint(rtwdev);
if (ret) {
rtw89_err(rtwdev, "failed to init regd\n");
goto err_unregister_hw;
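The core.c hunks above move the driver's deferred work from the private rtwdev->mutex onto the wiphy mutex: handlers now take a (wiphy, work) pair and assert the wiphy lock instead of locking and unlocking the driver mutex themselves. A hedged sketch of that pattern, with hypothetical demo_* names; wiphy_work_init(), wiphy_work_queue() and lockdep_assert_wiphy() are the real cfg80211 interfaces the patch uses.

#include <net/cfg80211.h>

struct demo_dev {
	struct wiphy *wiphy;
	struct wiphy_work work;
};

static void demo_work_fn(struct wiphy *wiphy, struct wiphy_work *work)
{
	struct demo_dev *dev = container_of(work, struct demo_dev, work);

	lockdep_assert_wiphy(wiphy);	/* the wiphy mutex is held for us */
	/* ... act on dev without taking a driver-private mutex ... */
}

static void demo_start(struct demo_dev *dev)
{
	wiphy_work_init(&dev->work, demo_work_fn);
	wiphy_work_queue(dev->wiphy, &dev->work);
}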
diff --git a/drivers/net/wireless/realtek/rtw89/core.h b/drivers/net/wireless/realtek/rtw89/core.h
index ff4894c7fa8a..4be05d6cad18 100644
--- a/drivers/net/wireless/realtek/rtw89/core.h
+++ b/drivers/net/wireless/realtek/rtw89/core.h
@@ -17,11 +17,13 @@ struct rtw89_dev;
struct rtw89_pci_info;
struct rtw89_mac_gen_def;
struct rtw89_phy_gen_def;
+struct rtw89_fw_blacklist;
struct rtw89_efuse_block_cfg;
struct rtw89_h2c_rf_tssi;
struct rtw89_fw_txpwr_track_cfg;
struct rtw89_phy_rfk_log_fmt;
struct rtw89_debugfs;
+struct rtw89_regd_data;
extern const struct ieee80211_ops rtw89_ops;
@@ -718,6 +720,7 @@ enum rtw89_ofdma_type {
RTW89_OFDMA_NUM,
};
+/* neither insert new in the middle, nor change any given definition */
enum rtw89_regulation_type {
RTW89_WW = 0,
RTW89_ETSI = 1,
@@ -826,7 +829,7 @@ enum rtw89_mac_idx {
enum rtw89_phy_idx {
RTW89_PHY_0 = 0,
RTW89_PHY_1 = 1,
- RTW89_PHY_MAX
+ RTW89_PHY_NUM,
};
#define __RTW89_MLD_MAX_LINK_NUM 2
@@ -1540,16 +1543,16 @@ struct rtw89_btc_u8_sta_chg {
};
struct rtw89_btc_wl_scan_info {
- u8 band[RTW89_PHY_MAX];
+ u8 band[RTW89_PHY_NUM];
u8 phy_map;
u8 rsvd;
};
struct rtw89_btc_wl_dbcc_info {
- u8 op_band[RTW89_PHY_MAX]; /* op band in each phy */
- u8 scan_band[RTW89_PHY_MAX]; /* scan band in each phy */
- u8 real_band[RTW89_PHY_MAX];
- u8 role[RTW89_PHY_MAX]; /* role in each phy */
+ u8 op_band[RTW89_PHY_NUM]; /* op band in each phy */
+ u8 scan_band[RTW89_PHY_NUM]; /* scan band in each phy */
+ u8 real_band[RTW89_PHY_NUM];
+ u8 role[RTW89_PHY_NUM]; /* role in each phy */
};
struct rtw89_btc_wl_active_role {
@@ -1761,7 +1764,8 @@ struct rtw89_btc_wl_rfk_info {
u32 phy_map: 2;
u32 band: 2;
u32 type: 8;
- u32 rsvd: 14;
+ u32 con_rfk: 1;
+ u32 rsvd: 13;
u32 start_time;
u32 proc_time;
@@ -1898,7 +1902,7 @@ struct rtw89_btc_wl_info {
u8 cn_report;
u8 coex_mode;
u8 pta_req_mac;
- u8 bt_polut_type[RTW89_PHY_MAX]; /* BT polluted WL-Tx type for phy0/1 */
+ u8 bt_polut_type[RTW89_PHY_NUM]; /* BT polluted WL-Tx type for phy0/1 */
bool is_5g_hi_channel;
bool pta_reg_mac_chg;
@@ -2230,7 +2234,7 @@ struct rtw89_btc_fbtc_rpt_ctrl_v4 {
struct rtw89_btc_fbtc_rpt_ctrl_wl_fw_info wl_fw_info;
struct rtw89_btc_fbtc_rpt_ctrl_bt_mailbox bt_mbx_info;
__le32 bt_cnt[BTC_BCNT_STA_MAX];
- struct rtw89_mac_ax_gnt gnt_val[RTW89_PHY_MAX];
+ struct rtw89_mac_ax_gnt gnt_val[RTW89_PHY_NUM];
} __packed;
struct rtw89_btc_fbtc_rpt_ctrl_v5 {
@@ -2238,7 +2242,7 @@ struct rtw89_btc_fbtc_rpt_ctrl_v5 {
u8 rsvd;
__le16 rsvd1;
- u8 gnt_val[RTW89_PHY_MAX][4];
+ u8 gnt_val[RTW89_PHY_NUM][4];
__le16 bt_cnt[BTC_BCNT_STA_MAX];
struct rtw89_btc_fbtc_rpt_ctrl_info_v5 rpt_info;
@@ -2250,7 +2254,7 @@ struct rtw89_btc_fbtc_rpt_ctrl_v105 {
u8 rsvd;
__le16 rsvd1;
- u8 gnt_val[RTW89_PHY_MAX][4];
+ u8 gnt_val[RTW89_PHY_NUM][4];
__le16 bt_cnt[BTC_BCNT_STA_MAX_V105];
struct rtw89_btc_fbtc_rpt_ctrl_info_v5 rpt_info;
@@ -2263,7 +2267,7 @@ struct rtw89_btc_fbtc_rpt_ctrl_v7 {
u8 rsvd1;
u8 rsvd2;
- u8 gnt_val[RTW89_PHY_MAX][4];
+ u8 gnt_val[RTW89_PHY_NUM][4];
__le16 bt_cnt[BTC_BCNT_STA_MAX_V105];
struct rtw89_btc_fbtc_rpt_ctrl_info_v8 rpt_info;
@@ -2276,7 +2280,7 @@ struct rtw89_btc_fbtc_rpt_ctrl_v8 {
u8 rpt_len_max_l; /* BTC_RPT_MAX bit0~7 */
u8 rpt_len_max_h; /* BTC_RPT_MAX bit8~15 */
- u8 gnt_val[RTW89_PHY_MAX][4];
+ u8 gnt_val[RTW89_PHY_NUM][4];
__le16 bt_cnt[BTC_BCNT_STA_MAX_V105];
struct rtw89_btc_fbtc_rpt_ctrl_info_v8 rpt_info;
@@ -3040,6 +3044,7 @@ struct rtw89_btc_rpt_cmn_info {
union rtw89_btc_fbtc_btafh_info {
struct rtw89_btc_fbtc_btafh v1;
struct rtw89_btc_fbtc_btafh_v2 v2;
+ struct rtw89_btc_fbtc_btafh_v7 v7;
};
struct rtw89_btc_report_ctrl_state {
@@ -3174,10 +3179,10 @@ struct rtw89_btc {
struct rtw89_btc_btf_fwinfo fwinfo;
struct rtw89_btc_dbg dbg;
- struct work_struct eapol_notify_work;
- struct work_struct arp_notify_work;
- struct work_struct dhcp_notify_work;
- struct work_struct icmp_notify_work;
+ struct wiphy_work eapol_notify_work;
+ struct wiphy_work arp_notify_work;
+ struct wiphy_work dhcp_notify_work;
+ struct wiphy_work icmp_notify_work;
u32 bt_req_len;
@@ -3444,7 +3449,7 @@ enum rtw89_roc_state {
struct rtw89_roc {
struct ieee80211_channel chan;
- struct delayed_work roc_work;
+ struct wiphy_delayed_work roc_work;
enum ieee80211_roc_type type;
enum rtw89_roc_state state;
int duration;
@@ -3498,6 +3503,7 @@ struct rtw89_vif_link {
u8 self_role;
u8 wmm;
u8 bcn_hit_cond;
+ u8 bcn_bw_idx;
u8 hit_rule;
u8 last_noa_nr;
u64 sync_bcn_tsf;
@@ -3514,7 +3520,7 @@ struct rtw89_vif_link {
bool pre_pwr_diff_en;
bool pwr_diff_en;
u8 def_tri_idx;
- struct work_struct update_beacon_work;
+ struct wiphy_work update_beacon_work;
struct rtw89_addr_cam_entry addr_cam;
struct rtw89_bssid_cam_entry bssid_cam;
struct ieee80211_tx_queue_params tx_params[IEEE80211_NUM_ACS];
@@ -3672,6 +3678,8 @@ struct rtw89_chip_ops {
int (*h2c_ampdu_cmac_tbl)(struct rtw89_dev *rtwdev,
struct rtw89_vif_link *rtwvif_link,
struct rtw89_sta_link *rtwsta_link);
+ int (*h2c_txtime_cmac_tbl)(struct rtw89_dev *rtwdev,
+ struct rtw89_sta_link *rtwsta_link);
int (*h2c_default_dmac_tbl)(struct rtw89_dev *rtwdev,
struct rtw89_vif_link *rtwvif_link,
struct rtw89_sta_link *rtwsta_link);
@@ -4189,10 +4197,12 @@ struct rtw89_edcca_regs {
u32 edcca_p_mask;
u32 ppdu_level;
u32 ppdu_mask;
- u32 rpt_a;
- u32 rpt_b;
- u32 rpt_sel;
- u32 rpt_sel_mask;
+ struct rtw89_edcca_p_regs {
+ u32 rpt_a;
+ u32 rpt_b;
+ u32 rpt_sel;
+ u32 rpt_sel_mask;
+ } p[RTW89_PHY_NUM];
u32 rpt_sel_be;
u32 rpt_sel_be_mask;
u32 tx_collision_t2r_st;
@@ -4231,6 +4241,7 @@ enum rtw89_chanctx_state {
enum rtw89_chanctx_callbacks {
RTW89_CHANCTX_CALLBACK_PLACEHOLDER,
RTW89_CHANCTX_CALLBACK_RFK,
+ RTW89_CHANCTX_CALLBACK_TAS,
NUM_OF_RTW89_CHANCTX_CALLBACKS,
};
@@ -4251,6 +4262,7 @@ struct rtw89_chip_info {
bool try_ce_fw;
u8 bbmcu_nr;
u32 needed_fw_elms;
+ const struct rtw89_fw_blacklist *fw_blacklist;
u32 fifo_size;
bool small_fifo_size;
u32 dle_scc_rsvd_size;
@@ -4271,10 +4283,13 @@ struct rtw89_chip_info {
bool support_unii4;
bool support_rnr;
bool support_ant_gain;
+ bool support_tas;
bool ul_tb_waveform_ctrl;
bool ul_tb_pwr_diff;
+ bool rx_freq_frome_ie;
bool hw_sec_hdr;
bool hw_mgmt_tx_encrypt;
+ bool hw_tkip_crypto;
u8 rf_path_num;
u8 tx_nss;
u8 rx_nss;
@@ -4478,6 +4493,7 @@ enum rtw89_fw_feature {
RTW89_FW_FEATURE_CH_INFO_BE_V0,
RTW89_FW_FEATURE_LPS_CH_INFO,
RTW89_FW_FEATURE_NO_PHYCAP_P1,
+ RTW89_FW_FEATURE_NO_POWER_DIFFERENCE,
};
struct rtw89_fw_suit {
@@ -4536,6 +4552,7 @@ struct rtw89_fw_elm_info {
struct rtw89_phy_table *rf_nctl;
struct rtw89_fw_txpwr_track_cfg *txpwr_trk;
struct rtw89_phy_rfk_log_fmt *rfk_log_fmt;
+ const struct rtw89_regd_data *regd;
};
enum rtw89_fw_mss_dev_type {
@@ -4651,6 +4668,7 @@ enum rtw89_ant_gain_domain_type {
struct rtw89_ant_gain_info {
s8 offset[RTW89_ANT_GAIN_CHAIN_NUM][RTW89_ANT_GAIN_SUBBAND_NR];
u32 regd_enabled;
+ bool block_country;
};
struct rtw89_6ghz_span {
@@ -4666,18 +4684,29 @@ struct rtw89_6ghz_span {
enum rtw89_tas_state {
RTW89_TAS_STATE_DPR_OFF,
RTW89_TAS_STATE_DPR_ON,
- RTW89_TAS_STATE_DPR_FORBID,
+ RTW89_TAS_STATE_STATIC_SAR,
};
-#define RTW89_TAS_MAX_WINDOW 50
+#define RTW89_TAS_TX_RATIO_WINDOW 6
+#define RTW89_TAS_TXPWR_WINDOW 180
struct rtw89_tas_info {
- s16 txpwr_history[RTW89_TAS_MAX_WINDOW];
- s32 total_txpwr;
- u8 cur_idx;
- s8 dpr_gap;
- s8 delta;
+ u16 tx_ratio_history[RTW89_TAS_TX_RATIO_WINDOW];
+ u64 txpwr_history[RTW89_TAS_TXPWR_WINDOW];
+ u8 txpwr_head_idx;
+ u8 txpwr_tail_idx;
+ u8 tx_ratio_idx;
+ u16 total_tx_ratio;
+ u64 total_txpwr;
+ u64 instant_txpwr;
+ u32 window_size;
+ s8 dpr_on_threshold;
+ s8 dpr_off_threshold;
+ enum rtw89_tas_state backup_state;
enum rtw89_tas_state state;
+ bool keep_history;
+ bool block_regd;
bool enable;
+ bool pause;
};
struct rtw89_chanctx_cfg {
@@ -4735,6 +4764,7 @@ struct rtw89_edcca_bak {
enum rtw89_dm_type {
RTW89_DM_DYNAMIC_EDCCA,
RTW89_DM_THERMAL_PROTECT,
+ RTW89_DM_TAS,
};
#define RTW89_THERMAL_PROT_LV_MAX 5
@@ -4762,12 +4792,11 @@ struct rtw89_hal {
struct rtw89_chanctx chanctx[NUM_OF_RTW89_CHANCTX];
struct cfg80211_chan_def roc_chandef;
- bool entity_active[RTW89_PHY_MAX];
+ bool entity_active[RTW89_PHY_NUM];
bool entity_pause;
enum rtw89_entity_mode entity_mode;
struct rtw89_entity_mgnt entity_mgnt;
- struct rtw89_edcca_bak edcca_bak;
u32 disabled_dm_bitmap; /* bitmap of enum rtw89_dm_type */
u8 thermal_prot_th;
@@ -4973,7 +5002,7 @@ struct rtw89_dpk_bkup_para {
struct rtw89_dpk_info {
bool is_dpk_enable;
bool is_dpk_reload_en;
- u8 dpk_gs[RTW89_PHY_MAX];
+ u8 dpk_gs[RTW89_PHY_NUM];
u16 dc_i[RTW89_DPK_RF_PATH][RTW89_DPK_BKUP_NUM];
u16 dc_q[RTW89_DPK_RF_PATH][RTW89_DPK_BKUP_NUM];
u8 corr_val[RTW89_DPK_RF_PATH][RTW89_DPK_BKUP_NUM];
@@ -5135,7 +5164,7 @@ struct rtw89_tssi_info {
u32 alignment_backup_by_ch[RF_PATH_MAX][TSSI_MAX_CH_NUM][TSSI_ALIMK_VALUE_NUM];
u32 alignment_value[RF_PATH_MAX][TSSI_ALIMK_MAX][TSSI_ALIMK_VALUE_NUM];
bool alignment_done[RF_PATH_MAX][TSSI_ALIMK_MAX];
- u32 tssi_alimk_time;
+ u64 tssi_alimk_time;
};
struct rtw89_power_trim_info {
@@ -5146,9 +5175,27 @@ struct rtw89_power_trim_info {
u8 pad_bias_trim[RF_PATH_MAX];
};
+enum rtw89_regd_func {
+ RTW89_REGD_FUNC_TAS = 0, /* TAS (Time Average SAR) */
+ RTW89_REGD_FUNC_DAG = 1, /* DAG (Dynamic Antenna Gain) */
+
+ NUM_OF_RTW89_REGD_FUNC,
+};
+
struct rtw89_regd {
char alpha2[3];
u8 txpwr_regd[RTW89_BAND_NUM];
+ DECLARE_BITMAP(func_bitmap, NUM_OF_RTW89_REGD_FUNC);
+};
+
+struct rtw89_regd_data {
+ unsigned int nr;
+ struct rtw89_regd map[] __counted_by(nr);
+};
+
+struct rtw89_regd_ctrl {
+ unsigned int nr;
+ const struct rtw89_regd *map;
};
#define RTW89_REGD_MAX_COUNTRY_NUM U8_MAX
@@ -5156,6 +5203,7 @@ struct rtw89_regd {
#define RTW89_5GHZ_UNII4_START_INDEX 25
struct rtw89_regulatory_info {
+ struct rtw89_regd_ctrl ctrl;
const struct rtw89_regd *regd;
enum rtw89_reg_6ghz_power reg_6ghz_power;
struct rtw89_reg_6ghz_tpe reg_6ghz_tpe;
@@ -5299,8 +5347,8 @@ struct rtw89_lps_parm {
};
struct rtw89_ppdu_sts_info {
- struct sk_buff_head rx_queue[RTW89_PHY_MAX];
- u8 curr_rx_ppdu_cnt[RTW89_PHY_MAX];
+ struct sk_buff_head rx_queue[RTW89_PHY_NUM];
+ u8 curr_rx_ppdu_cnt[RTW89_PHY_NUM];
};
struct rtw89_early_h2c {
@@ -5419,8 +5467,8 @@ struct rtw89_phy_efuse_gain {
bool offset_valid;
bool comp_valid;
s8 offset[RF_PATH_MAX][RTW89_GAIN_OFFSET_NR]; /* S(8, 0) */
- s8 offset_base[RTW89_PHY_MAX]; /* S(8, 4) */
- s8 rssi_base[RTW89_PHY_MAX]; /* S(8, 4) */
+ s8 offset_base[RTW89_PHY_NUM]; /* S(8, 4) */
+ s8 rssi_base[RTW89_PHY_NUM]; /* S(8, 4) */
s8 comp[RF_PATH_MAX][RTW89_SUBBAND_NR]; /* S(8, 0) */
};
@@ -5629,8 +5677,6 @@ struct rtw89_dev {
struct rtw89_sta_link __rcu *assoc_link_on_macid[RTW89_MAX_MAC_ID_NUM];
refcount_t refcount_ap_info;
- /* ensures exclusive access from mac80211 callbacks */
- struct mutex mutex;
struct list_head rtwvifs_list;
/* used to protect rf read write */
struct mutex rf_mutex;
@@ -5650,10 +5696,10 @@ struct rtw89_dev {
struct rtw89_cam_info cam_info;
struct sk_buff_head c2h_queue;
- struct work_struct c2h_work;
- struct work_struct ips_work;
+ struct wiphy_work c2h_work;
+ struct wiphy_work ips_work;
+ struct wiphy_work cancel_6ghz_probe_work;
struct work_struct load_firmware_work;
- struct work_struct cancel_6ghz_probe_work;
struct list_head early_h2c_list;
@@ -5682,9 +5728,6 @@ struct rtw89_dev {
struct rtw89_power_trim_info pwr_trim;
struct rtw89_cfo_tracking_info cfo_tracking;
- struct rtw89_env_monitor_info env_monitor;
- struct rtw89_dig_info dig;
- struct rtw89_phy_ch_info ch_info;
union {
struct rtw89_phy_bb_gain_info ax;
struct rtw89_phy_bb_gain_info_be be;
@@ -5693,15 +5736,22 @@ struct rtw89_dev {
struct rtw89_phy_ul_tb_info ul_tb_info;
struct rtw89_antdiv_info antdiv;
- struct delayed_work track_work;
- struct delayed_work chanctx_work;
- struct delayed_work coex_act1_work;
- struct delayed_work coex_bt_devinfo_work;
- struct delayed_work coex_rfk_chk_work;
- struct delayed_work cfo_track_work;
+ struct rtw89_bb_ctx {
+ enum rtw89_phy_idx phy_idx;
+ struct rtw89_env_monitor_info env_monitor;
+ struct rtw89_dig_info dig;
+ struct rtw89_phy_ch_info ch_info;
+ struct rtw89_edcca_bak edcca_bak;
+ } bbs[RTW89_PHY_NUM];
+
+ struct wiphy_delayed_work track_work;
+ struct wiphy_delayed_work chanctx_work;
+ struct wiphy_delayed_work coex_act1_work;
+ struct wiphy_delayed_work coex_bt_devinfo_work;
+ struct wiphy_delayed_work coex_rfk_chk_work;
+ struct wiphy_delayed_work cfo_track_work;
struct delayed_work forbid_ba_work;
- struct delayed_work roc_work;
- struct delayed_work antdiv_work;
+ struct wiphy_delayed_work antdiv_work;
struct rtw89_ppdu_sts_info ppdu_sts;
u8 total_sta_assoc;
bool scanning;
@@ -6978,6 +7028,48 @@ static inline bool rtw89_is_mlo_1_1(struct rtw89_dev *rtwdev)
}
}
+static inline u8 rtw89_get_active_phy_bitmap(struct rtw89_dev *rtwdev)
+{
+ if (!rtwdev->dbcc_en)
+ return BIT(RTW89_PHY_0);
+
+ switch (rtwdev->mlo_dbcc_mode) {
+ case MLO_0_PLUS_2_1RF:
+ case MLO_0_PLUS_2_2RF:
+ return BIT(RTW89_PHY_1);
+ case MLO_1_PLUS_1_1RF:
+ case MLO_1_PLUS_1_2RF:
+ case MLO_2_PLUS_2_2RF:
+ case DBCC_LEGACY:
+ return BIT(RTW89_PHY_0) | BIT(RTW89_PHY_1);
+ case MLO_2_PLUS_0_1RF:
+ case MLO_2_PLUS_0_2RF:
+ default:
+ return BIT(RTW89_PHY_0);
+ }
+}
+
+#define rtw89_for_each_active_bb(rtwdev, bb) \
+ for (u8 __active_bb_bitmap = rtw89_get_active_phy_bitmap(rtwdev), \
+ __phy_idx = 0; __phy_idx < RTW89_PHY_NUM; __phy_idx++) \
+ if (__active_bb_bitmap & BIT(__phy_idx) && \
+ (bb = &rtwdev->bbs[__phy_idx]))
+
+#define rtw89_for_each_capab_bb(rtwdev, bb) \
+ for (u8 __phy_idx_max = rtwdev->dbcc_en ? RTW89_PHY_1 : RTW89_PHY_0, \
+ __phy_idx = 0; __phy_idx <= __phy_idx_max; __phy_idx++) \
+ if ((bb = &rtwdev->bbs[__phy_idx]))
+
+static inline
+struct rtw89_bb_ctx *rtw89_get_bb_ctx(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx)
+{
+ if (phy_idx >= RTW89_PHY_NUM)
+ return &rtwdev->bbs[RTW89_PHY_0];
+
+ return &rtwdev->bbs[phy_idx];
+}
+
static inline bool rtw89_is_rtl885xb(struct rtw89_dev *rtwdev)
{
enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
@@ -7092,9 +7184,7 @@ void rtw89_chip_cfg_txpwr_ul_tb_offset(struct rtw89_dev *rtwdev,
struct rtw89_vif_link *rtwvif_link);
bool rtw89_ra_report_to_bitrate(struct rtw89_dev *rtwdev, u8 rpt_rate, u16 *bitrate);
int rtw89_regd_setup(struct rtw89_dev *rtwdev);
-int rtw89_regd_init(struct rtw89_dev *rtwdev,
- void (*reg_notifier)(struct wiphy *wiphy, struct regulatory_request *request));
-void rtw89_regd_notifier(struct wiphy *wiphy, struct regulatory_request *request);
+int rtw89_regd_init_hint(struct rtw89_dev *rtwdev);
void rtw89_traffic_stats_init(struct rtw89_dev *rtwdev,
struct rtw89_traffic_stats *stats);
int rtw89_wait_for_cond(struct rtw89_wait_info *wait, unsigned int cond);
@@ -7102,8 +7192,8 @@ void rtw89_complete_cond(struct rtw89_wait_info *wait, unsigned int cond,
const struct rtw89_completion_data *data);
int rtw89_core_start(struct rtw89_dev *rtwdev);
void rtw89_core_stop(struct rtw89_dev *rtwdev);
-void rtw89_core_update_beacon_work(struct work_struct *work);
-void rtw89_roc_work(struct work_struct *work);
+void rtw89_core_update_beacon_work(struct wiphy *wiphy, struct wiphy_work *work);
+void rtw89_roc_work(struct wiphy *wiphy, struct wiphy_work *work);
void rtw89_roc_start(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif);
void rtw89_roc_end(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif);
void rtw89_core_scan_start(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link,
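The core.h changes above split the per-device DIG/EDCCA/channel state into a per-PHY struct rtw89_bb_ctx and add iteration helpers. A short usage sketch based on the definitions in the hunk (rtwdev stands for a struct rtw89_dev *; nothing here beyond that is assumed):

	struct rtw89_bb_ctx *bb;

	/* visit only the PHYs active in the current DBCC/MLO mode */
	rtw89_for_each_active_bb(rtwdev, bb)
		bb->dig.bypass_dig = true;

	/* or look one up by index; out-of-range falls back to PHY 0 */
	bb = rtw89_get_bb_ctx(rtwdev, RTW89_PHY_1);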
diff --git a/drivers/net/wireless/realtek/rtw89/debug.c b/drivers/net/wireless/realtek/rtw89/debug.c
index 09fa977a6e6d..f2c5753fd386 100644
--- a/drivers/net/wireless/realtek/rtw89/debug.c
+++ b/drivers/net/wireless/realtek/rtw89/debug.c
@@ -13,6 +13,7 @@
#include "ps.h"
#include "reg.h"
#include "sar.h"
+#include "util.h"
#ifdef CONFIG_RTW89_DEBUGMSG
unsigned int rtw89_debug_mask;
@@ -22,11 +23,21 @@ MODULE_PARM_DESC(debug_mask, "Debugging mask");
#endif
#ifdef CONFIG_RTW89_DEBUGFS
+struct rtw89_debugfs_priv_opt {
+ bool rlock:1;
+ bool wlock:1;
+ size_t rsize;
+};
+
struct rtw89_debugfs_priv {
struct rtw89_dev *rtwdev;
- int (*cb_read)(struct seq_file *m, void *v);
- ssize_t (*cb_write)(struct file *filp, const char __user *buffer,
- size_t count, loff_t *loff);
+ ssize_t (*cb_read)(struct rtw89_dev *rtwdev,
+ struct rtw89_debugfs_priv *debugfs_priv,
+ char *buf, size_t bufsz);
+ ssize_t (*cb_write)(struct rtw89_dev *rtwdev,
+ struct rtw89_debugfs_priv *debugfs_priv,
+ const char *buf, size_t count);
+ struct rtw89_debugfs_priv_opt opt;
union {
u32 cb_data;
struct {
@@ -51,6 +62,8 @@ struct rtw89_debugfs_priv {
u8 sel;
} mac_mem;
};
+ ssize_t rused;
+ char *rbuf;
};
struct rtw89_debugfs {
@@ -74,6 +87,28 @@ struct rtw89_debugfs {
struct rtw89_debugfs_priv disable_dm;
};
+struct rtw89_debugfs_iter_data {
+ char *buf;
+ size_t bufsz;
+ int written_sz;
+};
+
+static void rtw89_debugfs_iter_data_setup(struct rtw89_debugfs_iter_data *iter_data,
+ char *buf, size_t bufsz)
+{
+ iter_data->buf = buf;
+ iter_data->bufsz = bufsz;
+ iter_data->written_sz = 0;
+}
+
+static void rtw89_debugfs_iter_data_next(struct rtw89_debugfs_iter_data *iter_data,
+ char *buf, size_t bufsz, int written_sz)
+{
+ iter_data->buf = buf;
+ iter_data->bufsz = bufsz;
+ iter_data->written_sz += written_sz;
+}
+
static const u16 rtw89_rate_info_bw_to_mhz_map[] = {
[RATE_INFO_BW_20] = 20,
[RATE_INFO_BW_40] = 40,
@@ -90,84 +125,122 @@ static u16 rtw89_rate_info_bw_to_mhz(enum rate_info_bw bw)
return 0;
}
-static int rtw89_debugfs_single_show(struct seq_file *m, void *v)
+static ssize_t rtw89_debugfs_file_read_helper(struct wiphy *wiphy, struct file *file,
+ char *buf, size_t bufsz, void *data)
{
- struct rtw89_debugfs_priv *debugfs_priv = m->private;
+ struct rtw89_debugfs_priv *debugfs_priv = data;
+ struct rtw89_dev *rtwdev = debugfs_priv->rtwdev;
+ ssize_t n;
- return debugfs_priv->cb_read(m, v);
+ n = debugfs_priv->cb_read(rtwdev, debugfs_priv, buf, bufsz);
+ rtw89_might_trailing_ellipsis(buf, bufsz, n);
+
+ return n;
}
-static ssize_t rtw89_debugfs_single_write(struct file *filp,
- const char __user *buffer,
- size_t count, loff_t *loff)
+static ssize_t rtw89_debugfs_file_read(struct file *file, char __user *userbuf,
+ size_t count, loff_t *ppos)
{
- struct rtw89_debugfs_priv *debugfs_priv = filp->private_data;
+ struct rtw89_debugfs_priv *debugfs_priv = file->private_data;
+ struct rtw89_debugfs_priv_opt *opt = &debugfs_priv->opt;
+ struct rtw89_dev *rtwdev = debugfs_priv->rtwdev;
+ size_t bufsz = opt->rsize ? opt->rsize : PAGE_SIZE;
+ char *buf;
+ ssize_t n;
- return debugfs_priv->cb_write(filp, buffer, count, loff);
-}
+ if (!debugfs_priv->rbuf)
+ debugfs_priv->rbuf = devm_kzalloc(rtwdev->dev, bufsz, GFP_KERNEL);
-static ssize_t rtw89_debugfs_seq_file_write(struct file *filp,
- const char __user *buffer,
- size_t count, loff_t *loff)
-{
- struct seq_file *seqpriv = (struct seq_file *)filp->private_data;
- struct rtw89_debugfs_priv *debugfs_priv = seqpriv->private;
+ buf = debugfs_priv->rbuf;
+ if (!buf)
+ return -ENOMEM;
+
+ if (*ppos) {
+ n = debugfs_priv->rused;
+ goto out;
+ }
+
+ if (opt->rlock) {
+ n = wiphy_locked_debugfs_read(rtwdev->hw->wiphy, file, buf, bufsz,
+ userbuf, count, ppos,
+ rtw89_debugfs_file_read_helper,
+ debugfs_priv);
+ debugfs_priv->rused = n;
- return debugfs_priv->cb_write(filp, buffer, count, loff);
+ return n;
+ }
+
+ n = rtw89_debugfs_file_read_helper(rtwdev->hw->wiphy, file, buf, bufsz,
+ debugfs_priv);
+ debugfs_priv->rused = n;
+
+out:
+ return simple_read_from_buffer(userbuf, count, ppos, buf, n);
}
-static int rtw89_debugfs_single_open(struct inode *inode, struct file *filp)
+static ssize_t rtw89_debugfs_file_write_helper(struct wiphy *wiphy, struct file *file,
+ char *buf, size_t count, void *data)
{
- return single_open(filp, rtw89_debugfs_single_show, inode->i_private);
+ struct rtw89_debugfs_priv *debugfs_priv = data;
+ struct rtw89_dev *rtwdev = debugfs_priv->rtwdev;
+
+ return debugfs_priv->cb_write(rtwdev, debugfs_priv, buf, count);
}
-static int rtw89_debugfs_close(struct inode *inode, struct file *filp)
+static ssize_t rtw89_debugfs_file_write(struct file *file,
+ const char __user *userbuf,
+ size_t count, loff_t *loff)
{
- return 0;
+ struct rtw89_debugfs_priv *debugfs_priv = file->private_data;
+ struct rtw89_debugfs_priv_opt *opt = &debugfs_priv->opt;
+ struct rtw89_dev *rtwdev = debugfs_priv->rtwdev;
+ char *buf __free(kfree) = kmalloc(count + 1, GFP_KERNEL);
+ ssize_t n;
+
+ if (!buf)
+ return -ENOMEM;
+
+ if (opt->wlock) {
+ n = wiphy_locked_debugfs_write(rtwdev->hw->wiphy,
+ file, buf, count + 1,
+ userbuf, count,
+ rtw89_debugfs_file_write_helper,
+ debugfs_priv);
+ return n;
+ }
+
+ if (copy_from_user(buf, userbuf, count))
+ return -EFAULT;
+
+ buf[count] = '\0';
+
+ return debugfs_priv->cb_write(rtwdev, debugfs_priv, buf, count);
}
-static const struct file_operations file_ops_single_r = {
- .owner = THIS_MODULE,
- .open = rtw89_debugfs_single_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
+static const struct debugfs_short_fops file_ops_single_r = {
+ .read = rtw89_debugfs_file_read,
+ .llseek = generic_file_llseek,
};
-static const struct file_operations file_ops_common_rw = {
- .owner = THIS_MODULE,
- .open = rtw89_debugfs_single_open,
- .release = single_release,
- .read = seq_read,
- .llseek = seq_lseek,
- .write = rtw89_debugfs_seq_file_write,
+static const struct debugfs_short_fops file_ops_common_rw = {
+ .read = rtw89_debugfs_file_read,
+ .write = rtw89_debugfs_file_write,
+ .llseek = generic_file_llseek,
};
-static const struct file_operations file_ops_single_w = {
- .owner = THIS_MODULE,
- .write = rtw89_debugfs_single_write,
- .open = simple_open,
- .release = rtw89_debugfs_close,
+static const struct debugfs_short_fops file_ops_single_w = {
+ .write = rtw89_debugfs_file_write,
+ .llseek = generic_file_llseek,
};
static ssize_t
-rtw89_debug_priv_read_reg_select(struct file *filp,
- const char __user *user_buf,
- size_t count, loff_t *loff)
+rtw89_debug_priv_read_reg_select(struct rtw89_dev *rtwdev,
+ struct rtw89_debugfs_priv *debugfs_priv,
+ const char *buf, size_t count)
{
- struct seq_file *m = (struct seq_file *)filp->private_data;
- struct rtw89_debugfs_priv *debugfs_priv = m->private;
- struct rtw89_dev *rtwdev = debugfs_priv->rtwdev;
- char buf[32];
- size_t buf_size;
u32 addr, len;
int num;
- buf_size = min(count, sizeof(buf) - 1);
- if (copy_from_user(buf, user_buf, buf_size))
- return -EFAULT;
-
- buf[buf_size] = '\0';
num = sscanf(buf, "%x %x", &addr, &len);
if (num != 2) {
rtw89_info(rtwdev, "invalid format: <addr> <len>\n");
@@ -182,11 +255,13 @@ rtw89_debug_priv_read_reg_select(struct file *filp,
return count;
}
-static int rtw89_debug_priv_read_reg_get(struct seq_file *m, void *v)
+static
+ssize_t rtw89_debug_priv_read_reg_get(struct rtw89_dev *rtwdev,
+ struct rtw89_debugfs_priv *debugfs_priv,
+ char *buf, size_t bufsz)
{
- struct rtw89_debugfs_priv *debugfs_priv = m->private;
- struct rtw89_dev *rtwdev = debugfs_priv->rtwdev;
- u32 addr, end, data, k;
+ char *p = buf, *end = buf + bufsz;
+ u32 addr, addr_end, data, k;
u32 len;
len = debugfs_priv->read_reg.len;
@@ -210,41 +285,34 @@ static int rtw89_debug_priv_read_reg_get(struct seq_file *m, void *v)
return -EINVAL;
}
- seq_printf(m, "get %d bytes at 0x%08x=0x%08x\n", len, addr, data);
+ p += scnprintf(p, end - p, "get %d bytes at 0x%08x=0x%08x\n", len,
+ addr, data);
- return 0;
+ return p - buf;
ndata:
- end = addr + len;
+ addr_end = addr + len;
- for (; addr < end; addr += 16) {
- seq_printf(m, "%08xh : ", 0x18600000 + addr);
+ for (; addr < addr_end; addr += 16) {
+ p += scnprintf(p, end - p, "%08xh : ", 0x18600000 + addr);
for (k = 0; k < 16; k += 4) {
data = rtw89_read32(rtwdev, addr + k);
- seq_printf(m, "%08x ", data);
+ p += scnprintf(p, end - p, "%08x ", data);
}
- seq_puts(m, "\n");
+ p += scnprintf(p, end - p, "\n");
}
- return 0;
+ return p - buf;
}
-static ssize_t rtw89_debug_priv_write_reg_set(struct file *filp,
- const char __user *user_buf,
- size_t count, loff_t *loff)
+static
+ssize_t rtw89_debug_priv_write_reg_set(struct rtw89_dev *rtwdev,
+ struct rtw89_debugfs_priv *debugfs_priv,
+ const char *buf, size_t count)
{
- struct rtw89_debugfs_priv *debugfs_priv = filp->private_data;
- struct rtw89_dev *rtwdev = debugfs_priv->rtwdev;
- char buf[32];
- size_t buf_size;
u32 addr, val, len;
int num;
- buf_size = min(count, sizeof(buf) - 1);
- if (copy_from_user(buf, user_buf, buf_size))
- return -EFAULT;
-
- buf[buf_size] = '\0';
num = sscanf(buf, "%x %x %x", &addr, &val, &len);
if (num != 3) {
rtw89_info(rtwdev, "invalid format: <addr> <val> <len>\n");
@@ -273,24 +341,14 @@ static ssize_t rtw89_debug_priv_write_reg_set(struct file *filp,
}
static ssize_t
-rtw89_debug_priv_read_rf_select(struct file *filp,
- const char __user *user_buf,
- size_t count, loff_t *loff)
+rtw89_debug_priv_read_rf_select(struct rtw89_dev *rtwdev,
+ struct rtw89_debugfs_priv *debugfs_priv,
+ const char *buf, size_t count)
{
- struct seq_file *m = (struct seq_file *)filp->private_data;
- struct rtw89_debugfs_priv *debugfs_priv = m->private;
- struct rtw89_dev *rtwdev = debugfs_priv->rtwdev;
- char buf[32];
- size_t buf_size;
u32 addr, mask;
u8 path;
int num;
- buf_size = min(count, sizeof(buf) - 1);
- if (copy_from_user(buf, user_buf, buf_size))
- return -EFAULT;
-
- buf[buf_size] = '\0';
num = sscanf(buf, "%hhd %x %x", &path, &addr, &mask);
if (num != 3) {
rtw89_info(rtwdev, "invalid format: <path> <addr> <mask>\n");
@@ -310,10 +368,12 @@ rtw89_debug_priv_read_rf_select(struct file *filp,
return count;
}
-static int rtw89_debug_priv_read_rf_get(struct seq_file *m, void *v)
+static
+ssize_t rtw89_debug_priv_read_rf_get(struct rtw89_dev *rtwdev,
+ struct rtw89_debugfs_priv *debugfs_priv,
+ char *buf, size_t bufsz)
{
- struct rtw89_debugfs_priv *debugfs_priv = m->private;
- struct rtw89_dev *rtwdev = debugfs_priv->rtwdev;
+ char *p = buf, *end = buf + bufsz;
u32 addr, data, mask;
u8 path;
@@ -323,28 +383,21 @@ static int rtw89_debug_priv_read_rf_get(struct seq_file *m, void *v)
data = rtw89_read_rf(rtwdev, path, addr, mask);
- seq_printf(m, "path %d, rf register 0x%08x=0x%08x\n", path, addr, data);
+ p += scnprintf(p, end - p, "path %d, rf register 0x%08x=0x%08x\n",
+ path, addr, data);
- return 0;
+ return p - buf;
}
-static ssize_t rtw89_debug_priv_write_rf_set(struct file *filp,
- const char __user *user_buf,
- size_t count, loff_t *loff)
+static
+ssize_t rtw89_debug_priv_write_rf_set(struct rtw89_dev *rtwdev,
+ struct rtw89_debugfs_priv *debugfs_priv,
+ const char *buf, size_t count)
{
- struct rtw89_debugfs_priv *debugfs_priv = filp->private_data;
- struct rtw89_dev *rtwdev = debugfs_priv->rtwdev;
- char buf[32];
- size_t buf_size;
u32 addr, val, mask;
u8 path;
int num;
- buf_size = min(count, sizeof(buf) - 1);
- if (copy_from_user(buf, user_buf, buf_size))
- return -EFAULT;
-
- buf[buf_size] = '\0';
num = sscanf(buf, "%hhd %x %x %x", &path, &addr, &mask, &val);
if (num != 4) {
rtw89_info(rtwdev, "invalid format: <path> <addr> <mask> <val>\n");
@@ -363,29 +416,31 @@ static ssize_t rtw89_debug_priv_write_rf_set(struct file *filp,
return count;
}
-static int rtw89_debug_priv_rf_reg_dump_get(struct seq_file *m, void *v)
+static
+ssize_t rtw89_debug_priv_rf_reg_dump_get(struct rtw89_dev *rtwdev,
+ struct rtw89_debugfs_priv *debugfs_priv,
+ char *buf, size_t bufsz)
{
- struct rtw89_debugfs_priv *debugfs_priv = m->private;
- struct rtw89_dev *rtwdev = debugfs_priv->rtwdev;
const struct rtw89_chip_info *chip = rtwdev->chip;
+ char *p = buf, *end = buf + bufsz;
u32 addr, offset, data;
u8 path;
for (path = 0; path < chip->rf_path_num; path++) {
- seq_printf(m, "RF path %d:\n\n", path);
+ p += scnprintf(p, end - p, "RF path %d:\n\n", path);
for (addr = 0; addr < 0x100; addr += 4) {
- seq_printf(m, "0x%08x: ", addr);
+ p += scnprintf(p, end - p, "0x%08x: ", addr);
for (offset = 0; offset < 4; offset++) {
data = rtw89_read_rf(rtwdev, path,
addr + offset, RFREG_MASK);
- seq_printf(m, "0x%05x ", data);
+ p += scnprintf(p, end - p, "0x%05x ", data);
}
- seq_puts(m, "\n");
+ p += scnprintf(p, end - p, "\n");
}
- seq_puts(m, "\n");
+ p += scnprintf(p, end - p, "\n");
}
- return 0;
+ return p - buf;
}
struct txpwr_ent {
@@ -704,56 +759,71 @@ static const struct txpwr_map __txpwr_map_lmt_ru_be = {
};
static unsigned int
-__print_txpwr_ent(struct seq_file *m, const struct txpwr_ent *ent,
- const s8 *buf, const unsigned int cur)
+__print_txpwr_ent(char *buf, size_t bufsz, const struct txpwr_ent *ent,
+ const s8 *bufp, const unsigned int cur, unsigned int *ate)
{
+ char *p = buf, *end = buf + bufsz;
unsigned int cnt, i;
+ unsigned int eaten;
char *fmt;
if (ent->nested) {
- for (cnt = 0, i = 0; i < ent->len; i++)
- cnt += __print_txpwr_ent(m, ent->ptr + i, buf,
- cur + cnt);
- return cnt;
+ for (cnt = 0, i = 0; i < ent->len; i++, cnt += eaten)
+ p += __print_txpwr_ent(p, end - p, ent->ptr + i, bufp,
+ cur + cnt, &eaten);
+ *ate = cnt;
+ goto out;
}
switch (ent->len) {
case 0:
- seq_printf(m, "\t<< %s >>\n", ent->txt);
- return 0;
+ p += scnprintf(p, end - p, "\t<< %s >>\n", ent->txt);
+ *ate = 0;
+ goto out;
case 2:
fmt = "%s\t| %3d, %3d,\t\tdBm\n";
- seq_printf(m, fmt, ent->txt, buf[cur], buf[cur + 1]);
- return 2;
+ p += scnprintf(p, end - p, fmt, ent->txt, bufp[cur],
+ bufp[cur + 1]);
+ *ate = 2;
+ goto out;
case 4:
fmt = "%s\t| %3d, %3d, %3d, %3d,\tdBm\n";
- seq_printf(m, fmt, ent->txt, buf[cur], buf[cur + 1],
- buf[cur + 2], buf[cur + 3]);
- return 4;
+ p += scnprintf(p, end - p, fmt, ent->txt, bufp[cur],
+ bufp[cur + 1],
+ bufp[cur + 2], bufp[cur + 3]);
+ *ate = 4;
+ goto out;
case 8:
fmt = "%s\t| %3d, %3d, %3d, %3d, %3d, %3d, %3d, %3d,\tdBm\n";
- seq_printf(m, fmt, ent->txt, buf[cur], buf[cur + 1],
- buf[cur + 2], buf[cur + 3], buf[cur + 4],
- buf[cur + 5], buf[cur + 6], buf[cur + 7]);
- return 8;
+ p += scnprintf(p, end - p, fmt, ent->txt, bufp[cur],
+ bufp[cur + 1],
+ bufp[cur + 2], bufp[cur + 3], bufp[cur + 4],
+ bufp[cur + 5], bufp[cur + 6], bufp[cur + 7]);
+ *ate = 8;
+ goto out;
default:
return 0;
}
+
+out:
+ return p - buf;
}
-static int __print_txpwr_map(struct seq_file *m, struct rtw89_dev *rtwdev,
- const struct txpwr_map *map)
+static ssize_t __print_txpwr_map(struct rtw89_dev *rtwdev, char *buf, size_t bufsz,
+ const struct txpwr_map *map)
{
u8 fct = rtwdev->chip->txpwr_factor_mac;
u8 path_num = rtwdev->chip->rf_path_num;
+ char *p = buf, *end = buf + bufsz;
unsigned int cur, i;
+ unsigned int eaten;
u32 max_valid_addr;
u32 val, addr;
- s8 *buf, tmp;
+ s8 *bufp, tmp;
int ret;
- buf = vzalloc(map->addr_to - map->addr_from + 4);
- if (!buf)
+ bufp = vzalloc(map->addr_to - map->addr_from + 4);
+ if (!bufp)
return -ENOMEM;
if (path_num == 1)
@@ -773,31 +843,32 @@ static int __print_txpwr_map(struct seq_file *m, struct rtw89_dev *rtwdev,
for (i = 0; i < 4; i++, val >>= 8) {
/* signed 7 bits, and reserved BIT(7) */
tmp = sign_extend32(val, 6);
- buf[cur + i] = tmp >> fct;
+ bufp[cur + i] = tmp >> fct;
}
}
- for (cur = 0, i = 0; i < map->size; i++)
- cur += __print_txpwr_ent(m, &map->ent[i], buf, cur);
+ for (cur = 0, i = 0; i < map->size; i++, cur += eaten)
+ p += __print_txpwr_ent(p, end - p, &map->ent[i], bufp, cur, &eaten);
- vfree(buf);
- return 0;
+ vfree(bufp);
+ return p - buf;
}
#define case_REGD(_regd) \
case RTW89_ ## _regd: \
- seq_puts(m, #_regd "\n"); \
+ p += scnprintf(p, end - p, #_regd "\n"); \
break
-static void __print_regd(struct seq_file *m, struct rtw89_dev *rtwdev,
- const struct rtw89_chan *chan)
+static int __print_regd(struct rtw89_dev *rtwdev, char *buf, size_t bufsz,
+ const struct rtw89_chan *chan)
{
+ char *p = buf, *end = buf + bufsz;
u8 band = chan->band_type;
u8 regd = rtw89_regd_get(rtwdev, band);
switch (regd) {
default:
- seq_printf(m, "UNKNOWN: %d\n", regd);
+ p += scnprintf(p, end - p, "UNKNOWN: %d\n", regd);
break;
case_REGD(WW);
case_REGD(ETSI);
@@ -816,6 +887,8 @@ static void __print_regd(struct seq_file *m, struct rtw89_dev *rtwdev,
case_REGD(UK);
case_REGD(THAILAND);
}
+
+ return p - buf;
}
#undef case_REGD
@@ -844,96 +917,93 @@ static const struct dbgfs_txpwr_table *dbgfs_txpwr_tables[RTW89_CHIP_GEN_NUM] =
};
static
-void rtw89_debug_priv_txpwr_table_get_regd(struct seq_file *m,
- struct rtw89_dev *rtwdev,
- const struct rtw89_chan *chan)
+int rtw89_debug_priv_txpwr_table_get_regd(struct rtw89_dev *rtwdev,
+ char *buf, size_t bufsz,
+ const struct rtw89_chan *chan)
{
const struct rtw89_regulatory_info *regulatory = &rtwdev->regulatory;
const struct rtw89_reg_6ghz_tpe *tpe6 = &regulatory->reg_6ghz_tpe;
+ char *p = buf, *end = buf + bufsz;
- seq_printf(m, "[Chanctx] band %u, ch %u, bw %u\n",
- chan->band_type, chan->channel, chan->band_width);
+ p += scnprintf(p, end - p, "[Chanctx] band %u, ch %u, bw %u\n",
+ chan->band_type, chan->channel, chan->band_width);
- seq_puts(m, "[Regulatory] ");
- __print_regd(m, rtwdev, chan);
+ p += scnprintf(p, end - p, "[Regulatory] ");
+ p += __print_regd(rtwdev, p, end - p, chan);
if (chan->band_type == RTW89_BAND_6G) {
- seq_printf(m, "[reg6_pwr_type] %u\n", regulatory->reg_6ghz_power);
+ p += scnprintf(p, end - p, "[reg6_pwr_type] %u\n",
+ regulatory->reg_6ghz_power);
if (tpe6->valid)
- seq_printf(m, "[TPE] %d dBm\n", tpe6->constraint);
+ p += scnprintf(p, end - p, "[TPE] %d dBm\n",
+ tpe6->constraint);
}
+
+ return p - buf;
}
-static int rtw89_debug_priv_txpwr_table_get(struct seq_file *m, void *v)
+static
+ssize_t rtw89_debug_priv_txpwr_table_get(struct rtw89_dev *rtwdev,
+ struct rtw89_debugfs_priv *debugfs_priv,
+ char *buf, size_t bufsz)
{
- struct rtw89_debugfs_priv *debugfs_priv = m->private;
- struct rtw89_dev *rtwdev = debugfs_priv->rtwdev;
enum rtw89_chip_gen chip_gen = rtwdev->chip->chip_gen;
const struct dbgfs_txpwr_table *tbl;
const struct rtw89_chan *chan;
- int ret = 0;
+ char *p = buf, *end = buf + bufsz;
+ ssize_t n;
+
+ lockdep_assert_wiphy(rtwdev->hw->wiphy);
- mutex_lock(&rtwdev->mutex);
rtw89_leave_ps_mode(rtwdev);
chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
- rtw89_debug_priv_txpwr_table_get_regd(m, rtwdev, chan);
+ p += rtw89_debug_priv_txpwr_table_get_regd(rtwdev, p, end - p, chan);
- seq_puts(m, "[SAR]\n");
- rtw89_print_sar(m, rtwdev, chan->freq);
+ p += scnprintf(p, end - p, "[SAR]\n");
+ p += rtw89_print_sar(rtwdev, p, end - p, chan->freq);
- seq_puts(m, "[TAS]\n");
- rtw89_print_tas(m, rtwdev);
+ p += scnprintf(p, end - p, "[TAS]\n");
+ p += rtw89_print_tas(rtwdev, p, end - p);
- seq_puts(m, "[DAG]\n");
- rtw89_print_ant_gain(m, rtwdev, chan);
+ p += scnprintf(p, end - p, "[DAG]\n");
+ p += rtw89_print_ant_gain(rtwdev, p, end - p, chan);
tbl = dbgfs_txpwr_tables[chip_gen];
- if (!tbl) {
- ret = -EOPNOTSUPP;
- goto err;
- }
-
- seq_puts(m, "\n[TX power byrate]\n");
- ret = __print_txpwr_map(m, rtwdev, tbl->byr);
- if (ret)
- goto err;
-
- seq_puts(m, "\n[TX power limit]\n");
- ret = __print_txpwr_map(m, rtwdev, tbl->lmt);
- if (ret)
- goto err;
-
- seq_puts(m, "\n[TX power limit_ru]\n");
- ret = __print_txpwr_map(m, rtwdev, tbl->lmt_ru);
- if (ret)
- goto err;
+ if (!tbl)
+ return -EOPNOTSUPP;
-err:
- mutex_unlock(&rtwdev->mutex);
- return ret;
+ p += scnprintf(p, end - p, "\n[TX power byrate]\n");
+ n = __print_txpwr_map(rtwdev, p, end - p, tbl->byr);
+ if (n < 0)
+ return n;
+ p += n;
+
+ p += scnprintf(p, end - p, "\n[TX power limit]\n");
+ n = __print_txpwr_map(rtwdev, p, end - p, tbl->lmt);
+ if (n < 0)
+ return n;
+ p += n;
+
+ p += scnprintf(p, end - p, "\n[TX power limit_ru]\n");
+ n = __print_txpwr_map(rtwdev, p, end - p, tbl->lmt_ru);
+ if (n < 0)
+ return n;
+ p += n;
+
+ return p - buf;
}
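
Since helpers such as __print_txpwr_map() may now return either a negative errno or a byte count, the caller has to test for errors before accumulating, as rtw89_debug_priv_txpwr_table_get() does above with "if (n < 0) return n; p += n;". A minimal userspace sketch of that propagation pattern follows; fill_section() and fill_all() are hypothetical names, not part of the driver.

#include <stdio.h>
#include <errno.h>
#include <sys/types.h>

/* A filler that can fail: negative errno on error, bytes written otherwise. */
static ssize_t fill_section(char *buf, size_t bufsz, int fail)
{
	if (fail)
		return -EOPNOTSUPP;
	return snprintf(buf, bufsz, "section ok\n");
}

static ssize_t fill_all(char *buf, size_t bufsz, int fail_second)
{
	char *p = buf, *end = buf + bufsz;
	ssize_t n;

	n = fill_section(p, end - p, 0);
	if (n < 0)
		return n;	/* propagate the error, discard partial output */
	p += n;

	n = fill_section(p, end - p, fail_second);
	if (n < 0)
		return n;
	p += n;

	return p - buf;
}

int main(void)
{
	char buf[128];
	ssize_t n = fill_all(buf, sizeof(buf), 0);

	if (n < 0)
		return 1;
	fwrite(buf, 1, (size_t)n, stdout);
	return 0;
}
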
static ssize_t
-rtw89_debug_priv_mac_reg_dump_select(struct file *filp,
- const char __user *user_buf,
- size_t count, loff_t *loff)
+rtw89_debug_priv_mac_reg_dump_select(struct rtw89_dev *rtwdev,
+ struct rtw89_debugfs_priv *debugfs_priv,
+ const char *buf, size_t count)
{
- struct seq_file *m = (struct seq_file *)filp->private_data;
- struct rtw89_debugfs_priv *debugfs_priv = m->private;
- struct rtw89_dev *rtwdev = debugfs_priv->rtwdev;
const struct rtw89_chip_info *chip = rtwdev->chip;
- char buf[32];
- size_t buf_size;
int sel;
int ret;
- buf_size = min(count, sizeof(buf) - 1);
- if (copy_from_user(buf, user_buf, buf_size))
- return -EFAULT;
-
- buf[buf_size] = '\0';
ret = kstrtoint(buf, 0, &sel);
if (ret)
return ret;
@@ -957,99 +1027,91 @@ rtw89_debug_priv_mac_reg_dump_select(struct file *filp,
#define RTW89_MAC_PAGE_SIZE 0x100
-static int rtw89_debug_priv_mac_reg_dump_get(struct seq_file *m, void *v)
+static
+ssize_t rtw89_debug_priv_mac_reg_dump_get(struct rtw89_dev *rtwdev,
+ struct rtw89_debugfs_priv *debugfs_priv,
+ char *buf, size_t bufsz)
{
- struct rtw89_debugfs_priv *debugfs_priv = m->private;
- struct rtw89_dev *rtwdev = debugfs_priv->rtwdev;
enum rtw89_debug_mac_reg_sel reg_sel = debugfs_priv->cb_data;
- u32 start, end;
+ char *p = buf, *end = buf + bufsz;
+ u32 start, end_addr;
u32 i, j, k, page;
u32 val;
switch (reg_sel) {
case RTW89_DBG_SEL_MAC_00:
- seq_puts(m, "Debug selected MAC page 0x00\n");
+ p += scnprintf(p, end - p, "Debug selected MAC page 0x00\n");
start = 0x000;
- end = 0x014;
+ end_addr = 0x014;
break;
case RTW89_DBG_SEL_MAC_30:
- seq_puts(m, "Debug selected MAC page 0x30\n");
+ p += scnprintf(p, end - p, "Debug selected MAC page 0x30\n");
start = 0x030;
- end = 0x033;
+ end_addr = 0x033;
break;
case RTW89_DBG_SEL_MAC_40:
- seq_puts(m, "Debug selected MAC page 0x40\n");
+ p += scnprintf(p, end - p, "Debug selected MAC page 0x40\n");
start = 0x040;
- end = 0x07f;
+ end_addr = 0x07f;
break;
case RTW89_DBG_SEL_MAC_80:
- seq_puts(m, "Debug selected MAC page 0x80\n");
+ p += scnprintf(p, end - p, "Debug selected MAC page 0x80\n");
start = 0x080;
- end = 0x09f;
+ end_addr = 0x09f;
break;
case RTW89_DBG_SEL_MAC_C0:
- seq_puts(m, "Debug selected MAC page 0xc0\n");
+ p += scnprintf(p, end - p, "Debug selected MAC page 0xc0\n");
start = 0x0c0;
- end = 0x0df;
+ end_addr = 0x0df;
break;
case RTW89_DBG_SEL_MAC_E0:
- seq_puts(m, "Debug selected MAC page 0xe0\n");
+ p += scnprintf(p, end - p, "Debug selected MAC page 0xe0\n");
start = 0x0e0;
- end = 0x0ff;
+ end_addr = 0x0ff;
break;
case RTW89_DBG_SEL_BB:
- seq_puts(m, "Debug selected BB register\n");
+ p += scnprintf(p, end - p, "Debug selected BB register\n");
start = 0x100;
- end = 0x17f;
+ end_addr = 0x17f;
break;
case RTW89_DBG_SEL_IQK:
- seq_puts(m, "Debug selected IQK register\n");
+ p += scnprintf(p, end - p, "Debug selected IQK register\n");
start = 0x180;
- end = 0x1bf;
+ end_addr = 0x1bf;
break;
case RTW89_DBG_SEL_RFC:
- seq_puts(m, "Debug selected RFC register\n");
+ p += scnprintf(p, end - p, "Debug selected RFC register\n");
start = 0x1c0;
- end = 0x1ff;
+ end_addr = 0x1ff;
break;
default:
- seq_puts(m, "Selected invalid register page\n");
+ p += scnprintf(p, end - p, "Selected invalid register page\n");
return -EINVAL;
}
- for (i = start; i <= end; i++) {
+ for (i = start; i <= end_addr; i++) {
page = i << 8;
for (j = page; j < page + RTW89_MAC_PAGE_SIZE; j += 16) {
- seq_printf(m, "%08xh : ", 0x18600000 + j);
+ p += scnprintf(p, end - p, "%08xh : ", 0x18600000 + j);
for (k = 0; k < 4; k++) {
val = rtw89_read32(rtwdev, j + (k << 2));
- seq_printf(m, "%08x ", val);
+ p += scnprintf(p, end - p, "%08x ", val);
}
- seq_puts(m, "\n");
+ p += scnprintf(p, end - p, "\n");
}
}
- return 0;
+ return p - buf;
}
static ssize_t
-rtw89_debug_priv_mac_mem_dump_select(struct file *filp,
- const char __user *user_buf,
- size_t count, loff_t *loff)
+rtw89_debug_priv_mac_mem_dump_select(struct rtw89_dev *rtwdev,
+ struct rtw89_debugfs_priv *debugfs_priv,
+ const char *buf, size_t count)
{
- struct seq_file *m = (struct seq_file *)filp->private_data;
- struct rtw89_debugfs_priv *debugfs_priv = m->private;
- struct rtw89_dev *rtwdev = debugfs_priv->rtwdev;
- char buf[32];
- size_t buf_size;
u32 sel, start_addr, len;
int num;
- buf_size = min(count, sizeof(buf) - 1);
- if (copy_from_user(buf, user_buf, buf_size))
- return -EFAULT;
-
- buf[buf_size] = '\0';
num = sscanf(buf, "%x %x %x", &sel, &start_addr, &len);
if (num != 3) {
rtw89_info(rtwdev, "invalid format: <sel> <start> <len>\n");
@@ -1066,15 +1128,16 @@ rtw89_debug_priv_mac_mem_dump_select(struct file *filp,
return count;
}
-static void rtw89_debug_dump_mac_mem(struct seq_file *m,
- struct rtw89_dev *rtwdev,
- u8 sel, u32 start_addr, u32 len)
+static int rtw89_debug_dump_mac_mem(struct rtw89_dev *rtwdev,
+ char *buf, size_t bufsz,
+ u8 sel, u32 start_addr, u32 len)
{
const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
u32 filter_model_addr = mac->filter_model_addr;
u32 indir_access_addr = mac->indir_access_addr;
u32 base_addr, start_page, residue;
- u32 i, j, p, pages;
+ char *p = buf, *end = buf + bufsz;
+ u32 i, j, pp, pages;
u32 dump_len, remain;
u32 val;
@@ -1085,32 +1148,37 @@ static void rtw89_debug_dump_mac_mem(struct seq_file *m,
base_addr = mac->mem_base_addrs[sel];
base_addr += start_page * MAC_MEM_DUMP_PAGE_SIZE;
- for (p = 0; p < pages; p++) {
+ for (pp = 0; pp < pages; pp++) {
dump_len = min_t(u32, remain, MAC_MEM_DUMP_PAGE_SIZE);
rtw89_write32(rtwdev, filter_model_addr, base_addr);
for (i = indir_access_addr + residue;
i < indir_access_addr + dump_len;) {
- seq_printf(m, "%08xh:", i);
+ p += scnprintf(p, end - p, "%08xh:", i);
for (j = 0;
j < 4 && i < indir_access_addr + dump_len;
j++, i += 4) {
val = rtw89_read32(rtwdev, i);
- seq_printf(m, " %08x", val);
+ p += scnprintf(p, end - p, " %08x", val);
remain -= 4;
}
- seq_puts(m, "\n");
+ p += scnprintf(p, end - p, "\n");
}
base_addr += MAC_MEM_DUMP_PAGE_SIZE;
}
+
+ return p - buf;
}
-static int
-rtw89_debug_priv_mac_mem_dump_get(struct seq_file *m, void *v)
+static ssize_t
+rtw89_debug_priv_mac_mem_dump_get(struct rtw89_dev *rtwdev,
+ struct rtw89_debugfs_priv *debugfs_priv,
+ char *buf, size_t bufsz)
{
- struct rtw89_debugfs_priv *debugfs_priv = m->private;
- struct rtw89_dev *rtwdev = debugfs_priv->rtwdev;
+ char *p = buf, *end = buf + bufsz;
bool grant_read = false;
+ lockdep_assert_wiphy(rtwdev->hw->wiphy);
+
if (debugfs_priv->mac_mem.sel >= RTW89_MAC_MEM_NUM)
return -ENOENT;
@@ -1127,40 +1195,28 @@ rtw89_debug_priv_mac_mem_dump_get(struct seq_file *m, void *v)
}
}
- mutex_lock(&rtwdev->mutex);
rtw89_leave_ps_mode(rtwdev);
if (grant_read)
rtw89_write32_set(rtwdev, R_AX_TCR1, B_AX_TCR_FORCE_READ_TXDFIFO);
- rtw89_debug_dump_mac_mem(m, rtwdev,
- debugfs_priv->mac_mem.sel,
- debugfs_priv->mac_mem.start,
- debugfs_priv->mac_mem.len);
+ p += rtw89_debug_dump_mac_mem(rtwdev, p, end - p,
+ debugfs_priv->mac_mem.sel,
+ debugfs_priv->mac_mem.start,
+ debugfs_priv->mac_mem.len);
if (grant_read)
rtw89_write32_clr(rtwdev, R_AX_TCR1, B_AX_TCR_FORCE_READ_TXDFIFO);
- mutex_unlock(&rtwdev->mutex);
- return 0;
+ return p - buf;
}
static ssize_t
-rtw89_debug_priv_mac_dbg_port_dump_select(struct file *filp,
- const char __user *user_buf,
- size_t count, loff_t *loff)
+rtw89_debug_priv_mac_dbg_port_dump_select(struct rtw89_dev *rtwdev,
+ struct rtw89_debugfs_priv *debugfs_priv,
+ const char *buf, size_t count)
{
- struct seq_file *m = (struct seq_file *)filp->private_data;
- struct rtw89_debugfs_priv *debugfs_priv = m->private;
- struct rtw89_dev *rtwdev = debugfs_priv->rtwdev;
- char buf[32];
- size_t buf_size;
int sel, set;
int num;
bool enable;
- buf_size = min(count, sizeof(buf) - 1);
- if (copy_from_user(buf, user_buf, buf_size))
- return -EFAULT;
-
- buf[buf_size] = '\0';
num = sscanf(buf, "%d %d", &sel, &set);
if (num != 2) {
rtw89_info(rtwdev, "invalid format: <sel> <set>\n");
@@ -1196,13 +1252,13 @@ rtw89_debug_priv_mac_dbg_port_dump_select(struct file *filp,
}
static int rtw89_debug_mac_dump_ss_dbg(struct rtw89_dev *rtwdev,
- struct seq_file *m)
+ char *buf, size_t bufsz)
{
return 0;
}
static int rtw89_debug_mac_dump_dle_dbg(struct rtw89_dev *rtwdev,
- struct seq_file *m)
+ char *buf, size_t bufsz)
{
#define DLE_DFI_DUMP(__type, __target, __sel) \
({ \
@@ -1231,7 +1287,7 @@ static int rtw89_debug_mac_dump_dle_dbg(struct rtw89_dev *rtwdev,
__data; \
})
-#define DLE_DFI_FREE_PAGE_DUMP(__m, __type) \
+#define DLE_DFI_FREE_PAGE_DUMP(__p, __end, __type) \
({ \
u32 __freepg, __pubpg; \
u32 __freepg_head, __freepg_tail, __pubpg_num; \
@@ -1241,24 +1297,25 @@ static int rtw89_debug_mac_dump_dle_dbg(struct rtw89_dev *rtwdev,
__freepg_head = FIELD_GET(B_AX_DLE_FREE_HEADPG, __freepg); \
__freepg_tail = FIELD_GET(B_AX_DLE_FREE_TAILPG, __freepg); \
__pubpg_num = FIELD_GET(B_AX_DLE_PUB_PGNUM, __pubpg); \
- seq_printf(__m, "[%s] freepg head: %d\n", \
- #__type, __freepg_head); \
- seq_printf(__m, "[%s] freepg tail: %d\n", \
- #__type, __freepg_tail); \
- seq_printf(__m, "[%s] pubpg num : %d\n", \
- #__type, __pubpg_num); \
+ __p += scnprintf(__p, __end - __p, "[%s] freepg head: %d\n", \
+ #__type, __freepg_head); \
+ __p += scnprintf(__p, __end - __p, "[%s] freepg tail: %d\n", \
+ #__type, __freepg_tail); \
+ __p += scnprintf(__p, __end - __p, "[%s] pubpg num : %d\n", \
+ #__type, __pubpg_num); \
})
-#define case_QUOTA(__m, __type, __id) \
+#define case_QUOTA(__p, __end, __type, __id) \
case __type##_QTAID_##__id: \
- val32 = DLE_DFI_DUMP(__type, QUOTA, __type##_QTAID_##__id); \
+ val32 = DLE_DFI_DUMP(__type, QUOTA, __type##_QTAID_##__id); \
rsv_pgnum = FIELD_GET(B_AX_DLE_RSV_PGNUM, val32); \
use_pgnum = FIELD_GET(B_AX_DLE_USE_PGNUM, val32); \
- seq_printf(__m, "[%s][%s] rsv_pgnum: %d\n", \
- #__type, #__id, rsv_pgnum); \
- seq_printf(__m, "[%s][%s] use_pgnum: %d\n", \
- #__type, #__id, use_pgnum); \
+ __p += scnprintf(__p, __end - __p, "[%s][%s] rsv_pgnum: %d\n", \
+ #__type, #__id, rsv_pgnum); \
+ __p += scnprintf(__p, __end - __p, "[%s][%s] use_pgnum: %d\n", \
+ #__type, #__id, use_pgnum); \
break
+ char *p = buf, *end = buf + bufsz;
u32 quota_id;
u32 val32;
u16 rsv_pgnum, use_pgnum;
@@ -1266,38 +1323,39 @@ static int rtw89_debug_mac_dump_dle_dbg(struct rtw89_dev *rtwdev,
ret = rtw89_mac_check_mac_en(rtwdev, 0, RTW89_DMAC_SEL);
if (ret) {
- seq_puts(m, "[DLE] : DMAC not enabled\n");
- return ret;
+ p += scnprintf(p, end - p, "[DLE] : DMAC not enabled\n");
+ goto out;
}
- DLE_DFI_FREE_PAGE_DUMP(m, WDE);
- DLE_DFI_FREE_PAGE_DUMP(m, PLE);
+ DLE_DFI_FREE_PAGE_DUMP(p, end, WDE);
+ DLE_DFI_FREE_PAGE_DUMP(p, end, PLE);
for (quota_id = 0; quota_id <= WDE_QTAID_CPUIO; quota_id++) {
switch (quota_id) {
- case_QUOTA(m, WDE, HOST_IF);
- case_QUOTA(m, WDE, WLAN_CPU);
- case_QUOTA(m, WDE, DATA_CPU);
- case_QUOTA(m, WDE, PKTIN);
- case_QUOTA(m, WDE, CPUIO);
+ case_QUOTA(p, end, WDE, HOST_IF);
+ case_QUOTA(p, end, WDE, WLAN_CPU);
+ case_QUOTA(p, end, WDE, DATA_CPU);
+ case_QUOTA(p, end, WDE, PKTIN);
+ case_QUOTA(p, end, WDE, CPUIO);
}
}
for (quota_id = 0; quota_id <= PLE_QTAID_CPUIO; quota_id++) {
switch (quota_id) {
- case_QUOTA(m, PLE, B0_TXPL);
- case_QUOTA(m, PLE, B1_TXPL);
- case_QUOTA(m, PLE, C2H);
- case_QUOTA(m, PLE, H2C);
- case_QUOTA(m, PLE, WLAN_CPU);
- case_QUOTA(m, PLE, MPDU);
- case_QUOTA(m, PLE, CMAC0_RX);
- case_QUOTA(m, PLE, CMAC1_RX);
- case_QUOTA(m, PLE, CMAC1_BBRPT);
- case_QUOTA(m, PLE, WDRLS);
- case_QUOTA(m, PLE, CPUIO);
+ case_QUOTA(p, end, PLE, B0_TXPL);
+ case_QUOTA(p, end, PLE, B1_TXPL);
+ case_QUOTA(p, end, PLE, C2H);
+ case_QUOTA(p, end, PLE, H2C);
+ case_QUOTA(p, end, PLE, WLAN_CPU);
+ case_QUOTA(p, end, PLE, MPDU);
+ case_QUOTA(p, end, PLE, CMAC0_RX);
+ case_QUOTA(p, end, PLE, CMAC1_RX);
+ case_QUOTA(p, end, PLE, CMAC1_BBRPT);
+ case_QUOTA(p, end, PLE, WDRLS);
+ case_QUOTA(p, end, PLE, CPUIO);
}
}
- return 0;
+out:
+ return p - buf;
#undef case_QUOTA
#undef DLE_DFI_DUMP
@@ -1305,73 +1363,88 @@ static int rtw89_debug_mac_dump_dle_dbg(struct rtw89_dev *rtwdev,
}
static int rtw89_debug_mac_dump_dmac_dbg(struct rtw89_dev *rtwdev,
- struct seq_file *m)
+ char *buf, size_t bufsz)
{
const struct rtw89_chip_info *chip = rtwdev->chip;
+ char *p = buf, *end = buf + bufsz;
u32 dmac_err;
int i, ret;
ret = rtw89_mac_check_mac_en(rtwdev, 0, RTW89_DMAC_SEL);
if (ret) {
- seq_puts(m, "[DMAC] : DMAC not enabled\n");
- return ret;
+ p += scnprintf(p, end - p, "[DMAC] : DMAC not enabled\n");
+ goto out;
}
dmac_err = rtw89_read32(rtwdev, R_AX_DMAC_ERR_ISR);
- seq_printf(m, "R_AX_DMAC_ERR_ISR=0x%08x\n", dmac_err);
- seq_printf(m, "R_AX_DMAC_ERR_IMR=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_DMAC_ERR_IMR));
+ p += scnprintf(p, end - p, "R_AX_DMAC_ERR_ISR=0x%08x\n", dmac_err);
+ p += scnprintf(p, end - p, "R_AX_DMAC_ERR_IMR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_DMAC_ERR_IMR));
if (dmac_err) {
- seq_printf(m, "R_AX_WDE_ERR_FLAG_CFG=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_WDE_ERR_FLAG_CFG_NUM1));
- seq_printf(m, "R_AX_PLE_ERR_FLAG_CFG=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_PLE_ERR_FLAG_CFG_NUM1));
+ p += scnprintf(p, end - p, "R_AX_WDE_ERR_FLAG_CFG=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_WDE_ERR_FLAG_CFG_NUM1));
+ p += scnprintf(p, end - p, "R_AX_PLE_ERR_FLAG_CFG=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_PLE_ERR_FLAG_CFG_NUM1));
if (chip->chip_id == RTL8852C) {
- seq_printf(m, "R_AX_PLE_ERRFLAG_MSG=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_PLE_ERRFLAG_MSG));
- seq_printf(m, "R_AX_WDE_ERRFLAG_MSG=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_WDE_ERRFLAG_MSG));
- seq_printf(m, "R_AX_PLE_DBGERR_LOCKEN=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_PLE_DBGERR_LOCKEN));
- seq_printf(m, "R_AX_PLE_DBGERR_STS=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_PLE_DBGERR_STS));
+ p += scnprintf(p, end - p,
+ "R_AX_PLE_ERRFLAG_MSG=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_PLE_ERRFLAG_MSG));
+ p += scnprintf(p, end - p,
+ "R_AX_WDE_ERRFLAG_MSG=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_WDE_ERRFLAG_MSG));
+ p += scnprintf(p, end - p,
+ "R_AX_PLE_DBGERR_LOCKEN=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_PLE_DBGERR_LOCKEN));
+ p += scnprintf(p, end - p,
+ "R_AX_PLE_DBGERR_STS=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_PLE_DBGERR_STS));
}
}
if (dmac_err & B_AX_WDRLS_ERR_FLAG) {
- seq_printf(m, "R_AX_WDRLS_ERR_IMR=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_WDRLS_ERR_IMR));
- seq_printf(m, "R_AX_WDRLS_ERR_ISR=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_WDRLS_ERR_ISR));
+ p += scnprintf(p, end - p, "R_AX_WDRLS_ERR_IMR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_WDRLS_ERR_IMR));
+ p += scnprintf(p, end - p, "R_AX_WDRLS_ERR_ISR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_WDRLS_ERR_ISR));
if (chip->chip_id == RTL8852C)
- seq_printf(m, "R_AX_RPQ_RXBD_IDX=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_RPQ_RXBD_IDX_V1));
+ p += scnprintf(p, end - p,
+ "R_AX_RPQ_RXBD_IDX=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_RPQ_RXBD_IDX_V1));
else
- seq_printf(m, "R_AX_RPQ_RXBD_IDX=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_RPQ_RXBD_IDX));
+ p += scnprintf(p, end - p,
+ "R_AX_RPQ_RXBD_IDX=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_RPQ_RXBD_IDX));
}
if (dmac_err & B_AX_WSEC_ERR_FLAG) {
if (chip->chip_id == RTL8852C) {
- seq_printf(m, "R_AX_SEC_ERR_IMR=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_SEC_ERROR_FLAG_IMR));
- seq_printf(m, "R_AX_SEC_ERR_ISR=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_SEC_ERROR_FLAG));
- seq_printf(m, "R_AX_SEC_ENG_CTRL=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_SEC_ENG_CTRL));
- seq_printf(m, "R_AX_SEC_MPDU_PROC=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_SEC_MPDU_PROC));
- seq_printf(m, "R_AX_SEC_CAM_ACCESS=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_SEC_CAM_ACCESS));
- seq_printf(m, "R_AX_SEC_CAM_RDATA=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_SEC_CAM_RDATA));
- seq_printf(m, "R_AX_SEC_DEBUG1=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_SEC_DEBUG1));
- seq_printf(m, "R_AX_SEC_TX_DEBUG=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_SEC_TX_DEBUG));
- seq_printf(m, "R_AX_SEC_RX_DEBUG=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_SEC_RX_DEBUG));
+ p += scnprintf(p, end - p,
+ "R_AX_SEC_ERR_IMR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_SEC_ERROR_FLAG_IMR));
+ p += scnprintf(p, end - p,
+ "R_AX_SEC_ERR_ISR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_SEC_ERROR_FLAG));
+ p += scnprintf(p, end - p,
+ "R_AX_SEC_ENG_CTRL=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_SEC_ENG_CTRL));
+ p += scnprintf(p, end - p,
+ "R_AX_SEC_MPDU_PROC=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_SEC_MPDU_PROC));
+ p += scnprintf(p, end - p,
+ "R_AX_SEC_CAM_ACCESS=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_SEC_CAM_ACCESS));
+ p += scnprintf(p, end - p,
+ "R_AX_SEC_CAM_RDATA=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_SEC_CAM_RDATA));
+ p += scnprintf(p, end - p, "R_AX_SEC_DEBUG1=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_SEC_DEBUG1));
+ p += scnprintf(p, end - p,
+ "R_AX_SEC_TX_DEBUG=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_SEC_TX_DEBUG));
+ p += scnprintf(p, end - p,
+ "R_AX_SEC_RX_DEBUG=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_SEC_RX_DEBUG));
rtw89_write32_mask(rtwdev, R_AX_DBG_CTRL,
B_AX_DBG_SEL0, 0x8B);
@@ -1382,187 +1455,229 @@ static int rtw89_debug_mac_dump_dmac_dbg(struct rtw89_dev *rtwdev,
for (i = 0; i < 0x10; i++) {
rtw89_write32_mask(rtwdev, R_AX_SEC_ENG_CTRL,
B_AX_SEC_DBG_PORT_FIELD_MASK, i);
- seq_printf(m, "sel=%x,R_AX_SEC_DEBUG2=0x%08x\n",
- i, rtw89_read32(rtwdev, R_AX_SEC_DEBUG2));
+ p += scnprintf(p, end - p,
+ "sel=%x,R_AX_SEC_DEBUG2=0x%08x\n",
+ i,
+ rtw89_read32(rtwdev, R_AX_SEC_DEBUG2));
}
} else {
- seq_printf(m, "R_AX_SEC_ERR_IMR_ISR=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_SEC_DEBUG));
- seq_printf(m, "R_AX_SEC_ENG_CTRL=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_SEC_ENG_CTRL));
- seq_printf(m, "R_AX_SEC_MPDU_PROC=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_SEC_MPDU_PROC));
- seq_printf(m, "R_AX_SEC_CAM_ACCESS=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_SEC_CAM_ACCESS));
- seq_printf(m, "R_AX_SEC_CAM_RDATA=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_SEC_CAM_RDATA));
- seq_printf(m, "R_AX_SEC_CAM_WDATA=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_SEC_CAM_WDATA));
- seq_printf(m, "R_AX_SEC_TX_DEBUG=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_SEC_TX_DEBUG));
- seq_printf(m, "R_AX_SEC_RX_DEBUG=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_SEC_RX_DEBUG));
- seq_printf(m, "R_AX_SEC_TRX_PKT_CNT=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_SEC_TRX_PKT_CNT));
- seq_printf(m, "R_AX_SEC_TRX_BLK_CNT=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_SEC_TRX_BLK_CNT));
+ p += scnprintf(p, end - p,
+ "R_AX_SEC_ERR_IMR_ISR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_SEC_DEBUG));
+ p += scnprintf(p, end - p,
+ "R_AX_SEC_ENG_CTRL=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_SEC_ENG_CTRL));
+ p += scnprintf(p, end - p,
+ "R_AX_SEC_MPDU_PROC=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_SEC_MPDU_PROC));
+ p += scnprintf(p, end - p,
+ "R_AX_SEC_CAM_ACCESS=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_SEC_CAM_ACCESS));
+ p += scnprintf(p, end - p,
+ "R_AX_SEC_CAM_RDATA=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_SEC_CAM_RDATA));
+ p += scnprintf(p, end - p,
+ "R_AX_SEC_CAM_WDATA=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_SEC_CAM_WDATA));
+ p += scnprintf(p, end - p,
+ "R_AX_SEC_TX_DEBUG=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_SEC_TX_DEBUG));
+ p += scnprintf(p, end - p,
+ "R_AX_SEC_RX_DEBUG=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_SEC_RX_DEBUG));
+ p += scnprintf(p, end - p,
+ "R_AX_SEC_TRX_PKT_CNT=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_SEC_TRX_PKT_CNT));
+ p += scnprintf(p, end - p,
+ "R_AX_SEC_TRX_BLK_CNT=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_SEC_TRX_BLK_CNT));
}
}
if (dmac_err & B_AX_MPDU_ERR_FLAG) {
- seq_printf(m, "R_AX_MPDU_TX_ERR_IMR=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_MPDU_TX_ERR_IMR));
- seq_printf(m, "R_AX_MPDU_TX_ERR_ISR=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_MPDU_TX_ERR_ISR));
- seq_printf(m, "R_AX_MPDU_RX_ERR_IMR=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_MPDU_RX_ERR_IMR));
- seq_printf(m, "R_AX_MPDU_RX_ERR_ISR=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_MPDU_RX_ERR_ISR));
+ p += scnprintf(p, end - p, "R_AX_MPDU_TX_ERR_IMR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_MPDU_TX_ERR_IMR));
+ p += scnprintf(p, end - p, "R_AX_MPDU_TX_ERR_ISR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_MPDU_TX_ERR_ISR));
+ p += scnprintf(p, end - p, "R_AX_MPDU_RX_ERR_IMR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_MPDU_RX_ERR_IMR));
+ p += scnprintf(p, end - p, "R_AX_MPDU_RX_ERR_ISR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_MPDU_RX_ERR_ISR));
}
if (dmac_err & B_AX_STA_SCHEDULER_ERR_FLAG) {
- seq_printf(m, "R_AX_STA_SCHEDULER_ERR_IMR=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_STA_SCHEDULER_ERR_IMR));
- seq_printf(m, "R_AX_STA_SCHEDULER_ERR_ISR=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_STA_SCHEDULER_ERR_ISR));
+ p += scnprintf(p, end - p,
+ "R_AX_STA_SCHEDULER_ERR_IMR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_STA_SCHEDULER_ERR_IMR));
+ p += scnprintf(p, end - p,
+ "R_AX_STA_SCHEDULER_ERR_ISR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_STA_SCHEDULER_ERR_ISR));
}
if (dmac_err & B_AX_WDE_DLE_ERR_FLAG) {
- seq_printf(m, "R_AX_WDE_ERR_IMR=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_WDE_ERR_IMR));
- seq_printf(m, "R_AX_WDE_ERR_ISR=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_WDE_ERR_ISR));
- seq_printf(m, "R_AX_PLE_ERR_IMR=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_PLE_ERR_IMR));
- seq_printf(m, "R_AX_PLE_ERR_FLAG_ISR=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_PLE_ERR_FLAG_ISR));
+ p += scnprintf(p, end - p, "R_AX_WDE_ERR_IMR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_WDE_ERR_IMR));
+ p += scnprintf(p, end - p, "R_AX_WDE_ERR_ISR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_WDE_ERR_ISR));
+ p += scnprintf(p, end - p, "R_AX_PLE_ERR_IMR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_PLE_ERR_IMR));
+ p += scnprintf(p, end - p, "R_AX_PLE_ERR_FLAG_ISR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_PLE_ERR_FLAG_ISR));
}
if (dmac_err & B_AX_TXPKTCTRL_ERR_FLAG) {
if (chip->chip_id == RTL8852C) {
- seq_printf(m, "R_AX_TXPKTCTL_B0_ERRFLAG_IMR=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_TXPKTCTL_B0_ERRFLAG_IMR));
- seq_printf(m, "R_AX_TXPKTCTL_B0_ERRFLAG_ISR=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_TXPKTCTL_B0_ERRFLAG_ISR));
- seq_printf(m, "R_AX_TXPKTCTL_B1_ERRFLAG_IMR=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_TXPKTCTL_B1_ERRFLAG_IMR));
- seq_printf(m, "R_AX_TXPKTCTL_B1_ERRFLAG_ISR=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_TXPKTCTL_B1_ERRFLAG_ISR));
+ p += scnprintf(p, end - p,
+ "R_AX_TXPKTCTL_B0_ERRFLAG_IMR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_TXPKTCTL_B0_ERRFLAG_IMR));
+ p += scnprintf(p, end - p,
+ "R_AX_TXPKTCTL_B0_ERRFLAG_ISR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_TXPKTCTL_B0_ERRFLAG_ISR));
+ p += scnprintf(p, end - p,
+ "R_AX_TXPKTCTL_B1_ERRFLAG_IMR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_TXPKTCTL_B1_ERRFLAG_IMR));
+ p += scnprintf(p, end - p,
+ "R_AX_TXPKTCTL_B1_ERRFLAG_ISR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_TXPKTCTL_B1_ERRFLAG_ISR));
} else {
- seq_printf(m, "R_AX_TXPKTCTL_ERR_IMR_ISR=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_TXPKTCTL_ERR_IMR_ISR));
- seq_printf(m, "R_AX_TXPKTCTL_ERR_IMR_ISR_B1=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_TXPKTCTL_ERR_IMR_ISR_B1));
+ p += scnprintf(p, end - p,
+ "R_AX_TXPKTCTL_ERR_IMR_ISR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_TXPKTCTL_ERR_IMR_ISR));
+ p += scnprintf(p, end - p,
+ "R_AX_TXPKTCTL_ERR_IMR_ISR_B1=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_TXPKTCTL_ERR_IMR_ISR_B1));
}
}
if (dmac_err & B_AX_PLE_DLE_ERR_FLAG) {
- seq_printf(m, "R_AX_WDE_ERR_IMR=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_WDE_ERR_IMR));
- seq_printf(m, "R_AX_WDE_ERR_ISR=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_WDE_ERR_ISR));
- seq_printf(m, "R_AX_PLE_ERR_IMR=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_PLE_ERR_IMR));
- seq_printf(m, "R_AX_PLE_ERR_FLAG_ISR=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_PLE_ERR_FLAG_ISR));
- seq_printf(m, "R_AX_WD_CPUQ_OP_0=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_WD_CPUQ_OP_0));
- seq_printf(m, "R_AX_WD_CPUQ_OP_1=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_WD_CPUQ_OP_1));
- seq_printf(m, "R_AX_WD_CPUQ_OP_2=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_WD_CPUQ_OP_2));
- seq_printf(m, "R_AX_WD_CPUQ_OP_STATUS=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_WD_CPUQ_OP_STATUS));
- seq_printf(m, "R_AX_PL_CPUQ_OP_0=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_PL_CPUQ_OP_0));
- seq_printf(m, "R_AX_PL_CPUQ_OP_1=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_PL_CPUQ_OP_1));
- seq_printf(m, "R_AX_PL_CPUQ_OP_2=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_PL_CPUQ_OP_2));
- seq_printf(m, "R_AX_PL_CPUQ_OP_STATUS=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_PL_CPUQ_OP_STATUS));
+ p += scnprintf(p, end - p, "R_AX_WDE_ERR_IMR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_WDE_ERR_IMR));
+ p += scnprintf(p, end - p, "R_AX_WDE_ERR_ISR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_WDE_ERR_ISR));
+ p += scnprintf(p, end - p, "R_AX_PLE_ERR_IMR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_PLE_ERR_IMR));
+ p += scnprintf(p, end - p, "R_AX_PLE_ERR_FLAG_ISR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_PLE_ERR_FLAG_ISR));
+ p += scnprintf(p, end - p, "R_AX_WD_CPUQ_OP_0=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_WD_CPUQ_OP_0));
+ p += scnprintf(p, end - p, "R_AX_WD_CPUQ_OP_1=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_WD_CPUQ_OP_1));
+ p += scnprintf(p, end - p, "R_AX_WD_CPUQ_OP_2=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_WD_CPUQ_OP_2));
+ p += scnprintf(p, end - p, "R_AX_WD_CPUQ_OP_STATUS=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_WD_CPUQ_OP_STATUS));
+ p += scnprintf(p, end - p, "R_AX_PL_CPUQ_OP_0=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_PL_CPUQ_OP_0));
+ p += scnprintf(p, end - p, "R_AX_PL_CPUQ_OP_1=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_PL_CPUQ_OP_1));
+ p += scnprintf(p, end - p, "R_AX_PL_CPUQ_OP_2=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_PL_CPUQ_OP_2));
+ p += scnprintf(p, end - p, "R_AX_PL_CPUQ_OP_STATUS=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_PL_CPUQ_OP_STATUS));
if (chip->chip_id == RTL8852C) {
- seq_printf(m, "R_AX_RX_CTRL0=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_RX_CTRL0));
- seq_printf(m, "R_AX_RX_CTRL1=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_RX_CTRL1));
- seq_printf(m, "R_AX_RX_CTRL2=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_RX_CTRL2));
+ p += scnprintf(p, end - p, "R_AX_RX_CTRL0=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_RX_CTRL0));
+ p += scnprintf(p, end - p, "R_AX_RX_CTRL1=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_RX_CTRL1));
+ p += scnprintf(p, end - p, "R_AX_RX_CTRL2=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_RX_CTRL2));
} else {
- seq_printf(m, "R_AX_RXDMA_PKT_INFO_0=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_RXDMA_PKT_INFO_0));
- seq_printf(m, "R_AX_RXDMA_PKT_INFO_1=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_RXDMA_PKT_INFO_1));
- seq_printf(m, "R_AX_RXDMA_PKT_INFO_2=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_RXDMA_PKT_INFO_2));
+ p += scnprintf(p, end - p,
+ "R_AX_RXDMA_PKT_INFO_0=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_RXDMA_PKT_INFO_0));
+ p += scnprintf(p, end - p,
+ "R_AX_RXDMA_PKT_INFO_1=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_RXDMA_PKT_INFO_1));
+ p += scnprintf(p, end - p,
+ "R_AX_RXDMA_PKT_INFO_2=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_RXDMA_PKT_INFO_2));
}
}
if (dmac_err & B_AX_PKTIN_ERR_FLAG) {
- seq_printf(m, "R_AX_PKTIN_ERR_IMR=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_PKTIN_ERR_IMR));
- seq_printf(m, "R_AX_PKTIN_ERR_ISR=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_PKTIN_ERR_ISR));
+ p += scnprintf(p, end - p, "R_AX_PKTIN_ERR_IMR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_PKTIN_ERR_IMR));
+ p += scnprintf(p, end - p, "R_AX_PKTIN_ERR_ISR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_PKTIN_ERR_ISR));
}
if (dmac_err & B_AX_DISPATCH_ERR_FLAG) {
- seq_printf(m, "R_AX_HOST_DISPATCHER_ERR_IMR=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_HOST_DISPATCHER_ERR_IMR));
- seq_printf(m, "R_AX_HOST_DISPATCHER_ERR_ISR=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_HOST_DISPATCHER_ERR_ISR));
- seq_printf(m, "R_AX_CPU_DISPATCHER_ERR_IMR=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_CPU_DISPATCHER_ERR_IMR));
- seq_printf(m, "R_AX_CPU_DISPATCHER_ERR_ISR=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_CPU_DISPATCHER_ERR_ISR));
- seq_printf(m, "R_AX_OTHER_DISPATCHER_ERR_IMR=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_OTHER_DISPATCHER_ERR_IMR));
- seq_printf(m, "R_AX_OTHER_DISPATCHER_ERR_ISR=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_OTHER_DISPATCHER_ERR_ISR));
+ p += scnprintf(p, end - p,
+ "R_AX_HOST_DISPATCHER_ERR_IMR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_HOST_DISPATCHER_ERR_IMR));
+ p += scnprintf(p, end - p,
+ "R_AX_HOST_DISPATCHER_ERR_ISR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_HOST_DISPATCHER_ERR_ISR));
+ p += scnprintf(p, end - p,
+ "R_AX_CPU_DISPATCHER_ERR_IMR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_CPU_DISPATCHER_ERR_IMR));
+ p += scnprintf(p, end - p,
+ "R_AX_CPU_DISPATCHER_ERR_ISR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_CPU_DISPATCHER_ERR_ISR));
+ p += scnprintf(p, end - p,
+ "R_AX_OTHER_DISPATCHER_ERR_IMR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_OTHER_DISPATCHER_ERR_IMR));
+ p += scnprintf(p, end - p,
+ "R_AX_OTHER_DISPATCHER_ERR_ISR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_OTHER_DISPATCHER_ERR_ISR));
}
if (dmac_err & B_AX_BBRPT_ERR_FLAG) {
if (chip->chip_id == RTL8852C) {
- seq_printf(m, "R_AX_BBRPT_COM_ERR_IMR=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_BBRPT_COM_ERR_IMR));
- seq_printf(m, "R_AX_BBRPT_COM_ERR_ISR=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_BBRPT_COM_ERR_ISR));
- seq_printf(m, "R_AX_BBRPT_CHINFO_ERR_ISR=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_BBRPT_CHINFO_ERR_ISR));
- seq_printf(m, "R_AX_BBRPT_CHINFO_ERR_IMR=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_BBRPT_CHINFO_ERR_IMR));
- seq_printf(m, "R_AX_BBRPT_DFS_ERR_IMR=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_BBRPT_DFS_ERR_IMR));
- seq_printf(m, "R_AX_BBRPT_DFS_ERR_ISR=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_BBRPT_DFS_ERR_ISR));
+ p += scnprintf(p, end - p,
+ "R_AX_BBRPT_COM_ERR_IMR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_BBRPT_COM_ERR_IMR));
+ p += scnprintf(p, end - p,
+ "R_AX_BBRPT_COM_ERR_ISR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_BBRPT_COM_ERR_ISR));
+ p += scnprintf(p, end - p,
+ "R_AX_BBRPT_CHINFO_ERR_ISR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_BBRPT_CHINFO_ERR_ISR));
+ p += scnprintf(p, end - p,
+ "R_AX_BBRPT_CHINFO_ERR_IMR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_BBRPT_CHINFO_ERR_IMR));
+ p += scnprintf(p, end - p,
+ "R_AX_BBRPT_DFS_ERR_IMR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_BBRPT_DFS_ERR_IMR));
+ p += scnprintf(p, end - p,
+ "R_AX_BBRPT_DFS_ERR_ISR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_BBRPT_DFS_ERR_ISR));
} else {
- seq_printf(m, "R_AX_BBRPT_COM_ERR_IMR_ISR=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_BBRPT_COM_ERR_IMR_ISR));
- seq_printf(m, "R_AX_BBRPT_CHINFO_ERR_ISR=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_BBRPT_CHINFO_ERR_ISR));
- seq_printf(m, "R_AX_BBRPT_CHINFO_ERR_IMR=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_BBRPT_CHINFO_ERR_IMR));
- seq_printf(m, "R_AX_BBRPT_DFS_ERR_IMR=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_BBRPT_DFS_ERR_IMR));
- seq_printf(m, "R_AX_BBRPT_DFS_ERR_ISR=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_BBRPT_DFS_ERR_ISR));
+ p += scnprintf(p, end - p,
+ "R_AX_BBRPT_COM_ERR_IMR_ISR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_BBRPT_COM_ERR_IMR_ISR));
+ p += scnprintf(p, end - p,
+ "R_AX_BBRPT_CHINFO_ERR_ISR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_BBRPT_CHINFO_ERR_ISR));
+ p += scnprintf(p, end - p,
+ "R_AX_BBRPT_CHINFO_ERR_IMR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_BBRPT_CHINFO_ERR_IMR));
+ p += scnprintf(p, end - p,
+ "R_AX_BBRPT_DFS_ERR_IMR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_BBRPT_DFS_ERR_IMR));
+ p += scnprintf(p, end - p,
+ "R_AX_BBRPT_DFS_ERR_ISR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_BBRPT_DFS_ERR_ISR));
}
}
if (dmac_err & B_AX_HAXIDMA_ERR_FLAG && chip->chip_id == RTL8852C) {
- seq_printf(m, "R_AX_HAXIDMA_ERR_IMR=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_HAXI_IDCT_MSK));
- seq_printf(m, "R_AX_HAXIDMA_ERR_ISR=0x%08x\n",
- rtw89_read32(rtwdev, R_AX_HAXI_IDCT));
+ p += scnprintf(p, end - p, "R_AX_HAXIDMA_ERR_IMR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_HAXI_IDCT_MSK));
+ p += scnprintf(p, end - p, "R_AX_HAXIDMA_ERR_ISR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_HAXI_IDCT));
}
- return 0;
+out:
+ return p - buf;
}
static int rtw89_debug_mac_dump_cmac_err(struct rtw89_dev *rtwdev,
- struct seq_file *m,
+ char *buf, size_t bufsz,
enum rtw89_mac_idx band)
{
const struct rtw89_chip_info *chip = rtwdev->chip;
+ char *p = buf, *end = buf + bufsz;
u32 offset = 0;
u32 cmac_err;
int ret;
@@ -1570,96 +1685,127 @@ static int rtw89_debug_mac_dump_cmac_err(struct rtw89_dev *rtwdev,
ret = rtw89_mac_check_mac_en(rtwdev, band, RTW89_CMAC_SEL);
if (ret) {
if (band)
- seq_puts(m, "[CMAC] : CMAC1 not enabled\n");
+ p += scnprintf(p, end - p,
+ "[CMAC] : CMAC1 not enabled\n");
else
- seq_puts(m, "[CMAC] : CMAC0 not enabled\n");
- return ret;
+ p += scnprintf(p, end - p,
+ "[CMAC] : CMAC0 not enabled\n");
+ goto out;
}
if (band)
offset = RTW89_MAC_AX_BAND_REG_OFFSET;
cmac_err = rtw89_read32(rtwdev, R_AX_CMAC_ERR_ISR + offset);
- seq_printf(m, "R_AX_CMAC_ERR_ISR [%d]=0x%08x\n", band,
- rtw89_read32(rtwdev, R_AX_CMAC_ERR_ISR + offset));
- seq_printf(m, "R_AX_CMAC_FUNC_EN [%d]=0x%08x\n", band,
- rtw89_read32(rtwdev, R_AX_CMAC_FUNC_EN + offset));
- seq_printf(m, "R_AX_CK_EN [%d]=0x%08x\n", band,
- rtw89_read32(rtwdev, R_AX_CK_EN + offset));
+ p += scnprintf(p, end - p, "R_AX_CMAC_ERR_ISR [%d]=0x%08x\n", band,
+ rtw89_read32(rtwdev, R_AX_CMAC_ERR_ISR + offset));
+ p += scnprintf(p, end - p, "R_AX_CMAC_FUNC_EN [%d]=0x%08x\n", band,
+ rtw89_read32(rtwdev, R_AX_CMAC_FUNC_EN + offset));
+ p += scnprintf(p, end - p, "R_AX_CK_EN [%d]=0x%08x\n", band,
+ rtw89_read32(rtwdev, R_AX_CK_EN + offset));
if (cmac_err & B_AX_SCHEDULE_TOP_ERR_IND) {
- seq_printf(m, "R_AX_SCHEDULE_ERR_IMR [%d]=0x%08x\n", band,
- rtw89_read32(rtwdev, R_AX_SCHEDULE_ERR_IMR + offset));
- seq_printf(m, "R_AX_SCHEDULE_ERR_ISR [%d]=0x%08x\n", band,
- rtw89_read32(rtwdev, R_AX_SCHEDULE_ERR_ISR + offset));
+ p += scnprintf(p, end - p,
+ "R_AX_SCHEDULE_ERR_IMR [%d]=0x%08x\n", band,
+ rtw89_read32(rtwdev, R_AX_SCHEDULE_ERR_IMR + offset));
+ p += scnprintf(p, end - p,
+ "R_AX_SCHEDULE_ERR_ISR [%d]=0x%08x\n", band,
+ rtw89_read32(rtwdev, R_AX_SCHEDULE_ERR_ISR + offset));
}
if (cmac_err & B_AX_PTCL_TOP_ERR_IND) {
- seq_printf(m, "R_AX_PTCL_IMR0 [%d]=0x%08x\n", band,
- rtw89_read32(rtwdev, R_AX_PTCL_IMR0 + offset));
- seq_printf(m, "R_AX_PTCL_ISR0 [%d]=0x%08x\n", band,
- rtw89_read32(rtwdev, R_AX_PTCL_ISR0 + offset));
+ p += scnprintf(p, end - p, "R_AX_PTCL_IMR0 [%d]=0x%08x\n",
+ band,
+ rtw89_read32(rtwdev, R_AX_PTCL_IMR0 + offset));
+ p += scnprintf(p, end - p, "R_AX_PTCL_ISR0 [%d]=0x%08x\n",
+ band,
+ rtw89_read32(rtwdev, R_AX_PTCL_ISR0 + offset));
}
if (cmac_err & B_AX_DMA_TOP_ERR_IND) {
if (chip->chip_id == RTL8852C) {
- seq_printf(m, "R_AX_RX_ERR_FLAG [%d]=0x%08x\n", band,
- rtw89_read32(rtwdev, R_AX_RX_ERR_FLAG + offset));
- seq_printf(m, "R_AX_RX_ERR_FLAG_IMR [%d]=0x%08x\n", band,
- rtw89_read32(rtwdev, R_AX_RX_ERR_FLAG_IMR + offset));
+ p += scnprintf(p, end - p,
+ "R_AX_RX_ERR_FLAG [%d]=0x%08x\n", band,
+ rtw89_read32(rtwdev, R_AX_RX_ERR_FLAG + offset));
+ p += scnprintf(p, end - p,
+ "R_AX_RX_ERR_FLAG_IMR [%d]=0x%08x\n",
+ band,
+ rtw89_read32(rtwdev, R_AX_RX_ERR_FLAG_IMR + offset));
} else {
- seq_printf(m, "R_AX_DLE_CTRL [%d]=0x%08x\n", band,
- rtw89_read32(rtwdev, R_AX_DLE_CTRL + offset));
+ p += scnprintf(p, end - p,
+ "R_AX_DLE_CTRL [%d]=0x%08x\n", band,
+ rtw89_read32(rtwdev, R_AX_DLE_CTRL + offset));
}
}
if (cmac_err & B_AX_DMA_TOP_ERR_IND || cmac_err & B_AX_WMAC_RX_ERR_IND) {
if (chip->chip_id == RTL8852C) {
- seq_printf(m, "R_AX_PHYINFO_ERR_ISR [%d]=0x%08x\n", band,
- rtw89_read32(rtwdev, R_AX_PHYINFO_ERR_ISR + offset));
- seq_printf(m, "R_AX_PHYINFO_ERR_IMR [%d]=0x%08x\n", band,
- rtw89_read32(rtwdev, R_AX_PHYINFO_ERR_IMR + offset));
+ p += scnprintf(p, end - p,
+ "R_AX_PHYINFO_ERR_ISR [%d]=0x%08x\n",
+ band,
+ rtw89_read32(rtwdev, R_AX_PHYINFO_ERR_ISR + offset));
+ p += scnprintf(p, end - p,
+ "R_AX_PHYINFO_ERR_IMR [%d]=0x%08x\n",
+ band,
+ rtw89_read32(rtwdev, R_AX_PHYINFO_ERR_IMR + offset));
} else {
- seq_printf(m, "R_AX_PHYINFO_ERR_IMR [%d]=0x%08x\n", band,
- rtw89_read32(rtwdev, R_AX_PHYINFO_ERR_IMR + offset));
+ p += scnprintf(p, end - p,
+ "R_AX_PHYINFO_ERR_IMR [%d]=0x%08x\n",
+ band,
+ rtw89_read32(rtwdev, R_AX_PHYINFO_ERR_IMR + offset));
}
}
if (cmac_err & B_AX_TXPWR_CTRL_ERR_IND) {
- seq_printf(m, "R_AX_TXPWR_IMR [%d]=0x%08x\n", band,
- rtw89_read32(rtwdev, R_AX_TXPWR_IMR + offset));
- seq_printf(m, "R_AX_TXPWR_ISR [%d]=0x%08x\n", band,
- rtw89_read32(rtwdev, R_AX_TXPWR_ISR + offset));
+ p += scnprintf(p, end - p, "R_AX_TXPWR_IMR [%d]=0x%08x\n",
+ band,
+ rtw89_read32(rtwdev, R_AX_TXPWR_IMR + offset));
+ p += scnprintf(p, end - p, "R_AX_TXPWR_ISR [%d]=0x%08x\n",
+ band,
+ rtw89_read32(rtwdev, R_AX_TXPWR_ISR + offset));
}
if (cmac_err & B_AX_WMAC_TX_ERR_IND) {
if (chip->chip_id == RTL8852C) {
- seq_printf(m, "R_AX_TRXPTCL_ERROR_INDICA [%d]=0x%08x\n", band,
- rtw89_read32(rtwdev, R_AX_TRXPTCL_ERROR_INDICA + offset));
- seq_printf(m, "R_AX_TRXPTCL_ERROR_INDICA_MASK [%d]=0x%08x\n", band,
- rtw89_read32(rtwdev, R_AX_TRXPTCL_ERROR_INDICA_MASK + offset));
+ p += scnprintf(p, end - p,
+ "R_AX_TRXPTCL_ERROR_INDICA [%d]=0x%08x\n",
+ band,
+ rtw89_read32(rtwdev,
+ R_AX_TRXPTCL_ERROR_INDICA + offset));
+ p += scnprintf(p, end - p,
+ "R_AX_TRXPTCL_ERROR_INDICA_MASK [%d]=0x%08x\n",
+ band,
+ rtw89_read32(rtwdev,
+ R_AX_TRXPTCL_ERROR_INDICA_MASK + offset));
} else {
- seq_printf(m, "R_AX_TMAC_ERR_IMR_ISR [%d]=0x%08x\n", band,
- rtw89_read32(rtwdev, R_AX_TMAC_ERR_IMR_ISR + offset));
+ p += scnprintf(p, end - p,
+ "R_AX_TMAC_ERR_IMR_ISR [%d]=0x%08x\n",
+ band,
+ rtw89_read32(rtwdev,
+ R_AX_TMAC_ERR_IMR_ISR + offset));
}
- seq_printf(m, "R_AX_DBGSEL_TRXPTCL [%d]=0x%08x\n", band,
- rtw89_read32(rtwdev, R_AX_DBGSEL_TRXPTCL + offset));
+ p += scnprintf(p, end - p,
+ "R_AX_DBGSEL_TRXPTCL [%d]=0x%08x\n", band,
+ rtw89_read32(rtwdev, R_AX_DBGSEL_TRXPTCL + offset));
}
- seq_printf(m, "R_AX_CMAC_ERR_IMR [%d]=0x%08x\n", band,
- rtw89_read32(rtwdev, R_AX_CMAC_ERR_IMR + offset));
+ p += scnprintf(p, end - p, "R_AX_CMAC_ERR_IMR [%d]=0x%08x\n", band,
+ rtw89_read32(rtwdev, R_AX_CMAC_ERR_IMR + offset));
- return 0;
+out:
+ return p - buf;
}
static int rtw89_debug_mac_dump_cmac_dbg(struct rtw89_dev *rtwdev,
- struct seq_file *m)
+ char *buf, size_t bufsz)
{
- rtw89_debug_mac_dump_cmac_err(rtwdev, m, RTW89_MAC_0);
+ char *p = buf, *end = buf + bufsz;
+
+ p += rtw89_debug_mac_dump_cmac_err(rtwdev, p, end - p, RTW89_MAC_0);
if (rtwdev->dbcc_en)
- rtw89_debug_mac_dump_cmac_err(rtwdev, m, RTW89_MAC_1);
+ p += rtw89_debug_mac_dump_cmac_err(rtwdev, p, end - p, RTW89_MAC_1);
- return 0;
+ return p - buf;
}
static const struct rtw89_mac_dbg_port_info dbg_port_ptcl_c0 = {
@@ -2465,11 +2611,12 @@ static const struct rtw89_mac_dbg_port_info dbg_port_pcie_misc2 = {
.rd_msk = B_AX_DEBUG_ST_MASK
};
-static const struct rtw89_mac_dbg_port_info *
-rtw89_debug_mac_dbg_port_sel(struct seq_file *m,
- struct rtw89_dev *rtwdev, u32 sel)
+static int
+rtw89_debug_mac_dbg_port_sel(struct rtw89_dev *rtwdev, char *buf, size_t bufsz,
+ u32 sel, const struct rtw89_mac_dbg_port_info **ppinfo)
{
- const struct rtw89_mac_dbg_port_info *info;
+ const struct rtw89_mac_dbg_port_info *info = NULL;
+ char *p = buf, *end = buf + bufsz;
u32 index;
u32 val32;
u16 val16;
@@ -2481,28 +2628,28 @@ rtw89_debug_mac_dbg_port_sel(struct seq_file *m,
val16 = rtw89_read16(rtwdev, R_AX_PTCL_DBG);
val16 |= B_AX_PTCL_DBG_EN;
rtw89_write16(rtwdev, R_AX_PTCL_DBG, val16);
- seq_puts(m, "Enable PTCL C0 dbgport.\n");
+ p += scnprintf(p, end - p, "Enable PTCL C0 dbgport.\n");
break;
case RTW89_DBG_PORT_SEL_PTCL_C1:
info = &dbg_port_ptcl_c1;
val16 = rtw89_read16(rtwdev, R_AX_PTCL_DBG_C1);
val16 |= B_AX_PTCL_DBG_EN;
rtw89_write16(rtwdev, R_AX_PTCL_DBG_C1, val16);
- seq_puts(m, "Enable PTCL C1 dbgport.\n");
+ p += scnprintf(p, end - p, "Enable PTCL C1 dbgport.\n");
break;
case RTW89_DBG_PORT_SEL_SCH_C0:
info = &dbg_port_sch_c0;
val32 = rtw89_read32(rtwdev, R_AX_SCH_DBG_SEL);
val32 |= B_AX_SCH_DBG_EN;
rtw89_write32(rtwdev, R_AX_SCH_DBG_SEL, val32);
- seq_puts(m, "Enable SCH C0 dbgport.\n");
+ p += scnprintf(p, end - p, "Enable SCH C0 dbgport.\n");
break;
case RTW89_DBG_PORT_SEL_SCH_C1:
info = &dbg_port_sch_c1;
val32 = rtw89_read32(rtwdev, R_AX_SCH_DBG_SEL_C1);
val32 |= B_AX_SCH_DBG_EN;
rtw89_write32(rtwdev, R_AX_SCH_DBG_SEL_C1, val32);
- seq_puts(m, "Enable SCH C1 dbgport.\n");
+ p += scnprintf(p, end - p, "Enable SCH C1 dbgport.\n");
break;
case RTW89_DBG_PORT_SEL_TMAC_C0:
info = &dbg_port_tmac_c0;
@@ -2519,7 +2666,7 @@ rtw89_debug_mac_dbg_port_sel(struct seq_file *m,
val32 = rtw89_read32(rtwdev, R_AX_SYS_STATUS1);
val32 = u32_replace_bits(val32, MAC_DBG_SEL, B_AX_SEL_0XC0_MASK);
rtw89_write32(rtwdev, R_AX_SYS_STATUS1, val32);
- seq_puts(m, "Enable TMAC C0 dbgport.\n");
+ p += scnprintf(p, end - p, "Enable TMAC C0 dbgport.\n");
break;
case RTW89_DBG_PORT_SEL_TMAC_C1:
info = &dbg_port_tmac_c1;
@@ -2536,7 +2683,7 @@ rtw89_debug_mac_dbg_port_sel(struct seq_file *m,
val32 = rtw89_read32(rtwdev, R_AX_SYS_STATUS1);
val32 = u32_replace_bits(val32, MAC_DBG_SEL, B_AX_SEL_0XC0_MASK);
rtw89_write32(rtwdev, R_AX_SYS_STATUS1, val32);
- seq_puts(m, "Enable TMAC C1 dbgport.\n");
+ p += scnprintf(p, end - p, "Enable TMAC C1 dbgport.\n");
break;
case RTW89_DBG_PORT_SEL_RMAC_C0:
info = &dbg_port_rmac_c0;
@@ -2558,7 +2705,7 @@ rtw89_debug_mac_dbg_port_sel(struct seq_file *m,
val8 = u8_replace_bits(val8, RMAC_CMAC_DBG_SEL,
B_AX_DBGSEL_TRXPTCL_MASK);
rtw89_write8(rtwdev, R_AX_DBGSEL_TRXPTCL, val8);
- seq_puts(m, "Enable RMAC C0 dbgport.\n");
+ p += scnprintf(p, end - p, "Enable RMAC C0 dbgport.\n");
break;
case RTW89_DBG_PORT_SEL_RMAC_C1:
info = &dbg_port_rmac_c1;
@@ -2580,23 +2727,23 @@ rtw89_debug_mac_dbg_port_sel(struct seq_file *m,
val8 = u8_replace_bits(val8, RMAC_CMAC_DBG_SEL,
B_AX_DBGSEL_TRXPTCL_MASK);
rtw89_write8(rtwdev, R_AX_DBGSEL_TRXPTCL_C1, val8);
- seq_puts(m, "Enable RMAC C1 dbgport.\n");
+ p += scnprintf(p, end - p, "Enable RMAC C1 dbgport.\n");
break;
case RTW89_DBG_PORT_SEL_RMACST_C0:
info = &dbg_port_rmacst_c0;
- seq_puts(m, "Enable RMAC state C0 dbgport.\n");
+ p += scnprintf(p, end - p, "Enable RMAC state C0 dbgport.\n");
break;
case RTW89_DBG_PORT_SEL_RMACST_C1:
info = &dbg_port_rmacst_c1;
- seq_puts(m, "Enable RMAC state C1 dbgport.\n");
+ p += scnprintf(p, end - p, "Enable RMAC state C1 dbgport.\n");
break;
case RTW89_DBG_PORT_SEL_RMAC_PLCP_C0:
info = &dbg_port_rmac_plcp_c0;
- seq_puts(m, "Enable RMAC PLCP C0 dbgport.\n");
+ p += scnprintf(p, end - p, "Enable RMAC PLCP C0 dbgport.\n");
break;
case RTW89_DBG_PORT_SEL_RMAC_PLCP_C1:
info = &dbg_port_rmac_plcp_c1;
- seq_puts(m, "Enable RMAC PLCP C1 dbgport.\n");
+ p += scnprintf(p, end - p, "Enable RMAC PLCP C1 dbgport.\n");
break;
case RTW89_DBG_PORT_SEL_TRXPTCL_C0:
info = &dbg_port_trxptcl_c0;
@@ -2608,7 +2755,7 @@ rtw89_debug_mac_dbg_port_sel(struct seq_file *m,
val32 = rtw89_read32(rtwdev, R_AX_SYS_STATUS1);
val32 = u32_replace_bits(val32, MAC_DBG_SEL, B_AX_SEL_0XC0_MASK);
rtw89_write32(rtwdev, R_AX_SYS_STATUS1, val32);
- seq_puts(m, "Enable TRXPTCL C0 dbgport.\n");
+ p += scnprintf(p, end - p, "Enable TRXPTCL C0 dbgport.\n");
break;
case RTW89_DBG_PORT_SEL_TRXPTCL_C1:
info = &dbg_port_trxptcl_c1;
@@ -2620,131 +2767,137 @@ rtw89_debug_mac_dbg_port_sel(struct seq_file *m,
val32 = rtw89_read32(rtwdev, R_AX_SYS_STATUS1);
val32 = u32_replace_bits(val32, MAC_DBG_SEL, B_AX_SEL_0XC0_MASK);
rtw89_write32(rtwdev, R_AX_SYS_STATUS1, val32);
- seq_puts(m, "Enable TRXPTCL C1 dbgport.\n");
+ p += scnprintf(p, end - p, "Enable TRXPTCL C1 dbgport.\n");
break;
case RTW89_DBG_PORT_SEL_TX_INFOL_C0:
info = &dbg_port_tx_infol_c0;
val32 = rtw89_read32(rtwdev, R_AX_TCR1);
val32 |= B_AX_TCR_FORCE_READ_TXDFIFO;
rtw89_write32(rtwdev, R_AX_TCR1, val32);
- seq_puts(m, "Enable tx infol dump.\n");
+ p += scnprintf(p, end - p, "Enable tx infol dump.\n");
break;
case RTW89_DBG_PORT_SEL_TX_INFOH_C0:
info = &dbg_port_tx_infoh_c0;
val32 = rtw89_read32(rtwdev, R_AX_TCR1);
val32 |= B_AX_TCR_FORCE_READ_TXDFIFO;
rtw89_write32(rtwdev, R_AX_TCR1, val32);
- seq_puts(m, "Enable tx infoh dump.\n");
+ p += scnprintf(p, end - p, "Enable tx infoh dump.\n");
break;
case RTW89_DBG_PORT_SEL_TX_INFOL_C1:
info = &dbg_port_tx_infol_c1;
val32 = rtw89_read32(rtwdev, R_AX_TCR1_C1);
val32 |= B_AX_TCR_FORCE_READ_TXDFIFO;
rtw89_write32(rtwdev, R_AX_TCR1_C1, val32);
- seq_puts(m, "Enable tx infol dump.\n");
+ p += scnprintf(p, end - p, "Enable tx infol dump.\n");
break;
case RTW89_DBG_PORT_SEL_TX_INFOH_C1:
info = &dbg_port_tx_infoh_c1;
val32 = rtw89_read32(rtwdev, R_AX_TCR1_C1);
val32 |= B_AX_TCR_FORCE_READ_TXDFIFO;
rtw89_write32(rtwdev, R_AX_TCR1_C1, val32);
- seq_puts(m, "Enable tx infoh dump.\n");
+ p += scnprintf(p, end - p, "Enable tx infoh dump.\n");
break;
case RTW89_DBG_PORT_SEL_TXTF_INFOL_C0:
info = &dbg_port_txtf_infol_c0;
val32 = rtw89_read32(rtwdev, R_AX_TCR1);
val32 |= B_AX_TCR_FORCE_READ_TXDFIFO;
rtw89_write32(rtwdev, R_AX_TCR1, val32);
- seq_puts(m, "Enable tx tf infol dump.\n");
+ p += scnprintf(p, end - p, "Enable tx tf infol dump.\n");
break;
case RTW89_DBG_PORT_SEL_TXTF_INFOH_C0:
info = &dbg_port_txtf_infoh_c0;
val32 = rtw89_read32(rtwdev, R_AX_TCR1);
val32 |= B_AX_TCR_FORCE_READ_TXDFIFO;
rtw89_write32(rtwdev, R_AX_TCR1, val32);
- seq_puts(m, "Enable tx tf infoh dump.\n");
+ p += scnprintf(p, end - p, "Enable tx tf infoh dump.\n");
break;
case RTW89_DBG_PORT_SEL_TXTF_INFOL_C1:
info = &dbg_port_txtf_infol_c1;
val32 = rtw89_read32(rtwdev, R_AX_TCR1_C1);
val32 |= B_AX_TCR_FORCE_READ_TXDFIFO;
rtw89_write32(rtwdev, R_AX_TCR1_C1, val32);
- seq_puts(m, "Enable tx tf infol dump.\n");
+ p += scnprintf(p, end - p, "Enable tx tf infol dump.\n");
break;
case RTW89_DBG_PORT_SEL_TXTF_INFOH_C1:
info = &dbg_port_txtf_infoh_c1;
val32 = rtw89_read32(rtwdev, R_AX_TCR1_C1);
val32 |= B_AX_TCR_FORCE_READ_TXDFIFO;
rtw89_write32(rtwdev, R_AX_TCR1_C1, val32);
- seq_puts(m, "Enable tx tf infoh dump.\n");
+ p += scnprintf(p, end - p, "Enable tx tf infoh dump.\n");
break;
case RTW89_DBG_PORT_SEL_WDE_BUFMGN_FREEPG:
info = &dbg_port_wde_bufmgn_freepg;
- seq_puts(m, "Enable wde bufmgn freepg dump.\n");
+ p += scnprintf(p, end - p, "Enable wde bufmgn freepg dump.\n");
break;
case RTW89_DBG_PORT_SEL_WDE_BUFMGN_QUOTA:
info = &dbg_port_wde_bufmgn_quota;
- seq_puts(m, "Enable wde bufmgn quota dump.\n");
+ p += scnprintf(p, end - p, "Enable wde bufmgn quota dump.\n");
break;
case RTW89_DBG_PORT_SEL_WDE_BUFMGN_PAGELLT:
info = &dbg_port_wde_bufmgn_pagellt;
- seq_puts(m, "Enable wde bufmgn pagellt dump.\n");
+ p += scnprintf(p, end - p,
+ "Enable wde bufmgn pagellt dump.\n");
break;
case RTW89_DBG_PORT_SEL_WDE_BUFMGN_PKTINFO:
info = &dbg_port_wde_bufmgn_pktinfo;
- seq_puts(m, "Enable wde bufmgn pktinfo dump.\n");
+ p += scnprintf(p, end - p,
+ "Enable wde bufmgn pktinfo dump.\n");
break;
case RTW89_DBG_PORT_SEL_WDE_QUEMGN_PREPKT:
info = &dbg_port_wde_quemgn_prepkt;
- seq_puts(m, "Enable wde quemgn prepkt dump.\n");
+ p += scnprintf(p, end - p, "Enable wde quemgn prepkt dump.\n");
break;
case RTW89_DBG_PORT_SEL_WDE_QUEMGN_NXTPKT:
info = &dbg_port_wde_quemgn_nxtpkt;
- seq_puts(m, "Enable wde quemgn nxtpkt dump.\n");
+ p += scnprintf(p, end - p, "Enable wde quemgn nxtpkt dump.\n");
break;
case RTW89_DBG_PORT_SEL_WDE_QUEMGN_QLNKTBL:
info = &dbg_port_wde_quemgn_qlnktbl;
- seq_puts(m, "Enable wde quemgn qlnktbl dump.\n");
+ p += scnprintf(p, end - p,
+ "Enable wde quemgn qlnktbl dump.\n");
break;
case RTW89_DBG_PORT_SEL_WDE_QUEMGN_QEMPTY:
info = &dbg_port_wde_quemgn_qempty;
- seq_puts(m, "Enable wde quemgn qempty dump.\n");
+ p += scnprintf(p, end - p, "Enable wde quemgn qempty dump.\n");
break;
case RTW89_DBG_PORT_SEL_PLE_BUFMGN_FREEPG:
info = &dbg_port_ple_bufmgn_freepg;
- seq_puts(m, "Enable ple bufmgn freepg dump.\n");
+ p += scnprintf(p, end - p, "Enable ple bufmgn freepg dump.\n");
break;
case RTW89_DBG_PORT_SEL_PLE_BUFMGN_QUOTA:
info = &dbg_port_ple_bufmgn_quota;
- seq_puts(m, "Enable ple bufmgn quota dump.\n");
+ p += scnprintf(p, end - p, "Enable ple bufmgn quota dump.\n");
break;
case RTW89_DBG_PORT_SEL_PLE_BUFMGN_PAGELLT:
info = &dbg_port_ple_bufmgn_pagellt;
- seq_puts(m, "Enable ple bufmgn pagellt dump.\n");
+ p += scnprintf(p, end - p,
+ "Enable ple bufmgn pagellt dump.\n");
break;
case RTW89_DBG_PORT_SEL_PLE_BUFMGN_PKTINFO:
info = &dbg_port_ple_bufmgn_pktinfo;
- seq_puts(m, "Enable ple bufmgn pktinfo dump.\n");
+ p += scnprintf(p, end - p,
+ "Enable ple bufmgn pktinfo dump.\n");
break;
case RTW89_DBG_PORT_SEL_PLE_QUEMGN_PREPKT:
info = &dbg_port_ple_quemgn_prepkt;
- seq_puts(m, "Enable ple quemgn prepkt dump.\n");
+ p += scnprintf(p, end - p, "Enable ple quemgn prepkt dump.\n");
break;
case RTW89_DBG_PORT_SEL_PLE_QUEMGN_NXTPKT:
info = &dbg_port_ple_quemgn_nxtpkt;
- seq_puts(m, "Enable ple quemgn nxtpkt dump.\n");
+ p += scnprintf(p, end - p, "Enable ple quemgn nxtpkt dump.\n");
break;
case RTW89_DBG_PORT_SEL_PLE_QUEMGN_QLNKTBL:
info = &dbg_port_ple_quemgn_qlnktbl;
- seq_puts(m, "Enable ple quemgn qlnktbl dump.\n");
+ p += scnprintf(p, end - p,
+ "Enable ple quemgn qlnktbl dump.\n");
break;
case RTW89_DBG_PORT_SEL_PLE_QUEMGN_QEMPTY:
info = &dbg_port_ple_quemgn_qempty;
- seq_puts(m, "Enable ple quemgn qempty dump.\n");
+ p += scnprintf(p, end - p, "Enable ple quemgn qempty dump.\n");
break;
case RTW89_DBG_PORT_SEL_PKTINFO:
info = &dbg_port_pktinfo;
- seq_puts(m, "Enable pktinfo dump.\n");
+ p += scnprintf(p, end - p, "Enable pktinfo dump.\n");
break;
case RTW89_DBG_PORT_SEL_DSPT_HDT_TX0:
rtw89_write32_mask(rtwdev, R_AX_DBG_CTRL,
@@ -2763,7 +2916,8 @@ rtw89_debug_mac_dbg_port_sel(struct seq_file *m,
B_AX_DISPATCHER_INTN_SEL_MASK, 0);
rtw89_write16_mask(rtwdev, info->sel_addr,
B_AX_DISPATCHER_CH_SEL_MASK, index);
- seq_printf(m, "Enable Dispatcher hdt tx%x dump.\n", index);
+ p += scnprintf(p, end - p,
+ "Enable Dispatcher hdt tx%x dump.\n", index);
break;
case RTW89_DBG_PORT_SEL_DSPT_HDT_TX6:
info = &dbg_port_dspt_hdt_tx6;
@@ -2771,7 +2925,8 @@ rtw89_debug_mac_dbg_port_sel(struct seq_file *m,
B_AX_DISPATCHER_INTN_SEL_MASK, 0);
rtw89_write16_mask(rtwdev, info->sel_addr,
B_AX_DISPATCHER_CH_SEL_MASK, 6);
- seq_puts(m, "Enable Dispatcher hdt tx6 dump.\n");
+ p += scnprintf(p, end - p,
+ "Enable Dispatcher hdt tx6 dump.\n");
break;
case RTW89_DBG_PORT_SEL_DSPT_HDT_TX7:
info = &dbg_port_dspt_hdt_tx7;
@@ -2779,7 +2934,8 @@ rtw89_debug_mac_dbg_port_sel(struct seq_file *m,
B_AX_DISPATCHER_INTN_SEL_MASK, 0);
rtw89_write16_mask(rtwdev, info->sel_addr,
B_AX_DISPATCHER_CH_SEL_MASK, 7);
- seq_puts(m, "Enable Dispatcher hdt tx7 dump.\n");
+ p += scnprintf(p, end - p,
+ "Enable Dispatcher hdt tx7 dump.\n");
break;
case RTW89_DBG_PORT_SEL_DSPT_HDT_TX8:
info = &dbg_port_dspt_hdt_tx8;
@@ -2787,7 +2943,8 @@ rtw89_debug_mac_dbg_port_sel(struct seq_file *m,
B_AX_DISPATCHER_INTN_SEL_MASK, 0);
rtw89_write16_mask(rtwdev, info->sel_addr,
B_AX_DISPATCHER_CH_SEL_MASK, 8);
- seq_puts(m, "Enable Dispatcher hdt tx8 dump.\n");
+ p += scnprintf(p, end - p,
+ "Enable Dispatcher hdt tx8 dump.\n");
break;
case RTW89_DBG_PORT_SEL_DSPT_HDT_TX9:
case RTW89_DBG_PORT_SEL_DSPT_HDT_TXA:
@@ -2799,7 +2956,8 @@ rtw89_debug_mac_dbg_port_sel(struct seq_file *m,
B_AX_DISPATCHER_INTN_SEL_MASK, 0);
rtw89_write16_mask(rtwdev, info->sel_addr,
B_AX_DISPATCHER_CH_SEL_MASK, index);
- seq_printf(m, "Enable Dispatcher hdt tx%x dump.\n", index);
+ p += scnprintf(p, end - p,
+ "Enable Dispatcher hdt tx%x dump.\n", index);
break;
case RTW89_DBG_PORT_SEL_DSPT_HDT_TXD:
info = &dbg_port_dspt_hdt_txD;
@@ -2807,7 +2965,8 @@ rtw89_debug_mac_dbg_port_sel(struct seq_file *m,
B_AX_DISPATCHER_INTN_SEL_MASK, 0);
rtw89_write16_mask(rtwdev, info->sel_addr,
B_AX_DISPATCHER_CH_SEL_MASK, 0xD);
- seq_puts(m, "Enable Dispatcher hdt txD dump.\n");
+ p += scnprintf(p, end - p,
+ "Enable Dispatcher hdt txD dump.\n");
break;
case RTW89_DBG_PORT_SEL_DSPT_CDT_TX0:
info = &dbg_port_dspt_cdt_tx0;
@@ -2815,7 +2974,8 @@ rtw89_debug_mac_dbg_port_sel(struct seq_file *m,
B_AX_DISPATCHER_INTN_SEL_MASK, 1);
rtw89_write16_mask(rtwdev, info->sel_addr,
B_AX_DISPATCHER_CH_SEL_MASK, 0);
- seq_puts(m, "Enable Dispatcher cdt tx0 dump.\n");
+ p += scnprintf(p, end - p,
+ "Enable Dispatcher cdt tx0 dump.\n");
break;
case RTW89_DBG_PORT_SEL_DSPT_CDT_TX1:
info = &dbg_port_dspt_cdt_tx1;
@@ -2823,7 +2983,8 @@ rtw89_debug_mac_dbg_port_sel(struct seq_file *m,
B_AX_DISPATCHER_INTN_SEL_MASK, 1);
rtw89_write16_mask(rtwdev, info->sel_addr,
B_AX_DISPATCHER_CH_SEL_MASK, 1);
- seq_puts(m, "Enable Dispatcher cdt tx1 dump.\n");
+ p += scnprintf(p, end - p,
+ "Enable Dispatcher cdt tx1 dump.\n");
break;
case RTW89_DBG_PORT_SEL_DSPT_CDT_TX3:
info = &dbg_port_dspt_cdt_tx3;
@@ -2831,7 +2992,8 @@ rtw89_debug_mac_dbg_port_sel(struct seq_file *m,
B_AX_DISPATCHER_INTN_SEL_MASK, 1);
rtw89_write16_mask(rtwdev, info->sel_addr,
B_AX_DISPATCHER_CH_SEL_MASK, 3);
- seq_puts(m, "Enable Dispatcher cdt tx3 dump.\n");
+ p += scnprintf(p, end - p,
+ "Enable Dispatcher cdt tx3 dump.\n");
break;
case RTW89_DBG_PORT_SEL_DSPT_CDT_TX4:
info = &dbg_port_dspt_cdt_tx4;
@@ -2839,7 +3001,8 @@ rtw89_debug_mac_dbg_port_sel(struct seq_file *m,
B_AX_DISPATCHER_INTN_SEL_MASK, 1);
rtw89_write16_mask(rtwdev, info->sel_addr,
B_AX_DISPATCHER_CH_SEL_MASK, 4);
- seq_puts(m, "Enable Dispatcher cdt tx4 dump.\n");
+ p += scnprintf(p, end - p,
+ "Enable Dispatcher cdt tx4 dump.\n");
break;
case RTW89_DBG_PORT_SEL_DSPT_CDT_TX5:
case RTW89_DBG_PORT_SEL_DSPT_CDT_TX6:
@@ -2851,7 +3014,8 @@ rtw89_debug_mac_dbg_port_sel(struct seq_file *m,
B_AX_DISPATCHER_INTN_SEL_MASK, 1);
rtw89_write16_mask(rtwdev, info->sel_addr,
B_AX_DISPATCHER_CH_SEL_MASK, index);
- seq_printf(m, "Enable Dispatcher cdt tx%x dump.\n", index);
+ p += scnprintf(p, end - p,
+ "Enable Dispatcher cdt tx%x dump.\n", index);
break;
case RTW89_DBG_PORT_SEL_DSPT_CDT_TX9:
info = &dbg_port_dspt_cdt_tx9;
@@ -2859,7 +3023,8 @@ rtw89_debug_mac_dbg_port_sel(struct seq_file *m,
B_AX_DISPATCHER_INTN_SEL_MASK, 1);
rtw89_write16_mask(rtwdev, info->sel_addr,
B_AX_DISPATCHER_CH_SEL_MASK, 9);
- seq_puts(m, "Enable Dispatcher cdt tx9 dump.\n");
+ p += scnprintf(p, end - p,
+ "Enable Dispatcher cdt tx9 dump.\n");
break;
case RTW89_DBG_PORT_SEL_DSPT_CDT_TXA:
case RTW89_DBG_PORT_SEL_DSPT_CDT_TXB:
@@ -2870,7 +3035,8 @@ rtw89_debug_mac_dbg_port_sel(struct seq_file *m,
B_AX_DISPATCHER_INTN_SEL_MASK, 1);
rtw89_write16_mask(rtwdev, info->sel_addr,
B_AX_DISPATCHER_CH_SEL_MASK, index);
- seq_printf(m, "Enable Dispatcher cdt tx%x dump.\n", index);
+ p += scnprintf(p, end - p,
+ "Enable Dispatcher cdt tx%x dump.\n", index);
break;
case RTW89_DBG_PORT_SEL_DSPT_HDT_RX0:
info = &dbg_port_dspt_hdt_rx0;
@@ -2878,7 +3044,8 @@ rtw89_debug_mac_dbg_port_sel(struct seq_file *m,
B_AX_DISPATCHER_INTN_SEL_MASK, 2);
rtw89_write16_mask(rtwdev, info->sel_addr,
B_AX_DISPATCHER_CH_SEL_MASK, 0);
- seq_puts(m, "Enable Dispatcher hdt rx0 dump.\n");
+ p += scnprintf(p, end - p,
+ "Enable Dispatcher hdt rx0 dump.\n");
break;
case RTW89_DBG_PORT_SEL_DSPT_HDT_RX1:
case RTW89_DBG_PORT_SEL_DSPT_HDT_RX2:
@@ -2888,7 +3055,8 @@ rtw89_debug_mac_dbg_port_sel(struct seq_file *m,
B_AX_DISPATCHER_INTN_SEL_MASK, 2);
rtw89_write16_mask(rtwdev, info->sel_addr,
B_AX_DISPATCHER_CH_SEL_MASK, index);
- seq_printf(m, "Enable Dispatcher hdt rx%x dump.\n", index);
+ p += scnprintf(p, end - p,
+ "Enable Dispatcher hdt rx%x dump.\n", index);
break;
case RTW89_DBG_PORT_SEL_DSPT_HDT_RX3:
info = &dbg_port_dspt_hdt_rx3;
@@ -2896,7 +3064,8 @@ rtw89_debug_mac_dbg_port_sel(struct seq_file *m,
B_AX_DISPATCHER_INTN_SEL_MASK, 2);
rtw89_write16_mask(rtwdev, info->sel_addr,
B_AX_DISPATCHER_CH_SEL_MASK, 3);
- seq_puts(m, "Enable Dispatcher hdt rx3 dump.\n");
+ p += scnprintf(p, end - p,
+ "Enable Dispatcher hdt rx3 dump.\n");
break;
case RTW89_DBG_PORT_SEL_DSPT_HDT_RX4:
info = &dbg_port_dspt_hdt_rx4;
@@ -2904,7 +3073,8 @@ rtw89_debug_mac_dbg_port_sel(struct seq_file *m,
B_AX_DISPATCHER_INTN_SEL_MASK, 2);
rtw89_write16_mask(rtwdev, info->sel_addr,
B_AX_DISPATCHER_CH_SEL_MASK, 4);
- seq_puts(m, "Enable Dispatcher hdt rx4 dump.\n");
+ p += scnprintf(p, end - p,
+ "Enable Dispatcher hdt rx4 dump.\n");
break;
case RTW89_DBG_PORT_SEL_DSPT_HDT_RX5:
info = &dbg_port_dspt_hdt_rx5;
@@ -2912,7 +3082,8 @@ rtw89_debug_mac_dbg_port_sel(struct seq_file *m,
B_AX_DISPATCHER_INTN_SEL_MASK, 2);
rtw89_write16_mask(rtwdev, info->sel_addr,
B_AX_DISPATCHER_CH_SEL_MASK, 5);
- seq_puts(m, "Enable Dispatcher hdt rx5 dump.\n");
+ p += scnprintf(p, end - p,
+ "Enable Dispatcher hdt rx5 dump.\n");
break;
case RTW89_DBG_PORT_SEL_DSPT_CDT_RX_P0_0:
info = &dbg_port_dspt_cdt_rx_p0_0;
@@ -2920,7 +3091,8 @@ rtw89_debug_mac_dbg_port_sel(struct seq_file *m,
B_AX_DISPATCHER_INTN_SEL_MASK, 3);
rtw89_write16_mask(rtwdev, info->sel_addr,
B_AX_DISPATCHER_CH_SEL_MASK, 0);
- seq_puts(m, "Enable Dispatcher cdt rx part0 0 dump.\n");
+ p += scnprintf(p, end - p,
+ "Enable Dispatcher cdt rx part0 0 dump.\n");
break;
case RTW89_DBG_PORT_SEL_DSPT_CDT_RX_P0:
case RTW89_DBG_PORT_SEL_DSPT_CDT_RX_P0_1:
@@ -2929,7 +3101,8 @@ rtw89_debug_mac_dbg_port_sel(struct seq_file *m,
B_AX_DISPATCHER_INTN_SEL_MASK, 3);
rtw89_write16_mask(rtwdev, info->sel_addr,
B_AX_DISPATCHER_CH_SEL_MASK, 1);
- seq_puts(m, "Enable Dispatcher cdt rx part0 1 dump.\n");
+ p += scnprintf(p, end - p,
+ "Enable Dispatcher cdt rx part0 1 dump.\n");
break;
case RTW89_DBG_PORT_SEL_DSPT_CDT_RX_P0_2:
info = &dbg_port_dspt_cdt_rx_p0_2;
@@ -2937,43 +3110,50 @@ rtw89_debug_mac_dbg_port_sel(struct seq_file *m,
B_AX_DISPATCHER_INTN_SEL_MASK, 3);
rtw89_write16_mask(rtwdev, info->sel_addr,
B_AX_DISPATCHER_CH_SEL_MASK, 2);
- seq_puts(m, "Enable Dispatcher cdt rx part0 2 dump.\n");
+ p += scnprintf(p, end - p,
+ "Enable Dispatcher cdt rx part0 2 dump.\n");
break;
case RTW89_DBG_PORT_SEL_DSPT_CDT_RX_P1:
info = &dbg_port_dspt_cdt_rx_p1;
rtw89_write8_mask(rtwdev, info->sel_addr,
B_AX_DISPATCHER_INTN_SEL_MASK, 3);
- seq_puts(m, "Enable Dispatcher cdt rx part1 dump.\n");
+ p += scnprintf(p, end - p,
+ "Enable Dispatcher cdt rx part1 dump.\n");
break;
case RTW89_DBG_PORT_SEL_DSPT_STF_CTRL:
info = &dbg_port_dspt_stf_ctrl;
rtw89_write8_mask(rtwdev, info->sel_addr,
B_AX_DISPATCHER_INTN_SEL_MASK, 4);
- seq_puts(m, "Enable Dispatcher stf control dump.\n");
+ p += scnprintf(p, end - p,
+ "Enable Dispatcher stf control dump.\n");
break;
case RTW89_DBG_PORT_SEL_DSPT_ADDR_CTRL:
info = &dbg_port_dspt_addr_ctrl;
rtw89_write8_mask(rtwdev, info->sel_addr,
B_AX_DISPATCHER_INTN_SEL_MASK, 5);
- seq_puts(m, "Enable Dispatcher addr control dump.\n");
+ p += scnprintf(p, end - p,
+ "Enable Dispatcher addr control dump.\n");
break;
case RTW89_DBG_PORT_SEL_DSPT_WDE_INTF:
info = &dbg_port_dspt_wde_intf;
rtw89_write8_mask(rtwdev, info->sel_addr,
B_AX_DISPATCHER_INTN_SEL_MASK, 6);
- seq_puts(m, "Enable Dispatcher wde interface dump.\n");
+ p += scnprintf(p, end - p,
+ "Enable Dispatcher wde interface dump.\n");
break;
case RTW89_DBG_PORT_SEL_DSPT_PLE_INTF:
info = &dbg_port_dspt_ple_intf;
rtw89_write8_mask(rtwdev, info->sel_addr,
B_AX_DISPATCHER_INTN_SEL_MASK, 7);
- seq_puts(m, "Enable Dispatcher ple interface dump.\n");
+ p += scnprintf(p, end - p,
+ "Enable Dispatcher ple interface dump.\n");
break;
case RTW89_DBG_PORT_SEL_DSPT_FLOW_CTRL:
info = &dbg_port_dspt_flow_ctrl;
rtw89_write8_mask(rtwdev, info->sel_addr,
B_AX_DISPATCHER_INTN_SEL_MASK, 8);
- seq_puts(m, "Enable Dispatcher flow control dump.\n");
+ p += scnprintf(p, end - p,
+ "Enable Dispatcher flow control dump.\n");
break;
case RTW89_DBG_PORT_SEL_PCIE_TXDMA:
info = &dbg_port_pcie_txdma;
@@ -2981,7 +3161,7 @@ rtw89_debug_mac_dbg_port_sel(struct seq_file *m,
val32 = u32_replace_bits(val32, PCIE_TXDMA_DBG_SEL, B_AX_DBG_SEL0);
val32 = u32_replace_bits(val32, PCIE_TXDMA_DBG_SEL, B_AX_DBG_SEL1);
rtw89_write32(rtwdev, R_AX_DBG_CTRL, val32);
- seq_puts(m, "Enable pcie txdma dump.\n");
+ p += scnprintf(p, end - p, "Enable pcie txdma dump.\n");
break;
case RTW89_DBG_PORT_SEL_PCIE_RXDMA:
info = &dbg_port_pcie_rxdma;
@@ -2989,7 +3169,7 @@ rtw89_debug_mac_dbg_port_sel(struct seq_file *m,
val32 = u32_replace_bits(val32, PCIE_RXDMA_DBG_SEL, B_AX_DBG_SEL0);
val32 = u32_replace_bits(val32, PCIE_RXDMA_DBG_SEL, B_AX_DBG_SEL1);
rtw89_write32(rtwdev, R_AX_DBG_CTRL, val32);
- seq_puts(m, "Enable pcie rxdma dump.\n");
+ p += scnprintf(p, end - p, "Enable pcie rxdma dump.\n");
break;
case RTW89_DBG_PORT_SEL_PCIE_CVT:
info = &dbg_port_pcie_cvt;
@@ -2997,7 +3177,7 @@ rtw89_debug_mac_dbg_port_sel(struct seq_file *m,
val32 = u32_replace_bits(val32, PCIE_CVT_DBG_SEL, B_AX_DBG_SEL0);
val32 = u32_replace_bits(val32, PCIE_CVT_DBG_SEL, B_AX_DBG_SEL1);
rtw89_write32(rtwdev, R_AX_DBG_CTRL, val32);
- seq_puts(m, "Enable pcie cvt dump.\n");
+ p += scnprintf(p, end - p, "Enable pcie cvt dump.\n");
break;
case RTW89_DBG_PORT_SEL_PCIE_CXPL:
info = &dbg_port_pcie_cxpl;
@@ -3005,7 +3185,7 @@ rtw89_debug_mac_dbg_port_sel(struct seq_file *m,
val32 = u32_replace_bits(val32, PCIE_CXPL_DBG_SEL, B_AX_DBG_SEL0);
val32 = u32_replace_bits(val32, PCIE_CXPL_DBG_SEL, B_AX_DBG_SEL1);
rtw89_write32(rtwdev, R_AX_DBG_CTRL, val32);
- seq_puts(m, "Enable pcie cxpl dump.\n");
+ p += scnprintf(p, end - p, "Enable pcie cxpl dump.\n");
break;
case RTW89_DBG_PORT_SEL_PCIE_IO:
info = &dbg_port_pcie_io;
@@ -3013,7 +3193,7 @@ rtw89_debug_mac_dbg_port_sel(struct seq_file *m,
val32 = u32_replace_bits(val32, PCIE_IO_DBG_SEL, B_AX_DBG_SEL0);
val32 = u32_replace_bits(val32, PCIE_IO_DBG_SEL, B_AX_DBG_SEL1);
rtw89_write32(rtwdev, R_AX_DBG_CTRL, val32);
- seq_puts(m, "Enable pcie io dump.\n");
+ p += scnprintf(p, end - p, "Enable pcie io dump.\n");
break;
case RTW89_DBG_PORT_SEL_PCIE_MISC:
info = &dbg_port_pcie_misc;
@@ -3021,7 +3201,7 @@ rtw89_debug_mac_dbg_port_sel(struct seq_file *m,
val32 = u32_replace_bits(val32, PCIE_MISC_DBG_SEL, B_AX_DBG_SEL0);
val32 = u32_replace_bits(val32, PCIE_MISC_DBG_SEL, B_AX_DBG_SEL1);
rtw89_write32(rtwdev, R_AX_DBG_CTRL, val32);
- seq_puts(m, "Enable pcie misc dump.\n");
+ p += scnprintf(p, end - p, "Enable pcie misc dump.\n");
break;
case RTW89_DBG_PORT_SEL_PCIE_MISC2:
info = &dbg_port_pcie_misc2;
@@ -3029,14 +3209,16 @@ rtw89_debug_mac_dbg_port_sel(struct seq_file *m,
val16 = u16_replace_bits(val16, PCIE_MISC2_DBG_SEL,
B_AX_PCIE_DBG_SEL_MASK);
rtw89_write16(rtwdev, R_AX_PCIE_DBG_CTRL, val16);
- seq_puts(m, "Enable pcie misc2 dump.\n");
+ p += scnprintf(p, end - p, "Enable pcie misc2 dump.\n");
break;
default:
- seq_puts(m, "Dbg port select err\n");
- return NULL;
+ p += scnprintf(p, end - p, "Dbg port select err\n");
+ break;
}
- return info;
+ *ppinfo = info;
+
+ return p - buf;
}
static bool is_dbg_port_valid(struct rtw89_dev *rtwdev, u32 sel)
@@ -3070,23 +3252,25 @@ static bool is_dbg_port_valid(struct rtw89_dev *rtwdev, u32 sel)
}
static int rtw89_debug_mac_dbg_port_dump(struct rtw89_dev *rtwdev,
- struct seq_file *m, u32 sel)
+ char *buf, size_t bufsz, u32 sel)
{
- const struct rtw89_mac_dbg_port_info *info;
- u8 val8;
- u16 val16;
+ const struct rtw89_mac_dbg_port_info *info = NULL;
+ char *p = buf, *end = buf + bufsz;
u32 val32;
+ u16 val16;
+ u8 val8;
u32 i;
- info = rtw89_debug_mac_dbg_port_sel(m, rtwdev, sel);
+ p += rtw89_debug_mac_dbg_port_sel(rtwdev, p, end - p, sel, &info);
+
if (!info) {
rtw89_err(rtwdev, "failed to select debug port %d\n", sel);
- return -EINVAL;
+ goto out;
}
#define case_DBG_SEL(__sel) \
case RTW89_DBG_PORT_SEL_##__sel: \
- seq_puts(m, "Dump debug port " #__sel ":\n"); \
+ p += scnprintf(p, end - p, "Dump debug port " #__sel ":\n"); \
break
switch (sel) {
@@ -3182,8 +3366,8 @@ static int rtw89_debug_mac_dbg_port_dump(struct rtw89_dev *rtwdev,
#undef case_DBG_SEL
- seq_printf(m, "Sel addr = 0x%X\n", info->sel_addr);
- seq_printf(m, "Read addr = 0x%X\n", info->rd_addr);
+ p += scnprintf(p, end - p, "Sel addr = 0x%X\n", info->sel_addr);
+ p += scnprintf(p, end - p, "Read addr = 0x%X\n", info->rd_addr);
for (i = info->srt; i <= info->end; i++) {
switch (info->sel_byte) {
@@ -3191,17 +3375,17 @@ static int rtw89_debug_mac_dbg_port_dump(struct rtw89_dev *rtwdev,
default:
rtw89_write8_mask(rtwdev, info->sel_addr,
info->sel_msk, i);
- seq_printf(m, "0x%02X: ", i);
+ p += scnprintf(p, end - p, "0x%02X: ", i);
break;
case 2:
rtw89_write16_mask(rtwdev, info->sel_addr,
info->sel_msk, i);
- seq_printf(m, "0x%04X: ", i);
+ p += scnprintf(p, end - p, "0x%04X: ", i);
break;
case 4:
rtw89_write32_mask(rtwdev, info->sel_addr,
info->sel_msk, i);
- seq_printf(m, "0x%04X: ", i);
+ p += scnprintf(p, end - p, "0x%04X: ", i);
break;
}
@@ -3212,77 +3396,75 @@ static int rtw89_debug_mac_dbg_port_dump(struct rtw89_dev *rtwdev,
default:
val8 = rtw89_read8_mask(rtwdev,
info->rd_addr, info->rd_msk);
- seq_printf(m, "0x%02X\n", val8);
+ p += scnprintf(p, end - p, "0x%02X\n", val8);
break;
case 2:
val16 = rtw89_read16_mask(rtwdev,
info->rd_addr, info->rd_msk);
- seq_printf(m, "0x%04X\n", val16);
+ p += scnprintf(p, end - p, "0x%04X\n", val16);
break;
case 4:
val32 = rtw89_read32_mask(rtwdev,
info->rd_addr, info->rd_msk);
- seq_printf(m, "0x%08X\n", val32);
+ p += scnprintf(p, end - p, "0x%08X\n", val32);
break;
}
}
- return 0;
+out:
+ return p - buf;
}
static int rtw89_debug_mac_dump_dbg_port(struct rtw89_dev *rtwdev,
- struct seq_file *m)
+ char *buf, size_t bufsz)
{
+ char *p = buf, *end = buf + bufsz;
+ ssize_t n;
u32 sel;
- int ret = 0;
for (sel = RTW89_DBG_PORT_SEL_PTCL_C0;
sel < RTW89_DBG_PORT_SEL_LAST; sel++) {
if (!is_dbg_port_valid(rtwdev, sel))
continue;
- ret = rtw89_debug_mac_dbg_port_dump(rtwdev, m, sel);
- if (ret) {
+ n = rtw89_debug_mac_dbg_port_dump(rtwdev, p, end - p, sel);
+ if (n < 0) {
rtw89_err(rtwdev,
"failed to dump debug port %d\n", sel);
break;
}
+ p += n;
}
- return ret;
+ return p - buf;
}
-static int
-rtw89_debug_priv_mac_dbg_port_dump_get(struct seq_file *m, void *v)
+static ssize_t
+rtw89_debug_priv_mac_dbg_port_dump_get(struct rtw89_dev *rtwdev,
+ struct rtw89_debugfs_priv *debugfs_priv,
+ char *buf, size_t bufsz)
{
- struct rtw89_debugfs_priv *debugfs_priv = m->private;
- struct rtw89_dev *rtwdev = debugfs_priv->rtwdev;
+ char *p = buf, *end = buf + bufsz;
if (debugfs_priv->dbgpkg_en.ss_dbg)
- rtw89_debug_mac_dump_ss_dbg(rtwdev, m);
+ p += rtw89_debug_mac_dump_ss_dbg(rtwdev, p, end - p);
if (debugfs_priv->dbgpkg_en.dle_dbg)
- rtw89_debug_mac_dump_dle_dbg(rtwdev, m);
+ p += rtw89_debug_mac_dump_dle_dbg(rtwdev, p, end - p);
if (debugfs_priv->dbgpkg_en.dmac_dbg)
- rtw89_debug_mac_dump_dmac_dbg(rtwdev, m);
+ p += rtw89_debug_mac_dump_dmac_dbg(rtwdev, p, end - p);
if (debugfs_priv->dbgpkg_en.cmac_dbg)
- rtw89_debug_mac_dump_cmac_dbg(rtwdev, m);
+ p += rtw89_debug_mac_dump_cmac_dbg(rtwdev, p, end - p);
if (debugfs_priv->dbgpkg_en.dbg_port)
- rtw89_debug_mac_dump_dbg_port(rtwdev, m);
+ p += rtw89_debug_mac_dump_dbg_port(rtwdev, p, end - p);
- return 0;
+ return p - buf;
};
-static u8 *rtw89_hex2bin_user(struct rtw89_dev *rtwdev,
- const char __user *user_buf, size_t count)
+static u8 *rtw89_hex2bin(struct rtw89_dev *rtwdev, const char *buf, size_t count)
{
- char *buf;
u8 *bin;
int num;
int err = 0;
- buf = memdup_user(user_buf, count);
- if (IS_ERR(buf))
- return buf;
-
num = count / 2;
bin = kmalloc(num, GFP_KERNEL);
if (!bin) {
@@ -3297,22 +3479,18 @@ static u8 *rtw89_hex2bin_user(struct rtw89_dev *rtwdev,
}
out:
- kfree(buf);
-
return err ? ERR_PTR(err) : bin;
}
-static ssize_t rtw89_debug_priv_send_h2c_set(struct file *filp,
- const char __user *user_buf,
- size_t count, loff_t *loff)
+static ssize_t rtw89_debug_priv_send_h2c_set(struct rtw89_dev *rtwdev,
+ struct rtw89_debugfs_priv *debugfs_priv,
+ const char *buf, size_t count)
{
- struct rtw89_debugfs_priv *debugfs_priv = filp->private_data;
- struct rtw89_dev *rtwdev = debugfs_priv->rtwdev;
u8 *h2c;
int ret;
u16 h2c_len = count / 2;
- h2c = rtw89_hex2bin_user(rtwdev, user_buf, count);
+ h2c = rtw89_hex2bin(rtwdev, buf, count);
if (IS_ERR(h2c))
return -EFAULT;
@@ -3323,34 +3501,36 @@ static ssize_t rtw89_debug_priv_send_h2c_set(struct file *filp,
return ret ? ret : count;
}
-static int
-rtw89_debug_priv_early_h2c_get(struct seq_file *m, void *v)
+static ssize_t
+rtw89_debug_priv_early_h2c_get(struct rtw89_dev *rtwdev,
+ struct rtw89_debugfs_priv *debugfs_priv,
+ char *buf, size_t bufsz)
{
- struct rtw89_debugfs_priv *debugfs_priv = m->private;
- struct rtw89_dev *rtwdev = debugfs_priv->rtwdev;
struct rtw89_early_h2c *early_h2c;
+ char *p = buf, *end = buf + bufsz;
int seq = 0;
- mutex_lock(&rtwdev->mutex);
+ lockdep_assert_wiphy(rtwdev->hw->wiphy);
+
list_for_each_entry(early_h2c, &rtwdev->early_h2c_list, list)
- seq_printf(m, "%d: %*ph\n", ++seq, early_h2c->h2c_len, early_h2c->h2c);
- mutex_unlock(&rtwdev->mutex);
+ p += scnprintf(p, end - p, "%d: %*ph\n", ++seq,
+ early_h2c->h2c_len, early_h2c->h2c);
- return 0;
+ return p - buf;
}
static ssize_t
-rtw89_debug_priv_early_h2c_set(struct file *filp, const char __user *user_buf,
- size_t count, loff_t *loff)
+rtw89_debug_priv_early_h2c_set(struct rtw89_dev *rtwdev,
+ struct rtw89_debugfs_priv *debugfs_priv,
+ const char *buf, size_t count)
{
- struct seq_file *m = (struct seq_file *)filp->private_data;
- struct rtw89_debugfs_priv *debugfs_priv = m->private;
- struct rtw89_dev *rtwdev = debugfs_priv->rtwdev;
struct rtw89_early_h2c *early_h2c;
u8 *h2c;
u16 h2c_len = count / 2;
- h2c = rtw89_hex2bin_user(rtwdev, user_buf, count);
+ lockdep_assert_wiphy(rtwdev->hw->wiphy);
+
+ h2c = rtw89_hex2bin(rtwdev, buf, count);
if (IS_ERR(h2c))
return -EFAULT;
@@ -3369,9 +3549,7 @@ rtw89_debug_priv_early_h2c_set(struct file *filp, const char __user *user_buf,
early_h2c->h2c = h2c;
early_h2c->h2c_len = h2c_len;
- mutex_lock(&rtwdev->mutex);
list_add_tail(&early_h2c->list, &rtwdev->early_h2c_list);
- mutex_unlock(&rtwdev->mutex);
out:
return count;
@@ -3404,15 +3582,16 @@ static int rtw89_dbg_trigger_ctrl_error(struct rtw89_dev *rtwdev)
return 0;
}
-static int
-rtw89_debug_priv_fw_crash_get(struct seq_file *m, void *v)
+static ssize_t
+rtw89_debug_priv_fw_crash_get(struct rtw89_dev *rtwdev,
+ struct rtw89_debugfs_priv *debugfs_priv,
+ char *buf, size_t bufsz)
{
- struct rtw89_debugfs_priv *debugfs_priv = m->private;
- struct rtw89_dev *rtwdev = debugfs_priv->rtwdev;
+ char *p = buf, *end = buf + bufsz;
- seq_printf(m, "%d\n",
- test_bit(RTW89_FLAG_CRASH_SIMULATING, rtwdev->flags));
- return 0;
+ p += scnprintf(p, end - p, "%d\n",
+ test_bit(RTW89_FLAG_CRASH_SIMULATING, rtwdev->flags));
+ return p - buf;
}
enum rtw89_dbg_crash_simulation_type {
@@ -3421,17 +3600,17 @@ enum rtw89_dbg_crash_simulation_type {
};
static ssize_t
-rtw89_debug_priv_fw_crash_set(struct file *filp, const char __user *user_buf,
- size_t count, loff_t *loff)
+rtw89_debug_priv_fw_crash_set(struct rtw89_dev *rtwdev,
+ struct rtw89_debugfs_priv *debugfs_priv,
+ const char *buf, size_t count)
{
- struct seq_file *m = (struct seq_file *)filp->private_data;
- struct rtw89_debugfs_priv *debugfs_priv = m->private;
- struct rtw89_dev *rtwdev = debugfs_priv->rtwdev;
int (*sim)(struct rtw89_dev *rtwdev);
u8 crash_type;
int ret;
- ret = kstrtou8_from_user(user_buf, count, 0, &crash_type);
+ lockdep_assert_wiphy(rtwdev->hw->wiphy);
+
+ ret = kstrtou8(buf, 0, &crash_type);
if (ret)
return -EINVAL;
@@ -3448,10 +3627,8 @@ rtw89_debug_priv_fw_crash_set(struct file *filp, const char __user *user_buf,
return -EINVAL;
}
- mutex_lock(&rtwdev->mutex);
set_bit(RTW89_FLAG_CRASH_SIMULATING, rtwdev->flags);
ret = sim(rtwdev);
- mutex_unlock(&rtwdev->mutex);
if (ret)
return ret;
@@ -3459,27 +3636,22 @@ rtw89_debug_priv_fw_crash_set(struct file *filp, const char __user *user_buf,
return count;
}
-static int rtw89_debug_priv_btc_info_get(struct seq_file *m, void *v)
+static ssize_t rtw89_debug_priv_btc_info_get(struct rtw89_dev *rtwdev,
+ struct rtw89_debugfs_priv *debugfs_priv,
+ char *buf, size_t bufsz)
{
- struct rtw89_debugfs_priv *debugfs_priv = m->private;
- struct rtw89_dev *rtwdev = debugfs_priv->rtwdev;
-
- rtw89_btc_dump_info(rtwdev, m);
-
- return 0;
+ return rtw89_btc_dump_info(rtwdev, buf, bufsz);
}
-static ssize_t rtw89_debug_priv_btc_manual_set(struct file *filp,
- const char __user *user_buf,
- size_t count, loff_t *loff)
+static ssize_t rtw89_debug_priv_btc_manual_set(struct rtw89_dev *rtwdev,
+ struct rtw89_debugfs_priv *debugfs_priv,
+ const char *buf, size_t count)
{
- struct rtw89_debugfs_priv *debugfs_priv = filp->private_data;
- struct rtw89_dev *rtwdev = debugfs_priv->rtwdev;
struct rtw89_btc *btc = &rtwdev->btc;
const struct rtw89_btc_ver *ver = btc->ver;
int ret;
- ret = kstrtobool_from_user(user_buf, count, &btc->manual_ctrl);
+ ret = kstrtobool(buf, &btc->manual_ctrl);
if (ret)
return ret;
@@ -3491,31 +3663,29 @@ static ssize_t rtw89_debug_priv_btc_manual_set(struct file *filp,
return count;
}
-static ssize_t rtw89_debug_priv_fw_log_manual_set(struct file *filp,
- const char __user *user_buf,
- size_t count, loff_t *loff)
+static ssize_t rtw89_debug_priv_fw_log_manual_set(struct rtw89_dev *rtwdev,
+ struct rtw89_debugfs_priv *debugfs_priv,
+ const char *buf, size_t count)
{
- struct rtw89_debugfs_priv *debugfs_priv = filp->private_data;
- struct rtw89_dev *rtwdev = debugfs_priv->rtwdev;
struct rtw89_fw_log *log = &rtwdev->fw.log;
bool fw_log_manual;
- if (kstrtobool_from_user(user_buf, count, &fw_log_manual))
+ lockdep_assert_wiphy(rtwdev->hw->wiphy);
+
+ if (kstrtobool(buf, &fw_log_manual))
goto out;
- mutex_lock(&rtwdev->mutex);
log->enable = fw_log_manual;
if (log->enable)
rtw89_fw_log_prepare(rtwdev);
rtw89_fw_h2c_fw_log(rtwdev, fw_log_manual);
- mutex_unlock(&rtwdev->mutex);
out:
return count;
}
-static void rtw89_sta_link_info_get_iter(struct seq_file *m,
- struct rtw89_dev *rtwdev,
- struct rtw89_sta_link *rtwsta_link)
+static int rtw89_sta_link_info_get_iter(struct rtw89_dev *rtwdev,
+ char *buf, size_t bufsz,
+ struct rtw89_sta_link *rtwsta_link)
{
static const char * const he_gi_str[] = {
[NL80211_RATE_INFO_HE_GI_0_8] = "0.8",
@@ -3533,6 +3703,7 @@ static void rtw89_sta_link_info_get_iter(struct seq_file *m,
u8 ant_num = hal->ant_diversity ? 2 : rtwdev->chip->rf_path_num;
bool ant_asterisk = hal->tx_path_diversity || hal->ant_diversity;
struct ieee80211_link_sta *link_sta;
+ char *p = buf, *end = buf + bufsz;
u8 evm_min, evm_max, evm_1ss;
u16 max_rc_amsdu_len;
u8 rssi;
@@ -3546,107 +3717,136 @@ static void rtw89_sta_link_info_get_iter(struct seq_file *m,
rcu_read_unlock();
- seq_printf(m, "TX rate [%u, %u]: ", rtwsta_link->mac_id, rtwsta_link->link_id);
+ p += scnprintf(p, end - p, "TX rate [%u, %u]: ", rtwsta_link->mac_id,
+ rtwsta_link->link_id);
if (rate->flags & RATE_INFO_FLAGS_MCS)
- seq_printf(m, "HT MCS-%d%s", rate->mcs,
- rate->flags & RATE_INFO_FLAGS_SHORT_GI ? " SGI" : "");
+ p += scnprintf(p, end - p, "HT MCS-%d%s", rate->mcs,
+ rate->flags & RATE_INFO_FLAGS_SHORT_GI ? " SGI" : "");
else if (rate->flags & RATE_INFO_FLAGS_VHT_MCS)
- seq_printf(m, "VHT %dSS MCS-%d%s", rate->nss, rate->mcs,
- rate->flags & RATE_INFO_FLAGS_SHORT_GI ? " SGI" : "");
+ p += scnprintf(p, end - p, "VHT %dSS MCS-%d%s", rate->nss,
+ rate->mcs,
+ rate->flags & RATE_INFO_FLAGS_SHORT_GI ? " SGI" : "");
else if (rate->flags & RATE_INFO_FLAGS_HE_MCS)
- seq_printf(m, "HE %dSS MCS-%d GI:%s", rate->nss, rate->mcs,
- rate->he_gi <= NL80211_RATE_INFO_HE_GI_3_2 ?
- he_gi_str[rate->he_gi] : "N/A");
+ p += scnprintf(p, end - p, "HE %dSS MCS-%d GI:%s", rate->nss,
+ rate->mcs,
+ rate->he_gi <= NL80211_RATE_INFO_HE_GI_3_2 ?
+ he_gi_str[rate->he_gi] : "N/A");
else if (rate->flags & RATE_INFO_FLAGS_EHT_MCS)
- seq_printf(m, "EHT %dSS MCS-%d GI:%s", rate->nss, rate->mcs,
- rate->eht_gi < ARRAY_SIZE(eht_gi_str) ?
- eht_gi_str[rate->eht_gi] : "N/A");
+ p += scnprintf(p, end - p, "EHT %dSS MCS-%d GI:%s", rate->nss,
+ rate->mcs,
+ rate->eht_gi < ARRAY_SIZE(eht_gi_str) ?
+ eht_gi_str[rate->eht_gi] : "N/A");
else
- seq_printf(m, "Legacy %d", rate->legacy);
- seq_printf(m, "%s", rtwsta_link->ra_report.might_fallback_legacy ? " FB_G" : "");
- seq_printf(m, " BW:%u", rtw89_rate_info_bw_to_mhz(rate->bw));
- seq_printf(m, " (hw_rate=0x%x)", rtwsta_link->ra_report.hw_rate);
- seq_printf(m, " ==> agg_wait=%d (%d)\n", rtwsta_link->max_agg_wait,
- max_rc_amsdu_len);
-
- seq_printf(m, "RX rate [%u, %u]: ", rtwsta_link->mac_id, rtwsta_link->link_id);
+ p += scnprintf(p, end - p, "Legacy %d", rate->legacy);
+ p += scnprintf(p, end - p, "%s",
+ rtwsta_link->ra_report.might_fallback_legacy ? " FB_G" : "");
+ p += scnprintf(p, end - p, " BW:%u",
+ rtw89_rate_info_bw_to_mhz(rate->bw));
+ p += scnprintf(p, end - p, " (hw_rate=0x%x)",
+ rtwsta_link->ra_report.hw_rate);
+ p += scnprintf(p, end - p, " ==> agg_wait=%d (%d)\n",
+ rtwsta_link->max_agg_wait,
+ max_rc_amsdu_len);
+
+ p += scnprintf(p, end - p, "RX rate [%u, %u]: ", rtwsta_link->mac_id,
+ rtwsta_link->link_id);
switch (status->encoding) {
case RX_ENC_LEGACY:
- seq_printf(m, "Legacy %d", status->rate_idx +
- (status->band != NL80211_BAND_2GHZ ? 4 : 0));
+ p += scnprintf(p, end - p, "Legacy %d", status->rate_idx +
+ (status->band != NL80211_BAND_2GHZ ? 4 : 0));
break;
case RX_ENC_HT:
- seq_printf(m, "HT MCS-%d%s", status->rate_idx,
- status->enc_flags & RX_ENC_FLAG_SHORT_GI ? " SGI" : "");
+ p += scnprintf(p, end - p, "HT MCS-%d%s", status->rate_idx,
+ status->enc_flags & RX_ENC_FLAG_SHORT_GI ? " SGI" : "");
break;
case RX_ENC_VHT:
- seq_printf(m, "VHT %dSS MCS-%d%s", status->nss, status->rate_idx,
- status->enc_flags & RX_ENC_FLAG_SHORT_GI ? " SGI" : "");
+ p += scnprintf(p, end - p, "VHT %dSS MCS-%d%s", status->nss,
+ status->rate_idx,
+ status->enc_flags & RX_ENC_FLAG_SHORT_GI ? " SGI" : "");
break;
case RX_ENC_HE:
- seq_printf(m, "HE %dSS MCS-%d GI:%s", status->nss, status->rate_idx,
- status->he_gi <= NL80211_RATE_INFO_HE_GI_3_2 ?
- he_gi_str[status->he_gi] : "N/A");
+ p += scnprintf(p, end - p, "HE %dSS MCS-%d GI:%s",
+ status->nss, status->rate_idx,
+ status->he_gi <= NL80211_RATE_INFO_HE_GI_3_2 ?
+ he_gi_str[status->he_gi] : "N/A");
break;
case RX_ENC_EHT:
- seq_printf(m, "EHT %dSS MCS-%d GI:%s", status->nss, status->rate_idx,
- status->eht.gi < ARRAY_SIZE(eht_gi_str) ?
- eht_gi_str[status->eht.gi] : "N/A");
+ p += scnprintf(p, end - p, "EHT %dSS MCS-%d GI:%s",
+ status->nss, status->rate_idx,
+ status->eht.gi < ARRAY_SIZE(eht_gi_str) ?
+ eht_gi_str[status->eht.gi] : "N/A");
break;
}
- seq_printf(m, " BW:%u", rtw89_rate_info_bw_to_mhz(status->bw));
- seq_printf(m, " (hw_rate=0x%x)\n", rtwsta_link->rx_hw_rate);
+ p += scnprintf(p, end - p, " BW:%u",
+ rtw89_rate_info_bw_to_mhz(status->bw));
+ p += scnprintf(p, end - p, " (hw_rate=0x%x)\n",
+ rtwsta_link->rx_hw_rate);
rssi = ewma_rssi_read(&rtwsta_link->avg_rssi);
- seq_printf(m, "RSSI: %d dBm (raw=%d, prev=%d) [",
- RTW89_RSSI_RAW_TO_DBM(rssi), rssi, rtwsta_link->prev_rssi);
+ p += scnprintf(p, end - p, "RSSI: %d dBm (raw=%d, prev=%d) [",
+ RTW89_RSSI_RAW_TO_DBM(rssi), rssi,
+ rtwsta_link->prev_rssi);
for (i = 0; i < ant_num; i++) {
rssi = ewma_rssi_read(&rtwsta_link->rssi[i]);
- seq_printf(m, "%d%s%s", RTW89_RSSI_RAW_TO_DBM(rssi),
- ant_asterisk && (hal->antenna_tx & BIT(i)) ? "*" : "",
- i + 1 == ant_num ? "" : ", ");
+ p += scnprintf(p, end - p, "%d%s%s",
+ RTW89_RSSI_RAW_TO_DBM(rssi),
+ ant_asterisk && (hal->antenna_tx & BIT(i)) ? "*" : "",
+ i + 1 == ant_num ? "" : ", ");
}
- seq_puts(m, "]\n");
+ p += scnprintf(p, end - p, "]\n");
evm_1ss = ewma_evm_read(&rtwsta_link->evm_1ss);
- seq_printf(m, "EVM: [%2u.%02u, ", evm_1ss >> 2, (evm_1ss & 0x3) * 25);
+ p += scnprintf(p, end - p, "EVM: [%2u.%02u, ", evm_1ss >> 2,
+ (evm_1ss & 0x3) * 25);
for (i = 0; i < (hal->ant_diversity ? 2 : 1); i++) {
evm_min = ewma_evm_read(&rtwsta_link->evm_min[i]);
evm_max = ewma_evm_read(&rtwsta_link->evm_max[i]);
- seq_printf(m, "%s(%2u.%02u, %2u.%02u)", i == 0 ? "" : " ",
- evm_min >> 2, (evm_min & 0x3) * 25,
- evm_max >> 2, (evm_max & 0x3) * 25);
+ p += scnprintf(p, end - p, "%s(%2u.%02u, %2u.%02u)",
+ i == 0 ? "" : " ",
+ evm_min >> 2, (evm_min & 0x3) * 25,
+ evm_max >> 2, (evm_max & 0x3) * 25);
}
- seq_puts(m, "]\t");
+ p += scnprintf(p, end - p, "]\t");
snr = ewma_snr_read(&rtwsta_link->avg_snr);
- seq_printf(m, "SNR: %u\n", snr);
+ p += scnprintf(p, end - p, "SNR: %u\n", snr);
+
+ return p - buf;
}
static void rtw89_sta_info_get_iter(void *data, struct ieee80211_sta *sta)
{
- struct seq_file *m = (struct seq_file *)data;
+ struct rtw89_debugfs_iter_data *iter_data =
+ (struct rtw89_debugfs_iter_data *)data;
struct rtw89_sta *rtwsta = sta_to_rtwsta(sta);
struct rtw89_dev *rtwdev = rtwsta->rtwdev;
struct rtw89_sta_link *rtwsta_link;
+ size_t bufsz = iter_data->bufsz;
+ char *buf = iter_data->buf;
+ char *p = buf, *end = buf + bufsz;
unsigned int link_id;
rtw89_sta_for_each_link(rtwsta, rtwsta_link, link_id)
- rtw89_sta_link_info_get_iter(m, rtwdev, rtwsta_link);
+ p += rtw89_sta_link_info_get_iter(rtwdev, p, end - p, rtwsta_link);
+
+ rtw89_debugfs_iter_data_next(iter_data, p, end - p, p - buf);
}
-static void
-rtw89_debug_append_rx_rate(struct seq_file *m, struct rtw89_pkt_stat *pkt_stat,
+static int
+rtw89_debug_append_rx_rate(char *buf, size_t bufsz, struct rtw89_pkt_stat *pkt_stat,
enum rtw89_hw_rate first_rate, int len)
{
+ char *p = buf, *end = buf + bufsz;
int i;
for (i = 0; i < len; i++)
- seq_printf(m, "%s%u", i == 0 ? "" : ", ",
- pkt_stat->rx_rate_cnt[first_rate + i]);
+ p += scnprintf(p, end - p, "%s%u", i == 0 ? "" : ", ",
+ pkt_stat->rx_rate_cnt[first_rate + i]);
+
+ return p - buf;
}
#define FIRST_RATE_SAME(rate) {RTW89_HW_RATE_ ## rate, RTW89_HW_RATE_ ## rate}
@@ -3671,34 +3871,40 @@ static const struct rtw89_rx_rate_cnt_info {
{FIRST_RATE_GEV1(EHT_NSS2_MCS0), 14, 0, "EHT 2SS:"},
};
-static int rtw89_debug_priv_phy_info_get(struct seq_file *m, void *v)
+static ssize_t rtw89_debug_priv_phy_info_get(struct rtw89_dev *rtwdev,
+ struct rtw89_debugfs_priv *debugfs_priv,
+ char *buf, size_t bufsz)
{
- struct rtw89_debugfs_priv *debugfs_priv = m->private;
- struct rtw89_dev *rtwdev = debugfs_priv->rtwdev;
struct rtw89_traffic_stats *stats = &rtwdev->stats;
struct rtw89_pkt_stat *pkt_stat = &rtwdev->phystat.last_pkt_stat;
const struct rtw89_chip_info *chip = rtwdev->chip;
+ struct rtw89_debugfs_iter_data iter_data;
const struct rtw89_rx_rate_cnt_info *info;
struct rtw89_hal *hal = &rtwdev->hal;
+ char *p = buf, *end = buf + bufsz;
enum rtw89_hw_rate first_rate;
u8 rssi;
int i;
rssi = ewma_rssi_read(&rtwdev->phystat.bcn_rssi);
- seq_printf(m, "TP TX: %u [%u] Mbps (lv: %d",
- stats->tx_throughput, stats->tx_throughput_raw, stats->tx_tfc_lv);
+ p += scnprintf(p, end - p, "TP TX: %u [%u] Mbps (lv: %d",
+ stats->tx_throughput, stats->tx_throughput_raw,
+ stats->tx_tfc_lv);
if (hal->thermal_prot_lv)
- seq_printf(m, ", duty: %d%%",
- 100 - hal->thermal_prot_lv * RTW89_THERMAL_PROT_STEP);
- seq_printf(m, "), RX: %u [%u] Mbps (lv: %d)\n",
- stats->rx_throughput, stats->rx_throughput_raw, stats->rx_tfc_lv);
- seq_printf(m, "Beacon: %u (%d dBm), TF: %u\n", pkt_stat->beacon_nr,
- RTW89_RSSI_RAW_TO_DBM(rssi), stats->rx_tf_periodic);
- seq_printf(m, "Avg packet length: TX=%u, RX=%u\n", stats->tx_avg_len,
- stats->rx_avg_len);
-
- seq_puts(m, "RX count:\n");
+ p += scnprintf(p, end - p, ", duty: %d%%",
+ 100 - hal->thermal_prot_lv * RTW89_THERMAL_PROT_STEP);
+ p += scnprintf(p, end - p, "), RX: %u [%u] Mbps (lv: %d)\n",
+ stats->rx_throughput, stats->rx_throughput_raw,
+ stats->rx_tfc_lv);
+ p += scnprintf(p, end - p, "Beacon: %u (%d dBm), TF: %u\n",
+ pkt_stat->beacon_nr,
+ RTW89_RSSI_RAW_TO_DBM(rssi), stats->rx_tf_periodic);
+ p += scnprintf(p, end - p, "Avg packet length: TX=%u, RX=%u\n",
+ stats->tx_avg_len,
+ stats->rx_avg_len);
+
+ p += scnprintf(p, end - p, "RX count:\n");
for (i = 0; i < ARRAY_SIZE(rtw89_rx_rate_cnt_infos); i++) {
info = &rtw89_rx_rate_cnt_infos[i];
@@ -3706,189 +3912,238 @@ static int rtw89_debug_priv_phy_info_get(struct seq_file *m, void *v)
if (first_rate >= RTW89_HW_RATE_NR)
continue;
- seq_printf(m, "%10s [", info->rate_mode);
- rtw89_debug_append_rx_rate(m, pkt_stat,
- first_rate, info->len);
+ p += scnprintf(p, end - p, "%10s [", info->rate_mode);
+ p += rtw89_debug_append_rx_rate(p, end - p, pkt_stat,
+ first_rate, info->len);
if (info->ext) {
- seq_puts(m, "][");
- rtw89_debug_append_rx_rate(m, pkt_stat,
- first_rate + info->len, info->ext);
+ p += scnprintf(p, end - p, "][");
+ p += rtw89_debug_append_rx_rate(p, end - p, pkt_stat,
+ first_rate + info->len, info->ext);
}
- seq_puts(m, "]\n");
+ p += scnprintf(p, end - p, "]\n");
}
- ieee80211_iterate_stations_atomic(rtwdev->hw, rtw89_sta_info_get_iter, m);
+ rtw89_debugfs_iter_data_setup(&iter_data, p, end - p);
+ ieee80211_iterate_stations_atomic(rtwdev->hw, rtw89_sta_info_get_iter, &iter_data);
+ p += iter_data.written_sz;
- return 0;
+ return p - buf;
}
-static void rtw89_dump_addr_cam(struct seq_file *m,
- struct rtw89_dev *rtwdev,
- struct rtw89_addr_cam_entry *addr_cam)
+static int rtw89_dump_addr_cam(struct rtw89_dev *rtwdev,
+ char *buf, size_t bufsz,
+ struct rtw89_addr_cam_entry *addr_cam)
{
struct rtw89_cam_info *cam_info = &rtwdev->cam_info;
const struct rtw89_sec_cam_entry *sec_entry;
+ char *p = buf, *end = buf + bufsz;
u8 sec_cam_idx;
int i;
- seq_printf(m, "\taddr_cam_idx=%u\n", addr_cam->addr_cam_idx);
- seq_printf(m, "\t-> bssid_cam_idx=%u\n", addr_cam->bssid_cam_idx);
- seq_printf(m, "\tsec_cam_bitmap=%*ph\n", (int)sizeof(addr_cam->sec_cam_map),
- addr_cam->sec_cam_map);
+ p += scnprintf(p, end - p, "\taddr_cam_idx=%u\n",
+ addr_cam->addr_cam_idx);
+ p += scnprintf(p, end - p, "\t-> bssid_cam_idx=%u\n",
+ addr_cam->bssid_cam_idx);
+ p += scnprintf(p, end - p, "\tsec_cam_bitmap=%*ph\n",
+ (int)sizeof(addr_cam->sec_cam_map),
+ addr_cam->sec_cam_map);
for_each_set_bit(i, addr_cam->sec_cam_map, RTW89_SEC_CAM_IN_ADDR_CAM) {
sec_cam_idx = addr_cam->sec_ent[i];
sec_entry = cam_info->sec_entries[sec_cam_idx];
if (!sec_entry)
continue;
- seq_printf(m, "\tsec[%d]: sec_cam_idx %u", i, sec_entry->sec_cam_idx);
+ p += scnprintf(p, end - p, "\tsec[%d]: sec_cam_idx %u", i,
+ sec_entry->sec_cam_idx);
if (sec_entry->ext_key)
- seq_printf(m, ", %u", sec_entry->sec_cam_idx + 1);
- seq_puts(m, "\n");
+ p += scnprintf(p, end - p, ", %u",
+ sec_entry->sec_cam_idx + 1);
+ p += scnprintf(p, end - p, "\n");
}
+
+ return p - buf;
}
-__printf(3, 4)
-static void rtw89_dump_pkt_offload(struct seq_file *m, struct list_head *pkt_list,
- const char *fmt, ...)
+__printf(4, 5)
+static int rtw89_dump_pkt_offload(char *buf, size_t bufsz, struct list_head *pkt_list,
+ const char *fmt, ...)
{
+ char *p = buf, *end = buf + bufsz;
struct rtw89_pktofld_info *info;
struct va_format vaf;
va_list args;
if (list_empty(pkt_list))
- return;
+ return 0;
va_start(args, fmt);
vaf.va = &args;
vaf.fmt = fmt;
- seq_printf(m, "%pV", &vaf);
+ p += scnprintf(p, end - p, "%pV", &vaf);
va_end(args);
list_for_each_entry(info, pkt_list, list)
- seq_printf(m, "%d ", info->id);
+ p += scnprintf(p, end - p, "%d ", info->id);
+
+ p += scnprintf(p, end - p, "\n");
- seq_puts(m, "\n");
+ return p - buf;
}
-static void rtw89_vif_link_ids_get(struct seq_file *m, u8 *mac,
- struct rtw89_dev *rtwdev,
- struct rtw89_vif_link *rtwvif_link)
+static int rtw89_vif_link_ids_get(struct rtw89_dev *rtwdev,
+ char *buf, size_t bufsz, u8 *mac,
+ struct rtw89_vif_link *rtwvif_link)
{
struct rtw89_bssid_cam_entry *bssid_cam = &rtwvif_link->bssid_cam;
-
- seq_printf(m, " [%u] %pM\n", rtwvif_link->mac_id, rtwvif_link->mac_addr);
- seq_printf(m, "\tlink_id=%u\n", rtwvif_link->link_id);
- seq_printf(m, "\tbssid_cam_idx=%u\n", bssid_cam->bssid_cam_idx);
- rtw89_dump_addr_cam(m, rtwdev, &rtwvif_link->addr_cam);
- rtw89_dump_pkt_offload(m, &rtwvif_link->general_pkt_list,
- "\tpkt_ofld[GENERAL]: ");
+ char *p = buf, *end = buf + bufsz;
+
+ p += scnprintf(p, end - p, " [%u] %pM\n", rtwvif_link->mac_id,
+ rtwvif_link->mac_addr);
+ p += scnprintf(p, end - p, "\tlink_id=%u\n", rtwvif_link->link_id);
+ p += scnprintf(p, end - p, "\tbssid_cam_idx=%u\n",
+ bssid_cam->bssid_cam_idx);
+ p += rtw89_dump_addr_cam(rtwdev, p, end - p, &rtwvif_link->addr_cam);
+ p += rtw89_dump_pkt_offload(p, end - p, &rtwvif_link->general_pkt_list,
+ "\tpkt_ofld[GENERAL]: ");
+
+ return p - buf;
}
static
void rtw89_vif_ids_get_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
{
- struct seq_file *m = (struct seq_file *)data;
+ struct rtw89_debugfs_iter_data *iter_data =
+ (struct rtw89_debugfs_iter_data *)data;
struct rtw89_vif *rtwvif = vif_to_rtwvif(vif);
struct rtw89_dev *rtwdev = rtwvif->rtwdev;
struct rtw89_vif_link *rtwvif_link;
+ size_t bufsz = iter_data->bufsz;
+ char *buf = iter_data->buf;
+ char *p = buf, *end = buf + bufsz;
unsigned int link_id;
- seq_printf(m, "VIF %pM\n", rtwvif->mac_addr);
+ p += scnprintf(p, end - p, "VIF %pM\n", rtwvif->mac_addr);
rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id)
- rtw89_vif_link_ids_get(m, mac, rtwdev, rtwvif_link);
+ p += rtw89_vif_link_ids_get(rtwdev, p, end - p, mac, rtwvif_link);
+
+ rtw89_debugfs_iter_data_next(iter_data, p, end - p, p - buf);
}
-static void rtw89_dump_ba_cam(struct seq_file *m, struct rtw89_dev *rtwdev,
- struct rtw89_sta_link *rtwsta_link)
+static int rtw89_dump_ba_cam(struct rtw89_dev *rtwdev,
+ char *buf, size_t bufsz,
+ struct rtw89_sta_link *rtwsta_link)
{
struct rtw89_ba_cam_entry *entry;
+ char *p = buf, *end = buf + bufsz;
bool first = true;
list_for_each_entry(entry, &rtwsta_link->ba_cam_list, list) {
if (first) {
- seq_puts(m, "\tba_cam ");
+ p += scnprintf(p, end - p, "\tba_cam ");
first = false;
} else {
- seq_puts(m, ", ");
+ p += scnprintf(p, end - p, ", ");
}
- seq_printf(m, "tid[%u]=%d", entry->tid,
- (int)(entry - rtwdev->cam_info.ba_cam_entry));
+ p += scnprintf(p, end - p, "tid[%u]=%d", entry->tid,
+ (int)(entry - rtwdev->cam_info.ba_cam_entry));
}
- seq_puts(m, "\n");
+ p += scnprintf(p, end - p, "\n");
+
+ return p - buf;
}
-static void rtw89_sta_link_ids_get(struct seq_file *m,
- struct rtw89_dev *rtwdev,
- struct rtw89_sta_link *rtwsta_link)
+static int rtw89_sta_link_ids_get(struct rtw89_dev *rtwdev,
+ char *buf, size_t bufsz,
+ struct rtw89_sta_link *rtwsta_link)
{
struct ieee80211_link_sta *link_sta;
+ char *p = buf, *end = buf + bufsz;
rcu_read_lock();
link_sta = rtw89_sta_rcu_dereference_link(rtwsta_link, true);
- seq_printf(m, " [%u] %pM\n", rtwsta_link->mac_id, link_sta->addr);
+ p += scnprintf(p, end - p, " [%u] %pM\n", rtwsta_link->mac_id,
+ link_sta->addr);
rcu_read_unlock();
- seq_printf(m, "\tlink_id=%u\n", rtwsta_link->link_id);
- rtw89_dump_addr_cam(m, rtwdev, &rtwsta_link->addr_cam);
- rtw89_dump_ba_cam(m, rtwdev, rtwsta_link);
+ p += scnprintf(p, end - p, "\tlink_id=%u\n", rtwsta_link->link_id);
+ p += rtw89_dump_addr_cam(rtwdev, p, end - p, &rtwsta_link->addr_cam);
+ p += rtw89_dump_ba_cam(rtwdev, p, end - p, rtwsta_link);
+
+ return p - buf;
}
static void rtw89_sta_ids_get_iter(void *data, struct ieee80211_sta *sta)
{
- struct seq_file *m = (struct seq_file *)data;
+ struct rtw89_debugfs_iter_data *iter_data =
+ (struct rtw89_debugfs_iter_data *)data;
struct rtw89_sta *rtwsta = sta_to_rtwsta(sta);
struct rtw89_dev *rtwdev = rtwsta->rtwdev;
struct rtw89_sta_link *rtwsta_link;
+ size_t bufsz = iter_data->bufsz;
+ char *buf = iter_data->buf;
+ char *p = buf, *end = buf + bufsz;
unsigned int link_id;
- seq_printf(m, "STA %pM %s\n", sta->addr, sta->tdls ? "(TDLS)" : "");
+ p += scnprintf(p, end - p, "STA %pM %s\n", sta->addr,
+ sta->tdls ? "(TDLS)" : "");
rtw89_sta_for_each_link(rtwsta, rtwsta_link, link_id)
- rtw89_sta_link_ids_get(m, rtwdev, rtwsta_link);
+ p += rtw89_sta_link_ids_get(rtwdev, p, end - p, rtwsta_link);
+
+ rtw89_debugfs_iter_data_next(iter_data, p, end - p, p - buf);
}
-static int rtw89_debug_priv_stations_get(struct seq_file *m, void *v)
+static ssize_t rtw89_debug_priv_stations_get(struct rtw89_dev *rtwdev,
+ struct rtw89_debugfs_priv *debugfs_priv,
+ char *buf, size_t bufsz)
{
- struct rtw89_debugfs_priv *debugfs_priv = m->private;
- struct rtw89_dev *rtwdev = debugfs_priv->rtwdev;
struct rtw89_cam_info *cam_info = &rtwdev->cam_info;
+ struct rtw89_debugfs_iter_data iter_data;
+ char *p = buf, *end = buf + bufsz;
u8 idx;
- mutex_lock(&rtwdev->mutex);
-
- seq_puts(m, "map:\n");
- seq_printf(m, "\tmac_id: %*ph\n", (int)sizeof(rtwdev->mac_id_map),
- rtwdev->mac_id_map);
- seq_printf(m, "\taddr_cam: %*ph\n", (int)sizeof(cam_info->addr_cam_map),
- cam_info->addr_cam_map);
- seq_printf(m, "\tbssid_cam: %*ph\n", (int)sizeof(cam_info->bssid_cam_map),
- cam_info->bssid_cam_map);
- seq_printf(m, "\tsec_cam: %*ph\n", (int)sizeof(cam_info->sec_cam_map),
- cam_info->sec_cam_map);
- seq_printf(m, "\tba_cam: %*ph\n", (int)sizeof(cam_info->ba_cam_map),
- cam_info->ba_cam_map);
- seq_printf(m, "\tpkt_ofld: %*ph\n", (int)sizeof(rtwdev->pkt_offload),
- rtwdev->pkt_offload);
+ lockdep_assert_wiphy(rtwdev->hw->wiphy);
+
+ p += scnprintf(p, end - p, "map:\n");
+ p += scnprintf(p, end - p, "\tmac_id: %*ph\n",
+ (int)sizeof(rtwdev->mac_id_map),
+ rtwdev->mac_id_map);
+ p += scnprintf(p, end - p, "\taddr_cam: %*ph\n",
+ (int)sizeof(cam_info->addr_cam_map),
+ cam_info->addr_cam_map);
+ p += scnprintf(p, end - p, "\tbssid_cam: %*ph\n",
+ (int)sizeof(cam_info->bssid_cam_map),
+ cam_info->bssid_cam_map);
+ p += scnprintf(p, end - p, "\tsec_cam: %*ph\n",
+ (int)sizeof(cam_info->sec_cam_map),
+ cam_info->sec_cam_map);
+ p += scnprintf(p, end - p, "\tba_cam: %*ph\n",
+ (int)sizeof(cam_info->ba_cam_map),
+ cam_info->ba_cam_map);
+ p += scnprintf(p, end - p, "\tpkt_ofld: %*ph\n",
+ (int)sizeof(rtwdev->pkt_offload),
+ rtwdev->pkt_offload);
for (idx = NL80211_BAND_2GHZ; idx < NUM_NL80211_BANDS; idx++) {
if (!(rtwdev->chip->support_bands & BIT(idx)))
continue;
- rtw89_dump_pkt_offload(m, &rtwdev->scan_info.pkt_list[idx],
- "\t\t[SCAN %u]: ", idx);
+ p += rtw89_dump_pkt_offload(p, end - p, &rtwdev->scan_info.pkt_list[idx],
+ "\t\t[SCAN %u]: ", idx);
}
+ rtw89_debugfs_iter_data_setup(&iter_data, p, end - p);
ieee80211_iterate_active_interfaces_atomic(rtwdev->hw,
- IEEE80211_IFACE_ITER_NORMAL, rtw89_vif_ids_get_iter, m);
+ IEEE80211_IFACE_ITER_NORMAL, rtw89_vif_ids_get_iter, &iter_data);
+ p += iter_data.written_sz;
- ieee80211_iterate_stations_atomic(rtwdev->hw, rtw89_sta_ids_get_iter, m);
+ rtw89_debugfs_iter_data_setup(&iter_data, p, end - p);
+ ieee80211_iterate_stations_atomic(rtwdev->hw, rtw89_sta_ids_get_iter, &iter_data);
+ p += iter_data.written_sz;
- mutex_unlock(&rtwdev->mutex);
-
- return 0;
+ return p - buf;
}
#define DM_INFO(type) {RTW89_DM_ ## type, #type}
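The stations and phy_info getters above hand their write cursor to mac80211's atomic iterators through a small holder that is set up before the walk, advanced by each callback, and read back via written_sz afterwards. The following userspace sketch only mimics that hand-off under stated assumptions; the holder, helpers, and callback below are invented stand-ins, not the driver's rtw89_debugfs_iter_data helpers.

/* Sketch: passing a buffer cursor through a per-item callback, then
 * adding back the total the callbacks wrote (assumed semantics only). */
#include <stdio.h>

struct iter_data {
	char *buf;
	size_t bufsz;
	size_t written_sz;
};

static void iter_data_setup(struct iter_data *it, char *buf, size_t bufsz)
{
	it->buf = buf;
	it->bufsz = bufsz;
	it->written_sz = 0;
}

/* each callback records its new cursor and how much it just wrote */
static void iter_data_next(struct iter_data *it, char *new_buf,
			   size_t new_bufsz, size_t just_written)
{
	it->buf = new_buf;
	it->bufsz = new_bufsz;
	it->written_sz += just_written;
}

/* stand-in for a per-station callback */
static void sta_cb(void *data, const char *sta_name)
{
	struct iter_data *it = data;
	char *p = it->buf, *end = it->buf + it->bufsz;
	int n = snprintf(p, end - p, "STA %s\n", sta_name);

	if (n < 0)
		n = 0;
	else if ((size_t)n >= it->bufsz)
		n = it->bufsz ? it->bufsz - 1 : 0;	/* clamp like scnprintf */
	p += n;

	iter_data_next(it, p, end - p, p - it->buf);
}

int main(void)
{
	static const char *stas[] = { "aa:bb:cc:00:11:22", "aa:bb:cc:33:44:55" };
	struct iter_data it;
	char buf[256];
	char *p = buf, *end = buf + sizeof(buf);
	size_t i;

	iter_data_setup(&it, p, end - p);
	for (i = 0; i < 2; i++)		/* stands in for the atomic station walk */
		sta_cb(&it, stas[i]);
	p += it.written_sz;

	printf("%s-- %zu bytes --\n", buf, (size_t)(p - buf));
	return 0;
}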
@@ -3899,43 +4154,45 @@ static const struct rtw89_disabled_dm_info {
} rtw89_disabled_dm_infos[] = {
DM_INFO(DYNAMIC_EDCCA),
DM_INFO(THERMAL_PROTECT),
+ DM_INFO(TAS),
};
-static int
-rtw89_debug_priv_disable_dm_get(struct seq_file *m, void *v)
+static ssize_t
+rtw89_debug_priv_disable_dm_get(struct rtw89_dev *rtwdev,
+ struct rtw89_debugfs_priv *debugfs_priv,
+ char *buf, size_t bufsz)
{
- struct rtw89_debugfs_priv *debugfs_priv = m->private;
- struct rtw89_dev *rtwdev = debugfs_priv->rtwdev;
const struct rtw89_disabled_dm_info *info;
struct rtw89_hal *hal = &rtwdev->hal;
+ char *p = buf, *end = buf + bufsz;
u32 disabled;
int i;
- seq_printf(m, "Disabled DM: 0x%x\n", hal->disabled_dm_bitmap);
+ p += scnprintf(p, end - p, "Disabled DM: 0x%x\n",
+ hal->disabled_dm_bitmap);
for (i = 0; i < ARRAY_SIZE(rtw89_disabled_dm_infos); i++) {
info = &rtw89_disabled_dm_infos[i];
disabled = BIT(info->type) & hal->disabled_dm_bitmap;
- seq_printf(m, "[%d] %s: %c\n", info->type, info->name,
- disabled ? 'X' : 'O');
+ p += scnprintf(p, end - p, "[%d] %s: %c\n", info->type,
+ info->name,
+ disabled ? 'X' : 'O');
}
- return 0;
+ return p - buf;
}
static ssize_t
-rtw89_debug_priv_disable_dm_set(struct file *filp, const char __user *user_buf,
- size_t count, loff_t *loff)
+rtw89_debug_priv_disable_dm_set(struct rtw89_dev *rtwdev,
+ struct rtw89_debugfs_priv *debugfs_priv,
+ const char *buf, size_t count)
{
- struct seq_file *m = (struct seq_file *)filp->private_data;
- struct rtw89_debugfs_priv *debugfs_priv = m->private;
- struct rtw89_dev *rtwdev = debugfs_priv->rtwdev;
struct rtw89_hal *hal = &rtwdev->hal;
u32 conf;
int ret;
- ret = kstrtou32_from_user(user_buf, count, 0, &conf);
+ ret = kstrtou32(buf, 0, &conf);
if (ret)
return -EINVAL;
@@ -3944,46 +4201,62 @@ rtw89_debug_priv_disable_dm_set(struct file *filp, const char __user *user_buf,
return count;
}
-#define rtw89_debug_priv_get(name) \
+#define rtw89_debug_priv_get(name, opts...) \
{ \
.cb_read = rtw89_debug_priv_ ##name## _get, \
+ .opt = { opts }, \
}
-#define rtw89_debug_priv_set(name) \
+#define rtw89_debug_priv_set(name, opts...) \
{ \
.cb_write = rtw89_debug_priv_ ##name## _set, \
+ .opt = { opts }, \
}
-#define rtw89_debug_priv_select_and_get(name) \
+#define rtw89_debug_priv_select_and_get(name, opts...) \
{ \
.cb_write = rtw89_debug_priv_ ##name## _select, \
.cb_read = rtw89_debug_priv_ ##name## _get, \
+ .opt = { opts }, \
}
-#define rtw89_debug_priv_set_and_get(name) \
+#define rtw89_debug_priv_set_and_get(name, opts...) \
{ \
.cb_write = rtw89_debug_priv_ ##name## _set, \
.cb_read = rtw89_debug_priv_ ##name## _get, \
+ .opt = { opts }, \
}
+#define RSIZE_8K .rsize = 0x2000
+#define RSIZE_12K .rsize = 0x3000
+#define RSIZE_16K .rsize = 0x4000
+#define RSIZE_20K .rsize = 0x5000
+#define RSIZE_32K .rsize = 0x8000
+#define RSIZE_64K .rsize = 0x10000
+#define RSIZE_128K .rsize = 0x20000
+#define RSIZE_1M .rsize = 0x100000
+#define RLOCK .rlock = 1
+#define WLOCK .wlock = 1
+#define RWLOCK RLOCK, WLOCK
+
static const struct rtw89_debugfs rtw89_debugfs_templ = {
.read_reg = rtw89_debug_priv_select_and_get(read_reg),
.write_reg = rtw89_debug_priv_set(write_reg),
.read_rf = rtw89_debug_priv_select_and_get(read_rf),
.write_rf = rtw89_debug_priv_set(write_rf),
- .rf_reg_dump = rtw89_debug_priv_get(rf_reg_dump),
- .txpwr_table = rtw89_debug_priv_get(txpwr_table),
- .mac_reg_dump = rtw89_debug_priv_select_and_get(mac_reg_dump),
- .mac_mem_dump = rtw89_debug_priv_select_and_get(mac_mem_dump),
- .mac_dbg_port_dump = rtw89_debug_priv_select_and_get(mac_dbg_port_dump),
+ .rf_reg_dump = rtw89_debug_priv_get(rf_reg_dump, RSIZE_8K),
+ .txpwr_table = rtw89_debug_priv_get(txpwr_table, RSIZE_20K, RLOCK),
+ .mac_reg_dump = rtw89_debug_priv_select_and_get(mac_reg_dump, RSIZE_128K),
+ .mac_mem_dump = rtw89_debug_priv_select_and_get(mac_mem_dump, RSIZE_16K, RLOCK),
+ .mac_dbg_port_dump = rtw89_debug_priv_select_and_get(mac_dbg_port_dump, RSIZE_1M),
.send_h2c = rtw89_debug_priv_set(send_h2c),
- .early_h2c = rtw89_debug_priv_set_and_get(early_h2c),
- .fw_crash = rtw89_debug_priv_set_and_get(fw_crash),
- .btc_info = rtw89_debug_priv_get(btc_info),
+ .early_h2c = rtw89_debug_priv_set_and_get(early_h2c, RWLOCK),
+ .fw_crash = rtw89_debug_priv_set_and_get(fw_crash, WLOCK),
+ .btc_info = rtw89_debug_priv_get(btc_info, RSIZE_12K),
.btc_manual = rtw89_debug_priv_set(btc_manual),
- .fw_log_manual = rtw89_debug_priv_set(fw_log_manual),
+ .fw_log_manual = rtw89_debug_priv_set(fw_log_manual, WLOCK),
.phy_info = rtw89_debug_priv_get(phy_info),
- .stations = rtw89_debug_priv_get(stations),
+ .stations = rtw89_debug_priv_get(stations, RLOCK),
.disable_dm = rtw89_debug_priv_set_and_get(disable_dm),
};
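Every debugfs getter converted above follows the same shape: take a caller-provided buffer, keep a cursor p and a limit end = buf + bufsz, append with scnprintf() (which never writes past the limit and returns the bytes it actually emitted), and return p - buf so callers can chain helpers. The minimal standalone sketch below only illustrates that shape: it emulates scnprintf() with vsnprintf() so it builds in userspace, and every name in it is invented for illustration rather than taken from the driver.

#include <stdarg.h>
#include <stdio.h>

/* clamped append: returns bytes actually written, never more than size - 1 */
static size_t my_scnprintf(char *buf, size_t size, const char *fmt, ...)
{
	va_list args;
	int n;

	if (size == 0)
		return 0;

	va_start(args, fmt);
	n = vsnprintf(buf, size, fmt, args);
	va_end(args);

	if (n < 0)
		return 0;
	return (size_t)n < size ? (size_t)n : size - 1;
}

/* one dump helper in the converted style: fill, then report bytes used */
static size_t dump_counters(char *buf, size_t bufsz,
			    const unsigned int *cnt, int len)
{
	char *p = buf, *end = buf + bufsz;
	int i;

	p += my_scnprintf(p, end - p, "RX count: [");
	for (i = 0; i < len; i++)
		p += my_scnprintf(p, end - p, "%s%u", i ? ", " : "", cnt[i]);
	p += my_scnprintf(p, end - p, "]\n");

	return p - buf;
}

int main(void)
{
	unsigned int cnt[] = { 4, 8, 15, 16 };
	char buf[128];
	char *p = buf, *end = buf + sizeof(buf);

	/* callers chain helpers exactly as the converted getters do */
	p += dump_counters(p, end - p, cnt, 4);
	p += my_scnprintf(p, end - p, "second helper appends here\n");

	printf("%s-- %zu bytes total --\n", buf, (size_t)(p - buf));
	return 0;
}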
diff --git a/drivers/net/wireless/realtek/rtw89/fw.c b/drivers/net/wireless/realtek/rtw89/fw.c
index 5d4ad23cc3bd..8643b17866f8 100644
--- a/drivers/net/wireless/realtek/rtw89/fw.c
+++ b/drivers/net/wireless/realtek/rtw89/fw.c
@@ -38,6 +38,16 @@ struct rtw89_arp_rsp {
static const u8 mss_signature[] = {0x4D, 0x53, 0x53, 0x4B, 0x50, 0x4F, 0x4F, 0x4C};
+const struct rtw89_fw_blacklist rtw89_fw_blacklist_default = {
+ .ver = 0x00,
+ .list = {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+ },
+};
+EXPORT_SYMBOL(rtw89_fw_blacklist_default);
+
union rtw89_fw_element_arg {
size_t offset;
enum rtw89_rf_path rf_path;
@@ -314,7 +324,7 @@ static int __parse_formatted_mssc(struct rtw89_dev *rtwdev,
if (!sec->secure_boot)
goto out;
- sb_sel_ver = le32_to_cpu(section_content->sb_sel_ver.v);
+ sb_sel_ver = get_unaligned_le32(&section_content->sb_sel_ver.v);
if (sb_sel_ver && sb_sel_ver != sec->sb_sel_mgn)
goto ignore;
@@ -344,6 +354,46 @@ ignore:
return 0;
}
+static int __check_secure_blacklist(struct rtw89_dev *rtwdev,
+ struct rtw89_fw_bin_info *info,
+ struct rtw89_fw_hdr_section_info *section_info,
+ const void *content)
+{
+ const struct rtw89_fw_blacklist *chip_blacklist = rtwdev->chip->fw_blacklist;
+ const union rtw89_fw_section_mssc_content *section_content = content;
+ struct rtw89_fw_secure *sec = &rtwdev->fw.sec;
+ u8 byte_idx;
+ u8 bit_mask;
+
+ if (!sec->secure_boot)
+ return 0;
+
+ if (!info->secure_section_exist || section_info->ignore)
+ return 0;
+
+ if (!chip_blacklist) {
+ rtw89_warn(rtwdev, "chip no blacklist for secure firmware\n");
+ return -ENOENT;
+ }
+
+ byte_idx = section_content->blacklist.bit_in_chip_list >> 3;
+ bit_mask = BIT(section_content->blacklist.bit_in_chip_list & 0x7);
+
+ if (section_content->blacklist.ver > chip_blacklist->ver) {
+ rtw89_warn(rtwdev, "chip blacklist out of date (%u, %u)\n",
+ section_content->blacklist.ver, chip_blacklist->ver);
+ return -EINVAL;
+ }
+
+ if (chip_blacklist->list[byte_idx] & bit_mask) {
+ rtw89_warn(rtwdev, "firmware %u in chip blacklist\n",
+ section_content->blacklist.ver);
+ return -EPERM;
+ }
+
+ return 0;
+}
+
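/*
 * Worked example of the blacklist lookup above (illustrative numbers, not
 * taken from the patch): with bit_in_chip_list = 21, byte_idx = 21 >> 3 = 2
 * and bit_mask = BIT(21 & 0x7) = BIT(5) = 0x20, so the firmware is rejected
 * when chip_blacklist->list[2] & 0x20 is set.
 */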
static int __parse_security_section(struct rtw89_dev *rtwdev,
struct rtw89_fw_bin_info *info,
struct rtw89_fw_hdr_section_info *section_info,
@@ -364,8 +414,11 @@ static int __parse_security_section(struct rtw89_dev *rtwdev,
*mssc_len += section_info->mssc * FWDL_SECURITY_CHKSUM_LEN;
if (sec->secure_boot) {
- if (sec->mss_idx >= section_info->mssc)
+ if (sec->mss_idx >= section_info->mssc) {
+ rtw89_err(rtwdev, "unexpected MSS %d >= %d\n",
+ sec->mss_idx, section_info->mssc);
return -EFAULT;
+ }
section_info->key_addr = content + section_info->len +
sec->mss_idx * FWDL_SECURITY_SIGLEN;
section_info->key_len = FWDL_SECURITY_SIGLEN;
@@ -374,6 +427,9 @@ static int __parse_security_section(struct rtw89_dev *rtwdev,
info->secure_section_exist = true;
}
+ ret = __check_secure_blacklist(rtwdev, info, section_info, content);
+ WARN_ONCE(ret, "Current firmware in blacklist. Please update firmware.\n");
+
return 0;
}
@@ -490,18 +546,61 @@ static int rtw89_fw_hdr_parser(struct rtw89_dev *rtwdev,
}
static
+const struct rtw89_mfw_hdr *rtw89_mfw_get_hdr_ptr(struct rtw89_dev *rtwdev,
+ const struct firmware *firmware)
+{
+ const struct rtw89_mfw_hdr *mfw_hdr;
+
+ if (sizeof(*mfw_hdr) > firmware->size)
+ return NULL;
+
+ mfw_hdr = (const struct rtw89_mfw_hdr *)firmware->data;
+
+ if (mfw_hdr->sig != RTW89_MFW_SIG)
+ return NULL;
+
+ return mfw_hdr;
+}
+
+static int rtw89_mfw_validate_hdr(struct rtw89_dev *rtwdev,
+ const struct firmware *firmware,
+ const struct rtw89_mfw_hdr *mfw_hdr)
+{
+ const void *mfw = firmware->data;
+ u32 mfw_len = firmware->size;
+ u8 fw_nr = mfw_hdr->fw_nr;
+ const void *ptr;
+
+ if (fw_nr == 0) {
+ rtw89_err(rtwdev, "mfw header has no fw entry\n");
+ return -ENOENT;
+ }
+
+ ptr = &mfw_hdr->info[fw_nr];
+
+ if (ptr > mfw + mfw_len) {
+ rtw89_err(rtwdev, "mfw header out of address\n");
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static
int rtw89_mfw_recognize(struct rtw89_dev *rtwdev, enum rtw89_fw_type type,
struct rtw89_fw_suit *fw_suit, bool nowarn)
{
struct rtw89_fw_info *fw_info = &rtwdev->fw;
const struct firmware *firmware = fw_info->req.firmware;
+ const struct rtw89_mfw_info *mfw_info = NULL, *tmp;
+ const struct rtw89_mfw_hdr *mfw_hdr;
const u8 *mfw = firmware->data;
u32 mfw_len = firmware->size;
- const struct rtw89_mfw_hdr *mfw_hdr = (const struct rtw89_mfw_hdr *)mfw;
- const struct rtw89_mfw_info *mfw_info = NULL, *tmp;
+ int ret;
int i;
- if (mfw_hdr->sig != RTW89_MFW_SIG) {
+ mfw_hdr = rtw89_mfw_get_hdr_ptr(rtwdev, firmware);
+ if (!mfw_hdr) {
rtw89_debug(rtwdev, RTW89_DBG_FW, "use legacy firmware\n");
/* legacy firmware support normal type only */
if (type != RTW89_FW_NORMAL)
@@ -511,6 +610,10 @@ int rtw89_mfw_recognize(struct rtw89_dev *rtwdev, enum rtw89_fw_type type,
return 0;
}
+ ret = rtw89_mfw_validate_hdr(rtwdev, firmware, mfw_hdr);
+ if (ret)
+ return ret;
+
for (i = 0; i < mfw_hdr->fw_nr; i++) {
tmp = &mfw_hdr->info[i];
if (tmp->type != type)
@@ -540,6 +643,12 @@ int rtw89_mfw_recognize(struct rtw89_dev *rtwdev, enum rtw89_fw_type type,
found:
fw_suit->data = mfw + le32_to_cpu(mfw_info->shift);
fw_suit->size = le32_to_cpu(mfw_info->size);
+
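+ /* reject entries whose shift + size would run past the end of the multi-firmware blob */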
+ if (fw_suit->data + fw_suit->size > mfw + mfw_len) {
+ rtw89_err(rtwdev, "fw_suit %d out of address\n", type);
+ return -EFAULT;
+ }
+
return 0;
}
@@ -547,16 +656,21 @@ static u32 rtw89_mfw_get_size(struct rtw89_dev *rtwdev)
{
struct rtw89_fw_info *fw_info = &rtwdev->fw;
const struct firmware *firmware = fw_info->req.firmware;
- const struct rtw89_mfw_hdr *mfw_hdr =
- (const struct rtw89_mfw_hdr *)firmware->data;
const struct rtw89_mfw_info *mfw_info;
+ const struct rtw89_mfw_hdr *mfw_hdr;
u32 size;
+ int ret;
- if (mfw_hdr->sig != RTW89_MFW_SIG) {
+ mfw_hdr = rtw89_mfw_get_hdr_ptr(rtwdev, firmware);
+ if (!mfw_hdr) {
rtw89_warn(rtwdev, "not mfw format\n");
return 0;
}
+ ret = rtw89_mfw_validate_hdr(rtwdev, firmware, mfw_hdr);
+ if (ret)
+ return ret;
+
mfw_info = &mfw_hdr->info[mfw_hdr->fw_nr - 1];
size = le32_to_cpu(mfw_info->shift) + le32_to_cpu(mfw_info->size);
@@ -735,6 +849,7 @@ static const struct __fw_feat_cfg fw_feat_tbl[] = {
__CFG_FW_FEAT(RTL8922A, lt, 0, 35, 47, 0, CH_INFO_BE_V0),
__CFG_FW_FEAT(RTL8922A, lt, 0, 35, 49, 0, RFK_PRE_NOTIFY_V1),
__CFG_FW_FEAT(RTL8922A, lt, 0, 35, 51, 0, NO_PHYCAP_P1),
+ __CFG_FW_FEAT(RTL8922A, lt, 0, 35, 64, 0, NO_POWER_DIFFERENCE),
};
static void rtw89_fw_iterate_feature_cfg(struct rtw89_fw_info *fw,
@@ -988,7 +1103,7 @@ int rtw89_build_txpwr_trk_tbl_from_elm(struct rtw89_dev *rtwdev,
bitmap = le32_to_cpu(elm->u.txpwr_trk.bitmap);
if ((bitmap & needed_bitmap) != needed_bitmap) {
- rtw89_warn(rtwdev, "needed txpwr trk bitmap %08x but %0x8x\n",
+ rtw89_warn(rtwdev, "needed txpwr trk bitmap %08x but %08x\n",
needed_bitmap, bitmap);
return -ENOENT;
}
@@ -1056,6 +1171,101 @@ allocated:
return 0;
}
+static bool rtw89_regd_entcpy(struct rtw89_regd *regd, const void *cursor,
+ u8 cursor_size)
+{
+ /* fill default values if needed for backward compatibility */
+ struct rtw89_fw_regd_entry entry = {
+ .rule_2ghz = RTW89_NA,
+ .rule_5ghz = RTW89_NA,
+ .rule_6ghz = RTW89_NA,
+ .fmap = cpu_to_le32(0x0),
+ };
+ u8 valid_size = min_t(u8, sizeof(entry), cursor_size);
+ unsigned int i;
+ u32 fmap;
+
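+ /* copy at most ent_sz bytes; fields beyond a shorter (older) entry keep the defaults above */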
+ memcpy(&entry, cursor, valid_size);
+ memset(regd, 0, sizeof(*regd));
+
+ regd->alpha2[0] = entry.alpha2_0;
+ regd->alpha2[1] = entry.alpha2_1;
+ regd->alpha2[2] = '\0';
+
+ /* also need to consider forward compatibility */
+ regd->txpwr_regd[RTW89_BAND_2G] = entry.rule_2ghz < RTW89_REGD_NUM ?
+ entry.rule_2ghz : RTW89_NA;
+ regd->txpwr_regd[RTW89_BAND_5G] = entry.rule_5ghz < RTW89_REGD_NUM ?
+ entry.rule_5ghz : RTW89_NA;
+ regd->txpwr_regd[RTW89_BAND_6G] = entry.rule_6ghz < RTW89_REGD_NUM ?
+ entry.rule_6ghz : RTW89_NA;
+
+ BUILD_BUG_ON(sizeof(fmap) != sizeof(entry.fmap));
+ BUILD_BUG_ON(sizeof(fmap) * 8 < NUM_OF_RTW89_REGD_FUNC);
+
+ fmap = le32_to_cpu(entry.fmap);
+ for (i = 0; i < NUM_OF_RTW89_REGD_FUNC; i++) {
+ if (fmap & BIT(i))
+ set_bit(i, regd->func_bitmap);
+ }
+
+ return true;
+}
+
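+/* walk a regd element entry by entry (ent_sz stride); the body runs for each
+ * entry accepted by rtw89_regd_entcpy()
+ */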
+#define rtw89_for_each_in_regd_element(regd, element) \
+ for (const void *cursor = (element)->content, \
+ *end = (element)->content + \
+ le32_to_cpu((element)->num_ents) * (element)->ent_sz; \
+ cursor < end; cursor += (element)->ent_sz) \
+ if (rtw89_regd_entcpy(regd, cursor, (element)->ent_sz))
+
+static
+int rtw89_recognize_regd_from_elm(struct rtw89_dev *rtwdev,
+ const struct rtw89_fw_element_hdr *elm,
+ const union rtw89_fw_element_arg arg)
+{
+ const struct __rtw89_fw_regd_element *regd_elm = &elm->u.regd;
+ struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info;
+ u32 num_ents = le32_to_cpu(regd_elm->num_ents);
+ struct rtw89_regd_data *p;
+ struct rtw89_regd regd;
+ u32 i = 0;
+
+ if (num_ents > RTW89_REGD_MAX_COUNTRY_NUM) {
+ rtw89_warn(rtwdev,
+ "regd element ents (%d) are over max num (%d)\n",
+ num_ents, RTW89_REGD_MAX_COUNTRY_NUM);
+ rtw89_warn(rtwdev,
+ "regd element ignore and take another/common\n");
+ return 1;
+ }
+
+ if (elm_info->regd) {
+ rtw89_debug(rtwdev, RTW89_DBG_REGD,
+ "regd element take the latter\n");
+ devm_kfree(rtwdev->dev, elm_info->regd);
+ elm_info->regd = NULL;
+ }
+
+ p = devm_kzalloc(rtwdev->dev, struct_size(p, map, num_ents), GFP_KERNEL);
+ if (!p)
+ return -ENOMEM;
+
+ p->nr = num_ents;
+ rtw89_for_each_in_regd_element(&regd, regd_elm)
+ p->map[i++] = regd;
+
+ if (i != num_ents) {
+ rtw89_err(rtwdev, "regd element has %d invalid ents\n",
+ num_ents - i);
+ devm_kfree(rtwdev->dev, p);
+ return -EINVAL;
+ }
+
+ elm_info->regd = p;
+ return 0;
+}
+
static const struct rtw89_fw_element_handler __fw_element_handlers[] = {
[RTW89_FW_ELEMENT_ID_BBMCU0] = {__rtw89_fw_recognize_from_elm,
{ .fw_type = RTW89_FW_BBMCU0 }, NULL},
@@ -1114,6 +1324,9 @@ static const struct rtw89_fw_element_handler __fw_element_handlers[] = {
[RTW89_FW_ELEMENT_ID_RFKLOG_FMT] = {
rtw89_build_rfk_log_fmt_from_elm, {}, NULL,
},
+ [RTW89_FW_ELEMENT_ID_REGD] = {
+ rtw89_recognize_regd_from_elm, {}, "REGD",
+ },
};
int rtw89_fw_recognize_elements(struct rtw89_dev *rtwdev)
@@ -1322,7 +1535,6 @@ static int __rtw89_fw_download_hdr(struct rtw89_dev *rtwdev,
ret = rtw89_h2c_tx(rtwdev, skb, false);
if (ret) {
rtw89_err(rtwdev, "failed to send h2c\n");
- ret = -1;
goto fail;
}
@@ -1409,7 +1621,6 @@ static int __rtw89_fw_download_main(struct rtw89_dev *rtwdev,
ret = rtw89_h2c_tx(rtwdev, skb, true);
if (ret) {
rtw89_err(rtwdev, "failed to send h2c\n");
- ret = -1;
goto fail;
}
@@ -2697,7 +2908,9 @@ int rtw89_fw_h2c_lps_ml_cmn_info(struct rtw89_dev *rtwdev,
{
const struct rtw89_phy_bb_gain_info_be *gain = &rtwdev->bb_gain.be;
struct rtw89_pkt_stat *pkt_stat = &rtwdev->phystat.cur_pkt_stat;
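+ /* beacon bandwidth index to duplicate-beacon offset, copied into dup_bcn_ofst below */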
+ static const u8 bcn_bw_ofst[] = {0, 0, 0, 3, 6, 9, 0, 12};
const struct rtw89_chip_info *chip = rtwdev->chip;
+ struct rtw89_efuse *efuse = &rtwdev->efuse;
struct rtw89_h2c_lps_ml_cmn_info *h2c;
struct rtw89_vif_link *rtwvif_link;
const struct rtw89_chan *chan;
@@ -2705,6 +2918,7 @@ int rtw89_fw_h2c_lps_ml_cmn_info(struct rtw89_dev *rtwdev,
u32 len = sizeof(*h2c);
unsigned int link_id;
struct sk_buff *skb;
+ u8 beacon_bw_ofst;
u8 gain_band;
u32 done;
u8 path;
@@ -2722,9 +2936,10 @@ int rtw89_fw_h2c_lps_ml_cmn_info(struct rtw89_dev *rtwdev,
skb_put(skb, len);
h2c = (struct rtw89_h2c_lps_ml_cmn_info *)skb->data;
- h2c->fmt_id = 0x1;
+ h2c->fmt_id = 0x3;
h2c->mlo_dbcc_mode = cpu_to_le32(rtwdev->mlo_dbcc_mode);
+ h2c->rfe_type = efuse->rfe_type;
rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id) {
path = rtwvif_link->phy_idx == RTW89_PHY_1 ? RF_PATH_B : RF_PATH_A;
@@ -2745,9 +2960,21 @@ int rtw89_fw_h2c_lps_ml_cmn_info(struct rtw89_dev *rtwdev,
h2c->tia_gain[rtwvif_link->phy_idx][i] =
cpu_to_le16(gain->tia_gain[gain_band][bw_idx][path][i]);
}
+
+ if (rtwvif_link->bcn_bw_idx < ARRAY_SIZE(bcn_bw_ofst)) {
+ beacon_bw_ofst = bcn_bw_ofst[rtwvif_link->bcn_bw_idx];
+ h2c->dup_bcn_ofst[rtwvif_link->phy_idx] = beacon_bw_ofst;
+ }
+
memcpy(h2c->lna_gain[rtwvif_link->phy_idx],
gain->lna_gain[gain_band][bw_idx][path],
LNA_GAIN_NUM);
+ memcpy(h2c->tia_lna_op1db[rtwvif_link->phy_idx],
+ gain->tia_lna_op1db[gain_band][bw_idx][path],
+ LNA_GAIN_NUM + 1);
+ memcpy(h2c->lna_op1db[rtwvif_link->phy_idx],
+ gain->lna_op1db[gain_band][bw_idx][path],
+ LNA_GAIN_NUM);
}
rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
@@ -3281,9 +3508,10 @@ int rtw89_fw_h2c_assoc_cmac_tbl_g7(struct rtw89_dev *rtwdev,
CCTLINFO_G7_W5_NOMINAL_PKT_PADDING3 |
CCTLINFO_G7_W5_NOMINAL_PKT_PADDING4);
- h2c->w6 = le32_encode_bits(vif->type == NL80211_IFTYPE_STATION ? 1 : 0,
+ h2c->w6 = le32_encode_bits(vif->cfg.aid, CCTLINFO_G7_W6_AID12_PAID) |
+ le32_encode_bits(vif->type == NL80211_IFTYPE_STATION ? 1 : 0,
CCTLINFO_G7_W6_ULDL);
- h2c->m6 = cpu_to_le32(CCTLINFO_G7_W6_ULDL);
+ h2c->m6 = cpu_to_le32(CCTLINFO_G7_W6_AID12_PAID | CCTLINFO_G7_W6_ULDL);
if (rtwsta_link) {
h2c->w8 = le32_encode_bits(link_sta->he_cap.has_he,
@@ -3419,6 +3647,61 @@ fail:
return ret;
}
+EXPORT_SYMBOL(rtw89_fw_h2c_txtime_cmac_tbl);
+
+int rtw89_fw_h2c_txtime_cmac_tbl_g7(struct rtw89_dev *rtwdev,
+ struct rtw89_sta_link *rtwsta_link)
+{
+ struct rtw89_h2c_cctlinfo_ud_g7 *h2c;
+ u32 len = sizeof(*h2c);
+ struct sk_buff *skb;
+ int ret;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for txtime_cmac_g7\n");
+ return -ENOMEM;
+ }
+ skb_put(skb, len);
+ h2c = (struct rtw89_h2c_cctlinfo_ud_g7 *)skb->data;
+
+ h2c->c0 = le32_encode_bits(rtwsta_link->mac_id, CCTLINFO_G7_C0_MACID) |
+ le32_encode_bits(1, CCTLINFO_G7_C0_OP);
+
+ if (rtwsta_link->cctl_tx_time) {
+ h2c->w3 |= le32_encode_bits(1, CCTLINFO_G7_W3_AMPDU_TIME_SEL);
+ h2c->m3 |= cpu_to_le32(CCTLINFO_G7_W3_AMPDU_TIME_SEL);
+
+ h2c->w2 |= le32_encode_bits(rtwsta_link->ampdu_max_time,
+ CCTLINFO_G7_W2_AMPDU_MAX_TIME);
+ h2c->m2 |= cpu_to_le32(CCTLINFO_G7_W2_AMPDU_MAX_TIME);
+ }
+ if (rtwsta_link->cctl_tx_retry_limit) {
+ h2c->w2 |= le32_encode_bits(1, CCTLINFO_G7_W2_DATA_TXCNT_LMT_SEL) |
+ le32_encode_bits(rtwsta_link->data_tx_cnt_lmt,
+ CCTLINFO_G7_W2_DATA_TX_CNT_LMT);
+ h2c->m2 |= cpu_to_le32(CCTLINFO_G7_W2_DATA_TXCNT_LMT_SEL |
+ CCTLINFO_G7_W2_DATA_TX_CNT_LMT);
+ }
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
+ H2C_FUNC_MAC_CCTLINFO_UD_G7, 0, 1,
+ len);
+
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ dev_kfree_skb_any(skb);
+
+ return ret;
+}
+EXPORT_SYMBOL(rtw89_fw_h2c_txtime_cmac_tbl_g7);
int rtw89_fw_h2c_txpath_cmac_tbl(struct rtw89_dev *rtwdev,
struct rtw89_sta_link *rtwsta_link)
@@ -3622,14 +3905,15 @@ fail:
}
EXPORT_SYMBOL(rtw89_fw_h2c_update_beacon_be);
-#define H2C_ROLE_MAINTAIN_LEN 4
int rtw89_fw_h2c_role_maintain(struct rtw89_dev *rtwdev,
struct rtw89_vif_link *rtwvif_link,
struct rtw89_sta_link *rtwsta_link,
enum rtw89_upd_mode upd_mode)
{
- struct sk_buff *skb;
u8 mac_id = rtwsta_link ? rtwsta_link->mac_id : rtwvif_link->mac_id;
+ struct rtw89_h2c_role_maintain *h2c;
+ u32 len = sizeof(*h2c);
+ struct sk_buff *skb;
u8 self_role;
int ret;
@@ -3642,21 +3926,27 @@ int rtw89_fw_h2c_role_maintain(struct rtw89_dev *rtwdev,
self_role = rtwvif_link->self_role;
}
- skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_ROLE_MAINTAIN_LEN);
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
if (!skb) {
rtw89_err(rtwdev, "failed to alloc skb for h2c join\n");
return -ENOMEM;
}
- skb_put(skb, H2C_ROLE_MAINTAIN_LEN);
- SET_FWROLE_MAINTAIN_MACID(skb->data, mac_id);
- SET_FWROLE_MAINTAIN_SELF_ROLE(skb->data, self_role);
- SET_FWROLE_MAINTAIN_UPD_MODE(skb->data, upd_mode);
- SET_FWROLE_MAINTAIN_WIFI_ROLE(skb->data, rtwvif_link->wifi_role);
+ skb_put(skb, len);
+ h2c = (struct rtw89_h2c_role_maintain *)skb->data;
+
+ h2c->w0 = le32_encode_bits(mac_id, RTW89_H2C_ROLE_MAINTAIN_W0_MACID) |
+ le32_encode_bits(self_role, RTW89_H2C_ROLE_MAINTAIN_W0_SELF_ROLE) |
+ le32_encode_bits(upd_mode, RTW89_H2C_ROLE_MAINTAIN_W0_UPD_MODE) |
+ le32_encode_bits(rtwvif_link->wifi_role,
+ RTW89_H2C_ROLE_MAINTAIN_W0_WIFI_ROLE) |
+ le32_encode_bits(rtwvif_link->mac_idx,
+ RTW89_H2C_ROLE_MAINTAIN_W0_BAND) |
+ le32_encode_bits(rtwvif_link->port, RTW89_H2C_ROLE_MAINTAIN_W0_PORT);
rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
H2C_CAT_MAC, H2C_CL_MAC_MEDIA_RPT,
H2C_FUNC_MAC_FWROLE_MAINTAIN, 0, 1,
- H2C_ROLE_MAINTAIN_LEN);
+ len);
ret = rtw89_h2c_tx(rtwdev, skb, false);
if (ret) {
@@ -5311,6 +5601,7 @@ int rtw89_fw_h2c_scan_offload_be(struct rtw89_dev *rtwdev,
u8 macc_role_size = sizeof(*macc_role) * option->num_macc_role;
u8 opch_size = sizeof(*opch) * option->num_opch;
u8 probe_id[NUM_NL80211_BANDS];
+ u8 scan_offload_ver = U8_MAX;
u8 cfg_len = sizeof(*h2c);
unsigned int cond;
u8 ver = U8_MAX;
@@ -5321,6 +5612,11 @@ int rtw89_fw_h2c_scan_offload_be(struct rtw89_dev *rtwdev,
rtw89_scan_get_6g_disabled_chan(rtwdev, option);
+ if (RTW89_CHK_FW_FEATURE(SCAN_OFFLOAD_BE_V0, &rtwdev->fw)) {
+ cfg_len = offsetofend(typeof(*h2c), w8);
+ scan_offload_ver = 0;
+ }
+
len = cfg_len + macc_role_size + opch_size;
skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
if (!skb) {
@@ -5392,10 +5688,8 @@ int rtw89_fw_h2c_scan_offload_be(struct rtw89_dev *rtwdev,
RTW89_H2C_SCANOFLD_BE_W8_PROBE_RATE_6GHZ);
}
- if (RTW89_CHK_FW_FEATURE(SCAN_OFFLOAD_BE_V0, &rtwdev->fw)) {
- cfg_len = offsetofend(typeof(*h2c), w8);
+ if (scan_offload_ver == 0)
goto flex_member;
- }
h2c->w9 = le32_encode_bits(sizeof(*h2c) / sizeof(h2c->w0),
RTW89_H2C_SCANOFLD_BE_W9_SIZE_CFG) |
@@ -5994,24 +6288,29 @@ void rtw89_fw_send_all_early_h2c(struct rtw89_dev *rtwdev)
{
struct rtw89_early_h2c *early_h2c;
- lockdep_assert_held(&rtwdev->mutex);
+ lockdep_assert_wiphy(rtwdev->hw->wiphy);
list_for_each_entry(early_h2c, &rtwdev->early_h2c_list, list) {
rtw89_fw_h2c_raw(rtwdev, early_h2c->h2c, early_h2c->h2c_len);
}
}
-void rtw89_fw_free_all_early_h2c(struct rtw89_dev *rtwdev)
+void __rtw89_fw_free_all_early_h2c(struct rtw89_dev *rtwdev)
{
struct rtw89_early_h2c *early_h2c, *tmp;
- mutex_lock(&rtwdev->mutex);
list_for_each_entry_safe(early_h2c, tmp, &rtwdev->early_h2c_list, list) {
list_del(&early_h2c->list);
kfree(early_h2c->h2c);
kfree(early_h2c);
}
- mutex_unlock(&rtwdev->mutex);
+}
+
+void rtw89_fw_free_all_early_h2c(struct rtw89_dev *rtwdev)
+{
+ lockdep_assert_wiphy(rtwdev->hw->wiphy);
+
+ __rtw89_fw_free_all_early_h2c(rtwdev);
}
static void rtw89_fw_c2h_parse_attr(struct sk_buff *c2h)
@@ -6055,7 +6354,7 @@ void rtw89_fw_c2h_irqsafe(struct rtw89_dev *rtwdev, struct sk_buff *c2h)
enqueue:
skb_queue_tail(&rtwdev->c2h_queue, c2h);
- ieee80211_queue_work(rtwdev->hw, &rtwdev->c2h_work);
+ wiphy_work_queue(rtwdev->hw->wiphy, &rtwdev->c2h_work);
}
static void rtw89_fw_c2h_cmd_handle(struct rtw89_dev *rtwdev,
@@ -6093,17 +6392,17 @@ static void rtw89_fw_c2h_cmd_handle(struct rtw89_dev *rtwdev,
rtw89_hex_dump(rtwdev, RTW89_DBG_FW, "C2H: ", skb->data, skb->len);
}
-void rtw89_fw_c2h_work(struct work_struct *work)
+void rtw89_fw_c2h_work(struct wiphy *wiphy, struct wiphy_work *work)
{
struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev,
c2h_work);
struct sk_buff *skb, *tmp;
+ lockdep_assert_wiphy(rtwdev->hw->wiphy);
+
skb_queue_walk_safe(&rtwdev->c2h_queue, skb, tmp) {
skb_unlink(skb, &rtwdev->c2h_queue);
- mutex_lock(&rtwdev->mutex);
rtw89_fw_c2h_cmd_handle(rtwdev, skb);
- mutex_unlock(&rtwdev->mutex);
dev_kfree_skb_any(skb);
}
}
@@ -6184,7 +6483,7 @@ int rtw89_fw_msg_reg(struct rtw89_dev *rtwdev,
u32 ret;
if (h2c_info && h2c_info->id != RTW89_FWCMD_H2CREG_FUNC_GET_FEATURE)
- lockdep_assert_held(&rtwdev->mutex);
+ lockdep_assert_wiphy(rtwdev->hw->wiphy);
if (!h2c_info && !c2h_info)
return -EINVAL;
diff --git a/drivers/net/wireless/realtek/rtw89/fw.h b/drivers/net/wireless/realtek/rtw89/fw.h
index 2026bc2fd2ac..55255b48bdb7 100644
--- a/drivers/net/wireless/realtek/rtw89/fw.h
+++ b/drivers/net/wireless/realtek/rtw89/fw.h
@@ -664,6 +664,11 @@ struct rtw89_fw_mss_pool_hdr {
union rtw89_fw_section_mssc_content {
struct {
+ u8 pad[0x20];
+ u8 bit_in_chip_list;
+ u8 ver;
+ } __packed blacklist;
+ struct {
u8 pad[58];
__le32 v;
} __packed sb_sel_ver;
@@ -673,6 +678,13 @@ union rtw89_fw_section_mssc_content {
} __packed key_sign_len;
} __packed;
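+/* per-chip bitmap of blacklisted firmware; bit position comes from blacklist.bit_in_chip_list */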
+struct rtw89_fw_blacklist {
+ u8 ver;
+ u8 list[32];
+};
+
+extern const struct rtw89_fw_blacklist rtw89_fw_blacklist_default;
+
static inline void SET_CTRL_INFO_MACID(void *table, u32 val)
{
le32p_replace_bits((__le32 *)(table) + 0, val, GENMASK(6, 0));
@@ -1579,25 +1591,17 @@ struct rtw89_h2c_bcn_upd_be {
#define RTW89_H2C_BCN_UPD_BE_W7_ECSA_OFST GENMASK(30, 16)
#define RTW89_H2C_BCN_UPD_BE_W7_PROTECTION_KEY_ID BIT(31)
-static inline void SET_FWROLE_MAINTAIN_MACID(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)h2c, val, GENMASK(7, 0));
-}
-
-static inline void SET_FWROLE_MAINTAIN_SELF_ROLE(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)h2c, val, GENMASK(9, 8));
-}
-
-static inline void SET_FWROLE_MAINTAIN_UPD_MODE(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)h2c, val, GENMASK(12, 10));
-}
+struct rtw89_h2c_role_maintain {
+ __le32 w0;
+};
-static inline void SET_FWROLE_MAINTAIN_WIFI_ROLE(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)h2c, val, GENMASK(16, 13));
-}
+#define RTW89_H2C_ROLE_MAINTAIN_W0_MACID GENMASK(7, 0)
+#define RTW89_H2C_ROLE_MAINTAIN_W0_SELF_ROLE GENMASK(9, 8)
+#define RTW89_H2C_ROLE_MAINTAIN_W0_UPD_MODE GENMASK(12, 10)
+#define RTW89_H2C_ROLE_MAINTAIN_W0_WIFI_ROLE GENMASK(16, 13)
+#define RTW89_H2C_ROLE_MAINTAIN_W0_BAND GENMASK(18, 17)
+#define RTW89_H2C_ROLE_MAINTAIN_W0_PORT GENMASK(21, 19)
+#define RTW89_H2C_ROLE_MAINTAIN_W0_MACID_EXT GENMASK(31, 24)
enum rtw89_fw_sta_type { /* value of RTW89_H2C_JOININFO_W1_STA_TYPE */
RTW89_FW_N_AC_STA = 0,
@@ -1801,17 +1805,21 @@ struct rtw89_h2c_lps_ch_info {
struct rtw89_h2c_lps_ml_cmn_info {
u8 fmt_id;
- u8 rsvd0[3];
+ u8 rfe_type;
+ u8 rsvd0[2];
__le32 mlo_dbcc_mode;
- u8 central_ch[RTW89_PHY_MAX];
- u8 pri_ch[RTW89_PHY_MAX];
- u8 bw[RTW89_PHY_MAX];
- u8 band[RTW89_PHY_MAX];
- u8 bcn_rate_type[RTW89_PHY_MAX];
+ u8 central_ch[RTW89_PHY_NUM];
+ u8 pri_ch[RTW89_PHY_NUM];
+ u8 bw[RTW89_PHY_NUM];
+ u8 band[RTW89_PHY_NUM];
+ u8 bcn_rate_type[RTW89_PHY_NUM];
u8 rsvd1[2];
- __le16 tia_gain[RTW89_PHY_MAX][TIA_GAIN_NUM];
- u8 lna_gain[RTW89_PHY_MAX][LNA_GAIN_NUM];
+ __le16 tia_gain[RTW89_PHY_NUM][TIA_GAIN_NUM];
+ u8 lna_gain[RTW89_PHY_NUM][LNA_GAIN_NUM];
u8 rsvd2[2];
+ u8 tia_lna_op1db[RTW89_PHY_NUM][LNA_GAIN_NUM + 1];
+ u8 lna_op1db[RTW89_PHY_NUM][LNA_GAIN_NUM];
+ u8 dup_bcn_ofst[RTW89_PHY_NUM];
} __packed;
static inline void RTW89_SET_FWCMD_CPU_EXCEPTION_TYPE(void *cmd, u32 val)
@@ -3882,6 +3890,7 @@ enum rtw89_fw_element_id {
RTW89_FW_ELEMENT_ID_TX_SHAPE_LMT_RU = 17,
RTW89_FW_ELEMENT_ID_TXPWR_TRK = 18,
RTW89_FW_ELEMENT_ID_RFKLOG_FMT = 19,
+ RTW89_FW_ELEMENT_ID_REGD = 20,
RTW89_FW_ELEMENT_ID_NUM,
};
@@ -3925,6 +3934,15 @@ struct __rtw89_fw_txpwr_element {
u8 content[];
} __packed;
+struct __rtw89_fw_regd_element {
+ u8 rsvd0;
+ u8 rsvd1;
+ u8 rsvd2;
+ u8 ent_sz;
+ __le32 num_ents;
+ u8 content[];
+} __packed;
+
enum rtw89_fw_txpwr_trk_type {
__RTW89_FW_TXPWR_TRK_TYPE_6GHZ_START = 0,
RTW89_FW_TXPWR_TRK_TYPE_6GB_N = 0,
@@ -4016,6 +4034,7 @@ struct rtw89_fw_element_hdr {
__le16 offset[];
} __packed rfk_log_fmt;
struct __rtw89_fw_txpwr_element txpwr;
+ struct __rtw89_fw_regd_element regd;
} __packed u;
} __packed;
@@ -4524,6 +4543,12 @@ struct rtw89_c2h_rfk_report {
u8 version;
} __packed;
+struct rtw89_c2h_rf_tas_info {
+ struct rtw89_c2h_hdr hdr;
+ __le32 cur_idx;
+ __le16 txpwr_history[20];
+} __packed;
+
#define RTW89_FW_RSVD_PLE_SIZE 0x800
#define RTW89_FW_BACKTRACE_INFO_SIZE 8
@@ -4573,6 +4598,8 @@ int rtw89_fw_h2c_ampdu_cmac_tbl_g7(struct rtw89_dev *rtwdev,
struct rtw89_sta_link *rtwsta_link);
int rtw89_fw_h2c_txtime_cmac_tbl(struct rtw89_dev *rtwdev,
struct rtw89_sta_link *rtwsta_link);
+int rtw89_fw_h2c_txtime_cmac_tbl_g7(struct rtw89_dev *rtwdev,
+ struct rtw89_sta_link *rtwsta_link);
int rtw89_fw_h2c_txpath_cmac_tbl(struct rtw89_dev *rtwdev,
struct rtw89_sta_link *rtwsta_link);
int rtw89_fw_h2c_update_beacon(struct rtw89_dev *rtwdev,
@@ -4588,7 +4615,7 @@ int rtw89_fw_h2c_dctl_sec_cam_v2(struct rtw89_dev *rtwdev,
struct rtw89_vif_link *rtwvif_link,
struct rtw89_sta_link *rtwsta_link);
void rtw89_fw_c2h_irqsafe(struct rtw89_dev *rtwdev, struct sk_buff *c2h);
-void rtw89_fw_c2h_work(struct work_struct *work);
+void rtw89_fw_c2h_work(struct wiphy *wiphy, struct wiphy_work *work);
int rtw89_fw_h2c_role_maintain(struct rtw89_dev *rtwdev,
struct rtw89_vif_link *rtwvif_link,
struct rtw89_sta_link *rtwsta_link,
@@ -4654,6 +4681,7 @@ int rtw89_fw_h2c_raw_with_hdr(struct rtw89_dev *rtwdev,
bool rack, bool dack);
int rtw89_fw_h2c_raw(struct rtw89_dev *rtwdev, const u8 *buf, u16 len);
void rtw89_fw_send_all_early_h2c(struct rtw89_dev *rtwdev);
+void __rtw89_fw_free_all_early_h2c(struct rtw89_dev *rtwdev);
void rtw89_fw_free_all_early_h2c(struct rtw89_dev *rtwdev);
int rtw89_fw_h2c_general_pkt(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link,
u8 macid);
@@ -4854,6 +4882,15 @@ static inline int rtw89_chip_h2c_ampdu_cmac_tbl(struct rtw89_dev *rtwdev,
}
static inline
+int rtw89_chip_h2c_txtime_cmac_tbl(struct rtw89_dev *rtwdev,
+ struct rtw89_sta_link *rtwsta_link)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+
+ return chip->ops->h2c_txtime_cmac_tbl(rtwdev, rtwsta_link);
+}
+
+static inline
int rtw89_chip_h2c_ba_cam(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta,
bool valid, struct ieee80211_ampdu_params *params)
{
@@ -4874,6 +4911,18 @@ int rtw89_chip_h2c_ba_cam(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta,
return 0;
}
+/* Must consider compatibility; don't insert new in the mid.
+ * Fill each field's default value in rtw89_regd_entcpy().
+ */
+struct rtw89_fw_regd_entry {
+ u8 alpha2_0;
+ u8 alpha2_1;
+ u8 rule_2ghz;
+ u8 rule_5ghz;
+ u8 rule_6ghz;
+ __le32 fmap;
+} __packed;
+
/* must consider compatibility; don't insert new in the mid */
struct rtw89_fw_txpwr_byrate_entry {
u8 band;
diff --git a/drivers/net/wireless/realtek/rtw89/mac.c b/drivers/net/wireless/realtek/rtw89/mac.c
index a37c6d525d6f..b4841f948ec1 100644
--- a/drivers/net/wireless/realtek/rtw89/mac.c
+++ b/drivers/net/wireless/realtek/rtw89/mac.c
@@ -1495,6 +1495,21 @@ static int rtw89_mac_power_switch(struct rtw89_dev *rtwdev, bool on)
#undef PWR_ACT
}
+int rtw89_mac_pwr_on(struct rtw89_dev *rtwdev)
+{
+ int ret;
+
+ ret = rtw89_mac_power_switch(rtwdev, true);
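+ /* if the first power-on attempt fails, force power off and retry once */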
+ if (ret) {
+ rtw89_mac_power_switch(rtwdev, false);
+ ret = rtw89_mac_power_switch(rtwdev, true);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
void rtw89_mac_pwr_off(struct rtw89_dev *rtwdev)
{
rtw89_mac_power_switch(rtwdev, false);
@@ -3996,14 +4011,6 @@ int rtw89_mac_partial_init(struct rtw89_dev *rtwdev, bool include_bb)
{
int ret;
- ret = rtw89_mac_power_switch(rtwdev, true);
- if (ret) {
- rtw89_mac_power_switch(rtwdev, false);
- ret = rtw89_mac_power_switch(rtwdev, true);
- if (ret)
- return ret;
- }
-
rtw89_mac_ctrl_hci_dma_trx(rtwdev, true);
if (include_bb) {
@@ -4036,6 +4043,10 @@ int rtw89_mac_init(struct rtw89_dev *rtwdev)
bool include_bb = !!chip->bbmcu_nr;
int ret;
+ ret = rtw89_mac_pwr_on(rtwdev);
+ if (ret)
+ return ret;
+
ret = rtw89_mac_partial_init(rtwdev, include_bb);
if (ret)
goto fail;
@@ -4067,7 +4078,7 @@ int rtw89_mac_init(struct rtw89_dev *rtwdev)
return ret;
fail:
- rtw89_mac_power_switch(rtwdev, false);
+ rtw89_mac_pwr_off(rtwdev);
return ret;
}
@@ -4826,6 +4837,32 @@ void rtw89_mac_set_he_obss_narrow_bw_ru(struct rtw89_dev *rtwdev,
rtw89_write32_set(rtwdev, reg, mac->narrow_bw_ru_dis.mask);
}
+void rtw89_mac_set_he_tb(struct rtw89_dev *rtwdev,
+ struct rtw89_vif_link *rtwvif_link)
+{
+ struct ieee80211_bss_conf *bss_conf;
+ bool set;
+ u32 reg;
+
+ if (rtwdev->chip->chip_gen != RTW89_CHIP_BE)
+ return;
+
+ rcu_read_lock();
+
+ bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, true);
+ set = bss_conf->he_support && !bss_conf->eht_support;
+
+ rcu_read_unlock();
+
+ reg = rtw89_mac_reg_by_idx(rtwdev, R_BE_CLIENT_OM_CTRL,
+ rtwvif_link->mac_idx);
+
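+ /* BSS supports HE but not EHT: disable EHT trigger-based handling */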
+ if (set)
+ rtw89_write32_set(rtwdev, reg, B_BE_TRIG_DIS_EHTTB);
+ else
+ rtw89_write32_clr(rtwdev, reg, B_BE_TRIG_DIS_EHTTB);
+}
+
void rtw89_mac_stop_ap(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link)
{
rtw89_mac_port_cfg_func_sw(rtwdev, rtwvif_link);
@@ -6031,7 +6068,7 @@ int rtw89_mac_cfg_ctrl_path_v1(struct rtw89_dev *rtwdev, bool wl)
if (wl)
return 0;
- for (i = 0; i < RTW89_PHY_MAX; i++) {
+ for (i = 0; i < RTW89_PHY_NUM; i++) {
g[i].gnt_bt_sw_en = 1;
g[i].gnt_bt = 1;
g[i].gnt_wl_sw_en = 1;
@@ -6422,6 +6459,7 @@ __rtw89_mac_set_tx_time(struct rtw89_dev *rtwdev, struct rtw89_sta_link *rtwsta_
u32 tx_time)
{
#define MAC_AX_DFLT_TX_TIME 5280
+ const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
u8 mac_idx = rtwsta_link->rtwvif_link->mac_idx;
u32 max_tx_time = tx_time == 0 ? MAC_AX_DFLT_TX_TIME : tx_time;
u32 reg;
@@ -6429,7 +6467,7 @@ __rtw89_mac_set_tx_time(struct rtw89_dev *rtwdev, struct rtw89_sta_link *rtwsta_
if (rtwsta_link->cctl_tx_time) {
rtwsta_link->ampdu_max_time = (max_tx_time - 512) >> 9;
- ret = rtw89_fw_h2c_txtime_cmac_tbl(rtwdev, rtwsta_link);
+ ret = rtw89_chip_h2c_txtime_cmac_tbl(rtwdev, rtwsta_link);
} else {
ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL);
if (ret) {
@@ -6437,8 +6475,8 @@ __rtw89_mac_set_tx_time(struct rtw89_dev *rtwdev, struct rtw89_sta_link *rtwsta_
return ret;
}
- reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_AMPDU_AGG_LIMIT, mac_idx);
- rtw89_write32_mask(rtwdev, reg, B_AX_AMPDU_MAX_TIME_MASK,
+ reg = rtw89_mac_reg_by_idx(rtwdev, mac->agg_limit.addr, mac_idx);
+ rtw89_write32_mask(rtwdev, reg, mac->agg_limit.mask,
max_tx_time >> 5);
}
@@ -6464,6 +6502,7 @@ int rtw89_mac_set_tx_time(struct rtw89_dev *rtwdev, struct rtw89_sta_link *rtwst
int rtw89_mac_get_tx_time(struct rtw89_dev *rtwdev, struct rtw89_sta_link *rtwsta_link,
u32 *tx_time)
{
+ const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
u8 mac_idx = rtwsta_link->rtwvif_link->mac_idx;
u32 reg;
int ret = 0;
@@ -6477,8 +6516,8 @@ int rtw89_mac_get_tx_time(struct rtw89_dev *rtwdev, struct rtw89_sta_link *rtwst
return ret;
}
- reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_AMPDU_AGG_LIMIT, mac_idx);
- *tx_time = rtw89_read32_mask(rtwdev, reg, B_AX_AMPDU_MAX_TIME_MASK) << 5;
+ reg = rtw89_mac_reg_by_idx(rtwdev, mac->agg_limit.addr, mac_idx);
+ *tx_time = rtw89_read32_mask(rtwdev, reg, mac->agg_limit.mask) << 5;
}
return ret;
@@ -6494,9 +6533,9 @@ int rtw89_mac_set_tx_retry_limit(struct rtw89_dev *rtwdev,
if (!resume) {
rtwsta_link->cctl_tx_retry_limit = true;
- ret = rtw89_fw_h2c_txtime_cmac_tbl(rtwdev, rtwsta_link);
+ ret = rtw89_chip_h2c_txtime_cmac_tbl(rtwdev, rtwsta_link);
} else {
- ret = rtw89_fw_h2c_txtime_cmac_tbl(rtwdev, rtwsta_link);
+ ret = rtw89_chip_h2c_txtime_cmac_tbl(rtwdev, rtwsta_link);
rtwsta_link->cctl_tx_retry_limit = false;
}
@@ -6506,6 +6545,7 @@ int rtw89_mac_set_tx_retry_limit(struct rtw89_dev *rtwdev,
int rtw89_mac_get_tx_retry_limit(struct rtw89_dev *rtwdev,
struct rtw89_sta_link *rtwsta_link, u8 *tx_retry)
{
+ const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
u8 mac_idx = rtwsta_link->rtwvif_link->mac_idx;
u32 reg;
int ret = 0;
@@ -6519,8 +6559,8 @@ int rtw89_mac_get_tx_retry_limit(struct rtw89_dev *rtwdev,
return ret;
}
- reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_TXCNT, mac_idx);
- *tx_retry = rtw89_read32_mask(rtwdev, reg, B_AX_L_TXCNT_LMT_MASK);
+ reg = rtw89_mac_reg_by_idx(rtwdev, mac->txcnt_limit.addr, mac_idx);
+ *tx_retry = rtw89_read32_mask(rtwdev, reg, mac->txcnt_limit.mask);
}
return ret;
@@ -6798,6 +6838,8 @@ const struct rtw89_mac_gen_def rtw89_mac_gen_ax = {
.mask = B_AX_RXTRIG_RU26_DIS,
},
.wow_ctrl = {.addr = R_AX_WOW_CTRL, .mask = B_AX_WOW_WOWEN,},
+ .agg_limit = {.addr = R_AX_AMPDU_AGG_LIMIT, .mask = B_AX_AMPDU_MAX_TIME_MASK,},
+ .txcnt_limit = {.addr = R_AX_TXCNT, .mask = B_AX_L_TXCNT_LMT_MASK,},
.check_mac_en = rtw89_mac_check_mac_en_ax,
.sys_init = sys_init_ax,
diff --git a/drivers/net/wireless/realtek/rtw89/mac.h b/drivers/net/wireless/realtek/rtw89/mac.h
index 8edea96d037f..fd7935d24501 100644
--- a/drivers/net/wireless/realtek/rtw89/mac.h
+++ b/drivers/net/wireless/realtek/rtw89/mac.h
@@ -964,6 +964,8 @@ struct rtw89_mac_gen_def {
struct rtw89_reg_def bfee_ctrl;
struct rtw89_reg_def narrow_bw_ru_dis;
struct rtw89_reg_def wow_ctrl;
+ struct rtw89_reg_def agg_limit;
+ struct rtw89_reg_def txcnt_limit;
int (*check_mac_en)(struct rtw89_dev *rtwdev, u8 band,
enum rtw89_mac_hwmod_sel sel);
@@ -1145,6 +1147,7 @@ rtw89_write32_port_set(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_l
rtw89_write32_set(rtwdev, reg, bit);
}
+int rtw89_mac_pwr_on(struct rtw89_dev *rtwdev);
void rtw89_mac_pwr_off(struct rtw89_dev *rtwdev);
int rtw89_mac_partial_init(struct rtw89_dev *rtwdev, bool include_bb);
int rtw89_mac_init(struct rtw89_dev *rtwdev);
@@ -1185,6 +1188,8 @@ void rtw89_mac_port_cfg_rx_sync(struct rtw89_dev *rtwdev,
struct rtw89_vif_link *rtwvif_link, bool en);
void rtw89_mac_set_he_obss_narrow_bw_ru(struct rtw89_dev *rtwdev,
struct rtw89_vif_link *rtwvif_link);
+void rtw89_mac_set_he_tb(struct rtw89_dev *rtwdev,
+ struct rtw89_vif_link *rtwvif_link);
void rtw89_mac_stop_ap(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link);
void rtw89_mac_enable_beacon_for_ap_vifs(struct rtw89_dev *rtwdev, bool en);
int rtw89_mac_remove_vif(struct rtw89_dev *rtwdev, struct rtw89_vif_link *vif);
diff --git a/drivers/net/wireless/realtek/rtw89/mac80211.c b/drivers/net/wireless/realtek/rtw89/mac80211.c
index b3669e0074df..4fded07d0bee 100644
--- a/drivers/net/wireless/realtek/rtw89/mac80211.c
+++ b/drivers/net/wireless/realtek/rtw89/mac80211.c
@@ -57,32 +57,30 @@ static void rtw89_ops_wake_tx_queue(struct ieee80211_hw *hw,
static int rtw89_ops_start(struct ieee80211_hw *hw)
{
struct rtw89_dev *rtwdev = hw->priv;
- int ret;
- mutex_lock(&rtwdev->mutex);
- ret = rtw89_core_start(rtwdev);
- mutex_unlock(&rtwdev->mutex);
+ lockdep_assert_wiphy(hw->wiphy);
- return ret;
+ return rtw89_core_start(rtwdev);
}
static void rtw89_ops_stop(struct ieee80211_hw *hw, bool suspend)
{
struct rtw89_dev *rtwdev = hw->priv;
- mutex_lock(&rtwdev->mutex);
+ lockdep_assert_wiphy(hw->wiphy);
+
rtw89_core_stop(rtwdev);
- mutex_unlock(&rtwdev->mutex);
}
static int rtw89_ops_config(struct ieee80211_hw *hw, u32 changed)
{
struct rtw89_dev *rtwdev = hw->priv;
+ lockdep_assert_wiphy(hw->wiphy);
+
/* let previous ips work finish to ensure we don't leave ips twice */
- cancel_work_sync(&rtwdev->ips_work);
+ wiphy_work_cancel(hw->wiphy, &rtwdev->ips_work);
- mutex_lock(&rtwdev->mutex);
rtw89_leave_ps_mode(rtwdev);
if ((changed & IEEE80211_CONF_CHANGE_IDLE) &&
@@ -100,8 +98,6 @@ static int rtw89_ops_config(struct ieee80211_hw *hw, u32 changed)
!rtwdev->scanning)
rtw89_enter_ips(rtwdev);
- mutex_unlock(&rtwdev->mutex);
-
return 0;
}
@@ -115,7 +111,7 @@ static int __rtw89_ops_add_iface_link(struct rtw89_dev *rtwdev,
rtw89_vif_type_mapping(rtwvif_link, false);
- INIT_WORK(&rtwvif_link->update_beacon_work, rtw89_core_update_beacon_work);
+ wiphy_work_init(&rtwvif_link->update_beacon_work, rtw89_core_update_beacon_work);
INIT_LIST_HEAD(&rtwvif_link->general_pkt_list);
rtwvif_link->hit_rule = 0;
@@ -142,9 +138,9 @@ static int __rtw89_ops_add_iface_link(struct rtw89_dev *rtwdev,
static void __rtw89_ops_remove_iface_link(struct rtw89_dev *rtwdev,
struct rtw89_vif_link *rtwvif_link)
{
- mutex_unlock(&rtwdev->mutex);
- cancel_work_sync(&rtwvif_link->update_beacon_work);
- mutex_lock(&rtwdev->mutex);
+ lockdep_assert_wiphy(rtwdev->hw->wiphy);
+
+ wiphy_work_cancel(rtwdev->hw->wiphy, &rtwvif_link->update_beacon_work);
rtw89_leave_ps_mode(rtwdev);
@@ -162,11 +158,11 @@ static int rtw89_ops_add_interface(struct ieee80211_hw *hw,
u8 mac_id, port;
int ret = 0;
+ lockdep_assert_wiphy(hw->wiphy);
+
rtw89_debug(rtwdev, RTW89_DBG_STATE, "add vif %pM type %d, p2p %d\n",
vif->addr, vif->type, vif->p2p);
- mutex_lock(&rtwdev->mutex);
-
rtw89_leave_ips_by_hwflags(rtwdev);
if (RTW89_CHK_FW_FEATURE(BEACON_FILTER, &rtwdev->fw))
@@ -174,10 +170,8 @@ static int rtw89_ops_add_interface(struct ieee80211_hw *hw,
IEEE80211_VIF_SUPPORTS_CQM_RSSI;
mac_id = rtw89_acquire_mac_id(rtwdev);
- if (mac_id == RTW89_MAX_MAC_ID_NUM) {
- ret = -ENOSPC;
- goto err;
- }
+ if (mac_id == RTW89_MAX_MAC_ID_NUM)
+ return -ENOSPC;
port = rtw89_core_acquire_bit_map(rtwdev->hw_port, RTW89_PORT_NUM);
if (port == RTW89_PORT_NUM) {
@@ -198,7 +192,7 @@ static int rtw89_ops_add_interface(struct ieee80211_hw *hw,
rtwvif->offchan = false;
rtwvif->roc.state = RTW89_ROC_IDLE;
- INIT_DELAYED_WORK(&rtwvif->roc.roc_work, rtw89_roc_work);
+ wiphy_delayed_work_init(&rtwvif->roc.roc_work, rtw89_roc_work);
rtw89_traffic_stats_init(rtwdev, &rtwvif->stats);
@@ -213,8 +207,6 @@ static int rtw89_ops_add_interface(struct ieee80211_hw *hw,
goto unset_link;
rtw89_recalc_lps(rtwdev);
-
- mutex_unlock(&rtwdev->mutex);
return 0;
unset_link:
@@ -224,8 +216,6 @@ release_port:
rtw89_core_release_bit_map(rtwdev->hw_port, port);
release_macid:
rtw89_release_mac_id(rtwdev, mac_id);
-err:
- mutex_unlock(&rtwdev->mutex);
return ret;
}
@@ -239,12 +229,12 @@ static void rtw89_ops_remove_interface(struct ieee80211_hw *hw,
u8 port = rtw89_vif_get_main_port(rtwvif);
struct rtw89_vif_link *rtwvif_link;
+ lockdep_assert_wiphy(hw->wiphy);
+
rtw89_debug(rtwdev, RTW89_DBG_STATE, "remove vif %pM type %d p2p %d\n",
vif->addr, vif->type, vif->p2p);
- cancel_delayed_work_sync(&rtwvif->roc.roc_work);
-
- mutex_lock(&rtwdev->mutex);
+ wiphy_delayed_work_cancel(hw->wiphy, &rtwvif->roc.roc_work);
rtwvif_link = rtwvif->links[RTW89_VIF_IDLE_LINK_ID];
if (unlikely(!rtwvif_link)) {
@@ -265,8 +255,6 @@ bottom:
rtw89_recalc_lps(rtwdev);
rtw89_enter_ips_by_hwflags(rtwdev);
-
- mutex_unlock(&rtwdev->mutex);
}
static int rtw89_ops_change_interface(struct ieee80211_hw *hw,
@@ -304,7 +292,8 @@ static void rtw89_ops_configure_filter(struct ieee80211_hw *hw,
const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
u32 rx_fltr;
- mutex_lock(&rtwdev->mutex);
+ lockdep_assert_wiphy(hw->wiphy);
+
rtw89_leave_ps_mode(rtwdev);
*new_flags &= FIF_ALLMULTI | FIF_OTHER_BSS | FIF_FCSFAIL |
@@ -367,14 +356,11 @@ static void rtw89_ops_configure_filter(struct ieee80211_hw *hw,
B_AX_RX_FLTR_CFG_MASK,
rx_fltr);
if (!rtwdev->dbcc_en)
- goto out;
+ return;
rtw89_write32_mask(rtwdev,
rtw89_mac_reg_by_idx(rtwdev, mac->rx_fltr, RTW89_MAC_1),
B_AX_RX_FLTR_CFG_MASK,
rx_fltr);
-
-out:
- mutex_unlock(&rtwdev->mutex);
}
static const u8 ac_to_fw_idx[IEEE80211_NUM_ACS] = {
@@ -670,6 +656,7 @@ static void __rtw89_ops_bss_link_assoc(struct rtw89_dev *rtwdev,
rtw89_chip_cfg_txpwr_ul_tb_offset(rtwdev, rtwvif_link);
rtw89_mac_port_update(rtwdev, rtwvif_link);
rtw89_mac_set_he_obss_narrow_bw_ru(rtwdev, rtwvif_link);
+ rtw89_mac_set_he_tb(rtwdev, rtwvif_link);
}
static void __rtw89_ops_bss_assoc(struct rtw89_dev *rtwdev,
@@ -689,7 +676,8 @@ static void rtw89_ops_vif_cfg_changed(struct ieee80211_hw *hw,
struct rtw89_dev *rtwdev = hw->priv;
struct rtw89_vif *rtwvif = vif_to_rtwvif(vif);
- mutex_lock(&rtwdev->mutex);
+ lockdep_assert_wiphy(hw->wiphy);
+
rtw89_leave_ps_mode(rtwdev);
if (changed & BSS_CHANGED_ASSOC) {
@@ -712,8 +700,6 @@ static void rtw89_ops_vif_cfg_changed(struct ieee80211_hw *hw,
if (changed & BSS_CHANGED_ARP_FILTER)
rtwvif->ip_addr = vif->cfg.arp_addr_list[0];
-
- mutex_unlock(&rtwdev->mutex);
}
static void rtw89_ops_link_info_changed(struct ieee80211_hw *hw,
@@ -725,7 +711,8 @@ static void rtw89_ops_link_info_changed(struct ieee80211_hw *hw,
struct rtw89_vif *rtwvif = vif_to_rtwvif(vif);
struct rtw89_vif_link *rtwvif_link;
- mutex_lock(&rtwdev->mutex);
+ lockdep_assert_wiphy(hw->wiphy);
+
rtw89_leave_ps_mode(rtwdev);
rtwvif_link = rtwvif->links[conf->link_id];
@@ -733,7 +720,7 @@ static void rtw89_ops_link_info_changed(struct ieee80211_hw *hw,
rtw89_err(rtwdev,
"%s: rtwvif link (link_id %u) is not active\n",
__func__, conf->link_id);
- goto out;
+ return;
}
if (changed & BSS_CHANGED_BSSID) {
@@ -763,9 +750,6 @@ static void rtw89_ops_link_info_changed(struct ieee80211_hw *hw,
if (changed & BSS_CHANGED_TPE)
rtw89_reg_6ghz_recalc(rtwdev, rtwvif_link, true);
-
-out:
- mutex_unlock(&rtwdev->mutex);
}
static int rtw89_ops_start_ap(struct ieee80211_hw *hw,
@@ -778,22 +762,19 @@ static int rtw89_ops_start_ap(struct ieee80211_hw *hw,
const struct rtw89_chan *chan;
int ret = 0;
- mutex_lock(&rtwdev->mutex);
+ lockdep_assert_wiphy(hw->wiphy);
rtwvif_link = rtwvif->links[link_conf->link_id];
if (unlikely(!rtwvif_link)) {
rtw89_err(rtwdev,
"%s: rtwvif link (link_id %u) is not active\n",
__func__, link_conf->link_id);
- ret = -ENOLINK;
- goto out;
+ return -ENOLINK;
}
chan = rtw89_chan_get(rtwdev, rtwvif_link->chanctx_idx);
- if (chan->band_type == RTW89_BAND_6G) {
- mutex_unlock(&rtwdev->mutex);
+ if (chan->band_type == RTW89_BAND_6G)
return -EOPNOTSUPP;
- }
if (rtwdev->scanning)
rtw89_hw_scan_abort(rtwdev, rtwdev->scan_info.scanning_vif);
@@ -810,15 +791,12 @@ static int rtw89_ops_start_ap(struct ieee80211_hw *hw,
if (RTW89_CHK_FW_FEATURE(NOTIFY_AP_INFO, &rtwdev->fw)) {
ret = rtw89_fw_h2c_ap_info_refcount(rtwdev, true);
if (ret)
- goto out;
+ return ret;
}
rtw89_queue_chanctx_work(rtwdev);
-out:
- mutex_unlock(&rtwdev->mutex);
-
- return ret;
+ return 0;
}
static
@@ -829,14 +807,14 @@ void rtw89_ops_stop_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct rtw89_vif *rtwvif = vif_to_rtwvif(vif);
struct rtw89_vif_link *rtwvif_link;
- mutex_lock(&rtwdev->mutex);
+ lockdep_assert_wiphy(hw->wiphy);
rtwvif_link = rtwvif->links[link_conf->link_id];
if (unlikely(!rtwvif_link)) {
rtw89_err(rtwdev,
"%s: rtwvif link (link_id %u) is not active\n",
__func__, link_conf->link_id);
- goto out;
+ return;
}
if (RTW89_CHK_FW_FEATURE(NOTIFY_AP_INFO, &rtwdev->fw))
@@ -845,22 +823,18 @@ void rtw89_ops_stop_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
rtw89_mac_stop_ap(rtwdev, rtwvif_link);
rtw89_chip_h2c_assoc_cmac_tbl(rtwdev, rtwvif_link, NULL);
rtw89_fw_h2c_join_info(rtwdev, rtwvif_link, NULL, true);
-
-out:
- mutex_unlock(&rtwdev->mutex);
}
static int rtw89_ops_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
bool set)
{
- struct rtw89_dev *rtwdev = hw->priv;
struct rtw89_sta *rtwsta = sta_to_rtwsta(sta);
struct rtw89_vif *rtwvif = rtwsta->rtwvif;
struct rtw89_vif_link *rtwvif_link;
unsigned int link_id;
rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id)
- ieee80211_queue_work(rtwdev->hw, &rtwvif_link->update_beacon_work);
+ wiphy_work_queue(hw->wiphy, &rtwvif_link->update_beacon_work);
return 0;
}
@@ -873,9 +847,9 @@ static int rtw89_ops_conf_tx(struct ieee80211_hw *hw,
struct rtw89_dev *rtwdev = hw->priv;
struct rtw89_vif *rtwvif = vif_to_rtwvif(vif);
struct rtw89_vif_link *rtwvif_link;
- int ret = 0;
- mutex_lock(&rtwdev->mutex);
+ lockdep_assert_wiphy(hw->wiphy);
+
rtw89_leave_ps_mode(rtwdev);
rtwvif_link = rtwvif->links[link_id];
@@ -883,17 +857,13 @@ static int rtw89_ops_conf_tx(struct ieee80211_hw *hw,
rtw89_err(rtwdev,
"%s: rtwvif link (link_id %u) is not active\n",
__func__, link_id);
- ret = -ENOLINK;
- goto out;
+ return -ENOLINK;
}
rtwvif_link->tx_params[ac] = *params;
__rtw89_conf_tx(rtwdev, rtwvif_link, ac);
-out:
- mutex_unlock(&rtwdev->mutex);
-
- return ret;
+ return 0;
}
static int __rtw89_ops_sta_state(struct ieee80211_hw *hw,
@@ -937,14 +907,11 @@ static int rtw89_ops_sta_state(struct ieee80211_hw *hw,
enum ieee80211_sta_state new_state)
{
struct rtw89_dev *rtwdev = hw->priv;
- int ret;
- mutex_lock(&rtwdev->mutex);
- rtw89_leave_ps_mode(rtwdev);
- ret = __rtw89_ops_sta_state(hw, vif, sta, old_state, new_state);
- mutex_unlock(&rtwdev->mutex);
+ lockdep_assert_wiphy(hw->wiphy);
- return ret;
+ rtw89_leave_ps_mode(rtwdev);
+ return __rtw89_ops_sta_state(hw, vif, sta, old_state, new_state);
}
static int rtw89_ops_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
@@ -953,9 +920,10 @@ static int rtw89_ops_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
struct ieee80211_key_conf *key)
{
struct rtw89_dev *rtwdev = hw->priv;
- int ret = 0;
+ int ret;
+
+ lockdep_assert_wiphy(hw->wiphy);
- mutex_lock(&rtwdev->mutex);
rtw89_leave_ps_mode(rtwdev);
switch (cmd) {
@@ -964,7 +932,7 @@ static int rtw89_ops_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
ret = rtw89_cam_sec_key_add(rtwdev, vif, sta, key);
if (ret && ret != -EOPNOTSUPP) {
rtw89_err(rtwdev, "failed to add key to sec cam\n");
- goto out;
+ return ret;
}
break;
case DISABLE_KEY:
@@ -974,14 +942,11 @@ static int rtw89_ops_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
ret = rtw89_cam_sec_key_del(rtwdev, vif, sta, key, true);
if (ret) {
rtw89_err(rtwdev, "failed to remove key from sec cam\n");
- goto out;
+ return ret;
}
break;
}
-out:
- mutex_unlock(&rtwdev->mutex);
-
return ret;
}
@@ -997,38 +962,32 @@ static int rtw89_ops_ampdu_action(struct ieee80211_hw *hw,
struct ieee80211_txq *txq = sta->txq[tid];
struct rtw89_txq *rtwtxq = (struct rtw89_txq *)txq->drv_priv;
+ lockdep_assert_wiphy(rtwdev->hw->wiphy);
+
switch (params->action) {
case IEEE80211_AMPDU_TX_START:
return IEEE80211_AMPDU_TX_START_IMMEDIATE;
case IEEE80211_AMPDU_TX_STOP_CONT:
case IEEE80211_AMPDU_TX_STOP_FLUSH:
case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
- mutex_lock(&rtwdev->mutex);
clear_bit(RTW89_TXQ_F_AMPDU, &rtwtxq->flags);
clear_bit(tid, rtwsta->ampdu_map);
rtw89_chip_h2c_ampdu_cmac_tbl(rtwdev, rtwvif, rtwsta);
- mutex_unlock(&rtwdev->mutex);
ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
break;
case IEEE80211_AMPDU_TX_OPERATIONAL:
- mutex_lock(&rtwdev->mutex);
set_bit(RTW89_TXQ_F_AMPDU, &rtwtxq->flags);
rtwsta->ampdu_params[tid].agg_num = params->buf_size;
rtwsta->ampdu_params[tid].amsdu = params->amsdu;
set_bit(tid, rtwsta->ampdu_map);
rtw89_leave_ps_mode(rtwdev);
rtw89_chip_h2c_ampdu_cmac_tbl(rtwdev, rtwvif, rtwsta);
- mutex_unlock(&rtwdev->mutex);
break;
case IEEE80211_AMPDU_RX_START:
- mutex_lock(&rtwdev->mutex);
rtw89_chip_h2c_ba_cam(rtwdev, rtwsta, true, params);
- mutex_unlock(&rtwdev->mutex);
break;
case IEEE80211_AMPDU_RX_STOP:
- mutex_lock(&rtwdev->mutex);
rtw89_chip_h2c_ba_cam(rtwdev, rtwsta, false, params);
- mutex_unlock(&rtwdev->mutex);
break;
default:
WARN_ON(1);
@@ -1042,11 +1001,11 @@ static int rtw89_ops_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
{
struct rtw89_dev *rtwdev = hw->priv;
- mutex_lock(&rtwdev->mutex);
+ lockdep_assert_wiphy(hw->wiphy);
+
rtw89_leave_ps_mode(rtwdev);
if (test_bit(RTW89_FLAG_POWERON, rtwdev->flags))
rtw89_mac_update_rts_threshold(rtwdev);
- mutex_unlock(&rtwdev->mutex);
return 0;
}
@@ -1086,7 +1045,8 @@ static void rtw89_ops_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
{
struct rtw89_dev *rtwdev = hw->priv;
- mutex_lock(&rtwdev->mutex);
+ lockdep_assert_wiphy(hw->wiphy);
+
rtw89_leave_lps(rtwdev);
rtw89_hci_flush_queues(rtwdev, queues, drop);
@@ -1094,8 +1054,6 @@ static void rtw89_ops_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
__rtw89_drop_packets(rtwdev, vif);
else
rtw89_mac_flush_txq(rtwdev, queues, drop);
-
- mutex_unlock(&rtwdev->mutex);
}
struct rtw89_iter_bitrate_mask_data {
@@ -1142,10 +1100,10 @@ static int rtw89_ops_set_bitrate_mask(struct ieee80211_hw *hw,
{
struct rtw89_dev *rtwdev = hw->priv;
- mutex_lock(&rtwdev->mutex);
+ lockdep_assert_wiphy(hw->wiphy);
+
rtw89_phy_rate_pattern_vif(rtwdev, vif, mask);
rtw89_ra_mask_info_update(rtwdev, vif, mask);
- mutex_unlock(&rtwdev->mutex);
return 0;
}
@@ -1156,6 +1114,8 @@ int rtw89_ops_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant)
struct rtw89_dev *rtwdev = hw->priv;
struct rtw89_hal *hal = &rtwdev->hal;
+ lockdep_assert_wiphy(hw->wiphy);
+
if (hal->ant_diversity) {
if (tx_ant != rx_ant || hweight32(tx_ant) != 1)
return -EINVAL;
@@ -1163,12 +1123,10 @@ int rtw89_ops_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant)
return -EINVAL;
}
- mutex_lock(&rtwdev->mutex);
hal->antenna_tx = tx_ant;
hal->antenna_rx = rx_ant;
hal->tx_path_diversity = false;
hal->ant_diversity_fixed = true;
- mutex_unlock(&rtwdev->mutex);
return 0;
}
@@ -1193,18 +1151,15 @@ static void rtw89_ops_sw_scan_start(struct ieee80211_hw *hw,
struct rtw89_vif *rtwvif = vif_to_rtwvif(vif);
struct rtw89_vif_link *rtwvif_link;
- mutex_lock(&rtwdev->mutex);
+ lockdep_assert_wiphy(hw->wiphy);
rtwvif_link = rtw89_vif_get_link_inst(rtwvif, 0);
if (unlikely(!rtwvif_link)) {
rtw89_err(rtwdev, "sw scan start: find no link on HW-0\n");
- goto out;
+ return;
}
rtw89_core_scan_start(rtwdev, rtwvif_link, mac_addr, false);
-
-out:
- mutex_unlock(&rtwdev->mutex);
}
static void rtw89_ops_sw_scan_complete(struct ieee80211_hw *hw,
@@ -1214,18 +1169,15 @@ static void rtw89_ops_sw_scan_complete(struct ieee80211_hw *hw,
struct rtw89_vif *rtwvif = vif_to_rtwvif(vif);
struct rtw89_vif_link *rtwvif_link;
- mutex_lock(&rtwdev->mutex);
+ lockdep_assert_wiphy(hw->wiphy);
rtwvif_link = rtw89_vif_get_link_inst(rtwvif, 0);
if (unlikely(!rtwvif_link)) {
rtw89_err(rtwdev, "sw scan complete: find no link on HW-0\n");
- goto out;
+ return;
}
rtw89_core_scan_complete(rtwdev, rtwvif_link, false);
-
-out:
- mutex_unlock(&rtwdev->mutex);
}
static void rtw89_ops_reconfig_complete(struct ieee80211_hw *hw,
@@ -1245,21 +1197,18 @@ static int rtw89_ops_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct rtw89_vif_link *rtwvif_link;
int ret;
+ lockdep_assert_wiphy(hw->wiphy);
+
if (!RTW89_CHK_FW_FEATURE(SCAN_OFFLOAD, &rtwdev->fw))
return 1;
- mutex_lock(&rtwdev->mutex);
-
- if (rtwdev->scanning || rtwvif->offchan) {
- ret = -EBUSY;
- goto out;
- }
+ if (rtwdev->scanning || rtwvif->offchan)
+ return -EBUSY;
rtwvif_link = rtw89_vif_get_link_inst(rtwvif, 0);
if (unlikely(!rtwvif_link)) {
rtw89_err(rtwdev, "hw scan: find no link on HW-0\n");
- ret = -ENOLINK;
- goto out;
+ return -ENOLINK;
}
rtw89_hw_scan_start(rtwdev, rtwvif_link, req);
@@ -1269,9 +1218,6 @@ static int rtw89_ops_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
rtw89_err(rtwdev, "HW scan failed with status: %d\n", ret);
}
-out:
- mutex_unlock(&rtwdev->mutex);
-
return ret;
}
@@ -1282,24 +1228,21 @@ static void rtw89_ops_cancel_hw_scan(struct ieee80211_hw *hw,
struct rtw89_vif *rtwvif = vif_to_rtwvif(vif);
struct rtw89_vif_link *rtwvif_link;
+ lockdep_assert_wiphy(hw->wiphy);
+
if (!RTW89_CHK_FW_FEATURE(SCAN_OFFLOAD, &rtwdev->fw))
return;
- mutex_lock(&rtwdev->mutex);
-
if (!rtwdev->scanning)
- goto out;
+ return;
rtwvif_link = rtw89_vif_get_link_inst(rtwvif, 0);
if (unlikely(!rtwvif_link)) {
rtw89_err(rtwdev, "cancel hw scan: find no link on HW-0\n");
- goto out;
+ return;
}
rtw89_hw_scan_abort(rtwdev, rtwvif_link);
-
-out:
- mutex_unlock(&rtwdev->mutex);
}
static void rtw89_ops_sta_rc_update(struct ieee80211_hw *hw,
@@ -1322,13 +1265,10 @@ static int rtw89_ops_add_chanctx(struct ieee80211_hw *hw,
struct ieee80211_chanctx_conf *ctx)
{
struct rtw89_dev *rtwdev = hw->priv;
- int ret;
- mutex_lock(&rtwdev->mutex);
- ret = rtw89_chanctx_ops_add(rtwdev, ctx);
- mutex_unlock(&rtwdev->mutex);
+ lockdep_assert_wiphy(hw->wiphy);
- return ret;
+ return rtw89_chanctx_ops_add(rtwdev, ctx);
}
static void rtw89_ops_remove_chanctx(struct ieee80211_hw *hw,
@@ -1336,9 +1276,9 @@ static void rtw89_ops_remove_chanctx(struct ieee80211_hw *hw,
{
struct rtw89_dev *rtwdev = hw->priv;
- mutex_lock(&rtwdev->mutex);
+ lockdep_assert_wiphy(hw->wiphy);
+
rtw89_chanctx_ops_remove(rtwdev, ctx);
- mutex_unlock(&rtwdev->mutex);
}
static void rtw89_ops_change_chanctx(struct ieee80211_hw *hw,
@@ -1347,9 +1287,9 @@ static void rtw89_ops_change_chanctx(struct ieee80211_hw *hw,
{
struct rtw89_dev *rtwdev = hw->priv;
- mutex_lock(&rtwdev->mutex);
+ lockdep_assert_wiphy(hw->wiphy);
+
rtw89_chanctx_ops_change(rtwdev, ctx, changed);
- mutex_unlock(&rtwdev->mutex);
}
static int rtw89_ops_assign_vif_chanctx(struct ieee80211_hw *hw,
@@ -1360,25 +1300,18 @@ static int rtw89_ops_assign_vif_chanctx(struct ieee80211_hw *hw,
struct rtw89_dev *rtwdev = hw->priv;
struct rtw89_vif *rtwvif = vif_to_rtwvif(vif);
struct rtw89_vif_link *rtwvif_link;
- int ret;
- mutex_lock(&rtwdev->mutex);
+ lockdep_assert_wiphy(hw->wiphy);
rtwvif_link = rtwvif->links[link_conf->link_id];
if (unlikely(!rtwvif_link)) {
rtw89_err(rtwdev,
"%s: rtwvif link (link_id %u) is not active\n",
__func__, link_conf->link_id);
- ret = -ENOLINK;
- goto out;
+ return -ENOLINK;
}
- ret = rtw89_chanctx_ops_assign_vif(rtwdev, rtwvif_link, ctx);
-
-out:
- mutex_unlock(&rtwdev->mutex);
-
- return ret;
+ return rtw89_chanctx_ops_assign_vif(rtwdev, rtwvif_link, ctx);
}
static void rtw89_ops_unassign_vif_chanctx(struct ieee80211_hw *hw,
@@ -1390,11 +1323,10 @@ static void rtw89_ops_unassign_vif_chanctx(struct ieee80211_hw *hw,
struct rtw89_vif *rtwvif = vif_to_rtwvif(vif);
struct rtw89_vif_link *rtwvif_link;
- mutex_lock(&rtwdev->mutex);
+ lockdep_assert_wiphy(hw->wiphy);
rtwvif_link = rtwvif->links[link_conf->link_id];
if (unlikely(!rtwvif_link)) {
- mutex_unlock(&rtwdev->mutex);
rtw89_err(rtwdev,
"%s: rtwvif link (link_id %u) is not active\n",
__func__, link_conf->link_id);
@@ -1402,7 +1334,6 @@ static void rtw89_ops_unassign_vif_chanctx(struct ieee80211_hw *hw,
}
rtw89_chanctx_ops_unassign_vif(rtwdev, rtwvif_link, ctx);
- mutex_unlock(&rtwdev->mutex);
}
static int rtw89_ops_remain_on_channel(struct ieee80211_hw *hw,
@@ -1415,13 +1346,12 @@ static int rtw89_ops_remain_on_channel(struct ieee80211_hw *hw,
struct rtw89_vif *rtwvif = vif_to_rtwvif_safe(vif);
struct rtw89_roc *roc = &rtwvif->roc;
+ lockdep_assert_wiphy(hw->wiphy);
+
if (!rtwvif)
return -EINVAL;
- mutex_lock(&rtwdev->mutex);
-
if (roc->state != RTW89_ROC_IDLE) {
- mutex_unlock(&rtwdev->mutex);
return -EBUSY;
}
@@ -1439,8 +1369,6 @@ static int rtw89_ops_remain_on_channel(struct ieee80211_hw *hw,
rtw89_roc_start(rtwdev, rtwvif);
- mutex_unlock(&rtwdev->mutex);
-
return 0;
}
@@ -1450,14 +1378,14 @@ static int rtw89_ops_cancel_remain_on_channel(struct ieee80211_hw *hw,
struct rtw89_dev *rtwdev = hw->priv;
struct rtw89_vif *rtwvif = vif_to_rtwvif_safe(vif);
+ lockdep_assert_wiphy(hw->wiphy);
+
if (!rtwvif)
return -EINVAL;
- cancel_delayed_work_sync(&rtwvif->roc.roc_work);
+ wiphy_delayed_work_cancel(hw->wiphy, &rtwvif->roc.roc_work);
- mutex_lock(&rtwdev->mutex);
rtw89_roc_end(rtwdev, rtwvif);
- mutex_unlock(&rtwdev->mutex);
return 0;
}
@@ -1478,14 +1406,14 @@ static int rtw89_ops_set_tid_config(struct ieee80211_hw *hw,
{
struct rtw89_dev *rtwdev = hw->priv;
- mutex_lock(&rtwdev->mutex);
+ lockdep_assert_wiphy(hw->wiphy);
+
if (sta)
rtw89_core_set_tid_config(rtwdev, sta, tid_config);
else
ieee80211_iterate_stations_atomic(rtwdev->hw,
rtw89_set_tid_config_iter,
tid_config);
- mutex_unlock(&rtwdev->mutex);
return 0;
}
@@ -1509,7 +1437,7 @@ static bool rtw89_ops_can_activate_links(struct ieee80211_hw *hw,
{
struct rtw89_dev *rtwdev = hw->priv;
- guard(mutex)(&rtwdev->mutex);
+ lockdep_assert_wiphy(hw->wiphy);
return rtw89_can_work_on_links(rtwdev, vif, active_links);
}
@@ -1571,7 +1499,7 @@ int rtw89_ops_change_vif_links(struct ieee80211_hw *hw,
int ret = 0;
int i;
- guard(mutex)(&rtwdev->mutex);
+ lockdep_assert_wiphy(hw->wiphy);
rtw89_debug(rtwdev, RTW89_DBG_STATE,
"%s: old_links (0x%08x) -> new_links (0x%08x)\n",
@@ -1720,7 +1648,7 @@ int rtw89_ops_change_sta_links(struct ieee80211_hw *hw,
unsigned long set_links = new_links & ~old_links;
int ret = 0;
- guard(mutex)(&rtwdev->mutex);
+ lockdep_assert_wiphy(hw->wiphy);
rtw89_debug(rtwdev, RTW89_DBG_STATE,
"%s: old_links (0x%08x) -> new_links (0x%08x)\n",
@@ -1750,13 +1678,12 @@ static int rtw89_ops_suspend(struct ieee80211_hw *hw,
struct rtw89_dev *rtwdev = hw->priv;
int ret;
+ lockdep_assert_wiphy(hw->wiphy);
+
set_bit(RTW89_FLAG_FORBIDDEN_TRACK_WROK, rtwdev->flags);
- cancel_delayed_work_sync(&rtwdev->track_work);
+ wiphy_delayed_work_cancel(hw->wiphy, &rtwdev->track_work);
- mutex_lock(&rtwdev->mutex);
ret = rtw89_wow_suspend(rtwdev, wowlan);
- mutex_unlock(&rtwdev->mutex);
-
if (ret) {
rtw89_warn(rtwdev, "failed to suspend for wow %d\n", ret);
clear_bit(RTW89_FLAG_FORBIDDEN_TRACK_WROK, rtwdev->flags);
@@ -1771,15 +1698,15 @@ static int rtw89_ops_resume(struct ieee80211_hw *hw)
struct rtw89_dev *rtwdev = hw->priv;
int ret;
- mutex_lock(&rtwdev->mutex);
+ lockdep_assert_wiphy(hw->wiphy);
+
ret = rtw89_wow_resume(rtwdev);
if (ret)
rtw89_warn(rtwdev, "failed to resume for wow %d\n", ret);
- mutex_unlock(&rtwdev->mutex);
clear_bit(RTW89_FLAG_FORBIDDEN_TRACK_WROK, rtwdev->flags);
- ieee80211_queue_delayed_work(rtwdev->hw, &rtwdev->track_work,
- RTW89_TRACK_WORK_PERIOD);
+ wiphy_delayed_work_queue(hw->wiphy, &rtwdev->track_work,
+ RTW89_TRACK_WORK_PERIOD);
return ret ? 1 : 0;
}
@@ -1799,18 +1726,16 @@ static void rtw89_set_rekey_data(struct ieee80211_hw *hw,
struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
struct rtw89_wow_gtk_info *gtk_info = &rtw_wow->gtk_info;
+ lockdep_assert_wiphy(hw->wiphy);
+
if (data->kek_len > sizeof(gtk_info->kek) ||
data->kck_len > sizeof(gtk_info->kck)) {
rtw89_warn(rtwdev, "kek or kck length over fw limit\n");
return;
}
- mutex_lock(&rtwdev->mutex);
-
memcpy(gtk_info->kek, data->kek, data->kek_len);
memcpy(gtk_info->kck, data->kck, data->kck_len);
-
- mutex_unlock(&rtwdev->mutex);
}
#endif
@@ -1818,16 +1743,13 @@ static void rtw89_ops_rfkill_poll(struct ieee80211_hw *hw)
{
struct rtw89_dev *rtwdev = hw->priv;
- mutex_lock(&rtwdev->mutex);
+ lockdep_assert_wiphy(hw->wiphy);
/* wl_disable GPIO get floating when entering LPS */
if (test_bit(RTW89_FLAG_RUNNING, rtwdev->flags))
- goto out;
+ return;
rtw89_core_rfkill_poll(rtwdev, false);
-
-out:
- mutex_unlock(&rtwdev->mutex);
}
const struct ieee80211_ops rtw89_ops = {
diff --git a/drivers/net/wireless/realtek/rtw89/mac_be.c b/drivers/net/wireless/realtek/rtw89/mac_be.c
index 2dbdeae904ad..99b82dc85ea3 100644
--- a/drivers/net/wireless/realtek/rtw89/mac_be.c
+++ b/drivers/net/wireless/realtek/rtw89/mac_be.c
@@ -708,8 +708,8 @@ static int sec_eng_init_be(struct rtw89_dev *rtwdev)
val32 |= B_BE_CLK_EN_CGCMP | B_BE_CLK_EN_WAPI | B_BE_CLK_EN_WEP_TKIP |
B_BE_SEC_TX_ENC | B_BE_SEC_RX_DEC |
B_BE_MC_DEC | B_BE_BC_DEC |
- B_BE_BMC_MGNT_DEC | B_BE_UC_MGNT_DEC;
- val32 &= ~B_BE_SEC_PRE_ENQUE_TX;
+ B_BE_BMC_MGNT_DEC | B_BE_UC_MGNT_DEC |
+ B_BE_SEC_PRE_ENQUE_TX;
rtw89_write32(rtwdev, R_BE_SEC_ENG_CTRL, val32);
rtw89_write32_set(rtwdev, R_BE_SEC_MPDU_PROC, B_BE_APPEND_ICV | B_BE_APPEND_MIC);
@@ -1865,7 +1865,7 @@ int rtw89_mac_cfg_ctrl_path_v2(struct rtw89_dev *rtwdev, bool wl)
if (wl)
return 0;
- for (i = 0; i < RTW89_PHY_MAX; i++) {
+ for (i = 0; i < RTW89_PHY_NUM; i++) {
g[i].gnt_bt_sw_en = 1;
g[i].gnt_bt = 1;
g[i].gnt_wl_sw_en = 1;
@@ -2585,6 +2585,8 @@ const struct rtw89_mac_gen_def rtw89_mac_gen_be = {
.mask = B_BE_RXTRIG_RU26_DIS,
},
.wow_ctrl = {.addr = R_BE_WOW_CTRL, .mask = B_BE_WOW_WOWEN,},
+ .agg_limit = {.addr = R_BE_AMPDU_AGG_LIMIT, .mask = B_BE_AMPDU_MAX_TIME_MASK,},
+ .txcnt_limit = {.addr = R_BE_TXCNT, .mask = B_BE_L_TXCNT_LMT_MASK,},
.check_mac_en = rtw89_mac_check_mac_en_be,
.sys_init = sys_init_be,
diff --git a/drivers/net/wireless/realtek/rtw89/pci.h b/drivers/net/wireless/realtek/rtw89/pci.h
index 4d11c3dd60a5..79fef5f90140 100644
--- a/drivers/net/wireless/realtek/rtw89/pci.h
+++ b/drivers/net/wireless/realtek/rtw89/pci.h
@@ -455,34 +455,36 @@
#define B_BE_RX0DMA_INT_EN BIT(0)
#define R_BE_HAXI_HISR00 0xB0B4
-#define B_BE_RDU_CH6_INT BIT(28)
-#define B_BE_RDU_CH5_INT BIT(27)
-#define B_BE_RDU_CH4_INT BIT(26)
-#define B_BE_RDU_CH2_INT BIT(25)
-#define B_BE_RDU_CH1_INT BIT(24)
-#define B_BE_RDU_CH0_INT BIT(23)
-#define B_BE_RXDMA_STUCK_INT BIT(22)
-#define B_BE_TXDMA_STUCK_INT BIT(21)
-#define B_BE_TXDMA_CH14_INT BIT(20)
-#define B_BE_TXDMA_CH13_INT BIT(19)
-#define B_BE_TXDMA_CH12_INT BIT(18)
-#define B_BE_TXDMA_CH11_INT BIT(17)
-#define B_BE_TXDMA_CH10_INT BIT(16)
-#define B_BE_TXDMA_CH9_INT BIT(15)
-#define B_BE_TXDMA_CH8_INT BIT(14)
-#define B_BE_TXDMA_CH7_INT BIT(13)
-#define B_BE_TXDMA_CH6_INT BIT(12)
-#define B_BE_TXDMA_CH5_INT BIT(11)
-#define B_BE_TXDMA_CH4_INT BIT(10)
-#define B_BE_TXDMA_CH3_INT BIT(9)
-#define B_BE_TXDMA_CH2_INT BIT(8)
-#define B_BE_TXDMA_CH1_INT BIT(7)
-#define B_BE_TXDMA_CH0_INT BIT(6)
-#define B_BE_RPQ1DMA_INT BIT(5)
-#define B_BE_RX1P1DMA_INT BIT(4)
+#define B_BE_RDU_CH5_INT_V1 BIT(30)
+#define B_BE_RDU_CH4_INT_V1 BIT(29)
+#define B_BE_RDU_CH3_INT_V1 BIT(28)
+#define B_BE_RDU_CH2_INT_V1 BIT(27)
+#define B_BE_RDU_CH1_INT_V1 BIT(26)
+#define B_BE_RDU_CH0_INT_V1 BIT(25)
+#define B_BE_RXDMA_STUCK_INT_V1 BIT(24)
+#define B_BE_TXDMA_STUCK_INT_V1 BIT(23)
+#define B_BE_TXDMA_CH14_INT_V1 BIT(22)
+#define B_BE_TXDMA_CH13_INT_V1 BIT(21)
+#define B_BE_TXDMA_CH12_INT_V1 BIT(20)
+#define B_BE_TXDMA_CH11_INT_V1 BIT(19)
+#define B_BE_TXDMA_CH10_INT_V1 BIT(18)
+#define B_BE_TXDMA_CH9_INT_V1 BIT(17)
+#define B_BE_TXDMA_CH8_INT_V1 BIT(16)
+#define B_BE_TXDMA_CH7_INT_V1 BIT(15)
+#define B_BE_TXDMA_CH6_INT_V1 BIT(14)
+#define B_BE_TXDMA_CH5_INT_V1 BIT(13)
+#define B_BE_TXDMA_CH4_INT_V1 BIT(12)
+#define B_BE_TXDMA_CH3_INT_V1 BIT(11)
+#define B_BE_TXDMA_CH2_INT_V1 BIT(10)
+#define B_BE_TXDMA_CH1_INT_V1 BIT(9)
+#define B_BE_TXDMA_CH0_INT_V1 BIT(8)
+#define B_BE_RX1P1DMA_INT_V1 BIT(7)
+#define B_BE_RX0P1DMA_INT_V1 BIT(6)
+#define B_BE_RO1DMA_INT BIT(5)
+#define B_BE_RP1DMA_INT BIT(4)
#define B_BE_RX1DMA_INT BIT(3)
-#define B_BE_RPQ0DMA_INT BIT(2)
-#define B_BE_RX0P1DMA_INT BIT(1)
+#define B_BE_RO0DMA_INT BIT(2)
+#define B_BE_RP0DMA_INT BIT(1)
#define B_BE_RX0DMA_INT BIT(0)
/* TX/RX */
diff --git a/drivers/net/wireless/realtek/rtw89/pci_be.c b/drivers/net/wireless/realtek/rtw89/pci_be.c
index cd39eebe8186..12e6a0cbb889 100644
--- a/drivers/net/wireless/realtek/rtw89/pci_be.c
+++ b/drivers/net/wireless/realtek/rtw89/pci_be.c
@@ -666,7 +666,7 @@ SIMPLE_DEV_PM_OPS(rtw89_pm_ops_be, rtw89_pci_suspend_be, rtw89_pci_resume_be);
EXPORT_SYMBOL(rtw89_pm_ops_be);
const struct rtw89_pci_gen_def rtw89_pci_gen_be = {
- .isr_rdu = B_BE_RDU_CH1_INT | B_BE_RDU_CH0_INT,
+ .isr_rdu = B_BE_RDU_CH1_INT_V1 | B_BE_RDU_CH0_INT_V1,
.isr_halt_c2h = B_BE_HALT_C2H_INT,
.isr_wdt_timeout = B_BE_WDT_TIMEOUT_INT,
.isr_clear_rpq = {R_BE_PCIE_DMA_ISR, B_BE_PCIE_RX_RPQ0_ISR_V1},
diff --git a/drivers/net/wireless/realtek/rtw89/phy.c b/drivers/net/wireless/realtek/rtw89/phy.c
index c7c05f7fda1d..f4eee642e5ce 100644
--- a/drivers/net/wireless/realtek/rtw89/phy.c
+++ b/drivers/net/wireless/realtek/rtw89/phy.c
@@ -2044,12 +2044,15 @@ static s8 rtw89_phy_ant_gain_offset(struct rtw89_dev *rtwdev, u8 band, u32 cente
if (!chip->support_ant_gain)
return 0;
- if (!(ant_gain->regd_enabled & BIT(regd)))
+ if (ant_gain->block_country || !(ant_gain->regd_enabled & BIT(regd)))
return 0;
offset_patha = rtw89_phy_ant_gain_query(rtwdev, RF_PATH_A, center_freq);
offset_pathb = rtw89_phy_ant_gain_query(rtwdev, RF_PATH_B, center_freq);
+ if (RTW89_CHK_FW_FEATURE(NO_POWER_DIFFERENCE, &rtwdev->fw))
+ return min(offset_patha, offset_pathb);
+
return max(offset_patha, offset_pathb);
}
@@ -2057,10 +2060,17 @@ s16 rtw89_phy_ant_gain_pwr_offset(struct rtw89_dev *rtwdev,
const struct rtw89_chan *chan)
{
struct rtw89_ant_gain_info *ant_gain = &rtwdev->ant_gain;
+ const struct rtw89_chip_info *chip = rtwdev->chip;
u8 regd = rtw89_regd_get(rtwdev, chan->band_type);
s8 offset_patha, offset_pathb;
- if (!(ant_gain->regd_enabled & BIT(regd)))
+ if (!chip->support_ant_gain)
+ return 0;
+
+ if (ant_gain->block_country || !(ant_gain->regd_enabled & BIT(regd)))
+ return 0;
+
+ if (RTW89_CHK_FW_FEATURE(NO_POWER_DIFFERENCE, &rtwdev->fw))
return 0;
offset_patha = rtw89_phy_ant_gain_query(rtwdev, RF_PATH_A, chan->freq);
@@ -2070,24 +2080,29 @@ s16 rtw89_phy_ant_gain_pwr_offset(struct rtw89_dev *rtwdev,
}
EXPORT_SYMBOL(rtw89_phy_ant_gain_pwr_offset);
-void rtw89_print_ant_gain(struct seq_file *m, struct rtw89_dev *rtwdev,
- const struct rtw89_chan *chan)
+int rtw89_print_ant_gain(struct rtw89_dev *rtwdev, char *buf, size_t bufsz,
+ const struct rtw89_chan *chan)
{
struct rtw89_ant_gain_info *ant_gain = &rtwdev->ant_gain;
const struct rtw89_chip_info *chip = rtwdev->chip;
u8 regd = rtw89_regd_get(rtwdev, chan->band_type);
+ char *p = buf, *end = buf + bufsz;
s8 offset_patha, offset_pathb;
- if (!chip->support_ant_gain || !(ant_gain->regd_enabled & BIT(regd))) {
- seq_puts(m, "no DAG is applied\n");
- return;
+ if (!(chip->support_ant_gain && (ant_gain->regd_enabled & BIT(regd))) ||
+ ant_gain->block_country) {
+ p += scnprintf(p, end - p, "no DAG is applied\n");
+ goto out;
}
offset_patha = rtw89_phy_ant_gain_query(rtwdev, RF_PATH_A, chan->freq);
offset_pathb = rtw89_phy_ant_gain_query(rtwdev, RF_PATH_B, chan->freq);
- seq_printf(m, "ChainA offset: %d dBm\n", offset_patha);
- seq_printf(m, "ChainB offset: %d dBm\n", offset_pathb);
+ p += scnprintf(p, end - p, "ChainA offset: %d dBm\n", offset_patha);
+ p += scnprintf(p, end - p, "ChainB offset: %d dBm\n", offset_pathb);
+
+out:
+ return p - buf;
}
static const u8 rtw89_rs_idx_num_ax[] = {
@@ -3455,6 +3470,30 @@ rtw89_phy_c2h_rfk_report_state(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u3
static void
rtw89_phy_c2h_rfk_log_tas_pwr(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len)
{
+ const struct rtw89_c2h_rf_tas_info *rf_tas =
+ (const struct rtw89_c2h_rf_tas_info *)c2h->data;
+ const enum rtw89_sar_sources src = rtwdev->sar.src;
+ struct rtw89_tas_info *tas = &rtwdev->tas;
+ u64 linear = 0;
+ u32 i, cur_idx;
+ s16 txpwr;
+
+ if (!tas->enable || src == RTW89_SAR_SOURCE_NONE)
+ return;
+
+ cur_idx = le32_to_cpu(rf_tas->cur_idx);
+ for (i = 0; i < cur_idx; i++) {
+ txpwr = (s16)le16_to_cpu(rf_tas->txpwr_history[i]);
+ linear += rtw89_db_quarter_to_linear(txpwr);
+
+ rtw89_debug(rtwdev, RTW89_DBG_SAR,
+ "tas: index: %u, txpwr: %d\n", i, txpwr);
+ }
+
+ if (cur_idx == 0)
+ tas->instant_txpwr = rtw89_db_to_linear(0);
+ else
+ tas->instant_txpwr = DIV_ROUND_DOWN_ULL(linear, cur_idx);
}
static
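
The newly filled-in rtw89_phy_c2h_rfk_log_tas_pwr body averages the firmware-reported TX-power history in the linear domain: each quarter-dB sample is converted with rtw89_db_quarter_to_linear, summed, and divided by the sample count, falling back to the linear value of 0 dB when the history is empty. Averaging the dB values directly would understate the true mean power; the short floating-point sketch below (made-up samples, plain libm math rather than the driver's fixed-point helpers) shows the difference:

#include <math.h>
#include <stdio.h>

/* TX-power samples in quarter-dB, mirroring the history buffer (made up). */
static const short samples[] = { 40, 40, 80 };	/* 10 dB, 10 dB, 20 dB */

int main(void)
{
	double linear_sum = 0.0, db_sum = 0.0;
	int n = sizeof(samples) / sizeof(samples[0]);

	for (int i = 0; i < n; i++) {
		double db = samples[i] / 4.0;	/* quarter-dB -> dB */

		db_sum += db;
		linear_sum += pow(10.0, db / 10.0);	/* dB -> linear power */
	}

	/* The linear-domain mean reflects the actual average radiated power. */
	printf("dB-domain mean:     %.2f dB\n", db_sum / n);
	printf("linear-domain mean: %.2f dB\n", 10.0 * log10(linear_sum / n));
	return 0;
}
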
@@ -4600,7 +4639,7 @@ static void rtw89_phy_cfo_dm(struct rtw89_dev *rtwdev)
cfo->dcfo_avg = 0;
rtw89_debug(rtwdev, RTW89_DBG_CFO, "CFO:total_sta_assoc=%d\n",
rtwdev->total_sta_assoc);
- if (rtwdev->total_sta_assoc == 0) {
+ if (rtwdev->total_sta_assoc == 0 || rtw89_is_mlo_1_1(rtwdev)) {
rtw89_phy_cfo_reset(rtwdev);
return;
}
@@ -4651,29 +4690,28 @@ static void rtw89_phy_cfo_dm(struct rtw89_dev *rtwdev)
rtw89_phy_cfo_statistics_reset(rtwdev);
}
-void rtw89_phy_cfo_track_work(struct work_struct *work)
+void rtw89_phy_cfo_track_work(struct wiphy *wiphy, struct wiphy_work *work)
{
struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev,
cfo_track_work.work);
struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking;
- mutex_lock(&rtwdev->mutex);
+ lockdep_assert_wiphy(wiphy);
+
if (!cfo->cfo_trig_by_timer_en)
- goto out;
+ return;
rtw89_leave_ps_mode(rtwdev);
rtw89_phy_cfo_dm(rtwdev);
- ieee80211_queue_delayed_work(rtwdev->hw, &rtwdev->cfo_track_work,
- msecs_to_jiffies(cfo->cfo_timer_ms));
-out:
- mutex_unlock(&rtwdev->mutex);
+ wiphy_delayed_work_queue(wiphy, &rtwdev->cfo_track_work,
+ msecs_to_jiffies(cfo->cfo_timer_ms));
}
static void rtw89_phy_cfo_start_work(struct rtw89_dev *rtwdev)
{
struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking;
- ieee80211_queue_delayed_work(rtwdev->hw, &rtwdev->cfo_track_work,
- msecs_to_jiffies(cfo->cfo_timer_ms));
+ wiphy_delayed_work_queue(rtwdev->hw->wiphy, &rtwdev->cfo_track_work,
+ msecs_to_jiffies(cfo->cfo_timer_ms));
}
void rtw89_phy_cfo_track(struct rtw89_dev *rtwdev)
@@ -5115,7 +5153,6 @@ static void rtw89_phy_stat_thermal_update(struct rtw89_dev *rtwdev)
struct rtw89_phy_iter_rssi_data {
struct rtw89_dev *rtwdev;
- struct rtw89_phy_ch_info *ch_info;
bool rssi_changed;
};
@@ -5123,10 +5160,15 @@ static
void __rtw89_phy_stat_rssi_update_iter(struct rtw89_sta_link *rtwsta_link,
struct rtw89_phy_iter_rssi_data *rssi_data)
{
- struct rtw89_phy_ch_info *ch_info = rssi_data->ch_info;
+ struct rtw89_vif_link *rtwvif_link = rtwsta_link->rtwvif_link;
+ struct rtw89_dev *rtwdev = rssi_data->rtwdev;
+ struct rtw89_phy_ch_info *ch_info;
+ struct rtw89_bb_ctx *bb;
unsigned long rssi_curr;
rssi_curr = ewma_rssi_read(&rtwsta_link->avg_rssi);
+ bb = rtw89_get_bb_ctx(rtwdev, rtwvif_link->phy_idx);
+ ch_info = &bb->ch_info;
if (rssi_curr < ch_info->rssi_min) {
ch_info->rssi_min = rssi_curr;
@@ -5157,11 +5199,13 @@ static void rtw89_phy_stat_rssi_update_iter(void *data,
static void rtw89_phy_stat_rssi_update(struct rtw89_dev *rtwdev)
{
- struct rtw89_phy_iter_rssi_data rssi_data = {0};
+ struct rtw89_phy_iter_rssi_data rssi_data = {};
+ struct rtw89_bb_ctx *bb;
rssi_data.rtwdev = rtwdev;
- rssi_data.ch_info = &rtwdev->ch_info;
- rssi_data.ch_info->rssi_min = U8_MAX;
+ rtw89_for_each_active_bb(rtwdev, bb)
+ bb->ch_info.rssi_min = U8_MAX;
+
ieee80211_iterate_stations_atomic(rtwdev->hw,
rtw89_phy_stat_rssi_update_iter,
&rssi_data);
@@ -5199,24 +5243,27 @@ void rtw89_phy_stat_track(struct rtw89_dev *rtwdev)
memset(&phystat->cur_pkt_stat, 0, sizeof(phystat->cur_pkt_stat));
}
-static u16 rtw89_phy_ccx_us_to_idx(struct rtw89_dev *rtwdev, u32 time_us)
+static u16 rtw89_phy_ccx_us_to_idx(struct rtw89_dev *rtwdev,
+ struct rtw89_bb_ctx *bb, u32 time_us)
{
- struct rtw89_env_monitor_info *env = &rtwdev->env_monitor;
+ struct rtw89_env_monitor_info *env = &bb->env_monitor;
return time_us >> (ilog2(CCX_US_BASE_RATIO) + env->ccx_unit_idx);
}
-static u32 rtw89_phy_ccx_idx_to_us(struct rtw89_dev *rtwdev, u16 idx)
+static u32 rtw89_phy_ccx_idx_to_us(struct rtw89_dev *rtwdev,
+ struct rtw89_bb_ctx *bb, u16 idx)
{
- struct rtw89_env_monitor_info *env = &rtwdev->env_monitor;
+ struct rtw89_env_monitor_info *env = &bb->env_monitor;
return idx << (ilog2(CCX_US_BASE_RATIO) + env->ccx_unit_idx);
}
-static void rtw89_phy_ccx_top_setting_init(struct rtw89_dev *rtwdev)
+static void rtw89_phy_ccx_top_setting_init(struct rtw89_dev *rtwdev,
+ struct rtw89_bb_ctx *bb)
{
const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def;
- struct rtw89_env_monitor_info *env = &rtwdev->env_monitor;
+ struct rtw89_env_monitor_info *env = &bb->env_monitor;
const struct rtw89_ccx_regs *ccx = phy->ccx;
env->ccx_manual_ctrl = false;
@@ -5225,17 +5272,20 @@ static void rtw89_phy_ccx_top_setting_init(struct rtw89_dev *rtwdev)
env->ccx_period = 0;
env->ccx_unit_idx = RTW89_CCX_32_US;
- rtw89_phy_set_phy_regs(rtwdev, ccx->setting_addr, ccx->en_mask, 1);
- rtw89_phy_set_phy_regs(rtwdev, ccx->setting_addr, ccx->trig_opt_mask, 1);
- rtw89_phy_set_phy_regs(rtwdev, ccx->setting_addr, ccx->measurement_trig_mask, 1);
- rtw89_phy_set_phy_regs(rtwdev, ccx->setting_addr, ccx->edcca_opt_mask,
- RTW89_CCX_EDCCA_BW20_0);
+ rtw89_phy_write32_idx(rtwdev, ccx->setting_addr, ccx->en_mask, 1, bb->phy_idx);
+ rtw89_phy_write32_idx(rtwdev, ccx->setting_addr, ccx->trig_opt_mask, 1,
+ bb->phy_idx);
+ rtw89_phy_write32_idx(rtwdev, ccx->setting_addr, ccx->measurement_trig_mask, 1,
+ bb->phy_idx);
+ rtw89_phy_write32_idx(rtwdev, ccx->setting_addr, ccx->edcca_opt_mask,
+ RTW89_CCX_EDCCA_BW20_0, bb->phy_idx);
}
-static u16 rtw89_phy_ccx_get_report(struct rtw89_dev *rtwdev, u16 report,
- u16 score)
+static u16 rtw89_phy_ccx_get_report(struct rtw89_dev *rtwdev,
+ struct rtw89_bb_ctx *bb,
+ u16 report, u16 score)
{
- struct rtw89_env_monitor_info *env = &rtwdev->env_monitor;
+ struct rtw89_env_monitor_info *env = &bb->env_monitor;
u32 numer = 0;
u16 ret = 0;
@@ -5275,9 +5325,10 @@ static void rtw89_phy_ccx_ms_to_period_unit(struct rtw89_dev *rtwdev,
*period, *unit_idx);
}
-static void rtw89_phy_ccx_racing_release(struct rtw89_dev *rtwdev)
+static void rtw89_phy_ccx_racing_release(struct rtw89_dev *rtwdev,
+ struct rtw89_bb_ctx *bb)
{
- struct rtw89_env_monitor_info *env = &rtwdev->env_monitor;
+ struct rtw89_env_monitor_info *env = &bb->env_monitor;
rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
"lv:(%d)->(0)\n", env->ccx_rac_lv);
@@ -5288,9 +5339,10 @@ static void rtw89_phy_ccx_racing_release(struct rtw89_dev *rtwdev)
}
static bool rtw89_phy_ifs_clm_th_update_check(struct rtw89_dev *rtwdev,
+ struct rtw89_bb_ctx *bb,
struct rtw89_ccx_para_info *para)
{
- struct rtw89_env_monitor_info *env = &rtwdev->env_monitor;
+ struct rtw89_env_monitor_info *env = &bb->env_monitor;
bool is_update = env->ifs_clm_app != para->ifs_clm_app;
u8 i = 0;
u16 *ifs_th_l = env->ifs_clm_th_l;
@@ -5325,12 +5377,12 @@ static bool rtw89_phy_ifs_clm_th_update_check(struct rtw89_dev *rtwdev,
*/
ifs_th_l[IFS_CLM_TH_START_IDX] = 0;
ifs_th_h_us[IFS_CLM_TH_START_IDX] = ifs_th0_us;
- ifs_th_h[IFS_CLM_TH_START_IDX] = rtw89_phy_ccx_us_to_idx(rtwdev,
+ ifs_th_h[IFS_CLM_TH_START_IDX] = rtw89_phy_ccx_us_to_idx(rtwdev, bb,
ifs_th0_us);
for (i = 1; i < RTW89_IFS_CLM_NUM; i++) {
ifs_th_l[i] = ifs_th_h[i - 1] + 1;
ifs_th_h_us[i] = ifs_th_h_us[i - 1] * ifs_th_times;
- ifs_th_h[i] = rtw89_phy_ccx_us_to_idx(rtwdev, ifs_th_h_us[i]);
+ ifs_th_h[i] = rtw89_phy_ccx_us_to_idx(rtwdev, bb, ifs_th_h_us[i]);
}
ifs_update_finished:
@@ -5341,30 +5393,31 @@ ifs_update_finished:
return is_update;
}
-static void rtw89_phy_ifs_clm_set_th_reg(struct rtw89_dev *rtwdev)
+static void rtw89_phy_ifs_clm_set_th_reg(struct rtw89_dev *rtwdev,
+ struct rtw89_bb_ctx *bb)
{
const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def;
- struct rtw89_env_monitor_info *env = &rtwdev->env_monitor;
+ struct rtw89_env_monitor_info *env = &bb->env_monitor;
const struct rtw89_ccx_regs *ccx = phy->ccx;
u8 i = 0;
- rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_t1_addr, ccx->ifs_t1_th_l_mask,
- env->ifs_clm_th_l[0]);
- rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_t2_addr, ccx->ifs_t2_th_l_mask,
- env->ifs_clm_th_l[1]);
- rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_t3_addr, ccx->ifs_t3_th_l_mask,
- env->ifs_clm_th_l[2]);
- rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_t4_addr, ccx->ifs_t4_th_l_mask,
- env->ifs_clm_th_l[3]);
-
- rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_t1_addr, ccx->ifs_t1_th_h_mask,
- env->ifs_clm_th_h[0]);
- rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_t2_addr, ccx->ifs_t2_th_h_mask,
- env->ifs_clm_th_h[1]);
- rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_t3_addr, ccx->ifs_t3_th_h_mask,
- env->ifs_clm_th_h[2]);
- rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_t4_addr, ccx->ifs_t4_th_h_mask,
- env->ifs_clm_th_h[3]);
+ rtw89_phy_write32_idx(rtwdev, ccx->ifs_t1_addr, ccx->ifs_t1_th_l_mask,
+ env->ifs_clm_th_l[0], bb->phy_idx);
+ rtw89_phy_write32_idx(rtwdev, ccx->ifs_t2_addr, ccx->ifs_t2_th_l_mask,
+ env->ifs_clm_th_l[1], bb->phy_idx);
+ rtw89_phy_write32_idx(rtwdev, ccx->ifs_t3_addr, ccx->ifs_t3_th_l_mask,
+ env->ifs_clm_th_l[2], bb->phy_idx);
+ rtw89_phy_write32_idx(rtwdev, ccx->ifs_t4_addr, ccx->ifs_t4_th_l_mask,
+ env->ifs_clm_th_l[3], bb->phy_idx);
+
+ rtw89_phy_write32_idx(rtwdev, ccx->ifs_t1_addr, ccx->ifs_t1_th_h_mask,
+ env->ifs_clm_th_h[0], bb->phy_idx);
+ rtw89_phy_write32_idx(rtwdev, ccx->ifs_t2_addr, ccx->ifs_t2_th_h_mask,
+ env->ifs_clm_th_h[1], bb->phy_idx);
+ rtw89_phy_write32_idx(rtwdev, ccx->ifs_t3_addr, ccx->ifs_t3_th_h_mask,
+ env->ifs_clm_th_h[2], bb->phy_idx);
+ rtw89_phy_write32_idx(rtwdev, ccx->ifs_t4_addr, ccx->ifs_t4_th_h_mask,
+ env->ifs_clm_th_h[3], bb->phy_idx);
for (i = 0; i < RTW89_IFS_CLM_NUM; i++)
rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
@@ -5372,31 +5425,38 @@ static void rtw89_phy_ifs_clm_set_th_reg(struct rtw89_dev *rtwdev)
i + 1, env->ifs_clm_th_l[i], env->ifs_clm_th_h[i]);
}
-static void rtw89_phy_ifs_clm_setting_init(struct rtw89_dev *rtwdev)
+static void rtw89_phy_ifs_clm_setting_init(struct rtw89_dev *rtwdev,
+ struct rtw89_bb_ctx *bb)
{
const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def;
- struct rtw89_env_monitor_info *env = &rtwdev->env_monitor;
+ struct rtw89_env_monitor_info *env = &bb->env_monitor;
const struct rtw89_ccx_regs *ccx = phy->ccx;
- struct rtw89_ccx_para_info para = {0};
+ struct rtw89_ccx_para_info para = {};
env->ifs_clm_app = RTW89_IFS_CLM_BACKGROUND;
env->ifs_clm_mntr_time = 0;
para.ifs_clm_app = RTW89_IFS_CLM_INIT;
- if (rtw89_phy_ifs_clm_th_update_check(rtwdev, &para))
- rtw89_phy_ifs_clm_set_th_reg(rtwdev);
+ if (rtw89_phy_ifs_clm_th_update_check(rtwdev, bb, &para))
+ rtw89_phy_ifs_clm_set_th_reg(rtwdev, bb);
- rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_cnt_addr, ccx->ifs_collect_en_mask, true);
- rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_t1_addr, ccx->ifs_t1_en_mask, true);
- rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_t2_addr, ccx->ifs_t2_en_mask, true);
- rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_t3_addr, ccx->ifs_t3_en_mask, true);
- rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_t4_addr, ccx->ifs_t4_en_mask, true);
+ rtw89_phy_write32_idx(rtwdev, ccx->ifs_cnt_addr, ccx->ifs_collect_en_mask, true,
+ bb->phy_idx);
+ rtw89_phy_write32_idx(rtwdev, ccx->ifs_t1_addr, ccx->ifs_t1_en_mask, true,
+ bb->phy_idx);
+ rtw89_phy_write32_idx(rtwdev, ccx->ifs_t2_addr, ccx->ifs_t2_en_mask, true,
+ bb->phy_idx);
+ rtw89_phy_write32_idx(rtwdev, ccx->ifs_t3_addr, ccx->ifs_t3_en_mask, true,
+ bb->phy_idx);
+ rtw89_phy_write32_idx(rtwdev, ccx->ifs_t4_addr, ccx->ifs_t4_en_mask, true,
+ bb->phy_idx);
}
static int rtw89_phy_ccx_racing_ctrl(struct rtw89_dev *rtwdev,
+ struct rtw89_bb_ctx *bb,
enum rtw89_env_racing_lv level)
{
- struct rtw89_env_monitor_info *env = &rtwdev->env_monitor;
+ struct rtw89_env_monitor_info *env = &bb->env_monitor;
int ret = 0;
if (level >= RTW89_RAC_MAX_NUM) {
@@ -5425,56 +5485,62 @@ static int rtw89_phy_ccx_racing_ctrl(struct rtw89_dev *rtwdev,
return ret;
}
-static void rtw89_phy_ccx_trigger(struct rtw89_dev *rtwdev)
+static void rtw89_phy_ccx_trigger(struct rtw89_dev *rtwdev,
+ struct rtw89_bb_ctx *bb)
{
const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def;
- struct rtw89_env_monitor_info *env = &rtwdev->env_monitor;
+ struct rtw89_env_monitor_info *env = &bb->env_monitor;
const struct rtw89_ccx_regs *ccx = phy->ccx;
- rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_cnt_addr, ccx->ifs_clm_cnt_clear_mask, 0);
- rtw89_phy_set_phy_regs(rtwdev, ccx->setting_addr, ccx->measurement_trig_mask, 0);
- rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_cnt_addr, ccx->ifs_clm_cnt_clear_mask, 1);
- rtw89_phy_set_phy_regs(rtwdev, ccx->setting_addr, ccx->measurement_trig_mask, 1);
+ rtw89_phy_write32_idx(rtwdev, ccx->ifs_cnt_addr, ccx->ifs_clm_cnt_clear_mask, 0,
+ bb->phy_idx);
+ rtw89_phy_write32_idx(rtwdev, ccx->setting_addr, ccx->measurement_trig_mask, 0,
+ bb->phy_idx);
+ rtw89_phy_write32_idx(rtwdev, ccx->ifs_cnt_addr, ccx->ifs_clm_cnt_clear_mask, 1,
+ bb->phy_idx);
+ rtw89_phy_write32_idx(rtwdev, ccx->setting_addr, ccx->measurement_trig_mask, 1,
+ bb->phy_idx);
env->ccx_ongoing = true;
}
-static void rtw89_phy_ifs_clm_get_utility(struct rtw89_dev *rtwdev)
+static void rtw89_phy_ifs_clm_get_utility(struct rtw89_dev *rtwdev,
+ struct rtw89_bb_ctx *bb)
{
- struct rtw89_env_monitor_info *env = &rtwdev->env_monitor;
+ struct rtw89_env_monitor_info *env = &bb->env_monitor;
u8 i = 0;
u32 res = 0;
env->ifs_clm_tx_ratio =
- rtw89_phy_ccx_get_report(rtwdev, env->ifs_clm_tx, PERCENT);
+ rtw89_phy_ccx_get_report(rtwdev, bb, env->ifs_clm_tx, PERCENT);
env->ifs_clm_edcca_excl_cca_ratio =
- rtw89_phy_ccx_get_report(rtwdev, env->ifs_clm_edcca_excl_cca,
+ rtw89_phy_ccx_get_report(rtwdev, bb, env->ifs_clm_edcca_excl_cca,
PERCENT);
env->ifs_clm_cck_fa_ratio =
- rtw89_phy_ccx_get_report(rtwdev, env->ifs_clm_cckfa, PERCENT);
+ rtw89_phy_ccx_get_report(rtwdev, bb, env->ifs_clm_cckfa, PERCENT);
env->ifs_clm_ofdm_fa_ratio =
- rtw89_phy_ccx_get_report(rtwdev, env->ifs_clm_ofdmfa, PERCENT);
+ rtw89_phy_ccx_get_report(rtwdev, bb, env->ifs_clm_ofdmfa, PERCENT);
env->ifs_clm_cck_cca_excl_fa_ratio =
- rtw89_phy_ccx_get_report(rtwdev, env->ifs_clm_cckcca_excl_fa,
+ rtw89_phy_ccx_get_report(rtwdev, bb, env->ifs_clm_cckcca_excl_fa,
PERCENT);
env->ifs_clm_ofdm_cca_excl_fa_ratio =
- rtw89_phy_ccx_get_report(rtwdev, env->ifs_clm_ofdmcca_excl_fa,
+ rtw89_phy_ccx_get_report(rtwdev, bb, env->ifs_clm_ofdmcca_excl_fa,
PERCENT);
env->ifs_clm_cck_fa_permil =
- rtw89_phy_ccx_get_report(rtwdev, env->ifs_clm_cckfa, PERMIL);
+ rtw89_phy_ccx_get_report(rtwdev, bb, env->ifs_clm_cckfa, PERMIL);
env->ifs_clm_ofdm_fa_permil =
- rtw89_phy_ccx_get_report(rtwdev, env->ifs_clm_ofdmfa, PERMIL);
+ rtw89_phy_ccx_get_report(rtwdev, bb, env->ifs_clm_ofdmfa, PERMIL);
for (i = 0; i < RTW89_IFS_CLM_NUM; i++) {
if (env->ifs_clm_his[i] > ENV_MNTR_IFSCLM_HIS_MAX) {
env->ifs_clm_ifs_avg[i] = ENV_MNTR_FAIL_DWORD;
} else {
env->ifs_clm_ifs_avg[i] =
- rtw89_phy_ccx_idx_to_us(rtwdev,
+ rtw89_phy_ccx_idx_to_us(rtwdev, bb,
env->ifs_clm_avg[i]);
}
- res = rtw89_phy_ccx_idx_to_us(rtwdev, env->ifs_clm_cca[i]);
+ res = rtw89_phy_ccx_idx_to_us(rtwdev, bb, env->ifs_clm_cca[i]);
res += env->ifs_clm_his[i] >> 1;
if (env->ifs_clm_his[i])
res /= env->ifs_clm_his[i];
@@ -5504,81 +5570,82 @@ static void rtw89_phy_ifs_clm_get_utility(struct rtw89_dev *rtwdev)
env->ifs_clm_cca_avg[i]);
}
-static bool rtw89_phy_ifs_clm_get_result(struct rtw89_dev *rtwdev)
+static bool rtw89_phy_ifs_clm_get_result(struct rtw89_dev *rtwdev,
+ struct rtw89_bb_ctx *bb)
{
const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def;
- struct rtw89_env_monitor_info *env = &rtwdev->env_monitor;
+ struct rtw89_env_monitor_info *env = &bb->env_monitor;
const struct rtw89_ccx_regs *ccx = phy->ccx;
u8 i = 0;
- if (rtw89_phy_read32_mask(rtwdev, ccx->ifs_total_addr,
- ccx->ifs_cnt_done_mask) == 0) {
+ if (rtw89_phy_read32_idx(rtwdev, ccx->ifs_total_addr,
+ ccx->ifs_cnt_done_mask, bb->phy_idx) == 0) {
rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
"Get IFS_CLM report Fail\n");
return false;
}
env->ifs_clm_tx =
- rtw89_phy_read32_mask(rtwdev, ccx->ifs_clm_tx_cnt_addr,
- ccx->ifs_clm_tx_cnt_msk);
+ rtw89_phy_read32_idx(rtwdev, ccx->ifs_clm_tx_cnt_addr,
+ ccx->ifs_clm_tx_cnt_msk, bb->phy_idx);
env->ifs_clm_edcca_excl_cca =
- rtw89_phy_read32_mask(rtwdev, ccx->ifs_clm_tx_cnt_addr,
- ccx->ifs_clm_edcca_excl_cca_fa_mask);
+ rtw89_phy_read32_idx(rtwdev, ccx->ifs_clm_tx_cnt_addr,
+ ccx->ifs_clm_edcca_excl_cca_fa_mask, bb->phy_idx);
env->ifs_clm_cckcca_excl_fa =
- rtw89_phy_read32_mask(rtwdev, ccx->ifs_clm_cca_addr,
- ccx->ifs_clm_cckcca_excl_fa_mask);
+ rtw89_phy_read32_idx(rtwdev, ccx->ifs_clm_cca_addr,
+ ccx->ifs_clm_cckcca_excl_fa_mask, bb->phy_idx);
env->ifs_clm_ofdmcca_excl_fa =
- rtw89_phy_read32_mask(rtwdev, ccx->ifs_clm_cca_addr,
- ccx->ifs_clm_ofdmcca_excl_fa_mask);
+ rtw89_phy_read32_idx(rtwdev, ccx->ifs_clm_cca_addr,
+ ccx->ifs_clm_ofdmcca_excl_fa_mask, bb->phy_idx);
env->ifs_clm_cckfa =
- rtw89_phy_read32_mask(rtwdev, ccx->ifs_clm_fa_addr,
- ccx->ifs_clm_cck_fa_mask);
+ rtw89_phy_read32_idx(rtwdev, ccx->ifs_clm_fa_addr,
+ ccx->ifs_clm_cck_fa_mask, bb->phy_idx);
env->ifs_clm_ofdmfa =
- rtw89_phy_read32_mask(rtwdev, ccx->ifs_clm_fa_addr,
- ccx->ifs_clm_ofdm_fa_mask);
+ rtw89_phy_read32_idx(rtwdev, ccx->ifs_clm_fa_addr,
+ ccx->ifs_clm_ofdm_fa_mask, bb->phy_idx);
env->ifs_clm_his[0] =
- rtw89_phy_read32_mask(rtwdev, ccx->ifs_his_addr,
- ccx->ifs_t1_his_mask);
+ rtw89_phy_read32_idx(rtwdev, ccx->ifs_his_addr,
+ ccx->ifs_t1_his_mask, bb->phy_idx);
env->ifs_clm_his[1] =
- rtw89_phy_read32_mask(rtwdev, ccx->ifs_his_addr,
- ccx->ifs_t2_his_mask);
+ rtw89_phy_read32_idx(rtwdev, ccx->ifs_his_addr,
+ ccx->ifs_t2_his_mask, bb->phy_idx);
env->ifs_clm_his[2] =
- rtw89_phy_read32_mask(rtwdev, ccx->ifs_his_addr,
- ccx->ifs_t3_his_mask);
+ rtw89_phy_read32_idx(rtwdev, ccx->ifs_his_addr,
+ ccx->ifs_t3_his_mask, bb->phy_idx);
env->ifs_clm_his[3] =
- rtw89_phy_read32_mask(rtwdev, ccx->ifs_his_addr,
- ccx->ifs_t4_his_mask);
+ rtw89_phy_read32_idx(rtwdev, ccx->ifs_his_addr,
+ ccx->ifs_t4_his_mask, bb->phy_idx);
env->ifs_clm_avg[0] =
- rtw89_phy_read32_mask(rtwdev, ccx->ifs_avg_l_addr,
- ccx->ifs_t1_avg_mask);
+ rtw89_phy_read32_idx(rtwdev, ccx->ifs_avg_l_addr,
+ ccx->ifs_t1_avg_mask, bb->phy_idx);
env->ifs_clm_avg[1] =
- rtw89_phy_read32_mask(rtwdev, ccx->ifs_avg_l_addr,
- ccx->ifs_t2_avg_mask);
+ rtw89_phy_read32_idx(rtwdev, ccx->ifs_avg_l_addr,
+ ccx->ifs_t2_avg_mask, bb->phy_idx);
env->ifs_clm_avg[2] =
- rtw89_phy_read32_mask(rtwdev, ccx->ifs_avg_h_addr,
- ccx->ifs_t3_avg_mask);
+ rtw89_phy_read32_idx(rtwdev, ccx->ifs_avg_h_addr,
+ ccx->ifs_t3_avg_mask, bb->phy_idx);
env->ifs_clm_avg[3] =
- rtw89_phy_read32_mask(rtwdev, ccx->ifs_avg_h_addr,
- ccx->ifs_t4_avg_mask);
+ rtw89_phy_read32_idx(rtwdev, ccx->ifs_avg_h_addr,
+ ccx->ifs_t4_avg_mask, bb->phy_idx);
env->ifs_clm_cca[0] =
- rtw89_phy_read32_mask(rtwdev, ccx->ifs_cca_l_addr,
- ccx->ifs_t1_cca_mask);
+ rtw89_phy_read32_idx(rtwdev, ccx->ifs_cca_l_addr,
+ ccx->ifs_t1_cca_mask, bb->phy_idx);
env->ifs_clm_cca[1] =
- rtw89_phy_read32_mask(rtwdev, ccx->ifs_cca_l_addr,
- ccx->ifs_t2_cca_mask);
+ rtw89_phy_read32_idx(rtwdev, ccx->ifs_cca_l_addr,
+ ccx->ifs_t2_cca_mask, bb->phy_idx);
env->ifs_clm_cca[2] =
- rtw89_phy_read32_mask(rtwdev, ccx->ifs_cca_h_addr,
- ccx->ifs_t3_cca_mask);
+ rtw89_phy_read32_idx(rtwdev, ccx->ifs_cca_h_addr,
+ ccx->ifs_t3_cca_mask, bb->phy_idx);
env->ifs_clm_cca[3] =
- rtw89_phy_read32_mask(rtwdev, ccx->ifs_cca_h_addr,
- ccx->ifs_t4_cca_mask);
+ rtw89_phy_read32_idx(rtwdev, ccx->ifs_cca_h_addr,
+ ccx->ifs_t4_cca_mask, bb->phy_idx);
env->ifs_clm_total_ifs =
- rtw89_phy_read32_mask(rtwdev, ccx->ifs_total_addr,
- ccx->ifs_total_mask);
+ rtw89_phy_read32_idx(rtwdev, ccx->ifs_total_addr,
+ ccx->ifs_total_mask, bb->phy_idx);
rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK, "IFS-CLM total_ifs = %d\n",
env->ifs_clm_total_ifs);
@@ -5598,16 +5665,17 @@ static bool rtw89_phy_ifs_clm_get_result(struct rtw89_dev *rtwdev)
"T%d:[%d, %d, %d]\n", i + 1, env->ifs_clm_his[i],
env->ifs_clm_avg[i], env->ifs_clm_cca[i]);
- rtw89_phy_ifs_clm_get_utility(rtwdev);
+ rtw89_phy_ifs_clm_get_utility(rtwdev, bb);
return true;
}
static int rtw89_phy_ifs_clm_set(struct rtw89_dev *rtwdev,
+ struct rtw89_bb_ctx *bb,
struct rtw89_ccx_para_info *para)
{
const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def;
- struct rtw89_env_monitor_info *env = &rtwdev->env_monitor;
+ struct rtw89_env_monitor_info *env = &bb->env_monitor;
const struct rtw89_ccx_regs *ccx = phy->ccx;
u32 period = 0;
u32 unit_idx = 0;
@@ -5618,17 +5686,17 @@ static int rtw89_phy_ifs_clm_set(struct rtw89_dev *rtwdev,
return -EINVAL;
}
- if (rtw89_phy_ccx_racing_ctrl(rtwdev, para->rac_lv))
+ if (rtw89_phy_ccx_racing_ctrl(rtwdev, bb, para->rac_lv))
return -EINVAL;
if (para->mntr_time != env->ifs_clm_mntr_time) {
rtw89_phy_ccx_ms_to_period_unit(rtwdev, para->mntr_time,
&period, &unit_idx);
- rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_cnt_addr,
- ccx->ifs_clm_period_mask, period);
- rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_cnt_addr,
- ccx->ifs_clm_cnt_unit_mask,
- unit_idx);
+ rtw89_phy_write32_idx(rtwdev, ccx->ifs_cnt_addr,
+ ccx->ifs_clm_period_mask, period, bb->phy_idx);
+ rtw89_phy_write32_idx(rtwdev, ccx->ifs_cnt_addr,
+ ccx->ifs_clm_cnt_unit_mask,
+ unit_idx, bb->phy_idx);
rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
"Update IFS-CLM time ((%d)) -> ((%d))\n",
@@ -5639,18 +5707,19 @@ static int rtw89_phy_ifs_clm_set(struct rtw89_dev *rtwdev,
env->ccx_unit_idx = (u8)unit_idx;
}
- if (rtw89_phy_ifs_clm_th_update_check(rtwdev, para)) {
+ if (rtw89_phy_ifs_clm_th_update_check(rtwdev, bb, para)) {
env->ifs_clm_app = para->ifs_clm_app;
- rtw89_phy_ifs_clm_set_th_reg(rtwdev);
+ rtw89_phy_ifs_clm_set_th_reg(rtwdev, bb);
}
return 0;
}
-void rtw89_phy_env_monitor_track(struct rtw89_dev *rtwdev)
+static void __rtw89_phy_env_monitor_track(struct rtw89_dev *rtwdev,
+ struct rtw89_bb_ctx *bb)
{
- struct rtw89_env_monitor_info *env = &rtwdev->env_monitor;
- struct rtw89_ccx_para_info para = {0};
+ struct rtw89_env_monitor_info *env = &bb->env_monitor;
+ struct rtw89_ccx_para_info para = {};
u8 chk_result = RTW89_PHY_ENV_MON_CCX_FAIL;
env->ccx_watchdog_result = RTW89_PHY_ENV_MON_CCX_FAIL;
@@ -5660,25 +5729,36 @@ void rtw89_phy_env_monitor_track(struct rtw89_dev *rtwdev)
return;
}
+ rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
+ "BB-%d env_monitor track\n", bb->phy_idx);
+
/* only ifs_clm for now */
- if (rtw89_phy_ifs_clm_get_result(rtwdev))
+ if (rtw89_phy_ifs_clm_get_result(rtwdev, bb))
env->ccx_watchdog_result |= RTW89_PHY_ENV_MON_IFS_CLM;
- rtw89_phy_ccx_racing_release(rtwdev);
+ rtw89_phy_ccx_racing_release(rtwdev, bb);
para.mntr_time = 1900;
para.rac_lv = RTW89_RAC_LV_1;
para.ifs_clm_app = RTW89_IFS_CLM_BACKGROUND;
- if (rtw89_phy_ifs_clm_set(rtwdev, &para) == 0)
+ if (rtw89_phy_ifs_clm_set(rtwdev, bb, &para) == 0)
chk_result |= RTW89_PHY_ENV_MON_IFS_CLM;
if (chk_result)
- rtw89_phy_ccx_trigger(rtwdev);
+ rtw89_phy_ccx_trigger(rtwdev, bb);
rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
"get_result=0x%x, chk_result:0x%x\n",
env->ccx_watchdog_result, chk_result);
}
+void rtw89_phy_env_monitor_track(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_bb_ctx *bb;
+
+ rtw89_for_each_active_bb(rtwdev, bb)
+ __rtw89_phy_env_monitor_track(rtwdev, bb);
+}
+
static bool rtw89_physts_ie_page_valid(enum rtw89_phy_status_bitmap *ie_page)
{
if (*ie_page >= RTW89_PHYSTS_BITMAP_NUM ||
@@ -5799,11 +5879,12 @@ static void rtw89_physts_parsing_init(struct rtw89_dev *rtwdev)
__rtw89_physts_parsing_init(rtwdev, RTW89_PHY_1);
}
-static void rtw89_phy_dig_read_gain_table(struct rtw89_dev *rtwdev, int type)
+static void rtw89_phy_dig_read_gain_table(struct rtw89_dev *rtwdev,
+ struct rtw89_bb_ctx *bb, int type)
{
const struct rtw89_chip_info *chip = rtwdev->chip;
- struct rtw89_dig_info *dig = &rtwdev->dig;
const struct rtw89_phy_dig_gain_cfg *cfg;
+ struct rtw89_dig_info *dig = &bb->dig;
const char *msg;
u8 i;
s8 gain_base;
@@ -5840,8 +5921,8 @@ static void rtw89_phy_dig_read_gain_table(struct rtw89_dev *rtwdev, int type)
}
for (i = 0; i < cfg->size; i++) {
- tmp = rtw89_phy_read32_mask(rtwdev, cfg->table[i].addr,
- cfg->table[i].mask);
+ tmp = rtw89_phy_read32_idx(rtwdev, cfg->table[i].addr,
+ cfg->table[i].mask, bb->phy_idx);
tmp >>= DIG_GAIN_SHIFT;
gain_arr[i] = sign_extend32(tmp, U4_MAX_BIT) + gain_base;
gain_base += DIG_GAIN;
@@ -5851,25 +5932,26 @@ static void rtw89_phy_dig_read_gain_table(struct rtw89_dev *rtwdev, int type)
}
}
-static void rtw89_phy_dig_update_gain_para(struct rtw89_dev *rtwdev)
+static void rtw89_phy_dig_update_gain_para(struct rtw89_dev *rtwdev,
+ struct rtw89_bb_ctx *bb)
{
- struct rtw89_dig_info *dig = &rtwdev->dig;
+ struct rtw89_dig_info *dig = &bb->dig;
u32 tmp;
u8 i;
if (!rtwdev->hal.support_igi)
return;
- tmp = rtw89_phy_read32_mask(rtwdev, R_PATH0_IB_PKPW,
- B_PATH0_IB_PKPW_MSK);
+ tmp = rtw89_phy_read32_idx(rtwdev, R_PATH0_IB_PKPW,
+ B_PATH0_IB_PKPW_MSK, bb->phy_idx);
dig->ib_pkpwr = sign_extend32(tmp >> DIG_GAIN_SHIFT, U8_MAX_BIT);
- dig->ib_pbk = rtw89_phy_read32_mask(rtwdev, R_PATH0_IB_PBK,
- B_PATH0_IB_PBK_MSK);
+ dig->ib_pbk = rtw89_phy_read32_idx(rtwdev, R_PATH0_IB_PBK,
+ B_PATH0_IB_PBK_MSK, bb->phy_idx);
rtw89_debug(rtwdev, RTW89_DBG_DIG, "ib_pkpwr=%d, ib_pbk=%d\n",
dig->ib_pkpwr, dig->ib_pbk);
for (i = RTW89_DIG_GAIN_LNA_G; i < RTW89_DIG_GAIN_MAX; i++)
- rtw89_phy_dig_read_gain_table(rtwdev, i);
+ rtw89_phy_dig_read_gain_table(rtwdev, bb, i);
}
static const u8 rssi_nolink = 22;
@@ -5878,10 +5960,11 @@ static const u16 fa_th_2g[FA_TH_NUM] = {22, 44, 66, 88};
static const u16 fa_th_5g[FA_TH_NUM] = {4, 8, 12, 16};
static const u16 fa_th_nolink[FA_TH_NUM] = {196, 352, 440, 528};
-static void rtw89_phy_dig_update_rssi_info(struct rtw89_dev *rtwdev)
+static void rtw89_phy_dig_update_rssi_info(struct rtw89_dev *rtwdev,
+ struct rtw89_bb_ctx *bb)
{
- struct rtw89_phy_ch_info *ch_info = &rtwdev->ch_info;
- struct rtw89_dig_info *dig = &rtwdev->dig;
+ struct rtw89_phy_ch_info *ch_info = &bb->ch_info;
+ struct rtw89_dig_info *dig = &bb->dig;
bool is_linked = rtwdev->total_sta_assoc > 0;
if (is_linked) {
@@ -5892,10 +5975,11 @@ static void rtw89_phy_dig_update_rssi_info(struct rtw89_dev *rtwdev)
}
}
-static void rtw89_phy_dig_update_para(struct rtw89_dev *rtwdev)
+static void rtw89_phy_dig_update_para(struct rtw89_dev *rtwdev,
+ struct rtw89_bb_ctx *bb)
{
- struct rtw89_dig_info *dig = &rtwdev->dig;
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
+ const struct rtw89_chan *chan = rtw89_mgnt_chan_get(rtwdev, bb->phy_idx);
+ struct rtw89_dig_info *dig = &bb->dig;
bool is_linked = rtwdev->total_sta_assoc > 0;
const u16 *fa_th_src = NULL;
@@ -5924,9 +6008,10 @@ static const u8 pd_low_th_offset = 16, dynamic_igi_min = 0x20;
static const u8 igi_max_performance_mode = 0x5a;
static const u8 dynamic_pd_threshold_max;
-static void rtw89_phy_dig_para_reset(struct rtw89_dev *rtwdev)
+static void rtw89_phy_dig_para_reset(struct rtw89_dev *rtwdev,
+ struct rtw89_bb_ctx *bb)
{
- struct rtw89_dig_info *dig = &rtwdev->dig;
+ struct rtw89_dig_info *dig = &bb->dig;
dig->cur_gaincode.lna_idx = LNA_IDX_MAX;
dig->cur_gaincode.tia_idx = TIA_IDX_MAX;
@@ -5942,15 +6027,27 @@ static void rtw89_phy_dig_para_reset(struct rtw89_dev *rtwdev)
dig->is_linked_pre = false;
}
+static void __rtw89_phy_dig_init(struct rtw89_dev *rtwdev,
+ struct rtw89_bb_ctx *bb)
+{
+ rtw89_debug(rtwdev, RTW89_DBG_DIG, "BB-%d dig_init\n", bb->phy_idx);
+
+ rtw89_phy_dig_update_gain_para(rtwdev, bb);
+ rtw89_phy_dig_reset(rtwdev, bb);
+}
+
static void rtw89_phy_dig_init(struct rtw89_dev *rtwdev)
{
- rtw89_phy_dig_update_gain_para(rtwdev);
- rtw89_phy_dig_reset(rtwdev);
+ struct rtw89_bb_ctx *bb;
+
+ rtw89_for_each_capab_bb(rtwdev, bb)
+ __rtw89_phy_dig_init(rtwdev, bb);
}
-static u8 rtw89_phy_dig_lna_idx_by_rssi(struct rtw89_dev *rtwdev, u8 rssi)
+static u8 rtw89_phy_dig_lna_idx_by_rssi(struct rtw89_dev *rtwdev,
+ struct rtw89_bb_ctx *bb, u8 rssi)
{
- struct rtw89_dig_info *dig = &rtwdev->dig;
+ struct rtw89_dig_info *dig = &bb->dig;
u8 lna_idx;
if (rssi < dig->igi_rssi_th[0])
@@ -5969,9 +6066,10 @@ static u8 rtw89_phy_dig_lna_idx_by_rssi(struct rtw89_dev *rtwdev, u8 rssi)
return lna_idx;
}
-static u8 rtw89_phy_dig_tia_idx_by_rssi(struct rtw89_dev *rtwdev, u8 rssi)
+static u8 rtw89_phy_dig_tia_idx_by_rssi(struct rtw89_dev *rtwdev,
+ struct rtw89_bb_ctx *bb, u8 rssi)
{
- struct rtw89_dig_info *dig = &rtwdev->dig;
+ struct rtw89_dig_info *dig = &bb->dig;
u8 tia_idx;
if (rssi < dig->igi_rssi_th[0])
@@ -5984,10 +6082,11 @@ static u8 rtw89_phy_dig_tia_idx_by_rssi(struct rtw89_dev *rtwdev, u8 rssi)
#define IB_PBK_BASE 110
#define WB_RSSI_BASE 10
-static u8 rtw89_phy_dig_rxb_idx_by_rssi(struct rtw89_dev *rtwdev, u8 rssi,
+static u8 rtw89_phy_dig_rxb_idx_by_rssi(struct rtw89_dev *rtwdev,
+ struct rtw89_bb_ctx *bb, u8 rssi,
struct rtw89_agc_gaincode_set *set)
{
- struct rtw89_dig_info *dig = &rtwdev->dig;
+ struct rtw89_dig_info *dig = &bb->dig;
s8 lna_gain = dig->lna_gain[set->lna_idx];
s8 tia_gain = dig->tia_gain[set->tia_idx];
s32 wb_rssi = rssi + lna_gain + tia_gain;
@@ -6003,12 +6102,13 @@ static u8 rtw89_phy_dig_rxb_idx_by_rssi(struct rtw89_dev *rtwdev, u8 rssi,
return rxb_idx;
}
-static void rtw89_phy_dig_gaincode_by_rssi(struct rtw89_dev *rtwdev, u8 rssi,
+static void rtw89_phy_dig_gaincode_by_rssi(struct rtw89_dev *rtwdev,
+ struct rtw89_bb_ctx *bb, u8 rssi,
struct rtw89_agc_gaincode_set *set)
{
- set->lna_idx = rtw89_phy_dig_lna_idx_by_rssi(rtwdev, rssi);
- set->tia_idx = rtw89_phy_dig_tia_idx_by_rssi(rtwdev, rssi);
- set->rxb_idx = rtw89_phy_dig_rxb_idx_by_rssi(rtwdev, rssi, set);
+ set->lna_idx = rtw89_phy_dig_lna_idx_by_rssi(rtwdev, bb, rssi);
+ set->tia_idx = rtw89_phy_dig_tia_idx_by_rssi(rtwdev, bb, rssi);
+ set->rxb_idx = rtw89_phy_dig_rxb_idx_by_rssi(rtwdev, bb, rssi, set);
rtw89_debug(rtwdev, RTW89_DBG_DIG,
"final_rssi=%03d, (lna,tia,rab)=(%d,%d,%02d)\n",
@@ -6017,10 +6117,11 @@ static void rtw89_phy_dig_gaincode_by_rssi(struct rtw89_dev *rtwdev, u8 rssi,
#define IGI_OFFSET_MAX 25
#define IGI_OFFSET_MUL 2
-static void rtw89_phy_dig_igi_offset_by_env(struct rtw89_dev *rtwdev)
+static void rtw89_phy_dig_igi_offset_by_env(struct rtw89_dev *rtwdev,
+ struct rtw89_bb_ctx *bb)
{
- struct rtw89_dig_info *dig = &rtwdev->dig;
- struct rtw89_env_monitor_info *env = &rtwdev->env_monitor;
+ struct rtw89_dig_info *dig = &bb->dig;
+ struct rtw89_env_monitor_info *env = &bb->env_monitor;
enum rtw89_dig_noisy_level noisy_lv;
u8 igi_offset = dig->fa_rssi_ofst;
u16 fa_ratio = 0;
@@ -6057,92 +6158,99 @@ static void rtw89_phy_dig_igi_offset_by_env(struct rtw89_dev *rtwdev)
noisy_lv, igi_offset);
}
-static void rtw89_phy_dig_set_lna_idx(struct rtw89_dev *rtwdev, u8 lna_idx)
+static void rtw89_phy_dig_set_lna_idx(struct rtw89_dev *rtwdev,
+ struct rtw89_bb_ctx *bb, u8 lna_idx)
{
const struct rtw89_dig_regs *dig_regs = rtwdev->chip->dig_regs;
- rtw89_phy_write32_mask(rtwdev, dig_regs->p0_lna_init.addr,
- dig_regs->p0_lna_init.mask, lna_idx);
- rtw89_phy_write32_mask(rtwdev, dig_regs->p1_lna_init.addr,
- dig_regs->p1_lna_init.mask, lna_idx);
+ rtw89_phy_write32_idx(rtwdev, dig_regs->p0_lna_init.addr,
+ dig_regs->p0_lna_init.mask, lna_idx, bb->phy_idx);
+ rtw89_phy_write32_idx(rtwdev, dig_regs->p1_lna_init.addr,
+ dig_regs->p1_lna_init.mask, lna_idx, bb->phy_idx);
}
-static void rtw89_phy_dig_set_tia_idx(struct rtw89_dev *rtwdev, u8 tia_idx)
+static void rtw89_phy_dig_set_tia_idx(struct rtw89_dev *rtwdev,
+ struct rtw89_bb_ctx *bb, u8 tia_idx)
{
const struct rtw89_dig_regs *dig_regs = rtwdev->chip->dig_regs;
- rtw89_phy_write32_mask(rtwdev, dig_regs->p0_tia_init.addr,
- dig_regs->p0_tia_init.mask, tia_idx);
- rtw89_phy_write32_mask(rtwdev, dig_regs->p1_tia_init.addr,
- dig_regs->p1_tia_init.mask, tia_idx);
+ rtw89_phy_write32_idx(rtwdev, dig_regs->p0_tia_init.addr,
+ dig_regs->p0_tia_init.mask, tia_idx, bb->phy_idx);
+ rtw89_phy_write32_idx(rtwdev, dig_regs->p1_tia_init.addr,
+ dig_regs->p1_tia_init.mask, tia_idx, bb->phy_idx);
}
-static void rtw89_phy_dig_set_rxb_idx(struct rtw89_dev *rtwdev, u8 rxb_idx)
+static void rtw89_phy_dig_set_rxb_idx(struct rtw89_dev *rtwdev,
+ struct rtw89_bb_ctx *bb, u8 rxb_idx)
{
const struct rtw89_dig_regs *dig_regs = rtwdev->chip->dig_regs;
- rtw89_phy_write32_mask(rtwdev, dig_regs->p0_rxb_init.addr,
- dig_regs->p0_rxb_init.mask, rxb_idx);
- rtw89_phy_write32_mask(rtwdev, dig_regs->p1_rxb_init.addr,
- dig_regs->p1_rxb_init.mask, rxb_idx);
+ rtw89_phy_write32_idx(rtwdev, dig_regs->p0_rxb_init.addr,
+ dig_regs->p0_rxb_init.mask, rxb_idx, bb->phy_idx);
+ rtw89_phy_write32_idx(rtwdev, dig_regs->p1_rxb_init.addr,
+ dig_regs->p1_rxb_init.mask, rxb_idx, bb->phy_idx);
}
static void rtw89_phy_dig_set_igi_cr(struct rtw89_dev *rtwdev,
+ struct rtw89_bb_ctx *bb,
const struct rtw89_agc_gaincode_set set)
{
if (!rtwdev->hal.support_igi)
return;
- rtw89_phy_dig_set_lna_idx(rtwdev, set.lna_idx);
- rtw89_phy_dig_set_tia_idx(rtwdev, set.tia_idx);
- rtw89_phy_dig_set_rxb_idx(rtwdev, set.rxb_idx);
+ rtw89_phy_dig_set_lna_idx(rtwdev, bb, set.lna_idx);
+ rtw89_phy_dig_set_tia_idx(rtwdev, bb, set.tia_idx);
+ rtw89_phy_dig_set_rxb_idx(rtwdev, bb, set.rxb_idx);
rtw89_debug(rtwdev, RTW89_DBG_DIG, "Set (lna,tia,rxb)=((%d,%d,%02d))\n",
set.lna_idx, set.tia_idx, set.rxb_idx);
}
static void rtw89_phy_dig_sdagc_follow_pagc_config(struct rtw89_dev *rtwdev,
+ struct rtw89_bb_ctx *bb,
bool enable)
{
const struct rtw89_dig_regs *dig_regs = rtwdev->chip->dig_regs;
- rtw89_phy_write32_mask(rtwdev, dig_regs->p0_p20_pagcugc_en.addr,
- dig_regs->p0_p20_pagcugc_en.mask, enable);
- rtw89_phy_write32_mask(rtwdev, dig_regs->p0_s20_pagcugc_en.addr,
- dig_regs->p0_s20_pagcugc_en.mask, enable);
- rtw89_phy_write32_mask(rtwdev, dig_regs->p1_p20_pagcugc_en.addr,
- dig_regs->p1_p20_pagcugc_en.mask, enable);
- rtw89_phy_write32_mask(rtwdev, dig_regs->p1_s20_pagcugc_en.addr,
- dig_regs->p1_s20_pagcugc_en.mask, enable);
+ rtw89_phy_write32_idx(rtwdev, dig_regs->p0_p20_pagcugc_en.addr,
+ dig_regs->p0_p20_pagcugc_en.mask, enable, bb->phy_idx);
+ rtw89_phy_write32_idx(rtwdev, dig_regs->p0_s20_pagcugc_en.addr,
+ dig_regs->p0_s20_pagcugc_en.mask, enable, bb->phy_idx);
+ rtw89_phy_write32_idx(rtwdev, dig_regs->p1_p20_pagcugc_en.addr,
+ dig_regs->p1_p20_pagcugc_en.mask, enable, bb->phy_idx);
+ rtw89_phy_write32_idx(rtwdev, dig_regs->p1_s20_pagcugc_en.addr,
+ dig_regs->p1_s20_pagcugc_en.mask, enable, bb->phy_idx);
rtw89_debug(rtwdev, RTW89_DBG_DIG, "sdagc_follow_pagc=%d\n", enable);
}
-static void rtw89_phy_dig_config_igi(struct rtw89_dev *rtwdev)
+static void rtw89_phy_dig_config_igi(struct rtw89_dev *rtwdev,
+ struct rtw89_bb_ctx *bb)
{
- struct rtw89_dig_info *dig = &rtwdev->dig;
+ struct rtw89_dig_info *dig = &bb->dig;
if (!rtwdev->hal.support_igi)
return;
if (dig->force_gaincode_idx_en) {
- rtw89_phy_dig_set_igi_cr(rtwdev, dig->force_gaincode);
+ rtw89_phy_dig_set_igi_cr(rtwdev, bb, dig->force_gaincode);
rtw89_debug(rtwdev, RTW89_DBG_DIG,
"Force gaincode index enabled.\n");
} else {
- rtw89_phy_dig_gaincode_by_rssi(rtwdev, dig->igi_fa_rssi,
+ rtw89_phy_dig_gaincode_by_rssi(rtwdev, bb, dig->igi_fa_rssi,
&dig->cur_gaincode);
- rtw89_phy_dig_set_igi_cr(rtwdev, dig->cur_gaincode);
+ rtw89_phy_dig_set_igi_cr(rtwdev, bb, dig->cur_gaincode);
}
}
-static void rtw89_phy_dig_dyn_pd_th(struct rtw89_dev *rtwdev, u8 rssi,
- bool enable)
+static void rtw89_phy_dig_dyn_pd_th(struct rtw89_dev *rtwdev,
+ struct rtw89_bb_ctx *bb,
+ u8 rssi, bool enable)
{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
+ const struct rtw89_chan *chan = rtw89_mgnt_chan_get(rtwdev, bb->phy_idx);
const struct rtw89_dig_regs *dig_regs = rtwdev->chip->dig_regs;
enum rtw89_bandwidth cbw = chan->band_width;
- struct rtw89_dig_info *dig = &rtwdev->dig;
+ struct rtw89_dig_info *dig = &bb->dig;
u8 final_rssi = 0, under_region = dig->pd_low_th_ofst;
u8 ofdm_cca_th;
s8 cck_cca_th;
@@ -6184,10 +6292,10 @@ static void rtw89_phy_dig_dyn_pd_th(struct rtw89_dev *rtwdev, u8 rssi,
"Dynamic PD th disabled, Set PD_low_bd=0\n");
}
- rtw89_phy_write32_mask(rtwdev, dig_regs->seg0_pd_reg,
- dig_regs->pd_lower_bound_mask, pd_val);
- rtw89_phy_write32_mask(rtwdev, dig_regs->seg0_pd_reg,
- dig_regs->pd_spatial_reuse_en, enable);
+ rtw89_phy_write32_idx(rtwdev, dig_regs->seg0_pd_reg,
+ dig_regs->pd_lower_bound_mask, pd_val, bb->phy_idx);
+ rtw89_phy_write32_idx(rtwdev, dig_regs->seg0_pd_reg,
+ dig_regs->pd_spatial_reuse_en, enable, bb->phy_idx);
if (!rtwdev->hal.support_cckpd)
return;
@@ -6199,29 +6307,29 @@ static void rtw89_phy_dig_dyn_pd_th(struct rtw89_dev *rtwdev, u8 rssi,
"igi=%d, cck_ccaTH=%d, backoff=%d, cck_PD_low=((%d))dB\n",
final_rssi, cck_cca_th, under_region, pd_val);
- rtw89_phy_write32_mask(rtwdev, dig_regs->bmode_pd_reg,
- dig_regs->bmode_cca_rssi_limit_en, enable);
- rtw89_phy_write32_mask(rtwdev, dig_regs->bmode_pd_lower_bound_reg,
- dig_regs->bmode_rssi_nocca_low_th_mask, pd_val);
+ rtw89_phy_write32_idx(rtwdev, dig_regs->bmode_pd_reg,
+ dig_regs->bmode_cca_rssi_limit_en, enable, bb->phy_idx);
+ rtw89_phy_write32_idx(rtwdev, dig_regs->bmode_pd_lower_bound_reg,
+ dig_regs->bmode_rssi_nocca_low_th_mask, pd_val, bb->phy_idx);
}
-void rtw89_phy_dig_reset(struct rtw89_dev *rtwdev)
+void rtw89_phy_dig_reset(struct rtw89_dev *rtwdev, struct rtw89_bb_ctx *bb)
{
- struct rtw89_dig_info *dig = &rtwdev->dig;
+ struct rtw89_dig_info *dig = &bb->dig;
dig->bypass_dig = false;
- rtw89_phy_dig_para_reset(rtwdev);
- rtw89_phy_dig_set_igi_cr(rtwdev, dig->force_gaincode);
- rtw89_phy_dig_dyn_pd_th(rtwdev, rssi_nolink, false);
- rtw89_phy_dig_sdagc_follow_pagc_config(rtwdev, false);
- rtw89_phy_dig_update_para(rtwdev);
+ rtw89_phy_dig_para_reset(rtwdev, bb);
+ rtw89_phy_dig_set_igi_cr(rtwdev, bb, dig->force_gaincode);
+ rtw89_phy_dig_dyn_pd_th(rtwdev, bb, rssi_nolink, false);
+ rtw89_phy_dig_sdagc_follow_pagc_config(rtwdev, bb, false);
+ rtw89_phy_dig_update_para(rtwdev, bb);
}
#define IGI_RSSI_MIN 10
#define ABS_IGI_MIN 0xc
-void rtw89_phy_dig(struct rtw89_dev *rtwdev)
+static void __rtw89_phy_dig(struct rtw89_dev *rtwdev, struct rtw89_bb_ctx *bb)
{
- struct rtw89_dig_info *dig = &rtwdev->dig;
+ struct rtw89_dig_info *dig = &bb->dig;
bool is_linked = rtwdev->total_sta_assoc > 0;
u8 igi_min;
@@ -6230,20 +6338,22 @@ void rtw89_phy_dig(struct rtw89_dev *rtwdev)
return;
}
- rtw89_phy_dig_update_rssi_info(rtwdev);
+ rtw89_debug(rtwdev, RTW89_DBG_DIG, "BB-%d dig track\n", bb->phy_idx);
+
+ rtw89_phy_dig_update_rssi_info(rtwdev, bb);
if (!dig->is_linked_pre && is_linked) {
rtw89_debug(rtwdev, RTW89_DBG_DIG, "First connected\n");
- rtw89_phy_dig_update_para(rtwdev);
+ rtw89_phy_dig_update_para(rtwdev, bb);
dig->igi_fa_rssi = dig->igi_rssi;
} else if (dig->is_linked_pre && !is_linked) {
rtw89_debug(rtwdev, RTW89_DBG_DIG, "First disconnected\n");
- rtw89_phy_dig_update_para(rtwdev);
+ rtw89_phy_dig_update_para(rtwdev, bb);
dig->igi_fa_rssi = dig->igi_rssi;
}
dig->is_linked_pre = is_linked;
- rtw89_phy_dig_igi_offset_by_env(rtwdev);
+ rtw89_phy_dig_igi_offset_by_env(rtwdev, bb);
igi_min = max_t(int, dig->igi_rssi - IGI_RSSI_MIN, 0);
dig->dyn_igi_max = min(igi_min + IGI_OFFSET_MAX, igi_max_performance_mode);
@@ -6262,14 +6372,22 @@ void rtw89_phy_dig(struct rtw89_dev *rtwdev)
dig->igi_rssi, dig->dyn_igi_max, dig->dyn_igi_min,
dig->igi_fa_rssi);
- rtw89_phy_dig_config_igi(rtwdev);
+ rtw89_phy_dig_config_igi(rtwdev, bb);
- rtw89_phy_dig_dyn_pd_th(rtwdev, dig->igi_fa_rssi, dig->dyn_pd_th_en);
+ rtw89_phy_dig_dyn_pd_th(rtwdev, bb, dig->igi_fa_rssi, dig->dyn_pd_th_en);
if (dig->dyn_pd_th_en && dig->igi_fa_rssi > dig->dyn_pd_th_max)
- rtw89_phy_dig_sdagc_follow_pagc_config(rtwdev, true);
+ rtw89_phy_dig_sdagc_follow_pagc_config(rtwdev, bb, true);
else
- rtw89_phy_dig_sdagc_follow_pagc_config(rtwdev, false);
+ rtw89_phy_dig_sdagc_follow_pagc_config(rtwdev, bb, false);
+}
+
+void rtw89_phy_dig(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_bb_ctx *bb;
+
+ rtw89_for_each_active_bb(rtwdev, bb)
+ __rtw89_phy_dig(rtwdev, bb);
}
static void __rtw89_phy_tx_path_div_sta_iter(struct rtw89_dev *rtwdev,
@@ -6443,17 +6561,17 @@ static void rtw89_phy_antdiv_training_state(struct rtw89_dev *rtwdev)
}
antdiv->training_count++;
- ieee80211_queue_delayed_work(rtwdev->hw, &rtwdev->antdiv_work,
- state_period);
+ wiphy_delayed_work_queue(rtwdev->hw->wiphy, &rtwdev->antdiv_work,
+ state_period);
}
-void rtw89_phy_antdiv_work(struct work_struct *work)
+void rtw89_phy_antdiv_work(struct wiphy *wiphy, struct wiphy_work *work)
{
struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev,
antdiv_work.work);
struct rtw89_antdiv_info *antdiv = &rtwdev->antdiv;
- mutex_lock(&rtwdev->mutex);
+ lockdep_assert_wiphy(wiphy);
if (antdiv->training_count <= ANTDIV_TRAINNING_CNT) {
rtw89_phy_antdiv_training_state(rtwdev);
@@ -6461,8 +6579,6 @@ void rtw89_phy_antdiv_work(struct work_struct *work)
rtw89_phy_antdiv_decision_state(rtwdev);
rtw89_phy_antdiv_set_ant(rtwdev);
}
-
- mutex_unlock(&rtwdev->mutex);
}
void rtw89_phy_antdiv_track(struct rtw89_dev *rtwdev)
@@ -6483,19 +6599,34 @@ void rtw89_phy_antdiv_track(struct rtw89_dev *rtwdev)
return;
antdiv->training_count = 0;
- ieee80211_queue_delayed_work(rtwdev->hw, &rtwdev->antdiv_work, 0);
+ wiphy_delayed_work_queue(rtwdev->hw->wiphy, &rtwdev->antdiv_work, 0);
+}
+
+static void __rtw89_phy_env_monitor_init(struct rtw89_dev *rtwdev,
+ struct rtw89_bb_ctx *bb)
+{
+ rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
+ "BB-%d env_monitor init\n", bb->phy_idx);
+
+ rtw89_phy_ccx_top_setting_init(rtwdev, bb);
+ rtw89_phy_ifs_clm_setting_init(rtwdev, bb);
}
static void rtw89_phy_env_monitor_init(struct rtw89_dev *rtwdev)
{
- rtw89_phy_ccx_top_setting_init(rtwdev);
- rtw89_phy_ifs_clm_setting_init(rtwdev);
+ struct rtw89_bb_ctx *bb;
+
+ rtw89_for_each_capab_bb(rtwdev, bb)
+ __rtw89_phy_env_monitor_init(rtwdev, bb);
}
-static void rtw89_phy_edcca_init(struct rtw89_dev *rtwdev)
+static void __rtw89_phy_edcca_init(struct rtw89_dev *rtwdev,
+ struct rtw89_bb_ctx *bb)
{
const struct rtw89_edcca_regs *edcca_regs = rtwdev->chip->edcca_regs;
- struct rtw89_edcca_bak *edcca_bak = &rtwdev->hal.edcca_bak;
+ struct rtw89_edcca_bak *edcca_bak = &bb->edcca_bak;
+
+ rtw89_debug(rtwdev, RTW89_DBG_EDCCA, "BB-%d edcca init\n", bb->phy_idx);
memset(edcca_bak, 0, sizeof(*edcca_bak));
@@ -6511,8 +6642,16 @@ static void rtw89_phy_edcca_init(struct rtw89_dev *rtwdev)
rtw89_phy_set_phy_regs(rtwdev, R_DFS_FFT_CG, B_DFS_FFT_EN, 1);
}
- rtw89_phy_write32_mask(rtwdev, edcca_regs->tx_collision_t2r_st,
- edcca_regs->tx_collision_t2r_st_mask, 0x29);
+ rtw89_phy_write32_idx(rtwdev, edcca_regs->tx_collision_t2r_st,
+ edcca_regs->tx_collision_t2r_st_mask, 0x29, bb->phy_idx);
+}
+
+static void rtw89_phy_edcca_init(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_bb_ctx *bb;
+
+ rtw89_for_each_capab_bb(rtwdev, bb)
+ __rtw89_phy_edcca_init(rtwdev, bb);
}
void rtw89_phy_dm_init(struct rtw89_dev *rtwdev)
@@ -6875,44 +7014,46 @@ void rtw89_decode_chan_idx(struct rtw89_dev *rtwdev, u8 chan_idx,
}
EXPORT_SYMBOL(rtw89_decode_chan_idx);
-void rtw89_phy_config_edcca(struct rtw89_dev *rtwdev, bool scan)
+void rtw89_phy_config_edcca(struct rtw89_dev *rtwdev,
+ struct rtw89_bb_ctx *bb, bool scan)
{
const struct rtw89_edcca_regs *edcca_regs = rtwdev->chip->edcca_regs;
- struct rtw89_edcca_bak *edcca_bak = &rtwdev->hal.edcca_bak;
+ struct rtw89_edcca_bak *edcca_bak = &bb->edcca_bak;
if (scan) {
edcca_bak->a =
- rtw89_phy_read32_mask(rtwdev, edcca_regs->edcca_level,
- edcca_regs->edcca_mask);
+ rtw89_phy_read32_idx(rtwdev, edcca_regs->edcca_level,
+ edcca_regs->edcca_mask, bb->phy_idx);
edcca_bak->p =
- rtw89_phy_read32_mask(rtwdev, edcca_regs->edcca_level,
- edcca_regs->edcca_p_mask);
+ rtw89_phy_read32_idx(rtwdev, edcca_regs->edcca_level,
+ edcca_regs->edcca_p_mask, bb->phy_idx);
edcca_bak->ppdu =
- rtw89_phy_read32_mask(rtwdev, edcca_regs->ppdu_level,
- edcca_regs->ppdu_mask);
-
- rtw89_phy_write32_mask(rtwdev, edcca_regs->edcca_level,
- edcca_regs->edcca_mask, EDCCA_MAX);
- rtw89_phy_write32_mask(rtwdev, edcca_regs->edcca_level,
- edcca_regs->edcca_p_mask, EDCCA_MAX);
- rtw89_phy_write32_mask(rtwdev, edcca_regs->ppdu_level,
- edcca_regs->ppdu_mask, EDCCA_MAX);
+ rtw89_phy_read32_idx(rtwdev, edcca_regs->ppdu_level,
+ edcca_regs->ppdu_mask, bb->phy_idx);
+
+ rtw89_phy_write32_idx(rtwdev, edcca_regs->edcca_level,
+ edcca_regs->edcca_mask, EDCCA_MAX, bb->phy_idx);
+ rtw89_phy_write32_idx(rtwdev, edcca_regs->edcca_level,
+ edcca_regs->edcca_p_mask, EDCCA_MAX, bb->phy_idx);
+ rtw89_phy_write32_idx(rtwdev, edcca_regs->ppdu_level,
+ edcca_regs->ppdu_mask, EDCCA_MAX, bb->phy_idx);
} else {
- rtw89_phy_write32_mask(rtwdev, edcca_regs->edcca_level,
- edcca_regs->edcca_mask,
- edcca_bak->a);
- rtw89_phy_write32_mask(rtwdev, edcca_regs->edcca_level,
- edcca_regs->edcca_p_mask,
- edcca_bak->p);
- rtw89_phy_write32_mask(rtwdev, edcca_regs->ppdu_level,
- edcca_regs->ppdu_mask,
- edcca_bak->ppdu);
+ rtw89_phy_write32_idx(rtwdev, edcca_regs->edcca_level,
+ edcca_regs->edcca_mask,
+ edcca_bak->a, bb->phy_idx);
+ rtw89_phy_write32_idx(rtwdev, edcca_regs->edcca_level,
+ edcca_regs->edcca_p_mask,
+ edcca_bak->p, bb->phy_idx);
+ rtw89_phy_write32_idx(rtwdev, edcca_regs->ppdu_level,
+ edcca_regs->ppdu_mask,
+ edcca_bak->ppdu, bb->phy_idx);
}
}
-static void rtw89_phy_edcca_log(struct rtw89_dev *rtwdev)
+static void rtw89_phy_edcca_log(struct rtw89_dev *rtwdev, struct rtw89_bb_ctx *bb)
{
const struct rtw89_edcca_regs *edcca_regs = rtwdev->chip->edcca_regs;
+ const struct rtw89_edcca_p_regs *edcca_p_regs;
bool flag_fb, flag_p20, flag_s20, flag_s40, flag_s80;
s8 pwdb_fb, pwdb_p20, pwdb_s20, pwdb_s40, pwdb_s80;
u8 path, per20_bitmap;
@@ -6922,13 +7063,18 @@ static void rtw89_phy_edcca_log(struct rtw89_dev *rtwdev)
if (!rtw89_debug_is_enabled(rtwdev, RTW89_DBG_EDCCA))
return;
+ if (bb->phy_idx == RTW89_PHY_1)
+ edcca_p_regs = &edcca_regs->p[RTW89_PHY_1];
+ else
+ edcca_p_regs = &edcca_regs->p[RTW89_PHY_0];
+
if (rtwdev->chip->chip_id == RTL8922A)
rtw89_phy_write32_mask(rtwdev, edcca_regs->rpt_sel_be,
edcca_regs->rpt_sel_be_mask, 0);
- rtw89_phy_write32_mask(rtwdev, edcca_regs->rpt_sel,
- edcca_regs->rpt_sel_mask, 0);
- tmp = rtw89_phy_read32(rtwdev, edcca_regs->rpt_b);
+ rtw89_phy_write32_mask(rtwdev, edcca_p_regs->rpt_sel,
+ edcca_p_regs->rpt_sel_mask, 0);
+ tmp = rtw89_phy_read32(rtwdev, edcca_p_regs->rpt_b);
path = u32_get_bits(tmp, B_EDCCA_RPT_B_PATH_MASK);
flag_s80 = u32_get_bits(tmp, B_EDCCA_RPT_B_S80);
flag_s40 = u32_get_bits(tmp, B_EDCCA_RPT_B_S40);
@@ -6939,19 +7085,19 @@ static void rtw89_phy_edcca_log(struct rtw89_dev *rtwdev)
pwdb_p20 = u32_get_bits(tmp, MASKBYTE2);
pwdb_fb = u32_get_bits(tmp, MASKBYTE3);
- rtw89_phy_write32_mask(rtwdev, edcca_regs->rpt_sel,
- edcca_regs->rpt_sel_mask, 4);
- tmp = rtw89_phy_read32(rtwdev, edcca_regs->rpt_b);
+ rtw89_phy_write32_mask(rtwdev, edcca_p_regs->rpt_sel,
+ edcca_p_regs->rpt_sel_mask, 4);
+ tmp = rtw89_phy_read32(rtwdev, edcca_p_regs->rpt_b);
pwdb_s80 = u32_get_bits(tmp, MASKBYTE1);
pwdb_s40 = u32_get_bits(tmp, MASKBYTE2);
- per20_bitmap = rtw89_phy_read32_mask(rtwdev, edcca_regs->rpt_a,
+ per20_bitmap = rtw89_phy_read32_mask(rtwdev, edcca_p_regs->rpt_a,
MASKBYTE0);
if (rtwdev->chip->chip_id == RTL8922A) {
rtw89_phy_write32_mask(rtwdev, edcca_regs->rpt_sel_be,
edcca_regs->rpt_sel_be_mask, 4);
- tmp = rtw89_phy_read32(rtwdev, edcca_regs->rpt_b);
+ tmp = rtw89_phy_read32(rtwdev, edcca_p_regs->rpt_b);
pwdb[0] = u32_get_bits(tmp, MASKBYTE3);
pwdb[1] = u32_get_bits(tmp, MASKBYTE2);
pwdb[2] = u32_get_bits(tmp, MASKBYTE1);
@@ -6959,33 +7105,33 @@ static void rtw89_phy_edcca_log(struct rtw89_dev *rtwdev)
rtw89_phy_write32_mask(rtwdev, edcca_regs->rpt_sel_be,
edcca_regs->rpt_sel_be_mask, 5);
- tmp = rtw89_phy_read32(rtwdev, edcca_regs->rpt_b);
+ tmp = rtw89_phy_read32(rtwdev, edcca_p_regs->rpt_b);
pwdb[4] = u32_get_bits(tmp, MASKBYTE3);
pwdb[5] = u32_get_bits(tmp, MASKBYTE2);
pwdb[6] = u32_get_bits(tmp, MASKBYTE1);
pwdb[7] = u32_get_bits(tmp, MASKBYTE0);
} else {
- rtw89_phy_write32_mask(rtwdev, edcca_regs->rpt_sel,
- edcca_regs->rpt_sel_mask, 0);
- tmp = rtw89_phy_read32(rtwdev, edcca_regs->rpt_a);
+ rtw89_phy_write32_mask(rtwdev, edcca_p_regs->rpt_sel,
+ edcca_p_regs->rpt_sel_mask, 0);
+ tmp = rtw89_phy_read32(rtwdev, edcca_p_regs->rpt_a);
pwdb[0] = u32_get_bits(tmp, MASKBYTE3);
pwdb[1] = u32_get_bits(tmp, MASKBYTE2);
- rtw89_phy_write32_mask(rtwdev, edcca_regs->rpt_sel,
- edcca_regs->rpt_sel_mask, 1);
- tmp = rtw89_phy_read32(rtwdev, edcca_regs->rpt_a);
+ rtw89_phy_write32_mask(rtwdev, edcca_p_regs->rpt_sel,
+ edcca_p_regs->rpt_sel_mask, 1);
+ tmp = rtw89_phy_read32(rtwdev, edcca_p_regs->rpt_a);
pwdb[2] = u32_get_bits(tmp, MASKBYTE3);
pwdb[3] = u32_get_bits(tmp, MASKBYTE2);
- rtw89_phy_write32_mask(rtwdev, edcca_regs->rpt_sel,
- edcca_regs->rpt_sel_mask, 2);
- tmp = rtw89_phy_read32(rtwdev, edcca_regs->rpt_a);
+ rtw89_phy_write32_mask(rtwdev, edcca_p_regs->rpt_sel,
+ edcca_p_regs->rpt_sel_mask, 2);
+ tmp = rtw89_phy_read32(rtwdev, edcca_p_regs->rpt_a);
pwdb[4] = u32_get_bits(tmp, MASKBYTE3);
pwdb[5] = u32_get_bits(tmp, MASKBYTE2);
- rtw89_phy_write32_mask(rtwdev, edcca_regs->rpt_sel,
- edcca_regs->rpt_sel_mask, 3);
- tmp = rtw89_phy_read32(rtwdev, edcca_regs->rpt_a);
+ rtw89_phy_write32_mask(rtwdev, edcca_p_regs->rpt_sel,
+ edcca_p_regs->rpt_sel_mask, 3);
+ tmp = rtw89_phy_read32(rtwdev, edcca_p_regs->rpt_a);
pwdb[6] = u32_get_bits(tmp, MASKBYTE3);
pwdb[7] = u32_get_bits(tmp, MASKBYTE2);
}
@@ -7007,9 +7153,10 @@ static void rtw89_phy_edcca_log(struct rtw89_dev *rtwdev)
pwdb_fb, pwdb_p20, pwdb_s20, pwdb_s40, pwdb_s80);
}
-static u8 rtw89_phy_edcca_get_thre_by_rssi(struct rtw89_dev *rtwdev)
+static u8 rtw89_phy_edcca_get_thre_by_rssi(struct rtw89_dev *rtwdev,
+ struct rtw89_bb_ctx *bb)
{
- struct rtw89_phy_ch_info *ch_info = &rtwdev->ch_info;
+ struct rtw89_phy_ch_info *ch_info = &bb->ch_info;
bool is_linked = rtwdev->total_sta_assoc > 0;
u8 rssi_min = ch_info->rssi_min >> 1;
u8 edcca_thre;
@@ -7025,13 +7172,13 @@ static u8 rtw89_phy_edcca_get_thre_by_rssi(struct rtw89_dev *rtwdev)
return edcca_thre;
}
-void rtw89_phy_edcca_thre_calc(struct rtw89_dev *rtwdev)
+void rtw89_phy_edcca_thre_calc(struct rtw89_dev *rtwdev, struct rtw89_bb_ctx *bb)
{
const struct rtw89_edcca_regs *edcca_regs = rtwdev->chip->edcca_regs;
- struct rtw89_edcca_bak *edcca_bak = &rtwdev->hal.edcca_bak;
+ struct rtw89_edcca_bak *edcca_bak = &bb->edcca_bak;
u8 th;
- th = rtw89_phy_edcca_get_thre_by_rssi(rtwdev);
+ th = rtw89_phy_edcca_get_thre_by_rssi(rtwdev, bb);
if (th == edcca_bak->th_old)
return;
@@ -7040,23 +7187,33 @@ void rtw89_phy_edcca_thre_calc(struct rtw89_dev *rtwdev)
rtw89_debug(rtwdev, RTW89_DBG_EDCCA,
"[EDCCA]: Normal Mode, EDCCA_th = %d\n", th);
- rtw89_phy_write32_mask(rtwdev, edcca_regs->edcca_level,
- edcca_regs->edcca_mask, th);
- rtw89_phy_write32_mask(rtwdev, edcca_regs->edcca_level,
- edcca_regs->edcca_p_mask, th);
- rtw89_phy_write32_mask(rtwdev, edcca_regs->ppdu_level,
- edcca_regs->ppdu_mask, th);
+ rtw89_phy_write32_idx(rtwdev, edcca_regs->edcca_level,
+ edcca_regs->edcca_mask, th, bb->phy_idx);
+ rtw89_phy_write32_idx(rtwdev, edcca_regs->edcca_level,
+ edcca_regs->edcca_p_mask, th, bb->phy_idx);
+ rtw89_phy_write32_idx(rtwdev, edcca_regs->ppdu_level,
+ edcca_regs->ppdu_mask, th, bb->phy_idx);
+}
+
+static
+void __rtw89_phy_edcca_track(struct rtw89_dev *rtwdev, struct rtw89_bb_ctx *bb)
+{
+ rtw89_debug(rtwdev, RTW89_DBG_EDCCA, "BB-%d edcca track\n", bb->phy_idx);
+
+ rtw89_phy_edcca_thre_calc(rtwdev, bb);
+ rtw89_phy_edcca_log(rtwdev, bb);
}
void rtw89_phy_edcca_track(struct rtw89_dev *rtwdev)
{
struct rtw89_hal *hal = &rtwdev->hal;
+ struct rtw89_bb_ctx *bb;
if (hal->disabled_dm_bitmap & BIT(RTW89_DM_DYNAMIC_EDCCA))
return;
- rtw89_phy_edcca_thre_calc(rtwdev);
- rtw89_phy_edcca_log(rtwdev);
+ rtw89_for_each_active_bb(rtwdev, bb)
+ __rtw89_phy_edcca_track(rtwdev, bb);
}
enum rtw89_rf_path_bit rtw89_phy_get_kpath(struct rtw89_dev *rtwdev,
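
Most of the phy.c churn above follows a single pattern: DIG, CCX/IFS-CLM and EDCCA state moves out of rtw89_dev into a per-baseband rtw89_bb_ctx, and the exported entry points become thin wrappers that loop with rtw89_for_each_active_bb or rtw89_for_each_capab_bb so each PHY is tracked independently. A self-contained sketch of that wrapper-plus-iterator shape; the struct layout, bitmap and macro below are hypothetical stand-ins, not the driver's definitions:

#include <stdint.h>
#include <stdio.h>

#define MAX_BB	2

struct bb_ctx {
	int phy_idx;
	int rssi_min;			/* per-BB state, e.g. ch_info */
};

struct dev {
	uint32_t active_bb_map;		/* bitmap of basebands in use */
	struct bb_ctx bb[MAX_BB];
};

/* Visit only the basebands marked active, like rtw89_for_each_active_bb. */
#define for_each_active_bb(d, bb)					\
	for ((bb) = (d)->bb; (bb) < (d)->bb + MAX_BB; (bb)++)		\
		if ((d)->active_bb_map & (1u << (bb)->phy_idx))

static void __track_one(struct bb_ctx *bb)
{
	printf("BB-%d track, rssi_min=%d\n", bb->phy_idx, bb->rssi_min);
}

static void track(struct dev *d)	/* public wrapper */
{
	struct bb_ctx *bb;

	for_each_active_bb(d, bb)
		__track_one(bb);
}

int main(void)
{
	struct dev d = {
		.active_bb_map = 0x1,	/* only PHY 0 active */
		.bb = { { .phy_idx = 0, .rssi_min = 60 },
			{ .phy_idx = 1, .rssi_min = 255 } },
	};

	track(&d);
	return 0;
}
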
diff --git a/drivers/net/wireless/realtek/rtw89/phy.h b/drivers/net/wireless/realtek/rtw89/phy.h
index 08b635c93ac3..518a100375fb 100644
--- a/drivers/net/wireless/realtek/rtw89/phy.h
+++ b/drivers/net/wireless/realtek/rtw89/phy.h
@@ -835,8 +835,8 @@ s8 rtw89_phy_read_txpwr_byrate(struct rtw89_dev *rtwdev, u8 band, u8 bw,
void rtw89_phy_ant_gain_init(struct rtw89_dev *rtwdev);
s16 rtw89_phy_ant_gain_pwr_offset(struct rtw89_dev *rtwdev,
const struct rtw89_chan *chan);
-void rtw89_print_ant_gain(struct seq_file *m, struct rtw89_dev *rtwdev,
- const struct rtw89_chan *chan);
+int rtw89_print_ant_gain(struct rtw89_dev *rtwdev, char *buf, size_t bufsz,
+ const struct rtw89_chan *chan);
void rtw89_phy_load_txpwr_byrate(struct rtw89_dev *rtwdev,
const struct rtw89_txpwr_table *tbl);
s8 rtw89_phy_read_txpwr_limit(struct rtw89_dev *rtwdev, u8 band,
@@ -914,6 +914,13 @@ static inline s8 rtw89_phy_txpwr_rf_to_bb(struct rtw89_dev *rtwdev, s8 txpwr_rf)
return txpwr_rf << (chip->txpwr_factor_bb - chip->txpwr_factor_rf);
}
+static inline s8 rtw89_phy_txpwr_bb_to_rf(struct rtw89_dev *rtwdev, s8 txpwr_bb)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+
+ return txpwr_bb >> (chip->txpwr_factor_bb - chip->txpwr_factor_rf);
+}
+
static inline s8 rtw89_phy_txpwr_rf_to_mac(struct rtw89_dev *rtwdev, s8 txpwr_rf)
{
const struct rtw89_chip_info *chip = rtwdev->chip;
@@ -978,20 +985,20 @@ void rtw89_phy_rfk_tssi_fill_fwcmd_tmeter_tbl(struct rtw89_dev *rtwdev,
const struct rtw89_chan *chan,
struct rtw89_h2c_rf_tssi *h2c);
void rtw89_phy_cfo_track(struct rtw89_dev *rtwdev);
-void rtw89_phy_cfo_track_work(struct work_struct *work);
+void rtw89_phy_cfo_track_work(struct wiphy *wiphy, struct wiphy_work *work);
void rtw89_phy_cfo_parse(struct rtw89_dev *rtwdev, s16 cfo_val,
struct rtw89_rx_phy_ppdu *phy_ppdu);
void rtw89_phy_stat_track(struct rtw89_dev *rtwdev);
void rtw89_phy_env_monitor_track(struct rtw89_dev *rtwdev);
void rtw89_phy_set_phy_regs(struct rtw89_dev *rtwdev, u32 addr, u32 mask,
u32 val);
-void rtw89_phy_dig_reset(struct rtw89_dev *rtwdev);
+void rtw89_phy_dig_reset(struct rtw89_dev *rtwdev, struct rtw89_bb_ctx *bb);
void rtw89_phy_dig(struct rtw89_dev *rtwdev);
void rtw89_phy_tx_path_div_track(struct rtw89_dev *rtwdev);
void rtw89_phy_antdiv_parse(struct rtw89_dev *rtwdev,
struct rtw89_rx_phy_ppdu *phy_ppdu);
void rtw89_phy_antdiv_track(struct rtw89_dev *rtwdev);
-void rtw89_phy_antdiv_work(struct work_struct *work);
+void rtw89_phy_antdiv_work(struct wiphy *wiphy, struct wiphy_work *work);
void rtw89_phy_set_bss_color(struct rtw89_dev *rtwdev,
struct rtw89_vif_link *rtwvif_link);
void rtw89_phy_tssi_ctrl_set_bandedge_cfg(struct rtw89_dev *rtwdev,
@@ -1002,9 +1009,10 @@ void rtw89_phy_ul_tb_ctrl_track(struct rtw89_dev *rtwdev);
u8 rtw89_encode_chan_idx(struct rtw89_dev *rtwdev, u8 central_ch, u8 band);
void rtw89_decode_chan_idx(struct rtw89_dev *rtwdev, u8 chan_idx,
u8 *ch, enum nl80211_band *band);
-void rtw89_phy_config_edcca(struct rtw89_dev *rtwdev, bool scan);
+void rtw89_phy_config_edcca(struct rtw89_dev *rtwdev,
+ struct rtw89_bb_ctx *bb, bool scan);
void rtw89_phy_edcca_track(struct rtw89_dev *rtwdev);
-void rtw89_phy_edcca_thre_calc(struct rtw89_dev *rtwdev);
+void rtw89_phy_edcca_thre_calc(struct rtw89_dev *rtwdev, struct rtw89_bb_ctx *bb);
enum rtw89_rf_path_bit rtw89_phy_get_kpath(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy_idx);
enum rtw89_rf_path rtw89_phy_get_syn_sel(struct rtw89_dev *rtwdev,
diff --git a/drivers/net/wireless/realtek/rtw89/ps.c b/drivers/net/wireless/realtek/rtw89/ps.c
index 96ea04d90cd3..ac46a7baa00d 100644
--- a/drivers/net/wireless/realtek/rtw89/ps.c
+++ b/drivers/net/wireless/realtek/rtw89/ps.c
@@ -113,7 +113,7 @@ static void __rtw89_leave_lps(struct rtw89_dev *rtwdev,
void rtw89_leave_ps_mode(struct rtw89_dev *rtwdev)
{
- lockdep_assert_held(&rtwdev->mutex);
+ lockdep_assert_wiphy(rtwdev->hw->wiphy);
__rtw89_leave_ps_mode(rtwdev);
}
@@ -125,7 +125,7 @@ void rtw89_enter_lps(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
bool can_ps_mode = true;
unsigned int link_id;
- lockdep_assert_held(&rtwdev->mutex);
+ lockdep_assert_wiphy(rtwdev->hw->wiphy);
if (test_and_set_bit(RTW89_FLAG_LEISURE_PS, rtwdev->flags))
return;
@@ -162,7 +162,7 @@ void rtw89_leave_lps(struct rtw89_dev *rtwdev)
struct rtw89_vif *rtwvif;
unsigned int link_id;
- lockdep_assert_held(&rtwdev->mutex);
+ lockdep_assert_wiphy(rtwdev->hw->wiphy);
if (!test_and_clear_bit(RTW89_FLAG_LEISURE_PS, rtwdev->flags))
return;
diff --git a/drivers/net/wireless/realtek/rtw89/reg.h b/drivers/net/wireless/realtek/rtw89/reg.h
index 10d0efa7a58e..c776954ad360 100644
--- a/drivers/net/wireless/realtek/rtw89/reg.h
+++ b/drivers/net/wireless/realtek/rtw89/reg.h
@@ -6618,6 +6618,13 @@
#define B_BE_RTS_LIMIT_IN_OFDM6 BIT(1)
#define B_BE_CHECK_CCK_EN BIT(0)
+#define R_BE_TXCNT 0x1082C
+#define R_BE_TXCNT_C1 0x1482C
+#define B_BE_ADD_TXCNT_BY BIT(31)
+#define B_BE_TOTAL_TC_OPT BIT(30)
+#define B_BE_S_TXCNT_LMT_MASK GENMASK(29, 24)
+#define B_BE_L_TXCNT_LMT_MASK GENMASK(21, 16)
+
#define R_BE_MBSSID_DROP_0 0x1083C
#define R_BE_MBSSID_DROP_0_C1 0x1483C
#define B_BE_GI_LTF_FB_SEL BIT(30)
@@ -7095,6 +7102,10 @@
#define B_BE_MACLBK_RDY_NUM_MASK GENMASK(7, 3)
#define B_BE_MACLBK_EN BIT(0)
+#define R_BE_CLIENT_OM_CTRL 0x11040
+#define R_BE_CLIENT_OM_CTRL_C1 0x15040
+#define B_BE_TRIG_DIS_EHTTB BIT(24)
+
#define R_BE_WMAC_NAV_CTL 0x11080
#define R_BE_WMAC_NAV_CTL_C1 0x15080
#define B_BE_WMAC_NAV_UPPER_EN BIT(26)
@@ -8157,6 +8168,8 @@
#define B_EDCCA_RPT_B_S40 BIT(4)
#define B_EDCCA_RPT_B_S80 BIT(3)
#define B_EDCCA_RPT_B_PATH_MASK GENMASK(2, 1)
+#define R_EDCCA_RPT_P1_A 0x1740
+#define R_EDCCA_RPT_P1_B 0x1744
#define R_SWSI_V1 0x174C
#define B_SWSI_W_BUSY_V1 BIT(24)
#define B_SWSI_R_BUSY_V1 BIT(25)
@@ -8222,6 +8235,7 @@
#define B_TXCKEN_FORCE_ALL GENMASK(24, 0)
#define R_EDCCA_RPT_SEL 0x20CC
#define B_EDCCA_RPT_SEL_MSK GENMASK(2, 0)
+#define B_EDCCA_RPT_SEL_P1_MSK GENMASK(5, 3)
#define R_ADC_FIFO 0x20fc
#define B_ADC_FIFO_RST GENMASK(31, 24)
#define B_ADC_FIFO_RXK GENMASK(31, 16)
@@ -8291,6 +8305,8 @@
#define B_P1_EN_SOUND_WO_NDP BIT(1)
#define R_EDCCA_RPT_A_BE 0x2E38
#define R_EDCCA_RPT_B_BE 0x2E3C
+#define R_EDCCA_RPT_P1_A_BE 0x2E40
+#define R_EDCCA_RPT_P1_B_BE 0x2E44
#define R_S1_HW_SI_DIS 0x3200
#define B_S1_HW_SI_DIS_W_R_TRIG GENMASK(30, 28)
#define R_P1_RXCK 0x32A0
@@ -9169,6 +9185,16 @@
#define B_IQKINF2_FCNT GENMASK(23, 16)
#define B_IQKINF2_KCNT GENMASK(15, 8)
#define B_IQKINF2_NCTLV GENMASK(7, 0)
+#define R_TXAGC_REF_DBM_RF1_P0 0xBC04
+#define B_TXAGC_OFDM_REF_DBM_RF1_P0 GENMASK(10, 2)
+#define B_TXAGC_CCK_REF_DBM_RF1_P0 GENMASK(19, 11)
+#define R_TSSI_K_RF1_P0 0xBC28
+#define B_TSSI_K_OFDM_RF1_P0 GENMASK(9, 0)
+#define R_TXAGC_REF_DBM_RF1_P1 0xBD04
+#define B_TXAGC_OFDM_REF_DBM_RF1_P1 GENMASK(10, 2)
+#define B_TXAGC_CCK_REF_DBM_RF1_P1 GENMASK(19, 11)
+#define R_TSSI_K_RF1_P1 0xBD28
+#define B_TSSI_K_OFDM_RF1_P1 GENMASK(9, 0)
#define R_RFK_ST 0xBFF8
#define R_DCOF0 0xC000
#define B_DCOF0_RST BIT(17)
@@ -9338,16 +9364,18 @@
#define R_TSSI_MAP_OFST_P1 0xE720
#define B_TSSI_MAP_OFST_OFDM GENMASK(17, 9)
#define B_TSSI_MAP_OFST_CCK GENMASK(26, 18)
-#define R_TXAGC_REF0_P0 0xE628
-#define R_TXAGC_REF0_P1 0xE728
-#define B_TXAGC_REF0_OFDM_DBM GENMASK(8, 0)
-#define B_TXAGC_REF0_CCK_DBM GENMASK(17, 9)
-#define B_TXAGC_REF0_OFDM_CW GENMASK(26, 18)
-#define R_TXAGC_REF1_P0 0xE62C
-#define R_TXAGC_REF1_P1 0xE72C
-#define B_TXAGC_REF1_CCK_CW GENMASK(8, 0)
+#define R_TXAGC_REF_DBM_P0 0xE628
+#define B_TXAGC_OFDM_REF_DBM_P0 GENMASK(8, 0)
+#define B_TXAGC_CCK_REF_DBM_P0 GENMASK(17, 9)
+#define R_TSSI_K_P0 0xE6A0
+#define B_TSSI_K_OFDM_P0 GENMASK(29, 20)
#define R_TXPWR_RSTB 0xE70C
#define B_TXPWR_RSTB BIT(16)
+#define R_TXAGC_REF_DBM_P1 0xE728
+#define B_TXAGC_OFDM_REF_DBM_P1 GENMASK(8, 0)
+#define B_TXAGC_CCK_REF_DBM_P1 GENMASK(17, 9)
+#define R_TSSI_K_P1 0xE7A0
+#define B_TSSI_K_OFDM_P1 GENMASK(29, 20)
/* WiFi CPU local domain */
#define R_AX_WDT_CTRL 0x0040
diff --git a/drivers/net/wireless/realtek/rtw89/regd.c b/drivers/net/wireless/realtek/rtw89/regd.c
index 80b2f74589eb..655323a79608 100644
--- a/drivers/net/wireless/realtek/rtw89/regd.c
+++ b/drivers/net/wireless/realtek/rtw89/regd.c
@@ -7,257 +7,266 @@
#include "ps.h"
#include "util.h"
-#define COUNTRY_REGD(_alpha2, _txpwr_regd...) \
- {.alpha2 = (_alpha2), \
- .txpwr_regd = {_txpwr_regd}, \
+static
+void rtw89_regd_notifier(struct wiphy *wiphy, struct regulatory_request *request);
+
+#define COUNTRY_REGD(_alpha2, _rule_2ghz, _rule_5ghz, _rule_6ghz, _fmap) \
+ { \
+ .alpha2 = _alpha2, \
+ .txpwr_regd[RTW89_BAND_2G] = _rule_2ghz, \
+ .txpwr_regd[RTW89_BAND_5G] = _rule_5ghz, \
+ .txpwr_regd[RTW89_BAND_6G] = _rule_6ghz, \
+ .func_bitmap = { _fmap, }, \
}
+static_assert(BITS_PER_TYPE(unsigned long) >= NUM_OF_RTW89_REGD_FUNC);
+
static const struct rtw89_regd rtw89_ww_regd =
- COUNTRY_REGD("00", RTW89_WW, RTW89_WW, RTW89_WW);
+ COUNTRY_REGD("00", RTW89_WW, RTW89_WW, RTW89_WW, 0x0);
static const struct rtw89_regd rtw89_regd_map[] = {
- COUNTRY_REGD("AR", RTW89_MEXICO, RTW89_MEXICO, RTW89_FCC),
- COUNTRY_REGD("BO", RTW89_FCC, RTW89_FCC, RTW89_NA),
- COUNTRY_REGD("BR", RTW89_FCC, RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("CL", RTW89_CHILE, RTW89_CHILE, RTW89_CHILE),
- COUNTRY_REGD("CO", RTW89_FCC, RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("CR", RTW89_FCC, RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("EC", RTW89_FCC, RTW89_FCC, RTW89_NA),
- COUNTRY_REGD("SV", RTW89_FCC, RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("GT", RTW89_FCC, RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("HN", RTW89_FCC, RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("MX", RTW89_MEXICO, RTW89_MEXICO, RTW89_FCC),
- COUNTRY_REGD("NI", RTW89_FCC, RTW89_FCC, RTW89_NA),
- COUNTRY_REGD("PA", RTW89_FCC, RTW89_FCC, RTW89_NA),
- COUNTRY_REGD("PY", RTW89_FCC, RTW89_FCC, RTW89_NA),
- COUNTRY_REGD("PE", RTW89_FCC, RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("US", RTW89_FCC, RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("UY", RTW89_FCC, RTW89_FCC, RTW89_NA),
- COUNTRY_REGD("VE", RTW89_FCC, RTW89_FCC, RTW89_NA),
- COUNTRY_REGD("PR", RTW89_FCC, RTW89_FCC, RTW89_NA),
- COUNTRY_REGD("DO", RTW89_FCC, RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("AT", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("BE", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("CY", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("CZ", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("DK", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("EE", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("FI", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("FR", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("DE", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("GR", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("HU", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("IS", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("IE", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("IT", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("LV", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("LI", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("LT", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("LU", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("MT", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("MC", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("NL", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("NO", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("PL", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("PT", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("SK", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("SI", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("ES", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("SE", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("CH", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("GB", RTW89_UK, RTW89_UK, RTW89_UK),
- COUNTRY_REGD("AL", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("AZ", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("BH", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("BA", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("BG", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("HR", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("EG", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("GH", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("IQ", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("IL", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("JO", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("KZ", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("KE", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("KW", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("KG", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("LB", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("LS", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("MK", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("MA", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("MZ", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("NA", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("NG", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("OM", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("QA", RTW89_QATAR, RTW89_QATAR, RTW89_QATAR),
- COUNTRY_REGD("RO", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("RU", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("SA", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("SN", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("RS", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("ME", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("ZA", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("TR", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("UA", RTW89_UKRAINE, RTW89_UKRAINE, RTW89_UKRAINE),
- COUNTRY_REGD("AE", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("YE", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("ZW", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("BD", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("KH", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("CN", RTW89_CN, RTW89_CN, RTW89_CN),
- COUNTRY_REGD("HK", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("IN", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("ID", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("KR", RTW89_KCC, RTW89_KCC, RTW89_KCC),
- COUNTRY_REGD("MY", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("PK", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("PH", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("SG", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("LK", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("TW", RTW89_FCC, RTW89_FCC, RTW89_ETSI),
- COUNTRY_REGD("TH", RTW89_THAILAND, RTW89_THAILAND, RTW89_THAILAND),
- COUNTRY_REGD("VN", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("AU", RTW89_ACMA, RTW89_ACMA, RTW89_ACMA),
- COUNTRY_REGD("NZ", RTW89_ACMA, RTW89_ACMA, RTW89_ACMA),
- COUNTRY_REGD("PG", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("CA", RTW89_IC, RTW89_IC, RTW89_IC),
- COUNTRY_REGD("JP", RTW89_MKK, RTW89_MKK, RTW89_MKK),
- COUNTRY_REGD("JM", RTW89_FCC, RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("AN", RTW89_FCC, RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("TT", RTW89_FCC, RTW89_FCC, RTW89_NA),
- COUNTRY_REGD("TN", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("AF", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("DZ", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("AS", RTW89_FCC, RTW89_FCC, RTW89_NA),
- COUNTRY_REGD("AD", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("AO", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("AI", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("AQ", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("AG", RTW89_FCC, RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("AM", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("AW", RTW89_FCC, RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("BS", RTW89_FCC, RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("BB", RTW89_FCC, RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("BY", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("BZ", RTW89_FCC, RTW89_FCC, RTW89_NA),
- COUNTRY_REGD("BJ", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("BM", RTW89_FCC, RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("BT", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("BW", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("BV", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("IO", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("VG", RTW89_FCC, RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("BN", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("BF", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("MM", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("BI", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("CM", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("CV", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("KY", RTW89_FCC, RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("CF", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("TD", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("CX", RTW89_ACMA, RTW89_ACMA, RTW89_NA),
- COUNTRY_REGD("CC", RTW89_ACMA, RTW89_ACMA, RTW89_NA),
- COUNTRY_REGD("KM", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("CG", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("CD", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("CK", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("CI", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("DJ", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("DM", RTW89_FCC, RTW89_FCC, RTW89_NA),
- COUNTRY_REGD("GQ", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("ER", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("ET", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("FK", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("FO", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("FJ", RTW89_FCC, RTW89_FCC, RTW89_NA),
- COUNTRY_REGD("GF", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("PF", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("TF", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("GA", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("GM", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("GE", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("GI", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("GL", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("GD", RTW89_FCC, RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("GP", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("GU", RTW89_FCC, RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("GG", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("GN", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("GW", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("GY", RTW89_FCC, RTW89_FCC, RTW89_NA),
- COUNTRY_REGD("HT", RTW89_FCC, RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("HM", RTW89_ACMA, RTW89_ACMA, RTW89_NA),
- COUNTRY_REGD("VA", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("IM", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("JE", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("KI", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("XK", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("LA", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("LR", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("LY", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("MO", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("MG", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("MW", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("MV", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("ML", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("MH", RTW89_FCC, RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("MQ", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("MR", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("MU", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("YT", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("FM", RTW89_FCC, RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("MD", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("MN", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("MS", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("NR", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("NP", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("NC", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("NE", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("NU", RTW89_ACMA, RTW89_ACMA, RTW89_NA),
- COUNTRY_REGD("NF", RTW89_ACMA, RTW89_ACMA, RTW89_NA),
- COUNTRY_REGD("MP", RTW89_FCC, RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("PW", RTW89_FCC, RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("RE", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("RW", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("SH", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("KN", RTW89_FCC, RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("LC", RTW89_FCC, RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("MF", RTW89_FCC, RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("SX", RTW89_FCC, RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("PM", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("VC", RTW89_FCC, RTW89_FCC, RTW89_NA),
- COUNTRY_REGD("WS", RTW89_FCC, RTW89_FCC, RTW89_NA),
- COUNTRY_REGD("SM", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("ST", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("SC", RTW89_FCC, RTW89_FCC, RTW89_NA),
- COUNTRY_REGD("SL", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("SB", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("SO", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("GS", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("SR", RTW89_FCC, RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("SJ", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("SZ", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("TJ", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("TZ", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("TG", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("TK", RTW89_ACMA, RTW89_ACMA, RTW89_NA),
- COUNTRY_REGD("TO", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("TM", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("TC", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("TV", RTW89_ETSI, RTW89_NA, RTW89_NA),
- COUNTRY_REGD("UG", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("VI", RTW89_FCC, RTW89_FCC, RTW89_FCC),
- COUNTRY_REGD("UZ", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI),
- COUNTRY_REGD("VU", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("WF", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("EH", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("ZM", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("CU", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("IR", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("SY", RTW89_ETSI, RTW89_NA, RTW89_NA),
- COUNTRY_REGD("SD", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
- COUNTRY_REGD("PS", RTW89_ETSI, RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("AR", RTW89_MEXICO, RTW89_MEXICO, RTW89_FCC, 0x0),
+ COUNTRY_REGD("BO", RTW89_FCC, RTW89_FCC, RTW89_NA, 0x0),
+ COUNTRY_REGD("BR", RTW89_FCC, RTW89_FCC, RTW89_FCC, 0x0),
+ COUNTRY_REGD("CL", RTW89_CHILE, RTW89_CHILE, RTW89_CHILE, 0x0),
+ COUNTRY_REGD("CO", RTW89_FCC, RTW89_FCC, RTW89_FCC, 0x0),
+ COUNTRY_REGD("CR", RTW89_FCC, RTW89_FCC, RTW89_FCC, 0x0),
+ COUNTRY_REGD("EC", RTW89_FCC, RTW89_FCC, RTW89_NA, 0x0),
+ COUNTRY_REGD("SV", RTW89_FCC, RTW89_FCC, RTW89_FCC, 0x0),
+ COUNTRY_REGD("GT", RTW89_FCC, RTW89_FCC, RTW89_FCC, 0x0),
+ COUNTRY_REGD("HN", RTW89_FCC, RTW89_FCC, RTW89_FCC, 0x0),
+ COUNTRY_REGD("MX", RTW89_MEXICO, RTW89_MEXICO, RTW89_FCC, 0x0),
+ COUNTRY_REGD("NI", RTW89_FCC, RTW89_FCC, RTW89_NA, 0x0),
+ COUNTRY_REGD("PA", RTW89_FCC, RTW89_FCC, RTW89_NA, 0x0),
+ COUNTRY_REGD("PY", RTW89_FCC, RTW89_FCC, RTW89_NA, 0x0),
+ COUNTRY_REGD("PE", RTW89_FCC, RTW89_FCC, RTW89_FCC, 0x0),
+ COUNTRY_REGD("US", RTW89_FCC, RTW89_FCC, RTW89_FCC, 0x1),
+ COUNTRY_REGD("UY", RTW89_FCC, RTW89_FCC, RTW89_NA, 0x0),
+ COUNTRY_REGD("VE", RTW89_FCC, RTW89_FCC, RTW89_NA, 0x0),
+ COUNTRY_REGD("PR", RTW89_FCC, RTW89_FCC, RTW89_NA, 0x0),
+ COUNTRY_REGD("DO", RTW89_FCC, RTW89_FCC, RTW89_FCC, 0x0),
+ COUNTRY_REGD("AT", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x2),
+ COUNTRY_REGD("BE", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x2),
+ COUNTRY_REGD("CY", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x2),
+ COUNTRY_REGD("CZ", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x2),
+ COUNTRY_REGD("DK", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x2),
+ COUNTRY_REGD("EE", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x2),
+ COUNTRY_REGD("FI", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x2),
+ COUNTRY_REGD("FR", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x2),
+ COUNTRY_REGD("DE", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x2),
+ COUNTRY_REGD("GR", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x2),
+ COUNTRY_REGD("HU", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x2),
+ COUNTRY_REGD("IS", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x2),
+ COUNTRY_REGD("IE", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x2),
+ COUNTRY_REGD("IT", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x2),
+ COUNTRY_REGD("LV", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x2),
+ COUNTRY_REGD("LI", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x2),
+ COUNTRY_REGD("LT", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x2),
+ COUNTRY_REGD("LU", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x2),
+ COUNTRY_REGD("MT", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x2),
+ COUNTRY_REGD("MC", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x2),
+ COUNTRY_REGD("NL", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x2),
+ COUNTRY_REGD("NO", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x2),
+ COUNTRY_REGD("PL", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x2),
+ COUNTRY_REGD("PT", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x2),
+ COUNTRY_REGD("SK", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x2),
+ COUNTRY_REGD("SI", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x2),
+ COUNTRY_REGD("ES", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x2),
+ COUNTRY_REGD("SE", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x2),
+ COUNTRY_REGD("CH", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x2),
+ COUNTRY_REGD("GB", RTW89_UK, RTW89_UK, RTW89_UK, 0x0),
+ COUNTRY_REGD("AL", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("AZ", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0),
+ COUNTRY_REGD("BH", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0),
+ COUNTRY_REGD("BA", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("BG", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x2),
+ COUNTRY_REGD("HR", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x2),
+ COUNTRY_REGD("EG", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0),
+ COUNTRY_REGD("GH", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0),
+ COUNTRY_REGD("IQ", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("IL", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0),
+ COUNTRY_REGD("JO", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0),
+ COUNTRY_REGD("KZ", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("KE", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0),
+ COUNTRY_REGD("KW", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0),
+ COUNTRY_REGD("KG", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0),
+ COUNTRY_REGD("LB", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0),
+ COUNTRY_REGD("LS", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0),
+ COUNTRY_REGD("MK", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("MA", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0),
+ COUNTRY_REGD("MZ", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0),
+ COUNTRY_REGD("NA", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("NG", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0),
+ COUNTRY_REGD("OM", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0),
+ COUNTRY_REGD("QA", RTW89_QATAR, RTW89_QATAR, RTW89_QATAR, 0x0),
+ COUNTRY_REGD("RO", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x2),
+ COUNTRY_REGD("RU", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("SA", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0),
+ COUNTRY_REGD("SN", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("RS", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0),
+ COUNTRY_REGD("ME", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("ZA", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0),
+ COUNTRY_REGD("TR", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0),
+ COUNTRY_REGD("UA", RTW89_UKRAINE, RTW89_UKRAINE, RTW89_UKRAINE, 0x0),
+ COUNTRY_REGD("AE", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0),
+ COUNTRY_REGD("YE", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("ZW", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0),
+ COUNTRY_REGD("BD", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("KH", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("CN", RTW89_CN, RTW89_CN, RTW89_CN, 0x0),
+ COUNTRY_REGD("HK", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0),
+ COUNTRY_REGD("IN", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("ID", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("KR", RTW89_KCC, RTW89_KCC, RTW89_KCC, 0x1),
+ COUNTRY_REGD("MY", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0),
+ COUNTRY_REGD("PK", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0),
+ COUNTRY_REGD("PH", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0),
+ COUNTRY_REGD("SG", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0),
+ COUNTRY_REGD("LK", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("TW", RTW89_FCC, RTW89_FCC, RTW89_ETSI, 0x0),
+ COUNTRY_REGD("TH", RTW89_THAILAND, RTW89_THAILAND, RTW89_THAILAND, 0x0),
+ COUNTRY_REGD("VN", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("AU", RTW89_ACMA, RTW89_ACMA, RTW89_ACMA, 0x0),
+ COUNTRY_REGD("NZ", RTW89_ACMA, RTW89_ACMA, RTW89_ACMA, 0x0),
+ COUNTRY_REGD("PG", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0),
+ COUNTRY_REGD("CA", RTW89_IC, RTW89_IC, RTW89_IC, 0x1),
+ COUNTRY_REGD("JP", RTW89_MKK, RTW89_MKK, RTW89_MKK, 0x0),
+ COUNTRY_REGD("JM", RTW89_FCC, RTW89_FCC, RTW89_FCC, 0x0),
+ COUNTRY_REGD("AN", RTW89_FCC, RTW89_FCC, RTW89_FCC, 0x0),
+ COUNTRY_REGD("TT", RTW89_FCC, RTW89_FCC, RTW89_NA, 0x0),
+ COUNTRY_REGD("TN", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("AF", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("DZ", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0),
+ COUNTRY_REGD("AS", RTW89_FCC, RTW89_FCC, RTW89_NA, 0x0),
+ COUNTRY_REGD("AD", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("AO", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("AI", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0),
+ COUNTRY_REGD("AQ", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("AG", RTW89_FCC, RTW89_FCC, RTW89_FCC, 0x0),
+ COUNTRY_REGD("AM", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0),
+ COUNTRY_REGD("AW", RTW89_FCC, RTW89_FCC, RTW89_FCC, 0x0),
+ COUNTRY_REGD("BS", RTW89_FCC, RTW89_FCC, RTW89_FCC, 0x0),
+ COUNTRY_REGD("BB", RTW89_FCC, RTW89_FCC, RTW89_FCC, 0x0),
+ COUNTRY_REGD("BY", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("BZ", RTW89_FCC, RTW89_FCC, RTW89_NA, 0x0),
+ COUNTRY_REGD("BJ", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("BM", RTW89_FCC, RTW89_FCC, RTW89_FCC, 0x0),
+ COUNTRY_REGD("BT", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("BW", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0),
+ COUNTRY_REGD("BV", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("IO", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("VG", RTW89_FCC, RTW89_FCC, RTW89_FCC, 0x0),
+ COUNTRY_REGD("BN", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("BF", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0),
+ COUNTRY_REGD("MM", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("BI", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0),
+ COUNTRY_REGD("CM", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("CV", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("KY", RTW89_FCC, RTW89_FCC, RTW89_FCC, 0x0),
+ COUNTRY_REGD("CF", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("TD", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0),
+ COUNTRY_REGD("CX", RTW89_ACMA, RTW89_ACMA, RTW89_NA, 0x0),
+ COUNTRY_REGD("CC", RTW89_ACMA, RTW89_ACMA, RTW89_NA, 0x0),
+ COUNTRY_REGD("KM", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("CG", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0),
+ COUNTRY_REGD("CD", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0),
+ COUNTRY_REGD("CK", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("CI", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("DJ", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0),
+ COUNTRY_REGD("DM", RTW89_FCC, RTW89_FCC, RTW89_NA, 0x0),
+ COUNTRY_REGD("GQ", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0),
+ COUNTRY_REGD("ER", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("ET", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("FK", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("FO", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("FJ", RTW89_FCC, RTW89_FCC, RTW89_NA, 0x0),
+ COUNTRY_REGD("GF", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("PF", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("TF", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("GA", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("GM", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0),
+ COUNTRY_REGD("GE", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0),
+ COUNTRY_REGD("GI", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0),
+ COUNTRY_REGD("GL", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("GD", RTW89_FCC, RTW89_FCC, RTW89_FCC, 0x0),
+ COUNTRY_REGD("GP", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("GU", RTW89_FCC, RTW89_FCC, RTW89_FCC, 0x0),
+ COUNTRY_REGD("GG", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("GN", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0),
+ COUNTRY_REGD("GW", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("GY", RTW89_FCC, RTW89_FCC, RTW89_NA, 0x0),
+ COUNTRY_REGD("HT", RTW89_FCC, RTW89_FCC, RTW89_FCC, 0x0),
+ COUNTRY_REGD("HM", RTW89_ACMA, RTW89_ACMA, RTW89_NA, 0x0),
+ COUNTRY_REGD("VA", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("IM", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("JE", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("KI", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("XK", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0),
+ COUNTRY_REGD("LA", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0),
+ COUNTRY_REGD("LR", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0),
+ COUNTRY_REGD("LY", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("MO", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("MG", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0),
+ COUNTRY_REGD("MW", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("MV", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("ML", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("MH", RTW89_FCC, RTW89_FCC, RTW89_FCC, 0x0),
+ COUNTRY_REGD("MQ", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("MR", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("MU", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0),
+ COUNTRY_REGD("YT", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("FM", RTW89_FCC, RTW89_FCC, RTW89_FCC, 0x0),
+ COUNTRY_REGD("MD", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0),
+ COUNTRY_REGD("MN", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0),
+ COUNTRY_REGD("MS", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("NR", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("NP", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("NC", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("NE", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("NU", RTW89_ACMA, RTW89_ACMA, RTW89_NA, 0x0),
+ COUNTRY_REGD("NF", RTW89_ACMA, RTW89_ACMA, RTW89_NA, 0x0),
+ COUNTRY_REGD("MP", RTW89_FCC, RTW89_FCC, RTW89_FCC, 0x0),
+ COUNTRY_REGD("PW", RTW89_FCC, RTW89_FCC, RTW89_FCC, 0x0),
+ COUNTRY_REGD("RE", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("RW", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("SH", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("KN", RTW89_FCC, RTW89_FCC, RTW89_FCC, 0x0),
+ COUNTRY_REGD("LC", RTW89_FCC, RTW89_FCC, RTW89_FCC, 0x0),
+ COUNTRY_REGD("MF", RTW89_FCC, RTW89_FCC, RTW89_FCC, 0x0),
+ COUNTRY_REGD("SX", RTW89_FCC, RTW89_FCC, RTW89_FCC, 0x0),
+ COUNTRY_REGD("PM", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("VC", RTW89_FCC, RTW89_FCC, RTW89_NA, 0x0),
+ COUNTRY_REGD("WS", RTW89_FCC, RTW89_FCC, RTW89_NA, 0x0),
+ COUNTRY_REGD("SM", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("ST", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("SC", RTW89_FCC, RTW89_FCC, RTW89_NA, 0x0),
+ COUNTRY_REGD("SL", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0),
+ COUNTRY_REGD("SB", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("SO", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("GS", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("SR", RTW89_FCC, RTW89_FCC, RTW89_FCC, 0x0),
+ COUNTRY_REGD("SJ", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("SZ", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0),
+ COUNTRY_REGD("TJ", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0),
+ COUNTRY_REGD("TZ", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0),
+ COUNTRY_REGD("TG", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0),
+ COUNTRY_REGD("TK", RTW89_ACMA, RTW89_ACMA, RTW89_NA, 0x0),
+ COUNTRY_REGD("TO", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("TM", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("TC", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("TV", RTW89_ETSI, RTW89_NA, RTW89_NA, 0x0),
+ COUNTRY_REGD("UG", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("VI", RTW89_FCC, RTW89_FCC, RTW89_FCC, 0x0),
+ COUNTRY_REGD("UZ", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0),
+ COUNTRY_REGD("VU", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("WF", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("EH", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("ZM", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("CU", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("IR", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("SY", RTW89_ETSI, RTW89_NA, RTW89_NA, 0x0),
+ COUNTRY_REGD("SD", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
+ COUNTRY_REGD("PS", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0),
};
static const char rtw89_alpha2_list_eu[][3] = {
@@ -295,13 +304,16 @@ static const char rtw89_alpha2_list_eu[][3] = {
"RO",
};
-static const struct rtw89_regd *rtw89_regd_find_reg_by_name(const char *alpha2)
+static const struct rtw89_regd *rtw89_regd_find_reg_by_name(struct rtw89_dev *rtwdev,
+ const char *alpha2)
{
+ struct rtw89_regulatory_info *regulatory = &rtwdev->regulatory;
+ const struct rtw89_regd_ctrl *regd_ctrl = &regulatory->ctrl;
u32 i;
- for (i = 0; i < ARRAY_SIZE(rtw89_regd_map); i++) {
- if (!memcmp(rtw89_regd_map[i].alpha2, alpha2, 2))
- return &rtw89_regd_map[i];
+ for (i = 0; i < regd_ctrl->nr; i++) {
+ if (!memcmp(regd_ctrl->map[i].alpha2, alpha2, 2))
+ return &regd_ctrl->map[i];
}
return &rtw89_ww_regd;
@@ -312,22 +324,25 @@ static bool rtw89_regd_is_ww(const struct rtw89_regd *regd)
return regd == &rtw89_ww_regd;
}
-static u8 rtw89_regd_get_index(const struct rtw89_regd *regd)
+static u8 rtw89_regd_get_index(struct rtw89_dev *rtwdev, const struct rtw89_regd *regd)
{
+ struct rtw89_regulatory_info *regulatory = &rtwdev->regulatory;
+ const struct rtw89_regd_ctrl *regd_ctrl = &regulatory->ctrl;
+
BUILD_BUG_ON(ARRAY_SIZE(rtw89_regd_map) > RTW89_REGD_MAX_COUNTRY_NUM);
if (rtw89_regd_is_ww(regd))
return RTW89_REGD_MAX_COUNTRY_NUM;
- return regd - rtw89_regd_map;
+ return regd - regd_ctrl->map;
}
-static u8 rtw89_regd_get_index_by_name(const char *alpha2)
+static u8 rtw89_regd_get_index_by_name(struct rtw89_dev *rtwdev, const char *alpha2)
{
const struct rtw89_regd *regd;
- regd = rtw89_regd_find_reg_by_name(alpha2);
- return rtw89_regd_get_index(regd);
+ regd = rtw89_regd_find_reg_by_name(rtwdev, alpha2);
+ return rtw89_regd_get_index(rtwdev, regd);
}
#define rtw89_debug_regd(_dev, _regd, _desc, _argv...) \
@@ -345,6 +360,7 @@ static void rtw89_regd_setup_unii4(struct rtw89_dev *rtwdev,
struct wiphy *wiphy)
{
struct rtw89_regulatory_info *regulatory = &rtwdev->regulatory;
+ const struct rtw89_regd_ctrl *regd_ctrl = &regulatory->ctrl;
const struct rtw89_chip_info *chip = rtwdev->chip;
struct ieee80211_supported_band *sband;
struct rtw89_acpi_dsm_result res = {};
@@ -382,8 +398,8 @@ static void rtw89_regd_setup_unii4(struct rtw89_dev *rtwdev,
"acpi: eval if allow unii-4: 0x%x\n", val);
bottom:
- for (i = 0; i < ARRAY_SIZE(rtw89_regd_map); i++) {
- const struct rtw89_regd *regd = &rtw89_regd_map[i];
+ for (i = 0; i < regd_ctrl->nr; i++) {
+ const struct rtw89_regd *regd = &regd_ctrl->map[i];
switch (regd->txpwr_regd[RTW89_BAND_5G]) {
case RTW89_FCC:
@@ -406,7 +422,7 @@ static void __rtw89_regd_setup_policy_6ghz(struct rtw89_dev *rtwdev, bool block,
struct rtw89_regulatory_info *regulatory = &rtwdev->regulatory;
u8 index;
- index = rtw89_regd_get_index_by_name(alpha2);
+ index = rtw89_regd_get_index_by_name(rtwdev, alpha2);
if (index == RTW89_REGD_MAX_COUNTRY_NUM) {
rtw89_debug(rtwdev, RTW89_DBG_REGD, "%s: unknown alpha2 %c%c\n",
__func__, alpha2[0], alpha2[1]);
@@ -474,6 +490,7 @@ out:
static void rtw89_regd_setup_policy_6ghz_sp(struct rtw89_dev *rtwdev)
{
struct rtw89_regulatory_info *regulatory = &rtwdev->regulatory;
+ const struct rtw89_regd_ctrl *regd_ctrl = &regulatory->ctrl;
const struct rtw89_acpi_policy_6ghz_sp *ptr;
struct rtw89_acpi_dsm_result res = {};
bool enable_by_us;
@@ -505,8 +522,8 @@ static void rtw89_regd_setup_policy_6ghz_sp(struct rtw89_dev *rtwdev)
enable_by_us = u8_get_bits(ptr->conf, RTW89_ACPI_CONF_6GHZ_SP_US);
- for (i = 0; i < ARRAY_SIZE(rtw89_regd_map); i++) {
- const struct rtw89_regd *tmp = &rtw89_regd_map[i];
+ for (i = 0; i < regd_ctrl->nr; i++) {
+ const struct rtw89_regd *tmp = &regd_ctrl->map[i];
if (enable_by_us && memcmp(tmp->alpha2, "US", 2) == 0)
clear_bit(i, regulatory->block_6ghz_sp);
@@ -573,8 +590,21 @@ bottom:
int rtw89_regd_setup(struct rtw89_dev *rtwdev)
{
+ struct rtw89_regulatory_info *regulatory = &rtwdev->regulatory;
+ struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info;
+ const struct rtw89_regd_data *regd_data = elm_info->regd;
struct wiphy *wiphy = rtwdev->hw->wiphy;
+ if (regd_data) {
+ regulatory->ctrl.nr = regd_data->nr;
+ regulatory->ctrl.map = regd_data->map;
+ } else {
+ regulatory->ctrl.nr = ARRAY_SIZE(rtw89_regd_map);
+ regulatory->ctrl.map = rtw89_regd_map;
+ }
+
+ regulatory->reg_6ghz_power = RTW89_REG_6GHZ_POWER_DFLT;
+
if (!wiphy)
return -EINVAL;
@@ -585,21 +615,16 @@ int rtw89_regd_setup(struct rtw89_dev *rtwdev)
return 0;
}
-int rtw89_regd_init(struct rtw89_dev *rtwdev,
- void (*reg_notifier)(struct wiphy *wiphy,
- struct regulatory_request *request))
+int rtw89_regd_init_hint(struct rtw89_dev *rtwdev)
{
- struct rtw89_regulatory_info *regulatory = &rtwdev->regulatory;
const struct rtw89_regd *chip_regd;
struct wiphy *wiphy = rtwdev->hw->wiphy;
int ret;
- regulatory->reg_6ghz_power = RTW89_REG_6GHZ_POWER_DFLT;
-
if (!wiphy)
return -EINVAL;
- chip_regd = rtw89_regd_find_reg_by_name(rtwdev->efuse.country_code);
+ chip_regd = rtw89_regd_find_reg_by_name(rtwdev, rtwdev->efuse.country_code);
if (!rtw89_regd_is_ww(chip_regd)) {
rtwdev->regulatory.regd = chip_regd;
/* Ignore country ie if there is a country domain programmed in chip */
@@ -637,7 +662,7 @@ static void rtw89_regd_apply_policy_unii4(struct rtw89_dev *rtwdev,
if (!chip->support_unii4)
return;
- index = rtw89_regd_get_index(regd);
+ index = rtw89_regd_get_index(rtwdev, regd);
if (index != RTW89_REGD_MAX_COUNTRY_NUM &&
!test_bit(index, regulatory->block_unii4))
return;
@@ -655,7 +680,7 @@ static bool regd_is_6ghz_blocked(struct rtw89_dev *rtwdev)
const struct rtw89_regd *regd = regulatory->regd;
u8 index;
- index = rtw89_regd_get_index(regd);
+ index = rtw89_regd_get_index(rtwdev, regd);
if (index != RTW89_REGD_MAX_COUNTRY_NUM &&
!test_bit(index, regulatory->block_6ghz))
return false;
@@ -696,11 +721,36 @@ static void rtw89_regd_apply_policy_6ghz(struct rtw89_dev *rtwdev,
sband->channels[i].flags |= IEEE80211_CHAN_DISABLED;
}
+static void rtw89_regd_apply_policy_tas(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_regulatory_info *regulatory = &rtwdev->regulatory;
+ const struct rtw89_regd *regd = regulatory->regd;
+ struct rtw89_tas_info *tas = &rtwdev->tas;
+
+ if (!tas->enable)
+ return;
+
+ tas->block_regd = !test_bit(RTW89_REGD_FUNC_TAS, regd->func_bitmap);
+}
+
+static void rtw89_regd_apply_policy_ant_gain(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_regulatory_info *regulatory = &rtwdev->regulatory;
+ struct rtw89_ant_gain_info *ant_gain = &rtwdev->ant_gain;
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ const struct rtw89_regd *regd = regulatory->regd;
+
+ if (!chip->support_ant_gain)
+ return;
+
+ ant_gain->block_country = !test_bit(RTW89_REGD_FUNC_DAG, regd->func_bitmap);
+}
+
static void rtw89_regd_notifier_apply(struct rtw89_dev *rtwdev,
struct wiphy *wiphy,
struct regulatory_request *request)
{
- rtwdev->regulatory.regd = rtw89_regd_find_reg_by_name(request->alpha2);
+ rtwdev->regulatory.regd = rtw89_regd_find_reg_by_name(rtwdev, request->alpha2);
/* This notification might be set from the system of distros,
* and it does not expect the regulatory will be modified by
* connecting to an AP (i.e. country ie).
@@ -713,14 +763,17 @@ static void rtw89_regd_notifier_apply(struct rtw89_dev *rtwdev,
rtw89_regd_apply_policy_unii4(rtwdev, wiphy);
rtw89_regd_apply_policy_6ghz(rtwdev, wiphy);
+ rtw89_regd_apply_policy_tas(rtwdev);
+ rtw89_regd_apply_policy_ant_gain(rtwdev);
}
+static
void rtw89_regd_notifier(struct wiphy *wiphy, struct regulatory_request *request)
{
struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
struct rtw89_dev *rtwdev = hw->priv;
- mutex_lock(&rtwdev->mutex);
+ wiphy_lock(wiphy);
rtw89_leave_ps_mode(rtwdev);
if (wiphy->regd) {
@@ -736,7 +789,7 @@ void rtw89_regd_notifier(struct wiphy *wiphy, struct regulatory_request *request
rtw89_core_set_chip_txpwr(rtwdev);
exit:
- mutex_unlock(&rtwdev->mutex);
+ wiphy_unlock(wiphy);
}
/* Maximum Transmit Power field (@raw) can be EIRP or PSD.
@@ -925,7 +978,7 @@ static bool __rtw89_reg_6ghz_power_recalc(struct rtw89_dev *rtwdev)
sel = RTW89_REG_6GHZ_POWER_DFLT;
if (sel == RTW89_REG_6GHZ_POWER_STD) {
- index = rtw89_regd_get_index(regd);
+ index = rtw89_regd_get_index(rtwdev, regd);
if (index == RTW89_REGD_MAX_COUNTRY_NUM ||
test_bit(index, regulatory->block_6ghz_sp)) {
rtw89_debug(rtwdev, RTW89_DBG_REGD,
@@ -986,7 +1039,7 @@ int rtw89_reg_6ghz_recalc(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvi
unsigned int changed = 0;
int ret;
- lockdep_assert_held(&rtwdev->mutex);
+ lockdep_assert_wiphy(rtwdev->hw->wiphy);
/* The result of reg_6ghz_tpe may depend on reg_6ghz_power type,
* so must do reg_6ghz_tpe_recalc() after reg_6ghz_power_recalc().
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8851b.c b/drivers/net/wireless/realtek/rtw89/rtw8851b.c
index c56f70267882..0d482cd57f6e 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8851b.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8851b.c
@@ -224,10 +224,17 @@ static const struct rtw89_edcca_regs rtw8851b_edcca_regs = {
.edcca_p_mask = B_EDCCA_LVL_MSK1,
.ppdu_level = R_SEG0R_EDCCA_LVL_V1,
.ppdu_mask = B_EDCCA_LVL_MSK3,
- .rpt_a = R_EDCCA_RPT_A,
- .rpt_b = R_EDCCA_RPT_B,
- .rpt_sel = R_EDCCA_RPT_SEL,
- .rpt_sel_mask = B_EDCCA_RPT_SEL_MSK,
+ .p = {{
+ .rpt_a = R_EDCCA_RPT_A,
+ .rpt_b = R_EDCCA_RPT_B,
+ .rpt_sel = R_EDCCA_RPT_SEL,
+ .rpt_sel_mask = B_EDCCA_RPT_SEL_MSK,
+ }, {
+ .rpt_a = R_EDCCA_RPT_P1_A,
+ .rpt_b = R_EDCCA_RPT_P1_B,
+ .rpt_sel = R_EDCCA_RPT_SEL,
+ .rpt_sel_mask = B_EDCCA_RPT_SEL_P1_MSK,
+ }},
.tx_collision_t2r_st = R_TX_COLLISION_T2R_ST,
.tx_collision_t2r_st_mask = B_TX_COLLISION_T2R_ST_M,
};
@@ -1596,10 +1603,16 @@ static void rtw8851b_rfk_channel(struct rtw89_dev *rtwdev,
enum rtw89_chanctx_idx chanctx_idx = rtwvif_link->chanctx_idx;
enum rtw89_phy_idx phy_idx = rtwvif_link->phy_idx;
+ rtw89_btc_ntfy_conn_rfk(rtwdev, true);
+
rtw8851b_rx_dck(rtwdev, phy_idx, chanctx_idx);
rtw8851b_iqk(rtwdev, phy_idx, chanctx_idx);
+ rtw89_btc_ntfy_preserve_bt_time(rtwdev, 30);
rtw8851b_tssi(rtwdev, phy_idx, true, chanctx_idx);
+ rtw89_btc_ntfy_preserve_bt_time(rtwdev, 30);
rtw8851b_dpk(rtwdev, phy_idx, chanctx_idx);
+
+ rtw89_btc_ntfy_conn_rfk(rtwdev, false);
}
static void rtw8851b_rfk_band_changed(struct rtw89_dev *rtwdev,
@@ -2410,6 +2423,7 @@ static const struct rtw89_chip_ops rtw8851b_chip_ops = {
.h2c_default_cmac_tbl = rtw89_fw_h2c_default_cmac_tbl,
.h2c_assoc_cmac_tbl = rtw89_fw_h2c_assoc_cmac_tbl,
.h2c_ampdu_cmac_tbl = NULL,
+ .h2c_txtime_cmac_tbl = rtw89_fw_h2c_txtime_cmac_tbl,
.h2c_default_dmac_tbl = NULL,
.h2c_update_beacon = rtw89_fw_h2c_update_beacon,
.h2c_ba_cam = rtw89_fw_h2c_ba_cam,
@@ -2445,6 +2459,7 @@ const struct rtw89_chip_info rtw8851b_chip_info = {
.try_ce_fw = true,
.bbmcu_nr = 0,
.needed_fw_elms = 0,
+ .fw_blacklist = NULL,
.fifo_size = 196608,
.small_fifo_size = true,
.dle_scc_rsvd_size = 98304,
@@ -2483,10 +2498,13 @@ const struct rtw89_chip_info rtw8851b_chip_info = {
BIT(NL80211_CHAN_WIDTH_80),
.support_unii4 = true,
.support_ant_gain = false,
+ .support_tas = false,
.ul_tb_waveform_ctrl = true,
.ul_tb_pwr_diff = false,
+ .rx_freq_frome_ie = true,
.hw_sec_hdr = false,
.hw_mgmt_tx_encrypt = false,
+ .hw_tkip_crypto = false,
.rf_path_num = 1,
.tx_nss = 1,
.rx_nss = 1,
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852a.c b/drivers/net/wireless/realtek/rtw89/rtw8852a.c
index 9bd2842c27d5..286334e26c84 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852a.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852a.c
@@ -522,10 +522,17 @@ static const struct rtw89_edcca_regs rtw8852a_edcca_regs = {
.edcca_p_mask = B_EDCCA_LVL_MSK1,
.ppdu_level = R_SEG0R_EDCCA_LVL,
.ppdu_mask = B_EDCCA_LVL_MSK3,
- .rpt_a = R_EDCCA_RPT_A,
- .rpt_b = R_EDCCA_RPT_B,
- .rpt_sel = R_EDCCA_RPT_SEL,
- .rpt_sel_mask = B_EDCCA_RPT_SEL_MSK,
+ .p = {{
+ .rpt_a = R_EDCCA_RPT_A,
+ .rpt_b = R_EDCCA_RPT_B,
+ .rpt_sel = R_EDCCA_RPT_SEL,
+ .rpt_sel_mask = B_EDCCA_RPT_SEL_MSK,
+ }, {
+ .rpt_a = R_EDCCA_RPT_P1_A,
+ .rpt_b = R_EDCCA_RPT_P1_B,
+ .rpt_sel = R_EDCCA_RPT_SEL,
+ .rpt_sel_mask = B_EDCCA_RPT_SEL_P1_MSK,
+ }},
.tx_collision_t2r_st = R_TX_COLLISION_T2R_ST,
.tx_collision_t2r_st_mask = B_TX_COLLISION_T2R_ST_M,
};
@@ -1356,10 +1363,16 @@ static void rtw8852a_rfk_channel(struct rtw89_dev *rtwdev,
enum rtw89_chanctx_idx chanctx_idx = rtwvif_link->chanctx_idx;
enum rtw89_phy_idx phy_idx = rtwvif_link->phy_idx;
+ rtw89_btc_ntfy_conn_rfk(rtwdev, true);
+
rtw8852a_rx_dck(rtwdev, phy_idx, true, chanctx_idx);
rtw8852a_iqk(rtwdev, phy_idx, chanctx_idx);
+ rtw89_btc_ntfy_preserve_bt_time(rtwdev, 30);
rtw8852a_tssi(rtwdev, phy_idx, chanctx_idx);
+ rtw89_btc_ntfy_preserve_bt_time(rtwdev, 30);
rtw8852a_dpk(rtwdev, phy_idx, chanctx_idx);
+
+ rtw89_btc_ntfy_conn_rfk(rtwdev, false);
}
static void rtw8852a_rfk_band_changed(struct rtw89_dev *rtwdev,
@@ -2136,6 +2149,7 @@ static const struct rtw89_chip_ops rtw8852a_chip_ops = {
.h2c_default_cmac_tbl = rtw89_fw_h2c_default_cmac_tbl,
.h2c_assoc_cmac_tbl = rtw89_fw_h2c_assoc_cmac_tbl,
.h2c_ampdu_cmac_tbl = NULL,
+ .h2c_txtime_cmac_tbl = rtw89_fw_h2c_txtime_cmac_tbl,
.h2c_default_dmac_tbl = NULL,
.h2c_update_beacon = rtw89_fw_h2c_update_beacon,
.h2c_ba_cam = rtw89_fw_h2c_ba_cam,
@@ -2162,6 +2176,7 @@ const struct rtw89_chip_info rtw8852a_chip_info = {
.try_ce_fw = false,
.bbmcu_nr = 0,
.needed_fw_elms = 0,
+ .fw_blacklist = NULL,
.fifo_size = 458752,
.small_fifo_size = false,
.dle_scc_rsvd_size = 0,
@@ -2201,10 +2216,13 @@ const struct rtw89_chip_info rtw8852a_chip_info = {
BIT(NL80211_CHAN_WIDTH_80),
.support_unii4 = false,
.support_ant_gain = false,
+ .support_tas = false,
.ul_tb_waveform_ctrl = false,
.ul_tb_pwr_diff = false,
+ .rx_freq_frome_ie = true,
.hw_sec_hdr = false,
.hw_mgmt_tx_encrypt = false,
+ .hw_tkip_crypto = false,
.rf_path_num = 2,
.tx_nss = 2,
.rx_nss = 2,
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852b.c b/drivers/net/wireless/realtek/rtw89/rtw8852b.c
index dfb2bf61b0b8..eceb4fb9880d 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852b.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852b.c
@@ -189,10 +189,17 @@ static const struct rtw89_edcca_regs rtw8852b_edcca_regs = {
.edcca_p_mask = B_EDCCA_LVL_MSK1,
.ppdu_level = R_SEG0R_EDCCA_LVL_V1,
.ppdu_mask = B_EDCCA_LVL_MSK3,
- .rpt_a = R_EDCCA_RPT_A,
- .rpt_b = R_EDCCA_RPT_B,
- .rpt_sel = R_EDCCA_RPT_SEL,
- .rpt_sel_mask = B_EDCCA_RPT_SEL_MSK,
+ .p = {{
+ .rpt_a = R_EDCCA_RPT_A,
+ .rpt_b = R_EDCCA_RPT_B,
+ .rpt_sel = R_EDCCA_RPT_SEL,
+ .rpt_sel_mask = B_EDCCA_RPT_SEL_MSK,
+ }, {
+ .rpt_a = R_EDCCA_RPT_P1_A,
+ .rpt_b = R_EDCCA_RPT_P1_B,
+ .rpt_sel = R_EDCCA_RPT_SEL,
+ .rpt_sel_mask = B_EDCCA_RPT_SEL_P1_MSK,
+ }},
.tx_collision_t2r_st = R_TX_COLLISION_T2R_ST,
.tx_collision_t2r_st_mask = B_TX_COLLISION_T2R_ST_M,
};
@@ -568,10 +575,16 @@ static void rtw8852b_rfk_channel(struct rtw89_dev *rtwdev,
enum rtw89_chanctx_idx chanctx_idx = rtwvif_link->chanctx_idx;
enum rtw89_phy_idx phy_idx = rtwvif_link->phy_idx;
+ rtw89_btc_ntfy_conn_rfk(rtwdev, true);
+
rtw8852b_rx_dck(rtwdev, phy_idx, chanctx_idx);
rtw8852b_iqk(rtwdev, phy_idx, chanctx_idx);
+ rtw89_btc_ntfy_preserve_bt_time(rtwdev, 30);
rtw8852b_tssi(rtwdev, phy_idx, true, chanctx_idx);
+ rtw89_btc_ntfy_preserve_bt_time(rtwdev, 30);
rtw8852b_dpk(rtwdev, phy_idx, chanctx_idx);
+
+ rtw89_btc_ntfy_conn_rfk(rtwdev, false);
}
static void rtw8852b_rfk_band_changed(struct rtw89_dev *rtwdev,
@@ -763,6 +776,7 @@ static const struct rtw89_chip_ops rtw8852b_chip_ops = {
.h2c_default_cmac_tbl = rtw89_fw_h2c_default_cmac_tbl,
.h2c_assoc_cmac_tbl = rtw89_fw_h2c_assoc_cmac_tbl,
.h2c_ampdu_cmac_tbl = NULL,
+ .h2c_txtime_cmac_tbl = rtw89_fw_h2c_txtime_cmac_tbl,
.h2c_default_dmac_tbl = NULL,
.h2c_update_beacon = rtw89_fw_h2c_update_beacon,
.h2c_ba_cam = rtw89_fw_h2c_ba_cam,
@@ -798,6 +812,7 @@ const struct rtw89_chip_info rtw8852b_chip_info = {
.try_ce_fw = true,
.bbmcu_nr = 0,
.needed_fw_elms = 0,
+ .fw_blacklist = &rtw89_fw_blacklist_default,
.fifo_size = 196608,
.small_fifo_size = true,
.dle_scc_rsvd_size = 98304,
@@ -837,10 +852,13 @@ const struct rtw89_chip_info rtw8852b_chip_info = {
BIT(NL80211_CHAN_WIDTH_80),
.support_unii4 = true,
.support_ant_gain = true,
+ .support_tas = false,
.ul_tb_waveform_ctrl = true,
.ul_tb_pwr_diff = false,
+ .rx_freq_frome_ie = true,
.hw_sec_hdr = false,
.hw_mgmt_tx_encrypt = false,
+ .hw_tkip_crypto = false,
.rf_path_num = 2,
.tx_nss = 2,
.rx_nss = 2,
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852b_common.c b/drivers/net/wireless/realtek/rtw89/rtw8852b_common.c
index 0e094ce9c9b0..99c9505b3cbd 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852b_common.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852b_common.c
@@ -621,9 +621,9 @@ static void rtw8852bt_ext_loss_avg_update(struct rtw89_dev *rtwdev,
if (ext_loss_a == ext_loss_b) {
ext_loss_avg = ext_loss_a;
} else {
- linear = rtw89_db_2_linear(abs(ext_loss_a - ext_loss_b)) + 1;
- linear = DIV_ROUND_CLOSEST_ULL(linear / 2, 1 << RTW89_LINEAR_FRAC_BITS);
- ext_loss_avg = rtw89_linear_2_db(linear);
+ linear = rtw89_db_to_linear(abs(ext_loss_a - ext_loss_b)) + 1;
+ linear /= 2;
+ ext_loss_avg = rtw89_linear_to_db(linear);
ext_loss_avg += min(ext_loss_a, ext_loss_b);
}
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852b_rfk.c b/drivers/net/wireless/realtek/rtw89/rtw8852b_rfk.c
index ef47a5facc83..fbf82d42687b 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852b_rfk.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852b_rfk.c
@@ -3585,9 +3585,10 @@ static void _tssi_alimentk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
u8 ch_idx = _tssi_ch_to_idx(rtwdev, channel);
struct rtw8852bx_bb_tssi_bak tssi_bak;
s32 aliment_diff, tssi_cw_default;
- u32 start_time, finish_time;
u32 bb_reg_backup[8] = {0};
+ ktime_t start_time;
const s16 *power;
+ s64 this_time;
u8 band;
bool ok;
u32 tmp;
@@ -3613,7 +3614,7 @@ static void _tssi_alimentk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
return;
}
- start_time = ktime_get_ns();
+ start_time = ktime_get();
if (chan->band_type == RTW89_BAND_2G)
power = power_2g;
@@ -3738,12 +3739,12 @@ out:
rtw8852bx_bb_restore_tssi(rtwdev, phy, &tssi_bak);
rtw8852bx_bb_tx_mode_switch(rtwdev, phy, 0);
- finish_time = ktime_get_ns();
- tssi_info->tssi_alimk_time += finish_time - start_time;
+ this_time = ktime_us_delta(ktime_get(), start_time);
+ tssi_info->tssi_alimk_time += this_time;
rtw89_debug(rtwdev, RTW89_DBG_RFK,
- "[TSSI PA K] %s processing time = %d ms\n", __func__,
- tssi_info->tssi_alimk_time);
+ "[TSSI PA K] %s processing time = %lld us (acc = %llu us)\n",
+ __func__, this_time, tssi_info->tssi_alimk_time);
}
void rtw8852b_dpk_init(struct rtw89_dev *rtwdev)
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852bt.c b/drivers/net/wireless/realtek/rtw89/rtw8852bt.c
index bde3e1fb7ca6..bbf37442c492 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852bt.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852bt.c
@@ -187,10 +187,17 @@ static const struct rtw89_edcca_regs rtw8852bt_edcca_regs = {
.edcca_p_mask = B_EDCCA_LVL_MSK1,
.ppdu_level = R_SEG0R_EDCCA_LVL_V1,
.ppdu_mask = B_EDCCA_LVL_MSK3,
- .rpt_a = R_EDCCA_RPT_A,
- .rpt_b = R_EDCCA_RPT_B,
- .rpt_sel = R_EDCCA_RPT_SEL,
- .rpt_sel_mask = B_EDCCA_RPT_SEL_MSK,
+ .p = {{
+ .rpt_a = R_EDCCA_RPT_A,
+ .rpt_b = R_EDCCA_RPT_B,
+ .rpt_sel = R_EDCCA_RPT_SEL,
+ .rpt_sel_mask = B_EDCCA_RPT_SEL_MSK,
+ }, {
+ .rpt_a = R_EDCCA_RPT_P1_A,
+ .rpt_b = R_EDCCA_RPT_P1_B,
+ .rpt_sel = R_EDCCA_RPT_SEL,
+ .rpt_sel_mask = B_EDCCA_RPT_SEL_P1_MSK,
+ }},
.tx_collision_t2r_st = R_TX_COLLISION_T2R_ST,
.tx_collision_t2r_st_mask = B_TX_COLLISION_T2R_ST_M,
};
@@ -541,10 +548,16 @@ static void rtw8852bt_rfk_channel(struct rtw89_dev *rtwdev,
enum rtw89_chanctx_idx chanctx_idx = rtwvif_link->chanctx_idx;
enum rtw89_phy_idx phy_idx = rtwvif_link->phy_idx;
+ rtw89_btc_ntfy_conn_rfk(rtwdev, true);
+
rtw8852bt_rx_dck(rtwdev, phy_idx, chanctx_idx);
rtw8852bt_iqk(rtwdev, phy_idx, chanctx_idx);
+ rtw89_btc_ntfy_preserve_bt_time(rtwdev, 30);
rtw8852bt_tssi(rtwdev, phy_idx, true, chanctx_idx);
+ rtw89_btc_ntfy_preserve_bt_time(rtwdev, 30);
rtw8852bt_dpk(rtwdev, phy_idx, chanctx_idx);
+
+ rtw89_btc_ntfy_conn_rfk(rtwdev, false);
}
static void rtw8852bt_rfk_band_changed(struct rtw89_dev *rtwdev,
@@ -697,6 +710,7 @@ static const struct rtw89_chip_ops rtw8852bt_chip_ops = {
.h2c_default_cmac_tbl = rtw89_fw_h2c_default_cmac_tbl,
.h2c_assoc_cmac_tbl = rtw89_fw_h2c_assoc_cmac_tbl,
.h2c_ampdu_cmac_tbl = NULL,
+ .h2c_txtime_cmac_tbl = rtw89_fw_h2c_txtime_cmac_tbl,
.h2c_default_dmac_tbl = NULL,
.h2c_update_beacon = rtw89_fw_h2c_update_beacon,
.h2c_ba_cam = rtw89_fw_h2c_ba_cam,
@@ -732,6 +746,7 @@ const struct rtw89_chip_info rtw8852bt_chip_info = {
.try_ce_fw = true,
.bbmcu_nr = 0,
.needed_fw_elms = RTW89_AX_GEN_DEF_NEEDED_FW_ELEMENTS_NO_6GHZ,
+ .fw_blacklist = &rtw89_fw_blacklist_default,
.fifo_size = 458752,
.small_fifo_size = true,
.dle_scc_rsvd_size = 98304,
@@ -770,10 +785,13 @@ const struct rtw89_chip_info rtw8852bt_chip_info = {
BIT(NL80211_CHAN_WIDTH_80),
.support_unii4 = true,
.support_ant_gain = true,
+ .support_tas = false,
.ul_tb_waveform_ctrl = true,
.ul_tb_pwr_diff = false,
+ .rx_freq_frome_ie = true,
.hw_sec_hdr = false,
.hw_mgmt_tx_encrypt = false,
+ .hw_tkip_crypto = true,
.rf_path_num = 2,
.tx_nss = 2,
.rx_nss = 2,
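
The EDCCA register table above (and the matching ones in rtw8852c.c and rtw8922a.c below) is reorganized from flat rpt_a/rpt_b/rpt_sel fields into a two-entry .p[] array so the second path's report registers can be selected by index. A hedged sketch of how such a table might be consumed; the loop bound of 2 mirrors the initializer, and the dump helper itself is illustrative rather than taken from the driver:

/* Sketch only: walk both per-path EDCCA report register sets through the
 * new regs->p[] array instead of hard-coded rpt_a/rpt_b fields.
 */
static void example_dump_edcca_reports(struct rtw89_dev *rtwdev,
				       const struct rtw89_edcca_regs *regs)
{
	int path;

	for (path = 0; path < 2; path++) {
		u32 a = rtw89_phy_read32_mask(rtwdev, regs->p[path].rpt_a, MASKDWORD);
		u32 b = rtw89_phy_read32_mask(rtwdev, regs->p[path].rpt_b, MASKDWORD);

		pr_debug("edcca path %d: rpt_a=%#x rpt_b=%#x\n", path, a, b);
	}
}
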
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk.c b/drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk.c
index 336a83e1d46b..6e6889eea9a0 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk.c
@@ -3663,9 +3663,10 @@ static void _tssi_alimentk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
u8 ch_idx = _tssi_ch_to_idx(rtwdev, channel);
struct rtw8852bx_bb_tssi_bak tssi_bak;
s32 aliment_diff, tssi_cw_default;
- u32 start_time, finish_time;
u32 bb_reg_backup[8] = {};
+ ktime_t start_time;
const s16 *power;
+ s64 this_time;
u8 band;
bool ok;
u32 tmp;
@@ -3675,7 +3676,7 @@ static void _tssi_alimentk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
"======> %s channel=%d path=%d\n", __func__, channel,
path);
- start_time = ktime_get_ns();
+ start_time = ktime_get();
if (chan->band_type == RTW89_BAND_2G)
power = power_2g;
@@ -3802,12 +3803,12 @@ out:
rtw8852bx_bb_restore_tssi(rtwdev, phy, &tssi_bak);
rtw8852bx_bb_tx_mode_switch(rtwdev, phy, 0);
- finish_time = ktime_get_ns();
- tssi_info->tssi_alimk_time += finish_time - start_time;
+ this_time = ktime_us_delta(ktime_get(), start_time);
+ tssi_info->tssi_alimk_time += this_time;
rtw89_debug(rtwdev, RTW89_DBG_RFK,
- "[TSSI PA K] %s processing time = %d ms\n", __func__,
- tssi_info->tssi_alimk_time);
+ "[TSSI PA K] %s processing time = %lld us (acc = %llu us)\n",
+ __func__, this_time, tssi_info->tssi_alimk_time);
}
void rtw8852bt_dpk_init(struct rtw89_dev *rtwdev)
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c.c b/drivers/net/wireless/realtek/rtw89/rtw8852c.c
index bc84b15e7826..08bcdf246382 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852c.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852c.c
@@ -12,6 +12,7 @@
#include "rtw8852c.h"
#include "rtw8852c_rfk.h"
#include "rtw8852c_table.h"
+#include "sar.h"
#include "util.h"
#define RTW8852C_FW_FORMAT_MAX 1
@@ -186,10 +187,17 @@ static const struct rtw89_edcca_regs rtw8852c_edcca_regs = {
.edcca_p_mask = B_EDCCA_LVL_MSK1,
.ppdu_level = R_SEG0R_EDCCA_LVL,
.ppdu_mask = B_EDCCA_LVL_MSK3,
- .rpt_a = R_EDCCA_RPT_A,
- .rpt_b = R_EDCCA_RPT_B,
- .rpt_sel = R_EDCCA_RPT_SEL,
- .rpt_sel_mask = B_EDCCA_RPT_SEL_MSK,
+ .p = {{
+ .rpt_a = R_EDCCA_RPT_A,
+ .rpt_b = R_EDCCA_RPT_B,
+ .rpt_sel = R_EDCCA_RPT_SEL,
+ .rpt_sel_mask = B_EDCCA_RPT_SEL_MSK,
+ }, {
+ .rpt_a = R_EDCCA_RPT_P1_A,
+ .rpt_b = R_EDCCA_RPT_P1_B,
+ .rpt_sel = R_EDCCA_RPT_SEL,
+ .rpt_sel_mask = B_EDCCA_RPT_SEL_P1_MSK,
+ }},
.tx_collision_t2r_st = R_TX_COLLISION_T2R_ST,
.tx_collision_t2r_st_mask = B_TX_COLLISION_T2R_ST_M,
};
@@ -1853,10 +1861,16 @@ static void rtw8852c_rfk_channel(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy_idx = rtwvif_link->phy_idx;
rtw8852c_mcc_get_ch_info(rtwdev, phy_idx);
+ rtw89_btc_ntfy_conn_rfk(rtwdev, true);
+
rtw8852c_rx_dck(rtwdev, phy_idx, false);
rtw8852c_iqk(rtwdev, phy_idx, chanctx_idx);
+ rtw89_btc_ntfy_preserve_bt_time(rtwdev, 30);
rtw8852c_tssi(rtwdev, phy_idx, chanctx_idx);
+ rtw89_btc_ntfy_preserve_bt_time(rtwdev, 30);
rtw8852c_dpk(rtwdev, phy_idx, chanctx_idx);
+
+ rtw89_btc_ntfy_conn_rfk(rtwdev, false);
rtw89_fw_h2c_rf_ntfy_mcc(rtwdev);
}
@@ -2867,6 +2881,7 @@ static int rtw8852c_mac_disable_bb_rf(struct rtw89_dev *rtwdev)
static const struct rtw89_chanctx_listener rtw8852c_chanctx_listener = {
.callbacks[RTW89_CHANCTX_CALLBACK_RFK] = rtw8852c_rfk_chanctx_cb,
+ .callbacks[RTW89_CHANCTX_CALLBACK_TAS] = rtw89_tas_chanctx_cb,
};
#ifdef CONFIG_PM
@@ -2928,6 +2943,7 @@ static const struct rtw89_chip_ops rtw8852c_chip_ops = {
.h2c_default_cmac_tbl = rtw89_fw_h2c_default_cmac_tbl,
.h2c_assoc_cmac_tbl = rtw89_fw_h2c_assoc_cmac_tbl,
.h2c_ampdu_cmac_tbl = NULL,
+ .h2c_txtime_cmac_tbl = rtw89_fw_h2c_txtime_cmac_tbl,
.h2c_default_dmac_tbl = NULL,
.h2c_update_beacon = rtw89_fw_h2c_update_beacon,
.h2c_ba_cam = rtw89_fw_h2c_ba_cam,
@@ -2954,6 +2970,7 @@ const struct rtw89_chip_info rtw8852c_chip_info = {
.try_ce_fw = false,
.bbmcu_nr = 0,
.needed_fw_elms = 0,
+ .fw_blacklist = &rtw89_fw_blacklist_default,
.fifo_size = 458752,
.small_fifo_size = false,
.dle_scc_rsvd_size = 0,
@@ -2996,10 +3013,13 @@ const struct rtw89_chip_info rtw8852c_chip_info = {
BIT(NL80211_CHAN_WIDTH_160),
.support_unii4 = true,
.support_ant_gain = true,
+ .support_tas = true,
.ul_tb_waveform_ctrl = false,
.ul_tb_pwr_diff = true,
+ .rx_freq_frome_ie = false,
.hw_sec_hdr = true,
.hw_mgmt_tx_encrypt = true,
+ .hw_tkip_crypto = true,
.rf_path_num = 2,
.tx_nss = 2,
.rx_nss = 2,
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8922a.c b/drivers/net/wireless/realtek/rtw89/rtw8922a.c
index 11d66bfceb15..8082592db84a 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8922a.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8922a.c
@@ -205,10 +205,17 @@ static const struct rtw89_edcca_regs rtw8922a_edcca_regs = {
.edcca_p_mask = B_EDCCA_LVL_MSK1,
.ppdu_level = R_SEG0R_PPDU_LVL_BE,
.ppdu_mask = B_EDCCA_LVL_MSK1,
- .rpt_a = R_EDCCA_RPT_A_BE,
- .rpt_b = R_EDCCA_RPT_B_BE,
- .rpt_sel = R_EDCCA_RPT_SEL_BE,
- .rpt_sel_mask = B_EDCCA_RPT_SEL_MSK,
+ .p = {{
+ .rpt_a = R_EDCCA_RPT_A_BE,
+ .rpt_b = R_EDCCA_RPT_B_BE,
+ .rpt_sel = R_EDCCA_RPT_SEL_BE,
+ .rpt_sel_mask = B_EDCCA_RPT_SEL_MSK,
+ }, {
+ .rpt_a = R_EDCCA_RPT_P1_A_BE,
+ .rpt_b = R_EDCCA_RPT_P1_B_BE,
+ .rpt_sel = R_EDCCA_RPT_SEL_BE,
+ .rpt_sel_mask = B_EDCCA_RPT_SEL_P1_MSK,
+ }},
.rpt_sel_be = R_EDCCA_RPTREG_SEL_BE,
.rpt_sel_be_mask = B_EDCCA_RPTREG_SEL_BE_MSK,
.tx_collision_t2r_st = R_TX_COLLISION_T2R_ST_BE,
@@ -2149,6 +2156,56 @@ static void rtw8922a_set_txpwr_ref(struct rtw89_dev *rtwdev,
B_BE_PWR_REF_CTRL_CCK, ref_cck);
}
+static const struct rtw89_reg_def rtw8922a_txpwr_ref[][3] = {
+ {{ .addr = R_TXAGC_REF_DBM_P0, .mask = B_TXAGC_OFDM_REF_DBM_P0},
+ { .addr = R_TXAGC_REF_DBM_P0, .mask = B_TXAGC_CCK_REF_DBM_P0},
+ { .addr = R_TSSI_K_P0, .mask = B_TSSI_K_OFDM_P0}
+ },
+ {{ .addr = R_TXAGC_REF_DBM_RF1_P0, .mask = B_TXAGC_OFDM_REF_DBM_RF1_P0},
+ { .addr = R_TXAGC_REF_DBM_RF1_P0, .mask = B_TXAGC_CCK_REF_DBM_RF1_P0},
+ { .addr = R_TSSI_K_RF1_P0, .mask = B_TSSI_K_OFDM_RF1_P0}
+ },
+};
+
+static void rtw8922a_set_txpwr_diff(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
+ enum rtw89_phy_idx phy_idx)
+{
+ s16 pwr_ofst = rtw89_phy_ant_gain_pwr_offset(rtwdev, chan);
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ static const u32 path_ofst[] = {0x0, 0x100};
+ const struct rtw89_reg_def *txpwr_ref;
+ static const s16 tssi_k_base = 0x12;
+ s16 tssi_k_ofst = abs(pwr_ofst) + tssi_k_base;
+ s16 ofst_dec[RF_PATH_NUM_8922A];
+ s16 tssi_k[RF_PATH_NUM_8922A];
+ s16 pwr_ref_ofst;
+ s16 pwr_ref = 0;
+ u8 i;
+
+ if (rtwdev->hal.cv == CHIP_CAV)
+ pwr_ref = 16;
+
+ pwr_ref <<= chip->txpwr_factor_rf;
+ pwr_ref_ofst = pwr_ref - rtw89_phy_txpwr_bb_to_rf(rtwdev, abs(pwr_ofst));
+
+ ofst_dec[RF_PATH_A] = pwr_ofst > 0 ? pwr_ref : pwr_ref_ofst;
+ ofst_dec[RF_PATH_B] = pwr_ofst > 0 ? pwr_ref_ofst : pwr_ref;
+ tssi_k[RF_PATH_A] = pwr_ofst > 0 ? tssi_k_base : tssi_k_ofst;
+ tssi_k[RF_PATH_B] = pwr_ofst > 0 ? tssi_k_ofst : tssi_k_base;
+
+ for (i = 0; i < RF_PATH_NUM_8922A; i++) {
+ txpwr_ref = rtw8922a_txpwr_ref[phy_idx];
+
+ rtw89_phy_write32_mask(rtwdev, txpwr_ref[0].addr + path_ofst[i],
+ txpwr_ref[0].mask, ofst_dec[i]);
+ rtw89_phy_write32_mask(rtwdev, txpwr_ref[1].addr + path_ofst[i],
+ txpwr_ref[1].mask, ofst_dec[i]);
+ rtw89_phy_write32_mask(rtwdev, txpwr_ref[2].addr + path_ofst[i],
+ txpwr_ref[2].mask, tssi_k[i]);
+ }
+}
+
static void rtw8922a_bb_tx_triangular(struct rtw89_dev *rtwdev, bool en,
enum rtw89_phy_idx phy_idx)
{
@@ -2185,6 +2242,8 @@ static void rtw8922a_set_txpwr(struct rtw89_dev *rtwdev,
rtw8922a_set_tx_shape(rtwdev, chan, phy_idx);
rtw89_phy_set_txpwr_limit(rtwdev, chan, phy_idx);
rtw89_phy_set_txpwr_limit_ru(rtwdev, chan, phy_idx);
+ rtw8922a_set_txpwr_diff(rtwdev, chan, phy_idx);
+ rtw8922a_set_txpwr_ref(rtwdev, phy_idx);
}
static void rtw8922a_set_txpwr_ctrl(struct rtw89_dev *rtwdev,
@@ -2695,6 +2754,7 @@ static const struct rtw89_chip_ops rtw8922a_chip_ops = {
.h2c_default_cmac_tbl = rtw89_fw_h2c_default_cmac_tbl_g7,
.h2c_assoc_cmac_tbl = rtw89_fw_h2c_assoc_cmac_tbl_g7,
.h2c_ampdu_cmac_tbl = rtw89_fw_h2c_ampdu_cmac_tbl_g7,
+ .h2c_txtime_cmac_tbl = rtw89_fw_h2c_txtime_cmac_tbl_g7,
.h2c_default_dmac_tbl = rtw89_fw_h2c_default_dmac_tbl_v2,
.h2c_update_beacon = rtw89_fw_h2c_update_beacon_be,
.h2c_ba_cam = rtw89_fw_h2c_ba_cam_v1,
@@ -2721,6 +2781,7 @@ const struct rtw89_chip_info rtw8922a_chip_info = {
.try_ce_fw = false,
.bbmcu_nr = 1,
.needed_fw_elms = RTW89_BE_GEN_DEF_NEEDED_FW_ELEMENTS,
+ .fw_blacklist = &rtw89_fw_blacklist_default,
.fifo_size = 589824,
.small_fifo_size = false,
.dle_scc_rsvd_size = 0,
@@ -2760,11 +2821,14 @@ const struct rtw89_chip_info rtw8922a_chip_info = {
BIT(NL80211_CHAN_WIDTH_80) |
BIT(NL80211_CHAN_WIDTH_160),
.support_unii4 = true,
- .support_ant_gain = false,
+ .support_ant_gain = true,
+ .support_tas = false,
.ul_tb_waveform_ctrl = false,
.ul_tb_pwr_diff = false,
+ .rx_freq_frome_ie = false,
.hw_sec_hdr = true,
.hw_mgmt_tx_encrypt = true,
+ .hw_tkip_crypto = true,
.rf_path_num = 2,
.tx_nss = 2,
.rx_nss = 2,
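
To make the path selection in rtw8922a_set_txpwr_diff() above concrete, here is a stand-alone arithmetic sketch: with a positive antenna-gain offset, path A keeps the full reference power and the base TSSI K value while path B takes the reduced reference and the offset TSSI K, and vice versa for a negative offset. The numbers below are made up, the BB-to-RF conversion is simplified to a plain subtraction, and the shift of 2 stands in for txpwr_factor_rf, which this hunk does not show:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	int pwr_ofst = 4;		/* example offset, quarter-dB steps assumed */
	int pwr_ref = 16 << 2;		/* CAV reference scaled by an assumed factor of 2 */
	int pwr_ref_ofst = pwr_ref - abs(pwr_ofst);	/* simplified BB->RF conversion */
	int tssi_k_base = 0x12;
	int tssi_k_ofst = abs(pwr_ofst) + tssi_k_base;

	/* Same ternaries as the driver: the favoured path keeps the defaults. */
	int ofst_dec_a = pwr_ofst > 0 ? pwr_ref : pwr_ref_ofst;
	int ofst_dec_b = pwr_ofst > 0 ? pwr_ref_ofst : pwr_ref;
	int tssi_k_a = pwr_ofst > 0 ? tssi_k_base : tssi_k_ofst;
	int tssi_k_b = pwr_ofst > 0 ? tssi_k_ofst : tssi_k_base;

	printf("path A: ref=%d tssi_k=%#x\n", ofst_dec_a, tssi_k_a);
	printf("path B: ref=%d tssi_k=%#x\n", ofst_dec_b, tssi_k_b);
	return 0;
}
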
diff --git a/drivers/net/wireless/realtek/rtw89/sar.c b/drivers/net/wireless/realtek/rtw89/sar.c
index 871f45a6508c..0b5af9528702 100644
--- a/drivers/net/wireless/realtek/rtw89/sar.c
+++ b/drivers/net/wireless/realtek/rtw89/sar.c
@@ -7,10 +7,16 @@
#include "phy.h"
#include "reg.h"
#include "sar.h"
+#include "util.h"
#define RTW89_TAS_FACTOR 2 /* unit: 0.25 dBm */
+#define RTW89_TAS_SAR_GAP (1 << RTW89_TAS_FACTOR)
#define RTW89_TAS_DPR_GAP (1 << RTW89_TAS_FACTOR)
#define RTW89_TAS_DELTA (2 << RTW89_TAS_FACTOR)
+#define RTW89_TAS_TX_RATIO_THRESHOLD 70
+#define RTW89_TAS_DFLT_TX_RATIO 80
+#define RTW89_TAS_DPR_ON_OFFSET (RTW89_TAS_DELTA + RTW89_TAS_SAR_GAP)
+#define RTW89_TAS_DPR_OFF_OFFSET (4 << RTW89_TAS_FACTOR)
static enum rtw89_sar_subband rtw89_sar_get_subband(struct rtw89_dev *rtwdev,
u32 center_freq)
@@ -99,7 +105,7 @@ struct rtw89_sar_handler rtw89_sar_handlers[RTW89_SAR_SOURCE_NR] = {
typeof(_dev) _d = (_dev); \
BUILD_BUG_ON(!rtw89_sar_handlers[_s].descr_sar_source); \
BUILD_BUG_ON(!rtw89_sar_handlers[_s].query_sar_config); \
- lockdep_assert_held(&_d->mutex); \
+ lockdep_assert_wiphy(_d->hw->wiphy); \
_d->sar._cfg_name = *(_cfg_data); \
_d->sar.src = _s; \
} while (0)
@@ -117,8 +123,8 @@ static s8 rtw89_txpwr_sar_to_mac(struct rtw89_dev *rtwdev, u8 fct, s32 cfg)
RTW89_SAR_TXPWR_MAC_MAX);
}
-static s8 rtw89_txpwr_tas_to_sar(const struct rtw89_sar_handler *sar_hdl,
- s8 cfg)
+static s32 rtw89_txpwr_tas_to_sar(const struct rtw89_sar_handler *sar_hdl,
+ s32 cfg)
{
const u8 fct = sar_hdl->txpwr_factor_sar;
@@ -128,8 +134,8 @@ static s8 rtw89_txpwr_tas_to_sar(const struct rtw89_sar_handler *sar_hdl,
return cfg >> (RTW89_TAS_FACTOR - fct);
}
-static s8 rtw89_txpwr_sar_to_tas(const struct rtw89_sar_handler *sar_hdl,
- s8 cfg)
+static s32 rtw89_txpwr_sar_to_tas(const struct rtw89_sar_handler *sar_hdl,
+ s32 cfg)
{
const u8 fct = sar_hdl->txpwr_factor_sar;
@@ -139,18 +145,48 @@ static s8 rtw89_txpwr_sar_to_tas(const struct rtw89_sar_handler *sar_hdl,
return cfg << (RTW89_TAS_FACTOR - fct);
}
+static bool rtw89_tas_is_active(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_tas_info *tas = &rtwdev->tas;
+ struct rtw89_vif *rtwvif;
+
+ if (!tas->enable)
+ return false;
+
+ rtw89_for_each_rtwvif(rtwdev, rtwvif) {
+ if (ieee80211_vif_is_mld(rtwvif_to_vif(rtwvif)))
+ return false;
+ }
+
+ return true;
+}
+
+static const char *rtw89_tas_state_str(enum rtw89_tas_state state)
+{
+ switch (state) {
+ case RTW89_TAS_STATE_DPR_OFF:
+ return "DPR OFF";
+ case RTW89_TAS_STATE_DPR_ON:
+ return "DPR ON";
+ case RTW89_TAS_STATE_STATIC_SAR:
+ return "STATIC SAR";
+ default:
+ return NULL;
+ }
+}
+
s8 rtw89_query_sar(struct rtw89_dev *rtwdev, u32 center_freq)
{
const enum rtw89_sar_sources src = rtwdev->sar.src;
/* its members are protected by rtw89_sar_set_src() */
const struct rtw89_sar_handler *sar_hdl = &rtw89_sar_handlers[src];
struct rtw89_tas_info *tas = &rtwdev->tas;
- s8 delta;
+ s32 offset;
int ret;
s32 cfg;
u8 fct;
- lockdep_assert_held(&rtwdev->mutex);
+ lockdep_assert_wiphy(rtwdev->hw->wiphy);
if (src == RTW89_SAR_SOURCE_NONE)
return RTW89_SAR_TXPWR_MAC_MAX;
@@ -159,15 +195,17 @@ s8 rtw89_query_sar(struct rtw89_dev *rtwdev, u32 center_freq)
if (ret)
return RTW89_SAR_TXPWR_MAC_MAX;
- if (tas->enable) {
+ if (rtw89_tas_is_active(rtwdev)) {
switch (tas->state) {
case RTW89_TAS_STATE_DPR_OFF:
- return RTW89_SAR_TXPWR_MAC_MAX;
+ offset = rtw89_txpwr_tas_to_sar(sar_hdl, RTW89_TAS_DPR_OFF_OFFSET);
+ cfg += offset;
+ break;
case RTW89_TAS_STATE_DPR_ON:
- delta = rtw89_txpwr_tas_to_sar(sar_hdl, tas->delta);
- cfg -= delta;
+ offset = rtw89_txpwr_tas_to_sar(sar_hdl, RTW89_TAS_DPR_ON_OFFSET);
+ cfg -= offset;
break;
- case RTW89_TAS_STATE_DPR_FORBID:
+ case RTW89_TAS_STATE_STATIC_SAR:
default:
break;
}
@@ -178,72 +216,91 @@ s8 rtw89_query_sar(struct rtw89_dev *rtwdev, u32 center_freq)
return rtw89_txpwr_sar_to_mac(rtwdev, fct, cfg);
}
-void rtw89_print_sar(struct seq_file *m, struct rtw89_dev *rtwdev, u32 center_freq)
+int rtw89_print_sar(struct rtw89_dev *rtwdev, char *buf, size_t bufsz,
+ u32 center_freq)
{
const enum rtw89_sar_sources src = rtwdev->sar.src;
/* its members are protected by rtw89_sar_set_src() */
const struct rtw89_sar_handler *sar_hdl = &rtw89_sar_handlers[src];
const u8 fct_mac = rtwdev->chip->txpwr_factor_mac;
+ char *p = buf, *end = buf + bufsz;
int ret;
s32 cfg;
u8 fct;
- lockdep_assert_held(&rtwdev->mutex);
+ lockdep_assert_wiphy(rtwdev->hw->wiphy);
if (src == RTW89_SAR_SOURCE_NONE) {
- seq_puts(m, "no SAR is applied\n");
- return;
+ p += scnprintf(p, end - p, "no SAR is applied\n");
+ goto out;
}
- seq_printf(m, "source: %d (%s)\n", src, sar_hdl->descr_sar_source);
+ p += scnprintf(p, end - p, "source: %d (%s)\n", src,
+ sar_hdl->descr_sar_source);
ret = sar_hdl->query_sar_config(rtwdev, center_freq, &cfg);
if (ret) {
- seq_printf(m, "config: return code: %d\n", ret);
- seq_printf(m, "assign: max setting: %d (unit: 1/%lu dBm)\n",
- RTW89_SAR_TXPWR_MAC_MAX, BIT(fct_mac));
- return;
+ p += scnprintf(p, end - p, "config: return code: %d\n", ret);
+ p += scnprintf(p, end - p,
+ "assign: max setting: %d (unit: 1/%lu dBm)\n",
+ RTW89_SAR_TXPWR_MAC_MAX, BIT(fct_mac));
+ goto out;
}
fct = sar_hdl->txpwr_factor_sar;
- seq_printf(m, "config: %d (unit: 1/%lu dBm)\n", cfg, BIT(fct));
+ p += scnprintf(p, end - p, "config: %d (unit: 1/%lu dBm)\n", cfg,
+ BIT(fct));
+
+out:
+ return p - buf;
}
-void rtw89_print_tas(struct seq_file *m, struct rtw89_dev *rtwdev)
+int rtw89_print_tas(struct rtw89_dev *rtwdev, char *buf, size_t bufsz)
{
struct rtw89_tas_info *tas = &rtwdev->tas;
+ char *p = buf, *end = buf + bufsz;
- if (!tas->enable) {
- seq_puts(m, "no TAS is applied\n");
- return;
+ if (!rtw89_tas_is_active(rtwdev)) {
+ p += scnprintf(p, end - p, "no TAS is applied\n");
+ goto out;
}
- seq_printf(m, "DPR gap: %d\n", tas->dpr_gap);
- seq_printf(m, "TAS delta: %d\n", tas->delta);
+ p += scnprintf(p, end - p, "State: %s\n",
+ rtw89_tas_state_str(tas->state));
+ p += scnprintf(p, end - p, "Average time: %d\n",
+ tas->window_size * 2);
+ p += scnprintf(p, end - p, "SAR gap: %d dBm\n",
+ RTW89_TAS_SAR_GAP >> RTW89_TAS_FACTOR);
+ p += scnprintf(p, end - p, "DPR gap: %d dBm\n",
+ RTW89_TAS_DPR_GAP >> RTW89_TAS_FACTOR);
+ p += scnprintf(p, end - p, "DPR ON offset: %d dBm\n",
+ RTW89_TAS_DPR_ON_OFFSET >> RTW89_TAS_FACTOR);
+ p += scnprintf(p, end - p, "DPR OFF offset: %d dBm\n",
+ RTW89_TAS_DPR_OFF_OFFSET >> RTW89_TAS_FACTOR);
+
+out:
+ return p - buf;
}
static int rtw89_apply_sar_common(struct rtw89_dev *rtwdev,
const struct rtw89_sar_cfg_common *sar)
{
enum rtw89_sar_sources src;
- int ret = 0;
- mutex_lock(&rtwdev->mutex);
+ lockdep_assert_wiphy(rtwdev->hw->wiphy);
src = rtwdev->sar.src;
if (src != RTW89_SAR_SOURCE_NONE && src != RTW89_SAR_SOURCE_COMMON) {
rtw89_warn(rtwdev, "SAR source: %d is in use", src);
- ret = -EBUSY;
- goto exit;
+ return -EBUSY;
}
rtw89_sar_set_src(rtwdev, RTW89_SAR_SOURCE_COMMON, cfg_common, sar);
rtw89_core_set_chip_txpwr(rtwdev);
+ rtw89_tas_reset(rtwdev, false);
-exit:
- mutex_unlock(&rtwdev->mutex);
- return ret;
+ return 0;
}
static const struct cfg80211_sar_freq_ranges rtw89_common_sar_freq_ranges[] = {
@@ -279,6 +336,8 @@ int rtw89_ops_set_sar_specs(struct ieee80211_hw *hw,
s32 power;
u32 i, idx;
+ lockdep_assert_wiphy(rtwdev->hw->wiphy);
+
if (sar->type != NL80211_SAR_TYPE_POWER)
return -EINVAL;
@@ -304,65 +363,174 @@ int rtw89_ops_set_sar_specs(struct ieee80211_hw *hw,
return rtw89_apply_sar_common(rtwdev, &sar_common);
}
-static void rtw89_tas_state_update(struct rtw89_dev *rtwdev)
+static bool rtw89_tas_query_sar_config(struct rtw89_dev *rtwdev, s32 *cfg)
{
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
const enum rtw89_sar_sources src = rtwdev->sar.src;
/* its members are protected by rtw89_sar_set_src() */
const struct rtw89_sar_handler *sar_hdl = &rtw89_sar_handlers[src];
- struct rtw89_tas_info *tas = &rtwdev->tas;
- s32 txpwr_avg = tas->total_txpwr / RTW89_TAS_MAX_WINDOW / PERCENT;
- s32 dpr_on_threshold, dpr_off_threshold, cfg;
- enum rtw89_tas_state state = tas->state;
- const struct rtw89_chan *chan;
int ret;
- lockdep_assert_held(&rtwdev->mutex);
-
if (src == RTW89_SAR_SOURCE_NONE)
- return;
+ return false;
- chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
- ret = sar_hdl->query_sar_config(rtwdev, chan->freq, &cfg);
+ ret = sar_hdl->query_sar_config(rtwdev, chan->freq, cfg);
if (ret)
+ return false;
+
+ *cfg = rtw89_txpwr_sar_to_tas(sar_hdl, *cfg);
+
+ return true;
+}
+
+static void rtw89_tas_state_update(struct rtw89_dev *rtwdev,
+ enum rtw89_tas_state state)
+{
+ struct rtw89_tas_info *tas = &rtwdev->tas;
+
+ if (tas->state == state)
return;
- cfg = rtw89_txpwr_sar_to_tas(sar_hdl, cfg);
+ rtw89_debug(rtwdev, RTW89_DBG_SAR, "tas: switch state: %s -> %s\n",
+ rtw89_tas_state_str(tas->state), rtw89_tas_state_str(state));
- if (tas->delta >= cfg) {
+ tas->state = state;
+ rtw89_core_set_chip_txpwr(rtwdev);
+}
+
+static u32 rtw89_tas_get_window_size(struct rtw89_dev *rtwdev)
+{
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
+ u8 band = chan->band_type;
+ u8 regd = rtw89_regd_get(rtwdev, band);
+
+ switch (regd) {
+ default:
rtw89_debug(rtwdev, RTW89_DBG_SAR,
- "TAS delta exceed SAR limit\n");
- state = RTW89_TAS_STATE_DPR_FORBID;
- goto out;
+ "tas: regd: %u is unhandled\n", regd);
+ fallthrough;
+ case RTW89_IC:
+ case RTW89_KCC:
+ return 180;
+ case RTW89_FCC:
+ switch (band) {
+ case RTW89_BAND_2G:
+ return 50;
+ case RTW89_BAND_5G:
+ return 30;
+ case RTW89_BAND_6G:
+ default:
+ return 15;
+ }
+ break;
+ }
+}
+
+static void rtw89_tas_window_update(struct rtw89_dev *rtwdev)
+{
+ u32 window_size = rtw89_tas_get_window_size(rtwdev);
+ struct rtw89_tas_info *tas = &rtwdev->tas;
+ u64 total_txpwr = 0;
+ u8 head_idx;
+ u32 i, j;
+
+ WARN_ON_ONCE(tas->window_size > RTW89_TAS_TXPWR_WINDOW);
+
+ if (tas->window_size == window_size)
+ return;
+
+ rtw89_debug(rtwdev, RTW89_DBG_SAR, "tas: window update: %u -> %u\n",
+ tas->window_size, window_size);
+
+ head_idx = (tas->txpwr_tail_idx - window_size + 1 + RTW89_TAS_TXPWR_WINDOW) %
+ RTW89_TAS_TXPWR_WINDOW;
+ for (i = 0; i < window_size; i++) {
+ j = (head_idx + i) % RTW89_TAS_TXPWR_WINDOW;
+ total_txpwr += tas->txpwr_history[j];
+ }
+
+ tas->window_size = window_size;
+ tas->total_txpwr = total_txpwr;
+ tas->txpwr_head_idx = head_idx;
+}
+
+static void rtw89_tas_history_update(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_bb_ctx *bb = rtw89_get_bb_ctx(rtwdev, RTW89_PHY_0);
+ struct rtw89_env_monitor_info *env = &bb->env_monitor;
+ struct rtw89_tas_info *tas = &rtwdev->tas;
+ u8 tx_ratio = env->ifs_clm_tx_ratio;
+ u64 instant_txpwr, txpwr;
+
+ /* txpwr in units of linear (mW) multiplied by percentage */
+ if (tx_ratio == 0) {
+ /* special case: idle tx power
+ * use -40 dBm * 100 tx ratio
+ */
+ instant_txpwr = rtw89_db_to_linear(-40);
+ txpwr = instant_txpwr * 100;
+ } else {
+ instant_txpwr = tas->instant_txpwr;
+ txpwr = instant_txpwr * tx_ratio;
}
- dpr_on_threshold = cfg;
- dpr_off_threshold = cfg - tas->dpr_gap;
+ tas->total_txpwr += txpwr - tas->txpwr_history[tas->txpwr_head_idx];
+ tas->total_tx_ratio += tx_ratio - tas->tx_ratio_history[tas->tx_ratio_idx];
+ tas->tx_ratio_history[tas->tx_ratio_idx] = tx_ratio;
+
+ tas->txpwr_head_idx = (tas->txpwr_head_idx + 1) % RTW89_TAS_TXPWR_WINDOW;
+ tas->txpwr_tail_idx = (tas->txpwr_tail_idx + 1) % RTW89_TAS_TXPWR_WINDOW;
+ tas->tx_ratio_idx = (tas->tx_ratio_idx + 1) % RTW89_TAS_TX_RATIO_WINDOW;
+ tas->txpwr_history[tas->txpwr_tail_idx] = txpwr;
+
rtw89_debug(rtwdev, RTW89_DBG_SAR,
- "DPR_ON thold: %d, DPR_OFF thold: %d, txpwr_avg: %d\n",
- dpr_on_threshold, dpr_off_threshold, txpwr_avg);
+ "tas: instant_txpwr: %d, tx_ratio: %u, txpwr: %d\n",
+ rtw89_linear_to_db_quarter(instant_txpwr), tx_ratio,
+ rtw89_linear_to_db_quarter(div_u64(txpwr, PERCENT)));
+}
- if (txpwr_avg >= dpr_on_threshold)
+static void rtw89_tas_rolling_average(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_tas_info *tas = &rtwdev->tas;
+ s32 dpr_on_threshold, dpr_off_threshold;
+ enum rtw89_tas_state state;
+ u16 tx_ratio_avg;
+ s32 txpwr_avg;
+ u64 linear;
+
+ linear = DIV_ROUND_DOWN_ULL(tas->total_txpwr, tas->window_size * PERCENT);
+ txpwr_avg = rtw89_linear_to_db_quarter(linear);
+ tx_ratio_avg = tas->total_tx_ratio / RTW89_TAS_TX_RATIO_WINDOW;
+ dpr_on_threshold = tas->dpr_on_threshold;
+ dpr_off_threshold = tas->dpr_off_threshold;
+
+ rtw89_debug(rtwdev, RTW89_DBG_SAR,
+ "tas: DPR_ON: %d, DPR_OFF: %d, txpwr_avg: %d, tx_ratio_avg: %u\n",
+ dpr_on_threshold, dpr_off_threshold, txpwr_avg, tx_ratio_avg);
+
+ if (tx_ratio_avg >= RTW89_TAS_TX_RATIO_THRESHOLD)
+ state = RTW89_TAS_STATE_STATIC_SAR;
+ else if (txpwr_avg >= dpr_on_threshold)
state = RTW89_TAS_STATE_DPR_ON;
else if (txpwr_avg < dpr_off_threshold)
state = RTW89_TAS_STATE_DPR_OFF;
-
-out:
- if (tas->state == state)
+ else
return;
- rtw89_debug(rtwdev, RTW89_DBG_SAR,
- "TAS old state: %d, new state: %d\n", tas->state, state);
- tas->state = state;
- rtw89_core_set_chip_txpwr(rtwdev);
+ rtw89_tas_state_update(rtwdev, state);
}
void rtw89_tas_init(struct rtw89_dev *rtwdev)
{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
struct rtw89_tas_info *tas = &rtwdev->tas;
struct rtw89_acpi_dsm_result res = {};
int ret;
u8 val;
+ if (!chip->support_tas)
+ return;
+
ret = rtw89_acpi_evaluate_dsm(rtwdev, RTW89_ACPI_DSM_FUNC_TAS_EN, &res);
if (ret) {
rtw89_debug(rtwdev, RTW89_DBG_SAR,
@@ -386,64 +554,116 @@ void rtw89_tas_init(struct rtw89_dev *rtwdev)
rtw89_debug(rtwdev, RTW89_DBG_SAR, "TAS not enable\n");
return;
}
-
- tas->dpr_gap = RTW89_TAS_DPR_GAP;
- tas->delta = RTW89_TAS_DELTA;
}
-void rtw89_tas_reset(struct rtw89_dev *rtwdev)
+void rtw89_tas_reset(struct rtw89_dev *rtwdev, bool force)
{
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
struct rtw89_tas_info *tas = &rtwdev->tas;
+ u64 linear;
+ s32 cfg;
+ int i;
- if (!tas->enable)
+ if (!rtw89_tas_is_active(rtwdev))
+ return;
+
+ if (!rtw89_tas_query_sar_config(rtwdev, &cfg))
return;
- memset(&tas->txpwr_history, 0, sizeof(tas->txpwr_history));
- tas->total_txpwr = 0;
- tas->cur_idx = 0;
+ tas->dpr_on_threshold = cfg - RTW89_TAS_SAR_GAP;
+ tas->dpr_off_threshold = cfg - RTW89_TAS_SAR_GAP - RTW89_TAS_DPR_GAP;
+
+ /* avoid history reset after new SAR apply */
+ if (!force && tas->keep_history)
+ return;
+
+ linear = rtw89_db_quarter_to_linear(cfg) * RTW89_TAS_DFLT_TX_RATIO;
+ for (i = 0; i < RTW89_TAS_TXPWR_WINDOW; i++)
+ tas->txpwr_history[i] = linear;
+
+ for (i = 0; i < RTW89_TAS_TX_RATIO_WINDOW; i++)
+ tas->tx_ratio_history[i] = RTW89_TAS_DFLT_TX_RATIO;
+
+ tas->total_tx_ratio = RTW89_TAS_DFLT_TX_RATIO * RTW89_TAS_TX_RATIO_WINDOW;
+ tas->total_txpwr = linear * RTW89_TAS_TXPWR_WINDOW;
+ tas->window_size = RTW89_TAS_TXPWR_WINDOW;
+ tas->txpwr_head_idx = 0;
+ tas->txpwr_tail_idx = RTW89_TAS_TXPWR_WINDOW - 1;
+ tas->tx_ratio_idx = 0;
tas->state = RTW89_TAS_STATE_DPR_OFF;
-}
+ tas->backup_state = RTW89_TAS_STATE_DPR_OFF;
+ tas->keep_history = true;
-static const struct rtw89_reg_def txpwr_regs[] = {
- {R_PATH0_TXPWR, B_PATH0_TXPWR},
- {R_PATH1_TXPWR, B_PATH1_TXPWR},
-};
+ rtw89_debug(rtwdev, RTW89_DBG_SAR,
+ "tas: band: %u, freq: %u\n", chan->band_type, chan->freq);
+}
void rtw89_tas_track(struct rtw89_dev *rtwdev)
{
- struct rtw89_env_monitor_info *env = &rtwdev->env_monitor;
- const enum rtw89_sar_sources src = rtwdev->sar.src;
- u8 max_nss_num = rtwdev->chip->rf_path_num;
struct rtw89_tas_info *tas = &rtwdev->tas;
- s16 tmp, txpwr, instant_txpwr = 0;
- u32 val;
- int i;
+ struct rtw89_hal *hal = &rtwdev->hal;
+ s32 cfg;
- if (!tas->enable || src == RTW89_SAR_SOURCE_NONE)
+ if (hal->disabled_dm_bitmap & BIT(RTW89_DM_TAS))
return;
- if (env->ccx_watchdog_result != RTW89_PHY_ENV_MON_IFS_CLM)
+ if (!rtw89_tas_is_active(rtwdev))
return;
- for (i = 0; i < max_nss_num; i++) {
- val = rtw89_phy_read32_mask(rtwdev, txpwr_regs[i].addr,
- txpwr_regs[i].mask);
- tmp = sign_extend32(val, 8);
- if (tmp <= 0)
- return;
- instant_txpwr += tmp;
+ if (!rtw89_tas_query_sar_config(rtwdev, &cfg) || tas->block_regd) {
+ rtw89_tas_state_update(rtwdev, RTW89_TAS_STATE_STATIC_SAR);
+ return;
}
- instant_txpwr /= max_nss_num;
- /* in unit of 0.25 dBm multiply by percentage */
- txpwr = instant_txpwr * env->ifs_clm_tx_ratio;
- tas->total_txpwr += txpwr - tas->txpwr_history[tas->cur_idx];
- tas->txpwr_history[tas->cur_idx] = txpwr;
- rtw89_debug(rtwdev, RTW89_DBG_SAR,
- "instant_txpwr: %d, tx_ratio: %d, txpwr: %d\n",
- instant_txpwr, env->ifs_clm_tx_ratio, txpwr);
+ if (tas->pause)
+ return;
- tas->cur_idx = (tas->cur_idx + 1) % RTW89_TAS_MAX_WINDOW;
+ rtw89_tas_window_update(rtwdev);
+ rtw89_tas_history_update(rtwdev);
+ rtw89_tas_rolling_average(rtwdev);
+}
- rtw89_tas_state_update(rtwdev);
+void rtw89_tas_scan(struct rtw89_dev *rtwdev, bool start)
+{
+ struct rtw89_tas_info *tas = &rtwdev->tas;
+ s32 cfg;
+
+ if (!rtw89_tas_is_active(rtwdev))
+ return;
+
+ if (!rtw89_tas_query_sar_config(rtwdev, &cfg))
+ return;
+
+ if (start) {
+ tas->backup_state = tas->state;
+ rtw89_tas_state_update(rtwdev, RTW89_TAS_STATE_STATIC_SAR);
+ } else {
+ rtw89_tas_state_update(rtwdev, tas->backup_state);
+ }
+}
+
+void rtw89_tas_chanctx_cb(struct rtw89_dev *rtwdev,
+ enum rtw89_chanctx_state state)
+{
+ struct rtw89_tas_info *tas = &rtwdev->tas;
+ s32 cfg;
+
+ if (!rtw89_tas_is_active(rtwdev))
+ return;
+
+ if (!rtw89_tas_query_sar_config(rtwdev, &cfg))
+ return;
+
+ switch (state) {
+ case RTW89_CHANCTX_STATE_MCC_START:
+ tas->pause = true;
+ rtw89_tas_state_update(rtwdev, RTW89_TAS_STATE_STATIC_SAR);
+ break;
+ case RTW89_CHANCTX_STATE_MCC_STOP:
+ tas->pause = false;
+ break;
+ default:
+ break;
+ }
}
+EXPORT_SYMBOL(rtw89_tas_chanctx_cb);
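
The reworked TAS tracking above keeps a fixed-length history of transmit-power samples and maintains the rolling sum incrementally: each update adds the new sample and subtracts the one that falls out of the window, exactly as rtw89_tas_history_update() does with txpwr_history[]/total_txpwr. A stand-alone sketch of that bookkeeping with made-up window size and sample values:

#include <stdio.h>

#define WINDOW 8

static unsigned long long history[WINDOW];
static unsigned long long total;
static unsigned int head, tail = WINDOW - 1;

static void push_sample(unsigned long long sample)
{
	/* Add the new sample, drop the one leaving the window. */
	total += sample - history[head];
	head = (head + 1) % WINDOW;
	tail = (tail + 1) % WINDOW;
	history[tail] = sample;
}

int main(void)
{
	for (int i = 1; i <= 20; i++) {
		push_sample(i);
		printf("sample %2d, rolling average %llu\n", i, total / WINDOW);
	}
	return 0;
}
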
diff --git a/drivers/net/wireless/realtek/rtw89/sar.h b/drivers/net/wireless/realtek/rtw89/sar.h
index 4ae081d2d3b4..0df1661db9a8 100644
--- a/drivers/net/wireless/realtek/rtw89/sar.h
+++ b/drivers/net/wireless/realtek/rtw89/sar.h
@@ -19,12 +19,16 @@ struct rtw89_sar_handler {
extern const struct cfg80211_sar_capa rtw89_sar_capa;
s8 rtw89_query_sar(struct rtw89_dev *rtwdev, u32 center_freq);
-void rtw89_print_sar(struct seq_file *m, struct rtw89_dev *rtwdev, u32 center_freq);
-void rtw89_print_tas(struct seq_file *m, struct rtw89_dev *rtwdev);
+int rtw89_print_sar(struct rtw89_dev *rtwdev, char *buf, size_t bufsz,
+ u32 center_freq);
+int rtw89_print_tas(struct rtw89_dev *rtwdev, char *buf, size_t bufsz);
int rtw89_ops_set_sar_specs(struct ieee80211_hw *hw,
const struct cfg80211_sar_specs *sar);
void rtw89_tas_init(struct rtw89_dev *rtwdev);
-void rtw89_tas_reset(struct rtw89_dev *rtwdev);
+void rtw89_tas_reset(struct rtw89_dev *rtwdev, bool force);
void rtw89_tas_track(struct rtw89_dev *rtwdev);
+void rtw89_tas_scan(struct rtw89_dev *rtwdev, bool start);
+void rtw89_tas_chanctx_cb(struct rtw89_dev *rtwdev,
+ enum rtw89_chanctx_state state);
#endif
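
The new rtw89_print_sar()/rtw89_print_tas() prototypes above fill a caller-supplied buffer instead of a seq_file. The idiom they rely on is shown below: scnprintf() never writes past the remaining space and returns the bytes actually used, so the cursor p can be advanced blindly and the total length returned at the end. The wrapper and its arguments are illustrative:

#include <linux/kernel.h>

/* Illustrative consumer of a buf/bufsz-style print helper. */
static int example_print_status(char *buf, size_t bufsz, int state, int avg)
{
	char *p = buf, *end = buf + bufsz;

	/* scnprintf() returns the bytes actually written (excluding the
	 * terminator) and never exceeds the remaining space end - p.
	 */
	p += scnprintf(p, end - p, "State: %d\n", state);
	p += scnprintf(p, end - p, "Average: %d\n", avg);

	return p - buf;
}
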
diff --git a/drivers/net/wireless/realtek/rtw89/ser.c b/drivers/net/wireless/realtek/rtw89/ser.c
index 26a944d3b672..0740e303680c 100644
--- a/drivers/net/wireless/realtek/rtw89/ser.c
+++ b/drivers/net/wireless/realtek/rtw89/ser.c
@@ -156,9 +156,9 @@ static void ser_state_run(struct rtw89_ser *ser, u8 evt)
rtw89_debug(rtwdev, RTW89_DBG_SER, "ser: %s receive %s\n",
ser_st_name(ser), ser_ev_name(ser, evt));
- mutex_lock(&rtwdev->mutex);
+ wiphy_lock(rtwdev->hw->wiphy);
rtw89_leave_lps(rtwdev);
- mutex_unlock(&rtwdev->mutex);
+ wiphy_unlock(rtwdev->hw->wiphy);
ser->st_tbl[ser->state].st_func(ser, evt);
}
@@ -483,10 +483,13 @@ static void ser_l1_reset_pre_st_hdl(struct rtw89_ser *ser, u8 evt)
static void ser_reset_trx_st_hdl(struct rtw89_ser *ser, u8 evt)
{
struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);
+ struct wiphy *wiphy = rtwdev->hw->wiphy;
switch (evt) {
case SER_EV_STATE_IN:
- cancel_delayed_work_sync(&rtwdev->track_work);
+ wiphy_lock(wiphy);
+ wiphy_delayed_work_cancel(wiphy, &rtwdev->track_work);
+ wiphy_unlock(wiphy);
drv_stop_tx(ser);
if (hal_stop_dma(ser)) {
@@ -517,8 +520,8 @@ static void ser_reset_trx_st_hdl(struct rtw89_ser *ser, u8 evt)
hal_enable_dma(ser);
drv_resume_rx(ser);
drv_resume_tx(ser);
- ieee80211_queue_delayed_work(rtwdev->hw, &rtwdev->track_work,
- RTW89_TRACK_WORK_PERIOD);
+ wiphy_delayed_work_queue(wiphy, &rtwdev->track_work,
+ RTW89_TRACK_WORK_PERIOD);
break;
default:
@@ -708,9 +711,9 @@ static void ser_l2_reset_st_hdl(struct rtw89_ser *ser, u8 evt)
switch (evt) {
case SER_EV_STATE_IN:
- mutex_lock(&rtwdev->mutex);
+ wiphy_lock(rtwdev->hw->wiphy);
ser_l2_reset_st_pre_hdl(ser);
- mutex_unlock(&rtwdev->mutex);
+ wiphy_unlock(rtwdev->hw->wiphy);
ieee80211_restart_hw(rtwdev->hw);
ser_set_alarm(ser, SER_RECFG_TIMEOUT, SER_EV_L2_RECFG_TIMEOUT);
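
The ser.c hunks above swap the driver-private mutex for the wiphy mutex and move the periodic track_work onto the wiphy_delayed_work API, which runs the work with that same lock held. A minimal sketch of the idiom under those assumptions; the example_dev structure and helpers are illustrative:

#include <net/mac80211.h>
#include <linux/jiffies.h>

/* Illustrative driver context; only the fields used below. */
struct example_dev {
	struct ieee80211_hw *hw;
	struct wiphy_delayed_work track_work;
};

static void example_sync_stop(struct example_dev *edev)
{
	struct wiphy *wiphy = edev->hw->wiphy;

	/* Serialize against mac80211/cfg80211 callbacks via the wiphy mutex
	 * (previously a driver-private mutex in this driver).
	 */
	wiphy_lock(wiphy);
	wiphy_delayed_work_cancel(wiphy, &edev->track_work);
	wiphy_unlock(wiphy);
}

static void example_restart(struct example_dev *edev)
{
	/* Re-queue the periodic worker; it runs with the wiphy mutex held. */
	wiphy_delayed_work_queue(edev->hw->wiphy, &edev->track_work,
				 msecs_to_jiffies(2000));
}
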
diff --git a/drivers/net/wireless/realtek/rtw89/util.c b/drivers/net/wireless/realtek/rtw89/util.c
index e71956ce9853..073714db26f2 100644
--- a/drivers/net/wireless/realtek/rtw89/util.c
+++ b/drivers/net/wireless/realtek/rtw89/util.c
@@ -4,103 +4,159 @@
#include "util.h"
-#define FRAC_ROWS 3
-#define FRAC_ROW_MAX (FRAC_ROWS - 1)
-#define NORM_ROW_MIN FRAC_ROWS
-
-static const u32 db_invert_table[12][8] = {
- /* rows 0~2 in unit of U(32,3) */
- {10, 13, 16, 20, 25, 32, 40, 50},
- {64, 80, 101, 128, 160, 201, 256, 318},
- {401, 505, 635, 800, 1007, 1268, 1596, 2010},
- /* rows 3~11 in unit of U(32,0) */
- {316, 398, 501, 631, 794, 1000, 1259, 1585},
- {1995, 2512, 3162, 3981, 5012, 6310, 7943, 10000},
- {12589, 15849, 19953, 25119, 31623, 39811, 50119, 63098},
- {79433, 100000, 125893, 158489, 199526, 251189, 316228, 398107},
- {501187, 630957, 794328, 1000000, 1258925, 1584893, 1995262, 2511886},
- {3162278, 3981072, 5011872, 6309573, 7943282, 1000000, 12589254,
- 15848932},
- {19952623, 25118864, 31622777, 39810717, 50118723, 63095734, 79432823,
- 100000000},
- {125892541, 158489319, 199526232, 251188643, 316227766, 398107171,
- 501187234, 630957345},
- {794328235, 1000000000, 1258925412, 1584893192, 1995262315, 2511886432U,
- 3162277660U, 3981071706U},
+#define RTW89_DBM_QUARTER_FACTOR 2
+#define RTW89_MIN_DBM (-41.25 * (1 << RTW89_DBM_QUARTER_FACTOR))
+#define RTW89_MAX_DBM (96 * (1 << RTW89_DBM_QUARTER_FACTOR))
+#define RTW89_DB_INVERT_TABLE_OFFSET (-RTW89_MIN_DBM)
+
+static const u64 db_invert_table[] = {
+ /* in unit of 0.000001 */
+ 75, 79, 84, 89, 94, 100, 106, 112, 119, 126, 133, 141, 150, 158, 168, 178, 188,
+ 200, 211, 224, 237, 251, 266, 282, 299, 316, 335, 355, 376, 398, 422, 447, 473,
+ 501, 531, 562, 596, 631, 668, 708, 750, 794, 841, 891, 944, 1000, 1059, 1122, 1189,
+ 1259, 1334, 1413, 1496, 1585, 1679, 1778, 1884, 1995, 2113, 2239, 2371, 2512, 2661,
+ 2818, 2985, 3162, 3350, 3548, 3758, 3981, 4217, 4467, 4732, 5012, 5309, 5623, 5957,
+ 6310, 6683, 7079, 7499, 7943, 8414, 8913, 9441, 10000, 10593, 11220, 11885, 12589,
+ 13335, 14125, 14962, 15849, 16788, 17783, 18836, 19953, 21135, 22387, 23714, 25119,
+ 26607, 28184, 29854, 31623, 33497, 35481, 37584, 39811, 42170, 44668, 47315, 50119,
+ 53088, 56234, 59566, 63096, 66834, 70795, 74989, 79433, 84140, 89125, 94406, 100000,
+ 105925, 112202, 118850, 125893, 133352, 141254, 149624, 158489, 167880, 177828,
+ 188365, 199526, 211349, 223872, 237137, 251189, 266073, 281838, 298538, 316228,
+ 334965, 354813, 375837, 398107, 421697, 446684, 473151, 501187, 530884, 562341,
+ 595662, 630957, 668344, 707946, 749894, 794328, 841395, 891251, 944061, 1000000,
+ 1059254, 1122018, 1188502, 1258925, 1333521, 1412538, 1496236, 1584893, 1678804,
+ 1778279, 1883649, 1995262, 2113489, 2238721, 2371374, 2511886, 2660725, 2818383,
+ 2985383, 3162278, 3349654, 3548134, 3758374, 3981072, 4216965, 4466836, 4731513,
+ 5011872, 5308844, 5623413, 5956621, 6309573, 6683439, 7079458, 7498942, 7943282,
+ 8413951, 8912509, 9440609, 10000000, 10592537, 11220185, 11885022, 12589254,
+ 13335214, 14125375, 14962357, 15848932, 16788040, 17782794, 18836491, 19952623,
+ 21134890, 22387211, 23713737, 25118864, 26607251, 28183829, 29853826, 31622777,
+ 33496544, 35481339, 37583740, 39810717, 42169650, 44668359, 47315126, 50118723,
+ 53088444, 56234133, 59566214, 63095734, 66834392, 70794578, 74989421, 79432823,
+ 84139514, 89125094, 94406088, 100000000, 105925373, 112201845, 118850223, 125892541,
+ 133352143, 141253754, 149623566, 158489319, 167880402, 177827941, 188364909, 199526231,
+ 211348904, 223872114, 237137371, 251188643, 266072506, 281838293, 298538262, 316227766,
+ 334965439, 354813389, 375837404, 398107171, 421696503, 446683592, 473151259, 501187234,
+ 530884444, 562341325, 595662144, 630957344, 668343918, 707945784, 749894209, 794328235,
+ 841395142, 891250938, 944060876, 1000000000, 1059253725, 1122018454, 1188502227,
+ 1258925412, 1333521432, 1412537545, 1496235656, 1584893192, 1678804018, 1778279410,
+ 1883649089, 1995262315, 2113489040, 2238721139, 2371373706, 2511886432, 2660725060,
+ 2818382931, 2985382619, 3162277660, 3349654392, 3548133892, 3758374043, 3981071706,
+ 4216965034, 4466835922ULL, 4731512590ULL, 5011872336ULL, 5308844442ULL, 5623413252ULL,
+ 5956621435ULL, 6309573445ULL, 6683439176ULL, 7079457844ULL, 7498942093ULL,
+ 7943282347ULL, 8413951416ULL, 8912509381ULL, 9440608763ULL, 10000000000ULL,
+ 10592537252ULL, 11220184543ULL, 11885022274ULL, 12589254118ULL, 13335214322ULL,
+ 14125375446ULL, 14962356561ULL, 15848931925ULL, 16788040181ULL, 17782794100ULL,
+ 18836490895ULL, 19952623150ULL, 21134890398ULL, 22387211386ULL, 23713737057ULL,
+ 25118864315ULL, 26607250598ULL, 28183829313ULL, 29853826189ULL, 31622776602ULL,
+ 33496543916ULL, 35481338923ULL, 37583740429ULL, 39810717055ULL, 42169650343ULL,
+ 44668359215ULL, 47315125896ULL, 50118723363ULL, 53088444423ULL, 56234132519ULL,
+ 59566214353ULL, 63095734448ULL, 66834391757ULL, 70794578438ULL, 74989420933ULL,
+ 79432823472ULL, 84139514165ULL, 89125093813ULL, 94406087629ULL, 100000000000ULL,
+ 105925372518ULL, 112201845430ULL, 118850222744ULL, 125892541179ULL, 133352143216ULL,
+ 141253754462ULL, 149623565609ULL, 158489319246ULL, 167880401812ULL, 177827941004ULL,
+ 188364908949ULL, 199526231497ULL, 211348903984ULL, 223872113857ULL, 237137370566ULL,
+ 251188643151ULL, 266072505980ULL, 281838293126ULL, 298538261892ULL, 316227766017ULL,
+ 334965439158ULL, 354813389234ULL, 375837404288ULL, 398107170553ULL, 421696503429ULL,
+ 446683592151ULL, 473151258961ULL, 501187233627ULL, 530884444231ULL, 562341325190ULL,
+ 595662143529ULL, 630957344480ULL, 668343917569ULL, 707945784384ULL, 749894209332ULL,
+ 794328234724ULL, 841395141645ULL, 891250938134ULL, 944060876286ULL, 1000000000000ULL,
+ 1059253725177ULL, 1122018454302ULL, 1188502227437ULL, 1258925411794ULL,
+ 1333521432163ULL, 1412537544623ULL, 1496235656094ULL, 1584893192461ULL,
+ 1678804018123ULL, 1778279410039ULL, 1883649089490ULL, 1995262314969ULL,
+ 2113489039837ULL, 2238721138568ULL, 2371373705662ULL, 2511886431510ULL,
+ 2660725059799ULL, 2818382931264ULL, 2985382618918ULL, 3162277660168ULL,
+ 3349654391578ULL, 3548133892336ULL, 3758374042884ULL, 3981071705535ULL,
+ 4216965034286ULL, 4466835921510ULL, 4731512589615ULL, 5011872336273ULL,
+ 5308844442310ULL, 5623413251904ULL, 5956621435290ULL, 6309573444802ULL,
+ 6683439175686ULL, 7079457843841ULL, 7498942093325ULL, 7943282347243ULL,
+ 8413951416452ULL, 8912509381337ULL, 9440608762859ULL, 10000000000000ULL,
+ 10592537251773ULL, 11220184543020ULL, 11885022274370ULL, 12589254117942ULL,
+ 13335214321633ULL, 14125375446228ULL, 14962356560944ULL, 15848931924611ULL,
+ 16788040181226ULL, 17782794100389ULL, 18836490894898ULL, 19952623149689ULL,
+ 21134890398367ULL, 22387211385683ULL, 23713737056617ULL, 25118864315096ULL,
+ 26607250597988ULL, 28183829312645ULL, 29853826189180ULL, 31622776601684ULL,
+ 33496543915783ULL, 35481338923358ULL, 37583740428845ULL, 39810717055350ULL,
+ 42169650342858ULL, 44668359215096ULL, 47315125896148ULL, 50118723362727ULL,
+ 53088444423099ULL, 56234132519035ULL, 59566214352901ULL, 63095734448019ULL,
+ 66834391756862ULL, 70794578438414ULL, 74989420933246ULL, 79432823472428ULL,
+ 84139514164520ULL, 89125093813375ULL, 94406087628593ULL, 100000000000000ULL,
+ 105925372517729ULL, 112201845430197ULL, 118850222743702ULL, 125892541179417ULL,
+ 133352143216332ULL, 141253754462276ULL, 149623565609444ULL, 158489319246111ULL,
+ 167880401812256ULL, 177827941003893ULL, 188364908948981ULL, 199526231496888ULL,
+ 211348903983664ULL, 223872113856834ULL, 237137370566166ULL, 251188643150958ULL,
+ 266072505979882ULL, 281838293126446ULL, 298538261891796ULL, 316227766016838ULL,
+ 334965439157829ULL, 354813389233577ULL, 375837404288444ULL, 398107170553497ULL,
+ 421696503428583ULL, 446683592150964ULL, 473151258961482ULL, 501187233627272ULL,
+ 530884444230989ULL, 562341325190350ULL, 595662143529011ULL, 630957344480196ULL,
+ 668343917568615ULL, 707945784384138ULL, 749894209332456ULL, 794328234724284ULL,
+ 841395141645198ULL, 891250938133745ULL, 944060876285923ULL, 1000000000000000ULL,
+ 1059253725177290ULL, 1122018454301970ULL, 1188502227437020ULL, 1258925411794170ULL,
+ 1333521432163330ULL, 1412537544622760ULL, 1496235656094440ULL, 1584893192461110ULL,
+ 1678804018122560ULL, 1778279410038920ULL, 1883649089489810ULL, 1995262314968890ULL,
+ 2113489039836650ULL, 2238721138568340ULL, 2371373705661660ULL, 2511886431509590ULL,
+ 2660725059798820ULL, 2818382931264460ULL, 2985382618917960ULL, 3162277660168380ULL,
+ 3349654391578280ULL, 3548133892335770ULL, 3758374042884440ULL, 3981071705534970ULL
};
-u32 rtw89_linear_2_db(u64 val)
+s32 rtw89_linear_to_db_quarter(u64 val)
{
- u8 i, j;
- u32 dB;
-
- for (i = 0; i < 12; i++) {
- for (j = 0; j < 8; j++) {
- if (i <= FRAC_ROW_MAX &&
- (val << RTW89_LINEAR_FRAC_BITS) <= db_invert_table[i][j])
- goto cnt;
- else if (i > FRAC_ROW_MAX && val <= db_invert_table[i][j])
- goto cnt;
- }
- }
+ int r = ARRAY_SIZE(db_invert_table) - 1;
+ int l = 0;
+ int m;
- return 96; /* maximum 96 dB */
+ while (l <= r) {
+ m = l + (r - l) / 2;
-cnt:
- /* special cases */
- if (j == 0 && i == 0)
- goto end;
+ if (db_invert_table[m] == val)
+ return m - (s32)RTW89_DB_INVERT_TABLE_OFFSET;
- if (i == NORM_ROW_MIN && j == 0) {
- if (db_invert_table[NORM_ROW_MIN][0] - val >
- val - (db_invert_table[FRAC_ROW_MAX][7] >> RTW89_LINEAR_FRAC_BITS)) {
- i = FRAC_ROW_MAX;
- j = 7;
- }
- goto end;
+ if (db_invert_table[m] > val)
+ r = m - 1;
+ else
+ l = m + 1;
}
- if (i <= FRAC_ROW_MAX)
- val <<= RTW89_LINEAR_FRAC_BITS;
-
- /* compare difference to get precise dB */
- if (j == 0) {
- if (db_invert_table[i][j] - val >
- val - db_invert_table[i - 1][7]) {
- i--;
- j = 7;
- }
- } else {
- if (db_invert_table[i][j] - val >
- val - db_invert_table[i][j - 1]) {
- j--;
- }
- }
-end:
- dB = (i << 3) + j + 1;
+ if (l >= ARRAY_SIZE(db_invert_table))
+ return RTW89_MAX_DBM;
+ else if (r < 0)
+ return RTW89_MIN_DBM;
+ else if (val - db_invert_table[r] <= db_invert_table[l] - val)
+ return r - (s32)RTW89_DB_INVERT_TABLE_OFFSET;
+ else
+ return l - (s32)RTW89_DB_INVERT_TABLE_OFFSET;
+}
+EXPORT_SYMBOL(rtw89_linear_to_db_quarter);
- return dB;
+s32 rtw89_linear_to_db(u64 val)
+{
+ return rtw89_linear_to_db_quarter(val) >> RTW89_DBM_QUARTER_FACTOR;
}
-EXPORT_SYMBOL(rtw89_linear_2_db);
+EXPORT_SYMBOL(rtw89_linear_to_db);
-u64 rtw89_db_2_linear(u32 db)
+u64 rtw89_db_quarter_to_linear(s32 db)
{
- u64 linear;
- u8 i, j;
+ /* supported range -41.25 to 96 dBm, in unit of 0.25 dBm */
+ db = clamp_t(s32, db, RTW89_MIN_DBM, RTW89_MAX_DBM);
+ db += (s32)RTW89_DB_INVERT_TABLE_OFFSET;
- if (db > 96)
- db = 96;
- else if (db < 1)
- return 1;
+ return db_invert_table[db];
+}
+EXPORT_SYMBOL(rtw89_db_quarter_to_linear);
- i = (db - 1) >> 3;
- j = (db - 1) & 0x7;
+u64 rtw89_db_to_linear(s32 db)
+{
+ return rtw89_db_quarter_to_linear(db << RTW89_DBM_QUARTER_FACTOR);
+}
+EXPORT_SYMBOL(rtw89_db_to_linear);
- linear = db_invert_table[i][j];
+void rtw89_might_trailing_ellipsis(char *buf, size_t size, ssize_t used)
+{
+ static const char ellipsis[] = "...";
- if (i >= NORM_ROW_MIN)
- linear = linear << RTW89_LINEAR_FRAC_BITS;
+ /* length of null terminator isn't included in 'used' */
+ if (used + 1 < size || size < sizeof(ellipsis))
+ return;
- return linear;
+ memcpy(buf + size - sizeof(ellipsis), ellipsis, sizeof(ellipsis));
}
-EXPORT_SYMBOL(rtw89_db_2_linear);
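
The rewritten util.c above replaces the old two-dimensional lookup with a single quarter-dB-indexed table plus a binary search, covering roughly -41.25 to 96 dBm. A short usage sketch of the exported helpers; the wrapper function is illustrative, and note that quarter-dB units mean -40 dBm reads back as -160:

#include <linux/printk.h>
#include "util.h"	/* rtw89_db_to_linear() / rtw89_linear_to_db_quarter() */

static void example_round_trip(void)
{
	/* -40 dBm is the idle-power placeholder used by the TAS code above. */
	u64 linear = rtw89_db_to_linear(-40);
	s32 quarter_db = rtw89_linear_to_db_quarter(linear);

	/* quarter_db is in 0.25 dB units, so -40 dBm reads back as -160. */
	pr_debug("linear=%llu quarter_db=%d\n", linear, quarter_db);
}
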
diff --git a/drivers/net/wireless/realtek/rtw89/util.h b/drivers/net/wireless/realtek/rtw89/util.h
index e669544cafd3..bd08495301e4 100644
--- a/drivers/net/wireless/realtek/rtw89/util.h
+++ b/drivers/net/wireless/realtek/rtw89/util.h
@@ -6,13 +6,11 @@
#include "core.h"
-#define RTW89_LINEAR_FRAC_BITS 3
-
#define rtw89_iterate_vifs_bh(rtwdev, iterator, data) \
ieee80211_iterate_active_interfaces_atomic((rtwdev)->hw, \
IEEE80211_IFACE_ITER_NORMAL, iterator, data)
-/* call this function with rtwdev->mutex is held */
+/* call this function with wiphy mutex held */
#define rtw89_for_each_rtwvif(rtwdev, rtwvif) \
list_for_each_entry(rtwvif, &(rtwdev)->rtwvifs_list, list)
@@ -25,7 +23,7 @@ static inline bool rtw89_rtwvif_in_list(struct rtw89_dev *rtwdev,
{
struct rtw89_vif *rtwvif;
- lockdep_assert_held(&rtwdev->mutex);
+ lockdep_assert_wiphy(rtwdev->hw->wiphy);
rtw89_for_each_rtwvif(rtwdev, rtwvif)
if (rtwvif == new)
@@ -75,7 +73,10 @@ static inline void ether_addr_copy_mask(u8 *dst, const u8 *src, u8 mask)
}
}
-u32 rtw89_linear_2_db(u64 linear);
-u64 rtw89_db_2_linear(u32 db);
+s32 rtw89_linear_to_db_quarter(u64 val);
+s32 rtw89_linear_to_db(u64 val);
+u64 rtw89_db_quarter_to_linear(s32 db);
+u64 rtw89_db_to_linear(s32 db);
+void rtw89_might_trailing_ellipsis(char *buf, size_t size, ssize_t used);
#endif
diff --git a/drivers/net/wireless/realtek/rtw89/wow.c b/drivers/net/wireless/realtek/rtw89/wow.c
index 01754d031bb4..17eee58503cb 100644
--- a/drivers/net/wireless/realtek/rtw89/wow.c
+++ b/drivers/net/wireless/realtek/rtw89/wow.c
@@ -604,6 +604,8 @@ static struct ieee80211_key_conf *rtw89_wow_gtk_rekey(struct rtw89_dev *rtwdev,
struct ieee80211_key_conf *key;
u8 sz;
+ lockdep_assert_wiphy(rtwdev->hw->wiphy);
+
cipher_info = rtw89_cipher_alg_recognize(cipher);
sz = struct_size(rekey_conf, key, cipher_info->len);
rekey_conf = kmalloc(sz, GFP_KERNEL);
@@ -616,15 +618,10 @@ static struct ieee80211_key_conf *rtw89_wow_gtk_rekey(struct rtw89_dev *rtwdev,
memcpy(rekey_conf->key, gtk,
flex_array_size(rekey_conf, key, cipher_info->len));
- /* ieee80211_gtk_rekey_add() will call set_key(), therefore we
- * need to unlock mutex
- */
- mutex_unlock(&rtwdev->mutex);
if (ieee80211_vif_is_mld(wow_vif))
key = ieee80211_gtk_rekey_add(wow_vif, rekey_conf, rtwvif_link->link_id);
else
key = ieee80211_gtk_rekey_add(wow_vif, rekey_conf, -1);
- mutex_lock(&rtwdev->mutex);
kfree(rekey_conf);
if (IS_ERR(key)) {
diff --git a/drivers/net/wireless/silabs/wfx/bus.h b/drivers/net/wireless/silabs/wfx/bus.h
index ccadfdd6873c..79edaef20881 100644
--- a/drivers/net/wireless/silabs/wfx/bus.h
+++ b/drivers/net/wireless/silabs/wfx/bus.h
@@ -28,6 +28,7 @@ struct wfx_hwbus_ops {
void (*lock)(void *bus_priv);
void (*unlock)(void *bus_priv);
size_t (*align_size)(void *bus_priv, size_t size);
+ void (*set_wakeup)(void *priv, bool enabled);
};
extern struct sdio_driver wfx_sdio_driver;
diff --git a/drivers/net/wireless/silabs/wfx/bus_sdio.c b/drivers/net/wireless/silabs/wfx/bus_sdio.c
index f290eecde773..ab0793b9908f 100644
--- a/drivers/net/wireless/silabs/wfx/bus_sdio.c
+++ b/drivers/net/wireless/silabs/wfx/bus_sdio.c
@@ -14,6 +14,7 @@
#include <linux/of_irq.h>
#include <linux/irq.h>
#include <linux/align.h>
+#include <linux/pm.h>
#include "bus.h"
#include "wfx.h"
@@ -172,6 +173,13 @@ static size_t wfx_sdio_align_size(void *priv, size_t size)
return sdio_align_size(bus->func, size);
}
+static void wfx_sdio_set_wakeup(void *priv, bool enabled)
+{
+ struct wfx_sdio_priv *bus = priv;
+
+ device_set_wakeup_enable(&bus->func->dev, enabled);
+}
+
static const struct wfx_hwbus_ops wfx_sdio_hwbus_ops = {
.copy_from_io = wfx_sdio_copy_from_io,
.copy_to_io = wfx_sdio_copy_to_io,
@@ -180,6 +188,7 @@ static const struct wfx_hwbus_ops wfx_sdio_hwbus_ops = {
.lock = wfx_sdio_lock,
.unlock = wfx_sdio_unlock,
.align_size = wfx_sdio_align_size,
+ .set_wakeup = wfx_sdio_set_wakeup,
};
static const struct of_device_id wfx_sdio_of_match[] = {
@@ -191,9 +200,48 @@ static const struct of_device_id wfx_sdio_of_match[] = {
};
MODULE_DEVICE_TABLE(of, wfx_sdio_of_match);
+static int wfx_sdio_suspend(struct device *dev)
+{
+ struct sdio_func *func = dev_to_sdio_func(dev);
+ struct wfx_sdio_priv *bus = sdio_get_drvdata(func);
+ int ret;
+
+ if (!device_may_wakeup(dev))
+ return 0;
+
+ flush_work(&bus->core->hif.bh);
+ /* Either "wakeup-source" attribute or out-of-band IRQ is required for
+ * WoWLAN
+ */
+ if (bus->of_irq) {
+ ret = enable_irq_wake(bus->of_irq);
+ if (ret)
+ return ret;
+ } else {
+ ret = sdio_set_host_pm_flags(func, MMC_PM_WAKE_SDIO_IRQ);
+ if (ret)
+ return ret;
+ }
+ return sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER);
+}
+
+static int wfx_sdio_resume(struct device *dev)
+{
+ struct sdio_func *func = dev_to_sdio_func(dev);
+ struct wfx_sdio_priv *bus = sdio_get_drvdata(func);
+
+ if (!device_may_wakeup(dev))
+ return 0;
+ if (bus->of_irq)
+ return disable_irq_wake(bus->of_irq);
+ else
+ return 0;
+}
+
static int wfx_sdio_probe(struct sdio_func *func, const struct sdio_device_id *id)
{
const struct wfx_platform_data *pdata = of_device_get_match_data(&func->dev);
+ mmc_pm_flag_t pm_flag = sdio_get_host_pm_caps(func);
struct device_node *np = func->dev.of_node;
struct wfx_sdio_priv *bus;
int ret;
@@ -235,6 +283,9 @@ static int wfx_sdio_probe(struct sdio_func *func, const struct sdio_device_id *i
if (ret)
goto sdio_release;
+ if (pm_flag & MMC_PM_KEEP_POWER)
+ device_set_wakeup_capable(&func->dev, true);
+
return 0;
sdio_release:
@@ -261,6 +312,8 @@ static const struct sdio_device_id wfx_sdio_ids[] = {
};
MODULE_DEVICE_TABLE(sdio, wfx_sdio_ids);
+static DEFINE_SIMPLE_DEV_PM_OPS(wfx_sdio_pm_ops, wfx_sdio_suspend, wfx_sdio_resume);
+
struct sdio_driver wfx_sdio_driver = {
.name = "wfx-sdio",
.id_table = wfx_sdio_ids,
@@ -268,5 +321,6 @@ struct sdio_driver wfx_sdio_driver = {
.remove = wfx_sdio_remove,
.drv = {
.of_match_table = wfx_sdio_of_match,
+ .pm = &wfx_sdio_pm_ops,
}
};
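
The WoWLAN support added to the wfx SDIO bus above follows the standard device-wakeup pattern: mark the device wakeup-capable at probe when the host can keep power, then at suspend arm a wake source only if userspace enabled wakeup. A stripped-down sketch of that pattern for a hypothetical driver, keeping only the wake-IRQ handling:

#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/pm.h>

/* Illustrative bus context; only the fields used below. */
struct example_bus {
	struct device *dev;
	int wake_irq;
};

static int example_suspend(struct device *dev)
{
	struct example_bus *bus = dev_get_drvdata(dev);

	if (!device_may_wakeup(dev))
		return 0;

	/* Arm the interrupt as a wake source for the duration of suspend. */
	return enable_irq_wake(bus->wake_irq);
}

static int example_resume(struct device *dev)
{
	struct example_bus *bus = dev_get_drvdata(dev);

	if (!device_may_wakeup(dev))
		return 0;

	return disable_irq_wake(bus->wake_irq);
}

static DEFINE_SIMPLE_DEV_PM_OPS(example_pm_ops, example_suspend, example_resume);
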
diff --git a/drivers/net/wireless/silabs/wfx/bus_spi.c b/drivers/net/wireless/silabs/wfx/bus_spi.c
index 160b90114aad..45ee19e1ecbf 100644
--- a/drivers/net/wireless/silabs/wfx/bus_spi.c
+++ b/drivers/net/wireless/silabs/wfx/bus_spi.c
@@ -13,6 +13,7 @@
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/of.h>
+#include <linux/pm.h>
#include "bus.h"
#include "wfx.h"
@@ -179,6 +180,13 @@ static size_t wfx_spi_align_size(void *priv, size_t size)
return ALIGN(size, 4);
}
+static void wfx_spi_set_wakeup(void *priv, bool enabled)
+{
+ struct wfx_spi_priv *bus = priv;
+
+ device_set_wakeup_enable(&bus->func->dev, enabled);
+}
+
static const struct wfx_hwbus_ops wfx_spi_hwbus_ops = {
.copy_from_io = wfx_spi_copy_from_io,
.copy_to_io = wfx_spi_copy_to_io,
@@ -187,8 +195,29 @@ static const struct wfx_hwbus_ops wfx_spi_hwbus_ops = {
.lock = wfx_spi_lock,
.unlock = wfx_spi_unlock,
.align_size = wfx_spi_align_size,
+ .set_wakeup = wfx_spi_set_wakeup,
};
+static int wfx_spi_suspend(struct device *dev)
+{
+ struct spi_device *func = to_spi_device(dev);
+ struct wfx_spi_priv *bus = spi_get_drvdata(func);
+
+ if (!device_may_wakeup(dev))
+ return 0;
+ flush_work(&bus->core->hif.bh);
+ return enable_irq_wake(func->irq);
+}
+
+static int wfx_spi_resume(struct device *dev)
+{
+ struct spi_device *func = to_spi_device(dev);
+
+ if (!device_may_wakeup(dev))
+ return 0;
+ return disable_irq_wake(func->irq);
+}
+
static int wfx_spi_probe(struct spi_device *func)
{
struct wfx_platform_data *pdata;
@@ -239,7 +268,12 @@ static int wfx_spi_probe(struct spi_device *func)
if (!bus->core)
return -EIO;
- return wfx_probe(bus->core);
+ ret = wfx_probe(bus->core);
+ if (ret)
+ return ret;
+
+ device_set_wakeup_capable(&func->dev, true);
+ return 0;
}
static void wfx_spi_remove(struct spi_device *func)
@@ -273,12 +307,15 @@ static const struct of_device_id wfx_spi_of_match[] = {
MODULE_DEVICE_TABLE(of, wfx_spi_of_match);
#endif
+static DEFINE_SIMPLE_DEV_PM_OPS(wfx_spi_pm_ops, wfx_spi_suspend, wfx_spi_resume);
+
struct spi_driver wfx_spi_driver = {
+ .id_table = wfx_spi_id,
+ .probe = wfx_spi_probe,
+ .remove = wfx_spi_remove,
.driver = {
.name = "wfx-spi",
.of_match_table = of_match_ptr(wfx_spi_of_match),
+ .pm = &wfx_spi_pm_ops,
},
- .id_table = wfx_spi_id,
- .probe = wfx_spi_probe,
- .remove = wfx_spi_remove,
};
diff --git a/drivers/net/wireless/silabs/wfx/main.c b/drivers/net/wireless/silabs/wfx/main.c
index 64441c8bc460..a61128debbad 100644
--- a/drivers/net/wireless/silabs/wfx/main.c
+++ b/drivers/net/wireless/silabs/wfx/main.c
@@ -121,6 +121,12 @@ static const struct ieee80211_iface_combination wfx_iface_combinations[] = {
}
};
+#ifdef CONFIG_PM
+static const struct wiphy_wowlan_support wfx_wowlan_support = {
+ .flags = WIPHY_WOWLAN_ANY | WIPHY_WOWLAN_DISCONNECT,
+};
+#endif
+
static const struct ieee80211_ops wfx_ops = {
.start = wfx_start,
.stop = wfx_stop,
@@ -153,6 +159,11 @@ static const struct ieee80211_ops wfx_ops = {
.unassign_vif_chanctx = wfx_unassign_vif_chanctx,
.remain_on_channel = wfx_remain_on_channel,
.cancel_remain_on_channel = wfx_cancel_remain_on_channel,
+#ifdef CONFIG_PM
+ .suspend = wfx_suspend,
+ .resume = wfx_resume,
+ .set_wakeup = wfx_set_wakeup,
+#endif
};
bool wfx_api_older_than(struct wfx_dev *wdev, int major, int minor)
@@ -289,6 +300,9 @@ struct wfx_dev *wfx_init_common(struct device *dev, const struct wfx_platform_da
NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P |
NL80211_PROBE_RESP_OFFLOAD_SUPPORT_80211U;
hw->wiphy->features |= NL80211_FEATURE_AP_SCAN;
+#ifdef CONFIG_PM
+ hw->wiphy->wowlan = &wfx_wowlan_support;
+#endif
hw->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD;
hw->wiphy->max_remain_on_channel_duration = 5000;
diff --git a/drivers/net/wireless/silabs/wfx/sta.c b/drivers/net/wireless/silabs/wfx/sta.c
index 7c04810dbf3d..e95b9ded17d9 100644
--- a/drivers/net/wireless/silabs/wfx/sta.c
+++ b/drivers/net/wireless/silabs/wfx/sta.c
@@ -10,6 +10,7 @@
#include "sta.h"
#include "wfx.h"
+#include "bus.h"
#include "fwio.h"
#include "bh.h"
#include "key.h"
@@ -803,6 +804,30 @@ void wfx_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
}
}
+#ifdef CONFIG_PM
+int wfx_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
+{
+ /* FIXME: hardware also supports WIPHY_WOWLAN_MAGIC_PKT and other filters */
+ if (!wowlan->any || !wowlan->disconnect)
+ return -EINVAL;
+ return 0;
+}
+
+int wfx_resume(struct ieee80211_hw *hw)
+{
+ return 0;
+}
+
+void wfx_set_wakeup(struct ieee80211_hw *hw, bool enabled)
+{
+ struct wfx_dev *wdev = hw->priv;
+
+ if (enabled)
+ dev_info(wdev->dev, "support for WoWLAN is experimental\n");
+ wdev->hwbus_ops->set_wakeup(wdev->hwbus_priv, enabled);
+}
+#endif
+
int wfx_start(struct ieee80211_hw *hw)
{
return 0;
diff --git a/drivers/net/wireless/silabs/wfx/sta.h b/drivers/net/wireless/silabs/wfx/sta.h
index 7817c7c6f3dd..8702eed5267f 100644
--- a/drivers/net/wireless/silabs/wfx/sta.h
+++ b/drivers/net/wireless/silabs/wfx/sta.h
@@ -56,6 +56,9 @@ int wfx_assign_vif_chanctx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
void wfx_unassign_vif_chanctx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_bss_conf *link_conf,
struct ieee80211_chanctx_conf *conf);
+int wfx_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan);
+int wfx_resume(struct ieee80211_hw *hw);
+void wfx_set_wakeup(struct ieee80211_hw *hw, bool enabled);
/* Hardware API Callbacks */
void wfx_cooling_timeout_work(struct work_struct *work);
diff --git a/drivers/net/wireless/virtual/mac80211_hwsim.c b/drivers/net/wireless/virtual/mac80211_hwsim.c
index cf6a331d4042..cf3e976471c6 100644
--- a/drivers/net/wireless/virtual/mac80211_hwsim.c
+++ b/drivers/net/wireless/virtual/mac80211_hwsim.c
@@ -4,7 +4,7 @@
* Copyright (c) 2008, Jouni Malinen <j@w1.fi>
* Copyright (c) 2011, Javier Lopez <jlopex@gmail.com>
* Copyright (c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright (C) 2018 - 2024 Intel Corporation
+ * Copyright (C) 2018 - 2025 Intel Corporation
*/
/*
@@ -1983,11 +1983,13 @@ static void mac80211_hwsim_tx(struct ieee80211_hw *hw,
return;
}
- if (sta && sta->mlo) {
- if (WARN_ON(!link_sta)) {
- ieee80211_free_txskb(hw, skb);
- return;
- }
+ /* Do address translations only between shared links. It is
+ * possible that while a non-AP MLD station and an AP MLD
+ * station have shared links, the frame is intended to be sent
+ * on a link which is not shared (for example when sending a
+ * probe response).
+ */
+ if (sta && sta->mlo && link_sta) {
/* address translation to link addresses on TX */
ether_addr_copy(hdr->addr1, link_sta->addr);
ether_addr_copy(hdr->addr2, bss_conf->addr);
@@ -5345,6 +5347,7 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);
ieee80211_hw_set(hw, TDLS_WIDER_BW);
ieee80211_hw_set(hw, SUPPORTS_MULTI_BSSID);
+ ieee80211_hw_set(hw, STRICT);
if (param->mlo) {
hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_MLO;
@@ -5548,10 +5551,8 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
for (i = 0; i < ARRAY_SIZE(data->link_data); i++) {
- hrtimer_init(&data->link_data[i].beacon_timer, CLOCK_MONOTONIC,
- HRTIMER_MODE_ABS_SOFT);
- data->link_data[i].beacon_timer.function =
- mac80211_hwsim_beacon;
+ hrtimer_setup(&data->link_data[i].beacon_timer, mac80211_hwsim_beacon,
+ CLOCK_MONOTONIC, HRTIMER_MODE_ABS_SOFT);
data->link_data[i].link_id = i;
}
diff --git a/drivers/net/wireless/virtual/virt_wifi.c b/drivers/net/wireless/virtual/virt_wifi.c
index 4ee374080466..1fffeff2190c 100644
--- a/drivers/net/wireless/virtual/virt_wifi.c
+++ b/drivers/net/wireless/virtual/virt_wifi.c
@@ -146,7 +146,7 @@ static void virt_wifi_inform_bss(struct wiphy *wiphy)
static const struct {
u8 tag;
u8 len;
- u8 ssid[8];
+ u8 ssid[8] __nonstring;
} __packed ssid = {
.tag = WLAN_EID_SSID,
.len = VIRT_WIFI_SSID_LEN,
@@ -519,11 +519,13 @@ static rx_handler_result_t virt_wifi_rx_handler(struct sk_buff **pskb)
}
/* Called with rtnl lock held. */
-static int virt_wifi_newlink(struct net *src_net, struct net_device *dev,
- struct nlattr *tb[], struct nlattr *data[],
+static int virt_wifi_newlink(struct net_device *dev,
+ struct rtnl_newlink_params *params,
struct netlink_ext_ack *extack)
{
struct virt_wifi_netdev_priv *priv = netdev_priv(dev);
+ struct net *link_net = rtnl_newlink_link_net(params);
+ struct nlattr **tb = params->tb;
int err;
if (!tb[IFLA_LINK])
@@ -532,7 +534,7 @@ static int virt_wifi_newlink(struct net *src_net, struct net_device *dev,
netif_carrier_off(dev);
priv->upperdev = dev;
- priv->lowerdev = __dev_get_by_index(src_net,
+ priv->lowerdev = __dev_get_by_index(link_net,
nla_get_u32(tb[IFLA_LINK]));
if (!priv->lowerdev)
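
virt_wifi above (and wwan_core further below) are converted to the newer rtnl newlink callback signature, where the attribute arrays and the link netns arrive packed in struct rtnl_newlink_params. A hedged sketch of a converted callback; rtnl_newlink_link_net() is assumed to exist as introduced by this series, and the function itself is illustrative:

#include <linux/netdevice.h>
#include <net/rtnetlink.h>

/* Illustrative newlink callback on the updated signature: attributes and
 * the link namespace now come from struct rtnl_newlink_params instead of
 * separate arguments.
 */
static int example_newlink(struct net_device *dev,
			   struct rtnl_newlink_params *params,
			   struct netlink_ext_ack *extack)
{
	struct net *link_net = rtnl_newlink_link_net(params);
	struct nlattr **tb = params->tb;
	struct net_device *lower;

	if (!tb[IFLA_LINK])
		return -EINVAL;

	lower = __dev_get_by_index(link_net, nla_get_u32(tb[IFLA_LINK]));
	if (!lower)
		return -ENODEV;

	return 0;
}
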
diff --git a/drivers/net/wireless/zydas/zd1211rw/zd_mac.c b/drivers/net/wireless/zydas/zd1211rw/zd_mac.c
index f90c33d19b39..9653dbaac3c0 100644
--- a/drivers/net/wireless/zydas/zd1211rw/zd_mac.c
+++ b/drivers/net/wireless/zydas/zd1211rw/zd_mac.c
@@ -21,7 +21,7 @@
struct zd_reg_alpha2_map {
u32 reg;
- char alpha2[2];
+ char alpha2[2] __nonstring;
};
static struct zd_reg_alpha2_map reg_alpha2_map[] = {
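
The __nonstring annotations above (here and in virt_wifi.c) mark fixed-size character arrays that are deliberately not NUL-terminated, so newer compilers do not warn about string initializers that exactly fill the array. A small sketch of the same idea with an illustrative structure and comparison helper:

#include <linux/compiler_attributes.h>
#include <linux/string.h>
#include <linux/types.h>

/* A two-byte country code is deliberately not NUL-terminated; __nonstring
 * tells the compiler (and readers) that string functions must not assume a
 * terminator here.
 */
struct example_regdom {
	u32 reg;
	char alpha2[2] __nonstring;
};

static bool example_alpha2_match(const struct example_regdom *map,
				 const char *alpha2)
{
	return !memcmp(map->alpha2, alpha2, sizeof(map->alpha2));
}
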
diff --git a/drivers/net/wwan/iosm/iosm_ipc_imem.c b/drivers/net/wwan/iosm/iosm_ipc_imem.c
index 829515a601b3..530a3ea47a1a 100644
--- a/drivers/net/wwan/iosm/iosm_ipc_imem.c
+++ b/drivers/net/wwan/iosm/iosm_ipc_imem.c
@@ -1381,24 +1381,20 @@ struct iosm_imem *ipc_imem_init(struct iosm_pcie *pcie, unsigned int device_id,
/* The phase is set to power off. */
ipc_imem->phase = IPC_P_OFF;
- hrtimer_init(&ipc_imem->startup_timer, CLOCK_MONOTONIC,
- HRTIMER_MODE_REL);
- ipc_imem->startup_timer.function = ipc_imem_startup_timer_cb;
+ hrtimer_setup(&ipc_imem->startup_timer, ipc_imem_startup_timer_cb, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
- hrtimer_init(&ipc_imem->tdupdate_timer, CLOCK_MONOTONIC,
- HRTIMER_MODE_REL);
- ipc_imem->tdupdate_timer.function = ipc_imem_td_update_timer_cb;
+ hrtimer_setup(&ipc_imem->tdupdate_timer, ipc_imem_td_update_timer_cb, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
- hrtimer_init(&ipc_imem->fast_update_timer, CLOCK_MONOTONIC,
- HRTIMER_MODE_REL);
- ipc_imem->fast_update_timer.function = ipc_imem_fast_update_timer_cb;
+ hrtimer_setup(&ipc_imem->fast_update_timer, ipc_imem_fast_update_timer_cb, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
- hrtimer_init(&ipc_imem->td_alloc_timer, CLOCK_MONOTONIC,
- HRTIMER_MODE_REL);
- ipc_imem->td_alloc_timer.function = ipc_imem_td_alloc_timer_cb;
+ hrtimer_setup(&ipc_imem->td_alloc_timer, ipc_imem_td_alloc_timer_cb, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
- hrtimer_init(&ipc_imem->adb_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- ipc_imem->adb_timer.function = ipc_imem_adb_timer_cb;
+ hrtimer_setup(&ipc_imem->adb_timer, ipc_imem_adb_timer_cb, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
if (ipc_imem_config(ipc_imem)) {
dev_err(ipc_imem->dev, "failed to initialize the imem");
diff --git a/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
index 7a9c09cd4fdc..6a7a26085fc7 100644
--- a/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
+++ b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
@@ -41,6 +41,7 @@
#include <linux/types.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
+#include <net/gro.h>
#include "t7xx_dpmaif.h"
#include "t7xx_hif_dpmaif.h"
diff --git a/drivers/net/wwan/t7xx/t7xx_pci.c b/drivers/net/wwan/t7xx/t7xx_pci.c
index 02f2ec7cf4ce..8bf63f2dcbbf 100644
--- a/drivers/net/wwan/t7xx/t7xx_pci.c
+++ b/drivers/net/wwan/t7xx/t7xx_pci.c
@@ -32,7 +32,6 @@
#include <linux/pci.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
-#include <linux/pm_wakeup.h>
#include <linux/spinlock.h>
#include "t7xx_mhccif.h"
diff --git a/drivers/net/wwan/wwan_core.c b/drivers/net/wwan/wwan_core.c
index a51e2755991a..63a47d420bc5 100644
--- a/drivers/net/wwan/wwan_core.c
+++ b/drivers/net/wwan/wwan_core.c
@@ -967,15 +967,18 @@ out:
return dev;
}
-static int wwan_rtnl_newlink(struct net *src_net, struct net_device *dev,
- struct nlattr *tb[], struct nlattr *data[],
+static int wwan_rtnl_newlink(struct net_device *dev,
+ struct rtnl_newlink_params *params,
struct netlink_ext_ack *extack)
{
struct wwan_device *wwandev = wwan_dev_get_by_parent(dev->dev.parent);
- u32 link_id = nla_get_u32(data[IFLA_WWAN_LINK_ID]);
struct wwan_netdev_priv *priv = netdev_priv(dev);
+ struct nlattr **data = params->data;
+ u32 link_id;
int ret;
+ link_id = nla_get_u32(data[IFLA_WWAN_LINK_ID]);
+
if (IS_ERR(wwandev))
return PTR_ERR(wwandev);
@@ -1061,6 +1064,11 @@ static void wwan_create_default_link(struct wwan_device *wwandev,
{
struct nlattr *tb[IFLA_MAX + 1], *linkinfo[IFLA_INFO_MAX + 1];
struct nlattr *data[IFLA_WWAN_MAX + 1];
+ struct rtnl_newlink_params params = {
+ .src_net = &init_net,
+ .tb = tb,
+ .data = data,
+ };
struct net_device *dev;
struct nlmsghdr *nlh;
struct sk_buff *msg;
@@ -1105,7 +1113,7 @@ static void wwan_create_default_link(struct wwan_device *wwandev,
if (WARN_ON(IS_ERR(dev)))
goto unlock;
- if (WARN_ON(wwan_rtnl_newlink(&init_net, dev, tb, data, NULL))) {
+ if (WARN_ON(wwan_rtnl_newlink(dev, &params, NULL))) {
free_netdev(dev);
goto unlock;
}
diff --git a/drivers/ntb/test/ntb_pingpong.c b/drivers/ntb/test/ntb_pingpong.c
index 8aeca7914050..1c1c74f4ff2d 100644
--- a/drivers/ntb/test/ntb_pingpong.c
+++ b/drivers/ntb/test/ntb_pingpong.c
@@ -284,8 +284,7 @@ static struct pp_ctx *pp_create_data(struct ntb_dev *ntb)
pp->ntb = ntb;
atomic_set(&pp->count, 0);
spin_lock_init(&pp->lock);
- hrtimer_init(&pp->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- pp->timer.function = pp_timer_func;
+ hrtimer_setup(&pp->timer, pp_timer_func, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
return pp;
}
diff --git a/drivers/nvdimm/badrange.c b/drivers/nvdimm/badrange.c
index a002ea6fdd84..ee478ccde7c6 100644
--- a/drivers/nvdimm/badrange.c
+++ b/drivers/nvdimm/badrange.c
@@ -167,7 +167,7 @@ static void set_badblock(struct badblocks *bb, sector_t s, int num)
dev_dbg(bb->dev, "Found a bad range (0x%llx, 0x%llx)\n",
(u64) s * 512, (u64) num * 512);
/* this isn't an error as the hardware will still throw an exception */
- if (badblocks_set(bb, s, num, 1))
+ if (!badblocks_set(bb, s, num, 1))
dev_info_once(bb->dev, "%s: failed for sector %llx\n",
__func__, (u64) s);
}
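The inverted test reflects badblocks_set() now reporting success as a boolean (true on success) instead of a 0/-errno return; the nd.h, pfn_devs.c and pmem.c hunks below make the matching change of the num_bad out-parameter of badblocks_check() from int to sector_t, with the dev_dbg/dev_err format strings updated accordingly.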
diff --git a/drivers/nvdimm/nd.h b/drivers/nvdimm/nd.h
index 5ca06e9a2d29..cc5c8f3f81e8 100644
--- a/drivers/nvdimm/nd.h
+++ b/drivers/nvdimm/nd.h
@@ -673,7 +673,7 @@ static inline bool is_bad_pmem(struct badblocks *bb, sector_t sector,
{
if (bb->count) {
sector_t first_bad;
- int num_bad;
+ sector_t num_bad;
return !!badblocks_check(bb, sector, len / 512, &first_bad,
&num_bad);
diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c
index cfdfe0eaa512..8f3e816e805d 100644
--- a/drivers/nvdimm/pfn_devs.c
+++ b/drivers/nvdimm/pfn_devs.c
@@ -367,9 +367,10 @@ static int nd_pfn_clear_memmap_errors(struct nd_pfn *nd_pfn)
struct nd_namespace_common *ndns = nd_pfn->ndns;
void *zero_page = page_address(ZERO_PAGE(0));
struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
- int num_bad, meta_num, rc, bb_present;
+ int meta_num, rc, bb_present;
sector_t first_bad, meta_start;
struct nd_namespace_io *nsio;
+ sector_t num_bad;
if (nd_pfn->mode != PFN_MODE_PMEM)
return 0;
@@ -394,7 +395,7 @@ static int nd_pfn_clear_memmap_errors(struct nd_pfn *nd_pfn)
bb_present = badblocks_check(&nd_region->bb, meta_start,
meta_num, &first_bad, &num_bad);
if (bb_present) {
- dev_dbg(&nd_pfn->dev, "meta: %x badblocks at %llx\n",
+ dev_dbg(&nd_pfn->dev, "meta: %llx badblocks at %llx\n",
num_bad, first_bad);
nsoff = ALIGN_DOWN((nd_region->ndr_start
+ (first_bad << 9)) - nsio->res.start,
@@ -413,7 +414,7 @@ static int nd_pfn_clear_memmap_errors(struct nd_pfn *nd_pfn)
}
if (rc) {
dev_err(&nd_pfn->dev,
- "error clearing %x badblocks at %llx\n",
+ "error clearing %llx badblocks at %llx\n",
num_bad, first_bad);
return rc;
}
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index d81faa9d89c9..43156e1576c9 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -249,7 +249,7 @@ __weak long __pmem_direct_access(struct pmem_device *pmem, pgoff_t pgoff,
unsigned int num = PFN_PHYS(nr_pages) >> SECTOR_SHIFT;
struct badblocks *bb = &pmem->bb;
sector_t first_bad;
- int num_bad;
+ sector_t num_bad;
if (kaddr)
*kaddr = pmem->virt_addr + offset;
diff --git a/drivers/nvme/common/Kconfig b/drivers/nvme/common/Kconfig
index 244432e0b73d..da963e4f3f1f 100644
--- a/drivers/nvme/common/Kconfig
+++ b/drivers/nvme/common/Kconfig
@@ -12,3 +12,4 @@ config NVME_AUTH
select CRYPTO_SHA512
select CRYPTO_DH
select CRYPTO_DH_RFC7919_GROUPS
+ select CRYPTO_HKDF
diff --git a/drivers/nvme/common/auth.c b/drivers/nvme/common/auth.c
index 9b7126e1a19d..2c092ec8c0a9 100644
--- a/drivers/nvme/common/auth.c
+++ b/drivers/nvme/common/auth.c
@@ -11,9 +11,12 @@
#include <linux/unaligned.h>
#include <crypto/hash.h>
#include <crypto/dh.h>
+#include <crypto/hkdf.h>
#include <linux/nvme.h>
#include <linux/nvme-auth.h>
+#define HKDF_MAX_HASHLEN 64
+
static u32 nvme_dhchap_seqnum;
static DEFINE_MUTEX(nvme_dhchap_mutex);
@@ -471,5 +474,339 @@ int nvme_auth_generate_key(u8 *secret, struct nvme_dhchap_key **ret_key)
}
EXPORT_SYMBOL_GPL(nvme_auth_generate_key);
+/**
+ * nvme_auth_generate_psk - Generate a PSK for TLS
+ * @hmac_id: Hash function identifier
+ * @skey: Session key
+ * @skey_len: Length of @skey
+ * @c1: Value of challenge C1
+ * @c2: Value of challenge C2
+ * @hash_len: Hash length of the hash algorithm
+ * @ret_psk: Pointer to the resulting generated PSK
+ * @ret_len: Length of @ret_psk
+ *
+ * Generate a PSK for TLS as specified in NVMe base specification, section
+ * 8.13.5.9: Generated PSK for TLS
+ *
+ * The generated PSK for TLS shall be computed applying the HMAC function
+ * using the hash function H( ) selected by the HashID parameter in the
+ * DH-HMAC-CHAP_Challenge message with the session key KS as key to the
+ * concatenation of the two challenges C1 and C2 (i.e., generated
+ * PSK = HMAC(KS, C1 || C2)).
+ *
+ * Returns 0 on success with a valid generated PSK pointer in @ret_psk and
+ * the length of @ret_psk in @ret_len, or a negative error number otherwise.
+ */
+int nvme_auth_generate_psk(u8 hmac_id, u8 *skey, size_t skey_len,
+ u8 *c1, u8 *c2, size_t hash_len, u8 **ret_psk, size_t *ret_len)
+{
+ struct crypto_shash *tfm;
+ SHASH_DESC_ON_STACK(shash, tfm);
+ u8 *psk;
+ const char *hmac_name;
+ int ret, psk_len;
+
+ if (!c1 || !c2)
+ return -EINVAL;
+
+ hmac_name = nvme_auth_hmac_name(hmac_id);
+ if (!hmac_name) {
+ pr_warn("%s: invalid hash algorithm %d\n",
+ __func__, hmac_id);
+ return -EINVAL;
+ }
+
+ tfm = crypto_alloc_shash(hmac_name, 0, 0);
+ if (IS_ERR(tfm))
+ return PTR_ERR(tfm);
+
+ psk_len = crypto_shash_digestsize(tfm);
+ psk = kzalloc(psk_len, GFP_KERNEL);
+ if (!psk) {
+ ret = -ENOMEM;
+ goto out_free_tfm;
+ }
+
+ shash->tfm = tfm;
+ ret = crypto_shash_setkey(tfm, skey, skey_len);
+ if (ret)
+ goto out_free_psk;
+
+ ret = crypto_shash_init(shash);
+ if (ret)
+ goto out_free_psk;
+
+ ret = crypto_shash_update(shash, c1, hash_len);
+ if (ret)
+ goto out_free_psk;
+
+ ret = crypto_shash_update(shash, c2, hash_len);
+ if (ret)
+ goto out_free_psk;
+
+ ret = crypto_shash_final(shash, psk);
+ if (!ret) {
+ *ret_psk = psk;
+ *ret_len = psk_len;
+ }
+
+out_free_psk:
+ if (ret)
+ kfree_sensitive(psk);
+out_free_tfm:
+ crypto_free_shash(tfm);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(nvme_auth_generate_psk);
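The same computation can be reproduced outside the kernel when debugging a secure-concatenation setup; a minimal user-space sketch, not part of this patch, assuming OpenSSL and a negotiated SHA-256 hash (so the hash length and the challenge sizes are 32 bytes):

	#include <openssl/evp.h>
	#include <openssl/hmac.h>
	#include <string.h>

	/* Recompute generated PSK = HMAC(KS, C1 || C2) for comparison with
	 * the value derived by the kernel. Sizes assume SHA-256. */
	static int generated_psk_sha256(const unsigned char *ks, size_t ks_len,
					const unsigned char *c1,
					const unsigned char *c2,
					unsigned char psk[32])
	{
		unsigned char msg[64];
		unsigned int len = 0;

		memcpy(msg, c1, 32);		/* C1 ...   */
		memcpy(msg + 32, c2, 32);	/* ... || C2 */
		if (!HMAC(EVP_sha256(), ks, (int)ks_len, msg, sizeof(msg),
			  psk, &len))
			return -1;
		return len == 32 ? 0 : -1;
	}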
+
+/**
+ * nvme_auth_generate_digest - Generate TLS PSK digest
+ * @hmac_id: Hash function identifier
+ * @psk: Generated input PSK
+ * @psk_len: Length of @psk
+ * @subsysnqn: NQN of the subsystem
+ * @hostnqn: NQN of the host
+ * @ret_digest: Pointer to the returned digest
+ *
+ * Generate a TLS PSK digest as specified in TP8018 Section 3.6.1.3:
+ * TLS PSK and PSK identity Derivation
+ *
+ * The PSK digest shall be computed by encoding in Base64 (refer to RFC
+ * 4648) the result of the application of the HMAC function using the hash
+ * function specified in item 4 above (i.e., the hash function of the cipher
+ * suite associated with the PSK identity) with the PSK as HMAC key to the
+ * concatenation of:
+ * - the NQN of the host (i.e., NQNh) not including the null terminator;
+ * - a space character;
+ * - the NQN of the NVM subsystem (i.e., NQNc) not including the null
+ * terminator;
+ * - a space character; and
+ * - the seventeen ASCII characters "NVMe-over-Fabrics"
+ * (i.e., <PSK digest> = Base64(HMAC(PSK, NQNh || " " || NQNc || " " ||
+ * "NVMe-over-Fabrics"))).
+ * The length of the PSK digest depends on the hash function used to compute
+ * it as follows:
+ * - If the SHA-256 hash function is used, the resulting PSK digest is 44
+ * characters long; or
+ * - If the SHA-384 hash function is used, the resulting PSK digest is 64
+ * characters long.
+ *
+ * Returns 0 on success with a valid digest pointer in @ret_digest, or a
+ * negative error number on failure.
+ */
+int nvme_auth_generate_digest(u8 hmac_id, u8 *psk, size_t psk_len,
+ char *subsysnqn, char *hostnqn, u8 **ret_digest)
+{
+ struct crypto_shash *tfm;
+ SHASH_DESC_ON_STACK(shash, tfm);
+ u8 *digest, *enc;
+ const char *hmac_name;
+ size_t digest_len, hmac_len;
+ int ret;
+
+ if (WARN_ON(!subsysnqn || !hostnqn))
+ return -EINVAL;
+
+ hmac_name = nvme_auth_hmac_name(hmac_id);
+ if (!hmac_name) {
+ pr_warn("%s: invalid hash algorithm %d\n",
+ __func__, hmac_id);
+ return -EINVAL;
+ }
+
+ switch (nvme_auth_hmac_hash_len(hmac_id)) {
+ case 32:
+ hmac_len = 44;
+ break;
+ case 48:
+ hmac_len = 64;
+ break;
+ default:
+ pr_warn("%s: invalid hash algorithm '%s'\n",
+ __func__, hmac_name);
+ return -EINVAL;
+ }
+
+ enc = kzalloc(hmac_len + 1, GFP_KERNEL);
+ if (!enc)
+ return -ENOMEM;
+
+ tfm = crypto_alloc_shash(hmac_name, 0, 0);
+ if (IS_ERR(tfm)) {
+ ret = PTR_ERR(tfm);
+ goto out_free_enc;
+ }
+
+ digest_len = crypto_shash_digestsize(tfm);
+ digest = kzalloc(digest_len, GFP_KERNEL);
+ if (!digest) {
+ ret = -ENOMEM;
+ goto out_free_tfm;
+ }
+
+ shash->tfm = tfm;
+ ret = crypto_shash_setkey(tfm, psk, psk_len);
+ if (ret)
+ goto out_free_digest;
+
+ ret = crypto_shash_init(shash);
+ if (ret)
+ goto out_free_digest;
+
+ ret = crypto_shash_update(shash, hostnqn, strlen(hostnqn));
+ if (ret)
+ goto out_free_digest;
+
+ ret = crypto_shash_update(shash, " ", 1);
+ if (ret)
+ goto out_free_digest;
+
+ ret = crypto_shash_update(shash, subsysnqn, strlen(subsysnqn));
+ if (ret)
+ goto out_free_digest;
+
+ ret = crypto_shash_update(shash, " NVMe-over-Fabrics", 18);
+ if (ret)
+ goto out_free_digest;
+
+ ret = crypto_shash_final(shash, digest);
+ if (ret)
+ goto out_free_digest;
+
+ ret = base64_encode(digest, digest_len, enc);
+ if (ret < hmac_len) {
+ ret = -ENOKEY;
+ goto out_free_digest;
+ }
+ *ret_digest = enc;
+ ret = 0;
+
+out_free_digest:
+ kfree_sensitive(digest);
+out_free_tfm:
+ crypto_free_shash(tfm);
+out_free_enc:
+ if (ret)
+ kfree_sensitive(enc);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(nvme_auth_generate_digest);
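The two lengths checked in the switch above follow directly from Base64: every 3 input bytes encode to 4 output characters, so a 32-byte SHA-256 HMAC becomes ceil(32 / 3) * 4 = 44 characters (including '=' padding) and a 48-byte SHA-384 HMAC becomes (48 / 3) * 4 = 64 characters, matching the digest lengths quoted in the kernel-doc.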
+
+/**
+ * nvme_auth_derive_tls_psk - Derive TLS PSK
+ * @hmac_id: Hash function identifier
+ * @psk: generated input PSK
+ * @psk_len: size of @psk
+ * @psk_digest: TLS PSK digest
+ * @ret_psk: Pointer to the resulting TLS PSK
+ *
+ * Derive a TLS PSK as specified in TP8018 Section 3.6.1.3:
+ * TLS PSK and PSK identity Derivation
+ *
+ * The TLS PSK shall be derived as follows from an input PSK
+ * (i.e., either a retained PSK or a generated PSK) and a PSK
+ * identity using the HKDF-Extract and HKDF-Expand-Label operations
+ * (refer to RFC 5869 and RFC 8446) where the hash function is the
+ * one specified by the hash specifier of the PSK identity:
+ * 1. PRK = HKDF-Extract(0, Input PSK); and
+ * 2. TLS PSK = HKDF-Expand-Label(PRK, "nvme-tls-psk", PskIdentityContext, L),
+ * where PskIdentityContext is the hash identifier indicated in
+ * the PSK identity concatenated to a space character and to the
+ * Base64 PSK digest (i.e., "<hash> <PSK digest>") and L is the
+ * output size in bytes of the hash function (i.e., 32 for SHA-256
+ * and 48 for SHA-384).
+ *
+ * Returns 0 on success with a valid psk pointer in @ret_psk or a negative
+ * error number otherwise.
+ */
+int nvme_auth_derive_tls_psk(int hmac_id, u8 *psk, size_t psk_len,
+ u8 *psk_digest, u8 **ret_psk)
+{
+ struct crypto_shash *hmac_tfm;
+ const char *hmac_name;
+ const char *psk_prefix = "tls13 nvme-tls-psk";
+ static const char default_salt[HKDF_MAX_HASHLEN];
+ size_t info_len, prk_len;
+ char *info;
+ unsigned char *prk, *tls_key;
+ int ret;
+
+ hmac_name = nvme_auth_hmac_name(hmac_id);
+ if (!hmac_name) {
+ pr_warn("%s: invalid hash algorithm %d\n",
+ __func__, hmac_id);
+ return -EINVAL;
+ }
+ if (hmac_id == NVME_AUTH_HASH_SHA512) {
+ pr_warn("%s: unsupported hash algorithm %s\n",
+ __func__, hmac_name);
+ return -EINVAL;
+ }
+
+ hmac_tfm = crypto_alloc_shash(hmac_name, 0, 0);
+ if (IS_ERR(hmac_tfm))
+ return PTR_ERR(hmac_tfm);
+
+ prk_len = crypto_shash_digestsize(hmac_tfm);
+ prk = kzalloc(prk_len, GFP_KERNEL);
+ if (!prk) {
+ ret = -ENOMEM;
+ goto out_free_shash;
+ }
+
+ if (WARN_ON(prk_len > HKDF_MAX_HASHLEN)) {
+ ret = -EINVAL;
+ goto out_free_prk;
+ }
+ ret = hkdf_extract(hmac_tfm, psk, psk_len,
+ default_salt, prk_len, prk);
+ if (ret)
+ goto out_free_prk;
+
+ ret = crypto_shash_setkey(hmac_tfm, prk, prk_len);
+ if (ret)
+ goto out_free_prk;
+
+ /*
+ * 2 additional bytes for the length field from HKDF-Expand-Label,
+ * 2 additional bytes for the HMAC ID, and one byte for the space
+ * separator.
+ */
+ info_len = strlen(psk_digest) + strlen(psk_prefix) + 5;
+ info = kzalloc(info_len + 1, GFP_KERNEL);
+ if (!info) {
+ ret = -ENOMEM;
+ goto out_free_prk;
+ }
+
+ put_unaligned_be16(psk_len, info);
+ memcpy(info + 2, psk_prefix, strlen(psk_prefix));
+ sprintf(info + 2 + strlen(psk_prefix), "%02d %s", hmac_id, psk_digest);
+
+ tls_key = kzalloc(psk_len, GFP_KERNEL);
+ if (!tls_key) {
+ ret = -ENOMEM;
+ goto out_free_info;
+ }
+ ret = hkdf_expand(hmac_tfm, info, info_len, tls_key, psk_len);
+ if (ret) {
+ kfree(tls_key);
+ goto out_free_info;
+ }
+ *ret_psk = tls_key;
+
+out_free_info:
+ kfree(info);
+out_free_prk:
+ kfree(prk);
+out_free_shash:
+ crypto_free_shash(hmac_tfm);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(nvme_auth_derive_tls_psk);
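For reference, the info buffer assembled above has the following layout; offsets and values are shown for the SHA-256 case (psk_len = 32, a 44-character digest, and assuming the SHA-256 hash identifier is 1 as used elsewhere in the NVMe auth code), giving info_len = 18 + 44 + 5 = 67 bytes:

	offset  0: 0x00 0x20                big-endian output length L (psk_len)
	offset  2: "tls13 nvme-tls-psk"     18-byte label prefix
	offset 20: "01 "                    two-digit hash identifier plus space
	offset 23: <44-character Base64 PSK digest>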
+
MODULE_DESCRIPTION("NVMe Authentication framework");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/nvme/common/keyring.c b/drivers/nvme/common/keyring.c
index ed5167f942d8..32d16c53133b 100644
--- a/drivers/nvme/common/keyring.c
+++ b/drivers/nvme/common/keyring.c
@@ -5,7 +5,6 @@
#include <linux/module.h>
#include <linux/seq_file.h>
-#include <linux/key.h>
#include <linux/key-type.h>
#include <keys/user-type.h>
#include <linux/nvme.h>
@@ -124,6 +123,70 @@ static struct key *nvme_tls_psk_lookup(struct key *keyring,
return key_ref_to_ptr(keyref);
}
+/**
+ * nvme_tls_psk_refresh - Refresh TLS PSK
+ * @keyring: Keyring holding the TLS PSK
+ * @hostnqn: Host NQN to use
+ * @subnqn: Subsystem NQN to use
+ * @hmac_id: Hash function identifier
+ * @data: TLS PSK key material
+ * @data_len: Length of @data
+ * @digest: TLS PSK digest
+ *
+ * Refresh a generated version 1 TLS PSK with the identity generated
+ * from @hmac_id, @hostnqn, @subnqn, and @digest in the keyring given
+ * by @keyring.
+ *
+ * Returns the updated key on success or an error pointer otherwise.
+ */
+struct key *nvme_tls_psk_refresh(struct key *keyring,
+ const char *hostnqn, const char *subnqn, u8 hmac_id,
+ u8 *data, size_t data_len, const char *digest)
+{
+ key_perm_t keyperm =
+ KEY_POS_SEARCH | KEY_POS_VIEW | KEY_POS_READ |
+ KEY_POS_WRITE | KEY_POS_LINK | KEY_POS_SETATTR |
+ KEY_USR_SEARCH | KEY_USR_VIEW | KEY_USR_READ;
+ char *identity;
+ key_ref_t keyref;
+ key_serial_t keyring_id;
+ struct key *key;
+
+ if (!hostnqn || !subnqn || !data || !data_len)
+ return ERR_PTR(-EINVAL);
+
+ identity = kasprintf(GFP_KERNEL, "NVMe1G%02d %s %s %s",
+ hmac_id, hostnqn, subnqn, digest);
+ if (!identity)
+ return ERR_PTR(-ENOMEM);
+
+ if (!keyring)
+ keyring = nvme_keyring;
+ keyring_id = key_serial(keyring);
+ pr_debug("keyring %x refresh tls psk '%s'\n",
+ keyring_id, identity);
+ keyref = key_create_or_update(make_key_ref(keyring, true),
+ "psk", identity, data, data_len,
+ keyperm, KEY_ALLOC_NOT_IN_QUOTA |
+ KEY_ALLOC_BUILT_IN |
+ KEY_ALLOC_BYPASS_RESTRICTION);
+ if (IS_ERR(keyref)) {
+ pr_debug("refresh tls psk '%s' failed, error %ld\n",
+ identity, PTR_ERR(keyref));
+ kfree(identity);
+ return ERR_PTR(-ENOKEY);
+ }
+ kfree(identity);
+ /*
+ * Set the default timeout to 1 hour
+ * as suggested in TP8018.
+ */
+ key = key_ref_to_ptr(keyref);
+ key_set_timeout(key, 3600);
+ return key;
+}
+EXPORT_SYMBOL_GPL(nvme_tls_psk_refresh);
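The identity created above follows the "NVMe1G%02d %s %s %s" format, i.e. generated (G) version-1 PSK, two-digit hash identifier, host NQN, subsystem NQN and Base64 digest; with illustrative NQNs and SHA-256 it would look like:

	NVMe1G01 nqn.2014-08.org.nvmexpress:uuid:host-1 nqn.2014-08.example:subsys-1 <44-character Base64 digest>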
+
/*
* NVMe PSK priority list
*
diff --git a/drivers/nvme/host/Kconfig b/drivers/nvme/host/Kconfig
index 486afe598184..10e453b2436e 100644
--- a/drivers/nvme/host/Kconfig
+++ b/drivers/nvme/host/Kconfig
@@ -109,7 +109,7 @@ config NVME_HOST_AUTH
bool "NVMe over Fabrics In-Band Authentication in host side"
depends on NVME_CORE
select NVME_AUTH
- select NVME_KEYRING if NVME_TCP_TLS
+ select NVME_KEYRING
help
This provides support for NVMe over Fabrics In-Band Authentication in
host side.
diff --git a/drivers/nvme/host/apple.c b/drivers/nvme/host/apple.c
index a060f69558e7..b1fddfa33ab9 100644
--- a/drivers/nvme/host/apple.c
+++ b/drivers/nvme/host/apple.c
@@ -221,7 +221,7 @@ static unsigned int apple_nvme_queue_depth(struct apple_nvme_queue *q)
return APPLE_ANS_MAX_QUEUE_DEPTH;
}
-static void apple_nvme_rtkit_crashed(void *cookie)
+static void apple_nvme_rtkit_crashed(void *cookie, const void *crashlog, size_t crashlog_size)
{
struct apple_nvme *anv = cookie;
@@ -525,7 +525,7 @@ static blk_status_t apple_nvme_map_data(struct apple_nvme *anv,
if (!iod->sg)
return BLK_STS_RESOURCE;
sg_init_table(iod->sg, blk_rq_nr_phys_segments(req));
- iod->nents = blk_rq_map_sg(req->q, req, iod->sg);
+ iod->nents = blk_rq_map_sg(req, iod->sg);
if (!iod->nents)
goto out_free_sg;
@@ -599,7 +599,8 @@ static inline void apple_nvme_handle_cqe(struct apple_nvme_queue *q,
}
if (!nvme_try_complete_req(req, cqe->status, cqe->result) &&
- !blk_mq_add_to_batch(req, iob, nvme_req(req)->status,
+ !blk_mq_add_to_batch(req, iob,
+ nvme_req(req)->status != NVME_SC_SUCCESS,
apple_nvme_complete_batch))
apple_nvme_complete_rq(req);
}
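The third argument of blk_mq_add_to_batch() is now a boolean that tells the block layer whether the request failed, rather than the raw NVMe status word, so the caller passes nvme_req(req)->status != NVME_SC_SUCCESS; the PCIe driver's completion path further down is converted the same way.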
diff --git a/drivers/nvme/host/auth.c b/drivers/nvme/host/auth.c
index 5ea0e21709da..6115fef74c1e 100644
--- a/drivers/nvme/host/auth.c
+++ b/drivers/nvme/host/auth.c
@@ -12,6 +12,7 @@
#include "nvme.h"
#include "fabrics.h"
#include <linux/nvme-auth.h>
+#include <linux/nvme-keyring.h>
#define CHAP_BUF_SIZE 4096
static struct kmem_cache *nvme_chap_buf_cache;
@@ -131,7 +132,13 @@ static int nvme_auth_set_dhchap_negotiate_data(struct nvme_ctrl *ctrl,
data->auth_type = NVME_AUTH_COMMON_MESSAGES;
data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_NEGOTIATE;
data->t_id = cpu_to_le16(chap->transaction);
- data->sc_c = 0; /* No secure channel concatenation */
+ if (ctrl->opts->concat && chap->qid == 0) {
+ if (ctrl->opts->tls_key)
+ data->sc_c = NVME_AUTH_SECP_REPLACETLSPSK;
+ else
+ data->sc_c = NVME_AUTH_SECP_NEWTLSPSK;
+ } else
+ data->sc_c = NVME_AUTH_SECP_NOSC;
data->napd = 1;
data->auth_protocol[0].dhchap.authid = NVME_AUTH_DHCHAP_AUTH_ID;
data->auth_protocol[0].dhchap.halen = 3;
@@ -311,8 +318,9 @@ static int nvme_auth_set_dhchap_reply_data(struct nvme_ctrl *ctrl,
data->hl = chap->hash_len;
data->dhvlen = cpu_to_le16(chap->host_key_len);
memcpy(data->rval, chap->response, chap->hash_len);
- if (ctrl->ctrl_key) {
+ if (ctrl->ctrl_key)
chap->bi_directional = true;
+ if (ctrl->ctrl_key || ctrl->opts->concat) {
get_random_bytes(chap->c2, chap->hash_len);
data->cvalid = 1;
memcpy(data->rval + chap->hash_len, chap->c2,
@@ -322,7 +330,10 @@ static int nvme_auth_set_dhchap_reply_data(struct nvme_ctrl *ctrl,
} else {
memset(chap->c2, 0, chap->hash_len);
}
- chap->s2 = nvme_auth_get_seqnum();
+ if (ctrl->opts->concat)
+ chap->s2 = 0;
+ else
+ chap->s2 = nvme_auth_get_seqnum();
data->seqnum = cpu_to_le32(chap->s2);
if (chap->host_key_len) {
dev_dbg(ctrl->device, "%s: qid %d host public key %*ph\n",
@@ -677,6 +688,92 @@ static void nvme_auth_free_dhchap(struct nvme_dhchap_queue_context *chap)
crypto_free_kpp(chap->dh_tfm);
}
+void nvme_auth_revoke_tls_key(struct nvme_ctrl *ctrl)
+{
+ dev_dbg(ctrl->device, "Wipe generated TLS PSK %08x\n",
+ key_serial(ctrl->opts->tls_key));
+ key_revoke(ctrl->opts->tls_key);
+ key_put(ctrl->opts->tls_key);
+ ctrl->opts->tls_key = NULL;
+}
+EXPORT_SYMBOL_GPL(nvme_auth_revoke_tls_key);
+
+static int nvme_auth_secure_concat(struct nvme_ctrl *ctrl,
+ struct nvme_dhchap_queue_context *chap)
+{
+ u8 *psk, *digest, *tls_psk;
+ struct key *tls_key;
+ size_t psk_len;
+ int ret = 0;
+
+ if (!chap->sess_key) {
+ dev_warn(ctrl->device,
+ "%s: qid %d no session key negotiated\n",
+ __func__, chap->qid);
+ return -ENOKEY;
+ }
+
+ if (chap->qid) {
+ dev_warn(ctrl->device,
+ "qid %d: secure concatenation not supported on I/O queues\n",
+ chap->qid);
+ return -EINVAL;
+ }
+ ret = nvme_auth_generate_psk(chap->hash_id, chap->sess_key,
+ chap->sess_key_len,
+ chap->c1, chap->c2,
+ chap->hash_len, &psk, &psk_len);
+ if (ret) {
+ dev_warn(ctrl->device,
+ "%s: qid %d failed to generate PSK, error %d\n",
+ __func__, chap->qid, ret);
+ return ret;
+ }
+ dev_dbg(ctrl->device,
+ "%s: generated psk %*ph\n", __func__, (int)psk_len, psk);
+
+ ret = nvme_auth_generate_digest(chap->hash_id, psk, psk_len,
+ ctrl->opts->subsysnqn,
+ ctrl->opts->host->nqn, &digest);
+ if (ret) {
+ dev_warn(ctrl->device,
+ "%s: qid %d failed to generate digest, error %d\n",
+ __func__, chap->qid, ret);
+ goto out_free_psk;
+ }
+ dev_dbg(ctrl->device, "%s: generated digest %s\n",
+ __func__, digest);
+ ret = nvme_auth_derive_tls_psk(chap->hash_id, psk, psk_len,
+ digest, &tls_psk);
+ if (ret) {
+ dev_warn(ctrl->device,
+ "%s: qid %d failed to derive TLS psk, error %d\n",
+ __func__, chap->qid, ret);
+ goto out_free_digest;
+ }
+
+ tls_key = nvme_tls_psk_refresh(ctrl->opts->keyring,
+ ctrl->opts->host->nqn,
+ ctrl->opts->subsysnqn, chap->hash_id,
+ tls_psk, psk_len, digest);
+ if (IS_ERR(tls_key)) {
+ ret = PTR_ERR(tls_key);
+ dev_warn(ctrl->device,
+ "%s: qid %d failed to insert generated key, error %d\n",
+ __func__, chap->qid, ret);
+ tls_key = NULL;
+ }
+ kfree_sensitive(tls_psk);
+ if (ctrl->opts->tls_key)
+ nvme_auth_revoke_tls_key(ctrl);
+ ctrl->opts->tls_key = tls_key;
+out_free_digest:
+ kfree_sensitive(digest);
+out_free_psk:
+ kfree_sensitive(psk);
+ return ret;
+}
+
static void nvme_queue_auth_work(struct work_struct *work)
{
struct nvme_dhchap_queue_context *chap =
@@ -833,6 +930,13 @@ static void nvme_queue_auth_work(struct work_struct *work)
}
if (!ret) {
chap->error = 0;
+ if (ctrl->opts->concat &&
+ (ret = nvme_auth_secure_concat(ctrl, chap))) {
+ dev_warn(ctrl->device,
+ "%s: qid %d failed to enable secure concatenation\n",
+ __func__, chap->qid);
+ chap->error = ret;
+ }
return;
}
@@ -912,6 +1016,11 @@ static void nvme_ctrl_auth_work(struct work_struct *work)
"qid 0: authentication failed\n");
return;
}
+ /*
+ * Only run authentication on the admin queue for secure concatenation.
+ */
+ if (ctrl->opts->concat)
+ return;
for (q = 1; q < ctrl->queue_count; q++) {
ret = nvme_auth_negotiate(ctrl, q);
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index f028913e2e62..777db89fdaa7 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -431,6 +431,12 @@ static inline void nvme_end_req_zoned(struct request *req)
static inline void __nvme_end_req(struct request *req)
{
+ if (unlikely(nvme_req(req)->status && !(req->rq_flags & RQF_QUIET))) {
+ if (blk_rq_is_passthrough(req))
+ nvme_log_err_passthru(req);
+ else
+ nvme_log_error(req);
+ }
nvme_end_req_zoned(req);
nvme_trace_bio_complete(req);
if (req->cmd_flags & REQ_NVME_MPATH)
@@ -441,12 +447,6 @@ void nvme_end_req(struct request *req)
{
blk_status_t status = nvme_error_status(nvme_req(req)->status);
- if (unlikely(nvme_req(req)->status && !(req->rq_flags & RQF_QUIET))) {
- if (blk_rq_is_passthrough(req))
- nvme_log_err_passthru(req);
- else
- nvme_log_error(req);
- }
__nvme_end_req(req);
blk_mq_end_request(req, status);
}
@@ -4018,6 +4018,9 @@ static void nvme_ns_remove(struct nvme_ns *ns)
if (!nvme_ns_head_multipath(ns->head))
nvme_cdev_del(&ns->cdev, &ns->cdev_device);
+
+ nvme_mpath_remove_sysfs_link(ns);
+
del_gendisk(ns->disk);
mutex_lock(&ns->ctrl->namespaces_lock);
diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
index 432efcbf9e2f..93e9041b9657 100644
--- a/drivers/nvme/host/fabrics.c
+++ b/drivers/nvme/host/fabrics.c
@@ -472,8 +472,9 @@ int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl)
result = le32_to_cpu(res.u32);
ctrl->cntlid = result & 0xFFFF;
if (result & (NVME_CONNECT_AUTHREQ_ATR | NVME_CONNECT_AUTHREQ_ASCR)) {
- /* Secure concatenation is not implemented */
- if (result & NVME_CONNECT_AUTHREQ_ASCR) {
+ /* Check for secure concatenation */
+ if ((result & NVME_CONNECT_AUTHREQ_ASCR) &&
+ !ctrl->opts->concat) {
dev_warn(ctrl->device,
"qid 0: secure concatenation is not supported\n");
ret = -EOPNOTSUPP;
@@ -550,7 +551,7 @@ int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid)
/* Secure concatenation is not implemented */
if (result & NVME_CONNECT_AUTHREQ_ASCR) {
dev_warn(ctrl->device,
- "qid 0: secure concatenation is not supported\n");
+ "qid %d: secure concatenation is not supported\n", qid);
ret = -EOPNOTSUPP;
goto out_free_data;
}
@@ -706,6 +707,7 @@ static const match_table_t opt_tokens = {
#endif
#ifdef CONFIG_NVME_TCP_TLS
{ NVMF_OPT_TLS, "tls" },
+ { NVMF_OPT_CONCAT, "concat" },
#endif
{ NVMF_OPT_ERR, NULL }
};
@@ -735,6 +737,7 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
opts->tls = false;
opts->tls_key = NULL;
opts->keyring = NULL;
+ opts->concat = false;
options = o = kstrdup(buf, GFP_KERNEL);
if (!options)
@@ -1053,6 +1056,14 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
}
opts->tls = true;
break;
+ case NVMF_OPT_CONCAT:
+ if (!IS_ENABLED(CONFIG_NVME_TCP_TLS)) {
+ pr_err("TLS is not supported\n");
+ ret = -EINVAL;
+ goto out;
+ }
+ opts->concat = true;
+ break;
default:
pr_warn("unknown parameter or missing value '%s' in ctrl creation request\n",
p);
@@ -1079,6 +1090,23 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
pr_warn("failfast tmo (%d) larger than controller loss tmo (%d)\n",
opts->fast_io_fail_tmo, ctrl_loss_tmo);
}
+ if (opts->concat) {
+ if (opts->tls) {
+ pr_err("Secure concatenation over TLS is not supported\n");
+ ret = -EINVAL;
+ goto out;
+ }
+ if (opts->tls_key) {
+ pr_err("Cannot specify a TLS key for secure concatenation\n");
+ ret = -EINVAL;
+ goto out;
+ }
+ if (!opts->dhchap_secret) {
+ pr_err("Need to enable DH-CHAP for secure concatenation\n");
+ ret = -EINVAL;
+ goto out;
+ }
+ }
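Combined with the 'concat' token added to opt_tokens above, a secure-concatenation connection passes 'concat' without 'tls' and without a pre-set TLS key, plus a DH-CHAP secret; an illustrative option string written to /dev/nvme-fabrics (addresses, NQNs and key material are placeholders) would be:

	transport=tcp,traddr=192.0.2.10,trsvcid=4420,nqn=nqn.2014-08.example:subsys-1,hostnqn=nqn.2014-08.org.nvmexpress:uuid:host-1,concat,dhchap_secret=DHHC-1:00:<base64 key>: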
opts->host = nvmf_host_add(hostnqn, &hostid);
if (IS_ERR(opts->host)) {
diff --git a/drivers/nvme/host/fabrics.h b/drivers/nvme/host/fabrics.h
index 21d75dc4a3a0..9cf5b020adba 100644
--- a/drivers/nvme/host/fabrics.h
+++ b/drivers/nvme/host/fabrics.h
@@ -66,6 +66,7 @@ enum {
NVMF_OPT_TLS = 1 << 25,
NVMF_OPT_KEYRING = 1 << 26,
NVMF_OPT_TLS_KEY = 1 << 27,
+ NVMF_OPT_CONCAT = 1 << 28,
};
/**
@@ -101,6 +102,7 @@ enum {
* @keyring: Keyring to use for key lookups
* @tls_key: TLS key for encrypted connections (TCP)
* @tls: Start TLS encrypted connections (TCP)
+ * @concat: Enable secure channel concatenation (TCP)
* @disable_sqflow: disable controller sq flow control
* @hdr_digest: generate/verify header digest (TCP)
* @data_digest: generate/verify data digest (TCP)
@@ -130,6 +132,7 @@ struct nvmf_ctrl_options {
struct key *keyring;
struct key *tls_key;
bool tls;
+ bool concat;
bool disable_sqflow;
bool hdr_digest;
bool data_digest;
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index b9929a5a7f4e..2257c3c96dd2 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -2571,7 +2571,7 @@ nvme_fc_map_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
if (ret)
return -ENOMEM;
- op->nents = blk_rq_map_sg(rq->q, rq, freq->sg_table.sgl);
+ op->nents = blk_rq_map_sg(rq, freq->sg_table.sgl);
WARN_ON(op->nents > blk_rq_nr_phys_segments(rq));
freq->sg_cnt = fc_dma_map_sg(ctrl->lport->dev, freq->sg_table.sgl,
op->nents, rq_dma_dir(rq));
@@ -2858,7 +2858,7 @@ nvme_fc_create_io_queues(struct nvme_fc_ctrl *ctrl)
unsigned int nr_io_queues;
int ret;
- nr_io_queues = min(min(opts->nr_io_queues, num_online_cpus()),
+ nr_io_queues = min3(opts->nr_io_queues, num_online_cpus(),
ctrl->lport->ops->max_hw_queues);
ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
if (ret) {
@@ -2912,7 +2912,7 @@ nvme_fc_recreate_io_queues(struct nvme_fc_ctrl *ctrl)
unsigned int nr_io_queues;
int ret;
- nr_io_queues = min(min(opts->nr_io_queues, num_online_cpus()),
+ nr_io_queues = min3(opts->nr_io_queues, num_online_cpus(),
ctrl->lport->ops->max_hw_queues);
ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
if (ret) {
diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c
index 98a0750c0cda..ecf136489044 100644
--- a/drivers/nvme/host/ioctl.c
+++ b/drivers/nvme/host/ioctl.c
@@ -129,8 +129,10 @@ static int nvme_map_user_request(struct request *req, u64 ubuffer,
if (!nvme_ctrl_sgl_supported(ctrl))
dev_warn_once(ctrl->device, "using unchecked data buffer\n");
if (has_metadata) {
- if (!supports_metadata)
- return -EINVAL;
+ if (!supports_metadata) {
+ ret = -EINVAL;
+ goto out;
+ }
if (!nvme_ctrl_meta_sgl_supported(ctrl))
dev_warn_once(ctrl->device,
"using unchecked metadata buffer\n");
@@ -140,8 +142,10 @@ static int nvme_map_user_request(struct request *req, u64 ubuffer,
struct iov_iter iter;
/* fixedbufs is only for non-vectored io */
- if (WARN_ON_ONCE(flags & NVME_IOCTL_VEC))
- return -EINVAL;
+ if (WARN_ON_ONCE(flags & NVME_IOCTL_VEC)) {
+ ret = -EINVAL;
+ goto out;
+ }
ret = io_uring_cmd_import_fixed(ubuffer, bufflen,
rq_data_dir(req), &iter, ioucmd,
iou_issue_flags);
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index 2a7635565083..6b12ca80aa27 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -686,6 +686,8 @@ static void nvme_mpath_set_live(struct nvme_ns *ns)
kblockd_schedule_work(&head->partition_scan_work);
}
+ nvme_mpath_add_sysfs_link(ns->head);
+
mutex_lock(&head->lock);
if (nvme_path_is_optimized(ns)) {
int node, srcu_idx;
@@ -768,6 +770,25 @@ static void nvme_update_ns_ana_state(struct nvme_ana_group_desc *desc,
if (nvme_state_is_live(ns->ana_state) &&
nvme_ctrl_state(ns->ctrl) == NVME_CTRL_LIVE)
nvme_mpath_set_live(ns);
+ else {
+ /*
+ * Add a sysfs link from the multipath head gendisk node to the
+ * path device gendisk node.
+ * If the path's ana state is live (i.e. optimized or non-optimized)
+ * when the ns is allocated, the link is created from
+ * nvme_mpath_set_live() and we never reach this branch. For any
+ * other ana state, nvme_mpath_set_live() is only called once the
+ * state transitions to live, but we still want the link from the
+ * head node to the path device regardless of the path's ana state.
+ * So if we get here, the path's ana state is not live; create the
+ * sysfs link to this path anyway, provided the head node has
+ * already come alive.
+ */
+ if (test_bit(NVME_NSHEAD_DISK_LIVE, &ns->head->flags))
+ nvme_mpath_add_sysfs_link(ns->head);
+ }
}
static int nvme_update_ana_state(struct nvme_ctrl *ctrl,
@@ -955,6 +976,45 @@ static ssize_t ana_state_show(struct device *dev, struct device_attribute *attr,
}
DEVICE_ATTR_RO(ana_state);
+static ssize_t queue_depth_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct nvme_ns *ns = nvme_get_ns_from_dev(dev);
+
+ if (ns->head->subsys->iopolicy != NVME_IOPOLICY_QD)
+ return 0;
+
+ return sysfs_emit(buf, "%d\n", atomic_read(&ns->ctrl->nr_active));
+}
+DEVICE_ATTR_RO(queue_depth);
+
+static ssize_t numa_nodes_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ int node, srcu_idx;
+ nodemask_t numa_nodes;
+ struct nvme_ns *current_ns;
+ struct nvme_ns *ns = nvme_get_ns_from_dev(dev);
+ struct nvme_ns_head *head = ns->head;
+
+ if (head->subsys->iopolicy != NVME_IOPOLICY_NUMA)
+ return 0;
+
+ nodes_clear(numa_nodes);
+
+ srcu_idx = srcu_read_lock(&head->srcu);
+ for_each_node(node) {
+ current_ns = srcu_dereference(head->current_path[node],
+ &head->srcu);
+ if (ns == current_ns)
+ node_set(node, numa_nodes);
+ }
+ srcu_read_unlock(&head->srcu, srcu_idx);
+
+ return sysfs_emit(buf, "%*pbl\n", nodemask_pr_args(&numa_nodes));
+}
+DEVICE_ATTR_RO(numa_nodes);
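Both attributes are path-level and gated on the active I/O policy: queue_depth emits the controller's current nr_active count only when the subsystem iopolicy is queue-depth, and numa_nodes emits, in nodemask list form (the %*pbl format, e.g. "0-1"), the nodes for which this namespace is the current path, only under the numa policy; with any other policy the reads return an empty string.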
+
static int nvme_lookup_ana_group_desc(struct nvme_ctrl *ctrl,
struct nvme_ana_group_desc *desc, void *data)
{
@@ -967,6 +1027,84 @@ static int nvme_lookup_ana_group_desc(struct nvme_ctrl *ctrl,
return -ENXIO; /* just break out of the loop */
}
+void nvme_mpath_add_sysfs_link(struct nvme_ns_head *head)
+{
+ struct device *target;
+ int rc, srcu_idx;
+ struct nvme_ns *ns;
+ struct kobject *kobj;
+
+ /*
+ * Ensure the head disk node is already added, otherwise we may get an
+ * invalid kobj for the head disk node
+ */
+ if (!test_bit(GD_ADDED, &head->disk->state))
+ return;
+
+ kobj = &disk_to_dev(head->disk)->kobj;
+
+ /*
+ * loop through each ns chained through the head->list and create the
+ * sysfs link from head node to the ns path node
+ */
+ srcu_idx = srcu_read_lock(&head->srcu);
+
+ list_for_each_entry_rcu(ns, &head->list, siblings) {
+ /*
+ * Avoid creating link if it already exists for the given path.
+ * When the path's ana state transitions from optimized to
+ * non-optimized or vice-versa, nvme_mpath_set_live() is
+ * invoked, which in turn calls this function. If the sysfs
+ * link already exists for the given path and we attempt to
+ * re-create it, the sysfs code warns about it loudly.
+ * So we check the NVME_NS_SYSFS_ATTR_LINK flag here to ensure
+ * that we're not creating a duplicate link.
+ * The test_and_set_bit() is used because it is protecting
+ * against multiple nvme paths being simultaneously added.
+ */
+ if (test_and_set_bit(NVME_NS_SYSFS_ATTR_LINK, &ns->flags))
+ continue;
+
+ /*
+ * Ensure that the ns path disk node is already added, otherwise we
+ * may get an invalid kobj name for the target
+ */
+ if (!test_bit(GD_ADDED, &ns->disk->state))
+ continue;
+
+ target = disk_to_dev(ns->disk);
+ /*
+ * Create sysfs link from head gendisk kobject @kobj to the
+ * ns path gendisk kobject @target->kobj.
+ */
+ rc = sysfs_add_link_to_group(kobj, nvme_ns_mpath_attr_group.name,
+ &target->kobj, dev_name(target));
+ if (unlikely(rc)) {
+ dev_err(disk_to_dev(ns->head->disk),
+ "failed to create link to %s\n",
+ dev_name(target));
+ clear_bit(NVME_NS_SYSFS_ATTR_LINK, &ns->flags);
+ }
+ }
+
+ srcu_read_unlock(&head->srcu, srcu_idx);
+}
+
+void nvme_mpath_remove_sysfs_link(struct nvme_ns *ns)
+{
+ struct device *target;
+ struct kobject *kobj;
+
+ if (!test_bit(NVME_NS_SYSFS_ATTR_LINK, &ns->flags))
+ return;
+
+ target = disk_to_dev(ns->disk);
+ kobj = &disk_to_dev(ns->head->disk)->kobj;
+ sysfs_remove_link_from_group(kobj, nvme_ns_mpath_attr_group.name,
+ dev_name(target));
+ clear_bit(NVME_NS_SYSFS_ATTR_LINK, &ns->flags);
+}
+
void nvme_mpath_add_disk(struct nvme_ns *ns, __le32 anagrpid)
{
if (nvme_ctrl_use_ana(ns->ctrl)) {
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 7be92d07430e..51e078642127 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -534,10 +534,11 @@ struct nvme_ns {
struct nvme_ns_head *head;
unsigned long flags;
-#define NVME_NS_REMOVING 0
-#define NVME_NS_ANA_PENDING 2
-#define NVME_NS_FORCE_RO 3
-#define NVME_NS_READY 4
+#define NVME_NS_REMOVING 0
+#define NVME_NS_ANA_PENDING 2
+#define NVME_NS_FORCE_RO 3
+#define NVME_NS_READY 4
+#define NVME_NS_SYSFS_ATTR_LINK 5
struct cdev cdev;
struct device cdev_device;
@@ -933,6 +934,7 @@ int nvme_getgeo(struct block_device *bdev, struct hd_geometry *geo);
int nvme_dev_uring_cmd(struct io_uring_cmd *ioucmd, unsigned int issue_flags);
extern const struct attribute_group *nvme_ns_attr_groups[];
+extern const struct attribute_group nvme_ns_mpath_attr_group;
extern const struct pr_ops nvme_pr_ops;
extern const struct block_device_operations nvme_ns_head_ops;
extern const struct attribute_group nvme_dev_attrs_group;
@@ -955,6 +957,8 @@ void nvme_mpath_default_iopolicy(struct nvme_subsystem *subsys);
void nvme_failover_req(struct request *req);
void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl);
int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl,struct nvme_ns_head *head);
+void nvme_mpath_add_sysfs_link(struct nvme_ns_head *ns);
+void nvme_mpath_remove_sysfs_link(struct nvme_ns *ns);
void nvme_mpath_add_disk(struct nvme_ns *ns, __le32 anagrpid);
void nvme_mpath_remove_disk(struct nvme_ns_head *head);
int nvme_mpath_init_identify(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id);
@@ -980,6 +984,8 @@ static inline void nvme_trace_bio_complete(struct request *req)
extern bool multipath;
extern struct device_attribute dev_attr_ana_grpid;
extern struct device_attribute dev_attr_ana_state;
+extern struct device_attribute dev_attr_queue_depth;
+extern struct device_attribute dev_attr_numa_nodes;
extern struct device_attribute subsys_attr_iopolicy;
static inline bool nvme_disk_is_ns_head(struct gendisk *disk)
@@ -1009,6 +1015,12 @@ static inline void nvme_mpath_add_disk(struct nvme_ns *ns, __le32 anagrpid)
static inline void nvme_mpath_remove_disk(struct nvme_ns_head *head)
{
}
+static inline void nvme_mpath_add_sysfs_link(struct nvme_ns *ns)
+{
+}
+static inline void nvme_mpath_remove_sysfs_link(struct nvme_ns *ns)
+{
+}
static inline bool nvme_mpath_clear_current_path(struct nvme_ns *ns)
{
return false;
@@ -1147,6 +1159,7 @@ void nvme_auth_stop(struct nvme_ctrl *ctrl);
int nvme_auth_negotiate(struct nvme_ctrl *ctrl, int qid);
int nvme_auth_wait(struct nvme_ctrl *ctrl, int qid);
void nvme_auth_free(struct nvme_ctrl *ctrl);
+void nvme_auth_revoke_tls_key(struct nvme_ctrl *ctrl);
#else
static inline int nvme_auth_init_ctrl(struct nvme_ctrl *ctrl)
{
@@ -1169,6 +1182,7 @@ static inline int nvme_auth_wait(struct nvme_ctrl *ctrl, int qid)
return -EPROTONOSUPPORT;
}
static inline void nvme_auth_free(struct nvme_ctrl *ctrl) {};
+static inline void nvme_auth_revoke_tls_key(struct nvme_ctrl *ctrl) {};
#endif
u32 nvme_command_effects(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 950289405ef2..2883d17ee1eb 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -812,7 +812,7 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
if (!iod->sgt.sgl)
return BLK_STS_RESOURCE;
sg_init_table(iod->sgt.sgl, blk_rq_nr_phys_segments(req));
- iod->sgt.orig_nents = blk_rq_map_sg(req->q, req, iod->sgt.sgl);
+ iod->sgt.orig_nents = blk_rq_map_sg(req, iod->sgt.sgl);
if (!iod->sgt.orig_nents)
goto out_free_sg;
@@ -953,9 +953,6 @@ out_free_cmd:
return ret;
}
-/*
- * NOTE: ns is NULL when called on the admin queue.
- */
static blk_status_t nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *bd)
{
@@ -1130,8 +1127,9 @@ static inline void nvme_handle_cqe(struct nvme_queue *nvmeq,
trace_nvme_sq(req, cqe->sq_head, nvmeq->sq_tail);
if (!nvme_try_complete_req(req, cqe->status, cqe->result) &&
- !blk_mq_add_to_batch(req, iob, nvme_req(req)->status,
- nvme_pci_complete_batch))
+ !blk_mq_add_to_batch(req, iob,
+ nvme_req(req)->status != NVME_SC_SUCCESS,
+ nvme_pci_complete_batch))
nvme_pci_complete_rq(req);
}
@@ -1411,9 +1409,20 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req)
struct nvme_dev *dev = nvmeq->dev;
struct request *abort_req;
struct nvme_command cmd = { };
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
u32 csts = readl(dev->bar + NVME_REG_CSTS);
u8 opcode;
+ /*
+ * Shutdown the device immediately if we see it is disconnected. This
+ * unblocks PCIe error handling if the nvme driver is waiting in
+ * error_resume for a device that has been removed. We can't unbind the
+ * driver while the driver's error callback is waiting to complete, so
+ * we're relying on a timeout to break that deadlock if a removal
+ * occurs while reset work is running.
+ */
+ if (pci_dev_is_disconnected(pdev))
+ nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING);
if (nvme_state_terminal(&dev->ctrl))
goto disable;
@@ -1421,7 +1430,7 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req)
* the recovery mechanism will surely fail.
*/
mb();
- if (pci_channel_offline(to_pci_dev(dev->dev)))
+ if (pci_channel_offline(pdev))
return BLK_EH_RESET_TIMER;
/*
@@ -1983,6 +1992,18 @@ static void nvme_map_cmb(struct nvme_dev *dev)
return;
/*
+ * Controllers may support a CMB size larger than their BAR, for
+ * example, due to being behind a bridge. Reduce the CMB to the
+ * reported size of the BAR
+ */
+ size = min(size, bar_size - offset);
+
+ if (!IS_ALIGNED(size, memremap_compat_align()) ||
+ !IS_ALIGNED(pci_resource_start(pdev, bar),
+ memremap_compat_align()))
+ return;
+
+ /*
* Tell the controller about the host side address mapping the CMB,
* and enable CMB decoding for the NVMe 1.4+ scheme:
*/
@@ -1992,17 +2013,10 @@ static void nvme_map_cmb(struct nvme_dev *dev)
dev->bar + NVME_REG_CMBMSC);
}
- /*
- * Controllers may support a CMB size larger than their BAR,
- * for example, due to being behind a bridge. Reduce the CMB to
- * the reported size of the BAR
- */
- if (size > bar_size - offset)
- size = bar_size - offset;
-
if (pci_p2pdma_add_resource(pdev, bar, size, offset)) {
dev_warn(dev->ctrl.device,
"failed to register the CMB\n");
+ hi_lo_writeq(0, dev->bar + NVME_REG_CMBMSC);
return;
}
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 86a2891d9bcc..b5a0295b5bf4 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -1476,8 +1476,7 @@ static int nvme_rdma_dma_map_req(struct ib_device *ibdev, struct request *rq,
if (ret)
return -ENOMEM;
- req->data_sgl.nents = blk_rq_map_sg(rq->q, rq,
- req->data_sgl.sg_table.sgl);
+ req->data_sgl.nents = blk_rq_map_sg(rq, req->data_sgl.sg_table.sgl);
*count = ib_dma_map_sg(ibdev, req->data_sgl.sg_table.sgl,
req->data_sgl.nents, rq_dma_dir(rq));
diff --git a/drivers/nvme/host/sysfs.c b/drivers/nvme/host/sysfs.c
index 3a41b9ab0f13..6d31226f7a4f 100644
--- a/drivers/nvme/host/sysfs.c
+++ b/drivers/nvme/host/sysfs.c
@@ -258,6 +258,8 @@ static struct attribute *nvme_ns_attrs[] = {
#ifdef CONFIG_NVME_MULTIPATH
&dev_attr_ana_grpid.attr,
&dev_attr_ana_state.attr,
+ &dev_attr_queue_depth.attr,
+ &dev_attr_numa_nodes.attr,
#endif
&dev_attr_io_passthru_err_log_enabled.attr,
NULL,
@@ -290,6 +292,10 @@ static umode_t nvme_ns_attrs_are_visible(struct kobject *kobj,
if (!nvme_ctrl_use_ana(nvme_get_ns_from_dev(dev)->ctrl))
return 0;
}
+ if (a == &dev_attr_queue_depth.attr || a == &dev_attr_numa_nodes.attr) {
+ if (nvme_disk_is_ns_head(dev_to_disk(dev)))
+ return 0;
+ }
#endif
return a->mode;
}
@@ -299,8 +305,22 @@ static const struct attribute_group nvme_ns_attr_group = {
.is_visible = nvme_ns_attrs_are_visible,
};
+#ifdef CONFIG_NVME_MULTIPATH
+static struct attribute *nvme_ns_mpath_attrs[] = {
+ NULL,
+};
+
+const struct attribute_group nvme_ns_mpath_attr_group = {
+ .name = "multipath",
+ .attrs = nvme_ns_mpath_attrs,
+};
+#endif
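The attribute list is intentionally empty, so the "multipath" group only creates a directory under the head gendisk; nvme_mpath_add_sysfs_link() then fills it with one symlink per path namespace, pointing at the corresponding path gendisk device, and nvme_mpath_remove_sysfs_link() drops the entry again when the path goes away. With illustrative device names a two-path head disk ends up with:

	/sys/block/nvme0n1/multipath/nvme0c1n1
	/sys/block/nvme0n1/multipath/nvme0c3n1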
+
const struct attribute_group *nvme_ns_attr_groups[] = {
&nvme_ns_attr_group,
+#ifdef CONFIG_NVME_MULTIPATH
+ &nvme_ns_mpath_attr_group,
+#endif
NULL,
};
@@ -780,10 +800,10 @@ static umode_t nvme_tls_attrs_are_visible(struct kobject *kobj,
return 0;
if (a == &dev_attr_tls_key.attr &&
- !ctrl->opts->tls)
+ !ctrl->opts->tls && !ctrl->opts->concat)
return 0;
if (a == &dev_attr_tls_configured_key.attr &&
- !ctrl->opts->tls_key)
+ (!ctrl->opts->tls_key || ctrl->opts->concat))
return 0;
if (a == &dev_attr_tls_keyring.attr &&
!ctrl->opts->keyring)
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index 8a9131c95a3d..26c459f0198d 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -8,7 +8,6 @@
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/err.h>
-#include <linux/key.h>
#include <linux/nvme-tcp.h>
#include <linux/nvme-keyring.h>
#include <net/sock.h>
@@ -217,6 +216,19 @@ static inline int nvme_tcp_queue_id(struct nvme_tcp_queue *queue)
return queue - queue->ctrl->queues;
}
+static inline bool nvme_tcp_recv_pdu_supported(enum nvme_tcp_pdu_type type)
+{
+ switch (type) {
+ case nvme_tcp_c2h_term:
+ case nvme_tcp_c2h_data:
+ case nvme_tcp_r2t:
+ case nvme_tcp_rsp:
+ return true;
+ default:
+ return false;
+ }
+}
+
/*
* Check if the queue is TLS encrypted
*/
@@ -236,7 +248,7 @@ static inline bool nvme_tcp_tls_configured(struct nvme_ctrl *ctrl)
if (!IS_ENABLED(CONFIG_NVME_TCP_TLS))
return 0;
- return ctrl->opts->tls;
+ return ctrl->opts->tls || ctrl->opts->concat;
}
static inline struct blk_mq_tags *nvme_tcp_tagset(struct nvme_tcp_queue *queue)
@@ -775,7 +787,7 @@ static void nvme_tcp_handle_c2h_term(struct nvme_tcp_queue *queue,
[NVME_TCP_FES_PDU_SEQ_ERR] = "PDU Sequence Error",
[NVME_TCP_FES_HDR_DIGEST_ERR] = "Header Digest Error",
[NVME_TCP_FES_DATA_OUT_OF_RANGE] = "Data Transfer Out Of Range",
- [NVME_TCP_FES_R2T_LIMIT_EXCEEDED] = "R2T Limit Exceeded",
+ [NVME_TCP_FES_DATA_LIMIT_EXCEEDED] = "Data Transfer Limit Exceeded",
[NVME_TCP_FES_UNSUPPORTED_PARAM] = "Unsupported Parameter",
};
@@ -818,6 +830,16 @@ static int nvme_tcp_recv_pdu(struct nvme_tcp_queue *queue, struct sk_buff *skb,
return 0;
hdr = queue->pdu;
+ if (unlikely(hdr->hlen != sizeof(struct nvme_tcp_rsp_pdu))) {
+ if (!nvme_tcp_recv_pdu_supported(hdr->type))
+ goto unsupported_pdu;
+
+ dev_err(queue->ctrl->ctrl.device,
+ "pdu type %d has unexpected header length (%d)\n",
+ hdr->type, hdr->hlen);
+ return -EPROTO;
+ }
+
if (unlikely(hdr->type == nvme_tcp_c2h_term)) {
/*
* C2HTermReq never includes Header or Data digests.
@@ -850,10 +872,13 @@ static int nvme_tcp_recv_pdu(struct nvme_tcp_queue *queue, struct sk_buff *skb,
nvme_tcp_init_recv_ctx(queue);
return nvme_tcp_handle_r2t(queue, (void *)queue->pdu);
default:
- dev_err(queue->ctrl->ctrl.device,
- "unsupported pdu type (%d)\n", hdr->type);
- return -EINVAL;
+ goto unsupported_pdu;
}
+
+unsupported_pdu:
+ dev_err(queue->ctrl->ctrl.device,
+ "unsupported pdu type (%d)\n", hdr->type);
+ return -EINVAL;
}
static inline void nvme_tcp_end_request(struct request *rq, u16 status)
@@ -1495,11 +1520,11 @@ static int nvme_tcp_init_connection(struct nvme_tcp_queue *queue)
msg.msg_flags = MSG_WAITALL;
ret = kernel_recvmsg(queue->sock, &msg, &iov, 1,
iov.iov_len, msg.msg_flags);
- if (ret < sizeof(*icresp)) {
+ if (ret >= 0 && ret < sizeof(*icresp))
+ ret = -ECONNRESET;
+ if (ret < 0) {
pr_warn("queue %d: failed to receive icresp, error %d\n",
nvme_tcp_queue_id(queue), ret);
- if (ret >= 0)
- ret = -ECONNRESET;
goto free_icresp;
}
ret = -ENOTCONN;
@@ -1764,7 +1789,8 @@ static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl, int qid,
queue->cmnd_capsule_len = sizeof(struct nvme_command) +
NVME_TCP_ADMIN_CCSZ;
- ret = sock_create(ctrl->addr.ss_family, SOCK_STREAM,
+ ret = sock_create_kern(current->nsproxy->net_ns,
+ ctrl->addr.ss_family, SOCK_STREAM,
IPPROTO_TCP, &queue->sock);
if (ret) {
dev_err(nctrl->device,
@@ -2034,7 +2060,7 @@ static int nvme_tcp_alloc_admin_queue(struct nvme_ctrl *ctrl)
if (nvme_tcp_tls_configured(ctrl)) {
if (ctrl->opts->tls_key)
pskid = key_serial(ctrl->opts->tls_key);
- else {
+ else if (ctrl->opts->tls) {
pskid = nvme_tls_psk_default(ctrl->opts->keyring,
ctrl->opts->host->nqn,
ctrl->opts->subsysnqn);
@@ -2064,9 +2090,25 @@ static int __nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
{
int i, ret;
- if (nvme_tcp_tls_configured(ctrl) && !ctrl->tls_pskid) {
- dev_err(ctrl->device, "no PSK negotiated\n");
- return -ENOKEY;
+ if (nvme_tcp_tls_configured(ctrl)) {
+ if (ctrl->opts->concat) {
+ /*
+ * The generated PSK is stored in the
+ * fabric options
+ */
+ if (!ctrl->opts->tls_key) {
+ dev_err(ctrl->device, "no PSK generated\n");
+ return -ENOKEY;
+ }
+ if (ctrl->tls_pskid &&
+ ctrl->tls_pskid != key_serial(ctrl->opts->tls_key)) {
+ dev_err(ctrl->device, "Stale PSK id %08x\n", ctrl->tls_pskid);
+ ctrl->tls_pskid = 0;
+ }
+ } else if (!ctrl->tls_pskid) {
+ dev_err(ctrl->device, "no PSK negotiated\n");
+ return -ENOKEY;
+ }
}
for (i = 1; i < ctrl->queue_count; i++) {
@@ -2284,6 +2326,27 @@ static void nvme_tcp_reconnect_or_remove(struct nvme_ctrl *ctrl,
}
}
+/*
+ * The TLS key is set by secure concatenation after negotiation has been
+ * completed on the admin queue. We need to revoke the key when:
+ * - concatenation is enabled (otherwise it's a static key set by the user)
+ * and
+ * - the generated key is present in ctrl->tls_key (otherwise there's nothing
+ * to revoke)
+ * and
+ * - a valid PSK key ID has been set in ctrl->tls_pskid (otherwise TLS
+ * negotiation has not run).
+ *
+ * We cannot always revoke the key as nvme_tcp_alloc_admin_queue() is called
+ * twice during secure concatenation, once on a 'normal' connection to run the
+ * DH-HMAC-CHAP negotiation (which generates the key, so it _must not_ be set),
+ * and once after the negotiation (which uses the key, so it _must_ be set).
+ */
+static bool nvme_tcp_key_revoke_needed(struct nvme_ctrl *ctrl)
+{
+ return ctrl->opts->concat && ctrl->opts->tls_key && ctrl->tls_pskid;
+}
+
static int nvme_tcp_setup_ctrl(struct nvme_ctrl *ctrl, bool new)
{
struct nvmf_ctrl_options *opts = ctrl->opts;
@@ -2293,6 +2356,16 @@ static int nvme_tcp_setup_ctrl(struct nvme_ctrl *ctrl, bool new)
if (ret)
return ret;
+ if (ctrl->opts && ctrl->opts->concat && !ctrl->tls_pskid) {
+ /* See comments for nvme_tcp_key_revoke_needed() */
+ dev_dbg(ctrl->device, "restart admin queue for secure concatenation\n");
+ nvme_stop_keep_alive(ctrl);
+ nvme_tcp_teardown_admin_queue(ctrl, false);
+ ret = nvme_tcp_configure_admin_queue(ctrl, false);
+ if (ret)
+ return ret;
+ }
+
if (ctrl->icdoff) {
ret = -EOPNOTSUPP;
dev_err(ctrl->device, "icdoff is not supported!\n");
@@ -2389,6 +2462,8 @@ static void nvme_tcp_error_recovery_work(struct work_struct *work)
struct nvme_tcp_ctrl, err_work);
struct nvme_ctrl *ctrl = &tcp_ctrl->ctrl;
+ if (nvme_tcp_key_revoke_needed(ctrl))
+ nvme_auth_revoke_tls_key(ctrl);
nvme_stop_keep_alive(ctrl);
flush_work(&ctrl->async_event_work);
nvme_tcp_teardown_io_queues(ctrl, false);
@@ -2429,6 +2504,8 @@ static void nvme_reset_ctrl_work(struct work_struct *work)
container_of(work, struct nvme_ctrl, reset_work);
int ret;
+ if (nvme_tcp_key_revoke_needed(ctrl))
+ nvme_auth_revoke_tls_key(ctrl);
nvme_stop_ctrl(ctrl);
nvme_tcp_teardown_ctrl(ctrl, false);
@@ -2699,6 +2776,7 @@ static int nvme_tcp_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
{
struct nvme_tcp_queue *queue = hctx->driver_data;
struct sock *sk = queue->sock->sk;
+ int ret;
if (!test_bit(NVME_TCP_Q_LIVE, &queue->flags))
return 0;
@@ -2706,9 +2784,9 @@ static int nvme_tcp_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
set_bit(NVME_TCP_Q_POLLING, &queue->flags);
if (sk_can_busy_loop(sk) && skb_queue_empty_lockless(&sk->sk_receive_queue))
sk_busy_loop(sk, true);
- nvme_tcp_try_recv(queue);
+ ret = nvme_tcp_try_recv(queue);
clear_bit(NVME_TCP_Q_POLLING, &queue->flags);
- return queue->nr_cqe;
+ return ret < 0 ? ret : queue->nr_cqe;
}
static int nvme_tcp_get_address(struct nvme_ctrl *ctrl, char *buf, int size)
@@ -2924,7 +3002,7 @@ static struct nvmf_transport_ops nvme_tcp_transport = {
NVMF_OPT_HDR_DIGEST | NVMF_OPT_DATA_DIGEST |
NVMF_OPT_NR_WRITE_QUEUES | NVMF_OPT_NR_POLL_QUEUES |
NVMF_OPT_TOS | NVMF_OPT_HOST_IFACE | NVMF_OPT_TLS |
- NVMF_OPT_KEYRING | NVMF_OPT_TLS_KEY,
+ NVMF_OPT_KEYRING | NVMF_OPT_TLS_KEY | NVMF_OPT_CONCAT,
.create_ctrl = nvme_tcp_create_ctrl,
};
diff --git a/drivers/nvme/host/zns.c b/drivers/nvme/host/zns.c
index 382949e18c6a..cce4c5b55aa9 100644
--- a/drivers/nvme/host/zns.c
+++ b/drivers/nvme/host/zns.c
@@ -146,17 +146,16 @@ static void *nvme_zns_alloc_report_buffer(struct nvme_ns *ns,
return NULL;
}
-static int nvme_zone_parse_entry(struct nvme_ctrl *ctrl,
- struct nvme_ns_head *head,
+static int nvme_zone_parse_entry(struct nvme_ns *ns,
struct nvme_zone_descriptor *entry,
unsigned int idx, report_zones_cb cb,
void *data)
{
+ struct nvme_ns_head *head = ns->head;
struct blk_zone zone = { };
if ((entry->zt & 0xf) != NVME_ZONE_TYPE_SEQWRITE_REQ) {
- dev_err(ctrl->device, "invalid zone type %#x\n",
- entry->zt);
+ dev_err(ns->ctrl->device, "invalid zone type %#x\n", entry->zt);
return -EINVAL;
}
@@ -213,8 +212,7 @@ int nvme_ns_report_zones(struct nvme_ns *ns, sector_t sector,
break;
for (i = 0; i < nz && zone_idx < nr_zones; i++) {
- ret = nvme_zone_parse_entry(ns->ctrl, ns->head,
- &report->entries[i],
+ ret = nvme_zone_parse_entry(ns, &report->entries[i],
zone_idx, cb, data);
if (ret)
goto out_free;
diff --git a/drivers/nvme/target/auth.c b/drivers/nvme/target/auth.c
index b47d675232d2..0b0645ac5df4 100644
--- a/drivers/nvme/target/auth.c
+++ b/drivers/nvme/target/auth.c
@@ -15,6 +15,7 @@
#include <linux/ctype.h>
#include <linux/random.h>
#include <linux/nvme-auth.h>
+#include <linux/nvme-keyring.h>
#include <linux/unaligned.h>
#include "nvmet.h"
@@ -139,7 +140,7 @@ int nvmet_setup_dhgroup(struct nvmet_ctrl *ctrl, u8 dhgroup_id)
return ret;
}
-u8 nvmet_setup_auth(struct nvmet_ctrl *ctrl)
+u8 nvmet_setup_auth(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq)
{
int ret = 0;
struct nvmet_host_link *p;
@@ -165,6 +166,11 @@ u8 nvmet_setup_auth(struct nvmet_ctrl *ctrl)
goto out_unlock;
}
+ if (nvmet_queue_tls_keyid(sq)) {
+ pr_debug("host %s tls enabled\n", ctrl->hostnqn);
+ goto out_unlock;
+ }
+
ret = nvmet_setup_dhgroup(ctrl, host->dhchap_dhgroup_id);
if (ret < 0) {
pr_warn("Failed to setup DH group");
@@ -233,6 +239,9 @@ out_unlock:
void nvmet_auth_sq_free(struct nvmet_sq *sq)
{
cancel_delayed_work(&sq->auth_expired_work);
+#ifdef CONFIG_NVME_TARGET_TCP_TLS
+ sq->tls_key = 0;
+#endif
kfree(sq->dhchap_c1);
sq->dhchap_c1 = NULL;
kfree(sq->dhchap_c2);
@@ -261,6 +270,12 @@ void nvmet_destroy_auth(struct nvmet_ctrl *ctrl)
nvme_auth_free_key(ctrl->ctrl_key);
ctrl->ctrl_key = NULL;
}
+#ifdef CONFIG_NVME_TARGET_TCP_TLS
+ if (ctrl->tls_key) {
+ key_put(ctrl->tls_key);
+ ctrl->tls_key = NULL;
+ }
+#endif
}
bool nvmet_check_auth_status(struct nvmet_req *req)
@@ -542,3 +557,58 @@ int nvmet_auth_ctrl_sesskey(struct nvmet_req *req,
return ret;
}
+
+void nvmet_auth_insert_psk(struct nvmet_sq *sq)
+{
+ int hash_len = nvme_auth_hmac_hash_len(sq->ctrl->shash_id);
+ u8 *psk, *digest, *tls_psk;
+ size_t psk_len;
+ int ret;
+#ifdef CONFIG_NVME_TARGET_TCP_TLS
+ struct key *tls_key = NULL;
+#endif
+
+ ret = nvme_auth_generate_psk(sq->ctrl->shash_id,
+ sq->dhchap_skey,
+ sq->dhchap_skey_len,
+ sq->dhchap_c1, sq->dhchap_c2,
+ hash_len, &psk, &psk_len);
+ if (ret) {
+ pr_warn("%s: ctrl %d qid %d failed to generate PSK, error %d\n",
+ __func__, sq->ctrl->cntlid, sq->qid, ret);
+ return;
+ }
+ ret = nvme_auth_generate_digest(sq->ctrl->shash_id, psk, psk_len,
+ sq->ctrl->subsysnqn,
+ sq->ctrl->hostnqn, &digest);
+ if (ret) {
+ pr_warn("%s: ctrl %d qid %d failed to generate digest, error %d\n",
+ __func__, sq->ctrl->cntlid, sq->qid, ret);
+ goto out_free_psk;
+ }
+ ret = nvme_auth_derive_tls_psk(sq->ctrl->shash_id, psk, psk_len,
+ digest, &tls_psk);
+ if (ret) {
+ pr_warn("%s: ctrl %d qid %d failed to derive TLS PSK, error %d\n",
+ __func__, sq->ctrl->cntlid, sq->qid, ret);
+ goto out_free_digest;
+ }
+#ifdef CONFIG_NVME_TARGET_TCP_TLS
+ tls_key = nvme_tls_psk_refresh(NULL, sq->ctrl->hostnqn, sq->ctrl->subsysnqn,
+ sq->ctrl->shash_id, tls_psk, psk_len, digest);
+ if (IS_ERR(tls_key)) {
+ pr_warn("%s: ctrl %d qid %d failed to refresh key, error %ld\n",
+ __func__, sq->ctrl->cntlid, sq->qid, PTR_ERR(tls_key));
+ tls_key = NULL;
+ kfree_sensitive(tls_psk);
+ }
+ if (sq->ctrl->tls_key)
+ key_put(sq->ctrl->tls_key);
+ sq->ctrl->tls_key = tls_key;
+#endif
+
+out_free_digest:
+ kfree_sensitive(digest);
+out_free_psk:
+ kfree_sensitive(psk);
+}
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index 2e741696f371..71f8d06998d6 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -1618,8 +1618,6 @@ struct nvmet_ctrl *nvmet_alloc_ctrl(struct nvmet_alloc_ctrl_args *args)
}
ctrl->cntlid = ret;
- uuid_copy(&ctrl->hostid, args->hostid);
-
/*
* Discovery controllers may use some arbitrary high value
* in order to cleanup stale discovery sessions
@@ -1647,7 +1645,7 @@ struct nvmet_ctrl *nvmet_alloc_ctrl(struct nvmet_alloc_ctrl_args *args)
if (args->hostid)
uuid_copy(&ctrl->hostid, args->hostid);
- dhchap_status = nvmet_setup_auth(ctrl);
+ dhchap_status = nvmet_setup_auth(ctrl, args->sq);
if (dhchap_status) {
pr_err("Failed to setup authentication, dhchap status %u\n",
dhchap_status);
@@ -1662,11 +1660,12 @@ struct nvmet_ctrl *nvmet_alloc_ctrl(struct nvmet_alloc_ctrl_args *args)
args->status = NVME_SC_SUCCESS;
- pr_info("Created %s controller %d for subsystem %s for NQN %s%s%s.\n",
+ pr_info("Created %s controller %d for subsystem %s for NQN %s%s%s%s.\n",
nvmet_is_disc_subsys(ctrl->subsys) ? "discovery" : "nvm",
ctrl->cntlid, ctrl->subsys->subsysnqn, ctrl->hostnqn,
ctrl->pi_support ? " T10-PI is enabled" : "",
- nvmet_has_auth(ctrl) ? " with DH-HMAC-CHAP" : "");
+ nvmet_has_auth(ctrl, args->sq) ? " with DH-HMAC-CHAP" : "",
+ nvmet_queue_tls_keyid(args->sq) ? ", TLS" : "");
return ctrl;
diff --git a/drivers/nvme/target/debugfs.c b/drivers/nvme/target/debugfs.c
index 220c7391fc19..e4300eb95101 100644
--- a/drivers/nvme/target/debugfs.c
+++ b/drivers/nvme/target/debugfs.c
@@ -132,6 +132,27 @@ static int nvmet_ctrl_host_traddr_show(struct seq_file *m, void *p)
}
NVMET_DEBUGFS_ATTR(nvmet_ctrl_host_traddr);
+#ifdef CONFIG_NVME_TARGET_TCP_TLS
+static int nvmet_ctrl_tls_key_show(struct seq_file *m, void *p)
+{
+ struct nvmet_ctrl *ctrl = m->private;
+ key_serial_t keyid = nvmet_queue_tls_keyid(ctrl->sqs[0]);
+
+ seq_printf(m, "%08x\n", keyid);
+ return 0;
+}
+NVMET_DEBUGFS_ATTR(nvmet_ctrl_tls_key);
+
+static int nvmet_ctrl_tls_concat_show(struct seq_file *m, void *p)
+{
+ struct nvmet_ctrl *ctrl = m->private;
+
+ seq_printf(m, "%d\n", ctrl->concat);
+ return 0;
+}
+NVMET_DEBUGFS_ATTR(nvmet_ctrl_tls_concat);
+#endif
+
int nvmet_debugfs_ctrl_setup(struct nvmet_ctrl *ctrl)
{
char name[32];
@@ -157,6 +178,12 @@ int nvmet_debugfs_ctrl_setup(struct nvmet_ctrl *ctrl)
&nvmet_ctrl_state_fops);
debugfs_create_file("host_traddr", S_IRUSR, ctrl->debugfs_dir, ctrl,
&nvmet_ctrl_host_traddr_fops);
+#ifdef CONFIG_NVME_TARGET_TCP_TLS
+ debugfs_create_file("tls_concat", S_IRUSR, ctrl->debugfs_dir, ctrl,
+ &nvmet_ctrl_tls_concat_fops);
+ debugfs_create_file("tls_key", S_IRUSR, ctrl->debugfs_dir, ctrl,
+ &nvmet_ctrl_tls_key_fops);
+#endif
return 0;
}
diff --git a/drivers/nvme/target/fabrics-cmd-auth.c b/drivers/nvme/target/fabrics-cmd-auth.c
index 2022757f08dc..bf01ec414c55 100644
--- a/drivers/nvme/target/fabrics-cmd-auth.c
+++ b/drivers/nvme/target/fabrics-cmd-auth.c
@@ -43,8 +43,26 @@ static u8 nvmet_auth_negotiate(struct nvmet_req *req, void *d)
data->auth_protocol[0].dhchap.halen,
data->auth_protocol[0].dhchap.dhlen);
req->sq->dhchap_tid = le16_to_cpu(data->t_id);
- if (data->sc_c)
- return NVME_AUTH_DHCHAP_FAILURE_CONCAT_MISMATCH;
+ if (data->sc_c != NVME_AUTH_SECP_NOSC) {
+ if (!IS_ENABLED(CONFIG_NVME_TARGET_TCP_TLS))
+ return NVME_AUTH_DHCHAP_FAILURE_CONCAT_MISMATCH;
+ /* Secure concatenation can only be enabled on the admin queue */
+ if (req->sq->qid)
+ return NVME_AUTH_DHCHAP_FAILURE_CONCAT_MISMATCH;
+ switch (data->sc_c) {
+ case NVME_AUTH_SECP_NEWTLSPSK:
+ if (nvmet_queue_tls_keyid(req->sq))
+ return NVME_AUTH_DHCHAP_FAILURE_CONCAT_MISMATCH;
+ break;
+ case NVME_AUTH_SECP_REPLACETLSPSK:
+ if (!nvmet_queue_tls_keyid(req->sq))
+ return NVME_AUTH_DHCHAP_FAILURE_CONCAT_MISMATCH;
+ break;
+ default:
+ return NVME_AUTH_DHCHAP_FAILURE_CONCAT_MISMATCH;
+ }
+ ctrl->concat = true;
+ }
if (data->napd != 1)
return NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE;
@@ -103,6 +121,12 @@ static u8 nvmet_auth_negotiate(struct nvmet_req *req, void *d)
nvme_auth_dhgroup_name(fallback_dhgid));
ctrl->dh_gid = fallback_dhgid;
}
+ if (ctrl->dh_gid == NVME_AUTH_DHGROUP_NULL && ctrl->concat) {
+ pr_debug("%s: ctrl %d qid %d: NULL DH group invalid "
+ "for secure channel concatenation\n", __func__,
+ ctrl->cntlid, req->sq->qid);
+ return NVME_AUTH_DHCHAP_FAILURE_CONCAT_MISMATCH;
+ }
pr_debug("%s: ctrl %d qid %d: selected DH group %s (%d)\n",
__func__, ctrl->cntlid, req->sq->qid,
nvme_auth_dhgroup_name(ctrl->dh_gid), ctrl->dh_gid);
@@ -148,12 +172,22 @@ static u8 nvmet_auth_reply(struct nvmet_req *req, void *d)
if (memcmp(data->rval, response, data->hl)) {
pr_info("ctrl %d qid %d host response mismatch\n",
ctrl->cntlid, req->sq->qid);
+ pr_debug("ctrl %d qid %d rval %*ph\n",
+ ctrl->cntlid, req->sq->qid, data->hl, data->rval);
+ pr_debug("ctrl %d qid %d response %*ph\n",
+ ctrl->cntlid, req->sq->qid, data->hl, response);
kfree(response);
return NVME_AUTH_DHCHAP_FAILURE_FAILED;
}
kfree(response);
pr_debug("%s: ctrl %d qid %d host authenticated\n",
__func__, ctrl->cntlid, req->sq->qid);
+ if (!data->cvalid && ctrl->concat) {
+ pr_debug("%s: ctrl %d qid %d invalid challenge\n",
+ __func__, ctrl->cntlid, req->sq->qid);
+ return NVME_AUTH_DHCHAP_FAILURE_FAILED;
+ }
+ req->sq->dhchap_s2 = le32_to_cpu(data->seqnum);
if (data->cvalid) {
req->sq->dhchap_c2 = kmemdup(data->rval + data->hl, data->hl,
GFP_KERNEL);
@@ -163,11 +197,23 @@ static u8 nvmet_auth_reply(struct nvmet_req *req, void *d)
pr_debug("%s: ctrl %d qid %d challenge %*ph\n",
__func__, ctrl->cntlid, req->sq->qid, data->hl,
req->sq->dhchap_c2);
- } else {
+ }
+ /*
+ * NVMe Base Spec 2.2 section 8.3.4.5.4: DH-HMAC-CHAP_Reply message
+ * Sequence Number (SEQNUM): [ .. ]
+ * The value 0h is used to indicate that bidirectional authentication
+ * is not performed, but a challenge value C2 is carried in order to
+ * generate a pre-shared key (PSK) for subsequent establishment of a
+ * secure channel.
+ */
+ if (req->sq->dhchap_s2 == 0) {
+ if (ctrl->concat)
+ nvmet_auth_insert_psk(req->sq);
req->sq->authenticated = true;
+ kfree(req->sq->dhchap_c2);
req->sq->dhchap_c2 = NULL;
- }
- req->sq->dhchap_s2 = le32_to_cpu(data->seqnum);
+ } else if (!data->cvalid)
+ req->sq->authenticated = true;
return 0;
}
@@ -246,7 +292,7 @@ void nvmet_execute_auth_send(struct nvmet_req *req)
pr_debug("%s: ctrl %d qid %d reset negotiation\n",
__func__, ctrl->cntlid, req->sq->qid);
if (!req->sq->qid) {
- dhchap_status = nvmet_setup_auth(ctrl);
+ dhchap_status = nvmet_setup_auth(ctrl, req->sq);
if (dhchap_status) {
pr_err("ctrl %d qid 0 failed to setup re-authentication\n",
ctrl->cntlid);
@@ -303,6 +349,8 @@ void nvmet_execute_auth_send(struct nvmet_req *req)
}
goto done_kfree;
case NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2:
+ if (ctrl->concat)
+ nvmet_auth_insert_psk(req->sq);
req->sq->authenticated = true;
pr_debug("%s: ctrl %d qid %d ctrl authenticated\n",
__func__, ctrl->cntlid, req->sq->qid);
diff --git a/drivers/nvme/target/fabrics-cmd.c b/drivers/nvme/target/fabrics-cmd.c
index eb406c90c167..f012bdf89850 100644
--- a/drivers/nvme/target/fabrics-cmd.c
+++ b/drivers/nvme/target/fabrics-cmd.c
@@ -234,10 +234,26 @@ err:
return ret;
}
-static u32 nvmet_connect_result(struct nvmet_ctrl *ctrl)
+static u32 nvmet_connect_result(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq)
{
+ bool needs_auth = nvmet_has_auth(ctrl, sq);
+ key_serial_t keyid = nvmet_queue_tls_keyid(sq);
+
+ /* Do not authenticate I/O queues for secure concatenation */
+ if (ctrl->concat && sq->qid)
+ needs_auth = false;
+
+ if (keyid)
+ pr_debug("%s: ctrl %d qid %d should %sauthenticate, tls psk %08x\n",
+ __func__, ctrl->cntlid, sq->qid,
+ needs_auth ? "" : "not ", keyid);
+ else
+ pr_debug("%s: ctrl %d qid %d should %sauthenticate%s\n",
+ __func__, ctrl->cntlid, sq->qid,
+ needs_auth ? "" : "not ",
+ ctrl->concat ? ", secure concatenation" : "");
return (u32)ctrl->cntlid |
- (nvmet_has_auth(ctrl) ? NVME_CONNECT_AUTHREQ_ATR : 0);
+ (needs_auth ? NVME_CONNECT_AUTHREQ_ATR : 0);
}
static void nvmet_execute_admin_connect(struct nvmet_req *req)
@@ -247,6 +263,7 @@ static void nvmet_execute_admin_connect(struct nvmet_req *req)
struct nvmet_ctrl *ctrl = NULL;
struct nvmet_alloc_ctrl_args args = {
.port = req->port,
+ .sq = req->sq,
.ops = req->ops,
.p2p_client = req->p2p_client,
.kato = le32_to_cpu(c->kato),
@@ -299,7 +316,7 @@ static void nvmet_execute_admin_connect(struct nvmet_req *req)
goto out;
}
- args.result = cpu_to_le32(nvmet_connect_result(ctrl));
+ args.result = cpu_to_le32(nvmet_connect_result(ctrl, req->sq));
out:
kfree(d);
complete:
@@ -357,7 +374,7 @@ static void nvmet_execute_io_connect(struct nvmet_req *req)
goto out_ctrl_put;
pr_debug("adding queue %d to ctrl %d.\n", qid, ctrl->cntlid);
- req->cqe->result.u32 = cpu_to_le32(nvmet_connect_result(ctrl));
+ req->cqe->result.u32 = cpu_to_le32(nvmet_connect_result(ctrl, req->sq));
out:
kfree(d);
complete:
diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
index 3ef4beacde32..7318b736d414 100644
--- a/drivers/nvme/target/fc.c
+++ b/drivers/nvme/target/fc.c
@@ -172,20 +172,6 @@ struct nvmet_fc_tgt_assoc {
struct work_struct del_work;
};
-
-static inline int
-nvmet_fc_iodnum(struct nvmet_fc_ls_iod *iodptr)
-{
- return (iodptr - iodptr->tgtport->iod);
-}
-
-static inline int
-nvmet_fc_fodnum(struct nvmet_fc_fcp_iod *fodptr)
-{
- return (fodptr - fodptr->queue->fod);
-}
-
-
/*
* Association and Connection IDs:
*
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index a9d112d34d4f..a5c41144667c 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -162,7 +162,7 @@ static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
}
iod->req.sg = iod->sg_table.sgl;
- iod->req.sg_cnt = blk_rq_map_sg(req->q, req, iod->sg_table.sgl);
+ iod->req.sg_cnt = blk_rq_map_sg(req, iod->sg_table.sgl);
iod->req.transfer_len = blk_rq_payload_bytes(req);
}
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index d2c1233981e1..b6db8b74dc4a 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -165,6 +165,9 @@ struct nvmet_sq {
u8 *dhchap_skey;
int dhchap_skey_len;
#endif
+#ifdef CONFIG_NVME_TARGET_TCP_TLS
+ struct key *tls_key;
+#endif
struct completion free_done;
struct completion confirm_done;
};
@@ -289,6 +292,7 @@ struct nvmet_ctrl {
u64 err_counter;
struct nvme_error_slot slots[NVMET_ERROR_LOG_SLOTS];
bool pi_support;
+ bool concat;
#ifdef CONFIG_NVME_TARGET_AUTH
struct nvme_dhchap_key *host_key;
struct nvme_dhchap_key *ctrl_key;
@@ -298,6 +302,9 @@ struct nvmet_ctrl {
u8 *dh_key;
size_t dh_keysize;
#endif
+#ifdef CONFIG_NVME_TARGET_TCP_TLS
+ struct key *tls_key;
+#endif
struct nvmet_pr_log_mgr pr_log_mgr;
};
@@ -583,6 +590,7 @@ void nvmet_update_cc(struct nvmet_ctrl *ctrl, u32 new);
struct nvmet_alloc_ctrl_args {
struct nvmet_port *port;
+ struct nvmet_sq *sq;
char *subsysnqn;
char *hostnqn;
uuid_t *hostid;
@@ -647,7 +655,6 @@ void nvmet_subsys_disc_changed(struct nvmet_subsys *subsys,
struct nvmet_host *host);
void nvmet_add_async_event(struct nvmet_ctrl *ctrl, u8 event_type,
u8 event_info, u8 log_page);
-bool nvmet_subsys_nsid_exists(struct nvmet_subsys *subsys, u32 nsid);
#define NVMET_MIN_QUEUE_SIZE 16
#define NVMET_MAX_QUEUE_SIZE 1024
@@ -820,7 +827,7 @@ static inline u8 nvmet_cc_iocqes(u32 cc)
/* Convert a 32-bit number to a 16-bit 0's based number */
static inline __le16 to0based(u32 a)
{
- return cpu_to_le16(max(1U, min(1U << 16, a)) - 1);
+ return cpu_to_le16(clamp(a, 1U, 1U << 16) - 1);
}
static inline bool nvmet_ns_has_pi(struct nvmet_ns *ns)
@@ -852,6 +859,22 @@ static inline void nvmet_req_bio_put(struct nvmet_req *req, struct bio *bio)
bio_put(bio);
}
+#ifdef CONFIG_NVME_TARGET_TCP_TLS
+static inline key_serial_t nvmet_queue_tls_keyid(struct nvmet_sq *sq)
+{
+ return sq->tls_key ? key_serial(sq->tls_key) : 0;
+}
+static inline void nvmet_sq_put_tls_key(struct nvmet_sq *sq)
+{
+ if (sq->tls_key) {
+ key_put(sq->tls_key);
+ sq->tls_key = NULL;
+ }
+}
+#else
+static inline key_serial_t nvmet_queue_tls_keyid(struct nvmet_sq *sq) { return 0; }
+static inline void nvmet_sq_put_tls_key(struct nvmet_sq *sq) {}
+#endif
#ifdef CONFIG_NVME_TARGET_AUTH
u32 nvmet_auth_send_data_len(struct nvmet_req *req);
void nvmet_execute_auth_send(struct nvmet_req *req);
@@ -860,7 +883,7 @@ void nvmet_execute_auth_receive(struct nvmet_req *req);
int nvmet_auth_set_key(struct nvmet_host *host, const char *secret,
bool set_ctrl);
int nvmet_auth_set_host_hash(struct nvmet_host *host, const char *hash);
-u8 nvmet_setup_auth(struct nvmet_ctrl *ctrl);
+u8 nvmet_setup_auth(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq);
void nvmet_auth_sq_init(struct nvmet_sq *sq);
void nvmet_destroy_auth(struct nvmet_ctrl *ctrl);
void nvmet_auth_sq_free(struct nvmet_sq *sq);
@@ -870,16 +893,18 @@ int nvmet_auth_host_hash(struct nvmet_req *req, u8 *response,
unsigned int hash_len);
int nvmet_auth_ctrl_hash(struct nvmet_req *req, u8 *response,
unsigned int hash_len);
-static inline bool nvmet_has_auth(struct nvmet_ctrl *ctrl)
+static inline bool nvmet_has_auth(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq)
{
- return ctrl->host_key != NULL;
+ return ctrl->host_key != NULL && !nvmet_queue_tls_keyid(sq);
}
int nvmet_auth_ctrl_exponential(struct nvmet_req *req,
u8 *buf, int buf_size);
int nvmet_auth_ctrl_sesskey(struct nvmet_req *req,
u8 *buf, int buf_size);
+void nvmet_auth_insert_psk(struct nvmet_sq *sq);
#else
-static inline u8 nvmet_setup_auth(struct nvmet_ctrl *ctrl)
+static inline u8 nvmet_setup_auth(struct nvmet_ctrl *ctrl,
+ struct nvmet_sq *sq)
{
return 0;
}
@@ -892,11 +917,13 @@ static inline bool nvmet_check_auth_status(struct nvmet_req *req)
{
return true;
}
-static inline bool nvmet_has_auth(struct nvmet_ctrl *ctrl)
+static inline bool nvmet_has_auth(struct nvmet_ctrl *ctrl,
+ struct nvmet_sq *sq)
{
return false;
}
static inline const char *nvmet_dhchap_dhgroup_name(u8 dhgid) { return NULL; }
+static inline void nvmet_auth_insert_psk(struct nvmet_sq *sq) {};
#endif
int nvmet_pr_init_ns(struct nvmet_ns *ns);
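For orientation only (not part of the patch): a minimal sketch of how a transport might consult and release the per-queue TLS key through the helpers added to nvmet.h above. The helper names and the sq->tls_key field come from the hunks in this patch; the surrounding function name and calling context are hypothetical, and the usual nvmet transport headers are assumed.

/* Illustrative only: query and drop the queue's TLS PSK reference on teardown.
 * Both helpers compile to no-ops when CONFIG_NVME_TARGET_TCP_TLS is not set. */
static void example_queue_teardown(struct nvmet_sq *sq)
{
	if (nvmet_queue_tls_keyid(sq))
		pr_debug("releasing TLS PSK %08x\n", nvmet_queue_tls_keyid(sq));
	nvmet_sq_put_tls_key(sq);	/* key_put() + NULL the field, if present */
}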
diff --git a/drivers/nvme/target/pci-epf.c b/drivers/nvme/target/pci-epf.c
index 565d2bd36dcd..b54b3fdbe389 100644
--- a/drivers/nvme/target/pci-epf.c
+++ b/drivers/nvme/target/pci-epf.c
@@ -1265,15 +1265,12 @@ static u16 nvmet_pci_epf_create_cq(struct nvmet_ctrl *tctrl,
struct nvmet_pci_epf_queue *cq = &ctrl->cq[cqid];
u16 status;
- if (test_and_set_bit(NVMET_PCI_EPF_Q_LIVE, &cq->flags))
+ if (test_bit(NVMET_PCI_EPF_Q_LIVE, &cq->flags))
return NVME_SC_QID_INVALID | NVME_STATUS_DNR;
if (!(flags & NVME_QUEUE_PHYS_CONTIG))
return NVME_SC_INVALID_QUEUE | NVME_STATUS_DNR;
- if (flags & NVME_CQ_IRQ_ENABLED)
- set_bit(NVMET_PCI_EPF_Q_IRQ_ENABLED, &cq->flags);
-
cq->pci_addr = pci_addr;
cq->qid = cqid;
cq->depth = qsize + 1;
@@ -1290,24 +1287,27 @@ static u16 nvmet_pci_epf_create_cq(struct nvmet_ctrl *tctrl,
cq->qes = ctrl->io_cqes;
cq->pci_size = cq->qes * cq->depth;
- cq->iv = nvmet_pci_epf_add_irq_vector(ctrl, vector);
- if (!cq->iv) {
- status = NVME_SC_INTERNAL | NVME_STATUS_DNR;
- goto err;
+ if (flags & NVME_CQ_IRQ_ENABLED) {
+ cq->iv = nvmet_pci_epf_add_irq_vector(ctrl, vector);
+ if (!cq->iv)
+ return NVME_SC_INTERNAL | NVME_STATUS_DNR;
+ set_bit(NVMET_PCI_EPF_Q_IRQ_ENABLED, &cq->flags);
}
status = nvmet_cq_create(tctrl, &cq->nvme_cq, cqid, cq->depth);
if (status != NVME_SC_SUCCESS)
goto err;
+ set_bit(NVMET_PCI_EPF_Q_LIVE, &cq->flags);
+
dev_dbg(ctrl->dev, "CQ[%u]: %u entries of %zu B, IRQ vector %u\n",
cqid, qsize, cq->qes, cq->vector);
return NVME_SC_SUCCESS;
err:
- clear_bit(NVMET_PCI_EPF_Q_IRQ_ENABLED, &cq->flags);
- clear_bit(NVMET_PCI_EPF_Q_LIVE, &cq->flags);
+ if (test_and_clear_bit(NVMET_PCI_EPF_Q_IRQ_ENABLED, &cq->flags))
+ nvmet_pci_epf_remove_irq_vector(ctrl, cq->vector);
return status;
}
@@ -1333,7 +1333,7 @@ static u16 nvmet_pci_epf_create_sq(struct nvmet_ctrl *tctrl,
struct nvmet_pci_epf_queue *sq = &ctrl->sq[sqid];
u16 status;
- if (test_and_set_bit(NVMET_PCI_EPF_Q_LIVE, &sq->flags))
+ if (test_bit(NVMET_PCI_EPF_Q_LIVE, &sq->flags))
return NVME_SC_QID_INVALID | NVME_STATUS_DNR;
if (!(flags & NVME_QUEUE_PHYS_CONTIG))
@@ -1355,7 +1355,7 @@ static u16 nvmet_pci_epf_create_sq(struct nvmet_ctrl *tctrl,
status = nvmet_sq_create(tctrl, &sq->nvme_sq, sqid, sq->depth);
if (status != NVME_SC_SUCCESS)
- goto out_clear_bit;
+ return status;
sq->iod_wq = alloc_workqueue("sq%d_wq", WQ_UNBOUND,
min_t(int, sq->depth, WQ_MAX_ACTIVE), sqid);
@@ -1365,6 +1365,8 @@ static u16 nvmet_pci_epf_create_sq(struct nvmet_ctrl *tctrl,
goto out_destroy_sq;
}
+ set_bit(NVMET_PCI_EPF_Q_LIVE, &sq->flags);
+
dev_dbg(ctrl->dev, "SQ[%u]: %u entries of %zu B\n",
sqid, qsize, sq->qes);
@@ -1372,8 +1374,6 @@ static u16 nvmet_pci_epf_create_sq(struct nvmet_ctrl *tctrl,
out_destroy_sq:
nvmet_sq_destroy(&sq->nvme_sq);
-out_clear_bit:
- clear_bit(NVMET_PCI_EPF_Q_LIVE, &sq->flags);
return status;
}
@@ -1385,7 +1385,6 @@ static u16 nvmet_pci_epf_delete_sq(struct nvmet_ctrl *tctrl, u16 sqid)
if (!test_and_clear_bit(NVMET_PCI_EPF_Q_LIVE, &sq->flags))
return NVME_SC_QID_INVALID | NVME_STATUS_DNR;
- flush_workqueue(sq->iod_wq);
destroy_workqueue(sq->iod_wq);
sq->iod_wq = NULL;
@@ -2129,8 +2128,15 @@ static int nvmet_pci_epf_configure_bar(struct nvmet_pci_epf *nvme_epf)
return -ENODEV;
}
- if (epc_features->bar[BAR_0].only_64bit)
- epf->bar[BAR_0].flags |= PCI_BASE_ADDRESS_MEM_TYPE_64;
+ /*
+ * While NVMe PCIe Transport Specification 1.1, section 2.1.10, claims
+ * that the BAR0 type is Implementation Specific, in NVMe 1.1, the type
+ * is required to be 64-bit. Thus, for interoperability, always set the
+ * type to 64-bit. In the rare case that the PCI EPC does not support
+ * configuring BAR0 as 64-bit, the call to pci_epc_set_bar() will fail,
+ * and we will return failure back to the user.
+ */
+ epf->bar[BAR_0].flags |= PCI_BASE_ADDRESS_MEM_TYPE_64;
/*
* Calculate the size of the register bar: NVMe registers first with
diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
index 7c51c2a8c109..f2d0c920269b 100644
--- a/drivers/nvme/target/tcp.c
+++ b/drivers/nvme/target/tcp.c
@@ -8,7 +8,6 @@
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/err.h>
-#include <linux/key.h>
#include <linux/nvme-tcp.h>
#include <linux/nvme-keyring.h>
#include <net/sock.h>
@@ -571,10 +570,16 @@ static void nvmet_tcp_queue_response(struct nvmet_req *req)
struct nvmet_tcp_cmd *cmd =
container_of(req, struct nvmet_tcp_cmd, req);
struct nvmet_tcp_queue *queue = cmd->queue;
+ enum nvmet_tcp_recv_state queue_state;
+ struct nvmet_tcp_cmd *queue_cmd;
struct nvme_sgl_desc *sgl;
u32 len;
- if (unlikely(cmd == queue->cmd)) {
+ /* Pairs with store_release in nvmet_prepare_receive_pdu() */
+ queue_state = smp_load_acquire(&queue->rcv_state);
+ queue_cmd = READ_ONCE(queue->cmd);
+
+ if (unlikely(cmd == queue_cmd)) {
sgl = &cmd->req.cmd->common.dptr.sgl;
len = le32_to_cpu(sgl->length);
@@ -583,7 +588,7 @@ static void nvmet_tcp_queue_response(struct nvmet_req *req)
* Avoid using helpers, this might happen before
* nvmet_req_init is completed.
*/
- if (queue->rcv_state == NVMET_TCP_RECV_PDU &&
+ if (queue_state == NVMET_TCP_RECV_PDU &&
len && len <= cmd->req.port->inline_data_size &&
nvme_is_write(cmd->req.cmd))
return;
@@ -847,8 +852,9 @@ static void nvmet_prepare_receive_pdu(struct nvmet_tcp_queue *queue)
{
queue->offset = 0;
queue->left = sizeof(struct nvme_tcp_hdr);
- queue->cmd = NULL;
- queue->rcv_state = NVMET_TCP_RECV_PDU;
+ WRITE_ONCE(queue->cmd, NULL);
+ /* Ensure rcv_state is visible only after queue->cmd is set */
+ smp_store_release(&queue->rcv_state, NVMET_TCP_RECV_PDU);
}
static void nvmet_tcp_free_crypto(struct nvmet_tcp_queue *queue)
@@ -1073,10 +1079,11 @@ static int nvmet_tcp_done_recv_pdu(struct nvmet_tcp_queue *queue)
if (unlikely(!nvmet_req_init(req, &queue->nvme_cq,
&queue->nvme_sq, &nvmet_tcp_ops))) {
- pr_err("failed cmd %p id %d opcode %d, data_len: %d\n",
+ pr_err("failed cmd %p id %d opcode %d, data_len: %d, status: %04x\n",
req->cmd, req->cmd->common.command_id,
req->cmd->common.opcode,
- le32_to_cpu(req->cmd->common.dptr.sgl.length));
+ le32_to_cpu(req->cmd->common.dptr.sgl.length),
+ le16_to_cpu(req->cqe->status));
nvmet_tcp_handle_req_failure(queue, queue->cmd, req);
return 0;
@@ -1602,6 +1609,7 @@ static void nvmet_tcp_release_queue_work(struct work_struct *w)
/* stop accepting incoming data */
queue->rcv_state = NVMET_TCP_RECV_ERR;
+ nvmet_sq_put_tls_key(&queue->nvme_sq);
nvmet_tcp_uninit_data_in_cmds(queue);
nvmet_sq_destroy(&queue->nvme_sq);
cancel_work_sync(&queue->io_work);
@@ -1787,6 +1795,27 @@ static int nvmet_tcp_try_peek_pdu(struct nvmet_tcp_queue *queue)
return 0;
}
+static int nvmet_tcp_tls_key_lookup(struct nvmet_tcp_queue *queue,
+ key_serial_t peerid)
+{
+ struct key *tls_key = nvme_tls_key_lookup(peerid);
+ int status = 0;
+
+ if (IS_ERR(tls_key)) {
+ pr_warn("%s: queue %d failed to lookup key %x\n",
+ __func__, queue->idx, peerid);
+ spin_lock_bh(&queue->state_lock);
+ queue->state = NVMET_TCP_Q_FAILED;
+ spin_unlock_bh(&queue->state_lock);
+ status = PTR_ERR(tls_key);
+ } else {
+ pr_debug("%s: queue %d using TLS PSK %x\n",
+ __func__, queue->idx, peerid);
+ queue->nvme_sq.tls_key = tls_key;
+ }
+ return status;
+}
+
static void nvmet_tcp_tls_handshake_done(void *data, int status,
key_serial_t peerid)
{
@@ -1807,6 +1836,10 @@ static void nvmet_tcp_tls_handshake_done(void *data, int status,
spin_unlock_bh(&queue->state_lock);
cancel_delayed_work_sync(&queue->tls_handshake_tmo_work);
+
+ if (!status)
+ status = nvmet_tcp_tls_key_lookup(queue, peerid);
+
if (status)
nvmet_tcp_schedule_release_queue(queue);
else
diff --git a/drivers/nvmem/brcm_nvram.c b/drivers/nvmem/brcm_nvram.c
index b810df727b44..b4cf245fb246 100644
--- a/drivers/nvmem/brcm_nvram.c
+++ b/drivers/nvmem/brcm_nvram.c
@@ -100,7 +100,7 @@ static int brcm_nvram_read_post_process_macaddr(void *context, const char *id, i
{
u8 mac[ETH_ALEN];
- if (bytes != 3 * ETH_ALEN - 1)
+ if (bytes != MAC_ADDR_STR_LEN)
return -EINVAL;
if (!mac_pton(buf, mac))
diff --git a/drivers/nvmem/layouts/u-boot-env.c b/drivers/nvmem/layouts/u-boot-env.c
index 731e6f4f12b2..436426d4e8f9 100644
--- a/drivers/nvmem/layouts/u-boot-env.c
+++ b/drivers/nvmem/layouts/u-boot-env.c
@@ -37,7 +37,7 @@ static int u_boot_env_read_post_process_ethaddr(void *context, const char *id, i
{
u8 mac[ETH_ALEN];
- if (bytes != 3 * ETH_ALEN - 1)
+ if (bytes != MAC_ADDR_STR_LEN)
return -EINVAL;
if (!mac_pton(buf, mac))
diff --git a/drivers/of/base.c b/drivers/of/base.c
index af6c68bbb427..e37b088f1fad 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -824,6 +824,33 @@ struct device_node *of_get_child_by_name(const struct device_node *node,
}
EXPORT_SYMBOL(of_get_child_by_name);
+/**
+ * of_get_available_child_by_name - Find the available child node by name for a given parent
+ * @node: parent node
+ * @name: child name to look for.
+ *
+ * This function looks for child node for given matching name and checks the
+ * device's availability for use.
+ *
+ * Return: A node pointer if found, with refcount incremented, use
+ * of_node_put() on it when done.
+ * Returns NULL if node is not found.
+ */
+struct device_node *of_get_available_child_by_name(const struct device_node *node,
+ const char *name)
+{
+ struct device_node *child;
+
+ child = of_get_child_by_name(node, name);
+ if (child && !of_device_is_available(child)) {
+ of_node_put(child);
+ return NULL;
+ }
+
+ return child;
+}
+EXPORT_SYMBOL(of_get_available_child_by_name);
+
struct device_node *__of_find_node_by_path(const struct device_node *parent,
const char *path)
{
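A brief usage sketch for the new of_get_available_child_by_name() helper added above (not part of the patch): the child node name "codec" and the wrapper function are made up for illustration; the lookup/refcount behaviour follows the kernel-doc in the hunk.

/* Illustrative only: find an enabled ("available") child node by name and
 * release the reference taken by the helper once we are done with it. */
static int example_probe_child(struct device_node *np)
{
	struct device_node *child;

	child = of_get_available_child_by_name(np, "codec");
	if (!child)
		return -ENODEV;	/* missing or status != "okay" */

	/* ... use the child node ... */

	of_node_put(child);
	return 0;
}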
diff --git a/drivers/of/device.c b/drivers/of/device.c
index edf3be197265..5053e5d532cc 100644
--- a/drivers/of/device.c
+++ b/drivers/of/device.c
@@ -99,6 +99,11 @@ int of_dma_configure_id(struct device *dev, struct device_node *np,
bool coherent, set_map = false;
int ret;
+ if (dev->dma_range_map) {
+ dev_dbg(dev, "dma_range_map already set\n");
+ goto skip_map;
+ }
+
if (np == dev->of_node)
bus_np = __of_get_dma_parent(np);
else
@@ -119,7 +124,7 @@ int of_dma_configure_id(struct device *dev, struct device_node *np,
end = dma_range_map_max(map);
set_map = true;
}
-
+skip_map:
/*
* If @dev is expected to be DMA-capable then the bus code that created
* it should have initialised its dma_mask pointer by this point. For
diff --git a/drivers/of/of_reserved_mem.c b/drivers/of/of_reserved_mem.c
index 75e819f66a56..ee2e31522d7e 100644
--- a/drivers/of/of_reserved_mem.c
+++ b/drivers/of/of_reserved_mem.c
@@ -415,12 +415,12 @@ static int __init __reserved_mem_alloc_size(unsigned long node, const char *unam
prop = of_get_flat_dt_prop(node, "alignment", &len);
if (prop) {
- if (len != dt_root_size_cells * sizeof(__be32)) {
+ if (len != dt_root_addr_cells * sizeof(__be32)) {
pr_err("invalid alignment property in '%s' node.\n",
uname);
return -EINVAL;
}
- align = dt_mem_next_cell(dt_root_size_cells, &prop);
+ align = dt_mem_next_cell(dt_root_addr_cells, &prop);
}
nomap = of_get_flat_dt_prop(node, "no-map", NULL) != NULL;
diff --git a/drivers/of/property.c b/drivers/of/property.c
index 208d922cc24c..c1feb631e383 100644
--- a/drivers/of/property.c
+++ b/drivers/of/property.c
@@ -148,6 +148,39 @@ static void *of_find_property_value_of_size(const struct device_node *np,
}
/**
+ * of_property_read_u16_index - Find and read a u16 from a multi-value property.
+ *
+ * @np: device node from which the property value is to be read.
+ * @propname: name of the property to be searched.
+ * @index: index of the u16 in the list of values
+ * @out_value: pointer to return value, modified only if no error.
+ *
+ * Search for a property in a device node and read nth 16-bit value from
+ * it.
+ *
+ * Return: 0 on success, -EINVAL if the property does not exist,
+ * -ENODATA if property does not have a value, and -EOVERFLOW if the
+ * property data isn't large enough.
+ *
+ * The out_value is modified only if a valid u16 value can be decoded.
+ */
+int of_property_read_u16_index(const struct device_node *np,
+ const char *propname,
+ u32 index, u16 *out_value)
+{
+ const u16 *val = of_find_property_value_of_size(np, propname,
+ ((index + 1) * sizeof(*out_value)),
+ 0, NULL);
+
+ if (IS_ERR(val))
+ return PTR_ERR(val);
+
+ *out_value = be16_to_cpup(((__be16 *)val) + index);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(of_property_read_u16_index);
+
+/**
* of_property_read_u32_index - Find and read a u32 from a multi-value property.
*
* @np: device node from which the property value is to be read.
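A usage sketch for the new of_property_read_u16_index() exported above (not part of the patch): the property name and wrapper function are hypothetical, while the signature and error codes follow the definition and kernel-doc in the hunk.

/* Illustrative only: read the third u16 from a multi-value property. */
static int example_read_u16(const struct device_node *np)
{
	u16 val;
	int err;

	err = of_property_read_u16_index(np, "example-u16-list", 2, &val);
	if (err)
		return err;	/* -EINVAL, -ENODATA or -EOVERFLOW per the kernel-doc */

	pr_info("value[2] = %u\n", val);
	return 0;
}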
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig
index 2fbd379923fd..5c3054aaec8c 100644
--- a/drivers/pci/Kconfig
+++ b/drivers/pci/Kconfig
@@ -203,6 +203,12 @@ config PCI_P2PDMA
P2P DMA transactions must be between devices behind the same root
port.
+ Enabling this option will reduce the entropy of x86 KASLR memory
+ regions. For example - on a 46 bit system, the entropy goes down
+ from 16 bits to 15 bits. The actual reduction in entropy depends
+ on the physical address bits, on processor features, kernel config
+ (5 level page table) and physical memory present on the system.
+
If unsure, say N.
config PCI_LABEL
diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c
index 6084b38bdda1..44d7f4339306 100644
--- a/drivers/pci/controller/pci-hyperv.c
+++ b/drivers/pci/controller/pci-hyperv.c
@@ -1757,8 +1757,7 @@ static int hv_compose_multi_msi_req_get_cpu(void)
spin_lock_irqsave(&multi_msi_cpu_lock, flags);
- cpu_next = cpumask_next_wrap(cpu_next, cpu_online_mask, nr_cpu_ids,
- false);
+ cpu_next = cpumask_next_wrap(cpu_next, cpu_online_mask);
cpu = cpu_next;
spin_unlock_irqrestore(&multi_msi_cpu_lock, flags);
diff --git a/drivers/pci/controller/vmd.c b/drivers/pci/controller/vmd.c
index 9d9596947350..e619accca49d 100644
--- a/drivers/pci/controller/vmd.c
+++ b/drivers/pci/controller/vmd.c
@@ -17,6 +17,8 @@
#include <linux/rculist.h>
#include <linux/rcupdate.h>
+#include <xen/xen.h>
+
#include <asm/irqdomain.h>
#define VMD_CFGBAR 0
@@ -970,6 +972,24 @@ static int vmd_probe(struct pci_dev *dev, const struct pci_device_id *id)
struct vmd_dev *vmd;
int err;
+ if (xen_domain()) {
+ /*
+ * Xen doesn't have knowledge about devices in the VMD bus
+ * because the config space of devices behind the VMD bridge is
+ * not known to Xen, and hence Xen cannot discover or configure
+ * them in any way.
+ *
+ * Bypass of MSI remapping won't work in that case as direct
+ * write by Linux to the MSI entries won't result in functional
+ * interrupts, as Xen is the entity that manages the host
+ * interrupt controller and must configure interrupts. However
+ * multiplexing of interrupts by the VMD bridge will work under
+ * Xen, so force the usage of that mode which must always be
+ * supported by VMD bridges.
+ */
+ features &= ~VMD_FEAT_CAN_BYPASS_MSI_REMAP;
+ }
+
if (resource_size(&dev->resource[VMD_CFGBAR]) < (1 << 20))
return -ENOMEM;
diff --git a/drivers/pci/msi/msi.c b/drivers/pci/msi/msi.c
index 2f647cac4cae..6569ba3577fe 100644
--- a/drivers/pci/msi/msi.c
+++ b/drivers/pci/msi/msi.c
@@ -10,12 +10,12 @@
#include <linux/err.h>
#include <linux/export.h>
#include <linux/irq.h>
+#include <linux/irqdomain.h>
#include "../pci.h"
#include "msi.h"
int pci_msi_enable = 1;
-int pci_msi_ignore_mask;
/**
* pci_msi_supported - check whether MSI may be enabled on a device
@@ -295,8 +295,7 @@ static int msi_setup_msi_desc(struct pci_dev *dev, int nvec,
/* Lies, damned lies, and MSIs */
if (dev->dev_flags & PCI_DEV_FLAGS_HAS_MSI_MASKING)
control |= PCI_MSI_FLAGS_MASKBIT;
- /* Respect XEN's mask disabling */
- if (pci_msi_ignore_mask)
+ if (pci_msi_domain_supports(dev, MSI_FLAG_NO_MASK, DENY_LEGACY))
control &= ~PCI_MSI_FLAGS_MASKBIT;
desc.nvec_used = nvec;
@@ -609,12 +608,13 @@ void msix_prepare_msi_desc(struct pci_dev *dev, struct msi_desc *desc)
desc->pci.msi_attrib.is_64 = 1;
desc->pci.msi_attrib.default_irq = dev->irq;
desc->pci.mask_base = dev->msix_base;
- desc->pci.msi_attrib.can_mask = !pci_msi_ignore_mask &&
- !desc->pci.msi_attrib.is_virtual;
- if (desc->pci.msi_attrib.can_mask) {
+
+ if (!pci_msi_domain_supports(dev, MSI_FLAG_NO_MASK, DENY_LEGACY) &&
+ !desc->pci.msi_attrib.is_virtual) {
void __iomem *addr = pci_msix_desc_addr(desc);
+ desc->pci.msi_attrib.can_mask = 1;
desc->pci.msix_ctrl = readl(addr + PCI_MSIX_ENTRY_VECTOR_CTRL);
}
}
@@ -659,9 +659,6 @@ static void msix_mask_all(void __iomem *base, int tsize)
u32 ctrl = PCI_MSIX_ENTRY_CTRL_MASKBIT;
int i;
- if (pci_msi_ignore_mask)
- return;
-
for (i = 0; i < tsize; i++, base += PCI_MSIX_ENTRY_SIZE)
writel(ctrl, base + PCI_MSIX_ENTRY_VECTOR_CTRL);
}
@@ -744,15 +741,17 @@ static int msix_capability_init(struct pci_dev *dev, struct msix_entry *entries,
/* Disable INTX */
pci_intx_for_msi(dev, 0);
- /*
- * Ensure that all table entries are masked to prevent
- * stale entries from firing in a crash kernel.
- *
- * Done late to deal with a broken Marvell NVME device
- * which takes the MSI-X mask bits into account even
- * when MSI-X is disabled, which prevents MSI delivery.
- */
- msix_mask_all(dev->msix_base, tsize);
+ if (!pci_msi_domain_supports(dev, MSI_FLAG_NO_MASK, DENY_LEGACY)) {
+ /*
+ * Ensure that all table entries are masked to prevent
+ * stale entries from firing in a crash kernel.
+ *
+ * Done late to deal with a broken Marvell NVME device
+ * which takes the MSI-X mask bits into account even
+ * when MSI-X is disabled, which prevents MSI delivery.
+ */
+ msix_mask_all(dev->msix_base, tsize);
+ }
pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_MASKALL, 0);
pcibios_free_irq(dev);
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index f57ea36d125d..c8bd71a739f7 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -812,8 +812,7 @@ static int pci_pm_suspend(struct device *dev)
* suspend callbacks can cope with runtime-suspended devices, it is
* better to resume the device from runtime suspend here.
*/
- if (!dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) ||
- pci_dev_need_resume(pci_dev)) {
+ if (!dev_pm_smart_suspend(dev) || pci_dev_need_resume(pci_dev)) {
pm_runtime_resume(dev);
pci_dev->state_saved = false;
} else {
@@ -1151,8 +1150,7 @@ static int pci_pm_poweroff(struct device *dev)
}
/* The reason to do that is the same as in pci_pm_suspend(). */
- if (!dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) ||
- pci_dev_need_resume(pci_dev)) {
+ if (!dev_pm_smart_suspend(dev) || pci_dev_need_resume(pci_dev)) {
pm_runtime_resume(dev);
pci_dev->state_saved = false;
} else {
@@ -1653,7 +1651,8 @@ static int pci_dma_configure(struct device *dev)
pci_put_host_bridge_device(bridge);
- if (!ret && !driver->driver_managed_dma) {
+ /* @driver may not be valid when we're called from the IOMMU layer */
+ if (!ret && dev->driver && !driver->driver_managed_dma) {
ret = iommu_device_use_default_domain(dev);
if (ret)
arch_teardown_dma_ops(dev);
diff --git a/drivers/perf/apple_m1_cpu_pmu.c b/drivers/perf/apple_m1_cpu_pmu.c
index 06fd317529fc..df9a28ba69dc 100644
--- a/drivers/perf/apple_m1_cpu_pmu.c
+++ b/drivers/perf/apple_m1_cpu_pmu.c
@@ -12,6 +12,7 @@
#include <linux/of.h>
#include <linux/perf/arm_pmu.h>
+#include <linux/perf/arm_pmuv3.h>
#include <linux/platform_device.h>
#include <asm/apple_m1_pmu.h>
@@ -120,6 +121,8 @@ enum m1_pmu_events {
*/
M1_PMU_CFG_COUNT_USER = BIT(8),
M1_PMU_CFG_COUNT_KERNEL = BIT(9),
+ M1_PMU_CFG_COUNT_HOST = BIT(10),
+ M1_PMU_CFG_COUNT_GUEST = BIT(11),
};
/*
@@ -172,6 +175,17 @@ static const unsigned m1_pmu_perf_map[PERF_COUNT_HW_MAX] = {
[PERF_COUNT_HW_BRANCH_MISSES] = M1_PMU_PERFCTR_BRANCH_MISPRED_NONSPEC,
};
+#define M1_PMUV3_EVENT_MAP(pmuv3_event, m1_event) \
+ [ARMV8_PMUV3_PERFCTR_##pmuv3_event] = M1_PMU_PERFCTR_##m1_event
+
+static const u16 m1_pmu_pmceid_map[ARMV8_PMUV3_MAX_COMMON_EVENTS] = {
+ [0 ... ARMV8_PMUV3_MAX_COMMON_EVENTS - 1] = HW_OP_UNSUPPORTED,
+ M1_PMUV3_EVENT_MAP(INST_RETIRED, INST_ALL),
+ M1_PMUV3_EVENT_MAP(CPU_CYCLES, CORE_ACTIVE_CYCLE),
+ M1_PMUV3_EVENT_MAP(BR_RETIRED, INST_BRANCH),
+ M1_PMUV3_EVENT_MAP(BR_MIS_PRED_RETIRED, BRANCH_MISPRED_NONSPEC),
+};
+
/* sysfs definitions */
static ssize_t m1_pmu_events_sysfs_show(struct device *dev,
struct device_attribute *attr,
@@ -327,11 +341,10 @@ static void m1_pmu_disable_counter_interrupt(unsigned int index)
__m1_pmu_enable_counter_interrupt(index, false);
}
-static void m1_pmu_configure_counter(unsigned int index, u8 event,
- bool user, bool kernel)
+static void __m1_pmu_configure_event_filter(unsigned int index, bool user,
+ bool kernel, bool host)
{
- u64 val, user_bit, kernel_bit;
- int shift;
+ u64 clear, set, user_bit, kernel_bit;
switch (index) {
case 0 ... 7:
@@ -346,19 +359,27 @@ static void m1_pmu_configure_counter(unsigned int index, u8 event,
BUG();
}
- val = read_sysreg_s(SYS_IMP_APL_PMCR1_EL1);
-
+ clear = set = 0;
if (user)
- val |= user_bit;
+ set |= user_bit;
else
- val &= ~user_bit;
+ clear |= user_bit;
if (kernel)
- val |= kernel_bit;
+ set |= kernel_bit;
else
- val &= ~kernel_bit;
+ clear |= kernel_bit;
+
+ if (host)
+ sysreg_clear_set_s(SYS_IMP_APL_PMCR1_EL1, clear, set);
+ else if (is_kernel_in_hyp_mode())
+ sysreg_clear_set_s(SYS_IMP_APL_PMCR1_EL12, clear, set);
+}
- write_sysreg_s(val, SYS_IMP_APL_PMCR1_EL1);
+static void __m1_pmu_configure_eventsel(unsigned int index, u8 event)
+{
+ u64 clear = 0, set = 0;
+ int shift;
/*
* Counters 0 and 1 have fixed events. For anything else,
@@ -371,21 +392,32 @@ static void m1_pmu_configure_counter(unsigned int index, u8 event,
break;
case 2 ... 5:
shift = (index - 2) * 8;
- val = read_sysreg_s(SYS_IMP_APL_PMESR0_EL1);
- val &= ~((u64)0xff << shift);
- val |= (u64)event << shift;
- write_sysreg_s(val, SYS_IMP_APL_PMESR0_EL1);
+ clear |= (u64)0xff << shift;
+ set |= (u64)event << shift;
+ sysreg_clear_set_s(SYS_IMP_APL_PMESR0_EL1, clear, set);
break;
case 6 ... 9:
shift = (index - 6) * 8;
- val = read_sysreg_s(SYS_IMP_APL_PMESR1_EL1);
- val &= ~((u64)0xff << shift);
- val |= (u64)event << shift;
- write_sysreg_s(val, SYS_IMP_APL_PMESR1_EL1);
+ clear |= (u64)0xff << shift;
+ set |= (u64)event << shift;
+ sysreg_clear_set_s(SYS_IMP_APL_PMESR1_EL1, clear, set);
break;
}
}
+static void m1_pmu_configure_counter(unsigned int index, unsigned long config_base)
+{
+ bool kernel = config_base & M1_PMU_CFG_COUNT_KERNEL;
+ bool guest = config_base & M1_PMU_CFG_COUNT_GUEST;
+ bool host = config_base & M1_PMU_CFG_COUNT_HOST;
+ bool user = config_base & M1_PMU_CFG_COUNT_USER;
+ u8 evt = config_base & M1_PMU_CFG_EVENT;
+
+ __m1_pmu_configure_event_filter(index, user && host, kernel && host, true);
+ __m1_pmu_configure_event_filter(index, user && guest, kernel && guest, false);
+ __m1_pmu_configure_eventsel(index, evt);
+}
+
/* arm_pmu backend */
static void m1_pmu_enable_event(struct perf_event *event)
{
@@ -396,11 +428,7 @@ static void m1_pmu_enable_event(struct perf_event *event)
user = event->hw.config_base & M1_PMU_CFG_COUNT_USER;
kernel = event->hw.config_base & M1_PMU_CFG_COUNT_KERNEL;
- m1_pmu_disable_counter_interrupt(event->hw.idx);
- m1_pmu_disable_counter(event->hw.idx);
- isb();
-
- m1_pmu_configure_counter(event->hw.idx, evt, user, kernel);
+ m1_pmu_configure_counter(event->hw.idx, event->hw.config_base);
m1_pmu_enable_counter(event->hw.idx);
m1_pmu_enable_counter_interrupt(event->hw.idx);
isb();
@@ -538,6 +566,26 @@ static int m2_pmu_map_event(struct perf_event *event)
return armpmu_map_event(event, &m1_pmu_perf_map, NULL, M1_PMU_CFG_EVENT);
}
+static int m1_pmu_map_pmuv3_event(unsigned int eventsel)
+{
+ u16 m1_event = HW_OP_UNSUPPORTED;
+
+ if (eventsel < ARMV8_PMUV3_MAX_COMMON_EVENTS)
+ m1_event = m1_pmu_pmceid_map[eventsel];
+
+ return m1_event == HW_OP_UNSUPPORTED ? -EOPNOTSUPP : m1_event;
+}
+
+static void m1_pmu_init_pmceid(struct arm_pmu *pmu)
+{
+ unsigned int event;
+
+ for (event = 0; event < ARMV8_PMUV3_MAX_COMMON_EVENTS; event++) {
+ if (m1_pmu_map_pmuv3_event(event) >= 0)
+ set_bit(event, pmu->pmceid_bitmap);
+ }
+}
+
static void m1_pmu_reset(void *info)
{
int i;
@@ -558,7 +606,7 @@ static int m1_pmu_set_event_filter(struct hw_perf_event *event,
{
unsigned long config_base = 0;
- if (!attr->exclude_guest) {
+ if (!attr->exclude_guest && !is_kernel_in_hyp_mode()) {
pr_debug("ARM performance counters do not support mode exclusion\n");
return -EOPNOTSUPP;
}
@@ -566,6 +614,10 @@ static int m1_pmu_set_event_filter(struct hw_perf_event *event,
config_base |= M1_PMU_CFG_COUNT_KERNEL;
if (!attr->exclude_user)
config_base |= M1_PMU_CFG_COUNT_USER;
+ if (!attr->exclude_host)
+ config_base |= M1_PMU_CFG_COUNT_HOST;
+ if (!attr->exclude_guest)
+ config_base |= M1_PMU_CFG_COUNT_GUEST;
event->config_base = config_base;
@@ -594,6 +646,9 @@ static int m1_pmu_init(struct arm_pmu *cpu_pmu, u32 flags)
cpu_pmu->reset = m1_pmu_reset;
cpu_pmu->set_event_filter = m1_pmu_set_event_filter;
+ cpu_pmu->map_pmuv3_event = m1_pmu_map_pmuv3_event;
+ m1_pmu_init_pmceid(cpu_pmu);
+
bitmap_set(cpu_pmu->cntr_mask, 0, M1_PMU_NR_COUNTERS);
cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] = &m1_pmu_events_attr_group;
cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] = &m1_pmu_format_attr_group;
diff --git a/drivers/perf/arm-ccn.c b/drivers/perf/arm-ccn.c
index d5fcea3d4328..1a0d0e1a2263 100644
--- a/drivers/perf/arm-ccn.c
+++ b/drivers/perf/arm-ccn.c
@@ -1273,9 +1273,8 @@ static int arm_ccn_pmu_init(struct arm_ccn *ccn)
/* No overflow interrupt? Have to use a timer instead. */
if (!ccn->irq) {
dev_info(ccn->dev, "No access to interrupts, using timer.\n");
- hrtimer_init(&ccn->dt.hrtimer, CLOCK_MONOTONIC,
- HRTIMER_MODE_REL);
- ccn->dt.hrtimer.function = arm_ccn_pmu_timer_handler;
+ hrtimer_setup(&ccn->dt.hrtimer, arm_ccn_pmu_timer_handler, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
}
/* Pick one CPU which we will use to collect data from CCN... */
diff --git a/drivers/perf/arm-cmn.c b/drivers/perf/arm-cmn.c
index ef959e66db7c..d4fe30ff225b 100644
--- a/drivers/perf/arm-cmn.c
+++ b/drivers/perf/arm-cmn.c
@@ -802,8 +802,6 @@ static umode_t arm_cmn_event_attr_is_visible(struct kobject *kobj,
CMN_EVENT_ATTR(_model, ccha_##_name, CMN_TYPE_CCHA, _event)
#define CMN_EVENT_CCLA(_name, _event) \
CMN_EVENT_ATTR(CMN_ANY, ccla_##_name, CMN_TYPE_CCLA, _event)
-#define CMN_EVENT_CCLA_RNI(_name, _event) \
- CMN_EVENT_ATTR(CMN_ANY, ccla_rni_##_name, CMN_TYPE_CCLA_RNI, _event)
#define CMN_EVENT_HNS(_name, _event) \
CMN_EVENT_ATTR(CMN_ANY, hns_##_name, CMN_TYPE_HNS, _event)
@@ -1798,6 +1796,9 @@ static int arm_cmn_event_init(struct perf_event *event)
} else if (type == CMN_TYPE_XP &&
(cmn->part == PART_CMN700 || cmn->part == PART_CMN_S3)) {
hw->wide_sel = true;
+ } else if (type == CMN_TYPE_RND) {
+ /* Secretly permit this as an alias for "rnid" events */
+ type = CMN_TYPE_RNI;
}
/* This is sufficiently annoying to recalculate, so cache it */
diff --git a/drivers/perf/arm_cspmu/ampere_cspmu.c b/drivers/perf/arm_cspmu/ampere_cspmu.c
index f72f5689923c..b8ca69fd9d1d 100644
--- a/drivers/perf/arm_cspmu/ampere_cspmu.c
+++ b/drivers/perf/arm_cspmu/ampere_cspmu.c
@@ -10,10 +10,10 @@
#include "arm_cspmu.h"
-#define PMAUXR0 0xD80
-#define PMAUXR1 0xD84
-#define PMAUXR2 0xD88
-#define PMAUXR3 0xD8C
+#define PMAUXR0 PMIMPDEF
+#define PMAUXR1 (PMIMPDEF + 0x4)
+#define PMAUXR2 (PMIMPDEF + 0x8)
+#define PMAUXR3 (PMIMPDEF + 0xC)
#define to_ampere_cspmu_ctx(cspmu) ((struct ampere_cspmu_ctx *)(cspmu->impl.ctx))
@@ -132,32 +132,20 @@ ampere_cspmu_get_name(const struct arm_cspmu *cspmu)
return ctx->name;
}
-static u32 ampere_cspmu_event_filter(const struct perf_event *event)
+static void ampere_cspmu_set_cc_filter(struct arm_cspmu *cspmu,
+ const struct perf_event *event)
{
/*
- * PMEVFILTR or PMCCFILTR aren't used in Ampere SoC PMU but are marked
- * as RES0. Make sure, PMCCFILTR is written zero.
+ * PMCCFILTR is RES0, so this is just a dummy callback to override
+ * the default implementation and avoid writing to it.
*/
- return 0;
}
static void ampere_cspmu_set_ev_filter(struct arm_cspmu *cspmu,
- struct hw_perf_event *hwc,
- u32 filter)
+ const struct perf_event *event)
{
- struct perf_event *event;
- unsigned int idx;
u32 threshold, rank, bank;
- /*
- * At this point, all the events have the same filter settings.
- * Therefore, take the first event and use its configuration.
- */
- idx = find_first_bit(cspmu->hw_events.used_ctrs,
- cspmu->cycle_counter_logical_idx);
-
- event = cspmu->hw_events.events[idx];
-
threshold = get_threshold(event);
rank = get_rank(event);
bank = get_bank(event);
@@ -233,7 +221,7 @@ static int ampere_cspmu_init_ops(struct arm_cspmu *cspmu)
cspmu->impl.ctx = ctx;
- impl_ops->event_filter = ampere_cspmu_event_filter;
+ impl_ops->set_cc_filter = ampere_cspmu_set_cc_filter;
impl_ops->set_ev_filter = ampere_cspmu_set_ev_filter;
impl_ops->validate_event = ampere_cspmu_validate_event;
impl_ops->get_name = ampere_cspmu_get_name;
diff --git a/drivers/perf/arm_cspmu/arm_cspmu.c b/drivers/perf/arm_cspmu/arm_cspmu.c
index 81e8b97e9353..efa9b229e701 100644
--- a/drivers/perf/arm_cspmu/arm_cspmu.c
+++ b/drivers/perf/arm_cspmu/arm_cspmu.c
@@ -40,51 +40,6 @@
ARM_CSPMU_EXT_ATTR(_name, arm_cspmu_cpumask_show, \
(unsigned long)_config)
-/*
- * CoreSight PMU Arch register offsets.
- */
-#define PMEVCNTR_LO 0x0
-#define PMEVCNTR_HI 0x4
-#define PMEVTYPER 0x400
-#define PMCCFILTR 0x47C
-#define PMEVFILTR 0xA00
-#define PMCNTENSET 0xC00
-#define PMCNTENCLR 0xC20
-#define PMINTENSET 0xC40
-#define PMINTENCLR 0xC60
-#define PMOVSCLR 0xC80
-#define PMOVSSET 0xCC0
-#define PMCFGR 0xE00
-#define PMCR 0xE04
-#define PMIIDR 0xE08
-
-/* PMCFGR register field */
-#define PMCFGR_NCG GENMASK(31, 28)
-#define PMCFGR_HDBG BIT(24)
-#define PMCFGR_TRO BIT(23)
-#define PMCFGR_SS BIT(22)
-#define PMCFGR_FZO BIT(21)
-#define PMCFGR_MSI BIT(20)
-#define PMCFGR_UEN BIT(19)
-#define PMCFGR_NA BIT(17)
-#define PMCFGR_EX BIT(16)
-#define PMCFGR_CCD BIT(15)
-#define PMCFGR_CC BIT(14)
-#define PMCFGR_SIZE GENMASK(13, 8)
-#define PMCFGR_N GENMASK(7, 0)
-
-/* PMCR register field */
-#define PMCR_TRO BIT(11)
-#define PMCR_HDBG BIT(10)
-#define PMCR_FZO BIT(9)
-#define PMCR_NA BIT(8)
-#define PMCR_DP BIT(5)
-#define PMCR_X BIT(4)
-#define PMCR_D BIT(3)
-#define PMCR_C BIT(2)
-#define PMCR_P BIT(1)
-#define PMCR_E BIT(0)
-
/* Each SET/CLR register supports up to 32 counters. */
#define ARM_CSPMU_SET_CLR_COUNTER_SHIFT 5
#define ARM_CSPMU_SET_CLR_COUNTER_NUM \
@@ -111,7 +66,9 @@ static unsigned long arm_cspmu_cpuhp_state;
static DEFINE_MUTEX(arm_cspmu_lock);
static void arm_cspmu_set_ev_filter(struct arm_cspmu *cspmu,
- struct hw_perf_event *hwc, u32 filter);
+ const struct perf_event *event);
+static void arm_cspmu_set_cc_filter(struct arm_cspmu *cspmu,
+ const struct perf_event *event);
static struct acpi_apmt_node *arm_cspmu_apmt_node(struct device *dev)
{
@@ -226,6 +183,7 @@ arm_cspmu_event_attr_is_visible(struct kobject *kobj,
static struct attribute *arm_cspmu_format_attrs[] = {
ARM_CSPMU_FORMAT_EVENT_ATTR,
ARM_CSPMU_FORMAT_FILTER_ATTR,
+ ARM_CSPMU_FORMAT_FILTER2_ATTR,
NULL,
};
@@ -250,11 +208,6 @@ static bool arm_cspmu_is_cycle_counter_event(const struct perf_event *event)
return (event->attr.config == ARM_CSPMU_EVT_CYCLES_DEFAULT);
}
-static u32 arm_cspmu_event_filter(const struct perf_event *event)
-{
- return event->attr.config1 & ARM_CSPMU_FILTER_MASK;
-}
-
static ssize_t arm_cspmu_identifier_show(struct device *dev,
struct device_attribute *attr,
char *page)
@@ -416,7 +369,7 @@ static int arm_cspmu_init_impl_ops(struct arm_cspmu *cspmu)
DEFAULT_IMPL_OP(get_name),
DEFAULT_IMPL_OP(is_cycle_counter_event),
DEFAULT_IMPL_OP(event_type),
- DEFAULT_IMPL_OP(event_filter),
+ DEFAULT_IMPL_OP(set_cc_filter),
DEFAULT_IMPL_OP(set_ev_filter),
DEFAULT_IMPL_OP(event_attr_is_visible),
};
@@ -812,26 +765,28 @@ static inline void arm_cspmu_set_event(struct arm_cspmu *cspmu,
}
static void arm_cspmu_set_ev_filter(struct arm_cspmu *cspmu,
- struct hw_perf_event *hwc,
- u32 filter)
+ const struct perf_event *event)
{
- u32 offset = PMEVFILTR + (4 * hwc->idx);
+ u32 filter = event->attr.config1 & ARM_CSPMU_FILTER_MASK;
+ u32 filter2 = event->attr.config2 & ARM_CSPMU_FILTER_MASK;
+ u32 offset = 4 * event->hw.idx;
- writel(filter, cspmu->base0 + offset);
+ writel(filter, cspmu->base0 + PMEVFILTR + offset);
+ writel(filter2, cspmu->base0 + PMEVFILT2R + offset);
}
-static inline void arm_cspmu_set_cc_filter(struct arm_cspmu *cspmu, u32 filter)
+static void arm_cspmu_set_cc_filter(struct arm_cspmu *cspmu,
+ const struct perf_event *event)
{
- u32 offset = PMCCFILTR;
+ u32 filter = event->attr.config1 & ARM_CSPMU_FILTER_MASK;
- writel(filter, cspmu->base0 + offset);
+ writel(filter, cspmu->base0 + PMCCFILTR);
}
static void arm_cspmu_start(struct perf_event *event, int pmu_flags)
{
struct arm_cspmu *cspmu = to_arm_cspmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
- u32 filter;
/* We always reprogram the counter */
if (pmu_flags & PERF_EF_RELOAD)
@@ -839,13 +794,11 @@ static void arm_cspmu_start(struct perf_event *event, int pmu_flags)
arm_cspmu_set_event_period(event);
- filter = cspmu->impl.ops.event_filter(event);
-
if (event->hw.extra_reg.idx == cspmu->cycle_counter_logical_idx) {
- arm_cspmu_set_cc_filter(cspmu, filter);
+ cspmu->impl.ops.set_cc_filter(cspmu, event);
} else {
arm_cspmu_set_event(cspmu, hwc);
- cspmu->impl.ops.set_ev_filter(cspmu, hwc, filter);
+ cspmu->impl.ops.set_ev_filter(cspmu, event);
}
hwc->state = 0;
diff --git a/drivers/perf/arm_cspmu/arm_cspmu.h b/drivers/perf/arm_cspmu/arm_cspmu.h
index 2621f3111148..19684b76bd96 100644
--- a/drivers/perf/arm_cspmu/arm_cspmu.h
+++ b/drivers/perf/arm_cspmu/arm_cspmu.h
@@ -47,6 +47,8 @@
/* Default filter format */
#define ARM_CSPMU_FORMAT_FILTER_ATTR \
ARM_CSPMU_FORMAT_ATTR(filter, "config1:0-31")
+#define ARM_CSPMU_FORMAT_FILTER2_ATTR \
+ ARM_CSPMU_FORMAT_ATTR(filter2, "config2:0-31")
/*
* This is the default event number for cycle count, if supported, since the
@@ -65,6 +67,53 @@
/* The cycle counter, if implemented, is located at counter[31]. */
#define ARM_CSPMU_CYCLE_CNTR_IDX 31
+/*
+ * CoreSight PMU Arch register offsets.
+ */
+#define PMEVCNTR_LO 0x0
+#define PMEVCNTR_HI 0x4
+#define PMEVTYPER 0x400
+#define PMCCFILTR 0x47C
+#define PMEVFILT2R 0x800
+#define PMEVFILTR 0xA00
+#define PMCNTENSET 0xC00
+#define PMCNTENCLR 0xC20
+#define PMINTENSET 0xC40
+#define PMINTENCLR 0xC60
+#define PMOVSCLR 0xC80
+#define PMOVSSET 0xCC0
+#define PMIMPDEF 0xD80
+#define PMCFGR 0xE00
+#define PMCR 0xE04
+#define PMIIDR 0xE08
+
+/* PMCFGR register field */
+#define PMCFGR_NCG GENMASK(31, 28)
+#define PMCFGR_HDBG BIT(24)
+#define PMCFGR_TRO BIT(23)
+#define PMCFGR_SS BIT(22)
+#define PMCFGR_FZO BIT(21)
+#define PMCFGR_MSI BIT(20)
+#define PMCFGR_UEN BIT(19)
+#define PMCFGR_NA BIT(17)
+#define PMCFGR_EX BIT(16)
+#define PMCFGR_CCD BIT(15)
+#define PMCFGR_CC BIT(14)
+#define PMCFGR_SIZE GENMASK(13, 8)
+#define PMCFGR_N GENMASK(7, 0)
+
+/* PMCR register field */
+#define PMCR_TRO BIT(11)
+#define PMCR_HDBG BIT(10)
+#define PMCR_FZO BIT(9)
+#define PMCR_NA BIT(8)
+#define PMCR_DP BIT(5)
+#define PMCR_X BIT(4)
+#define PMCR_D BIT(3)
+#define PMCR_C BIT(2)
+#define PMCR_P BIT(1)
+#define PMCR_E BIT(0)
+
/* PMIIDR register field */
#define ARM_CSPMU_PMIIDR_IMPLEMENTER GENMASK(11, 0)
#define ARM_CSPMU_PMIIDR_PRODUCTID GENMASK(31, 20)
@@ -103,11 +152,11 @@ struct arm_cspmu_impl_ops {
bool (*is_cycle_counter_event)(const struct perf_event *event);
/* Decode event type/id from configs */
u32 (*event_type)(const struct perf_event *event);
- /* Decode filter value from configs */
- u32 (*event_filter)(const struct perf_event *event);
- /* Set event filter */
+ /* Set event filters */
+ void (*set_cc_filter)(struct arm_cspmu *cspmu,
+ const struct perf_event *event);
void (*set_ev_filter)(struct arm_cspmu *cspmu,
- struct hw_perf_event *hwc, u32 filter);
+ const struct perf_event *event);
/* Implementation specific event validation */
int (*validate_event)(struct arm_cspmu *cspmu,
struct perf_event *event);
diff --git a/drivers/perf/arm_cspmu/nvidia_cspmu.c b/drivers/perf/arm_cspmu/nvidia_cspmu.c
index 8116c7846a46..dc6d4e3e2a1b 100644
--- a/drivers/perf/arm_cspmu/nvidia_cspmu.c
+++ b/drivers/perf/arm_cspmu/nvidia_cspmu.c
@@ -6,6 +6,7 @@
/* Support for NVIDIA specific attributes. */
+#include <linux/io.h>
#include <linux/module.h>
#include <linux/topology.h>
@@ -183,6 +184,24 @@ static u32 nv_cspmu_event_filter(const struct perf_event *event)
return filter_val;
}
+static void nv_cspmu_set_ev_filter(struct arm_cspmu *cspmu,
+ const struct perf_event *event)
+{
+ u32 filter = nv_cspmu_event_filter(event);
+ u32 offset = PMEVFILTR + (4 * event->hw.idx);
+
+ writel(filter, cspmu->base0 + offset);
+}
+
+static void nv_cspmu_set_cc_filter(struct arm_cspmu *cspmu,
+ const struct perf_event *event)
+{
+ u32 filter = nv_cspmu_event_filter(event);
+
+ writel(filter, cspmu->base0 + PMCCFILTR);
+}
+
+
enum nv_cspmu_name_fmt {
NAME_FMT_GENERIC,
NAME_FMT_SOCKET
@@ -322,7 +341,8 @@ static int nv_cspmu_init_ops(struct arm_cspmu *cspmu)
cspmu->impl.ctx = ctx;
/* NVIDIA specific callbacks. */
- impl_ops->event_filter = nv_cspmu_event_filter;
+ impl_ops->set_cc_filter = nv_cspmu_set_cc_filter;
+ impl_ops->set_ev_filter = nv_cspmu_set_ev_filter;
impl_ops->get_event_attrs = nv_cspmu_get_event_attrs;
impl_ops->get_format_attrs = nv_cspmu_get_format_attrs;
impl_ops->get_name = nv_cspmu_get_name;
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
index 398cce3d76fc..2f33e69a8caf 100644
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c
@@ -342,12 +342,10 @@ armpmu_add(struct perf_event *event, int flags)
if (idx < 0)
return idx;
- /*
- * If there is an event in the counter we are going to use then make
- * sure it is disabled.
- */
+ /* The newly-allocated counter should be empty */
+ WARN_ON_ONCE(hw_events->events[idx]);
+
event->hw.idx = idx;
- armpmu->disable(event);
hw_events->events[idx] = event;
hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
diff --git a/drivers/perf/arm_pmuv3.c b/drivers/perf/arm_pmuv3.c
index 0e360feb3432..e506d59654e7 100644
--- a/drivers/perf/arm_pmuv3.c
+++ b/drivers/perf/arm_pmuv3.c
@@ -795,11 +795,6 @@ static void armv8pmu_enable_user_access(struct arm_pmu *cpu_pmu)
static void armv8pmu_enable_event(struct perf_event *event)
{
- /*
- * Enable counter and interrupt, and set the counter to count
- * the event that we're interested in.
- */
- armv8pmu_disable_event_counter(event);
armv8pmu_write_event_type(event);
armv8pmu_enable_event_irq(event);
armv8pmu_enable_event_counter(event);
@@ -825,10 +820,10 @@ static void armv8pmu_start(struct arm_pmu *cpu_pmu)
else
armv8pmu_disable_user_access();
+ kvm_vcpu_pmu_resync_el0();
+
/* Enable all counters */
armv8pmu_pmcr_write(armv8pmu_pmcr_read() | ARMV8_PMU_PMCR_E);
-
- kvm_vcpu_pmu_resync_el0();
}
static void armv8pmu_stop(struct arm_pmu *cpu_pmu)
@@ -1369,6 +1364,7 @@ PMUV3_INIT_SIMPLE(armv8_neoverse_v1)
PMUV3_INIT_SIMPLE(armv8_neoverse_v2)
PMUV3_INIT_SIMPLE(armv8_neoverse_v3)
PMUV3_INIT_SIMPLE(armv8_neoverse_v3ae)
+PMUV3_INIT_SIMPLE(armv8_rainier)
PMUV3_INIT_SIMPLE(armv8_nvidia_carmel)
PMUV3_INIT_SIMPLE(armv8_nvidia_denver)
@@ -1416,6 +1412,7 @@ static const struct of_device_id armv8_pmu_of_device_ids[] = {
{.compatible = "arm,neoverse-v2-pmu", .data = armv8_neoverse_v2_pmu_init},
{.compatible = "arm,neoverse-v3-pmu", .data = armv8_neoverse_v3_pmu_init},
{.compatible = "arm,neoverse-v3ae-pmu", .data = armv8_neoverse_v3ae_pmu_init},
+ {.compatible = "arm,rainier-pmu", .data = armv8_rainier_pmu_init},
{.compatible = "cavium,thunder-pmu", .data = armv8_cavium_thunder_pmu_init},
{.compatible = "brcm,vulcan-pmu", .data = armv8_brcm_vulcan_pmu_init},
{.compatible = "nvidia,carmel-pmu", .data = armv8_nvidia_carmel_pmu_init},
diff --git a/drivers/perf/arm_spe_pmu.c b/drivers/perf/arm_spe_pmu.c
index f5e6878db9d6..3efed8839a4e 100644
--- a/drivers/perf/arm_spe_pmu.c
+++ b/drivers/perf/arm_spe_pmu.c
@@ -50,7 +50,7 @@ static_assert((PERF_EVENT_FLAG_ARCH & SPE_PMU_HW_FLAGS_CX) == SPE_PMU_HW_FLAGS_C
static void set_spe_event_has_cx(struct perf_event *event)
{
- if (IS_ENABLED(CONFIG_PID_IN_CONTEXTIDR) && !perf_allow_kernel(&event->attr))
+ if (IS_ENABLED(CONFIG_PID_IN_CONTEXTIDR) && !perf_allow_kernel())
event->hw.flags |= SPE_PMU_HW_FLAGS_CX;
}
@@ -765,7 +765,7 @@ static int arm_spe_pmu_event_init(struct perf_event *event)
set_spe_event_has_cx(event);
reg = arm_spe_event_to_pmscr(event);
if (reg & (PMSCR_EL1_PA | PMSCR_EL1_PCT))
- return perf_allow_kernel(&event->attr);
+ return perf_allow_kernel();
return 0;
}
diff --git a/drivers/perf/arm_v7_pmu.c b/drivers/perf/arm_v7_pmu.c
index 420cadd108e7..17831e1920bd 100644
--- a/drivers/perf/arm_v7_pmu.c
+++ b/drivers/perf/arm_v7_pmu.c
@@ -858,16 +858,6 @@ static void armv7pmu_enable_event(struct perf_event *event)
}
/*
- * Enable counter and interrupt, and set the counter to count
- * the event that we're interested in.
- */
-
- /*
- * Disable counter
- */
- armv7_pmnc_disable_counter(idx);
-
- /*
* Set event (if destined for PMNx counters)
* We only need to set the event for the cycle counter if we
* have the ability to perform event filtering.
@@ -875,14 +865,7 @@ static void armv7pmu_enable_event(struct perf_event *event)
if (cpu_pmu->set_event_filter || idx != ARMV7_IDX_CYCLE_COUNTER)
armv7_pmnc_write_evtsel(idx, hwc->config_base);
- /*
- * Enable interrupt for this counter
- */
armv7_pmnc_enable_intens(idx);
-
- /*
- * Enable counter
- */
armv7_pmnc_enable_counter(idx);
}
@@ -898,18 +881,7 @@ static void armv7pmu_disable_event(struct perf_event *event)
return;
}
- /*
- * Disable counter and interrupt
- */
-
- /*
- * Disable counter
- */
armv7_pmnc_disable_counter(idx);
-
- /*
- * Disable interrupt for this counter
- */
armv7_pmnc_disable_intens(idx);
}
@@ -1477,14 +1449,6 @@ static void krait_pmu_enable_event(struct perf_event *event)
int idx = hwc->idx;
/*
- * Enable counter and interrupt, and set the counter to count
- * the event that we're interested in.
- */
-
- /* Disable counter */
- armv7_pmnc_disable_counter(idx);
-
- /*
* Set event (if destined for PMNx counters)
* We set the event for the cycle counter because we
* have the ability to perform event filtering.
@@ -1494,10 +1458,7 @@ static void krait_pmu_enable_event(struct perf_event *event)
else
armv7_pmnc_write_evtsel(idx, hwc->config_base);
- /* Enable interrupt for this counter */
armv7_pmnc_enable_intens(idx);
-
- /* Enable counter */
armv7_pmnc_enable_counter(idx);
}
@@ -1798,14 +1759,6 @@ static void scorpion_pmu_enable_event(struct perf_event *event)
int idx = hwc->idx;
/*
- * Enable counter and interrupt, and set the counter to count
- * the event that we're interested in.
- */
-
- /* Disable counter */
- armv7_pmnc_disable_counter(idx);
-
- /*
* Set event (if destined for PMNx counters)
* We don't set the event for the cycle counter because we
* don't have the ability to perform event filtering.
@@ -1815,10 +1768,7 @@ static void scorpion_pmu_enable_event(struct perf_event *event)
else if (idx != ARMV7_IDX_CYCLE_COUNTER)
armv7_pmnc_write_evtsel(idx, hwc->config_base);
- /* Enable interrupt for this counter */
armv7_pmnc_enable_intens(idx);
-
- /* Enable counter */
armv7_pmnc_enable_counter(idx);
}
diff --git a/drivers/perf/dwc_pcie_pmu.c b/drivers/perf/dwc_pcie_pmu.c
index cccecae9823f..f851e070760c 100644
--- a/drivers/perf/dwc_pcie_pmu.c
+++ b/drivers/perf/dwc_pcie_pmu.c
@@ -565,15 +565,15 @@ static int dwc_pcie_register_dev(struct pci_dev *pdev)
u32 sbdf;
sbdf = (pci_domain_nr(pdev->bus) << 16) | PCI_DEVID(pdev->bus->number, pdev->devfn);
- plat_dev = platform_device_register_data(NULL, "dwc_pcie_pmu", sbdf,
- pdev, sizeof(*pdev));
-
+ plat_dev = platform_device_register_simple("dwc_pcie_pmu", sbdf, NULL, 0);
if (IS_ERR(plat_dev))
return PTR_ERR(plat_dev);
dev_info = kzalloc(sizeof(*dev_info), GFP_KERNEL);
- if (!dev_info)
+ if (!dev_info) {
+ platform_device_unregister(plat_dev);
return -ENOMEM;
+ }
/* Cache platform device to handle pci device hotplug */
dev_info->plat_dev = plat_dev;
@@ -614,18 +614,26 @@ static struct notifier_block dwc_pcie_pmu_nb = {
static int dwc_pcie_pmu_probe(struct platform_device *plat_dev)
{
- struct pci_dev *pdev = plat_dev->dev.platform_data;
+ struct pci_dev *pdev;
struct dwc_pcie_pmu *pcie_pmu;
char *name;
u32 sbdf;
u16 vsec;
int ret;
+ sbdf = plat_dev->id;
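+	/* plat_dev->id was set at registration time to domain << 16 | PCI_DEVID(bus, devfn) */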
+ pdev = pci_get_domain_bus_and_slot(sbdf >> 16, PCI_BUS_NUM(sbdf & 0xffff),
+ sbdf & 0xff);
+ if (!pdev) {
+ pr_err("No pdev found for the sbdf 0x%x\n", sbdf);
+ return -ENODEV;
+ }
+
vsec = dwc_pcie_des_cap(pdev);
if (!vsec)
return -ENODEV;
- sbdf = plat_dev->id;
+ pci_dev_put(pdev);
name = devm_kasprintf(&plat_dev->dev, GFP_KERNEL, "dwc_rootport_%x", sbdf);
if (!name)
return -ENOMEM;
@@ -640,7 +648,7 @@ static int dwc_pcie_pmu_probe(struct platform_device *plat_dev)
pcie_pmu->on_cpu = -1;
pcie_pmu->pmu = (struct pmu){
.name = name,
- .parent = &pdev->dev,
+ .parent = &plat_dev->dev,
.module = THIS_MODULE,
.attr_groups = dwc_pcie_attr_groups,
.capabilities = PERF_PMU_CAP_NO_EXCLUDE,
@@ -730,6 +738,15 @@ static struct platform_driver dwc_pcie_pmu_driver = {
.driver = {.name = "dwc_pcie_pmu",},
};
+static void dwc_pcie_cleanup_devices(void)
+{
+ struct dwc_pcie_dev_info *dev_info, *tmp;
+
+ list_for_each_entry_safe(dev_info, tmp, &dwc_pcie_dev_info_head, dev_node) {
+ dwc_pcie_unregister_dev(dev_info);
+ }
+}
+
static int __init dwc_pcie_pmu_init(void)
{
struct pci_dev *pdev = NULL;
@@ -742,7 +759,7 @@ static int __init dwc_pcie_pmu_init(void)
ret = dwc_pcie_register_dev(pdev);
if (ret) {
pci_dev_put(pdev);
- return ret;
+ goto err_cleanup;
}
}
@@ -751,35 +768,35 @@ static int __init dwc_pcie_pmu_init(void)
dwc_pcie_pmu_online_cpu,
dwc_pcie_pmu_offline_cpu);
if (ret < 0)
- return ret;
+ goto err_cleanup;
dwc_pcie_pmu_hp_state = ret;
ret = platform_driver_register(&dwc_pcie_pmu_driver);
if (ret)
- goto platform_driver_register_err;
+ goto err_remove_cpuhp;
ret = bus_register_notifier(&pci_bus_type, &dwc_pcie_pmu_nb);
if (ret)
- goto platform_driver_register_err;
+ goto err_unregister_driver;
notify = true;
return 0;
-platform_driver_register_err:
+err_unregister_driver:
+ platform_driver_unregister(&dwc_pcie_pmu_driver);
+err_remove_cpuhp:
cpuhp_remove_multi_state(dwc_pcie_pmu_hp_state);
-
+err_cleanup:
+ dwc_pcie_cleanup_devices();
return ret;
}
static void __exit dwc_pcie_pmu_exit(void)
{
- struct dwc_pcie_dev_info *dev_info, *tmp;
-
if (notify)
bus_unregister_notifier(&pci_bus_type, &dwc_pcie_pmu_nb);
- list_for_each_entry_safe(dev_info, tmp, &dwc_pcie_dev_info_head, dev_node)
- dwc_pcie_unregister_dev(dev_info);
+ dwc_pcie_cleanup_devices();
platform_driver_unregister(&dwc_pcie_pmu_driver);
cpuhp_remove_multi_state(dwc_pcie_pmu_hp_state);
}
diff --git a/drivers/perf/marvell_cn10k_ddr_pmu.c b/drivers/perf/marvell_cn10k_ddr_pmu.c
index 039feded9152..72ac17efd846 100644
--- a/drivers/perf/marvell_cn10k_ddr_pmu.c
+++ b/drivers/perf/marvell_cn10k_ddr_pmu.c
@@ -1064,8 +1064,8 @@ static int cn10k_ddr_perf_probe(struct platform_device *pdev)
if (!name)
return -ENOMEM;
- hrtimer_init(&ddr_pmu->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- ddr_pmu->hrtimer.function = cn10k_ddr_pmu_timer_handler;
+ hrtimer_setup(&ddr_pmu->hrtimer, cn10k_ddr_pmu_timer_handler, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
cpuhp_state_add_instance_nocalls(
CPUHP_AP_PERF_ARM_MARVELL_CN10K_DDR_ONLINE,
diff --git a/drivers/perf/thunderx2_pmu.c b/drivers/perf/thunderx2_pmu.c
index cadd60221b8f..6ed4707bd6bb 100644
--- a/drivers/perf/thunderx2_pmu.c
+++ b/drivers/perf/thunderx2_pmu.c
@@ -752,9 +752,8 @@ static int tx2_uncore_pmu_add_dev(struct tx2_uncore_pmu *tx2_pmu)
tx2_pmu->cpu = cpu;
if (tx2_pmu->hrtimer_callback) {
- hrtimer_init(&tx2_pmu->hrtimer,
- CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- tx2_pmu->hrtimer.function = tx2_pmu->hrtimer_callback;
+ hrtimer_setup(&tx2_pmu->hrtimer, tx2_pmu->hrtimer_callback, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
}
ret = tx2_uncore_pmu_register(tx2_pmu);
diff --git a/drivers/phy/motorola/phy-mapphone-mdm6600.c b/drivers/phy/motorola/phy-mapphone-mdm6600.c
index 152344e4f7e4..fd0e0cd1c1cf 100644
--- a/drivers/phy/motorola/phy-mapphone-mdm6600.c
+++ b/drivers/phy/motorola/phy-mapphone-mdm6600.c
@@ -177,9 +177,7 @@ static void phy_mdm6600_cmd(struct phy_mdm6600 *ddata, int val)
values[0] = val;
- gpiod_set_array_value_cansleep(PHY_MDM6600_NR_CMD_LINES,
- ddata->cmd_gpios->desc,
- ddata->cmd_gpios->info, values);
+ gpiod_multi_set_value_cansleep(ddata->cmd_gpios, values);
}
/**
diff --git a/drivers/pinctrl/bcm/pinctrl-bcm281xx.c b/drivers/pinctrl/bcm/pinctrl-bcm281xx.c
index 73dbf29c002f..cf6efa9c0364 100644
--- a/drivers/pinctrl/bcm/pinctrl-bcm281xx.c
+++ b/drivers/pinctrl/bcm/pinctrl-bcm281xx.c
@@ -974,7 +974,7 @@ static const struct regmap_config bcm281xx_pinctrl_regmap_config = {
.reg_bits = 32,
.reg_stride = 4,
.val_bits = 32,
- .max_register = BCM281XX_PIN_VC_CAM3_SDA,
+ .max_register = BCM281XX_PIN_VC_CAM3_SDA * 4,
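+	/* max_register is a byte offset (reg_stride is 4), hence the scaling of the last pin index */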
};
static int bcm281xx_pinctrl_get_groups_count(struct pinctrl_dev *pctldev)
diff --git a/drivers/pinctrl/nuvoton/pinctrl-npcm8xx.c b/drivers/pinctrl/nuvoton/pinctrl-npcm8xx.c
index 471f644c5eef..d09a5e9b2eca 100644
--- a/drivers/pinctrl/nuvoton/pinctrl-npcm8xx.c
+++ b/drivers/pinctrl/nuvoton/pinctrl-npcm8xx.c
@@ -2374,6 +2374,9 @@ static int npcm8xx_gpio_fw(struct npcm8xx_pinctrl *pctrl)
pctrl->gpio_bank[id].gc.parent = dev;
pctrl->gpio_bank[id].gc.fwnode = child;
pctrl->gpio_bank[id].gc.label = devm_kasprintf(dev, GFP_KERNEL, "%pfw", child);
+ if (pctrl->gpio_bank[id].gc.label == NULL)
+ return -ENOMEM;
+
pctrl->gpio_bank[id].gc.dbg_show = npcmgpio_dbg_show;
pctrl->gpio_bank[id].direction_input = pctrl->gpio_bank[id].gc.direction_input;
pctrl->gpio_bank[id].gc.direction_input = npcmgpio_direction_input;
diff --git a/drivers/pinctrl/spacemit/Kconfig b/drivers/pinctrl/spacemit/Kconfig
index 168f8a5ffbb9..d6f6017fd097 100644
--- a/drivers/pinctrl/spacemit/Kconfig
+++ b/drivers/pinctrl/spacemit/Kconfig
@@ -4,9 +4,10 @@
#
config PINCTRL_SPACEMIT_K1
- tristate "SpacemiT K1 SoC Pinctrl driver"
+ bool "SpacemiT K1 SoC Pinctrl driver"
depends on ARCH_SPACEMIT || COMPILE_TEST
depends on OF
+ default ARCH_SPACEMIT
select GENERIC_PINCTRL_GROUPS
select GENERIC_PINMUX_FUNCTIONS
select GENERIC_PINCONF
diff --git a/drivers/pinctrl/spacemit/pinctrl-k1.c b/drivers/pinctrl/spacemit/pinctrl-k1.c
index a32579d73613..59fd555ff38d 100644
--- a/drivers/pinctrl/spacemit/pinctrl-k1.c
+++ b/drivers/pinctrl/spacemit/pinctrl-k1.c
@@ -1044,7 +1044,7 @@ static struct platform_driver k1_pinctrl_driver = {
.of_match_table = k1_pinctrl_ids,
},
};
-module_platform_driver(k1_pinctrl_driver);
+builtin_platform_driver(k1_pinctrl_driver);
MODULE_AUTHOR("Yixun Lan <dlan@gentoo.org>");
MODULE_DESCRIPTION("Pinctrl driver for the SpacemiT K1 SoC");
diff --git a/drivers/platform/arm64/Kconfig b/drivers/platform/arm64/Kconfig
index f88395ea3376..0abe5377891b 100644
--- a/drivers/platform/arm64/Kconfig
+++ b/drivers/platform/arm64/Kconfig
@@ -33,6 +33,27 @@ config EC_ACER_ASPIRE1
laptop where this information is not properly exposed via the
standard ACPI devices.
+config EC_HUAWEI_GAOKUN
+ tristate "Huawei Matebook E Go Embedded Controller driver"
+ depends on ARCH_QCOM || COMPILE_TEST
+ depends on I2C
+ depends on INPUT
+ depends on HWMON
+ select AUXILIARY_BUS
+ help
+	  Say M or Y here to enable the EC driver for the Huawei Matebook E Go,
+	  an sc8280xp-based 2-in-1 tablet. The driver handles battery
+	  (information, charge control) and USB Type-C DP HPD events, as well
+	  as miscellaneous functions such as the lid sensor and the temperature
+	  sensors.
+
+	  This driver also provides battery and AC status support for this
+	  device, where that information is not properly exposed via the
+	  standard ACPI devices.
+
config EC_LENOVO_YOGA_C630
tristate "Lenovo Yoga C630 Embedded Controller driver"
depends on ARCH_QCOM || COMPILE_TEST
diff --git a/drivers/platform/arm64/Makefile b/drivers/platform/arm64/Makefile
index b2ae9114fdd8..46a99eba3264 100644
--- a/drivers/platform/arm64/Makefile
+++ b/drivers/platform/arm64/Makefile
@@ -6,4 +6,5 @@
#
obj-$(CONFIG_EC_ACER_ASPIRE1) += acer-aspire1-ec.o
+obj-$(CONFIG_EC_HUAWEI_GAOKUN) += huawei-gaokun-ec.o
obj-$(CONFIG_EC_LENOVO_YOGA_C630) += lenovo-yoga-c630.o
diff --git a/drivers/platform/arm64/huawei-gaokun-ec.c b/drivers/platform/arm64/huawei-gaokun-ec.c
new file mode 100644
index 000000000000..97c2607f8d9f
--- /dev/null
+++ b/drivers/platform/arm64/huawei-gaokun-ec.c
@@ -0,0 +1,825 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * huawei-gaokun-ec - An EC driver for HUAWEI Matebook E Go
+ *
+ * Copyright (C) 2024-2025 Pengyu Luo <mitltlatltl@gmail.com>
+ */
+
+#include <linux/auxiliary_bus.h>
+#include <linux/cleanup.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/i2c.h>
+#include <linux/input.h>
+#include <linux/notifier.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/platform_data/huawei-gaokun-ec.h>
+
+#define EC_EVENT 0x06
+
+/* These commands can also be found in section 12.3 of the ACPI specification */
+#define EC_READ 0x80
+#define EC_WRITE 0x81
+#define EC_BURST 0x82
+#define EC_QUERY 0x84
+
+#define EC_FN_LOCK_ON 0x5A
+#define EC_FN_LOCK_OFF 0x55
+#define EC_FN_LOCK_READ 0x6B
+#define EC_FN_LOCK_WRITE 0x6C
+
+#define EC_EVENT_LID 0x81
+
+#define EC_LID_STATE 0x80
+#define EC_LID_OPEN BIT(1)
+
+#define EC_TEMP_REG 0x61
+
+#define EC_STANDBY_REG 0xB2
+#define EC_STANDBY_ENTER 0xDB
+#define EC_STANDBY_EXIT 0xEB
+
+enum gaokun_ec_smart_charge_cmd {
+ SMART_CHARGE_DATA_WRITE = 0xE3,
+ SMART_CHARGE_DATA_READ,
+ SMART_CHARGE_ENABLE_WRITE,
+ SMART_CHARGE_ENABLE_READ,
+};
+
+enum gaokun_ec_ucsi_cmd {
+ UCSI_REG_WRITE = 0xD2,
+ UCSI_REG_READ,
+ UCSI_DATA_WRITE,
+ UCSI_DATA_READ,
+};
+
+#define UCSI_REG_SIZE 7
+
+/*
+ * For tx, command sequences are arranged as
+ * {master_cmd, slave_cmd, data_len, data_seq}
+ */
+#define REQ_HDR_SIZE 3
+#define INPUT_SIZE_OFFSET 2
+#define REQ_LEN(req) (REQ_HDR_SIZE + (req)[INPUT_SIZE_OFFSET])
+
+/*
+ * For rx, data sequences are arranged as
+ * {status, data_len(unreliable), data_seq}
+ */
+#define RESP_HDR_SIZE 2
+
+#define MKREQ(REG0, REG1, SIZE, ...) \
+{ \
+ REG0, REG1, SIZE, \
+ /* ## will remove comma when SIZE is 0 */ \
+ ## __VA_ARGS__, \
+ /* make sure len(pkt[3:]) >= SIZE */ \
+ [3 + (SIZE)] = 0, \
+}
+
+#define MKRESP(SIZE) \
+{ \
+ [RESP_HDR_SIZE + (SIZE) - 1] = 0, \
+}
+
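+/*
+ * Illustrative example (hypothetical register 0x90): a one-byte read could
+ * be set up as
+ *
+ *   u8 req[]  = MKREQ(0x02, EC_READ, 1, 0x90);  expands to {0x02, 0x80, 0x01, 0x90, 0x00}
+ *   u8 resp[] = MKRESP(1);                      a 3-byte buffer laid out as {status, len, data}
+ *
+ * i.e. a three-byte request header followed by the payload, and a two-byte
+ * response header in front of the returned data.
+ */
+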
+/* Possible size 1, 4, 20, 24. Most of the time, the size is 1. */
+static inline void refill_req(u8 *dest, const u8 *src, size_t size)
+{
+ memcpy(dest + REQ_HDR_SIZE, src, size);
+}
+
+static inline void refill_req_byte(u8 *dest, const u8 *src)
+{
+ dest[REQ_HDR_SIZE] = *src;
+}
+
+/* Possible size 1, 2, 4, 7, 20. Most of the time, the size is 1. */
+static inline void extr_resp(u8 *dest, const u8 *src, size_t size)
+{
+ memcpy(dest, src + RESP_HDR_SIZE, size);
+}
+
+static inline void extr_resp_byte(u8 *dest, const u8 *src)
+{
+ *dest = src[RESP_HDR_SIZE];
+}
+
+static inline void *extr_resp_shallow(const u8 *src)
+{
+ return (void *)(src + RESP_HDR_SIZE);
+}
+
+struct gaokun_ec {
+ struct i2c_client *client;
+ struct mutex lock; /* EC transaction lock */
+ struct blocking_notifier_head notifier_list;
+ struct device *hwmon_dev;
+ struct input_dev *idev;
+ bool suspended;
+};
+
+static int gaokun_ec_request(struct gaokun_ec *ec, const u8 *req,
+ size_t resp_len, u8 *resp)
+{
+ struct i2c_client *client = ec->client;
+ struct i2c_msg msgs[] = {
+ {
+ .addr = client->addr,
+ .flags = client->flags,
+ .len = REQ_LEN(req),
+ .buf = (void *)req,
+ }, {
+ .addr = client->addr,
+ .flags = client->flags | I2C_M_RD,
+ .len = resp_len,
+ .buf = resp,
+ },
+ };
+ int ret;
+
+ guard(mutex)(&ec->lock);
+ ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
+ if (ret != ARRAY_SIZE(msgs)) {
+ dev_err(&client->dev, "I2C transfer error %d\n", ret);
+ goto out_after_break;
+ }
+
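+	/* The first byte of every response is the EC status code; 0 means success. */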
+ ret = *resp;
+ if (ret)
+ dev_err(&client->dev, "EC transaction error %d\n", ret);
+
+out_after_break:
+	usleep_range(2000, 2500); /* give the EC a short break between transactions, as the ACPI implementation does */
+
+ return ret;
+}
+
+/* -------------------------------------------------------------------------- */
+/* Common API */
+
+/**
+ * gaokun_ec_read - Read from EC
+ * @ec: The gaokun_ec structure
+ * @req: The sequence to request
+ * @resp_len: The size to read
+ * @resp: The buffer to store response sequence
+ *
+ * This function is used to read data after writing a magic sequence to EC.
+ * All EC operations depend on this function.
+ *
+ * Huawei uses magic sequences everywhere to implement various functions; all
+ * of these sequences are passed to ECCD (an ACPI method which is quite
+ * similar to gaokun_ec_request), and there is no good abstraction to
+ * generalize them, so just wrap the raw transfer for now. Almost all magic
+ * sequences are kept in this file.
+ *
+ * Return: 0 on success or negative error code.
+ */
+int gaokun_ec_read(struct gaokun_ec *ec, const u8 *req,
+ size_t resp_len, u8 *resp)
+{
+ return gaokun_ec_request(ec, req, resp_len, resp);
+}
+EXPORT_SYMBOL_GPL(gaokun_ec_read);
+
+/**
+ * gaokun_ec_write - Write to EC
+ * @ec: The gaokun_ec structure
+ * @req: The sequence to request
+ *
+ * This function is a thin wrapper around gaokun_ec_read(). Use it when the
+ * caller only cares about the write status and no actual data is returned.
+ *
+ * Return: 0 on success or negative error code.
+ */
+int gaokun_ec_write(struct gaokun_ec *ec, const u8 *req)
+{
+ u8 ec_resp[] = MKRESP(0);
+
+ return gaokun_ec_request(ec, req, sizeof(ec_resp), ec_resp);
+}
+EXPORT_SYMBOL_GPL(gaokun_ec_write);
+
+int gaokun_ec_read_byte(struct gaokun_ec *ec, const u8 *req, u8 *byte)
+{
+ int ret;
+ u8 ec_resp[] = MKRESP(sizeof(*byte));
+
+ ret = gaokun_ec_read(ec, req, sizeof(ec_resp), ec_resp);
+ extr_resp_byte(byte, ec_resp);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(gaokun_ec_read_byte);
+
+/**
+ * gaokun_ec_register_notify - Register a notifier callback for EC events.
+ * @ec: The gaokun_ec structure
+ * @nb: Notifier block pointer to register
+ *
+ * Return: 0 on success or negative error code.
+ */
+int gaokun_ec_register_notify(struct gaokun_ec *ec, struct notifier_block *nb)
+{
+ return blocking_notifier_chain_register(&ec->notifier_list, nb);
+}
+EXPORT_SYMBOL_GPL(gaokun_ec_register_notify);
+
+/**
+ * gaokun_ec_unregister_notify - Unregister notifier callback for EC events.
+ * @ec: The gaokun_ec structure
+ * @nb: Notifier block pointer to unregister
+ *
+ * Unregister a notifier callback that was previously registered with
+ * gaokun_ec_register_notify().
+ */
+void gaokun_ec_unregister_notify(struct gaokun_ec *ec, struct notifier_block *nb)
+{
+ blocking_notifier_chain_unregister(&ec->notifier_list, nb);
+}
+EXPORT_SYMBOL_GPL(gaokun_ec_unregister_notify);
+
+/* -------------------------------------------------------------------------- */
+/* API for PSY */
+
+/**
+ * gaokun_ec_psy_multi_read - Read contiguous registers
+ * @ec: The gaokun_ec structure
+ * @reg: The start register
+ * @resp_len: The number of registers to be read
+ * @resp: The buffer to store response sequence
+ *
+ * Return: 0 on success or negative error code.
+ */
+int gaokun_ec_psy_multi_read(struct gaokun_ec *ec, u8 reg,
+ size_t resp_len, u8 *resp)
+{
+ u8 ec_req[] = MKREQ(0x02, EC_READ, 1, 0);
+ u8 ec_resp[] = MKRESP(1);
+ int i, ret;
+
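+	/* The EC read command transfers one byte per transaction, so walk the contiguous register range one register at a time. */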
+ for (i = 0; i < resp_len; ++i, reg++) {
+ refill_req_byte(ec_req, &reg);
+ ret = gaokun_ec_read(ec, ec_req, sizeof(ec_resp), ec_resp);
+ if (ret)
+ return ret;
+ extr_resp_byte(&resp[i], ec_resp);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(gaokun_ec_psy_multi_read);
+
+/* Smart charge */
+
+/**
+ * gaokun_ec_psy_get_smart_charge - Get smart charge data from EC
+ * @ec: The gaokun_ec structure
+ * @resp: The buffer to store response sequence (mode, delay, start, end)
+ *
+ * Return: 0 on success or negative error code.
+ */
+int gaokun_ec_psy_get_smart_charge(struct gaokun_ec *ec,
+ u8 resp[GAOKUN_SMART_CHARGE_DATA_SIZE])
+{
+ /* GBCM */
+ u8 ec_req[] = MKREQ(0x02, SMART_CHARGE_DATA_READ, 0);
+ u8 ec_resp[] = MKRESP(GAOKUN_SMART_CHARGE_DATA_SIZE);
+ int ret;
+
+ ret = gaokun_ec_read(ec, ec_req, sizeof(ec_resp), ec_resp);
+ if (ret)
+ return ret;
+
+ extr_resp(resp, ec_resp, GAOKUN_SMART_CHARGE_DATA_SIZE);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(gaokun_ec_psy_get_smart_charge);
+
+static inline bool validate_battery_threshold_range(u8 start, u8 end)
+{
+ return end != 0 && start <= end && end <= 100;
+}
+
+/**
+ * gaokun_ec_psy_set_smart_charge - Set smart charge data
+ * @ec: The gaokun_ec structure
+ * @req: The sequence to request (mode, delay, start, end)
+ *
+ * Return: 0 on success or negative error code.
+ */
+int gaokun_ec_psy_set_smart_charge(struct gaokun_ec *ec,
+ const u8 req[GAOKUN_SMART_CHARGE_DATA_SIZE])
+{
+ /* SBCM */
+ u8 ec_req[] = MKREQ(0x02, SMART_CHARGE_DATA_WRITE,
+ GAOKUN_SMART_CHARGE_DATA_SIZE);
+
+ if (!validate_battery_threshold_range(req[2], req[3]))
+ return -EINVAL;
+
+ refill_req(ec_req, req, GAOKUN_SMART_CHARGE_DATA_SIZE);
+
+ return gaokun_ec_write(ec, ec_req);
+}
+EXPORT_SYMBOL_GPL(gaokun_ec_psy_set_smart_charge);
+
+/* Smart charge enable */
+
+/**
+ * gaokun_ec_psy_get_smart_charge_enable - Get smart charge state
+ * @ec: The gaokun_ec structure
+ * @on: The state
+ *
+ * Return: 0 on success or negative error code.
+ */
+int gaokun_ec_psy_get_smart_charge_enable(struct gaokun_ec *ec, bool *on)
+{
+ /* GBAC */
+ u8 ec_req[] = MKREQ(0x02, SMART_CHARGE_ENABLE_READ, 0);
+ u8 state;
+ int ret;
+
+ ret = gaokun_ec_read_byte(ec, ec_req, &state);
+ if (ret)
+ return ret;
+
+ *on = !!state;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(gaokun_ec_psy_get_smart_charge_enable);
+
+/**
+ * gaokun_ec_psy_set_smart_charge_enable - Set smart charge state
+ * @ec: The gaokun_ec structure
+ * @on: The state
+ *
+ * Return: 0 on success or negative error code.
+ */
+int gaokun_ec_psy_set_smart_charge_enable(struct gaokun_ec *ec, bool on)
+{
+ /* SBAC */
+ u8 ec_req[] = MKREQ(0x02, SMART_CHARGE_ENABLE_WRITE, 1, on);
+
+ return gaokun_ec_write(ec, ec_req);
+}
+EXPORT_SYMBOL_GPL(gaokun_ec_psy_set_smart_charge_enable);
+
+/* -------------------------------------------------------------------------- */
+/* API for UCSI */
+
+/**
+ * gaokun_ec_ucsi_read - Read UCSI data from EC
+ * @ec: The gaokun_ec structure
+ * @resp: The buffer to store response sequence
+ *
+ * Read CCI and MSGI (used by UCSI subdriver).
+ *
+ * Return: 0 on success or negative error code.
+ */
+int gaokun_ec_ucsi_read(struct gaokun_ec *ec,
+ u8 resp[GAOKUN_UCSI_READ_SIZE])
+{
+ u8 ec_req[] = MKREQ(0x03, UCSI_DATA_READ, 0);
+ u8 ec_resp[] = MKRESP(GAOKUN_UCSI_READ_SIZE);
+ int ret;
+
+ ret = gaokun_ec_read(ec, ec_req, sizeof(ec_resp), ec_resp);
+ if (ret)
+ return ret;
+
+ extr_resp(resp, ec_resp, GAOKUN_UCSI_READ_SIZE);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(gaokun_ec_ucsi_read);
+
+/**
+ * gaokun_ec_ucsi_write - Write UCSI data to EC
+ * @ec: The gaokun_ec structure
+ * @req: The sequence to request
+ *
+ * Write CTRL and MSGO (used by UCSI subdriver).
+ *
+ * Return: 0 on success or negative error code.
+ */
+int gaokun_ec_ucsi_write(struct gaokun_ec *ec,
+ const u8 req[GAOKUN_UCSI_WRITE_SIZE])
+{
+ u8 ec_req[] = MKREQ(0x03, UCSI_DATA_WRITE, GAOKUN_UCSI_WRITE_SIZE);
+
+ refill_req(ec_req, req, GAOKUN_UCSI_WRITE_SIZE);
+
+ return gaokun_ec_write(ec, ec_req);
+}
+EXPORT_SYMBOL_GPL(gaokun_ec_ucsi_write);
+
+/**
+ * gaokun_ec_ucsi_get_reg - Get UCSI register from EC
+ * @ec: The gaokun_ec structure
+ * @ureg: The gaokun ucsi register
+ *
+ * Get UCSI register data (used by UCSI subdriver).
+ *
+ * Return: 0 on success or negative error code.
+ */
+int gaokun_ec_ucsi_get_reg(struct gaokun_ec *ec, struct gaokun_ucsi_reg *ureg)
+{
+ u8 ec_req[] = MKREQ(0x03, UCSI_REG_READ, 0);
+ u8 ec_resp[] = MKRESP(UCSI_REG_SIZE);
+ int ret;
+
+ ret = gaokun_ec_read(ec, ec_req, sizeof(ec_resp), ec_resp);
+ if (ret)
+ return ret;
+
+ extr_resp((u8 *)ureg, ec_resp, UCSI_REG_SIZE);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(gaokun_ec_ucsi_get_reg);
+
+/**
+ * gaokun_ec_ucsi_pan_ack - Ack pin assignment notifications from EC
+ * @ec: The gaokun_ec structure
+ * @port_id: The port id receiving and handling the notifications
+ *
+ * Ack pin assignment notifications (used by UCSI subdriver).
+ *
+ * Return: 0 on success or negative error code.
+ */
+int gaokun_ec_ucsi_pan_ack(struct gaokun_ec *ec, int port_id)
+{
+ u8 ec_req[] = MKREQ(0x03, UCSI_REG_WRITE, 1);
+ u8 data = 1 << port_id;
+
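+	/* One bit per port; bit N acks the pin assignment change on port N, and 0 acks with no port update. */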
+ if (port_id == GAOKUN_UCSI_NO_PORT_UPDATE)
+ data = 0;
+
+ refill_req_byte(ec_req, &data);
+
+ return gaokun_ec_write(ec, ec_req);
+}
+EXPORT_SYMBOL_GPL(gaokun_ec_ucsi_pan_ack);
+
+/* -------------------------------------------------------------------------- */
+/* EC Sysfs */
+
+/* Fn lock */
+static int gaokun_ec_get_fn_lock(struct gaokun_ec *ec, bool *on)
+{
+ /* GFRS */
+ u8 ec_req[] = MKREQ(0x02, EC_FN_LOCK_READ, 0);
+ int ret;
+ u8 state;
+
+ ret = gaokun_ec_read_byte(ec, ec_req, &state);
+ if (ret)
+ return ret;
+
+ if (state == EC_FN_LOCK_ON)
+ *on = true;
+ else if (state == EC_FN_LOCK_OFF)
+ *on = false;
+ else
+ return -EIO;
+
+ return 0;
+}
+
+static int gaokun_ec_set_fn_lock(struct gaokun_ec *ec, bool on)
+{
+ /* SFRS */
+ u8 ec_req[] = MKREQ(0x02, EC_FN_LOCK_WRITE, 1,
+ on ? EC_FN_LOCK_ON : EC_FN_LOCK_OFF);
+
+ return gaokun_ec_write(ec, ec_req);
+}
+
+static ssize_t fn_lock_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct gaokun_ec *ec = dev_get_drvdata(dev);
+ bool on;
+ int ret;
+
+ ret = gaokun_ec_get_fn_lock(ec, &on);
+ if (ret)
+ return ret;
+
+ return sysfs_emit(buf, "%d\n", on);
+}
+
+static ssize_t fn_lock_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ struct gaokun_ec *ec = dev_get_drvdata(dev);
+ bool on;
+ int ret;
+
+ if (kstrtobool(buf, &on))
+ return -EINVAL;
+
+ ret = gaokun_ec_set_fn_lock(ec, on);
+ if (ret)
+ return ret;
+
+ return size;
+}
+
+static DEVICE_ATTR_RW(fn_lock);
+
+static struct attribute *gaokun_ec_attrs[] = {
+ &dev_attr_fn_lock.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(gaokun_ec);
+
+/* -------------------------------------------------------------------------- */
+/* Thermal Zone HwMon */
+
+/* Range from 0 to 0x2C, partially valid */
+static const u8 temp_reg[] = {
+ 0x05, 0x07, 0x08, 0x0E, 0x0F, 0x12, 0x15, 0x1E,
+ 0x1F, 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26,
+ 0x27, 0x28, 0x29, 0x2A
+};
+
+static int gaokun_ec_get_temp(struct gaokun_ec *ec, u8 idx, long *temp)
+{
+ /* GTMP */
+ u8 ec_req[] = MKREQ(0x02, EC_TEMP_REG, 1, temp_reg[idx]);
+ u8 ec_resp[] = MKRESP(sizeof(__le16));
+ __le16 *tmp;
+ int ret;
+
+ ret = gaokun_ec_read(ec, ec_req, sizeof(ec_resp), ec_resp);
+ if (ret)
+ return ret;
+
+ tmp = (__le16 *)extr_resp_shallow(ec_resp);
+	*temp = le16_to_cpu(*tmp) * 100; /* convert to hwmon's millidegree Celsius unit */
+
+ return 0;
+}
+
+static umode_t
+gaokun_ec_hwmon_is_visible(const void *data, enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ return type == hwmon_temp ? 0444 : 0;
+}
+
+static int
+gaokun_ec_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ struct gaokun_ec *ec = dev_get_drvdata(dev);
+
+ if (type == hwmon_temp)
+ return gaokun_ec_get_temp(ec, channel, val);
+
+ return -EINVAL;
+}
+
+static const struct hwmon_ops gaokun_ec_hwmon_ops = {
+ .is_visible = gaokun_ec_hwmon_is_visible,
+ .read = gaokun_ec_hwmon_read,
+};
+
+static u32 gaokun_ec_temp_config[] = {
+ [0 ... ARRAY_SIZE(temp_reg) - 1] = HWMON_T_INPUT,
+ 0
+};
+
+static const struct hwmon_channel_info gaokun_ec_temp = {
+ .type = hwmon_temp,
+ .config = gaokun_ec_temp_config,
+};
+
+static const struct hwmon_channel_info * const gaokun_ec_hwmon_info[] = {
+ &gaokun_ec_temp,
+ NULL
+};
+
+static const struct hwmon_chip_info gaokun_ec_hwmon_chip_info = {
+ .ops = &gaokun_ec_hwmon_ops,
+ .info = gaokun_ec_hwmon_info,
+};
+
+/* -------------------------------------------------------------------------- */
+/* Modern Standby */
+
+static int gaokun_ec_suspend(struct device *dev)
+{
+ struct gaokun_ec *ec = dev_get_drvdata(dev);
+ u8 ec_req[] = MKREQ(0x02, EC_STANDBY_REG, 1, EC_STANDBY_ENTER);
+ int ret;
+
+ if (ec->suspended)
+ return 0;
+
+ ret = gaokun_ec_write(ec, ec_req);
+ if (ret)
+ return ret;
+
+ ec->suspended = true;
+
+ return 0;
+}
+
+static int gaokun_ec_resume(struct device *dev)
+{
+ struct gaokun_ec *ec = dev_get_drvdata(dev);
+ u8 ec_req[] = MKREQ(0x02, EC_STANDBY_REG, 1, EC_STANDBY_EXIT);
+ int ret;
+ int i;
+
+ if (!ec->suspended)
+ return 0;
+
+ for (i = 0; i < 3; ++i) {
+ ret = gaokun_ec_write(ec, ec_req);
+ if (ret == 0)
+ break;
+
+	msleep(100); /* the EC needs time to resume */
+	}
+
+ ec->suspended = false;
+
+ return 0;
+}
+
+static void gaokun_aux_release(struct device *dev)
+{
+ struct auxiliary_device *adev = to_auxiliary_dev(dev);
+
+ kfree(adev);
+}
+
+static void gaokun_aux_remove(void *data)
+{
+ struct auxiliary_device *adev = data;
+
+ auxiliary_device_delete(adev);
+ auxiliary_device_uninit(adev);
+}
+
+static int gaokun_aux_init(struct device *parent, const char *name,
+ struct gaokun_ec *ec)
+{
+ struct auxiliary_device *adev;
+ int ret;
+
+ adev = kzalloc(sizeof(*adev), GFP_KERNEL);
+ if (!adev)
+ return -ENOMEM;
+
+ adev->name = name;
+ adev->id = 0;
+ adev->dev.parent = parent;
+ adev->dev.release = gaokun_aux_release;
+ adev->dev.platform_data = ec;
+ /* Allow aux devices to access parent's DT nodes directly */
+ device_set_of_node_from_dev(&adev->dev, parent);
+
+ ret = auxiliary_device_init(adev);
+ if (ret) {
+ kfree(adev);
+ return ret;
+ }
+
+ ret = auxiliary_device_add(adev);
+ if (ret) {
+ auxiliary_device_uninit(adev);
+ return ret;
+ }
+
+ return devm_add_action_or_reset(parent, gaokun_aux_remove, adev);
+}
+
+/* -------------------------------------------------------------------------- */
+/* EC */
+
+static irqreturn_t gaokun_ec_irq_handler(int irq, void *data)
+{
+ struct gaokun_ec *ec = data;
+ u8 ec_req[] = MKREQ(EC_EVENT, EC_QUERY, 0);
+ u8 status, id;
+ int ret;
+
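+	/* Ask the EC which event is pending (ACPI-style EC query). */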
+ ret = gaokun_ec_read_byte(ec, ec_req, &id);
+ if (ret)
+ return IRQ_HANDLED;
+
+ switch (id) {
+ case 0x0: /* No event */
+ break;
+
+ case EC_EVENT_LID:
+ gaokun_ec_psy_read_byte(ec, EC_LID_STATE, &status);
+ status &= EC_LID_OPEN;
+ input_report_switch(ec->idev, SW_LID, !status);
+ input_sync(ec->idev);
+ break;
+
+ default:
+ blocking_notifier_call_chain(&ec->notifier_list, id, ec);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int gaokun_ec_probe(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ struct gaokun_ec *ec;
+ int ret;
+
+ ec = devm_kzalloc(dev, sizeof(*ec), GFP_KERNEL);
+ if (!ec)
+ return -ENOMEM;
+
+ ret = devm_mutex_init(dev, &ec->lock);
+ if (ret)
+ return ret;
+
+ ec->client = client;
+ i2c_set_clientdata(client, ec);
+ BLOCKING_INIT_NOTIFIER_HEAD(&ec->notifier_list);
+
+ /* Lid switch */
+ ec->idev = devm_input_allocate_device(dev);
+ if (!ec->idev)
+ return -ENOMEM;
+
+ ec->idev->name = "LID";
+ ec->idev->phys = "gaokun-ec/input0";
+ input_set_capability(ec->idev, EV_SW, SW_LID);
+
+ ret = input_register_device(ec->idev);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to register input device\n");
+
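+	/* Spawn the battery (PSY) and UCSI subdrivers as auxiliary devices that share this EC instance via platform_data. */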
+ ret = gaokun_aux_init(dev, GAOKUN_DEV_PSY, ec);
+ if (ret)
+ return ret;
+
+ ret = gaokun_aux_init(dev, GAOKUN_DEV_UCSI, ec);
+ if (ret)
+ return ret;
+
+ ret = devm_request_threaded_irq(dev, client->irq, NULL,
+ gaokun_ec_irq_handler, IRQF_ONESHOT,
+ dev_name(dev), ec);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to request IRQ\n");
+
+ ec->hwmon_dev = devm_hwmon_device_register_with_info(dev, "gaokun_ec_hwmon",
+ ec, &gaokun_ec_hwmon_chip_info, NULL);
+ if (IS_ERR(ec->hwmon_dev))
+ return dev_err_probe(dev, PTR_ERR(ec->hwmon_dev),
+ "Failed to register hwmon device\n");
+
+ return 0;
+}
+
+static const struct i2c_device_id gaokun_ec_id[] = {
+ { "gaokun-ec", },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, gaokun_ec_id);
+
+static const struct of_device_id gaokun_ec_of_match[] = {
+ { .compatible = "huawei,gaokun3-ec", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, gaokun_ec_of_match);
+
+static const struct dev_pm_ops gaokun_ec_pm_ops = {
+ NOIRQ_SYSTEM_SLEEP_PM_OPS(gaokun_ec_suspend, gaokun_ec_resume)
+};
+
+static struct i2c_driver gaokun_ec_driver = {
+ .driver = {
+ .name = "gaokun-ec",
+ .of_match_table = gaokun_ec_of_match,
+ .pm = &gaokun_ec_pm_ops,
+ .dev_groups = gaokun_ec_groups,
+ },
+ .probe = gaokun_ec_probe,
+ .id_table = gaokun_ec_id,
+};
+module_i2c_driver(gaokun_ec_driver);
+
+MODULE_DESCRIPTION("HUAWEI Matebook E Go EC driver");
+MODULE_AUTHOR("Pengyu Luo <mitltlatltl@gmail.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/platform/chrome/cros_ec_lpc.c b/drivers/platform/chrome/cros_ec_lpc.c
index 5a2f1d98b350..be319949b941 100644
--- a/drivers/platform/chrome/cros_ec_lpc.c
+++ b/drivers/platform/chrome/cros_ec_lpc.c
@@ -30,6 +30,7 @@
#define DRV_NAME "cros_ec_lpcs"
#define ACPI_DRV_NAME "GOOG0004"
+#define FRMW_ACPI_DRV_NAME "FRMWC004"
/* True if ACPI device is present */
static bool cros_ec_lpc_acpi_device_found;
@@ -514,7 +515,7 @@ static int cros_ec_lpc_probe(struct platform_device *pdev)
acpi_status status;
struct cros_ec_device *ec_dev;
struct cros_ec_lpc *ec_lpc;
- struct lpc_driver_data *driver_data;
+ const struct lpc_driver_data *driver_data;
u8 buf[2] = {};
int irq, ret;
u32 quirks;
@@ -526,6 +527,9 @@ static int cros_ec_lpc_probe(struct platform_device *pdev)
ec_lpc->mmio_memory_base = EC_LPC_ADDR_MEMMAP;
driver_data = platform_get_drvdata(pdev);
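+	/* ACPI-matched devices (e.g. the Framework entry below) carry their quirks as ACPI match data rather than platform data. */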
+ if (!driver_data)
+ driver_data = acpi_device_get_match_data(dev);
+
if (driver_data) {
quirks = driver_data->quirks;
@@ -696,12 +700,6 @@ static void cros_ec_lpc_remove(struct platform_device *pdev)
cros_ec_unregister(ec_dev);
}
-static const struct acpi_device_id cros_ec_lpc_acpi_device_ids[] = {
- { ACPI_DRV_NAME, 0 },
- { }
-};
-MODULE_DEVICE_TABLE(acpi, cros_ec_lpc_acpi_device_ids);
-
static const struct lpc_driver_data framework_laptop_npcx_lpc_driver_data __initconst = {
.quirks = CROS_EC_LPC_QUIRK_REMAP_MEMORY,
.quirk_mmio_memory_base = 0xE00,
@@ -713,6 +711,13 @@ static const struct lpc_driver_data framework_laptop_mec_lpc_driver_data __initc
.quirk_aml_mutex_name = "ECMT",
};
+static const struct acpi_device_id cros_ec_lpc_acpi_device_ids[] = {
+ { ACPI_DRV_NAME, 0 },
+ { FRMW_ACPI_DRV_NAME, (kernel_ulong_t)&framework_laptop_npcx_lpc_driver_data },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, cros_ec_lpc_acpi_device_ids);
+
static const struct dmi_system_id cros_ec_lpc_dmi_table[] __initconst = {
{
/*
@@ -866,7 +871,8 @@ static int __init cros_ec_lpc_init(void)
int ret;
const struct dmi_system_id *dmi_match;
- cros_ec_lpc_acpi_device_found = !!cros_ec_lpc_get_device(ACPI_DRV_NAME);
+ cros_ec_lpc_acpi_device_found = !!cros_ec_lpc_get_device(ACPI_DRV_NAME) ||
+ !!cros_ec_lpc_get_device(FRMW_ACPI_DRV_NAME);
dmi_match = dmi_first_match(cros_ec_lpc_dmi_table);
diff --git a/drivers/platform/chrome/cros_ec_sysfs.c b/drivers/platform/chrome/cros_ec_sysfs.c
index bc1a5ba09528..f22e9523da3e 100644
--- a/drivers/platform/chrome/cros_ec_sysfs.c
+++ b/drivers/platform/chrome/cros_ec_sysfs.c
@@ -296,18 +296,81 @@ static ssize_t kb_wake_angle_store(struct device *dev,
return count;
}
+static ssize_t usbpdmuxinfo_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct cros_ec_dev *ec = to_cros_ec_dev(dev);
+ ssize_t count = 0;
+ struct ec_response_usb_pd_ports resp_pd_ports;
+ int ret;
+ int i;
+
+ ret = cros_ec_cmd(ec->ec_dev, 0, EC_CMD_USB_PD_PORTS, NULL, 0,
+ &resp_pd_ports, sizeof(resp_pd_ports));
+ if (ret < 0)
+ return -EIO;
+
+ for (i = 0; i < resp_pd_ports.num_ports; i++) {
+ struct ec_response_usb_pd_mux_info resp_mux;
+ struct ec_params_usb_pd_mux_info req = {
+ .port = i,
+ };
+
+ ret = cros_ec_cmd(ec->ec_dev, 0, EC_CMD_USB_PD_MUX_INFO,
+ &req, sizeof(req), &resp_mux, sizeof(resp_mux));
+
+ if (ret >= 0) {
+ count += sysfs_emit_at(buf, count, "Port %d:", i);
+ count += sysfs_emit_at(buf, count, " USB=%d",
+ !!(resp_mux.flags & USB_PD_MUX_USB_ENABLED));
+ count += sysfs_emit_at(buf, count, " DP=%d",
+ !!(resp_mux.flags & USB_PD_MUX_DP_ENABLED));
+ count += sysfs_emit_at(buf, count, " POLARITY=%s",
+ (resp_mux.flags & USB_PD_MUX_POLARITY_INVERTED) ?
+ "INVERTED" : "NORMAL");
+ count += sysfs_emit_at(buf, count, " HPD_IRQ=%d",
+ !!(resp_mux.flags & USB_PD_MUX_HPD_IRQ));
+ count += sysfs_emit_at(buf, count, " HPD_LVL=%d",
+ !!(resp_mux.flags & USB_PD_MUX_HPD_LVL));
+ count += sysfs_emit_at(buf, count, " SAFE=%d",
+ !!(resp_mux.flags & USB_PD_MUX_SAFE_MODE));
+ count += sysfs_emit_at(buf, count, " TBT=%d",
+ !!(resp_mux.flags & USB_PD_MUX_TBT_COMPAT_ENABLED));
+ count += sysfs_emit_at(buf, count, " USB4=%d\n",
+ !!(resp_mux.flags & USB_PD_MUX_USB4_ENABLED));
+ }
+ }
+
+ return count ? : -EIO;
+}
+
+static ssize_t ap_mode_entry_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct cros_ec_dev *ec = to_cros_ec_dev(dev);
+ const bool ap_driven_altmode = cros_ec_check_features(
+ ec, EC_FEATURE_TYPEC_REQUIRE_AP_MODE_ENTRY);
+
+ return sysfs_emit(buf, "%s\n", ap_driven_altmode ? "yes" : "no");
+}
+
/* Module initialization */
static DEVICE_ATTR_RW(reboot);
static DEVICE_ATTR_RO(version);
static DEVICE_ATTR_RO(flashinfo);
static DEVICE_ATTR_RW(kb_wake_angle);
+static DEVICE_ATTR_RO(usbpdmuxinfo);
+static DEVICE_ATTR_RO(ap_mode_entry);
static struct attribute *__ec_attrs[] = {
&dev_attr_kb_wake_angle.attr,
&dev_attr_reboot.attr,
&dev_attr_version.attr,
&dev_attr_flashinfo.attr,
+ &dev_attr_usbpdmuxinfo.attr,
+ &dev_attr_ap_mode_entry.attr,
NULL,
};
@@ -320,6 +383,14 @@ static umode_t cros_ec_ctrl_visible(struct kobject *kobj,
if (a == &dev_attr_kb_wake_angle.attr && !ec->has_kb_wake_angle)
return 0;
+ if (a == &dev_attr_usbpdmuxinfo.attr ||
+ a == &dev_attr_ap_mode_entry.attr) {
+ struct cros_ec_platform *ec_platform = dev_get_platdata(ec->dev);
+
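+		/* Expose the new Type-C attributes only on the main EC device; hide them on other EC instances. */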
+ if (strcmp(ec_platform->ec_name, CROS_EC_DEV_NAME))
+ return 0;
+ }
+
return a->mode;
}
diff --git a/drivers/platform/chrome/cros_ec_typec.c b/drivers/platform/chrome/cros_ec_typec.c
index 6ee182101bc9..d2228720991f 100644
--- a/drivers/platform/chrome/cros_ec_typec.c
+++ b/drivers/platform/chrome/cros_ec_typec.c
@@ -42,6 +42,24 @@ static void cros_typec_role_switch_quirk(struct fwnode_handle *fwnode)
#endif
}
+static int cros_typec_enter_usb_mode(struct typec_port *tc_port, enum usb_mode mode)
+{
+ struct cros_typec_port *port = typec_get_drvdata(tc_port);
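+	/* USB4 entry maps to the EC's ENTER_MODE command with the USB4 alt mode; any other requested mode exits all alt modes. */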
+ struct ec_params_typec_control req = {
+ .port = port->port_num,
+ .command = (mode == USB_MODE_USB4) ?
+ TYPEC_CONTROL_COMMAND_ENTER_MODE : TYPEC_CONTROL_COMMAND_EXIT_MODES,
+ .mode_to_enter = CROS_EC_ALTMODE_USB4
+ };
+
+ return cros_ec_cmd(port->typec_data->ec, 0, EC_CMD_TYPEC_CONTROL,
+ &req, sizeof(req), NULL, 0);
+}
+
+static const struct typec_operations cros_typec_usb_mode_ops = {
+ .enter_usb_mode = cros_typec_enter_usb_mode
+};
+
static int cros_typec_parse_port_props(struct typec_capability *cap,
struct fwnode_handle *fwnode,
struct device *dev)
@@ -84,6 +102,13 @@ static int cros_typec_parse_port_props(struct typec_capability *cap,
cap->prefer_role = ret;
}
+ if (fwnode_property_present(fwnode, "usb2-port"))
+ cap->usb_capability |= USB_CAPABILITY_USB2;
+ if (fwnode_property_present(fwnode, "usb3-port"))
+ cap->usb_capability |= USB_CAPABILITY_USB3;
+ if (fwnode_property_present(fwnode, "usb4-port"))
+ cap->usb_capability |= USB_CAPABILITY_USB4;
+
cros_typec_role_switch_quirk(fwnode);
cap->fwnode = fwnode;
@@ -379,6 +404,9 @@ static int cros_typec_init_ports(struct cros_typec_data *typec)
if (ret < 0)
goto unregister_ports;
+ cap->driver_data = cros_port;
+ cap->ops = &cros_typec_usb_mode_ops;
+
cros_port->port = typec_register_port(dev, cap);
if (IS_ERR(cros_port->port)) {
ret = PTR_ERR(cros_port->port);
diff --git a/drivers/platform/chrome/cros_ec_typec.h b/drivers/platform/chrome/cros_ec_typec.h
index 9fd5342bb0ad..f9c31f04c102 100644
--- a/drivers/platform/chrome/cros_ec_typec.h
+++ b/drivers/platform/chrome/cros_ec_typec.h
@@ -18,6 +18,7 @@
enum {
CROS_EC_ALTMODE_DP = 0,
CROS_EC_ALTMODE_TBT,
+ CROS_EC_ALTMODE_USB4,
CROS_EC_ALTMODE_MAX,
};
diff --git a/drivers/platform/mellanox/Kconfig b/drivers/platform/mellanox/Kconfig
index f7dfa0e785fd..aa760f064a17 100644
--- a/drivers/platform/mellanox/Kconfig
+++ b/drivers/platform/mellanox/Kconfig
@@ -14,6 +14,19 @@ menuconfig MELLANOX_PLATFORM
if MELLANOX_PLATFORM
+config MLX_PLATFORM
+ tristate "Mellanox Technologies platform support"
+ depends on ACPI && I2C && PCI
+ select REGMAP
+ help
+ This option enables system support for the Mellanox Technologies
+ platform. The Mellanox systems provide data center networking
+	  solutions based on Virtual Protocol Interconnect (VPI) technology,
+	  enabling seamless connectivity to 56/100Gb/s InfiniBand or 10/40/56GbE
+	  connections.
+
+ If you have a Mellanox system, say Y or M here.
+
config MLXREG_HOTPLUG
tristate "Mellanox platform hotplug driver support"
depends on HWMON
diff --git a/drivers/platform/mellanox/Makefile b/drivers/platform/mellanox/Makefile
index 04703c0416b1..ba56485cbe8c 100644
--- a/drivers/platform/mellanox/Makefile
+++ b/drivers/platform/mellanox/Makefile
@@ -3,6 +3,7 @@
# Makefile for linux/drivers/platform/mellanox
# Mellanox Platform-Specific Drivers
#
+obj-$(CONFIG_MLX_PLATFORM) += mlx-platform.o
obj-$(CONFIG_MLXBF_BOOTCTL) += mlxbf-bootctl.o
obj-$(CONFIG_MLXBF_PMC) += mlxbf-pmc.o
obj-$(CONFIG_MLXBF_TMFIFO) += mlxbf-tmfifo.o
diff --git a/drivers/platform/x86/mlx-platform.c b/drivers/platform/mellanox/mlx-platform.c
index 9c7f30a47f1f..08b0430a2899 100644
--- a/drivers/platform/x86/mlx-platform.c
+++ b/drivers/platform/mellanox/mlx-platform.c
@@ -145,7 +145,7 @@
#define MLXPLAT_CPLD_LPC_REG_WD3_TMR_OFFSET 0xd1
#define MLXPLAT_CPLD_LPC_REG_WD3_TLEFT_OFFSET 0xd2
#define MLXPLAT_CPLD_LPC_REG_WD3_ACT_OFFSET 0xd3
-#define MLXPLAT_CPLD_LPC_REG_DBG_CTRL_OFFSET 0xd9
+#define MLXPLAT_CPLD_LPC_REG_CPLD6_MVER_OFFSET 0xd9
#define MLXPLAT_CPLD_LPC_REG_I2C_CH1_OFFSET 0xdb
#define MLXPLAT_CPLD_LPC_REG_I2C_CH2_OFFSET 0xda
#define MLXPLAT_CPLD_LPC_REG_I2C_CH3_OFFSET 0xdc
@@ -2247,7 +2247,7 @@ struct mlxreg_core_hotplug_platform_data mlxplat_mlxcpld_modular_data = {
.mask_low = MLXPLAT_CPLD_LOW_AGGR_MASK_LOW,
};
-/* Platform hotplug for NVLink blade systems family data */
+/* Platform hotplug for NVLink blade systems family data */
static struct mlxreg_core_data mlxplat_mlxcpld_global_wp_items_data[] = {
{
.label = "global_wp_grant",
@@ -2279,7 +2279,7 @@ struct mlxreg_core_hotplug_platform_data mlxplat_mlxcpld_chassis_blade_data = {
.mask_low = MLXPLAT_CPLD_LOW_AGGR_MASK_LOW,
};
-/* Platform hotplug for switch systems family data */
+/* Platform hotplug for switch systems family data */
static struct mlxreg_core_data mlxplat_mlxcpld_erot_ap_items_data[] = {
{
.label = "erot1_ap",
@@ -2387,7 +2387,7 @@ static struct mlxreg_core_hotplug_notifier mlxplat_mlxcpld_l1_switch_pwr_events_
.user_handler = mlxplat_mlxcpld_l1_switch_pwr_events_handler,
};
-/* Platform hotplug for l1 switch systems family data */
+/* Platform hotplug for l1 switch systems family data */
static struct mlxreg_core_data mlxplat_mlxcpld_l1_switch_pwr_events_items_data[] = {
{
.label = "power_button",
@@ -4401,7 +4401,7 @@ static struct mlxreg_core_platform_data mlxplat_modular_regs_io_data = {
.counter = ARRAY_SIZE(mlxplat_mlxcpld_modular_regs_io_data),
};
-/* Platform register access for chassis blade systems family data */
+/* Platform register access for chassis blade systems family data */
static struct mlxreg_core_data mlxplat_mlxcpld_chassis_blade_regs_io_data[] = {
{
.label = "cpld1_version",
@@ -5050,7 +5050,6 @@ static bool mlxplat_mlxcpld_writeable_reg(struct device *dev, unsigned int reg)
case MLXPLAT_CPLD_LPC_REG_WD3_TMR_OFFSET:
case MLXPLAT_CPLD_LPC_REG_WD3_TLEFT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_WD3_ACT_OFFSET:
- case MLXPLAT_CPLD_LPC_REG_DBG_CTRL_OFFSET:
case MLXPLAT_CPLD_LPC_REG_I2C_CH1_OFFSET:
case MLXPLAT_CPLD_LPC_REG_I2C_CH2_OFFSET:
case MLXPLAT_CPLD_LPC_REG_I2C_CH3_OFFSET:
@@ -5186,7 +5185,7 @@ static bool mlxplat_mlxcpld_readable_reg(struct device *dev, unsigned int reg)
case MLXPLAT_CPLD_LPC_REG_WD3_TMR_OFFSET:
case MLXPLAT_CPLD_LPC_REG_WD3_TLEFT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_WD3_ACT_OFFSET:
- case MLXPLAT_CPLD_LPC_REG_DBG_CTRL_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_CPLD6_MVER_OFFSET:
case MLXPLAT_CPLD_LPC_REG_I2C_CH1_OFFSET:
case MLXPLAT_CPLD_LPC_REG_I2C_CH2_OFFSET:
case MLXPLAT_CPLD_LPC_REG_I2C_CH3_OFFSET:
@@ -5343,7 +5342,7 @@ static bool mlxplat_mlxcpld_volatile_reg(struct device *dev, unsigned int reg)
case MLXPLAT_CPLD_LPC_REG_WD2_TLEFT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_WD3_TMR_OFFSET:
case MLXPLAT_CPLD_LPC_REG_WD3_TLEFT_OFFSET:
- case MLXPLAT_CPLD_LPC_REG_DBG_CTRL_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_CPLD6_MVER_OFFSET:
case MLXPLAT_CPLD_LPC_REG_I2C_CH1_OFFSET:
case MLXPLAT_CPLD_LPC_REG_I2C_CH2_OFFSET:
case MLXPLAT_CPLD_LPC_REG_I2C_CH3_OFFSET:
@@ -6573,7 +6572,7 @@ static int mlxplat_probe(struct platform_device *pdev)
}
/* Set default registers. */
- for (i = 0; i < mlxplat_regmap_config->num_reg_defaults; i++) {
+ for (i = 0; i < mlxplat_regmap_config->num_reg_defaults; i++) {
err = regmap_write(priv->regmap,
mlxplat_regmap_config->reg_defaults[i].reg,
mlxplat_regmap_config->reg_defaults[i].def);
diff --git a/drivers/platform/mellanox/mlxbf-bootctl.c b/drivers/platform/mellanox/mlxbf-bootctl.c
index 9cae07348d5e..b95dcb8d483c 100644
--- a/drivers/platform/mellanox/mlxbf-bootctl.c
+++ b/drivers/platform/mellanox/mlxbf-bootctl.c
@@ -91,6 +91,7 @@ static const char * const mlxbf_rsh_log_level[] = {
static DEFINE_MUTEX(icm_ops_lock);
static DEFINE_MUTEX(os_up_lock);
static DEFINE_MUTEX(mfg_ops_lock);
+static DEFINE_MUTEX(rtc_ops_lock);
/*
* Objects are stored within the MFG partition per type.
@@ -489,6 +490,23 @@ static ssize_t large_icm_store(struct device *dev,
return res.a0 ? -EPERM : count;
}
+static ssize_t rtc_battery_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct arm_smccc_res res;
+
+ mutex_lock(&rtc_ops_lock);
+ arm_smccc_smc(MLNX_HANDLE_GET_RTC_LOW_BATT, 0, 0, 0, 0,
+ 0, 0, 0, &res);
+ mutex_unlock(&rtc_ops_lock);
+
+ if (res.a0)
+ return -EPERM;
+
+ return sysfs_emit(buf, "0x%lx\n", res.a1);
+}
+
static ssize_t os_up_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
@@ -906,6 +924,7 @@ static DEVICE_ATTR_RW(sn);
static DEVICE_ATTR_RW(uuid);
static DEVICE_ATTR_RW(rev);
static DEVICE_ATTR_WO(mfg_lock);
+static DEVICE_ATTR_RO(rtc_battery);
static struct attribute *mlxbf_bootctl_attrs[] = {
&dev_attr_post_reset_wdog.attr,
@@ -925,6 +944,7 @@ static struct attribute *mlxbf_bootctl_attrs[] = {
&dev_attr_uuid.attr,
&dev_attr_rev.attr,
&dev_attr_mfg_lock.attr,
+ &dev_attr_rtc_battery.attr,
NULL
};
diff --git a/drivers/platform/mellanox/mlxbf-bootctl.h b/drivers/platform/mellanox/mlxbf-bootctl.h
index 1299750a8661..90bbbdc65879 100644
--- a/drivers/platform/mellanox/mlxbf-bootctl.h
+++ b/drivers/platform/mellanox/mlxbf-bootctl.h
@@ -103,6 +103,11 @@
*/
#define MLNX_HANDLE_OS_UP 0x82000014
+/*
+ * SMC function ID to get and clear the RTC low voltage bit
+ */
+#define MLNX_HANDLE_GET_RTC_LOW_BATT 0x82000023
+
/* SMC function IDs for SiP Service queries */
#define MLXBF_BOOTCTL_SIP_SVC_CALL_COUNT 0x8200ff00
#define MLXBF_BOOTCTL_SIP_SVC_UID 0x8200ff01
diff --git a/drivers/platform/surface/surface_aggregator_registry.c b/drivers/platform/surface/surface_aggregator_registry.c
index d4f32ad66530..a594d5fcfcfd 100644
--- a/drivers/platform/surface/surface_aggregator_registry.c
+++ b/drivers/platform/surface/surface_aggregator_registry.c
@@ -371,7 +371,7 @@ static const struct software_node *ssam_node_group_sp8[] = {
NULL,
};
-/* Devices for Surface Pro 9 (Intel/x86) and 10 */
+/* Devices for Surface Pro 9, 10 and 11 (Intel/x86) */
static const struct software_node *ssam_node_group_sp9[] = {
&ssam_node_root,
&ssam_node_hub_kip,
@@ -430,6 +430,9 @@ static const struct acpi_device_id ssam_platform_hub_acpi_match[] = {
/* Surface Pro 10 */
{ "MSHW0510", (unsigned long)ssam_node_group_sp9 },
+ /* Surface Pro 11 */
+ { "MSHW0583", (unsigned long)ssam_node_group_sp9 },
+
/* Surface Book 2 */
{ "MSHW0107", (unsigned long)ssam_node_group_gen5 },
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index 0258dd879d64..43407e76476b 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -475,6 +475,17 @@ config IDEAPAD_LAPTOP
This is a driver for Lenovo IdeaPad netbooks contains drivers for
rfkill switch, hotkey, fan control and backlight control.
+config LENOVO_WMI_HOTKEY_UTILITIES
+ tristate "Lenovo Hotkey Utility WMI extras driver"
+ depends on ACPI_WMI
+ select NEW_LEDS
+ select LEDS_CLASS
+ imply IDEAPAD_LAPTOP
+ help
+	  This driver provides WMI support for Lenovo's customized hotkey functions,
+	  such as LED control for audio/mic mute events on Ideapad, YOGA, XiaoXin,
+	  Gaming, ThinkBook and similar series.
+
config LENOVO_YMC
tristate "Lenovo Yoga Tablet Mode Control"
depends on ACPI_WMI
@@ -778,6 +789,23 @@ config BARCO_P50_GPIO
To compile this driver as a module, choose M here: the module
will be called barco-p50-gpio.
+config SAMSUNG_GALAXYBOOK
+ tristate "Samsung Galaxy Book driver"
+ depends on ACPI
+ depends on ACPI_BATTERY
+ depends on INPUT
+ depends on LEDS_CLASS
+ depends on SERIO_I8042
+ select ACPI_PLATFORM_PROFILE
+ select FW_ATTR_CLASS
+ help
+ This is a driver for Samsung Galaxy Book series notebooks. It adds
+ support for the keyboard backlight control, performance mode control,
+ function keys, and various firmware attributes.
+
+ For more information about this driver, see
+ <file:Documentation/admin-guide/laptops/samsung-galaxybook.rst>.
+
config SAMSUNG_LAPTOP
tristate "Samsung Laptop driver"
depends on RFKILL || RFKILL = n
@@ -1012,19 +1040,6 @@ config SERIAL_MULTI_INSTANTIATE
To compile this driver as a module, choose M here: the module
will be called serial-multi-instantiate.
-config MLX_PLATFORM
- tristate "Mellanox Technologies platform support"
- depends on ACPI && I2C && PCI
- select REGMAP
- help
- This option enables system support for the Mellanox Technologies
- platform. The Mellanox systems provide data center networking
- solutions based on Virtual Protocol Interconnect (VPI) technology
- enable seamless connectivity to 56/100Gb/s InfiniBand or 10/40/56GbE
- connection.
-
- If you have a Mellanox system, say Y or M here.
-
config TOUCHSCREEN_DMI
bool "DMI based touchscreen configuration info"
depends on ACPI && DMI && I2C=y && TOUCHSCREEN_SILEAD
diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile
index e1b142947067..650dfbebb6c8 100644
--- a/drivers/platform/x86/Makefile
+++ b/drivers/platform/x86/Makefile
@@ -61,6 +61,7 @@ obj-$(CONFIG_UV_SYSFS) += uv_sysfs.o
# IBM Thinkpad and Lenovo
obj-$(CONFIG_IBM_RTL) += ibm_rtl.o
obj-$(CONFIG_IDEAPAD_LAPTOP) += ideapad-laptop.o
+obj-$(CONFIG_LENOVO_WMI_HOTKEY_UTILITIES) += lenovo-wmi-hotkey-utilities.o
obj-$(CONFIG_LENOVO_YMC) += lenovo-ymc.o
obj-$(CONFIG_SENSORS_HDAPS) += hdaps.o
obj-$(CONFIG_THINKPAD_ACPI) += thinkpad_acpi.o
@@ -95,8 +96,9 @@ obj-$(CONFIG_PCENGINES_APU2) += pcengines-apuv2.o
obj-$(CONFIG_BARCO_P50_GPIO) += barco-p50-gpio.o
# Samsung
-obj-$(CONFIG_SAMSUNG_LAPTOP) += samsung-laptop.o
-obj-$(CONFIG_SAMSUNG_Q10) += samsung-q10.o
+obj-$(CONFIG_SAMSUNG_GALAXYBOOK) += samsung-galaxybook.o
+obj-$(CONFIG_SAMSUNG_LAPTOP) += samsung-laptop.o
+obj-$(CONFIG_SAMSUNG_Q10) += samsung-q10.o
# Toshiba
obj-$(CONFIG_TOSHIBA_BT_RFKILL) += toshiba_bluetooth.o
@@ -122,7 +124,6 @@ obj-$(CONFIG_TOPSTAR_LAPTOP) += topstar-laptop.o
# Platform drivers
obj-$(CONFIG_FW_ATTR_CLASS) += firmware_attributes_class.o
obj-$(CONFIG_SERIAL_MULTI_INSTANTIATE) += serial-multi-instantiate.o
-obj-$(CONFIG_MLX_PLATFORM) += mlx-platform.o
obj-$(CONFIG_TOUCHSCREEN_DMI) += touchscreen_dmi.o
obj-$(CONFIG_WIRELESS_HOTKEY) += wireless-hotkey.o
obj-$(CONFIG_X86_ANDROID_TABLETS) += x86-android-tablets/
diff --git a/drivers/platform/x86/amd/Makefile b/drivers/platform/x86/amd/Makefile
index 56f62fc9c97b..c6c40bdcbded 100644
--- a/drivers/platform/x86/amd/Makefile
+++ b/drivers/platform/x86/amd/Makefile
@@ -5,7 +5,7 @@
#
obj-$(CONFIG_AMD_3D_VCACHE) += amd_3d_vcache.o
-amd_3d_vcache-objs := x3d_vcache.o
+amd_3d_vcache-y := x3d_vcache.o
obj-$(CONFIG_AMD_PMC) += pmc/
obj-$(CONFIG_AMD_HSMP) += hsmp/
obj-$(CONFIG_AMD_PMF) += pmf/
diff --git a/drivers/platform/x86/amd/hsmp/Kconfig b/drivers/platform/x86/amd/hsmp/Kconfig
index 7d10d4462a45..d6f7a62d55b5 100644
--- a/drivers/platform/x86/amd/hsmp/Kconfig
+++ b/drivers/platform/x86/amd/hsmp/Kconfig
@@ -7,7 +7,7 @@ config AMD_HSMP
tristate
menu "AMD HSMP Driver"
- depends on AMD_NB || COMPILE_TEST
+ depends on AMD_NODE || COMPILE_TEST
config AMD_HSMP_ACPI
tristate "AMD HSMP ACPI device driver"
diff --git a/drivers/platform/x86/amd/hsmp/Makefile b/drivers/platform/x86/amd/hsmp/Makefile
index 3175d8885e87..0759bbcd13f6 100644
--- a/drivers/platform/x86/amd/hsmp/Makefile
+++ b/drivers/platform/x86/amd/hsmp/Makefile
@@ -5,8 +5,8 @@
#
obj-$(CONFIG_AMD_HSMP) += hsmp_common.o
-hsmp_common-objs := hsmp.o
+hsmp_common-y := hsmp.o
obj-$(CONFIG_AMD_HSMP_PLAT) += amd_hsmp.o
-amd_hsmp-objs := plat.o
+amd_hsmp-y := plat.o
obj-$(CONFIG_AMD_HSMP_ACPI) += hsmp_acpi.o
-hsmp_acpi-objs := acpi.o
+hsmp_acpi-y := acpi.o
diff --git a/drivers/platform/x86/amd/hsmp/acpi.c b/drivers/platform/x86/amd/hsmp/acpi.c
index 444b43be35a2..c1eccb3c80c5 100644
--- a/drivers/platform/x86/amd/hsmp/acpi.c
+++ b/drivers/platform/x86/amd/hsmp/acpi.c
@@ -10,7 +10,6 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <asm/amd_hsmp.h>
-#include <asm/amd_nb.h>
#include <linux/acpi.h>
#include <linux/device.h>
@@ -24,6 +23,8 @@
#include <uapi/asm-generic/errno-base.h>
+#include <asm/amd_node.h>
+
#include "hsmp.h"
#define DRIVER_NAME "amd_hsmp"
@@ -321,8 +322,8 @@ static int hsmp_acpi_probe(struct platform_device *pdev)
return -ENOMEM;
if (!hsmp_pdev->is_probed) {
- hsmp_pdev->num_sockets = amd_nb_num();
- if (hsmp_pdev->num_sockets == 0 || hsmp_pdev->num_sockets > MAX_AMD_SOCKETS)
+ hsmp_pdev->num_sockets = amd_num_nodes();
+ if (hsmp_pdev->num_sockets == 0 || hsmp_pdev->num_sockets > MAX_AMD_NUM_NODES)
return -ENODEV;
hsmp_pdev->sock = devm_kcalloc(&pdev->dev, hsmp_pdev->num_sockets,
diff --git a/drivers/platform/x86/amd/hsmp/hsmp.c b/drivers/platform/x86/amd/hsmp/hsmp.c
index 03164e30b3a5..a3ac09a90de4 100644
--- a/drivers/platform/x86/amd/hsmp/hsmp.c
+++ b/drivers/platform/x86/amd/hsmp/hsmp.c
@@ -8,7 +8,6 @@
*/
#include <asm/amd_hsmp.h>
-#include <asm/amd_nb.h>
#include <linux/acpi.h>
#include <linux/delay.h>
diff --git a/drivers/platform/x86/amd/hsmp/hsmp.h b/drivers/platform/x86/amd/hsmp/hsmp.h
index e852f0a947e4..af8b21f821d6 100644
--- a/drivers/platform/x86/amd/hsmp/hsmp.h
+++ b/drivers/platform/x86/amd/hsmp/hsmp.h
@@ -21,8 +21,6 @@
#define HSMP_ATTR_GRP_NAME_SIZE 10
-#define MAX_AMD_SOCKETS 8
-
#define HSMP_CDEV_NAME "hsmp_cdev"
#define HSMP_DEVNODE_NAME "hsmp"
@@ -41,7 +39,6 @@ struct hsmp_socket {
void __iomem *virt_base_addr;
struct semaphore hsmp_sem;
char name[HSMP_ATTR_GRP_NAME_SIZE];
- struct pci_dev *root;
struct device *dev;
u16 sock_ind;
int (*amd_hsmp_rdwr)(struct hsmp_socket *sock, u32 off, u32 *val, bool rw);
diff --git a/drivers/platform/x86/amd/hsmp/plat.c b/drivers/platform/x86/amd/hsmp/plat.c
index 02ca85762b68..b9782a078dbd 100644
--- a/drivers/platform/x86/amd/hsmp/plat.c
+++ b/drivers/platform/x86/amd/hsmp/plat.c
@@ -10,14 +10,16 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <asm/amd_hsmp.h>
-#include <asm/amd_nb.h>
+#include <linux/build_bug.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/sysfs.h>
+#include <asm/amd_node.h>
+
#include "hsmp.h"
#define DRIVER_NAME "amd_hsmp"
@@ -34,28 +36,12 @@
#define SMN_HSMP_MSG_RESP 0x0010980
#define SMN_HSMP_MSG_DATA 0x00109E0
-#define HSMP_INDEX_REG 0xc4
-#define HSMP_DATA_REG 0xc8
-
static struct hsmp_plat_device *hsmp_pdev;
static int amd_hsmp_pci_rdwr(struct hsmp_socket *sock, u32 offset,
u32 *value, bool write)
{
- int ret;
-
- if (!sock->root)
- return -ENODEV;
-
- ret = pci_write_config_dword(sock->root, HSMP_INDEX_REG,
- sock->mbinfo.base_addr + offset);
- if (ret)
- return ret;
-
- ret = (write ? pci_write_config_dword(sock->root, HSMP_DATA_REG, *value)
- : pci_read_config_dword(sock->root, HSMP_DATA_REG, value));
-
- return ret;
+ return amd_smn_hsmp_rdwr(sock->sock_ind, sock->mbinfo.base_addr + offset, value, write);
}
static ssize_t hsmp_metric_tbl_plat_read(struct file *filp, struct kobject *kobj,
@@ -95,7 +81,12 @@ static umode_t hsmp_is_sock_attr_visible(struct kobject *kobj,
* Static array of 8 + 1(for NULL) elements is created below
* to create sysfs groups for sockets.
* is_bin_visible function is used to show / hide the necessary groups.
+ *
+ * Validate the maximum number against MAX_AMD_NUM_NODES. If this changes,
+ * then the attributes and groups below must be adjusted.
*/
+static_assert(MAX_AMD_NUM_NODES == 8);
+
#define HSMP_BIN_ATTR(index, _list) \
static const struct bin_attribute attr##index = { \
.attr = { .name = HSMP_METRICS_TABLE_NAME, .mode = 0444}, \
@@ -159,10 +150,7 @@ static int init_platform_device(struct device *dev)
int ret, i;
for (i = 0; i < hsmp_pdev->num_sockets; i++) {
- if (!node_to_amd_nb(i))
- return -ENODEV;
sock = &hsmp_pdev->sock[i];
- sock->root = node_to_amd_nb(i)->root;
sock->sock_ind = i;
sock->dev = dev;
sock->mbinfo.base_addr = SMN_HSMP_BASE;
@@ -305,11 +293,11 @@ static int __init hsmp_plt_init(void)
return -ENOMEM;
/*
- * amd_nb_num() returns number of SMN/DF interfaces present in the system
+ * amd_num_nodes() returns number of SMN/DF interfaces present in the system
* if we have N SMN/DF interfaces that ideally means N sockets
*/
- hsmp_pdev->num_sockets = amd_nb_num();
- if (hsmp_pdev->num_sockets == 0 || hsmp_pdev->num_sockets > MAX_AMD_SOCKETS)
+ hsmp_pdev->num_sockets = amd_num_nodes();
+ if (hsmp_pdev->num_sockets == 0 || hsmp_pdev->num_sockets > MAX_AMD_NUM_NODES)
return ret;
ret = platform_driver_register(&amd_hsmp_driver);
diff --git a/drivers/platform/x86/amd/pmc/Makefile b/drivers/platform/x86/amd/pmc/Makefile
index 255d94ddf999..bb6905c4cae9 100644
--- a/drivers/platform/x86/amd/pmc/Makefile
+++ b/drivers/platform/x86/amd/pmc/Makefile
@@ -4,6 +4,6 @@
# AMD Power Management Controller Driver
#
-amd-pmc-objs := pmc.o pmc-quirks.o mp1_stb.o
-obj-$(CONFIG_AMD_PMC) += amd-pmc.o
-amd-pmc-$(CONFIG_AMD_MP2_STB) += mp2_stb.o
+obj-$(CONFIG_AMD_PMC) += amd-pmc.o
+amd-pmc-y := pmc.o pmc-quirks.o mp1_stb.o
+amd-pmc-$(CONFIG_AMD_MP2_STB) += mp2_stb.o
diff --git a/drivers/platform/x86/amd/pmc/pmc.c b/drivers/platform/x86/amd/pmc/pmc.c
index e6124498b195..d789d6cab794 100644
--- a/drivers/platform/x86/amd/pmc/pmc.c
+++ b/drivers/platform/x86/amd/pmc/pmc.c
@@ -32,70 +32,6 @@
#include "pmc.h"
-/* SMU communication registers */
-#define AMD_PMC_REGISTER_RESPONSE 0x980
-#define AMD_PMC_REGISTER_ARGUMENT 0x9BC
-
-/* PMC Scratch Registers */
-#define AMD_PMC_SCRATCH_REG_CZN 0x94
-#define AMD_PMC_SCRATCH_REG_YC 0xD14
-#define AMD_PMC_SCRATCH_REG_1AH 0xF14
-
-/* STB Registers */
-#define AMD_PMC_STB_S2IDLE_PREPARE 0xC6000001
-#define AMD_PMC_STB_S2IDLE_RESTORE 0xC6000002
-#define AMD_PMC_STB_S2IDLE_CHECK 0xC6000003
-
-/* Base address of SMU for mapping physical address to virtual address */
-#define AMD_PMC_MAPPING_SIZE 0x01000
-#define AMD_PMC_BASE_ADDR_OFFSET 0x10000
-#define AMD_PMC_BASE_ADDR_LO 0x13B102E8
-#define AMD_PMC_BASE_ADDR_HI 0x13B102EC
-#define AMD_PMC_BASE_ADDR_LO_MASK GENMASK(15, 0)
-#define AMD_PMC_BASE_ADDR_HI_MASK GENMASK(31, 20)
-
-/* SMU Response Codes */
-#define AMD_PMC_RESULT_OK 0x01
-#define AMD_PMC_RESULT_CMD_REJECT_BUSY 0xFC
-#define AMD_PMC_RESULT_CMD_REJECT_PREREQ 0xFD
-#define AMD_PMC_RESULT_CMD_UNKNOWN 0xFE
-#define AMD_PMC_RESULT_FAILED 0xFF
-
-/* FCH SSC Registers */
-#define FCH_S0I3_ENTRY_TIME_L_OFFSET 0x30
-#define FCH_S0I3_ENTRY_TIME_H_OFFSET 0x34
-#define FCH_S0I3_EXIT_TIME_L_OFFSET 0x38
-#define FCH_S0I3_EXIT_TIME_H_OFFSET 0x3C
-#define FCH_SSC_MAPPING_SIZE 0x800
-#define FCH_BASE_PHY_ADDR_LOW 0xFED81100
-#define FCH_BASE_PHY_ADDR_HIGH 0x00000000
-
-/* SMU Message Definations */
-#define SMU_MSG_GETSMUVERSION 0x02
-#define SMU_MSG_LOG_GETDRAM_ADDR_HI 0x04
-#define SMU_MSG_LOG_GETDRAM_ADDR_LO 0x05
-#define SMU_MSG_LOG_START 0x06
-#define SMU_MSG_LOG_RESET 0x07
-#define SMU_MSG_LOG_DUMP_DATA 0x08
-#define SMU_MSG_GET_SUP_CONSTRAINTS 0x09
-
-#define PMC_MSG_DELAY_MIN_US 50
-#define RESPONSE_REGISTER_LOOP_MAX 20000
-
-#define DELAY_MIN_US 2000
-#define DELAY_MAX_US 3000
-
-enum amd_pmc_def {
- MSG_TEST = 0x01,
- MSG_OS_HINT_PCO,
- MSG_OS_HINT_RN,
-};
-
-struct amd_pmc_bit_map {
- const char *name;
- u32 bit_mask;
-};
-
static const struct amd_pmc_bit_map soc15_ip_blk_v2[] = {
{"DISPLAY", BIT(0)},
{"CPU", BIT(1)},
@@ -165,23 +101,6 @@ static inline void amd_pmc_reg_write(struct amd_pmc_dev *dev, int reg_offset, u3
iowrite32(val, dev->regbase + reg_offset);
}
-struct smu_metrics {
- u32 table_version;
- u32 hint_count;
- u32 s0i3_last_entry_status;
- u32 timein_s0i2;
- u64 timeentering_s0i3_lastcapture;
- u64 timeentering_s0i3_totaltime;
- u64 timeto_resume_to_os_lastcapture;
- u64 timeto_resume_to_os_totaltime;
- u64 timein_s0i3_lastcapture;
- u64 timein_s0i3_totaltime;
- u64 timein_swdrips_lastcapture;
- u64 timein_swdrips_totaltime;
- u64 timecondition_notmet_lastcapture[32];
- u64 timecondition_notmet_totaltime[32];
-} __packed;
-
static void amd_pmc_get_ip_info(struct amd_pmc_dev *dev)
{
switch (dev->cpu_id) {
@@ -247,11 +166,12 @@ static int amd_pmc_setup_smu_logging(struct amd_pmc_dev *dev)
static int get_metrics_table(struct amd_pmc_dev *pdev, struct smu_metrics *table)
{
- if (!pdev->smu_virt_addr) {
- int ret = amd_pmc_setup_smu_logging(pdev);
+ int rc;
- if (ret)
- return ret;
+ if (!pdev->smu_virt_addr) {
+ rc = amd_pmc_setup_smu_logging(pdev);
+ if (rc)
+ return rc;
}
if (pdev->cpu_id == AMD_CPU_ID_PCO)
@@ -300,10 +220,10 @@ static ssize_t smu_fw_version_show(struct device *d, struct device_attribute *at
char *buf)
{
struct amd_pmc_dev *dev = dev_get_drvdata(d);
+ int rc;
if (!dev->major) {
- int rc = amd_pmc_get_smu_version(dev);
-
+ rc = amd_pmc_get_smu_version(dev);
if (rc)
return rc;
}
@@ -314,10 +234,10 @@ static ssize_t smu_program_show(struct device *d, struct device_attribute *attr,
char *buf)
{
struct amd_pmc_dev *dev = dev_get_drvdata(d);
+ int rc;
if (!dev->major) {
- int rc = amd_pmc_get_smu_version(dev);
-
+ rc = amd_pmc_get_smu_version(dev);
if (rc)
return rc;
}
@@ -778,14 +698,14 @@ static struct acpi_s2idle_dev_ops amd_pmc_s2idle_dev_ops = {
static int amd_pmc_suspend_handler(struct device *dev)
{
struct amd_pmc_dev *pdev = dev_get_drvdata(dev);
+ int rc;
/*
* Must be called only from the same set of dev_pm_ops handlers
* as i8042_pm_suspend() is called: currently just from .suspend.
*/
if (pdev->disable_8042_wakeup && !disable_workarounds) {
- int rc = amd_pmc_wa_irq1(pdev);
-
+ rc = amd_pmc_wa_irq1(pdev);
if (rc) {
dev_err(pdev->dev, "failed to adjust keyboard wakeup: %d\n", rc);
return rc;
@@ -808,6 +728,7 @@ static const struct pci_device_id pmc_pci_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_PCO) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_RV) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_SP) },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_SHP) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M20H_ROOT) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M60H_ROOT) },
{ }
@@ -823,7 +744,6 @@ static int amd_pmc_probe(struct platform_device *pdev)
u32 val;
dev->dev = &pdev->dev;
-
rdev = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(0, 0));
if (!rdev || !pci_match_id(pmc_pci_ids, rdev)) {
err = -ENODEV;
@@ -831,8 +751,7 @@ static int amd_pmc_probe(struct platform_device *pdev)
}
dev->cpu_id = rdev->device;
-
- if (dev->cpu_id == AMD_CPU_ID_SP) {
+ if (dev->cpu_id == AMD_CPU_ID_SP || dev->cpu_id == AMD_CPU_ID_SHP) {
dev_warn_once(dev->dev, "S0i3 is not supported on this hardware\n");
err = -ENODEV;
goto err_pci_dev_put;
@@ -847,7 +766,6 @@ static int amd_pmc_probe(struct platform_device *pdev)
}
base_addr_lo = val & AMD_PMC_BASE_ADDR_HI_MASK;
-
err = amd_smn_read(0, AMD_PMC_BASE_ADDR_HI, &val);
if (err) {
dev_err(dev->dev, "error reading 0x%x\n", AMD_PMC_BASE_ADDR_HI);
@@ -865,7 +783,9 @@ static int amd_pmc_probe(struct platform_device *pdev)
goto err_pci_dev_put;
}
- mutex_init(&dev->lock);
+ err = devm_mutex_init(dev->dev, &dev->lock);
+ if (err)
+ goto err_pci_dev_put;
/* Get num of IP blocks within the SoC */
amd_pmc_get_ip_info(dev);
@@ -904,7 +824,6 @@ static void amd_pmc_remove(struct platform_device *pdev)
pci_dev_put(dev->rdev);
if (IS_ENABLED(CONFIG_AMD_MP2_STB))
amd_mp2_stb_deinit(dev);
- mutex_destroy(&dev->lock);
}
static const struct acpi_device_id amd_pmc_acpi_ids[] = {
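The pmc.c hunks above convert the driver lock to a device-managed mutex: devm_mutex_init() registers the teardown with devres, so the explicit mutex_destroy() disappears from amd_pmc_remove(). A sketch of the same pattern follows; struct foo_dev and foo_probe() are invented names and the snippet only builds inside a kernel tree:

/*
 * Device-managed mutex: the devres core destroys the mutex when the device
 * is unbound, so remove() does not need a matching mutex_destroy().
 */
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

struct foo_dev {
	struct device *dev;
	struct mutex lock;
};

static int foo_probe(struct platform_device *pdev)
{
	struct foo_dev *foo;
	int err;

	foo = devm_kzalloc(&pdev->dev, sizeof(*foo), GFP_KERNEL);
	if (!foo)
		return -ENOMEM;

	foo->dev = &pdev->dev;

	/* Lifetime tied to the device; no mutex_destroy() in remove(). */
	err = devm_mutex_init(foo->dev, &foo->lock);
	if (err)
		return err;

	platform_set_drvdata(pdev, foo);
	return 0;
}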
diff --git a/drivers/platform/x86/amd/pmc/pmc.h b/drivers/platform/x86/amd/pmc/pmc.h
index f43f0253b0f5..62f3e51020fd 100644
--- a/drivers/platform/x86/amd/pmc/pmc.h
+++ b/drivers/platform/x86/amd/pmc/pmc.h
@@ -14,6 +14,59 @@
#include <linux/types.h>
#include <linux/mutex.h>
+/* SMU communication registers */
+#define AMD_PMC_REGISTER_RESPONSE 0x980
+#define AMD_PMC_REGISTER_ARGUMENT 0x9BC
+
+/* PMC Scratch Registers */
+#define AMD_PMC_SCRATCH_REG_CZN 0x94
+#define AMD_PMC_SCRATCH_REG_YC 0xD14
+#define AMD_PMC_SCRATCH_REG_1AH 0xF14
+
+/* STB Registers */
+#define AMD_PMC_STB_S2IDLE_PREPARE 0xC6000001
+#define AMD_PMC_STB_S2IDLE_RESTORE 0xC6000002
+#define AMD_PMC_STB_S2IDLE_CHECK 0xC6000003
+
+/* Base address of SMU for mapping physical address to virtual address */
+#define AMD_PMC_MAPPING_SIZE 0x01000
+#define AMD_PMC_BASE_ADDR_OFFSET 0x10000
+#define AMD_PMC_BASE_ADDR_LO 0x13B102E8
+#define AMD_PMC_BASE_ADDR_HI 0x13B102EC
+#define AMD_PMC_BASE_ADDR_LO_MASK GENMASK(15, 0)
+#define AMD_PMC_BASE_ADDR_HI_MASK GENMASK(31, 20)
+
+/* SMU Response Codes */
+#define AMD_PMC_RESULT_OK 0x01
+#define AMD_PMC_RESULT_CMD_REJECT_BUSY 0xFC
+#define AMD_PMC_RESULT_CMD_REJECT_PREREQ 0xFD
+#define AMD_PMC_RESULT_CMD_UNKNOWN 0xFE
+#define AMD_PMC_RESULT_FAILED 0xFF
+
+/* FCH SSC Registers */
+#define FCH_S0I3_ENTRY_TIME_L_OFFSET 0x30
+#define FCH_S0I3_ENTRY_TIME_H_OFFSET 0x34
+#define FCH_S0I3_EXIT_TIME_L_OFFSET 0x38
+#define FCH_S0I3_EXIT_TIME_H_OFFSET 0x3C
+#define FCH_SSC_MAPPING_SIZE 0x800
+#define FCH_BASE_PHY_ADDR_LOW 0xFED81100
+#define FCH_BASE_PHY_ADDR_HIGH 0x00000000
+
+/* SMU Message Definitions */
+#define SMU_MSG_GETSMUVERSION 0x02
+#define SMU_MSG_LOG_GETDRAM_ADDR_HI 0x04
+#define SMU_MSG_LOG_GETDRAM_ADDR_LO 0x05
+#define SMU_MSG_LOG_START 0x06
+#define SMU_MSG_LOG_RESET 0x07
+#define SMU_MSG_LOG_DUMP_DATA 0x08
+#define SMU_MSG_GET_SUP_CONSTRAINTS 0x09
+
+#define PMC_MSG_DELAY_MIN_US 50
+#define RESPONSE_REGISTER_LOOP_MAX 20000
+
+#define DELAY_MIN_US 2000
+#define DELAY_MAX_US 3000
+
enum s2d_msg_port {
MSG_PORT_PMC,
MSG_PORT_S2D,
@@ -65,6 +118,34 @@ struct amd_pmc_dev {
struct stb_arg stb_arg;
};
+struct amd_pmc_bit_map {
+ const char *name;
+ u32 bit_mask;
+};
+
+struct smu_metrics {
+ u32 table_version;
+ u32 hint_count;
+ u32 s0i3_last_entry_status;
+ u32 timein_s0i2;
+ u64 timeentering_s0i3_lastcapture;
+ u64 timeentering_s0i3_totaltime;
+ u64 timeto_resume_to_os_lastcapture;
+ u64 timeto_resume_to_os_totaltime;
+ u64 timein_s0i3_lastcapture;
+ u64 timein_s0i3_totaltime;
+ u64 timein_swdrips_lastcapture;
+ u64 timein_swdrips_totaltime;
+ u64 timecondition_notmet_lastcapture[32];
+ u64 timecondition_notmet_totaltime[32];
+} __packed;
+
+enum amd_pmc_def {
+ MSG_TEST = 0x01,
+ MSG_OS_HINT_PCO,
+ MSG_OS_HINT_RN,
+};
+
void amd_pmc_process_restore_quirks(struct amd_pmc_dev *dev);
void amd_pmc_quirks_init(struct amd_pmc_dev *dev);
void amd_mp2_stb_init(struct amd_pmc_dev *dev);
@@ -79,6 +160,7 @@ void amd_mp2_stb_deinit(struct amd_pmc_dev *dev);
#define AMD_CPU_ID_CB 0x14D8
#define AMD_CPU_ID_PS 0x14E8
#define AMD_CPU_ID_SP 0x14A4
+#define AMD_CPU_ID_SHP 0x153A
#define PCI_DEVICE_ID_AMD_1AH_M20H_ROOT 0x1507
#define PCI_DEVICE_ID_AMD_1AH_M60H_ROOT 0x1122
#define PCI_DEVICE_ID_AMD_MP2_STB 0x172c
diff --git a/drivers/platform/x86/amd/pmf/Makefile b/drivers/platform/x86/amd/pmf/Makefile
index 6b26e48ce8ad..5978464e0eb7 100644
--- a/drivers/platform/x86/amd/pmf/Makefile
+++ b/drivers/platform/x86/amd/pmf/Makefile
@@ -4,7 +4,7 @@
# AMD Platform Management Framework
#
-obj-$(CONFIG_AMD_PMF) += amd-pmf.o
-amd-pmf-objs := core.o acpi.o sps.o \
- auto-mode.o cnqf.o \
- tee-if.o spc.o
+obj-$(CONFIG_AMD_PMF) += amd-pmf.o
+amd-pmf-y := core.o acpi.o sps.o \
+ auto-mode.o cnqf.o \
+ tee-if.o spc.o
diff --git a/drivers/platform/x86/amd/pmf/acpi.c b/drivers/platform/x86/amd/pmf/acpi.c
index dd5780a1d06e..f75f7ecd8cd9 100644
--- a/drivers/platform/x86/amd/pmf/acpi.c
+++ b/drivers/platform/x86/amd/pmf/acpi.c
@@ -220,7 +220,7 @@ static void apmf_sbios_heartbeat_notify(struct work_struct *work)
if (!info)
return;
- schedule_delayed_work(&dev->heart_beat, msecs_to_jiffies(dev->hb_interval * 1000));
+ schedule_delayed_work(&dev->heart_beat, secs_to_jiffies(dev->hb_interval));
kfree(info);
}
diff --git a/drivers/platform/x86/amd/pmf/core.c b/drivers/platform/x86/amd/pmf/core.c
index 764cc1fe90ae..a2cb2d5544f5 100644
--- a/drivers/platform/x86/amd/pmf/core.c
+++ b/drivers/platform/x86/amd/pmf/core.c
@@ -452,6 +452,7 @@ static int amd_pmf_probe(struct platform_device *pdev)
mutex_init(&dev->lock);
mutex_init(&dev->update_mutex);
+ mutex_init(&dev->cb_mutex);
apmf_acpi_init(dev);
platform_set_drvdata(pdev, dev);
@@ -477,6 +478,7 @@ static void amd_pmf_remove(struct platform_device *pdev)
amd_pmf_dbgfs_unregister(dev);
mutex_destroy(&dev->lock);
mutex_destroy(&dev->update_mutex);
+ mutex_destroy(&dev->cb_mutex);
kfree(dev->buf);
}
diff --git a/drivers/platform/x86/amd/pmf/pmf.h b/drivers/platform/x86/amd/pmf/pmf.h
index 41b2b91b8fdc..e6bdee68ccf3 100644
--- a/drivers/platform/x86/amd/pmf/pmf.h
+++ b/drivers/platform/x86/amd/pmf/pmf.h
@@ -106,9 +106,12 @@ struct cookie_header {
#define PMF_TA_IF_VERSION_MAJOR 1
#define TA_PMF_ACTION_MAX 32
#define TA_PMF_UNDO_MAX 8
-#define TA_OUTPUT_RESERVED_MEM 906
+#define TA_OUTPUT_RESERVED_MEM 922
#define MAX_OPERATION_PARAMS 4
+#define TA_ERROR_CRYPTO_INVALID_PARAM 0x20002
+#define TA_ERROR_CRYPTO_BIN_TOO_LARGE 0x2000d
+
#define PMF_IF_V1 1
#define PMF_IF_V2 2
diff --git a/drivers/platform/x86/amd/pmf/spc.c b/drivers/platform/x86/amd/pmf/spc.c
index f34f3130c330..1d90f9382024 100644
--- a/drivers/platform/x86/amd/pmf/spc.c
+++ b/drivers/platform/x86/amd/pmf/spc.c
@@ -219,12 +219,14 @@ static int amd_pmf_get_slider_info(struct amd_pmf_dev *dev, struct ta_pmf_enact_
switch (dev->current_profile) {
case PLATFORM_PROFILE_PERFORMANCE:
+ case PLATFORM_PROFILE_BALANCED_PERFORMANCE:
val = TA_BEST_PERFORMANCE;
break;
case PLATFORM_PROFILE_BALANCED:
val = TA_BETTER_PERFORMANCE;
break;
case PLATFORM_PROFILE_LOW_POWER:
+ case PLATFORM_PROFILE_QUIET:
val = TA_BEST_BATTERY;
break;
default:
diff --git a/drivers/platform/x86/amd/pmf/sps.c b/drivers/platform/x86/amd/pmf/sps.c
index e6cf0b22dac3..d3083383f11f 100644
--- a/drivers/platform/x86/amd/pmf/sps.c
+++ b/drivers/platform/x86/amd/pmf/sps.c
@@ -297,12 +297,14 @@ int amd_pmf_get_pprof_modes(struct amd_pmf_dev *pmf)
switch (pmf->current_profile) {
case PLATFORM_PROFILE_PERFORMANCE:
+ case PLATFORM_PROFILE_BALANCED_PERFORMANCE:
mode = POWER_MODE_PERFORMANCE;
break;
case PLATFORM_PROFILE_BALANCED:
mode = POWER_MODE_BALANCED_POWER;
break;
case PLATFORM_PROFILE_LOW_POWER:
+ case PLATFORM_PROFILE_QUIET:
mode = POWER_MODE_POWER_SAVER;
break;
default:
@@ -387,6 +389,14 @@ static int amd_pmf_profile_set(struct device *dev,
return 0;
}
+static int amd_pmf_hidden_choices(void *drvdata, unsigned long *choices)
+{
+ set_bit(PLATFORM_PROFILE_QUIET, choices);
+ set_bit(PLATFORM_PROFILE_BALANCED_PERFORMANCE, choices);
+
+ return 0;
+}
+
static int amd_pmf_profile_probe(void *drvdata, unsigned long *choices)
{
set_bit(PLATFORM_PROFILE_LOW_POWER, choices);
@@ -398,6 +408,7 @@ static int amd_pmf_profile_probe(void *drvdata, unsigned long *choices)
static const struct platform_profile_ops amd_pmf_profile_ops = {
.probe = amd_pmf_profile_probe,
+ .hidden_choices = amd_pmf_hidden_choices,
.profile_get = amd_pmf_profile_get,
.profile_set = amd_pmf_profile_set,
};
diff --git a/drivers/platform/x86/amd/pmf/tee-if.c b/drivers/platform/x86/amd/pmf/tee-if.c
index 8c88769ea1d8..a1e43873a07b 100644
--- a/drivers/platform/x86/amd/pmf/tee-if.c
+++ b/drivers/platform/x86/amd/pmf/tee-if.c
@@ -27,8 +27,11 @@ module_param(pb_side_load, bool, 0444);
MODULE_PARM_DESC(pb_side_load, "Sideload policy binaries debug policy failures");
#endif
-static const uuid_t amd_pmf_ta_uuid = UUID_INIT(0x6fd93b77, 0x3fb8, 0x524d,
- 0xb1, 0x2d, 0xc5, 0x29, 0xb1, 0x3d, 0x85, 0x43);
+static const uuid_t amd_pmf_ta_uuid[] = { UUID_INIT(0xd9b39bf2, 0x66bd, 0x4154, 0xaf, 0xb8, 0x8a,
+ 0xcc, 0x2b, 0x2b, 0x60, 0xd6),
+ UUID_INIT(0x6fd93b77, 0x3fb8, 0x524d, 0xb1, 0x2d, 0xc5,
+ 0x29, 0xb1, 0x3d, 0x85, 0x43),
+ };
static const char *amd_pmf_uevent_as_str(unsigned int state)
{
@@ -321,9 +324,9 @@ static int amd_pmf_start_policy_engine(struct amd_pmf_dev *dev)
*/
schedule_delayed_work(&dev->pb_work, msecs_to_jiffies(pb_actions_ms * 3));
} else {
- dev_err(dev->dev, "ta invoke cmd init failed err: %x\n", res);
+ dev_dbg(dev->dev, "ta invoke cmd init failed err: %x\n", res);
dev->smart_pc_enabled = false;
- return -EIO;
+ return res;
}
return 0;
@@ -390,12 +393,12 @@ static int amd_pmf_amdtee_ta_match(struct tee_ioctl_version_data *ver, const voi
return ver->impl_id == TEE_IMPL_ID_AMDTEE;
}
-static int amd_pmf_ta_open_session(struct tee_context *ctx, u32 *id)
+static int amd_pmf_ta_open_session(struct tee_context *ctx, u32 *id, const uuid_t *uuid)
{
struct tee_ioctl_open_session_arg sess_arg = {};
int rc;
- export_uuid(sess_arg.uuid, &amd_pmf_ta_uuid);
+ export_uuid(sess_arg.uuid, uuid);
sess_arg.clnt_login = TEE_IOCTL_LOGIN_PUBLIC;
sess_arg.num_params = 0;
@@ -434,7 +437,7 @@ static int amd_pmf_register_input_device(struct amd_pmf_dev *dev)
return 0;
}
-static int amd_pmf_tee_init(struct amd_pmf_dev *dev)
+static int amd_pmf_tee_init(struct amd_pmf_dev *dev, const uuid_t *uuid)
{
u32 size;
int ret;
@@ -445,7 +448,7 @@ static int amd_pmf_tee_init(struct amd_pmf_dev *dev)
return PTR_ERR(dev->tee_ctx);
}
- ret = amd_pmf_ta_open_session(dev->tee_ctx, &dev->session_id);
+ ret = amd_pmf_ta_open_session(dev->tee_ctx, &dev->session_id, uuid);
if (ret) {
dev_err(dev->dev, "Failed to open TA session (%d)\n", ret);
ret = -EINVAL;
@@ -489,7 +492,8 @@ static void amd_pmf_tee_deinit(struct amd_pmf_dev *dev)
int amd_pmf_init_smart_pc(struct amd_pmf_dev *dev)
{
- int ret;
+ bool status;
+ int ret, i;
ret = apmf_check_smart_pc(dev);
if (ret) {
@@ -502,26 +506,22 @@ int amd_pmf_init_smart_pc(struct amd_pmf_dev *dev)
return -ENODEV;
}
- ret = amd_pmf_tee_init(dev);
- if (ret)
- return ret;
-
INIT_DELAYED_WORK(&dev->pb_work, amd_pmf_invoke_cmd);
ret = amd_pmf_set_dram_addr(dev, true);
if (ret)
- goto error;
+ goto err_cancel_work;
dev->policy_base = devm_ioremap_resource(dev->dev, dev->res);
if (IS_ERR(dev->policy_base)) {
ret = PTR_ERR(dev->policy_base);
- goto error;
+ goto err_free_dram_buf;
}
dev->policy_buf = kzalloc(dev->policy_sz, GFP_KERNEL);
if (!dev->policy_buf) {
ret = -ENOMEM;
- goto error;
+ goto err_free_dram_buf;
}
memcpy_fromio(dev->policy_buf, dev->policy_base, dev->policy_sz);
@@ -531,24 +531,60 @@ int amd_pmf_init_smart_pc(struct amd_pmf_dev *dev)
dev->prev_data = kzalloc(sizeof(*dev->prev_data), GFP_KERNEL);
if (!dev->prev_data) {
ret = -ENOMEM;
- goto error;
+ goto err_free_policy;
}
- ret = amd_pmf_start_policy_engine(dev);
- if (ret)
- goto error;
+ for (i = 0; i < ARRAY_SIZE(amd_pmf_ta_uuid); i++) {
+ ret = amd_pmf_tee_init(dev, &amd_pmf_ta_uuid[i]);
+ if (ret)
+ goto err_free_prev_data;
+
+ ret = amd_pmf_start_policy_engine(dev);
+ switch (ret) {
+ case TA_PMF_TYPE_SUCCESS:
+ status = true;
+ break;
+ case TA_ERROR_CRYPTO_INVALID_PARAM:
+ case TA_ERROR_CRYPTO_BIN_TOO_LARGE:
+ amd_pmf_tee_deinit(dev);
+ status = false;
+ break;
+ default:
+ ret = -EINVAL;
+ amd_pmf_tee_deinit(dev);
+ goto err_free_prev_data;
+ }
+
+ if (status)
+ break;
+ }
+
+ if (!status && !pb_side_load) {
+ ret = -EINVAL;
+ goto err_free_prev_data;
+ }
if (pb_side_load)
amd_pmf_open_pb(dev, dev->dbgfs_dir);
ret = amd_pmf_register_input_device(dev);
if (ret)
- goto error;
+ goto err_pmf_remove_pb;
return 0;
-error:
- amd_pmf_deinit_smart_pc(dev);
+err_pmf_remove_pb:
+ if (pb_side_load && dev->esbin)
+ amd_pmf_remove_pb(dev);
+ amd_pmf_tee_deinit(dev);
+err_free_prev_data:
+ kfree(dev->prev_data);
+err_free_policy:
+ kfree(dev->policy_buf);
+err_free_dram_buf:
+ kfree(dev->buf);
+err_cancel_work:
+ cancel_delayed_work_sync(&dev->pb_work);
return ret;
}
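Besides retrying the TA session over several UUIDs, the tee-if.c hunk above replaces the old catch-all "error:" label with a ladder of err_* labels, each of which unwinds only what was successfully set up before the failure. A stand-alone C sketch of that unwind style, with plain allocations standing in for the driver's DRAM buffer, policy buffer and delayed work:

/*
 * goto-based unwinding: on failure, jump to the label that frees exactly
 * the resources acquired so far, in reverse order of acquisition.
 */
#include <stdio.h>
#include <stdlib.h>

static int setup(void)
{
	char *a, *b, *c;
	int ret;

	a = malloc(32);
	if (!a)
		return -1;

	b = malloc(64);
	if (!b) {
		ret = -1;
		goto err_free_a;
	}

	c = malloc(128);
	if (!c) {
		ret = -1;
		goto err_free_b;
	}

	printf("all resources acquired\n");

	/* In a driver these would stay allocated; freed here to keep the demo leak-free. */
	free(c);
	free(b);
	free(a);
	return 0;

err_free_b:
	free(b);
err_free_a:
	free(a);
	return ret;
}

int main(void)
{
	return setup() ? EXIT_FAILURE : EXIT_SUCCESS;
}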
diff --git a/drivers/platform/x86/asus-tf103c-dock.c b/drivers/platform/x86/asus-tf103c-dock.c
index ca4670d0dc67..f09a3fc6524a 100644
--- a/drivers/platform/x86/asus-tf103c-dock.c
+++ b/drivers/platform/x86/asus-tf103c-dock.c
@@ -856,7 +856,7 @@ static int tf103c_dock_probe(struct i2c_client *client)
/* 5. Setup irqchip for touchpad IRQ pass-through */
dock->tp_irqchip.name = KBUILD_MODNAME;
- dock->tp_irq_domain = irq_domain_add_linear(NULL, 1, &irq_domain_simple_ops, NULL);
+ dock->tp_irq_domain = irq_domain_create_linear(NULL, 1, &irq_domain_simple_ops, NULL);
if (!dock->tp_irq_domain)
return -ENOMEM;
diff --git a/drivers/platform/x86/compal-laptop.c b/drivers/platform/x86/compal-laptop.c
index 58754bc5b5b1..abbebd4bfb15 100644
--- a/drivers/platform/x86/compal-laptop.c
+++ b/drivers/platform/x86/compal-laptop.c
@@ -69,7 +69,6 @@
#include <linux/hwmon-sysfs.h>
#include <linux/power_supply.h>
#include <linux/sysfs.h>
-#include <linux/fb.h>
#include <acpi/video.h>
/* ======= */
diff --git a/drivers/platform/x86/dell/Kconfig b/drivers/platform/x86/dell/Kconfig
index d09060aedd3f..f8a0dffcaab7 100644
--- a/drivers/platform/x86/dell/Kconfig
+++ b/drivers/platform/x86/dell/Kconfig
@@ -18,15 +18,35 @@ config ALIENWARE_WMI
tristate "Alienware Special feature control"
default m
depends on ACPI
+ depends on ACPI_WMI
+ depends on DMI
depends on LEDS_CLASS
depends on NEW_LEDS
- depends on ACPI_WMI
+ help
+ This is a driver for controlling Alienware WMI driven features.
+
+ On legacy devices, it exposes an interface for controlling the AlienFX
+ zones on Alienware machines that don't contain a dedicated
+ AlienFX USB MCU such as the X51 and X51-R2.
+
+ On newer devices, it exposes the AWCC thermal control interface through
+ known Kernel APIs.
+
+config ALIENWARE_WMI_LEGACY
+ bool "Alienware Legacy WMI device driver"
+ default y
+ depends on ALIENWARE_WMI
+ help
+ Legacy Alienware WMI driver with AlienFX LED control capabilities.
+
+config ALIENWARE_WMI_WMAX
+ bool "Alienware WMAX WMI device driver"
+ default y
+ depends on ALIENWARE_WMI
select ACPI_PLATFORM_PROFILE
help
- This is a driver for controlling Alienware BIOS driven
- features. It exposes an interface for controlling the AlienFX
- zones on Alienware machines that don't contain a dedicated AlienFX
- USB MCU such as the X51 and X51-R2.
+ Alienware WMI driver with AlienFX LED, HDMI, amplifier, deep sleep and
+ AWCC thermal control capabilities.
config DCDBAS
tristate "Dell Systems Management Base Driver"
diff --git a/drivers/platform/x86/dell/Makefile b/drivers/platform/x86/dell/Makefile
index bb3cbd470a46..c7501c25e627 100644
--- a/drivers/platform/x86/dell/Makefile
+++ b/drivers/platform/x86/dell/Makefile
@@ -4,24 +4,27 @@
# Dell x86 Platform-Specific Drivers
#
-obj-$(CONFIG_ALIENWARE_WMI) += alienware-wmi.o
-obj-$(CONFIG_DCDBAS) += dcdbas.o
-obj-$(CONFIG_DELL_LAPTOP) += dell-laptop.o
-obj-$(CONFIG_DELL_RBTN) += dell-rbtn.o
-obj-$(CONFIG_DELL_RBU) += dell_rbu.o
-obj-$(CONFIG_DELL_PC) += dell-pc.o
-obj-$(CONFIG_DELL_SMBIOS) += dell-smbios.o
-dell-smbios-objs := dell-smbios-base.o
-dell-smbios-$(CONFIG_DELL_SMBIOS_WMI) += dell-smbios-wmi.o
-dell-smbios-$(CONFIG_DELL_SMBIOS_SMM) += dell-smbios-smm.o
-obj-$(CONFIG_DELL_SMO8800) += dell-smo8800.o
-obj-$(CONFIG_DELL_SMO8800) += dell-lis3lv02d.o
-obj-$(CONFIG_DELL_UART_BACKLIGHT) += dell-uart-backlight.o
-obj-$(CONFIG_DELL_WMI) += dell-wmi.o
-dell-wmi-objs := dell-wmi-base.o
-dell-wmi-$(CONFIG_DELL_WMI_PRIVACY) += dell-wmi-privacy.o
-obj-$(CONFIG_DELL_WMI_AIO) += dell-wmi-aio.o
-obj-$(CONFIG_DELL_WMI_DESCRIPTOR) += dell-wmi-descriptor.o
-obj-$(CONFIG_DELL_WMI_DDV) += dell-wmi-ddv.o
-obj-$(CONFIG_DELL_WMI_LED) += dell-wmi-led.o
-obj-$(CONFIG_DELL_WMI_SYSMAN) += dell-wmi-sysman/
+obj-$(CONFIG_ALIENWARE_WMI) += alienware-wmi.o
+alienware-wmi-y := alienware-wmi-base.o
+alienware-wmi-$(CONFIG_ALIENWARE_WMI_LEGACY) += alienware-wmi-legacy.o
+alienware-wmi-$(CONFIG_ALIENWARE_WMI_WMAX) += alienware-wmi-wmax.o
+obj-$(CONFIG_DCDBAS) += dcdbas.o
+obj-$(CONFIG_DELL_LAPTOP) += dell-laptop.o
+obj-$(CONFIG_DELL_RBTN) += dell-rbtn.o
+obj-$(CONFIG_DELL_RBU) += dell_rbu.o
+obj-$(CONFIG_DELL_PC) += dell-pc.o
+obj-$(CONFIG_DELL_SMBIOS) += dell-smbios.o
+dell-smbios-y := dell-smbios-base.o
+dell-smbios-$(CONFIG_DELL_SMBIOS_WMI) += dell-smbios-wmi.o
+dell-smbios-$(CONFIG_DELL_SMBIOS_SMM) += dell-smbios-smm.o
+obj-$(CONFIG_DELL_SMO8800) += dell-smo8800.o
+obj-$(CONFIG_DELL_SMO8800) += dell-lis3lv02d.o
+obj-$(CONFIG_DELL_UART_BACKLIGHT) += dell-uart-backlight.o
+obj-$(CONFIG_DELL_WMI) += dell-wmi.o
+dell-wmi-y := dell-wmi-base.o
+dell-wmi-$(CONFIG_DELL_WMI_PRIVACY) += dell-wmi-privacy.o
+obj-$(CONFIG_DELL_WMI_AIO) += dell-wmi-aio.o
+obj-$(CONFIG_DELL_WMI_DESCRIPTOR) += dell-wmi-descriptor.o
+obj-$(CONFIG_DELL_WMI_DDV) += dell-wmi-ddv.o
+obj-$(CONFIG_DELL_WMI_LED) += dell-wmi-led.o
+obj-$(CONFIG_DELL_WMI_SYSMAN) += dell-wmi-sysman/
diff --git a/drivers/platform/x86/dell/alienware-wmi-base.c b/drivers/platform/x86/dell/alienware-wmi-base.c
new file mode 100644
index 000000000000..64562b92314f
--- /dev/null
+++ b/drivers/platform/x86/dell/alienware-wmi-base.c
@@ -0,0 +1,491 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Alienware special feature control
+ *
+ * Copyright (C) 2014 Dell Inc <Dell.Client.Kernel@dell.com>
+ * Copyright (C) 2025 Kurt Borja <kuurtb@gmail.com>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/acpi.h>
+#include <linux/cleanup.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/dmi.h>
+#include <linux/leds.h>
+#include "alienware-wmi.h"
+
+MODULE_AUTHOR("Mario Limonciello <mario.limonciello@outlook.com>");
+MODULE_AUTHOR("Kurt Borja <kuurtb@gmail.com>");
+MODULE_DESCRIPTION("Alienware special feature control");
+MODULE_LICENSE("GPL");
+
+struct alienfx_quirks *alienfx;
+
+static struct alienfx_quirks quirk_inspiron5675 = {
+ .num_zones = 2,
+ .hdmi_mux = false,
+ .amplifier = false,
+ .deepslp = false,
+};
+
+static struct alienfx_quirks quirk_unknown = {
+ .num_zones = 2,
+ .hdmi_mux = false,
+ .amplifier = false,
+ .deepslp = false,
+};
+
+static struct alienfx_quirks quirk_x51_r1_r2 = {
+ .num_zones = 3,
+ .hdmi_mux = false,
+ .amplifier = false,
+ .deepslp = false,
+};
+
+static struct alienfx_quirks quirk_x51_r3 = {
+ .num_zones = 4,
+ .hdmi_mux = false,
+ .amplifier = true,
+ .deepslp = false,
+};
+
+static struct alienfx_quirks quirk_asm100 = {
+ .num_zones = 2,
+ .hdmi_mux = true,
+ .amplifier = false,
+ .deepslp = false,
+};
+
+static struct alienfx_quirks quirk_asm200 = {
+ .num_zones = 2,
+ .hdmi_mux = true,
+ .amplifier = false,
+ .deepslp = true,
+};
+
+static struct alienfx_quirks quirk_asm201 = {
+ .num_zones = 2,
+ .hdmi_mux = true,
+ .amplifier = true,
+ .deepslp = true,
+};
+
+static int __init dmi_matched(const struct dmi_system_id *dmi)
+{
+ alienfx = dmi->driver_data;
+ return 1;
+}
+
+static const struct dmi_system_id alienware_quirks[] __initconst = {
+ {
+ .callback = dmi_matched,
+ .ident = "Alienware ASM100",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Alienware"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "ASM100"),
+ },
+ .driver_data = &quirk_asm100,
+ },
+ {
+ .callback = dmi_matched,
+ .ident = "Alienware ASM200",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Alienware"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "ASM200"),
+ },
+ .driver_data = &quirk_asm200,
+ },
+ {
+ .callback = dmi_matched,
+ .ident = "Alienware ASM201",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Alienware"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "ASM201"),
+ },
+ .driver_data = &quirk_asm201,
+ },
+ {
+ .callback = dmi_matched,
+ .ident = "Alienware X51 R1",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Alienware"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Alienware X51"),
+ },
+ .driver_data = &quirk_x51_r1_r2,
+ },
+ {
+ .callback = dmi_matched,
+ .ident = "Alienware X51 R2",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Alienware"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Alienware X51 R2"),
+ },
+ .driver_data = &quirk_x51_r1_r2,
+ },
+ {
+ .callback = dmi_matched,
+ .ident = "Alienware X51 R3",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Alienware"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Alienware X51 R3"),
+ },
+ .driver_data = &quirk_x51_r3,
+ },
+ {
+ .callback = dmi_matched,
+ .ident = "Dell Inc. Inspiron 5675",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 5675"),
+ },
+ .driver_data = &quirk_inspiron5675,
+ },
+ {}
+};
+
+u8 alienware_interface;
+
+int alienware_wmi_command(struct wmi_device *wdev, u32 method_id,
+ void *in_args, size_t in_size, u32 *out_data)
+{
+ struct acpi_buffer out = {ACPI_ALLOCATE_BUFFER, NULL};
+ struct acpi_buffer in = {in_size, in_args};
+ acpi_status ret;
+
+ ret = wmidev_evaluate_method(wdev, 0, method_id, &in, out_data ? &out : NULL);
+ if (ACPI_FAILURE(ret))
+ return -EIO;
+
+ union acpi_object *obj __free(kfree) = out.pointer;
+
+ if (out_data) {
+ if (obj && obj->type == ACPI_TYPE_INTEGER)
+ *out_data = (u32)obj->integer.value;
+ else
+ return -ENOMSG;
+ }
+
+ return 0;
+}
+
+/*
+ * Helpers used for zone control
+ */
+static int parse_rgb(const char *buf, struct color_platform *colors)
+{
+ long unsigned int rgb;
+ int ret;
+ union color_union {
+ struct color_platform cp;
+ int package;
+ } repackager;
+
+ ret = kstrtoul(buf, 16, &rgb);
+ if (ret)
+ return ret;
+
+ /* RGB triplet notation is 24-bit hexadecimal */
+ if (rgb > 0xFFFFFF)
+ return -EINVAL;
+
+ repackager.package = rgb & 0x0f0f0f0f;
+ pr_debug("alienware-wmi: r: %d g:%d b: %d\n",
+ repackager.cp.red, repackager.cp.green, repackager.cp.blue);
+ *colors = repackager.cp;
+ return 0;
+}
+
+/*
+ * Individual RGB zone control
+ */
+static ssize_t zone_show(struct device *dev, struct device_attribute *attr,
+ char *buf, u8 location)
+{
+ struct alienfx_priv *priv = dev_get_drvdata(dev);
+ struct color_platform *colors = &priv->colors[location];
+
+ return sprintf(buf, "red: %d, green: %d, blue: %d\n",
+ colors->red, colors->green, colors->blue);
+
+}
+
+static ssize_t zone_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count, u8 location)
+{
+ struct alienfx_priv *priv = dev_get_drvdata(dev);
+ struct color_platform *colors = &priv->colors[location];
+ struct alienfx_platdata *pdata = dev_get_platdata(dev);
+ int ret;
+
+ ret = parse_rgb(buf, colors);
+ if (ret)
+ return ret;
+
+ ret = pdata->ops.upd_led(priv, pdata->wdev, location);
+
+ return ret ? ret : count;
+}
+
+static ssize_t zone00_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ return zone_show(dev, attr, buf, 0);
+}
+
+static ssize_t zone00_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ return zone_store(dev, attr, buf, count, 0);
+}
+
+static DEVICE_ATTR_RW(zone00);
+
+static ssize_t zone01_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ return zone_show(dev, attr, buf, 1);
+}
+
+static ssize_t zone01_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ return zone_store(dev, attr, buf, count, 1);
+}
+
+static DEVICE_ATTR_RW(zone01);
+
+static ssize_t zone02_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ return zone_show(dev, attr, buf, 2);
+}
+
+static ssize_t zone02_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ return zone_store(dev, attr, buf, count, 2);
+}
+
+static DEVICE_ATTR_RW(zone02);
+
+static ssize_t zone03_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ return zone_show(dev, attr, buf, 3);
+}
+
+static ssize_t zone03_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ return zone_store(dev, attr, buf, count, 3);
+}
+
+static DEVICE_ATTR_RW(zone03);
+
+/*
+ * Lighting control state device attribute (Global)
+ */
+static ssize_t lighting_control_state_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct alienfx_priv *priv = dev_get_drvdata(dev);
+
+ if (priv->lighting_control_state == LEGACY_BOOTING)
+ return sysfs_emit(buf, "[booting] running suspend\n");
+ else if (priv->lighting_control_state == LEGACY_SUSPEND)
+ return sysfs_emit(buf, "booting running [suspend]\n");
+
+ return sysfs_emit(buf, "booting [running] suspend\n");
+}
+
+static ssize_t lighting_control_state_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct alienfx_priv *priv = dev_get_drvdata(dev);
+ u8 val;
+
+ if (strcmp(buf, "booting\n") == 0)
+ val = LEGACY_BOOTING;
+ else if (strcmp(buf, "suspend\n") == 0)
+ val = LEGACY_SUSPEND;
+ else if (alienware_interface == LEGACY)
+ val = LEGACY_RUNNING;
+ else
+ val = WMAX_RUNNING;
+
+ priv->lighting_control_state = val;
+ pr_debug("alienware-wmi: updated control state to %d\n",
+ priv->lighting_control_state);
+
+ return count;
+}
+
+static DEVICE_ATTR_RW(lighting_control_state);
+
+static umode_t zone_attr_visible(struct kobject *kobj,
+ struct attribute *attr, int n)
+{
+ if (n < alienfx->num_zones + 1)
+ return attr->mode;
+
+ return 0;
+}
+
+static bool zone_group_visible(struct kobject *kobj)
+{
+ return alienfx->num_zones > 0;
+}
+DEFINE_SYSFS_GROUP_VISIBLE(zone);
+
+static struct attribute *zone_attrs[] = {
+ &dev_attr_lighting_control_state.attr,
+ &dev_attr_zone00.attr,
+ &dev_attr_zone01.attr,
+ &dev_attr_zone02.attr,
+ &dev_attr_zone03.attr,
+ NULL
+};
+
+static struct attribute_group zone_attribute_group = {
+ .name = "rgb_zones",
+ .is_visible = SYSFS_GROUP_VISIBLE(zone),
+ .attrs = zone_attrs,
+};
+
+/*
+ * LED Brightness (Global)
+ */
+static void global_led_set(struct led_classdev *led_cdev,
+ enum led_brightness brightness)
+{
+ struct alienfx_priv *priv = container_of(led_cdev, struct alienfx_priv,
+ global_led);
+ struct alienfx_platdata *pdata = dev_get_platdata(&priv->pdev->dev);
+ int ret;
+
+ priv->global_brightness = brightness;
+
+ ret = pdata->ops.upd_brightness(priv, pdata->wdev, brightness);
+ if (ret)
+ pr_err("LED brightness update failed\n");
+}
+
+static enum led_brightness global_led_get(struct led_classdev *led_cdev)
+{
+ struct alienfx_priv *priv = container_of(led_cdev, struct alienfx_priv,
+ global_led);
+
+ return priv->global_brightness;
+}
+
+/*
+ * Platform Driver
+ */
+static int alienfx_probe(struct platform_device *pdev)
+{
+ struct alienfx_priv *priv;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ if (alienware_interface == WMAX)
+ priv->lighting_control_state = WMAX_RUNNING;
+ else
+ priv->lighting_control_state = LEGACY_RUNNING;
+
+ priv->pdev = pdev;
+ priv->global_led.name = "alienware::global_brightness";
+ priv->global_led.brightness_set = global_led_set;
+ priv->global_led.brightness_get = global_led_get;
+ priv->global_led.max_brightness = 0x0F;
+ priv->global_brightness = priv->global_led.max_brightness;
+ platform_set_drvdata(pdev, priv);
+
+ return devm_led_classdev_register(&pdev->dev, &priv->global_led);
+}
+
+static const struct attribute_group *alienfx_groups[] = {
+ &zone_attribute_group,
+ WMAX_DEV_GROUPS
+ NULL
+};
+
+static struct platform_driver platform_driver = {
+ .driver = {
+ .name = "alienware-wmi",
+ .dev_groups = alienfx_groups,
+ },
+ .probe = alienfx_probe,
+};
+
+static void alienware_alienfx_remove(void *data)
+{
+ struct platform_device *pdev = data;
+
+ platform_device_unregister(pdev);
+}
+
+int alienware_alienfx_setup(struct alienfx_platdata *pdata)
+{
+ struct device *dev = &pdata->wdev->dev;
+ struct platform_device *pdev;
+ int ret;
+
+ pdev = platform_device_register_data(NULL, "alienware-wmi",
+ PLATFORM_DEVID_NONE, pdata,
+ sizeof(*pdata));
+ if (IS_ERR(pdev))
+ return PTR_ERR(pdev);
+
+ dev_set_drvdata(dev, pdev);
+ ret = devm_add_action_or_reset(dev, alienware_alienfx_remove, pdev);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int __init alienware_wmi_init(void)
+{
+ int ret;
+
+ dmi_check_system(alienware_quirks);
+ if (!alienfx)
+ alienfx = &quirk_unknown;
+
+ ret = platform_driver_register(&platform_driver);
+ if (ret < 0)
+ return ret;
+
+ if (wmi_has_guid(WMAX_CONTROL_GUID)) {
+ alienware_interface = WMAX;
+ ret = alienware_wmax_wmi_init();
+ } else {
+ alienware_interface = LEGACY;
+ ret = alienware_legacy_wmi_init();
+ }
+
+ if (ret < 0)
+ platform_driver_unregister(&platform_driver);
+
+ return ret;
+}
+
+module_init(alienware_wmi_init);
+
+static void __exit alienware_wmi_exit(void)
+{
+ if (alienware_interface == WMAX)
+ alienware_wmax_wmi_exit();
+ else
+ alienware_legacy_wmi_exit();
+
+ platform_driver_unregister(&platform_driver);
+}
+
+module_exit(alienware_wmi_exit);
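parse_rgb() in the new base module above turns a 24-bit "RRGGBB" hex string into per-channel bytes by repacking the value through a union. A user-space sketch of the same split using plain shifts; the driver's channel order depends on struct color_platform's layout and host endianness, so the red-high ordering assumed here is for illustration only:

/* Parse an "RRGGBB" hex triplet into three byte-wide channels. */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct rgb {
	unsigned char red, green, blue;
};

static int parse_rgb(const char *buf, struct rgb *out)
{
	unsigned long val;
	char *end;

	errno = 0;
	val = strtoul(buf, &end, 16);
	if (errno || end == buf)
		return -EINVAL;

	/* RGB triplet notation is 24-bit hexadecimal */
	if (val > 0xFFFFFF)
		return -EINVAL;

	out->red   = (val >> 16) & 0xFF;
	out->green = (val >> 8) & 0xFF;
	out->blue  = val & 0xFF;
	return 0;
}

int main(void)
{
	struct rgb c;

	if (parse_rgb("00ff7f", &c))
		return 1;

	printf("r: %d g: %d b: %d\n", c.red, c.green, c.blue);
	return 0;
}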
diff --git a/drivers/platform/x86/dell/alienware-wmi-legacy.c b/drivers/platform/x86/dell/alienware-wmi-legacy.c
new file mode 100644
index 000000000000..4a84a2fe918b
--- /dev/null
+++ b/drivers/platform/x86/dell/alienware-wmi-legacy.c
@@ -0,0 +1,95 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Alienware LEGACY WMI device driver
+ *
+ * Copyright (C) 2025 Kurt Borja <kuurtb@gmail.com>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/wmi.h>
+#include "alienware-wmi.h"
+
+struct legacy_led_args {
+ struct color_platform colors;
+ u8 brightness;
+ u8 state;
+} __packed;
+
+
+/*
+ * Legacy WMI driver
+ */
+static int legacy_wmi_update_led(struct alienfx_priv *priv,
+ struct wmi_device *wdev, u8 location)
+{
+ struct legacy_led_args legacy_args = {
+ .colors = priv->colors[location],
+ .brightness = priv->global_brightness,
+ .state = 0,
+ };
+ struct acpi_buffer input;
+ acpi_status status;
+
+ if (legacy_args.state != LEGACY_RUNNING) {
+ legacy_args.state = priv->lighting_control_state;
+
+ input.length = sizeof(legacy_args);
+ input.pointer = &legacy_args;
+
+ status = wmi_evaluate_method(LEGACY_POWER_CONTROL_GUID, 0,
+ location + 1, &input, NULL);
+ if (ACPI_FAILURE(status))
+ return -EIO;
+
+ return 0;
+ }
+
+ return alienware_wmi_command(wdev, location + 1, &legacy_args,
+ sizeof(legacy_args), NULL);
+}
+
+static int legacy_wmi_update_brightness(struct alienfx_priv *priv,
+ struct wmi_device *wdev, u8 brightness)
+{
+ return legacy_wmi_update_led(priv, wdev, 0);
+}
+
+static int legacy_wmi_probe(struct wmi_device *wdev, const void *context)
+{
+ struct alienfx_platdata pdata = {
+ .wdev = wdev,
+ .ops = {
+ .upd_led = legacy_wmi_update_led,
+ .upd_brightness = legacy_wmi_update_brightness,
+ },
+ };
+
+ return alienware_alienfx_setup(&pdata);
+}
+
+static const struct wmi_device_id alienware_legacy_device_id_table[] = {
+ { LEGACY_CONTROL_GUID, NULL },
+ { },
+};
+MODULE_DEVICE_TABLE(wmi, alienware_legacy_device_id_table);
+
+static struct wmi_driver alienware_legacy_wmi_driver = {
+ .driver = {
+ .name = "alienware-wmi-alienfx",
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ },
+ .id_table = alienware_legacy_device_id_table,
+ .probe = legacy_wmi_probe,
+ .no_singleton = true,
+};
+
+int __init alienware_legacy_wmi_init(void)
+{
+ return wmi_driver_register(&alienware_legacy_wmi_driver);
+}
+
+void __exit alienware_legacy_wmi_exit(void)
+{
+ wmi_driver_unregister(&alienware_legacy_wmi_driver);
+}
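The legacy driver above only supplies its upd_led/upd_brightness callbacks in struct alienfx_platdata and hands everything else to the shared base module, which calls back exclusively through that ops table. A small stand-alone sketch of the pattern, with struct fx_ops and the legacy_* names invented for illustration:

/* Backend selection through a table of function pointers. */
#include <stdio.h>

struct fx_ops {
	int (*upd_led)(unsigned char zone);
	int (*upd_brightness)(unsigned char level);
};

static int legacy_upd_led(unsigned char zone)
{
	printf("legacy: update zone %d\n", zone);
	return 0;
}

static int legacy_upd_brightness(unsigned char level)
{
	printf("legacy: brightness %d\n", level);
	return 0;
}

static const struct fx_ops legacy_ops = {
	.upd_led	= legacy_upd_led,
	.upd_brightness	= legacy_upd_brightness,
};

/* The shared core never knows which backend sits behind the table. */
static int core_set_zone(const struct fx_ops *ops, unsigned char zone,
			 unsigned char level)
{
	int ret;

	ret = ops->upd_led(zone);
	if (ret)
		return ret;

	return ops->upd_brightness(level);
}

int main(void)
{
	return core_set_zone(&legacy_ops, 0, 15);
}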
diff --git a/drivers/platform/x86/dell/alienware-wmi-wmax.c b/drivers/platform/x86/dell/alienware-wmi-wmax.c
new file mode 100644
index 000000000000..3d3014b5adf0
--- /dev/null
+++ b/drivers/platform/x86/dell/alienware-wmi-wmax.c
@@ -0,0 +1,768 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Alienware WMAX WMI device driver
+ *
+ * Copyright (C) 2014 Dell Inc <Dell.Client.Kernel@dell.com>
+ * Copyright (C) 2025 Kurt Borja <kuurtb@gmail.com>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/dmi.h>
+#include <linux/moduleparam.h>
+#include <linux/platform_profile.h>
+#include <linux/wmi.h>
+#include "alienware-wmi.h"
+
+#define WMAX_METHOD_HDMI_SOURCE 0x1
+#define WMAX_METHOD_HDMI_STATUS 0x2
+#define WMAX_METHOD_HDMI_CABLE 0x5
+#define WMAX_METHOD_AMPLIFIER_CABLE 0x6
+#define WMAX_METHOD_DEEP_SLEEP_CONTROL 0x0B
+#define WMAX_METHOD_DEEP_SLEEP_STATUS 0x0C
+#define WMAX_METHOD_BRIGHTNESS 0x3
+#define WMAX_METHOD_ZONE_CONTROL 0x4
+#define WMAX_METHOD_THERMAL_INFORMATION 0x14
+#define WMAX_METHOD_THERMAL_CONTROL 0x15
+#define WMAX_METHOD_GAME_SHIFT_STATUS 0x25
+
+#define WMAX_THERMAL_MODE_GMODE 0xAB
+
+#define WMAX_FAILURE_CODE 0xFFFFFFFF
+#define WMAX_THERMAL_TABLE_MASK GENMASK(7, 4)
+#define WMAX_THERMAL_MODE_MASK GENMASK(3, 0)
+#define WMAX_SENSOR_ID_MASK BIT(8)
+
+static bool force_platform_profile;
+module_param_unsafe(force_platform_profile, bool, 0);
+MODULE_PARM_DESC(force_platform_profile, "Forces auto-detecting thermal profiles without checking if WMI thermal backend is available");
+
+static bool force_gmode;
+module_param_unsafe(force_gmode, bool, 0);
+MODULE_PARM_DESC(force_gmode, "Forces G-Mode when performance profile is selected");
+
+struct awcc_quirks {
+ bool pprof;
+ bool gmode;
+};
+
+static struct awcc_quirks g_series_quirks = {
+ .pprof = true,
+ .gmode = true,
+};
+
+static struct awcc_quirks generic_quirks = {
+ .pprof = true,
+ .gmode = false,
+};
+
+static struct awcc_quirks empty_quirks;
+
+static const struct dmi_system_id awcc_dmi_table[] __initconst = {
+ {
+ .ident = "Alienware m16 R1 AMD",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Alienware"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Alienware m16 R1 AMD"),
+ },
+ .driver_data = &generic_quirks,
+ },
+ {
+ .ident = "Alienware m17 R5",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Alienware"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Alienware m17 R5 AMD"),
+ },
+ .driver_data = &generic_quirks,
+ },
+ {
+ .ident = "Alienware m18 R2",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Alienware"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Alienware m18 R2"),
+ },
+ .driver_data = &generic_quirks,
+ },
+ {
+ .ident = "Alienware x15 R1",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Alienware"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Alienware x15 R1"),
+ },
+ .driver_data = &generic_quirks,
+ },
+ {
+ .ident = "Alienware x17 R2",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Alienware"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Alienware x17 R2"),
+ },
+ .driver_data = &generic_quirks,
+ },
+ {
+ .ident = "Dell Inc. G15 5510",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Dell G15 5510"),
+ },
+ .driver_data = &g_series_quirks,
+ },
+ {
+ .ident = "Dell Inc. G15 5511",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Dell G15 5511"),
+ },
+ .driver_data = &g_series_quirks,
+ },
+ {
+ .ident = "Dell Inc. G15 5515",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Dell G15 5515"),
+ },
+ .driver_data = &g_series_quirks,
+ },
+ {
+ .ident = "Dell Inc. G3 3500",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "G3 3500"),
+ },
+ .driver_data = &g_series_quirks,
+ },
+ {
+ .ident = "Dell Inc. G3 3590",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "G3 3590"),
+ },
+ .driver_data = &g_series_quirks,
+ },
+ {
+ .ident = "Dell Inc. G5 5500",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "G5 5500"),
+ },
+ .driver_data = &g_series_quirks,
+ },
+};
+
+enum WMAX_THERMAL_INFORMATION_OPERATIONS {
+ WMAX_OPERATION_SYS_DESCRIPTION = 0x02,
+ WMAX_OPERATION_LIST_IDS = 0x03,
+ WMAX_OPERATION_CURRENT_PROFILE = 0x0B,
+};
+
+enum WMAX_THERMAL_CONTROL_OPERATIONS {
+ WMAX_OPERATION_ACTIVATE_PROFILE = 0x01,
+};
+
+enum WMAX_GAME_SHIFT_STATUS_OPERATIONS {
+ WMAX_OPERATION_TOGGLE_GAME_SHIFT = 0x01,
+ WMAX_OPERATION_GET_GAME_SHIFT_STATUS = 0x02,
+};
+
+enum WMAX_THERMAL_TABLES {
+ WMAX_THERMAL_TABLE_BASIC = 0x90,
+ WMAX_THERMAL_TABLE_USTT = 0xA0,
+};
+
+enum wmax_thermal_mode {
+ THERMAL_MODE_USTT_BALANCED,
+ THERMAL_MODE_USTT_BALANCED_PERFORMANCE,
+ THERMAL_MODE_USTT_COOL,
+ THERMAL_MODE_USTT_QUIET,
+ THERMAL_MODE_USTT_PERFORMANCE,
+ THERMAL_MODE_USTT_LOW_POWER,
+ THERMAL_MODE_BASIC_QUIET,
+ THERMAL_MODE_BASIC_BALANCED,
+ THERMAL_MODE_BASIC_BALANCED_PERFORMANCE,
+ THERMAL_MODE_BASIC_PERFORMANCE,
+ THERMAL_MODE_LAST,
+};
+
+struct wmax_led_args {
+ u32 led_mask;
+ struct color_platform colors;
+ u8 state;
+} __packed;
+
+struct wmax_brightness_args {
+ u32 led_mask;
+ u32 percentage;
+};
+
+struct wmax_basic_args {
+ u8 arg;
+};
+
+struct wmax_u32_args {
+ u8 operation;
+ u8 arg1;
+ u8 arg2;
+ u8 arg3;
+};
+
+struct awcc_priv {
+ struct wmi_device *wdev;
+ struct device *ppdev;
+ enum wmax_thermal_mode supported_thermal_profiles[PLATFORM_PROFILE_LAST];
+};
+
+static const enum platform_profile_option wmax_mode_to_platform_profile[THERMAL_MODE_LAST] = {
+ [THERMAL_MODE_USTT_BALANCED] = PLATFORM_PROFILE_BALANCED,
+ [THERMAL_MODE_USTT_BALANCED_PERFORMANCE] = PLATFORM_PROFILE_BALANCED_PERFORMANCE,
+ [THERMAL_MODE_USTT_COOL] = PLATFORM_PROFILE_COOL,
+ [THERMAL_MODE_USTT_QUIET] = PLATFORM_PROFILE_QUIET,
+ [THERMAL_MODE_USTT_PERFORMANCE] = PLATFORM_PROFILE_PERFORMANCE,
+ [THERMAL_MODE_USTT_LOW_POWER] = PLATFORM_PROFILE_LOW_POWER,
+ [THERMAL_MODE_BASIC_QUIET] = PLATFORM_PROFILE_QUIET,
+ [THERMAL_MODE_BASIC_BALANCED] = PLATFORM_PROFILE_BALANCED,
+ [THERMAL_MODE_BASIC_BALANCED_PERFORMANCE] = PLATFORM_PROFILE_BALANCED_PERFORMANCE,
+ [THERMAL_MODE_BASIC_PERFORMANCE] = PLATFORM_PROFILE_PERFORMANCE,
+};
+
+static struct awcc_quirks *awcc;
+
+/*
+ * The HDMI mux sysfs node indicates the status of the HDMI input mux.
+ * It can toggle between standard system GPU output and HDMI input.
+ */
+static ssize_t cable_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct alienfx_platdata *pdata = dev_get_platdata(dev);
+ struct wmax_basic_args in_args = {
+ .arg = 0,
+ };
+ u32 out_data;
+ int ret;
+
+ ret = alienware_wmi_command(pdata->wdev, WMAX_METHOD_HDMI_CABLE,
+ &in_args, sizeof(in_args), &out_data);
+ if (!ret) {
+ if (out_data == 0)
+ return sysfs_emit(buf, "[unconnected] connected unknown\n");
+ else if (out_data == 1)
+ return sysfs_emit(buf, "unconnected [connected] unknown\n");
+ }
+
+ pr_err("alienware-wmi: unknown HDMI cable status: %d\n", ret);
+ return sysfs_emit(buf, "unconnected connected [unknown]\n");
+}
+
+static ssize_t source_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct alienfx_platdata *pdata = dev_get_platdata(dev);
+ struct wmax_basic_args in_args = {
+ .arg = 0,
+ };
+ u32 out_data;
+ int ret;
+
+ ret = alienware_wmi_command(pdata->wdev, WMAX_METHOD_HDMI_STATUS,
+ &in_args, sizeof(in_args), &out_data);
+ if (!ret) {
+ if (out_data == 1)
+ return sysfs_emit(buf, "[input] gpu unknown\n");
+ else if (out_data == 2)
+ return sysfs_emit(buf, "input [gpu] unknown\n");
+ }
+
+ pr_err("alienware-wmi: unknown HDMI source status: %u\n", ret);
+ return sysfs_emit(buf, "input gpu [unknown]\n");
+}
+
+static ssize_t source_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct alienfx_platdata *pdata = dev_get_platdata(dev);
+ struct wmax_basic_args args;
+ int ret;
+
+ if (strcmp(buf, "gpu\n") == 0)
+ args.arg = 1;
+ else if (strcmp(buf, "input\n") == 0)
+ args.arg = 2;
+ else
+ args.arg = 3;
+ pr_debug("alienware-wmi: setting hdmi to %d : %s", args.arg, buf);
+
+ ret = alienware_wmi_command(pdata->wdev, WMAX_METHOD_HDMI_SOURCE, &args,
+ sizeof(args), NULL);
+ if (ret < 0)
+ pr_err("alienware-wmi: HDMI toggle failed: results: %u\n", ret);
+
+ return count;
+}
+
+static DEVICE_ATTR_RO(cable);
+static DEVICE_ATTR_RW(source);
+
+static bool hdmi_group_visible(struct kobject *kobj)
+{
+ return alienware_interface == WMAX && alienfx->hdmi_mux;
+}
+DEFINE_SIMPLE_SYSFS_GROUP_VISIBLE(hdmi);
+
+static struct attribute *hdmi_attrs[] = {
+ &dev_attr_cable.attr,
+ &dev_attr_source.attr,
+ NULL,
+};
+
+const struct attribute_group wmax_hdmi_attribute_group = {
+ .name = "hdmi",
+ .is_visible = SYSFS_GROUP_VISIBLE(hdmi),
+ .attrs = hdmi_attrs,
+};
+
+/*
+ * Alienware GFX amplifier support
+ * - Currently supports reading cable status
+ * - Leaving expansion room to possibly support dock/undock events later
+ */
+static ssize_t status_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct alienfx_platdata *pdata = dev_get_platdata(dev);
+ struct wmax_basic_args in_args = {
+ .arg = 0,
+ };
+ u32 out_data;
+ int ret;
+
+ ret = alienware_wmi_command(pdata->wdev, WMAX_METHOD_AMPLIFIER_CABLE,
+ &in_args, sizeof(in_args), &out_data);
+ if (!ret) {
+ if (out_data == 0)
+ return sysfs_emit(buf, "[unconnected] connected unknown\n");
+ else if (out_data == 1)
+ return sysfs_emit(buf, "unconnected [connected] unknown\n");
+ }
+
+ pr_err("alienware-wmi: unknown amplifier cable status: %d\n", ret);
+ return sysfs_emit(buf, "unconnected connected [unknown]\n");
+}
+
+static DEVICE_ATTR_RO(status);
+
+static bool amplifier_group_visible(struct kobject *kobj)
+{
+ return alienware_interface == WMAX && alienfx->amplifier;
+}
+DEFINE_SIMPLE_SYSFS_GROUP_VISIBLE(amplifier);
+
+static struct attribute *amplifier_attrs[] = {
+ &dev_attr_status.attr,
+ NULL,
+};
+
+const struct attribute_group wmax_amplifier_attribute_group = {
+ .name = "amplifier",
+ .is_visible = SYSFS_GROUP_VISIBLE(amplifier),
+ .attrs = amplifier_attrs,
+};
+
+/*
+ * Deep Sleep Control support
+ * - Modifies BIOS setting for deep sleep control allowing extra wakeup events
+ */
+static ssize_t deepsleep_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct alienfx_platdata *pdata = dev_get_platdata(dev);
+ struct wmax_basic_args in_args = {
+ .arg = 0,
+ };
+ u32 out_data;
+ int ret;
+
+ ret = alienware_wmi_command(pdata->wdev, WMAX_METHOD_DEEP_SLEEP_STATUS,
+ &in_args, sizeof(in_args), &out_data);
+ if (!ret) {
+ if (out_data == 0)
+ return sysfs_emit(buf, "[disabled] s5 s5_s4\n");
+ else if (out_data == 1)
+ return sysfs_emit(buf, "disabled [s5] s5_s4\n");
+ else if (out_data == 2)
+ return sysfs_emit(buf, "disabled s5 [s5_s4]\n");
+ }
+
+ pr_err("alienware-wmi: unknown deep sleep status: %d\n", ret);
+ return sysfs_emit(buf, "disabled s5 s5_s4 [unknown]\n");
+}
+
+static ssize_t deepsleep_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct alienfx_platdata *pdata = dev_get_platdata(dev);
+ struct wmax_basic_args args;
+ int ret;
+
+ if (strcmp(buf, "disabled\n") == 0)
+ args.arg = 0;
+ else if (strcmp(buf, "s5\n") == 0)
+ args.arg = 1;
+ else
+ args.arg = 2;
+ pr_debug("alienware-wmi: setting deep sleep to %d : %s", args.arg, buf);
+
+ ret = alienware_wmi_command(pdata->wdev, WMAX_METHOD_DEEP_SLEEP_CONTROL,
+ &args, sizeof(args), NULL);
+ if (!ret)
+ pr_err("alienware-wmi: deep sleep control failed: results: %u\n", ret);
+
+ return count;
+}
+
+static DEVICE_ATTR_RW(deepsleep);
+
+static bool deepsleep_group_visible(struct kobject *kobj)
+{
+ return alienware_interface == WMAX && alienfx->deepslp;
+}
+DEFINE_SIMPLE_SYSFS_GROUP_VISIBLE(deepsleep);
+
+static struct attribute *deepsleep_attrs[] = {
+ &dev_attr_deepsleep.attr,
+ NULL,
+};
+
+const struct attribute_group wmax_deepsleep_attribute_group = {
+ .name = "deepsleep",
+ .is_visible = SYSFS_GROUP_VISIBLE(deepsleep),
+ .attrs = deepsleep_attrs,
+};
+
+/*
+ * Thermal Profile control
+ * - Provides thermal profile control through the Platform Profile API
+ */
+static bool is_wmax_thermal_code(u32 code)
+{
+ if (code & WMAX_SENSOR_ID_MASK)
+ return false;
+
+ if ((code & WMAX_THERMAL_MODE_MASK) >= THERMAL_MODE_LAST)
+ return false;
+
+ if ((code & WMAX_THERMAL_TABLE_MASK) == WMAX_THERMAL_TABLE_BASIC &&
+ (code & WMAX_THERMAL_MODE_MASK) >= THERMAL_MODE_BASIC_QUIET)
+ return true;
+
+ if ((code & WMAX_THERMAL_TABLE_MASK) == WMAX_THERMAL_TABLE_USTT &&
+ (code & WMAX_THERMAL_MODE_MASK) <= THERMAL_MODE_USTT_LOW_POWER)
+ return true;
+
+ return false;
+}
+
+static int wmax_thermal_information(struct wmi_device *wdev, u8 operation,
+ u8 arg, u32 *out_data)
+{
+ struct wmax_u32_args in_args = {
+ .operation = operation,
+ .arg1 = arg,
+ .arg2 = 0,
+ .arg3 = 0,
+ };
+ int ret;
+
+ ret = alienware_wmi_command(wdev, WMAX_METHOD_THERMAL_INFORMATION,
+ &in_args, sizeof(in_args), out_data);
+ if (ret < 0)
+ return ret;
+
+ if (*out_data == WMAX_FAILURE_CODE)
+ return -EBADRQC;
+
+ return 0;
+}
+
+static int wmax_thermal_control(struct wmi_device *wdev, u8 profile)
+{
+ struct wmax_u32_args in_args = {
+ .operation = WMAX_OPERATION_ACTIVATE_PROFILE,
+ .arg1 = profile,
+ .arg2 = 0,
+ .arg3 = 0,
+ };
+ u32 out_data;
+ int ret;
+
+ ret = alienware_wmi_command(wdev, WMAX_METHOD_THERMAL_CONTROL,
+ &in_args, sizeof(in_args), &out_data);
+ if (ret)
+ return ret;
+
+ if (out_data == WMAX_FAILURE_CODE)
+ return -EBADRQC;
+
+ return 0;
+}
+
+static int wmax_game_shift_status(struct wmi_device *wdev, u8 operation,
+ u32 *out_data)
+{
+ struct wmax_u32_args in_args = {
+ .operation = operation,
+ .arg1 = 0,
+ .arg2 = 0,
+ .arg3 = 0,
+ };
+ int ret;
+
+ ret = alienware_wmi_command(wdev, WMAX_METHOD_GAME_SHIFT_STATUS,
+ &in_args, sizeof(in_args), out_data);
+ if (ret < 0)
+ return ret;
+
+ if (*out_data == WMAX_FAILURE_CODE)
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+
+static int thermal_profile_get(struct device *dev,
+ enum platform_profile_option *profile)
+{
+ struct awcc_priv *priv = dev_get_drvdata(dev);
+ u32 out_data;
+ int ret;
+
+ ret = wmax_thermal_information(priv->wdev, WMAX_OPERATION_CURRENT_PROFILE,
+ 0, &out_data);
+
+ if (ret < 0)
+ return ret;
+
+ if (out_data == WMAX_THERMAL_MODE_GMODE) {
+ *profile = PLATFORM_PROFILE_PERFORMANCE;
+ return 0;
+ }
+
+ if (!is_wmax_thermal_code(out_data))
+ return -ENODATA;
+
+ out_data &= WMAX_THERMAL_MODE_MASK;
+ *profile = wmax_mode_to_platform_profile[out_data];
+
+ return 0;
+}
+
+static int thermal_profile_set(struct device *dev,
+ enum platform_profile_option profile)
+{
+ struct awcc_priv *priv = dev_get_drvdata(dev);
+
+ if (awcc->gmode) {
+ u32 gmode_status;
+ int ret;
+
+ ret = wmax_game_shift_status(priv->wdev,
+ WMAX_OPERATION_GET_GAME_SHIFT_STATUS,
+ &gmode_status);
+
+ if (ret < 0)
+ return ret;
+
+ if ((profile == PLATFORM_PROFILE_PERFORMANCE && !gmode_status) ||
+ (profile != PLATFORM_PROFILE_PERFORMANCE && gmode_status)) {
+ ret = wmax_game_shift_status(priv->wdev,
+ WMAX_OPERATION_TOGGLE_GAME_SHIFT,
+ &gmode_status);
+
+ if (ret < 0)
+ return ret;
+ }
+ }
+
+ return wmax_thermal_control(priv->wdev,
+ priv->supported_thermal_profiles[profile]);
+}
+
+static int thermal_profile_probe(void *drvdata, unsigned long *choices)
+{
+ enum platform_profile_option profile;
+ struct awcc_priv *priv = drvdata;
+ enum wmax_thermal_mode mode;
+ u8 sys_desc[4];
+ u32 first_mode;
+ u32 out_data;
+ int ret;
+
+ ret = wmax_thermal_information(priv->wdev, WMAX_OPERATION_SYS_DESCRIPTION,
+ 0, (u32 *) &sys_desc);
+ if (ret < 0)
+ return ret;
+
+ first_mode = sys_desc[0] + sys_desc[1];
+
+ for (u32 i = 0; i < sys_desc[3]; i++) {
+ ret = wmax_thermal_information(priv->wdev, WMAX_OPERATION_LIST_IDS,
+ i + first_mode, &out_data);
+
+ if (ret == -EIO)
+ return ret;
+
+ if (ret == -EBADRQC)
+ break;
+
+ if (!is_wmax_thermal_code(out_data))
+ continue;
+
+ mode = out_data & WMAX_THERMAL_MODE_MASK;
+ profile = wmax_mode_to_platform_profile[mode];
+ priv->supported_thermal_profiles[profile] = out_data;
+
+ set_bit(profile, choices);
+ }
+
+ if (bitmap_empty(choices, PLATFORM_PROFILE_LAST))
+ return -ENODEV;
+
+ if (awcc->gmode) {
+ priv->supported_thermal_profiles[PLATFORM_PROFILE_PERFORMANCE] =
+ WMAX_THERMAL_MODE_GMODE;
+
+ set_bit(PLATFORM_PROFILE_PERFORMANCE, choices);
+ }
+
+ return 0;
+}
+
+static const struct platform_profile_ops awcc_platform_profile_ops = {
+ .probe = thermal_profile_probe,
+ .profile_get = thermal_profile_get,
+ .profile_set = thermal_profile_set,
+};
+
+static int awcc_platform_profile_init(struct wmi_device *wdev)
+{
+ struct awcc_priv *priv = dev_get_drvdata(&wdev->dev);
+
+ priv->ppdev = devm_platform_profile_register(&wdev->dev, "alienware-wmi",
+ priv, &awcc_platform_profile_ops);
+
+ return PTR_ERR_OR_ZERO(priv->ppdev);
+}
+
+static int alienware_awcc_setup(struct wmi_device *wdev)
+{
+ struct awcc_priv *priv;
+ int ret;
+
+ priv = devm_kzalloc(&wdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->wdev = wdev;
+ dev_set_drvdata(&wdev->dev, priv);
+
+ if (awcc->pprof) {
+ ret = awcc_platform_profile_init(wdev);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+/*
+ * WMAX WMI driver
+ */
+static int wmax_wmi_update_led(struct alienfx_priv *priv,
+ struct wmi_device *wdev, u8 location)
+{
+ struct wmax_led_args in_args = {
+ .led_mask = 1 << location,
+ .colors = priv->colors[location],
+ .state = priv->lighting_control_state,
+ };
+
+ return alienware_wmi_command(wdev, WMAX_METHOD_ZONE_CONTROL, &in_args,
+ sizeof(in_args), NULL);
+}
+
+static int wmax_wmi_update_brightness(struct alienfx_priv *priv,
+ struct wmi_device *wdev, u8 brightness)
+{
+ struct wmax_brightness_args in_args = {
+ .led_mask = 0xFF,
+ .percentage = brightness,
+ };
+
+ return alienware_wmi_command(wdev, WMAX_METHOD_BRIGHTNESS, &in_args,
+ sizeof(in_args), NULL);
+}
+
+static int wmax_wmi_probe(struct wmi_device *wdev, const void *context)
+{
+ struct alienfx_platdata pdata = {
+ .wdev = wdev,
+ .ops = {
+ .upd_led = wmax_wmi_update_led,
+ .upd_brightness = wmax_wmi_update_brightness,
+ },
+ };
+ int ret;
+
+ if (awcc)
+ ret = alienware_awcc_setup(wdev);
+ else
+ ret = alienware_alienfx_setup(&pdata);
+
+ return ret;
+}
+
+static const struct wmi_device_id alienware_wmax_device_id_table[] = {
+ { WMAX_CONTROL_GUID, NULL },
+ { },
+};
+MODULE_DEVICE_TABLE(wmi, alienware_wmax_device_id_table);
+
+static struct wmi_driver alienware_wmax_wmi_driver = {
+ .driver = {
+ .name = "alienware-wmi-wmax",
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ },
+ .id_table = alienware_wmax_device_id_table,
+ .probe = wmax_wmi_probe,
+ .no_singleton = true,
+};
+
+int __init alienware_wmax_wmi_init(void)
+{
+ const struct dmi_system_id *id;
+
+ id = dmi_first_match(awcc_dmi_table);
+ if (id)
+ awcc = id->driver_data;
+
+ if (force_platform_profile) {
+ if (!awcc)
+ awcc = &empty_quirks;
+
+ awcc->pprof = true;
+ }
+
+ if (force_gmode) {
+ if (awcc)
+ awcc->gmode = true;
+ else
+ pr_warn("force_gmode requires platform profile support\n");
+ }
+
+ return wmi_driver_register(&alienware_wmax_wmi_driver);
+}
+
+void __exit alienware_wmax_wmi_exit(void)
+{
+ wmi_driver_unregister(&alienware_wmax_wmi_driver);
+}
diff --git a/drivers/platform/x86/dell/alienware-wmi.c b/drivers/platform/x86/dell/alienware-wmi.c
deleted file mode 100644
index e252e0cf47ef..000000000000
--- a/drivers/platform/x86/dell/alienware-wmi.c
+++ /dev/null
@@ -1,1249 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Alienware AlienFX control
- *
- * Copyright (C) 2014 Dell Inc <Dell.Client.Kernel@dell.com>
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/acpi.h>
-#include <linux/bitfield.h>
-#include <linux/bits.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/platform_profile.h>
-#include <linux/dmi.h>
-#include <linux/leds.h>
-
-#define LEGACY_CONTROL_GUID "A90597CE-A997-11DA-B012-B622A1EF5492"
-#define LEGACY_POWER_CONTROL_GUID "A80593CE-A997-11DA-B012-B622A1EF5492"
-#define WMAX_CONTROL_GUID "A70591CE-A997-11DA-B012-B622A1EF5492"
-
-#define WMAX_METHOD_HDMI_SOURCE 0x1
-#define WMAX_METHOD_HDMI_STATUS 0x2
-#define WMAX_METHOD_BRIGHTNESS 0x3
-#define WMAX_METHOD_ZONE_CONTROL 0x4
-#define WMAX_METHOD_HDMI_CABLE 0x5
-#define WMAX_METHOD_AMPLIFIER_CABLE 0x6
-#define WMAX_METHOD_DEEP_SLEEP_CONTROL 0x0B
-#define WMAX_METHOD_DEEP_SLEEP_STATUS 0x0C
-#define WMAX_METHOD_THERMAL_INFORMATION 0x14
-#define WMAX_METHOD_THERMAL_CONTROL 0x15
-#define WMAX_METHOD_GAME_SHIFT_STATUS 0x25
-
-#define WMAX_THERMAL_MODE_GMODE 0xAB
-
-#define WMAX_FAILURE_CODE 0xFFFFFFFF
-
-MODULE_AUTHOR("Mario Limonciello <mario.limonciello@outlook.com>");
-MODULE_DESCRIPTION("Alienware special feature control");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("wmi:" LEGACY_CONTROL_GUID);
-MODULE_ALIAS("wmi:" WMAX_CONTROL_GUID);
-
-static bool force_platform_profile;
-module_param_unsafe(force_platform_profile, bool, 0);
-MODULE_PARM_DESC(force_platform_profile, "Forces auto-detecting thermal profiles without checking if WMI thermal backend is available");
-
-static bool force_gmode;
-module_param_unsafe(force_gmode, bool, 0);
-MODULE_PARM_DESC(force_gmode, "Forces G-Mode when performance profile is selected");
-
-enum INTERFACE_FLAGS {
- LEGACY,
- WMAX,
-};
-
-enum LEGACY_CONTROL_STATES {
- LEGACY_RUNNING = 1,
- LEGACY_BOOTING = 0,
- LEGACY_SUSPEND = 3,
-};
-
-enum WMAX_CONTROL_STATES {
- WMAX_RUNNING = 0xFF,
- WMAX_BOOTING = 0,
- WMAX_SUSPEND = 3,
-};
-
-enum WMAX_THERMAL_INFORMATION_OPERATIONS {
- WMAX_OPERATION_SYS_DESCRIPTION = 0x02,
- WMAX_OPERATION_LIST_IDS = 0x03,
- WMAX_OPERATION_CURRENT_PROFILE = 0x0B,
-};
-
-enum WMAX_THERMAL_CONTROL_OPERATIONS {
- WMAX_OPERATION_ACTIVATE_PROFILE = 0x01,
-};
-
-enum WMAX_GAME_SHIFT_STATUS_OPERATIONS {
- WMAX_OPERATION_TOGGLE_GAME_SHIFT = 0x01,
- WMAX_OPERATION_GET_GAME_SHIFT_STATUS = 0x02,
-};
-
-enum WMAX_THERMAL_TABLES {
- WMAX_THERMAL_TABLE_BASIC = 0x90,
- WMAX_THERMAL_TABLE_USTT = 0xA0,
-};
-
-enum wmax_thermal_mode {
- THERMAL_MODE_USTT_BALANCED,
- THERMAL_MODE_USTT_BALANCED_PERFORMANCE,
- THERMAL_MODE_USTT_COOL,
- THERMAL_MODE_USTT_QUIET,
- THERMAL_MODE_USTT_PERFORMANCE,
- THERMAL_MODE_USTT_LOW_POWER,
- THERMAL_MODE_BASIC_QUIET,
- THERMAL_MODE_BASIC_BALANCED,
- THERMAL_MODE_BASIC_BALANCED_PERFORMANCE,
- THERMAL_MODE_BASIC_PERFORMANCE,
- THERMAL_MODE_LAST,
-};
-
-static const enum platform_profile_option wmax_mode_to_platform_profile[THERMAL_MODE_LAST] = {
- [THERMAL_MODE_USTT_BALANCED] = PLATFORM_PROFILE_BALANCED,
- [THERMAL_MODE_USTT_BALANCED_PERFORMANCE] = PLATFORM_PROFILE_BALANCED_PERFORMANCE,
- [THERMAL_MODE_USTT_COOL] = PLATFORM_PROFILE_COOL,
- [THERMAL_MODE_USTT_QUIET] = PLATFORM_PROFILE_QUIET,
- [THERMAL_MODE_USTT_PERFORMANCE] = PLATFORM_PROFILE_PERFORMANCE,
- [THERMAL_MODE_USTT_LOW_POWER] = PLATFORM_PROFILE_LOW_POWER,
- [THERMAL_MODE_BASIC_QUIET] = PLATFORM_PROFILE_QUIET,
- [THERMAL_MODE_BASIC_BALANCED] = PLATFORM_PROFILE_BALANCED,
- [THERMAL_MODE_BASIC_BALANCED_PERFORMANCE] = PLATFORM_PROFILE_BALANCED_PERFORMANCE,
- [THERMAL_MODE_BASIC_PERFORMANCE] = PLATFORM_PROFILE_PERFORMANCE,
-};
-
-struct quirk_entry {
- u8 num_zones;
- u8 hdmi_mux;
- u8 amplifier;
- u8 deepslp;
- bool thermal;
- bool gmode;
-};
-
-static struct quirk_entry *quirks;
-
-
-static struct quirk_entry quirk_inspiron5675 = {
- .num_zones = 2,
- .hdmi_mux = 0,
- .amplifier = 0,
- .deepslp = 0,
- .thermal = false,
- .gmode = false,
-};
-
-static struct quirk_entry quirk_unknown = {
- .num_zones = 2,
- .hdmi_mux = 0,
- .amplifier = 0,
- .deepslp = 0,
- .thermal = false,
- .gmode = false,
-};
-
-static struct quirk_entry quirk_x51_r1_r2 = {
- .num_zones = 3,
- .hdmi_mux = 0,
- .amplifier = 0,
- .deepslp = 0,
- .thermal = false,
- .gmode = false,
-};
-
-static struct quirk_entry quirk_x51_r3 = {
- .num_zones = 4,
- .hdmi_mux = 0,
- .amplifier = 1,
- .deepslp = 0,
- .thermal = false,
- .gmode = false,
-};
-
-static struct quirk_entry quirk_asm100 = {
- .num_zones = 2,
- .hdmi_mux = 1,
- .amplifier = 0,
- .deepslp = 0,
- .thermal = false,
- .gmode = false,
-};
-
-static struct quirk_entry quirk_asm200 = {
- .num_zones = 2,
- .hdmi_mux = 1,
- .amplifier = 0,
- .deepslp = 1,
- .thermal = false,
- .gmode = false,
-};
-
-static struct quirk_entry quirk_asm201 = {
- .num_zones = 2,
- .hdmi_mux = 1,
- .amplifier = 1,
- .deepslp = 1,
- .thermal = false,
- .gmode = false,
-};
-
-static struct quirk_entry quirk_g_series = {
- .num_zones = 0,
- .hdmi_mux = 0,
- .amplifier = 0,
- .deepslp = 0,
- .thermal = true,
- .gmode = true,
-};
-
-static struct quirk_entry quirk_x_series = {
- .num_zones = 0,
- .hdmi_mux = 0,
- .amplifier = 0,
- .deepslp = 0,
- .thermal = true,
- .gmode = false,
-};
-
-static int __init dmi_matched(const struct dmi_system_id *dmi)
-{
- quirks = dmi->driver_data;
- return 1;
-}
-
-static const struct dmi_system_id alienware_quirks[] __initconst = {
- {
- .callback = dmi_matched,
- .ident = "Alienware ASM100",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Alienware"),
- DMI_MATCH(DMI_PRODUCT_NAME, "ASM100"),
- },
- .driver_data = &quirk_asm100,
- },
- {
- .callback = dmi_matched,
- .ident = "Alienware ASM200",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Alienware"),
- DMI_MATCH(DMI_PRODUCT_NAME, "ASM200"),
- },
- .driver_data = &quirk_asm200,
- },
- {
- .callback = dmi_matched,
- .ident = "Alienware ASM201",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Alienware"),
- DMI_MATCH(DMI_PRODUCT_NAME, "ASM201"),
- },
- .driver_data = &quirk_asm201,
- },
- {
- .callback = dmi_matched,
- .ident = "Alienware m16 R1 AMD",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Alienware"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Alienware m16 R1 AMD"),
- },
- .driver_data = &quirk_x_series,
- },
- {
- .callback = dmi_matched,
- .ident = "Alienware m17 R5",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Alienware"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Alienware m17 R5 AMD"),
- },
- .driver_data = &quirk_x_series,
- },
- {
- .callback = dmi_matched,
- .ident = "Alienware m18 R2",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Alienware"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Alienware m18 R2"),
- },
- .driver_data = &quirk_x_series,
- },
- {
- .callback = dmi_matched,
- .ident = "Alienware x15 R1",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Alienware"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Alienware x15 R1"),
- },
- .driver_data = &quirk_x_series,
- },
- {
- .callback = dmi_matched,
- .ident = "Alienware x17 R2",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Alienware"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Alienware x17 R2"),
- },
- .driver_data = &quirk_x_series,
- },
- {
- .callback = dmi_matched,
- .ident = "Alienware X51 R1",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Alienware"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Alienware X51"),
- },
- .driver_data = &quirk_x51_r1_r2,
- },
- {
- .callback = dmi_matched,
- .ident = "Alienware X51 R2",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Alienware"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Alienware X51 R2"),
- },
- .driver_data = &quirk_x51_r1_r2,
- },
- {
- .callback = dmi_matched,
- .ident = "Alienware X51 R3",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Alienware"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Alienware X51 R3"),
- },
- .driver_data = &quirk_x51_r3,
- },
- {
- .callback = dmi_matched,
- .ident = "Dell Inc. G15 5510",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "Dell G15 5510"),
- },
- .driver_data = &quirk_g_series,
- },
- {
- .callback = dmi_matched,
- .ident = "Dell Inc. G15 5511",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "Dell G15 5511"),
- },
- .driver_data = &quirk_g_series,
- },
- {
- .callback = dmi_matched,
- .ident = "Dell Inc. G15 5515",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "Dell G15 5515"),
- },
- .driver_data = &quirk_g_series,
- },
- {
- .callback = dmi_matched,
- .ident = "Dell Inc. G3 3500",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "G3 3500"),
- },
- .driver_data = &quirk_g_series,
- },
- {
- .callback = dmi_matched,
- .ident = "Dell Inc. G3 3590",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "G3 3590"),
- },
- .driver_data = &quirk_g_series,
- },
- {
- .callback = dmi_matched,
- .ident = "Dell Inc. G5 5500",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "G5 5500"),
- },
- .driver_data = &quirk_g_series,
- },
- {
- .callback = dmi_matched,
- .ident = "Dell Inc. Inspiron 5675",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 5675"),
- },
- .driver_data = &quirk_inspiron5675,
- },
- {}
-};
-
-struct color_platform {
- u8 blue;
- u8 green;
- u8 red;
-} __packed;
-
-struct wmax_brightness_args {
- u32 led_mask;
- u32 percentage;
-};
-
-struct wmax_basic_args {
- u8 arg;
-};
-
-struct legacy_led_args {
- struct color_platform colors;
- u8 brightness;
- u8 state;
-} __packed;
-
-struct wmax_led_args {
- u32 led_mask;
- struct color_platform colors;
- u8 state;
-} __packed;
-
-struct wmax_u32_args {
- u8 operation;
- u8 arg1;
- u8 arg2;
- u8 arg3;
-};
-
-static struct platform_device *platform_device;
-static struct color_platform colors[4];
-static enum wmax_thermal_mode supported_thermal_profiles[PLATFORM_PROFILE_LAST];
-
-static u8 interface;
-static u8 lighting_control_state;
-static u8 global_brightness;
-
-/*
- * Helpers used for zone control
- */
-static int parse_rgb(const char *buf, struct color_platform *colors)
-{
- long unsigned int rgb;
- int ret;
- union color_union {
- struct color_platform cp;
- int package;
- } repackager;
-
- ret = kstrtoul(buf, 16, &rgb);
- if (ret)
- return ret;
-
- /* RGB triplet notation is 24-bit hexadecimal */
- if (rgb > 0xFFFFFF)
- return -EINVAL;
-
- repackager.package = rgb & 0x0f0f0f0f;
- pr_debug("alienware-wmi: r: %d g:%d b: %d\n",
- repackager.cp.red, repackager.cp.green, repackager.cp.blue);
- *colors = repackager.cp;
- return 0;
-}
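The union repacking in parse_rgb() above only lines up because the low byte of the parsed integer overlays the struct's first member. A minimal userspace sketch of that assumption (little-endian byte order; the value 0x123456 is a made-up example):

	#include <stdint.h>
	#include <stdio.h>

	struct color_platform {
		uint8_t blue;
		uint8_t green;
		uint8_t red;
	} __attribute__((packed));

	int main(void)
	{
		union {
			struct color_platform cp;
			uint32_t package;
		} repack;

		/* 0xRRGGBB: blue is the least-significant byte */
		repack.package = 0x123456;
		printf("r: 0x%02X g: 0x%02X b: 0x%02X\n",
		       repack.cp.red, repack.cp.green, repack.cp.blue);
		return 0;
	}

On a little-endian machine this prints r: 0x12 g: 0x34 b: 0x56, matching the 24-bit RRGGBB notation the sysfs interface accepts.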
-
-/*
- * Individual RGB zone control
- */
-static int alienware_update_led(u8 location)
-{
- int method_id;
- acpi_status status;
- char *guid;
- struct acpi_buffer input;
- struct legacy_led_args legacy_args;
- struct wmax_led_args wmax_basic_args;
- if (interface == WMAX) {
- wmax_basic_args.led_mask = 1 << location;
- wmax_basic_args.colors = colors[location];
- wmax_basic_args.state = lighting_control_state;
- guid = WMAX_CONTROL_GUID;
- method_id = WMAX_METHOD_ZONE_CONTROL;
-
- input.length = sizeof(wmax_basic_args);
- input.pointer = &wmax_basic_args;
- } else {
- legacy_args.colors = colors[location];
- legacy_args.brightness = global_brightness;
- legacy_args.state = 0;
- if (lighting_control_state == LEGACY_BOOTING ||
- lighting_control_state == LEGACY_SUSPEND) {
- guid = LEGACY_POWER_CONTROL_GUID;
- legacy_args.state = lighting_control_state;
- } else
- guid = LEGACY_CONTROL_GUID;
- method_id = location + 1;
-
- input.length = sizeof(legacy_args);
- input.pointer = &legacy_args;
- }
- pr_debug("alienware-wmi: guid %s method %d\n", guid, method_id);
-
- status = wmi_evaluate_method(guid, 0, method_id, &input, NULL);
- if (ACPI_FAILURE(status))
- pr_err("alienware-wmi: zone set failure: %u\n", status);
- return ACPI_FAILURE(status);
-}
-
-static ssize_t zone_show(struct device *dev, struct device_attribute *attr,
- char *buf, u8 location)
-{
- return sprintf(buf, "red: %d, green: %d, blue: %d\n",
- colors[location].red, colors[location].green,
- colors[location].blue);
-
-}
-
-static ssize_t zone_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count, u8 location)
-{
- int ret;
-
- ret = parse_rgb(buf, &colors[location]);
- if (ret)
- return ret;
-
- ret = alienware_update_led(location);
-
- return ret ? ret : count;
-}
-
-static ssize_t zone00_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- return zone_show(dev, attr, buf, 0);
-}
-
-static ssize_t zone00_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- return zone_store(dev, attr, buf, count, 0);
-}
-
-static DEVICE_ATTR_RW(zone00);
-
-static ssize_t zone01_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- return zone_show(dev, attr, buf, 1);
-}
-
-static ssize_t zone01_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- return zone_store(dev, attr, buf, count, 1);
-}
-
-static DEVICE_ATTR_RW(zone01);
-
-static ssize_t zone02_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- return zone_show(dev, attr, buf, 2);
-}
-
-static ssize_t zone02_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- return zone_store(dev, attr, buf, count, 2);
-}
-
-static DEVICE_ATTR_RW(zone02);
-
-static ssize_t zone03_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- return zone_show(dev, attr, buf, 3);
-}
-
-static ssize_t zone03_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- return zone_store(dev, attr, buf, count, 3);
-}
-
-static DEVICE_ATTR_RW(zone03);
-
-/*
- * Lighting control state device attribute (Global)
- */
-static ssize_t lighting_control_state_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- if (lighting_control_state == LEGACY_BOOTING)
- return sysfs_emit(buf, "[booting] running suspend\n");
- else if (lighting_control_state == LEGACY_SUSPEND)
- return sysfs_emit(buf, "booting running [suspend]\n");
-
- return sysfs_emit(buf, "booting [running] suspend\n");
-}
-
-static ssize_t lighting_control_state_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- u8 val;
-
- if (strcmp(buf, "booting\n") == 0)
- val = LEGACY_BOOTING;
- else if (strcmp(buf, "suspend\n") == 0)
- val = LEGACY_SUSPEND;
- else if (interface == LEGACY)
- val = LEGACY_RUNNING;
- else
- val = WMAX_RUNNING;
-
- lighting_control_state = val;
- pr_debug("alienware-wmi: updated control state to %d\n",
- lighting_control_state);
-
- return count;
-}
-
-static DEVICE_ATTR_RW(lighting_control_state);
-
-static umode_t zone_attr_visible(struct kobject *kobj,
- struct attribute *attr, int n)
-{
- if (n < quirks->num_zones + 1)
- return attr->mode;
-
- return 0;
-}
-
-static bool zone_group_visible(struct kobject *kobj)
-{
- return quirks->num_zones > 0;
-}
-DEFINE_SYSFS_GROUP_VISIBLE(zone);
-
-static struct attribute *zone_attrs[] = {
- &dev_attr_lighting_control_state.attr,
- &dev_attr_zone00.attr,
- &dev_attr_zone01.attr,
- &dev_attr_zone02.attr,
- &dev_attr_zone03.attr,
- NULL
-};
-
-static struct attribute_group zone_attribute_group = {
- .name = "rgb_zones",
- .is_visible = SYSFS_GROUP_VISIBLE(zone),
- .attrs = zone_attrs,
-};
-
-/*
- * LED Brightness (Global)
- */
-static int wmax_brightness(int brightness)
-{
- acpi_status status;
- struct acpi_buffer input;
- struct wmax_brightness_args args = {
- .led_mask = 0xFF,
- .percentage = brightness,
- };
- input.length = sizeof(args);
- input.pointer = &args;
- status = wmi_evaluate_method(WMAX_CONTROL_GUID, 0,
- WMAX_METHOD_BRIGHTNESS, &input, NULL);
- if (ACPI_FAILURE(status))
- pr_err("alienware-wmi: brightness set failure: %u\n", status);
- return ACPI_FAILURE(status);
-}
-
-static void global_led_set(struct led_classdev *led_cdev,
- enum led_brightness brightness)
-{
- int ret;
- global_brightness = brightness;
- if (interface == WMAX)
- ret = wmax_brightness(brightness);
- else
- ret = alienware_update_led(0);
- if (ret)
- pr_err("LED brightness update failed\n");
-}
-
-static enum led_brightness global_led_get(struct led_classdev *led_cdev)
-{
- return global_brightness;
-}
-
-static struct led_classdev global_led = {
- .brightness_set = global_led_set,
- .brightness_get = global_led_get,
- .name = "alienware::global_brightness",
-};
-
-static int alienware_zone_init(struct platform_device *dev)
-{
- if (interface == WMAX) {
- lighting_control_state = WMAX_RUNNING;
- } else if (interface == LEGACY) {
- lighting_control_state = LEGACY_RUNNING;
- }
- global_led.max_brightness = 0x0F;
- global_brightness = global_led.max_brightness;
-
- return led_classdev_register(&dev->dev, &global_led);
-}
-
-static void alienware_zone_exit(struct platform_device *dev)
-{
- if (!quirks->num_zones)
- return;
-
- led_classdev_unregister(&global_led);
-}
-
-static acpi_status alienware_wmax_command(void *in_args, size_t in_size,
- u32 command, u32 *out_data)
-{
- acpi_status status;
- union acpi_object *obj;
- struct acpi_buffer input;
- struct acpi_buffer output;
-
- input.length = in_size;
- input.pointer = in_args;
- if (out_data) {
- output.length = ACPI_ALLOCATE_BUFFER;
- output.pointer = NULL;
- status = wmi_evaluate_method(WMAX_CONTROL_GUID, 0,
- command, &input, &output);
- if (ACPI_SUCCESS(status)) {
- obj = (union acpi_object *)output.pointer;
- if (obj && obj->type == ACPI_TYPE_INTEGER)
- *out_data = (u32)obj->integer.value;
- }
- kfree(output.pointer);
- } else {
- status = wmi_evaluate_method(WMAX_CONTROL_GUID, 0,
- command, &input, NULL);
- }
- return status;
-}
-
-/*
- * The HDMI mux sysfs node indicates the status of the HDMI input mux.
- * It can toggle between standard system GPU output and HDMI input.
- */
-static ssize_t cable_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct wmax_basic_args in_args = {
- .arg = 0,
- };
- acpi_status status;
- u32 out_data;
-
- status =
- alienware_wmax_command(&in_args, sizeof(in_args),
- WMAX_METHOD_HDMI_CABLE, &out_data);
- if (ACPI_SUCCESS(status)) {
- if (out_data == 0)
- return sysfs_emit(buf, "[unconnected] connected unknown\n");
- else if (out_data == 1)
- return sysfs_emit(buf, "unconnected [connected] unknown\n");
- }
- pr_err("alienware-wmi: unknown HDMI cable status: %d\n", status);
- return sysfs_emit(buf, "unconnected connected [unknown]\n");
-}
-
-static ssize_t source_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct wmax_basic_args in_args = {
- .arg = 0,
- };
- acpi_status status;
- u32 out_data;
-
- status =
- alienware_wmax_command(&in_args, sizeof(in_args),
- WMAX_METHOD_HDMI_STATUS, &out_data);
-
- if (ACPI_SUCCESS(status)) {
- if (out_data == 1)
- return sysfs_emit(buf, "[input] gpu unknown\n");
- else if (out_data == 2)
- return sysfs_emit(buf, "input [gpu] unknown\n");
- }
- pr_err("alienware-wmi: unknown HDMI source status: %u\n", status);
- return sysfs_emit(buf, "input gpu [unknown]\n");
-}
-
-static ssize_t source_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct wmax_basic_args args;
- acpi_status status;
-
- if (strcmp(buf, "gpu\n") == 0)
- args.arg = 1;
- else if (strcmp(buf, "input\n") == 0)
- args.arg = 2;
- else
- args.arg = 3;
- pr_debug("alienware-wmi: setting hdmi to %d : %s", args.arg, buf);
-
- status = alienware_wmax_command(&args, sizeof(args),
- WMAX_METHOD_HDMI_SOURCE, NULL);
-
- if (ACPI_FAILURE(status))
- pr_err("alienware-wmi: HDMI toggle failed: results: %u\n",
- status);
- return count;
-}
-
-static DEVICE_ATTR_RO(cable);
-static DEVICE_ATTR_RW(source);
-
-static bool hdmi_group_visible(struct kobject *kobj)
-{
- return quirks->hdmi_mux;
-}
-DEFINE_SIMPLE_SYSFS_GROUP_VISIBLE(hdmi);
-
-static struct attribute *hdmi_attrs[] = {
- &dev_attr_cable.attr,
- &dev_attr_source.attr,
- NULL,
-};
-
-static const struct attribute_group hdmi_attribute_group = {
- .name = "hdmi",
- .is_visible = SYSFS_GROUP_VISIBLE(hdmi),
- .attrs = hdmi_attrs,
-};
-
-/*
- * Alienware GFX amplifier support
- * - Currently supports reading cable status
- * - Leaving expansion room to possibly support dock/undock events later
- */
-static ssize_t status_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct wmax_basic_args in_args = {
- .arg = 0,
- };
- acpi_status status;
- u32 out_data;
-
- status =
- alienware_wmax_command(&in_args, sizeof(in_args),
- WMAX_METHOD_AMPLIFIER_CABLE, &out_data);
- if (ACPI_SUCCESS(status)) {
- if (out_data == 0)
- return sysfs_emit(buf, "[unconnected] connected unknown\n");
- else if (out_data == 1)
- return sysfs_emit(buf, "unconnected [connected] unknown\n");
- }
- pr_err("alienware-wmi: unknown amplifier cable status: %d\n", status);
- return sysfs_emit(buf, "unconnected connected [unknown]\n");
-}
-
-static DEVICE_ATTR_RO(status);
-
-static bool amplifier_group_visible(struct kobject *kobj)
-{
- return quirks->amplifier;
-}
-DEFINE_SIMPLE_SYSFS_GROUP_VISIBLE(amplifier);
-
-static struct attribute *amplifier_attrs[] = {
- &dev_attr_status.attr,
- NULL,
-};
-
-static const struct attribute_group amplifier_attribute_group = {
- .name = "amplifier",
- .is_visible = SYSFS_GROUP_VISIBLE(amplifier),
- .attrs = amplifier_attrs,
-};
-
-/*
- * Deep Sleep Control support
- * - Modifies BIOS setting for deep sleep control allowing extra wakeup events
- */
-static ssize_t deepsleep_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct wmax_basic_args in_args = {
- .arg = 0,
- };
- acpi_status status;
- u32 out_data;
-
- status = alienware_wmax_command(&in_args, sizeof(in_args),
- WMAX_METHOD_DEEP_SLEEP_STATUS, &out_data);
- if (ACPI_SUCCESS(status)) {
- if (out_data == 0)
- return sysfs_emit(buf, "[disabled] s5 s5_s4\n");
- else if (out_data == 1)
- return sysfs_emit(buf, "disabled [s5] s5_s4\n");
- else if (out_data == 2)
- return sysfs_emit(buf, "disabled s5 [s5_s4]\n");
- }
- pr_err("alienware-wmi: unknown deep sleep status: %d\n", status);
- return sysfs_emit(buf, "disabled s5 s5_s4 [unknown]\n");
-}
-
-static ssize_t deepsleep_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct wmax_basic_args args;
- acpi_status status;
-
- if (strcmp(buf, "disabled\n") == 0)
- args.arg = 0;
- else if (strcmp(buf, "s5\n") == 0)
- args.arg = 1;
- else
- args.arg = 2;
- pr_debug("alienware-wmi: setting deep sleep to %d : %s", args.arg, buf);
-
- status = alienware_wmax_command(&args, sizeof(args),
- WMAX_METHOD_DEEP_SLEEP_CONTROL, NULL);
-
- if (ACPI_FAILURE(status))
- pr_err("alienware-wmi: deep sleep control failed: results: %u\n",
- status);
- return count;
-}
-
-static DEVICE_ATTR_RW(deepsleep);
-
-static bool deepsleep_group_visible(struct kobject *kobj)
-{
- return quirks->deepslp;
-}
-DEFINE_SIMPLE_SYSFS_GROUP_VISIBLE(deepsleep);
-
-static struct attribute *deepsleep_attrs[] = {
- &dev_attr_deepsleep.attr,
- NULL,
-};
-
-static const struct attribute_group deepsleep_attribute_group = {
- .name = "deepsleep",
- .is_visible = SYSFS_GROUP_VISIBLE(deepsleep),
- .attrs = deepsleep_attrs,
-};
-
-/*
- * Thermal Profile control
- * - Provides thermal profile control through the Platform Profile API
- */
-#define WMAX_THERMAL_TABLE_MASK GENMASK(7, 4)
-#define WMAX_THERMAL_MODE_MASK GENMASK(3, 0)
-#define WMAX_SENSOR_ID_MASK BIT(8)
-
-static bool is_wmax_thermal_code(u32 code)
-{
- if (code & WMAX_SENSOR_ID_MASK)
- return false;
-
- if ((code & WMAX_THERMAL_MODE_MASK) >= THERMAL_MODE_LAST)
- return false;
-
- if ((code & WMAX_THERMAL_TABLE_MASK) == WMAX_THERMAL_TABLE_BASIC &&
- (code & WMAX_THERMAL_MODE_MASK) >= THERMAL_MODE_BASIC_QUIET)
- return true;
-
- if ((code & WMAX_THERMAL_TABLE_MASK) == WMAX_THERMAL_TABLE_USTT &&
- (code & WMAX_THERMAL_MODE_MASK) <= THERMAL_MODE_USTT_LOW_POWER)
- return true;
-
- return false;
-}
-
-static int wmax_thermal_information(u8 operation, u8 arg, u32 *out_data)
-{
- struct wmax_u32_args in_args = {
- .operation = operation,
- .arg1 = arg,
- .arg2 = 0,
- .arg3 = 0,
- };
- acpi_status status;
-
- status = alienware_wmax_command(&in_args, sizeof(in_args),
- WMAX_METHOD_THERMAL_INFORMATION,
- out_data);
-
- if (ACPI_FAILURE(status))
- return -EIO;
-
- if (*out_data == WMAX_FAILURE_CODE)
- return -EBADRQC;
-
- return 0;
-}
-
-static int wmax_thermal_control(u8 profile)
-{
- struct wmax_u32_args in_args = {
- .operation = WMAX_OPERATION_ACTIVATE_PROFILE,
- .arg1 = profile,
- .arg2 = 0,
- .arg3 = 0,
- };
- acpi_status status;
- u32 out_data;
-
- status = alienware_wmax_command(&in_args, sizeof(in_args),
- WMAX_METHOD_THERMAL_CONTROL,
- &out_data);
-
- if (ACPI_FAILURE(status))
- return -EIO;
-
- if (out_data == WMAX_FAILURE_CODE)
- return -EBADRQC;
-
- return 0;
-}
-
-static int wmax_game_shift_status(u8 operation, u32 *out_data)
-{
- struct wmax_u32_args in_args = {
- .operation = operation,
- .arg1 = 0,
- .arg2 = 0,
- .arg3 = 0,
- };
- acpi_status status;
-
- status = alienware_wmax_command(&in_args, sizeof(in_args),
- WMAX_METHOD_GAME_SHIFT_STATUS,
- out_data);
-
- if (ACPI_FAILURE(status))
- return -EIO;
-
- if (*out_data == WMAX_FAILURE_CODE)
- return -EOPNOTSUPP;
-
- return 0;
-}
-
-static int thermal_profile_get(struct device *dev,
- enum platform_profile_option *profile)
-{
- u32 out_data;
- int ret;
-
- ret = wmax_thermal_information(WMAX_OPERATION_CURRENT_PROFILE,
- 0, &out_data);
-
- if (ret < 0)
- return ret;
-
- if (out_data == WMAX_THERMAL_MODE_GMODE) {
- *profile = PLATFORM_PROFILE_PERFORMANCE;
- return 0;
- }
-
- if (!is_wmax_thermal_code(out_data))
- return -ENODATA;
-
- out_data &= WMAX_THERMAL_MODE_MASK;
- *profile = wmax_mode_to_platform_profile[out_data];
-
- return 0;
-}
-
-static int thermal_profile_set(struct device *dev,
- enum platform_profile_option profile)
-{
- if (quirks->gmode) {
- u32 gmode_status;
- int ret;
-
- ret = wmax_game_shift_status(WMAX_OPERATION_GET_GAME_SHIFT_STATUS,
- &gmode_status);
-
- if (ret < 0)
- return ret;
-
- if ((profile == PLATFORM_PROFILE_PERFORMANCE && !gmode_status) ||
- (profile != PLATFORM_PROFILE_PERFORMANCE && gmode_status)) {
- ret = wmax_game_shift_status(WMAX_OPERATION_TOGGLE_GAME_SHIFT,
- &gmode_status);
-
- if (ret < 0)
- return ret;
- }
- }
-
- return wmax_thermal_control(supported_thermal_profiles[profile]);
-}
-
-static int thermal_profile_probe(void *drvdata, unsigned long *choices)
-{
- enum platform_profile_option profile;
- enum wmax_thermal_mode mode;
- u8 sys_desc[4];
- u32 first_mode;
- u32 out_data;
- int ret;
-
- ret = wmax_thermal_information(WMAX_OPERATION_SYS_DESCRIPTION,
- 0, (u32 *) &sys_desc);
- if (ret < 0)
- return ret;
-
- first_mode = sys_desc[0] + sys_desc[1];
-
- for (u32 i = 0; i < sys_desc[3]; i++) {
- ret = wmax_thermal_information(WMAX_OPERATION_LIST_IDS,
- i + first_mode, &out_data);
-
- if (ret == -EIO)
- return ret;
-
- if (ret == -EBADRQC)
- break;
-
- if (!is_wmax_thermal_code(out_data))
- continue;
-
- mode = out_data & WMAX_THERMAL_MODE_MASK;
- profile = wmax_mode_to_platform_profile[mode];
- supported_thermal_profiles[profile] = out_data;
-
- set_bit(profile, choices);
- }
-
- if (bitmap_empty(choices, PLATFORM_PROFILE_LAST))
- return -ENODEV;
-
- if (quirks->gmode) {
- supported_thermal_profiles[PLATFORM_PROFILE_PERFORMANCE] =
- WMAX_THERMAL_MODE_GMODE;
-
- set_bit(PLATFORM_PROFILE_PERFORMANCE, choices);
- }
-
- return 0;
-}
-
-static const struct platform_profile_ops awcc_platform_profile_ops = {
- .probe = thermal_profile_probe,
- .profile_get = thermal_profile_get,
- .profile_set = thermal_profile_set,
-};
-
-static int create_thermal_profile(struct platform_device *platform_device)
-{
- struct device *ppdev;
-
- ppdev = devm_platform_profile_register(&platform_device->dev, "alienware-wmi",
- NULL, &awcc_platform_profile_ops);
-
- return PTR_ERR_OR_ZERO(ppdev);
-}
-
-/*
- * Platform Driver
- */
-static const struct attribute_group *alienfx_groups[] = {
- &zone_attribute_group,
- &hdmi_attribute_group,
- &amplifier_attribute_group,
- &deepsleep_attribute_group,
- NULL
-};
-
-static struct platform_driver platform_driver = {
- .driver = {
- .name = "alienware-wmi",
- .dev_groups = alienfx_groups,
- },
-};
-
-static int __init alienware_wmi_init(void)
-{
- int ret;
-
- if (wmi_has_guid(LEGACY_CONTROL_GUID))
- interface = LEGACY;
- else if (wmi_has_guid(WMAX_CONTROL_GUID))
- interface = WMAX;
- else {
- pr_warn("alienware-wmi: No known WMI GUID found\n");
- return -ENODEV;
- }
-
- dmi_check_system(alienware_quirks);
- if (quirks == NULL)
- quirks = &quirk_unknown;
-
- if (force_platform_profile)
- quirks->thermal = true;
-
- if (force_gmode) {
- if (quirks->thermal)
- quirks->gmode = true;
- else
- pr_warn("force_gmode requires platform profile support\n");
- }
-
- ret = platform_driver_register(&platform_driver);
- if (ret)
- goto fail_platform_driver;
- platform_device = platform_device_alloc("alienware-wmi", PLATFORM_DEVID_NONE);
- if (!platform_device) {
- ret = -ENOMEM;
- goto fail_platform_device1;
- }
- ret = platform_device_add(platform_device);
- if (ret)
- goto fail_platform_device2;
-
- if (quirks->thermal) {
- ret = create_thermal_profile(platform_device);
- if (ret)
- goto fail_prep_thermal_profile;
- }
-
- if (quirks->num_zones > 0) {
- ret = alienware_zone_init(platform_device);
- if (ret)
- goto fail_prep_zones;
- }
-
- return 0;
-
-fail_prep_zones:
- alienware_zone_exit(platform_device);
-fail_prep_thermal_profile:
- platform_device_del(platform_device);
-fail_platform_device2:
- platform_device_put(platform_device);
-fail_platform_device1:
- platform_driver_unregister(&platform_driver);
-fail_platform_driver:
- return ret;
-}
-
-module_init(alienware_wmi_init);
-
-static void __exit alienware_wmi_exit(void)
-{
- alienware_zone_exit(platform_device);
- platform_device_unregister(platform_device);
- platform_driver_unregister(&platform_driver);
-}
-
-module_exit(alienware_wmi_exit);
diff --git a/drivers/platform/x86/dell/alienware-wmi.h b/drivers/platform/x86/dell/alienware-wmi.h
new file mode 100644
index 000000000000..68d4242211ae
--- /dev/null
+++ b/drivers/platform/x86/dell/alienware-wmi.h
@@ -0,0 +1,117 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Alienware WMI special features driver
+ *
+ * Copyright (C) 2014 Dell Inc <Dell.Client.Kernel@dell.com>
+ * Copyright (C) 2024 Kurt Borja <kuurtb@gmail.com>
+ */
+
+#ifndef _ALIENWARE_WMI_H_
+#define _ALIENWARE_WMI_H_
+
+#include <linux/leds.h>
+#include <linux/platform_device.h>
+#include <linux/wmi.h>
+
+#define LEGACY_CONTROL_GUID "A90597CE-A997-11DA-B012-B622A1EF5492"
+#define LEGACY_POWER_CONTROL_GUID "A80593CE-A997-11DA-B012-B622A1EF5492"
+#define WMAX_CONTROL_GUID "A70591CE-A997-11DA-B012-B622A1EF5492"
+
+enum INTERFACE_FLAGS {
+ LEGACY,
+ WMAX,
+};
+
+enum LEGACY_CONTROL_STATES {
+ LEGACY_RUNNING = 1,
+ LEGACY_BOOTING = 0,
+ LEGACY_SUSPEND = 3,
+};
+
+enum WMAX_CONTROL_STATES {
+ WMAX_RUNNING = 0xFF,
+ WMAX_BOOTING = 0,
+ WMAX_SUSPEND = 3,
+};
+
+struct alienfx_quirks {
+ u8 num_zones;
+ bool hdmi_mux;
+ bool amplifier;
+ bool deepslp;
+};
+
+struct color_platform {
+ u8 blue;
+ u8 green;
+ u8 red;
+} __packed;
+
+struct alienfx_priv {
+ struct platform_device *pdev;
+ struct led_classdev global_led;
+ struct color_platform colors[4];
+ u8 global_brightness;
+ u8 lighting_control_state;
+};
+
+struct alienfx_ops {
+ int (*upd_led)(struct alienfx_priv *priv, struct wmi_device *wdev,
+ u8 location);
+ int (*upd_brightness)(struct alienfx_priv *priv, struct wmi_device *wdev,
+ u8 brightness);
+};
+
+struct alienfx_platdata {
+ struct wmi_device *wdev;
+ struct alienfx_ops ops;
+};
+
+extern u8 alienware_interface;
+extern struct alienfx_quirks *alienfx;
+
+int alienware_wmi_command(struct wmi_device *wdev, u32 method_id,
+ void *in_args, size_t in_size, u32 *out_data);
+
+int alienware_alienfx_setup(struct alienfx_platdata *pdata);
+
+#if IS_ENABLED(CONFIG_ALIENWARE_WMI_LEGACY)
+int __init alienware_legacy_wmi_init(void);
+void __exit alienware_legacy_wmi_exit(void);
+#else
+static inline int alienware_legacy_wmi_init(void)
+{
+ return -ENODEV;
+}
+
+static inline void alienware_legacy_wmi_exit(void)
+{
+}
+#endif
+
+#if IS_ENABLED(CONFIG_ALIENWARE_WMI_WMAX)
+extern const struct attribute_group wmax_hdmi_attribute_group;
+extern const struct attribute_group wmax_amplifier_attribute_group;
+extern const struct attribute_group wmax_deepsleep_attribute_group;
+
+#define WMAX_DEV_GROUPS &wmax_hdmi_attribute_group, \
+ &wmax_amplifier_attribute_group, \
+ &wmax_deepsleep_attribute_group,
+
+int __init alienware_wmax_wmi_init(void);
+void __exit alienware_wmax_wmi_exit(void);
+#else
+#define WMAX_DEV_GROUPS
+
+static inline int alienware_wmax_wmi_init(void)
+{
+ return -ENODEV;
+}
+
+static inline void alienware_wmax_wmi_exit(void)
+{
+}
+#endif
+
+#endif
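The trailing comma inside WMAX_DEV_GROUPS is what lets the macro expand to nothing when CONFIG_ALIENWARE_WMI_WMAX is disabled. A hypothetical consumer (the group name used here is only a placeholder) would drop it straight into an attribute-group list:

	static const struct attribute_group *alienfx_groups[] = {
		&zone_attribute_group,	/* placeholder for the LED zone group */
		WMAX_DEV_GROUPS		/* empty, or three extra entries, per config */
		NULL
	};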
diff --git a/drivers/platform/x86/dell/dell-uart-backlight.c b/drivers/platform/x86/dell/dell-uart-backlight.c
index 50002ef13d5d..8f868f845350 100644
--- a/drivers/platform/x86/dell/dell-uart-backlight.c
+++ b/drivers/platform/x86/dell/dell-uart-backlight.c
@@ -325,7 +325,7 @@ static int dell_uart_bl_serdev_probe(struct serdev_device *serdev)
return PTR_ERR_OR_ZERO(dell_bl->bl);
}
-struct serdev_device_driver dell_uart_bl_serdev_driver = {
+static struct serdev_device_driver dell_uart_bl_serdev_driver = {
.probe = dell_uart_bl_serdev_probe,
.driver = {
.name = KBUILD_MODNAME,
diff --git a/drivers/platform/x86/dell/dell-wmi-ddv.c b/drivers/platform/x86/dell/dell-wmi-ddv.c
index e75cd6e1efe6..f27739da380f 100644
--- a/drivers/platform/x86/dell/dell-wmi-ddv.c
+++ b/drivers/platform/x86/dell/dell-wmi-ddv.c
@@ -104,7 +104,6 @@ struct dell_wmi_ddv_sensors {
struct dell_wmi_ddv_data {
struct acpi_battery_hook hook;
- struct device_attribute temp_attr;
struct device_attribute eppid_attr;
struct dell_wmi_ddv_sensors fans;
struct dell_wmi_ddv_sensors temps;
@@ -651,24 +650,6 @@ static int dell_wmi_ddv_battery_index(struct acpi_device *acpi_dev, u32 *index)
return kstrtou32(uid_str, 10, index);
}
-static ssize_t temp_show(struct device *dev, struct device_attribute *attr, char *buf)
-{
- struct dell_wmi_ddv_data *data = container_of(attr, struct dell_wmi_ddv_data, temp_attr);
- u32 index, value;
- int ret;
-
- ret = dell_wmi_ddv_battery_index(to_acpi_device(dev->parent), &index);
- if (ret < 0)
- return ret;
-
- ret = dell_wmi_ddv_query_integer(data->wdev, DELL_DDV_BATTERY_TEMPERATURE, index, &value);
- if (ret < 0)
- return ret;
-
- /* Use 2731 instead of 2731.5 to avoid unnecessary rounding */
- return sysfs_emit(buf, "%d\n", value - 2731);
-}
-
static ssize_t eppid_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct dell_wmi_ddv_data *data = container_of(attr, struct dell_wmi_ddv_data, eppid_attr);
@@ -695,6 +676,46 @@ static ssize_t eppid_show(struct device *dev, struct device_attribute *attr, cha
return ret;
}
+static int dell_wmi_ddv_get_property(struct power_supply *psy, const struct power_supply_ext *ext,
+ void *drvdata, enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct dell_wmi_ddv_data *data = drvdata;
+ u32 index, value;
+ int ret;
+
+ ret = dell_wmi_ddv_battery_index(to_acpi_device(psy->dev.parent), &index);
+ if (ret < 0)
+ return ret;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_TEMP:
+ ret = dell_wmi_ddv_query_integer(data->wdev, DELL_DDV_BATTERY_TEMPERATURE, index,
+ &value);
+ if (ret < 0)
+ return ret;
+
+ /* Use 2732 instead of 2731.5 to avoid unnecessary rounding and to emulate
+ * the behaviour of the OEM application which seems to round down the result.
+ */
+ val->intval = value - 2732;
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
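As a quick numeric check of the rounding note above (the reading of 3035 deci-kelvin is hypothetical): the exact conversion would be 3035 - 2731.5 = 303.5, i.e. 30.35 °C, while the driver reports 3035 - 2732 = 303, i.e. 30.3 °C in POWER_SUPPLY_PROP_TEMP units, consistent with the OEM application's apparent round-down.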
+
+static const enum power_supply_property dell_wmi_ddv_properties[] = {
+ POWER_SUPPLY_PROP_TEMP,
+};
+
+static const struct power_supply_ext dell_wmi_ddv_extension = {
+ .name = DRIVER_NAME,
+ .properties = dell_wmi_ddv_properties,
+ .num_properties = ARRAY_SIZE(dell_wmi_ddv_properties),
+ .get_property = dell_wmi_ddv_get_property,
+};
+
static int dell_wmi_ddv_add_battery(struct power_supply *battery, struct acpi_battery_hook *hook)
{
struct dell_wmi_ddv_data *data = container_of(hook, struct dell_wmi_ddv_data, hook);
@@ -706,13 +727,14 @@ static int dell_wmi_ddv_add_battery(struct power_supply *battery, struct acpi_ba
if (ret < 0)
return 0;
- ret = device_create_file(&battery->dev, &data->temp_attr);
+ ret = device_create_file(&battery->dev, &data->eppid_attr);
if (ret < 0)
return ret;
- ret = device_create_file(&battery->dev, &data->eppid_attr);
+ ret = power_supply_register_extension(battery, &dell_wmi_ddv_extension, &data->wdev->dev,
+ data);
if (ret < 0) {
- device_remove_file(&battery->dev, &data->temp_attr);
+ device_remove_file(&battery->dev, &data->eppid_attr);
return ret;
}
@@ -724,38 +746,24 @@ static int dell_wmi_ddv_remove_battery(struct power_supply *battery, struct acpi
{
struct dell_wmi_ddv_data *data = container_of(hook, struct dell_wmi_ddv_data, hook);
- device_remove_file(&battery->dev, &data->temp_attr);
device_remove_file(&battery->dev, &data->eppid_attr);
+ power_supply_unregister_extension(battery, &dell_wmi_ddv_extension);
return 0;
}
-static void dell_wmi_ddv_battery_remove(void *data)
-{
- struct acpi_battery_hook *hook = data;
-
- battery_hook_unregister(hook);
-}
-
static int dell_wmi_ddv_battery_add(struct dell_wmi_ddv_data *data)
{
data->hook.name = "Dell DDV Battery Extension";
data->hook.add_battery = dell_wmi_ddv_add_battery;
data->hook.remove_battery = dell_wmi_ddv_remove_battery;
- sysfs_attr_init(&data->temp_attr.attr);
- data->temp_attr.attr.name = "temp";
- data->temp_attr.attr.mode = 0444;
- data->temp_attr.show = temp_show;
-
sysfs_attr_init(&data->eppid_attr.attr);
data->eppid_attr.attr.name = "eppid";
data->eppid_attr.attr.mode = 0444;
data->eppid_attr.show = eppid_show;
- battery_hook_register(&data->hook);
-
- return devm_add_action_or_reset(&data->wdev->dev, dell_wmi_ddv_battery_remove, &data->hook);
+ return devm_battery_hook_register(&data->wdev->dev, &data->hook);
}
static int dell_wmi_ddv_buffer_read(struct seq_file *seq, enum dell_ddv_method method)
diff --git a/drivers/platform/x86/dell/dell-wmi-sysman/Makefile b/drivers/platform/x86/dell/dell-wmi-sysman/Makefile
index 825fb2fbeea8..0a6df449e222 100644
--- a/drivers/platform/x86/dell/dell-wmi-sysman/Makefile
+++ b/drivers/platform/x86/dell/dell-wmi-sysman/Makefile
@@ -1,5 +1,5 @@
obj-$(CONFIG_DELL_WMI_SYSMAN) += dell-wmi-sysman.o
-dell-wmi-sysman-objs := sysman.o \
+dell-wmi-sysman-y := sysman.o \
enum-attributes.o \
int-attributes.o \
string-attributes.o \
diff --git a/drivers/platform/x86/hp/hp-bioscfg/Makefile b/drivers/platform/x86/hp/hp-bioscfg/Makefile
index 67be0d917753..7d23649b34dc 100644
--- a/drivers/platform/x86/hp/hp-bioscfg/Makefile
+++ b/drivers/platform/x86/hp/hp-bioscfg/Makefile
@@ -1,6 +1,6 @@
obj-$(CONFIG_HP_BIOSCFG) := hp-bioscfg.o
-hp-bioscfg-objs := bioscfg.o \
+hp-bioscfg-y := bioscfg.o \
biosattr-interface.o \
enum-attributes.o \
int-attributes.o \
diff --git a/drivers/platform/x86/hp/hp-bioscfg/bioscfg.c b/drivers/platform/x86/hp/hp-bioscfg/bioscfg.c
index 0b277b7e37dd..13237890fc92 100644
--- a/drivers/platform/x86/hp/hp-bioscfg/bioscfg.c
+++ b/drivers/platform/x86/hp/hp-bioscfg/bioscfg.c
@@ -388,16 +388,13 @@ union acpi_object *hp_get_wmiobj_pointer(int instance_id, const char *guid_strin
*/
int hp_get_instance_count(const char *guid_string)
{
- union acpi_object *wmi_obj = NULL;
- int i = 0;
+ int ret;
- do {
- kfree(wmi_obj);
- wmi_obj = hp_get_wmiobj_pointer(i, guid_string);
- i++;
- } while (wmi_obj);
+ ret = wmi_instance_count(guid_string);
+ if (ret < 0)
+ return 0;
- return i - 1;
+ return ret;
}
/**
@@ -448,7 +445,7 @@ int hp_convert_hexstr_to_str(const char *input, u32 input_len, char **str, int *
return -ENOMEM;
for (i = 0; i < input_len; i += 5) {
- strncpy(tmp, input + i, strlen(tmp));
+ strscpy(tmp, input + i);
if (kstrtol(tmp, 16, &ch) == 0) {
// escape char
if (ch == '\\' ||
diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
index 30bd366d7b58..17a09b7784ed 100644
--- a/drivers/platform/x86/ideapad-laptop.c
+++ b/drivers/platform/x86/ideapad-laptop.c
@@ -854,6 +854,7 @@ static const struct attribute_group ideapad_attribute_group = {
.is_visible = ideapad_is_visible,
.attrs = ideapad_attributes
};
+__ATTRIBUTE_GROUPS(ideapad_attribute);
/*
* DYTC Platform profile
@@ -1245,21 +1246,6 @@ static void ideapad_unregister_rfkill(struct ideapad_private *priv, int dev)
}
/*
- * Platform device
- */
-static int ideapad_sysfs_init(struct ideapad_private *priv)
-{
- return device_add_group(&priv->platform_device->dev,
- &ideapad_attribute_group);
-}
-
-static void ideapad_sysfs_exit(struct ideapad_private *priv)
-{
- device_remove_group(&priv->platform_device->dev,
- &ideapad_attribute_group);
-}
-
-/*
* input device
*/
#define IDEAPAD_WMI_KEY 0x100
@@ -2175,10 +2161,6 @@ static int ideapad_acpi_add(struct platform_device *pdev)
ideapad_check_features(priv);
- err = ideapad_sysfs_init(priv);
- if (err)
- return err;
-
ideapad_debugfs_init(priv);
err = ideapad_input_init(priv);
@@ -2265,7 +2247,6 @@ backlight_failed:
input_failed:
ideapad_debugfs_exit(priv);
- ideapad_sysfs_exit(priv);
return err;
}
@@ -2293,7 +2274,6 @@ static void ideapad_acpi_remove(struct platform_device *pdev)
ideapad_kbd_bl_exit(priv);
ideapad_input_exit(priv);
ideapad_debugfs_exit(priv);
- ideapad_sysfs_exit(priv);
}
#ifdef CONFIG_PM_SLEEP
@@ -2325,6 +2305,7 @@ static struct platform_driver ideapad_acpi_driver = {
.name = "ideapad_acpi",
.pm = &ideapad_pm,
.acpi_match_table = ACPI_PTR(ideapad_device_ids),
+ .dev_groups = ideapad_attribute_groups,
},
};
diff --git a/drivers/platform/x86/intel/hid.c b/drivers/platform/x86/intel/hid.c
index 927a2993f616..88a1a9ff2f34 100644
--- a/drivers/platform/x86/intel/hid.c
+++ b/drivers/platform/x86/intel/hid.c
@@ -139,6 +139,13 @@ static const struct dmi_system_id button_array_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "Surface Go 3"),
},
},
+ {
+ .ident = "Microsoft Surface Go 4",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Surface Go 4"),
+ },
+ },
{ }
};
diff --git a/drivers/platform/x86/intel/ifs/Makefile b/drivers/platform/x86/intel/ifs/Makefile
index 30f035ef5581..c3e417bce9b6 100644
--- a/drivers/platform/x86/intel/ifs/Makefile
+++ b/drivers/platform/x86/intel/ifs/Makefile
@@ -1,3 +1,3 @@
obj-$(CONFIG_INTEL_IFS) += intel_ifs.o
-intel_ifs-objs := core.o load.o runtest.o sysfs.o
+intel_ifs-y := core.o load.o runtest.o sysfs.o
diff --git a/drivers/platform/x86/intel/int3472/discrete.c b/drivers/platform/x86/intel/int3472/discrete.c
index 092252eb95a8..30ff8f3ea1f5 100644
--- a/drivers/platform/x86/intel/int3472/discrete.c
+++ b/drivers/platform/x86/intel/int3472/discrete.c
@@ -56,7 +56,7 @@ static void skl_int3472_log_sensor_module_name(struct int3472_discrete_device *i
static int skl_int3472_fill_gpiod_lookup(struct gpiod_lookup *table_entry,
struct acpi_resource_gpio *agpio,
- const char *func, unsigned long gpio_flags)
+ const char *con_id, unsigned long gpio_flags)
{
char *path = agpio->resource_source.string_ptr;
struct acpi_device *adev;
@@ -71,14 +71,14 @@ static int skl_int3472_fill_gpiod_lookup(struct gpiod_lookup *table_entry,
if (!adev)
return -ENODEV;
- *table_entry = GPIO_LOOKUP(acpi_dev_name(adev), agpio->pin_table[0], func, gpio_flags);
+ *table_entry = GPIO_LOOKUP(acpi_dev_name(adev), agpio->pin_table[0], con_id, gpio_flags);
return 0;
}
static int skl_int3472_map_gpio_to_sensor(struct int3472_discrete_device *int3472,
struct acpi_resource_gpio *agpio,
- const char *func, unsigned long gpio_flags)
+ const char *con_id, unsigned long gpio_flags)
{
int ret;
@@ -88,7 +88,7 @@ static int skl_int3472_map_gpio_to_sensor(struct int3472_discrete_device *int347
}
ret = skl_int3472_fill_gpiod_lookup(&int3472->gpios.table[int3472->n_sensor_gpios],
- agpio, func, gpio_flags);
+ agpio, con_id, gpio_flags);
if (ret)
return ret;
@@ -101,7 +101,7 @@ static int skl_int3472_map_gpio_to_sensor(struct int3472_discrete_device *int347
static struct gpio_desc *
skl_int3472_gpiod_get_from_temp_lookup(struct int3472_discrete_device *int3472,
struct acpi_resource_gpio *agpio,
- const char *func, unsigned long gpio_flags)
+ const char *con_id, unsigned long gpio_flags)
{
struct gpio_desc *desc;
int ret;
@@ -112,12 +112,12 @@ skl_int3472_gpiod_get_from_temp_lookup(struct int3472_discrete_device *int3472,
return ERR_PTR(-ENOMEM);
lookup->dev_id = dev_name(int3472->dev);
- ret = skl_int3472_fill_gpiod_lookup(&lookup->table[0], agpio, func, gpio_flags);
+ ret = skl_int3472_fill_gpiod_lookup(&lookup->table[0], agpio, con_id, gpio_flags);
if (ret)
return ERR_PTR(ret);
gpiod_add_lookup_table(lookup);
- desc = devm_gpiod_get(int3472->dev, func, GPIOD_OUT_LOW);
+ desc = devm_gpiod_get(int3472->dev, con_id, GPIOD_OUT_LOW);
gpiod_remove_lookup_table(lookup);
return desc;
@@ -129,7 +129,7 @@ skl_int3472_gpiod_get_from_temp_lookup(struct int3472_discrete_device *int3472,
* @hid: The ACPI HID of the device without the instance number e.g. INT347E
* @type_from: The GPIO type from ACPI ?SDT
* @type_to: The assigned GPIO type, typically same as @type_from
- * @func: The function, e.g. "enable"
+ * @con_id: The name of the GPIO for the device
* @polarity_low: GPIO_ACTIVE_LOW true if the @polarity_low is true,
* GPIO_ACTIVE_HIGH otherwise
*/
@@ -138,15 +138,15 @@ struct int3472_gpio_map {
u8 type_from;
u8 type_to;
bool polarity_low;
- const char *func;
+ const char *con_id;
};
static const struct int3472_gpio_map int3472_gpio_map[] = {
{ "INT347E", INT3472_GPIO_TYPE_RESET, INT3472_GPIO_TYPE_RESET, false, "enable" },
};
-static void int3472_get_func_and_polarity(struct acpi_device *adev, u8 *type,
- const char **func, unsigned long *gpio_flags)
+static void int3472_get_con_id_and_polarity(struct acpi_device *adev, u8 *type,
+ const char **con_id, unsigned long *gpio_flags)
{
unsigned int i;
@@ -165,33 +165,33 @@ static void int3472_get_func_and_polarity(struct acpi_device *adev, u8 *type,
*type = int3472_gpio_map[i].type_to;
*gpio_flags = int3472_gpio_map[i].polarity_low ?
GPIO_ACTIVE_LOW : GPIO_ACTIVE_HIGH;
- *func = int3472_gpio_map[i].func;
+ *con_id = int3472_gpio_map[i].con_id;
return;
}
switch (*type) {
case INT3472_GPIO_TYPE_RESET:
- *func = "reset";
+ *con_id = "reset";
*gpio_flags = GPIO_ACTIVE_LOW;
break;
case INT3472_GPIO_TYPE_POWERDOWN:
- *func = "powerdown";
+ *con_id = "powerdown";
*gpio_flags = GPIO_ACTIVE_LOW;
break;
case INT3472_GPIO_TYPE_CLK_ENABLE:
- *func = "clk-enable";
+ *con_id = "clk-enable";
*gpio_flags = GPIO_ACTIVE_HIGH;
break;
case INT3472_GPIO_TYPE_PRIVACY_LED:
- *func = "privacy-led";
+ *con_id = "privacy-led";
*gpio_flags = GPIO_ACTIVE_HIGH;
break;
case INT3472_GPIO_TYPE_POWER_ENABLE:
- *func = "power-enable";
+ *con_id = "power-enable";
*gpio_flags = GPIO_ACTIVE_HIGH;
break;
default:
- *func = "unknown";
+ *con_id = "unknown";
*gpio_flags = GPIO_ACTIVE_HIGH;
break;
}
@@ -238,7 +238,7 @@ static int skl_int3472_handle_gpio_resources(struct acpi_resource *ares,
union acpi_object *obj;
struct gpio_desc *gpio;
const char *err_msg;
- const char *func;
+ const char *con_id;
unsigned long gpio_flags;
int ret;
@@ -262,26 +262,26 @@ static int skl_int3472_handle_gpio_resources(struct acpi_resource *ares,
type = FIELD_GET(INT3472_GPIO_DSM_TYPE, obj->integer.value);
- int3472_get_func_and_polarity(int3472->sensor, &type, &func, &gpio_flags);
+ int3472_get_con_id_and_polarity(int3472->sensor, &type, &con_id, &gpio_flags);
pin = FIELD_GET(INT3472_GPIO_DSM_PIN, obj->integer.value);
/* Pin field is not really used under Windows and wraps around at 8 bits */
if (pin != (agpio->pin_table[0] & 0xff))
dev_dbg(int3472->dev, FW_BUG "%s %s pin number mismatch _DSM %d resource %d\n",
- func, agpio->resource_source.string_ptr, pin, agpio->pin_table[0]);
+ con_id, agpio->resource_source.string_ptr, pin, agpio->pin_table[0]);
active_value = FIELD_GET(INT3472_GPIO_DSM_SENSOR_ON_VAL, obj->integer.value);
if (!active_value)
gpio_flags ^= GPIO_ACTIVE_LOW;
- dev_dbg(int3472->dev, "%s %s pin %d active-%s\n", func,
+ dev_dbg(int3472->dev, "%s %s pin %d active-%s\n", con_id,
agpio->resource_source.string_ptr, agpio->pin_table[0],
str_high_low(gpio_flags == GPIO_ACTIVE_HIGH));
switch (type) {
case INT3472_GPIO_TYPE_RESET:
case INT3472_GPIO_TYPE_POWERDOWN:
- ret = skl_int3472_map_gpio_to_sensor(int3472, agpio, func, gpio_flags);
+ ret = skl_int3472_map_gpio_to_sensor(int3472, agpio, con_id, gpio_flags);
if (ret)
err_msg = "Failed to map GPIO pin to sensor\n";
@@ -289,7 +289,7 @@ static int skl_int3472_handle_gpio_resources(struct acpi_resource *ares,
case INT3472_GPIO_TYPE_CLK_ENABLE:
case INT3472_GPIO_TYPE_PRIVACY_LED:
case INT3472_GPIO_TYPE_POWER_ENABLE:
- gpio = skl_int3472_gpiod_get_from_temp_lookup(int3472, agpio, func, gpio_flags);
+ gpio = skl_int3472_gpiod_get_from_temp_lookup(int3472, agpio, con_id, gpio_flags);
if (IS_ERR(gpio)) {
ret = PTR_ERR(gpio);
err_msg = "Failed to get GPIO\n";
diff --git a/drivers/platform/x86/intel/pmc/Makefile b/drivers/platform/x86/intel/pmc/Makefile
index 389e5419dadf..b148b40d09f5 100644
--- a/drivers/platform/x86/intel/pmc/Makefile
+++ b/drivers/platform/x86/intel/pmc/Makefile
@@ -4,7 +4,7 @@
#
intel_pmc_core-y := core.o core_ssram.o spt.o cnp.o \
- icl.o tgl.o adl.o mtl.o arl.o lnl.o
+ icl.o tgl.o adl.o mtl.o arl.o lnl.o ptl.o
obj-$(CONFIG_INTEL_PMC_CORE) += intel_pmc_core.o
intel_pmc_core_pltdrv-y := pltdrv.o
obj-$(CONFIG_INTEL_PMC_CORE) += intel_pmc_core_pltdrv.o
diff --git a/drivers/platform/x86/intel/pmc/adl.c b/drivers/platform/x86/intel/pmc/adl.c
index e7878558fd90..9e7dfd6e3310 100644
--- a/drivers/platform/x86/intel/pmc/adl.c
+++ b/drivers/platform/x86/intel/pmc/adl.c
@@ -11,7 +11,7 @@
#include "core.h"
/* Alder Lake: PGD PFET Enable Ack Status Register(s) bitmap */
-const struct pmc_bit_map adl_pfear_map[] = {
+static const struct pmc_bit_map adl_pfear_map[] = {
{"SPI/eSPI", BIT(2)},
{"XHCI", BIT(3)},
{"SPA", BIT(4)},
@@ -54,7 +54,7 @@ const struct pmc_bit_map adl_pfear_map[] = {
{}
};
-const struct pmc_bit_map *ext_adl_pfear_map[] = {
+static const struct pmc_bit_map *ext_adl_pfear_map[] = {
/*
* Check intel_pmc_core_ids[] users of cnp_reg_map for
* a list of core SoCs using this.
@@ -63,7 +63,7 @@ const struct pmc_bit_map *ext_adl_pfear_map[] = {
NULL
};
-const struct pmc_bit_map adl_ltr_show_map[] = {
+static const struct pmc_bit_map adl_ltr_show_map[] = {
{"SOUTHPORT_A", CNP_PMC_LTR_SPA},
{"SOUTHPORT_B", CNP_PMC_LTR_SPB},
{"SATA", CNP_PMC_LTR_SATA},
@@ -100,7 +100,7 @@ const struct pmc_bit_map adl_ltr_show_map[] = {
{}
};
-const struct pmc_bit_map adl_clocksource_status_map[] = {
+static const struct pmc_bit_map adl_clocksource_status_map[] = {
{"CLKPART1_OFF_STS", BIT(0)},
{"CLKPART2_OFF_STS", BIT(1)},
{"CLKPART3_OFF_STS", BIT(2)},
@@ -128,7 +128,7 @@ const struct pmc_bit_map adl_clocksource_status_map[] = {
{}
};
-const struct pmc_bit_map adl_power_gating_status_0_map[] = {
+static const struct pmc_bit_map adl_power_gating_status_0_map[] = {
{"PMC_PGD0_PG_STS", BIT(0)},
{"DMI_PGD0_PG_STS", BIT(1)},
{"ESPISPI_PGD0_PG_STS", BIT(2)},
@@ -158,7 +158,7 @@ const struct pmc_bit_map adl_power_gating_status_0_map[] = {
{}
};
-const struct pmc_bit_map adl_power_gating_status_1_map[] = {
+static const struct pmc_bit_map adl_power_gating_status_1_map[] = {
{"USBR0_PGD0_PG_STS", BIT(0)},
{"SMT1_PGD0_PG_STS", BIT(2)},
{"CSMERTC_PGD0_PG_STS", BIT(6)},
@@ -170,14 +170,14 @@ const struct pmc_bit_map adl_power_gating_status_1_map[] = {
{}
};
-const struct pmc_bit_map adl_power_gating_status_2_map[] = {
+static const struct pmc_bit_map adl_power_gating_status_2_map[] = {
{"THC0_PGD0_PG_STS", BIT(7)},
{"THC1_PGD0_PG_STS", BIT(8)},
{"SPF_PGD0_PG_STS", BIT(14)},
{}
};
-const struct pmc_bit_map adl_d3_status_0_map[] = {
+static const struct pmc_bit_map adl_d3_status_0_map[] = {
{"ISH_D3_STS", BIT(2)},
{"LPSS_D3_STS", BIT(3)},
{"XDCI_D3_STS", BIT(4)},
@@ -193,13 +193,13 @@ const struct pmc_bit_map adl_d3_status_0_map[] = {
{}
};
-const struct pmc_bit_map adl_d3_status_1_map[] = {
+static const struct pmc_bit_map adl_d3_status_1_map[] = {
{"GBE_D3_STS", BIT(19)},
{"CNVI_D3_STS", BIT(27)},
{}
};
-const struct pmc_bit_map adl_d3_status_2_map[] = {
+static const struct pmc_bit_map adl_d3_status_2_map[] = {
{"CSMERTC_D3_STS", BIT(1)},
{"CSE_D3_STS", BIT(4)},
{"KVMCC_D3_STS", BIT(5)},
@@ -210,20 +210,20 @@ const struct pmc_bit_map adl_d3_status_2_map[] = {
{}
};
-const struct pmc_bit_map adl_d3_status_3_map[] = {
+static const struct pmc_bit_map adl_d3_status_3_map[] = {
{"THC0_D3_STS", BIT(14)},
{"THC1_D3_STS", BIT(15)},
{}
};
-const struct pmc_bit_map adl_vnn_req_status_0_map[] = {
+static const struct pmc_bit_map adl_vnn_req_status_0_map[] = {
{"ISH_VNN_REQ_STS", BIT(2)},
{"ESPISPI_VNN_REQ_STS", BIT(18)},
{"DSP_VNN_REQ_STS", BIT(19)},
{}
};
-const struct pmc_bit_map adl_vnn_req_status_1_map[] = {
+static const struct pmc_bit_map adl_vnn_req_status_1_map[] = {
{"NPK_VNN_REQ_STS", BIT(4)},
{"EXI_VNN_REQ_STS", BIT(9)},
{"GBE_VNN_REQ_STS", BIT(19)},
@@ -232,7 +232,7 @@ const struct pmc_bit_map adl_vnn_req_status_1_map[] = {
{}
};
-const struct pmc_bit_map adl_vnn_req_status_2_map[] = {
+static const struct pmc_bit_map adl_vnn_req_status_2_map[] = {
{"CSMERTC_VNN_REQ_STS", BIT(1)},
{"CSE_VNN_REQ_STS", BIT(4)},
{"SMT1_VNN_REQ_STS", BIT(8)},
@@ -245,12 +245,12 @@ const struct pmc_bit_map adl_vnn_req_status_2_map[] = {
{}
};
-const struct pmc_bit_map adl_vnn_req_status_3_map[] = {
+static const struct pmc_bit_map adl_vnn_req_status_3_map[] = {
{"GPIOCOM5_VNN_REQ_STS", BIT(11)},
{}
};
-const struct pmc_bit_map adl_vnn_misc_status_map[] = {
+static const struct pmc_bit_map adl_vnn_misc_status_map[] = {
{"CPU_C10_REQ_STS", BIT(0)},
{"PCIe_LPM_En_REQ_STS", BIT(3)},
{"ITH_REQ_STS", BIT(5)},
@@ -265,7 +265,7 @@ const struct pmc_bit_map adl_vnn_misc_status_map[] = {
{}
};
-const struct pmc_bit_map *adl_lpm_maps[] = {
+static const struct pmc_bit_map *adl_lpm_maps[] = {
adl_clocksource_status_map,
adl_power_gating_status_0_map,
adl_power_gating_status_1_map,
@@ -311,20 +311,8 @@ const struct pmc_reg_map adl_reg_map = {
.pson_residency_counter_step = TGL_PSON_RES_COUNTER_STEP,
};
-int adl_core_init(struct pmc_dev *pmcdev)
-{
- struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN];
- int ret;
-
- pmcdev->suspend = cnl_suspend;
- pmcdev->resume = cnl_resume;
-
- pmc->map = &adl_reg_map;
- ret = get_primary_reg_base(pmc);
- if (ret)
- return ret;
-
- pmc_core_get_low_power_modes(pmcdev);
-
- return 0;
-}
+struct pmc_dev_info adl_pmc_dev = {
+ .map = &adl_reg_map,
+ .suspend = cnl_suspend,
+ .resume = cnl_resume,
+};
diff --git a/drivers/platform/x86/intel/pmc/arl.c b/drivers/platform/x86/intel/pmc/arl.c
index 05dec4f5019f..320993bd6d31 100644
--- a/drivers/platform/x86/intel/pmc/arl.c
+++ b/drivers/platform/x86/intel/pmc/arl.c
@@ -16,10 +16,11 @@
#define IOEP_LPM_REQ_GUID 0x5077612
#define SOCS_LPM_REQ_GUID 0x8478657
#define PCHS_LPM_REQ_GUID 0x9684572
+#define SOCM_LPM_REQ_GUID 0x2625030
static const u8 ARL_LPM_REG_INDEX[] = {0, 4, 5, 6, 8, 9, 10, 11, 12, 13, 14, 15, 16, 20};
-const struct pmc_bit_map arl_socs_ltr_show_map[] = {
+static const struct pmc_bit_map arl_socs_ltr_show_map[] = {
{"SOUTHPORT_A", CNP_PMC_LTR_SPA},
{"SOUTHPORT_B", CNP_PMC_LTR_SPB},
{"SATA", CNP_PMC_LTR_SATA},
@@ -59,7 +60,7 @@ const struct pmc_bit_map arl_socs_ltr_show_map[] = {
{}
};
-const struct pmc_bit_map arl_socs_clocksource_status_map[] = {
+static const struct pmc_bit_map arl_socs_clocksource_status_map[] = {
{"AON2_OFF_STS", BIT(0)},
{"AON3_OFF_STS", BIT(1)},
{"AON4_OFF_STS", BIT(2)},
@@ -87,7 +88,7 @@ const struct pmc_bit_map arl_socs_clocksource_status_map[] = {
{}
};
-const struct pmc_bit_map arl_socs_power_gating_status_0_map[] = {
+static const struct pmc_bit_map arl_socs_power_gating_status_0_map[] = {
{"PMC_PGD0_PG_STS", BIT(0)},
{"DMI_PGD0_PG_STS", BIT(1)},
{"ESPISPI_PGD0_PG_STS", BIT(2)},
@@ -123,7 +124,7 @@ const struct pmc_bit_map arl_socs_power_gating_status_0_map[] = {
{}
};
-const struct pmc_bit_map arl_socs_power_gating_status_1_map[] = {
+static const struct pmc_bit_map arl_socs_power_gating_status_1_map[] = {
{"USBR0_PGD0_PG_STS", BIT(0)},
{"SUSRAM_PGD0_PG_STS", BIT(1)},
{"SMT1_PGD0_PG_STS", BIT(2)},
@@ -159,7 +160,7 @@ const struct pmc_bit_map arl_socs_power_gating_status_1_map[] = {
{}
};
-const struct pmc_bit_map arl_socs_power_gating_status_2_map[] = {
+static const struct pmc_bit_map arl_socs_power_gating_status_2_map[] = {
{"PSF8_PGD0_PG_STS", BIT(0)},
{"FIA_PGD0_PG_STS", BIT(1)},
{"SOC_D2D_PGD3_PG_STS", BIT(2)},
@@ -187,7 +188,7 @@ const struct pmc_bit_map arl_socs_power_gating_status_2_map[] = {
{}
};
-const struct pmc_bit_map arl_socs_d3_status_2_map[] = {
+static const struct pmc_bit_map arl_socs_d3_status_2_map[] = {
{"CSMERTC_D3_STS", BIT(1)},
{"SUSRAM_D3_STS", BIT(2)},
{"CSE_D3_STS", BIT(4)},
@@ -206,7 +207,7 @@ const struct pmc_bit_map arl_socs_d3_status_2_map[] = {
{}
};
-const struct pmc_bit_map arl_socs_d3_status_3_map[] = {
+static const struct pmc_bit_map arl_socs_d3_status_3_map[] = {
{"GBETSN_D3_STS", BIT(13)},
{"THC0_D3_STS", BIT(14)},
{"THC1_D3_STS", BIT(15)},
@@ -214,13 +215,13 @@ const struct pmc_bit_map arl_socs_d3_status_3_map[] = {
{}
};
-const struct pmc_bit_map arl_socs_vnn_req_status_3_map[] = {
+static const struct pmc_bit_map arl_socs_vnn_req_status_3_map[] = {
{"DTS0_VNN_REQ_STS", BIT(7)},
{"GPIOCOM5_VNN_REQ_STS", BIT(11)},
{}
};
-const struct pmc_bit_map *arl_socs_lpm_maps[] = {
+static const struct pmc_bit_map *arl_socs_lpm_maps[] = {
arl_socs_clocksource_status_map,
arl_socs_power_gating_status_0_map,
arl_socs_power_gating_status_1_map,
@@ -238,7 +239,7 @@ const struct pmc_bit_map *arl_socs_lpm_maps[] = {
NULL
};
-const struct pmc_bit_map arl_socs_pfear_map[] = {
+static const struct pmc_bit_map arl_socs_pfear_map[] = {
{"RSVD64", BIT(0)},
{"RSVD65", BIT(1)},
{"RSVD66", BIT(2)},
@@ -249,13 +250,13 @@ const struct pmc_bit_map arl_socs_pfear_map[] = {
{}
};
-const struct pmc_bit_map *ext_arl_socs_pfear_map[] = {
+static const struct pmc_bit_map *ext_arl_socs_pfear_map[] = {
mtl_socm_pfear_map,
arl_socs_pfear_map,
NULL
};
-const struct pmc_reg_map arl_socs_reg_map = {
+static const struct pmc_reg_map arl_socs_reg_map = {
.pfear_sts = ext_arl_socs_pfear_map,
.ppfear_buckets = ARL_SOCS_PPFEAR_NUM_ENTRIES,
.pm_read_disable_bit = CNP_PMC_READ_DISABLE_BIT,
@@ -283,7 +284,7 @@ const struct pmc_reg_map arl_socs_reg_map = {
.pson_residency_counter_step = TGL_PSON_RES_COUNTER_STEP,
};
-const struct pmc_bit_map arl_pchs_ltr_show_map[] = {
+static const struct pmc_bit_map arl_pchs_ltr_show_map[] = {
{"SOUTHPORT_A", CNP_PMC_LTR_SPA},
{"SOUTHPORT_B", CNP_PMC_LTR_SPB},
{"SATA", CNP_PMC_LTR_SATA},
@@ -323,7 +324,7 @@ const struct pmc_bit_map arl_pchs_ltr_show_map[] = {
{}
};
-const struct pmc_bit_map arl_pchs_clocksource_status_map[] = {
+static const struct pmc_bit_map arl_pchs_clocksource_status_map[] = {
{"AON2_OFF_STS", BIT(0)},
{"AON3_OFF_STS", BIT(1)},
{"AON4_OFF_STS", BIT(2)},
@@ -358,7 +359,7 @@ const struct pmc_bit_map arl_pchs_clocksource_status_map[] = {
{}
};
-const struct pmc_bit_map arl_pchs_power_gating_status_0_map[] = {
+static const struct pmc_bit_map arl_pchs_power_gating_status_0_map[] = {
{"PMC_PGD0_PG_STS", BIT(0)},
{"DMI_PGD0_PG_STS", BIT(1)},
{"ESPISPI_PGD0_PG_STS", BIT(2)},
@@ -394,7 +395,7 @@ const struct pmc_bit_map arl_pchs_power_gating_status_0_map[] = {
{}
};
-const struct pmc_bit_map arl_pchs_power_gating_status_1_map[] = {
+static const struct pmc_bit_map arl_pchs_power_gating_status_1_map[] = {
{"USBR0_PGD0_PG_STS", BIT(0)},
{"SUSRAM_PGD0_PG_STS", BIT(1)},
{"SMT1_PGD0_PG_STS", BIT(2)},
@@ -430,7 +431,7 @@ const struct pmc_bit_map arl_pchs_power_gating_status_1_map[] = {
{}
};
-const struct pmc_bit_map arl_pchs_power_gating_status_2_map[] = {
+static const struct pmc_bit_map arl_pchs_power_gating_status_2_map[] = {
{"U3FPW2_PGD0_PG_STS", BIT(0)},
{"FIA_PGD0_PG_STS", BIT(1)},
{"FIACPCB_X_PGD0_PG_STS", BIT(2)},
@@ -457,7 +458,7 @@ const struct pmc_bit_map arl_pchs_power_gating_status_2_map[] = {
{}
};
-const struct pmc_bit_map arl_pchs_d3_status_0_map[] = {
+static const struct pmc_bit_map arl_pchs_d3_status_0_map[] = {
{"SPF_D3_STS", BIT(0)},
{"LPSS_D3_STS", BIT(3)},
{"XDCI_D3_STS", BIT(4)},
@@ -474,7 +475,7 @@ const struct pmc_bit_map arl_pchs_d3_status_0_map[] = {
{}
};
-const struct pmc_bit_map arl_pchs_d3_status_1_map[] = {
+static const struct pmc_bit_map arl_pchs_d3_status_1_map[] = {
{"GBETSN1_D3_STS", BIT(14)},
{"GBE_D3_STS", BIT(19)},
{"ITSS_D3_STS", BIT(23)},
@@ -483,7 +484,7 @@ const struct pmc_bit_map arl_pchs_d3_status_1_map[] = {
{}
};
-const struct pmc_bit_map arl_pchs_d3_status_2_map[] = {
+static const struct pmc_bit_map arl_pchs_d3_status_2_map[] = {
{"CSMERTC_D3_STS", BIT(1)},
{"SUSRAM_D3_STS", BIT(2)},
{"CSE_D3_STS", BIT(4)},
@@ -504,7 +505,7 @@ const struct pmc_bit_map arl_pchs_d3_status_2_map[] = {
{}
};
-const struct pmc_bit_map arl_pchs_d3_status_3_map[] = {
+static const struct pmc_bit_map arl_pchs_d3_status_3_map[] = {
{"ESE_D3_STS", BIT(3)},
{"GBETSN_D3_STS", BIT(13)},
{"THC0_D3_STS", BIT(14)},
@@ -513,13 +514,13 @@ const struct pmc_bit_map arl_pchs_d3_status_3_map[] = {
{}
};
-const struct pmc_bit_map arl_pchs_vnn_req_status_0_map[] = {
+static const struct pmc_bit_map arl_pchs_vnn_req_status_0_map[] = {
{"FIA_VNN_REQ_STS", BIT(17)},
{"ESPISPI_VNN_REQ_STS", BIT(18)},
{}
};
-const struct pmc_bit_map arl_pchs_vnn_req_status_1_map[] = {
+static const struct pmc_bit_map arl_pchs_vnn_req_status_1_map[] = {
{"NPK_VNN_REQ_STS", BIT(4)},
{"DFXAGG_VNN_REQ_STS", BIT(8)},
{"EXI_VNN_REQ_STS", BIT(9)},
@@ -530,7 +531,7 @@ const struct pmc_bit_map arl_pchs_vnn_req_status_1_map[] = {
{}
};
-const struct pmc_bit_map arl_pchs_vnn_req_status_2_map[] = {
+static const struct pmc_bit_map arl_pchs_vnn_req_status_2_map[] = {
{"FIA2_VNN_REQ_STS", BIT(0)},
{"CSMERTC_VNN_REQ_STS", BIT(1)},
{"CSE_VNN_REQ_STS", BIT(4)},
@@ -548,7 +549,7 @@ const struct pmc_bit_map arl_pchs_vnn_req_status_2_map[] = {
{}
};
-const struct pmc_bit_map arl_pchs_vnn_req_status_3_map[] = {
+static const struct pmc_bit_map arl_pchs_vnn_req_status_3_map[] = {
{"ESE_VNN_REQ_STS", BIT(3)},
{"DTS0_VNN_REQ_STS", BIT(7)},
{"GPIOCOM5_VNN_REQ_STS", BIT(11)},
@@ -556,7 +557,7 @@ const struct pmc_bit_map arl_pchs_vnn_req_status_3_map[] = {
{}
};
-const struct pmc_bit_map arl_pchs_vnn_misc_status_map[] = {
+static const struct pmc_bit_map arl_pchs_vnn_misc_status_map[] = {
{"CPU_C10_REQ_STS", BIT(0)},
{"TS_OFF_REQ_STS", BIT(1)},
{"PNDE_MET_REQ_STS", BIT(2)},
@@ -586,7 +587,7 @@ const struct pmc_bit_map arl_pchs_vnn_misc_status_map[] = {
{}
};
-const struct pmc_bit_map arl_pchs_signal_status_map[] = {
+static const struct pmc_bit_map arl_pchs_signal_status_map[] = {
{"LSX_Wake0_STS", BIT(0)},
{"LSX_Wake1_STS", BIT(1)},
{"LSX_Wake2_STS", BIT(2)},
@@ -606,7 +607,7 @@ const struct pmc_bit_map arl_pchs_signal_status_map[] = {
{}
};
-const struct pmc_bit_map *arl_pchs_lpm_maps[] = {
+static const struct pmc_bit_map *arl_pchs_lpm_maps[] = {
arl_pchs_clocksource_status_map,
arl_pchs_power_gating_status_0_map,
arl_pchs_power_gating_status_1_map,
@@ -624,7 +625,7 @@ const struct pmc_bit_map *arl_pchs_lpm_maps[] = {
NULL
};
-const struct pmc_reg_map arl_pchs_reg_map = {
+static const struct pmc_reg_map arl_pchs_reg_map = {
.pfear_sts = ext_arl_socs_pfear_map,
.ppfear_buckets = ARL_SOCS_PPFEAR_NUM_ENTRIES,
.pm_read_disable_bit = CNP_PMC_READ_DISABLE_BIT,
@@ -650,6 +651,7 @@ const struct pmc_reg_map arl_pchs_reg_map = {
.etr3_offset = ETR3_OFFSET,
};
+#define PMC_DEVID_SOCM 0x777f
#define PMC_DEVID_SOCS 0xae7f
#define PMC_DEVID_IOEP 0x7ecf
#define PMC_DEVID_PCHS 0x7f27
@@ -669,11 +671,17 @@ static struct pmc_info arl_pmc_info_list[] = {
.devid = PMC_DEVID_PCHS,
.map = &arl_pchs_reg_map,
},
+ {
+ .guid = SOCM_LPM_REQ_GUID,
+ .devid = PMC_DEVID_SOCM,
+ .map = &mtl_socm_reg_map,
+ },
{}
};
#define ARL_NPU_PCI_DEV 0xad1d
#define ARL_GNA_PCI_DEV 0xae4c
+#define ARL_H_GNA_PCI_DEV 0x774c
/*
* Set power state of select devices that do not have drivers to D3
* so that they do not block Package C entry.
@@ -684,6 +692,12 @@ static void arl_d3_fixup(void)
pmc_core_set_device_d3(ARL_GNA_PCI_DEV);
}
+static void arl_h_d3_fixup(void)
+{
+ pmc_core_set_device_d3(ARL_NPU_PCI_DEV);
+ pmc_core_set_device_d3(ARL_H_GNA_PCI_DEV);
+}
+
static int arl_resume(struct pmc_dev *pmcdev)
{
arl_d3_fixup();
@@ -691,40 +705,41 @@ static int arl_resume(struct pmc_dev *pmcdev)
return cnl_resume(pmcdev);
}
-int arl_core_init(struct pmc_dev *pmcdev)
+static int arl_h_resume(struct pmc_dev *pmcdev)
{
- struct pmc *pmc = pmcdev->pmcs[PMC_IDX_SOC];
- int ret;
- int func = 0;
- bool ssram_init = true;
+ arl_h_d3_fixup();
+
+ return cnl_resume(pmcdev);
+}
+static int arl_core_init(struct pmc_dev *pmcdev, struct pmc_dev_info *pmc_dev_info)
+{
arl_d3_fixup();
- pmcdev->suspend = cnl_suspend;
- pmcdev->resume = arl_resume;
- pmcdev->regmap_list = arl_pmc_info_list;
+ return generic_core_init(pmcdev, pmc_dev_info);
+}
- /*
- * If ssram init fails use legacy method to at least get the
- * primary PMC
- */
- ret = pmc_core_ssram_init(pmcdev, func);
- if (ret) {
- ssram_init = false;
- pmc->map = &arl_socs_reg_map;
-
- ret = get_primary_reg_base(pmc);
- if (ret)
- return ret;
- }
-
- pmc_core_get_low_power_modes(pmcdev);
- pmc_core_punit_pmt_init(pmcdev, ARL_PMT_DMU_GUID);
-
- if (ssram_init) {
- ret = pmc_core_ssram_get_lpm_reqs(pmcdev);
- if (ret)
- return ret;
- }
-
- return 0;
+static int arl_h_core_init(struct pmc_dev *pmcdev, struct pmc_dev_info *pmc_dev_info)
+{
+ arl_h_d3_fixup();
+ return generic_core_init(pmcdev, pmc_dev_info);
}
+
+struct pmc_dev_info arl_pmc_dev = {
+ .pci_func = 0,
+ .dmu_guid = ARL_PMT_DMU_GUID,
+ .regmap_list = arl_pmc_info_list,
+ .map = &arl_socs_reg_map,
+ .suspend = cnl_suspend,
+ .resume = arl_resume,
+ .init = arl_core_init,
+};
+
+struct pmc_dev_info arl_h_pmc_dev = {
+ .pci_func = 2,
+ .dmu_guid = ARL_PMT_DMU_GUID,
+ .regmap_list = arl_pmc_info_list,
+ .map = &mtl_socm_reg_map,
+ .suspend = cnl_suspend,
+ .resume = arl_h_resume,
+ .init = arl_h_core_init,
+};
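
The ARL hunks above capture the pattern this series settles on for platforms that need extra work before the common path runs: a D3 fixup parks driver-less devices (so they do not block Package C entry), and a thin .init callback performs that fixup before handing off to generic_core_init(). A minimal sketch of the shape, using hypothetical arl_x_* names and a made-up device ID purely for illustration:

    /* Illustrative only -- not part of this series. */
    #define ARL_X_FAKE_PCI_DEV	0x1234	/* made-up device ID */

    static void arl_x_d3_fixup(void)
    {
            /* Devices without drivers are put into D3 by hand. */
            pmc_core_set_device_d3(ARL_X_FAKE_PCI_DEV);
    }

    static int arl_x_core_init(struct pmc_dev *pmcdev, struct pmc_dev_info *pmc_dev_info)
    {
            arl_x_d3_fixup();
            return generic_core_init(pmcdev, pmc_dev_info);
    }

    struct pmc_dev_info arl_x_pmc_dev = {
            .pci_func    = 0,                        /* primary PMC PCI function */
            .dmu_guid    = ARL_PMT_DMU_GUID,
            .regmap_list = arl_pmc_info_list,        /* enables SSRAM discovery */
            .map         = &arl_socs_reg_map,        /* legacy fallback map */
            .suspend     = cnl_suspend,
            .resume      = arl_resume,
            .init        = arl_x_core_init,
    };

Note how the real arl_pmc_dev and arl_h_pmc_dev in the hunk above differ only in .pci_func, the fallback .map and the resume/init hooks, while sharing arl_pmc_info_list.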
diff --git a/drivers/platform/x86/intel/pmc/cnp.c b/drivers/platform/x86/intel/pmc/cnp.c
index fc5193fdf8a8..2c5af158bbe2 100644
--- a/drivers/platform/x86/intel/pmc/cnp.c
+++ b/drivers/platform/x86/intel/pmc/cnp.c
@@ -88,7 +88,7 @@ const struct pmc_bit_map cnp_pfear_map[] = {
{}
};
-const struct pmc_bit_map *ext_cnp_pfear_map[] = {
+static const struct pmc_bit_map *ext_cnp_pfear_map[] = {
/*
* Check intel_pmc_core_ids[] users of cnp_reg_map for
* a list of core SoCs using this.
@@ -97,7 +97,7 @@ const struct pmc_bit_map *ext_cnp_pfear_map[] = {
NULL
};
-const struct pmc_bit_map cnp_slps0_dbg0_map[] = {
+static const struct pmc_bit_map cnp_slps0_dbg0_map[] = {
{"AUDIO_D3", BIT(0)},
{"OTG_D3", BIT(1)},
{"XHCI_D3", BIT(2)},
@@ -110,7 +110,7 @@ const struct pmc_bit_map cnp_slps0_dbg0_map[] = {
{}
};
-const struct pmc_bit_map cnp_slps0_dbg1_map[] = {
+static const struct pmc_bit_map cnp_slps0_dbg1_map[] = {
{"SDIO_PLL_OFF", BIT(0)},
{"USB2_PLL_OFF", BIT(1)},
{"AUDIO_PLL_OFF", BIT(2)},
@@ -127,7 +127,7 @@ const struct pmc_bit_map cnp_slps0_dbg1_map[] = {
{}
};
-const struct pmc_bit_map cnp_slps0_dbg2_map[] = {
+static const struct pmc_bit_map cnp_slps0_dbg2_map[] = {
{"MPHY_CORE_GATED", BIT(0)},
{"CSME_GATED", BIT(1)},
{"USB2_SUS_GATED", BIT(2)},
@@ -274,20 +274,9 @@ int cnl_resume(struct pmc_dev *pmcdev)
return pmc_core_resume_common(pmcdev);
}
-int cnp_core_init(struct pmc_dev *pmcdev)
-{
- struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN];
- int ret;
-
- pmcdev->suspend = cnl_suspend;
- pmcdev->resume = cnl_resume;
-
- pmc->map = &cnp_reg_map;
- ret = get_primary_reg_base(pmc);
- if (ret)
- return ret;
-
- pmc_core_get_low_power_modes(pmcdev);
+struct pmc_dev_info cnp_pmc_dev = {
+ .map = &cnp_reg_map,
+ .suspend = cnl_suspend,
+ .resume = cnl_resume,
+};
- return 0;
-}
diff --git a/drivers/platform/x86/intel/pmc/core.c b/drivers/platform/x86/intel/pmc/core.c
index 1ee0fb5f8250..7a1d11f2914f 100644
--- a/drivers/platform/x86/intel/pmc/core.c
+++ b/drivers/platform/x86/intel/pmc/core.c
@@ -1345,40 +1345,80 @@ static void pmc_core_dbgfs_register(struct pmc_dev *pmcdev)
}
}
+/*
+ * When supported, ssram init is used to discover all available PMCs.
+ * If ssram init fails, this function falls back to the legacy method to
+ * at least get the primary PMC.
+ */
+int generic_core_init(struct pmc_dev *pmcdev, struct pmc_dev_info *pmc_dev_info)
+{
+ struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN];
+ bool ssram;
+ int ret;
+
+ pmcdev->suspend = pmc_dev_info->suspend;
+ pmcdev->resume = pmc_dev_info->resume;
+
+ ssram = pmc_dev_info->regmap_list != NULL;
+ if (ssram) {
+ pmcdev->regmap_list = pmc_dev_info->regmap_list;
+ ret = pmc_core_ssram_init(pmcdev, pmc_dev_info->pci_func);
+ if (ret) {
+ dev_warn(&pmcdev->pdev->dev,
+ "ssram init failed, %d, using legacy init\n", ret);
+ ssram = false;
+ }
+ }
+
+ if (!ssram) {
+ pmc->map = pmc_dev_info->map;
+ ret = get_primary_reg_base(pmc);
+ if (ret)
+ return ret;
+ }
+
+ pmc_core_get_low_power_modes(pmcdev);
+ if (pmc_dev_info->dmu_guid)
+ pmc_core_punit_pmt_init(pmcdev, pmc_dev_info->dmu_guid);
+
+ if (ssram)
+ return pmc_core_ssram_get_lpm_reqs(pmcdev);
+
+ return 0;
+}
+
static const struct x86_cpu_id intel_pmc_core_ids[] = {
- X86_MATCH_VFM(INTEL_SKYLAKE_L, spt_core_init),
- X86_MATCH_VFM(INTEL_SKYLAKE, spt_core_init),
- X86_MATCH_VFM(INTEL_KABYLAKE_L, spt_core_init),
- X86_MATCH_VFM(INTEL_KABYLAKE, spt_core_init),
- X86_MATCH_VFM(INTEL_CANNONLAKE_L, cnp_core_init),
- X86_MATCH_VFM(INTEL_ICELAKE_L, icl_core_init),
- X86_MATCH_VFM(INTEL_ICELAKE_NNPI, icl_core_init),
- X86_MATCH_VFM(INTEL_COMETLAKE, cnp_core_init),
- X86_MATCH_VFM(INTEL_COMETLAKE_L, cnp_core_init),
- X86_MATCH_VFM(INTEL_TIGERLAKE_L, tgl_l_core_init),
- X86_MATCH_VFM(INTEL_TIGERLAKE, tgl_core_init),
- X86_MATCH_VFM(INTEL_ATOM_TREMONT, tgl_l_core_init),
- X86_MATCH_VFM(INTEL_ATOM_TREMONT_L, icl_core_init),
- X86_MATCH_VFM(INTEL_ROCKETLAKE, tgl_core_init),
- X86_MATCH_VFM(INTEL_ALDERLAKE_L, tgl_l_core_init),
- X86_MATCH_VFM(INTEL_ATOM_GRACEMONT, tgl_l_core_init),
- X86_MATCH_VFM(INTEL_ALDERLAKE, adl_core_init),
- X86_MATCH_VFM(INTEL_RAPTORLAKE_P, tgl_l_core_init),
- X86_MATCH_VFM(INTEL_RAPTORLAKE, adl_core_init),
- X86_MATCH_VFM(INTEL_RAPTORLAKE_S, adl_core_init),
- X86_MATCH_VFM(INTEL_METEORLAKE_L, mtl_core_init),
- X86_MATCH_VFM(INTEL_ARROWLAKE, arl_core_init),
- X86_MATCH_VFM(INTEL_LUNARLAKE_M, lnl_core_init),
+ X86_MATCH_VFM(INTEL_SKYLAKE_L, &spt_pmc_dev),
+ X86_MATCH_VFM(INTEL_SKYLAKE, &spt_pmc_dev),
+ X86_MATCH_VFM(INTEL_KABYLAKE_L, &spt_pmc_dev),
+ X86_MATCH_VFM(INTEL_KABYLAKE, &spt_pmc_dev),
+ X86_MATCH_VFM(INTEL_CANNONLAKE_L, &cnp_pmc_dev),
+ X86_MATCH_VFM(INTEL_ICELAKE_L, &icl_pmc_dev),
+ X86_MATCH_VFM(INTEL_ICELAKE_NNPI, &icl_pmc_dev),
+ X86_MATCH_VFM(INTEL_COMETLAKE, &cnp_pmc_dev),
+ X86_MATCH_VFM(INTEL_COMETLAKE_L, &cnp_pmc_dev),
+ X86_MATCH_VFM(INTEL_TIGERLAKE_L, &tgl_l_pmc_dev),
+ X86_MATCH_VFM(INTEL_TIGERLAKE, &tgl_pmc_dev),
+ X86_MATCH_VFM(INTEL_ATOM_TREMONT, &tgl_l_pmc_dev),
+ X86_MATCH_VFM(INTEL_ATOM_TREMONT_L, &icl_pmc_dev),
+ X86_MATCH_VFM(INTEL_ROCKETLAKE, &tgl_pmc_dev),
+ X86_MATCH_VFM(INTEL_ALDERLAKE_L, &tgl_l_pmc_dev),
+ X86_MATCH_VFM(INTEL_ATOM_GRACEMONT, &tgl_l_pmc_dev),
+ X86_MATCH_VFM(INTEL_ALDERLAKE, &adl_pmc_dev),
+ X86_MATCH_VFM(INTEL_RAPTORLAKE_P, &tgl_l_pmc_dev),
+ X86_MATCH_VFM(INTEL_RAPTORLAKE, &adl_pmc_dev),
+ X86_MATCH_VFM(INTEL_RAPTORLAKE_S, &adl_pmc_dev),
+ X86_MATCH_VFM(INTEL_METEORLAKE_L, &mtl_pmc_dev),
+ X86_MATCH_VFM(INTEL_ARROWLAKE, &arl_pmc_dev),
+ X86_MATCH_VFM(INTEL_ARROWLAKE_H, &arl_h_pmc_dev),
+ X86_MATCH_VFM(INTEL_ARROWLAKE_U, &arl_h_pmc_dev),
+ X86_MATCH_VFM(INTEL_LUNARLAKE_M, &lnl_pmc_dev),
+ X86_MATCH_VFM(INTEL_PANTHERLAKE_L, &ptl_pmc_dev),
{}
};
MODULE_DEVICE_TABLE(x86cpu, intel_pmc_core_ids);
-static const struct pci_device_id pmc_pci_ids[] = {
- { PCI_VDEVICE(INTEL, SPT_PMC_PCI_DEVICE_ID) },
- { }
-};
-
/*
* This quirk can be used on those platforms where
* the platform BIOS enforces 24Mhz crystal to shutdown
@@ -1452,7 +1492,7 @@ static int pmc_core_probe(struct platform_device *pdev)
static bool device_initialized;
struct pmc_dev *pmcdev;
const struct x86_cpu_id *cpu_id;
- int (*core_init)(struct pmc_dev *pmcdev);
+ struct pmc_dev_info *pmc_dev_info;
struct pmc *primary_pmc;
int ret;
@@ -1472,7 +1512,7 @@ static int pmc_core_probe(struct platform_device *pdev)
if (!cpu_id)
return -ENODEV;
- core_init = (int (*)(struct pmc_dev *))cpu_id->driver_data;
+ pmc_dev_info = (struct pmc_dev_info *)cpu_id->driver_data;
/* Primary PMC */
primary_pmc = devm_kzalloc(&pdev->dev, sizeof(*primary_pmc), GFP_KERNEL);
@@ -1489,16 +1529,13 @@ static int pmc_core_probe(struct platform_device *pdev)
if (!pmcdev->pkgc_res_cnt)
return -ENOMEM;
- /*
- * Coffee Lake has CPU ID of Kaby Lake and Cannon Lake PCH. So here
- * Sunrisepoint PCH regmap can't be used. Use Cannon Lake PCH regmap
- * in this case.
- */
- if (core_init == spt_core_init && !pci_dev_present(pmc_pci_ids))
- core_init = cnp_core_init;
-
mutex_init(&pmcdev->lock);
- ret = core_init(pmcdev);
+
+ if (pmc_dev_info->init)
+ ret = pmc_dev_info->init(pmcdev, pmc_dev_info);
+ else
+ ret = generic_core_init(pmcdev, pmc_dev_info);
+
if (ret) {
pmc_core_clean_structure(pdev);
return ret;
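
To make the reworked probe path in core.c easier to follow: the driver_data of the matched CPU entry is now a struct pmc_dev_info pointer rather than an init function, and platforms that need nothing special omit .init so probe falls through to generic_core_init(). A sketch of what that reduces to for a legacy-only platform (no regmap_list); the xyz_* name is a stand-in, everything else is taken from the hunks above:

    /* Hypothetical legacy-only platform, for illustration. */
    struct pmc_dev_info xyz_pmc_dev = {
            .map     = &cnp_reg_map,        /* primary PMC register map */
            .suspend = cnl_suspend,
            .resume  = cnl_resume,
            /* .regmap_list unset: no SSRAM discovery */
            /* .init unset: probe calls generic_core_init() directly */
    };

    /*
     * generic_core_init(pmcdev, &xyz_pmc_dev) then boils down to:
     *
     *      pmcdev->suspend = xyz_pmc_dev.suspend;
     *      pmcdev->resume  = xyz_pmc_dev.resume;
     *      pmc->map        = xyz_pmc_dev.map;
     *      get_primary_reg_base(pmc);
     *      pmc_core_get_low_power_modes(pmcdev);
     */

When regmap_list is set, pmc_core_ssram_init() runs first and the legacy assignment above only happens as the fallback described in the comment in the hunk.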
diff --git a/drivers/platform/x86/intel/pmc/core.h b/drivers/platform/x86/intel/pmc/core.h
index b9d3291d0bf2..945a1c440cca 100644
--- a/drivers/platform/x86/intel/pmc/core.h
+++ b/drivers/platform/x86/intel/pmc/core.h
@@ -285,6 +285,14 @@ enum ppfear_regs {
#define LNL_PPFEAR_NUM_ENTRIES 12
#define LNL_S0IX_BLOCKER_OFFSET 0x2004
+/* Panther Lake Power Management Controller register offsets */
+#define PTL_LPM_NUM_MAPS 14
+#define PTL_PMC_LTR_SATA2 0x1B90
+#define PTL_PMC_LTR_PMC 0x1BA8
+#define PTL_PMC_LTR_CUR_ASLT 0x1C28
+#define PTL_PMC_LTR_CUR_PLT 0x1C2C
+#define PTL_PCD_PMC_MMIO_REG_LEN 0x31A8
+
extern const char *pmc_lpm_modes[];
struct pmc_bit_map {
@@ -430,178 +438,77 @@ struct pmc_dev {
enum pmc_index {
PMC_IDX_MAIN,
- PMC_IDX_SOC = PMC_IDX_MAIN,
PMC_IDX_IOE,
PMC_IDX_PCH,
PMC_IDX_MAX
};
+/**
+ * struct pmc_dev_info - Structure to keep PMC device info
+ * @pci_func: Function number of the primary PMC
+ * @dmu_guid: Die Management Unit GUID
+ * @regmap_list: Pointer to a list of pmc_info structures that could be
+ * available for the platform. When set, this field implies
+ * SSRAM support.
+ * @map: Pointer to a pmc_reg_map struct that contains platform
+ * specific attributes of the primary PMC
+ * @suspend: Function to perform platform specific suspend
+ * @resume: Function to perform platform specific resume
+ * @init: Function to perform platform specific init action
+ */
+struct pmc_dev_info {
+ u8 pci_func;
+ u32 dmu_guid;
+ struct pmc_info *regmap_list;
+ const struct pmc_reg_map *map;
+ void (*suspend)(struct pmc_dev *pmcdev);
+ int (*resume)(struct pmc_dev *pmcdev);
+ int (*init)(struct pmc_dev *pmcdev, struct pmc_dev_info *pmc_dev_info);
+};
+
extern const struct pmc_bit_map msr_map[];
-extern const struct pmc_bit_map spt_pll_map[];
-extern const struct pmc_bit_map spt_mphy_map[];
-extern const struct pmc_bit_map spt_pfear_map[];
-extern const struct pmc_bit_map *ext_spt_pfear_map[];
-extern const struct pmc_bit_map spt_ltr_show_map[];
-extern const struct pmc_reg_map spt_reg_map;
extern const struct pmc_bit_map cnp_pfear_map[];
-extern const struct pmc_bit_map *ext_cnp_pfear_map[];
-extern const struct pmc_bit_map cnp_slps0_dbg0_map[];
-extern const struct pmc_bit_map cnp_slps0_dbg1_map[];
-extern const struct pmc_bit_map cnp_slps0_dbg2_map[];
extern const struct pmc_bit_map *cnp_slps0_dbg_maps[];
extern const struct pmc_bit_map cnp_ltr_show_map[];
extern const struct pmc_reg_map cnp_reg_map;
-extern const struct pmc_bit_map icl_pfear_map[];
-extern const struct pmc_bit_map *ext_icl_pfear_map[];
-extern const struct pmc_reg_map icl_reg_map;
-extern const struct pmc_bit_map tgl_pfear_map[];
-extern const struct pmc_bit_map *ext_tgl_pfear_map[];
-extern const struct pmc_bit_map tgl_clocksource_status_map[];
-extern const struct pmc_bit_map tgl_power_gating_status_map[];
-extern const struct pmc_bit_map tgl_d3_status_map[];
-extern const struct pmc_bit_map tgl_vnn_req_status_map[];
-extern const struct pmc_bit_map tgl_vnn_misc_status_map[];
extern const struct pmc_bit_map tgl_signal_status_map[];
-extern const struct pmc_bit_map *tgl_lpm_maps[];
-extern const struct pmc_reg_map tgl_reg_map;
-extern const struct pmc_reg_map tgl_h_reg_map;
-extern const struct pmc_bit_map adl_pfear_map[];
-extern const struct pmc_bit_map *ext_adl_pfear_map[];
-extern const struct pmc_bit_map adl_ltr_show_map[];
-extern const struct pmc_bit_map adl_clocksource_status_map[];
-extern const struct pmc_bit_map adl_power_gating_status_0_map[];
-extern const struct pmc_bit_map adl_power_gating_status_1_map[];
-extern const struct pmc_bit_map adl_power_gating_status_2_map[];
-extern const struct pmc_bit_map adl_d3_status_0_map[];
-extern const struct pmc_bit_map adl_d3_status_1_map[];
-extern const struct pmc_bit_map adl_d3_status_2_map[];
-extern const struct pmc_bit_map adl_d3_status_3_map[];
-extern const struct pmc_bit_map adl_vnn_req_status_0_map[];
-extern const struct pmc_bit_map adl_vnn_req_status_1_map[];
-extern const struct pmc_bit_map adl_vnn_req_status_2_map[];
-extern const struct pmc_bit_map adl_vnn_req_status_3_map[];
-extern const struct pmc_bit_map adl_vnn_misc_status_map[];
-extern const struct pmc_bit_map *adl_lpm_maps[];
extern const struct pmc_reg_map adl_reg_map;
extern const struct pmc_bit_map mtl_socm_pfear_map[];
-extern const struct pmc_bit_map *ext_mtl_socm_pfear_map[];
-extern const struct pmc_bit_map mtl_socm_ltr_show_map[];
-extern const struct pmc_bit_map mtl_socm_clocksource_status_map[];
-extern const struct pmc_bit_map mtl_socm_power_gating_status_0_map[];
-extern const struct pmc_bit_map mtl_socm_power_gating_status_1_map[];
-extern const struct pmc_bit_map mtl_socm_power_gating_status_2_map[];
extern const struct pmc_bit_map mtl_socm_d3_status_0_map[];
extern const struct pmc_bit_map mtl_socm_d3_status_1_map[];
-extern const struct pmc_bit_map mtl_socm_d3_status_2_map[];
-extern const struct pmc_bit_map mtl_socm_d3_status_3_map[];
extern const struct pmc_bit_map mtl_socm_vnn_req_status_0_map[];
extern const struct pmc_bit_map mtl_socm_vnn_req_status_1_map[];
extern const struct pmc_bit_map mtl_socm_vnn_req_status_2_map[];
-extern const struct pmc_bit_map mtl_socm_vnn_req_status_3_map[];
extern const struct pmc_bit_map mtl_socm_vnn_misc_status_map[];
extern const struct pmc_bit_map mtl_socm_signal_status_map[];
-extern const struct pmc_bit_map *mtl_socm_lpm_maps[];
extern const struct pmc_reg_map mtl_socm_reg_map;
-extern const struct pmc_bit_map mtl_ioep_pfear_map[];
-extern const struct pmc_bit_map *ext_mtl_ioep_pfear_map[];
-extern const struct pmc_bit_map mtl_ioep_ltr_show_map[];
-extern const struct pmc_bit_map mtl_ioep_clocksource_status_map[];
-extern const struct pmc_bit_map mtl_ioep_power_gating_status_0_map[];
-extern const struct pmc_bit_map mtl_ioep_power_gating_status_1_map[];
-extern const struct pmc_bit_map mtl_ioep_power_gating_status_2_map[];
-extern const struct pmc_bit_map mtl_ioep_d3_status_0_map[];
-extern const struct pmc_bit_map mtl_ioep_d3_status_1_map[];
-extern const struct pmc_bit_map mtl_ioep_d3_status_2_map[];
-extern const struct pmc_bit_map mtl_ioep_d3_status_3_map[];
-extern const struct pmc_bit_map mtl_ioep_vnn_req_status_0_map[];
-extern const struct pmc_bit_map mtl_ioep_vnn_req_status_1_map[];
-extern const struct pmc_bit_map mtl_ioep_vnn_req_status_2_map[];
-extern const struct pmc_bit_map mtl_ioep_vnn_req_status_3_map[];
-extern const struct pmc_bit_map mtl_ioep_vnn_misc_status_map[];
-extern const struct pmc_bit_map *mtl_ioep_lpm_maps[];
extern const struct pmc_reg_map mtl_ioep_reg_map;
-extern const struct pmc_bit_map mtl_ioem_pfear_map[];
-extern const struct pmc_bit_map *ext_mtl_ioem_pfear_map[];
-extern const struct pmc_bit_map mtl_ioem_power_gating_status_1_map[];
-extern const struct pmc_bit_map mtl_ioem_vnn_req_status_1_map[];
-extern const struct pmc_bit_map *mtl_ioem_lpm_maps[];
-extern const struct pmc_reg_map mtl_ioem_reg_map;
-extern const struct pmc_reg_map lnl_socm_reg_map;
-
-/* LNL */
-extern const struct pmc_bit_map lnl_ltr_show_map[];
-extern const struct pmc_bit_map lnl_clocksource_status_map[];
-extern const struct pmc_bit_map lnl_power_gating_status_0_map[];
-extern const struct pmc_bit_map lnl_power_gating_status_1_map[];
-extern const struct pmc_bit_map lnl_power_gating_status_2_map[];
-extern const struct pmc_bit_map lnl_d3_status_0_map[];
-extern const struct pmc_bit_map lnl_d3_status_1_map[];
-extern const struct pmc_bit_map lnl_d3_status_2_map[];
-extern const struct pmc_bit_map lnl_d3_status_3_map[];
-extern const struct pmc_bit_map lnl_vnn_req_status_0_map[];
-extern const struct pmc_bit_map lnl_vnn_req_status_1_map[];
-extern const struct pmc_bit_map lnl_vnn_req_status_2_map[];
-extern const struct pmc_bit_map lnl_vnn_req_status_3_map[];
-extern const struct pmc_bit_map lnl_vnn_misc_status_map[];
-extern const struct pmc_bit_map *lnl_lpm_maps[];
-extern const struct pmc_bit_map *lnl_blk_maps[];
-extern const struct pmc_bit_map lnl_pfear_map[];
-extern const struct pmc_bit_map *ext_lnl_pfear_map[];
-extern const struct pmc_bit_map lnl_signal_status_map[];
-
-/* ARL */
-extern const struct pmc_bit_map arl_socs_ltr_show_map[];
-extern const struct pmc_bit_map arl_socs_clocksource_status_map[];
-extern const struct pmc_bit_map arl_socs_power_gating_status_0_map[];
-extern const struct pmc_bit_map arl_socs_power_gating_status_1_map[];
-extern const struct pmc_bit_map arl_socs_power_gating_status_2_map[];
-extern const struct pmc_bit_map arl_socs_d3_status_2_map[];
-extern const struct pmc_bit_map arl_socs_d3_status_3_map[];
-extern const struct pmc_bit_map arl_socs_vnn_req_status_3_map[];
-extern const struct pmc_bit_map *arl_socs_lpm_maps[];
-extern const struct pmc_bit_map arl_socs_pfear_map[];
-extern const struct pmc_bit_map *ext_arl_socs_pfear_map[];
-extern const struct pmc_reg_map arl_socs_reg_map;
-extern const struct pmc_bit_map arl_pchs_ltr_show_map[];
-extern const struct pmc_bit_map arl_pchs_clocksource_status_map[];
-extern const struct pmc_bit_map arl_pchs_power_gating_status_0_map[];
-extern const struct pmc_bit_map arl_pchs_power_gating_status_1_map[];
-extern const struct pmc_bit_map arl_pchs_power_gating_status_2_map[];
-extern const struct pmc_bit_map arl_pchs_d3_status_0_map[];
-extern const struct pmc_bit_map arl_pchs_d3_status_1_map[];
-extern const struct pmc_bit_map arl_pchs_d3_status_2_map[];
-extern const struct pmc_bit_map arl_pchs_d3_status_3_map[];
-extern const struct pmc_bit_map arl_pchs_vnn_req_status_0_map[];
-extern const struct pmc_bit_map arl_pchs_vnn_req_status_1_map[];
-extern const struct pmc_bit_map arl_pchs_vnn_req_status_2_map[];
-extern const struct pmc_bit_map arl_pchs_vnn_req_status_3_map[];
-extern const struct pmc_bit_map arl_pchs_vnn_misc_status_map[];
-extern const struct pmc_bit_map arl_pchs_signal_status_map[];
-extern const struct pmc_bit_map *arl_pchs_lpm_maps[];
-extern const struct pmc_reg_map arl_pchs_reg_map;
-
-extern void pmc_core_get_tgl_lpm_reqs(struct platform_device *pdev);
-extern int pmc_core_ssram_get_lpm_reqs(struct pmc_dev *pmcdev);
+
+void pmc_core_get_tgl_lpm_reqs(struct platform_device *pdev);
+int pmc_core_ssram_get_lpm_reqs(struct pmc_dev *pmcdev);
int pmc_core_send_ltr_ignore(struct pmc_dev *pmcdev, u32 value, int ignore);
int pmc_core_resume_common(struct pmc_dev *pmcdev);
int get_primary_reg_base(struct pmc *pmc);
-extern void pmc_core_get_low_power_modes(struct pmc_dev *pmcdev);
-extern void pmc_core_punit_pmt_init(struct pmc_dev *pmcdev, u32 guid);
-extern void pmc_core_set_device_d3(unsigned int device);
-
-extern int pmc_core_ssram_init(struct pmc_dev *pmcdev, int func);
-
-int spt_core_init(struct pmc_dev *pmcdev);
-int cnp_core_init(struct pmc_dev *pmcdev);
-int icl_core_init(struct pmc_dev *pmcdev);
-int tgl_core_init(struct pmc_dev *pmcdev);
-int tgl_l_core_init(struct pmc_dev *pmcdev);
-int tgl_core_generic_init(struct pmc_dev *pmcdev, int pch_tp);
-int adl_core_init(struct pmc_dev *pmcdev);
-int mtl_core_init(struct pmc_dev *pmcdev);
-int arl_core_init(struct pmc_dev *pmcdev);
-int lnl_core_init(struct pmc_dev *pmcdev);
+void pmc_core_get_low_power_modes(struct pmc_dev *pmcdev);
+void pmc_core_punit_pmt_init(struct pmc_dev *pmcdev, u32 guid);
+void pmc_core_set_device_d3(unsigned int device);
+
+int pmc_core_ssram_init(struct pmc_dev *pmcdev, int func);
+
+int generic_core_init(struct pmc_dev *pmcdev, struct pmc_dev_info *pmc_dev_info);
+
+extern struct pmc_dev_info spt_pmc_dev;
+extern struct pmc_dev_info cnp_pmc_dev;
+extern struct pmc_dev_info icl_pmc_dev;
+extern struct pmc_dev_info tgl_l_pmc_dev;
+extern struct pmc_dev_info tgl_pmc_dev;
+extern struct pmc_dev_info adl_pmc_dev;
+extern struct pmc_dev_info mtl_pmc_dev;
+extern struct pmc_dev_info arl_pmc_dev;
+extern struct pmc_dev_info arl_h_pmc_dev;
+extern struct pmc_dev_info lnl_pmc_dev;
+extern struct pmc_dev_info ptl_pmc_dev;
void cnl_suspend(struct pmc_dev *pmcdev);
int cnl_resume(struct pmc_dev *pmcdev);
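
With the per-platform core_init() entry points gone, the header now exports one pmc_dev_info per platform instead of dozens of map declarations. Wiring up a new platform therefore reduces to three touch points; a sketch with hypothetical xyz names (the X86_MATCH_VFM argument must of course be an existing INTEL_* VFM, the placeholder below is not real):

    /* xyz.c -- hypothetical platform file */
    struct pmc_dev_info xyz_pmc_dev = {
            .map = &xyz_reg_map,            /* platform's pmc_reg_map */
    };

    /* core.h */
    extern struct pmc_dev_info xyz_pmc_dev;

    /* core.c, intel_pmc_core_ids[] */
    X86_MATCH_VFM(INTEL_XYZ_PLACEHOLDER, &xyz_pmc_dev),

Everything else (register base discovery, low power mode setup, optional SSRAM and PMT handling) is driven by the pmc_dev_info fields documented in the kernel-doc above.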
diff --git a/drivers/platform/x86/intel/pmc/icl.c b/drivers/platform/x86/intel/pmc/icl.c
index 71b0fd6cb7d8..db7ed15bf863 100644
--- a/drivers/platform/x86/intel/pmc/icl.c
+++ b/drivers/platform/x86/intel/pmc/icl.c
@@ -10,7 +10,7 @@
#include "core.h"
-const struct pmc_bit_map icl_pfear_map[] = {
+static const struct pmc_bit_map icl_pfear_map[] = {
{"RES_65", BIT(0)},
{"RES_66", BIT(1)},
{"RES_67", BIT(2)},
@@ -22,7 +22,7 @@ const struct pmc_bit_map icl_pfear_map[] = {
{}
};
-const struct pmc_bit_map *ext_icl_pfear_map[] = {
+static const struct pmc_bit_map *ext_icl_pfear_map[] = {
/*
* Check intel_pmc_core_ids[] users of icl_reg_map for
* a list of core SoCs using this.
@@ -32,7 +32,7 @@ const struct pmc_bit_map *ext_icl_pfear_map[] = {
NULL
};
-const struct pmc_reg_map icl_reg_map = {
+static const struct pmc_reg_map icl_reg_map = {
.pfear_sts = ext_icl_pfear_map,
.slp_s0_offset = CNP_PMC_SLP_S0_RES_COUNTER_OFFSET,
.slp_s0_res_counter_step = ICL_PMC_SLP_S0_RES_COUNTER_STEP,
@@ -50,18 +50,6 @@ const struct pmc_reg_map icl_reg_map = {
.etr3_offset = ETR3_OFFSET,
};
-int icl_core_init(struct pmc_dev *pmcdev)
-{
- struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN];
- int ret;
-
- pmc->map = &icl_reg_map;
-
- ret = get_primary_reg_base(pmc);
- if (ret)
- return ret;
-
- pmc_core_get_low_power_modes(pmcdev);
-
- return ret;
-}
+struct pmc_dev_info icl_pmc_dev = {
+ .map = &icl_reg_map,
+};
diff --git a/drivers/platform/x86/intel/pmc/lnl.c b/drivers/platform/x86/intel/pmc/lnl.c
index be029f12cdf4..da513c234714 100644
--- a/drivers/platform/x86/intel/pmc/lnl.c
+++ b/drivers/platform/x86/intel/pmc/lnl.c
@@ -13,7 +13,7 @@
#include "core.h"
-const struct pmc_bit_map lnl_ltr_show_map[] = {
+static const struct pmc_bit_map lnl_ltr_show_map[] = {
{"SOUTHPORT_A", CNP_PMC_LTR_SPA},
{"SOUTHPORT_B", CNP_PMC_LTR_SPB},
{"SATA", CNP_PMC_LTR_SATA},
@@ -55,7 +55,7 @@ const struct pmc_bit_map lnl_ltr_show_map[] = {
{}
};
-const struct pmc_bit_map lnl_power_gating_status_0_map[] = {
+static const struct pmc_bit_map lnl_power_gating_status_0_map[] = {
{"PMC_PGD0_PG_STS", BIT(0), 0},
{"FUSE_OSSE_PGD0_PG_STS", BIT(1), 0},
{"ESPISPI_PGD0_PG_STS", BIT(2), 0},
@@ -91,7 +91,7 @@ const struct pmc_bit_map lnl_power_gating_status_0_map[] = {
{}
};
-const struct pmc_bit_map lnl_power_gating_status_1_map[] = {
+static const struct pmc_bit_map lnl_power_gating_status_1_map[] = {
{"USBR0_PGD0_PG_STS", BIT(0), 1},
{"SUSRAM_PGD0_PG_STS", BIT(1), 1},
{"SMT1_PGD0_PG_STS", BIT(2), 1},
@@ -127,7 +127,7 @@ const struct pmc_bit_map lnl_power_gating_status_1_map[] = {
{}
};
-const struct pmc_bit_map lnl_power_gating_status_2_map[] = {
+static const struct pmc_bit_map lnl_power_gating_status_2_map[] = {
{"PSF8_PGD0_PG_STS", BIT(0), 0},
{"SBR16B2_PGD0_PG_STS", BIT(1), 0},
{"D2D_IPU_PGD0_PG_STS", BIT(2), 1},
@@ -163,7 +163,7 @@ const struct pmc_bit_map lnl_power_gating_status_2_map[] = {
{}
};
-const struct pmc_bit_map lnl_d3_status_0_map[] = {
+static const struct pmc_bit_map lnl_d3_status_0_map[] = {
{"LPSS_D3_STS", BIT(3), 1},
{"XDCI_D3_STS", BIT(4), 1},
{"XHCI_D3_STS", BIT(5), 1},
@@ -175,7 +175,7 @@ const struct pmc_bit_map lnl_d3_status_0_map[] = {
{}
};
-const struct pmc_bit_map lnl_d3_status_1_map[] = {
+static const struct pmc_bit_map lnl_d3_status_1_map[] = {
{"OSSE_SMT1_D3_STS", BIT(7), 0},
{"GBE_D3_STS", BIT(19), 0},
{"ITSS_D3_STS", BIT(23), 0},
@@ -185,7 +185,7 @@ const struct pmc_bit_map lnl_d3_status_1_map[] = {
{}
};
-const struct pmc_bit_map lnl_d3_status_2_map[] = {
+static const struct pmc_bit_map lnl_d3_status_2_map[] = {
{"ESE_D3_STS", BIT(0), 0},
{"CSMERTC_D3_STS", BIT(1), 0},
{"SUSRAM_D3_STS", BIT(2), 0},
@@ -205,7 +205,7 @@ const struct pmc_bit_map lnl_d3_status_2_map[] = {
{}
};
-const struct pmc_bit_map lnl_d3_status_3_map[] = {
+static const struct pmc_bit_map lnl_d3_status_3_map[] = {
{"THC0_D3_STS", BIT(14), 1},
{"THC1_D3_STS", BIT(15), 1},
{"OSSE_SMT3_D3_STS", BIT(21), 0},
@@ -213,14 +213,14 @@ const struct pmc_bit_map lnl_d3_status_3_map[] = {
{}
};
-const struct pmc_bit_map lnl_vnn_req_status_0_map[] = {
+static const struct pmc_bit_map lnl_vnn_req_status_0_map[] = {
{"LPSS_VNN_REQ_STS", BIT(3), 1},
{"OSSE_VNN_REQ_STS", BIT(15), 1},
{"ESPISPI_VNN_REQ_STS", BIT(18), 1},
{}
};
-const struct pmc_bit_map lnl_vnn_req_status_1_map[] = {
+static const struct pmc_bit_map lnl_vnn_req_status_1_map[] = {
{"NPK_VNN_REQ_STS", BIT(4), 1},
{"OSSE_SMT1_VNN_REQ_STS", BIT(7), 1},
{"DFXAGG_VNN_REQ_STS", BIT(8), 0},
@@ -232,7 +232,7 @@ const struct pmc_bit_map lnl_vnn_req_status_1_map[] = {
{}
};
-const struct pmc_bit_map lnl_vnn_req_status_2_map[] = {
+static const struct pmc_bit_map lnl_vnn_req_status_2_map[] = {
{"eSE_VNN_REQ_STS", BIT(0), 1},
{"CSMERTC_VNN_REQ_STS", BIT(1), 1},
{"CSE_VNN_REQ_STS", BIT(4), 1},
@@ -249,14 +249,14 @@ const struct pmc_bit_map lnl_vnn_req_status_2_map[] = {
{}
};
-const struct pmc_bit_map lnl_vnn_req_status_3_map[] = {
+static const struct pmc_bit_map lnl_vnn_req_status_3_map[] = {
{"DISP_SHIM_VNN_REQ_STS", BIT(2), 0},
{"DTS0_VNN_REQ_STS", BIT(7), 0},
{"GPIOCOM5_VNN_REQ_STS", BIT(11), 2},
{}
};
-const struct pmc_bit_map lnl_vnn_misc_status_map[] = {
+static const struct pmc_bit_map lnl_vnn_misc_status_map[] = {
{"CPU_C10_REQ_STS", BIT(0), 0},
{"TS_OFF_REQ_STS", BIT(1), 0},
{"PNDE_MET_REQ_STS", BIT(2), 1},
@@ -292,7 +292,7 @@ const struct pmc_bit_map lnl_vnn_misc_status_map[] = {
{}
};
-const struct pmc_bit_map lnl_clocksource_status_map[] = {
+static const struct pmc_bit_map lnl_clocksource_status_map[] = {
{"AON2_OFF_STS", BIT(0), 0},
{"AON3_OFF_STS", BIT(1), 1},
{"AON4_OFF_STS", BIT(2), 1},
@@ -317,7 +317,7 @@ const struct pmc_bit_map lnl_clocksource_status_map[] = {
{}
};
-const struct pmc_bit_map lnl_signal_status_map[] = {
+static const struct pmc_bit_map lnl_signal_status_map[] = {
{"LSX_Wake0_STS", BIT(0), 0},
{"LSX_Wake1_STS", BIT(1), 0},
{"LSX_Wake2_STS", BIT(2), 0},
@@ -337,7 +337,7 @@ const struct pmc_bit_map lnl_signal_status_map[] = {
{}
};
-const struct pmc_bit_map lnl_rsc_status_map[] = {
+static const struct pmc_bit_map lnl_rsc_status_map[] = {
{"Memory", 0, 1},
{"PSF0", 0, 1},
{"PSF4", 0, 1},
@@ -349,7 +349,7 @@ const struct pmc_bit_map lnl_rsc_status_map[] = {
{}
};
-const struct pmc_bit_map *lnl_lpm_maps[] = {
+static const struct pmc_bit_map *lnl_lpm_maps[] = {
lnl_clocksource_status_map,
lnl_power_gating_status_0_map,
lnl_power_gating_status_1_map,
@@ -367,7 +367,7 @@ const struct pmc_bit_map *lnl_lpm_maps[] = {
NULL
};
-const struct pmc_bit_map *lnl_blk_maps[] = {
+static const struct pmc_bit_map *lnl_blk_maps[] = {
lnl_power_gating_status_0_map,
lnl_power_gating_status_1_map,
lnl_power_gating_status_2_map,
@@ -386,7 +386,7 @@ const struct pmc_bit_map *lnl_blk_maps[] = {
NULL
};
-const struct pmc_bit_map lnl_pfear_map[] = {
+static const struct pmc_bit_map lnl_pfear_map[] = {
{"PMC_0", BIT(0)},
{"FUSE_OSSE", BIT(1)},
{"ESPISPI", BIT(2)},
@@ -498,12 +498,12 @@ const struct pmc_bit_map lnl_pfear_map[] = {
{}
};
-const struct pmc_bit_map *ext_lnl_pfear_map[] = {
+static const struct pmc_bit_map *ext_lnl_pfear_map[] = {
lnl_pfear_map,
NULL
};
-const struct pmc_reg_map lnl_socm_reg_map = {
+static const struct pmc_reg_map lnl_socm_reg_map = {
.pfear_sts = ext_lnl_pfear_map,
.slp_s0_offset = CNP_PMC_SLP_S0_RES_COUNTER_OFFSET,
.slp_s0_res_counter_step = TGL_PMC_SLP_S0_RES_COUNTER_STEP,
@@ -550,22 +550,15 @@ static int lnl_resume(struct pmc_dev *pmcdev)
return cnl_resume(pmcdev);
}
-int lnl_core_init(struct pmc_dev *pmcdev)
+static int lnl_core_init(struct pmc_dev *pmcdev, struct pmc_dev_info *pmc_dev_info)
{
- int ret;
- struct pmc *pmc = pmcdev->pmcs[PMC_IDX_SOC];
-
lnl_d3_fixup();
-
- pmcdev->suspend = cnl_suspend;
- pmcdev->resume = lnl_resume;
-
- pmc->map = &lnl_socm_reg_map;
- ret = get_primary_reg_base(pmc);
- if (ret)
- return ret;
-
- pmc_core_get_low_power_modes(pmcdev);
-
- return 0;
+ return generic_core_init(pmcdev, pmc_dev_info);
}
+
+struct pmc_dev_info lnl_pmc_dev = {
+ .map = &lnl_socm_reg_map,
+ .suspend = cnl_suspend,
+ .resume = lnl_resume,
+ .init = lnl_core_init,
+};
diff --git a/drivers/platform/x86/intel/pmc/mtl.c b/drivers/platform/x86/intel/pmc/mtl.c
index 02949fed76e9..8862829694a7 100644
--- a/drivers/platform/x86/intel/pmc/mtl.c
+++ b/drivers/platform/x86/intel/pmc/mtl.c
@@ -102,12 +102,12 @@ const struct pmc_bit_map mtl_socm_pfear_map[] = {
{}
};
-const struct pmc_bit_map *ext_mtl_socm_pfear_map[] = {
+static const struct pmc_bit_map *ext_mtl_socm_pfear_map[] = {
mtl_socm_pfear_map,
NULL
};
-const struct pmc_bit_map mtl_socm_ltr_show_map[] = {
+static const struct pmc_bit_map mtl_socm_ltr_show_map[] = {
{"SOUTHPORT_A", CNP_PMC_LTR_SPA},
{"SOUTHPORT_B", CNP_PMC_LTR_SPB},
{"SATA", CNP_PMC_LTR_SATA},
@@ -141,7 +141,7 @@ const struct pmc_bit_map mtl_socm_ltr_show_map[] = {
{}
};
-const struct pmc_bit_map mtl_socm_clocksource_status_map[] = {
+static const struct pmc_bit_map mtl_socm_clocksource_status_map[] = {
{"AON2_OFF_STS", BIT(0)},
{"AON3_OFF_STS", BIT(1)},
{"AON4_OFF_STS", BIT(2)},
@@ -167,7 +167,7 @@ const struct pmc_bit_map mtl_socm_clocksource_status_map[] = {
{}
};
-const struct pmc_bit_map mtl_socm_power_gating_status_0_map[] = {
+static const struct pmc_bit_map mtl_socm_power_gating_status_0_map[] = {
{"PMC_PGD0_PG_STS", BIT(0)},
{"DMI_PGD0_PG_STS", BIT(1)},
{"ESPISPI_PGD0_PG_STS", BIT(2)},
@@ -203,7 +203,7 @@ const struct pmc_bit_map mtl_socm_power_gating_status_0_map[] = {
{}
};
-const struct pmc_bit_map mtl_socm_power_gating_status_1_map[] = {
+static const struct pmc_bit_map mtl_socm_power_gating_status_1_map[] = {
{"USBR0_PGD0_PG_STS", BIT(0)},
{"SUSRAM_PGD0_PG_STS", BIT(1)},
{"SMT1_PGD0_PG_STS", BIT(2)},
@@ -239,7 +239,7 @@ const struct pmc_bit_map mtl_socm_power_gating_status_1_map[] = {
{}
};
-const struct pmc_bit_map mtl_socm_power_gating_status_2_map[] = {
+static const struct pmc_bit_map mtl_socm_power_gating_status_2_map[] = {
{"PSF8_PGD0_PG_STS", BIT(0)},
{"FIA_PGD0_PG_STS", BIT(1)},
{"SOC_D2D_PGD1_PG_STS", BIT(2)},
@@ -291,7 +291,7 @@ const struct pmc_bit_map mtl_socm_d3_status_1_map[] = {
{}
};
-const struct pmc_bit_map mtl_socm_d3_status_2_map[] = {
+static const struct pmc_bit_map mtl_socm_d3_status_2_map[] = {
{"GNA_D3_STS", BIT(0)},
{"CSMERTC_D3_STS", BIT(1)},
{"SUSRAM_D3_STS", BIT(2)},
@@ -310,7 +310,7 @@ const struct pmc_bit_map mtl_socm_d3_status_2_map[] = {
{}
};
-const struct pmc_bit_map mtl_socm_d3_status_3_map[] = {
+static const struct pmc_bit_map mtl_socm_d3_status_3_map[] = {
{"ESE_D3_STS", BIT(2)},
{"GBETSN_D3_STS", BIT(13)},
{"THC0_D3_STS", BIT(14)},
@@ -353,7 +353,7 @@ const struct pmc_bit_map mtl_socm_vnn_req_status_2_map[] = {
{}
};
-const struct pmc_bit_map mtl_socm_vnn_req_status_3_map[] = {
+static const struct pmc_bit_map mtl_socm_vnn_req_status_3_map[] = {
{"ESE_VNN_REQ_STS", BIT(2)},
{"DTS0_VNN_REQ_STS", BIT(7)},
{"GPIOCOM5_VNN_REQ_STS", BIT(11)},
@@ -432,7 +432,7 @@ const struct pmc_bit_map mtl_socm_signal_status_map[] = {
{}
};
-const struct pmc_bit_map *mtl_socm_lpm_maps[] = {
+static const struct pmc_bit_map *mtl_socm_lpm_maps[] = {
mtl_socm_clocksource_status_map,
mtl_socm_power_gating_status_0_map,
mtl_socm_power_gating_status_1_map,
@@ -476,7 +476,7 @@ const struct pmc_reg_map mtl_socm_reg_map = {
.lpm_reg_index = MTL_LPM_REG_INDEX,
};
-const struct pmc_bit_map mtl_ioep_pfear_map[] = {
+static const struct pmc_bit_map mtl_ioep_pfear_map[] = {
{"PMC_0", BIT(0)},
{"OPI", BIT(1)},
{"TCSS", BIT(2)},
@@ -563,12 +563,12 @@ const struct pmc_bit_map mtl_ioep_pfear_map[] = {
{}
};
-const struct pmc_bit_map *ext_mtl_ioep_pfear_map[] = {
+static const struct pmc_bit_map *ext_mtl_ioep_pfear_map[] = {
mtl_ioep_pfear_map,
NULL
};
-const struct pmc_bit_map mtl_ioep_ltr_show_map[] = {
+static const struct pmc_bit_map mtl_ioep_ltr_show_map[] = {
{"SOUTHPORT_A", CNP_PMC_LTR_SPA},
{"SOUTHPORT_B", CNP_PMC_LTR_SPB},
{"SATA", CNP_PMC_LTR_SATA},
@@ -600,7 +600,7 @@ const struct pmc_bit_map mtl_ioep_ltr_show_map[] = {
{}
};
-const struct pmc_bit_map mtl_ioep_clocksource_status_map[] = {
+static const struct pmc_bit_map mtl_ioep_clocksource_status_map[] = {
{"AON2_OFF_STS", BIT(0)},
{"AON3_OFF_STS", BIT(1)},
{"AON4_OFF_STS", BIT(2)},
@@ -623,7 +623,7 @@ const struct pmc_bit_map mtl_ioep_clocksource_status_map[] = {
{}
};
-const struct pmc_bit_map mtl_ioep_power_gating_status_0_map[] = {
+static const struct pmc_bit_map mtl_ioep_power_gating_status_0_map[] = {
{"PMC_PGD0_PG_STS", BIT(0)},
{"DMI_PGD0_PG_STS", BIT(1)},
{"TCSS_PGD0_PG_STS", BIT(2)},
@@ -650,7 +650,7 @@ const struct pmc_bit_map mtl_ioep_power_gating_status_0_map[] = {
{}
};
-const struct pmc_bit_map mtl_ioep_power_gating_status_1_map[] = {
+static const struct pmc_bit_map mtl_ioep_power_gating_status_1_map[] = {
{"PSF9_PGD0_PG_STS", BIT(0)},
{"MPFPW4_PGD0_PG_STS", BIT(1)},
{"SBR0_PGD0_PG_STS", BIT(8)},
@@ -668,7 +668,7 @@ const struct pmc_bit_map mtl_ioep_power_gating_status_1_map[] = {
{}
};
-const struct pmc_bit_map mtl_ioep_power_gating_status_2_map[] = {
+static const struct pmc_bit_map mtl_ioep_power_gating_status_2_map[] = {
{"FIA_PGD0_PG_STS", BIT(1)},
{"FIA_P_PGD0_PG_STS", BIT(3)},
{"TAM_PGD0_PG_STS", BIT(4)},
@@ -680,7 +680,7 @@ const struct pmc_bit_map mtl_ioep_power_gating_status_2_map[] = {
{}
};
-const struct pmc_bit_map mtl_ioep_d3_status_0_map[] = {
+static const struct pmc_bit_map mtl_ioep_d3_status_0_map[] = {
{"SPF_D3_STS", BIT(0)},
{"SPA_D3_STS", BIT(12)},
{"SPB_D3_STS", BIT(13)},
@@ -691,43 +691,43 @@ const struct pmc_bit_map mtl_ioep_d3_status_0_map[] = {
{}
};
-const struct pmc_bit_map mtl_ioep_d3_status_1_map[] = {
+static const struct pmc_bit_map mtl_ioep_d3_status_1_map[] = {
{"GBETSN1_D3_STS", BIT(14)},
{"P2S_D3_STS", BIT(24)},
{}
};
-const struct pmc_bit_map mtl_ioep_d3_status_2_map[] = {
+static const struct pmc_bit_map mtl_ioep_d3_status_2_map[] = {
{}
};
-const struct pmc_bit_map mtl_ioep_d3_status_3_map[] = {
+static const struct pmc_bit_map mtl_ioep_d3_status_3_map[] = {
{"GBETSN_D3_STS", BIT(13)},
{"ACE_D3_STS", BIT(23)},
{}
};
-const struct pmc_bit_map mtl_ioep_vnn_req_status_0_map[] = {
+static const struct pmc_bit_map mtl_ioep_vnn_req_status_0_map[] = {
{"FIA_VNN_REQ_STS", BIT(17)},
{}
};
-const struct pmc_bit_map mtl_ioep_vnn_req_status_1_map[] = {
+static const struct pmc_bit_map mtl_ioep_vnn_req_status_1_map[] = {
{"DFXAGG_VNN_REQ_STS", BIT(8)},
{}
};
-const struct pmc_bit_map mtl_ioep_vnn_req_status_2_map[] = {
+static const struct pmc_bit_map mtl_ioep_vnn_req_status_2_map[] = {
{}
};
-const struct pmc_bit_map mtl_ioep_vnn_req_status_3_map[] = {
+static const struct pmc_bit_map mtl_ioep_vnn_req_status_3_map[] = {
{"DTS0_VNN_REQ_STS", BIT(7)},
{"DISP_VNN_REQ_STS", BIT(19)},
{}
};
-const struct pmc_bit_map mtl_ioep_vnn_misc_status_map[] = {
+static const struct pmc_bit_map mtl_ioep_vnn_misc_status_map[] = {
{"CPU_C10_REQ_STS", BIT(0)},
{"TS_OFF_REQ_STS", BIT(1)},
{"PNDE_MET_REQ_STS", BIT(2)},
@@ -762,7 +762,7 @@ const struct pmc_bit_map mtl_ioep_vnn_misc_status_map[] = {
{}
};
-const struct pmc_bit_map *mtl_ioep_lpm_maps[] = {
+static const struct pmc_bit_map *mtl_ioep_lpm_maps[] = {
mtl_ioep_clocksource_status_map,
mtl_ioep_power_gating_status_0_map,
mtl_ioep_power_gating_status_1_map,
@@ -800,7 +800,7 @@ const struct pmc_reg_map mtl_ioep_reg_map = {
.lpm_reg_index = MTL_LPM_REG_INDEX,
};
-const struct pmc_bit_map mtl_ioem_pfear_map[] = {
+static const struct pmc_bit_map mtl_ioem_pfear_map[] = {
{"PMC_0", BIT(0)},
{"OPI", BIT(1)},
{"TCSS", BIT(2)},
@@ -887,12 +887,12 @@ const struct pmc_bit_map mtl_ioem_pfear_map[] = {
{}
};
-const struct pmc_bit_map *ext_mtl_ioem_pfear_map[] = {
+static const struct pmc_bit_map *ext_mtl_ioem_pfear_map[] = {
mtl_ioem_pfear_map,
NULL
};
-const struct pmc_bit_map mtl_ioem_power_gating_status_1_map[] = {
+static const struct pmc_bit_map mtl_ioem_power_gating_status_1_map[] = {
{"PSF9_PGD0_PG_STS", BIT(0)},
{"MPFPW4_PGD0_PG_STS", BIT(1)},
{"SBR0_PGD0_PG_STS", BIT(8)},
@@ -909,7 +909,7 @@ const struct pmc_bit_map mtl_ioem_power_gating_status_1_map[] = {
{}
};
-const struct pmc_bit_map *mtl_ioem_lpm_maps[] = {
+static const struct pmc_bit_map *mtl_ioem_lpm_maps[] = {
mtl_ioep_clocksource_status_map,
mtl_ioep_power_gating_status_0_map,
mtl_ioem_power_gating_status_1_map,
@@ -927,7 +927,7 @@ const struct pmc_bit_map *mtl_ioem_lpm_maps[] = {
NULL
};
-const struct pmc_reg_map mtl_ioem_reg_map = {
+static const struct pmc_reg_map mtl_ioem_reg_map = {
.regmap_length = MTL_IOE_PMC_MMIO_REG_LEN,
.pfear_sts = ext_mtl_ioem_pfear_map,
.ppfear0_offset = CNP_PMC_HOST_PPFEAR0A,
@@ -990,39 +990,18 @@ static int mtl_resume(struct pmc_dev *pmcdev)
return cnl_resume(pmcdev);
}
-int mtl_core_init(struct pmc_dev *pmcdev)
+static int mtl_core_init(struct pmc_dev *pmcdev, struct pmc_dev_info *pmc_dev_info)
{
- struct pmc *pmc = pmcdev->pmcs[PMC_IDX_SOC];
- int ret;
- int func = 2;
- bool ssram_init = true;
-
mtl_d3_fixup();
-
- pmcdev->suspend = cnl_suspend;
- pmcdev->resume = mtl_resume;
- pmcdev->regmap_list = mtl_pmc_info_list;
-
- /*
- * If ssram init fails use legacy method to at least get the
- * primary PMC
- */
- ret = pmc_core_ssram_init(pmcdev, func);
- if (ret) {
- ssram_init = false;
- dev_warn(&pmcdev->pdev->dev,
- "ssram init failed, %d, using legacy init\n", ret);
- pmc->map = &mtl_socm_reg_map;
- ret = get_primary_reg_base(pmc);
- if (ret)
- return ret;
- }
-
- pmc_core_get_low_power_modes(pmcdev);
- pmc_core_punit_pmt_init(pmcdev, MTL_PMT_DMU_GUID);
-
- if (ssram_init)
- return pmc_core_ssram_get_lpm_reqs(pmcdev);
-
- return 0;
+ return generic_core_init(pmcdev, pmc_dev_info);
}
+
+struct pmc_dev_info mtl_pmc_dev = {
+ .pci_func = 2,
+ .dmu_guid = MTL_PMT_DMU_GUID,
+ .regmap_list = mtl_pmc_info_list,
+ .map = &mtl_socm_reg_map,
+ .suspend = cnl_suspend,
+ .resume = mtl_resume,
+ .init = mtl_core_init,
+};
diff --git a/drivers/platform/x86/intel/pmc/ptl.c b/drivers/platform/x86/intel/pmc/ptl.c
new file mode 100644
index 000000000000..394515af60d6
--- /dev/null
+++ b/drivers/platform/x86/intel/pmc/ptl.c
@@ -0,0 +1,550 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * This file contains platform specific structure definitions
+ * and init function used by Panther Lake PCH.
+ *
+ * Copyright (c) 2025, Intel Corporation.
+ */
+
+#include <linux/pci.h>
+
+#include "core.h"
+
+static const struct pmc_bit_map ptl_pcdp_pfear_map[] = {
+ {"PMC_0", BIT(0)},
+ {"FUSE_OSSE", BIT(1)},
+ {"ESPISPI", BIT(2)},
+ {"XHCI", BIT(3)},
+ {"SPA", BIT(4)},
+ {"SPB", BIT(5)},
+ {"MPFPW2", BIT(6)},
+ {"GBE", BIT(7)},
+
+ {"SBR16B20", BIT(0)},
+ {"SBR8B20", BIT(1)},
+ {"SBR16B21", BIT(2)},
+ {"DBG_SBR16B", BIT(3)},
+ {"OSSE_HOTHAM", BIT(4)},
+ {"D2D_DISP_1", BIT(5)},
+ {"LPSS", BIT(6)},
+ {"LPC", BIT(7)},
+
+ {"SMB", BIT(0)},
+ {"ISH", BIT(1)},
+ {"SBR16B2", BIT(2)},
+ {"NPK_0", BIT(3)},
+ {"D2D_NOC_1", BIT(4)},
+ {"SBR8B2", BIT(5)},
+ {"FUSE", BIT(6)},
+ {"SBR16B0", BIT(7)},
+
+ {"PSF0", BIT(0)},
+ {"XDCI", BIT(1)},
+ {"EXI", BIT(2)},
+ {"CSE", BIT(3)},
+ {"KVMCC", BIT(4)},
+ {"PMT", BIT(5)},
+ {"CLINK", BIT(6)},
+ {"PTIO", BIT(7)},
+
+ {"USBR0", BIT(0)},
+ {"SUSRAM", BIT(1)},
+ {"SMT1", BIT(2)},
+ {"MPFPW1", BIT(3)},
+ {"SMS2", BIT(4)},
+ {"SMS1", BIT(5)},
+ {"CSMERTC", BIT(6)},
+ {"CSMEPSF", BIT(7)},
+
+ {"D2D_NOC_0", BIT(0)},
+ {"ESE", BIT(1)},
+ {"P2SB8B", BIT(2)},
+ {"SBR16B7", BIT(3)},
+ {"SBR16B3", BIT(4)},
+ {"OSSE_SMT1", BIT(5)},
+ {"D2D_DISP", BIT(6)},
+ {"DBG_SBR", BIT(7)},
+
+ {"U3FPW1", BIT(0)},
+ {"FIA_X", BIT(1)},
+ {"PSF4", BIT(2)},
+ {"CNVI", BIT(3)},
+ {"UFSX2", BIT(4)},
+ {"ENDBG", BIT(5)},
+ {"DBC", BIT(6)},
+ {"FIA_PG", BIT(7)},
+
+ {"D2D_IPU", BIT(0)},
+ {"NPK1", BIT(1)},
+ {"FIACPCB_X", BIT(2)},
+ {"SBR8B4", BIT(3)},
+ {"DBG_PSF", BIT(4)},
+ {"PSF6", BIT(5)},
+ {"UFSPW1", BIT(6)},
+ {"FIA_U", BIT(7)},
+
+ {"PSF8", BIT(0)},
+ {"SBR16B4", BIT(1)},
+ {"SBR16B5", BIT(2)},
+ {"FIACPCB_U", BIT(3)},
+ {"TAM", BIT(4)},
+ {"D2D_NOC_2", BIT(5)},
+ {"TBTLSX", BIT(6)},
+ {"THC0", BIT(7)},
+
+ {"THC1", BIT(0)},
+ {"PMC_1", BIT(1)},
+ {"SBR8B1", BIT(2)},
+ {"TCSS", BIT(3)},
+ {"DISP_PGA", BIT(4)},
+ {"SBR16B1", BIT(5)},
+ {"SBRG", BIT(6)},
+ {"PSF5", BIT(7)},
+
+ {"P2SB16B", BIT(0)},
+ {"ACE_0", BIT(1)},
+ {"ACE_1", BIT(2)},
+ {"ACE_2", BIT(3)},
+ {"ACE_3", BIT(4)},
+ {"ACE_4", BIT(5)},
+ {"ACE_5", BIT(6)},
+ {"ACE_6", BIT(7)},
+
+ {"ACE_7", BIT(0)},
+ {"ACE_8", BIT(1)},
+ {"ACE_9", BIT(2)},
+ {"ACE_10", BIT(3)},
+ {"FIACPCB_PG", BIT(4)},
+ {"SBR16B6", BIT(5)},
+ {"OSSE", BIT(6)},
+ {"SBR8B0", BIT(7)},
+ {}
+};
+
+static const struct pmc_bit_map *ext_ptl_pcdp_pfear_map[] = {
+ ptl_pcdp_pfear_map,
+ NULL
+};
+
+static const struct pmc_bit_map ptl_pcdp_ltr_show_map[] = {
+ {"SOUTHPORT_A", CNP_PMC_LTR_SPA},
+ {"SOUTHPORT_B", CNP_PMC_LTR_SPB},
+ {"SATA", CNP_PMC_LTR_SATA},
+ {"GIGABIT_ETHERNET", CNP_PMC_LTR_GBE},
+ {"XHCI", CNP_PMC_LTR_XHCI},
+ {"SOUTHPORT_F", ADL_PMC_LTR_SPF},
+ {"ME", CNP_PMC_LTR_ME},
+ {"SATA1", CNP_PMC_LTR_EVA},
+ {"SOUTHPORT_C", CNP_PMC_LTR_SPC},
+ {"HD_AUDIO", CNP_PMC_LTR_AZ},
+ {"CNV", CNP_PMC_LTR_CNV},
+ {"LPSS", CNP_PMC_LTR_LPSS},
+ {"SOUTHPORT_D", CNP_PMC_LTR_SPD},
+ {"SOUTHPORT_E", CNP_PMC_LTR_SPE},
+ {"SATA2", PTL_PMC_LTR_SATA2},
+ {"ESPI", CNP_PMC_LTR_ESPI},
+ {"SCC", CNP_PMC_LTR_SCC},
+ {"ISH", CNP_PMC_LTR_ISH},
+ {"UFSX2", CNP_PMC_LTR_UFSX2},
+ {"EMMC", CNP_PMC_LTR_EMMC},
+ {"WIGIG", ICL_PMC_LTR_WIGIG},
+ {"THC0", TGL_PMC_LTR_THC0},
+ {"THC1", TGL_PMC_LTR_THC1},
+ {"SOUTHPORT_G", MTL_PMC_LTR_SPG},
+ {"ESE", MTL_PMC_LTR_ESE},
+ {"IOE_PMC", MTL_PMC_LTR_IOE_PMC},
+ {"DMI3", ARL_PMC_LTR_DMI3},
+ {"OSSE", LNL_PMC_LTR_OSSE},
+
+ /* Below two cannot be used for LTR_IGNORE */
+ {"CURRENT_PLATFORM", PTL_PMC_LTR_CUR_PLT},
+ {"AGGREGATED_SYSTEM", PTL_PMC_LTR_CUR_ASLT},
+ {}
+};
+
+static const struct pmc_bit_map ptl_pcdp_clocksource_status_map[] = {
+ {"AON2_OFF_STS", BIT(0), 1},
+ {"AON3_OFF_STS", BIT(1), 0},
+ {"AON4_OFF_STS", BIT(2), 1},
+ {"AON5_OFF_STS", BIT(3), 1},
+ {"AON1_OFF_STS", BIT(4), 0},
+ {"XTAL_LVM_OFF_STS", BIT(5), 0},
+ {"MPFPW1_0_PLL_OFF_STS", BIT(6), 1},
+ {"USB3_PLL_OFF_STS", BIT(8), 1},
+ {"AON3_SPL_OFF_STS", BIT(9), 1},
+ {"MPFPW2_0_PLL_OFF_STS", BIT(12), 1},
+ {"XTAL_AGGR_OFF_STS", BIT(17), 1},
+ {"USB2_PLL_OFF_STS", BIT(18), 0},
+ {"SAF_PLL_OFF_STS", BIT(19), 1},
+ {"SE_TCSS_PLL_OFF_STS", BIT(20), 1},
+ {"DDI_PLL_OFF_STS", BIT(21), 1},
+ {"FILTER_PLL_OFF_STS", BIT(22), 1},
+ {"ACE_PLL_OFF_STS", BIT(24), 0},
+ {"FABRIC_PLL_OFF_STS", BIT(25), 1},
+ {"SOC_PLL_OFF_STS", BIT(26), 1},
+ {"REF_PLL_OFF_STS", BIT(28), 1},
+ {"IMG_PLL_OFF_STS", BIT(29), 1},
+ {"RTC_PLL_OFF_STS", BIT(31), 0},
+ {}
+};
+
+static const struct pmc_bit_map ptl_pcdp_power_gating_status_0_map[] = {
+ {"PMC_PGD0_PG_STS", BIT(0), 0},
+ {"FUSE_OSSE_PGD0_PG_STS", BIT(1), 0},
+ {"ESPISPI_PGD0_PG_STS", BIT(2), 0},
+ {"XHCI_PGD0_PG_STS", BIT(3), 1},
+ {"SPA_PGD0_PG_STS", BIT(4), 1},
+ {"SPB_PGD0_PG_STS", BIT(5), 1},
+ {"MPFPW2_PGD0_PG_STS", BIT(6), 0},
+ {"GBE_PGD0_PG_STS", BIT(7), 1},
+ {"SBR16B20_PGD0_PG_STS", BIT(8), 0},
+ {"SBR8B20_PGD0_PG_STS", BIT(9), 0},
+ {"SBR16B21_PGD0_PG_STS", BIT(10), 0},
+ {"DBG_PGD0_PG_STS", BIT(11), 0},
+ {"OSSE_HOTHAM_PGD0_PG_STS", BIT(12), 1},
+ {"D2D_DISP_PGD1_PG_STS", BIT(13), 1},
+ {"LPSS_PGD0_PG_STS", BIT(14), 1},
+ {"LPC_PGD0_PG_STS", BIT(15), 0},
+ {"SMB_PGD0_PG_STS", BIT(16), 0},
+ {"ISH_PGD0_PG_STS", BIT(17), 0},
+ {"SBR16B2_PGD0_PG_STS", BIT(18), 0},
+ {"NPK_PGD0_PG_STS", BIT(19), 0},
+ {"D2D_NOC_PGD1_PG_STS", BIT(20), 1},
+ {"SBR8B2_PGD0_PG_STS", BIT(21), 0},
+ {"FUSE_PGD0_PG_STS", BIT(22), 0},
+ {"SBR16B0_PGD0_PG_STS", BIT(23), 0},
+ {"PSF0_PGD0_PG_STS", BIT(24), 0},
+ {"XDCI_PGD0_PG_STS", BIT(25), 1},
+ {"EXI_PGD0_PG_STS", BIT(26), 0},
+ {"CSE_PGD0_PG_STS", BIT(27), 1},
+ {"KVMCC_PGD0_PG_STS", BIT(28), 1},
+ {"PMT_PGD0_PG_STS", BIT(29), 1},
+ {"CLINK_PGD0_PG_STS", BIT(30), 1},
+ {"PTIO_PGD0_PG_STS", BIT(31), 1},
+ {}
+};
+
+static const struct pmc_bit_map ptl_pcdp_power_gating_status_1_map[] = {
+ {"USBR0_PGD0_PG_STS", BIT(0), 1},
+ {"SUSRAM_PGD0_PG_STS", BIT(1), 1},
+ {"SMT1_PGD0_PG_STS", BIT(2), 1},
+ {"MPFPW1_PGD0_PG_STS", BIT(3), 0},
+ {"SMS2_PGD0_PG_STS", BIT(4), 1},
+ {"SMS1_PGD0_PG_STS", BIT(5), 1},
+ {"CSMERTC_PGD0_PG_STS", BIT(6), 0},
+ {"CSMEPSF_PGD0_PG_STS", BIT(7), 0},
+ {"D2D_NOC_PGD0_PG_STS", BIT(8), 0},
+ {"ESE_PGD0_PG_STS", BIT(9), 1},
+ {"P2SB8B_PGD0_PG_STS", BIT(10), 1},
+ {"SBR16B7_PGD0_PG_STS", BIT(11), 0},
+ {"SBR16B3_PGD0_PG_STS", BIT(12), 0},
+ {"OSSE_SMT1_PGD0_PG_STS", BIT(13), 1},
+ {"D2D_DISP_PGD0_PG_STS", BIT(14), 1},
+ {"DBG_SBR_PGD0_PG_STS", BIT(15), 0},
+ {"U3FPW1_PGD0_PG_STS", BIT(16), 0},
+ {"FIA_X_PGD0_PG_STS", BIT(17), 0},
+ {"PSF4_PGD0_PG_STS", BIT(18), 0},
+ {"CNVI_PGD0_PG_STS", BIT(19), 0},
+ {"UFSX2_PGD0_PG_STS", BIT(20), 1},
+ {"ENDBG_PGD0_PG_STS", BIT(21), 0},
+ {"DBC_PGD0_PG_STS", BIT(22), 0},
+ {"FIA_PG_PGD0_PG_STS", BIT(23), 0},
+ {"D2D_IPU_PGD0_PG_STS", BIT(24), 1},
+ {"NPK_PGD1_PG_STS", BIT(25), 0},
+ {"FIACPCB_X_PGD0_PG_STS", BIT(26), 0},
+ {"SBR8B4_PGD0_PG_STS", BIT(27), 0},
+ {"DBG_PSF_PGD0_PG_STS", BIT(28), 0},
+ {"PSF6_PGD0_PG_STS", BIT(29), 0},
+ {"UFSPW1_PGD0_PG_STS", BIT(30), 0},
+ {"FIA_U_PGD0_PG_STS", BIT(31), 0},
+ {}
+};
+
+static const struct pmc_bit_map ptl_pcdp_power_gating_status_2_map[] = {
+ {"PSF8_PGD0_PG_STS", BIT(0), 0},
+ {"SBR16B4_PGD0_PG_STS", BIT(1), 0},
+ {"SBR16B5_PGD0_PG_STS", BIT(2), 0},
+ {"FIACPCB_U_PGD0_PG_STS", BIT(3), 0},
+ {"TAM_PGD0_PG_STS", BIT(4), 1},
+ {"D2D_NOC_PGD0_PG_STS", BIT(5), 1},
+ {"TBTLSX_PGD0_PG_STS", BIT(6), 1},
+ {"THC0_PGD0_PG_STS", BIT(7), 1},
+ {"THC1_PGD0_PG_STS", BIT(8), 1},
+ {"PMC_PGD1_PG_STS", BIT(9), 0},
+ {"SBR8B1_PGD0_PG_STS", BIT(10), 0},
+ {"TCSS_PGD0_PG_STS", BIT(11), 0},
+ {"DISP_PGA_PGD0_PG_STS", BIT(12), 0},
+ {"SBR16B1_PGD0_PG_STS", BIT(13), 0},
+ {"SBRG_PGD0_PG_STS", BIT(14), 0},
+ {"PSF5_PGD0_PG_STS", BIT(15), 0},
+ {"P2SB16B_PGD0_PG_STS", BIT(16), 1},
+ {"ACE_PGD0_PG_STS", BIT(17), 0},
+ {"ACE_PGD1_PG_STS", BIT(18), 0},
+ {"ACE_PGD2_PG_STS", BIT(19), 0},
+ {"ACE_PGD3_PG_STS", BIT(20), 0},
+ {"ACE_PGD4_PG_STS", BIT(21), 0},
+ {"ACE_PGD5_PG_STS", BIT(22), 0},
+ {"ACE_PGD6_PG_STS", BIT(23), 0},
+ {"ACE_PGD7_PG_STS", BIT(24), 0},
+ {"ACE_PGD8_PG_STS", BIT(25), 0},
+ {"ACE_PGD9_PG_STS", BIT(26), 0},
+ {"ACE_PGD10_PG_STS", BIT(27), 0},
+ {"FIACPCB_PG_PGD0_PG_STS", BIT(28), 0},
+ {"SBR16B6_PGD0_PG_STS", BIT(29), 0},
+ {"OSSE_PGD0_PG_STS", BIT(30), 1},
+ {"SBR8B0_PGD0_PG_STS", BIT(31), 0},
+ {}
+};
+
+static const struct pmc_bit_map ptl_pcdp_d3_status_0_map[] = {
+ {"LPSS_D3_STS", BIT(3), 1},
+ {"XDCI_D3_STS", BIT(4), 1},
+ {"XHCI_D3_STS", BIT(5), 1},
+ {"OSSE_D3_STS", BIT(6), 0},
+ {"SPA_D3_STS", BIT(12), 0},
+ {"SPB_D3_STS", BIT(13), 0},
+ {"ESPISPI_D3_STS", BIT(18), 0},
+ {"PSTH_D3_STS", BIT(21), 0},
+ {"OSSE_SMT1_D3_STS", BIT(30), 0},
+ {}
+};
+
+static const struct pmc_bit_map ptl_pcdp_d3_status_1_map[] = {
+ {"GBE_D3_STS", BIT(19), 0},
+ {"ITSS_D3_STS", BIT(23), 0},
+ {"CNVI_D3_STS", BIT(27), 0},
+ {"UFSX2_D3_STS", BIT(28), 1},
+ {"OSSE_HOTHAM_D3_STS", BIT(29), 0},
+ {"ESE_D3_STS", BIT(30), 0},
+ {}
+};
+
+static const struct pmc_bit_map ptl_pcdp_d3_status_2_map[] = {
+ {"CSMERTC_D3_STS", BIT(1), 0},
+ {"SUSRAM_D3_STS", BIT(2), 0},
+ {"CSE_D3_STS", BIT(4), 0},
+ {"KVMCC_D3_STS", BIT(5), 0},
+ {"USBR0_D3_STS", BIT(6), 0},
+ {"ISH_D3_STS", BIT(7), 0},
+ {"SMT1_D3_STS", BIT(8), 0},
+ {"SMT2_D3_STS", BIT(9), 0},
+ {"SMT3_D3_STS", BIT(10), 0},
+ {"OSSE_SMT2_D3_STS", BIT(12), 0},
+ {"CLINK_D3_STS", BIT(14), 0},
+ {"PTIO_D3_STS", BIT(16), 0},
+ {"PMT_D3_STS", BIT(17), 0},
+ {"SMS1_D3_STS", BIT(18), 0},
+ {"SMS2_D3_STS", BIT(19), 0},
+ {}
+};
+
+static const struct pmc_bit_map ptl_pcdp_d3_status_3_map[] = {
+ {"THC0_D3_STS", BIT(14), 1},
+ {"THC1_D3_STS", BIT(15), 1},
+ {"OSSE_SMT3_D3_STS", BIT(18), 0},
+ {"ACE_D3_STS", BIT(23), 0},
+ {}
+};
+
+static const struct pmc_bit_map ptl_pcdp_vnn_req_status_0_map[] = {
+ {"LPSS_VNN_REQ_STS", BIT(3), 1},
+ {"OSSE_VNN_REQ_STS", BIT(6), 1},
+ {"ESPISPI_VNN_REQ_STS", BIT(18), 1},
+ {"OSSE_SMT1_VNN_REQ_STS", BIT(30), 1},
+ {}
+};
+
+static const struct pmc_bit_map ptl_pcdp_vnn_req_status_1_map[] = {
+ {"NPK_VNN_REQ_STS", BIT(4), 1},
+ {"DFXAGG_VNN_REQ_STS", BIT(8), 0},
+ {"EXI_VNN_REQ_STS", BIT(9), 1},
+ {"P2D_VNN_REQ_STS", BIT(18), 1},
+ {"GBE_VNN_REQ_STS", BIT(19), 1},
+ {"SMB_VNN_REQ_STS", BIT(25), 1},
+ {"LPC_VNN_REQ_STS", BIT(26), 0},
+ {"ESE_VNN_REQ_STS", BIT(30), 1},
+ {}
+};
+
+static const struct pmc_bit_map ptl_pcdp_vnn_req_status_2_map[] = {
+ {"CSMERTC_VNN_REQ_STS", BIT(1), 1},
+ {"CSE_VNN_REQ_STS", BIT(4), 1},
+ {"ISH_VNN_REQ_STS", BIT(7), 1},
+ {"SMT1_VNN_REQ_STS", BIT(8), 1},
+ {"CLINK_VNN_REQ_STS", BIT(14), 1},
+ {"SMS1_VNN_REQ_STS", BIT(18), 1},
+ {"SMS2_VNN_REQ_STS", BIT(19), 1},
+ {"GPIOCOM4_VNN_REQ_STS", BIT(20), 1},
+ {"GPIOCOM3_VNN_REQ_STS", BIT(21), 1},
+ {"GPIOCOM1_VNN_REQ_STS", BIT(23), 1},
+ {"GPIOCOM0_VNN_REQ_STS", BIT(24), 1},
+ {"DISP_SHIM_VNN_REQ_STS", BIT(26), 1},
+ {}
+};
+
+static const struct pmc_bit_map ptl_pcdp_vnn_req_status_3_map[] = {
+ {"DTS0_VNN_REQ_STS", BIT(7), 0},
+ {"GPIOCOM5_VNN_REQ_STS", BIT(11), 1},
+ {}
+};
+
+static const struct pmc_bit_map ptl_pcdp_vnn_misc_status_map[] = {
+ {"CPU_C10_REQ_STS", BIT(0), 0},
+ {"TS_OFF_REQ_STS", BIT(1), 0},
+ {"PNDE_MET_REQ_STS", BIT(2), 1},
+ {"PG5_PMA0_REQ_STS", BIT(3), 0},
+ {"FW_THROTTLE_ALLOWED_REQ_STS", BIT(4), 0},
+ {"VNN_SOC_REQ_STS", BIT(6), 1},
+ {"ISH_VNNAON_REQ_STS", BIT(7), 0},
+ {"D2D_NOC_CFI_QACTIVE_REQ_STS", BIT(8), 1},
+ {"D2D_NOC_GPSB_QACTIVE_REQ_STS", BIT(9), 1},
+ {"D2D_IPU_QACTIVE_REQ_STS", BIT(10), 1},
+ {"PLT_GREATER_REQ_STS", BIT(11), 1},
+ {"ALL_SBR_IDLE_REQ_STS", BIT(12), 0},
+ {"PMC_IDLE_FB_OCP_REQ_STS", BIT(13), 0},
+ {"PM_SYNC_STATES_REQ_STS", BIT(14), 0},
+ {"EA_REQ_STS", BIT(15), 0},
+ {"MPHY_CORE_OFF_REQ_STS", BIT(16), 0},
+ {"BRK_EV_EN_REQ_STS", BIT(17), 0},
+ {"AUTO_DEMO_EN_REQ_STS", BIT(18), 0},
+ {"ITSS_CLK_SRC_REQ_STS", BIT(19), 1},
+ {"ARC_IDLE_REQ_STS", BIT(21), 0},
+ {"PG5_PMA1_REQ_STS", BIT(22), 0},
+ {"FIA_DEEP_PM_REQ_STS", BIT(23), 0},
+ {"XDCI_ATTACHED_REQ_STS", BIT(24), 1},
+ {"ARC_INTERRUPT_WAKE_REQ_STS", BIT(25), 0},
+ {"D2D_DISP_DDI_QACTIVE_REQ_STS", BIT(26), 1},
+ {"PRE_WAKE0_REQ_STS", BIT(27), 1},
+ {"PRE_WAKE1_REQ_STS", BIT(28), 1},
+ {"PRE_WAKE2_REQ_STS", BIT(29), 1},
+ {"D2D_DISP_EDP_QACTIVE_REQ_STS", BIT(31), 1},
+ {}
+};
+
+static const struct pmc_bit_map ptl_pcdp_signal_status_map[] = {
+ {"LSX_Wake0_STS", BIT(0), 0},
+ {"LSX_Wake1_STS", BIT(1), 0},
+ {"LSX_Wake2_STS", BIT(2), 0},
+ {"LSX_Wake3_STS", BIT(3), 0},
+ {"LSX_Wake4_STS", BIT(4), 0},
+ {"LSX_Wake5_STS", BIT(5), 0},
+ {"LSX_Wake6_STS", BIT(6), 0},
+ {"LSX_Wake7_STS", BIT(7), 0},
+ {"LPSS_Wake0_STS", BIT(8), 1},
+ {"LPSS_Wake1_STS", BIT(9), 1},
+ {"Int_Timer_SS_Wake0_STS", BIT(10), 1},
+ {"Int_Timer_SS_Wake1_STS", BIT(11), 1},
+ {"Int_Timer_SS_Wake2_STS", BIT(12), 1},
+ {"Int_Timer_SS_Wake3_STS", BIT(13), 1},
+ {"Int_Timer_SS_Wake4_STS", BIT(14), 1},
+ {"Int_Timer_SS_Wake5_STS", BIT(15), 1},
+ {}
+};
+
+static const struct pmc_bit_map ptl_pcdp_rsc_status_map[] = {
+ {"Memory", 0, 1},
+ {"PSF0", 0, 1},
+ {"PSF4", 0, 1},
+ {"PSF5", 0, 1},
+ {"PSF6", 0, 1},
+ {"PSF8", 0, 1},
+ {"SAF_CFI_LINK", 0, 1},
+ {"SB", 0, 1},
+ {}
+};
+
+static const struct pmc_bit_map *ptl_pcdp_lpm_maps[] = {
+ ptl_pcdp_clocksource_status_map,
+ ptl_pcdp_power_gating_status_0_map,
+ ptl_pcdp_power_gating_status_1_map,
+ ptl_pcdp_power_gating_status_2_map,
+ ptl_pcdp_d3_status_0_map,
+ ptl_pcdp_d3_status_1_map,
+ ptl_pcdp_d3_status_2_map,
+ ptl_pcdp_d3_status_3_map,
+ ptl_pcdp_vnn_req_status_0_map,
+ ptl_pcdp_vnn_req_status_1_map,
+ ptl_pcdp_vnn_req_status_2_map,
+ ptl_pcdp_vnn_req_status_3_map,
+ ptl_pcdp_vnn_misc_status_map,
+ ptl_pcdp_signal_status_map,
+ NULL
+};
+
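+/*
+ * Map set referenced by .s0ix_blocker_maps in ptl_pcdp_reg_map below and
+ * used for S0ix blocker reporting.
+ */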
+static const struct pmc_bit_map *ptl_pcdp_blk_maps[] = {
+ ptl_pcdp_power_gating_status_0_map,
+ ptl_pcdp_power_gating_status_1_map,
+ ptl_pcdp_power_gating_status_2_map,
+ ptl_pcdp_rsc_status_map,
+ ptl_pcdp_vnn_req_status_0_map,
+ ptl_pcdp_vnn_req_status_1_map,
+ ptl_pcdp_vnn_req_status_2_map,
+ ptl_pcdp_vnn_req_status_3_map,
+ ptl_pcdp_d3_status_0_map,
+ ptl_pcdp_d3_status_1_map,
+ ptl_pcdp_d3_status_2_map,
+ ptl_pcdp_d3_status_3_map,
+ ptl_pcdp_clocksource_status_map,
+ ptl_pcdp_vnn_misc_status_map,
+ ptl_pcdp_signal_status_map,
+ NULL
+};
+
+static const struct pmc_reg_map ptl_pcdp_reg_map = {
+ .pfear_sts = ext_ptl_pcdp_pfear_map,
+ .slp_s0_offset = CNP_PMC_SLP_S0_RES_COUNTER_OFFSET,
+ .slp_s0_res_counter_step = TGL_PMC_SLP_S0_RES_COUNTER_STEP,
+ .ltr_show_sts = ptl_pcdp_ltr_show_map,
+ .msr_sts = msr_map,
+ .ltr_ignore_offset = CNP_PMC_LTR_IGNORE_OFFSET,
+ .regmap_length = PTL_PCD_PMC_MMIO_REG_LEN,
+ .ppfear0_offset = CNP_PMC_HOST_PPFEAR0A,
+ .ppfear_buckets = LNL_PPFEAR_NUM_ENTRIES,
+ .pm_cfg_offset = CNP_PMC_PM_CFG_OFFSET,
+ .pm_read_disable_bit = CNP_PMC_READ_DISABLE_BIT,
+ .lpm_num_maps = PTL_LPM_NUM_MAPS,
+ .ltr_ignore_max = LNL_NUM_IP_IGN_ALLOWED,
+ .lpm_res_counter_step_x2 = TGL_PMC_LPM_RES_COUNTER_STEP_X2,
+ .etr3_offset = ETR3_OFFSET,
+ .lpm_sts_latch_en_offset = MTL_LPM_STATUS_LATCH_EN_OFFSET,
+ .lpm_priority_offset = MTL_LPM_PRI_OFFSET,
+ .lpm_en_offset = MTL_LPM_EN_OFFSET,
+ .lpm_residency_offset = MTL_LPM_RESIDENCY_OFFSET,
+ .lpm_sts = ptl_pcdp_lpm_maps,
+ .lpm_status_offset = MTL_LPM_STATUS_OFFSET,
+ .lpm_live_status_offset = MTL_LPM_LIVE_STATUS_OFFSET,
+ .s0ix_blocker_maps = ptl_pcdp_blk_maps,
+ .s0ix_blocker_offset = LNL_S0IX_BLOCKER_OFFSET,
+};
+
+#define PTL_NPU_PCI_DEV 0xb03e
+#define PTL_IPU_PCI_DEV 0xb05d
+
+/*
+ * Set the power state of select devices that do not have drivers to D3,
+ * so that they do not block Package C-state entry.
+ */
+static void ptl_d3_fixup(void)
+{
+ pmc_core_set_device_d3(PTL_IPU_PCI_DEV);
+ pmc_core_set_device_d3(PTL_NPU_PCI_DEV);
+}
+
+static int ptl_resume(struct pmc_dev *pmcdev)
+{
+ ptl_d3_fixup();
+ return cnl_resume(pmcdev);
+}
+
+static int ptl_core_init(struct pmc_dev *pmcdev, struct pmc_dev_info *pmc_dev_info)
+{
+ ptl_d3_fixup();
+ return generic_core_init(pmcdev, pmc_dev_info);
+}
+
+struct pmc_dev_info ptl_pmc_dev = {
+ .map = &ptl_pcdp_reg_map,
+ .suspend = cnl_suspend,
+ .resume = ptl_resume,
+ .init = ptl_core_init,
+};
diff --git a/drivers/platform/x86/intel/pmc/spt.c b/drivers/platform/x86/intel/pmc/spt.c
index ab993a69e33e..b50534aa2cba 100644
--- a/drivers/platform/x86/intel/pmc/spt.c
+++ b/drivers/platform/x86/intel/pmc/spt.c
@@ -8,9 +8,11 @@
*
*/
+#include <linux/pci.h>
+
#include "core.h"
-const struct pmc_bit_map spt_pll_map[] = {
+static const struct pmc_bit_map spt_pll_map[] = {
{"MIPI PLL", SPT_PMC_BIT_MPHY_CMN_LANE0},
{"GEN2 USB2PCIE2 PLL", SPT_PMC_BIT_MPHY_CMN_LANE1},
{"DMIPCIE3 PLL", SPT_PMC_BIT_MPHY_CMN_LANE2},
@@ -18,7 +20,7 @@ const struct pmc_bit_map spt_pll_map[] = {
{}
};
-const struct pmc_bit_map spt_mphy_map[] = {
+static const struct pmc_bit_map spt_mphy_map[] = {
{"MPHY CORE LANE 0", SPT_PMC_BIT_MPHY_LANE0},
{"MPHY CORE LANE 1", SPT_PMC_BIT_MPHY_LANE1},
{"MPHY CORE LANE 2", SPT_PMC_BIT_MPHY_LANE2},
@@ -38,7 +40,7 @@ const struct pmc_bit_map spt_mphy_map[] = {
{}
};
-const struct pmc_bit_map spt_pfear_map[] = {
+static const struct pmc_bit_map spt_pfear_map[] = {
{"PMC", SPT_PMC_BIT_PMC},
{"OPI-DMI", SPT_PMC_BIT_OPI},
{"SPI / eSPI", SPT_PMC_BIT_SPI},
@@ -82,7 +84,7 @@ const struct pmc_bit_map spt_pfear_map[] = {
{}
};
-const struct pmc_bit_map *ext_spt_pfear_map[] = {
+static const struct pmc_bit_map *ext_spt_pfear_map[] = {
/*
* Check intel_pmc_core_ids[] users of spt_reg_map for
* a list of core SoCs using this.
@@ -91,7 +93,7 @@ const struct pmc_bit_map *ext_spt_pfear_map[] = {
NULL
};
-const struct pmc_bit_map spt_ltr_show_map[] = {
+static const struct pmc_bit_map spt_ltr_show_map[] = {
{"SOUTHPORT_A", SPT_PMC_LTR_SPA},
{"SOUTHPORT_B", SPT_PMC_LTR_SPB},
{"SATA", SPT_PMC_LTR_SATA},
@@ -116,7 +118,7 @@ const struct pmc_bit_map spt_ltr_show_map[] = {
{}
};
-const struct pmc_reg_map spt_reg_map = {
+static const struct pmc_reg_map spt_reg_map = {
.pfear_sts = ext_spt_pfear_map,
.mphy_sts = spt_mphy_map,
.pll_sts = spt_pll_map,
@@ -134,18 +136,25 @@ const struct pmc_reg_map spt_reg_map = {
.pm_vric1_offset = SPT_PMC_VRIC1_OFFSET,
};
-int spt_core_init(struct pmc_dev *pmcdev)
-{
- struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN];
- int ret;
-
- pmc->map = &spt_reg_map;
-
- ret = get_primary_reg_base(pmc);
- if (ret)
- return ret;
+static const struct pci_device_id spt_pmc_pci_id[] = {
+ { PCI_VDEVICE(INTEL, SPT_PMC_PCI_DEVICE_ID) },
+ { }
+};
- pmc_core_get_low_power_modes(pmcdev);
+static int spt_core_init(struct pmc_dev *pmcdev, struct pmc_dev_info *pmc_dev_info)
+{
+ /*
+ * Coffee Lake has the Kaby Lake CPU ID but a Cannon Lake PCH, so the
+ * Sunrise Point PCH register map can't be used here. Use the Cannon
+ * Lake PCH register map in that case.
+ */
+ if (!pci_dev_present(spt_pmc_pci_id))
+ return generic_core_init(pmcdev, &cnp_pmc_dev);
- return ret;
+ return generic_core_init(pmcdev, pmc_dev_info);
}
+
+struct pmc_dev_info spt_pmc_dev = {
+ .map = &spt_reg_map,
+ .init = spt_core_init,
+};
diff --git a/drivers/platform/x86/intel/pmc/tgl.c b/drivers/platform/x86/intel/pmc/tgl.c
index e0580de18077..02e731ed3391 100644
--- a/drivers/platform/x86/intel/pmc/tgl.c
+++ b/drivers/platform/x86/intel/pmc/tgl.c
@@ -18,7 +18,7 @@ enum pch_type {
PCH_LP
};
-const struct pmc_bit_map tgl_pfear_map[] = {
+static const struct pmc_bit_map tgl_pfear_map[] = {
{"PSF9", BIT(0)},
{"RES_66", BIT(1)},
{"RES_67", BIT(2)},
@@ -29,7 +29,7 @@ const struct pmc_bit_map tgl_pfear_map[] = {
{}
};
-const struct pmc_bit_map *ext_tgl_pfear_map[] = {
+static const struct pmc_bit_map *ext_tgl_pfear_map[] = {
/*
* Check intel_pmc_core_ids[] users of tgl_reg_map for
* a list of core SoCs using this.
@@ -39,7 +39,7 @@ const struct pmc_bit_map *ext_tgl_pfear_map[] = {
NULL
};
-const struct pmc_bit_map tgl_clocksource_status_map[] = {
+static const struct pmc_bit_map tgl_clocksource_status_map[] = {
{"USB2PLL_OFF_STS", BIT(18)},
{"PCIe/USB3.1_Gen2PLL_OFF_STS", BIT(19)},
{"PCIe_Gen3PLL_OFF_STS", BIT(20)},
@@ -55,7 +55,7 @@ const struct pmc_bit_map tgl_clocksource_status_map[] = {
{}
};
-const struct pmc_bit_map tgl_power_gating_status_map[] = {
+static const struct pmc_bit_map tgl_power_gating_status_map[] = {
{"CSME_PG_STS", BIT(0)},
{"SATA_PG_STS", BIT(1)},
{"xHCI_PG_STS", BIT(2)},
@@ -83,7 +83,7 @@ const struct pmc_bit_map tgl_power_gating_status_map[] = {
{}
};
-const struct pmc_bit_map tgl_d3_status_map[] = {
+static const struct pmc_bit_map tgl_d3_status_map[] = {
{"ADSP_D3_STS", BIT(0)},
{"SATA_D3_STS", BIT(1)},
{"xHCI0_D3_STS", BIT(2)},
@@ -98,7 +98,7 @@ const struct pmc_bit_map tgl_d3_status_map[] = {
{}
};
-const struct pmc_bit_map tgl_vnn_req_status_map[] = {
+static const struct pmc_bit_map tgl_vnn_req_status_map[] = {
{"GPIO_COM0_VNN_REQ_STS", BIT(1)},
{"GPIO_COM1_VNN_REQ_STS", BIT(2)},
{"GPIO_COM2_VNN_REQ_STS", BIT(3)},
@@ -123,7 +123,7 @@ const struct pmc_bit_map tgl_vnn_req_status_map[] = {
{}
};
-const struct pmc_bit_map tgl_vnn_misc_status_map[] = {
+static const struct pmc_bit_map tgl_vnn_misc_status_map[] = {
{"CPU_C10_REQ_STS_0", BIT(0)},
{"PCIe_LPM_En_REQ_STS_3", BIT(3)},
{"ITH_REQ_STS_5", BIT(5)},
@@ -175,7 +175,7 @@ const struct pmc_bit_map tgl_signal_status_map[] = {
{}
};
-const struct pmc_bit_map *tgl_lpm_maps[] = {
+static const struct pmc_bit_map *tgl_lpm_maps[] = {
tgl_clocksource_status_map,
tgl_power_gating_status_map,
tgl_d3_status_map,
@@ -185,7 +185,7 @@ const struct pmc_bit_map *tgl_lpm_maps[] = {
NULL
};
-const struct pmc_reg_map tgl_reg_map = {
+static const struct pmc_reg_map tgl_reg_map = {
.pfear_sts = ext_tgl_pfear_map,
.slp_s0_offset = CNP_PMC_SLP_S0_RES_COUNTER_OFFSET,
.slp_s0_res_counter_step = TGL_PMC_SLP_S0_RES_COUNTER_STEP,
@@ -210,7 +210,7 @@ const struct pmc_reg_map tgl_reg_map = {
.etr3_offset = ETR3_OFFSET,
};
-const struct pmc_reg_map tgl_h_reg_map = {
+static const struct pmc_reg_map tgl_h_reg_map = {
.pfear_sts = ext_tgl_pfear_map,
.slp_s0_offset = CNP_PMC_SLP_S0_RES_COUNTER_OFFSET,
.slp_s0_res_counter_step = TGL_PMC_SLP_S0_RES_COUNTER_STEP,
@@ -285,35 +285,28 @@ free_acpi_obj:
ACPI_FREE(out_obj);
}
-int tgl_l_core_init(struct pmc_dev *pmcdev)
+static int tgl_core_init(struct pmc_dev *pmcdev, struct pmc_dev_info *pmc_dev_info)
{
- return tgl_core_generic_init(pmcdev, PCH_LP);
-}
-
-int tgl_core_init(struct pmc_dev *pmcdev)
-{
- return tgl_core_generic_init(pmcdev, PCH_H);
-}
-
-int tgl_core_generic_init(struct pmc_dev *pmcdev, int pch_tp)
-{
- struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN];
int ret;
- if (pch_tp == PCH_H)
- pmc->map = &tgl_h_reg_map;
- else
- pmc->map = &tgl_reg_map;
-
- pmcdev->suspend = cnl_suspend;
- pmcdev->resume = cnl_resume;
-
- ret = get_primary_reg_base(pmc);
+ ret = generic_core_init(pmcdev, pmc_dev_info);
if (ret)
return ret;
- pmc_core_get_low_power_modes(pmcdev);
pmc_core_get_tgl_lpm_reqs(pmcdev->pdev);
-
return 0;
}
+
+struct pmc_dev_info tgl_l_pmc_dev = {
+ .map = &tgl_reg_map,
+ .suspend = cnl_suspend,
+ .resume = cnl_resume,
+ .init = tgl_core_init,
+};
+
+struct pmc_dev_info tgl_pmc_dev = {
+ .map = &tgl_h_reg_map,
+ .suspend = cnl_suspend,
+ .resume = cnl_resume,
+ .init = tgl_core_init,
+};
diff --git a/drivers/platform/x86/intel/vsec.c b/drivers/platform/x86/intel/vsec.c
index 8272f1dd0fbc..db3c031d1757 100644
--- a/drivers/platform/x86/intel/vsec.c
+++ b/drivers/platform/x86/intel/vsec.c
@@ -404,6 +404,11 @@ static const struct intel_vsec_platform_info oobmsm_info = {
.caps = VSEC_CAP_TELEMETRY | VSEC_CAP_SDSI | VSEC_CAP_TPMI,
};
+/* DMR OOBMSM info */
+static const struct intel_vsec_platform_info dmr_oobmsm_info = {
+ .caps = VSEC_CAP_TELEMETRY | VSEC_CAP_TPMI,
+};
+
/* TGL info */
static const struct intel_vsec_platform_info tgl_info = {
.caps = VSEC_CAP_TELEMETRY,
@@ -420,6 +425,7 @@ static const struct intel_vsec_platform_info lnl_info = {
#define PCI_DEVICE_ID_INTEL_VSEC_MTL_M 0x7d0d
#define PCI_DEVICE_ID_INTEL_VSEC_MTL_S 0xad0d
#define PCI_DEVICE_ID_INTEL_VSEC_OOBMSM 0x09a7
+#define PCI_DEVICE_ID_INTEL_VSEC_OOBMSM_DMR 0x09a1
#define PCI_DEVICE_ID_INTEL_VSEC_RPL 0xa77d
#define PCI_DEVICE_ID_INTEL_VSEC_TGL 0x9a0d
#define PCI_DEVICE_ID_INTEL_VSEC_LNL_M 0x647d
@@ -430,6 +436,7 @@ static const struct pci_device_id intel_vsec_pci_ids[] = {
{ PCI_DEVICE_DATA(INTEL, VSEC_MTL_M, &mtl_info) },
{ PCI_DEVICE_DATA(INTEL, VSEC_MTL_S, &mtl_info) },
{ PCI_DEVICE_DATA(INTEL, VSEC_OOBMSM, &oobmsm_info) },
+ { PCI_DEVICE_DATA(INTEL, VSEC_OOBMSM_DMR, &dmr_oobmsm_info) },
{ PCI_DEVICE_DATA(INTEL, VSEC_RPL, &tgl_info) },
{ PCI_DEVICE_DATA(INTEL, VSEC_TGL, &tgl_info) },
{ PCI_DEVICE_DATA(INTEL, VSEC_LNL_M, &lnl_info) },
diff --git a/drivers/platform/x86/lenovo-wmi-hotkey-utilities.c b/drivers/platform/x86/lenovo-wmi-hotkey-utilities.c
new file mode 100644
index 000000000000..89153afd7015
--- /dev/null
+++ b/drivers/platform/x86/lenovo-wmi-hotkey-utilities.c
@@ -0,0 +1,212 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Lenovo Super Hotkey Utility WMI extras driver for IdeaPad laptops
+ *
+ * Copyright (C) 2025 Lenovo
+ */
+
+#include <linux/cleanup.h>
+#include <linux/dev_printk.h>
+#include <linux/device.h>
+#include <linux/leds.h>
+#include <linux/module.h>
+#include <linux/wmi.h>
+
+/* Lenovo Super Hotkey WMI GUIDs */
+#define LUD_WMI_METHOD_GUID "CE6C0974-0407-4F50-88BA-4FC3B6559AD8"
+
+/* Lenovo Utility Data WMI method_id */
+#define WMI_LUD_GET_SUPPORT 1
+#define WMI_LUD_SET_FEATURE 2
+
+#define WMI_LUD_GET_MICMUTE_LED_VER 20
+#define WMI_LUD_GET_AUDIOMUTE_LED_VER 26
+
+#define WMI_LUD_SUPPORT_MICMUTE_LED_VER 25
+#define WMI_LUD_SUPPORT_AUDIOMUTE_LED_VER 27
+
+/* Input parameters to mute/unmute audio LED and Mic LED */
+struct wmi_led_args {
+ u8 id;
+ u8 subid;
+ u16 value;
+};
+
+/* Values of input parameters to SetFeature of audio LED and Mic LED */
+enum hotkey_set_feature {
+ MIC_MUTE_LED_ON = 1,
+ MIC_MUTE_LED_OFF = 2,
+ AUDIO_MUTE_LED_ON = 4,
+ AUDIO_MUTE_LED_OFF = 5,
+};
+
+#define LSH_ACPI_LED_MAX 2
+
+struct lenovo_super_hotkey_wmi_private {
+ struct led_classdev cdev[LSH_ACPI_LED_MAX];
+ struct wmi_device *led_wdev;
+};
+
+enum mute_led_type {
+ MIC_MUTE,
+ AUDIO_MUTE,
+};
+
+static int lsh_wmi_mute_led_set(enum mute_led_type led_type, struct led_classdev *led_cdev,
+ enum led_brightness brightness)
+{
+ struct lenovo_super_hotkey_wmi_private *wpriv = container_of(led_cdev,
+ struct lenovo_super_hotkey_wmi_private, cdev[led_type]);
+ struct wmi_led_args led_arg = {0, 0, 0};
+ struct acpi_buffer input;
+ acpi_status status;
+
+ switch (led_type) {
+ case MIC_MUTE:
+ led_arg.id = brightness == LED_ON ? MIC_MUTE_LED_ON : MIC_MUTE_LED_OFF;
+ break;
+ case AUDIO_MUTE:
+ led_arg.id = brightness == LED_ON ? AUDIO_MUTE_LED_ON : AUDIO_MUTE_LED_OFF;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ input.length = sizeof(led_arg);
+ input.pointer = &led_arg;
+ status = wmidev_evaluate_method(wpriv->led_wdev, 0, WMI_LUD_SET_FEATURE, &input, NULL);
+ if (ACPI_FAILURE(status))
+ return -EIO;
+
+ return 0;
+}
+
+static int lsh_wmi_audiomute_led_set(struct led_classdev *led_cdev,
+ enum led_brightness brightness)
+{
+ return lsh_wmi_mute_led_set(AUDIO_MUTE, led_cdev, brightness);
+}
+
+static int lsh_wmi_micmute_led_set(struct led_classdev *led_cdev,
+ enum led_brightness brightness)
+{
+ return lsh_wmi_mute_led_set(MIC_MUTE, led_cdev, brightness);
+}
+
+static int lenovo_super_hotkey_wmi_led_init(enum mute_led_type led_type, struct device *dev)
+{
+ struct lenovo_super_hotkey_wmi_private *wpriv = dev_get_drvdata(dev);
+ struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
+ struct acpi_buffer input;
+ int led_version, err = 0;
+ unsigned int wmiarg;
+ acpi_status status;
+
+ switch (led_type) {
+ case MIC_MUTE:
+ wmiarg = WMI_LUD_GET_MICMUTE_LED_VER;
+ break;
+ case AUDIO_MUTE:
+ wmiarg = WMI_LUD_GET_AUDIOMUTE_LED_VER;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ input.length = sizeof(wmiarg);
+ input.pointer = &wmiarg;
+ status = wmidev_evaluate_method(wpriv->led_wdev, 0, WMI_LUD_GET_SUPPORT, &input, &output);
+ if (ACPI_FAILURE(status))
+ return -EIO;
+
+ union acpi_object *obj __free(kfree) = output.pointer;
+ if (obj && obj->type == ACPI_TYPE_INTEGER)
+ led_version = obj->integer.value;
+ else
+ return -EIO;
+
+ wpriv->cdev[led_type].max_brightness = LED_ON;
+ wpriv->cdev[led_type].flags = LED_CORE_SUSPENDRESUME;
+
+ switch (led_type) {
+ case MIC_MUTE:
+ if (led_version != WMI_LUD_SUPPORT_MICMUTE_LED_VER)
+ return -EIO;
+
+ wpriv->cdev[led_type].name = "platform::micmute";
+ wpriv->cdev[led_type].brightness_set_blocking = &lsh_wmi_micmute_led_set;
+ wpriv->cdev[led_type].default_trigger = "audio-micmute";
+ break;
+ case AUDIO_MUTE:
+ if (led_version != WMI_LUD_SUPPORT_AUDIOMUTE_LED_VER)
+ return -EIO;
+
+ wpriv->cdev[led_type].name = "platform::mute";
+ wpriv->cdev[led_type].brightness_set_blocking = &lsh_wmi_audiomute_led_set;
+ wpriv->cdev[led_type].default_trigger = "audio-mute";
+ break;
+ default:
+ dev_err(dev, "Unknown LED type %d\n", led_type);
+ return -EINVAL;
+ }
+
+ err = devm_led_classdev_register(dev, &wpriv->cdev[led_type]);
+ if (err < 0) {
+ dev_err(dev, "Could not register mute LED %d : %d\n", led_type, err);
+ return err;
+ }
+ return 0;
+}
+
+static int lenovo_super_hotkey_wmi_leds_setup(struct device *dev)
+{
+ int err;
+
+ err = lenovo_super_hotkey_wmi_led_init(MIC_MUTE, dev);
+ if (err)
+ return err;
+
+ err = lenovo_super_hotkey_wmi_led_init(AUDIO_MUTE, dev);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int lenovo_super_hotkey_wmi_probe(struct wmi_device *wdev, const void *context)
+{
+ struct lenovo_super_hotkey_wmi_private *wpriv;
+
+ wpriv = devm_kzalloc(&wdev->dev, sizeof(*wpriv), GFP_KERNEL);
+ if (!wpriv)
+ return -ENOMEM;
+
+ dev_set_drvdata(&wdev->dev, wpriv);
+ wpriv->led_wdev = wdev;
+ return lenovo_super_hotkey_wmi_leds_setup(&wdev->dev);
+}
+
+static const struct wmi_device_id lenovo_super_hotkey_wmi_id_table[] = {
+ { LUD_WMI_METHOD_GUID, NULL }, /* Utility data */
+ { }
+};
+
+MODULE_DEVICE_TABLE(wmi, lenovo_super_hotkey_wmi_id_table);
+
+static struct wmi_driver lenovo_wmi_hotkey_utilities_driver = {
+ .driver = {
+ .name = "lenovo_wmi_hotkey_utilities",
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS
+ },
+ .id_table = lenovo_super_hotkey_wmi_id_table,
+ .probe = lenovo_super_hotkey_wmi_probe,
+ .no_singleton = true,
+};
+
+module_wmi_driver(lenovo_wmi_hotkey_utilities_driver);
+
+MODULE_AUTHOR("Jackie Dong <dongeg1@lenovo.com>");
+MODULE_DESCRIPTION("Lenovo Super Hotkey Utility WMI extras driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/lenovo-yoga-tab2-pro-1380-fastcharger.c b/drivers/platform/x86/lenovo-yoga-tab2-pro-1380-fastcharger.c
index a96b215cd2c5..25933cd018d1 100644
--- a/drivers/platform/x86/lenovo-yoga-tab2-pro-1380-fastcharger.c
+++ b/drivers/platform/x86/lenovo-yoga-tab2-pro-1380-fastcharger.c
@@ -219,7 +219,7 @@ static int yt2_1380_fc_serdev_probe(struct serdev_device *serdev)
return 0;
}
-struct serdev_device_driver yt2_1380_fc_serdev_driver = {
+static struct serdev_device_driver yt2_1380_fc_serdev_driver = {
.probe = yt2_1380_fc_serdev_probe,
.driver = {
.name = KBUILD_MODNAME,
diff --git a/drivers/platform/x86/samsung-galaxybook.c b/drivers/platform/x86/samsung-galaxybook.c
new file mode 100644
index 000000000000..5878a351993e
--- /dev/null
+++ b/drivers/platform/x86/samsung-galaxybook.c
@@ -0,0 +1,1425 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Samsung Galaxy Book driver
+ *
+ * Copyright (c) 2025 Joshua Grisham <josh@joshuagrisham.com>
+ *
+ * With contributions to the SCAI ACPI device interface:
+ * Copyright (c) 2024 Giulio Girardi <giulio.girardi@protechgroup.it>
+ *
+ * Implementation inspired by existing x86 platform drivers.
+ * Thank you to the authors!
+ */
+
+#include <linux/acpi.h>
+#include <linux/bits.h>
+#include <linux/err.h>
+#include <linux/i8042.h>
+#include <linux/init.h>
+#include <linux/input.h>
+#include <linux/kernel.h>
+#include <linux/leds.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/platform_profile.h>
+#include <linux/serio.h>
+#include <linux/sysfs.h>
+#include <linux/uuid.h>
+#include <linux/workqueue.h>
+#include <acpi/battery.h>
+#include "firmware_attributes_class.h"
+
+#define DRIVER_NAME "samsung-galaxybook"
+
+struct samsung_galaxybook {
+ struct platform_device *platform;
+ struct acpi_device *acpi;
+
+ struct device *fw_attrs_dev;
+ struct kset *fw_attrs_kset;
+ /* serializes firmware attribute updates made from multiple threads */
+ struct mutex fw_attr_lock;
+
+ bool has_kbd_backlight;
+ bool has_block_recording;
+ bool has_performance_mode;
+
+ struct led_classdev kbd_backlight;
+ struct work_struct kbd_backlight_hotkey_work;
+ /* serializes brightness updates from the hotkey handler and other threads */
+ struct mutex kbd_backlight_lock;
+
+ void *i8042_filter_ptr;
+
+ struct work_struct block_recording_hotkey_work;
+ struct input_dev *camera_lens_cover_switch;
+
+ struct acpi_battery_hook battery_hook;
+
+ u8 profile_performance_modes[PLATFORM_PROFILE_LAST];
+};
+
+enum galaxybook_fw_attr_id {
+ GB_ATTR_POWER_ON_LID_OPEN,
+ GB_ATTR_USB_CHARGING,
+ GB_ATTR_BLOCK_RECORDING,
+};
+
+static const char * const galaxybook_fw_attr_name[] = {
+ [GB_ATTR_POWER_ON_LID_OPEN] = "power_on_lid_open",
+ [GB_ATTR_USB_CHARGING] = "usb_charging",
+ [GB_ATTR_BLOCK_RECORDING] = "block_recording",
+};
+
+static const char * const galaxybook_fw_attr_desc[] = {
+ [GB_ATTR_POWER_ON_LID_OPEN] = "Power On Lid Open",
+ [GB_ATTR_USB_CHARGING] = "USB Charging",
+ [GB_ATTR_BLOCK_RECORDING] = "Block Recording",
+};
+
+#define GB_ATTR_LANGUAGE_CODE "en_US.UTF-8"
+
+struct galaxybook_fw_attr {
+ struct samsung_galaxybook *galaxybook;
+ enum galaxybook_fw_attr_id fw_attr_id;
+ struct attribute_group attr_group;
+ struct kobj_attribute display_name;
+ struct kobj_attribute current_value;
+ int (*get_value)(struct samsung_galaxybook *galaxybook, bool *value);
+ int (*set_value)(struct samsung_galaxybook *galaxybook, const bool value);
+};
+
+struct sawb {
+ u16 safn;
+ u16 sasb;
+ u8 rflg;
+ union {
+ struct {
+ u8 gunm;
+ u8 guds[250];
+ } __packed;
+ struct {
+ u8 caid[16];
+ u8 fncn;
+ u8 subn;
+ u8 iob0;
+ u8 iob1;
+ u8 iob2;
+ u8 iob3;
+ u8 iob4;
+ u8 iob5;
+ u8 iob6;
+ u8 iob7;
+ u8 iob8;
+ u8 iob9;
+ } __packed;
+ struct {
+ u8 iob_prefix[18];
+ u8 iobs[10];
+ } __packed;
+ } __packed;
+} __packed;
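+
+/*
+ * The three anonymous structs above overlay the same payload bytes:
+ * gunm/guds are used for GB_ACPI_METHOD_SETTINGS calls, while
+ * caid/fncn/subn/iob0-iob9 are used for GB_ACPI_METHOD_PERFORMANCE_MODE
+ * calls. The iobs[] view overlays iob0-iob9 so the supported performance
+ * mode list can be walked by index.
+ */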
+
+#define GB_SAWB_LEN_SETTINGS 0x15
+#define GB_SAWB_LEN_PERFORMANCE_MODE 0x100
+
+#define GB_SAFN 0x5843
+
+#define GB_SASB_KBD_BACKLIGHT 0x78
+#define GB_SASB_POWER_MANAGEMENT 0x7a
+#define GB_SASB_USB_CHARGING_GET 0x67
+#define GB_SASB_USB_CHARGING_SET 0x68
+#define GB_SASB_NOTIFICATIONS 0x86
+#define GB_SASB_BLOCK_RECORDING 0x8a
+#define GB_SASB_PERFORMANCE_MODE 0x91
+
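+/* Byte offsets of the rflg and gunm fields in the struct sawb response */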
+#define GB_SAWB_RFLG_POS 4
+#define GB_SAWB_GB_GUNM_POS 5
+
+#define GB_RFLG_SUCCESS 0xaa
+#define GB_GUNM_FAIL 0xff
+
+#define GB_GUNM_FEATURE_ENABLE 0xbb
+#define GB_GUNM_FEATURE_ENABLE_SUCCESS 0xdd
+#define GB_GUDS_FEATURE_ENABLE 0xaa
+#define GB_GUDS_FEATURE_ENABLE_SUCCESS 0xcc
+
+#define GB_GUNM_GET 0x81
+#define GB_GUNM_SET 0x82
+
+#define GB_GUNM_POWER_MANAGEMENT 0x82
+
+#define GB_GUNM_USB_CHARGING_GET 0x80
+#define GB_GUNM_USB_CHARGING_ON 0x81
+#define GB_GUNM_USB_CHARGING_OFF 0x80
+#define GB_GUDS_POWER_ON_LID_OPEN 0xa3
+#define GB_GUDS_POWER_ON_LID_OPEN_GET 0x81
+#define GB_GUDS_POWER_ON_LID_OPEN_SET 0x80
+#define GB_GUDS_BATTERY_CHARGE_CONTROL 0xe9
+#define GB_GUDS_BATTERY_CHARGE_CONTROL_GET 0x91
+#define GB_GUDS_BATTERY_CHARGE_CONTROL_SET 0x90
+#define GB_GUNM_ACPI_NOTIFY_ENABLE 0x80
+#define GB_GUDS_ACPI_NOTIFY_ENABLE 0x02
+
+#define GB_BLOCK_RECORDING_ON 0x0
+#define GB_BLOCK_RECORDING_OFF 0x1
+
+#define GB_FNCN_PERFORMANCE_MODE 0x51
+#define GB_SUBN_PERFORMANCE_MODE_LIST 0x01
+#define GB_SUBN_PERFORMANCE_MODE_GET 0x02
+#define GB_SUBN_PERFORMANCE_MODE_SET 0x03
+
+/* guid 8246028d-8bca-4a55-ba0f-6f1e6b921b8f */
+static const guid_t performance_mode_guid =
+ GUID_INIT(0x8246028d, 0x8bca, 0x4a55, 0xba, 0x0f, 0x6f, 0x1e, 0x6b, 0x92, 0x1b, 0x8f);
+#define GB_PERFORMANCE_MODE_GUID performance_mode_guid
+
+#define GB_PERFORMANCE_MODE_FANOFF 0xb
+#define GB_PERFORMANCE_MODE_LOWNOISE 0xa
+#define GB_PERFORMANCE_MODE_OPTIMIZED 0x0
+#define GB_PERFORMANCE_MODE_OPTIMIZED_V2 0x2
+#define GB_PERFORMANCE_MODE_PERFORMANCE 0x1
+#define GB_PERFORMANCE_MODE_PERFORMANCE_V2 0x15
+#define GB_PERFORMANCE_MODE_ULTRA 0x16
+#define GB_PERFORMANCE_MODE_IGNORE1 0x14
+#define GB_PERFORMANCE_MODE_IGNORE2 0xc
+
+#define GB_ACPI_METHOD_ENABLE "SDLS"
+#define GB_ACPI_METHOD_ENABLE_ON 1
+#define GB_ACPI_METHOD_ENABLE_OFF 0
+#define GB_ACPI_METHOD_SETTINGS "CSFI"
+#define GB_ACPI_METHOD_PERFORMANCE_MODE "CSXI"
+
+#define GB_KBD_BACKLIGHT_MAX_BRIGHTNESS 3
+
+#define GB_ACPI_NOTIFY_BATTERY_STATE_CHANGED 0x61
+#define GB_ACPI_NOTIFY_DEVICE_ON_TABLE 0x6c
+#define GB_ACPI_NOTIFY_DEVICE_OFF_TABLE 0x6d
+#define GB_ACPI_NOTIFY_HOTKEY_PERFORMANCE_MODE 0x70
+
+#define GB_KEY_KBD_BACKLIGHT_KEYDOWN 0x2c
+#define GB_KEY_KBD_BACKLIGHT_KEYUP 0xac
+#define GB_KEY_BLOCK_RECORDING_KEYDOWN 0x1f
+#define GB_KEY_BLOCK_RECORDING_KEYUP 0x9f
+#define GB_KEY_BATTERY_NOTIFY_KEYUP 0xf
+#define GB_KEY_BATTERY_NOTIFY_KEYDOWN 0x8f
+
+/*
+ * Optional features which have been determined as not supported on a particular
+ * device will return GB_NOT_SUPPORTED from their init function. Positive
+ * EOPNOTSUPP is used as the underlying value instead of negative to
+ * differentiate this return code from valid upstream failures.
+ */
+#define GB_NOT_SUPPORTED EOPNOTSUPP /* Galaxy Book feature not supported */
+
+/*
+ * ACPI method handling
+ */
+
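+/*
+ * Evaluate @method with @buf as its single buffer argument of @len bytes.
+ * On success the response is checked via the rflg/gunm status bytes and
+ * copied back into @buf.
+ */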
+static int galaxybook_acpi_method(struct samsung_galaxybook *galaxybook, acpi_string method,
+ struct sawb *buf, size_t len)
+{
+ struct acpi_buffer output = {ACPI_ALLOCATE_BUFFER, NULL};
+ union acpi_object in_obj, *out_obj;
+ struct acpi_object_list input;
+ acpi_status status;
+ int err;
+
+ in_obj.type = ACPI_TYPE_BUFFER;
+ in_obj.buffer.length = len;
+ in_obj.buffer.pointer = (u8 *)buf;
+
+ input.count = 1;
+ input.pointer = &in_obj;
+
+ status = acpi_evaluate_object_typed(galaxybook->acpi->handle, method, &input, &output,
+ ACPI_TYPE_BUFFER);
+
+ if (ACPI_FAILURE(status)) {
+ dev_err(&galaxybook->acpi->dev, "failed to execute method %s; got %s\n",
+ method, acpi_format_exception(status));
+ return -EIO;
+ }
+
+ out_obj = output.pointer;
+
+ if (out_obj->buffer.length != len || out_obj->buffer.length < GB_SAWB_GB_GUNM_POS + 1) {
+ dev_err(&galaxybook->acpi->dev,
+ "failed to execute %s; response length mismatch\n",
+ method);
+ err = -EPROTO;
+ goto out_free;
+ }
+ if (out_obj->buffer.pointer[GB_SAWB_RFLG_POS] != GB_RFLG_SUCCESS) {
+ dev_err(&galaxybook->acpi->dev,
+ "failed to execute %s; device did not respond with success code 0x%x\n",
+ method, GB_RFLG_SUCCESS);
+ err = -ENXIO;
+ goto out_free;
+ }
+ if (out_obj->buffer.pointer[GB_SAWB_GB_GUNM_POS] == GB_GUNM_FAIL) {
+ dev_err(&galaxybook->acpi->dev,
+ "failed to execute %s; device responded with failure code 0x%x\n",
+ method, GB_GUNM_FAIL);
+ err = -ENXIO;
+ goto out_free;
+ }
+
+ memcpy(buf, out_obj->buffer.pointer, len);
+ err = 0;
+
+out_free:
+ kfree(out_obj);
+ return err;
+}
+
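+/* Send the feature-enable handshake for @sasb and verify the device acknowledged it */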
+static int galaxybook_enable_acpi_feature(struct samsung_galaxybook *galaxybook, const u16 sasb)
+{
+ struct sawb buf = {};
+ int err;
+
+ buf.safn = GB_SAFN;
+ buf.sasb = sasb;
+ buf.gunm = GB_GUNM_FEATURE_ENABLE;
+ buf.guds[0] = GB_GUDS_FEATURE_ENABLE;
+
+ err = galaxybook_acpi_method(galaxybook, GB_ACPI_METHOD_SETTINGS,
+ &buf, GB_SAWB_LEN_SETTINGS);
+ if (err)
+ return err;
+
+ if (buf.gunm != GB_GUNM_FEATURE_ENABLE_SUCCESS &&
+ buf.guds[0] != GB_GUDS_FEATURE_ENABLE_SUCCESS)
+ return -ENODEV;
+
+ return 0;
+}
+
+/*
+ * Keyboard Backlight
+ */
+
+static int kbd_backlight_acpi_get(struct samsung_galaxybook *galaxybook,
+ enum led_brightness *brightness)
+{
+ struct sawb buf = {};
+ int err;
+
+ buf.safn = GB_SAFN;
+ buf.sasb = GB_SASB_KBD_BACKLIGHT;
+ buf.gunm = GB_GUNM_GET;
+
+ err = galaxybook_acpi_method(galaxybook, GB_ACPI_METHOD_SETTINGS,
+ &buf, GB_SAWB_LEN_SETTINGS);
+ if (err)
+ return err;
+
+ *brightness = buf.gunm;
+
+ return 0;
+}
+
+static int kbd_backlight_acpi_set(struct samsung_galaxybook *galaxybook,
+ const enum led_brightness brightness)
+{
+ struct sawb buf = {};
+
+ buf.safn = GB_SAFN;
+ buf.sasb = GB_SASB_KBD_BACKLIGHT;
+ buf.gunm = GB_GUNM_SET;
+
+ buf.guds[0] = brightness;
+
+ return galaxybook_acpi_method(galaxybook, GB_ACPI_METHOD_SETTINGS,
+ &buf, GB_SAWB_LEN_SETTINGS);
+}
+
+static enum led_brightness kbd_backlight_show(struct led_classdev *led)
+{
+ struct samsung_galaxybook *galaxybook =
+ container_of(led, struct samsung_galaxybook, kbd_backlight);
+ enum led_brightness brightness;
+ int err;
+
+ err = kbd_backlight_acpi_get(galaxybook, &brightness);
+ if (err)
+ return err;
+
+ return brightness;
+}
+
+static int kbd_backlight_store(struct led_classdev *led,
+ const enum led_brightness brightness)
+{
+ struct samsung_galaxybook *galaxybook =
+ container_of_const(led, struct samsung_galaxybook, kbd_backlight);
+
+ return kbd_backlight_acpi_set(galaxybook, brightness);
+}
+
+static int galaxybook_kbd_backlight_init(struct samsung_galaxybook *galaxybook)
+{
+ struct led_init_data init_data = {};
+ enum led_brightness brightness;
+ int err;
+
+ err = devm_mutex_init(&galaxybook->platform->dev, &galaxybook->kbd_backlight_lock);
+ if (err)
+ return err;
+
+ err = galaxybook_enable_acpi_feature(galaxybook, GB_SASB_KBD_BACKLIGHT);
+ if (err) {
+ dev_dbg(&galaxybook->platform->dev,
+ "failed to enable kbd_backlight feature, error %d\n", err);
+ return GB_NOT_SUPPORTED;
+ }
+
+ err = kbd_backlight_acpi_get(galaxybook, &brightness);
+ if (err) {
+ dev_dbg(&galaxybook->platform->dev,
+ "failed to get initial kbd_backlight brightness, error %d\n", err);
+ return GB_NOT_SUPPORTED;
+ }
+
+ init_data.devicename = DRIVER_NAME;
+ init_data.default_label = ":" LED_FUNCTION_KBD_BACKLIGHT;
+ init_data.devname_mandatory = true;
+
+ galaxybook->kbd_backlight.brightness_get = kbd_backlight_show;
+ galaxybook->kbd_backlight.brightness_set_blocking = kbd_backlight_store;
+ galaxybook->kbd_backlight.flags = LED_BRIGHT_HW_CHANGED;
+ galaxybook->kbd_backlight.max_brightness = GB_KBD_BACKLIGHT_MAX_BRIGHTNESS;
+
+ return devm_led_classdev_register_ext(&galaxybook->platform->dev,
+ &galaxybook->kbd_backlight, &init_data);
+}
+
+/*
+ * Battery Extension (adds charge_control_end_threshold to the battery device)
+ */
+
+static int charge_control_end_threshold_acpi_get(struct samsung_galaxybook *galaxybook, u8 *value)
+{
+ struct sawb buf = {};
+ int err;
+
+ buf.safn = GB_SAFN;
+ buf.sasb = GB_SASB_POWER_MANAGEMENT;
+ buf.gunm = GB_GUNM_POWER_MANAGEMENT;
+ buf.guds[0] = GB_GUDS_BATTERY_CHARGE_CONTROL;
+ buf.guds[1] = GB_GUDS_BATTERY_CHARGE_CONTROL_GET;
+
+ err = galaxybook_acpi_method(galaxybook, GB_ACPI_METHOD_SETTINGS,
+ &buf, GB_SAWB_LEN_SETTINGS);
+ if (err)
+ return err;
+
+ *value = buf.guds[1];
+
+ return 0;
+}
+
+static int charge_control_end_threshold_acpi_set(struct samsung_galaxybook *galaxybook, u8 value)
+{
+ struct sawb buf = {};
+
+ buf.safn = GB_SAFN;
+ buf.sasb = GB_SASB_POWER_MANAGEMENT;
+ buf.gunm = GB_GUNM_POWER_MANAGEMENT;
+ buf.guds[0] = GB_GUDS_BATTERY_CHARGE_CONTROL;
+ buf.guds[1] = GB_GUDS_BATTERY_CHARGE_CONTROL_SET;
+ buf.guds[2] = value;
+
+ return galaxybook_acpi_method(galaxybook, GB_ACPI_METHOD_SETTINGS,
+ &buf, GB_SAWB_LEN_SETTINGS);
+}
+
+static int galaxybook_battery_ext_property_get(struct power_supply *psy,
+ const struct power_supply_ext *ext,
+ void *ext_data,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct samsung_galaxybook *galaxybook = ext_data;
+ int err;
+
+ if (psp != POWER_SUPPLY_PROP_CHARGE_CONTROL_END_THRESHOLD)
+ return -EINVAL;
+
+ err = charge_control_end_threshold_acpi_get(galaxybook, (u8 *)&val->intval);
+ if (err)
+ return err;
+
+ /*
+ * device stores "no end threshold" as 0 instead of 100;
+ * if device has 0, report 100
+ */
+ if (val->intval == 0)
+ val->intval = 100;
+
+ return 0;
+}
+
+static int galaxybook_battery_ext_property_set(struct power_supply *psy,
+ const struct power_supply_ext *ext,
+ void *ext_data,
+ enum power_supply_property psp,
+ const union power_supply_propval *val)
+{
+ struct samsung_galaxybook *galaxybook = ext_data;
+ u8 value;
+
+ if (psp != POWER_SUPPLY_PROP_CHARGE_CONTROL_END_THRESHOLD)
+ return -EINVAL;
+
+ value = val->intval;
+
+ if (value < 1 || value > 100)
+ return -EINVAL;
+
+ /*
+ * device stores "no end threshold" as 0 instead of 100;
+ * if setting to 100, send 0
+ */
+ if (value == 100)
+ value = 0;
+
+ return charge_control_end_threshold_acpi_set(galaxybook, value);
+}
+
+static int galaxybook_battery_ext_property_is_writeable(struct power_supply *psy,
+ const struct power_supply_ext *ext,
+ void *ext_data,
+ enum power_supply_property psp)
+{
+ if (psp == POWER_SUPPLY_PROP_CHARGE_CONTROL_END_THRESHOLD)
+ return true;
+
+ return false;
+}
+
+static const enum power_supply_property galaxybook_battery_properties[] = {
+ POWER_SUPPLY_PROP_CHARGE_CONTROL_END_THRESHOLD,
+};
+
+static const struct power_supply_ext galaxybook_battery_ext = {
+ .name = DRIVER_NAME,
+ .properties = galaxybook_battery_properties,
+ .num_properties = ARRAY_SIZE(galaxybook_battery_properties),
+ .get_property = galaxybook_battery_ext_property_get,
+ .set_property = galaxybook_battery_ext_property_set,
+ .property_is_writeable = galaxybook_battery_ext_property_is_writeable,
+};
+
+static int galaxybook_battery_add(struct power_supply *battery, struct acpi_battery_hook *hook)
+{
+ struct samsung_galaxybook *galaxybook =
+ container_of(hook, struct samsung_galaxybook, battery_hook);
+
+ return power_supply_register_extension(battery, &galaxybook_battery_ext,
+ &battery->dev, galaxybook);
+}
+
+static int galaxybook_battery_remove(struct power_supply *battery, struct acpi_battery_hook *hook)
+{
+ power_supply_unregister_extension(battery, &galaxybook_battery_ext);
+ return 0;
+}
+
+static int galaxybook_battery_threshold_init(struct samsung_galaxybook *galaxybook)
+{
+ u8 value;
+ int err;
+
+ err = charge_control_end_threshold_acpi_get(galaxybook, &value);
+ if (err) {
+ dev_dbg(&galaxybook->platform->dev,
+ "failed to get initial battery charge end threshold, error %d\n", err);
+ return 0;
+ }
+
+ galaxybook->battery_hook.add_battery = galaxybook_battery_add;
+ galaxybook->battery_hook.remove_battery = galaxybook_battery_remove;
+ galaxybook->battery_hook.name = "Samsung Galaxy Book Battery Extension";
+
+ return devm_battery_hook_register(&galaxybook->platform->dev, &galaxybook->battery_hook);
+}
+
+/*
+ * Platform Profile / Performance mode
+ */
+
+static int performance_mode_acpi_get(struct samsung_galaxybook *galaxybook, u8 *performance_mode)
+{
+ struct sawb buf = {};
+ int err;
+
+ buf.safn = GB_SAFN;
+ buf.sasb = GB_SASB_PERFORMANCE_MODE;
+ export_guid(buf.caid, &GB_PERFORMANCE_MODE_GUID);
+ buf.fncn = GB_FNCN_PERFORMANCE_MODE;
+ buf.subn = GB_SUBN_PERFORMANCE_MODE_GET;
+
+ err = galaxybook_acpi_method(galaxybook, GB_ACPI_METHOD_PERFORMANCE_MODE,
+ &buf, GB_SAWB_LEN_PERFORMANCE_MODE);
+ if (err)
+ return err;
+
+ *performance_mode = buf.iob0;
+
+ return 0;
+}
+
+static int performance_mode_acpi_set(struct samsung_galaxybook *galaxybook,
+ const u8 performance_mode)
+{
+ struct sawb buf = {};
+
+ buf.safn = GB_SAFN;
+ buf.sasb = GB_SASB_PERFORMANCE_MODE;
+ export_guid(buf.caid, &GB_PERFORMANCE_MODE_GUID);
+ buf.fncn = GB_FNCN_PERFORMANCE_MODE;
+ buf.subn = GB_SUBN_PERFORMANCE_MODE_SET;
+ buf.iob0 = performance_mode;
+
+ return galaxybook_acpi_method(galaxybook, GB_ACPI_METHOD_PERFORMANCE_MODE,
+ &buf, GB_SAWB_LEN_PERFORMANCE_MODE);
+}
+
+static int get_performance_mode_profile(struct samsung_galaxybook *galaxybook,
+ const u8 performance_mode,
+ enum platform_profile_option *profile)
+{
+ switch (performance_mode) {
+ case GB_PERFORMANCE_MODE_FANOFF:
+ *profile = PLATFORM_PROFILE_LOW_POWER;
+ break;
+ case GB_PERFORMANCE_MODE_LOWNOISE:
+ *profile = PLATFORM_PROFILE_QUIET;
+ break;
+ case GB_PERFORMANCE_MODE_OPTIMIZED:
+ case GB_PERFORMANCE_MODE_OPTIMIZED_V2:
+ *profile = PLATFORM_PROFILE_BALANCED;
+ break;
+ case GB_PERFORMANCE_MODE_PERFORMANCE:
+ case GB_PERFORMANCE_MODE_PERFORMANCE_V2:
+ case GB_PERFORMANCE_MODE_ULTRA:
+ *profile = PLATFORM_PROFILE_PERFORMANCE;
+ break;
+ case GB_PERFORMANCE_MODE_IGNORE1:
+ case GB_PERFORMANCE_MODE_IGNORE2:
+ return -EOPNOTSUPP;
+ default:
+ dev_warn(&galaxybook->platform->dev,
+ "unrecognized performance mode 0x%x\n", performance_mode);
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int galaxybook_platform_profile_get(struct device *dev,
+ enum platform_profile_option *profile)
+{
+ struct samsung_galaxybook *galaxybook = dev_get_drvdata(dev);
+ u8 performance_mode;
+ int err;
+
+ err = performance_mode_acpi_get(galaxybook, &performance_mode);
+ if (err)
+ return err;
+
+ return get_performance_mode_profile(galaxybook, performance_mode, profile);
+}
+
+static int galaxybook_platform_profile_set(struct device *dev,
+ enum platform_profile_option profile)
+{
+ struct samsung_galaxybook *galaxybook = dev_get_drvdata(dev);
+
+ return performance_mode_acpi_set(galaxybook,
+ galaxybook->profile_performance_modes[profile]);
+}
+
+static int galaxybook_platform_profile_probe(void *drvdata, unsigned long *choices)
+{
+ struct samsung_galaxybook *galaxybook = drvdata;
+ u8 *perfmodes = galaxybook->profile_performance_modes;
+ enum platform_profile_option profile;
+ struct sawb buf = {};
+ unsigned int i;
+ int err;
+
+ buf.safn = GB_SAFN;
+ buf.sasb = GB_SASB_PERFORMANCE_MODE;
+ export_guid(buf.caid, &GB_PERFORMANCE_MODE_GUID);
+ buf.fncn = GB_FNCN_PERFORMANCE_MODE;
+ buf.subn = GB_SUBN_PERFORMANCE_MODE_LIST;
+
+ err = galaxybook_acpi_method(galaxybook, GB_ACPI_METHOD_PERFORMANCE_MODE,
+ &buf, GB_SAWB_LEN_PERFORMANCE_MODE);
+ if (err) {
+ dev_dbg(&galaxybook->platform->dev,
+ "failed to get supported performance modes, error %d\n", err);
+ return err;
+ }
+
+ /* set initial default profile performance mode values */
+ perfmodes[PLATFORM_PROFILE_LOW_POWER] = GB_PERFORMANCE_MODE_FANOFF;
+ perfmodes[PLATFORM_PROFILE_QUIET] = GB_PERFORMANCE_MODE_LOWNOISE;
+ perfmodes[PLATFORM_PROFILE_BALANCED] = GB_PERFORMANCE_MODE_OPTIMIZED;
+ perfmodes[PLATFORM_PROFILE_PERFORMANCE] = GB_PERFORMANCE_MODE_PERFORMANCE;
+
+ /*
+ * The value returned in iob0 is the number of performance modes the
+ * device supports. The supported mode values follow as a list
+ * (iob1-iobX). Loop through them, enable the mapped platform_profile
+ * choice for each, and override "legacy" values along the way when a
+ * non-legacy value exists.
+ */
+ for (i = 1; i <= buf.iob0; i++) {
+ err = get_performance_mode_profile(galaxybook, buf.iobs[i], &profile);
+ if (err) {
+ dev_dbg(&galaxybook->platform->dev,
+ "ignoring unmapped performance mode 0x%x\n", buf.iobs[i]);
+ continue;
+ }
+ switch (buf.iobs[i]) {
+ case GB_PERFORMANCE_MODE_OPTIMIZED_V2:
+ perfmodes[profile] = GB_PERFORMANCE_MODE_OPTIMIZED_V2;
+ break;
+ case GB_PERFORMANCE_MODE_PERFORMANCE_V2:
+ /* only update if not already overwritten by Ultra */
+ if (perfmodes[profile] != GB_PERFORMANCE_MODE_ULTRA)
+ perfmodes[profile] = GB_PERFORMANCE_MODE_PERFORMANCE_V2;
+ break;
+ case GB_PERFORMANCE_MODE_ULTRA:
+ perfmodes[profile] = GB_PERFORMANCE_MODE_ULTRA;
+ break;
+ default:
+ break;
+ }
+ set_bit(profile, choices);
+ dev_dbg(&galaxybook->platform->dev,
+ "setting platform profile %d to use performance mode 0x%x\n",
+ profile, perfmodes[profile]);
+ }
+
+ /* initialize performance_mode using balanced's mapped value */
+ if (test_bit(PLATFORM_PROFILE_BALANCED, choices))
+ return performance_mode_acpi_set(galaxybook, perfmodes[PLATFORM_PROFILE_BALANCED]);
+
+ return 0;
+}
+
+static const struct platform_profile_ops galaxybook_platform_profile_ops = {
+ .probe = galaxybook_platform_profile_probe,
+ .profile_get = galaxybook_platform_profile_get,
+ .profile_set = galaxybook_platform_profile_set,
+};
+
+static int galaxybook_platform_profile_init(struct samsung_galaxybook *galaxybook)
+{
+ struct device *platform_profile_dev;
+ u8 performance_mode;
+ int err;
+
+ err = performance_mode_acpi_get(galaxybook, &performance_mode);
+ if (err) {
+ dev_dbg(&galaxybook->platform->dev,
+ "failed to get initial performance mode, error %d\n", err);
+ return GB_NOT_SUPPORTED;
+ }
+
+ platform_profile_dev = devm_platform_profile_register(&galaxybook->platform->dev,
+ DRIVER_NAME, galaxybook,
+ &galaxybook_platform_profile_ops);
+
+ return PTR_ERR_OR_ZERO(platform_profile_dev);
+}
+
+/*
+ * Firmware Attributes
+ */
+
+/* Power on lid open (device should power on when lid is opened) */
+
+static int power_on_lid_open_acpi_get(struct samsung_galaxybook *galaxybook, bool *value)
+{
+ struct sawb buf = {};
+ int err;
+
+ buf.safn = GB_SAFN;
+ buf.sasb = GB_SASB_POWER_MANAGEMENT;
+ buf.gunm = GB_GUNM_POWER_MANAGEMENT;
+ buf.guds[0] = GB_GUDS_POWER_ON_LID_OPEN;
+ buf.guds[1] = GB_GUDS_POWER_ON_LID_OPEN_GET;
+
+ err = galaxybook_acpi_method(galaxybook, GB_ACPI_METHOD_SETTINGS,
+ &buf, GB_SAWB_LEN_SETTINGS);
+ if (err)
+ return err;
+
+ *value = buf.guds[1];
+
+ return 0;
+}
+
+static int power_on_lid_open_acpi_set(struct samsung_galaxybook *galaxybook, const bool value)
+{
+ struct sawb buf = {};
+
+ lockdep_assert_held(&galaxybook->fw_attr_lock);
+
+ buf.safn = GB_SAFN;
+ buf.sasb = GB_SASB_POWER_MANAGEMENT;
+ buf.gunm = GB_GUNM_POWER_MANAGEMENT;
+ buf.guds[0] = GB_GUDS_POWER_ON_LID_OPEN;
+ buf.guds[1] = GB_GUDS_POWER_ON_LID_OPEN_SET;
+ buf.guds[2] = value ? 1 : 0;
+
+ return galaxybook_acpi_method(galaxybook, GB_ACPI_METHOD_SETTINGS,
+ &buf, GB_SAWB_LEN_SETTINGS);
+}
+
+/* USB Charging (USB ports can provide power when device is powered off) */
+
+static int usb_charging_acpi_get(struct samsung_galaxybook *galaxybook, bool *value)
+{
+ struct sawb buf = {};
+ int err;
+
+ buf.safn = GB_SAFN;
+ buf.sasb = GB_SASB_USB_CHARGING_GET;
+ buf.gunm = GB_GUNM_USB_CHARGING_GET;
+
+ err = galaxybook_acpi_method(galaxybook, GB_ACPI_METHOD_SETTINGS,
+ &buf, GB_SAWB_LEN_SETTINGS);
+ if (err)
+ return err;
+
+ *value = buf.gunm == 1;
+
+ return 0;
+}
+
+static int usb_charging_acpi_set(struct samsung_galaxybook *galaxybook, const bool value)
+{
+ struct sawb buf = {};
+
+ lockdep_assert_held(&galaxybook->fw_attr_lock);
+
+ buf.safn = GB_SAFN;
+ buf.sasb = GB_SASB_USB_CHARGING_SET;
+ buf.gunm = value ? GB_GUNM_USB_CHARGING_ON : GB_GUNM_USB_CHARGING_OFF;
+
+ return galaxybook_acpi_method(galaxybook, GB_ACPI_METHOD_SETTINGS,
+ &buf, GB_SAWB_LEN_SETTINGS);
+}
+
+/* Block recording (blocks access to camera and microphone) */
+
+static int block_recording_acpi_get(struct samsung_galaxybook *galaxybook, bool *value)
+{
+ struct sawb buf = {};
+ int err;
+
+ buf.safn = GB_SAFN;
+ buf.sasb = GB_SASB_BLOCK_RECORDING;
+ buf.gunm = GB_GUNM_GET;
+
+ err = galaxybook_acpi_method(galaxybook, GB_ACPI_METHOD_SETTINGS,
+ &buf, GB_SAWB_LEN_SETTINGS);
+ if (err)
+ return err;
+
+ *value = buf.gunm == GB_BLOCK_RECORDING_ON;
+
+ return 0;
+}
+
+static int block_recording_acpi_set(struct samsung_galaxybook *galaxybook, const bool value)
+{
+ struct sawb buf = {};
+ int err;
+
+ lockdep_assert_held(&galaxybook->fw_attr_lock);
+
+ buf.safn = GB_SAFN;
+ buf.sasb = GB_SASB_BLOCK_RECORDING;
+ buf.gunm = GB_GUNM_SET;
+ buf.guds[0] = value ? GB_BLOCK_RECORDING_ON : GB_BLOCK_RECORDING_OFF;
+
+ err = galaxybook_acpi_method(galaxybook, GB_ACPI_METHOD_SETTINGS,
+ &buf, GB_SAWB_LEN_SETTINGS);
+ if (err)
+ return err;
+
+ input_report_switch(galaxybook->camera_lens_cover_switch,
+ SW_CAMERA_LENS_COVER, value ? 1 : 0);
+ input_sync(galaxybook->camera_lens_cover_switch);
+
+ return 0;
+}
+
+static int galaxybook_block_recording_init(struct samsung_galaxybook *galaxybook)
+{
+ bool value;
+ int err;
+
+ err = galaxybook_enable_acpi_feature(galaxybook, GB_SASB_BLOCK_RECORDING);
+ if (err) {
+ dev_dbg(&galaxybook->platform->dev,
+ "failed to initialize block_recording, error %d\n", err);
+ return GB_NOT_SUPPORTED;
+ }
+
+ guard(mutex)(&galaxybook->fw_attr_lock);
+
+ err = block_recording_acpi_get(galaxybook, &value);
+ if (err) {
+ dev_dbg(&galaxybook->platform->dev,
+ "failed to get initial block_recording state, error %d\n", err);
+ return GB_NOT_SUPPORTED;
+ }
+
+ galaxybook->camera_lens_cover_switch =
+ devm_input_allocate_device(&galaxybook->platform->dev);
+ if (!galaxybook->camera_lens_cover_switch)
+ return -ENOMEM;
+
+ galaxybook->camera_lens_cover_switch->name = "Samsung Galaxy Book Camera Lens Cover";
+ galaxybook->camera_lens_cover_switch->phys = DRIVER_NAME "/input0";
+ galaxybook->camera_lens_cover_switch->id.bustype = BUS_HOST;
+
+ input_set_capability(galaxybook->camera_lens_cover_switch, EV_SW, SW_CAMERA_LENS_COVER);
+
+ err = input_register_device(galaxybook->camera_lens_cover_switch);
+ if (err)
+ return err;
+
+ input_report_switch(galaxybook->camera_lens_cover_switch,
+ SW_CAMERA_LENS_COVER, value ? 1 : 0);
+ input_sync(galaxybook->camera_lens_cover_switch);
+
+ return 0;
+}
+
+/* Firmware Attributes setup */
+
+static ssize_t type_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+ return sysfs_emit(buf, "enumeration\n");
+}
+
+static struct kobj_attribute fw_attr_type = __ATTR_RO(type);
+
+static ssize_t default_value_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+ return sysfs_emit(buf, "0\n");
+}
+
+static struct kobj_attribute fw_attr_default_value = __ATTR_RO(default_value);
+
+static ssize_t possible_values_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+ return sysfs_emit(buf, "0;1\n");
+}
+
+static struct kobj_attribute fw_attr_possible_values = __ATTR_RO(possible_values);
+
+static ssize_t display_name_language_code_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
+{
+ return sysfs_emit(buf, "%s\n", GB_ATTR_LANGUAGE_CODE);
+}
+
+static struct kobj_attribute fw_attr_display_name_language_code =
+ __ATTR_RO(display_name_language_code);
+
+static ssize_t display_name_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+ struct galaxybook_fw_attr *fw_attr =
+ container_of(attr, struct galaxybook_fw_attr, display_name);
+
+ return sysfs_emit(buf, "%s\n", galaxybook_fw_attr_desc[fw_attr->fw_attr_id]);
+}
+
+static ssize_t current_value_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+ struct galaxybook_fw_attr *fw_attr =
+ container_of(attr, struct galaxybook_fw_attr, current_value);
+ bool value;
+ int err;
+
+ err = fw_attr->get_value(fw_attr->galaxybook, &value);
+ if (err)
+ return err;
+
+ return sysfs_emit(buf, "%u\n", value);
+}
+
+static ssize_t current_value_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct galaxybook_fw_attr *fw_attr =
+ container_of(attr, struct galaxybook_fw_attr, current_value);
+ struct samsung_galaxybook *galaxybook = fw_attr->galaxybook;
+ bool value;
+ int err;
+
+ if (!count)
+ return -EINVAL;
+
+ err = kstrtobool(buf, &value);
+ if (err)
+ return err;
+
+ guard(mutex)(&galaxybook->fw_attr_lock);
+
+ err = fw_attr->set_value(galaxybook, value);
+ if (err)
+ return err;
+
+ return count;
+}
+
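+/*
+ * type, default_value, possible_values, display_name_language_code,
+ * display_name and current_value
+ */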
+#define NUM_FW_ATTR_ENUM_ATTRS 6
+
+static int galaxybook_fw_attr_init(struct samsung_galaxybook *galaxybook,
+ const enum galaxybook_fw_attr_id fw_attr_id,
+ int (*get_value)(struct samsung_galaxybook *galaxybook,
+ bool *value),
+ int (*set_value)(struct samsung_galaxybook *galaxybook,
+ const bool value))
+{
+ struct galaxybook_fw_attr *fw_attr;
+ struct attribute **attrs;
+
+ fw_attr = devm_kzalloc(&galaxybook->platform->dev, sizeof(*fw_attr), GFP_KERNEL);
+ if (!fw_attr)
+ return -ENOMEM;
+
+ attrs = devm_kcalloc(&galaxybook->platform->dev, NUM_FW_ATTR_ENUM_ATTRS + 1,
+ sizeof(*attrs), GFP_KERNEL);
+ if (!attrs)
+ return -ENOMEM;
+
+ attrs[0] = &fw_attr_type.attr;
+ attrs[1] = &fw_attr_default_value.attr;
+ attrs[2] = &fw_attr_possible_values.attr;
+ attrs[3] = &fw_attr_display_name_language_code.attr;
+
+ sysfs_attr_init(&fw_attr->display_name.attr);
+ fw_attr->display_name.attr.name = "display_name";
+ fw_attr->display_name.attr.mode = 0444;
+ fw_attr->display_name.show = display_name_show;
+ attrs[4] = &fw_attr->display_name.attr;
+
+ sysfs_attr_init(&fw_attr->current_value.attr);
+ fw_attr->current_value.attr.name = "current_value";
+ fw_attr->current_value.attr.mode = 0644;
+ fw_attr->current_value.show = current_value_show;
+ fw_attr->current_value.store = current_value_store;
+ attrs[5] = &fw_attr->current_value.attr;
+
+ attrs[6] = NULL;
+
+ fw_attr->galaxybook = galaxybook;
+ fw_attr->fw_attr_id = fw_attr_id;
+ fw_attr->attr_group.name = galaxybook_fw_attr_name[fw_attr_id];
+ fw_attr->attr_group.attrs = attrs;
+ fw_attr->get_value = get_value;
+ fw_attr->set_value = set_value;
+
+ return sysfs_create_group(&galaxybook->fw_attrs_kset->kobj, &fw_attr->attr_group);
+}
+
+static void galaxybook_kset_unregister(void *data)
+{
+ struct kset *kset = data;
+
+ kset_unregister(kset);
+}
+
+static void galaxybook_fw_attrs_dev_unregister(void *data)
+{
+ struct device *fw_attrs_dev = data;
+
+ device_unregister(fw_attrs_dev);
+}
+
+static int galaxybook_fw_attrs_init(struct samsung_galaxybook *galaxybook)
+{
+ bool value;
+ int err;
+
+ err = devm_mutex_init(&galaxybook->platform->dev, &galaxybook->fw_attr_lock);
+ if (err)
+ return err;
+
+ galaxybook->fw_attrs_dev = device_create(&firmware_attributes_class, NULL, MKDEV(0, 0),
+ NULL, "%s", DRIVER_NAME);
+ if (IS_ERR(galaxybook->fw_attrs_dev))
+ return PTR_ERR(galaxybook->fw_attrs_dev);
+
+ err = devm_add_action_or_reset(&galaxybook->platform->dev,
+ galaxybook_fw_attrs_dev_unregister,
+ galaxybook->fw_attrs_dev);
+ if (err)
+ return err;
+
+ galaxybook->fw_attrs_kset = kset_create_and_add("attributes", NULL,
+ &galaxybook->fw_attrs_dev->kobj);
+ if (!galaxybook->fw_attrs_kset)
+ return -ENOMEM;
+ err = devm_add_action_or_reset(&galaxybook->platform->dev,
+ galaxybook_kset_unregister, galaxybook->fw_attrs_kset);
+ if (err)
+ return err;
+
+ err = power_on_lid_open_acpi_get(galaxybook, &value);
+ if (!err) {
+ err = galaxybook_fw_attr_init(galaxybook,
+ GB_ATTR_POWER_ON_LID_OPEN,
+ &power_on_lid_open_acpi_get,
+ &power_on_lid_open_acpi_set);
+ if (err)
+ return err;
+ }
+
+ err = usb_charging_acpi_get(galaxybook, &value);
+ if (!err) {
+ err = galaxybook_fw_attr_init(galaxybook,
+ GB_ATTR_USB_CHARGING,
+ &usb_charging_acpi_get,
+ &usb_charging_acpi_set);
+ if (err)
+ return err;
+ }
+
+ err = galaxybook_block_recording_init(galaxybook);
+ if (err == GB_NOT_SUPPORTED)
+ return 0;
+ else if (err)
+ return err;
+
+ galaxybook->has_block_recording = true;
+
+ return galaxybook_fw_attr_init(galaxybook,
+ GB_ATTR_BLOCK_RECORDING,
+ &block_recording_acpi_get,
+ &block_recording_acpi_set);
+}
+
+/*
+ * Hotkeys and notifications
+ */
+
+static void galaxybook_kbd_backlight_hotkey_work(struct work_struct *work)
+{
+ struct samsung_galaxybook *galaxybook =
+ from_work(galaxybook, work, kbd_backlight_hotkey_work);
+ int brightness;
+ int err;
+
+ guard(mutex)(&galaxybook->kbd_backlight_lock);
+
+ brightness = galaxybook->kbd_backlight.brightness;
+ if (brightness < galaxybook->kbd_backlight.max_brightness)
+ brightness++;
+ else
+ brightness = 0;
+
+ err = led_set_brightness_sync(&galaxybook->kbd_backlight, brightness);
+ if (err) {
+ dev_err(&galaxybook->platform->dev,
+ "failed to set kbd_backlight brightness, error %d\n", err);
+ return;
+ }
+
+ led_classdev_notify_brightness_hw_changed(&galaxybook->kbd_backlight, brightness);
+}
+
+static void galaxybook_block_recording_hotkey_work(struct work_struct *work)
+{
+ struct samsung_galaxybook *galaxybook =
+ from_work(galaxybook, work, block_recording_hotkey_work);
+ bool value;
+ int err;
+
+ guard(mutex)(&galaxybook->fw_attr_lock);
+
+ err = block_recording_acpi_get(galaxybook, &value);
+ if (err) {
+ dev_err(&galaxybook->platform->dev,
+ "failed to get block_recording, error %d\n", err);
+ return;
+ }
+
+ err = block_recording_acpi_set(galaxybook, !value);
+ if (err)
+ dev_err(&galaxybook->platform->dev,
+ "failed to set block_recording, error %d\n", err);
+}
+
+static bool galaxybook_i8042_filter(unsigned char data, unsigned char str, struct serio *port,
+ void *context)
+{
+ struct samsung_galaxybook *galaxybook = context;
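+ /* true while the previous byte was the 0xe0 extended-scancode prefix */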
+ static bool extended;
+
+ if (str & I8042_STR_AUXDATA)
+ return false;
+
+ if (data == 0xe0) {
+ extended = true;
+ return true;
+ } else if (extended) {
+ extended = false;
+ switch (data) {
+ case GB_KEY_KBD_BACKLIGHT_KEYDOWN:
+ return true;
+ case GB_KEY_KBD_BACKLIGHT_KEYUP:
+ if (galaxybook->has_kbd_backlight)
+ schedule_work(&galaxybook->kbd_backlight_hotkey_work);
+ return true;
+
+ case GB_KEY_BLOCK_RECORDING_KEYDOWN:
+ return true;
+ case GB_KEY_BLOCK_RECORDING_KEYUP:
+ if (galaxybook->has_block_recording)
+ schedule_work(&galaxybook->block_recording_hotkey_work);
+ return true;
+
+ /* battery notifications are already delivered via the battery and SCAI devices */
+ case GB_KEY_BATTERY_NOTIFY_KEYUP:
+ case GB_KEY_BATTERY_NOTIFY_KEYDOWN:
+ return true;
+
+ default:
+ /*
+ * Report the previously filtered e0 before continuing
+ * with the next non-filtered byte.
+ */
+ serio_interrupt(port, 0xe0, 0);
+ return false;
+ }
+ }
+
+ return false;
+}
+
+static void galaxybook_i8042_filter_remove(void *data)
+{
+ struct samsung_galaxybook *galaxybook = data;
+
+ i8042_remove_filter(galaxybook_i8042_filter);
+ cancel_work_sync(&galaxybook->kbd_backlight_hotkey_work);
+ cancel_work_sync(&galaxybook->block_recording_hotkey_work);
+}
+
+static int galaxybook_i8042_filter_install(struct samsung_galaxybook *galaxybook)
+{
+ int err;
+
+ if (!galaxybook->has_kbd_backlight && !galaxybook->has_block_recording)
+ return 0;
+
+ INIT_WORK(&galaxybook->kbd_backlight_hotkey_work,
+ galaxybook_kbd_backlight_hotkey_work);
+ INIT_WORK(&galaxybook->block_recording_hotkey_work,
+ galaxybook_block_recording_hotkey_work);
+
+ err = i8042_install_filter(galaxybook_i8042_filter, galaxybook);
+ if (err)
+ return err;
+
+ return devm_add_action_or_reset(&galaxybook->platform->dev,
+ galaxybook_i8042_filter_remove, galaxybook);
+}
+
+/*
+ * ACPI device setup
+ */
+
+static void galaxybook_acpi_notify(acpi_handle handle, u32 event, void *data)
+{
+ struct samsung_galaxybook *galaxybook = data;
+
+ switch (event) {
+ case GB_ACPI_NOTIFY_BATTERY_STATE_CHANGED:
+ case GB_ACPI_NOTIFY_DEVICE_ON_TABLE:
+ case GB_ACPI_NOTIFY_DEVICE_OFF_TABLE:
+ break;
+ case GB_ACPI_NOTIFY_HOTKEY_PERFORMANCE_MODE:
+ if (galaxybook->has_performance_mode)
+ platform_profile_cycle();
+ break;
+ default:
+ dev_warn(&galaxybook->platform->dev,
+ "unknown ACPI notification event: 0x%x\n", event);
+ }
+
+ acpi_bus_generate_netlink_event(DRIVER_NAME, dev_name(&galaxybook->platform->dev),
+ event, 1);
+}
+
+static int galaxybook_enable_acpi_notify(struct samsung_galaxybook *galaxybook)
+{
+ struct sawb buf = {};
+ int err;
+
+ err = galaxybook_enable_acpi_feature(galaxybook, GB_SASB_NOTIFICATIONS);
+ if (err)
+ return err;
+
+ buf.safn = GB_SAFN;
+ buf.sasb = GB_SASB_NOTIFICATIONS;
+ buf.gunm = GB_GUNM_ACPI_NOTIFY_ENABLE;
+ buf.guds[0] = GB_GUDS_ACPI_NOTIFY_ENABLE;
+
+ return galaxybook_acpi_method(galaxybook, GB_ACPI_METHOD_SETTINGS,
+ &buf, GB_SAWB_LEN_SETTINGS);
+}
+
+static void galaxybook_acpi_remove_notify_handler(void *data)
+{
+ struct samsung_galaxybook *galaxybook = data;
+
+ acpi_remove_notify_handler(galaxybook->acpi->handle, ACPI_ALL_NOTIFY,
+ galaxybook_acpi_notify);
+}
+
+static void galaxybook_acpi_disable(void *data)
+{
+ struct samsung_galaxybook *galaxybook = data;
+
+ acpi_execute_simple_method(galaxybook->acpi->handle,
+ GB_ACPI_METHOD_ENABLE, GB_ACPI_METHOD_ENABLE_OFF);
+}
+
+static int galaxybook_acpi_init(struct samsung_galaxybook *galaxybook)
+{
+ acpi_status status;
+ int err;
+
+ status = acpi_execute_simple_method(galaxybook->acpi->handle, GB_ACPI_METHOD_ENABLE,
+ GB_ACPI_METHOD_ENABLE_ON);
+ if (ACPI_FAILURE(status))
+ return -EIO;
+ err = devm_add_action_or_reset(&galaxybook->platform->dev,
+ galaxybook_acpi_disable, galaxybook);
+ if (err)
+ return err;
+
+ status = acpi_install_notify_handler(galaxybook->acpi->handle, ACPI_ALL_NOTIFY,
+ galaxybook_acpi_notify, galaxybook);
+ if (ACPI_FAILURE(status))
+ return -EIO;
+ err = devm_add_action_or_reset(&galaxybook->platform->dev,
+ galaxybook_acpi_remove_notify_handler, galaxybook);
+ if (err)
+ return err;
+
+ err = galaxybook_enable_acpi_notify(galaxybook);
+ if (err)
+ dev_dbg(&galaxybook->platform->dev, "failed to enable ACPI notifications; "
+ "some hotkeys will not be supported\n");
+
+ err = galaxybook_enable_acpi_feature(galaxybook, GB_SASB_POWER_MANAGEMENT);
+ if (err)
+ dev_dbg(&galaxybook->platform->dev,
+ "failed to initialize ACPI power management features; "
+ "many features of this driver will not be available\n");
+
+ return 0;
+}
+
+/*
+ * Platform driver
+ */
+
+static int galaxybook_probe(struct platform_device *pdev)
+{
+ struct acpi_device *adev = ACPI_COMPANION(&pdev->dev);
+ struct samsung_galaxybook *galaxybook;
+ int err;
+
+ if (!adev)
+ return -ENODEV;
+
+ galaxybook = devm_kzalloc(&pdev->dev, sizeof(*galaxybook), GFP_KERNEL);
+ if (!galaxybook)
+ return -ENOMEM;
+
+ galaxybook->platform = pdev;
+ galaxybook->acpi = adev;
+
+ /*
+ * Features must be enabled and initialized in the following order to
+ * avoid failures seen on certain devices:
+ * - GB_SASB_POWER_MANAGEMENT (including performance mode)
+ * - GB_SASB_KBD_BACKLIGHT
+ * - GB_SASB_BLOCK_RECORDING (as part of fw_attrs init)
+ */
+
+ err = galaxybook_acpi_init(galaxybook);
+ if (err)
+ return dev_err_probe(&galaxybook->platform->dev, err,
+ "failed to initialize ACPI device\n");
+
+ err = galaxybook_platform_profile_init(galaxybook);
+ if (!err)
+ galaxybook->has_performance_mode = true;
+ else if (err != GB_NOT_SUPPORTED)
+ return dev_err_probe(&galaxybook->platform->dev, err,
+ "failed to initialize platform profile\n");
+
+ err = galaxybook_battery_threshold_init(galaxybook);
+ if (err)
+ return dev_err_probe(&galaxybook->platform->dev, err,
+ "failed to initialize battery threshold\n");
+
+ err = galaxybook_kbd_backlight_init(galaxybook);
+ if (!err)
+ galaxybook->has_kbd_backlight = true;
+ else if (err != GB_NOT_SUPPORTED)
+ return dev_err_probe(&galaxybook->platform->dev, err,
+ "failed to initialize kbd_backlight\n");
+
+ err = galaxybook_fw_attrs_init(galaxybook);
+ if (err)
+ return dev_err_probe(&galaxybook->platform->dev, err,
+ "failed to initialize firmware-attributes\n");
+
+ err = galaxybook_i8042_filter_install(galaxybook);
+ if (err)
+ return dev_err_probe(&galaxybook->platform->dev, err,
+ "failed to initialize i8042_filter\n");
+
+ return 0;
+}
+
+static const struct acpi_device_id galaxybook_device_ids[] = {
+ { "SAM0427" },
+ { "SAM0428" },
+ { "SAM0429" },
+ { "SAM0430" },
+ {}
+};
+MODULE_DEVICE_TABLE(acpi, galaxybook_device_ids);
+
+static struct platform_driver galaxybook_platform_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .acpi_match_table = galaxybook_device_ids,
+ },
+ .probe = galaxybook_probe,
+};
+module_platform_driver(galaxybook_platform_driver);
+
+MODULE_AUTHOR("Joshua Grisham <josh@joshuagrisham.com>");
+MODULE_DESCRIPTION("Samsung Galaxy Book driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/think-lmi.c b/drivers/platform/x86/think-lmi.c
index 323316ac6783..0fc275e461be 100644
--- a/drivers/platform/x86/think-lmi.c
+++ b/drivers/platform/x86/think-lmi.c
@@ -262,16 +262,11 @@ static int tlmi_simple_call(const char *guid, const char *arg)
return 0;
}
-/* Extract output string from WMI return buffer */
-static int tlmi_extract_output_string(const struct acpi_buffer *output,
- char **string)
+/* Extract output string from WMI return value */
+static int tlmi_extract_output_string(union acpi_object *obj, char **string)
{
- const union acpi_object *obj;
char *s;
- obj = output->pointer;
- if (!obj)
- return -ENOMEM;
if (obj->type != ACPI_TYPE_STRING || !obj->string.pointer)
return -EIO;
@@ -349,20 +344,18 @@ static int tlmi_opcode_setting(char *setting, const char *value)
return ret;
}
-static int tlmi_setting(int item, char **value, const char *guid_string)
+static int tlmi_setting(struct wmi_device *wdev, int item, char **value)
{
- struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
- acpi_status status;
+ union acpi_object *obj;
int ret;
- status = wmi_query_block(guid_string, item, &output);
- if (ACPI_FAILURE(status)) {
- kfree(output.pointer);
+ obj = wmidev_block_query(wdev, item);
+ if (!obj)
return -EIO;
- }
- ret = tlmi_extract_output_string(&output, value);
- kfree(output.pointer);
+ ret = tlmi_extract_output_string(obj, value);
+ kfree(obj);
+
return ret;
}
@@ -370,19 +363,22 @@ static int tlmi_get_bios_selections(const char *item, char **value)
{
const struct acpi_buffer input = { strlen(item), (char *)item };
struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
+ union acpi_object *obj;
acpi_status status;
int ret;
status = wmi_evaluate_method(LENOVO_GET_BIOS_SELECTIONS_GUID,
0, 0, &input, &output);
-
- if (ACPI_FAILURE(status)) {
- kfree(output.pointer);
+ if (ACPI_FAILURE(status))
return -EIO;
- }
- ret = tlmi_extract_output_string(&output, value);
- kfree(output.pointer);
+ obj = output.pointer;
+ if (!obj)
+ return -ENODATA;
+
+ ret = tlmi_extract_output_string(obj, value);
+ kfree(obj);
+
return ret;
}
@@ -993,7 +989,7 @@ static ssize_t current_value_show(struct kobject *kobj, struct kobj_attribute *a
char *item, *value;
int ret;
- ret = tlmi_setting(setting->index, &item, LENOVO_BIOS_SETTING_GUID);
+ ret = tlmi_setting(setting->wdev, setting->index, &item);
if (ret)
return ret;
@@ -1586,7 +1582,7 @@ static struct tlmi_pwd_setting *tlmi_create_auth(const char *pwd_type,
return new_pwd;
}
-static int tlmi_analyze(void)
+static int tlmi_analyze(struct wmi_device *wdev)
{
int i, ret;
@@ -1623,7 +1619,7 @@ static int tlmi_analyze(void)
char *item = NULL;
tlmi_priv.setting[i] = NULL;
- ret = tlmi_setting(i, &item, LENOVO_BIOS_SETTING_GUID);
+ ret = tlmi_setting(wdev, i, &item);
if (ret)
break;
if (!item)
@@ -1646,6 +1642,7 @@ static int tlmi_analyze(void)
kfree(item);
goto fail_clear_attr;
}
+ setting->wdev = wdev;
setting->index = i;
strscpy(setting->display_name, item);
/* If BIOS selections supported, load those */
@@ -1664,7 +1661,7 @@ static int tlmi_analyze(void)
*/
char *optitem, *optstart, *optend;
- if (!tlmi_setting(setting->index, &optitem, LENOVO_BIOS_SETTING_GUID)) {
+ if (!tlmi_setting(setting->wdev, setting->index, &optitem)) {
optstart = strstr(optitem, "[Optional:");
if (optstart) {
optstart += strlen("[Optional:");
@@ -1789,7 +1786,7 @@ static int tlmi_probe(struct wmi_device *wdev, const void *context)
{
int ret;
- ret = tlmi_analyze();
+ ret = tlmi_analyze(wdev);
if (ret)
return ret;
diff --git a/drivers/platform/x86/think-lmi.h b/drivers/platform/x86/think-lmi.h
index f267d8b46957..a80452482227 100644
--- a/drivers/platform/x86/think-lmi.h
+++ b/drivers/platform/x86/think-lmi.h
@@ -4,6 +4,7 @@
#define _THINK_LMI_H_
#include <linux/types.h>
+#include <linux/wmi.h>
#define TLMI_SETTINGS_COUNT 256
#define TLMI_SETTINGS_MAXLEN 512
@@ -87,6 +88,7 @@ struct tlmi_pwd_setting {
/* Attribute setting details */
struct tlmi_attr_setting {
struct kobject kobj;
+ struct wmi_device *wdev;
int index;
char display_name[TLMI_SETTINGS_MAXLEN];
char *possible_values;
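
The think-lmi changes above switch from the GUID-based wmi_query_block() to the wdev-based wmidev_block_query(), which hands back a union acpi_object that the caller owns. A minimal sketch of that ownership pattern, with hypothetical helper names:

/* Sketch only: caller owns and frees the object returned by wmidev_block_query(). */
#include <linux/acpi.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/wmi.h>

static int example_read_setting(struct wmi_device *wdev, u8 item, char **out)
{
	union acpi_object *obj;
	int ret = -EIO;

	obj = wmidev_block_query(wdev, item);
	if (!obj)
		return -EIO;

	if (obj->type == ACPI_TYPE_STRING && obj->string.pointer) {
		*out = kstrdup(obj->string.pointer, GFP_KERNEL);
		ret = *out ? 0 : -ENOMEM;
	}

	kfree(obj);
	return ret;
}
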
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index 72a10ed2017c..0384cf311878 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -39,7 +39,6 @@
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/dmi.h>
-#include <linux/fb.h>
#include <linux/freezer.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
@@ -368,9 +367,6 @@ static struct {
u32 beep_needs_two_args:1;
u32 mixer_no_level_control:1;
u32 battery_force_primary:1;
- u32 input_device_registered:1;
- u32 platform_drv_registered:1;
- u32 sensors_pdrv_registered:1;
u32 hotkey_poll_active:1;
u32 has_adaptive_kbd:1;
u32 kbd_lang:1;
@@ -8513,7 +8509,7 @@ static void fan_watchdog_reset(void)
if (fan_watchdog_maxinterval > 0 &&
tpacpi_lifecycle != TPACPI_LIFE_EXITING)
mod_delayed_work(tpacpi_wq, &fan_watchdog_task,
- msecs_to_jiffies(fan_watchdog_maxinterval * 1000));
+ secs_to_jiffies(fan_watchdog_maxinterval));
else
cancel_delayed_work(&fan_watchdog_task);
}
@@ -9972,6 +9968,7 @@ static const struct tpacpi_quirk battery_quirk_table[] __initconst = {
* Individual addressing is broken on models that expose the
* primary battery as BAT1.
*/
+ TPACPI_Q_LNV('G', '8', true), /* ThinkPad X131e */
TPACPI_Q_LNV('8', 'F', true), /* Thinkpad X120e */
TPACPI_Q_LNV('J', '7', true), /* B5400 */
TPACPI_Q_LNV('J', 'I', true), /* Thinkpad 11e */
@@ -11816,36 +11813,18 @@ MODULE_PARM_DESC(profile_force, "Force profile mode. -1=off, 1=MMC, 2=PSC");
static void thinkpad_acpi_module_exit(void)
{
- struct ibm_struct *ibm, *itmp;
-
tpacpi_lifecycle = TPACPI_LIFE_EXITING;
- if (tpacpi_hwmon)
- hwmon_device_unregister(tpacpi_hwmon);
- if (tp_features.sensors_pdrv_registered)
+ if (tpacpi_sensors_pdev) {
platform_driver_unregister(&tpacpi_hwmon_pdriver);
- if (tp_features.platform_drv_registered)
- platform_driver_unregister(&tpacpi_pdriver);
-
- list_for_each_entry_safe_reverse(ibm, itmp,
- &tpacpi_all_drivers,
- all_drivers) {
- ibm_exit(ibm);
+ platform_device_unregister(tpacpi_sensors_pdev);
}
- dbg_printk(TPACPI_DBG_INIT, "finished subdriver exit path...\n");
-
- if (tpacpi_inputdev) {
- if (tp_features.input_device_registered)
- input_unregister_device(tpacpi_inputdev);
- else
- input_free_device(tpacpi_inputdev);
+ if (tpacpi_pdev) {
+ platform_driver_unregister(&tpacpi_pdriver);
+ platform_device_unregister(tpacpi_pdev);
}
- if (tpacpi_sensors_pdev)
- platform_device_unregister(tpacpi_sensors_pdev);
- if (tpacpi_pdev)
- platform_device_unregister(tpacpi_pdev);
if (proc_dir)
remove_proc_entry(TPACPI_PROC_DIR, acpi_root_dir);
if (tpacpi_wq)
@@ -11857,11 +11836,76 @@ static void thinkpad_acpi_module_exit(void)
kfree(thinkpad_id.nummodel_str);
}
+static void tpacpi_subdrivers_release(void *data)
+{
+ struct ibm_struct *ibm, *itmp;
+
+ list_for_each_entry_safe_reverse(ibm, itmp, &tpacpi_all_drivers, all_drivers)
+ ibm_exit(ibm);
+
+ dbg_printk(TPACPI_DBG_INIT, "finished subdriver exit path...\n");
+}
+
+static int __init tpacpi_pdriver_probe(struct platform_device *pdev)
+{
+ int ret;
+
+ ret = devm_mutex_init(&pdev->dev, &tpacpi_inputdev_send_mutex);
+ if (ret)
+ return ret;
+
+ tpacpi_inputdev = devm_input_allocate_device(&pdev->dev);
+ if (!tpacpi_inputdev)
+ return -ENOMEM;
+
+ tpacpi_inputdev->name = "ThinkPad Extra Buttons";
+ tpacpi_inputdev->phys = TPACPI_DRVR_NAME "/input0";
+ tpacpi_inputdev->id.bustype = BUS_HOST;
+ tpacpi_inputdev->id.vendor = thinkpad_id.vendor;
+ tpacpi_inputdev->id.product = TPACPI_HKEY_INPUT_PRODUCT;
+ tpacpi_inputdev->id.version = TPACPI_HKEY_INPUT_VERSION;
+ tpacpi_inputdev->dev.parent = &tpacpi_pdev->dev;
+
+ /* Init subdriver dependencies */
+ tpacpi_detect_brightness_capabilities();
+
+ /* Init subdrivers */
+ for (unsigned int i = 0; i < ARRAY_SIZE(ibms_init); i++) {
+ ret = ibm_init(&ibms_init[i]);
+ if (ret >= 0 && *ibms_init[i].param)
+ ret = ibms_init[i].data->write(ibms_init[i].param);
+ if (ret < 0) {
+ tpacpi_subdrivers_release(NULL);
+ return ret;
+ }
+ }
+
+ ret = devm_add_action_or_reset(&pdev->dev, tpacpi_subdrivers_release, NULL);
+ if (ret)
+ return ret;
+
+ ret = input_register_device(tpacpi_inputdev);
+ if (ret < 0)
+ pr_err("unable to register input device\n");
+
+ return ret;
+}
+
+static int __init tpacpi_hwmon_pdriver_probe(struct platform_device *pdev)
+{
+ tpacpi_hwmon = devm_hwmon_device_register_with_groups(
+ &tpacpi_sensors_pdev->dev, TPACPI_NAME, NULL, tpacpi_hwmon_groups);
+
+ if (IS_ERR(tpacpi_hwmon))
+ pr_err("unable to register hwmon device\n");
+
+ return PTR_ERR_OR_ZERO(tpacpi_hwmon);
+}
static int __init thinkpad_acpi_module_init(void)
{
const struct dmi_system_id *dmi_id;
- int ret, i;
+ int ret;
acpi_object_type obj_type;
tpacpi_lifecycle = TPACPI_LIFE_INIT;
@@ -11921,93 +11965,29 @@ static int __init thinkpad_acpi_module_init(void)
tp_features.quirks = dmi_id->driver_data;
/* Device initialization */
- tpacpi_pdev = platform_device_register_simple(TPACPI_DRVR_NAME, PLATFORM_DEVID_NONE,
- NULL, 0);
+ tpacpi_pdev = platform_create_bundle(&tpacpi_pdriver, tpacpi_pdriver_probe,
+ NULL, 0, NULL, 0);
if (IS_ERR(tpacpi_pdev)) {
ret = PTR_ERR(tpacpi_pdev);
tpacpi_pdev = NULL;
- pr_err("unable to register platform device\n");
+ pr_err("unable to register platform device/driver bundle\n");
thinkpad_acpi_module_exit();
return ret;
}
- tpacpi_sensors_pdev = platform_device_register_simple(
- TPACPI_HWMON_DRVR_NAME,
- PLATFORM_DEVID_NONE, NULL, 0);
+
+ tpacpi_sensors_pdev = platform_create_bundle(&tpacpi_hwmon_pdriver,
+ tpacpi_hwmon_pdriver_probe,
+ NULL, 0, NULL, 0);
if (IS_ERR(tpacpi_sensors_pdev)) {
ret = PTR_ERR(tpacpi_sensors_pdev);
tpacpi_sensors_pdev = NULL;
- pr_err("unable to register hwmon platform device\n");
+ pr_err("unable to register hwmon platform device/driver bundle\n");
thinkpad_acpi_module_exit();
return ret;
}
- mutex_init(&tpacpi_inputdev_send_mutex);
- tpacpi_inputdev = input_allocate_device();
- if (!tpacpi_inputdev) {
- thinkpad_acpi_module_exit();
- return -ENOMEM;
- } else {
- /* Prepare input device, but don't register */
- tpacpi_inputdev->name = "ThinkPad Extra Buttons";
- tpacpi_inputdev->phys = TPACPI_DRVR_NAME "/input0";
- tpacpi_inputdev->id.bustype = BUS_HOST;
- tpacpi_inputdev->id.vendor = thinkpad_id.vendor;
- tpacpi_inputdev->id.product = TPACPI_HKEY_INPUT_PRODUCT;
- tpacpi_inputdev->id.version = TPACPI_HKEY_INPUT_VERSION;
- tpacpi_inputdev->dev.parent = &tpacpi_pdev->dev;
- }
-
- /* Init subdriver dependencies */
- tpacpi_detect_brightness_capabilities();
-
- /* Init subdrivers */
- for (i = 0; i < ARRAY_SIZE(ibms_init); i++) {
- ret = ibm_init(&ibms_init[i]);
- if (ret >= 0 && *ibms_init[i].param)
- ret = ibms_init[i].data->write(ibms_init[i].param);
- if (ret < 0) {
- thinkpad_acpi_module_exit();
- return ret;
- }
- }
-
tpacpi_lifecycle = TPACPI_LIFE_RUNNING;
- ret = platform_driver_register(&tpacpi_pdriver);
- if (ret) {
- pr_err("unable to register main platform driver\n");
- thinkpad_acpi_module_exit();
- return ret;
- }
- tp_features.platform_drv_registered = 1;
-
- ret = platform_driver_register(&tpacpi_hwmon_pdriver);
- if (ret) {
- pr_err("unable to register hwmon platform driver\n");
- thinkpad_acpi_module_exit();
- return ret;
- }
- tp_features.sensors_pdrv_registered = 1;
-
- tpacpi_hwmon = hwmon_device_register_with_groups(
- &tpacpi_sensors_pdev->dev, TPACPI_NAME, NULL, tpacpi_hwmon_groups);
- if (IS_ERR(tpacpi_hwmon)) {
- ret = PTR_ERR(tpacpi_hwmon);
- tpacpi_hwmon = NULL;
- pr_err("unable to register hwmon device\n");
- thinkpad_acpi_module_exit();
- return ret;
- }
-
- ret = input_register_device(tpacpi_inputdev);
- if (ret < 0) {
- pr_err("unable to register input device\n");
- thinkpad_acpi_module_exit();
- return ret;
- } else {
- tp_features.input_device_registered = 1;
- }
-
return 0;
}
diff --git a/drivers/platform/x86/wmi.c b/drivers/platform/x86/wmi.c
index 646370bd6b03..e46453750d5f 100644
--- a/drivers/platform/x86/wmi.c
+++ b/drivers/platform/x86/wmi.c
@@ -123,24 +123,6 @@ static const void *find_guid_context(struct wmi_block *wblock,
return NULL;
}
-static acpi_status wmi_method_enable(struct wmi_block *wblock, bool enable)
-{
- struct guid_block *block;
- char method[5];
- acpi_status status;
- acpi_handle handle;
-
- block = &wblock->gblock;
- handle = wblock->acpi_device->handle;
-
- snprintf(method, 5, "WE%02X", block->notify_id);
- status = acpi_execute_simple_method(handle, method, enable);
- if (status == AE_NOT_FOUND)
- return AE_OK;
-
- return status;
-}
-
#define WMI_ACPI_METHOD_NAME_SIZE 5
static inline void get_acpi_method_name(const struct wmi_block *wblock,
@@ -184,6 +166,44 @@ static int wmidev_match_guid(struct device *dev, const void *data)
static const struct bus_type wmi_bus_type;
+static const struct device_type wmi_type_event;
+
+static const struct device_type wmi_type_method;
+
+static int wmi_device_enable(struct wmi_device *wdev, bool enable)
+{
+ struct wmi_block *wblock = container_of(wdev, struct wmi_block, dev);
+ char method[WMI_ACPI_METHOD_NAME_SIZE];
+ acpi_handle handle;
+ acpi_status status;
+
+ if (!(wblock->gblock.flags & ACPI_WMI_EXPENSIVE))
+ return 0;
+
+ if (wblock->dev.dev.type == &wmi_type_method)
+ return 0;
+
+ if (wblock->dev.dev.type == &wmi_type_event)
+ snprintf(method, sizeof(method), "WE%02X", wblock->gblock.notify_id);
+ else
+ get_acpi_method_name(wblock, 'C', method);
+
+ /*
+ * Not all WMI devices marked as expensive actually implement the
+ * necessary ACPI method. Ignore this missing ACPI method to match
+ * the behaviour of the Windows driver.
+ */
+ status = acpi_get_handle(wblock->acpi_device->handle, method, &handle);
+ if (ACPI_FAILURE(status))
+ return 0;
+
+ status = acpi_execute_simple_method(handle, NULL, enable);
+ if (ACPI_FAILURE(status))
+ return -EIO;
+
+ return 0;
+}
+
static struct wmi_device *wmi_find_device_by_guid(const char *guid_string)
{
struct device *dev;
@@ -337,10 +357,8 @@ static acpi_status __query_block(struct wmi_block *wblock, u8 instance,
{
struct guid_block *block;
acpi_handle handle;
- acpi_status status, wc_status = AE_ERROR;
struct acpi_object_list input;
union acpi_object wq_params[1];
- char wc_method[WMI_ACPI_METHOD_NAME_SIZE];
char method[WMI_ACPI_METHOD_NAME_SIZE];
if (!out)
@@ -364,40 +382,9 @@ static acpi_status __query_block(struct wmi_block *wblock, u8 instance,
if (instance == 0 && test_bit(WMI_READ_TAKES_NO_ARGS, &wblock->flags))
input.count = 0;
- /*
- * If ACPI_WMI_EXPENSIVE, call the relevant WCxx method first to
- * enable collection.
- */
- if (block->flags & ACPI_WMI_EXPENSIVE) {
- get_acpi_method_name(wblock, 'C', wc_method);
-
- /*
- * Some GUIDs break the specification by declaring themselves
- * expensive, but have no corresponding WCxx method. So we
- * should not fail if this happens.
- */
- wc_status = acpi_execute_simple_method(handle, wc_method, 1);
- }
-
get_acpi_method_name(wblock, 'Q', method);
- status = acpi_evaluate_object(handle, method, &input, out);
-
- /*
- * If ACPI_WMI_EXPENSIVE, call the relevant WCxx method, even if
- * the WQxx method failed - we should disable collection anyway.
- */
- if ((block->flags & ACPI_WMI_EXPENSIVE) && ACPI_SUCCESS(wc_status)) {
- /*
- * Ignore whether this WCxx call succeeds or not since
- * the previously executed WQxx method call might have
- * succeeded, and returning the failing status code
- * of this call would throw away the result of the WQxx
- * call, potentially leaking memory.
- */
- acpi_execute_simple_method(handle, wc_method, 0);
- }
- return status;
+ return acpi_evaluate_object(handle, method, &input, out);
}
/**
@@ -421,9 +408,15 @@ acpi_status wmi_query_block(const char *guid_string, u8 instance,
if (IS_ERR(wdev))
return AE_ERROR;
+ if (wmi_device_enable(wdev, true) < 0)
+ dev_warn(&wdev->dev, "Failed to enable device\n");
+
wblock = container_of(wdev, struct wmi_block, dev);
status = __query_block(wblock, instance, out);
+ if (wmi_device_enable(wdev, false) < 0)
+ dev_warn(&wdev->dev, "Failed to disable device\n");
+
wmi_device_put(wdev);
return status;
@@ -470,7 +463,14 @@ acpi_status wmi_set_block(const char *guid_string, u8 instance, const struct acp
if (IS_ERR(wdev))
return AE_ERROR;
+ if (wmi_device_enable(wdev, true) < 0)
+ dev_warn(&wdev->dev, "Failed to enable device\n");
+
status = wmidev_block_set(wdev, instance, in);
+
+ if (wmi_device_enable(wdev, false) < 0)
+ dev_warn(&wdev->dev, "Failed to disable device\n");
+
wmi_device_put(wdev);
return status;
@@ -551,7 +551,7 @@ acpi_status wmi_install_notify_handler(const char *guid,
wblock->handler = handler;
wblock->handler_data = data;
- if (ACPI_FAILURE(wmi_method_enable(wblock, true)))
+ if (wmi_device_enable(wdev, true) < 0)
dev_warn(&wblock->dev.dev, "Failed to enable device\n");
status = AE_OK;
@@ -588,7 +588,7 @@ acpi_status wmi_remove_notify_handler(const char *guid)
if (!wblock->handler) {
status = AE_NULL_ENTRY;
} else {
- if (ACPI_FAILURE(wmi_method_enable(wblock, false)))
+ if (wmi_device_enable(wdev, false) < 0)
dev_warn(&wblock->dev.dev, "Failed to disable device\n");
wblock->handler = NULL;
@@ -821,11 +821,19 @@ static int wmi_dev_match(struct device *dev, const struct device_driver *driver)
return 0;
}
+static void wmi_dev_disable(void *data)
+{
+ struct device *dev = data;
+
+ if (wmi_device_enable(to_wmi_device(dev), false) < 0)
+ dev_warn(dev, "Failed to disable device\n");
+}
+
static int wmi_dev_probe(struct device *dev)
{
struct wmi_block *wblock = dev_to_wblock(dev);
struct wmi_driver *wdriver = to_wmi_driver(dev->driver);
- int ret = 0;
+ int ret;
/* Some older WMI drivers will break if instantiated multiple times,
* so they are blocked from probing WMI devices with a duplicated GUID.
@@ -844,18 +852,22 @@ static int wmi_dev_probe(struct device *dev)
return -ENODEV;
}
- if (ACPI_FAILURE(wmi_method_enable(wblock, true)))
+ if (wmi_device_enable(to_wmi_device(dev), true) < 0)
dev_warn(dev, "failed to enable device -- probing anyway\n");
+ /*
+ * We have to make sure that all devres-managed resources are released first because
+ * some might still want to access the underlying WMI device.
+ */
+ ret = devm_add_action_or_reset(dev, wmi_dev_disable, dev);
+ if (ret < 0)
+ return ret;
+
if (wdriver->probe) {
ret = wdriver->probe(to_wmi_device(dev),
find_guid_context(wblock, wdriver));
- if (ret) {
- if (ACPI_FAILURE(wmi_method_enable(wblock, false)))
- dev_warn(dev, "Failed to disable device\n");
-
+ if (ret)
return ret;
- }
}
down_write(&wblock->notify_lock);
@@ -876,9 +888,6 @@ static void wmi_dev_remove(struct device *dev)
if (wdriver->remove)
wdriver->remove(to_wmi_device(dev));
-
- if (ACPI_FAILURE(wmi_method_enable(wblock, false)))
- dev_warn(dev, "failed to disable device\n");
}
static void wmi_dev_shutdown(struct device *dev)
@@ -902,7 +911,11 @@ static void wmi_dev_shutdown(struct device *dev)
if (wdriver->shutdown)
wdriver->shutdown(to_wmi_device(dev));
- if (ACPI_FAILURE(wmi_method_enable(wblock, false)))
+ /*
+ * We still need to disable the WMI device here since devres-managed resources
+ * like wmi_dev_disable() will not be released during shutdown.
+ */
+ if (wmi_device_enable(to_wmi_device(dev), false) < 0)
dev_warn(dev, "Failed to disable device\n");
}
}
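
The probe/shutdown rework above relies on devres actions running in reverse order of registration: because wmi_dev_disable() is registered before the bound driver's probe runs, everything the driver registers afterwards is torn down before the device is disabled. A minimal, self-contained sketch of that ordering, with hypothetical names:

/* Sketch only: devres actions run in reverse order of registration. */
#include <linux/device.h>

static void disable_hw(void *data)
{
	struct device *dev = data;

	dev_info(dev, "hardware disabled last\n");	/* registered first, runs last */
}

static void release_driver_state(void *data)
{
	struct device *dev = data;

	dev_info(dev, "driver state released first\n");	/* registered last, runs first */
}

static int example_probe(struct device *dev)
{
	int ret;

	ret = devm_add_action_or_reset(dev, disable_hw, dev);
	if (ret)
		return ret;

	/* Anything registered from here on is released before disable_hw() runs. */
	return devm_add_action_or_reset(dev, release_driver_state, dev);
}
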
diff --git a/drivers/platform/x86/x86-android-tablets/Kconfig b/drivers/platform/x86/x86-android-tablets/Kconfig
index a67bddc43007..193da15ee01c 100644
--- a/drivers/platform/x86/x86-android-tablets/Kconfig
+++ b/drivers/platform/x86/x86-android-tablets/Kconfig
@@ -10,6 +10,7 @@ config X86_ANDROID_TABLETS
depends on ACPI && EFI && PCI
select NEW_LEDS
select LEDS_CLASS
+ select POWER_SUPPLY
help
X86 tablets which ship with Android as (part of) the factory image
typically have various problems with their DSDTs. The factory kernels
diff --git a/drivers/pmdomain/Kconfig b/drivers/pmdomain/Kconfig
index 23c64851a5b0..91f04ace35d4 100644
--- a/drivers/pmdomain/Kconfig
+++ b/drivers/pmdomain/Kconfig
@@ -16,6 +16,7 @@ source "drivers/pmdomain/st/Kconfig"
source "drivers/pmdomain/starfive/Kconfig"
source "drivers/pmdomain/sunxi/Kconfig"
source "drivers/pmdomain/tegra/Kconfig"
+source "drivers/pmdomain/thead/Kconfig"
source "drivers/pmdomain/ti/Kconfig"
source "drivers/pmdomain/xilinx/Kconfig"
diff --git a/drivers/pmdomain/Makefile b/drivers/pmdomain/Makefile
index a68ece2f4c68..7030f44a49df 100644
--- a/drivers/pmdomain/Makefile
+++ b/drivers/pmdomain/Makefile
@@ -14,6 +14,7 @@ obj-y += st/
obj-y += starfive/
obj-y += sunxi/
obj-y += tegra/
+obj-y += thead/
obj-y += ti/
obj-y += xilinx/
obj-y += core.o governor.o
diff --git a/drivers/pmdomain/amlogic/meson-secure-pwrc.c b/drivers/pmdomain/amlogic/meson-secure-pwrc.c
index 42ce41a2fe3a..ff76ea36835e 100644
--- a/drivers/pmdomain/amlogic/meson-secure-pwrc.c
+++ b/drivers/pmdomain/amlogic/meson-secure-pwrc.c
@@ -221,7 +221,7 @@ static const struct meson_secure_pwrc_domain_desc t7_pwrc_domains[] = {
SEC_PD(T7_VI_CLK2, 0),
/* ETH is for ethernet online wakeup, and should be always on */
SEC_PD(T7_ETH, GENPD_FLAG_ALWAYS_ON),
- SEC_PD(T7_ISP, 0),
+ TOP_PD(T7_ISP, 0, PWRC_T7_MIPI_ISP_ID),
SEC_PD(T7_MIPI_ISP, 0),
TOP_PD(T7_GDC, 0, PWRC_T7_NIC3_ID),
TOP_PD(T7_DEWARP, 0, PWRC_T7_NIC3_ID),
diff --git a/drivers/pmdomain/arm/scmi_pm_domain.c b/drivers/pmdomain/arm/scmi_pm_domain.c
index 86b531e15b85..2a213c218126 100644
--- a/drivers/pmdomain/arm/scmi_pm_domain.c
+++ b/drivers/pmdomain/arm/scmi_pm_domain.c
@@ -24,8 +24,7 @@ struct scmi_pm_domain {
static int scmi_pd_power(struct generic_pm_domain *domain, bool power_on)
{
- int ret;
- u32 state, ret_state;
+ u32 state;
struct scmi_pm_domain *pd = to_scmi_pd(domain);
if (power_on)
@@ -33,13 +32,7 @@ static int scmi_pd_power(struct generic_pm_domain *domain, bool power_on)
else
state = SCMI_POWER_STATE_GENERIC_OFF;
- ret = power_ops->state_set(pd->ph, pd->domain, state);
- if (!ret)
- ret = power_ops->state_get(pd->ph, pd->domain, &ret_state);
- if (!ret && state != ret_state)
- return -EIO;
-
- return ret;
+ return power_ops->state_set(pd->ph, pd->domain, state);
}
static int scmi_pd_power_on(struct generic_pm_domain *domain)
diff --git a/drivers/pmdomain/bcm/bcm2835-power.c b/drivers/pmdomain/bcm/bcm2835-power.c
index d2f0233cb620..d3cd816979ac 100644
--- a/drivers/pmdomain/bcm/bcm2835-power.c
+++ b/drivers/pmdomain/bcm/bcm2835-power.c
@@ -520,6 +520,7 @@ bcm2835_init_power_domain(struct bcm2835_power *power,
}
dom->base.name = name;
+ dom->base.flags = GENPD_FLAG_ACTIVE_WAKEUP;
dom->base.power_on = bcm2835_power_pd_power_on;
dom->base.power_off = bcm2835_power_pd_power_off;
diff --git a/drivers/pmdomain/core.c b/drivers/pmdomain/core.c
index 6c94137865c9..9b2f28b34bb5 100644
--- a/drivers/pmdomain/core.c
+++ b/drivers/pmdomain/core.c
@@ -697,6 +697,37 @@ bool dev_pm_genpd_get_hwmode(struct device *dev)
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_get_hwmode);
+/**
+ * dev_pm_genpd_rpm_always_on() - Control if the PM domain can be powered off.
+ *
+ * @dev: Device whose PM domain may need to stay powered on.
+ * @on: Whether the PM domain must stay powered on while @dev requires it.
+ *
+ * For some use cases a consumer driver requires its device to remain powered
+ * on from the PM domain perspective during runtime. This function allows that
+ * behaviour to be dynamically controlled for a device attached to a genpd.
+ *
+ * It is assumed that the caller guarantees that the genpd will not be detached
+ * while this routine is being called.
+ *
+ * Return: 0 on success, or a negative error value on failure.
+ */
+int dev_pm_genpd_rpm_always_on(struct device *dev, bool on)
+{
+ struct generic_pm_domain *genpd;
+
+ genpd = dev_to_genpd_safe(dev);
+ if (!genpd)
+ return -ENODEV;
+
+ genpd_lock(genpd);
+ dev_gpd_data(dev)->rpm_always_on = on;
+ genpd_unlock(genpd);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dev_pm_genpd_rpm_always_on);
+
static int _genpd_power_on(struct generic_pm_domain *genpd, bool timed)
{
unsigned int state_idx = genpd->state_idx;
@@ -868,6 +899,10 @@ static int genpd_power_off(struct generic_pm_domain *genpd, bool one_dev_on,
if (!pm_runtime_suspended(pdd->dev) ||
irq_safe_dev_in_sleep_domain(pdd->dev, genpd))
not_suspended++;
+
+ /* The device may need its PM domain to stay powered on. */
+ if (to_gpd_data(pdd)->rpm_always_on)
+ return -EBUSY;
}
if (not_suspended > 1 || (not_suspended == 1 && !one_dev_on))
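
A consumer-side sketch of the new dev_pm_genpd_rpm_always_on() helper, assuming the matching declaration in <linux/pm_domain.h> from this series and a purely hypothetical driver:

/* Sketch only: keep the device's PM domain powered across a critical window. */
#include <linux/device.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>

static int example_critical_work(struct device *dev)
{
	int ret;

	ret = dev_pm_genpd_rpm_always_on(dev, true);	/* genpd_power_off() now returns -EBUSY */
	if (ret)
		return ret;

	ret = pm_runtime_resume_and_get(dev);
	if (ret) {
		dev_pm_genpd_rpm_always_on(dev, false);
		return ret;
	}

	/* ... work that must not see the PM domain power off ... */

	pm_runtime_put(dev);
	return dev_pm_genpd_rpm_always_on(dev, false);	/* allow power-off again */
}
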
diff --git a/drivers/pmdomain/imx/gpcv2.c b/drivers/pmdomain/imx/gpcv2.c
index 958d34d4821b..105fcaf13a34 100644
--- a/drivers/pmdomain/imx/gpcv2.c
+++ b/drivers/pmdomain/imx/gpcv2.c
@@ -1361,7 +1361,7 @@ static int imx_pgc_domain_probe(struct platform_device *pdev)
}
if (IS_ENABLED(CONFIG_LOCKDEP) &&
- of_property_read_bool(domain->dev->of_node, "power-domains"))
+ of_property_present(domain->dev->of_node, "power-domains"))
lockdep_set_subclass(&domain->genpd.mlock, 1);
ret = of_genpd_add_provider_simple(domain->dev->of_node,
diff --git a/drivers/pmdomain/renesas/rcar-sysc.c b/drivers/pmdomain/renesas/rcar-sysc.c
index b99326917330..dce1a6d37e80 100644
--- a/drivers/pmdomain/renesas/rcar-sysc.c
+++ b/drivers/pmdomain/renesas/rcar-sysc.c
@@ -434,8 +434,6 @@ static int __init rcar_sysc_pd_init(void)
}
error = of_genpd_add_provider_onecell(np, &domains->onecell_data);
- if (!error)
- fwnode_dev_initialized(of_fwnode_handle(np), true);
out_put:
of_node_put(np);
diff --git a/drivers/pmdomain/rockchip/Kconfig b/drivers/pmdomain/rockchip/Kconfig
index b0d70f1a8439..218d43186e5b 100644
--- a/drivers/pmdomain/rockchip/Kconfig
+++ b/drivers/pmdomain/rockchip/Kconfig
@@ -4,6 +4,8 @@ if ARCH_ROCKCHIP || COMPILE_TEST
config ROCKCHIP_PM_DOMAINS
bool "Rockchip generic power domain"
depends on PM
+ depends on HAVE_ARM_SMCCC_DISCOVERY
+ depends on REGULATOR
select PM_GENERIC_DOMAINS
help
Say y here to enable power domain support.
diff --git a/drivers/pmdomain/rockchip/pm-domains.c b/drivers/pmdomain/rockchip/pm-domains.c
index cb0f93800138..03bcf79a461f 100644
--- a/drivers/pmdomain/rockchip/pm-domains.c
+++ b/drivers/pmdomain/rockchip/pm-domains.c
@@ -5,6 +5,7 @@
* Copyright (c) 2015 ROCKCHIP, Co. Ltd.
*/
+#include <linux/arm-smccc.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/err.h>
@@ -18,8 +19,10 @@
#include <linux/of_clk.h>
#include <linux/clk.h>
#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
#include <linux/mfd/syscon.h>
#include <soc/rockchip/pm_domains.h>
+#include <soc/rockchip/rockchip_sip.h>
#include <dt-bindings/power/px30-power.h>
#include <dt-bindings/power/rockchip,rv1126-power.h>
#include <dt-bindings/power/rk3036-power.h>
@@ -44,6 +47,7 @@ struct rockchip_domain_info {
int idle_mask;
int ack_mask;
bool active_wakeup;
+ bool need_regulator;
int pwr_w_mask;
int req_w_mask;
int clk_ungate_mask;
@@ -92,6 +96,8 @@ struct rockchip_pm_domain {
u32 *qos_save_regs[MAX_QOS_REGS_NUM];
int num_clks;
struct clk_bulk_data *clks;
+ struct device_node *node;
+ struct regulator *supply;
};
struct rockchip_pmu {
@@ -129,7 +135,7 @@ struct rockchip_pmu {
.active_wakeup = wakeup, \
}
-#define DOMAIN_M_O_R(_name, p_offset, pwr, status, m_offset, m_status, r_status, r_offset, req, idle, ack, wakeup) \
+#define DOMAIN_M_O_R(_name, p_offset, pwr, status, m_offset, m_status, r_status, r_offset, req, idle, ack, wakeup, regulator) \
{ \
.name = _name, \
.pwr_offset = p_offset, \
@@ -145,6 +151,7 @@ struct rockchip_pmu {
.idle_mask = (idle), \
.ack_mask = (ack), \
.active_wakeup = wakeup, \
+ .need_regulator = regulator, \
}
#define DOMAIN_M_O_R_G(_name, p_offset, pwr, status, m_offset, m_status, r_status, r_offset, req, idle, ack, g_mask, wakeup) \
@@ -303,8 +310,8 @@ void rockchip_pmu_unblock(void)
}
EXPORT_SYMBOL_GPL(rockchip_pmu_unblock);
-#define DOMAIN_RK3588(name, p_offset, pwr, status, m_offset, m_status, r_status, r_offset, req, idle, wakeup) \
- DOMAIN_M_O_R(name, p_offset, pwr, status, m_offset, m_status, r_status, r_offset, req, idle, idle, wakeup)
+#define DOMAIN_RK3588(name, p_offset, pwr, status, m_offset, m_status, r_status, r_offset, req, idle, wakeup, regulator) \
+ DOMAIN_M_O_R(name, p_offset, pwr, status, m_offset, m_status, r_status, r_offset, req, idle, idle, wakeup, regulator)
static bool rockchip_pmu_domain_is_idle(struct rockchip_pm_domain *pd)
{
@@ -533,16 +540,18 @@ error:
return ret;
}
-static void rockchip_do_pmu_set_power_domain(struct rockchip_pm_domain *pd,
- bool on)
+static int rockchip_do_pmu_set_power_domain(struct rockchip_pm_domain *pd,
+ bool on)
{
struct rockchip_pmu *pmu = pd->pmu;
struct generic_pm_domain *genpd = &pd->genpd;
u32 pd_pwr_offset = pd->info->pwr_offset;
bool is_on, is_mem_on = false;
+ struct arm_smccc_res res;
+ int ret;
if (pd->info->pwr_mask == 0)
- return;
+ return 0;
if (on && pd->info->mem_status_mask)
is_mem_on = rockchip_pmu_domain_is_mem_on(pd);
@@ -557,16 +566,28 @@ static void rockchip_do_pmu_set_power_domain(struct rockchip_pm_domain *pd,
wmb();
- if (is_mem_on && rockchip_pmu_domain_mem_reset(pd))
- return;
+ if (is_mem_on) {
+ ret = rockchip_pmu_domain_mem_reset(pd);
+ if (ret)
+ return ret;
+ }
- if (readx_poll_timeout_atomic(rockchip_pmu_domain_is_on, pd, is_on,
- is_on == on, 0, 10000)) {
- dev_err(pmu->dev,
- "failed to set domain '%s', val=%d\n",
- genpd->name, is_on);
- return;
+
+ ret = readx_poll_timeout_atomic(rockchip_pmu_domain_is_on, pd, is_on,
+ is_on == on, 0, 10000);
+ if (ret) {
+ dev_err(pmu->dev, "failed to set domain '%s' %s, val=%d\n",
+ genpd->name, on ? "on" : "off", is_on);
+ return ret;
}
+
+ /* Inform firmware to keep this pd on or off */
+ if (arm_smccc_1_1_get_conduit() != SMCCC_CONDUIT_NONE)
+ arm_smccc_smc(ROCKCHIP_SIP_SUSPEND_MODE, ROCKCHIP_SLEEP_PD_CONFIG,
+ pmu->info->pwr_offset + pd_pwr_offset,
+ pd->info->pwr_mask, on, 0, 0, 0, &res);
+
+ return 0;
}
static int rockchip_pd_power(struct rockchip_pm_domain *pd, bool power_on)
@@ -574,54 +595,99 @@ static int rockchip_pd_power(struct rockchip_pm_domain *pd, bool power_on)
struct rockchip_pmu *pmu = pd->pmu;
int ret;
- mutex_lock(&pmu->mutex);
+ guard(mutex)(&pmu->mutex);
- if (rockchip_pmu_domain_is_on(pd) != power_on) {
- ret = clk_bulk_enable(pd->num_clks, pd->clks);
- if (ret < 0) {
- dev_err(pmu->dev, "failed to enable clocks\n");
- mutex_unlock(&pmu->mutex);
- return ret;
- }
+ if (rockchip_pmu_domain_is_on(pd) == power_on)
+ return 0;
- rockchip_pmu_ungate_clk(pd, true);
+ ret = clk_bulk_enable(pd->num_clks, pd->clks);
+ if (ret < 0) {
+ dev_err(pmu->dev, "failed to enable clocks\n");
+ return ret;
+ }
- if (!power_on) {
- rockchip_pmu_save_qos(pd);
+ rockchip_pmu_ungate_clk(pd, true);
- /* if powering down, idle request to NIU first */
- rockchip_pmu_set_idle_request(pd, true);
- }
+ if (!power_on) {
+ rockchip_pmu_save_qos(pd);
- rockchip_do_pmu_set_power_domain(pd, power_on);
+ /* if powering down, idle request to NIU first */
+ ret = rockchip_pmu_set_idle_request(pd, true);
+ if (ret < 0)
+ goto out;
+ }
- if (power_on) {
- /* if powering up, leave idle mode */
- rockchip_pmu_set_idle_request(pd, false);
+ ret = rockchip_do_pmu_set_power_domain(pd, power_on);
+ if (ret < 0)
+ goto out;
- rockchip_pmu_restore_qos(pd);
- }
+ if (power_on) {
+ /* if powering up, leave idle mode */
+ ret = rockchip_pmu_set_idle_request(pd, false);
+ if (ret < 0)
+ goto out;
- rockchip_pmu_ungate_clk(pd, false);
- clk_bulk_disable(pd->num_clks, pd->clks);
+ rockchip_pmu_restore_qos(pd);
}
- mutex_unlock(&pmu->mutex);
- return 0;
+out:
+ rockchip_pmu_ungate_clk(pd, false);
+ clk_bulk_disable(pd->num_clks, pd->clks);
+
+ return ret;
+}
+
+static int rockchip_pd_regulator_disable(struct rockchip_pm_domain *pd)
+{
+ return IS_ERR_OR_NULL(pd->supply) ? 0 : regulator_disable(pd->supply);
+}
+
+static int rockchip_pd_regulator_enable(struct rockchip_pm_domain *pd)
+{
+ struct rockchip_pmu *pmu = pd->pmu;
+
+ if (!pd->info->need_regulator)
+ return 0;
+
+ if (IS_ERR_OR_NULL(pd->supply)) {
+ pd->supply = devm_of_regulator_get(pmu->dev, pd->node, "domain");
+
+ if (IS_ERR(pd->supply))
+ return PTR_ERR(pd->supply);
+ }
+
+ return regulator_enable(pd->supply);
}
static int rockchip_pd_power_on(struct generic_pm_domain *domain)
{
struct rockchip_pm_domain *pd = to_rockchip_pd(domain);
+ int ret;
+
+ ret = rockchip_pd_regulator_enable(pd);
+ if (ret) {
+ dev_err(pd->pmu->dev, "Failed to enable supply: %d\n", ret);
+ return ret;
+ }
- return rockchip_pd_power(pd, true);
+ ret = rockchip_pd_power(pd, true);
+ if (ret)
+ rockchip_pd_regulator_disable(pd);
+
+ return ret;
}
static int rockchip_pd_power_off(struct generic_pm_domain *domain)
{
struct rockchip_pm_domain *pd = to_rockchip_pd(domain);
+ int ret;
- return rockchip_pd_power(pd, false);
+ ret = rockchip_pd_power(pd, false);
+ if (ret)
+ return ret;
+
+ rockchip_pd_regulator_disable(pd);
+ return ret;
}
static int rockchip_pd_attach_dev(struct generic_pm_domain *genpd,
@@ -702,6 +768,7 @@ static int rockchip_pm_add_one_domain(struct rockchip_pmu *pmu,
pd->info = pd_info;
pd->pmu = pmu;
+ pd->node = node;
pd->num_clks = of_clk_get_parent_count(node);
if (pd->num_clks > 0) {
@@ -1165,35 +1232,35 @@ static const struct rockchip_domain_info rk3576_pm_domains[] = {
};
static const struct rockchip_domain_info rk3588_pm_domains[] = {
- [RK3588_PD_GPU] = DOMAIN_RK3588("gpu", 0x0, BIT(0), 0, 0x0, 0, BIT(1), 0x0, BIT(0), BIT(0), false),
- [RK3588_PD_NPU] = DOMAIN_RK3588("npu", 0x0, BIT(1), BIT(1), 0x0, 0, 0, 0x0, 0, 0, false),
- [RK3588_PD_VCODEC] = DOMAIN_RK3588("vcodec", 0x0, BIT(2), BIT(2), 0x0, 0, 0, 0x0, 0, 0, false),
- [RK3588_PD_NPUTOP] = DOMAIN_RK3588("nputop", 0x0, BIT(3), 0, 0x0, BIT(11), BIT(2), 0x0, BIT(1), BIT(1), false),
- [RK3588_PD_NPU1] = DOMAIN_RK3588("npu1", 0x0, BIT(4), 0, 0x0, BIT(12), BIT(3), 0x0, BIT(2), BIT(2), false),
- [RK3588_PD_NPU2] = DOMAIN_RK3588("npu2", 0x0, BIT(5), 0, 0x0, BIT(13), BIT(4), 0x0, BIT(3), BIT(3), false),
- [RK3588_PD_VENC0] = DOMAIN_RK3588("venc0", 0x0, BIT(6), 0, 0x0, BIT(14), BIT(5), 0x0, BIT(4), BIT(4), false),
- [RK3588_PD_VENC1] = DOMAIN_RK3588("venc1", 0x0, BIT(7), 0, 0x0, BIT(15), BIT(6), 0x0, BIT(5), BIT(5), false),
- [RK3588_PD_RKVDEC0] = DOMAIN_RK3588("rkvdec0", 0x0, BIT(8), 0, 0x0, BIT(16), BIT(7), 0x0, BIT(6), BIT(6), false),
- [RK3588_PD_RKVDEC1] = DOMAIN_RK3588("rkvdec1", 0x0, BIT(9), 0, 0x0, BIT(17), BIT(8), 0x0, BIT(7), BIT(7), false),
- [RK3588_PD_VDPU] = DOMAIN_RK3588("vdpu", 0x0, BIT(10), 0, 0x0, BIT(18), BIT(9), 0x0, BIT(8), BIT(8), false),
- [RK3588_PD_RGA30] = DOMAIN_RK3588("rga30", 0x0, BIT(11), 0, 0x0, BIT(19), BIT(10), 0x0, 0, 0, false),
- [RK3588_PD_AV1] = DOMAIN_RK3588("av1", 0x0, BIT(12), 0, 0x0, BIT(20), BIT(11), 0x0, BIT(9), BIT(9), false),
- [RK3588_PD_VI] = DOMAIN_RK3588("vi", 0x0, BIT(13), 0, 0x0, BIT(21), BIT(12), 0x0, BIT(10), BIT(10), false),
- [RK3588_PD_FEC] = DOMAIN_RK3588("fec", 0x0, BIT(14), 0, 0x0, BIT(22), BIT(13), 0x0, 0, 0, false),
- [RK3588_PD_ISP1] = DOMAIN_RK3588("isp1", 0x0, BIT(15), 0, 0x0, BIT(23), BIT(14), 0x0, BIT(11), BIT(11), false),
- [RK3588_PD_RGA31] = DOMAIN_RK3588("rga31", 0x4, BIT(0), 0, 0x0, BIT(24), BIT(15), 0x0, BIT(12), BIT(12), false),
- [RK3588_PD_VOP] = DOMAIN_RK3588("vop", 0x4, BIT(1), 0, 0x0, BIT(25), BIT(16), 0x0, BIT(13) | BIT(14), BIT(13) | BIT(14), false),
- [RK3588_PD_VO0] = DOMAIN_RK3588("vo0", 0x4, BIT(2), 0, 0x0, BIT(26), BIT(17), 0x0, BIT(15), BIT(15), false),
- [RK3588_PD_VO1] = DOMAIN_RK3588("vo1", 0x4, BIT(3), 0, 0x0, BIT(27), BIT(18), 0x4, BIT(0), BIT(16), false),
- [RK3588_PD_AUDIO] = DOMAIN_RK3588("audio", 0x4, BIT(4), 0, 0x0, BIT(28), BIT(19), 0x4, BIT(1), BIT(17), false),
- [RK3588_PD_PHP] = DOMAIN_RK3588("php", 0x4, BIT(5), 0, 0x0, BIT(29), BIT(20), 0x4, BIT(5), BIT(21), false),
- [RK3588_PD_GMAC] = DOMAIN_RK3588("gmac", 0x4, BIT(6), 0, 0x0, BIT(30), BIT(21), 0x0, 0, 0, false),
- [RK3588_PD_PCIE] = DOMAIN_RK3588("pcie", 0x4, BIT(7), 0, 0x0, BIT(31), BIT(22), 0x0, 0, 0, true),
- [RK3588_PD_NVM] = DOMAIN_RK3588("nvm", 0x4, BIT(8), BIT(24), 0x4, 0, 0, 0x4, BIT(2), BIT(18), false),
- [RK3588_PD_NVM0] = DOMAIN_RK3588("nvm0", 0x4, BIT(9), 0, 0x4, BIT(1), BIT(23), 0x0, 0, 0, false),
- [RK3588_PD_SDIO] = DOMAIN_RK3588("sdio", 0x4, BIT(10), 0, 0x4, BIT(2), BIT(24), 0x4, BIT(3), BIT(19), false),
- [RK3588_PD_USB] = DOMAIN_RK3588("usb", 0x4, BIT(11), 0, 0x4, BIT(3), BIT(25), 0x4, BIT(4), BIT(20), true),
- [RK3588_PD_SDMMC] = DOMAIN_RK3588("sdmmc", 0x4, BIT(13), 0, 0x4, BIT(5), BIT(26), 0x0, 0, 0, false),
+ [RK3588_PD_GPU] = DOMAIN_RK3588("gpu", 0x0, BIT(0), 0, 0x0, 0, BIT(1), 0x0, BIT(0), BIT(0), false, true),
+ [RK3588_PD_NPU] = DOMAIN_RK3588("npu", 0x0, BIT(1), BIT(1), 0x0, 0, 0, 0x0, 0, 0, false, true),
+ [RK3588_PD_VCODEC] = DOMAIN_RK3588("vcodec", 0x0, BIT(2), BIT(2), 0x0, 0, 0, 0x0, 0, 0, false, false),
+ [RK3588_PD_NPUTOP] = DOMAIN_RK3588("nputop", 0x0, BIT(3), 0, 0x0, BIT(11), BIT(2), 0x0, BIT(1), BIT(1), false, false),
+ [RK3588_PD_NPU1] = DOMAIN_RK3588("npu1", 0x0, BIT(4), 0, 0x0, BIT(12), BIT(3), 0x0, BIT(2), BIT(2), false, false),
+ [RK3588_PD_NPU2] = DOMAIN_RK3588("npu2", 0x0, BIT(5), 0, 0x0, BIT(13), BIT(4), 0x0, BIT(3), BIT(3), false, false),
+ [RK3588_PD_VENC0] = DOMAIN_RK3588("venc0", 0x0, BIT(6), 0, 0x0, BIT(14), BIT(5), 0x0, BIT(4), BIT(4), false, false),
+ [RK3588_PD_VENC1] = DOMAIN_RK3588("venc1", 0x0, BIT(7), 0, 0x0, BIT(15), BIT(6), 0x0, BIT(5), BIT(5), false, false),
+ [RK3588_PD_RKVDEC0] = DOMAIN_RK3588("rkvdec0", 0x0, BIT(8), 0, 0x0, BIT(16), BIT(7), 0x0, BIT(6), BIT(6), false, false),
+ [RK3588_PD_RKVDEC1] = DOMAIN_RK3588("rkvdec1", 0x0, BIT(9), 0, 0x0, BIT(17), BIT(8), 0x0, BIT(7), BIT(7), false, false),
+ [RK3588_PD_VDPU] = DOMAIN_RK3588("vdpu", 0x0, BIT(10), 0, 0x0, BIT(18), BIT(9), 0x0, BIT(8), BIT(8), false, false),
+ [RK3588_PD_RGA30] = DOMAIN_RK3588("rga30", 0x0, BIT(11), 0, 0x0, BIT(19), BIT(10), 0x0, 0, 0, false, false),
+ [RK3588_PD_AV1] = DOMAIN_RK3588("av1", 0x0, BIT(12), 0, 0x0, BIT(20), BIT(11), 0x0, BIT(9), BIT(9), false, false),
+ [RK3588_PD_VI] = DOMAIN_RK3588("vi", 0x0, BIT(13), 0, 0x0, BIT(21), BIT(12), 0x0, BIT(10), BIT(10), false, false),
+ [RK3588_PD_FEC] = DOMAIN_RK3588("fec", 0x0, BIT(14), 0, 0x0, BIT(22), BIT(13), 0x0, 0, 0, false, false),
+ [RK3588_PD_ISP1] = DOMAIN_RK3588("isp1", 0x0, BIT(15), 0, 0x0, BIT(23), BIT(14), 0x0, BIT(11), BIT(11), false, false),
+ [RK3588_PD_RGA31] = DOMAIN_RK3588("rga31", 0x4, BIT(0), 0, 0x0, BIT(24), BIT(15), 0x0, BIT(12), BIT(12), false, false),
+ [RK3588_PD_VOP] = DOMAIN_RK3588("vop", 0x4, BIT(1), 0, 0x0, BIT(25), BIT(16), 0x0, BIT(13) | BIT(14), BIT(13) | BIT(14), false, false),
+ [RK3588_PD_VO0] = DOMAIN_RK3588("vo0", 0x4, BIT(2), 0, 0x0, BIT(26), BIT(17), 0x0, BIT(15), BIT(15), false, false),
+ [RK3588_PD_VO1] = DOMAIN_RK3588("vo1", 0x4, BIT(3), 0, 0x0, BIT(27), BIT(18), 0x4, BIT(0), BIT(16), false, false),
+ [RK3588_PD_AUDIO] = DOMAIN_RK3588("audio", 0x4, BIT(4), 0, 0x0, BIT(28), BIT(19), 0x4, BIT(1), BIT(17), false, false),
+ [RK3588_PD_PHP] = DOMAIN_RK3588("php", 0x4, BIT(5), 0, 0x0, BIT(29), BIT(20), 0x4, BIT(5), BIT(21), false, false),
+ [RK3588_PD_GMAC] = DOMAIN_RK3588("gmac", 0x4, BIT(6), 0, 0x0, BIT(30), BIT(21), 0x0, 0, 0, false, false),
+ [RK3588_PD_PCIE] = DOMAIN_RK3588("pcie", 0x4, BIT(7), 0, 0x0, BIT(31), BIT(22), 0x0, 0, 0, true, false),
+ [RK3588_PD_NVM] = DOMAIN_RK3588("nvm", 0x4, BIT(8), BIT(24), 0x4, 0, 0, 0x4, BIT(2), BIT(18), false, false),
+ [RK3588_PD_NVM0] = DOMAIN_RK3588("nvm0", 0x4, BIT(9), 0, 0x4, BIT(1), BIT(23), 0x0, 0, 0, false, false),
+ [RK3588_PD_SDIO] = DOMAIN_RK3588("sdio", 0x4, BIT(10), 0, 0x4, BIT(2), BIT(24), 0x4, BIT(3), BIT(19), false, false),
+ [RK3588_PD_USB] = DOMAIN_RK3588("usb", 0x4, BIT(11), 0, 0x4, BIT(3), BIT(25), 0x4, BIT(4), BIT(20), true, false),
+ [RK3588_PD_SDMMC] = DOMAIN_RK3588("sdmmc", 0x4, BIT(13), 0, 0x4, BIT(5), BIT(26), 0x0, 0, 0, false, false),
};
static const struct rockchip_pmu_info px30_pmu = {
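
rockchip_pd_power() above now uses guard(mutex) from <linux/cleanup.h>, which releases the lock on every return path and removes the explicit mutex_unlock() calls. A minimal sketch of that pattern, with hypothetical names:

/* Sketch only: guard(mutex) unlocks automatically on all return paths. */
#include <linux/cleanup.h>
#include <linux/errno.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(example_lock);
static int example_state;

static int example_update(int value)
{
	guard(mutex)(&example_lock);	/* dropped when the function returns */

	if (value < 0)
		return -EINVAL;		/* early return, no explicit unlock needed */

	example_state = value;
	return 0;
}
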
diff --git a/drivers/pmdomain/sunxi/sun20i-ppu.c b/drivers/pmdomain/sunxi/sun20i-ppu.c
index 8700f9dd5f75..9f002748d224 100644
--- a/drivers/pmdomain/sunxi/sun20i-ppu.c
+++ b/drivers/pmdomain/sunxi/sun20i-ppu.c
@@ -182,11 +182,26 @@ static const struct sun20i_ppu_desc sun20i_d1_ppu_desc = {
.num_domains = ARRAY_SIZE(sun20i_d1_ppu_pd_names),
};
+static const char *const sun8i_v853_ppu_pd_names[] = {
+ "RISCV",
+ "NPU",
+ "VE",
+};
+
+static const struct sun20i_ppu_desc sun8i_v853_ppu_desc = {
+ .names = sun8i_v853_ppu_pd_names,
+ .num_domains = ARRAY_SIZE(sun8i_v853_ppu_pd_names),
+};
+
static const struct of_device_id sun20i_ppu_of_match[] = {
{
.compatible = "allwinner,sun20i-d1-ppu",
.data = &sun20i_d1_ppu_desc,
},
+ {
+ .compatible = "allwinner,sun8i-v853-ppu",
+ .data = &sun8i_v853_ppu_desc,
+ },
{ }
};
MODULE_DEVICE_TABLE(of, sun20i_ppu_of_match);
diff --git a/drivers/pmdomain/thead/Kconfig b/drivers/pmdomain/thead/Kconfig
new file mode 100644
index 000000000000..7d52f8374b07
--- /dev/null
+++ b/drivers/pmdomain/thead/Kconfig
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+config TH1520_PM_DOMAINS
+ tristate "Support TH1520 Power Domains"
+ depends on TH1520_AON_PROTOCOL
+ select REGMAP_MMIO
+ help
+ This driver enables power domain management for the T-HEAD
+ TH1520 SoC. On this SoC there are a number of power domains,
+ which can be managed independently. For example, the GPU, NPU,
+ and DPU reside in their own power domains, which can be
+ turned on and off.
diff --git a/drivers/pmdomain/thead/Makefile b/drivers/pmdomain/thead/Makefile
new file mode 100644
index 000000000000..adfdf5479c68
--- /dev/null
+++ b/drivers/pmdomain/thead/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_TH1520_PM_DOMAINS) += th1520-pm-domains.o
diff --git a/drivers/pmdomain/thead/th1520-pm-domains.c b/drivers/pmdomain/thead/th1520-pm-domains.c
new file mode 100644
index 000000000000..f702e20306f4
--- /dev/null
+++ b/drivers/pmdomain/thead/th1520-pm-domains.c
@@ -0,0 +1,218 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 Alibaba Group Holding Limited.
+ * Copyright (c) 2024 Samsung Electronics Co., Ltd.
+ * Author: Michal Wilczynski <m.wilczynski@samsung.com>
+ */
+
+#include <linux/firmware/thead/thead,th1520-aon.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
+
+#include <dt-bindings/power/thead,th1520-power.h>
+
+struct th1520_power_domain {
+ struct th1520_aon_chan *aon_chan;
+ struct generic_pm_domain genpd;
+ u32 rsrc;
+};
+
+struct th1520_power_info {
+ const char *name;
+ u32 rsrc;
+ bool disabled;
+};
+
+/*
+ * The AUDIO power domain is marked as disabled to prevent the driver from
+ * managing its power state. Direct AON firmware calls to control this power
+ * island trigger a firmware bug causing system instability. Until this
+ * firmware issue is resolved, the AUDIO power domain must remain disabled
+ * to avoid crashes.
+ */
+static const struct th1520_power_info th1520_pd_ranges[] = {
+ [TH1520_AUDIO_PD] = { "audio", TH1520_AON_AUDIO_PD, true },
+ [TH1520_VDEC_PD] = { "vdec", TH1520_AON_VDEC_PD, false },
+ [TH1520_NPU_PD] = { "npu", TH1520_AON_NPU_PD, false },
+ [TH1520_VENC_PD] = { "venc", TH1520_AON_VENC_PD, false },
+ [TH1520_GPU_PD] = { "gpu", TH1520_AON_GPU_PD, false },
+ [TH1520_DSP0_PD] = { "dsp0", TH1520_AON_DSP0_PD, false },
+ [TH1520_DSP1_PD] = { "dsp1", TH1520_AON_DSP1_PD, false }
+};
+
+static inline struct th1520_power_domain *
+to_th1520_power_domain(struct generic_pm_domain *genpd)
+{
+ return container_of(genpd, struct th1520_power_domain, genpd);
+}
+
+static int th1520_pd_power_on(struct generic_pm_domain *domain)
+{
+ struct th1520_power_domain *pd = to_th1520_power_domain(domain);
+
+ return th1520_aon_power_update(pd->aon_chan, pd->rsrc, true);
+}
+
+static int th1520_pd_power_off(struct generic_pm_domain *domain)
+{
+ struct th1520_power_domain *pd = to_th1520_power_domain(domain);
+
+ return th1520_aon_power_update(pd->aon_chan, pd->rsrc, false);
+}
+
+static struct generic_pm_domain *th1520_pd_xlate(const struct of_phandle_args *spec,
+ void *data)
+{
+ struct generic_pm_domain *domain = ERR_PTR(-ENOENT);
+ struct genpd_onecell_data *pd_data = data;
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(th1520_pd_ranges); i++) {
+ struct th1520_power_domain *pd;
+
+ if (th1520_pd_ranges[i].disabled)
+ continue;
+
+ pd = to_th1520_power_domain(pd_data->domains[i]);
+ if (pd->rsrc == spec->args[0]) {
+ domain = &pd->genpd;
+ break;
+ }
+ }
+
+ return domain;
+}
+
+static struct th1520_power_domain *
+th1520_add_pm_domain(struct device *dev, const struct th1520_power_info *pi)
+{
+ struct th1520_power_domain *pd;
+ int ret;
+
+ pd = devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL);
+ if (!pd)
+ return ERR_PTR(-ENOMEM);
+
+ pd->rsrc = pi->rsrc;
+ pd->genpd.power_on = th1520_pd_power_on;
+ pd->genpd.power_off = th1520_pd_power_off;
+ pd->genpd.name = pi->name;
+
+ ret = pm_genpd_init(&pd->genpd, NULL, true);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return pd;
+}
+
+static void th1520_pd_init_all_off(struct generic_pm_domain **domains,
+ struct device *dev)
+{
+ int ret;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(th1520_pd_ranges); i++) {
+ struct th1520_power_domain *pd;
+
+ if (th1520_pd_ranges[i].disabled)
+ continue;
+
+ pd = to_th1520_power_domain(domains[i]);
+
+ ret = th1520_aon_power_update(pd->aon_chan, pd->rsrc, false);
+ if (ret)
+ dev_err(dev,
+ "Failed to initially power down power domain %s\n",
+ pd->genpd.name);
+ }
+}
+
+static int th1520_pd_probe(struct platform_device *pdev)
+{
+ struct generic_pm_domain **domains;
+ struct genpd_onecell_data *pd_data;
+ struct th1520_aon_chan *aon_chan;
+ struct device *dev = &pdev->dev;
+ int i, ret;
+
+ aon_chan = th1520_aon_init(dev);
+ if (IS_ERR(aon_chan))
+ return dev_err_probe(dev, PTR_ERR(aon_chan),
+ "Failed to get AON channel\n");
+
+ domains = devm_kcalloc(dev, ARRAY_SIZE(th1520_pd_ranges),
+ sizeof(*domains), GFP_KERNEL);
+ if (!domains) {
+ ret = -ENOMEM;
+ goto err_clean_aon;
+ }
+
+ pd_data = devm_kzalloc(dev, sizeof(*pd_data), GFP_KERNEL);
+ if (!pd_data) {
+ ret = -ENOMEM;
+ goto err_clean_aon;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(th1520_pd_ranges); i++) {
+ struct th1520_power_domain *pd;
+
+ if (th1520_pd_ranges[i].disabled)
+ continue;
+
+ pd = th1520_add_pm_domain(dev, &th1520_pd_ranges[i]);
+ if (IS_ERR(pd)) {
+ ret = PTR_ERR(pd);
+ goto err_clean_genpd;
+ }
+
+ pd->aon_chan = aon_chan;
+ domains[i] = &pd->genpd;
+ dev_dbg(dev, "added power domain %s\n", pd->genpd.name);
+ }
+
+ pd_data->domains = domains;
+ pd_data->num_domains = ARRAY_SIZE(th1520_pd_ranges);
+ pd_data->xlate = th1520_pd_xlate;
+
+ /*
+ * Initialize all power domains to off to ensure they start in a
+ * low-power state. This allows device drivers to manage power
+ * domains by turning them on or off as needed.
+ */
+ th1520_pd_init_all_off(domains, dev);
+
+ ret = of_genpd_add_provider_onecell(dev->of_node, pd_data);
+ if (ret)
+ goto err_clean_genpd;
+
+ return 0;
+
+err_clean_genpd:
+ for (i--; i >= 0; i--)
+ pm_genpd_remove(domains[i]);
+err_clean_aon:
+ th1520_aon_deinit(aon_chan);
+
+ return ret;
+}
+
+static const struct of_device_id th1520_pd_match[] = {
+ { .compatible = "thead,th1520-aon" },
+ { /* Sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, th1520_pd_match);
+
+static struct platform_driver th1520_pd_driver = {
+ .driver = {
+ .name = "th1520-pd",
+ .of_match_table = th1520_pd_match,
+ .suppress_bind_attrs = true,
+ },
+ .probe = th1520_pd_probe,
+};
+module_platform_driver(th1520_pd_driver);
+
+MODULE_AUTHOR("Michal Wilczynski <m.wilczynski@samsung.com>");
+MODULE_DESCRIPTION("T-HEAD TH1520 SoC power domain controller");
+MODULE_LICENSE("GPL");
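
For context, a hedged consumer-side sketch: once a device node lists one of these domains in its power-domains property, the genpd core attaches it automatically and runtime PM ends up in th1520_pd_power_on()/th1520_pd_power_off(). The driver below is hypothetical:

/* Sketch only: runtime PM on an attached consumer toggles the TH1520 domain. */
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

static int example_consumer_probe(struct platform_device *pdev)
{
	int ret;

	/* The genpd core has already attached the domain listed in "power-domains". */
	ret = devm_pm_runtime_enable(&pdev->dev);
	if (ret)
		return ret;

	ret = pm_runtime_resume_and_get(&pdev->dev);	/* powers the domain on */
	if (ret)
		return ret;

	/* ... use the hardware ... */

	pm_runtime_put(&pdev->dev);			/* domain may power off again */
	return 0;
}
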
diff --git a/drivers/pmdomain/ti/omap_prm.c b/drivers/pmdomain/ti/omap_prm.c
index b8ceb3c2b81c..79d165331d8c 100644
--- a/drivers/pmdomain/ti/omap_prm.c
+++ b/drivers/pmdomain/ti/omap_prm.c
@@ -613,7 +613,7 @@ static int omap_prm_domain_attach_clock(struct device *dev,
if (!of_device_is_compatible(np, "simple-pm-bus"))
return 0;
- if (!of_property_read_bool(np, "clocks"))
+ if (!of_property_present(np, "clocks"))
return 0;
error = pm_clk_create(dev);
diff --git a/drivers/pnp/base.h b/drivers/pnp/base.h
index 4e80273dfb1e..b342570d0236 100644
--- a/drivers/pnp/base.h
+++ b/drivers/pnp/base.h
@@ -9,7 +9,6 @@ extern const struct attribute_group *pnp_dev_groups[];
extern const struct bus_type pnp_bus_type;
int pnp_register_protocol(struct pnp_protocol *protocol);
-void pnp_unregister_protocol(struct pnp_protocol *protocol);
#define PNP_EISA_ID_MASK 0x7fffffff
void pnp_eisa_id_to_string(u32 id, char *str);
@@ -21,9 +20,7 @@ int pnp_add_device(struct pnp_dev *dev);
struct pnp_id *pnp_add_id(struct pnp_dev *dev, const char *id);
int pnp_add_card(struct pnp_card *card);
-void pnp_remove_card(struct pnp_card *card);
int pnp_add_card_device(struct pnp_card *card, struct pnp_dev *dev);
-void pnp_remove_card_device(struct pnp_dev *dev);
struct pnp_port {
resource_size_t min; /* min base number */
@@ -138,7 +135,6 @@ void pnp_init_resources(struct pnp_dev *dev);
void pnp_fixup_device(struct pnp_dev *dev);
void pnp_free_options(struct pnp_dev *dev);
int __pnp_add_device(struct pnp_dev *dev);
-void __pnp_remove_device(struct pnp_dev *dev);
int pnp_check_port(struct pnp_dev *dev, struct resource *res);
int pnp_check_mem(struct pnp_dev *dev, struct resource *res);
diff --git a/drivers/pnp/card.c b/drivers/pnp/card.c
index 9610a9f08ff4..c7596dc24fbd 100644
--- a/drivers/pnp/card.c
+++ b/drivers/pnp/card.c
@@ -270,25 +270,6 @@ int pnp_add_card(struct pnp_card *card)
}
/**
- * pnp_remove_card - removes a PnP card from the PnP Layer
- * @card: pointer to the card to remove
- */
-void pnp_remove_card(struct pnp_card *card)
-{
- struct list_head *pos, *temp;
-
- device_unregister(&card->dev);
- mutex_lock(&pnp_lock);
- list_del(&card->global_list);
- list_del(&card->protocol_list);
- mutex_unlock(&pnp_lock);
- list_for_each_safe(pos, temp, &card->devices) {
- struct pnp_dev *dev = card_to_pnp_dev(pos);
- pnp_remove_card_device(dev);
- }
-}
-
-/**
* pnp_add_card_device - adds a device to the specified card
* @card: pointer to the card to add to
* @dev: pointer to the device to add
@@ -307,19 +288,6 @@ int pnp_add_card_device(struct pnp_card *card, struct pnp_dev *dev)
}
/**
- * pnp_remove_card_device- removes a device from the specified card
- * @dev: pointer to the device to remove
- */
-void pnp_remove_card_device(struct pnp_dev *dev)
-{
- mutex_lock(&pnp_lock);
- dev->card = NULL;
- list_del(&dev->card_list);
- mutex_unlock(&pnp_lock);
- __pnp_remove_device(dev);
-}
-
-/**
* pnp_request_card_device - Searches for a PnP device under the specified card
* @clink: pointer to the card link, cannot be NULL
* @id: pointer to a PnP ID structure that explains the rules for finding the device
diff --git a/drivers/pnp/core.c b/drivers/pnp/core.c
index 6a60c5d83383..ac48db6dcfe3 100644
--- a/drivers/pnp/core.c
+++ b/drivers/pnp/core.c
@@ -78,16 +78,6 @@ int pnp_register_protocol(struct pnp_protocol *protocol)
return ret;
}
-/**
- * pnp_unregister_protocol - removes a pnp protocol from the pnp layer
- * @protocol: pointer to the corresponding pnp_protocol structure
- */
-void pnp_unregister_protocol(struct pnp_protocol *protocol)
-{
- pnp_remove_protocol(protocol);
- device_unregister(&protocol->dev);
-}
-
static void pnp_free_ids(struct pnp_dev *dev)
{
struct pnp_id *id;
@@ -220,12 +210,6 @@ int pnp_add_device(struct pnp_dev *dev)
return 0;
}
-void __pnp_remove_device(struct pnp_dev *dev)
-{
- pnp_delist_device(dev);
- device_unregister(&dev->dev);
-}
-
static int __init pnp_init(void)
{
return bus_register(&pnp_bus_type);
diff --git a/drivers/power/reset/ltc2952-poweroff.c b/drivers/power/reset/ltc2952-poweroff.c
index 1a6fc8d38e20..90c664d344d0 100644
--- a/drivers/power/reset/ltc2952-poweroff.c
+++ b/drivers/power/reset/ltc2952-poweroff.c
@@ -162,11 +162,11 @@ static void ltc2952_poweroff_default(struct ltc2952_poweroff *data)
data->wde_interval = 300L * NSEC_PER_MSEC;
data->trigger_delay = ktime_set(2, 500L * NSEC_PER_MSEC);
- hrtimer_init(&data->timer_trigger, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- data->timer_trigger.function = ltc2952_poweroff_timer_trigger;
+ hrtimer_setup(&data->timer_trigger, ltc2952_poweroff_timer_trigger, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
- hrtimer_init(&data->timer_wde, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- data->timer_wde.function = ltc2952_poweroff_timer_wde;
+ hrtimer_setup(&data->timer_wde, ltc2952_poweroff_timer_wde, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
}
static int ltc2952_poweroff_init(struct platform_device *pdev)
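
This and the following hrtimer hunks all apply the same conversion: hrtimer_setup() binds the callback at initialization time and replaces the hrtimer_init() call plus the open-coded .function assignment. A minimal sketch, with a hypothetical callback:

/* Sketch only: hrtimer_setup() takes the callback directly. */
#include <linux/hrtimer.h>
#include <linux/ktime.h>

static struct hrtimer example_timer;

static enum hrtimer_restart example_timer_fn(struct hrtimer *timer)
{
	/* ... periodic work ... */
	hrtimer_forward_now(timer, ms_to_ktime(100));
	return HRTIMER_RESTART;
}

static void example_timer_start(void)
{
	/* Previously: hrtimer_init(...); example_timer.function = example_timer_fn; */
	hrtimer_setup(&example_timer, example_timer_fn, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hrtimer_start(&example_timer, ms_to_ktime(100), HRTIMER_MODE_REL);
}
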
diff --git a/drivers/power/supply/ab8500_chargalg.c b/drivers/power/supply/ab8500_chargalg.c
index 7a8d1feb8e90..dc6c8b0dd1cf 100644
--- a/drivers/power/supply/ab8500_chargalg.c
+++ b/drivers/power/supply/ab8500_chargalg.c
@@ -1787,13 +1787,12 @@ static int ab8500_chargalg_probe(struct platform_device *pdev)
psy_cfg.drv_data = di;
/* Initialize safety timer */
- hrtimer_init(&di->safety_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- di->safety_timer.function = ab8500_chargalg_safety_timer_expired;
+ hrtimer_setup(&di->safety_timer, ab8500_chargalg_safety_timer_expired, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
/* Initialize maintenance timer */
- hrtimer_init(&di->maintenance_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- di->maintenance_timer.function =
- ab8500_chargalg_maintenance_timer_expired;
+ hrtimer_setup(&di->maintenance_timer, ab8500_chargalg_maintenance_timer_expired,
+ CLOCK_MONOTONIC, HRTIMER_MODE_REL);
/* Init work for chargalg */
INIT_DEFERRABLE_WORK(&di->chargalg_periodic_work,
diff --git a/drivers/powercap/Kconfig b/drivers/powercap/Kconfig
index 69ef8d081c98..03c4c796d993 100644
--- a/drivers/powercap/Kconfig
+++ b/drivers/powercap/Kconfig
@@ -82,7 +82,7 @@ config DTPM
config DTPM_CPU
bool "Add CPU power capping based on the energy model"
- depends on DTPM && ENERGY_MODEL
+ depends on DTPM && ENERGY_MODEL && SMP
help
This enables support for CPU power limitation based on
energy model.
diff --git a/drivers/powercap/idle_inject.c b/drivers/powercap/idle_inject.c
index 04c212953ded..5ad7cc438068 100644
--- a/drivers/powercap/idle_inject.c
+++ b/drivers/powercap/idle_inject.c
@@ -339,8 +339,7 @@ struct idle_inject_device *idle_inject_register_full(struct cpumask *cpumask,
return NULL;
cpumask_copy(to_cpumask(ii_dev->cpumask), cpumask);
- hrtimer_init(&ii_dev->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- ii_dev->timer.function = idle_inject_timer_fn;
+ hrtimer_setup(&ii_dev->timer, idle_inject_timer_fn, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
ii_dev->latency_us = UINT_MAX;
ii_dev->update = update;
diff --git a/drivers/powercap/intel_rapl_common.c b/drivers/powercap/intel_rapl_common.c
index 77d75e1f14a9..5ab3feb29686 100644
--- a/drivers/powercap/intel_rapl_common.c
+++ b/drivers/powercap/intel_rapl_common.c
@@ -1274,7 +1274,7 @@ static const struct x86_cpu_id rapl_ids[] __initconst = {
X86_MATCH_VFM(INTEL_ATOM_SILVERMONT, &rapl_defaults_byt),
X86_MATCH_VFM(INTEL_ATOM_AIRMONT, &rapl_defaults_cht),
X86_MATCH_VFM(INTEL_ATOM_SILVERMONT_MID, &rapl_defaults_tng),
- X86_MATCH_VFM(INTEL_ATOM_AIRMONT_MID, &rapl_defaults_ann),
+	X86_MATCH_VFM(INTEL_ATOM_SILVERMONT_MID2, &rapl_defaults_ann),
X86_MATCH_VFM(INTEL_ATOM_GOLDMONT, &rapl_defaults_core),
X86_MATCH_VFM(INTEL_ATOM_GOLDMONT_PLUS, &rapl_defaults_core),
X86_MATCH_VFM(INTEL_ATOM_GOLDMONT_D, &rapl_defaults_core),
@@ -2064,8 +2064,7 @@ int rapl_package_add_pmu(struct rapl_package *rp)
raw_spin_lock_init(&data->lock);
INIT_LIST_HEAD(&data->active_list);
data->timer_interval = ms_to_ktime(rapl_pmu.timer_ms);
- hrtimer_init(&data->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- data->hrtimer.function = rapl_hrtimer_handle;
+ hrtimer_setup(&data->hrtimer, rapl_hrtimer_handle, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
return rapl_pmu_update(rp);
}
diff --git a/drivers/pps/generators/pps_gen_parport.c b/drivers/pps/generators/pps_gen_parport.c
index d46eed159495..f5eeb4dd01ad 100644
--- a/drivers/pps/generators/pps_gen_parport.c
+++ b/drivers/pps/generators/pps_gen_parport.c
@@ -208,8 +208,7 @@ static void parport_attach(struct parport *port)
calibrate_port(&device);
- hrtimer_init(&device.timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
- device.timer.function = hrtimer_event;
+ hrtimer_setup(&device.timer, hrtimer_event, CLOCK_REALTIME, HRTIMER_MODE_ABS);
hrtimer_start(&device.timer, next_intr_time(&device), HRTIMER_MODE_ABS);
return;
diff --git a/drivers/ptp/ptp_chardev.c b/drivers/ptp/ptp_chardev.c
index bf6468c56419..4380e6ddb849 100644
--- a/drivers/ptp/ptp_chardev.c
+++ b/drivers/ptp/ptp_chardev.c
@@ -205,6 +205,10 @@ long ptp_ioctl(struct posix_clock_context *pccontext, unsigned int cmd,
case PTP_EXTTS_REQUEST:
case PTP_EXTTS_REQUEST2:
+ if ((pccontext->fp->f_mode & FMODE_WRITE) == 0) {
+ err = -EACCES;
+ break;
+ }
memset(&req, 0, sizeof(req));
if (copy_from_user(&req.extts, (void __user *)arg,
@@ -246,6 +250,10 @@ long ptp_ioctl(struct posix_clock_context *pccontext, unsigned int cmd,
case PTP_PEROUT_REQUEST:
case PTP_PEROUT_REQUEST2:
+ if ((pccontext->fp->f_mode & FMODE_WRITE) == 0) {
+ err = -EACCES;
+ break;
+ }
memset(&req, 0, sizeof(req));
if (copy_from_user(&req.perout, (void __user *)arg,
@@ -314,6 +322,10 @@ long ptp_ioctl(struct posix_clock_context *pccontext, unsigned int cmd,
case PTP_ENABLE_PPS:
case PTP_ENABLE_PPS2:
+ if ((pccontext->fp->f_mode & FMODE_WRITE) == 0) {
+ err = -EACCES;
+ break;
+ }
memset(&req, 0, sizeof(req));
if (!capable(CAP_SYS_TIME))
@@ -456,6 +468,10 @@ long ptp_ioctl(struct posix_clock_context *pccontext, unsigned int cmd,
case PTP_PIN_SETFUNC:
case PTP_PIN_SETFUNC2:
+ if ((pccontext->fp->f_mode & FMODE_WRITE) == 0) {
+ err = -EACCES;
+ break;
+ }
if (copy_from_user(&pd, (void __user *)arg, sizeof(pd))) {
err = -EFAULT;
break;
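
The four hunks above add the same guard to every state-changing PTP ioctl: the character device must have been opened with write permission. A userspace sketch of the resulting behaviour, assuming a PTP clock at the hypothetical path /dev/ptp0:

#include <fcntl.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/ptp_clock.h>

/* Configuring periodic output on a read-only descriptor now fails with
 * EACCES; read-only opens can still query the clock. */
static int start_perout_readonly(const char *dev)
{
	struct ptp_perout_request req = { .period = { .sec = 1 } };
	int fd = open(dev, O_RDONLY);
	int ret;

	if (fd < 0)
		return -1;
	ret = ioctl(fd, PTP_PEROUT_REQUEST, &req);	/* -1, errno == EACCES */
	close(fd);
	return ret;
}
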
diff --git a/drivers/ptp/ptp_ocp.c b/drivers/ptp/ptp_ocp.c
index b651087f426f..b25635c5c745 100644
--- a/drivers/ptp/ptp_ocp.c
+++ b/drivers/ptp/ptp_ocp.c
@@ -2090,6 +2090,10 @@ ptp_ocp_signal_from_perout(struct ptp_ocp *bp, int gen,
{
struct ptp_ocp_signal s = { };
+ if (req->flags & ~(PTP_PEROUT_DUTY_CYCLE |
+ PTP_PEROUT_PHASE))
+ return -EOPNOTSUPP;
+
s.polarity = bp->signal[gen].polarity;
s.period = ktime_set(req->period.sec, req->period.nsec);
if (!s.period)
@@ -3959,9 +3963,6 @@ _signal_summary_show(struct seq_file *s, struct ptp_ocp *bp, int nr)
bool on;
u32 val;
- if (!signal)
- return;
-
on = signal->running;
sprintf(label, "GEN%d", nr + 1);
seq_printf(s, "%7s: %s, period:%llu duty:%d%% phase:%llu pol:%d",
diff --git a/drivers/pwm/Kconfig b/drivers/pwm/Kconfig
index 0915c1e7df16..4731d5b90d7e 100644
--- a/drivers/pwm/Kconfig
+++ b/drivers/pwm/Kconfig
@@ -567,7 +567,7 @@ config PWM_SIFIVE
tristate "SiFive PWM support"
depends on OF
depends on COMMON_CLK && HAS_IOMEM
- depends on RISCV || COMPILE_TEST
+ depends on ARCH_SIFIVE || COMPILE_TEST
help
Generic PWM framework driver for SiFive SoCs.
@@ -584,6 +584,16 @@ config PWM_SL28CPLD
To compile this driver as a module, choose M here: the module
will be called pwm-sl28cpld.
+config PWM_SOPHGO_SG2042
+ tristate "Sophgo SG2042 PWM support"
+ depends on ARCH_SOPHGO || COMPILE_TEST
+ help
+ PWM driver for the PWM controller on Sophgo SG2042 SoC. The PWM
+	  controller supports outputting 4 channels of PWM waveforms.
+
+ To compile this driver as a module, choose M here: the module
+ will be called pwm_sophgo_sg2042.
+
config PWM_SPEAR
tristate "STMicroelectronics SPEAr PWM support"
depends on PLAT_SPEAR || COMPILE_TEST
@@ -636,7 +646,7 @@ config PWM_STM32_LP
will be called pwm-stm32-lp.
config PWM_STMPE
- bool "STMPE expander PWM export"
+ tristate "STMPE expander PWM export"
depends on MFD_STMPE
help
This enables support for the PWMs found in the STMPE I/O
diff --git a/drivers/pwm/Makefile b/drivers/pwm/Makefile
index 9081e0c0e9e0..539e0def3f82 100644
--- a/drivers/pwm/Makefile
+++ b/drivers/pwm/Makefile
@@ -53,6 +53,7 @@ obj-$(CONFIG_PWM_RZ_MTU3) += pwm-rz-mtu3.o
obj-$(CONFIG_PWM_SAMSUNG) += pwm-samsung.o
obj-$(CONFIG_PWM_SIFIVE) += pwm-sifive.o
obj-$(CONFIG_PWM_SL28CPLD) += pwm-sl28cpld.o
+obj-$(CONFIG_PWM_SOPHGO_SG2042) += pwm-sophgo-sg2042.o
obj-$(CONFIG_PWM_SPEAR) += pwm-spear.o
obj-$(CONFIG_PWM_SPRD) += pwm-sprd.o
obj-$(CONFIG_PWM_STI) += pwm-sti.o
diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c
index ccd54c089bab..a40c511e0096 100644
--- a/drivers/pwm/core.c
+++ b/drivers/pwm/core.c
@@ -1000,11 +1000,27 @@ of_pwm_xlate_with_flags(struct pwm_chip *chip, const struct of_phandle_args *arg
}
EXPORT_SYMBOL_GPL(of_pwm_xlate_with_flags);
+/*
+ * This callback is used for PXA PWM chips that only have a single PWM line.
+ * For such chips you could argue that passing the line number (i.e. the first
+ * parameter in the common case) is useless as it's always zero. So compared to
+ * the default xlate function of_pwm_xlate_with_flags() the first parameter is
+ * the default period and the second parameter holds the flags.
+ *
+ * Note that if #pwm-cells = <3>, the semantic is the same as for
+ * of_pwm_xlate_with_flags() to allow converting the affected driver to
+ * #pwm-cells = <3> without breaking the legacy binding.
+ *
+ * Don't use for new drivers.
+ */
struct pwm_device *
of_pwm_single_xlate(struct pwm_chip *chip, const struct of_phandle_args *args)
{
struct pwm_device *pwm;
+ if (args->args_count >= 3)
+ return of_pwm_xlate_with_flags(chip, args);
+
pwm = pwm_request_from_chip(chip, 0, NULL);
if (IS_ERR(pwm))
return pwm;
@@ -1716,8 +1732,7 @@ static struct pwm_device *of_pwm_get(struct device *dev, struct device_node *np,
return ERR_PTR(index);
}
- err = of_parse_phandle_with_args(np, "pwms", "#pwm-cells", index,
- &args);
+ err = of_parse_phandle_with_args_map(np, "pwms", "pwm", index, &args);
if (err) {
pr_err("%s(): can't parse \"pwms\" property\n", __func__);
return ERR_PTR(err);
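
The comment added above documents how of_pwm_single_xlate() interprets the pwm specifier for chips with a single line, and the lookup path now resolves "pwms" through of_parse_phandle_with_args_map() so nexus-node "pwm-map" translation can apply. A minimal sketch of a driver opting in to that single-line translation; all "foo_*" names are hypothetical, only the of_xlate assignment reflects the code above:

#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/pwm.h>

static int foo_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
			 const struct pwm_state *state)
{
	/* program the single hardware line according to *state */
	return 0;
}

static const struct pwm_ops foo_pwm_ops = {
	.apply = foo_pwm_apply,
};

static int foo_pwm_probe(struct platform_device *pdev)
{
	struct pwm_chip *chip;

	chip = devm_pwmchip_alloc(&pdev->dev, 1, 0);
	if (IS_ERR(chip))
		return PTR_ERR(chip);

	chip->ops = &foo_pwm_ops;
	/* consumers may pass <period [flags]> or the full <index period flags> */
	chip->of_xlate = of_pwm_single_xlate;

	return devm_pwmchip_add(&pdev->dev, chip);
}
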
diff --git a/drivers/pwm/pwm-clps711x.c b/drivers/pwm/pwm-clps711x.c
index c950e1dbd2b8..04559a9de718 100644
--- a/drivers/pwm/pwm-clps711x.c
+++ b/drivers/pwm/pwm-clps711x.c
@@ -98,7 +98,7 @@ static int clps711x_pwm_probe(struct platform_device *pdev)
return devm_pwmchip_add(&pdev->dev, chip);
}
-static const struct of_device_id __maybe_unused clps711x_pwm_dt_ids[] = {
+static const struct of_device_id clps711x_pwm_dt_ids[] = {
{ .compatible = "cirrus,ep7209-pwm", },
{ }
};
@@ -107,7 +107,7 @@ MODULE_DEVICE_TABLE(of, clps711x_pwm_dt_ids);
static struct platform_driver clps711x_pwm_driver = {
.driver = {
.name = "clps711x-pwm",
- .of_match_table = of_match_ptr(clps711x_pwm_dt_ids),
+ .of_match_table = clps711x_pwm_dt_ids,
},
.probe = clps711x_pwm_probe,
};
diff --git a/drivers/pwm/pwm-gpio.c b/drivers/pwm/pwm-gpio.c
index 9f8884ac7504..5f4edeb394a9 100644
--- a/drivers/pwm/pwm-gpio.c
+++ b/drivers/pwm/pwm-gpio.c
@@ -207,13 +207,12 @@ static int pwm_gpio_probe(struct platform_device *pdev)
chip->ops = &pwm_gpio_ops;
chip->atomic = true;
- hrtimer_init(&gpwm->gpio_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ hrtimer_setup(&gpwm->gpio_timer, pwm_gpio_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+
ret = devm_add_action_or_reset(dev, pwm_gpio_disable_hrtimer, gpwm);
if (ret)
return ret;
- gpwm->gpio_timer.function = pwm_gpio_timer;
-
ret = pwmchip_add(chip);
if (ret < 0)
return dev_err_probe(dev, ret, "could not add pwmchip\n");
diff --git a/drivers/pwm/pwm-lpss.c b/drivers/pwm/pwm-lpss.c
index 3b99feb3bb49..5accab033b8b 100644
--- a/drivers/pwm/pwm-lpss.c
+++ b/drivers/pwm/pwm-lpss.c
@@ -17,6 +17,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
+#include <linux/pwm.h>
#include <linux/time.h>
#define DEFAULT_SYMBOL_NAMESPACE "PWM_LPSS"
diff --git a/drivers/pwm/pwm-lpss.h b/drivers/pwm/pwm-lpss.h
index b5267ab5193b..60792181401e 100644
--- a/drivers/pwm/pwm-lpss.h
+++ b/drivers/pwm/pwm-lpss.h
@@ -10,7 +10,6 @@
#ifndef __PWM_LPSS_H
#define __PWM_LPSS_H
-#include <linux/pwm.h>
#include <linux/types.h>
#include <linux/platform_data/x86/pwm-lpss.h>
diff --git a/drivers/pwm/pwm-pca9685.c b/drivers/pwm/pwm-pca9685.c
index 1298b29183e5..5162f3991644 100644
--- a/drivers/pwm/pwm-pca9685.c
+++ b/drivers/pwm/pwm-pca9685.c
@@ -8,7 +8,6 @@
* based on the pwm-twl-led.c driver
*/
-#include <linux/acpi.h>
#include <linux/gpio/driver.h>
#include <linux/i2c.h>
#include <linux/module.h>
@@ -639,21 +638,17 @@ static const struct i2c_device_id pca9685_id[] = {
};
MODULE_DEVICE_TABLE(i2c, pca9685_id);
-#ifdef CONFIG_ACPI
static const struct acpi_device_id pca9685_acpi_ids[] = {
{ "INT3492", 0 },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(acpi, pca9685_acpi_ids);
-#endif
-#ifdef CONFIG_OF
static const struct of_device_id pca9685_dt_ids[] = {
{ .compatible = "nxp,pca9685-pwm", },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, pca9685_dt_ids);
-#endif
static const struct dev_pm_ops pca9685_pwm_pm = {
SET_RUNTIME_PM_OPS(pca9685_pwm_runtime_suspend,
@@ -663,8 +658,8 @@ static const struct dev_pm_ops pca9685_pwm_pm = {
static struct i2c_driver pca9685_i2c_driver = {
.driver = {
.name = "pca9685-pwm",
- .acpi_match_table = ACPI_PTR(pca9685_acpi_ids),
- .of_match_table = of_match_ptr(pca9685_dt_ids),
+ .acpi_match_table = pca9685_acpi_ids,
+ .of_match_table = pca9685_dt_ids,
.pm = &pca9685_pwm_pm,
},
.probe = pca9685_pwm_probe,
diff --git a/drivers/pwm/pwm-sophgo-sg2042.c b/drivers/pwm/pwm-sophgo-sg2042.c
new file mode 100644
index 000000000000..ff4639d849ce
--- /dev/null
+++ b/drivers/pwm/pwm-sophgo-sg2042.c
@@ -0,0 +1,194 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Sophgo SG2042 PWM Controller Driver
+ *
+ * Copyright (C) 2024 Sophgo Technology Inc.
+ * Copyright (C) 2024 Chen Wang <unicorn_wang@outlook.com>
+ *
+ * Limitations:
+ * - After reset, the output of the PWM channel is always high.
+ * The value of HLPERIOD/PERIOD is 0.
+ * - When HLPERIOD or PERIOD is reconfigured, PWM will start to
+ * output waveforms with the new configuration after completing
+ * the running period.
+ * - When PERIOD and HLPERIOD are set to 0, the PWM wave output will
+ * be stopped and the output is pulled to high.
+ * See the datasheet [1] for more details.
+ * [1]: https://github.com/sophgo/sophgo-doc/tree/main/SG2042/TRM
+ */
+
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/math64.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pwm.h>
+#include <linux/reset.h>
+
+/*
+ * Offset RegisterName
+ * 0x0000 HLPERIOD0
+ * 0x0004 PERIOD0
+ * 0x0008 HLPERIOD1
+ * 0x000C PERIOD1
+ * 0x0010 HLPERIOD2
+ * 0x0014 PERIOD2
+ * 0x0018 HLPERIOD3
+ * 0x001C PERIOD3
+ * Four groups and every group is composed of HLPERIOD & PERIOD
+ */
+#define SG2042_PWM_HLPERIOD(chan) ((chan) * 8 + 0)
+#define SG2042_PWM_PERIOD(chan) ((chan) * 8 + 4)
+
+#define SG2042_PWM_CHANNELNUM 4
+
+/**
+ * struct sg2042_pwm_ddata - private driver data
+ * @base: base address of mapped PWM registers
+ * @clk_rate_hz: rate of base clock in HZ
+ */
+struct sg2042_pwm_ddata {
+ void __iomem *base;
+ unsigned long clk_rate_hz;
+};
+
+/*
+ * period_ticks: PERIOD
+ * hlperiod_ticks: HLPERIOD
+ */
+static void pwm_sg2042_config(struct sg2042_pwm_ddata *ddata, unsigned int chan,
+ u32 period_ticks, u32 hlperiod_ticks)
+{
+ void __iomem *base = ddata->base;
+
+ writel(period_ticks, base + SG2042_PWM_PERIOD(chan));
+ writel(hlperiod_ticks, base + SG2042_PWM_HLPERIOD(chan));
+}
+
+static int pwm_sg2042_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+ const struct pwm_state *state)
+{
+ struct sg2042_pwm_ddata *ddata = pwmchip_get_drvdata(chip);
+ u32 hlperiod_ticks;
+ u32 period_ticks;
+
+ if (state->polarity == PWM_POLARITY_INVERSED)
+ return -EINVAL;
+
+ if (!state->enabled) {
+ pwm_sg2042_config(ddata, pwm->hwpwm, 0, 0);
+ return 0;
+ }
+
+ /*
+ * Duration of High level (duty_cycle) = HLPERIOD x Period_of_input_clk
+ * Duration of One Cycle (period) = PERIOD x Period_of_input_clk
+ */
+ period_ticks = min(mul_u64_u64_div_u64(ddata->clk_rate_hz, state->period, NSEC_PER_SEC), U32_MAX);
+ hlperiod_ticks = min(mul_u64_u64_div_u64(ddata->clk_rate_hz, state->duty_cycle, NSEC_PER_SEC), U32_MAX);
+
+ dev_dbg(pwmchip_parent(chip), "chan[%u]: PERIOD=%u, HLPERIOD=%u\n",
+ pwm->hwpwm, period_ticks, hlperiod_ticks);
+
+ pwm_sg2042_config(ddata, pwm->hwpwm, period_ticks, hlperiod_ticks);
+
+ return 0;
+}
+
+static int pwm_sg2042_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
+ struct pwm_state *state)
+{
+ struct sg2042_pwm_ddata *ddata = pwmchip_get_drvdata(chip);
+ unsigned int chan = pwm->hwpwm;
+ u32 hlperiod_ticks;
+ u32 period_ticks;
+
+ period_ticks = readl(ddata->base + SG2042_PWM_PERIOD(chan));
+ hlperiod_ticks = readl(ddata->base + SG2042_PWM_HLPERIOD(chan));
+
+ if (!period_ticks) {
+ state->enabled = false;
+ return 0;
+ }
+
+ if (hlperiod_ticks > period_ticks)
+ hlperiod_ticks = period_ticks;
+
+ state->enabled = true;
+ state->period = DIV_ROUND_UP_ULL((u64)period_ticks * NSEC_PER_SEC, ddata->clk_rate_hz);
+ state->duty_cycle = DIV_ROUND_UP_ULL((u64)hlperiod_ticks * NSEC_PER_SEC, ddata->clk_rate_hz);
+ state->polarity = PWM_POLARITY_NORMAL;
+
+ return 0;
+}
+
+static const struct pwm_ops pwm_sg2042_ops = {
+ .apply = pwm_sg2042_apply,
+ .get_state = pwm_sg2042_get_state,
+};
+
+static const struct of_device_id sg2042_pwm_ids[] = {
+ { .compatible = "sophgo,sg2042-pwm" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, sg2042_pwm_ids);
+
+static int pwm_sg2042_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct sg2042_pwm_ddata *ddata;
+ struct reset_control *rst;
+ struct pwm_chip *chip;
+ struct clk *clk;
+ int ret;
+
+ chip = devm_pwmchip_alloc(dev, SG2042_PWM_CHANNELNUM, sizeof(*ddata));
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+ ddata = pwmchip_get_drvdata(chip);
+
+ ddata->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(ddata->base))
+ return PTR_ERR(ddata->base);
+
+ clk = devm_clk_get_enabled(dev, "apb");
+ if (IS_ERR(clk))
+ return dev_err_probe(dev, PTR_ERR(clk), "Failed to get base clk\n");
+
+ ret = devm_clk_rate_exclusive_get(dev, clk);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to get exclusive rate\n");
+
+ ddata->clk_rate_hz = clk_get_rate(clk);
+ /* period = PERIOD * NSEC_PER_SEC / clk_rate_hz */
+ if (!ddata->clk_rate_hz || ddata->clk_rate_hz > NSEC_PER_SEC)
+ return dev_err_probe(dev, -EINVAL,
+ "Invalid clock rate: %lu\n", ddata->clk_rate_hz);
+
+ rst = devm_reset_control_get_optional_shared_deasserted(dev, NULL);
+ if (IS_ERR(rst))
+ return dev_err_probe(dev, PTR_ERR(rst), "Failed to get reset\n");
+
+ chip->ops = &pwm_sg2042_ops;
+ chip->atomic = true;
+
+ ret = devm_pwmchip_add(dev, chip);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "Failed to register PWM chip\n");
+
+ return 0;
+}
+
+static struct platform_driver pwm_sg2042_driver = {
+ .driver = {
+ .name = "sg2042-pwm",
+ .of_match_table = sg2042_pwm_ids,
+ },
+ .probe = pwm_sg2042_probe,
+};
+module_platform_driver(pwm_sg2042_driver);
+
+MODULE_AUTHOR("Chen Wang");
+MODULE_DESCRIPTION("Sophgo SG2042 PWM driver");
+MODULE_LICENSE("GPL");
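
In pwm_sg2042_apply() above, the requested period and duty cycle are converted from nanoseconds into counts of the APB clock and saturated to the 32-bit PERIOD/HLPERIOD registers. The same arithmetic in isolation (the helper name is made up):

#include <linux/limits.h>
#include <linux/math64.h>
#include <linux/minmax.h>
#include <linux/time64.h>
#include <linux/types.h>

/* ticks = clk_rate_hz * duration_ns / NSEC_PER_SEC, clamped to U32_MAX */
static u32 sg2042_ns_to_ticks(unsigned long clk_rate_hz, u64 duration_ns)
{
	return min_t(u64,
		     mul_u64_u64_div_u64(clk_rate_hz, duration_ns, NSEC_PER_SEC),
		     U32_MAX);
}

With a 100 MHz clock, for example, a requested 1 ms period maps to 100000 ticks, and the probe-time check that the clock rate does not exceed NSEC_PER_SEC keeps a single tick at least one nanosecond long.
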
diff --git a/drivers/pwm/pwm-stmpe.c b/drivers/pwm/pwm-stmpe.c
index bb91062d5f1d..73f12843999a 100644
--- a/drivers/pwm/pwm-stmpe.c
+++ b/drivers/pwm/pwm-stmpe.c
@@ -326,12 +326,33 @@ static int __init stmpe_pwm_probe(struct platform_device *pdev)
return ret;
}
+ platform_set_drvdata(pdev, chip);
+
return 0;
}
-static struct platform_driver stmpe_pwm_driver = {
+static void __exit stmpe_pwm_remove(struct platform_device *pdev)
+{
+ struct stmpe *stmpe = dev_get_drvdata(pdev->dev.parent);
+ struct pwm_chip *chip = platform_get_drvdata(pdev);
+
+ pwmchip_remove(chip);
+ stmpe_disable(stmpe, STMPE_BLOCK_PWM);
+}
+
+/*
+ * stmpe_pwm_remove() lives in .exit.text. For drivers registered via
+ * module_platform_driver_probe() this is ok because they cannot get unbound at
+ * runtime. So mark the driver struct with __refdata to prevent modpost
+ * triggering a section mismatch warning.
+ */
+static struct platform_driver stmpe_pwm_driver __refdata = {
.driver = {
.name = "stmpe-pwm",
},
+ .remove = __exit_p(stmpe_pwm_remove),
};
-builtin_platform_driver_probe(stmpe_pwm_driver, stmpe_pwm_probe);
+module_platform_driver_probe(stmpe_pwm_driver, stmpe_pwm_probe);
+
+MODULE_DESCRIPTION("STMPE expander PWM");
+MODULE_LICENSE("GPL");
diff --git a/drivers/rapidio/devices/rio_mport_cdev.c b/drivers/rapidio/devices/rio_mport_cdev.c
index 27afbb9d544b..cbf531d0ba68 100644
--- a/drivers/rapidio/devices/rio_mport_cdev.c
+++ b/drivers/rapidio/devices/rio_mport_cdev.c
@@ -1742,7 +1742,8 @@ static int rio_mport_add_riodev(struct mport_cdev_priv *priv,
err = rio_add_net(net);
if (err) {
rmcd_debug(RDEV, "failed to register net, err=%d", err);
- kfree(net);
+ put_device(&net->dev);
+ mport->net = NULL;
goto cleanup;
}
}
diff --git a/drivers/rapidio/rio-scan.c b/drivers/rapidio/rio-scan.c
index fdcf742b2adb..c12941f71e2c 100644
--- a/drivers/rapidio/rio-scan.c
+++ b/drivers/rapidio/rio-scan.c
@@ -871,7 +871,10 @@ static struct rio_net *rio_scan_alloc_net(struct rio_mport *mport,
dev_set_name(&net->dev, "rnet_%d", net->id);
net->dev.parent = &mport->dev;
net->dev.release = rio_scan_release_dev;
- rio_add_net(net);
+ if (rio_add_net(net)) {
+ put_device(&net->dev);
+ net = NULL;
+ }
}
return net;
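
Both rapidio hunks fix the same class of error path: once rio_add_net() has been attempted, the net's embedded struct device already has a name and a release callback, so its memory must be dropped through put_device() rather than kfree() (or nothing at all). A generic sketch of that rule with made-up "foo" names:

#include <linux/device.h>
#include <linux/slab.h>

struct foo {
	struct device dev;
};

static void foo_release(struct device *dev)
{
	kfree(container_of(dev, struct foo, dev));
}

static struct foo *foo_create(struct device *parent)
{
	struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

	if (!f)
		return NULL;

	device_initialize(&f->dev);
	f->dev.parent = parent;
	f->dev.release = foo_release;
	dev_set_name(&f->dev, "foo0");

	if (device_add(&f->dev)) {
		/* refcounted teardown: frees via foo_release(), never kfree() here */
		put_device(&f->dev);
		return NULL;
	}
	return f;
}
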
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index 39297f7d8177..807d07067951 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -981,6 +981,13 @@ config REGULATOR_PCA9450
Say y here to support the NXP PCA9450A/PCA9450B/PCA9450C PMIC
regulator driver.
+config REGULATOR_PF9453
+ tristate "NXP PF9453 regulator driver"
+ depends on I2C
+ select REGMAP_I2C
+ help
+ Say y here to support the NXP PF9453 PMIC regulator driver.
+
config REGULATOR_PCAP
tristate "Motorola PCAP2 regulator driver"
depends on EZX_PCAP
@@ -988,13 +995,6 @@ config REGULATOR_PCAP
This driver provides support for the voltage regulators of the
PCAP2 PMIC.
-config REGULATOR_PCF50633
- tristate "NXP PCF50633 regulator driver"
- depends on MFD_PCF50633
- help
- Say Y here to support the voltage regulators and converters
- on PCF50633
-
config REGULATOR_PF8X00
tristate "NXP PF8100/PF8121A/PF8200 regulator driver"
depends on I2C && OF
diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile
index 3d5a803dce8a..524e026c0273 100644
--- a/drivers/regulator/Makefile
+++ b/drivers/regulator/Makefile
@@ -123,6 +123,7 @@ obj-$(CONFIG_REGULATOR_QCOM_SPMI) += qcom_spmi-regulator.o
obj-$(CONFIG_REGULATOR_QCOM_USB_VBUS) += qcom_usb_vbus-regulator.o
obj-$(CONFIG_REGULATOR_PALMAS) += palmas-regulator.o
obj-$(CONFIG_REGULATOR_PCA9450) += pca9450-regulator.o
+obj-$(CONFIG_REGULATOR_PF9453) += pf9453-regulator.o
obj-$(CONFIG_REGULATOR_PF8X00) += pf8x00-regulator.o
obj-$(CONFIG_REGULATOR_PFUZE100) += pfuze100-regulator.o
obj-$(CONFIG_REGULATOR_PV88060) += pv88060-regulator.o
@@ -132,7 +133,6 @@ obj-$(CONFIG_REGULATOR_PWM) += pwm-regulator.o
obj-$(CONFIG_REGULATOR_TPS51632) += tps51632-regulator.o
obj-$(CONFIG_REGULATOR_PBIAS) += pbias-regulator.o
obj-$(CONFIG_REGULATOR_PCAP) += pcap-regulator.o
-obj-$(CONFIG_REGULATOR_PCF50633) += pcf50633-regulator.o
obj-$(CONFIG_REGULATOR_RAA215300) += raa215300.o
obj-$(CONFIG_REGULATOR_RASPBERRYPI_TOUCHSCREEN_ATTINY) += rpi-panel-attiny-regulator.o
obj-$(CONFIG_REGULATOR_RC5T583) += rc5t583-regulator.o
diff --git a/drivers/regulator/ad5398.c b/drivers/regulator/ad5398.c
index 40f7dba42b5a..eb2a666a45cb 100644
--- a/drivers/regulator/ad5398.c
+++ b/drivers/regulator/ad5398.c
@@ -14,8 +14,9 @@
#include <linux/platform_device.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
+#include <linux/regulator/of_regulator.h>
-#define AD5398_CURRENT_EN_MASK 0x8000
+#define AD5398_SW_POWER_DOWN BIT(15)
struct ad5398_chip_info {
struct i2c_client *client;
@@ -113,7 +114,7 @@ static int ad5398_set_current_limit(struct regulator_dev *rdev, int min_uA, int
/* prepare register data */
selector = (selector << chip->current_offset) & chip->current_mask;
- data = (unsigned short)selector | (data & AD5398_CURRENT_EN_MASK);
+ data = (unsigned short)selector | (data & AD5398_SW_POWER_DOWN);
/* write the new current value back as well as enable bit */
ret = ad5398_write_reg(client, data);
@@ -132,10 +133,10 @@ static int ad5398_is_enabled(struct regulator_dev *rdev)
if (ret < 0)
return ret;
- if (data & AD5398_CURRENT_EN_MASK)
- return 1;
- else
+ if (data & AD5398_SW_POWER_DOWN)
return 0;
+ else
+ return 1;
}
static int ad5398_enable(struct regulator_dev *rdev)
@@ -149,10 +150,10 @@ static int ad5398_enable(struct regulator_dev *rdev)
if (ret < 0)
return ret;
- if (data & AD5398_CURRENT_EN_MASK)
+ if (!(data & AD5398_SW_POWER_DOWN))
return 0;
- data |= AD5398_CURRENT_EN_MASK;
+ data &= ~AD5398_SW_POWER_DOWN;
ret = ad5398_write_reg(client, data);
@@ -170,10 +171,10 @@ static int ad5398_disable(struct regulator_dev *rdev)
if (ret < 0)
return ret;
- if (!(data & AD5398_CURRENT_EN_MASK))
+ if (data & AD5398_SW_POWER_DOWN)
return 0;
- data &= ~AD5398_CURRENT_EN_MASK;
+ data |= AD5398_SW_POWER_DOWN;
ret = ad5398_write_reg(client, data);
@@ -221,15 +222,20 @@ static int ad5398_probe(struct i2c_client *client)
const struct ad5398_current_data_format *df =
(struct ad5398_current_data_format *)id->driver_data;
- if (!init_data)
- return -EINVAL;
-
chip = devm_kzalloc(&client->dev, sizeof(*chip), GFP_KERNEL);
if (!chip)
return -ENOMEM;
config.dev = &client->dev;
+ if (client->dev.of_node)
+ init_data = of_get_regulator_init_data(&client->dev,
+ client->dev.of_node,
+ &ad5398_reg);
+ if (!init_data)
+ return -EINVAL;
+
config.init_data = init_data;
+ config.of_node = client->dev.of_node;
config.driver_data = chip;
chip->client = client;
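
The ad5398 rename from AD5398_CURRENT_EN_MASK to AD5398_SW_POWER_DOWN flips the documented sense of bit 15: it is a power-down control, so "set" means disabled and "clear" means enabled, which is why every test above is inverted. The logic reduced to its essentials (helper names are made up):

#include <linux/bits.h>
#include <linux/types.h>

#define SW_POWER_DOWN	BIT(15)	/* set = output powered down, clear = enabled */

static bool reg_shows_enabled(u16 reg)
{
	return !(reg & SW_POWER_DOWN);
}

static u16 reg_enable(u16 reg)
{
	return reg & ~SW_POWER_DOWN;
}

static u16 reg_disable(u16 reg)
{
	return reg | SW_POWER_DOWN;
}
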
diff --git a/drivers/regulator/axp20x-regulator.c b/drivers/regulator/axp20x-regulator.c
index dca99cfb7cbb..da891415efc0 100644
--- a/drivers/regulator/axp20x-regulator.c
+++ b/drivers/regulator/axp20x-regulator.c
@@ -371,8 +371,8 @@
.ops = &axp20x_ops, \
}
-#define AXP_DESC_DELAY(_family, _id, _match, _supply, _min, _max, _step, _vreg, \
- _vmask, _ereg, _emask, _ramp_delay) \
+#define AXP_DESC(_family, _id, _match, _supply, _min, _max, _step, _vreg, \
+ _vmask, _ereg, _emask) \
[_family##_##_id] = { \
.name = (_match), \
.supply_name = (_supply), \
@@ -388,15 +388,9 @@
.vsel_mask = (_vmask), \
.enable_reg = (_ereg), \
.enable_mask = (_emask), \
- .ramp_delay = (_ramp_delay), \
.ops = &axp20x_ops, \
}
-#define AXP_DESC(_family, _id, _match, _supply, _min, _max, _step, _vreg, \
- _vmask, _ereg, _emask) \
- AXP_DESC_DELAY(_family, _id, _match, _supply, _min, _max, _step, _vreg, \
- _vmask, _ereg, _emask, 0)
-
#define AXP_DESC_SW(_family, _id, _match, _supply, _ereg, _emask) \
[_family##_##_id] = { \
.name = (_match), \
@@ -805,9 +799,9 @@ static const struct regulator_desc axp717_regulators[] = {
axp717_dcdc3_ranges, AXP717_DCDC3_NUM_VOLTAGES,
AXP717_DCDC3_CONTROL, AXP717_DCDC_V_OUT_MASK,
AXP717_DCDC_OUTPUT_CONTROL, BIT(2), 640),
- AXP_DESC_DELAY(AXP717, DCDC4, "dcdc4", "vin4", 1000, 3700, 100,
+ AXP_DESC(AXP717, DCDC4, "dcdc4", "vin4", 1000, 3700, 100,
AXP717_DCDC4_CONTROL, AXP717_DCDC_V_OUT_MASK,
- AXP717_DCDC_OUTPUT_CONTROL, BIT(3), 6400),
+ AXP717_DCDC_OUTPUT_CONTROL, BIT(3)),
AXP_DESC(AXP717, ALDO1, "aldo1", "aldoin", 500, 3500, 100,
AXP717_ALDO1_CONTROL, AXP717_LDO_V_OUT_MASK,
AXP717_LDO0_OUTPUT_CONTROL, BIT(0)),
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index 4ddf0efead68..00a7f3617cd8 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -1830,12 +1830,49 @@ static const struct file_operations constraint_flags_fops = {
#define REG_STR_SIZE 64
+static void link_and_create_debugfs(struct regulator *regulator, struct regulator_dev *rdev,
+ struct device *dev)
+{
+ int err = 0;
+
+ if (dev) {
+ regulator->dev = dev;
+
+ /* Add a link to the device sysfs entry */
+ err = sysfs_create_link_nowarn(&rdev->dev.kobj, &dev->kobj,
+ regulator->supply_name);
+ if (err) {
+ rdev_dbg(rdev, "could not add device link %s: %pe\n",
+ dev->kobj.name, ERR_PTR(err));
+ /* non-fatal */
+ }
+ }
+
+ if (err != -EEXIST) {
+ regulator->debugfs = debugfs_create_dir(regulator->supply_name, rdev->debugfs);
+ if (IS_ERR(regulator->debugfs)) {
+ rdev_dbg(rdev, "Failed to create debugfs directory\n");
+ regulator->debugfs = NULL;
+ }
+ }
+
+ if (regulator->debugfs) {
+ debugfs_create_u32("uA_load", 0444, regulator->debugfs,
+ &regulator->uA_load);
+ debugfs_create_u32("min_uV", 0444, regulator->debugfs,
+ &regulator->voltage[PM_SUSPEND_ON].min_uV);
+ debugfs_create_u32("max_uV", 0444, regulator->debugfs,
+ &regulator->voltage[PM_SUSPEND_ON].max_uV);
+ debugfs_create_file("constraint_flags", 0444, regulator->debugfs,
+ regulator, &constraint_flags_fops);
+ }
+}
+
static struct regulator *create_regulator(struct regulator_dev *rdev,
struct device *dev,
const char *supply_name)
{
struct regulator *regulator;
- int err = 0;
lockdep_assert_held_once(&rdev->mutex.base);
@@ -1868,38 +1905,6 @@ static struct regulator *create_regulator(struct regulator_dev *rdev,
list_add(&regulator->list, &rdev->consumer_list);
- if (dev) {
- regulator->dev = dev;
-
- /* Add a link to the device sysfs entry */
- err = sysfs_create_link_nowarn(&rdev->dev.kobj, &dev->kobj,
- supply_name);
- if (err) {
- rdev_dbg(rdev, "could not add device link %s: %pe\n",
- dev->kobj.name, ERR_PTR(err));
- /* non-fatal */
- }
- }
-
- if (err != -EEXIST) {
- regulator->debugfs = debugfs_create_dir(supply_name, rdev->debugfs);
- if (IS_ERR(regulator->debugfs)) {
- rdev_dbg(rdev, "Failed to create debugfs directory\n");
- regulator->debugfs = NULL;
- }
- }
-
- if (regulator->debugfs) {
- debugfs_create_u32("uA_load", 0444, regulator->debugfs,
- &regulator->uA_load);
- debugfs_create_u32("min_uV", 0444, regulator->debugfs,
- &regulator->voltage[PM_SUSPEND_ON].min_uV);
- debugfs_create_u32("max_uV", 0444, regulator->debugfs,
- &regulator->voltage[PM_SUSPEND_ON].max_uV);
- debugfs_create_file("constraint_flags", 0444, regulator->debugfs,
- regulator, &constraint_flags_fops);
- }
-
/*
* Check now if the regulator is an always on regulator - if
* it is then we don't need to do nearly so much work for
@@ -2069,6 +2074,10 @@ static int regulator_resolve_supply(struct regulator_dev *rdev)
if (have_full_constraints()) {
r = dummy_regulator_rdev;
+ if (!r) {
+ ret = -EPROBE_DEFER;
+ goto out;
+ }
get_device(&r->dev);
} else {
dev_err(dev, "Failed to resolve %s-supply for %s\n",
@@ -2086,6 +2095,10 @@ static int regulator_resolve_supply(struct regulator_dev *rdev)
goto out;
}
r = dummy_regulator_rdev;
+ if (!r) {
+ ret = -EPROBE_DEFER;
+ goto out;
+ }
get_device(&r->dev);
}
@@ -2133,6 +2146,9 @@ static int regulator_resolve_supply(struct regulator_dev *rdev)
regulator_unlock_two(rdev, r, &ww_ctx);
+ /* rdev->supply was created in set_supply() */
+ link_and_create_debugfs(rdev->supply, r, &rdev->dev);
+
/*
* In set_machine_constraints() we may have turned this regulator on
* but we couldn't propagate to the supply if it hadn't been resolved
@@ -2211,8 +2227,10 @@ struct regulator *_regulator_get_common(struct regulator_dev *rdev, struct devic
* enabled, even if it isn't hooked up, and just
* provide a dummy.
*/
- dev_warn(dev, "supply %s not found, using dummy regulator\n", id);
rdev = dummy_regulator_rdev;
+ if (!rdev)
+ return ERR_PTR(-EPROBE_DEFER);
+ dev_warn(dev, "supply %s not found, using dummy regulator\n", id);
get_device(&rdev->dev);
break;
@@ -2271,6 +2289,8 @@ struct regulator *_regulator_get_common(struct regulator_dev *rdev, struct devic
return regulator;
}
+ link_and_create_debugfs(regulator, rdev, dev);
+
rdev->open_count++;
if (get_type == EXCLUSIVE_GET) {
rdev->exclusive = 1;
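
Two of the regulator-core hunks above make lookups that fall back to the dummy regulator return -EPROBE_DEFER when dummy_regulator_rdev has not been registered yet, instead of dereferencing a NULL pointer. The deferral pattern in a self-contained sketch (names are made up):

#include <linux/errno.h>

struct provider { int unused; };

/* set elsewhere once the provider has registered; NULL until then */
static struct provider *the_provider;

static int consumer_bind(void)
{
	if (!the_provider)
		return -EPROBE_DEFER;	/* not ready yet: driver core retries probe later */
	/* ... use the_provider ... */
	return 0;
}
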
diff --git a/drivers/regulator/cros-ec-regulator.c b/drivers/regulator/cros-ec-regulator.c
index fb9643ed7a49..fb0767b33a36 100644
--- a/drivers/regulator/cros-ec-regulator.c
+++ b/drivers/regulator/cros-ec-regulator.c
@@ -138,8 +138,8 @@ static int cros_ec_regulator_init_info(struct device *dev,
data->num_voltages =
min_t(u16, ARRAY_SIZE(resp.voltages_mv), resp.num_voltages);
data->voltages_mV =
- devm_kmemdup(dev, resp.voltages_mv,
- sizeof(u16) * data->num_voltages, GFP_KERNEL);
+ devm_kmemdup_array(dev, resp.voltages_mv, data->num_voltages,
+ sizeof(resp.voltages_mv[0]), GFP_KERNEL);
if (!data->voltages_mV)
return -ENOMEM;
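
This hunk (and the devres.c one below) replaces an open-coded count-times-size multiplication with devm_kmemdup_array(), which takes the element count and element size separately. A small usage sketch, assuming only the (dev, src, count, element size, gfp) argument order visible in these hunks:

#include <linux/device.h>
#include <linux/slab.h>
#include <linux/types.h>

/* duplicate a table of n u16 values into device-managed memory */
static u16 *copy_thresholds(struct device *dev, const u16 *src, size_t n)
{
	return devm_kmemdup_array(dev, src, n, sizeof(*src), GFP_KERNEL);
}
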
diff --git a/drivers/regulator/devres.c b/drivers/regulator/devres.c
index 36164aec30e8..2cf03042fddf 100644
--- a/drivers/regulator/devres.c
+++ b/drivers/regulator/devres.c
@@ -332,9 +332,8 @@ int devm_regulator_bulk_get_const(struct device *dev, int num_consumers,
const struct regulator_bulk_data *in_consumers,
struct regulator_bulk_data **out_consumers)
{
- *out_consumers = devm_kmemdup(dev, in_consumers,
- num_consumers * sizeof(*in_consumers),
- GFP_KERNEL);
+ *out_consumers = devm_kmemdup_array(dev, in_consumers, num_consumers,
+ sizeof(*in_consumers), GFP_KERNEL);
if (*out_consumers == NULL)
return -ENOMEM;
@@ -772,6 +771,23 @@ static struct regulator *_devm_of_regulator_get(struct device *dev, struct devic
}
/**
+ * devm_of_regulator_get - Resource managed of_regulator_get()
+ * @dev: device used for dev_printk() messages and resource lifetime management
+ * @node: device node for regulator "consumer"
+ * @id: supply name or regulator ID.
+ *
+ * Managed of_regulator_get(). Regulators returned from this
+ * function are automatically regulator_put() on driver detach. See
+ * of_regulator_get() for more information.
+ */
+struct regulator *devm_of_regulator_get(struct device *dev, struct device_node *node,
+ const char *id)
+{
+ return _devm_of_regulator_get(dev, node, id, NORMAL_GET);
+}
+EXPORT_SYMBOL_GPL(devm_of_regulator_get);
+
+/**
* devm_of_regulator_get_optional - Resource managed of_regulator_get_optional()
* @dev: device used for dev_printk() messages and resource lifetime management
* @node: device node for regulator "consumer"
diff --git a/drivers/regulator/dummy.c b/drivers/regulator/dummy.c
index 5b9b9e4e762d..9f59889129ab 100644
--- a/drivers/regulator/dummy.c
+++ b/drivers/regulator/dummy.c
@@ -60,7 +60,7 @@ static struct platform_driver dummy_regulator_driver = {
.probe = dummy_regulator_probe,
.driver = {
.name = "reg-dummy",
- .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ .probe_type = PROBE_FORCE_SYNCHRONOUS,
},
};
diff --git a/drivers/regulator/of_regulator.c b/drivers/regulator/of_regulator.c
index 011088c57891..32e88cada47a 100644
--- a/drivers/regulator/of_regulator.c
+++ b/drivers/regulator/of_regulator.c
@@ -698,6 +698,27 @@ struct regulator *_of_regulator_get(struct device *dev, struct device_node *node
}
/**
+ * of_regulator_get - get regulator via device tree lookup
+ * @dev: device used for dev_printk() messages
+ * @node: device node for regulator "consumer"
+ * @id: Supply name
+ *
+ * Return: pointer to struct regulator corresponding to the regulator producer,
+ * or PTR_ERR() encoded error number.
+ *
+ * This is intended for use by consumers that want to get a regulator
+ * supply directly from a device node. This will _not_ consider supply
+ * aliases. See regulator_dev_lookup().
+ */
+struct regulator *of_regulator_get(struct device *dev,
+ struct device_node *node,
+ const char *id)
+{
+ return _of_regulator_get(dev, node, id, NORMAL_GET);
+}
+EXPORT_SYMBOL_GPL(of_regulator_get);
+
+/**
* of_regulator_get_optional - get optional regulator via device tree lookup
* @dev: device used for dev_printk() messages
* @node: device node for regulator "consumer"
diff --git a/drivers/regulator/pca9450-regulator.c b/drivers/regulator/pca9450-regulator.c
index faa6b79c27d7..a56f3ab754fa 100644
--- a/drivers/regulator/pca9450-regulator.c
+++ b/drivers/regulator/pca9450-regulator.c
@@ -36,6 +36,7 @@ struct pca9450 {
enum pca9450_chip_type type;
unsigned int rcnt;
int irq;
+ bool sd_vsel_fixed_low;
};
static const struct regmap_range pca9450_status_range = {
@@ -98,6 +99,61 @@ static const struct regulator_ops pca9450_ldo_regulator_ops = {
.get_voltage_sel = regulator_get_voltage_sel_regmap,
};
+static unsigned int pca9450_ldo5_get_reg_voltage_sel(struct regulator_dev *rdev)
+{
+ struct pca9450 *pca9450 = rdev_get_drvdata(rdev);
+
+ if (pca9450->sd_vsel_fixed_low)
+ return PCA9450_REG_LDO5CTRL_L;
+
+ if (pca9450->sd_vsel_gpio && !gpiod_get_value(pca9450->sd_vsel_gpio))
+ return PCA9450_REG_LDO5CTRL_L;
+
+ return rdev->desc->vsel_reg;
+}
+
+static int pca9450_ldo5_get_voltage_sel_regmap(struct regulator_dev *rdev)
+{
+ unsigned int val;
+ int ret;
+
+ ret = regmap_read(rdev->regmap, pca9450_ldo5_get_reg_voltage_sel(rdev), &val);
+ if (ret != 0)
+ return ret;
+
+ val &= rdev->desc->vsel_mask;
+ val >>= ffs(rdev->desc->vsel_mask) - 1;
+
+ return val;
+}
+
+static int pca9450_ldo5_set_voltage_sel_regmap(struct regulator_dev *rdev, unsigned int sel)
+{
+ int ret;
+
+ sel <<= ffs(rdev->desc->vsel_mask) - 1;
+
+ ret = regmap_update_bits(rdev->regmap, pca9450_ldo5_get_reg_voltage_sel(rdev),
+ rdev->desc->vsel_mask, sel);
+ if (ret)
+ return ret;
+
+ if (rdev->desc->apply_bit)
+ ret = regmap_update_bits(rdev->regmap, rdev->desc->apply_reg,
+ rdev->desc->apply_bit,
+ rdev->desc->apply_bit);
+ return ret;
+}
+
+static const struct regulator_ops pca9450_ldo5_regulator_ops = {
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .list_voltage = regulator_list_voltage_linear_range,
+ .set_voltage_sel = pca9450_ldo5_set_voltage_sel_regmap,
+ .get_voltage_sel = pca9450_ldo5_get_voltage_sel_regmap,
+};
+
/*
* BUCK1/2/3
* 0.60 to 2.1875V (12.5mV step)
@@ -453,14 +509,14 @@ static const struct pca9450_regulator_desc pca9450a_regulators[] = {
.of_match = of_match_ptr("LDO5"),
.regulators_node = of_match_ptr("regulators"),
.id = PCA9450_LDO5,
- .ops = &pca9450_ldo_regulator_ops,
+ .ops = &pca9450_ldo5_regulator_ops,
.type = REGULATOR_VOLTAGE,
.n_voltages = PCA9450_LDO5_VOLTAGE_NUM,
.linear_ranges = pca9450_ldo5_volts,
.n_linear_ranges = ARRAY_SIZE(pca9450_ldo5_volts),
.vsel_reg = PCA9450_REG_LDO5CTRL_H,
.vsel_mask = LDO5HOUT_MASK,
- .enable_reg = PCA9450_REG_LDO5CTRL_H,
+ .enable_reg = PCA9450_REG_LDO5CTRL_L,
.enable_mask = LDO5H_EN_MASK,
.owner = THIS_MODULE,
},
@@ -667,14 +723,14 @@ static const struct pca9450_regulator_desc pca9450bc_regulators[] = {
.of_match = of_match_ptr("LDO5"),
.regulators_node = of_match_ptr("regulators"),
.id = PCA9450_LDO5,
- .ops = &pca9450_ldo_regulator_ops,
+ .ops = &pca9450_ldo5_regulator_ops,
.type = REGULATOR_VOLTAGE,
.n_voltages = PCA9450_LDO5_VOLTAGE_NUM,
.linear_ranges = pca9450_ldo5_volts,
.n_linear_ranges = ARRAY_SIZE(pca9450_ldo5_volts),
.vsel_reg = PCA9450_REG_LDO5CTRL_H,
.vsel_mask = LDO5HOUT_MASK,
- .enable_reg = PCA9450_REG_LDO5CTRL_H,
+ .enable_reg = PCA9450_REG_LDO5CTRL_L,
.enable_mask = LDO5H_EN_MASK,
.owner = THIS_MODULE,
},
@@ -857,14 +913,14 @@ static const struct pca9450_regulator_desc pca9451a_regulators[] = {
.of_match = of_match_ptr("LDO5"),
.regulators_node = of_match_ptr("regulators"),
.id = PCA9450_LDO5,
- .ops = &pca9450_ldo_regulator_ops,
+ .ops = &pca9450_ldo5_regulator_ops,
.type = REGULATOR_VOLTAGE,
.n_voltages = PCA9450_LDO5_VOLTAGE_NUM,
.linear_ranges = pca9450_ldo5_volts,
.n_linear_ranges = ARRAY_SIZE(pca9450_ldo5_volts),
.vsel_reg = PCA9450_REG_LDO5CTRL_H,
.vsel_mask = LDO5HOUT_MASK,
- .enable_reg = PCA9450_REG_LDO5CTRL_H,
+ .enable_reg = PCA9450_REG_LDO5CTRL_L,
.enable_mask = LDO5H_EN_MASK,
.owner = THIS_MODULE,
},
@@ -915,6 +971,7 @@ static int pca9450_i2c_probe(struct i2c_client *i2c)
of_device_get_match_data(&i2c->dev);
const struct pca9450_regulator_desc *regulator_desc;
struct regulator_config config = { };
+ struct regulator_dev *ldo5;
struct pca9450 *pca9450;
unsigned int device_id, i;
unsigned int reset_ctrl;
@@ -980,11 +1037,15 @@ static int pca9450_i2c_probe(struct i2c_client *i2c)
config.regmap = pca9450->regmap;
config.dev = pca9450->dev;
+ config.driver_data = pca9450;
rdev = devm_regulator_register(pca9450->dev, desc, &config);
if (IS_ERR(rdev))
return dev_err_probe(pca9450->dev, PTR_ERR(rdev),
"Failed to register regulator(%s)\n", desc->name);
+
+ if (!strcmp(desc->name, "ldo5"))
+ ldo5 = rdev;
}
if (pca9450->irq) {
@@ -1032,15 +1093,19 @@ static int pca9450_i2c_probe(struct i2c_client *i2c)
}
/*
- * The driver uses the LDO5CTRL_H register to control the LDO5 regulator.
- * This is only valid if the SD_VSEL input of the PMIC is high. Let's
- * check if the pin is available as GPIO and set it to high.
+ * For LDO5 we need to be able to check the status of the SD_VSEL input in
+ * order to know which control register is used. Most boards connect SD_VSEL
+ * to the VSELECT signal, so we can use the GPIO that is internally routed
+ * to this signal (if SION bit is set in IOMUX).
*/
- pca9450->sd_vsel_gpio = gpiod_get_optional(pca9450->dev, "sd-vsel", GPIOD_OUT_HIGH);
+ pca9450->sd_vsel_gpio = gpiod_get_optional(&ldo5->dev, "sd-vsel", GPIOD_IN);
+ if (IS_ERR(pca9450->sd_vsel_gpio)) {
+ dev_err(&i2c->dev, "Failed to get SD_VSEL GPIO\n");
+ return ret;
+ }
- if (IS_ERR(pca9450->sd_vsel_gpio))
- return dev_err_probe(&i2c->dev, PTR_ERR(pca9450->sd_vsel_gpio),
- "Failed to get SD_VSEL GPIO\n");
+ pca9450->sd_vsel_fixed_low =
+ of_property_read_bool(ldo5->dev.of_node, "nxp,sd-vsel-fixed-low");
dev_info(&i2c->dev, "%s probed.\n",
type == PCA9450_TYPE_PCA9450A ? "pca9450a" :
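
For the pca9450 changes above, LDO5 has two control registers and the live one is selected by the SD_VSEL pin, read at runtime through a GPIO or forced by the "nxp,sd-vsel-fixed-low" property. The selection rule condensed into one helper (the struct here is a stand-in carrying only the two fields the decision needs):

#include <linux/gpio/consumer.h>

struct ldo5_sel {
	struct gpio_desc *sd_vsel_gpio;
	bool sd_vsel_fixed_low;
};

/* true when the low-range register (LDO5CTRL_L) is the one in effect */
static bool ldo5_uses_low_register(const struct ldo5_sel *s)
{
	if (s->sd_vsel_fixed_low)
		return true;
	return s->sd_vsel_gpio && !gpiod_get_value(s->sd_vsel_gpio);
}
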
diff --git a/drivers/regulator/pcf50633-regulator.c b/drivers/regulator/pcf50633-regulator.c
deleted file mode 100644
index 9f08a62c800e..000000000000
--- a/drivers/regulator/pcf50633-regulator.c
+++ /dev/null
@@ -1,124 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/* NXP PCF50633 PMIC Driver
- *
- * (C) 2006-2008 by Openmoko, Inc.
- * Author: Balaji Rao <balajirrao@openmoko.org>
- * All rights reserved.
- *
- * Broken down from monstrous PCF50633 driver mainly by
- * Harald Welte and Andy Green and Werner Almesberger
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/device.h>
-#include <linux/err.h>
-#include <linux/platform_device.h>
-
-#include <linux/mfd/pcf50633/core.h>
-#include <linux/mfd/pcf50633/pmic.h>
-
-#define PCF50633_REGULATOR(_name, _id, _min_uV, _uV_step, _min_sel, _n) \
- { \
- .name = _name, \
- .id = PCF50633_REGULATOR_##_id, \
- .ops = &pcf50633_regulator_ops, \
- .n_voltages = _n, \
- .min_uV = _min_uV, \
- .uV_step = _uV_step, \
- .linear_min_sel = _min_sel, \
- .type = REGULATOR_VOLTAGE, \
- .owner = THIS_MODULE, \
- .vsel_reg = PCF50633_REG_##_id##OUT, \
- .vsel_mask = 0xff, \
- .enable_reg = PCF50633_REG_##_id##OUT + 1, \
- .enable_mask = PCF50633_REGULATOR_ON, \
- }
-
-static const struct regulator_ops pcf50633_regulator_ops = {
- .set_voltage_sel = regulator_set_voltage_sel_regmap,
- .get_voltage_sel = regulator_get_voltage_sel_regmap,
- .list_voltage = regulator_list_voltage_linear,
- .map_voltage = regulator_map_voltage_linear,
- .enable = regulator_enable_regmap,
- .disable = regulator_disable_regmap,
- .is_enabled = regulator_is_enabled_regmap,
-};
-
-static const struct regulator_desc regulators[] = {
- [PCF50633_REGULATOR_AUTO] =
- PCF50633_REGULATOR("auto", AUTO, 1800000, 25000, 0x2f, 128),
- [PCF50633_REGULATOR_DOWN1] =
- PCF50633_REGULATOR("down1", DOWN1, 625000, 25000, 0, 96),
- [PCF50633_REGULATOR_DOWN2] =
- PCF50633_REGULATOR("down2", DOWN2, 625000, 25000, 0, 96),
- [PCF50633_REGULATOR_LDO1] =
- PCF50633_REGULATOR("ldo1", LDO1, 900000, 100000, 0, 28),
- [PCF50633_REGULATOR_LDO2] =
- PCF50633_REGULATOR("ldo2", LDO2, 900000, 100000, 0, 28),
- [PCF50633_REGULATOR_LDO3] =
- PCF50633_REGULATOR("ldo3", LDO3, 900000, 100000, 0, 28),
- [PCF50633_REGULATOR_LDO4] =
- PCF50633_REGULATOR("ldo4", LDO4, 900000, 100000, 0, 28),
- [PCF50633_REGULATOR_LDO5] =
- PCF50633_REGULATOR("ldo5", LDO5, 900000, 100000, 0, 28),
- [PCF50633_REGULATOR_LDO6] =
- PCF50633_REGULATOR("ldo6", LDO6, 900000, 100000, 0, 28),
- [PCF50633_REGULATOR_HCLDO] =
- PCF50633_REGULATOR("hcldo", HCLDO, 900000, 100000, 0, 28),
- [PCF50633_REGULATOR_MEMLDO] =
- PCF50633_REGULATOR("memldo", MEMLDO, 900000, 100000, 0, 28),
-};
-
-static int pcf50633_regulator_probe(struct platform_device *pdev)
-{
- struct regulator_dev *rdev;
- struct pcf50633 *pcf;
- struct regulator_config config = { };
-
- /* Already set by core driver */
- pcf = dev_to_pcf50633(pdev->dev.parent);
-
- config.dev = &pdev->dev;
- config.init_data = dev_get_platdata(&pdev->dev);
- config.driver_data = pcf;
- config.regmap = pcf->regmap;
-
- rdev = devm_regulator_register(&pdev->dev, &regulators[pdev->id],
- &config);
- if (IS_ERR(rdev))
- return PTR_ERR(rdev);
-
- platform_set_drvdata(pdev, rdev);
-
- if (pcf->pdata->regulator_registered)
- pcf->pdata->regulator_registered(pcf, pdev->id);
-
- return 0;
-}
-
-static struct platform_driver pcf50633_regulator_driver = {
- .driver = {
- .name = "pcf50633-regulator",
- .probe_type = PROBE_PREFER_ASYNCHRONOUS,
- },
- .probe = pcf50633_regulator_probe,
-};
-
-static int __init pcf50633_regulator_init(void)
-{
- return platform_driver_register(&pcf50633_regulator_driver);
-}
-subsys_initcall(pcf50633_regulator_init);
-
-static void __exit pcf50633_regulator_exit(void)
-{
- platform_driver_unregister(&pcf50633_regulator_driver);
-}
-module_exit(pcf50633_regulator_exit);
-
-MODULE_AUTHOR("Balaji Rao <balajirrao@openmoko.org>");
-MODULE_DESCRIPTION("PCF50633 regulator driver");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:pcf50633-regulator");
diff --git a/drivers/regulator/pf9453-regulator.c b/drivers/regulator/pf9453-regulator.c
new file mode 100644
index 000000000000..ed6bf0f6c4fe
--- /dev/null
+++ b/drivers/regulator/pf9453-regulator.c
@@ -0,0 +1,879 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2024 NXP.
+ * NXP PF9453 pmic driver
+ */
+
+#include <linux/bits.h>
+#include <linux/err.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/of_regulator.h>
+
+struct pf9453_dvs_config {
+ unsigned int run_reg; /* dvs0 */
+ unsigned int run_mask;
+ unsigned int standby_reg; /* dvs1 */
+ unsigned int standby_mask;
+};
+
+struct pf9453_regulator_desc {
+ struct regulator_desc desc;
+ const struct pf9453_dvs_config dvs;
+};
+
+struct pf9453 {
+ struct device *dev;
+ struct regmap *regmap;
+ struct gpio_desc *sd_vsel_gpio;
+ int irq;
+};
+
+enum {
+ PF9453_BUCK1 = 0,
+ PF9453_BUCK2,
+ PF9453_BUCK3,
+ PF9453_BUCK4,
+ PF9453_LDO1,
+ PF9453_LDO2,
+ PF9453_LDOSNVS,
+ PF9453_REGULATOR_CNT
+};
+
+enum {
+ PF9453_DVS_LEVEL_RUN = 0,
+ PF9453_DVS_LEVEL_STANDBY,
+ PF9453_DVS_LEVEL_DPSTANDBY,
+ PF9453_DVS_LEVEL_MAX
+};
+
+#define PF9453_BUCK1_VOLTAGE_NUM 0x80
+#define PF9453_BUCK2_VOLTAGE_NUM 0x80
+#define PF9453_BUCK3_VOLTAGE_NUM 0x80
+#define PF9453_BUCK4_VOLTAGE_NUM 0x80
+
+#define PF9453_LDO1_VOLTAGE_NUM 0x65
+#define PF9453_LDO2_VOLTAGE_NUM 0x3b
+#define PF9453_LDOSNVS_VOLTAGE_NUM 0x59
+
+enum {
+ PF9453_REG_DEV_ID = 0x00,
+ PF9453_REG_OTP_VER = 0x01,
+ PF9453_REG_INT1 = 0x02,
+ PF9453_REG_INT1_MASK = 0x03,
+ PF9453_REG_INT1_STATUS = 0x04,
+ PF9453_REG_VRFLT1_INT = 0x05,
+ PF9453_REG_VRFLT1_MASK = 0x06,
+ PF9453_REG_PWRON_STAT = 0x07,
+ PF9453_REG_RESET_CTRL = 0x08,
+ PF9453_REG_SW_RST = 0x09,
+ PF9453_REG_PWR_CTRL = 0x0a,
+ PF9453_REG_CONFIG1 = 0x0b,
+ PF9453_REG_CONFIG2 = 0x0c,
+ PF9453_REG_32K_CONFIG = 0x0d,
+ PF9453_REG_BUCK1CTRL = 0x10,
+ PF9453_REG_BUCK1OUT = 0x11,
+ PF9453_REG_BUCK2CTRL = 0x14,
+ PF9453_REG_BUCK2OUT = 0x15,
+ PF9453_REG_BUCK2OUT_STBY = 0x1d,
+ PF9453_REG_BUCK2OUT_MAX_LIMIT = 0x1f,
+ PF9453_REG_BUCK2OUT_MIN_LIMIT = 0x20,
+ PF9453_REG_BUCK3CTRL = 0x21,
+ PF9453_REG_BUCK3OUT = 0x22,
+ PF9453_REG_BUCK4CTRL = 0x2e,
+ PF9453_REG_BUCK4OUT = 0x2f,
+ PF9453_REG_LDO1OUT_L = 0x36,
+ PF9453_REG_LDO1CFG = 0x37,
+ PF9453_REG_LDO1OUT_H = 0x38,
+ PF9453_REG_LDOSNVS_CFG1 = 0x39,
+ PF9453_REG_LDOSNVS_CFG2 = 0x3a,
+ PF9453_REG_LDO2CFG = 0x3b,
+ PF9453_REG_LDO2OUT = 0x3c,
+ PF9453_REG_BUCK_POK = 0x3d,
+ PF9453_REG_LSW_CTRL1 = 0x40,
+ PF9453_REG_LSW_CTRL2 = 0x41,
+ PF9453_REG_LOCK = 0x4e,
+ PF9453_MAX_REG
+};
+
+#define PF9453_UNLOCK_KEY 0x5c
+#define PF9453_LOCK_KEY 0x0
+
+/* PF9453 BUCK ENMODE bits */
+#define BUCK_ENMODE_OFF 0x00
+#define BUCK_ENMODE_ONREQ 0x01
+#define BUCK_ENMODE_ONREQ_STBY 0x02
+#define BUCK_ENMODE_ONREQ_STBY_DPSTBY 0x03
+
+/* PF9453 BUCK ENMODE bits */
+#define LDO_ENMODE_OFF 0x00
+#define LDO_ENMODE_ONREQ 0x01
+#define LDO_ENMODE_ONREQ_STBY 0x02
+#define LDO_ENMODE_ONREQ_STBY_DPSTBY 0x03
+
+/* PF9453_REG_BUCK1_CTRL bits */
+#define BUCK1_LPMODE 0x30
+#define BUCK1_AD 0x08
+#define BUCK1_FPWM 0x04
+#define BUCK1_ENMODE_MASK GENMASK(1, 0)
+
+/* PF9453_REG_BUCK2_CTRL bits */
+#define BUCK2_RAMP_MASK GENMASK(7, 6)
+#define BUCK2_RAMP_25MV 0x0
+#define BUCK2_RAMP_12P5MV 0x1
+#define BUCK2_RAMP_6P25MV 0x2
+#define BUCK2_RAMP_3P125MV 0x3
+#define BUCK2_LPMODE 0x30
+#define BUCK2_AD 0x08
+#define BUCK2_FPWM 0x04
+#define BUCK2_ENMODE_MASK GENMASK(1, 0)
+
+/* PF9453_REG_BUCK3_CTRL bits */
+#define BUCK3_LPMODE 0x30
+#define BUCK3_AD 0x08
+#define BUCK3_FPWM 0x04
+#define BUCK3_ENMODE_MASK GENMASK(1, 0)
+
+/* PF9453_REG_BUCK4_CTRL bits */
+#define BUCK4_LPMODE 0x30
+#define BUCK4_AD 0x08
+#define BUCK4_FPWM 0x04
+#define BUCK4_ENMODE_MASK GENMASK(1, 0)
+
+/* PF9453_REG_BUCK123_PRESET_EN bit */
+#define BUCK123_PRESET_EN 0x80
+
+/* PF9453_BUCK1OUT bits */
+#define BUCK1OUT_MASK GENMASK(6, 0)
+
+/* PF9453_BUCK2OUT bits */
+#define BUCK2OUT_MASK GENMASK(6, 0)
+#define BUCK2OUT_STBY_MASK GENMASK(6, 0)
+
+/* PF9453_REG_BUCK3OUT bits */
+#define BUCK3OUT_MASK GENMASK(6, 0)
+
+/* PF9453_REG_BUCK4OUT bits */
+#define BUCK4OUT_MASK GENMASK(6, 0)
+
+/* PF9453_REG_LDO1_VOLT bits */
+#define LDO1_EN_MASK GENMASK(1, 0)
+#define LDO1OUT_MASK GENMASK(6, 0)
+
+/* PF9453_REG_LDO2_VOLT bits */
+#define LDO2_EN_MASK GENMASK(1, 0)
+#define LDO2OUT_MASK GENMASK(6, 0)
+
+/* PF9453_REG_LDOSNVS_VOLT bits */
+#define LDOSNVS_EN_MASK GENMASK(0, 0)
+#define LDOSNVSCFG1_MASK GENMASK(6, 0)
+
+/* PF9453_REG_IRQ bits */
+#define IRQ_RSVD 0x80
+#define IRQ_RSTB 0x40
+#define IRQ_ONKEY 0x20
+#define IRQ_RESETKEY 0x10
+#define IRQ_VR_FLT1 0x08
+#define IRQ_LOWVSYS 0x04
+#define IRQ_THERM_100 0x02
+#define IRQ_THERM_80 0x01
+
+/* PF9453_REG_RESET_CTRL bits */
+#define WDOG_B_CFG_MASK GENMASK(7, 6)
+#define WDOG_B_CFG_NONE 0x00
+#define WDOG_B_CFG_WARM 0x40
+#define WDOG_B_CFG_COLD 0x80
+
+/* PF9453_REG_CONFIG2 bits */
+#define I2C_LT_MASK GENMASK(1, 0)
+#define I2C_LT_FORCE_DISABLE 0x00
+#define I2C_LT_ON_STANDBY_RUN 0x01
+#define I2C_LT_ON_RUN 0x02
+#define I2C_LT_FORCE_ENABLE 0x03
+
+static const struct regmap_range pf9453_status_range = {
+ .range_min = PF9453_REG_INT1,
+ .range_max = PF9453_REG_PWRON_STAT,
+};
+
+static const struct regmap_access_table pf9453_volatile_regs = {
+ .yes_ranges = &pf9453_status_range,
+ .n_yes_ranges = 1,
+};
+
+static const struct regmap_config pf9453_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .volatile_table = &pf9453_volatile_regs,
+ .max_register = PF9453_MAX_REG - 1,
+ .cache_type = REGCACHE_RBTREE,
+};
+
+/*
+ * BUCK2
+ * BUCK2RAM[1:0] BUCK2 DVS ramp rate setting
+ * 00: 25mV/1usec
+ * 01: 25mV/2usec
+ * 10: 25mV/4usec
+ * 11: 25mV/8usec
+ */
+static const unsigned int pf9453_dvs_buck_ramp_table[] = {
+ 25000, 12500, 6250, 3125
+};
+
+static bool is_reg_protect(uint reg)
+{
+ switch (reg) {
+ case PF9453_REG_BUCK1OUT:
+ case PF9453_REG_BUCK2OUT:
+ case PF9453_REG_BUCK3OUT:
+ case PF9453_REG_BUCK4OUT:
+ case PF9453_REG_LDO1OUT_L:
+ case PF9453_REG_LDO1OUT_H:
+ case PF9453_REG_LDO2OUT:
+ case PF9453_REG_LDOSNVS_CFG1:
+ case PF9453_REG_BUCK2OUT_MAX_LIMIT:
+ case PF9453_REG_BUCK2OUT_MIN_LIMIT:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static int pf9453_pmic_write(struct pf9453 *pf9453, unsigned int reg, u8 mask, unsigned int val)
+{
+ int ret = -EINVAL;
+ u8 data, key;
+ u32 rxBuf;
+
+ /* If not updating entire register, perform a read-mod-write */
+ data = val;
+ key = PF9453_UNLOCK_KEY;
+
+ if (mask != 0xffU) {
+ /* Read data */
+ ret = regmap_read(pf9453->regmap, reg, &rxBuf);
+ if (ret) {
+ dev_err(pf9453->dev, "Read reg=%0x error!\n", reg);
+ return ret;
+ }
+ data = (val & mask) | (rxBuf & (~mask));
+ }
+
+ if (reg < PF9453_MAX_REG) {
+ if (is_reg_protect(reg)) {
+ ret = regmap_raw_write(pf9453->regmap, PF9453_REG_LOCK, &key, 1U);
+ if (ret) {
+ dev_err(pf9453->dev, "Write reg=%0x error!\n", reg);
+ return ret;
+ }
+
+ ret = regmap_raw_write(pf9453->regmap, reg, &data, 1U);
+ if (ret) {
+ dev_err(pf9453->dev, "Write reg=%0x error!\n", reg);
+ return ret;
+ }
+
+ key = PF9453_LOCK_KEY;
+ ret = regmap_raw_write(pf9453->regmap, PF9453_REG_LOCK, &key, 1U);
+ if (ret) {
+ dev_err(pf9453->dev, "Write reg=%0x error!\n", reg);
+ return ret;
+ }
+ } else {
+ ret = regmap_raw_write(pf9453->regmap, reg, &data, 1U);
+ if (ret) {
+ dev_err(pf9453->dev, "Write reg=%0x error!\n", reg);
+ return ret;
+ }
+ }
+ }
+
+ return ret;
+}
+
+/**
+ * pf9453_regulator_enable_regmap for regmap users
+ *
+ * @rdev: regulator to operate on
+ *
+ * Regulators that use regmap for their register I/O can set the
+ * enable_reg and enable_mask fields in their descriptor and then use
+ * this as their enable() operation, saving some code.
+ */
+static int pf9453_regulator_enable_regmap(struct regulator_dev *rdev)
+{
+ struct pf9453 *pf9453 = dev_get_drvdata(rdev->dev.parent);
+ unsigned int val;
+
+ if (rdev->desc->enable_is_inverted) {
+ val = rdev->desc->disable_val;
+ } else {
+ val = rdev->desc->enable_val;
+ if (!val)
+ val = rdev->desc->enable_mask;
+ }
+
+ return pf9453_pmic_write(pf9453, rdev->desc->enable_reg, rdev->desc->enable_mask, val);
+}
+
+/**
+ * pf9453_regulator_disable_regmap for regmap users
+ *
+ * @rdev: regulator to operate on
+ *
+ * Regulators that use regmap for their register I/O can set the
+ * enable_reg and enable_mask fields in their descriptor and then use
+ * this as their disable() operation, saving some code.
+ */
+static int pf9453_regulator_disable_regmap(struct regulator_dev *rdev)
+{
+ struct pf9453 *pf9453 = dev_get_drvdata(rdev->dev.parent);
+ unsigned int val;
+
+ if (rdev->desc->enable_is_inverted) {
+ val = rdev->desc->enable_val;
+ if (!val)
+ val = rdev->desc->enable_mask;
+ } else {
+ val = rdev->desc->disable_val;
+ }
+
+ return pf9453_pmic_write(pf9453, rdev->desc->enable_reg, rdev->desc->enable_mask, val);
+}
+
+/**
+ * pf9453_regulator_set_voltage_sel_regmap for regmap users
+ *
+ * @rdev: regulator to operate on
+ * @sel: Selector to set
+ *
+ * Regulators that use regmap for their register I/O can set the
+ * vsel_reg and vsel_mask fields in their descriptor and then use this
+ * as their set_voltage_vsel operation, saving some code.
+ */
+static int pf9453_regulator_set_voltage_sel_regmap(struct regulator_dev *rdev, unsigned int sel)
+{
+ struct pf9453 *pf9453 = dev_get_drvdata(rdev->dev.parent);
+ int ret;
+
+ sel <<= ffs(rdev->desc->vsel_mask) - 1;
+ ret = pf9453_pmic_write(pf9453, rdev->desc->vsel_reg, rdev->desc->vsel_mask, sel);
+ if (ret)
+ return ret;
+
+ if (rdev->desc->apply_bit)
+ ret = pf9453_pmic_write(pf9453, rdev->desc->apply_reg,
+ rdev->desc->apply_bit, rdev->desc->apply_bit);
+ return ret;
+}
+
+static int find_closest_bigger(unsigned int target, const unsigned int *table,
+ unsigned int num_sel, unsigned int *sel)
+{
+ unsigned int s, tmp, max, maxsel = 0;
+ bool found = false;
+
+ max = table[0];
+
+ for (s = 0; s < num_sel; s++) {
+ if (table[s] > max) {
+ max = table[s];
+ maxsel = s;
+ }
+ if (table[s] >= target) {
+ if (!found || table[s] - target < tmp - target) {
+ tmp = table[s];
+ *sel = s;
+ found = true;
+ if (tmp == target)
+ break;
+ }
+ }
+ }
+
+ if (!found) {
+ *sel = maxsel;
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * pf9453_regulator_set_ramp_delay_regmap
+ *
+ * @rdev: regulator to operate on
+ *
+ * Regulators that use regmap for their register I/O can set the ramp_reg
+ * and ramp_mask fields in their descriptor and then use this as their
+ * set_ramp_delay operation, saving some code.
+ */
+static int pf9453_regulator_set_ramp_delay_regmap(struct regulator_dev *rdev, int ramp_delay)
+{
+ struct pf9453 *pf9453 = dev_get_drvdata(rdev->dev.parent);
+ unsigned int sel;
+ int ret;
+
+ if (WARN_ON(!rdev->desc->n_ramp_values || !rdev->desc->ramp_delay_table))
+ return -EINVAL;
+
+ ret = find_closest_bigger(ramp_delay, rdev->desc->ramp_delay_table,
+ rdev->desc->n_ramp_values, &sel);
+
+ if (ret) {
+ dev_warn(rdev_get_dev(rdev),
+ "Can't set ramp-delay %u, setting %u\n", ramp_delay,
+ rdev->desc->ramp_delay_table[sel]);
+ }
+
+ sel <<= ffs(rdev->desc->ramp_mask) - 1;
+
+ return pf9453_pmic_write(pf9453, rdev->desc->ramp_reg,
+ rdev->desc->ramp_mask, sel);
+}
+
+static const struct regulator_ops pf9453_dvs_buck_regulator_ops = {
+ .enable = pf9453_regulator_enable_regmap,
+ .disable = pf9453_regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .list_voltage = regulator_list_voltage_linear_range,
+ .set_voltage_sel = pf9453_regulator_set_voltage_sel_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_time_sel = regulator_set_voltage_time_sel,
+ .set_ramp_delay = pf9453_regulator_set_ramp_delay_regmap,
+};
+
+static const struct regulator_ops pf9453_buck_regulator_ops = {
+ .enable = pf9453_regulator_enable_regmap,
+ .disable = pf9453_regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .list_voltage = regulator_list_voltage_linear_range,
+ .set_voltage_sel = pf9453_regulator_set_voltage_sel_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_time_sel = regulator_set_voltage_time_sel,
+};
+
+static const struct regulator_ops pf9453_ldo_regulator_ops = {
+ .enable = pf9453_regulator_enable_regmap,
+ .disable = pf9453_regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .list_voltage = regulator_list_voltage_linear_range,
+ .set_voltage_sel = pf9453_regulator_set_voltage_sel_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+};
+
+/*
+ * BUCK1/3/4
+ * 0.60 to 3.775V (25mV step)
+ */
+static const struct linear_range pf9453_buck134_volts[] = {
+ REGULATOR_LINEAR_RANGE(600000, 0x00, 0x7F, 25000),
+};
+
+/*
+ * BUCK2
+ * 0.60 to 2.1875V (12.5mV step)
+ */
+static const struct linear_range pf9453_buck2_volts[] = {
+ REGULATOR_LINEAR_RANGE(600000, 0x00, 0x7F, 12500),
+};
+
+/*
+ * LDO1
+ * 0.8 to 3.3V (25mV step)
+ */
+static const struct linear_range pf9453_ldo1_volts[] = {
+ REGULATOR_LINEAR_RANGE(800000, 0x00, 0x64, 25000),
+};
+
+/*
+ * LDO2
+ * 0.5 to 1.95V (25mV step)
+ */
+static const struct linear_range pf9453_ldo2_volts[] = {
+ REGULATOR_LINEAR_RANGE(500000, 0x00, 0x3A, 25000),
+};
+
+/*
+ * LDOSNVS
+ * 1.2 to 3.4V (25mV step)
+ */
+static const struct linear_range pf9453_ldosnvs_volts[] = {
+ REGULATOR_LINEAR_RANGE(1200000, 0x00, 0x58, 25000),
+};
+
+static int buck_set_dvs(const struct regulator_desc *desc,
+ struct device_node *np, struct pf9453 *pf9453,
+ char *prop, unsigned int reg, unsigned int mask)
+{
+ int ret, i;
+ u32 uv;
+
+ ret = of_property_read_u32(np, prop, &uv);
+ if (ret == -EINVAL)
+ return 0;
+ else if (ret)
+ return ret;
+
+ for (i = 0; i < desc->n_voltages; i++) {
+ ret = regulator_desc_list_voltage_linear_range(desc, i);
+ if (ret < 0)
+ continue;
+ if (ret == uv) {
+ i <<= ffs(desc->vsel_mask) - 1;
+ ret = pf9453_pmic_write(pf9453, reg, mask, i);
+ break;
+ }
+ }
+
+ if (ret == 0) {
+ struct pf9453_regulator_desc *regulator = container_of(desc,
+ struct pf9453_regulator_desc, desc);
+
+ /* Enable DVS control through PMIC_STBY_REQ for this BUCK */
+ ret = pf9453_pmic_write(pf9453, regulator->desc.enable_reg,
+ BUCK2_LPMODE, BUCK2_LPMODE);
+ }
+ return ret;
+}
+
+static int pf9453_set_dvs_levels(struct device_node *np, const struct regulator_desc *desc,
+ struct regulator_config *cfg)
+{
+ struct pf9453_regulator_desc *data = container_of(desc, struct pf9453_regulator_desc, desc);
+ struct pf9453 *pf9453 = dev_get_drvdata(cfg->dev);
+ const struct pf9453_dvs_config *dvs = &data->dvs;
+ unsigned int reg, mask;
+ int i, ret = 0;
+ char *prop;
+
+ for (i = 0; i < PF9453_DVS_LEVEL_MAX; i++) {
+ switch (i) {
+ case PF9453_DVS_LEVEL_RUN:
+ prop = "nxp,dvs-run-voltage";
+ reg = dvs->run_reg;
+ mask = dvs->run_mask;
+ break;
+ case PF9453_DVS_LEVEL_DPSTANDBY:
+ case PF9453_DVS_LEVEL_STANDBY:
+ prop = "nxp,dvs-standby-voltage";
+ reg = dvs->standby_reg;
+ mask = dvs->standby_mask;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = buck_set_dvs(desc, np, pf9453, prop, reg, mask);
+ if (ret)
+ break;
+ }
+
+ return ret;
+}
+
+static const struct pf9453_regulator_desc pf9453_regulators[] = {
+ {
+ .desc = {
+ .name = "buck1",
+ .of_match = of_match_ptr("BUCK1"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = PF9453_BUCK1,
+ .ops = &pf9453_buck_regulator_ops,
+ .type = REGULATOR_VOLTAGE,
+ .n_voltages = PF9453_BUCK1_VOLTAGE_NUM,
+ .linear_ranges = pf9453_buck134_volts,
+ .n_linear_ranges = ARRAY_SIZE(pf9453_buck134_volts),
+ .vsel_reg = PF9453_REG_BUCK1OUT,
+ .vsel_mask = BUCK1OUT_MASK,
+ .enable_reg = PF9453_REG_BUCK1CTRL,
+ .enable_mask = BUCK1_ENMODE_MASK,
+ .enable_val = BUCK_ENMODE_ONREQ,
+ .owner = THIS_MODULE,
+ },
+ },
+ {
+ .desc = {
+ .name = "buck2",
+ .of_match = of_match_ptr("BUCK2"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = PF9453_BUCK2,
+ .ops = &pf9453_dvs_buck_regulator_ops,
+ .type = REGULATOR_VOLTAGE,
+ .n_voltages = PF9453_BUCK2_VOLTAGE_NUM,
+ .linear_ranges = pf9453_buck2_volts,
+ .n_linear_ranges = ARRAY_SIZE(pf9453_buck2_volts),
+ .vsel_reg = PF9453_REG_BUCK2OUT,
+ .vsel_mask = BUCK2OUT_MASK,
+ .enable_reg = PF9453_REG_BUCK2CTRL,
+ .enable_mask = BUCK2_ENMODE_MASK,
+ .enable_val = BUCK_ENMODE_ONREQ,
+ .ramp_reg = PF9453_REG_BUCK2CTRL,
+ .ramp_mask = BUCK2_RAMP_MASK,
+ .ramp_delay_table = pf9453_dvs_buck_ramp_table,
+ .n_ramp_values = ARRAY_SIZE(pf9453_dvs_buck_ramp_table),
+ .owner = THIS_MODULE,
+ .of_parse_cb = pf9453_set_dvs_levels,
+ },
+ .dvs = {
+ .run_reg = PF9453_REG_BUCK2OUT,
+ .run_mask = BUCK2OUT_MASK,
+ .standby_reg = PF9453_REG_BUCK2OUT_STBY,
+ .standby_mask = BUCK2OUT_STBY_MASK,
+ },
+ },
+ {
+ .desc = {
+ .name = "buck3",
+ .of_match = of_match_ptr("BUCK3"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = PF9453_BUCK3,
+ .ops = &pf9453_buck_regulator_ops,
+ .type = REGULATOR_VOLTAGE,
+ .n_voltages = PF9453_BUCK3_VOLTAGE_NUM,
+ .linear_ranges = pf9453_buck134_volts,
+ .n_linear_ranges = ARRAY_SIZE(pf9453_buck134_volts),
+ .vsel_reg = PF9453_REG_BUCK3OUT,
+ .vsel_mask = BUCK3OUT_MASK,
+ .enable_reg = PF9453_REG_BUCK3CTRL,
+ .enable_mask = BUCK3_ENMODE_MASK,
+ .enable_val = BUCK_ENMODE_ONREQ,
+ .owner = THIS_MODULE,
+ },
+ },
+ {
+ .desc = {
+ .name = "buck4",
+ .of_match = of_match_ptr("BUCK4"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = PF9453_BUCK4,
+ .ops = &pf9453_buck_regulator_ops,
+ .type = REGULATOR_VOLTAGE,
+ .n_voltages = PF9453_BUCK4_VOLTAGE_NUM,
+ .linear_ranges = pf9453_buck134_volts,
+ .n_linear_ranges = ARRAY_SIZE(pf9453_buck134_volts),
+ .vsel_reg = PF9453_REG_BUCK4OUT,
+ .vsel_mask = BUCK4OUT_MASK,
+ .enable_reg = PF9453_REG_BUCK4CTRL,
+ .enable_mask = BUCK4_ENMODE_MASK,
+ .enable_val = BUCK_ENMODE_ONREQ,
+ .owner = THIS_MODULE,
+ },
+ },
+ {
+ .desc = {
+ .name = "ldo1",
+ .of_match = of_match_ptr("LDO1"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = PF9453_LDO1,
+ .ops = &pf9453_ldo_regulator_ops,
+ .type = REGULATOR_VOLTAGE,
+ .n_voltages = PF9453_LDO1_VOLTAGE_NUM,
+ .linear_ranges = pf9453_ldo1_volts,
+ .n_linear_ranges = ARRAY_SIZE(pf9453_ldo1_volts),
+ .vsel_reg = PF9453_REG_LDO1OUT_H,
+ .vsel_mask = LDO1OUT_MASK,
+ .enable_reg = PF9453_REG_LDO1CFG,
+ .enable_mask = LDO1_EN_MASK,
+ .enable_val = LDO_ENMODE_ONREQ,
+ .owner = THIS_MODULE,
+ },
+ },
+ {
+ .desc = {
+ .name = "ldo2",
+ .of_match = of_match_ptr("LDO2"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = PF9453_LDO2,
+ .ops = &pf9453_ldo_regulator_ops,
+ .type = REGULATOR_VOLTAGE,
+ .n_voltages = PF9453_LDO2_VOLTAGE_NUM,
+ .linear_ranges = pf9453_ldo2_volts,
+ .n_linear_ranges = ARRAY_SIZE(pf9453_ldo2_volts),
+ .vsel_reg = PF9453_REG_LDO2OUT,
+ .vsel_mask = LDO2OUT_MASK,
+ .enable_reg = PF9453_REG_LDO2CFG,
+ .enable_mask = LDO2_EN_MASK,
+ .enable_val = LDO_ENMODE_ONREQ,
+ .owner = THIS_MODULE,
+ },
+ },
+ {
+ .desc = {
+ .name = "ldosnvs",
+ .of_match = of_match_ptr("LDO-SNVS"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = PF9453_LDOSNVS,
+ .ops = &pf9453_ldo_regulator_ops,
+ .type = REGULATOR_VOLTAGE,
+ .n_voltages = PF9453_LDOSNVS_VOLTAGE_NUM,
+ .linear_ranges = pf9453_ldosnvs_volts,
+ .n_linear_ranges = ARRAY_SIZE(pf9453_ldosnvs_volts),
+ .vsel_reg = PF9453_REG_LDOSNVS_CFG1,
+ .vsel_mask = LDOSNVSCFG1_MASK,
+ .enable_reg = PF9453_REG_LDOSNVS_CFG2,
+ .enable_mask = LDOSNVS_EN_MASK,
+ .owner = THIS_MODULE,
+ },
+ },
+ { }
+};
+
+static irqreturn_t pf9453_irq_handler(int irq, void *data)
+{
+ struct pf9453 *pf9453 = data;
+ struct regmap *regmap = pf9453->regmap;
+ unsigned int status;
+ int ret;
+
+ ret = regmap_read(regmap, PF9453_REG_INT1, &status);
+ if (ret < 0) {
+ dev_err(pf9453->dev, "Failed to read INT1(%d)\n", ret);
+ return IRQ_NONE;
+ }
+
+ if (status & IRQ_RSTB)
+ dev_warn(pf9453->dev, "IRQ_RSTB interrupt.\n");
+
+ if (status & IRQ_ONKEY)
+ dev_warn(pf9453->dev, "IRQ_ONKEY interrupt.\n");
+
+ if (status & IRQ_VR_FLT1)
+ dev_warn(pf9453->dev, "VRFLT1 interrupt.\n");
+
+ if (status & IRQ_RESETKEY)
+ dev_warn(pf9453->dev, "IRQ_RESETKEY interrupt.\n");
+
+ if (status & IRQ_LOWVSYS)
+ dev_warn(pf9453->dev, "LOWVSYS interrupt.\n");
+
+ if (status & IRQ_THERM_100)
+ dev_warn(pf9453->dev, "IRQ_THERM_100 interrupt.\n");
+
+ if (status & IRQ_THERM_80)
+ dev_warn(pf9453->dev, "IRQ_THERM_80 interrupt.\n");
+
+ return IRQ_HANDLED;
+}
+
+static int pf9453_i2c_probe(struct i2c_client *i2c)
+{
+ const struct pf9453_regulator_desc *regulator_desc = of_device_get_match_data(&i2c->dev);
+ struct regulator_config config = { };
+ unsigned int reset_ctrl;
+ unsigned int device_id;
+ struct pf9453 *pf9453;
+ int ret;
+
+ if (!i2c->irq)
+ return dev_err_probe(&i2c->dev, -EINVAL, "No IRQ configured?\n");
+
+ pf9453 = devm_kzalloc(&i2c->dev, sizeof(struct pf9453), GFP_KERNEL);
+ if (!pf9453)
+ return -ENOMEM;
+
+ pf9453->regmap = devm_regmap_init_i2c(i2c, &pf9453_regmap_config);
+ if (IS_ERR(pf9453->regmap))
+ return dev_err_probe(&i2c->dev, PTR_ERR(pf9453->regmap),
+ "regmap initialization failed\n");
+
+ pf9453->irq = i2c->irq;
+ pf9453->dev = &i2c->dev;
+
+ dev_set_drvdata(&i2c->dev, pf9453);
+
+ ret = regmap_read(pf9453->regmap, PF9453_REG_DEV_ID, &device_id);
+ if (ret)
+ return dev_err_probe(&i2c->dev, ret, "Read device id error\n");
+
+ /* Check the board and devicetree to make sure the right PMIC is matched */
+ if ((device_id >> 4) != 0xb)
+ return dev_err_probe(&i2c->dev, -EINVAL, "Device id(%x) mismatched\n",
+ device_id >> 4);
+
+ while (regulator_desc->desc.name) {
+ const struct regulator_desc *desc;
+ struct regulator_dev *rdev;
+
+ desc = &regulator_desc->desc;
+
+ config.regmap = pf9453->regmap;
+ config.dev = pf9453->dev;
+
+ rdev = devm_regulator_register(pf9453->dev, desc, &config);
+ if (IS_ERR(rdev))
+ return dev_err_probe(pf9453->dev, PTR_ERR(rdev),
+ "Failed to register regulator(%s)\n", desc->name);
+
+ regulator_desc++;
+ }
+
+ ret = devm_request_threaded_irq(pf9453->dev, pf9453->irq, NULL, pf9453_irq_handler,
+ (IRQF_TRIGGER_FALLING | IRQF_ONESHOT),
+ "pf9453-irq", pf9453);
+ if (ret)
+ return dev_err_probe(pf9453->dev, ret, "Failed to request IRQ: %d\n", pf9453->irq);
+
+ /* Unmask all interrupts except PWRON/WDOG/RSVD */
+ ret = pf9453_pmic_write(pf9453, PF9453_REG_INT1_MASK,
+ IRQ_ONKEY | IRQ_RESETKEY | IRQ_RSTB | IRQ_VR_FLT1
+ | IRQ_LOWVSYS | IRQ_THERM_100 | IRQ_THERM_80, IRQ_RSVD);
+ if (ret)
+ return dev_err_probe(&i2c->dev, ret, "Unmask irq error\n");
+
+ if (of_property_read_bool(i2c->dev.of_node, "nxp,wdog_b-warm-reset"))
+ reset_ctrl = WDOG_B_CFG_WARM;
+ else
+ reset_ctrl = WDOG_B_CFG_COLD;
+
+ /* Set reset behavior on assertion of WDOG_B signal */
+ ret = pf9453_pmic_write(pf9453, PF9453_REG_RESET_CTRL, WDOG_B_CFG_MASK, reset_ctrl);
+ if (ret)
+ return dev_err_probe(&i2c->dev, ret, "Failed to set WDOG_B reset behavior\n");
+
+ /*
+ * The driver uses the LDO1OUT_H register to control the LDO1 regulator.
+ * This is only valid while the SD_VSEL input of the PMIC is high, so
+ * check whether the pin is available as a GPIO and drive it high.
+ */
+ pf9453->sd_vsel_gpio = gpiod_get_optional(pf9453->dev, "sd-vsel", GPIOD_OUT_HIGH);
+
+ if (IS_ERR(pf9453->sd_vsel_gpio))
+ return dev_err_probe(&i2c->dev, PTR_ERR(pf9453->sd_vsel_gpio),
+ "Failed to get SD_VSEL GPIO\n");
+
+ return 0;
+}
+
+static const struct of_device_id pf9453_of_match[] = {
+ {
+ .compatible = "nxp,pf9453",
+ .data = pf9453_regulators,
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(of, pf9453_of_match);
+
+static struct i2c_driver pf9453_i2c_driver = {
+ .driver = {
+ .name = "nxp-pf9453",
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ .of_match_table = pf9453_of_match,
+ },
+ .probe = pf9453_i2c_probe,
+};
+
+module_i2c_driver(pf9453_i2c_driver);
+
+MODULE_AUTHOR("Joy Zou <joy.zou@nxp.com>");
+MODULE_DESCRIPTION("NXP PF9453 Power Management IC driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/regulator/rtq2208-regulator.c b/drivers/regulator/rtq2208-regulator.c
index 5925fa7a9a06..9cde7181b0f0 100644
--- a/drivers/regulator/rtq2208-regulator.c
+++ b/drivers/regulator/rtq2208-regulator.c
@@ -27,6 +27,11 @@
#define RTQ2208_REG_LDO1_CFG 0xB1
#define RTQ2208_REG_LDO2_CFG 0xC1
#define RTQ2208_REG_LDO_DVS_CTRL 0xD0
+#define RTQ2208_REG_HIDDEN_BUCKPH 0x55
+#define RTQ2208_REG_HIDDEN_LDOCFG0 0x8F
+#define RTQ2208_REG_HIDDEN_LDOCFG1 0x96
+#define RTQ2208_REG_HIDDEN0 0xFE
+#define RTQ2208_REG_HIDDEN1 0xFF
/* Mask */
#define RTQ2208_BUCK_NR_MTP_SEL_MASK GENMASK(7, 0)
@@ -45,6 +50,11 @@
#define RTQ2208_LDO1_VOSEL_SD_MASK BIT(5)
#define RTQ2208_LDO2_DISCHG_EN_MASK BIT(6)
#define RTQ2208_LDO2_VOSEL_SD_MASK BIT(7)
+#define RTQ2208_MASK_BUCKPH_GROUP1 GENMASK(6, 4)
+#define RTQ2208_MASK_BUCKPH_GROUP2 GENMASK(2, 0)
+#define RTQ2208_MASK_LDO2_OPT0 BIT(7)
+#define RTQ2208_MASK_LDO2_OPT1 BIT(6)
+#define RTQ2208_MASK_LDO1_FIXED BIT(6)
/* Size */
#define RTQ2208_VOUT_MAXNUM 256
@@ -245,11 +255,6 @@ static const unsigned int rtq2208_ldo_volt_table[] = {
3300000,
};
-static struct of_regulator_match rtq2208_ldo_match[] = {
- {.name = "ldo2", },
- {.name = "ldo1", },
-};
-
static unsigned int rtq2208_of_map_mode(unsigned int mode)
{
switch (mode) {
@@ -344,59 +349,6 @@ static irqreturn_t rtq2208_irq_handler(int irqno, void *devid)
return IRQ_HANDLED;
}
-static int rtq2208_of_get_ldo_dvs_ability(struct device *dev)
-{
- struct device_node *np;
- struct of_regulator_match *match;
- struct regulator_desc *desc;
- struct regulator_init_data *init_data;
- u32 fixed_uV;
- int ret, i;
-
- if (!dev->of_node)
- return -ENODEV;
-
- np = of_get_child_by_name(dev->of_node, "regulators");
- if (!np)
- np = dev->of_node;
-
- ret = of_regulator_match(dev, np, rtq2208_ldo_match, ARRAY_SIZE(rtq2208_ldo_match));
-
- of_node_put(np);
-
- if (ret < 0)
- return ret;
-
- for (i = 0; i < ARRAY_SIZE(rtq2208_ldo_match); i++) {
- match = rtq2208_ldo_match + i;
- init_data = match->init_data;
- desc = (struct regulator_desc *)match->desc;
-
- if (!init_data || !desc)
- continue;
-
- /* specify working fixed voltage if the propery exists */
- ret = of_property_read_u32(match->of_node, "richtek,fixed-microvolt", &fixed_uV);
-
- if (!ret) {
- if (fixed_uV != init_data->constraints.min_uV ||
- fixed_uV != init_data->constraints.max_uV)
- return -EINVAL;
- desc->n_voltages = 1;
- desc->fixed_uV = fixed_uV;
- desc->fixed_uV = init_data->constraints.min_uV;
- desc->ops = &rtq2208_regulator_ldo_fix_ops;
- } else {
- desc->n_voltages = ARRAY_SIZE(rtq2208_ldo_volt_table);
- desc->volt_table = rtq2208_ldo_volt_table;
- desc->ops = &rtq2208_regulator_ldo_adj_ops;
- }
- }
-
- return 0;
-}
-
-
#define BUCK_INFO(_name, _id) \
{ \
.name = _name, \
@@ -424,9 +376,11 @@ static const struct linear_range rtq2208_vout_range[] = {
REGULATOR_LINEAR_RANGE(1310000, 181, 255, 10000),
};
-static void rtq2208_init_regulator_desc(struct rtq2208_regulator_desc *rdesc, int mtp_sel, int idx)
+static void rtq2208_init_regulator_desc(struct rtq2208_regulator_desc *rdesc, int mtp_sel, int idx,
+ unsigned int ldo1_fixed, unsigned int ldo2_fixed)
{
struct regulator_desc *desc;
+ unsigned int fixed_uV;
static const struct {
char *name;
int base;
@@ -462,7 +416,8 @@ static void rtq2208_init_regulator_desc(struct rtq2208_regulator_desc *rdesc, in
rdesc->mode_mask = RTQ2208_BUCK_NRMODE_MASK;
- if (idx >= RTQ2208_BUCK_B && idx <= RTQ2208_BUCK_E) {
+ switch (idx) {
+ case RTQ2208_BUCK_B ... RTQ2208_BUCK_E:
/* init buck desc */
desc->ops = &rtq2208_regulator_buck_ops;
desc->vsel_reg = curr_info->base + VSEL_SHIFT(mtp_sel);
@@ -480,7 +435,19 @@ static void rtq2208_init_regulator_desc(struct rtq2208_regulator_desc *rdesc, in
rdesc->suspend_config_reg = BUCK_RG_SHIFT(curr_info->base, 4);
rdesc->suspend_enable_mask = RTQ2208_BUCK_EN_STR_MASK;
rdesc->suspend_mode_mask = RTQ2208_BUCK_STRMODE_MASK;
- } else {
+ break;
+ default:
+ fixed_uV = idx == RTQ2208_LDO2 ? ldo2_fixed : ldo1_fixed;
+ if (fixed_uV) {
+ desc->n_voltages = 1;
+ desc->fixed_uV = fixed_uV;
+ desc->ops = &rtq2208_regulator_ldo_fix_ops;
+ } else {
+ desc->n_voltages = ARRAY_SIZE(rtq2208_ldo_volt_table);
+ desc->volt_table = rtq2208_ldo_volt_table;
+ desc->ops = &rtq2208_regulator_ldo_adj_ops;
+ }
+
/* init ldo desc */
desc->active_discharge_reg = RTQ2208_REG_LDO_DVS_CTRL;
desc->active_discharge_on = curr_info->dis_on;
@@ -490,13 +457,15 @@ static void rtq2208_init_regulator_desc(struct rtq2208_regulator_desc *rdesc, in
rdesc->suspend_config_reg = curr_info->base;
rdesc->suspend_enable_mask = RTQ2208_LDO_EN_STR_MASK;
+ break;
}
}
static int rtq2208_parse_regulator_dt_data(int n_regulator, const unsigned int *regulator_idx_table,
- struct rtq2208_regulator_desc *rdesc[RTQ2208_LDO_MAX], struct device *dev)
+ struct rtq2208_regulator_desc *rdesc[RTQ2208_LDO_MAX], struct device *dev,
+ unsigned int ldo1_fixed, unsigned int ldo2_fixed)
{
- int mtp_sel, i, idx, ret;
+ int mtp_sel, i, idx;
/* get mtp_sel0 or mtp_sel1 */
mtp_sel = device_property_read_bool(dev, "richtek,mtp-sel-high");
@@ -508,43 +477,101 @@ static int rtq2208_parse_regulator_dt_data(int n_regulator, const unsigned int *
if (!rdesc[i])
return -ENOMEM;
- rtq2208_init_regulator_desc(rdesc[i], mtp_sel, idx);
-
- /* init ldo dvs ability */
- if (idx >= RTQ2208_LDO2)
- rtq2208_ldo_match[idx - RTQ2208_LDO2].desc = &rdesc[i]->desc;
+ rtq2208_init_regulator_desc(rdesc[i], mtp_sel, idx, ldo1_fixed, ldo2_fixed);
}
- /* init ldo fixed_uV */
- ret = rtq2208_of_get_ldo_dvs_ability(dev);
- if (ret)
- return dev_err_probe(dev, ret, "Failed to get ldo fixed_uV\n");
-
return 0;
}
-/** different slave address corresponds different used bucks
- * slave address 0x10: BUCK[BCA FGE]
- * slave address 0x20: BUCK[BC FGHE]
- * slave address 0x40: BUCK[C G]
- */
-static int rtq2208_regulator_check(int slave_addr, int *num,
- int *regulator_idx_table, unsigned int *buck_masks)
+static int rtq2208_regulator_check(struct device *dev, int *num, int *regulator_idx_table,
+ unsigned int *buck_masks, unsigned int *ldo1_fixed_uV,
+ unsigned int *ldo2_fixed_uV)
{
- static bool rtq2208_used_table[3][RTQ2208_LDO_MAX] = {
- /* BUCK[BCA FGE], LDO[12] */
- {1, 1, 0, 1, 1, 1, 0, 1, 1, 1},
- /* BUCK[BC FGHE], LDO[12]*/
- {1, 1, 0, 0, 1, 1, 1, 1, 1, 1},
- /* BUCK[C G], LDO[12] */
- {0, 1, 0, 0, 0, 1, 0, 0, 1, 1},
- };
- int i, idx = ffs(slave_addr >> 4) - 1;
+ struct regmap *regmap = dev_get_regmap(dev, NULL);
+ bool rtq2208_used_table[RTQ2208_LDO_MAX] = {0};
+ u8 entry_key[] = { 0x69, 0x01 };
+ unsigned int buck_phase, ldo_cfg0, ldo_cfg1;
+ int i, ret;
u8 mask;
+ ret = regmap_raw_write(regmap, RTQ2208_REG_HIDDEN0, entry_key, ARRAY_SIZE(entry_key));
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to enter hidden page\n");
+
+ ret = regmap_read(regmap, RTQ2208_REG_HIDDEN_BUCKPH, &buck_phase);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to read buck phase configuration\n");
+
+ ret = regmap_read(regmap, RTQ2208_REG_HIDDEN_LDOCFG0, &ldo_cfg0);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to read ldo cfg0\n");
+
+ ret = regmap_read(regmap, RTQ2208_REG_HIDDEN_LDOCFG1, &ldo_cfg1);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to read ldo cfg1\n");
+
+ ret = regmap_write(regmap, RTQ2208_REG_HIDDEN1, 0x00);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to exit hidden page\n");
+
+ dev_info(dev, "BUCK Phase 0x%x\n", buck_phase);
+ /*
+ * Use buck phase configuration to assign used table mask
+ * GROUP1 GROUP2
+ * 0 -> 2P + 2P BC FG
+ * 1 -> 2P + 1P + 1P BCA FGE
+ * 2 -> 1P + 1P + 1P + 1P BCDA FGHE
+ * 3 -> 3P + 1P BC FG
+ * others -> 4P C G
+ */
+ switch (FIELD_GET(RTQ2208_MASK_BUCKPH_GROUP1, buck_phase)) {
+ case 2:
+ rtq2208_used_table[RTQ2208_BUCK_D] = true;
+ fallthrough;
+ case 1:
+ rtq2208_used_table[RTQ2208_BUCK_A] = true;
+ fallthrough;
+ case 0:
+ case 3:
+ rtq2208_used_table[RTQ2208_BUCK_B] = true;
+ fallthrough;
+ default:
+ rtq2208_used_table[RTQ2208_BUCK_C] = true;
+ break;
+ }
+
+ switch (FIELD_GET(RTQ2208_MASK_BUCKPH_GROUP2, buck_phase)) {
+ case 2:
+ rtq2208_used_table[RTQ2208_BUCK_F] = true;
+ fallthrough;
+ case 1:
+ rtq2208_used_table[RTQ2208_BUCK_E] = true;
+ fallthrough;
+ case 0:
+ case 3:
+ rtq2208_used_table[RTQ2208_BUCK_H] = true;
+ fallthrough;
+ default:
+ rtq2208_used_table[RTQ2208_BUCK_G] = true;
+ break;
+ }
+
+ *ldo1_fixed_uV = FIELD_GET(RTQ2208_MASK_LDO1_FIXED, ldo_cfg1) ? 1200000 : 0;
+
+ if (!FIELD_GET(RTQ2208_MASK_LDO2_OPT0, ldo_cfg0) &&
+ !FIELD_GET(RTQ2208_MASK_LDO2_OPT1, ldo_cfg1))
+ *ldo2_fixed_uV = 0;
+ else if (FIELD_GET(RTQ2208_MASK_LDO2_OPT1, ldo_cfg1))
+ *ldo2_fixed_uV = 900000;
+ else
+ *ldo2_fixed_uV = 1200000;
+
+ /* By default, LDO1 & LDO2 are always used */
+ rtq2208_used_table[RTQ2208_LDO1] = rtq2208_used_table[RTQ2208_LDO2] = true;
+
for (i = 0; i < RTQ2208_LDO_MAX; i++) {
- if (!rtq2208_used_table[idx][i])
+ if (!rtq2208_used_table[i])
continue;
regulator_idx_table[(*num)++] = i;
@@ -559,7 +586,7 @@ static int rtq2208_regulator_check(int slave_addr, int *num,
static const struct regmap_config rtq2208_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
- .max_register = 0xEF,
+ .max_register = 0xFF,
};
static int rtq2208_probe(struct i2c_client *i2c)
@@ -573,6 +600,7 @@ static int rtq2208_probe(struct i2c_client *i2c)
int i, ret = 0, idx, n_regulator = 0;
unsigned int regulator_idx_table[RTQ2208_LDO_MAX],
buck_masks[RTQ2208_BUCK_NUM_IRQ_REGS] = {0x33, 0x33, 0x33, 0x33, 0x33};
+ unsigned int ldo1_fixed_uV, ldo2_fixed_uV;
rdev_map = devm_kzalloc(dev, sizeof(struct rtq2208_rdev_map), GFP_KERNEL);
if (!rdev_map)
@@ -583,7 +611,8 @@ static int rtq2208_probe(struct i2c_client *i2c)
return dev_err_probe(dev, PTR_ERR(regmap), "Failed to allocate regmap\n");
/* get needed regulator */
- ret = rtq2208_regulator_check(i2c->addr, &n_regulator, regulator_idx_table, buck_masks);
+ ret = rtq2208_regulator_check(dev, &n_regulator, regulator_idx_table, buck_masks,
+ &ldo1_fixed_uV, &ldo2_fixed_uV);
if (ret)
return dev_err_probe(dev, ret, "Failed to check used regulators\n");
@@ -593,7 +622,8 @@ static int rtq2208_probe(struct i2c_client *i2c)
cfg.dev = dev;
/* init regulator desc */
- ret = rtq2208_parse_regulator_dt_data(n_regulator, regulator_idx_table, rdesc, dev);
+ ret = rtq2208_parse_regulator_dt_data(n_regulator, regulator_idx_table, rdesc, dev,
+ ldo1_fixed_uV, ldo2_fixed_uV);
if (ret)
return ret;
diff --git a/drivers/regulator/rtq6752-regulator.c b/drivers/regulator/rtq6752-regulator.c
index d35d844eff3b..618904ede72c 100644
--- a/drivers/regulator/rtq6752-regulator.c
+++ b/drivers/regulator/rtq6752-regulator.c
@@ -105,7 +105,7 @@ static int rtq6752_get_error_flags(struct regulator_dev *rdev,
unsigned int *flags)
{
unsigned int val, events = 0;
- const unsigned int fault_mask[] = {
+ static const unsigned int fault_mask[] = {
RTQ6752_PAVDDF_MASK, RTQ6752_NAVDDF_MASK };
int rid = rdev_get_id(rdev), ret;
diff --git a/drivers/reset/Kconfig b/drivers/reset/Kconfig
index 5b3abb6db248..99f6f9784e68 100644
--- a/drivers/reset/Kconfig
+++ b/drivers/reset/Kconfig
@@ -96,6 +96,13 @@ config RESET_HSDK
help
This enables the reset controller driver for HSDK board.
+config RESET_IMX_SCU
+ tristate "i.MX8Q Reset Driver"
+ depends on IMX_SCU && HAVE_ARM_SMCCC
+ depends on (ARM64 && ARCH_MXC) || COMPILE_TEST
+ help
+ This enables the reset controller driver for the i.MX8QM/i.MX8QXP SoCs.
+
config RESET_IMX7
tristate "i.MX7/8 Reset Driver"
depends on HAS_IOMEM
diff --git a/drivers/reset/Makefile b/drivers/reset/Makefile
index 677c4d1e2632..31f9904d13f9 100644
--- a/drivers/reset/Makefile
+++ b/drivers/reset/Makefile
@@ -15,6 +15,7 @@ obj-$(CONFIG_RESET_BRCMSTB_RESCAL) += reset-brcmstb-rescal.o
obj-$(CONFIG_RESET_EYEQ) += reset-eyeq.o
obj-$(CONFIG_RESET_GPIO) += reset-gpio.o
obj-$(CONFIG_RESET_HSDK) += reset-hsdk.o
+obj-$(CONFIG_RESET_IMX_SCU) += reset-imx-scu.o
obj-$(CONFIG_RESET_IMX7) += reset-imx7.o
obj-$(CONFIG_RESET_IMX8MP_AUDIOMIX) += reset-imx8mp-audiomix.o
obj-$(CONFIG_RESET_INTEL_GW) += reset-intel-gw.o
diff --git a/drivers/reset/reset-imx-scu.c b/drivers/reset/reset-imx-scu.c
new file mode 100644
index 000000000000..919fc29f944c
--- /dev/null
+++ b/drivers/reset/reset-imx-scu.c
@@ -0,0 +1,101 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2025 NXP
+ * Frank Li <Frank.Li@nxp.com>
+ */
+#include <linux/firmware/imx/svc/misc.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/reset-controller.h>
+
+#include <dt-bindings/firmware/imx/rsrc.h>
+
+struct imx_scu_reset {
+ struct reset_controller_dev rc;
+ struct imx_sc_ipc *ipc_handle;
+};
+
+static struct imx_scu_reset *to_imx_scu(struct reset_controller_dev *rc)
+{
+ return container_of(rc, struct imx_scu_reset, rc);
+}
+
+struct imx_scu_id_map {
+ u32 resource_id;
+ u32 command_id;
+};
+
+static const struct imx_scu_id_map imx_scu_id_map[] = {
+ { IMX_SC_R_CSI_0, IMX_SC_C_MIPI_RESET },
+ { IMX_SC_R_CSI_1, IMX_SC_C_MIPI_RESET },
+};
+
+static int imx_scu_reset_assert(struct reset_controller_dev *rc, unsigned long id)
+{
+ struct imx_scu_reset *priv = to_imx_scu(rc);
+
+ return imx_sc_misc_set_control(priv->ipc_handle, imx_scu_id_map[id].resource_id,
+ imx_scu_id_map[id].command_id, true);
+}
+
+static const struct reset_control_ops imx_scu_reset_ops = {
+ .assert = imx_scu_reset_assert,
+};
+
+static int imx_scu_xlate(struct reset_controller_dev *rc, const struct of_phandle_args *reset_spec)
+{
+ int i;
+
+ for (i = 0; i < rc->nr_resets; i++)
+ if (reset_spec->args[0] == imx_scu_id_map[i].resource_id)
+ return i;
+
+ return -EINVAL;
+}
+
+static int imx_scu_reset_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct imx_scu_reset *priv;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, &priv->rc);
+
+ ret = imx_scu_get_handle(&priv->ipc_handle);
+ if (ret)
+ return dev_err_probe(dev, ret, "sc_misc_MIPI get ipc handle failed!\n");
+
+ priv->rc.ops = &imx_scu_reset_ops;
+ priv->rc.owner = THIS_MODULE;
+ priv->rc.of_node = dev->of_node;
+ priv->rc.of_reset_n_cells = 1;
+ priv->rc.of_xlate = imx_scu_xlate;
+ priv->rc.nr_resets = ARRAY_SIZE(imx_scu_id_map);
+
+ return devm_reset_controller_register(dev, &priv->rc);
+}
+
+static const struct of_device_id imx_scu_reset_ids[] = {
+ { .compatible = "fsl,imx-scu-reset", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, imx_scu_reset_ids);
+
+static struct platform_driver imx_scu_reset_driver = {
+ .probe = imx_scu_reset_probe,
+ .driver = {
+ .name = "scu-reset",
+ .of_match_table = imx_scu_reset_ids,
+ },
+};
+module_platform_driver(imx_scu_reset_driver);
+
+MODULE_AUTHOR("Frank Li <Frank.Li@nxp.com>");
+MODULE_DESCRIPTION("i.MX scu reset driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/reset/reset-microchip-sparx5.c b/drivers/reset/reset-microchip-sparx5.c
index aa5464be7053..6d3e75b33260 100644
--- a/drivers/reset/reset-microchip-sparx5.c
+++ b/drivers/reset/reset-microchip-sparx5.c
@@ -8,6 +8,7 @@
*/
#include <linux/mfd/syscon.h>
#include <linux/of.h>
+#include <linux/of_address.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/property.h>
@@ -72,14 +73,22 @@ static struct regmap *mchp_lan966x_syscon_to_regmap(struct device *dev,
struct device_node *syscon_np)
{
struct regmap_config regmap_config = mchp_lan966x_syscon_regmap_config;
- resource_size_t size;
+ struct resource res;
void __iomem *base;
+ int err;
+
+ err = of_address_to_resource(syscon_np, 0, &res);
+ if (err)
+ return ERR_PTR(err);
- base = devm_of_iomap(dev, syscon_np, 0, &size);
- if (IS_ERR(base))
- return ERR_CAST(base);
+ /* It is not possible to use devm_of_iomap because this resource is
+ * shared with other drivers.
+ */
+ base = devm_ioremap(dev, res.start, resource_size(&res));
+ if (!base)
+ return ERR_PTR(-ENOMEM);
- regmap_config.max_register = size - 4;
+ regmap_config.max_register = resource_size(&res) - 4;
return devm_regmap_init_mmio(dev, base, &regmap_config);
}
diff --git a/drivers/rtc/class.c b/drivers/rtc/class.c
index e31fa0ad127e..b88cd4fb295b 100644
--- a/drivers/rtc/class.c
+++ b/drivers/rtc/class.c
@@ -240,8 +240,7 @@ static struct rtc_device *rtc_allocate_device(void)
/* Init uie timer */
rtc_timer_init(&rtc->uie_rtctimer, rtc_uie_update_irq, rtc);
/* Init pie timer */
- hrtimer_init(&rtc->pie_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- rtc->pie_timer.function = rtc_pie_update_irq;
+ hrtimer_setup(&rtc->pie_timer, rtc_pie_update_irq, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
rtc->pie_enabled = 0;
set_bit(RTC_FEATURE_ALARM, rtc->features);
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index 26e1ea1940ec..62feb2c639d5 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -2326,8 +2326,7 @@ static inline int __init ap_async_init(void)
*/
if (MACHINE_IS_VM)
poll_high_timeout = 1500000;
- hrtimer_init(&ap_poll_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
- ap_poll_timer.function = ap_poll_timeout;
+ hrtimer_setup(&ap_poll_timer, ap_poll_timeout, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
queue_work(system_long_wq, &ap_scan_bus_work);
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 37c24ffea65c..5a3c670aec27 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -303,9 +303,9 @@ if SCSI_LOWLEVEL && SCSI
config ISCSI_TCP
tristate "iSCSI Initiator over TCP/IP"
depends on SCSI && INET
+ select CRC32
select CRYPTO
select CRYPTO_MD5
- select CRYPTO_CRC32C
select SCSI_ISCSI_ATTRS
help
The iSCSI Driver provides a host with the ability to access storage
@@ -336,7 +336,6 @@ source "drivers/scsi/cxgbi/Kconfig"
source "drivers/scsi/bnx2i/Kconfig"
source "drivers/scsi/bnx2fc/Kconfig"
source "drivers/scsi/be2iscsi/Kconfig"
-source "drivers/scsi/cxlflash/Kconfig"
config SGIWD93_SCSI
tristate "SGI WD93C93 SCSI Driver"
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 1313ddf2fd1a..16de3e41f94c 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -96,7 +96,6 @@ obj-$(CONFIG_SCSI_SYM53C8XX_2) += sym53c8xx_2/
obj-$(CONFIG_SCSI_ZALON) += zalon7xx.o
obj-$(CONFIG_SCSI_DC395x) += dc395x.o
obj-$(CONFIG_SCSI_AM53C974) += esp_scsi.o am53c974.o
-obj-$(CONFIG_CXLFLASH) += cxlflash/
obj-$(CONFIG_MEGARAID_LEGACY) += megaraid.o
obj-$(CONFIG_MEGARAID_NEWGEN) += megaraid/
obj-$(CONFIG_MEGARAID_SAS) += megaraid/
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index abf6a82b74af..0be719f38377 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -3221,8 +3221,8 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
break;
}
fallthrough;
- case RESERVE:
- case RELEASE:
+ case RESERVE_6:
+ case RELEASE_6:
case REZERO_UNIT:
case REASSIGN_BLOCKS:
case SEEK_10:
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index 91170a67cc91..4b12e6dd8f07 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -2029,7 +2029,7 @@ static void aac_pci_resume(struct pci_dev *pdev)
dev_err(&pdev->dev, "aacraid: PCI error - resume\n");
}
-static struct pci_error_handlers aac_pci_err_handler = {
+static const struct pci_error_handlers aac_pci_err_handler = {
.error_detected = aac_pci_error_detected,
.mmio_enabled = aac_pci_mmio_enabled,
.slot_reset = aac_pci_slot_reset,
diff --git a/drivers/scsi/arm/acornscsi.c b/drivers/scsi/arm/acornscsi.c
index e50a3dbf9de3..ef21b85cf014 100644
--- a/drivers/scsi/arm/acornscsi.c
+++ b/drivers/scsi/arm/acornscsi.c
@@ -591,7 +591,7 @@ datadir_t acornscsi_datadirection(int command)
case CHANGE_DEFINITION: case COMPARE: case COPY:
case COPY_VERIFY: case LOG_SELECT: case MODE_SELECT:
case MODE_SELECT_10: case SEND_DIAGNOSTIC: case WRITE_BUFFER:
- case FORMAT_UNIT: case REASSIGN_BLOCKS: case RESERVE:
+ case FORMAT_UNIT: case REASSIGN_BLOCKS: case RESERVE_6:
case SEARCH_EQUAL: case SEARCH_HIGH: case SEARCH_LOW:
case WRITE_6: case WRITE_10: case WRITE_VERIFY:
case UPDATE_BLOCK: case WRITE_LONG: case WRITE_SAME:
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index 76a1e373386e..a8b399ed98fc 100644
--- a/drivers/scsi/be2iscsi/be_main.c
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -5776,7 +5776,7 @@ static void beiscsi_remove(struct pci_dev *pcidev)
}
-static struct pci_error_handlers beiscsi_eeh_handlers = {
+static const struct pci_error_handlers beiscsi_eeh_handlers = {
.error_detected = beiscsi_eeh_err_detected,
.slot_reset = beiscsi_eeh_reset,
.resume = beiscsi_eeh_resume,
diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c
index 6aa1d3a7e24b..f015c53de0d4 100644
--- a/drivers/scsi/bfa/bfad.c
+++ b/drivers/scsi/bfa/bfad.c
@@ -1642,7 +1642,7 @@ MODULE_DEVICE_TABLE(pci, bfad_id_table);
/*
* PCI error recovery handlers.
*/
-static struct pci_error_handlers bfad_err_handler = {
+static const struct pci_error_handlers bfad_err_handler = {
.error_detected = bfad_pci_error_detected,
.slot_reset = bfad_pci_slot_reset,
.mmio_enabled = bfad_pci_mmio_enabled,
diff --git a/drivers/scsi/csiostor/csio_init.c b/drivers/scsi/csiostor/csio_init.c
index 9a3f2ed050bd..79c8dafdd49e 100644
--- a/drivers/scsi/csiostor/csio_init.c
+++ b/drivers/scsi/csiostor/csio_init.c
@@ -1162,7 +1162,7 @@ err_resume_exit:
dev_err(&pdev->dev, "resume of device failed: %d\n", rv);
}
-static struct pci_error_handlers csio_err_handler = {
+static const struct pci_error_handlers csio_err_handler = {
.error_detected = csio_pci_error_detected,
.slot_reset = csio_pci_slot_reset,
.resume = csio_pci_resume,
diff --git a/drivers/scsi/cxlflash/Kconfig b/drivers/scsi/cxlflash/Kconfig
deleted file mode 100644
index c424d36e89a6..000000000000
--- a/drivers/scsi/cxlflash/Kconfig
+++ /dev/null
@@ -1,15 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-#
-# IBM CXL-attached Flash Accelerator SCSI Driver
-#
-
-config CXLFLASH
- tristate "Support for IBM CAPI Flash (DEPRECATED)"
- depends on PCI && SCSI && (CXL || OCXL) && EEH
- select IRQ_POLL
- help
- The cxlflash driver is deprecated and will be removed in a future
- kernel release.
-
- Allows CAPI Accelerated IO to Flash
- If unsure, say N.
diff --git a/drivers/scsi/cxlflash/Makefile b/drivers/scsi/cxlflash/Makefile
deleted file mode 100644
index fd2f0dd9daf9..000000000000
--- a/drivers/scsi/cxlflash/Makefile
+++ /dev/null
@@ -1,5 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-obj-$(CONFIG_CXLFLASH) += cxlflash.o
-cxlflash-y += main.o superpipe.o lunmgt.o vlun.o
-cxlflash-$(CONFIG_CXL) += cxl_hw.o
-cxlflash-$(CONFIG_OCXL) += ocxl_hw.o
diff --git a/drivers/scsi/cxlflash/backend.h b/drivers/scsi/cxlflash/backend.h
deleted file mode 100644
index 181e0445ed42..000000000000
--- a/drivers/scsi/cxlflash/backend.h
+++ /dev/null
@@ -1,48 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * CXL Flash Device Driver
- *
- * Written by: Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
- * Uma Krishnan <ukrishn@linux.vnet.ibm.com>, IBM Corporation
- *
- * Copyright (C) 2018 IBM Corporation
- */
-
-#ifndef _CXLFLASH_BACKEND_H
-#define _CXLFLASH_BACKEND_H
-
-extern const struct cxlflash_backend_ops cxlflash_cxl_ops;
-extern const struct cxlflash_backend_ops cxlflash_ocxl_ops;
-
-struct cxlflash_backend_ops {
- struct module *module;
- void __iomem * (*psa_map)(void *ctx_cookie);
- void (*psa_unmap)(void __iomem *addr);
- int (*process_element)(void *ctx_cookie);
- int (*map_afu_irq)(void *ctx_cookie, int num, irq_handler_t handler,
- void *cookie, char *name);
- void (*unmap_afu_irq)(void *ctx_cookie, int num, void *cookie);
- u64 (*get_irq_objhndl)(void *ctx_cookie, int irq);
- int (*start_context)(void *ctx_cookie);
- int (*stop_context)(void *ctx_cookie);
- int (*afu_reset)(void *ctx_cookie);
- void (*set_master)(void *ctx_cookie);
- void * (*get_context)(struct pci_dev *dev, void *afu_cookie);
- void * (*dev_context_init)(struct pci_dev *dev, void *afu_cookie);
- int (*release_context)(void *ctx_cookie);
- void (*perst_reloads_same_image)(void *afu_cookie, bool image);
- ssize_t (*read_adapter_vpd)(struct pci_dev *dev, void *buf,
- size_t count);
- int (*allocate_afu_irqs)(void *ctx_cookie, int num);
- void (*free_afu_irqs)(void *ctx_cookie);
- void * (*create_afu)(struct pci_dev *dev);
- void (*destroy_afu)(void *afu_cookie);
- struct file * (*get_fd)(void *ctx_cookie, struct file_operations *fops,
- int *fd);
- void * (*fops_get_context)(struct file *file);
- int (*start_work)(void *ctx_cookie, u64 irqs);
- int (*fd_mmap)(struct file *file, struct vm_area_struct *vm);
- int (*fd_release)(struct inode *inode, struct file *file);
-};
-
-#endif /* _CXLFLASH_BACKEND_H */
diff --git a/drivers/scsi/cxlflash/common.h b/drivers/scsi/cxlflash/common.h
deleted file mode 100644
index de6229e27b48..000000000000
--- a/drivers/scsi/cxlflash/common.h
+++ /dev/null
@@ -1,340 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * CXL Flash Device Driver
- *
- * Written by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>, IBM Corporation
- * Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
- *
- * Copyright (C) 2015 IBM Corporation
- */
-
-#ifndef _CXLFLASH_COMMON_H
-#define _CXLFLASH_COMMON_H
-
-#include <linux/async.h>
-#include <linux/cdev.h>
-#include <linux/irq_poll.h>
-#include <linux/list.h>
-#include <linux/rwsem.h>
-#include <linux/types.h>
-#include <scsi/scsi.h>
-#include <scsi/scsi_cmnd.h>
-#include <scsi/scsi_device.h>
-
-#include "backend.h"
-
-extern const struct file_operations cxlflash_cxl_fops;
-
-#define MAX_CONTEXT CXLFLASH_MAX_CONTEXT /* num contexts per afu */
-#define MAX_FC_PORTS CXLFLASH_MAX_FC_PORTS /* max ports per AFU */
-#define LEGACY_FC_PORTS 2 /* legacy ports per AFU */
-
-#define CHAN2PORTBANK(_x) ((_x) >> ilog2(CXLFLASH_NUM_FC_PORTS_PER_BANK))
-#define CHAN2BANKPORT(_x) ((_x) & (CXLFLASH_NUM_FC_PORTS_PER_BANK - 1))
-
-#define CHAN2PORTMASK(_x) (1 << (_x)) /* channel to port mask */
-#define PORTMASK2CHAN(_x) (ilog2((_x))) /* port mask to channel */
-#define PORTNUM2CHAN(_x) ((_x) - 1) /* port number to channel */
-
-#define CXLFLASH_BLOCK_SIZE 4096 /* 4K blocks */
-#define CXLFLASH_MAX_XFER_SIZE 16777216 /* 16MB transfer */
-#define CXLFLASH_MAX_SECTORS (CXLFLASH_MAX_XFER_SIZE/512) /* SCSI wants
- * max_sectors
- * in units of
- * 512 byte
- * sectors
- */
-
-#define MAX_RHT_PER_CONTEXT (PAGE_SIZE / sizeof(struct sisl_rht_entry))
-
-/* AFU command retry limit */
-#define MC_RETRY_CNT 5 /* Sufficient for SCSI and certain AFU errors */
-
-/* Command management definitions */
-#define CXLFLASH_MAX_CMDS 256
-#define CXLFLASH_MAX_CMDS_PER_LUN CXLFLASH_MAX_CMDS
-
-/* RRQ for master issued cmds */
-#define NUM_RRQ_ENTRY CXLFLASH_MAX_CMDS
-
-/* SQ for master issued cmds */
-#define NUM_SQ_ENTRY CXLFLASH_MAX_CMDS
-
-/* Hardware queue definitions */
-#define CXLFLASH_DEF_HWQS 1
-#define CXLFLASH_MAX_HWQS 8
-#define PRIMARY_HWQ 0
-
-
-static inline void check_sizes(void)
-{
- BUILD_BUG_ON_NOT_POWER_OF_2(CXLFLASH_NUM_FC_PORTS_PER_BANK);
- BUILD_BUG_ON_NOT_POWER_OF_2(CXLFLASH_MAX_CMDS);
-}
-
-/* AFU defines a fixed size of 4K for command buffers (borrow 4K page define) */
-#define CMD_BUFSIZE SIZE_4K
-
-enum cxlflash_lr_state {
- LINK_RESET_INVALID,
- LINK_RESET_REQUIRED,
- LINK_RESET_COMPLETE
-};
-
-enum cxlflash_init_state {
- INIT_STATE_NONE,
- INIT_STATE_PCI,
- INIT_STATE_AFU,
- INIT_STATE_SCSI,
- INIT_STATE_CDEV
-};
-
-enum cxlflash_state {
- STATE_PROBING, /* Initial state during probe */
- STATE_PROBED, /* Temporary state, probe completed but EEH occurred */
- STATE_NORMAL, /* Normal running state, everything good */
- STATE_RESET, /* Reset state, trying to reset/recover */
- STATE_FAILTERM /* Failed/terminating state, error out users/threads */
-};
-
-enum cxlflash_hwq_mode {
- HWQ_MODE_RR, /* Roundrobin (default) */
- HWQ_MODE_TAG, /* Distribute based on block MQ tag */
- HWQ_MODE_CPU, /* CPU affinity */
- MAX_HWQ_MODE
-};
-
-/*
- * Each context has its own set of resource handles that is visible
- * only from that context.
- */
-
-struct cxlflash_cfg {
- struct afu *afu;
-
- const struct cxlflash_backend_ops *ops;
- struct pci_dev *dev;
- struct pci_device_id *dev_id;
- struct Scsi_Host *host;
- int num_fc_ports;
- struct cdev cdev;
- struct device *chardev;
-
- ulong cxlflash_regs_pci;
-
- struct work_struct work_q;
- enum cxlflash_init_state init_state;
- enum cxlflash_lr_state lr_state;
- int lr_port;
- atomic_t scan_host_needed;
-
- void *afu_cookie;
-
- atomic_t recovery_threads;
- struct mutex ctx_recovery_mutex;
- struct mutex ctx_tbl_list_mutex;
- struct rw_semaphore ioctl_rwsem;
- struct ctx_info *ctx_tbl[MAX_CONTEXT];
- struct list_head ctx_err_recovery; /* contexts w/ recovery pending */
- struct file_operations cxl_fops;
-
- /* Parameters that are LUN table related */
- int last_lun_index[MAX_FC_PORTS];
- int promote_lun_index;
- struct list_head lluns; /* list of llun_info structs */
-
- wait_queue_head_t tmf_waitq;
- spinlock_t tmf_slock;
- bool tmf_active;
- bool ws_unmap; /* Write-same unmap supported */
- wait_queue_head_t reset_waitq;
- enum cxlflash_state state;
- async_cookie_t async_reset_cookie;
-};
-
-struct afu_cmd {
- struct sisl_ioarcb rcb; /* IOARCB (cache line aligned) */
- struct sisl_ioasa sa; /* IOASA must follow IOARCB */
- struct afu *parent;
- struct scsi_cmnd *scp;
- struct completion cevent;
- struct list_head queue;
- u32 hwq_index;
-
- u8 cmd_tmf:1,
- cmd_aborted:1;
-
- struct list_head list; /* Pending commands link */
-
- /* As per the SISLITE spec the IOARCB EA has to be 16-byte aligned.
- * However for performance reasons the IOARCB/IOASA should be
- * cache line aligned.
- */
-} __aligned(cache_line_size());
-
-static inline struct afu_cmd *sc_to_afuc(struct scsi_cmnd *sc)
-{
- return PTR_ALIGN(scsi_cmd_priv(sc), __alignof__(struct afu_cmd));
-}
-
-static inline struct afu_cmd *sc_to_afuci(struct scsi_cmnd *sc)
-{
- struct afu_cmd *afuc = sc_to_afuc(sc);
-
- INIT_LIST_HEAD(&afuc->queue);
- return afuc;
-}
-
-static inline struct afu_cmd *sc_to_afucz(struct scsi_cmnd *sc)
-{
- struct afu_cmd *afuc = sc_to_afuc(sc);
-
- memset(afuc, 0, sizeof(*afuc));
- return sc_to_afuci(sc);
-}
-
-struct hwq {
- /* Stuff requiring alignment go first. */
- struct sisl_ioarcb sq[NUM_SQ_ENTRY]; /* 16K SQ */
- u64 rrq_entry[NUM_RRQ_ENTRY]; /* 2K RRQ */
-
- /* Beware of alignment till here. Preferably introduce new
- * fields after this point
- */
- struct afu *afu;
- void *ctx_cookie;
- struct sisl_host_map __iomem *host_map; /* MC host map */
- struct sisl_ctrl_map __iomem *ctrl_map; /* MC control map */
- ctx_hndl_t ctx_hndl; /* master's context handle */
- u32 index; /* Index of this hwq */
- int num_irqs; /* Number of interrupts requested for context */
- struct list_head pending_cmds; /* Commands pending completion */
-
- atomic_t hsq_credits;
- spinlock_t hsq_slock; /* Hardware send queue lock */
- struct sisl_ioarcb *hsq_start;
- struct sisl_ioarcb *hsq_end;
- struct sisl_ioarcb *hsq_curr;
- spinlock_t hrrq_slock;
- u64 *hrrq_start;
- u64 *hrrq_end;
- u64 *hrrq_curr;
- bool toggle;
- bool hrrq_online;
-
- s64 room;
-
- struct irq_poll irqpoll;
-} __aligned(cache_line_size());
-
-struct afu {
- struct hwq hwqs[CXLFLASH_MAX_HWQS];
- int (*send_cmd)(struct afu *afu, struct afu_cmd *cmd);
- int (*context_reset)(struct hwq *hwq);
-
- /* AFU HW */
- struct cxlflash_afu_map __iomem *afu_map; /* entire MMIO map */
-
- atomic_t cmds_active; /* Number of currently active AFU commands */
- struct mutex sync_active; /* Mutex to serialize AFU commands */
- u64 hb;
- u32 internal_lun; /* User-desired LUN mode for this AFU */
-
- u32 num_hwqs; /* Number of hardware queues */
- u32 desired_hwqs; /* Desired h/w queues, effective on AFU reset */
- enum cxlflash_hwq_mode hwq_mode; /* Steering mode for h/w queues */
- u32 hwq_rr_count; /* Count to distribute traffic for roundrobin */
-
- char version[16];
- u64 interface_version;
-
- u32 irqpoll_weight;
- struct cxlflash_cfg *parent; /* Pointer back to parent cxlflash_cfg */
-};
-
-static inline struct hwq *get_hwq(struct afu *afu, u32 index)
-{
- WARN_ON(index >= CXLFLASH_MAX_HWQS);
-
- return &afu->hwqs[index];
-}
-
-static inline bool afu_is_irqpoll_enabled(struct afu *afu)
-{
- return !!afu->irqpoll_weight;
-}
-
-static inline bool afu_has_cap(struct afu *afu, u64 cap)
-{
- u64 afu_cap = afu->interface_version >> SISL_INTVER_CAP_SHIFT;
-
- return afu_cap & cap;
-}
-
-static inline bool afu_is_ocxl_lisn(struct afu *afu)
-{
- return afu_has_cap(afu, SISL_INTVER_CAP_OCXL_LISN);
-}
-
-static inline bool afu_is_afu_debug(struct afu *afu)
-{
- return afu_has_cap(afu, SISL_INTVER_CAP_AFU_DEBUG);
-}
-
-static inline bool afu_is_lun_provision(struct afu *afu)
-{
- return afu_has_cap(afu, SISL_INTVER_CAP_LUN_PROVISION);
-}
-
-static inline bool afu_is_sq_cmd_mode(struct afu *afu)
-{
- return afu_has_cap(afu, SISL_INTVER_CAP_SQ_CMD_MODE);
-}
-
-static inline bool afu_is_ioarrin_cmd_mode(struct afu *afu)
-{
- return afu_has_cap(afu, SISL_INTVER_CAP_IOARRIN_CMD_MODE);
-}
-
-static inline u64 lun_to_lunid(u64 lun)
-{
- __be64 lun_id;
-
- int_to_scsilun(lun, (struct scsi_lun *)&lun_id);
- return be64_to_cpu(lun_id);
-}
-
-static inline struct fc_port_bank __iomem *get_fc_port_bank(
- struct cxlflash_cfg *cfg, int i)
-{
- struct afu *afu = cfg->afu;
-
- return &afu->afu_map->global.bank[CHAN2PORTBANK(i)];
-}
-
-static inline __be64 __iomem *get_fc_port_regs(struct cxlflash_cfg *cfg, int i)
-{
- struct fc_port_bank __iomem *fcpb = get_fc_port_bank(cfg, i);
-
- return &fcpb->fc_port_regs[CHAN2BANKPORT(i)][0];
-}
-
-static inline __be64 __iomem *get_fc_port_luns(struct cxlflash_cfg *cfg, int i)
-{
- struct fc_port_bank __iomem *fcpb = get_fc_port_bank(cfg, i);
-
- return &fcpb->fc_port_luns[CHAN2BANKPORT(i)][0];
-}
-
-int cxlflash_afu_sync(struct afu *afu, ctx_hndl_t c, res_hndl_t r, u8 mode);
-void cxlflash_list_init(void);
-void cxlflash_term_global_luns(void);
-void cxlflash_free_errpage(void);
-int cxlflash_ioctl(struct scsi_device *sdev, unsigned int cmd,
- void __user *arg);
-void cxlflash_stop_term_user_contexts(struct cxlflash_cfg *cfg);
-int cxlflash_mark_contexts_error(struct cxlflash_cfg *cfg);
-void cxlflash_term_local_luns(struct cxlflash_cfg *cfg);
-void cxlflash_restore_luntable(struct cxlflash_cfg *cfg);
-
-#endif /* ifndef _CXLFLASH_COMMON_H */
diff --git a/drivers/scsi/cxlflash/cxl_hw.c b/drivers/scsi/cxlflash/cxl_hw.c
deleted file mode 100644
index b814130f3f5c..000000000000
--- a/drivers/scsi/cxlflash/cxl_hw.c
+++ /dev/null
@@ -1,177 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * CXL Flash Device Driver
- *
- * Written by: Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
- * Uma Krishnan <ukrishn@linux.vnet.ibm.com>, IBM Corporation
- *
- * Copyright (C) 2018 IBM Corporation
- */
-
-#include <misc/cxl.h>
-
-#include "backend.h"
-
-/*
- * The following routines map the cxlflash backend operations to existing CXL
- * kernel API function and are largely simple shims that provide an abstraction
- * for converting generic context and AFU cookies into cxl_context or cxl_afu
- * pointers.
- */
-
-static void __iomem *cxlflash_psa_map(void *ctx_cookie)
-{
- return cxl_psa_map(ctx_cookie);
-}
-
-static void cxlflash_psa_unmap(void __iomem *addr)
-{
- cxl_psa_unmap(addr);
-}
-
-static int cxlflash_process_element(void *ctx_cookie)
-{
- return cxl_process_element(ctx_cookie);
-}
-
-static int cxlflash_map_afu_irq(void *ctx_cookie, int num,
- irq_handler_t handler, void *cookie, char *name)
-{
- return cxl_map_afu_irq(ctx_cookie, num, handler, cookie, name);
-}
-
-static void cxlflash_unmap_afu_irq(void *ctx_cookie, int num, void *cookie)
-{
- cxl_unmap_afu_irq(ctx_cookie, num, cookie);
-}
-
-static u64 cxlflash_get_irq_objhndl(void *ctx_cookie, int irq)
-{
- /* Dummy fop for cxl */
- return 0;
-}
-
-static int cxlflash_start_context(void *ctx_cookie)
-{
- return cxl_start_context(ctx_cookie, 0, NULL);
-}
-
-static int cxlflash_stop_context(void *ctx_cookie)
-{
- return cxl_stop_context(ctx_cookie);
-}
-
-static int cxlflash_afu_reset(void *ctx_cookie)
-{
- return cxl_afu_reset(ctx_cookie);
-}
-
-static void cxlflash_set_master(void *ctx_cookie)
-{
- cxl_set_master(ctx_cookie);
-}
-
-static void *cxlflash_get_context(struct pci_dev *dev, void *afu_cookie)
-{
- return cxl_get_context(dev);
-}
-
-static void *cxlflash_dev_context_init(struct pci_dev *dev, void *afu_cookie)
-{
- return cxl_dev_context_init(dev);
-}
-
-static int cxlflash_release_context(void *ctx_cookie)
-{
- return cxl_release_context(ctx_cookie);
-}
-
-static void cxlflash_perst_reloads_same_image(void *afu_cookie, bool image)
-{
- cxl_perst_reloads_same_image(afu_cookie, image);
-}
-
-static ssize_t cxlflash_read_adapter_vpd(struct pci_dev *dev,
- void *buf, size_t count)
-{
- return cxl_read_adapter_vpd(dev, buf, count);
-}
-
-static int cxlflash_allocate_afu_irqs(void *ctx_cookie, int num)
-{
- return cxl_allocate_afu_irqs(ctx_cookie, num);
-}
-
-static void cxlflash_free_afu_irqs(void *ctx_cookie)
-{
- cxl_free_afu_irqs(ctx_cookie);
-}
-
-static void *cxlflash_create_afu(struct pci_dev *dev)
-{
- return cxl_pci_to_afu(dev);
-}
-
-static void cxlflash_destroy_afu(void *afu)
-{
- /* Dummy fop for cxl */
-}
-
-static struct file *cxlflash_get_fd(void *ctx_cookie,
- struct file_operations *fops, int *fd)
-{
- return cxl_get_fd(ctx_cookie, fops, fd);
-}
-
-static void *cxlflash_fops_get_context(struct file *file)
-{
- return cxl_fops_get_context(file);
-}
-
-static int cxlflash_start_work(void *ctx_cookie, u64 irqs)
-{
- struct cxl_ioctl_start_work work = { 0 };
-
- work.num_interrupts = irqs;
- work.flags = CXL_START_WORK_NUM_IRQS;
-
- return cxl_start_work(ctx_cookie, &work);
-}
-
-static int cxlflash_fd_mmap(struct file *file, struct vm_area_struct *vm)
-{
- return cxl_fd_mmap(file, vm);
-}
-
-static int cxlflash_fd_release(struct inode *inode, struct file *file)
-{
- return cxl_fd_release(inode, file);
-}
-
-const struct cxlflash_backend_ops cxlflash_cxl_ops = {
- .module = THIS_MODULE,
- .psa_map = cxlflash_psa_map,
- .psa_unmap = cxlflash_psa_unmap,
- .process_element = cxlflash_process_element,
- .map_afu_irq = cxlflash_map_afu_irq,
- .unmap_afu_irq = cxlflash_unmap_afu_irq,
- .get_irq_objhndl = cxlflash_get_irq_objhndl,
- .start_context = cxlflash_start_context,
- .stop_context = cxlflash_stop_context,
- .afu_reset = cxlflash_afu_reset,
- .set_master = cxlflash_set_master,
- .get_context = cxlflash_get_context,
- .dev_context_init = cxlflash_dev_context_init,
- .release_context = cxlflash_release_context,
- .perst_reloads_same_image = cxlflash_perst_reloads_same_image,
- .read_adapter_vpd = cxlflash_read_adapter_vpd,
- .allocate_afu_irqs = cxlflash_allocate_afu_irqs,
- .free_afu_irqs = cxlflash_free_afu_irqs,
- .create_afu = cxlflash_create_afu,
- .destroy_afu = cxlflash_destroy_afu,
- .get_fd = cxlflash_get_fd,
- .fops_get_context = cxlflash_fops_get_context,
- .start_work = cxlflash_start_work,
- .fd_mmap = cxlflash_fd_mmap,
- .fd_release = cxlflash_fd_release,
-};
diff --git a/drivers/scsi/cxlflash/lunmgt.c b/drivers/scsi/cxlflash/lunmgt.c
deleted file mode 100644
index 962c797fda07..000000000000
--- a/drivers/scsi/cxlflash/lunmgt.c
+++ /dev/null
@@ -1,278 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * CXL Flash Device Driver
- *
- * Written by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>, IBM Corporation
- * Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
- *
- * Copyright (C) 2015 IBM Corporation
- */
-
-#include <linux/unaligned.h>
-
-#include <linux/interrupt.h>
-#include <linux/pci.h>
-
-#include <scsi/scsi_host.h>
-#include <uapi/scsi/cxlflash_ioctl.h>
-
-#include "sislite.h"
-#include "common.h"
-#include "vlun.h"
-#include "superpipe.h"
-
-/**
- * create_local() - allocate and initialize a local LUN information structure
- * @sdev: SCSI device associated with LUN.
- * @wwid: World Wide Node Name for LUN.
- *
- * Return: Allocated local llun_info structure on success, NULL on failure
- */
-static struct llun_info *create_local(struct scsi_device *sdev, u8 *wwid)
-{
- struct cxlflash_cfg *cfg = shost_priv(sdev->host);
- struct device *dev = &cfg->dev->dev;
- struct llun_info *lli = NULL;
-
- lli = kzalloc(sizeof(*lli), GFP_KERNEL);
- if (unlikely(!lli)) {
- dev_err(dev, "%s: could not allocate lli\n", __func__);
- goto out;
- }
-
- lli->sdev = sdev;
- lli->host_no = sdev->host->host_no;
- lli->in_table = false;
-
- memcpy(lli->wwid, wwid, DK_CXLFLASH_MANAGE_LUN_WWID_LEN);
-out:
- return lli;
-}
-
-/**
- * create_global() - allocate and initialize a global LUN information structure
- * @sdev: SCSI device associated with LUN.
- * @wwid: World Wide Node Name for LUN.
- *
- * Return: Allocated global glun_info structure on success, NULL on failure
- */
-static struct glun_info *create_global(struct scsi_device *sdev, u8 *wwid)
-{
- struct cxlflash_cfg *cfg = shost_priv(sdev->host);
- struct device *dev = &cfg->dev->dev;
- struct glun_info *gli = NULL;
-
- gli = kzalloc(sizeof(*gli), GFP_KERNEL);
- if (unlikely(!gli)) {
- dev_err(dev, "%s: could not allocate gli\n", __func__);
- goto out;
- }
-
- mutex_init(&gli->mutex);
- memcpy(gli->wwid, wwid, DK_CXLFLASH_MANAGE_LUN_WWID_LEN);
-out:
- return gli;
-}
-
-/**
- * lookup_local() - find a local LUN information structure by WWID
- * @cfg: Internal structure associated with the host.
- * @wwid: WWID associated with LUN.
- *
- * Return: Found local lun_info structure on success, NULL on failure
- */
-static struct llun_info *lookup_local(struct cxlflash_cfg *cfg, u8 *wwid)
-{
- struct llun_info *lli, *temp;
-
- list_for_each_entry_safe(lli, temp, &cfg->lluns, list)
- if (!memcmp(lli->wwid, wwid, DK_CXLFLASH_MANAGE_LUN_WWID_LEN))
- return lli;
-
- return NULL;
-}
-
-/**
- * lookup_global() - find a global LUN information structure by WWID
- * @wwid: WWID associated with LUN.
- *
- * Return: Found global lun_info structure on success, NULL on failure
- */
-static struct glun_info *lookup_global(u8 *wwid)
-{
- struct glun_info *gli, *temp;
-
- list_for_each_entry_safe(gli, temp, &global.gluns, list)
- if (!memcmp(gli->wwid, wwid, DK_CXLFLASH_MANAGE_LUN_WWID_LEN))
- return gli;
-
- return NULL;
-}
-
-/**
- * find_and_create_lun() - find or create a local LUN information structure
- * @sdev: SCSI device associated with LUN.
- * @wwid: WWID associated with LUN.
- *
- * The LUN is kept both in a local list (per adapter) and in a global list
- * (across all adapters). Certain attributes of the LUN are local to the
- * adapter (such as index, port selection mask, etc.).
- *
- * The block allocation map is shared across all adapters (i.e. associated
- * wih the global list). Since different attributes are associated with
- * the per adapter and global entries, allocate two separate structures for each
- * LUN (one local, one global).
- *
- * Keep a pointer back from the local to the global entry.
- *
- * This routine assumes the caller holds the global mutex.
- *
- * Return: Found/Allocated local lun_info structure on success, NULL on failure
- */
-static struct llun_info *find_and_create_lun(struct scsi_device *sdev, u8 *wwid)
-{
- struct cxlflash_cfg *cfg = shost_priv(sdev->host);
- struct device *dev = &cfg->dev->dev;
- struct llun_info *lli = NULL;
- struct glun_info *gli = NULL;
-
- if (unlikely(!wwid))
- goto out;
-
- lli = lookup_local(cfg, wwid);
- if (lli)
- goto out;
-
- lli = create_local(sdev, wwid);
- if (unlikely(!lli))
- goto out;
-
- gli = lookup_global(wwid);
- if (gli) {
- lli->parent = gli;
- list_add(&lli->list, &cfg->lluns);
- goto out;
- }
-
- gli = create_global(sdev, wwid);
- if (unlikely(!gli)) {
- kfree(lli);
- lli = NULL;
- goto out;
- }
-
- lli->parent = gli;
- list_add(&lli->list, &cfg->lluns);
-
- list_add(&gli->list, &global.gluns);
-
-out:
- dev_dbg(dev, "%s: returning lli=%p, gli=%p\n", __func__, lli, gli);
- return lli;
-}
-
-/**
- * cxlflash_term_local_luns() - Delete all entries from local LUN list, free.
- * @cfg: Internal structure associated with the host.
- */
-void cxlflash_term_local_luns(struct cxlflash_cfg *cfg)
-{
- struct llun_info *lli, *temp;
-
- mutex_lock(&global.mutex);
- list_for_each_entry_safe(lli, temp, &cfg->lluns, list) {
- list_del(&lli->list);
- kfree(lli);
- }
- mutex_unlock(&global.mutex);
-}
-
-/**
- * cxlflash_list_init() - initializes the global LUN list
- */
-void cxlflash_list_init(void)
-{
- INIT_LIST_HEAD(&global.gluns);
- mutex_init(&global.mutex);
- global.err_page = NULL;
-}
-
-/**
- * cxlflash_term_global_luns() - frees resources associated with global LUN list
- */
-void cxlflash_term_global_luns(void)
-{
- struct glun_info *gli, *temp;
-
- mutex_lock(&global.mutex);
- list_for_each_entry_safe(gli, temp, &global.gluns, list) {
- list_del(&gli->list);
- cxlflash_ba_terminate(&gli->blka.ba_lun);
- kfree(gli);
- }
- mutex_unlock(&global.mutex);
-}
-
-/**
- * cxlflash_manage_lun() - handles LUN management activities
- * @sdev: SCSI device associated with LUN.
- * @arg: Manage ioctl data structure.
- *
- * This routine is used to notify the driver about a LUN's WWID and associate
- * SCSI devices (sdev) with a global LUN instance. Additionally it serves to
- * change a LUN's operating mode: legacy or superpipe.
- *
- * Return: 0 on success, -errno on failure
- */
-int cxlflash_manage_lun(struct scsi_device *sdev, void *arg)
-{
- struct dk_cxlflash_manage_lun *manage = arg;
- struct cxlflash_cfg *cfg = shost_priv(sdev->host);
- struct device *dev = &cfg->dev->dev;
- struct llun_info *lli = NULL;
- int rc = 0;
- u64 flags = manage->hdr.flags;
- u32 chan = sdev->channel;
-
- mutex_lock(&global.mutex);
- lli = find_and_create_lun(sdev, manage->wwid);
- dev_dbg(dev, "%s: WWID=%016llx%016llx, flags=%016llx lli=%p\n",
- __func__, get_unaligned_be64(&manage->wwid[0]),
- get_unaligned_be64(&manage->wwid[8]), manage->hdr.flags, lli);
- if (unlikely(!lli)) {
- rc = -ENOMEM;
- goto out;
- }
-
- if (flags & DK_CXLFLASH_MANAGE_LUN_ENABLE_SUPERPIPE) {
- /*
- * Update port selection mask based upon channel, store off LUN
- * in unpacked, AFU-friendly format, and hang LUN reference in
- * the sdev.
- */
- lli->port_sel |= CHAN2PORTMASK(chan);
- lli->lun_id[chan] = lun_to_lunid(sdev->lun);
- sdev->hostdata = lli;
- } else if (flags & DK_CXLFLASH_MANAGE_LUN_DISABLE_SUPERPIPE) {
- if (lli->parent->mode != MODE_NONE)
- rc = -EBUSY;
- else {
- /*
- * Clean up local LUN for this port and reset table
- * tracking when no more references exist.
- */
- sdev->hostdata = NULL;
- lli->port_sel &= ~CHAN2PORTMASK(chan);
- if (lli->port_sel == 0U)
- lli->in_table = false;
- }
- }
-
- dev_dbg(dev, "%s: port_sel=%08x chan=%u lun_id=%016llx\n",
- __func__, lli->port_sel, chan, lli->lun_id[chan]);
-
-out:
- mutex_unlock(&global.mutex);
- dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
- return rc;
-}
diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c
deleted file mode 100644
index ae626e389c8b..000000000000
--- a/drivers/scsi/cxlflash/main.c
+++ /dev/null
@@ -1,3970 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * CXL Flash Device Driver
- *
- * Written by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>, IBM Corporation
- * Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
- *
- * Copyright (C) 2015 IBM Corporation
- */
-
-#include <linux/delay.h>
-#include <linux/list.h>
-#include <linux/module.h>
-#include <linux/pci.h>
-
-#include <linux/unaligned.h>
-
-#include <scsi/scsi_cmnd.h>
-#include <scsi/scsi_host.h>
-#include <uapi/scsi/cxlflash_ioctl.h>
-
-#include "main.h"
-#include "sislite.h"
-#include "common.h"
-
-MODULE_DESCRIPTION(CXLFLASH_ADAPTER_NAME);
-MODULE_AUTHOR("Manoj N. Kumar <manoj@linux.vnet.ibm.com>");
-MODULE_AUTHOR("Matthew R. Ochs <mrochs@linux.vnet.ibm.com>");
-MODULE_LICENSE("GPL");
-
-static char *cxlflash_devnode(const struct device *dev, umode_t *mode);
-static const struct class cxlflash_class = {
- .name = "cxlflash",
- .devnode = cxlflash_devnode,
-};
-
-static u32 cxlflash_major;
-static DECLARE_BITMAP(cxlflash_minor, CXLFLASH_MAX_ADAPTERS);
-
-/**
- * process_cmd_err() - command error handler
- * @cmd: AFU command that experienced the error.
- * @scp: SCSI command associated with the AFU command in error.
- *
- * Translates error bits from AFU command to SCSI command results.
- */
-static void process_cmd_err(struct afu_cmd *cmd, struct scsi_cmnd *scp)
-{
- struct afu *afu = cmd->parent;
- struct cxlflash_cfg *cfg = afu->parent;
- struct device *dev = &cfg->dev->dev;
- struct sisl_ioasa *ioasa;
- u32 resid;
-
- ioasa = &(cmd->sa);
-
- if (ioasa->rc.flags & SISL_RC_FLAGS_UNDERRUN) {
- resid = ioasa->resid;
- scsi_set_resid(scp, resid);
- dev_dbg(dev, "%s: cmd underrun cmd = %p scp = %p, resid = %d\n",
- __func__, cmd, scp, resid);
- }
-
- if (ioasa->rc.flags & SISL_RC_FLAGS_OVERRUN) {
-		dev_dbg(dev, "%s: cmd overrun cmd = %p scp = %p\n",
- __func__, cmd, scp);
- scp->result = (DID_ERROR << 16);
- }
-
- dev_dbg(dev, "%s: cmd failed afu_rc=%02x scsi_rc=%02x fc_rc=%02x "
- "afu_extra=%02x scsi_extra=%02x fc_extra=%02x\n", __func__,
- ioasa->rc.afu_rc, ioasa->rc.scsi_rc, ioasa->rc.fc_rc,
- ioasa->afu_extra, ioasa->scsi_extra, ioasa->fc_extra);
-
- if (ioasa->rc.scsi_rc) {
- /* We have a SCSI status */
- if (ioasa->rc.flags & SISL_RC_FLAGS_SENSE_VALID) {
- memcpy(scp->sense_buffer, ioasa->sense_data,
- SISL_SENSE_DATA_LEN);
- scp->result = ioasa->rc.scsi_rc;
- } else
- scp->result = ioasa->rc.scsi_rc | (DID_ERROR << 16);
- }
-
- /*
- * We encountered an error. Set scp->result based on nature
- * of error.
- */
- if (ioasa->rc.fc_rc) {
- /* We have an FC status */
- switch (ioasa->rc.fc_rc) {
- case SISL_FC_RC_LINKDOWN:
- scp->result = (DID_REQUEUE << 16);
- break;
- case SISL_FC_RC_RESID:
- /* This indicates an FCP resid underrun */
- if (!(ioasa->rc.flags & SISL_RC_FLAGS_OVERRUN)) {
- /* If the SISL_RC_FLAGS_OVERRUN flag was set,
-				 * then we will handle this error elsewhere.
-				 * If not, then we must handle it here.
- * This is probably an AFU bug.
- */
- scp->result = (DID_ERROR << 16);
- }
- break;
- case SISL_FC_RC_RESIDERR:
- /* Resid mismatch between adapter and device */
- case SISL_FC_RC_TGTABORT:
- case SISL_FC_RC_ABORTOK:
- case SISL_FC_RC_ABORTFAIL:
- case SISL_FC_RC_NOLOGI:
- case SISL_FC_RC_ABORTPEND:
- case SISL_FC_RC_WRABORTPEND:
- case SISL_FC_RC_NOEXP:
- case SISL_FC_RC_INUSE:
- scp->result = (DID_ERROR << 16);
- break;
- }
- }
-
- if (ioasa->rc.afu_rc) {
- /* We have an AFU error */
- switch (ioasa->rc.afu_rc) {
- case SISL_AFU_RC_NO_CHANNELS:
- scp->result = (DID_NO_CONNECT << 16);
- break;
- case SISL_AFU_RC_DATA_DMA_ERR:
- switch (ioasa->afu_extra) {
- case SISL_AFU_DMA_ERR_PAGE_IN:
- /* Retry */
- scp->result = (DID_IMM_RETRY << 16);
- break;
- case SISL_AFU_DMA_ERR_INVALID_EA:
- default:
- scp->result = (DID_ERROR << 16);
- }
- break;
- case SISL_AFU_RC_OUT_OF_DATA_BUFS:
- /* Retry */
- scp->result = (DID_ERROR << 16);
- break;
- default:
- scp->result = (DID_ERROR << 16);
- }
- }
-}
-
-/**
- * cmd_complete() - command completion handler
- * @cmd: AFU command that has completed.
- *
- * For SCSI commands this routine prepares and submits commands that have
- * either completed or timed out to the SCSI stack. For internal commands
- * (TMF or AFU), this routine simply notifies the originator that the
- * command has completed.
- */
-static void cmd_complete(struct afu_cmd *cmd)
-{
- struct scsi_cmnd *scp;
- ulong lock_flags;
- struct afu *afu = cmd->parent;
- struct cxlflash_cfg *cfg = afu->parent;
- struct device *dev = &cfg->dev->dev;
- struct hwq *hwq = get_hwq(afu, cmd->hwq_index);
-
- spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
- list_del(&cmd->list);
- spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
-
- if (cmd->scp) {
- scp = cmd->scp;
- if (unlikely(cmd->sa.ioasc))
- process_cmd_err(cmd, scp);
- else
- scp->result = (DID_OK << 16);
-
- dev_dbg_ratelimited(dev, "%s:scp=%p result=%08x ioasc=%08x\n",
- __func__, scp, scp->result, cmd->sa.ioasc);
- scsi_done(scp);
- } else if (cmd->cmd_tmf) {
- spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
- cfg->tmf_active = false;
- wake_up_all_locked(&cfg->tmf_waitq);
- spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
- } else
- complete(&cmd->cevent);
-}
-
-/**
- * flush_pending_cmds() - flush all pending commands on this hardware queue
- * @hwq: Hardware queue to flush.
- *
- * The hardware send queue lock associated with this hardware queue must be
- * held when calling this routine.
- */
-static void flush_pending_cmds(struct hwq *hwq)
-{
- struct cxlflash_cfg *cfg = hwq->afu->parent;
- struct afu_cmd *cmd, *tmp;
- struct scsi_cmnd *scp;
- ulong lock_flags;
-
- list_for_each_entry_safe(cmd, tmp, &hwq->pending_cmds, list) {
- /* Bypass command when on a doneq, cmd_complete() will handle */
- if (!list_empty(&cmd->queue))
- continue;
-
- list_del(&cmd->list);
-
- if (cmd->scp) {
- scp = cmd->scp;
- scp->result = (DID_IMM_RETRY << 16);
- scsi_done(scp);
- } else {
- cmd->cmd_aborted = true;
-
- if (cmd->cmd_tmf) {
- spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
- cfg->tmf_active = false;
- wake_up_all_locked(&cfg->tmf_waitq);
- spin_unlock_irqrestore(&cfg->tmf_slock,
- lock_flags);
- } else
- complete(&cmd->cevent);
- }
- }
-}
-
-/**
- * context_reset() - reset context via specified register
- * @hwq: Hardware queue owning the context to be reset.
- * @reset_reg: MMIO register to perform reset.
- *
- * When the reset is successful, the SISLite specification guarantees that
- * the AFU has aborted all currently pending I/O. Accordingly, these commands
- * must be flushed.
- *
- * Return: 0 on success, -errno on failure
- */
-static int context_reset(struct hwq *hwq, __be64 __iomem *reset_reg)
-{
- struct cxlflash_cfg *cfg = hwq->afu->parent;
- struct device *dev = &cfg->dev->dev;
- int rc = -ETIMEDOUT;
- int nretry = 0;
- u64 val = 0x1;
- ulong lock_flags;
-
- dev_dbg(dev, "%s: hwq=%p\n", __func__, hwq);
-
- spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
-
- writeq_be(val, reset_reg);
- do {
- val = readq_be(reset_reg);
- if ((val & 0x1) == 0x0) {
- rc = 0;
- break;
- }
-
- /* Double delay each time */
- udelay(1 << nretry);
- } while (nretry++ < MC_ROOM_RETRY_CNT);
-
- if (!rc)
- flush_pending_cmds(hwq);
-
- spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
-
- dev_dbg(dev, "%s: returning rc=%d, val=%016llx nretry=%d\n",
- __func__, rc, val, nretry);
- return rc;
-}
-
-/**
- * context_reset_ioarrin() - reset context via IOARRIN register
- * @hwq: Hardware queue owning the context to be reset.
- *
- * Return: 0 on success, -errno on failure
- */
-static int context_reset_ioarrin(struct hwq *hwq)
-{
- return context_reset(hwq, &hwq->host_map->ioarrin);
-}
-
-/**
- * context_reset_sq() - reset context via SQ_CONTEXT_RESET register
- * @hwq: Hardware queue owning the context to be reset.
- *
- * Return: 0 on success, -errno on failure
- */
-static int context_reset_sq(struct hwq *hwq)
-{
- return context_reset(hwq, &hwq->host_map->sq_ctx_reset);
-}
-
-/**
- * send_cmd_ioarrin() - sends an AFU command via IOARRIN register
- * @afu: AFU associated with the host.
- * @cmd: AFU command to send.
- *
- * Return:
- * 0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
- */
-static int send_cmd_ioarrin(struct afu *afu, struct afu_cmd *cmd)
-{
- struct cxlflash_cfg *cfg = afu->parent;
- struct device *dev = &cfg->dev->dev;
- struct hwq *hwq = get_hwq(afu, cmd->hwq_index);
- int rc = 0;
- s64 room;
- ulong lock_flags;
-
- /*
- * To avoid the performance penalty of MMIO, spread the update of
- * 'room' over multiple commands.
- */
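-	/* Only when the cached count is exhausted is cmd_room re-read via MMIO */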
- spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
- if (--hwq->room < 0) {
- room = readq_be(&hwq->host_map->cmd_room);
- if (room <= 0) {
- dev_dbg_ratelimited(dev, "%s: no cmd_room to send "
- "0x%02X, room=0x%016llX\n",
- __func__, cmd->rcb.cdb[0], room);
- hwq->room = 0;
- rc = SCSI_MLQUEUE_HOST_BUSY;
- goto out;
- }
- hwq->room = room - 1;
- }
-
- list_add(&cmd->list, &hwq->pending_cmds);
- writeq_be((u64)&cmd->rcb, &hwq->host_map->ioarrin);
-out:
- spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
- dev_dbg_ratelimited(dev, "%s: cmd=%p len=%u ea=%016llx rc=%d\n",
- __func__, cmd, cmd->rcb.data_len, cmd->rcb.data_ea, rc);
- return rc;
-}
-
-/**
- * send_cmd_sq() - sends an AFU command via SQ ring
- * @afu: AFU associated with the host.
- * @cmd: AFU command to send.
- *
- * Return:
- * 0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
- */
-static int send_cmd_sq(struct afu *afu, struct afu_cmd *cmd)
-{
- struct cxlflash_cfg *cfg = afu->parent;
- struct device *dev = &cfg->dev->dev;
- struct hwq *hwq = get_hwq(afu, cmd->hwq_index);
- int rc = 0;
- int newval;
- ulong lock_flags;
-
- newval = atomic_dec_if_positive(&hwq->hsq_credits);
- if (newval <= 0) {
- rc = SCSI_MLQUEUE_HOST_BUSY;
- goto out;
- }
-
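-	/* Credits taken here are returned by process_hrrq() as entries complete */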
- cmd->rcb.ioasa = &cmd->sa;
-
- spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
-
- *hwq->hsq_curr = cmd->rcb;
- if (hwq->hsq_curr < hwq->hsq_end)
- hwq->hsq_curr++;
- else
- hwq->hsq_curr = hwq->hsq_start;
-
- list_add(&cmd->list, &hwq->pending_cmds);
- writeq_be((u64)hwq->hsq_curr, &hwq->host_map->sq_tail);
-
- spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
-out:
- dev_dbg(dev, "%s: cmd=%p len=%u ea=%016llx ioasa=%p rc=%d curr=%p "
- "head=%016llx tail=%016llx\n", __func__, cmd, cmd->rcb.data_len,
- cmd->rcb.data_ea, cmd->rcb.ioasa, rc, hwq->hsq_curr,
- readq_be(&hwq->host_map->sq_head),
- readq_be(&hwq->host_map->sq_tail));
- return rc;
-}
-
-/**
- * wait_resp() - polls for a response or timeout to a sent AFU command
- * @afu: AFU associated with the host.
- * @cmd: AFU command that was sent.
- *
- * Return: 0 on success, -errno on failure
- */
-static int wait_resp(struct afu *afu, struct afu_cmd *cmd)
-{
- struct cxlflash_cfg *cfg = afu->parent;
- struct device *dev = &cfg->dev->dev;
- int rc = 0;
- ulong timeout = msecs_to_jiffies(cmd->rcb.timeout * 2 * 1000);
-
- timeout = wait_for_completion_timeout(&cmd->cevent, timeout);
- if (!timeout)
- rc = -ETIMEDOUT;
-
- if (cmd->cmd_aborted)
- rc = -EAGAIN;
-
- if (unlikely(cmd->sa.ioasc != 0)) {
- dev_err(dev, "%s: cmd %02x failed, ioasc=%08x\n",
- __func__, cmd->rcb.cdb[0], cmd->sa.ioasc);
- rc = -EIO;
- }
-
- return rc;
-}
-
-/**
- * cmd_to_target_hwq() - selects a target hardware queue for a SCSI command
- * @host: SCSI host associated with device.
- * @scp: SCSI command to send.
- * @afu:	AFU associated with the host.
- *
- * Hashes a command based upon the hardware queue mode.
- *
- * Return: Trusted index of target hardware queue
- */
-static u32 cmd_to_target_hwq(struct Scsi_Host *host, struct scsi_cmnd *scp,
- struct afu *afu)
-{
- u32 tag;
- u32 hwq = 0;
-
- if (afu->num_hwqs == 1)
- return 0;
-
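-	/* Select a queue by round-robin, block layer tag, or submitting CPU */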
- switch (afu->hwq_mode) {
- case HWQ_MODE_RR:
- hwq = afu->hwq_rr_count++ % afu->num_hwqs;
- break;
- case HWQ_MODE_TAG:
- tag = blk_mq_unique_tag(scsi_cmd_to_rq(scp));
- hwq = blk_mq_unique_tag_to_hwq(tag);
- break;
- case HWQ_MODE_CPU:
- hwq = smp_processor_id() % afu->num_hwqs;
- break;
- default:
- WARN_ON_ONCE(1);
- }
-
- return hwq;
-}
-
-/**
- * send_tmf() - sends a Task Management Function (TMF)
- * @cfg: Internal structure associated with the host.
- * @sdev: SCSI device destined for TMF.
- * @tmfcmd: TMF command to send.
- *
- * Return:
- * 0 on success, SCSI_MLQUEUE_HOST_BUSY or -errno on failure
- */
-static int send_tmf(struct cxlflash_cfg *cfg, struct scsi_device *sdev,
- u64 tmfcmd)
-{
- struct afu *afu = cfg->afu;
- struct afu_cmd *cmd = NULL;
- struct device *dev = &cfg->dev->dev;
- struct hwq *hwq = get_hwq(afu, PRIMARY_HWQ);
- bool needs_deletion = false;
- char *buf = NULL;
- ulong lock_flags;
- int rc = 0;
- ulong to;
-
- buf = kzalloc(sizeof(*cmd) + __alignof__(*cmd) - 1, GFP_KERNEL);
- if (unlikely(!buf)) {
- dev_err(dev, "%s: no memory for command\n", __func__);
- rc = -ENOMEM;
- goto out;
- }
-
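-	/* The buffer was over-allocated above so the command can be aligned */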
- cmd = (struct afu_cmd *)PTR_ALIGN(buf, __alignof__(*cmd));
- INIT_LIST_HEAD(&cmd->queue);
-
-	/* When a Task Management Function is active, do not send another */
- spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
- if (cfg->tmf_active)
- wait_event_interruptible_lock_irq(cfg->tmf_waitq,
- !cfg->tmf_active,
- cfg->tmf_slock);
- cfg->tmf_active = true;
- spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
-
- cmd->parent = afu;
- cmd->cmd_tmf = true;
- cmd->hwq_index = hwq->index;
-
- cmd->rcb.ctx_id = hwq->ctx_hndl;
- cmd->rcb.msi = SISL_MSI_RRQ_UPDATED;
- cmd->rcb.port_sel = CHAN2PORTMASK(sdev->channel);
- cmd->rcb.lun_id = lun_to_lunid(sdev->lun);
- cmd->rcb.req_flags = (SISL_REQ_FLAGS_PORT_LUN_ID |
- SISL_REQ_FLAGS_SUP_UNDERRUN |
- SISL_REQ_FLAGS_TMF_CMD);
- memcpy(cmd->rcb.cdb, &tmfcmd, sizeof(tmfcmd));
-
- rc = afu->send_cmd(afu, cmd);
- if (unlikely(rc)) {
- spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
- cfg->tmf_active = false;
- spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
- goto out;
- }
-
- spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
- to = msecs_to_jiffies(5000);
- to = wait_event_interruptible_lock_irq_timeout(cfg->tmf_waitq,
- !cfg->tmf_active,
- cfg->tmf_slock,
- to);
- if (!to) {
- dev_err(dev, "%s: TMF timed out\n", __func__);
- rc = -ETIMEDOUT;
- needs_deletion = true;
- } else if (cmd->cmd_aborted) {
- dev_err(dev, "%s: TMF aborted\n", __func__);
- rc = -EAGAIN;
- } else if (cmd->sa.ioasc) {
- dev_err(dev, "%s: TMF failed ioasc=%08x\n",
- __func__, cmd->sa.ioasc);
- rc = -EIO;
- }
- cfg->tmf_active = false;
- spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
-
- if (needs_deletion) {
- spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
- list_del(&cmd->list);
- spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
- }
-out:
- kfree(buf);
- return rc;
-}
-
-/**
- * cxlflash_driver_info() - information handler for this host driver
- * @host: SCSI host associated with device.
- *
- * Return: A string describing the device.
- */
-static const char *cxlflash_driver_info(struct Scsi_Host *host)
-{
- return CXLFLASH_ADAPTER_NAME;
-}
-
-/**
- * cxlflash_queuecommand() - sends a mid-layer request
- * @host: SCSI host associated with device.
- * @scp: SCSI command to send.
- *
- * Return: 0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
- */
-static int cxlflash_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scp)
-{
- struct cxlflash_cfg *cfg = shost_priv(host);
- struct afu *afu = cfg->afu;
- struct device *dev = &cfg->dev->dev;
- struct afu_cmd *cmd = sc_to_afuci(scp);
- struct scatterlist *sg = scsi_sglist(scp);
- int hwq_index = cmd_to_target_hwq(host, scp, afu);
- struct hwq *hwq = get_hwq(afu, hwq_index);
- u16 req_flags = SISL_REQ_FLAGS_SUP_UNDERRUN;
- ulong lock_flags;
- int rc = 0;
-
- dev_dbg_ratelimited(dev, "%s: (scp=%p) %d/%d/%d/%llu "
- "cdb=(%08x-%08x-%08x-%08x)\n",
- __func__, scp, host->host_no, scp->device->channel,
- scp->device->id, scp->device->lun,
- get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
- get_unaligned_be32(&((u32 *)scp->cmnd)[1]),
- get_unaligned_be32(&((u32 *)scp->cmnd)[2]),
- get_unaligned_be32(&((u32 *)scp->cmnd)[3]));
-
- /*
- * If a Task Management Function is active, wait for it to complete
- * before continuing with regular commands.
- */
- spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
- if (cfg->tmf_active) {
- spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
- rc = SCSI_MLQUEUE_HOST_BUSY;
- goto out;
- }
- spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
-
- switch (cfg->state) {
- case STATE_PROBING:
- case STATE_PROBED:
- case STATE_RESET:
- dev_dbg_ratelimited(dev, "%s: device is in reset\n", __func__);
- rc = SCSI_MLQUEUE_HOST_BUSY;
- goto out;
- case STATE_FAILTERM:
- dev_dbg_ratelimited(dev, "%s: device has failed\n", __func__);
- scp->result = (DID_NO_CONNECT << 16);
- scsi_done(scp);
- rc = 0;
- goto out;
- default:
- atomic_inc(&afu->cmds_active);
- break;
- }
-
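-	/* Hand the (single) scatter-gather element directly to the AFU */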
- if (likely(sg)) {
- cmd->rcb.data_len = sg->length;
- cmd->rcb.data_ea = (uintptr_t)sg_virt(sg);
- }
-
- cmd->scp = scp;
- cmd->parent = afu;
- cmd->hwq_index = hwq_index;
-
- cmd->sa.ioasc = 0;
- cmd->rcb.ctx_id = hwq->ctx_hndl;
- cmd->rcb.msi = SISL_MSI_RRQ_UPDATED;
- cmd->rcb.port_sel = CHAN2PORTMASK(scp->device->channel);
- cmd->rcb.lun_id = lun_to_lunid(scp->device->lun);
-
- if (scp->sc_data_direction == DMA_TO_DEVICE)
- req_flags |= SISL_REQ_FLAGS_HOST_WRITE;
-
- cmd->rcb.req_flags = req_flags;
- memcpy(cmd->rcb.cdb, scp->cmnd, sizeof(cmd->rcb.cdb));
-
- rc = afu->send_cmd(afu, cmd);
- atomic_dec(&afu->cmds_active);
-out:
- return rc;
-}
-
-/**
- * cxlflash_wait_for_pci_err_recovery() - wait for error recovery during probe
- * @cfg: Internal structure associated with the host.
- */
-static void cxlflash_wait_for_pci_err_recovery(struct cxlflash_cfg *cfg)
-{
- struct pci_dev *pdev = cfg->dev;
-
- if (pci_channel_offline(pdev))
- wait_event_timeout(cfg->reset_waitq,
- !pci_channel_offline(pdev),
- CXLFLASH_PCI_ERROR_RECOVERY_TIMEOUT);
-}
-
-/**
- * free_mem() - free memory associated with the AFU
- * @cfg: Internal structure associated with the host.
- */
-static void free_mem(struct cxlflash_cfg *cfg)
-{
- struct afu *afu = cfg->afu;
-
- if (cfg->afu) {
- free_pages((ulong)afu, get_order(sizeof(struct afu)));
- cfg->afu = NULL;
- }
-}
-
-/**
- * cxlflash_reset_sync() - synchronizing point for asynchronous resets
- * @cfg: Internal structure associated with the host.
- */
-static void cxlflash_reset_sync(struct cxlflash_cfg *cfg)
-{
- if (cfg->async_reset_cookie == 0)
- return;
-
- /* Wait until all async calls prior to this cookie have completed */
- async_synchronize_cookie(cfg->async_reset_cookie + 1);
- cfg->async_reset_cookie = 0;
-}
-
-/**
- * stop_afu() - stops the AFU command timers and unmaps the MMIO space
- * @cfg: Internal structure associated with the host.
- *
- * Safe to call with AFU in a partially allocated/initialized state.
- *
- * Cancels scheduled worker threads, waits for any active internal AFU
- * commands to timeout, disables IRQ polling and then unmaps the MMIO space.
- */
-static void stop_afu(struct cxlflash_cfg *cfg)
-{
- struct afu *afu = cfg->afu;
- struct hwq *hwq;
- int i;
-
- cancel_work_sync(&cfg->work_q);
- if (!current_is_async())
- cxlflash_reset_sync(cfg);
-
- if (likely(afu)) {
- while (atomic_read(&afu->cmds_active))
- ssleep(1);
-
- if (afu_is_irqpoll_enabled(afu)) {
- for (i = 0; i < afu->num_hwqs; i++) {
- hwq = get_hwq(afu, i);
-
- irq_poll_disable(&hwq->irqpoll);
- }
- }
-
- if (likely(afu->afu_map)) {
- cfg->ops->psa_unmap(afu->afu_map);
- afu->afu_map = NULL;
- }
- }
-}
-
-/**
- * term_intr() - disables all AFU interrupts
- * @cfg: Internal structure associated with the host.
- * @level: Depth of allocation, where to begin waterfall tear down.
- * @index: Index of the hardware queue.
- *
- * Safe to call with AFU/MC in partially allocated/initialized state.
- */
-static void term_intr(struct cxlflash_cfg *cfg, enum undo_level level,
- u32 index)
-{
- struct afu *afu = cfg->afu;
- struct device *dev = &cfg->dev->dev;
- struct hwq *hwq;
-
- if (!afu) {
- dev_err(dev, "%s: returning with NULL afu\n", __func__);
- return;
- }
-
- hwq = get_hwq(afu, index);
-
- if (!hwq->ctx_cookie) {
- dev_err(dev, "%s: returning with NULL MC\n", __func__);
- return;
- }
-
- switch (level) {
- case UNMAP_THREE:
- /* SISL_MSI_ASYNC_ERROR is setup only for the primary HWQ */
- if (index == PRIMARY_HWQ)
- cfg->ops->unmap_afu_irq(hwq->ctx_cookie, 3, hwq);
- fallthrough;
- case UNMAP_TWO:
- cfg->ops->unmap_afu_irq(hwq->ctx_cookie, 2, hwq);
- fallthrough;
- case UNMAP_ONE:
- cfg->ops->unmap_afu_irq(hwq->ctx_cookie, 1, hwq);
- fallthrough;
- case FREE_IRQ:
- cfg->ops->free_afu_irqs(hwq->ctx_cookie);
- fallthrough;
- case UNDO_NOOP:
- /* No action required */
- break;
- }
-}
-
-/**
- * term_mc() - terminates the master context
- * @cfg: Internal structure associated with the host.
- * @index: Index of the hardware queue.
- *
- * Safe to call with AFU/MC in partially allocated/initialized state.
- */
-static void term_mc(struct cxlflash_cfg *cfg, u32 index)
-{
- struct afu *afu = cfg->afu;
- struct device *dev = &cfg->dev->dev;
- struct hwq *hwq;
- ulong lock_flags;
-
- if (!afu) {
- dev_err(dev, "%s: returning with NULL afu\n", __func__);
- return;
- }
-
- hwq = get_hwq(afu, index);
-
- if (!hwq->ctx_cookie) {
- dev_err(dev, "%s: returning with NULL MC\n", __func__);
- return;
- }
-
- WARN_ON(cfg->ops->stop_context(hwq->ctx_cookie));
- if (index != PRIMARY_HWQ)
- WARN_ON(cfg->ops->release_context(hwq->ctx_cookie));
- hwq->ctx_cookie = NULL;
-
- spin_lock_irqsave(&hwq->hrrq_slock, lock_flags);
- hwq->hrrq_online = false;
- spin_unlock_irqrestore(&hwq->hrrq_slock, lock_flags);
-
- spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
- flush_pending_cmds(hwq);
- spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
-}
-
-/**
- * term_afu() - terminates the AFU
- * @cfg: Internal structure associated with the host.
- *
- * Safe to call with AFU/MC in partially allocated/initialized state.
- */
-static void term_afu(struct cxlflash_cfg *cfg)
-{
- struct device *dev = &cfg->dev->dev;
- int k;
-
- /*
- * Tear down is carefully orchestrated to ensure
- * no interrupts can come in when the problem state
- * area is unmapped.
- *
- * 1) Disable all AFU interrupts for each master
- * 2) Unmap the problem state area
- * 3) Stop each master context
- */
- for (k = cfg->afu->num_hwqs - 1; k >= 0; k--)
- term_intr(cfg, UNMAP_THREE, k);
-
- stop_afu(cfg);
-
- for (k = cfg->afu->num_hwqs - 1; k >= 0; k--)
- term_mc(cfg, k);
-
- dev_dbg(dev, "%s: returning\n", __func__);
-}
-
-/**
- * notify_shutdown() - notifies device of pending shutdown
- * @cfg: Internal structure associated with the host.
- * @wait: Whether to wait for shutdown processing to complete.
- *
- * This function will notify the AFU that the adapter is being shut down
- * and will wait for shutdown processing to complete if wait is true.
- * This notification should flush pending I/Os to the device and halt
- * further I/Os until the next AFU reset is issued and device restarted.
- */
-static void notify_shutdown(struct cxlflash_cfg *cfg, bool wait)
-{
- struct afu *afu = cfg->afu;
- struct device *dev = &cfg->dev->dev;
- struct dev_dependent_vals *ddv;
- __be64 __iomem *fc_port_regs;
- u64 reg, status;
- int i, retry_cnt = 0;
-
- ddv = (struct dev_dependent_vals *)cfg->dev_id->driver_data;
- if (!(ddv->flags & CXLFLASH_NOTIFY_SHUTDOWN))
- return;
-
- if (!afu || !afu->afu_map) {
- dev_dbg(dev, "%s: Problem state area not mapped\n", __func__);
- return;
- }
-
- /* Notify AFU */
- for (i = 0; i < cfg->num_fc_ports; i++) {
- fc_port_regs = get_fc_port_regs(cfg, i);
-
- reg = readq_be(&fc_port_regs[FC_CONFIG2 / 8]);
- reg |= SISL_FC_SHUTDOWN_NORMAL;
- writeq_be(reg, &fc_port_regs[FC_CONFIG2 / 8]);
- }
-
- if (!wait)
- return;
-
- /* Wait up to 1.5 seconds for shutdown processing to complete */
- for (i = 0; i < cfg->num_fc_ports; i++) {
- fc_port_regs = get_fc_port_regs(cfg, i);
- retry_cnt = 0;
-
- while (true) {
- status = readq_be(&fc_port_regs[FC_STATUS / 8]);
- if (status & SISL_STATUS_SHUTDOWN_COMPLETE)
- break;
- if (++retry_cnt >= MC_RETRY_CNT) {
- dev_dbg(dev, "%s: port %d shutdown processing "
- "not yet completed\n", __func__, i);
- break;
- }
- msleep(100 * retry_cnt);
- }
- }
-}
-
-/**
- * cxlflash_get_minor() - gets the first available minor number
- *
- * Return: Unique minor number that can be used to create the character
- *         device, or -1 when no minor numbers are available.
- */
-static int cxlflash_get_minor(void)
-{
- int minor;
- long bit;
-
- bit = find_first_zero_bit(cxlflash_minor, CXLFLASH_MAX_ADAPTERS);
- if (bit >= CXLFLASH_MAX_ADAPTERS)
- return -1;
-
- minor = bit & MINORMASK;
- set_bit(minor, cxlflash_minor);
- return minor;
-}
-
-/**
- * cxlflash_put_minor() - releases the minor number
- * @minor: Minor number that is no longer needed.
- */
-static void cxlflash_put_minor(int minor)
-{
- clear_bit(minor, cxlflash_minor);
-}
-
-/**
- * cxlflash_release_chrdev() - release the character device for the host
- * @cfg: Internal structure associated with the host.
- */
-static void cxlflash_release_chrdev(struct cxlflash_cfg *cfg)
-{
- device_unregister(cfg->chardev);
- cfg->chardev = NULL;
- cdev_del(&cfg->cdev);
- cxlflash_put_minor(MINOR(cfg->cdev.dev));
-}
-
-/**
- * cxlflash_remove() - PCI entry point to tear down host
- * @pdev: PCI device associated with the host.
- *
- * Safe to use as a cleanup in partially allocated/initialized state. Note that
- * the reset_waitq is flushed as part of the stop/termination of user contexts.
- */
-static void cxlflash_remove(struct pci_dev *pdev)
-{
- struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
- struct device *dev = &pdev->dev;
- ulong lock_flags;
-
- if (!pci_is_enabled(pdev)) {
- dev_dbg(dev, "%s: Device is disabled\n", __func__);
- return;
- }
-
- /* Yield to running recovery threads before continuing with remove */
- wait_event(cfg->reset_waitq, cfg->state != STATE_RESET &&
- cfg->state != STATE_PROBING);
- spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
- if (cfg->tmf_active)
- wait_event_interruptible_lock_irq(cfg->tmf_waitq,
- !cfg->tmf_active,
- cfg->tmf_slock);
- spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
-
- /* Notify AFU and wait for shutdown processing to complete */
- notify_shutdown(cfg, true);
-
- cfg->state = STATE_FAILTERM;
- cxlflash_stop_term_user_contexts(cfg);
-
- switch (cfg->init_state) {
- case INIT_STATE_CDEV:
- cxlflash_release_chrdev(cfg);
- fallthrough;
- case INIT_STATE_SCSI:
- cxlflash_term_local_luns(cfg);
- scsi_remove_host(cfg->host);
- fallthrough;
- case INIT_STATE_AFU:
- term_afu(cfg);
- fallthrough;
- case INIT_STATE_PCI:
- cfg->ops->destroy_afu(cfg->afu_cookie);
- pci_disable_device(pdev);
- fallthrough;
- case INIT_STATE_NONE:
- free_mem(cfg);
- scsi_host_put(cfg->host);
- break;
- }
-
- dev_dbg(dev, "%s: returning\n", __func__);
-}
-
-/**
- * alloc_mem() - allocates the AFU and its command pool
- * @cfg: Internal structure associated with the host.
- *
- * A partially allocated state remains on failure.
- *
- * Return:
- * 0 on success
- * -ENOMEM on failure to allocate memory
- */
-static int alloc_mem(struct cxlflash_cfg *cfg)
-{
- int rc = 0;
- struct device *dev = &cfg->dev->dev;
-
- /* AFU is ~28k, i.e. only one 64k page or up to seven 4k pages */
- cfg->afu = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
- get_order(sizeof(struct afu)));
- if (unlikely(!cfg->afu)) {
- dev_err(dev, "%s: cannot get %d free pages\n",
- __func__, get_order(sizeof(struct afu)));
- rc = -ENOMEM;
- goto out;
- }
- cfg->afu->parent = cfg;
- cfg->afu->desired_hwqs = CXLFLASH_DEF_HWQS;
- cfg->afu->afu_map = NULL;
-out:
- return rc;
-}
-
-/**
- * init_pci() - initializes the host as a PCI device
- * @cfg: Internal structure associated with the host.
- *
- * Return: 0 on success, -errno on failure
- */
-static int init_pci(struct cxlflash_cfg *cfg)
-{
- struct pci_dev *pdev = cfg->dev;
- struct device *dev = &cfg->dev->dev;
- int rc = 0;
-
- rc = pci_enable_device(pdev);
- if (rc || pci_channel_offline(pdev)) {
- if (pci_channel_offline(pdev)) {
- cxlflash_wait_for_pci_err_recovery(cfg);
- rc = pci_enable_device(pdev);
- }
-
- if (rc) {
- dev_err(dev, "%s: Cannot enable adapter\n", __func__);
- cxlflash_wait_for_pci_err_recovery(cfg);
- goto out;
- }
- }
-
-out:
- dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
- return rc;
-}
-
-/**
- * init_scsi() - adds the host to the SCSI stack and kicks off host scan
- * @cfg: Internal structure associated with the host.
- *
- * Return: 0 on success, -errno on failure
- */
-static int init_scsi(struct cxlflash_cfg *cfg)
-{
- struct pci_dev *pdev = cfg->dev;
- struct device *dev = &cfg->dev->dev;
- int rc = 0;
-
- rc = scsi_add_host(cfg->host, &pdev->dev);
- if (rc) {
- dev_err(dev, "%s: scsi_add_host failed rc=%d\n", __func__, rc);
- goto out;
- }
-
- scsi_scan_host(cfg->host);
-
-out:
- dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
- return rc;
-}
-
-/**
- * set_port_online() - transitions the specified host FC port to online state
- * @fc_regs: Top of MMIO region defined for specified port.
- *
- * The provided MMIO region must be mapped prior to call. Online state means
- * that the FC link layer has synced, completed the handshaking process, and
- * is ready for login to start.
- */
-static void set_port_online(__be64 __iomem *fc_regs)
-{
- u64 cmdcfg;
-
- cmdcfg = readq_be(&fc_regs[FC_MTIP_CMDCONFIG / 8]);
- cmdcfg &= (~FC_MTIP_CMDCONFIG_OFFLINE); /* clear OFF_LINE */
- cmdcfg |= (FC_MTIP_CMDCONFIG_ONLINE); /* set ON_LINE */
- writeq_be(cmdcfg, &fc_regs[FC_MTIP_CMDCONFIG / 8]);
-}
-
-/**
- * set_port_offline() - transitions the specified host FC port to offline state
- * @fc_regs: Top of MMIO region defined for specified port.
- *
- * The provided MMIO region must be mapped prior to call.
- */
-static void set_port_offline(__be64 __iomem *fc_regs)
-{
- u64 cmdcfg;
-
- cmdcfg = readq_be(&fc_regs[FC_MTIP_CMDCONFIG / 8]);
- cmdcfg &= (~FC_MTIP_CMDCONFIG_ONLINE); /* clear ON_LINE */
- cmdcfg |= (FC_MTIP_CMDCONFIG_OFFLINE); /* set OFF_LINE */
- writeq_be(cmdcfg, &fc_regs[FC_MTIP_CMDCONFIG / 8]);
-}
-
-/**
- * wait_port_online() - waits for the specified host FC port to come online
- * @fc_regs: Top of MMIO region defined for specified port.
- * @delay_us: Number of microseconds to delay between reading port status.
- * @nretry: Number of cycles to retry reading port status.
- *
- * The provided MMIO region must be mapped prior to call. This will timeout
- * when the cable is not plugged in.
- *
- * Return:
- * TRUE (1) when the specified port is online
- * FALSE (0) when the specified port fails to come online after timeout
- */
-static bool wait_port_online(__be64 __iomem *fc_regs, u32 delay_us, u32 nretry)
-{
- u64 status;
-
- WARN_ON(delay_us < 1000);
-
- do {
- msleep(delay_us / 1000);
- status = readq_be(&fc_regs[FC_MTIP_STATUS / 8]);
- if (status == U64_MAX)
- nretry /= 2;
- } while ((status & FC_MTIP_STATUS_MASK) != FC_MTIP_STATUS_ONLINE &&
- nretry--);
-
- return ((status & FC_MTIP_STATUS_MASK) == FC_MTIP_STATUS_ONLINE);
-}
-
-/**
- * wait_port_offline() - waits for the specified host FC port to go offline
- * @fc_regs: Top of MMIO region defined for specified port.
- * @delay_us: Number of microseconds to delay between reading port status.
- * @nretry: Number of cycles to retry reading port status.
- *
- * The provided MMIO region must be mapped prior to call.
- *
- * Return:
- * TRUE (1) when the specified port is offline
- * FALSE (0) when the specified port fails to go offline after timeout
- */
-static bool wait_port_offline(__be64 __iomem *fc_regs, u32 delay_us, u32 nretry)
-{
- u64 status;
-
- WARN_ON(delay_us < 1000);
-
- do {
- msleep(delay_us / 1000);
- status = readq_be(&fc_regs[FC_MTIP_STATUS / 8]);
- if (status == U64_MAX)
- nretry /= 2;
- } while ((status & FC_MTIP_STATUS_MASK) != FC_MTIP_STATUS_OFFLINE &&
- nretry--);
-
- return ((status & FC_MTIP_STATUS_MASK) == FC_MTIP_STATUS_OFFLINE);
-}
-
-/**
- * afu_set_wwpn() - configures the WWPN for the specified host FC port
- * @afu: AFU associated with the host that owns the specified FC port.
- * @port: Port number being configured.
- * @fc_regs: Top of MMIO region defined for specified port.
- * @wwpn: The world-wide-port-number previously discovered for port.
- *
- * The provided MMIO region must be mapped prior to call. As part of the
- * sequence to configure the WWPN, the port is toggled offline and then back
- * online. This toggling action can cause this routine to delay up to a few
- * seconds. When configured to use the internal LUN feature of the AFU, a
- * failure to come online is overridden.
- */
-static void afu_set_wwpn(struct afu *afu, int port, __be64 __iomem *fc_regs,
- u64 wwpn)
-{
- struct cxlflash_cfg *cfg = afu->parent;
- struct device *dev = &cfg->dev->dev;
-
- set_port_offline(fc_regs);
- if (!wait_port_offline(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
- FC_PORT_STATUS_RETRY_CNT)) {
- dev_dbg(dev, "%s: wait on port %d to go offline timed out\n",
- __func__, port);
- }
-
- writeq_be(wwpn, &fc_regs[FC_PNAME / 8]);
-
- set_port_online(fc_regs);
- if (!wait_port_online(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
- FC_PORT_STATUS_RETRY_CNT)) {
- dev_dbg(dev, "%s: wait on port %d to go online timed out\n",
- __func__, port);
- }
-}
-
-/**
- * afu_link_reset() - resets the specified host FC port
- * @afu: AFU associated with the host that owns the specified FC port.
- * @port: Port number being configured.
- * @fc_regs: Top of MMIO region defined for specified port.
- *
- * The provided MMIO region must be mapped prior to call. The sequence to
- * reset the port involves toggling it offline and then back online. This
- * action can cause this routine to delay up to a few seconds. An effort
- * is made to maintain the link with the device by switching the host to
- * use the alternate port exclusively while the reset takes place. A
- * failure of the port to come back online is logged but otherwise ignored.
- */
-static void afu_link_reset(struct afu *afu, int port, __be64 __iomem *fc_regs)
-{
- struct cxlflash_cfg *cfg = afu->parent;
- struct device *dev = &cfg->dev->dev;
- u64 port_sel;
-
- /* first switch the AFU to the other links, if any */
- port_sel = readq_be(&afu->afu_map->global.regs.afu_port_sel);
- port_sel &= ~(1ULL << port);
- writeq_be(port_sel, &afu->afu_map->global.regs.afu_port_sel);
- cxlflash_afu_sync(afu, 0, 0, AFU_GSYNC);
-
- set_port_offline(fc_regs);
- if (!wait_port_offline(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
- FC_PORT_STATUS_RETRY_CNT))
- dev_err(dev, "%s: wait on port %d to go offline timed out\n",
- __func__, port);
-
- set_port_online(fc_regs);
- if (!wait_port_online(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
- FC_PORT_STATUS_RETRY_CNT))
- dev_err(dev, "%s: wait on port %d to go online timed out\n",
- __func__, port);
-
- /* switch back to include this port */
- port_sel |= (1ULL << port);
- writeq_be(port_sel, &afu->afu_map->global.regs.afu_port_sel);
- cxlflash_afu_sync(afu, 0, 0, AFU_GSYNC);
-
- dev_dbg(dev, "%s: returning port_sel=%016llx\n", __func__, port_sel);
-}
-
-/**
- * afu_err_intr_init() - clears and initializes the AFU for error interrupts
- * @afu: AFU associated with the host.
- */
-static void afu_err_intr_init(struct afu *afu)
-{
- struct cxlflash_cfg *cfg = afu->parent;
- __be64 __iomem *fc_port_regs;
- int i;
- struct hwq *hwq = get_hwq(afu, PRIMARY_HWQ);
- u64 reg;
-
-	/* Global async interrupts: the AFU clears afu_ctrl on context exit
-	 * if async interrupts were sent to that context. This prevents the
-	 * AFU from sending further async interrupts when there is nobody to
-	 * receive them.
-	 */
-
- /* mask all */
- writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_mask);
- /* set LISN# to send and point to primary master context */
- reg = ((u64) (((hwq->ctx_hndl << 8) | SISL_MSI_ASYNC_ERROR)) << 40);
-
- if (afu->internal_lun)
- reg |= 1; /* Bit 63 indicates local lun */
- writeq_be(reg, &afu->afu_map->global.regs.afu_ctrl);
- /* clear all */
- writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_clear);
- /* unmask bits that are of interest */
- /* note: afu can send an interrupt after this step */
- writeq_be(SISL_ASTATUS_MASK, &afu->afu_map->global.regs.aintr_mask);
- /* clear again in case a bit came on after previous clear but before */
- /* unmask */
- writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_clear);
-
- /* Clear/Set internal lun bits */
- fc_port_regs = get_fc_port_regs(cfg, 0);
- reg = readq_be(&fc_port_regs[FC_CONFIG2 / 8]);
- reg &= SISL_FC_INTERNAL_MASK;
- if (afu->internal_lun)
- reg |= ((u64)(afu->internal_lun - 1) << SISL_FC_INTERNAL_SHIFT);
- writeq_be(reg, &fc_port_regs[FC_CONFIG2 / 8]);
-
- /* now clear FC errors */
- for (i = 0; i < cfg->num_fc_ports; i++) {
- fc_port_regs = get_fc_port_regs(cfg, i);
-
- writeq_be(0xFFFFFFFFU, &fc_port_regs[FC_ERROR / 8]);
- writeq_be(0, &fc_port_regs[FC_ERRCAP / 8]);
- }
-
- /* sync interrupts for master's IOARRIN write */
- /* note that unlike asyncs, there can be no pending sync interrupts */
- /* at this time (this is a fresh context and master has not written */
- /* IOARRIN yet), so there is nothing to clear. */
-
- /* set LISN#, it is always sent to the context that wrote IOARRIN */
- for (i = 0; i < afu->num_hwqs; i++) {
- hwq = get_hwq(afu, i);
-
- reg = readq_be(&hwq->host_map->ctx_ctrl);
- WARN_ON((reg & SISL_CTX_CTRL_LISN_MASK) != 0);
- reg |= SISL_MSI_SYNC_ERROR;
- writeq_be(reg, &hwq->host_map->ctx_ctrl);
- writeq_be(SISL_ISTATUS_MASK, &hwq->host_map->intr_mask);
- }
-}
-
-/**
- * cxlflash_sync_err_irq() - interrupt handler for synchronous errors
- * @irq: Interrupt number.
- * @data: Private data provided at interrupt registration, the AFU.
- *
- * Return: Always return IRQ_HANDLED.
- */
-static irqreturn_t cxlflash_sync_err_irq(int irq, void *data)
-{
- struct hwq *hwq = (struct hwq *)data;
- struct cxlflash_cfg *cfg = hwq->afu->parent;
- struct device *dev = &cfg->dev->dev;
- u64 reg;
- u64 reg_unmasked;
-
- reg = readq_be(&hwq->host_map->intr_status);
- reg_unmasked = (reg & SISL_ISTATUS_UNMASK);
-
- if (reg_unmasked == 0UL) {
- dev_err(dev, "%s: spurious interrupt, intr_status=%016llx\n",
- __func__, reg);
- goto cxlflash_sync_err_irq_exit;
- }
-
- dev_err(dev, "%s: unexpected interrupt, intr_status=%016llx\n",
- __func__, reg);
-
- writeq_be(reg_unmasked, &hwq->host_map->intr_clear);
-
-cxlflash_sync_err_irq_exit:
- return IRQ_HANDLED;
-}
-
-/**
- * process_hrrq() - process the read-response queue
- * @hwq: HWQ associated with the host.
- * @doneq: Queue of commands harvested from the RRQ.
- * @budget: Threshold of RRQ entries to process.
- *
- * This routine must be called holding the disabled RRQ spin lock.
- *
- * Return: The number of entries processed.
- */
-static int process_hrrq(struct hwq *hwq, struct list_head *doneq, int budget)
-{
- struct afu *afu = hwq->afu;
- struct afu_cmd *cmd;
- struct sisl_ioasa *ioasa;
- struct sisl_ioarcb *ioarcb;
- bool toggle = hwq->toggle;
- int num_hrrq = 0;
- u64 entry,
- *hrrq_start = hwq->hrrq_start,
- *hrrq_end = hwq->hrrq_end,
- *hrrq_curr = hwq->hrrq_curr;
-
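-	/*
-	 * An RRQ entry is ready only when its toggle bit matches the queue's
-	 * current toggle state; the state flips each time the queue wraps.
-	 */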
- /* Process ready RRQ entries up to the specified budget (if any) */
- while (true) {
- entry = *hrrq_curr;
-
- if ((entry & SISL_RESP_HANDLE_T_BIT) != toggle)
- break;
-
- entry &= ~SISL_RESP_HANDLE_T_BIT;
-
- if (afu_is_sq_cmd_mode(afu)) {
- ioasa = (struct sisl_ioasa *)entry;
- cmd = container_of(ioasa, struct afu_cmd, sa);
- } else {
- ioarcb = (struct sisl_ioarcb *)entry;
- cmd = container_of(ioarcb, struct afu_cmd, rcb);
- }
-
- list_add_tail(&cmd->queue, doneq);
-
- /* Advance to next entry or wrap and flip the toggle bit */
- if (hrrq_curr < hrrq_end)
- hrrq_curr++;
- else {
- hrrq_curr = hrrq_start;
- toggle ^= SISL_RESP_HANDLE_T_BIT;
- }
-
- atomic_inc(&hwq->hsq_credits);
- num_hrrq++;
-
- if (budget > 0 && num_hrrq >= budget)
- break;
- }
-
- hwq->hrrq_curr = hrrq_curr;
- hwq->toggle = toggle;
-
- return num_hrrq;
-}
-
-/**
- * process_cmd_doneq() - process a queue of harvested RRQ commands
- * @doneq: Queue of completed commands.
- *
- * Note that upon return the queue can no longer be trusted.
- */
-static void process_cmd_doneq(struct list_head *doneq)
-{
- struct afu_cmd *cmd, *tmp;
-
- WARN_ON(list_empty(doneq));
-
- list_for_each_entry_safe(cmd, tmp, doneq, queue)
- cmd_complete(cmd);
-}
-
-/**
- * cxlflash_irqpoll() - process a queue of harvested RRQ commands
- * @irqpoll: IRQ poll structure associated with queue to poll.
- * @budget: Threshold of RRQ entries to process per poll.
- *
- * Return: The number of entries processed.
- */
-static int cxlflash_irqpoll(struct irq_poll *irqpoll, int budget)
-{
- struct hwq *hwq = container_of(irqpoll, struct hwq, irqpoll);
- unsigned long hrrq_flags;
- LIST_HEAD(doneq);
- int num_entries = 0;
-
- spin_lock_irqsave(&hwq->hrrq_slock, hrrq_flags);
-
- num_entries = process_hrrq(hwq, &doneq, budget);
- if (num_entries < budget)
- irq_poll_complete(irqpoll);
-
- spin_unlock_irqrestore(&hwq->hrrq_slock, hrrq_flags);
-
- process_cmd_doneq(&doneq);
- return num_entries;
-}
-
-/**
- * cxlflash_rrq_irq() - interrupt handler for read-response queue (normal path)
- * @irq: Interrupt number.
- * @data: Private data provided at interrupt registration, the AFU.
- *
- * Return: IRQ_HANDLED or IRQ_NONE when no ready entries found.
- */
-static irqreturn_t cxlflash_rrq_irq(int irq, void *data)
-{
- struct hwq *hwq = (struct hwq *)data;
- struct afu *afu = hwq->afu;
- unsigned long hrrq_flags;
- LIST_HEAD(doneq);
- int num_entries = 0;
-
- spin_lock_irqsave(&hwq->hrrq_slock, hrrq_flags);
-
- /* Silently drop spurious interrupts when queue is not online */
- if (!hwq->hrrq_online) {
- spin_unlock_irqrestore(&hwq->hrrq_slock, hrrq_flags);
- return IRQ_HANDLED;
- }
-
- if (afu_is_irqpoll_enabled(afu)) {
- irq_poll_sched(&hwq->irqpoll);
- spin_unlock_irqrestore(&hwq->hrrq_slock, hrrq_flags);
- return IRQ_HANDLED;
- }
-
- num_entries = process_hrrq(hwq, &doneq, -1);
- spin_unlock_irqrestore(&hwq->hrrq_slock, hrrq_flags);
-
- if (num_entries == 0)
- return IRQ_NONE;
-
- process_cmd_doneq(&doneq);
- return IRQ_HANDLED;
-}
-
-/*
- * Asynchronous interrupt information table
- *
- * NOTE:
- * - Order matters here as this array is indexed by bit position.
- *
- * - The checkpatch script considers the BUILD_SISL_ASTATUS_FC_PORT macro
- * as complex and complains due to a lack of parentheses/braces.
- */
-#define ASTATUS_FC(_a, _b, _c, _d) \
- { SISL_ASTATUS_FC##_a##_##_b, _c, _a, (_d) }
-
-#define BUILD_SISL_ASTATUS_FC_PORT(_a) \
- ASTATUS_FC(_a, LINK_UP, "link up", 0), \
- ASTATUS_FC(_a, LINK_DN, "link down", 0), \
- ASTATUS_FC(_a, LOGI_S, "login succeeded", SCAN_HOST), \
- ASTATUS_FC(_a, LOGI_F, "login failed", CLR_FC_ERROR), \
- ASTATUS_FC(_a, LOGI_R, "login timed out, retrying", LINK_RESET), \
- ASTATUS_FC(_a, CRC_T, "CRC threshold exceeded", LINK_RESET), \
- ASTATUS_FC(_a, LOGO, "target initiated LOGO", 0), \
- ASTATUS_FC(_a, OTHER, "other error", CLR_FC_ERROR | LINK_RESET)
-
-static const struct asyc_intr_info ainfo[] = {
- BUILD_SISL_ASTATUS_FC_PORT(1),
- BUILD_SISL_ASTATUS_FC_PORT(0),
- BUILD_SISL_ASTATUS_FC_PORT(3),
- BUILD_SISL_ASTATUS_FC_PORT(2)
-};
-
-/**
- * cxlflash_async_err_irq() - interrupt handler for asynchronous errors
- * @irq: Interrupt number.
- * @data: Private data provided at interrupt registration, the AFU.
- *
- * Return: Always return IRQ_HANDLED.
- */
-static irqreturn_t cxlflash_async_err_irq(int irq, void *data)
-{
- struct hwq *hwq = (struct hwq *)data;
- struct afu *afu = hwq->afu;
- struct cxlflash_cfg *cfg = afu->parent;
- struct device *dev = &cfg->dev->dev;
- const struct asyc_intr_info *info;
- struct sisl_global_map __iomem *global = &afu->afu_map->global;
- __be64 __iomem *fc_port_regs;
- u64 reg_unmasked;
- u64 reg;
- u64 bit;
- u8 port;
-
- reg = readq_be(&global->regs.aintr_status);
- reg_unmasked = (reg & SISL_ASTATUS_UNMASK);
-
- if (unlikely(reg_unmasked == 0)) {
- dev_err(dev, "%s: spurious interrupt, aintr_status=%016llx\n",
- __func__, reg);
- goto out;
- }
-
- /* FYI, it is 'okay' to clear AFU status before FC_ERROR */
- writeq_be(reg_unmasked, &global->regs.aintr_clear);
-
- /* Check each bit that is on */
- for_each_set_bit(bit, (ulong *)&reg_unmasked, BITS_PER_LONG) {
- if (unlikely(bit >= ARRAY_SIZE(ainfo))) {
- WARN_ON_ONCE(1);
- continue;
- }
-
- info = &ainfo[bit];
- if (unlikely(info->status != 1ULL << bit)) {
- WARN_ON_ONCE(1);
- continue;
- }
-
- port = info->port;
- fc_port_regs = get_fc_port_regs(cfg, port);
-
- dev_err(dev, "%s: FC Port %d -> %s, fc_status=%016llx\n",
- __func__, port, info->desc,
- readq_be(&fc_port_regs[FC_STATUS / 8]));
-
- /*
- * Do link reset first, some OTHER errors will set FC_ERROR
- * again if cleared before or w/o a reset
- */
- if (info->action & LINK_RESET) {
- dev_err(dev, "%s: FC Port %d: resetting link\n",
- __func__, port);
- cfg->lr_state = LINK_RESET_REQUIRED;
- cfg->lr_port = port;
- schedule_work(&cfg->work_q);
- }
-
- if (info->action & CLR_FC_ERROR) {
- reg = readq_be(&fc_port_regs[FC_ERROR / 8]);
-
- /*
- * Since all errors are unmasked, FC_ERROR and FC_ERRCAP
- * should be the same and tracing one is sufficient.
- */
-
- dev_err(dev, "%s: fc %d: clearing fc_error=%016llx\n",
- __func__, port, reg);
-
- writeq_be(reg, &fc_port_regs[FC_ERROR / 8]);
- writeq_be(0, &fc_port_regs[FC_ERRCAP / 8]);
- }
-
- if (info->action & SCAN_HOST) {
- atomic_inc(&cfg->scan_host_needed);
- schedule_work(&cfg->work_q);
- }
- }
-
-out:
- return IRQ_HANDLED;
-}
-
-/**
- * read_vpd() - obtains the WWPNs from VPD
- * @cfg: Internal structure associated with the host.
- * @wwpn: Array of size MAX_FC_PORTS to pass back WWPNs
- *
- * Return: 0 on success, -errno on failure
- */
-static int read_vpd(struct cxlflash_cfg *cfg, u64 wwpn[])
-{
- struct device *dev = &cfg->dev->dev;
- struct pci_dev *pdev = cfg->dev;
- int i, k, rc = 0;
- unsigned int kw_size;
- ssize_t vpd_size;
- char vpd_data[CXLFLASH_VPD_LEN];
- char tmp_buf[WWPN_BUF_LEN] = { 0 };
- const struct dev_dependent_vals *ddv = (struct dev_dependent_vals *)
- cfg->dev_id->driver_data;
- const bool wwpn_vpd_required = ddv->flags & CXLFLASH_WWPN_VPD_REQUIRED;
- const char *wwpn_vpd_tags[MAX_FC_PORTS] = { "V5", "V6", "V7", "V8" };
-
- /* Get the VPD data from the device */
- vpd_size = cfg->ops->read_adapter_vpd(pdev, vpd_data, sizeof(vpd_data));
- if (unlikely(vpd_size <= 0)) {
- dev_err(dev, "%s: Unable to read VPD (size = %ld)\n",
- __func__, vpd_size);
- rc = -ENODEV;
- goto out;
- }
-
- /*
- * Find the offset of the WWPN tag within the read only
- * VPD data and validate the found field (partials are
- * no good to us). Convert the ASCII data to an integer
- * value. Note that we must copy to a temporary buffer
- * because the conversion service requires that the ASCII
- * string be terminated.
- *
- * Allow for WWPN not being found for all devices, setting
- * the returned WWPN to zero when not found. Notify with a
- * log error for cards that should have had WWPN keywords
- * in the VPD - cards requiring WWPN will not have their
- * ports programmed and operate in an undefined state.
- */
- for (k = 0; k < cfg->num_fc_ports; k++) {
- i = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size,
- wwpn_vpd_tags[k], &kw_size);
- if (i == -ENOENT) {
- if (wwpn_vpd_required)
- dev_err(dev, "%s: Port %d WWPN not found\n",
- __func__, k);
- wwpn[k] = 0ULL;
- continue;
- }
-
- if (i < 0 || kw_size != WWPN_LEN) {
- dev_err(dev, "%s: Port %d WWPN incomplete or bad VPD\n",
- __func__, k);
- rc = -ENODEV;
- goto out;
- }
-
- memcpy(tmp_buf, &vpd_data[i], WWPN_LEN);
- rc = kstrtoul(tmp_buf, WWPN_LEN, (ulong *)&wwpn[k]);
- if (unlikely(rc)) {
- dev_err(dev, "%s: WWPN conversion failed for port %d\n",
- __func__, k);
- rc = -ENODEV;
- goto out;
- }
-
- dev_dbg(dev, "%s: wwpn%d=%016llx\n", __func__, k, wwpn[k]);
- }
-
-out:
- dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
- return rc;
-}
-
-/**
- * init_pcr() - initialize the provisioning and control registers
- * @cfg: Internal structure associated with the host.
- *
- * Also sets up fast access to the mapped registers and initializes AFU
- * command fields that never change.
- */
-static void init_pcr(struct cxlflash_cfg *cfg)
-{
- struct afu *afu = cfg->afu;
- struct sisl_ctrl_map __iomem *ctrl_map;
- struct hwq *hwq;
- void *cookie;
- int i;
-
- for (i = 0; i < MAX_CONTEXT; i++) {
- ctrl_map = &afu->afu_map->ctrls[i].ctrl;
- /* Disrupt any clients that could be running */
- /* e.g. clients that survived a master restart */
- writeq_be(0, &ctrl_map->rht_start);
- writeq_be(0, &ctrl_map->rht_cnt_id);
- writeq_be(0, &ctrl_map->ctx_cap);
- }
-
- /* Copy frequently used fields into hwq */
- for (i = 0; i < afu->num_hwqs; i++) {
- hwq = get_hwq(afu, i);
- cookie = hwq->ctx_cookie;
-
- hwq->ctx_hndl = (u16) cfg->ops->process_element(cookie);
- hwq->host_map = &afu->afu_map->hosts[hwq->ctx_hndl].host;
- hwq->ctrl_map = &afu->afu_map->ctrls[hwq->ctx_hndl].ctrl;
-
- /* Program the Endian Control for the master context */
- writeq_be(SISL_ENDIAN_CTRL, &hwq->host_map->endian_ctrl);
- }
-}
-
-/**
- * init_global() - initialize AFU global registers
- * @cfg: Internal structure associated with the host.
- */
-static int init_global(struct cxlflash_cfg *cfg)
-{
- struct afu *afu = cfg->afu;
- struct device *dev = &cfg->dev->dev;
- struct hwq *hwq;
- struct sisl_host_map __iomem *hmap;
- __be64 __iomem *fc_port_regs;
- u64 wwpn[MAX_FC_PORTS]; /* wwpn of AFU ports */
- int i = 0, num_ports = 0;
- int rc = 0;
- int j;
- void *ctx;
- u64 reg;
-
- rc = read_vpd(cfg, &wwpn[0]);
- if (rc) {
- dev_err(dev, "%s: could not read vpd rc=%d\n", __func__, rc);
- goto out;
- }
-
- /* Set up RRQ and SQ in HWQ for master issued cmds */
- for (i = 0; i < afu->num_hwqs; i++) {
- hwq = get_hwq(afu, i);
- hmap = hwq->host_map;
-
- writeq_be((u64) hwq->hrrq_start, &hmap->rrq_start);
- writeq_be((u64) hwq->hrrq_end, &hmap->rrq_end);
- hwq->hrrq_online = true;
-
- if (afu_is_sq_cmd_mode(afu)) {
- writeq_be((u64)hwq->hsq_start, &hmap->sq_start);
- writeq_be((u64)hwq->hsq_end, &hmap->sq_end);
- }
- }
-
- /* AFU configuration */
- reg = readq_be(&afu->afu_map->global.regs.afu_config);
- reg |= SISL_AFUCONF_AR_ALL|SISL_AFUCONF_ENDIAN;
- /* enable all auto retry options and control endianness */
- /* leave others at default: */
- /* CTX_CAP write protected, mbox_r does not clear on read and */
- /* checker on if dual afu */
- writeq_be(reg, &afu->afu_map->global.regs.afu_config);
-
- /* Global port select: select either port */
- if (afu->internal_lun) {
- /* Only use port 0 */
- writeq_be(PORT0, &afu->afu_map->global.regs.afu_port_sel);
- num_ports = 0;
- } else {
- writeq_be(PORT_MASK(cfg->num_fc_ports),
- &afu->afu_map->global.regs.afu_port_sel);
- num_ports = cfg->num_fc_ports;
- }
-
- for (i = 0; i < num_ports; i++) {
- fc_port_regs = get_fc_port_regs(cfg, i);
-
- /* Unmask all errors (but they are still masked at AFU) */
- writeq_be(0, &fc_port_regs[FC_ERRMSK / 8]);
- /* Clear CRC error cnt & set a threshold */
- (void)readq_be(&fc_port_regs[FC_CNT_CRCERR / 8]);
- writeq_be(MC_CRC_THRESH, &fc_port_regs[FC_CRC_THRESH / 8]);
-
- /* Set WWPNs. If already programmed, wwpn[i] is 0 */
- if (wwpn[i] != 0)
- afu_set_wwpn(afu, i, &fc_port_regs[0], wwpn[i]);
- /* Programming WWPN back to back causes additional
- * offline/online transitions and a PLOGI
- */
- msleep(100);
- }
-
- if (afu_is_ocxl_lisn(afu)) {
- /* Set up the LISN effective address for each master */
- for (i = 0; i < afu->num_hwqs; i++) {
- hwq = get_hwq(afu, i);
- ctx = hwq->ctx_cookie;
-
- for (j = 0; j < hwq->num_irqs; j++) {
- reg = cfg->ops->get_irq_objhndl(ctx, j);
- writeq_be(reg, &hwq->ctrl_map->lisn_ea[j]);
- }
-
- reg = hwq->ctx_hndl;
- writeq_be(SISL_LISN_PASID(reg, reg),
- &hwq->ctrl_map->lisn_pasid[0]);
- writeq_be(SISL_LISN_PASID(0UL, reg),
- &hwq->ctrl_map->lisn_pasid[1]);
- }
- }
-
- /* Set up master's own CTX_CAP to allow real mode, host translation */
- /* tables, afu cmds and read/write GSCSI cmds. */
- /* First, unlock ctx_cap write by reading mbox */
- for (i = 0; i < afu->num_hwqs; i++) {
- hwq = get_hwq(afu, i);
-
- (void)readq_be(&hwq->ctrl_map->mbox_r); /* unlock ctx_cap */
- writeq_be((SISL_CTX_CAP_REAL_MODE | SISL_CTX_CAP_HOST_XLATE |
- SISL_CTX_CAP_READ_CMD | SISL_CTX_CAP_WRITE_CMD |
- SISL_CTX_CAP_AFU_CMD | SISL_CTX_CAP_GSCSI_CMD),
- &hwq->ctrl_map->ctx_cap);
- }
-
- /*
- * Determine write-same unmap support for host by evaluating the unmap
- * sector support bit of the context control register associated with
- * the primary hardware queue. Note that while this status is reflected
- * in a context register, the outcome can be assumed to be host-wide.
- */
- hwq = get_hwq(afu, PRIMARY_HWQ);
- reg = readq_be(&hwq->host_map->ctx_ctrl);
- if (reg & SISL_CTX_CTRL_UNMAP_SECTOR)
- cfg->ws_unmap = true;
-
- /* Initialize heartbeat */
- afu->hb = readq_be(&afu->afu_map->global.regs.afu_hb);
-out:
- return rc;
-}
-
-/**
- * start_afu() - initializes and starts the AFU
- * @cfg: Internal structure associated with the host.
- */
-static int start_afu(struct cxlflash_cfg *cfg)
-{
- struct afu *afu = cfg->afu;
- struct device *dev = &cfg->dev->dev;
- struct hwq *hwq;
- int rc = 0;
- int i;
-
- init_pcr(cfg);
-
- /* Initialize each HWQ */
- for (i = 0; i < afu->num_hwqs; i++) {
- hwq = get_hwq(afu, i);
-
- /* After an AFU reset, RRQ entries are stale, clear them */
- memset(&hwq->rrq_entry, 0, sizeof(hwq->rrq_entry));
-
- /* Initialize RRQ pointers */
- hwq->hrrq_start = &hwq->rrq_entry[0];
- hwq->hrrq_end = &hwq->rrq_entry[NUM_RRQ_ENTRY - 1];
- hwq->hrrq_curr = hwq->hrrq_start;
- hwq->toggle = 1;
-
- /* Initialize spin locks */
- spin_lock_init(&hwq->hrrq_slock);
- spin_lock_init(&hwq->hsq_slock);
-
- /* Initialize SQ */
- if (afu_is_sq_cmd_mode(afu)) {
- memset(&hwq->sq, 0, sizeof(hwq->sq));
- hwq->hsq_start = &hwq->sq[0];
- hwq->hsq_end = &hwq->sq[NUM_SQ_ENTRY - 1];
- hwq->hsq_curr = hwq->hsq_start;
-
- atomic_set(&hwq->hsq_credits, NUM_SQ_ENTRY - 1);
- }
-
- /* Initialize IRQ poll */
- if (afu_is_irqpoll_enabled(afu))
- irq_poll_init(&hwq->irqpoll, afu->irqpoll_weight,
- cxlflash_irqpoll);
-
- }
-
- rc = init_global(cfg);
-
- dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
- return rc;
-}
-
-/**
- * init_intr() - setup interrupt handlers for the master context
- * @cfg: Internal structure associated with the host.
- * @hwq: Hardware queue to initialize.
- *
- * Return: UNDO_NOOP on success, level of teardown required on failure
- */
-static enum undo_level init_intr(struct cxlflash_cfg *cfg,
- struct hwq *hwq)
-{
- struct device *dev = &cfg->dev->dev;
- void *ctx = hwq->ctx_cookie;
- int rc = 0;
- enum undo_level level = UNDO_NOOP;
- bool is_primary_hwq = (hwq->index == PRIMARY_HWQ);
- int num_irqs = hwq->num_irqs;
-
- rc = cfg->ops->allocate_afu_irqs(ctx, num_irqs);
- if (unlikely(rc)) {
- dev_err(dev, "%s: allocate_afu_irqs failed rc=%d\n",
- __func__, rc);
- level = UNDO_NOOP;
- goto out;
- }
-
- rc = cfg->ops->map_afu_irq(ctx, 1, cxlflash_sync_err_irq, hwq,
- "SISL_MSI_SYNC_ERROR");
- if (unlikely(rc <= 0)) {
- dev_err(dev, "%s: SISL_MSI_SYNC_ERROR map failed\n", __func__);
- level = FREE_IRQ;
- goto out;
- }
-
- rc = cfg->ops->map_afu_irq(ctx, 2, cxlflash_rrq_irq, hwq,
- "SISL_MSI_RRQ_UPDATED");
- if (unlikely(rc <= 0)) {
- dev_err(dev, "%s: SISL_MSI_RRQ_UPDATED map failed\n", __func__);
- level = UNMAP_ONE;
- goto out;
- }
-
- /* SISL_MSI_ASYNC_ERROR is setup only for the primary HWQ */
- if (!is_primary_hwq)
- goto out;
-
- rc = cfg->ops->map_afu_irq(ctx, 3, cxlflash_async_err_irq, hwq,
- "SISL_MSI_ASYNC_ERROR");
- if (unlikely(rc <= 0)) {
- dev_err(dev, "%s: SISL_MSI_ASYNC_ERROR map failed\n", __func__);
- level = UNMAP_TWO;
- goto out;
- }
-out:
- return level;
-}
-
-/**
- * init_mc() - create and register as the master context
- * @cfg: Internal structure associated with the host.
- * @index: HWQ Index of the master context.
- *
- * Return: 0 on success, -errno on failure
- */
-static int init_mc(struct cxlflash_cfg *cfg, u32 index)
-{
- void *ctx;
- struct device *dev = &cfg->dev->dev;
- struct hwq *hwq = get_hwq(cfg->afu, index);
- int rc = 0;
- int num_irqs;
- enum undo_level level;
-
- hwq->afu = cfg->afu;
- hwq->index = index;
- INIT_LIST_HEAD(&hwq->pending_cmds);
-
- if (index == PRIMARY_HWQ) {
- ctx = cfg->ops->get_context(cfg->dev, cfg->afu_cookie);
- num_irqs = 3;
- } else {
- ctx = cfg->ops->dev_context_init(cfg->dev, cfg->afu_cookie);
- num_irqs = 2;
- }
- if (IS_ERR_OR_NULL(ctx)) {
- rc = -ENOMEM;
- goto err1;
- }
-
- WARN_ON(hwq->ctx_cookie);
- hwq->ctx_cookie = ctx;
- hwq->num_irqs = num_irqs;
-
- /* Set it up as a master with the CXL */
- cfg->ops->set_master(ctx);
-
- /* Reset AFU when initializing primary context */
- if (index == PRIMARY_HWQ) {
- rc = cfg->ops->afu_reset(ctx);
- if (unlikely(rc)) {
- dev_err(dev, "%s: AFU reset failed rc=%d\n",
- __func__, rc);
- goto err1;
- }
- }
-
-	level = init_intr(cfg, hwq);
-	if (unlikely(level)) {
-		dev_err(dev, "%s: interrupt init failed level=%d\n",
-			__func__, level);
-		rc = -ENODEV;
-		goto err2;
-	}
-
- /* Finally, activate the context by starting it */
- rc = cfg->ops->start_context(hwq->ctx_cookie);
- if (unlikely(rc)) {
- dev_err(dev, "%s: start context failed rc=%d\n", __func__, rc);
- level = UNMAP_THREE;
- goto err2;
- }
-
-out:
- dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
- return rc;
-err2:
- term_intr(cfg, level, index);
- if (index != PRIMARY_HWQ)
- cfg->ops->release_context(ctx);
-err1:
- hwq->ctx_cookie = NULL;
- goto out;
-}
-
-/**
- * get_num_afu_ports() - determines and configures the number of AFU ports
- * @cfg: Internal structure associated with the host.
- *
- * This routine determines the number of AFU ports by converting the global
- * port selection mask. The converted value is only valid following an AFU
- * reset (explicit or power-on). This routine must be invoked shortly after
- * mapping as other routines are dependent on the number of ports during the
- * initialization sequence.
- *
- * To support legacy AFUs that might not have reflected an initial global
- * port mask (value read is 0), default to the number of ports originally
- * supported by the cxlflash driver (2) before hardware with other port
- * offerings was introduced.
- */
-static void get_num_afu_ports(struct cxlflash_cfg *cfg)
-{
- struct afu *afu = cfg->afu;
- struct device *dev = &cfg->dev->dev;
- u64 port_mask;
- int num_fc_ports = LEGACY_FC_PORTS;
-
- port_mask = readq_be(&afu->afu_map->global.regs.afu_port_sel);
- if (port_mask != 0ULL)
- num_fc_ports = min(ilog2(port_mask) + 1, MAX_FC_PORTS);
-
- dev_dbg(dev, "%s: port_mask=%016llx num_fc_ports=%d\n",
- __func__, port_mask, num_fc_ports);
-
- cfg->num_fc_ports = num_fc_ports;
- cfg->host->max_channel = PORTNUM2CHAN(num_fc_ports);
-}
-
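The conversion above hinges on the highest set bit of the global port selection mask: ilog2(mask) + 1 gives the port count, capped at MAX_FC_PORTS, while a mask of zero falls back to the two-port legacy default mentioned in the comment. Below is a minimal userspace sketch of that same conversion, not driver code; the cap value of 4 is assumed for illustration and a plain highest-bit loop stands in for the kernel's ilog2().

/*
 * Illustrative userspace sketch of the port-mask conversion above; not
 * driver code. MAX_FC_PORTS of 4 is an assumed stand-in value.
 */
#include <stdint.h>
#include <stdio.h>

#define LEGACY_FC_PORTS	2
#define MAX_FC_PORTS	4

static int ports_from_mask(uint64_t port_mask)
{
	int highest = -1;
	int num = LEGACY_FC_PORTS;	/* default for legacy AFUs (mask == 0) */
	int i;

	for (i = 63; i >= 0; i--)
		if (port_mask & (1ULL << i)) {
			highest = i;
			break;
		}

	if (highest >= 0)		/* equivalent to ilog2(mask) + 1 */
		num = highest + 1 < MAX_FC_PORTS ? highest + 1 : MAX_FC_PORTS;

	return num;
}

int main(void)
{
	printf("mask=0x0 -> %d ports\n", ports_from_mask(0x0));	/* 2 (legacy) */
	printf("mask=0x3 -> %d ports\n", ports_from_mask(0x3));	/* 2 */
	printf("mask=0xf -> %d ports\n", ports_from_mask(0xf));	/* 4 */
	return 0;
}
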
-/**
- * init_afu() - set up as master context and start AFU
- * @cfg: Internal structure associated with the host.
- *
- * This routine is a higher level of control for configuring the
- * AFU on probe and reset paths.
- *
- * Return: 0 on success, -errno on failure
- */
-static int init_afu(struct cxlflash_cfg *cfg)
-{
- u64 reg;
- int rc = 0;
- struct afu *afu = cfg->afu;
- struct device *dev = &cfg->dev->dev;
- struct hwq *hwq;
- int i;
-
- cfg->ops->perst_reloads_same_image(cfg->afu_cookie, true);
-
- mutex_init(&afu->sync_active);
- afu->num_hwqs = afu->desired_hwqs;
- for (i = 0; i < afu->num_hwqs; i++) {
- rc = init_mc(cfg, i);
- if (rc) {
- dev_err(dev, "%s: init_mc failed rc=%d index=%d\n",
- __func__, rc, i);
- goto err1;
- }
- }
-
- /* Map the entire MMIO space of the AFU using the first context */
- hwq = get_hwq(afu, PRIMARY_HWQ);
- afu->afu_map = cfg->ops->psa_map(hwq->ctx_cookie);
- if (!afu->afu_map) {
- dev_err(dev, "%s: psa_map failed\n", __func__);
- rc = -ENOMEM;
- goto err1;
- }
-
- /* No byte reverse on reading afu_version or string will be backwards */
- reg = readq(&afu->afu_map->global.regs.afu_version);
- memcpy(afu->version, &reg, sizeof(reg));
- afu->interface_version =
- readq_be(&afu->afu_map->global.regs.interface_version);
- if ((afu->interface_version + 1) == 0) {
- dev_err(dev, "Back level AFU, please upgrade. AFU version %s "
- "interface version %016llx\n", afu->version,
- afu->interface_version);
- rc = -EINVAL;
- goto err1;
- }
-
- if (afu_is_sq_cmd_mode(afu)) {
- afu->send_cmd = send_cmd_sq;
- afu->context_reset = context_reset_sq;
- } else {
- afu->send_cmd = send_cmd_ioarrin;
- afu->context_reset = context_reset_ioarrin;
- }
-
- dev_dbg(dev, "%s: afu_ver=%s interface_ver=%016llx\n", __func__,
- afu->version, afu->interface_version);
-
- get_num_afu_ports(cfg);
-
- rc = start_afu(cfg);
- if (rc) {
- dev_err(dev, "%s: start_afu failed, rc=%d\n", __func__, rc);
- goto err1;
- }
-
- afu_err_intr_init(cfg->afu);
- for (i = 0; i < afu->num_hwqs; i++) {
- hwq = get_hwq(afu, i);
-
- hwq->room = readq_be(&hwq->host_map->cmd_room);
- }
-
- /* Restore the LUN mappings */
- cxlflash_restore_luntable(cfg);
-out:
- dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
- return rc;
-
-err1:
- for (i = afu->num_hwqs - 1; i >= 0; i--) {
- term_intr(cfg, UNMAP_THREE, i);
- term_mc(cfg, i);
- }
- goto out;
-}
-
-/**
- * afu_reset() - resets the AFU
- * @cfg: Internal structure associated with the host.
- *
- * Return: 0 on success, -errno on failure
- */
-static int afu_reset(struct cxlflash_cfg *cfg)
-{
- struct device *dev = &cfg->dev->dev;
- int rc = 0;
-
-	/*
-	 * Stop the context before the reset. Since the context is no longer
-	 * available, restart it after the reset is complete.
-	 */
- term_afu(cfg);
-
- rc = init_afu(cfg);
-
- dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
- return rc;
-}
-
-/**
- * drain_ioctls() - wait until all currently executing ioctls have completed
- * @cfg: Internal structure associated with the host.
- *
- * Obtain write access to read/write semaphore that wraps ioctl
- * handling to 'drain' ioctls currently executing.
- */
-static void drain_ioctls(struct cxlflash_cfg *cfg)
-{
- down_write(&cfg->ioctl_rwsem);
- up_write(&cfg->ioctl_rwsem);
-}
-
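As the comment above notes, every ioctl path holds cfg->ioctl_rwsem for read, so acquiring it for write cannot complete until all in-flight ioctls have released it; dropping it again immediately lets new callers proceed. A minimal userspace analog of that drain idiom, using a pthread rwlock in place of the kernel rwsem (illustrative only, not driver code):

/* Userspace analog of the ioctl drain idiom above; not driver code. */
#include <pthread.h>

static pthread_rwlock_t ioctl_rwsem = PTHREAD_RWLOCK_INITIALIZER;

/* Every ioctl-style operation runs with the semaphore held for read. */
static void do_ioctl_work(void)
{
	pthread_rwlock_rdlock(&ioctl_rwsem);
	/* ... service the request ... */
	pthread_rwlock_unlock(&ioctl_rwsem);
}

/*
 * Draining: the write acquisition cannot complete until every current
 * reader has released the lock; dropping it right away lets new readers in.
 */
static void drain_ioctls_analog(void)
{
	pthread_rwlock_wrlock(&ioctl_rwsem);
	pthread_rwlock_unlock(&ioctl_rwsem);
}

int main(void)
{
	do_ioctl_work();
	drain_ioctls_analog();
	return 0;
}
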
-/**
- * cxlflash_async_reset_host() - asynchronous host reset handler
- * @data: Private data provided while scheduling reset.
- * @cookie: Cookie that can be used for checkpointing.
- */
-static void cxlflash_async_reset_host(void *data, async_cookie_t cookie)
-{
- struct cxlflash_cfg *cfg = data;
- struct device *dev = &cfg->dev->dev;
- int rc = 0;
-
- if (cfg->state != STATE_RESET) {
- dev_dbg(dev, "%s: Not performing a reset, state=%d\n",
- __func__, cfg->state);
- goto out;
- }
-
- drain_ioctls(cfg);
- cxlflash_mark_contexts_error(cfg);
- rc = afu_reset(cfg);
- if (rc)
- cfg->state = STATE_FAILTERM;
- else
- cfg->state = STATE_NORMAL;
- wake_up_all(&cfg->reset_waitq);
-
-out:
- scsi_unblock_requests(cfg->host);
-}
-
-/**
- * cxlflash_schedule_async_reset() - schedule an asynchronous host reset
- * @cfg: Internal structure associated with the host.
- */
-static void cxlflash_schedule_async_reset(struct cxlflash_cfg *cfg)
-{
- struct device *dev = &cfg->dev->dev;
-
- if (cfg->state != STATE_NORMAL) {
- dev_dbg(dev, "%s: Not performing reset state=%d\n",
- __func__, cfg->state);
- return;
- }
-
- cfg->state = STATE_RESET;
- scsi_block_requests(cfg->host);
- cfg->async_reset_cookie = async_schedule(cxlflash_async_reset_host,
- cfg);
-}
-
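The scheduling path above only arms a reset from the NORMAL state, flips the state to RESET so concurrent callers back off, blocks the SCSI midlayer, and defers the heavy lifting to an async worker whose cookie is kept for later synchronization. A rough userspace analog of that hand-off, not driver code; a mutex-protected flag plays the role of cfg->state and a pthread (joined via its id, much as the async cookie would be synchronized on) plays the role of async_schedule().

/* Userspace analog of the asynchronous reset hand-off above; not driver code. */
#include <pthread.h>
#include <stdio.h>

enum state { NORMAL, RESET, FAILTERM };

static enum state state = NORMAL;
static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_t reset_tid;		/* plays the role of the async cookie */

static void *async_reset_worker(void *arg)
{
	(void)arg;
	/* ... drain in-flight work and reinitialize the device here ... */
	pthread_mutex_lock(&state_lock);
	state = NORMAL;			/* or FAILTERM if the reset failed */
	pthread_mutex_unlock(&state_lock);
	return NULL;
}

static int schedule_async_reset(void)
{
	pthread_mutex_lock(&state_lock);
	if (state != NORMAL) {		/* only arm a reset from NORMAL */
		pthread_mutex_unlock(&state_lock);
		return -1;
	}
	state = RESET;			/* concurrent callers now back off */
	pthread_mutex_unlock(&state_lock);

	return pthread_create(&reset_tid, NULL, async_reset_worker, NULL);
}

int main(void)
{
	if (schedule_async_reset() == 0)
		pthread_join(reset_tid, NULL);	/* like syncing on the cookie */

	printf("state=%d\n", state);		/* 0 == NORMAL */
	return 0;
}
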
-/**
- * send_afu_cmd() - builds and sends an internal AFU command
- * @afu: AFU associated with the host.
- * @rcb: Pre-populated IOARCB describing command to send.
- *
- * The AFU can only take one internal AFU command at a time. This limitation is
- * enforced by using a mutex to provide exclusive access to the AFU during the
- * operation. This design point requires that calling threads not be in
- * interrupt context, since they may sleep while waiting for concurrent AFU
- * operations.
- *
- * The command status is optionally passed back to the caller when the caller
- * populates the IOASA field of the IOARCB with a pointer to an IOASA structure.
- *
- * Return:
- * 0 on success, -errno on failure
- */
-static int send_afu_cmd(struct afu *afu, struct sisl_ioarcb *rcb)
-{
- struct cxlflash_cfg *cfg = afu->parent;
- struct device *dev = &cfg->dev->dev;
- struct afu_cmd *cmd = NULL;
- struct hwq *hwq = get_hwq(afu, PRIMARY_HWQ);
- ulong lock_flags;
- char *buf = NULL;
- int rc = 0;
- int nretry = 0;
-
- if (cfg->state != STATE_NORMAL) {
- dev_dbg(dev, "%s: Sync not required state=%u\n",
- __func__, cfg->state);
- return 0;
- }
-
- mutex_lock(&afu->sync_active);
- atomic_inc(&afu->cmds_active);
- buf = kmalloc(sizeof(*cmd) + __alignof__(*cmd) - 1, GFP_KERNEL);
- if (unlikely(!buf)) {
- dev_err(dev, "%s: no memory for command\n", __func__);
- rc = -ENOMEM;
- goto out;
- }
-
- cmd = (struct afu_cmd *)PTR_ALIGN(buf, __alignof__(*cmd));
-
-retry:
- memset(cmd, 0, sizeof(*cmd));
- memcpy(&cmd->rcb, rcb, sizeof(*rcb));
- INIT_LIST_HEAD(&cmd->queue);
- init_completion(&cmd->cevent);
- cmd->parent = afu;
- cmd->hwq_index = hwq->index;
- cmd->rcb.ctx_id = hwq->ctx_hndl;
-
- dev_dbg(dev, "%s: afu=%p cmd=%p type=%02x nretry=%d\n",
- __func__, afu, cmd, cmd->rcb.cdb[0], nretry);
-
- rc = afu->send_cmd(afu, cmd);
- if (unlikely(rc)) {
- rc = -ENOBUFS;
- goto out;
- }
-
- rc = wait_resp(afu, cmd);
- switch (rc) {
- case -ETIMEDOUT:
- rc = afu->context_reset(hwq);
- if (rc) {
- /* Delete the command from pending_cmds list */
- spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
- list_del(&cmd->list);
- spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
-
- cxlflash_schedule_async_reset(cfg);
- break;
- }
- fallthrough; /* to retry */
- case -EAGAIN:
- if (++nretry < 2)
- goto retry;
- fallthrough; /* to exit */
- default:
- break;
- }
-
- if (rcb->ioasa)
- *rcb->ioasa = cmd->sa;
-out:
- atomic_dec(&afu->cmds_active);
- mutex_unlock(&afu->sync_active);
- kfree(buf);
- dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
- return rc;
-}
-
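send_afu_cmd() over-allocates its scratch buffer by __alignof__(*cmd) - 1 bytes and then rounds the pointer up with PTR_ALIGN() so the command lands on a naturally aligned boundary, while kfree() is still handed the original pointer. A small userspace sketch of that over-allocate-and-align idiom (not driver code; struct afu_cmd_like and its 64-byte alignment are made up for illustration):

/*
 * Userspace sketch of the over-allocate-and-align idiom used for the
 * AFU command buffer above; not driver code.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct afu_cmd_like {			/* stand-in for struct afu_cmd */
	char payload[128];
} __attribute__((aligned(64)));

int main(void)
{
	size_t align = __alignof__(struct afu_cmd_like);
	char *buf = malloc(sizeof(struct afu_cmd_like) + align - 1);
	uintptr_t p;
	struct afu_cmd_like *cmd;

	if (!buf)
		return 1;

	/* Equivalent of PTR_ALIGN(buf, align): round the pointer up. */
	p = ((uintptr_t)buf + align - 1) & ~(uintptr_t)(align - 1);
	cmd = (struct afu_cmd_like *)p;

	printf("buf=%p cmd=%p (aligned to %zu)\n", (void *)buf, (void *)cmd, align);

	free(buf);			/* free the original pointer, as the driver does */
	return 0;
}
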
-/**
- * cxlflash_afu_sync() - builds and sends an AFU sync command
- * @afu: AFU associated with the host.
- * @ctx: Identifies context requesting sync.
- * @res: Identifies resource requesting sync.
- * @mode: Type of sync to issue (lightweight, heavyweight, global).
- *
- * AFU sync operations are only necessary and allowed when the device is
- * operating normally. When not operating normally, sync requests can occur as
- * part of cleaning up resources associated with an adapter prior to removal.
- * In this scenario, these requests are simply ignored (safe due to the AFU
- * going away).
- *
- * Return:
- * 0 on success, -errno on failure
- */
-int cxlflash_afu_sync(struct afu *afu, ctx_hndl_t ctx, res_hndl_t res, u8 mode)
-{
- struct cxlflash_cfg *cfg = afu->parent;
- struct device *dev = &cfg->dev->dev;
- struct sisl_ioarcb rcb = { 0 };
-
- dev_dbg(dev, "%s: afu=%p ctx=%u res=%u mode=%u\n",
- __func__, afu, ctx, res, mode);
-
- rcb.req_flags = SISL_REQ_FLAGS_AFU_CMD;
- rcb.msi = SISL_MSI_RRQ_UPDATED;
- rcb.timeout = MC_AFU_SYNC_TIMEOUT;
-
- rcb.cdb[0] = SISL_AFU_CMD_SYNC;
- rcb.cdb[1] = mode;
- put_unaligned_be16(ctx, &rcb.cdb[2]);
- put_unaligned_be32(res, &rcb.cdb[4]);
-
- return send_afu_cmd(afu, &rcb);
-}
-
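cxlflash_afu_sync() packs the sync CDB with the mode at byte 1, the 16-bit context handle big-endian at bytes 2-3, and the 32-bit resource handle big-endian at bytes 4-7 via the put_unaligned_be*() helpers. The userspace sketch below reproduces that layout with explicit shifts; the opcode and handle values are placeholders for illustration, not the real SISL encodings.

/* Userspace sketch of the AFU sync CDB layout above; not driver code. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void put_be16(uint16_t v, uint8_t *p) { p[0] = v >> 8; p[1] = v; }
static void put_be32(uint32_t v, uint8_t *p)
{
	p[0] = v >> 24; p[1] = v >> 16; p[2] = v >> 8; p[3] = v;
}

int main(void)
{
	uint8_t cdb[16];
	uint16_t ctx = 0x0102;		/* example context handle */
	uint32_t res = 0x0A0B0C0D;	/* example resource handle */

	memset(cdb, 0, sizeof(cdb));
	cdb[0] = 0xC0;			/* placeholder for SISL_AFU_CMD_SYNC */
	cdb[1] = 0x01;			/* sync mode */
	put_be16(ctx, &cdb[2]);
	put_be32(res, &cdb[4]);

	for (int i = 0; i < 8; i++)
		printf("%02x ", cdb[i]);
	printf("\n");			/* c0 01 01 02 0a 0b 0c 0d */
	return 0;
}
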
-/**
- * cxlflash_eh_abort_handler() - abort a SCSI command
- * @scp: SCSI command to abort.
- *
- * CXL Flash devices do not support a single command abort. Reset the context
- * as per SISLite specification. Flush any pending commands in the hardware
- * queue before the reset.
- *
- * Return: SUCCESS/FAILED as defined in scsi/scsi.h
- */
-static int cxlflash_eh_abort_handler(struct scsi_cmnd *scp)
-{
- int rc = FAILED;
- struct Scsi_Host *host = scp->device->host;
- struct cxlflash_cfg *cfg = shost_priv(host);
- struct afu_cmd *cmd = sc_to_afuc(scp);
- struct device *dev = &cfg->dev->dev;
- struct afu *afu = cfg->afu;
- struct hwq *hwq = get_hwq(afu, cmd->hwq_index);
-
- dev_dbg(dev, "%s: (scp=%p) %d/%d/%d/%llu "
- "cdb=(%08x-%08x-%08x-%08x)\n", __func__, scp, host->host_no,
- scp->device->channel, scp->device->id, scp->device->lun,
- get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
- get_unaligned_be32(&((u32 *)scp->cmnd)[1]),
- get_unaligned_be32(&((u32 *)scp->cmnd)[2]),
- get_unaligned_be32(&((u32 *)scp->cmnd)[3]));
-
- /* When the state is not normal, another reset/reload is in progress.
- * Return failed and the mid-layer will invoke host reset handler.
- */
- if (cfg->state != STATE_NORMAL) {
- dev_dbg(dev, "%s: Invalid state for abort, state=%d\n",
- __func__, cfg->state);
- goto out;
- }
-
- rc = afu->context_reset(hwq);
- if (unlikely(rc))
- goto out;
-
- rc = SUCCESS;
-
-out:
- dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
- return rc;
-}
-
-/**
- * cxlflash_eh_device_reset_handler() - reset a single LUN
- * @scp: SCSI command to send.
- *
- * Return:
- * SUCCESS as defined in scsi/scsi.h
- * FAILED as defined in scsi/scsi.h
- */
-static int cxlflash_eh_device_reset_handler(struct scsi_cmnd *scp)
-{
- int rc = SUCCESS;
- struct scsi_device *sdev = scp->device;
- struct Scsi_Host *host = sdev->host;
- struct cxlflash_cfg *cfg = shost_priv(host);
- struct device *dev = &cfg->dev->dev;
- int rcr = 0;
-
- dev_dbg(dev, "%s: %d/%d/%d/%llu\n", __func__,
- host->host_no, sdev->channel, sdev->id, sdev->lun);
-retry:
- switch (cfg->state) {
- case STATE_NORMAL:
- rcr = send_tmf(cfg, sdev, TMF_LUN_RESET);
- if (unlikely(rcr))
- rc = FAILED;
- break;
- case STATE_RESET:
- wait_event(cfg->reset_waitq, cfg->state != STATE_RESET);
- goto retry;
- default:
- rc = FAILED;
- break;
- }
-
- dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
- return rc;
-}
-
-/**
- * cxlflash_eh_host_reset_handler() - reset the host adapter
- * @scp: SCSI command from stack identifying host.
- *
- * Following a reset, the state is evaluated again in case an EEH occurred
- * during the reset. In such a scenario, the host reset will either yield
- * until the EEH recovery is complete or return success or failure based
- * upon the current device state.
- *
- * Return:
- * SUCCESS as defined in scsi/scsi.h
- * FAILED as defined in scsi/scsi.h
- */
-static int cxlflash_eh_host_reset_handler(struct scsi_cmnd *scp)
-{
- int rc = SUCCESS;
- int rcr = 0;
- struct Scsi_Host *host = scp->device->host;
- struct cxlflash_cfg *cfg = shost_priv(host);
- struct device *dev = &cfg->dev->dev;
-
- dev_dbg(dev, "%s: %d\n", __func__, host->host_no);
-
- switch (cfg->state) {
- case STATE_NORMAL:
- cfg->state = STATE_RESET;
- drain_ioctls(cfg);
- cxlflash_mark_contexts_error(cfg);
- rcr = afu_reset(cfg);
- if (rcr) {
- rc = FAILED;
- cfg->state = STATE_FAILTERM;
- } else
- cfg->state = STATE_NORMAL;
- wake_up_all(&cfg->reset_waitq);
- ssleep(1);
- fallthrough;
- case STATE_RESET:
- wait_event(cfg->reset_waitq, cfg->state != STATE_RESET);
- if (cfg->state == STATE_NORMAL)
- break;
- fallthrough;
- default:
- rc = FAILED;
- break;
- }
-
- dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
- return rc;
-}
-
-/**
- * cxlflash_change_queue_depth() - change the queue depth for the device
- * @sdev: SCSI device destined for queue depth change.
- * @qdepth: Requested queue depth value to set.
- *
- * The requested queue depth is capped to the maximum supported value.
- *
- * Return: The actual queue depth set.
- */
-static int cxlflash_change_queue_depth(struct scsi_device *sdev, int qdepth)
-{
-
- if (qdepth > CXLFLASH_MAX_CMDS_PER_LUN)
- qdepth = CXLFLASH_MAX_CMDS_PER_LUN;
-
- scsi_change_queue_depth(sdev, qdepth);
- return sdev->queue_depth;
-}
-
-/**
- * cxlflash_show_port_status() - queries and presents the current port status
- * @port: Desired port for status reporting.
- * @cfg: Internal structure associated with the host.
- * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII.
- *
- * Return: The size of the ASCII string returned in @buf or -EINVAL.
- */
-static ssize_t cxlflash_show_port_status(u32 port,
- struct cxlflash_cfg *cfg,
- char *buf)
-{
- struct device *dev = &cfg->dev->dev;
- char *disp_status;
- u64 status;
- __be64 __iomem *fc_port_regs;
-
- WARN_ON(port >= MAX_FC_PORTS);
-
- if (port >= cfg->num_fc_ports) {
- dev_info(dev, "%s: Port %d not supported on this card.\n",
- __func__, port);
- return -EINVAL;
- }
-
- fc_port_regs = get_fc_port_regs(cfg, port);
- status = readq_be(&fc_port_regs[FC_MTIP_STATUS / 8]);
- status &= FC_MTIP_STATUS_MASK;
-
- if (status == FC_MTIP_STATUS_ONLINE)
- disp_status = "online";
- else if (status == FC_MTIP_STATUS_OFFLINE)
- disp_status = "offline";
- else
- disp_status = "unknown";
-
- return scnprintf(buf, PAGE_SIZE, "%s\n", disp_status);
-}
-
-/**
- * port0_show() - queries and presents the current status of port 0
- * @dev: Generic device associated with the host owning the port.
- * @attr: Device attribute representing the port.
- * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII.
- *
- * Return: The size of the ASCII string returned in @buf.
- */
-static ssize_t port0_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
-
- return cxlflash_show_port_status(0, cfg, buf);
-}
-
-/**
- * port1_show() - queries and presents the current status of port 1
- * @dev: Generic device associated with the host owning the port.
- * @attr: Device attribute representing the port.
- * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII.
- *
- * Return: The size of the ASCII string returned in @buf.
- */
-static ssize_t port1_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
-
- return cxlflash_show_port_status(1, cfg, buf);
-}
-
-/**
- * port2_show() - queries and presents the current status of port 2
- * @dev: Generic device associated with the host owning the port.
- * @attr: Device attribute representing the port.
- * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII.
- *
- * Return: The size of the ASCII string returned in @buf.
- */
-static ssize_t port2_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
-
- return cxlflash_show_port_status(2, cfg, buf);
-}
-
-/**
- * port3_show() - queries and presents the current status of port 3
- * @dev: Generic device associated with the host owning the port.
- * @attr: Device attribute representing the port.
- * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII.
- *
- * Return: The size of the ASCII string returned in @buf.
- */
-static ssize_t port3_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
-
- return cxlflash_show_port_status(3, cfg, buf);
-}
-
-/**
- * lun_mode_show() - presents the current LUN mode of the host
- * @dev: Generic device associated with the host.
- * @attr: Device attribute representing the LUN mode.
- * @buf: Buffer of length PAGE_SIZE to report back the LUN mode in ASCII.
- *
- * Return: The size of the ASCII string returned in @buf.
- */
-static ssize_t lun_mode_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
- struct afu *afu = cfg->afu;
-
- return scnprintf(buf, PAGE_SIZE, "%u\n", afu->internal_lun);
-}
-
-/**
- * lun_mode_store() - sets the LUN mode of the host
- * @dev: Generic device associated with the host.
- * @attr: Device attribute representing the LUN mode.
- * @buf: Buffer of length PAGE_SIZE containing the LUN mode in ASCII.
- * @count:	Length of data residing in @buf.
- *
- * The CXL Flash AFU supports a dummy LUN mode where the external
- * links and storage are not required. Space on the FPGA is used
- * to create 1 or 2 small LUNs which are presented to the system
- * as if they were a normal storage device. This feature is useful
- * during development and also provides manufacturing with a way
- * to test the AFU without an actual device.
- *
- * 0 = external LUN[s] (default)
- * 1 = internal LUN (1 x 64K, 512B blocks, id 0)
- * 2 = internal LUN (1 x 64K, 4K blocks, id 0)
- * 3 = internal LUN (2 x 32K, 512B blocks, ids 0,1)
- * 4 = internal LUN (2 x 32K, 4K blocks, ids 0,1)
- *
- * Return: The number of bytes consumed from @buf (i.e. @count).
- */
-static ssize_t lun_mode_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct Scsi_Host *shost = class_to_shost(dev);
- struct cxlflash_cfg *cfg = shost_priv(shost);
- struct afu *afu = cfg->afu;
- int rc;
- u32 lun_mode;
-
- rc = kstrtouint(buf, 10, &lun_mode);
- if (!rc && (lun_mode < 5) && (lun_mode != afu->internal_lun)) {
- afu->internal_lun = lun_mode;
-
- /*
- * When configured for internal LUN, there is only one channel,
- * channel number 0, else there will be one less than the number
- * of fc ports for this card.
- */
- if (afu->internal_lun)
- shost->max_channel = 0;
- else
- shost->max_channel = PORTNUM2CHAN(cfg->num_fc_ports);
-
- afu_reset(cfg);
- scsi_scan_host(cfg->host);
- }
-
- return count;
-}
-
-/**
- * ioctl_version_show() - presents the current ioctl version of the host
- * @dev: Generic device associated with the host.
- * @attr: Device attribute representing the ioctl version.
- * @buf: Buffer of length PAGE_SIZE to report back the ioctl version.
- *
- * Return: The size of the ASCII string returned in @buf.
- */
-static ssize_t ioctl_version_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- ssize_t bytes = 0;
-
- bytes = scnprintf(buf, PAGE_SIZE,
- "disk: %u\n", DK_CXLFLASH_VERSION_0);
- bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes,
- "host: %u\n", HT_CXLFLASH_VERSION_0);
-
- return bytes;
-}
-
-/**
- * cxlflash_show_port_lun_table() - queries and presents the port LUN table
- * @port: Desired port for status reporting.
- * @cfg: Internal structure associated with the host.
- * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII.
- *
- * Return: The size of the ASCII string returned in @buf or -EINVAL.
- */
-static ssize_t cxlflash_show_port_lun_table(u32 port,
- struct cxlflash_cfg *cfg,
- char *buf)
-{
- struct device *dev = &cfg->dev->dev;
- __be64 __iomem *fc_port_luns;
- int i;
- ssize_t bytes = 0;
-
- WARN_ON(port >= MAX_FC_PORTS);
-
- if (port >= cfg->num_fc_ports) {
- dev_info(dev, "%s: Port %d not supported on this card.\n",
- __func__, port);
- return -EINVAL;
- }
-
- fc_port_luns = get_fc_port_luns(cfg, port);
-
- for (i = 0; i < CXLFLASH_NUM_VLUNS; i++)
- bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes,
- "%03d: %016llx\n",
- i, readq_be(&fc_port_luns[i]));
- return bytes;
-}
-
-/**
- * port0_lun_table_show() - presents the current LUN table of port 0
- * @dev: Generic device associated with the host owning the port.
- * @attr: Device attribute representing the port.
- * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII.
- *
- * Return: The size of the ASCII string returned in @buf.
- */
-static ssize_t port0_lun_table_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
-
- return cxlflash_show_port_lun_table(0, cfg, buf);
-}
-
-/**
- * port1_lun_table_show() - presents the current LUN table of port 1
- * @dev: Generic device associated with the host owning the port.
- * @attr: Device attribute representing the port.
- * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII.
- *
- * Return: The size of the ASCII string returned in @buf.
- */
-static ssize_t port1_lun_table_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
-
- return cxlflash_show_port_lun_table(1, cfg, buf);
-}
-
-/**
- * port2_lun_table_show() - presents the current LUN table of port 2
- * @dev: Generic device associated with the host owning the port.
- * @attr: Device attribute representing the port.
- * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII.
- *
- * Return: The size of the ASCII string returned in @buf.
- */
-static ssize_t port2_lun_table_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
-
- return cxlflash_show_port_lun_table(2, cfg, buf);
-}
-
-/**
- * port3_lun_table_show() - presents the current LUN table of port 3
- * @dev: Generic device associated with the host owning the port.
- * @attr: Device attribute representing the port.
- * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII.
- *
- * Return: The size of the ASCII string returned in @buf.
- */
-static ssize_t port3_lun_table_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
-
- return cxlflash_show_port_lun_table(3, cfg, buf);
-}
-
-/**
- * irqpoll_weight_show() - presents the current IRQ poll weight for the host
- * @dev: Generic device associated with the host.
- * @attr: Device attribute representing the IRQ poll weight.
- * @buf: Buffer of length PAGE_SIZE to report back the current IRQ poll
- * weight in ASCII.
- *
- * An IRQ poll weight of 0 indicates polling is disabled.
- *
- * Return: The size of the ASCII string returned in @buf.
- */
-static ssize_t irqpoll_weight_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
- struct afu *afu = cfg->afu;
-
- return scnprintf(buf, PAGE_SIZE, "%u\n", afu->irqpoll_weight);
-}
-
-/**
- * irqpoll_weight_store() - sets the current IRQ poll weight for the host
- * @dev: Generic device associated with the host.
- * @attr: Device attribute representing the IRQ poll weight.
- * @buf: Buffer of length PAGE_SIZE containing the desired IRQ poll
- * weight in ASCII.
- * @count:	Length of data residing in @buf.
- *
- * An IRQ poll weight of 0 indicates polling is disabled.
- *
- * Return: The number of bytes consumed from @buf on success, -EINVAL on failure.
- */
-static ssize_t irqpoll_weight_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
- struct device *cfgdev = &cfg->dev->dev;
- struct afu *afu = cfg->afu;
- struct hwq *hwq;
- u32 weight;
- int rc, i;
-
- rc = kstrtouint(buf, 10, &weight);
- if (rc)
- return -EINVAL;
-
- if (weight > 256) {
- dev_info(cfgdev,
- "Invalid IRQ poll weight. It must be 256 or less.\n");
- return -EINVAL;
- }
-
- if (weight == afu->irqpoll_weight) {
- dev_info(cfgdev,
-			 "Requested IRQ poll weight matches the current weight.\n");
- return -EINVAL;
- }
-
- if (afu_is_irqpoll_enabled(afu)) {
- for (i = 0; i < afu->num_hwqs; i++) {
- hwq = get_hwq(afu, i);
-
- irq_poll_disable(&hwq->irqpoll);
- }
- }
-
- afu->irqpoll_weight = weight;
-
- if (weight > 0) {
- for (i = 0; i < afu->num_hwqs; i++) {
- hwq = get_hwq(afu, i);
-
- irq_poll_init(&hwq->irqpoll, weight, cxlflash_irqpoll);
- }
- }
-
- return count;
-}
-
-/**
- * num_hwqs_show() - presents the number of hardware queues for the host
- * @dev: Generic device associated with the host.
- * @attr: Device attribute representing the number of hardware queues.
- * @buf: Buffer of length PAGE_SIZE to report back the number of hardware
- * queues in ASCII.
- *
- * Return: The size of the ASCII string returned in @buf.
- */
-static ssize_t num_hwqs_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
- struct afu *afu = cfg->afu;
-
- return scnprintf(buf, PAGE_SIZE, "%u\n", afu->num_hwqs);
-}
-
-/**
- * num_hwqs_store() - sets the number of hardware queues for the host
- * @dev: Generic device associated with the host.
- * @attr: Device attribute representing the number of hardware queues.
- * @buf: Buffer of length PAGE_SIZE containing the number of hardware
- * queues in ASCII.
- * @count:	Length of data residing in @buf.
- *
- * n > 0: num_hwqs = n
- * n = 0: num_hwqs = num_online_cpus()
- * n < 0: num_hwqs = num_online_cpus() / abs(n)
- *
- * Return: The number of bytes consumed from @buf on success, -EINVAL on failure.
- */
-static ssize_t num_hwqs_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
- struct afu *afu = cfg->afu;
- int rc;
- int nhwqs, num_hwqs;
-
- rc = kstrtoint(buf, 10, &nhwqs);
- if (rc)
- return -EINVAL;
-
- if (nhwqs >= 1)
- num_hwqs = nhwqs;
- else if (nhwqs == 0)
- num_hwqs = num_online_cpus();
- else
- num_hwqs = num_online_cpus() / abs(nhwqs);
-
- afu->desired_hwqs = min(num_hwqs, CXLFLASH_MAX_HWQS);
- WARN_ON_ONCE(afu->desired_hwqs == 0);
-
-retry:
- switch (cfg->state) {
- case STATE_NORMAL:
- cfg->state = STATE_RESET;
- drain_ioctls(cfg);
- cxlflash_mark_contexts_error(cfg);
- rc = afu_reset(cfg);
- if (rc)
- cfg->state = STATE_FAILTERM;
- else
- cfg->state = STATE_NORMAL;
- wake_up_all(&cfg->reset_waitq);
- break;
- case STATE_RESET:
- wait_event(cfg->reset_waitq, cfg->state != STATE_RESET);
- if (cfg->state == STATE_NORMAL)
- goto retry;
- fallthrough;
- default:
- /* Ideally should not happen */
- dev_err(dev, "%s: Device is not ready, state=%d\n",
- __func__, cfg->state);
- break;
- }
-
- return count;
-}
-
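The mapping documented in the comment above (n > 0 uses n queues, n == 0 uses one queue per online CPU, n < 0 divides the online CPU count by |n|, with the result capped at CXLFLASH_MAX_HWQS) condenses to a few lines. A userspace sketch of just that computation, not driver code; the cap value of 8 is an assumed stand-in.

/* Userspace sketch of the num_hwqs mapping described above; not driver code. */
#include <stdio.h>
#include <stdlib.h>

#define MAX_HWQS	8	/* assumed stand-in for CXLFLASH_MAX_HWQS */

static int desired_hwqs(int n, int online_cpus)
{
	int num;

	if (n >= 1)
		num = n;
	else if (n == 0)
		num = online_cpus;
	else
		num = online_cpus / abs(n);

	return num < MAX_HWQS ? num : MAX_HWQS;
}

int main(void)
{
	printf("%d %d %d\n",
	       desired_hwqs(4, 16),	/* 4 */
	       desired_hwqs(0, 16),	/* 8 (capped) */
	       desired_hwqs(-4, 16));	/* 4 */
	return 0;
}
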
-static const char *hwq_mode_name[MAX_HWQ_MODE] = { "rr", "tag", "cpu" };
-
-/**
- * hwq_mode_show() - presents the HWQ steering mode for the host
- * @dev: Generic device associated with the host.
- * @attr: Device attribute representing the HWQ steering mode.
- * @buf: Buffer of length PAGE_SIZE to report back the HWQ steering mode
- * as a character string.
- *
- * Return: The size of the ASCII string returned in @buf.
- */
-static ssize_t hwq_mode_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
- struct afu *afu = cfg->afu;
-
- return scnprintf(buf, PAGE_SIZE, "%s\n", hwq_mode_name[afu->hwq_mode]);
-}
-
-/**
- * hwq_mode_store() - sets the HWQ steering mode for the host
- * @dev: Generic device associated with the host.
- * @attr: Device attribute representing the HWQ steering mode.
- * @buf: Buffer of length PAGE_SIZE containing the HWQ steering mode
- * as a character string.
- * @count:	Length of data residing in @buf.
- *
- * rr = Round-Robin
- * tag = Block MQ Tagging
- * cpu = CPU Affinity
- *
- * Return: The number of bytes consumed from @buf on success, -EINVAL on failure.
- */
-static ssize_t hwq_mode_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct Scsi_Host *shost = class_to_shost(dev);
- struct cxlflash_cfg *cfg = shost_priv(shost);
- struct device *cfgdev = &cfg->dev->dev;
- struct afu *afu = cfg->afu;
- int i;
- u32 mode = MAX_HWQ_MODE;
-
- for (i = 0; i < MAX_HWQ_MODE; i++) {
- if (!strncmp(hwq_mode_name[i], buf, strlen(hwq_mode_name[i]))) {
- mode = i;
- break;
- }
- }
-
- if (mode >= MAX_HWQ_MODE) {
- dev_info(cfgdev, "Invalid HWQ steering mode.\n");
- return -EINVAL;
- }
-
- afu->hwq_mode = mode;
-
- return count;
-}
-
-/**
- * mode_show() - presents the current mode of the device
- * @dev: Generic device associated with the device.
- * @attr: Device attribute representing the device mode.
- * @buf: Buffer of length PAGE_SIZE to report back the dev mode in ASCII.
- *
- * Return: The size of the ASCII string returned in @buf.
- */
-static ssize_t mode_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct scsi_device *sdev = to_scsi_device(dev);
-
- return scnprintf(buf, PAGE_SIZE, "%s\n",
- sdev->hostdata ? "superpipe" : "legacy");
-}
-
-/*
- * Host attributes
- */
-static DEVICE_ATTR_RO(port0);
-static DEVICE_ATTR_RO(port1);
-static DEVICE_ATTR_RO(port2);
-static DEVICE_ATTR_RO(port3);
-static DEVICE_ATTR_RW(lun_mode);
-static DEVICE_ATTR_RO(ioctl_version);
-static DEVICE_ATTR_RO(port0_lun_table);
-static DEVICE_ATTR_RO(port1_lun_table);
-static DEVICE_ATTR_RO(port2_lun_table);
-static DEVICE_ATTR_RO(port3_lun_table);
-static DEVICE_ATTR_RW(irqpoll_weight);
-static DEVICE_ATTR_RW(num_hwqs);
-static DEVICE_ATTR_RW(hwq_mode);
-
-static struct attribute *cxlflash_host_attrs[] = {
- &dev_attr_port0.attr,
- &dev_attr_port1.attr,
- &dev_attr_port2.attr,
- &dev_attr_port3.attr,
- &dev_attr_lun_mode.attr,
- &dev_attr_ioctl_version.attr,
- &dev_attr_port0_lun_table.attr,
- &dev_attr_port1_lun_table.attr,
- &dev_attr_port2_lun_table.attr,
- &dev_attr_port3_lun_table.attr,
- &dev_attr_irqpoll_weight.attr,
- &dev_attr_num_hwqs.attr,
- &dev_attr_hwq_mode.attr,
- NULL
-};
-
-ATTRIBUTE_GROUPS(cxlflash_host);
-
-/*
- * Device attributes
- */
-static DEVICE_ATTR_RO(mode);
-
-static struct attribute *cxlflash_dev_attrs[] = {
- &dev_attr_mode.attr,
- NULL
-};
-
-ATTRIBUTE_GROUPS(cxlflash_dev);
-
-/*
- * Host template
- */
-static struct scsi_host_template driver_template = {
- .module = THIS_MODULE,
- .name = CXLFLASH_ADAPTER_NAME,
- .info = cxlflash_driver_info,
- .ioctl = cxlflash_ioctl,
- .proc_name = CXLFLASH_NAME,
- .queuecommand = cxlflash_queuecommand,
- .eh_abort_handler = cxlflash_eh_abort_handler,
- .eh_device_reset_handler = cxlflash_eh_device_reset_handler,
- .eh_host_reset_handler = cxlflash_eh_host_reset_handler,
- .change_queue_depth = cxlflash_change_queue_depth,
- .cmd_per_lun = CXLFLASH_MAX_CMDS_PER_LUN,
- .can_queue = CXLFLASH_MAX_CMDS,
- .cmd_size = sizeof(struct afu_cmd) + __alignof__(struct afu_cmd) - 1,
- .this_id = -1,
- .sg_tablesize = 1, /* No scatter gather support */
- .max_sectors = CXLFLASH_MAX_SECTORS,
- .shost_groups = cxlflash_host_groups,
- .sdev_groups = cxlflash_dev_groups,
-};
-
-/*
- * Device dependent values
- */
-static struct dev_dependent_vals dev_corsa_vals = { CXLFLASH_MAX_SECTORS,
- CXLFLASH_WWPN_VPD_REQUIRED };
-static struct dev_dependent_vals dev_flash_gt_vals = { CXLFLASH_MAX_SECTORS,
- CXLFLASH_NOTIFY_SHUTDOWN };
-static struct dev_dependent_vals dev_briard_vals = { CXLFLASH_MAX_SECTORS,
- (CXLFLASH_NOTIFY_SHUTDOWN |
- CXLFLASH_OCXL_DEV) };
-
-/*
- * PCI device binding table
- */
-static const struct pci_device_id cxlflash_pci_table[] = {
- {PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CORSA,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_corsa_vals},
- {PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_FLASH_GT,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_flash_gt_vals},
- {PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_BRIARD,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_briard_vals},
- {}
-};
-
-MODULE_DEVICE_TABLE(pci, cxlflash_pci_table);
-
-/**
- * cxlflash_worker_thread() - work thread handler for the AFU
- * @work: Work structure contained within cxlflash associated with host.
- *
- * Handles the following events:
- * - Link reset which cannot be performed on interrupt context due to
- * blocking up to a few seconds
- * - Rescan the host
- */
-static void cxlflash_worker_thread(struct work_struct *work)
-{
- struct cxlflash_cfg *cfg = container_of(work, struct cxlflash_cfg,
- work_q);
- struct afu *afu = cfg->afu;
- struct device *dev = &cfg->dev->dev;
- __be64 __iomem *fc_port_regs;
- int port;
- ulong lock_flags;
-
- /* Avoid MMIO if the device has failed */
-
- if (cfg->state != STATE_NORMAL)
- return;
-
- spin_lock_irqsave(cfg->host->host_lock, lock_flags);
-
- if (cfg->lr_state == LINK_RESET_REQUIRED) {
- port = cfg->lr_port;
- if (port < 0)
- dev_err(dev, "%s: invalid port index %d\n",
- __func__, port);
- else {
- spin_unlock_irqrestore(cfg->host->host_lock,
- lock_flags);
-
- /* The reset can block... */
- fc_port_regs = get_fc_port_regs(cfg, port);
- afu_link_reset(afu, port, fc_port_regs);
- spin_lock_irqsave(cfg->host->host_lock, lock_flags);
- }
-
- cfg->lr_state = LINK_RESET_COMPLETE;
- }
-
- spin_unlock_irqrestore(cfg->host->host_lock, lock_flags);
-
- if (atomic_dec_if_positive(&cfg->scan_host_needed) >= 0)
- scsi_scan_host(cfg->host);
-}
-
-/**
- * cxlflash_chr_open() - character device open handler
- * @inode: Device inode associated with this character device.
- * @file: File pointer for this device.
- *
- * Only users with admin privileges are allowed to open the character device.
- *
- * Return: 0 on success, -errno on failure
- */
-static int cxlflash_chr_open(struct inode *inode, struct file *file)
-{
- struct cxlflash_cfg *cfg;
-
- if (!capable(CAP_SYS_ADMIN))
- return -EACCES;
-
- cfg = container_of(inode->i_cdev, struct cxlflash_cfg, cdev);
- file->private_data = cfg;
-
- return 0;
-}
-
-/**
- * decode_hioctl() - translates encoded host ioctl to easily identifiable string
- * @cmd: The host ioctl command to decode.
- *
- * Return: A string identifying the decoded host ioctl.
- */
-static char *decode_hioctl(unsigned int cmd)
-{
- switch (cmd) {
- case HT_CXLFLASH_LUN_PROVISION:
- return __stringify_1(HT_CXLFLASH_LUN_PROVISION);
- }
-
- return "UNKNOWN";
-}
-
-/**
- * cxlflash_lun_provision() - host LUN provisioning handler
- * @cfg: Internal structure associated with the host.
- * @arg: Kernel copy of userspace ioctl data structure.
- *
- * Return: 0 on success, -errno on failure
- */
-static int cxlflash_lun_provision(struct cxlflash_cfg *cfg, void *arg)
-{
- struct ht_cxlflash_lun_provision *lunprov = arg;
- struct afu *afu = cfg->afu;
- struct device *dev = &cfg->dev->dev;
- struct sisl_ioarcb rcb;
- struct sisl_ioasa asa;
- __be64 __iomem *fc_port_regs;
- u16 port = lunprov->port;
- u16 scmd = lunprov->hdr.subcmd;
- u16 type;
- u64 reg;
- u64 size;
- u64 lun_id;
- int rc = 0;
-
- if (!afu_is_lun_provision(afu)) {
- rc = -ENOTSUPP;
- goto out;
- }
-
- if (port >= cfg->num_fc_ports) {
- rc = -EINVAL;
- goto out;
- }
-
- switch (scmd) {
- case HT_CXLFLASH_LUN_PROVISION_SUBCMD_CREATE_LUN:
- type = SISL_AFU_LUN_PROVISION_CREATE;
- size = lunprov->size;
- lun_id = 0;
- break;
- case HT_CXLFLASH_LUN_PROVISION_SUBCMD_DELETE_LUN:
- type = SISL_AFU_LUN_PROVISION_DELETE;
- size = 0;
- lun_id = lunprov->lun_id;
- break;
- case HT_CXLFLASH_LUN_PROVISION_SUBCMD_QUERY_PORT:
- fc_port_regs = get_fc_port_regs(cfg, port);
-
- reg = readq_be(&fc_port_regs[FC_MAX_NUM_LUNS / 8]);
- lunprov->max_num_luns = reg;
- reg = readq_be(&fc_port_regs[FC_CUR_NUM_LUNS / 8]);
- lunprov->cur_num_luns = reg;
- reg = readq_be(&fc_port_regs[FC_MAX_CAP_PORT / 8]);
- lunprov->max_cap_port = reg;
- reg = readq_be(&fc_port_regs[FC_CUR_CAP_PORT / 8]);
- lunprov->cur_cap_port = reg;
-
- goto out;
- default:
- rc = -EINVAL;
- goto out;
- }
-
- memset(&rcb, 0, sizeof(rcb));
- memset(&asa, 0, sizeof(asa));
- rcb.req_flags = SISL_REQ_FLAGS_AFU_CMD;
- rcb.lun_id = lun_id;
- rcb.msi = SISL_MSI_RRQ_UPDATED;
- rcb.timeout = MC_LUN_PROV_TIMEOUT;
- rcb.ioasa = &asa;
-
- rcb.cdb[0] = SISL_AFU_CMD_LUN_PROVISION;
- rcb.cdb[1] = type;
- rcb.cdb[2] = port;
- put_unaligned_be64(size, &rcb.cdb[8]);
-
- rc = send_afu_cmd(afu, &rcb);
- if (rc) {
- dev_err(dev, "%s: send_afu_cmd failed rc=%d asc=%08x afux=%x\n",
- __func__, rc, asa.ioasc, asa.afu_extra);
- goto out;
- }
-
- if (scmd == HT_CXLFLASH_LUN_PROVISION_SUBCMD_CREATE_LUN) {
- lunprov->lun_id = (u64)asa.lunid_hi << 32 | asa.lunid_lo;
- memcpy(lunprov->wwid, asa.wwid, sizeof(lunprov->wwid));
- }
-out:
- dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
- return rc;
-}
-
-/**
- * cxlflash_afu_debug() - host AFU debug handler
- * @cfg: Internal structure associated with the host.
- * @arg: Kernel copy of userspace ioctl data structure.
- *
- * For debug requests requiring a data buffer, always provide an aligned
- * (cache line) buffer to the AFU to appease any alignment requirements.
- *
- * Return: 0 on success, -errno on failure
- */
-static int cxlflash_afu_debug(struct cxlflash_cfg *cfg, void *arg)
-{
- struct ht_cxlflash_afu_debug *afu_dbg = arg;
- struct afu *afu = cfg->afu;
- struct device *dev = &cfg->dev->dev;
- struct sisl_ioarcb rcb;
- struct sisl_ioasa asa;
- char *buf = NULL;
- char *kbuf = NULL;
- void __user *ubuf = (__force void __user *)afu_dbg->data_ea;
- u16 req_flags = SISL_REQ_FLAGS_AFU_CMD;
- u32 ulen = afu_dbg->data_len;
- bool is_write = afu_dbg->hdr.flags & HT_CXLFLASH_HOST_WRITE;
- int rc = 0;
-
- if (!afu_is_afu_debug(afu)) {
- rc = -ENOTSUPP;
- goto out;
- }
-
- if (ulen) {
- req_flags |= SISL_REQ_FLAGS_SUP_UNDERRUN;
-
- if (ulen > HT_CXLFLASH_AFU_DEBUG_MAX_DATA_LEN) {
- rc = -EINVAL;
- goto out;
- }
-
- buf = kmalloc(ulen + cache_line_size() - 1, GFP_KERNEL);
- if (unlikely(!buf)) {
- rc = -ENOMEM;
- goto out;
- }
-
- kbuf = PTR_ALIGN(buf, cache_line_size());
-
- if (is_write) {
- req_flags |= SISL_REQ_FLAGS_HOST_WRITE;
-
- if (copy_from_user(kbuf, ubuf, ulen)) {
- rc = -EFAULT;
- goto out;
- }
- }
- }
-
- memset(&rcb, 0, sizeof(rcb));
- memset(&asa, 0, sizeof(asa));
-
- rcb.req_flags = req_flags;
- rcb.msi = SISL_MSI_RRQ_UPDATED;
- rcb.timeout = MC_AFU_DEBUG_TIMEOUT;
- rcb.ioasa = &asa;
-
- if (ulen) {
- rcb.data_len = ulen;
- rcb.data_ea = (uintptr_t)kbuf;
- }
-
- rcb.cdb[0] = SISL_AFU_CMD_DEBUG;
- memcpy(&rcb.cdb[4], afu_dbg->afu_subcmd,
- HT_CXLFLASH_AFU_DEBUG_SUBCMD_LEN);
-
- rc = send_afu_cmd(afu, &rcb);
- if (rc) {
- dev_err(dev, "%s: send_afu_cmd failed rc=%d asc=%08x afux=%x\n",
- __func__, rc, asa.ioasc, asa.afu_extra);
- goto out;
- }
-
- if (ulen && !is_write) {
- if (copy_to_user(ubuf, kbuf, ulen))
- rc = -EFAULT;
- }
-out:
- kfree(buf);
- dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
- return rc;
-}
-
-/**
- * cxlflash_chr_ioctl() - character device IOCTL handler
- * @file: File pointer for this device.
- * @cmd: IOCTL command.
- * @arg: Userspace ioctl data structure.
- *
- * A read/write semaphore is used to implement a 'drain' of currently
- * running ioctls. The read semaphore is taken at the beginning of each
- * ioctl thread and released upon concluding execution. Additionally the
- * semaphore should be released and then reacquired in any ioctl execution
- * path which will wait for an event to occur that is outside the scope of
- * the ioctl (i.e. an adapter reset). To drain the ioctls currently running,
- * a thread simply needs to acquire the write semaphore.
- *
- * Return: 0 on success, -errno on failure
- */
-static long cxlflash_chr_ioctl(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- typedef int (*hioctl) (struct cxlflash_cfg *, void *);
-
- struct cxlflash_cfg *cfg = file->private_data;
- struct device *dev = &cfg->dev->dev;
- char buf[sizeof(union cxlflash_ht_ioctls)];
- void __user *uarg = (void __user *)arg;
- struct ht_cxlflash_hdr *hdr;
- size_t size = 0;
- bool known_ioctl = false;
- int idx = 0;
- int rc = 0;
- hioctl do_ioctl = NULL;
-
- static const struct {
- size_t size;
- hioctl ioctl;
- } ioctl_tbl[] = { /* NOTE: order matters here */
- { sizeof(struct ht_cxlflash_lun_provision), cxlflash_lun_provision },
- { sizeof(struct ht_cxlflash_afu_debug), cxlflash_afu_debug },
- };
-
- /* Hold read semaphore so we can drain if needed */
- down_read(&cfg->ioctl_rwsem);
-
- dev_dbg(dev, "%s: cmd=%u idx=%d tbl_size=%lu\n",
- __func__, cmd, idx, sizeof(ioctl_tbl));
-
- switch (cmd) {
- case HT_CXLFLASH_LUN_PROVISION:
- case HT_CXLFLASH_AFU_DEBUG:
- known_ioctl = true;
-		idx = _IOC_NR(cmd) - _IOC_NR(HT_CXLFLASH_LUN_PROVISION);
- size = ioctl_tbl[idx].size;
- do_ioctl = ioctl_tbl[idx].ioctl;
-
- if (likely(do_ioctl))
- break;
-
- fallthrough;
- default:
- rc = -EINVAL;
- goto out;
- }
-
- if (unlikely(copy_from_user(&buf, uarg, size))) {
- dev_err(dev, "%s: copy_from_user() fail "
- "size=%lu cmd=%d (%s) uarg=%p\n",
- __func__, size, cmd, decode_hioctl(cmd), uarg);
- rc = -EFAULT;
- goto out;
- }
-
- hdr = (struct ht_cxlflash_hdr *)&buf;
- if (hdr->version != HT_CXLFLASH_VERSION_0) {
- dev_dbg(dev, "%s: Version %u not supported for %s\n",
- __func__, hdr->version, decode_hioctl(cmd));
- rc = -EINVAL;
- goto out;
- }
-
- if (hdr->rsvd[0] || hdr->rsvd[1] || hdr->return_flags) {
- dev_dbg(dev, "%s: Reserved/rflags populated\n", __func__);
- rc = -EINVAL;
- goto out;
- }
-
- rc = do_ioctl(cfg, (void *)&buf);
- if (likely(!rc))
- if (unlikely(copy_to_user(uarg, &buf, size))) {
- dev_err(dev, "%s: copy_to_user() fail "
- "size=%lu cmd=%d (%s) uarg=%p\n",
- __func__, size, cmd, decode_hioctl(cmd), uarg);
- rc = -EFAULT;
- }
-
- /* fall through to exit */
-
-out:
- up_read(&cfg->ioctl_rwsem);
- if (unlikely(rc && known_ioctl))
- dev_err(dev, "%s: ioctl %s (%08X) returned rc=%d\n",
- __func__, decode_hioctl(cmd), cmd, rc);
- else
- dev_dbg(dev, "%s: ioctl %s (%08X) returned rc=%d\n",
- __func__, decode_hioctl(cmd), cmd, rc);
- return rc;
-}
-
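The switch above relies on the host ioctl commands being numbered consecutively, so the difference between the _IOC_NR() of the incoming command and that of the first command (HT_CXLFLASH_LUN_PROVISION) indexes directly into ioctl_tbl[]. A userspace sketch of that indexing idiom, not driver code; the command values and the NR() macro are hypothetical stand-ins for the _IOC_* machinery.

/*
 * Userspace sketch of the ioctl-number-to-table-index idiom above;
 * not driver code. Command numbers are hypothetical.
 */
#include <stdio.h>

#define NR(cmd)			((cmd) & 0xff)	/* stand-in for _IOC_NR() */
#define CMD_LUN_PROVISION	0x4021		/* hypothetical, NR = 0x21 */
#define CMD_AFU_DEBUG		0x4022		/* hypothetical, NR = 0x22 */

static const char *handlers[] = { "lun_provision", "afu_debug" };

int main(void)
{
	unsigned int cmds[] = { CMD_LUN_PROVISION, CMD_AFU_DEBUG };

	for (int i = 0; i < 2; i++) {
		/* Consecutive NR values map to consecutive table slots. */
		int idx = NR(cmds[i]) - NR(CMD_LUN_PROVISION);

		printf("cmd=0x%x -> idx=%d (%s)\n", cmds[i], idx, handlers[idx]);
	}
	return 0;
}
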
-/*
- * Character device file operations
- */
-static const struct file_operations cxlflash_chr_fops = {
- .owner = THIS_MODULE,
- .open = cxlflash_chr_open,
- .unlocked_ioctl = cxlflash_chr_ioctl,
- .compat_ioctl = compat_ptr_ioctl,
-};
-
-/**
- * init_chrdev() - initialize the character device for the host
- * @cfg: Internal structure associated with the host.
- *
- * Return: 0 on success, -errno on failure
- */
-static int init_chrdev(struct cxlflash_cfg *cfg)
-{
- struct device *dev = &cfg->dev->dev;
- struct device *char_dev;
- dev_t devno;
- int minor;
- int rc = 0;
-
- minor = cxlflash_get_minor();
- if (unlikely(minor < 0)) {
- dev_err(dev, "%s: Exhausted allowed adapters\n", __func__);
- rc = -ENOSPC;
- goto out;
- }
-
- devno = MKDEV(cxlflash_major, minor);
- cdev_init(&cfg->cdev, &cxlflash_chr_fops);
-
- rc = cdev_add(&cfg->cdev, devno, 1);
- if (rc) {
- dev_err(dev, "%s: cdev_add failed rc=%d\n", __func__, rc);
- goto err1;
- }
-
- char_dev = device_create(&cxlflash_class, NULL, devno,
- NULL, "cxlflash%d", minor);
- if (IS_ERR(char_dev)) {
- rc = PTR_ERR(char_dev);
- dev_err(dev, "%s: device_create failed rc=%d\n",
- __func__, rc);
- goto err2;
- }
-
- cfg->chardev = char_dev;
-out:
- dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
- return rc;
-err2:
- cdev_del(&cfg->cdev);
-err1:
- cxlflash_put_minor(minor);
- goto out;
-}
-
-/**
- * cxlflash_probe() - PCI entry point to add host
- * @pdev: PCI device associated with the host.
- * @dev_id: PCI device id associated with device.
- *
- * The device will initially start out in a 'probing' state and
- * transition to the 'normal' state at the end of a successful
- * probe. Should an EEH event occur during probe, the notification
- * thread (error_detected()) will wait until the probe handler
- * is nearly complete. At that time, the device will be moved to
- * a 'probed' state and the EEH thread woken up to drive the slot
- * reset and recovery (device moves to 'normal' state). Meanwhile,
- * the probe will be allowed to exit successfully.
- *
- * Return: 0 on success, -errno on failure
- */
-static int cxlflash_probe(struct pci_dev *pdev,
- const struct pci_device_id *dev_id)
-{
- struct Scsi_Host *host;
- struct cxlflash_cfg *cfg = NULL;
- struct device *dev = &pdev->dev;
- struct dev_dependent_vals *ddv;
- int rc = 0;
- int k;
-
- dev_err_once(&pdev->dev, "DEPRECATION: cxlflash is deprecated and will be removed in a future kernel release\n");
-
- dev_dbg(&pdev->dev, "%s: Found CXLFLASH with IRQ: %d\n",
- __func__, pdev->irq);
-
- ddv = (struct dev_dependent_vals *)dev_id->driver_data;
- driver_template.max_sectors = ddv->max_sectors;
-
- host = scsi_host_alloc(&driver_template, sizeof(struct cxlflash_cfg));
- if (!host) {
- dev_err(dev, "%s: scsi_host_alloc failed\n", __func__);
- rc = -ENOMEM;
- goto out;
- }
-
- host->max_id = CXLFLASH_MAX_NUM_TARGETS_PER_BUS;
- host->max_lun = CXLFLASH_MAX_NUM_LUNS_PER_TARGET;
- host->unique_id = host->host_no;
- host->max_cmd_len = CXLFLASH_MAX_CDB_LEN;
-
- cfg = shost_priv(host);
- cfg->state = STATE_PROBING;
- cfg->host = host;
- rc = alloc_mem(cfg);
- if (rc) {
- dev_err(dev, "%s: alloc_mem failed\n", __func__);
- rc = -ENOMEM;
- scsi_host_put(cfg->host);
- goto out;
- }
-
- cfg->init_state = INIT_STATE_NONE;
- cfg->dev = pdev;
- cfg->cxl_fops = cxlflash_cxl_fops;
- cfg->ops = cxlflash_assign_ops(ddv);
- WARN_ON_ONCE(!cfg->ops);
-
- /*
- * Promoted LUNs move to the top of the LUN table. The rest stay on
- * the bottom half. The bottom half grows from the end (index = 255),
- * whereas the top half grows from the beginning (index = 0).
- *
- * Initialize the last LUN index for all possible ports.
- */
- cfg->promote_lun_index = 0;
-
- for (k = 0; k < MAX_FC_PORTS; k++)
- cfg->last_lun_index[k] = CXLFLASH_NUM_VLUNS/2 - 1;
-
- cfg->dev_id = (struct pci_device_id *)dev_id;
-
- init_waitqueue_head(&cfg->tmf_waitq);
- init_waitqueue_head(&cfg->reset_waitq);
-
- INIT_WORK(&cfg->work_q, cxlflash_worker_thread);
- cfg->lr_state = LINK_RESET_INVALID;
- cfg->lr_port = -1;
- spin_lock_init(&cfg->tmf_slock);
- mutex_init(&cfg->ctx_tbl_list_mutex);
- mutex_init(&cfg->ctx_recovery_mutex);
- init_rwsem(&cfg->ioctl_rwsem);
- INIT_LIST_HEAD(&cfg->ctx_err_recovery);
- INIT_LIST_HEAD(&cfg->lluns);
-
- pci_set_drvdata(pdev, cfg);
-
- rc = init_pci(cfg);
- if (rc) {
- dev_err(dev, "%s: init_pci failed rc=%d\n", __func__, rc);
- goto out_remove;
- }
- cfg->init_state = INIT_STATE_PCI;
-
- cfg->afu_cookie = cfg->ops->create_afu(pdev);
- if (unlikely(!cfg->afu_cookie)) {
- dev_err(dev, "%s: create_afu failed\n", __func__);
- rc = -ENOMEM;
- goto out_remove;
- }
-
- rc = init_afu(cfg);
- if (rc && !wq_has_sleeper(&cfg->reset_waitq)) {
- dev_err(dev, "%s: init_afu failed rc=%d\n", __func__, rc);
- goto out_remove;
- }
- cfg->init_state = INIT_STATE_AFU;
-
- rc = init_scsi(cfg);
- if (rc) {
- dev_err(dev, "%s: init_scsi failed rc=%d\n", __func__, rc);
- goto out_remove;
- }
- cfg->init_state = INIT_STATE_SCSI;
-
- rc = init_chrdev(cfg);
- if (rc) {
- dev_err(dev, "%s: init_chrdev failed rc=%d\n", __func__, rc);
- goto out_remove;
- }
- cfg->init_state = INIT_STATE_CDEV;
-
- if (wq_has_sleeper(&cfg->reset_waitq)) {
- cfg->state = STATE_PROBED;
- wake_up_all(&cfg->reset_waitq);
- } else
- cfg->state = STATE_NORMAL;
-out:
- dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
- return rc;
-
-out_remove:
- cfg->state = STATE_PROBED;
- cxlflash_remove(pdev);
- goto out;
-}
-
-/**
- * cxlflash_pci_error_detected() - called when a PCI error is detected
- * @pdev: PCI device struct.
- * @state: PCI channel state.
- *
- * When an EEH occurs during an active reset, wait until the reset is
- * complete and then take action based upon the device state.
- *
- * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
- */
-static pci_ers_result_t cxlflash_pci_error_detected(struct pci_dev *pdev,
- pci_channel_state_t state)
-{
- int rc = 0;
- struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
- struct device *dev = &cfg->dev->dev;
-
- dev_dbg(dev, "%s: pdev=%p state=%u\n", __func__, pdev, state);
-
- switch (state) {
- case pci_channel_io_frozen:
- wait_event(cfg->reset_waitq, cfg->state != STATE_RESET &&
- cfg->state != STATE_PROBING);
- if (cfg->state == STATE_FAILTERM)
- return PCI_ERS_RESULT_DISCONNECT;
-
- cfg->state = STATE_RESET;
- scsi_block_requests(cfg->host);
- drain_ioctls(cfg);
- rc = cxlflash_mark_contexts_error(cfg);
- if (unlikely(rc))
- dev_err(dev, "%s: Failed to mark user contexts rc=%d\n",
- __func__, rc);
- term_afu(cfg);
- return PCI_ERS_RESULT_NEED_RESET;
- case pci_channel_io_perm_failure:
- cfg->state = STATE_FAILTERM;
- wake_up_all(&cfg->reset_waitq);
- scsi_unblock_requests(cfg->host);
- return PCI_ERS_RESULT_DISCONNECT;
- default:
- break;
- }
- return PCI_ERS_RESULT_NEED_RESET;
-}
-
-/**
- * cxlflash_pci_slot_reset() - called when PCI slot has been reset
- * @pdev: PCI device struct.
- *
- * This routine is called by the pci error recovery code after the PCI
- * slot has been reset, just before we should resume normal operations.
- *
- * Return: PCI_ERS_RESULT_RECOVERED or PCI_ERS_RESULT_DISCONNECT
- */
-static pci_ers_result_t cxlflash_pci_slot_reset(struct pci_dev *pdev)
-{
- int rc = 0;
- struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
- struct device *dev = &cfg->dev->dev;
-
- dev_dbg(dev, "%s: pdev=%p\n", __func__, pdev);
-
- rc = init_afu(cfg);
- if (unlikely(rc)) {
- dev_err(dev, "%s: EEH recovery failed rc=%d\n", __func__, rc);
- return PCI_ERS_RESULT_DISCONNECT;
- }
-
- return PCI_ERS_RESULT_RECOVERED;
-}
-
-/**
- * cxlflash_pci_resume() - called when normal operation can resume
- * @pdev: PCI device struct
- */
-static void cxlflash_pci_resume(struct pci_dev *pdev)
-{
- struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
- struct device *dev = &cfg->dev->dev;
-
- dev_dbg(dev, "%s: pdev=%p\n", __func__, pdev);
-
- cfg->state = STATE_NORMAL;
- wake_up_all(&cfg->reset_waitq);
- scsi_unblock_requests(cfg->host);
-}
-
-/**
- * cxlflash_devnode() - provides devtmpfs for devices in the cxlflash class
- * @dev: Character device.
- * @mode: Mode that can be used to verify access.
- *
- * Return: Allocated string describing the devtmpfs structure.
- */
-static char *cxlflash_devnode(const struct device *dev, umode_t *mode)
-{
- return kasprintf(GFP_KERNEL, "cxlflash/%s", dev_name(dev));
-}
-
-/**
- * cxlflash_class_init() - create character device class
- *
- * Return: 0 on success, -errno on failure
- */
-static int cxlflash_class_init(void)
-{
- dev_t devno;
- int rc = 0;
-
- rc = alloc_chrdev_region(&devno, 0, CXLFLASH_MAX_ADAPTERS, "cxlflash");
- if (unlikely(rc)) {
- pr_err("%s: alloc_chrdev_region failed rc=%d\n", __func__, rc);
- goto out;
- }
-
- cxlflash_major = MAJOR(devno);
-
- rc = class_register(&cxlflash_class);
- if (rc) {
-		pr_err("%s: class_register failed rc=%d\n", __func__, rc);
- goto err;
- }
-
-out:
- pr_debug("%s: returning rc=%d\n", __func__, rc);
- return rc;
-err:
- unregister_chrdev_region(devno, CXLFLASH_MAX_ADAPTERS);
- goto out;
-}
-
-/**
- * cxlflash_class_exit() - destroy character device class
- */
-static void cxlflash_class_exit(void)
-{
- dev_t devno = MKDEV(cxlflash_major, 0);
-
- class_unregister(&cxlflash_class);
- unregister_chrdev_region(devno, CXLFLASH_MAX_ADAPTERS);
-}
-
-static const struct pci_error_handlers cxlflash_err_handler = {
- .error_detected = cxlflash_pci_error_detected,
- .slot_reset = cxlflash_pci_slot_reset,
- .resume = cxlflash_pci_resume,
-};
-
-/*
- * PCI device structure
- */
-static struct pci_driver cxlflash_driver = {
- .name = CXLFLASH_NAME,
- .id_table = cxlflash_pci_table,
- .probe = cxlflash_probe,
- .remove = cxlflash_remove,
- .shutdown = cxlflash_remove,
- .err_handler = &cxlflash_err_handler,
-};
-
-/**
- * init_cxlflash() - module entry point
- *
- * Return: 0 on success, -errno on failure
- */
-static int __init init_cxlflash(void)
-{
- int rc;
-
- check_sizes();
- cxlflash_list_init();
- rc = cxlflash_class_init();
- if (unlikely(rc))
- goto out;
-
- rc = pci_register_driver(&cxlflash_driver);
- if (unlikely(rc))
- goto err;
-out:
- pr_debug("%s: returning rc=%d\n", __func__, rc);
- return rc;
-err:
- cxlflash_class_exit();
- goto out;
-}
-
-/**
- * exit_cxlflash() - module exit point
- */
-static void __exit exit_cxlflash(void)
-{
- cxlflash_term_global_luns();
- cxlflash_free_errpage();
-
- pci_unregister_driver(&cxlflash_driver);
- cxlflash_class_exit();
-}
-
-module_init(init_cxlflash);
-module_exit(exit_cxlflash);
diff --git a/drivers/scsi/cxlflash/main.h b/drivers/scsi/cxlflash/main.h
deleted file mode 100644
index 0bfb98effce0..000000000000
--- a/drivers/scsi/cxlflash/main.h
+++ /dev/null
@@ -1,129 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * CXL Flash Device Driver
- *
- * Written by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>, IBM Corporation
- * Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
- *
- * Copyright (C) 2015 IBM Corporation
- */
-
-#ifndef _CXLFLASH_MAIN_H
-#define _CXLFLASH_MAIN_H
-
-#include <linux/list.h>
-#include <linux/types.h>
-#include <scsi/scsi.h>
-#include <scsi/scsi_device.h>
-
-#include "backend.h"
-
-#define CXLFLASH_NAME "cxlflash"
-#define CXLFLASH_ADAPTER_NAME "IBM POWER CXL Flash Adapter"
-#define CXLFLASH_MAX_ADAPTERS 32
-
-#define PCI_DEVICE_ID_IBM_CORSA 0x04F0
-#define PCI_DEVICE_ID_IBM_FLASH_GT 0x0600
-#define PCI_DEVICE_ID_IBM_BRIARD 0x0624
-
-/* Since there is only one target, make it 0 */
-#define CXLFLASH_TARGET 0
-#define CXLFLASH_MAX_CDB_LEN 16
-
-/* Really only one target per bus since the Texan is directly attached */
-#define CXLFLASH_MAX_NUM_TARGETS_PER_BUS 1
-#define CXLFLASH_MAX_NUM_LUNS_PER_TARGET 65536
-
-#define CXLFLASH_PCI_ERROR_RECOVERY_TIMEOUT (120 * HZ)
-
-/* FC defines */
-#define FC_MTIP_CMDCONFIG 0x010
-#define FC_MTIP_STATUS 0x018
-#define FC_MAX_NUM_LUNS 0x080 /* Max LUNs host can provision for port */
-#define FC_CUR_NUM_LUNS 0x088 /* Cur number LUNs provisioned for port */
-#define FC_MAX_CAP_PORT 0x090 /* Max capacity all LUNs for port (4K blocks) */
-#define FC_CUR_CAP_PORT 0x098 /* Cur capacity all LUNs for port (4K blocks) */
-
-#define FC_PNAME 0x300
-#define FC_CONFIG 0x320
-#define FC_CONFIG2 0x328
-#define FC_STATUS 0x330
-#define FC_ERROR 0x380
-#define FC_ERRCAP 0x388
-#define FC_ERRMSK 0x390
-#define FC_CNT_CRCERR 0x538
-#define FC_CRC_THRESH 0x580
-
-#define FC_MTIP_CMDCONFIG_ONLINE 0x20ULL
-#define FC_MTIP_CMDCONFIG_OFFLINE 0x40ULL
-
-#define FC_MTIP_STATUS_MASK 0x30ULL
-#define FC_MTIP_STATUS_ONLINE 0x20ULL
-#define FC_MTIP_STATUS_OFFLINE 0x10ULL
-
-/* TIMEOUT and RETRY definitions */
-
-/* AFU command timeout values */
-#define MC_AFU_SYNC_TIMEOUT 5 /* 5 secs */
-#define MC_LUN_PROV_TIMEOUT 5 /* 5 secs */
-#define MC_AFU_DEBUG_TIMEOUT 5 /* 5 secs */
-
-/* AFU command room retry limit */
-#define MC_ROOM_RETRY_CNT 10
-
-/* FC CRC clear periodic timer */
-#define MC_CRC_THRESH 100 /* threshold in 5 mins */
-
-#define FC_PORT_STATUS_RETRY_CNT 100 /* 100 100ms retries = 10 seconds */
-#define FC_PORT_STATUS_RETRY_INTERVAL_US 100000 /* microseconds */
-
-/* VPD defines */
-#define CXLFLASH_VPD_LEN 256
-#define WWPN_LEN 16
-#define WWPN_BUF_LEN (WWPN_LEN + 1)
-
-enum undo_level {
- UNDO_NOOP = 0,
- FREE_IRQ,
- UNMAP_ONE,
- UNMAP_TWO,
- UNMAP_THREE
-};
-
-struct dev_dependent_vals {
- u64 max_sectors;
- u64 flags;
-#define CXLFLASH_NOTIFY_SHUTDOWN 0x0000000000000001ULL
-#define CXLFLASH_WWPN_VPD_REQUIRED 0x0000000000000002ULL
-#define CXLFLASH_OCXL_DEV 0x0000000000000004ULL
-};
-
-static inline const struct cxlflash_backend_ops *
-cxlflash_assign_ops(struct dev_dependent_vals *ddv)
-{
- const struct cxlflash_backend_ops *ops = NULL;
-
-#ifdef CONFIG_OCXL_BASE
- if (ddv->flags & CXLFLASH_OCXL_DEV)
- ops = &cxlflash_ocxl_ops;
-#endif
-
-#ifdef CONFIG_CXL_BASE
- if (!(ddv->flags & CXLFLASH_OCXL_DEV))
- ops = &cxlflash_cxl_ops;
-#endif
-
- return ops;
-}
-
-struct asyc_intr_info {
- u64 status;
- char *desc;
- u8 port;
- u8 action;
-#define CLR_FC_ERROR 0x01
-#define LINK_RESET 0x02
-#define SCAN_HOST 0x04
-};
-
-#endif /* _CXLFLASH_MAIN_H */
diff --git a/drivers/scsi/cxlflash/ocxl_hw.c b/drivers/scsi/cxlflash/ocxl_hw.c
deleted file mode 100644
index 6542818e595a..000000000000
--- a/drivers/scsi/cxlflash/ocxl_hw.c
+++ /dev/null
@@ -1,1399 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * CXL Flash Device Driver
- *
- * Written by: Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
- * Uma Krishnan <ukrishn@linux.vnet.ibm.com>, IBM Corporation
- *
- * Copyright (C) 2018 IBM Corporation
- */
-
-#include <linux/file.h>
-#include <linux/idr.h>
-#include <linux/module.h>
-#include <linux/mount.h>
-#include <linux/pseudo_fs.h>
-#include <linux/poll.h>
-#include <linux/sched/signal.h>
-#include <linux/interrupt.h>
-#include <linux/irqdomain.h>
-#include <asm/xive.h>
-#include <misc/ocxl.h>
-
-#include <uapi/misc/cxl.h>
-
-#include "backend.h"
-#include "ocxl_hw.h"
-
-/*
- * Pseudo-filesystem to allocate inodes.
- */
-
-#define OCXLFLASH_FS_MAGIC 0x1697698f
-
-static int ocxlflash_fs_cnt;
-static struct vfsmount *ocxlflash_vfs_mount;
-
-static int ocxlflash_fs_init_fs_context(struct fs_context *fc)
-{
- return init_pseudo(fc, OCXLFLASH_FS_MAGIC) ? 0 : -ENOMEM;
-}
-
-static struct file_system_type ocxlflash_fs_type = {
- .name = "ocxlflash",
- .owner = THIS_MODULE,
- .init_fs_context = ocxlflash_fs_init_fs_context,
- .kill_sb = kill_anon_super,
-};
-
-/*
- * ocxlflash_release_mapping() - release the memory mapping
- * @ctx: Context whose mapping is to be released.
- */
-static void ocxlflash_release_mapping(struct ocxlflash_context *ctx)
-{
- if (ctx->mapping)
- simple_release_fs(&ocxlflash_vfs_mount, &ocxlflash_fs_cnt);
- ctx->mapping = NULL;
-}
-
-/*
- * ocxlflash_getfile() - allocate pseudo filesystem, inode, and the file
- * @dev: Generic device of the host.
- * @name: Name of the pseudo filesystem.
- * @fops: File operations.
- * @priv: Private data.
- * @flags: Flags for the file.
- *
- * Return: pointer to the file on success, ERR_PTR on failure
- */
-static struct file *ocxlflash_getfile(struct device *dev, const char *name,
- const struct file_operations *fops,
- void *priv, int flags)
-{
- struct file *file;
- struct inode *inode;
- int rc;
-
- if (fops->owner && !try_module_get(fops->owner)) {
- dev_err(dev, "%s: Owner does not exist\n", __func__);
- rc = -ENOENT;
- goto err1;
- }
-
- rc = simple_pin_fs(&ocxlflash_fs_type, &ocxlflash_vfs_mount,
- &ocxlflash_fs_cnt);
- if (unlikely(rc < 0)) {
- dev_err(dev, "%s: Cannot mount ocxlflash pseudofs rc=%d\n",
- __func__, rc);
- goto err2;
- }
-
- inode = alloc_anon_inode(ocxlflash_vfs_mount->mnt_sb);
- if (IS_ERR(inode)) {
- rc = PTR_ERR(inode);
- dev_err(dev, "%s: alloc_anon_inode failed rc=%d\n",
- __func__, rc);
- goto err3;
- }
-
- file = alloc_file_pseudo(inode, ocxlflash_vfs_mount, name,
- flags & (O_ACCMODE | O_NONBLOCK), fops);
- if (IS_ERR(file)) {
- rc = PTR_ERR(file);
- dev_err(dev, "%s: alloc_file failed rc=%d\n",
- __func__, rc);
- goto err4;
- }
-
- file->private_data = priv;
-out:
- return file;
-err4:
- iput(inode);
-err3:
- simple_release_fs(&ocxlflash_vfs_mount, &ocxlflash_fs_cnt);
-err2:
- module_put(fops->owner);
-err1:
- file = ERR_PTR(rc);
- goto out;
-}
-
-/**
- * ocxlflash_psa_map() - map the process specific MMIO space
- * @ctx_cookie: Adapter context for which the mapping needs to be done.
- *
- * Return: MMIO pointer of the mapped region
- */
-static void __iomem *ocxlflash_psa_map(void *ctx_cookie)
-{
- struct ocxlflash_context *ctx = ctx_cookie;
- struct device *dev = ctx->hw_afu->dev;
-
- mutex_lock(&ctx->state_mutex);
- if (ctx->state != STARTED) {
- dev_err(dev, "%s: Context not started, state=%d\n", __func__,
- ctx->state);
- mutex_unlock(&ctx->state_mutex);
- return NULL;
- }
- mutex_unlock(&ctx->state_mutex);
-
- return ioremap(ctx->psn_phys, ctx->psn_size);
-}
-
-/**
- * ocxlflash_psa_unmap() - unmap the process specific MMIO space
- * @addr: MMIO pointer to unmap.
- */
-static void ocxlflash_psa_unmap(void __iomem *addr)
-{
- iounmap(addr);
-}
-
-/**
- * ocxlflash_process_element() - get process element of the adapter context
- * @ctx_cookie: Adapter context associated with the process element.
- *
- * Return: process element of the adapter context
- */
-static int ocxlflash_process_element(void *ctx_cookie)
-{
- struct ocxlflash_context *ctx = ctx_cookie;
-
- return ctx->pe;
-}
-
-/**
- * afu_map_irq() - map the interrupt of the adapter context
- * @flags: Flags.
- * @ctx: Adapter context.
- * @num: Per-context AFU interrupt number.
- * @handler: Interrupt handler to register.
- * @cookie: Interrupt handler private data.
- * @name: Name of the interrupt.
- *
- * Return: 0 on success, -errno on failure
- */
-static int afu_map_irq(u64 flags, struct ocxlflash_context *ctx, int num,
- irq_handler_t handler, void *cookie, char *name)
-{
- struct ocxl_hw_afu *afu = ctx->hw_afu;
- struct device *dev = afu->dev;
- struct ocxlflash_irqs *irq;
- struct xive_irq_data *xd;
- u32 virq;
- int rc = 0;
-
- if (num < 0 || num >= ctx->num_irqs) {
- dev_err(dev, "%s: Interrupt %d not allocated\n", __func__, num);
- rc = -ENOENT;
- goto out;
- }
-
- irq = &ctx->irqs[num];
- virq = irq_create_mapping(NULL, irq->hwirq);
- if (unlikely(!virq)) {
- dev_err(dev, "%s: irq_create_mapping failed\n", __func__);
- rc = -ENOMEM;
- goto out;
- }
-
- rc = request_irq(virq, handler, 0, name, cookie);
- if (unlikely(rc)) {
- dev_err(dev, "%s: request_irq failed rc=%d\n", __func__, rc);
- goto err1;
- }
-
- xd = irq_get_handler_data(virq);
- if (unlikely(!xd)) {
- dev_err(dev, "%s: Can't get interrupt data\n", __func__);
- rc = -ENXIO;
- goto err2;
- }
-
- irq->virq = virq;
- irq->vtrig = xd->trig_mmio;
-out:
- return rc;
-err2:
- free_irq(virq, cookie);
-err1:
- irq_dispose_mapping(virq);
- goto out;
-}
-
-/**
- * ocxlflash_map_afu_irq() - map the interrupt of the adapter context
- * @ctx_cookie: Adapter context.
- * @num: Per-context AFU interrupt number.
- * @handler: Interrupt handler to register.
- * @cookie: Interrupt handler private data.
- * @name: Name of the interrupt.
- *
- * Return: 0 on success, -errno on failure
- */
-static int ocxlflash_map_afu_irq(void *ctx_cookie, int num,
- irq_handler_t handler, void *cookie,
- char *name)
-{
- return afu_map_irq(0, ctx_cookie, num, handler, cookie, name);
-}
-
-/**
- * afu_unmap_irq() - unmap the interrupt
- * @flags: Flags.
- * @ctx: Adapter context.
- * @num: Per-context AFU interrupt number.
- * @cookie: Interrupt handler private data.
- */
-static void afu_unmap_irq(u64 flags, struct ocxlflash_context *ctx, int num,
- void *cookie)
-{
- struct ocxl_hw_afu *afu = ctx->hw_afu;
- struct device *dev = afu->dev;
- struct ocxlflash_irqs *irq;
-
- if (num < 0 || num >= ctx->num_irqs) {
- dev_err(dev, "%s: Interrupt %d not allocated\n", __func__, num);
- return;
- }
-
- irq = &ctx->irqs[num];
-
- if (irq_find_mapping(NULL, irq->hwirq)) {
- free_irq(irq->virq, cookie);
- irq_dispose_mapping(irq->virq);
- }
-
- memset(irq, 0, sizeof(*irq));
-}
-
-/**
- * ocxlflash_unmap_afu_irq() - unmap the interrupt
- * @ctx_cookie: Adapter context.
- * @num: Per-context AFU interrupt number.
- * @cookie: Interrupt handler private data.
- */
-static void ocxlflash_unmap_afu_irq(void *ctx_cookie, int num, void *cookie)
-{
- return afu_unmap_irq(0, ctx_cookie, num, cookie);
-}
-
-/**
- * ocxlflash_get_irq_objhndl() - get the object handle for an interrupt
- * @ctx_cookie: Context associated with the interrupt.
- * @irq: Interrupt number.
- *
- * Return: effective address of the mapped region
- */
-static u64 ocxlflash_get_irq_objhndl(void *ctx_cookie, int irq)
-{
- struct ocxlflash_context *ctx = ctx_cookie;
-
- if (irq < 0 || irq >= ctx->num_irqs)
- return 0;
-
- return (__force u64)ctx->irqs[irq].vtrig;
-}
-
-/**
- * ocxlflash_xsl_fault() - callback when translation error is triggered
- * @data: Private data provided at callback registration, the context.
- * @addr: Address that triggered the error.
- * @dsisr: Value of dsisr register.
- */
-static void ocxlflash_xsl_fault(void *data, u64 addr, u64 dsisr)
-{
- struct ocxlflash_context *ctx = data;
-
- spin_lock(&ctx->slock);
- ctx->fault_addr = addr;
- ctx->fault_dsisr = dsisr;
- ctx->pending_fault = true;
- spin_unlock(&ctx->slock);
-
- wake_up_all(&ctx->wq);
-}
-
-/**
- * start_context() - local routine to start a context
- * @ctx: Adapter context to be started.
- *
- * Assign the context specific MMIO space, add and enable the PE.
- *
- * Return: 0 on success, -errno on failure
- */
-static int start_context(struct ocxlflash_context *ctx)
-{
- struct ocxl_hw_afu *afu = ctx->hw_afu;
- struct ocxl_afu_config *acfg = &afu->acfg;
- void *link_token = afu->link_token;
- struct pci_dev *pdev = afu->pdev;
- struct device *dev = afu->dev;
- bool master = ctx->master;
- struct mm_struct *mm;
- int rc = 0;
- u32 pid;
-
- mutex_lock(&ctx->state_mutex);
- if (ctx->state != OPENED) {
- dev_err(dev, "%s: Context state invalid, state=%d\n",
- __func__, ctx->state);
- rc = -EINVAL;
- goto out;
- }
-
- if (master) {
- ctx->psn_size = acfg->global_mmio_size;
- ctx->psn_phys = afu->gmmio_phys;
- } else {
- ctx->psn_size = acfg->pp_mmio_stride;
- ctx->psn_phys = afu->ppmmio_phys + (ctx->pe * ctx->psn_size);
- }
-
- /* pid and mm not set for master contexts */
- if (master) {
- pid = 0;
- mm = NULL;
- } else {
- pid = current->mm->context.id;
- mm = current->mm;
- }
-
- rc = ocxl_link_add_pe(link_token, ctx->pe, pid, 0, 0,
- pci_dev_id(pdev), mm, ocxlflash_xsl_fault,
- ctx);
- if (unlikely(rc)) {
- dev_err(dev, "%s: ocxl_link_add_pe failed rc=%d\n",
- __func__, rc);
- goto out;
- }
-
- ctx->state = STARTED;
-out:
- mutex_unlock(&ctx->state_mutex);
- return rc;
-}
-
-/**
- * ocxlflash_start_context() - start a kernel context
- * @ctx_cookie: Adapter context to be started.
- *
- * Return: 0 on success, -errno on failure
- */
-static int ocxlflash_start_context(void *ctx_cookie)
-{
- struct ocxlflash_context *ctx = ctx_cookie;
-
- return start_context(ctx);
-}
-
-/**
- * ocxlflash_stop_context() - stop a context
- * @ctx_cookie: Adapter context to be stopped.
- *
- * Return: 0 on success, -errno on failure
- */
-static int ocxlflash_stop_context(void *ctx_cookie)
-{
- struct ocxlflash_context *ctx = ctx_cookie;
- struct ocxl_hw_afu *afu = ctx->hw_afu;
- struct ocxl_afu_config *acfg = &afu->acfg;
- struct pci_dev *pdev = afu->pdev;
- struct device *dev = afu->dev;
- enum ocxlflash_ctx_state state;
- int rc = 0;
-
- mutex_lock(&ctx->state_mutex);
- state = ctx->state;
- ctx->state = CLOSED;
- mutex_unlock(&ctx->state_mutex);
- if (state != STARTED)
- goto out;
-
- rc = ocxl_config_terminate_pasid(pdev, acfg->dvsec_afu_control_pos,
- ctx->pe);
- if (unlikely(rc)) {
- dev_err(dev, "%s: ocxl_config_terminate_pasid failed rc=%d\n",
- __func__, rc);
- /* If EBUSY, PE could be referenced in future by the AFU */
- if (rc == -EBUSY)
- goto out;
- }
-
- rc = ocxl_link_remove_pe(afu->link_token, ctx->pe);
- if (unlikely(rc)) {
- dev_err(dev, "%s: ocxl_link_remove_pe failed rc=%d\n",
- __func__, rc);
- goto out;
- }
-out:
- return rc;
-}
-
-/**
- * ocxlflash_afu_reset() - reset the AFU
- * @ctx_cookie: Adapter context.
- */
-static int ocxlflash_afu_reset(void *ctx_cookie)
-{
- struct ocxlflash_context *ctx = ctx_cookie;
- struct device *dev = ctx->hw_afu->dev;
-
- /* Pending implementation from OCXL transport services */
- dev_err_once(dev, "%s: afu_reset() fop not supported\n", __func__);
-
- /* Silently return success until it is implemented */
- return 0;
-}
-
-/**
- * ocxlflash_set_master() - sets the context as master
- * @ctx_cookie: Adapter context to set as master.
- */
-static void ocxlflash_set_master(void *ctx_cookie)
-{
- struct ocxlflash_context *ctx = ctx_cookie;
-
- ctx->master = true;
-}
-
-/**
- * ocxlflash_get_context() - obtains the context associated with the host
- * @pdev: PCI device associated with the host.
- * @afu_cookie: Hardware AFU associated with the host.
- *
- * Return: returns the pointer to host adapter context
- */
-static void *ocxlflash_get_context(struct pci_dev *pdev, void *afu_cookie)
-{
- struct ocxl_hw_afu *afu = afu_cookie;
-
- return afu->ocxl_ctx;
-}
-
-/**
- * ocxlflash_dev_context_init() - allocate and initialize an adapter context
- * @pdev: PCI device associated with the host.
- * @afu_cookie: Hardware AFU associated with the host.
- *
- * Return: returns the adapter context on success, ERR_PTR on failure
- */
-static void *ocxlflash_dev_context_init(struct pci_dev *pdev, void *afu_cookie)
-{
- struct ocxl_hw_afu *afu = afu_cookie;
- struct device *dev = afu->dev;
- struct ocxlflash_context *ctx;
- int rc;
-
- ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
- if (unlikely(!ctx)) {
- dev_err(dev, "%s: Context allocation failed\n", __func__);
- rc = -ENOMEM;
- goto err1;
- }
-
- idr_preload(GFP_KERNEL);
- rc = idr_alloc(&afu->idr, ctx, 0, afu->max_pasid, GFP_NOWAIT);
- idr_preload_end();
- if (unlikely(rc < 0)) {
- dev_err(dev, "%s: idr_alloc failed rc=%d\n", __func__, rc);
- goto err2;
- }
-
- spin_lock_init(&ctx->slock);
- init_waitqueue_head(&ctx->wq);
- mutex_init(&ctx->state_mutex);
-
- ctx->state = OPENED;
- ctx->pe = rc;
- ctx->master = false;
- ctx->mapping = NULL;
- ctx->hw_afu = afu;
- ctx->irq_bitmap = 0;
- ctx->pending_irq = false;
- ctx->pending_fault = false;
-out:
- return ctx;
-err2:
- kfree(ctx);
-err1:
- ctx = ERR_PTR(rc);
- goto out;
-}
-
-/**
- * ocxlflash_release_context() - releases an adapter context
- * @ctx_cookie: Adapter context to be released.
- *
- * Return: 0 on success, -errno on failure
- */
-static int ocxlflash_release_context(void *ctx_cookie)
-{
- struct ocxlflash_context *ctx = ctx_cookie;
- struct device *dev;
- int rc = 0;
-
- if (!ctx)
- goto out;
-
- dev = ctx->hw_afu->dev;
- mutex_lock(&ctx->state_mutex);
- if (ctx->state >= STARTED) {
- dev_err(dev, "%s: Context in use, state=%d\n", __func__,
- ctx->state);
- mutex_unlock(&ctx->state_mutex);
- rc = -EBUSY;
- goto out;
- }
- mutex_unlock(&ctx->state_mutex);
-
- idr_remove(&ctx->hw_afu->idr, ctx->pe);
- ocxlflash_release_mapping(ctx);
- kfree(ctx);
-out:
- return rc;
-}
-
-/**
- * ocxlflash_perst_reloads_same_image() - sets the image reload policy
- * @afu_cookie: Hardware AFU associated with the host.
- * @image: Whether to load the same image on PERST.
- */
-static void ocxlflash_perst_reloads_same_image(void *afu_cookie, bool image)
-{
- struct ocxl_hw_afu *afu = afu_cookie;
-
- afu->perst_same_image = image;
-}
-
-/**
- * ocxlflash_read_adapter_vpd() - reads the adapter VPD
- * @pdev: PCI device associated with the host.
- * @buf: Buffer to get the VPD data.
- * @count: Size of buffer (maximum bytes that can be read).
- *
- * Return: size of VPD on success, -errno on failure
- */
-static ssize_t ocxlflash_read_adapter_vpd(struct pci_dev *pdev, void *buf,
- size_t count)
-{
- return pci_read_vpd(pdev, 0, count, buf);
-}
-
-/**
- * free_afu_irqs() - internal service to free interrupts
- * @ctx: Adapter context.
- */
-static void free_afu_irqs(struct ocxlflash_context *ctx)
-{
- struct ocxl_hw_afu *afu = ctx->hw_afu;
- struct device *dev = afu->dev;
- int i;
-
- if (!ctx->irqs) {
- dev_err(dev, "%s: Interrupts not allocated\n", __func__);
- return;
- }
-
- for (i = ctx->num_irqs; i >= 0; i--)
- ocxl_link_free_irq(afu->link_token, ctx->irqs[i].hwirq);
-
- kfree(ctx->irqs);
- ctx->irqs = NULL;
-}
-
-/**
- * alloc_afu_irqs() - internal service to allocate interrupts
- * @ctx: Context associated with the request.
- * @num: Number of interrupts requested.
- *
- * Return: 0 on success, -errno on failure
- */
-static int alloc_afu_irqs(struct ocxlflash_context *ctx, int num)
-{
- struct ocxl_hw_afu *afu = ctx->hw_afu;
- struct device *dev = afu->dev;
- struct ocxlflash_irqs *irqs;
- int rc = 0;
- int hwirq;
- int i;
-
- if (ctx->irqs) {
- dev_err(dev, "%s: Interrupts already allocated\n", __func__);
- rc = -EEXIST;
- goto out;
- }
-
- if (num > OCXL_MAX_IRQS) {
- dev_err(dev, "%s: Too many interrupts num=%d\n", __func__, num);
- rc = -EINVAL;
- goto out;
- }
-
- irqs = kcalloc(num, sizeof(*irqs), GFP_KERNEL);
- if (unlikely(!irqs)) {
- dev_err(dev, "%s: Context irqs allocation failed\n", __func__);
- rc = -ENOMEM;
- goto out;
- }
-
- for (i = 0; i < num; i++) {
- rc = ocxl_link_irq_alloc(afu->link_token, &hwirq);
- if (unlikely(rc)) {
- dev_err(dev, "%s: ocxl_link_irq_alloc failed rc=%d\n",
- __func__, rc);
- goto err;
- }
-
- irqs[i].hwirq = hwirq;
- }
-
- ctx->irqs = irqs;
- ctx->num_irqs = num;
-out:
- return rc;
-err:
- for (i = i-1; i >= 0; i--)
- ocxl_link_free_irq(afu->link_token, irqs[i].hwirq);
- kfree(irqs);
- goto out;
-}
-
-/**
- * ocxlflash_allocate_afu_irqs() - allocates the requested number of interrupts
- * @ctx_cookie: Context associated with the request.
- * @num: Number of interrupts requested.
- *
- * Return: 0 on success, -errno on failure
- */
-static int ocxlflash_allocate_afu_irqs(void *ctx_cookie, int num)
-{
- return alloc_afu_irqs(ctx_cookie, num);
-}
-
-/**
- * ocxlflash_free_afu_irqs() - frees the interrupts of an adapter context
- * @ctx_cookie: Adapter context.
- */
-static void ocxlflash_free_afu_irqs(void *ctx_cookie)
-{
- free_afu_irqs(ctx_cookie);
-}
-
-/**
- * ocxlflash_unconfig_afu() - unconfigure the AFU
- * @afu: AFU associated with the host.
- */
-static void ocxlflash_unconfig_afu(struct ocxl_hw_afu *afu)
-{
- if (afu->gmmio_virt) {
- iounmap(afu->gmmio_virt);
- afu->gmmio_virt = NULL;
- }
-}
-
-/**
- * ocxlflash_destroy_afu() - destroy the AFU structure
- * @afu_cookie: AFU to be freed.
- */
-static void ocxlflash_destroy_afu(void *afu_cookie)
-{
- struct ocxl_hw_afu *afu = afu_cookie;
- int pos;
-
- if (!afu)
- return;
-
- ocxlflash_release_context(afu->ocxl_ctx);
- idr_destroy(&afu->idr);
-
- /* Disable the AFU */
- pos = afu->acfg.dvsec_afu_control_pos;
- ocxl_config_set_afu_state(afu->pdev, pos, 0);
-
- ocxlflash_unconfig_afu(afu);
- kfree(afu);
-}
-
-/**
- * ocxlflash_config_fn() - configure the host function
- * @pdev: PCI device associated with the host.
- * @afu: AFU associated with the host.
- *
- * Return: 0 on success, -errno on failure
- */
-static int ocxlflash_config_fn(struct pci_dev *pdev, struct ocxl_hw_afu *afu)
-{
- struct ocxl_fn_config *fcfg = &afu->fcfg;
- struct device *dev = &pdev->dev;
- u16 base, enabled, supported;
- int rc = 0;
-
- /* Read DVSEC config of the function */
- rc = ocxl_config_read_function(pdev, fcfg);
- if (unlikely(rc)) {
- dev_err(dev, "%s: ocxl_config_read_function failed rc=%d\n",
- __func__, rc);
- goto out;
- }
-
- /* Check if function has AFUs defined, only 1 per function supported */
- if (fcfg->max_afu_index >= 0) {
- afu->is_present = true;
- if (fcfg->max_afu_index != 0)
- dev_warn(dev, "%s: Unexpected AFU index value %d\n",
- __func__, fcfg->max_afu_index);
- }
-
- rc = ocxl_config_get_actag_info(pdev, &base, &enabled, &supported);
- if (unlikely(rc)) {
- dev_err(dev, "%s: ocxl_config_get_actag_info failed rc=%d\n",
- __func__, rc);
- goto out;
- }
-
- afu->fn_actag_base = base;
- afu->fn_actag_enabled = enabled;
-
- ocxl_config_set_actag(pdev, fcfg->dvsec_function_pos, base, enabled);
- dev_dbg(dev, "%s: Function acTag range base=%u enabled=%u\n",
- __func__, base, enabled);
-
- rc = ocxl_link_setup(pdev, 0, &afu->link_token);
- if (unlikely(rc)) {
- dev_err(dev, "%s: ocxl_link_setup failed rc=%d\n",
- __func__, rc);
- goto out;
- }
-
- rc = ocxl_config_set_TL(pdev, fcfg->dvsec_tl_pos);
- if (unlikely(rc)) {
- dev_err(dev, "%s: ocxl_config_set_TL failed rc=%d\n",
- __func__, rc);
- goto err;
- }
-out:
- return rc;
-err:
- ocxl_link_release(pdev, afu->link_token);
- goto out;
-}
-
-/**
- * ocxlflash_unconfig_fn() - unconfigure the host function
- * @pdev: PCI device associated with the host.
- * @afu: AFU associated with the host.
- */
-static void ocxlflash_unconfig_fn(struct pci_dev *pdev, struct ocxl_hw_afu *afu)
-{
- ocxl_link_release(pdev, afu->link_token);
-}
-
-/**
- * ocxlflash_map_mmio() - map the AFU MMIO space
- * @afu: AFU associated with the host.
- *
- * Return: 0 on success, -errno on failure
- */
-static int ocxlflash_map_mmio(struct ocxl_hw_afu *afu)
-{
- struct ocxl_afu_config *acfg = &afu->acfg;
- struct pci_dev *pdev = afu->pdev;
- struct device *dev = afu->dev;
- phys_addr_t gmmio, ppmmio;
- int rc = 0;
-
- rc = pci_request_region(pdev, acfg->global_mmio_bar, "ocxlflash");
- if (unlikely(rc)) {
- dev_err(dev, "%s: pci_request_region for global failed rc=%d\n",
- __func__, rc);
- goto out;
- }
- gmmio = pci_resource_start(pdev, acfg->global_mmio_bar);
- gmmio += acfg->global_mmio_offset;
-
- rc = pci_request_region(pdev, acfg->pp_mmio_bar, "ocxlflash");
- if (unlikely(rc)) {
- dev_err(dev, "%s: pci_request_region for pp bar failed rc=%d\n",
- __func__, rc);
- goto err1;
- }
- ppmmio = pci_resource_start(pdev, acfg->pp_mmio_bar);
- ppmmio += acfg->pp_mmio_offset;
-
- afu->gmmio_virt = ioremap(gmmio, acfg->global_mmio_size);
- if (unlikely(!afu->gmmio_virt)) {
- dev_err(dev, "%s: MMIO mapping failed\n", __func__);
- rc = -ENOMEM;
- goto err2;
- }
-
- afu->gmmio_phys = gmmio;
- afu->ppmmio_phys = ppmmio;
-out:
- return rc;
-err2:
- pci_release_region(pdev, acfg->pp_mmio_bar);
-err1:
- pci_release_region(pdev, acfg->global_mmio_bar);
- goto out;
-}
-
-/**
- * ocxlflash_config_afu() - configure the host AFU
- * @pdev: PCI device associated with the host.
- * @afu: AFU associated with the host.
- *
- * Must be called _after_ host function configuration.
- *
- * Return: 0 on success, -errno on failure
- */
-static int ocxlflash_config_afu(struct pci_dev *pdev, struct ocxl_hw_afu *afu)
-{
- struct ocxl_afu_config *acfg = &afu->acfg;
- struct ocxl_fn_config *fcfg = &afu->fcfg;
- struct device *dev = &pdev->dev;
- int count;
- int base;
- int pos;
- int rc = 0;
-
- /* This HW AFU function does not have any AFUs defined */
- if (!afu->is_present)
- goto out;
-
- /* Read AFU config at index 0 */
- rc = ocxl_config_read_afu(pdev, fcfg, acfg, 0);
- if (unlikely(rc)) {
- dev_err(dev, "%s: ocxl_config_read_afu failed rc=%d\n",
- __func__, rc);
- goto out;
- }
-
- /* Only one AFU per function is supported, so actag_base is same */
- base = afu->fn_actag_base;
- count = min_t(int, acfg->actag_supported, afu->fn_actag_enabled);
- pos = acfg->dvsec_afu_control_pos;
-
- ocxl_config_set_afu_actag(pdev, pos, base, count);
- dev_dbg(dev, "%s: acTag base=%d enabled=%d\n", __func__, base, count);
- afu->afu_actag_base = base;
- afu->afu_actag_enabled = count;
- afu->max_pasid = 1 << acfg->pasid_supported_log;
-
- ocxl_config_set_afu_pasid(pdev, pos, 0, acfg->pasid_supported_log);
-
- rc = ocxlflash_map_mmio(afu);
- if (unlikely(rc)) {
- dev_err(dev, "%s: ocxlflash_map_mmio failed rc=%d\n",
- __func__, rc);
- goto out;
- }
-
- /* Enable the AFU */
- ocxl_config_set_afu_state(pdev, acfg->dvsec_afu_control_pos, 1);
-out:
- return rc;
-}
-
-/**
- * ocxlflash_create_afu() - create the AFU for OCXL
- * @pdev: PCI device associated with the host.
- *
- * Return: AFU on success, NULL on failure
- */
-static void *ocxlflash_create_afu(struct pci_dev *pdev)
-{
- struct device *dev = &pdev->dev;
- struct ocxlflash_context *ctx;
- struct ocxl_hw_afu *afu;
- int rc;
-
- afu = kzalloc(sizeof(*afu), GFP_KERNEL);
- if (unlikely(!afu)) {
- dev_err(dev, "%s: HW AFU allocation failed\n", __func__);
- goto out;
- }
-
- afu->pdev = pdev;
- afu->dev = dev;
- idr_init(&afu->idr);
-
- rc = ocxlflash_config_fn(pdev, afu);
- if (unlikely(rc)) {
- dev_err(dev, "%s: Function configuration failed rc=%d\n",
- __func__, rc);
- goto err1;
- }
-
- rc = ocxlflash_config_afu(pdev, afu);
- if (unlikely(rc)) {
- dev_err(dev, "%s: AFU configuration failed rc=%d\n",
- __func__, rc);
- goto err2;
- }
-
- ctx = ocxlflash_dev_context_init(pdev, afu);
- if (IS_ERR(ctx)) {
- rc = PTR_ERR(ctx);
- dev_err(dev, "%s: ocxlflash_dev_context_init failed rc=%d\n",
- __func__, rc);
- goto err3;
- }
-
- afu->ocxl_ctx = ctx;
-out:
- return afu;
-err3:
- ocxlflash_unconfig_afu(afu);
-err2:
- ocxlflash_unconfig_fn(pdev, afu);
-err1:
- idr_destroy(&afu->idr);
- kfree(afu);
- afu = NULL;
- goto out;
-}
-
-/**
- * ctx_event_pending() - check for any event pending on the context
- * @ctx: Context to be checked.
- *
- * Return: true if there is an event pending, false if none pending
- */
-static inline bool ctx_event_pending(struct ocxlflash_context *ctx)
-{
- if (ctx->pending_irq || ctx->pending_fault)
- return true;
-
- return false;
-}
-
-/**
- * afu_poll() - poll the AFU for events on the context
- * @file: File associated with the adapter context.
- * @poll: Poll structure from the user.
- *
- * Return: poll mask
- */
-static unsigned int afu_poll(struct file *file, struct poll_table_struct *poll)
-{
- struct ocxlflash_context *ctx = file->private_data;
- struct device *dev = ctx->hw_afu->dev;
- ulong lock_flags;
- int mask = 0;
-
- poll_wait(file, &ctx->wq, poll);
-
- spin_lock_irqsave(&ctx->slock, lock_flags);
- if (ctx_event_pending(ctx))
- mask |= POLLIN | POLLRDNORM;
- else if (ctx->state == CLOSED)
- mask |= POLLERR;
- spin_unlock_irqrestore(&ctx->slock, lock_flags);
-
- dev_dbg(dev, "%s: Poll wait completed for pe %i mask %i\n",
- __func__, ctx->pe, mask);
-
- return mask;
-}
-
-/**
- * afu_read() - perform a read on the context for any event
- * @file: File associated with the adapter context.
- * @buf: Buffer to receive the data.
- * @count: Size of buffer (maximum bytes that can be read).
- * @off: Offset.
- *
- * Return: size of the data read on success, -errno on failure
- */
-static ssize_t afu_read(struct file *file, char __user *buf, size_t count,
- loff_t *off)
-{
- struct ocxlflash_context *ctx = file->private_data;
- struct device *dev = ctx->hw_afu->dev;
- struct cxl_event event;
- ulong lock_flags;
- ssize_t esize;
- ssize_t rc;
- int bit;
- DEFINE_WAIT(event_wait);
-
- if (*off != 0) {
- dev_err(dev, "%s: Non-zero offset not supported, off=%lld\n",
- __func__, *off);
- rc = -EINVAL;
- goto out;
- }
-
- spin_lock_irqsave(&ctx->slock, lock_flags);
-
- for (;;) {
- prepare_to_wait(&ctx->wq, &event_wait, TASK_INTERRUPTIBLE);
-
- if (ctx_event_pending(ctx) || (ctx->state == CLOSED))
- break;
-
- if (file->f_flags & O_NONBLOCK) {
- dev_err(dev, "%s: File cannot be blocked on I/O\n",
- __func__);
- rc = -EAGAIN;
- goto err;
- }
-
- if (signal_pending(current)) {
- dev_err(dev, "%s: Signal pending on the process\n",
- __func__);
- rc = -ERESTARTSYS;
- goto err;
- }
-
- spin_unlock_irqrestore(&ctx->slock, lock_flags);
- schedule();
- spin_lock_irqsave(&ctx->slock, lock_flags);
- }
-
- finish_wait(&ctx->wq, &event_wait);
-
- memset(&event, 0, sizeof(event));
- event.header.process_element = ctx->pe;
- event.header.size = sizeof(struct cxl_event_header);
- if (ctx->pending_irq) {
- esize = sizeof(struct cxl_event_afu_interrupt);
- event.header.size += esize;
- event.header.type = CXL_EVENT_AFU_INTERRUPT;
-
- bit = find_first_bit(&ctx->irq_bitmap, ctx->num_irqs);
- clear_bit(bit, &ctx->irq_bitmap);
- event.irq.irq = bit + 1;
- if (bitmap_empty(&ctx->irq_bitmap, ctx->num_irqs))
- ctx->pending_irq = false;
- } else if (ctx->pending_fault) {
- event.header.size += sizeof(struct cxl_event_data_storage);
- event.header.type = CXL_EVENT_DATA_STORAGE;
- event.fault.addr = ctx->fault_addr;
- event.fault.dsisr = ctx->fault_dsisr;
- ctx->pending_fault = false;
- }
-
- spin_unlock_irqrestore(&ctx->slock, lock_flags);
-
- if (copy_to_user(buf, &event, event.header.size)) {
- dev_err(dev, "%s: copy_to_user failed\n", __func__);
- rc = -EFAULT;
- goto out;
- }
-
- rc = event.header.size;
-out:
- return rc;
-err:
- finish_wait(&ctx->wq, &event_wait);
- spin_unlock_irqrestore(&ctx->slock, lock_flags);
- goto out;
-}
-
-/**
- * afu_release() - release and free the context
- * @inode: File inode pointer.
- * @file: File associated with the context.
- *
- * Return: 0 on success, -errno on failure
- */
-static int afu_release(struct inode *inode, struct file *file)
-{
- struct ocxlflash_context *ctx = file->private_data;
- int i;
-
- /* Unmap and free the interrupts associated with the context */
- for (i = ctx->num_irqs; i >= 0; i--)
- afu_unmap_irq(0, ctx, i, ctx);
- free_afu_irqs(ctx);
-
- return ocxlflash_release_context(ctx);
-}
-
-/**
- * ocxlflash_mmap_fault() - mmap fault handler
- * @vmf: VM fault associated with current fault.
- *
- * Return: 0 on success, -errno on failure
- */
-static vm_fault_t ocxlflash_mmap_fault(struct vm_fault *vmf)
-{
- struct vm_area_struct *vma = vmf->vma;
- struct ocxlflash_context *ctx = vma->vm_file->private_data;
- struct device *dev = ctx->hw_afu->dev;
- u64 mmio_area, offset;
-
- offset = vmf->pgoff << PAGE_SHIFT;
- if (offset >= ctx->psn_size)
- return VM_FAULT_SIGBUS;
-
- mutex_lock(&ctx->state_mutex);
- if (ctx->state != STARTED) {
- dev_err(dev, "%s: Context not started, state=%d\n",
- __func__, ctx->state);
- mutex_unlock(&ctx->state_mutex);
- return VM_FAULT_SIGBUS;
- }
- mutex_unlock(&ctx->state_mutex);
-
- mmio_area = ctx->psn_phys;
- mmio_area += offset;
-
- return vmf_insert_pfn(vma, vmf->address, mmio_area >> PAGE_SHIFT);
-}
-
-static const struct vm_operations_struct ocxlflash_vmops = {
- .fault = ocxlflash_mmap_fault,
-};
-
-/**
- * afu_mmap() - map the fault handler operations
- * @file: File associated with the context.
- * @vma: VM area associated with mapping.
- *
- * Return: 0 on success, -errno on failure
- */
-static int afu_mmap(struct file *file, struct vm_area_struct *vma)
-{
- struct ocxlflash_context *ctx = file->private_data;
-
- if ((vma_pages(vma) + vma->vm_pgoff) >
- (ctx->psn_size >> PAGE_SHIFT))
- return -EINVAL;
-
- vm_flags_set(vma, VM_IO | VM_PFNMAP);
- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
- vma->vm_ops = &ocxlflash_vmops;
- return 0;
-}
-
-static const struct file_operations ocxl_afu_fops = {
- .owner = THIS_MODULE,
- .poll = afu_poll,
- .read = afu_read,
- .release = afu_release,
- .mmap = afu_mmap,
-};
-
-#define PATCH_FOPS(NAME) \
- do { if (!fops->NAME) fops->NAME = ocxl_afu_fops.NAME; } while (0)
-
-/**
- * ocxlflash_get_fd() - get file descriptor for an adapter context
- * @ctx_cookie: Adapter context.
- * @fops: File operations to be associated.
- * @fd: File descriptor to be returned back.
- *
- * Return: pointer to the file on success, ERR_PTR on failure
- */
-static struct file *ocxlflash_get_fd(void *ctx_cookie,
- struct file_operations *fops, int *fd)
-{
- struct ocxlflash_context *ctx = ctx_cookie;
- struct device *dev = ctx->hw_afu->dev;
- struct file *file;
- int flags, fdtmp;
- int rc = 0;
- char *name = NULL;
-
- /* Only allow one fd per context */
- if (ctx->mapping) {
- dev_err(dev, "%s: Context is already mapped to an fd\n",
- __func__);
- rc = -EEXIST;
- goto err1;
- }
-
- flags = O_RDWR | O_CLOEXEC;
-
- /* This code is similar to anon_inode_getfd() */
- rc = get_unused_fd_flags(flags);
- if (unlikely(rc < 0)) {
- dev_err(dev, "%s: get_unused_fd_flags failed rc=%d\n",
- __func__, rc);
- goto err1;
- }
- fdtmp = rc;
-
- /* Patch the file ops that are not defined */
- if (fops) {
- PATCH_FOPS(poll);
- PATCH_FOPS(read);
- PATCH_FOPS(release);
- PATCH_FOPS(mmap);
- } else /* Use default ops */
- fops = (struct file_operations *)&ocxl_afu_fops;
-
- name = kasprintf(GFP_KERNEL, "ocxlflash:%d", ctx->pe);
- file = ocxlflash_getfile(dev, name, fops, ctx, flags);
- kfree(name);
- if (IS_ERR(file)) {
- rc = PTR_ERR(file);
- dev_err(dev, "%s: ocxlflash_getfile failed rc=%d\n",
- __func__, rc);
- goto err2;
- }
-
- ctx->mapping = file->f_mapping;
- *fd = fdtmp;
-out:
- return file;
-err2:
- put_unused_fd(fdtmp);
-err1:
- file = ERR_PTR(rc);
- goto out;
-}
-
-/**
- * ocxlflash_fops_get_context() - get the context associated with the file
- * @file: File associated with the adapter context.
- *
- * Return: pointer to the context
- */
-static void *ocxlflash_fops_get_context(struct file *file)
-{
- return file->private_data;
-}
-
-/**
- * ocxlflash_afu_irq() - interrupt handler for user contexts
- * @irq: Interrupt number.
- * @data: Private data provided at interrupt registration, the context.
- *
- * Return: Always return IRQ_HANDLED.
- */
-static irqreturn_t ocxlflash_afu_irq(int irq, void *data)
-{
- struct ocxlflash_context *ctx = data;
- struct device *dev = ctx->hw_afu->dev;
- int i;
-
- dev_dbg(dev, "%s: Interrupt raised for pe %i virq %i\n",
- __func__, ctx->pe, irq);
-
- for (i = 0; i < ctx->num_irqs; i++) {
- if (ctx->irqs[i].virq == irq)
- break;
- }
- if (unlikely(i >= ctx->num_irqs)) {
- dev_err(dev, "%s: Received AFU IRQ out of range\n", __func__);
- goto out;
- }
-
- spin_lock(&ctx->slock);
- set_bit(i - 1, &ctx->irq_bitmap);
- ctx->pending_irq = true;
- spin_unlock(&ctx->slock);
-
- wake_up_all(&ctx->wq);
-out:
- return IRQ_HANDLED;
-}
-
-/**
- * ocxlflash_start_work() - start a user context
- * @ctx_cookie: Context to be started.
- * @num_irqs: Number of interrupts requested.
- *
- * Return: 0 on success, -errno on failure
- */
-static int ocxlflash_start_work(void *ctx_cookie, u64 num_irqs)
-{
- struct ocxlflash_context *ctx = ctx_cookie;
- struct ocxl_hw_afu *afu = ctx->hw_afu;
- struct device *dev = afu->dev;
- char *name;
- int rc = 0;
- int i;
-
- rc = alloc_afu_irqs(ctx, num_irqs);
- if (unlikely(rc < 0)) {
- dev_err(dev, "%s: alloc_afu_irqs failed rc=%d\n", __func__, rc);
- goto out;
- }
-
- for (i = 0; i < num_irqs; i++) {
- name = kasprintf(GFP_KERNEL, "ocxlflash-%s-pe%i-%i",
- dev_name(dev), ctx->pe, i);
- rc = afu_map_irq(0, ctx, i, ocxlflash_afu_irq, ctx, name);
- kfree(name);
- if (unlikely(rc < 0)) {
- dev_err(dev, "%s: afu_map_irq failed rc=%d\n",
- __func__, rc);
- goto err;
- }
- }
-
- rc = start_context(ctx);
- if (unlikely(rc)) {
- dev_err(dev, "%s: start_context failed rc=%d\n", __func__, rc);
- goto err;
- }
-out:
- return rc;
-err:
- for (i = i-1; i >= 0; i--)
- afu_unmap_irq(0, ctx, i, ctx);
- free_afu_irqs(ctx);
- goto out;
-};
-
-/**
- * ocxlflash_fd_mmap() - mmap handler for adapter file descriptor
- * @file: File installed with adapter file descriptor.
- * @vma: VM area associated with mapping.
- *
- * Return: 0 on success, -errno on failure
- */
-static int ocxlflash_fd_mmap(struct file *file, struct vm_area_struct *vma)
-{
- return afu_mmap(file, vma);
-}
-
-/**
- * ocxlflash_fd_release() - release the context associated with the file
- * @inode: File inode pointer.
- * @file: File associated with the adapter context.
- *
- * Return: 0 on success, -errno on failure
- */
-static int ocxlflash_fd_release(struct inode *inode, struct file *file)
-{
- return afu_release(inode, file);
-}
-
-/* Backend ops to ocxlflash services */
-const struct cxlflash_backend_ops cxlflash_ocxl_ops = {
- .module = THIS_MODULE,
- .psa_map = ocxlflash_psa_map,
- .psa_unmap = ocxlflash_psa_unmap,
- .process_element = ocxlflash_process_element,
- .map_afu_irq = ocxlflash_map_afu_irq,
- .unmap_afu_irq = ocxlflash_unmap_afu_irq,
- .get_irq_objhndl = ocxlflash_get_irq_objhndl,
- .start_context = ocxlflash_start_context,
- .stop_context = ocxlflash_stop_context,
- .afu_reset = ocxlflash_afu_reset,
- .set_master = ocxlflash_set_master,
- .get_context = ocxlflash_get_context,
- .dev_context_init = ocxlflash_dev_context_init,
- .release_context = ocxlflash_release_context,
- .perst_reloads_same_image = ocxlflash_perst_reloads_same_image,
- .read_adapter_vpd = ocxlflash_read_adapter_vpd,
- .allocate_afu_irqs = ocxlflash_allocate_afu_irqs,
- .free_afu_irqs = ocxlflash_free_afu_irqs,
- .create_afu = ocxlflash_create_afu,
- .destroy_afu = ocxlflash_destroy_afu,
- .get_fd = ocxlflash_get_fd,
- .fops_get_context = ocxlflash_fops_get_context,
- .start_work = ocxlflash_start_work,
- .fd_mmap = ocxlflash_fd_mmap,
- .fd_release = ocxlflash_fd_release,
-};
diff --git a/drivers/scsi/cxlflash/ocxl_hw.h b/drivers/scsi/cxlflash/ocxl_hw.h
deleted file mode 100644
index f2fe88816bea..000000000000
--- a/drivers/scsi/cxlflash/ocxl_hw.h
+++ /dev/null
@@ -1,72 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * CXL Flash Device Driver
- *
- * Written by: Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
- * Uma Krishnan <ukrishn@linux.vnet.ibm.com>, IBM Corporation
- *
- * Copyright (C) 2018 IBM Corporation
- */
-
-#define OCXL_MAX_IRQS 4 /* Max interrupts per process */
-
-struct ocxlflash_irqs {
- int hwirq;
- u32 virq;
- void __iomem *vtrig;
-};
-
-/* OCXL hardware AFU associated with the host */
-struct ocxl_hw_afu {
- struct ocxlflash_context *ocxl_ctx; /* Host context */
- struct pci_dev *pdev; /* PCI device */
- struct device *dev; /* Generic device */
- bool perst_same_image; /* Same image loaded on perst */
-
- struct ocxl_fn_config fcfg; /* DVSEC config of the function */
- struct ocxl_afu_config acfg; /* AFU configuration data */
-
- int fn_actag_base; /* Function acTag base */
- int fn_actag_enabled; /* Function acTag number enabled */
- int afu_actag_base; /* AFU acTag base */
- int afu_actag_enabled; /* AFU acTag number enabled */
-
- phys_addr_t ppmmio_phys; /* Per process MMIO space */
- phys_addr_t gmmio_phys; /* Global AFU MMIO space */
- void __iomem *gmmio_virt; /* Global MMIO map */
-
- void *link_token; /* Link token for the SPA */
- struct idr idr; /* IDR to manage contexts */
- int max_pasid; /* Maximum number of contexts */
- bool is_present; /* Function has AFUs defined */
-};
-
-enum ocxlflash_ctx_state {
- CLOSED,
- OPENED,
- STARTED
-};
-
-struct ocxlflash_context {
- struct ocxl_hw_afu *hw_afu; /* HW AFU back pointer */
- struct address_space *mapping; /* Mapping for pseudo filesystem */
- bool master; /* Whether this is a master context */
- int pe; /* Process element */
-
- phys_addr_t psn_phys; /* Process mapping */
- u64 psn_size; /* Process mapping size */
-
- spinlock_t slock; /* Protects irq/fault/event updates */
- wait_queue_head_t wq; /* Wait queue for poll and interrupts */
- struct mutex state_mutex; /* Mutex to update context state */
- enum ocxlflash_ctx_state state; /* Context state */
-
- struct ocxlflash_irqs *irqs; /* Pointer to array of structures */
- int num_irqs; /* Number of interrupts */
- bool pending_irq; /* Pending interrupt on the context */
- ulong irq_bitmap; /* Bits indicating pending irq num */
-
- u64 fault_addr; /* Address that triggered the fault */
- u64 fault_dsisr; /* Value of dsisr register at fault */
- bool pending_fault; /* Pending translation fault */
-};
diff --git a/drivers/scsi/cxlflash/sislite.h b/drivers/scsi/cxlflash/sislite.h
deleted file mode 100644
index ab315c59505b..000000000000
--- a/drivers/scsi/cxlflash/sislite.h
+++ /dev/null
@@ -1,560 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * CXL Flash Device Driver
- *
- * Written by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>, IBM Corporation
- * Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
- *
- * Copyright (C) 2015 IBM Corporation
- */
-
-#ifndef _SISLITE_H
-#define _SISLITE_H
-
-#include <linux/types.h>
-
-typedef u16 ctx_hndl_t;
-typedef u32 res_hndl_t;
-
-#define SIZE_4K 4096
-#define SIZE_64K 65536
-
-/*
- * IOARCB: 64 bytes, min 16 byte alignment required, host native endianness
- * except for SCSI CDB which remains big endian per SCSI standards.
- */
-struct sisl_ioarcb {
- u16 ctx_id; /* ctx_hndl_t */
- u16 req_flags;
-#define SISL_REQ_FLAGS_RES_HNDL 0x8000U /* bit 0 (MSB) */
-#define SISL_REQ_FLAGS_PORT_LUN_ID 0x0000U
-
-#define SISL_REQ_FLAGS_SUP_UNDERRUN 0x4000U /* bit 1 */
-
-#define SISL_REQ_FLAGS_TIMEOUT_SECS 0x0000U /* bits 8,9 */
-#define SISL_REQ_FLAGS_TIMEOUT_MSECS 0x0040U
-#define SISL_REQ_FLAGS_TIMEOUT_USECS 0x0080U
-#define SISL_REQ_FLAGS_TIMEOUT_CYCLES 0x00C0U
-
-#define SISL_REQ_FLAGS_TMF_CMD 0x0004u /* bit 13 */
-
-#define SISL_REQ_FLAGS_AFU_CMD 0x0002U /* bit 14 */
-
-#define SISL_REQ_FLAGS_HOST_WRITE 0x0001U /* bit 15 (LSB) */
-#define SISL_REQ_FLAGS_HOST_READ 0x0000U
-
- union {
- u32 res_hndl; /* res_hndl_t */
- u32 port_sel; /* this is a selection mask:
- * 0x1 -> port#0 can be selected,
- * 0x2 -> port#1 can be selected.
- * Can be bitwise ORed.
- */
- };
- u64 lun_id;
- u32 data_len; /* 4K for read/write */
- u32 ioadl_len;
- union {
- u64 data_ea; /* min 16 byte aligned */
- u64 ioadl_ea;
- };
- u8 msi; /* LISN to send on RRQ write */
-#define SISL_MSI_CXL_PFAULT 0 /* reserved for CXL page faults */
-#define SISL_MSI_SYNC_ERROR 1 /* recommended for AFU sync error */
-#define SISL_MSI_RRQ_UPDATED 2 /* recommended for IO completion */
-#define SISL_MSI_ASYNC_ERROR 3 /* master only - for AFU async error */
-
- u8 rrq; /* 0 for a single RRQ */
- u16 timeout; /* in units specified by req_flags */
- u32 rsvd1;
- u8 cdb[16]; /* must be in big endian */
-#define SISL_AFU_CMD_SYNC 0xC0 /* AFU sync command */
-#define SISL_AFU_CMD_LUN_PROVISION 0xD0 /* AFU LUN provision command */
-#define SISL_AFU_CMD_DEBUG 0xE0 /* AFU debug command */
-
-#define SISL_AFU_LUN_PROVISION_CREATE 0x00 /* LUN provision create type */
-#define SISL_AFU_LUN_PROVISION_DELETE 0x01 /* LUN provision delete type */
-
- union {
- u64 reserved; /* Reserved for IOARRIN mode */
- struct sisl_ioasa *ioasa; /* IOASA EA for SQ Mode */
- };
-} __packed;
-
-struct sisl_rc {
- u8 flags;
-#define SISL_RC_FLAGS_SENSE_VALID 0x80U
-#define SISL_RC_FLAGS_FCP_RSP_CODE_VALID 0x40U
-#define SISL_RC_FLAGS_OVERRUN 0x20U
-#define SISL_RC_FLAGS_UNDERRUN 0x10U
-
- u8 afu_rc;
-#define SISL_AFU_RC_RHT_INVALID 0x01U /* user error */
-#define SISL_AFU_RC_RHT_UNALIGNED 0x02U /* should never happen */
-#define SISL_AFU_RC_RHT_OUT_OF_BOUNDS 0x03u /* user error */
-#define SISL_AFU_RC_RHT_DMA_ERR 0x04u /* see afu_extra
- * may retry if afu_retry is off
- * possible on master exit
- */
-#define SISL_AFU_RC_RHT_RW_PERM 0x05u /* no RW perms, user error */
-#define SISL_AFU_RC_LXT_UNALIGNED 0x12U /* should never happen */
-#define SISL_AFU_RC_LXT_OUT_OF_BOUNDS 0x13u /* user error */
-#define SISL_AFU_RC_LXT_DMA_ERR 0x14u /* see afu_extra
- * may retry if afu_retry is off
- * possible on master exit
- */
-#define SISL_AFU_RC_LXT_RW_PERM 0x15u /* no RW perms, user error */
-
-#define SISL_AFU_RC_NOT_XLATE_HOST 0x1au /* possible if master exited */
-
- /* NO_CHANNELS means the FC ports selected by dest_port in
- * IOARCB or in the LXT entry are down when the AFU tried to select
- * a FC port. If the port went down on an active IO, it will set
- * fc_rc to =0x54(NOLOGI) or 0x57(LINKDOWN) instead.
- */
-#define SISL_AFU_RC_NO_CHANNELS 0x20U /* see afu_extra, may retry */
-#define SISL_AFU_RC_CAP_VIOLATION 0x21U /* either user error or
- * afu reset/master restart
- */
-#define SISL_AFU_RC_OUT_OF_DATA_BUFS 0x30U /* always retry */
-#define SISL_AFU_RC_DATA_DMA_ERR 0x31U /* see afu_extra
- * may retry if afu_retry is off
- */
-
- u8 scsi_rc; /* SCSI status byte, retry as appropriate */
-#define SISL_SCSI_RC_CHECK 0x02U
-#define SISL_SCSI_RC_BUSY 0x08u
-
- u8 fc_rc; /* retry */
- /*
- * We should only see fc_rc=0x57 (LINKDOWN) or 0x54(NOLOGI) for
- * commands that are in flight when a link goes down or is logged out.
- * If the link is down or logged out before AFU selects the port, either
- * it will choose the other port or we will get afu_rc=0x20 (no_channel)
- * if there is no valid port to use.
- *
- * ABORTPEND/ABORTOK/ABORTFAIL/TGTABORT can be retried, typically these
- * would happen if a frame is dropped and something times out.
- * NOLOGI or LINKDOWN can be retried if the other port is up.
- * RESIDERR can be retried as well.
- *
- * ABORTFAIL might indicate that lots of frames are getting CRC errors.
- * So it maybe retried once and reset the link if it happens again.
- * The link can also be reset on the CRC error threshold interrupt.
- */
-#define SISL_FC_RC_ABORTPEND 0x52 /* exchange timeout or abort request */
-#define SISL_FC_RC_WRABORTPEND 0x53 /* due to write XFER_RDY invalid */
-#define SISL_FC_RC_NOLOGI 0x54 /* port not logged in, in-flight cmds */
-#define SISL_FC_RC_NOEXP 0x55 /* FC protocol error or HW bug */
-#define SISL_FC_RC_INUSE 0x56 /* tag already in use, HW bug */
-#define SISL_FC_RC_LINKDOWN 0x57 /* link down, in-flight cmds */
-#define SISL_FC_RC_ABORTOK 0x58 /* pending abort completed w/success */
-#define SISL_FC_RC_ABORTFAIL 0x59 /* pending abort completed w/fail */
-#define SISL_FC_RC_RESID 0x5A /* ioasa underrun/overrun flags set */
-#define SISL_FC_RC_RESIDERR 0x5B /* actual data len does not match SCSI
- * reported len, possibly due to dropped
- * frames
- */
-#define SISL_FC_RC_TGTABORT 0x5C /* command aborted by target */
-};
-
-#define SISL_SENSE_DATA_LEN 20 /* Sense data length */
-#define SISL_WWID_DATA_LEN 16 /* WWID data length */
-
-/*
- * IOASA: 64 bytes & must follow IOARCB, min 16 byte alignment required,
- * host native endianness
- */
-struct sisl_ioasa {
- union {
- struct sisl_rc rc;
- u32 ioasc;
-#define SISL_IOASC_GOOD_COMPLETION 0x00000000U
- };
-
- union {
- u32 resid;
- u32 lunid_hi;
- };
-
- u8 port;
- u8 afu_extra;
- /* when afu_rc=0x04, 0x14, 0x31 (_xxx_DMA_ERR):
- * afu_exta contains PSL response code. Useful codes are:
- */
-#define SISL_AFU_DMA_ERR_PAGE_IN 0x0A /* AFU_retry_on_pagein Action
- * Enabled N/A
- * Disabled retry
- */
-#define SISL_AFU_DMA_ERR_INVALID_EA 0x0B /* this is a hard error
- * afu_rc Implies
- * 0x04, 0x14 master exit.
- * 0x31 user error.
- */
- /* when afu rc=0x20 (no channels):
- * afu_extra bits [4:5]: available portmask, [6:7]: requested portmask.
- */
-#define SISL_AFU_NO_CLANNELS_AMASK(afu_extra) (((afu_extra) & 0x0C) >> 2)
-#define SISL_AFU_NO_CLANNELS_RMASK(afu_extra) ((afu_extra) & 0x03)
-
- u8 scsi_extra;
- u8 fc_extra;
-
- union {
- u8 sense_data[SISL_SENSE_DATA_LEN];
- struct {
- u32 lunid_lo;
- u8 wwid[SISL_WWID_DATA_LEN];
- };
- };
-
- /* These fields are defined by the SISlite architecture for the
- * host to use as they see fit for their implementation.
- */
- union {
- u64 host_use[4];
- u8 host_use_b[32];
- };
-} __packed;
-
-#define SISL_RESP_HANDLE_T_BIT 0x1ULL /* Toggle bit */
-
-/* MMIO space is required to support only 64-bit access */
-
-/*
- * This AFU has two mechanisms to deal with endian-ness.
- * One is a global configuration (in the afu_config) register
- * below that specifies the endian-ness of the host.
- * The other is a per context (i.e. application) specification
- * controlled by the endian_ctrl field here. Since the master
- * context is one such application the master context's
- * endian-ness is set to be the same as the host.
- *
- * As per the SISlite spec, the MMIO registers are always
- * big endian.
- */
-#define SISL_ENDIAN_CTRL_BE 0x8000000000000080ULL
-#define SISL_ENDIAN_CTRL_LE 0x0000000000000000ULL
-
-#ifdef __BIG_ENDIAN
-#define SISL_ENDIAN_CTRL SISL_ENDIAN_CTRL_BE
-#else
-#define SISL_ENDIAN_CTRL SISL_ENDIAN_CTRL_LE
-#endif
-
-/* per context host transport MMIO */
-struct sisl_host_map {
- __be64 endian_ctrl; /* Per context Endian Control. The AFU will
- * operate on whatever the context is of the
- * host application.
- */
-
- __be64 intr_status; /* this sends LISN# programmed in ctx_ctrl.
- * Only recovery in a PERM_ERR is a context
- * exit since there is no way to tell which
- * command caused the error.
- */
-#define SISL_ISTATUS_PERM_ERR_LISN_3_EA 0x0400ULL /* b53, user error */
-#define SISL_ISTATUS_PERM_ERR_LISN_2_EA 0x0200ULL /* b54, user error */
-#define SISL_ISTATUS_PERM_ERR_LISN_1_EA 0x0100ULL /* b55, user error */
-#define SISL_ISTATUS_PERM_ERR_LISN_3_PASID 0x0080ULL /* b56, user error */
-#define SISL_ISTATUS_PERM_ERR_LISN_2_PASID 0x0040ULL /* b57, user error */
-#define SISL_ISTATUS_PERM_ERR_LISN_1_PASID 0x0020ULL /* b58, user error */
-#define SISL_ISTATUS_PERM_ERR_CMDROOM 0x0010ULL /* b59, user error */
-#define SISL_ISTATUS_PERM_ERR_RCB_READ 0x0008ULL /* b60, user error */
-#define SISL_ISTATUS_PERM_ERR_SA_WRITE 0x0004ULL /* b61, user error */
-#define SISL_ISTATUS_PERM_ERR_RRQ_WRITE 0x0002ULL /* b62, user error */
- /* Page in wait accessing RCB/IOASA/RRQ is reported in b63.
- * Same error in data/LXT/RHT access is reported via IOASA.
- */
-#define SISL_ISTATUS_TEMP_ERR_PAGEIN 0x0001ULL /* b63, can only be
- * generated when AFU
- * auto retry is
- * disabled. If user
- * can determine the
- * command that caused
- * the error, it can
- * be retried.
- */
-#define SISL_ISTATUS_UNMASK (0x07FFULL) /* 1 means unmasked */
-#define SISL_ISTATUS_MASK ~(SISL_ISTATUS_UNMASK) /* 1 means masked */
-
- __be64 intr_clear;
- __be64 intr_mask;
- __be64 ioarrin; /* only write what cmd_room permits */
- __be64 rrq_start; /* start & end are both inclusive */
- __be64 rrq_end; /* write sequence: start followed by end */
- __be64 cmd_room;
- __be64 ctx_ctrl; /* least significant byte or b56:63 is LISN# */
-#define SISL_CTX_CTRL_UNMAP_SECTOR 0x8000000000000000ULL /* b0 */
-#define SISL_CTX_CTRL_LISN_MASK (0xFFULL)
- __be64 mbox_w; /* restricted use */
- __be64 sq_start; /* Submission Queue (R/W): write sequence and */
- __be64 sq_end; /* inclusion semantics are the same as RRQ */
- __be64 sq_head; /* Submission Queue Head (R): for debugging */
- __be64 sq_tail; /* Submission Queue TAIL (R/W): next IOARCB */
- __be64 sq_ctx_reset; /* Submission Queue Context Reset (R/W) */
-};
-
-/* per context provisioning & control MMIO */
-struct sisl_ctrl_map {
- __be64 rht_start;
- __be64 rht_cnt_id;
- /* both cnt & ctx_id args must be ULL */
-#define SISL_RHT_CNT_ID(cnt, ctx_id) (((cnt) << 48) | ((ctx_id) << 32))
-
- __be64 ctx_cap; /* afu_rc below is when the capability is violated */
-#define SISL_CTX_CAP_PROXY_ISSUE 0x8000000000000000ULL /* afu_rc 0x21 */
-#define SISL_CTX_CAP_REAL_MODE 0x4000000000000000ULL /* afu_rc 0x21 */
-#define SISL_CTX_CAP_HOST_XLATE 0x2000000000000000ULL /* afu_rc 0x1a */
-#define SISL_CTX_CAP_PROXY_TARGET 0x1000000000000000ULL /* afu_rc 0x21 */
-#define SISL_CTX_CAP_AFU_CMD 0x0000000000000008ULL /* afu_rc 0x21 */
-#define SISL_CTX_CAP_GSCSI_CMD 0x0000000000000004ULL /* afu_rc 0x21 */
-#define SISL_CTX_CAP_WRITE_CMD 0x0000000000000002ULL /* afu_rc 0x21 */
-#define SISL_CTX_CAP_READ_CMD 0x0000000000000001ULL /* afu_rc 0x21 */
- __be64 mbox_r;
- __be64 lisn_pasid[2];
- /* pasid _a arg must be ULL */
-#define SISL_LISN_PASID(_a, _b) (((_a) << 32) | (_b))
- __be64 lisn_ea[3];
-};
-
-/* single copy global regs */
-struct sisl_global_regs {
- __be64 aintr_status;
- /*
- * In cxlflash, FC port/link are arranged in port pairs, each
- * gets a byte of status:
- *
- * *_OTHER: other err, FC_ERRCAP[31:20]
- * *_LOGO: target sent FLOGI/PLOGI/LOGO while logged in
- * *_CRC_T: CRC threshold exceeded
- * *_LOGI_R: login state machine timed out and retrying
- * *_LOGI_F: login failed, FC_ERROR[19:0]
- * *_LOGI_S: login succeeded
- * *_LINK_DN: link online to offline
- * *_LINK_UP: link offline to online
- */
-#define SISL_ASTATUS_FC2_OTHER 0x80000000ULL /* b32 */
-#define SISL_ASTATUS_FC2_LOGO 0x40000000ULL /* b33 */
-#define SISL_ASTATUS_FC2_CRC_T 0x20000000ULL /* b34 */
-#define SISL_ASTATUS_FC2_LOGI_R 0x10000000ULL /* b35 */
-#define SISL_ASTATUS_FC2_LOGI_F 0x08000000ULL /* b36 */
-#define SISL_ASTATUS_FC2_LOGI_S 0x04000000ULL /* b37 */
-#define SISL_ASTATUS_FC2_LINK_DN 0x02000000ULL /* b38 */
-#define SISL_ASTATUS_FC2_LINK_UP 0x01000000ULL /* b39 */
-
-#define SISL_ASTATUS_FC3_OTHER 0x00800000ULL /* b40 */
-#define SISL_ASTATUS_FC3_LOGO 0x00400000ULL /* b41 */
-#define SISL_ASTATUS_FC3_CRC_T 0x00200000ULL /* b42 */
-#define SISL_ASTATUS_FC3_LOGI_R 0x00100000ULL /* b43 */
-#define SISL_ASTATUS_FC3_LOGI_F 0x00080000ULL /* b44 */
-#define SISL_ASTATUS_FC3_LOGI_S 0x00040000ULL /* b45 */
-#define SISL_ASTATUS_FC3_LINK_DN 0x00020000ULL /* b46 */
-#define SISL_ASTATUS_FC3_LINK_UP 0x00010000ULL /* b47 */
-
-#define SISL_ASTATUS_FC0_OTHER 0x00008000ULL /* b48 */
-#define SISL_ASTATUS_FC0_LOGO 0x00004000ULL /* b49 */
-#define SISL_ASTATUS_FC0_CRC_T 0x00002000ULL /* b50 */
-#define SISL_ASTATUS_FC0_LOGI_R 0x00001000ULL /* b51 */
-#define SISL_ASTATUS_FC0_LOGI_F 0x00000800ULL /* b52 */
-#define SISL_ASTATUS_FC0_LOGI_S 0x00000400ULL /* b53 */
-#define SISL_ASTATUS_FC0_LINK_DN 0x00000200ULL /* b54 */
-#define SISL_ASTATUS_FC0_LINK_UP 0x00000100ULL /* b55 */
-
-#define SISL_ASTATUS_FC1_OTHER 0x00000080ULL /* b56 */
-#define SISL_ASTATUS_FC1_LOGO 0x00000040ULL /* b57 */
-#define SISL_ASTATUS_FC1_CRC_T 0x00000020ULL /* b58 */
-#define SISL_ASTATUS_FC1_LOGI_R 0x00000010ULL /* b59 */
-#define SISL_ASTATUS_FC1_LOGI_F 0x00000008ULL /* b60 */
-#define SISL_ASTATUS_FC1_LOGI_S 0x00000004ULL /* b61 */
-#define SISL_ASTATUS_FC1_LINK_DN 0x00000002ULL /* b62 */
-#define SISL_ASTATUS_FC1_LINK_UP 0x00000001ULL /* b63 */
-
-#define SISL_FC_INTERNAL_UNMASK 0x0000000300000000ULL /* 1 means unmasked */
-#define SISL_FC_INTERNAL_MASK ~(SISL_FC_INTERNAL_UNMASK)
-#define SISL_FC_INTERNAL_SHIFT 32
-
-#define SISL_FC_SHUTDOWN_NORMAL 0x0000000000000010ULL
-#define SISL_FC_SHUTDOWN_ABRUPT 0x0000000000000020ULL
-
-#define SISL_STATUS_SHUTDOWN_ACTIVE 0x0000000000000010ULL
-#define SISL_STATUS_SHUTDOWN_COMPLETE 0x0000000000000020ULL
-
-#define SISL_ASTATUS_UNMASK 0xFFFFFFFFULL /* 1 means unmasked */
-#define SISL_ASTATUS_MASK ~(SISL_ASTATUS_UNMASK) /* 1 means masked */
-
- __be64 aintr_clear;
- __be64 aintr_mask;
- __be64 afu_ctrl;
- __be64 afu_hb;
- __be64 afu_scratch_pad;
- __be64 afu_port_sel;
-#define SISL_AFUCONF_AR_IOARCB 0x4000ULL
-#define SISL_AFUCONF_AR_LXT 0x2000ULL
-#define SISL_AFUCONF_AR_RHT 0x1000ULL
-#define SISL_AFUCONF_AR_DATA 0x0800ULL
-#define SISL_AFUCONF_AR_RSRC 0x0400ULL
-#define SISL_AFUCONF_AR_IOASA 0x0200ULL
-#define SISL_AFUCONF_AR_RRQ 0x0100ULL
-/* Aggregate all Auto Retry Bits */
-#define SISL_AFUCONF_AR_ALL (SISL_AFUCONF_AR_IOARCB|SISL_AFUCONF_AR_LXT| \
- SISL_AFUCONF_AR_RHT|SISL_AFUCONF_AR_DATA| \
- SISL_AFUCONF_AR_RSRC|SISL_AFUCONF_AR_IOASA| \
- SISL_AFUCONF_AR_RRQ)
-#ifdef __BIG_ENDIAN
-#define SISL_AFUCONF_ENDIAN 0x0000ULL
-#else
-#define SISL_AFUCONF_ENDIAN 0x0020ULL
-#endif
-#define SISL_AFUCONF_MBOX_CLR_READ 0x0010ULL
- __be64 afu_config;
- __be64 rsvd[0xf8];
- __le64 afu_version;
- __be64 interface_version;
-#define SISL_INTVER_CAP_SHIFT 16
-#define SISL_INTVER_MAJ_SHIFT 8
-#define SISL_INTVER_CAP_MASK 0xFFFFFFFF00000000ULL
-#define SISL_INTVER_MAJ_MASK 0x00000000FFFF0000ULL
-#define SISL_INTVER_MIN_MASK 0x000000000000FFFFULL
-#define SISL_INTVER_CAP_IOARRIN_CMD_MODE 0x800000000000ULL
-#define SISL_INTVER_CAP_SQ_CMD_MODE 0x400000000000ULL
-#define SISL_INTVER_CAP_RESERVED_CMD_MODE_A 0x200000000000ULL
-#define SISL_INTVER_CAP_RESERVED_CMD_MODE_B 0x100000000000ULL
-#define SISL_INTVER_CAP_LUN_PROVISION 0x080000000000ULL
-#define SISL_INTVER_CAP_AFU_DEBUG 0x040000000000ULL
-#define SISL_INTVER_CAP_OCXL_LISN 0x020000000000ULL
-};
-
-#define CXLFLASH_NUM_FC_PORTS_PER_BANK 2 /* fixed # of ports per bank */
-#define CXLFLASH_MAX_FC_BANKS 2 /* max # of banks supported */
-#define CXLFLASH_MAX_FC_PORTS (CXLFLASH_NUM_FC_PORTS_PER_BANK * \
- CXLFLASH_MAX_FC_BANKS)
-#define CXLFLASH_MAX_CONTEXT 512 /* number of contexts per AFU */
-#define CXLFLASH_NUM_VLUNS 512 /* number of vluns per AFU/port */
-#define CXLFLASH_NUM_REGS 512 /* number of registers per port */
-
-struct fc_port_bank {
- __be64 fc_port_regs[CXLFLASH_NUM_FC_PORTS_PER_BANK][CXLFLASH_NUM_REGS];
- __be64 fc_port_luns[CXLFLASH_NUM_FC_PORTS_PER_BANK][CXLFLASH_NUM_VLUNS];
-};
-
-struct sisl_global_map {
- union {
- struct sisl_global_regs regs;
- char page0[SIZE_4K]; /* page 0 */
- };
-
- char page1[SIZE_4K]; /* page 1 */
-
- struct fc_port_bank bank[CXLFLASH_MAX_FC_BANKS]; /* pages 2 - 9 */
-
- /* pages 10 - 15 are reserved */
-
-};
-
-/*
- * CXL Flash Memory Map
- *
- * +-------------------------------+
- * | 512 * 64 KB User MMIO |
- * | (per context) |
- * | User Accessible |
- * +-------------------------------+
- * | 512 * 128 B per context |
- * | Provisioning and Control |
- * | Trusted Process accessible |
- * +-------------------------------+
- * | 64 KB Global |
- * | Trusted Process accessible |
- * +-------------------------------+
- */
-struct cxlflash_afu_map {
- union {
- struct sisl_host_map host;
- char harea[SIZE_64K]; /* 64KB each */
- } hosts[CXLFLASH_MAX_CONTEXT];
-
- union {
- struct sisl_ctrl_map ctrl;
- char carea[cache_line_size()]; /* 128B each */
- } ctrls[CXLFLASH_MAX_CONTEXT];
-
- union {
- struct sisl_global_map global;
- char garea[SIZE_64K]; /* 64KB single block */
- };
-};
-
-/*
- * LXT - LBA Translation Table
- * LXT control blocks
- */
-struct sisl_lxt_entry {
- u64 rlba_base; /* bits 0:47 is base
- * b48:55 is lun index
- * b58:59 is write & read perms
- * (if no perm, afu_rc=0x15)
- * b60:63 is port_sel mask
- */
-};
-
-/*
- * RHT - Resource Handle Table
- * Per the SISlite spec, RHT entries are to be 16-byte aligned
- */
-struct sisl_rht_entry {
- struct sisl_lxt_entry *lxt_start;
- u32 lxt_cnt;
- u16 rsvd;
- u8 fp; /* format & perm nibbles.
- * (if no perm, afu_rc=0x05)
- */
- u8 nmask;
-} __packed __aligned(16);
-
-struct sisl_rht_entry_f1 {
- u64 lun_id;
- union {
- struct {
- u8 valid;
- u8 rsvd[5];
- u8 fp;
- u8 port_sel;
- };
-
- u64 dw;
- };
-} __packed __aligned(16);
-
-/* make the fp byte */
-#define SISL_RHT_FP(fmt, perm) (((fmt) << 4) | (perm))
-
-/* make the fp byte for a clone from a source fp and clone flags
- * flags must be only 2 LSB bits.
- */
-#define SISL_RHT_FP_CLONE(src_fp, cln_flags) ((src_fp) & (0xFC | (cln_flags)))
-
-#define RHT_PERM_READ 0x01U
-#define RHT_PERM_WRITE 0x02U
-#define RHT_PERM_RW (RHT_PERM_READ | RHT_PERM_WRITE)
-
-/* extract the perm bits from a fp */
-#define SISL_RHT_PERM(fp) ((fp) & RHT_PERM_RW)
-
-#define PORT0 0x01U
-#define PORT1 0x02U
-#define PORT2 0x04U
-#define PORT3 0x08U
-#define PORT_MASK(_n) ((1 << (_n)) - 1)
-
-/* AFU Sync Mode byte */
-#define AFU_LW_SYNC 0x0U
-#define AFU_HW_SYNC 0x1U
-#define AFU_GSYNC 0x2U
-
-/* Special Task Management Function CDB */
-#define TMF_LUN_RESET 0x1U
-#define TMF_CLEAR_ACA 0x2U
-
-#endif /* _SISLITE_H */
diff --git a/drivers/scsi/cxlflash/superpipe.c b/drivers/scsi/cxlflash/superpipe.c
deleted file mode 100644
index 97631f48e19d..000000000000
--- a/drivers/scsi/cxlflash/superpipe.c
+++ /dev/null
@@ -1,2218 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * CXL Flash Device Driver
- *
- * Written by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>, IBM Corporation
- * Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
- *
- * Copyright (C) 2015 IBM Corporation
- */
-
-#include <linux/delay.h>
-#include <linux/file.h>
-#include <linux/interrupt.h>
-#include <linux/pci.h>
-#include <linux/syscalls.h>
-#include <linux/unaligned.h>
-
-#include <scsi/scsi.h>
-#include <scsi/scsi_host.h>
-#include <scsi/scsi_cmnd.h>
-#include <scsi/scsi_eh.h>
-#include <uapi/scsi/cxlflash_ioctl.h>
-
-#include "sislite.h"
-#include "common.h"
-#include "vlun.h"
-#include "superpipe.h"
-
-struct cxlflash_global global;
-
-/**
- * marshal_rele_to_resize() - translate release to resize structure
- * @release: Source structure from which to translate/copy.
- * @resize: Destination structure for the translate/copy.
- */
-static void marshal_rele_to_resize(struct dk_cxlflash_release *release,
- struct dk_cxlflash_resize *resize)
-{
- resize->hdr = release->hdr;
- resize->context_id = release->context_id;
- resize->rsrc_handle = release->rsrc_handle;
-}
-
-/**
- * marshal_det_to_rele() - translate detach to release structure
- * @detach: Destination structure for the translate/copy.
- * @release: Source structure from which to translate/copy.
- */
-static void marshal_det_to_rele(struct dk_cxlflash_detach *detach,
- struct dk_cxlflash_release *release)
-{
- release->hdr = detach->hdr;
- release->context_id = detach->context_id;
-}
-
-/**
- * marshal_udir_to_rele() - translate udirect to release structure
- * @udirect: Source structure from which to translate/copy.
- * @release: Destination structure for the translate/copy.
- */
-static void marshal_udir_to_rele(struct dk_cxlflash_udirect *udirect,
- struct dk_cxlflash_release *release)
-{
- release->hdr = udirect->hdr;
- release->context_id = udirect->context_id;
- release->rsrc_handle = udirect->rsrc_handle;
-}
-
-/**
- * cxlflash_free_errpage() - frees resources associated with global error page
- */
-void cxlflash_free_errpage(void)
-{
-
- mutex_lock(&global.mutex);
- if (global.err_page) {
- __free_page(global.err_page);
- global.err_page = NULL;
- }
- mutex_unlock(&global.mutex);
-}
-
-/**
- * cxlflash_stop_term_user_contexts() - stops/terminates known user contexts
- * @cfg: Internal structure associated with the host.
- *
- * When the host needs to go down, all users must be quiesced and their
- * memory freed. This is accomplished by putting the contexts in error
- * state which will notify the user and let them 'drive' the tear down.
- * Meanwhile, this routine camps until all user contexts have been removed.
- *
- * Note that the main loop in this routine will always execute at least once
- * to flush the reset_waitq.
- */
-void cxlflash_stop_term_user_contexts(struct cxlflash_cfg *cfg)
-{
- struct device *dev = &cfg->dev->dev;
- int i, found = true;
-
- cxlflash_mark_contexts_error(cfg);
-
- while (true) {
- for (i = 0; i < MAX_CONTEXT; i++)
- if (cfg->ctx_tbl[i]) {
- found = true;
- break;
- }
-
- if (!found && list_empty(&cfg->ctx_err_recovery))
- return;
-
- dev_dbg(dev, "%s: Wait for user contexts to quiesce...\n",
- __func__);
- wake_up_all(&cfg->reset_waitq);
- ssleep(1);
- found = false;
- }
-}
-
-/**
- * find_error_context() - locates a context by cookie on the error recovery list
- * @cfg: Internal structure associated with the host.
- * @rctxid: Desired context by id.
- * @file: Desired context by file.
- *
- * Return: Found context on success, NULL on failure
- */
-static struct ctx_info *find_error_context(struct cxlflash_cfg *cfg, u64 rctxid,
- struct file *file)
-{
- struct ctx_info *ctxi;
-
- list_for_each_entry(ctxi, &cfg->ctx_err_recovery, list)
- if ((ctxi->ctxid == rctxid) || (ctxi->file == file))
- return ctxi;
-
- return NULL;
-}
-
-/**
- * get_context() - obtains a validated and locked context reference
- * @cfg: Internal structure associated with the host.
- * @rctxid: Desired context (raw, un-decoded format).
- * @arg: LUN information or file associated with request.
- * @ctx_ctrl: Control information to 'steer' desired lookup.
- *
- * NOTE: Despite the name pid, in Linux, current->pid actually refers
- * to the lightweight process id (tid) and can change if the process is
- * multithreaded. The tgid remains constant for the process and only
- * changes upon fork. For all intents and purposes, think of tgid
- * as a pid in the traditional sense.
- *
- * Return: Validated context on success, NULL on failure
- */
-struct ctx_info *get_context(struct cxlflash_cfg *cfg, u64 rctxid,
- void *arg, enum ctx_ctrl ctx_ctrl)
-{
- struct device *dev = &cfg->dev->dev;
- struct ctx_info *ctxi = NULL;
- struct lun_access *lun_access = NULL;
- struct file *file = NULL;
- struct llun_info *lli = arg;
- u64 ctxid = DECODE_CTXID(rctxid);
- int rc;
- pid_t pid = task_tgid_nr(current), ctxpid = 0;
-
- if (ctx_ctrl & CTX_CTRL_FILE) {
- lli = NULL;
- file = (struct file *)arg;
- }
-
- if (ctx_ctrl & CTX_CTRL_CLONE)
- pid = task_ppid_nr(current);
-
- if (likely(ctxid < MAX_CONTEXT)) {
- while (true) {
- mutex_lock(&cfg->ctx_tbl_list_mutex);
- ctxi = cfg->ctx_tbl[ctxid];
- if (ctxi)
- if ((file && (ctxi->file != file)) ||
- (!file && (ctxi->ctxid != rctxid)))
- ctxi = NULL;
-
- if ((ctx_ctrl & CTX_CTRL_ERR) ||
- (!ctxi && (ctx_ctrl & CTX_CTRL_ERR_FALLBACK)))
- ctxi = find_error_context(cfg, rctxid, file);
- if (!ctxi) {
- mutex_unlock(&cfg->ctx_tbl_list_mutex);
- goto out;
- }
-
- /*
- * Need to acquire ownership of the context while still
- * under the table/list lock to serialize with a remove
- * thread. Use the 'try' to avoid stalling the
- * table/list lock for a single context.
- *
- * Note that the lock order is:
- *
- * cfg->ctx_tbl_list_mutex -> ctxi->mutex
- *
- * Therefore release ctx_tbl_list_mutex before retrying.
- */
- rc = mutex_trylock(&ctxi->mutex);
- mutex_unlock(&cfg->ctx_tbl_list_mutex);
- if (rc)
- break; /* got the context's lock! */
- }
-
- if (ctxi->unavail)
- goto denied;
-
- ctxpid = ctxi->pid;
- if (likely(!(ctx_ctrl & CTX_CTRL_NOPID)))
- if (pid != ctxpid)
- goto denied;
-
- if (lli) {
- list_for_each_entry(lun_access, &ctxi->luns, list)
- if (lun_access->lli == lli)
- goto out;
- goto denied;
- }
- }
-
-out:
- dev_dbg(dev, "%s: rctxid=%016llx ctxinfo=%p ctxpid=%u pid=%u "
- "ctx_ctrl=%u\n", __func__, rctxid, ctxi, ctxpid, pid,
- ctx_ctrl);
-
- return ctxi;
-
-denied:
- mutex_unlock(&ctxi->mutex);
- ctxi = NULL;
- goto out;
-}
-
-/**
- * put_context() - release a context that was retrieved from get_context()
- * @ctxi: Context to release.
- *
- * For now, releasing the context equates to unlocking its mutex.
- */
-void put_context(struct ctx_info *ctxi)
-{
- mutex_unlock(&ctxi->mutex);
-}
-
-/**
- * afu_attach() - attach a context to the AFU
- * @cfg: Internal structure associated with the host.
- * @ctxi: Context to attach.
- *
- * Upon setting the context capabilities, they must be confirmed with
- * a read back operation as the context might have been closed since
- * the mailbox was unlocked. When this occurs, registration fails.
- *
- * Return: 0 on success, -errno on failure
- */
-static int afu_attach(struct cxlflash_cfg *cfg, struct ctx_info *ctxi)
-{
- struct device *dev = &cfg->dev->dev;
- struct afu *afu = cfg->afu;
- struct sisl_ctrl_map __iomem *ctrl_map = ctxi->ctrl_map;
- int rc = 0;
- struct hwq *hwq = get_hwq(afu, PRIMARY_HWQ);
- u64 val;
- int i;
-
- /* Unlock cap and restrict user to read/write cmds in translated mode */
- readq_be(&ctrl_map->mbox_r);
- val = (SISL_CTX_CAP_READ_CMD | SISL_CTX_CAP_WRITE_CMD);
- writeq_be(val, &ctrl_map->ctx_cap);
- val = readq_be(&ctrl_map->ctx_cap);
- if (val != (SISL_CTX_CAP_READ_CMD | SISL_CTX_CAP_WRITE_CMD)) {
- dev_err(dev, "%s: ctx may be closed val=%016llx\n",
- __func__, val);
- rc = -EAGAIN;
- goto out;
- }
-
- if (afu_is_ocxl_lisn(afu)) {
- /* Set up the LISN effective address for each interrupt */
- for (i = 0; i < ctxi->irqs; i++) {
- val = cfg->ops->get_irq_objhndl(ctxi->ctx, i);
- writeq_be(val, &ctrl_map->lisn_ea[i]);
- }
-
- /* Use primary HWQ PASID as identifier for all interrupts */
- val = hwq->ctx_hndl;
- writeq_be(SISL_LISN_PASID(val, val), &ctrl_map->lisn_pasid[0]);
- writeq_be(SISL_LISN_PASID(0UL, val), &ctrl_map->lisn_pasid[1]);
- }
-
- /* Set up MMIO registers pointing to the RHT */
- writeq_be((u64)ctxi->rht_start, &ctrl_map->rht_start);
- val = SISL_RHT_CNT_ID((u64)MAX_RHT_PER_CONTEXT, (u64)(hwq->ctx_hndl));
- writeq_be(val, &ctrl_map->rht_cnt_id);
-out:
- dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
- return rc;
-}
-
-/**
- * read_cap16() - issues a SCSI READ_CAP16 command
- * @sdev: SCSI device associated with LUN.
- * @lli: LUN destined for capacity request.
- *
- * The READ_CAP16 can take quite a while to complete. Should an EEH occur while
- * in scsi_execute_cmd(), the EEH handler will attempt to recover. As part of
- * the recovery, the handler drains all currently running ioctls, waiting until
- * they have completed before proceeding with a reset. As this routine is used
- * on the ioctl path, this can create a condition where the EEH handler becomes
- * stuck, infinitely waiting for this ioctl thread. To avoid this behavior,
- * temporarily unmark this thread as an ioctl thread by releasing the ioctl
- * read semaphore. This will allow the EEH handler to proceed with a recovery
- * while this thread is still running. Once the scsi_execute_cmd() returns,
- * reacquire the ioctl read semaphore and check the adapter state in case it
- * changed while inside of scsi_execute_cmd(). The state check will wait if the
- * adapter is still being recovered or return a failure if the recovery failed.
- * In the event that the adapter reset failed, simply return the failure as the
- * ioctl would be unable to continue.
- *
- * Note that the above puts a requirement on this routine to only be called on
- * an ioctl thread.
- *
- * Return: 0 on success, -errno on failure
- */
-static int read_cap16(struct scsi_device *sdev, struct llun_info *lli)
-{
- struct cxlflash_cfg *cfg = shost_priv(sdev->host);
- struct device *dev = &cfg->dev->dev;
- struct glun_info *gli = lli->parent;
- struct scsi_sense_hdr sshdr;
- const struct scsi_exec_args exec_args = {
- .sshdr = &sshdr,
- };
- u8 *cmd_buf = NULL;
- u8 *scsi_cmd = NULL;
- int rc = 0;
- int result = 0;
- int retry_cnt = 0;
- u32 to = CMD_TIMEOUT * HZ;
-
-retry:
- cmd_buf = kzalloc(CMD_BUFSIZE, GFP_KERNEL);
- scsi_cmd = kzalloc(MAX_COMMAND_SIZE, GFP_KERNEL);
- if (unlikely(!cmd_buf || !scsi_cmd)) {
- rc = -ENOMEM;
- goto out;
- }
-
- scsi_cmd[0] = SERVICE_ACTION_IN_16; /* read cap(16) */
- scsi_cmd[1] = SAI_READ_CAPACITY_16; /* service action */
- put_unaligned_be32(CMD_BUFSIZE, &scsi_cmd[10]);
-
- dev_dbg(dev, "%s: %ssending cmd(%02x)\n", __func__,
- retry_cnt ? "re" : "", scsi_cmd[0]);
-
- /* Drop the ioctl read semaphore across lengthy call */
- up_read(&cfg->ioctl_rwsem);
- result = scsi_execute_cmd(sdev, scsi_cmd, REQ_OP_DRV_IN, cmd_buf,
- CMD_BUFSIZE, to, CMD_RETRIES, &exec_args);
- down_read(&cfg->ioctl_rwsem);
- rc = check_state(cfg);
- if (rc) {
- dev_err(dev, "%s: Failed state result=%08x\n",
- __func__, result);
- rc = -ENODEV;
- goto out;
- }
-
- if (result > 0 && scsi_sense_valid(&sshdr)) {
- if (result & SAM_STAT_CHECK_CONDITION) {
- switch (sshdr.sense_key) {
- case NO_SENSE:
- case RECOVERED_ERROR:
- case NOT_READY:
- result &= ~SAM_STAT_CHECK_CONDITION;
- break;
- case UNIT_ATTENTION:
- switch (sshdr.asc) {
- case 0x29: /* Power on Reset or Device Reset */
- fallthrough;
- case 0x2A: /* Device capacity changed */
- case 0x3F: /* Report LUNs changed */
- /* Retry the command once more */
- if (retry_cnt++ < 1) {
- kfree(cmd_buf);
- kfree(scsi_cmd);
- goto retry;
- }
- }
- break;
- default:
- break;
- }
- }
- }
-
- if (result) {
- dev_err(dev, "%s: command failed, result=%08x\n",
- __func__, result);
- rc = -EIO;
- goto out;
- }
-
- /*
- * Read cap was successful, grab values from the buffer;
- * note that we don't need to worry about unaligned access
- * as the buffer is allocated on an aligned boundary.
- */
- mutex_lock(&gli->mutex);
- gli->max_lba = be64_to_cpu(*((__be64 *)&cmd_buf[0]));
- gli->blk_len = be32_to_cpu(*((__be32 *)&cmd_buf[8]));
- mutex_unlock(&gli->mutex);
-
-out:
- kfree(cmd_buf);
- kfree(scsi_cmd);
-
- dev_dbg(dev, "%s: maxlba=%lld blklen=%d rc=%d\n",
- __func__, gli->max_lba, gli->blk_len, rc);
- return rc;
-}
-
-/**
- * get_rhte() - obtains validated resource handle table entry reference
- * @ctxi: Context owning the resource handle.
- * @rhndl: Resource handle associated with entry.
- * @lli: LUN associated with request.
- *
- * Return: Validated RHTE on success, NULL on failure
- */
-struct sisl_rht_entry *get_rhte(struct ctx_info *ctxi, res_hndl_t rhndl,
- struct llun_info *lli)
-{
- struct cxlflash_cfg *cfg = ctxi->cfg;
- struct device *dev = &cfg->dev->dev;
- struct sisl_rht_entry *rhte = NULL;
-
- if (unlikely(!ctxi->rht_start)) {
- dev_dbg(dev, "%s: Context does not have allocated RHT\n",
- __func__);
- goto out;
- }
-
- if (unlikely(rhndl >= MAX_RHT_PER_CONTEXT)) {
- dev_dbg(dev, "%s: Bad resource handle rhndl=%d\n",
- __func__, rhndl);
- goto out;
- }
-
- if (unlikely(ctxi->rht_lun[rhndl] != lli)) {
- dev_dbg(dev, "%s: Bad resource handle LUN rhndl=%d\n",
- __func__, rhndl);
- goto out;
- }
-
- rhte = &ctxi->rht_start[rhndl];
- if (unlikely(rhte->nmask == 0)) {
- dev_dbg(dev, "%s: Unopened resource handle rhndl=%d\n",
- __func__, rhndl);
- rhte = NULL;
- goto out;
- }
-
-out:
- return rhte;
-}
-
-/**
- * rhte_checkout() - obtains free/empty resource handle table entry
- * @ctxi: Context owning the resource handle.
- * @lli: LUN associated with request.
- *
- * Return: Free RHTE on success, NULL on failure
- */
-struct sisl_rht_entry *rhte_checkout(struct ctx_info *ctxi,
- struct llun_info *lli)
-{
- struct cxlflash_cfg *cfg = ctxi->cfg;
- struct device *dev = &cfg->dev->dev;
- struct sisl_rht_entry *rhte = NULL;
- int i;
-
- /* Find a free RHT entry */
- for (i = 0; i < MAX_RHT_PER_CONTEXT; i++)
- if (ctxi->rht_start[i].nmask == 0) {
- rhte = &ctxi->rht_start[i];
- ctxi->rht_out++;
- break;
- }
-
- if (likely(rhte))
- ctxi->rht_lun[i] = lli;
-
- dev_dbg(dev, "%s: returning rhte=%p index=%d\n", __func__, rhte, i);
- return rhte;
-}
-
-/**
- * rhte_checkin() - releases a resource handle table entry
- * @ctxi: Context owning the resource handle.
- * @rhte: RHTE to release.
- */
-void rhte_checkin(struct ctx_info *ctxi,
- struct sisl_rht_entry *rhte)
-{
- u32 rsrc_handle = rhte - ctxi->rht_start;
-
- rhte->nmask = 0;
- rhte->fp = 0;
- ctxi->rht_out--;
- ctxi->rht_lun[rsrc_handle] = NULL;
- ctxi->rht_needs_ws[rsrc_handle] = false;
-}
-
-/**
- * rht_format1() - populates a RHTE for format 1
- * @rhte: RHTE to populate.
- * @lun_id: LUN ID of LUN associated with RHTE.
- * @perm: Desired permissions for RHTE.
- * @port_sel: Port selection mask
- */
-static void rht_format1(struct sisl_rht_entry *rhte, u64 lun_id, u32 perm,
- u32 port_sel)
-{
- /*
- * Populate the Format 1 RHT entry for direct access (physical
- * LUN) using the synchronization sequence defined in the
- * SISLite specification.
- */
- struct sisl_rht_entry_f1 dummy = { 0 };
- struct sisl_rht_entry_f1 *rhte_f1 = (struct sisl_rht_entry_f1 *)rhte;
-
- memset(rhte_f1, 0, sizeof(*rhte_f1));
- rhte_f1->fp = SISL_RHT_FP(1U, 0);
- dma_wmb(); /* Make setting of format bit visible */
-
- rhte_f1->lun_id = lun_id;
- dma_wmb(); /* Make setting of LUN id visible */
-
- /*
- * Use a dummy RHT Format 1 entry to build the second dword
- * of the entry that must be populated in a single write when
- * enabled (valid bit set to TRUE).
- */
- dummy.valid = 0x80;
- dummy.fp = SISL_RHT_FP(1U, perm);
- dummy.port_sel = port_sel;
- rhte_f1->dw = dummy.dw;
-
- dma_wmb(); /* Make remaining RHT entry fields visible */
-}
-
-/**
- * cxlflash_lun_attach() - attaches a user to a LUN and manages the LUN's mode
- * @gli: LUN to attach.
- * @mode: Desired mode of the LUN.
- * @locked: Mutex status on current thread.
- *
- * Return: 0 on success, -errno on failure
- */
-int cxlflash_lun_attach(struct glun_info *gli, enum lun_mode mode, bool locked)
-{
- int rc = 0;
-
- if (!locked)
- mutex_lock(&gli->mutex);
-
- if (gli->mode == MODE_NONE)
- gli->mode = mode;
- else if (gli->mode != mode) {
- pr_debug("%s: gli_mode=%d requested_mode=%d\n",
- __func__, gli->mode, mode);
- rc = -EINVAL;
- goto out;
- }
-
- gli->users++;
- WARN_ON(gli->users <= 0);
-out:
- pr_debug("%s: Returning rc=%d gli->mode=%u gli->users=%u\n",
- __func__, rc, gli->mode, gli->users);
- if (!locked)
- mutex_unlock(&gli->mutex);
- return rc;
-}
-
-/**
- * cxlflash_lun_detach() - detaches a user from a LUN and resets the LUN's mode
- * @gli: LUN to detach.
- *
- * When resetting the mode, terminate block allocation resources as they
- * are no longer required (service is safe to call even when block allocation
- * resources were not present - such as when transitioning from physical mode).
- * These resources will be reallocated when needed (subsequent transition to
- * virtual mode).
- */
-void cxlflash_lun_detach(struct glun_info *gli)
-{
- mutex_lock(&gli->mutex);
- WARN_ON(gli->mode == MODE_NONE);
- if (--gli->users == 0) {
- gli->mode = MODE_NONE;
- cxlflash_ba_terminate(&gli->blka.ba_lun);
- }
- pr_debug("%s: gli->users=%u\n", __func__, gli->users);
- WARN_ON(gli->users < 0);
- mutex_unlock(&gli->mutex);
-}
-
-/**
- * _cxlflash_disk_release() - releases the specified resource entry
- * @sdev: SCSI device associated with LUN.
- * @ctxi: Context owning resources.
- * @release: Release ioctl data structure.
- *
- * For LUNs in virtual mode, the virtual LUN associated with the specified
- * resource handle is resized to 0 prior to releasing the RHTE. Note that the
- * AFU sync should _not_ be performed when the context is sitting on the error
- * recovery list. A context on the error recovery list is not known to the AFU
- * due to reset. When the context is recovered, it will be reattached and made
- * known again to the AFU.
- *
- * Return: 0 on success, -errno on failure
- */
-int _cxlflash_disk_release(struct scsi_device *sdev,
- struct ctx_info *ctxi,
- struct dk_cxlflash_release *release)
-{
- struct cxlflash_cfg *cfg = shost_priv(sdev->host);
- struct device *dev = &cfg->dev->dev;
- struct llun_info *lli = sdev->hostdata;
- struct glun_info *gli = lli->parent;
- struct afu *afu = cfg->afu;
- bool put_ctx = false;
-
- struct dk_cxlflash_resize size;
- res_hndl_t rhndl = release->rsrc_handle;
-
- int rc = 0;
- int rcr = 0;
- u64 ctxid = DECODE_CTXID(release->context_id),
- rctxid = release->context_id;
-
- struct sisl_rht_entry *rhte;
- struct sisl_rht_entry_f1 *rhte_f1;
-
- dev_dbg(dev, "%s: ctxid=%llu rhndl=%llu gli->mode=%u gli->users=%u\n",
- __func__, ctxid, release->rsrc_handle, gli->mode, gli->users);
-
- if (!ctxi) {
- ctxi = get_context(cfg, rctxid, lli, CTX_CTRL_ERR_FALLBACK);
- if (unlikely(!ctxi)) {
- dev_dbg(dev, "%s: Bad context ctxid=%llu\n",
- __func__, ctxid);
- rc = -EINVAL;
- goto out;
- }
-
- put_ctx = true;
- }
-
- rhte = get_rhte(ctxi, rhndl, lli);
- if (unlikely(!rhte)) {
- dev_dbg(dev, "%s: Bad resource handle rhndl=%d\n",
- __func__, rhndl);
- rc = -EINVAL;
- goto out;
- }
-
- /*
- * For virtual LUNs, resize to 0. This clears the LXT_START
- * and LXT_CNT fields in the RHT entry and properly syncs
- * with the AFU.
- *
- * Afterwards we clear the remaining fields.
- */
- switch (gli->mode) {
- case MODE_VIRTUAL:
- marshal_rele_to_resize(release, &size);
- size.req_size = 0;
- rc = _cxlflash_vlun_resize(sdev, ctxi, &size);
- if (rc) {
- dev_dbg(dev, "%s: resize failed rc %d\n", __func__, rc);
- goto out;
- }
-
- break;
- case MODE_PHYSICAL:
- /*
- * Clear the Format 1 RHT entry for direct access
- * (physical LUN) using the synchronization sequence
- * defined in the SISLite specification.
- */
- rhte_f1 = (struct sisl_rht_entry_f1 *)rhte;
-
- rhte_f1->valid = 0;
- dma_wmb(); /* Make revocation of RHT entry visible */
-
- rhte_f1->lun_id = 0;
- dma_wmb(); /* Make clearing of LUN id visible */
-
- rhte_f1->dw = 0;
- dma_wmb(); /* Make RHT entry bottom-half clearing visible */
-
- if (!ctxi->err_recovery_active) {
- rcr = cxlflash_afu_sync(afu, ctxid, rhndl, AFU_HW_SYNC);
- if (unlikely(rcr))
- dev_dbg(dev, "%s: AFU sync failed rc=%d\n",
- __func__, rcr);
- }
- break;
- default:
- WARN(1, "Unsupported LUN mode!");
- goto out;
- }
-
- rhte_checkin(ctxi, rhte);
- cxlflash_lun_detach(gli);
-
-out:
- if (put_ctx)
- put_context(ctxi);
- dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
- return rc;
-}
-
-int cxlflash_disk_release(struct scsi_device *sdev, void *release)
-{
- return _cxlflash_disk_release(sdev, NULL, release);
-}
-
-/**
- * destroy_context() - releases a context
- * @cfg: Internal structure associated with the host.
- * @ctxi: Context to release.
- *
- * This routine is safe to be called with a non-initialized context.
- * Also note that the routine conditionally checks for the existence
- * of the context control map before clearing the RHT registers and
- * context capabilities because it is possible to destroy a context
- * while the context is in the error state (previous mapping was
- * removed [so there is no need to worry about clearing] and context
- * is waiting for a new mapping).
- */
-static void destroy_context(struct cxlflash_cfg *cfg,
- struct ctx_info *ctxi)
-{
- struct afu *afu = cfg->afu;
-
- if (ctxi->initialized) {
- WARN_ON(!list_empty(&ctxi->luns));
-
- /* Clear RHT registers and drop all capabilities for context */
- if (afu->afu_map && ctxi->ctrl_map) {
- writeq_be(0, &ctxi->ctrl_map->rht_start);
- writeq_be(0, &ctxi->ctrl_map->rht_cnt_id);
- writeq_be(0, &ctxi->ctrl_map->ctx_cap);
- }
- }
-
- /* Free memory associated with context */
- free_page((ulong)ctxi->rht_start);
- kfree(ctxi->rht_needs_ws);
- kfree(ctxi->rht_lun);
- kfree(ctxi);
-}
-
-/**
- * create_context() - allocates and initializes a context
- * @cfg: Internal structure associated with the host.
- *
- * Return: Allocated context on success, NULL on failure
- */
-static struct ctx_info *create_context(struct cxlflash_cfg *cfg)
-{
- struct device *dev = &cfg->dev->dev;
- struct ctx_info *ctxi = NULL;
- struct llun_info **lli = NULL;
- u8 *ws = NULL;
- struct sisl_rht_entry *rhte;
-
- ctxi = kzalloc(sizeof(*ctxi), GFP_KERNEL);
- lli = kzalloc((MAX_RHT_PER_CONTEXT * sizeof(*lli)), GFP_KERNEL);
- ws = kzalloc((MAX_RHT_PER_CONTEXT * sizeof(*ws)), GFP_KERNEL);
- if (unlikely(!ctxi || !lli || !ws)) {
- dev_err(dev, "%s: Unable to allocate context\n", __func__);
- goto err;
- }
-
- rhte = (struct sisl_rht_entry *)get_zeroed_page(GFP_KERNEL);
- if (unlikely(!rhte)) {
- dev_err(dev, "%s: Unable to allocate RHT\n", __func__);
- goto err;
- }
-
- ctxi->rht_lun = lli;
- ctxi->rht_needs_ws = ws;
- ctxi->rht_start = rhte;
-out:
- return ctxi;
-
-err:
- kfree(ws);
- kfree(lli);
- kfree(ctxi);
- ctxi = NULL;
- goto out;
-}
-
-/**
- * init_context() - initializes a previously allocated context
- * @ctxi: Previously allocated context
- * @cfg: Internal structure associated with the host.
- * @ctx: Previously obtained context cookie.
- * @ctxid: Previously obtained process element associated with CXL context.
- * @file: Previously obtained file associated with CXL context.
- * @perms: User-specified permissions.
- * @irqs: User-specified number of interrupts.
- */
-static void init_context(struct ctx_info *ctxi, struct cxlflash_cfg *cfg,
- void *ctx, int ctxid, struct file *file, u32 perms,
- u64 irqs)
-{
- struct afu *afu = cfg->afu;
-
- ctxi->rht_perms = perms;
- ctxi->ctrl_map = &afu->afu_map->ctrls[ctxid].ctrl;
- ctxi->ctxid = ENCODE_CTXID(ctxi, ctxid);
- ctxi->irqs = irqs;
- ctxi->pid = task_tgid_nr(current); /* tgid = pid */
- ctxi->ctx = ctx;
- ctxi->cfg = cfg;
- ctxi->file = file;
- ctxi->initialized = true;
- mutex_init(&ctxi->mutex);
- kref_init(&ctxi->kref);
- INIT_LIST_HEAD(&ctxi->luns);
- INIT_LIST_HEAD(&ctxi->list); /* initialize for list_empty() */
-}
-
-/**
- * remove_context() - context kref release handler
- * @kref: Kernel reference associated with context to be removed.
- *
- * When a context no longer has any references it can safely be removed
- * from global access and destroyed. Note that it is assumed the thread
- * relinquishing access to the context holds its mutex.
- */
-static void remove_context(struct kref *kref)
-{
- struct ctx_info *ctxi = container_of(kref, struct ctx_info, kref);
- struct cxlflash_cfg *cfg = ctxi->cfg;
- u64 ctxid = DECODE_CTXID(ctxi->ctxid);
-
- /* Remove context from table/error list */
- WARN_ON(!mutex_is_locked(&ctxi->mutex));
- ctxi->unavail = true;
- mutex_unlock(&ctxi->mutex);
- mutex_lock(&cfg->ctx_tbl_list_mutex);
- mutex_lock(&ctxi->mutex);
-
- if (!list_empty(&ctxi->list))
- list_del(&ctxi->list);
- cfg->ctx_tbl[ctxid] = NULL;
- mutex_unlock(&cfg->ctx_tbl_list_mutex);
- mutex_unlock(&ctxi->mutex);
-
- /* Context now completely uncoupled/unreachable */
- destroy_context(cfg, ctxi);
-}
-
-/**
- * _cxlflash_disk_detach() - detaches a LUN from a context
- * @sdev: SCSI device associated with LUN.
- * @ctxi: Context owning resources.
- * @detach: Detach ioctl data structure.
- *
- * As part of the detach, all per-context resources associated with the LUN
- * are cleaned up. When detaching the last LUN for a context, the context
- * itself is cleaned up and released.
- *
- * Return: 0 on success, -errno on failure
- */
-static int _cxlflash_disk_detach(struct scsi_device *sdev,
- struct ctx_info *ctxi,
- struct dk_cxlflash_detach *detach)
-{
- struct cxlflash_cfg *cfg = shost_priv(sdev->host);
- struct device *dev = &cfg->dev->dev;
- struct llun_info *lli = sdev->hostdata;
- struct lun_access *lun_access, *t;
- struct dk_cxlflash_release rel;
- bool put_ctx = false;
-
- int i;
- int rc = 0;
- u64 ctxid = DECODE_CTXID(detach->context_id),
- rctxid = detach->context_id;
-
- dev_dbg(dev, "%s: ctxid=%llu\n", __func__, ctxid);
-
- if (!ctxi) {
- ctxi = get_context(cfg, rctxid, lli, CTX_CTRL_ERR_FALLBACK);
- if (unlikely(!ctxi)) {
- dev_dbg(dev, "%s: Bad context ctxid=%llu\n",
- __func__, ctxid);
- rc = -EINVAL;
- goto out;
- }
-
- put_ctx = true;
- }
-
- /* Cleanup outstanding resources tied to this LUN */
- if (ctxi->rht_out) {
- marshal_det_to_rele(detach, &rel);
- for (i = 0; i < MAX_RHT_PER_CONTEXT; i++) {
- if (ctxi->rht_lun[i] == lli) {
- rel.rsrc_handle = i;
- _cxlflash_disk_release(sdev, ctxi, &rel);
- }
-
- /* No need to loop further if we're done */
- if (ctxi->rht_out == 0)
- break;
- }
- }
-
- /* Take our LUN out of context, free the node */
- list_for_each_entry_safe(lun_access, t, &ctxi->luns, list)
- if (lun_access->lli == lli) {
- list_del(&lun_access->list);
- kfree(lun_access);
- lun_access = NULL;
- break;
- }
-
- /*
- * Release the context reference and the sdev reference that
- * bound this LUN to the context.
- */
- if (kref_put(&ctxi->kref, remove_context))
- put_ctx = false;
- scsi_device_put(sdev);
-out:
- if (put_ctx)
- put_context(ctxi);
- dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
- return rc;
-}
-
-static int cxlflash_disk_detach(struct scsi_device *sdev, void *detach)
-{
- return _cxlflash_disk_detach(sdev, NULL, detach);
-}
-
-/**
- * cxlflash_cxl_release() - release handler for adapter file descriptor
- * @inode: File-system inode associated with fd.
- * @file: File installed with adapter file descriptor.
- *
- * This routine is the release handler for the fops registered with
- * the CXL services on an initial attach for a context. It is called
- * when a close (explicitly by the user or as part of a process tear
- * down) is performed on the adapter file descriptor returned to the
- * user. The user should be aware that explicitly performing a close is
- * considered catastrophic and subsequent usage of the superpipe API
- * with previously saved off tokens will fail.
- *
- * This routine derives the context reference and calls detach for
- * each LUN associated with the context. The final detach operation
- * causes the context itself to be freed. With exception to when the
- * CXL process element (context id) lookup fails (a case that should
- * theoretically never occur), every call into this routine results
- * in a complete freeing of a context.
- *
- * Detaching the LUN is typically an ioctl() operation and the underlying
- * code assumes that ioctl_rwsem has been acquired as a reader. To support
- * that design point, the semaphore is acquired and released around detach.
- *
- * Return: 0 on success
- */
-static int cxlflash_cxl_release(struct inode *inode, struct file *file)
-{
- struct cxlflash_cfg *cfg = container_of(file->f_op, struct cxlflash_cfg,
- cxl_fops);
- void *ctx = cfg->ops->fops_get_context(file);
- struct device *dev = &cfg->dev->dev;
- struct ctx_info *ctxi = NULL;
- struct dk_cxlflash_detach detach = { { 0 }, 0 };
- struct lun_access *lun_access, *t;
- enum ctx_ctrl ctrl = CTX_CTRL_ERR_FALLBACK | CTX_CTRL_FILE;
- int ctxid;
-
- ctxid = cfg->ops->process_element(ctx);
- if (unlikely(ctxid < 0)) {
- dev_err(dev, "%s: Context %p was closed ctxid=%d\n",
- __func__, ctx, ctxid);
- goto out;
- }
-
- ctxi = get_context(cfg, ctxid, file, ctrl);
- if (unlikely(!ctxi)) {
- ctxi = get_context(cfg, ctxid, file, ctrl | CTX_CTRL_CLONE);
- if (!ctxi) {
- dev_dbg(dev, "%s: ctxid=%d already free\n",
- __func__, ctxid);
- goto out_release;
- }
-
- dev_dbg(dev, "%s: Another process owns ctxid=%d\n",
- __func__, ctxid);
- put_context(ctxi);
- goto out;
- }
-
- dev_dbg(dev, "%s: close for ctxid=%d\n", __func__, ctxid);
-
- down_read(&cfg->ioctl_rwsem);
- detach.context_id = ctxi->ctxid;
- list_for_each_entry_safe(lun_access, t, &ctxi->luns, list)
- _cxlflash_disk_detach(lun_access->sdev, ctxi, &detach);
- up_read(&cfg->ioctl_rwsem);
-out_release:
- cfg->ops->fd_release(inode, file);
-out:
- dev_dbg(dev, "%s: returning\n", __func__);
- return 0;
-}
-
-/**
- * unmap_context() - clears a previously established mapping
- * @ctxi: Context owning the mapping.
- *
- * This routine is used to switch between the error notification page
- * (dummy page of all 1's) and the real mapping (established by the CXL
- * fault handler).
- */
-static void unmap_context(struct ctx_info *ctxi)
-{
- unmap_mapping_range(ctxi->file->f_mapping, 0, 0, 1);
-}
-
-/**
- * get_err_page() - obtains and allocates the error notification page
- * @cfg: Internal structure associated with the host.
- *
- * Return: error notification page on success, NULL on failure
- */
-static struct page *get_err_page(struct cxlflash_cfg *cfg)
-{
- struct page *err_page = global.err_page;
- struct device *dev = &cfg->dev->dev;
-
- if (unlikely(!err_page)) {
- err_page = alloc_page(GFP_KERNEL);
- if (unlikely(!err_page)) {
- dev_err(dev, "%s: Unable to allocate err_page\n",
- __func__);
- goto out;
- }
-
- memset(page_address(err_page), -1, PAGE_SIZE);
-
- /* Serialize update w/ other threads to avoid a leak */
- mutex_lock(&global.mutex);
- if (likely(!global.err_page))
- global.err_page = err_page;
- else {
- __free_page(err_page);
- err_page = global.err_page;
- }
- mutex_unlock(&global.mutex);
- }
-
-out:
- dev_dbg(dev, "%s: returning err_page=%p\n", __func__, err_page);
- return err_page;
-}
-
-/**
- * cxlflash_mmap_fault() - mmap fault handler for adapter file descriptor
- * @vmf: VM fault associated with current fault.
- *
- * To support error notification via MMIO, faults are 'caught' by this routine
- * that was inserted before passing back the adapter file descriptor on attach.
- * When a fault occurs, this routine evaluates if error recovery is active and
- * if so, installs the error page to 'notify' the user about the error state.
- * During normal operation, the fault is simply handled by the original fault
- * handler that was installed by CXL services as part of initializing the
- * adapter file descriptor. The VMA's page protection bits are toggled to
- * indicate cached/not-cached depending on the memory backing the fault.
- *
- * Return: 0 on success, VM_FAULT_SIGBUS on failure
- */
-static vm_fault_t cxlflash_mmap_fault(struct vm_fault *vmf)
-{
- struct vm_area_struct *vma = vmf->vma;
- struct file *file = vma->vm_file;
- struct cxlflash_cfg *cfg = container_of(file->f_op, struct cxlflash_cfg,
- cxl_fops);
- void *ctx = cfg->ops->fops_get_context(file);
- struct device *dev = &cfg->dev->dev;
- struct ctx_info *ctxi = NULL;
- struct page *err_page = NULL;
- enum ctx_ctrl ctrl = CTX_CTRL_ERR_FALLBACK | CTX_CTRL_FILE;
- vm_fault_t rc = 0;
- int ctxid;
-
- ctxid = cfg->ops->process_element(ctx);
- if (unlikely(ctxid < 0)) {
- dev_err(dev, "%s: Context %p was closed ctxid=%d\n",
- __func__, ctx, ctxid);
- goto err;
- }
-
- ctxi = get_context(cfg, ctxid, file, ctrl);
- if (unlikely(!ctxi)) {
- dev_dbg(dev, "%s: Bad context ctxid=%d\n", __func__, ctxid);
- goto err;
- }
-
- dev_dbg(dev, "%s: fault for context %d\n", __func__, ctxid);
-
- if (likely(!ctxi->err_recovery_active)) {
- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
- rc = ctxi->cxl_mmap_vmops->fault(vmf);
- } else {
- dev_dbg(dev, "%s: err recovery active, use err_page\n",
- __func__);
-
- err_page = get_err_page(cfg);
- if (unlikely(!err_page)) {
- dev_err(dev, "%s: Could not get err_page\n", __func__);
- rc = VM_FAULT_RETRY;
- goto out;
- }
-
- get_page(err_page);
- vmf->page = err_page;
- vma->vm_page_prot = pgprot_cached(vma->vm_page_prot);
- }
-
-out:
- if (likely(ctxi))
- put_context(ctxi);
- dev_dbg(dev, "%s: returning rc=%x\n", __func__, rc);
- return rc;
-
-err:
- rc = VM_FAULT_SIGBUS;
- goto out;
-}
-
-/*
- * Local MMAP vmops to 'catch' faults
- */
-static const struct vm_operations_struct cxlflash_mmap_vmops = {
- .fault = cxlflash_mmap_fault,
-};
-
-/**
- * cxlflash_cxl_mmap() - mmap handler for adapter file descriptor
- * @file: File installed with adapter file descriptor.
- * @vma: VM area associated with mapping.
- *
- * Installs local mmap vmops to 'catch' faults for error notification support.
- *
- * Return: 0 on success, -errno on failure
- */
-static int cxlflash_cxl_mmap(struct file *file, struct vm_area_struct *vma)
-{
- struct cxlflash_cfg *cfg = container_of(file->f_op, struct cxlflash_cfg,
- cxl_fops);
- void *ctx = cfg->ops->fops_get_context(file);
- struct device *dev = &cfg->dev->dev;
- struct ctx_info *ctxi = NULL;
- enum ctx_ctrl ctrl = CTX_CTRL_ERR_FALLBACK | CTX_CTRL_FILE;
- int ctxid;
- int rc = 0;
-
- ctxid = cfg->ops->process_element(ctx);
- if (unlikely(ctxid < 0)) {
- dev_err(dev, "%s: Context %p was closed ctxid=%d\n",
- __func__, ctx, ctxid);
- rc = -EIO;
- goto out;
- }
-
- ctxi = get_context(cfg, ctxid, file, ctrl);
- if (unlikely(!ctxi)) {
- dev_dbg(dev, "%s: Bad context ctxid=%d\n", __func__, ctxid);
- rc = -EIO;
- goto out;
- }
-
- dev_dbg(dev, "%s: mmap for context %d\n", __func__, ctxid);
-
- rc = cfg->ops->fd_mmap(file, vma);
- if (likely(!rc)) {
- /* Insert ourselves in the mmap fault handler path */
- ctxi->cxl_mmap_vmops = vma->vm_ops;
- vma->vm_ops = &cxlflash_mmap_vmops;
- }
-
-out:
- if (likely(ctxi))
- put_context(ctxi);
- return rc;
-}
-
-const struct file_operations cxlflash_cxl_fops = {
- .owner = THIS_MODULE,
- .mmap = cxlflash_cxl_mmap,
- .release = cxlflash_cxl_release,
-};
-
-/**
- * cxlflash_mark_contexts_error() - move contexts to error state and list
- * @cfg: Internal structure associated with the host.
- *
- * A context is only moved over to the error list when there are no outstanding
- * references to it. This ensures that a running operation has completed.
- *
- * Return: 0 on success, -errno on failure
- */
-int cxlflash_mark_contexts_error(struct cxlflash_cfg *cfg)
-{
- int i, rc = 0;
- struct ctx_info *ctxi = NULL;
-
- mutex_lock(&cfg->ctx_tbl_list_mutex);
-
- for (i = 0; i < MAX_CONTEXT; i++) {
- ctxi = cfg->ctx_tbl[i];
- if (ctxi) {
- mutex_lock(&ctxi->mutex);
- cfg->ctx_tbl[i] = NULL;
- list_add(&ctxi->list, &cfg->ctx_err_recovery);
- ctxi->err_recovery_active = true;
- ctxi->ctrl_map = NULL;
- unmap_context(ctxi);
- mutex_unlock(&ctxi->mutex);
- }
- }
-
- mutex_unlock(&cfg->ctx_tbl_list_mutex);
- return rc;
-}
-
-/*
- * Dummy NULL fops
- */
-static const struct file_operations null_fops = {
- .owner = THIS_MODULE,
-};
-
-/**
- * check_state() - checks and responds to the current adapter state
- * @cfg: Internal structure associated with the host.
- *
- * This routine can block and should only be used in process context.
- * It assumes that the caller is an ioctl thread and holding the ioctl
- * read semaphore. This is temporarily let up across the wait to allow
- * for draining actively running ioctls. Also note that when waking up
- * from waiting in reset, the state is unknown and must be checked again
- * before proceeding.
- *
- * Return: 0 on success, -errno on failure
- */
-int check_state(struct cxlflash_cfg *cfg)
-{
- struct device *dev = &cfg->dev->dev;
- int rc = 0;
-
-retry:
- switch (cfg->state) {
- case STATE_RESET:
- dev_dbg(dev, "%s: Reset state, going to wait...\n", __func__);
- up_read(&cfg->ioctl_rwsem);
- rc = wait_event_interruptible(cfg->reset_waitq,
- cfg->state != STATE_RESET);
- down_read(&cfg->ioctl_rwsem);
- if (unlikely(rc))
- break;
- goto retry;
- case STATE_FAILTERM:
- dev_dbg(dev, "%s: Failed/Terminating\n", __func__);
- rc = -ENODEV;
- break;
- default:
- break;
- }
-
- return rc;
-}
-
-/**
- * cxlflash_disk_attach() - attach a LUN to a context
- * @sdev: SCSI device associated with LUN.
- * @arg: Attach ioctl data structure.
- *
- * Creates a context and attaches the LUN to it. A LUN can only be attached
- * one time to a context (subsequent attaches for the same context/LUN pair
- * are not supported). Additional LUNs can be attached to a context by
- * specifying the 'reuse' flag defined in the cxlflash_ioctl.h header.
- *
- * Return: 0 on success, -errno on failure
- */
-static int cxlflash_disk_attach(struct scsi_device *sdev, void *arg)
-{
- struct dk_cxlflash_attach *attach = arg;
- struct cxlflash_cfg *cfg = shost_priv(sdev->host);
- struct device *dev = &cfg->dev->dev;
- struct afu *afu = cfg->afu;
- struct llun_info *lli = sdev->hostdata;
- struct glun_info *gli = lli->parent;
- struct ctx_info *ctxi = NULL;
- struct lun_access *lun_access = NULL;
- int rc = 0;
- u32 perms;
- int ctxid = -1;
- u64 irqs = attach->num_interrupts;
- u64 flags = 0UL;
- u64 rctxid = 0UL;
- struct file *file = NULL;
-
- void *ctx = NULL;
-
- int fd = -1;
-
- if (irqs > 4) {
- dev_dbg(dev, "%s: Cannot support this many interrupts %llu\n",
- __func__, irqs);
- rc = -EINVAL;
- goto out;
- }
-
- if (gli->max_lba == 0) {
- dev_dbg(dev, "%s: No capacity info for LUN=%016llx\n",
- __func__, lli->lun_id[sdev->channel]);
- rc = read_cap16(sdev, lli);
- if (rc) {
- dev_err(dev, "%s: Invalid device rc=%d\n",
- __func__, rc);
- rc = -ENODEV;
- goto out;
- }
- dev_dbg(dev, "%s: LBA = %016llx\n", __func__, gli->max_lba);
- dev_dbg(dev, "%s: BLK_LEN = %08x\n", __func__, gli->blk_len);
- }
-
- if (attach->hdr.flags & DK_CXLFLASH_ATTACH_REUSE_CONTEXT) {
- rctxid = attach->context_id;
- ctxi = get_context(cfg, rctxid, NULL, 0);
- if (!ctxi) {
- dev_dbg(dev, "%s: Bad context rctxid=%016llx\n",
- __func__, rctxid);
- rc = -EINVAL;
- goto out;
- }
-
- list_for_each_entry(lun_access, &ctxi->luns, list)
- if (lun_access->lli == lli) {
- dev_dbg(dev, "%s: Already attached\n",
- __func__);
- rc = -EINVAL;
- goto out;
- }
- }
-
- rc = scsi_device_get(sdev);
- if (unlikely(rc)) {
- dev_err(dev, "%s: Unable to get sdev reference\n", __func__);
- goto out;
- }
-
- lun_access = kzalloc(sizeof(*lun_access), GFP_KERNEL);
- if (unlikely(!lun_access)) {
- dev_err(dev, "%s: Unable to allocate lun_access\n", __func__);
- rc = -ENOMEM;
- goto err;
- }
-
- lun_access->lli = lli;
- lun_access->sdev = sdev;
-
- /* Non-NULL context indicates reuse (another context reference) */
- if (ctxi) {
- dev_dbg(dev, "%s: Reusing context for LUN rctxid=%016llx\n",
- __func__, rctxid);
- kref_get(&ctxi->kref);
- list_add(&lun_access->list, &ctxi->luns);
- goto out_attach;
- }
-
- ctxi = create_context(cfg);
- if (unlikely(!ctxi)) {
- dev_err(dev, "%s: Failed to create context ctxid=%d\n",
- __func__, ctxid);
- rc = -ENOMEM;
- goto err;
- }
-
- ctx = cfg->ops->dev_context_init(cfg->dev, cfg->afu_cookie);
- if (IS_ERR_OR_NULL(ctx)) {
- dev_err(dev, "%s: Could not initialize context %p\n",
- __func__, ctx);
- rc = -ENODEV;
- goto err;
- }
-
- rc = cfg->ops->start_work(ctx, irqs);
- if (unlikely(rc)) {
- dev_dbg(dev, "%s: Could not start context rc=%d\n",
- __func__, rc);
- goto err;
- }
-
- ctxid = cfg->ops->process_element(ctx);
- if (unlikely((ctxid >= MAX_CONTEXT) || (ctxid < 0))) {
- dev_err(dev, "%s: ctxid=%d invalid\n", __func__, ctxid);
- rc = -EPERM;
- goto err;
- }
-
- file = cfg->ops->get_fd(ctx, &cfg->cxl_fops, &fd);
- if (unlikely(fd < 0)) {
- rc = -ENODEV;
- dev_err(dev, "%s: Could not get file descriptor\n", __func__);
- goto err;
- }
-
- /* Translate read/write O_* flags from fcntl.h to AFU permission bits */
- perms = SISL_RHT_PERM(attach->hdr.flags + 1);
-
- /* Context mutex is locked upon return */
- init_context(ctxi, cfg, ctx, ctxid, file, perms, irqs);
-
- rc = afu_attach(cfg, ctxi);
- if (unlikely(rc)) {
- dev_err(dev, "%s: Could not attach AFU rc %d\n", __func__, rc);
- goto err;
- }
-
- /*
- * No error paths after this point. Once the fd is installed it's
- * visible to user space and can't be undone safely on this thread.
- * There is no need to worry about a deadlock here because no one
- * knows about us yet; we can be the only one holding our mutex.
- */
- list_add(&lun_access->list, &ctxi->luns);
- mutex_lock(&cfg->ctx_tbl_list_mutex);
- mutex_lock(&ctxi->mutex);
- cfg->ctx_tbl[ctxid] = ctxi;
- mutex_unlock(&cfg->ctx_tbl_list_mutex);
- fd_install(fd, file);
-
-out_attach:
- if (fd != -1)
- flags |= DK_CXLFLASH_APP_CLOSE_ADAP_FD;
- if (afu_is_sq_cmd_mode(afu))
- flags |= DK_CXLFLASH_CONTEXT_SQ_CMD_MODE;
-
- attach->hdr.return_flags = flags;
- attach->context_id = ctxi->ctxid;
- attach->block_size = gli->blk_len;
- attach->mmio_size = sizeof(afu->afu_map->hosts[0].harea);
- attach->last_lba = gli->max_lba;
- attach->max_xfer = sdev->host->max_sectors * MAX_SECTOR_UNIT;
- attach->max_xfer /= gli->blk_len;
-
-out:
- attach->adap_fd = fd;
-
- if (ctxi)
- put_context(ctxi);
-
- dev_dbg(dev, "%s: returning ctxid=%d fd=%d bs=%lld rc=%d llba=%lld\n",
- __func__, ctxid, fd, attach->block_size, rc, attach->last_lba);
- return rc;
-
-err:
- /* Cleanup CXL context; okay to 'stop' even if it was not started */
- if (!IS_ERR_OR_NULL(ctx)) {
- cfg->ops->stop_context(ctx);
- cfg->ops->release_context(ctx);
- ctx = NULL;
- }
-
- /*
- * Here, we're overriding the fops with a dummy all-NULL fops because
- * fput() calls the release fop, which will cause us to mistakenly
- * call into the CXL code. Rather than try to add yet more complexity
- * to that routine (cxlflash_cxl_release) we should try to fix the
- * issue here.
- */
- if (fd > 0) {
- file->f_op = &null_fops;
- fput(file);
- put_unused_fd(fd);
- fd = -1;
- file = NULL;
- }
-
- /* Cleanup our context */
- if (ctxi) {
- destroy_context(cfg, ctxi);
- ctxi = NULL;
- }
-
- kfree(lun_access);
- scsi_device_put(sdev);
- goto out;
-}
-
-/**
- * recover_context() - recovers a context in error
- * @cfg: Internal structure associated with the host.
- * @ctxi: Context to recover.
- * @adap_fd: Adapter file descriptor associated with new/recovered context.
- *
- * Re-establishes the state for a context-in-error.
- *
- * Return: 0 on success, -errno on failure
- */
-static int recover_context(struct cxlflash_cfg *cfg,
- struct ctx_info *ctxi,
- int *adap_fd)
-{
- struct device *dev = &cfg->dev->dev;
- int rc = 0;
- int fd = -1;
- int ctxid = -1;
- struct file *file;
- void *ctx;
- struct afu *afu = cfg->afu;
-
- ctx = cfg->ops->dev_context_init(cfg->dev, cfg->afu_cookie);
- if (IS_ERR_OR_NULL(ctx)) {
- dev_err(dev, "%s: Could not initialize context %p\n",
- __func__, ctx);
- rc = -ENODEV;
- goto out;
- }
-
- rc = cfg->ops->start_work(ctx, ctxi->irqs);
- if (unlikely(rc)) {
- dev_dbg(dev, "%s: Could not start context rc=%d\n",
- __func__, rc);
- goto err1;
- }
-
- ctxid = cfg->ops->process_element(ctx);
- if (unlikely((ctxid >= MAX_CONTEXT) || (ctxid < 0))) {
- dev_err(dev, "%s: ctxid=%d invalid\n", __func__, ctxid);
- rc = -EPERM;
- goto err2;
- }
-
- file = cfg->ops->get_fd(ctx, &cfg->cxl_fops, &fd);
- if (unlikely(fd < 0)) {
- rc = -ENODEV;
- dev_err(dev, "%s: Could not get file descriptor\n", __func__);
- goto err2;
- }
-
- /* Update with new MMIO area based on updated context id */
- ctxi->ctrl_map = &afu->afu_map->ctrls[ctxid].ctrl;
-
- rc = afu_attach(cfg, ctxi);
- if (rc) {
- dev_err(dev, "%s: Could not attach AFU rc %d\n", __func__, rc);
- goto err3;
- }
-
- /*
- * No error paths after this point. Once the fd is installed it's
- * visible to user space and can't be undone safely on this thread.
- */
- ctxi->ctxid = ENCODE_CTXID(ctxi, ctxid);
- ctxi->ctx = ctx;
- ctxi->file = file;
-
- /*
- * Put context back in table (note the reinit of the context list);
- * we must first drop the context's mutex and then acquire it in
- * order with the table/list mutex to avoid a deadlock - safe to do
- * here because no one can find us at this moment in time.
- */
- mutex_unlock(&ctxi->mutex);
- mutex_lock(&cfg->ctx_tbl_list_mutex);
- mutex_lock(&ctxi->mutex);
- list_del_init(&ctxi->list);
- cfg->ctx_tbl[ctxid] = ctxi;
- mutex_unlock(&cfg->ctx_tbl_list_mutex);
- fd_install(fd, file);
- *adap_fd = fd;
-out:
- dev_dbg(dev, "%s: returning ctxid=%d fd=%d rc=%d\n",
- __func__, ctxid, fd, rc);
- return rc;
-
-err3:
- fput(file);
- put_unused_fd(fd);
-err2:
- cfg->ops->stop_context(ctx);
-err1:
- cfg->ops->release_context(ctx);
- goto out;
-}
-
-/**
- * cxlflash_afu_recover() - initiates AFU recovery
- * @sdev: SCSI device associated with LUN.
- * @arg: Recover ioctl data structure.
- *
- * Only a single recovery is allowed at a time to avoid exhausting CXL
- * resources (leading to recovery failure) in the event that we're up
- * against the maximum number of contexts limit. For similar reasons,
- * a context recovery is retried if there are multiple recoveries taking
- * place at the same time and the failure was due to CXL services being
- * unable to keep up.
- *
- * As this routine is called on ioctl context, it holds the ioctl r/w
- * semaphore that is used to drain ioctls in recovery scenarios. The
- * implementation to achieve the pacing described above (a local mutex)
- * requires that the ioctl r/w semaphore be dropped and reacquired to
- * avoid a 3-way deadlock when multiple process recoveries operate in
- * parallel.
- *
- * Because a user can detect an error condition before the kernel, it is
- * quite possible for this routine to act as the kernel's EEH detection
- * source (MMIO read of mbox_r). Because of this, there is a window of
- * time where an EEH might have been detected but not yet 'serviced'
- * (callback invoked, causing the device to enter reset state). To avoid
- * looping in this routine during that window, a 1 second sleep is in place
- * between the time the MMIO failure is detected and the time a wait on the
- * reset wait queue is attempted via check_state().
- *
- * Return: 0 on success, -errno on failure
- */
-static int cxlflash_afu_recover(struct scsi_device *sdev, void *arg)
-{
- struct dk_cxlflash_recover_afu *recover = arg;
- struct cxlflash_cfg *cfg = shost_priv(sdev->host);
- struct device *dev = &cfg->dev->dev;
- struct llun_info *lli = sdev->hostdata;
- struct afu *afu = cfg->afu;
- struct ctx_info *ctxi = NULL;
- struct mutex *mutex = &cfg->ctx_recovery_mutex;
- struct hwq *hwq = get_hwq(afu, PRIMARY_HWQ);
- u64 flags;
- u64 ctxid = DECODE_CTXID(recover->context_id),
- rctxid = recover->context_id;
- long reg;
- bool locked = true;
- int lretry = 20; /* up to 2 seconds */
- int new_adap_fd = -1;
- int rc = 0;
-
- atomic_inc(&cfg->recovery_threads);
- up_read(&cfg->ioctl_rwsem);
- rc = mutex_lock_interruptible(mutex);
- down_read(&cfg->ioctl_rwsem);
- if (rc) {
- locked = false;
- goto out;
- }
-
- rc = check_state(cfg);
- if (rc) {
- dev_err(dev, "%s: Failed state rc=%d\n", __func__, rc);
- rc = -ENODEV;
- goto out;
- }
-
- dev_dbg(dev, "%s: reason=%016llx rctxid=%016llx\n",
- __func__, recover->reason, rctxid);
-
-retry:
- /* Ensure that this process is attached to the context */
- ctxi = get_context(cfg, rctxid, lli, CTX_CTRL_ERR_FALLBACK);
- if (unlikely(!ctxi)) {
- dev_dbg(dev, "%s: Bad context ctxid=%llu\n", __func__, ctxid);
- rc = -EINVAL;
- goto out;
- }
-
- if (ctxi->err_recovery_active) {
-retry_recover:
- rc = recover_context(cfg, ctxi, &new_adap_fd);
- if (unlikely(rc)) {
- dev_err(dev, "%s: Recovery failed ctxid=%llu rc=%d\n",
- __func__, ctxid, rc);
- if ((rc == -ENODEV) &&
- ((atomic_read(&cfg->recovery_threads) > 1) ||
- (lretry--))) {
- dev_dbg(dev, "%s: Going to try again\n",
- __func__);
- mutex_unlock(mutex);
- msleep(100);
- rc = mutex_lock_interruptible(mutex);
- if (rc) {
- locked = false;
- goto out;
- }
- goto retry_recover;
- }
-
- goto out;
- }
-
- ctxi->err_recovery_active = false;
-
- flags = DK_CXLFLASH_APP_CLOSE_ADAP_FD |
- DK_CXLFLASH_RECOVER_AFU_CONTEXT_RESET;
- if (afu_is_sq_cmd_mode(afu))
- flags |= DK_CXLFLASH_CONTEXT_SQ_CMD_MODE;
-
- recover->hdr.return_flags = flags;
- recover->context_id = ctxi->ctxid;
- recover->adap_fd = new_adap_fd;
- recover->mmio_size = sizeof(afu->afu_map->hosts[0].harea);
- goto out;
- }
-
- /* Test if in error state */
- reg = readq_be(&hwq->ctrl_map->mbox_r);
- if (reg == -1) {
- dev_dbg(dev, "%s: MMIO fail, wait for recovery.\n", __func__);
-
- /*
- * Before checking the state, put back the context obtained with
- * get_context() as it is no longer needed and sleep for a short
- * period of time (see prolog notes).
- */
- put_context(ctxi);
- ctxi = NULL;
- ssleep(1);
- rc = check_state(cfg);
- if (unlikely(rc))
- goto out;
- goto retry;
- }
-
- dev_dbg(dev, "%s: MMIO working, no recovery required\n", __func__);
-out:
- if (likely(ctxi))
- put_context(ctxi);
- if (locked)
- mutex_unlock(mutex);
- atomic_dec_if_positive(&cfg->recovery_threads);
- return rc;
-}
-
-/**
- * process_sense() - evaluates and processes sense data
- * @sdev: SCSI device associated with LUN.
- * @verify: Verify ioctl data structure.
- *
- * Return: 0 on success, -errno on failure
- */
-static int process_sense(struct scsi_device *sdev,
- struct dk_cxlflash_verify *verify)
-{
- struct cxlflash_cfg *cfg = shost_priv(sdev->host);
- struct device *dev = &cfg->dev->dev;
- struct llun_info *lli = sdev->hostdata;
- struct glun_info *gli = lli->parent;
- u64 prev_lba = gli->max_lba;
- struct scsi_sense_hdr sshdr = { 0 };
- int rc = 0;
-
- rc = scsi_normalize_sense((const u8 *)&verify->sense_data,
- DK_CXLFLASH_VERIFY_SENSE_LEN, &sshdr);
- if (!rc) {
- dev_err(dev, "%s: Failed to normalize sense data\n", __func__);
- rc = -EINVAL;
- goto out;
- }
-
- switch (sshdr.sense_key) {
- case NO_SENSE:
- case RECOVERED_ERROR:
- case NOT_READY:
- break;
- case UNIT_ATTENTION:
- switch (sshdr.asc) {
- case 0x29: /* Power on Reset or Device Reset */
- fallthrough;
- case 0x2A: /* Device settings/capacity changed */
- rc = read_cap16(sdev, lli);
- if (rc) {
- rc = -ENODEV;
- break;
- }
- if (prev_lba != gli->max_lba)
- dev_dbg(dev, "%s: Capacity changed old=%lld "
- "new=%lld\n", __func__, prev_lba,
- gli->max_lba);
- break;
- case 0x3F: /* Report LUNs changed, Rescan. */
- scsi_scan_host(cfg->host);
- break;
- default:
- rc = -EIO;
- break;
- }
- break;
- default:
- rc = -EIO;
- break;
- }
-out:
- dev_dbg(dev, "%s: sense_key %x asc %x ascq %x rc %d\n", __func__,
- sshdr.sense_key, sshdr.asc, sshdr.ascq, rc);
- return rc;
-}
-
-/**
- * cxlflash_disk_verify() - verifies a LUN is the same and handle size changes
- * @sdev: SCSI device associated with LUN.
- * @arg: Verify ioctl data structure.
- *
- * Return: 0 on success, -errno on failure
- */
-static int cxlflash_disk_verify(struct scsi_device *sdev, void *arg)
-{
- struct dk_cxlflash_verify *verify = arg;
- int rc = 0;
- struct ctx_info *ctxi = NULL;
- struct cxlflash_cfg *cfg = shost_priv(sdev->host);
- struct device *dev = &cfg->dev->dev;
- struct llun_info *lli = sdev->hostdata;
- struct glun_info *gli = lli->parent;
- struct sisl_rht_entry *rhte = NULL;
- res_hndl_t rhndl = verify->rsrc_handle;
- u64 ctxid = DECODE_CTXID(verify->context_id),
- rctxid = verify->context_id;
- u64 last_lba = 0;
-
- dev_dbg(dev, "%s: ctxid=%llu rhndl=%016llx, hint=%016llx, "
- "flags=%016llx\n", __func__, ctxid, verify->rsrc_handle,
- verify->hint, verify->hdr.flags);
-
- ctxi = get_context(cfg, rctxid, lli, 0);
- if (unlikely(!ctxi)) {
- dev_dbg(dev, "%s: Bad context ctxid=%llu\n", __func__, ctxid);
- rc = -EINVAL;
- goto out;
- }
-
- rhte = get_rhte(ctxi, rhndl, lli);
- if (unlikely(!rhte)) {
- dev_dbg(dev, "%s: Bad resource handle rhndl=%d\n",
- __func__, rhndl);
- rc = -EINVAL;
- goto out;
- }
-
- /*
- * Look at the hint/sense to see if it requires us to redrive
- * inquiry (i.e. the Unit attention is due to the WWN changing).
- */
- if (verify->hint & DK_CXLFLASH_VERIFY_HINT_SENSE) {
- /* Can't hold mutex across process_sense/read_cap16,
- * since we could have an intervening EEH event.
- */
- ctxi->unavail = true;
- mutex_unlock(&ctxi->mutex);
- rc = process_sense(sdev, verify);
- if (unlikely(rc)) {
- dev_err(dev, "%s: Failed to validate sense data (%d)\n",
- __func__, rc);
- mutex_lock(&ctxi->mutex);
- ctxi->unavail = false;
- goto out;
- }
- mutex_lock(&ctxi->mutex);
- ctxi->unavail = false;
- }
-
- switch (gli->mode) {
- case MODE_PHYSICAL:
- last_lba = gli->max_lba;
- break;
- case MODE_VIRTUAL:
- /* Cast lxt_cnt to u64 for multiply to be treated as 64bit op */
- last_lba = ((u64)rhte->lxt_cnt * MC_CHUNK_SIZE * gli->blk_len);
- last_lba /= CXLFLASH_BLOCK_SIZE;
- last_lba--;
- break;
- default:
- WARN(1, "Unsupported LUN mode!");
- }
-
- verify->last_lba = last_lba;
-
-out:
- if (likely(ctxi))
- put_context(ctxi);
- dev_dbg(dev, "%s: returning rc=%d llba=%llx\n",
- __func__, rc, verify->last_lba);
- return rc;
-}
-
-/**
- * decode_ioctl() - translates an encoded ioctl to an easily identifiable string
- * @cmd: The ioctl command to decode.
- *
- * Return: A string identifying the decoded ioctl.
- */
-static char *decode_ioctl(unsigned int cmd)
-{
- switch (cmd) {
- case DK_CXLFLASH_ATTACH:
- return __stringify_1(DK_CXLFLASH_ATTACH);
- case DK_CXLFLASH_USER_DIRECT:
- return __stringify_1(DK_CXLFLASH_USER_DIRECT);
- case DK_CXLFLASH_USER_VIRTUAL:
- return __stringify_1(DK_CXLFLASH_USER_VIRTUAL);
- case DK_CXLFLASH_VLUN_RESIZE:
- return __stringify_1(DK_CXLFLASH_VLUN_RESIZE);
- case DK_CXLFLASH_RELEASE:
- return __stringify_1(DK_CXLFLASH_RELEASE);
- case DK_CXLFLASH_DETACH:
- return __stringify_1(DK_CXLFLASH_DETACH);
- case DK_CXLFLASH_VERIFY:
- return __stringify_1(DK_CXLFLASH_VERIFY);
- case DK_CXLFLASH_VLUN_CLONE:
- return __stringify_1(DK_CXLFLASH_VLUN_CLONE);
- case DK_CXLFLASH_RECOVER_AFU:
- return __stringify_1(DK_CXLFLASH_RECOVER_AFU);
- case DK_CXLFLASH_MANAGE_LUN:
- return __stringify_1(DK_CXLFLASH_MANAGE_LUN);
- }
-
- return "UNKNOWN";
-}
-
-/**
- * cxlflash_disk_direct_open() - opens a direct (physical) disk
- * @sdev: SCSI device associated with LUN.
- * @arg: UDirect ioctl data structure.
- *
- * On successful return, the user is informed of the resource handle
- * to be used to identify the direct LUN and the size (in blocks) of
- * the direct LUN in last LBA format.
- *
- * Return: 0 on success, -errno on failure
- */
-static int cxlflash_disk_direct_open(struct scsi_device *sdev, void *arg)
-{
- struct cxlflash_cfg *cfg = shost_priv(sdev->host);
- struct device *dev = &cfg->dev->dev;
- struct afu *afu = cfg->afu;
- struct llun_info *lli = sdev->hostdata;
- struct glun_info *gli = lli->parent;
- struct dk_cxlflash_release rel = { { 0 }, 0 };
-
- struct dk_cxlflash_udirect *pphys = (struct dk_cxlflash_udirect *)arg;
-
- u64 ctxid = DECODE_CTXID(pphys->context_id),
- rctxid = pphys->context_id;
- u64 lun_size = 0;
- u64 last_lba = 0;
- u64 rsrc_handle = -1;
- u32 port = CHAN2PORTMASK(sdev->channel);
-
- int rc = 0;
-
- struct ctx_info *ctxi = NULL;
- struct sisl_rht_entry *rhte = NULL;
-
- dev_dbg(dev, "%s: ctxid=%llu ls=%llu\n", __func__, ctxid, lun_size);
-
- rc = cxlflash_lun_attach(gli, MODE_PHYSICAL, false);
- if (unlikely(rc)) {
- dev_dbg(dev, "%s: Failed attach to LUN (PHYSICAL)\n", __func__);
- goto out;
- }
-
- ctxi = get_context(cfg, rctxid, lli, 0);
- if (unlikely(!ctxi)) {
- dev_dbg(dev, "%s: Bad context ctxid=%llu\n", __func__, ctxid);
- rc = -EINVAL;
- goto err1;
- }
-
- rhte = rhte_checkout(ctxi, lli);
- if (unlikely(!rhte)) {
- dev_dbg(dev, "%s: Too many opens ctxid=%lld\n",
- __func__, ctxid);
- rc = -EMFILE; /* too many opens */
- goto err1;
- }
-
- rsrc_handle = (rhte - ctxi->rht_start);
-
- rht_format1(rhte, lli->lun_id[sdev->channel], ctxi->rht_perms, port);
-
- last_lba = gli->max_lba;
- pphys->hdr.return_flags = 0;
- pphys->last_lba = last_lba;
- pphys->rsrc_handle = rsrc_handle;
-
- rc = cxlflash_afu_sync(afu, ctxid, rsrc_handle, AFU_LW_SYNC);
- if (unlikely(rc)) {
- dev_dbg(dev, "%s: AFU sync failed rc=%d\n", __func__, rc);
- goto err2;
- }
-
-out:
- if (likely(ctxi))
- put_context(ctxi);
- dev_dbg(dev, "%s: returning handle=%llu rc=%d llba=%llu\n",
- __func__, rsrc_handle, rc, last_lba);
- return rc;
-
-err2:
- marshal_udir_to_rele(pphys, &rel);
- _cxlflash_disk_release(sdev, ctxi, &rel);
- goto out;
-err1:
- cxlflash_lun_detach(gli);
- goto out;
-}
-
-/**
- * ioctl_common() - common IOCTL handler for driver
- * @sdev: SCSI device associated with LUN.
- * @cmd: IOCTL command.
- *
- * Handles common fencing operations that are valid for multiple ioctls. Always
- * allow through ioctls that are cleanup oriented in nature, even when operating
- * in a failed/terminating state.
- *
- * Return: 0 on success, -errno on failure
- */
-static int ioctl_common(struct scsi_device *sdev, unsigned int cmd)
-{
- struct cxlflash_cfg *cfg = shost_priv(sdev->host);
- struct device *dev = &cfg->dev->dev;
- struct llun_info *lli = sdev->hostdata;
- int rc = 0;
-
- if (unlikely(!lli)) {
- dev_dbg(dev, "%s: Unknown LUN\n", __func__);
- rc = -EINVAL;
- goto out;
- }
-
- rc = check_state(cfg);
- if (unlikely(rc) && (cfg->state == STATE_FAILTERM)) {
- switch (cmd) {
- case DK_CXLFLASH_VLUN_RESIZE:
- case DK_CXLFLASH_RELEASE:
- case DK_CXLFLASH_DETACH:
- dev_dbg(dev, "%s: Command override rc=%d\n",
- __func__, rc);
- rc = 0;
- break;
- }
- }
-out:
- return rc;
-}
-
-/**
- * cxlflash_ioctl() - IOCTL handler for driver
- * @sdev: SCSI device associated with LUN.
- * @cmd: IOCTL command.
- * @arg: Userspace ioctl data structure.
- *
- * A read/write semaphore is used to implement a 'drain' of currently
- * running ioctls. The read semaphore is taken at the beginning of each
- * ioctl thread and released upon concluding execution. Additionally the
- * semaphore should be released and then reacquired in any ioctl execution
- * path which will wait for an event to occur that is outside the scope of
- * the ioctl (i.e. an adapter reset). To drain the ioctls currently running,
- * a thread simply needs to acquire the write semaphore.
- *
- * Return: 0 on success, -errno on failure
- */
-int cxlflash_ioctl(struct scsi_device *sdev, unsigned int cmd, void __user *arg)
-{
- typedef int (*sioctl) (struct scsi_device *, void *);
-
- struct cxlflash_cfg *cfg = shost_priv(sdev->host);
- struct device *dev = &cfg->dev->dev;
- struct afu *afu = cfg->afu;
- struct dk_cxlflash_hdr *hdr;
- char buf[sizeof(union cxlflash_ioctls)];
- size_t size = 0;
- bool known_ioctl = false;
- int idx;
- int rc = 0;
- struct Scsi_Host *shost = sdev->host;
- sioctl do_ioctl = NULL;
-
- static const struct {
- size_t size;
- sioctl ioctl;
- } ioctl_tbl[] = { /* NOTE: order matters here */
- {sizeof(struct dk_cxlflash_attach), cxlflash_disk_attach},
- {sizeof(struct dk_cxlflash_udirect), cxlflash_disk_direct_open},
- {sizeof(struct dk_cxlflash_release), cxlflash_disk_release},
- {sizeof(struct dk_cxlflash_detach), cxlflash_disk_detach},
- {sizeof(struct dk_cxlflash_verify), cxlflash_disk_verify},
- {sizeof(struct dk_cxlflash_recover_afu), cxlflash_afu_recover},
- {sizeof(struct dk_cxlflash_manage_lun), cxlflash_manage_lun},
- {sizeof(struct dk_cxlflash_uvirtual), cxlflash_disk_virtual_open},
- {sizeof(struct dk_cxlflash_resize), cxlflash_vlun_resize},
- {sizeof(struct dk_cxlflash_clone), cxlflash_disk_clone},
- };
-
- /* Hold read semaphore so we can drain if needed */
- down_read(&cfg->ioctl_rwsem);
-
- /* Restrict command set to physical support only for internal LUN */
- if (afu->internal_lun)
- switch (cmd) {
- case DK_CXLFLASH_RELEASE:
- case DK_CXLFLASH_USER_VIRTUAL:
- case DK_CXLFLASH_VLUN_RESIZE:
- case DK_CXLFLASH_VLUN_CLONE:
- dev_dbg(dev, "%s: %s not supported for lun_mode=%d\n",
- __func__, decode_ioctl(cmd), afu->internal_lun);
- rc = -EINVAL;
- goto cxlflash_ioctl_exit;
- }
-
- switch (cmd) {
- case DK_CXLFLASH_ATTACH:
- case DK_CXLFLASH_USER_DIRECT:
- case DK_CXLFLASH_RELEASE:
- case DK_CXLFLASH_DETACH:
- case DK_CXLFLASH_VERIFY:
- case DK_CXLFLASH_RECOVER_AFU:
- case DK_CXLFLASH_USER_VIRTUAL:
- case DK_CXLFLASH_VLUN_RESIZE:
- case DK_CXLFLASH_VLUN_CLONE:
- dev_dbg(dev, "%s: %s (%08X) on dev(%d/%d/%d/%llu)\n",
- __func__, decode_ioctl(cmd), cmd, shost->host_no,
- sdev->channel, sdev->id, sdev->lun);
- rc = ioctl_common(sdev, cmd);
- if (unlikely(rc))
- goto cxlflash_ioctl_exit;
-
- fallthrough;
-
- case DK_CXLFLASH_MANAGE_LUN:
- known_ioctl = true;
- idx = _IOC_NR(cmd) - _IOC_NR(DK_CXLFLASH_ATTACH);
- size = ioctl_tbl[idx].size;
- do_ioctl = ioctl_tbl[idx].ioctl;
-
- if (likely(do_ioctl))
- break;
-
- fallthrough;
- default:
- rc = -EINVAL;
- goto cxlflash_ioctl_exit;
- }
-
- if (unlikely(copy_from_user(&buf, arg, size))) {
- dev_err(dev, "%s: copy_from_user() fail size=%lu cmd=%u (%s) arg=%p\n",
- __func__, size, cmd, decode_ioctl(cmd), arg);
- rc = -EFAULT;
- goto cxlflash_ioctl_exit;
- }
-
- hdr = (struct dk_cxlflash_hdr *)&buf;
- if (hdr->version != DK_CXLFLASH_VERSION_0) {
- dev_dbg(dev, "%s: Version %u not supported for %s\n",
- __func__, hdr->version, decode_ioctl(cmd));
- rc = -EINVAL;
- goto cxlflash_ioctl_exit;
- }
-
- if (hdr->rsvd[0] || hdr->rsvd[1] || hdr->rsvd[2] || hdr->return_flags) {
- dev_dbg(dev, "%s: Reserved/rflags populated\n", __func__);
- rc = -EINVAL;
- goto cxlflash_ioctl_exit;
- }
-
- rc = do_ioctl(sdev, (void *)&buf);
- if (likely(!rc))
- if (unlikely(copy_to_user(arg, &buf, size))) {
- dev_err(dev, "%s: copy_to_user() fail size=%lu cmd=%u (%s) arg=%p\n",
- __func__, size, cmd, decode_ioctl(cmd), arg);
- rc = -EFAULT;
- }
-
- /* fall through to exit */
-
-cxlflash_ioctl_exit:
- up_read(&cfg->ioctl_rwsem);
- if (unlikely(rc && known_ioctl))
- dev_err(dev, "%s: ioctl %s (%08X) on dev(%d/%d/%d/%llu) "
- "returned rc %d\n", __func__,
- decode_ioctl(cmd), cmd, shost->host_no,
- sdev->channel, sdev->id, sdev->lun, rc);
- else
- dev_dbg(dev, "%s: ioctl %s (%08X) on dev(%d/%d/%d/%llu) "
- "returned rc %d\n", __func__, decode_ioctl(cmd),
- cmd, shost->host_no, sdev->channel, sdev->id,
- sdev->lun, rc);
- return rc;
-}
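The prologue of cxlflash_ioctl() above describes draining in-flight ioctls
via cfg->ioctl_rwsem. A minimal sketch of that drain, assuming a helper named
drain_ioctls_sketch() (hypothetical name, not part of this patch):

	static void drain_ioctls_sketch(struct cxlflash_cfg *cfg)
	{
		/* The writer blocks until every ioctl thread (reader) releases the lock */
		down_write(&cfg->ioctl_rwsem);
		up_write(&cfg->ioctl_rwsem);	/* new ioctls may proceed again */
	}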
diff --git a/drivers/scsi/cxlflash/superpipe.h b/drivers/scsi/cxlflash/superpipe.h
deleted file mode 100644
index fe8c975d13d7..000000000000
--- a/drivers/scsi/cxlflash/superpipe.h
+++ /dev/null
@@ -1,150 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * CXL Flash Device Driver
- *
- * Written by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>, IBM Corporation
- * Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
- *
- * Copyright (C) 2015 IBM Corporation
- */
-
-#ifndef _CXLFLASH_SUPERPIPE_H
-#define _CXLFLASH_SUPERPIPE_H
-
-extern struct cxlflash_global global;
-
-/*
- * Terminology: use afu (and not adapter) to refer to the HW.
- * Adapter is the entire slot and includes PSL out of which
- * only the AFU is visible to user space.
- */
-
-/* Chunk size parms: note sislite minimum chunk size is
- * 0x10000 LBAs corresponding to an NMASK of 16.
- */
-#define MC_CHUNK_SIZE (1 << MC_RHT_NMASK) /* in LBAs */
-
-#define CMD_TIMEOUT 30 /* 30 secs */
-#define CMD_RETRIES 5 /* 5 retries for scsi_execute */
-
-#define MAX_SECTOR_UNIT 512 /* max_sector is in 512 byte multiples */
-
-enum lun_mode {
- MODE_NONE = 0,
- MODE_VIRTUAL,
- MODE_PHYSICAL
-};
-
-/* Global (entire driver, spans adapters) lun_info structure */
-struct glun_info {
- u64 max_lba; /* from read cap(16) */
- u32 blk_len; /* from read cap(16) */
- enum lun_mode mode; /* NONE, VIRTUAL, PHYSICAL */
- int users; /* Number of users w/ references to LUN */
-
- u8 wwid[16];
-
- struct mutex mutex;
-
- struct blka blka;
- struct list_head list;
-};
-
-/* Local (per-adapter) lun_info structure */
-struct llun_info {
- u64 lun_id[MAX_FC_PORTS]; /* from REPORT_LUNS */
- u32 lun_index; /* Index in the LUN table */
- u32 host_no; /* host_no from Scsi_host */
- u32 port_sel; /* What port to use for this LUN */
- bool in_table; /* Whether a LUN table entry was created */
-
- u8 wwid[16]; /* Keep a duplicate copy here? */
-
- struct glun_info *parent; /* Pointer to entry in global LUN structure */
- struct scsi_device *sdev;
- struct list_head list;
-};
-
-struct lun_access {
- struct llun_info *lli;
- struct scsi_device *sdev;
- struct list_head list;
-};
-
-enum ctx_ctrl {
- CTX_CTRL_CLONE = (1 << 1),
- CTX_CTRL_ERR = (1 << 2),
- CTX_CTRL_ERR_FALLBACK = (1 << 3),
- CTX_CTRL_NOPID = (1 << 4),
- CTX_CTRL_FILE = (1 << 5)
-};
-
-#define ENCODE_CTXID(_ctx, _id) (((((u64)_ctx) & 0xFFFFFFFF0ULL) << 28) | _id)
-#define DECODE_CTXID(_val) (_val & 0xFFFFFFFF)
-
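A brief, hypothetical illustration of the macros above: ENCODE_CTXID() keeps
the adapter context number in the low 32 bits and folds bits derived from the
context handle into the upper 32 bits, so DECODE_CTXID() recovers the number
with a simple mask (assuming the id fits in 32 bits; values below are made up):

	u64 rctxid = ENCODE_CTXID(ctx_handle, 5);	/* ctx_handle: hypothetical cookie */
	u64 ctxid  = DECODE_CTXID(rctxid);		/* yields 5 */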
-struct ctx_info {
- struct sisl_ctrl_map __iomem *ctrl_map; /* initialized at startup */
- struct sisl_rht_entry *rht_start; /* 1 page (req'd for alignment),
- * alloc/free on attach/detach
- */
- u32 rht_out; /* Number of checked out RHT entries */
- u32 rht_perms; /* User-defined permissions for RHT entries */
- struct llun_info **rht_lun; /* Mapping of RHT entries to LUNs */
- u8 *rht_needs_ws; /* User-desired write-same function per RHTE */
-
- u64 ctxid;
- u64 irqs; /* Number of interrupts requested for context */
- pid_t pid;
- bool initialized;
- bool unavail;
- bool err_recovery_active;
- struct mutex mutex; /* Context protection */
- struct kref kref;
- void *ctx;
- struct cxlflash_cfg *cfg;
- struct list_head luns; /* LUNs attached to this context */
- const struct vm_operations_struct *cxl_mmap_vmops;
- struct file *file;
- struct list_head list; /* Link contexts in error recovery */
-};
-
-struct cxlflash_global {
- struct mutex mutex;
- struct list_head gluns;/* list of glun_info structs */
- struct page *err_page; /* One page of all 0xF for error notification */
-};
-
-int cxlflash_vlun_resize(struct scsi_device *sdev, void *resize);
-int _cxlflash_vlun_resize(struct scsi_device *sdev, struct ctx_info *ctxi,
- struct dk_cxlflash_resize *resize);
-
-int cxlflash_disk_release(struct scsi_device *sdev,
- void *release);
-int _cxlflash_disk_release(struct scsi_device *sdev, struct ctx_info *ctxi,
- struct dk_cxlflash_release *release);
-
-int cxlflash_disk_clone(struct scsi_device *sdev, void *arg);
-
-int cxlflash_disk_virtual_open(struct scsi_device *sdev, void *arg);
-
-int cxlflash_lun_attach(struct glun_info *gli, enum lun_mode mode, bool locked);
-void cxlflash_lun_detach(struct glun_info *gli);
-
-struct ctx_info *get_context(struct cxlflash_cfg *cfg, u64 rctxit, void *arg,
- enum ctx_ctrl ctrl);
-void put_context(struct ctx_info *ctxi);
-
-struct sisl_rht_entry *get_rhte(struct ctx_info *ctxi, res_hndl_t rhndl,
- struct llun_info *lli);
-
-struct sisl_rht_entry *rhte_checkout(struct ctx_info *ctxi,
- struct llun_info *lli);
-void rhte_checkin(struct ctx_info *ctxi, struct sisl_rht_entry *rhte);
-
-void cxlflash_ba_terminate(struct ba_lun *ba_lun);
-
-int cxlflash_manage_lun(struct scsi_device *sdev, void *manage);
-
-int check_state(struct cxlflash_cfg *cfg);
-
-#endif /* ifndef _CXLFLASH_SUPERPIPE_H */
diff --git a/drivers/scsi/cxlflash/vlun.c b/drivers/scsi/cxlflash/vlun.c
deleted file mode 100644
index 32e807703377..000000000000
--- a/drivers/scsi/cxlflash/vlun.c
+++ /dev/null
@@ -1,1336 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * CXL Flash Device Driver
- *
- * Written by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>, IBM Corporation
- * Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
- *
- * Copyright (C) 2015 IBM Corporation
- */
-
-#include <linux/interrupt.h>
-#include <linux/pci.h>
-#include <linux/syscalls.h>
-#include <linux/unaligned.h>
-#include <asm/bitsperlong.h>
-
-#include <scsi/scsi_cmnd.h>
-#include <scsi/scsi_host.h>
-#include <uapi/scsi/cxlflash_ioctl.h>
-
-#include "sislite.h"
-#include "common.h"
-#include "vlun.h"
-#include "superpipe.h"
-
-/**
- * marshal_virt_to_resize() - translate uvirtual to resize structure
- * @virt: Source structure from which to translate/copy.
- * @resize: Destination structure for the translate/copy.
- */
-static void marshal_virt_to_resize(struct dk_cxlflash_uvirtual *virt,
- struct dk_cxlflash_resize *resize)
-{
- resize->hdr = virt->hdr;
- resize->context_id = virt->context_id;
- resize->rsrc_handle = virt->rsrc_handle;
- resize->req_size = virt->lun_size;
- resize->last_lba = virt->last_lba;
-}
-
-/**
- * marshal_clone_to_rele() - translate clone to release structure
- * @clone: Source structure from which to translate/copy.
- * @release: Destination structure for the translate/copy.
- */
-static void marshal_clone_to_rele(struct dk_cxlflash_clone *clone,
- struct dk_cxlflash_release *release)
-{
- release->hdr = clone->hdr;
- release->context_id = clone->context_id_dst;
-}
-
-/**
- * ba_init() - initializes a block allocator
- * @ba_lun: Block allocator to initialize.
- *
- * Return: 0 on success, -errno on failure
- */
-static int ba_init(struct ba_lun *ba_lun)
-{
- struct ba_lun_info *bali = NULL;
- int lun_size_au = 0, i = 0;
- int last_word_underflow = 0;
- u64 *lam;
-
- pr_debug("%s: Initializing LUN: lun_id=%016llx "
- "ba_lun->lsize=%lx ba_lun->au_size=%lX\n",
- __func__, ba_lun->lun_id, ba_lun->lsize, ba_lun->au_size);
-
- /* Calculate bit map size */
- lun_size_au = ba_lun->lsize / ba_lun->au_size;
- if (lun_size_au == 0) {
- pr_debug("%s: Requested LUN size of 0!\n", __func__);
- return -EINVAL;
- }
-
- /* Allocate lun information container */
- bali = kzalloc(sizeof(struct ba_lun_info), GFP_KERNEL);
- if (unlikely(!bali)) {
- pr_err("%s: Failed to allocate lun_info lun_id=%016llx\n",
- __func__, ba_lun->lun_id);
- return -ENOMEM;
- }
-
- bali->total_aus = lun_size_au;
- bali->lun_bmap_size = lun_size_au / BITS_PER_LONG;
-
- if (lun_size_au % BITS_PER_LONG)
- bali->lun_bmap_size++;
-
- /* Allocate bitmap space */
- bali->lun_alloc_map = kzalloc((bali->lun_bmap_size * sizeof(u64)),
- GFP_KERNEL);
- if (unlikely(!bali->lun_alloc_map)) {
- pr_err("%s: Failed to allocate lun allocation map: "
- "lun_id=%016llx\n", __func__, ba_lun->lun_id);
- kfree(bali);
- return -ENOMEM;
- }
-
-	/* Initialize the free AU count and set all bits to '1' (free) */
- bali->free_aun_cnt = lun_size_au;
-
- for (i = 0; i < bali->lun_bmap_size; i++)
- bali->lun_alloc_map[i] = 0xFFFFFFFFFFFFFFFFULL;
-
-	/* If the last word is not fully utilized, mark extra bits as allocated */
- last_word_underflow = (bali->lun_bmap_size * BITS_PER_LONG);
- last_word_underflow -= bali->free_aun_cnt;
- if (last_word_underflow > 0) {
- lam = &bali->lun_alloc_map[bali->lun_bmap_size - 1];
- for (i = (HIBIT - last_word_underflow + 1);
- i < BITS_PER_LONG;
- i++)
- clear_bit(i, (ulong *)lam);
- }
-
- /* Initialize high elevator index, low/curr already at 0 from kzalloc */
- bali->free_high_idx = bali->lun_bmap_size;
-
- /* Allocate clone map */
- bali->aun_clone_map = kzalloc((bali->total_aus * sizeof(u8)),
- GFP_KERNEL);
- if (unlikely(!bali->aun_clone_map)) {
- pr_err("%s: Failed to allocate clone map: lun_id=%016llx\n",
- __func__, ba_lun->lun_id);
- kfree(bali->lun_alloc_map);
- kfree(bali);
- return -ENOMEM;
- }
-
- /* Pass the allocated LUN info as a handle to the user */
- ba_lun->ba_lun_handle = bali;
-
- pr_debug("%s: Successfully initialized the LUN: "
- "lun_id=%016llx bitmap size=%x, free_aun_cnt=%llx\n",
- __func__, ba_lun->lun_id, bali->lun_bmap_size,
- bali->free_aun_cnt);
- return 0;
-}
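As a worked example of the sizing above (illustrative numbers only): a LUN of
70 allocation units with BITS_PER_LONG == 64 gives a lun_bmap_size of 2 words
and a free_aun_cnt of 70; last_word_underflow is 2*64 - 70 = 58, so bits 6..63
of the last word are cleared, leaving 6 free bits there plus 64 in the first
word, i.e. exactly 70 allocatable units.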
-
-/**
- * find_free_range() - locates a free bit within the block allocator
- * @low: First word in block allocator to start search.
- * @high: Last word in block allocator to search.
- * @bali: LUN information structure owning the block allocator to search.
- * @bit_word: Passes back the word in the block allocator owning the free bit.
- *
- * Return: The bit position within the passed back word, -1 on failure
- */
-static int find_free_range(u32 low,
- u32 high,
- struct ba_lun_info *bali, int *bit_word)
-{
- int i;
- u64 bit_pos = -1;
- ulong *lam, num_bits;
-
- for (i = low; i < high; i++)
- if (bali->lun_alloc_map[i] != 0) {
- lam = (ulong *)&bali->lun_alloc_map[i];
- num_bits = (sizeof(*lam) * BITS_PER_BYTE);
- bit_pos = find_first_bit(lam, num_bits);
-
- pr_devel("%s: Found free bit %llu in LUN "
- "map entry %016llx at bitmap index = %d\n",
- __func__, bit_pos, bali->lun_alloc_map[i], i);
-
- *bit_word = i;
- bali->free_aun_cnt--;
- clear_bit(bit_pos, lam);
- break;
- }
-
- return bit_pos;
-}
-
-/**
- * ba_alloc() - allocates a block from the block allocator
- * @ba_lun: Block allocator from which to allocate a block.
- *
- * Return: The allocated block, -1 on failure
- */
-static u64 ba_alloc(struct ba_lun *ba_lun)
-{
- u64 bit_pos = -1;
- int bit_word = 0;
- struct ba_lun_info *bali = NULL;
-
- bali = ba_lun->ba_lun_handle;
-
- pr_debug("%s: Received block allocation request: "
- "lun_id=%016llx free_aun_cnt=%llx\n",
- __func__, ba_lun->lun_id, bali->free_aun_cnt);
-
- if (bali->free_aun_cnt == 0) {
- pr_debug("%s: No space left on LUN: lun_id=%016llx\n",
- __func__, ba_lun->lun_id);
- return -1ULL;
- }
-
- /* Search to find a free entry, curr->high then low->curr */
- bit_pos = find_free_range(bali->free_curr_idx,
- bali->free_high_idx, bali, &bit_word);
- if (bit_pos == -1) {
- bit_pos = find_free_range(bali->free_low_idx,
- bali->free_curr_idx,
- bali, &bit_word);
- if (bit_pos == -1) {
- pr_debug("%s: Could not find an allocation unit on LUN:"
- " lun_id=%016llx\n", __func__, ba_lun->lun_id);
- return -1ULL;
- }
- }
-
- /* Update the free_curr_idx */
- if (bit_pos == HIBIT)
- bali->free_curr_idx = bit_word + 1;
- else
- bali->free_curr_idx = bit_word;
-
- pr_debug("%s: Allocating AU number=%llx lun_id=%016llx "
- "free_aun_cnt=%llx\n", __func__,
- ((bit_word * BITS_PER_LONG) + bit_pos), ba_lun->lun_id,
- bali->free_aun_cnt);
-
- return (u64) ((bit_word * BITS_PER_LONG) + bit_pos);
-}
-
-/**
- * validate_alloc() - validates the specified block has been allocated
- * @bali: LUN info owning the block allocator.
- * @aun: Block to validate.
- *
- * Return: 0 on success, -1 on failure
- */
-static int validate_alloc(struct ba_lun_info *bali, u64 aun)
-{
- int idx = 0, bit_pos = 0;
-
- idx = aun / BITS_PER_LONG;
- bit_pos = aun % BITS_PER_LONG;
-
- if (test_bit(bit_pos, (ulong *)&bali->lun_alloc_map[idx]))
- return -1;
-
- return 0;
-}
-
-/**
- * ba_free() - frees a block from the block allocator
- * @ba_lun: Block allocator from which to allocate a block.
- * @to_free: Block to free.
- *
- * Return: 0 on success, -1 on failure
- */
-static int ba_free(struct ba_lun *ba_lun, u64 to_free)
-{
- int idx = 0, bit_pos = 0;
- struct ba_lun_info *bali = NULL;
-
- bali = ba_lun->ba_lun_handle;
-
- if (validate_alloc(bali, to_free)) {
- pr_debug("%s: AUN %llx is not allocated on lun_id=%016llx\n",
- __func__, to_free, ba_lun->lun_id);
- return -1;
- }
-
- pr_debug("%s: Received a request to free AU=%llx lun_id=%016llx "
- "free_aun_cnt=%llx\n", __func__, to_free, ba_lun->lun_id,
- bali->free_aun_cnt);
-
- if (bali->aun_clone_map[to_free] > 0) {
- pr_debug("%s: AUN %llx lun_id=%016llx cloned. Clone count=%x\n",
- __func__, to_free, ba_lun->lun_id,
- bali->aun_clone_map[to_free]);
- bali->aun_clone_map[to_free]--;
- return 0;
- }
-
- idx = to_free / BITS_PER_LONG;
- bit_pos = to_free % BITS_PER_LONG;
-
- set_bit(bit_pos, (ulong *)&bali->lun_alloc_map[idx]);
- bali->free_aun_cnt++;
-
- if (idx < bali->free_low_idx)
- bali->free_low_idx = idx;
- else if (idx > bali->free_high_idx)
- bali->free_high_idx = idx;
-
- pr_debug("%s: Successfully freed AU bit_pos=%x bit map index=%x "
- "lun_id=%016llx free_aun_cnt=%llx\n", __func__, bit_pos, idx,
- ba_lun->lun_id, bali->free_aun_cnt);
-
- return 0;
-}
-
-/**
- * ba_clone() - Clone a chunk of the block allocation table
- * @ba_lun: Block allocator from which to allocate a block.
- * @to_clone: Block to clone.
- *
- * Return: 0 on success, -1 on failure
- */
-static int ba_clone(struct ba_lun *ba_lun, u64 to_clone)
-{
- struct ba_lun_info *bali = ba_lun->ba_lun_handle;
-
- if (validate_alloc(bali, to_clone)) {
- pr_debug("%s: AUN=%llx not allocated on lun_id=%016llx\n",
- __func__, to_clone, ba_lun->lun_id);
- return -1;
- }
-
- pr_debug("%s: Received a request to clone AUN %llx on lun_id=%016llx\n",
- __func__, to_clone, ba_lun->lun_id);
-
- if (bali->aun_clone_map[to_clone] == MAX_AUN_CLONE_CNT) {
- pr_debug("%s: AUN %llx on lun_id=%016llx hit max clones already\n",
- __func__, to_clone, ba_lun->lun_id);
- return -1;
- }
-
- bali->aun_clone_map[to_clone]++;
-
- return 0;
-}
-
-/**
- * ba_space() - returns the amount of free space left in the block allocator
- * @ba_lun: Block allocator.
- *
- * Return: Amount of free space in block allocator
- */
-static u64 ba_space(struct ba_lun *ba_lun)
-{
- struct ba_lun_info *bali = ba_lun->ba_lun_handle;
-
- return bali->free_aun_cnt;
-}
-
-/**
- * cxlflash_ba_terminate() - frees resources associated with the block allocator
- * @ba_lun: Block allocator.
- *
- * Safe to call in a partially allocated state.
- */
-void cxlflash_ba_terminate(struct ba_lun *ba_lun)
-{
- struct ba_lun_info *bali = ba_lun->ba_lun_handle;
-
- if (bali) {
- kfree(bali->aun_clone_map);
- kfree(bali->lun_alloc_map);
- kfree(bali);
- ba_lun->ba_lun_handle = NULL;
- }
-}
-
-/**
- * init_vlun() - initializes a LUN for virtual use
- * @lli: LUN information structure that owns the block allocator.
- *
- * Return: 0 on success, -errno on failure
- */
-static int init_vlun(struct llun_info *lli)
-{
- int rc = 0;
- struct glun_info *gli = lli->parent;
- struct blka *blka = &gli->blka;
-
- memset(blka, 0, sizeof(*blka));
- mutex_init(&blka->mutex);
-
- /* LUN IDs are unique per port, save the index instead */
- blka->ba_lun.lun_id = lli->lun_index;
- blka->ba_lun.lsize = gli->max_lba + 1;
- blka->ba_lun.lba_size = gli->blk_len;
-
- blka->ba_lun.au_size = MC_CHUNK_SIZE;
- blka->nchunk = blka->ba_lun.lsize / MC_CHUNK_SIZE;
-
- rc = ba_init(&blka->ba_lun);
- if (unlikely(rc))
- pr_debug("%s: cannot init block_alloc, rc=%d\n", __func__, rc);
-
- pr_debug("%s: returning rc=%d lli=%p\n", __func__, rc, lli);
- return rc;
-}
-
-/**
- * write_same16() - sends a SCSI WRITE_SAME16 (0) command to the specified LUN
- * @sdev: SCSI device associated with LUN.
- * @lba: Logical block address to start write same.
- * @nblks: Number of logical blocks to write same.
- *
- * The SCSI WRITE_SAME16 can take quite a while to complete. Should an EEH occur
- * while in scsi_execute_cmd(), the EEH handler will attempt to recover. As
- * part of the recovery, the handler drains all currently running ioctls,
- * waiting until they have completed before proceeding with a reset. As this
- * routine is used on the ioctl path, this can create a condition where the
- * EEH handler becomes stuck, infinitely waiting for this ioctl thread. To
- * avoid this behavior, temporarily unmark this thread as an ioctl thread by
- * releasing the ioctl read semaphore. This will allow the EEH handler to
- * proceed with a recovery while this thread is still running. Once the
- * scsi_execute_cmd() returns, reacquire the ioctl read semaphore and check the
- * adapter state in case it changed while inside of scsi_execute_cmd(). The
- * state check will wait if the adapter is still being recovered or return a
- * failure if the recovery failed. In the event that the adapter reset failed,
- * simply return the failure as the ioctl would be unable to continue.
- *
- * Note that the above puts a requirement on this routine to only be called on
- * an ioctl thread.
- *
- * Return: 0 on success, -errno on failure
- */
-static int write_same16(struct scsi_device *sdev,
- u64 lba,
- u32 nblks)
-{
- u8 *cmd_buf = NULL;
- u8 *scsi_cmd = NULL;
- int rc = 0;
- int result = 0;
- u64 offset = lba;
- int left = nblks;
- struct cxlflash_cfg *cfg = shost_priv(sdev->host);
- struct device *dev = &cfg->dev->dev;
- const u32 s = ilog2(sdev->sector_size) - 9;
- const u32 to = sdev->request_queue->rq_timeout;
- const u32 ws_limit =
- sdev->request_queue->limits.max_write_zeroes_sectors >> s;
-
- cmd_buf = kzalloc(CMD_BUFSIZE, GFP_KERNEL);
- scsi_cmd = kzalloc(MAX_COMMAND_SIZE, GFP_KERNEL);
- if (unlikely(!cmd_buf || !scsi_cmd)) {
- rc = -ENOMEM;
- goto out;
- }
-
- while (left > 0) {
-
- scsi_cmd[0] = WRITE_SAME_16;
- scsi_cmd[1] = cfg->ws_unmap ? 0x8 : 0;
- put_unaligned_be64(offset, &scsi_cmd[2]);
- put_unaligned_be32(ws_limit < left ? ws_limit : left,
- &scsi_cmd[10]);
-
- /* Drop the ioctl read semaphore across lengthy call */
- up_read(&cfg->ioctl_rwsem);
- result = scsi_execute_cmd(sdev, scsi_cmd, REQ_OP_DRV_OUT,
- cmd_buf, CMD_BUFSIZE, to,
- CMD_RETRIES, NULL);
- down_read(&cfg->ioctl_rwsem);
- rc = check_state(cfg);
- if (rc) {
- dev_err(dev, "%s: Failed state result=%08x\n",
- __func__, result);
- rc = -ENODEV;
- goto out;
- }
-
- if (result) {
- dev_err_ratelimited(dev, "%s: command failed for "
- "offset=%lld result=%08x\n",
- __func__, offset, result);
- rc = -EIO;
- goto out;
- }
- left -= ws_limit;
- offset += ws_limit;
- }
-
-out:
- kfree(cmd_buf);
- kfree(scsi_cmd);
- dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
- return rc;
-}
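A worked example of the ws_limit computation above (illustrative numbers
only): with a 4096-byte sector size, s = ilog2(4096) - 9 = 3, so a
max_write_zeroes_sectors of 0x80000 (in 512-byte units) becomes a ws_limit of
0x10000 device blocks, and each loop iteration issues a WRITE_SAME(16)
covering at most that many blocks.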
-
-/**
- * grow_lxt() - expands the translation table associated with the specified RHTE
- * @afu: AFU associated with the host.
- * @sdev: SCSI device associated with LUN.
- * @ctxid: Context ID of context owning the RHTE.
- * @rhndl: Resource handle associated with the RHTE.
- * @rhte: Resource handle entry (RHTE).
- * @new_size: Number of translation entries associated with RHTE.
- *
- * By design, this routine employs a 'best attempt' allocation and will
- * truncate the requested size down if there is not sufficient space in
- * the block allocator to satisfy the request but there does exist some
- * amount of space. The user is made aware of this by returning the size
- * allocated.
- *
- * Return: 0 on success, -errno on failure
- */
-static int grow_lxt(struct afu *afu,
- struct scsi_device *sdev,
- ctx_hndl_t ctxid,
- res_hndl_t rhndl,
- struct sisl_rht_entry *rhte,
- u64 *new_size)
-{
- struct cxlflash_cfg *cfg = shost_priv(sdev->host);
- struct device *dev = &cfg->dev->dev;
- struct sisl_lxt_entry *lxt = NULL, *lxt_old = NULL;
- struct llun_info *lli = sdev->hostdata;
- struct glun_info *gli = lli->parent;
- struct blka *blka = &gli->blka;
- u32 av_size;
- u32 ngrps, ngrps_old;
- u64 aun; /* chunk# allocated by block allocator */
- u64 delta = *new_size - rhte->lxt_cnt;
- u64 my_new_size;
- int i, rc = 0;
-
- /*
- * Check what is available in the block allocator before re-allocating
- * LXT array. This is done up front under the mutex which must not be
- * released until after allocation is complete.
- */
- mutex_lock(&blka->mutex);
- av_size = ba_space(&blka->ba_lun);
- if (unlikely(av_size <= 0)) {
- dev_dbg(dev, "%s: ba_space error av_size=%d\n",
- __func__, av_size);
- mutex_unlock(&blka->mutex);
- rc = -ENOSPC;
- goto out;
- }
-
- if (av_size < delta)
- delta = av_size;
-
- lxt_old = rhte->lxt_start;
- ngrps_old = LXT_NUM_GROUPS(rhte->lxt_cnt);
- ngrps = LXT_NUM_GROUPS(rhte->lxt_cnt + delta);
-
- if (ngrps != ngrps_old) {
- /* reallocate to fit new size */
- lxt = kzalloc((sizeof(*lxt) * LXT_GROUP_SIZE * ngrps),
- GFP_KERNEL);
- if (unlikely(!lxt)) {
- mutex_unlock(&blka->mutex);
- rc = -ENOMEM;
- goto out;
- }
-
- /* copy over all old entries */
- memcpy(lxt, lxt_old, (sizeof(*lxt) * rhte->lxt_cnt));
- } else
- lxt = lxt_old;
-
- /* nothing can fail from now on */
- my_new_size = rhte->lxt_cnt + delta;
-
- /* add new entries to the end */
- for (i = rhte->lxt_cnt; i < my_new_size; i++) {
- /*
- * Due to the earlier check of available space, ba_alloc
- * cannot fail here. If it did due to internal error,
-		 * leave an rlba_base of -1u which will likely be an
-		 * invalid LUN (too large).
- */
- aun = ba_alloc(&blka->ba_lun);
- if ((aun == -1ULL) || (aun >= blka->nchunk))
- dev_dbg(dev, "%s: ba_alloc error allocated chunk=%llu "
- "max=%llu\n", __func__, aun, blka->nchunk - 1);
-
- /* select both ports, use r/w perms from RHT */
- lxt[i].rlba_base = ((aun << MC_CHUNK_SHIFT) |
- (lli->lun_index << LXT_LUNIDX_SHIFT) |
- (RHT_PERM_RW << LXT_PERM_SHIFT |
- lli->port_sel));
- }
-
- mutex_unlock(&blka->mutex);
-
- /*
- * The following sequence is prescribed in the SISlite spec
- * for syncing up with the AFU when adding LXT entries.
- */
-	dma_wmb(); /* Make LXT updates visible */
-
- rhte->lxt_start = lxt;
- dma_wmb(); /* Make RHT entry's LXT table update visible */
-
- rhte->lxt_cnt = my_new_size;
- dma_wmb(); /* Make RHT entry's LXT table size update visible */
-
- rc = cxlflash_afu_sync(afu, ctxid, rhndl, AFU_LW_SYNC);
- if (unlikely(rc))
- rc = -EAGAIN;
-
- /* free old lxt if reallocated */
- if (lxt != lxt_old)
- kfree(lxt_old);
- *new_size = my_new_size;
-out:
- dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
- return rc;
-}
-
-/**
- * shrink_lxt() - reduces translation table associated with the specified RHTE
- * @afu: AFU associated with the host.
- * @sdev: SCSI device associated with LUN.
- * @rhndl: Resource handle associated with the RHTE.
- * @rhte: Resource handle entry (RHTE).
- * @ctxi: Context owning resources.
- * @new_size: Number of translation entries associated with RHTE.
- *
- * Return: 0 on success, -errno on failure
- */
-static int shrink_lxt(struct afu *afu,
- struct scsi_device *sdev,
- res_hndl_t rhndl,
- struct sisl_rht_entry *rhte,
- struct ctx_info *ctxi,
- u64 *new_size)
-{
- struct cxlflash_cfg *cfg = shost_priv(sdev->host);
- struct device *dev = &cfg->dev->dev;
- struct sisl_lxt_entry *lxt, *lxt_old;
- struct llun_info *lli = sdev->hostdata;
- struct glun_info *gli = lli->parent;
- struct blka *blka = &gli->blka;
- ctx_hndl_t ctxid = DECODE_CTXID(ctxi->ctxid);
- bool needs_ws = ctxi->rht_needs_ws[rhndl];
- bool needs_sync = !ctxi->err_recovery_active;
- u32 ngrps, ngrps_old;
- u64 aun; /* chunk# allocated by block allocator */
- u64 delta = rhte->lxt_cnt - *new_size;
- u64 my_new_size;
- int i, rc = 0;
-
- lxt_old = rhte->lxt_start;
- ngrps_old = LXT_NUM_GROUPS(rhte->lxt_cnt);
- ngrps = LXT_NUM_GROUPS(rhte->lxt_cnt - delta);
-
- if (ngrps != ngrps_old) {
- /* Reallocate to fit new size unless new size is 0 */
- if (ngrps) {
- lxt = kzalloc((sizeof(*lxt) * LXT_GROUP_SIZE * ngrps),
- GFP_KERNEL);
- if (unlikely(!lxt)) {
- rc = -ENOMEM;
- goto out;
- }
-
- /* Copy over old entries that will remain */
- memcpy(lxt, lxt_old,
- (sizeof(*lxt) * (rhte->lxt_cnt - delta)));
- } else
- lxt = NULL;
- } else
- lxt = lxt_old;
-
- /* Nothing can fail from now on */
- my_new_size = rhte->lxt_cnt - delta;
-
- /*
- * The following sequence is prescribed in the SISlite spec
- * for syncing up with the AFU when removing LXT entries.
- */
- rhte->lxt_cnt = my_new_size;
- dma_wmb(); /* Make RHT entry's LXT table size update visible */
-
- rhte->lxt_start = lxt;
- dma_wmb(); /* Make RHT entry's LXT table update visible */
-
- if (needs_sync) {
- rc = cxlflash_afu_sync(afu, ctxid, rhndl, AFU_HW_SYNC);
- if (unlikely(rc))
- rc = -EAGAIN;
- }
-
- if (needs_ws) {
- /*
- * Mark the context as unavailable, so that we can release
- * the mutex safely.
- */
- ctxi->unavail = true;
- mutex_unlock(&ctxi->mutex);
- }
-
- /* Free LBAs allocated to freed chunks */
- mutex_lock(&blka->mutex);
- for (i = delta - 1; i >= 0; i--) {
- aun = lxt_old[my_new_size + i].rlba_base >> MC_CHUNK_SHIFT;
- if (needs_ws)
- write_same16(sdev, aun, MC_CHUNK_SIZE);
- ba_free(&blka->ba_lun, aun);
- }
- mutex_unlock(&blka->mutex);
-
- if (needs_ws) {
- /* Make the context visible again */
- mutex_lock(&ctxi->mutex);
- ctxi->unavail = false;
- }
-
- /* Free old lxt if reallocated */
- if (lxt != lxt_old)
- kfree(lxt_old);
- *new_size = my_new_size;
-out:
- dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
- return rc;
-}
-
-/**
- * _cxlflash_vlun_resize() - changes the size of a virtual LUN
- * @sdev: SCSI device associated with LUN owning virtual LUN.
- * @ctxi: Context owning resources.
- * @resize: Resize ioctl data structure.
- *
- * On successful return, the user is informed of the new size (in blocks)
- * of the virtual LUN in last LBA format. When the size of the virtual
- * LUN is zero, the last LBA is reflected as -1. See comment in the
- * prologue for _cxlflash_disk_release() regarding AFU syncs and contexts
- * on the error recovery list.
- *
- * Return: 0 on success, -errno on failure
- */
-int _cxlflash_vlun_resize(struct scsi_device *sdev,
- struct ctx_info *ctxi,
- struct dk_cxlflash_resize *resize)
-{
- struct cxlflash_cfg *cfg = shost_priv(sdev->host);
- struct device *dev = &cfg->dev->dev;
- struct llun_info *lli = sdev->hostdata;
- struct glun_info *gli = lli->parent;
- struct afu *afu = cfg->afu;
- bool put_ctx = false;
-
- res_hndl_t rhndl = resize->rsrc_handle;
- u64 new_size;
- u64 nsectors;
- u64 ctxid = DECODE_CTXID(resize->context_id),
- rctxid = resize->context_id;
-
- struct sisl_rht_entry *rhte;
-
- int rc = 0;
-
- /*
- * The requested size (req_size) is always assumed to be in 4k blocks,
- * so we have to convert it here from 4k to chunk size.
- */
- nsectors = (resize->req_size * CXLFLASH_BLOCK_SIZE) / gli->blk_len;
- new_size = DIV_ROUND_UP(nsectors, MC_CHUNK_SIZE);
-
- dev_dbg(dev, "%s: ctxid=%llu rhndl=%llu req_size=%llu new_size=%llu\n",
- __func__, ctxid, resize->rsrc_handle, resize->req_size,
- new_size);
-
- if (unlikely(gli->mode != MODE_VIRTUAL)) {
- dev_dbg(dev, "%s: LUN mode does not support resize mode=%d\n",
- __func__, gli->mode);
- rc = -EINVAL;
- goto out;
-
- }
-
- if (!ctxi) {
- ctxi = get_context(cfg, rctxid, lli, CTX_CTRL_ERR_FALLBACK);
- if (unlikely(!ctxi)) {
- dev_dbg(dev, "%s: Bad context ctxid=%llu\n",
- __func__, ctxid);
- rc = -EINVAL;
- goto out;
- }
-
- put_ctx = true;
- }
-
- rhte = get_rhte(ctxi, rhndl, lli);
- if (unlikely(!rhte)) {
- dev_dbg(dev, "%s: Bad resource handle rhndl=%u\n",
- __func__, rhndl);
- rc = -EINVAL;
- goto out;
- }
-
- if (new_size > rhte->lxt_cnt)
- rc = grow_lxt(afu, sdev, ctxid, rhndl, rhte, &new_size);
- else if (new_size < rhte->lxt_cnt)
- rc = shrink_lxt(afu, sdev, rhndl, rhte, ctxi, &new_size);
- else {
- /*
- * Rare case where there is already sufficient space, just
- * need to perform a translation sync with the AFU. This
- * scenario likely follows a previous sync failure during
- * a resize operation. Accordingly, perform the heavyweight
- * form of translation sync as it is unknown which type of
- * resize failed previously.
- */
- rc = cxlflash_afu_sync(afu, ctxid, rhndl, AFU_HW_SYNC);
- if (unlikely(rc)) {
- rc = -EAGAIN;
- goto out;
- }
- }
-
- resize->hdr.return_flags = 0;
- resize->last_lba = (new_size * MC_CHUNK_SIZE * gli->blk_len);
- resize->last_lba /= CXLFLASH_BLOCK_SIZE;
- resize->last_lba--;
-
-out:
- if (put_ctx)
- put_context(ctxi);
- dev_dbg(dev, "%s: resized to %llu returning rc=%d\n",
- __func__, resize->last_lba, rc);
- return rc;
-}
-
-int cxlflash_vlun_resize(struct scsi_device *sdev, void *resize)
-{
- return _cxlflash_vlun_resize(sdev, NULL, resize);
-}
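A worked example of the size conversion in _cxlflash_vlun_resize()
(illustrative numbers, assuming CXLFLASH_BLOCK_SIZE of 4096): with
gli->blk_len = 4096 and req_size = 0x30000 4K blocks, nsectors =
(0x30000 * 4096) / 4096 = 0x30000 device blocks, which rounds up to 3 chunks
of MC_CHUNK_SIZE (0x10000); the reported last_lba is then
3 * 0x10000 * 4096 / 4096 - 1 = 0x2FFFF.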
-
-/**
- * cxlflash_restore_luntable() - Restore LUN table to prior state
- * @cfg: Internal structure associated with the host.
- */
-void cxlflash_restore_luntable(struct cxlflash_cfg *cfg)
-{
- struct llun_info *lli, *temp;
- u32 lind;
- int k;
- struct device *dev = &cfg->dev->dev;
- __be64 __iomem *fc_port_luns;
-
- mutex_lock(&global.mutex);
-
- list_for_each_entry_safe(lli, temp, &cfg->lluns, list) {
- if (!lli->in_table)
- continue;
-
- lind = lli->lun_index;
- dev_dbg(dev, "%s: Virtual LUNs on slot %d:\n", __func__, lind);
-
- for (k = 0; k < cfg->num_fc_ports; k++)
- if (lli->port_sel & (1 << k)) {
- fc_port_luns = get_fc_port_luns(cfg, k);
- writeq_be(lli->lun_id[k], &fc_port_luns[lind]);
- dev_dbg(dev, "\t%d=%llx\n", k, lli->lun_id[k]);
- }
- }
-
- mutex_unlock(&global.mutex);
-}
-
-/**
- * get_num_ports() - compute number of ports from port selection mask
- * @psm: Port selection mask.
- *
- * Return: Population count of port selection mask
- */
-static inline u8 get_num_ports(u32 psm)
-{
- static const u8 bits[16] = { 0, 1, 1, 2, 1, 2, 2, 3,
- 1, 2, 2, 3, 2, 3, 3, 4 };
-
- return bits[psm & 0xf];
-}
-
-/**
- * init_luntable() - write an entry in the LUN table
- * @cfg: Internal structure associated with the host.
- * @lli: Per adapter LUN information structure.
- *
- * On successful return, a LUN table entry is created:
- * - at the top for LUNs visible on multiple ports.
- * - at the bottom for LUNs visible only on one port.
- *
- * Return: 0 on success, -errno on failure
- */
-static int init_luntable(struct cxlflash_cfg *cfg, struct llun_info *lli)
-{
- u32 chan;
- u32 lind;
- u32 nports;
- int rc = 0;
- int k;
- struct device *dev = &cfg->dev->dev;
- __be64 __iomem *fc_port_luns;
-
- mutex_lock(&global.mutex);
-
- if (lli->in_table)
- goto out;
-
- nports = get_num_ports(lli->port_sel);
- if (nports == 0 || nports > cfg->num_fc_ports) {
- WARN(1, "Unsupported port configuration nports=%u", nports);
- rc = -EIO;
- goto out;
- }
-
- if (nports > 1) {
- /*
- * When LUN is visible from multiple ports, we will put
- * it in the top half of the LUN table.
- */
- for (k = 0; k < cfg->num_fc_ports; k++) {
- if (!(lli->port_sel & (1 << k)))
- continue;
-
- if (cfg->promote_lun_index == cfg->last_lun_index[k]) {
- rc = -ENOSPC;
- goto out;
- }
- }
-
- lind = lli->lun_index = cfg->promote_lun_index;
- dev_dbg(dev, "%s: Virtual LUNs on slot %d:\n", __func__, lind);
-
- for (k = 0; k < cfg->num_fc_ports; k++) {
- if (!(lli->port_sel & (1 << k)))
- continue;
-
- fc_port_luns = get_fc_port_luns(cfg, k);
- writeq_be(lli->lun_id[k], &fc_port_luns[lind]);
- dev_dbg(dev, "\t%d=%llx\n", k, lli->lun_id[k]);
- }
-
- cfg->promote_lun_index++;
- } else {
- /*
- * When LUN is visible only from one port, we will put
- * it in the bottom half of the LUN table.
- */
- chan = PORTMASK2CHAN(lli->port_sel);
- if (cfg->promote_lun_index == cfg->last_lun_index[chan]) {
- rc = -ENOSPC;
- goto out;
- }
-
- lind = lli->lun_index = cfg->last_lun_index[chan];
- fc_port_luns = get_fc_port_luns(cfg, chan);
- writeq_be(lli->lun_id[chan], &fc_port_luns[lind]);
- cfg->last_lun_index[chan]--;
- dev_dbg(dev, "%s: Virtual LUNs on slot %d:\n\t%d=%llx\n",
- __func__, lind, chan, lli->lun_id[chan]);
- }
-
- lli->in_table = true;
-out:
- mutex_unlock(&global.mutex);
- dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
- return rc;
-}
-
-/**
- * cxlflash_disk_virtual_open() - open a virtual disk of specified size
- * @sdev: SCSI device associated with LUN owning virtual LUN.
- * @arg: UVirtual ioctl data structure.
- *
- * On successful return, the user is informed of the resource handle
- * to be used to identify the virtual LUN and the size (in blocks) of
- * the virtual LUN in last LBA format. When the size of the virtual LUN
- * is zero, the last LBA is reflected as -1.
- *
- * Return: 0 on success, -errno on failure
- */
-int cxlflash_disk_virtual_open(struct scsi_device *sdev, void *arg)
-{
- struct cxlflash_cfg *cfg = shost_priv(sdev->host);
- struct device *dev = &cfg->dev->dev;
- struct llun_info *lli = sdev->hostdata;
- struct glun_info *gli = lli->parent;
-
- struct dk_cxlflash_uvirtual *virt = (struct dk_cxlflash_uvirtual *)arg;
- struct dk_cxlflash_resize resize;
-
- u64 ctxid = DECODE_CTXID(virt->context_id),
- rctxid = virt->context_id;
- u64 lun_size = virt->lun_size;
- u64 last_lba = 0;
- u64 rsrc_handle = -1;
-
- int rc = 0;
-
- struct ctx_info *ctxi = NULL;
- struct sisl_rht_entry *rhte = NULL;
-
- dev_dbg(dev, "%s: ctxid=%llu ls=%llu\n", __func__, ctxid, lun_size);
-
-	/* Set up the LUN's block allocator on first call */
- mutex_lock(&gli->mutex);
- if (gli->mode == MODE_NONE) {
- rc = init_vlun(lli);
- if (rc) {
- dev_err(dev, "%s: init_vlun failed rc=%d\n",
- __func__, rc);
- rc = -ENOMEM;
- goto err0;
- }
- }
-
- rc = cxlflash_lun_attach(gli, MODE_VIRTUAL, true);
- if (unlikely(rc)) {
- dev_err(dev, "%s: Failed attach to LUN (VIRTUAL)\n", __func__);
- goto err0;
- }
- mutex_unlock(&gli->mutex);
-
- rc = init_luntable(cfg, lli);
- if (rc) {
- dev_err(dev, "%s: init_luntable failed rc=%d\n", __func__, rc);
- goto err1;
- }
-
- ctxi = get_context(cfg, rctxid, lli, 0);
- if (unlikely(!ctxi)) {
- dev_err(dev, "%s: Bad context ctxid=%llu\n", __func__, ctxid);
- rc = -EINVAL;
- goto err1;
- }
-
- rhte = rhte_checkout(ctxi, lli);
- if (unlikely(!rhte)) {
- dev_err(dev, "%s: too many opens ctxid=%llu\n",
- __func__, ctxid);
- rc = -EMFILE; /* too many opens */
- goto err1;
- }
-
- rsrc_handle = (rhte - ctxi->rht_start);
-
- /* Populate RHT format 0 */
- rhte->nmask = MC_RHT_NMASK;
- rhte->fp = SISL_RHT_FP(0U, ctxi->rht_perms);
-
- /* Resize even if requested size is 0 */
- marshal_virt_to_resize(virt, &resize);
- resize.rsrc_handle = rsrc_handle;
- rc = _cxlflash_vlun_resize(sdev, ctxi, &resize);
- if (rc) {
- dev_err(dev, "%s: resize failed rc=%d\n", __func__, rc);
- goto err2;
- }
- last_lba = resize.last_lba;
-
- if (virt->hdr.flags & DK_CXLFLASH_UVIRTUAL_NEED_WRITE_SAME)
- ctxi->rht_needs_ws[rsrc_handle] = true;
-
- virt->hdr.return_flags = 0;
- virt->last_lba = last_lba;
- virt->rsrc_handle = rsrc_handle;
-
- if (get_num_ports(lli->port_sel) > 1)
- virt->hdr.return_flags |= DK_CXLFLASH_ALL_PORTS_ACTIVE;
-out:
- if (likely(ctxi))
- put_context(ctxi);
- dev_dbg(dev, "%s: returning handle=%llu rc=%d llba=%llu\n",
- __func__, rsrc_handle, rc, last_lba);
- return rc;
-
-err2:
- rhte_checkin(ctxi, rhte);
-err1:
- cxlflash_lun_detach(gli);
- goto out;
-err0:
- /* Special common cleanup prior to successful LUN attach */
- cxlflash_ba_terminate(&gli->blka.ba_lun);
- mutex_unlock(&gli->mutex);
- goto out;
-}
-
-/**
- * clone_lxt() - copies translation tables from source to destination RHTE
- * @afu: AFU associated with the host.
- * @blka: Block allocator associated with LUN.
- * @ctxid: Context ID of context owning the RHTE.
- * @rhndl: Resource handle associated with the RHTE.
- * @rhte: Destination resource handle entry (RHTE).
- * @rhte_src: Source resource handle entry (RHTE).
- *
- * Return: 0 on success, -errno on failure
- */
-static int clone_lxt(struct afu *afu,
- struct blka *blka,
- ctx_hndl_t ctxid,
- res_hndl_t rhndl,
- struct sisl_rht_entry *rhte,
- struct sisl_rht_entry *rhte_src)
-{
- struct cxlflash_cfg *cfg = afu->parent;
- struct device *dev = &cfg->dev->dev;
- struct sisl_lxt_entry *lxt = NULL;
- bool locked = false;
- u32 ngrps;
- u64 aun; /* chunk# allocated by block allocator */
- int j;
- int i = 0;
- int rc = 0;
-
- ngrps = LXT_NUM_GROUPS(rhte_src->lxt_cnt);
-
- if (ngrps) {
- /* allocate new LXTs for clone */
- lxt = kzalloc((sizeof(*lxt) * LXT_GROUP_SIZE * ngrps),
- GFP_KERNEL);
- if (unlikely(!lxt)) {
- rc = -ENOMEM;
- goto out;
- }
-
- /* copy over */
- memcpy(lxt, rhte_src->lxt_start,
- (sizeof(*lxt) * rhte_src->lxt_cnt));
-
- /* clone the LBAs in block allocator via ref_cnt, note that the
- * block allocator mutex must be held until it is established
- * that this routine will complete without the need for a
- * cleanup.
- */
- mutex_lock(&blka->mutex);
- locked = true;
- for (i = 0; i < rhte_src->lxt_cnt; i++) {
- aun = (lxt[i].rlba_base >> MC_CHUNK_SHIFT);
- if (ba_clone(&blka->ba_lun, aun) == -1ULL) {
- rc = -EIO;
- goto err;
- }
- }
- }
-
- /*
- * The following sequence is prescribed in the SISlite spec
- * for syncing up with the AFU when adding LXT entries.
- */
-	dma_wmb(); /* Make LXT updates visible */
-
- rhte->lxt_start = lxt;
- dma_wmb(); /* Make RHT entry's LXT table update visible */
-
- rhte->lxt_cnt = rhte_src->lxt_cnt;
- dma_wmb(); /* Make RHT entry's LXT table size update visible */
-
- rc = cxlflash_afu_sync(afu, ctxid, rhndl, AFU_LW_SYNC);
- if (unlikely(rc)) {
- rc = -EAGAIN;
- goto err2;
- }
-
-out:
- if (locked)
- mutex_unlock(&blka->mutex);
- dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
- return rc;
-err2:
- /* Reset the RHTE */
- rhte->lxt_cnt = 0;
- dma_wmb();
- rhte->lxt_start = NULL;
- dma_wmb();
-err:
- /* free the clones already made */
- for (j = 0; j < i; j++) {
- aun = (lxt[j].rlba_base >> MC_CHUNK_SHIFT);
- ba_free(&blka->ba_lun, aun);
- }
- kfree(lxt);
- goto out;
-}
-
-/**
- * cxlflash_disk_clone() - clone a context by making a snapshot of another
- * @sdev: SCSI device associated with LUN owning virtual LUN.
- * @arg: Clone ioctl data structure.
- *
- * This routine effectively performs cxlflash_disk_open operation for each
- * in-use virtual resource in the source context. Note that the destination
- * context must be in pristine state and cannot have any resource handles
- * open at the time of the clone.
- *
- * Return: 0 on success, -errno on failure
- */
-int cxlflash_disk_clone(struct scsi_device *sdev, void *arg)
-{
- struct dk_cxlflash_clone *clone = arg;
- struct cxlflash_cfg *cfg = shost_priv(sdev->host);
- struct device *dev = &cfg->dev->dev;
- struct llun_info *lli = sdev->hostdata;
- struct glun_info *gli = lli->parent;
- struct blka *blka = &gli->blka;
- struct afu *afu = cfg->afu;
- struct dk_cxlflash_release release = { { 0 }, 0 };
-
- struct ctx_info *ctxi_src = NULL,
- *ctxi_dst = NULL;
- struct lun_access *lun_access_src, *lun_access_dst;
- u32 perms;
- u64 ctxid_src = DECODE_CTXID(clone->context_id_src),
- ctxid_dst = DECODE_CTXID(clone->context_id_dst),
- rctxid_src = clone->context_id_src,
- rctxid_dst = clone->context_id_dst;
- int i, j;
- int rc = 0;
- bool found;
- LIST_HEAD(sidecar);
-
- dev_dbg(dev, "%s: ctxid_src=%llu ctxid_dst=%llu\n",
- __func__, ctxid_src, ctxid_dst);
-
- /* Do not clone yourself */
- if (unlikely(rctxid_src == rctxid_dst)) {
- rc = -EINVAL;
- goto out;
- }
-
- if (unlikely(gli->mode != MODE_VIRTUAL)) {
- rc = -EINVAL;
- dev_dbg(dev, "%s: Only supported on virtual LUNs mode=%u\n",
- __func__, gli->mode);
- goto out;
- }
-
- ctxi_src = get_context(cfg, rctxid_src, lli, CTX_CTRL_CLONE);
- ctxi_dst = get_context(cfg, rctxid_dst, lli, 0);
- if (unlikely(!ctxi_src || !ctxi_dst)) {
- dev_dbg(dev, "%s: Bad context ctxid_src=%llu ctxid_dst=%llu\n",
- __func__, ctxid_src, ctxid_dst);
- rc = -EINVAL;
- goto out;
- }
-
- /* Verify there is no open resource handle in the destination context */
- for (i = 0; i < MAX_RHT_PER_CONTEXT; i++)
- if (ctxi_dst->rht_start[i].nmask != 0) {
- rc = -EINVAL;
- goto out;
- }
-
- /* Clone LUN access list */
- list_for_each_entry(lun_access_src, &ctxi_src->luns, list) {
- found = false;
- list_for_each_entry(lun_access_dst, &ctxi_dst->luns, list)
- if (lun_access_dst->sdev == lun_access_src->sdev) {
- found = true;
- break;
- }
-
- if (!found) {
- lun_access_dst = kzalloc(sizeof(*lun_access_dst),
- GFP_KERNEL);
- if (unlikely(!lun_access_dst)) {
- dev_err(dev, "%s: lun_access allocation fail\n",
- __func__);
- rc = -ENOMEM;
- goto out;
- }
-
- *lun_access_dst = *lun_access_src;
- list_add(&lun_access_dst->list, &sidecar);
- }
- }
-
- if (unlikely(!ctxi_src->rht_out)) {
- dev_dbg(dev, "%s: Nothing to clone\n", __func__);
- goto out_success;
- }
-
- /* User specified permission on attach */
- perms = ctxi_dst->rht_perms;
-
- /*
- * Copy over checked-out RHT (and their associated LXT) entries by
- * hand, stopping after we've copied all outstanding entries and
- * cleaning up if the clone fails.
- *
- * Note: This loop is equivalent to performing cxlflash_disk_open and
- * cxlflash_vlun_resize. As such, LUN accounting needs to be taken into
- * account by attaching after each successful RHT entry clone. In the
- * event that a clone failure is experienced, the LUN detach is handled
- * via the cleanup performed by _cxlflash_disk_release.
- */
- for (i = 0; i < MAX_RHT_PER_CONTEXT; i++) {
- if (ctxi_src->rht_out == ctxi_dst->rht_out)
- break;
- if (ctxi_src->rht_start[i].nmask == 0)
- continue;
-
- /* Consume a destination RHT entry */
- ctxi_dst->rht_out++;
- ctxi_dst->rht_start[i].nmask = ctxi_src->rht_start[i].nmask;
- ctxi_dst->rht_start[i].fp =
- SISL_RHT_FP_CLONE(ctxi_src->rht_start[i].fp, perms);
- ctxi_dst->rht_lun[i] = ctxi_src->rht_lun[i];
-
- rc = clone_lxt(afu, blka, ctxid_dst, i,
- &ctxi_dst->rht_start[i],
- &ctxi_src->rht_start[i]);
- if (rc) {
- marshal_clone_to_rele(clone, &release);
- for (j = 0; j < i; j++) {
- release.rsrc_handle = j;
- _cxlflash_disk_release(sdev, ctxi_dst,
- &release);
- }
-
- /* Put back the one we failed on */
- rhte_checkin(ctxi_dst, &ctxi_dst->rht_start[i]);
- goto err;
- }
-
- cxlflash_lun_attach(gli, gli->mode, false);
- }
-
-out_success:
- list_splice(&sidecar, &ctxi_dst->luns);
-
- /* fall through */
-out:
- if (ctxi_src)
- put_context(ctxi_src);
- if (ctxi_dst)
- put_context(ctxi_dst);
- dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
- return rc;
-
-err:
- list_for_each_entry_safe(lun_access_src, lun_access_dst, &sidecar, list)
- kfree(lun_access_src);
- goto out;
-}
diff --git a/drivers/scsi/cxlflash/vlun.h b/drivers/scsi/cxlflash/vlun.h
deleted file mode 100644
index 68e3ea52fe80..000000000000
--- a/drivers/scsi/cxlflash/vlun.h
+++ /dev/null
@@ -1,82 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * CXL Flash Device Driver
- *
- * Written by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>, IBM Corporation
- * Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
- *
- * Copyright (C) 2015 IBM Corporation
- */
-
-#ifndef _CXLFLASH_VLUN_H
-#define _CXLFLASH_VLUN_H
-
-/* RHT - Resource Handle Table */
-#define MC_RHT_NMASK 16 /* in bits */
-#define MC_CHUNK_SHIFT MC_RHT_NMASK /* shift to go from LBA to chunk# */
-
-#define HIBIT (BITS_PER_LONG - 1)
-
-#define MAX_AUN_CLONE_CNT 0xFF
-
-/*
- * LXT - LBA Translation Table
- *
- * +-------+-------+-------+-------+-------+-------+-------+---+---+
- * | RLBA_BASE |LUN_IDX| P |SEL|
- * +-------+-------+-------+-------+-------+-------+-------+---+---+
- *
- * The LXT Entry contains the physical LBA where the chunk starts (RLBA_BASE).
- * The AFU ORs the low order bits from the virtual LBA (offset into the chunk)
- * with RLBA_BASE. The result is the physical LBA to be sent to storage.
- * The LXT Entry also contains an index to a LUN TBL and a bitmask of which
- * outgoing (FC) ports can be selected. The port select bit-mask is ANDed
- * with a global port select bit-mask maintained by the driver.
- * In addition, it has permission bits that are ANDed with the
- * RHT permissions to arrive at the final permissions for the chunk.
- *
- * LXT tables are allocated dynamically in groups. This is done to avoid
- * a malloc/free overhead each time the LXT has to grow or shrink.
- *
- * Based on the current lxt_cnt (used), it is always possible to know
- * how many are allocated (used+free). The number of allocated entries is
- * not stored anywhere.
- *
- * The LXT table is re-allocated whenever it needs to cross into another group.
- */
-#define LXT_GROUP_SIZE 8
-#define LXT_NUM_GROUPS(lxt_cnt) (((lxt_cnt) + 7)/8) /* alloc'ed groups */
-#define LXT_LUNIDX_SHIFT 8 /* LXT entry, shift for LUN index */
-#define LXT_PERM_SHIFT 4 /* LXT entry, shift for permission bits */
-
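For illustration, an LXT entry is packed from these fields with the shifts
above, mirroring what the deleted grow_lxt() in vlun.c does (variable names
are taken from that code):

	lxt[i].rlba_base = ((aun << MC_CHUNK_SHIFT) |			/* chunk# from the block allocator */
			    (lli->lun_index << LXT_LUNIDX_SHIFT) |	/* LUN table index */
			    (RHT_PERM_RW << LXT_PERM_SHIFT) |		/* r/w permissions */
			    lli->port_sel);				/* port selection mask */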
-struct ba_lun_info {
- u64 *lun_alloc_map;
- u32 lun_bmap_size;
- u32 total_aus;
- u64 free_aun_cnt;
-
- /* indices to be used for elevator lookup of free map */
- u32 free_low_idx;
- u32 free_curr_idx;
- u32 free_high_idx;
-
- u8 *aun_clone_map;
-};
-
-struct ba_lun {
- u64 lun_id;
- u64 wwpn;
- size_t lsize; /* LUN size in number of LBAs */
- size_t lba_size; /* LBA size in number of bytes */
- size_t au_size; /* Allocation Unit size in number of LBAs */
- struct ba_lun_info *ba_lun_handle;
-};
-
-/* Block Allocator */
-struct blka {
- struct ba_lun ba_lun;
- u64 nchunk; /* number of chunks */
- struct mutex mutex;
-};
-
-#endif /* ifndef _CXLFLASH_VLUN_H */
diff --git a/drivers/scsi/elx/efct/efct_driver.c b/drivers/scsi/elx/efct/efct_driver.c
index 8469c156ab33..59f277593785 100644
--- a/drivers/scsi/elx/efct/efct_driver.c
+++ b/drivers/scsi/elx/efct/efct_driver.c
@@ -735,7 +735,7 @@ efct_pci_io_resume(struct pci_dev *pdev)
MODULE_DEVICE_TABLE(pci, efct_pci_table);
-static struct pci_error_handlers efct_pci_err_handler = {
+static const struct pci_error_handlers efct_pci_err_handler = {
.error_detected = efct_pci_io_error_detected,
.slot_reset = efct_pci_io_slot_reset,
.resume = efct_pci_io_resume,
diff --git a/drivers/scsi/fnic/fdls_disc.c b/drivers/scsi/fnic/fdls_disc.c
index 11211c469583..4c6bbf417a9a 100644
--- a/drivers/scsi/fnic/fdls_disc.c
+++ b/drivers/scsi/fnic/fdls_disc.c
@@ -308,43 +308,29 @@ void fdls_schedule_oxid_free_retry_work(struct work_struct *work)
struct fnic *fnic = iport->fnic;
struct reclaim_entry_s *reclaim_entry;
unsigned long delay_j = msecs_to_jiffies(OXID_RECLAIM_TOV(iport));
+ unsigned long flags;
int idx;
- spin_lock_irqsave(&fnic->fnic_lock, fnic->lock_flags);
-
for_each_set_bit(idx, oxid_pool->pending_schedule_free, FNIC_OXID_POOL_SZ) {
FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
"Schedule oxid free. oxid idx: %d\n", idx);
- spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags);
- reclaim_entry = (struct reclaim_entry_s *)
- kzalloc(sizeof(struct reclaim_entry_s), GFP_KERNEL);
- spin_lock_irqsave(&fnic->fnic_lock, fnic->lock_flags);
-
+ reclaim_entry = kzalloc(sizeof(*reclaim_entry), GFP_KERNEL);
if (!reclaim_entry) {
- FNIC_FCS_DBG(KERN_WARNING, fnic->host, fnic->fnic_num,
- "Failed to allocate memory for reclaim struct for oxid idx: 0x%x\n",
- idx);
-
schedule_delayed_work(&oxid_pool->schedule_oxid_free_retry,
msecs_to_jiffies(SCHEDULE_OXID_FREE_RETRY_TIME));
- spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags);
return;
}
- if (test_and_clear_bit(idx, oxid_pool->pending_schedule_free)) {
- reclaim_entry->oxid_idx = idx;
- reclaim_entry->expires = round_jiffies(jiffies + delay_j);
- list_add_tail(&reclaim_entry->links, &oxid_pool->oxid_reclaim_list);
- schedule_delayed_work(&oxid_pool->oxid_reclaim_work, delay_j);
- } else {
- /* unlikely scenario, free the allocated memory and continue */
- kfree(reclaim_entry);
- }
-}
-
- spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags);
+ clear_bit(idx, oxid_pool->pending_schedule_free);
+ reclaim_entry->oxid_idx = idx;
+ reclaim_entry->expires = round_jiffies(jiffies + delay_j);
+ spin_lock_irqsave(&fnic->fnic_lock, flags);
+ list_add_tail(&reclaim_entry->links, &oxid_pool->oxid_reclaim_list);
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ schedule_delayed_work(&oxid_pool->oxid_reclaim_work, delay_j);
+ }
}
static bool fdls_is_oxid_fabric_req(uint16_t oxid)
@@ -1567,9 +1553,9 @@ void fdls_send_fabric_logo(struct fnic_iport_s *iport)
iport->fabric.flags &= ~FNIC_FDLS_FABRIC_ABORT_ISSUED;
- FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
- "0x%x: FDLS send fabric LOGO with oxid: 0x%x",
- iport->fcid, oxid);
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: FDLS send fabric LOGO with oxid: 0x%x",
+ iport->fcid, oxid);
fnic_send_fcoe_frame(iport, frame, frame_size);
@@ -1898,7 +1884,6 @@ static void fdls_fdmi_register_hba(struct fnic_iport_s *iport)
if (fnic->subsys_desc_len >= FNIC_FDMI_MODEL_LEN)
fnic->subsys_desc_len = FNIC_FDMI_MODEL_LEN - 1;
strscpy_pad(data, fnic->subsys_desc, FNIC_FDMI_MODEL_LEN);
- data[FNIC_FDMI_MODEL_LEN - 1] = 0;
fnic_fdmi_attr_set(fdmi_attr, FNIC_FDMI_TYPE_MODEL, FNIC_FDMI_MODEL_LEN,
data, &attr_off_bytes);
@@ -2061,7 +2046,6 @@ static void fdls_fdmi_register_pa(struct fnic_iport_s *iport)
snprintf(tmp_data, FNIC_FDMI_OS_NAME_LEN - 1, "host%d",
fnic->host->host_no);
strscpy_pad(data, tmp_data, FNIC_FDMI_OS_NAME_LEN);
- data[FNIC_FDMI_OS_NAME_LEN - 1] = 0;
fnic_fdmi_attr_set(fdmi_attr, FNIC_FDMI_TYPE_OS_NAME,
FNIC_FDMI_OS_NAME_LEN, data, &attr_off_bytes);
@@ -2071,7 +2055,6 @@ static void fdls_fdmi_register_pa(struct fnic_iport_s *iport)
sprintf(fc_host_system_hostname(fnic->host), "%s", utsname()->nodename);
strscpy_pad(data, fc_host_system_hostname(fnic->host),
FNIC_FDMI_HN_LEN);
- data[FNIC_FDMI_HN_LEN - 1] = 0;
fnic_fdmi_attr_set(fdmi_attr, FNIC_FDMI_TYPE_HOST_NAME,
FNIC_FDMI_HN_LEN, data, &attr_off_bytes);
@@ -4659,13 +4642,13 @@ fnic_fdls_validate_and_get_frame_type(struct fnic_iport_s *iport,
d_id = ntoh24(fchdr->fh_d_id);
/* some common validation */
- if (fdls_get_state(fabric) > FDLS_STATE_FABRIC_FLOGI) {
- if ((iport->fcid != d_id) || (!FNIC_FC_FRAME_CS_CTL(fchdr))) {
- FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
- "invalid frame received. Dropping frame");
- return -1;
- }
+ if (fdls_get_state(fabric) > FDLS_STATE_FABRIC_FLOGI) {
+ if (iport->fcid != d_id || (!FNIC_FC_FRAME_CS_CTL(fchdr))) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "invalid frame received. Dropping frame");
+ return -1;
}
+ }
/* BLS ABTS response */
if ((fchdr->fh_r_ctl == FC_RCTL_BA_ACC)
@@ -4682,7 +4665,7 @@ fnic_fdls_validate_and_get_frame_type(struct fnic_iport_s *iport,
"Received unexpected ABTS RSP(oxid:0x%x) from 0x%x. Dropping frame",
oxid, s_id);
return -1;
- }
+ }
return FNIC_FABRIC_BLS_ABTS_RSP;
} else if (fdls_is_oxid_fdmi_req(oxid)) {
return FNIC_FDMI_BLS_ABTS_RSP;
diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c
index 0b20ac8c3f46..3dd06376e97b 100644
--- a/drivers/scsi/fnic/fnic_main.c
+++ b/drivers/scsi/fnic/fnic_main.c
@@ -1365,10 +1365,9 @@ static void __exit fnic_cleanup_module(void)
if (pc_rscn_handling_feature_flag == PC_RSCN_HANDLING_FEATURE_ON)
destroy_workqueue(reset_fnic_work_queue);
- if (fnic_fip_queue) {
- flush_workqueue(fnic_fip_queue);
+ if (fnic_fip_queue)
destroy_workqueue(fnic_fip_queue);
- }
+
kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_MAX]);
kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]);
kmem_cache_destroy(fnic_io_req_cache);
diff --git a/drivers/scsi/hisi_sas/hisi_sas.h b/drivers/scsi/hisi_sas/hisi_sas.h
index 2d438d722d0b..e17f5d8226bf 100644
--- a/drivers/scsi/hisi_sas/hisi_sas.h
+++ b/drivers/scsi/hisi_sas/hisi_sas.h
@@ -633,8 +633,7 @@ extern struct dentry *hisi_sas_debugfs_dir;
extern void hisi_sas_stop_phys(struct hisi_hba *hisi_hba);
extern int hisi_sas_alloc(struct hisi_hba *hisi_hba);
extern void hisi_sas_free(struct hisi_hba *hisi_hba);
-extern u8 hisi_sas_get_ata_protocol(struct host_to_dev_fis *fis,
- int direction);
+extern u8 hisi_sas_get_ata_protocol(struct sas_task *task);
extern struct hisi_sas_port *to_hisi_sas_port(struct asd_sas_port *sas_port);
extern void hisi_sas_sata_done(struct sas_task *task,
struct hisi_sas_slot *slot);
diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c
index da4a2ed8ee86..3596414d970b 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_main.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_main.c
@@ -21,8 +21,32 @@ struct hisi_sas_internal_abort_data {
bool rst_ha_timeout; /* reset the HA for timeout */
};
-u8 hisi_sas_get_ata_protocol(struct host_to_dev_fis *fis, int direction)
+static u8 hisi_sas_get_ata_protocol_from_tf(struct ata_queued_cmd *qc)
{
+ if (!qc)
+ return HISI_SAS_SATA_PROTOCOL_PIO;
+
+ switch (qc->tf.protocol) {
+ case ATA_PROT_NODATA:
+ return HISI_SAS_SATA_PROTOCOL_NONDATA;
+ case ATA_PROT_PIO:
+ return HISI_SAS_SATA_PROTOCOL_PIO;
+ case ATA_PROT_DMA:
+ return HISI_SAS_SATA_PROTOCOL_DMA;
+ case ATA_PROT_NCQ_NODATA:
+ case ATA_PROT_NCQ:
+ return HISI_SAS_SATA_PROTOCOL_FPDMA;
+ default:
+ return HISI_SAS_SATA_PROTOCOL_PIO;
+ }
+}
+
+u8 hisi_sas_get_ata_protocol(struct sas_task *task)
+{
+ struct host_to_dev_fis *fis = &task->ata_task.fis;
+ struct ata_queued_cmd *qc = task->uldd_task;
+ int direction = task->data_dir;
+
switch (fis->command) {
case ATA_CMD_FPDMA_WRITE:
case ATA_CMD_FPDMA_READ:
@@ -93,7 +117,7 @@ u8 hisi_sas_get_ata_protocol(struct host_to_dev_fis *fis, int direction)
{
if (direction == DMA_NONE)
return HISI_SAS_SATA_PROTOCOL_NONDATA;
- return HISI_SAS_SATA_PROTOCOL_PIO;
+ return hisi_sas_get_ata_protocol_from_tf(qc);
}
}
}
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
index bb78e53c66e2..6621d633b2cc 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
@@ -1806,7 +1806,7 @@ static struct platform_driver hisi_sas_v1_driver = {
.driver = {
.name = DRV_NAME,
.of_match_table = sas_v1_of_match,
- .acpi_match_table = ACPI_PTR(sas_v1_acpi_match),
+ .acpi_match_table = sas_v1_acpi_match,
},
};
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
index 71cd5b4450c2..04ee02797ca3 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
@@ -2538,9 +2538,7 @@ static void prep_ata_v2_hw(struct hisi_hba *hisi_hba,
(task->ata_task.fis.control & ATA_SRST))
dw1 |= 1 << CMD_HDR_RESET_OFF;
- dw1 |= (hisi_sas_get_ata_protocol(
- &task->ata_task.fis, task->data_dir))
- << CMD_HDR_FRAME_TYPE_OFF;
+ dw1 |= (hisi_sas_get_ata_protocol(task)) << CMD_HDR_FRAME_TYPE_OFF;
dw1 |= sas_dev->device_id << CMD_HDR_DEV_ID_OFF;
hdr->dw1 = cpu_to_le32(dw1);
@@ -3653,7 +3651,7 @@ static struct platform_driver hisi_sas_v2_driver = {
.driver = {
.name = DRV_NAME,
.of_match_table = sas_v2_of_match,
- .acpi_match_table = ACPI_PTR(sas_v2_acpi_match),
+ .acpi_match_table = sas_v2_acpi_match,
},
};
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
index 48b95d9a7927..095bbf80c34e 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
@@ -1456,9 +1456,7 @@ static void prep_ata_v3_hw(struct hisi_hba *hisi_hba,
(task->ata_task.fis.control & ATA_SRST))
dw1 |= 1 << CMD_HDR_RESET_OFF;
- dw1 |= (hisi_sas_get_ata_protocol(
- &task->ata_task.fis, task->data_dir))
- << CMD_HDR_FRAME_TYPE_OFF;
+ dw1 |= (hisi_sas_get_ata_protocol(task)) << CMD_HDR_FRAME_TYPE_OFF;
dw1 |= sas_dev->device_id << CMD_HDR_DEV_ID_OFF;
if (FIS_CMD_IS_UNCONSTRAINED(task->ata_task.fis))
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 84d8de07b7ae..c73a71ac3c29 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -453,17 +453,13 @@ static ssize_t host_store_hp_ssd_smart_path_status(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- int status, len;
+ int status;
struct ctlr_info *h;
struct Scsi_Host *shost = class_to_shost(dev);
- char tmpbuf[10];
if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
return -EACCES;
- len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count;
- strncpy(tmpbuf, buf, len);
- tmpbuf[len] = '\0';
- if (sscanf(tmpbuf, "%d", &status) != 1)
+ if (kstrtoint(buf, 10, &status))
return -EINVAL;
h = shost_to_hba(shost);
h->acciopath_status = !!status;
@@ -477,17 +473,13 @@ static ssize_t host_store_raid_offload_debug(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- int debug_level, len;
+ int debug_level;
struct ctlr_info *h;
struct Scsi_Host *shost = class_to_shost(dev);
- char tmpbuf[10];
if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
return -EACCES;
- len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count;
- strncpy(tmpbuf, buf, len);
- tmpbuf[len] = '\0';
- if (sscanf(tmpbuf, "%d", &debug_level) != 1)
+ if (kstrtoint(buf, 10, &debug_level))
return -EINVAL;
if (debug_level < 0)
debug_level = 0;
@@ -7238,8 +7230,7 @@ static int hpsa_controller_hard_reset(struct pci_dev *pdev,
static void init_driver_version(char *driver_version, int len)
{
- memset(driver_version, 0, len);
- strncpy(driver_version, HPSA " " HPSA_DRIVER_VERSION, len - 1);
+ strscpy_pad(driver_version, HPSA " " HPSA_DRIVER_VERSION, len);
}
static int write_driver_ver_to_cfgtable(struct CfgTable __iomem *cfgtable)
diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
index 16d085d56e9d..9e42230e42b8 100644
--- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
+++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
@@ -2922,9 +2922,7 @@ static long ibmvscsis_alloctimer(struct scsi_info *vscsi)
struct timer_cb *p_timer;
p_timer = &vscsi->rsp_q_timer;
- hrtimer_init(&p_timer->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
-
- p_timer->timer.function = ibmvscsis_service_wait_q;
+ hrtimer_setup(&p_timer->timer, ibmvscsis_service_wait_q, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
p_timer->started = false;
p_timer->timer_pops = 0;
diff --git a/drivers/scsi/ips.c b/drivers/scsi/ips.c
index cce6c6b409ad..94adb6ac02a4 100644
--- a/drivers/scsi/ips.c
+++ b/drivers/scsi/ips.c
@@ -3631,8 +3631,8 @@ ips_send_cmd(ips_ha_t * ha, ips_scb_t * scb)
break;
- case RESERVE:
- case RELEASE:
+ case RESERVE_6:
+ case RELEASE_6:
scb->scsi_cmd->result = DID_OK << 16;
break;
@@ -3899,8 +3899,8 @@ ips_chkstatus(ips_ha_t * ha, IPS_STATUS * pstatus)
case WRITE_6:
case READ_10:
case WRITE_10:
- case RESERVE:
- case RELEASE:
+ case RESERVE_6:
+ case RELEASE_6:
break;
case MODE_SENSE:
diff --git a/drivers/scsi/isci/init.c b/drivers/scsi/isci/init.c
index 73085d2f5c43..acf0c2038d20 100644
--- a/drivers/scsi/isci/init.c
+++ b/drivers/scsi/isci/init.c
@@ -91,31 +91,31 @@ MODULE_DEVICE_TABLE(pci, isci_id_table);
/* linux isci specific settings */
-unsigned char no_outbound_task_to = 2;
+static unsigned char no_outbound_task_to = 2;
module_param(no_outbound_task_to, byte, 0);
MODULE_PARM_DESC(no_outbound_task_to, "No Outbound Task Timeout (1us incr)");
-u16 ssp_max_occ_to = 20;
+static u16 ssp_max_occ_to = 20;
module_param(ssp_max_occ_to, ushort, 0);
MODULE_PARM_DESC(ssp_max_occ_to, "SSP Max occupancy timeout (100us incr)");
-u16 stp_max_occ_to = 5;
+static u16 stp_max_occ_to = 5;
module_param(stp_max_occ_to, ushort, 0);
MODULE_PARM_DESC(stp_max_occ_to, "STP Max occupancy timeout (100us incr)");
-u16 ssp_inactive_to = 5;
+static u16 ssp_inactive_to = 5;
module_param(ssp_inactive_to, ushort, 0);
MODULE_PARM_DESC(ssp_inactive_to, "SSP inactivity timeout (100us incr)");
-u16 stp_inactive_to = 5;
+static u16 stp_inactive_to = 5;
module_param(stp_inactive_to, ushort, 0);
MODULE_PARM_DESC(stp_inactive_to, "STP inactivity timeout (100us incr)");
-unsigned char phy_gen = SCIC_SDS_PARM_GEN2_SPEED;
+static unsigned char phy_gen = SCIC_SDS_PARM_GEN2_SPEED;
module_param(phy_gen, byte, 0);
MODULE_PARM_DESC(phy_gen, "PHY generation (1: 1.5Gbps 2: 3.0Gbps 3: 6.0Gbps)");
-unsigned char max_concurr_spinup;
+static unsigned char max_concurr_spinup;
module_param(max_concurr_spinup, byte, 0);
MODULE_PARM_DESC(max_concurr_spinup, "Max concurrent device spinup");
diff --git a/drivers/scsi/isci/isci.h b/drivers/scsi/isci/isci.h
index 4e6b1decbca7..f6a8fe206415 100644
--- a/drivers/scsi/isci/isci.h
+++ b/drivers/scsi/isci/isci.h
@@ -473,13 +473,6 @@ static inline void sci_swab32_cpy(void *_dest, void *_src, ssize_t word_cnt)
dest[word_cnt] = swab32(src[word_cnt]);
}
-extern unsigned char no_outbound_task_to;
-extern u16 ssp_max_occ_to;
-extern u16 stp_max_occ_to;
-extern u16 ssp_inactive_to;
-extern u16 stp_inactive_to;
-extern unsigned char phy_gen;
-extern unsigned char max_concurr_spinup;
extern uint cable_selection_override;
irqreturn_t isci_msix_isr(int vec, void *data);
diff --git a/drivers/scsi/isci/remote_device.h b/drivers/scsi/isci/remote_device.h
index 27ae45332704..561ae3f2cbbd 100644
--- a/drivers/scsi/isci/remote_device.h
+++ b/drivers/scsi/isci/remote_device.h
@@ -198,7 +198,7 @@ enum sci_status sci_remote_device_reset(
* device. When there are no active IO for the device it is is in this
* state.
*
- * @SCI_STP_DEV_CMD: This is the command state for for the STP remote
+ * @SCI_STP_DEV_CMD: This is the command state for the STP remote
* device. This state is entered when the device is processing a
* non-NCQ command. The device object will fail any new start IO
* requests until this command is complete.
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index e81f60985193..7b4fe0e6afb2 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -17,7 +17,6 @@
* Zhenyu Wang
*/
-#include <crypto/hash.h>
#include <linux/types.h>
#include <linux/inet.h>
#include <linux/slab.h>
@@ -468,8 +467,7 @@ static void iscsi_sw_tcp_send_hdr_prep(struct iscsi_conn *conn, void *hdr,
* sufficient room.
*/
if (conn->hdrdgst_en) {
- iscsi_tcp_dgst_header(tcp_sw_conn->tx_hash, hdr, hdrlen,
- hdr + hdrlen);
+ iscsi_tcp_dgst_header(hdr, hdrlen, hdr + hdrlen);
hdrlen += ISCSI_DIGEST_SIZE;
}
@@ -494,7 +492,7 @@ iscsi_sw_tcp_send_data_prep(struct iscsi_conn *conn, struct scatterlist *sg,
{
struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
- struct ahash_request *tx_hash = NULL;
+ u32 *tx_crcp = NULL;
unsigned int hdr_spec_len;
ISCSI_SW_TCP_DBG(conn, "offset=%d, datalen=%d %s\n", offset, len,
@@ -507,11 +505,10 @@ iscsi_sw_tcp_send_data_prep(struct iscsi_conn *conn, struct scatterlist *sg,
WARN_ON(iscsi_padded(len) != iscsi_padded(hdr_spec_len));
if (conn->datadgst_en)
- tx_hash = tcp_sw_conn->tx_hash;
+ tx_crcp = &tcp_sw_conn->tx_crc;
return iscsi_segment_seek_sg(&tcp_sw_conn->out.data_segment,
- sg, count, offset, len,
- NULL, tx_hash);
+ sg, count, offset, len, NULL, tx_crcp);
}
static void
@@ -520,7 +517,7 @@ iscsi_sw_tcp_send_linear_data_prep(struct iscsi_conn *conn, void *data,
{
struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
- struct ahash_request *tx_hash = NULL;
+ u32 *tx_crcp = NULL;
unsigned int hdr_spec_len;
ISCSI_SW_TCP_DBG(conn, "datalen=%zd %s\n", len, conn->datadgst_en ?
@@ -532,10 +529,10 @@ iscsi_sw_tcp_send_linear_data_prep(struct iscsi_conn *conn, void *data,
WARN_ON(iscsi_padded(len) != iscsi_padded(hdr_spec_len));
if (conn->datadgst_en)
- tx_hash = tcp_sw_conn->tx_hash;
+ tx_crcp = &tcp_sw_conn->tx_crc;
iscsi_segment_init_linear(&tcp_sw_conn->out.data_segment,
- data, len, NULL, tx_hash);
+ data, len, NULL, tx_crcp);
}
static int iscsi_sw_tcp_pdu_init(struct iscsi_task *task,
@@ -583,7 +580,6 @@ iscsi_sw_tcp_conn_create(struct iscsi_cls_session *cls_session,
struct iscsi_cls_conn *cls_conn;
struct iscsi_tcp_conn *tcp_conn;
struct iscsi_sw_tcp_conn *tcp_sw_conn;
- struct crypto_ahash *tfm;
cls_conn = iscsi_tcp_conn_setup(cls_session, sizeof(*tcp_sw_conn),
conn_idx);
@@ -596,37 +592,9 @@ iscsi_sw_tcp_conn_create(struct iscsi_cls_session *cls_session,
tcp_sw_conn->queue_recv = iscsi_recv_from_iscsi_q;
mutex_init(&tcp_sw_conn->sock_lock);
-
- tfm = crypto_alloc_ahash("crc32c", 0, CRYPTO_ALG_ASYNC);
- if (IS_ERR(tfm))
- goto free_conn;
-
- tcp_sw_conn->tx_hash = ahash_request_alloc(tfm, GFP_KERNEL);
- if (!tcp_sw_conn->tx_hash)
- goto free_tfm;
- ahash_request_set_callback(tcp_sw_conn->tx_hash, 0, NULL, NULL);
-
- tcp_sw_conn->rx_hash = ahash_request_alloc(tfm, GFP_KERNEL);
- if (!tcp_sw_conn->rx_hash)
- goto free_tx_hash;
- ahash_request_set_callback(tcp_sw_conn->rx_hash, 0, NULL, NULL);
-
- tcp_conn->rx_hash = tcp_sw_conn->rx_hash;
+ tcp_conn->rx_crcp = &tcp_sw_conn->rx_crc;
return cls_conn;
-
-free_tx_hash:
- ahash_request_free(tcp_sw_conn->tx_hash);
-free_tfm:
- crypto_free_ahash(tfm);
-free_conn:
- iscsi_conn_printk(KERN_ERR, conn,
- "Could not create connection due to crc32c "
- "loading error. Make sure the crc32c "
- "module is built as a module or into the "
- "kernel\n");
- iscsi_tcp_conn_teardown(cls_conn);
- return NULL;
}
static void iscsi_sw_tcp_release_conn(struct iscsi_conn *conn)
@@ -664,20 +632,8 @@ static void iscsi_sw_tcp_release_conn(struct iscsi_conn *conn)
static void iscsi_sw_tcp_conn_destroy(struct iscsi_cls_conn *cls_conn)
{
struct iscsi_conn *conn = cls_conn->dd_data;
- struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
- struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
iscsi_sw_tcp_release_conn(conn);
-
- ahash_request_free(tcp_sw_conn->rx_hash);
- if (tcp_sw_conn->tx_hash) {
- struct crypto_ahash *tfm;
-
- tfm = crypto_ahash_reqtfm(tcp_sw_conn->tx_hash);
- ahash_request_free(tcp_sw_conn->tx_hash);
- crypto_free_ahash(tfm);
- }
-
iscsi_tcp_conn_teardown(cls_conn);
}
diff --git a/drivers/scsi/iscsi_tcp.h b/drivers/scsi/iscsi_tcp.h
index 89a6fc552f0b..c3e5d9fa6add 100644
--- a/drivers/scsi/iscsi_tcp.h
+++ b/drivers/scsi/iscsi_tcp.h
@@ -41,8 +41,8 @@ struct iscsi_sw_tcp_conn {
void (*old_write_space)(struct sock *);
/* data and header digests */
- struct ahash_request *tx_hash; /* CRC32C (Tx) */
- struct ahash_request *rx_hash; /* CRC32C (Rx) */
+ u32 tx_crc; /* CRC32C (Tx) */
+ u32 rx_crc; /* CRC32C (Rx) */
/* MIB custom statistics */
uint32_t sendpage_failures_cnt;
diff --git a/drivers/scsi/libiscsi_tcp.c b/drivers/scsi/libiscsi_tcp.c
index c182aa83f2c9..e90805ba868f 100644
--- a/drivers/scsi/libiscsi_tcp.c
+++ b/drivers/scsi/libiscsi_tcp.c
@@ -15,7 +15,7 @@
* Zhenyu Wang
*/
-#include <crypto/hash.h>
+#include <linux/crc32c.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/inet.h>
@@ -168,7 +168,7 @@ iscsi_tcp_segment_splice_digest(struct iscsi_segment *segment, void *digest)
segment->size = ISCSI_DIGEST_SIZE;
segment->copied = 0;
segment->sg = NULL;
- segment->hash = NULL;
+ segment->crcp = NULL;
}
/**
@@ -191,29 +191,27 @@ int iscsi_tcp_segment_done(struct iscsi_tcp_conn *tcp_conn,
struct iscsi_segment *segment, int recv,
unsigned copied)
{
- struct scatterlist sg;
unsigned int pad;
ISCSI_DBG_TCP(tcp_conn->iscsi_conn, "copied %u %u size %u %s\n",
segment->copied, copied, segment->size,
recv ? "recv" : "xmit");
- if (segment->hash && copied) {
- /*
- * If a segment is kmapd we must unmap it before sending
- * to the crypto layer since that will try to kmap it again.
- */
- iscsi_tcp_segment_unmap(segment);
-
- if (!segment->data) {
- sg_init_table(&sg, 1);
- sg_set_page(&sg, sg_page(segment->sg), copied,
- segment->copied + segment->sg_offset +
- segment->sg->offset);
- } else
- sg_init_one(&sg, segment->data + segment->copied,
- copied);
- ahash_request_set_crypt(segment->hash, &sg, NULL, copied);
- crypto_ahash_update(segment->hash);
+ if (segment->crcp && copied) {
+ if (segment->data) {
+ *segment->crcp = crc32c(*segment->crcp,
+ segment->data + segment->copied,
+ copied);
+ } else {
+ const void *data;
+
+ data = kmap_local_page(sg_page(segment->sg));
+ *segment->crcp = crc32c(*segment->crcp,
+ data + segment->copied +
+ segment->sg_offset +
+ segment->sg->offset,
+ copied);
+ kunmap_local(data);
+ }
}
segment->copied += copied;
@@ -258,10 +256,8 @@ int iscsi_tcp_segment_done(struct iscsi_tcp_conn *tcp_conn,
* Set us up for transferring the data digest. hdr digest
* is completely handled in hdr done function.
*/
- if (segment->hash) {
- ahash_request_set_crypt(segment->hash, NULL,
- segment->digest, 0);
- crypto_ahash_final(segment->hash);
+ if (segment->crcp) {
+ put_unaligned_le32(~*segment->crcp, segment->digest);
iscsi_tcp_segment_splice_digest(segment,
recv ? segment->recv_digest : segment->digest);
return 0;
@@ -282,8 +278,7 @@ EXPORT_SYMBOL_GPL(iscsi_tcp_segment_done);
* given buffer, and returns the number of bytes
* consumed, which can actually be less than @len.
*
- * If hash digest is enabled, the function will update the
- * hash while copying.
+ * If CRC is enabled, the function will update the CRC while copying.
* Combining these two operations doesn't buy us a lot (yet),
* but in the future we could implement combined copy+crc,
* just way we do for network layer checksums.
@@ -311,14 +306,10 @@ iscsi_tcp_segment_recv(struct iscsi_tcp_conn *tcp_conn,
}
inline void
-iscsi_tcp_dgst_header(struct ahash_request *hash, const void *hdr,
- size_t hdrlen, unsigned char digest[ISCSI_DIGEST_SIZE])
+iscsi_tcp_dgst_header(const void *hdr, size_t hdrlen,
+ unsigned char digest[ISCSI_DIGEST_SIZE])
{
- struct scatterlist sg;
-
- sg_init_one(&sg, hdr, hdrlen);
- ahash_request_set_crypt(hash, &sg, digest, hdrlen);
- crypto_ahash_digest(hash);
+ put_unaligned_le32(~crc32c(~0, hdr, hdrlen), digest);
}
EXPORT_SYMBOL_GPL(iscsi_tcp_dgst_header);
@@ -343,24 +334,23 @@ iscsi_tcp_dgst_verify(struct iscsi_tcp_conn *tcp_conn,
*/
static inline void
__iscsi_segment_init(struct iscsi_segment *segment, size_t size,
- iscsi_segment_done_fn_t *done, struct ahash_request *hash)
+ iscsi_segment_done_fn_t *done, u32 *crcp)
{
memset(segment, 0, sizeof(*segment));
segment->total_size = size;
segment->done = done;
- if (hash) {
- segment->hash = hash;
- crypto_ahash_init(hash);
+ if (crcp) {
+ segment->crcp = crcp;
+ *crcp = ~0;
}
}
inline void
iscsi_segment_init_linear(struct iscsi_segment *segment, void *data,
- size_t size, iscsi_segment_done_fn_t *done,
- struct ahash_request *hash)
+ size_t size, iscsi_segment_done_fn_t *done, u32 *crcp)
{
- __iscsi_segment_init(segment, size, done, hash);
+ __iscsi_segment_init(segment, size, done, crcp);
segment->data = data;
segment->size = size;
}
@@ -370,13 +360,12 @@ inline int
iscsi_segment_seek_sg(struct iscsi_segment *segment,
struct scatterlist *sg_list, unsigned int sg_count,
unsigned int offset, size_t size,
- iscsi_segment_done_fn_t *done,
- struct ahash_request *hash)
+ iscsi_segment_done_fn_t *done, u32 *crcp)
{
struct scatterlist *sg;
unsigned int i;
- __iscsi_segment_init(segment, size, done, hash);
+ __iscsi_segment_init(segment, size, done, crcp);
for_each_sg(sg_list, sg, sg_count, i) {
if (offset < sg->length) {
iscsi_tcp_segment_init_sg(segment, sg, offset);
@@ -393,7 +382,7 @@ EXPORT_SYMBOL_GPL(iscsi_segment_seek_sg);
* iscsi_tcp_hdr_recv_prep - prep segment for hdr reception
* @tcp_conn: iscsi connection to prep for
*
- * This function always passes NULL for the hash argument, because when this
+ * This function always passes NULL for the crcp argument, because when this
* function is called we do not yet know the final size of the header and want
* to delay the digest processing until we know that.
*/
@@ -434,15 +423,15 @@ static void
iscsi_tcp_data_recv_prep(struct iscsi_tcp_conn *tcp_conn)
{
struct iscsi_conn *conn = tcp_conn->iscsi_conn;
- struct ahash_request *rx_hash = NULL;
+ u32 *rx_crcp = NULL;
if (conn->datadgst_en &&
!(conn->session->tt->caps & CAP_DIGEST_OFFLOAD))
- rx_hash = tcp_conn->rx_hash;
+ rx_crcp = tcp_conn->rx_crcp;
iscsi_segment_init_linear(&tcp_conn->in.segment,
conn->data, tcp_conn->in.datalen,
- iscsi_tcp_data_recv_done, rx_hash);
+ iscsi_tcp_data_recv_done, rx_crcp);
}
/**
@@ -730,7 +719,7 @@ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
if (tcp_conn->in.datalen) {
struct iscsi_tcp_task *tcp_task = task->dd_data;
- struct ahash_request *rx_hash = NULL;
+ u32 *rx_crcp = NULL;
struct scsi_data_buffer *sdb = &task->sc->sdb;
/*
@@ -743,7 +732,7 @@ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
*/
if (conn->datadgst_en &&
!(conn->session->tt->caps & CAP_DIGEST_OFFLOAD))
- rx_hash = tcp_conn->rx_hash;
+ rx_crcp = tcp_conn->rx_crcp;
ISCSI_DBG_TCP(conn, "iscsi_tcp_begin_data_in( "
"offset=%d, datalen=%d)\n",
@@ -756,7 +745,7 @@ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
tcp_task->data_offset,
tcp_conn->in.datalen,
iscsi_tcp_process_data_in,
- rx_hash);
+ rx_crcp);
spin_unlock(&conn->session->back_lock);
return rc;
}
@@ -878,7 +867,7 @@ iscsi_tcp_hdr_recv_done(struct iscsi_tcp_conn *tcp_conn,
return 0;
}
- iscsi_tcp_dgst_header(tcp_conn->rx_hash, hdr,
+ iscsi_tcp_dgst_header(hdr,
segment->total_copied - ISCSI_DIGEST_SIZE,
segment->digest);
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index e5a9c5a323f8..fe4fb67eb50c 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -74,8 +74,7 @@ struct lpfc_sli2_slim;
* queue depths when there are driver resource error or Firmware
* resource error.
*/
-/* 1 Second */
-#define QUEUE_RAMP_DOWN_INTERVAL (msecs_to_jiffies(1000 * 1))
+#define QUEUE_RAMP_DOWN_INTERVAL (secs_to_jiffies(1))
/* Number of exchanges reserved for discovery to complete */
#define LPFC_DISC_IOCB_BUFF_COUNT 20
@@ -1715,18 +1714,12 @@ lpfc_phba_elsring(struct lpfc_hba *phba)
* Note: If no valid cpu found, then nr_cpu_ids is returned.
*
**/
-static inline unsigned int
+static __always_inline unsigned int
lpfc_next_online_cpu(const struct cpumask *mask, unsigned int start)
{
- unsigned int cpu_it;
-
- for_each_cpu_wrap(cpu_it, mask, start) {
- if (cpu_online(cpu_it))
- break;
- }
-
- return cpu_it;
+ return cpumask_next_and_wrap(start, mask, cpu_online_mask);
}
+
/**
* lpfc_next_present_cpu - Finds next present CPU after n
* @n: the cpu prior to search
@@ -1734,16 +1727,9 @@ lpfc_next_online_cpu(const struct cpumask *mask, unsigned int start)
* Note: If no next present cpu, then fallback to first present cpu.
*
**/
-static inline unsigned int lpfc_next_present_cpu(int n)
+static __always_inline unsigned int lpfc_next_present_cpu(int n)
{
- unsigned int cpu;
-
- cpu = cpumask_next(n, cpu_present_mask);
-
- if (cpu >= nr_cpu_ids)
- cpu = cpumask_first(cpu_present_mask);
-
- return cpu;
+ return cpumask_next_wrap(n, cpu_present_mask);
}
/**
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 1d7db49a8fe4..e08b48b1b655 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2024 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2025 Broadcom. All Rights Reserved. The term *
 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -8045,8 +8045,7 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
if (test_bit(FC_DISC_TMO, &vport->fc_flag)) {
tmo = ((phba->fc_ratov * 3) + 3);
mod_timer(&vport->fc_disctmo,
- jiffies +
- msecs_to_jiffies(1000 * tmo));
+ jiffies + secs_to_jiffies(tmo));
}
return 0;
}
@@ -8081,7 +8080,7 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
if (test_bit(FC_DISC_TMO, &vport->fc_flag)) {
tmo = ((phba->fc_ratov * 3) + 3);
mod_timer(&vport->fc_disctmo,
- jiffies + msecs_to_jiffies(1000 * tmo));
+ jiffies + secs_to_jiffies(tmo));
}
if ((rscn_cnt < FC_MAX_HOLD_RSCN) &&
!test_bit(FC_RSCN_DISCOVERY, &vport->fc_flag)) {
@@ -9511,7 +9510,7 @@ lpfc_els_timeout_handler(struct lpfc_vport *vport)
if (!list_empty(&pring->txcmplq))
if (!test_bit(FC_UNLOADING, &phba->pport->load_flag))
mod_timer(&vport->els_tmofunc,
- jiffies + msecs_to_jiffies(1000 * timeout));
+ jiffies + secs_to_jiffies(timeout));
}
/**
@@ -9569,18 +9568,16 @@ lpfc_els_flush_cmd(struct lpfc_vport *vport)
mbx_tmo_err = test_bit(MBX_TMO_ERR, &phba->bit_flags);
/* First we need to issue aborts to outstanding cmds on txcmpl */
list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) {
+ if (piocb->vport != vport)
+ continue;
+
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"2243 iotag = 0x%x cmd_flag = 0x%x "
- "ulp_command = 0x%x this_vport %x "
- "sli_flag = 0x%x\n",
+ "ulp_command = 0x%x sli_flag = 0x%x\n",
piocb->iotag, piocb->cmd_flag,
get_job_cmnd(phba, piocb),
- (piocb->vport == vport),
phba->sli.sli_flag);
- if (piocb->vport != vport)
- continue;
-
if ((phba->sli.sli_flag & LPFC_SLI_ACTIVE) && !mbx_tmo_err) {
if (piocb->cmd_flag & LPFC_IO_LIBDFC)
continue;
@@ -10899,7 +10896,7 @@ lpfc_do_scr_ns_plogi(struct lpfc_hba *phba, struct lpfc_vport *vport)
"3334 Delay fc port discovery for %d secs\n",
phba->fc_ratov);
mod_timer(&vport->delayed_disc_tmo,
- jiffies + msecs_to_jiffies(1000 * phba->fc_ratov));
+ jiffies + secs_to_jiffies(phba->fc_ratov));
return;
}
@@ -11156,7 +11153,7 @@ lpfc_retry_pport_discovery(struct lpfc_hba *phba)
if (!ndlp)
return;
- mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000));
+ mod_timer(&ndlp->nlp_delayfunc, jiffies + secs_to_jiffies(1));
set_bit(NLP_DELAY_TMO, &ndlp->nlp_flag);
ndlp->nlp_last_elscmd = ELS_CMD_FLOGI;
phba->pport->port_state = LPFC_FLOGI;
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 36e66df36a18..8ca590e8469b 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2024 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2025 Broadcom. All Rights Reserved. The term *
 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -228,10 +228,16 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
if (ndlp->nlp_state == NLP_STE_MAPPED_NODE)
return;
- /* check for recovered fabric node */
- if (ndlp->nlp_state == NLP_STE_UNMAPPED_NODE &&
- ndlp->nlp_DID == Fabric_DID)
+ /* Ignore callback for a mismatched (stale) rport */
+ if (ndlp->rport != rport) {
+ lpfc_vlog_msg(vport, KERN_WARNING, LOG_NODE,
+ "6788 fc rport mismatch: d_id x%06x ndlp x%px "
+ "fc rport x%px node rport x%px state x%x "
+ "refcnt %u\n",
+ ndlp->nlp_DID, ndlp, rport, ndlp->rport,
+ ndlp->nlp_state, kref_read(&ndlp->kref));
return;
+ }
if (rport->port_name != wwn_to_u64(ndlp->nlp_portname.u.wwn))
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
@@ -3518,7 +3524,7 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
if (phba->fc_topology &&
phba->fc_topology != bf_get(lpfc_mbx_read_top_topology, la)) {
lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
- "3314 Toplogy changed was 0x%x is 0x%x\n",
+ "3314 Topology changed was 0x%x is 0x%x\n",
phba->fc_topology,
bf_get(lpfc_mbx_read_top_topology, la));
phba->fc_topology_changed = 1;
@@ -4973,7 +4979,7 @@ lpfc_set_disctmo(struct lpfc_vport *vport)
tmo, vport->port_state, vport->fc_flag);
}
- mod_timer(&vport->fc_disctmo, jiffies + msecs_to_jiffies(1000 * tmo));
+ mod_timer(&vport->fc_disctmo, jiffies + secs_to_jiffies(tmo));
set_bit(FC_DISC_TMO, &vport->fc_flag);
/* Start Discovery Timer state <hba_state> */
@@ -5564,6 +5570,7 @@ static struct lpfc_nodelist *
__lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did)
{
struct lpfc_nodelist *ndlp;
+ struct lpfc_nodelist *np = NULL;
uint32_t data1;
list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
@@ -5578,14 +5585,20 @@ __lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did)
ndlp, ndlp->nlp_DID,
ndlp->nlp_flag, data1, ndlp->nlp_rpi,
ndlp->active_rrqs_xri_bitmap);
- return ndlp;
+
+ /* Check for new or potentially stale node */
+ if (ndlp->nlp_state != NLP_STE_UNUSED_NODE)
+ return ndlp;
+ np = ndlp;
}
}
- /* FIND node did <did> NOT FOUND */
- lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
- "0932 FIND node did x%x NOT FOUND.\n", did);
- return NULL;
+ if (!np)
+ /* FIND node did <did> NOT FOUND */
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
+ "0932 FIND node did x%x NOT FOUND.\n", did);
+
+ return np;
}
struct lpfc_nodelist *
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index bcadf11414c8..7238608ca49f 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2024 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2025 Broadcom. All Rights Reserved. The term *
 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -595,7 +595,7 @@ lpfc_config_port_post(struct lpfc_hba *phba)
/* Set up ring-0 (ELS) timer */
timeout = phba->fc_ratov * 2;
mod_timer(&vport->els_tmofunc,
- jiffies + msecs_to_jiffies(1000 * timeout));
+ jiffies + secs_to_jiffies(timeout));
/* Set up heart beat (HB) timer */
mod_timer(&phba->hb_tmofunc,
jiffies + secs_to_jiffies(LPFC_HB_MBOX_INTERVAL));
@@ -604,7 +604,7 @@ lpfc_config_port_post(struct lpfc_hba *phba)
phba->last_completion_time = jiffies;
/* Set up error attention (ERATT) polling timer */
mod_timer(&phba->eratt_poll,
- jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval));
+ jiffies + secs_to_jiffies(phba->eratt_poll_interval));
if (test_bit(LINK_DISABLED, &phba->hba_flag)) {
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
@@ -3361,8 +3361,8 @@ lpfc_block_mgmt_io(struct lpfc_hba *phba, int mbx_action)
/* Determine how long we might wait for the active mailbox
* command to be gracefully completed by firmware.
*/
- timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
- phba->sli.mbox_active) * 1000) + jiffies;
+ timeout = secs_to_jiffies(lpfc_mbox_tmo_val(phba,
+ phba->sli.mbox_active)) + jiffies;
}
spin_unlock_irqrestore(&phba->hbalock, iflag);
@@ -6909,7 +6909,7 @@ lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
* re-instantiate the Vlink using FDISC.
*/
mod_timer(&ndlp->nlp_delayfunc,
- jiffies + msecs_to_jiffies(1000));
+ jiffies + secs_to_jiffies(1));
set_bit(NLP_DELAY_TMO, &ndlp->nlp_flag);
ndlp->nlp_last_elscmd = ELS_CMD_FDISC;
vport->port_state = LPFC_FDISC;
@@ -7952,11 +7952,10 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
timer_setup(&phba->fcf.redisc_wait, lpfc_sli4_fcf_redisc_wait_tmo, 0);
/* CMF congestion timer */
- hrtimer_init(&phba->cmf_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- phba->cmf_timer.function = lpfc_cmf_timer;
+ hrtimer_setup(&phba->cmf_timer, lpfc_cmf_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
/* CMF 1 minute stats collection timer */
- hrtimer_init(&phba->cmf_stats_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- phba->cmf_stats_timer.function = lpfc_cmf_stats_timer;
+ hrtimer_setup(&phba->cmf_stats_timer, lpfc_cmf_stats_timer, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
/*
* Control structure for handling external multi-buffer mailbox
@@ -12873,7 +12872,7 @@ lpfc_irq_rebalance(struct lpfc_hba *phba, unsigned int cpu, bool offline)
if (offline) {
/* Find next online CPU on original mask */
- cpu_next = cpumask_next_wrap(cpu, orig_mask, cpu, true);
+ cpu_next = cpumask_next_wrap(cpu, orig_mask);
cpu_select = lpfc_next_online_cpu(orig_mask, cpu_next);
/* Found a valid CPU */
@@ -13170,6 +13169,7 @@ lpfc_sli4_enable_msi(struct lpfc_hba *phba)
eqhdl = lpfc_get_eq_hdl(0);
rc = pci_irq_vector(phba->pcidev, 0);
if (rc < 0) {
+ free_irq(phba->pcidev->irq, phba);
pci_free_irq_vectors(phba->pcidev);
lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
"0496 MSI pci_irq_vec failed (%d)\n", rc);
@@ -13250,6 +13250,7 @@ lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
eqhdl = lpfc_get_eq_hdl(0);
retval = pci_irq_vector(phba->pcidev, 0);
if (retval < 0) {
+ free_irq(phba->pcidev->irq, phba);
lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
"0502 INTR pci_irq_vec failed (%d)\n",
retval);
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 055ed632c14d..f0158fc00f78 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -5645,9 +5645,8 @@ wait_for_cmpl:
* cmd_flag is set to LPFC_DRIVER_ABORTED before we wait
* for abort to complete.
*/
- wait_event_timeout(waitq,
- (lpfc_cmd->pCmd != cmnd),
- msecs_to_jiffies(2*vport->cfg_devloss_tmo*1000));
+ wait_event_timeout(waitq, (lpfc_cmd->pCmd != cmnd),
+ secs_to_jiffies(2*vport->cfg_devloss_tmo));
spin_lock(&lpfc_cmd->buf_lock);
@@ -5911,7 +5910,7 @@ lpfc_chk_tgt_mapped(struct lpfc_vport *vport, struct fc_rport *rport)
* If target is not in a MAPPED state, delay until
* target is rediscovered or devloss timeout expires.
*/
- later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
+ later = secs_to_jiffies(2 * vport->cfg_devloss_tmo) + jiffies;
while (time_after(later, jiffies)) {
if (!pnode)
return FAILED;
@@ -5957,7 +5956,7 @@ lpfc_reset_flush_io_context(struct lpfc_vport *vport, uint16_t tgt_id,
lpfc_sli_abort_taskmgmt(vport,
&phba->sli.sli3_ring[LPFC_FCP_RING],
tgt_id, lun_id, context);
- later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
+ later = secs_to_jiffies(2 * vport->cfg_devloss_tmo) + jiffies;
while (time_after(later, jiffies) && cnt) {
schedule_timeout_uninterruptible(msecs_to_jiffies(20));
cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context);
@@ -6137,8 +6136,7 @@ lpfc_target_reset_handler(struct scsi_cmnd *cmnd)
wait_event_timeout(waitq,
!test_bit(NLP_WAIT_FOR_LOGO,
&pnode->save_flags),
- msecs_to_jiffies(dev_loss_tmo *
- 1000));
+ secs_to_jiffies(dev_loss_tmo));
if (test_and_clear_bit(NLP_WAIT_FOR_LOGO,
&pnode->save_flags))
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 3fd9723cd271..4e0d48fcb204 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -1025,7 +1025,7 @@ lpfc_handle_rrq_active(struct lpfc_hba *phba)
LIST_HEAD(send_rrq);
clear_bit(HBA_RRQ_ACTIVE, &phba->hba_flag);
- next_time = jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
+ next_time = jiffies + secs_to_jiffies(phba->fc_ratov + 1);
spin_lock_irqsave(&phba->rrq_list_lock, iflags);
list_for_each_entry_safe(rrq, nextrrq,
&phba->active_rrq_list, list) {
@@ -1208,8 +1208,7 @@ lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
else
rrq->send_rrq = 0;
rrq->xritag = xritag;
- rrq->rrq_stop_time = jiffies +
- msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
+ rrq->rrq_stop_time = jiffies + secs_to_jiffies(phba->fc_ratov + 1);
rrq->nlp_DID = ndlp->nlp_DID;
rrq->vport = ndlp->vport;
rrq->rxid = rxid;
@@ -1736,8 +1735,7 @@ lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
BUG_ON(!piocb->vport);
if (!test_bit(FC_UNLOADING, &piocb->vport->load_flag))
mod_timer(&piocb->vport->els_tmofunc,
- jiffies +
- msecs_to_jiffies(1000 * (phba->fc_ratov << 1)));
+ jiffies + secs_to_jiffies(phba->fc_ratov << 1));
}
return 0;
@@ -2923,6 +2921,8 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
clear_bit(NLP_UNREG_INP, &ndlp->nlp_flag);
ndlp->nlp_defer_did = NLP_EVT_NOTHING_PENDING;
lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
+ } else {
+ clear_bit(NLP_UNREG_INP, &ndlp->nlp_flag);
}
/* The unreg_login mailbox is complete and had a
@@ -3956,8 +3956,7 @@ void lpfc_poll_eratt(struct timer_list *t)
else
/* Restart the timer for next eratt poll */
mod_timer(&phba->eratt_poll,
- jiffies +
- msecs_to_jiffies(1000 * phba->eratt_poll_interval));
+ jiffies + secs_to_jiffies(phba->eratt_poll_interval));
return;
}
@@ -9008,7 +9007,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
/* Start the ELS watchdog timer */
mod_timer(&vport->els_tmofunc,
- jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov * 2)));
+ jiffies + secs_to_jiffies(phba->fc_ratov * 2));
/* Start heart beat timer */
mod_timer(&phba->hb_tmofunc,
@@ -9027,7 +9026,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
/* Start error attention (ERATT) polling timer */
mod_timer(&phba->eratt_poll,
- jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval));
+ jiffies + secs_to_jiffies(phba->eratt_poll_interval));
/*
* The port is ready, set the host's link state to LINK_DOWN
@@ -9504,8 +9503,7 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
goto out_not_finished;
}
/* timeout active mbox command */
- timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) *
- 1000);
+ timeout = secs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox));
mod_timer(&psli->mbox_tmo, jiffies + timeout);
}
@@ -9629,8 +9627,7 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
drvr_flag);
goto out_not_finished;
}
- timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) *
- 1000) + jiffies;
+ timeout = secs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox)) + jiffies;
i = 0;
/* Wait for command to complete */
while (((word0 & OWN_CHIP) == OWN_CHIP) ||
@@ -9756,9 +9753,8 @@ lpfc_sli4_async_mbox_block(struct lpfc_hba *phba)
* command to be gracefully completed by firmware.
*/
if (phba->sli.mbox_active)
- timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
- phba->sli.mbox_active) *
- 1000) + jiffies;
+ timeout = secs_to_jiffies(lpfc_mbox_tmo_val(phba,
+ phba->sli.mbox_active)) + jiffies;
spin_unlock_irq(&phba->hbalock);
/* Make sure the mailbox is really active */
@@ -9881,8 +9877,7 @@ lpfc_sli4_wait_bmbx_ready(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
}
}
- timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mboxq)
- * 1000) + jiffies;
+ timeout = secs_to_jiffies(lpfc_mbox_tmo_val(phba, mboxq)) + jiffies;
do {
bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr);
@@ -10230,7 +10225,7 @@ lpfc_sli4_post_async_mbox(struct lpfc_hba *phba)
/* Start timer for the mbox_tmo and log some mailbox post messages */
mod_timer(&psli->mbox_tmo, (jiffies +
- msecs_to_jiffies(1000 * lpfc_mbox_tmo_val(phba, mboxq))));
+ secs_to_jiffies(lpfc_mbox_tmo_val(phba, mboxq))));
lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
"(%d):0355 Mailbox cmd x%x (x%x/x%x) issue Data: "
@@ -13159,7 +13154,7 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
retval = lpfc_sli_issue_iocb(phba, ring_number, piocb,
SLI_IOCB_RET_IOCB);
if (retval == IOCB_SUCCESS) {
- timeout_req = msecs_to_jiffies(timeout * 1000);
+ timeout_req = secs_to_jiffies(timeout);
timeleft = wait_event_timeout(done_q,
lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE),
timeout_req);
@@ -13275,8 +13270,7 @@ lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
/* now issue the command */
retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
if (retval == MBX_BUSY || retval == MBX_SUCCESS) {
- wait_for_completion_timeout(&mbox_done,
- msecs_to_jiffies(timeout * 1000));
+ wait_for_completion_timeout(&mbox_done, secs_to_jiffies(timeout));
spin_lock_irqsave(&phba->hbalock, flag);
pmboxq->ctx_u.mbox_wait = NULL;
@@ -13336,9 +13330,8 @@ lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba, int mbx_action)
* command to be gracefully completed by firmware.
*/
if (phba->sli.mbox_active)
- timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
- phba->sli.mbox_active) *
- 1000) + jiffies;
+ timeout = secs_to_jiffies(lpfc_mbox_tmo_val(phba,
+ phba->sli.mbox_active)) + jiffies;
spin_unlock_irq(&phba->hbalock);
/* Enable softirqs again, done with phba->hbalock */
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index c35f7225058e..638b50f35287 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2024 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2025 Broadcom. All Rights Reserved. The term *
 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -20,7 +20,7 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "14.4.0.7"
+#define LPFC_DRIVER_VERSION "14.4.0.8"
#define LPFC_DRIVER_NAME "lpfc"
/* Used for SLI 2/3 */
@@ -32,6 +32,6 @@
#define LPFC_MODULE_DESC "Emulex LightPulse Fibre Channel SCSI driver " \
LPFC_DRIVER_VERSION
-#define LPFC_COPYRIGHT "Copyright (C) 2017-2024 Broadcom. All Rights " \
+#define LPFC_COPYRIGHT "Copyright (C) 2017-2025 Broadcom. All Rights " \
"Reserved. The term \"Broadcom\" refers to Broadcom Inc. " \
"and/or its subsidiaries."
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c
index 3d70cc517573..cc56a7334319 100644
--- a/drivers/scsi/lpfc/lpfc_vport.c
+++ b/drivers/scsi/lpfc/lpfc_vport.c
@@ -246,7 +246,7 @@ static void lpfc_discovery_wait(struct lpfc_vport *vport)
* fabric RA_TOV value and dev_loss tmo. The driver's
* devloss_tmo is 10 giving this loop a 3x multiplier minimally.
*/
- wait_time_max = msecs_to_jiffies(((phba->fc_ratov * 3) + 3) * 1000);
+ wait_time_max = secs_to_jiffies((phba->fc_ratov * 3) + 3);
wait_time_max += jiffies;
start_time = jiffies;
while (time_before(jiffies, wait_time_max)) {
diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c
index adab151663dd..2006094af418 100644
--- a/drivers/scsi/megaraid.c
+++ b/drivers/scsi/megaraid.c
@@ -855,8 +855,8 @@ mega_build_cmd(adapter_t *adapter, struct scsi_cmnd *cmd, int *busy)
return scb;
#if MEGA_HAVE_CLUSTERING
- case RESERVE:
- case RELEASE:
+ case RESERVE_6:
+ case RELEASE_6:
/*
* Do we support clustering and is the support enabled
@@ -875,7 +875,7 @@ mega_build_cmd(adapter_t *adapter, struct scsi_cmnd *cmd, int *busy)
}
scb->raw_mbox[0] = MEGA_CLUSTER_CMD;
- scb->raw_mbox[2] = ( *cmd->cmnd == RESERVE ) ?
+ scb->raw_mbox[2] = *cmd->cmnd == RESERVE_6 ?
MEGA_RESERVE_LD : MEGA_RELEASE_LD;
scb->raw_mbox[3] = ldrv_num;
@@ -1618,8 +1618,8 @@ mega_cmd_done(adapter_t *adapter, u8 completed[], int nstatus, int status)
* failed or the input parameter is invalid
*/
if( status == 1 &&
- (cmd->cmnd[0] == RESERVE ||
- cmd->cmnd[0] == RELEASE) ) {
+ (cmd->cmnd[0] == RESERVE_6 ||
+ cmd->cmnd[0] == RELEASE_6) ) {
cmd->result |= (DID_ERROR << 16) |
SAM_STAT_RESERVATION_CONFLICT;
diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c
index 60cc3372991f..3ba837b3093f 100644
--- a/drivers/scsi/megaraid/megaraid_mbox.c
+++ b/drivers/scsi/megaraid/megaraid_mbox.c
@@ -1725,8 +1725,8 @@ megaraid_mbox_build_cmd(adapter_t *adapter, struct scsi_cmnd *scp, int *busy)
return scb;
- case RESERVE:
- case RELEASE:
+ case RESERVE_6:
+ case RELEASE_6:
/*
* Do we support clustering and is the support enabled
*/
@@ -1748,7 +1748,7 @@ megaraid_mbox_build_cmd(adapter_t *adapter, struct scsi_cmnd *scp, int *busy)
scb->dev_channel = 0xFF;
scb->dev_target = target;
ccb->raw_mbox[0] = CLUSTER_CMD;
- ccb->raw_mbox[2] = (scp->cmnd[0] == RESERVE) ?
+ ccb->raw_mbox[2] = scp->cmnd[0] == RESERVE_6 ?
RESERVE_LD : RELEASE_LD;
ccb->raw_mbox[3] = target;
@@ -2334,8 +2334,8 @@ megaraid_mbox_dpc(unsigned long devp)
* Error code returned is 1 if Reserve or Release
* failed or the input parameter is invalid
*/
- if (status == 1 && (scp->cmnd[0] == RESERVE ||
- scp->cmnd[0] == RELEASE)) {
+ if (status == 1 && (scp->cmnd[0] == RESERVE_6 ||
+ scp->cmnd[0] == RELEASE_6)) {
scp->result = DID_ERROR << 16 |
SAM_STAT_RESERVATION_CONFLICT;
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index d85f990aec88..28c75865967a 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -93,7 +93,7 @@ static unsigned int scmd_timeout = MEGASAS_DEFAULT_CMD_TIMEOUT;
module_param(scmd_timeout, int, 0444);
MODULE_PARM_DESC(scmd_timeout, "scsi command timeout (10-90s), default 90s. See megasas_reset_timer.");
-int perf_mode = -1;
+static int perf_mode = -1;
module_param(perf_mode, int, 0444);
MODULE_PARM_DESC(perf_mode, "Performance mode (only for Aero adapters), options:\n\t\t"
"0 - balanced: High iops and low latency queues are allocated &\n\t\t"
@@ -105,15 +105,15 @@ MODULE_PARM_DESC(perf_mode, "Performance mode (only for Aero adapters), options:
"default mode is 'balanced'"
);
-int event_log_level = MFI_EVT_CLASS_CRITICAL;
+static int event_log_level = MFI_EVT_CLASS_CRITICAL;
module_param(event_log_level, int, 0644);
MODULE_PARM_DESC(event_log_level, "Asynchronous event logging level- range is: -2(CLASS_DEBUG) to 4(CLASS_DEAD), Default: 2(CLASS_CRITICAL)");
-unsigned int enable_sdev_max_qd;
+static unsigned int enable_sdev_max_qd;
module_param(enable_sdev_max_qd, int, 0444);
MODULE_PARM_DESC(enable_sdev_max_qd, "Enable sdev max qd as can_queue. Default: 0");
-int poll_queues;
+static int poll_queues;
module_param(poll_queues, int, 0444);
MODULE_PARM_DESC(poll_queues, "Number of queues to be use for io_uring poll mode.\n\t\t"
"This parameter is effective only if host_tagset_enable=1 &\n\t\t"
@@ -122,7 +122,7 @@ MODULE_PARM_DESC(poll_queues, "Number of queues to be use for io_uring poll mode
"High iops queues are not allocated &\n\t\t"
);
-int host_tagset_enable = 1;
+static int host_tagset_enable = 1;
module_param(host_tagset_enable, int, 0444);
MODULE_PARM_DESC(host_tagset_enable, "Shared host tagset enable/disable Default: enable(1)");
diff --git a/drivers/scsi/mpi3mr/mpi/mpi30_cnfg.h b/drivers/scsi/mpi3mr/mpi/mpi30_cnfg.h
index 00cd18edfad6..96401eb7e231 100644
--- a/drivers/scsi/mpi3mr/mpi/mpi30_cnfg.h
+++ b/drivers/scsi/mpi3mr/mpi/mpi30_cnfg.h
@@ -19,6 +19,7 @@
#define MPI3_CONFIG_PAGETYPE_PCIE_SWITCH (0x31)
#define MPI3_CONFIG_PAGETYPE_PCIE_LINK (0x33)
#define MPI3_CONFIG_PAGEATTR_MASK (0xf0)
+#define MPI3_CONFIG_PAGEATTR_SHIFT (4)
#define MPI3_CONFIG_PAGEATTR_READ_ONLY (0x00)
#define MPI3_CONFIG_PAGEATTR_CHANGEABLE (0x10)
#define MPI3_CONFIG_PAGEATTR_PERSISTENT (0x20)
@@ -29,10 +30,13 @@
#define MPI3_CONFIG_ACTION_READ_PERSISTENT (0x04)
#define MPI3_CONFIG_ACTION_WRITE_PERSISTENT (0x05)
#define MPI3_DEVICE_PGAD_FORM_MASK (0xf0000000)
+#define MPI3_DEVICE_PGAD_FORM_SHIFT (28)
#define MPI3_DEVICE_PGAD_FORM_GET_NEXT_HANDLE (0x00000000)
#define MPI3_DEVICE_PGAD_FORM_HANDLE (0x20000000)
#define MPI3_DEVICE_PGAD_HANDLE_MASK (0x0000ffff)
+#define MPI3_DEVICE_PGAD_HANDLE_SHIFT (0)
#define MPI3_SAS_EXPAND_PGAD_FORM_MASK (0xf0000000)
+#define MPI3_SAS_EXPAND_PGAD_FORM_SHIFT (28)
#define MPI3_SAS_EXPAND_PGAD_FORM_GET_NEXT_HANDLE (0x00000000)
#define MPI3_SAS_EXPAND_PGAD_FORM_HANDLE_PHY_NUM (0x10000000)
#define MPI3_SAS_EXPAND_PGAD_FORM_HANDLE (0x20000000)
diff --git a/drivers/scsi/mpi3mr/mpi/mpi30_image.h b/drivers/scsi/mpi3mr/mpi/mpi30_image.h
index 2c6e548cbd0f..8d824107a678 100644
--- a/drivers/scsi/mpi3mr/mpi/mpi30_image.h
+++ b/drivers/scsi/mpi3mr/mpi/mpi30_image.h
@@ -66,7 +66,12 @@ struct mpi3_component_image_header {
#define MPI3_IMAGE_HEADER_SIGNATURE1_SMM (0x204d4d53)
#define MPI3_IMAGE_HEADER_SIGNATURE1_PSW (0x20575350)
#define MPI3_IMAGE_HEADER_SIGNATURE2_VALUE (0x50584546)
+#define MPI3_IMAGE_HEADER_FLAGS_SIGNED_UEFI_MASK (0x00000300)
+#define MPI3_IMAGE_HEADER_FLAGS_SIGNED_UEFI_SHIFT (8)
+#define MPI3_IMAGE_HEADER_FLAGS_CERT_CHAIN_FORMAT_MASK (0x000000c0)
+#define MPI3_IMAGE_HEADER_FLAGS_CERT_CHAIN_FORMAT_SHIFT (6)
#define MPI3_IMAGE_HEADER_FLAGS_DEVICE_KEY_BASIS_MASK (0x00000030)
+#define MPI3_IMAGE_HEADER_FLAGS_DEVICE_KEY_BASIS_SHIFT (4)
#define MPI3_IMAGE_HEADER_FLAGS_DEVICE_KEY_BASIS_CDI (0x00000000)
#define MPI3_IMAGE_HEADER_FLAGS_DEVICE_KEY_BASIS_DI (0x00000010)
#define MPI3_IMAGE_HEADER_FLAGS_SIGNED_NVDATA (0x00000008)
@@ -214,11 +219,13 @@ struct mpi3_encrypted_hash_entry {
#define MPI3_HASH_IMAGE_TYPE_KEY_WITH_HASH_1_OF_2 (0x04)
#define MPI3_HASH_IMAGE_TYPE_KEY_WITH_HASH_2_OF_2 (0x05)
#define MPI3_HASH_ALGORITHM_VERSION_MASK (0xe0)
+#define MPI3_HASH_ALGORITHM_VERSION_SHIFT (5)
#define MPI3_HASH_ALGORITHM_VERSION_NONE (0x00)
#define MPI3_HASH_ALGORITHM_VERSION_SHA1 (0x20)
#define MPI3_HASH_ALGORITHM_VERSION_SHA2 (0x40)
#define MPI3_HASH_ALGORITHM_VERSION_SHA3 (0x60)
#define MPI3_HASH_ALGORITHM_SIZE_MASK (0x1f)
+#define MPI3_HASH_ALGORITHM_SIZE_SHIFT (0)
#define MPI3_HASH_ALGORITHM_SIZE_UNUSED (0x00)
#define MPI3_HASH_ALGORITHM_SIZE_SHA256 (0x01)
#define MPI3_HASH_ALGORITHM_SIZE_SHA512 (0x02)
@@ -236,6 +243,7 @@ struct mpi3_encrypted_hash_entry {
#define MPI3_ENCRYPTION_ALGORITHM_ML_DSA_65 (0x0c)
#define MPI3_ENCRYPTION_ALGORITHM_ML_DSA_44 (0x0d)
#define MPI3_ENCRYPTED_HASH_ENTRY_FLAGS_PAIRED_KEY_MASK (0x0f)
+#define MPI3_ENCRYPTED_HASH_ENTRY_FLAGS_PAIRED_KEY_SHIFT (0)
#ifndef MPI3_ENCRYPTED_HASH_ENTRY_MAX
#define MPI3_ENCRYPTED_HASH_ENTRY_MAX (1)
diff --git a/drivers/scsi/mpi3mr/mpi/mpi30_init.h b/drivers/scsi/mpi3mr/mpi/mpi30_init.h
index af86d12c8e49..bbef5bac92ed 100644
--- a/drivers/scsi/mpi3mr/mpi/mpi30_init.h
+++ b/drivers/scsi/mpi3mr/mpi/mpi30_init.h
@@ -38,23 +38,31 @@ struct mpi3_scsi_io_request {
#define MPI3_SCSIIO_MSGFLAGS_METASGL_VALID (0x80)
#define MPI3_SCSIIO_MSGFLAGS_DIVERT_TO_FIRMWARE (0x40)
#define MPI3_SCSIIO_FLAGS_LARGE_CDB (0x60000000)
+#define MPI3_SCSIIO_FLAGS_LARGE_CDB_MASK (0x60000000)
+#define MPI3_SCSIIO_FLAGS_LARGE_CDB_SHIFT (29)
+#define MPI3_SCSIIO_FLAGS_IOC_USE_ONLY_27_MASK (0x18000000)
+#define MPI3_SCSIIO_FLAGS_IOC_USE_ONLY_27_SHIFT (27)
#define MPI3_SCSIIO_FLAGS_CDB_16_OR_LESS (0x00000000)
#define MPI3_SCSIIO_FLAGS_CDB_GREATER_THAN_16 (0x20000000)
#define MPI3_SCSIIO_FLAGS_CDB_IN_SEPARATE_BUFFER (0x40000000)
#define MPI3_SCSIIO_FLAGS_TASKATTRIBUTE_MASK (0x07000000)
+#define MPI3_SCSIIO_FLAGS_TASKATTRIBUTE_SHIFT (24)
+#define MPI3_SCSIIO_FLAGS_DATADIRECTION_MASK (0x000c0000)
+#define MPI3_SCSIIO_FLAGS_DATADIRECTION_SHIFT (18)
#define MPI3_SCSIIO_FLAGS_TASKATTRIBUTE_SIMPLEQ (0x00000000)
#define MPI3_SCSIIO_FLAGS_TASKATTRIBUTE_HEADOFQ (0x01000000)
#define MPI3_SCSIIO_FLAGS_TASKATTRIBUTE_ORDEREDQ (0x02000000)
#define MPI3_SCSIIO_FLAGS_TASKATTRIBUTE_ACAQ (0x04000000)
#define MPI3_SCSIIO_FLAGS_CMDPRI_MASK (0x00f00000)
#define MPI3_SCSIIO_FLAGS_CMDPRI_SHIFT (20)
-#define MPI3_SCSIIO_FLAGS_DATADIRECTION_MASK (0x000c0000)
#define MPI3_SCSIIO_FLAGS_DATADIRECTION_NO_DATA_TRANSFER (0x00000000)
#define MPI3_SCSIIO_FLAGS_DATADIRECTION_WRITE (0x00040000)
#define MPI3_SCSIIO_FLAGS_DATADIRECTION_READ (0x00080000)
#define MPI3_SCSIIO_FLAGS_DMAOPERATION_MASK (0x00030000)
+#define MPI3_SCSIIO_FLAGS_DMAOPERATION_SHIFT (16)
#define MPI3_SCSIIO_FLAGS_DMAOPERATION_HOST_PI (0x00010000)
#define MPI3_SCSIIO_FLAGS_DIVERT_REASON_MASK (0x000000f0)
+#define MPI3_SCSIIO_FLAGS_DIVERT_REASON_SHIFT (4)
#define MPI3_SCSIIO_FLAGS_DIVERT_REASON_IO_THROTTLING (0x00000010)
#define MPI3_SCSIIO_FLAGS_DIVERT_REASON_WRITE_SAME_TOO_LARGE (0x00000020)
#define MPI3_SCSIIO_FLAGS_DIVERT_REASON_PROD_SPECIFIC (0x00000080)
@@ -99,6 +107,7 @@ struct mpi3_scsi_io_reply {
#define MPI3_SCSI_STATUS_ACA_ACTIVE (0x30)
#define MPI3_SCSI_STATUS_TASK_ABORTED (0x40)
#define MPI3_SCSI_STATE_SENSE_MASK (0x03)
+#define MPI3_SCSI_STATE_SENSE_SHIFT (0)
#define MPI3_SCSI_STATE_SENSE_VALID (0x00)
#define MPI3_SCSI_STATE_SENSE_FAILED (0x01)
#define MPI3_SCSI_STATE_SENSE_BUFF_Q_EMPTY (0x02)
diff --git a/drivers/scsi/mpi3mr/mpi/mpi30_ioc.h b/drivers/scsi/mpi3mr/mpi/mpi30_ioc.h
index c374867f9ba0..b42933fcd423 100644
--- a/drivers/scsi/mpi3mr/mpi/mpi30_ioc.h
+++ b/drivers/scsi/mpi3mr/mpi/mpi30_ioc.h
@@ -30,6 +30,7 @@ struct mpi3_ioc_init_request {
#define MPI3_IOCINIT_MSGFLAGS_WRITESAMEDIVERT_SUPPORTED (0x08)
#define MPI3_IOCINIT_MSGFLAGS_SCSIIOSTATUSREPLY_SUPPORTED (0x04)
#define MPI3_IOCINIT_MSGFLAGS_HOSTMETADATA_MASK (0x03)
+#define MPI3_IOCINIT_MSGFLAGS_HOSTMETADATA_SHIFT (0)
#define MPI3_IOCINIT_MSGFLAGS_HOSTMETADATA_NOT_USED (0x00)
#define MPI3_IOCINIT_MSGFLAGS_HOSTMETADATA_SEPARATED (0x01)
#define MPI3_IOCINIT_MSGFLAGS_HOSTMETADATA_INLINE (0x02)
@@ -40,6 +41,7 @@ struct mpi3_ioc_init_request {
#define MPI3_WHOINIT_MANUFACTURER (0x04)
#define MPI3_IOCINIT_DRIVERCAP_OSEXPOSURE_MASK (0x00000003)
+#define MPI3_IOCINIT_DRIVERCAP_OSEXPOSURE_SHIFT (0)
#define MPI3_IOCINIT_DRIVERCAP_OSEXPOSURE_NO_GUIDANCE (0x00000000)
#define MPI3_IOCINIT_DRIVERCAP_OSEXPOSURE_NO_SPECIAL (0x00000001)
#define MPI3_IOCINIT_DRIVERCAP_OSEXPOSURE_REPORT_AS_HDD (0x00000002)
@@ -111,9 +113,11 @@ struct mpi3_ioc_facts_data {
__le32 diag_tty_size;
};
#define MPI3_IOCFACTS_CAPABILITY_NON_SUPERVISOR_MASK (0x80000000)
+#define MPI3_IOCFACTS_CAPABILITY_NON_SUPERVISOR_SHIFT (31)
#define MPI3_IOCFACTS_CAPABILITY_SUPERVISOR_IOC (0x00000000)
#define MPI3_IOCFACTS_CAPABILITY_NON_SUPERVISOR_IOC (0x80000000)
#define MPI3_IOCFACTS_CAPABILITY_INT_COALESCE_MASK (0x00000600)
+#define MPI3_IOCFACTS_CAPABILITY_INT_COALESCE_SHIFT (9)
#define MPI3_IOCFACTS_CAPABILITY_INT_COALESCE_FIXED_THRESHOLD (0x00000000)
#define MPI3_IOCFACTS_CAPABILITY_INT_COALESCE_OUTSTANDING_IO (0x00000200)
#define MPI3_IOCFACTS_CAPABILITY_COMPLETE_RESET_SUPPORTED (0x00000100)
@@ -134,6 +138,7 @@ struct mpi3_ioc_facts_data {
#define MPI3_IOCFACTS_EXCEPT_SAS_DISABLED (0x1000)
#define MPI3_IOCFACTS_EXCEPT_SAFE_MODE (0x0800)
#define MPI3_IOCFACTS_EXCEPT_SECURITY_KEY_MASK (0x0700)
+#define MPI3_IOCFACTS_EXCEPT_SECURITY_KEY_SHIFT (8)
#define MPI3_IOCFACTS_EXCEPT_SECURITY_KEY_NONE (0x0000)
#define MPI3_IOCFACTS_EXCEPT_SECURITY_KEY_LOCAL_VIA_MGMT (0x0100)
#define MPI3_IOCFACTS_EXCEPT_SECURITY_KEY_EXT_VIA_MGMT (0x0200)
@@ -149,6 +154,7 @@ struct mpi3_ioc_facts_data {
#define MPI3_IOCFACTS_EXCEPT_BLOCKING_BOOT_EVENT (0x0004)
#define MPI3_IOCFACTS_EXCEPT_SECURITY_SELFTEST_FAILURE (0x0002)
#define MPI3_IOCFACTS_EXCEPT_BOOTSTAT_MASK (0x0001)
+#define MPI3_IOCFACTS_EXCEPT_BOOTSTAT_SHIFT (0)
#define MPI3_IOCFACTS_EXCEPT_BOOTSTAT_PRIMARY (0x0000)
#define MPI3_IOCFACTS_EXCEPT_BOOTSTAT_SECONDARY (0x0001)
#define MPI3_IOCFACTS_PROTOCOL_SAS (0x0010)
@@ -161,10 +167,12 @@ struct mpi3_ioc_facts_data {
#define MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_MASK (0x0000ff00)
#define MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_SHIFT (8)
#define MPI3_IOCFACTS_FLAGS_INITIAL_PORT_ENABLE_MASK (0x00000030)
+#define MPI3_IOCFACTS_FLAGS_INITIAL_PORT_ENABLE_SHIFT (4)
#define MPI3_IOCFACTS_FLAGS_INITIAL_PORT_ENABLE_NOT_STARTED (0x00000000)
#define MPI3_IOCFACTS_FLAGS_INITIAL_PORT_ENABLE_IN_PROGRESS (0x00000010)
#define MPI3_IOCFACTS_FLAGS_INITIAL_PORT_ENABLE_COMPLETE (0x00000020)
#define MPI3_IOCFACTS_FLAGS_PERSONALITY_MASK (0x0000000f)
+#define MPI3_IOCFACTS_FLAGS_PERSONALITY_SHIFT (0)
#define MPI3_IOCFACTS_FLAGS_PERSONALITY_EHBA (0x00000000)
#define MPI3_IOCFACTS_FLAGS_PERSONALITY_RAID_DDR (0x00000002)
#define MPI3_IOCFACTS_IO_THROTTLE_DATA_LENGTH_NOT_REQUIRED (0x0000)
@@ -204,6 +212,7 @@ struct mpi3_create_request_queue_request {
};
#define MPI3_CREATE_REQUEST_QUEUE_FLAGS_SEGMENTED_MASK (0x80)
+#define MPI3_CREATE_REQUEST_QUEUE_FLAGS_SEGMENTED_SHIFT (7)
#define MPI3_CREATE_REQUEST_QUEUE_FLAGS_SEGMENTED_SEGMENTED (0x80)
#define MPI3_CREATE_REQUEST_QUEUE_FLAGS_SEGMENTED_CONTIGUOUS (0x00)
#define MPI3_CREATE_REQUEST_QUEUE_SIZE_MINIMUM (2)
@@ -237,10 +246,12 @@ struct mpi3_create_reply_queue_request {
};
#define MPI3_CREATE_REPLY_QUEUE_FLAGS_SEGMENTED_MASK (0x80)
+#define MPI3_CREATE_REPLY_QUEUE_FLAGS_SEGMENTED_SHIFT (7)
#define MPI3_CREATE_REPLY_QUEUE_FLAGS_SEGMENTED_SEGMENTED (0x80)
#define MPI3_CREATE_REPLY_QUEUE_FLAGS_SEGMENTED_CONTIGUOUS (0x00)
#define MPI3_CREATE_REPLY_QUEUE_FLAGS_COALESCE_DISABLE (0x02)
#define MPI3_CREATE_REPLY_QUEUE_FLAGS_INT_ENABLE_MASK (0x01)
+#define MPI3_CREATE_REPLY_QUEUE_FLAGS_INT_ENABLE_SHIFT (0)
#define MPI3_CREATE_REPLY_QUEUE_FLAGS_INT_ENABLE_DISABLE (0x00)
#define MPI3_CREATE_REPLY_QUEUE_FLAGS_INT_ENABLE_ENABLE (0x01)
#define MPI3_CREATE_REPLY_QUEUE_SIZE_MINIMUM (2)
@@ -326,9 +337,11 @@ struct mpi3_event_notification_reply {
};
#define MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_MASK (0x01)
+#define MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_SHIFT (0)
#define MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_REQUIRED (0x01)
#define MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_NOT_REQUIRED (0x00)
#define MPI3_EVENT_NOTIFY_MSGFLAGS_EVENT_ORIGINALITY_MASK (0x02)
+#define MPI3_EVENT_NOTIFY_MSGFLAGS_EVENT_ORIGINALITY_SHIFT (1)
#define MPI3_EVENT_NOTIFY_MSGFLAGS_EVENT_ORIGINALITY_ORIGINAL (0x00)
#define MPI3_EVENT_NOTIFY_MSGFLAGS_EVENT_ORIGINALITY_REPLAY (0x02)
struct mpi3_event_data_gpio_interrupt {
@@ -487,6 +500,7 @@ struct mpi3_event_sas_topo_phy_entry {
#define MPI3_EVENT_SAS_TOPO_PHY_STATUS_NO_EXIST (0x40)
#define MPI3_EVENT_SAS_TOPO_PHY_STATUS_VACANT (0x80)
#define MPI3_EVENT_SAS_TOPO_PHY_RC_MASK (0x0f)
+#define MPI3_EVENT_SAS_TOPO_PHY_RC_SHIFT (0)
#define MPI3_EVENT_SAS_TOPO_PHY_RC_TARG_NOT_RESPONDING (0x02)
#define MPI3_EVENT_SAS_TOPO_PHY_RC_PHY_CHANGED (0x03)
#define MPI3_EVENT_SAS_TOPO_PHY_RC_NO_CHANGE (0x04)
@@ -566,6 +580,7 @@ struct mpi3_event_pcie_topo_port_entry {
#define MPI3_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING (0x05)
#define MPI3_EVENT_PCIE_TOPO_PS_RESPONDING (0x06)
#define MPI3_EVENT_PCIE_TOPO_PI_LANES_MASK (0xf0)
+#define MPI3_EVENT_PCIE_TOPO_PI_LANES_SHIFT (4)
#define MPI3_EVENT_PCIE_TOPO_PI_LANES_UNKNOWN (0x00)
#define MPI3_EVENT_PCIE_TOPO_PI_LANES_1 (0x10)
#define MPI3_EVENT_PCIE_TOPO_PI_LANES_2 (0x20)
@@ -573,6 +588,7 @@ struct mpi3_event_pcie_topo_port_entry {
#define MPI3_EVENT_PCIE_TOPO_PI_LANES_8 (0x40)
#define MPI3_EVENT_PCIE_TOPO_PI_LANES_16 (0x50)
#define MPI3_EVENT_PCIE_TOPO_PI_RATE_MASK (0x0f)
+#define MPI3_EVENT_PCIE_TOPO_PI_RATE_SHIFT (0)
#define MPI3_EVENT_PCIE_TOPO_PI_RATE_UNKNOWN (0x00)
#define MPI3_EVENT_PCIE_TOPO_PI_RATE_DISABLED (0x01)
#define MPI3_EVENT_PCIE_TOPO_PI_RATE_2_5 (0x02)
@@ -881,6 +897,7 @@ struct mpi3_pel_req_action_acknowledge {
};
#define MPI3_PELACKNOWLEDGE_MSGFLAGS_SAFE_MODE_EXIT_MASK (0x03)
+#define MPI3_PELACKNOWLEDGE_MSGFLAGS_SAFE_MODE_EXIT_SHIFT (0)
#define MPI3_PELACKNOWLEDGE_MSGFLAGS_SAFE_MODE_EXIT_NO_GUIDANCE (0x00)
#define MPI3_PELACKNOWLEDGE_MSGFLAGS_SAFE_MODE_EXIT_CONTINUE_OP (0x01)
#define MPI3_PELACKNOWLEDGE_MSGFLAGS_SAFE_MODE_EXIT_TRANSITION_TO_FAULT (0x02)
@@ -924,6 +941,7 @@ struct mpi3_ci_download_request {
#define MPI3_CI_DOWNLOAD_MSGFLAGS_FORCE_FMC_ENABLE (0x40)
#define MPI3_CI_DOWNLOAD_MSGFLAGS_SIGNED_NVDATA (0x20)
#define MPI3_CI_DOWNLOAD_MSGFLAGS_WRITE_CACHE_FLUSH_MASK (0x03)
+#define MPI3_CI_DOWNLOAD_MSGFLAGS_WRITE_CACHE_FLUSH_SHIFT (0)
#define MPI3_CI_DOWNLOAD_MSGFLAGS_WRITE_CACHE_FLUSH_FAST (0x00)
#define MPI3_CI_DOWNLOAD_MSGFLAGS_WRITE_CACHE_FLUSH_MEDIUM (0x01)
#define MPI3_CI_DOWNLOAD_MSGFLAGS_WRITE_CACHE_FLUSH_SLOW (0x02)
@@ -953,6 +971,7 @@ struct mpi3_ci_download_reply {
#define MPI3_CI_DOWNLOAD_FLAGS_OFFLINE_ACTIVATION_REQUIRED (0x20)
#define MPI3_CI_DOWNLOAD_FLAGS_KEY_UPDATE_PENDING (0x10)
#define MPI3_CI_DOWNLOAD_FLAGS_ACTIVATION_STATUS_MASK (0x0e)
+#define MPI3_CI_DOWNLOAD_FLAGS_ACTIVATION_STATUS_SHIFT (1)
#define MPI3_CI_DOWNLOAD_FLAGS_ACTIVATION_STATUS_NOT_NEEDED (0x00)
#define MPI3_CI_DOWNLOAD_FLAGS_ACTIVATION_STATUS_AWAITING (0x02)
#define MPI3_CI_DOWNLOAD_FLAGS_ACTIVATION_STATUS_ONLINE_PENDING (0x04)
@@ -976,9 +995,11 @@ struct mpi3_ci_upload_request {
};
#define MPI3_CI_UPLOAD_MSGFLAGS_LOCATION_MASK (0x01)
+#define MPI3_CI_UPLOAD_MSGFLAGS_LOCATION_SHIFT (0)
#define MPI3_CI_UPLOAD_MSGFLAGS_LOCATION_PRIMARY (0x00)
#define MPI3_CI_UPLOAD_MSGFLAGS_LOCATION_SECONDARY (0x01)
#define MPI3_CI_UPLOAD_MSGFLAGS_FORMAT_MASK (0x02)
+#define MPI3_CI_UPLOAD_MSGFLAGS_FORMAT_SHIFT (1)
#define MPI3_CI_UPLOAD_MSGFLAGS_FORMAT_FLASH (0x00)
#define MPI3_CI_UPLOAD_MSGFLAGS_FORMAT_EXECUTABLE (0x02)
#define MPI3_CTRL_OP_FORCE_FULL_DISCOVERY (0x01)
diff --git a/drivers/scsi/mpi3mr/mpi/mpi30_tool.h b/drivers/scsi/mpi3mr/mpi/mpi30_tool.h
index 3b960893870f..50a65b16a818 100644
--- a/drivers/scsi/mpi3mr/mpi/mpi30_tool.h
+++ b/drivers/scsi/mpi3mr/mpi/mpi30_tool.h
@@ -9,6 +9,7 @@
#define MPI3_DIAG_BUFFER_TYPE_FW (0x02)
#define MPI3_DIAG_BUFFER_ACTION_RELEASE (0x01)
+#define MPI3_DIAG_BUFFER_POST_MSGFLAGS_SEGMENTED (0x01)
struct mpi3_diag_buffer_post_request {
__le16 host_tag;
u8 ioc_use_only02;
diff --git a/drivers/scsi/mpi3mr/mpi/mpi30_transport.h b/drivers/scsi/mpi3mr/mpi/mpi30_transport.h
index b2ab25a1cfeb..5c522e2531c3 100644
--- a/drivers/scsi/mpi3mr/mpi/mpi30_transport.h
+++ b/drivers/scsi/mpi3mr/mpi/mpi30_transport.h
@@ -18,7 +18,7 @@ union mpi3_version_union {
#define MPI3_VERSION_MAJOR (3)
#define MPI3_VERSION_MINOR (0)
-#define MPI3_VERSION_UNIT (34)
+#define MPI3_VERSION_UNIT (35)
#define MPI3_VERSION_DEV (0)
#define MPI3_DEVHANDLE_INVALID (0xffff)
struct mpi3_sysif_oper_queue_indexes {
@@ -80,6 +80,7 @@ struct mpi3_sysif_registers {
#define MPI3_SYSIF_IOC_CONFIG_OPER_RPY_ENT_SZ_SHIFT (20)
#define MPI3_SYSIF_IOC_CONFIG_OPER_REQ_ENT_SZ (0x000f0000)
#define MPI3_SYSIF_IOC_CONFIG_OPER_REQ_ENT_SZ_SHIFT (16)
+#define MPI3_SYSIF_IOC_CONFIG_SHUTDOWN_SHIFT (14)
#define MPI3_SYSIF_IOC_CONFIG_SHUTDOWN_MASK (0x0000c000)
#define MPI3_SYSIF_IOC_CONFIG_SHUTDOWN_NO (0x00000000)
#define MPI3_SYSIF_IOC_CONFIG_SHUTDOWN_NORMAL (0x00004000)
@@ -97,6 +98,7 @@ struct mpi3_sysif_registers {
#define MPI3_SYSIF_IOC_STATUS_READY (0x00000001)
#define MPI3_SYSIF_ADMIN_Q_NUM_ENTRIES_OFFSET (0x00000024)
#define MPI3_SYSIF_ADMIN_Q_NUM_ENTRIES_REQ_MASK (0x0fff)
+#define MPI3_SYSIF_ADMIN_Q_NUM_ENTRIES_REQ_SHIFT (0)
#define MPI3_SYSIF_ADMIN_Q_NUM_ENTRIES_REPLY_OFFSET (0x00000026)
#define MPI3_SYSIF_ADMIN_Q_NUM_ENTRIES_REPLY_MASK (0x0fff0000)
#define MPI3_SYSIF_ADMIN_Q_NUM_ENTRIES_REPLY_SHIFT (16)
@@ -106,6 +108,7 @@ struct mpi3_sysif_registers {
#define MPI3_SYSIF_ADMIN_REPLY_Q_ADDR_HIGH_OFFSET (0x00000034)
#define MPI3_SYSIF_COALESCE_CONTROL_OFFSET (0x00000040)
#define MPI3_SYSIF_COALESCE_CONTROL_ENABLE_MASK (0xc0000000)
+#define MPI3_SYSIF_COALESCE_CONTROL_ENABLE_SHIFT (30)
#define MPI3_SYSIF_COALESCE_CONTROL_ENABLE_NO_CHANGE (0x00000000)
#define MPI3_SYSIF_COALESCE_CONTROL_ENABLE_DISABLE (0x40000000)
#define MPI3_SYSIF_COALESCE_CONTROL_ENABLE_ENABLE (0xc0000000)
@@ -124,6 +127,7 @@ struct mpi3_sysif_registers {
#define MPI3_SYSIF_OPER_REPLY_Q_N_CI_OFFSET(N) (MPI3_SYSIF_OPER_REPLY_Q_CI_OFFSET + (((N) - 1) * 8))
#define MPI3_SYSIF_WRITE_SEQUENCE_OFFSET (0x00001c04)
#define MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_MASK (0x0000000f)
+#define MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_SHIFT (0)
#define MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_FLUSH (0x0)
#define MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_1ST (0xf)
#define MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_2ND (0x4)
@@ -133,6 +137,7 @@ struct mpi3_sysif_registers {
#define MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_6TH (0xd)
#define MPI3_SYSIF_HOST_DIAG_OFFSET (0x00001c08)
#define MPI3_SYSIF_HOST_DIAG_RESET_ACTION_MASK (0x00000700)
+#define MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SHIFT (8)
#define MPI3_SYSIF_HOST_DIAG_RESET_ACTION_NO_RESET (0x00000000)
#define MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET (0x00000100)
#define MPI3_SYSIF_HOST_DIAG_RESET_ACTION_HOST_CONTROL_BOOT_RESET (0x00000200)
@@ -151,6 +156,7 @@ struct mpi3_sysif_registers {
#define MPI3_SYSIF_FAULT_FUNC_AREA_SHIFT (24)
#define MPI3_SYSIF_FAULT_FUNC_AREA_MPI_DEFINED (0x00000000)
#define MPI3_SYSIF_FAULT_CODE_MASK (0x0000ffff)
+#define MPI3_SYSIF_FAULT_CODE_SHIFT (0)
#define MPI3_SYSIF_FAULT_CODE_DIAG_FAULT_RESET (0x0000f000)
#define MPI3_SYSIF_FAULT_CODE_CI_ACTIVATION_RESET (0x0000f001)
#define MPI3_SYSIF_FAULT_CODE_SOFT_RESET_IN_PROGRESS (0x0000f002)
@@ -176,17 +182,20 @@ struct mpi3_sysif_registers {
#define MPI3_SYSIF_DIAG_RW_ADDRESS_HIGH_OFFSET (0x00001c5c)
#define MPI3_SYSIF_DIAG_RW_CONTROL_OFFSET (0x00001c60)
#define MPI3_SYSIF_DIAG_RW_CONTROL_LEN_MASK (0x00000030)
+#define MPI3_SYSIF_DIAG_RW_CONTROL_LEN_SHIFT (4)
#define MPI3_SYSIF_DIAG_RW_CONTROL_LEN_1BYTE (0x00000000)
#define MPI3_SYSIF_DIAG_RW_CONTROL_LEN_2BYTES (0x00000010)
#define MPI3_SYSIF_DIAG_RW_CONTROL_LEN_4BYTES (0x00000020)
#define MPI3_SYSIF_DIAG_RW_CONTROL_LEN_8BYTES (0x00000030)
#define MPI3_SYSIF_DIAG_RW_CONTROL_RESET (0x00000004)
#define MPI3_SYSIF_DIAG_RW_CONTROL_DIR_MASK (0x00000002)
+#define MPI3_SYSIF_DIAG_RW_CONTROL_DIR_SHIFT (1)
#define MPI3_SYSIF_DIAG_RW_CONTROL_DIR_READ (0x00000000)
#define MPI3_SYSIF_DIAG_RW_CONTROL_DIR_WRITE (0x00000002)
#define MPI3_SYSIF_DIAG_RW_CONTROL_START (0x00000001)
#define MPI3_SYSIF_DIAG_RW_STATUS_OFFSET (0x00001c62)
#define MPI3_SYSIF_DIAG_RW_STATUS_STATUS_MASK (0x0000000e)
+#define MPI3_SYSIF_DIAG_RW_STATUS_STATUS_SHIFT (1)
#define MPI3_SYSIF_DIAG_RW_STATUS_STATUS_SUCCESS (0x00000000)
#define MPI3_SYSIF_DIAG_RW_STATUS_STATUS_INV_ADDR (0x00000002)
#define MPI3_SYSIF_DIAG_RW_STATUS_STATUS_ACC_ERR (0x00000004)
@@ -207,7 +216,9 @@ struct mpi3_default_reply_descriptor {
};
#define MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK (0x0001)
+#define MPI3_REPLY_DESCRIPT_FLAGS_PHASE_SHIFT (0)
#define MPI3_REPLY_DESCRIPT_FLAGS_TYPE_MASK (0xf000)
+#define MPI3_REPLY_DESCRIPT_FLAGS_TYPE_SHIFT (12)
#define MPI3_REPLY_DESCRIPT_FLAGS_TYPE_ADDRESS_REPLY (0x0000)
#define MPI3_REPLY_DESCRIPT_FLAGS_TYPE_SUCCESS (0x1000)
#define MPI3_REPLY_DESCRIPT_FLAGS_TYPE_TARGET_COMMAND_BUFFER (0x2000)
@@ -301,6 +312,7 @@ union mpi3_sge_union {
};
#define MPI3_SGE_FLAGS_ELEMENT_TYPE_MASK (0xf0)
+#define MPI3_SGE_FLAGS_ELEMENT_TYPE_SHIFT (4)
#define MPI3_SGE_FLAGS_ELEMENT_TYPE_SIMPLE (0x00)
#define MPI3_SGE_FLAGS_ELEMENT_TYPE_BIT_BUCKET (0x10)
#define MPI3_SGE_FLAGS_ELEMENT_TYPE_CHAIN (0x20)
@@ -309,6 +321,7 @@ union mpi3_sge_union {
#define MPI3_SGE_FLAGS_END_OF_LIST (0x08)
#define MPI3_SGE_FLAGS_END_OF_BUFFER (0x04)
#define MPI3_SGE_FLAGS_DLAS_MASK (0x03)
+#define MPI3_SGE_FLAGS_DLAS_SHIFT (0)
#define MPI3_SGE_FLAGS_DLAS_SYSTEM (0x00)
#define MPI3_SGE_FLAGS_DLAS_IOC_UDP (0x01)
#define MPI3_SGE_FLAGS_DLAS_IOC_CTL (0x02)
@@ -322,15 +335,18 @@ union mpi3_sge_union {
#define MPI3_EEDPFLAGS_CHK_APP_TAG (0x0200)
#define MPI3_EEDPFLAGS_CHK_GUARD (0x0100)
#define MPI3_EEDPFLAGS_ESC_MODE_MASK (0x00c0)
+#define MPI3_EEDPFLAGS_ESC_MODE_SHIFT (6)
#define MPI3_EEDPFLAGS_ESC_MODE_DO_NOT_DISABLE (0x0040)
#define MPI3_EEDPFLAGS_ESC_MODE_APPTAG_DISABLE (0x0080)
#define MPI3_EEDPFLAGS_ESC_MODE_APPTAG_REFTAG_DISABLE (0x00c0)
#define MPI3_EEDPFLAGS_HOST_GUARD_MASK (0x0030)
+#define MPI3_EEDPFLAGS_HOST_GUARD_SHIFT (4)
#define MPI3_EEDPFLAGS_HOST_GUARD_T10_CRC (0x0000)
#define MPI3_EEDPFLAGS_HOST_GUARD_IP_CHKSUM (0x0010)
#define MPI3_EEDPFLAGS_HOST_GUARD_OEM_SPECIFIC (0x0020)
#define MPI3_EEDPFLAGS_PT_REF_TAG (0x0008)
#define MPI3_EEDPFLAGS_EEDP_OP_MASK (0x0007)
+#define MPI3_EEDPFLAGS_EEDP_OP_SHIFT (0)
#define MPI3_EEDPFLAGS_EEDP_OP_CHECK (0x0001)
#define MPI3_EEDPFLAGS_EEDP_OP_STRIP (0x0002)
#define MPI3_EEDPFLAGS_EEDP_OP_CHECK_REMOVE (0x0003)
@@ -403,6 +419,7 @@ struct mpi3_default_reply {
#define MPI3_IOCSTATUS_LOG_INFO_AVAIL_MASK (0x8000)
#define MPI3_IOCSTATUS_LOG_INFO_AVAILABLE (0x8000)
#define MPI3_IOCSTATUS_STATUS_MASK (0x7fff)
+#define MPI3_IOCSTATUS_STATUS_SHIFT (0)
#define MPI3_IOCSTATUS_SUCCESS (0x0000)
#define MPI3_IOCSTATUS_INVALID_FUNCTION (0x0001)
#define MPI3_IOCSTATUS_BUSY (0x0002)
@@ -469,4 +486,5 @@ struct mpi3_default_reply {
#define MPI3_IOCLOGINFO_TYPE_NONE (0x0)
#define MPI3_IOCLOGINFO_TYPE_SAS (0x3)
#define MPI3_IOCLOGINFO_LOG_DATA_MASK (0x0fffffff)
+#define MPI3_IOCLOGINFO_LOG_DATA_SHIFT (0)
#endif
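
The *_SHIFT additions throughout these headers pair each existing *_MASK with its bit offset so field values can be decoded without hard-coded shift constants. A minimal sketch of the intended decode pattern, assuming the header above is included (the helper name is illustrative, not part of the patch):

	/*
	 * Hypothetical helper: extract the SGE element type from a flags byte
	 * using the new mask/shift pair (0xf0 >> 4).
	 */
	static inline u8 mpi3_sge_element_type(u8 sge_flags)
	{
		return (sge_flags & MPI3_SGE_FLAGS_ELEMENT_TYPE_MASK) >>
		       MPI3_SGE_FLAGS_ELEMENT_TYPE_SHIFT;
	}
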
diff --git a/drivers/scsi/mpi3mr/mpi3mr.h b/drivers/scsi/mpi3mr/mpi3mr.h
index 0d72b5f1b69d..9bbc7cb98ca3 100644
--- a/drivers/scsi/mpi3mr/mpi3mr.h
+++ b/drivers/scsi/mpi3mr/mpi3mr.h
@@ -56,8 +56,8 @@ extern struct list_head mrioc_list;
extern int prot_mask;
extern atomic64_t event_counter;
-#define MPI3MR_DRIVER_VERSION "8.12.0.3.50"
-#define MPI3MR_DRIVER_RELDATE "11-November-2024"
+#define MPI3MR_DRIVER_VERSION "8.13.0.5.50"
+#define MPI3MR_DRIVER_RELDATE "20-February-2025"
#define MPI3MR_DRIVER_NAME "mpi3mr"
#define MPI3MR_DRIVER_LICENSE "GPL"
@@ -80,13 +80,14 @@ extern atomic64_t event_counter;
/* Admin queue management definitions */
#define MPI3MR_ADMIN_REQ_Q_SIZE (2 * MPI3MR_PAGE_SIZE_4K)
-#define MPI3MR_ADMIN_REPLY_Q_SIZE (4 * MPI3MR_PAGE_SIZE_4K)
+#define MPI3MR_ADMIN_REPLY_Q_SIZE (8 * MPI3MR_PAGE_SIZE_4K)
#define MPI3MR_ADMIN_REQ_FRAME_SZ 128
#define MPI3MR_ADMIN_REPLY_FRAME_SZ 16
/* Operational queue management definitions */
#define MPI3MR_OP_REQ_Q_QD 512
#define MPI3MR_OP_REP_Q_QD 1024
+#define MPI3MR_OP_REP_Q_QD2K 2048
#define MPI3MR_OP_REP_Q_QD4K 4096
#define MPI3MR_OP_REQ_Q_SEG_SIZE 4096
#define MPI3MR_OP_REP_Q_SEG_SIZE 4096
@@ -328,6 +329,7 @@ enum mpi3mr_reset_reason {
#define MPI3MR_RESET_REASON_OSTYPE_SHIFT 28
#define MPI3MR_RESET_REASON_IOCNUM_SHIFT 20
+
/* Queue type definitions */
enum queue_type {
MPI3MR_DEFAULT_QUEUE = 0,
@@ -387,6 +389,7 @@ struct mpi3mr_ioc_facts {
u16 max_msix_vectors;
u8 personality;
u8 dma_mask;
+ bool max_req_limit;
u8 protocol_flags;
u8 sge_mod_mask;
u8 sge_mod_value;
@@ -456,6 +459,8 @@ struct op_req_qinfo {
* @enable_irq_poll: Flag to indicate polling is enabled
* @in_use: Queue is handled by poll/ISR
* @qtype: Type of queue (types defined in enum queue_type)
+ * @qfull_watermark: Watermark defined for the reply queue to avoid
+ * the reply queue becoming full
*/
struct op_reply_qinfo {
u16 ci;
@@ -471,6 +476,7 @@ struct op_reply_qinfo {
bool enable_irq_poll;
atomic_t in_use;
enum queue_type qtype;
+ u16 qfull_watermark;
};
/**
@@ -928,6 +934,8 @@ struct trigger_event_data {
* @size: Buffer size
* @addr: Virtual address
* @dma_addr: Buffer DMA address
+ * @is_segmented: Whether the buffer is segmented
+ * @disabled_after_reset: Whether the buffer is disabled after a reset
*/
struct diag_buffer_desc {
u8 type;
@@ -937,6 +945,8 @@ struct diag_buffer_desc {
u32 size;
void *addr;
dma_addr_t dma_addr;
+ bool is_segmented;
+ bool disabled_after_reset;
};
/**
@@ -1022,6 +1032,8 @@ struct scmd_priv {
* @admin_reply_base: Admin reply queue base virtual address
* @admin_reply_dma: Admin reply queue base dma address
* @admin_reply_q_in_use: Queue is handled by poll/ISR
+ * @admin_pend_isr: Count of unprocessed admin ISR/poll calls
+ * due to another thread processing replies
* @ready_timeout: Controller ready timeout
* @intr_info: Interrupt cookie pointer
* @intr_info_count: Number of interrupt cookies
@@ -1090,6 +1102,7 @@ struct scmd_priv {
* @ts_update_interval: Timestamp update interval
* @reset_in_progress: Reset in progress flag
* @unrecoverable: Controller unrecoverable flag
+ * @io_admin_reset_sync: Manage state of I/O ops during an admin reset process
* @prev_reset_result: Result of previous reset
* @reset_mutex: Controller reset mutex
* @reset_waitq: Controller reset wait queue
@@ -1153,6 +1166,12 @@ struct scmd_priv {
* @snapdump_trigger_active: Snapdump trigger active flag
* @pci_err_recovery: PCI error recovery in progress
* @block_on_pci_err: Block IO during PCI error recovery
+ * @reply_qfull_count: Occurrences of reply queue full avoidance kicking in
+ * @prevent_reply_qfull: Enable reply queue full prevention
+ * @seg_tb_support: Segmented trace buffer support
+ * @num_tb_segs: Number of segments in the trace buffer
+ * @trace_buf_pool: DMA pool for segmented trace buffer segments
+ * @trace_buf: Trace buffer segments memory descriptor
*/
struct mpi3mr_ioc {
struct list_head list;
@@ -1189,6 +1208,7 @@ struct mpi3mr_ioc {
void *admin_reply_base;
dma_addr_t admin_reply_dma;
atomic_t admin_reply_q_in_use;
+ atomic_t admin_pend_isr;
u32 ready_timeout;
@@ -1276,6 +1296,7 @@ struct mpi3mr_ioc {
u16 ts_update_interval;
u8 reset_in_progress;
u8 unrecoverable;
+ u8 io_admin_reset_sync;
int prev_reset_result;
struct mutex reset_mutex;
wait_queue_head_t reset_waitq;
@@ -1351,6 +1372,13 @@ struct mpi3mr_ioc {
bool fw_release_trigger_active;
bool pci_err_recovery;
bool block_on_pci_err;
+ atomic_t reply_qfull_count;
+ bool prevent_reply_qfull;
+ bool seg_tb_support;
+ u32 num_tb_segs;
+ struct dma_pool *trace_buf_pool;
+ struct segments *trace_buf;
+
};
/**
diff --git a/drivers/scsi/mpi3mr/mpi3mr_app.c b/drivers/scsi/mpi3mr/mpi3mr_app.c
index 7589f48aebc8..f36663613950 100644
--- a/drivers/scsi/mpi3mr/mpi3mr_app.c
+++ b/drivers/scsi/mpi3mr/mpi3mr_app.c
@@ -12,23 +12,98 @@
#include <uapi/scsi/scsi_bsg_mpi3mr.h>
/**
- * mpi3mr_alloc_trace_buffer: Allocate trace buffer
+ * mpi3mr_alloc_trace_buffer: Allocate segmented trace buffer
* @mrioc: Adapter instance reference
* @trace_size: Trace buffer size
*
- * Allocate trace buffer
+ * Allocate the host trace buffer either as segmented memory pools
+ * or as a single contiguous buffer, based on the controller's
+ * capability.
+ *
* Return: 0 on success, non-zero on failure.
*/
static int mpi3mr_alloc_trace_buffer(struct mpi3mr_ioc *mrioc, u32 trace_size)
{
struct diag_buffer_desc *diag_buffer = &mrioc->diag_buffers[0];
+ int i, sz;
+ u64 *diag_buffer_list = NULL;
+ dma_addr_t diag_buffer_list_dma;
+ u32 seg_count;
+
+ if (mrioc->seg_tb_support) {
+ seg_count = (trace_size) / MPI3MR_PAGE_SIZE_4K;
+ trace_size = seg_count * MPI3MR_PAGE_SIZE_4K;
+
+ diag_buffer_list = dma_alloc_coherent(&mrioc->pdev->dev,
+ sizeof(u64) * seg_count,
+ &diag_buffer_list_dma, GFP_KERNEL);
+ if (!diag_buffer_list)
+ return -1;
+
+ mrioc->num_tb_segs = seg_count;
+
+ sz = sizeof(struct segments) * seg_count;
+ mrioc->trace_buf = kzalloc(sz, GFP_KERNEL);
+ if (!mrioc->trace_buf)
+ goto trace_buf_failed;
+
+ mrioc->trace_buf_pool = dma_pool_create("trace_buf pool",
+ &mrioc->pdev->dev, MPI3MR_PAGE_SIZE_4K, MPI3MR_PAGE_SIZE_4K,
+ 0);
+ if (!mrioc->trace_buf_pool) {
+ ioc_err(mrioc, "trace buf pool: dma_pool_create failed\n");
+ goto trace_buf_pool_failed;
+ }
- diag_buffer->addr = dma_alloc_coherent(&mrioc->pdev->dev,
- trace_size, &diag_buffer->dma_addr, GFP_KERNEL);
- if (diag_buffer->addr) {
- dprint_init(mrioc, "trace diag buffer is allocated successfully\n");
+ for (i = 0; i < seg_count; i++) {
+ mrioc->trace_buf[i].segment =
+ dma_pool_zalloc(mrioc->trace_buf_pool, GFP_KERNEL,
+ &mrioc->trace_buf[i].segment_dma);
+ diag_buffer_list[i] =
+ (u64) mrioc->trace_buf[i].segment_dma;
+ if (!diag_buffer_list[i])
+ goto tb_seg_alloc_failed;
+ }
+
+ diag_buffer->addr = diag_buffer_list;
+ diag_buffer->dma_addr = diag_buffer_list_dma;
+ diag_buffer->is_segmented = true;
+
+ dprint_init(mrioc, "segmented trace diag buffer\n"
+ "is allocated successfully seg_count:%d\n", seg_count);
return 0;
+ } else {
+ diag_buffer->addr = dma_alloc_coherent(&mrioc->pdev->dev,
+ trace_size, &diag_buffer->dma_addr, GFP_KERNEL);
+ if (diag_buffer->addr) {
+ dprint_init(mrioc, "trace diag buffer is allocated successfully\n");
+ return 0;
+ }
+ return -1;
}
+
+tb_seg_alloc_failed:
+ if (mrioc->trace_buf_pool) {
+ for (i = 0; i < mrioc->num_tb_segs; i++) {
+ if (mrioc->trace_buf[i].segment) {
+ dma_pool_free(mrioc->trace_buf_pool,
+ mrioc->trace_buf[i].segment,
+ mrioc->trace_buf[i].segment_dma);
+ mrioc->trace_buf[i].segment = NULL;
+ }
+ mrioc->trace_buf[i].segment = NULL;
+ }
+ dma_pool_destroy(mrioc->trace_buf_pool);
+ mrioc->trace_buf_pool = NULL;
+ }
+trace_buf_pool_failed:
+ kfree(mrioc->trace_buf);
+ mrioc->trace_buf = NULL;
+trace_buf_failed:
+ if (diag_buffer_list)
+ dma_free_coherent(&mrioc->pdev->dev,
+ sizeof(u64) * mrioc->num_tb_segs,
+ diag_buffer_list, diag_buffer_list_dma);
return -1;
}
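
When seg_tb_support is set, the buffer posted to the controller is not the trace data itself but a DMA-coherent array of u64 segment addresses, one entry per 4K page drawn from the DMA pool. A rough sketch of the resulting address-list size under that reading of the hunk above (the helper is illustrative only):

	/*
	 * Illustrative helper (not part of the patch): bytes needed for the
	 * segment address list describing a segmented trace buffer.
	 */
	static inline u32 mpi3mr_tb_addr_list_size(u32 trace_size)
	{
		u32 seg_count = trace_size / MPI3MR_PAGE_SIZE_4K;

		return seg_count * sizeof(u64);
	}
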
@@ -100,8 +175,9 @@ retry_trace:
dprint_init(mrioc,
"trying to allocate trace diag buffer of size = %dKB\n",
trace_size / 1024);
- if (get_order(trace_size) > MAX_PAGE_ORDER ||
+ if ((!mrioc->seg_tb_support && (get_order(trace_size) > MAX_PAGE_ORDER)) ||
mpi3mr_alloc_trace_buffer(mrioc, trace_size)) {
+
retry = true;
trace_size -= trace_dec_size;
dprint_init(mrioc, "trace diag buffer allocation failed\n"
@@ -161,6 +237,12 @@ int mpi3mr_issue_diag_buf_post(struct mpi3mr_ioc *mrioc,
u8 prev_status;
int retval = 0;
+ if (diag_buffer->disabled_after_reset) {
+ dprint_bsg_err(mrioc, "%s: skipping diag buffer posting\n"
+ "as it is disabled after reset\n", __func__);
+ return -1;
+ }
+
memset(&diag_buf_post_req, 0, sizeof(diag_buf_post_req));
mutex_lock(&mrioc->init_cmds.mutex);
if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
@@ -177,8 +259,12 @@ int mpi3mr_issue_diag_buf_post(struct mpi3mr_ioc *mrioc,
diag_buf_post_req.address = le64_to_cpu(diag_buffer->dma_addr);
diag_buf_post_req.length = le32_to_cpu(diag_buffer->size);
- dprint_bsg_info(mrioc, "%s: posting diag buffer type %d\n", __func__,
- diag_buffer->type);
+ if (diag_buffer->is_segmented)
+ diag_buf_post_req.msg_flags |= MPI3_DIAG_BUFFER_POST_MSGFLAGS_SEGMENTED;
+
+ dprint_bsg_info(mrioc, "%s: posting diag buffer type %d segmented:%d\n", __func__,
+ diag_buffer->type, diag_buffer->is_segmented);
+
prev_status = diag_buffer->status;
diag_buffer->status = MPI3MR_HDB_BUFSTATUS_POSTED_UNPAUSED;
init_completion(&mrioc->init_cmds.done);
@@ -2339,6 +2425,7 @@ static long mpi3mr_bsg_process_mpt_cmds(struct bsg_job *job)
}
if (!mrioc->ioctl_sges_allocated) {
+ mutex_unlock(&mrioc->bsg_cmds.mutex);
dprint_bsg_err(mrioc, "%s: DMA memory was not allocated\n",
__func__);
return -ENOMEM;
@@ -3061,6 +3148,29 @@ reply_queue_count_show(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR_RO(reply_queue_count);
/**
+ * reply_qfull_count_show - Show reply qfull count
+ * @dev: class device
+ * @attr: Device attributes
+ * @buf: Buffer to copy
+ *
+ * Retrieves the current value of the reply_qfull_count from the mrioc structure and
+ * formats it as a string for display.
+ *
+ * Return: sysfs_emit() return
+ */
+static ssize_t
+reply_qfull_count_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct mpi3mr_ioc *mrioc = shost_priv(shost);
+
+ return sysfs_emit(buf, "%u\n", atomic_read(&mrioc->reply_qfull_count));
+}
+
+static DEVICE_ATTR_RO(reply_qfull_count);
+
+/**
* logging_level_show - Show controller debug level
* @dev: class device
* @attr: Device attributes
@@ -3152,6 +3262,7 @@ static struct attribute *mpi3mr_host_attrs[] = {
&dev_attr_fw_queue_depth.attr,
&dev_attr_op_req_q_count.attr,
&dev_attr_reply_queue_count.attr,
+ &dev_attr_reply_qfull_count.attr,
&dev_attr_logging_level.attr,
&dev_attr_adp_state.attr,
NULL,
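
The new attribute is registered alongside the existing host attributes, so the counter can be read from user space once the driver is loaded. A small stand-alone sketch, assuming SCSI host number 0 (the path is illustrative):

	/* Userspace sketch: read the reply_qfull_count attribute for host0. */
	#include <stdio.h>

	int main(void)
	{
		char buf[32];
		FILE *f = fopen("/sys/class/scsi_host/host0/reply_qfull_count", "r");

		if (!f)
			return 1;
		if (fgets(buf, sizeof(buf), f))
			printf("reply_qfull_count: %s", buf);
		fclose(f);
		return 0;
	}
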
diff --git a/drivers/scsi/mpi3mr/mpi3mr_fw.c b/drivers/scsi/mpi3mr/mpi3mr_fw.c
index 5ed31fe57474..3fcb1ad3b070 100644
--- a/drivers/scsi/mpi3mr/mpi3mr_fw.c
+++ b/drivers/scsi/mpi3mr/mpi3mr_fw.c
@@ -17,7 +17,7 @@ static void mpi3mr_process_factsdata(struct mpi3mr_ioc *mrioc,
struct mpi3_ioc_facts_data *facts_data);
static void mpi3mr_pel_wait_complete(struct mpi3mr_ioc *mrioc,
struct mpi3mr_drv_cmd *drv_cmd);
-
+static int mpi3mr_check_op_admin_proc(struct mpi3mr_ioc *mrioc);
static int poll_queues;
module_param(poll_queues, int, 0444);
MODULE_PARM_DESC(poll_queues, "Number of queues for io_uring poll mode. (Range 1 - 126)");
@@ -446,8 +446,10 @@ int mpi3mr_process_admin_reply_q(struct mpi3mr_ioc *mrioc)
u16 threshold_comps = 0;
struct mpi3_default_reply_descriptor *reply_desc;
- if (!atomic_add_unless(&mrioc->admin_reply_q_in_use, 1, 1))
+ if (!atomic_add_unless(&mrioc->admin_reply_q_in_use, 1, 1)) {
+ atomic_inc(&mrioc->admin_pend_isr);
return 0;
+ }
reply_desc = (struct mpi3_default_reply_descriptor *)mrioc->admin_reply_base +
admin_reply_ci;
@@ -459,7 +461,7 @@ int mpi3mr_process_admin_reply_q(struct mpi3mr_ioc *mrioc)
}
do {
- if (mrioc->unrecoverable)
+ if (mrioc->unrecoverable || mrioc->io_admin_reset_sync)
break;
mrioc->admin_req_ci = le16_to_cpu(reply_desc->request_queue_ci);
@@ -554,7 +556,7 @@ int mpi3mr_process_op_reply_q(struct mpi3mr_ioc *mrioc,
}
do {
- if (mrioc->unrecoverable)
+ if (mrioc->unrecoverable || mrioc->io_admin_reset_sync)
break;
req_q_idx = le16_to_cpu(reply_desc->request_queue_id) - 1;
@@ -1302,7 +1304,7 @@ static int mpi3mr_issue_and_process_mur(struct mpi3mr_ioc *mrioc,
(ioc_config & MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC)))
retval = 0;
- ioc_info(mrioc, "Base IOC Sts/Config after %s MUR is (0x%x)/(0x%x)\n",
+ ioc_info(mrioc, "Base IOC Sts/Config after %s MUR is (0x%08x)/(0x%08x)\n",
(!retval) ? "successful" : "failed", ioc_status, ioc_config);
return retval;
}
@@ -1355,6 +1357,19 @@ mpi3mr_revalidate_factsdata(struct mpi3mr_ioc *mrioc)
"\tcontroller while sas transport support is enabled at the\n"
"\tdriver, please reboot the system or reload the driver\n");
+ if (mrioc->seg_tb_support) {
+ if (!(mrioc->facts.ioc_capabilities &
+ MPI3_IOCFACTS_CAPABILITY_SEG_DIAG_TRACE_SUPPORTED)) {
+ ioc_err(mrioc,
+ "critical error: previously enabled segmented trace\n"
+ " buffer capability is disabled after reset. Please\n"
+ " update the firmware or reboot the system or\n"
+ " reload the driver to enable trace diag buffer\n");
+ mrioc->diag_buffers[0].disabled_after_reset = true;
+ } else
+ mrioc->diag_buffers[0].disabled_after_reset = false;
+ }
+
if (mrioc->facts.max_devhandle > mrioc->dev_handle_bitmap_bits) {
removepend_bitmap = bitmap_zalloc(mrioc->facts.max_devhandle,
GFP_KERNEL);
@@ -1717,7 +1732,7 @@ static int mpi3mr_issue_reset(struct mpi3mr_ioc *mrioc, u16 reset_type,
ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
ioc_status = readl(&mrioc->sysif_regs->ioc_status);
ioc_info(mrioc,
- "ioc_status/ioc_onfig after %s reset is (0x%x)/(0x%x)\n",
+ "ioc_status/ioc_config after %s reset is (0x%08x)/(0x%08x)\n",
(!retval)?"successful":"failed", ioc_status,
ioc_config);
if (retval)
@@ -2104,15 +2119,22 @@ static int mpi3mr_create_op_reply_q(struct mpi3mr_ioc *mrioc, u16 qidx)
}
reply_qid = qidx + 1;
- op_reply_q->num_replies = MPI3MR_OP_REP_Q_QD;
- if ((mrioc->pdev->device == MPI3_MFGPAGE_DEVID_SAS4116) &&
- !mrioc->pdev->revision)
- op_reply_q->num_replies = MPI3MR_OP_REP_Q_QD4K;
+
+ if (mrioc->pdev->device == MPI3_MFGPAGE_DEVID_SAS4116) {
+ if (mrioc->pdev->revision)
+ op_reply_q->num_replies = MPI3MR_OP_REP_Q_QD;
+ else
+ op_reply_q->num_replies = MPI3MR_OP_REP_Q_QD4K;
+ } else
+ op_reply_q->num_replies = MPI3MR_OP_REP_Q_QD2K;
+
op_reply_q->ci = 0;
op_reply_q->ephase = 1;
atomic_set(&op_reply_q->pend_ios, 0);
atomic_set(&op_reply_q->in_use, 0);
op_reply_q->enable_irq_poll = false;
+ op_reply_q->qfull_watermark =
+ op_reply_q->num_replies - (MPI3MR_THRESHOLD_REPLY_COUNT * 2);
if (!op_reply_q->q_segments) {
retval = mpi3mr_alloc_op_reply_q_segments(mrioc, qidx);
@@ -2416,8 +2438,10 @@ int mpi3mr_op_request_post(struct mpi3mr_ioc *mrioc,
void *segment_base_addr;
u16 req_sz = mrioc->facts.op_req_sz;
struct segments *segments = op_req_q->q_segments;
+ struct op_reply_qinfo *op_reply_q = NULL;
reply_qidx = op_req_q->reply_qid - 1;
+ op_reply_q = mrioc->op_reply_qinfo + reply_qidx;
if (mrioc->unrecoverable)
return -EFAULT;
@@ -2448,6 +2472,15 @@ int mpi3mr_op_request_post(struct mpi3mr_ioc *mrioc,
goto out;
}
+ /* Reply queue is nearing to get full, push back IOs to SML */
+ if ((mrioc->prevent_reply_qfull == true) &&
+ (atomic_read(&op_reply_q->pend_ios) >
+ (op_reply_q->qfull_watermark))) {
+ atomic_inc(&mrioc->reply_qfull_count);
+ retval = -EAGAIN;
+ goto out;
+ }
+
segment_base_addr = segments[pi / op_req_q->segment_qd].segment;
req_entry = (u8 *)segment_base_addr +
((pi % op_req_q->segment_qd) * req_sz);
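
Returning -EAGAIN from the request-post path lets the caller push the I/O back to the SCSI midlayer (as the in-code comment notes) instead of risking a reply queue overflow. The check itself reduces to a watermark comparison against the value set at queue-creation time; a condensed sketch under that assumption (helper name is illustrative):

	/*
	 * Illustrative predicate: true when the pending replies on a queue
	 * have crossed the qfull watermark set when the queue was created.
	 */
	static inline bool mpi3mr_reply_q_nearly_full(struct op_reply_qinfo *op_reply_q)
	{
		return atomic_read(&op_reply_q->pend_ios) > op_reply_q->qfull_watermark;
	}
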
@@ -2726,7 +2759,16 @@ static void mpi3mr_watchdog_work(struct work_struct *work)
return;
}
- if (mrioc->ts_update_counter++ >= mrioc->ts_update_interval) {
+ if (atomic_read(&mrioc->admin_pend_isr)) {
+ ioc_err(mrioc, "Unprocessed admin ISR instance found\n"
+ "flush admin replies\n");
+ mpi3mr_process_admin_reply_q(mrioc);
+ }
+
+ if (!(mrioc->facts.ioc_capabilities &
+ MPI3_IOCFACTS_CAPABILITY_NON_SUPERVISOR_IOC) &&
+ (mrioc->ts_update_counter++ >= mrioc->ts_update_interval)) {
+
mrioc->ts_update_counter = 0;
mpi3mr_sync_timestamp(mrioc);
}
@@ -3091,6 +3133,9 @@ static void mpi3mr_process_factsdata(struct mpi3mr_ioc *mrioc,
mrioc->facts.dma_mask = (facts_flags &
MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_MASK) >>
MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_SHIFT;
+ mrioc->facts.dma_mask = (facts_flags &
+ MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_MASK) >>
+ MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_SHIFT;
mrioc->facts.protocol_flags = facts_data->protocol_flags;
mrioc->facts.mpi_version = le32_to_cpu(facts_data->mpi_version.word);
mrioc->facts.max_reqs = le16_to_cpu(facts_data->max_outstanding_requests);
@@ -4214,6 +4259,13 @@ retry_init:
mrioc->shost->transportt = mpi3mr_transport_template;
}
+ if (mrioc->facts.max_req_limit)
+ mrioc->prevent_reply_qfull = true;
+
+ if (mrioc->facts.ioc_capabilities &
+ MPI3_IOCFACTS_CAPABILITY_SEG_DIAG_TRACE_SUPPORTED)
+ mrioc->seg_tb_support = true;
+
mrioc->reply_sz = mrioc->facts.reply_sz;
retval = mpi3mr_check_reset_dma_mask(mrioc);
@@ -4370,6 +4422,7 @@ retry_init:
goto out_failed_noretry;
}
+ mrioc->io_admin_reset_sync = 0;
if (is_resume || mrioc->block_on_pci_err) {
dprint_reset(mrioc, "setting up single ISR\n");
retval = mpi3mr_setup_isr(mrioc, 1);
@@ -4671,7 +4724,7 @@ void mpi3mr_memset_buffers(struct mpi3mr_ioc *mrioc)
*/
void mpi3mr_free_mem(struct mpi3mr_ioc *mrioc)
{
- u16 i;
+ u16 i, j;
struct mpi3mr_intr_info *intr_info;
struct diag_buffer_desc *diag_buffer;
@@ -4806,6 +4859,26 @@ void mpi3mr_free_mem(struct mpi3mr_ioc *mrioc)
for (i = 0; i < MPI3MR_MAX_NUM_HDB; i++) {
diag_buffer = &mrioc->diag_buffers[i];
+ if ((i == 0) && mrioc->seg_tb_support) {
+ if (mrioc->trace_buf_pool) {
+ for (j = 0; j < mrioc->num_tb_segs; j++) {
+ if (mrioc->trace_buf[j].segment) {
+ dma_pool_free(mrioc->trace_buf_pool,
+ mrioc->trace_buf[j].segment,
+ mrioc->trace_buf[j].segment_dma);
+ mrioc->trace_buf[j].segment = NULL;
+ }
+
+ mrioc->trace_buf[j].segment = NULL;
+ }
+ dma_pool_destroy(mrioc->trace_buf_pool);
+ mrioc->trace_buf_pool = NULL;
+ }
+
+ kfree(mrioc->trace_buf);
+ mrioc->trace_buf = NULL;
+ diag_buffer->size = sizeof(u64) * mrioc->num_tb_segs;
+ }
if (diag_buffer->addr) {
dma_free_coherent(&mrioc->pdev->dev,
diag_buffer->size, diag_buffer->addr,
@@ -4883,7 +4956,7 @@ static void mpi3mr_issue_ioc_shutdown(struct mpi3mr_ioc *mrioc)
}
ioc_info(mrioc,
- "Base IOC Sts/Config after %s shutdown is (0x%x)/(0x%x)\n",
+ "Base IOC Sts/Config after %s shutdown is (0x%08x)/(0x%08x)\n",
(!retval) ? "successful" : "failed", ioc_status,
ioc_config);
}
@@ -5229,6 +5302,55 @@ cleanup_drv_cmd:
}
/**
+ * mpi3mr_check_op_admin_proc - Check operational and admin reply processing
+ * @mrioc: Adapter instance reference
+ *
+ * Check if any of the operation reply queues
+ * or the admin reply queue are currently in use.
+ * If any queue is in use, this function waits for
+ * a maximum of 10 seconds for them to become available.
+ *
+ * Return: 0 on success, non-zero on failure.
+ */
+static int mpi3mr_check_op_admin_proc(struct mpi3mr_ioc *mrioc)
+{
+
+ u16 timeout = 10 * 10;
+ u16 elapsed_time = 0;
+ bool op_admin_in_use = false;
+
+ do {
+ op_admin_in_use = false;
+
+ /* Check admin_reply queue first to exit early */
+ if (atomic_read(&mrioc->admin_reply_q_in_use) == 1)
+ op_admin_in_use = true;
+ else {
+ /* Check op_reply queues */
+ int i;
+
+ for (i = 0; i < mrioc->num_queues; i++) {
+ if (atomic_read(&mrioc->op_reply_qinfo[i].in_use) == 1) {
+ op_admin_in_use = true;
+ break;
+ }
+ }
+ }
+
+ if (!op_admin_in_use)
+ break;
+
+ msleep(100);
+
+ } while (++elapsed_time < timeout);
+
+ if (op_admin_in_use)
+ return 1;
+
+ return 0;
+}
+
+/**
* mpi3mr_soft_reset_handler - Reset the controller
* @mrioc: Adapter instance reference
* @reset_reason: Reset reason code
@@ -5308,6 +5430,7 @@ int mpi3mr_soft_reset_handler(struct mpi3mr_ioc *mrioc,
mpi3mr_wait_for_host_io(mrioc, MPI3MR_RESET_HOST_IOWAIT_TIMEOUT);
mpi3mr_ioc_disable_intr(mrioc);
+ mrioc->io_admin_reset_sync = 1;
if (snapdump) {
mpi3mr_set_diagsave(mrioc);
@@ -5335,6 +5458,16 @@ int mpi3mr_soft_reset_handler(struct mpi3mr_ioc *mrioc,
ioc_err(mrioc, "Failed to issue soft reset to the ioc\n");
goto out;
}
+
+ retval = mpi3mr_check_op_admin_proc(mrioc);
+ if (retval) {
+ ioc_err(mrioc, "Soft reset failed due to an Admin or I/O queue polling\n"
+ "thread still processing replies even after a 10 second\n"
+ "timeout. Marking the controller as unrecoverable!\n");
+
+ goto out;
+ }
+
if (mrioc->num_io_throttle_group !=
mrioc->facts.max_io_throttle_group) {
ioc_err(mrioc,
diff --git a/drivers/scsi/mpi3mr/mpi3mr_os.c b/drivers/scsi/mpi3mr/mpi3mr_os.c
index b9a51d3f2024..c186b892150f 100644
--- a/drivers/scsi/mpi3mr/mpi3mr_os.c
+++ b/drivers/scsi/mpi3mr/mpi3mr_os.c
@@ -3839,6 +3839,18 @@ int mpi3mr_issue_tm(struct mpi3mr_ioc *mrioc, u8 tm_type,
tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, handle);
if (scmd) {
+ if (tm_type == MPI3_SCSITASKMGMT_TASKTYPE_ABORT_TASK) {
+ cmd_priv = scsi_cmd_priv(scmd);
+ if (!cmd_priv)
+ goto out_unlock;
+
+ struct op_req_qinfo *op_req_q;
+
+ op_req_q = &mrioc->req_qinfo[cmd_priv->req_q_idx];
+ tm_req.task_host_tag = cpu_to_le16(cmd_priv->host_tag);
+ tm_req.task_request_queue_id =
+ cpu_to_le16(op_req_q->qid);
+ }
sdev = scmd->device;
sdev_priv_data = sdev->hostdata;
scsi_tgt_priv_data = ((sdev_priv_data) ?
@@ -4388,6 +4400,92 @@ out:
}
/**
+ * mpi3mr_eh_abort - Callback function for abort error handling
+ * @scmd: SCSI command reference
+ *
+ * Issues Abort Task Management if the command is in LLD scope,
+ * verifies whether it was aborted successfully, and returns the
+ * status accordingly.
+ *
+ * Return: SUCCESS if the abort was successful, otherwise FAILED
+ */
+static int mpi3mr_eh_abort(struct scsi_cmnd *scmd)
+{
+ struct mpi3mr_ioc *mrioc = shost_priv(scmd->device->host);
+ struct mpi3mr_stgt_priv_data *stgt_priv_data;
+ struct mpi3mr_sdev_priv_data *sdev_priv_data;
+ struct scmd_priv *cmd_priv;
+ u16 dev_handle, timeout = MPI3MR_ABORTTM_TIMEOUT;
+ u8 resp_code = 0;
+ int retval = FAILED, ret = 0;
+ struct request *rq = scsi_cmd_to_rq(scmd);
+ unsigned long scmd_age_ms = jiffies_to_msecs(jiffies - scmd->jiffies_at_alloc);
+ unsigned long scmd_age_sec = scmd_age_ms / HZ;
+
+ sdev_printk(KERN_INFO, scmd->device,
+ "%s: attempting abort task for scmd(%p)\n", mrioc->name, scmd);
+
+ sdev_printk(KERN_INFO, scmd->device,
+ "%s: scmd(0x%p) is outstanding for %lus %lums, timeout %us, retries %d, allowed %d\n",
+ mrioc->name, scmd, scmd_age_sec, scmd_age_ms % HZ, rq->timeout / HZ,
+ scmd->retries, scmd->allowed);
+
+ scsi_print_command(scmd);
+
+ sdev_priv_data = scmd->device->hostdata;
+ if (!sdev_priv_data || !sdev_priv_data->tgt_priv_data) {
+ sdev_printk(KERN_INFO, scmd->device,
+ "%s: Device not available, Skip issuing abort task\n",
+ mrioc->name);
+ retval = SUCCESS;
+ goto out;
+ }
+
+ stgt_priv_data = sdev_priv_data->tgt_priv_data;
+ dev_handle = stgt_priv_data->dev_handle;
+
+ cmd_priv = scsi_cmd_priv(scmd);
+ if (!cmd_priv->in_lld_scope ||
+ cmd_priv->host_tag == MPI3MR_HOSTTAG_INVALID) {
+ sdev_printk(KERN_INFO, scmd->device,
+ "%s: scmd (0x%p) not in LLD scope, Skip issuing Abort Task\n",
+ mrioc->name, scmd);
+ retval = SUCCESS;
+ goto out;
+ }
+
+ if (stgt_priv_data->dev_removed) {
+ sdev_printk(KERN_INFO, scmd->device,
+ "%s: Device (handle = 0x%04x) removed, Skip issuing Abort Task\n",
+ mrioc->name, dev_handle);
+ retval = FAILED;
+ goto out;
+ }
+
+ ret = mpi3mr_issue_tm(mrioc, MPI3_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
+ dev_handle, sdev_priv_data->lun_id, MPI3MR_HOSTTAG_BLK_TMS,
+ timeout, &mrioc->host_tm_cmds, &resp_code, scmd);
+
+ if (ret)
+ goto out;
+
+ if (cmd_priv->in_lld_scope) {
+ sdev_printk(KERN_INFO, scmd->device,
+ "%s: Abort task failed. scmd (0x%p) was not terminated\n",
+ mrioc->name, scmd);
+ goto out;
+ }
+
+ retval = SUCCESS;
+out:
+ sdev_printk(KERN_INFO, scmd->device,
+ "%s: Abort Task %s for scmd (0x%p)\n", mrioc->name,
+ ((retval == SUCCESS) ? "SUCCEEDED" : "FAILED"), scmd);
+
+ return retval;
+}
+
+/**
* mpi3mr_scan_start - Scan start callback handler
* @shost: SCSI host reference
*
@@ -5069,6 +5167,7 @@ static const struct scsi_host_template mpi3mr_driver_template = {
.scan_finished = mpi3mr_scan_finished,
.scan_start = mpi3mr_scan_start,
.change_queue_depth = mpi3mr_change_queue_depth,
+ .eh_abort_handler = mpi3mr_eh_abort,
.eh_device_reset_handler = mpi3mr_eh_dev_reset,
.eh_target_reset_handler = mpi3mr_eh_target_reset,
.eh_bus_reset_handler = mpi3mr_eh_bus_reset,
@@ -5803,7 +5902,7 @@ static const struct pci_device_id mpi3mr_pci_id_table[] = {
};
MODULE_DEVICE_TABLE(pci, mpi3mr_pci_id_table);
-static struct pci_error_handlers mpi3mr_err_handler = {
+static const struct pci_error_handlers mpi3mr_err_handler = {
.error_detected = mpi3mr_pcierr_error_detected,
.mmio_enabled = mpi3mr_pcierr_mmio_enabled,
.slot_reset = mpi3mr_pcierr_slot_reset,
diff --git a/drivers/scsi/mpi3mr/mpi3mr_transport.c b/drivers/scsi/mpi3mr/mpi3mr_transport.c
index 0ba9e6a6a13c..c8d6ced5640e 100644
--- a/drivers/scsi/mpi3mr/mpi3mr_transport.c
+++ b/drivers/scsi/mpi3mr/mpi3mr_transport.c
@@ -105,10 +105,10 @@ struct rep_manu_reply {
u8 reserved0[2];
u8 sas_format;
u8 reserved2[3];
- u8 vendor_id[SAS_EXPANDER_VENDOR_ID_LEN];
- u8 product_id[SAS_EXPANDER_PRODUCT_ID_LEN];
- u8 product_rev[SAS_EXPANDER_PRODUCT_REV_LEN];
- u8 component_vendor_id[SAS_EXPANDER_COMPONENT_VENDOR_ID_LEN];
+ u8 vendor_id[SAS_EXPANDER_VENDOR_ID_LEN] __nonstring;
+ u8 product_id[SAS_EXPANDER_PRODUCT_ID_LEN] __nonstring;
+ u8 product_rev[SAS_EXPANDER_PRODUCT_REV_LEN] __nonstring;
+ u8 component_vendor_id[SAS_EXPANDER_COMPONENT_VENDOR_ID_LEN] __nonstring;
u16 component_id;
u8 component_revision_id;
u8 reserved3;
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2.h b/drivers/scsi/mpt3sas/mpi/mpi2.h
index 6de35b32223c..b181b113fc80 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2.h
@@ -125,6 +125,12 @@
* 06-24-19 02.00.55 Bumped MPI2_HEADER_VERSION_UNIT
* 08-01-19 02.00.56 Bumped MPI2_HEADER_VERSION_UNIT
* 10-02-19 02.00.57 Bumped MPI2_HEADER_VERSION_UNIT
+ * 07-20-20 02.00.58 Bumped MPI2_HEADER_VERSION_UNIT
+ * 03-30-21 02.00.59 Bumped MPI2_HEADER_VERSION_UNIT
+ * 06-03-22 02.00.60 Bumped MPI2_HEADER_VERSION_UNIT
+ * 09-20-23 02.00.61 Bumped MPI2_HEADER_VERSION_UNIT
+ * 09-13-24 02.00.62 Bumped MPI2_HEADER_VERSION_UNIT
+ * Added MPI2_FUNCTION_MCTP_PASSTHROUGH
* --------------------------------------------------------------------------
*/
@@ -165,7 +171,7 @@
/* Unit and Dev versioning for this MPI header set */
-#define MPI2_HEADER_VERSION_UNIT (0x39)
+#define MPI2_HEADER_VERSION_UNIT (0x3E)
#define MPI2_HEADER_VERSION_DEV (0x00)
#define MPI2_HEADER_VERSION_UNIT_MASK (0xFF00)
#define MPI2_HEADER_VERSION_UNIT_SHIFT (8)
@@ -669,6 +675,7 @@ typedef union _MPI2_REPLY_DESCRIPTORS_UNION {
#define MPI2_FUNCTION_PWR_MGMT_CONTROL (0x30)
#define MPI2_FUNCTION_SEND_HOST_MESSAGE (0x31)
#define MPI2_FUNCTION_NVME_ENCAPSULATED (0x33)
+#define MPI2_FUNCTION_MCTP_PASSTHROUGH (0x34)
#define MPI2_FUNCTION_MIN_PRODUCT_SPECIFIC (0xF0)
#define MPI2_FUNCTION_MAX_PRODUCT_SPECIFIC (0xFF)
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h b/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h
index 587f7d248219..02bf26ca976e 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h
@@ -251,6 +251,7 @@
* 12-17-18 02.00.47 Swap locations of Slotx2 and Slotx4 in ManPage 7.
* 08-01-19 02.00.49 Add MPI26_MANPAGE7_FLAG_X2_X4_SLOT_INFO_VALID
* Add MPI26_IOUNITPAGE1_NVME_WRCACHE_SHIFT
+ * 09-13-24 02.00.50 Added PCIe 32 GT/s link rate
*/
#ifndef MPI2_CNFG_H
@@ -606,7 +607,7 @@ typedef struct _MPI2_CONFIG_REPLY {
typedef struct _MPI2_CONFIG_PAGE_MAN_0 {
MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */
- U8 ChipName[16]; /*0x04 */
+ U8 ChipName[16] __nonstring; /*0x04 */
U8 ChipRevision[8]; /*0x14 */
U8 BoardName[16]; /*0x1C */
U8 BoardAssembly[16]; /*0x2C */
@@ -1121,6 +1122,7 @@ typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_7 {
#define MPI2_IOUNITPAGE7_PCIE_SPEED_5_0_GBPS (0x01)
#define MPI2_IOUNITPAGE7_PCIE_SPEED_8_0_GBPS (0x02)
#define MPI2_IOUNITPAGE7_PCIE_SPEED_16_0_GBPS (0x03)
+#define MPI2_IOUNITPAGE7_PCIE_SPEED_32_0_GBPS (0x04)
/*defines for IO Unit Page 7 ProcessorState field */
#define MPI2_IOUNITPAGE7_PSTATE_MASK_SECOND (0x0000000F)
@@ -2301,6 +2303,7 @@ typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_1 {
#define MPI2_SASIOUNIT1_CONTROL_CLEAR_AFFILIATION (0x0001)
/*values for SAS IO Unit Page 1 AdditionalControlFlags */
+#define MPI2_SASIOUNIT1_ACONTROL_PROD_SPECIFIC_1 (0x8000)
#define MPI2_SASIOUNIT1_ACONTROL_DA_PERSIST_CONNECT (0x0100)
#define MPI2_SASIOUNIT1_ACONTROL_MULTI_PORT_DOMAIN_ILLEGAL (0x0080)
#define MPI2_SASIOUNIT1_ACONTROL_SATA_ASYNCHROUNOUS_NOTIFICATION (0x0040)
@@ -3591,6 +3594,7 @@ typedef struct _MPI2_CONFIG_PAGE_EXT_MAN_PS {
#define MPI26_PCIE_NEG_LINK_RATE_5_0 (0x03)
#define MPI26_PCIE_NEG_LINK_RATE_8_0 (0x04)
#define MPI26_PCIE_NEG_LINK_RATE_16_0 (0x05)
+#define MPI26_PCIE_NEG_LINK_RATE_32_0 (0x06)
/****************************************************************************
@@ -3700,6 +3704,7 @@ typedef struct _MPI26_CONFIG_PAGE_PIOUNIT_1 {
#define MPI26_PCIEIOUNIT1_MAX_RATE_5_0 (0x30)
#define MPI26_PCIEIOUNIT1_MAX_RATE_8_0 (0x40)
#define MPI26_PCIEIOUNIT1_MAX_RATE_16_0 (0x50)
+#define MPI26_PCIEIOUNIT1_MAX_RATE_32_0 (0x60)
/*values for PCIe IO Unit Page 1 DMDReportPCIe */
#define MPI26_PCIEIOUNIT1_DMDRPT_UNIT_MASK (0x80)
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h b/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h
index d92852591134..1a279c6e1a9f 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h
@@ -179,6 +179,7 @@
* Added MPI26_IOCFACTS_CAPABILITY_COREDUMP_ENABLED
* Added MPI2_FW_DOWNLOAD_ITYPE_COREDUMP
* Added MPI2_FW_UPLOAD_ITYPE_COREDUMP
+ * 9-13-24 02.00.39 Added MPI26_MCTP_PASSTHROUGH messages
* --------------------------------------------------------------------------
*/
@@ -382,6 +383,7 @@ typedef struct _MPI2_IOC_FACTS_REPLY {
/*ProductID field uses MPI2_FW_HEADER_PID_ */
/*IOCCapabilities */
+#define MPI26_IOCFACTS_CAPABILITY_MCTP_PASSTHRU (0x00800000)
#define MPI26_IOCFACTS_CAPABILITY_COREDUMP_ENABLED (0x00200000)
#define MPI26_IOCFACTS_CAPABILITY_PCIE_SRIOV (0x00100000)
#define MPI26_IOCFACTS_CAPABILITY_ATOMIC_REQ (0x00080000)
@@ -1798,5 +1800,57 @@ typedef struct _MPI26_IOUNIT_CONTROL_REPLY {
Mpi26IoUnitControlReply_t,
*pMpi26IoUnitControlReply_t;
+/****************************************************************************
+ * MCTP Passthrough messages (MPI v2.6 and later only.)
+ ****************************************************************************/
+
+/* MCTP Passthrough Request Message */
+typedef struct _MPI26_MCTP_PASSTHROUGH_REQUEST {
+ U8 MsgContext; /* 0x00 */
+ U8 Reserved1[2]; /* 0x01 */
+ U8 Function; /* 0x03 */
+ U8 Reserved2[3]; /* 0x04 */
+ U8 MsgFlags; /* 0x07 */
+ U8 VP_ID; /* 0x08 */
+ U8 VF_ID; /* 0x09 */
+ U16 Reserved3; /* 0x0A */
+ U32 Reserved4; /* 0x0C */
+ U8 Flags; /* 0x10 */
+ U8 Reserved5[3]; /* 0x11 */
+ U32 Reserved6; /* 0x14 */
+ U32 H2DLength; /* 0x18 */
+ U32 D2HLength; /* 0x1C */
+ MPI25_SGE_IO_UNION H2DSGL; /* 0x20 */
+ MPI25_SGE_IO_UNION D2HSGL; /* 0x30 */
+} MPI26_MCTP_PASSTHROUGH_REQUEST,
+ *PTR_MPI26_MCTP_PASSTHROUGH_REQUEST,
+ Mpi26MctpPassthroughRequest_t,
+ *pMpi26MctpPassthroughRequest_t;
+
+/* values for the MsgContext field */
+#define MPI26_MCTP_MSG_CONEXT_UNUSED (0x00)
+
+/* values for the Flags field */
+#define MPI26_MCTP_FLAGS_MSG_FORMAT_MPT (0x01)
+
+/* MCTP Passthrough Reply Message */
+typedef struct _MPI26_MCTP_PASSTHROUGH_REPLY {
+ U8 MsgContext; /* 0x00 */
+ U8 Reserved1; /* 0x01 */
+ U8 MsgLength; /* 0x02 */
+ U8 Function; /* 0x03 */
+ U8 Reserved2[3]; /* 0x04 */
+ U8 MsgFlags; /* 0x07 */
+ U8 VP_ID; /* 0x08 */
+ U8 VF_ID; /* 0x09 */
+ U16 Reserved3; /* 0x0A */
+ U16 Reserved4; /* 0x0C */
+ U16 IOCStatus; /* 0x0E */
+ U32 IOCLogInfo; /* 0x10 */
+ U32 ResponseDataLength; /* 0x14 */
+} MPI26_MCTP_PASSTHROUGH_REPLY,
+ *PTR_MPI26_MCTP_PASSTHROUGH_REPLY,
+ Mpi26MctpPassthroughReply_t,
+ *pMpi26MctpPassthroughReply_t;
#endif
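
The request carries separate host-to-device and device-to-host lengths plus two IEEE SGLs that the driver fills in later. A minimal sketch of populating the fixed fields, assuming the header above is included (the function name and values are illustrative, not part of the patch):

	/*
	 * Hypothetical example: fill the fixed fields of an MCTP passthrough
	 * request; the H2D/D2H SGLs are built separately by the driver.
	 */
	static void example_fill_mctp_req(Mpi26MctpPassthroughRequest_t *req,
		u32 h2d_len, u32 d2h_len)
	{
		memset(req, 0, sizeof(*req));
		req->Function = MPI2_FUNCTION_MCTP_PASSTHROUGH;
		req->MsgContext = MPI26_MCTP_MSG_CONEXT_UNUSED;
		req->Flags = MPI26_MCTP_FLAGS_MSG_FORMAT_MPT;
		req->H2DLength = h2d_len;
		req->D2HLength = d2h_len;
	}
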
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index dc43cfa83088..bd3efa5b46c7 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -1202,6 +1202,11 @@ _base_sas_ioc_info(struct MPT3SAS_ADAPTER *ioc, MPI2DefaultReply_t *mpi_reply,
ioc->sge_size;
func_str = "nvme_encapsulated";
break;
+ case MPI2_FUNCTION_MCTP_PASSTHROUGH:
+ frame_sz = sizeof(Mpi26MctpPassthroughRequest_t) +
+ ioc->sge_size;
+ func_str = "mctp_passthru";
+ break;
default:
frame_sz = 32;
func_str = "unknown";
@@ -4874,6 +4879,12 @@ _base_display_ioc_capabilities(struct MPT3SAS_ADAPTER *ioc)
i++;
}
+ if (ioc->facts.IOCCapabilities &
+ MPI26_IOCFACTS_CAPABILITY_MCTP_PASSTHRU) {
+ pr_cont("%sMCTP Passthru", i ? "," : "");
+ i++;
+ }
+
iounit_pg1_flags = le32_to_cpu(ioc->iounit_pg1.Flags);
if (!(iounit_pg1_flags & MPI2_IOUNITPAGE1_NATIVE_COMMAND_Q_DISABLE)) {
pr_cont("%sNCQ", i ? "," : "");
@@ -8018,7 +8029,7 @@ _base_diag_reset(struct MPT3SAS_ADAPTER *ioc)
mutex_lock(&ioc->hostdiag_unlock_mutex);
if (mpt3sas_base_unlock_and_get_host_diagnostic(ioc, &host_diagnostic))
- goto out;
+ goto unlock;
hcb_size = ioc->base_readl(&ioc->chip->HCBSize);
drsprintk(ioc, ioc_info(ioc, "diag reset: issued\n"));
@@ -8038,7 +8049,7 @@ _base_diag_reset(struct MPT3SAS_ADAPTER *ioc)
ioc_info(ioc,
"Invalid host diagnostic register value\n");
_base_dump_reg_set(ioc);
- goto out;
+ goto unlock;
}
if (!(host_diagnostic & MPI2_DIAG_RESET_ADAPTER))
break;
@@ -8074,17 +8085,19 @@ _base_diag_reset(struct MPT3SAS_ADAPTER *ioc)
ioc_err(ioc, "%s: failed going to ready state (ioc_state=0x%x)\n",
__func__, ioc_state);
_base_dump_reg_set(ioc);
- goto out;
+ goto fail;
}
pci_cfg_access_unlock(ioc->pdev);
ioc_info(ioc, "diag reset: SUCCESS\n");
return 0;
- out:
+unlock:
+ mutex_unlock(&ioc->hostdiag_unlock_mutex);
+
+fail:
pci_cfg_access_unlock(ioc->pdev);
ioc_err(ioc, "diag reset: FAILED\n");
- mutex_unlock(&ioc->hostdiag_unlock_mutex);
return -EFAULT;
}
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h
index d8d1a64b4764..939141cde3ca 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.h
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.h
@@ -77,8 +77,8 @@
#define MPT3SAS_DRIVER_NAME "mpt3sas"
#define MPT3SAS_AUTHOR "Avago Technologies <MPT-FusionLinux.pdl@avagotech.com>"
#define MPT3SAS_DESCRIPTION "LSI MPT Fusion SAS 3.0 Device Driver"
-#define MPT3SAS_DRIVER_VERSION "51.100.00.00"
-#define MPT3SAS_MAJOR_VERSION 51
+#define MPT3SAS_DRIVER_VERSION "52.100.00.00"
+#define MPT3SAS_MAJOR_VERSION 52
#define MPT3SAS_MINOR_VERSION 100
#define MPT3SAS_BUILD_VERSION 00
#define MPT3SAS_RELEASE_VERSION 00
@@ -1858,9 +1858,6 @@ int mpt3sas_config_get_manufacturing_pg0(struct MPT3SAS_ADAPTER *ioc,
int mpt3sas_config_get_manufacturing_pg1(struct MPT3SAS_ADAPTER *ioc,
Mpi2ConfigReply_t *mpi_reply, Mpi2ManufacturingPage1_t *config_page);
-int mpt3sas_config_get_manufacturing_pg7(struct MPT3SAS_ADAPTER *ioc,
- Mpi2ConfigReply_t *mpi_reply, Mpi2ManufacturingPage7_t *config_page,
- u16 sz);
int mpt3sas_config_get_manufacturing_pg10(struct MPT3SAS_ADAPTER *ioc,
Mpi2ConfigReply_t *mpi_reply,
struct Mpi2ManufacturingPage10_t *config_page);
@@ -1887,9 +1884,6 @@ int mpt3sas_config_get_iounit_pg0(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t
int mpt3sas_config_get_sas_device_pg0(struct MPT3SAS_ADAPTER *ioc,
Mpi2ConfigReply_t *mpi_reply, Mpi2SasDevicePage0_t *config_page,
u32 form, u32 handle);
-int mpt3sas_config_get_sas_device_pg1(struct MPT3SAS_ADAPTER *ioc,
- Mpi2ConfigReply_t *mpi_reply, Mpi2SasDevicePage1_t *config_page,
- u32 form, u32 handle);
int mpt3sas_config_get_pcie_device_pg0(struct MPT3SAS_ADAPTER *ioc,
Mpi2ConfigReply_t *mpi_reply, Mpi26PCIeDevicePage0_t *config_page,
u32 form, u32 handle);
diff --git a/drivers/scsi/mpt3sas/mpt3sas_config.c b/drivers/scsi/mpt3sas/mpt3sas_config.c
index 2e88f456fc34..45ac853e1289 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_config.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_config.c
@@ -577,44 +577,6 @@ mpt3sas_config_get_manufacturing_pg1(struct MPT3SAS_ADAPTER *ioc,
}
/**
- * mpt3sas_config_get_manufacturing_pg7 - obtain manufacturing page 7
- * @ioc: per adapter object
- * @mpi_reply: reply mf payload returned from firmware
- * @config_page: contents of the config page
- * @sz: size of buffer passed in config_page
- * Context: sleep.
- *
- * Return: 0 for success, non-zero for failure.
- */
-int
-mpt3sas_config_get_manufacturing_pg7(struct MPT3SAS_ADAPTER *ioc,
- Mpi2ConfigReply_t *mpi_reply, Mpi2ManufacturingPage7_t *config_page,
- u16 sz)
-{
- Mpi2ConfigRequest_t mpi_request;
- int r;
-
- memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
- mpi_request.Function = MPI2_FUNCTION_CONFIG;
- mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
- mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_MANUFACTURING;
- mpi_request.Header.PageNumber = 7;
- mpi_request.Header.PageVersion = MPI2_MANUFACTURING7_PAGEVERSION;
- ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
- r = _config_request(ioc, &mpi_request, mpi_reply,
- MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
- if (r)
- goto out;
-
- mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
- r = _config_request(ioc, &mpi_request, mpi_reply,
- MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
- sz);
- out:
- return r;
-}
-
-/**
* mpt3sas_config_get_manufacturing_pg10 - obtain manufacturing page 10
* @ioc: per adapter object
* @mpi_reply: reply mf payload returned from firmware
@@ -1214,47 +1176,6 @@ mpt3sas_config_get_sas_device_pg0(struct MPT3SAS_ADAPTER *ioc,
}
/**
- * mpt3sas_config_get_sas_device_pg1 - obtain sas device page 1
- * @ioc: per adapter object
- * @mpi_reply: reply mf payload returned from firmware
- * @config_page: contents of the config page
- * @form: GET_NEXT_HANDLE or HANDLE
- * @handle: device handle
- * Context: sleep.
- *
- * Return: 0 for success, non-zero for failure.
- */
-int
-mpt3sas_config_get_sas_device_pg1(struct MPT3SAS_ADAPTER *ioc,
- Mpi2ConfigReply_t *mpi_reply, Mpi2SasDevicePage1_t *config_page,
- u32 form, u32 handle)
-{
- Mpi2ConfigRequest_t mpi_request;
- int r;
-
- memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
- mpi_request.Function = MPI2_FUNCTION_CONFIG;
- mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
- mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
- mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_SAS_DEVICE;
- mpi_request.Header.PageVersion = MPI2_SASDEVICE1_PAGEVERSION;
- mpi_request.Header.PageNumber = 1;
- ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
- r = _config_request(ioc, &mpi_request, mpi_reply,
- MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
- if (r)
- goto out;
-
- mpi_request.PageAddress = cpu_to_le32(form | handle);
- mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
- r = _config_request(ioc, &mpi_request, mpi_reply,
- MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
- sizeof(*config_page));
- out:
- return r;
-}
-
-/**
* mpt3sas_config_get_pcie_device_pg0 - obtain pcie device page 0
* @ioc: per adapter object
* @mpi_reply: reply mf payload returned from firmware
diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.c b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
index 87784c96249a..063b10dd8251 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_ctl.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
@@ -186,6 +186,9 @@ _ctl_display_some_debug(struct MPT3SAS_ADAPTER *ioc, u16 smid,
case MPI2_FUNCTION_NVME_ENCAPSULATED:
desc = "nvme_encapsulated";
break;
+ case MPI2_FUNCTION_MCTP_PASSTHROUGH:
+ desc = "mctp_passthrough";
+ break;
}
if (!desc)
@@ -653,6 +656,40 @@ _ctl_set_task_mid(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command *karg,
}
/**
+ * _ctl_send_mctp_passthru_req - Send an MCTP passthru request
+ * @ioc: per adapter object
+ * @mctp_passthru_req: MPI MCTP passthru request from caller
+ * @psge: pointer to the H2DSGL
+ * @data_out_dma: DMA buffer for H2D SGL
+ * @data_out_sz: H2D length
+ * @data_in_dma: DMA buffer for D2H SGL
+ * @data_in_sz: D2H length
+ * @smid: SMID to submit the request
+ *
+ */
+static void
+_ctl_send_mctp_passthru_req(
+ struct MPT3SAS_ADAPTER *ioc,
+ Mpi26MctpPassthroughRequest_t *mctp_passthru_req, void *psge,
+ dma_addr_t data_out_dma, int data_out_sz,
+ dma_addr_t data_in_dma, int data_in_sz,
+ u16 smid)
+{
+ mctp_passthru_req->H2DLength = data_out_sz;
+ mctp_passthru_req->D2HLength = data_in_sz;
+
+ /* Build the H2D SGL from the data out buffer */
+ ioc->build_sg(ioc, psge, data_out_dma, data_out_sz, 0, 0);
+
+ psge += ioc->sge_size_ieee;
+
+ /* Build the D2H SGL for the data in buffer */
+ ioc->build_sg(ioc, psge, 0, 0, data_in_dma, data_in_sz);
+
+ ioc->put_smid_default(ioc, smid);
+}
+
+/**
* _ctl_do_mpt_command - main handler for MPT3COMMAND opcode
* @ioc: per adapter object
* @karg: (struct mpt3_ioctl_command)
@@ -679,6 +716,7 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
size_t data_in_sz = 0;
long ret;
u16 device_handle = MPT3SAS_INVALID_DEVICE_HANDLE;
+ int tm_ret;
issue_reset = 0;
@@ -792,6 +830,23 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
init_completion(&ioc->ctl_cmds.done);
switch (mpi_request->Function) {
+ case MPI2_FUNCTION_MCTP_PASSTHROUGH:
+ {
+ Mpi26MctpPassthroughRequest_t *mctp_passthru_req =
+ (Mpi26MctpPassthroughRequest_t *)request;
+
+ if (!(ioc->facts.IOCCapabilities & MPI26_IOCFACTS_CAPABILITY_MCTP_PASSTHRU)) {
+ ioc_err(ioc, "%s: MCTP Passthrough request not supported\n",
+ __func__);
+ mpt3sas_base_free_smid(ioc, smid);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ _ctl_send_mctp_passthru_req(ioc, mctp_passthru_req, psge, data_out_dma,
+ data_out_sz, data_in_dma, data_in_sz, smid);
+ break;
+ }
case MPI2_FUNCTION_NVME_ENCAPSULATED:
{
nvme_encap_request = (Mpi26NVMeEncapsulatedRequest_t *)request;
@@ -1120,18 +1175,25 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
if (pcie_device && (!ioc->tm_custom_handling) &&
(!(mpt3sas_scsih_is_pcie_scsi_device(
pcie_device->device_info))))
- mpt3sas_scsih_issue_locked_tm(ioc,
+ tm_ret = mpt3sas_scsih_issue_locked_tm(ioc,
le16_to_cpu(mpi_request->FunctionDependent1),
0, 0, 0,
MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0,
0, pcie_device->reset_timeout,
MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE);
else
- mpt3sas_scsih_issue_locked_tm(ioc,
+ tm_ret = mpt3sas_scsih_issue_locked_tm(ioc,
le16_to_cpu(mpi_request->FunctionDependent1),
0, 0, 0,
MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0,
0, 30, MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET);
+
+ if (tm_ret != SUCCESS) {
+ ioc_info(ioc,
+ "target reset failed, issue hard reset: handle (0x%04x)\n",
+ le16_to_cpu(mpi_request->FunctionDependent1));
+ mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
+ }
} else
mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
}
@@ -1200,6 +1262,8 @@ _ctl_getiocinfo(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
}
karg.bios_version = le32_to_cpu(ioc->bios_pg3.BiosVersion);
+ karg.driver_capability |= MPT3_IOCTL_IOCINFO_DRIVER_CAP_MCTP_PASSTHRU;
+
if (copy_to_user(arg, &karg, sizeof(karg))) {
pr_err("failure at %s:%d/%s()!\n",
__FILE__, __LINE__, __func__);
@@ -2787,6 +2851,217 @@ out_unlock_pciaccess:
}
/**
+ * _ctl_get_mpt_mctp_passthru_adapter - Traverse the IOC list and return the IOC
+ * at the dev_index position that supports MCTP passthru
+ * @dev_index: position in the mpt3sas_ioc_list to search for
+ *
+ * Return: pointer to the IOC on success, NULL if no matching device is found
+ */
+static struct MPT3SAS_ADAPTER *
+_ctl_get_mpt_mctp_passthru_adapter(int dev_index)
+{
+ struct MPT3SAS_ADAPTER *ioc = NULL;
+ int count = 0;
+
+ spin_lock(&gioc_lock);
+ /* Traverse the ioc list and return the IOC at position dev_index that supports MCTP passthru */
+ list_for_each_entry(ioc, &mpt3sas_ioc_list, list) {
+ if (ioc->facts.IOCCapabilities & MPI26_IOCFACTS_CAPABILITY_MCTP_PASSTHRU) {
+ if (count == dev_index) {
+ spin_unlock(&gioc_lock);
+ return ioc;
+ }
+ count++;
+ }
+ }
+ spin_unlock(&gioc_lock);
+
+ return NULL;
+}
+
+/**
+ * mpt3sas_get_device_count - Retrieve the count of MCTP passthrough
+ * capable devices managed by the driver.
+ *
+ * Returns number of devices that support MCTP passthrough.
+ */
+int
+mpt3sas_get_device_count(void)
+{
+ int count = 0;
+ struct MPT3SAS_ADAPTER *ioc = NULL;
+
+ spin_lock(&gioc_lock);
+ /* Traverse the ioc list and return the number of IOCs that support MCTP passthru */
+ list_for_each_entry(ioc, &mpt3sas_ioc_list, list)
+ if (ioc->facts.IOCCapabilities & MPI26_IOCFACTS_CAPABILITY_MCTP_PASSTHRU)
+ count++;
+
+ spin_unlock(&gioc_lock);
+
+ return count;
+}
+EXPORT_SYMBOL(mpt3sas_get_device_count);
+
+/**
+ * mpt3sas_send_mctp_passthru_req - Send an MPI MCTP passthrough command to
+ * firmware
+ * @command: The MPI MCTP passthrough command to send to firmware
+ *
+ * Returns 0 on success, anything else is an error.
+ */
+int mpt3sas_send_mctp_passthru_req(struct mpt3_passthru_command *command)
+{
+ struct MPT3SAS_ADAPTER *ioc;
+ MPI2RequestHeader_t *mpi_request = NULL, *request;
+ MPI2DefaultReply_t *mpi_reply;
+ Mpi26MctpPassthroughRequest_t *mctp_passthru_req;
+ u16 smid;
+ unsigned long timeout;
+ u8 issue_reset = 0;
+ u32 sz;
+ void *psge;
+ void *data_out = NULL;
+ dma_addr_t data_out_dma = 0;
+ size_t data_out_sz = 0;
+ void *data_in = NULL;
+ dma_addr_t data_in_dma = 0;
+ size_t data_in_sz = 0;
+ long ret;
+
+ /* Retrieve ioc from dev_index */
+ ioc = _ctl_get_mpt_mctp_passthru_adapter(command->dev_index);
+ if (!ioc)
+ return -ENODEV;
+
+ mutex_lock(&ioc->pci_access_mutex);
+ if (ioc->shost_recovery ||
+ ioc->pci_error_recovery || ioc->is_driver_loading ||
+ ioc->remove_host) {
+ ret = -EAGAIN;
+ goto unlock_pci_access;
+ }
+
+ /* Lock the ctl_cmds mutex to ensure a single ctl cmd is pending */
+ if (mutex_lock_interruptible(&ioc->ctl_cmds.mutex)) {
+ ret = -ERESTARTSYS;
+ goto unlock_pci_access;
+ }
+
+ if (ioc->ctl_cmds.status != MPT3_CMD_NOT_USED) {
+ ioc_err(ioc, "%s: ctl_cmd in use\n", __func__);
+ ret = -EAGAIN;
+ goto unlock_ctl_cmds;
+ }
+
+ ret = mpt3sas_wait_for_ioc(ioc, IOC_OPERATIONAL_WAIT_COUNT);
+ if (ret)
+ goto unlock_ctl_cmds;
+
+ mpi_request = (MPI2RequestHeader_t *)command->mpi_request;
+ if (mpi_request->Function != MPI2_FUNCTION_MCTP_PASSTHROUGH) {
+ ioc_err(ioc, "%s: Invalid request received, Function 0x%x\n",
+ __func__, mpi_request->Function);
+ ret = -EINVAL;
+ goto unlock_ctl_cmds;
+ }
+
+ /* Use first reserved smid for passthrough commands */
+ smid = ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT + 1;
+ ret = 0;
+ ioc->ctl_cmds.status = MPT3_CMD_PENDING;
+ memset(ioc->ctl_cmds.reply, 0, ioc->reply_sz);
+ request = mpt3sas_base_get_msg_frame(ioc, smid);
+ memset(request, 0, ioc->request_sz);
+ memcpy(request, command->mpi_request, sizeof(Mpi26MctpPassthroughRequest_t));
+ ioc->ctl_cmds.smid = smid;
+ data_out_sz = command->data_out_size;
+ data_in_sz = command->data_in_size;
+
+ /* obtain dma-able memory for data transfer */
+ if (data_out_sz) /* WRITE */ {
+ data_out = dma_alloc_coherent(&ioc->pdev->dev, data_out_sz,
+ &data_out_dma, GFP_ATOMIC);
+ if (!data_out) {
+ ret = -ENOMEM;
+ mpt3sas_base_free_smid(ioc, smid);
+ goto out;
+ }
+ memcpy(data_out, command->data_out_buf_ptr, data_out_sz);
+
+ }
+
+ if (data_in_sz) /* READ */ {
+ data_in = dma_alloc_coherent(&ioc->pdev->dev, data_in_sz,
+ &data_in_dma, GFP_ATOMIC);
+ if (!data_in) {
+ ret = -ENOMEM;
+ mpt3sas_base_free_smid(ioc, smid);
+ goto out;
+ }
+ }
+
+ psge = &((Mpi26MctpPassthroughRequest_t *)request)->H2DSGL;
+
+ init_completion(&ioc->ctl_cmds.done);
+
+ mctp_passthru_req = (Mpi26MctpPassthroughRequest_t *)request;
+
+ _ctl_send_mctp_passthru_req(ioc, mctp_passthru_req, psge, data_out_dma,
+ data_out_sz, data_in_dma, data_in_sz, smid);
+
+ timeout = command->timeout;
+ if (timeout < MPT3_IOCTL_DEFAULT_TIMEOUT)
+ timeout = MPT3_IOCTL_DEFAULT_TIMEOUT;
+
+ wait_for_completion_timeout(&ioc->ctl_cmds.done, timeout*HZ);
+ if (!(ioc->ctl_cmds.status & MPT3_CMD_COMPLETE)) {
+ mpt3sas_check_cmd_timeout(ioc,
+ ioc->ctl_cmds.status, mpi_request,
+ sizeof(Mpi26MctpPassthroughRequest_t) / 4, issue_reset);
+ goto issue_host_reset;
+ }
+
+ mpi_reply = ioc->ctl_cmds.reply;
+
+ /* copy out xdata to user */
+ if (data_in_sz)
+ memcpy(command->data_in_buf_ptr, data_in, data_in_sz);
+
+ /* copy out reply message frame to user */
+ if (command->max_reply_bytes) {
+ sz = min_t(u32, command->max_reply_bytes, ioc->reply_sz);
+ memcpy(command->reply_frame_buf_ptr, ioc->ctl_cmds.reply, sz);
+ }
+
+issue_host_reset:
+ if (issue_reset) {
+ ret = -ENODATA;
+ mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
+ }
+
+out:
+ /* free memory associated with sg buffers */
+ if (data_in)
+ dma_free_coherent(&ioc->pdev->dev, data_in_sz, data_in,
+ data_in_dma);
+
+ if (data_out)
+ dma_free_coherent(&ioc->pdev->dev, data_out_sz, data_out,
+ data_out_dma);
+
+ ioc->ctl_cmds.status = MPT3_CMD_NOT_USED;
+
+unlock_ctl_cmds:
+ mutex_unlock(&ioc->ctl_cmds.mutex);
+
+unlock_pci_access:
+ mutex_unlock(&ioc->pci_access_mutex);
+ return ret;
+
+}
+EXPORT_SYMBOL(mpt3sas_send_mctp_passthru_req);
+
+/**
* _ctl_ioctl - mpt3ctl main ioctl entry point (unlocked)
* @file: (struct file)
* @cmd: ioctl opcode
diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.h b/drivers/scsi/mpt3sas/mpt3sas_ctl.h
index 171709e91006..483e0549c02f 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_ctl.h
+++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.h
@@ -160,6 +160,9 @@ struct mpt3_ioctl_pci_info {
#define MPT3_IOCTL_INTERFACE_SAS35 (0x07)
#define MPT2_IOCTL_VERSION_LENGTH (32)
+/* Bits set for mpt3_ioctl_iocinfo.driver_cap */
+#define MPT3_IOCTL_IOCINFO_DRIVER_CAP_MCTP_PASSTHRU 0x1
+
/**
* struct mpt3_ioctl_iocinfo - generic controller info
* @hdr - generic header
@@ -175,6 +178,7 @@ struct mpt3_ioctl_pci_info {
* @driver_version - driver version - 32 ASCII characters
* @rsvd1 - reserved
* @scsi_id - scsi id of adapter 0
+ * @driver_capability - driver capabilities
* @rsvd2 - reserved
* @pci_information - pci info (2nd revision)
*/
@@ -192,7 +196,8 @@ struct mpt3_ioctl_iocinfo {
uint8_t driver_version[MPT2_IOCTL_VERSION_LENGTH];
uint8_t rsvd1;
uint8_t scsi_id;
- uint16_t rsvd2;
+ uint8_t driver_capability;
+ uint8_t rsvd2;
struct mpt3_ioctl_pci_info pci_information;
};
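A minimal user-space sketch of how an application could test the new capability bit; it assumes the existing /dev/mpt3ctl character device and the MPT3IOCINFO ioctl exposed by this header (both assumptions, not shown in this diff), and omits error handling:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include "mpt3sas_ctl.h"

int main(void)
{
	struct mpt3_ioctl_iocinfo karg;
	int fd = open("/dev/mpt3ctl", O_RDWR);	/* device node name assumed */

	if (fd < 0)
		return 1;
	memset(&karg, 0, sizeof(karg));
	karg.hdr.ioc_number = 0;		/* first adapter */
	karg.hdr.max_data_size = sizeof(karg);

	if (ioctl(fd, MPT3IOCINFO, &karg) == 0 &&
	    (karg.driver_capability & MPT3_IOCTL_IOCINFO_DRIVER_CAP_MCTP_PASSTHRU))
		printf("mpt3sas driver supports MCTP passthrough\n");
	return 0;
}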
@@ -458,4 +463,46 @@ struct mpt3_enable_diag_sbr_reload {
struct mpt3_ioctl_header hdr;
};
+/**
+ * struct mpt3_passthru_command - generic mpt firmware passthru command
+ * @dev_index - device index
+ * @timeout - command timeout in seconds. (if zero then use driver default
+ * value).
+ * @reply_frame_buf_ptr - MPI reply location
+ * @data_in_buf_ptr - destination for read
+ * @data_out_buf_ptr - data source for write
+ * @max_reply_bytes - maximum number of reply bytes to be sent to app.
+ * @data_in_size - number bytes for data transfer in (read)
+ * @data_out_size - number bytes for data transfer out (write)
+ * @mpi_request - request frame
+ */
+struct mpt3_passthru_command {
+ u8 dev_index;
+ uint32_t timeout;
+ void *reply_frame_buf_ptr;
+ void *data_in_buf_ptr;
+ void *data_out_buf_ptr;
+ uint32_t max_reply_bytes;
+ uint32_t data_in_size;
+ uint32_t data_out_size;
+ Mpi26MctpPassthroughRequest_t *mpi_request;
+};
+
+/*
+ * mpt3sas_get_device_count - Retrieve the count of MCTP passthrough
+ * capable devices managed by the driver.
+ *
+ * Returns number of devices that support MCTP passthrough.
+ */
+int mpt3sas_get_device_count(void);
+
+/*
+ * mpt3sas_send_mctp_passthru_req - Send an MPI MCTP passthrough command to
+ * firmware
+ * @command: The MPI MCTP passthrough command to send to firmware
+ *
+ * Returns 0 on success, anything else is an error.
+ */
+int mpt3sas_send_mctp_passthru_req(struct mpt3_passthru_command *command);
+
#endif /* MPT3SAS_CTL_H_INCLUDED */
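A minimal kernel-side sketch of how a consumer (for example, a hypothetical MCTP-over-PCIe binding driver) might drive the two new exports; it assumes the caller has already built the Mpi26MctpPassthroughRequest_t frame and allocated the data and reply buffers:

static int send_one_mctp_frame(Mpi26MctpPassthroughRequest_t *req,
			       void *out_buf, u32 out_len,
			       void *in_buf, u32 in_len,
			       void *reply_buf, u32 reply_len)
{
	struct mpt3_passthru_command cmd = {
		.dev_index		= 0,		/* first MCTP-capable IOC */
		.timeout		= 0,		/* 0 selects the driver default */
		.mpi_request		= req,
		.data_out_buf_ptr	= out_buf,	/* H2D payload */
		.data_out_size		= out_len,
		.data_in_buf_ptr	= in_buf,	/* D2H payload */
		.data_in_size		= in_len,
		.reply_frame_buf_ptr	= reply_buf,
		.max_reply_bytes	= reply_len,
	};

	/* Nothing to talk to if no adapter advertises the capability */
	if (!mpt3sas_get_device_count())
		return -ENODEV;

	return mpt3sas_send_mctp_passthru_req(&cmd);
}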
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index a456e5ec74d8..508861e88d9f 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -2703,7 +2703,7 @@ scsih_sdev_configure(struct scsi_device *sdev, struct queue_limits *lim)
ssp_target = 1;
if (sas_device->device_info &
MPI2_SAS_DEVICE_INFO_SEP) {
- sdev_printk(KERN_WARNING, sdev,
+ sdev_printk(KERN_INFO, sdev,
"set ignore_delay_remove for handle(0x%04x)\n",
sas_device_priv_data->sas_target->handle);
sas_device_priv_data->ignore_delay_remove = 1;
@@ -12710,7 +12710,7 @@ static const struct pci_device_id mpt3sas_pci_table[] = {
};
MODULE_DEVICE_TABLE(pci, mpt3sas_pci_table);
-static struct pci_error_handlers _mpt3sas_err_handler = {
+static const struct pci_error_handlers _mpt3sas_err_handler = {
.error_detected = scsih_pci_error_detected,
.mmio_enabled = scsih_pci_mmio_enabled,
.slot_reset = scsih_pci_slot_reset,
diff --git a/drivers/scsi/mpt3sas/mpt3sas_transport.c b/drivers/scsi/mpt3sas/mpt3sas_transport.c
index d84413b77d84..dc74ebc6405a 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_transport.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_transport.c
@@ -328,10 +328,10 @@ struct rep_manu_reply {
u8 reserved0[2];
u8 sas_format;
u8 reserved2[3];
- u8 vendor_id[SAS_EXPANDER_VENDOR_ID_LEN];
- u8 product_id[SAS_EXPANDER_PRODUCT_ID_LEN];
- u8 product_rev[SAS_EXPANDER_PRODUCT_REV_LEN];
- u8 component_vendor_id[SAS_EXPANDER_COMPONENT_VENDOR_ID_LEN];
+ u8 vendor_id[SAS_EXPANDER_VENDOR_ID_LEN] __nonstring;
+ u8 product_id[SAS_EXPANDER_PRODUCT_ID_LEN] __nonstring;
+ u8 product_rev[SAS_EXPANDER_PRODUCT_REV_LEN] __nonstring;
+ u8 component_vendor_id[SAS_EXPANDER_COMPONENT_VENDOR_ID_LEN] __nonstring;
u16 component_id;
u8 component_revision_id;
u8 reserved3;
diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c
index 1444b1f1c4c8..c4592de4fefc 100644
--- a/drivers/scsi/mvsas/mv_sas.c
+++ b/drivers/scsi/mvsas/mv_sas.c
@@ -151,16 +151,6 @@ static inline u8 mvs_assign_reg_set(struct mvs_info *mvi,
return MVS_CHIP_DISP->assign_reg_set(mvi, &dev->taskfileset);
}
-void mvs_phys_reset(struct mvs_info *mvi, u32 phy_mask, int hard)
-{
- u32 no;
- for_each_phy(phy_mask, phy_mask, no) {
- if (!(phy_mask & 1))
- continue;
- MVS_CHIP_DISP->phy_reset(mvi, no, hard);
- }
-}
-
int mvs_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
void *funcdata)
{
diff --git a/drivers/scsi/mvsas/mv_sas.h b/drivers/scsi/mvsas/mv_sas.h
index 19b01f7c4767..09ce3f2241f2 100644
--- a/drivers/scsi/mvsas/mv_sas.h
+++ b/drivers/scsi/mvsas/mv_sas.h
@@ -425,7 +425,6 @@ struct mvs_task_exec_info {
void mvs_get_sas_addr(void *buf, u32 buflen);
void mvs_iounmap(void __iomem *regs);
int mvs_ioremap(struct mvs_info *mvi, int bar, int bar_ex);
-void mvs_phys_reset(struct mvs_info *mvi, u32 phy_mask, int hard);
int mvs_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
void *funcdata);
void mvs_set_sas_addr(struct mvs_info *mvi, int port_id, u32 off_lo,
diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c
index c9539897048a..e87885cc701c 100644
--- a/drivers/scsi/qedi/qedi_main.c
+++ b/drivers/scsi/qedi/qedi_main.c
@@ -2876,7 +2876,7 @@ MODULE_DEVICE_TABLE(pci, qedi_pci_tbl);
static enum cpuhp_state qedi_cpuhp_state;
-static struct pci_error_handlers qedi_err_handler = {
+static const struct pci_error_handlers qedi_err_handler = {
.error_detected = qedi_io_error_detected,
};
diff --git a/drivers/scsi/qla2xxx/qla_mr.h b/drivers/scsi/qla2xxx/qla_mr.h
index 4f63aff333db..3a2bd953a976 100644
--- a/drivers/scsi/qla2xxx/qla_mr.h
+++ b/drivers/scsi/qla2xxx/qla_mr.h
@@ -282,8 +282,8 @@ struct register_host_info {
#define QLAFX00_TGT_NODE_LIST_SIZE (sizeof(uint32_t) * 32)
struct config_info_data {
- uint8_t model_num[16];
- uint8_t model_description[80];
+ uint8_t model_num[16] __nonstring;
+ uint8_t model_description[80] __nonstring;
uint8_t reserved0[160];
uint8_t symbolic_name[64];
uint8_t serial_num[32];
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c
index 6d16546e1729..9e7a407ba1b9 100644
--- a/drivers/scsi/qla2xxx/qla_sup.c
+++ b/drivers/scsi/qla2xxx/qla_sup.c
@@ -2136,8 +2136,8 @@ qla2x00_write_flash_byte(struct qla_hw_data *ha, uint32_t addr, uint8_t data)
* @flash_id: Flash ID
*
* This function polls the device until bit 7 of what is read matches data
- * bit 7 or until data bit 5 becomes a 1. If that hapens, the flash ROM timed
- * out (a fatal error). The flash book recommeds reading bit 7 again after
+ * bit 7 or until data bit 5 becomes a 1. If that happens, the flash ROM timed
+ * out (a fatal error). The flash book recommends reading bit 7 again after
* reading bit 5 as a 1.
*
* Returns 0 on success, else non-zero.
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index a77e0499b738..53daf923ad8e 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -510,22 +510,34 @@ void scsi_attach_vpd(struct scsi_device *sdev)
return;
for (i = 4; i < vpd_buf->len; i++) {
- if (vpd_buf->data[i] == 0x0)
+ switch (vpd_buf->data[i]) {
+ case 0x0:
scsi_update_vpd_page(sdev, 0x0, &sdev->vpd_pg0);
- if (vpd_buf->data[i] == 0x80)
+ break;
+ case 0x80:
scsi_update_vpd_page(sdev, 0x80, &sdev->vpd_pg80);
- if (vpd_buf->data[i] == 0x83)
+ break;
+ case 0x83:
scsi_update_vpd_page(sdev, 0x83, &sdev->vpd_pg83);
- if (vpd_buf->data[i] == 0x89)
+ break;
+ case 0x89:
scsi_update_vpd_page(sdev, 0x89, &sdev->vpd_pg89);
- if (vpd_buf->data[i] == 0xb0)
+ break;
+ case 0xb0:
scsi_update_vpd_page(sdev, 0xb0, &sdev->vpd_pgb0);
- if (vpd_buf->data[i] == 0xb1)
+ break;
+ case 0xb1:
scsi_update_vpd_page(sdev, 0xb1, &sdev->vpd_pgb1);
- if (vpd_buf->data[i] == 0xb2)
+ break;
+ case 0xb2:
scsi_update_vpd_page(sdev, 0xb2, &sdev->vpd_pgb2);
- if (vpd_buf->data[i] == 0xb7)
+ break;
+ case 0xb7:
scsi_update_vpd_page(sdev, 0xb7, &sdev->vpd_pgb7);
+ break;
+ default:
+ break;
+ }
}
kfree(vpd_buf);
}
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 5ceaa4665e5d..f0eec4708ddd 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -71,6 +71,10 @@ static const char *sdebug_version_date = "20210520";
#define NO_ADDITIONAL_SENSE 0x0
#define OVERLAP_ATOMIC_COMMAND_ASC 0x0
#define OVERLAP_ATOMIC_COMMAND_ASCQ 0x23
+#define FILEMARK_DETECTED_ASCQ 0x1
+#define EOP_EOM_DETECTED_ASCQ 0x2
+#define BEGINNING_OF_P_M_DETECTED_ASCQ 0x4
+#define EOD_DETECTED_ASCQ 0x5
#define LOGICAL_UNIT_NOT_READY 0x4
#define LOGICAL_UNIT_COMMUNICATION_FAILURE 0x8
#define UNRECOVERED_READ_ERR 0x11
@@ -80,8 +84,10 @@ static const char *sdebug_version_date = "20210520";
#define INVALID_FIELD_IN_CDB 0x24
#define INVALID_FIELD_IN_PARAM_LIST 0x26
#define WRITE_PROTECTED 0x27
+#define UA_READY_ASC 0x28
#define UA_RESET_ASC 0x29
#define UA_CHANGED_ASC 0x2a
+#define TOO_MANY_IN_PARTITION_ASC 0x3b
#define TARGET_CHANGED_ASC 0x3f
#define LUNS_CHANGED_ASCQ 0x0e
#define INSUFF_RES_ASC 0x55
@@ -173,6 +179,37 @@ static const char *sdebug_version_date = "20210520";
#define DEF_ZBC_MAX_OPEN_ZONES 8
#define DEF_ZBC_NR_CONV_ZONES 1
+/* Default parameters for tape drives */
+#define TAPE_DEF_DENSITY 0x0
+#define TAPE_BAD_DENSITY 0x65
+#define TAPE_DEF_BLKSIZE 0
+#define TAPE_MIN_BLKSIZE 512
+#define TAPE_MAX_BLKSIZE 1048576
+#define TAPE_EW 20
+#define TAPE_MAX_PARTITIONS 2
+#define TAPE_UNITS 10000
+#define TAPE_PARTITION_1_UNITS 1000
+
+/* The tape block data definitions */
+#define TAPE_BLOCK_FM_FLAG ((u32)0x1 << 30)
+#define TAPE_BLOCK_EOD_FLAG ((u32)0x2 << 30)
+#define TAPE_BLOCK_MARK_MASK ((u32)0x3 << 30)
+#define TAPE_BLOCK_SIZE_MASK (~TAPE_BLOCK_MARK_MASK)
+#define TAPE_BLOCK_MARK(a) (a & TAPE_BLOCK_MARK_MASK)
+#define TAPE_BLOCK_SIZE(a) (a & TAPE_BLOCK_SIZE_MASK)
+#define IS_TAPE_BLOCK_FM(a) ((a & TAPE_BLOCK_FM_FLAG) != 0)
+#define IS_TAPE_BLOCK_EOD(a) ((a & TAPE_BLOCK_EOD_FLAG) != 0)
+
+struct tape_block {
+ u32 fl_size;
+ unsigned char data[4];
+};
+
+/* Flags for sense data */
+#define SENSE_FLAG_FILEMARK 0x80
+#define SENSE_FLAG_EOM 0x40
+#define SENSE_FLAG_ILI 0x20
+
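An illustrative sketch of how the TAPE_BLOCK_* macros above pack a 30-bit block length and the filemark/EOD marks into a single fl_size word:

static void tape_block_encoding_example(struct tape_block *blk)
{
	blk->fl_size = 512;			/* plain 512-byte data block */
	WARN_ON(TAPE_BLOCK_SIZE(blk->fl_size) != 512);
	WARN_ON(IS_TAPE_BLOCK_FM(blk->fl_size));

	blk->fl_size = TAPE_BLOCK_FM_FLAG;	/* filemark, no data */
	WARN_ON(!IS_TAPE_BLOCK_FM(blk->fl_size));

	blk->fl_size = TAPE_BLOCK_EOD_FLAG;	/* end-of-data marker */
	WARN_ON(!IS_TAPE_BLOCK_EOD(blk->fl_size));
}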
#define SDEBUG_LUN_0_VAL 0
/* bit mask values for sdebug_opts */
@@ -216,7 +253,8 @@ static const char *sdebug_version_date = "20210520";
#define SDEBUG_UA_LUNS_CHANGED 5
#define SDEBUG_UA_MICROCODE_CHANGED 6 /* simulate firmware change */
#define SDEBUG_UA_MICROCODE_CHANGED_WO_RESET 7
-#define SDEBUG_NUM_UAS 8
+#define SDEBUG_UA_NOT_READY_TO_READY 8
+#define SDEBUG_NUM_UAS 9
/* when 1==SDEBUG_OPT_MEDIUM_ERR, a medium error is simulated at this
* sector on read commands: */
@@ -262,11 +300,6 @@ static const char *sdebug_version_date = "20210520";
#define SDEB_XA_NOT_IN_USE XA_MARK_1
-static struct kmem_cache *queued_cmd_cache;
-
-#define TO_QUEUED_CMD(scmd) ((void *)(scmd)->host_scribble)
-#define ASSIGN_QUEUED_CMD(scmnd, qc) { (scmnd)->host_scribble = (void *) qc; }
-
/* Zone types (zbcr05 table 25) */
enum sdebug_z_type {
ZBC_ZTYPE_CNV = 0x1,
@@ -363,6 +396,19 @@ struct sdebug_dev_info {
ktime_t create_ts; /* time since bootup that this device was created */
struct sdeb_zone_state *zstate;
+ /* For tapes */
+ unsigned int tape_blksize;
+ unsigned int tape_density;
+ unsigned char tape_partition;
+ unsigned char tape_nbr_partitions;
+ unsigned char tape_pending_nbr_partitions;
+ unsigned int tape_pending_part_0_size;
+ unsigned int tape_pending_part_1_size;
+ unsigned char tape_dce;
+ unsigned int tape_location[TAPE_MAX_PARTITIONS];
+ unsigned int tape_eop[TAPE_MAX_PARTITIONS];
+ struct tape_block *tape_blocks[TAPE_MAX_PARTITIONS];
+
struct dentry *debugfs_entry;
struct spinlock list_lock;
struct list_head inject_err_list;
@@ -409,24 +455,9 @@ struct sdebug_defer {
enum sdeb_defer_type defer_t;
};
-struct sdebug_device_access_info {
- bool atomic_write;
- u64 lba;
- u32 num;
- struct scsi_cmnd *self;
-};
-
-struct sdebug_queued_cmd {
- /* corresponding bit set in in_use_bm[] in owning struct sdebug_queue
- * instance indicates this slot is in use.
- */
- struct sdebug_defer sd_dp;
- struct scsi_cmnd *scmd;
- struct sdebug_device_access_info *i;
-};
-
struct sdebug_scsi_cmd {
spinlock_t lock;
+ struct sdebug_defer sd_dp;
};
static atomic_t sdebug_cmnd_count; /* number of incoming commands */
@@ -483,22 +514,27 @@ enum sdeb_opcode_index {
SDEB_I_ZONE_OUT = 30, /* 0x94+SA; includes no data xfer */
SDEB_I_ZONE_IN = 31, /* 0x95+SA; all have data-in */
SDEB_I_ATOMIC_WRITE_16 = 32,
- SDEB_I_LAST_ELEM_P1 = 33, /* keep this last (previous + 1) */
+ SDEB_I_READ_BLOCK_LIMITS = 33,
+ SDEB_I_LOCATE = 34,
+ SDEB_I_WRITE_FILEMARKS = 35,
+ SDEB_I_SPACE = 36,
+ SDEB_I_FORMAT_MEDIUM = 37,
+ SDEB_I_LAST_ELEM_P1 = 38, /* keep this last (previous + 1) */
};
static const unsigned char opcode_ind_arr[256] = {
/* 0x0; 0x0->0x1f: 6 byte cdbs */
SDEB_I_TEST_UNIT_READY, SDEB_I_REZERO_UNIT, 0, SDEB_I_REQUEST_SENSE,
- 0, 0, 0, 0,
+ SDEB_I_FORMAT_MEDIUM, SDEB_I_READ_BLOCK_LIMITS, 0, 0,
SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, 0,
- 0, 0, SDEB_I_INQUIRY, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
- SDEB_I_RELEASE,
+ SDEB_I_WRITE_FILEMARKS, SDEB_I_SPACE, SDEB_I_INQUIRY, 0, 0,
+ SDEB_I_MODE_SELECT, SDEB_I_RESERVE, SDEB_I_RELEASE,
0, 0, SDEB_I_MODE_SENSE, SDEB_I_START_STOP, 0, SDEB_I_SEND_DIAG,
SDEB_I_ALLOW_REMOVAL, 0,
/* 0x20; 0x20->0x3f: 10 byte cdbs */
0, 0, 0, 0, 0, SDEB_I_READ_CAPACITY, 0, 0,
- SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, SDEB_I_VERIFY,
+ SDEB_I_READ, 0, SDEB_I_WRITE, SDEB_I_LOCATE, 0, 0, 0, SDEB_I_VERIFY,
0, 0, 0, 0, SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, 0,
0, 0, 0, SDEB_I_WRITE_BUFFER, 0, 0, 0, 0,
/* 0x40; 0x40->0x5f: 10 byte cdbs */
@@ -573,6 +609,12 @@ static int resp_open_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_close_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_finish_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_rwp_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
+static int resp_read_blklimits(struct scsi_cmnd *, struct sdebug_dev_info *);
+static int resp_locate(struct scsi_cmnd *, struct sdebug_dev_info *);
+static int resp_write_filemarks(struct scsi_cmnd *, struct sdebug_dev_info *);
+static int resp_space(struct scsi_cmnd *, struct sdebug_dev_info *);
+static int resp_rewind(struct scsi_cmnd *, struct sdebug_dev_info *);
+static int resp_format_medium(struct scsi_cmnd *, struct sdebug_dev_info *);
static int sdebug_do_add_host(bool mk_new_store);
static int sdebug_add_host_helper(int per_host_idx);
@@ -581,8 +623,6 @@ static int sdebug_add_store(void);
static void sdebug_erase_store(int idx, struct sdeb_store_info *sip);
static void sdebug_erase_all_stores(bool apart_from_first);
-static void sdebug_free_queued_cmd(struct sdebug_queued_cmd *sqcp);
-
/*
* The following are overflow arrays for cdbs that "hit" the same index in
* the opcode_info_arr array. The most time sensitive (or commonly used) cdb
@@ -773,7 +813,7 @@ static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEM_P1 + 1] = {
/* 20 */
{0, 0x1e, 0, 0, NULL, NULL, /* ALLOW REMOVAL */
{6, 0, 0, 0, 0x3, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {0, 0x1, 0, 0, resp_start_stop, NULL, /* REWIND ?? */
+ {0, 0x1, 0, 0, resp_rewind, NULL,
{6, 0x1, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* ATA_PT */
{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
@@ -800,6 +840,7 @@ static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEM_P1 + 1] = {
resp_pre_fetch, pre_fetch_iarr,
{10, 0x2, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
0, 0, 0, 0} }, /* PRE-FETCH (10) */
+ /* READ POSITION (10) */
/* 30 */
{ARRAY_SIZE(zone_out_iarr), 0x94, 0x3, F_SA_LOW | F_M_ACCESS,
@@ -810,11 +851,23 @@ static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEM_P1 + 1] = {
resp_report_zones, zone_in_iarr, /* ZONE_IN(16), REPORT ZONES) */
{16, 0x0 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xc7} },
-/* 31 */
+/* 32 */
{0, 0x0, 0x0, F_D_OUT | FF_MEDIA_IO,
resp_atomic_write, NULL, /* ATOMIC WRITE 16 */
{16, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff} },
+ {0, 0x05, 0, F_D_IN, resp_read_blklimits, NULL, /* READ BLOCK LIMITS (6) */
+ {6, 0, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+ {0, 0x2b, 0, F_D_UNKN, resp_locate, NULL, /* LOCATE (10) */
+ {10, 0x07, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xff, 0xc7, 0, 0,
+ 0, 0, 0, 0} },
+ {0, 0x10, 0, F_D_IN, resp_write_filemarks, NULL, /* WRITE FILEMARKS (6) */
+ {6, 0x01, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+ {0, 0x11, 0, F_D_IN, resp_space, NULL, /* SPACE (6) */
+ {6, 0x07, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+ {0, 0x4, 0, 0, resp_format_medium, NULL, /* FORMAT MEDIUM (6) */
+ {6, 0x3, 0x7, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+/* 38 */
/* sentinel */
{0xff, 0, 0, 0, NULL, NULL, /* terminating element */
{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
@@ -1331,6 +1384,30 @@ static void mk_sense_buffer(struct scsi_cmnd *scp, int key, int asc, int asq)
my_name, key, asc, asq);
}
+/* Sense data that has information fields for tapes */
+static void mk_sense_info_tape(struct scsi_cmnd *scp, int key, int asc, int asq,
+ unsigned int information, unsigned char tape_flags)
+{
+ if (!scp->sense_buffer) {
+ sdev_printk(KERN_ERR, scp->device,
+ "%s: sense_buffer is NULL\n", __func__);
+ return;
+ }
+ memset(scp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
+
+ scsi_build_sense(scp, /* sdebug_dsense */ 0, key, asc, asq);
+ /* only fixed format so far */
+
+ scp->sense_buffer[0] |= 0x80; /* valid */
+ scp->sense_buffer[2] |= tape_flags;
+ put_unaligned_be32(information, &scp->sense_buffer[3]);
+
+ if (sdebug_verbose)
+ sdev_printk(KERN_INFO, scp->device,
+ "%s: [sense_key,asc,ascq]: [0x%x,0x%x,0x%x]\n",
+ my_name, key, asc, asq);
+}
+
static void mk_sense_invalid_opcode(struct scsi_cmnd *scp)
{
mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
@@ -1493,6 +1570,12 @@ static int make_ua(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
if (sdebug_verbose)
cp = "reported luns data has changed";
break;
+ case SDEBUG_UA_NOT_READY_TO_READY:
+ mk_sense_buffer(scp, UNIT_ATTENTION, UA_READY_ASC,
+ 0);
+ if (sdebug_verbose)
+ cp = "not ready to ready transition/media change";
+ break;
default:
pr_warn("unexpected unit attention code=%d\n", k);
if (sdebug_verbose)
@@ -2196,6 +2279,14 @@ static int resp_start_stop(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
changing = (stopped_state != want_stop);
if (changing)
atomic_xchg(&devip->stopped, want_stop);
+ if (sdebug_ptype == TYPE_TAPE && !want_stop) {
+ int i;
+
+ set_bit(SDEBUG_UA_NOT_READY_TO_READY, devip->uas_bm); /* not legal! */
+ for (i = 0; i < TAPE_MAX_PARTITIONS; i++)
+ devip->tape_location[i] = 0;
+ devip->tape_partition = 0;
+ }
if (!changing || (cmd[1] & 0x1)) /* state unchanged or IMMED bit set in cdb */
return SDEG_RES_IMMED_MASK;
else
@@ -2728,6 +2819,76 @@ static int resp_sas_sha_m_spg(unsigned char *p, int pcontrol)
return sizeof(sas_sha_m_pg);
}
+static unsigned char partition_pg[] = {0x11, 12, 1, 0, 0x24, 3, 9, 0,
+ 0xff, 0xff, 0x00, 0x00};
+
+static int resp_partition_m_pg(unsigned char *p, int pcontrol, int target)
+{ /* Partition page for mode_sense (tape) */
+ memcpy(p, partition_pg, sizeof(partition_pg));
+ if (pcontrol == 1)
+ memset(p + 2, 0, sizeof(partition_pg) - 2);
+ return sizeof(partition_pg);
+}
+
+static int process_medium_part_m_pg(struct sdebug_dev_info *devip,
+ unsigned char *new, int pg_len)
+{
+ int new_nbr, p0_size, p1_size;
+
+ if ((new[4] & 0x80) != 0) { /* FDP */
+ partition_pg[4] |= 0x80;
+ devip->tape_pending_nbr_partitions = TAPE_MAX_PARTITIONS;
+ devip->tape_pending_part_0_size = TAPE_UNITS - TAPE_PARTITION_1_UNITS;
+ devip->tape_pending_part_1_size = TAPE_PARTITION_1_UNITS;
+ } else {
+ new_nbr = new[3] + 1;
+ if (new_nbr > TAPE_MAX_PARTITIONS)
+ return 3;
+ if ((new[4] & 0x40) != 0) { /* SDP */
+ p1_size = TAPE_PARTITION_1_UNITS;
+ p0_size = TAPE_UNITS - p1_size;
+ if (p0_size < 100)
+ return 4;
+ } else if ((new[4] & 0x20) != 0) {
+ if (new_nbr > 1) {
+ p0_size = get_unaligned_be16(new + 8);
+ p1_size = get_unaligned_be16(new + 10);
+ if (p1_size == 0xFFFF)
+ p1_size = TAPE_UNITS - p0_size;
+ else if (p0_size == 0xFFFF)
+ p0_size = TAPE_UNITS - p1_size;
+ if (p0_size < 100 || p1_size < 100)
+ return 8;
+ } else {
+ p0_size = TAPE_UNITS;
+ p1_size = 0;
+ }
+ } else
+ return 6;
+ devip->tape_pending_nbr_partitions = new_nbr;
+ devip->tape_pending_part_0_size = p0_size;
+ devip->tape_pending_part_1_size = p1_size;
+ partition_pg[3] = new_nbr;
+ devip->tape_pending_nbr_partitions = new_nbr;
+ }
+
+ return 0;
+}
+
+static int resp_compression_m_pg(unsigned char *p, int pcontrol, int target,
+ unsigned char dce)
+{ /* Compression page for mode_sense (tape) */
+ unsigned char compression_pg[] = {0x0f, 14, 0x40, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 00, 00};
+
+ memcpy(p, compression_pg, sizeof(compression_pg));
+ if (dce)
+ p[2] |= 0x80;
+ if (pcontrol == 1)
+ memset(p + 2, 0, sizeof(compression_pg) - 2);
+ return sizeof(compression_pg);
+}
+
/* PAGE_SIZE is more than necessary but provides room for future expansion. */
#define SDEBUG_MAX_MSENSE_SZ PAGE_SIZE
@@ -2742,7 +2903,7 @@ static int resp_mode_sense(struct scsi_cmnd *scp,
unsigned char *ap;
unsigned char *arr __free(kfree);
unsigned char *cmd = scp->cmnd;
- bool dbd, llbaa, msense_6, is_disk, is_zbc;
+ bool dbd, llbaa, msense_6, is_disk, is_zbc, is_tape;
arr = kzalloc(SDEBUG_MAX_MSENSE_SZ, GFP_ATOMIC);
if (!arr)
@@ -2755,7 +2916,8 @@ static int resp_mode_sense(struct scsi_cmnd *scp,
llbaa = msense_6 ? false : !!(cmd[1] & 0x10);
is_disk = (sdebug_ptype == TYPE_DISK);
is_zbc = devip->zoned;
- if ((is_disk || is_zbc) && !dbd)
+ is_tape = (sdebug_ptype == TYPE_TAPE);
+ if ((is_disk || is_zbc || is_tape) && !dbd)
bd_len = llbaa ? 16 : 8;
else
bd_len = 0;
@@ -2793,15 +2955,25 @@ static int resp_mode_sense(struct scsi_cmnd *scp,
put_unaligned_be32(0xffffffff, ap + 0);
else
put_unaligned_be32(sdebug_capacity, ap + 0);
- put_unaligned_be16(sdebug_sector_size, ap + 6);
+ if (is_tape) {
+ ap[0] = devip->tape_density;
+ put_unaligned_be16(devip->tape_blksize, ap + 6);
+ } else
+ put_unaligned_be16(sdebug_sector_size, ap + 6);
offset += bd_len;
ap = arr + offset;
} else if (16 == bd_len) {
+ if (is_tape) {
+ mk_sense_invalid_fld(scp, SDEB_IN_DATA, 1, 4);
+ return check_condition_result;
+ }
put_unaligned_be64((u64)sdebug_capacity, ap + 0);
put_unaligned_be32(sdebug_sector_size, ap + 12);
offset += bd_len;
ap = arr + offset;
}
+ if (cmd[2] == 0)
+ goto only_bd; /* Only block descriptor requested */
/*
* N.B. If len>0 before resp_*_pg() call, then form of that call should be:
@@ -2857,6 +3029,18 @@ static int resp_mode_sense(struct scsi_cmnd *scp,
}
offset += len;
break;
+ case 0xf: /* Compression Mode Page (tape) */
+ if (!is_tape)
+ goto bad_pcode;
+ len = resp_compression_m_pg(ap, pcontrol, target, devip->tape_dce);
+ offset += len;
+ break;
+ case 0x11: /* Partition Mode Page (tape) */
+ if (!is_tape)
+ goto bad_pcode;
+ len = resp_partition_m_pg(ap, pcontrol, target);
+ offset += len;
+ break;
case 0x19: /* if spc==1 then sas phy, control+discover */
if (subpcode > 0x2 && subpcode < 0xff)
goto bad_subpcode;
@@ -2902,6 +3086,7 @@ static int resp_mode_sense(struct scsi_cmnd *scp,
default:
goto bad_pcode;
}
+only_bd:
if (msense_6)
arr[0] = offset - 1;
else
@@ -2945,8 +3130,34 @@ static int resp_mode_select(struct scsi_cmnd *scp,
__func__, param_len, res);
md_len = mselect6 ? (arr[0] + 1) : (get_unaligned_be16(arr + 0) + 2);
bd_len = mselect6 ? arr[3] : get_unaligned_be16(arr + 6);
- off = bd_len + (mselect6 ? 4 : 8);
- if (md_len > 2 || off >= res) {
+ off = (mselect6 ? 4 : 8);
+ if (sdebug_ptype == TYPE_TAPE) {
+ int blksize;
+
+ if (bd_len != 8) {
+ mk_sense_invalid_fld(scp, SDEB_IN_DATA,
+ mselect6 ? 3 : 6, -1);
+ return check_condition_result;
+ }
+ if (arr[off] == TAPE_BAD_DENSITY) {
+ mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1);
+ return check_condition_result;
+ }
+ blksize = get_unaligned_be16(arr + off + 6);
+ if (blksize != 0 &&
+ (blksize < TAPE_MIN_BLKSIZE ||
+ blksize > TAPE_MAX_BLKSIZE ||
+ (blksize % 4) != 0)) {
+ mk_sense_invalid_fld(scp, SDEB_IN_DATA, 1, -1);
+ return check_condition_result;
+ }
+ devip->tape_density = arr[off];
+ devip->tape_blksize = blksize;
+ }
+ off += bd_len;
+ if (off >= res)
+ return 0; /* No page written, just descriptors */
+ if (md_len > 2) {
mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1);
return check_condition_result;
}
@@ -2984,6 +3195,25 @@ static int resp_mode_select(struct scsi_cmnd *scp,
goto set_mode_changed_ua;
}
break;
+ case 0xf: /* Compression mode page */
+ if (sdebug_ptype != TYPE_TAPE)
+ goto bad_pcode;
+ if ((arr[off + 2] & 0x40) != 0) {
+ devip->tape_dce = (arr[off + 2] & 0x80) != 0;
+ return 0;
+ }
+ break;
+ case 0x11: /* Medium Partition Mode Page (tape) */
+ if (sdebug_ptype == TYPE_TAPE) {
+ int fld;
+
+ fld = process_medium_part_m_pg(devip, &arr[off], pg_len);
+ if (fld == 0)
+ return 0;
+ mk_sense_invalid_fld(scp, SDEB_IN_DATA, fld, -1);
+ return check_condition_result;
+ }
+ break;
case 0x1c: /* Informational Exceptions Mode page */
if (iec_m_pg[1] == arr[off + 1]) {
memcpy(iec_m_pg + 2, arr + off + 2,
@@ -2999,6 +3229,10 @@ static int resp_mode_select(struct scsi_cmnd *scp,
set_mode_changed_ua:
set_bit(SDEBUG_UA_MODE_CHANGED, devip->uas_bm);
return 0;
+
+bad_pcode:
+ mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
+ return check_condition_result;
}
static int resp_temp_l_pg(unsigned char *arr)
@@ -3138,6 +3372,265 @@ static int resp_log_sense(struct scsi_cmnd *scp,
min_t(u32, len, SDEBUG_MAX_INQ_ARR_SZ));
}
+enum {SDEBUG_READ_BLOCK_LIMITS_ARR_SZ = 6};
+static int resp_read_blklimits(struct scsi_cmnd *scp,
+ struct sdebug_dev_info *devip)
+{
+ unsigned char arr[SDEBUG_READ_BLOCK_LIMITS_ARR_SZ];
+
+ arr[0] = 4;
+ put_unaligned_be24(TAPE_MAX_BLKSIZE, arr + 1);
+ put_unaligned_be16(TAPE_MIN_BLKSIZE, arr + 4);
+ return fill_from_dev_buffer(scp, arr, SDEBUG_READ_BLOCK_LIMITS_ARR_SZ);
+}
+
+static int resp_locate(struct scsi_cmnd *scp,
+ struct sdebug_dev_info *devip)
+{
+ unsigned char *cmd = scp->cmnd;
+ unsigned int i, pos;
+ struct tape_block *blp;
+ int partition;
+
+ if ((cmd[1] & 0x02) != 0) {
+ if (cmd[8] >= devip->tape_nbr_partitions) {
+ mk_sense_invalid_fld(scp, SDEB_IN_CDB, 8, -1);
+ return check_condition_result;
+ }
+ devip->tape_partition = cmd[8];
+ }
+ pos = get_unaligned_be32(cmd + 3);
+ partition = devip->tape_partition;
+
+ for (i = 0, blp = devip->tape_blocks[partition];
+ i < pos && i < devip->tape_eop[partition]; i++, blp++)
+ if (IS_TAPE_BLOCK_EOD(blp->fl_size))
+ break;
+ if (i < pos) {
+ devip->tape_location[partition] = i;
+ mk_sense_buffer(scp, BLANK_CHECK, 0x05, 0);
+ return check_condition_result;
+ }
+ devip->tape_location[partition] = pos;
+
+ return 0;
+}
+
+static int resp_write_filemarks(struct scsi_cmnd *scp,
+ struct sdebug_dev_info *devip)
+{
+ unsigned char *cmd = scp->cmnd;
+ unsigned int i, count, pos;
+ u32 data;
+ int partition = devip->tape_partition;
+
+ if ((cmd[1] & 0xfe) != 0) { /* probably write setmarks, not in >= SCSI-3 */
+ mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1);
+ return check_condition_result;
+ }
+ count = get_unaligned_be24(cmd + 2);
+ data = TAPE_BLOCK_FM_FLAG;
+ for (i = 0, pos = devip->tape_location[partition]; i < count; i++, pos++) {
+ if (pos >= devip->tape_eop[partition] - 1) { /* don't overwrite EOD */
+ devip->tape_location[partition] = devip->tape_eop[partition] - 1;
+ mk_sense_info_tape(scp, VOLUME_OVERFLOW, NO_ADDITIONAL_SENSE,
+ EOP_EOM_DETECTED_ASCQ, count, SENSE_FLAG_EOM);
+ return check_condition_result;
+ }
+ (devip->tape_blocks[partition] + pos)->fl_size = data;
+ }
+ (devip->tape_blocks[partition] + pos)->fl_size =
+ TAPE_BLOCK_EOD_FLAG;
+ devip->tape_location[partition] = pos;
+
+ return 0;
+}
+
+static int resp_space(struct scsi_cmnd *scp,
+ struct sdebug_dev_info *devip)
+{
+ unsigned char *cmd = scp->cmnd, code;
+ int i = 0, pos, count;
+ struct tape_block *blp;
+ int partition = devip->tape_partition;
+
+ count = get_unaligned_be24(cmd + 2);
+ if ((count & 0x800000) != 0) /* extend negative to 32-bit count */
+ count |= 0xff000000;
+ code = cmd[1] & 0x0f;
+
+ pos = devip->tape_location[partition];
+ if (code == 0) { /* blocks */
+ if (count < 0) {
+ count = (-count);
+ pos -= 1;
+ for (i = 0, blp = devip->tape_blocks[partition] + pos; i < count;
+ i++) {
+ if (pos < 0)
+ goto is_bop;
+ else if (IS_TAPE_BLOCK_FM(blp->fl_size))
+ goto is_fm;
+ if (i > 0) {
+ pos--;
+ blp--;
+ }
+ }
+ } else if (count > 0) {
+ for (i = 0, blp = devip->tape_blocks[partition] + pos; i < count;
+ i++, pos++, blp++) {
+ if (IS_TAPE_BLOCK_EOD(blp->fl_size))
+ goto is_eod;
+ if (IS_TAPE_BLOCK_FM(blp->fl_size)) {
+ pos += 1;
+ goto is_fm;
+ }
+ if (pos >= devip->tape_eop[partition])
+ goto is_eop;
+ }
+ }
+ } else if (code == 1) { /* filemarks */
+ if (count < 0) {
+ count = (-count);
+ if (pos == 0)
+ goto is_bop;
+ else {
+ for (i = 0, blp = devip->tape_blocks[partition] + pos;
+ i < count && pos >= 0; i++, pos--, blp--) {
+ for (pos--, blp-- ; !IS_TAPE_BLOCK_FM(blp->fl_size) &&
+ pos >= 0; pos--, blp--)
+ ; /* empty */
+ if (pos < 0)
+ goto is_bop;
+ }
+ }
+ pos += 1;
+ } else if (count > 0) {
+ for (i = 0, blp = devip->tape_blocks[partition] + pos;
+ i < count; i++, pos++, blp++) {
+ for ( ; !IS_TAPE_BLOCK_FM(blp->fl_size) &&
+ !IS_TAPE_BLOCK_EOD(blp->fl_size) &&
+ pos < devip->tape_eop[partition];
+ pos++, blp++)
+ ; /* empty */
+ if (IS_TAPE_BLOCK_EOD(blp->fl_size))
+ goto is_eod;
+ if (pos >= devip->tape_eop[partition])
+ goto is_eop;
+ }
+ }
+ } else if (code == 3) { /* EOD */
+ for (blp = devip->tape_blocks[partition] + pos;
+ !IS_TAPE_BLOCK_EOD(blp->fl_size) && pos < devip->tape_eop[partition];
+ pos++, blp++)
+ ; /* empty */
+ if (pos >= devip->tape_eop[partition])
+ goto is_eop;
+ } else {
+ /* sequential filemarks not supported */
+ mk_sense_invalid_fld(scp, SDEB_IN_CDB, 8, -1);
+ return check_condition_result;
+ }
+ devip->tape_location[partition] = pos;
+ return 0;
+
+is_fm:
+ devip->tape_location[partition] = pos;
+ mk_sense_info_tape(scp, NO_SENSE, NO_ADDITIONAL_SENSE,
+ FILEMARK_DETECTED_ASCQ, count - i,
+ SENSE_FLAG_FILEMARK);
+ return check_condition_result;
+
+is_eod:
+ devip->tape_location[partition] = pos;
+ mk_sense_info_tape(scp, BLANK_CHECK, NO_ADDITIONAL_SENSE,
+ EOD_DETECTED_ASCQ, count - i,
+ 0);
+ return check_condition_result;
+
+is_bop:
+ devip->tape_location[partition] = 0;
+ mk_sense_info_tape(scp, NO_SENSE, NO_ADDITIONAL_SENSE,
+ BEGINNING_OF_P_M_DETECTED_ASCQ, count - i,
+ SENSE_FLAG_EOM);
+ devip->tape_location[partition] = 0;
+ return check_condition_result;
+
+is_eop:
+ devip->tape_location[partition] = devip->tape_eop[partition] - 1;
+ mk_sense_info_tape(scp, MEDIUM_ERROR, NO_ADDITIONAL_SENSE,
+ EOP_EOM_DETECTED_ASCQ, (unsigned int)i,
+ SENSE_FLAG_EOM);
+ return check_condition_result;
+}
+
+static int resp_rewind(struct scsi_cmnd *scp,
+ struct sdebug_dev_info *devip)
+{
+ devip->tape_location[devip->tape_partition] = 0;
+
+ return 0;
+}
+
+static int partition_tape(struct sdebug_dev_info *devip, int nbr_partitions,
+ int part_0_size, int part_1_size)
+{
+ int i;
+
+ if (part_0_size + part_1_size > TAPE_UNITS)
+ return -1;
+ devip->tape_eop[0] = part_0_size;
+ devip->tape_blocks[0]->fl_size = TAPE_BLOCK_EOD_FLAG;
+ devip->tape_eop[1] = part_1_size;
+ devip->tape_blocks[1] = devip->tape_blocks[0] +
+ devip->tape_eop[0];
+ devip->tape_blocks[1]->fl_size = TAPE_BLOCK_EOD_FLAG;
+
+ for (i = 0 ; i < TAPE_MAX_PARTITIONS; i++)
+ devip->tape_location[i] = 0;
+
+ devip->tape_nbr_partitions = nbr_partitions;
+ devip->tape_partition = 0;
+
+ partition_pg[3] = nbr_partitions - 1;
+ put_unaligned_be16(devip->tape_eop[0], partition_pg + 8);
+ put_unaligned_be16(devip->tape_eop[1], partition_pg + 10);
+
+ return nbr_partitions;
+}
+
+static int resp_format_medium(struct scsi_cmnd *scp,
+ struct sdebug_dev_info *devip)
+{
+ int res = 0;
+ unsigned char *cmd = scp->cmnd;
+
+ if (sdebug_ptype != TYPE_TAPE) {
+ mk_sense_invalid_fld(scp, SDEB_IN_CDB, 0, -1);
+ return check_condition_result;
+ }
+ if (cmd[2] > 2) {
+ mk_sense_invalid_fld(scp, SDEB_IN_DATA, 2, -1);
+ return check_condition_result;
+ }
+ if (cmd[2] != 0) {
+ if (devip->tape_pending_nbr_partitions > 0) {
+ res = partition_tape(devip,
+ devip->tape_pending_nbr_partitions,
+ devip->tape_pending_part_0_size,
+ devip->tape_pending_part_1_size);
+ } else
+ res = partition_tape(devip, devip->tape_nbr_partitions,
+ devip->tape_eop[0], devip->tape_eop[1]);
+ } else
+ res = partition_tape(devip, 1, TAPE_UNITS, 0);
+ if (res < 0)
+ return -EINVAL;
+
+ devip->tape_pending_nbr_partitions = -1;
+
+ return 0;
+}
+
static inline bool sdebug_dev_is_zoned(struct sdebug_dev_info *devip)
{
return devip->nr_zones != 0;
@@ -3871,6 +4364,98 @@ static int prot_verify_read(struct scsi_cmnd *scp, sector_t start_sec,
return ret;
}
+static int resp_read_tape(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
+{
+ u32 i, num, transfer, size;
+ u8 *cmd = scp->cmnd;
+ struct scsi_data_buffer *sdb = &scp->sdb;
+ int partition = devip->tape_partition;
+ u32 pos = devip->tape_location[partition];
+ struct tape_block *blp;
+ bool fixed, sili;
+
+ if (cmd[0] != READ_6) { /* Only Read(6) supported */
+ mk_sense_invalid_opcode(scp);
+ return illegal_condition_result;
+ }
+ fixed = (cmd[1] & 0x1) != 0;
+ sili = (cmd[1] & 0x2) != 0;
+ if (fixed && sili) {
+ mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1);
+ return check_condition_result;
+ }
+
+ transfer = get_unaligned_be24(cmd + 2);
+ if (fixed) {
+ num = transfer;
+ size = devip->tape_blksize;
+ } else {
+ if (transfer < TAPE_MIN_BLKSIZE ||
+ transfer > TAPE_MAX_BLKSIZE) {
+ mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
+ return check_condition_result;
+ }
+ num = 1;
+ size = transfer;
+ }
+
+ for (i = 0, blp = devip->tape_blocks[partition] + pos;
+ i < num && pos < devip->tape_eop[partition];
+ i++, pos++, blp++) {
+ devip->tape_location[partition] = pos + 1;
+ if (IS_TAPE_BLOCK_FM(blp->fl_size)) {
+ mk_sense_info_tape(scp, NO_SENSE, NO_ADDITIONAL_SENSE,
+ FILEMARK_DETECTED_ASCQ, fixed ? num - i : size,
+ SENSE_FLAG_FILEMARK);
+ scsi_set_resid(scp, (num - i) * size);
+ return check_condition_result;
+ }
+ /* Assume no REW */
+ if (IS_TAPE_BLOCK_EOD(blp->fl_size)) {
+ mk_sense_info_tape(scp, BLANK_CHECK, NO_ADDITIONAL_SENSE,
+ EOD_DETECTED_ASCQ, fixed ? num - i : size,
+ 0);
+ devip->tape_location[partition] = pos;
+ scsi_set_resid(scp, (num - i) * size);
+ return check_condition_result;
+ }
+ sg_zero_buffer(sdb->table.sgl, sdb->table.nents,
+ size, i * size);
+ sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
+ &(blp->data), 4, i * size, false);
+ if (fixed) {
+ if (blp->fl_size != devip->tape_blksize) {
+ scsi_set_resid(scp, (num - i) * size);
+ mk_sense_info_tape(scp, NO_SENSE, NO_ADDITIONAL_SENSE,
+ 0, num - i,
+ SENSE_FLAG_ILI);
+ return check_condition_result;
+ }
+ } else {
+ if (blp->fl_size != size) {
+ if (blp->fl_size < size)
+ scsi_set_resid(scp, size - blp->fl_size);
+ if (!sili) {
+ mk_sense_info_tape(scp, NO_SENSE, NO_ADDITIONAL_SENSE,
+ 0, size - blp->fl_size,
+ SENSE_FLAG_ILI);
+ return check_condition_result;
+ }
+ }
+ }
+ }
+ if (pos >= devip->tape_eop[partition]) {
+ mk_sense_info_tape(scp, NO_SENSE, NO_ADDITIONAL_SENSE,
+ EOP_EOM_DETECTED_ASCQ, fixed ? num - i : size,
+ SENSE_FLAG_EOM);
+ devip->tape_location[partition] = pos - 1;
+ return check_condition_result;
+ }
+ devip->tape_location[partition] = pos;
+
+ return 0;
+}
+
static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
bool check_prot;
@@ -3882,6 +4467,9 @@ static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
u8 *cmd = scp->cmnd;
bool meta_data_locked = false;
+ if (sdebug_ptype == TYPE_TAPE)
+ return resp_read_tape(scp, devip);
+
switch (cmd[0]) {
case READ_16:
ei_lba = 0;
@@ -4178,6 +4766,67 @@ static void unmap_region(struct sdeb_store_info *sip, sector_t lba,
}
}
+static int resp_write_tape(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
+{
+ u32 i, num, transfer, size, written = 0;
+ u8 *cmd = scp->cmnd;
+ struct scsi_data_buffer *sdb = &scp->sdb;
+ int partition = devip->tape_partition;
+ int pos = devip->tape_location[partition];
+ struct tape_block *blp;
+ bool fixed, ew;
+
+ if (cmd[0] != WRITE_6) { /* Only Write(6) supported */
+ mk_sense_invalid_opcode(scp);
+ return illegal_condition_result;
+ }
+
+ fixed = (cmd[1] & 1) != 0;
+ transfer = get_unaligned_be24(cmd + 2);
+ if (fixed) {
+ num = transfer;
+ size = devip->tape_blksize;
+ } else {
+ if (transfer < TAPE_MIN_BLKSIZE ||
+ transfer > TAPE_MAX_BLKSIZE) {
+ mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
+ return check_condition_result;
+ }
+ num = 1;
+ size = transfer;
+ }
+
+ scsi_set_resid(scp, num * transfer);
+ for (i = 0, blp = devip->tape_blocks[partition] + pos, ew = false;
+ i < num && pos < devip->tape_eop[partition] - 1; i++, pos++, blp++) {
+ blp->fl_size = size;
+ sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
+ &(blp->data), 4, i * size, true);
+ written += size;
+ scsi_set_resid(scp, num * transfer - written);
+ ew |= (pos == devip->tape_eop[partition] - TAPE_EW);
+ }
+
+ devip->tape_location[partition] = pos;
+ blp->fl_size = TAPE_BLOCK_EOD_FLAG;
+ if (pos >= devip->tape_eop[partition] - 1) {
+ mk_sense_info_tape(scp, VOLUME_OVERFLOW,
+ NO_ADDITIONAL_SENSE, EOP_EOM_DETECTED_ASCQ,
+ fixed ? num - i : transfer,
+ SENSE_FLAG_EOM);
+ return check_condition_result;
+ }
+ if (ew) { /* early warning */
+ mk_sense_info_tape(scp, NO_SENSE,
+ NO_ADDITIONAL_SENSE, EOP_EOM_DETECTED_ASCQ,
+ fixed ? num - i : transfer,
+ SENSE_FLAG_EOM);
+ return check_condition_result;
+ }
+
+ return 0;
+}
+
static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
bool check_prot;
@@ -4190,6 +4839,9 @@ static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
u8 *cmd = scp->cmnd;
bool meta_data_locked = false;
+ if (sdebug_ptype == TYPE_TAPE)
+ return resp_write_tape(scp, devip);
+
switch (cmd[0]) {
case WRITE_16:
ei_lba = 0;
@@ -4918,7 +5570,10 @@ static int resp_sync_cache(struct scsi_cmnd *scp,
* a GOOD status otherwise. Model a disk with a big cache and yield
* CONDITION MET. Actually tries to bring range in main memory into the
* cache associated with the CPU(s).
+ *
+ * The opcode 0x34 is also used for READ POSITION by tape devices.
*/
+enum {SDEBUG_READ_POSITION_ARR_SZ = 20};
static int resp_pre_fetch(struct scsi_cmnd *scp,
struct sdebug_dev_info *devip)
{
@@ -4930,6 +5585,31 @@ static int resp_pre_fetch(struct scsi_cmnd *scp,
struct sdeb_store_info *sip = devip2sip(devip, true);
u8 *fsp = sip->storep;
+ if (sdebug_ptype == TYPE_TAPE) {
+ if (cmd[0] == PRE_FETCH) { /* READ POSITION (10) */
+ int all_length;
+ unsigned char arr[20];
+ unsigned int pos;
+
+ all_length = get_unaligned_be16(cmd + 7);
+ if ((cmd[1] & 0xfe) != 0 ||
+ all_length != 0) { /* only short form */
+ mk_sense_invalid_fld(scp, SDEB_IN_CDB,
+ all_length ? 7 : 1, 0);
+ return check_condition_result;
+ }
+ memset(arr, 0, SDEBUG_READ_POSITION_ARR_SZ);
+ arr[1] = devip->tape_partition;
+ pos = devip->tape_location[devip->tape_partition];
+ put_unaligned_be32(pos, arr + 4);
+ put_unaligned_be32(pos, arr + 8);
+ return fill_from_dev_buffer(scp, arr,
+ SDEBUG_READ_POSITION_ARR_SZ);
+ }
+ mk_sense_invalid_opcode(scp);
+ return check_condition_result;
+ }
+
if (cmd[0] == PRE_FETCH) { /* 10 byte cdb */
lba = get_unaligned_be32(cmd + 2);
nblks = get_unaligned_be16(cmd + 7);
@@ -5638,10 +6318,10 @@ static u32 get_tag(struct scsi_cmnd *cmnd)
/* Queued (deferred) command completions converge here. */
static void sdebug_q_cmd_complete(struct sdebug_defer *sd_dp)
{
- struct sdebug_queued_cmd *sqcp = container_of(sd_dp, struct sdebug_queued_cmd, sd_dp);
+ struct sdebug_scsi_cmd *sdsc = container_of(sd_dp,
+ typeof(*sdsc), sd_dp);
+ struct scsi_cmnd *scp = (struct scsi_cmnd *)sdsc - 1;
unsigned long flags;
- struct scsi_cmnd *scp = sqcp->scmd;
- struct sdebug_scsi_cmd *sdsc;
bool aborted;
if (sdebug_statistics) {
@@ -5652,27 +6332,23 @@ static void sdebug_q_cmd_complete(struct sdebug_defer *sd_dp)
if (!scp) {
pr_err("scmd=NULL\n");
- goto out;
+ return;
}
- sdsc = scsi_cmd_priv(scp);
spin_lock_irqsave(&sdsc->lock, flags);
aborted = sd_dp->aborted;
if (unlikely(aborted))
sd_dp->aborted = false;
- ASSIGN_QUEUED_CMD(scp, NULL);
spin_unlock_irqrestore(&sdsc->lock, flags);
if (aborted) {
pr_info("bypassing scsi_done() due to aborted cmd, kicking-off EH\n");
blk_abort_request(scsi_cmd_to_rq(scp));
- goto out;
+ return;
}
scsi_done(scp); /* callback to mid level */
-out:
- sdebug_free_queued_cmd(sqcp);
}
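The new completion path recovers the scsi_cmnd from the embedded sdebug_defer by stepping back one scsi_cmnd, which works because scsi_cmd_priv() hands out the per-command private area placed directly after struct scsi_cmnd; a small sanity sketch of that relationship:

static struct scsi_cmnd *sdsc_to_scmd(struct sdebug_scsi_cmd *sdsc)
{
	struct scsi_cmnd *scmd = (struct scsi_cmnd *)sdsc - 1;

	/* scsi_cmd_priv(scmd) == scmd + 1, so this must round-trip */
	WARN_ON(scsi_cmd_priv(scmd) != sdsc);
	return scmd;
}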
/* When high resolution timer goes off this function is called. */
@@ -5835,6 +6511,10 @@ static struct sdebug_dev_info *sdebug_device_create(
} else {
devip->zoned = false;
}
+ if (sdebug_ptype == TYPE_TAPE) {
+ devip->tape_density = TAPE_DEF_DENSITY;
+ devip->tape_blksize = TAPE_DEF_BLKSIZE;
+ }
devip->create_ts = ktime_get_boottime();
atomic_set(&devip->stopped, (sdeb_tur_ms_to_ready > 0 ? 2 : 0));
spin_lock_init(&devip->list_lock);
@@ -5905,6 +6585,21 @@ static int scsi_debug_sdev_configure(struct scsi_device *sdp,
if (devip == NULL)
return 1; /* no resources, will be marked offline */
}
+ if (sdebug_ptype == TYPE_TAPE) {
+ if (!devip->tape_blocks[0]) {
+ devip->tape_blocks[0] =
+ kcalloc(TAPE_UNITS, sizeof(struct tape_block),
+ GFP_KERNEL);
+ if (!devip->tape_blocks[0])
+ return 1;
+ }
+ devip->tape_pending_nbr_partitions = -1;
+ if (partition_tape(devip, 1, TAPE_UNITS, 0) < 0) {
+ kfree(devip->tape_blocks[0]);
+ devip->tape_blocks[0] = NULL;
+ return 1;
+ }
+ }
sdp->hostdata = devip;
if (sdebug_no_uld)
sdp->no_uld_attach = 1;
@@ -5950,31 +6645,41 @@ static void scsi_debug_sdev_destroy(struct scsi_device *sdp)
debugfs_remove(devip->debugfs_entry);
+ if (sdebug_ptype == TYPE_TAPE) {
+ kfree(devip->tape_blocks[0]);
+ devip->tape_blocks[0] = NULL;
+ }
+
/* make this slot available for re-use */
devip->used = false;
sdp->hostdata = NULL;
}
-/* Returns true if we require the queued memory to be freed by the caller. */
-static bool stop_qc_helper(struct sdebug_defer *sd_dp,
- enum sdeb_defer_type defer_t)
+/* Returns true if cancelled or not running callback. */
+static bool scsi_debug_stop_cmnd(struct scsi_cmnd *cmnd)
{
+ struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmnd);
+ struct sdebug_defer *sd_dp = &sdsc->sd_dp;
+ enum sdeb_defer_type defer_t = READ_ONCE(sd_dp->defer_t);
+
+ lockdep_assert_held(&sdsc->lock);
+
if (defer_t == SDEB_DEFER_HRT) {
int res = hrtimer_try_to_cancel(&sd_dp->hrt);
switch (res) {
- case 0: /* Not active, it must have already run */
case -1: /* -1 It's executing the CB */
return false;
+ case 0: /* Not active, it must have already run */
case 1: /* Was active, we've now cancelled */
default:
return true;
}
} else if (defer_t == SDEB_DEFER_WQ) {
/* Cancel if pending */
- if (cancel_work_sync(&sd_dp->ew.work))
+ if (cancel_work(&sd_dp->ew.work))
return true;
- /* Was not pending, so it must have run */
+ /* callback may be running, so return false */
return false;
} else if (defer_t == SDEB_DEFER_POLL) {
return true;
@@ -5983,28 +6688,6 @@ static bool stop_qc_helper(struct sdebug_defer *sd_dp,
return false;
}
-
-static bool scsi_debug_stop_cmnd(struct scsi_cmnd *cmnd)
-{
- enum sdeb_defer_type l_defer_t;
- struct sdebug_defer *sd_dp;
- struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmnd);
- struct sdebug_queued_cmd *sqcp = TO_QUEUED_CMD(cmnd);
-
- lockdep_assert_held(&sdsc->lock);
-
- if (!sqcp)
- return false;
- sd_dp = &sqcp->sd_dp;
- l_defer_t = READ_ONCE(sd_dp->defer_t);
- ASSIGN_QUEUED_CMD(cmnd, NULL);
-
- if (stop_qc_helper(sd_dp, l_defer_t))
- sdebug_free_queued_cmd(sqcp);
-
- return true;
-}
-
/*
* Called from scsi_debug_abort() only, which is for timed-out cmd.
*/
@@ -6076,7 +6759,7 @@ static int sdebug_fail_abort(struct scsi_cmnd *cmnd)
static int scsi_debug_abort(struct scsi_cmnd *SCpnt)
{
- bool ok = scsi_debug_abort_cmnd(SCpnt);
+ bool aborted = scsi_debug_abort_cmnd(SCpnt);
u8 *cmd = SCpnt->cmnd;
u8 opcode = cmd[0];
@@ -6085,7 +6768,8 @@ static int scsi_debug_abort(struct scsi_cmnd *SCpnt)
if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
sdev_printk(KERN_INFO, SCpnt->device,
"%s: command%s found\n", __func__,
- ok ? "" : " not");
+ aborted ? "" : " not");
+
if (sdebug_fail_abort(SCpnt)) {
scmd_printk(KERN_INFO, SCpnt, "fail abort command 0x%x\n",
@@ -6093,6 +6777,9 @@ static int scsi_debug_abort(struct scsi_cmnd *SCpnt)
return FAILED;
}
+ if (aborted == false)
+ return FAILED;
+
return SUCCESS;
}
@@ -6144,6 +6831,22 @@ static int sdebug_fail_lun_reset(struct scsi_cmnd *cmnd)
return 0;
}
+static void scsi_tape_reset_clear(struct sdebug_dev_info *devip)
+{
+ if (sdebug_ptype == TYPE_TAPE) {
+ int i;
+
+ devip->tape_blksize = TAPE_DEF_BLKSIZE;
+ devip->tape_density = TAPE_DEF_DENSITY;
+ devip->tape_partition = 0;
+ devip->tape_dce = 0;
+ for (i = 0; i < TAPE_MAX_PARTITIONS; i++)
+ devip->tape_location[i] = 0;
+ devip->tape_pending_nbr_partitions = -1;
+ /* Don't reset partitioning? */
+ }
+}
+
static int scsi_debug_device_reset(struct scsi_cmnd *SCpnt)
{
struct scsi_device *sdp = SCpnt->device;
@@ -6157,8 +6860,10 @@ static int scsi_debug_device_reset(struct scsi_cmnd *SCpnt)
sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
scsi_debug_stop_all_queued(sdp);
- if (devip)
+ if (devip) {
set_bit(SDEBUG_UA_POR, devip->uas_bm);
+ scsi_tape_reset_clear(devip);
+ }
if (sdebug_fail_lun_reset(SCpnt)) {
scmd_printk(KERN_INFO, SCpnt, "fail lun reset 0x%x\n", opcode);
@@ -6196,6 +6901,7 @@ static int scsi_debug_target_reset(struct scsi_cmnd *SCpnt)
list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
if (devip->target == sdp->id) {
set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
+ scsi_tape_reset_clear(devip);
++k;
}
}
@@ -6227,6 +6933,7 @@ static int scsi_debug_bus_reset(struct scsi_cmnd *SCpnt)
list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
+ scsi_tape_reset_clear(devip);
++k;
}
@@ -6250,6 +6957,7 @@ static int scsi_debug_host_reset(struct scsi_cmnd *SCpnt)
list_for_each_entry(devip, &sdbg_host->dev_info_list,
dev_list) {
set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
+ scsi_tape_reset_clear(devip);
++k;
}
}
@@ -6366,33 +7074,6 @@ static bool inject_on_this_cmd(void)
#define INCLUSIVE_TIMING_MAX_NS 1000000 /* 1 millisecond */
-
-void sdebug_free_queued_cmd(struct sdebug_queued_cmd *sqcp)
-{
- if (sqcp)
- kmem_cache_free(queued_cmd_cache, sqcp);
-}
-
-static struct sdebug_queued_cmd *sdebug_alloc_queued_cmd(struct scsi_cmnd *scmd)
-{
- struct sdebug_queued_cmd *sqcp;
- struct sdebug_defer *sd_dp;
-
- sqcp = kmem_cache_zalloc(queued_cmd_cache, GFP_ATOMIC);
- if (!sqcp)
- return NULL;
-
- sd_dp = &sqcp->sd_dp;
-
- hrtimer_init(&sd_dp->hrt, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
- sd_dp->hrt.function = sdebug_q_cmd_hrt_complete;
- INIT_WORK(&sd_dp->ew.work, sdebug_q_cmd_wq_complete);
-
- sqcp->scmd = scmd;
-
- return sqcp;
-}
-
/* Complete the processing of the thread that queued a SCSI command to this
* driver. It either completes the command by calling cmnd_done() or
* schedules a hr timer or work queue then returns 0. Returns
@@ -6409,7 +7090,6 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmnd);
unsigned long flags;
u64 ns_from_boot = 0;
- struct sdebug_queued_cmd *sqcp;
struct scsi_device *sdp;
struct sdebug_defer *sd_dp;
@@ -6441,12 +7121,7 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
}
}
- sqcp = sdebug_alloc_queued_cmd(cmnd);
- if (!sqcp) {
- pr_err("%s no alloc\n", __func__);
- return SCSI_MLQUEUE_HOST_BUSY;
- }
- sd_dp = &sqcp->sd_dp;
+ sd_dp = &sdsc->sd_dp;
if (polled || (ndelay > 0 && ndelay < INCLUSIVE_TIMING_MAX_NS))
ns_from_boot = ktime_get_boottime_ns();
@@ -6494,7 +7169,6 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
if (kt <= d) { /* elapsed duration >= kt */
/* call scsi_done() from this thread */
- sdebug_free_queued_cmd(sqcp);
scsi_done(cmnd);
return 0;
}
@@ -6507,13 +7181,11 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
if (polled) {
spin_lock_irqsave(&sdsc->lock, flags);
sd_dp->cmpl_ts = ktime_add(ns_to_ktime(ns_from_boot), kt);
- ASSIGN_QUEUED_CMD(cmnd, sqcp);
WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_POLL);
spin_unlock_irqrestore(&sdsc->lock, flags);
} else {
/* schedule the invocation of scsi_done() for a later time */
spin_lock_irqsave(&sdsc->lock, flags);
- ASSIGN_QUEUED_CMD(cmnd, sqcp);
WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_HRT);
hrtimer_start(&sd_dp->hrt, kt, HRTIMER_MODE_REL_PINNED);
/*
@@ -6537,13 +7209,11 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
sd_dp->issuing_cpu = raw_smp_processor_id();
if (polled) {
spin_lock_irqsave(&sdsc->lock, flags);
- ASSIGN_QUEUED_CMD(cmnd, sqcp);
sd_dp->cmpl_ts = ns_to_ktime(ns_from_boot);
WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_POLL);
spin_unlock_irqrestore(&sdsc->lock, flags);
} else {
spin_lock_irqsave(&sdsc->lock, flags);
- ASSIGN_QUEUED_CMD(cmnd, sqcp);
WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_WQ);
schedule_work(&sd_dp->ew.work);
spin_unlock_irqrestore(&sdsc->lock, flags);
@@ -6835,7 +7505,7 @@ static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)
blk_mq_tagset_busy_iter(&host->tag_set, sdebug_submit_queue_iter,
&data);
if (f >= 0) {
- seq_printf(m, " in_use_bm BUSY: %s: %d,%d\n",
+ seq_printf(m, " BUSY: %s: %d,%d\n",
"first,last bits", f, l);
}
}
@@ -7910,12 +8580,6 @@ static int __init scsi_debug_init(void)
hosts_to_add = sdebug_add_host;
sdebug_add_host = 0;
- queued_cmd_cache = KMEM_CACHE(sdebug_queued_cmd, SLAB_HWCACHE_ALIGN);
- if (!queued_cmd_cache) {
- ret = -ENOMEM;
- goto driver_unreg;
- }
-
sdebug_debugfs_root = debugfs_create_dir("scsi_debug", NULL);
if (IS_ERR_OR_NULL(sdebug_debugfs_root))
pr_info("%s: failed to create initial debugfs directory\n", __func__);
@@ -7942,8 +8606,6 @@ static int __init scsi_debug_init(void)
return 0;
-driver_unreg:
- driver_unregister(&sdebug_driverfs_driver);
bus_unreg:
bus_unregister(&pseudo_lld_bus);
dev_unreg:
@@ -7959,7 +8621,6 @@ static void __exit scsi_debug_exit(void)
for (; k; k--)
sdebug_do_remove_host(true);
- kmem_cache_destroy(queued_cmd_cache);
driver_unregister(&sdebug_driverfs_driver);
bus_unregister(&pseudo_lld_bus);
root_device_unregister(pseudo_primary);
@@ -8343,7 +9004,6 @@ static bool sdebug_blk_mq_poll_iter(struct request *rq, void *opaque)
struct sdebug_defer *sd_dp;
u32 unique_tag = blk_mq_unique_tag(rq);
u16 hwq = blk_mq_unique_tag_to_hwq(unique_tag);
- struct sdebug_queued_cmd *sqcp;
unsigned long flags;
int queue_num = data->queue_num;
ktime_t time;
@@ -8359,13 +9019,7 @@ static bool sdebug_blk_mq_poll_iter(struct request *rq, void *opaque)
time = ktime_get_boottime();
spin_lock_irqsave(&sdsc->lock, flags);
- sqcp = TO_QUEUED_CMD(cmd);
- if (!sqcp) {
- spin_unlock_irqrestore(&sdsc->lock, flags);
- return true;
- }
-
- sd_dp = &sqcp->sd_dp;
+ sd_dp = &sdsc->sd_dp;
if (READ_ONCE(sd_dp->defer_t) != SDEB_DEFER_POLL) {
spin_unlock_irqrestore(&sdsc->lock, flags);
return true;
@@ -8375,8 +9029,6 @@ static bool sdebug_blk_mq_poll_iter(struct request *rq, void *opaque)
spin_unlock_irqrestore(&sdsc->lock, flags);
return true;
}
-
- ASSIGN_QUEUED_CMD(cmd, NULL);
spin_unlock_irqrestore(&sdsc->lock, flags);
if (sdebug_statistics) {
@@ -8385,8 +9037,6 @@ static bool sdebug_blk_mq_poll_iter(struct request *rq, void *opaque)
atomic_inc(&sdebug_miss_cpus);
}
- sdebug_free_queued_cmd(sqcp);
-
scsi_done(cmd); /* callback to mid level */
(*data->num_entries)++;
return true;
@@ -8701,8 +9351,12 @@ err_out:
static int sdebug_init_cmd_priv(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
{
struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmd);
+ struct sdebug_defer *sd_dp = &sdsc->sd_dp;
spin_lock_init(&sdsc->lock);
+ hrtimer_setup(&sd_dp->hrt, sdebug_q_cmd_hrt_complete, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL_PINNED);
+ INIT_WORK(&sd_dp->ew.work, sdebug_q_cmd_wq_complete);
return 0;
}
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 815e7d63f3e2..376b8897ab90 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -547,6 +547,18 @@ enum scsi_disposition scsi_check_sense(struct scsi_cmnd *scmd)
scsi_report_sense(sdev, &sshdr);
+ if (sshdr.sense_key == UNIT_ATTENTION) {
+ /*
+ * Increment the counters for Power on/Reset or New Media so
+ * that all ULDs interested in these can see that those have
+ * happened, even if someone else gets the sense data.
+ */
+ if (sshdr.asc == 0x28)
+ scmd->device->ua_new_media_ctr++;
+ else if (sshdr.asc == 0x29)
+ scmd->device->ua_por_ctr++;
+ }
+
if (scsi_sense_is_deferred(&sshdr))
return NEEDS_RETRY;
@@ -711,6 +723,13 @@ enum scsi_disposition scsi_check_sense(struct scsi_cmnd *scmd)
return SUCCESS;
case COMPLETED:
+ /*
+ * A command using command duration limits (CDL) with a
+ * descriptor set with policy 0xD may be completed with success
+ * and the sense data DATA CURRENTLY UNAVAILABLE, indicating
+ * that the command was in fact aborted because it exceeded its
+ * duration limit. Never retry these commands.
+ */
if (sshdr.asc == 0x55 && sshdr.ascq == 0x0a) {
set_scsi_ml_byte(scmd, SCSIML_STAT_DL_TIMEOUT);
req->cmd_flags |= REQ_FAILFAST_DEV;
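The scsi_error.c hunk above bumps the new per-device Unit Attention counters for power on/reset (ASC 0x29) and new media (ASC 0x28) so that every interested upper-level driver can notice the event even if another command consumes the sense data. Below is a minimal, illustrative sketch (not part of the patch) of how a ULD can poll the power-on/reset counter through the scsi_get_ua_por_ctr() accessor that the st driver changes later in this diff rely on; saved_por_ctr stands in for a field the caller would keep in its own per-device state.

/* Illustrative only: report a power on/reset exactly once per event. */
static bool uld_saw_reset(struct scsi_device *sdev, unsigned int *saved_por_ctr)
{
	unsigned int ctr = scsi_get_ua_por_ctr(sdev);

	if (ctr == *saved_por_ctr)
		return false;

	*saved_por_ctr = ctr;	/* remember the counter so the event is reported once */
	return true;		/* the device was reset or power cycled */
}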
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index f1cfe0bb89b2..0d29470e86b0 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1149,7 +1149,7 @@ blk_status_t scsi_alloc_sgtables(struct scsi_cmnd *cmd)
* Next, walk the list, and fill in the addresses and sizes of
* each segment.
*/
- count = __blk_rq_map_sg(rq->q, rq, cmd->sdb.table.sgl, &last_sg);
+ count = __blk_rq_map_sg(rq, cmd->sdb.table.sgl, &last_sg);
if (blk_rq_bytes(rq) & rq->q->limits.dma_pad_mask) {
unsigned int pad_len =
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 96d7e1a9a7c7..4833b8fe251b 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -151,8 +151,9 @@ int scsi_complete_async_scans(void)
struct async_scan_data *data;
do {
- if (list_empty(&scanning_hosts))
- return 0;
+ scoped_guard(spinlock, &async_scan_lock)
+ if (list_empty(&scanning_hosts))
+ return 0;
/* If we can't get memory immediately, that's OK. Just
* sleep a little. Even if we never get memory, the async
* scans will finish eventually.
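The scoped_guard(spinlock, &async_scan_lock) added above, from <linux/cleanup.h>, holds the lock only for the statement that follows and drops it on every exit path, including the early return, so the list_empty() test no longer races with hosts joining or leaving scanning_hosts. Roughly the equivalent open-coded form, shown as an illustrative sketch with a hypothetical helper:

/* Illustrative only: what the guarded early-exit check expands to, roughly. */
static int scanning_hosts_empty(spinlock_t *lock, struct list_head *hosts)
{
	int empty;

	spin_lock(lock);
	empty = list_empty(hosts);
	spin_unlock(lock);

	return empty;		/* caller returns 0 when this is true */
}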
diff --git a/drivers/scsi/scsi_sysctl.c b/drivers/scsi/scsi_sysctl.c
index be4aef0f4f99..055a03a83ad6 100644
--- a/drivers/scsi/scsi_sysctl.c
+++ b/drivers/scsi/scsi_sysctl.c
@@ -17,7 +17,9 @@ static const struct ctl_table scsi_table[] = {
.data = &scsi_logging_level,
.maxlen = sizeof(scsi_logging_level),
.mode = 0644,
- .proc_handler = proc_dointvec },
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = SYSCTL_INT_MAX },
};
static struct ctl_table_header *scsi_table_header;
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index ebbd50ec0cda..74a6830b7ed8 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -163,9 +163,11 @@ static const char *st_formats[] = {
static int debugging = DEBUG;
+/* Setting these non-zero may risk not recognizing resets */
#define MAX_RETRIES 0
#define MAX_WRITE_RETRIES 0
#define MAX_READY_RETRIES 0
+
#define NO_TAPE NOT_READY
#define ST_TIMEOUT (900 * HZ)
@@ -357,10 +359,18 @@ static int st_chk_result(struct scsi_tape *STp, struct st_request * SRpnt)
{
int result = SRpnt->result;
u8 scode;
+ unsigned int ctr;
DEB(const char *stp;)
char *name = STp->name;
struct st_cmdstatus *cmdstatp;
+ ctr = scsi_get_ua_por_ctr(STp->device);
+ if (ctr != STp->por_ctr) {
+ STp->por_ctr = ctr;
+ STp->pos_unknown = 1; /* ASC => power on / reset */
+ st_printk(KERN_WARNING, STp, "Power on/reset recognized.");
+ }
+
if (!result)
return 0;
@@ -413,10 +423,11 @@ static int st_chk_result(struct scsi_tape *STp, struct st_request * SRpnt)
if (cmdstatp->have_sense &&
cmdstatp->sense_hdr.asc == 0 && cmdstatp->sense_hdr.ascq == 0x17)
STp->cleaning_req = 1; /* ASC and ASCQ => cleaning requested */
- if (cmdstatp->have_sense && scode == UNIT_ATTENTION && cmdstatp->sense_hdr.asc == 0x29)
+ if (cmdstatp->have_sense && scode == UNIT_ATTENTION &&
+ cmdstatp->sense_hdr.asc == 0x29 && !STp->pos_unknown) {
STp->pos_unknown = 1; /* ASC => power on / reset */
-
- STp->pos_unknown |= STp->device->was_reset;
+ st_printk(KERN_WARNING, STp, "Power on/reset recognized.");
+ }
if (cmdstatp->have_sense &&
scode == RECOVERED_ERROR
@@ -952,7 +963,6 @@ static void reset_state(struct scsi_tape *STp)
STp->partition = find_partition(STp);
if (STp->partition < 0)
STp->partition = 0;
- STp->new_partition = STp->partition;
}
}
@@ -969,6 +979,7 @@ static int test_ready(struct scsi_tape *STp, int do_wait)
{
int attentions, waits, max_wait, scode;
int retval = CHKRES_READY, new_session = 0;
+ unsigned int ctr;
unsigned char cmd[MAX_COMMAND_SIZE];
struct st_request *SRpnt = NULL;
struct st_cmdstatus *cmdstatp = &STp->buffer->cmdstat;
@@ -1025,6 +1036,13 @@ static int test_ready(struct scsi_tape *STp, int do_wait)
}
}
+ ctr = scsi_get_ua_new_media_ctr(STp->device);
+ if (ctr != STp->new_media_ctr) {
+ STp->new_media_ctr = ctr;
+ new_session = 1;
+ DEBC_printk(STp, "New tape session.");
+ }
+
retval = (STp->buffer)->syscall_result;
if (!retval)
retval = new_session ? CHKRES_NEW_SESSION : CHKRES_READY;
@@ -2897,7 +2915,6 @@ static int st_int_ioctl(struct scsi_tape *STp, unsigned int cmd_in, unsigned lon
timeout = STp->long_timeout * 8;
DEBC_printk(STp, "Erasing tape.\n");
- fileno = blkno = at_sm = 0;
break;
case MTSETBLK: /* Set block length */
case MTSETDENSITY: /* Set tape density */
@@ -2930,14 +2947,17 @@ static int st_int_ioctl(struct scsi_tape *STp, unsigned int cmd_in, unsigned lon
if (cmd_in == MTSETDENSITY) {
(STp->buffer)->b_data[4] = arg;
STp->density_changed = 1; /* At least we tried ;-) */
+ STp->changed_density = arg;
} else if (cmd_in == SET_DENS_AND_BLK)
(STp->buffer)->b_data[4] = arg >> 24;
else
(STp->buffer)->b_data[4] = STp->density;
if (cmd_in == MTSETBLK || cmd_in == SET_DENS_AND_BLK) {
ltmp = arg & MT_ST_BLKSIZE_MASK;
- if (cmd_in == MTSETBLK)
+ if (cmd_in == MTSETBLK) {
STp->blksize_changed = 1; /* At least we tried ;-) */
+ STp->changed_blksize = arg;
+ }
} else
ltmp = STp->block_size;
(STp->buffer)->b_data[9] = (ltmp >> 16);
@@ -3084,7 +3104,9 @@ static int st_int_ioctl(struct scsi_tape *STp, unsigned int cmd_in, unsigned lon
cmd_in == MTSETDRVBUFFER ||
cmd_in == SET_DENS_AND_BLK) {
if (cmdstatp->sense_hdr.sense_key == ILLEGAL_REQUEST &&
- !(STp->use_pf & PF_TESTED)) {
+ cmdstatp->sense_hdr.asc == 0x24 &&
+ (STp->device)->scsi_level <= SCSI_2 &&
+ !(STp->use_pf & PF_TESTED)) {
/* Try the other possible state of Page Format if not
already tried */
STp->use_pf = (STp->use_pf ^ USE_PF) | PF_TESTED;
@@ -3636,9 +3658,23 @@ static long st_ioctl(struct file *file, unsigned int cmd_in, unsigned long arg)
retval = (-EIO);
goto out;
}
- reset_state(STp);
- /* remove this when the midlevel properly clears was_reset */
- STp->device->was_reset = 0;
+ reset_state(STp); /* Clears pos_unknown */
+
+ /* Fix the device settings after reset, ignore errors */
+ if (mtc.mt_op == MTREW || mtc.mt_op == MTSEEK ||
+ mtc.mt_op == MTEOM) {
+ if (STp->can_partitions) {
+ /* STp->new_partition contains the
+ * latest partition set
+ */
+ STp->partition = 0;
+ switch_partition(STp);
+ }
+ if (STp->density_changed)
+ st_int_ioctl(STp, MTSETDENSITY, STp->changed_density);
+ if (STp->blksize_changed)
+ st_int_ioctl(STp, MTSETBLK, STp->changed_blksize);
+ }
}
if (mtc.mt_op != MTNOP && mtc.mt_op != MTSETBLK &&
@@ -4122,7 +4158,7 @@ static void validate_options(void)
*/
static int __init st_setup(char *str)
{
- int i, len, ints[5];
+ int i, len, ints[ARRAY_SIZE(parms) + 1];
char *stp;
stp = get_options(str, ARRAY_SIZE(ints), ints);
@@ -4384,6 +4420,9 @@ static int st_probe(struct device *dev)
goto out_idr_remove;
}
+ tpnt->new_media_ctr = scsi_get_ua_new_media_ctr(SDp);
+ tpnt->por_ctr = scsi_get_ua_por_ctr(SDp);
+
dev_set_drvdata(dev, tpnt);
@@ -4665,6 +4704,24 @@ options_show(struct device *dev, struct device_attribute *attr, char *buf)
}
static DEVICE_ATTR_RO(options);
+/**
+ * position_lost_in_reset_show - Value 1 indicates that reads, writes, etc.
+ * are blocked because a device reset has occurred and no operation positioning
+ * the tape has been issued.
+ * @dev: struct device
+ * @attr: attribute structure
+ * @buf: buffer to return formatted data in
+ */
+static ssize_t position_lost_in_reset_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct st_modedef *STm = dev_get_drvdata(dev);
+ struct scsi_tape *STp = STm->tape;
+
+ return sprintf(buf, "%d", STp->pos_unknown);
+}
+static DEVICE_ATTR_RO(position_lost_in_reset);
+
/* Support for tape stats */
/**
@@ -4849,6 +4906,7 @@ static struct attribute *st_dev_attrs[] = {
&dev_attr_default_density.attr,
&dev_attr_default_compression.attr,
&dev_attr_options.attr,
+ &dev_attr_position_lost_in_reset.attr,
NULL,
};
diff --git a/drivers/scsi/st.h b/drivers/scsi/st.h
index 1aaaf5369a40..0d7c4b8c2c8a 100644
--- a/drivers/scsi/st.h
+++ b/drivers/scsi/st.h
@@ -165,6 +165,7 @@ struct scsi_tape {
unsigned char compression_changed;
unsigned char drv_buffer;
unsigned char density;
+ unsigned char changed_density;
unsigned char door_locked;
unsigned char autorew_dev; /* auto-rewind device */
unsigned char rew_at_close; /* rewind necessary at close */
@@ -172,11 +173,16 @@ struct scsi_tape {
unsigned char cleaning_req; /* cleaning requested? */
unsigned char first_tur; /* first TEST UNIT READY */
int block_size;
+ int changed_blksize;
int min_block;
int max_block;
int recover_count; /* From tape opening */
int recover_reg; /* From last status call */
+ /* The saved values of midlevel counters */
+ unsigned int new_media_ctr;
+ unsigned int por_ctr;
+
#if DEBUG
unsigned char write_pending;
int nbr_finished;
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index a8614e54544e..35db061ae3ec 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -776,7 +776,7 @@ static void handle_multichannel_storage(struct hv_device *device, int max_chns)
if (vstor_packet->operation != VSTOR_OPERATION_COMPLETE_IO ||
vstor_packet->status != 0) {
- dev_err(dev, "Failed to create sub-channel: op=%d, sts=%d\n",
+ dev_err(dev, "Failed to create sub-channel: op=%d, host=0x%x\n",
vstor_packet->operation, vstor_packet->status);
return;
}
@@ -1183,7 +1183,7 @@ static void storvsc_on_io_completion(struct storvsc_device *stor_device,
STORVSC_LOGGING_WARN : STORVSC_LOGGING_ERROR;
storvsc_log_ratelimited(device, loglevel,
- "tag#%d cmd 0x%x status: scsi 0x%x srb 0x%x hv 0x%x\n",
+ "tag#%d cmd 0x%x status: scsi 0x%x srb 0x%x host 0x%x\n",
scsi_cmd_to_rq(request->cmd)->tag,
stor_pkt->vm_srb.cdb[0],
vstor_packet->vm_srb.scsi_status,
diff --git a/drivers/sh/clk/cpg.c b/drivers/sh/clk/cpg.c
index fd72d9088bdc..64ed7d64458a 100644
--- a/drivers/sh/clk/cpg.c
+++ b/drivers/sh/clk/cpg.c
@@ -26,6 +26,19 @@ static unsigned int sh_clk_read(struct clk *clk)
return ioread32(clk->mapped_reg);
}
+static unsigned int sh_clk_read_status(struct clk *clk)
+{
+ void __iomem *mapped_status = (phys_addr_t)clk->status_reg -
+ (phys_addr_t)clk->enable_reg + clk->mapped_reg;
+
+ if (clk->flags & CLK_ENABLE_REG_8BIT)
+ return ioread8(mapped_status);
+ else if (clk->flags & CLK_ENABLE_REG_16BIT)
+ return ioread16(mapped_status);
+
+ return ioread32(mapped_status);
+}
+
static void sh_clk_write(int value, struct clk *clk)
{
if (clk->flags & CLK_ENABLE_REG_8BIT)
@@ -40,20 +53,10 @@ static int sh_clk_mstp_enable(struct clk *clk)
{
sh_clk_write(sh_clk_read(clk) & ~(1 << clk->enable_bit), clk);
if (clk->status_reg) {
- unsigned int (*read)(const void __iomem *addr);
int i;
- void __iomem *mapped_status = (phys_addr_t)clk->status_reg -
- (phys_addr_t)clk->enable_reg + clk->mapped_reg;
-
- if (clk->flags & CLK_ENABLE_REG_8BIT)
- read = ioread8;
- else if (clk->flags & CLK_ENABLE_REG_16BIT)
- read = ioread16;
- else
- read = ioread32;
for (i = 1000;
- (read(mapped_status) & (1 << clk->enable_bit)) && i;
+ (sh_clk_read_status(clk) & (1 << clk->enable_bit)) && i;
i--)
cpu_relax();
if (!i) {
diff --git a/drivers/slimbus/messaging.c b/drivers/slimbus/messaging.c
index e7aa9bd4b44b..6f01d944f9c6 100644
--- a/drivers/slimbus/messaging.c
+++ b/drivers/slimbus/messaging.c
@@ -148,8 +148,9 @@ int slim_do_transfer(struct slim_controller *ctrl, struct slim_msg_txn *txn)
}
ret = ctrl->xfer_msg(ctrl, txn);
-
- if (!ret && need_tid && !txn->msg->comp) {
+ if (ret == -ETIMEDOUT) {
+ slim_free_txn_tid(ctrl, txn);
+ } else if (!ret && need_tid && !txn->msg->comp) {
unsigned long ms = txn->rl + HZ;
time_left = wait_for_completion_timeout(txn->comp,
diff --git a/drivers/soc/apple/rtkit-internal.h b/drivers/soc/apple/rtkit-internal.h
index 27c9fa745fd5..b8d5244678f0 100644
--- a/drivers/soc/apple/rtkit-internal.h
+++ b/drivers/soc/apple/rtkit-internal.h
@@ -44,6 +44,7 @@ struct apple_rtkit {
struct apple_rtkit_shmem ioreport_buffer;
struct apple_rtkit_shmem crashlog_buffer;
+ struct apple_rtkit_shmem oslog_buffer;
struct apple_rtkit_shmem syslog_buffer;
char *syslog_msg_buffer;
diff --git a/drivers/soc/apple/rtkit.c b/drivers/soc/apple/rtkit.c
index e6d940292c9f..5fffd0f003dc 100644
--- a/drivers/soc/apple/rtkit.c
+++ b/drivers/soc/apple/rtkit.c
@@ -12,6 +12,7 @@ enum {
APPLE_RTKIT_PWR_STATE_IDLE = 0x201, /* sleeping, retain state */
APPLE_RTKIT_PWR_STATE_QUIESCED = 0x10, /* running but no communication */
APPLE_RTKIT_PWR_STATE_ON = 0x20, /* normal operating state */
+ APPLE_RTKIT_PWR_STATE_INIT = 0x220, /* init after starting the coproc */
};
enum {
@@ -66,8 +67,9 @@ enum {
#define APPLE_RTKIT_SYSLOG_MSG_SIZE GENMASK_ULL(31, 24)
#define APPLE_RTKIT_OSLOG_TYPE GENMASK_ULL(63, 56)
-#define APPLE_RTKIT_OSLOG_INIT 1
-#define APPLE_RTKIT_OSLOG_ACK 3
+#define APPLE_RTKIT_OSLOG_BUFFER_REQUEST 1
+#define APPLE_RTKIT_OSLOG_SIZE GENMASK_ULL(55, 36)
+#define APPLE_RTKIT_OSLOG_IOVA GENMASK_ULL(35, 0)
#define APPLE_RTKIT_MIN_SUPPORTED_VERSION 11
#define APPLE_RTKIT_MAX_SUPPORTED_VERSION 12
@@ -97,12 +99,19 @@ bool apple_rtkit_is_crashed(struct apple_rtkit *rtk)
}
EXPORT_SYMBOL_GPL(apple_rtkit_is_crashed);
-static void apple_rtkit_management_send(struct apple_rtkit *rtk, u8 type,
+static int apple_rtkit_management_send(struct apple_rtkit *rtk, u8 type,
u64 msg)
{
+ int ret;
+
msg &= ~APPLE_RTKIT_MGMT_TYPE;
msg |= FIELD_PREP(APPLE_RTKIT_MGMT_TYPE, type);
- apple_rtkit_send_message(rtk, APPLE_RTKIT_EP_MGMT, msg, NULL, false);
+ ret = apple_rtkit_send_message(rtk, APPLE_RTKIT_EP_MGMT, msg, NULL, false);
+
+ if (ret)
+ dev_err(rtk->dev, "RTKit: Failed to send management message: %d\n", ret);
+
+ return ret;
}
static void apple_rtkit_management_rx_hello(struct apple_rtkit *rtk, u64 msg)
@@ -251,15 +260,21 @@ static int apple_rtkit_common_rx_get_buffer(struct apple_rtkit *rtk,
struct apple_rtkit_shmem *buffer,
u8 ep, u64 msg)
{
- size_t n_4kpages = FIELD_GET(APPLE_RTKIT_BUFFER_REQUEST_SIZE, msg);
u64 reply;
int err;
+ /* The different size vs. IOVA shifts look odd but are indeed correct this way */
+ if (ep == APPLE_RTKIT_EP_OSLOG) {
+ buffer->size = FIELD_GET(APPLE_RTKIT_OSLOG_SIZE, msg);
+ buffer->iova = FIELD_GET(APPLE_RTKIT_OSLOG_IOVA, msg) << 12;
+ } else {
+ buffer->size = FIELD_GET(APPLE_RTKIT_BUFFER_REQUEST_SIZE, msg) << 12;
+ buffer->iova = FIELD_GET(APPLE_RTKIT_BUFFER_REQUEST_IOVA, msg);
+ }
+
buffer->buffer = NULL;
buffer->iomem = NULL;
buffer->is_mapped = false;
- buffer->iova = FIELD_GET(APPLE_RTKIT_BUFFER_REQUEST_IOVA, msg);
- buffer->size = n_4kpages << 12;
dev_dbg(rtk->dev, "RTKit: buffer request for 0x%zx bytes at %pad\n",
buffer->size, &buffer->iova);
@@ -284,17 +299,30 @@ static int apple_rtkit_common_rx_get_buffer(struct apple_rtkit *rtk,
}
if (!buffer->is_mapped) {
- reply = FIELD_PREP(APPLE_RTKIT_SYSLOG_TYPE,
- APPLE_RTKIT_BUFFER_REQUEST);
- reply |= FIELD_PREP(APPLE_RTKIT_BUFFER_REQUEST_SIZE, n_4kpages);
- reply |= FIELD_PREP(APPLE_RTKIT_BUFFER_REQUEST_IOVA,
- buffer->iova);
+ /* oslog uses different fields and needs a shifted IOVA instead of size */
+ if (ep == APPLE_RTKIT_EP_OSLOG) {
+ reply = FIELD_PREP(APPLE_RTKIT_OSLOG_TYPE,
+ APPLE_RTKIT_OSLOG_BUFFER_REQUEST);
+ reply |= FIELD_PREP(APPLE_RTKIT_OSLOG_SIZE, buffer->size);
+ reply |= FIELD_PREP(APPLE_RTKIT_OSLOG_IOVA,
+ buffer->iova >> 12);
+ } else {
+ reply = FIELD_PREP(APPLE_RTKIT_SYSLOG_TYPE,
+ APPLE_RTKIT_BUFFER_REQUEST);
+ reply |= FIELD_PREP(APPLE_RTKIT_BUFFER_REQUEST_SIZE,
+ buffer->size >> 12);
+ reply |= FIELD_PREP(APPLE_RTKIT_BUFFER_REQUEST_IOVA,
+ buffer->iova);
+ }
apple_rtkit_send_message(rtk, ep, reply, NULL, false);
}
return 0;
error:
+ dev_err(rtk->dev, "RTKit: failed buffer request for 0x%zx bytes (%d)\n",
+ buffer->size, err);
+
buffer->buffer = NULL;
buffer->iomem = NULL;
buffer->iova = 0;
@@ -360,7 +388,6 @@ static void apple_rtkit_crashlog_rx(struct apple_rtkit *rtk, u64 msg)
apple_rtkit_memcpy(rtk, bfr, &rtk->crashlog_buffer, 0,
rtk->crashlog_buffer.size);
apple_rtkit_crashlog_dump(rtk, bfr, rtk->crashlog_buffer.size);
- kfree(bfr);
} else {
dev_err(rtk->dev,
"RTKit: Couldn't allocate crashlog shadow buffer\n");
@@ -368,7 +395,9 @@ static void apple_rtkit_crashlog_rx(struct apple_rtkit *rtk, u64 msg)
rtk->crashed = true;
if (rtk->ops->crashed)
- rtk->ops->crashed(rtk->cookie);
+ rtk->ops->crashed(rtk->cookie, bfr, rtk->crashlog_buffer.size);
+
+ kfree(bfr);
}
static void apple_rtkit_ioreport_rx(struct apple_rtkit *rtk, u64 msg)
@@ -448,7 +477,7 @@ static void apple_rtkit_syslog_rx_log(struct apple_rtkit *rtk, u64 msg)
log_context[sizeof(log_context) - 1] = 0;
- msglen = rtk->syslog_msg_size - 1;
+ msglen = strnlen(rtk->syslog_msg_buffer, rtk->syslog_msg_size - 1);
while (msglen > 0 &&
should_crop_syslog_char(rtk->syslog_msg_buffer[msglen - 1]))
msglen--;
@@ -482,25 +511,18 @@ static void apple_rtkit_syslog_rx(struct apple_rtkit *rtk, u64 msg)
}
}
-static void apple_rtkit_oslog_rx_init(struct apple_rtkit *rtk, u64 msg)
-{
- u64 ack;
-
- dev_dbg(rtk->dev, "RTKit: oslog init: msg: 0x%llx\n", msg);
- ack = FIELD_PREP(APPLE_RTKIT_OSLOG_TYPE, APPLE_RTKIT_OSLOG_ACK);
- apple_rtkit_send_message(rtk, APPLE_RTKIT_EP_OSLOG, ack, NULL, false);
-}
-
static void apple_rtkit_oslog_rx(struct apple_rtkit *rtk, u64 msg)
{
u8 type = FIELD_GET(APPLE_RTKIT_OSLOG_TYPE, msg);
switch (type) {
- case APPLE_RTKIT_OSLOG_INIT:
- apple_rtkit_oslog_rx_init(rtk, msg);
+ case APPLE_RTKIT_OSLOG_BUFFER_REQUEST:
+ apple_rtkit_common_rx_get_buffer(rtk, &rtk->oslog_buffer,
+ APPLE_RTKIT_EP_OSLOG, msg);
break;
default:
- dev_warn(rtk->dev, "RTKit: Unknown oslog message: %llx\n", msg);
+ dev_warn(rtk->dev, "RTKit: Unknown oslog message: %llx\n",
+ msg);
}
}
@@ -588,11 +610,18 @@ int apple_rtkit_send_message(struct apple_rtkit *rtk, u8 ep, u64 message,
.msg1 = ep,
};
- if (rtk->crashed)
+ if (rtk->crashed) {
+ dev_warn(rtk->dev,
+ "RTKit: Device is crashed, cannot send message\n");
return -EINVAL;
+ }
+
if (ep >= APPLE_RTKIT_APP_ENDPOINT_START &&
- !apple_rtkit_is_running(rtk))
+ !apple_rtkit_is_running(rtk)) {
+ dev_warn(rtk->dev,
+ "RTKit: Endpoint 0x%02x is not running, cannot send message\n", ep);
return -EINVAL;
+ }
/*
* The message will be sent with a MMIO write. We need the barrier
@@ -667,7 +696,7 @@ struct apple_rtkit *apple_rtkit_init(struct device *dev, void *cookie,
rtk->mbox->rx = apple_rtkit_rx;
rtk->mbox->cookie = rtk;
- rtk->wq = alloc_ordered_workqueue("rtkit-%s", WQ_MEM_RECLAIM,
+ rtk->wq = alloc_ordered_workqueue("rtkit-%s", WQ_HIGHPRI | WQ_MEM_RECLAIM,
dev_name(rtk->dev));
if (!rtk->wq) {
ret = -ENOMEM;
@@ -710,6 +739,7 @@ int apple_rtkit_reinit(struct apple_rtkit *rtk)
apple_rtkit_free_buffer(rtk, &rtk->ioreport_buffer);
apple_rtkit_free_buffer(rtk, &rtk->crashlog_buffer);
+ apple_rtkit_free_buffer(rtk, &rtk->oslog_buffer);
apple_rtkit_free_buffer(rtk, &rtk->syslog_buffer);
kfree(rtk->syslog_msg_buffer);
@@ -742,8 +772,10 @@ static int apple_rtkit_set_ap_power_state(struct apple_rtkit *rtk,
reinit_completion(&rtk->ap_pwr_ack_completion);
msg = FIELD_PREP(APPLE_RTKIT_MGMT_PWR_STATE, state);
- apple_rtkit_management_send(rtk, APPLE_RTKIT_MGMT_SET_AP_PWR_STATE,
- msg);
+ ret = apple_rtkit_management_send(rtk, APPLE_RTKIT_MGMT_SET_AP_PWR_STATE,
+ msg);
+ if (ret)
+ return ret;
ret = apple_rtkit_wait_for_completion(&rtk->ap_pwr_ack_completion);
if (ret)
@@ -763,8 +795,10 @@ static int apple_rtkit_set_iop_power_state(struct apple_rtkit *rtk,
reinit_completion(&rtk->iop_pwr_ack_completion);
msg = FIELD_PREP(APPLE_RTKIT_MGMT_PWR_STATE, state);
- apple_rtkit_management_send(rtk, APPLE_RTKIT_MGMT_SET_IOP_PWR_STATE,
- msg);
+ ret = apple_rtkit_management_send(rtk, APPLE_RTKIT_MGMT_SET_IOP_PWR_STATE,
+ msg);
+ if (ret)
+ return ret;
ret = apple_rtkit_wait_for_completion(&rtk->iop_pwr_ack_completion);
if (ret)
@@ -865,6 +899,7 @@ EXPORT_SYMBOL_GPL(apple_rtkit_quiesce);
int apple_rtkit_wake(struct apple_rtkit *rtk)
{
u64 msg;
+ int ret;
if (apple_rtkit_is_running(rtk))
return -EINVAL;
@@ -875,9 +910,11 @@ int apple_rtkit_wake(struct apple_rtkit *rtk)
* Use open-coded apple_rtkit_set_iop_power_state since apple_rtkit_boot
* will wait for the completion anyway.
*/
- msg = FIELD_PREP(APPLE_RTKIT_MGMT_PWR_STATE, APPLE_RTKIT_PWR_STATE_ON);
- apple_rtkit_management_send(rtk, APPLE_RTKIT_MGMT_SET_IOP_PWR_STATE,
- msg);
+ msg = FIELD_PREP(APPLE_RTKIT_MGMT_PWR_STATE, APPLE_RTKIT_PWR_STATE_INIT);
+ ret = apple_rtkit_management_send(rtk, APPLE_RTKIT_MGMT_SET_IOP_PWR_STATE,
+ msg);
+ if (ret)
+ return ret;
return apple_rtkit_boot(rtk);
}
@@ -890,6 +927,7 @@ void apple_rtkit_free(struct apple_rtkit *rtk)
apple_rtkit_free_buffer(rtk, &rtk->ioreport_buffer);
apple_rtkit_free_buffer(rtk, &rtk->crashlog_buffer);
+ apple_rtkit_free_buffer(rtk, &rtk->oslog_buffer);
apple_rtkit_free_buffer(rtk, &rtk->syslog_buffer);
kfree(rtk->syslog_msg_buffer);
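For the oslog endpoint the buffer-request message carries the size in bytes in bits 55:36 and the IOVA as a 4 KiB page number in bits 35:0, the reverse of the generic request where the size is counted in pages and the IOVA is a byte address; that is what the "different size vs. IOVA shifts" comment above refers to. An illustrative decode using only the masks defined in this hunk (not part of the patch):

/* Illustrative only: unpacking an oslog buffer request per the masks above. */
static void oslog_decode_request(u64 msg, size_t *size, dma_addr_t *iova)
{
	*size = FIELD_GET(APPLE_RTKIT_OSLOG_SIZE, msg);		/* already in bytes */
	*iova = FIELD_GET(APPLE_RTKIT_OSLOG_IOVA, msg) << 12;	/* page number to address */
}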
diff --git a/drivers/soc/atmel/soc.c b/drivers/soc/atmel/soc.c
index 298b542dd1c0..09347bccdb1d 100644
--- a/drivers/soc/atmel/soc.c
+++ b/drivers/soc/atmel/soc.c
@@ -246,6 +246,9 @@ static const struct at91_soc socs[] __initconst = {
"samv70q19", "samv7"),
#endif
#ifdef CONFIG_SOC_SAMA7
+ AT91_SOC(SAMA7D65_CIDR_MATCH, AT91_CIDR_MASK_SAMA7G5,
+ AT91_CIDR_VERSION_MASK_SAMA7G5, SAMA7D65_EXID_MATCH,
+ "sama7d65", "sama7d6"),
AT91_SOC(SAMA7G5_CIDR_MATCH, AT91_CIDR_MATCH_MASK,
AT91_CIDR_VERSION_MASK_SAMA7G5, SAMA7G51_EXID_MATCH,
"sama7g51", "sama7g5"),
@@ -305,6 +308,7 @@ static int __init at91_get_cidr_exid_from_chipid(u32 *cidr, u32 *exid)
void __iomem *regs;
static const struct of_device_id chipids[] = {
{ .compatible = "atmel,sama5d2-chipid" },
+ { .compatible = "microchip,sama7d65-chipid" },
{ .compatible = "microchip,sama7g5-chipid" },
{ },
};
@@ -393,6 +397,7 @@ static const struct of_device_id at91_soc_allowed_list[] __initconst = {
{ .compatible = "atmel,at91sam9", },
{ .compatible = "atmel,sama5", },
{ .compatible = "atmel,samv7", },
+ { .compatible = "microchip,sama7d65", },
{ .compatible = "microchip,sama7g5", },
{ }
};
diff --git a/drivers/soc/atmel/soc.h b/drivers/soc/atmel/soc.h
index 2c78e54255f7..66a74017d9a3 100644
--- a/drivers/soc/atmel/soc.h
+++ b/drivers/soc/atmel/soc.h
@@ -45,6 +45,7 @@ at91_soc_init(const struct at91_soc *socs);
#define AT91SAM9N12_CIDR_MATCH 0x019a07a0
#define SAM9X60_CIDR_MATCH 0x019b35a0
#define SAM9X7_CIDR_MATCH 0x09750020
+#define SAMA7D65_CIDR_MATCH 0x00262100
#define SAMA7G5_CIDR_MATCH 0x00162100
#define AT91SAM9M11_EXID_MATCH 0x00000001
@@ -75,6 +76,8 @@ at91_soc_init(const struct at91_soc *socs);
#define SAM9X75_D5M_EXID_MATCH 0x00000010
#define SAM9X75_EXID_MATCH 0x00000000
+#define SAMA7D65_EXID_MATCH 0x00000080
+
#define SAMA7G51_EXID_MATCH 0x3
#define SAMA7G52_EXID_MATCH 0x2
#define SAMA7G53_EXID_MATCH 0x1
diff --git a/drivers/soc/hisilicon/kunpeng_hccs.c b/drivers/soc/hisilicon/kunpeng_hccs.c
index 8aa8dec14911..444a8f59b7da 100644
--- a/drivers/soc/hisilicon/kunpeng_hccs.c
+++ b/drivers/soc/hisilicon/kunpeng_hccs.c
@@ -1539,8 +1539,8 @@ static ssize_t used_types_show(struct kobject *kobj,
u16 i;
for (i = 0; i < hdev->used_type_num - 1; i++)
- len += sysfs_emit(&buf[len], "%s ", hdev->type_name_maps[i].name);
- len += sysfs_emit(&buf[len], "%s\n", hdev->type_name_maps[i].name);
+ len += sysfs_emit_at(buf, len, "%s ", hdev->type_name_maps[i].name);
+ len += sysfs_emit_at(buf, len, "%s\n", hdev->type_name_maps[i].name);
return len;
}
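The kunpeng_hccs hunk above is needed because sysfs_emit() insists on being handed the page-aligned start of the sysfs buffer; called as sysfs_emit(&buf[len], ...) it warns and returns 0 once len is non-zero, so everything after the first entry was silently dropped. sysfs_emit_at(buf, len, ...) is the intended API for appending at an offset. A small illustrative sketch of the pattern, with hypothetical names:

/* Illustrative only: emitting a space-separated list from a sysfs show(). */
static ssize_t names_show(struct kobject *kobj, struct kobj_attribute *attr,
			  char *buf)
{
	static const char * const names[] = { "foo", "bar", "baz" };
	ssize_t len = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(names) - 1; i++)
		len += sysfs_emit_at(buf, len, "%s ", names[i]);
	len += sysfs_emit_at(buf, len, "%s\n", names[i]);

	return len;
}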
diff --git a/drivers/soc/imx/soc-imx8m.c b/drivers/soc/imx/soc-imx8m.c
index 8ac7658e3d52..3ed8161d7d28 100644
--- a/drivers/soc/imx/soc-imx8m.c
+++ b/drivers/soc/imx/soc-imx8m.c
@@ -192,9 +192,20 @@ static __maybe_unused const struct of_device_id imx8_soc_match[] = {
devm_kasprintf((dev), GFP_KERNEL, "%d.%d", ((soc_rev) >> 4) & 0xf, (soc_rev) & 0xf) : \
"unknown"
+static void imx8m_unregister_soc(void *data)
+{
+ soc_device_unregister(data);
+}
+
+static void imx8m_unregister_cpufreq(void *data)
+{
+ platform_device_unregister(data);
+}
+
static int imx8m_soc_probe(struct platform_device *pdev)
{
struct soc_device_attribute *soc_dev_attr;
+ struct platform_device *cpufreq_dev;
const struct imx8_soc_data *data;
struct device *dev = &pdev->dev;
const struct of_device_id *id;
@@ -239,11 +250,22 @@ static int imx8m_soc_probe(struct platform_device *pdev)
if (IS_ERR(soc_dev))
return PTR_ERR(soc_dev);
+ ret = devm_add_action(dev, imx8m_unregister_soc, soc_dev);
+ if (ret)
+ return ret;
+
pr_info("SoC: %s revision %s\n", soc_dev_attr->soc_id,
soc_dev_attr->revision);
- if (IS_ENABLED(CONFIG_ARM_IMX_CPUFREQ_DT))
- platform_device_register_simple("imx-cpufreq-dt", -1, NULL, 0);
+ if (IS_ENABLED(CONFIG_ARM_IMX_CPUFREQ_DT)) {
+ cpufreq_dev = platform_device_register_simple("imx-cpufreq-dt", -1, NULL, 0);
+ if (IS_ERR(cpufreq_dev))
+ return dev_err_probe(dev, PTR_ERR(cpufreq_dev),
+ "Failed to register imx-cpufreq-dev device\n");
+ ret = devm_add_action(dev, imx8m_unregister_cpufreq, cpufreq_dev);
+ if (ret)
+ return ret;
+ }
return 0;
}
diff --git a/drivers/soc/mediatek/mt8167-mmsys.h b/drivers/soc/mediatek/mt8167-mmsys.h
index f7a35b3656bb..c468926561b4 100644
--- a/drivers/soc/mediatek/mt8167-mmsys.h
+++ b/drivers/soc/mediatek/mt8167-mmsys.h
@@ -14,22 +14,21 @@
#define MT8167_DSI0_SEL_IN_RDMA0 0x1
static const struct mtk_mmsys_routes mt8167_mmsys_routing_table[] = {
- {
- DDP_COMPONENT_OVL0, DDP_COMPONENT_COLOR0,
- MT8167_DISP_REG_CONFIG_DISP_OVL0_MOUT_EN, OVL0_MOUT_EN_COLOR0,
- }, {
- DDP_COMPONENT_DITHER0, DDP_COMPONENT_RDMA0,
- MT8167_DISP_REG_CONFIG_DISP_DITHER_MOUT_EN, MT8167_DITHER_MOUT_EN_RDMA0
- }, {
- DDP_COMPONENT_OVL0, DDP_COMPONENT_COLOR0,
- MT8167_DISP_REG_CONFIG_DISP_COLOR0_SEL_IN, COLOR0_SEL_IN_OVL0
- }, {
- DDP_COMPONENT_RDMA0, DDP_COMPONENT_DSI0,
- MT8167_DISP_REG_CONFIG_DISP_DSI0_SEL_IN, MT8167_DSI0_SEL_IN_RDMA0
- }, {
- DDP_COMPONENT_RDMA0, DDP_COMPONENT_DSI0,
- MT8167_DISP_REG_CONFIG_DISP_RDMA0_SOUT_SEL_IN, MT8167_RDMA0_SOUT_DSI0
- },
+ MMSYS_ROUTE(OVL0, COLOR0,
+ MT8167_DISP_REG_CONFIG_DISP_OVL0_MOUT_EN, OVL0_MOUT_EN_COLOR0,
+ OVL0_MOUT_EN_COLOR0),
+ MMSYS_ROUTE(DITHER0, RDMA0,
+ MT8167_DISP_REG_CONFIG_DISP_DITHER_MOUT_EN, MT8167_DITHER_MOUT_EN_RDMA0,
+ MT8167_DITHER_MOUT_EN_RDMA0),
+ MMSYS_ROUTE(OVL0, COLOR0,
+ MT8167_DISP_REG_CONFIG_DISP_COLOR0_SEL_IN, COLOR0_SEL_IN_OVL0,
+ COLOR0_SEL_IN_OVL0),
+ MMSYS_ROUTE(RDMA0, DSI0,
+ MT8167_DISP_REG_CONFIG_DISP_DSI0_SEL_IN, MT8167_DSI0_SEL_IN_RDMA0,
+ MT8167_DSI0_SEL_IN_RDMA0),
+ MMSYS_ROUTE(RDMA0, DSI0,
+ MT8167_DISP_REG_CONFIG_DISP_RDMA0_SOUT_SEL_IN, MT8167_RDMA0_SOUT_DSI0,
+ MT8167_RDMA0_SOUT_DSI0),
};
#endif /* __SOC_MEDIATEK_MT8167_MMSYS_H */
diff --git a/drivers/soc/mediatek/mt8173-mmsys.h b/drivers/soc/mediatek/mt8173-mmsys.h
index 9d24e381271e..957876d7c166 100644
--- a/drivers/soc/mediatek/mt8173-mmsys.h
+++ b/drivers/soc/mediatek/mt8173-mmsys.h
@@ -33,63 +33,48 @@
#define MT8173_RDMA0_SOUT_COLOR0 BIT(0)
static const struct mtk_mmsys_routes mt8173_mmsys_routing_table[] = {
- {
- DDP_COMPONENT_OVL0, DDP_COMPONENT_COLOR0,
- MT8173_DISP_REG_CONFIG_DISP_OVL0_MOUT_EN,
- MT8173_OVL0_MOUT_EN_COLOR0, MT8173_OVL0_MOUT_EN_COLOR0
- }, {
- DDP_COMPONENT_OD0, DDP_COMPONENT_RDMA0,
- MT8173_DISP_REG_CONFIG_DISP_OD_MOUT_EN,
- MT8173_OD0_MOUT_EN_RDMA0, MT8173_OD0_MOUT_EN_RDMA0
- }, {
- DDP_COMPONENT_UFOE, DDP_COMPONENT_DSI0,
- MT8173_DISP_REG_CONFIG_DISP_UFOE_MOUT_EN,
- MT8173_UFOE_MOUT_EN_DSI0, MT8173_UFOE_MOUT_EN_DSI0
- }, {
- DDP_COMPONENT_COLOR0, DDP_COMPONENT_AAL0,
- MT8173_DISP_REG_CONFIG_DISP_COLOR0_SOUT_SEL_IN,
- MT8173_COLOR0_SOUT_MERGE, 0 /* SOUT to AAL */
- }, {
- DDP_COMPONENT_RDMA0, DDP_COMPONENT_UFOE,
- MT8173_DISP_REG_CONFIG_DISP_RDMA0_SOUT_SEL_IN,
- MT8173_RDMA0_SOUT_COLOR0, 0 /* SOUT to UFOE */
- }, {
- DDP_COMPONENT_OVL0, DDP_COMPONENT_COLOR0,
- MT8173_DISP_REG_CONFIG_DISP_COLOR0_SEL_IN,
- MT8173_COLOR0_SEL_IN_OVL0, MT8173_COLOR0_SEL_IN_OVL0
- }, {
- DDP_COMPONENT_AAL0, DDP_COMPONENT_COLOR0,
- MT8173_DISP_REG_CONFIG_DISP_AAL_SEL_IN,
- MT8173_AAL_SEL_IN_MERGE, 0 /* SEL_IN from COLOR0 */
- }, {
- DDP_COMPONENT_RDMA0, DDP_COMPONENT_UFOE,
- MT8173_DISP_REG_CONFIG_DISP_UFOE_SEL_IN,
- MT8173_UFOE_SEL_IN_RDMA0, 0 /* SEL_IN from RDMA0 */
- }, {
- DDP_COMPONENT_UFOE, DDP_COMPONENT_DSI0,
- MT8173_DISP_REG_CONFIG_DSI0_SEL_IN,
- MT8173_DSI0_SEL_IN_UFOE, 0, /* SEL_IN from UFOE */
- }, {
- DDP_COMPONENT_OVL1, DDP_COMPONENT_COLOR1,
- MT8173_DISP_REG_CONFIG_DISP_OVL1_MOUT_EN,
- MT8173_OVL1_MOUT_EN_COLOR1, MT8173_OVL1_MOUT_EN_COLOR1
- }, {
- DDP_COMPONENT_GAMMA, DDP_COMPONENT_RDMA1,
- MT8173_DISP_REG_CONFIG_DISP_GAMMA_MOUT_EN,
- MT8173_GAMMA_MOUT_EN_RDMA1, MT8173_GAMMA_MOUT_EN_RDMA1
- }, {
- DDP_COMPONENT_RDMA1, DDP_COMPONENT_DPI0,
- MT8173_DISP_REG_CONFIG_DISP_RDMA1_SOUT_EN,
- RDMA1_SOUT_MASK, RDMA1_SOUT_DPI0
- }, {
- DDP_COMPONENT_OVL1, DDP_COMPONENT_COLOR1,
- MT8173_DISP_REG_CONFIG_DISP_COLOR1_SEL_IN,
- COLOR1_SEL_IN_OVL1, COLOR1_SEL_IN_OVL1
- }, {
- DDP_COMPONENT_RDMA1, DDP_COMPONENT_DPI0,
- MT8173_DISP_REG_CONFIG_DPI_SEL_IN,
- MT8173_DPI0_SEL_IN_MASK, MT8173_DPI0_SEL_IN_RDMA1
- }
+ MMSYS_ROUTE(OVL0, COLOR0,
+ MT8173_DISP_REG_CONFIG_DISP_OVL0_MOUT_EN, MT8173_OVL0_MOUT_EN_COLOR0,
+ MT8173_OVL0_MOUT_EN_COLOR0),
+ MMSYS_ROUTE(OD0, RDMA0,
+ MT8173_DISP_REG_CONFIG_DISP_OD_MOUT_EN, MT8173_OD0_MOUT_EN_RDMA0,
+ MT8173_OD0_MOUT_EN_RDMA0),
+ MMSYS_ROUTE(UFOE, DSI0,
+ MT8173_DISP_REG_CONFIG_DISP_UFOE_MOUT_EN, MT8173_UFOE_MOUT_EN_DSI0,
+ MT8173_UFOE_MOUT_EN_DSI0),
+ MMSYS_ROUTE(COLOR0, AAL0,
+ MT8173_DISP_REG_CONFIG_DISP_COLOR0_SOUT_SEL_IN, MT8173_COLOR0_SOUT_MERGE,
+ 0 /* SOUT to AAL */),
+ MMSYS_ROUTE(RDMA0, UFOE,
+ MT8173_DISP_REG_CONFIG_DISP_RDMA0_SOUT_SEL_IN, MT8173_RDMA0_SOUT_COLOR0,
+ 0 /* SOUT to UFOE */),
+ MMSYS_ROUTE(OVL0, COLOR0,
+ MT8173_DISP_REG_CONFIG_DISP_COLOR0_SEL_IN, MT8173_COLOR0_SEL_IN_OVL0,
+ MT8173_COLOR0_SEL_IN_OVL0),
+ MMSYS_ROUTE(AAL0, COLOR0,
+ MT8173_DISP_REG_CONFIG_DISP_AAL_SEL_IN, MT8173_AAL_SEL_IN_MERGE,
+ 0 /* SEL_IN from COLOR0 */),
+ MMSYS_ROUTE(RDMA0, UFOE,
+ MT8173_DISP_REG_CONFIG_DISP_UFOE_SEL_IN, MT8173_UFOE_SEL_IN_RDMA0,
+ 0 /* SEL_IN from RDMA0 */),
+ MMSYS_ROUTE(UFOE, DSI0,
+ MT8173_DISP_REG_CONFIG_DSI0_SEL_IN, MT8173_DSI0_SEL_IN_UFOE,
+ 0 /* SEL_IN from UFOE */),
+ MMSYS_ROUTE(OVL1, COLOR1,
+ MT8173_DISP_REG_CONFIG_DISP_OVL1_MOUT_EN, MT8173_OVL1_MOUT_EN_COLOR1,
+ MT8173_OVL1_MOUT_EN_COLOR1),
+ MMSYS_ROUTE(GAMMA, RDMA1,
+ MT8173_DISP_REG_CONFIG_DISP_GAMMA_MOUT_EN, MT8173_GAMMA_MOUT_EN_RDMA1,
+ MT8173_GAMMA_MOUT_EN_RDMA1),
+ MMSYS_ROUTE(RDMA1, DPI0,
+ MT8173_DISP_REG_CONFIG_DISP_RDMA1_SOUT_EN, RDMA1_SOUT_MASK,
+ RDMA1_SOUT_DPI0),
+ MMSYS_ROUTE(OVL1, COLOR1,
+ MT8173_DISP_REG_CONFIG_DISP_COLOR1_SEL_IN, COLOR1_SEL_IN_OVL1,
+ COLOR1_SEL_IN_OVL1),
+ MMSYS_ROUTE(RDMA1, DPI0,
+ MT8173_DISP_REG_CONFIG_DPI_SEL_IN, MT8173_DPI0_SEL_IN_MASK,
+ MT8173_DPI0_SEL_IN_RDMA1),
};
#endif /* __SOC_MEDIATEK_MT8173_MMSYS_H */
diff --git a/drivers/soc/mediatek/mt8183-mmsys.h b/drivers/soc/mediatek/mt8183-mmsys.h
index ff6be1703469..123384958c4b 100644
--- a/drivers/soc/mediatek/mt8183-mmsys.h
+++ b/drivers/soc/mediatek/mt8183-mmsys.h
@@ -28,35 +28,27 @@
#define MT8183_MMSYS_SW0_RST_B 0x140
static const struct mtk_mmsys_routes mmsys_mt8183_routing_table[] = {
- {
- DDP_COMPONENT_OVL0, DDP_COMPONENT_OVL_2L0,
- MT8183_DISP_OVL0_MOUT_EN, MT8183_OVL0_MOUT_EN_OVL0_2L,
- MT8183_OVL0_MOUT_EN_OVL0_2L
- }, {
- DDP_COMPONENT_OVL_2L0, DDP_COMPONENT_RDMA0,
- MT8183_DISP_OVL0_2L_MOUT_EN, MT8183_OVL0_2L_MOUT_EN_DISP_PATH0,
- MT8183_OVL0_2L_MOUT_EN_DISP_PATH0
- }, {
- DDP_COMPONENT_OVL_2L1, DDP_COMPONENT_RDMA1,
- MT8183_DISP_OVL1_2L_MOUT_EN, MT8183_OVL1_2L_MOUT_EN_RDMA1,
- MT8183_OVL1_2L_MOUT_EN_RDMA1
- }, {
- DDP_COMPONENT_DITHER0, DDP_COMPONENT_DSI0,
- MT8183_DISP_DITHER0_MOUT_EN, MT8183_DITHER0_MOUT_IN_DSI0,
- MT8183_DITHER0_MOUT_IN_DSI0
- }, {
- DDP_COMPONENT_OVL_2L0, DDP_COMPONENT_RDMA0,
- MT8183_DISP_PATH0_SEL_IN, MT8183_DISP_PATH0_SEL_IN_OVL0_2L,
- MT8183_DISP_PATH0_SEL_IN_OVL0_2L
- }, {
- DDP_COMPONENT_RDMA1, DDP_COMPONENT_DPI0,
- MT8183_DISP_DPI0_SEL_IN, MT8183_DPI0_SEL_IN_RDMA1,
- MT8183_DPI0_SEL_IN_RDMA1
- }, {
- DDP_COMPONENT_RDMA0, DDP_COMPONENT_COLOR0,
- MT8183_DISP_RDMA0_SOUT_SEL_IN, MT8183_RDMA0_SOUT_COLOR0,
- MT8183_RDMA0_SOUT_COLOR0
- }
+ MMSYS_ROUTE(OVL0, OVL_2L0,
+ MT8183_DISP_OVL0_MOUT_EN, MT8183_OVL0_MOUT_EN_OVL0_2L,
+ MT8183_OVL0_MOUT_EN_OVL0_2L),
+ MMSYS_ROUTE(OVL_2L0, RDMA0,
+ MT8183_DISP_OVL0_2L_MOUT_EN, MT8183_OVL0_2L_MOUT_EN_DISP_PATH0,
+ MT8183_OVL0_2L_MOUT_EN_DISP_PATH0),
+ MMSYS_ROUTE(OVL_2L1, RDMA1,
+ MT8183_DISP_OVL1_2L_MOUT_EN, MT8183_OVL1_2L_MOUT_EN_RDMA1,
+ MT8183_OVL1_2L_MOUT_EN_RDMA1),
+ MMSYS_ROUTE(DITHER0, DSI0,
+ MT8183_DISP_DITHER0_MOUT_EN, MT8183_DITHER0_MOUT_IN_DSI0,
+ MT8183_DITHER0_MOUT_IN_DSI0),
+ MMSYS_ROUTE(OVL_2L0, RDMA0,
+ MT8183_DISP_PATH0_SEL_IN, MT8183_DISP_PATH0_SEL_IN_OVL0_2L,
+ MT8183_DISP_PATH0_SEL_IN_OVL0_2L),
+ MMSYS_ROUTE(RDMA1, DPI0,
+ MT8183_DISP_DPI0_SEL_IN, MT8183_DPI0_SEL_IN_RDMA1,
+ MT8183_DPI0_SEL_IN_RDMA1),
+ MMSYS_ROUTE(RDMA0, COLOR0,
+ MT8183_DISP_RDMA0_SOUT_SEL_IN, MT8183_RDMA0_SOUT_COLOR0,
+ MT8183_RDMA0_SOUT_COLOR0),
};
#endif /* __SOC_MEDIATEK_MT8183_MMSYS_H */
diff --git a/drivers/soc/mediatek/mt8186-mmsys.h b/drivers/soc/mediatek/mt8186-mmsys.h
index 279d4138525b..354664be72bd 100644
--- a/drivers/soc/mediatek/mt8186-mmsys.h
+++ b/drivers/soc/mediatek/mt8186-mmsys.h
@@ -63,61 +63,39 @@
#define MT8186_MMSYS_SW0_RST_B 0x160
static const struct mtk_mmsys_routes mmsys_mt8186_routing_table[] = {
- {
- DDP_COMPONENT_OVL0, DDP_COMPONENT_RDMA0,
- MT8186_DISP_OVL0_MOUT_EN, MT8186_OVL0_MOUT_EN_MASK,
- MT8186_OVL0_MOUT_TO_RDMA0
- },
- {
- DDP_COMPONENT_OVL0, DDP_COMPONENT_RDMA0,
- MT8186_DISP_RDMA0_SEL_IN, MT8186_RDMA0_SEL_IN_MASK,
- MT8186_RDMA0_FROM_OVL0
- },
- {
- DDP_COMPONENT_OVL0, DDP_COMPONENT_RDMA0,
- MT8186_MMSYS_OVL_CON, MT8186_MMSYS_OVL0_CON_MASK,
- MT8186_OVL0_GO_BLEND
- },
- {
- DDP_COMPONENT_RDMA0, DDP_COMPONENT_COLOR0,
- MT8186_DISP_RDMA0_SOUT_SEL, MT8186_RDMA0_SOUT_SEL_MASK,
- MT8186_RDMA0_SOUT_TO_COLOR0
- },
- {
- DDP_COMPONENT_DITHER0, DDP_COMPONENT_DSI0,
- MT8186_DISP_DITHER0_MOUT_EN, MT8186_DITHER0_MOUT_EN_MASK,
- MT8186_DITHER0_MOUT_TO_DSI0,
- },
- {
- DDP_COMPONENT_DITHER0, DDP_COMPONENT_DSI0,
- MT8186_DISP_DSI0_SEL_IN, MT8186_DSI0_SEL_IN_MASK,
- MT8186_DSI0_FROM_DITHER0
- },
- {
- DDP_COMPONENT_OVL_2L0, DDP_COMPONENT_RDMA1,
- MT8186_DISP_OVL0_2L_MOUT_EN, MT8186_OVL0_2L_MOUT_EN_MASK,
- MT8186_OVL0_2L_MOUT_TO_RDMA1
- },
- {
- DDP_COMPONENT_OVL_2L0, DDP_COMPONENT_RDMA1,
- MT8186_DISP_RDMA1_SEL_IN, MT8186_RDMA1_SEL_IN_MASK,
- MT8186_RDMA1_FROM_OVL0_2L
- },
- {
- DDP_COMPONENT_OVL_2L0, DDP_COMPONENT_RDMA1,
- MT8186_MMSYS_OVL_CON, MT8186_MMSYS_OVL0_2L_CON_MASK,
- MT8186_OVL0_2L_GO_BLEND
- },
- {
- DDP_COMPONENT_RDMA1, DDP_COMPONENT_DPI0,
- MT8186_DISP_RDMA1_MOUT_EN, MT8186_RDMA1_MOUT_EN_MASK,
- MT8186_RDMA1_MOUT_TO_DPI0_SEL
- },
- {
- DDP_COMPONENT_RDMA1, DDP_COMPONENT_DPI0,
- MT8186_DISP_DPI0_SEL_IN, MT8186_DPI0_SEL_IN_MASK,
- MT8186_DPI0_FROM_RDMA1
- },
+ MMSYS_ROUTE(OVL0, RDMA0,
+ MT8186_DISP_OVL0_MOUT_EN, MT8186_OVL0_MOUT_EN_MASK,
+ MT8186_OVL0_MOUT_TO_RDMA0),
+ MMSYS_ROUTE(OVL0, RDMA0,
+ MT8186_DISP_RDMA0_SEL_IN, MT8186_RDMA0_SEL_IN_MASK,
+ MT8186_RDMA0_FROM_OVL0),
+ MMSYS_ROUTE(OVL0, RDMA0,
+ MT8186_MMSYS_OVL_CON, MT8186_MMSYS_OVL0_CON_MASK,
+ MT8186_OVL0_GO_BLEND),
+ MMSYS_ROUTE(RDMA0, COLOR0,
+ MT8186_DISP_RDMA0_SOUT_SEL, MT8186_RDMA0_SOUT_SEL_MASK,
+ MT8186_RDMA0_SOUT_TO_COLOR0),
+ MMSYS_ROUTE(DITHER0, DSI0,
+ MT8186_DISP_DITHER0_MOUT_EN, MT8186_DITHER0_MOUT_EN_MASK,
+ MT8186_DITHER0_MOUT_TO_DSI0),
+ MMSYS_ROUTE(DITHER0, DSI0,
+ MT8186_DISP_DSI0_SEL_IN, MT8186_DSI0_SEL_IN_MASK,
+ MT8186_DSI0_FROM_DITHER0),
+ MMSYS_ROUTE(OVL_2L0, RDMA1,
+ MT8186_DISP_OVL0_2L_MOUT_EN, MT8186_OVL0_2L_MOUT_EN_MASK,
+ MT8186_OVL0_2L_MOUT_TO_RDMA1),
+ MMSYS_ROUTE(OVL_2L0, RDMA1,
+ MT8186_DISP_RDMA1_SEL_IN, MT8186_RDMA1_SEL_IN_MASK,
+ MT8186_RDMA1_FROM_OVL0_2L),
+ MMSYS_ROUTE(OVL_2L0, RDMA1,
+ MT8186_MMSYS_OVL_CON, MT8186_MMSYS_OVL0_2L_CON_MASK,
+ MT8186_OVL0_2L_GO_BLEND),
+ MMSYS_ROUTE(RDMA1, DPI0,
+ MT8186_DISP_RDMA1_MOUT_EN, MT8186_RDMA1_MOUT_EN_MASK,
+ MT8186_RDMA1_MOUT_TO_DPI0_SEL),
+ MMSYS_ROUTE(RDMA1, DPI0,
+ MT8186_DISP_DPI0_SEL_IN, MT8186_DPI0_SEL_IN_MASK,
+ MT8186_DPI0_FROM_RDMA1),
};
#endif /* __SOC_MEDIATEK_MT8186_MMSYS_H */
diff --git a/drivers/soc/mediatek/mt8188-mmsys.h b/drivers/soc/mediatek/mt8188-mmsys.h
index 6bebf1a69fc0..99080afead7e 100644
--- a/drivers/soc/mediatek/mt8188-mmsys.h
+++ b/drivers/soc/mediatek/mt8188-mmsys.h
@@ -202,158 +202,126 @@ static const u8 mmsys_mt8188_vdo1_rst_tb[] = {
};
static const struct mtk_mmsys_routes mmsys_mt8188_routing_table[] = {
- {
- DDP_COMPONENT_OVL0, DDP_COMPONENT_RDMA0,
- MT8188_VDO0_OVL_MOUT_EN, MT8188_MOUT_DISP_OVL0_TO_DISP_RDMA0,
- MT8188_MOUT_DISP_OVL0_TO_DISP_RDMA0
- }, {
- DDP_COMPONENT_OVL0, DDP_COMPONENT_WDMA0,
- MT8188_VDO0_OVL_MOUT_EN, MT8188_MOUT_DISP_OVL0_TO_DISP_WDMA0,
- MT8188_MOUT_DISP_OVL0_TO_DISP_WDMA0
- }, {
- DDP_COMPONENT_OVL0, DDP_COMPONENT_RDMA0,
- MT8188_VDO0_DISP_RDMA_SEL, MT8188_SEL_IN_DISP_RDMA0_FROM_MASK,
- MT8188_SEL_IN_DISP_RDMA0_FROM_DISP_OVL0
- }, {
- DDP_COMPONENT_DITHER0, DDP_COMPONENT_DSI0,
- MT8188_VDO0_DSI0_SEL_IN, MT8188_SEL_IN_DSI0_FROM_MASK,
- MT8188_SEL_IN_DSI0_FROM_DISP_DITHER0
- }, {
- DDP_COMPONENT_DITHER0, DDP_COMPONENT_MERGE0,
- MT8188_VDO0_VPP_MERGE_SEL, MT8188_SEL_IN_VPP_MERGE_FROM_MASK,
- MT8188_SEL_IN_VPP_MERGE_FROM_DITHER0_OUT
- }, {
- DDP_COMPONENT_DITHER0, DDP_COMPONENT_DSC0,
- MT8188_VDO0_DSC_WARP_SEL,
- MT8188_SEL_IN_DSC_WRAP0C0_IN_FROM_MASK,
- MT8188_SEL_IN_DSC_WRAP0C0_IN_FROM_DISP_DITHER0
- }, {
- DDP_COMPONENT_DITHER0, DDP_COMPONENT_DP_INTF0,
- MT8188_VDO0_DP_INTF0_SEL_IN, MT8188_SEL_IN_DP_INTF0_FROM_MASK,
- MT8188_SEL_IN_DP_INTF0_FROM_DISP_DITHER0
- }, {
- DDP_COMPONENT_DSC0, DDP_COMPONENT_MERGE0,
- MT8188_VDO0_VPP_MERGE_SEL, MT8188_SEL_IN_VPP_MERGE_FROM_MASK,
- MT8188_SEL_IN_VPP_MERGE_FROM_DSC_WRAP0_OUT
- }, {
- DDP_COMPONENT_DSC0, DDP_COMPONENT_DSI0,
- MT8188_VDO0_DSI0_SEL_IN, MT8188_SEL_IN_DSI0_FROM_MASK,
- MT8188_SEL_IN_DSI0_FROM_DSC_WRAP0_OUT
- }, {
- DDP_COMPONENT_RDMA0, DDP_COMPONENT_COLOR0,
- MT8188_VDO0_DISP_RDMA_SEL, MT8188_SOUT_DISP_RDMA0_TO_MASK,
- MT8188_SOUT_DISP_RDMA0_TO_DISP_COLOR0
- }, {
- DDP_COMPONENT_DITHER0, DDP_COMPONENT_DSI0,
- MT8188_VDO0_DISP_DITHER0_SEL_OUT,
- MT8188_SOUT_DISP_DITHER0_TO_MASK,
- MT8188_SOUT_DISP_DITHER0_TO_DSI0
- }, {
- DDP_COMPONENT_DITHER0, DDP_COMPONENT_DP_INTF0,
- MT8188_VDO0_DISP_DITHER0_SEL_OUT,
- MT8188_SOUT_DISP_DITHER0_TO_MASK,
- MT8188_SOUT_DISP_DITHER0_TO_DP_INTF0
- }, {
- DDP_COMPONENT_MERGE0, DDP_COMPONENT_DP_INTF0,
- MT8188_VDO0_VPP_MERGE_SEL, MT8188_SOUT_VPP_MERGE_TO_MASK,
- MT8188_SOUT_VPP_MERGE_TO_DP_INTF0
- }, {
- DDP_COMPONENT_MERGE0, DDP_COMPONENT_DPI0,
- MT8188_VDO0_VPP_MERGE_SEL, MT8188_SOUT_VPP_MERGE_TO_MASK,
- MT8188_SOUT_VPP_MERGE_TO_SINA_VIRTUAL0
- }, {
- DDP_COMPONENT_MERGE0, DDP_COMPONENT_WDMA0,
- MT8188_VDO0_VPP_MERGE_SEL, MT8188_SOUT_VPP_MERGE_TO_MASK,
- MT8188_SOUT_VPP_MERGE_TO_DISP_WDMA0
- }, {
- DDP_COMPONENT_MERGE0, DDP_COMPONENT_DSC0,
- MT8188_VDO0_VPP_MERGE_SEL, MT8188_SOUT_VPP_MERGE_TO_MASK,
- MT8188_SOUT_VPP_MERGE_TO_DSC_WRAP0_IN
- }, {
- DDP_COMPONENT_DSC0, DDP_COMPONENT_DSI0,
- MT8188_VDO0_DSC_WARP_SEL, MT8188_SOUT_DSC_WRAP0_OUT_TO_MASK,
- MT8188_SOUT_DSC_WRAP0_OUT_TO_DSI0
- }, {
- DDP_COMPONENT_DSC0, DDP_COMPONENT_MERGE0,
- MT8188_VDO0_DSC_WARP_SEL, MT8188_SOUT_DSC_WRAP0_OUT_TO_MASK,
- MT8188_SOUT_DSC_WRAP0_OUT_TO_VPP_MERGE
- },
+ MMSYS_ROUTE(OVL0, RDMA0,
+ MT8188_VDO0_OVL_MOUT_EN, MT8188_MOUT_DISP_OVL0_TO_DISP_RDMA0,
+ MT8188_MOUT_DISP_OVL0_TO_DISP_RDMA0),
+ MMSYS_ROUTE(OVL0, WDMA0,
+ MT8188_VDO0_OVL_MOUT_EN, MT8188_MOUT_DISP_OVL0_TO_DISP_WDMA0,
+ MT8188_MOUT_DISP_OVL0_TO_DISP_WDMA0),
+ MMSYS_ROUTE(OVL0, RDMA0,
+ MT8188_VDO0_DISP_RDMA_SEL, MT8188_SEL_IN_DISP_RDMA0_FROM_MASK,
+ MT8188_SEL_IN_DISP_RDMA0_FROM_DISP_OVL0),
+ MMSYS_ROUTE(DITHER0, DSI0,
+ MT8188_VDO0_DSI0_SEL_IN, MT8188_SEL_IN_DSI0_FROM_MASK,
+ MT8188_SEL_IN_DSI0_FROM_DISP_DITHER0),
+ MMSYS_ROUTE(DITHER0, MERGE0,
+ MT8188_VDO0_VPP_MERGE_SEL, MT8188_SEL_IN_VPP_MERGE_FROM_MASK,
+ MT8188_SEL_IN_DP_INTF0_FROM_DISP_DITHER0),
+ MMSYS_ROUTE(DITHER0, DSC0,
+ MT8188_VDO0_DSC_WARP_SEL, MT8188_SEL_IN_DSC_WRAP0C0_IN_FROM_MASK,
+ MT8188_SEL_IN_DSC_WRAP0C0_IN_FROM_DISP_DITHER0),
+ MMSYS_ROUTE(DITHER0, DP_INTF0,
+ MT8188_VDO0_DP_INTF0_SEL_IN, MT8188_SEL_IN_DP_INTF0_FROM_MASK,
+ MT8188_SEL_IN_DP_INTF0_FROM_DISP_DITHER0),
+ MMSYS_ROUTE(DSC0, MERGE0,
+ MT8188_VDO0_VPP_MERGE_SEL, MT8188_SEL_IN_VPP_MERGE_FROM_MASK,
+ MT8188_SEL_IN_VPP_MERGE_FROM_DSC_WRAP0_OUT),
+ MMSYS_ROUTE(MERGE0, DP_INTF0,
+ MT8188_VDO0_DP_INTF0_SEL_IN, MT8188_SEL_IN_DP_INTF0_FROM_MASK,
+ MT8188_SEL_IN_DP_INTF0_FROM_VPP_MERGE),
+ MMSYS_ROUTE(DSC0, DSI0,
+ MT8188_VDO0_DSI0_SEL_IN, MT8188_SEL_IN_DSI0_FROM_MASK,
+ MT8188_SEL_IN_DSI0_FROM_DSC_WRAP0_OUT),
+ MMSYS_ROUTE(RDMA0, COLOR0,
+ MT8188_VDO0_DISP_RDMA_SEL, GENMASK(1, 0),
+ MT8188_SOUT_DISP_RDMA0_TO_DISP_COLOR0),
+ MMSYS_ROUTE(DITHER0, DSC0,
+ MT8188_VDO0_DISP_DITHER0_SEL_OUT, MT8188_SOUT_DISP_DITHER0_TO_MASK,
+ MT8188_SOUT_DISP_DITHER0_TO_DSC_WRAP0_IN),
+ MMSYS_ROUTE(DITHER0, DSI0,
+ MT8188_VDO0_DISP_DITHER0_SEL_OUT, MT8188_SOUT_DISP_DITHER0_TO_MASK,
+ MT8188_SOUT_DISP_DITHER0_TO_DSI0),
+ MMSYS_ROUTE(DITHER0, MERGE0,
+ MT8188_VDO0_DISP_DITHER0_SEL_OUT, MT8188_SOUT_DISP_DITHER0_TO_MASK,
+ MT8188_SOUT_DISP_DITHER0_TO_VPP_MERGE0),
+ MMSYS_ROUTE(DITHER0, DP_INTF0,
+ MT8188_VDO0_DISP_DITHER0_SEL_OUT, MT8188_SOUT_DISP_DITHER0_TO_MASK,
+ MT8188_SOUT_DISP_DITHER0_TO_DP_INTF0),
+ MMSYS_ROUTE(MERGE0, DP_INTF0,
+ MT8188_VDO0_VPP_MERGE_SEL, MT8188_SOUT_VPP_MERGE_TO_MASK,
+ MT8188_SOUT_VPP_MERGE_TO_DP_INTF0),
+ MMSYS_ROUTE(MERGE0, DPI0,
+ MT8188_VDO0_VPP_MERGE_SEL, MT8188_SOUT_VPP_MERGE_TO_MASK,
+ MT8188_SOUT_VPP_MERGE_TO_SINA_VIRTUAL0),
+ MMSYS_ROUTE(MERGE0, WDMA0,
+ MT8188_VDO0_VPP_MERGE_SEL, MT8188_SOUT_VPP_MERGE_TO_MASK,
+ MT8188_SOUT_VPP_MERGE_TO_DISP_WDMA0),
+ MMSYS_ROUTE(MERGE0, DSC0,
+ MT8188_VDO0_VPP_MERGE_SEL, MT8188_SOUT_VPP_MERGE_TO_MASK,
+ MT8188_SOUT_VPP_MERGE_TO_DSC_WRAP0_IN),
+ MMSYS_ROUTE(DSC0, DSI0,
+ MT8188_VDO0_DSC_WARP_SEL, MT8188_SOUT_DSC_WRAP0_OUT_TO_MASK,
+ MT8188_SOUT_DSC_WRAP0_OUT_TO_DSI0),
+ MMSYS_ROUTE(DSC0, MERGE0,
+ MT8188_VDO0_DSC_WARP_SEL, MT8188_SOUT_DSC_WRAP0_OUT_TO_MASK,
+ MT8188_SOUT_DSC_WRAP0_OUT_TO_VPP_MERGE),
};
static const struct mtk_mmsys_routes mmsys_mt8188_vdo1_routing_table[] = {
- {
- DDP_COMPONENT_MDP_RDMA0, DDP_COMPONENT_MERGE1,
- MT8188_VDO1_VPP_MERGE0_P0_SEL_IN, GENMASK(0, 0),
- MT8188_VPP_MERGE0_P0_SEL_IN_FROM_MDP_RDMA0
- }, {
- DDP_COMPONENT_MDP_RDMA1, DDP_COMPONENT_MERGE1,
- MT8188_VDO1_VPP_MERGE0_P1_SEL_IN, GENMASK(0, 0),
- MT8188_VPP_MERGE0_P1_SEL_IN_FROM_MDP_RDMA1
- }, {
- DDP_COMPONENT_MDP_RDMA2, DDP_COMPONENT_MERGE2,
- MT8188_VDO1_VPP_MERGE1_P0_SEL_IN, GENMASK(0, 0),
- MT8188_VPP_MERGE1_P0_SEL_IN_FROM_MDP_RDMA2
- }, {
- DDP_COMPONENT_MERGE1, DDP_COMPONENT_ETHDR_MIXER,
- MT8188_VDO1_MERGE0_ASYNC_SOUT_SEL, GENMASK(1, 0),
- MT8188_SOUT_TO_MIXER_IN1_SEL
- }, {
- DDP_COMPONENT_MERGE2, DDP_COMPONENT_ETHDR_MIXER,
- MT8188_VDO1_MERGE1_ASYNC_SOUT_SEL, GENMASK(1, 0),
- MT8188_SOUT_TO_MIXER_IN2_SEL
- }, {
- DDP_COMPONENT_MERGE3, DDP_COMPONENT_ETHDR_MIXER,
- MT8188_VDO1_MERGE2_ASYNC_SOUT_SEL, GENMASK(1, 0),
- MT8188_SOUT_TO_MIXER_IN3_SEL
- }, {
- DDP_COMPONENT_MERGE4, DDP_COMPONENT_ETHDR_MIXER,
- MT8188_VDO1_MERGE3_ASYNC_SOUT_SEL, GENMASK(1, 0),
- MT8188_SOUT_TO_MIXER_IN4_SEL
- }, {
- DDP_COMPONENT_ETHDR_MIXER, DDP_COMPONENT_MERGE5,
- MT8188_VDO1_MIXER_OUT_SOUT_SEL, GENMASK(0, 0),
- MT8188_MIXER_SOUT_TO_MERGE4_ASYNC_SEL
- }, {
- DDP_COMPONENT_MERGE1, DDP_COMPONENT_ETHDR_MIXER,
- MT8188_VDO1_MIXER_IN1_SEL_IN, GENMASK(0, 0),
- MT8188_MIXER_IN1_SEL_IN_FROM_MERGE0_ASYNC_SOUT
- }, {
- DDP_COMPONENT_MERGE2, DDP_COMPONENT_ETHDR_MIXER,
- MT8188_VDO1_MIXER_IN2_SEL_IN, GENMASK(0, 0),
- MT8188_MIXER_IN2_SEL_IN_FROM_MERGE1_ASYNC_SOUT
- }, {
- DDP_COMPONENT_MERGE3, DDP_COMPONENT_ETHDR_MIXER,
- MT8188_VDO1_MIXER_IN3_SEL_IN, GENMASK(0, 0),
- MT8188_MIXER_IN3_SEL_IN_FROM_MERGE2_ASYNC_SOUT
- }, {
- DDP_COMPONENT_MERGE4, DDP_COMPONENT_ETHDR_MIXER,
- MT8188_VDO1_MIXER_IN4_SEL_IN, GENMASK(0, 0),
- MT8188_MIXER_IN4_SEL_IN_FROM_MERGE3_ASYNC_SOUT
- }, {
- DDP_COMPONENT_ETHDR_MIXER, DDP_COMPONENT_MERGE5,
- MT8188_VDO1_MIXER_SOUT_SEL_IN, GENMASK(2, 0),
- MT8188_MIXER_SOUT_SEL_IN_FROM_DISP_MIXER
- }, {
- DDP_COMPONENT_ETHDR_MIXER, DDP_COMPONENT_MERGE5,
- MT8188_VDO1_MERGE4_ASYNC_SEL_IN, GENMASK(2, 0),
- MT8188_MERGE4_ASYNC_SEL_IN_FROM_MIXER_OUT_SOUT
- }, {
- DDP_COMPONENT_MERGE5, DDP_COMPONENT_DPI1,
- MT8188_VDO1_DISP_DPI1_SEL_IN, GENMASK(1, 0),
- MT8188_DISP_DPI1_SEL_IN_FROM_VPP_MERGE4_MOUT
- }, {
- DDP_COMPONENT_MERGE5, DDP_COMPONENT_DPI1,
- MT8188_VDO1_MERGE4_SOUT_SEL, GENMASK(1, 0),
- MT8188_MERGE4_SOUT_TO_DPI1_SEL
- }, {
- DDP_COMPONENT_MERGE5, DDP_COMPONENT_DP_INTF1,
- MT8188_VDO1_DISP_DP_INTF0_SEL_IN, GENMASK(1, 0),
- MT8188_DISP_DP_INTF0_SEL_IN_FROM_VPP_MERGE4_MOUT
- }, {
- DDP_COMPONENT_MERGE5, DDP_COMPONENT_DP_INTF1,
- MT8188_VDO1_MERGE4_SOUT_SEL, GENMASK(3, 0),
- MT8188_MERGE4_SOUT_TO_DP_INTF0_SEL
- }
+ MMSYS_ROUTE(MDP_RDMA0, MERGE1,
+ MT8188_VDO1_VPP_MERGE0_P0_SEL_IN, GENMASK(0, 0),
+ MT8188_VPP_MERGE0_P0_SEL_IN_FROM_MDP_RDMA0),
+ MMSYS_ROUTE(MDP_RDMA1, MERGE1,
+ MT8188_VDO1_VPP_MERGE0_P1_SEL_IN, GENMASK(0, 0),
+ MT8188_VPP_MERGE0_P1_SEL_IN_FROM_MDP_RDMA1),
+ MMSYS_ROUTE(MDP_RDMA2, MERGE2,
+ MT8188_VDO1_VPP_MERGE1_P0_SEL_IN, GENMASK(0, 0),
+ MT8188_VPP_MERGE1_P0_SEL_IN_FROM_MDP_RDMA2),
+ MMSYS_ROUTE(MERGE1, ETHDR_MIXER,
+ MT8188_VDO1_MERGE0_ASYNC_SOUT_SEL, GENMASK(1, 0),
+ MT8188_SOUT_TO_MIXER_IN1_SEL),
+ MMSYS_ROUTE(MERGE2, ETHDR_MIXER,
+ MT8188_VDO1_MERGE1_ASYNC_SOUT_SEL, GENMASK(1, 0),
+ MT8188_SOUT_TO_MIXER_IN2_SEL),
+ MMSYS_ROUTE(MERGE3, ETHDR_MIXER,
+ MT8188_VDO1_MERGE2_ASYNC_SOUT_SEL, GENMASK(1, 0),
+ MT8188_SOUT_TO_MIXER_IN3_SEL),
+ MMSYS_ROUTE(MERGE4, ETHDR_MIXER,
+ MT8188_VDO1_MERGE3_ASYNC_SOUT_SEL, GENMASK(1, 0),
+ MT8188_SOUT_TO_MIXER_IN4_SEL),
+ MMSYS_ROUTE(ETHDR_MIXER, MERGE5,
+ MT8188_VDO1_MIXER_OUT_SOUT_SEL, GENMASK(0, 0),
+ MT8188_MIXER_SOUT_TO_MERGE4_ASYNC_SEL),
+ MMSYS_ROUTE(MERGE1, ETHDR_MIXER,
+ MT8188_VDO1_MIXER_IN1_SEL_IN, GENMASK(0, 0),
+ MT8188_MIXER_IN1_SEL_IN_FROM_MERGE0_ASYNC_SOUT),
+ MMSYS_ROUTE(MERGE2, ETHDR_MIXER,
+ MT8188_VDO1_MIXER_IN2_SEL_IN, GENMASK(0, 0),
+ MT8188_MIXER_IN2_SEL_IN_FROM_MERGE1_ASYNC_SOUT),
+ MMSYS_ROUTE(MERGE3, ETHDR_MIXER,
+ MT8188_VDO1_MIXER_IN3_SEL_IN, GENMASK(0, 0),
+ MT8188_MIXER_IN3_SEL_IN_FROM_MERGE2_ASYNC_SOUT),
+ MMSYS_ROUTE(MERGE4, ETHDR_MIXER,
+ MT8188_VDO1_MIXER_IN4_SEL_IN, GENMASK(0, 0),
+ MT8188_MIXER_IN4_SEL_IN_FROM_MERGE3_ASYNC_SOUT),
+ MMSYS_ROUTE(ETHDR_MIXER, MERGE5,
+ MT8188_VDO1_MIXER_SOUT_SEL_IN, GENMASK(2, 0),
+ MT8188_MIXER_SOUT_SEL_IN_FROM_DISP_MIXER),
+ MMSYS_ROUTE(ETHDR_MIXER, MERGE5,
+ MT8188_VDO1_MERGE4_ASYNC_SEL_IN, GENMASK(2, 0),
+ MT8188_MERGE4_ASYNC_SEL_IN_FROM_MIXER_OUT_SOUT),
+ MMSYS_ROUTE(MERGE5, DPI1,
+ MT8188_VDO1_DISP_DPI1_SEL_IN, GENMASK(1, 0),
+ MT8188_DISP_DPI1_SEL_IN_FROM_VPP_MERGE4_MOUT),
+ MMSYS_ROUTE(MERGE5, DPI1,
+ MT8188_VDO1_MERGE4_SOUT_SEL, GENMASK(3, 0),
+ MT8188_MERGE4_SOUT_TO_DPI1_SEL),
+ MMSYS_ROUTE(MERGE5, DP_INTF1,
+ MT8188_VDO1_DISP_DP_INTF0_SEL_IN, GENMASK(1, 0),
+ MT8188_DISP_DP_INTF0_SEL_IN_FROM_VPP_MERGE4_MOUT),
+ MMSYS_ROUTE(MERGE5, DP_INTF1,
+ MT8188_VDO1_MERGE4_SOUT_SEL, GENMASK(3, 0),
+ MT8188_MERGE4_SOUT_TO_DP_INTF0_SEL),
};
#endif /* __SOC_MEDIATEK_MT8188_MMSYS_H */
diff --git a/drivers/soc/mediatek/mt8192-mmsys.h b/drivers/soc/mediatek/mt8192-mmsys.h
index a016d80b4bc1..7cafa2455fd0 100644
--- a/drivers/soc/mediatek/mt8192-mmsys.h
+++ b/drivers/soc/mediatek/mt8192-mmsys.h
@@ -31,47 +31,36 @@
#define MT8192_DSI0_SEL_IN_DITHER0 0x1
static const struct mtk_mmsys_routes mmsys_mt8192_routing_table[] = {
- {
- DDP_COMPONENT_OVL_2L0, DDP_COMPONENT_RDMA0,
- MT8192_DISP_OVL0_2L_MOUT_EN, MT8192_OVL0_MOUT_EN_DISP_RDMA0,
- MT8192_OVL0_MOUT_EN_DISP_RDMA0
- }, {
- DDP_COMPONENT_OVL_2L2, DDP_COMPONENT_RDMA4,
- MT8192_DISP_OVL2_2L_MOUT_EN, MT8192_OVL2_2L_MOUT_EN_RDMA4,
- MT8192_OVL2_2L_MOUT_EN_RDMA4
- }, {
- DDP_COMPONENT_DITHER0, DDP_COMPONENT_DSI0,
- MT8192_DISP_DITHER0_MOUT_EN, MT8192_DITHER0_MOUT_IN_DSI0,
- MT8192_DITHER0_MOUT_IN_DSI0
- }, {
- DDP_COMPONENT_OVL_2L0, DDP_COMPONENT_RDMA0,
- MT8192_DISP_RDMA0_SEL_IN, MT8192_RDMA0_SEL_IN_OVL0_2L,
- MT8192_RDMA0_SEL_IN_OVL0_2L
- }, {
- DDP_COMPONENT_CCORR, DDP_COMPONENT_AAL0,
- MT8192_DISP_AAL0_SEL_IN, MT8192_AAL0_SEL_IN_CCORR0,
- MT8192_AAL0_SEL_IN_CCORR0
- }, {
- DDP_COMPONENT_DITHER0, DDP_COMPONENT_DSI0,
- MT8192_DISP_DSI0_SEL_IN, MT8192_DSI0_SEL_IN_DITHER0,
- MT8192_DSI0_SEL_IN_DITHER0
- }, {
- DDP_COMPONENT_RDMA0, DDP_COMPONENT_COLOR0,
- MT8192_DISP_RDMA0_SOUT_SEL, MT8192_RDMA0_SOUT_COLOR0,
- MT8192_RDMA0_SOUT_COLOR0
- }, {
- DDP_COMPONENT_CCORR, DDP_COMPONENT_AAL0,
- MT8192_DISP_CCORR0_SOUT_SEL, MT8192_CCORR0_SOUT_AAL0,
- MT8192_CCORR0_SOUT_AAL0
- }, {
- DDP_COMPONENT_OVL0, DDP_COMPONENT_OVL_2L0,
- MT8192_MMSYS_OVL_MOUT_EN, MT8192_DISP_OVL0_GO_BG,
- MT8192_DISP_OVL0_GO_BG
- }, {
- DDP_COMPONENT_OVL_2L0, DDP_COMPONENT_RDMA0,
- MT8192_MMSYS_OVL_MOUT_EN, MT8192_DISP_OVL0_2L_GO_BLEND,
- MT8192_DISP_OVL0_2L_GO_BLEND
- }
+ MMSYS_ROUTE(OVL_2L0, RDMA0,
+ MT8192_DISP_OVL0_2L_MOUT_EN, MT8192_OVL0_MOUT_EN_DISP_RDMA0,
+ MT8192_OVL0_MOUT_EN_DISP_RDMA0),
+ MMSYS_ROUTE(OVL_2L2, RDMA4,
+ MT8192_DISP_OVL2_2L_MOUT_EN, MT8192_OVL2_2L_MOUT_EN_RDMA4,
+ MT8192_OVL2_2L_MOUT_EN_RDMA4),
+ MMSYS_ROUTE(DITHER0, DSI0,
+ MT8192_DISP_DITHER0_MOUT_EN, MT8192_DITHER0_MOUT_IN_DSI0,
+ MT8192_DITHER0_MOUT_IN_DSI0),
+ MMSYS_ROUTE(OVL_2L0, RDMA0,
+ MT8192_DISP_RDMA0_SEL_IN, MT8192_RDMA0_SEL_IN_OVL0_2L,
+ MT8192_RDMA0_SEL_IN_OVL0_2L),
+ MMSYS_ROUTE(CCORR, AAL0,
+ MT8192_DISP_AAL0_SEL_IN, MT8192_AAL0_SEL_IN_CCORR0,
+ MT8192_AAL0_SEL_IN_CCORR0),
+ MMSYS_ROUTE(DITHER0, DSI0,
+ MT8192_DISP_DSI0_SEL_IN, MT8192_DSI0_SEL_IN_DITHER0,
+ MT8192_DSI0_SEL_IN_DITHER0),
+ MMSYS_ROUTE(RDMA0, COLOR0,
+ MT8192_DISP_RDMA0_SOUT_SEL, MT8192_RDMA0_SOUT_COLOR0,
+ MT8192_RDMA0_SOUT_COLOR0),
+ MMSYS_ROUTE(CCORR, AAL0,
+ MT8192_DISP_CCORR0_SOUT_SEL, MT8192_CCORR0_SOUT_AAL0,
+ MT8192_CCORR0_SOUT_AAL0),
+ MMSYS_ROUTE(OVL0, OVL_2L0,
+ MT8192_MMSYS_OVL_MOUT_EN, MT8192_DISP_OVL0_GO_BG,
+ MT8192_DISP_OVL0_GO_BG),
+ MMSYS_ROUTE(OVL_2L0, RDMA0,
+ MT8192_MMSYS_OVL_MOUT_EN, MT8192_DISP_OVL0_2L_GO_BLEND,
+ MT8192_DISP_OVL0_2L_GO_BLEND),
};
#endif /* __SOC_MEDIATEK_MT8192_MMSYS_H */
diff --git a/drivers/soc/mediatek/mt8195-mmsys.h b/drivers/soc/mediatek/mt8195-mmsys.h
index 9be2df2832a4..f69929a2a4d4 100644
--- a/drivers/soc/mediatek/mt8195-mmsys.h
+++ b/drivers/soc/mediatek/mt8195-mmsys.h
@@ -160,370 +160,278 @@
#define MT8195_SVPP3_MDP_RSZ BIT(5)
static const struct mtk_mmsys_routes mmsys_mt8195_routing_table[] = {
- {
- DDP_COMPONENT_OVL0, DDP_COMPONENT_RDMA0,
- MT8195_VDO0_OVL_MOUT_EN, MT8195_MOUT_DISP_OVL0_TO_DISP_RDMA0,
- MT8195_MOUT_DISP_OVL0_TO_DISP_RDMA0
- }, {
- DDP_COMPONENT_OVL0, DDP_COMPONENT_WDMA0,
- MT8195_VDO0_OVL_MOUT_EN, MT8195_MOUT_DISP_OVL0_TO_DISP_WDMA0,
- MT8195_MOUT_DISP_OVL0_TO_DISP_WDMA0
- }, {
- DDP_COMPONENT_OVL0, DDP_COMPONENT_OVL1,
- MT8195_VDO0_OVL_MOUT_EN, MT8195_MOUT_DISP_OVL0_TO_DISP_OVL1,
- MT8195_MOUT_DISP_OVL0_TO_DISP_OVL1
- }, {
- DDP_COMPONENT_OVL1, DDP_COMPONENT_RDMA1,
- MT8195_VDO0_OVL_MOUT_EN, MT8195_MOUT_DISP_OVL1_TO_DISP_RDMA1,
- MT8195_MOUT_DISP_OVL1_TO_DISP_RDMA1
- }, {
- DDP_COMPONENT_OVL1, DDP_COMPONENT_WDMA1,
- MT8195_VDO0_OVL_MOUT_EN, MT8195_MOUT_DISP_OVL1_TO_DISP_WDMA1,
- MT8195_MOUT_DISP_OVL1_TO_DISP_WDMA1
- }, {
- DDP_COMPONENT_OVL1, DDP_COMPONENT_OVL0,
- MT8195_VDO0_OVL_MOUT_EN, MT8195_MOUT_DISP_OVL1_TO_DISP_OVL0,
- MT8195_MOUT_DISP_OVL1_TO_DISP_OVL0
- }, {
- DDP_COMPONENT_DSC0, DDP_COMPONENT_MERGE0,
- MT8195_VDO0_SEL_IN, MT8195_SEL_IN_VPP_MERGE_FROM_MASK,
- MT8195_SEL_IN_VPP_MERGE_FROM_DSC_WRAP0_OUT
- }, {
- DDP_COMPONENT_DITHER1, DDP_COMPONENT_MERGE0,
- MT8195_VDO0_SEL_IN, MT8195_SEL_IN_VPP_MERGE_FROM_MASK,
- MT8195_SEL_IN_VPP_MERGE_FROM_DISP_DITHER1
- }, {
- DDP_COMPONENT_MERGE5, DDP_COMPONENT_MERGE0,
- MT8195_VDO0_SEL_IN, MT8195_SEL_IN_VPP_MERGE_FROM_MASK,
- MT8195_SEL_IN_VPP_MERGE_FROM_VDO1_VIRTUAL0
- }, {
- DDP_COMPONENT_DITHER0, DDP_COMPONENT_DSC0,
- MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP0_IN_FROM_MASK,
- MT8195_SEL_IN_DSC_WRAP0_IN_FROM_DISP_DITHER0
- }, {
- DDP_COMPONENT_MERGE0, DDP_COMPONENT_DSC0,
- MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP0_IN_FROM_MASK,
- MT8195_SEL_IN_DSC_WRAP0_IN_FROM_VPP_MERGE
- }, {
- DDP_COMPONENT_DITHER1, DDP_COMPONENT_DSC1,
- MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP1_IN_FROM_MASK,
- MT8195_SEL_IN_DSC_WRAP1_IN_FROM_DISP_DITHER1
- }, {
- DDP_COMPONENT_MERGE0, DDP_COMPONENT_DSC1,
- MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP1_IN_FROM_MASK,
- MT8195_SEL_IN_DSC_WRAP1_IN_FROM_VPP_MERGE
- }, {
- DDP_COMPONENT_MERGE0, DDP_COMPONENT_DP_INTF1,
- MT8195_VDO0_SEL_IN, MT8195_SEL_IN_SINA_VIRTUAL0_FROM_MASK,
- MT8195_SEL_IN_SINA_VIRTUAL0_FROM_VPP_MERGE
- }, {
- DDP_COMPONENT_MERGE0, DDP_COMPONENT_DPI0,
- MT8195_VDO0_SEL_IN, MT8195_SEL_IN_SINA_VIRTUAL0_FROM_MASK,
- MT8195_SEL_IN_SINA_VIRTUAL0_FROM_VPP_MERGE
- }, {
- DDP_COMPONENT_MERGE0, DDP_COMPONENT_DPI1,
- MT8195_VDO0_SEL_IN, MT8195_SEL_IN_SINA_VIRTUAL0_FROM_MASK,
- MT8195_SEL_IN_SINA_VIRTUAL0_FROM_VPP_MERGE
- }, {
- DDP_COMPONENT_DSC1, DDP_COMPONENT_DP_INTF1,
- MT8195_VDO0_SEL_IN, MT8195_SEL_IN_SINA_VIRTUAL0_FROM_MASK,
- MT8195_SEL_IN_SINA_VIRTUAL0_FROM_DSC_WRAP1_OUT
- }, {
- DDP_COMPONENT_DSC1, DDP_COMPONENT_DPI0,
- MT8195_VDO0_SEL_IN, MT8195_SEL_IN_SINA_VIRTUAL0_FROM_MASK,
- MT8195_SEL_IN_SINA_VIRTUAL0_FROM_DSC_WRAP1_OUT
- }, {
- DDP_COMPONENT_DSC1, DDP_COMPONENT_DPI1,
- MT8195_VDO0_SEL_IN, MT8195_SEL_IN_SINA_VIRTUAL0_FROM_MASK,
- MT8195_SEL_IN_SINA_VIRTUAL0_FROM_DSC_WRAP1_OUT
- }, {
- DDP_COMPONENT_DSC0, DDP_COMPONENT_DP_INTF1,
- MT8195_VDO0_SEL_IN, MT8195_SEL_IN_SINB_VIRTUAL0_FROM_MASK,
- MT8195_SEL_IN_SINB_VIRTUAL0_FROM_DSC_WRAP0_OUT
- }, {
- DDP_COMPONENT_DSC0, DDP_COMPONENT_DPI0,
- MT8195_VDO0_SEL_IN, MT8195_SEL_IN_SINB_VIRTUAL0_FROM_MASK,
- MT8195_SEL_IN_SINB_VIRTUAL0_FROM_DSC_WRAP0_OUT
- }, {
- DDP_COMPONENT_DSC0, DDP_COMPONENT_DPI1,
- MT8195_VDO0_SEL_IN, MT8195_SEL_IN_SINB_VIRTUAL0_FROM_MASK,
- MT8195_SEL_IN_SINB_VIRTUAL0_FROM_DSC_WRAP0_OUT
- }, {
- DDP_COMPONENT_DSC1, DDP_COMPONENT_DP_INTF0,
- MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DP_INTF0_FROM_MASK,
- MT8195_SEL_IN_DP_INTF0_FROM_DSC_WRAP1_OUT
- }, {
- DDP_COMPONENT_MERGE0, DDP_COMPONENT_DP_INTF0,
- MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DP_INTF0_FROM_MASK,
- MT8195_SEL_IN_DP_INTF0_FROM_VPP_MERGE
- }, {
- DDP_COMPONENT_MERGE5, DDP_COMPONENT_DP_INTF0,
- MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DP_INTF0_FROM_MASK,
- MT8195_SEL_IN_DP_INTF0_FROM_VDO1_VIRTUAL0
- }, {
- DDP_COMPONENT_DSC0, DDP_COMPONENT_DSI0,
- MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSI0_FROM_MASK,
- MT8195_SEL_IN_DSI0_FROM_DSC_WRAP0_OUT
- }, {
- DDP_COMPONENT_DITHER0, DDP_COMPONENT_DSI0,
- MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSI0_FROM_MASK,
- MT8195_SEL_IN_DSI0_FROM_DISP_DITHER0
- }, {
- DDP_COMPONENT_DSC1, DDP_COMPONENT_DSI1,
- MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSI1_FROM_MASK,
- MT8195_SEL_IN_DSI1_FROM_DSC_WRAP1_OUT
- }, {
- DDP_COMPONENT_MERGE0, DDP_COMPONENT_DSI1,
- MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSI1_FROM_MASK,
- MT8195_SEL_IN_DSI1_FROM_VPP_MERGE
- }, {
- DDP_COMPONENT_OVL1, DDP_COMPONENT_WDMA1,
- MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DISP_WDMA1_FROM_MASK,
- MT8195_SEL_IN_DISP_WDMA1_FROM_DISP_OVL1
- }, {
- DDP_COMPONENT_MERGE0, DDP_COMPONENT_WDMA1,
- MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DISP_WDMA1_FROM_MASK,
- MT8195_SEL_IN_DISP_WDMA1_FROM_VPP_MERGE
- }, {
- DDP_COMPONENT_DSC1, DDP_COMPONENT_DSI1,
- MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP1_FROM_MASK,
- MT8195_SEL_IN_DSC_WRAP1_OUT_FROM_DSC_WRAP1_IN
- }, {
- DDP_COMPONENT_DSC1, DDP_COMPONENT_DP_INTF0,
- MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP1_FROM_MASK,
- MT8195_SEL_IN_DSC_WRAP1_OUT_FROM_DSC_WRAP1_IN
- }, {
- DDP_COMPONENT_DSC1, DDP_COMPONENT_DP_INTF1,
- MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP1_FROM_MASK,
- MT8195_SEL_IN_DSC_WRAP1_OUT_FROM_DSC_WRAP1_IN
- }, {
- DDP_COMPONENT_DSC1, DDP_COMPONENT_DPI0,
- MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP1_FROM_MASK,
- MT8195_SEL_IN_DSC_WRAP1_OUT_FROM_DSC_WRAP1_IN
- }, {
- DDP_COMPONENT_DSC1, DDP_COMPONENT_DPI1,
- MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP1_FROM_MASK,
- MT8195_SEL_IN_DSC_WRAP1_OUT_FROM_DSC_WRAP1_IN
- }, {
- DDP_COMPONENT_DSC1, DDP_COMPONENT_MERGE0,
- MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP1_FROM_MASK,
- MT8195_SEL_IN_DSC_WRAP1_OUT_FROM_DSC_WRAP1_IN
- }, {
- DDP_COMPONENT_DITHER1, DDP_COMPONENT_DSI1,
- MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP1_FROM_MASK,
- MT8195_SEL_IN_DSC_WRAP1_OUT_FROM_DISP_DITHER1
- }, {
- DDP_COMPONENT_DITHER1, DDP_COMPONENT_DP_INTF0,
- MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP1_FROM_MASK,
- MT8195_SEL_IN_DSC_WRAP1_OUT_FROM_DISP_DITHER1
- }, {
- DDP_COMPONENT_DITHER1, DDP_COMPONENT_DPI0,
- MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP1_FROM_MASK,
- MT8195_SEL_IN_DSC_WRAP1_OUT_FROM_DISP_DITHER1
- }, {
- DDP_COMPONENT_DITHER1, DDP_COMPONENT_DPI1,
- MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP1_FROM_MASK,
- MT8195_SEL_IN_DSC_WRAP1_OUT_FROM_DISP_DITHER1
- }, {
- DDP_COMPONENT_OVL0, DDP_COMPONENT_WDMA0,
- MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DISP_WDMA0_FROM_MASK,
- MT8195_SEL_IN_DISP_WDMA0_FROM_DISP_OVL0
- }, {
- DDP_COMPONENT_DITHER0, DDP_COMPONENT_DSC0,
- MT8195_VDO0_SEL_OUT, MT8195_SOUT_DISP_DITHER0_TO_MASK,
- MT8195_SOUT_DISP_DITHER0_TO_DSC_WRAP0_IN
- }, {
- DDP_COMPONENT_DITHER0, DDP_COMPONENT_DSI0,
- MT8195_VDO0_SEL_OUT, MT8195_SOUT_DISP_DITHER0_TO_MASK,
- MT8195_SOUT_DISP_DITHER0_TO_DSI0
- }, {
- DDP_COMPONENT_DITHER1, DDP_COMPONENT_DSC1,
- MT8195_VDO0_SEL_OUT, MT8195_SOUT_DISP_DITHER1_TO_MASK,
- MT8195_SOUT_DISP_DITHER1_TO_DSC_WRAP1_IN
- }, {
- DDP_COMPONENT_DITHER1, DDP_COMPONENT_MERGE0,
- MT8195_VDO0_SEL_OUT, MT8195_SOUT_DISP_DITHER1_TO_MASK,
- MT8195_SOUT_DISP_DITHER1_TO_VPP_MERGE
- }, {
- DDP_COMPONENT_DITHER1, DDP_COMPONENT_DSI1,
- MT8195_VDO0_SEL_OUT, MT8195_SOUT_DISP_DITHER1_TO_MASK,
- MT8195_SOUT_DISP_DITHER1_TO_DSC_WRAP1_OUT
- }, {
- DDP_COMPONENT_DITHER1, DDP_COMPONENT_DP_INTF0,
- MT8195_VDO0_SEL_OUT, MT8195_SOUT_DISP_DITHER1_TO_MASK,
- MT8195_SOUT_DISP_DITHER1_TO_DSC_WRAP1_OUT
- }, {
- DDP_COMPONENT_DITHER1, DDP_COMPONENT_DP_INTF1,
- MT8195_VDO0_SEL_OUT, MT8195_SOUT_DISP_DITHER1_TO_MASK,
- MT8195_SOUT_DISP_DITHER1_TO_DSC_WRAP1_OUT
- }, {
- DDP_COMPONENT_DITHER1, DDP_COMPONENT_DPI0,
- MT8195_VDO0_SEL_OUT, MT8195_SOUT_DISP_DITHER1_TO_MASK,
- MT8195_SOUT_DISP_DITHER1_TO_DSC_WRAP1_OUT
- }, {
- DDP_COMPONENT_DITHER1, DDP_COMPONENT_DPI1,
- MT8195_VDO0_SEL_OUT, MT8195_SOUT_DISP_DITHER1_TO_MASK,
- MT8195_SOUT_DISP_DITHER1_TO_DSC_WRAP1_OUT
- }, {
- DDP_COMPONENT_MERGE5, DDP_COMPONENT_MERGE0,
- MT8195_VDO0_SEL_OUT, MT8195_SOUT_VDO1_VIRTUAL0_TO_MASK,
- MT8195_SOUT_VDO1_VIRTUAL0_TO_VPP_MERGE
- }, {
- DDP_COMPONENT_MERGE5, DDP_COMPONENT_DP_INTF0,
- MT8195_VDO0_SEL_OUT, MT8195_SOUT_VDO1_VIRTUAL0_TO_MASK,
- MT8195_SOUT_VDO1_VIRTUAL0_TO_DP_INTF0
- }, {
- DDP_COMPONENT_MERGE0, DDP_COMPONENT_DSI1,
- MT8195_VDO0_SEL_OUT, MT8195_SOUT_VPP_MERGE_TO_MASK,
- MT8195_SOUT_VPP_MERGE_TO_DSI1
- }, {
- DDP_COMPONENT_MERGE0, DDP_COMPONENT_DP_INTF0,
- MT8195_VDO0_SEL_OUT, MT8195_SOUT_VPP_MERGE_TO_MASK,
- MT8195_SOUT_VPP_MERGE_TO_DP_INTF0
- }, {
- DDP_COMPONENT_MERGE0, DDP_COMPONENT_DP_INTF1,
- MT8195_VDO0_SEL_OUT, MT8195_SOUT_VPP_MERGE_TO_MASK,
- MT8195_SOUT_VPP_MERGE_TO_SINA_VIRTUAL0
- }, {
- DDP_COMPONENT_MERGE0, DDP_COMPONENT_DPI0,
- MT8195_VDO0_SEL_OUT, MT8195_SOUT_VPP_MERGE_TO_MASK,
- MT8195_SOUT_VPP_MERGE_TO_SINA_VIRTUAL0
- }, {
- DDP_COMPONENT_MERGE0, DDP_COMPONENT_DPI1,
- MT8195_VDO0_SEL_OUT, MT8195_SOUT_VPP_MERGE_TO_MASK,
- MT8195_SOUT_VPP_MERGE_TO_SINA_VIRTUAL0
- }, {
- DDP_COMPONENT_MERGE0, DDP_COMPONENT_WDMA1,
- MT8195_VDO0_SEL_OUT, MT8195_SOUT_VPP_MERGE_TO_MASK,
- MT8195_SOUT_VPP_MERGE_TO_DISP_WDMA1
- }, {
- DDP_COMPONENT_MERGE0, DDP_COMPONENT_DSC0,
- MT8195_VDO0_SEL_OUT, MT8195_SOUT_VPP_MERGE_TO_MASK,
- MT8195_SOUT_VPP_MERGE_TO_DSC_WRAP0_IN
- }, {
- DDP_COMPONENT_MERGE0, DDP_COMPONENT_DSC1,
- MT8195_VDO0_SEL_OUT, MT8195_SOUT_VPP_MERGE_TO_DSC_WRAP1_IN_MASK,
- MT8195_SOUT_VPP_MERGE_TO_DSC_WRAP1_IN
- }, {
- DDP_COMPONENT_DSC0, DDP_COMPONENT_DSI0,
- MT8195_VDO0_SEL_OUT, MT8195_SOUT_DSC_WRAP0_OUT_TO_MASK,
- MT8195_SOUT_DSC_WRAP0_OUT_TO_DSI0
- }, {
- DDP_COMPONENT_DSC0, DDP_COMPONENT_DP_INTF1,
- MT8195_VDO0_SEL_OUT, MT8195_SOUT_DSC_WRAP0_OUT_TO_MASK,
- MT8195_SOUT_DSC_WRAP0_OUT_TO_SINB_VIRTUAL0
- }, {
- DDP_COMPONENT_DSC0, DDP_COMPONENT_DPI0,
- MT8195_VDO0_SEL_OUT, MT8195_SOUT_DSC_WRAP0_OUT_TO_MASK,
- MT8195_SOUT_DSC_WRAP0_OUT_TO_SINB_VIRTUAL0
- }, {
- DDP_COMPONENT_DSC0, DDP_COMPONENT_DPI1,
- MT8195_VDO0_SEL_OUT, MT8195_SOUT_DSC_WRAP0_OUT_TO_MASK,
- MT8195_SOUT_DSC_WRAP0_OUT_TO_SINB_VIRTUAL0
- }, {
- DDP_COMPONENT_DSC0, DDP_COMPONENT_MERGE0,
- MT8195_VDO0_SEL_OUT, MT8195_SOUT_DSC_WRAP0_OUT_TO_MASK,
- MT8195_SOUT_DSC_WRAP0_OUT_TO_VPP_MERGE
- }, {
- DDP_COMPONENT_DSC1, DDP_COMPONENT_DSI1,
- MT8195_VDO0_SEL_OUT, MT8195_SOUT_DSC_WRAP1_OUT_TO_MASK,
- MT8195_SOUT_DSC_WRAP1_OUT_TO_DSI1
- }, {
- DDP_COMPONENT_DSC1, DDP_COMPONENT_DP_INTF0,
- MT8195_VDO0_SEL_OUT, MT8195_SOUT_DSC_WRAP1_OUT_TO_MASK,
- MT8195_SOUT_DSC_WRAP1_OUT_TO_DP_INTF0
- }, {
- DDP_COMPONENT_DSC1, DDP_COMPONENT_DP_INTF1,
- MT8195_VDO0_SEL_OUT, MT8195_SOUT_DSC_WRAP1_OUT_TO_MASK,
- MT8195_SOUT_DSC_WRAP1_OUT_TO_SINA_VIRTUAL0
- }, {
- DDP_COMPONENT_DSC1, DDP_COMPONENT_DPI0,
- MT8195_VDO0_SEL_OUT, MT8195_SOUT_DSC_WRAP1_OUT_TO_MASK,
- MT8195_SOUT_DSC_WRAP1_OUT_TO_SINA_VIRTUAL0
- }, {
- DDP_COMPONENT_DSC1, DDP_COMPONENT_DPI1,
- MT8195_VDO0_SEL_OUT, MT8195_SOUT_DSC_WRAP1_OUT_TO_MASK,
- MT8195_SOUT_DSC_WRAP1_OUT_TO_SINA_VIRTUAL0
- }, {
- DDP_COMPONENT_DSC1, DDP_COMPONENT_MERGE0,
- MT8195_VDO0_SEL_OUT, MT8195_SOUT_DSC_WRAP1_OUT_TO_MASK,
- MT8195_SOUT_DSC_WRAP1_OUT_TO_VPP_MERGE
- }
+ MMSYS_ROUTE(OVL0, RDMA0,
+ MT8195_VDO0_OVL_MOUT_EN, MT8195_MOUT_DISP_OVL0_TO_DISP_RDMA0,
+ MT8195_MOUT_DISP_OVL0_TO_DISP_RDMA0),
+ MMSYS_ROUTE(OVL0, WDMA0,
+ MT8195_VDO0_OVL_MOUT_EN, MT8195_MOUT_DISP_OVL0_TO_DISP_WDMA0,
+ MT8195_MOUT_DISP_OVL0_TO_DISP_WDMA0),
+ MMSYS_ROUTE(OVL0, OVL1,
+ MT8195_VDO0_OVL_MOUT_EN, MT8195_MOUT_DISP_OVL0_TO_DISP_OVL1,
+ MT8195_MOUT_DISP_OVL0_TO_DISP_OVL1),
+ MMSYS_ROUTE(OVL1, RDMA1,
+ MT8195_VDO0_OVL_MOUT_EN, MT8195_MOUT_DISP_OVL1_TO_DISP_RDMA1,
+ MT8195_MOUT_DISP_OVL1_TO_DISP_RDMA1),
+ MMSYS_ROUTE(OVL1, WDMA1,
+ MT8195_VDO0_OVL_MOUT_EN, MT8195_MOUT_DISP_OVL1_TO_DISP_WDMA1,
+ MT8195_MOUT_DISP_OVL1_TO_DISP_WDMA1),
+ MMSYS_ROUTE(OVL1, OVL0,
+ MT8195_VDO0_OVL_MOUT_EN, MT8195_MOUT_DISP_OVL1_TO_DISP_OVL0,
+ MT8195_MOUT_DISP_OVL1_TO_DISP_OVL0),
+ MMSYS_ROUTE(DSC0, MERGE0,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_VPP_MERGE_FROM_MASK,
+ MT8195_SEL_IN_VPP_MERGE_FROM_DSC_WRAP0_OUT),
+ MMSYS_ROUTE(DITHER1, MERGE0,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_VPP_MERGE_FROM_MASK,
+ MT8195_SEL_IN_VPP_MERGE_FROM_DISP_DITHER1),
+ MMSYS_ROUTE(MERGE5, MERGE0,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_VPP_MERGE_FROM_MASK,
+ MT8195_SEL_IN_VPP_MERGE_FROM_VDO1_VIRTUAL0),
+ MMSYS_ROUTE(DITHER0, DSC0,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP0_IN_FROM_MASK,
+ MT8195_SEL_IN_DSC_WRAP0_IN_FROM_DISP_DITHER0),
+ MMSYS_ROUTE(MERGE0, DSC0,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP0_IN_FROM_MASK,
+ MT8195_SEL_IN_DSC_WRAP0_IN_FROM_VPP_MERGE),
+ MMSYS_ROUTE(DITHER1, DSC1,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP1_IN_FROM_MASK,
+ MT8195_SEL_IN_DSC_WRAP1_IN_FROM_DISP_DITHER1),
+ MMSYS_ROUTE(MERGE0, DSC1,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP1_IN_FROM_MASK,
+ MT8195_SEL_IN_DSC_WRAP1_IN_FROM_VPP_MERGE),
+ MMSYS_ROUTE(MERGE0, DP_INTF1,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_SINA_VIRTUAL0_FROM_MASK,
+ MT8195_SEL_IN_SINA_VIRTUAL0_FROM_VPP_MERGE),
+ MMSYS_ROUTE(MERGE0, DPI0,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_SINA_VIRTUAL0_FROM_MASK,
+ MT8195_SEL_IN_SINA_VIRTUAL0_FROM_VPP_MERGE),
+ MMSYS_ROUTE(MERGE0, DPI1,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_SINA_VIRTUAL0_FROM_MASK,
+ MT8195_SEL_IN_SINA_VIRTUAL0_FROM_VPP_MERGE),
+ MMSYS_ROUTE(DSC1, DP_INTF1,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_SINA_VIRTUAL0_FROM_MASK,
+ MT8195_SEL_IN_SINA_VIRTUAL0_FROM_DSC_WRAP1_OUT),
+ MMSYS_ROUTE(DSC1, DPI0,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_SINA_VIRTUAL0_FROM_MASK,
+ MT8195_SEL_IN_SINA_VIRTUAL0_FROM_DSC_WRAP1_OUT),
+ MMSYS_ROUTE(DSC1, DPI1,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_SINA_VIRTUAL0_FROM_MASK,
+ MT8195_SEL_IN_SINA_VIRTUAL0_FROM_DSC_WRAP1_OUT),
+ MMSYS_ROUTE(DSC0, DP_INTF1,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_SINB_VIRTUAL0_FROM_MASK,
+ MT8195_SEL_IN_SINB_VIRTUAL0_FROM_DSC_WRAP0_OUT),
+ MMSYS_ROUTE(DSC0, DPI0,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_SINB_VIRTUAL0_FROM_MASK,
+ MT8195_SEL_IN_SINB_VIRTUAL0_FROM_DSC_WRAP0_OUT),
+ MMSYS_ROUTE(DSC0, DPI1,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_SINB_VIRTUAL0_FROM_MASK,
+ MT8195_SEL_IN_SINB_VIRTUAL0_FROM_DSC_WRAP0_OUT),
+ MMSYS_ROUTE(DSC1, DP_INTF0,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DP_INTF0_FROM_MASK,
+ MT8195_SEL_IN_DP_INTF0_FROM_DSC_WRAP1_OUT),
+ MMSYS_ROUTE(MERGE0, DP_INTF0,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DP_INTF0_FROM_MASK,
+ MT8195_SEL_IN_DP_INTF0_FROM_VPP_MERGE),
+ MMSYS_ROUTE(MERGE5, DP_INTF0,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DP_INTF0_FROM_MASK,
+ MT8195_SEL_IN_DP_INTF0_FROM_VDO1_VIRTUAL0),
+ MMSYS_ROUTE(DSC0, DSI0,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSI0_FROM_MASK,
+ MT8195_SEL_IN_DSI0_FROM_DSC_WRAP0_OUT),
+ MMSYS_ROUTE(DITHER0, DSI0,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSI0_FROM_MASK,
+ MT8195_SEL_IN_DSI0_FROM_DISP_DITHER0),
+ MMSYS_ROUTE(DSC1, DSI1,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSI1_FROM_MASK,
+ MT8195_SEL_IN_DSI1_FROM_DSC_WRAP1_OUT),
+ MMSYS_ROUTE(MERGE0, DSI1,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSI1_FROM_MASK,
+ MT8195_SEL_IN_DSI1_FROM_VPP_MERGE),
+ MMSYS_ROUTE(OVL1, WDMA1,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DISP_WDMA1_FROM_MASK,
+ MT8195_SEL_IN_DISP_WDMA1_FROM_DISP_OVL1),
+ MMSYS_ROUTE(MERGE0, WDMA1,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DISP_WDMA1_FROM_MASK,
+ MT8195_SEL_IN_DISP_WDMA1_FROM_VPP_MERGE),
+ MMSYS_ROUTE(DSC1, DSI1,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP1_FROM_MASK,
+ MT8195_SEL_IN_DSC_WRAP1_OUT_FROM_DSC_WRAP1_IN),
+ MMSYS_ROUTE(DSC1, DP_INTF0,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP1_FROM_MASK,
+ MT8195_SEL_IN_DSC_WRAP1_OUT_FROM_DSC_WRAP1_IN),
+ MMSYS_ROUTE(DSC1, DP_INTF1,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP1_FROM_MASK,
+ MT8195_SEL_IN_DSC_WRAP1_OUT_FROM_DSC_WRAP1_IN),
+ MMSYS_ROUTE(DSC1, DPI0,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP1_FROM_MASK,
+ MT8195_SEL_IN_DSC_WRAP1_OUT_FROM_DSC_WRAP1_IN),
+ MMSYS_ROUTE(DSC1, DPI1,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP1_FROM_MASK,
+ MT8195_SEL_IN_DSC_WRAP1_OUT_FROM_DSC_WRAP1_IN),
+ MMSYS_ROUTE(DSC1, MERGE0,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP1_FROM_MASK,
+ MT8195_SEL_IN_DSC_WRAP1_OUT_FROM_DSC_WRAP1_IN),
+ MMSYS_ROUTE(DITHER1, DSI1,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP1_FROM_MASK,
+ MT8195_SEL_IN_DSC_WRAP1_OUT_FROM_DISP_DITHER1),
+ MMSYS_ROUTE(DITHER1, DP_INTF0,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP1_FROM_MASK,
+ MT8195_SEL_IN_DSC_WRAP1_OUT_FROM_DISP_DITHER1),
+ MMSYS_ROUTE(DITHER1, DPI0,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP1_FROM_MASK,
+ MT8195_SEL_IN_DSC_WRAP1_OUT_FROM_DISP_DITHER1),
+ MMSYS_ROUTE(DITHER1, DPI1,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DSC_WRAP1_FROM_MASK,
+ MT8195_SEL_IN_DSC_WRAP1_OUT_FROM_DISP_DITHER1),
+ MMSYS_ROUTE(OVL0, WDMA0,
+ MT8195_VDO0_SEL_IN, MT8195_SEL_IN_DISP_WDMA0_FROM_MASK,
+ MT8195_SEL_IN_DISP_WDMA0_FROM_DISP_OVL0),
+ MMSYS_ROUTE(DITHER0, DSC0,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_DISP_DITHER0_TO_MASK,
+ MT8195_SOUT_DISP_DITHER0_TO_DSC_WRAP0_IN),
+ MMSYS_ROUTE(DITHER0, DSI0,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_DISP_DITHER0_TO_MASK,
+ MT8195_SOUT_DISP_DITHER0_TO_DSI0),
+ MMSYS_ROUTE(DITHER1, DSC1,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_DISP_DITHER1_TO_MASK,
+ MT8195_SOUT_DISP_DITHER1_TO_DSC_WRAP1_IN),
+ MMSYS_ROUTE(DITHER1, MERGE0,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_DISP_DITHER1_TO_MASK,
+ MT8195_SOUT_DISP_DITHER1_TO_VPP_MERGE),
+ MMSYS_ROUTE(DITHER1, DSI1,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_DISP_DITHER1_TO_MASK,
+ MT8195_SOUT_DISP_DITHER1_TO_DSC_WRAP1_OUT),
+ MMSYS_ROUTE(DITHER1, DP_INTF0,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_DISP_DITHER1_TO_MASK,
+ MT8195_SOUT_DISP_DITHER1_TO_DSC_WRAP1_OUT),
+ MMSYS_ROUTE(DITHER1, DP_INTF1,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_DISP_DITHER1_TO_MASK,
+ MT8195_SOUT_DISP_DITHER1_TO_DSC_WRAP1_OUT),
+ MMSYS_ROUTE(DITHER1, DPI0,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_DISP_DITHER1_TO_MASK,
+ MT8195_SOUT_DISP_DITHER1_TO_DSC_WRAP1_OUT),
+ MMSYS_ROUTE(DITHER1, DPI1,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_DISP_DITHER1_TO_MASK,
+ MT8195_SOUT_DISP_DITHER1_TO_DSC_WRAP1_OUT),
+ MMSYS_ROUTE(MERGE5, MERGE0,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_VDO1_VIRTUAL0_TO_MASK,
+ MT8195_SOUT_VDO1_VIRTUAL0_TO_VPP_MERGE),
+ MMSYS_ROUTE(MERGE5, DP_INTF0,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_VDO1_VIRTUAL0_TO_MASK,
+ MT8195_SOUT_VDO1_VIRTUAL0_TO_DP_INTF0),
+ MMSYS_ROUTE(MERGE0, DSI1,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_VPP_MERGE_TO_MASK,
+ MT8195_SOUT_VPP_MERGE_TO_DSI1),
+ MMSYS_ROUTE(MERGE0, DP_INTF0,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_VPP_MERGE_TO_MASK,
+ MT8195_SOUT_VPP_MERGE_TO_DP_INTF0),
+ MMSYS_ROUTE(MERGE0, DP_INTF1,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_VPP_MERGE_TO_MASK,
+ MT8195_SOUT_VPP_MERGE_TO_SINA_VIRTUAL0),
+ MMSYS_ROUTE(MERGE0, DPI0,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_VPP_MERGE_TO_MASK,
+ MT8195_SOUT_VPP_MERGE_TO_SINA_VIRTUAL0),
+ MMSYS_ROUTE(MERGE0, DPI1,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_VPP_MERGE_TO_MASK,
+ MT8195_SOUT_VPP_MERGE_TO_SINA_VIRTUAL0),
+ MMSYS_ROUTE(MERGE0, WDMA1,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_VPP_MERGE_TO_MASK,
+ MT8195_SOUT_VPP_MERGE_TO_DISP_WDMA1),
+ MMSYS_ROUTE(MERGE0, DSC0,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_VPP_MERGE_TO_MASK,
+ MT8195_SOUT_VPP_MERGE_TO_DSC_WRAP0_IN),
+ MMSYS_ROUTE(MERGE0, DSC1,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_VPP_MERGE_TO_DSC_WRAP1_IN_MASK,
+ MT8195_SOUT_VPP_MERGE_TO_DSC_WRAP1_IN),
+ MMSYS_ROUTE(DSC0, DSI0,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_DSC_WRAP0_OUT_TO_MASK,
+ MT8195_SOUT_DSC_WRAP0_OUT_TO_DSI0),
+ MMSYS_ROUTE(DSC0, DP_INTF1,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_DSC_WRAP0_OUT_TO_MASK,
+ MT8195_SOUT_DSC_WRAP0_OUT_TO_SINB_VIRTUAL0),
+ MMSYS_ROUTE(DSC0, DPI0,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_DSC_WRAP0_OUT_TO_MASK,
+ MT8195_SOUT_DSC_WRAP0_OUT_TO_SINB_VIRTUAL0),
+ MMSYS_ROUTE(DSC0, DPI1,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_DSC_WRAP0_OUT_TO_MASK,
+ MT8195_SOUT_DSC_WRAP0_OUT_TO_SINB_VIRTUAL0),
+ MMSYS_ROUTE(DSC0, MERGE0,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_DSC_WRAP0_OUT_TO_MASK,
+ MT8195_SOUT_DSC_WRAP0_OUT_TO_VPP_MERGE),
+ MMSYS_ROUTE(DSC1, DSI1,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_DSC_WRAP1_OUT_TO_MASK,
+ MT8195_SOUT_DSC_WRAP1_OUT_TO_DSI1),
+ MMSYS_ROUTE(DSC1, DP_INTF0,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_DSC_WRAP1_OUT_TO_MASK,
+ MT8195_SOUT_DSC_WRAP1_OUT_TO_DP_INTF0),
+ MMSYS_ROUTE(DSC1, DP_INTF1,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_DSC_WRAP1_OUT_TO_MASK,
+ MT8195_SOUT_DSC_WRAP1_OUT_TO_SINA_VIRTUAL0),
+ MMSYS_ROUTE(DSC1, DPI0,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_DSC_WRAP1_OUT_TO_MASK,
+ MT8195_SOUT_DSC_WRAP1_OUT_TO_SINA_VIRTUAL0),
+ MMSYS_ROUTE(DSC1, DPI1,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_DSC_WRAP1_OUT_TO_MASK,
+ MT8195_SOUT_DSC_WRAP1_OUT_TO_SINA_VIRTUAL0),
+ MMSYS_ROUTE(DSC1, MERGE0,
+ MT8195_VDO0_SEL_OUT, MT8195_SOUT_DSC_WRAP1_OUT_TO_MASK,
+ MT8195_SOUT_DSC_WRAP1_OUT_TO_VPP_MERGE),
};
static const struct mtk_mmsys_routes mmsys_mt8195_vdo1_routing_table[] = {
- {
- DDP_COMPONENT_MDP_RDMA0, DDP_COMPONENT_MERGE1,
- MT8195_VDO1_VPP_MERGE0_P0_SEL_IN, GENMASK(0, 0),
- MT8195_VPP_MERGE0_P0_SEL_IN_FROM_MDP_RDMA0
- }, {
- DDP_COMPONENT_MDP_RDMA1, DDP_COMPONENT_MERGE1,
- MT8195_VDO1_VPP_MERGE0_P1_SEL_IN, GENMASK(0, 0),
- MT8195_VPP_MERGE0_P1_SEL_IN_FROM_MDP_RDMA1
- }, {
- DDP_COMPONENT_MDP_RDMA2, DDP_COMPONENT_MERGE2,
- MT8195_VDO1_VPP_MERGE1_P0_SEL_IN, GENMASK(0, 0),
- MT8195_VPP_MERGE1_P0_SEL_IN_FROM_MDP_RDMA2
- }, {
- DDP_COMPONENT_MERGE1, DDP_COMPONENT_ETHDR_MIXER,
- MT8195_VDO1_MERGE0_ASYNC_SOUT_SEL, GENMASK(1, 0),
- MT8195_SOUT_TO_MIXER_IN1_SEL
- }, {
- DDP_COMPONENT_MERGE2, DDP_COMPONENT_ETHDR_MIXER,
- MT8195_VDO1_MERGE1_ASYNC_SOUT_SEL, GENMASK(1, 0),
- MT8195_SOUT_TO_MIXER_IN2_SEL
- }, {
- DDP_COMPONENT_MERGE3, DDP_COMPONENT_ETHDR_MIXER,
- MT8195_VDO1_MERGE2_ASYNC_SOUT_SEL, GENMASK(1, 0),
- MT8195_SOUT_TO_MIXER_IN3_SEL
- }, {
- DDP_COMPONENT_MERGE4, DDP_COMPONENT_ETHDR_MIXER,
- MT8195_VDO1_MERGE3_ASYNC_SOUT_SEL, GENMASK(1, 0),
- MT8195_SOUT_TO_MIXER_IN4_SEL
- }, {
- DDP_COMPONENT_ETHDR_MIXER, DDP_COMPONENT_MERGE5,
- MT8195_VDO1_MIXER_OUT_SOUT_SEL, GENMASK(0, 0),
- MT8195_MIXER_SOUT_TO_MERGE4_ASYNC_SEL
- }, {
- DDP_COMPONENT_MERGE1, DDP_COMPONENT_ETHDR_MIXER,
- MT8195_VDO1_MIXER_IN1_SEL_IN, GENMASK(0, 0),
- MT8195_MIXER_IN1_SEL_IN_FROM_MERGE0_ASYNC_SOUT
- }, {
- DDP_COMPONENT_MERGE2, DDP_COMPONENT_ETHDR_MIXER,
- MT8195_VDO1_MIXER_IN2_SEL_IN, GENMASK(0, 0),
- MT8195_MIXER_IN2_SEL_IN_FROM_MERGE1_ASYNC_SOUT
- }, {
- DDP_COMPONENT_MERGE3, DDP_COMPONENT_ETHDR_MIXER,
- MT8195_VDO1_MIXER_IN3_SEL_IN, GENMASK(0, 0),
- MT8195_MIXER_IN3_SEL_IN_FROM_MERGE2_ASYNC_SOUT
- }, {
- DDP_COMPONENT_MERGE4, DDP_COMPONENT_ETHDR_MIXER,
- MT8195_VDO1_MIXER_IN4_SEL_IN, GENMASK(0, 0),
- MT8195_MIXER_IN4_SEL_IN_FROM_MERGE3_ASYNC_SOUT
- }, {
- DDP_COMPONENT_ETHDR_MIXER, DDP_COMPONENT_MERGE5,
- MT8195_VDO1_MIXER_SOUT_SEL_IN, GENMASK(2, 0),
- MT8195_MIXER_SOUT_SEL_IN_FROM_DISP_MIXER
- }, {
- DDP_COMPONENT_ETHDR_MIXER, DDP_COMPONENT_MERGE5,
- MT8195_VDO1_MERGE4_ASYNC_SEL_IN, GENMASK(2, 0),
- MT8195_MERGE4_ASYNC_SEL_IN_FROM_MIXER_OUT_SOUT
- }, {
- DDP_COMPONENT_MERGE5, DDP_COMPONENT_DPI1,
- MT8195_VDO1_DISP_DPI1_SEL_IN, GENMASK(1, 0),
- MT8195_DISP_DPI1_SEL_IN_FROM_VPP_MERGE4_MOUT
- }, {
- DDP_COMPONENT_MERGE5, DDP_COMPONENT_DPI1,
- MT8195_VDO1_MERGE4_SOUT_SEL, GENMASK(1, 0),
- MT8195_MERGE4_SOUT_TO_DPI1_SEL
- }, {
- DDP_COMPONENT_MERGE5, DDP_COMPONENT_DP_INTF1,
- MT8195_VDO1_DISP_DP_INTF0_SEL_IN, GENMASK(1, 0),
- MT8195_DISP_DP_INTF0_SEL_IN_FROM_VPP_MERGE4_MOUT
- }, {
- DDP_COMPONENT_MERGE5, DDP_COMPONENT_DP_INTF1,
- MT8195_VDO1_MERGE4_SOUT_SEL, GENMASK(1, 0),
- MT8195_MERGE4_SOUT_TO_DP_INTF0_SEL
- }
+ MMSYS_ROUTE(MDP_RDMA0, MERGE1,
+ MT8195_VDO1_VPP_MERGE0_P0_SEL_IN, GENMASK(0, 0),
+ MT8195_VPP_MERGE0_P0_SEL_IN_FROM_MDP_RDMA0),
+ MMSYS_ROUTE(MDP_RDMA1, MERGE1,
+ MT8195_VDO1_VPP_MERGE0_P1_SEL_IN, GENMASK(0, 0),
+ MT8195_VPP_MERGE0_P1_SEL_IN_FROM_MDP_RDMA1),
+ MMSYS_ROUTE(MDP_RDMA2, MERGE2,
+ MT8195_VDO1_VPP_MERGE1_P0_SEL_IN, GENMASK(0, 0),
+ MT8195_VPP_MERGE1_P0_SEL_IN_FROM_MDP_RDMA2),
+ MMSYS_ROUTE(MERGE1, ETHDR_MIXER,
+ MT8195_VDO1_MERGE0_ASYNC_SOUT_SEL, GENMASK(1, 0),
+ MT8195_SOUT_TO_MIXER_IN1_SEL),
+ MMSYS_ROUTE(MERGE2, ETHDR_MIXER,
+ MT8195_VDO1_MERGE1_ASYNC_SOUT_SEL, GENMASK(1, 0),
+ MT8195_SOUT_TO_MIXER_IN2_SEL),
+ MMSYS_ROUTE(MERGE3, ETHDR_MIXER,
+ MT8195_VDO1_MERGE2_ASYNC_SOUT_SEL, GENMASK(1, 0),
+ MT8195_SOUT_TO_MIXER_IN3_SEL),
+ MMSYS_ROUTE(MERGE4, ETHDR_MIXER,
+ MT8195_VDO1_MERGE3_ASYNC_SOUT_SEL, GENMASK(1, 0),
+ MT8195_SOUT_TO_MIXER_IN4_SEL),
+ MMSYS_ROUTE(ETHDR_MIXER, MERGE5,
+ MT8195_VDO1_MIXER_OUT_SOUT_SEL, GENMASK(0, 0),
+ MT8195_MIXER_SOUT_TO_MERGE4_ASYNC_SEL),
+ MMSYS_ROUTE(MERGE1, ETHDR_MIXER,
+ MT8195_VDO1_MIXER_IN1_SEL_IN, GENMASK(0, 0),
+ MT8195_MIXER_IN1_SEL_IN_FROM_MERGE0_ASYNC_SOUT),
+ MMSYS_ROUTE(MERGE2, ETHDR_MIXER,
+ MT8195_VDO1_MIXER_IN2_SEL_IN, GENMASK(0, 0),
+ MT8195_MIXER_IN2_SEL_IN_FROM_MERGE1_ASYNC_SOUT),
+ MMSYS_ROUTE(MERGE3, ETHDR_MIXER,
+ MT8195_VDO1_MIXER_IN3_SEL_IN, GENMASK(0, 0),
+ MT8195_MIXER_IN3_SEL_IN_FROM_MERGE2_ASYNC_SOUT),
+ MMSYS_ROUTE(MERGE4, ETHDR_MIXER,
+ MT8195_VDO1_MIXER_IN4_SEL_IN, GENMASK(0, 0),
+ MT8195_MIXER_IN4_SEL_IN_FROM_MERGE3_ASYNC_SOUT),
+ MMSYS_ROUTE(ETHDR_MIXER, MERGE5,
+ MT8195_VDO1_MIXER_SOUT_SEL_IN, GENMASK(2, 0),
+ MT8195_MIXER_SOUT_SEL_IN_FROM_DISP_MIXER),
+ MMSYS_ROUTE(ETHDR_MIXER, MERGE5,
+ MT8195_VDO1_MERGE4_ASYNC_SEL_IN, GENMASK(2, 0),
+ MT8195_MERGE4_ASYNC_SEL_IN_FROM_MIXER_OUT_SOUT),
+ MMSYS_ROUTE(MERGE5, DPI1,
+ MT8195_VDO1_DISP_DPI1_SEL_IN, GENMASK(1, 0),
+ MT8195_DISP_DPI1_SEL_IN_FROM_VPP_MERGE4_MOUT),
+ MMSYS_ROUTE(MERGE5, DPI1,
+ MT8195_VDO1_MERGE4_SOUT_SEL, GENMASK(1, 0),
+ MT8195_MERGE4_SOUT_TO_DPI1_SEL),
+ MMSYS_ROUTE(MERGE5, DP_INTF1,
+ MT8195_VDO1_DISP_DP_INTF0_SEL_IN, GENMASK(1, 0),
+ MT8195_DISP_DP_INTF0_SEL_IN_FROM_VPP_MERGE4_MOUT),
+ MMSYS_ROUTE(MERGE5, DP_INTF1,
+ MT8195_VDO1_MERGE4_SOUT_SEL, GENMASK(1, 0),
+ MT8195_MERGE4_SOUT_TO_DP_INTF0_SEL),
};
#endif /* __SOC_MEDIATEK_MT8195_MMSYS_H */
diff --git a/drivers/soc/mediatek/mt8365-mmsys.h b/drivers/soc/mediatek/mt8365-mmsys.h
index 7abaf048d91e..533a3fd0923b 100644
--- a/drivers/soc/mediatek/mt8365-mmsys.h
+++ b/drivers/soc/mediatek/mt8365-mmsys.h
@@ -14,8 +14,9 @@
#define MT8365_DISP_REG_CONFIG_DISP_DPI0_SEL_IN 0xfd8
#define MT8365_DISP_REG_CONFIG_DISP_LVDS_SYS_CFG_00 0xfdc
+#define MT8365_DISP_MS_IN_OUT_MASK GENMASK(3, 0)
#define MT8365_RDMA0_SOUT_COLOR0 0x1
-#define MT8365_DITHER_MOUT_EN_DSI0 0x1
+#define MT8365_DITHER_MOUT_EN_DSI0 BIT(0)
#define MT8365_DSI0_SEL_IN_DITHER 0x1
#define MT8365_RDMA0_SEL_IN_OVL0 0x0
#define MT8365_RDMA0_RSZ0_SEL_IN_RDMA0 0x0
@@ -27,56 +28,37 @@
#define MT8365_DPI0_SEL_IN_RDMA1 0x0
static const struct mtk_mmsys_routes mt8365_mmsys_routing_table[] = {
- {
- DDP_COMPONENT_OVL0, DDP_COMPONENT_RDMA0,
- MT8365_DISP_REG_CONFIG_DISP_OVL0_MOUT_EN,
- MT8365_OVL0_MOUT_PATH0_SEL, MT8365_OVL0_MOUT_PATH0_SEL
- },
- {
- DDP_COMPONENT_OVL0, DDP_COMPONENT_RDMA0,
- MT8365_DISP_REG_CONFIG_DISP_RDMA0_SEL_IN,
- MT8365_RDMA0_SEL_IN_OVL0, MT8365_RDMA0_SEL_IN_OVL0
- },
- {
- DDP_COMPONENT_RDMA0, DDP_COMPONENT_COLOR0,
- MT8365_DISP_REG_CONFIG_DISP_RDMA0_SOUT_SEL,
- MT8365_RDMA0_SOUT_COLOR0, MT8365_RDMA0_SOUT_COLOR0
- },
- {
- DDP_COMPONENT_COLOR0, DDP_COMPONENT_CCORR,
- MT8365_DISP_REG_CONFIG_DISP_COLOR0_SEL_IN,
- MT8365_DISP_COLOR_SEL_IN_COLOR0,MT8365_DISP_COLOR_SEL_IN_COLOR0
- },
- {
- DDP_COMPONENT_DITHER0, DDP_COMPONENT_DSI0,
- MT8365_DISP_REG_CONFIG_DISP_DITHER0_MOUT_EN,
- MT8365_DITHER_MOUT_EN_DSI0, MT8365_DITHER_MOUT_EN_DSI0
- },
- {
- DDP_COMPONENT_DITHER0, DDP_COMPONENT_DSI0,
- MT8365_DISP_REG_CONFIG_DISP_DSI0_SEL_IN,
- MT8365_DSI0_SEL_IN_DITHER, MT8365_DSI0_SEL_IN_DITHER
- },
- {
- DDP_COMPONENT_RDMA0, DDP_COMPONENT_COLOR0,
- MT8365_DISP_REG_CONFIG_DISP_RDMA0_RSZ0_SEL_IN,
- MT8365_RDMA0_RSZ0_SEL_IN_RDMA0, MT8365_RDMA0_RSZ0_SEL_IN_RDMA0
- },
- {
- DDP_COMPONENT_RDMA1, DDP_COMPONENT_DPI0,
- MT8365_DISP_REG_CONFIG_DISP_LVDS_SYS_CFG_00,
- MT8365_LVDS_SYS_CFG_00_SEL_LVDS_PXL_CLK, MT8365_LVDS_SYS_CFG_00_SEL_LVDS_PXL_CLK
- },
- {
- DDP_COMPONENT_RDMA1, DDP_COMPONENT_DPI0,
- MT8365_DISP_REG_CONFIG_DISP_DPI0_SEL_IN,
- MT8365_DPI0_SEL_IN_RDMA1, MT8365_DPI0_SEL_IN_RDMA1
- },
- {
- DDP_COMPONENT_RDMA1, DDP_COMPONENT_DPI0,
- MT8365_DISP_REG_CONFIG_DISP_RDMA1_SOUT_SEL,
- MT8365_RDMA1_SOUT_DPI0, MT8365_RDMA1_SOUT_DPI0
- },
+ MMSYS_ROUTE(OVL0, RDMA0,
+ MT8365_DISP_REG_CONFIG_DISP_OVL0_MOUT_EN,
+ MT8365_DISP_MS_IN_OUT_MASK, MT8365_OVL0_MOUT_PATH0_SEL),
+ MMSYS_ROUTE(OVL0, RDMA0,
+ MT8365_DISP_REG_CONFIG_DISP_RDMA0_SEL_IN,
+ MT8365_DISP_MS_IN_OUT_MASK, MT8365_RDMA0_SEL_IN_OVL0),
+ MMSYS_ROUTE(RDMA0, COLOR0,
+ MT8365_DISP_REG_CONFIG_DISP_RDMA0_SOUT_SEL,
+ MT8365_DISP_MS_IN_OUT_MASK, MT8365_RDMA0_SOUT_COLOR0),
+ MMSYS_ROUTE(COLOR0, CCORR,
+ MT8365_DISP_REG_CONFIG_DISP_COLOR0_SEL_IN,
+ MT8365_DISP_MS_IN_OUT_MASK, MT8365_DISP_COLOR_SEL_IN_COLOR0),
+ MMSYS_ROUTE(DITHER0, DSI0,
+ MT8365_DISP_REG_CONFIG_DISP_DITHER0_MOUT_EN,
+ MT8365_DISP_MS_IN_OUT_MASK, MT8365_DITHER_MOUT_EN_DSI0),
+ MMSYS_ROUTE(DITHER0, DSI0,
+ MT8365_DISP_REG_CONFIG_DISP_DSI0_SEL_IN,
+ MT8365_DISP_MS_IN_OUT_MASK, MT8365_DSI0_SEL_IN_DITHER),
+ MMSYS_ROUTE(RDMA0, COLOR0,
+ MT8365_DISP_REG_CONFIG_DISP_RDMA0_RSZ0_SEL_IN,
+ MT8365_DISP_MS_IN_OUT_MASK, MT8365_RDMA0_RSZ0_SEL_IN_RDMA0),
+ MMSYS_ROUTE(RDMA1, DPI0,
+ MT8365_DISP_REG_CONFIG_DISP_LVDS_SYS_CFG_00,
+ MT8365_LVDS_SYS_CFG_00_SEL_LVDS_PXL_CLK,
+ MT8365_LVDS_SYS_CFG_00_SEL_LVDS_PXL_CLK),
+ MMSYS_ROUTE(RDMA1, DPI0,
+ MT8365_DISP_REG_CONFIG_DISP_DPI0_SEL_IN,
+ MT8365_DISP_MS_IN_OUT_MASK, MT8365_DPI0_SEL_IN_RDMA1),
+ MMSYS_ROUTE(RDMA1, DPI0,
+ MT8365_DISP_REG_CONFIG_DISP_RDMA1_SOUT_SEL,
+ MT8365_DISP_MS_IN_OUT_MASK, MT8365_RDMA1_SOUT_DPI0),
};
#endif /* __SOC_MEDIATEK_MT8365_MMSYS_H */
diff --git a/drivers/soc/mediatek/mtk-mmsys.h b/drivers/soc/mediatek/mtk-mmsys.h
index d370192737ca..fe628d5f5198 100644
--- a/drivers/soc/mediatek/mtk-mmsys.h
+++ b/drivers/soc/mediatek/mtk-mmsys.h
@@ -80,6 +80,20 @@
#define MMSYS_RST_NR(bank, bit) (((bank) * 32) + (bit))
+/*
+ * This macro adds a compile-time check to make sure that the in/out
+ * selection bit(s) fit in the register mask, similar to the bitfield
+ * macros, but it does not transform the value.
+ */
+#define MMSYS_ROUTE(from, to, reg_addr, reg_mask, selection) \
+ { DDP_COMPONENT_##from, DDP_COMPONENT_##to, reg_addr, reg_mask, \
+ (__BUILD_BUG_ON_ZERO_MSG((reg_mask) == 0, "Invalid mask") + \
+ __BUILD_BUG_ON_ZERO_MSG(~(reg_mask) & (selection), \
+ #selection " does not fit in " \
+ #reg_mask) + \
+ (selection)) \
+ }
+
struct mtk_mmsys_routes {
u32 from_comp;
u32 to_comp;
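
A minimal sketch of how the new MMSYS_ROUTE() helper is meant to be used and what the compile-time check buys; this is illustrative only (the example_routes array name is made up), it reuses the MT8365 register names converted above, and it assumes the __BUILD_BUG_ON_ZERO_MSG() helper referenced by the macro is available from <linux/build_bug.h> on recent kernels:

	/* Illustrative only; mirrors the MT8365 OVL0 -> RDMA0 entry above. */
	static const struct mtk_mmsys_routes example_routes[] = {
		MMSYS_ROUTE(OVL0, RDMA0,
			    MT8365_DISP_REG_CONFIG_DISP_OVL0_MOUT_EN,
			    MT8365_DISP_MS_IN_OUT_MASK, MT8365_OVL0_MOUT_PATH0_SEL),
		/*
		 * The following entry would not build: BIT(4) lies outside
		 * GENMASK(3, 0), so ~(reg_mask) & (selection) is non-zero and
		 * __BUILD_BUG_ON_ZERO_MSG() raises the "does not fit in" error
		 * at compile time instead of letting the mask silently
		 * truncate the value when it is written to the register.
		 *
		 * MMSYS_ROUTE(OVL0, RDMA0,
		 *	       MT8365_DISP_REG_CONFIG_DISP_OVL0_MOUT_EN,
		 *	       MT8365_DISP_MS_IN_OUT_MASK, BIT(4)),
		 */
	};
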
diff --git a/drivers/soc/mediatek/mtk-mutex.c b/drivers/soc/mediatek/mtk-mutex.c
index 5250c1d702eb..aaa965d4b050 100644
--- a/drivers/soc/mediatek/mtk-mutex.c
+++ b/drivers/soc/mediatek/mtk-mutex.c
@@ -155,6 +155,7 @@
#define MT8188_MUTEX_MOD_DISP1_VPP_MERGE3 23
#define MT8188_MUTEX_MOD_DISP1_VPP_MERGE4 24
#define MT8188_MUTEX_MOD_DISP1_DISP_MIXER 30
+#define MT8188_MUTEX_MOD_DISP1_DPI1 38
#define MT8188_MUTEX_MOD_DISP1_DP_INTF1 39
#define MT8195_MUTEX_MOD_DISP_OVL0 0
@@ -289,6 +290,7 @@
#define MT8188_MUTEX_SOF_DSI0 1
#define MT8188_MUTEX_SOF_DP_INTF0 3
#define MT8188_MUTEX_SOF_DP_INTF1 4
+#define MT8188_MUTEX_SOF_DPI1 5
#define MT8195_MUTEX_SOF_DSI0 1
#define MT8195_MUTEX_SOF_DSI1 2
#define MT8195_MUTEX_SOF_DP_INTF0 3
@@ -301,6 +303,7 @@
#define MT8188_MUTEX_EOF_DSI0 (MT8188_MUTEX_SOF_DSI0 << 7)
#define MT8188_MUTEX_EOF_DP_INTF0 (MT8188_MUTEX_SOF_DP_INTF0 << 7)
#define MT8188_MUTEX_EOF_DP_INTF1 (MT8188_MUTEX_SOF_DP_INTF1 << 7)
+#define MT8188_MUTEX_EOF_DPI1 (MT8188_MUTEX_SOF_DPI1 << 7)
#define MT8195_MUTEX_EOF_DSI0 (MT8195_MUTEX_SOF_DSI0 << 7)
#define MT8195_MUTEX_EOF_DSI1 (MT8195_MUTEX_SOF_DSI1 << 7)
#define MT8195_MUTEX_EOF_DP_INTF0 (MT8195_MUTEX_SOF_DP_INTF0 << 7)
@@ -472,6 +475,7 @@ static const u8 mt8188_mutex_mod[DDP_COMPONENT_ID_MAX] = {
[DDP_COMPONENT_PWM0] = MT8188_MUTEX_MOD2_DISP_PWM0,
[DDP_COMPONENT_DP_INTF0] = MT8188_MUTEX_MOD_DISP_DP_INTF0,
[DDP_COMPONENT_DP_INTF1] = MT8188_MUTEX_MOD_DISP1_DP_INTF1,
+ [DDP_COMPONENT_DPI1] = MT8188_MUTEX_MOD_DISP1_DPI1,
[DDP_COMPONENT_ETHDR_MIXER] = MT8188_MUTEX_MOD_DISP1_DISP_MIXER,
[DDP_COMPONENT_MDP_RDMA0] = MT8188_MUTEX_MOD_DISP1_MDP_RDMA0,
[DDP_COMPONENT_MDP_RDMA1] = MT8188_MUTEX_MOD_DISP1_MDP_RDMA1,
@@ -686,6 +690,8 @@ static const u16 mt8188_mutex_sof[DDP_MUTEX_SOF_MAX] = {
[MUTEX_SOF_SINGLE_MODE] = MUTEX_SOF_SINGLE_MODE,
[MUTEX_SOF_DSI0] =
MT8188_MUTEX_SOF_DSI0 | MT8188_MUTEX_EOF_DSI0,
+ [MUTEX_SOF_DPI1] =
+ MT8188_MUTEX_SOF_DPI1 | MT8188_MUTEX_EOF_DPI1,
[MUTEX_SOF_DP_INTF0] =
MT8188_MUTEX_SOF_DP_INTF0 | MT8188_MUTEX_EOF_DP_INTF0,
[MUTEX_SOF_DP_INTF1] =
diff --git a/drivers/soc/mediatek/mtk-socinfo.c b/drivers/soc/mediatek/mtk-socinfo.c
index 123b12cd2543..c697a0398d91 100644
--- a/drivers/soc/mediatek/mtk-socinfo.c
+++ b/drivers/soc/mediatek/mtk-socinfo.c
@@ -56,29 +56,39 @@ static struct socinfo_data socinfo_data_table[] = {
MTK_SOCINFO_ENTRY("MT8195", "MT8195GV/EHZA", "Kompanio 1200", 0x81950304, CELL_NOT_USED),
MTK_SOCINFO_ENTRY("MT8195", "MT8195TV/EZA", "Kompanio 1380", 0x81950400, CELL_NOT_USED),
MTK_SOCINFO_ENTRY("MT8195", "MT8195TV/EHZA", "Kompanio 1380", 0x81950404, CELL_NOT_USED),
+ MTK_SOCINFO_ENTRY("MT8370", "MT8370AV/AZA", "Genio 510", 0x83700000, 0x00000081),
+ MTK_SOCINFO_ENTRY("MT8390", "MT8390AV/AZA", "Genio 700", 0x83900000, 0x00000080),
MTK_SOCINFO_ENTRY("MT8395", "MT8395AV/ZA", "Genio 1200", 0x83950100, CELL_NOT_USED),
+ MTK_SOCINFO_ENTRY("MT8395", "MT8395AV/ZA", "Genio 1200", 0x83950800, CELL_NOT_USED),
};
static int mtk_socinfo_create_socinfo_node(struct mtk_socinfo *mtk_socinfop)
{
struct soc_device_attribute *attrs;
- static char machine[30] = {0};
+ struct socinfo_data *data = mtk_socinfop->socinfo_data;
static const char *soc_manufacturer = "MediaTek";
attrs = devm_kzalloc(mtk_socinfop->dev, sizeof(*attrs), GFP_KERNEL);
if (!attrs)
return -ENOMEM;
- snprintf(machine, sizeof(machine), "%s (%s)", mtk_socinfop->socinfo_data->marketing_name,
- mtk_socinfop->socinfo_data->soc_name);
- attrs->family = soc_manufacturer;
- attrs->machine = machine;
+ if (data->marketing_name != NULL && data->marketing_name[0] != '\0')
+ attrs->family = devm_kasprintf(mtk_socinfop->dev, GFP_KERNEL, "MediaTek %s",
+ data->marketing_name);
+ else
+ attrs->family = soc_manufacturer;
+
+ attrs->soc_id = data->soc_name;
+ /*
+ * The "machine" field will be populated automatically with the model
+ * name from board DTS (if available).
+	 */
mtk_socinfop->soc_dev = soc_device_register(attrs);
if (IS_ERR(mtk_socinfop->soc_dev))
return PTR_ERR(mtk_socinfop->soc_dev);
- dev_info(mtk_socinfop->dev, "%s %s SoC detected.\n", soc_manufacturer, attrs->machine);
+ dev_info(mtk_socinfop->dev, "%s (%s) SoC detected.\n", attrs->family, attrs->soc_id);
return 0;
}
diff --git a/drivers/soc/qcom/ice.c b/drivers/soc/qcom/ice.c
index 393d2d1d275f..2310afa77b76 100644
--- a/drivers/soc/qcom/ice.c
+++ b/drivers/soc/qcom/ice.c
@@ -11,6 +11,7 @@
#include <linux/cleanup.h>
#include <linux/clk.h>
#include <linux/delay.h>
+#include <linux/device.h>
#include <linux/iopoll.h>
#include <linux/of.h>
#include <linux/of_platform.h>
@@ -261,7 +262,7 @@ static struct qcom_ice *qcom_ice_create(struct device *dev,
* Return: ICE pointer on success, NULL if there is no ICE data provided by the
* consumer or ERR_PTR() on error.
*/
-struct qcom_ice *of_qcom_ice_get(struct device *dev)
+static struct qcom_ice *of_qcom_ice_get(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct qcom_ice *ice;
@@ -322,7 +323,53 @@ struct qcom_ice *of_qcom_ice_get(struct device *dev)
return ice;
}
-EXPORT_SYMBOL_GPL(of_qcom_ice_get);
+
+static void qcom_ice_put(const struct qcom_ice *ice)
+{
+ struct platform_device *pdev = to_platform_device(ice->dev);
+
+ if (!platform_get_resource_byname(pdev, IORESOURCE_MEM, "ice"))
+ platform_device_put(pdev);
+}
+
+static void devm_of_qcom_ice_put(struct device *dev, void *res)
+{
+ qcom_ice_put(*(struct qcom_ice **)res);
+}
+
+/**
+ * devm_of_qcom_ice_get() - Devres managed helper to get an ICE instance from
+ * a DT node.
+ * @dev: device pointer for the consumer device.
+ *
+ * This function provides an ICE instance either by creating one for the
+ * consumer device, when its DT node supplies the 'ice' reg range and the
+ * 'ice' clock (legacy DT style), or, when the consumer instead points to a
+ * dedicated ICE DT node through the 'qcom,ice' phandle property, by
+ * returning the instance already created for that node.
+ *
+ * Return: ICE pointer on success, NULL if there is no ICE data provided by the
+ * consumer or ERR_PTR() on error.
+ */
+struct qcom_ice *devm_of_qcom_ice_get(struct device *dev)
+{
+ struct qcom_ice *ice, **dr;
+
+ dr = devres_alloc(devm_of_qcom_ice_put, sizeof(*dr), GFP_KERNEL);
+ if (!dr)
+ return ERR_PTR(-ENOMEM);
+
+ ice = of_qcom_ice_get(dev);
+ if (!IS_ERR_OR_NULL(ice)) {
+ *dr = ice;
+ devres_add(dev, dr);
+ } else {
+ devres_free(dr);
+ }
+
+ return ice;
+}
+EXPORT_SYMBOL_GPL(devm_of_qcom_ice_get);
static int qcom_ice_probe(struct platform_device *pdev)
{
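
For context, a hedged sketch of how a consumer driver would use the new devres-managed getter; the host driver and probe function below are hypothetical, only devm_of_qcom_ice_get() itself comes from this change:

	#include <linux/platform_device.h>
	#include <soc/qcom/ice.h>

	/* Hypothetical consumer probe (e.g. a UFS/SDHCI host driver). */
	static int example_host_probe(struct platform_device *pdev)
	{
		struct qcom_ice *ice;

		/* NULL: no ICE described in DT; ERR_PTR(): lookup/creation failed. */
		ice = devm_of_qcom_ice_get(&pdev->dev);
		if (IS_ERR(ice))
			return PTR_ERR(ice);
		if (!ice)
			return 0;	/* inline crypto not available, carry on */

		/*
		 * No explicit put is required: the devres action registered by
		 * devm_of_qcom_ice_get() drops the platform-device reference
		 * taken for the 'qcom,ice' phandle case when this device
		 * unbinds.
		 */
		return 0;
	}
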
diff --git a/drivers/soc/qcom/pdr_interface.c b/drivers/soc/qcom/pdr_interface.c
index 328b6153b2be..71be378d2e43 100644
--- a/drivers/soc/qcom/pdr_interface.c
+++ b/drivers/soc/qcom/pdr_interface.c
@@ -75,7 +75,6 @@ static int pdr_locator_new_server(struct qmi_handle *qmi,
{
struct pdr_handle *pdr = container_of(qmi, struct pdr_handle,
locator_hdl);
- struct pdr_service *pds;
mutex_lock(&pdr->lock);
/* Create a local client port for QMI communication */
@@ -87,12 +86,7 @@ static int pdr_locator_new_server(struct qmi_handle *qmi,
mutex_unlock(&pdr->lock);
/* Service pending lookup requests */
- mutex_lock(&pdr->list_lock);
- list_for_each_entry(pds, &pdr->lookups, node) {
- if (pds->need_locator_lookup)
- schedule_work(&pdr->locator_work);
- }
- mutex_unlock(&pdr->list_lock);
+ schedule_work(&pdr->locator_work);
return 0;
}
diff --git a/drivers/soc/qcom/pdr_internal.h b/drivers/soc/qcom/pdr_internal.h
index 8d17f7fb79e7..039508c1bbf7 100644
--- a/drivers/soc/qcom/pdr_internal.h
+++ b/drivers/soc/qcom/pdr_internal.h
@@ -91,7 +91,6 @@ struct servreg_loc_pfr_resp {
struct qmi_response_type_v01 rsp;
};
-extern const struct qmi_elem_info servreg_location_entry_ei[];
extern const struct qmi_elem_info servreg_get_domain_list_req_ei[];
extern const struct qmi_elem_info servreg_get_domain_list_resp_ei[];
extern const struct qmi_elem_info servreg_register_listener_req_ei[];
diff --git a/drivers/soc/qcom/pmic_glink.c b/drivers/soc/qcom/pmic_glink.c
index 052c292eeda6..cde19cdfd3c7 100644
--- a/drivers/soc/qcom/pmic_glink.c
+++ b/drivers/soc/qcom/pmic_glink.c
@@ -233,7 +233,7 @@ static void pmic_glink_pdr_callback(int state, char *svc_path, void *priv)
static int pmic_glink_rpmsg_probe(struct rpmsg_device *rpdev)
{
- struct pmic_glink *pg = __pmic_glink;
+ struct pmic_glink *pg;
guard(mutex)(&__pmic_glink_lock);
pg = __pmic_glink;
diff --git a/drivers/soc/qcom/qcom_aoss.c b/drivers/soc/qcom/qcom_aoss.c
index 0320ad3b9148..a543ab9bee6c 100644
--- a/drivers/soc/qcom/qcom_aoss.c
+++ b/drivers/soc/qcom/qcom_aoss.c
@@ -12,6 +12,7 @@
#include <linux/platform_device.h>
#include <linux/thermal.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
#include <linux/soc/qcom/qcom_aoss.h>
#define CREATE_TRACE_POINTS
@@ -358,7 +359,7 @@ static int qmp_cdev_set_cur_state(struct thermal_cooling_device *cdev,
return 0;
ret = qmp_send(qmp_cdev->qmp, "{class: volt_flr, event:zero_temp, res:%s, value:%s}",
- qmp_cdev->name, cdev_state ? "on" : "off");
+ qmp_cdev->name, str_on_off(cdev_state));
if (!ret)
qmp_cdev->state = cdev_state;
diff --git a/drivers/soc/qcom/qcom_pd_mapper.c b/drivers/soc/qcom/qcom_pd_mapper.c
index 154ca5beb471..1d1c438be3e7 100644
--- a/drivers/soc/qcom/qcom_pd_mapper.c
+++ b/drivers/soc/qcom/qcom_pd_mapper.c
@@ -429,6 +429,16 @@ static const struct qcom_pdm_domain_data *sc8280xp_domains[] = {
NULL,
};
+/* Unlike SDM660, SDM630/636 lack CDSP */
+static const struct qcom_pdm_domain_data *sdm630_domains[] = {
+ &adsp_audio_pd,
+ &adsp_root_pd,
+ &adsp_sensor_pd,
+ &mpss_root_pd,
+ &mpss_wlan_pd,
+ NULL,
+};
+
static const struct qcom_pdm_domain_data *sdm660_domains[] = {
&adsp_audio_pd,
&adsp_root_pd,
@@ -546,6 +556,8 @@ static const struct of_device_id qcom_pdm_domains[] __maybe_unused = {
{ .compatible = "qcom,sc7280", .data = sc7280_domains, },
{ .compatible = "qcom,sc8180x", .data = sc8180x_domains, },
{ .compatible = "qcom,sc8280xp", .data = sc8280xp_domains, },
+ { .compatible = "qcom,sdm630", .data = sdm630_domains, },
+ { .compatible = "qcom,sdm636", .data = sdm630_domains, },
{ .compatible = "qcom,sda660", .data = sdm660_domains, },
{ .compatible = "qcom,sdm660", .data = sdm660_domains, },
{ .compatible = "qcom,sdm670", .data = sdm670_domains, },
diff --git a/drivers/soc/qcom/qcom_pdr_msg.c b/drivers/soc/qcom/qcom_pdr_msg.c
index bf3e4a47165e..ca98932140d8 100644
--- a/drivers/soc/qcom/qcom_pdr_msg.c
+++ b/drivers/soc/qcom/qcom_pdr_msg.c
@@ -8,7 +8,7 @@
#include "pdr_internal.h"
-const struct qmi_elem_info servreg_location_entry_ei[] = {
+static const struct qmi_elem_info servreg_location_entry_ei[] = {
{
.data_type = QMI_STRING,
.elem_len = SERVREG_NAME_LENGTH + 1,
@@ -47,7 +47,6 @@ const struct qmi_elem_info servreg_location_entry_ei[] = {
},
{}
};
-EXPORT_SYMBOL_GPL(servreg_location_entry_ei);
const struct qmi_elem_info servreg_get_domain_list_req_ei[] = {
{
diff --git a/drivers/soc/renesas/Kconfig b/drivers/soc/renesas/Kconfig
index 6d2e135eed89..49648cf28bd2 100644
--- a/drivers/soc/renesas/Kconfig
+++ b/drivers/soc/renesas/Kconfig
@@ -334,6 +334,7 @@ config ARCH_R9A07G054
config ARCH_R9A08G045
bool "ARM64 Platform support for RZ/G3S"
select ARCH_RZG2L
+ select SYSC_R9A08G045
help
This enables support for the Renesas RZ/G3S SoC variants.
@@ -347,12 +348,14 @@ config ARCH_R9A09G011
config ARCH_R9A09G047
bool "ARM64 Platform support for RZ/G3E"
+ select SYS_R9A09G047
help
This enables support for the Renesas RZ/G3E SoC variants.
config ARCH_R9A09G057
bool "ARM64 Platform support for RZ/V2H(P)"
select RENESAS_RZV2H_ICU
+ select SYS_R9A09G057
help
This enables support for the Renesas RZ/V2H(P) SoC variants.
@@ -383,4 +386,19 @@ config PWC_RZV2M
config RST_RCAR
bool "Reset Controller support for R-Car" if COMPILE_TEST
+config SYSC_RZ
+ bool "System controller for RZ SoCs" if COMPILE_TEST
+
+config SYSC_R9A08G045
+ bool "Renesas RZ/G3S System controller support" if COMPILE_TEST
+ select SYSC_RZ
+
+config SYS_R9A09G047
+ bool "Renesas RZ/G3E System controller support" if COMPILE_TEST
+ select SYSC_RZ
+
+config SYS_R9A09G057
+ bool "Renesas RZ/V2H System controller support" if COMPILE_TEST
+ select SYSC_RZ
+
endif # SOC_RENESAS
diff --git a/drivers/soc/renesas/Makefile b/drivers/soc/renesas/Makefile
index 734f8f8cefa4..81d4c5726e4c 100644
--- a/drivers/soc/renesas/Makefile
+++ b/drivers/soc/renesas/Makefile
@@ -6,7 +6,11 @@ obj-$(CONFIG_SOC_RENESAS) += renesas-soc.o
ifdef CONFIG_SMP
obj-$(CONFIG_ARCH_R9A06G032) += r9a06g032-smp.o
endif
+obj-$(CONFIG_SYSC_R9A08G045) += r9a08g045-sysc.o
+obj-$(CONFIG_SYS_R9A09G047) += r9a09g047-sys.o
+obj-$(CONFIG_SYS_R9A09G057) += r9a09g057-sys.o
# Family
obj-$(CONFIG_PWC_RZV2M) += pwc-rzv2m.o
obj-$(CONFIG_RST_RCAR) += rcar-rst.o
+obj-$(CONFIG_SYSC_RZ) += rz-sysc.o
diff --git a/drivers/soc/renesas/r9a08g045-sysc.c b/drivers/soc/renesas/r9a08g045-sysc.c
new file mode 100644
index 000000000000..f4db1431e036
--- /dev/null
+++ b/drivers/soc/renesas/r9a08g045-sysc.c
@@ -0,0 +1,23 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * RZ/G3S System controller driver
+ *
+ * Copyright (C) 2024 Renesas Electronics Corp.
+ */
+
+#include <linux/bits.h>
+#include <linux/init.h>
+
+#include "rz-sysc.h"
+
+static const struct rz_sysc_soc_id_init_data rzg3s_sysc_soc_id_init_data __initconst = {
+ .family = "RZ/G3S",
+ .id = 0x85e0447,
+ .devid_offset = 0xa04,
+ .revision_mask = GENMASK(31, 28),
+ .specific_id_mask = GENMASK(27, 0),
+};
+
+const struct rz_sysc_init_data rzg3s_sysc_init_data __initconst = {
+ .soc_id_init_data = &rzg3s_sysc_soc_id_init_data,
+};
diff --git a/drivers/soc/renesas/r9a09g047-sys.c b/drivers/soc/renesas/r9a09g047-sys.c
new file mode 100644
index 000000000000..cd2eb7782cfe
--- /dev/null
+++ b/drivers/soc/renesas/r9a09g047-sys.c
@@ -0,0 +1,67 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * RZ/G3E System controller (SYS) driver
+ *
+ * Copyright (C) 2025 Renesas Electronics Corp.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/io.h>
+
+#include "rz-sysc.h"
+
+/* Register Offsets */
+#define SYS_LSI_MODE 0x300
+/*
+ * BOOTPLLCA[1:0]
+ * [0,0] => 1.1GHZ
+ * [0,1] => 1.5GHZ
+ * [1,0] => 1.6GHZ
+ * [1,1] => 1.7GHZ
+ */
+#define SYS_LSI_MODE_STAT_BOOTPLLCA55 GENMASK(12, 11)
+#define SYS_LSI_MODE_CA55_1_7GHZ 0x3
+
+#define SYS_LSI_PRR 0x308
+#define SYS_LSI_PRR_CA55_DIS BIT(8)
+#define SYS_LSI_PRR_NPU_DIS BIT(1)
+
+static void rzg3e_sys_print_id(struct device *dev,
+ void __iomem *sysc_base,
+ struct soc_device_attribute *soc_dev_attr)
+{
+ bool is_quad_core, npu_enabled;
+ u32 prr_val, mode_val;
+
+ prr_val = readl(sysc_base + SYS_LSI_PRR);
+ mode_val = readl(sysc_base + SYS_LSI_MODE);
+
+ /* Check CPU and NPU configuration */
+ is_quad_core = !(prr_val & SYS_LSI_PRR_CA55_DIS);
+ npu_enabled = !(prr_val & SYS_LSI_PRR_NPU_DIS);
+
+ dev_info(dev, "Detected Renesas %s Core %s %s Rev %s%s\n",
+ is_quad_core ? "Quad" : "Dual", soc_dev_attr->family,
+ soc_dev_attr->soc_id, soc_dev_attr->revision,
+ npu_enabled ? " with Ethos-U55" : "");
+
+ /* Check CA55 PLL configuration */
+ if (FIELD_GET(SYS_LSI_MODE_STAT_BOOTPLLCA55, mode_val) != SYS_LSI_MODE_CA55_1_7GHZ)
+ dev_warn(dev, "CA55 PLL is not set to 1.7GHz\n");
+}
+
+static const struct rz_sysc_soc_id_init_data rzg3e_sys_soc_id_init_data __initconst = {
+ .family = "RZ/G3E",
+ .id = 0x8679447,
+ .devid_offset = 0x304,
+ .revision_mask = GENMASK(31, 28),
+ .specific_id_mask = GENMASK(27, 0),
+ .print_id = rzg3e_sys_print_id,
+};
+
+const struct rz_sysc_init_data rzg3e_sys_init_data = {
+ .soc_id_init_data = &rzg3e_sys_soc_id_init_data,
+};
diff --git a/drivers/soc/renesas/r9a09g057-sys.c b/drivers/soc/renesas/r9a09g057-sys.c
new file mode 100644
index 000000000000..4c21cc29edbc
--- /dev/null
+++ b/drivers/soc/renesas/r9a09g057-sys.c
@@ -0,0 +1,67 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * RZ/V2H System controller (SYS) driver
+ *
+ * Copyright (C) 2025 Renesas Electronics Corp.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/io.h>
+
+#include "rz-sysc.h"
+
+/* Register Offsets */
+#define SYS_LSI_MODE 0x300
+/*
+ * BOOTPLLCA[1:0]
+ * [0,0] => 1.1GHZ
+ * [0,1] => 1.5GHZ
+ * [1,0] => 1.6GHZ
+ * [1,1] => 1.7GHZ
+ */
+#define SYS_LSI_MODE_STAT_BOOTPLLCA55 GENMASK(12, 11)
+#define SYS_LSI_MODE_CA55_1_7GHZ 0x3
+
+#define SYS_LSI_PRR 0x308
+#define SYS_LSI_PRR_GPU_DIS BIT(0)
+#define SYS_LSI_PRR_ISP_DIS BIT(4)
+
+static void rzv2h_sys_print_id(struct device *dev,
+ void __iomem *sysc_base,
+ struct soc_device_attribute *soc_dev_attr)
+{
+ bool gpu_enabled, isp_enabled;
+ u32 prr_val, mode_val;
+
+ prr_val = readl(sysc_base + SYS_LSI_PRR);
+ mode_val = readl(sysc_base + SYS_LSI_MODE);
+
+ /* Check GPU and ISP configuration */
+ gpu_enabled = !(prr_val & SYS_LSI_PRR_GPU_DIS);
+ isp_enabled = !(prr_val & SYS_LSI_PRR_ISP_DIS);
+
+ dev_info(dev, "Detected Renesas %s %s Rev %s%s%s\n",
+ soc_dev_attr->family, soc_dev_attr->soc_id, soc_dev_attr->revision,
+ gpu_enabled ? " with GE3D (Mali-G31)" : "",
+ isp_enabled ? " with ISP (Mali-C55)" : "");
+
+ /* Check CA55 PLL configuration */
+ if (FIELD_GET(SYS_LSI_MODE_STAT_BOOTPLLCA55, mode_val) != SYS_LSI_MODE_CA55_1_7GHZ)
+ dev_warn(dev, "CA55 PLL is not set to 1.7GHz\n");
+}
+
+static const struct rz_sysc_soc_id_init_data rzv2h_sys_soc_id_init_data __initconst = {
+ .family = "RZ/V2H",
+ .id = 0x847a447,
+ .devid_offset = 0x304,
+ .revision_mask = GENMASK(31, 28),
+ .specific_id_mask = GENMASK(27, 0),
+ .print_id = rzv2h_sys_print_id,
+};
+
+const struct rz_sysc_init_data rzv2h_sys_init_data = {
+ .soc_id_init_data = &rzv2h_sys_soc_id_init_data,
+};
diff --git a/drivers/soc/renesas/renesas-soc.c b/drivers/soc/renesas/renesas-soc.c
index 172d59e6fbcf..df2b38417b80 100644
--- a/drivers/soc/renesas/renesas-soc.c
+++ b/drivers/soc/renesas/renesas-soc.c
@@ -71,14 +71,6 @@ static const struct renesas_family fam_rzg2ul __initconst __maybe_unused = {
.name = "RZ/G2UL",
};
-static const struct renesas_family fam_rzg3s __initconst __maybe_unused = {
- .name = "RZ/G3S",
-};
-
-static const struct renesas_family fam_rzv2h __initconst __maybe_unused = {
- .name = "RZ/V2H",
-};
-
static const struct renesas_family fam_rzv2l __initconst __maybe_unused = {
.name = "RZ/V2L",
};
@@ -176,16 +168,6 @@ static const struct renesas_soc soc_rz_g2ul __initconst __maybe_unused = {
.id = 0x8450447,
};
-static const struct renesas_soc soc_rz_g3s __initconst __maybe_unused = {
- .family = &fam_rzg3s,
- .id = 0x85e0447,
-};
-
-static const struct renesas_soc soc_rz_v2h __initconst __maybe_unused = {
- .family = &fam_rzv2h,
- .id = 0x847a447,
-};
-
static const struct renesas_soc soc_rz_v2l __initconst __maybe_unused = {
.family = &fam_rzv2l,
.id = 0x8447447,
@@ -289,7 +271,6 @@ static const struct renesas_soc soc_shmobile_ag5 __initconst __maybe_unused = {
.id = 0x37,
};
-
static const struct of_device_id renesas_socs[] __initconst __maybe_unused = {
#ifdef CONFIG_ARCH_R7S72100
{ .compatible = "renesas,r7s72100", .data = &soc_rz_a1h },
@@ -410,15 +391,9 @@ static const struct of_device_id renesas_socs[] __initconst __maybe_unused = {
#ifdef CONFIG_ARCH_R9A07G054
{ .compatible = "renesas,r9a07g054", .data = &soc_rz_v2l },
#endif
-#ifdef CONFIG_ARCH_R9A08G045
- { .compatible = "renesas,r9a08g045", .data = &soc_rz_g3s },
-#endif
#ifdef CONFIG_ARCH_R9A09G011
{ .compatible = "renesas,r9a09g011", .data = &soc_rz_v2m },
#endif
-#ifdef CONFIG_ARCH_R9A09G057
- { .compatible = "renesas,r9a09g057", .data = &soc_rz_v2h },
-#endif
#ifdef CONFIG_ARCH_SH73A0
{ .compatible = "renesas,sh73a0", .data = &soc_shmobile_ag5 },
#endif
@@ -444,11 +419,6 @@ static const struct renesas_id id_rzg2l __initconst = {
.mask = 0xfffffff,
};
-static const struct renesas_id id_rzv2h __initconst = {
- .offset = 0x304,
- .mask = 0xfffffff,
-};
-
static const struct renesas_id id_rzv2m __initconst = {
.offset = 0x104,
.mask = 0xff,
@@ -466,7 +436,6 @@ static const struct of_device_id renesas_ids[] __initconst = {
{ .compatible = "renesas,r9a07g054-sysc", .data = &id_rzg2l },
{ .compatible = "renesas,r9a08g045-sysc", .data = &id_rzg2l },
{ .compatible = "renesas,r9a09g011-sys", .data = &id_rzv2m },
- { .compatible = "renesas,r9a09g057-sys", .data = &id_rzv2h },
{ .compatible = "renesas,prr", .data = &id_prr },
{ /* sentinel */ }
};
@@ -531,7 +500,7 @@ static int __init renesas_soc_init(void)
eslo = product & 0xf;
soc_dev_attr->revision = kasprintf(GFP_KERNEL, "ES%u.%u",
eshi, eslo);
- } else if (id == &id_rzg2l || id == &id_rzv2h) {
+ } else if (id == &id_rzg2l) {
eshi = ((product >> 28) & 0x0f);
soc_dev_attr->revision = kasprintf(GFP_KERNEL, "%u",
eshi);
diff --git a/drivers/soc/renesas/rz-sysc.c b/drivers/soc/renesas/rz-sysc.c
new file mode 100644
index 000000000000..14db508f669f
--- /dev/null
+++ b/drivers/soc/renesas/rz-sysc.c
@@ -0,0 +1,137 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * RZ System controller driver
+ *
+ * Copyright (C) 2024 Renesas Electronics Corp.
+ */
+
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/sys_soc.h>
+
+#include "rz-sysc.h"
+
+#define field_get(_mask, _reg) (((_reg) & (_mask)) >> (ffs(_mask) - 1))
+
+/**
+ * struct rz_sysc - RZ SYSC private data structure
+ * @base: SYSC base address
+ * @dev: SYSC device pointer
+ */
+struct rz_sysc {
+ void __iomem *base;
+ struct device *dev;
+};
+
+static int rz_sysc_soc_init(struct rz_sysc *sysc, const struct of_device_id *match)
+{
+ const struct rz_sysc_init_data *sysc_data = match->data;
+ const struct rz_sysc_soc_id_init_data *soc_data = sysc_data->soc_id_init_data;
+ struct soc_device_attribute *soc_dev_attr;
+ const char *soc_id_start, *soc_id_end;
+ u32 val, revision, specific_id;
+ struct soc_device *soc_dev;
+ char soc_id[32] = {0};
+ size_t size;
+
+ soc_id_start = strchr(match->compatible, ',') + 1;
+ soc_id_end = strchr(match->compatible, '-');
+ size = soc_id_end - soc_id_start + 1;
+ if (size > 32)
+ size = sizeof(soc_id);
+ strscpy(soc_id, soc_id_start, size);
+
+ soc_dev_attr = devm_kzalloc(sysc->dev, sizeof(*soc_dev_attr), GFP_KERNEL);
+ if (!soc_dev_attr)
+ return -ENOMEM;
+
+ soc_dev_attr->family = devm_kstrdup(sysc->dev, soc_data->family, GFP_KERNEL);
+ if (!soc_dev_attr->family)
+ return -ENOMEM;
+
+ soc_dev_attr->soc_id = devm_kstrdup(sysc->dev, soc_id, GFP_KERNEL);
+ if (!soc_dev_attr->soc_id)
+ return -ENOMEM;
+
+ val = readl(sysc->base + soc_data->devid_offset);
+ revision = field_get(soc_data->revision_mask, val);
+ specific_id = field_get(soc_data->specific_id_mask, val);
+ soc_dev_attr->revision = devm_kasprintf(sysc->dev, GFP_KERNEL, "%u", revision);
+ if (!soc_dev_attr->revision)
+ return -ENOMEM;
+
+ if (soc_data->id && specific_id != soc_data->id) {
+ dev_warn(sysc->dev, "SoC mismatch (product = 0x%x)\n", specific_id);
+ return -ENODEV;
+ }
+
+ /* Try to call SoC-specific device identification */
+ if (soc_data->print_id) {
+ soc_data->print_id(sysc->dev, sysc->base, soc_dev_attr);
+ } else {
+ dev_info(sysc->dev, "Detected Renesas %s %s Rev %s\n",
+ soc_dev_attr->family, soc_dev_attr->soc_id, soc_dev_attr->revision);
+ }
+
+ soc_dev = soc_device_register(soc_dev_attr);
+ if (IS_ERR(soc_dev))
+ return PTR_ERR(soc_dev);
+
+ return 0;
+}
+
+static const struct of_device_id rz_sysc_match[] = {
+#ifdef CONFIG_SYSC_R9A08G045
+ { .compatible = "renesas,r9a08g045-sysc", .data = &rzg3s_sysc_init_data },
+#endif
+#ifdef CONFIG_SYS_R9A09G047
+ { .compatible = "renesas,r9a09g047-sys", .data = &rzg3e_sys_init_data },
+#endif
+#ifdef CONFIG_SYS_R9A09G057
+ { .compatible = "renesas,r9a09g057-sys", .data = &rzv2h_sys_init_data },
+#endif
+ { }
+};
+MODULE_DEVICE_TABLE(of, rz_sysc_match);
+
+static int rz_sysc_probe(struct platform_device *pdev)
+{
+ const struct of_device_id *match;
+ struct device *dev = &pdev->dev;
+ struct rz_sysc *sysc;
+
+ match = of_match_node(rz_sysc_match, dev->of_node);
+ if (!match)
+ return -ENODEV;
+
+ sysc = devm_kzalloc(dev, sizeof(*sysc), GFP_KERNEL);
+ if (!sysc)
+ return -ENOMEM;
+
+ sysc->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(sysc->base))
+ return PTR_ERR(sysc->base);
+
+ sysc->dev = dev;
+ return rz_sysc_soc_init(sysc, match);
+}
+
+static struct platform_driver rz_sysc_driver = {
+ .driver = {
+ .name = "renesas-rz-sysc",
+ .suppress_bind_attrs = true,
+ .of_match_table = rz_sysc_match
+ },
+ .probe = rz_sysc_probe
+};
+
+static int __init rz_sysc_init(void)
+{
+ return platform_driver_register(&rz_sysc_driver);
+}
+subsys_initcall(rz_sysc_init);
+
+MODULE_DESCRIPTION("Renesas RZ System Controller Driver");
+MODULE_AUTHOR("Claudiu Beznea <claudiu.beznea.uj@bp.renesas.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/soc/renesas/rz-sysc.h b/drivers/soc/renesas/rz-sysc.h
new file mode 100644
index 000000000000..aa83948c5117
--- /dev/null
+++ b/drivers/soc/renesas/rz-sysc.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Renesas RZ System Controller
+ *
+ * Copyright (C) 2024 Renesas Electronics Corp.
+ */
+
+#ifndef __SOC_RENESAS_RZ_SYSC_H__
+#define __SOC_RENESAS_RZ_SYSC_H__
+
+#include <linux/device.h>
+#include <linux/sys_soc.h>
+#include <linux/types.h>
+
+/**
+ * struct rz_syc_soc_id_init_data - RZ SYSC SoC identification initialization data
+ * @family: RZ SoC family
+ * @id: RZ SoC expected ID
+ * @devid_offset: SYSC SoC ID register offset
+ * @revision_mask: SYSC SoC ID revision mask
+ * @specific_id_mask: SYSC SoC ID specific ID mask
+ * @print_id: print SoC-specific extended device identification
+ */
+struct rz_sysc_soc_id_init_data {
+ const char * const family;
+ u32 id;
+ u32 devid_offset;
+ u32 revision_mask;
+ u32 specific_id_mask;
+ void (*print_id)(struct device *dev, void __iomem *sysc_base,
+ struct soc_device_attribute *soc_dev_attr);
+};
+
+/**
+ * struct rz_sysc_init_data - RZ SYSC initialization data
+ * @soc_id_init_data: RZ SYSC SoC ID initialization data
+ */
+struct rz_sysc_init_data {
+ const struct rz_sysc_soc_id_init_data *soc_id_init_data;
+};
+
+extern const struct rz_sysc_init_data rzg3e_sys_init_data;
+extern const struct rz_sysc_init_data rzg3s_sysc_init_data;
+extern const struct rz_sysc_init_data rzv2h_sys_init_data;
+
+#endif /* __SOC_RENESAS_RZ_SYSC_H__ */
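
A worked example of how the generic rz-sysc core consumes this init data; the register value is assumed for illustration, everything else follows from the RZ/G3S entries above:

	/*
	 * For the RZ/G3S data the device-ID register sits at offset 0xa04,
	 * the revision is in bits [31:28] and the part-specific ID in
	 * bits [27:0]. With an assumed readback of 0x185e0447:
	 *
	 *   revision    = field_get(GENMASK(31, 28), 0x185e0447) = 0x1
	 *   specific_id = field_get(GENMASK(27, 0),  0x185e0447) = 0x85e0447
	 *
	 * specific_id equals .id (0x85e0447), so rz_sysc_soc_init() registers
	 * the SoC as family "RZ/G3S", soc_id "r9a08g045" (the compatible
	 * string between ',' and '-'), revision "1".
	 */
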
diff --git a/drivers/soc/samsung/exynos-asv.c b/drivers/soc/samsung/exynos-asv.c
index 97006cc3b946..8e681f519526 100644
--- a/drivers/soc/samsung/exynos-asv.c
+++ b/drivers/soc/samsung/exynos-asv.c
@@ -9,6 +9,7 @@
* Samsung Exynos SoC Adaptive Supply Voltage support
*/
+#include <linux/array_size.h>
#include <linux/cpu.h>
#include <linux/device.h>
#include <linux/energy_model.h>
diff --git a/drivers/soc/samsung/exynos-chipid.c b/drivers/soc/samsung/exynos-chipid.c
index e37dde1fb588..c86f1058ceed 100644
--- a/drivers/soc/samsung/exynos-chipid.c
+++ b/drivers/soc/samsung/exynos-chipid.c
@@ -12,6 +12,7 @@
* Samsung Exynos SoC Adaptive Supply Voltage and Chip ID support
*/
+#include <linux/array_size.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/mfd/syscon.h>
@@ -55,7 +56,9 @@ static const struct exynos_soc_id {
{ "EXYNOS5440", 0xE5440000 },
{ "EXYNOS5800", 0xE5422000 },
{ "EXYNOS7420", 0xE7420000 },
+ { "EXYNOS7870", 0xE7870000 },
/* Compatible with: samsung,exynos850-chipid */
+ { "EXYNOS2200", 0xE9925000 },
{ "EXYNOS7885", 0xE7885000 },
{ "EXYNOS850", 0xE3830000 },
{ "EXYNOS8895", 0xE8895000 },
@@ -134,6 +137,8 @@ static int exynos_chipid_probe(struct platform_device *pdev)
soc_dev_attr->revision = devm_kasprintf(&pdev->dev, GFP_KERNEL,
"%x", soc_info.revision);
+ if (!soc_dev_attr->revision)
+ return -ENOMEM;
soc_dev_attr->soc_id = product_id_to_soc_id(soc_info.product_id);
if (!soc_dev_attr->soc_id) {
pr_err("Unknown SoC\n");
diff --git a/drivers/soc/samsung/exynos-pmu.c b/drivers/soc/samsung/exynos-pmu.c
index dd5256e5aae1..c40313886a01 100644
--- a/drivers/soc/samsung/exynos-pmu.c
+++ b/drivers/soc/samsung/exynos-pmu.c
@@ -5,6 +5,7 @@
//
// Exynos - CPU PMU(Power Management Unit) support
+#include <linux/array_size.h>
#include <linux/arm-smccc.h>
#include <linux/of.h>
#include <linux/of_address.h>
diff --git a/drivers/soc/samsung/exynos-usi.c b/drivers/soc/samsung/exynos-usi.c
index 114352695ac2..c5661ac19f7b 100644
--- a/drivers/soc/samsung/exynos-usi.c
+++ b/drivers/soc/samsung/exynos-usi.c
@@ -6,6 +6,7 @@
* Samsung Exynos USI driver (Universal Serial Interface).
*/
+#include <linux/array_size.h>
#include <linux/clk.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
@@ -16,6 +17,18 @@
#include <dt-bindings/soc/samsung,exynos-usi.h>
+/* USIv1: System Register: SW_CONF register bits */
+#define USI_V1_SW_CONF_NONE 0x0
+#define USI_V1_SW_CONF_I2C0 0x1
+#define USI_V1_SW_CONF_I2C1 0x2
+#define USI_V1_SW_CONF_I2C0_1 0x3
+#define USI_V1_SW_CONF_SPI 0x4
+#define USI_V1_SW_CONF_UART 0x8
+#define USI_V1_SW_CONF_UART_I2C1 0xa
+#define USI_V1_SW_CONF_MASK (USI_V1_SW_CONF_I2C0 | USI_V1_SW_CONF_I2C1 | \
+ USI_V1_SW_CONF_I2C0_1 | USI_V1_SW_CONF_SPI | \
+ USI_V1_SW_CONF_UART | USI_V1_SW_CONF_UART_I2C1)
+
/* USIv2: System Register: SW_CONF register bits */
#define USI_V2_SW_CONF_NONE 0x0
#define USI_V2_SW_CONF_UART BIT(0)
@@ -34,7 +47,8 @@
#define USI_OPTION_CLKSTOP_ON BIT(2)
enum exynos_usi_ver {
- USI_VER2 = 2,
+ USI_VER1 = 0,
+ USI_VER2,
};
struct exynos_usi_variant {
@@ -66,19 +80,39 @@ struct exynos_usi_mode {
unsigned int val; /* mode register value */
};
-static const struct exynos_usi_mode exynos_usi_modes[] = {
- [USI_V2_NONE] = { .name = "none", .val = USI_V2_SW_CONF_NONE },
- [USI_V2_UART] = { .name = "uart", .val = USI_V2_SW_CONF_UART },
- [USI_V2_SPI] = { .name = "spi", .val = USI_V2_SW_CONF_SPI },
- [USI_V2_I2C] = { .name = "i2c", .val = USI_V2_SW_CONF_I2C },
+#define USI_MODES_MAX (USI_MODE_UART_I2C1 + 1)
+static const struct exynos_usi_mode exynos_usi_modes[][USI_MODES_MAX] = {
+ [USI_VER1] = {
+ [USI_MODE_NONE] = { .name = "none", .val = USI_V1_SW_CONF_NONE },
+ [USI_MODE_UART] = { .name = "uart", .val = USI_V1_SW_CONF_UART },
+ [USI_MODE_SPI] = { .name = "spi", .val = USI_V1_SW_CONF_SPI },
+ [USI_MODE_I2C] = { .name = "i2c", .val = USI_V1_SW_CONF_I2C0 },
+ [USI_MODE_I2C1] = { .name = "i2c1", .val = USI_V1_SW_CONF_I2C1 },
+ [USI_MODE_I2C0_1] = { .name = "i2c0_1", .val = USI_V1_SW_CONF_I2C0_1 },
+ [USI_MODE_UART_I2C1] = { .name = "uart_i2c1", .val = USI_V1_SW_CONF_UART_I2C1 },
+ }, [USI_VER2] = {
+ [USI_MODE_NONE] = { .name = "none", .val = USI_V2_SW_CONF_NONE },
+ [USI_MODE_UART] = { .name = "uart", .val = USI_V2_SW_CONF_UART },
+ [USI_MODE_SPI] = { .name = "spi", .val = USI_V2_SW_CONF_SPI },
+ [USI_MODE_I2C] = { .name = "i2c", .val = USI_V2_SW_CONF_I2C },
+ },
};
static const char * const exynos850_usi_clk_names[] = { "pclk", "ipclk" };
static const struct exynos_usi_variant exynos850_usi_data = {
.ver = USI_VER2,
.sw_conf_mask = USI_V2_SW_CONF_MASK,
- .min_mode = USI_V2_NONE,
- .max_mode = USI_V2_I2C,
+ .min_mode = USI_MODE_NONE,
+ .max_mode = USI_MODE_I2C,
+ .num_clks = ARRAY_SIZE(exynos850_usi_clk_names),
+ .clk_names = exynos850_usi_clk_names,
+};
+
+static const struct exynos_usi_variant exynos8895_usi_data = {
+ .ver = USI_VER1,
+ .sw_conf_mask = USI_V1_SW_CONF_MASK,
+ .min_mode = USI_MODE_NONE,
+ .max_mode = USI_MODE_UART_I2C1,
.num_clks = ARRAY_SIZE(exynos850_usi_clk_names),
.clk_names = exynos850_usi_clk_names,
};
@@ -87,6 +121,9 @@ static const struct of_device_id exynos_usi_dt_match[] = {
{
.compatible = "samsung,exynos850-usi",
.data = &exynos850_usi_data,
+ }, {
+ .compatible = "samsung,exynos8895-usi",
+ .data = &exynos8895_usi_data,
},
{ } /* sentinel */
};
@@ -109,14 +146,15 @@ static int exynos_usi_set_sw_conf(struct exynos_usi *usi, size_t mode)
if (mode < usi->data->min_mode || mode > usi->data->max_mode)
return -EINVAL;
- val = exynos_usi_modes[mode].val;
+ val = exynos_usi_modes[usi->data->ver][mode].val;
ret = regmap_update_bits(usi->sysreg, usi->sw_conf,
usi->data->sw_conf_mask, val);
if (ret)
return ret;
usi->mode = mode;
- dev_dbg(usi->dev, "protocol: %s\n", exynos_usi_modes[usi->mode].name);
+ dev_dbg(usi->dev, "protocol: %s\n",
+ exynos_usi_modes[usi->data->ver][usi->mode].name);
return 0;
}
@@ -168,10 +206,42 @@ static int exynos_usi_configure(struct exynos_usi *usi)
if (ret)
return ret;
- if (usi->data->ver == USI_VER2)
- return exynos_usi_enable(usi);
+ if (usi->data->ver == USI_VER1)
+ ret = clk_bulk_prepare_enable(usi->data->num_clks,
+ usi->clks);
+ else if (usi->data->ver == USI_VER2)
+ ret = exynos_usi_enable(usi);
- return 0;
+ return ret;
+}
+
+static void exynos_usi_unconfigure(void *data)
+{
+ struct exynos_usi *usi = data;
+ u32 val;
+ int ret;
+
+ if (usi->data->ver == USI_VER1) {
+ clk_bulk_disable_unprepare(usi->data->num_clks, usi->clks);
+ return;
+ }
+
+ ret = clk_bulk_prepare_enable(usi->data->num_clks, usi->clks);
+ if (ret)
+ return;
+
+ /* Make sure that we've stopped providing the clock to USI IP */
+ val = readl(usi->regs + USI_OPTION);
+ val &= ~USI_OPTION_CLKREQ_ON;
+ val |= ~USI_OPTION_CLKSTOP_ON;
+ writel(val, usi->regs + USI_OPTION);
+
+ /* Set USI block state to reset */
+ val = readl(usi->regs + USI_CON);
+ val |= USI_CON_RESET;
+ writel(val, usi->regs + USI_CON);
+
+ clk_bulk_disable_unprepare(usi->data->num_clks, usi->clks);
}
static int exynos_usi_parse_dt(struct device_node *np, struct exynos_usi *usi)
@@ -186,15 +256,11 @@ static int exynos_usi_parse_dt(struct device_node *np, struct exynos_usi *usi)
return -EINVAL;
usi->mode = mode;
- usi->sysreg = syscon_regmap_lookup_by_phandle(np, "samsung,sysreg");
+ usi->sysreg = syscon_regmap_lookup_by_phandle_args(np, "samsung,sysreg",
+ 1, &usi->sw_conf);
if (IS_ERR(usi->sysreg))
return PTR_ERR(usi->sysreg);
- ret = of_property_read_u32_index(np, "samsung,sysreg", 1,
- &usi->sw_conf);
- if (ret)
- return ret;
-
usi->clkreq_on = of_property_read_bool(np, "samsung,clkreq-on");
return 0;
@@ -255,6 +321,10 @@ static int exynos_usi_probe(struct platform_device *pdev)
if (ret)
return ret;
+ ret = devm_add_action_or_reset(&pdev->dev, exynos_usi_unconfigure, usi);
+ if (ret)
+ return ret;
+
/* Make it possible to embed protocol nodes into USI np */
return of_platform_populate(np, NULL, NULL, dev);
}
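The exynos-usi change above turns the flat mode table into a two-dimensional one indexed first by USI version and then by protocol. A stand-alone sketch of that lookup (names and values are illustrative, not the driver's real SW_CONF encodings):

  enum example_usi_ver { EX_VER1, EX_VER2 };

  struct example_usi_mode {
  	const char *name;
  	unsigned int val;
  };

  /* one row of SW_CONF values per hardware revision */
  static const struct example_usi_mode example_modes[][3] = {
  	[EX_VER1] = {
  		{ "none", 0x0 }, { "uart", 0x8 }, { "spi", 0x4 },
  	},
  	[EX_VER2] = {
  		{ "none", 0x0 }, { "uart", 0x1 }, { "spi", 0x2 },
  	},
  };

  static unsigned int example_conf_val(enum example_usi_ver ver, unsigned int mode)
  {
  	/* version selects the row, mode selects the entry, as in the hunk above */
  	return example_modes[ver][mode].val;
  }
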
diff --git a/drivers/soc/samsung/exynos3250-pmu.c b/drivers/soc/samsung/exynos3250-pmu.c
index 30f230ed1769..4bad12a99542 100644
--- a/drivers/soc/samsung/exynos3250-pmu.c
+++ b/drivers/soc/samsung/exynos3250-pmu.c
@@ -5,6 +5,7 @@
//
// Exynos3250 - CPU PMU (Power Management Unit) support
+#include <linux/array_size.h>
#include <linux/soc/samsung/exynos-regs-pmu.h>
#include <linux/soc/samsung/exynos-pmu.h>
diff --git a/drivers/soc/samsung/exynos5250-pmu.c b/drivers/soc/samsung/exynos5250-pmu.c
index 7a2d50be6b4a..2ae5c3e1b07a 100644
--- a/drivers/soc/samsung/exynos5250-pmu.c
+++ b/drivers/soc/samsung/exynos5250-pmu.c
@@ -5,6 +5,7 @@
//
// Exynos5250 - CPU PMU (Power Management Unit) support
+#include <linux/array_size.h>
#include <linux/soc/samsung/exynos-regs-pmu.h>
#include <linux/soc/samsung/exynos-pmu.h>
diff --git a/drivers/soc/samsung/exynos5420-pmu.c b/drivers/soc/samsung/exynos5420-pmu.c
index 6fedcd78cb45..58a2209795f7 100644
--- a/drivers/soc/samsung/exynos5420-pmu.c
+++ b/drivers/soc/samsung/exynos5420-pmu.c
@@ -5,6 +5,7 @@
//
// Exynos5420 - CPU PMU (Power Management Unit) support
+#include <linux/array_size.h>
#include <linux/pm.h>
#include <linux/soc/samsung/exynos-regs-pmu.h>
#include <linux/soc/samsung/exynos-pmu.h>
diff --git a/drivers/soc/tegra/pmc.c b/drivers/soc/tegra/pmc.c
index a08c377933c5..51b9d852bb6a 100644
--- a/drivers/soc/tegra/pmc.c
+++ b/drivers/soc/tegra/pmc.c
@@ -47,6 +47,7 @@
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
+#include <linux/string_choices.h>
#include <linux/syscore_ops.h>
#include <soc/tegra/common.h>
@@ -1181,7 +1182,7 @@ static int powergate_show(struct seq_file *s, void *data)
continue;
seq_printf(s, " %9s %7s\n", pmc->soc->powergates[i],
- status ? "yes" : "no");
+ str_yes_no(status));
}
return 0;
diff --git a/drivers/soc/ti/k3-socinfo.c b/drivers/soc/ti/k3-socinfo.c
index 4fb0f0a24828..704039eb3c07 100644
--- a/drivers/soc/ti/k3-socinfo.c
+++ b/drivers/soc/ti/k3-socinfo.c
@@ -105,6 +105,12 @@ err_unknown_variant:
return -ENODEV;
}
+static const struct regmap_config k3_chipinfo_regmap_cfg = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+};
+
static int k3_chipinfo_probe(struct platform_device *pdev)
{
struct device_node *node = pdev->dev.of_node;
@@ -112,13 +118,18 @@ static int k3_chipinfo_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct soc_device *soc_dev;
struct regmap *regmap;
+ void __iomem *base;
u32 partno_id;
u32 variant;
u32 jtag_id;
u32 mfg;
int ret;
- regmap = device_node_to_regmap(node);
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ regmap = regmap_init_mmio(dev, base, &k3_chipinfo_regmap_cfg);
if (IS_ERR(regmap))
return PTR_ERR(regmap);
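The k3-socinfo change above replaces device_node_to_regmap() with an explicit MMIO regmap built over an ioremapped platform resource. A minimal sketch of that pattern (probe function and register offset are hypothetical):

  #include <linux/platform_device.h>
  #include <linux/regmap.h>

  static const struct regmap_config example_regmap_cfg = {
  	.reg_bits   = 32,
  	.val_bits   = 32,
  	.reg_stride = 4,
  };

  static int example_probe(struct platform_device *pdev)
  {
  	void __iomem *base;
  	struct regmap *map;
  	u32 id;
  	int ret;

  	/* map the first MEM resource with devm lifetime */
  	base = devm_platform_ioremap_resource(pdev, 0);
  	if (IS_ERR(base))
  		return PTR_ERR(base);

  	/* wrap the mapping in a 32-bit MMIO regmap, as the hunk does;
  	 * a full driver would also need regmap_exit() or the devm_ variant
  	 */
  	map = regmap_init_mmio(&pdev->dev, base, &example_regmap_cfg);
  	if (IS_ERR(map))
  		return PTR_ERR(map);

  	ret = regmap_read(map, 0x0, &id);	/* offset 0x0 is illustrative */
  	if (ret)
  		return ret;

  	dev_info(&pdev->dev, "id %#x\n", id);
  	return 0;
  }
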
diff --git a/drivers/soundwire/qcom.c b/drivers/soundwire/qcom.c
index 0f45e3404756..295a46dc2be7 100644
--- a/drivers/soundwire/qcom.c
+++ b/drivers/soundwire/qcom.c
@@ -156,6 +156,7 @@ struct qcom_swrm_port_config {
u8 word_length;
u8 blk_group_count;
u8 lane_control;
+ u8 ch_mask;
};
/*
@@ -1048,9 +1049,13 @@ static int qcom_swrm_port_enable(struct sdw_bus *bus,
{
u32 reg = SWRM_DP_PORT_CTRL_BANK(enable_ch->port_num, bank);
struct qcom_swrm_ctrl *ctrl = to_qcom_sdw(bus);
+ struct qcom_swrm_port_config *pcfg;
u32 val;
+ pcfg = &ctrl->pconfig[enable_ch->port_num];
ctrl->reg_read(ctrl, reg, &val);
+ if (pcfg->ch_mask != SWR_INVALID_PARAM && pcfg->ch_mask != 0)
+ enable_ch->ch_mask = pcfg->ch_mask;
if (enable_ch->enable)
val |= (enable_ch->ch_mask << SWRM_DP_PORT_CTRL_EN_CHAN_SHFT);
@@ -1270,6 +1275,26 @@ static void *qcom_swrm_get_sdw_stream(struct snd_soc_dai *dai, int direction)
return ctrl->sruntime[dai->id];
}
+static int qcom_swrm_set_channel_map(struct snd_soc_dai *dai,
+ unsigned int tx_num, const unsigned int *tx_slot,
+ unsigned int rx_num, const unsigned int *rx_slot)
+{
+ struct qcom_swrm_ctrl *ctrl = dev_get_drvdata(dai->dev);
+ int i;
+
+ if (tx_slot) {
+ for (i = 0; i < tx_num; i++)
+ ctrl->pconfig[i].ch_mask = tx_slot[i];
+ }
+
+ if (rx_slot) {
+ for (i = 0; i < rx_num; i++)
+ ctrl->pconfig[i].ch_mask = rx_slot[i];
+ }
+
+ return 0;
+}
+
static int qcom_swrm_startup(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
@@ -1306,6 +1331,7 @@ static const struct snd_soc_dai_ops qcom_swrm_pdm_dai_ops = {
.shutdown = qcom_swrm_shutdown,
.set_stream = qcom_swrm_set_sdw_stream,
.get_stream = qcom_swrm_get_sdw_stream,
+ .set_channel_map = qcom_swrm_set_channel_map,
};
static const struct snd_soc_component_driver qcom_swrm_dai_component = {
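The new .set_channel_map callback above records a per-port channel mask that qcom_swrm_port_enable() then applies in place of the stream-computed mask. A hedged usage sketch from the DAI consumer side, assuming the standard ASoC helper snd_soc_dai_set_channel_map() (port numbers and masks are made up):

  #include <linux/kernel.h>
  #include <sound/soc-dai.h>

  static int example_map_ports(struct snd_soc_dai *sdw_dai)
  {
  	unsigned int tx_slots[] = { 0x1 };		/* ch_mask for TX port 0 */
  	unsigned int rx_slots[] = { 0x3, 0x1 };		/* ch_mask for RX ports 0 and 1 */

  	return snd_soc_dai_set_channel_map(sdw_dai,
  					   ARRAY_SIZE(tx_slots), tx_slots,
  					   ARRAY_SIZE(rx_slots), rx_slots);
  }
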
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index ea8a31032927..f40c282d4d63 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -55,6 +55,9 @@ config SPI_MEM
This extension is meant to simplify interaction with SPI memories
by providing a high-level interface to send memory-like commands.
+config SPI_OFFLOAD
+ bool
+
comment "SPI Master Controller Drivers"
config SPI_AIROHA_SNFI
@@ -176,6 +179,7 @@ config SPI_AU1550
config SPI_AXI_SPI_ENGINE
tristate "Analog Devices AXI SPI Engine controller"
depends on HAS_IOMEM
+ select SPI_OFFLOAD
help
This enables support for the Analog Devices AXI SPI Engine SPI controller.
It is part of the SPI Engine framework that is used in some Analog Devices
@@ -932,6 +936,15 @@ config SPI_QCOM_QSPI
help
QSPI(Quad SPI) driver for Qualcomm QSPI controller.
+config SPI_QPIC_SNAND
+ bool "QPIC SNAND controller"
+ depends on ARCH_QCOM || COMPILE_TEST
+ select MTD
+ help
+ QPIC_SNAND (QPIC SPI NAND) driver for the Qualcomm QPIC controller.
+ The QPIC controller supports both parallel NAND and serial NAND.
+ This config enables the serial NAND driver for the QPIC controller.
+
config SPI_QUP
tristate "Qualcomm SPI controller with QUP interface"
depends on ARCH_QCOM || COMPILE_TEST
@@ -1021,6 +1034,15 @@ config SPI_SN_F_OSPI
for connecting an SPI Flash memory over up to 8-bit wide bus.
It supports indirect access mode only.
+config SPI_SG2044_NOR
+ tristate "SG2044 SPI NOR Controller"
+ depends on ARCH_SOPHGO || COMPILE_TEST
+ help
+ This enables support for the SG2044 SPI NOR controller,
+ which supports Dual/Quad read and write operations while
+ also supporting devices with 3-byte and 4-byte
+ addressing.
+
config SPI_SPRD
tristate "Spreadtrum SPI controller"
depends on ARCH_SPRD || COMPILE_TEST
@@ -1045,6 +1067,16 @@ config SPI_STM32
is not available, the driver automatically falls back to
PIO mode.
+config SPI_STM32_OSPI
+ tristate "STMicroelectronics STM32 OCTO SPI controller"
+ depends on ARCH_STM32 || COMPILE_TEST
+ depends on OF
+ depends on SPI_MEM
+ help
+ This enables support for the Octo SPI controller in master mode.
+ This driver does not support generic SPI. The implementation only
+ supports the spi-mem interface.
+
config SPI_STM32_QSPI
tristate "STMicroelectronics STM32 QUAD SPI controller"
depends on ARCH_STM32 || COMPILE_TEST
@@ -1317,4 +1349,16 @@ endif # SPI_SLAVE
config SPI_DYNAMIC
def_bool ACPI || OF_DYNAMIC || SPI_SLAVE
+if SPI_OFFLOAD
+
+comment "SPI Offload triggers"
+
+config SPI_OFFLOAD_TRIGGER_PWM
+ tristate "SPI offload trigger using PWM"
+ depends on PWM
+ help
+ Generic SPI offload trigger implemented using PWM output.
+
+endif # SPI_OFFLOAD
+
endif # SPI
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index 9db7554c1864..c3a1a47b3bf4 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -10,6 +10,7 @@ ccflags-$(CONFIG_SPI_DEBUG) := -DDEBUG
obj-$(CONFIG_SPI_MASTER) += spi.o
obj-$(CONFIG_SPI_MEM) += spi-mem.o
obj-$(CONFIG_SPI_MUX) += spi-mux.o
+obj-$(CONFIG_SPI_OFFLOAD) += spi-offload.o
obj-$(CONFIG_SPI_SPIDEV) += spidev.o
obj-$(CONFIG_SPI_LOOPBACK_TEST) += spi-loopback-test.o
@@ -116,6 +117,7 @@ obj-$(CONFIG_SPI_PXA2XX) += spi-pxa2xx-platform.o
obj-$(CONFIG_SPI_PXA2XX_PCI) += spi-pxa2xx-pci.o
obj-$(CONFIG_SPI_QCOM_GENI) += spi-geni-qcom.o
obj-$(CONFIG_SPI_QCOM_QSPI) += spi-qcom-qspi.o
+obj-$(CONFIG_SPI_QPIC_SNAND) += spi-qpic-snand.o
obj-$(CONFIG_SPI_QUP) += spi-qup.o
obj-$(CONFIG_SPI_ROCKCHIP) += spi-rockchip.o
obj-$(CONFIG_SPI_ROCKCHIP_SFC) += spi-rockchip-sfc.o
@@ -134,9 +136,11 @@ obj-$(CONFIG_SPI_SH_SCI) += spi-sh-sci.o
obj-$(CONFIG_SPI_SIFIVE) += spi-sifive.o
obj-$(CONFIG_SPI_SLAVE_MT27XX) += spi-slave-mt27xx.o
obj-$(CONFIG_SPI_SN_F_OSPI) += spi-sn-f-ospi.o
+obj-$(CONFIG_SPI_SG2044_NOR) += spi-sg2044-nor.o
obj-$(CONFIG_SPI_SPRD) += spi-sprd.o
obj-$(CONFIG_SPI_SPRD_ADI) += spi-sprd-adi.o
obj-$(CONFIG_SPI_STM32) += spi-stm32.o
+obj-$(CONFIG_SPI_STM32_OSPI) += spi-stm32-ospi.o
obj-$(CONFIG_SPI_STM32_QSPI) += spi-stm32-qspi.o
obj-$(CONFIG_SPI_ST_SSC4) += spi-st-ssc4.o
obj-$(CONFIG_SPI_SUN4I) += spi-sun4i.o
@@ -163,3 +167,6 @@ obj-$(CONFIG_SPI_AMD) += spi-amd.o
# SPI slave protocol handlers
obj-$(CONFIG_SPI_SLAVE_TIME) += spi-slave-time.o
obj-$(CONFIG_SPI_SLAVE_SYSTEM_CONTROL) += spi-slave-system-control.o
+
+# SPI offload triggers
+obj-$(CONFIG_SPI_OFFLOAD_TRIGGER_PWM) += spi-offload-trigger-pwm.o
diff --git a/drivers/spi/atmel-quadspi.c b/drivers/spi/atmel-quadspi.c
index d8c9be64d006..244ac0106862 100644
--- a/drivers/spi/atmel-quadspi.c
+++ b/drivers/spi/atmel-quadspi.c
@@ -930,11 +930,8 @@ static int atmel_qspi_sama7g5_transfer(struct spi_mem *mem,
/* Release the chip-select. */
ret = atmel_qspi_reg_sync(aq);
- if (ret) {
- pm_runtime_mark_last_busy(&aq->pdev->dev);
- pm_runtime_put_autosuspend(&aq->pdev->dev);
+ if (ret)
return ret;
- }
atmel_qspi_write(QSPI_CR_LASTXFER, aq, QSPI_CR);
return atmel_qspi_wait_for_completion(aq, QSPI_SR_CSRA);
diff --git a/drivers/spi/spi-aspeed-smc.c b/drivers/spi/spi-aspeed-smc.c
index e9beae95dded..62a11142bd63 100644
--- a/drivers/spi/spi-aspeed-smc.c
+++ b/drivers/spi/spi-aspeed-smc.c
@@ -303,13 +303,6 @@ static int do_aspeed_spi_exec_op(struct spi_mem *mem, const struct spi_mem_op *o
u32 ctl_val;
int ret = 0;
- dev_dbg(aspi->dev,
- "CE%d %s OP %#x mode:%d.%d.%d.%d naddr:%#x ndummies:%#x len:%#x",
- chip->cs, op->data.dir == SPI_MEM_DATA_IN ? "read" : "write",
- op->cmd.opcode, op->cmd.buswidth, op->addr.buswidth,
- op->dummy.buswidth, op->data.buswidth,
- op->addr.nbytes, op->dummy.nbytes, op->data.nbytes);
-
addr_mode = readl(aspi->regs + CE_CTRL_REG);
addr_mode_backup = addr_mode;
diff --git a/drivers/spi/spi-axi-spi-engine.c b/drivers/spi/spi-axi-spi-engine.c
index 7c252126b33e..da9840957778 100644
--- a/drivers/spi/spi-axi-spi-engine.c
+++ b/drivers/spi/spi-axi-spi-engine.c
@@ -2,11 +2,15 @@
/*
* SPI-Engine SPI controller driver
* Copyright 2015 Analog Devices Inc.
+ * Copyright 2024 BayLibre, SAS
* Author: Lars-Peter Clausen <lars@metafoo.de>
*/
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/completion.h>
+#include <linux/dmaengine.h>
#include <linux/fpga/adi-axi-common.h>
#include <linux/interrupt.h>
#include <linux/io.h>
@@ -14,9 +18,11 @@
#include <linux/module.h>
#include <linux/overflow.h>
#include <linux/platform_device.h>
+#include <linux/spi/offload/provider.h>
#include <linux/spi/spi.h>
#include <trace/events/spi.h>
+#define SPI_ENGINE_REG_OFFLOAD_MEM_ADDR_WIDTH 0x10
#define SPI_ENGINE_REG_RESET 0x40
#define SPI_ENGINE_REG_INT_ENABLE 0x80
@@ -24,6 +30,7 @@
#define SPI_ENGINE_REG_INT_SOURCE 0x88
#define SPI_ENGINE_REG_SYNC_ID 0xc0
+#define SPI_ENGINE_REG_OFFLOAD_SYNC_ID 0xc4
#define SPI_ENGINE_REG_CMD_FIFO_ROOM 0xd0
#define SPI_ENGINE_REG_SDO_FIFO_ROOM 0xd4
@@ -34,10 +41,24 @@
#define SPI_ENGINE_REG_SDI_DATA_FIFO 0xe8
#define SPI_ENGINE_REG_SDI_DATA_FIFO_PEEK 0xec
+#define SPI_ENGINE_MAX_NUM_OFFLOADS 32
+
+#define SPI_ENGINE_REG_OFFLOAD_CTRL(x) (0x100 + SPI_ENGINE_MAX_NUM_OFFLOADS * (x))
+#define SPI_ENGINE_REG_OFFLOAD_STATUS(x) (0x104 + SPI_ENGINE_MAX_NUM_OFFLOADS * (x))
+#define SPI_ENGINE_REG_OFFLOAD_RESET(x) (0x108 + SPI_ENGINE_MAX_NUM_OFFLOADS * (x))
+#define SPI_ENGINE_REG_OFFLOAD_CMD_FIFO(x) (0x110 + SPI_ENGINE_MAX_NUM_OFFLOADS * (x))
+#define SPI_ENGINE_REG_OFFLOAD_SDO_FIFO(x) (0x114 + SPI_ENGINE_MAX_NUM_OFFLOADS * (x))
+
+#define SPI_ENGINE_SPI_OFFLOAD_MEM_WIDTH_SDO GENMASK(15, 8)
+#define SPI_ENGINE_SPI_OFFLOAD_MEM_WIDTH_CMD GENMASK(7, 0)
+
#define SPI_ENGINE_INT_CMD_ALMOST_EMPTY BIT(0)
#define SPI_ENGINE_INT_SDO_ALMOST_EMPTY BIT(1)
#define SPI_ENGINE_INT_SDI_ALMOST_FULL BIT(2)
#define SPI_ENGINE_INT_SYNC BIT(3)
+#define SPI_ENGINE_INT_OFFLOAD_SYNC BIT(4)
+
+#define SPI_ENGINE_OFFLOAD_CTRL_ENABLE BIT(0)
#define SPI_ENGINE_CONFIG_CPHA BIT(0)
#define SPI_ENGINE_CONFIG_CPOL BIT(1)
@@ -79,6 +100,10 @@
#define SPI_ENGINE_CMD_CS_INV(flags) \
SPI_ENGINE_CMD(SPI_ENGINE_INST_CS_INV, 0, (flags))
+/* default sizes - can be changed when SPI Engine firmware is compiled */
+#define SPI_ENGINE_OFFLOAD_CMD_FIFO_SIZE 16
+#define SPI_ENGINE_OFFLOAD_SDO_FIFO_SIZE 16
+
struct spi_engine_program {
unsigned int length;
uint16_t instructions[] __counted_by(length);
@@ -106,6 +131,17 @@ struct spi_engine_message_state {
uint8_t *rx_buf;
};
+enum {
+ SPI_ENGINE_OFFLOAD_FLAG_ASSIGNED,
+ SPI_ENGINE_OFFLOAD_FLAG_PREPARED,
+};
+
+struct spi_engine_offload {
+ struct spi_engine *spi_engine;
+ unsigned long flags;
+ unsigned int offload_num;
+};
+
struct spi_engine {
struct clk *clk;
struct clk *ref_clk;
@@ -118,6 +154,11 @@ struct spi_engine {
unsigned int int_enable;
/* shadows hardware CS inversion flag state */
u8 cs_inv;
+
+ unsigned int offload_ctrl_mem_size;
+ unsigned int offload_sdo_mem_size;
+ struct spi_offload *offload;
+ u32 offload_caps;
};
static void spi_engine_program_add_cmd(struct spi_engine_program *p,
@@ -163,9 +204,9 @@ static void spi_engine_gen_xfer(struct spi_engine_program *p, bool dry,
unsigned int n = min(len, 256U);
unsigned int flags = 0;
- if (xfer->tx_buf)
+ if (xfer->tx_buf || (xfer->offload_flags & SPI_OFFLOAD_XFER_TX_STREAM))
flags |= SPI_ENGINE_TRANSFER_WRITE;
- if (xfer->rx_buf)
+ if (xfer->rx_buf || (xfer->offload_flags & SPI_OFFLOAD_XFER_RX_STREAM))
flags |= SPI_ENGINE_TRANSFER_READ;
spi_engine_program_add_cmd(p, dry,
@@ -217,16 +258,24 @@ static void spi_engine_gen_cs(struct spi_engine_program *p, bool dry,
*
* NB: This is separate from spi_engine_compile_message() because the latter
* is called twice and would otherwise result in double-evaluation.
+ *
+ * Returns 0 on success, -EINVAL on failure.
*/
-static void spi_engine_precompile_message(struct spi_message *msg)
+static int spi_engine_precompile_message(struct spi_message *msg)
{
unsigned int clk_div, max_hz = msg->spi->controller->max_speed_hz;
struct spi_transfer *xfer;
list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+ /* If we have an offload transfer, we can't rx to buffer */
+ if (msg->offload && xfer->rx_buf)
+ return -EINVAL;
+
clk_div = DIV_ROUND_UP(max_hz, xfer->speed_hz);
xfer->effective_speed_hz = max_hz / min(clk_div, 256U);
}
+
+ return 0;
}
static void spi_engine_compile_message(struct spi_message *msg, bool dry,
@@ -521,11 +570,105 @@ static irqreturn_t spi_engine_irq(int irq, void *devid)
return IRQ_HANDLED;
}
+static int spi_engine_offload_prepare(struct spi_message *msg)
+{
+ struct spi_controller *host = msg->spi->controller;
+ struct spi_engine *spi_engine = spi_controller_get_devdata(host);
+ struct spi_engine_program *p = msg->opt_state;
+ struct spi_engine_offload *priv = msg->offload->priv;
+ struct spi_transfer *xfer;
+ void __iomem *cmd_addr;
+ void __iomem *sdo_addr;
+ size_t tx_word_count = 0;
+ unsigned int i;
+
+ if (p->length > spi_engine->offload_ctrl_mem_size)
+ return -EINVAL;
+
+ /* count total number of tx words in message */
+ list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+ /* no support for reading to rx_buf */
+ if (xfer->rx_buf)
+ return -EINVAL;
+
+ if (!xfer->tx_buf)
+ continue;
+
+ if (xfer->bits_per_word <= 8)
+ tx_word_count += xfer->len;
+ else if (xfer->bits_per_word <= 16)
+ tx_word_count += xfer->len / 2;
+ else
+ tx_word_count += xfer->len / 4;
+ }
+
+ if (tx_word_count && !(spi_engine->offload_caps & SPI_OFFLOAD_CAP_TX_STATIC_DATA))
+ return -EINVAL;
+
+ if (tx_word_count > spi_engine->offload_sdo_mem_size)
+ return -EINVAL;
+
+ /*
+ * This protects against calling spi_optimize_message() with an offload
+ * that has already been prepared with a different message.
+ */
+ if (test_and_set_bit_lock(SPI_ENGINE_OFFLOAD_FLAG_PREPARED, &priv->flags))
+ return -EBUSY;
+
+ cmd_addr = spi_engine->base +
+ SPI_ENGINE_REG_OFFLOAD_CMD_FIFO(priv->offload_num);
+ sdo_addr = spi_engine->base +
+ SPI_ENGINE_REG_OFFLOAD_SDO_FIFO(priv->offload_num);
+
+ list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+ if (!xfer->tx_buf)
+ continue;
+
+ if (xfer->bits_per_word <= 8) {
+ const u8 *buf = xfer->tx_buf;
+
+ for (i = 0; i < xfer->len; i++)
+ writel_relaxed(buf[i], sdo_addr);
+ } else if (xfer->bits_per_word <= 16) {
+ const u16 *buf = xfer->tx_buf;
+
+ for (i = 0; i < xfer->len / 2; i++)
+ writel_relaxed(buf[i], sdo_addr);
+ } else {
+ const u32 *buf = xfer->tx_buf;
+
+ for (i = 0; i < xfer->len / 4; i++)
+ writel_relaxed(buf[i], sdo_addr);
+ }
+ }
+
+ for (i = 0; i < p->length; i++)
+ writel_relaxed(p->instructions[i], cmd_addr);
+
+ return 0;
+}
+
+static void spi_engine_offload_unprepare(struct spi_offload *offload)
+{
+ struct spi_engine_offload *priv = offload->priv;
+ struct spi_engine *spi_engine = priv->spi_engine;
+
+ writel_relaxed(1, spi_engine->base +
+ SPI_ENGINE_REG_OFFLOAD_RESET(priv->offload_num));
+ writel_relaxed(0, spi_engine->base +
+ SPI_ENGINE_REG_OFFLOAD_RESET(priv->offload_num));
+
+ clear_bit_unlock(SPI_ENGINE_OFFLOAD_FLAG_PREPARED, &priv->flags);
+}
+
static int spi_engine_optimize_message(struct spi_message *msg)
{
struct spi_engine_program p_dry, *p;
+ int ret;
- spi_engine_precompile_message(msg);
+ ret = spi_engine_precompile_message(msg);
+ if (ret)
+ return ret;
p_dry.length = 0;
spi_engine_compile_message(msg, true, &p_dry);
@@ -537,20 +680,61 @@ static int spi_engine_optimize_message(struct spi_message *msg)
spi_engine_compile_message(msg, false, p);
spi_engine_program_add_cmd(p, false, SPI_ENGINE_CMD_SYNC(
- AXI_SPI_ENGINE_CUR_MSG_SYNC_ID));
+ msg->offload ? 0 : AXI_SPI_ENGINE_CUR_MSG_SYNC_ID));
msg->opt_state = p;
+ if (msg->offload) {
+ ret = spi_engine_offload_prepare(msg);
+ if (ret) {
+ msg->opt_state = NULL;
+ kfree(p);
+ return ret;
+ }
+ }
+
return 0;
}
static int spi_engine_unoptimize_message(struct spi_message *msg)
{
+ if (msg->offload)
+ spi_engine_offload_unprepare(msg->offload);
+
kfree(msg->opt_state);
return 0;
}
+static struct spi_offload
+*spi_engine_get_offload(struct spi_device *spi,
+ const struct spi_offload_config *config)
+{
+ struct spi_controller *host = spi->controller;
+ struct spi_engine *spi_engine = spi_controller_get_devdata(host);
+ struct spi_engine_offload *priv;
+
+ if (!spi_engine->offload)
+ return ERR_PTR(-ENODEV);
+
+ if (config->capability_flags & ~spi_engine->offload_caps)
+ return ERR_PTR(-EINVAL);
+
+ priv = spi_engine->offload->priv;
+
+ if (test_and_set_bit_lock(SPI_ENGINE_OFFLOAD_FLAG_ASSIGNED, &priv->flags))
+ return ERR_PTR(-EBUSY);
+
+ return spi_engine->offload;
+}
+
+static void spi_engine_put_offload(struct spi_offload *offload)
+{
+ struct spi_engine_offload *priv = offload->priv;
+
+ clear_bit_unlock(SPI_ENGINE_OFFLOAD_FLAG_ASSIGNED, &priv->flags);
+}
+
static int spi_engine_setup(struct spi_device *device)
{
struct spi_controller *host = device->controller;
@@ -583,6 +767,12 @@ static int spi_engine_transfer_one_message(struct spi_controller *host,
unsigned int int_enable = 0;
unsigned long flags;
+ if (msg->offload) {
+ dev_err(&host->dev, "Single transfer offload not supported\n");
+ msg->status = -EOPNOTSUPP;
+ goto out;
+ }
+
/* reinitialize message state for this transfer */
memset(st, 0, sizeof(*st));
st->cmd_buf = p->instructions;
@@ -632,11 +822,68 @@ static int spi_engine_transfer_one_message(struct spi_controller *host,
trace_spi_transfer_stop(msg, xfer);
}
+out:
spi_finalize_current_message(host);
return msg->status;
}
+static int spi_engine_trigger_enable(struct spi_offload *offload)
+{
+ struct spi_engine_offload *priv = offload->priv;
+ struct spi_engine *spi_engine = priv->spi_engine;
+ unsigned int reg;
+
+ reg = readl_relaxed(spi_engine->base +
+ SPI_ENGINE_REG_OFFLOAD_CTRL(priv->offload_num));
+ reg |= SPI_ENGINE_OFFLOAD_CTRL_ENABLE;
+ writel_relaxed(reg, spi_engine->base +
+ SPI_ENGINE_REG_OFFLOAD_CTRL(priv->offload_num));
+ return 0;
+}
+
+static void spi_engine_trigger_disable(struct spi_offload *offload)
+{
+ struct spi_engine_offload *priv = offload->priv;
+ struct spi_engine *spi_engine = priv->spi_engine;
+ unsigned int reg;
+
+ reg = readl_relaxed(spi_engine->base +
+ SPI_ENGINE_REG_OFFLOAD_CTRL(priv->offload_num));
+ reg &= ~SPI_ENGINE_OFFLOAD_CTRL_ENABLE;
+ writel_relaxed(reg, spi_engine->base +
+ SPI_ENGINE_REG_OFFLOAD_CTRL(priv->offload_num));
+}
+
+static struct dma_chan
+*spi_engine_tx_stream_request_dma_chan(struct spi_offload *offload)
+{
+ struct spi_engine_offload *priv = offload->priv;
+ char name[16];
+
+ snprintf(name, sizeof(name), "offload%u-tx", priv->offload_num);
+
+ return dma_request_chan(offload->provider_dev, name);
+}
+
+static struct dma_chan
+*spi_engine_rx_stream_request_dma_chan(struct spi_offload *offload)
+{
+ struct spi_engine_offload *priv = offload->priv;
+ char name[16];
+
+ snprintf(name, sizeof(name), "offload%u-rx", priv->offload_num);
+
+ return dma_request_chan(offload->provider_dev, name);
+}
+
+static const struct spi_offload_ops spi_engine_offload_ops = {
+ .trigger_enable = spi_engine_trigger_enable,
+ .trigger_disable = spi_engine_trigger_disable,
+ .tx_stream_request_dma_chan = spi_engine_tx_stream_request_dma_chan,
+ .rx_stream_request_dma_chan = spi_engine_rx_stream_request_dma_chan,
+};
+
static void spi_engine_release_hw(void *p)
{
struct spi_engine *spi_engine = p;
@@ -651,8 +898,7 @@ static int spi_engine_probe(struct platform_device *pdev)
struct spi_engine *spi_engine;
struct spi_controller *host;
unsigned int version;
- int irq;
- int ret;
+ int irq, ret;
irq = platform_get_irq(pdev, 0);
if (irq < 0)
@@ -667,6 +913,46 @@ static int spi_engine_probe(struct platform_device *pdev)
spin_lock_init(&spi_engine->lock);
init_completion(&spi_engine->msg_complete);
+ /*
+ * REVISIT: for now, all SPI Engines only have one offload. In the
+ * future, this should be read from a memory mapped register to
+ * determine the number of offloads enabled at HDL compile time. For
+ * now, we can tell if an offload is present if there is a trigger
+ * source wired up to it.
+ */
+ if (device_property_present(&pdev->dev, "trigger-sources")) {
+ struct spi_engine_offload *priv;
+
+ spi_engine->offload =
+ devm_spi_offload_alloc(&pdev->dev,
+ sizeof(struct spi_engine_offload));
+ if (IS_ERR(spi_engine->offload))
+ return PTR_ERR(spi_engine->offload);
+
+ priv = spi_engine->offload->priv;
+ priv->spi_engine = spi_engine;
+ priv->offload_num = 0;
+
+ spi_engine->offload->ops = &spi_engine_offload_ops;
+ spi_engine->offload_caps = SPI_OFFLOAD_CAP_TRIGGER;
+
+ if (device_property_match_string(&pdev->dev, "dma-names", "offload0-rx") >= 0) {
+ spi_engine->offload_caps |= SPI_OFFLOAD_CAP_RX_STREAM_DMA;
+ spi_engine->offload->xfer_flags |= SPI_OFFLOAD_XFER_RX_STREAM;
+ }
+
+ if (device_property_match_string(&pdev->dev, "dma-names", "offload0-tx") >= 0) {
+ spi_engine->offload_caps |= SPI_OFFLOAD_CAP_TX_STREAM_DMA;
+ spi_engine->offload->xfer_flags |= SPI_OFFLOAD_XFER_TX_STREAM;
+ } else {
+ /*
+ * HDL compile option to enable TX DMA stream also disables
+ * the SDO memory, so can't do both at the same time.
+ */
+ spi_engine->offload_caps |= SPI_OFFLOAD_CAP_TX_STATIC_DATA;
+ }
+ }
+
spi_engine->clk = devm_clk_get_enabled(&pdev->dev, "s_axi_aclk");
if (IS_ERR(spi_engine->clk))
return PTR_ERR(spi_engine->clk);
@@ -688,6 +974,19 @@ static int spi_engine_probe(struct platform_device *pdev)
return -ENODEV;
}
+ if (ADI_AXI_PCORE_VER_MINOR(version) >= 1) {
+ unsigned int sizes = readl(spi_engine->base +
+ SPI_ENGINE_REG_OFFLOAD_MEM_ADDR_WIDTH);
+
+ spi_engine->offload_ctrl_mem_size = 1 <<
+ FIELD_GET(SPI_ENGINE_SPI_OFFLOAD_MEM_WIDTH_CMD, sizes);
+ spi_engine->offload_sdo_mem_size = 1 <<
+ FIELD_GET(SPI_ENGINE_SPI_OFFLOAD_MEM_WIDTH_SDO, sizes);
+ } else {
+ spi_engine->offload_ctrl_mem_size = SPI_ENGINE_OFFLOAD_CMD_FIFO_SIZE;
+ spi_engine->offload_sdo_mem_size = SPI_ENGINE_OFFLOAD_SDO_FIFO_SIZE;
+ }
+
writel_relaxed(0x00, spi_engine->base + SPI_ENGINE_REG_RESET);
writel_relaxed(0xff, spi_engine->base + SPI_ENGINE_REG_INT_PENDING);
writel_relaxed(0x00, spi_engine->base + SPI_ENGINE_REG_INT_ENABLE);
@@ -709,6 +1008,8 @@ static int spi_engine_probe(struct platform_device *pdev)
host->transfer_one_message = spi_engine_transfer_one_message;
host->optimize_message = spi_engine_optimize_message;
host->unoptimize_message = spi_engine_unoptimize_message;
+ host->get_offload = spi_engine_get_offload;
+ host->put_offload = spi_engine_put_offload;
host->num_chipselect = 8;
/* Some features depend of the IP core version. */
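For core versions 1.1 and later, the probe hunk above reads the offload FIFO sizes from SPI_ENGINE_REG_OFFLOAD_MEM_ADDR_WIDTH, where each byte-wide field holds log2 of a FIFO depth. A small sketch of that decode (macro and function names here are local stand-ins for the ones defined in the file):

  #include <linux/bitfield.h>
  #include <linux/bits.h>

  #define EX_MEM_WIDTH_SDO	GENMASK(15, 8)
  #define EX_MEM_WIDTH_CMD	GENMASK(7, 0)

  static void example_decode_fifo_sizes(u32 sizes, unsigned int *cmd_depth,
  				      unsigned int *sdo_depth)
  {
  	/* each field is a power-of-two exponent, so a value of 4 means a depth of 16 */
  	*cmd_depth = 1 << FIELD_GET(EX_MEM_WIDTH_CMD, sizes);
  	*sdo_depth = 1 << FIELD_GET(EX_MEM_WIDTH_SDO, sizes);
  }
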
diff --git a/drivers/spi/spi-cadence-quadspi.c b/drivers/spi/spi-cadence-quadspi.c
index 0cd37a7436d5..559fbdfbd9f7 100644
--- a/drivers/spi/spi-cadence-quadspi.c
+++ b/drivers/spi/spi-cadence-quadspi.c
@@ -1658,6 +1658,12 @@ static int cqspi_request_mmap_dma(struct cqspi_st *cqspi)
int ret = PTR_ERR(cqspi->rx_chan);
cqspi->rx_chan = NULL;
+ if (ret == -ENODEV) {
+ /* DMA support is not mandatory */
+ dev_info(&cqspi->pdev->dev, "No Rx DMA available\n");
+ return 0;
+ }
+
return dev_err_probe(&cqspi->pdev->dev, ret, "No Rx DMA available\n");
}
init_completion(&cqspi->rx_dma_complete);
@@ -2067,7 +2073,7 @@ static const struct cqspi_driver_platdata k2g_qspi = {
static const struct cqspi_driver_platdata am654_ospi = {
.hwcaps_mask = CQSPI_SUPPORTS_OCTAL | CQSPI_SUPPORTS_QUAD,
- .quirks = CQSPI_NEEDS_WR_DELAY,
+ .quirks = CQSPI_DISABLE_DAC_MODE | CQSPI_NEEDS_WR_DELAY,
};
static const struct cqspi_driver_platdata intel_lgm_qspi = {
diff --git a/drivers/spi/spi-fsi.c b/drivers/spi/spi-fsi.c
index fc9e33be1e0e..e01c63d23b64 100644
--- a/drivers/spi/spi-fsi.c
+++ b/drivers/spi/spi-fsi.c
@@ -479,6 +479,19 @@ static int fsi_spi_transfer_one_message(struct spi_controller *ctlr,
shift = SPI_FSI_SEQUENCE_SHIFT_IN(next->len);
fsi_spi_sequence_add(&seq, shift);
+ } else if (next->tx_buf) {
+ if ((next->len + transfer->len) > (SPI_FSI_MAX_TX_SIZE + 8)) {
+ rc = -EINVAL;
+ goto error;
+ }
+
+ len = next->len;
+ while (len > 8) {
+ fsi_spi_sequence_add(&seq,
+ SPI_FSI_SEQUENCE_SHIFT_OUT(8));
+ len -= 8;
+ }
+ fsi_spi_sequence_add(&seq, SPI_FSI_SEQUENCE_SHIFT_OUT(len));
} else {
next = NULL;
}
diff --git a/drivers/spi/spi-fsl-lpspi.c b/drivers/spi/spi-fsl-lpspi.c
index 40f5c8fdba76..5e3818445234 100644
--- a/drivers/spi/spi-fsl-lpspi.c
+++ b/drivers/spi/spi-fsl-lpspi.c
@@ -572,7 +572,7 @@ static int fsl_lpspi_calculate_timeout(struct fsl_lpspi_data *fsl_lpspi,
timeout += 1;
/* Double calculated timeout */
- return msecs_to_jiffies(2 * timeout * MSEC_PER_SEC);
+ return secs_to_jiffies(2 * timeout);
}
static int fsl_lpspi_dma_transfer(struct spi_controller *controller,
diff --git a/drivers/spi/spi-gpio.c b/drivers/spi/spi-gpio.c
index 4f192e013cd6..405deb6677c1 100644
--- a/drivers/spi/spi-gpio.c
+++ b/drivers/spi/spi-gpio.c
@@ -39,36 +39,8 @@ struct spi_gpio {
/*----------------------------------------------------------------------*/
-/*
- * Because the overhead of going through four GPIO procedure calls
- * per transferred bit can make performance a problem, this code
- * is set up so that you can use it in either of two ways:
- *
- * - The slow generic way: set up platform_data to hold the GPIO
- * numbers used for MISO/MOSI/SCK, and issue procedure calls for
- * each of them. This driver can handle several such busses.
- *
- * - The quicker inlined way: only helps with platform GPIO code
- * that inlines operations for constant GPIOs. This can give
- * you tight (fast!) inner loops, but each such bus needs a
- * new driver. You'll define a new C file, with Makefile and
- * Kconfig support; the C code can be a total of six lines:
- *
- * #define DRIVER_NAME "myboard_spi2"
- * #define SPI_MISO_GPIO 119
- * #define SPI_MOSI_GPIO 120
- * #define SPI_SCK_GPIO 121
- * #define SPI_N_CHIPSEL 4
- * #include "spi-gpio.c"
- */
-
-#ifndef DRIVER_NAME
#define DRIVER_NAME "spi_gpio"
-#define GENERIC_BITBANG /* vs tight inlines */
-
-#endif
-
/*----------------------------------------------------------------------*/
static inline struct spi_gpio *__pure
@@ -341,16 +313,14 @@ static int spi_gpio_probe_pdata(struct platform_device *pdev,
struct spi_gpio *spi_gpio = spi_controller_get_devdata(host);
int i;
-#ifdef GENERIC_BITBANG
- if (!pdata || !pdata->num_chipselect)
+ if (!pdata)
return -ENODEV;
-#endif
- /*
- * The host needs to think there is a chipselect even if not
- * connected
- */
- host->num_chipselect = pdata->num_chipselect ?: 1;
+ /* It's just one always-selected device, fine to continue */
+ if (!pdata->num_chipselect)
+ return 0;
+
+ host->num_chipselect = pdata->num_chipselect;
spi_gpio->cs_gpios = devm_kcalloc(dev, host->num_chipselect,
sizeof(*spi_gpio->cs_gpios),
GFP_KERNEL);
@@ -445,8 +415,6 @@ static int spi_gpio_probe(struct platform_device *pdev)
return devm_spi_register_controller(&pdev->dev, host);
}
-MODULE_ALIAS("platform:" DRIVER_NAME);
-
static const struct of_device_id spi_gpio_dt_ids[] = {
{ .compatible = "spi-gpio" },
{}
@@ -465,3 +433,4 @@ module_platform_driver(spi_gpio_driver);
MODULE_DESCRIPTION("SPI host driver using generic bitbanged GPIO ");
MODULE_AUTHOR("David Brownell");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" DRIVER_NAME);
diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
index eeb7d082c247..832d6e9009eb 100644
--- a/drivers/spi/spi-imx.c
+++ b/drivers/spi/spi-imx.c
@@ -1449,7 +1449,7 @@ static int spi_imx_calculate_timeout(struct spi_imx_data *spi_imx, int size)
timeout += 1;
/* Double calculated timeout */
- return msecs_to_jiffies(2 * timeout * MSEC_PER_SEC);
+ return secs_to_jiffies(2 * timeout);
}
static int spi_imx_dma_transfer(struct spi_imx_data *spi_imx,
diff --git a/drivers/spi/spi-mem.c b/drivers/spi/spi-mem.c
index a9f0f47f4759..a31a1db07aa4 100644
--- a/drivers/spi/spi-mem.c
+++ b/drivers/spi/spi-mem.c
@@ -377,6 +377,17 @@ int spi_mem_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
/* Make sure the operation frequency is correct before going futher */
spi_mem_adjust_op_freq(mem, (struct spi_mem_op *)op);
+ dev_vdbg(&mem->spi->dev, "[cmd: 0x%02x][%dB addr: %#8llx][%2dB dummy][%4dB data %s] %d%c-%d%c-%d%c-%d%c @ %uHz\n",
+ op->cmd.opcode,
+ op->addr.nbytes, (op->addr.nbytes ? op->addr.val : 0),
+ op->dummy.nbytes,
+ op->data.nbytes, (op->data.nbytes ? (op->data.dir == SPI_MEM_DATA_IN ? " read" : "write") : " "),
+ op->cmd.buswidth, op->cmd.dtr ? 'D' : 'S',
+ op->addr.buswidth, op->addr.dtr ? 'D' : 'S',
+ op->dummy.buswidth, op->dummy.dtr ? 'D' : 'S',
+ op->data.buswidth, op->data.dtr ? 'D' : 'S',
+ op->max_freq ? op->max_freq : mem->spi->max_speed_hz);
+
ret = spi_mem_check_op(op);
if (ret)
return ret;
diff --git a/drivers/spi/spi-microchip-core.c b/drivers/spi/spi-microchip-core.c
index 5b6af55855ef..62ba0bd9cbb7 100644
--- a/drivers/spi/spi-microchip-core.c
+++ b/drivers/spi/spi-microchip-core.c
@@ -70,8 +70,7 @@
#define INT_RX_CHANNEL_OVERFLOW BIT(2)
#define INT_TX_CHANNEL_UNDERRUN BIT(3)
-#define INT_ENABLE_MASK (CONTROL_RX_DATA_INT | CONTROL_TX_DATA_INT | \
- CONTROL_RX_OVER_INT | CONTROL_TX_UNDER_INT)
+#define INT_ENABLE_MASK (CONTROL_RX_OVER_INT | CONTROL_TX_UNDER_INT)
#define REG_CONTROL (0x00)
#define REG_FRAME_SIZE (0x04)
@@ -133,10 +132,15 @@ static inline void mchp_corespi_disable(struct mchp_corespi *spi)
mchp_corespi_write(spi, REG_CONTROL, control);
}
-static inline void mchp_corespi_read_fifo(struct mchp_corespi *spi)
+static inline void mchp_corespi_read_fifo(struct mchp_corespi *spi, int fifo_max)
{
- while (spi->rx_len >= spi->n_bytes && !(mchp_corespi_read(spi, REG_STATUS) & STATUS_RXFIFO_EMPTY)) {
- u32 data = mchp_corespi_read(spi, REG_RX_DATA);
+ for (int i = 0; i < fifo_max; i++) {
+ u32 data;
+
+ while (mchp_corespi_read(spi, REG_STATUS) & STATUS_RXFIFO_EMPTY)
+ ;
+
+ data = mchp_corespi_read(spi, REG_RX_DATA);
spi->rx_len -= spi->n_bytes;
@@ -211,11 +215,10 @@ static inline void mchp_corespi_set_xfer_size(struct mchp_corespi *spi, int len)
mchp_corespi_write(spi, REG_FRAMESUP, len);
}
-static inline void mchp_corespi_write_fifo(struct mchp_corespi *spi)
+static inline void mchp_corespi_write_fifo(struct mchp_corespi *spi, int fifo_max)
{
- int fifo_max, i = 0;
+ int i = 0;
- fifo_max = DIV_ROUND_UP(min(spi->tx_len, FIFO_DEPTH), spi->n_bytes);
mchp_corespi_set_xfer_size(spi, fifo_max);
while ((i < fifo_max) && !(mchp_corespi_read(spi, REG_STATUS) & STATUS_TXFIFO_FULL)) {
@@ -413,19 +416,6 @@ static irqreturn_t mchp_corespi_interrupt(int irq, void *dev_id)
if (intfield == 0)
return IRQ_NONE;
- if (intfield & INT_TXDONE)
- mchp_corespi_write(spi, REG_INT_CLEAR, INT_TXDONE);
-
- if (intfield & INT_RXRDY) {
- mchp_corespi_write(spi, REG_INT_CLEAR, INT_RXRDY);
-
- if (spi->rx_len)
- mchp_corespi_read_fifo(spi);
- }
-
- if (!spi->rx_len && !spi->tx_len)
- finalise = true;
-
if (intfield & INT_RX_CHANNEL_OVERFLOW) {
mchp_corespi_write(spi, REG_INT_CLEAR, INT_RX_CHANNEL_OVERFLOW);
finalise = true;
@@ -512,9 +502,14 @@ static int mchp_corespi_transfer_one(struct spi_controller *host,
mchp_corespi_write(spi, REG_SLAVE_SELECT, spi->pending_slave_select);
- while (spi->tx_len)
- mchp_corespi_write_fifo(spi);
+ while (spi->tx_len) {
+ int fifo_max = DIV_ROUND_UP(min(spi->tx_len, FIFO_DEPTH), spi->n_bytes);
+
+ mchp_corespi_write_fifo(spi, fifo_max);
+ mchp_corespi_read_fifo(spi, fifo_max);
+ }
+ spi_finalize_current_transfer(host);
return 1;
}
diff --git a/drivers/spi/spi-mt65xx.c b/drivers/spi/spi-mt65xx.c
index 197bf2dbe5de..4b0a1c0db041 100644
--- a/drivers/spi/spi-mt65xx.c
+++ b/drivers/spi/spi-mt65xx.c
@@ -20,6 +20,7 @@
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
#include <linux/dma-mapping.h>
+#include <linux/pm_qos.h>
#define SPI_CFG0_REG 0x0000
#define SPI_CFG1_REG 0x0004
@@ -147,6 +148,7 @@ struct mtk_spi_compatible {
* @tx_sgl_len: Size of TX DMA transfer
* @rx_sgl_len: Size of RX DMA transfer
* @dev_comp: Device data structure
+ * @qos_request: QoS request
* @spi_clk_hz: Current SPI clock in Hz
* @spimem_done: SPI-MEM operation completion
* @use_spimem: Enables SPI-MEM
@@ -166,6 +168,7 @@ struct mtk_spi {
struct scatterlist *tx_sgl, *rx_sgl;
u32 tx_sgl_len, rx_sgl_len;
const struct mtk_spi_compatible *dev_comp;
+ struct pm_qos_request qos_request;
u32 spi_clk_hz;
struct completion spimem_done;
bool use_spimem;
@@ -356,6 +359,7 @@ static int mtk_spi_hw_init(struct spi_controller *host,
struct mtk_chip_config *chip_config = spi->controller_data;
struct mtk_spi *mdata = spi_controller_get_devdata(host);
+ cpu_latency_qos_update_request(&mdata->qos_request, 500);
cpha = spi->mode & SPI_CPHA ? 1 : 0;
cpol = spi->mode & SPI_CPOL ? 1 : 0;
@@ -459,6 +463,15 @@ static int mtk_spi_prepare_message(struct spi_controller *host,
return mtk_spi_hw_init(host, msg->spi);
}
+static int mtk_spi_unprepare_message(struct spi_controller *host,
+ struct spi_message *message)
+{
+ struct mtk_spi *mdata = spi_controller_get_devdata(host);
+
+ cpu_latency_qos_update_request(&mdata->qos_request, PM_QOS_DEFAULT_VALUE);
+ return 0;
+}
+
static void mtk_spi_set_cs(struct spi_device *spi, bool enable)
{
u32 reg_val;
@@ -1143,6 +1156,7 @@ static int mtk_spi_probe(struct platform_device *pdev)
host->set_cs = mtk_spi_set_cs;
host->prepare_message = mtk_spi_prepare_message;
+ host->unprepare_message = mtk_spi_unprepare_message;
host->transfer_one = mtk_spi_transfer_one;
host->can_dma = mtk_spi_can_dma;
host->setup = mtk_spi_setup;
@@ -1249,6 +1263,8 @@ static int mtk_spi_probe(struct platform_device *pdev)
clk_disable_unprepare(mdata->spi_hclk);
}
+ cpu_latency_qos_add_request(&mdata->qos_request, PM_QOS_DEFAULT_VALUE);
+
if (mdata->dev_comp->need_pad_sel) {
if (mdata->pad_num != host->num_chipselect)
return dev_err_probe(dev, -EINVAL,
@@ -1292,6 +1308,7 @@ static void mtk_spi_remove(struct platform_device *pdev)
struct mtk_spi *mdata = spi_controller_get_devdata(host);
int ret;
+ cpu_latency_qos_remove_request(&mdata->qos_request);
if (mdata->use_spimem && !completion_done(&mdata->spimem_done))
complete(&mdata->spimem_done);
diff --git a/drivers/spi/spi-mtk-snfi.c b/drivers/spi/spi-mtk-snfi.c
index fdbea9dffb62..e82ee6dcf498 100644
--- a/drivers/spi/spi-mtk-snfi.c
+++ b/drivers/spi/spi-mtk-snfi.c
@@ -1284,9 +1284,6 @@ static int mtk_snand_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
struct mtk_snand *ms = spi_controller_get_devdata(mem->spi->controller);
- dev_dbg(ms->dev, "OP %02x ADDR %08llX@%d:%u DATA %d:%u", op->cmd.opcode,
- op->addr.val, op->addr.buswidth, op->addr.nbytes,
- op->data.buswidth, op->data.nbytes);
if (mtk_snand_is_page_ops(op)) {
if (op->data.dir == SPI_MEM_DATA_IN)
return mtk_snand_read_page_cache(ms, op);
diff --git a/drivers/spi/spi-mux.c b/drivers/spi/spi-mux.c
index c02c4204442f..0eb35c4e3987 100644
--- a/drivers/spi/spi-mux.c
+++ b/drivers/spi/spi-mux.c
@@ -68,9 +68,7 @@ static int spi_mux_select(struct spi_device *spi)
priv->current_cs = spi_get_chipselect(spi, 0);
- spi_setup(priv->spi);
-
- return 0;
+ return spi_setup(priv->spi);
}
static int spi_mux_setup(struct spi_device *spi)
diff --git a/drivers/spi/spi-npcm-fiu.c b/drivers/spi/spi-npcm-fiu.c
index 958bab27a081..67cc1d86de42 100644
--- a/drivers/spi/spi-npcm-fiu.c
+++ b/drivers/spi/spi-npcm-fiu.c
@@ -550,11 +550,6 @@ static int npcm_fiu_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
int ret = 0;
u8 *buf;
- dev_dbg(fiu->dev, "cmd:%#x mode:%d.%d.%d.%d addr:%#llx len:%#x\n",
- op->cmd.opcode, op->cmd.buswidth, op->addr.buswidth,
- op->dummy.buswidth, op->data.buswidth, op->addr.val,
- op->data.nbytes);
-
if (fiu->spix_mode || op->addr.nbytes > 4)
return -EOPNOTSUPP;
diff --git a/drivers/spi/spi-offload-trigger-pwm.c b/drivers/spi/spi-offload-trigger-pwm.c
new file mode 100644
index 000000000000..805ed41560df
--- /dev/null
+++ b/drivers/spi/spi-offload-trigger-pwm.c
@@ -0,0 +1,169 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2024 Analog Devices Inc.
+ * Copyright (C) 2024 BayLibre, SAS
+ *
+ * Generic PWM trigger for SPI offload.
+ */
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/math.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/pwm.h>
+#include <linux/spi/offload/provider.h>
+#include <linux/spi/offload/types.h>
+#include <linux/time.h>
+#include <linux/types.h>
+
+struct spi_offload_trigger_pwm_state {
+ struct device *dev;
+ struct pwm_device *pwm;
+};
+
+static bool spi_offload_trigger_pwm_match(struct spi_offload_trigger *trigger,
+ enum spi_offload_trigger_type type,
+ u64 *args, u32 nargs)
+{
+ if (nargs)
+ return false;
+
+ return type == SPI_OFFLOAD_TRIGGER_PERIODIC;
+}
+
+static int spi_offload_trigger_pwm_validate(struct spi_offload_trigger *trigger,
+ struct spi_offload_trigger_config *config)
+{
+ struct spi_offload_trigger_pwm_state *st = spi_offload_trigger_get_priv(trigger);
+ struct spi_offload_trigger_periodic *periodic = &config->periodic;
+ struct pwm_waveform wf = { };
+ int ret;
+
+ if (config->type != SPI_OFFLOAD_TRIGGER_PERIODIC)
+ return -EINVAL;
+
+ if (!periodic->frequency_hz)
+ return -EINVAL;
+
+ wf.period_length_ns = DIV_ROUND_UP_ULL(NSEC_PER_SEC, periodic->frequency_hz);
+ /* REVISIT: 50% duty-cycle for now - may add config parameter later */
+ wf.duty_length_ns = wf.period_length_ns / 2;
+
+ ret = pwm_round_waveform_might_sleep(st->pwm, &wf);
+ if (ret < 0)
+ return ret;
+
+ periodic->frequency_hz = DIV_ROUND_UP_ULL(NSEC_PER_SEC, wf.period_length_ns);
+
+ return 0;
+}
+
+static int spi_offload_trigger_pwm_enable(struct spi_offload_trigger *trigger,
+ struct spi_offload_trigger_config *config)
+{
+ struct spi_offload_trigger_pwm_state *st = spi_offload_trigger_get_priv(trigger);
+ struct spi_offload_trigger_periodic *periodic = &config->periodic;
+ struct pwm_waveform wf = { };
+
+ if (config->type != SPI_OFFLOAD_TRIGGER_PERIODIC)
+ return -EINVAL;
+
+ if (!periodic->frequency_hz)
+ return -EINVAL;
+
+ wf.period_length_ns = DIV_ROUND_UP_ULL(NSEC_PER_SEC, periodic->frequency_hz);
+ /* REVISIT: 50% duty-cycle for now - may add config parameter later */
+ wf.duty_length_ns = wf.period_length_ns / 2;
+
+ return pwm_set_waveform_might_sleep(st->pwm, &wf, false);
+}
+
+static void spi_offload_trigger_pwm_disable(struct spi_offload_trigger *trigger)
+{
+ struct spi_offload_trigger_pwm_state *st = spi_offload_trigger_get_priv(trigger);
+ struct pwm_waveform wf;
+ int ret;
+
+ ret = pwm_get_waveform_might_sleep(st->pwm, &wf);
+ if (ret < 0) {
+ dev_err(st->dev, "failed to get waveform: %d\n", ret);
+ return;
+ }
+
+ wf.duty_length_ns = 0;
+
+ ret = pwm_set_waveform_might_sleep(st->pwm, &wf, false);
+ if (ret < 0)
+ dev_err(st->dev, "failed to disable PWM: %d\n", ret);
+}
+
+static const struct spi_offload_trigger_ops spi_offload_trigger_pwm_ops = {
+ .match = spi_offload_trigger_pwm_match,
+ .validate = spi_offload_trigger_pwm_validate,
+ .enable = spi_offload_trigger_pwm_enable,
+ .disable = spi_offload_trigger_pwm_disable,
+};
+
+static void spi_offload_trigger_pwm_release(void *data)
+{
+ pwm_disable(data);
+}
+
+static int spi_offload_trigger_pwm_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct spi_offload_trigger_info info = {
+ .fwnode = dev_fwnode(dev),
+ .ops = &spi_offload_trigger_pwm_ops,
+ };
+ struct spi_offload_trigger_pwm_state *st;
+ struct pwm_state state;
+ int ret;
+
+ st = devm_kzalloc(dev, sizeof(*st), GFP_KERNEL);
+ if (!st)
+ return -ENOMEM;
+
+ info.priv = st;
+ st->dev = dev;
+
+ st->pwm = devm_pwm_get(dev, NULL);
+ if (IS_ERR(st->pwm))
+ return dev_err_probe(dev, PTR_ERR(st->pwm), "failed to get PWM\n");
+
+ /* init with duty_cycle = 0, output enabled to ensure trigger off */
+ pwm_init_state(st->pwm, &state);
+ state.enabled = true;
+
+ ret = pwm_apply_might_sleep(st->pwm, &state);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "failed to apply PWM state\n");
+
+ ret = devm_add_action_or_reset(dev, spi_offload_trigger_pwm_release, st->pwm);
+ if (ret)
+ return ret;
+
+ return devm_spi_offload_trigger_register(dev, &info);
+}
+
+static const struct of_device_id spi_offload_trigger_pwm_of_match_table[] = {
+ { .compatible = "pwm-trigger" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, spi_offload_trigger_pwm_of_match_table);
+
+static struct platform_driver spi_offload_trigger_pwm_driver = {
+ .driver = {
+ .name = "pwm-trigger",
+ .of_match_table = spi_offload_trigger_pwm_of_match_table,
+ },
+ .probe = spi_offload_trigger_pwm_probe,
+};
+module_platform_driver(spi_offload_trigger_pwm_driver);
+
+MODULE_AUTHOR("David Lechner <dlechner@baylibre.com>");
+MODULE_DESCRIPTION("Generic PWM trigger");
+MODULE_LICENSE("GPL");
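Both the validate and enable callbacks above translate a requested trigger frequency into a PWM waveform with a 50% duty cycle, and validate additionally converts the rounded period back into a frequency. A small stand-alone sketch of that conversion (the helper name is hypothetical):

  #include <linux/math.h>
  #include <linux/pwm.h>
  #include <linux/time.h>

  static int example_round_trigger_freq(struct pwm_device *pwm, u64 *freq_hz)
  {
  	struct pwm_waveform wf = { };
  	int ret;

  	wf.period_length_ns = DIV_ROUND_UP_ULL(NSEC_PER_SEC, *freq_hz);
  	wf.duty_length_ns = wf.period_length_ns / 2;	/* 50% duty cycle */

  	/* let the PWM hardware round the waveform to what it can generate */
  	ret = pwm_round_waveform_might_sleep(pwm, &wf);
  	if (ret < 0)
  		return ret;

  	/* report back the frequency that will actually be produced */
  	*freq_hz = DIV_ROUND_UP_ULL(NSEC_PER_SEC, wf.period_length_ns);
  	return 0;
  }
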
diff --git a/drivers/spi/spi-offload.c b/drivers/spi/spi-offload.c
new file mode 100644
index 000000000000..6bad042fe437
--- /dev/null
+++ b/drivers/spi/spi-offload.c
@@ -0,0 +1,468 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2024 Analog Devices Inc.
+ * Copyright (C) 2024 BayLibre, SAS
+ */
+
+/*
+ * SPI Offloading support.
+ *
+ * Some SPI controllers support offloading of SPI transfers. Essentially, this
+ * is the ability for a SPI controller to perform SPI transfers with minimal
+ * or even no CPU intervention, e.g. via a specialized SPI controller with a
+ * hardware trigger or via a conventional SPI controller using a non-Linux MCU
+ * processor core to offload the work.
+ */
+
+#define DEFAULT_SYMBOL_NAMESPACE "SPI_OFFLOAD"
+
+#include <linux/cleanup.h>
+#include <linux/device.h>
+#include <linux/dmaengine.h>
+#include <linux/export.h>
+#include <linux/kref.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/property.h>
+#include <linux/spi/offload/consumer.h>
+#include <linux/spi/offload/provider.h>
+#include <linux/spi/offload/types.h>
+#include <linux/spi/spi.h>
+#include <linux/types.h>
+
+struct spi_controller_and_offload {
+ struct spi_controller *controller;
+ struct spi_offload *offload;
+};
+
+struct spi_offload_trigger {
+ struct list_head list;
+ struct kref ref;
+ struct fwnode_handle *fwnode;
+ /* synchronizes calling ops and driver registration */
+ struct mutex lock;
+ /*
+ * If the provider goes away while the consumer still has a reference,
+ * ops and priv will be set to NULL and all calls will fail with -ENODEV.
+ */
+ const struct spi_offload_trigger_ops *ops;
+ void *priv;
+};
+
+static LIST_HEAD(spi_offload_triggers);
+static DEFINE_MUTEX(spi_offload_triggers_lock);
+
+/**
+ * devm_spi_offload_alloc() - Allocate offload instance
+ * @dev: Device for devm purposes and assigned to &struct spi_offload.provider_dev
+ * @priv_size: Size of private data to allocate
+ *
+ * Offload providers should use this to allocate offload instances.
+ *
+ * Return: Pointer to new offload instance or error on failure.
+ */
+struct spi_offload *devm_spi_offload_alloc(struct device *dev,
+ size_t priv_size)
+{
+ struct spi_offload *offload;
+ void *priv;
+
+ offload = devm_kzalloc(dev, sizeof(*offload), GFP_KERNEL);
+ if (!offload)
+ return ERR_PTR(-ENOMEM);
+
+ priv = devm_kzalloc(dev, priv_size, GFP_KERNEL);
+ if (!priv)
+ return ERR_PTR(-ENOMEM);
+
+ offload->provider_dev = dev;
+ offload->priv = priv;
+
+ return offload;
+}
+EXPORT_SYMBOL_GPL(devm_spi_offload_alloc);
+
+static void spi_offload_put(void *data)
+{
+ struct spi_controller_and_offload *resource = data;
+
+ resource->controller->put_offload(resource->offload);
+ kfree(resource);
+}
+
+/**
+ * devm_spi_offload_get() - Get an offload instance
+ * @dev: Device for devm purposes
+ * @spi: SPI device to use for the transfers
+ * @config: Offload configuration
+ *
+ * Peripheral drivers call this function to get an offload instance that meets
+ * the requirements specified in @config. If no suitable offload instance is
+ * available, -ENODEV is returned.
+ *
+ * Return: Offload instance or error on failure.
+ */
+struct spi_offload *devm_spi_offload_get(struct device *dev,
+ struct spi_device *spi,
+ const struct spi_offload_config *config)
+{
+ struct spi_controller_and_offload *resource;
+ struct spi_offload *offload;
+ int ret;
+
+ if (!spi || !config)
+ return ERR_PTR(-EINVAL);
+
+ if (!spi->controller->get_offload)
+ return ERR_PTR(-ENODEV);
+
+ resource = kzalloc(sizeof(*resource), GFP_KERNEL);
+ if (!resource)
+ return ERR_PTR(-ENOMEM);
+
+ offload = spi->controller->get_offload(spi, config);
+ if (IS_ERR(offload)) {
+ kfree(resource);
+ return offload;
+ }
+
+ resource->controller = spi->controller;
+ resource->offload = offload;
+
+ ret = devm_add_action_or_reset(dev, spi_offload_put, resource);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return offload;
+}
+EXPORT_SYMBOL_GPL(devm_spi_offload_get);
+
+static void spi_offload_trigger_free(struct kref *ref)
+{
+ struct spi_offload_trigger *trigger =
+ container_of(ref, struct spi_offload_trigger, ref);
+
+ mutex_destroy(&trigger->lock);
+ fwnode_handle_put(trigger->fwnode);
+ kfree(trigger);
+}
+
+static void spi_offload_trigger_put(void *data)
+{
+ struct spi_offload_trigger *trigger = data;
+
+ scoped_guard(mutex, &trigger->lock)
+ if (trigger->ops && trigger->ops->release)
+ trigger->ops->release(trigger);
+
+ kref_put(&trigger->ref, spi_offload_trigger_free);
+}
+
+static struct spi_offload_trigger
+*spi_offload_trigger_get(enum spi_offload_trigger_type type,
+ struct fwnode_reference_args *args)
+{
+ struct spi_offload_trigger *trigger;
+ bool match = false;
+ int ret;
+
+ guard(mutex)(&spi_offload_triggers_lock);
+
+ list_for_each_entry(trigger, &spi_offload_triggers, list) {
+ if (trigger->fwnode != args->fwnode)
+ continue;
+
+ match = trigger->ops->match(trigger, type, args->args, args->nargs);
+ if (match)
+ break;
+ }
+
+ if (!match)
+ return ERR_PTR(-EPROBE_DEFER);
+
+ guard(mutex)(&trigger->lock);
+
+ if (!trigger->ops)
+ return ERR_PTR(-ENODEV);
+
+ if (trigger->ops->request) {
+ ret = trigger->ops->request(trigger, type, args->args, args->nargs);
+ if (ret)
+ return ERR_PTR(ret);
+ }
+
+ kref_get(&trigger->ref);
+
+ return trigger;
+}
+
+/**
+ * devm_spi_offload_trigger_get() - Get an offload trigger instance
+ * @dev: Device for devm purposes.
+ * @offload: Offload instance connected to a trigger.
+ * @type: Trigger type to get.
+ *
+ * Return: Offload trigger instance or error on failure.
+ */
+struct spi_offload_trigger
+*devm_spi_offload_trigger_get(struct device *dev,
+ struct spi_offload *offload,
+ enum spi_offload_trigger_type type)
+{
+ struct spi_offload_trigger *trigger;
+ struct fwnode_reference_args args;
+ int ret;
+
+ ret = fwnode_property_get_reference_args(dev_fwnode(offload->provider_dev),
+ "trigger-sources",
+ "#trigger-source-cells", 0, 0,
+ &args);
+ if (ret)
+ return ERR_PTR(ret);
+
+ trigger = spi_offload_trigger_get(type, &args);
+ fwnode_handle_put(args.fwnode);
+ if (IS_ERR(trigger))
+ return trigger;
+
+ ret = devm_add_action_or_reset(dev, spi_offload_trigger_put, trigger);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return trigger;
+}
+EXPORT_SYMBOL_GPL(devm_spi_offload_trigger_get);
+
+/**
+ * spi_offload_trigger_validate - Validate the requested trigger
+ * @trigger: Offload trigger instance
+ * @config: Trigger config to validate
+ *
+ * On success, @config may be modified to reflect what the hardware can do.
+ * For example, the frequency of a periodic trigger may be adjusted to the
+ * nearest supported value.
+ *
+ * Callers will likely need to do additional validation of the modified trigger
+ * parameters.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+int spi_offload_trigger_validate(struct spi_offload_trigger *trigger,
+ struct spi_offload_trigger_config *config)
+{
+ guard(mutex)(&trigger->lock);
+
+ if (!trigger->ops)
+ return -ENODEV;
+
+ if (!trigger->ops->validate)
+ return -EOPNOTSUPP;
+
+ return trigger->ops->validate(trigger, config);
+}
+EXPORT_SYMBOL_GPL(spi_offload_trigger_validate);
+
+/**
+ * spi_offload_trigger_enable - enables trigger for offload
+ * @offload: Offload instance
+ * @trigger: Offload trigger instance
+ * @config: Trigger config to validate
+ *
+ * There must be a prepared offload instance with the specified ID (i.e.
+ * spi_optimize_message() was called with the same offload assigned to the
+ * message). This will also reserve the bus for exclusive use by the offload
+ * instance until the trigger is disabled. Any other attempts to send a
+ * transfer or lock the bus will fail with -EBUSY during this time.
+ *
+ * Calls must be balanced with spi_offload_trigger_disable().
+ *
+ * Context: can sleep
+ * Return: 0 on success, else a negative error code.
+ */
+int spi_offload_trigger_enable(struct spi_offload *offload,
+ struct spi_offload_trigger *trigger,
+ struct spi_offload_trigger_config *config)
+{
+ int ret;
+
+ guard(mutex)(&trigger->lock);
+
+ if (!trigger->ops)
+ return -ENODEV;
+
+ if (offload->ops && offload->ops->trigger_enable) {
+ ret = offload->ops->trigger_enable(offload);
+ if (ret)
+ return ret;
+ }
+
+ if (trigger->ops->enable) {
+ ret = trigger->ops->enable(trigger, config);
+ if (ret) {
+ if (offload->ops->trigger_disable)
+ offload->ops->trigger_disable(offload);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(spi_offload_trigger_enable);
+
+/**
+ * spi_offload_trigger_disable - disables hardware trigger for offload
+ * @offload: Offload instance
+ * @trigger: Offload trigger instance
+ *
+ * Disables the hardware trigger for the offload instance with the specified ID
+ * and releases the bus for use by other clients.
+ *
+ * Context: can sleep
+ */
+void spi_offload_trigger_disable(struct spi_offload *offload,
+ struct spi_offload_trigger *trigger)
+{
+ if (offload->ops && offload->ops->trigger_disable)
+ offload->ops->trigger_disable(offload);
+
+ guard(mutex)(&trigger->lock);
+
+ if (!trigger->ops)
+ return;
+
+ if (trigger->ops->disable)
+ trigger->ops->disable(trigger);
+}
+EXPORT_SYMBOL_GPL(spi_offload_trigger_disable);
+
+static void spi_offload_release_dma_chan(void *chan)
+{
+ dma_release_channel(chan);
+}
+
+/**
+ * devm_spi_offload_tx_stream_request_dma_chan - Get the DMA channel info for the TX stream
+ * @dev: Device for devm purposes.
+ * @offload: Offload instance
+ *
+ * This is the DMA channel that will provide data to transfers that use the
+ * %SPI_OFFLOAD_XFER_TX_STREAM offload flag.
+ *
+ * Return: Pointer to DMA channel info, or negative error code
+ */
+struct dma_chan
+*devm_spi_offload_tx_stream_request_dma_chan(struct device *dev,
+ struct spi_offload *offload)
+{
+ struct dma_chan *chan;
+ int ret;
+
+ if (!offload->ops || !offload->ops->tx_stream_request_dma_chan)
+ return ERR_PTR(-EOPNOTSUPP);
+
+ chan = offload->ops->tx_stream_request_dma_chan(offload);
+ if (IS_ERR(chan))
+ return chan;
+
+ ret = devm_add_action_or_reset(dev, spi_offload_release_dma_chan, chan);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return chan;
+}
+EXPORT_SYMBOL_GPL(devm_spi_offload_tx_stream_request_dma_chan);
+
+/**
+ * devm_spi_offload_rx_stream_request_dma_chan - Get the DMA channel info for the RX stream
+ * @dev: Device for devm purposes.
+ * @offload: Offload instance
+ *
+ * This is the DMA channel that will receive data from transfers that use the
+ * %SPI_OFFLOAD_XFER_RX_STREAM offload flag.
+ *
+ * Return: Pointer to DMA channel info, or negative error code
+ */
+struct dma_chan
+*devm_spi_offload_rx_stream_request_dma_chan(struct device *dev,
+ struct spi_offload *offload)
+{
+ struct dma_chan *chan;
+ int ret;
+
+ if (!offload->ops || !offload->ops->rx_stream_request_dma_chan)
+ return ERR_PTR(-EOPNOTSUPP);
+
+ chan = offload->ops->rx_stream_request_dma_chan(offload);
+ if (IS_ERR(chan))
+ return chan;
+
+ ret = devm_add_action_or_reset(dev, spi_offload_release_dma_chan, chan);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return chan;
+}
+EXPORT_SYMBOL_GPL(devm_spi_offload_rx_stream_request_dma_chan);
+
+/* Trigger providers */
+
+static void spi_offload_trigger_unregister(void *data)
+{
+ struct spi_offload_trigger *trigger = data;
+
+ scoped_guard(mutex, &spi_offload_triggers_lock)
+ list_del(&trigger->list);
+
+ scoped_guard(mutex, &trigger->lock) {
+ trigger->priv = NULL;
+ trigger->ops = NULL;
+ }
+
+ kref_put(&trigger->ref, spi_offload_trigger_free);
+}
+
+/**
+ * devm_spi_offload_trigger_register() - Allocate and register an offload trigger
+ * @dev: Device for devm purposes.
+ * @info: Provider-specific trigger info.
+ *
+ * Return: 0 on success, else a negative error code.
+ */
+int devm_spi_offload_trigger_register(struct device *dev,
+ struct spi_offload_trigger_info *info)
+{
+ struct spi_offload_trigger *trigger;
+
+ if (!info->fwnode || !info->ops)
+ return -EINVAL;
+
+ trigger = kzalloc(sizeof(*trigger), GFP_KERNEL);
+ if (!trigger)
+ return -ENOMEM;
+
+ kref_init(&trigger->ref);
+ mutex_init(&trigger->lock);
+ trigger->fwnode = fwnode_handle_get(info->fwnode);
+ trigger->ops = info->ops;
+ trigger->priv = info->priv;
+
+ scoped_guard(mutex, &spi_offload_triggers_lock)
+ list_add_tail(&trigger->list, &spi_offload_triggers);
+
+ return devm_add_action_or_reset(dev, spi_offload_trigger_unregister, trigger);
+}
+EXPORT_SYMBOL_GPL(devm_spi_offload_trigger_register);
+
+/**
+ * spi_offload_trigger_get_priv() - Get the private data for the trigger
+ *
+ * @trigger: Offload trigger instance.
+ *
+ * Return: Private data for the trigger.
+ */
+void *spi_offload_trigger_get_priv(struct spi_offload_trigger *trigger)
+{
+ return trigger->priv;
+}
+EXPORT_SYMBOL_GPL(spi_offload_trigger_get_priv);
diff --git a/drivers/spi/spi-qpic-snand.c b/drivers/spi/spi-qpic-snand.c
new file mode 100644
index 000000000000..fbba7741a9bf
--- /dev/null
+++ b/drivers/spi/spi-qpic-snand.c
@@ -0,0 +1,1633 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved.
+ *
+ * Authors:
+ * Md Sadre Alam <quic_mdalam@quicinc.com>
+ * Sricharan R <quic_srichara@quicinc.com>
+ * Varadarajan Narayanan <quic_varada@quicinc.com>
+ */
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/dma/qcom_adm.h>
+#include <linux/dma/qcom_bam_dma.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/mtd/nand-qpic-common.h>
+#include <linux/mtd/spinand.h>
+#include <linux/bitfield.h>
+
+#define NAND_FLASH_SPI_CFG 0xc0
+#define NAND_NUM_ADDR_CYCLES 0xc4
+#define NAND_BUSY_CHECK_WAIT_CNT 0xc8
+#define NAND_FLASH_FEATURES 0xf64
+
+/* QSPI NAND config reg bits */
+#define LOAD_CLK_CNTR_INIT_EN BIT(28)
+#define CLK_CNTR_INIT_VAL_VEC 0x924
+#define CLK_CNTR_INIT_VAL_VEC_MASK GENMASK(27, 16)
+#define FEA_STATUS_DEV_ADDR 0xc0
+#define FEA_STATUS_DEV_ADDR_MASK GENMASK(15, 8)
+#define SPI_CFG BIT(0)
+#define SPI_NUM_ADDR 0xDA4DB
+#define SPI_WAIT_CNT 0x10
+#define QPIC_QSPI_NUM_CS 1
+#define SPI_TRANSFER_MODE_x1 BIT(29)
+#define SPI_TRANSFER_MODE_x4 (3 << 29)
+#define SPI_WP BIT(28)
+#define SPI_HOLD BIT(27)
+#define QPIC_SET_FEATURE BIT(31)
+
+#define SPINAND_RESET 0xff
+#define SPINAND_READID 0x9f
+#define SPINAND_GET_FEATURE 0x0f
+#define SPINAND_SET_FEATURE 0x1f
+#define SPINAND_READ 0x13
+#define SPINAND_ERASE 0xd8
+#define SPINAND_WRITE_EN 0x06
+#define SPINAND_PROGRAM_EXECUTE 0x10
+#define SPINAND_PROGRAM_LOAD 0x84
+
+#define ACC_FEATURE 0xe
+#define BAD_BLOCK_MARKER_SIZE 0x2
+#define OOB_BUF_SIZE 128
+#define ecceng_to_qspi(eng) container_of(eng, struct qpic_spi_nand, ecc_eng)
+
+struct qpic_snand_op {
+ u32 cmd_reg;
+ u32 addr1_reg;
+ u32 addr2_reg;
+};
+
+struct snandc_read_status {
+ __le32 snandc_flash;
+ __le32 snandc_buffer;
+ __le32 snandc_erased_cw;
+};
+
+/*
+ * ECC state struct
+ * @corrected: Number of bit errors corrected by ECC
+ * @bitflips: Maximum number of bitflips detected in a single codeword
+ * @failed: Number of uncorrectable ECC failures
+ */
+struct qcom_ecc_stats {
+ u32 corrected;
+ u32 bitflips;
+ u32 failed;
+};
+
+struct qpic_ecc {
+ struct device *dev;
+ int ecc_bytes_hw;
+ int spare_bytes;
+ int bbm_size;
+ int ecc_mode;
+ int bytes;
+ int steps;
+ int step_size;
+ int strength;
+ int cw_size;
+ int cw_data;
+ u32 cfg0;
+ u32 cfg1;
+ u32 cfg0_raw;
+ u32 cfg1_raw;
+ u32 ecc_buf_cfg;
+ u32 ecc_bch_cfg;
+ u32 clrflashstatus;
+ u32 clrreadstatus;
+ bool bch_enabled;
+};
+
+struct qpic_spi_nand {
+ struct qcom_nand_controller *snandc;
+ struct spi_controller *ctlr;
+ struct mtd_info *mtd;
+ struct clk *iomacro_clk;
+ struct qpic_ecc *ecc;
+ struct qcom_ecc_stats ecc_stats;
+ struct nand_ecc_engine ecc_eng;
+ u8 *data_buf;
+ u8 *oob_buf;
+ u32 wlen;
+ __le32 addr1;
+ __le32 addr2;
+ __le32 cmd;
+ u32 num_cw;
+ bool oob_rw;
+ bool page_rw;
+ bool raw_rw;
+};
+
+static void qcom_spi_set_read_loc_first(struct qcom_nand_controller *snandc,
+ int reg, int cw_offset, int read_size,
+ int is_last_read_loc)
+{
+ __le32 locreg_val;
+ u32 val = (((cw_offset) << READ_LOCATION_OFFSET) |
+ ((read_size) << READ_LOCATION_SIZE) | ((is_last_read_loc)
+ << READ_LOCATION_LAST));
+
+ locreg_val = cpu_to_le32(val);
+
+ if (reg == NAND_READ_LOCATION_0)
+ snandc->regs->read_location0 = locreg_val;
+ else if (reg == NAND_READ_LOCATION_1)
+ snandc->regs->read_location1 = locreg_val;
+ else if (reg == NAND_READ_LOCATION_2)
+ snandc->regs->read_location2 = locreg_val;
+ else if (reg == NAND_READ_LOCATION_3)
+ snandc->regs->read_location3 = locreg_val;
+}
+
+static void qcom_spi_set_read_loc_last(struct qcom_nand_controller *snandc,
+ int reg, int cw_offset, int read_size,
+ int is_last_read_loc)
+{
+ __le32 locreg_val;
+ u32 val = (((cw_offset) << READ_LOCATION_OFFSET) |
+ ((read_size) << READ_LOCATION_SIZE) | ((is_last_read_loc)
+ << READ_LOCATION_LAST));
+
+ locreg_val = cpu_to_le32(val);
+
+ if (reg == NAND_READ_LOCATION_LAST_CW_0)
+ snandc->regs->read_location_last0 = locreg_val;
+ else if (reg == NAND_READ_LOCATION_LAST_CW_1)
+ snandc->regs->read_location_last1 = locreg_val;
+ else if (reg == NAND_READ_LOCATION_LAST_CW_2)
+ snandc->regs->read_location_last2 = locreg_val;
+ else if (reg == NAND_READ_LOCATION_LAST_CW_3)
+ snandc->regs->read_location_last3 = locreg_val;
+}
+
+static struct qcom_nand_controller *nand_to_qcom_snand(struct nand_device *nand)
+{
+ struct nand_ecc_engine *eng = nand->ecc.engine;
+ struct qpic_spi_nand *qspi = ecceng_to_qspi(eng);
+
+ return qspi->snandc;
+}
+
+static int qcom_spi_init(struct qcom_nand_controller *snandc)
+{
+ u32 snand_cfg_val = 0x0;
+ int ret;
+
+ snand_cfg_val = FIELD_PREP(CLK_CNTR_INIT_VAL_VEC_MASK, CLK_CNTR_INIT_VAL_VEC) |
+ FIELD_PREP(LOAD_CLK_CNTR_INIT_EN, 0) |
+ FIELD_PREP(FEA_STATUS_DEV_ADDR_MASK, FEA_STATUS_DEV_ADDR) |
+ FIELD_PREP(SPI_CFG, 0);
+
+ snandc->regs->spi_cfg = cpu_to_le32(snand_cfg_val);
+ snandc->regs->num_addr_cycle = cpu_to_le32(SPI_NUM_ADDR);
+ snandc->regs->busy_wait_cnt = cpu_to_le32(SPI_WAIT_CNT);
+
+ qcom_write_reg_dma(snandc, &snandc->regs->spi_cfg, NAND_FLASH_SPI_CFG, 1, 0);
+
+ snand_cfg_val &= ~LOAD_CLK_CNTR_INIT_EN;
+ snandc->regs->spi_cfg = cpu_to_le32(snand_cfg_val);
+
+ qcom_write_reg_dma(snandc, &snandc->regs->spi_cfg, NAND_FLASH_SPI_CFG, 1, 0);
+
+ qcom_write_reg_dma(snandc, &snandc->regs->num_addr_cycle, NAND_NUM_ADDR_CYCLES, 1, 0);
+ qcom_write_reg_dma(snandc, &snandc->regs->busy_wait_cnt, NAND_BUSY_CHECK_WAIT_CNT, 1,
+ NAND_BAM_NEXT_SGL);
+
+ ret = qcom_submit_descs(snandc);
+ if (ret) {
+ dev_err(snandc->dev, "failure in submitting spi init descriptor\n");
+ return ret;
+ }
+
+ return ret;
+}
+
+static int qcom_spi_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_device *nand = mtd_to_nanddev(mtd);
+ struct qcom_nand_controller *snandc = nand_to_qcom_snand(nand);
+ struct qpic_ecc *qecc = snandc->qspi->ecc;
+
+ if (section > 1)
+ return -ERANGE;
+
+ oobregion->length = qecc->ecc_bytes_hw + qecc->spare_bytes;
+ oobregion->offset = mtd->oobsize - oobregion->length;
+
+ return 0;
+}
+
+static int qcom_spi_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_device *nand = mtd_to_nanddev(mtd);
+ struct qcom_nand_controller *snandc = nand_to_qcom_snand(nand);
+ struct qpic_ecc *qecc = snandc->qspi->ecc;
+
+ if (section)
+ return -ERANGE;
+
+ oobregion->length = qecc->steps * 4;
+ oobregion->offset = ((qecc->steps - 1) * qecc->bytes) + qecc->bbm_size;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops qcom_spi_ooblayout = {
+ .ecc = qcom_spi_ooblayout_ecc,
+ .free = qcom_spi_ooblayout_free,
+};
+
+static int qcom_spi_ecc_init_ctx_pipelined(struct nand_device *nand)
+{
+ struct qcom_nand_controller *snandc = nand_to_qcom_snand(nand);
+ struct nand_ecc_props *conf = &nand->ecc.ctx.conf;
+ struct mtd_info *mtd = nanddev_to_mtd(nand);
+ int cwperpage, bad_block_byte;
+ struct qpic_ecc *ecc_cfg;
+
+ cwperpage = mtd->writesize / NANDC_STEP_SIZE;
+ snandc->qspi->num_cw = cwperpage;
+
+ ecc_cfg = kzalloc(sizeof(*ecc_cfg), GFP_KERNEL);
+ if (!ecc_cfg)
+ return -ENOMEM;
+ snandc->qspi->oob_buf = kzalloc(mtd->writesize + mtd->oobsize,
+ GFP_KERNEL);
+ if (!snandc->qspi->oob_buf) {
+ kfree(ecc_cfg);
+ return -ENOMEM;
+ }
+
+ memset(snandc->qspi->oob_buf, 0xff, mtd->writesize + mtd->oobsize);
+
+ nand->ecc.ctx.priv = ecc_cfg;
+ snandc->qspi->mtd = mtd;
+
+ ecc_cfg->ecc_bytes_hw = 7;
+ ecc_cfg->spare_bytes = 4;
+ ecc_cfg->bbm_size = 1;
+ ecc_cfg->bch_enabled = true;
+ ecc_cfg->bytes = ecc_cfg->ecc_bytes_hw + ecc_cfg->spare_bytes + ecc_cfg->bbm_size;
+
+ ecc_cfg->steps = 4;
+ ecc_cfg->strength = 4;
+ ecc_cfg->step_size = 512;
+ ecc_cfg->cw_data = 516;
+ ecc_cfg->cw_size = ecc_cfg->cw_data + ecc_cfg->bytes;
+ bad_block_byte = mtd->writesize - ecc_cfg->cw_size * (cwperpage - 1) + 1;
+
+ mtd_set_ooblayout(mtd, &qcom_spi_ooblayout);
+
+ ecc_cfg->cfg0 = FIELD_PREP(CW_PER_PAGE_MASK, (cwperpage - 1)) |
+ FIELD_PREP(UD_SIZE_BYTES_MASK, ecc_cfg->cw_data) |
+ FIELD_PREP(DISABLE_STATUS_AFTER_WRITE, 1) |
+ FIELD_PREP(NUM_ADDR_CYCLES_MASK, 3) |
+ FIELD_PREP(ECC_PARITY_SIZE_BYTES_RS, ecc_cfg->ecc_bytes_hw) |
+ FIELD_PREP(STATUS_BFR_READ, 0) |
+ FIELD_PREP(SET_RD_MODE_AFTER_STATUS, 1) |
+ FIELD_PREP(SPARE_SIZE_BYTES_MASK, ecc_cfg->spare_bytes);
+
+ ecc_cfg->cfg1 = FIELD_PREP(NAND_RECOVERY_CYCLES_MASK, 0) |
+ FIELD_PREP(CS_ACTIVE_BSY, 0) |
+ FIELD_PREP(BAD_BLOCK_BYTE_NUM_MASK, bad_block_byte) |
+ FIELD_PREP(BAD_BLOCK_IN_SPARE_AREA, 0) |
+ FIELD_PREP(WR_RD_BSY_GAP_MASK, 20) |
+ FIELD_PREP(WIDE_FLASH, 0) |
+ FIELD_PREP(ENABLE_BCH_ECC, ecc_cfg->bch_enabled);
+
+ ecc_cfg->cfg0_raw = FIELD_PREP(CW_PER_PAGE_MASK, (cwperpage - 1)) |
+ FIELD_PREP(NUM_ADDR_CYCLES_MASK, 3) |
+ FIELD_PREP(UD_SIZE_BYTES_MASK, ecc_cfg->cw_size) |
+ FIELD_PREP(SPARE_SIZE_BYTES_MASK, 0);
+
+ ecc_cfg->cfg1_raw = FIELD_PREP(NAND_RECOVERY_CYCLES_MASK, 0) |
+ FIELD_PREP(CS_ACTIVE_BSY, 0) |
+ FIELD_PREP(BAD_BLOCK_BYTE_NUM_MASK, 17) |
+ FIELD_PREP(BAD_BLOCK_IN_SPARE_AREA, 1) |
+ FIELD_PREP(WR_RD_BSY_GAP_MASK, 20) |
+ FIELD_PREP(WIDE_FLASH, 0) |
+ FIELD_PREP(DEV0_CFG1_ECC_DISABLE, 1);
+
+ ecc_cfg->ecc_bch_cfg = FIELD_PREP(ECC_CFG_ECC_DISABLE, !ecc_cfg->bch_enabled) |
+ FIELD_PREP(ECC_SW_RESET, 0) |
+ FIELD_PREP(ECC_NUM_DATA_BYTES_MASK, ecc_cfg->cw_data) |
+ FIELD_PREP(ECC_FORCE_CLK_OPEN, 1) |
+ FIELD_PREP(ECC_MODE_MASK, 0) |
+ FIELD_PREP(ECC_PARITY_SIZE_BYTES_BCH_MASK, ecc_cfg->ecc_bytes_hw);
+
+ ecc_cfg->ecc_buf_cfg = 0x203 << NUM_STEPS;
+ ecc_cfg->clrflashstatus = FS_READY_BSY_N;
+ ecc_cfg->clrreadstatus = 0xc0;
+
+ conf->step_size = ecc_cfg->step_size;
+ conf->strength = ecc_cfg->strength;
+
+ snandc->regs->erased_cw_detect_cfg_clr = cpu_to_le32(CLR_ERASED_PAGE_DET);
+ snandc->regs->erased_cw_detect_cfg_set = cpu_to_le32(SET_ERASED_PAGE_DET);
+
+ dev_dbg(snandc->dev, "ECC strength: %u bits per %u bytes\n",
+ ecc_cfg->strength, ecc_cfg->step_size);
+
+ return 0;
+}
+
+static void qcom_spi_ecc_cleanup_ctx_pipelined(struct nand_device *nand)
+{
+ struct qpic_ecc *ecc_cfg = nand_to_ecc_ctx(nand);
+
+ kfree(ecc_cfg);
+}
+
+static int qcom_spi_ecc_prepare_io_req_pipelined(struct nand_device *nand,
+ struct nand_page_io_req *req)
+{
+ struct qcom_nand_controller *snandc = nand_to_qcom_snand(nand);
+ struct qpic_ecc *ecc_cfg = nand_to_ecc_ctx(nand);
+
+ snandc->qspi->ecc = ecc_cfg;
+ snandc->qspi->raw_rw = false;
+ snandc->qspi->oob_rw = false;
+ snandc->qspi->page_rw = false;
+
+ if (req->datalen)
+ snandc->qspi->page_rw = true;
+
+ if (req->ooblen)
+ snandc->qspi->oob_rw = true;
+
+ if (req->mode == MTD_OPS_RAW)
+ snandc->qspi->raw_rw = true;
+
+ return 0;
+}
+
+static int qcom_spi_ecc_finish_io_req_pipelined(struct nand_device *nand,
+ struct nand_page_io_req *req)
+{
+ struct qcom_nand_controller *snandc = nand_to_qcom_snand(nand);
+ struct mtd_info *mtd = nanddev_to_mtd(nand);
+
+ if (req->mode == MTD_OPS_RAW || req->type != NAND_PAGE_READ)
+ return 0;
+
+ if (snandc->qspi->ecc_stats.failed)
+ mtd->ecc_stats.failed += snandc->qspi->ecc_stats.failed;
+ else
+ mtd->ecc_stats.corrected += snandc->qspi->ecc_stats.corrected;
+
+ if (snandc->qspi->ecc_stats.failed)
+ return -EBADMSG;
+ else
+ return snandc->qspi->ecc_stats.bitflips;
+}
+
+static struct nand_ecc_engine_ops qcom_spi_ecc_engine_ops_pipelined = {
+ .init_ctx = qcom_spi_ecc_init_ctx_pipelined,
+ .cleanup_ctx = qcom_spi_ecc_cleanup_ctx_pipelined,
+ .prepare_io_req = qcom_spi_ecc_prepare_io_req_pipelined,
+ .finish_io_req = qcom_spi_ecc_finish_io_req_pipelined,
+};
+
+/* helper to configure location register values */
+static void qcom_spi_set_read_loc(struct qcom_nand_controller *snandc, int cw, int reg,
+ int cw_offset, int read_size, int is_last_read_loc)
+{
+ int reg_base = NAND_READ_LOCATION_0;
+ int num_cw = snandc->qspi->num_cw;
+
+ if (cw == (num_cw - 1))
+ reg_base = NAND_READ_LOCATION_LAST_CW_0;
+
+ reg_base += reg * 4;
+
+ if (cw == (num_cw - 1))
+ return qcom_spi_set_read_loc_last(snandc, reg_base, cw_offset,
+ read_size, is_last_read_loc);
+ else
+ return qcom_spi_set_read_loc_first(snandc, reg_base, cw_offset,
+ read_size, is_last_read_loc);
+}
+
+static void
+qcom_spi_config_cw_read(struct qcom_nand_controller *snandc, bool use_ecc, int cw)
+{
+ __le32 *reg = &snandc->regs->read_location0;
+ int num_cw = snandc->qspi->num_cw;
+
+ qcom_write_reg_dma(snandc, reg, NAND_READ_LOCATION_0, 4, NAND_BAM_NEXT_SGL);
+ if (cw == (num_cw - 1)) {
+ reg = &snandc->regs->read_location_last0;
+ qcom_write_reg_dma(snandc, reg, NAND_READ_LOCATION_LAST_CW_0, 4,
+ NAND_BAM_NEXT_SGL);
+ }
+
+ qcom_write_reg_dma(snandc, &snandc->regs->cmd, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
+ qcom_write_reg_dma(snandc, &snandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
+
+ qcom_read_reg_dma(snandc, NAND_FLASH_STATUS, 2, 0);
+ qcom_read_reg_dma(snandc, NAND_ERASED_CW_DETECT_STATUS, 1,
+ NAND_BAM_NEXT_SGL);
+}
+
+static int qcom_spi_block_erase(struct qcom_nand_controller *snandc)
+{
+ struct qpic_ecc *ecc_cfg = snandc->qspi->ecc;
+ int ret;
+
+ snandc->buf_count = 0;
+ snandc->buf_start = 0;
+ qcom_clear_read_regs(snandc);
+ qcom_clear_bam_transaction(snandc);
+
+ snandc->regs->cmd = snandc->qspi->cmd;
+ snandc->regs->addr0 = snandc->qspi->addr1;
+ snandc->regs->addr1 = snandc->qspi->addr2;
+ snandc->regs->cfg0 = cpu_to_le32(ecc_cfg->cfg0_raw & ~(7 << CW_PER_PAGE));
+ snandc->regs->cfg1 = cpu_to_le32(ecc_cfg->cfg1_raw);
+ snandc->regs->exec = cpu_to_le32(1);
+
+ qcom_write_reg_dma(snandc, &snandc->regs->cmd, NAND_FLASH_CMD, 3, NAND_BAM_NEXT_SGL);
+ qcom_write_reg_dma(snandc, &snandc->regs->cfg0, NAND_DEV0_CFG0, 2, NAND_BAM_NEXT_SGL);
+ qcom_write_reg_dma(snandc, &snandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
+
+ ret = qcom_submit_descs(snandc);
+ if (ret) {
+ dev_err(snandc->dev, "failure to erase block\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static void qcom_spi_config_single_cw_page_read(struct qcom_nand_controller *snandc,
+ bool use_ecc, int cw)
+{
+ __le32 *reg = &snandc->regs->read_location0;
+ int num_cw = snandc->qspi->num_cw;
+
+ qcom_write_reg_dma(snandc, &snandc->regs->addr0, NAND_ADDR0, 2, 0);
+ qcom_write_reg_dma(snandc, &snandc->regs->cfg0, NAND_DEV0_CFG0, 3, 0);
+ qcom_write_reg_dma(snandc, &snandc->regs->erased_cw_detect_cfg_clr,
+ NAND_ERASED_CW_DETECT_CFG, 1, 0);
+ qcom_write_reg_dma(snandc, &snandc->regs->erased_cw_detect_cfg_set,
+ NAND_ERASED_CW_DETECT_CFG, 1,
+ NAND_ERASED_CW_SET | NAND_BAM_NEXT_SGL);
+
+ if (cw == (num_cw - 1)) {
+ reg = &snandc->regs->read_location_last0;
+ qcom_write_reg_dma(snandc, reg, NAND_READ_LOCATION_LAST_CW_0, 4, NAND_BAM_NEXT_SGL);
+ }
+ qcom_write_reg_dma(snandc, &snandc->regs->cmd, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
+ qcom_write_reg_dma(snandc, &snandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
+
+ qcom_read_reg_dma(snandc, NAND_FLASH_STATUS, 1, 0);
+}
+
+static int qcom_spi_read_last_cw(struct qcom_nand_controller *snandc,
+ const struct spi_mem_op *op)
+{
+ struct qpic_ecc *ecc_cfg = snandc->qspi->ecc;
+ struct mtd_info *mtd = snandc->qspi->mtd;
+ int size, ret = 0;
+ int col, bbpos;
+ u32 cfg0, cfg1, ecc_bch_cfg;
+ u32 num_cw = snandc->qspi->num_cw;
+
+ qcom_clear_bam_transaction(snandc);
+ qcom_clear_read_regs(snandc);
+
+ size = ecc_cfg->cw_size;
+ col = ecc_cfg->cw_size * (num_cw - 1);
+
+ memset(snandc->data_buffer, 0xff, size);
+ snandc->regs->addr0 = (snandc->qspi->addr1 | cpu_to_le32(col));
+ snandc->regs->addr1 = snandc->qspi->addr2;
+
+ cfg0 = (ecc_cfg->cfg0_raw & ~(7U << CW_PER_PAGE)) |
+ 0 << CW_PER_PAGE;
+ cfg1 = ecc_cfg->cfg1_raw;
+ ecc_bch_cfg = ECC_CFG_ECC_DISABLE;
+
+ snandc->regs->cmd = snandc->qspi->cmd;
+ snandc->regs->cfg0 = cpu_to_le32(cfg0);
+ snandc->regs->cfg1 = cpu_to_le32(cfg1);
+ snandc->regs->ecc_bch_cfg = cpu_to_le32(ecc_bch_cfg);
+ snandc->regs->clrflashstatus = cpu_to_le32(ecc_cfg->clrflashstatus);
+ snandc->regs->clrreadstatus = cpu_to_le32(ecc_cfg->clrreadstatus);
+ snandc->regs->exec = cpu_to_le32(1);
+
+ qcom_spi_set_read_loc(snandc, num_cw - 1, 0, 0, ecc_cfg->cw_size, 1);
+
+ qcom_spi_config_single_cw_page_read(snandc, false, num_cw - 1);
+
+ qcom_read_data_dma(snandc, FLASH_BUF_ACC, snandc->data_buffer, size, 0);
+
+ ret = qcom_submit_descs(snandc);
+ if (ret) {
+ dev_err(snandc->dev, "failed to read last cw\n");
+ return ret;
+ }
+
+ qcom_nandc_dev_to_mem(snandc, true);
+ u32 flash = le32_to_cpu(snandc->reg_read_buf[0]);
+
+ if (flash & (FS_OP_ERR | FS_MPU_ERR))
+ return -EIO;
+
+ bbpos = mtd->writesize - ecc_cfg->cw_size * (num_cw - 1);
+
+ if (snandc->data_buffer[bbpos] == 0xff)
+ snandc->data_buffer[bbpos + 1] = 0xff;
+ if (snandc->data_buffer[bbpos] != 0xff)
+ snandc->data_buffer[bbpos + 1] = snandc->data_buffer[bbpos];
+
+ memcpy(op->data.buf.in, snandc->data_buffer + bbpos, op->data.nbytes);
+
+ return ret;
+}
+
+static int qcom_spi_check_error(struct qcom_nand_controller *snandc, u8 *data_buf, u8 *oob_buf)
+{
+ struct snandc_read_status *buf;
+ struct qpic_ecc *ecc_cfg = snandc->qspi->ecc;
+ int i, num_cw = snandc->qspi->num_cw;
+ bool flash_op_err = false, erased;
+ unsigned int max_bitflips = 0;
+ unsigned int uncorrectable_cws = 0;
+
+ snandc->qspi->ecc_stats.failed = 0;
+ snandc->qspi->ecc_stats.corrected = 0;
+
+ qcom_nandc_dev_to_mem(snandc, true);
+ buf = (struct snandc_read_status *)snandc->reg_read_buf;
+
+ for (i = 0; i < num_cw; i++, buf++) {
+ u32 flash, buffer, erased_cw;
+ int data_len, oob_len;
+
+ if (i == (num_cw - 1)) {
+ data_len = NANDC_STEP_SIZE - ((num_cw - 1) << 2);
+ oob_len = num_cw << 2;
+ } else {
+ data_len = ecc_cfg->cw_data;
+ oob_len = 0;
+ }
+
+ flash = le32_to_cpu(buf->snandc_flash);
+ buffer = le32_to_cpu(buf->snandc_buffer);
+ erased_cw = le32_to_cpu(buf->snandc_erased_cw);
+
+ if ((flash & FS_OP_ERR) && (buffer & BS_UNCORRECTABLE_BIT)) {
+ if (ecc_cfg->bch_enabled)
+ erased = (erased_cw & ERASED_CW) == ERASED_CW;
+ else
+ erased = false;
+
+ if (!erased)
+ uncorrectable_cws |= BIT(i);
+
+ } else if (flash & (FS_OP_ERR | FS_MPU_ERR)) {
+ flash_op_err = true;
+ } else {
+ unsigned int stat;
+
+ stat = buffer & BS_CORRECTABLE_ERR_MSK;
+ snandc->qspi->ecc_stats.corrected += stat;
+ max_bitflips = max(max_bitflips, stat);
+ }
+
+ if (data_buf)
+ data_buf += data_len;
+ if (oob_buf)
+ oob_buf += oob_len + ecc_cfg->bytes;
+ }
+
+ if (flash_op_err)
+ return -EIO;
+
+ if (!uncorrectable_cws)
+ snandc->qspi->ecc_stats.bitflips = max_bitflips;
+ else
+ snandc->qspi->ecc_stats.failed++;
+
+ return 0;
+}
+
+static int qcom_spi_check_raw_flash_errors(struct qcom_nand_controller *snandc, int cw_cnt)
+{
+ int i;
+
+ qcom_nandc_dev_to_mem(snandc, true);
+
+ for (i = 0; i < cw_cnt; i++) {
+ u32 flash = le32_to_cpu(snandc->reg_read_buf[i]);
+
+ if (flash & (FS_OP_ERR | FS_MPU_ERR))
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int qcom_spi_read_cw_raw(struct qcom_nand_controller *snandc, u8 *data_buf,
+ u8 *oob_buf, int cw)
+{
+ struct qpic_ecc *ecc_cfg = snandc->qspi->ecc;
+ struct mtd_info *mtd = snandc->qspi->mtd;
+ int data_size1, data_size2, oob_size1, oob_size2;
+ int ret, reg_off = FLASH_BUF_ACC, read_loc = 0;
+ int raw_cw = cw;
+ u32 cfg0, cfg1, ecc_bch_cfg, num_cw = snandc->qspi->num_cw;
+ int col;
+
+ snandc->buf_count = 0;
+ snandc->buf_start = 0;
+ qcom_clear_read_regs(snandc);
+ qcom_clear_bam_transaction(snandc);
+ raw_cw = num_cw - 1;
+
+ cfg0 = (ecc_cfg->cfg0_raw & ~(7U << CW_PER_PAGE)) |
+ 0 << CW_PER_PAGE;
+ cfg1 = ecc_cfg->cfg1_raw;
+ ecc_bch_cfg = ECC_CFG_ECC_DISABLE;
+
+ col = ecc_cfg->cw_size * cw;
+
+ snandc->regs->addr0 = (snandc->qspi->addr1 | cpu_to_le32(col));
+ snandc->regs->addr1 = snandc->qspi->addr2;
+ snandc->regs->cmd = snandc->qspi->cmd;
+ snandc->regs->cfg0 = cpu_to_le32(cfg0);
+ snandc->regs->cfg1 = cpu_to_le32(cfg1);
+ snandc->regs->ecc_bch_cfg = cpu_to_le32(ecc_bch_cfg);
+ snandc->regs->clrflashstatus = cpu_to_le32(ecc_cfg->clrflashstatus);
+ snandc->regs->clrreadstatus = cpu_to_le32(ecc_cfg->clrreadstatus);
+ snandc->regs->exec = cpu_to_le32(1);
+
+ qcom_spi_set_read_loc(snandc, raw_cw, 0, 0, ecc_cfg->cw_size, 1);
+
+ qcom_write_reg_dma(snandc, &snandc->regs->addr0, NAND_ADDR0, 2, 0);
+ qcom_write_reg_dma(snandc, &snandc->regs->cfg0, NAND_DEV0_CFG0, 3, 0);
+ qcom_write_reg_dma(snandc, &snandc->regs->ecc_buf_cfg, NAND_EBI2_ECC_BUF_CFG, 1, 0);
+
+ qcom_write_reg_dma(snandc, &snandc->regs->erased_cw_detect_cfg_clr,
+ NAND_ERASED_CW_DETECT_CFG, 1, 0);
+ qcom_write_reg_dma(snandc, &snandc->regs->erased_cw_detect_cfg_set,
+ NAND_ERASED_CW_DETECT_CFG, 1,
+ NAND_ERASED_CW_SET | NAND_BAM_NEXT_SGL);
+
+ data_size1 = mtd->writesize - ecc_cfg->cw_size * (num_cw - 1);
+ oob_size1 = ecc_cfg->bbm_size;
+
+ if (cw == (num_cw - 1)) {
+ data_size2 = NANDC_STEP_SIZE - data_size1 -
+ ((num_cw - 1) * 4);
+ oob_size2 = (num_cw * 4) + ecc_cfg->ecc_bytes_hw +
+ ecc_cfg->spare_bytes;
+ } else {
+ data_size2 = ecc_cfg->cw_data - data_size1;
+ oob_size2 = ecc_cfg->ecc_bytes_hw + ecc_cfg->spare_bytes;
+ }
+
+ qcom_spi_set_read_loc(snandc, cw, 0, read_loc, data_size1, 0);
+ read_loc += data_size1;
+
+ qcom_spi_set_read_loc(snandc, cw, 1, read_loc, oob_size1, 0);
+ read_loc += oob_size1;
+
+ qcom_spi_set_read_loc(snandc, cw, 2, read_loc, data_size2, 0);
+ read_loc += data_size2;
+
+ qcom_spi_set_read_loc(snandc, cw, 3, read_loc, oob_size2, 1);
+
+ qcom_spi_config_cw_read(snandc, false, raw_cw);
+
+ qcom_read_data_dma(snandc, reg_off, data_buf, data_size1, 0);
+ reg_off += data_size1;
+
+ qcom_read_data_dma(snandc, reg_off, oob_buf, oob_size1, 0);
+ reg_off += oob_size1;
+
+ qcom_read_data_dma(snandc, reg_off, data_buf + data_size1, data_size2, 0);
+ reg_off += data_size2;
+
+ qcom_read_data_dma(snandc, reg_off, oob_buf + oob_size1, oob_size2, 0);
+
+ ret = qcom_submit_descs(snandc);
+ if (ret) {
+ dev_err(snandc->dev, "failure to read raw cw %d\n", cw);
+ return ret;
+ }
+
+ return qcom_spi_check_raw_flash_errors(snandc, 1);
+}
+
+static int qcom_spi_read_page_raw(struct qcom_nand_controller *snandc,
+ const struct spi_mem_op *op)
+{
+ struct qpic_ecc *ecc_cfg = snandc->qspi->ecc;
+ u8 *data_buf = NULL, *oob_buf = NULL;
+ int ret, cw;
+ u32 num_cw = snandc->qspi->num_cw;
+
+ if (snandc->qspi->page_rw)
+ data_buf = op->data.buf.in;
+
+ oob_buf = snandc->qspi->oob_buf;
+ memset(oob_buf, 0xff, OOB_BUF_SIZE);
+
+ for (cw = 0; cw < num_cw; cw++) {
+ ret = qcom_spi_read_cw_raw(snandc, data_buf, oob_buf, cw);
+ if (ret)
+ return ret;
+
+ if (data_buf)
+ data_buf += ecc_cfg->cw_data;
+ if (oob_buf)
+ oob_buf += ecc_cfg->bytes;
+ }
+
+ return 0;
+}
+
+static int qcom_spi_read_page_ecc(struct qcom_nand_controller *snandc,
+ const struct spi_mem_op *op)
+{
+ struct qpic_ecc *ecc_cfg = snandc->qspi->ecc;
+ u8 *data_buf = NULL, *data_buf_start, *oob_buf = NULL, *oob_buf_start;
+ int ret, i;
+ u32 cfg0, cfg1, ecc_bch_cfg, num_cw = snandc->qspi->num_cw;
+
+ data_buf = op->data.buf.in;
+ data_buf_start = data_buf;
+
+ oob_buf = snandc->qspi->oob_buf;
+ oob_buf_start = oob_buf;
+
+ snandc->buf_count = 0;
+ snandc->buf_start = 0;
+ qcom_clear_read_regs(snandc);
+
+ cfg0 = (ecc_cfg->cfg0 & ~(7U << CW_PER_PAGE)) |
+ (num_cw - 1) << CW_PER_PAGE;
+ cfg1 = ecc_cfg->cfg1;
+ ecc_bch_cfg = ecc_cfg->ecc_bch_cfg;
+
+ snandc->regs->addr0 = snandc->qspi->addr1;
+ snandc->regs->addr1 = snandc->qspi->addr2;
+ snandc->regs->cmd = snandc->qspi->cmd;
+ snandc->regs->cfg0 = cpu_to_le32(cfg0);
+ snandc->regs->cfg1 = cpu_to_le32(cfg1);
+ snandc->regs->ecc_bch_cfg = cpu_to_le32(ecc_bch_cfg);
+ snandc->regs->clrflashstatus = cpu_to_le32(ecc_cfg->clrflashstatus);
+ snandc->regs->clrreadstatus = cpu_to_le32(ecc_cfg->clrreadstatus);
+ snandc->regs->exec = cpu_to_le32(1);
+
+ qcom_spi_set_read_loc(snandc, 0, 0, 0, ecc_cfg->cw_data, 1);
+
+ qcom_clear_bam_transaction(snandc);
+
+ qcom_write_reg_dma(snandc, &snandc->regs->addr0, NAND_ADDR0, 2, 0);
+ qcom_write_reg_dma(snandc, &snandc->regs->cfg0, NAND_DEV0_CFG0, 3, 0);
+ qcom_write_reg_dma(snandc, &snandc->regs->erased_cw_detect_cfg_clr,
+ NAND_ERASED_CW_DETECT_CFG, 1, 0);
+ qcom_write_reg_dma(snandc, &snandc->regs->erased_cw_detect_cfg_set,
+ NAND_ERASED_CW_DETECT_CFG, 1,
+ NAND_ERASED_CW_SET | NAND_BAM_NEXT_SGL);
+
+ for (i = 0; i < num_cw; i++) {
+ int data_size, oob_size;
+
+ if (i == (num_cw - 1)) {
+ data_size = 512 - ((num_cw - 1) << 2);
+ oob_size = (num_cw << 2) + ecc_cfg->ecc_bytes_hw +
+ ecc_cfg->spare_bytes;
+ } else {
+ data_size = ecc_cfg->cw_data;
+ oob_size = ecc_cfg->ecc_bytes_hw + ecc_cfg->spare_bytes;
+ }
+
+ if (data_buf && oob_buf) {
+ qcom_spi_set_read_loc(snandc, i, 0, 0, data_size, 0);
+ qcom_spi_set_read_loc(snandc, i, 1, data_size, oob_size, 1);
+ } else if (data_buf) {
+ qcom_spi_set_read_loc(snandc, i, 0, 0, data_size, 1);
+ } else {
+ qcom_spi_set_read_loc(snandc, i, 0, data_size, oob_size, 1);
+ }
+
+ qcom_spi_config_cw_read(snandc, true, i);
+
+ if (data_buf)
+ qcom_read_data_dma(snandc, FLASH_BUF_ACC, data_buf,
+ data_size, 0);
+ if (oob_buf) {
+ int j;
+
+ for (j = 0; j < ecc_cfg->bbm_size; j++)
+ *oob_buf++ = 0xff;
+
+ qcom_read_data_dma(snandc, FLASH_BUF_ACC + data_size,
+ oob_buf, oob_size, 0);
+ }
+
+ if (data_buf)
+ data_buf += data_size;
+ if (oob_buf)
+ oob_buf += oob_size;
+ }
+
+ ret = qcom_submit_descs(snandc);
+ if (ret) {
+ dev_err(snandc->dev, "failure to read page\n");
+ return ret;
+ }
+
+ return qcom_spi_check_error(snandc, data_buf_start, oob_buf_start);
+}
+
+static int qcom_spi_read_page_oob(struct qcom_nand_controller *snandc,
+ const struct spi_mem_op *op)
+{
+ struct qpic_ecc *ecc_cfg = snandc->qspi->ecc;
+ u8 *data_buf = NULL, *data_buf_start, *oob_buf = NULL, *oob_buf_start;
+ int ret, i;
+ u32 cfg0, cfg1, ecc_bch_cfg, num_cw = snandc->qspi->num_cw;
+
+ oob_buf = op->data.buf.in;
+ oob_buf_start = oob_buf;
+
+ data_buf_start = data_buf;
+
+ snandc->buf_count = 0;
+ snandc->buf_start = 0;
+ qcom_clear_read_regs(snandc);
+ qcom_clear_bam_transaction(snandc);
+
+ cfg0 = (ecc_cfg->cfg0 & ~(7U << CW_PER_PAGE)) |
+ (num_cw - 1) << CW_PER_PAGE;
+ cfg1 = ecc_cfg->cfg1;
+ ecc_bch_cfg = ecc_cfg->ecc_bch_cfg;
+
+ snandc->regs->addr0 = snandc->qspi->addr1;
+ snandc->regs->addr1 = snandc->qspi->addr2;
+ snandc->regs->cmd = snandc->qspi->cmd;
+ snandc->regs->cfg0 = cpu_to_le32(cfg0);
+ snandc->regs->cfg1 = cpu_to_le32(cfg1);
+ snandc->regs->ecc_bch_cfg = cpu_to_le32(ecc_bch_cfg);
+ snandc->regs->clrflashstatus = cpu_to_le32(ecc_cfg->clrflashstatus);
+ snandc->regs->clrreadstatus = cpu_to_le32(ecc_cfg->clrreadstatus);
+ snandc->regs->exec = cpu_to_le32(1);
+
+ qcom_spi_set_read_loc(snandc, 0, 0, 0, ecc_cfg->cw_data, 1);
+
+ qcom_write_reg_dma(snandc, &snandc->regs->addr0, NAND_ADDR0, 2, 0);
+ qcom_write_reg_dma(snandc, &snandc->regs->cfg0, NAND_DEV0_CFG0, 3, 0);
+ qcom_write_reg_dma(snandc, &snandc->regs->erased_cw_detect_cfg_clr,
+ NAND_ERASED_CW_DETECT_CFG, 1, 0);
+ qcom_write_reg_dma(snandc, &snandc->regs->erased_cw_detect_cfg_set,
+ NAND_ERASED_CW_DETECT_CFG, 1,
+ NAND_ERASED_CW_SET | NAND_BAM_NEXT_SGL);
+
+ for (i = 0; i < num_cw; i++) {
+ int data_size, oob_size;
+
+ if (i == (num_cw - 1)) {
+ data_size = NANDC_STEP_SIZE - ((num_cw - 1) << 2);
+ oob_size = (num_cw << 2) + ecc_cfg->ecc_bytes_hw +
+ ecc_cfg->spare_bytes;
+ } else {
+ data_size = ecc_cfg->cw_data;
+ oob_size = ecc_cfg->ecc_bytes_hw + ecc_cfg->spare_bytes;
+ }
+
+ qcom_spi_set_read_loc(snandc, i, 0, data_size, oob_size, 1);
+
+ qcom_spi_config_cw_read(snandc, true, i);
+
+ if (oob_buf) {
+ int j;
+
+ for (j = 0; j < ecc_cfg->bbm_size; j++)
+ *oob_buf++ = 0xff;
+
+ qcom_read_data_dma(snandc, FLASH_BUF_ACC + data_size,
+ oob_buf, oob_size, 0);
+ }
+
+ if (oob_buf)
+ oob_buf += oob_size;
+ }
+
+ ret = qcom_submit_descs(snandc);
+ if (ret) {
+ dev_err(snandc->dev, "failure to read oob\n");
+ return ret;
+ }
+
+ return qcom_spi_check_error(snandc, data_buf_start, oob_buf_start);
+}
+
+static int qcom_spi_read_page(struct qcom_nand_controller *snandc,
+ const struct spi_mem_op *op)
+{
+ if (snandc->qspi->page_rw && snandc->qspi->raw_rw)
+ return qcom_spi_read_page_raw(snandc, op);
+
+ if (snandc->qspi->page_rw)
+ return qcom_spi_read_page_ecc(snandc, op);
+
+ if (snandc->qspi->oob_rw && snandc->qspi->raw_rw)
+ return qcom_spi_read_last_cw(snandc, op);
+
+ if (snandc->qspi->oob_rw)
+ return qcom_spi_read_page_oob(snandc, op);
+
+ return 0;
+}
+
+static void qcom_spi_config_page_write(struct qcom_nand_controller *snandc)
+{
+ qcom_write_reg_dma(snandc, &snandc->regs->addr0, NAND_ADDR0, 2, 0);
+ qcom_write_reg_dma(snandc, &snandc->regs->cfg0, NAND_DEV0_CFG0, 3, 0);
+ qcom_write_reg_dma(snandc, &snandc->regs->ecc_buf_cfg, NAND_EBI2_ECC_BUF_CFG,
+ 1, NAND_BAM_NEXT_SGL);
+}
+
+static void qcom_spi_config_cw_write(struct qcom_nand_controller *snandc)
+{
+ qcom_write_reg_dma(snandc, &snandc->regs->cmd, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
+ qcom_write_reg_dma(snandc, &snandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
+ qcom_read_reg_dma(snandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
+
+ qcom_write_reg_dma(snandc, &snandc->regs->clrflashstatus, NAND_FLASH_STATUS, 1, 0);
+ qcom_write_reg_dma(snandc, &snandc->regs->clrreadstatus, NAND_READ_STATUS, 1,
+ NAND_BAM_NEXT_SGL);
+}
+
+static int qcom_spi_program_raw(struct qcom_nand_controller *snandc,
+ const struct spi_mem_op *op)
+{
+ struct qpic_ecc *ecc_cfg = snandc->qspi->ecc;
+ struct mtd_info *mtd = snandc->qspi->mtd;
+ u8 *data_buf = NULL, *oob_buf = NULL;
+ int i, ret;
+ int num_cw = snandc->qspi->num_cw;
+ u32 cfg0, cfg1, ecc_bch_cfg;
+
+ cfg0 = (ecc_cfg->cfg0_raw & ~(7U << CW_PER_PAGE)) |
+ (num_cw - 1) << CW_PER_PAGE;
+ cfg1 = ecc_cfg->cfg1_raw;
+ ecc_bch_cfg = ECC_CFG_ECC_DISABLE;
+
+ data_buf = snandc->qspi->data_buf;
+
+ oob_buf = snandc->qspi->oob_buf;
+ memset(oob_buf, 0xff, OOB_BUF_SIZE);
+
+ snandc->buf_count = 0;
+ snandc->buf_start = 0;
+ qcom_clear_read_regs(snandc);
+ qcom_clear_bam_transaction(snandc);
+
+ snandc->regs->addr0 = snandc->qspi->addr1;
+ snandc->regs->addr1 = snandc->qspi->addr2;
+ snandc->regs->cmd = snandc->qspi->cmd;
+ snandc->regs->cfg0 = cpu_to_le32(cfg0);
+ snandc->regs->cfg1 = cpu_to_le32(cfg1);
+ snandc->regs->ecc_bch_cfg = cpu_to_le32(ecc_bch_cfg);
+ snandc->regs->clrflashstatus = cpu_to_le32(ecc_cfg->clrflashstatus);
+ snandc->regs->clrreadstatus = cpu_to_le32(ecc_cfg->clrreadstatus);
+ snandc->regs->exec = cpu_to_le32(1);
+
+ qcom_spi_config_page_write(snandc);
+
+ for (i = 0; i < num_cw; i++) {
+ int data_size1, data_size2, oob_size1, oob_size2;
+ int reg_off = FLASH_BUF_ACC;
+
+ data_size1 = mtd->writesize - ecc_cfg->cw_size * (num_cw - 1);
+ oob_size1 = ecc_cfg->bbm_size;
+
+ if (i == (num_cw - 1)) {
+ data_size2 = NANDC_STEP_SIZE - data_size1 -
+ ((num_cw - 1) << 2);
+ oob_size2 = (num_cw << 2) + ecc_cfg->ecc_bytes_hw +
+ ecc_cfg->spare_bytes;
+ } else {
+ data_size2 = ecc_cfg->cw_data - data_size1;
+ oob_size2 = ecc_cfg->ecc_bytes_hw + ecc_cfg->spare_bytes;
+ }
+
+ qcom_write_data_dma(snandc, reg_off, data_buf, data_size1,
+ NAND_BAM_NO_EOT);
+ reg_off += data_size1;
+ data_buf += data_size1;
+
+ qcom_write_data_dma(snandc, reg_off, oob_buf, oob_size1,
+ NAND_BAM_NO_EOT);
+ oob_buf += oob_size1;
+ reg_off += oob_size1;
+
+ qcom_write_data_dma(snandc, reg_off, data_buf, data_size2,
+ NAND_BAM_NO_EOT);
+ reg_off += data_size2;
+ data_buf += data_size2;
+
+ qcom_write_data_dma(snandc, reg_off, oob_buf, oob_size2, 0);
+ oob_buf += oob_size2;
+
+ qcom_spi_config_cw_write(snandc);
+ }
+
+ ret = qcom_submit_descs(snandc);
+ if (ret) {
+ dev_err(snandc->dev, "failure to write raw page\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int qcom_spi_program_ecc(struct qcom_nand_controller *snandc,
+ const struct spi_mem_op *op)
+{
+ struct qpic_ecc *ecc_cfg = snandc->qspi->ecc;
+ u8 *data_buf = NULL, *oob_buf = NULL;
+ int i, ret;
+ int num_cw = snandc->qspi->num_cw;
+ u32 cfg0, cfg1, ecc_bch_cfg, ecc_buf_cfg;
+
+ cfg0 = (ecc_cfg->cfg0 & ~(7U << CW_PER_PAGE)) |
+ (num_cw - 1) << CW_PER_PAGE;
+ cfg1 = ecc_cfg->cfg1;
+ ecc_bch_cfg = ecc_cfg->ecc_bch_cfg;
+ ecc_buf_cfg = ecc_cfg->ecc_buf_cfg;
+
+ if (snandc->qspi->data_buf)
+ data_buf = snandc->qspi->data_buf;
+
+ oob_buf = snandc->qspi->oob_buf;
+
+ snandc->buf_count = 0;
+ snandc->buf_start = 0;
+ qcom_clear_read_regs(snandc);
+ qcom_clear_bam_transaction(snandc);
+
+ snandc->regs->addr0 = snandc->qspi->addr1;
+ snandc->regs->addr1 = snandc->qspi->addr2;
+ snandc->regs->cmd = snandc->qspi->cmd;
+ snandc->regs->cfg0 = cpu_to_le32(cfg0);
+ snandc->regs->cfg1 = cpu_to_le32(cfg1);
+ snandc->regs->ecc_bch_cfg = cpu_to_le32(ecc_bch_cfg);
+ snandc->regs->ecc_buf_cfg = cpu_to_le32(ecc_buf_cfg);
+ snandc->regs->exec = cpu_to_le32(1);
+
+ qcom_spi_config_page_write(snandc);
+
+ for (i = 0; i < num_cw; i++) {
+ int data_size, oob_size;
+
+ if (i == (num_cw - 1)) {
+ data_size = NANDC_STEP_SIZE - ((num_cw - 1) << 2);
+ oob_size = (num_cw << 2) + ecc_cfg->ecc_bytes_hw +
+ ecc_cfg->spare_bytes;
+ } else {
+ data_size = ecc_cfg->cw_data;
+ oob_size = ecc_cfg->bytes;
+ }
+
+ if (data_buf)
+ qcom_write_data_dma(snandc, FLASH_BUF_ACC, data_buf, data_size,
+ i == (num_cw - 1) ? NAND_BAM_NO_EOT : 0);
+
+ if (i == (num_cw - 1)) {
+ if (oob_buf) {
+ oob_buf += ecc_cfg->bbm_size;
+ qcom_write_data_dma(snandc, FLASH_BUF_ACC + data_size,
+ oob_buf, oob_size, 0);
+ }
+ }
+
+ qcom_spi_config_cw_write(snandc);
+
+ if (data_buf)
+ data_buf += data_size;
+ if (oob_buf)
+ oob_buf += oob_size;
+ }
+
+ ret = qcom_submit_descs(snandc);
+ if (ret) {
+ dev_err(snandc->dev, "failure to write page\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int qcom_spi_program_oob(struct qcom_nand_controller *snandc,
+ const struct spi_mem_op *op)
+{
+ struct qpic_ecc *ecc_cfg = snandc->qspi->ecc;
+ u8 *oob_buf = NULL;
+ int ret, col, data_size, oob_size;
+ int num_cw = snandc->qspi->num_cw;
+ u32 cfg0, cfg1, ecc_bch_cfg, ecc_buf_cfg;
+
+ cfg0 = (ecc_cfg->cfg0 & ~(7U << CW_PER_PAGE)) |
+ (num_cw - 1) << CW_PER_PAGE;
+ cfg1 = ecc_cfg->cfg1;
+ ecc_bch_cfg = ecc_cfg->ecc_bch_cfg;
+ ecc_buf_cfg = ecc_cfg->ecc_buf_cfg;
+
+ col = ecc_cfg->cw_size * (num_cw - 1);
+
+ oob_buf = snandc->qspi->data_buf;
+
+ snandc->buf_count = 0;
+ snandc->buf_start = 0;
+ qcom_clear_read_regs(snandc);
+ qcom_clear_bam_transaction(snandc);
+ snandc->regs->addr0 = (snandc->qspi->addr1 | cpu_to_le32(col));
+ snandc->regs->addr1 = snandc->qspi->addr2;
+ snandc->regs->cmd = snandc->qspi->cmd;
+ snandc->regs->cfg0 = cpu_to_le32(cfg0);
+ snandc->regs->cfg1 = cpu_to_le32(cfg1);
+ snandc->regs->ecc_bch_cfg = cpu_to_le32(ecc_bch_cfg);
+ snandc->regs->ecc_buf_cfg = cpu_to_le32(ecc_buf_cfg);
+ snandc->regs->exec = cpu_to_le32(1);
+
+ /* calculate the data and oob size for the last codeword/step */
+ data_size = NANDC_STEP_SIZE - ((num_cw - 1) << 2);
+ oob_size = snandc->qspi->mtd->oobavail;
+
+ memset(snandc->data_buffer, 0xff, ecc_cfg->cw_data);
+ /* write the new oob content into the last codeword */
+ mtd_ooblayout_get_databytes(snandc->qspi->mtd, snandc->data_buffer + data_size,
+ oob_buf, 0, snandc->qspi->mtd->oobavail);
+ qcom_spi_config_page_write(snandc);
+ qcom_write_data_dma(snandc, FLASH_BUF_ACC, snandc->data_buffer, data_size + oob_size, 0);
+ qcom_spi_config_cw_write(snandc);
+
+ ret = qcom_submit_descs(snandc);
+ if (ret) {
+ dev_err(snandc->dev, "failure to write oob\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int qcom_spi_program_execute(struct qcom_nand_controller *snandc,
+ const struct spi_mem_op *op)
+{
+ if (snandc->qspi->page_rw && snandc->qspi->raw_rw)
+ return qcom_spi_program_raw(snandc, op);
+
+ if (snandc->qspi->page_rw)
+ return qcom_spi_program_ecc(snandc, op);
+
+ if (snandc->qspi->oob_rw)
+ return qcom_spi_program_oob(snandc, op);
+
+ return 0;
+}
+
+static int qcom_spi_cmd_mapping(struct qcom_nand_controller *snandc, u32 opcode, u32 *cmd)
+{
+ switch (opcode) {
+ case SPINAND_RESET:
+ *cmd = (SPI_WP | SPI_HOLD | SPI_TRANSFER_MODE_x1 | OP_RESET_DEVICE);
+ break;
+ case SPINAND_READID:
+ *cmd = (SPI_WP | SPI_HOLD | SPI_TRANSFER_MODE_x1 | OP_FETCH_ID);
+ break;
+ case SPINAND_GET_FEATURE:
+ *cmd = (SPI_TRANSFER_MODE_x1 | SPI_WP | SPI_HOLD | ACC_FEATURE);
+ break;
+ case SPINAND_SET_FEATURE:
+ *cmd = (SPI_TRANSFER_MODE_x1 | SPI_WP | SPI_HOLD | ACC_FEATURE |
+ QPIC_SET_FEATURE);
+ break;
+ case SPINAND_READ:
+ if (snandc->qspi->raw_rw) {
+ *cmd = (PAGE_ACC | LAST_PAGE | SPI_TRANSFER_MODE_x1 |
+ SPI_WP | SPI_HOLD | OP_PAGE_READ);
+ } else {
+ *cmd = (PAGE_ACC | LAST_PAGE | SPI_TRANSFER_MODE_x1 |
+ SPI_WP | SPI_HOLD | OP_PAGE_READ_WITH_ECC);
+ }
+
+ break;
+ case SPINAND_ERASE:
+ *cmd = OP_BLOCK_ERASE | PAGE_ACC | LAST_PAGE | SPI_WP |
+ SPI_HOLD | SPI_TRANSFER_MODE_x1;
+ break;
+ case SPINAND_WRITE_EN:
+ *cmd = SPINAND_WRITE_EN;
+ break;
+ case SPINAND_PROGRAM_EXECUTE:
+ *cmd = (PAGE_ACC | LAST_PAGE | SPI_TRANSFER_MODE_x1 |
+ SPI_WP | SPI_HOLD | OP_PROGRAM_PAGE);
+ break;
+ case SPINAND_PROGRAM_LOAD:
+ *cmd = SPINAND_PROGRAM_LOAD;
+ break;
+ default:
+ dev_err(snandc->dev, "Opcode not supported: %u\n", opcode);
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int qcom_spi_write_page(struct qcom_nand_controller *snandc,
+ const struct spi_mem_op *op)
+{
+ int ret;
+ u32 cmd;
+
+ ret = qcom_spi_cmd_mapping(snandc, op->cmd.opcode, &cmd);
+ if (ret < 0)
+ return ret;
+
+ if (op->cmd.opcode == SPINAND_PROGRAM_LOAD)
+ snandc->qspi->data_buf = (u8 *)op->data.buf.out;
+
+ return 0;
+}
+
+static int qcom_spi_send_cmdaddr(struct qcom_nand_controller *snandc,
+ const struct spi_mem_op *op)
+{
+ struct qpic_snand_op s_op = {};
+ u32 cmd;
+ int ret, opcode;
+
+ ret = qcom_spi_cmd_mapping(snandc, op->cmd.opcode, &cmd);
+ if (ret < 0)
+ return ret;
+
+ s_op.cmd_reg = cmd;
+ s_op.addr1_reg = op->addr.val;
+ s_op.addr2_reg = 0;
+
+ opcode = op->cmd.opcode;
+
+ switch (opcode) {
+ case SPINAND_WRITE_EN:
+ return 0;
+ case SPINAND_PROGRAM_EXECUTE:
+ s_op.addr1_reg = op->addr.val << 16;
+ s_op.addr2_reg = op->addr.val >> 16 & 0xff;
+ snandc->qspi->addr1 = cpu_to_le32(s_op.addr1_reg);
+ snandc->qspi->addr2 = cpu_to_le32(s_op.addr2_reg);
+ snandc->qspi->cmd = cpu_to_le32(cmd);
+ return qcom_spi_program_execute(snandc, op);
+ case SPINAND_READ:
+ s_op.addr1_reg = (op->addr.val << 16);
+ s_op.addr2_reg = op->addr.val >> 16 & 0xff;
+ snandc->qspi->addr1 = cpu_to_le32(s_op.addr1_reg);
+ snandc->qspi->addr2 = cpu_to_le32(s_op.addr2_reg);
+ snandc->qspi->cmd = cpu_to_le32(cmd);
+ return 0;
+ case SPINAND_ERASE:
+ s_op.addr2_reg = (op->addr.val >> 16) & 0xffff;
+ s_op.addr1_reg = op->addr.val;
+ snandc->qspi->addr1 = cpu_to_le32(s_op.addr1_reg << 16);
+ snandc->qspi->addr2 = cpu_to_le32(s_op.addr2_reg);
+ snandc->qspi->cmd = cpu_to_le32(cmd);
+ qcom_spi_block_erase(snandc);
+ return 0;
+ default:
+ break;
+ }
+
+ snandc->buf_count = 0;
+ snandc->buf_start = 0;
+ qcom_clear_read_regs(snandc);
+ qcom_clear_bam_transaction(snandc);
+
+ snandc->regs->cmd = cpu_to_le32(s_op.cmd_reg);
+ snandc->regs->exec = cpu_to_le32(1);
+ snandc->regs->addr0 = cpu_to_le32(s_op.addr1_reg);
+ snandc->regs->addr1 = cpu_to_le32(s_op.addr2_reg);
+
+ qcom_write_reg_dma(snandc, &snandc->regs->cmd, NAND_FLASH_CMD, 3, NAND_BAM_NEXT_SGL);
+ qcom_write_reg_dma(snandc, &snandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
+
+ ret = qcom_submit_descs(snandc);
+ if (ret)
+ dev_err(snandc->dev, "failure in submitting cmd descriptor\n");
+
+ return ret;
+}
+
+static int qcom_spi_io_op(struct qcom_nand_controller *snandc, const struct spi_mem_op *op)
+{
+ int ret, val, opcode;
+ bool copy = false, copy_ftr = false;
+
+ ret = qcom_spi_send_cmdaddr(snandc, op);
+ if (ret)
+ return ret;
+
+ snandc->buf_count = 0;
+ snandc->buf_start = 0;
+ qcom_clear_read_regs(snandc);
+ qcom_clear_bam_transaction(snandc);
+ opcode = op->cmd.opcode;
+
+ switch (opcode) {
+ case SPINAND_READID:
+ snandc->buf_count = 4;
+ qcom_read_reg_dma(snandc, NAND_READ_ID, 1, NAND_BAM_NEXT_SGL);
+ copy = true;
+ break;
+ case SPINAND_GET_FEATURE:
+ snandc->buf_count = 4;
+ qcom_read_reg_dma(snandc, NAND_FLASH_FEATURES, 1, NAND_BAM_NEXT_SGL);
+ copy_ftr = true;
+ break;
+ case SPINAND_SET_FEATURE:
+ snandc->regs->flash_feature = cpu_to_le32(*(u32 *)op->data.buf.out);
+ qcom_write_reg_dma(snandc, &snandc->regs->flash_feature,
+ NAND_FLASH_FEATURES, 1, NAND_BAM_NEXT_SGL);
+ break;
+ case SPINAND_PROGRAM_EXECUTE:
+ case SPINAND_WRITE_EN:
+ case SPINAND_RESET:
+ case SPINAND_ERASE:
+ case SPINAND_READ:
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ ret = qcom_submit_descs(snandc);
+ if (ret)
+ dev_err(snandc->dev, "failure in submitting descriptor for:%d\n", opcode);
+
+ if (copy) {
+ qcom_nandc_dev_to_mem(snandc, true);
+ memcpy(op->data.buf.in, snandc->reg_read_buf, snandc->buf_count);
+ }
+
+ if (copy_ftr) {
+ qcom_nandc_dev_to_mem(snandc, true);
+ val = le32_to_cpu(*(__le32 *)snandc->reg_read_buf);
+ val >>= 8;
+ memcpy(op->data.buf.in, &val, snandc->buf_count);
+ }
+
+ return ret;
+}
+
+static bool qcom_spi_is_page_op(const struct spi_mem_op *op)
+{
+ if (op->addr.buswidth != 1 && op->addr.buswidth != 2 && op->addr.buswidth != 4)
+ return false;
+
+ if (op->data.dir == SPI_MEM_DATA_IN) {
+ if (op->addr.buswidth == 4 && op->data.buswidth == 4)
+ return true;
+
+ if (op->addr.nbytes == 2 && op->addr.buswidth == 1)
+ return true;
+
+ } else if (op->data.dir == SPI_MEM_DATA_OUT) {
+ if (op->data.buswidth == 4)
+ return true;
+ if (op->addr.nbytes == 2 && op->addr.buswidth == 1)
+ return true;
+ }
+
+ return false;
+}
+
+static bool qcom_spi_supports_op(struct spi_mem *mem, const struct spi_mem_op *op)
+{
+ if (!spi_mem_default_supports_op(mem, op))
+ return false;
+
+ if (op->cmd.nbytes != 1 || op->cmd.buswidth != 1)
+ return false;
+
+ if (qcom_spi_is_page_op(op))
+ return true;
+
+ return ((!op->addr.nbytes || op->addr.buswidth == 1) &&
+ (!op->dummy.nbytes || op->dummy.buswidth == 1) &&
+ (!op->data.nbytes || op->data.buswidth == 1));
+}
+
+static int qcom_spi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
+{
+ struct qcom_nand_controller *snandc = spi_controller_get_devdata(mem->spi->controller);
+
+ dev_dbg(snandc->dev, "OP %02x ADDR %08llX@%d:%u DATA %d:%u", op->cmd.opcode,
+ op->addr.val, op->addr.buswidth, op->addr.nbytes,
+ op->data.buswidth, op->data.nbytes);
+
+ if (qcom_spi_is_page_op(op)) {
+ if (op->data.dir == SPI_MEM_DATA_IN)
+ return qcom_spi_read_page(snandc, op);
+ if (op->data.dir == SPI_MEM_DATA_OUT)
+ return qcom_spi_write_page(snandc, op);
+ } else {
+ return qcom_spi_io_op(snandc, op);
+ }
+
+ return 0;
+}
+
+static const struct spi_controller_mem_ops qcom_spi_mem_ops = {
+ .supports_op = qcom_spi_supports_op,
+ .exec_op = qcom_spi_exec_op,
+};
+
+static const struct spi_controller_mem_caps qcom_spi_mem_caps = {
+ .ecc = true,
+};
+
+static int qcom_spi_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct spi_controller *ctlr;
+ struct qcom_nand_controller *snandc;
+ struct qpic_spi_nand *qspi;
+ struct qpic_ecc *ecc;
+ struct resource *res;
+ const void *dev_data;
+ int ret;
+
+ ecc = devm_kzalloc(dev, sizeof(*ecc), GFP_KERNEL);
+ if (!ecc)
+ return -ENOMEM;
+
+ qspi = devm_kzalloc(dev, sizeof(*qspi), GFP_KERNEL);
+ if (!qspi)
+ return -ENOMEM;
+
+ ctlr = __devm_spi_alloc_controller(dev, sizeof(*snandc), false);
+ if (!ctlr)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, ctlr);
+
+ snandc = spi_controller_get_devdata(ctlr);
+ qspi->snandc = snandc;
+
+ snandc->dev = dev;
+ snandc->qspi = qspi;
+ snandc->qspi->ctlr = ctlr;
+ snandc->qspi->ecc = ecc;
+
+ dev_data = of_device_get_match_data(dev);
+ if (!dev_data) {
+ dev_err(&pdev->dev, "failed to get device data\n");
+ return -ENODEV;
+ }
+
+ snandc->props = dev_data;
+ snandc->dev = &pdev->dev;
+
+ snandc->core_clk = devm_clk_get(dev, "core");
+ if (IS_ERR(snandc->core_clk))
+ return PTR_ERR(snandc->core_clk);
+
+ snandc->aon_clk = devm_clk_get(dev, "aon");
+ if (IS_ERR(snandc->aon_clk))
+ return PTR_ERR(snandc->aon_clk);
+
+ snandc->qspi->iomacro_clk = devm_clk_get(dev, "iom");
+ if (IS_ERR(snandc->qspi->iomacro_clk))
+ return PTR_ERR(snandc->qspi->iomacro_clk);
+
+ snandc->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
+ if (IS_ERR(snandc->base))
+ return PTR_ERR(snandc->base);
+
+ snandc->base_phys = res->start;
+ snandc->base_dma = dma_map_resource(dev, res->start, resource_size(res),
+ DMA_BIDIRECTIONAL, 0);
+ if (dma_mapping_error(dev, snandc->base_dma))
+ return -ENXIO;
+
+ ret = clk_prepare_enable(snandc->core_clk);
+ if (ret)
+ goto err_dis_core_clk;
+
+ ret = clk_prepare_enable(snandc->aon_clk);
+ if (ret)
+ goto err_dis_aon_clk;
+
+ ret = clk_prepare_enable(snandc->qspi->iomacro_clk);
+ if (ret)
+ goto err_dis_iom_clk;
+
+ ret = qcom_nandc_alloc(snandc);
+ if (ret)
+ goto err_snand_alloc;
+
+ ret = qcom_spi_init(snandc);
+ if (ret)
+ goto err_spi_init;
+
+ /* setup ECC engine */
+ snandc->qspi->ecc_eng.dev = &pdev->dev;
+ snandc->qspi->ecc_eng.integration = NAND_ECC_ENGINE_INTEGRATION_PIPELINED;
+ snandc->qspi->ecc_eng.ops = &qcom_spi_ecc_engine_ops_pipelined;
+ snandc->qspi->ecc_eng.priv = snandc;
+
+ ret = nand_ecc_register_on_host_hw_engine(&snandc->qspi->ecc_eng);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register ecc engine:%d\n", ret);
+ goto err_spi_init;
+ }
+
+ ctlr->num_chipselect = QPIC_QSPI_NUM_CS;
+ ctlr->mem_ops = &qcom_spi_mem_ops;
+ ctlr->mem_caps = &qcom_spi_mem_caps;
+ ctlr->dev.of_node = pdev->dev.of_node;
+ ctlr->mode_bits = SPI_TX_DUAL | SPI_RX_DUAL |
+ SPI_TX_QUAD | SPI_RX_QUAD;
+
+ ret = spi_register_controller(ctlr);
+ if (ret) {
+ dev_err(&pdev->dev, "spi_register_controller failed.\n");
+ goto err_spi_init;
+ }
+
+ return 0;
+
+err_spi_init:
+ qcom_nandc_unalloc(snandc);
+err_snand_alloc:
+ clk_disable_unprepare(snandc->qspi->iomacro_clk);
+err_dis_iom_clk:
+ clk_disable_unprepare(snandc->aon_clk);
+err_dis_aon_clk:
+ clk_disable_unprepare(snandc->core_clk);
+err_dis_core_clk:
+ dma_unmap_resource(dev, res->start, resource_size(res),
+ DMA_BIDIRECTIONAL, 0);
+ return ret;
+}
+
+static void qcom_spi_remove(struct platform_device *pdev)
+{
+ struct spi_controller *ctlr = platform_get_drvdata(pdev);
+ struct qcom_nand_controller *snandc = spi_controller_get_devdata(ctlr);
+ struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ spi_unregister_controller(ctlr);
+
+ qcom_nandc_unalloc(snandc);
+
+ clk_disable_unprepare(snandc->aon_clk);
+ clk_disable_unprepare(snandc->core_clk);
+ clk_disable_unprepare(snandc->qspi->iomacro_clk);
+
+ dma_unmap_resource(&pdev->dev, snandc->base_dma, resource_size(res),
+ DMA_BIDIRECTIONAL, 0);
+}
+
+static const struct qcom_nandc_props ipq9574_snandc_props = {
+ .dev_cmd_reg_start = 0x7000,
+ .supports_bam = true,
+};
+
+static const struct of_device_id qcom_snandc_of_match[] = {
+ {
+ .compatible = "qcom,ipq9574-snand",
+ .data = &ipq9574_snandc_props,
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, qcom_snandc_of_match);
+
+static struct platform_driver qcom_spi_driver = {
+ .driver = {
+ .name = "qcom_snand",
+ .of_match_table = qcom_snandc_of_match,
+ },
+ .probe = qcom_spi_probe,
+ .remove = qcom_spi_remove,
+};
+module_platform_driver(qcom_spi_driver);
+
+MODULE_DESCRIPTION("SPI driver for QPIC QSPI cores");
+MODULE_AUTHOR("Md Sadre Alam <quic_mdalam@quicinc.com>");
+MODULE_LICENSE("GPL");
+
diff --git a/drivers/spi/spi-realtek-rtl-snand.c b/drivers/spi/spi-realtek-rtl-snand.c
index cd0484041147..741cf2af3e91 100644
--- a/drivers/spi/spi-realtek-rtl-snand.c
+++ b/drivers/spi/spi-realtek-rtl-snand.c
@@ -364,7 +364,6 @@ static int rtl_snand_probe(struct platform_device *pdev)
.reg_bits = 32,
.val_bits = 32,
.reg_stride = 4,
- .cache_type = REGCACHE_NONE,
};
int irq, ret;
diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c
index 389275dbc003..9c47f5741c5f 100644
--- a/drivers/spi/spi-s3c64xx.c
+++ b/drivers/spi/spi-s3c64xx.c
@@ -139,7 +139,9 @@ struct s3c64xx_spi_dma_data {
* struct s3c64xx_spi_port_config - SPI Controller hardware info
* @fifo_lvl_mask: [DEPRECATED] use @{rx, tx}_fifomask instead.
* @rx_lvl_offset: [DEPRECATED] use @{rx,tx}_fifomask instead.
- * @fifo_depth: depth of the FIFO.
+ * @fifo_depth: depth of the FIFOs. Used by compatibles where all the instances
+ * of the IP define the same FIFO depth. It has higher precedence
+ * than the FIFO depth specified via DT.
* @rx_fifomask: SPI_STATUS.RX_FIFO_LVL mask. Shifted mask defining the field's
* length and position.
* @tx_fifomask: SPI_STATUS.TX_FIFO_LVL mask. Shifted mask defining the field's
diff --git a/drivers/spi/spi-sg2044-nor.c b/drivers/spi/spi-sg2044-nor.c
new file mode 100644
index 000000000000..a59aa3fc55d2
--- /dev/null
+++ b/drivers/spi/spi-sg2044-nor.c
@@ -0,0 +1,488 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * SG2044 SPI NOR controller driver
+ *
+ * Copyright (c) 2025 Longbin Li <looong.bin@gmail.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/spi/spi-mem.h>
+
+/* Hardware register definitions */
+#define SPIFMC_CTRL 0x00
+#define SPIFMC_CTRL_CPHA BIT(12)
+#define SPIFMC_CTRL_CPOL BIT(13)
+#define SPIFMC_CTRL_HOLD_OL BIT(14)
+#define SPIFMC_CTRL_WP_OL BIT(15)
+#define SPIFMC_CTRL_LSBF BIT(20)
+#define SPIFMC_CTRL_SRST BIT(21)
+#define SPIFMC_CTRL_SCK_DIV_SHIFT 0
+#define SPIFMC_CTRL_FRAME_LEN_SHIFT 16
+#define SPIFMC_CTRL_SCK_DIV_MASK 0x7FF
+
+#define SPIFMC_CE_CTRL 0x04
+#define SPIFMC_CE_CTRL_CEMANUAL BIT(0)
+#define SPIFMC_CE_CTRL_CEMANUAL_EN BIT(1)
+
+#define SPIFMC_DLY_CTRL 0x08
+#define SPIFMC_CTRL_FM_INTVL_MASK 0x000f
+#define SPIFMC_CTRL_FM_INTVL BIT(0)
+#define SPIFMC_CTRL_CET_MASK 0x0f00
+#define SPIFMC_CTRL_CET BIT(8)
+
+#define SPIFMC_DMMR 0x0c
+
+#define SPIFMC_TRAN_CSR 0x10
+#define SPIFMC_TRAN_CSR_TRAN_MODE_MASK GENMASK(1, 0)
+#define SPIFMC_TRAN_CSR_TRAN_MODE_RX BIT(0)
+#define SPIFMC_TRAN_CSR_TRAN_MODE_TX BIT(1)
+#define SPIFMC_TRAN_CSR_FAST_MODE BIT(3)
+#define SPIFMC_TRAN_CSR_BUS_WIDTH_1_BIT (0x00 << 4)
+#define SPIFMC_TRAN_CSR_BUS_WIDTH_2_BIT (0x01 << 4)
+#define SPIFMC_TRAN_CSR_BUS_WIDTH_4_BIT (0x02 << 4)
+#define SPIFMC_TRAN_CSR_DMA_EN BIT(6)
+#define SPIFMC_TRAN_CSR_MISO_LEVEL BIT(7)
+#define SPIFMC_TRAN_CSR_ADDR_BYTES_MASK GENMASK(10, 8)
+#define SPIFMC_TRAN_CSR_ADDR_BYTES_SHIFT 8
+#define SPIFMC_TRAN_CSR_WITH_CMD BIT(11)
+#define SPIFMC_TRAN_CSR_FIFO_TRG_LVL_MASK GENMASK(13, 12)
+#define SPIFMC_TRAN_CSR_FIFO_TRG_LVL_1_BYTE (0x00 << 12)
+#define SPIFMC_TRAN_CSR_FIFO_TRG_LVL_2_BYTE (0x01 << 12)
+#define SPIFMC_TRAN_CSR_FIFO_TRG_LVL_4_BYTE (0x02 << 12)
+#define SPIFMC_TRAN_CSR_FIFO_TRG_LVL_8_BYTE (0x03 << 12)
+#define SPIFMC_TRAN_CSR_GO_BUSY BIT(15)
+#define SPIFMC_TRAN_CSR_ADDR4B_SHIFT 20
+#define SPIFMC_TRAN_CSR_CMD4B_SHIFT 21
+
+#define SPIFMC_TRAN_NUM 0x14
+#define SPIFMC_FIFO_PORT 0x18
+#define SPIFMC_FIFO_PT 0x20
+
+#define SPIFMC_INT_STS 0x28
+#define SPIFMC_INT_TRAN_DONE BIT(0)
+#define SPIFMC_INT_RD_FIFO BIT(2)
+#define SPIFMC_INT_WR_FIFO BIT(3)
+#define SPIFMC_INT_RX_FRAME BIT(4)
+#define SPIFMC_INT_TX_FRAME BIT(5)
+
+#define SPIFMC_INT_EN 0x2c
+#define SPIFMC_INT_TRAN_DONE_EN BIT(0)
+#define SPIFMC_INT_RD_FIFO_EN BIT(2)
+#define SPIFMC_INT_WR_FIFO_EN BIT(3)
+#define SPIFMC_INT_RX_FRAME_EN BIT(4)
+#define SPIFMC_INT_TX_FRAME_EN BIT(5)
+
+#define SPIFMC_OPT 0x030
+#define SPIFMC_OPT_DISABLE_FIFO_FLUSH BIT(1)
+
+#define SPIFMC_MAX_FIFO_DEPTH 8
+
+#define SPIFMC_MAX_READ_SIZE 0x10000
+
+struct sg2044_spifmc {
+ struct spi_controller *ctrl;
+ void __iomem *io_base;
+ struct device *dev;
+ struct mutex lock;
+ struct clk *clk;
+};
+
+static int sg2044_spifmc_wait_int(struct sg2044_spifmc *spifmc, u8 int_type)
+{
+ u32 stat;
+
+ return readl_poll_timeout(spifmc->io_base + SPIFMC_INT_STS, stat,
+ (stat & int_type), 0, 1000000);
+}
+
+static int sg2044_spifmc_wait_xfer_size(struct sg2044_spifmc *spifmc,
+ int xfer_size)
+{
+ u8 stat;
+
+ return readl_poll_timeout(spifmc->io_base + SPIFMC_FIFO_PT, stat,
+ ((stat & 0xf) == xfer_size), 1, 1000000);
+}
+
+static u32 sg2044_spifmc_init_reg(struct sg2044_spifmc *spifmc)
+{
+ u32 reg;
+
+ reg = readl(spifmc->io_base + SPIFMC_TRAN_CSR);
+ reg &= ~(SPIFMC_TRAN_CSR_TRAN_MODE_MASK |
+ SPIFMC_TRAN_CSR_FAST_MODE |
+ SPIFMC_TRAN_CSR_BUS_WIDTH_2_BIT |
+ SPIFMC_TRAN_CSR_BUS_WIDTH_4_BIT |
+ SPIFMC_TRAN_CSR_DMA_EN |
+ SPIFMC_TRAN_CSR_ADDR_BYTES_MASK |
+ SPIFMC_TRAN_CSR_WITH_CMD |
+ SPIFMC_TRAN_CSR_FIFO_TRG_LVL_MASK);
+
+ writel(reg, spifmc->io_base + SPIFMC_TRAN_CSR);
+
+ return reg;
+}
+
+static ssize_t sg2044_spifmc_read_64k(struct sg2044_spifmc *spifmc,
+ const struct spi_mem_op *op, loff_t from,
+ size_t len, u_char *buf)
+{
+ int xfer_size, offset;
+ u32 reg;
+ int ret;
+ int i;
+
+ reg = sg2044_spifmc_init_reg(spifmc);
+ reg |= (op->addr.nbytes + op->dummy.nbytes) << SPIFMC_TRAN_CSR_ADDR_BYTES_SHIFT;
+ reg |= SPIFMC_TRAN_CSR_FIFO_TRG_LVL_8_BYTE;
+ reg |= SPIFMC_TRAN_CSR_WITH_CMD;
+ reg |= SPIFMC_TRAN_CSR_TRAN_MODE_RX;
+
+ writel(0, spifmc->io_base + SPIFMC_FIFO_PT);
+ writeb(op->cmd.opcode, spifmc->io_base + SPIFMC_FIFO_PORT);
+
+ for (i = op->addr.nbytes - 1; i >= 0; i--)
+ writeb((from >> i * 8) & 0xff, spifmc->io_base + SPIFMC_FIFO_PORT);
+
+ for (i = 0; i < op->dummy.nbytes; i++)
+ writeb(0xff, spifmc->io_base + SPIFMC_FIFO_PORT);
+
+ writel(len, spifmc->io_base + SPIFMC_TRAN_NUM);
+ writel(0, spifmc->io_base + SPIFMC_INT_STS);
+ reg |= SPIFMC_TRAN_CSR_GO_BUSY;
+ writel(reg, spifmc->io_base + SPIFMC_TRAN_CSR);
+
+ ret = sg2044_spifmc_wait_int(spifmc, SPIFMC_INT_RD_FIFO);
+ if (ret < 0)
+ return ret;
+
+ offset = 0;
+ while (offset < len) {
+ xfer_size = min_t(size_t, SPIFMC_MAX_FIFO_DEPTH, len - offset);
+
+ ret = sg2044_spifmc_wait_xfer_size(spifmc, xfer_size);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < xfer_size; i++)
+ buf[i + offset] = readb(spifmc->io_base + SPIFMC_FIFO_PORT);
+
+ offset += xfer_size;
+ }
+
+ ret = sg2044_spifmc_wait_int(spifmc, SPIFMC_INT_TRAN_DONE);
+ if (ret < 0)
+ return ret;
+
+ writel(0, spifmc->io_base + SPIFMC_FIFO_PT);
+
+ return len;
+}
+
+static ssize_t sg2044_spifmc_read(struct sg2044_spifmc *spifmc,
+ const struct spi_mem_op *op)
+{
+ size_t xfer_size;
+ size_t offset;
+ loff_t from = op->addr.val;
+ size_t len = op->data.nbytes;
+ int ret;
+ u8 *din = op->data.buf.in;
+
+ offset = 0;
+ while (offset < len) {
+ xfer_size = min_t(size_t, SPIFMC_MAX_READ_SIZE, len - offset);
+
+ ret = sg2044_spifmc_read_64k(spifmc, op, from, xfer_size, din);
+ if (ret < 0)
+ return ret;
+
+ offset += xfer_size;
+ din += xfer_size;
+ from += xfer_size;
+ }
+
+ return 0;
+}
+
+static ssize_t sg2044_spifmc_write(struct sg2044_spifmc *spifmc,
+ const struct spi_mem_op *op)
+{
+ size_t xfer_size;
+ const u8 *dout = op->data.buf.out;
+ int i, offset;
+ int ret;
+ u32 reg;
+
+ reg = sg2044_spifmc_init_reg(spifmc);
+ reg |= (op->addr.nbytes + op->dummy.nbytes) << SPIFMC_TRAN_CSR_ADDR_BYTES_SHIFT;
+ reg |= SPIFMC_TRAN_CSR_FIFO_TRG_LVL_8_BYTE;
+ reg |= SPIFMC_TRAN_CSR_WITH_CMD;
+ reg |= SPIFMC_TRAN_CSR_TRAN_MODE_TX;
+
+ writel(0, spifmc->io_base + SPIFMC_FIFO_PT);
+ writeb(op->cmd.opcode, spifmc->io_base + SPIFMC_FIFO_PORT);
+
+ for (i = op->addr.nbytes - 1; i >= 0; i--)
+ writeb((op->addr.val >> i * 8) & 0xff, spifmc->io_base + SPIFMC_FIFO_PORT);
+
+ for (i = 0; i < op->dummy.nbytes; i++)
+ writeb(0xff, spifmc->io_base + SPIFMC_FIFO_PORT);
+
+ writel(0, spifmc->io_base + SPIFMC_INT_STS);
+ writel(op->data.nbytes, spifmc->io_base + SPIFMC_TRAN_NUM);
+ reg |= SPIFMC_TRAN_CSR_GO_BUSY;
+ writel(reg, spifmc->io_base + SPIFMC_TRAN_CSR);
+
+ ret = sg2044_spifmc_wait_xfer_size(spifmc, 0);
+ if (ret < 0)
+ return ret;
+
+ writel(0, spifmc->io_base + SPIFMC_FIFO_PT);
+
+ offset = 0;
+ while (offset < op->data.nbytes) {
+ xfer_size = min_t(size_t, SPIFMC_MAX_FIFO_DEPTH, op->data.nbytes - offset);
+
+ ret = sg2044_spifmc_wait_xfer_size(spifmc, 0);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < xfer_size; i++)
+ writeb(dout[i + offset], spifmc->io_base + SPIFMC_FIFO_PORT);
+
+ offset += xfer_size;
+ }
+
+ ret = sg2044_spifmc_wait_int(spifmc, SPIFMC_INT_TRAN_DONE);
+ if (ret < 0)
+ return ret;
+
+ writel(0, spifmc->io_base + SPIFMC_FIFO_PT);
+
+ return 0;
+}
+
+static ssize_t sg2044_spifmc_tran_cmd(struct sg2044_spifmc *spifmc,
+ const struct spi_mem_op *op)
+{
+ int i, ret;
+ u32 reg;
+
+ reg = sg2044_spifmc_init_reg(spifmc);
+ reg |= (op->addr.nbytes + op->dummy.nbytes) << SPIFMC_TRAN_CSR_ADDR_BYTES_SHIFT;
+ reg |= SPIFMC_TRAN_CSR_FIFO_TRG_LVL_1_BYTE;
+ reg |= SPIFMC_TRAN_CSR_WITH_CMD;
+
+ writel(0, spifmc->io_base + SPIFMC_FIFO_PT);
+ writeb(op->cmd.opcode, spifmc->io_base + SPIFMC_FIFO_PORT);
+
+ for (i = op->addr.nbytes - 1; i >= 0; i--)
+ writeb((op->addr.val >> i * 8) & 0xff, spifmc->io_base + SPIFMC_FIFO_PORT);
+
+ for (i = 0; i < op->dummy.nbytes; i++)
+ writeb(0xff, spifmc->io_base + SPIFMC_FIFO_PORT);
+
+ writel(0, spifmc->io_base + SPIFMC_INT_STS);
+ reg |= SPIFMC_TRAN_CSR_GO_BUSY;
+ writel(reg, spifmc->io_base + SPIFMC_TRAN_CSR);
+
+ ret = sg2044_spifmc_wait_int(spifmc, SPIFMC_INT_TRAN_DONE);
+ if (ret < 0)
+ return ret;
+
+ writel(0, spifmc->io_base + SPIFMC_FIFO_PT);
+
+ return 0;
+}
+
+static void sg2044_spifmc_trans(struct sg2044_spifmc *spifmc,
+ const struct spi_mem_op *op)
+{
+ if (op->data.dir == SPI_MEM_DATA_IN)
+ sg2044_spifmc_read(spifmc, op);
+ else if (op->data.dir == SPI_MEM_DATA_OUT)
+ sg2044_spifmc_write(spifmc, op);
+ else
+ sg2044_spifmc_tran_cmd(spifmc, op);
+}
+
+static ssize_t sg2044_spifmc_trans_reg(struct sg2044_spifmc *spifmc,
+ const struct spi_mem_op *op)
+{
+ const u8 *dout = NULL;
+ u8 *din = NULL;
+ size_t len = op->data.nbytes;
+ int ret, i;
+ u32 reg;
+
+ if (op->data.dir == SPI_MEM_DATA_IN)
+ din = op->data.buf.in;
+ else
+ dout = op->data.buf.out;
+
+ reg = sg2044_spifmc_init_reg(spifmc);
+ reg |= SPIFMC_TRAN_CSR_FIFO_TRG_LVL_1_BYTE;
+ reg |= SPIFMC_TRAN_CSR_WITH_CMD;
+
+ if (din) {
+ reg |= SPIFMC_TRAN_CSR_BUS_WIDTH_1_BIT;
+ reg |= SPIFMC_TRAN_CSR_TRAN_MODE_RX;
+ reg |= SPIFMC_TRAN_CSR_TRAN_MODE_TX;
+
+ writel(SPIFMC_OPT_DISABLE_FIFO_FLUSH, spifmc->io_base + SPIFMC_OPT);
+ } else {
+ /*
+ * When writing values to the Status Register,
+ * configure the TRAN_CSR register the same way as
+ * sg2044_spifmc_read_reg.
+ */
+ if (op->cmd.opcode == 0x01) {
+ reg |= SPIFMC_TRAN_CSR_TRAN_MODE_RX;
+ reg |= SPIFMC_TRAN_CSR_TRAN_MODE_TX;
+ writel(len, spifmc->io_base + SPIFMC_TRAN_NUM);
+ }
+ }
+
+ writel(0, spifmc->io_base + SPIFMC_FIFO_PT);
+ writeb(op->cmd.opcode, spifmc->io_base + SPIFMC_FIFO_PORT);
+
+ for (i = 0; i < len; i++) {
+ if (din)
+ writeb(0xff, spifmc->io_base + SPIFMC_FIFO_PORT);
+ else
+ writeb(dout[i], spifmc->io_base + SPIFMC_FIFO_PORT);
+ }
+
+ writel(0, spifmc->io_base + SPIFMC_INT_STS);
+ writel(len, spifmc->io_base + SPIFMC_TRAN_NUM);
+ reg |= SPIFMC_TRAN_CSR_GO_BUSY;
+ writel(reg, spifmc->io_base + SPIFMC_TRAN_CSR);
+
+ ret = sg2044_spifmc_wait_int(spifmc, SPIFMC_INT_TRAN_DONE);
+ if (ret < 0)
+ return ret;
+
+ if (din) {
+ while (len--)
+ *din++ = readb(spifmc->io_base + SPIFMC_FIFO_PORT);
+ }
+
+ writel(0, spifmc->io_base + SPIFMC_FIFO_PT);
+
+ return 0;
+}
+
+static int sg2044_spifmc_exec_op(struct spi_mem *mem,
+ const struct spi_mem_op *op)
+{
+ struct sg2044_spifmc *spifmc;
+
+ spifmc = spi_controller_get_devdata(mem->spi->controller);
+
+ mutex_lock(&spifmc->lock);
+
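+ /* Ops without an address phase access registers; ops with an address transfer flash data */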
+ if (op->addr.nbytes == 0)
+ sg2044_spifmc_trans_reg(spifmc, op);
+ else
+ sg2044_spifmc_trans(spifmc, op);
+
+ mutex_unlock(&spifmc->lock);
+
+ return 0;
+}
+
+static const struct spi_controller_mem_ops sg2044_spifmc_mem_ops = {
+ .exec_op = sg2044_spifmc_exec_op,
+};
+
+static void sg2044_spifmc_init(struct sg2044_spifmc *spifmc)
+{
+ u32 tran_csr;
+ u32 reg;
+
+ writel(0, spifmc->io_base + SPIFMC_DMMR);
+
+ reg = readl(spifmc->io_base + SPIFMC_CTRL);
+ reg |= SPIFMC_CTRL_SRST;
+ reg &= ~(SPIFMC_CTRL_SCK_DIV_MASK);
+ reg |= 1;
+ writel(reg, spifmc->io_base + SPIFMC_CTRL);
+
+ writel(0, spifmc->io_base + SPIFMC_CE_CTRL);
+
+ tran_csr = readl(spifmc->io_base + SPIFMC_TRAN_CSR);
+ tran_csr |= (0 << SPIFMC_TRAN_CSR_ADDR_BYTES_SHIFT);
+ tran_csr |= SPIFMC_TRAN_CSR_FIFO_TRG_LVL_4_BYTE;
+ tran_csr |= SPIFMC_TRAN_CSR_WITH_CMD;
+ writel(tran_csr, spifmc->io_base + SPIFMC_TRAN_CSR);
+}
+
+static int sg2044_spifmc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct spi_controller *ctrl;
+ struct sg2044_spifmc *spifmc;
+ int ret;
+
+ ctrl = devm_spi_alloc_host(&pdev->dev, sizeof(*spifmc));
+ if (!ctrl)
+ return -ENOMEM;
+
+ spifmc = spi_controller_get_devdata(ctrl);
+
+ spifmc->clk = devm_clk_get_enabled(&pdev->dev, NULL);
+ if (IS_ERR(spifmc->clk))
+ return dev_err_probe(dev, PTR_ERR(spifmc->clk), "Cannot get and enable AHB clock\n");
+
+ spifmc->dev = &pdev->dev;
+ spifmc->ctrl = ctrl;
+
+ spifmc->io_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(spifmc->io_base))
+ return PTR_ERR(spifmc->io_base);
+
+ ctrl->num_chipselect = 1;
+ ctrl->dev.of_node = pdev->dev.of_node;
+ ctrl->bits_per_word_mask = SPI_BPW_MASK(8);
+ ctrl->auto_runtime_pm = false;
+ ctrl->mem_ops = &sg2044_spifmc_mem_ops;
+ ctrl->mode_bits = SPI_RX_DUAL | SPI_TX_DUAL | SPI_RX_QUAD | SPI_TX_QUAD;
+
+ ret = devm_mutex_init(dev, &spifmc->lock);
+ if (ret)
+ return ret;
+
+ sg2044_spifmc_init(spifmc);
+ sg2044_spifmc_init_reg(spifmc);
+
+ ret = devm_spi_register_controller(&pdev->dev, ctrl);
+ if (ret)
+ return dev_err_probe(dev, ret, "spi_register_controller failed\n");
+
+ return 0;
+}
+
+static const struct of_device_id sg2044_spifmc_match[] = {
+ { .compatible = "sophgo,sg2044-spifmc-nor" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, sg2044_spifmc_match);
+
+static struct platform_driver sg2044_nor_driver = {
+ .driver = {
+ .name = "sg2044,spifmc-nor",
+ .of_match_table = sg2044_spifmc_match,
+ },
+ .probe = sg2044_spifmc_probe,
+};
+module_platform_driver(sg2044_nor_driver);
+
+MODULE_DESCRIPTION("SG2044 SPI NOR controller driver");
+MODULE_AUTHOR("Longbin Li <looong.bin@gmail.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi-stm32-ospi.c b/drivers/spi/spi-stm32-ospi.c
new file mode 100644
index 000000000000..668022098b1e
--- /dev/null
+++ b/drivers/spi/spi-stm32-ospi.c
@@ -0,0 +1,1063 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) STMicroelectronics 2025 - All Rights Reserved
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/gpio/consumer.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/reset.h>
+#include <linux/sizes.h>
+#include <linux/spi/spi-mem.h>
+#include <linux/types.h>
+
+#define OSPI_CR 0x00
+#define CR_EN BIT(0)
+#define CR_ABORT BIT(1)
+#define CR_DMAEN BIT(2)
+#define CR_FTHRES_SHIFT 8
+#define CR_TEIE BIT(16)
+#define CR_TCIE BIT(17)
+#define CR_SMIE BIT(19)
+#define CR_APMS BIT(22)
+#define CR_CSSEL BIT(24)
+#define CR_FMODE_MASK GENMASK(29, 28)
+#define CR_FMODE_INDW (0U)
+#define CR_FMODE_INDR (1U)
+#define CR_FMODE_APM (2U)
+#define CR_FMODE_MM (3U)
+
+#define OSPI_DCR1 0x08
+#define DCR1_DLYBYP BIT(3)
+#define DCR1_DEVSIZE_MASK GENMASK(20, 16)
+#define DCR1_MTYP_MASK GENMASK(26, 24)
+#define DCR1_MTYP_MX_MODE 1
+#define DCR1_MTYP_HP_MEMMODE 4
+
+#define OSPI_DCR2 0x0c
+#define DCR2_PRESC_MASK GENMASK(7, 0)
+
+#define OSPI_SR 0x20
+#define SR_TEF BIT(0)
+#define SR_TCF BIT(1)
+#define SR_FTF BIT(2)
+#define SR_SMF BIT(3)
+#define SR_BUSY BIT(5)
+
+#define OSPI_FCR 0x24
+#define FCR_CTEF BIT(0)
+#define FCR_CTCF BIT(1)
+#define FCR_CSMF BIT(3)
+
+#define OSPI_DLR 0x40
+#define OSPI_AR 0x48
+#define OSPI_DR 0x50
+#define OSPI_PSMKR 0x80
+#define OSPI_PSMAR 0x88
+
+#define OSPI_CCR 0x100
+#define CCR_IMODE_MASK GENMASK(2, 0)
+#define CCR_IDTR BIT(3)
+#define CCR_ISIZE_MASK GENMASK(5, 4)
+#define CCR_ADMODE_MASK GENMASK(10, 8)
+#define CCR_ADMODE_8LINES 4
+#define CCR_ADDTR BIT(11)
+#define CCR_ADSIZE_MASK GENMASK(13, 12)
+#define CCR_ADSIZE_32BITS 3
+#define CCR_DMODE_MASK GENMASK(26, 24)
+#define CCR_DMODE_8LINES 4
+#define CCR_DQSE BIT(29)
+#define CCR_DDTR BIT(27)
+#define CCR_BUSWIDTH_0 0x0
+#define CCR_BUSWIDTH_1 0x1
+#define CCR_BUSWIDTH_2 0x2
+#define CCR_BUSWIDTH_4 0x3
+#define CCR_BUSWIDTH_8 0x4
+
+#define OSPI_TCR 0x108
+#define TCR_DCYC_MASK GENMASK(4, 0)
+#define TCR_DHQC BIT(28)
+#define TCR_SSHIFT BIT(30)
+
+#define OSPI_IR 0x110
+
+#define STM32_OSPI_MAX_MMAP_SZ SZ_256M
+#define STM32_OSPI_MAX_NORCHIP 2
+
+#define STM32_FIFO_TIMEOUT_US 30000
+#define STM32_ABT_TIMEOUT_US 100000
+#define STM32_COMP_TIMEOUT_MS 5000
+#define STM32_BUSY_TIMEOUT_US 100000
+
+#define STM32_AUTOSUSPEND_DELAY -1
+
+struct stm32_ospi {
+ struct device *dev;
+ struct spi_controller *ctrl;
+ struct clk *clk;
+ struct reset_control *rstc;
+
+ struct completion data_completion;
+ struct completion match_completion;
+
+ struct dma_chan *dma_chtx;
+ struct dma_chan *dma_chrx;
+ struct completion dma_completion;
+
+ void __iomem *regs_base;
+ void __iomem *mm_base;
+ phys_addr_t regs_phys_base;
+ resource_size_t mm_size;
+ u32 clk_rate;
+ u32 fmode;
+ u32 cr_reg;
+ u32 dcr_reg;
+ u32 flash_presc[STM32_OSPI_MAX_NORCHIP];
+ int irq;
+ unsigned long status_timeout;
+
+ /*
+ * Protects the device configuration, which may differ between
+ * the two flash accesses
+ */
+ struct mutex lock;
+};
+
+static void stm32_ospi_read_fifo(u8 *val, void __iomem *addr)
+{
+ *val = readb_relaxed(addr);
+}
+
+static void stm32_ospi_write_fifo(u8 *val, void __iomem *addr)
+{
+ writeb_relaxed(*val, addr);
+}
+
+static int stm32_ospi_abort(struct stm32_ospi *ospi)
+{
+ void __iomem *regs_base = ospi->regs_base;
+ u32 cr;
+ int timeout;
+
+ cr = readl_relaxed(regs_base + OSPI_CR) | CR_ABORT;
+ writel_relaxed(cr, regs_base + OSPI_CR);
+
+ /* Wait for the abort bit to be cleared by hardware */
+ timeout = readl_relaxed_poll_timeout_atomic(regs_base + OSPI_CR,
+ cr, !(cr & CR_ABORT), 1,
+ STM32_ABT_TIMEOUT_US);
+
+ if (timeout)
+ dev_err(ospi->dev, "%s abort timeout:%d\n", __func__, timeout);
+
+ return timeout;
+}
+
+static int stm32_ospi_poll(struct stm32_ospi *ospi, u8 *buf, u32 len, bool read)
+{
+ void __iomem *regs_base = ospi->regs_base;
+ void (*fifo)(u8 *val, void __iomem *addr);
+ u32 sr;
+ int ret;
+
+ if (read)
+ fifo = stm32_ospi_read_fifo;
+ else
+ fifo = stm32_ospi_write_fifo;
+
+ while (len--) {
+ ret = readl_relaxed_poll_timeout_atomic(regs_base + OSPI_SR,
+ sr, sr & SR_FTF, 1,
+ STM32_FIFO_TIMEOUT_US);
+ if (ret) {
+ dev_err(ospi->dev, "fifo timeout (len:%d stat:%#x)\n",
+ len, sr);
+ return ret;
+ }
+ fifo(buf++, regs_base + OSPI_DR);
+ }
+
+ return 0;
+}
+
+static int stm32_ospi_wait_nobusy(struct stm32_ospi *ospi)
+{
+ u32 sr;
+
+ return readl_relaxed_poll_timeout_atomic(ospi->regs_base + OSPI_SR,
+ sr, !(sr & SR_BUSY), 1,
+ STM32_BUSY_TIMEOUT_US);
+}
+
+static int stm32_ospi_wait_cmd(struct stm32_ospi *ospi)
+{
+ void __iomem *regs_base = ospi->regs_base;
+ u32 cr, sr;
+ int err = 0;
+
+ if ((readl_relaxed(regs_base + OSPI_SR) & SR_TCF) ||
+ ospi->fmode == CR_FMODE_APM)
+ goto out;
+
+ reinit_completion(&ospi->data_completion);
+ cr = readl_relaxed(regs_base + OSPI_CR);
+ writel_relaxed(cr | CR_TCIE | CR_TEIE, regs_base + OSPI_CR);
+
+ if (!wait_for_completion_timeout(&ospi->data_completion,
+ msecs_to_jiffies(STM32_COMP_TIMEOUT_MS)))
+ err = -ETIMEDOUT;
+
+ sr = readl_relaxed(regs_base + OSPI_SR);
+ if (sr & SR_TCF)
+ /* avoid false timeout */
+ err = 0;
+ if (sr & SR_TEF)
+ err = -EIO;
+
+out:
+ /* clear flags */
+ writel_relaxed(FCR_CTCF | FCR_CTEF, regs_base + OSPI_FCR);
+
+ if (!err)
+ err = stm32_ospi_wait_nobusy(ospi);
+
+ return err;
+}
+
+static void stm32_ospi_dma_callback(void *arg)
+{
+ struct completion *dma_completion = arg;
+
+ complete(dma_completion);
+}
+
+static irqreturn_t stm32_ospi_irq(int irq, void *dev_id)
+{
+ struct stm32_ospi *ospi = (struct stm32_ospi *)dev_id;
+ void __iomem *regs_base = ospi->regs_base;
+ u32 cr, sr;
+
+ cr = readl_relaxed(regs_base + OSPI_CR);
+ sr = readl_relaxed(regs_base + OSPI_SR);
+
+ if (cr & CR_SMIE && sr & SR_SMF) {
+ /* disable irq */
+ cr &= ~CR_SMIE;
+ writel_relaxed(cr, regs_base + OSPI_CR);
+ complete(&ospi->match_completion);
+
+ return IRQ_HANDLED;
+ }
+
+ if (sr & (SR_TEF | SR_TCF)) {
+ /* disable irq */
+ cr &= ~CR_TCIE & ~CR_TEIE;
+ writel_relaxed(cr, regs_base + OSPI_CR);
+ complete(&ospi->data_completion);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void stm32_ospi_dma_setup(struct stm32_ospi *ospi,
+ struct dma_slave_config *dma_cfg)
+{
+ if (dma_cfg && ospi->dma_chrx) {
+ if (dmaengine_slave_config(ospi->dma_chrx, dma_cfg)) {
+ dev_err(ospi->dev, "dma rx config failed\n");
+ dma_release_channel(ospi->dma_chrx);
+ ospi->dma_chrx = NULL;
+ }
+ }
+
+ if (dma_cfg && ospi->dma_chtx) {
+ if (dmaengine_slave_config(ospi->dma_chtx, dma_cfg)) {
+ dev_err(ospi->dev, "dma tx config failed\n");
+ dma_release_channel(ospi->dma_chtx);
+ ospi->dma_chtx = NULL;
+ }
+ }
+
+ init_completion(&ospi->dma_completion);
+}
+
+static int stm32_ospi_tx_mm(struct stm32_ospi *ospi,
+ const struct spi_mem_op *op)
+{
+ memcpy_fromio(op->data.buf.in, ospi->mm_base + op->addr.val,
+ op->data.nbytes);
+ return 0;
+}
+
+static int stm32_ospi_tx_dma(struct stm32_ospi *ospi,
+ const struct spi_mem_op *op)
+{
+ struct dma_async_tx_descriptor *desc;
+ void __iomem *regs_base = ospi->regs_base;
+ enum dma_transfer_direction dma_dir;
+ struct dma_chan *dma_ch;
+ struct sg_table sgt;
+ dma_cookie_t cookie;
+ u32 cr, t_out;
+ int err;
+
+ if (op->data.dir == SPI_MEM_DATA_IN) {
+ dma_dir = DMA_DEV_TO_MEM;
+ dma_ch = ospi->dma_chrx;
+ } else {
+ dma_dir = DMA_MEM_TO_DEV;
+ dma_ch = ospi->dma_chtx;
+ }
+
+ /*
+ * spi_map_buf() returns -EINVAL if the buffer is not DMA-able
+ * (DMA-able: in vmalloc | kmap | virt_addr_valid)
+ */
+ err = spi_controller_dma_map_mem_op_data(ospi->ctrl, op, &sgt);
+ if (err)
+ return err;
+
+ desc = dmaengine_prep_slave_sg(dma_ch, sgt.sgl, sgt.nents,
+ dma_dir, DMA_PREP_INTERRUPT);
+ if (!desc) {
+ err = -ENOMEM;
+ goto out_unmap;
+ }
+
+ cr = readl_relaxed(regs_base + OSPI_CR);
+
+ reinit_completion(&ospi->dma_completion);
+ desc->callback = stm32_ospi_dma_callback;
+ desc->callback_param = &ospi->dma_completion;
+ cookie = dmaengine_submit(desc);
+ err = dma_submit_error(cookie);
+ if (err)
+ goto out;
+
+ dma_async_issue_pending(dma_ch);
+
+ writel_relaxed(cr | CR_DMAEN, regs_base + OSPI_CR);
+
+ t_out = sgt.nents * STM32_COMP_TIMEOUT_MS;
+ if (!wait_for_completion_timeout(&ospi->dma_completion,
+ msecs_to_jiffies(t_out)))
+ err = -ETIMEDOUT;
+
+ if (err)
+ dmaengine_terminate_all(dma_ch);
+
+out:
+ writel_relaxed(cr & ~CR_DMAEN, regs_base + OSPI_CR);
+out_unmap:
+ spi_controller_dma_unmap_mem_op_data(ospi->ctrl, op, &sgt);
+
+ return err;
+}
+
+static int stm32_ospi_xfer(struct stm32_ospi *ospi, const struct spi_mem_op *op)
+{
+ u8 *buf;
+
+ if (!op->data.nbytes)
+ return 0;
+
+ if (ospi->fmode == CR_FMODE_MM)
+ return stm32_ospi_tx_mm(ospi, op);
+ else if (((op->data.dir == SPI_MEM_DATA_IN && ospi->dma_chrx) ||
+ (op->data.dir == SPI_MEM_DATA_OUT && ospi->dma_chtx)) &&
+ op->data.nbytes > 8)
+ if (!stm32_ospi_tx_dma(ospi, op))
+ return 0;
+
+ if (op->data.dir == SPI_MEM_DATA_IN)
+ buf = op->data.buf.in;
+ else
+ buf = (u8 *)op->data.buf.out;
+
+ return stm32_ospi_poll(ospi, buf, op->data.nbytes,
+ op->data.dir == SPI_MEM_DATA_IN);
+}
+
+static int stm32_ospi_wait_poll_status(struct stm32_ospi *ospi,
+ const struct spi_mem_op *op)
+{
+ void __iomem *regs_base = ospi->regs_base;
+ u32 cr;
+
+ reinit_completion(&ospi->match_completion);
+ cr = readl_relaxed(regs_base + OSPI_CR);
+ writel_relaxed(cr | CR_SMIE, regs_base + OSPI_CR);
+
+ if (!wait_for_completion_timeout(&ospi->match_completion,
+ msecs_to_jiffies(ospi->status_timeout))) {
+ u32 sr = readl_relaxed(regs_base + OSPI_SR);
+
+ /* Avoid false timeout */
+ if (!(sr & SR_SMF))
+ return -ETIMEDOUT;
+ }
+
+ writel_relaxed(FCR_CSMF, regs_base + OSPI_FCR);
+
+ return 0;
+}
+
+static int stm32_ospi_get_mode(u8 buswidth)
+{
+ switch (buswidth) {
+ case 8:
+ return CCR_BUSWIDTH_8;
+ case 4:
+ return CCR_BUSWIDTH_4;
+ default:
+ return buswidth;
+ }
+}
+
+static int stm32_ospi_send(struct spi_device *spi, const struct spi_mem_op *op)
+{
+ struct stm32_ospi *ospi = spi_controller_get_devdata(spi->controller);
+ void __iomem *regs_base = ospi->regs_base;
+ u32 ccr, cr, dcr2, tcr;
+ int timeout, err = 0, err_poll_status = 0;
+ u8 cs = spi->chip_select[ffs(spi->cs_index_mask) - 1];
+
+ dev_dbg(ospi->dev, "cmd:%#x mode:%d.%d.%d.%d addr:%#llx len:%#x\n",
+ op->cmd.opcode, op->cmd.buswidth, op->addr.buswidth,
+ op->dummy.buswidth, op->data.buswidth,
+ op->addr.val, op->data.nbytes);
+
+ cr = readl_relaxed(ospi->regs_base + OSPI_CR);
+ cr &= ~CR_CSSEL;
+ cr |= FIELD_PREP(CR_CSSEL, cs);
+ cr &= ~CR_FMODE_MASK;
+ cr |= FIELD_PREP(CR_FMODE_MASK, ospi->fmode);
+ writel_relaxed(cr, regs_base + OSPI_CR);
+
+ if (op->data.nbytes)
+ writel_relaxed(op->data.nbytes - 1, regs_base + OSPI_DLR);
+
+ /* set prescaler */
+ dcr2 = readl_relaxed(regs_base + OSPI_DCR2);
+ dcr2 |= FIELD_PREP(DCR2_PRESC_MASK, ospi->flash_presc[cs]);
+ writel_relaxed(dcr2, regs_base + OSPI_DCR2);
+
+ ccr = FIELD_PREP(CCR_IMODE_MASK, stm32_ospi_get_mode(op->cmd.buswidth));
+
+ if (op->addr.nbytes) {
+ ccr |= FIELD_PREP(CCR_ADMODE_MASK,
+ stm32_ospi_get_mode(op->addr.buswidth));
+ ccr |= FIELD_PREP(CCR_ADSIZE_MASK, op->addr.nbytes - 1);
+ }
+
+ tcr = TCR_SSHIFT;
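+ /* Dummy clock cycles = dummy bytes * 8 / dummy bus width */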
+ if (op->dummy.buswidth && op->dummy.nbytes) {
+ tcr |= FIELD_PREP(TCR_DCYC_MASK,
+ op->dummy.nbytes * 8 / op->dummy.buswidth);
+ }
+ writel_relaxed(tcr, regs_base + OSPI_TCR);
+
+ if (op->data.nbytes) {
+ ccr |= FIELD_PREP(CCR_DMODE_MASK,
+ stm32_ospi_get_mode(op->data.buswidth));
+ }
+
+ writel_relaxed(ccr, regs_base + OSPI_CCR);
+
+ /* Set the instruction; it must be written after the CCR register update */
+ writel_relaxed(op->cmd.opcode, regs_base + OSPI_IR);
+
+ if (op->addr.nbytes && ospi->fmode != CR_FMODE_MM)
+ writel_relaxed(op->addr.val, regs_base + OSPI_AR);
+
+ if (ospi->fmode == CR_FMODE_APM)
+ err_poll_status = stm32_ospi_wait_poll_status(ospi, op);
+
+ err = stm32_ospi_xfer(ospi, op);
+
+ /*
+ * Abort in:
+ * - error case
+ * - memory-mapped read: prefetching must be stopped if we read the last
+ *   byte of the device (device size - FIFO size). Since the device size
+ *   is not known, prefetching is always stopped.
+ */
+ if (err || err_poll_status || ospi->fmode == CR_FMODE_MM)
+ goto abort;
+
+ /* Wait for the end of the transfer in indirect mode */
+ err = stm32_ospi_wait_cmd(ospi);
+ if (err)
+ goto abort;
+
+ return 0;
+
+abort:
+ timeout = stm32_ospi_abort(ospi);
+ writel_relaxed(FCR_CTCF | FCR_CSMF, regs_base + OSPI_FCR);
+
+ if (err || err_poll_status || timeout)
+ dev_err(ospi->dev, "%s err:%d err_poll_status:%d abort timeout:%d\n",
+ __func__, err, err_poll_status, timeout);
+
+ return err;
+}
+
+static int stm32_ospi_poll_status(struct spi_mem *mem,
+ const struct spi_mem_op *op,
+ u16 mask, u16 match,
+ unsigned long initial_delay_us,
+ unsigned long polling_rate_us,
+ unsigned long timeout_ms)
+{
+ struct stm32_ospi *ospi = spi_controller_get_devdata(mem->spi->controller);
+ void __iomem *regs_base = ospi->regs_base;
+ int ret;
+
+ ret = pm_runtime_resume_and_get(ospi->dev);
+ if (ret < 0)
+ return ret;
+
+ mutex_lock(&ospi->lock);
+
+ writel_relaxed(mask, regs_base + OSPI_PSMKR);
+ writel_relaxed(match, regs_base + OSPI_PSMAR);
+ ospi->fmode = CR_FMODE_APM;
+ ospi->status_timeout = timeout_ms;
+
+ ret = stm32_ospi_send(mem->spi, op);
+ mutex_unlock(&ospi->lock);
+
+ pm_runtime_mark_last_busy(ospi->dev);
+ pm_runtime_put_autosuspend(ospi->dev);
+
+ return ret;
+}
+
+static int stm32_ospi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
+{
+ struct stm32_ospi *ospi = spi_controller_get_devdata(mem->spi->controller);
+ int ret;
+
+ ret = pm_runtime_resume_and_get(ospi->dev);
+ if (ret < 0)
+ return ret;
+
+ mutex_lock(&ospi->lock);
+ if (op->data.dir == SPI_MEM_DATA_IN && op->data.nbytes)
+ ospi->fmode = CR_FMODE_INDR;
+ else
+ ospi->fmode = CR_FMODE_INDW;
+
+ ret = stm32_ospi_send(mem->spi, op);
+ mutex_unlock(&ospi->lock);
+
+ pm_runtime_mark_last_busy(ospi->dev);
+ pm_runtime_put_autosuspend(ospi->dev);
+
+ return ret;
+}
+
+static int stm32_ospi_dirmap_create(struct spi_mem_dirmap_desc *desc)
+{
+ struct stm32_ospi *ospi = spi_controller_get_devdata(desc->mem->spi->controller);
+
+ if (desc->info.op_tmpl.data.dir == SPI_MEM_DATA_OUT)
+ return -EOPNOTSUPP;
+
+ /* Should never happen, as a NULL mm_base is a probe error exit condition */
+ if (!ospi->mm_base && desc->info.op_tmpl.data.dir == SPI_MEM_DATA_IN)
+ return -EOPNOTSUPP;
+
+ if (!ospi->mm_size)
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+
+static ssize_t stm32_ospi_dirmap_read(struct spi_mem_dirmap_desc *desc,
+ u64 offs, size_t len, void *buf)
+{
+ struct stm32_ospi *ospi = spi_controller_get_devdata(desc->mem->spi->controller);
+ struct spi_mem_op op;
+ u32 addr_max;
+ int ret;
+
+ ret = pm_runtime_resume_and_get(ospi->dev);
+ if (ret < 0)
+ return ret;
+
+ mutex_lock(&ospi->lock);
+ /*
+ * Make a local copy of desc->info.op_tmpl and complete the dirmap
+ * spi_mem_op template with offs, len and buf so that all the
+ * transfer information needed ends up in struct spi_mem_op
+ */
+ memcpy(&op, &desc->info.op_tmpl, sizeof(struct spi_mem_op));
+ dev_dbg(ospi->dev, "%s len = 0x%zx offs = 0x%llx buf = 0x%p\n", __func__, len, offs, buf);
+
+ op.data.nbytes = len;
+ op.addr.val = desc->info.offset + offs;
+ op.data.buf.in = buf;
+
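+ /* Use the memory-mapped read path only when the whole range fits in the mapped window */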
+ addr_max = op.addr.val + op.data.nbytes + 1;
+ if (addr_max < ospi->mm_size && op.addr.buswidth)
+ ospi->fmode = CR_FMODE_MM;
+ else
+ ospi->fmode = CR_FMODE_INDR;
+
+ ret = stm32_ospi_send(desc->mem->spi, &op);
+ mutex_unlock(&ospi->lock);
+
+ pm_runtime_mark_last_busy(ospi->dev);
+ pm_runtime_put_autosuspend(ospi->dev);
+
+ return ret ?: len;
+}
+
+static int stm32_ospi_transfer_one_message(struct spi_controller *ctrl,
+ struct spi_message *msg)
+{
+ struct stm32_ospi *ospi = spi_controller_get_devdata(ctrl);
+ struct spi_transfer *transfer;
+ struct spi_device *spi = msg->spi;
+ struct spi_mem_op op;
+ struct gpio_desc *cs_gpiod = spi->cs_gpiod[ffs(spi->cs_index_mask) - 1];
+ int ret = 0;
+
+ if (!cs_gpiod)
+ return -EOPNOTSUPP;
+
+ ret = pm_runtime_resume_and_get(ospi->dev);
+ if (ret < 0)
+ return ret;
+
+ mutex_lock(&ospi->lock);
+
+ gpiod_set_value_cansleep(cs_gpiod, true);
+
+ list_for_each_entry(transfer, &msg->transfers, transfer_list) {
+ u8 dummy_bytes = 0;
+
+ memset(&op, 0, sizeof(op));
+
+ dev_dbg(ospi->dev, "tx_buf:%p tx_nbits:%d rx_buf:%p rx_nbits:%d len:%d dummy_data:%d\n",
+ transfer->tx_buf, transfer->tx_nbits,
+ transfer->rx_buf, transfer->rx_nbits,
+ transfer->len, transfer->dummy_data);
+
+ /*
+ * The OSPI hardware supports dummy byte transfers.
+ * If the current transfer carries dummy bytes, merge it with the
+ * next transfer to respect the OSPI block constraints
+ */
+ if (transfer->dummy_data) {
+ op.dummy.buswidth = transfer->tx_nbits;
+ op.dummy.nbytes = transfer->len;
+ dummy_bytes = transfer->len;
+
+ /* If this happens, the message was not built correctly */
+ if (list_is_last(&transfer->transfer_list, &msg->transfers)) {
+ ret = -EINVAL;
+ goto end_of_transfer;
+ }
+
+ transfer = list_next_entry(transfer, transfer_list);
+ }
+
+ op.data.nbytes = transfer->len;
+
+ if (transfer->rx_buf) {
+ ospi->fmode = CR_FMODE_INDR;
+ op.data.buswidth = transfer->rx_nbits;
+ op.data.dir = SPI_MEM_DATA_IN;
+ op.data.buf.in = transfer->rx_buf;
+ } else {
+ ospi->fmode = CR_FMODE_INDW;
+ op.data.buswidth = transfer->tx_nbits;
+ op.data.dir = SPI_MEM_DATA_OUT;
+ op.data.buf.out = transfer->tx_buf;
+ }
+
+ ret = stm32_ospi_send(spi, &op);
+ if (ret)
+ goto end_of_transfer;
+
+ msg->actual_length += transfer->len + dummy_bytes;
+ }
+
+end_of_transfer:
+ gpiod_set_value_cansleep(cs_gpiod, false);
+
+ mutex_unlock(&ospi->lock);
+
+ msg->status = ret;
+ spi_finalize_current_message(ctrl);
+
+ pm_runtime_mark_last_busy(ospi->dev);
+ pm_runtime_put_autosuspend(ospi->dev);
+
+ return ret;
+}
+
+static int stm32_ospi_setup(struct spi_device *spi)
+{
+ struct spi_controller *ctrl = spi->controller;
+ struct stm32_ospi *ospi = spi_controller_get_devdata(ctrl);
+ void __iomem *regs_base = ospi->regs_base;
+ int ret;
+ u8 cs = spi->chip_select[ffs(spi->cs_index_mask) - 1];
+
+ if (ctrl->busy)
+ return -EBUSY;
+
+ if (!spi->max_speed_hz)
+ return -EINVAL;
+
+ ret = pm_runtime_resume_and_get(ospi->dev);
+ if (ret < 0)
+ return ret;
+
+ ospi->flash_presc[cs] = DIV_ROUND_UP(ospi->clk_rate, spi->max_speed_hz) - 1;
+
+ mutex_lock(&ospi->lock);
+
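+ /* Enable the controller with automatic poll mode stop (APMS) and set the FIFO threshold */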
+ ospi->cr_reg = CR_APMS | 3 << CR_FTHRES_SHIFT | CR_EN;
+ writel_relaxed(ospi->cr_reg, regs_base + OSPI_CR);
+
+ /* Set the DCR1 device size field to the maximum address */
+ ospi->dcr_reg = DCR1_DEVSIZE_MASK | DCR1_DLYBYP;
+ writel_relaxed(ospi->dcr_reg, regs_base + OSPI_DCR1);
+
+ mutex_unlock(&ospi->lock);
+
+ pm_runtime_mark_last_busy(ospi->dev);
+ pm_runtime_put_autosuspend(ospi->dev);
+
+ return 0;
+}
+
+/*
+ * No special host constraint, so use the default spi_mem_default_supports_op
+ * to check the supported modes.
+ */
+static const struct spi_controller_mem_ops stm32_ospi_mem_ops = {
+ .exec_op = stm32_ospi_exec_op,
+ .dirmap_create = stm32_ospi_dirmap_create,
+ .dirmap_read = stm32_ospi_dirmap_read,
+ .poll_status = stm32_ospi_poll_status,
+};
+
+static int stm32_ospi_get_resources(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct stm32_ospi *ospi = platform_get_drvdata(pdev);
+ struct resource *res;
+ struct reserved_mem *rmem = NULL;
+ struct device_node *node;
+ int ret;
+
+ ospi->regs_base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
+ if (IS_ERR(ospi->regs_base))
+ return PTR_ERR(ospi->regs_base);
+
+ ospi->regs_phys_base = res->start;
+
+ ospi->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(ospi->clk))
+ return dev_err_probe(dev, PTR_ERR(ospi->clk),
+ "Can't get clock\n");
+
+ ospi->clk_rate = clk_get_rate(ospi->clk);
+ if (!ospi->clk_rate) {
+ dev_err(dev, "Invalid clock rate\n");
+ return -EINVAL;
+ }
+
+ ospi->irq = platform_get_irq(pdev, 0);
+ if (ospi->irq < 0)
+ return ospi->irq;
+
+ ret = devm_request_irq(dev, ospi->irq, stm32_ospi_irq, 0,
+ dev_name(dev), ospi);
+ if (ret) {
+ dev_err(dev, "Failed to request irq\n");
+ return ret;
+ }
+
+ ospi->rstc = devm_reset_control_array_get_optional_exclusive(dev);
+ if (IS_ERR(ospi->rstc))
+ return dev_err_probe(dev, PTR_ERR(ospi->rstc),
+ "Can't get reset\n");
+
+ ospi->dma_chrx = dma_request_chan(dev, "rx");
+ if (IS_ERR(ospi->dma_chrx)) {
+ ret = PTR_ERR(ospi->dma_chrx);
+ ospi->dma_chrx = NULL;
+ if (ret == -EPROBE_DEFER)
+ goto err_dma;
+ }
+
+ ospi->dma_chtx = dma_request_chan(dev, "tx");
+ if (IS_ERR(ospi->dma_chtx)) {
+ ret = PTR_ERR(ospi->dma_chtx);
+ ospi->dma_chtx = NULL;
+ if (ret == -EPROBE_DEFER)
+ goto err_dma;
+ }
+
+ node = of_parse_phandle(dev->of_node, "memory-region", 0);
+ if (node)
+ rmem = of_reserved_mem_lookup(node);
+ of_node_put(node);
+
+ if (rmem) {
+ ospi->mm_size = rmem->size;
+ ospi->mm_base = devm_ioremap(dev, rmem->base, rmem->size);
+ if (!ospi->mm_base) {
+ dev_err(dev, "unable to map memory region: %pa+%pa\n",
+ &rmem->base, &rmem->size);
+ ret = -ENOMEM;
+ goto err_dma;
+ }
+
+ if (ospi->mm_size > STM32_OSPI_MAX_MMAP_SZ) {
+ dev_err(dev, "Memory map size outsize bounds\n");
+ ret = -EINVAL;
+ goto err_dma;
+ }
+ } else {
+ dev_info(dev, "No memory-map region found\n");
+ }
+
+ init_completion(&ospi->data_completion);
+ init_completion(&ospi->match_completion);
+
+ return 0;
+
+err_dma:
+ dev_info(dev, "Can't get all resources (%d)\n", ret);
+
+ if (ospi->dma_chtx)
+ dma_release_channel(ospi->dma_chtx);
+ if (ospi->dma_chrx)
+ dma_release_channel(ospi->dma_chrx);
+
+ return ret;
+};
+
+static int stm32_ospi_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct spi_controller *ctrl;
+ struct stm32_ospi *ospi;
+ struct dma_slave_config dma_cfg;
+ struct device_node *child;
+ int ret;
+ u8 spi_flash_count = 0;
+
+ /*
+ * Flash subnodes sanity check:
+ * 1 or 2 spi-nand/spi-nor flashes => supported
+ * Any other flash node configuration => not supported
+ */
+ for_each_available_child_of_node(dev->of_node, child) {
+ if (of_device_is_compatible(child, "jedec,spi-nor") ||
+ of_device_is_compatible(child, "spi-nand"))
+ spi_flash_count++;
+ }
+
+ if (spi_flash_count == 0 || spi_flash_count > 2) {
+ dev_err(dev, "Incorrect DT flash node\n");
+ return -ENODEV;
+ }
+
+ ctrl = devm_spi_alloc_host(dev, sizeof(*ospi));
+ if (!ctrl)
+ return -ENOMEM;
+
+ ospi = spi_controller_get_devdata(ctrl);
+ ospi->ctrl = ctrl;
+
+ ospi->dev = &pdev->dev;
+ platform_set_drvdata(pdev, ospi);
+
+ ret = stm32_ospi_get_resources(pdev);
+ if (ret)
+ return ret;
+
+ memset(&dma_cfg, 0, sizeof(dma_cfg));
+ dma_cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ dma_cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ dma_cfg.src_addr = ospi->regs_phys_base + OSPI_DR;
+ dma_cfg.dst_addr = ospi->regs_phys_base + OSPI_DR;
+ dma_cfg.src_maxburst = 4;
+ dma_cfg.dst_maxburst = 4;
+ stm32_ospi_dma_setup(ospi, &dma_cfg);
+
+ mutex_init(&ospi->lock);
+
+ ctrl->mode_bits = SPI_RX_DUAL | SPI_RX_QUAD |
+ SPI_TX_DUAL | SPI_TX_QUAD |
+ SPI_TX_OCTAL | SPI_RX_OCTAL;
+ ctrl->flags = SPI_CONTROLLER_HALF_DUPLEX;
+ ctrl->setup = stm32_ospi_setup;
+ ctrl->bus_num = -1;
+ ctrl->mem_ops = &stm32_ospi_mem_ops;
+ ctrl->use_gpio_descriptors = true;
+ ctrl->transfer_one_message = stm32_ospi_transfer_one_message;
+ ctrl->num_chipselect = STM32_OSPI_MAX_NORCHIP;
+ ctrl->dev.of_node = dev->of_node;
+
+ pm_runtime_enable(ospi->dev);
+ pm_runtime_set_autosuspend_delay(ospi->dev, STM32_AUTOSUSPEND_DELAY);
+ pm_runtime_use_autosuspend(ospi->dev);
+
+ ret = pm_runtime_resume_and_get(ospi->dev);
+ if (ret < 0)
+ goto err_pm_enable;
+
+ if (ospi->rstc) {
+ reset_control_assert(ospi->rstc);
+ udelay(2);
+ reset_control_deassert(ospi->rstc);
+ }
+
+ ret = spi_register_controller(ctrl);
+ if (ret) {
+ /* Disable ospi */
+ writel_relaxed(0, ospi->regs_base + OSPI_CR);
+ goto err_pm_resume;
+ }
+
+ pm_runtime_mark_last_busy(ospi->dev);
+ pm_runtime_put_autosuspend(ospi->dev);
+
+ return 0;
+
+err_pm_resume:
+ pm_runtime_put_sync_suspend(ospi->dev);
+
+err_pm_enable:
+ pm_runtime_force_suspend(ospi->dev);
+ mutex_destroy(&ospi->lock);
+
+ return ret;
+}
+
+static void stm32_ospi_remove(struct platform_device *pdev)
+{
+ struct stm32_ospi *ospi = platform_get_drvdata(pdev);
+ int ret;
+
+ ret = pm_runtime_resume_and_get(ospi->dev);
+ if (ret < 0)
+ return;
+
+ spi_unregister_controller(ospi->ctrl);
+ /* Disable ospi */
+ writel_relaxed(0, ospi->regs_base + OSPI_CR);
+ mutex_destroy(&ospi->lock);
+
+ if (ospi->dma_chtx)
+ dma_release_channel(ospi->dma_chtx);
+ if (ospi->dma_chrx)
+ dma_release_channel(ospi->dma_chrx);
+
+ pm_runtime_put_sync_suspend(ospi->dev);
+ pm_runtime_force_suspend(ospi->dev);
+}
+
+static int __maybe_unused stm32_ospi_suspend(struct device *dev)
+{
+ struct stm32_ospi *ospi = dev_get_drvdata(dev);
+
+ pinctrl_pm_select_sleep_state(dev);
+
+ return pm_runtime_force_suspend(ospi->dev);
+}
+
+static int __maybe_unused stm32_ospi_resume(struct device *dev)
+{
+ struct stm32_ospi *ospi = dev_get_drvdata(dev);
+ void __iomem *regs_base = ospi->regs_base;
+ int ret;
+
+ ret = pm_runtime_force_resume(ospi->dev);
+ if (ret < 0)
+ return ret;
+
+ pinctrl_pm_select_default_state(dev);
+
+ ret = pm_runtime_resume_and_get(ospi->dev);
+ if (ret < 0)
+ return ret;
+
+ writel_relaxed(ospi->cr_reg, regs_base + OSPI_CR);
+ writel_relaxed(ospi->dcr_reg, regs_base + OSPI_DCR1);
+ pm_runtime_mark_last_busy(ospi->dev);
+ pm_runtime_put_autosuspend(ospi->dev);
+
+ return 0;
+}
+
+static int __maybe_unused stm32_ospi_runtime_suspend(struct device *dev)
+{
+ struct stm32_ospi *ospi = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(ospi->clk);
+
+ return 0;
+}
+
+static int __maybe_unused stm32_ospi_runtime_resume(struct device *dev)
+{
+ struct stm32_ospi *ospi = dev_get_drvdata(dev);
+
+ return clk_prepare_enable(ospi->clk);
+}
+
+static const struct dev_pm_ops stm32_ospi_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(stm32_ospi_suspend, stm32_ospi_resume)
+ SET_RUNTIME_PM_OPS(stm32_ospi_runtime_suspend,
+ stm32_ospi_runtime_resume, NULL)
+};
+
+static const struct of_device_id stm32_ospi_of_match[] = {
+ { .compatible = "st,stm32mp25-ospi" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, stm32_ospi_of_match);
+
+static struct platform_driver stm32_ospi_driver = {
+ .probe = stm32_ospi_probe,
+ .remove = stm32_ospi_remove,
+ .driver = {
+ .name = "stm32-ospi",
+ .pm = &stm32_ospi_pm_ops,
+ .of_match_table = stm32_ospi_of_match,
+ },
+};
+module_platform_driver(stm32_ospi_driver);
+
+MODULE_DESCRIPTION("STMicroelectronics STM32 OCTO SPI driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi-stm32-qspi.c b/drivers/spi/spi-stm32-qspi.c
index 540b6948b24d..9691197bbf5a 100644
--- a/drivers/spi/spi-stm32-qspi.c
+++ b/drivers/spi/spi-stm32-qspi.c
@@ -362,11 +362,6 @@ static int stm32_qspi_send(struct spi_device *spi, const struct spi_mem_op *op)
u32 ccr, cr;
int timeout, err = 0, err_poll_status = 0;
- dev_dbg(qspi->dev, "cmd:%#x mode:%d.%d.%d.%d addr:%#llx len:%#x\n",
- op->cmd.opcode, op->cmd.buswidth, op->addr.buswidth,
- op->dummy.buswidth, op->data.buswidth,
- op->addr.val, op->data.nbytes);
-
cr = readl_relaxed(qspi->io_base + QSPI_CR);
cr &= ~CR_PRESC_MASK & ~CR_FSEL;
cr |= FIELD_PREP(CR_PRESC_MASK, flash->presc);
diff --git a/drivers/spi/spi-zynq-qspi.c b/drivers/spi/spi-zynq-qspi.c
index 2bd25c75f881..5232483c4a3a 100644
--- a/drivers/spi/spi-zynq-qspi.c
+++ b/drivers/spi/spi-zynq-qspi.c
@@ -540,10 +540,6 @@ static int zynq_qspi_exec_mem_op(struct spi_mem *mem,
int err = 0, i;
u8 *tmpbuf;
- dev_dbg(xqspi->dev, "cmd:%#x mode:%d.%d.%d.%d\n",
- op->cmd.opcode, op->cmd.buswidth, op->addr.buswidth,
- op->dummy.buswidth, op->data.buswidth);
-
zynq_qspi_chipselect(mem->spi, true);
zynq_qspi_config_op(xqspi, mem->spi, op);
diff --git a/drivers/spi/spi-zynqmp-gqspi.c b/drivers/spi/spi-zynqmp-gqspi.c
index d800d79f62a7..595b6dc10845 100644
--- a/drivers/spi/spi-zynqmp-gqspi.c
+++ b/drivers/spi/spi-zynqmp-gqspi.c
@@ -82,7 +82,6 @@
#define GQSPI_GENFIFO_RX 0x00020000
#define GQSPI_GENFIFO_STRIPE 0x00040000
#define GQSPI_GENFIFO_POLL 0x00080000
-#define GQSPI_GENFIFO_EXP_START 0x00000100
#define GQSPI_FIFO_CTRL_RST_RX_FIFO_MASK 0x00000004
#define GQSPI_FIFO_CTRL_RST_TX_FIFO_MASK 0x00000002
#define GQSPI_FIFO_CTRL_RST_GEN_FIFO_MASK 0x00000001
@@ -580,6 +579,8 @@ static int zynqmp_qspi_config_op(struct zynqmp_qspi *xqspi,
zynqmp_gqspi_write(xqspi, GQSPI_CONFIG_OFST, config_reg);
zynqmp_qspi_set_tapdelay(xqspi, baud_rate_val);
}
+
+ dev_dbg(xqspi->dev, "config speed %u\n", req_speed_hz);
return 0;
}
@@ -670,69 +671,77 @@ static void zynqmp_qspi_readrxfifo(struct zynqmp_qspi *xqspi, u32 size)
static void zynqmp_qspi_fillgenfifo(struct zynqmp_qspi *xqspi, u8 nbits,
u32 genfifoentry)
{
- u32 transfer_len = 0;
+ u32 transfer_len, tempcount, exponent;
+ u8 imm_data;
- if (xqspi->txbuf) {
- genfifoentry &= ~GQSPI_GENFIFO_RX;
- genfifoentry |= GQSPI_GENFIFO_DATA_XFER;
- genfifoentry |= GQSPI_GENFIFO_TX;
- transfer_len = xqspi->bytes_to_transfer;
- } else if (xqspi->rxbuf) {
- genfifoentry &= ~GQSPI_GENFIFO_TX;
- genfifoentry |= GQSPI_GENFIFO_DATA_XFER;
+ genfifoentry |= GQSPI_GENFIFO_DATA_XFER;
+ if (xqspi->rxbuf) {
genfifoentry |= GQSPI_GENFIFO_RX;
if (xqspi->mode == GQSPI_MODE_DMA)
transfer_len = xqspi->dma_rx_bytes;
else
transfer_len = xqspi->bytes_to_receive;
} else {
- /* Sending dummy circles here */
- genfifoentry &= ~(GQSPI_GENFIFO_TX | GQSPI_GENFIFO_RX);
- genfifoentry |= GQSPI_GENFIFO_DATA_XFER;
transfer_len = xqspi->bytes_to_transfer;
}
+
+ if (xqspi->txbuf)
+ genfifoentry |= GQSPI_GENFIFO_TX;
+
genfifoentry |= zynqmp_qspi_selectspimode(xqspi, nbits);
xqspi->genfifoentry = genfifoentry;
-
- if ((transfer_len) < GQSPI_GENFIFO_IMM_DATA_MASK) {
- genfifoentry &= ~GQSPI_GENFIFO_IMM_DATA_MASK;
- genfifoentry |= transfer_len;
- zynqmp_gqspi_write(xqspi, GQSPI_GEN_FIFO_OFST, genfifoentry);
- } else {
- int tempcount = transfer_len;
- u32 exponent = 8; /* 2^8 = 256 */
- u8 imm_data = tempcount & 0xFF;
-
- tempcount &= ~(tempcount & 0xFF);
- /* Immediate entry */
- if (tempcount != 0) {
- /* Exponent entries */
- genfifoentry |= GQSPI_GENFIFO_EXP;
- while (tempcount != 0) {
- if (tempcount & GQSPI_GENFIFO_EXP_START) {
- genfifoentry &=
- ~GQSPI_GENFIFO_IMM_DATA_MASK;
- genfifoentry |= exponent;
- zynqmp_gqspi_write(xqspi,
- GQSPI_GEN_FIFO_OFST,
- genfifoentry);
- }
- tempcount = tempcount >> 1;
- exponent++;
- }
- }
- if (imm_data != 0) {
- genfifoentry &= ~GQSPI_GENFIFO_EXP;
- genfifoentry &= ~GQSPI_GENFIFO_IMM_DATA_MASK;
- genfifoentry |= (u8)(imm_data & 0xFF);
+ dev_dbg(xqspi->dev, "genfifo %05x transfer_len %u\n",
+ genfifoentry, transfer_len);
+
+ /* Exponent entries */
+ imm_data = transfer_len;
+ tempcount = transfer_len >> 8;
+ exponent = 8;
+ genfifoentry |= GQSPI_GENFIFO_EXP;
+ while (tempcount) {
+ if (tempcount & 1)
zynqmp_gqspi_write(xqspi, GQSPI_GEN_FIFO_OFST,
- genfifoentry);
- }
- }
- if (xqspi->mode == GQSPI_MODE_IO && xqspi->rxbuf) {
- /* Dummy generic FIFO entry */
- zynqmp_gqspi_write(xqspi, GQSPI_GEN_FIFO_OFST, 0x0);
+ genfifoentry | exponent);
+ tempcount >>= 1;
+ exponent++;
}
+
+ /* Immediate entry */
+ genfifoentry &= ~GQSPI_GENFIFO_EXP;
+ if (imm_data)
+ zynqmp_gqspi_write(xqspi, GQSPI_GEN_FIFO_OFST,
+ genfifoentry | imm_data);
+
+ /* Dummy generic FIFO entry */
+ if (xqspi->mode == GQSPI_MODE_IO && xqspi->rxbuf)
+ zynqmp_gqspi_write(xqspi, GQSPI_GEN_FIFO_OFST, 0);
+}
+
+/**
+ * zynqmp_qspi_disable_dma() - Disable DMA mode
+ * @xqspi: GQSPI instance
+ */
+static void zynqmp_qspi_disable_dma(struct zynqmp_qspi *xqspi)
+{
+ u32 config_reg = zynqmp_gqspi_read(xqspi, GQSPI_CONFIG_OFST);
+
+ config_reg &= ~GQSPI_CFG_MODE_EN_MASK;
+ zynqmp_gqspi_write(xqspi, GQSPI_CONFIG_OFST, config_reg);
+ xqspi->mode = GQSPI_MODE_IO;
+}
+
+/**
+ * zynqmp_qspi_enable_dma() - Enable DMA mode
+ * @xqspi: GQSPI instance
+ */
+static void zynqmp_qspi_enable_dma(struct zynqmp_qspi *xqspi)
+{
+ u32 config_reg = zynqmp_gqspi_read(xqspi, GQSPI_CONFIG_OFST);
+
+ config_reg &= ~GQSPI_CFG_MODE_EN_MASK;
+ config_reg |= GQSPI_CFG_MODE_EN_DMA_MASK;
+ zynqmp_gqspi_write(xqspi, GQSPI_CONFIG_OFST, config_reg);
+ xqspi->mode = GQSPI_MODE_DMA;
}
/**
@@ -744,7 +753,7 @@ static void zynqmp_qspi_fillgenfifo(struct zynqmp_qspi *xqspi, u8 nbits,
*/
static void zynqmp_process_dma_irq(struct zynqmp_qspi *xqspi)
{
- u32 config_reg, genfifoentry;
+ u32 genfifoentry;
dma_unmap_single(xqspi->dev, xqspi->dma_addr,
xqspi->dma_rx_bytes, DMA_FROM_DEVICE);
@@ -758,9 +767,7 @@ static void zynqmp_process_dma_irq(struct zynqmp_qspi *xqspi)
if (xqspi->bytes_to_receive > 0) {
/* Switch to IO mode,for remaining bytes to receive */
- config_reg = zynqmp_gqspi_read(xqspi, GQSPI_CONFIG_OFST);
- config_reg &= ~GQSPI_CFG_MODE_EN_MASK;
- zynqmp_gqspi_write(xqspi, GQSPI_CONFIG_OFST, config_reg);
+ zynqmp_qspi_disable_dma(xqspi);
/* Initiate the transfer of remaining bytes */
genfifoentry = xqspi->genfifoentry;
@@ -799,7 +806,6 @@ static void zynqmp_process_dma_irq(struct zynqmp_qspi *xqspi)
static irqreturn_t zynqmp_qspi_irq(int irq, void *dev_id)
{
struct zynqmp_qspi *xqspi = (struct zynqmp_qspi *)dev_id;
- irqreturn_t ret = IRQ_NONE;
u32 status, mask, dma_status = 0;
status = zynqmp_gqspi_read(xqspi, GQSPI_ISR_OFST);
@@ -814,27 +820,24 @@ static irqreturn_t zynqmp_qspi_irq(int irq, void *dev_id)
dma_status);
}
- if (mask & GQSPI_ISR_TXNOT_FULL_MASK) {
+ if (!mask && !dma_status)
+ return IRQ_NONE;
+
+ if (mask & GQSPI_ISR_TXNOT_FULL_MASK)
zynqmp_qspi_filltxfifo(xqspi, GQSPI_TX_FIFO_FILL);
- ret = IRQ_HANDLED;
- }
- if (dma_status & GQSPI_QSPIDMA_DST_I_STS_DONE_MASK) {
+ if (dma_status & GQSPI_QSPIDMA_DST_I_STS_DONE_MASK)
zynqmp_process_dma_irq(xqspi);
- ret = IRQ_HANDLED;
- } else if (!(mask & GQSPI_IER_RXEMPTY_MASK) &&
- (mask & GQSPI_IER_GENFIFOEMPTY_MASK)) {
+ else if (!(mask & GQSPI_IER_RXEMPTY_MASK) &&
+ (mask & GQSPI_IER_GENFIFOEMPTY_MASK))
zynqmp_qspi_readrxfifo(xqspi, GQSPI_RX_FIFO_FILL);
- ret = IRQ_HANDLED;
- }
if (xqspi->bytes_to_receive == 0 && xqspi->bytes_to_transfer == 0 &&
((status & GQSPI_IRQ_MASK) == GQSPI_IRQ_MASK)) {
zynqmp_gqspi_write(xqspi, GQSPI_IDR_OFST, GQSPI_ISR_IDR_MASK);
complete(&xqspi->data_completion);
- ret = IRQ_HANDLED;
}
- return ret;
+ return IRQ_HANDLED;
}
/**
@@ -845,17 +848,14 @@ static irqreturn_t zynqmp_qspi_irq(int irq, void *dev_id)
*/
static int zynqmp_qspi_setuprxdma(struct zynqmp_qspi *xqspi)
{
- u32 rx_bytes, rx_rem, config_reg;
+ u32 rx_bytes, rx_rem;
dma_addr_t addr;
u64 dma_align = (u64)(uintptr_t)xqspi->rxbuf;
if (xqspi->bytes_to_receive < 8 ||
((dma_align & GQSPI_DMA_UNALIGN) != 0x0)) {
/* Setting to IO mode */
- config_reg = zynqmp_gqspi_read(xqspi, GQSPI_CONFIG_OFST);
- config_reg &= ~GQSPI_CFG_MODE_EN_MASK;
- zynqmp_gqspi_write(xqspi, GQSPI_CONFIG_OFST, config_reg);
- xqspi->mode = GQSPI_MODE_IO;
+ zynqmp_qspi_disable_dma(xqspi);
xqspi->dma_rx_bytes = 0;
return 0;
}
@@ -878,14 +878,7 @@ static int zynqmp_qspi_setuprxdma(struct zynqmp_qspi *xqspi)
zynqmp_gqspi_write(xqspi, GQSPI_QSPIDMA_DST_ADDR_MSB_OFST,
((u32)addr) & 0xfff);
- /* Enabling the DMA mode */
- config_reg = zynqmp_gqspi_read(xqspi, GQSPI_CONFIG_OFST);
- config_reg &= ~GQSPI_CFG_MODE_EN_MASK;
- config_reg |= GQSPI_CFG_MODE_EN_DMA_MASK;
- zynqmp_gqspi_write(xqspi, GQSPI_CONFIG_OFST, config_reg);
-
- /* Switch to DMA mode */
- xqspi->mode = GQSPI_MODE_DMA;
+ zynqmp_qspi_enable_dma(xqspi);
/* Write the number of bytes to transfer */
zynqmp_gqspi_write(xqspi, GQSPI_QSPIDMA_DST_SIZE_OFST, rx_bytes);
@@ -905,18 +898,10 @@ static int zynqmp_qspi_setuprxdma(struct zynqmp_qspi *xqspi)
static void zynqmp_qspi_write_op(struct zynqmp_qspi *xqspi, u8 tx_nbits,
u32 genfifoentry)
{
- u32 config_reg;
-
zynqmp_qspi_fillgenfifo(xqspi, tx_nbits, genfifoentry);
zynqmp_qspi_filltxfifo(xqspi, GQSPI_TXD_DEPTH);
- if (xqspi->mode == GQSPI_MODE_DMA) {
- config_reg = zynqmp_gqspi_read(xqspi,
- GQSPI_CONFIG_OFST);
- config_reg &= ~GQSPI_CFG_MODE_EN_MASK;
- zynqmp_gqspi_write(xqspi, GQSPI_CONFIG_OFST,
- config_reg);
- xqspi->mode = GQSPI_MODE_IO;
- }
+ if (xqspi->mode == GQSPI_MODE_DMA)
+ zynqmp_qspi_disable_dma(xqspi);
}
/**
@@ -1059,18 +1044,14 @@ static unsigned long zynqmp_qspi_timeout(struct zynqmp_qspi *xqspi, u8 bits,
static int zynqmp_qspi_exec_op(struct spi_mem *mem,
const struct spi_mem_op *op)
{
- struct zynqmp_qspi *xqspi = spi_controller_get_devdata
- (mem->spi->controller);
+ struct zynqmp_qspi *xqspi =
+ spi_controller_get_devdata(mem->spi->controller);
unsigned long timeout;
int err = 0, i;
u32 genfifoentry = 0;
u16 opcode = op->cmd.opcode;
u64 opaddr;
- dev_dbg(xqspi->dev, "cmd:%#x mode:%d.%d.%d.%d\n",
- op->cmd.opcode, op->cmd.buswidth, op->addr.buswidth,
- op->dummy.buswidth, op->data.buswidth);
-
mutex_lock(&xqspi->op_lock);
zynqmp_qspi_config_op(xqspi, op);
zynqmp_qspi_chipselect(mem->spi, false);
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index a7a4647717d4..90e27729ef6b 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -31,6 +31,7 @@
#include <linux/ptp_clock_kernel.h>
#include <linux/sched/rt.h>
#include <linux/slab.h>
+#include <linux/spi/offload/types.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
#include <uapi/linux/sched/types.h>
@@ -42,7 +43,7 @@ EXPORT_TRACEPOINT_SYMBOL(spi_transfer_stop);
#include "internals.h"
-static DEFINE_IDR(spi_master_idr);
+static DEFINE_IDR(spi_controller_idr);
static void spidev_release(struct device *dev)
{
@@ -305,7 +306,7 @@ static const struct attribute_group spi_controller_statistics_group = {
.attrs = spi_controller_statistics_attrs,
};
-static const struct attribute_group *spi_master_groups[] = {
+static const struct attribute_group *spi_controller_groups[] = {
&spi_controller_statistics_group,
NULL,
};
@@ -1106,7 +1107,7 @@ static void spi_set_cs(struct spi_device *spi, bool enable, bool force)
spi_toggle_csgpiod(spi, idx, enable, activate);
}
}
- /* Some SPI masters need both GPIO CS & slave_select */
+ /* Some SPI controllers need both GPIO CS & ->set_cs() */
if ((spi->controller->flags & SPI_CONTROLLER_GPIO_SS) &&
spi->controller->set_cs)
spi->controller->set_cs(spi, !enable);
@@ -1495,10 +1496,7 @@ static void _spi_transfer_delay_ns(u32 ns)
} else {
u32 us = DIV_ROUND_UP(ns, NSEC_PER_USEC);
- if (us <= 10)
- udelay(us);
- else
- usleep_range(us, us + DIV_ROUND_UP(us, 10));
+ fsleep(us);
}
}
@@ -2534,7 +2532,7 @@ err_out:
* @ctlr: Pointer to spi_controller device
*
* Registers an spi_device for each child node of controller node which
- * represents a valid SPI slave.
+ * represents a valid SPI target device.
*/
static void of_register_spi_devices(struct spi_controller *ctlr)
{
@@ -2819,7 +2817,7 @@ struct spi_device *acpi_spi_device_alloc(struct spi_controller *ctlr,
if (!lookup.max_speed_hz &&
ACPI_SUCCESS(acpi_get_parent(adev->handle, &parent_handle)) &&
device_match_acpi_handle(lookup.ctlr->dev.parent, parent_handle)) {
- /* Apple does not use _CRS but nested devices for SPI slaves */
+ /* Apple does not use _CRS but nested devices for SPI target devices */
acpi_spi_parse_apple_properties(adev, &lookup);
}
@@ -2911,7 +2909,7 @@ static void acpi_register_spi_devices(struct spi_controller *ctlr)
SPI_ACPI_ENUMERATE_MAX_DEPTH,
acpi_spi_add_device, NULL, ctlr, NULL);
if (ACPI_FAILURE(status))
- dev_warn(&ctlr->dev, "failed to enumerate SPI slaves\n");
+ dev_warn(&ctlr->dev, "failed to enumerate SPI target devices\n");
}
#else
static inline void acpi_register_spi_devices(struct spi_controller *ctlr) {}
@@ -2925,16 +2923,15 @@ static void spi_controller_release(struct device *dev)
kfree(ctlr);
}
-static const struct class spi_master_class = {
+static const struct class spi_controller_class = {
.name = "spi_master",
.dev_release = spi_controller_release,
- .dev_groups = spi_master_groups,
+ .dev_groups = spi_controller_groups,
};
#ifdef CONFIG_SPI_SLAVE
/**
- * spi_target_abort - abort the ongoing transfer request on an SPI slave
- * controller
+ * spi_target_abort - abort the ongoing transfer request on an SPI target controller
* @spi: device used for the current transfer
*/
int spi_target_abort(struct spi_device *spi)
@@ -2954,9 +2951,13 @@ static ssize_t slave_show(struct device *dev, struct device_attribute *attr,
struct spi_controller *ctlr = container_of(dev, struct spi_controller,
dev);
struct device *child;
+ int ret;
child = device_find_any_child(&ctlr->dev);
- return sysfs_emit(buf, "%s\n", child ? to_spi_device(child)->modalias : NULL);
+ ret = sysfs_emit(buf, "%s\n", child ? to_spi_device(child)->modalias : NULL);
+ put_device(child);
+
+ return ret;
}
static ssize_t slave_store(struct device *dev, struct device_attribute *attr,
@@ -2975,13 +2976,13 @@ static ssize_t slave_store(struct device *dev, struct device_attribute *attr,
child = device_find_any_child(&ctlr->dev);
if (child) {
- /* Remove registered slave */
+ /* Remove registered target device */
device_unregister(child);
put_device(child);
}
if (strcmp(name, "(null)")) {
- /* Register new slave */
+ /* Register new target device */
spi = spi_alloc_device(ctlr);
if (!spi)
return -ENOMEM;
@@ -3000,40 +3001,40 @@ static ssize_t slave_store(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR_RW(slave);
-static struct attribute *spi_slave_attrs[] = {
+static struct attribute *spi_target_attrs[] = {
&dev_attr_slave.attr,
NULL,
};
-static const struct attribute_group spi_slave_group = {
- .attrs = spi_slave_attrs,
+static const struct attribute_group spi_target_group = {
+ .attrs = spi_target_attrs,
};
-static const struct attribute_group *spi_slave_groups[] = {
+static const struct attribute_group *spi_target_groups[] = {
&spi_controller_statistics_group,
- &spi_slave_group,
+ &spi_target_group,
NULL,
};
-static const struct class spi_slave_class = {
+static const struct class spi_target_class = {
.name = "spi_slave",
.dev_release = spi_controller_release,
- .dev_groups = spi_slave_groups,
+ .dev_groups = spi_target_groups,
};
#else
-extern struct class spi_slave_class; /* dummy */
+extern struct class spi_target_class; /* dummy */
#endif
/**
- * __spi_alloc_controller - allocate an SPI master or slave controller
+ * __spi_alloc_controller - allocate an SPI host or target controller
* @dev: the controller, possibly using the platform_bus
* @size: how much zeroed driver-private data to allocate; the pointer to this
* memory is in the driver_data field of the returned device, accessible
* with spi_controller_get_devdata(); the memory is cacheline aligned;
* drivers granting DMA access to portions of their private data need to
* round up @size using ALIGN(size, dma_get_cache_alignment()).
- * @slave: flag indicating whether to allocate an SPI master (false) or SPI
- * slave (true) controller
+ * @target: flag indicating whether to allocate an SPI host (false) or SPI target (true)
+ * controller
* Context: can sleep
*
* This call is used only by SPI controller drivers, which are the
@@ -3050,7 +3051,7 @@ extern struct class spi_slave_class; /* dummy */
* Return: the SPI controller structure on success, else NULL.
*/
struct spi_controller *__spi_alloc_controller(struct device *dev,
- unsigned int size, bool slave)
+ unsigned int size, bool target)
{
struct spi_controller *ctlr;
size_t ctlr_size = ALIGN(sizeof(*ctlr), dma_get_cache_alignment());
@@ -3071,11 +3072,11 @@ struct spi_controller *__spi_alloc_controller(struct device *dev,
mutex_init(&ctlr->add_lock);
ctlr->bus_num = -1;
ctlr->num_chipselect = 1;
- ctlr->slave = slave;
- if (IS_ENABLED(CONFIG_SPI_SLAVE) && slave)
- ctlr->dev.class = &spi_slave_class;
+ ctlr->target = target;
+ if (IS_ENABLED(CONFIG_SPI_SLAVE) && target)
+ ctlr->dev.class = &spi_target_class;
else
- ctlr->dev.class = &spi_master_class;
+ ctlr->dev.class = &spi_controller_class;
ctlr->dev.parent = dev;
pm_suspend_ignore_children(&ctlr->dev, true);
spi_controller_set_devdata(ctlr, (void *)ctlr + ctlr_size);
@@ -3093,7 +3094,7 @@ static void devm_spi_release_controller(struct device *dev, void *ctlr)
* __devm_spi_alloc_controller - resource-managed __spi_alloc_controller()
* @dev: physical device of SPI controller
* @size: how much zeroed driver-private data to allocate
- * @slave: whether to allocate an SPI master (false) or SPI slave (true)
+ * @target: whether to allocate an SPI host (false) or SPI target (true) controller
* Context: can sleep
*
* Allocate an SPI controller and automatically release a reference on it
@@ -3106,7 +3107,7 @@ static void devm_spi_release_controller(struct device *dev, void *ctlr)
*/
struct spi_controller *__devm_spi_alloc_controller(struct device *dev,
unsigned int size,
- bool slave)
+ bool target)
{
struct spi_controller **ptr, *ctlr;
@@ -3115,7 +3116,7 @@ struct spi_controller *__devm_spi_alloc_controller(struct device *dev,
if (!ptr)
return NULL;
- ctlr = __spi_alloc_controller(dev, size, slave);
+ ctlr = __spi_alloc_controller(dev, size, target);
if (ctlr) {
ctlr->devm_allocated = true;
*ptr = ctlr;
@@ -3129,8 +3130,8 @@ struct spi_controller *__devm_spi_alloc_controller(struct device *dev,
EXPORT_SYMBOL_GPL(__devm_spi_alloc_controller);
/**
- * spi_get_gpio_descs() - grab chip select GPIOs for the master
- * @ctlr: The SPI master to grab GPIO descriptors for
+ * spi_get_gpio_descs() - grab chip select GPIOs for the controller
+ * @ctlr: The SPI controller to grab GPIO descriptors for
*/
static int spi_get_gpio_descs(struct spi_controller *ctlr)
{
@@ -3228,7 +3229,7 @@ static int spi_controller_id_alloc(struct spi_controller *ctlr, int start, int e
int id;
mutex_lock(&board_lock);
- id = idr_alloc(&spi_master_idr, ctlr, start, end, GFP_KERNEL);
+ id = idr_alloc(&spi_controller_idr, ctlr, start, end, GFP_KERNEL);
mutex_unlock(&board_lock);
if (WARN(id < 0, "couldn't get idr"))
return id == -ENOSPC ? -EBUSY : id;
@@ -3377,7 +3378,7 @@ destroy_queue:
spi_destroy_queue(ctlr);
free_bus_id:
mutex_lock(&board_lock);
- idr_remove(&spi_master_idr, ctlr->bus_num);
+ idr_remove(&spi_controller_idr, ctlr->bus_num);
mutex_unlock(&board_lock);
return status;
}
@@ -3389,8 +3390,7 @@ static void devm_spi_unregister(struct device *dev, void *res)
}
/**
- * devm_spi_register_controller - register managed SPI host or target
- * controller
+ * devm_spi_register_controller - register managed SPI host or target controller
* @dev: device managing SPI controller
* @ctlr: initialized controller, originally from spi_alloc_host() or
* spi_alloc_target()
@@ -3430,7 +3430,7 @@ static int __unregister(struct device *dev, void *null)
}
/**
- * spi_unregister_controller - unregister SPI master or slave controller
+ * spi_unregister_controller - unregister SPI host or target controller
* @ctlr: the controller being unregistered
* Context: can sleep
*
@@ -3454,7 +3454,7 @@ void spi_unregister_controller(struct spi_controller *ctlr)
/* First make sure that this controller was ever added */
mutex_lock(&board_lock);
- found = idr_find(&spi_master_idr, id);
+ found = idr_find(&spi_controller_idr, id);
mutex_unlock(&board_lock);
if (ctlr->queued) {
if (spi_destroy_queue(ctlr))
@@ -3469,7 +3469,7 @@ void spi_unregister_controller(struct spi_controller *ctlr)
/* Free bus id */
mutex_lock(&board_lock);
if (found == ctlr)
- idr_remove(&spi_master_idr, id);
+ idr_remove(&spi_controller_idr, id);
mutex_unlock(&board_lock);
if (IS_ENABLED(CONFIG_SPI_DYNAMIC))
@@ -4158,6 +4158,15 @@ static int __spi_validate(struct spi_device *spi, struct spi_message *message)
if (_spi_xfer_word_delay_update(xfer, spi))
return -EINVAL;
+
+ /* Make sure controller supports required offload features. */
+ if (xfer->offload_flags) {
+ if (!message->offload)
+ return -EINVAL;
+
+ if (xfer->offload_flags & ~message->offload->xfer_flags)
+ return -EINVAL;
+ }
}
message->status = -EINPROGRESS;
@@ -4613,7 +4622,7 @@ EXPORT_SYMBOL_GPL(spi_sync_locked);
/**
* spi_bus_lock - obtain a lock for exclusive SPI bus usage
- * @ctlr: SPI bus master that should be locked for exclusive bus access
+ * @ctlr: SPI bus controller that should be locked for exclusive bus access
* Context: can sleep
*
* This call may only be used from a context that may sleep. The sleep
@@ -4644,7 +4653,7 @@ EXPORT_SYMBOL_GPL(spi_bus_lock);
/**
* spi_bus_unlock - release the lock for exclusive SPI bus usage
- * @ctlr: SPI bus master that was locked for exclusive bus access
+ * @ctlr: SPI bus controller that was locked for exclusive bus access
* Context: can sleep
*
* This call may only be used from a context that may sleep. The sleep
@@ -4761,9 +4770,9 @@ static struct spi_controller *of_find_spi_controller_by_node(struct device_node
{
struct device *dev;
- dev = class_find_device_by_of_node(&spi_master_class, node);
+ dev = class_find_device_by_of_node(&spi_controller_class, node);
if (!dev && IS_ENABLED(CONFIG_SPI_SLAVE))
- dev = class_find_device_by_of_node(&spi_slave_class, node);
+ dev = class_find_device_by_of_node(&spi_target_class, node);
if (!dev)
return NULL;
@@ -4843,10 +4852,10 @@ struct spi_controller *acpi_spi_find_controller_by_adev(struct acpi_device *adev
{
struct device *dev;
- dev = class_find_device(&spi_master_class, NULL, adev,
+ dev = class_find_device(&spi_controller_class, NULL, adev,
spi_acpi_controller_match);
if (!dev && IS_ENABLED(CONFIG_SPI_SLAVE))
- dev = class_find_device(&spi_slave_class, NULL, adev,
+ dev = class_find_device(&spi_target_class, NULL, adev,
spi_acpi_controller_match);
if (!dev)
return NULL;
@@ -4916,12 +4925,12 @@ static int __init spi_init(void)
if (status < 0)
goto err1;
- status = class_register(&spi_master_class);
+ status = class_register(&spi_controller_class);
if (status < 0)
goto err2;
if (IS_ENABLED(CONFIG_SPI_SLAVE)) {
- status = class_register(&spi_slave_class);
+ status = class_register(&spi_target_class);
if (status < 0)
goto err3;
}
@@ -4934,7 +4943,7 @@ static int __init spi_init(void)
return 0;
err3:
- class_unregister(&spi_master_class);
+ class_unregister(&spi_controller_class);
err2:
bus_unregister(&spi_bus_type);
err1:
diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
index 58ae4304fdab..6108959c28d9 100644
--- a/drivers/spi/spidev.c
+++ b/drivers/spi/spidev.c
@@ -706,6 +706,7 @@ static const struct spi_device_id spidev_spi_ids[] = {
{ .name = /* cisco */ "spi-petra" },
{ .name = /* dh */ "dhcom-board" },
{ .name = /* elgin */ "jg10309-01" },
+ { .name = /* gocontroll */ "moduline-module-slot"},
{ .name = /* lineartechnology */ "ltc2488" },
{ .name = /* lwn */ "bk4" },
{ .name = /* lwn */ "bk4-spi" },
@@ -737,6 +738,7 @@ static const struct of_device_id spidev_dt_ids[] = {
{ .compatible = "cisco,spi-petra", .data = &spidev_of_check },
{ .compatible = "dh,dhcom-board", .data = &spidev_of_check },
{ .compatible = "elgin,jg10309-01", .data = &spidev_of_check },
+ { .compatible = "gocontroll,moduline-module-slot", .data = &spidev_of_check},
{ .compatible = "lineartechnology,ltc2488", .data = &spidev_of_check },
{ .compatible = "lwn,bk4", .data = &spidev_of_check },
{ .compatible = "lwn,bk4-spi", .data = &spidev_of_check },
diff --git a/drivers/staging/media/atomisp/include/linux/atomisp_platform.h b/drivers/staging/media/atomisp/include/linux/atomisp_platform.h
index 049246774ced..6146555fe9cf 100644
--- a/drivers/staging/media/atomisp/include/linux/atomisp_platform.h
+++ b/drivers/staging/media/atomisp/include/linux/atomisp_platform.h
@@ -172,10 +172,10 @@ void atomisp_unregister_subdev(struct v4l2_subdev *subdev);
#define IS_BYT __IS_SOC(INTEL_ATOM_SILVERMONT)
#define IS_CHT __IS_SOC(INTEL_ATOM_AIRMONT)
#define IS_MRFD __IS_SOC(INTEL_ATOM_SILVERMONT_MID)
-#define IS_MOFD __IS_SOC(INTEL_ATOM_AIRMONT_MID)
+#define IS_MOFD __IS_SOC(INTEL_ATOM_SILVERMONT_MID2)
/* Both CHT and MOFD come with ISP2401 */
#define IS_ISP2401 __IS_SOCS(INTEL_ATOM_AIRMONT, \
- INTEL_ATOM_AIRMONT_MID)
+ INTEL_ATOM_SILVERMONT_MID2)
#endif /* ATOMISP_PLATFORM_H_ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp_ioctl.c b/drivers/staging/media/atomisp/pci/atomisp_ioctl.c
index 1a960a01854f..1fb2ba819de3 100644
--- a/drivers/staging/media/atomisp/pci/atomisp_ioctl.c
+++ b/drivers/staging/media/atomisp/pci/atomisp_ioctl.c
@@ -34,7 +34,7 @@ static const char *CARD = "ATOM ISP"; /* max size 31 */
* FIXME: ISP should not know beforehand all CIDs supported by sensor.
* Instead, it needs to propagate to sensor unknown CIDs.
*/
-static struct v4l2_queryctrl ci_v4l2_controls[] = {
+static struct v4l2_query_ext_ctrl ci_v4l2_controls[] = {
{
.id = V4L2_CID_AUTO_WHITE_BALANCE,
.type = V4L2_CTRL_TYPE_BOOLEAN,
@@ -1140,31 +1140,34 @@ static int atomisp_s_ctrl(struct file *file, void *fh,
/*
* To query the attributes of a control.
- * applications set the id field of a struct v4l2_queryctrl and call the
+ * applications set the id field of a struct v4l2_query_ext_ctrl and call the
* this ioctl with a pointer to this structure. The driver fills
* the rest of the structure.
*/
-static int atomisp_queryctl(struct file *file, void *fh,
- struct v4l2_queryctrl *qc)
+static int atomisp_query_ext_ctrl(struct file *file, void *fh,
+ struct v4l2_query_ext_ctrl *qc)
{
- int i, ret = -EINVAL;
+ int i;
+ /* TODO: implement V4L2_CTRL_FLAG_NEXT_CTRL */
if (qc->id & V4L2_CTRL_FLAG_NEXT_CTRL)
- return ret;
+ return -EINVAL;
for (i = 0; i < ctrls_num; i++) {
if (ci_v4l2_controls[i].id == qc->id) {
- memcpy(qc, &ci_v4l2_controls[i],
- sizeof(struct v4l2_queryctrl));
- qc->reserved[0] = 0;
- ret = 0;
- break;
+ *qc = ci_v4l2_controls[i];
+ qc->elems = 1;
+ qc->elem_size = 4;
+ return 0;
}
}
- if (ret != 0)
- qc->flags = V4L2_CTRL_FLAG_DISABLED;
- return ret;
+ /*
+ * This is probably not needed, but this flag has been set for
+ * many kernel versions. Leave it to avoid breaking any apps.
+ */
+ qc->flags = V4L2_CTRL_FLAG_DISABLED;
+ return -EINVAL;
}
static int atomisp_camera_g_ext_ctrls(struct file *file, void *fh,
@@ -1561,9 +1564,7 @@ const struct v4l2_ioctl_ops atomisp_ioctl_ops = {
.vidioc_enum_input = atomisp_enum_input,
.vidioc_g_input = atomisp_g_input,
.vidioc_s_input = atomisp_s_input,
- .vidioc_queryctrl = atomisp_queryctl,
- .vidioc_s_ctrl = atomisp_s_ctrl,
- .vidioc_g_ctrl = atomisp_g_ctrl,
+ .vidioc_query_ext_ctrl = atomisp_query_ext_ctrl,
.vidioc_s_ext_ctrls = atomisp_s_ext_ctrls,
.vidioc_g_ext_ctrls = atomisp_g_ext_ctrls,
.vidioc_enum_framesizes = atomisp_enum_framesizes,
diff --git a/drivers/staging/media/atomisp/pci/isp/kernels/sdis/sdis_1.0/ia_css_sdis.host.c b/drivers/staging/media/atomisp/pci/isp/kernels/sdis/sdis_1.0/ia_css_sdis.host.c
index a4e019e74e6b..a1bea8bd1a39 100644
--- a/drivers/staging/media/atomisp/pci/isp/kernels/sdis/sdis_1.0/ia_css_sdis.host.c
+++ b/drivers/staging/media/atomisp/pci/isp/kernels/sdis/sdis_1.0/ia_css_sdis.host.c
@@ -230,7 +230,8 @@ void ia_css_sdis_clear_coefficients(
int
ia_css_get_dvs_statistics(
struct ia_css_dvs_statistics *host_stats,
- const struct ia_css_isp_dvs_statistics *isp_stats) {
+ const struct ia_css_isp_dvs_statistics *isp_stats)
+{
struct ia_css_isp_dvs_statistics_map *map;
int ret = 0;
@@ -240,13 +241,11 @@ ia_css_get_dvs_statistics(
assert(isp_stats);
map = ia_css_isp_dvs_statistics_map_allocate(isp_stats, NULL);
- if (map)
- {
+ if (map) {
hmm_load(isp_stats->data_ptr, map->data_ptr, isp_stats->size);
ia_css_translate_dvs_statistics(host_stats, map);
ia_css_isp_dvs_statistics_map_free(map);
- } else
- {
+ } else {
IA_CSS_ERROR("out of memory");
ret = -ENOMEM;
}
diff --git a/drivers/staging/media/imx/imx-media-vdic.c b/drivers/staging/media/imx/imx-media-vdic.c
index 09da4103a8db..86f2b30cb06c 100644
--- a/drivers/staging/media/imx/imx-media-vdic.c
+++ b/drivers/staging/media/imx/imx-media-vdic.c
@@ -180,60 +180,6 @@ out:
return ret;
}
-/*
- * This function is currently unused, but will be called when the
- * output/mem2mem device at the IDMAC input pad sends us a new
- * buffer. It kicks off the IDMAC read channels to bring in the
- * buffer fields from memory and begin the conversions.
- */
-static void __maybe_unused prepare_vdi_in_buffers(struct vdic_priv *priv,
- struct imx_media_buffer *curr)
-{
- dma_addr_t prev_phys, curr_phys, next_phys;
- struct imx_media_buffer *prev;
- struct vb2_buffer *curr_vb, *prev_vb;
- u32 fs = priv->field_size;
- u32 is = priv->in_stride;
-
- /* current input buffer is now previous */
- priv->prev_in_buf = priv->curr_in_buf;
- priv->curr_in_buf = curr;
- prev = priv->prev_in_buf ? priv->prev_in_buf : curr;
-
- prev_vb = &prev->vbuf.vb2_buf;
- curr_vb = &curr->vbuf.vb2_buf;
-
- switch (priv->fieldtype) {
- case V4L2_FIELD_SEQ_TB:
- case V4L2_FIELD_SEQ_BT:
- prev_phys = vb2_dma_contig_plane_dma_addr(prev_vb, 0) + fs;
- curr_phys = vb2_dma_contig_plane_dma_addr(curr_vb, 0);
- next_phys = vb2_dma_contig_plane_dma_addr(curr_vb, 0) + fs;
- break;
- case V4L2_FIELD_INTERLACED_TB:
- case V4L2_FIELD_INTERLACED_BT:
- case V4L2_FIELD_INTERLACED:
- prev_phys = vb2_dma_contig_plane_dma_addr(prev_vb, 0) + is;
- curr_phys = vb2_dma_contig_plane_dma_addr(curr_vb, 0);
- next_phys = vb2_dma_contig_plane_dma_addr(curr_vb, 0) + is;
- break;
- default:
- /*
- * can't get here, priv->fieldtype can only be one of
- * the above. This is to quiet smatch errors.
- */
- return;
- }
-
- ipu_cpmem_set_buffer(priv->vdi_in_ch_p, 0, prev_phys);
- ipu_cpmem_set_buffer(priv->vdi_in_ch, 0, curr_phys);
- ipu_cpmem_set_buffer(priv->vdi_in_ch_n, 0, next_phys);
-
- ipu_idmac_select_buffer(priv->vdi_in_ch_p, 0);
- ipu_idmac_select_buffer(priv->vdi_in_ch, 0);
- ipu_idmac_select_buffer(priv->vdi_in_ch_n, 0);
-}
-
static int setup_vdi_channel(struct vdic_priv *priv,
struct ipuv3_channel *channel,
dma_addr_t phys0, dma_addr_t phys1)
diff --git a/drivers/staging/media/ipu3/include/uapi/intel-ipu3.h b/drivers/staging/media/ipu3/include/uapi/intel-ipu3.h
index 4aa2797f5e3c..8b85524beb59 100644
--- a/drivers/staging/media/ipu3/include/uapi/intel-ipu3.h
+++ b/drivers/staging/media/ipu3/include/uapi/intel-ipu3.h
@@ -322,7 +322,8 @@ struct ipu3_uapi_ae_config {
* 0: positive, 1: negative, default 0.
* @y_calc: Pre-processing that converts Bayer quad to RGB+Y values to be
* used for building histogram. Range [0, 32], default 8.
- * Rule:
+ *
+ * Rule:
* y_gen_rate_gr + y_gen_rate_r + y_gen_rate_b + y_gen_rate_gb = 32
* A single Y is calculated based on sum of Gr/R/B/Gb based on
* their contribution ratio.
diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c
index 16e3ded98c32..832588f21f91 100644
--- a/drivers/target/iscsi/iscsi_target_nego.c
+++ b/drivers/target/iscsi/iscsi_target_nego.c
@@ -212,7 +212,7 @@ int iscsi_target_check_login_request(
if ((login_req->max_version != login->version_max) ||
(login_req->min_version != login->version_min)) {
- pr_err("Login request changed Version Max/Nin"
+ pr_err("Login request changed Version Max/Min"
" unexpectedly to 0x%02x/0x%02x, protocol error\n",
login_req->max_version, login_req->min_version);
iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
@@ -557,7 +557,7 @@ static void iscsi_target_do_login_rx(struct work_struct *work)
* before initial PDU processing in iscsi_target_start_negotiation()
* has completed, go ahead and retry until it's cleared.
*
- * Otherwise if the TCP connection drops while this is occuring,
+ * Otherwise if the TCP connection drops while this is occurring,
* iscsi_target_start_negotiation() will detect the failure, call
* cancel_delayed_work_sync(&conn->login_work), and cleanup the
* remaining iscsi connection resources from iscsi_np process context.
@@ -1050,7 +1050,7 @@ static int iscsi_target_do_login(struct iscsit_conn *conn, struct iscsi_login *l
/*
* Check to make sure the TCP connection has not
* dropped asynchronously while session reinstatement
- * was occuring in this kthread context, before
+ * was occurring in this kthread context, before
* transitioning to full feature phase operation.
*/
if (iscsi_target_sk_check_close(conn))
diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c
index 761c511aea07..c7b7da629741 100644
--- a/drivers/target/loopback/tcm_loop.c
+++ b/drivers/target/loopback/tcm_loop.c
@@ -176,7 +176,7 @@ static int tcm_loop_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *sc)
memset(tl_cmd, 0, sizeof(*tl_cmd));
tl_cmd->sc = sc;
- tl_cmd->sc_cmd_tag = scsi_cmd_to_rq(sc)->tag;
+ tl_cmd->sc_cmd_tag = blk_mq_unique_tag(scsi_cmd_to_rq(sc));
tcm_loop_target_queue_cmd(tl_cmd);
return 0;
@@ -242,7 +242,8 @@ static int tcm_loop_abort_task(struct scsi_cmnd *sc)
tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
ret = tcm_loop_issue_tmr(tl_tpg, sc->device->lun,
- scsi_cmd_to_rq(sc)->tag, TMR_ABORT_TASK);
+ blk_mq_unique_tag(scsi_cmd_to_rq(sc)),
+ TMR_ABORT_TASK);
return (ret == TMR_FUNCTION_COMPLETE) ? SUCCESS : FAILED;
}
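Switching from the raw blk-mq tag to blk_mq_unique_tag() matters on multi-queue hosts: scsi_cmd_to_rq(sc)->tag is only unique within one hardware queue, so two in-flight commands on different queues can carry the same value. The unique tag folds the hardware-queue index into the upper bits, giving the loopback target (and the ABORT TASK lookup above) a host-wide unique identifier. Roughly, as I read include/linux/blk-mq.h:

    /*
     * Paraphrase of blk_mq_unique_tag(): hardware-queue number in the
     * upper 16 bits, per-queue tag in the lower 16 bits.
     */
    u32 unique_tag = (hwq_index << BLK_MQ_UNIQUE_TAG_BITS) |
                     (rq_tag & BLK_MQ_UNIQUE_TAG_MASK);

The blk_mq_unique_tag_to_hwq()/blk_mq_unique_tag_to_tag() helpers recover the two halves again on the completion side.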
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index c40217f44b1b..66804bf1ee32 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -123,7 +123,7 @@ static ssize_t target_core_item_dbroot_store(struct config_item *item,
goto unlock;
}
- read_bytes = snprintf(db_root_stage, DB_ROOT_LEN, "%s", page);
+ read_bytes = scnprintf(db_root_stage, DB_ROOT_LEN, "%s", page);
if (!read_bytes)
goto unlock;
@@ -143,7 +143,7 @@ static ssize_t target_core_item_dbroot_store(struct config_item *item,
}
filp_close(fp, NULL);
- strncpy(db_root, db_root_stage, read_bytes);
+ strscpy(db_root, db_root_stage);
pr_debug("Target_Core_ConfigFS: db_root set to %s\n", db_root);
r = read_bytes;
@@ -3664,7 +3664,7 @@ static void target_init_dbroot(void)
}
filp_close(fp, NULL);
- strncpy(db_root, db_root_stage, DB_ROOT_LEN);
+ strscpy(db_root, db_root_stage);
pr_debug("Target_Core_ConfigFS: db_root set to %s\n", db_root);
}
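Two related hardening changes in this file: scnprintf() returns the number of bytes actually stored (excluding the terminating NUL) rather than the length the output would have needed, so the returned count stays within the buffer; and strscpy() always NUL-terminates and reports truncation, which strncpy() does not guarantee. The two-argument strscpy() form used here infers the destination size from the array type. A small contrast, assuming some source string src rather than the driver's own variables:

    char dst[64];

    /* strncpy() can leave dst unterminated if src fills the buffer. */
    strncpy(dst, src, sizeof(dst));

    /* strscpy() always terminates; it returns the number of characters
     * copied, or -E2BIG if src had to be truncated.  With an array
     * destination the size argument may be omitted. */
    if (strscpy(dst, src) == -E2BIG)
            pr_warn("source string truncated\n");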
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index d1ae3df069a4..cc2da086f96e 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -1078,8 +1078,8 @@ passthrough_parse_cdb(struct se_cmd *cmd,
if (!dev->dev_attrib.emulate_pr &&
((cdb[0] == PERSISTENT_RESERVE_IN) ||
(cdb[0] == PERSISTENT_RESERVE_OUT) ||
- (cdb[0] == RELEASE || cdb[0] == RELEASE_10) ||
- (cdb[0] == RESERVE || cdb[0] == RESERVE_10))) {
+ (cdb[0] == RELEASE_6 || cdb[0] == RELEASE_10) ||
+ (cdb[0] == RESERVE_6 || cdb[0] == RESERVE_10))) {
return TCM_UNSUPPORTED_SCSI_OPCODE;
}
@@ -1101,7 +1101,7 @@ passthrough_parse_cdb(struct se_cmd *cmd,
return target_cmd_size_check(cmd, size);
}
- if (cdb[0] == RELEASE || cdb[0] == RELEASE_10) {
+ if (cdb[0] == RELEASE_6 || cdb[0] == RELEASE_10) {
cmd->execute_cmd = target_scsi2_reservation_release;
if (cdb[0] == RELEASE_10)
size = get_unaligned_be16(&cdb[7]);
@@ -1109,7 +1109,7 @@ passthrough_parse_cdb(struct se_cmd *cmd,
size = cmd->data_length;
return target_cmd_size_check(cmd, size);
}
- if (cdb[0] == RESERVE || cdb[0] == RESERVE_10) {
+ if (cdb[0] == RESERVE_6 || cdb[0] == RESERVE_10) {
cmd->execute_cmd = target_scsi2_reservation_reserve;
if (cdb[0] == RESERVE_10)
size = get_unaligned_be16(&cdb[7]);
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index c8dc92a7d63e..73564efd11d2 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -167,18 +167,6 @@ static int iblock_configure_device(struct se_device *dev)
break;
}
- if (dev->dev_attrib.pi_prot_type) {
- struct bio_set *bs = &ib_dev->ibd_bio_set;
-
- if (bioset_integrity_create(bs, IBLOCK_BIO_POOL_SIZE) < 0) {
- pr_err("Unable to allocate bioset for PI\n");
- ret = -ENOMEM;
- goto out_blkdev_put;
- }
- pr_debug("IBLOCK setup BIP bs->bio_integrity_pool: %p\n",
- &bs->bio_integrity_pool);
- }
-
dev->dev_attrib.hw_pi_prot_type = dev->dev_attrib.pi_prot_type;
return 0;
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index 4f4ad6af416c..34cf2c399b39 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -91,7 +91,7 @@ target_scsi2_reservation_check(struct se_cmd *cmd)
switch (cmd->t_task_cdb[0]) {
case INQUIRY:
- case RELEASE:
+ case RELEASE_6:
case RELEASE_10:
return 0;
default:
@@ -418,12 +418,12 @@ static int core_scsi3_pr_seq_non_holder(struct se_cmd *cmd, u32 pr_reg_type,
return -EINVAL;
}
break;
- case RELEASE:
+ case RELEASE_6:
case RELEASE_10:
/* Handled by CRH=1 in target_scsi2_reservation_release() */
ret = 0;
break;
- case RESERVE:
+ case RESERVE_6:
case RESERVE_10:
/* Handled by CRH=1 in target_scsi2_reservation_reserve() */
ret = 0;
diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c
index ea14a3835681..0a02492bef70 100644
--- a/drivers/target/target_core_spc.c
+++ b/drivers/target/target_core_spc.c
@@ -1674,9 +1674,9 @@ static bool tcm_is_pr_enabled(struct target_opcode_descriptor *descr,
return true;
switch (descr->opcode) {
- case RESERVE:
+ case RESERVE_6:
case RESERVE_10:
- case RELEASE:
+ case RELEASE_6:
case RELEASE_10:
/*
* The pr_ops which are used by the backend modules don't
@@ -1828,9 +1828,9 @@ static struct target_opcode_descriptor tcm_opcode_pro_register_move = {
static struct target_opcode_descriptor tcm_opcode_release = {
.support = SCSI_SUPPORT_FULL,
- .opcode = RELEASE,
+ .opcode = RELEASE_6,
.cdb_size = 6,
- .usage_bits = {RELEASE, 0x00, 0x00, 0x00,
+ .usage_bits = {RELEASE_6, 0x00, 0x00, 0x00,
0x00, SCSI_CONTROL_MASK},
.enabled = tcm_is_pr_enabled,
};
@@ -1847,9 +1847,9 @@ static struct target_opcode_descriptor tcm_opcode_release10 = {
static struct target_opcode_descriptor tcm_opcode_reserve = {
.support = SCSI_SUPPORT_FULL,
- .opcode = RESERVE,
+ .opcode = RESERVE_6,
.cdb_size = 6,
- .usage_bits = {RESERVE, 0x00, 0x00, 0x00,
+ .usage_bits = {RESERVE_6, 0x00, 0x00, 0x00,
0x00, SCSI_CONTROL_MASK},
.enabled = tcm_is_pr_enabled,
};
@@ -2151,8 +2151,10 @@ spc_rsoc_get_descr(struct se_cmd *cmd, struct target_opcode_descriptor **opcode)
if (descr->serv_action_valid)
return TCM_INVALID_CDB_FIELD;
- if (!descr->enabled || descr->enabled(descr, cmd))
+ if (!descr->enabled || descr->enabled(descr, cmd)) {
*opcode = descr;
+ return TCM_NO_SENSE;
+ }
break;
case 0x2:
/*
@@ -2166,8 +2168,10 @@ spc_rsoc_get_descr(struct se_cmd *cmd, struct target_opcode_descriptor **opcode)
if (descr->serv_action_valid &&
descr->service_action == requested_sa) {
if (!descr->enabled || descr->enabled(descr,
- cmd))
+ cmd)) {
*opcode = descr;
+ return TCM_NO_SENSE;
+ }
} else if (!descr->serv_action_valid)
return TCM_INVALID_CDB_FIELD;
break;
@@ -2180,13 +2184,15 @@ spc_rsoc_get_descr(struct se_cmd *cmd, struct target_opcode_descriptor **opcode)
*/
if (descr->service_action == requested_sa)
if (!descr->enabled || descr->enabled(descr,
- cmd))
+ cmd)) {
*opcode = descr;
+ return TCM_NO_SENSE;
+ }
break;
}
}
- return 0;
+ return TCM_NO_SENSE;
}
static sense_reason_t
@@ -2243,7 +2249,7 @@ spc_emulate_report_supp_op_codes(struct se_cmd *cmd)
response_length += spc_rsoc_encode_command_descriptor(
&buf[response_length], rctd, descr);
}
- put_unaligned_be32(response_length - 3, buf);
+ put_unaligned_be32(response_length - 4, buf);
} else {
response_length = spc_rsoc_encode_one_command_descriptor(
&buf[response_length], rctd, descr,
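Two REPORT SUPPORTED OPERATION CODES fixes sit together here. spc_rsoc_get_descr() now returns as soon as an enabled descriptor matches, so a later descriptor that happens to share the opcode can no longer turn a successful lookup into TCM_INVALID_CDB_FIELD. And the "report all" header is corrected: as I read SPC, the leading COMMAND DATA LENGTH field counts only the descriptor list that follows the 4-byte header, so it must be the total response length minus 4, not minus 3:

    /*
     * RSOC "report all" parameter data (sketch of the SPC layout):
     *
     *   bytes 0..3   COMMAND DATA LENGTH  (length of the descriptor list)
     *   bytes 4..n   command descriptors
     *
     * With response_length bytes in the buffer overall, the header field
     * is therefore response_length - 4.
     */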
@@ -2267,9 +2273,9 @@ spc_parse_cdb(struct se_cmd *cmd, unsigned int *size)
unsigned char *cdb = cmd->t_task_cdb;
switch (cdb[0]) {
- case RESERVE:
+ case RESERVE_6:
case RESERVE_10:
- case RELEASE:
+ case RELEASE_6:
case RELEASE_10:
if (!dev->dev_attrib.emulate_pr)
return TCM_UNSUPPORTED_SCSI_OPCODE;
@@ -2313,7 +2319,7 @@ spc_parse_cdb(struct se_cmd *cmd, unsigned int *size)
*size = get_unaligned_be32(&cdb[5]);
cmd->execute_cmd = target_scsi3_emulate_pr_out;
break;
- case RELEASE:
+ case RELEASE_6:
case RELEASE_10:
if (cdb[0] == RELEASE_10)
*size = get_unaligned_be16(&cdb[7]);
@@ -2322,7 +2328,7 @@ spc_parse_cdb(struct se_cmd *cmd, unsigned int *size)
cmd->execute_cmd = target_scsi2_reservation_release;
break;
- case RESERVE:
+ case RESERVE_6:
case RESERVE_10:
/*
* The SPC-2 RESERVE does not contain a size in the SCSI CDB.
diff --git a/drivers/thermal/hisi_thermal.c b/drivers/thermal/hisi_thermal.c
index 7e918bd3f100..4307161533a7 100644
--- a/drivers/thermal/hisi_thermal.c
+++ b/drivers/thermal/hisi_thermal.c
@@ -412,8 +412,8 @@ static int hi3660_thermal_probe(struct hisi_thermal_data *data)
data->nr_sensors = 1;
- data->sensor = devm_kzalloc(dev, sizeof(*data->sensor) *
- data->nr_sensors, GFP_KERNEL);
+ data->sensor = devm_kcalloc(dev, data->nr_sensors,
+ sizeof(*data->sensor), GFP_KERNEL);
if (!data->sensor)
return -ENOMEM;
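This hunk and the similar ones that follow (int340x_thermal_zone, k3_j72xx_bandgap, thermal_debugfs, thermal_of) replace open-coded sizeof(*p) * count allocations with kcalloc()/devm_kcalloc(), whose element-count multiplication is overflow-checked and fails cleanly instead of silently allocating a short buffer. A simplified sketch of what the calloc-style helpers do internally, as I understand include/linux/overflow.h:

    /* Simplified; the real kcalloc() goes through kmalloc_array(). */
    static inline void *kcalloc_sketch(size_t n, size_t size, gfp_t flags)
    {
            size_t bytes;

            if (check_mul_overflow(n, size, &bytes))
                    return NULL;            /* n * size would overflow */
            return kmalloc(bytes, flags | __GFP_ZERO);
    }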
diff --git a/drivers/thermal/intel/int340x_thermal/int3402_thermal.c b/drivers/thermal/intel/int340x_thermal/int3402_thermal.c
index 543b03960e99..57b90005888a 100644
--- a/drivers/thermal/intel/int340x_thermal/int3402_thermal.c
+++ b/drivers/thermal/intel/int340x_thermal/int3402_thermal.c
@@ -45,6 +45,9 @@ static int int3402_thermal_probe(struct platform_device *pdev)
struct int3402_thermal_data *d;
int ret;
+ if (!adev)
+ return -ENODEV;
+
if (!acpi_has_method(adev->handle, "_TMP"))
return -ENODEV;
diff --git a/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c b/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c
index 8dca6a6aceca..3d9efe69d562 100644
--- a/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c
+++ b/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c
@@ -133,8 +133,8 @@ struct int34x_thermal_zone *int340x_thermal_zone_add(struct acpi_device *adev,
if (ACPI_SUCCESS(status))
int34x_zone->aux_trip_nr = trip_cnt;
- zone_trips = kzalloc(sizeof(*zone_trips) * (trip_cnt + INT340X_THERMAL_MAX_TRIP_COUNT),
- GFP_KERNEL);
+ zone_trips = kcalloc(trip_cnt + INT340X_THERMAL_MAX_TRIP_COUNT,
+ sizeof(*zone_trips), GFP_KERNEL);
if (!zone_trips) {
ret = -ENOMEM;
goto err_trips_alloc;
@@ -143,7 +143,7 @@ struct int34x_thermal_zone *int340x_thermal_zone_add(struct acpi_device *adev,
for (i = 0; i < trip_cnt; i++) {
zone_trips[i].type = THERMAL_TRIP_PASSIVE;
zone_trips[i].temperature = THERMAL_TEMP_INVALID;
- zone_trips[i].flags |= THERMAL_TRIP_FLAG_RW_TEMP;
+ zone_trips[i].flags = THERMAL_TRIP_FLAG_RW_TEMP;
zone_trips[i].priv = THERMAL_INT_TO_TRIP_PRIV(i);
}
diff --git a/drivers/thermal/intel/intel_tcc.c b/drivers/thermal/intel/intel_tcc.c
index 817421508d5c..b2a615aea7c1 100644
--- a/drivers/thermal/intel/intel_tcc.c
+++ b/drivers/thermal/intel/intel_tcc.c
@@ -106,7 +106,7 @@ static const struct x86_cpu_id intel_tcc_cpu_ids[] __initconst = {
X86_MATCH_VFM(INTEL_ATOM_SILVERMONT_D, &temp_broadwell),
X86_MATCH_VFM(INTEL_ATOM_SILVERMONT_MID, &temp_broadwell),
X86_MATCH_VFM(INTEL_ATOM_AIRMONT, &temp_broadwell),
- X86_MATCH_VFM(INTEL_ATOM_AIRMONT_MID, &temp_broadwell),
+ X86_MATCH_VFM(INTEL_ATOM_SILVERMONT_MID2, &temp_broadwell),
X86_MATCH_VFM(INTEL_ATOM_AIRMONT_NP, &temp_broadwell),
X86_MATCH_VFM(INTEL_ATOM_GOLDMONT, &temp_goldmont),
X86_MATCH_VFM(INTEL_ATOM_GOLDMONT_D, &temp_goldmont),
diff --git a/drivers/thermal/k3_j72xx_bandgap.c b/drivers/thermal/k3_j72xx_bandgap.c
index 70de6dbf99c5..a36289e61315 100644
--- a/drivers/thermal/k3_j72xx_bandgap.c
+++ b/drivers/thermal/k3_j72xx_bandgap.c
@@ -460,13 +460,13 @@ static int k3_j72xx_bandgap_probe(struct platform_device *pdev)
goto err_alloc;
}
- ref_table = kzalloc(sizeof(*ref_table) * TABLE_SIZE, GFP_KERNEL);
+ ref_table = kcalloc(TABLE_SIZE, sizeof(*ref_table), GFP_KERNEL);
if (!ref_table) {
ret = -ENOMEM;
goto err_alloc;
}
- derived_table = devm_kzalloc(bgp->dev, sizeof(*derived_table) * TABLE_SIZE,
+ derived_table = devm_kcalloc(bgp->dev, TABLE_SIZE, sizeof(*derived_table),
GFP_KERNEL);
if (!derived_table) {
ret = -ENOMEM;
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
index 2328ac0d8561..f96ca2710928 100644
--- a/drivers/thermal/thermal_core.c
+++ b/drivers/thermal/thermal_core.c
@@ -1589,26 +1589,26 @@ thermal_zone_device_register_with_trips(const char *type,
tz->state = TZ_STATE_FLAG_INIT;
+ result = dev_set_name(&tz->device, "thermal_zone%d", tz->id);
+ if (result)
+ goto remove_id;
+
+ thermal_zone_device_init(tz);
+
+ result = thermal_zone_init_governor(tz);
+ if (result)
+ goto remove_id;
+
/* sys I/F */
/* Add nodes that are always present via .groups */
result = thermal_zone_create_device_groups(tz);
if (result)
goto remove_id;
- result = dev_set_name(&tz->device, "thermal_zone%d", tz->id);
- if (result) {
- thermal_zone_destroy_device_groups(tz);
- goto remove_id;
- }
- thermal_zone_device_init(tz);
result = device_register(&tz->device);
if (result)
goto release_device;
- result = thermal_zone_init_governor(tz);
- if (result)
- goto unregister;
-
if (!tz->tzp || !tz->tzp->no_hwmon) {
result = thermal_add_hwmon_sysfs(tz);
if (result)
diff --git a/drivers/thermal/thermal_debugfs.c b/drivers/thermal/thermal_debugfs.c
index c800504c3cfe..11d34f2a3d9f 100644
--- a/drivers/thermal/thermal_debugfs.c
+++ b/drivers/thermal/thermal_debugfs.c
@@ -319,7 +319,7 @@ static int cdev_tt_seq_show(struct seq_file *s, void *v)
int i = *(loff_t *)v;
if (!i)
- seq_puts(s, "Transition\tOccurences\n");
+ seq_puts(s, "Transition\tOccurrences\n");
list_for_each_entry(entry, &transitions[i], node) {
/*
@@ -876,7 +876,7 @@ void thermal_debug_tz_add(struct thermal_zone_device *tz)
tz_dbg->tz = tz;
- tz_dbg->trips_crossed = kzalloc(sizeof(int) * tz->num_trips, GFP_KERNEL);
+ tz_dbg->trips_crossed = kcalloc(tz->num_trips, sizeof(int), GFP_KERNEL);
if (!tz_dbg->trips_crossed) {
thermal_debugfs_remove_id(thermal_dbg);
return;
diff --git a/drivers/thermal/thermal_of.c b/drivers/thermal/thermal_of.c
index 5401f03d6b6c..8264d5c3bbb3 100644
--- a/drivers/thermal/thermal_of.c
+++ b/drivers/thermal/thermal_of.c
@@ -107,7 +107,7 @@ static struct thermal_trip *thermal_of_trips_init(struct device_node *np, int *n
if (!count)
return NULL;
- struct thermal_trip *tt __free(kfree) = kzalloc(sizeof(*tt) * count, GFP_KERNEL);
+ struct thermal_trip *tt __free(kfree) = kcalloc(count, sizeof(*tt), GFP_KERNEL);
if (!tt)
return ERR_PTR(-ENOMEM);
diff --git a/drivers/thunderbolt/ctl.c b/drivers/thunderbolt/ctl.c
index dc1f456736dc..cd15e84c47f4 100644
--- a/drivers/thunderbolt/ctl.c
+++ b/drivers/thunderbolt/ctl.c
@@ -312,7 +312,7 @@ static void tb_cfg_print_error(struct tb_ctl *ctl, enum tb_cfg_space space,
static __be32 tb_crc(const void *data, size_t len)
{
- return cpu_to_be32(~__crc32c_le(~0, data, len));
+ return cpu_to_be32(~crc32c(~0, data, len));
}
static void tb_ctl_pkg_free(struct ctl_pkg *pkg)
diff --git a/drivers/thunderbolt/eeprom.c b/drivers/thunderbolt/eeprom.c
index 9c1d65d26553..e66183a72cf9 100644
--- a/drivers/thunderbolt/eeprom.c
+++ b/drivers/thunderbolt/eeprom.c
@@ -211,7 +211,7 @@ static u8 tb_crc8(u8 *data, int len)
static u32 tb_crc32(void *data, size_t len)
{
- return ~__crc32c_le(~0, data, len);
+ return ~crc32c(~0, data, len);
}
#define TB_DROM_DATA_START 13
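Both Thunderbolt hunks swap the low-level __crc32c_le() entry point for the generic crc32c() helper. The polynomial (Castagnoli) and the seed/inversion convention are the same, so the wire and DROM checksums are unchanged, while the generic helper can pick up an accelerated implementation where one is available. The call shape stays as before:

    /*
     * CRC32C with the usual ~0 seed and final inversion, exactly the
     * pattern used above; crc32c() is declared by the kernel CRC headers
     * (<linux/crc32.h> or <linux/crc32c.h>, depending on tree vintage).
     */
    static u32 csum(const void *data, size_t len)
    {
            return ~crc32c(~0, data, len);
    }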
diff --git a/drivers/thunderbolt/tunnel.c b/drivers/thunderbolt/tunnel.c
index 8229a6fbda5a..717b31d78728 100644
--- a/drivers/thunderbolt/tunnel.c
+++ b/drivers/thunderbolt/tunnel.c
@@ -1009,6 +1009,8 @@ static int tb_dp_dprx_start(struct tb_tunnel *tunnel)
*/
tb_tunnel_get(tunnel);
+ tunnel->dprx_started = true;
+
if (tunnel->callback) {
tunnel->dprx_timeout = dprx_timeout_to_ktime(dprx_timeout);
queue_delayed_work(tunnel->tb->wq, &tunnel->dprx_work, 0);
@@ -1021,9 +1023,12 @@ static int tb_dp_dprx_start(struct tb_tunnel *tunnel)
static void tb_dp_dprx_stop(struct tb_tunnel *tunnel)
{
- tunnel->dprx_canceled = true;
- cancel_delayed_work(&tunnel->dprx_work);
- tb_tunnel_put(tunnel);
+ if (tunnel->dprx_started) {
+ tunnel->dprx_started = false;
+ tunnel->dprx_canceled = true;
+ cancel_delayed_work(&tunnel->dprx_work);
+ tb_tunnel_put(tunnel);
+ }
}
static int tb_dp_activate(struct tb_tunnel *tunnel, bool active)
diff --git a/drivers/thunderbolt/tunnel.h b/drivers/thunderbolt/tunnel.h
index 7f6d3a18a41e..8a0a0cb21a89 100644
--- a/drivers/thunderbolt/tunnel.h
+++ b/drivers/thunderbolt/tunnel.h
@@ -63,6 +63,7 @@ enum tb_tunnel_state {
* @allocated_down: Allocated downstream bandwidth (only for USB3)
* @bw_mode: DP bandwidth allocation mode registers can be used to
* determine consumed and allocated bandwidth
+ * @dprx_started: DPRX negotiation was started (tb_dp_dprx_start() was called for it)
* @dprx_canceled: Was DPRX capabilities read poll canceled
* @dprx_timeout: If set DPRX capabilities read poll work will timeout after this passes
* @dprx_work: Worker that is scheduled to poll completion of DPRX capabilities read
@@ -100,6 +101,7 @@ struct tb_tunnel {
int allocated_up;
int allocated_down;
bool bw_mode;
+ bool dprx_started;
bool dprx_canceled;
ktime_t dprx_timeout;
struct delayed_work dprx_work;
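The new dprx_started flag keeps the tunnel reference counting balanced: tb_tunnel_get() is taken when DPRX polling actually starts, and tb_dp_dprx_stop() now only cancels the work and drops that reference if the poll was started in the first place, so stopping a tunnel whose DPRX read never began no longer underflows the refcount. The generic shape of the guard, with invented names:

    /* Illustrative pattern only (object and helper names invented). */
    static void poll_start(struct obj *o)
    {
            obj_get(o);
            o->started = true;
    }

    static void poll_stop(struct obj *o)
    {
            if (!o->started)
                    return;
            o->started = false;
            obj_put(o);             /* drop only the reference start took */
    }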
diff --git a/drivers/tty/Kconfig b/drivers/tty/Kconfig
index 63a494d36a1f..7fb81bbaee60 100644
--- a/drivers/tty/Kconfig
+++ b/drivers/tty/Kconfig
@@ -383,7 +383,24 @@ config NULL_TTY
available or desired.
In order to use this driver, you should redirect the console to this
- TTY, or boot the kernel with console=ttynull.
+ TTY, boot the kernel with console=ttynull, or enable
+ NULL_TTY_DEFAULT_CONSOLE.
+
+ If unsure, say N.
+
+config NULL_TTY_DEFAULT_CONSOLE
+ bool "Support for console on ttynull"
+ depends on NULL_TTY=y && !VT_CONSOLE
+ help
+ Say Y here if you want the NULL TTY to be used as a /dev/console
+ device by default.
+
+ For example, it might be useful to prevent a VT-less kernel from
+ writing the system log to a random device connected to the serial
+ port.
+
+ Another console driver still might get preferred via the command
+ line, SPCR, or the device tree.
If unsure, say N.
diff --git a/drivers/tty/serial/8250/8250_bcm7271.c b/drivers/tty/serial/8250/8250_bcm7271.c
index d0b18358859e..742004d63c6f 100644
--- a/drivers/tty/serial/8250/8250_bcm7271.c
+++ b/drivers/tty/serial/8250/8250_bcm7271.c
@@ -1056,8 +1056,7 @@ static int brcmuart_probe(struct platform_device *pdev)
}
/* setup HR timer */
- hrtimer_init(&priv->hrt, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
- priv->hrt.function = brcmuart_hrtimer_func;
+ hrtimer_setup(&priv->hrt, brcmuart_hrtimer_func, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
up.port.shutdown = brcmuart_shutdown;
up.port.startup = brcmuart_startup;
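This is the first of several serial drivers in this section (8250_port, amba-pl011, imx, sh-sci, xilinx_uartps) converted from the two-step hrtimer_init() plus manual .function assignment to hrtimer_setup(), which takes the callback as an argument so the timer never exists without one; xilinx_uartps additionally switches its later callback swaps to hrtimer_update_function(). Side by side:

    /* Old two-step form: */
    hrtimer_init(&priv->hrt, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
    priv->hrt.function = brcmuart_hrtimer_func;

    /* New single call, callback supplied up front: */
    hrtimer_setup(&priv->hrt, brcmuart_hrtimer_func,
                  CLOCK_MONOTONIC, HRTIMER_MODE_ABS);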
diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
index 442967a6cd52..c57f44882abb 100644
--- a/drivers/tty/serial/8250/8250_port.c
+++ b/drivers/tty/serial/8250/8250_port.c
@@ -566,12 +566,10 @@ static int serial8250_em485_init(struct uart_8250_port *p)
if (!p->em485)
return -ENOMEM;
- hrtimer_init(&p->em485->stop_tx_timer, CLOCK_MONOTONIC,
- HRTIMER_MODE_REL);
- hrtimer_init(&p->em485->start_tx_timer, CLOCK_MONOTONIC,
- HRTIMER_MODE_REL);
- p->em485->stop_tx_timer.function = &serial8250_em485_handle_stop_tx;
- p->em485->start_tx_timer.function = &serial8250_em485_handle_start_tx;
+ hrtimer_setup(&p->em485->stop_tx_timer, &serial8250_em485_handle_stop_tx, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
+ hrtimer_setup(&p->em485->start_tx_timer, &serial8250_em485_handle_start_tx, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
p->em485->port = p;
p->em485->active_timer = NULL;
p->em485->tx_stopped = true;
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
index 04212c823a91..98f178bdbcbe 100644
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
@@ -2867,11 +2867,10 @@ static int pl011_probe(struct amba_device *dev, const struct amba_id *id)
return -EINVAL;
}
}
-
- hrtimer_init(&uap->trigger_start_tx, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- hrtimer_init(&uap->trigger_stop_tx, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- uap->trigger_start_tx.function = pl011_trigger_start_tx;
- uap->trigger_stop_tx.function = pl011_trigger_stop_tx;
+ hrtimer_setup(&uap->trigger_start_tx, pl011_trigger_start_tx, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
+ hrtimer_setup(&uap->trigger_stop_tx, pl011_trigger_stop_tx, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
ret = pl011_setup_port(&dev->dev, uap, &dev->res, portnr);
if (ret)
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
index 9c59ec128bb4..9a1afe409b98 100644
--- a/drivers/tty/serial/imx.c
+++ b/drivers/tty/serial/imx.c
@@ -2582,10 +2582,10 @@ static int imx_uart_probe(struct platform_device *pdev)
imx_uart_writel(sport, ucr3, UCR3);
}
- hrtimer_init(&sport->trigger_start_tx, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- hrtimer_init(&sport->trigger_stop_tx, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- sport->trigger_start_tx.function = imx_trigger_start_tx;
- sport->trigger_stop_tx.function = imx_trigger_stop_tx;
+ hrtimer_setup(&sport->trigger_start_tx, imx_trigger_start_tx, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
+ hrtimer_setup(&sport->trigger_stop_tx, imx_trigger_stop_tx, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
/*
* Allocate the IRQ(s) i.MX1 has three interrupts whereas later
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
index 92f7e752f862..d46650e578e5 100644
--- a/drivers/tty/serial/serial_core.c
+++ b/drivers/tty/serial/serial_core.c
@@ -2426,10 +2426,10 @@ int uart_suspend_port(struct uart_driver *drv, struct uart_port *uport)
}
/*
- * Disable the console device before suspending.
+ * Suspend the console device before suspending the port.
*/
if (uart_console(uport))
- console_stop(uport->cons);
+ console_suspend(uport->cons);
uart_change_pm(state, UART_PM_STATE_OFF);
@@ -2484,7 +2484,7 @@ int uart_resume_port(struct uart_driver *drv, struct uart_port *uport)
uart_port_unlock_irq(uport);
}
if (console_suspend_enabled)
- console_start(uport->cons);
+ console_resume(uport->cons);
}
if (tty_port_suspended(port)) {
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index b1ea48f38248..b72c3bc19bfa 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -1702,8 +1702,7 @@ static void sci_request_dma(struct uart_port *port)
dma += s->buf_len_rx;
}
- hrtimer_init(&s->rx_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- s->rx_timer.function = sci_dma_rx_timer_fn;
+ hrtimer_setup(&s->rx_timer, sci_dma_rx_timer_fn, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
s->chan_rx_saved = s->chan_rx = chan;
diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c
index 92ec51870d1d..fe457bf1e15b 100644
--- a/drivers/tty/serial/xilinx_uartps.c
+++ b/drivers/tty/serial/xilinx_uartps.c
@@ -454,7 +454,7 @@ static void cdns_uart_handle_tx(void *dev_id)
if (cdns_uart->port->rs485.flags & SER_RS485_ENABLED &&
(kfifo_is_empty(&tport->xmit_fifo) || uart_tx_stopped(port))) {
- cdns_uart->tx_timer.function = &cdns_rs485_rx_callback;
+ hrtimer_update_function(&cdns_uart->tx_timer, cdns_rs485_rx_callback);
hrtimer_start(&cdns_uart->tx_timer,
ns_to_ktime(cdns_calc_after_tx_delay(cdns_uart)), HRTIMER_MODE_REL);
}
@@ -734,7 +734,7 @@ static void cdns_uart_start_tx(struct uart_port *port)
if (cdns_uart->port->rs485.flags & SER_RS485_ENABLED) {
if (!cdns_uart->rs485_tx_started) {
- cdns_uart->tx_timer.function = &cdns_rs485_tx_callback;
+ hrtimer_update_function(&cdns_uart->tx_timer, cdns_rs485_tx_callback);
cdns_rs485_tx_setup(cdns_uart);
return hrtimer_start(&cdns_uart->tx_timer,
ms_to_ktime(port->rs485.delay_rts_before_send),
@@ -1626,8 +1626,8 @@ static int cdns_rs485_config(struct uart_port *port, struct ktermios *termios,
writel(val, port->membase + CDNS_UART_MODEMCR);
/* Timer setup */
- hrtimer_init(&cdns_uart->tx_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- cdns_uart->tx_timer.function = &cdns_rs485_tx_callback;
+ hrtimer_setup(&cdns_uart->tx_timer, &cdns_rs485_tx_callback, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
/* Disable transmitter and make Rx setup*/
cdns_uart_stop_tx(port);
diff --git a/drivers/ufs/core/ufs-sysfs.c b/drivers/ufs/core/ufs-sysfs.c
index 3438269a5440..90b5ab60f5ae 100644
--- a/drivers/ufs/core/ufs-sysfs.c
+++ b/drivers/ufs/core/ufs-sysfs.c
@@ -458,6 +458,14 @@ static ssize_t pm_qos_enable_store(struct device *dev,
return count;
}
+static ssize_t critical_health_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%d\n", hba->critical_health_count);
+}
+
static DEVICE_ATTR_RW(rpm_lvl);
static DEVICE_ATTR_RO(rpm_target_dev_state);
static DEVICE_ATTR_RO(rpm_target_link_state);
@@ -470,6 +478,7 @@ static DEVICE_ATTR_RW(enable_wb_buf_flush);
static DEVICE_ATTR_RW(wb_flush_threshold);
static DEVICE_ATTR_RW(rtc_update_ms);
static DEVICE_ATTR_RW(pm_qos_enable);
+static DEVICE_ATTR_RO(critical_health);
static struct attribute *ufs_sysfs_ufshcd_attrs[] = {
&dev_attr_rpm_lvl.attr,
@@ -484,6 +493,7 @@ static struct attribute *ufs_sysfs_ufshcd_attrs[] = {
&dev_attr_wb_flush_threshold.attr,
&dev_attr_rtc_update_ms.attr,
&dev_attr_pm_qos_enable.attr,
+ &dev_attr_critical_health.attr,
NULL
};
diff --git a/drivers/ufs/core/ufs_trace.h b/drivers/ufs/core/ufs_trace.h
index 84deca2b841d..caa32e23ffa5 100644
--- a/drivers/ufs/core/ufs_trace.h
+++ b/drivers/ufs/core/ufs_trace.h
@@ -83,34 +83,34 @@ UFS_CMD_TRACE_TSF_TYPES
TRACE_EVENT(ufshcd_clk_gating,
- TP_PROTO(const char *dev_name, int state),
+ TP_PROTO(struct ufs_hba *hba, int state),
- TP_ARGS(dev_name, state),
+ TP_ARGS(hba, state),
TP_STRUCT__entry(
- __string(dev_name, dev_name)
+ __field(struct ufs_hba *, hba)
__field(int, state)
),
TP_fast_assign(
- __assign_str(dev_name);
+ __entry->hba = hba;
__entry->state = state;
),
TP_printk("%s: gating state changed to %s",
- __get_str(dev_name),
+ dev_name(__entry->hba->dev),
__print_symbolic(__entry->state, UFSCHD_CLK_GATING_STATES))
);
TRACE_EVENT(ufshcd_clk_scaling,
- TP_PROTO(const char *dev_name, const char *state, const char *clk,
+ TP_PROTO(struct ufs_hba *hba, const char *state, const char *clk,
u32 prev_state, u32 curr_state),
- TP_ARGS(dev_name, state, clk, prev_state, curr_state),
+ TP_ARGS(hba, state, clk, prev_state, curr_state),
TP_STRUCT__entry(
- __string(dev_name, dev_name)
+ __field(struct ufs_hba *, hba)
__string(state, state)
__string(clk, clk)
__field(u32, prev_state)
@@ -118,7 +118,7 @@ TRACE_EVENT(ufshcd_clk_scaling,
),
TP_fast_assign(
- __assign_str(dev_name);
+ __entry->hba = hba;
__assign_str(state);
__assign_str(clk);
__entry->prev_state = prev_state;
@@ -126,80 +126,80 @@ TRACE_EVENT(ufshcd_clk_scaling,
),
TP_printk("%s: %s %s from %u to %u Hz",
- __get_str(dev_name), __get_str(state), __get_str(clk),
+ dev_name(__entry->hba->dev), __get_str(state), __get_str(clk),
__entry->prev_state, __entry->curr_state)
);
TRACE_EVENT(ufshcd_auto_bkops_state,
- TP_PROTO(const char *dev_name, const char *state),
+ TP_PROTO(struct ufs_hba *hba, const char *state),
- TP_ARGS(dev_name, state),
+ TP_ARGS(hba, state),
TP_STRUCT__entry(
- __string(dev_name, dev_name)
+ __field(struct ufs_hba *, hba)
__string(state, state)
),
TP_fast_assign(
- __assign_str(dev_name);
+ __entry->hba = hba;
__assign_str(state);
),
TP_printk("%s: auto bkops - %s",
- __get_str(dev_name), __get_str(state))
+ dev_name(__entry->hba->dev), __get_str(state))
);
DECLARE_EVENT_CLASS(ufshcd_profiling_template,
- TP_PROTO(const char *dev_name, const char *profile_info, s64 time_us,
+ TP_PROTO(struct ufs_hba *hba, const char *profile_info, s64 time_us,
int err),
- TP_ARGS(dev_name, profile_info, time_us, err),
+ TP_ARGS(hba, profile_info, time_us, err),
TP_STRUCT__entry(
- __string(dev_name, dev_name)
+ __field(struct ufs_hba *, hba)
__string(profile_info, profile_info)
__field(s64, time_us)
__field(int, err)
),
TP_fast_assign(
- __assign_str(dev_name);
+ __entry->hba = hba;
__assign_str(profile_info);
__entry->time_us = time_us;
__entry->err = err;
),
TP_printk("%s: %s: took %lld usecs, err %d",
- __get_str(dev_name), __get_str(profile_info),
+ dev_name(__entry->hba->dev), __get_str(profile_info),
__entry->time_us, __entry->err)
);
DEFINE_EVENT(ufshcd_profiling_template, ufshcd_profile_hibern8,
- TP_PROTO(const char *dev_name, const char *profile_info, s64 time_us,
+ TP_PROTO(struct ufs_hba *hba, const char *profile_info, s64 time_us,
int err),
- TP_ARGS(dev_name, profile_info, time_us, err));
+ TP_ARGS(hba, profile_info, time_us, err));
DEFINE_EVENT(ufshcd_profiling_template, ufshcd_profile_clk_gating,
- TP_PROTO(const char *dev_name, const char *profile_info, s64 time_us,
+ TP_PROTO(struct ufs_hba *hba, const char *profile_info, s64 time_us,
int err),
- TP_ARGS(dev_name, profile_info, time_us, err));
+ TP_ARGS(hba, profile_info, time_us, err));
DEFINE_EVENT(ufshcd_profiling_template, ufshcd_profile_clk_scaling,
- TP_PROTO(const char *dev_name, const char *profile_info, s64 time_us,
+ TP_PROTO(struct ufs_hba *hba, const char *profile_info, s64 time_us,
int err),
- TP_ARGS(dev_name, profile_info, time_us, err));
+ TP_ARGS(hba, profile_info, time_us, err));
DECLARE_EVENT_CLASS(ufshcd_template,
- TP_PROTO(const char *dev_name, int err, s64 usecs,
+ TP_PROTO(struct ufs_hba *hba, int err, s64 usecs,
int dev_state, int link_state),
- TP_ARGS(dev_name, err, usecs, dev_state, link_state),
+ TP_ARGS(hba, err, usecs, dev_state, link_state),
TP_STRUCT__entry(
__field(s64, usecs)
__field(int, err)
- __string(dev_name, dev_name)
+ __field(struct ufs_hba *, hba)
__field(int, dev_state)
__field(int, link_state)
),
@@ -207,14 +207,14 @@ DECLARE_EVENT_CLASS(ufshcd_template,
TP_fast_assign(
__entry->usecs = usecs;
__entry->err = err;
- __assign_str(dev_name);
+ __entry->hba = hba;
__entry->dev_state = dev_state;
__entry->link_state = link_state;
),
TP_printk(
"%s: took %lld usecs, dev_state: %s, link_state: %s, err %d",
- __get_str(dev_name),
+ dev_name(__entry->hba->dev),
__entry->usecs,
__print_symbolic(__entry->dev_state, UFS_PWR_MODES),
__print_symbolic(__entry->link_state, UFS_LINK_STATES),
@@ -223,60 +223,62 @@ DECLARE_EVENT_CLASS(ufshcd_template,
);
DEFINE_EVENT(ufshcd_template, ufshcd_system_suspend,
- TP_PROTO(const char *dev_name, int err, s64 usecs,
+ TP_PROTO(struct ufs_hba *hba, int err, s64 usecs,
int dev_state, int link_state),
- TP_ARGS(dev_name, err, usecs, dev_state, link_state));
+ TP_ARGS(hba, err, usecs, dev_state, link_state));
DEFINE_EVENT(ufshcd_template, ufshcd_system_resume,
- TP_PROTO(const char *dev_name, int err, s64 usecs,
+ TP_PROTO(struct ufs_hba *hba, int err, s64 usecs,
int dev_state, int link_state),
- TP_ARGS(dev_name, err, usecs, dev_state, link_state));
+ TP_ARGS(hba, err, usecs, dev_state, link_state));
DEFINE_EVENT(ufshcd_template, ufshcd_runtime_suspend,
- TP_PROTO(const char *dev_name, int err, s64 usecs,
+ TP_PROTO(struct ufs_hba *hba, int err, s64 usecs,
int dev_state, int link_state),
- TP_ARGS(dev_name, err, usecs, dev_state, link_state));
+ TP_ARGS(hba, err, usecs, dev_state, link_state));
DEFINE_EVENT(ufshcd_template, ufshcd_runtime_resume,
- TP_PROTO(const char *dev_name, int err, s64 usecs,
+ TP_PROTO(struct ufs_hba *hba, int err, s64 usecs,
int dev_state, int link_state),
- TP_ARGS(dev_name, err, usecs, dev_state, link_state));
+ TP_ARGS(hba, err, usecs, dev_state, link_state));
DEFINE_EVENT(ufshcd_template, ufshcd_init,
- TP_PROTO(const char *dev_name, int err, s64 usecs,
+ TP_PROTO(struct ufs_hba *hba, int err, s64 usecs,
int dev_state, int link_state),
- TP_ARGS(dev_name, err, usecs, dev_state, link_state));
+ TP_ARGS(hba, err, usecs, dev_state, link_state));
DEFINE_EVENT(ufshcd_template, ufshcd_wl_suspend,
- TP_PROTO(const char *dev_name, int err, s64 usecs,
+ TP_PROTO(struct ufs_hba *hba, int err, s64 usecs,
int dev_state, int link_state),
- TP_ARGS(dev_name, err, usecs, dev_state, link_state));
+ TP_ARGS(hba, err, usecs, dev_state, link_state));
DEFINE_EVENT(ufshcd_template, ufshcd_wl_resume,
- TP_PROTO(const char *dev_name, int err, s64 usecs,
+ TP_PROTO(struct ufs_hba *hba, int err, s64 usecs,
int dev_state, int link_state),
- TP_ARGS(dev_name, err, usecs, dev_state, link_state));
+ TP_ARGS(hba, err, usecs, dev_state, link_state));
DEFINE_EVENT(ufshcd_template, ufshcd_wl_runtime_suspend,
- TP_PROTO(const char *dev_name, int err, s64 usecs,
+ TP_PROTO(struct ufs_hba *hba, int err, s64 usecs,
int dev_state, int link_state),
- TP_ARGS(dev_name, err, usecs, dev_state, link_state));
+ TP_ARGS(hba, err, usecs, dev_state, link_state));
DEFINE_EVENT(ufshcd_template, ufshcd_wl_runtime_resume,
- TP_PROTO(const char *dev_name, int err, s64 usecs,
+ TP_PROTO(struct ufs_hba *hba, int err, s64 usecs,
int dev_state, int link_state),
- TP_ARGS(dev_name, err, usecs, dev_state, link_state));
+ TP_ARGS(hba, err, usecs, dev_state, link_state));
TRACE_EVENT(ufshcd_command,
- TP_PROTO(struct scsi_device *sdev, enum ufs_trace_str_t str_t,
+ TP_PROTO(struct scsi_device *sdev, struct ufs_hba *hba,
+ enum ufs_trace_str_t str_t,
unsigned int tag, u32 doorbell, u32 hwq_id, int transfer_len,
u32 intr, u64 lba, u8 opcode, u8 group_id),
- TP_ARGS(sdev, str_t, tag, doorbell, hwq_id, transfer_len, intr, lba,
+ TP_ARGS(sdev, hba, str_t, tag, doorbell, hwq_id, transfer_len, intr, lba,
opcode, group_id),
TP_STRUCT__entry(
__field(struct scsi_device *, sdev)
+ __field(struct ufs_hba *, hba)
__field(enum ufs_trace_str_t, str_t)
__field(unsigned int, tag)
__field(u32, doorbell)
@@ -290,6 +292,7 @@ TRACE_EVENT(ufshcd_command,
TP_fast_assign(
__entry->sdev = sdev;
+ __entry->hba = hba;
__entry->str_t = str_t;
__entry->tag = tag;
__entry->doorbell = doorbell;
@@ -312,13 +315,13 @@ TRACE_EVENT(ufshcd_command,
);
TRACE_EVENT(ufshcd_uic_command,
- TP_PROTO(const char *dev_name, enum ufs_trace_str_t str_t, u32 cmd,
+ TP_PROTO(struct ufs_hba *hba, enum ufs_trace_str_t str_t, u32 cmd,
u32 arg1, u32 arg2, u32 arg3),
- TP_ARGS(dev_name, str_t, cmd, arg1, arg2, arg3),
+ TP_ARGS(hba, str_t, cmd, arg1, arg2, arg3),
TP_STRUCT__entry(
- __string(dev_name, dev_name)
+ __field(struct ufs_hba *, hba)
__field(enum ufs_trace_str_t, str_t)
__field(u32, cmd)
__field(u32, arg1)
@@ -327,7 +330,7 @@ TRACE_EVENT(ufshcd_uic_command,
),
TP_fast_assign(
- __assign_str(dev_name);
+ __entry->hba = hba;
__entry->str_t = str_t;
__entry->cmd = cmd;
__entry->arg1 = arg1;
@@ -337,19 +340,19 @@ TRACE_EVENT(ufshcd_uic_command,
TP_printk(
"%s: %s: cmd: 0x%x, arg1: 0x%x, arg2: 0x%x, arg3: 0x%x",
- show_ufs_cmd_trace_str(__entry->str_t), __get_str(dev_name),
+ show_ufs_cmd_trace_str(__entry->str_t), dev_name(__entry->hba->dev),
__entry->cmd, __entry->arg1, __entry->arg2, __entry->arg3
)
);
TRACE_EVENT(ufshcd_upiu,
- TP_PROTO(const char *dev_name, enum ufs_trace_str_t str_t, void *hdr,
+ TP_PROTO(struct ufs_hba *hba, enum ufs_trace_str_t str_t, void *hdr,
void *tsf, enum ufs_trace_tsf_t tsf_t),
- TP_ARGS(dev_name, str_t, hdr, tsf, tsf_t),
+ TP_ARGS(hba, str_t, hdr, tsf, tsf_t),
TP_STRUCT__entry(
- __string(dev_name, dev_name)
+ __field(struct ufs_hba *, hba)
__field(enum ufs_trace_str_t, str_t)
__array(unsigned char, hdr, 12)
__array(unsigned char, tsf, 16)
@@ -357,7 +360,7 @@ TRACE_EVENT(ufshcd_upiu,
),
TP_fast_assign(
- __assign_str(dev_name);
+ __entry->hba = hba;
__entry->str_t = str_t;
memcpy(__entry->hdr, hdr, sizeof(__entry->hdr));
memcpy(__entry->tsf, tsf, sizeof(__entry->tsf));
@@ -366,7 +369,7 @@ TRACE_EVENT(ufshcd_upiu,
TP_printk(
"%s: %s: HDR:%s, %s:%s",
- show_ufs_cmd_trace_str(__entry->str_t), __get_str(dev_name),
+ show_ufs_cmd_trace_str(__entry->str_t), dev_name(__entry->hba->dev),
__print_hex(__entry->hdr, sizeof(__entry->hdr)),
show_ufs_cmd_trace_tsf(__entry->tsf_t),
__print_hex(__entry->tsf, sizeof(__entry->tsf))
@@ -375,22 +378,22 @@ TRACE_EVENT(ufshcd_upiu,
TRACE_EVENT(ufshcd_exception_event,
- TP_PROTO(const char *dev_name, u16 status),
+ TP_PROTO(struct ufs_hba *hba, u16 status),
- TP_ARGS(dev_name, status),
+ TP_ARGS(hba, status),
TP_STRUCT__entry(
- __string(dev_name, dev_name)
+ __field(struct ufs_hba *, hba)
__field(u16, status)
),
TP_fast_assign(
- __assign_str(dev_name);
+ __entry->hba = hba;
__entry->status = status;
),
TP_printk("%s: status 0x%x",
- __get_str(dev_name), __entry->status
+ dev_name(__entry->hba->dev), __entry->status
)
);
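All of the ufs_trace.h events above apply the same conversion: instead of copying the device-name string into every trace record with __string()/__assign_str(), they store the struct ufs_hba pointer and let TP_printk() resolve dev_name() when the buffer is read, shrinking each record and removing a string copy from the trace fast path. The recurring pattern, condensed:

    /* Before: the name string was copied into each record. */
    __string(dev_name, dev_name)            /* TP_STRUCT__entry */
    __assign_str(dev_name);                 /* TP_fast_assign   */
    __get_str(dev_name)                     /* TP_printk        */

    /* After: only the pointer is recorded; the name is resolved when
     * the event is printed. */
    __field(struct ufs_hba *, hba)
    __entry->hba = hba;
    dev_name(__entry->hba->dev)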
diff --git a/drivers/ufs/core/ufshcd-crypto.c b/drivers/ufs/core/ufshcd-crypto.c
index 694ff7578fc1..9e63a9d3cb7e 100644
--- a/drivers/ufs/core/ufshcd-crypto.c
+++ b/drivers/ufs/core/ufshcd-crypto.c
@@ -72,11 +72,11 @@ static int ufshcd_crypto_keyslot_program(struct blk_crypto_profile *profile,
if (ccap_array[cap_idx].algorithm_id == UFS_CRYPTO_ALG_AES_XTS) {
/* In XTS mode, the blk_crypto_key's size is already doubled */
- memcpy(cfg.crypto_key, key->raw, key->size/2);
+ memcpy(cfg.crypto_key, key->bytes, key->size/2);
memcpy(cfg.crypto_key + UFS_CRYPTO_KEY_MAX_SIZE/2,
- key->raw + key->size/2, key->size/2);
+ key->bytes + key->size/2, key->size/2);
} else {
- memcpy(cfg.crypto_key, key->raw, key->size);
+ memcpy(cfg.crypto_key, key->bytes, key->size);
}
ufshcd_program_key(hba, &cfg, slot);
@@ -185,6 +185,7 @@ int ufshcd_hba_init_crypto_capabilities(struct ufs_hba *hba)
hba->crypto_profile.ll_ops = ufshcd_crypto_ops;
/* UFS only supports 8 bytes for any DUN */
hba->crypto_profile.max_dun_bytes_supported = 8;
+ hba->crypto_profile.key_types_supported = BLK_CRYPTO_KEY_TYPE_RAW;
hba->crypto_profile.dev = hba->dev;
/*
diff --git a/drivers/ufs/core/ufshcd-priv.h b/drivers/ufs/core/ufshcd-priv.h
index 786f20ef2238..10b4a19a70f1 100644
--- a/drivers/ufs/core/ufshcd-priv.h
+++ b/drivers/ufs/core/ufshcd-priv.h
@@ -117,11 +117,12 @@ static inline u32 ufshcd_vops_get_ufs_hci_version(struct ufs_hba *hba)
return ufshcd_readl(hba, REG_UFS_VERSION);
}
-static inline int ufshcd_vops_clk_scale_notify(struct ufs_hba *hba,
- bool up, enum ufs_notify_change_status status)
+static inline int ufshcd_vops_clk_scale_notify(struct ufs_hba *hba, bool up,
+ unsigned long target_freq,
+ enum ufs_notify_change_status status)
{
if (hba->vops && hba->vops->clk_scale_notify)
- return hba->vops->clk_scale_notify(hba, up, status);
+ return hba->vops->clk_scale_notify(hba, up, target_freq, status);
return 0;
}
@@ -159,9 +160,9 @@ static inline int ufshcd_vops_link_startup_notify(struct ufs_hba *hba,
}
static inline int ufshcd_vops_pwr_change_notify(struct ufs_hba *hba,
- enum ufs_notify_change_status status,
- struct ufs_pa_layer_attr *dev_max_params,
- struct ufs_pa_layer_attr *dev_req_params)
+ enum ufs_notify_change_status status,
+ const struct ufs_pa_layer_attr *dev_max_params,
+ struct ufs_pa_layer_attr *dev_req_params)
{
if (hba->vops && hba->vops->pwr_change_notify)
return hba->vops->pwr_change_notify(hba, status,
@@ -270,6 +271,14 @@ static inline int ufshcd_mcq_vops_config_esi(struct ufs_hba *hba)
return -EOPNOTSUPP;
}
+static inline u32 ufshcd_vops_freq_to_gear_speed(struct ufs_hba *hba, unsigned long freq)
+{
+ if (hba->vops && hba->vops->freq_to_gear_speed)
+ return hba->vops->freq_to_gear_speed(hba, freq);
+
+ return 0;
+}
+
extern const struct ufs_pm_lvl_states ufs_pm_lvl_states[];
/**
diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
index 464f13da259a..0534390c2a35 100644
--- a/drivers/ufs/core/ufshcd.c
+++ b/drivers/ufs/core/ufshcd.c
@@ -369,7 +369,7 @@ static void ufshcd_add_cmd_upiu_trace(struct ufs_hba *hba, unsigned int tag,
else
header = &hba->lrb[tag].ucd_rsp_ptr->header;
- trace_ufshcd_upiu(dev_name(hba->dev), str_t, header, &rq->sc.cdb,
+ trace_ufshcd_upiu(hba, str_t, header, &rq->sc.cdb,
UFS_TSF_CDB);
}
@@ -380,7 +380,7 @@ static void ufshcd_add_query_upiu_trace(struct ufs_hba *hba,
if (!trace_ufshcd_upiu_enabled())
return;
- trace_ufshcd_upiu(dev_name(hba->dev), str_t, &rq_rsp->header,
+ trace_ufshcd_upiu(hba, str_t, &rq_rsp->header,
&rq_rsp->qr, UFS_TSF_OSF);
}
@@ -393,12 +393,12 @@ static void ufshcd_add_tm_upiu_trace(struct ufs_hba *hba, unsigned int tag,
return;
if (str_t == UFS_TM_SEND)
- trace_ufshcd_upiu(dev_name(hba->dev), str_t,
+ trace_ufshcd_upiu(hba, str_t,
&descp->upiu_req.req_header,
&descp->upiu_req.input_param1,
UFS_TSF_TM_INPUT);
else
- trace_ufshcd_upiu(dev_name(hba->dev), str_t,
+ trace_ufshcd_upiu(hba, str_t,
&descp->upiu_rsp.rsp_header,
&descp->upiu_rsp.output_param1,
UFS_TSF_TM_OUTPUT);
@@ -418,7 +418,7 @@ static void ufshcd_add_uic_command_trace(struct ufs_hba *hba,
else
cmd = ufshcd_readl(hba, REG_UIC_COMMAND);
- trace_ufshcd_uic_command(dev_name(hba->dev), str_t, cmd,
+ trace_ufshcd_uic_command(hba, str_t, cmd,
ufshcd_readl(hba, REG_UIC_COMMAND_ARG_1),
ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2),
ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3));
@@ -473,7 +473,7 @@ static void ufshcd_add_command_trace(struct ufs_hba *hba, unsigned int tag,
} else {
doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
}
- trace_ufshcd_command(cmd->device, str_t, tag, doorbell, hwq_id,
+ trace_ufshcd_command(cmd->device, hba, str_t, tag, doorbell, hwq_id,
transfer_len, intr, lba, opcode, group_id);
}
@@ -1063,7 +1063,7 @@ static int ufshcd_set_clk_freq(struct ufs_hba *hba, bool scale_up)
clki->max_freq, ret);
break;
}
- trace_ufshcd_clk_scaling(dev_name(hba->dev),
+ trace_ufshcd_clk_scaling(hba,
"scaled up", clki->name,
clki->curr_freq,
clki->max_freq);
@@ -1081,7 +1081,7 @@ static int ufshcd_set_clk_freq(struct ufs_hba *hba, bool scale_up)
clki->min_freq, ret);
break;
}
- trace_ufshcd_clk_scaling(dev_name(hba->dev),
+ trace_ufshcd_clk_scaling(hba,
"scaled down", clki->name,
clki->curr_freq,
clki->min_freq);
@@ -1122,7 +1122,7 @@ int ufshcd_opp_config_clks(struct device *dev, struct opp_table *opp_table,
return ret;
}
- trace_ufshcd_clk_scaling(dev_name(dev),
+ trace_ufshcd_clk_scaling(hba,
(scaling_down ? "scaled down" : "scaled up"),
clki->name, hba->clk_scaling.target_freq, freq);
}
@@ -1162,7 +1162,7 @@ static int ufshcd_scale_clks(struct ufs_hba *hba, unsigned long freq,
int ret = 0;
ktime_t start = ktime_get();
- ret = ufshcd_vops_clk_scale_notify(hba, scale_up, PRE_CHANGE);
+ ret = ufshcd_vops_clk_scale_notify(hba, scale_up, freq, PRE_CHANGE);
if (ret)
goto out;
@@ -1173,7 +1173,7 @@ static int ufshcd_scale_clks(struct ufs_hba *hba, unsigned long freq,
if (ret)
goto out;
- ret = ufshcd_vops_clk_scale_notify(hba, scale_up, POST_CHANGE);
+ ret = ufshcd_vops_clk_scale_notify(hba, scale_up, freq, POST_CHANGE);
if (ret) {
if (hba->use_pm_opp)
ufshcd_opp_set_rate(hba,
@@ -1186,7 +1186,7 @@ static int ufshcd_scale_clks(struct ufs_hba *hba, unsigned long freq,
ufshcd_pm_qos_update(hba, scale_up);
out:
- trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
+ trace_ufshcd_profile_clk_scaling(hba,
(scale_up ? "up" : "down"),
ktime_to_us(ktime_sub(ktime_get(), start)), ret);
return ret;
@@ -1313,16 +1313,26 @@ out:
/**
* ufshcd_scale_gear - scale up/down UFS gear
* @hba: per adapter instance
+ * @target_gear: target gear to scale to
* @scale_up: True for scaling up gear and false for scaling down
*
* Return: 0 for success; -EBUSY if scaling can't happen at this time;
* non-zero for any other errors.
*/
-static int ufshcd_scale_gear(struct ufs_hba *hba, bool scale_up)
+static int ufshcd_scale_gear(struct ufs_hba *hba, u32 target_gear, bool scale_up)
{
int ret = 0;
struct ufs_pa_layer_attr new_pwr_info;
+ if (target_gear) {
+ new_pwr_info = hba->pwr_info;
+ new_pwr_info.gear_tx = target_gear;
+ new_pwr_info.gear_rx = target_gear;
+
+ goto config_pwr_mode;
+ }
+
+ /* Legacy gear scaling, in case vops_freq_to_gear_speed() is not implemented */
if (scale_up) {
memcpy(&new_pwr_info, &hba->clk_scaling.saved_pwr_info,
sizeof(struct ufs_pa_layer_attr));
@@ -1343,6 +1353,7 @@ static int ufshcd_scale_gear(struct ufs_hba *hba, bool scale_up)
}
}
+config_pwr_mode:
/* check if the power mode needs to be changed or not? */
ret = ufshcd_config_pwr_mode(hba, &new_pwr_info);
if (ret)
@@ -1387,13 +1398,13 @@ out:
return ret;
}
-static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba, int err, bool scale_up)
+static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba, int err)
{
up_write(&hba->clk_scaling_lock);
- /* Enable Write Booster if we have scaled up else disable it */
+ /* Enable Write Booster if current gear requires it else disable it */
if (ufshcd_enable_wb_if_scaling_up(hba) && !err)
- ufshcd_wb_toggle(hba, scale_up);
+ ufshcd_wb_toggle(hba, hba->pwr_info.gear_rx >= hba->clk_scaling.wb_gear);
mutex_unlock(&hba->wb_mutex);
@@ -1413,15 +1424,19 @@ static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba, int err, bool sc
static int ufshcd_devfreq_scale(struct ufs_hba *hba, unsigned long freq,
bool scale_up)
{
+ u32 old_gear = hba->pwr_info.gear_rx;
+ u32 new_gear = 0;
int ret = 0;
+ new_gear = ufshcd_vops_freq_to_gear_speed(hba, freq);
+
ret = ufshcd_clock_scaling_prepare(hba, 1 * USEC_PER_SEC);
if (ret)
return ret;
/* scale down the gear before scaling down clocks */
if (!scale_up) {
- ret = ufshcd_scale_gear(hba, false);
+ ret = ufshcd_scale_gear(hba, new_gear, false);
if (ret)
goto out_unprepare;
}
@@ -1429,13 +1444,13 @@ static int ufshcd_devfreq_scale(struct ufs_hba *hba, unsigned long freq,
ret = ufshcd_scale_clks(hba, freq, scale_up);
if (ret) {
if (!scale_up)
- ufshcd_scale_gear(hba, true);
+ ufshcd_scale_gear(hba, old_gear, true);
goto out_unprepare;
}
/* scale up the gear after scaling up clocks */
if (scale_up) {
- ret = ufshcd_scale_gear(hba, true);
+ ret = ufshcd_scale_gear(hba, new_gear, true);
if (ret) {
ufshcd_scale_clks(hba, hba->devfreq->previous_freq,
false);
@@ -1444,7 +1459,7 @@ static int ufshcd_devfreq_scale(struct ufs_hba *hba, unsigned long freq,
}
out_unprepare:
- ufshcd_clock_scaling_unprepare(hba, ret, scale_up);
+ ufshcd_clock_scaling_unprepare(hba, ret);
return ret;
}
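With these hunks, ufshcd_devfreq_scale() asks the host driver, via the new freq_to_gear_speed vop introduced earlier, which gear the requested clock frequency corresponds to, and ufshcd_scale_gear() programs that gear directly; only when the vop is absent (the wrapper then returns 0) does the legacy heuristic of restoring the saved power info on scale-up and clamping to min_gear on scale-down still apply. A host driver hook might look roughly like this, with the frequency thresholds invented purely for illustration:

    /* Hypothetical ->freq_to_gear_speed() implementation (values made up). */
    static u32 example_freq_to_gear(struct ufs_hba *hba, unsigned long freq)
    {
            if (freq >= 300000000)          /* >= 300 MHz */
                    return UFS_HS_G4;
            if (freq >= 150000000)          /* >= 150 MHz */
                    return UFS_HS_G3;
            return UFS_HS_G1;
    }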
@@ -1548,7 +1563,7 @@ static int ufshcd_devfreq_target(struct device *dev,
if (!ret)
hba->clk_scaling.target_freq = *freq;
- trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
+ trace_ufshcd_profile_clk_scaling(hba,
(scale_up ? "up" : "down"),
ktime_to_us(ktime_sub(ktime_get(), start)), ret);
@@ -1720,6 +1735,8 @@ static ssize_t ufshcd_clkscale_enable_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct ufs_hba *hba = dev_get_drvdata(dev);
+ struct ufs_clk_info *clki;
+ unsigned long freq;
u32 value;
int err = 0;
@@ -1743,14 +1760,25 @@ static ssize_t ufshcd_clkscale_enable_store(struct device *dev,
if (value) {
ufshcd_resume_clkscaling(hba);
- } else {
- ufshcd_suspend_clkscaling(hba);
- err = ufshcd_devfreq_scale(hba, ULONG_MAX, true);
- if (err)
- dev_err(hba->dev, "%s: failed to scale clocks up %d\n",
- __func__, err);
+ goto out_rel;
}
+ clki = list_first_entry(&hba->clk_list_head, struct ufs_clk_info, list);
+ freq = clki->max_freq;
+
+ ufshcd_suspend_clkscaling(hba);
+
+ if (!ufshcd_is_devfreq_scaling_required(hba, freq, true))
+ goto out_rel;
+
+ err = ufshcd_devfreq_scale(hba, freq, true);
+ if (err)
+ dev_err(hba->dev, "%s: failed to scale clocks up %d\n",
+ __func__, err);
+ else
+ hba->clk_scaling.target_freq = freq;
+
+out_rel:
ufshcd_release(hba);
ufshcd_rpm_put_sync(hba);
out:
@@ -1783,6 +1811,10 @@ static void ufshcd_init_clk_scaling(struct ufs_hba *hba)
if (!hba->clk_scaling.min_gear)
hba->clk_scaling.min_gear = UFS_HS_G1;
+ if (!hba->clk_scaling.wb_gear)
+ /* Use intermediate gear speed HS_G3 as the default wb_gear */
+ hba->clk_scaling.wb_gear = UFS_HS_G3;
+
INIT_WORK(&hba->clk_scaling.suspend_work,
ufshcd_clk_scaling_suspend_work);
INIT_WORK(&hba->clk_scaling.resume_work,
@@ -1881,7 +1913,7 @@ start:
case REQ_CLKS_OFF:
if (cancel_delayed_work(&hba->clk_gating.gate_work)) {
hba->clk_gating.state = CLKS_ON;
- trace_ufshcd_clk_gating(dev_name(hba->dev),
+ trace_ufshcd_clk_gating(hba,
hba->clk_gating.state);
break;
}
@@ -1893,7 +1925,7 @@ start:
fallthrough;
case CLKS_OFF:
hba->clk_gating.state = REQ_CLKS_ON;
- trace_ufshcd_clk_gating(dev_name(hba->dev),
+ trace_ufshcd_clk_gating(hba,
hba->clk_gating.state);
queue_work(hba->clk_gating.clk_gating_workq,
&hba->clk_gating.ungate_work);
@@ -1933,7 +1965,7 @@ static void ufshcd_gate_work(struct work_struct *work)
if (hba->clk_gating.is_suspended ||
hba->clk_gating.state != REQ_CLKS_OFF) {
hba->clk_gating.state = CLKS_ON;
- trace_ufshcd_clk_gating(dev_name(hba->dev),
+ trace_ufshcd_clk_gating(hba,
hba->clk_gating.state);
return;
}
@@ -1955,7 +1987,7 @@ static void ufshcd_gate_work(struct work_struct *work)
hba->clk_gating.state = CLKS_ON;
dev_err(hba->dev, "%s: hibern8 enter failed %d\n",
__func__, ret);
- trace_ufshcd_clk_gating(dev_name(hba->dev),
+ trace_ufshcd_clk_gating(hba,
hba->clk_gating.state);
return;
}
@@ -1980,7 +2012,7 @@ static void ufshcd_gate_work(struct work_struct *work)
guard(spinlock_irqsave)(&hba->clk_gating.lock);
if (hba->clk_gating.state == REQ_CLKS_OFF) {
hba->clk_gating.state = CLKS_OFF;
- trace_ufshcd_clk_gating(dev_name(hba->dev),
+ trace_ufshcd_clk_gating(hba,
hba->clk_gating.state);
}
}
@@ -2006,7 +2038,7 @@ static void __ufshcd_release(struct ufs_hba *hba)
}
hba->clk_gating.state = REQ_CLKS_OFF;
- trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state);
+ trace_ufshcd_clk_gating(hba, hba->clk_gating.state);
queue_delayed_work(hba->clk_gating.clk_gating_workq,
&hba->clk_gating.gate_work,
msecs_to_jiffies(hba->clk_gating.delay_ms));
@@ -4005,7 +4037,7 @@ static int ufshcd_dme_link_startup(struct ufs_hba *hba)
*
* Return: 0 on success, non-zero value on failure.
*/
-static int ufshcd_dme_reset(struct ufs_hba *hba)
+int ufshcd_dme_reset(struct ufs_hba *hba)
{
struct uic_command uic_cmd = {
.command = UIC_CMD_DME_RESET,
@@ -4019,6 +4051,7 @@ static int ufshcd_dme_reset(struct ufs_hba *hba)
return ret;
}
+EXPORT_SYMBOL_GPL(ufshcd_dme_reset);
int ufshcd_dme_configure_adapt(struct ufs_hba *hba,
int agreed_gear,
@@ -4044,7 +4077,7 @@ EXPORT_SYMBOL_GPL(ufshcd_dme_configure_adapt);
*
* Return: 0 on success, non-zero value on failure.
*/
-static int ufshcd_dme_enable(struct ufs_hba *hba)
+int ufshcd_dme_enable(struct ufs_hba *hba)
{
struct uic_command uic_cmd = {
.command = UIC_CMD_DME_ENABLE,
@@ -4058,6 +4091,7 @@ static int ufshcd_dme_enable(struct ufs_hba *hba)
return ret;
}
+EXPORT_SYMBOL_GPL(ufshcd_dme_enable);
static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba)
{
@@ -4422,7 +4456,7 @@ int ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER, PRE_CHANGE);
ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
- trace_ufshcd_profile_hibern8(dev_name(hba->dev), "enter",
+ trace_ufshcd_profile_hibern8(hba, "enter",
ktime_to_us(ktime_sub(ktime_get(), start)), ret);
if (ret)
@@ -4447,7 +4481,7 @@ int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT, PRE_CHANGE);
ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
- trace_ufshcd_profile_hibern8(dev_name(hba->dev), "exit",
+ trace_ufshcd_profile_hibern8(hba, "exit",
ktime_to_us(ktime_sub(ktime_get(), start)), ret);
if (ret) {
@@ -5808,7 +5842,7 @@ static int ufshcd_enable_auto_bkops(struct ufs_hba *hba)
}
hba->auto_bkops_enabled = true;
- trace_ufshcd_auto_bkops_state(dev_name(hba->dev), "Enabled");
+ trace_ufshcd_auto_bkops_state(hba, "Enabled");
/* No need of URGENT_BKOPS exception from the device */
err = ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
@@ -5859,7 +5893,7 @@ static int ufshcd_disable_auto_bkops(struct ufs_hba *hba)
}
hba->auto_bkops_enabled = false;
- trace_ufshcd_auto_bkops_state(dev_name(hba->dev), "Disabled");
+ trace_ufshcd_auto_bkops_state(hba, "Disabled");
hba->is_urgent_bkops_lvl_checked = false;
out:
return err;
@@ -6193,7 +6227,7 @@ static void ufshcd_exception_event_handler(struct work_struct *work)
return;
}
- trace_ufshcd_exception_event(dev_name(hba->dev), status);
+ trace_ufshcd_exception_event(hba, status);
if (status & hba->ee_drv_mask & MASK_EE_URGENT_BKOPS)
ufshcd_bkops_exception_event_handler(hba);
@@ -6201,6 +6235,11 @@ static void ufshcd_exception_event_handler(struct work_struct *work)
if (status & hba->ee_drv_mask & MASK_EE_URGENT_TEMP)
ufs_hwmon_notify_event(hba, status & MASK_EE_URGENT_TEMP);
+ if (status & hba->ee_drv_mask & MASK_EE_HEALTH_CRITICAL) {
+ hba->critical_health_count++;
+ sysfs_notify(&hba->dev->kobj, NULL, "critical_health");
+ }
+
ufs_debugfs_exception_event(hba, status);
}
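
The MASK_EE_HEALTH_CRITICAL handling above only bumps a counter and calls sysfs_notify() on the "critical_health" attribute; noticing the event is left to userspace. A minimal poller sketch follows, assuming a hypothetical sysfs path for the attribute (the real path depends on the platform device; the attribute itself is only implied by the sysfs_notify() call in this hunk):

/*
 * Sketch only: block until the driver fires sysfs_notify() for
 * "critical_health", then re-read the counter. The path is an assumption.
 */
#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	const char *path = "/sys/devices/platform/ufshc/critical_health"; /* hypothetical */
	char buf[16];
	struct pollfd pfd;
	ssize_t n;

	pfd.fd = open(path, O_RDONLY);
	if (pfd.fd < 0)
		return 1;
	pfd.events = POLLPRI;

	n = read(pfd.fd, buf, sizeof(buf) - 1);	/* arm the notification */
	poll(&pfd, 1, -1);			/* returns when sysfs_notify() fires */
	lseek(pfd.fd, 0, SEEK_SET);
	n = read(pfd.fd, buf, sizeof(buf) - 1);	/* updated counter value */
	if (n > 0) {
		buf[n] = '\0';
		printf("critical_health: %s", buf);
	}
	close(pfd.fd);
	return 0;
}
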
@@ -7652,7 +7691,7 @@ static void ufshcd_process_probe_result(struct ufs_hba *hba,
hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
spin_unlock_irqrestore(hba->host->host_lock, flags);
- trace_ufshcd_init(dev_name(hba->dev), ret,
+ trace_ufshcd_init(hba, ret,
ktime_to_us(ktime_sub(ktime_get(), probe_start)),
hba->curr_dev_pwr_mode, hba->uic_link_state);
}
@@ -8293,6 +8332,11 @@ static int ufs_get_device_desc(struct ufs_hba *hba)
ufshcd_temp_notif_probe(hba, desc_buf);
+ if (dev_info->wspecversion >= 0x410) {
+ hba->critical_health_count = 0;
+ ufshcd_enable_ee(hba, MASK_EE_HEALTH_CRITICAL);
+ }
+
ufs_init_rtc(hba, desc_buf);
/*
@@ -9148,12 +9192,12 @@ out:
} else if (!ret && on && hba->clk_gating.is_initialized) {
scoped_guard(spinlock_irqsave, &hba->clk_gating.lock)
hba->clk_gating.state = CLKS_ON;
- trace_ufshcd_clk_gating(dev_name(hba->dev),
+ trace_ufshcd_clk_gating(hba,
hba->clk_gating.state);
}
if (clk_state_changed)
- trace_ufshcd_profile_clk_gating(dev_name(hba->dev),
+ trace_ufshcd_profile_clk_gating(hba,
(on ? "on" : "off"),
ktime_to_us(ktime_sub(ktime_get(), start)), ret);
return ret;
@@ -9853,7 +9897,7 @@ static int ufshcd_wl_runtime_suspend(struct device *dev)
if (ret)
dev_err(&sdev->sdev_gendev, "%s failed: %d\n", __func__, ret);
- trace_ufshcd_wl_runtime_suspend(dev_name(dev), ret,
+ trace_ufshcd_wl_runtime_suspend(hba, ret,
ktime_to_us(ktime_sub(ktime_get(), start)),
hba->curr_dev_pwr_mode, hba->uic_link_state);
@@ -9873,7 +9917,7 @@ static int ufshcd_wl_runtime_resume(struct device *dev)
if (ret)
dev_err(&sdev->sdev_gendev, "%s failed: %d\n", __func__, ret);
- trace_ufshcd_wl_runtime_resume(dev_name(dev), ret,
+ trace_ufshcd_wl_runtime_resume(hba, ret,
ktime_to_us(ktime_sub(ktime_get(), start)),
hba->curr_dev_pwr_mode, hba->uic_link_state);
@@ -9905,7 +9949,7 @@ static int ufshcd_wl_suspend(struct device *dev)
out:
if (!ret)
hba->is_sys_suspended = true;
- trace_ufshcd_wl_suspend(dev_name(dev), ret,
+ trace_ufshcd_wl_suspend(hba, ret,
ktime_to_us(ktime_sub(ktime_get(), start)),
hba->curr_dev_pwr_mode, hba->uic_link_state);
@@ -9928,7 +9972,7 @@ static int ufshcd_wl_resume(struct device *dev)
if (ret)
dev_err(&sdev->sdev_gendev, "%s failed: %d\n", __func__, ret);
out:
- trace_ufshcd_wl_resume(dev_name(dev), ret,
+ trace_ufshcd_wl_resume(hba, ret,
ktime_to_us(ktime_sub(ktime_get(), start)),
hba->curr_dev_pwr_mode, hba->uic_link_state);
if (!ret)
@@ -9966,7 +10010,7 @@ static int ufshcd_suspend(struct ufs_hba *hba)
}
if (ufshcd_is_clkgating_allowed(hba)) {
hba->clk_gating.state = CLKS_OFF;
- trace_ufshcd_clk_gating(dev_name(hba->dev),
+ trace_ufshcd_clk_gating(hba,
hba->clk_gating.state);
}
@@ -10039,7 +10083,7 @@ int ufshcd_system_suspend(struct device *dev)
ret = ufshcd_suspend(hba);
out:
- trace_ufshcd_system_suspend(dev_name(hba->dev), ret,
+ trace_ufshcd_system_suspend(hba, ret,
ktime_to_us(ktime_sub(ktime_get(), start)),
hba->curr_dev_pwr_mode, hba->uic_link_state);
return ret;
@@ -10067,7 +10111,7 @@ int ufshcd_system_resume(struct device *dev)
ret = ufshcd_resume(hba);
out:
- trace_ufshcd_system_resume(dev_name(hba->dev), ret,
+ trace_ufshcd_system_resume(hba, ret,
ktime_to_us(ktime_sub(ktime_get(), start)),
hba->curr_dev_pwr_mode, hba->uic_link_state);
@@ -10093,7 +10137,7 @@ int ufshcd_runtime_suspend(struct device *dev)
ret = ufshcd_suspend(hba);
- trace_ufshcd_runtime_suspend(dev_name(hba->dev), ret,
+ trace_ufshcd_runtime_suspend(hba, ret,
ktime_to_us(ktime_sub(ktime_get(), start)),
hba->curr_dev_pwr_mode, hba->uic_link_state);
return ret;
@@ -10120,7 +10164,7 @@ int ufshcd_runtime_resume(struct device *dev)
ret = ufshcd_resume(hba);
- trace_ufshcd_runtime_resume(dev_name(hba->dev), ret,
+ trace_ufshcd_runtime_resume(hba, ret,
ktime_to_us(ktime_sub(ktime_get(), start)),
hba->curr_dev_pwr_mode, hba->uic_link_state);
return ret;
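
Before the host-driver diffs below, it may help to see the shape of the new ->freq_to_gear_speed() variant op that ufshcd_devfreq_scale() now relies on. A minimal sketch with made-up frequency thresholds (the real Qualcomm mapping appears later in this patch):

#include <linux/units.h>
#include <ufs/ufshcd.h>
#include <ufs/unipro.h>

/*
 * Sketch only: map the devfreq target frequency to the UFS gear the link
 * should run at once the clocks have been scaled. The thresholds are
 * illustrative, not taken from any real platform.
 */
static u32 example_freq_to_gear_speed(struct ufs_hba *hba, unsigned long freq)
{
	if (freq >= 300 * HZ_PER_MHZ)
		return UFS_HS_G4;
	if (freq >= 150 * HZ_PER_MHZ)
		return UFS_HS_G2;

	return UFS_HS_G1;
}
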
diff --git a/drivers/ufs/host/Kconfig b/drivers/ufs/host/Kconfig
index 580c8d0bd8bb..191fbd799ec5 100644
--- a/drivers/ufs/host/Kconfig
+++ b/drivers/ufs/host/Kconfig
@@ -142,3 +142,15 @@ config SCSI_UFS_SPRD
Select this if you have UFS controller on Unisoc chipset.
If unsure, say N.
+
+config SCSI_UFS_ROCKCHIP
+ tristate "Rockchip UFS host controller driver"
+ depends on SCSI_UFSHCD_PLATFORM && (ARCH_ROCKCHIP || COMPILE_TEST)
+ help
+ This selects the Rockchip specific additions to UFSHCD platform driver.
+ The UFS host on Rockchip needs some vendor specific configuration before
+ accessing the hardware, which includes PHY configuration and vendor
+ specific registers.
+
+ Select this if you have UFS controller on Rockchip chipset.
+ If unsure, say N.
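
As a usage note, the new entry follows the other UFS host glue drivers: on top of a platform UFSHCD build (and ARCH_ROCKCHIP, unless COMPILE_TEST is used), a fragment along these lines enables it:

CONFIG_SCSI_UFSHCD=y
CONFIG_SCSI_UFSHCD_PLATFORM=y
CONFIG_SCSI_UFS_ROCKCHIP=m
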
diff --git a/drivers/ufs/host/Makefile b/drivers/ufs/host/Makefile
index 4573aead02eb..2f97feb5db3f 100644
--- a/drivers/ufs/host/Makefile
+++ b/drivers/ufs/host/Makefile
@@ -10,5 +10,6 @@ obj-$(CONFIG_SCSI_UFSHCD_PLATFORM) += ufshcd-pltfrm.o
obj-$(CONFIG_SCSI_UFS_HISI) += ufs-hisi.o
obj-$(CONFIG_SCSI_UFS_MEDIATEK) += ufs-mediatek.o
obj-$(CONFIG_SCSI_UFS_RENESAS) += ufs-renesas.o
+obj-$(CONFIG_SCSI_UFS_ROCKCHIP) += ufs-rockchip.o
obj-$(CONFIG_SCSI_UFS_SPRD) += ufs-sprd.o
obj-$(CONFIG_SCSI_UFS_TI_J721E) += ti-j721e-ufs.o
diff --git a/drivers/ufs/host/ufs-exynos.c b/drivers/ufs/host/ufs-exynos.c
index 13dd5dfc03eb..d7539cda97da 100644
--- a/drivers/ufs/host/ufs-exynos.c
+++ b/drivers/ufs/host/ufs-exynos.c
@@ -321,7 +321,7 @@ static int exynosauto_ufs_pre_pwr_change(struct exynos_ufs *ufs,
}
static int exynosauto_ufs_post_pwr_change(struct exynos_ufs *ufs,
- struct ufs_pa_layer_attr *pwr)
+ const struct ufs_pa_layer_attr *pwr)
{
struct ufs_hba *hba = ufs->hba;
u32 enabled_vh;
@@ -396,7 +396,7 @@ static int exynos7_ufs_pre_pwr_change(struct exynos_ufs *ufs,
}
static int exynos7_ufs_post_pwr_change(struct exynos_ufs *ufs,
- struct ufs_pa_layer_attr *pwr)
+ const struct ufs_pa_layer_attr *pwr)
{
struct ufs_hba *hba = ufs->hba;
int lanes = max_t(u32, pwr->lane_rx, pwr->lane_tx);
@@ -813,7 +813,7 @@ static u32 exynos_ufs_get_hs_gear(struct ufs_hba *hba)
}
static int exynos_ufs_pre_pwr_mode(struct ufs_hba *hba,
- struct ufs_pa_layer_attr *dev_max_params,
+ const struct ufs_pa_layer_attr *dev_max_params,
struct ufs_pa_layer_attr *dev_req_params)
{
struct exynos_ufs *ufs = ufshcd_get_variant(hba);
@@ -865,7 +865,7 @@ out:
#define PWR_MODE_STR_LEN 64
static int exynos_ufs_post_pwr_mode(struct ufs_hba *hba,
- struct ufs_pa_layer_attr *pwr_req)
+ const struct ufs_pa_layer_attr *pwr_req)
{
struct exynos_ufs *ufs = ufshcd_get_variant(hba);
struct phy *generic_phy = ufs->phy;
@@ -1320,6 +1320,7 @@ static void exynos_ufs_fmp_init(struct ufs_hba *hba, struct exynos_ufs *ufs)
return;
}
profile->max_dun_bytes_supported = AES_BLOCK_SIZE;
+ profile->key_types_supported = BLK_CRYPTO_KEY_TYPE_RAW;
profile->dev = hba->dev;
profile->modes_supported[BLK_ENCRYPTION_MODE_AES_256_XTS] =
DATA_UNIT_SIZE;
@@ -1366,7 +1367,7 @@ static int exynos_ufs_fmp_fill_prdt(struct ufs_hba *hba,
void *prdt, unsigned int num_segments)
{
struct fmp_sg_entry *fmp_prdt = prdt;
- const u8 *enckey = crypt_ctx->bc_key->raw;
+ const u8 *enckey = crypt_ctx->bc_key->bytes;
const u8 *twkey = enckey + AES_KEYSIZE_256;
u64 dun_lo = crypt_ctx->bc_dun[0];
u64 dun_hi = crypt_ctx->bc_dun[1];
@@ -1634,7 +1635,7 @@ static int exynos_ufs_link_startup_notify(struct ufs_hba *hba,
static int exynos_ufs_pwr_change_notify(struct ufs_hba *hba,
enum ufs_notify_change_status status,
- struct ufs_pa_layer_attr *dev_max_params,
+ const struct ufs_pa_layer_attr *dev_max_params,
struct ufs_pa_layer_attr *dev_req_params)
{
int ret = 0;
diff --git a/drivers/ufs/host/ufs-exynos.h b/drivers/ufs/host/ufs-exynos.h
index 9670dc138d1e..aac517276189 100644
--- a/drivers/ufs/host/ufs-exynos.h
+++ b/drivers/ufs/host/ufs-exynos.h
@@ -188,7 +188,7 @@ struct exynos_ufs_drv_data {
int (*pre_pwr_change)(struct exynos_ufs *ufs,
struct ufs_pa_layer_attr *pwr);
int (*post_pwr_change)(struct exynos_ufs *ufs,
- struct ufs_pa_layer_attr *pwr);
+ const struct ufs_pa_layer_attr *pwr);
int (*pre_hce_enable)(struct exynos_ufs *ufs);
int (*post_hce_enable)(struct exynos_ufs *ufs);
};
diff --git a/drivers/ufs/host/ufs-hisi.c b/drivers/ufs/host/ufs-hisi.c
index 6e6569de74d8..6f2e6bf31225 100644
--- a/drivers/ufs/host/ufs-hisi.c
+++ b/drivers/ufs/host/ufs-hisi.c
@@ -361,9 +361,9 @@ static void ufs_hisi_pwr_change_pre_change(struct ufs_hba *hba)
}
static int ufs_hisi_pwr_change_notify(struct ufs_hba *hba,
- enum ufs_notify_change_status status,
- struct ufs_pa_layer_attr *dev_max_params,
- struct ufs_pa_layer_attr *dev_req_params)
+ enum ufs_notify_change_status status,
+ const struct ufs_pa_layer_attr *dev_max_params,
+ struct ufs_pa_layer_attr *dev_req_params)
{
struct ufs_host_params host_params;
int ret = 0;
diff --git a/drivers/ufs/host/ufs-mediatek.c b/drivers/ufs/host/ufs-mediatek.c
index 135cd78109e2..182f58d0c9db 100644
--- a/drivers/ufs/host/ufs-mediatek.c
+++ b/drivers/ufs/host/ufs-mediatek.c
@@ -1081,8 +1081,8 @@ static bool ufs_mtk_pmc_via_fastauto(struct ufs_hba *hba,
}
static int ufs_mtk_pre_pwr_change(struct ufs_hba *hba,
- struct ufs_pa_layer_attr *dev_max_params,
- struct ufs_pa_layer_attr *dev_req_params)
+ const struct ufs_pa_layer_attr *dev_max_params,
+ struct ufs_pa_layer_attr *dev_req_params)
{
struct ufs_mtk_host *host = ufshcd_get_variant(hba);
struct ufs_host_params host_params;
@@ -1134,9 +1134,9 @@ static int ufs_mtk_pre_pwr_change(struct ufs_hba *hba,
}
static int ufs_mtk_pwr_change_notify(struct ufs_hba *hba,
- enum ufs_notify_change_status stage,
- struct ufs_pa_layer_attr *dev_max_params,
- struct ufs_pa_layer_attr *dev_req_params)
+ enum ufs_notify_change_status stage,
+ const struct ufs_pa_layer_attr *dev_max_params,
+ struct ufs_pa_layer_attr *dev_req_params)
{
int ret = 0;
@@ -1643,6 +1643,7 @@ static void ufs_mtk_clk_scale(struct ufs_hba *hba, bool scale_up)
}
static int ufs_mtk_clk_scale_notify(struct ufs_hba *hba, bool scale_up,
+ unsigned long target_freq,
enum ufs_notify_change_status status)
{
if (!ufshcd_is_clkscaling_supported(hba))
diff --git a/drivers/ufs/host/ufs-qcom.c b/drivers/ufs/host/ufs-qcom.c
index 23b9f6efa047..1b37449fbffc 100644
--- a/drivers/ufs/host/ufs-qcom.c
+++ b/drivers/ufs/host/ufs-qcom.c
@@ -15,6 +15,8 @@
#include <linux/platform_device.h>
#include <linux/reset-controller.h>
#include <linux/time.h>
+#include <linux/unaligned.h>
+#include <linux/units.h>
#include <soc/qcom/ice.h>
@@ -97,7 +99,7 @@ static const struct __ufs_qcom_bw_table {
};
static void ufs_qcom_get_default_testbus_cfg(struct ufs_qcom_host *host);
-static int ufs_qcom_set_core_clk_ctrl(struct ufs_hba *hba, bool is_scale_up);
+static int ufs_qcom_set_core_clk_ctrl(struct ufs_hba *hba, unsigned long freq);
static struct ufs_qcom_host *rcdev_to_ufs_host(struct reset_controller_dev *rcd)
{
@@ -105,6 +107,26 @@ static struct ufs_qcom_host *rcdev_to_ufs_host(struct reset_controller_dev *rcd)
}
#ifdef CONFIG_SCSI_UFS_CRYPTO
+/**
+ * ufs_qcom_config_ice_allocator() - ICE core allocator configuration
+ *
+ * @host: pointer to qcom specific variant structure.
+ */
+static void ufs_qcom_config_ice_allocator(struct ufs_qcom_host *host)
+{
+ struct ufs_hba *hba = host->hba;
+ static const uint8_t val[4] = { NUM_RX_R1W0, NUM_TX_R0W1, NUM_RX_R1W1, NUM_TX_R1W1 };
+ u32 config;
+
+ if (!(host->caps & UFS_QCOM_CAP_ICE_CONFIG) ||
+ !(host->hba->caps & UFSHCD_CAP_CRYPTO))
+ return;
+
+ config = get_unaligned_le32(val);
+
+ ufshcd_writel(hba, ICE_ALLOCATOR_TYPE, REG_UFS_MEM_ICE_CONFIG);
+ ufshcd_writel(hba, config, REG_UFS_MEM_ICE_NUM_CORE);
+}
static inline void ufs_qcom_ice_enable(struct ufs_qcom_host *host)
{
@@ -125,7 +147,7 @@ static int ufs_qcom_ice_init(struct ufs_qcom_host *host)
int err;
int i;
- ice = of_qcom_ice_get(dev);
+ ice = devm_of_qcom_ice_get(dev);
if (ice == ERR_PTR(-EOPNOTSUPP)) {
dev_warn(dev, "Disabling inline encryption support\n");
ice = NULL;
@@ -147,6 +169,7 @@ static int ufs_qcom_ice_init(struct ufs_qcom_host *host)
profile->ll_ops = ufs_qcom_crypto_ops;
profile->max_dun_bytes_supported = 8;
+ profile->key_types_supported = BLK_CRYPTO_KEY_TYPE_RAW;
profile->dev = dev;
/*
@@ -202,7 +225,7 @@ static int ufs_qcom_ice_keyslot_program(struct blk_crypto_profile *profile,
err = qcom_ice_program_key(host->ice,
QCOM_ICE_CRYPTO_ALG_AES_XTS,
QCOM_ICE_CRYPTO_KEY_SIZE_256,
- key->raw,
+ key->bytes,
key->crypto_cfg.data_unit_size / 512,
slot);
ufshcd_release(hba);
@@ -248,6 +271,11 @@ static inline int ufs_qcom_ice_suspend(struct ufs_qcom_host *host)
{
return 0;
}
+
+static void ufs_qcom_config_ice_allocator(struct ufs_qcom_host *host)
+{
+}
+
#endif
static void ufs_qcom_disable_lane_clks(struct ufs_qcom_host *host)
@@ -496,6 +524,7 @@ static int ufs_qcom_hce_enable_notify(struct ufs_hba *hba,
err = ufs_qcom_check_hibern8(hba);
ufs_qcom_enable_hw_clk_gating(hba);
ufs_qcom_ice_enable(host);
+ ufs_qcom_config_ice_allocator(host);
break;
default:
dev_err(hba->dev, "%s: invalid status %d\n", __func__, status);
@@ -509,16 +538,10 @@ static int ufs_qcom_hce_enable_notify(struct ufs_hba *hba,
* ufs_qcom_cfg_timers - Configure ufs qcom cfg timers
*
* @hba: host controller instance
- * @gear: Current operating gear
- * @hs: current power mode
- * @rate: current operating rate (A or B)
- * @update_link_startup_timer: indicate if link_start ongoing
* @is_pre_scale_up: flag to check if pre scale up condition.
* Return: zero for success and non-zero in case of a failure.
*/
-static int ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear,
- u32 hs, u32 rate, bool update_link_startup_timer,
- bool is_pre_scale_up)
+static int ufs_qcom_cfg_timers(struct ufs_hba *hba, bool is_pre_scale_up)
{
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
struct ufs_clk_info *clki;
@@ -534,11 +557,6 @@ static int ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear,
if (host->hw_ver.major < 4 && !ufshcd_is_intr_aggr_allowed(hba))
return 0;
- if (gear == 0) {
- dev_err(hba->dev, "%s: invalid gear = %d\n", __func__, gear);
- return -EINVAL;
- }
-
list_for_each_entry(clki, &hba->clk_list_head, list) {
if (!strcmp(clki->name, "core_clk")) {
if (is_pre_scale_up)
@@ -574,14 +592,13 @@ static int ufs_qcom_link_startup_notify(struct ufs_hba *hba,
switch (status) {
case PRE_CHANGE:
- if (ufs_qcom_cfg_timers(hba, UFS_PWM_G1, SLOWAUTO_MODE,
- 0, true, false)) {
+ if (ufs_qcom_cfg_timers(hba, false)) {
dev_err(hba->dev, "%s: ufs_qcom_cfg_timers() failed\n",
__func__);
return -EINVAL;
}
- err = ufs_qcom_set_core_clk_ctrl(hba, true);
+ err = ufs_qcom_set_core_clk_ctrl(hba, ULONG_MAX);
if (err)
dev_err(hba->dev, "cfg core clk ctrl failed\n");
/*
@@ -780,7 +797,7 @@ static int ufs_qcom_icc_update_bw(struct ufs_qcom_host *host)
static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba,
enum ufs_notify_change_status status,
- struct ufs_pa_layer_attr *dev_max_params,
+ const struct ufs_pa_layer_attr *dev_max_params,
struct ufs_pa_layer_attr *dev_req_params)
{
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
@@ -831,9 +848,7 @@ static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba,
}
break;
case POST_CHANGE:
- if (ufs_qcom_cfg_timers(hba, dev_req_params->gear_rx,
- dev_req_params->pwr_rx,
- dev_req_params->hs_rate, false, false)) {
+ if (ufs_qcom_cfg_timers(hba, false)) {
dev_err(hba->dev, "%s: ufs_qcom_cfg_timers() failed\n",
__func__);
/*
@@ -989,6 +1004,14 @@ static void ufs_qcom_set_host_params(struct ufs_hba *hba)
host_params->hs_tx_gear = host_params->hs_rx_gear = ufs_qcom_get_hs_gear(hba);
}
+static void ufs_qcom_set_host_caps(struct ufs_hba *hba)
+{
+ struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+
+ if (host->hw_ver.major >= 0x5)
+ host->caps |= UFS_QCOM_CAP_ICE_CONFIG;
+}
+
static void ufs_qcom_set_caps(struct ufs_hba *hba)
{
hba->caps |= UFSHCD_CAP_CLK_GATING | UFSHCD_CAP_HIBERN8_WITH_CLK_GATING;
@@ -997,6 +1020,8 @@ static void ufs_qcom_set_caps(struct ufs_hba *hba)
hba->caps |= UFSHCD_CAP_WB_EN;
hba->caps |= UFSHCD_CAP_AGGR_POWER_COLLAPSE;
hba->caps |= UFSHCD_CAP_RPM_AUTOSUSPEND;
+
+ ufs_qcom_set_host_caps(hba);
}
/**
@@ -1292,7 +1317,7 @@ static int ufs_qcom_set_clk_40ns_cycles(struct ufs_hba *hba,
return ufshcd_dme_set(hba, UIC_ARG_MIB(PA_VS_CORE_CLK_40NS_CYCLES), reg);
}
-static int ufs_qcom_set_core_clk_ctrl(struct ufs_hba *hba, bool is_scale_up)
+static int ufs_qcom_set_core_clk_ctrl(struct ufs_hba *hba, unsigned long freq)
{
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
struct list_head *head = &hba->clk_list_head;
@@ -1306,10 +1331,11 @@ static int ufs_qcom_set_core_clk_ctrl(struct ufs_hba *hba, bool is_scale_up)
!strcmp(clki->name, "core_clk_unipro")) {
if (!clki->max_freq)
cycles_in_1us = 150; /* default for backwards compatibility */
- else if (is_scale_up)
- cycles_in_1us = ceil(clki->max_freq, (1000 * 1000));
+ else if (freq == ULONG_MAX)
+ cycles_in_1us = ceil(clki->max_freq, HZ_PER_MHZ);
else
- cycles_in_1us = ceil(clk_get_rate(clki->clk), (1000 * 1000));
+ cycles_in_1us = ceil(freq, HZ_PER_MHZ);
+
break;
}
}
@@ -1346,20 +1372,17 @@ static int ufs_qcom_set_core_clk_ctrl(struct ufs_hba *hba, bool is_scale_up)
return ufs_qcom_set_clk_40ns_cycles(hba, cycles_in_1us);
}
-static int ufs_qcom_clk_scale_up_pre_change(struct ufs_hba *hba)
+static int ufs_qcom_clk_scale_up_pre_change(struct ufs_hba *hba, unsigned long freq)
{
- struct ufs_qcom_host *host = ufshcd_get_variant(hba);
- struct ufs_pa_layer_attr *attr = &host->dev_req_params;
int ret;
- ret = ufs_qcom_cfg_timers(hba, attr->gear_rx, attr->pwr_rx,
- attr->hs_rate, false, true);
+ ret = ufs_qcom_cfg_timers(hba, true);
if (ret) {
dev_err(hba->dev, "%s ufs cfg timer failed\n", __func__);
return ret;
}
/* set unipro core clock attributes and clear clock divider */
- return ufs_qcom_set_core_clk_ctrl(hba, true);
+ return ufs_qcom_set_core_clk_ctrl(hba, freq);
}
static int ufs_qcom_clk_scale_up_post_change(struct ufs_hba *hba)
@@ -1388,14 +1411,15 @@ static int ufs_qcom_clk_scale_down_pre_change(struct ufs_hba *hba)
return err;
}
-static int ufs_qcom_clk_scale_down_post_change(struct ufs_hba *hba)
+static int ufs_qcom_clk_scale_down_post_change(struct ufs_hba *hba, unsigned long freq)
{
/* set unipro core clock attributes and clear clock divider */
- return ufs_qcom_set_core_clk_ctrl(hba, false);
+ return ufs_qcom_set_core_clk_ctrl(hba, freq);
}
-static int ufs_qcom_clk_scale_notify(struct ufs_hba *hba,
- bool scale_up, enum ufs_notify_change_status status)
+static int ufs_qcom_clk_scale_notify(struct ufs_hba *hba, bool scale_up,
+ unsigned long target_freq,
+ enum ufs_notify_change_status status)
{
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
int err;
@@ -1409,7 +1433,7 @@ static int ufs_qcom_clk_scale_notify(struct ufs_hba *hba,
if (err)
return err;
if (scale_up)
- err = ufs_qcom_clk_scale_up_pre_change(hba);
+ err = ufs_qcom_clk_scale_up_pre_change(hba, target_freq);
else
err = ufs_qcom_clk_scale_down_pre_change(hba);
@@ -1421,7 +1445,7 @@ static int ufs_qcom_clk_scale_notify(struct ufs_hba *hba,
if (scale_up)
err = ufs_qcom_clk_scale_up_post_change(hba);
else
- err = ufs_qcom_clk_scale_down_post_change(hba);
+ err = ufs_qcom_clk_scale_down_post_change(hba, target_freq);
if (err) {
@@ -1855,6 +1879,36 @@ static int ufs_qcom_config_esi(struct ufs_hba *hba)
return ret;
}
+static u32 ufs_qcom_freq_to_gear_speed(struct ufs_hba *hba, unsigned long freq)
+{
+ u32 gear = 0;
+
+ switch (freq) {
+ case 403000000:
+ gear = UFS_HS_G5;
+ break;
+ case 300000000:
+ gear = UFS_HS_G4;
+ break;
+ case 201500000:
+ gear = UFS_HS_G3;
+ break;
+ case 150000000:
+ case 100000000:
+ gear = UFS_HS_G2;
+ break;
+ case 75000000:
+ case 37500000:
+ gear = UFS_HS_G1;
+ break;
+ default:
+ dev_err(hba->dev, "%s: Unsupported clock freq : %lu\n", __func__, freq);
+ break;
+ }
+
+ return gear;
+}
+
/*
* struct ufs_hba_qcom_vops - UFS QCOM specific variant operations
*
@@ -1883,6 +1937,7 @@ static const struct ufs_hba_variant_ops ufs_hba_qcom_vops = {
.op_runtime_config = ufs_qcom_op_runtime_config,
.get_outstanding_cqs = ufs_qcom_get_outstanding_cqs,
.config_esi = ufs_qcom_config_esi,
+ .freq_to_gear_speed = ufs_qcom_freq_to_gear_speed,
};
/**
diff --git a/drivers/ufs/host/ufs-qcom.h b/drivers/ufs/host/ufs-qcom.h
index 919f53682beb..d0e6ec9128e7 100644
--- a/drivers/ufs/host/ufs-qcom.h
+++ b/drivers/ufs/host/ufs-qcom.h
@@ -50,6 +50,9 @@ enum {
*/
UFS_AH8_CFG = 0xFC,
+ REG_UFS_MEM_ICE_CONFIG = 0x260C,
+ REG_UFS_MEM_ICE_NUM_CORE = 0x2664,
+
REG_UFS_CFG3 = 0x271C,
REG_UFS_DEBUG_SPARE_CFG = 0x284C,
@@ -110,6 +113,9 @@ enum {
/* bit definition for UFS_UFS_TEST_BUS_CTRL_n */
#define TEST_BUS_SUB_SEL_MASK GENMASK(4, 0) /* All XXX_SEL fields are 5 bits wide */
+/* bit definition for UFS Shared ICE config */
+#define UFS_QCOM_CAP_ICE_CONFIG BIT(0)
+
#define REG_UFS_CFG2_CGC_EN_ALL (UAWM_HW_CGC_EN | UARM_HW_CGC_EN |\
TXUC_HW_CGC_EN | RXUC_HW_CGC_EN |\
DFC_HW_CGC_EN | TRLUT_HW_CGC_EN |\
@@ -135,6 +141,37 @@ enum {
#define UNIPRO_CORE_CLK_FREQ_201_5_MHZ 202
#define UNIPRO_CORE_CLK_FREQ_403_MHZ 403
+/* ICE allocator type to share AES engines among TX stream and RX stream */
+#define ICE_ALLOCATOR_TYPE 2
+
+/*
+ * Number of cores allocated for RX stream when Read data block received and
+ * Write data block is not in progress
+ */
+#define NUM_RX_R1W0 28
+
+/*
+ * Number of cores allocated for TX stream when Device asked to send write
+ * data block and Read data block is not in progress
+ */
+#define NUM_TX_R0W1 28
+
+/*
+ * Number of cores allocated for RX stream when Read data block received and
+ * Write data block is in progress
+ * OR
+ * Device asked to send write data block and Read data block is in progress
+ */
+#define NUM_RX_R1W1 15
+
+/*
+ * Number of cores allocated for TX stream (UFS write) when Read data block
+ * received and Write data block is in progress
+ * OR
+ * Device asked to send write data block and Read data block is in progress
+ */
+#define NUM_TX_R1W1 13
+
static inline void
ufs_qcom_get_controller_revision(struct ufs_hba *hba,
u8 *major, u16 *minor, u16 *step)
@@ -196,7 +233,7 @@ struct ufs_qcom_host {
#ifdef CONFIG_SCSI_UFS_CRYPTO
struct qcom_ice *ice;
#endif
-
+ u32 caps;
void __iomem *dev_ref_clk_ctrl_mmio;
bool is_dev_ref_clk_enabled;
struct ufs_hw_version hw_ver;
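
For clarity, a worked example of the packing done by ufs_qcom_config_ice_allocator() with the defaults above (a sketch, not part of the patch): the four per-stream core counts are read little-endian, so the value written to REG_UFS_MEM_ICE_NUM_CORE comes out as 0x0d0f1c1c.

#include <linux/unaligned.h>

static u32 ice_pack_default_core_counts(void)
{
	/* {28, 28, 15, 13} == {0x1c, 0x1c, 0x0f, 0x0d}, LSB first */
	static const u8 val[4] = { NUM_RX_R1W0, NUM_TX_R0W1, NUM_RX_R1W1, NUM_TX_R1W1 };

	return get_unaligned_le32(val);	/* 0x0d0f1c1c */
}
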
diff --git a/drivers/ufs/host/ufs-renesas.c b/drivers/ufs/host/ufs-renesas.c
index 03cd82db751b..5bf7d0e77ad8 100644
--- a/drivers/ufs/host/ufs-renesas.c
+++ b/drivers/ufs/host/ufs-renesas.c
@@ -9,324 +9,408 @@
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
+#include <linux/firmware.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/nvmem-consumer.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <linux/sys_soc.h>
#include <ufs/ufshcd.h>
#include "ufshcd-pltfrm.h"
+#define EFUSE_CALIB_SIZE 8
+
struct ufs_renesas_priv {
+ const struct firmware *fw;
+ void (*pre_init)(struct ufs_hba *hba);
bool initialized; /* The hardware needs initialization once */
+ u8 calib[EFUSE_CALIB_SIZE];
};
-enum {
- SET_PHY_INDEX_LO = 0,
- SET_PHY_INDEX_HI,
- TIMER_INDEX,
- MAX_INDEX
-};
+#define UFS_RENESAS_FIRMWARE_NAME "r8a779f0_ufs.bin"
+MODULE_FIRMWARE(UFS_RENESAS_FIRMWARE_NAME);
-enum ufs_renesas_init_param_mode {
- MODE_RESTORE,
- MODE_SET,
- MODE_SAVE,
- MODE_POLL,
- MODE_WAIT,
- MODE_WRITE,
-};
+static void ufs_renesas_dbg_register_dump(struct ufs_hba *hba)
+{
+ ufshcd_dump_regs(hba, 0xc0, 0x40, "regs: 0xc0 + ");
+}
-#define PARAM_RESTORE(_reg, _index) \
- { .mode = MODE_RESTORE, .reg = _reg, .index = _index }
-#define PARAM_SET(_index, _set) \
- { .mode = MODE_SET, .index = _index, .u.set = _set }
-#define PARAM_SAVE(_reg, _mask, _index) \
- { .mode = MODE_SAVE, .reg = _reg, .mask = (u32)(_mask), \
- .index = _index }
-#define PARAM_POLL(_reg, _expected, _mask) \
- { .mode = MODE_POLL, .reg = _reg, .u.expected = _expected, \
- .mask = (u32)(_mask) }
-#define PARAM_WAIT(_delay_us) \
- { .mode = MODE_WAIT, .u.delay_us = _delay_us }
-
-#define PARAM_WRITE(_reg, _val) \
- { .mode = MODE_WRITE, .reg = _reg, .u.val = _val }
-
-#define PARAM_WRITE_D0_D4(_d0, _d4) \
- PARAM_WRITE(0xd0, _d0), PARAM_WRITE(0xd4, _d4)
-
-#define PARAM_WRITE_800_80C_POLL(_addr, _data_800) \
- PARAM_WRITE_D0_D4(0x0000080c, 0x00000100), \
- PARAM_WRITE_D0_D4(0x00000800, ((_data_800) << 16) | BIT(8) | (_addr)), \
- PARAM_WRITE(0xd0, 0x0000080c), \
- PARAM_POLL(0xd4, BIT(8), BIT(8))
-
-#define PARAM_RESTORE_800_80C_POLL(_index) \
- PARAM_WRITE_D0_D4(0x0000080c, 0x00000100), \
- PARAM_WRITE(0xd0, 0x00000800), \
- PARAM_RESTORE(0xd4, _index), \
- PARAM_WRITE(0xd0, 0x0000080c), \
- PARAM_POLL(0xd4, BIT(8), BIT(8))
-
-#define PARAM_WRITE_804_80C_POLL(_addr, _data_804) \
- PARAM_WRITE_D0_D4(0x0000080c, 0x00000100), \
- PARAM_WRITE_D0_D4(0x00000804, ((_data_804) << 16) | BIT(8) | (_addr)), \
- PARAM_WRITE(0xd0, 0x0000080c), \
- PARAM_POLL(0xd4, BIT(8), BIT(8))
-
-#define PARAM_WRITE_828_82C_POLL(_data_828) \
- PARAM_WRITE_D0_D4(0x0000082c, 0x0f000000), \
- PARAM_WRITE_D0_D4(0x00000828, _data_828), \
- PARAM_WRITE(0xd0, 0x0000082c), \
- PARAM_POLL(0xd4, _data_828, _data_828)
-
-#define PARAM_WRITE_PHY(_addr16, _data16) \
- PARAM_WRITE(0xf0, 1), \
- PARAM_WRITE_800_80C_POLL(0x16, (_addr16) & 0xff), \
- PARAM_WRITE_800_80C_POLL(0x17, ((_addr16) >> 8) & 0xff), \
- PARAM_WRITE_800_80C_POLL(0x18, (_data16) & 0xff), \
- PARAM_WRITE_800_80C_POLL(0x19, ((_data16) >> 8) & 0xff), \
- PARAM_WRITE_800_80C_POLL(0x1c, 0x01), \
- PARAM_WRITE_828_82C_POLL(0x0f000000), \
- PARAM_WRITE(0xf0, 0)
-
-#define PARAM_SET_PHY(_addr16, _data16) \
- PARAM_WRITE(0xf0, 1), \
- PARAM_WRITE_800_80C_POLL(0x16, (_addr16) & 0xff), \
- PARAM_WRITE_800_80C_POLL(0x17, ((_addr16) >> 8) & 0xff), \
- PARAM_WRITE_800_80C_POLL(0x1c, 0x01), \
- PARAM_WRITE_828_82C_POLL(0x0f000000), \
- PARAM_WRITE_804_80C_POLL(0x1a, 0), \
- PARAM_WRITE(0xd0, 0x00000808), \
- PARAM_SAVE(0xd4, 0xff, SET_PHY_INDEX_LO), \
- PARAM_WRITE_804_80C_POLL(0x1b, 0), \
- PARAM_WRITE(0xd0, 0x00000808), \
- PARAM_SAVE(0xd4, 0xff, SET_PHY_INDEX_HI), \
- PARAM_WRITE_828_82C_POLL(0x0f000000), \
- PARAM_WRITE(0xf0, 0), \
- PARAM_WRITE(0xf0, 1), \
- PARAM_WRITE_800_80C_POLL(0x16, (_addr16) & 0xff), \
- PARAM_WRITE_800_80C_POLL(0x17, ((_addr16) >> 8) & 0xff), \
- PARAM_SET(SET_PHY_INDEX_LO, ((_data16 & 0xff) << 16) | BIT(8) | 0x18), \
- PARAM_RESTORE_800_80C_POLL(SET_PHY_INDEX_LO), \
- PARAM_SET(SET_PHY_INDEX_HI, (((_data16 >> 8) & 0xff) << 16) | BIT(8) | 0x19), \
- PARAM_RESTORE_800_80C_POLL(SET_PHY_INDEX_HI), \
- PARAM_WRITE_800_80C_POLL(0x1c, 0x01), \
- PARAM_WRITE_828_82C_POLL(0x0f000000), \
- PARAM_WRITE(0xf0, 0)
-
-#define PARAM_INDIRECT_WRITE(_gpio, _addr, _data_800) \
- PARAM_WRITE(0xf0, _gpio), \
- PARAM_WRITE_800_80C_POLL(_addr, _data_800), \
- PARAM_WRITE_828_82C_POLL(0x0f000000), \
- PARAM_WRITE(0xf0, 0)
-
-#define PARAM_INDIRECT_POLL(_gpio, _addr, _expected, _mask) \
- PARAM_WRITE(0xf0, _gpio), \
- PARAM_WRITE_800_80C_POLL(_addr, 0), \
- PARAM_WRITE(0xd0, 0x00000808), \
- PARAM_POLL(0xd4, _expected, _mask), \
- PARAM_WRITE(0xf0, 0)
-
-struct ufs_renesas_init_param {
- enum ufs_renesas_init_param_mode mode;
- u32 reg;
- union {
- u32 expected;
- u32 delay_us;
- u32 set;
- u32 val;
- } u;
- u32 mask;
- u32 index;
-};
+static void ufs_renesas_poll(struct ufs_hba *hba, u32 reg, u32 expected, u32 mask)
+{
+ int ret;
+ u32 val;
-/* This setting is for SERIES B */
-static const struct ufs_renesas_init_param ufs_param[] = {
- PARAM_WRITE(0xc0, 0x49425308),
- PARAM_WRITE_D0_D4(0x00000104, 0x00000002),
- PARAM_WAIT(1),
- PARAM_WRITE_D0_D4(0x00000828, 0x00000200),
- PARAM_WAIT(1),
- PARAM_WRITE_D0_D4(0x00000828, 0x00000000),
- PARAM_WRITE_D0_D4(0x00000104, 0x00000001),
- PARAM_WRITE_D0_D4(0x00000940, 0x00000001),
- PARAM_WAIT(1),
- PARAM_WRITE_D0_D4(0x00000940, 0x00000000),
-
- PARAM_WRITE(0xc0, 0x49425308),
- PARAM_WRITE(0xc0, 0x41584901),
-
- PARAM_WRITE_D0_D4(0x0000080c, 0x00000100),
- PARAM_WRITE_D0_D4(0x00000804, 0x00000000),
- PARAM_WRITE(0xd0, 0x0000080c),
- PARAM_POLL(0xd4, BIT(8), BIT(8)),
-
- PARAM_WRITE(REG_CONTROLLER_ENABLE, 0x00000001),
-
- PARAM_WRITE(0xd0, 0x00000804),
- PARAM_POLL(0xd4, BIT(8) | BIT(6) | BIT(0), BIT(8) | BIT(6) | BIT(0)),
-
- PARAM_WRITE(0xd0, 0x00000d00),
- PARAM_SAVE(0xd4, 0x0000ffff, TIMER_INDEX),
- PARAM_WRITE(0xd4, 0x00000000),
- PARAM_WRITE_D0_D4(0x0000082c, 0x0f000000),
- PARAM_WRITE_D0_D4(0x00000828, 0x08000000),
- PARAM_WRITE(0xd0, 0x0000082c),
- PARAM_POLL(0xd4, BIT(27), BIT(27)),
- PARAM_WRITE(0xd0, 0x00000d2c),
- PARAM_POLL(0xd4, BIT(0), BIT(0)),
+ ret = readl_poll_timeout_atomic(hba->mmio_base + reg,
+ val, (val & mask) == expected,
+ 10, 1000);
+ if (ret)
+ dev_err(hba->dev, "%s: poll failed %d (%08x, %08x, %08x)\n",
+ __func__, ret, val, mask, expected);
+}
- /* phy setup */
- PARAM_INDIRECT_WRITE(1, 0x01, 0x001f),
- PARAM_INDIRECT_WRITE(7, 0x5d, 0x0014),
- PARAM_INDIRECT_WRITE(7, 0x5e, 0x0014),
- PARAM_INDIRECT_WRITE(7, 0x0d, 0x0003),
- PARAM_INDIRECT_WRITE(7, 0x0e, 0x0007),
- PARAM_INDIRECT_WRITE(7, 0x5f, 0x0003),
- PARAM_INDIRECT_WRITE(7, 0x60, 0x0003),
- PARAM_INDIRECT_WRITE(7, 0x5b, 0x00a6),
- PARAM_INDIRECT_WRITE(7, 0x5c, 0x0003),
-
- PARAM_INDIRECT_POLL(7, 0x3c, 0, BIT(7)),
- PARAM_INDIRECT_POLL(7, 0x4c, 0, BIT(4)),
-
- PARAM_INDIRECT_WRITE(1, 0x32, 0x0080),
- PARAM_INDIRECT_WRITE(1, 0x1f, 0x0001),
- PARAM_INDIRECT_WRITE(0, 0x2c, 0x0001),
- PARAM_INDIRECT_WRITE(0, 0x32, 0x0087),
-
- PARAM_INDIRECT_WRITE(1, 0x4d, 0x0061),
- PARAM_INDIRECT_WRITE(4, 0x9b, 0x0009),
- PARAM_INDIRECT_WRITE(4, 0xa6, 0x0005),
- PARAM_INDIRECT_WRITE(4, 0xa5, 0x0058),
- PARAM_INDIRECT_WRITE(1, 0x39, 0x0027),
- PARAM_INDIRECT_WRITE(1, 0x47, 0x004c),
-
- PARAM_INDIRECT_WRITE(7, 0x0d, 0x0002),
- PARAM_INDIRECT_WRITE(7, 0x0e, 0x0007),
-
- PARAM_WRITE_PHY(0x0028, 0x0061),
- PARAM_WRITE_PHY(0x4014, 0x0061),
- PARAM_SET_PHY(0x401c, BIT(2)),
- PARAM_WRITE_PHY(0x4000, 0x0000),
- PARAM_WRITE_PHY(0x4001, 0x0000),
-
- PARAM_WRITE_PHY(0x10ae, 0x0001),
- PARAM_WRITE_PHY(0x10ad, 0x0000),
- PARAM_WRITE_PHY(0x10af, 0x0001),
- PARAM_WRITE_PHY(0x10b6, 0x0001),
- PARAM_WRITE_PHY(0x10ae, 0x0000),
-
- PARAM_WRITE_PHY(0x10ae, 0x0001),
- PARAM_WRITE_PHY(0x10ad, 0x0000),
- PARAM_WRITE_PHY(0x10af, 0x0002),
- PARAM_WRITE_PHY(0x10b6, 0x0001),
- PARAM_WRITE_PHY(0x10ae, 0x0000),
-
- PARAM_WRITE_PHY(0x10ae, 0x0001),
- PARAM_WRITE_PHY(0x10ad, 0x0080),
- PARAM_WRITE_PHY(0x10af, 0x0000),
- PARAM_WRITE_PHY(0x10b6, 0x0001),
- PARAM_WRITE_PHY(0x10ae, 0x0000),
-
- PARAM_WRITE_PHY(0x10ae, 0x0001),
- PARAM_WRITE_PHY(0x10ad, 0x0080),
- PARAM_WRITE_PHY(0x10af, 0x001a),
- PARAM_WRITE_PHY(0x10b6, 0x0001),
- PARAM_WRITE_PHY(0x10ae, 0x0000),
-
- PARAM_INDIRECT_WRITE(7, 0x70, 0x0016),
- PARAM_INDIRECT_WRITE(7, 0x71, 0x0016),
- PARAM_INDIRECT_WRITE(7, 0x72, 0x0014),
- PARAM_INDIRECT_WRITE(7, 0x73, 0x0014),
- PARAM_INDIRECT_WRITE(7, 0x74, 0x0000),
- PARAM_INDIRECT_WRITE(7, 0x75, 0x0000),
- PARAM_INDIRECT_WRITE(7, 0x76, 0x0010),
- PARAM_INDIRECT_WRITE(7, 0x77, 0x0010),
- PARAM_INDIRECT_WRITE(7, 0x78, 0x00ff),
- PARAM_INDIRECT_WRITE(7, 0x79, 0x0000),
-
- PARAM_INDIRECT_WRITE(7, 0x19, 0x0007),
-
- PARAM_INDIRECT_WRITE(7, 0x1a, 0x0007),
-
- PARAM_INDIRECT_WRITE(7, 0x24, 0x000c),
-
- PARAM_INDIRECT_WRITE(7, 0x25, 0x000c),
-
- PARAM_INDIRECT_WRITE(7, 0x62, 0x0000),
- PARAM_INDIRECT_WRITE(7, 0x63, 0x0000),
- PARAM_INDIRECT_WRITE(7, 0x5d, 0x0014),
- PARAM_INDIRECT_WRITE(7, 0x5e, 0x0017),
- PARAM_INDIRECT_WRITE(7, 0x5d, 0x0004),
- PARAM_INDIRECT_WRITE(7, 0x5e, 0x0017),
- PARAM_INDIRECT_POLL(7, 0x55, 0, BIT(6)),
- PARAM_INDIRECT_POLL(7, 0x41, 0, BIT(7)),
- /* end of phy setup */
+static u32 ufs_renesas_read(struct ufs_hba *hba, u32 reg)
+{
+ return ufshcd_readl(hba, reg);
+}
- PARAM_WRITE(0xf0, 0),
- PARAM_WRITE(0xd0, 0x00000d00),
- PARAM_RESTORE(0xd4, TIMER_INDEX),
-};
+static void ufs_renesas_write(struct ufs_hba *hba, u32 reg, u32 value)
+{
+ ufshcd_writel(hba, value, reg);
+}
-static void ufs_renesas_dbg_register_dump(struct ufs_hba *hba)
+static void ufs_renesas_write_d0_d4(struct ufs_hba *hba, u32 data_d0, u32 data_d4)
{
- ufshcd_dump_regs(hba, 0xc0, 0x40, "regs: 0xc0 + ");
+ ufs_renesas_write(hba, 0xd0, data_d0);
+ ufs_renesas_write(hba, 0xd4, data_d4);
}
-static void ufs_renesas_reg_control(struct ufs_hba *hba,
- const struct ufs_renesas_init_param *p)
+static void ufs_renesas_write_800_80c_poll(struct ufs_hba *hba, u32 addr,
+ u32 data_800)
{
- static u32 save[MAX_INDEX];
- int ret;
- u32 val;
+ ufs_renesas_write_d0_d4(hba, 0x0000080c, 0x00000100);
+ ufs_renesas_write_d0_d4(hba, 0x00000800, (data_800 << 16) | BIT(8) | addr);
+ ufs_renesas_write(hba, 0xd0, 0x0000080c);
+ ufs_renesas_poll(hba, 0xd4, BIT(8), BIT(8));
+}
+
+static void ufs_renesas_write_804_80c_poll(struct ufs_hba *hba, u32 addr, u32 data_804)
+{
+ ufs_renesas_write_d0_d4(hba, 0x0000080c, 0x00000100);
+ ufs_renesas_write_d0_d4(hba, 0x00000804, (data_804 << 16) | BIT(8) | addr);
+ ufs_renesas_write(hba, 0xd0, 0x0000080c);
+ ufs_renesas_poll(hba, 0xd4, BIT(8), BIT(8));
+}
+
+static void ufs_renesas_write_828_82c_poll(struct ufs_hba *hba, u32 data_828)
+{
+ ufs_renesas_write_d0_d4(hba, 0x0000082c, 0x0f000000);
+ ufs_renesas_write_d0_d4(hba, 0x00000828, data_828);
+ ufs_renesas_write(hba, 0xd0, 0x0000082c);
+ ufs_renesas_poll(hba, 0xd4, data_828, data_828);
+}
+
+static void ufs_renesas_write_phy(struct ufs_hba *hba, u32 addr16, u32 data16)
+{
+ ufs_renesas_write(hba, 0xf0, 1);
+ ufs_renesas_write_800_80c_poll(hba, 0x16, addr16 & 0xff);
+ ufs_renesas_write_800_80c_poll(hba, 0x17, (addr16 >> 8) & 0xff);
+ ufs_renesas_write_800_80c_poll(hba, 0x18, data16 & 0xff);
+ ufs_renesas_write_800_80c_poll(hba, 0x19, (data16 >> 8) & 0xff);
+ ufs_renesas_write_800_80c_poll(hba, 0x1c, 0x01);
+ ufs_renesas_write_828_82c_poll(hba, 0x0f000000);
+ ufs_renesas_write(hba, 0xf0, 0);
+}
+
+static void ufs_renesas_set_phy(struct ufs_hba *hba, u32 addr16, u32 data16)
+{
+ u32 low, high;
+
+ ufs_renesas_write(hba, 0xf0, 1);
+ ufs_renesas_write_800_80c_poll(hba, 0x16, addr16 & 0xff);
+ ufs_renesas_write_800_80c_poll(hba, 0x17, (addr16 >> 8) & 0xff);
+ ufs_renesas_write_800_80c_poll(hba, 0x1c, 0x01);
+ ufs_renesas_write_828_82c_poll(hba, 0x0f000000);
+ ufs_renesas_write_804_80c_poll(hba, 0x1a, 0);
+ ufs_renesas_write(hba, 0xd0, 0x00000808);
+ low = ufs_renesas_read(hba, 0xd4) & 0xff;
+ ufs_renesas_write_804_80c_poll(hba, 0x1b, 0);
+ ufs_renesas_write(hba, 0xd0, 0x00000808);
+ high = ufs_renesas_read(hba, 0xd4) & 0xff;
+ ufs_renesas_write_828_82c_poll(hba, 0x0f000000);
+ ufs_renesas_write(hba, 0xf0, 0);
+
+ data16 |= (high << 8) | low;
+ ufs_renesas_write_phy(hba, addr16, data16);
+}
+
+static void ufs_renesas_reset_indirect_write(struct ufs_hba *hba, int gpio,
+ u32 addr, u32 data)
+{
+ ufs_renesas_write(hba, 0xf0, gpio);
+ ufs_renesas_write_800_80c_poll(hba, addr, data);
+}
+
+static void ufs_renesas_reset_indirect_update(struct ufs_hba *hba)
+{
+ ufs_renesas_write_d0_d4(hba, 0x0000082c, 0x0f000000);
+ ufs_renesas_write_d0_d4(hba, 0x00000828, 0x0f000000);
+ ufs_renesas_write(hba, 0xd0, 0x0000082c);
+ ufs_renesas_poll(hba, 0xd4, BIT(27) | BIT(26) | BIT(24), BIT(27) | BIT(26) | BIT(24));
+ ufs_renesas_write(hba, 0xf0, 0);
+}
+
+static void ufs_renesas_indirect_write(struct ufs_hba *hba, u32 gpio, u32 addr,
+ u32 data_800)
+{
+ ufs_renesas_write(hba, 0xf0, gpio);
+ ufs_renesas_write_800_80c_poll(hba, addr, data_800);
+ ufs_renesas_write_828_82c_poll(hba, 0x0f000000);
+ ufs_renesas_write(hba, 0xf0, 0);
+}
+
+static void ufs_renesas_indirect_poll(struct ufs_hba *hba, u32 gpio, u32 addr,
+ u32 expected, u32 mask)
+{
+ ufs_renesas_write(hba, 0xf0, gpio);
+ ufs_renesas_write_800_80c_poll(hba, addr, 0);
+ ufs_renesas_write(hba, 0xd0, 0x00000808);
+ ufs_renesas_poll(hba, 0xd4, expected, mask);
+ ufs_renesas_write(hba, 0xf0, 0);
+}
+
+static void ufs_renesas_init_step1_to_3(struct ufs_hba *hba, bool init108)
+{
+ ufs_renesas_write(hba, 0xc0, 0x49425308);
+ ufs_renesas_write_d0_d4(hba, 0x00000104, 0x00000002);
+ if (init108)
+ ufs_renesas_write_d0_d4(hba, 0x00000108, 0x00000002);
+ udelay(1);
+ ufs_renesas_write_d0_d4(hba, 0x00000828, 0x00000200);
+ udelay(1);
+ ufs_renesas_write_d0_d4(hba, 0x00000828, 0x00000000);
+ ufs_renesas_write_d0_d4(hba, 0x00000104, 0x00000001);
+ if (init108)
+ ufs_renesas_write_d0_d4(hba, 0x00000108, 0x00000001);
+ ufs_renesas_write_d0_d4(hba, 0x00000940, 0x00000001);
+ udelay(1);
+ ufs_renesas_write_d0_d4(hba, 0x00000940, 0x00000000);
+
+ ufs_renesas_write(hba, 0xc0, 0x49425308);
+ ufs_renesas_write(hba, 0xc0, 0x41584901);
+}
+
+static void ufs_renesas_init_step4_to_6(struct ufs_hba *hba)
+{
+ ufs_renesas_write_d0_d4(hba, 0x0000080c, 0x00000100);
+ ufs_renesas_write_d0_d4(hba, 0x00000804, 0x00000000);
+ ufs_renesas_write(hba, 0xd0, 0x0000080c);
+ ufs_renesas_poll(hba, 0xd4, BIT(8), BIT(8));
- WARN_ON(p->index >= MAX_INDEX);
-
- switch (p->mode) {
- case MODE_RESTORE:
- ufshcd_writel(hba, save[p->index], p->reg);
- break;
- case MODE_SET:
- save[p->index] |= p->u.set;
- break;
- case MODE_SAVE:
- save[p->index] = ufshcd_readl(hba, p->reg) & p->mask;
- break;
- case MODE_POLL:
- ret = readl_poll_timeout_atomic(hba->mmio_base + p->reg,
- val,
- (val & p->mask) == p->u.expected,
- 10, 1000);
- if (ret)
- dev_err(hba->dev, "%s: poll failed %d (%08x, %08x, %08x)\n",
- __func__, ret, val, p->mask, p->u.expected);
- break;
- case MODE_WAIT:
- if (p->u.delay_us > 1000)
- mdelay(DIV_ROUND_UP(p->u.delay_us, 1000));
- else
- udelay(p->u.delay_us);
- break;
- case MODE_WRITE:
- ufshcd_writel(hba, p->u.val, p->reg);
- break;
- default:
- break;
+ ufs_renesas_write(hba, REG_CONTROLLER_ENABLE, 0x00000001);
+
+ ufs_renesas_write(hba, 0xd0, 0x00000804);
+ ufs_renesas_poll(hba, 0xd4, BIT(8) | BIT(6) | BIT(0), BIT(8) | BIT(6) | BIT(0));
+}
+
+static u32 ufs_renesas_init_disable_timer(struct ufs_hba *hba)
+{
+ u32 timer_val;
+
+ ufs_renesas_write(hba, 0xd0, 0x00000d00);
+ timer_val = ufs_renesas_read(hba, 0xd4) & 0x0000ffff;
+ ufs_renesas_write(hba, 0xd4, 0x00000000);
+ ufs_renesas_write_d0_d4(hba, 0x0000082c, 0x0f000000);
+ ufs_renesas_write_d0_d4(hba, 0x00000828, 0x08000000);
+ ufs_renesas_write(hba, 0xd0, 0x0000082c);
+ ufs_renesas_poll(hba, 0xd4, BIT(27), BIT(27));
+ ufs_renesas_write(hba, 0xd0, 0x00000d2c);
+ ufs_renesas_poll(hba, 0xd4, BIT(0), BIT(0));
+
+ return timer_val;
+}
+
+static void ufs_renesas_init_enable_timer(struct ufs_hba *hba, u32 timer_val)
+{
+ ufs_renesas_write(hba, 0xf0, 0);
+ ufs_renesas_write(hba, 0xd0, 0x00000d00);
+ ufs_renesas_write(hba, 0xd4, timer_val);
+}
+
+static void ufs_renesas_write_phy_10ad_10af(struct ufs_hba *hba,
+ u32 data_10ad, u32 data_10af)
+{
+ ufs_renesas_write_phy(hba, 0x10ae, 0x0001);
+ ufs_renesas_write_phy(hba, 0x10ad, data_10ad);
+ ufs_renesas_write_phy(hba, 0x10af, data_10af);
+ ufs_renesas_write_phy(hba, 0x10b6, 0x0001);
+ ufs_renesas_write_phy(hba, 0x10ae, 0x0000);
+}
+
+static void ufs_renesas_init_compensation_and_slicers(struct ufs_hba *hba)
+{
+ ufs_renesas_write_phy_10ad_10af(hba, 0x0000, 0x0001);
+ ufs_renesas_write_phy_10ad_10af(hba, 0x0000, 0x0002);
+ ufs_renesas_write_phy_10ad_10af(hba, 0x0080, 0x0000);
+ ufs_renesas_write_phy_10ad_10af(hba, 0x0080, 0x001a);
+}
+
+static void ufs_renesas_r8a779f0_es10_pre_init(struct ufs_hba *hba)
+{
+ u32 timer_val;
+
+ /* This setting is for SERIES B */
+ ufs_renesas_init_step1_to_3(hba, false);
+
+ ufs_renesas_init_step4_to_6(hba);
+
+ timer_val = ufs_renesas_init_disable_timer(hba);
+
+ /* phy setup */
+ ufs_renesas_indirect_write(hba, 1, 0x01, 0x001f);
+ ufs_renesas_indirect_write(hba, 7, 0x5d, 0x0014);
+ ufs_renesas_indirect_write(hba, 7, 0x5e, 0x0014);
+ ufs_renesas_indirect_write(hba, 7, 0x0d, 0x0003);
+ ufs_renesas_indirect_write(hba, 7, 0x0e, 0x0007);
+ ufs_renesas_indirect_write(hba, 7, 0x5f, 0x0003);
+ ufs_renesas_indirect_write(hba, 7, 0x60, 0x0003);
+ ufs_renesas_indirect_write(hba, 7, 0x5b, 0x00a6);
+ ufs_renesas_indirect_write(hba, 7, 0x5c, 0x0003);
+
+ ufs_renesas_indirect_poll(hba, 7, 0x3c, 0, BIT(7));
+ ufs_renesas_indirect_poll(hba, 7, 0x4c, 0, BIT(4));
+
+ ufs_renesas_indirect_write(hba, 1, 0x32, 0x0080);
+ ufs_renesas_indirect_write(hba, 1, 0x1f, 0x0001);
+ ufs_renesas_indirect_write(hba, 0, 0x2c, 0x0001);
+ ufs_renesas_indirect_write(hba, 0, 0x32, 0x0087);
+
+ ufs_renesas_indirect_write(hba, 1, 0x4d, 0x0061);
+ ufs_renesas_indirect_write(hba, 4, 0x9b, 0x0009);
+ ufs_renesas_indirect_write(hba, 4, 0xa6, 0x0005);
+ ufs_renesas_indirect_write(hba, 4, 0xa5, 0x0058);
+ ufs_renesas_indirect_write(hba, 1, 0x39, 0x0027);
+ ufs_renesas_indirect_write(hba, 1, 0x47, 0x004c);
+
+ ufs_renesas_indirect_write(hba, 7, 0x0d, 0x0002);
+ ufs_renesas_indirect_write(hba, 7, 0x0e, 0x0007);
+
+ ufs_renesas_write_phy(hba, 0x0028, 0x0061);
+ ufs_renesas_write_phy(hba, 0x4014, 0x0061);
+ ufs_renesas_set_phy(hba, 0x401c, BIT(2));
+ ufs_renesas_write_phy(hba, 0x4000, 0x0000);
+ ufs_renesas_write_phy(hba, 0x4001, 0x0000);
+
+ ufs_renesas_init_compensation_and_slicers(hba);
+
+ ufs_renesas_indirect_write(hba, 7, 0x70, 0x0016);
+ ufs_renesas_indirect_write(hba, 7, 0x71, 0x0016);
+ ufs_renesas_indirect_write(hba, 7, 0x72, 0x0014);
+ ufs_renesas_indirect_write(hba, 7, 0x73, 0x0014);
+ ufs_renesas_indirect_write(hba, 7, 0x74, 0x0000);
+ ufs_renesas_indirect_write(hba, 7, 0x75, 0x0000);
+ ufs_renesas_indirect_write(hba, 7, 0x76, 0x0010);
+ ufs_renesas_indirect_write(hba, 7, 0x77, 0x0010);
+ ufs_renesas_indirect_write(hba, 7, 0x78, 0x00ff);
+ ufs_renesas_indirect_write(hba, 7, 0x79, 0x0000);
+
+ ufs_renesas_indirect_write(hba, 7, 0x19, 0x0007);
+ ufs_renesas_indirect_write(hba, 7, 0x1a, 0x0007);
+ ufs_renesas_indirect_write(hba, 7, 0x24, 0x000c);
+ ufs_renesas_indirect_write(hba, 7, 0x25, 0x000c);
+ ufs_renesas_indirect_write(hba, 7, 0x62, 0x0000);
+ ufs_renesas_indirect_write(hba, 7, 0x63, 0x0000);
+ ufs_renesas_indirect_write(hba, 7, 0x5d, 0x0014);
+ ufs_renesas_indirect_write(hba, 7, 0x5e, 0x0017);
+ ufs_renesas_indirect_write(hba, 7, 0x5d, 0x0004);
+ ufs_renesas_indirect_write(hba, 7, 0x5e, 0x0017);
+ ufs_renesas_indirect_poll(hba, 7, 0x55, 0, BIT(6));
+ ufs_renesas_indirect_poll(hba, 7, 0x41, 0, BIT(7));
+ /* end of phy setup */
+
+ ufs_renesas_init_enable_timer(hba, timer_val);
+}
+
+static void ufs_renesas_r8a779f0_init_step3_add(struct ufs_hba *hba, bool assert)
+{
+ u32 val_2x = 0, val_3x = 0, val_4x = 0;
+
+ if (assert) {
+ val_2x = 0x0001;
+ val_3x = 0x0003;
+ val_4x = 0x0001;
}
+
+ ufs_renesas_reset_indirect_write(hba, 7, 0x20, val_2x);
+ ufs_renesas_reset_indirect_write(hba, 7, 0x4a, val_4x);
+ ufs_renesas_reset_indirect_write(hba, 7, 0x35, val_3x);
+ ufs_renesas_reset_indirect_update(hba);
+ ufs_renesas_reset_indirect_write(hba, 7, 0x21, val_2x);
+ ufs_renesas_reset_indirect_write(hba, 7, 0x4b, val_4x);
+ ufs_renesas_reset_indirect_write(hba, 7, 0x36, val_3x);
+ ufs_renesas_reset_indirect_update(hba);
}
-static void ufs_renesas_pre_init(struct ufs_hba *hba)
+static void ufs_renesas_r8a779f0_pre_init(struct ufs_hba *hba)
{
- const struct ufs_renesas_init_param *p = ufs_param;
- unsigned int i;
+ struct ufs_renesas_priv *priv = ufshcd_get_variant(hba);
+ u32 timer_val;
+ u32 data;
+ int i;
+
+ /* This setting is for SERIES B */
+ ufs_renesas_init_step1_to_3(hba, true);
+
+ ufs_renesas_r8a779f0_init_step3_add(hba, true);
+ ufs_renesas_reset_indirect_write(hba, 7, 0x5f, 0x0063);
+ ufs_renesas_reset_indirect_update(hba);
+ ufs_renesas_reset_indirect_write(hba, 7, 0x60, 0x0003);
+ ufs_renesas_reset_indirect_update(hba);
+ ufs_renesas_reset_indirect_write(hba, 7, 0x5b, 0x00a6);
+ ufs_renesas_reset_indirect_update(hba);
+ ufs_renesas_reset_indirect_write(hba, 7, 0x5c, 0x0003);
+ ufs_renesas_reset_indirect_update(hba);
+ ufs_renesas_r8a779f0_init_step3_add(hba, false);
+
+ ufs_renesas_init_step4_to_6(hba);
+
+ timer_val = ufs_renesas_init_disable_timer(hba);
+
+ ufs_renesas_indirect_write(hba, 1, 0x01, 0x001f);
+ ufs_renesas_indirect_write(hba, 7, 0x5d, 0x0014);
+ ufs_renesas_indirect_write(hba, 7, 0x5e, 0x0014);
+ ufs_renesas_indirect_write(hba, 7, 0x0d, 0x0007);
+ ufs_renesas_indirect_write(hba, 7, 0x0e, 0x0007);
+
+ ufs_renesas_indirect_poll(hba, 7, 0x3c, 0, BIT(7));
+ ufs_renesas_indirect_poll(hba, 7, 0x4c, 0, BIT(4));
+
+ ufs_renesas_indirect_write(hba, 1, 0x32, 0x0080);
+ ufs_renesas_indirect_write(hba, 1, 0x1f, 0x0001);
+ ufs_renesas_indirect_write(hba, 1, 0x2c, 0x0001);
+ ufs_renesas_indirect_write(hba, 1, 0x32, 0x0087);
+
+ ufs_renesas_indirect_write(hba, 1, 0x4d, priv->calib[2]);
+ ufs_renesas_indirect_write(hba, 1, 0x4e, priv->calib[3]);
+ ufs_renesas_indirect_write(hba, 1, 0x0d, 0x0006);
+ ufs_renesas_indirect_write(hba, 1, 0x0e, 0x0007);
+ ufs_renesas_write_phy(hba, 0x0028, priv->calib[3]);
+ ufs_renesas_write_phy(hba, 0x4014, priv->calib[3]);
+
+ ufs_renesas_set_phy(hba, 0x401c, BIT(2));
+
+ ufs_renesas_write_phy(hba, 0x4000, priv->calib[6]);
+ ufs_renesas_write_phy(hba, 0x4001, priv->calib[7]);
+
+ ufs_renesas_indirect_write(hba, 1, 0x14, 0x0001);
+
+ ufs_renesas_init_compensation_and_slicers(hba);
+
+ ufs_renesas_indirect_write(hba, 7, 0x79, 0x0000);
+ ufs_renesas_indirect_write(hba, 7, 0x24, 0x000c);
+ ufs_renesas_indirect_write(hba, 7, 0x25, 0x000c);
+ ufs_renesas_indirect_write(hba, 7, 0x62, 0x00c0);
+ ufs_renesas_indirect_write(hba, 7, 0x63, 0x0001);
+
+ for (i = 0; i < priv->fw->size / 2; i++) {
+ data = (priv->fw->data[i * 2 + 1] << 8) | priv->fw->data[i * 2];
+ ufs_renesas_write_phy(hba, 0xc000 + i, data);
+ }
- for (i = 0; i < ARRAY_SIZE(ufs_param); i++)
- ufs_renesas_reg_control(hba, &p[i]);
+ ufs_renesas_indirect_write(hba, 7, 0x0d, 0x0002);
+ ufs_renesas_indirect_write(hba, 7, 0x0e, 0x0007);
+
+ ufs_renesas_indirect_write(hba, 7, 0x5d, 0x0014);
+ ufs_renesas_indirect_write(hba, 7, 0x5e, 0x0017);
+ ufs_renesas_indirect_write(hba, 7, 0x5d, 0x0004);
+ ufs_renesas_indirect_write(hba, 7, 0x5e, 0x0017);
+ ufs_renesas_indirect_poll(hba, 7, 0x55, 0, BIT(6));
+ ufs_renesas_indirect_poll(hba, 7, 0x41, 0, BIT(7));
+
+ ufs_renesas_init_enable_timer(hba, timer_val);
}
static int ufs_renesas_hce_enable_notify(struct ufs_hba *hba,
@@ -338,7 +422,7 @@ static int ufs_renesas_hce_enable_notify(struct ufs_hba *hba,
return 0;
if (status == PRE_CHANGE)
- ufs_renesas_pre_init(hba);
+ priv->pre_init(hba);
priv->initialized = true;
@@ -356,20 +440,78 @@ static int ufs_renesas_setup_clocks(struct ufs_hba *hba, bool on,
return 0;
}
+static const struct soc_device_attribute ufs_fallback[] = {
+ { .soc_id = "r8a779f0", .revision = "ES1.[01]" },
+ { /* Sentinel */ }
+};
+
static int ufs_renesas_init(struct ufs_hba *hba)
{
+ const struct soc_device_attribute *attr;
+ struct nvmem_cell *cell = NULL;
+ struct device *dev = hba->dev;
struct ufs_renesas_priv *priv;
+ u8 *data = NULL;
+ size_t len;
+ int ret;
- priv = devm_kzalloc(hba->dev, sizeof(*priv), GFP_KERNEL);
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
ufshcd_set_variant(hba, priv);
hba->quirks |= UFSHCD_QUIRK_HIBERN_FASTAUTO;
+ attr = soc_device_match(ufs_fallback);
+ if (attr)
+ goto fallback;
+
+ ret = request_firmware(&priv->fw, UFS_RENESAS_FIRMWARE_NAME, dev);
+ if (ret) {
+ dev_warn(dev, "Failed to load firmware\n");
+ goto fallback;
+ }
+
+ cell = nvmem_cell_get(dev, "calibration");
+ if (IS_ERR(cell)) {
+ dev_warn(dev, "No calibration data specified\n");
+ goto fallback;
+ }
+
+ data = nvmem_cell_read(cell, &len);
+ if (IS_ERR(data)) {
+ dev_warn(dev, "Failed to read calibration data: %pe\n", data);
+ goto fallback;
+ }
+
+ if (len != EFUSE_CALIB_SIZE) {
+ dev_warn(dev, "Invalid calibration data size %zu\n", len);
+ goto fallback;
+ }
+
+ memcpy(priv->calib, data, EFUSE_CALIB_SIZE);
+ priv->pre_init = ufs_renesas_r8a779f0_pre_init;
+ goto out;
+
+fallback:
+ dev_info(dev, "Using ES1.0 init code\n");
+ priv->pre_init = ufs_renesas_r8a779f0_es10_pre_init;
+
+out:
+ kfree(data);
+ if (!IS_ERR_OR_NULL(cell))
+ nvmem_cell_put(cell);
+
return 0;
}
+static void ufs_renesas_exit(struct ufs_hba *hba)
+{
+ struct ufs_renesas_priv *priv = ufshcd_get_variant(hba);
+
+ release_firmware(priv->fw);
+}
+
static int ufs_renesas_set_dma_mask(struct ufs_hba *hba)
{
return dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(32));
@@ -378,6 +520,7 @@ static int ufs_renesas_set_dma_mask(struct ufs_hba *hba)
static const struct ufs_hba_variant_ops ufs_renesas_vops = {
.name = "renesas",
.init = ufs_renesas_init,
+ .exit = ufs_renesas_exit,
.set_dma_mask = ufs_renesas_set_dma_mask,
.setup_clocks = ufs_renesas_setup_clocks,
.hce_enable_notify = ufs_renesas_hce_enable_notify,
diff --git a/drivers/ufs/host/ufs-rockchip.c b/drivers/ufs/host/ufs-rockchip.c
new file mode 100644
index 000000000000..8754085dd0cc
--- /dev/null
+++ b/drivers/ufs/host/ufs-rockchip.c
@@ -0,0 +1,354 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Rockchip UFS Host Controller driver
+ *
+ * Copyright (C) 2025 Rockchip Electronics Co., Ltd.
+ */
+
+#include <linux/clk.h>
+#include <linux/gpio.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
+#include <linux/pm_wakeup.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+
+#include <ufs/ufshcd.h>
+#include <ufs/unipro.h>
+#include "ufshcd-pltfrm.h"
+#include "ufs-rockchip.h"
+
+static int ufs_rockchip_hce_enable_notify(struct ufs_hba *hba,
+ enum ufs_notify_change_status status)
+{
+ int err = 0;
+
+ if (status == POST_CHANGE) {
+ err = ufshcd_dme_reset(hba);
+ if (err)
+ return err;
+
+ err = ufshcd_dme_enable(hba);
+ if (err)
+ return err;
+
+ return ufshcd_vops_phy_initialization(hba);
+ }
+
+ return 0;
+}
+
+static void ufs_rockchip_set_pm_lvl(struct ufs_hba *hba)
+{
+ hba->rpm_lvl = UFS_PM_LVL_5;
+ hba->spm_lvl = UFS_PM_LVL_5;
+}
+
+static int ufs_rockchip_rk3576_phy_init(struct ufs_hba *hba)
+{
+ struct ufs_rockchip_host *host = ufshcd_get_variant(hba);
+
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(PA_LOCAL_TX_LCC_ENABLE, 0x0), 0x0);
+ /* enable the mphy DME_SET cfg */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(MPHY_CFG, 0x0), MPHY_CFG_ENABLE);
+ for (int i = 0; i < 2; i++) {
+ /* Configuration M - TX */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_TX_CLK_PRD, SEL_TX_LANE0 + i), 0x06);
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_TX_CLK_PRD_EN, SEL_TX_LANE0 + i), 0x02);
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_TX_LINERESET_VALUE, SEL_TX_LANE0 + i), 0x44);
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_TX_LINERESET_PVALUE1, SEL_TX_LANE0 + i), 0xe6);
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_TX_LINERESET_PVALUE2, SEL_TX_LANE0 + i), 0x07);
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_TX_TASE_VALUE, SEL_TX_LANE0 + i), 0x93);
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_TX_BASE_NVALUE, SEL_TX_LANE0 + i), 0xc9);
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_TX_POWER_SAVING_CTRL, SEL_TX_LANE0 + i), 0x00);
+ /* Configuration M - RX */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_RX_CLK_PRD, SEL_RX_LANE0 + i), 0x06);
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_RX_CLK_PRD_EN, SEL_RX_LANE0 + i), 0x00);
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_RX_LINERESET_VALUE, SEL_RX_LANE0 + i), 0x58);
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_RX_LINERESET_PVALUE1, SEL_RX_LANE0 + i), 0x8c);
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_RX_LINERESET_PVALUE2, SEL_RX_LANE0 + i), 0x02);
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_RX_LINERESET_OPTION, SEL_RX_LANE0 + i), 0xf6);
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_RX_POWER_SAVING_CTRL, SEL_RX_LANE0 + i), 0x69);
+ }
+
+ /* disable the mphy DME_SET cfg */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(MPHY_CFG, 0x0), MPHY_CFG_DISABLE);
+
+ ufs_sys_writel(host->mphy_base, 0x80, CMN_REG23);
+ ufs_sys_writel(host->mphy_base, 0xB5, TRSV0_REG14);
+ ufs_sys_writel(host->mphy_base, 0xB5, TRSV1_REG14);
+
+ ufs_sys_writel(host->mphy_base, 0x03, TRSV0_REG15);
+ ufs_sys_writel(host->mphy_base, 0x03, TRSV1_REG15);
+
+ ufs_sys_writel(host->mphy_base, 0x38, TRSV0_REG08);
+ ufs_sys_writel(host->mphy_base, 0x38, TRSV1_REG08);
+
+ ufs_sys_writel(host->mphy_base, 0x50, TRSV0_REG29);
+ ufs_sys_writel(host->mphy_base, 0x50, TRSV1_REG29);
+
+ ufs_sys_writel(host->mphy_base, 0x80, TRSV0_REG2E);
+ ufs_sys_writel(host->mphy_base, 0x80, TRSV1_REG2E);
+
+ ufs_sys_writel(host->mphy_base, 0x18, TRSV0_REG3C);
+ ufs_sys_writel(host->mphy_base, 0x18, TRSV1_REG3C);
+
+ ufs_sys_writel(host->mphy_base, 0x03, TRSV0_REG16);
+ ufs_sys_writel(host->mphy_base, 0x03, TRSV1_REG16);
+
+ ufs_sys_writel(host->mphy_base, 0x20, TRSV0_REG17);
+ ufs_sys_writel(host->mphy_base, 0x20, TRSV1_REG17);
+
+ ufs_sys_writel(host->mphy_base, 0xC0, TRSV0_REG18);
+ ufs_sys_writel(host->mphy_base, 0xC0, TRSV1_REG18);
+
+ ufs_sys_writel(host->mphy_base, 0x03, CMN_REG25);
+
+ ufs_sys_writel(host->mphy_base, 0x03, TRSV0_REG3D);
+ ufs_sys_writel(host->mphy_base, 0x03, TRSV1_REG3D);
+
+ ufs_sys_writel(host->mphy_base, 0xC0, CMN_REG23);
+ udelay(1);
+ ufs_sys_writel(host->mphy_base, 0x00, CMN_REG23);
+
+ usleep_range(200, 250);
+ /* start link up */
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(MIB_T_DBG_CPORT_TX_ENDIAN, 0), 0x0);
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(MIB_T_DBG_CPORT_RX_ENDIAN, 0), 0x0);
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(N_DEVICEID, 0), 0x0);
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(N_DEVICEID_VALID, 0), 0x1);
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(T_PEERDEVICEID, 0), 0x1);
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(T_CONNECTIONSTATE, 0), 0x1);
+
+ return 0;
+}
+
+static int ufs_rockchip_common_init(struct ufs_hba *hba)
+{
+ struct device *dev = hba->dev;
+ struct platform_device *pdev = to_platform_device(dev);
+ struct ufs_rockchip_host *host;
+ int err;
+
+ host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
+ if (!host)
+ return -ENOMEM;
+
+ host->ufs_sys_ctrl = devm_platform_ioremap_resource_byname(pdev, "hci_grf");
+ if (IS_ERR(host->ufs_sys_ctrl))
+ return dev_err_probe(dev, PTR_ERR(host->ufs_sys_ctrl),
+ "Failed to map HCI system control registers\n");
+
+ host->ufs_phy_ctrl = devm_platform_ioremap_resource_byname(pdev, "mphy_grf");
+ if (IS_ERR(host->ufs_phy_ctrl))
+ return dev_err_probe(dev, PTR_ERR(host->ufs_phy_ctrl),
+ "Failed to map mphy system control registers\n");
+
+ host->mphy_base = devm_platform_ioremap_resource_byname(pdev, "mphy");
+ if (IS_ERR(host->mphy_base))
+ return dev_err_probe(dev, PTR_ERR(host->mphy_base),
+ "Failed to map mphy base registers\n");
+
+ host->rst = devm_reset_control_array_get_exclusive(dev);
+ if (IS_ERR(host->rst))
+ return dev_err_probe(dev, PTR_ERR(host->rst),
+ "failed to get reset control\n");
+
+ reset_control_assert(host->rst);
+ udelay(1);
+ reset_control_deassert(host->rst);
+
+ host->ref_out_clk = devm_clk_get_enabled(dev, "ref_out");
+ if (IS_ERR(host->ref_out_clk))
+ return dev_err_probe(dev, PTR_ERR(host->ref_out_clk),
+ "ref_out clock unavailable\n");
+
+ host->rst_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(host->rst_gpio))
+ return dev_err_probe(dev, PTR_ERR(host->rst_gpio),
+ "failed to get reset gpio\n");
+
+ err = devm_clk_bulk_get_all_enabled(dev, &host->clks);
+ if (err < 0)
+ return dev_err_probe(dev, err, "failed to enable clocks\n");
+
+ host->hba = hba;
+
+ ufshcd_set_variant(hba, host);
+
+ return 0;
+}
+
+static int ufs_rockchip_rk3576_init(struct ufs_hba *hba)
+{
+ struct device *dev = hba->dev;
+ int ret;
+
+ hba->quirks = UFSHCD_QUIRK_SKIP_DEF_UNIPRO_TIMEOUT_SETTING;
+
+ /* Enable BKOPS during suspend */
+ hba->caps |= UFSHCD_CAP_AUTO_BKOPS_SUSPEND;
+ /* Enable putting device into deep sleep */
+ hba->caps |= UFSHCD_CAP_DEEPSLEEP;
+ /* Enable devfreq of UFS */
+ hba->caps |= UFSHCD_CAP_CLK_SCALING;
+ /* Enable WriteBooster */
+ hba->caps |= UFSHCD_CAP_WB_EN;
+
+ /* Set the default desired pm level in case no users set via sysfs */
+ ufs_rockchip_set_pm_lvl(hba);
+
+ ret = ufs_rockchip_common_init(hba);
+ if (ret)
+ return dev_err_probe(dev, ret, "ufs common init fail\n");
+
+ return 0;
+}
+
+static int ufs_rockchip_device_reset(struct ufs_hba *hba)
+{
+ struct ufs_rockchip_host *host = ufshcd_get_variant(hba);
+
+ gpiod_set_value_cansleep(host->rst_gpio, 1);
+ usleep_range(20, 25);
+
+ gpiod_set_value_cansleep(host->rst_gpio, 0);
+ usleep_range(20, 25);
+
+ return 0;
+}
+
+static const struct ufs_hba_variant_ops ufs_hba_rk3576_vops = {
+ .name = "rk3576",
+ .init = ufs_rockchip_rk3576_init,
+ .device_reset = ufs_rockchip_device_reset,
+ .hce_enable_notify = ufs_rockchip_hce_enable_notify,
+ .phy_initialization = ufs_rockchip_rk3576_phy_init,
+};
+
+static const struct of_device_id ufs_rockchip_of_match[] = {
+ { .compatible = "rockchip,rk3576-ufshc", .data = &ufs_hba_rk3576_vops },
+ { },
+};
+MODULE_DEVICE_TABLE(of, ufs_rockchip_of_match);
+
+static int ufs_rockchip_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ const struct ufs_hba_variant_ops *vops;
+ int err;
+
+ vops = device_get_match_data(dev);
+ if (!vops)
+ return dev_err_probe(dev, -ENODATA, "ufs_hba_variant_ops not defined.\n");
+
+ err = ufshcd_pltfrm_init(pdev, vops);
+ if (err)
+ return dev_err_probe(dev, err, "ufshcd_pltfrm_init failed\n");
+
+ return 0;
+}
+
+static void ufs_rockchip_remove(struct platform_device *pdev)
+{
+ ufshcd_pltfrm_remove(pdev);
+}
+
+#ifdef CONFIG_PM
+static int ufs_rockchip_runtime_suspend(struct device *dev)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ struct ufs_rockchip_host *host = ufshcd_get_variant(hba);
+
+ clk_disable_unprepare(host->ref_out_clk);
+
+ /* Do not power down the genpd if rpm_lvl is less than level 5 */
+ dev_pm_genpd_rpm_always_on(dev, hba->rpm_lvl < UFS_PM_LVL_5);
+
+ return ufshcd_runtime_suspend(dev);
+}
+
+static int ufs_rockchip_runtime_resume(struct device *dev)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ struct ufs_rockchip_host *host = ufshcd_get_variant(hba);
+ int err;
+
+ err = clk_prepare_enable(host->ref_out_clk);
+ if (err) {
+ dev_err(hba->dev, "failed to enable ref_out clock %d\n", err);
+ return err;
+ }
+
+ reset_control_assert(host->rst);
+ udelay(1);
+ reset_control_deassert(host->rst);
+
+ return ufshcd_runtime_resume(dev);
+}
+#endif
+
+#ifdef CONFIG_PM_SLEEP
+static int ufs_rockchip_system_suspend(struct device *dev)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ struct ufs_rockchip_host *host = ufshcd_get_variant(hba);
+ int err;
+
+ /*
+ * If spm_lvl is less than level 5, the host controller must be kept in
+ * the powered-on state. device_set_awake_path() asks the PM core to
+ * notify the genpd provider of this requirement.
+ */
+ if (hba->spm_lvl < UFS_PM_LVL_5)
+ device_set_awake_path(dev);
+
+ err = ufshcd_system_suspend(dev);
+ if (err) {
+ dev_err(hba->dev, "UFSHCD system suspend failed %d\n", err);
+ return err;
+ }
+
+ clk_disable_unprepare(host->ref_out_clk);
+
+ return 0;
+}
+
+static int ufs_rockchip_system_resume(struct device *dev)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ struct ufs_rockchip_host *host = ufshcd_get_variant(hba);
+ int err;
+
+ err = clk_prepare_enable(host->ref_out_clk);
+ if (err) {
+ dev_err(hba->dev, "failed to enable ref_out clock %d\n", err);
+ return err;
+ }
+
+ return ufshcd_system_resume(dev);
+}
+#endif
+
+static const struct dev_pm_ops ufs_rockchip_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(ufs_rockchip_system_suspend, ufs_rockchip_system_resume)
+ SET_RUNTIME_PM_OPS(ufs_rockchip_runtime_suspend, ufs_rockchip_runtime_resume, NULL)
+ .prepare = ufshcd_suspend_prepare,
+ .complete = ufshcd_resume_complete,
+};
+
+static struct platform_driver ufs_rockchip_pltform = {
+ .probe = ufs_rockchip_probe,
+ .remove = ufs_rockchip_remove,
+ .driver = {
+ .name = "ufshcd-rockchip",
+ .pm = &ufs_rockchip_pm_ops,
+ .of_match_table = ufs_rockchip_of_match,
+ },
+};
+module_platform_driver(ufs_rockchip_pltform);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Rockchip UFS Host Driver");
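The phy-init loop in ufs_rockchip_rk3576_phy_init() above programs the same vendor M-PHY attributes for each lane by adding the lane index to SEL_TX_LANE0/SEL_RX_LANE0. Below is a minimal standalone C sketch of that selector arithmetic, assuming UIC_ARG_MIB_SEL() packs the 16-bit attribute ID into the upper half of the argument and the selector into the lower half; that layout is an assumption for illustration, not something this patch defines.

	#include <stdint.h>
	#include <stdio.h>

	#define SEL_TX_LANE0	0x0
	#define SEL_RX_LANE0	0x4
	#define VND_TX_CLK_PRD	0xAA

	/* Assumed packing: attribute ID in bits 31:16, selector in bits 15:0 */
	static uint32_t mib_sel(uint32_t attr, uint32_t sel)
	{
		return ((attr & 0xFFFF) << 16) | (sel & 0xFFFF);
	}

	int main(void)
	{
		for (int lane = 0; lane < 2; lane++)
			printf("TX lane %d: arg = 0x%08x\n",
			       lane, mib_sel(VND_TX_CLK_PRD, SEL_TX_LANE0 + lane));
		return 0;
	}

With two lanes the loop walks selectors 0x0/0x1 on the TX side and 0x4/0x5 on the RX side, matching the SEL_* defines added in ufs-rockchip.h below.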
diff --git a/drivers/ufs/host/ufs-rockchip.h b/drivers/ufs/host/ufs-rockchip.h
new file mode 100644
index 000000000000..3ba6fb9f73ae
--- /dev/null
+++ b/drivers/ufs/host/ufs-rockchip.h
@@ -0,0 +1,90 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Rockchip UFS Host Controller driver
+ *
+ * Copyright (C) 2025 Rockchip Electronics Co., Ltd.
+ */
+
+#ifndef _UFS_ROCKCHIP_H_
+#define _UFS_ROCKCHIP_H_
+
+#define SEL_TX_LANE0 0x0
+#define SEL_TX_LANE1 0x1
+#define SEL_TX_LANE2 0x2
+#define SEL_TX_LANE3 0x3
+#define SEL_RX_LANE0 0x4
+#define SEL_RX_LANE1 0x5
+#define SEL_RX_LANE2 0x6
+#define SEL_RX_LANE3 0x7
+
+#define VND_TX_CLK_PRD 0xAA
+#define VND_TX_CLK_PRD_EN 0xA9
+#define VND_TX_LINERESET_PVALUE2 0xAB
+#define VND_TX_LINERESET_PVALUE1 0xAC
+#define VND_TX_LINERESET_VALUE 0xAD
+#define VND_TX_BASE_NVALUE 0x93
+#define VND_TX_TASE_VALUE 0x94
+#define VND_TX_POWER_SAVING_CTRL 0x7F
+#define VND_RX_CLK_PRD 0x12
+#define VND_RX_CLK_PRD_EN 0x11
+#define VND_RX_LINERESET_PVALUE2 0x1B
+#define VND_RX_LINERESET_PVALUE1 0x1C
+#define VND_RX_LINERESET_VALUE 0x1D
+#define VND_RX_LINERESET_OPTION 0x25
+#define VND_RX_POWER_SAVING_CTRL 0x2F
+#define VND_RX_SAVE_DET_CTRL 0x1E
+
+#define CMN_REG23 0x8C
+#define CMN_REG25 0x94
+#define TRSV0_REG08 0xE0
+#define TRSV1_REG08 0x220
+#define TRSV0_REG14 0x110
+#define TRSV1_REG14 0x250
+#define TRSV0_REG15 0x134
+#define TRSV1_REG15 0x274
+#define TRSV0_REG16 0x128
+#define TRSV1_REG16 0x268
+#define TRSV0_REG17 0x12C
+#define TRSV1_REG17 0x26c
+#define TRSV0_REG18 0x120
+#define TRSV1_REG18 0x260
+#define TRSV0_REG29 0x164
+#define TRSV1_REG29 0x2A4
+#define TRSV0_REG2E 0x178
+#define TRSV1_REG2E 0x2B8
+#define TRSV0_REG3C 0x1B0
+#define TRSV1_REG3C 0x2F0
+#define TRSV0_REG3D 0x1B4
+#define TRSV1_REG3D 0x2F4
+
+#define MPHY_CFG 0x200
+#define MPHY_CFG_ENABLE 0x40
+#define MPHY_CFG_DISABLE 0x0
+
+#define MIB_T_DBG_CPORT_TX_ENDIAN 0xc022
+#define MIB_T_DBG_CPORT_RX_ENDIAN 0xc023
+
+struct ufs_rockchip_host {
+ struct ufs_hba *hba;
+ void __iomem *ufs_phy_ctrl;
+ void __iomem *ufs_sys_ctrl;
+ void __iomem *mphy_base;
+ struct gpio_desc *rst_gpio;
+ struct reset_control *rst;
+ struct clk *ref_out_clk;
+ struct clk_bulk_data *clks;
+ uint64_t caps;
+};
+
+#define ufs_sys_writel(base, val, reg) \
+ writel((val), (base) + (reg))
+#define ufs_sys_readl(base, reg) readl((base) + (reg))
+#define ufs_sys_set_bits(base, mask, reg) \
+ ufs_sys_writel( \
+ (base), ((mask) | (ufs_sys_readl((base), (reg)))), (reg))
+#define ufs_sys_ctrl_clr_bits(base, mask, reg) \
+ ufs_sys_writel((base), \
+ ((~(mask)) & (ufs_sys_readl((base), (reg)))), \
+ (reg))
+
+#endif /* _UFS_ROCKCHIP_H_ */
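The ufs_sys_set_bits()/ufs_sys_ctrl_clr_bits() helpers above are plain read-modify-write wrappers around readl()/writel(). A small userspace sketch of the same pattern, using an ordinary array in place of the ioremap()ed register block:

	#include <stdint.h>
	#include <stdio.h>

	static uint32_t regs[16];	/* stand-in for the mapped register block */

	static uint32_t rd(unsigned int off)               { return regs[off / 4]; }
	static void     wr(uint32_t val, unsigned int off) { regs[off / 4] = val; }

	/* read-modify-write, as the ufs_sys_* macros do on real registers */
	static void set_bits(uint32_t mask, unsigned int off) { wr(rd(off) | mask, off); }
	static void clr_bits(uint32_t mask, unsigned int off) { wr(rd(off) & ~mask, off); }

	int main(void)
	{
		set_bits(0x30, 0x8);	/* 0x00 -> 0x30 */
		clr_bits(0x10, 0x8);	/* 0x30 -> 0x20 */
		printf("reg 0x8 = 0x%x\n", rd(0x8));
		return 0;
	}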
diff --git a/drivers/ufs/host/ufs-sprd.c b/drivers/ufs/host/ufs-sprd.c
index b1d532363f9d..65bd8fb96b99 100644
--- a/drivers/ufs/host/ufs-sprd.c
+++ b/drivers/ufs/host/ufs-sprd.c
@@ -160,9 +160,9 @@ static int ufs_sprd_common_init(struct ufs_hba *hba)
}
static int sprd_ufs_pwr_change_notify(struct ufs_hba *hba,
- enum ufs_notify_change_status status,
- struct ufs_pa_layer_attr *dev_max_params,
- struct ufs_pa_layer_attr *dev_req_params)
+ enum ufs_notify_change_status status,
+ const struct ufs_pa_layer_attr *dev_max_params,
+ struct ufs_pa_layer_attr *dev_req_params)
{
struct ufs_sprd_host *host = ufshcd_get_variant(hba);
diff --git a/drivers/ufs/host/ufshcd-pci.c b/drivers/ufs/host/ufshcd-pci.c
index 9cfcaad23cf9..996387906aa1 100644
--- a/drivers/ufs/host/ufshcd-pci.c
+++ b/drivers/ufs/host/ufshcd-pci.c
@@ -157,7 +157,7 @@ static int ufs_intel_set_lanes(struct ufs_hba *hba, u32 lanes)
static int ufs_intel_lkf_pwr_change_notify(struct ufs_hba *hba,
enum ufs_notify_change_status status,
- struct ufs_pa_layer_attr *dev_max_params,
+ const struct ufs_pa_layer_attr *dev_max_params,
struct ufs_pa_layer_attr *dev_req_params)
{
int err = 0;
diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c
index 0dd85d2635b9..47d06af33747 100644
--- a/drivers/usb/atm/cxacru.c
+++ b/drivers/usb/atm/cxacru.c
@@ -1131,7 +1131,10 @@ static int cxacru_bind(struct usbatm_data *usbatm_instance,
struct cxacru_data *instance;
struct usb_device *usb_dev = interface_to_usbdev(intf);
struct usb_host_endpoint *cmd_ep = usb_dev->ep_in[CXACRU_EP_CMD];
- struct usb_endpoint_descriptor *in, *out;
+ static const u8 ep_addrs[] = {
+ CXACRU_EP_CMD + USB_DIR_IN,
+ CXACRU_EP_CMD + USB_DIR_OUT,
+ 0};
int ret;
/* instance init */
@@ -1179,13 +1182,11 @@ static int cxacru_bind(struct usbatm_data *usbatm_instance,
}
if (usb_endpoint_xfer_int(&cmd_ep->desc))
- ret = usb_find_common_endpoints(intf->cur_altsetting,
- NULL, NULL, &in, &out);
+ ret = usb_check_int_endpoints(intf, ep_addrs);
else
- ret = usb_find_common_endpoints(intf->cur_altsetting,
- &in, &out, NULL, NULL);
+ ret = usb_check_bulk_endpoints(intf, ep_addrs);
- if (ret) {
+ if (!ret) {
usb_err(usbatm_instance, "cxacru_bind: interface has incorrect endpoints\n");
ret = -ENODEV;
goto fail;
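The branch flip above from if (ret) to if (!ret) follows from the differing return conventions: usb_find_common_endpoints() returns 0 on success, while the usb_check_int_endpoints()/usb_check_bulk_endpoints() helpers return true when every listed endpoint address is present with the expected type. A minimal kernel-style sketch of the resulting pattern (illustrative only, not the cxacru code itself):

	#include <linux/errno.h>
	#include <linux/usb.h>

	/* Return 0 when all endpoints in ep_addrs exist with the expected type */
	static int validate_endpoints(struct usb_interface *intf, const u8 *ep_addrs,
				      bool want_int)
	{
		bool ok = want_int ? usb_check_int_endpoints(intf, ep_addrs)
				   : usb_check_bulk_endpoints(intf, ep_addrs);

		return ok ? 0 : -ENODEV;
	}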
diff --git a/drivers/usb/chipidea/otg_fsm.c b/drivers/usb/chipidea/otg_fsm.c
index c17516c29b63..a093544482d5 100644
--- a/drivers/usb/chipidea/otg_fsm.c
+++ b/drivers/usb/chipidea/otg_fsm.c
@@ -424,8 +424,7 @@ static enum hrtimer_restart ci_otg_hrtimer_func(struct hrtimer *t)
/* Initialize timers */
static int ci_otg_init_timers(struct ci_hdrc *ci)
{
- hrtimer_init(&ci->otg_fsm_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
- ci->otg_fsm_hrtimer.function = ci_otg_hrtimer_func;
+ hrtimer_setup(&ci->otg_fsm_hrtimer, ci_otg_hrtimer_func, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
return 0;
}
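This hrtimer_setup() conversion repeats throughout the rest of this series (dwc2, fotg210, f_ncm, dummy_hcd, ehci, musb_cppi41, tcpm). The change is mechanical: hrtimer_setup() takes the callback as an argument, so the timer can no longer be initialized without one. A minimal module-context sketch of the two equivalent forms, with illustrative names only:

	#include <linux/hrtimer.h>

	static struct hrtimer demo_timer;

	static enum hrtimer_restart demo_timer_fn(struct hrtimer *t)
	{
		return HRTIMER_NORESTART;
	}

	static void demo_init_old(void)
	{
		hrtimer_init(&demo_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		demo_timer.function = demo_timer_fn;
	}

	static void demo_init_new(void)
	{
		hrtimer_setup(&demo_timer, demo_timer_fn, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	}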
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index a76bb50b6202..dcba4281ea48 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -6066,6 +6066,36 @@ void usb_hub_cleanup(void)
} /* usb_hub_cleanup() */
/**
+ * hub_hc_release_resources - clear resources used by host controller
+ * @udev: pointer to device being released
+ *
+ * Context: task context, might sleep
+ *
+ * This function releases the host controller resources in the correct order
+ * before any operation is performed on the resuming usb device. The host
+ * controller resources allocated for devices in the tree should be released
+ * starting from the last usb device in the tree toward the root hub. This
+ * function is used only while resuming a device that requires
+ * reinitialization, that is, when the udev->reset_resume flag is set.
+ *
+ * This call is synchronous, and may not be used in an interrupt context.
+ */
+static void hub_hc_release_resources(struct usb_device *udev)
+{
+ struct usb_hub *hub = usb_hub_to_struct_hub(udev);
+ struct usb_hcd *hcd = bus_to_hcd(udev->bus);
+ int i;
+
+ /* Release the resources for all children before this device */
+ for (i = 0; i < udev->maxchild; i++)
+ if (hub->ports[i]->child)
+ hub_hc_release_resources(hub->ports[i]->child);
+
+ if (hcd->driver->reset_device)
+ hcd->driver->reset_device(hcd, udev);
+}
+
+/**
* usb_reset_and_verify_device - perform a USB port reset to reinitialize a device
* @udev: device to reset (not in SUSPENDED or NOTATTACHED state)
*
@@ -6129,6 +6159,9 @@ static int usb_reset_and_verify_device(struct usb_device *udev)
bos = udev->bos;
udev->bos = NULL;
+ if (udev->reset_resume)
+ hub_hc_release_resources(udev);
+
mutex_lock(hcd->address0_mutex);
for (i = 0; i < PORT_INIT_TRIES; ++i) {
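hub_hc_release_resources() above is a post-order walk: every child's host-controller resources are dropped before its parent's, so the root hub is handled last. A standalone sketch of that ordering on a toy tree, with no USB types involved:

	#include <stdio.h>

	struct node {
		const char *name;
		struct node *child[2];
	};

	static void release(struct node *n)
	{
		if (!n)
			return;
		for (int i = 0; i < 2; i++)
			release(n->child[i]);	/* children released first */
		printf("release %s\n", n->name);
	}

	int main(void)
	{
		struct node leaf1 = { "device-a", { NULL, NULL } };
		struct node leaf2 = { "device-b", { NULL, NULL } };
		struct node hub   = { "hub",      { &leaf1, &leaf2 } };
		struct node root  = { "root-hub", { &hub, NULL } };

		release(&root);	/* prints device-a, device-b, hub, root-hub */
		return 0;
	}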
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index dfcfc142bd5e..8efbacc5bc34 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -341,6 +341,10 @@ static const struct usb_device_id usb_quirk_list[] = {
{ USB_DEVICE(0x0638, 0x0a13), .driver_info =
USB_QUIRK_STRING_FETCH_255 },
+ /* Prolific Single-LUN Mass Storage Card Reader */
+ { USB_DEVICE(0x067b, 0x2731), .driver_info = USB_QUIRK_DELAY_INIT |
+ USB_QUIRK_NO_LPM },
+
/* Saitek Cyborg Gold Joystick */
{ USB_DEVICE(0x06a3, 0x0006), .driver_info =
USB_QUIRK_CONFIG_INTF_STRINGS },
diff --git a/drivers/usb/dwc2/hcd_queue.c b/drivers/usb/dwc2/hcd_queue.c
index 238c6fd50e75..2a542a99ec44 100644
--- a/drivers/usb/dwc2/hcd_queue.c
+++ b/drivers/usb/dwc2/hcd_queue.c
@@ -1459,8 +1459,7 @@ static void dwc2_qh_init(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
/* Initialize QH */
qh->hsotg = hsotg;
timer_setup(&qh->unreserve_timer, dwc2_unreserve_timer_fn, 0);
- hrtimer_init(&qh->wait_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- qh->wait_timer.function = &dwc2_wait_timer_fn;
+ hrtimer_setup(&qh->wait_timer, &dwc2_wait_timer_fn, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
qh->ep_type = ep_type;
qh->ep_is_in = ep_is_in;
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index dfa1b5fe48dc..66a08b527165 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -131,11 +131,24 @@ void dwc3_enable_susphy(struct dwc3 *dwc, bool enable)
}
}
-void dwc3_set_prtcap(struct dwc3 *dwc, u32 mode)
+void dwc3_set_prtcap(struct dwc3 *dwc, u32 mode, bool ignore_susphy)
{
+ unsigned int hw_mode;
u32 reg;
reg = dwc3_readl(dwc->regs, DWC3_GCTL);
+
+ /*
+ * For DRD controllers, GUSB3PIPECTL.SUSPENDENABLE and
+ * GUSB2PHYCFG.SUSPHY should be cleared during mode switching,
+ * and they can be set after core initialization.
+ */
+ hw_mode = DWC3_GHWPARAMS0_MODE(dwc->hwparams.hwparams0);
+ if (hw_mode == DWC3_GHWPARAMS0_MODE_DRD && !ignore_susphy) {
+ if (DWC3_GCTL_PRTCAP(reg) != mode)
+ dwc3_enable_susphy(dwc, false);
+ }
+
reg &= ~(DWC3_GCTL_PRTCAPDIR(DWC3_GCTL_PRTCAP_OTG));
reg |= DWC3_GCTL_PRTCAPDIR(mode);
dwc3_writel(dwc->regs, DWC3_GCTL, reg);
@@ -216,7 +229,7 @@ static void __dwc3_set_mode(struct work_struct *work)
spin_lock_irqsave(&dwc->lock, flags);
- dwc3_set_prtcap(dwc, desired_dr_role);
+ dwc3_set_prtcap(dwc, desired_dr_role, false);
spin_unlock_irqrestore(&dwc->lock, flags);
@@ -658,16 +671,7 @@ static int dwc3_ss_phy_setup(struct dwc3 *dwc, int index)
*/
reg &= ~DWC3_GUSB3PIPECTL_UX_EXIT_PX;
- /*
- * Above DWC_usb3.0 1.94a, it is recommended to set
- * DWC3_GUSB3PIPECTL_SUSPHY to '0' during coreConsultant configuration.
- * So default value will be '0' when the core is reset. Application
- * needs to set it to '1' after the core initialization is completed.
- *
- * Similarly for DRD controllers, GUSB3PIPECTL.SUSPENDENABLE must be
- * cleared after power-on reset, and it can be set after core
- * initialization.
- */
+ /* Ensure the GUSB3PIPECTL.SUSPENDENABLE is cleared prior to phy init. */
reg &= ~DWC3_GUSB3PIPECTL_SUSPHY;
if (dwc->u2ss_inp3_quirk)
@@ -747,15 +751,7 @@ static int dwc3_hs_phy_setup(struct dwc3 *dwc, int index)
break;
}
- /*
- * Above DWC_usb3.0 1.94a, it is recommended to set
- * DWC3_GUSB2PHYCFG_SUSPHY to '0' during coreConsultant configuration.
- * So default value will be '0' when the core is reset. Application
- * needs to set it to '1' after the core initialization is completed.
- *
- * Similarly for DRD controllers, GUSB2PHYCFG.SUSPHY must be cleared
- * after power-on reset, and it can be set after core initialization.
- */
+ /* Ensure the GUSB2PHYCFG.SUSPHY is cleared prior to phy init. */
reg &= ~DWC3_GUSB2PHYCFG_SUSPHY;
if (dwc->dis_enblslpm_quirk)
@@ -830,6 +826,25 @@ static int dwc3_phy_init(struct dwc3 *dwc)
goto err_exit_usb3_phy;
}
+ /*
+ * Above DWC_usb3.0 1.94a, it is recommended to set
+ * DWC3_GUSB3PIPECTL_SUSPHY and DWC3_GUSB2PHYCFG_SUSPHY to '0' during
+ * coreConsultant configuration. So default value will be '0' when the
+ * core is reset. Application needs to set it to '1' after the core
+ * initialization is completed.
+ *
+ * Certain phy requires to be in P0 power state during initialization.
+ * Make sure GUSB3PIPECTL.SUSPENDENABLE and GUSB2PHYCFG.SUSPHY are clear
+ * prior to phy init to maintain in the P0 state.
+ *
+ * After phy initialization, some phy operations can only be executed
+ * while in lower P states. Ensure GUSB3PIPECTL.SUSPENDENABLE and
+ * GUSB2PHYCFG.SUSPHY are set soon after initialization to avoid
+ * blocking phy ops.
+ */
+ if (!DWC3_VER_IS_WITHIN(DWC3, ANY, 194A))
+ dwc3_enable_susphy(dwc, true);
+
return 0;
err_exit_usb3_phy:
@@ -1588,7 +1603,7 @@ static int dwc3_core_init_mode(struct dwc3 *dwc)
switch (dwc->dr_mode) {
case USB_DR_MODE_PERIPHERAL:
- dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_DEVICE);
+ dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_DEVICE, false);
if (dwc->usb2_phy)
otg_set_vbus(dwc->usb2_phy->otg, false);
@@ -1600,7 +1615,7 @@ static int dwc3_core_init_mode(struct dwc3 *dwc)
return dev_err_probe(dev, ret, "failed to initialize gadget\n");
break;
case USB_DR_MODE_HOST:
- dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_HOST);
+ dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_HOST, false);
if (dwc->usb2_phy)
otg_set_vbus(dwc->usb2_phy->otg, true);
@@ -1645,7 +1660,7 @@ static void dwc3_core_exit_mode(struct dwc3 *dwc)
}
/* de-assert DRVVBUS for HOST and OTG mode */
- dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_DEVICE);
+ dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_DEVICE, true);
}
static void dwc3_get_software_properties(struct dwc3 *dwc)
@@ -1835,8 +1850,6 @@ static void dwc3_get_properties(struct dwc3 *dwc)
dwc->tx_thr_num_pkt_prd = tx_thr_num_pkt_prd;
dwc->tx_max_burst_prd = tx_max_burst_prd;
- dwc->imod_interval = 0;
-
dwc->tx_fifo_resize_max_num = tx_fifo_resize_max_num;
}
@@ -1854,21 +1867,19 @@ static void dwc3_check_params(struct dwc3 *dwc)
unsigned int hwparam_gen =
DWC3_GHWPARAMS3_SSPHY_IFC(dwc->hwparams.hwparams3);
- /* Check for proper value of imod_interval */
- if (dwc->imod_interval && !dwc3_has_imod(dwc)) {
- dev_warn(dwc->dev, "Interrupt moderation not supported\n");
- dwc->imod_interval = 0;
- }
-
/*
+ * Enable IMOD for all supporting controllers.
+ *
+ * Particularly, DWC_usb3 v3.00a must enable this feature for
+ * the following reason:
+ *
* Workaround for STAR 9000961433 which affects only version
* 3.00a of the DWC_usb3 core. This prevents the controller
* interrupt from being masked while handling events. IMOD
* allows us to work around this issue. Enable it for the
* affected version.
*/
- if (!dwc->imod_interval &&
- DWC3_VER_IS(DWC3, 300A))
+ if (dwc3_has_imod(dwc))
dwc->imod_interval = 1;
/* Check the maximum_speed parameter */
@@ -2457,7 +2468,7 @@ static int dwc3_resume_common(struct dwc3 *dwc, pm_message_t msg)
if (ret)
return ret;
- dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_DEVICE);
+ dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_DEVICE, true);
dwc3_gadget_resume(dwc);
break;
case DWC3_GCTL_PRTCAP_HOST:
@@ -2465,7 +2476,7 @@ static int dwc3_resume_common(struct dwc3 *dwc, pm_message_t msg)
ret = dwc3_core_init_for_resume(dwc);
if (ret)
return ret;
- dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_HOST);
+ dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_HOST, true);
break;
}
/* Restore GUSB2PHYCFG bits that were modified in suspend */
@@ -2494,7 +2505,7 @@ static int dwc3_resume_common(struct dwc3 *dwc, pm_message_t msg)
if (ret)
return ret;
- dwc3_set_prtcap(dwc, dwc->current_dr_role);
+ dwc3_set_prtcap(dwc, dwc->current_dr_role, true);
dwc3_otg_init(dwc);
if (dwc->current_otg_role == DWC3_OTG_ROLE_HOST) {
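Across core.c the rule is: on DRD controllers GUSB2PHYCFG.SUSPHY and GUSB3PIPECTL.SUSPENDENABLE are cleared before a real port-capability change or phy init and set again afterwards, while resume and OTG-init paths that merely restore the current mode pass ignore_susphy=true. A standalone sketch of just that decision, as an illustration rather than the driver's own code:

	#include <stdbool.h>
	#include <stdio.h>

	enum prtcap { PRTCAP_HOST = 1, PRTCAP_DEVICE = 2, PRTCAP_OTG = 3 };

	/* SUSPHY bits are cleared only on DRD hardware, only when the port
	 * capability actually changes, and only if the caller did not opt out. */
	static bool must_clear_susphy(bool is_drd, enum prtcap cur, enum prtcap next,
				      bool ignore_susphy)
	{
		return is_drd && !ignore_susphy && cur != next;
	}

	int main(void)
	{
		printf("%d\n", must_clear_susphy(true, PRTCAP_HOST, PRTCAP_DEVICE, false)); /* 1 */
		printf("%d\n", must_clear_susphy(true, PRTCAP_HOST, PRTCAP_HOST, false));   /* 0 */
		printf("%d\n", must_clear_susphy(true, PRTCAP_HOST, PRTCAP_DEVICE, true));  /* 0 */
		return 0;
	}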
diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
index c955039bb4f6..aaa39e663f60 100644
--- a/drivers/usb/dwc3/core.h
+++ b/drivers/usb/dwc3/core.h
@@ -1558,7 +1558,7 @@ struct dwc3_gadget_ep_cmd_params {
#define DWC3_HAS_OTG BIT(3)
/* prototypes */
-void dwc3_set_prtcap(struct dwc3 *dwc, u32 mode);
+void dwc3_set_prtcap(struct dwc3 *dwc, u32 mode, bool ignore_susphy);
void dwc3_set_mode(struct dwc3 *dwc, u32 mode);
u32 dwc3_core_fifo_space(struct dwc3_ep *dep, u8 type);
diff --git a/drivers/usb/dwc3/drd.c b/drivers/usb/dwc3/drd.c
index d76ae676783c..7977860932b1 100644
--- a/drivers/usb/dwc3/drd.c
+++ b/drivers/usb/dwc3/drd.c
@@ -173,7 +173,7 @@ void dwc3_otg_init(struct dwc3 *dwc)
* block "Initialize GCTL for OTG operation".
*/
/* GCTL.PrtCapDir=2'b11 */
- dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_OTG);
+ dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_OTG, true);
/* GUSB2PHYCFG0.SusPHY=0 */
reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
reg &= ~DWC3_GUSB2PHYCFG_SUSPHY;
@@ -556,7 +556,7 @@ int dwc3_drd_init(struct dwc3 *dwc)
dwc3_drd_update(dwc);
} else {
- dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_OTG);
+ dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_OTG, true);
/* use OTG block to get ID event */
irq = dwc3_otg_get_irq(dwc);
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index ddd6b2ce5710..89a4dc8ebf94 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -4501,14 +4501,18 @@ static irqreturn_t dwc3_process_event_buf(struct dwc3_event_buffer *evt)
dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(0),
DWC3_GEVNTSIZ_SIZE(evt->length));
+ evt->flags &= ~DWC3_EVENT_PENDING;
+ /*
+ * Add an explicit write memory barrier to make sure that clearing
+ * DWC3_EVENT_PENDING is observed in dwc3_check_event_buf()
+ */
+ wmb();
+
if (dwc->imod_interval) {
dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), DWC3_GEVNTCOUNT_EHB);
dwc3_writel(dwc->regs, DWC3_DEV_IMOD(0), dwc->imod_interval);
}
- /* Keep the clearing of DWC3_EVENT_PENDING at the end */
- evt->flags &= ~DWC3_EVENT_PENDING;
-
return ret;
}
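Moving the DWC3_EVENT_PENDING clear ahead of the IMOD re-arm works only because the wmb() guarantees the cleared flag is visible before the interrupt can fire again. A standalone C11 analogy of the same release/acquire shape, with ordinary atomics standing in for the register writes and the wmb(); this models the ordering, not the actual hardware path:

	#include <stdatomic.h>
	#include <stdio.h>

	static atomic_uint flags;	/* plays the role of evt->flags */
	static atomic_uint armed;	/* plays the role of the re-arm writes */

	static void finish_event_handling(void)
	{
		atomic_store_explicit(&flags, 0, memory_order_relaxed);	/* clear PENDING */
		atomic_store_explicit(&armed, 1, memory_order_release);	/* then re-arm */
	}

	static int checker_sees_pending(void)
	{
		if (atomic_load_explicit(&armed, memory_order_acquire))
			return atomic_load_explicit(&flags, memory_order_relaxed) != 0;
		return 0;
	}

	int main(void)
	{
		atomic_store(&flags, 1);
		finish_event_handling();
		printf("pending after re-arm: %d\n", checker_sees_pending());	/* 0 */
		return 0;
	}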
diff --git a/drivers/usb/fotg210/fotg210-hcd.c b/drivers/usb/fotg210/fotg210-hcd.c
index 3d404d19a205..64c4965a160f 100644
--- a/drivers/usb/fotg210/fotg210-hcd.c
+++ b/drivers/usb/fotg210/fotg210-hcd.c
@@ -4901,8 +4901,7 @@ static int hcd_fotg210_init(struct usb_hcd *hcd)
*/
fotg210->need_io_watchdog = 1;
- hrtimer_init(&fotg210->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
- fotg210->hrtimer.function = fotg210_hrtimer_func;
+ hrtimer_setup(&fotg210->hrtimer, fotg210_hrtimer_func, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
fotg210->next_hrtimer_event = FOTG210_HRTIMER_NO_EVENT;
hcc_params = fotg210_readl(fotg210, &fotg210->caps->hcc_params);
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index bdda8c74602d..869ad99afb48 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -1050,10 +1050,11 @@ static int set_config(struct usb_composite_dev *cdev,
else
usb_gadget_set_remote_wakeup(gadget, 0);
done:
- if (power <= USB_SELF_POWER_VBUS_MAX_DRAW)
- usb_gadget_set_selfpowered(gadget);
- else
+ if (power > USB_SELF_POWER_VBUS_MAX_DRAW ||
+ (c && !(c->bmAttributes & USB_CONFIG_ATT_SELFPOWER)))
usb_gadget_clear_selfpowered(gadget);
+ else
+ usb_gadget_set_selfpowered(gadget);
usb_gadget_vbus_draw(gadget, power);
if (result >= 0 && cdev->delayed_status)
@@ -2615,7 +2616,10 @@ void composite_suspend(struct usb_gadget *gadget)
cdev->suspended = 1;
- usb_gadget_set_selfpowered(gadget);
+ if (cdev->config &&
+ cdev->config->bmAttributes & USB_CONFIG_ATT_SELFPOWER)
+ usb_gadget_set_selfpowered(gadget);
+
usb_gadget_vbus_draw(gadget, 2);
}
@@ -2649,8 +2653,11 @@ void composite_resume(struct usb_gadget *gadget)
else
maxpower = min(maxpower, 900U);
- if (maxpower > USB_SELF_POWER_VBUS_MAX_DRAW)
+ if (maxpower > USB_SELF_POWER_VBUS_MAX_DRAW ||
+ !(cdev->config->bmAttributes & USB_CONFIG_ATT_SELFPOWER))
usb_gadget_clear_selfpowered(gadget);
+ else
+ usb_gadget_set_selfpowered(gadget);
usb_gadget_vbus_draw(gadget, maxpower);
} else {
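The set_config() and composite_resume() hunks above apply the same rule: report the gadget as self-powered only when the active configuration sets USB_CONFIG_ATT_SELFPOWER and the draw stays within the self-powered vbus budget (100 mA, the value of USB_SELF_POWER_VBUS_MAX_DRAW). A standalone sketch of that predicate, with the constants mirrored locally for illustration:

	#include <stdbool.h>
	#include <stdio.h>

	#define ATT_SELFPOWER		(1 << 6)	/* bmAttributes self-powered bit */
	#define SELF_POWER_VBUS_MAX	100		/* mA */

	static bool report_selfpowered(unsigned int bmAttributes, unsigned int draw_mA)
	{
		return (bmAttributes & ATT_SELFPOWER) && draw_mA <= SELF_POWER_VBUS_MAX;
	}

	int main(void)
	{
		printf("%d\n", report_selfpowered(ATT_SELFPOWER, 100));	/* 1 */
		printf("%d\n", report_selfpowered(ATT_SELFPOWER, 500));	/* 0 */
		printf("%d\n", report_selfpowered(0, 2));			/* 0 */
		return 0;
	}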
diff --git a/drivers/usb/gadget/function/f_mass_storage.c b/drivers/usb/gadget/function/f_mass_storage.c
index 2eae8fc2e0db..94d478b6bcd3 100644
--- a/drivers/usb/gadget/function/f_mass_storage.c
+++ b/drivers/usb/gadget/function/f_mass_storage.c
@@ -2142,8 +2142,8 @@ static int do_scsi_command(struct fsg_common *common)
* of Posix locks.
*/
case FORMAT_UNIT:
- case RELEASE:
- case RESERVE:
+ case RELEASE_6:
+ case RESERVE_6:
case SEND_DIAGNOSTIC:
default:
diff --git a/drivers/usb/gadget/function/f_ncm.c b/drivers/usb/gadget/function/f_ncm.c
index f60576a65ca6..58b0dd575af3 100644
--- a/drivers/usb/gadget/function/f_ncm.c
+++ b/drivers/usb/gadget/function/f_ncm.c
@@ -1559,8 +1559,7 @@ static int ncm_bind(struct usb_configuration *c, struct usb_function *f)
ncm->port.open = ncm_open;
ncm->port.close = ncm_close;
- hrtimer_init(&ncm->task_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_SOFT);
- ncm->task_timer.function = ncm_tx_timeout;
+ hrtimer_setup(&ncm->task_timer, ncm_tx_timeout, CLOCK_MONOTONIC, HRTIMER_MODE_REL_SOFT);
DBG(cdev, "CDC Network: IN/%s OUT/%s NOTIFY/%s\n",
ncm->port.in_ep->name, ncm->port.out_ep->name,
diff --git a/drivers/usb/gadget/function/u_ether.c b/drivers/usb/gadget/function/u_ether.c
index 09e2838917e2..f58590bf5e02 100644
--- a/drivers/usb/gadget/function/u_ether.c
+++ b/drivers/usb/gadget/function/u_ether.c
@@ -1052,8 +1052,8 @@ void gether_suspend(struct gether *link)
* There is a transfer in progress. So we trigger a remote
* wakeup to inform the host.
*/
- ether_wakeup_host(dev->port_usb);
- return;
+ if (!ether_wakeup_host(dev->port_usb))
+ return;
}
spin_lock_irqsave(&dev->lock, flags);
link->is_suspend = true;
diff --git a/drivers/usb/gadget/udc/dummy_hcd.c b/drivers/usb/gadget/udc/dummy_hcd.c
index bda08c5ba7c0..4f1b5db51dda 100644
--- a/drivers/usb/gadget/udc/dummy_hcd.c
+++ b/drivers/usb/gadget/udc/dummy_hcd.c
@@ -2479,8 +2479,7 @@ static DEVICE_ATTR_RO(urbs);
static int dummy_start_ss(struct dummy_hcd *dum_hcd)
{
- hrtimer_init(&dum_hcd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_SOFT);
- dum_hcd->timer.function = dummy_timer;
+ hrtimer_setup(&dum_hcd->timer, dummy_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_SOFT);
dum_hcd->rh_state = DUMMY_RH_RUNNING;
dum_hcd->stream_en_ep = 0;
INIT_LIST_HEAD(&dum_hcd->urbp_list);
@@ -2509,8 +2508,7 @@ static int dummy_start(struct usb_hcd *hcd)
return dummy_start_ss(dum_hcd);
spin_lock_init(&dum_hcd->dum->lock);
- hrtimer_init(&dum_hcd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_SOFT);
- dum_hcd->timer.function = dummy_timer;
+ hrtimer_setup(&dum_hcd->timer, dummy_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_SOFT);
dum_hcd->rh_state = DUMMY_RH_RUNNING;
INIT_LIST_HEAD(&dum_hcd->urbp_list);
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
index 6de79ac5e6a4..6d1d190c914d 100644
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -466,8 +466,7 @@ static int ehci_init(struct usb_hcd *hcd)
*/
ehci->need_io_watchdog = 1;
- hrtimer_init(&ehci->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
- ehci->hrtimer.function = ehci_hrtimer_func;
+ hrtimer_setup(&ehci->hrtimer, ehci_hrtimer_func, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
ehci->next_hrtimer_event = EHCI_HRTIMER_NO_EVENT;
hcc_params = ehci_readl(ehci, &ehci->caps->hcc_params);
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index 9693464c0520..69c278b64084 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -12,6 +12,7 @@
#include <linux/slab.h>
#include <linux/unaligned.h>
#include <linux/bitfield.h>
+#include <linux/pci.h>
#include "xhci.h"
#include "xhci-trace.h"
@@ -770,9 +771,16 @@ static int xhci_exit_test_mode(struct xhci_hcd *xhci)
enum usb_link_tunnel_mode xhci_port_is_tunneled(struct xhci_hcd *xhci,
struct xhci_port *port)
{
+ struct usb_hcd *hcd;
void __iomem *base;
u32 offset;
+ /* Don't try to probe this capability for non-Intel hosts */
+ hcd = xhci_to_hcd(xhci);
+ if (!dev_is_pci(hcd->self.controller) ||
+ to_pci_dev(hcd->self.controller)->vendor != PCI_VENDOR_ID_INTEL)
+ return USB_LINK_UNKNOWN;
+
base = &xhci->cap_regs->hc_capbase;
offset = xhci_find_next_ext_cap(base, 0, XHCI_EXT_CAPS_INTEL_SPR_SHADOW);
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 92703efda1f7..fdf0c1008225 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -2437,7 +2437,8 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
* and our use of dma addresses in the trb_address_map radix tree needs
* TRB_SEGMENT_SIZE alignment, so we pick the greater alignment need.
*/
- if (xhci->quirks & XHCI_ZHAOXIN_TRB_FETCH)
+ if (xhci->quirks & XHCI_TRB_OVERFETCH)
+ /* Buggy HC prefetches beyond segment bounds - allocate dummy space at the end */
xhci->segment_pool = dma_pool_create("xHCI ring segments", dev,
TRB_SEGMENT_SIZE * 2, TRB_SEGMENT_SIZE * 2, xhci->page_size * 2);
else
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index ad0ff356f6fa..54460d11f7ee 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -38,6 +38,8 @@
#define PCI_DEVICE_ID_ETRON_EJ168 0x7023
#define PCI_DEVICE_ID_ETRON_EJ188 0x7052
+#define PCI_DEVICE_ID_VIA_VL805 0x3483
+
#define PCI_DEVICE_ID_INTEL_LYNXPOINT_XHCI 0x8c31
#define PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI 0x9c31
#define PCI_DEVICE_ID_INTEL_WILDCATPOINT_LP_XHCI 0x9cb1
@@ -418,8 +420,10 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
pdev->device == 0x3432)
xhci->quirks |= XHCI_BROKEN_STREAMS;
- if (pdev->vendor == PCI_VENDOR_ID_VIA && pdev->device == 0x3483)
+ if (pdev->vendor == PCI_VENDOR_ID_VIA && pdev->device == PCI_DEVICE_ID_VIA_VL805) {
xhci->quirks |= XHCI_LPM_SUPPORT;
+ xhci->quirks |= XHCI_TRB_OVERFETCH;
+ }
if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA &&
pdev->device == PCI_DEVICE_ID_ASMEDIA_1042_XHCI) {
@@ -467,11 +471,11 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
if (pdev->device == 0x9202) {
xhci->quirks |= XHCI_RESET_ON_RESUME;
- xhci->quirks |= XHCI_ZHAOXIN_TRB_FETCH;
+ xhci->quirks |= XHCI_TRB_OVERFETCH;
}
if (pdev->device == 0x9203)
- xhci->quirks |= XHCI_ZHAOXIN_TRB_FETCH;
+ xhci->quirks |= XHCI_TRB_OVERFETCH;
}
if (pdev->vendor == PCI_VENDOR_ID_CDNS &&
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 45653114ccd7..1a90ebc8a30e 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -780,8 +780,12 @@ static void xhci_clear_command_ring(struct xhci_hcd *xhci)
struct xhci_segment *seg;
ring = xhci->cmd_ring;
- xhci_for_each_ring_seg(ring->first_seg, seg)
+ xhci_for_each_ring_seg(ring->first_seg, seg) {
+ /* erase all TRBs before the link */
memset(seg->trbs, 0, sizeof(union xhci_trb) * (TRBS_PER_SEGMENT - 1));
+ /* clear link cycle bit */
+ seg->trbs[TRBS_PER_SEGMENT - 1].link.control &= cpu_to_le32(~TRB_CYCLE);
+ }
xhci_initialize_ring_info(ring);
/*
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 8c164340a2c3..779b01dee068 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1632,7 +1632,7 @@ struct xhci_hcd {
#define XHCI_EP_CTX_BROKEN_DCS BIT_ULL(42)
#define XHCI_SUSPEND_RESUME_CLKS BIT_ULL(43)
#define XHCI_RESET_TO_DEFAULT BIT_ULL(44)
-#define XHCI_ZHAOXIN_TRB_FETCH BIT_ULL(45)
+#define XHCI_TRB_OVERFETCH BIT_ULL(45)
#define XHCI_ZHAOXIN_HOST BIT_ULL(46)
#define XHCI_WRITE_64_HI_LO BIT_ULL(47)
#define XHCI_CDNS_SCTX_QUIRK BIT_ULL(48)
diff --git a/drivers/usb/musb/musb_cppi41.c b/drivers/usb/musb/musb_cppi41.c
index 9589243e8951..4cde3abb7006 100644
--- a/drivers/usb/musb/musb_cppi41.c
+++ b/drivers/usb/musb/musb_cppi41.c
@@ -760,8 +760,8 @@ cppi41_dma_controller_create(struct musb *musb, void __iomem *base)
if (!controller)
goto kzalloc_fail;
- hrtimer_init(&controller->early_tx, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- controller->early_tx.function = cppi41_recheck_tx_req;
+ hrtimer_setup(&controller->early_tx, cppi41_recheck_tx_req, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
INIT_LIST_HEAD(&controller->early_tx_list);
controller->controller.channel_alloc = cppi41_dma_channel_allocate;
diff --git a/drivers/usb/renesas_usbhs/common.c b/drivers/usb/renesas_usbhs/common.c
index 935fc496fe94..4b35ef216125 100644
--- a/drivers/usb/renesas_usbhs/common.c
+++ b/drivers/usb/renesas_usbhs/common.c
@@ -312,8 +312,10 @@ static int usbhsc_clk_get(struct device *dev, struct usbhs_priv *priv)
priv->clks[1] = of_clk_get(dev_of_node(dev), 1);
if (PTR_ERR(priv->clks[1]) == -ENOENT)
priv->clks[1] = NULL;
- else if (IS_ERR(priv->clks[1]))
+ else if (IS_ERR(priv->clks[1])) {
+ clk_put(priv->clks[0]);
return PTR_ERR(priv->clks[1]);
+ }
return 0;
}
@@ -779,6 +781,8 @@ static void usbhs_remove(struct platform_device *pdev)
dev_dbg(&pdev->dev, "usb remove\n");
+ flush_delayed_work(&priv->notify_hotplug_work);
+
/* power off */
if (!usbhs_get_dparam(priv, runtime_pwctrl))
usbhsc_power_ctrl(priv, 0);
diff --git a/drivers/usb/renesas_usbhs/mod_gadget.c b/drivers/usb/renesas_usbhs/mod_gadget.c
index 105132ae87ac..e8e5723f5412 100644
--- a/drivers/usb/renesas_usbhs/mod_gadget.c
+++ b/drivers/usb/renesas_usbhs/mod_gadget.c
@@ -1094,7 +1094,7 @@ int usbhs_mod_gadget_probe(struct usbhs_priv *priv)
goto usbhs_mod_gadget_probe_err_gpriv;
}
- gpriv->transceiver = usb_get_phy(USB_PHY_TYPE_UNDEFINED);
+ gpriv->transceiver = devm_usb_get_phy(dev, USB_PHY_TYPE_UNDEFINED);
dev_info(dev, "%stransceiver found\n",
!IS_ERR(gpriv->transceiver) ? "" : "no ");
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index e07c5e3eb18c..9b34e23b7091 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -1079,6 +1079,20 @@ static const struct usb_device_id id_table_combined[] = {
.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
/* GMC devices */
{ USB_DEVICE(GMC_VID, GMC_Z216C_PID) },
+ /* Altera USB Blaster 3 */
+ { USB_DEVICE_INTERFACE_NUMBER(ALTERA_VID, ALTERA_UB3_6022_PID, 1) },
+ { USB_DEVICE_INTERFACE_NUMBER(ALTERA_VID, ALTERA_UB3_6025_PID, 2) },
+ { USB_DEVICE_INTERFACE_NUMBER(ALTERA_VID, ALTERA_UB3_6026_PID, 2) },
+ { USB_DEVICE_INTERFACE_NUMBER(ALTERA_VID, ALTERA_UB3_6026_PID, 3) },
+ { USB_DEVICE_INTERFACE_NUMBER(ALTERA_VID, ALTERA_UB3_6029_PID, 2) },
+ { USB_DEVICE_INTERFACE_NUMBER(ALTERA_VID, ALTERA_UB3_602A_PID, 2) },
+ { USB_DEVICE_INTERFACE_NUMBER(ALTERA_VID, ALTERA_UB3_602A_PID, 3) },
+ { USB_DEVICE_INTERFACE_NUMBER(ALTERA_VID, ALTERA_UB3_602C_PID, 1) },
+ { USB_DEVICE_INTERFACE_NUMBER(ALTERA_VID, ALTERA_UB3_602D_PID, 1) },
+ { USB_DEVICE_INTERFACE_NUMBER(ALTERA_VID, ALTERA_UB3_602D_PID, 2) },
+ { USB_DEVICE_INTERFACE_NUMBER(ALTERA_VID, ALTERA_UB3_602E_PID, 1) },
+ { USB_DEVICE_INTERFACE_NUMBER(ALTERA_VID, ALTERA_UB3_602E_PID, 2) },
+ { USB_DEVICE_INTERFACE_NUMBER(ALTERA_VID, ALTERA_UB3_602E_PID, 3) },
{ } /* Terminating entry */
};
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index 5ee60ba2a73c..52be47d684ea 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -1612,3 +1612,16 @@
*/
#define GMC_VID 0x1cd7
#define GMC_Z216C_PID 0x0217 /* GMC Z216C Adapter IR-USB */
+
+/*
+ * Altera USB Blaster 3 (http://www.altera.com).
+ */
+#define ALTERA_VID 0x09fb
+#define ALTERA_UB3_6022_PID 0x6022
+#define ALTERA_UB3_6025_PID 0x6025
+#define ALTERA_UB3_6026_PID 0x6026
+#define ALTERA_UB3_6029_PID 0x6029
+#define ALTERA_UB3_602A_PID 0x602a
+#define ALTERA_UB3_602C_PID 0x602c
+#define ALTERA_UB3_602D_PID 0x602d
+#define ALTERA_UB3_602E_PID 0x602e
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 58bd54e8c483..5cd26dac2069 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -1368,13 +1368,13 @@ static const struct usb_device_id option_ids[] = {
.driver_info = NCTRL(0) | RSVD(1) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1075, 0xff), /* Telit FN990A (PCIe) */
.driver_info = RSVD(0) },
- { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1080, 0xff), /* Telit FE990 (rmnet) */
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1080, 0xff), /* Telit FE990A (rmnet) */
.driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
- { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1081, 0xff), /* Telit FE990 (MBIM) */
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1081, 0xff), /* Telit FE990A (MBIM) */
.driver_info = NCTRL(0) | RSVD(1) },
- { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1082, 0xff), /* Telit FE990 (RNDIS) */
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1082, 0xff), /* Telit FE990A (RNDIS) */
.driver_info = NCTRL(2) | RSVD(3) },
- { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1083, 0xff), /* Telit FE990 (ECM) */
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1083, 0xff), /* Telit FE990A (ECM) */
.driver_info = NCTRL(0) | RSVD(1) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10a0, 0xff), /* Telit FN20C04 (rmnet) */
.driver_info = RSVD(0) | NCTRL(3) },
@@ -1388,28 +1388,44 @@ static const struct usb_device_id option_ids[] = {
.driver_info = RSVD(0) | NCTRL(2) | RSVD(3) | RSVD(4) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10aa, 0xff), /* Telit FN920C04 (MBIM) */
.driver_info = NCTRL(3) | RSVD(4) | RSVD(5) },
+ { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10b0, 0xff, 0xff, 0x30), /* Telit FE990B (rmnet) */
+ .driver_info = NCTRL(5) },
+ { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10b0, 0xff, 0xff, 0x40) },
+ { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10b0, 0xff, 0xff, 0x60) },
+ { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10b1, 0xff, 0xff, 0x30), /* Telit FE990B (MBIM) */
+ .driver_info = NCTRL(6) },
+ { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10b1, 0xff, 0xff, 0x40) },
+ { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10b1, 0xff, 0xff, 0x60) },
+ { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10b2, 0xff, 0xff, 0x30), /* Telit FE990B (RNDIS) */
+ .driver_info = NCTRL(6) },
+ { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10b2, 0xff, 0xff, 0x40) },
+ { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10b2, 0xff, 0xff, 0x60) },
+ { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10b3, 0xff, 0xff, 0x30), /* Telit FE990B (ECM) */
+ .driver_info = NCTRL(6) },
+ { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10b3, 0xff, 0xff, 0x40) },
+ { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10b3, 0xff, 0xff, 0x60) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10c0, 0xff), /* Telit FE910C04 (rmnet) */
.driver_info = RSVD(0) | NCTRL(3) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10c4, 0xff), /* Telit FE910C04 (rmnet) */
.driver_info = RSVD(0) | NCTRL(3) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10c8, 0xff), /* Telit FE910C04 (rmnet) */
.driver_info = RSVD(0) | NCTRL(2) | RSVD(3) | RSVD(4) },
- { USB_DEVICE_INTERFACE_PROTOCOL(TELIT_VENDOR_ID, 0x10d0, 0x60) }, /* Telit FN990B (rmnet) */
- { USB_DEVICE_INTERFACE_PROTOCOL(TELIT_VENDOR_ID, 0x10d0, 0x40) },
- { USB_DEVICE_INTERFACE_PROTOCOL(TELIT_VENDOR_ID, 0x10d0, 0x30),
+ { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10d0, 0xff, 0xff, 0x30), /* Telit FN990B (rmnet) */
.driver_info = NCTRL(5) },
- { USB_DEVICE_INTERFACE_PROTOCOL(TELIT_VENDOR_ID, 0x10d1, 0x60) }, /* Telit FN990B (MBIM) */
- { USB_DEVICE_INTERFACE_PROTOCOL(TELIT_VENDOR_ID, 0x10d1, 0x40) },
- { USB_DEVICE_INTERFACE_PROTOCOL(TELIT_VENDOR_ID, 0x10d1, 0x30),
+ { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10d0, 0xff, 0xff, 0x40) },
+ { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10d0, 0xff, 0xff, 0x60) },
+ { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10d1, 0xff, 0xff, 0x30), /* Telit FN990B (MBIM) */
.driver_info = NCTRL(6) },
- { USB_DEVICE_INTERFACE_PROTOCOL(TELIT_VENDOR_ID, 0x10d2, 0x60) }, /* Telit FN990B (RNDIS) */
- { USB_DEVICE_INTERFACE_PROTOCOL(TELIT_VENDOR_ID, 0x10d2, 0x40) },
- { USB_DEVICE_INTERFACE_PROTOCOL(TELIT_VENDOR_ID, 0x10d2, 0x30),
+ { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10d1, 0xff, 0xff, 0x40) },
+ { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10d1, 0xff, 0xff, 0x60) },
+ { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10d2, 0xff, 0xff, 0x30), /* Telit FN990B (RNDIS) */
.driver_info = NCTRL(6) },
- { USB_DEVICE_INTERFACE_PROTOCOL(TELIT_VENDOR_ID, 0x10d3, 0x60) }, /* Telit FN990B (ECM) */
- { USB_DEVICE_INTERFACE_PROTOCOL(TELIT_VENDOR_ID, 0x10d3, 0x40) },
- { USB_DEVICE_INTERFACE_PROTOCOL(TELIT_VENDOR_ID, 0x10d3, 0x30),
+ { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10d2, 0xff, 0xff, 0x40) },
+ { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10d2, 0xff, 0xff, 0x60) },
+ { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10d3, 0xff, 0xff, 0x30), /* Telit FN990B (ECM) */
.driver_info = NCTRL(6) },
+ { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10d3, 0xff, 0xff, 0x40) },
+ { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10d3, 0xff, 0xff, 0x60) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910),
.driver_info = NCTRL(0) | RSVD(1) | RSVD(3) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910_DUAL_MODEM),
diff --git a/drivers/usb/storage/debug.c b/drivers/usb/storage/debug.c
index 576be66ad962..dda610f689b7 100644
--- a/drivers/usb/storage/debug.c
+++ b/drivers/usb/storage/debug.c
@@ -58,8 +58,8 @@ void usb_stor_show_command(const struct us_data *us, struct scsi_cmnd *srb)
case INQUIRY: what = "INQUIRY"; break;
case RECOVER_BUFFERED_DATA: what = "RECOVER_BUFFERED_DATA"; break;
case MODE_SELECT: what = "MODE_SELECT"; break;
- case RESERVE: what = "RESERVE"; break;
- case RELEASE: what = "RELEASE"; break;
+ case RESERVE_6: what = "RESERVE"; break;
+ case RELEASE_6: what = "RELEASE"; break;
case COPY: what = "COPY"; break;
case ERASE: what = "ERASE"; break;
case MODE_SENSE: what = "MODE_SENSE"; break;
diff --git a/drivers/usb/typec/tcpm/tcpci_rt1711h.c b/drivers/usb/typec/tcpm/tcpci_rt1711h.c
index 64f6dd0dc660..88c50b984e8a 100644
--- a/drivers/usb/typec/tcpm/tcpci_rt1711h.c
+++ b/drivers/usb/typec/tcpm/tcpci_rt1711h.c
@@ -334,6 +334,11 @@ static int rt1711h_probe(struct i2c_client *client)
{
int ret;
struct rt1711h_chip *chip;
+ const u16 alert_mask = TCPC_ALERT_TX_SUCCESS | TCPC_ALERT_TX_DISCARDED |
+ TCPC_ALERT_TX_FAILED | TCPC_ALERT_RX_HARD_RST |
+ TCPC_ALERT_RX_STATUS | TCPC_ALERT_POWER_STATUS |
+ TCPC_ALERT_CC_STATUS | TCPC_ALERT_RX_BUF_OVF |
+ TCPC_ALERT_FAULT;
chip = devm_kzalloc(&client->dev, sizeof(*chip), GFP_KERNEL);
if (!chip)
@@ -382,6 +387,12 @@ static int rt1711h_probe(struct i2c_client *client)
dev_name(chip->dev), chip);
if (ret < 0)
return ret;
+
+ /* Enable alert interrupts */
+ ret = rt1711h_write16(chip, TCPC_ALERT_MASK, alert_mask);
+ if (ret < 0)
+ return ret;
+
enable_irq_wake(client->irq);
return 0;
diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
index 6bf1a22c785a..a99db4e025cd 100644
--- a/drivers/usb/typec/tcpm/tcpm.c
+++ b/drivers/usb/typec/tcpm/tcpm.c
@@ -5117,16 +5117,16 @@ static void run_state_machine(struct tcpm_port *port)
*/
if (port->vbus_never_low) {
port->vbus_never_low = false;
- tcpm_set_state(port, SNK_SOFT_RESET,
- port->timings.sink_wait_cap_time);
+ upcoming_state = SNK_SOFT_RESET;
} else {
if (!port->self_powered)
upcoming_state = SNK_WAIT_CAPABILITIES_TIMEOUT;
else
upcoming_state = hard_reset_state(port);
- tcpm_set_state(port, SNK_WAIT_CAPABILITIES_TIMEOUT,
- port->timings.sink_wait_cap_time);
}
+
+ tcpm_set_state(port, upcoming_state,
+ port->timings.sink_wait_cap_time);
break;
case SNK_WAIT_CAPABILITIES_TIMEOUT:
/*
@@ -7721,14 +7721,14 @@ struct tcpm_port *tcpm_register_port(struct device *dev, struct tcpc_dev *tcpc)
kthread_init_work(&port->event_work, tcpm_pd_event_handler);
kthread_init_work(&port->enable_frs, tcpm_enable_frs_work);
kthread_init_work(&port->send_discover_work, tcpm_send_discover_work);
- hrtimer_init(&port->state_machine_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- port->state_machine_timer.function = state_machine_timer_handler;
- hrtimer_init(&port->vdm_state_machine_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- port->vdm_state_machine_timer.function = vdm_state_machine_timer_handler;
- hrtimer_init(&port->enable_frs_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- port->enable_frs_timer.function = enable_frs_timer_handler;
- hrtimer_init(&port->send_discover_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- port->send_discover_timer.function = send_discover_timer_handler;
+ hrtimer_setup(&port->state_machine_timer, state_machine_timer_handler, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
+ hrtimer_setup(&port->vdm_state_machine_timer, vdm_state_machine_timer_handler,
+ CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ hrtimer_setup(&port->enable_frs_timer, enable_frs_timer_handler, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
+ hrtimer_setup(&port->send_discover_timer, send_discover_timer_handler, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
spin_lock_init(&port->pd_event_lock);
diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c
index fcf499cc9458..2a2915b0a645 100644
--- a/drivers/usb/typec/ucsi/ucsi.c
+++ b/drivers/usb/typec/ucsi/ucsi.c
@@ -25,7 +25,7 @@
* difficult to estimate the time it takes for the system to process the command
* before it is actually passed to the PPM.
*/
-#define UCSI_TIMEOUT_MS 5000
+#define UCSI_TIMEOUT_MS 10000
/*
* UCSI_SWAP_TIMEOUT_MS - Timeout for role swap requests
@@ -1346,7 +1346,7 @@ static int ucsi_reset_ppm(struct ucsi *ucsi)
mutex_lock(&ucsi->ppm_lock);
- ret = ucsi->ops->read_cci(ucsi, &cci);
+ ret = ucsi->ops->poll_cci(ucsi, &cci);
if (ret < 0)
goto out;
@@ -1364,7 +1364,7 @@ static int ucsi_reset_ppm(struct ucsi *ucsi)
tmo = jiffies + msecs_to_jiffies(UCSI_TIMEOUT_MS);
do {
- ret = ucsi->ops->read_cci(ucsi, &cci);
+ ret = ucsi->ops->poll_cci(ucsi, &cci);
if (ret < 0)
goto out;
if (cci & UCSI_CCI_COMMAND_COMPLETE)
@@ -1393,7 +1393,7 @@ static int ucsi_reset_ppm(struct ucsi *ucsi)
/* Give the PPM time to process a reset before reading CCI */
msleep(20);
- ret = ucsi->ops->read_cci(ucsi, &cci);
+ ret = ucsi->ops->poll_cci(ucsi, &cci);
if (ret)
goto out;
@@ -1825,11 +1825,11 @@ static int ucsi_init(struct ucsi *ucsi)
err_unregister:
for (con = connector; con->port; con++) {
+ if (con->wq)
+ destroy_workqueue(con->wq);
ucsi_unregister_partner(con);
ucsi_unregister_altmodes(con, UCSI_RECIPIENT_CON);
ucsi_unregister_port_psy(con);
- if (con->wq)
- destroy_workqueue(con->wq);
usb_power_delivery_unregister_capabilities(con->port_sink_caps);
con->port_sink_caps = NULL;
@@ -1929,8 +1929,8 @@ struct ucsi *ucsi_create(struct device *dev, const struct ucsi_operations *ops)
struct ucsi *ucsi;
if (!ops ||
- !ops->read_version || !ops->read_cci || !ops->read_message_in ||
- !ops->sync_control || !ops->async_control)
+ !ops->read_version || !ops->read_cci || !ops->poll_cci ||
+ !ops->read_message_in || !ops->sync_control || !ops->async_control)
return ERR_PTR(-EINVAL);
ucsi = kzalloc(sizeof(*ucsi), GFP_KERNEL);
@@ -2013,10 +2013,6 @@ void ucsi_unregister(struct ucsi *ucsi)
for (i = 0; i < ucsi->cap.num_connectors; i++) {
cancel_work_sync(&ucsi->connector[i].work);
- ucsi_unregister_partner(&ucsi->connector[i]);
- ucsi_unregister_altmodes(&ucsi->connector[i],
- UCSI_RECIPIENT_CON);
- ucsi_unregister_port_psy(&ucsi->connector[i]);
if (ucsi->connector[i].wq) {
struct ucsi_work *uwork;
@@ -2032,6 +2028,11 @@ void ucsi_unregister(struct ucsi *ucsi)
destroy_workqueue(ucsi->connector[i].wq);
}
+ ucsi_unregister_partner(&ucsi->connector[i]);
+ ucsi_unregister_altmodes(&ucsi->connector[i],
+ UCSI_RECIPIENT_CON);
+ ucsi_unregister_port_psy(&ucsi->connector[i]);
+
usb_power_delivery_unregister_capabilities(ucsi->connector[i].port_sink_caps);
ucsi->connector[i].port_sink_caps = NULL;
usb_power_delivery_unregister_capabilities(ucsi->connector[i].port_source_caps);
diff --git a/drivers/usb/typec/ucsi/ucsi.h b/drivers/usb/typec/ucsi/ucsi.h
index 82735eb34f0e..28780acc4af2 100644
--- a/drivers/usb/typec/ucsi/ucsi.h
+++ b/drivers/usb/typec/ucsi/ucsi.h
@@ -62,6 +62,7 @@ struct dentry;
* struct ucsi_operations - UCSI I/O operations
* @read_version: Read implemented UCSI version
* @read_cci: Read CCI register
+ * @poll_cci: Read CCI register while polling with notifications disabled
* @read_message_in: Read message data from UCSI
* @sync_control: Blocking control operation
* @async_control: Non-blocking control operation
@@ -76,6 +77,7 @@ struct dentry;
struct ucsi_operations {
int (*read_version)(struct ucsi *ucsi, u16 *version);
int (*read_cci)(struct ucsi *ucsi, u32 *cci);
+ int (*poll_cci)(struct ucsi *ucsi, u32 *cci);
int (*read_message_in)(struct ucsi *ucsi, void *val, size_t val_len);
int (*sync_control)(struct ucsi *ucsi, u64 command);
int (*async_control)(struct ucsi *ucsi, u64 command);
diff --git a/drivers/usb/typec/ucsi/ucsi_acpi.c b/drivers/usb/typec/ucsi/ucsi_acpi.c
index 5c5515551963..ac1ebb5d9527 100644
--- a/drivers/usb/typec/ucsi/ucsi_acpi.c
+++ b/drivers/usb/typec/ucsi/ucsi_acpi.c
@@ -59,19 +59,24 @@ static int ucsi_acpi_read_version(struct ucsi *ucsi, u16 *version)
static int ucsi_acpi_read_cci(struct ucsi *ucsi, u32 *cci)
{
struct ucsi_acpi *ua = ucsi_get_drvdata(ucsi);
- int ret;
-
- if (UCSI_COMMAND(ua->cmd) == UCSI_PPM_RESET) {
- ret = ucsi_acpi_dsm(ua, UCSI_DSM_FUNC_READ);
- if (ret)
- return ret;
- }
memcpy(cci, ua->base + UCSI_CCI, sizeof(*cci));
return 0;
}
+static int ucsi_acpi_poll_cci(struct ucsi *ucsi, u32 *cci)
+{
+ struct ucsi_acpi *ua = ucsi_get_drvdata(ucsi);
+ int ret;
+
+ ret = ucsi_acpi_dsm(ua, UCSI_DSM_FUNC_READ);
+ if (ret)
+ return ret;
+
+ return ucsi_acpi_read_cci(ucsi, cci);
+}
+
static int ucsi_acpi_read_message_in(struct ucsi *ucsi, void *val, size_t val_len)
{
struct ucsi_acpi *ua = ucsi_get_drvdata(ucsi);
@@ -94,6 +99,7 @@ static int ucsi_acpi_async_control(struct ucsi *ucsi, u64 command)
static const struct ucsi_operations ucsi_acpi_ops = {
.read_version = ucsi_acpi_read_version,
.read_cci = ucsi_acpi_read_cci,
+ .poll_cci = ucsi_acpi_poll_cci,
.read_message_in = ucsi_acpi_read_message_in,
.sync_control = ucsi_sync_control_common,
.async_control = ucsi_acpi_async_control
@@ -142,6 +148,7 @@ static int ucsi_gram_sync_control(struct ucsi *ucsi, u64 command)
static const struct ucsi_operations ucsi_gram_ops = {
.read_version = ucsi_acpi_read_version,
.read_cci = ucsi_acpi_read_cci,
+ .poll_cci = ucsi_acpi_poll_cci,
.read_message_in = ucsi_gram_read_message_in,
.sync_control = ucsi_gram_sync_control,
.async_control = ucsi_acpi_async_control
diff --git a/drivers/usb/typec/ucsi/ucsi_ccg.c b/drivers/usb/typec/ucsi/ucsi_ccg.c
index 740171f24ef9..4b1668733a4b 100644
--- a/drivers/usb/typec/ucsi/ucsi_ccg.c
+++ b/drivers/usb/typec/ucsi/ucsi_ccg.c
@@ -664,6 +664,7 @@ err_put:
static const struct ucsi_operations ucsi_ccg_ops = {
.read_version = ucsi_ccg_read_version,
.read_cci = ucsi_ccg_read_cci,
+ .poll_cci = ucsi_ccg_read_cci,
.read_message_in = ucsi_ccg_read_message_in,
.sync_control = ucsi_ccg_sync_control,
.async_control = ucsi_ccg_async_control,
diff --git a/drivers/usb/typec/ucsi/ucsi_glink.c b/drivers/usb/typec/ucsi/ucsi_glink.c
index fed39d458090..8af79101a2fc 100644
--- a/drivers/usb/typec/ucsi/ucsi_glink.c
+++ b/drivers/usb/typec/ucsi/ucsi_glink.c
@@ -206,6 +206,7 @@ static void pmic_glink_ucsi_connector_status(struct ucsi_connector *con)
static const struct ucsi_operations pmic_glink_ucsi_ops = {
.read_version = pmic_glink_ucsi_read_version,
.read_cci = pmic_glink_ucsi_read_cci,
+ .poll_cci = pmic_glink_ucsi_read_cci,
.read_message_in = pmic_glink_ucsi_read_message_in,
.sync_control = ucsi_sync_control_common,
.async_control = pmic_glink_ucsi_async_control,
diff --git a/drivers/usb/typec/ucsi/ucsi_stm32g0.c b/drivers/usb/typec/ucsi/ucsi_stm32g0.c
index 6923fad31d79..57ef7d83a412 100644
--- a/drivers/usb/typec/ucsi/ucsi_stm32g0.c
+++ b/drivers/usb/typec/ucsi/ucsi_stm32g0.c
@@ -424,6 +424,7 @@ static irqreturn_t ucsi_stm32g0_irq_handler(int irq, void *data)
static const struct ucsi_operations ucsi_stm32g0_ops = {
.read_version = ucsi_stm32g0_read_version,
.read_cci = ucsi_stm32g0_read_cci,
+ .poll_cci = ucsi_stm32g0_read_cci,
.read_message_in = ucsi_stm32g0_read_message_in,
.sync_control = ucsi_sync_control_common,
.async_control = ucsi_stm32g0_async_control,
diff --git a/drivers/usb/typec/ucsi/ucsi_yoga_c630.c b/drivers/usb/typec/ucsi/ucsi_yoga_c630.c
index 4cae85c0dc12..d33e3f2dd1d8 100644
--- a/drivers/usb/typec/ucsi/ucsi_yoga_c630.c
+++ b/drivers/usb/typec/ucsi/ucsi_yoga_c630.c
@@ -74,6 +74,7 @@ static int yoga_c630_ucsi_async_control(struct ucsi *ucsi, u64 command)
static const struct ucsi_operations yoga_c630_ucsi_ops = {
.read_version = yoga_c630_ucsi_read_version,
.read_cci = yoga_c630_ucsi_read_cci,
+ .poll_cci = yoga_c630_ucsi_read_cci,
.read_message_in = yoga_c630_ucsi_read_message_in,
.sync_control = ucsi_sync_control_common,
.async_control = yoga_c630_ucsi_async_control,
diff --git a/drivers/vfio/group.c b/drivers/vfio/group.c
index 49559605177e..c321d442f0da 100644
--- a/drivers/vfio/group.c
+++ b/drivers/vfio/group.c
@@ -266,24 +266,12 @@ static struct file *vfio_device_open_file(struct vfio_device *device)
if (ret)
goto err_free;
- /*
- * We can't use anon_inode_getfd() because we need to modify
- * the f_mode flags directly to allow more than just ioctls
- */
- filep = anon_inode_getfile("[vfio-device]", &vfio_device_fops,
- df, O_RDWR);
+ filep = anon_inode_getfile_fmode("[vfio-device]", &vfio_device_fops,
+ df, O_RDWR, FMODE_PREAD | FMODE_PWRITE);
if (IS_ERR(filep)) {
ret = PTR_ERR(filep);
goto err_close_device;
}
-
- /*
- * TODO: add an anon_inode interface to do this.
- * Appears to be missing by lack of need rather than
- * explicitly prevented. Now there's need.
- */
- filep->f_mode |= (FMODE_PREAD | FMODE_PWRITE);
-
/*
* Use the pseudo fs inode on the device to link all mmaps
* to the same address space, allowing us to unmap all vmas
diff --git a/drivers/video/fbdev/hyperv_fb.c b/drivers/video/fbdev/hyperv_fb.c
index 7fdb5edd7e2e..75338ffc703f 100644
--- a/drivers/video/fbdev/hyperv_fb.c
+++ b/drivers/video/fbdev/hyperv_fb.c
@@ -282,6 +282,8 @@ static uint screen_depth;
static uint screen_fb_size;
static uint dio_fb_size; /* FB size for deferred IO */
+static void hvfb_putmem(struct fb_info *info);
+
/* Send message to Hyper-V host */
static inline int synthvid_send(struct hv_device *hdev,
struct synthvid_msg *msg)
@@ -863,6 +865,17 @@ static void hvfb_ops_damage_area(struct fb_info *info, u32 x, u32 y, u32 width,
}
/*
+ * fb_ops.fb_destroy is called by the last put_fb_info() call at the end
+ * of unregister_framebuffer() or fb_release(). Do any cleanup related to
+ * the framebuffer here.
+ */
+static void hvfb_destroy(struct fb_info *info)
+{
+ hvfb_putmem(info);
+ framebuffer_release(info);
+}
+
+/*
* TODO: GEN1 codepaths allocate from system or DMA-able memory. Fix the
* driver to use the _SYSMEM_ or _DMAMEM_ helpers in these cases.
*/
@@ -877,6 +890,7 @@ static const struct fb_ops hvfb_ops = {
.fb_set_par = hvfb_set_par,
.fb_setcolreg = hvfb_setcolreg,
.fb_blank = hvfb_blank,
+ .fb_destroy = hvfb_destroy,
};
/* Get options from kernel paramenter "video=" */
@@ -952,7 +966,7 @@ static phys_addr_t hvfb_get_phymem(struct hv_device *hdev,
}
/* Release contiguous physical memory */
-static void hvfb_release_phymem(struct hv_device *hdev,
+static void hvfb_release_phymem(struct device *device,
phys_addr_t paddr, unsigned int size)
{
unsigned int order = get_order(size);
@@ -960,7 +974,7 @@ static void hvfb_release_phymem(struct hv_device *hdev,
if (order <= MAX_PAGE_ORDER)
__free_pages(pfn_to_page(paddr >> PAGE_SHIFT), order);
else
- dma_free_coherent(&hdev->device,
+ dma_free_coherent(device,
round_up(size, PAGE_SIZE),
phys_to_virt(paddr),
paddr);
@@ -989,6 +1003,7 @@ static int hvfb_getmem(struct hv_device *hdev, struct fb_info *info)
base = pci_resource_start(pdev, 0);
size = pci_resource_len(pdev, 0);
+ aperture_remove_conflicting_devices(base, size, KBUILD_MODNAME);
/*
* For Gen 1 VM, we can directly use the contiguous memory
@@ -1010,11 +1025,21 @@ static int hvfb_getmem(struct hv_device *hdev, struct fb_info *info)
goto getmem_done;
}
pr_info("Unable to allocate enough contiguous physical memory on Gen 1 VM. Using MMIO instead.\n");
+ } else {
+ aperture_remove_all_conflicting_devices(KBUILD_MODNAME);
}
/*
- * Cannot use the contiguous physical memory.
- * Allocate mmio space for framebuffer.
+ * Cannot use contiguous physical memory, so allocate MMIO space for
+ * the framebuffer. At this point in the function, conflicting devices
+ * that might have claimed the framebuffer MMIO space based on
+ * screen_info.lfb_base must have already been removed so that
+ * vmbus_allocate_mmio() does not allocate different MMIO space. If the
+ * kdump image were to be loaded using kexec_file_load(), the
+ * framebuffer location in the kdump image would be set from
+ * screen_info.lfb_base at the time that kdump is enabled. If the
+ * framebuffer has moved elsewhere, this could be the wrong location,
+ * causing kdump to hang when efifb (for example) loads.
*/
dio_fb_size =
screen_width * screen_height * screen_depth / 8;
@@ -1051,11 +1076,6 @@ static int hvfb_getmem(struct hv_device *hdev, struct fb_info *info)
info->screen_size = dio_fb_size;
getmem_done:
- if (base && size)
- aperture_remove_conflicting_devices(base, size, KBUILD_MODNAME);
- else
- aperture_remove_all_conflicting_devices(KBUILD_MODNAME);
-
if (!gen2vm)
pci_dev_put(pdev);
@@ -1074,16 +1094,16 @@ err1:
}
/* Release the framebuffer */
-static void hvfb_putmem(struct hv_device *hdev, struct fb_info *info)
+static void hvfb_putmem(struct fb_info *info)
{
struct hvfb_par *par = info->par;
if (par->need_docopy) {
vfree(par->dio_vp);
- iounmap(info->screen_base);
+ iounmap(par->mmio_vp);
vmbus_free_mmio(par->mem->start, screen_fb_size);
} else {
- hvfb_release_phymem(hdev, info->fix.smem_start,
+ hvfb_release_phymem(info->device, info->fix.smem_start,
screen_fb_size);
}
@@ -1172,7 +1192,7 @@ static int hvfb_probe(struct hv_device *hdev,
if (ret)
goto error;
- ret = register_framebuffer(info);
+ ret = devm_register_framebuffer(&hdev->device, info);
if (ret) {
pr_err("Unable to register framebuffer\n");
goto error;
@@ -1197,7 +1217,7 @@ static int hvfb_probe(struct hv_device *hdev,
error:
fb_deferred_io_cleanup(info);
- hvfb_putmem(hdev, info);
+ hvfb_putmem(info);
error2:
vmbus_close(hdev->channel);
error1:
@@ -1220,14 +1240,10 @@ static void hvfb_remove(struct hv_device *hdev)
fb_deferred_io_cleanup(info);
- unregister_framebuffer(info);
cancel_delayed_work_sync(&par->dwork);
vmbus_close(hdev->channel);
hv_set_drvdata(hdev, NULL);
-
- hvfb_putmem(hdev, info);
- framebuffer_release(info);
}
static int hvfb_suspend(struct hv_device *hdev)
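
Because registration is now devm-managed and teardown can also arrive through fb_release() of a still-open file descriptor, the hyperv_fb patch moves the memory release into fb_ops.fb_destroy, which the framebuffer core calls from the final put_fb_info(). A hedged sketch of that general pattern, with placeholder names rather than the driver's own:

    #include <linux/fb.h>
    #include <linux/module.h>

    /* Placeholder for the driver's own buffer teardown (iounmap, free pages...). */
    static void my_fb_free_memory(struct fb_info *info)
    {
    	/* driver-specific cleanup of info->screen_base et al. */
    }

    /* Sketch: fb_destroy runs from the last put_fb_info(), i.e. after
     * unregister_framebuffer() or the final fb_release(), so it is the one
     * place where freeing the backing memory and the fb_info itself stays
     * safe even if userspace still held the device open at unregister time.
     */
    static void my_fb_destroy(struct fb_info *info)
    {
    	my_fb_free_memory(info);
    	framebuffer_release(info);
    }

    static const struct fb_ops my_fb_ops = {
    	.owner		= THIS_MODULE,
    	.fb_destroy	= my_fb_destroy,
    	/* other operations omitted in this sketch */
    };
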
diff --git a/drivers/virt/acrn/hsm.c b/drivers/virt/acrn/hsm.c
index c24036c4e51e..e4e196abdaac 100644
--- a/drivers/virt/acrn/hsm.c
+++ b/drivers/virt/acrn/hsm.c
@@ -49,7 +49,7 @@ static int pmcmd_ioctl(u64 cmd, void __user *uptr)
switch (cmd & PMCMD_TYPE_MASK) {
case ACRN_PMCMD_GET_PX_CNT:
case ACRN_PMCMD_GET_CX_CNT:
- pm_info = kmalloc(sizeof(u64), GFP_KERNEL);
+ pm_info = kzalloc(sizeof(u64), GFP_KERNEL);
if (!pm_info)
return -ENOMEM;
@@ -64,7 +64,7 @@ static int pmcmd_ioctl(u64 cmd, void __user *uptr)
kfree(pm_info);
break;
case ACRN_PMCMD_GET_PX_DATA:
- px_data = kmalloc(sizeof(*px_data), GFP_KERNEL);
+ px_data = kzalloc(sizeof(*px_data), GFP_KERNEL);
if (!px_data)
return -ENOMEM;
@@ -79,7 +79,7 @@ static int pmcmd_ioctl(u64 cmd, void __user *uptr)
kfree(px_data);
break;
case ACRN_PMCMD_GET_CX_DATA:
- cx_data = kmalloc(sizeof(*cx_data), GFP_KERNEL);
+ cx_data = kzalloc(sizeof(*cx_data), GFP_KERNEL);
if (!cx_data)
return -ENOMEM;
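
The ACRN change swaps kmalloc() for kzalloc() because these buffers are later copied to user space; if the hypervisor fills only part of a buffer, the remainder must not leak stale kernel heap contents. A minimal sketch of the pattern, with hypothetical names:

    #include <linux/slab.h>
    #include <linux/uaccess.h>

    static int fill_from_hypervisor(void *buf, size_t len);	/* placeholder */

    /* Sketch: zero-initialise any buffer that may reach user space only
     * partially filled, so copy_to_user() never exposes stale heap data.
     */
    static int report_to_user(void __user *uptr, size_t len)
    {
    	void *buf = kzalloc(len, GFP_KERNEL);	/* zeroed, unlike kmalloc() */
    	int ret;

    	if (!buf)
    		return -ENOMEM;

    	ret = fill_from_hypervisor(buf, len);	/* may fill less than len */
    	if (!ret && copy_to_user(uptr, buf, len))
    		ret = -EFAULT;

    	kfree(buf);
    	return ret;
    }
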
diff --git a/drivers/virt/coco/sev-guest/sev-guest.c b/drivers/virt/coco/sev-guest/sev-guest.c
index 264b6523fe52..cf3fb61f4d5b 100644
--- a/drivers/virt/coco/sev-guest/sev-guest.c
+++ b/drivers/virt/coco/sev-guest/sev-guest.c
@@ -23,6 +23,7 @@
#include <linux/cleanup.h>
#include <linux/uuid.h>
#include <linux/configfs.h>
+#include <linux/mm.h>
#include <uapi/linux/sev-guest.h>
#include <uapi/linux/psp-sev.h>
@@ -38,12 +39,6 @@ struct snp_guest_dev {
struct miscdevice misc;
struct snp_msg_desc *msg_desc;
-
- union {
- struct snp_report_req report;
- struct snp_derived_key_req derived_key;
- struct snp_ext_report_req ext_report;
- } req;
};
/*
@@ -71,7 +66,7 @@ struct snp_req_resp {
static int get_report(struct snp_guest_dev *snp_dev, struct snp_guest_request_ioctl *arg)
{
- struct snp_report_req *report_req = &snp_dev->req.report;
+ struct snp_report_req *report_req __free(kfree) = NULL;
struct snp_msg_desc *mdesc = snp_dev->msg_desc;
struct snp_report_resp *report_resp;
struct snp_guest_req req = {};
@@ -80,6 +75,10 @@ static int get_report(struct snp_guest_dev *snp_dev, struct snp_guest_request_io
if (!arg->req_data || !arg->resp_data)
return -EINVAL;
+ report_req = kzalloc(sizeof(*report_req), GFP_KERNEL_ACCOUNT);
+ if (!report_req)
+ return -ENOMEM;
+
if (copy_from_user(report_req, (void __user *)arg->req_data, sizeof(*report_req)))
return -EFAULT;
@@ -116,7 +115,7 @@ e_free:
static int get_derived_key(struct snp_guest_dev *snp_dev, struct snp_guest_request_ioctl *arg)
{
- struct snp_derived_key_req *derived_key_req = &snp_dev->req.derived_key;
+ struct snp_derived_key_req *derived_key_req __free(kfree) = NULL;
struct snp_derived_key_resp derived_key_resp = {0};
struct snp_msg_desc *mdesc = snp_dev->msg_desc;
struct snp_guest_req req = {};
@@ -136,6 +135,10 @@ static int get_derived_key(struct snp_guest_dev *snp_dev, struct snp_guest_reque
if (sizeof(buf) < resp_len)
return -ENOMEM;
+ derived_key_req = kzalloc(sizeof(*derived_key_req), GFP_KERNEL_ACCOUNT);
+ if (!derived_key_req)
+ return -ENOMEM;
+
if (copy_from_user(derived_key_req, (void __user *)arg->req_data,
sizeof(*derived_key_req)))
return -EFAULT;
@@ -168,16 +171,21 @@ static int get_ext_report(struct snp_guest_dev *snp_dev, struct snp_guest_reques
struct snp_req_resp *io)
{
- struct snp_ext_report_req *report_req = &snp_dev->req.ext_report;
+ struct snp_ext_report_req *report_req __free(kfree) = NULL;
struct snp_msg_desc *mdesc = snp_dev->msg_desc;
struct snp_report_resp *report_resp;
struct snp_guest_req req = {};
int ret, npages = 0, resp_len;
sockptr_t certs_address;
+ struct page *page;
if (sockptr_is_null(io->req_data) || sockptr_is_null(io->resp_data))
return -EINVAL;
+ report_req = kzalloc(sizeof(*report_req), GFP_KERNEL_ACCOUNT);
+ if (!report_req)
+ return -ENOMEM;
+
if (copy_from_sockptr(report_req, io->req_data, sizeof(*report_req)))
return -EFAULT;
@@ -203,8 +211,20 @@ static int get_ext_report(struct snp_guest_dev *snp_dev, struct snp_guest_reques
* the host. If host does not supply any certs in it, then copy
* zeros to indicate that certificate data was not provided.
*/
- memset(mdesc->certs_data, 0, report_req->certs_len);
npages = report_req->certs_len >> PAGE_SHIFT;
+ page = alloc_pages(GFP_KERNEL_ACCOUNT | __GFP_ZERO,
+ get_order(report_req->certs_len));
+ if (!page)
+ return -ENOMEM;
+
+ req.certs_data = page_address(page);
+ ret = set_memory_decrypted((unsigned long)req.certs_data, npages);
+ if (ret) {
+ pr_err("failed to mark page shared, ret=%d\n", ret);
+ __free_pages(page, get_order(report_req->certs_len));
+ return -EFAULT;
+ }
+
cmd:
/*
* The intermediate response buffer is used while decrypting the
@@ -213,10 +233,12 @@ cmd:
*/
resp_len = sizeof(report_resp->data) + mdesc->ctx->authsize;
report_resp = kzalloc(resp_len, GFP_KERNEL_ACCOUNT);
- if (!report_resp)
- return -ENOMEM;
+ if (!report_resp) {
+ ret = -ENOMEM;
+ goto e_free_data;
+ }
- mdesc->input.data_npages = npages;
+ req.input.data_npages = npages;
req.msg_version = arg->msg_version;
req.msg_type = SNP_MSG_REPORT_REQ;
@@ -231,7 +253,7 @@ cmd:
/* If certs length is invalid then copy the returned length */
if (arg->vmm_error == SNP_GUEST_VMM_ERR_INVALID_LEN) {
- report_req->certs_len = mdesc->input.data_npages << PAGE_SHIFT;
+ report_req->certs_len = req.input.data_npages << PAGE_SHIFT;
if (copy_to_sockptr(io->req_data, report_req, sizeof(*report_req)))
ret = -EFAULT;
@@ -240,7 +262,7 @@ cmd:
if (ret)
goto e_free;
- if (npages && copy_to_sockptr(certs_address, mdesc->certs_data, report_req->certs_len)) {
+ if (npages && copy_to_sockptr(certs_address, req.certs_data, report_req->certs_len)) {
ret = -EFAULT;
goto e_free;
}
@@ -250,6 +272,13 @@ cmd:
e_free:
kfree(report_resp);
+e_free_data:
+ if (npages) {
+ if (set_memory_encrypted((unsigned long)req.certs_data, npages))
+ WARN_ONCE(ret, "failed to restore encryption mask (leak it)\n");
+ else
+ __free_pages(page, get_order(report_req->certs_len));
+ }
return ret;
}
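
The sev-guest conversion above leans on the scoped-cleanup helpers from <linux/cleanup.h>: a pointer declared with __free(kfree) is handed to kfree() automatically when it leaves scope, so every early return stays leak-free without a goto ladder. A small sketch of the idiom; the request structure and function names are hypothetical.

    #include <linux/cleanup.h>
    #include <linux/slab.h>
    #include <linux/uaccess.h>

    struct my_req { u8 data[64]; };	/* placeholder structure */

    /* Sketch: __free(kfree) attaches kfree() to the variable's scope, so the
     * allocation is released on every return path, including the error ones.
     */
    static int handle_req(void __user *uptr)
    {
    	struct my_req *req __free(kfree) = NULL;

    	req = kzalloc(sizeof(*req), GFP_KERNEL);
    	if (!req)
    		return -ENOMEM;

    	if (copy_from_user(req, uptr, sizeof(*req)))
    		return -EFAULT;		/* req freed automatically */

    	/* ... use req ... */
    	return 0;			/* req freed automatically */
    }
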
diff --git a/drivers/virt/vboxguest/Kconfig b/drivers/virt/vboxguest/Kconfig
index 11b153e7454e..eaba28c95e73 100644
--- a/drivers/virt/vboxguest/Kconfig
+++ b/drivers/virt/vboxguest/Kconfig
@@ -1,7 +1,8 @@
# SPDX-License-Identifier: GPL-2.0-only
config VBOXGUEST
tristate "Virtual Box Guest integration support"
- depends on (ARM64 || X86) && PCI && INPUT
+ depends on (ARM64 || X86 || COMPILE_TEST) && PCI && INPUT
+ depends on HAS_IOPORT
help
This is a driver for the Virtual Box Guest PCI device used in
Virtual Box virtual machines. Enabling this driver will add
diff --git a/drivers/virtio/virtio_mem.c b/drivers/virtio/virtio_mem.c
index 8a294b9cbcf6..56d0dbe62163 100644
--- a/drivers/virtio/virtio_mem.c
+++ b/drivers/virtio/virtio_mem.c
@@ -2950,8 +2950,8 @@ static int virtio_mem_probe(struct virtio_device *vdev)
mutex_init(&vm->hotplug_mutex);
INIT_LIST_HEAD(&vm->next);
spin_lock_init(&vm->removal_lock);
- hrtimer_init(&vm->retry_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- vm->retry_timer.function = virtio_mem_timer_expired;
+ hrtimer_setup(&vm->retry_timer, virtio_mem_timer_expired, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
vm->retry_timer_ms = VIRTIO_MEM_RETRY_TIMER_MIN_MS;
vm->in_kdump = is_kdump_kernel();
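
This hunk and the watchdog hunks that follow convert the two-step hrtimer_init() plus timer->function assignment into a single hrtimer_setup() call that takes the callback up front. A sketch of the converted pattern with a placeholder timer and callback:

    #include <linux/hrtimer.h>
    #include <linux/ktime.h>

    static struct hrtimer my_timer;		/* placeholder timer */

    static enum hrtimer_restart my_timer_fn(struct hrtimer *t)
    {
    	/* ... do the timer's work ... */
    	return HRTIMER_NORESTART;
    }

    /* Sketch: hrtimer_setup() replaces the old two-step sequence
     *	hrtimer_init(&my_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
     *	my_timer.function = my_timer_fn;
     * by taking the callback as an argument.
     */
    static void my_timer_init(void)
    {
    	hrtimer_setup(&my_timer, my_timer_fn, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
    }
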
diff --git a/drivers/watchdog/softdog.c b/drivers/watchdog/softdog.c
index 7a1096265f18..0820e35ad2e3 100644
--- a/drivers/watchdog/softdog.c
+++ b/drivers/watchdog/softdog.c
@@ -187,14 +187,12 @@ static int __init softdog_init(void)
watchdog_set_nowayout(&softdog_dev, nowayout);
watchdog_stop_on_reboot(&softdog_dev);
- hrtimer_init(&softdog_ticktock, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- softdog_ticktock.function = softdog_fire;
+ hrtimer_setup(&softdog_ticktock, softdog_fire, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
if (IS_ENABLED(CONFIG_SOFT_WATCHDOG_PRETIMEOUT)) {
softdog_info.options |= WDIOF_PRETIMEOUT;
- hrtimer_init(&softdog_preticktock, CLOCK_MONOTONIC,
- HRTIMER_MODE_REL);
- softdog_preticktock.function = softdog_pretimeout;
+ hrtimer_setup(&softdog_preticktock, softdog_pretimeout, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
}
if (soft_active_on_boot)
diff --git a/drivers/watchdog/watchdog_dev.c b/drivers/watchdog/watchdog_dev.c
index 19698d87dc57..8369fd94fc1a 100644
--- a/drivers/watchdog/watchdog_dev.c
+++ b/drivers/watchdog/watchdog_dev.c
@@ -1051,8 +1051,8 @@ static int watchdog_cdev_register(struct watchdog_device *wdd)
}
kthread_init_work(&wd_data->work, watchdog_ping_work);
- hrtimer_init(&wd_data->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
- wd_data->timer.function = watchdog_timer_expired;
+ hrtimer_setup(&wd_data->timer, watchdog_timer_expired, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL_HARD);
watchdog_hrtimer_pretimeout_init(wdd);
if (wdd->id == 0) {
diff --git a/drivers/watchdog/watchdog_hrtimer_pretimeout.c b/drivers/watchdog/watchdog_hrtimer_pretimeout.c
index 940b53718a91..fbc7eecd8b20 100644
--- a/drivers/watchdog/watchdog_hrtimer_pretimeout.c
+++ b/drivers/watchdog/watchdog_hrtimer_pretimeout.c
@@ -23,8 +23,8 @@ void watchdog_hrtimer_pretimeout_init(struct watchdog_device *wdd)
{
struct watchdog_core_data *wd_data = wdd->wd_data;
- hrtimer_init(&wd_data->pretimeout_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- wd_data->pretimeout_timer.function = watchdog_hrtimer_pretimeout;
+ hrtimer_setup(&wd_data->pretimeout_timer, watchdog_hrtimer_pretimeout, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
}
void watchdog_hrtimer_pretimeout_start(struct watchdog_device *wdd)
diff --git a/drivers/xen/pci.c b/drivers/xen/pci.c
index 416f231809cb..bfe07adb3e3a 100644
--- a/drivers/xen/pci.c
+++ b/drivers/xen/pci.c
@@ -43,6 +43,18 @@ static int xen_add_device(struct device *dev)
pci_mcfg_reserved = true;
}
#endif
+
+ if (pci_domain_nr(pci_dev->bus) >> 16) {
+ /*
+ * The hypercall interface is limited to 16bit PCI segment
+ * values, do not attempt to register devices with Xen in
+ * segments greater or equal than 0x10000.
+ */
+ dev_info(dev,
+ "not registering with Xen: invalid PCI segment\n");
+ return 0;
+ }
+
if (pci_seg_supported) {
DEFINE_RAW_FLEX(struct physdev_pci_device_add, add, optarr, 1);
@@ -149,6 +161,16 @@ static int xen_remove_device(struct device *dev)
int r;
struct pci_dev *pci_dev = to_pci_dev(dev);
+ if (pci_domain_nr(pci_dev->bus) >> 16) {
+ /*
+ * The hypercall interface is limited to 16bit PCI segment
+ * values.
+ */
+ dev_info(dev,
+ "not unregistering with Xen: invalid PCI segment\n");
+ return 0;
+ }
+
if (pci_seg_supported) {
struct physdev_pci_device device = {
.seg = pci_domain_nr(pci_dev->bus),
@@ -182,6 +204,16 @@ int xen_reset_device(const struct pci_dev *dev)
.flags = PCI_DEVICE_RESET_FLR,
};
+ if (pci_domain_nr(dev->bus) >> 16) {
+ /*
+ * The hypercall interface is limited to 16bit PCI segment
+ * values.
+ */
+ dev_info(&dev->dev,
+ "unable to notify Xen of device reset: invalid PCI segment\n");
+ return 0;
+ }
+
return HYPERVISOR_physdev_op(PHYSDEVOP_pci_device_reset, &device);
}
EXPORT_SYMBOL_GPL(xen_reset_device);
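
All three Xen hooks above now skip devices whose PCI segment (domain) number does not fit in 16 bits, since the physdev hypercall structures carry the segment in a 16-bit field. The check is just a shift, shown here as a tiny helper sketch:

    #include <linux/pci.h>

    /* Sketch: a PCI domain number >= 0x10000 cannot be represented in the
     * hypercall ABI's 16-bit segment field, so such devices are not
     * registered with (or unregistered from) Xen at all.
     */
    static bool xen_pci_seg_fits(const struct pci_dev *pdev)
    {
    	return (pci_domain_nr(pdev->bus) >> 16) == 0;
    }
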
diff --git a/drivers/xen/platform-pci.c b/drivers/xen/platform-pci.c
index 544d3f9010b9..1db82da56db6 100644
--- a/drivers/xen/platform-pci.c
+++ b/drivers/xen/platform-pci.c
@@ -26,6 +26,8 @@
#define DRV_NAME "xen-platform-pci"
+#define PCI_DEVICE_ID_XEN_PLATFORM_XS61 0x0002
+
static unsigned long platform_mmio;
static unsigned long platform_mmio_alloc;
static unsigned long platform_mmiolen;
@@ -174,6 +176,8 @@ pci_out:
static const struct pci_device_id platform_pci_tbl[] = {
{PCI_VENDOR_ID_XEN, PCI_DEVICE_ID_XEN_PLATFORM,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ {PCI_VENDOR_ID_XEN, PCI_DEVICE_ID_XEN_PLATFORM_XS61,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{0,}
};
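
Adding an entry to the driver's pci_device_id table is all that is needed for the PCI core to bind the existing platform-pci driver to the new device ID. A generic sketch with obviously placeholder vendor and device values, using the PCI_DEVICE() convenience macro:

    #include <linux/mod_devicetable.h>
    #include <linux/pci.h>

    #define MY_VENDOR_ID	0x1234		/* placeholder vendor */
    #define MY_DEVICE_ID	0x0002		/* placeholder device */

    /* Sketch: a new ID in the match table lets the PCI core probe the
     * existing driver against the new device; no other change is needed
     * unless the device requires special handling.
     */
    static const struct pci_device_id my_pci_tbl[] = {
    	{ PCI_DEVICE(MY_VENDOR_ID, MY_DEVICE_ID) },
    	{ }	/* terminator */
    };
    MODULE_DEVICE_TABLE(pci, my_pci_tbl);
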
diff --git a/drivers/xen/xen-pciback/pci_stub.c b/drivers/xen/xen-pciback/pci_stub.c
index b616b7768c3b..5c2f829d5b0b 100644
--- a/drivers/xen/xen-pciback/pci_stub.c
+++ b/drivers/xen/xen-pciback/pci_stub.c
@@ -262,26 +262,6 @@ struct pci_dev *pcistub_get_pci_dev_by_slot(struct xen_pcibk_device *pdev,
return found_dev;
}
-struct pci_dev *pcistub_get_pci_dev(struct xen_pcibk_device *pdev,
- struct pci_dev *dev)
-{
- struct pcistub_device *psdev;
- struct pci_dev *found_dev = NULL;
- unsigned long flags;
-
- spin_lock_irqsave(&pcistub_devices_lock, flags);
-
- list_for_each_entry(psdev, &pcistub_devices, dev_list) {
- if (psdev->dev == dev) {
- found_dev = pcistub_device_get_pci_dev(pdev, psdev);
- break;
- }
- }
-
- spin_unlock_irqrestore(&pcistub_devices_lock, flags);
- return found_dev;
-}
-
/*
* Called when:
* - XenBus state has been reconfigure (pci unplug). See xen_pcibk_remove_device
diff --git a/drivers/xen/xen-pciback/pciback.h b/drivers/xen/xen-pciback/pciback.h
index f9599ed2f2e2..b786c1f74f85 100644
--- a/drivers/xen/xen-pciback/pciback.h
+++ b/drivers/xen/xen-pciback/pciback.h
@@ -67,8 +67,6 @@ extern struct list_head xen_pcibk_quirks;
struct pci_dev *pcistub_get_pci_dev_by_slot(struct xen_pcibk_device *pdev,
int domain, int bus,
int slot, int func);
-struct pci_dev *pcistub_get_pci_dev(struct xen_pcibk_device *pdev,
- struct pci_dev *dev);
void pcistub_put_pci_dev(struct pci_dev *dev);
static inline bool xen_pcibk_pv_support(void)
diff --git a/drivers/xen/xenfs/xensyms.c b/drivers/xen/xenfs/xensyms.c
index b799bc759c15..088b7f02c358 100644
--- a/drivers/xen/xenfs/xensyms.c
+++ b/drivers/xen/xenfs/xensyms.c
@@ -48,7 +48,7 @@ static int xensyms_next_sym(struct xensyms *xs)
return -ENOMEM;
set_xen_guest_handle(symdata->name, xs->name);
- symdata->symnum--; /* Rewind */
+ symdata->symnum = symnum; /* Rewind */
ret = HYPERVISOR_platform_op(&xs->op);
if (ret < 0)
@@ -78,7 +78,7 @@ static void *xensyms_next(struct seq_file *m, void *p, loff_t *pos)
{
struct xensyms *xs = m->private;
- xs->op.u.symdata.symnum = ++(*pos);
+ *pos = xs->op.u.symdata.symnum;
if (xensyms_next_sym(xs))
return NULL;
diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
index 3e68521f4e2f..399d455d50d6 100644
--- a/fs/9p/vfs_inode.c
+++ b/fs/9p/vfs_inode.c
@@ -669,8 +669,8 @@ v9fs_vfs_create(struct mnt_idmap *idmap, struct inode *dir,
*
*/
-static int v9fs_vfs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
- struct dentry *dentry, umode_t mode)
+static struct dentry *v9fs_vfs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
+ struct dentry *dentry, umode_t mode)
{
int err;
u32 perm;
@@ -692,8 +692,7 @@ static int v9fs_vfs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
if (fid)
p9_fid_put(fid);
-
- return err;
+ return ERR_PTR(err);
}
/**
diff --git a/fs/9p/vfs_inode_dotl.c b/fs/9p/vfs_inode_dotl.c
index 143ac03b7425..cc2007be2173 100644
--- a/fs/9p/vfs_inode_dotl.c
+++ b/fs/9p/vfs_inode_dotl.c
@@ -350,9 +350,9 @@ out:
*
*/
-static int v9fs_vfs_mkdir_dotl(struct mnt_idmap *idmap,
- struct inode *dir, struct dentry *dentry,
- umode_t omode)
+static struct dentry *v9fs_vfs_mkdir_dotl(struct mnt_idmap *idmap,
+ struct inode *dir, struct dentry *dentry,
+ umode_t omode)
{
int err;
struct v9fs_session_info *v9ses;
@@ -417,7 +417,7 @@ error:
p9_fid_put(fid);
v9fs_put_acl(dacl, pacl);
p9_fid_put(dfid);
- return err;
+ return ERR_PTR(err);
}
static int
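
The 9p hunks above, and the affs and afs ones further down, follow a VFS-wide change of the ->mkdir() inode operation's return type from int to struct dentry *: on success the method returns NULL (or, as with ->lookup(), a different dentry to splice in), and failures are reported with ERR_PTR(). A hedged sketch of a converted method; the helper doing the real work is a placeholder.

    #include <linux/fs.h>
    #include <linux/err.h>

    static int my_fs_do_mkdir(struct inode *dir, struct dentry *dentry,
    			  umode_t mode);	/* placeholder */

    /* Sketch of the new ->mkdir() calling convention:
     *   - return NULL when the passed-in dentry was instantiated;
     *   - return ERR_PTR(-Exxx) on failure;
     *   - optionally return a different dentry for the caller to use.
     */
    static struct dentry *my_mkdir(struct mnt_idmap *idmap, struct inode *dir,
    			       struct dentry *dentry, umode_t mode)
    {
    	int err = my_fs_do_mkdir(dir, dentry, mode);

    	return err ? ERR_PTR(err) : NULL;
    }
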
diff --git a/fs/Kconfig b/fs/Kconfig
index 64d420e3c475..afe21866d6b4 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -336,7 +336,6 @@ source "fs/qnx4/Kconfig"
source "fs/qnx6/Kconfig"
source "fs/romfs/Kconfig"
source "fs/pstore/Kconfig"
-source "fs/sysv/Kconfig"
source "fs/ufs/Kconfig"
source "fs/erofs/Kconfig"
source "fs/vboxsf/Kconfig"
diff --git a/fs/Makefile b/fs/Makefile
index 15df0a923d3a..77fd7f7b5d02 100644
--- a/fs/Makefile
+++ b/fs/Makefile
@@ -87,7 +87,6 @@ obj-$(CONFIG_NFSD) += nfsd/
obj-$(CONFIG_LOCKD) += lockd/
obj-$(CONFIG_NLS) += nls/
obj-y += unicode/
-obj-$(CONFIG_SYSV_FS) += sysv/
obj-$(CONFIG_SMBFS) += smb/
obj-$(CONFIG_HPFS_FS) += hpfs/
obj-$(CONFIG_NTFS3_FS) += ntfs3/
diff --git a/fs/affs/affs.h b/fs/affs/affs.h
index e8c2c4535cb3..ac4e9a02910b 100644
--- a/fs/affs/affs.h
+++ b/fs/affs/affs.h
@@ -168,7 +168,7 @@ extern struct dentry *affs_lookup(struct inode *dir, struct dentry *dentry, unsi
extern int affs_unlink(struct inode *dir, struct dentry *dentry);
extern int affs_create(struct mnt_idmap *idmap, struct inode *dir,
struct dentry *dentry, umode_t mode, bool);
-extern int affs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
+extern struct dentry *affs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
struct dentry *dentry, umode_t mode);
extern int affs_rmdir(struct inode *dir, struct dentry *dentry);
extern int affs_link(struct dentry *olddentry, struct inode *dir,
diff --git a/fs/affs/file.c b/fs/affs/file.c
index a5a861dd5223..7a71018e3f67 100644
--- a/fs/affs/file.c
+++ b/fs/affs/file.c
@@ -596,7 +596,7 @@ affs_extent_file_ofs(struct inode *inode, u32 newsize)
BUG_ON(tmp > bsize);
AFFS_DATA_HEAD(bh)->ptype = cpu_to_be32(T_DATA);
AFFS_DATA_HEAD(bh)->key = cpu_to_be32(inode->i_ino);
- AFFS_DATA_HEAD(bh)->sequence = cpu_to_be32(bidx);
+ AFFS_DATA_HEAD(bh)->sequence = cpu_to_be32(bidx + 1);
AFFS_DATA_HEAD(bh)->size = cpu_to_be32(tmp);
affs_fix_checksum(sb, bh);
bh->b_state &= ~(1UL << BH_New);
@@ -724,7 +724,8 @@ static int affs_write_end_ofs(struct file *file, struct address_space *mapping,
tmp = min(bsize - boff, to - from);
BUG_ON(boff + tmp > bsize || tmp > bsize);
memcpy(AFFS_DATA(bh) + boff, data + from, tmp);
- be32_add_cpu(&AFFS_DATA_HEAD(bh)->size, tmp);
+ AFFS_DATA_HEAD(bh)->size = cpu_to_be32(
+ max(boff + tmp, be32_to_cpu(AFFS_DATA_HEAD(bh)->size)));
affs_fix_checksum(sb, bh);
mark_buffer_dirty_inode(bh, inode);
written += tmp;
@@ -746,7 +747,7 @@ static int affs_write_end_ofs(struct file *file, struct address_space *mapping,
if (buffer_new(bh)) {
AFFS_DATA_HEAD(bh)->ptype = cpu_to_be32(T_DATA);
AFFS_DATA_HEAD(bh)->key = cpu_to_be32(inode->i_ino);
- AFFS_DATA_HEAD(bh)->sequence = cpu_to_be32(bidx);
+ AFFS_DATA_HEAD(bh)->sequence = cpu_to_be32(bidx + 1);
AFFS_DATA_HEAD(bh)->size = cpu_to_be32(bsize);
AFFS_DATA_HEAD(bh)->next = 0;
bh->b_state &= ~(1UL << BH_New);
@@ -780,7 +781,7 @@ static int affs_write_end_ofs(struct file *file, struct address_space *mapping,
if (buffer_new(bh)) {
AFFS_DATA_HEAD(bh)->ptype = cpu_to_be32(T_DATA);
AFFS_DATA_HEAD(bh)->key = cpu_to_be32(inode->i_ino);
- AFFS_DATA_HEAD(bh)->sequence = cpu_to_be32(bidx);
+ AFFS_DATA_HEAD(bh)->sequence = cpu_to_be32(bidx + 1);
AFFS_DATA_HEAD(bh)->size = cpu_to_be32(tmp);
AFFS_DATA_HEAD(bh)->next = 0;
bh->b_state &= ~(1UL << BH_New);
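
Two related OFS fixes above: the on-disk sequence field of a data block is written as bidx + 1 (the numbering starts at 1 rather than 0), and the recorded size of a block becomes the furthest byte written so far rather than the previous size plus the length of the last write. The size rule is easy to state as a standalone helper sketch:

    /* Sketch: on a rewrite inside an already-allocated OFS data block, the
     * stored size only grows; it is max(end of this write, previous size),
     * not previous size + length, which would overcount overlapping writes.
     */
    static inline unsigned int ofs_block_size(unsigned int old_size,
    					  unsigned int offset,
    					  unsigned int len)
    {
    	unsigned int end = offset + len;

    	return end > old_size ? end : old_size;
    }
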
diff --git a/fs/affs/namei.c b/fs/affs/namei.c
index 8c154490a2d6..f883be50db12 100644
--- a/fs/affs/namei.c
+++ b/fs/affs/namei.c
@@ -273,7 +273,7 @@ affs_create(struct mnt_idmap *idmap, struct inode *dir,
return 0;
}
-int
+struct dentry *
affs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
struct dentry *dentry, umode_t mode)
{
@@ -285,7 +285,7 @@ affs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
inode = affs_new_inode(dir);
if (!inode)
- return -ENOSPC;
+ return ERR_PTR(-ENOSPC);
inode->i_mode = S_IFDIR | mode;
affs_mode_to_prot(inode);
@@ -298,9 +298,9 @@ affs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
clear_nlink(inode);
mark_inode_dirty(inode);
iput(inode);
- return error;
+ return ERR_PTR(error);
}
- return 0;
+ return NULL;
}
int
diff --git a/fs/afs/addr_list.c b/fs/afs/addr_list.c
index 6d42f85c6be5..e941da5b6dd9 100644
--- a/fs/afs/addr_list.c
+++ b/fs/afs/addr_list.c
@@ -362,3 +362,53 @@ int afs_merge_fs_addr6(struct afs_net *net, struct afs_addr_list *alist,
alist->nr_addrs++;
return 0;
}
+
+/*
+ * Set the app data on the rxrpc peers an address list points to
+ */
+void afs_set_peer_appdata(struct afs_server *server,
+ struct afs_addr_list *old_alist,
+ struct afs_addr_list *new_alist)
+{
+ unsigned long data = (unsigned long)server;
+ int n = 0, o = 0;
+
+ if (!old_alist) {
+ /* New server. Just set all. */
+ for (; n < new_alist->nr_addrs; n++)
+ rxrpc_kernel_set_peer_data(new_alist->addrs[n].peer, data);
+ return;
+ }
+ if (!new_alist) {
+ /* Dead server. Just remove all. */
+ for (; o < old_alist->nr_addrs; o++)
+ rxrpc_kernel_set_peer_data(old_alist->addrs[o].peer, 0);
+ return;
+ }
+
+ /* Walk through the two lists simultaneously, setting new peers and
+ * clearing old ones. The two lists are ordered by pointer to peer
+ * record.
+ */
+ while (n < new_alist->nr_addrs && o < old_alist->nr_addrs) {
+ struct rxrpc_peer *pn = new_alist->addrs[n].peer;
+ struct rxrpc_peer *po = old_alist->addrs[o].peer;
+
+ if (pn == po)
+ continue;
+ if (pn < po) {
+ rxrpc_kernel_set_peer_data(pn, data);
+ n++;
+ } else {
+ rxrpc_kernel_set_peer_data(po, 0);
+ o++;
+ }
+ }
+
+ if (n < new_alist->nr_addrs)
+ for (; n < new_alist->nr_addrs; n++)
+ rxrpc_kernel_set_peer_data(new_alist->addrs[n].peer, data);
+ if (o < old_alist->nr_addrs)
+ for (; o < old_alist->nr_addrs; o++)
+ rxrpc_kernel_set_peer_data(old_alist->addrs[o].peer, 0);
+}
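
The helper added above walks two peer lists that are each ordered by peer pointer, setting app data on peers only present in the new list and clearing it on peers only present in the old one. The same two-pointer merge is shown below as a self-contained sketch over integer arrays; note that when the two heads compare equal, both cursors must advance so the loop keeps making progress.

    #include <stddef.h>

    /* Sketch: merge-walk two ascending arrays; call set() for values only in
     * "new", clear() for values only in "old", and skip values found in both.
     */
    static void merge_walk(const long *new, size_t n_new,
    		       const long *old, size_t n_old,
    		       void (*set)(long), void (*clear)(long))
    {
    	size_t n = 0, o = 0;

    	while (n < n_new && o < n_old) {
    		if (new[n] == old[o]) {
    			n++;		/* present in both: advance both */
    			o++;
    		} else if (new[n] < old[o]) {
    			set(new[n++]);	/* only in the new list */
    		} else {
    			clear(old[o++]);	/* only in the old list */
    		}
    	}
    	while (n < n_new)
    		set(new[n++]);
    	while (o < n_old)
    		clear(old[o++]);
    }
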
diff --git a/fs/afs/cell.c b/fs/afs/cell.c
index cee42646736c..0168bbf53fe0 100644
--- a/fs/afs/cell.c
+++ b/fs/afs/cell.c
@@ -20,8 +20,9 @@ static unsigned __read_mostly afs_cell_min_ttl = 10 * 60;
static unsigned __read_mostly afs_cell_max_ttl = 24 * 60 * 60;
static atomic_t cell_debug_id;
-static void afs_queue_cell_manager(struct afs_net *);
-static void afs_manage_cell_work(struct work_struct *);
+static void afs_cell_timer(struct timer_list *timer);
+static void afs_destroy_cell_work(struct work_struct *work);
+static void afs_manage_cell_work(struct work_struct *work);
static void afs_dec_cells_outstanding(struct afs_net *net)
{
@@ -29,19 +30,11 @@ static void afs_dec_cells_outstanding(struct afs_net *net)
wake_up_var(&net->cells_outstanding);
}
-/*
- * Set the cell timer to fire after a given delay, assuming it's not already
- * set for an earlier time.
- */
-static void afs_set_cell_timer(struct afs_net *net, time64_t delay)
+static void afs_set_cell_state(struct afs_cell *cell, enum afs_cell_state state)
{
- if (net->live) {
- atomic_inc(&net->cells_outstanding);
- if (timer_reduce(&net->cells_timer, jiffies + delay * HZ))
- afs_dec_cells_outstanding(net);
- } else {
- afs_queue_cell_manager(net);
- }
+ smp_store_release(&cell->state, state); /* Commit cell changes before state */
+ smp_wmb(); /* Set cell state before task state */
+ wake_up_var(&cell->state);
}
/*
@@ -64,7 +57,8 @@ static struct afs_cell *afs_find_cell_locked(struct afs_net *net,
return ERR_PTR(-ENAMETOOLONG);
if (!name) {
- cell = net->ws_cell;
+ cell = rcu_dereference_protected(net->ws_cell,
+ lockdep_is_held(&net->cells_lock));
if (!cell)
return ERR_PTR(-EDESTADDRREQ);
goto found;
@@ -115,7 +109,7 @@ static struct afs_cell *afs_alloc_cell(struct afs_net *net,
const char *name, unsigned int namelen,
const char *addresses)
{
- struct afs_vlserver_list *vllist;
+ struct afs_vlserver_list *vllist = NULL;
struct afs_cell *cell;
int i, ret;
@@ -162,13 +156,15 @@ static struct afs_cell *afs_alloc_cell(struct afs_net *net,
cell->net = net;
refcount_set(&cell->ref, 1);
atomic_set(&cell->active, 0);
+ INIT_WORK(&cell->destroyer, afs_destroy_cell_work);
INIT_WORK(&cell->manager, afs_manage_cell_work);
+ timer_setup(&cell->management_timer, afs_cell_timer, 0);
init_rwsem(&cell->vs_lock);
cell->volumes = RB_ROOT;
INIT_HLIST_HEAD(&cell->proc_volumes);
seqlock_init(&cell->volume_lock);
cell->fs_servers = RB_ROOT;
- seqlock_init(&cell->fs_lock);
+ init_rwsem(&cell->fs_lock);
rwlock_init(&cell->vl_servers_lock);
cell->flags = (1 << AFS_CELL_FL_CHECK_ALIAS);
@@ -203,7 +199,13 @@ static struct afs_cell *afs_alloc_cell(struct afs_net *net,
cell->dns_status = vllist->status;
smp_store_release(&cell->dns_lookup_count, 1); /* vs source/status */
atomic_inc(&net->cells_outstanding);
+ ret = idr_alloc_cyclic(&net->cells_dyn_ino, cell,
+ 2, INT_MAX / 2, GFP_KERNEL);
+ if (ret < 0)
+ goto error;
+ cell->dynroot_ino = ret;
cell->debug_id = atomic_inc_return(&cell_debug_id);
+
trace_afs_cell(cell->debug_id, 1, 0, afs_cell_trace_alloc);
_leave(" = %p", cell);
@@ -213,6 +215,7 @@ parse_failed:
if (ret == -EINVAL)
printk(KERN_ERR "kAFS: bad VL server IP address\n");
error:
+ afs_put_vlserverlist(cell->net, vllist);
kfree(cell->name - 1);
kfree(cell);
_leave(" = %d", ret);
@@ -226,6 +229,7 @@ error:
* @namesz: The strlen of the cell name.
* @vllist: A colon/comma separated list of numeric IP addresses or NULL.
* @excl: T if an error should be given if the cell name already exists.
+ * @trace: The reason to be logged if the lookup is successful.
*
* Look up a cell record by name and query the DNS for VL server addresses if
* needed. Note that that actual DNS query is punted off to the manager thread
@@ -234,7 +238,8 @@ error:
*/
struct afs_cell *afs_lookup_cell(struct afs_net *net,
const char *name, unsigned int namesz,
- const char *vllist, bool excl)
+ const char *vllist, bool excl,
+ enum afs_cell_trace trace)
{
struct afs_cell *cell, *candidate, *cursor;
struct rb_node *parent, **pp;
@@ -244,7 +249,7 @@ struct afs_cell *afs_lookup_cell(struct afs_net *net,
_enter("%s,%s", name, vllist);
if (!excl) {
- cell = afs_find_cell(net, name, namesz, afs_cell_trace_use_lookup);
+ cell = afs_find_cell(net, name, namesz, trace);
if (!IS_ERR(cell))
goto wait_for_cell;
}
@@ -287,26 +292,28 @@ struct afs_cell *afs_lookup_cell(struct afs_net *net,
cell = candidate;
candidate = NULL;
- atomic_set(&cell->active, 2);
- trace_afs_cell(cell->debug_id, refcount_read(&cell->ref), 2, afs_cell_trace_insert);
+ afs_use_cell(cell, trace);
rb_link_node_rcu(&cell->net_node, parent, pp);
rb_insert_color(&cell->net_node, &net->cells);
up_write(&net->cells_lock);
- afs_queue_cell(cell, afs_cell_trace_get_queue_new);
+ afs_queue_cell(cell, afs_cell_trace_queue_new);
wait_for_cell:
- trace_afs_cell(cell->debug_id, refcount_read(&cell->ref), atomic_read(&cell->active),
- afs_cell_trace_wait);
_debug("wait_for_cell");
- wait_var_event(&cell->state,
- ({
- state = smp_load_acquire(&cell->state); /* vs error */
- state == AFS_CELL_ACTIVE || state == AFS_CELL_REMOVED;
- }));
+ state = smp_load_acquire(&cell->state); /* vs error */
+ if (state != AFS_CELL_ACTIVE &&
+ state != AFS_CELL_DEAD) {
+ afs_see_cell(cell, afs_cell_trace_wait);
+ wait_var_event(&cell->state,
+ ({
+ state = smp_load_acquire(&cell->state); /* vs error */
+ state == AFS_CELL_ACTIVE || state == AFS_CELL_DEAD;
+ }));
+ }
/* Check the state obtained from the wait check. */
- if (state == AFS_CELL_REMOVED) {
+ if (state == AFS_CELL_DEAD) {
ret = cell->error;
goto error;
}
@@ -320,7 +327,7 @@ cell_already_exists:
if (excl) {
ret = -EEXIST;
} else {
- afs_use_cell(cursor, afs_cell_trace_use_lookup);
+ afs_use_cell(cursor, trace);
ret = 0;
}
up_write(&net->cells_lock);
@@ -330,7 +337,7 @@ cell_already_exists:
goto wait_for_cell;
goto error_noput;
error:
- afs_unuse_cell(net, cell, afs_cell_trace_unuse_lookup);
+ afs_unuse_cell(cell, afs_cell_trace_unuse_lookup_error);
error_noput:
_leave(" = %d [error]", ret);
return ERR_PTR(ret);
@@ -375,8 +382,9 @@ int afs_cell_init(struct afs_net *net, const char *rootcell)
if (cp && cp < rootcell + len)
return -EINVAL;
- /* allocate a cell record for the root cell */
- new_root = afs_lookup_cell(net, rootcell, len, vllist, false);
+ /* allocate a cell record for the root/workstation cell */
+ new_root = afs_lookup_cell(net, rootcell, len, vllist, false,
+ afs_cell_trace_use_lookup_ws);
if (IS_ERR(new_root)) {
_leave(" = %ld", PTR_ERR(new_root));
return PTR_ERR(new_root);
@@ -387,12 +395,11 @@ int afs_cell_init(struct afs_net *net, const char *rootcell)
/* install the new cell */
down_write(&net->cells_lock);
- afs_see_cell(new_root, afs_cell_trace_see_ws);
- old_root = net->ws_cell;
- net->ws_cell = new_root;
+ old_root = rcu_replace_pointer(net->ws_cell, new_root,
+ lockdep_is_held(&net->cells_lock));
up_write(&net->cells_lock);
- afs_unuse_cell(net, old_root, afs_cell_trace_unuse_ws);
+ afs_unuse_cell(old_root, afs_cell_trace_unuse_ws);
_leave(" = 0");
return 0;
}
@@ -510,8 +517,9 @@ static void afs_cell_destroy(struct rcu_head *rcu)
trace_afs_cell(cell->debug_id, r, atomic_read(&cell->active), afs_cell_trace_free);
afs_put_vlserverlist(net, rcu_access_pointer(cell->vl_servers));
- afs_unuse_cell(net, cell->alias_of, afs_cell_trace_unuse_alias);
+ afs_unuse_cell(cell->alias_of, afs_cell_trace_unuse_alias);
key_put(cell->anonymous_key);
+ idr_remove(&net->cells_dyn_ino, cell->dynroot_ino);
kfree(cell->name - 1);
kfree(cell);
@@ -519,30 +527,14 @@ static void afs_cell_destroy(struct rcu_head *rcu)
_leave(" [destroyed]");
}
-/*
- * Queue the cell manager.
- */
-static void afs_queue_cell_manager(struct afs_net *net)
-{
- int outstanding = atomic_inc_return(&net->cells_outstanding);
-
- _enter("%d", outstanding);
-
- if (!queue_work(afs_wq, &net->cells_manager))
- afs_dec_cells_outstanding(net);
-}
-
-/*
- * Cell management timer. We have an increment on cells_outstanding that we
- * need to pass along to the work item.
- */
-void afs_cells_timer(struct timer_list *timer)
+static void afs_destroy_cell_work(struct work_struct *work)
{
- struct afs_net *net = container_of(timer, struct afs_net, cells_timer);
+ struct afs_cell *cell = container_of(work, struct afs_cell, destroyer);
- _enter("");
- if (!queue_work(afs_wq, &net->cells_manager))
- afs_dec_cells_outstanding(net);
+ afs_see_cell(cell, afs_cell_trace_destroy);
+ timer_delete_sync(&cell->management_timer);
+ cancel_work_sync(&cell->manager);
+ call_rcu(&cell->rcu, afs_cell_destroy);
}
/*
@@ -574,7 +566,7 @@ void afs_put_cell(struct afs_cell *cell, enum afs_cell_trace reason)
if (zero) {
a = atomic_read(&cell->active);
WARN(a != 0, "Cell active count %u > 0\n", a);
- call_rcu(&cell->rcu, afs_cell_destroy);
+ WARN_ON(!queue_work(afs_wq, &cell->destroyer));
}
}
}
@@ -586,10 +578,9 @@ struct afs_cell *afs_use_cell(struct afs_cell *cell, enum afs_cell_trace reason)
{
int r, a;
- r = refcount_read(&cell->ref);
- WARN_ON(r == 0);
+ __refcount_inc(&cell->ref, &r);
a = atomic_inc_return(&cell->active);
- trace_afs_cell(cell->debug_id, r, a, reason);
+ trace_afs_cell(cell->debug_id, r + 1, a, reason);
return cell;
}
@@ -597,10 +588,11 @@ struct afs_cell *afs_use_cell(struct afs_cell *cell, enum afs_cell_trace reason)
* Record a cell becoming less active. When the active counter reaches 1, it
* is scheduled for destruction, but may get reactivated.
*/
-void afs_unuse_cell(struct afs_net *net, struct afs_cell *cell, enum afs_cell_trace reason)
+void afs_unuse_cell(struct afs_cell *cell, enum afs_cell_trace reason)
{
unsigned int debug_id;
time64_t now, expire_delay;
+ bool zero;
int r, a;
if (!cell)
@@ -615,13 +607,15 @@ void afs_unuse_cell(struct afs_net *net, struct afs_cell *cell, enum afs_cell_tr
expire_delay = afs_cell_gc_delay;
debug_id = cell->debug_id;
- r = refcount_read(&cell->ref);
a = atomic_dec_return(&cell->active);
- trace_afs_cell(debug_id, r, a, reason);
- WARN_ON(a == 0);
- if (a == 1)
+ if (!a)
/* 'cell' may now be garbage collected. */
- afs_set_cell_timer(net, expire_delay);
+ afs_set_cell_timer(cell, expire_delay);
+
+ zero = __refcount_dec_and_test(&cell->ref, &r);
+ trace_afs_cell(debug_id, r - 1, a, reason);
+ if (zero)
+ WARN_ON(!queue_work(afs_wq, &cell->destroyer));
}
/*
@@ -641,9 +635,27 @@ void afs_see_cell(struct afs_cell *cell, enum afs_cell_trace reason)
*/
void afs_queue_cell(struct afs_cell *cell, enum afs_cell_trace reason)
{
- afs_get_cell(cell, reason);
- if (!queue_work(afs_wq, &cell->manager))
- afs_put_cell(cell, afs_cell_trace_put_queue_fail);
+ queue_work(afs_wq, &cell->manager);
+}
+
+/*
+ * Cell-specific management timer.
+ */
+static void afs_cell_timer(struct timer_list *timer)
+{
+ struct afs_cell *cell = container_of(timer, struct afs_cell, management_timer);
+
+ afs_see_cell(cell, afs_cell_trace_see_mgmt_timer);
+ if (refcount_read(&cell->ref) > 0 && cell->net->live)
+ queue_work(afs_wq, &cell->manager);
+}
+
+/*
+ * Set/reduce the cell timer.
+ */
+void afs_set_cell_timer(struct afs_cell *cell, unsigned int delay_secs)
+{
+ timer_reduce(&cell->management_timer, jiffies + delay_secs * HZ);
}
/*
@@ -705,7 +717,6 @@ static int afs_activate_cell(struct afs_net *net, struct afs_cell *cell)
if (cell->proc_link.next)
cell->proc_link.next->pprev = &cell->proc_link.next;
- afs_dynroot_mkdir(net, cell);
mutex_unlock(&net->proc_cells_lock);
return 0;
}
@@ -722,241 +733,162 @@ static void afs_deactivate_cell(struct afs_net *net, struct afs_cell *cell)
mutex_lock(&net->proc_cells_lock);
if (!hlist_unhashed(&cell->proc_link))
hlist_del_rcu(&cell->proc_link);
- afs_dynroot_rmdir(net, cell);
mutex_unlock(&net->proc_cells_lock);
_leave("");
}
+static bool afs_has_cell_expired(struct afs_cell *cell, time64_t *_next_manage)
+{
+ const struct afs_vlserver_list *vllist;
+ time64_t expire_at = cell->last_inactive;
+ time64_t now = ktime_get_real_seconds();
+
+ if (atomic_read(&cell->active))
+ return false;
+ if (!cell->net->live)
+ return true;
+
+ vllist = rcu_dereference_protected(cell->vl_servers, true);
+ if (vllist && vllist->nr_servers > 0)
+ expire_at += afs_cell_gc_delay;
+
+ if (expire_at <= now)
+ return true;
+ if (expire_at < *_next_manage)
+ *_next_manage = expire_at;
+ return false;
+}
+
/*
* Manage a cell record, initialising and destroying it, maintaining its DNS
* records.
*/
-static void afs_manage_cell(struct afs_cell *cell)
+static bool afs_manage_cell(struct afs_cell *cell)
{
struct afs_net *net = cell->net;
- int ret, active;
+ time64_t next_manage = TIME64_MAX;
+ int ret;
_enter("%s", cell->name);
-again:
_debug("state %u", cell->state);
switch (cell->state) {
- case AFS_CELL_INACTIVE:
- case AFS_CELL_FAILED:
- down_write(&net->cells_lock);
- active = 1;
- if (atomic_try_cmpxchg_relaxed(&cell->active, &active, 0)) {
- rb_erase(&cell->net_node, &net->cells);
- trace_afs_cell(cell->debug_id, refcount_read(&cell->ref), 0,
- afs_cell_trace_unuse_delete);
- smp_store_release(&cell->state, AFS_CELL_REMOVED);
- }
- up_write(&net->cells_lock);
- if (cell->state == AFS_CELL_REMOVED) {
- wake_up_var(&cell->state);
- goto final_destruction;
- }
- if (cell->state == AFS_CELL_FAILED)
- goto done;
- smp_store_release(&cell->state, AFS_CELL_UNSET);
- wake_up_var(&cell->state);
- goto again;
-
- case AFS_CELL_UNSET:
- smp_store_release(&cell->state, AFS_CELL_ACTIVATING);
- wake_up_var(&cell->state);
- goto again;
-
- case AFS_CELL_ACTIVATING:
- ret = afs_activate_cell(net, cell);
- if (ret < 0)
- goto activation_failed;
+ case AFS_CELL_SETTING_UP:
+ goto set_up_cell;
+ case AFS_CELL_ACTIVE:
+ goto cell_is_active;
+ case AFS_CELL_REMOVING:
+ WARN_ON_ONCE(1);
+ return false;
+ case AFS_CELL_DEAD:
+ return false;
+ default:
+ _debug("bad state %u", cell->state);
+ WARN_ON_ONCE(1); /* Unhandled state */
+ return false;
+ }
- smp_store_release(&cell->state, AFS_CELL_ACTIVE);
- wake_up_var(&cell->state);
- goto again;
+set_up_cell:
+ ret = afs_activate_cell(net, cell);
+ if (ret < 0) {
+ cell->error = ret;
+ goto remove_cell;
+ }
- case AFS_CELL_ACTIVE:
- if (atomic_read(&cell->active) > 1) {
- if (test_and_clear_bit(AFS_CELL_FL_DO_LOOKUP, &cell->flags)) {
- ret = afs_update_cell(cell);
- if (ret < 0)
- cell->error = ret;
- }
- goto done;
- }
- smp_store_release(&cell->state, AFS_CELL_DEACTIVATING);
- wake_up_var(&cell->state);
- goto again;
+ afs_set_cell_state(cell, AFS_CELL_ACTIVE);
+
+cell_is_active:
+ if (afs_has_cell_expired(cell, &next_manage))
+ goto remove_cell;
- case AFS_CELL_DEACTIVATING:
- if (atomic_read(&cell->active) > 1)
- goto reverse_deactivation;
- afs_deactivate_cell(net, cell);
- smp_store_release(&cell->state, AFS_CELL_INACTIVE);
- wake_up_var(&cell->state);
- goto again;
+ if (test_and_clear_bit(AFS_CELL_FL_DO_LOOKUP, &cell->flags)) {
+ ret = afs_update_cell(cell);
+ if (ret < 0)
+ cell->error = ret;
+ }
- case AFS_CELL_REMOVED:
- goto done;
+ if (next_manage < TIME64_MAX && cell->net->live) {
+ time64_t now = ktime_get_real_seconds();
- default:
- break;
+ if (next_manage - now <= 0)
+ afs_queue_cell(cell, afs_cell_trace_queue_again);
+ else
+ afs_set_cell_timer(cell, next_manage - now);
}
- _debug("bad state %u", cell->state);
- BUG(); /* Unhandled state */
+ _leave(" [done %u]", cell->state);
+ return false;
-activation_failed:
- cell->error = ret;
- afs_deactivate_cell(net, cell);
+remove_cell:
+ down_write(&net->cells_lock);
- smp_store_release(&cell->state, AFS_CELL_FAILED); /* vs error */
- wake_up_var(&cell->state);
- goto again;
+ if (atomic_read(&cell->active)) {
+ up_write(&net->cells_lock);
+ goto cell_is_active;
+ }
-reverse_deactivation:
- smp_store_release(&cell->state, AFS_CELL_ACTIVE);
- wake_up_var(&cell->state);
- _leave(" [deact->act]");
- return;
+ /* Make sure that the expiring server records are going to see the fact
+ * that the cell is caput.
+ */
+ afs_set_cell_state(cell, AFS_CELL_REMOVING);
-done:
- _leave(" [done %u]", cell->state);
- return;
+ afs_deactivate_cell(net, cell);
+ afs_purge_servers(cell);
+
+ rb_erase(&cell->net_node, &net->cells);
+ afs_see_cell(cell, afs_cell_trace_unuse_delete);
+ up_write(&net->cells_lock);
-final_destruction:
/* The root volume is pinning the cell */
afs_put_volume(cell->root_volume, afs_volume_trace_put_cell_root);
cell->root_volume = NULL;
- afs_put_cell(cell, afs_cell_trace_put_destroy);
+
+ afs_set_cell_state(cell, AFS_CELL_DEAD);
+ return true;
}
static void afs_manage_cell_work(struct work_struct *work)
{
struct afs_cell *cell = container_of(work, struct afs_cell, manager);
+ bool final_put;
- afs_manage_cell(cell);
- afs_put_cell(cell, afs_cell_trace_put_queue_work);
+ afs_see_cell(cell, afs_cell_trace_manage);
+ final_put = afs_manage_cell(cell);
+ afs_see_cell(cell, afs_cell_trace_managed);
+ if (final_put)
+ afs_put_cell(cell, afs_cell_trace_put_final);
}
/*
- * Manage the records of cells known to a network namespace. This includes
- * updating the DNS records and garbage collecting unused cells that were
- * automatically added.
- *
- * Note that constructed cell records may only be removed from net->cells by
- * this work item, so it is safe for this work item to stash a cursor pointing
- * into the tree and then return to caller (provided it skips cells that are
- * still under construction).
- *
- * Note also that we were given an increment on net->cells_outstanding by
- * whoever queued us that we need to deal with before returning.
+ * Purge in-memory cell database.
*/
-void afs_manage_cells(struct work_struct *work)
+void afs_cell_purge(struct afs_net *net)
{
- struct afs_net *net = container_of(work, struct afs_net, cells_manager);
+ struct afs_cell *ws;
struct rb_node *cursor;
- time64_t now = ktime_get_real_seconds(), next_manage = TIME64_MAX;
- bool purging = !net->live;
_enter("");
- /* Trawl the cell database looking for cells that have expired from
- * lack of use and cells whose DNS results have expired and dispatch
- * their managers.
- */
- down_read(&net->cells_lock);
+ down_write(&net->cells_lock);
+ ws = rcu_replace_pointer(net->ws_cell, NULL,
+ lockdep_is_held(&net->cells_lock));
+ up_write(&net->cells_lock);
+ afs_unuse_cell(ws, afs_cell_trace_unuse_ws);
+ _debug("kick cells");
+ down_read(&net->cells_lock);
for (cursor = rb_first(&net->cells); cursor; cursor = rb_next(cursor)) {
- struct afs_cell *cell =
- rb_entry(cursor, struct afs_cell, net_node);
- unsigned active;
- bool sched_cell = false;
-
- active = atomic_read(&cell->active);
- trace_afs_cell(cell->debug_id, refcount_read(&cell->ref),
- active, afs_cell_trace_manage);
-
- ASSERTCMP(active, >=, 1);
-
- if (purging) {
- if (test_and_clear_bit(AFS_CELL_FL_NO_GC, &cell->flags)) {
- active = atomic_dec_return(&cell->active);
- trace_afs_cell(cell->debug_id, refcount_read(&cell->ref),
- active, afs_cell_trace_unuse_pin);
- }
- }
+ struct afs_cell *cell = rb_entry(cursor, struct afs_cell, net_node);
- if (active == 1) {
- struct afs_vlserver_list *vllist;
- time64_t expire_at = cell->last_inactive;
-
- read_lock(&cell->vl_servers_lock);
- vllist = rcu_dereference_protected(
- cell->vl_servers,
- lockdep_is_held(&cell->vl_servers_lock));
- if (vllist->nr_servers > 0)
- expire_at += afs_cell_gc_delay;
- read_unlock(&cell->vl_servers_lock);
- if (purging || expire_at <= now)
- sched_cell = true;
- else if (expire_at < next_manage)
- next_manage = expire_at;
- }
+ afs_see_cell(cell, afs_cell_trace_purge);
- if (!purging) {
- if (test_bit(AFS_CELL_FL_DO_LOOKUP, &cell->flags))
- sched_cell = true;
- }
+ if (test_and_clear_bit(AFS_CELL_FL_NO_GC, &cell->flags))
+ afs_unuse_cell(cell, afs_cell_trace_unuse_pin);
- if (sched_cell)
- afs_queue_cell(cell, afs_cell_trace_get_queue_manage);
+ afs_queue_cell(cell, afs_cell_trace_queue_purge);
}
-
up_read(&net->cells_lock);
- /* Update the timer on the way out. We have to pass an increment on
- * cells_outstanding in the namespace that we are in to the timer or
- * the work scheduler.
- */
- if (!purging && next_manage < TIME64_MAX) {
- now = ktime_get_real_seconds();
-
- if (next_manage - now <= 0) {
- if (queue_work(afs_wq, &net->cells_manager))
- atomic_inc(&net->cells_outstanding);
- } else {
- afs_set_cell_timer(net, next_manage - now);
- }
- }
-
- afs_dec_cells_outstanding(net);
- _leave(" [%d]", atomic_read(&net->cells_outstanding));
-}
-
-/*
- * Purge in-memory cell database.
- */
-void afs_cell_purge(struct afs_net *net)
-{
- struct afs_cell *ws;
-
- _enter("");
-
- down_write(&net->cells_lock);
- ws = net->ws_cell;
- net->ws_cell = NULL;
- up_write(&net->cells_lock);
- afs_unuse_cell(net, ws, afs_cell_trace_unuse_ws);
-
- _debug("del timer");
- if (del_timer_sync(&net->cells_timer))
- atomic_dec(&net->cells_outstanding);
-
- _debug("kick mgr");
- afs_queue_cell_manager(net);
-
_debug("wait");
wait_var_event(&net->cells_outstanding,
!atomic_read(&net->cells_outstanding));
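
The cell rewrite above replaces the single per-namespace cells timer and manager work item with a per-cell timer_list and work_struct, so expiry handling is driven object by object. The timer half of that pattern, sketched with a placeholder object type and work callback:

    #include <linux/timer.h>
    #include <linux/workqueue.h>
    #include <linux/jiffies.h>

    struct my_obj {				/* placeholder object */
    	struct timer_list timer;
    	struct work_struct worker;
    };

    static void my_obj_timer_fn(struct timer_list *t)
    {
    	struct my_obj *obj = container_of(t, struct my_obj, timer);

    	schedule_work(&obj->worker);	/* defer the real work out of timer context */
    }

    /* Sketch: timer_setup() binds the callback; timer_reduce() arms the timer
     * but only ever moves an already-pending expiry earlier, which suits a
     * "manage me no later than X" style timer.
     */
    static void my_obj_init(struct my_obj *obj, void (*work_fn)(struct work_struct *))
    {
    	INIT_WORK(&obj->worker, work_fn);
    	timer_setup(&obj->timer, my_obj_timer_fn, 0);
    }

    static void my_obj_set_timer(struct my_obj *obj, unsigned int delay_secs)
    {
    	timer_reduce(&obj->timer, jiffies + delay_secs * HZ);
    }
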
diff --git a/fs/afs/cmservice.c b/fs/afs/cmservice.c
index 99a3f20bc786..1a906805a9e3 100644
--- a/fs/afs/cmservice.c
+++ b/fs/afs/cmservice.c
@@ -139,49 +139,6 @@ bool afs_cm_incoming_call(struct afs_call *call)
}
/*
- * Find the server record by peer address and record a probe to the cache
- * manager from a server.
- */
-static int afs_find_cm_server_by_peer(struct afs_call *call)
-{
- struct sockaddr_rxrpc srx;
- struct afs_server *server;
- struct rxrpc_peer *peer;
-
- peer = rxrpc_kernel_get_call_peer(call->net->socket, call->rxcall);
-
- server = afs_find_server(call->net, peer);
- if (!server) {
- trace_afs_cm_no_server(call, &srx);
- return 0;
- }
-
- call->server = server;
- return 0;
-}
-
-/*
- * Find the server record by server UUID and record a probe to the cache
- * manager from a server.
- */
-static int afs_find_cm_server_by_uuid(struct afs_call *call,
- struct afs_uuid *uuid)
-{
- struct afs_server *server;
-
- rcu_read_lock();
- server = afs_find_server_by_uuid(call->net, call->request);
- rcu_read_unlock();
- if (!server) {
- trace_afs_cm_no_server_u(call, call->request);
- return 0;
- }
-
- call->server = server;
- return 0;
-}
-
-/*
* Clean up a cache manager call.
*/
static void afs_cm_destructor(struct afs_call *call)
@@ -322,10 +279,7 @@ static int afs_deliver_cb_callback(struct afs_call *call)
if (!afs_check_call_state(call, AFS_CALL_SV_REPLYING))
return afs_io_error(call, afs_io_error_cm_reply);
-
- /* we'll need the file server record as that tells us which set of
- * vnodes to operate upon */
- return afs_find_cm_server_by_peer(call);
+ return 0;
}
/*
@@ -349,18 +303,10 @@ static void SRXAFSCB_InitCallBackState(struct work_struct *work)
*/
static int afs_deliver_cb_init_call_back_state(struct afs_call *call)
{
- int ret;
-
_enter("");
afs_extract_discard(call, 0);
- ret = afs_extract_data(call, false);
- if (ret < 0)
- return ret;
-
- /* we'll need the file server record as that tells us which set of
- * vnodes to operate upon */
- return afs_find_cm_server_by_peer(call);
+ return afs_extract_data(call, false);
}
/*
@@ -373,8 +319,6 @@ static int afs_deliver_cb_init_call_back_state3(struct afs_call *call)
__be32 *b;
int ret;
- _enter("");
-
_enter("{%u}", call->unmarshall);
switch (call->unmarshall) {
@@ -421,9 +365,13 @@ static int afs_deliver_cb_init_call_back_state3(struct afs_call *call)
if (!afs_check_call_state(call, AFS_CALL_SV_REPLYING))
return afs_io_error(call, afs_io_error_cm_reply);
- /* we'll need the file server record as that tells us which set of
- * vnodes to operate upon */
- return afs_find_cm_server_by_uuid(call, call->request);
+ if (memcmp(call->request, &call->server->_uuid, sizeof(call->server->_uuid)) != 0) {
+ pr_notice("Callback UUID does not match fileserver UUID\n");
+ trace_afs_cm_no_server_u(call, call->request);
+ return 0;
+ }
+
+ return 0;
}
/*
@@ -455,7 +403,7 @@ static int afs_deliver_cb_probe(struct afs_call *call)
if (!afs_check_call_state(call, AFS_CALL_SV_REPLYING))
return afs_io_error(call, afs_io_error_cm_reply);
- return afs_find_cm_server_by_peer(call);
+ return 0;
}
/*
@@ -533,7 +481,7 @@ static int afs_deliver_cb_probe_uuid(struct afs_call *call)
if (!afs_check_call_state(call, AFS_CALL_SV_REPLYING))
return afs_io_error(call, afs_io_error_cm_reply);
- return afs_find_cm_server_by_peer(call);
+ return 0;
}
/*
@@ -593,7 +541,7 @@ static int afs_deliver_cb_tell_me_about_yourself(struct afs_call *call)
if (!afs_check_call_state(call, AFS_CALL_SV_REPLYING))
return afs_io_error(call, afs_io_error_cm_reply);
- return afs_find_cm_server_by_peer(call);
+ return 0;
}
/*
@@ -667,9 +615,5 @@ static int afs_deliver_yfs_cb_callback(struct afs_call *call)
if (!afs_check_call_state(call, AFS_CALL_SV_REPLYING))
return afs_io_error(call, afs_io_error_cm_reply);
-
- /* We'll need the file server record as that tells us which set of
- * vnodes to operate upon.
- */
- return afs_find_cm_server_by_peer(call);
+ return 0;
}
diff --git a/fs/afs/dir.c b/fs/afs/dir.c
index 02cbf38e1a77..9e7b1fe82c27 100644
--- a/fs/afs/dir.c
+++ b/fs/afs/dir.c
@@ -33,8 +33,8 @@ static bool afs_lookup_filldir(struct dir_context *ctx, const char *name, int nl
loff_t fpos, u64 ino, unsigned dtype);
static int afs_create(struct mnt_idmap *idmap, struct inode *dir,
struct dentry *dentry, umode_t mode, bool excl);
-static int afs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
- struct dentry *dentry, umode_t mode);
+static struct dentry *afs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
+ struct dentry *dentry, umode_t mode);
static int afs_rmdir(struct inode *dir, struct dentry *dentry);
static int afs_unlink(struct inode *dir, struct dentry *dentry);
static int afs_link(struct dentry *from, struct inode *dir,
@@ -1004,9 +1004,8 @@ static struct dentry *afs_lookup(struct inode *dir, struct dentry *dentry,
afs_stat_v(dvnode, n_lookup);
inode = afs_do_lookup(dir, dentry);
if (inode == ERR_PTR(-ENOENT))
- inode = afs_try_auto_mntpt(dentry, dir);
-
- if (!IS_ERR_OR_NULL(inode))
+ inode = NULL;
+ else if (!IS_ERR_OR_NULL(inode))
fid = AFS_FS_I(inode)->fid;
_debug("splice %p", dentry->d_inode);
@@ -1315,8 +1314,8 @@ static const struct afs_operation_ops afs_mkdir_operation = {
/*
* create a directory on an AFS filesystem
*/
-static int afs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
- struct dentry *dentry, umode_t mode)
+static struct dentry *afs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
+ struct dentry *dentry, umode_t mode)
{
struct afs_operation *op;
struct afs_vnode *dvnode = AFS_FS_I(dir);
@@ -1328,7 +1327,7 @@ static int afs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
op = afs_alloc_operation(NULL, dvnode->volume);
if (IS_ERR(op)) {
d_drop(dentry);
- return PTR_ERR(op);
+ return ERR_CAST(op);
}
fscache_use_cookie(afs_vnode_cache(dvnode), true);
@@ -1344,7 +1343,7 @@ static int afs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
op->ops = &afs_mkdir_operation;
ret = afs_do_sync_operation(op);
afs_dir_unuse_cookie(dvnode, ret);
- return ret;
+ return ERR_PTR(ret);
}
/*
diff --git a/fs/afs/dynroot.c b/fs/afs/dynroot.c
index d8bf52f77d93..691e0ae607a1 100644
--- a/fs/afs/dynroot.c
+++ b/fs/afs/dynroot.c
@@ -10,16 +10,19 @@
#include <linux/dns_resolver.h>
#include "internal.h"
-static atomic_t afs_autocell_ino;
+#define AFS_MIN_DYNROOT_CELL_INO 4 /* Allow for ., .., @cell, .@cell */
+#define AFS_MAX_DYNROOT_CELL_INO ((unsigned int)INT_MAX)
+
+static struct dentry *afs_lookup_atcell(struct inode *dir, struct dentry *dentry, ino_t ino);
/*
* iget5() comparator for inode created by autocell operations
- *
- * These pseudo inodes don't match anything.
*/
static int afs_iget5_pseudo_test(struct inode *inode, void *opaque)
{
- return 0;
+ struct afs_fid *fid = opaque;
+
+ return inode->i_ino == fid->vnode;
}
/*
@@ -39,28 +42,16 @@ static int afs_iget5_pseudo_set(struct inode *inode, void *opaque)
}
/*
- * Create an inode for a dynamic root directory or an autocell dynamic
- * automount dir.
+ * Create an inode for an autocell dynamic automount dir.
*/
-struct inode *afs_iget_pseudo_dir(struct super_block *sb, bool root)
+static struct inode *afs_iget_pseudo_dir(struct super_block *sb, ino_t ino)
{
- struct afs_super_info *as = AFS_FS_S(sb);
struct afs_vnode *vnode;
struct inode *inode;
- struct afs_fid fid = {};
+ struct afs_fid fid = { .vnode = ino, .unique = 1, };
_enter("");
- if (as->volume)
- fid.vid = as->volume->vid;
- if (root) {
- fid.vnode = 1;
- fid.unique = 1;
- } else {
- fid.vnode = atomic_inc_return(&afs_autocell_ino);
- fid.unique = 0;
- }
-
inode = iget5_locked(sb, fid.vnode,
afs_iget5_pseudo_test, afs_iget5_pseudo_set, &fid);
if (!inode) {
@@ -73,115 +64,71 @@ struct inode *afs_iget_pseudo_dir(struct super_block *sb, bool root)
vnode = AFS_FS_I(inode);
- /* there shouldn't be an existing inode */
- BUG_ON(!(inode->i_state & I_NEW));
-
- netfs_inode_init(&vnode->netfs, NULL, false);
- inode->i_size = 0;
- inode->i_mode = S_IFDIR | S_IRUGO | S_IXUGO;
- if (root) {
- inode->i_op = &afs_dynroot_inode_operations;
- inode->i_fop = &simple_dir_operations;
- } else {
- inode->i_op = &afs_autocell_inode_operations;
- }
- set_nlink(inode, 2);
- inode->i_uid = GLOBAL_ROOT_UID;
- inode->i_gid = GLOBAL_ROOT_GID;
- simple_inode_init_ts(inode);
- inode->i_blocks = 0;
- inode->i_generation = 0;
-
- set_bit(AFS_VNODE_PSEUDODIR, &vnode->flags);
- if (!root) {
+ if (inode->i_state & I_NEW) {
+ netfs_inode_init(&vnode->netfs, NULL, false);
+ simple_inode_init_ts(inode);
+ set_nlink(inode, 2);
+ inode->i_size = 0;
+ inode->i_mode = S_IFDIR | 0555;
+ inode->i_op = &afs_autocell_inode_operations;
+ inode->i_uid = GLOBAL_ROOT_UID;
+ inode->i_gid = GLOBAL_ROOT_GID;
+ inode->i_blocks = 0;
+ inode->i_generation = 0;
+ inode->i_flags |= S_AUTOMOUNT | S_NOATIME;
+
+ set_bit(AFS_VNODE_PSEUDODIR, &vnode->flags);
set_bit(AFS_VNODE_MOUNTPOINT, &vnode->flags);
- inode->i_flags |= S_AUTOMOUNT;
- }
- inode->i_flags |= S_NOATIME;
- unlock_new_inode(inode);
+ unlock_new_inode(inode);
+ }
_leave(" = %p", inode);
return inode;
}
/*
- * Probe to see if a cell may exist. This prevents positive dentries from
- * being created unnecessarily.
+ * Try to automount the mountpoint with pseudo directory, if the autocell
+ * option is set.
*/
-static int afs_probe_cell_name(struct dentry *dentry)
+static struct dentry *afs_dynroot_lookup_cell(struct inode *dir, struct dentry *dentry,
+ unsigned int flags)
{
- struct afs_cell *cell;
+ struct afs_cell *cell = NULL;
struct afs_net *net = afs_d2net(dentry);
+ struct inode *inode = NULL;
const char *name = dentry->d_name.name;
size_t len = dentry->d_name.len;
- char *result = NULL;
- int ret;
+ bool dotted = false;
+ int ret = -ENOENT;
/* Names prefixed with a dot are R/W mounts. */
if (name[0] == '.') {
- if (len == 1)
- return -EINVAL;
name++;
len--;
+ dotted = true;
}
- cell = afs_find_cell(net, name, len, afs_cell_trace_use_probe);
- if (!IS_ERR(cell)) {
- afs_unuse_cell(net, cell, afs_cell_trace_unuse_probe);
- return 0;
- }
-
- ret = dns_query(net->net, "afsdb", name, len, "srv=1",
- &result, NULL, false);
- if (ret == -ENODATA || ret == -ENOKEY || ret == 0)
- ret = -ENOENT;
- if (ret > 0 && ret >= sizeof(struct dns_server_list_v1_header)) {
- struct dns_server_list_v1_header *v1 = (void *)result;
-
- if (v1->hdr.zero == 0 &&
- v1->hdr.content == DNS_PAYLOAD_IS_SERVER_LIST &&
- v1->hdr.version == 1 &&
- (v1->status != DNS_LOOKUP_GOOD &&
- v1->status != DNS_LOOKUP_GOOD_WITH_BAD))
- return -ENOENT;
-
+ cell = afs_lookup_cell(net, name, len, NULL, false,
+ afs_cell_trace_use_lookup_dynroot);
+ if (IS_ERR(cell)) {
+ ret = PTR_ERR(cell);
+ goto out_no_cell;
}
- kfree(result);
- return ret;
-}
-
-/*
- * Try to auto mount the mountpoint with pseudo directory, if the autocell
- * operation is setted.
- */
-struct inode *afs_try_auto_mntpt(struct dentry *dentry, struct inode *dir)
-{
- struct afs_vnode *vnode = AFS_FS_I(dir);
- struct inode *inode;
- int ret = -ENOENT;
-
- _enter("%p{%pd}, {%llx:%llu}",
- dentry, dentry, vnode->fid.vid, vnode->fid.vnode);
-
- if (!test_bit(AFS_VNODE_AUTOCELL, &vnode->flags))
- goto out;
-
- ret = afs_probe_cell_name(dentry);
- if (ret < 0)
- goto out;
-
- inode = afs_iget_pseudo_dir(dir->i_sb, false);
+ inode = afs_iget_pseudo_dir(dir->i_sb, cell->dynroot_ino * 2 + dotted);
if (IS_ERR(inode)) {
ret = PTR_ERR(inode);
goto out;
}
- _leave("= %p", inode);
- return inode;
+ dentry->d_fsdata = cell;
+ return d_splice_alias(inode, dentry);
out:
- _leave("= %d", ret);
+ afs_unuse_cell(cell, afs_cell_trace_unuse_lookup_dynroot);
+out_no_cell:
+ if (!inode)
+ return d_splice_alias(inode, dentry);
return ret == -ENOENT ? NULL : ERR_PTR(ret);
}
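For reference, the lookup above keys each cell to a pair of pseudo-directory inode numbers: a leading dot on the name selects the read/write variant, and the inode number is derived as dynroot_ino * 2 plus the dotted flag. A minimal userspace sketch of that convention, assuming nothing beyond what the hunk shows; the toy_* names and the sample cell are illustrative only, not kernel code.

/* Illustrative only: models the dotted-name and inode-pair convention. */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct toy_cell {
	unsigned int	dynroot_ino;	/* allocated per cell, e.g. from an IDR */
	const char	*name;		/* undotted cell name */
};

/* Strip an optional leading '.' (R/W mount) and derive the inode number. */
static unsigned long toy_lookup_ino(const struct toy_cell *cell, const char *name)
{
	bool dotted = (name[0] == '.');

	if (dotted)
		name++;
	if (strcmp(name, cell->name) != 0)
		return 0;				/* no such cell */
	return (unsigned long)cell->dynroot_ino * 2 + dotted;
}

int main(void)
{
	struct toy_cell cell = { .dynroot_ino = 5, .name = "example.org" };

	printf("%lu\n", toy_lookup_ino(&cell, "example.org"));		/* 10 */
	printf("%lu\n", toy_lookup_ino(&cell, ".example.org"));		/* 11 */
	return 0;
}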
@@ -193,8 +140,6 @@ static struct dentry *afs_dynroot_lookup(struct inode *dir, struct dentry *dentr
{
_enter("%pd", dentry);
- ASSERTCMP(d_inode(dentry), ==, NULL);
-
if (flags & LOOKUP_CREATE)
return ERR_PTR(-EOPNOTSUPP);
@@ -203,98 +148,49 @@ static struct dentry *afs_dynroot_lookup(struct inode *dir, struct dentry *dentr
return ERR_PTR(-ENAMETOOLONG);
}
- return d_splice_alias(afs_try_auto_mntpt(dentry, dir), dentry);
+ if (dentry->d_name.len == 5 &&
+ memcmp(dentry->d_name.name, "@cell", 5) == 0)
+ return afs_lookup_atcell(dir, dentry, 2);
+
+ if (dentry->d_name.len == 6 &&
+ memcmp(dentry->d_name.name, ".@cell", 6) == 0)
+ return afs_lookup_atcell(dir, dentry, 3);
+
+ return afs_dynroot_lookup_cell(dir, dentry, flags);
}
const struct inode_operations afs_dynroot_inode_operations = {
.lookup = afs_dynroot_lookup,
};
-const struct dentry_operations afs_dynroot_dentry_operations = {
- .d_delete = always_delete_dentry,
- .d_release = afs_d_release,
- .d_automount = afs_d_automount,
-};
-
-/*
- * Create a manually added cell mount directory.
- * - The caller must hold net->proc_cells_lock
- */
-int afs_dynroot_mkdir(struct afs_net *net, struct afs_cell *cell)
-{
- struct super_block *sb = net->dynroot_sb;
- struct dentry *root, *subdir, *dsubdir;
- char *dotname = cell->name - 1;
- int ret;
-
- if (!sb || atomic_read(&sb->s_active) == 0)
- return 0;
-
- /* Let the ->lookup op do the creation */
- root = sb->s_root;
- inode_lock(root->d_inode);
- subdir = lookup_one_len(cell->name, root, cell->name_len);
- if (IS_ERR(subdir)) {
- ret = PTR_ERR(subdir);
- goto unlock;
- }
-
- dsubdir = lookup_one_len(dotname, root, cell->name_len + 1);
- if (IS_ERR(dsubdir)) {
- ret = PTR_ERR(dsubdir);
- dput(subdir);
- goto unlock;
- }
-
- /* Note that we're retaining extra refs on the dentries. */
- subdir->d_fsdata = (void *)1UL;
- dsubdir->d_fsdata = (void *)1UL;
- ret = 0;
-unlock:
- inode_unlock(root->d_inode);
- return ret;
-}
-
-static void afs_dynroot_rm_one_dir(struct dentry *root, const char *name, size_t name_len)
+static void afs_dynroot_d_release(struct dentry *dentry)
{
- struct dentry *subdir;
-
- /* Don't want to trigger a lookup call, which will re-add the cell */
- subdir = try_lookup_one_len(name, root, name_len);
- if (IS_ERR_OR_NULL(subdir)) {
- _debug("lookup %ld", PTR_ERR(subdir));
- return;
- }
-
- _debug("rmdir %pd %u", subdir, d_count(subdir));
+ struct afs_cell *cell = dentry->d_fsdata;
- if (subdir->d_fsdata) {
- _debug("unpin %u", d_count(subdir));
- subdir->d_fsdata = NULL;
- dput(subdir);
- }
- dput(subdir);
+ afs_unuse_cell(cell, afs_cell_trace_unuse_dynroot_mntpt);
}
/*
- * Remove a manually added cell mount directory.
- * - The caller must hold net->proc_cells_lock
+ * Keep @cell symlink dentries around, but only keep cell autodirs when they're
+ * being used.
*/
-void afs_dynroot_rmdir(struct afs_net *net, struct afs_cell *cell)
+static int afs_dynroot_delete_dentry(const struct dentry *dentry)
{
- struct super_block *sb = net->dynroot_sb;
- char *dotname = cell->name - 1;
-
- if (!sb || atomic_read(&sb->s_active) == 0)
- return;
+ const struct qstr *name = &dentry->d_name;
- inode_lock(sb->s_root->d_inode);
- afs_dynroot_rm_one_dir(sb->s_root, cell->name, cell->name_len);
- afs_dynroot_rm_one_dir(sb->s_root, dotname, cell->name_len + 1);
- inode_unlock(sb->s_root->d_inode);
- _leave("");
+ if (name->len == 5 && memcmp(name->name, "@cell", 5) == 0)
+ return 0;
+ if (name->len == 6 && memcmp(name->name, ".@cell", 6) == 0)
+ return 0;
+ return 1;
}
+const struct dentry_operations afs_dynroot_dentry_operations = {
+ .d_delete = afs_dynroot_delete_dentry,
+ .d_release = afs_dynroot_d_release,
+ .d_automount = afs_d_automount,
+};
+
static void afs_atcell_delayed_put_cell(void *arg)
{
struct afs_cell *cell = arg;
@@ -314,12 +210,23 @@ static const char *afs_atcell_get_link(struct dentry *dentry, struct inode *inod
const char *name;
bool dotted = vnode->fid.vnode == 3;
- if (!net->ws_cell)
+ if (!rcu_access_pointer(net->ws_cell))
return ERR_PTR(-ENOENT);
+ if (!dentry) {
+ /* We're in RCU-pathwalk. */
+ cell = rcu_dereference(net->ws_cell);
+ if (dotted)
+ name = cell->name - 1;
+ else
+ name = cell->name;
+ /* Shouldn't need to set a delayed call. */
+ return name;
+ }
+
down_read(&net->cells_lock);
- cell = net->ws_cell;
+ cell = rcu_dereference_protected(net->ws_cell, lockdep_is_held(&net->cells_lock));
if (dotted)
name = cell->name - 1;
else
@@ -336,149 +243,163 @@ static const struct inode_operations afs_atcell_inode_operations = {
};
/*
- * Look up @cell or .@cell in a dynroot directory. This is a substitution for
- * the local cell name for the net namespace.
+ * Create an inode for the @cell or .@cell symlinks.
*/
-static struct dentry *afs_dynroot_create_symlink(struct dentry *root, const char *name)
+static struct dentry *afs_lookup_atcell(struct inode *dir, struct dentry *dentry, ino_t ino)
{
struct afs_vnode *vnode;
- struct afs_fid fid = { .vnode = 2, .unique = 1, };
- struct dentry *dentry;
struct inode *inode;
+ struct afs_fid fid = { .vnode = ino, .unique = 1, };
- if (name[0] == '.')
- fid.vnode = 3;
-
- dentry = d_alloc_name(root, name);
- if (!dentry)
- return ERR_PTR(-ENOMEM);
-
- inode = iget5_locked(dentry->d_sb, fid.vnode,
+ inode = iget5_locked(dir->i_sb, fid.vnode,
afs_iget5_pseudo_test, afs_iget5_pseudo_set, &fid);
- if (!inode) {
- dput(dentry);
+ if (!inode)
return ERR_PTR(-ENOMEM);
- }
vnode = AFS_FS_I(inode);
- /* there shouldn't be an existing inode */
- if (WARN_ON_ONCE(!(inode->i_state & I_NEW))) {
- iput(inode);
- dput(dentry);
- return ERR_PTR(-EIO);
+ if (inode->i_state & I_NEW) {
+ netfs_inode_init(&vnode->netfs, NULL, false);
+ simple_inode_init_ts(inode);
+ set_nlink(inode, 1);
+ inode->i_size = 0;
+ inode->i_mode = S_IFLNK | 0555;
+ inode->i_op = &afs_atcell_inode_operations;
+ inode->i_uid = GLOBAL_ROOT_UID;
+ inode->i_gid = GLOBAL_ROOT_GID;
+ inode->i_blocks = 0;
+ inode->i_generation = 0;
+ inode->i_flags |= S_NOATIME;
+
+ unlock_new_inode(inode);
}
-
- netfs_inode_init(&vnode->netfs, NULL, false);
- simple_inode_init_ts(inode);
- set_nlink(inode, 1);
- inode->i_size = 0;
- inode->i_mode = S_IFLNK | 0555;
- inode->i_op = &afs_atcell_inode_operations;
- inode->i_uid = GLOBAL_ROOT_UID;
- inode->i_gid = GLOBAL_ROOT_GID;
- inode->i_blocks = 0;
- inode->i_generation = 0;
- inode->i_flags |= S_NOATIME;
-
- unlock_new_inode(inode);
- d_splice_alias(inode, dentry);
- return dentry;
+ return d_splice_alias(inode, dentry);
}
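Both the symlink body in the afs_atcell_get_link() hunk above and the dotted directory names are produced with cell->name - 1, which only works because the cell's name buffer evidently stores a '.' immediately before the undotted name (the removed dotname = cell->name - 1 code relied on the same layout). A hypothetical userspace sketch of that buffer layout; the helper is illustrative, not part of the kernel code.

/* Illustrative only: a name buffer laid out as ".<cellname>\0" so that
 * name - 1 yields the dotted form without storing a second string.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static const char *toy_store_cell_name(const char *cellname)
{
	size_t len = strlen(cellname);
	char *buf = malloc(len + 2);		/* '.' + name + NUL */

	if (!buf)
		return NULL;
	buf[0] = '.';
	memcpy(buf + 1, cellname, len + 1);
	return buf + 1;				/* callers see the undotted name */
}

int main(void)
{
	const char *name = toy_store_cell_name("example.org");

	if (!name)
		return 1;
	printf("undotted: %s\n", name);		/* example.org */
	printf("dotted:   %s\n", name - 1);	/* .example.org */
	free((char *)(name - 1));
	return 0;
}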
/*
- * Create @cell and .@cell symlinks.
+ * Transcribe the cell database into readdir content under the RCU read lock.
+ * Each cell produces two entries, one prefixed with a dot and one not.
*/
-static int afs_dynroot_symlink(struct afs_net *net)
+static int afs_dynroot_readdir_cells(struct afs_net *net, struct dir_context *ctx)
{
- struct super_block *sb = net->dynroot_sb;
- struct dentry *root, *symlink, *dsymlink;
- int ret;
-
- /* Let the ->lookup op do the creation */
- root = sb->s_root;
- inode_lock(root->d_inode);
- symlink = afs_dynroot_create_symlink(root, "@cell");
- if (IS_ERR(symlink)) {
- ret = PTR_ERR(symlink);
- goto unlock;
- }
+ const struct afs_cell *cell;
+ loff_t newpos;
+
+ _enter("%llu", ctx->pos);
+
+ for (;;) {
+ unsigned int ix = ctx->pos >> 1;
+
+ cell = idr_get_next(&net->cells_dyn_ino, &ix);
+ if (!cell)
+ return 0;
+ if (READ_ONCE(cell->state) == AFS_CELL_REMOVING ||
+ READ_ONCE(cell->state) == AFS_CELL_DEAD) {
+ ctx->pos += 2;
+ ctx->pos &= ~1;
+ continue;
+ }
- dsymlink = afs_dynroot_create_symlink(root, ".@cell");
- if (IS_ERR(dsymlink)) {
- ret = PTR_ERR(dsymlink);
- dput(symlink);
- goto unlock;
- }
+ newpos = ix << 1;
+ if (newpos > ctx->pos)
+ ctx->pos = newpos;
- /* Note that we're retaining extra refs on the dentries. */
- symlink->d_fsdata = (void *)1UL;
- dsymlink->d_fsdata = (void *)1UL;
- ret = 0;
-unlock:
- inode_unlock(root->d_inode);
- return ret;
+ _debug("pos %llu -> cell %u", ctx->pos, cell->dynroot_ino);
+
+ if ((ctx->pos & 1) == 0) {
+ if (!dir_emit(ctx, cell->name, cell->name_len,
+ cell->dynroot_ino, DT_DIR))
+ return 0;
+ ctx->pos++;
+ }
+ if ((ctx->pos & 1) == 1) {
+ if (!dir_emit(ctx, cell->name - 1, cell->name_len + 1,
+ cell->dynroot_ino + 1, DT_DIR))
+ return 0;
+ ctx->pos++;
+ }
+ }
+ return 0;
}
/*
- * Populate a newly created dynamic root with cell names.
+ * Read the AFS dynamic root directory. This produces a list of cellnames,
+ * dotted and undotted, along with @cell and .@cell links if configured.
*/
-int afs_dynroot_populate(struct super_block *sb)
+static int afs_dynroot_readdir(struct file *file, struct dir_context *ctx)
{
- struct afs_cell *cell;
- struct afs_net *net = afs_sb2net(sb);
- int ret;
-
- mutex_lock(&net->proc_cells_lock);
+ struct afs_net *net = afs_d2net(file->f_path.dentry);
+ int ret = 0;
- net->dynroot_sb = sb;
- ret = afs_dynroot_symlink(net);
- if (ret < 0)
- goto error;
+ if (!dir_emit_dots(file, ctx))
+ return 0;
- hlist_for_each_entry(cell, &net->proc_cells, proc_link) {
- ret = afs_dynroot_mkdir(net, cell);
- if (ret < 0)
- goto error;
+ if (ctx->pos == 2) {
+ if (rcu_access_pointer(net->ws_cell) &&
+ !dir_emit(ctx, "@cell", 5, 2, DT_LNK))
+ return 0;
+ ctx->pos = 3;
+ }
+ if (ctx->pos == 3) {
+ if (rcu_access_pointer(net->ws_cell) &&
+ !dir_emit(ctx, ".@cell", 6, 3, DT_LNK))
+ return 0;
+ ctx->pos = 4;
}
- ret = 0;
-out:
- mutex_unlock(&net->proc_cells_lock);
+ if ((unsigned long long)ctx->pos <= AFS_MAX_DYNROOT_CELL_INO) {
+ rcu_read_lock();
+ ret = afs_dynroot_readdir_cells(net, ctx);
+ rcu_read_unlock();
+ }
return ret;
-
-error:
- net->dynroot_sb = NULL;
- goto out;
}
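Taken together, the two readdir helpers appear to lay out the directory position space as: 0 and 1 for the dot entries, 2 for @cell, 3 for .@cell, and from there on two slots per cell index with the low bit selecting the dotted entry. A small sketch of that decoding, using toy types rather than the kernel's dir_context.

/* Illustrative only: decode a dynroot readdir position into an entry kind. */
#include <stdio.h>

enum toy_entry { TOY_DOT, TOY_DOTDOT, TOY_ATCELL, TOY_DOT_ATCELL, TOY_CELL };

static enum toy_entry toy_decode_pos(unsigned long long pos,
				     unsigned int *cell_index, int *dotted)
{
	switch (pos) {
	case 0: return TOY_DOT;
	case 1: return TOY_DOTDOT;
	case 2: return TOY_ATCELL;
	case 3: return TOY_DOT_ATCELL;
	default:
		*cell_index = pos >> 1;	/* matches ctx->pos >> 1 above */
		*dotted = pos & 1;	/* odd slots carry the dotted name */
		return TOY_CELL;
	}
}

int main(void)
{
	unsigned int ix;
	int dotted;

	if (toy_decode_pos(9, &ix, &dotted) == TOY_CELL)
		printf("cell index %u, dotted %d\n", ix, dotted);	/* 4, 1 */
	return 0;
}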
+static const struct file_operations afs_dynroot_file_operations = {
+ .llseek = generic_file_llseek,
+ .read = generic_read_dir,
+ .iterate_shared = afs_dynroot_readdir,
+ .fsync = noop_fsync,
+};
+
/*
- * When a dynamic root that's in the process of being destroyed, depopulate it
- * of pinned directories.
+ * Create an inode for a dynamic root directory.
*/
-void afs_dynroot_depopulate(struct super_block *sb)
+struct inode *afs_dynroot_iget_root(struct super_block *sb)
{
- struct afs_net *net = afs_sb2net(sb);
- struct dentry *root = sb->s_root, *subdir;
-
- /* Prevent more subdirs from being created */
- mutex_lock(&net->proc_cells_lock);
- if (net->dynroot_sb == sb)
- net->dynroot_sb = NULL;
- mutex_unlock(&net->proc_cells_lock);
-
- if (root) {
- struct hlist_node *n;
- inode_lock(root->d_inode);
-
- /* Remove all the pins for dirs created for manually added cells */
- hlist_for_each_entry_safe(subdir, n, &root->d_children, d_sib) {
- if (subdir->d_fsdata) {
- subdir->d_fsdata = NULL;
- dput(subdir);
- }
- }
+ struct afs_super_info *as = AFS_FS_S(sb);
+ struct afs_vnode *vnode;
+ struct inode *inode;
+ struct afs_fid fid = { .vid = 0, .vnode = 1, .unique = 1, };
+
+ if (as->volume)
+ fid.vid = as->volume->vid;
- inode_unlock(root->d_inode);
+ inode = iget5_locked(sb, fid.vnode,
+ afs_iget5_pseudo_test, afs_iget5_pseudo_set, &fid);
+ if (!inode)
+ return ERR_PTR(-ENOMEM);
+
+ vnode = AFS_FS_I(inode);
+
+ /* Only initialise the inode if it has just been allocated. */
+ if (inode->i_state & I_NEW) {
+ netfs_inode_init(&vnode->netfs, NULL, false);
+ simple_inode_init_ts(inode);
+ set_nlink(inode, 2);
+ inode->i_size = 0;
+ inode->i_mode = S_IFDIR | 0555;
+ inode->i_op = &afs_dynroot_inode_operations;
+ inode->i_fop = &afs_dynroot_file_operations;
+ inode->i_uid = GLOBAL_ROOT_UID;
+ inode->i_gid = GLOBAL_ROOT_GID;
+ inode->i_blocks = 0;
+ inode->i_generation = 0;
+ inode->i_flags |= S_NOATIME;
+
+ set_bit(AFS_VNODE_PSEUDODIR, &vnode->flags);
+ unlock_new_inode(inode);
}
+ _leave(" = %p", inode);
+ return inode;
}
diff --git a/fs/afs/fs_probe.c b/fs/afs/fs_probe.c
index b516d05b0fef..07a8bfbdd9b9 100644
--- a/fs/afs/fs_probe.c
+++ b/fs/afs/fs_probe.c
@@ -235,20 +235,20 @@ out:
* Probe all of a fileserver's addresses to find out the best route and to
* query its capabilities.
*/
-void afs_fs_probe_fileserver(struct afs_net *net, struct afs_server *server,
- struct afs_addr_list *new_alist, struct key *key)
+int afs_fs_probe_fileserver(struct afs_net *net, struct afs_server *server,
+ struct afs_addr_list *new_alist, struct key *key)
{
struct afs_endpoint_state *estate, *old;
- struct afs_addr_list *alist;
+ struct afs_addr_list *old_alist = NULL, *alist;
unsigned long unprobed;
_enter("%pU", &server->uuid);
estate = kzalloc(sizeof(*estate), GFP_KERNEL);
if (!estate)
- return;
+ return -ENOMEM;
- refcount_set(&estate->ref, 1);
+ refcount_set(&estate->ref, 2);
estate->server_id = server->debug_id;
estate->rtt = UINT_MAX;
@@ -256,21 +256,31 @@ void afs_fs_probe_fileserver(struct afs_net *net, struct afs_server *server,
old = rcu_dereference_protected(server->endpoint_state,
lockdep_is_held(&server->fs_lock));
- estate->responsive_set = old->responsive_set;
- estate->addresses = afs_get_addrlist(new_alist ?: old->addresses,
- afs_alist_trace_get_estate);
+ if (old) {
+ estate->responsive_set = old->responsive_set;
+ if (!new_alist)
+ new_alist = old->addresses;
+ }
+
+ if (old_alist != new_alist)
+ afs_set_peer_appdata(server, old_alist, new_alist);
+
+ estate->addresses = afs_get_addrlist(new_alist, afs_alist_trace_get_estate);
alist = estate->addresses;
estate->probe_seq = ++server->probe_counter;
atomic_set(&estate->nr_probing, alist->nr_addrs);
+ if (new_alist)
+ server->addr_version = new_alist->version;
rcu_assign_pointer(server->endpoint_state, estate);
- set_bit(AFS_ESTATE_SUPERSEDED, &old->flags);
write_unlock(&server->fs_lock);
+ if (old)
+ set_bit(AFS_ESTATE_SUPERSEDED, &old->flags);
trace_afs_estate(estate->server_id, estate->probe_seq, refcount_read(&estate->ref),
afs_estate_trace_alloc_probe);
- afs_get_address_preferences(net, alist);
+ afs_get_address_preferences(net, new_alist);
server->probed_at = jiffies;
unprobed = (1UL << alist->nr_addrs) - 1;
@@ -293,6 +303,8 @@ void afs_fs_probe_fileserver(struct afs_net *net, struct afs_server *server,
}
afs_put_endpoint_state(old, afs_estate_trace_put_probe);
+ afs_put_endpoint_state(estate, afs_estate_trace_put_probe);
+ return 0;
}
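Note that the endpoint state is now allocated with its refcount set to 2: one reference is published through server->endpoint_state, the other covers the probing work itself and is dropped by the afs_put_endpoint_state() call added at the end. A compact userspace model of that publish-plus-local-use pattern, assuming nothing beyond the hunk above; the toy_* names are illustrative.

/* Illustrative only: start a refcount at 2 when one reference is published
 * and a second is still in use by the code doing the publishing.
 */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct toy_estate {
	atomic_int	ref;
	unsigned int	probe_seq;
};

static struct toy_estate *published;		/* like server->endpoint_state */

static void toy_put_estate(struct toy_estate *e)
{
	if (atomic_fetch_sub(&e->ref, 1) == 1) {
		printf("freeing estate %u\n", e->probe_seq);
		free(e);
	}
}

int main(void)
{
	struct toy_estate *e = malloc(sizeof(*e));

	if (!e)
		return 1;
	atomic_init(&e->ref, 2);		/* one for 'published', one for us */
	e->probe_seq = 1;
	published = e;

	/* ... dispatch probes using 'e' ... */

	toy_put_estate(e);			/* drop the probing reference */
	toy_put_estate(published);		/* later: the pointer is superseded */
	return 0;
}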
/*
diff --git a/fs/afs/fsclient.c b/fs/afs/fsclient.c
index 1d9ecd5418d8..bc9556991d7c 100644
--- a/fs/afs/fsclient.c
+++ b/fs/afs/fsclient.c
@@ -1653,7 +1653,7 @@ int afs_fs_give_up_all_callbacks(struct afs_net *net, struct afs_server *server,
bp = call->request;
*bp++ = htonl(FSGIVEUPALLCALLBACKS);
- call->server = afs_use_server(server, afs_server_trace_give_up_cb);
+ call->server = afs_use_server(server, false, afs_server_trace_use_give_up_cb);
afs_make_call(call, GFP_NOFS);
afs_wait_for_call_to_complete(call);
ret = call->error;
@@ -1760,7 +1760,7 @@ bool afs_fs_get_capabilities(struct afs_net *net, struct afs_server *server,
return false;
call->key = key;
- call->server = afs_use_server(server, afs_server_trace_get_caps);
+ call->server = afs_use_server(server, false, afs_server_trace_use_get_caps);
call->peer = rxrpc_kernel_get_peer(estate->addresses->addrs[addr_index].peer);
call->probe = afs_get_endpoint_state(estate, afs_estate_trace_get_getcaps);
call->probe_index = addr_index;
diff --git a/fs/afs/internal.h b/fs/afs/internal.h
index 90f407774a9a..440b0e731093 100644
--- a/fs/afs/internal.h
+++ b/fs/afs/internal.h
@@ -287,9 +287,8 @@ struct afs_net {
/* Cell database */
struct rb_root cells;
- struct afs_cell *ws_cell;
- struct work_struct cells_manager;
- struct timer_list cells_timer;
+ struct idr cells_dyn_ino; /* cell->dynroot_ino mapping */
+ struct afs_cell __rcu *ws_cell;
atomic_t cells_outstanding;
struct rw_semaphore cells_lock;
struct mutex cells_alias_lock;
@@ -301,18 +300,11 @@ struct afs_net {
* cell, but in practice, people create aliases and subsets and there's
* no easy way to distinguish them.
*/
- seqlock_t fs_lock; /* For fs_servers, fs_probe_*, fs_proc */
- struct rb_root fs_servers; /* afs_server (by server UUID or address) */
+ seqlock_t fs_lock; /* For fs_probe_*, fs_proc */
struct list_head fs_probe_fast; /* List of afs_server to probe at 30s intervals */
struct list_head fs_probe_slow; /* List of afs_server to probe at 5m intervals */
struct hlist_head fs_proc; /* procfs servers list */
- struct hlist_head fs_addresses; /* afs_server (by lowest IPv6 addr) */
- seqlock_t fs_addr_lock; /* For fs_addresses[46] */
-
- struct work_struct fs_manager;
- struct timer_list fs_timer;
-
struct work_struct fs_prober;
struct timer_list fs_probe_timer;
atomic_t servers_outstanding;
@@ -345,13 +337,10 @@ struct afs_net {
extern const char afs_init_sysname[];
enum afs_cell_state {
- AFS_CELL_UNSET,
- AFS_CELL_ACTIVATING,
+ AFS_CELL_SETTING_UP,
AFS_CELL_ACTIVE,
- AFS_CELL_DEACTIVATING,
- AFS_CELL_INACTIVE,
- AFS_CELL_FAILED,
- AFS_CELL_REMOVED,
+ AFS_CELL_REMOVING,
+ AFS_CELL_DEAD,
};
/*
@@ -382,7 +371,9 @@ struct afs_cell {
struct afs_cell *alias_of; /* The cell this is an alias of */
struct afs_volume *root_volume; /* The root.cell volume if there is one */
struct key *anonymous_key; /* anonymous user key for this cell */
+ struct work_struct destroyer; /* Destroyer for cell */
struct work_struct manager; /* Manager for init/deinit/dns */
+ struct timer_list management_timer; /* General management timer */
struct hlist_node proc_link; /* /proc cell list link */
time64_t dns_expiry; /* Time AFSDB/SRV record expires */
time64_t last_inactive; /* Time of last drop of usage count */
@@ -398,6 +389,7 @@ struct afs_cell {
enum dns_lookup_status dns_status:8; /* Latest status of data from lookup */
unsigned int dns_lookup_count; /* Counter of DNS lookups */
unsigned int debug_id;
+ unsigned int dynroot_ino; /* Inode numbers for dynroot (a pair) */
/* The volumes belonging to this cell */
struct rw_semaphore vs_lock; /* Lock for server->volumes */
@@ -407,7 +399,7 @@ struct afs_cell {
/* Active fileserver interaction state. */
struct rb_root fs_servers; /* afs_server (by server UUID) */
- seqlock_t fs_lock; /* For fs_servers */
+ struct rw_semaphore fs_lock; /* For fs_servers */
/* VL server list. */
rwlock_t vl_servers_lock; /* Lock on vl_servers */
@@ -542,22 +534,22 @@ struct afs_server {
};
struct afs_cell *cell; /* Cell to which belongs (pins ref) */
- struct rb_node uuid_rb; /* Link in net->fs_servers */
- struct afs_server __rcu *uuid_next; /* Next server with same UUID */
- struct afs_server *uuid_prev; /* Previous server with same UUID */
- struct list_head probe_link; /* Link in net->fs_probe_list */
- struct hlist_node addr_link; /* Link in net->fs_addresses6 */
+ struct rb_node uuid_rb; /* Link in cell->fs_servers */
+ struct list_head probe_link; /* Link in net->fs_probe_* */
struct hlist_node proc_link; /* Link in net->fs_proc */
struct list_head volumes; /* RCU list of afs_server_entry objects */
- struct afs_server *gc_next; /* Next server in manager's list */
+ struct work_struct destroyer; /* Work item to try and destroy a server */
+ struct timer_list timer; /* Management timer */
time64_t unuse_time; /* Time at which last unused */
unsigned long flags;
#define AFS_SERVER_FL_RESPONDING 0 /* The server is responding */
#define AFS_SERVER_FL_UPDATING 1
#define AFS_SERVER_FL_NEEDS_UPDATE 2 /* Fileserver address list is out of date */
-#define AFS_SERVER_FL_NOT_READY 4 /* The record is not ready for use */
-#define AFS_SERVER_FL_NOT_FOUND 5 /* VL server says no such server */
-#define AFS_SERVER_FL_VL_FAIL 6 /* Failed to access VL server */
+#define AFS_SERVER_FL_UNCREATED 3 /* The record needs creating */
+#define AFS_SERVER_FL_CREATING 4 /* The record is being created */
+#define AFS_SERVER_FL_EXPIRED 5 /* The record has expired */
+#define AFS_SERVER_FL_NOT_FOUND 6 /* VL server says no such server */
+#define AFS_SERVER_FL_VL_FAIL 7 /* Failed to access VL server */
#define AFS_SERVER_FL_MAY_HAVE_CB 8 /* May have callbacks on this fileserver */
#define AFS_SERVER_FL_IS_YFS 16 /* Server is YFS not AFS */
#define AFS_SERVER_FL_NO_IBULK 17 /* Fileserver doesn't support FS.InlineBulkStatus */
@@ -567,6 +559,7 @@ struct afs_server {
atomic_t active; /* Active user count */
u32 addr_version; /* Address list version */
u16 service_id; /* Service ID we're using. */
+ short create_error; /* Creation error */
unsigned int rtt; /* Server's current RTT in uS */
unsigned int debug_id; /* Debugging ID for traces */
@@ -621,6 +614,7 @@ struct afs_volume {
afs_volid_t vid; /* The volume ID of this volume */
afs_volid_t vids[AFS_MAXTYPES]; /* All associated volume IDs */
refcount_t ref;
+ unsigned int debug_id; /* Debugging ID for traces */
time64_t update_at; /* Time at which to next update */
struct afs_cell *cell; /* Cell to which belongs (pins ref) */
struct rb_node cell_node; /* Link in cell->volumes */
@@ -700,7 +694,6 @@ struct afs_vnode {
#define AFS_VNODE_ZAP_DATA 3 /* set if vnode's data should be invalidated */
#define AFS_VNODE_DELETED 4 /* set if vnode deleted on server */
#define AFS_VNODE_MOUNTPOINT 5 /* set if vnode is a mountpoint symlink */
-#define AFS_VNODE_AUTOCELL 6 /* set if Vnode is an auto mount point */
#define AFS_VNODE_PSEUDODIR 7 /* set if Vnode is a pseudo directory */
#define AFS_VNODE_NEW_CONTENT 8 /* Set if file has new content (create/trunc-0) */
#define AFS_VNODE_SILLY_DELETED 9 /* Set if file has been silly-deleted */
@@ -1008,6 +1001,9 @@ extern int afs_merge_fs_addr4(struct afs_net *net, struct afs_addr_list *addr,
__be32 xdr, u16 port);
extern int afs_merge_fs_addr6(struct afs_net *net, struct afs_addr_list *addr,
__be32 *xdr, u16 port);
+void afs_set_peer_appdata(struct afs_server *server,
+ struct afs_addr_list *old_alist,
+ struct afs_addr_list *new_alist);
/*
* addr_prefs.c
@@ -1044,16 +1040,17 @@ static inline bool afs_cb_is_broken(unsigned int cb_break,
extern int afs_cell_init(struct afs_net *, const char *);
extern struct afs_cell *afs_find_cell(struct afs_net *, const char *, unsigned,
enum afs_cell_trace);
-extern struct afs_cell *afs_lookup_cell(struct afs_net *, const char *, unsigned,
- const char *, bool);
+struct afs_cell *afs_lookup_cell(struct afs_net *net,
+ const char *name, unsigned int namesz,
+ const char *vllist, bool excl,
+ enum afs_cell_trace trace);
extern struct afs_cell *afs_use_cell(struct afs_cell *, enum afs_cell_trace);
-extern void afs_unuse_cell(struct afs_net *, struct afs_cell *, enum afs_cell_trace);
+void afs_unuse_cell(struct afs_cell *cell, enum afs_cell_trace reason);
extern struct afs_cell *afs_get_cell(struct afs_cell *, enum afs_cell_trace);
extern void afs_see_cell(struct afs_cell *, enum afs_cell_trace);
extern void afs_put_cell(struct afs_cell *, enum afs_cell_trace);
extern void afs_queue_cell(struct afs_cell *, enum afs_cell_trace);
-extern void afs_manage_cells(struct work_struct *);
-extern void afs_cells_timer(struct timer_list *);
+void afs_set_cell_timer(struct afs_cell *cell, unsigned int delay_secs);
extern void __net_exit afs_cell_purge(struct afs_net *);
/*
@@ -1111,11 +1108,7 @@ extern int afs_silly_iput(struct dentry *, struct inode *);
extern const struct inode_operations afs_dynroot_inode_operations;
extern const struct dentry_operations afs_dynroot_dentry_operations;
-extern struct inode *afs_try_auto_mntpt(struct dentry *, struct inode *);
-extern int afs_dynroot_mkdir(struct afs_net *, struct afs_cell *);
-extern void afs_dynroot_rmdir(struct afs_net *, struct afs_cell *);
-extern int afs_dynroot_populate(struct super_block *);
-extern void afs_dynroot_depopulate(struct super_block *);
+struct inode *afs_dynroot_iget_root(struct super_block *sb);
/*
* file.c
@@ -1207,8 +1200,8 @@ struct afs_endpoint_state *afs_get_endpoint_state(struct afs_endpoint_state *est
enum afs_estate_trace where);
void afs_put_endpoint_state(struct afs_endpoint_state *estate, enum afs_estate_trace where);
extern void afs_fileserver_probe_result(struct afs_call *);
-void afs_fs_probe_fileserver(struct afs_net *net, struct afs_server *server,
- struct afs_addr_list *new_addrs, struct key *key);
+int afs_fs_probe_fileserver(struct afs_net *net, struct afs_server *server,
+ struct afs_addr_list *new_alist, struct key *key);
int afs_wait_for_fs_probes(struct afs_operation *op, struct afs_server_state *states, bool intr);
extern void afs_probe_fileserver(struct afs_net *, struct afs_server *);
extern void afs_fs_probe_dispatcher(struct work_struct *);
@@ -1228,7 +1221,6 @@ int afs_readlink(struct dentry *dentry, char __user *buffer, int buflen);
extern void afs_vnode_commit_status(struct afs_operation *, struct afs_vnode_param *);
extern int afs_fetch_status(struct afs_vnode *, struct key *, bool, afs_access_t *);
extern int afs_ilookup5_test_by_fid(struct inode *, void *);
-extern struct inode *afs_iget_pseudo_dir(struct super_block *, bool);
extern struct inode *afs_iget(struct afs_operation *, struct afs_vnode_param *);
extern struct inode *afs_root_iget(struct super_block *, struct key *);
extern int afs_getattr(struct mnt_idmap *idmap, const struct path *,
@@ -1510,20 +1502,30 @@ extern void __exit afs_clean_up_permit_cache(void);
*/
extern spinlock_t afs_server_peer_lock;
-extern struct afs_server *afs_find_server(struct afs_net *, const struct rxrpc_peer *);
-extern struct afs_server *afs_find_server_by_uuid(struct afs_net *, const uuid_t *);
+struct afs_server *afs_find_server(const struct rxrpc_peer *peer);
extern struct afs_server *afs_lookup_server(struct afs_cell *, struct key *, const uuid_t *, u32);
extern struct afs_server *afs_get_server(struct afs_server *, enum afs_server_trace);
-extern struct afs_server *afs_use_server(struct afs_server *, enum afs_server_trace);
-extern void afs_unuse_server(struct afs_net *, struct afs_server *, enum afs_server_trace);
-extern void afs_unuse_server_notime(struct afs_net *, struct afs_server *, enum afs_server_trace);
+struct afs_server *afs_use_server(struct afs_server *server, bool activate,
+ enum afs_server_trace reason);
+void afs_unuse_server(struct afs_net *net, struct afs_server *server,
+ enum afs_server_trace reason);
+void afs_unuse_server_notime(struct afs_net *net, struct afs_server *server,
+ enum afs_server_trace reason);
extern void afs_put_server(struct afs_net *, struct afs_server *, enum afs_server_trace);
-extern void afs_manage_servers(struct work_struct *);
-extern void afs_servers_timer(struct timer_list *);
+void afs_purge_servers(struct afs_cell *cell);
extern void afs_fs_probe_timer(struct timer_list *);
-extern void __net_exit afs_purge_servers(struct afs_net *);
+void __net_exit afs_wait_for_servers(struct afs_net *net);
bool afs_check_server_record(struct afs_operation *op, struct afs_server *server, struct key *key);
+static inline void afs_see_server(struct afs_server *server, enum afs_server_trace trace)
+{
+ int r = refcount_read(&server->ref);
+ int a = atomic_read(&server->active);
+
+ trace_afs_server(server->debug_id, r, a, trace);
+}
+
static inline void afs_inc_servers_outstanding(struct afs_net *net)
{
atomic_inc(&net->servers_outstanding);
diff --git a/fs/afs/main.c b/fs/afs/main.c
index 1ae0067f772d..c845c5daaeba 100644
--- a/fs/afs/main.c
+++ b/fs/afs/main.c
@@ -76,25 +76,17 @@ static int __net_init afs_net_init(struct net *net_ns)
mutex_init(&net->socket_mutex);
net->cells = RB_ROOT;
+ idr_init(&net->cells_dyn_ino);
init_rwsem(&net->cells_lock);
- INIT_WORK(&net->cells_manager, afs_manage_cells);
- timer_setup(&net->cells_timer, afs_cells_timer, 0);
-
mutex_init(&net->cells_alias_lock);
mutex_init(&net->proc_cells_lock);
INIT_HLIST_HEAD(&net->proc_cells);
seqlock_init(&net->fs_lock);
- net->fs_servers = RB_ROOT;
INIT_LIST_HEAD(&net->fs_probe_fast);
INIT_LIST_HEAD(&net->fs_probe_slow);
INIT_HLIST_HEAD(&net->fs_proc);
- INIT_HLIST_HEAD(&net->fs_addresses);
- seqlock_init(&net->fs_addr_lock);
-
- INIT_WORK(&net->fs_manager, afs_manage_servers);
- timer_setup(&net->fs_timer, afs_servers_timer, 0);
INIT_WORK(&net->fs_prober, afs_fs_probe_dispatcher);
timer_setup(&net->fs_probe_timer, afs_fs_probe_timer, 0);
atomic_set(&net->servers_outstanding, 1);
@@ -130,13 +122,14 @@ error_open_socket:
net->live = false;
afs_fs_probe_cleanup(net);
afs_cell_purge(net);
- afs_purge_servers(net);
+ afs_wait_for_servers(net);
error_cell_init:
net->live = false;
afs_proc_cleanup(net);
error_proc:
afs_put_sysnames(net->sysnames);
error_sysnames:
+ idr_destroy(&net->cells_dyn_ino);
net->live = false;
return ret;
}
@@ -151,10 +144,11 @@ static void __net_exit afs_net_exit(struct net *net_ns)
net->live = false;
afs_fs_probe_cleanup(net);
afs_cell_purge(net);
- afs_purge_servers(net);
+ afs_wait_for_servers(net);
afs_close_socket(net);
afs_proc_cleanup(net);
afs_put_sysnames(net->sysnames);
+ idr_destroy(&net->cells_dyn_ino);
kfree_rcu(rcu_access_pointer(net->address_prefs), rcu);
}
diff --git a/fs/afs/mntpt.c b/fs/afs/mntpt.c
index 507c25a5b2cb..45cee6534122 100644
--- a/fs/afs/mntpt.c
+++ b/fs/afs/mntpt.c
@@ -87,7 +87,7 @@ static int afs_mntpt_set_params(struct fs_context *fc, struct dentry *mntpt)
ctx->force = true;
}
if (ctx->cell) {
- afs_unuse_cell(ctx->net, ctx->cell, afs_cell_trace_unuse_mntpt);
+ afs_unuse_cell(ctx->cell, afs_cell_trace_unuse_mntpt);
ctx->cell = NULL;
}
if (test_bit(AFS_VNODE_PSEUDODIR, &vnode->flags)) {
@@ -107,7 +107,8 @@ static int afs_mntpt_set_params(struct fs_context *fc, struct dentry *mntpt)
if (size > AFS_MAXCELLNAME)
return -ENAMETOOLONG;
- cell = afs_lookup_cell(ctx->net, p, size, NULL, false);
+ cell = afs_lookup_cell(ctx->net, p, size, NULL, false,
+ afs_cell_trace_use_lookup_mntpt);
if (IS_ERR(cell)) {
pr_err("kAFS: unable to lookup cell '%pd'\n", mntpt);
return PTR_ERR(cell);
diff --git a/fs/afs/proc.c b/fs/afs/proc.c
index e7614f4f30c2..40e879c8ca77 100644
--- a/fs/afs/proc.c
+++ b/fs/afs/proc.c
@@ -122,14 +122,15 @@ static int afs_proc_cells_write(struct file *file, char *buf, size_t size)
if (strcmp(buf, "add") == 0) {
struct afs_cell *cell;
- cell = afs_lookup_cell(net, name, strlen(name), args, true);
+ cell = afs_lookup_cell(net, name, strlen(name), args, true,
+ afs_cell_trace_use_lookup_add);
if (IS_ERR(cell)) {
ret = PTR_ERR(cell);
goto done;
}
if (test_and_set_bit(AFS_CELL_FL_NO_GC, &cell->flags))
- afs_unuse_cell(net, cell, afs_cell_trace_unuse_no_pin);
+ afs_unuse_cell(cell, afs_cell_trace_unuse_no_pin);
} else {
goto inval;
}
@@ -206,7 +207,7 @@ static int afs_proc_rootcell_show(struct seq_file *m, void *v)
net = afs_seq2net_single(m);
down_read(&net->cells_lock);
- cell = net->ws_cell;
+ cell = rcu_dereference_protected(net->ws_cell, lockdep_is_held(&net->cells_lock));
if (cell)
seq_printf(m, "%s\n", cell->name);
up_read(&net->cells_lock);
@@ -242,7 +243,7 @@ static int afs_proc_rootcell_write(struct file *file, char *buf, size_t size)
ret = -EEXIST;
inode_lock(file_inode(file));
- if (!net->ws_cell)
+ if (!rcu_access_pointer(net->ws_cell))
ret = afs_cell_init(net, buf);
else
printk("busy\n");
@@ -443,8 +444,6 @@ static int afs_proc_servers_show(struct seq_file *m, void *v)
}
server = list_entry(v, struct afs_server, proc_link);
- estate = rcu_dereference(server->endpoint_state);
- alist = estate->addresses;
seq_printf(m, "%pU %3d %3d %s\n",
&server->uuid,
refcount_read(&server->ref),
@@ -454,10 +453,16 @@ static int afs_proc_servers_show(struct seq_file *m, void *v)
server->flags, server->rtt);
seq_printf(m, " - probe: last=%d\n",
(int)(jiffies - server->probed_at) / HZ);
+
+ estate = rcu_dereference(server->endpoint_state);
+ if (!estate)
+ goto out;
failed = estate->failed_set;
seq_printf(m, " - ESTATE pq=%x np=%u rsp=%lx f=%lx\n",
estate->probe_seq, atomic_read(&estate->nr_probing),
estate->responsive_set, estate->failed_set);
+
+ alist = estate->addresses;
seq_printf(m, " - ALIST v=%u ap=%u\n",
alist->version, alist->addr_pref_version);
for (i = 0; i < alist->nr_addrs; i++) {
@@ -470,6 +475,8 @@ static int afs_proc_servers_show(struct seq_file *m, void *v)
rxrpc_kernel_get_srtt(addr->peer),
addr->last_error, addr->prio);
}
+
+out:
return 0;
}
diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c
index 886416ea1d96..d5e480a33859 100644
--- a/fs/afs/rxrpc.c
+++ b/fs/afs/rxrpc.c
@@ -179,7 +179,7 @@ static void afs_free_call(struct afs_call *call)
if (call->type->destructor)
call->type->destructor(call);
- afs_unuse_server_notime(call->net, call->server, afs_server_trace_put_call);
+ afs_unuse_server_notime(call->net, call->server, afs_server_trace_unuse_call);
kfree(call->request);
o = atomic_read(&net->nr_outstanding_calls);
@@ -766,8 +766,14 @@ static void afs_rx_discard_new_call(struct rxrpc_call *rxcall,
static void afs_rx_new_call(struct sock *sk, struct rxrpc_call *rxcall,
unsigned long user_call_ID)
{
+ struct afs_call *call = (struct afs_call *)user_call_ID;
struct afs_net *net = afs_sock2net(sk);
+ call->peer = rxrpc_kernel_get_call_peer(sk->sk_socket, call->rxcall);
+ call->server = afs_find_server(call->peer);
+ if (!call->server)
+ trace_afs_cm_no_server(call, rxrpc_kernel_remote_srx(call->peer));
+
queue_work(afs_wq, &net->charge_preallocation_work);
}
diff --git a/fs/afs/server.c b/fs/afs/server.c
index 4504e16b458c..c530d1ca15df 100644
--- a/fs/afs/server.c
+++ b/fs/afs/server.c
@@ -14,190 +14,104 @@
static unsigned afs_server_gc_delay = 10; /* Server record timeout in seconds */
static atomic_t afs_server_debug_id;
-static struct afs_server *afs_maybe_use_server(struct afs_server *,
- enum afs_server_trace);
static void __afs_put_server(struct afs_net *, struct afs_server *);
+static void afs_server_timer(struct timer_list *timer);
+static void afs_server_destroyer(struct work_struct *work);
/*
* Find a server by one of its addresses.
*/
-struct afs_server *afs_find_server(struct afs_net *net, const struct rxrpc_peer *peer)
+struct afs_server *afs_find_server(const struct rxrpc_peer *peer)
{
- const struct afs_endpoint_state *estate;
- const struct afs_addr_list *alist;
- struct afs_server *server = NULL;
- unsigned int i;
- int seq = 1;
+ struct afs_server *server = (struct afs_server *)rxrpc_kernel_get_peer_data(peer);
- rcu_read_lock();
-
- do {
- if (server)
- afs_unuse_server_notime(net, server, afs_server_trace_put_find_rsq);
- server = NULL;
- seq++; /* 2 on the 1st/lockless path, otherwise odd */
- read_seqbegin_or_lock(&net->fs_addr_lock, &seq);
-
- hlist_for_each_entry_rcu(server, &net->fs_addresses, addr_link) {
- estate = rcu_dereference(server->endpoint_state);
- alist = estate->addresses;
- for (i = 0; i < alist->nr_addrs; i++)
- if (alist->addrs[i].peer == peer)
- goto found;
- }
-
- server = NULL;
- continue;
- found:
- server = afs_maybe_use_server(server, afs_server_trace_get_by_addr);
-
- } while (need_seqretry(&net->fs_addr_lock, seq));
-
- done_seqretry(&net->fs_addr_lock, seq);
-
- rcu_read_unlock();
- return server;
+ if (!server)
+ return NULL;
+ return afs_use_server(server, false, afs_server_trace_use_cm_call);
}
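afs_find_server() no longer scans an address list; it assumes the owning afs_server has been stashed as the rxrpc peer's application data (afs_set_peer_appdata() is declared elsewhere in this patch). A toy model of that back-pointer arrangement; the structures and helper below are illustrative, not the rxrpc API.

/* Illustrative only: find the owning object via a per-peer back-pointer
 * rather than by searching a global address list.
 */
#include <stdio.h>

struct toy_peer {
	void		*app_data;	/* set when a server claims the peer */
};

struct toy_server {
	const char	*name;
};

static struct toy_server *toy_find_server(const struct toy_peer *peer)
{
	return peer->app_data;		/* NULL if no server owns this peer */
}

int main(void)
{
	struct toy_server server = { .name = "fs1.example.org" };
	struct toy_peer peer = { .app_data = &server };

	printf("%s\n", toy_find_server(&peer)->name);
	return 0;
}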
/*
- * Look up a server by its UUID and mark it active.
+ * Look up a server by its UUID and mark it active. The caller must hold
+ * cell->fs_lock.
*/
-struct afs_server *afs_find_server_by_uuid(struct afs_net *net, const uuid_t *uuid)
+static struct afs_server *afs_find_server_by_uuid(struct afs_cell *cell, const uuid_t *uuid)
{
- struct afs_server *server = NULL;
+ struct afs_server *server;
struct rb_node *p;
- int diff, seq = 1;
+ int diff;
_enter("%pU", uuid);
- do {
- /* Unfortunately, rbtree walking doesn't give reliable results
- * under just the RCU read lock, so we have to check for
- * changes.
- */
- if (server)
- afs_unuse_server(net, server, afs_server_trace_put_uuid_rsq);
- server = NULL;
- seq++; /* 2 on the 1st/lockless path, otherwise odd */
- read_seqbegin_or_lock(&net->fs_lock, &seq);
-
- p = net->fs_servers.rb_node;
- while (p) {
- server = rb_entry(p, struct afs_server, uuid_rb);
-
- diff = memcmp(uuid, &server->uuid, sizeof(*uuid));
- if (diff < 0) {
- p = p->rb_left;
- } else if (diff > 0) {
- p = p->rb_right;
- } else {
- afs_use_server(server, afs_server_trace_get_by_uuid);
- break;
- }
-
- server = NULL;
- }
- } while (need_seqretry(&net->fs_lock, seq));
+ p = cell->fs_servers.rb_node;
+ while (p) {
+ server = rb_entry(p, struct afs_server, uuid_rb);
- done_seqretry(&net->fs_lock, seq);
+ diff = memcmp(uuid, &server->uuid, sizeof(*uuid));
+ if (diff < 0) {
+ p = p->rb_left;
+ } else if (diff > 0) {
+ p = p->rb_right;
+ } else {
+ if (test_bit(AFS_SERVER_FL_UNCREATED, &server->flags))
+ return NULL; /* Need a write lock */
+ afs_use_server(server, true, afs_server_trace_use_by_uuid);
+ return server;
+ }
+ }
- _leave(" = %p", server);
- return server;
+ return NULL;
}
/*
- * Install a server record in the namespace tree. If there's a clash, we stick
- * it into a list anchored on whichever afs_server struct is actually in the
- * tree.
+ * Install a server record in the cell tree. The caller must hold an exclusive
+ * lock on cell->fs_lock.
*/
static struct afs_server *afs_install_server(struct afs_cell *cell,
- struct afs_server *candidate)
+ struct afs_server **candidate)
{
- const struct afs_endpoint_state *estate;
- const struct afs_addr_list *alist;
- struct afs_server *server, *next;
+ struct afs_server *server;
struct afs_net *net = cell->net;
struct rb_node **pp, *p;
int diff;
_enter("%p", candidate);
- write_seqlock(&net->fs_lock);
-
/* Firstly install the server in the UUID lookup tree */
- pp = &net->fs_servers.rb_node;
+ pp = &cell->fs_servers.rb_node;
p = NULL;
while (*pp) {
p = *pp;
_debug("- consider %p", p);
server = rb_entry(p, struct afs_server, uuid_rb);
- diff = memcmp(&candidate->uuid, &server->uuid, sizeof(uuid_t));
- if (diff < 0) {
+ diff = memcmp(&(*candidate)->uuid, &server->uuid, sizeof(uuid_t));
+ if (diff < 0)
pp = &(*pp)->rb_left;
- } else if (diff > 0) {
+ else if (diff > 0)
pp = &(*pp)->rb_right;
- } else {
- if (server->cell == cell)
- goto exists;
-
- /* We have the same UUID representing servers in
- * different cells. Append the new server to the list.
- */
- for (;;) {
- next = rcu_dereference_protected(
- server->uuid_next,
- lockdep_is_held(&net->fs_lock.lock));
- if (!next)
- break;
- server = next;
- }
- rcu_assign_pointer(server->uuid_next, candidate);
- candidate->uuid_prev = server;
- server = candidate;
- goto added_dup;
- }
+ else
+ goto exists;
}
- server = candidate;
+ server = *candidate;
+ *candidate = NULL;
rb_link_node(&server->uuid_rb, p, pp);
- rb_insert_color(&server->uuid_rb, &net->fs_servers);
+ rb_insert_color(&server->uuid_rb, &cell->fs_servers);
+ write_seqlock(&net->fs_lock);
hlist_add_head_rcu(&server->proc_link, &net->fs_proc);
+ write_sequnlock(&net->fs_lock);
afs_get_cell(cell, afs_cell_trace_get_server);
-added_dup:
- write_seqlock(&net->fs_addr_lock);
- estate = rcu_dereference_protected(server->endpoint_state,
- lockdep_is_held(&net->fs_addr_lock.lock));
- alist = estate->addresses;
-
- /* Secondly, if the server has any IPv4 and/or IPv6 addresses, install
- * it in the IPv4 and/or IPv6 reverse-map lists.
- *
- * TODO: For speed we want to use something other than a flat list
- * here; even sorting the list in terms of lowest address would help a
- * bit, but anything we might want to do gets messy and memory
- * intensive.
- */
- if (alist->nr_addrs > 0)
- hlist_add_head_rcu(&server->addr_link, &net->fs_addresses);
-
- write_sequnlock(&net->fs_addr_lock);
-
exists:
- afs_get_server(server, afs_server_trace_get_install);
- write_sequnlock(&net->fs_lock);
+ afs_use_server(server, true, afs_server_trace_use_install);
return server;
}
/*
- * Allocate a new server record and mark it active.
+ * Allocate a new server record and mark it as active but uncreated.
*/
-static struct afs_server *afs_alloc_server(struct afs_cell *cell,
- const uuid_t *uuid,
- struct afs_addr_list *alist)
+static struct afs_server *afs_alloc_server(struct afs_cell *cell, const uuid_t *uuid)
{
- struct afs_endpoint_state *estate;
struct afs_server *server;
struct afs_net *net = cell->net;
@@ -205,65 +119,49 @@ static struct afs_server *afs_alloc_server(struct afs_cell *cell,
server = kzalloc(sizeof(struct afs_server), GFP_KERNEL);
if (!server)
- goto enomem;
-
- estate = kzalloc(sizeof(struct afs_endpoint_state), GFP_KERNEL);
- if (!estate)
- goto enomem_server;
+ return NULL;
refcount_set(&server->ref, 1);
- atomic_set(&server->active, 1);
+ atomic_set(&server->active, 0);
+ __set_bit(AFS_SERVER_FL_UNCREATED, &server->flags);
server->debug_id = atomic_inc_return(&afs_server_debug_id);
- server->addr_version = alist->version;
server->uuid = *uuid;
rwlock_init(&server->fs_lock);
+ INIT_WORK(&server->destroyer, &afs_server_destroyer);
+ timer_setup(&server->timer, afs_server_timer, 0);
INIT_LIST_HEAD(&server->volumes);
init_waitqueue_head(&server->probe_wq);
INIT_LIST_HEAD(&server->probe_link);
+ INIT_HLIST_NODE(&server->proc_link);
spin_lock_init(&server->probe_lock);
server->cell = cell;
server->rtt = UINT_MAX;
server->service_id = FS_SERVICE;
-
server->probe_counter = 1;
server->probed_at = jiffies - LONG_MAX / 2;
- refcount_set(&estate->ref, 1);
- estate->addresses = alist;
- estate->server_id = server->debug_id;
- estate->probe_seq = 1;
- rcu_assign_pointer(server->endpoint_state, estate);
afs_inc_servers_outstanding(net);
- trace_afs_server(server->debug_id, 1, 1, afs_server_trace_alloc);
- trace_afs_estate(estate->server_id, estate->probe_seq, refcount_read(&estate->ref),
- afs_estate_trace_alloc_server);
_leave(" = %p", server);
return server;
-
-enomem_server:
- kfree(server);
-enomem:
- _leave(" = NULL [nomem]");
- return NULL;
}
/*
* Look up an address record for a server
*/
-static struct afs_addr_list *afs_vl_lookup_addrs(struct afs_cell *cell,
- struct key *key, const uuid_t *uuid)
+static struct afs_addr_list *afs_vl_lookup_addrs(struct afs_server *server,
+ struct key *key)
{
struct afs_vl_cursor vc;
struct afs_addr_list *alist = NULL;
int ret;
ret = -ERESTARTSYS;
- if (afs_begin_vlserver_operation(&vc, cell, key)) {
+ if (afs_begin_vlserver_operation(&vc, server->cell, key)) {
while (afs_select_vlserver(&vc)) {
if (test_bit(AFS_VLSERVER_FL_IS_YFS, &vc.server->flags))
- alist = afs_yfsvl_get_endpoints(&vc, uuid);
+ alist = afs_yfsvl_get_endpoints(&vc, &server->uuid);
else
- alist = afs_vl_get_addrs_u(&vc, uuid);
+ alist = afs_vl_get_addrs_u(&vc, &server->uuid);
}
ret = afs_end_vlserver_operation(&vc);
@@ -273,72 +171,122 @@ static struct afs_addr_list *afs_vl_lookup_addrs(struct afs_cell *cell,
}
/*
- * Get or create a fileserver record.
+ * Get or create a fileserver record and return it with an active-use count on
+ * it.
*/
struct afs_server *afs_lookup_server(struct afs_cell *cell, struct key *key,
const uuid_t *uuid, u32 addr_version)
{
- struct afs_addr_list *alist;
- struct afs_server *server, *candidate;
+ struct afs_addr_list *alist = NULL;
+ struct afs_server *server, *candidate = NULL;
+ bool creating = false;
+ int ret;
_enter("%p,%pU", cell->net, uuid);
- server = afs_find_server_by_uuid(cell->net, uuid);
+ down_read(&cell->fs_lock);
+ server = afs_find_server_by_uuid(cell, uuid);
+ /* Won't see servers marked uncreated. */
+ up_read(&cell->fs_lock);
+
if (server) {
+ timer_delete_sync(&server->timer);
+ if (test_bit(AFS_SERVER_FL_CREATING, &server->flags))
+ goto wait_for_creation;
if (server->addr_version != addr_version)
set_bit(AFS_SERVER_FL_NEEDS_UPDATE, &server->flags);
return server;
}
- alist = afs_vl_lookup_addrs(cell, key, uuid);
- if (IS_ERR(alist))
- return ERR_CAST(alist);
-
- candidate = afs_alloc_server(cell, uuid, alist);
+ candidate = afs_alloc_server(cell, uuid);
if (!candidate) {
afs_put_addrlist(alist, afs_alist_trace_put_server_oom);
return ERR_PTR(-ENOMEM);
}
- server = afs_install_server(cell, candidate);
- if (server != candidate) {
- afs_put_addrlist(alist, afs_alist_trace_put_server_dup);
+ down_write(&cell->fs_lock);
+ server = afs_install_server(cell, &candidate);
+ if (test_bit(AFS_SERVER_FL_CREATING, &server->flags)) {
+ /* We need to wait for creation to complete. */
+ up_write(&cell->fs_lock);
+ goto wait_for_creation;
+ }
+ if (test_bit(AFS_SERVER_FL_UNCREATED, &server->flags)) {
+ set_bit(AFS_SERVER_FL_CREATING, &server->flags);
+ clear_bit(AFS_SERVER_FL_UNCREATED, &server->flags);
+ creating = true;
+ }
+ up_write(&cell->fs_lock);
+ timer_delete_sync(&server->timer);
+
+ /* If we get to create the server, we look up the addresses and then
+ * immediately dispatch an asynchronous probe to each interface on the
+ * fileserver. This will make sure the repeat-probing service is
+ * started.
+ */
+ if (creating) {
+ alist = afs_vl_lookup_addrs(server, key);
+ if (IS_ERR(alist)) {
+ ret = PTR_ERR(alist);
+ goto create_failed;
+ }
+
+ ret = afs_fs_probe_fileserver(cell->net, server, alist, key);
+ if (ret)
+ goto create_failed;
+
+ clear_and_wake_up_bit(AFS_SERVER_FL_CREATING, &server->flags);
+ }
+
+out:
+ afs_put_addrlist(alist, afs_alist_trace_put_server_create);
+ if (candidate) {
+ kfree(rcu_access_pointer(server->endpoint_state));
kfree(candidate);
- } else {
- /* Immediately dispatch an asynchronous probe to each interface
- * on the fileserver. This will make sure the repeat-probing
- * service is started.
- */
- afs_fs_probe_fileserver(cell->net, server, alist, key);
+ afs_dec_servers_outstanding(cell->net);
+ }
+ return server ?: ERR_PTR(ret);
+
+wait_for_creation:
+ afs_see_server(server, afs_server_trace_wait_create);
+ wait_on_bit(&server->flags, AFS_SERVER_FL_CREATING, TASK_UNINTERRUPTIBLE);
+ if (test_bit_acquire(AFS_SERVER_FL_UNCREATED, &server->flags)) {
+ /* Barrier: read flag before error */
+ ret = READ_ONCE(server->create_error);
+ afs_put_server(cell->net, server, afs_server_trace_unuse_create_fail);
+ server = NULL;
+ goto out;
}
- return server;
-}
+ ret = 0;
+ goto out;
-/*
- * Set the server timer to fire after a given delay, assuming it's not already
- * set for an earlier time.
- */
-static void afs_set_server_timer(struct afs_net *net, time64_t delay)
-{
- if (net->live) {
- afs_inc_servers_outstanding(net);
- if (timer_reduce(&net->fs_timer, jiffies + delay * HZ))
- afs_dec_servers_outstanding(net);
+create_failed:
+ down_write(&cell->fs_lock);
+
+ WRITE_ONCE(server->create_error, ret);
+ smp_wmb(); /* Barrier: set error before flag. */
+ set_bit(AFS_SERVER_FL_UNCREATED, &server->flags);
+
+ clear_and_wake_up_bit(AFS_SERVER_FL_CREATING, &server->flags);
+
+ if (test_bit(AFS_SERVER_FL_UNCREATED, &server->flags)) {
+ clear_bit(AFS_SERVER_FL_UNCREATED, &server->flags);
+ creating = true;
}
+ afs_unuse_server(cell->net, server, afs_server_trace_unuse_create_fail);
+ server = NULL;
+
+ up_write(&cell->fs_lock);
+ goto out;
}
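Server creation is serialised with two flag bits: the first caller flips UNCREATED to CREATING and performs the VL lookup and probe, while later callers wait for CREATING to clear and then pick up create_error. A compressed pthread model of the same pattern; the toy names are assumptions, and the kernel uses wait_on_bit() and clear_and_wake_up_bit() rather than a condition variable.

/* Illustrative only: first caller creates the record, later callers wait. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct toy_server {
	pthread_mutex_t	lock;
	pthread_cond_t	cond;
	bool		uncreated;	/* like AFS_SERVER_FL_UNCREATED */
	bool		creating;	/* like AFS_SERVER_FL_CREATING */
	int		create_error;
};

static int toy_lookup_server(struct toy_server *s)
{
	bool do_create = false;

	pthread_mutex_lock(&s->lock);
	if (s->uncreated) {
		s->uncreated = false;
		s->creating = true;
		do_create = true;
	} else {
		while (s->creating)
			pthread_cond_wait(&s->cond, &s->lock);
	}
	pthread_mutex_unlock(&s->lock);

	if (!do_create)
		return s->create_error;

	/* Stand-in for the VL address lookup and fileserver probe. */
	pthread_mutex_lock(&s->lock);
	s->create_error = 0;		/* on failure: also set s->uncreated = true */
	s->creating = false;
	pthread_cond_broadcast(&s->cond);
	pthread_mutex_unlock(&s->lock);
	return 0;
}

int main(void)
{
	struct toy_server s = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.cond = PTHREAD_COND_INITIALIZER,
		.uncreated = true,
	};

	printf("lookup -> %d\n", toy_lookup_server(&s));
	return 0;
}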
/*
- * Server management timer. We have an increment on fs_outstanding that we
- * need to pass along to the work item.
+ * Set/reduce a server's timer.
*/
-void afs_servers_timer(struct timer_list *timer)
+static void afs_set_server_timer(struct afs_server *server, unsigned int delay_secs)
{
- struct afs_net *net = container_of(timer, struct afs_net, fs_timer);
-
- _enter("");
- if (!queue_work(afs_wq, &net->fs_manager))
- afs_dec_servers_outstanding(net);
+ mod_timer(&server->timer, jiffies + delay_secs * HZ);
}
/*
@@ -357,32 +305,20 @@ struct afs_server *afs_get_server(struct afs_server *server,
}
/*
- * Try to get a reference on a server object.
+ * Get an active count on a server object and maybe remove from the inactive
+ * list.
*/
-static struct afs_server *afs_maybe_use_server(struct afs_server *server,
- enum afs_server_trace reason)
-{
- unsigned int a;
- int r;
-
- if (!__refcount_inc_not_zero(&server->ref, &r))
- return NULL;
-
- a = atomic_inc_return(&server->active);
- trace_afs_server(server->debug_id, r + 1, a, reason);
- return server;
-}
-
-/*
- * Get an active count on a server object.
- */
-struct afs_server *afs_use_server(struct afs_server *server, enum afs_server_trace reason)
+struct afs_server *afs_use_server(struct afs_server *server, bool activate,
+ enum afs_server_trace reason)
{
unsigned int a;
int r;
__refcount_inc(&server->ref, &r);
a = atomic_inc_return(&server->active);
+ if (a == 1 && activate &&
+ !test_bit(AFS_SERVER_FL_EXPIRED, &server->flags))
+ del_timer(&server->timer);
trace_afs_server(server->debug_id, r + 1, a, reason);
return server;
@@ -415,13 +351,16 @@ void afs_put_server(struct afs_net *net, struct afs_server *server,
void afs_unuse_server_notime(struct afs_net *net, struct afs_server *server,
enum afs_server_trace reason)
{
- if (server) {
- unsigned int active = atomic_dec_return(&server->active);
+ if (!server)
+ return;
- if (active == 0)
- afs_set_server_timer(net, afs_server_gc_delay);
- afs_put_server(net, server, reason);
+ if (atomic_dec_and_test(&server->active)) {
+ if (test_bit(AFS_SERVER_FL_EXPIRED, &server->flags) ||
+ READ_ONCE(server->cell->state) >= AFS_CELL_REMOVING)
+ schedule_work(&server->destroyer);
}
+
+ afs_put_server(net, server, reason);
}
/*
@@ -430,10 +369,22 @@ void afs_unuse_server_notime(struct afs_net *net, struct afs_server *server,
void afs_unuse_server(struct afs_net *net, struct afs_server *server,
enum afs_server_trace reason)
{
- if (server) {
- server->unuse_time = ktime_get_real_seconds();
- afs_unuse_server_notime(net, server, reason);
+ if (!server)
+ return;
+
+ if (atomic_dec_and_test(&server->active)) {
+ if (!test_bit(AFS_SERVER_FL_EXPIRED, &server->flags) &&
+ READ_ONCE(server->cell->state) < AFS_CELL_REMOVING) {
+ time64_t unuse_time = ktime_get_real_seconds();
+
+ server->unuse_time = unuse_time;
+ afs_set_server_timer(server, afs_server_gc_delay);
+ } else {
+ schedule_work(&server->destroyer);
+ }
}
+
+ afs_put_server(net, server, reason);
}
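Server lifetime is now driven by the active count rather than a namespace-wide manager: dropping the last active use either arms the per-server expiry timer (the normal case) or kicks the destroyer work directly when the record has expired or its cell is being removed. A rough userspace model of that decision, with timers and workqueues reduced to plain function calls; the toy_* helpers are illustrative only.

/* Illustrative only: what happens when the last active user goes away. */
#include <stdbool.h>
#include <stdio.h>

struct toy_server {
	int	active;		/* active-use count */
	bool	expired;	/* like AFS_SERVER_FL_EXPIRED */
	bool	cell_removing;	/* like cell->state >= AFS_CELL_REMOVING */
};

static void toy_arm_gc_timer(struct toy_server *s)
{
	(void)s;
	printf("arm per-server GC timer\n");
}

static void toy_queue_destroyer(struct toy_server *s)
{
	(void)s;
	printf("queue destroyer work\n");
}

static void toy_unuse_server(struct toy_server *s)
{
	if (--s->active > 0)
		return;
	if (!s->expired && !s->cell_removing)
		toy_arm_gc_timer(s);	/* normal case: grace period before GC */
	else
		toy_queue_destroyer(s);	/* tear the record down right away */
}

int main(void)
{
	struct toy_server s = { .active = 1 };

	toy_unuse_server(&s);
	return 0;
}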
static void afs_server_rcu(struct rcu_head *rcu)
@@ -463,159 +414,119 @@ static void afs_give_up_callbacks(struct afs_net *net, struct afs_server *server
}
/*
- * destroy a dead server
+ * Check to see if the server record has expired.
*/
-static void afs_destroy_server(struct afs_net *net, struct afs_server *server)
+static bool afs_has_server_expired(const struct afs_server *server)
{
- if (test_bit(AFS_SERVER_FL_MAY_HAVE_CB, &server->flags))
- afs_give_up_callbacks(net, server);
+ time64_t expires_at;
- afs_put_server(net, server, afs_server_trace_destroy);
+ if (atomic_read(&server->active))
+ return false;
+
+ if (server->cell->net->live ||
+ server->cell->state >= AFS_CELL_REMOVING) {
+ trace_afs_server(server->debug_id, refcount_read(&server->ref),
+ 0, afs_server_trace_purging);
+ return true;
+ }
+
+ expires_at = server->unuse_time;
+ if (!test_bit(AFS_SERVER_FL_VL_FAIL, &server->flags) &&
+ !test_bit(AFS_SERVER_FL_NOT_FOUND, &server->flags))
+ expires_at += afs_server_gc_delay;
+
+ return ktime_get_real_seconds() > expires_at;
}
/*
- * Garbage collect any expired servers.
+ * Remove a server record from its parent cell's database.
*/
-static void afs_gc_servers(struct afs_net *net, struct afs_server *gc_list)
+static bool afs_remove_server_from_cell(struct afs_server *server)
{
- struct afs_server *server, *next, *prev;
- int active;
-
- while ((server = gc_list)) {
- gc_list = server->gc_next;
-
- write_seqlock(&net->fs_lock);
-
- active = atomic_read(&server->active);
- if (active == 0) {
- trace_afs_server(server->debug_id, refcount_read(&server->ref),
- active, afs_server_trace_gc);
- next = rcu_dereference_protected(
- server->uuid_next, lockdep_is_held(&net->fs_lock.lock));
- prev = server->uuid_prev;
- if (!prev) {
- /* The one at the front is in the tree */
- if (!next) {
- rb_erase(&server->uuid_rb, &net->fs_servers);
- } else {
- rb_replace_node_rcu(&server->uuid_rb,
- &next->uuid_rb,
- &net->fs_servers);
- next->uuid_prev = NULL;
- }
- } else {
- /* This server is not at the front */
- rcu_assign_pointer(prev->uuid_next, next);
- if (next)
- next->uuid_prev = prev;
- }
-
- list_del(&server->probe_link);
- hlist_del_rcu(&server->proc_link);
- if (!hlist_unhashed(&server->addr_link))
- hlist_del_rcu(&server->addr_link);
- }
- write_sequnlock(&net->fs_lock);
+ struct afs_cell *cell = server->cell;
+
+ down_write(&cell->fs_lock);
- if (active == 0)
- afs_destroy_server(net, server);
+ if (!afs_has_server_expired(server)) {
+ up_write(&cell->fs_lock);
+ return false;
}
+
+ set_bit(AFS_SERVER_FL_EXPIRED, &server->flags);
+ _debug("expire %pU %u", &server->uuid, atomic_read(&server->active));
+ afs_see_server(server, afs_server_trace_see_expired);
+ rb_erase(&server->uuid_rb, &cell->fs_servers);
+ up_write(&cell->fs_lock);
+ return true;
}
-/*
- * Manage the records of servers known to be within a network namespace. This
- * includes garbage collecting unused servers.
- *
- * Note also that we were given an increment on net->servers_outstanding by
- * whoever queued us that we need to deal with before returning.
- */
-void afs_manage_servers(struct work_struct *work)
+static void afs_server_destroyer(struct work_struct *work)
{
- struct afs_net *net = container_of(work, struct afs_net, fs_manager);
- struct afs_server *gc_list = NULL;
- struct rb_node *cursor;
- time64_t now = ktime_get_real_seconds(), next_manage = TIME64_MAX;
- bool purging = !net->live;
-
- _enter("");
+ struct afs_endpoint_state *estate;
+ struct afs_server *server = container_of(work, struct afs_server, destroyer);
+ struct afs_net *net = server->cell->net;
- /* Trawl the server list looking for servers that have expired from
- * lack of use.
- */
- read_seqlock_excl(&net->fs_lock);
+ afs_see_server(server, afs_server_trace_see_destroyer);
- for (cursor = rb_first(&net->fs_servers); cursor; cursor = rb_next(cursor)) {
- struct afs_server *server =
- rb_entry(cursor, struct afs_server, uuid_rb);
- int active = atomic_read(&server->active);
+ if (test_bit(AFS_SERVER_FL_EXPIRED, &server->flags))
+ return;
- _debug("manage %pU %u", &server->uuid, active);
+ if (!afs_remove_server_from_cell(server))
+ return;
- if (purging) {
- trace_afs_server(server->debug_id, refcount_read(&server->ref),
- active, afs_server_trace_purging);
- if (active != 0)
- pr_notice("Can't purge s=%08x\n", server->debug_id);
- }
+ timer_shutdown_sync(&server->timer);
+ cancel_work(&server->destroyer);
- if (active == 0) {
- time64_t expire_at = server->unuse_time;
-
- if (!test_bit(AFS_SERVER_FL_VL_FAIL, &server->flags) &&
- !test_bit(AFS_SERVER_FL_NOT_FOUND, &server->flags))
- expire_at += afs_server_gc_delay;
- if (purging || expire_at <= now) {
- server->gc_next = gc_list;
- gc_list = server;
- } else if (expire_at < next_manage) {
- next_manage = expire_at;
- }
- }
- }
+ if (test_bit(AFS_SERVER_FL_MAY_HAVE_CB, &server->flags))
+ afs_give_up_callbacks(net, server);
- read_sequnlock_excl(&net->fs_lock);
+ /* Unbind the rxrpc_peer records from the server. */
+ estate = rcu_access_pointer(server->endpoint_state);
+ if (estate)
+ afs_set_peer_appdata(server, estate->addresses, NULL);
- /* Update the timer on the way out. We have to pass an increment on
- * servers_outstanding in the namespace that we are in to the timer or
- * the work scheduler.
- */
- if (!purging && next_manage < TIME64_MAX) {
- now = ktime_get_real_seconds();
+ write_seqlock(&net->fs_lock);
+ list_del_init(&server->probe_link);
+ if (!hlist_unhashed(&server->proc_link))
+ hlist_del_rcu(&server->proc_link);
+ write_sequnlock(&net->fs_lock);
- if (next_manage - now <= 0) {
- if (queue_work(afs_wq, &net->fs_manager))
- afs_inc_servers_outstanding(net);
- } else {
- afs_set_server_timer(net, next_manage - now);
- }
- }
+ afs_put_server(net, server, afs_server_trace_destroy);
+}
- afs_gc_servers(net, gc_list);
+static void afs_server_timer(struct timer_list *timer)
+{
+ struct afs_server *server = container_of(timer, struct afs_server, timer);
- afs_dec_servers_outstanding(net);
- _leave(" [%d]", atomic_read(&net->servers_outstanding));
+ afs_see_server(server, afs_server_trace_see_timer);
+ if (!test_bit(AFS_SERVER_FL_EXPIRED, &server->flags))
+ schedule_work(&server->destroyer);
}
-static void afs_queue_server_manager(struct afs_net *net)
+/*
+ * Wake up all the servers in a cell so that they can purge themselves.
+ */
+void afs_purge_servers(struct afs_cell *cell)
{
- afs_inc_servers_outstanding(net);
- if (!queue_work(afs_wq, &net->fs_manager))
- afs_dec_servers_outstanding(net);
+ struct afs_server *server;
+ struct rb_node *rb;
+
+ down_read(&cell->fs_lock);
+ for (rb = rb_first(&cell->fs_servers); rb; rb = rb_next(rb)) {
+ server = rb_entry(rb, struct afs_server, uuid_rb);
+ afs_see_server(server, afs_server_trace_see_purge);
+ schedule_work(&server->destroyer);
+ }
+ up_read(&cell->fs_lock);
}
/*
- * Purge list of servers.
+ * Wait for outstanding servers.
*/
-void afs_purge_servers(struct afs_net *net)
+void afs_wait_for_servers(struct afs_net *net)
{
_enter("");
- if (del_timer_sync(&net->fs_timer))
- afs_dec_servers_outstanding(net);
-
- afs_queue_server_manager(net);
-
- _debug("wait");
atomic_dec(&net->servers_outstanding);
wait_var_event(&net->servers_outstanding,
!atomic_read(&net->servers_outstanding));
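Teardown now simply waits for servers_outstanding to reach zero once the destroyers have been scheduled; the counter is primed to 1 at init time (see the main.c hunk above), so the decrement here releases the final waiter after every server has gone. A small model of that bias-of-one counter; the busy-wait stands in for wait_var_event(), and the toy_* names are illustrative.

/* Illustrative only: an outstanding-work counter primed with a bias of one. */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int outstanding = 1;	/* the bias taken at init time */

static void toy_server_created(void)   { atomic_fetch_add(&outstanding, 1); }
static void toy_server_destroyed(void) { atomic_fetch_sub(&outstanding, 1); }

static void toy_wait_for_servers(void)
{
	toy_server_destroyed();		/* drop the init-time bias */
	while (atomic_load(&outstanding))
		;			/* the kernel sleeps in wait_var_event() */
	printf("all servers gone\n");
}

int main(void)
{
	toy_server_created();
	toy_server_destroyed();		/* one server came and went */
	toy_wait_for_servers();
	return 0;
}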
@@ -639,7 +550,7 @@ static noinline bool afs_update_server_record(struct afs_operation *op,
atomic_read(&server->active),
afs_server_trace_update);
- alist = afs_vl_lookup_addrs(op->volume->cell, op->key, &server->uuid);
+ alist = afs_vl_lookup_addrs(server, op->key);
if (IS_ERR(alist)) {
rcu_read_lock();
estate = rcu_dereference(server->endpoint_state);
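The fs/afs/server.c hunks above replace the old namespace-wide garbage-collection pass with a per-server expiry timer that, unless the server is already marked expired, kicks a destroyer work item. A minimal userspace model of that "timer fires, then queue deferred destruction unless flagged" shape follows; it is plain C with pthreads, not kernel code, and the struct fields and helper names only mimic the ones in the patch.

/* Userspace sketch: timer path checks an "expired" flag, then hands the
 * object to a destroyer callback (the kernel code schedule_work()s instead). */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

struct server {
	atomic_bool expired;                    /* mirrors AFS_SERVER_FL_EXPIRED */
	void (*destroyer)(struct server *);
};

static void destroy_server(struct server *s)
{
	/* In the patch this is the destroyer work item: unlink and put the server. */
	printf("destroying server %p\n", (void *)s);
}

static void *expiry_timer(void *arg)
{
	struct server *s = arg;

	sleep(1);                               /* stand-in for the kernel timer delay */
	if (!atomic_load(&s->expired))          /* afs_server_timer() makes the same check */
		s->destroyer(s);
	return NULL;
}

int main(void)
{
	struct server s = { .destroyer = destroy_server };
	pthread_t t;

	pthread_create(&t, NULL, expiry_timer, &s);
	pthread_join(t, NULL);
	return 0;
}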
diff --git a/fs/afs/server_list.c b/fs/afs/server_list.c
index d20cd902ef94..20d5474837df 100644
--- a/fs/afs/server_list.c
+++ b/fs/afs/server_list.c
@@ -16,7 +16,7 @@ void afs_put_serverlist(struct afs_net *net, struct afs_server_list *slist)
if (slist && refcount_dec_and_test(&slist->usage)) {
for (i = 0; i < slist->nr_servers; i++)
afs_unuse_server(net, slist->servers[i].server,
- afs_server_trace_put_slist);
+ afs_server_trace_unuse_slist);
kfree_rcu(slist, rcu);
}
}
@@ -97,8 +97,8 @@ struct afs_server_list *afs_alloc_server_list(struct afs_volume *volume,
break;
if (j < slist->nr_servers) {
if (slist->servers[j].server == server) {
- afs_unuse_server(volume->cell->net, server,
- afs_server_trace_put_slist_isort);
+ afs_unuse_server_notime(volume->cell->net, server,
+ afs_server_trace_unuse_slist_isort);
continue;
}
diff --git a/fs/afs/super.c b/fs/afs/super.c
index a9bee610674e..25b306db6992 100644
--- a/fs/afs/super.c
+++ b/fs/afs/super.c
@@ -194,8 +194,6 @@ static int afs_show_options(struct seq_file *m, struct dentry *root)
if (as->dyn_root)
seq_puts(m, ",dyn");
- if (test_bit(AFS_VNODE_AUTOCELL, &AFS_FS_I(d_inode(root))->flags))
- seq_puts(m, ",autocell");
switch (as->flock_mode) {
case afs_flock_mode_unset: break;
case afs_flock_mode_local: p = "local"; break;
@@ -292,13 +290,14 @@ static int afs_parse_source(struct fs_context *fc, struct fs_parameter *param)
/* lookup the cell record */
if (cellname) {
cell = afs_lookup_cell(ctx->net, cellname, cellnamesz,
- NULL, false);
+ NULL, false,
+ afs_cell_trace_use_lookup_mount);
if (IS_ERR(cell)) {
pr_err("kAFS: unable to lookup cell '%*.*s'\n",
cellnamesz, cellnamesz, cellname ?: "");
return PTR_ERR(cell);
}
- afs_unuse_cell(ctx->net, ctx->cell, afs_cell_trace_unuse_parse);
+ afs_unuse_cell(ctx->cell, afs_cell_trace_unuse_parse);
afs_see_cell(cell, afs_cell_trace_see_source);
ctx->cell = cell;
}
@@ -395,7 +394,7 @@ static int afs_validate_fc(struct fs_context *fc)
ctx->key = NULL;
cell = afs_use_cell(ctx->cell->alias_of,
afs_cell_trace_use_fc_alias);
- afs_unuse_cell(ctx->net, ctx->cell, afs_cell_trace_unuse_fc);
+ afs_unuse_cell(ctx->cell, afs_cell_trace_unuse_fc);
ctx->cell = cell;
goto reget_key;
}
@@ -468,7 +467,7 @@ static int afs_fill_super(struct super_block *sb, struct afs_fs_context *ctx)
/* allocate the root inode and dentry */
if (as->dyn_root) {
- inode = afs_iget_pseudo_dir(sb, true);
+ inode = afs_dynroot_iget_root(sb);
} else {
sprintf(sb->s_id, "%llu", as->volume->vid);
afs_activate_volume(as->volume);
@@ -478,9 +477,6 @@ static int afs_fill_super(struct super_block *sb, struct afs_fs_context *ctx)
if (IS_ERR(inode))
return PTR_ERR(inode);
- if (ctx->autocell || as->dyn_root)
- set_bit(AFS_VNODE_AUTOCELL, &AFS_FS_I(inode)->flags);
-
ret = -ENOMEM;
sb->s_root = d_make_root(inode);
if (!sb->s_root)
@@ -488,9 +484,6 @@ static int afs_fill_super(struct super_block *sb, struct afs_fs_context *ctx)
if (as->dyn_root) {
sb->s_d_op = &afs_dynroot_dentry_operations;
- ret = afs_dynroot_populate(sb);
- if (ret < 0)
- goto error;
} else {
sb->s_d_op = &afs_fs_dentry_operations;
rcu_assign_pointer(as->volume->sb, sb);
@@ -527,9 +520,8 @@ static struct afs_super_info *afs_alloc_sbi(struct fs_context *fc)
static void afs_destroy_sbi(struct afs_super_info *as)
{
if (as) {
- struct afs_net *net = afs_net(as->net_ns);
afs_put_volume(as->volume, afs_volume_trace_put_destroy_sbi);
- afs_unuse_cell(net, as->cell, afs_cell_trace_unuse_sbi);
+ afs_unuse_cell(as->cell, afs_cell_trace_unuse_sbi);
put_net(as->net_ns);
kfree(as);
}
@@ -539,9 +531,6 @@ static void afs_kill_super(struct super_block *sb)
{
struct afs_super_info *as = AFS_FS_S(sb);
- if (as->dyn_root)
- afs_dynroot_depopulate(sb);
-
/* Clear the callback interests (which will do ilookup5) before
* deactivating the superblock.
*/
@@ -615,7 +604,7 @@ static void afs_free_fc(struct fs_context *fc)
afs_destroy_sbi(fc->s_fs_info);
afs_put_volume(ctx->volume, afs_volume_trace_put_free_fc);
- afs_unuse_cell(ctx->net, ctx->cell, afs_cell_trace_unuse_fc);
+ afs_unuse_cell(ctx->cell, afs_cell_trace_unuse_fc);
key_put(ctx->key);
kfree(ctx);
}
diff --git a/fs/afs/vl_alias.c b/fs/afs/vl_alias.c
index f9e76b604f31..709b4cdb723e 100644
--- a/fs/afs/vl_alias.c
+++ b/fs/afs/vl_alias.c
@@ -205,11 +205,11 @@ static int afs_query_for_alias(struct afs_cell *cell, struct key *key)
goto is_alias;
if (mutex_lock_interruptible(&cell->net->proc_cells_lock) < 0) {
- afs_unuse_cell(cell->net, p, afs_cell_trace_unuse_check_alias);
+ afs_unuse_cell(p, afs_cell_trace_unuse_check_alias);
return -ERESTARTSYS;
}
- afs_unuse_cell(cell->net, p, afs_cell_trace_unuse_check_alias);
+ afs_unuse_cell(p, afs_cell_trace_unuse_check_alias);
}
mutex_unlock(&cell->net->proc_cells_lock);
@@ -269,7 +269,8 @@ static int yfs_check_canonical_cell_name(struct afs_cell *cell, struct key *key)
if (!name_len || name_len > AFS_MAXCELLNAME)
master = ERR_PTR(-EOPNOTSUPP);
else
- master = afs_lookup_cell(cell->net, cell_name, name_len, NULL, false);
+ master = afs_lookup_cell(cell->net, cell_name, name_len, NULL, false,
+ afs_cell_trace_use_lookup_canonical);
kfree(cell_name);
if (IS_ERR(master))
return PTR_ERR(master);
diff --git a/fs/afs/vl_rotate.c b/fs/afs/vl_rotate.c
index d8f79f6ada3d..6ad9688d8f4b 100644
--- a/fs/afs/vl_rotate.c
+++ b/fs/afs/vl_rotate.c
@@ -48,7 +48,7 @@ static bool afs_start_vl_iteration(struct afs_vl_cursor *vc)
cell->dns_expiry <= ktime_get_real_seconds()) {
dns_lookup_count = smp_load_acquire(&cell->dns_lookup_count);
set_bit(AFS_CELL_FL_DO_LOOKUP, &cell->flags);
- afs_queue_cell(cell, afs_cell_trace_get_queue_dns);
+ afs_queue_cell(cell, afs_cell_trace_queue_dns);
if (cell->dns_source == DNS_RECORD_UNAVAILABLE) {
if (wait_var_event_interruptible(
diff --git a/fs/afs/volume.c b/fs/afs/volume.c
index af3a3f57c1b3..0efff3d25133 100644
--- a/fs/afs/volume.c
+++ b/fs/afs/volume.c
@@ -10,6 +10,7 @@
#include "internal.h"
static unsigned __read_mostly afs_volume_record_life = 60 * 60;
+static atomic_t afs_volume_debug_id;
static void afs_destroy_volume(struct work_struct *work);
@@ -59,7 +60,7 @@ static void afs_remove_volume_from_cell(struct afs_volume *volume)
struct afs_cell *cell = volume->cell;
if (!hlist_unhashed(&volume->proc_link)) {
- trace_afs_volume(volume->vid, refcount_read(&cell->ref),
+ trace_afs_volume(volume->debug_id, volume->vid, refcount_read(&volume->ref),
afs_volume_trace_remove);
write_seqlock(&cell->volume_lock);
hlist_del_rcu(&volume->proc_link);
@@ -84,6 +85,7 @@ static struct afs_volume *afs_alloc_volume(struct afs_fs_context *params,
if (!volume)
goto error_0;
+ volume->debug_id = atomic_inc_return(&afs_volume_debug_id);
volume->vid = vldb->vid[params->type];
volume->update_at = ktime_get_real_seconds() + afs_volume_record_life;
volume->cell = afs_get_cell(params->cell, afs_cell_trace_get_vol);
@@ -115,7 +117,7 @@ static struct afs_volume *afs_alloc_volume(struct afs_fs_context *params,
*_slist = slist;
rcu_assign_pointer(volume->servers, slist);
- trace_afs_volume(volume->vid, 1, afs_volume_trace_alloc);
+ trace_afs_volume(volume->debug_id, volume->vid, 1, afs_volume_trace_alloc);
return volume;
error_1:
@@ -247,7 +249,7 @@ static void afs_destroy_volume(struct work_struct *work)
afs_remove_volume_from_cell(volume);
afs_put_serverlist(volume->cell->net, slist);
afs_put_cell(volume->cell, afs_cell_trace_put_vol);
- trace_afs_volume(volume->vid, refcount_read(&volume->ref),
+ trace_afs_volume(volume->debug_id, volume->vid, refcount_read(&volume->ref),
afs_volume_trace_free);
kfree_rcu(volume, rcu);
@@ -262,7 +264,7 @@ bool afs_try_get_volume(struct afs_volume *volume, enum afs_volume_trace reason)
int r;
if (__refcount_inc_not_zero(&volume->ref, &r)) {
- trace_afs_volume(volume->vid, r + 1, reason);
+ trace_afs_volume(volume->debug_id, volume->vid, r + 1, reason);
return true;
}
return false;
@@ -278,7 +280,7 @@ struct afs_volume *afs_get_volume(struct afs_volume *volume,
int r;
__refcount_inc(&volume->ref, &r);
- trace_afs_volume(volume->vid, r + 1, reason);
+ trace_afs_volume(volume->debug_id, volume->vid, r + 1, reason);
}
return volume;
}
@@ -290,12 +292,13 @@ struct afs_volume *afs_get_volume(struct afs_volume *volume,
void afs_put_volume(struct afs_volume *volume, enum afs_volume_trace reason)
{
if (volume) {
+ unsigned int debug_id = volume->debug_id;
afs_volid_t vid = volume->vid;
bool zero;
int r;
zero = __refcount_dec_and_test(&volume->ref, &r);
- trace_afs_volume(vid, r - 1, reason);
+ trace_afs_volume(debug_id, vid, r - 1, reason);
if (zero)
schedule_work(&volume->destructor);
}
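The fs/afs/volume.c hunks add a per-volume debug_id taken from a global atomic counter and pass it to the afs_volume tracepoint in place of pointer-derived identifiers. A tiny userspace sketch of that id-allocation pattern, with illustrative names only:

/* Each new object gets a unique, monotonically increasing id from a global
 * atomic counter, so traces can refer to objects without exposing pointers. */
#include <stdatomic.h>
#include <stdio.h>

static atomic_uint volume_debug_id;         /* mirrors afs_volume_debug_id */

struct volume {
	unsigned int debug_id;
	unsigned long long vid;
};

static void volume_init(struct volume *v, unsigned long long vid)
{
	v->debug_id = atomic_fetch_add(&volume_debug_id, 1) + 1;  /* like atomic_inc_return() */
	v->vid = vid;
}

int main(void)
{
	struct volume a, b;

	volume_init(&a, 536870912);
	volume_init(&b, 536870913);
	printf("V=%u vid=%llu\nV=%u vid=%llu\n", a.debug_id, a.vid, b.debug_id, b.vid);
	return 0;
}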
diff --git a/fs/autofs/autofs_i.h b/fs/autofs/autofs_i.h
index 77c7991d89aa..23cea74f9933 100644
--- a/fs/autofs/autofs_i.h
+++ b/fs/autofs/autofs_i.h
@@ -218,6 +218,8 @@ void autofs_clean_ino(struct autofs_info *);
static inline int autofs_check_pipe(struct file *pipe)
{
+ if (pipe->f_mode & FMODE_PATH)
+ return -EINVAL;
if (!(pipe->f_mode & FMODE_CAN_WRITE))
return -EINVAL;
if (!S_ISFIFO(file_inode(pipe)->i_mode))
diff --git a/fs/autofs/dev-ioctl.c b/fs/autofs/dev-ioctl.c
index 6d57efbb8110..c5a6aae12d2c 100644
--- a/fs/autofs/dev-ioctl.c
+++ b/fs/autofs/dev-ioctl.c
@@ -442,7 +442,6 @@ static int autofs_dev_ioctl_timeout(struct file *fp,
sbi->exp_timeout = timeout * HZ;
} else {
struct dentry *base = fp->f_path.dentry;
- struct inode *inode = base->d_inode;
int path_len = param->size - AUTOFS_DEV_IOCTL_SIZE - 1;
struct dentry *dentry;
struct autofs_info *ino;
@@ -460,9 +459,7 @@ static int autofs_dev_ioctl_timeout(struct file *fp,
"the parent autofs mount timeout which could "
"prevent shutdown\n");
- inode_lock_shared(inode);
dentry = try_lookup_one_len(param->path, base, path_len);
- inode_unlock_shared(inode);
if (IS_ERR_OR_NULL(dentry))
return dentry ? PTR_ERR(dentry) : -ENOENT;
ino = autofs_dentry_ino(dentry);
diff --git a/fs/autofs/root.c b/fs/autofs/root.c
index 530d18827e35..174c7205fee4 100644
--- a/fs/autofs/root.c
+++ b/fs/autofs/root.c
@@ -15,8 +15,8 @@ static int autofs_dir_symlink(struct mnt_idmap *, struct inode *,
struct dentry *, const char *);
static int autofs_dir_unlink(struct inode *, struct dentry *);
static int autofs_dir_rmdir(struct inode *, struct dentry *);
-static int autofs_dir_mkdir(struct mnt_idmap *, struct inode *,
- struct dentry *, umode_t);
+static struct dentry *autofs_dir_mkdir(struct mnt_idmap *, struct inode *,
+ struct dentry *, umode_t);
static long autofs_root_ioctl(struct file *, unsigned int, unsigned long);
#ifdef CONFIG_COMPAT
static long autofs_root_compat_ioctl(struct file *,
@@ -720,9 +720,9 @@ static int autofs_dir_rmdir(struct inode *dir, struct dentry *dentry)
return 0;
}
-static int autofs_dir_mkdir(struct mnt_idmap *idmap,
- struct inode *dir, struct dentry *dentry,
- umode_t mode)
+static struct dentry *autofs_dir_mkdir(struct mnt_idmap *idmap,
+ struct inode *dir, struct dentry *dentry,
+ umode_t mode)
{
struct autofs_sb_info *sbi = autofs_sbi(dir->i_sb);
struct autofs_info *ino = autofs_dentry_ino(dentry);
@@ -739,7 +739,7 @@ static int autofs_dir_mkdir(struct mnt_idmap *idmap,
inode = autofs_get_inode(dir->i_sb, S_IFDIR | mode);
if (!inode)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
d_add(dentry, inode);
if (sbi->version < 5)
@@ -751,7 +751,7 @@ static int autofs_dir_mkdir(struct mnt_idmap *idmap,
inc_nlink(dir);
inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
- return 0;
+ return NULL;
}
/* Get/set timeout ioctl() operation */
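The autofs hunk above converts ->mkdir() from returning an int to returning a struct dentry *: judging from these hunks, NULL signals success with the passed-in dentry instantiated, while an ERR_PTR-encoded value reports failure (the wider convention may also allow returning a different dentry, which is not shown here). A small userspace model of that error-pointer convention, with ERR_PTR/IS_ERR re-implemented purely for illustration:

#include <stdio.h>
#include <errno.h>

#define MAX_ERRNO 4095
static inline void *ERR_PTR(long err) { return (void *)err; }
static inline long PTR_ERR(const void *p) { return (long)p; }
static inline int IS_ERR(const void *p)
{
	return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
}

struct dentry { char name[32]; };

static struct dentry *demo_mkdir(struct dentry *dentry, int fail)
{
	(void)dentry;                      /* the real hook instantiates this dentry */
	if (fail)
		return ERR_PTR(-ENOMEM);   /* the autofs hunk returns this on allocation failure */
	return NULL;                       /* NULL == success, caller keeps using 'dentry' */
}

int main(void)
{
	struct dentry d = { .name = "sub" };
	struct dentry *res = demo_mkdir(&d, 0);

	if (IS_ERR(res))
		printf("mkdir failed: %ld\n", PTR_ERR(res));
	else
		printf("mkdir of %s ok\n", d.name);
	return 0;
}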
diff --git a/fs/bad_inode.c b/fs/bad_inode.c
index 316d88da2ce1..0ef9bcb744dd 100644
--- a/fs/bad_inode.c
+++ b/fs/bad_inode.c
@@ -58,10 +58,10 @@ static int bad_inode_symlink(struct mnt_idmap *idmap,
return -EIO;
}
-static int bad_inode_mkdir(struct mnt_idmap *idmap, struct inode *dir,
- struct dentry *dentry, umode_t mode)
+static struct dentry *bad_inode_mkdir(struct mnt_idmap *idmap, struct inode *dir,
+ struct dentry *dentry, umode_t mode)
{
- return -EIO;
+ return ERR_PTR(-EIO);
}
static int bad_inode_rmdir (struct inode *dir, struct dentry *dentry)
diff --git a/fs/bcachefs/Kconfig b/fs/bcachefs/Kconfig
index fc7efd0a7525..c9798750202d 100644
--- a/fs/bcachefs/Kconfig
+++ b/fs/bcachefs/Kconfig
@@ -16,7 +16,7 @@ config BCACHEFS_FS
select ZSTD_COMPRESS
select ZSTD_DECOMPRESS
select CRYPTO
- select CRYPTO_SHA256
+ select CRYPTO_LIB_SHA256
select CRYPTO_CHACHA20
select CRYPTO_POLY1305
select KEYS
diff --git a/fs/bcachefs/Makefile b/fs/bcachefs/Makefile
index d2689388d5e8..9af65079374f 100644
--- a/fs/bcachefs/Makefile
+++ b/fs/bcachefs/Makefile
@@ -41,7 +41,6 @@ bcachefs-y := \
extent_update.o \
eytzinger.o \
fs.o \
- fs-common.o \
fs-ioctl.o \
fs-io.o \
fs-io-buffered.o \
@@ -64,9 +63,11 @@ bcachefs-y := \
migrate.o \
move.o \
movinggc.o \
+ namei.o \
nocow_locking.o \
opts.o \
printbuf.o \
+ progress.o \
quota.o \
rebalance.o \
rcu_pending.o \
diff --git a/fs/bcachefs/alloc_background.c b/fs/bcachefs/alloc_background.c
index 3ea809990ef1..5fb396be9127 100644
--- a/fs/bcachefs/alloc_background.c
+++ b/fs/bcachefs/alloc_background.c
@@ -232,7 +232,7 @@ int bch2_alloc_v3_validate(struct bch_fs *c, struct bkey_s_c k,
int ret = 0;
bkey_fsck_err_on(bch2_alloc_unpack_v3(&u, k),
- c, alloc_v2_unpack_error,
+ c, alloc_v3_unpack_error,
"unpack error");
fsck_err:
return ret;
@@ -777,14 +777,12 @@ static inline int bch2_dev_data_type_accounting_mod(struct btree_trans *trans, s
s64 delta_sectors,
s64 delta_fragmented, unsigned flags)
{
- struct disk_accounting_pos acc = {
- .type = BCH_DISK_ACCOUNTING_dev_data_type,
- .dev_data_type.dev = ca->dev_idx,
- .dev_data_type.data_type = data_type,
- };
s64 d[3] = { delta_buckets, delta_sectors, delta_fragmented };
- return bch2_disk_accounting_mod(trans, &acc, d, 3, flags & BTREE_TRIGGER_gc);
+ return bch2_disk_accounting_mod2(trans, flags & BTREE_TRIGGER_gc,
+ d, dev_data_type,
+ .dev = ca->dev_idx,
+ .data_type = data_type);
}
int bch2_alloc_key_to_dev_counters(struct btree_trans *trans, struct bch_dev *ca,
@@ -837,7 +835,7 @@ int bch2_trigger_alloc(struct btree_trans *trans,
struct bch_dev *ca = bch2_dev_bucket_tryget(c, new.k->p);
if (!ca)
- return -EIO;
+ return -BCH_ERR_trigger_alloc;
struct bch_alloc_v4 old_a_convert;
const struct bch_alloc_v4 *old_a = bch2_alloc_to_v4(old, &old_a_convert);
@@ -871,6 +869,9 @@ int bch2_trigger_alloc(struct btree_trans *trans,
if (data_type_is_empty(new_a->data_type) &&
BCH_ALLOC_V4_NEED_INC_GEN(new_a) &&
!bch2_bucket_is_open_safe(c, new.k->p.inode, new.k->p.offset)) {
+ if (new_a->oldest_gen == new_a->gen &&
+ !bch2_bucket_sectors_total(*new_a))
+ new_a->oldest_gen++;
new_a->gen++;
SET_BCH_ALLOC_V4_NEED_INC_GEN(new_a, false);
alloc_data_type_set(new_a, new_a->data_type);
@@ -889,26 +890,20 @@ int bch2_trigger_alloc(struct btree_trans *trans,
!new_a->io_time[READ])
new_a->io_time[READ] = bch2_current_io_time(c, READ);
- u64 old_lru = alloc_lru_idx_read(*old_a);
- u64 new_lru = alloc_lru_idx_read(*new_a);
- if (old_lru != new_lru) {
- ret = bch2_lru_change(trans, new.k->p.inode,
- bucket_to_u64(new.k->p),
- old_lru, new_lru);
- if (ret)
- goto err;
- }
+ ret = bch2_lru_change(trans, new.k->p.inode,
+ bucket_to_u64(new.k->p),
+ alloc_lru_idx_read(*old_a),
+ alloc_lru_idx_read(*new_a));
+ if (ret)
+ goto err;
- old_lru = alloc_lru_idx_fragmentation(*old_a, ca);
- new_lru = alloc_lru_idx_fragmentation(*new_a, ca);
- if (old_lru != new_lru) {
- ret = bch2_lru_change(trans,
- BCH_LRU_FRAGMENTATION_START,
- bucket_to_u64(new.k->p),
- old_lru, new_lru);
- if (ret)
- goto err;
- }
+ ret = bch2_lru_change(trans,
+ BCH_LRU_BUCKET_FRAGMENTATION,
+ bucket_to_u64(new.k->p),
+ alloc_lru_idx_fragmentation(*old_a, ca),
+ alloc_lru_idx_fragmentation(*new_a, ca));
+ if (ret)
+ goto err;
if (old_a->gen != new_a->gen) {
ret = bch2_bucket_gen_update(trans, new.k->p, new_a->gen);
@@ -1034,7 +1029,7 @@ fsck_err:
invalid_bucket:
bch2_fs_inconsistent(c, "reference to invalid bucket\n %s",
(bch2_bkey_val_to_text(&buf, c, new.s_c), buf.buf));
- ret = -EIO;
+ ret = -BCH_ERR_trigger_alloc;
goto err;
}
@@ -1705,7 +1700,8 @@ static int bch2_check_alloc_to_lru_ref(struct btree_trans *trans,
u64 lru_idx = alloc_lru_idx_fragmentation(*a, ca);
if (lru_idx) {
- ret = bch2_lru_check_set(trans, BCH_LRU_FRAGMENTATION_START,
+ ret = bch2_lru_check_set(trans, BCH_LRU_BUCKET_FRAGMENTATION,
+ bucket_to_u64(alloc_k.k->p),
lru_idx, alloc_k, last_flushed);
if (ret)
goto err;
@@ -1735,7 +1731,9 @@ static int bch2_check_alloc_to_lru_ref(struct btree_trans *trans,
a = &a_mut->v;
}
- ret = bch2_lru_check_set(trans, alloc_k.k->p.inode, a->io_time[READ],
+ ret = bch2_lru_check_set(trans, alloc_k.k->p.inode,
+ bucket_to_u64(alloc_k.k->p),
+ a->io_time[READ],
alloc_k, last_flushed);
if (ret)
goto err;
@@ -1757,7 +1755,8 @@ int bch2_check_alloc_to_lru_refs(struct bch_fs *c)
for_each_btree_key_commit(trans, iter, BTREE_ID_alloc,
POS_MIN, BTREE_ITER_prefetch, k,
NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
- bch2_check_alloc_to_lru_ref(trans, &iter, &last_flushed)));
+ bch2_check_alloc_to_lru_ref(trans, &iter, &last_flushed))) ?:
+ bch2_check_stripe_to_lru_refs(c);
bch2_bkey_buf_exit(&last_flushed, c);
bch_err_fn(c, ret);
@@ -1805,6 +1804,19 @@ struct discard_buckets_state {
u64 discarded;
};
+/*
+ * This is needed because discard is both a filesystem option and a device
+ * option, and mount options are supposed to apply to that mount and not be
+ * persisted, i.e. if it's set as a mount option we can't propagate it to the
+ * device.
+ */
+static inline bool discard_opt_enabled(struct bch_fs *c, struct bch_dev *ca)
+{
+ return test_bit(BCH_FS_discard_mount_opt_set, &c->flags)
+ ? c->opts.discard
+ : ca->mi.discard;
+}
+
static int bch2_discard_one_bucket(struct btree_trans *trans,
struct bch_dev *ca,
struct btree_iter *need_discard_iter,
@@ -1868,7 +1880,7 @@ static int bch2_discard_one_bucket(struct btree_trans *trans,
s->discarded++;
*discard_pos_done = iter.pos;
- if (ca->mi.discard && !c->opts.nochanges) {
+ if (discard_opt_enabled(c, ca) && !c->opts.nochanges) {
/*
* This works without any other locks because this is the only
* thread that removes items from the need_discard tree
@@ -1897,7 +1909,10 @@ commit:
if (ret)
goto out;
- count_event(c, bucket_discard);
+ if (!fastpath)
+ count_event(c, bucket_discard);
+ else
+ count_event(c, bucket_discard_fast);
out:
fsck_err:
if (discard_locked)
@@ -2055,16 +2070,71 @@ put_ref:
bch2_write_ref_put(c, BCH_WRITE_REF_discard_fast);
}
+static int invalidate_one_bp(struct btree_trans *trans,
+ struct bch_dev *ca,
+ struct bkey_s_c_backpointer bp,
+ struct bkey_buf *last_flushed)
+{
+ struct btree_iter extent_iter;
+ struct bkey_s_c extent_k =
+ bch2_backpointer_get_key(trans, bp, &extent_iter, 0, last_flushed);
+ int ret = bkey_err(extent_k);
+ if (ret)
+ return ret;
+
+ struct bkey_i *n =
+ bch2_bkey_make_mut(trans, &extent_iter, &extent_k,
+ BTREE_UPDATE_internal_snapshot_node);
+ ret = PTR_ERR_OR_ZERO(n);
+ if (ret)
+ goto err;
+
+ bch2_bkey_drop_device(bkey_i_to_s(n), ca->dev_idx);
+err:
+ bch2_trans_iter_exit(trans, &extent_iter);
+ return ret;
+}
+
+static int invalidate_one_bucket_by_bps(struct btree_trans *trans,
+ struct bch_dev *ca,
+ struct bpos bucket,
+ u8 gen,
+ struct bkey_buf *last_flushed)
+{
+ struct bpos bp_start = bucket_pos_to_bp_start(ca, bucket);
+ struct bpos bp_end = bucket_pos_to_bp_end(ca, bucket);
+
+ return for_each_btree_key_max_commit(trans, iter, BTREE_ID_backpointers,
+ bp_start, bp_end, 0, k,
+ NULL, NULL,
+ BCH_WATERMARK_btree|
+ BCH_TRANS_COMMIT_no_enospc, ({
+ if (k.k->type != KEY_TYPE_backpointer)
+ continue;
+
+ struct bkey_s_c_backpointer bp = bkey_s_c_to_backpointer(k);
+
+ if (bp.v->bucket_gen != gen)
+ continue;
+
+ /* filter out bps with gens that don't match */
+
+ invalidate_one_bp(trans, ca, bp, last_flushed);
+ }));
+}
+
+noinline_for_stack
static int invalidate_one_bucket(struct btree_trans *trans,
+ struct bch_dev *ca,
struct btree_iter *lru_iter,
struct bkey_s_c lru_k,
+ struct bkey_buf *last_flushed,
s64 *nr_to_invalidate)
{
struct bch_fs *c = trans->c;
- struct bkey_i_alloc_v4 *a = NULL;
struct printbuf buf = PRINTBUF;
struct bpos bucket = u64_to_bucket(lru_k.k->p.offset);
- unsigned cached_sectors;
+ struct btree_iter alloc_iter = {};
int ret = 0;
if (*nr_to_invalidate <= 0)
@@ -2081,35 +2151,37 @@ static int invalidate_one_bucket(struct btree_trans *trans,
if (bch2_bucket_is_open_safe(c, bucket.inode, bucket.offset))
return 0;
- a = bch2_trans_start_alloc_update(trans, bucket, BTREE_TRIGGER_bucket_invalidate);
- ret = PTR_ERR_OR_ZERO(a);
+ struct bkey_s_c alloc_k = bch2_bkey_get_iter(trans, &alloc_iter,
+ BTREE_ID_alloc, bucket,
+ BTREE_ITER_cached);
+ ret = bkey_err(alloc_k);
if (ret)
- goto out;
+ return ret;
+
+ struct bch_alloc_v4 a_convert;
+ const struct bch_alloc_v4 *a = bch2_alloc_to_v4(alloc_k, &a_convert);
/* We expect harmless races here due to the btree write buffer: */
- if (lru_pos_time(lru_iter->pos) != alloc_lru_idx_read(a->v))
+ if (lru_pos_time(lru_iter->pos) != alloc_lru_idx_read(*a))
goto out;
- BUG_ON(a->v.data_type != BCH_DATA_cached);
- BUG_ON(a->v.dirty_sectors);
+ /*
+ * Impossible since alloc_lru_idx_read() only returns nonzero if the
+ * bucket is supposed to be on the cached bucket LRU (i.e.
+ * BCH_DATA_cached)
+ *
+ * bch2_lru_validate() also disallows lru keys with lru_pos_time() == 0
+ */
+ BUG_ON(a->data_type != BCH_DATA_cached);
+ BUG_ON(a->dirty_sectors);
- if (!a->v.cached_sectors)
+ if (!a->cached_sectors)
bch_err(c, "invalidating empty bucket, confused");
- cached_sectors = a->v.cached_sectors;
+ unsigned cached_sectors = a->cached_sectors;
+ u8 gen = a->gen;
- SET_BCH_ALLOC_V4_NEED_INC_GEN(&a->v, false);
- a->v.gen++;
- a->v.data_type = 0;
- a->v.dirty_sectors = 0;
- a->v.stripe_sectors = 0;
- a->v.cached_sectors = 0;
- a->v.io_time[READ] = bch2_current_io_time(c, READ);
- a->v.io_time[WRITE] = bch2_current_io_time(c, WRITE);
-
- ret = bch2_trans_commit(trans, NULL, NULL,
- BCH_WATERMARK_btree|
- BCH_TRANS_COMMIT_no_enospc);
+ ret = invalidate_one_bucket_by_bps(trans, ca, bucket, gen, last_flushed);
if (ret)
goto out;
@@ -2117,6 +2189,7 @@ static int invalidate_one_bucket(struct btree_trans *trans,
--*nr_to_invalidate;
out:
fsck_err:
+ bch2_trans_iter_exit(trans, &alloc_iter);
printbuf_exit(&buf);
return ret;
}
@@ -2143,6 +2216,10 @@ static void bch2_do_invalidates_work(struct work_struct *work)
struct btree_trans *trans = bch2_trans_get(c);
int ret = 0;
+ struct bkey_buf last_flushed;
+ bch2_bkey_buf_init(&last_flushed);
+ bkey_init(&last_flushed.k->k);
+
ret = bch2_btree_write_buffer_tryflush(trans);
if (ret)
goto err;
@@ -2167,7 +2244,7 @@ static void bch2_do_invalidates_work(struct work_struct *work)
if (!k.k)
break;
- ret = invalidate_one_bucket(trans, &iter, k, &nr_to_invalidate);
+ ret = invalidate_one_bucket(trans, ca, &iter, k, &last_flushed, &nr_to_invalidate);
restart_err:
if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
continue;
@@ -2180,6 +2257,7 @@ restart_err:
err:
bch2_trans_put(trans);
percpu_ref_put(&ca->io_ref);
+ bch2_bkey_buf_exit(&last_flushed, c);
bch2_write_ref_put(c, BCH_WRITE_REF_invalidate);
}
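The discard_opt_enabled() helper added above encodes a precedence rule: a discard value given as a mount option applies to that mount only and overrides the per-device setting, otherwise the device's persistent setting is used. A minimal userspace sketch of the same rule; the struct and function names here are illustrative, not bcachefs API:

#include <stdbool.h>
#include <stdio.h>

struct fs_opts  { bool discard; bool discard_set_at_mount; };
struct dev_opts { bool discard; };

static bool discard_enabled(const struct fs_opts *fs, const struct dev_opts *dev)
{
	return fs->discard_set_at_mount ? fs->discard : dev->discard;
}

int main(void)
{
	struct dev_opts dev = { .discard = true };
	struct fs_opts no_opt = { 0 };
	struct fs_opts forced_off = { .discard = false, .discard_set_at_mount = true };

	printf("default mount: %d\n", discard_enabled(&no_opt, &dev));      /* 1: device setting wins */
	printf("-o nodiscard : %d\n", discard_enabled(&forced_off, &dev));  /* 0: mount option wins */
	return 0;
}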
diff --git a/fs/bcachefs/alloc_background.h b/fs/bcachefs/alloc_background.h
index de25ba4ee94b..c556ccaffe89 100644
--- a/fs/bcachefs/alloc_background.h
+++ b/fs/bcachefs/alloc_background.h
@@ -131,7 +131,7 @@ static inline enum bch_data_type alloc_data_type(struct bch_alloc_v4 a,
if (a.stripe)
return data_type == BCH_DATA_parity ? data_type : BCH_DATA_stripe;
if (bch2_bucket_sectors_dirty(a))
- return data_type;
+ return bucket_data_type(data_type);
if (a.cached_sectors)
return BCH_DATA_cached;
if (BCH_ALLOC_V4_NEED_DISCARD(&a))
diff --git a/fs/bcachefs/alloc_foreground.c b/fs/bcachefs/alloc_foreground.c
index 5a781fb4c794..0cac65347a5d 100644
--- a/fs/bcachefs/alloc_foreground.c
+++ b/fs/bcachefs/alloc_foreground.c
@@ -127,14 +127,14 @@ void __bch2_open_bucket_put(struct bch_fs *c, struct open_bucket *ob)
void bch2_open_bucket_write_error(struct bch_fs *c,
struct open_buckets *obs,
- unsigned dev)
+ unsigned dev, int err)
{
struct open_bucket *ob;
unsigned i;
open_bucket_for_each(c, obs, ob, i)
if (ob->dev == dev && ob->ec)
- bch2_ec_bucket_cancel(c, ob);
+ bch2_ec_bucket_cancel(c, ob, err);
}
static struct open_bucket *bch2_open_bucket_alloc(struct bch_fs *c)
@@ -179,23 +179,6 @@ static void open_bucket_free_unused(struct bch_fs *c, struct open_bucket *ob)
closure_wake_up(&c->freelist_wait);
}
-static inline unsigned open_buckets_reserved(enum bch_watermark watermark)
-{
- switch (watermark) {
- case BCH_WATERMARK_interior_updates:
- return 0;
- case BCH_WATERMARK_reclaim:
- return OPEN_BUCKETS_COUNT / 6;
- case BCH_WATERMARK_btree:
- case BCH_WATERMARK_btree_copygc:
- return OPEN_BUCKETS_COUNT / 4;
- case BCH_WATERMARK_copygc:
- return OPEN_BUCKETS_COUNT / 3;
- default:
- return OPEN_BUCKETS_COUNT / 2;
- }
-}
-
static inline bool may_alloc_bucket(struct bch_fs *c,
struct bpos bucket,
struct bucket_alloc_state *s)
@@ -239,7 +222,7 @@ static struct open_bucket *__try_alloc_bucket(struct bch_fs *c, struct bch_dev *
spin_lock(&c->freelist_lock);
- if (unlikely(c->open_buckets_nr_free <= open_buckets_reserved(watermark))) {
+ if (unlikely(c->open_buckets_nr_free <= bch2_open_buckets_reserved(watermark))) {
if (cl)
closure_wait(&c->open_buckets_wait, cl);
@@ -648,7 +631,7 @@ static inline void bch2_dev_stripe_increment_inlined(struct bch_dev *ca,
struct bch_dev_usage *usage)
{
u64 *v = stripe->next_alloc + ca->dev_idx;
- u64 free_space = dev_buckets_available(ca, BCH_WATERMARK_normal);
+ u64 free_space = __dev_buckets_available(ca, *usage, BCH_WATERMARK_normal);
u64 free_space_inv = free_space
? div64_u64(1ULL << 48, free_space)
: 1ULL << 48;
@@ -728,7 +711,7 @@ int bch2_bucket_alloc_set_trans(struct btree_trans *trans,
struct bch_dev_usage usage;
struct open_bucket *ob = bch2_bucket_alloc_trans(trans, ca, watermark, data_type,
- cl, flags & BCH_WRITE_ALLOC_NOWAIT, &usage);
+ cl, flags & BCH_WRITE_alloc_nowait, &usage);
if (!IS_ERR(ob))
bch2_dev_stripe_increment_inlined(ca, stripe, &usage);
bch2_dev_put(ca);
@@ -1336,7 +1319,7 @@ retry:
if (wp->data_type != BCH_DATA_user)
have_cache = true;
- if (target && !(flags & BCH_WRITE_ONLY_SPECIFIED_DEVS)) {
+ if (target && !(flags & BCH_WRITE_only_specified_devs)) {
ret = open_bucket_add_buckets(trans, &ptrs, wp, devs_have,
target, erasure_code,
nr_replicas, &nr_effective,
@@ -1426,7 +1409,7 @@ err:
if (cl && bch2_err_matches(ret, BCH_ERR_open_buckets_empty))
ret = -BCH_ERR_bucket_alloc_blocked;
- if (cl && !(flags & BCH_WRITE_ALLOC_NOWAIT) &&
+ if (cl && !(flags & BCH_WRITE_alloc_nowait) &&
bch2_err_matches(ret, BCH_ERR_freelist_empty))
ret = -BCH_ERR_bucket_alloc_blocked;
diff --git a/fs/bcachefs/alloc_foreground.h b/fs/bcachefs/alloc_foreground.h
index f25481a0d1a0..69ec6a012898 100644
--- a/fs/bcachefs/alloc_foreground.h
+++ b/fs/bcachefs/alloc_foreground.h
@@ -33,6 +33,23 @@ static inline struct bch_dev *ob_dev(struct bch_fs *c, struct open_bucket *ob)
return bch2_dev_have_ref(c, ob->dev);
}
+static inline unsigned bch2_open_buckets_reserved(enum bch_watermark watermark)
+{
+ switch (watermark) {
+ case BCH_WATERMARK_interior_updates:
+ return 0;
+ case BCH_WATERMARK_reclaim:
+ return OPEN_BUCKETS_COUNT / 6;
+ case BCH_WATERMARK_btree:
+ case BCH_WATERMARK_btree_copygc:
+ return OPEN_BUCKETS_COUNT / 4;
+ case BCH_WATERMARK_copygc:
+ return OPEN_BUCKETS_COUNT / 3;
+ default:
+ return OPEN_BUCKETS_COUNT / 2;
+ }
+}
+
struct open_bucket *bch2_bucket_alloc(struct bch_fs *, struct bch_dev *,
enum bch_watermark, enum bch_data_type,
struct closure *);
@@ -65,7 +82,7 @@ static inline struct open_bucket *ec_open_bucket(struct bch_fs *c,
}
void bch2_open_bucket_write_error(struct bch_fs *,
- struct open_buckets *, unsigned);
+ struct open_buckets *, unsigned, int);
void __bch2_open_bucket_put(struct bch_fs *, struct open_bucket *);
diff --git a/fs/bcachefs/alloc_types.h b/fs/bcachefs/alloc_types.h
index 4aa8ee026cb8..8f79f46c2a78 100644
--- a/fs/bcachefs/alloc_types.h
+++ b/fs/bcachefs/alloc_types.h
@@ -90,6 +90,7 @@ struct dev_stripe_state {
x(stopped) \
x(waiting_io) \
x(waiting_work) \
+ x(runnable) \
x(running)
enum write_point_state {
@@ -125,6 +126,7 @@ struct write_point {
enum write_point_state state;
u64 last_state_change;
u64 time[WRITE_POINT_STATE_NR];
+ u64 last_runtime;
} __aligned(SMP_CACHE_BYTES);
};
diff --git a/fs/bcachefs/backpointers.c b/fs/bcachefs/backpointers.c
index ebeb6a5ff9d2..20c497f0c2cb 100644
--- a/fs/bcachefs/backpointers.c
+++ b/fs/bcachefs/backpointers.c
@@ -11,6 +11,7 @@
#include "checksum.h"
#include "disk_accounting.h"
#include "error.h"
+#include "progress.h"
#include <linux/mm.h>
@@ -49,6 +50,8 @@ void bch2_backpointer_to_text(struct printbuf *out, struct bch_fs *c, struct bke
}
bch2_btree_id_level_to_text(out, bp.v->btree_id, bp.v->level);
+ prt_str(out, " data_type=");
+ bch2_prt_data_type(out, bp.v->data_type);
prt_printf(out, " suboffset=%u len=%u gen=%u pos=",
(u32) bp.k->p.offset & ~(~0U << MAX_EXTENT_COMPRESS_RATIO_SHIFT),
bp.v->bucket_len,
@@ -244,27 +247,31 @@ struct bkey_s_c bch2_backpointer_get_key(struct btree_trans *trans,
if (unlikely(bp.v->btree_id >= btree_id_nr_alive(c)))
return bkey_s_c_null;
- if (likely(!bp.v->level)) {
- bch2_trans_node_iter_init(trans, iter,
- bp.v->btree_id,
- bp.v->pos,
- 0, 0,
- iter_flags);
- struct bkey_s_c k = bch2_btree_iter_peek_slot(iter);
- if (bkey_err(k)) {
- bch2_trans_iter_exit(trans, iter);
- return k;
- }
+ bch2_trans_node_iter_init(trans, iter,
+ bp.v->btree_id,
+ bp.v->pos,
+ 0,
+ bp.v->level,
+ iter_flags);
+ struct bkey_s_c k = bch2_btree_iter_peek_slot(iter);
+ if (bkey_err(k)) {
+ bch2_trans_iter_exit(trans, iter);
+ return k;
+ }
- if (k.k &&
- extent_matches_bp(c, bp.v->btree_id, bp.v->level, k, bp))
- return k;
+ if (k.k &&
+ extent_matches_bp(c, bp.v->btree_id, bp.v->level, k, bp))
+ return k;
- bch2_trans_iter_exit(trans, iter);
+ bch2_trans_iter_exit(trans, iter);
+
+ if (!bp.v->level) {
int ret = backpointer_target_not_found(trans, bp, k, last_flushed);
return ret ? bkey_s_c_err(ret) : bkey_s_c_null;
} else {
struct btree *b = bch2_backpointer_get_node(trans, bp, iter, last_flushed);
+ if (b == ERR_PTR(-BCH_ERR_backpointer_to_overwritten_btree_node))
+ return bkey_s_c_null;
if (IS_ERR_OR_NULL(b))
return ((struct bkey_s_c) { .k = ERR_CAST(b) });
@@ -514,6 +521,22 @@ check_existing_bp:
if (!other_extent.k)
goto missing;
+ rcu_read_lock();
+ struct bch_dev *ca = bch2_dev_rcu_noerror(c, bp->k.p.inode);
+ if (ca) {
+ struct bkey_ptrs_c other_extent_ptrs = bch2_bkey_ptrs_c(other_extent);
+ bkey_for_each_ptr(other_extent_ptrs, ptr)
+ if (ptr->dev == bp->k.p.inode &&
+ dev_ptr_stale_rcu(ca, ptr)) {
+ ret = drop_dev_and_update(trans, other_bp.v->btree_id,
+ other_extent, bp->k.p.inode);
+ if (ret)
+ goto err;
+ goto out;
+ }
+ }
+ rcu_read_unlock();
+
if (bch2_extents_match(orig_k, other_extent)) {
printbuf_reset(&buf);
prt_printf(&buf, "duplicate versions of same extent, deleting smaller\n ");
@@ -590,9 +613,6 @@ static int check_extent_to_backpointers(struct btree_trans *trans,
struct extent_ptr_decoded p;
bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
- if (p.ptr.cached)
- continue;
-
if (p.ptr.dev == BCH_SB_MEMBER_INVALID)
continue;
@@ -600,9 +620,11 @@ static int check_extent_to_backpointers(struct btree_trans *trans,
struct bch_dev *ca = bch2_dev_rcu_noerror(c, p.ptr.dev);
bool check = ca && test_bit(PTR_BUCKET_NR(ca, &p.ptr), ca->bucket_backpointer_mismatches);
bool empty = ca && test_bit(PTR_BUCKET_NR(ca, &p.ptr), ca->bucket_backpointer_empty);
+
+ bool stale = p.ptr.cached && (!ca || dev_ptr_stale_rcu(ca, &p.ptr));
rcu_read_unlock();
- if (check || empty) {
+ if ((check || empty) && !stale) {
struct bkey_i_backpointer bp;
bch2_extent_ptr_to_bp(c, btree, level, k, p, entry, &bp);
@@ -715,71 +737,6 @@ static int bch2_get_btree_in_memory_pos(struct btree_trans *trans,
return ret;
}
-struct progress_indicator_state {
- unsigned long next_print;
- u64 nodes_seen;
- u64 nodes_total;
- struct btree *last_node;
-};
-
-static inline void progress_init(struct progress_indicator_state *s,
- struct bch_fs *c,
- u64 btree_id_mask)
-{
- memset(s, 0, sizeof(*s));
-
- s->next_print = jiffies + HZ * 10;
-
- for (unsigned i = 0; i < BTREE_ID_NR; i++) {
- if (!(btree_id_mask & BIT_ULL(i)))
- continue;
-
- struct disk_accounting_pos acc = {
- .type = BCH_DISK_ACCOUNTING_btree,
- .btree.id = i,
- };
-
- u64 v;
- bch2_accounting_mem_read(c, disk_accounting_pos_to_bpos(&acc), &v, 1);
- s->nodes_total += div64_ul(v, btree_sectors(c));
- }
-}
-
-static inline bool progress_update_p(struct progress_indicator_state *s)
-{
- bool ret = time_after_eq(jiffies, s->next_print);
-
- if (ret)
- s->next_print = jiffies + HZ * 10;
- return ret;
-}
-
-static void progress_update_iter(struct btree_trans *trans,
- struct progress_indicator_state *s,
- struct btree_iter *iter,
- const char *msg)
-{
- struct bch_fs *c = trans->c;
- struct btree *b = path_l(btree_iter_path(trans, iter))->b;
-
- s->nodes_seen += b != s->last_node;
- s->last_node = b;
-
- if (progress_update_p(s)) {
- struct printbuf buf = PRINTBUF;
- unsigned percent = s->nodes_total
- ? div64_u64(s->nodes_seen * 100, s->nodes_total)
- : 0;
-
- prt_printf(&buf, "%s: %d%%, done %llu/%llu nodes, at ",
- msg, percent, s->nodes_seen, s->nodes_total);
- bch2_bbpos_to_text(&buf, BBPOS(iter->btree_id, iter->pos));
-
- bch_info(c, "%s", buf.buf);
- printbuf_exit(&buf);
- }
-}
-
static int bch2_check_extents_to_backpointers_pass(struct btree_trans *trans,
struct extents_to_bp_state *s)
{
@@ -787,7 +744,7 @@ static int bch2_check_extents_to_backpointers_pass(struct btree_trans *trans,
struct progress_indicator_state progress;
int ret = 0;
- progress_init(&progress, trans->c, BIT_ULL(BTREE_ID_extents)|BIT_ULL(BTREE_ID_reflink));
+ bch2_progress_init(&progress, trans->c, BIT_ULL(BTREE_ID_extents)|BIT_ULL(BTREE_ID_reflink));
for (enum btree_id btree_id = 0;
btree_id < btree_id_nr_alive(c);
@@ -806,7 +763,7 @@ static int bch2_check_extents_to_backpointers_pass(struct btree_trans *trans,
BTREE_ITER_prefetch);
ret = for_each_btree_key_continue(trans, iter, 0, k, ({
- progress_update_iter(trans, &progress, &iter, "extents_to_backpointers");
+ bch2_progress_update_iter(trans, &progress, &iter, "extents_to_backpointers");
check_extent_to_backpointers(trans, s, btree_id, level, k) ?:
bch2_trans_commit(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc);
}));
@@ -827,7 +784,7 @@ enum alloc_sector_counter {
ALLOC_SECTORS_NR
};
-static enum alloc_sector_counter data_type_to_alloc_counter(enum bch_data_type t)
+static int data_type_to_alloc_counter(enum bch_data_type t)
{
switch (t) {
case BCH_DATA_btree:
@@ -836,9 +793,10 @@ static enum alloc_sector_counter data_type_to_alloc_counter(enum bch_data_type t
case BCH_DATA_cached:
return ALLOC_cached;
case BCH_DATA_stripe:
+ case BCH_DATA_parity:
return ALLOC_stripe;
default:
- BUG();
+ return -1;
}
}
@@ -889,7 +847,11 @@ static int check_bucket_backpointer_mismatch(struct btree_trans *trans, struct b
if (bp.v->bucket_gen != a->gen)
continue;
- sectors[data_type_to_alloc_counter(bp.v->data_type)] += bp.v->bucket_len;
+ int alloc_counter = data_type_to_alloc_counter(bp.v->data_type);
+ if (alloc_counter < 0)
+ continue;
+
+ sectors[alloc_counter] += bp.v->bucket_len;
};
bch2_trans_iter_exit(trans, &iter);
if (ret)
@@ -901,9 +863,8 @@ static int check_bucket_backpointer_mismatch(struct btree_trans *trans, struct b
goto err;
}
- /* Cached pointers don't have backpointers: */
-
if (sectors[ALLOC_dirty] != a->dirty_sectors ||
+ sectors[ALLOC_cached] != a->cached_sectors ||
sectors[ALLOC_stripe] != a->stripe_sectors) {
if (c->sb.version_upgrade_complete >= bcachefs_metadata_version_backpointer_bucket_gen) {
ret = bch2_backpointers_maybe_flush(trans, alloc_k, last_flushed);
@@ -912,6 +873,7 @@ static int check_bucket_backpointer_mismatch(struct btree_trans *trans, struct b
}
if (sectors[ALLOC_dirty] > a->dirty_sectors ||
+ sectors[ALLOC_cached] > a->cached_sectors ||
sectors[ALLOC_stripe] > a->stripe_sectors) {
ret = check_bucket_backpointers_to_extents(trans, ca, alloc_k.k->p) ?:
-BCH_ERR_transaction_restart_nested;
@@ -919,7 +881,8 @@ static int check_bucket_backpointer_mismatch(struct btree_trans *trans, struct b
}
if (!sectors[ALLOC_dirty] &&
- !sectors[ALLOC_stripe])
+ !sectors[ALLOC_stripe] &&
+ !sectors[ALLOC_cached])
__set_bit(alloc_k.k->p.offset, ca->bucket_backpointer_empty);
else
__set_bit(alloc_k.k->p.offset, ca->bucket_backpointer_mismatches);
@@ -1206,11 +1169,11 @@ static int bch2_check_backpointers_to_extents_pass(struct btree_trans *trans,
bch2_bkey_buf_init(&last_flushed);
bkey_init(&last_flushed.k->k);
- progress_init(&progress, trans->c, BIT_ULL(BTREE_ID_backpointers));
+ bch2_progress_init(&progress, trans->c, BIT_ULL(BTREE_ID_backpointers));
int ret = for_each_btree_key(trans, iter, BTREE_ID_backpointers,
POS_MIN, BTREE_ITER_prefetch, k, ({
- progress_update_iter(trans, &progress, &iter, "backpointers_to_extents");
+ bch2_progress_update_iter(trans, &progress, &iter, "backpointers_to_extents");
check_one_backpointer(trans, start, end, k, &last_flushed);
}));
diff --git a/fs/bcachefs/backpointers.h b/fs/bcachefs/backpointers.h
index 060dad1521ee..16575dbc5736 100644
--- a/fs/bcachefs/backpointers.h
+++ b/fs/bcachefs/backpointers.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _BCACHEFS_BACKPOINTERS_BACKGROUND_H
-#define _BCACHEFS_BACKPOINTERS_BACKGROUND_H
+#ifndef _BCACHEFS_BACKPOINTERS_H
+#define _BCACHEFS_BACKPOINTERS_H
#include "btree_cache.h"
#include "btree_iter.h"
@@ -123,7 +123,12 @@ static inline enum bch_data_type bch2_bkey_ptr_data_type(struct bkey_s_c k,
return BCH_DATA_btree;
case KEY_TYPE_extent:
case KEY_TYPE_reflink_v:
- return p.has_ec ? BCH_DATA_stripe : BCH_DATA_user;
+ if (p.has_ec)
+ return BCH_DATA_stripe;
+ if (p.ptr.cached)
+ return BCH_DATA_cached;
+ else
+ return BCH_DATA_user;
case KEY_TYPE_stripe: {
const struct bch_extent_ptr *ptr = &entry->ptr;
struct bkey_s_c_stripe s = bkey_s_c_to_stripe(k);
@@ -147,7 +152,20 @@ static inline void bch2_extent_ptr_to_bp(struct bch_fs *c,
struct bkey_i_backpointer *bp)
{
bkey_backpointer_init(&bp->k_i);
- bp->k.p = POS(p.ptr.dev, ((u64) p.ptr.offset << MAX_EXTENT_COMPRESS_RATIO_SHIFT) + p.crc.offset);
+ bp->k.p.inode = p.ptr.dev;
+
+ if (k.k->type != KEY_TYPE_stripe)
+ bp->k.p.offset = ((u64) p.ptr.offset << MAX_EXTENT_COMPRESS_RATIO_SHIFT) + p.crc.offset;
+ else {
+ /*
+ * Put stripe backpointers where they won't collide with the
+ * extent backpointers within the stripe:
+ */
+ struct bkey_s_c_stripe s = bkey_s_c_to_stripe(k);
+ bp->k.p.offset = ((u64) (p.ptr.offset + le16_to_cpu(s.v->sectors)) <<
+ MAX_EXTENT_COMPRESS_RATIO_SHIFT) - 1;
+ }
+
bp->v = (struct bch_backpointer) {
.btree_id = btree_id,
.level = level,
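The bch2_extent_ptr_to_bp() change above keys stripe backpointers at ((ptr.offset + stripe sectors) << MAX_EXTENT_COMPRESS_RATIO_SHIFT) - 1 so they sit past the keys used by the extent backpointers inside that stripe. A worked arithmetic example of that placement; the shift value and numbers below are made up for illustration, the real constant lives in backpointers.h:

#include <stdio.h>

#define SHIFT 10   /* illustrative stand-in for MAX_EXTENT_COMPRESS_RATIO_SHIFT */

int main(void)
{
	unsigned long long ptr_offset = 4096;    /* where the stripe starts on the device */
	unsigned long long stripe_sectors = 128; /* sectors covered by the stripe */

	unsigned long long first_extent_bp = ptr_offset << SHIFT;
	unsigned long long stripe_bp = ((ptr_offset + stripe_sectors) << SHIFT) - 1;

	printf("extent backpointers for this stripe start at %llu\n", first_extent_bp);
	printf("stripe backpointer is keyed at %llu, at the top of that range\n", stripe_bp);
	return 0;
}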
diff --git a/fs/bcachefs/bcachefs.h b/fs/bcachefs/bcachefs.h
index 161cf2f05d2a..f52311017aee 100644
--- a/fs/bcachefs/bcachefs.h
+++ b/fs/bcachefs/bcachefs.h
@@ -203,6 +203,7 @@
#include <linux/types.h>
#include <linux/workqueue.h>
#include <linux/zstd.h>
+#include <linux/unicode.h>
#include "bcachefs_format.h"
#include "btree_journal_iter_types.h"
@@ -444,6 +445,7 @@ BCH_DEBUG_PARAMS_DEBUG()
x(btree_node_sort) \
x(btree_node_read) \
x(btree_node_read_done) \
+ x(btree_node_write) \
x(btree_interior_update_foreground) \
x(btree_interior_update_total) \
x(btree_gc) \
@@ -456,6 +458,7 @@ BCH_DEBUG_PARAMS_DEBUG()
x(blocked_journal_low_on_space) \
x(blocked_journal_low_on_pin) \
x(blocked_journal_max_in_flight) \
+ x(blocked_journal_max_open) \
x(blocked_key_cache_flush) \
x(blocked_allocate) \
x(blocked_allocate_open_bucket) \
@@ -533,6 +536,7 @@ struct bch_dev {
*/
struct bch_member_cpu mi;
atomic64_t errors[BCH_MEMBER_ERROR_NR];
+ unsigned long write_errors_start;
__uuid_t uuid;
char name[BDEVNAME_SIZE];
@@ -623,7 +627,8 @@ struct bch_dev {
x(topology_error) \
x(errors_fixed) \
x(errors_not_fixed) \
- x(no_invalid_checks)
+ x(no_invalid_checks) \
+ x(discard_mount_opt_set) \
enum bch_fs_flags {
#define x(n) BCH_FS_##n,
@@ -687,7 +692,8 @@ struct btree_trans_buf {
x(gc_gens) \
x(snapshot_delete_pagecache) \
x(sysfs) \
- x(btree_write_buffer)
+ x(btree_write_buffer) \
+ x(btree_node_scrub)
enum bch_write_ref {
#define x(n) BCH_WRITE_REF_##n,
@@ -696,6 +702,8 @@ enum bch_write_ref {
BCH_WRITE_REF_NR,
};
+#define BCH_FS_DEFAULT_UTF8_ENCODING UNICODE_AGE(12, 1, 0)
+
struct bch_fs {
struct closure cl;
@@ -780,6 +788,9 @@ struct bch_fs {
u64 btrees_lost_data;
} sb;
+#ifdef CONFIG_UNICODE
+ struct unicode_map *cf_encoding;
+#endif
struct bch_sb_handle disk_sb;
@@ -969,7 +980,6 @@ struct bch_fs {
mempool_t compress_workspace[BCH_COMPRESSION_OPT_NR];
size_t zstd_workspace_size;
- struct crypto_shash *sha256;
struct crypto_sync_skcipher *chacha20;
struct crypto_shash *poly1305;
@@ -993,15 +1003,11 @@ struct bch_fs {
wait_queue_head_t copygc_running_wq;
/* STRIPES: */
- GENRADIX(struct stripe) stripes;
GENRADIX(struct gc_stripe) gc_stripes;
struct hlist_head ec_stripes_new[32];
spinlock_t ec_stripes_new_lock;
- ec_stripes_heap ec_stripes_heap;
- struct mutex ec_stripes_heap_lock;
-
/* ERASURE CODING */
struct list_head ec_stripe_head_list;
struct mutex ec_stripe_head_lock;
diff --git a/fs/bcachefs/bcachefs_format.h b/fs/bcachefs/bcachefs_format.h
index f70f0108401f..e96d87767020 100644
--- a/fs/bcachefs/bcachefs_format.h
+++ b/fs/bcachefs/bcachefs_format.h
@@ -686,7 +686,12 @@ struct bch_sb_field_ext {
x(inode_depth, BCH_VERSION(1, 17)) \
x(persistent_inode_cursors, BCH_VERSION(1, 18)) \
x(autofix_errors, BCH_VERSION(1, 19)) \
- x(directory_size, BCH_VERSION(1, 20))
+ x(directory_size, BCH_VERSION(1, 20)) \
+ x(cached_backpointers, BCH_VERSION(1, 21)) \
+ x(stripe_backpointers, BCH_VERSION(1, 22)) \
+ x(stripe_lru, BCH_VERSION(1, 23)) \
+ x(casefolding, BCH_VERSION(1, 24)) \
+ x(extent_flags, BCH_VERSION(1, 25))
enum bcachefs_metadata_version {
bcachefs_metadata_version_min = 9,
@@ -837,6 +842,7 @@ LE64_BITMASK(BCH_SB_SHARD_INUMS, struct bch_sb, flags[3], 28, 29);
LE64_BITMASK(BCH_SB_INODES_USE_KEY_CACHE,struct bch_sb, flags[3], 29, 30);
LE64_BITMASK(BCH_SB_JOURNAL_FLUSH_DELAY,struct bch_sb, flags[3], 30, 62);
LE64_BITMASK(BCH_SB_JOURNAL_FLUSH_DISABLED,struct bch_sb, flags[3], 62, 63);
+/* one free bit */
LE64_BITMASK(BCH_SB_JOURNAL_RECLAIM_DELAY,struct bch_sb, flags[4], 0, 32);
LE64_BITMASK(BCH_SB_JOURNAL_TRANSACTION_NAMES,struct bch_sb, flags[4], 32, 33);
LE64_BITMASK(BCH_SB_NOCOW, struct bch_sb, flags[4], 33, 34);
@@ -855,6 +861,8 @@ LE64_BITMASK(BCH_SB_VERSION_INCOMPAT, struct bch_sb, flags[5], 32, 48);
LE64_BITMASK(BCH_SB_VERSION_INCOMPAT_ALLOWED,
struct bch_sb, flags[5], 48, 64);
LE64_BITMASK(BCH_SB_SHARD_INUMS_NBITS, struct bch_sb, flags[6], 0, 4);
+LE64_BITMASK(BCH_SB_WRITE_ERROR_TIMEOUT,struct bch_sb, flags[6], 4, 14);
+LE64_BITMASK(BCH_SB_CSUM_ERR_RETRY_NR, struct bch_sb, flags[6], 14, 20);
static inline __u64 BCH_SB_COMPRESSION_TYPE(const struct bch_sb *sb)
{
@@ -908,7 +916,8 @@ static inline void SET_BCH_SB_BACKGROUND_COMPRESSION_TYPE(struct bch_sb *sb, __u
x(journal_no_flush, 16) \
x(alloc_v2, 17) \
x(extents_across_btree_nodes, 18) \
- x(incompat_version_field, 19)
+ x(incompat_version_field, 19) \
+ x(casefolding, 20)
#define BCH_SB_FEATURES_ALWAYS \
(BIT_ULL(BCH_FEATURE_new_extent_overwrite)| \
@@ -922,7 +931,8 @@ static inline void SET_BCH_SB_BACKGROUND_COMPRESSION_TYPE(struct bch_sb *sb, __u
BIT_ULL(BCH_FEATURE_new_siphash)| \
BIT_ULL(BCH_FEATURE_btree_ptr_v2)| \
BIT_ULL(BCH_FEATURE_new_varint)| \
- BIT_ULL(BCH_FEATURE_journal_no_flush))
+ BIT_ULL(BCH_FEATURE_journal_no_flush)| \
+ BIT_ULL(BCH_FEATURE_incompat_version_field))
enum bch_sb_feature {
#define x(f, n) BCH_FEATURE_##f,
diff --git a/fs/bcachefs/bcachefs_ioctl.h b/fs/bcachefs/bcachefs_ioctl.h
index 3c23bdf788ce..52594e925eb7 100644
--- a/fs/bcachefs/bcachefs_ioctl.h
+++ b/fs/bcachefs/bcachefs_ioctl.h
@@ -87,6 +87,7 @@ struct bch_ioctl_incremental {
#define BCH_IOCTL_FSCK_OFFLINE _IOW(0xbc, 19, struct bch_ioctl_fsck_offline)
#define BCH_IOCTL_FSCK_ONLINE _IOW(0xbc, 20, struct bch_ioctl_fsck_online)
#define BCH_IOCTL_QUERY_ACCOUNTING _IOW(0xbc, 21, struct bch_ioctl_query_accounting)
+#define BCH_IOCTL_QUERY_COUNTERS _IOW(0xbc, 21, struct bch_ioctl_query_counters)
/* ioctl below act on a particular file, not the filesystem as a whole: */
@@ -215,6 +216,10 @@ struct bch_ioctl_data {
union {
struct {
__u32 dev;
+ __u32 data_types;
+ } scrub;
+ struct {
+ __u32 dev;
__u32 pad;
} migrate;
struct {
@@ -229,6 +234,11 @@ enum bch_data_event {
BCH_DATA_EVENT_NR = 1,
};
+enum data_progress_data_type_special {
+ DATA_PROGRESS_DATA_TYPE_phys = 254,
+ DATA_PROGRESS_DATA_TYPE_done = 255,
+};
+
struct bch_ioctl_data_progress {
__u8 data_type;
__u8 btree_id;
@@ -237,11 +247,19 @@ struct bch_ioctl_data_progress {
__u64 sectors_done;
__u64 sectors_total;
+ __u64 sectors_error_corrected;
+ __u64 sectors_error_uncorrected;
} __packed __aligned(8);
+enum bch_ioctl_data_event_ret {
+ BCH_IOCTL_DATA_EVENT_RET_done = 1,
+ BCH_IOCTL_DATA_EVENT_RET_device_offline = 2,
+};
+
struct bch_ioctl_data_event {
__u8 type;
- __u8 pad[7];
+ __u8 ret;
+ __u8 pad[6];
union {
struct bch_ioctl_data_progress p;
__u64 pad2[15];
@@ -443,4 +461,13 @@ struct bch_ioctl_query_accounting {
struct bkey_i_accounting accounting[];
};
+#define BCH_IOCTL_QUERY_COUNTERS_MOUNT (1 << 0)
+
+struct bch_ioctl_query_counters {
+ __u16 nr;
+ __u16 flags;
+ __u32 pad;
+ __u64 d[];
+};
+
#endif /* _BCACHEFS_IOCTL_H */
diff --git a/fs/bcachefs/btree_cache.c b/fs/bcachefs/btree_cache.c
index 1ec1f90e0eb3..54666027aa85 100644
--- a/fs/bcachefs/btree_cache.c
+++ b/fs/bcachefs/btree_cache.c
@@ -610,6 +610,7 @@ void bch2_fs_btree_cache_exit(struct bch_fs *c)
btree_node_write_in_flight(b));
btree_node_data_free(bc, b);
+ cond_resched();
}
BUG_ON(!bch2_journal_error(&c->journal) &&
diff --git a/fs/bcachefs/btree_gc.c b/fs/bcachefs/btree_gc.c
index dd1d9b74076e..ff681e733598 100644
--- a/fs/bcachefs/btree_gc.c
+++ b/fs/bcachefs/btree_gc.c
@@ -27,6 +27,7 @@
#include "journal.h"
#include "keylist.h"
#include "move.h"
+#include "progress.h"
#include "recovery_passes.h"
#include "reflink.h"
#include "recovery.h"
@@ -656,7 +657,9 @@ fsck_err:
return ret;
}
-static int bch2_gc_btree(struct btree_trans *trans, enum btree_id btree, bool initial)
+static int bch2_gc_btree(struct btree_trans *trans,
+ struct progress_indicator_state *progress,
+ enum btree_id btree, bool initial)
{
struct bch_fs *c = trans->c;
unsigned target_depth = btree_node_type_has_triggers(__btree_node_type(0, btree)) ? 0 : 1;
@@ -673,6 +676,7 @@ static int bch2_gc_btree(struct btree_trans *trans, enum btree_id btree, bool in
BTREE_ITER_prefetch);
ret = for_each_btree_key_continue(trans, iter, 0, k, ({
+ bch2_progress_update_iter(trans, progress, &iter, "check_allocations");
gc_pos_set(c, gc_pos_btree(btree, level, k.k->p));
bch2_gc_mark_key(trans, btree, level, &prev, &iter, k, initial);
}));
@@ -717,22 +721,24 @@ static inline int btree_id_gc_phase_cmp(enum btree_id l, enum btree_id r)
static int bch2_gc_btrees(struct bch_fs *c)
{
struct btree_trans *trans = bch2_trans_get(c);
- enum btree_id ids[BTREE_ID_NR];
struct printbuf buf = PRINTBUF;
- unsigned i;
int ret = 0;
- for (i = 0; i < BTREE_ID_NR; i++)
+ struct progress_indicator_state progress;
+ bch2_progress_init(&progress, c, ~0ULL);
+
+ enum btree_id ids[BTREE_ID_NR];
+ for (unsigned i = 0; i < BTREE_ID_NR; i++)
ids[i] = i;
bubble_sort(ids, BTREE_ID_NR, btree_id_gc_phase_cmp);
- for (i = 0; i < btree_id_nr_alive(c) && !ret; i++) {
+ for (unsigned i = 0; i < btree_id_nr_alive(c) && !ret; i++) {
unsigned btree = i < BTREE_ID_NR ? ids[i] : i;
if (IS_ERR_OR_NULL(bch2_btree_id_root(c, btree)->b))
continue;
- ret = bch2_gc_btree(trans, btree, true);
+ ret = bch2_gc_btree(trans, &progress, btree, true);
}
printbuf_exit(&buf);
diff --git a/fs/bcachefs/btree_io.c b/fs/bcachefs/btree_io.c
index dece27d9db04..2ba33ffc9795 100644
--- a/fs/bcachefs/btree_io.c
+++ b/fs/bcachefs/btree_io.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
#include "bcachefs.h"
+#include "bkey_buf.h"
#include "bkey_methods.h"
#include "bkey_sort.h"
#include "btree_cache.h"
@@ -1186,7 +1187,7 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
le64_to_cpu(i->journal_seq),
b->written, b->written + sectors, ptr_written);
- b->written += sectors;
+ b->written = min(b->written + sectors, btree_sectors(c));
if (blacklisted && !first)
continue;
@@ -1328,6 +1329,7 @@ static void btree_node_read_work(struct work_struct *work)
bch_info(c, "retrying read");
ca = bch2_dev_get_ioref(c, rb->pick.ptr.dev, READ);
rb->have_ioref = ca != NULL;
+ rb->start_time = local_clock();
bio_reset(bio, NULL, REQ_OP_READ|REQ_SYNC|REQ_META);
bio->bi_iter.bi_sector = rb->pick.ptr.offset;
bio->bi_iter.bi_size = btree_buf_bytes(b);
@@ -1338,21 +1340,26 @@ static void btree_node_read_work(struct work_struct *work)
} else {
bio->bi_status = BLK_STS_REMOVED;
}
+
+ bch2_account_io_completion(ca, BCH_MEMBER_ERROR_read,
+ rb->start_time, !bio->bi_status);
start:
printbuf_reset(&buf);
bch2_btree_pos_to_text(&buf, c, b);
- bch2_dev_io_err_on(ca && bio->bi_status, ca, BCH_MEMBER_ERROR_read,
- "btree read error %s for %s",
- bch2_blk_status_to_str(bio->bi_status), buf.buf);
+
+ if (ca && bio->bi_status)
+ bch_err_dev_ratelimited(ca,
+ "btree read error %s for %s",
+ bch2_blk_status_to_str(bio->bi_status), buf.buf);
if (rb->have_ioref)
percpu_ref_put(&ca->io_ref);
rb->have_ioref = false;
- bch2_mark_io_failure(&failed, &rb->pick);
+ bch2_mark_io_failure(&failed, &rb->pick, false);
can_retry = bch2_bkey_pick_read_device(c,
bkey_i_to_s_c(&b->key),
- &failed, &rb->pick) > 0;
+ &failed, &rb->pick, -1) > 0;
if (!bio->bi_status &&
!bch2_btree_node_read_done(c, ca, b, can_retry, &saw_error)) {
@@ -1400,12 +1407,11 @@ static void btree_node_read_endio(struct bio *bio)
struct btree_read_bio *rb =
container_of(bio, struct btree_read_bio, bio);
struct bch_fs *c = rb->c;
+ struct bch_dev *ca = rb->have_ioref
+ ? bch2_dev_have_ref(c, rb->pick.ptr.dev) : NULL;
- if (rb->have_ioref) {
- struct bch_dev *ca = bch2_dev_have_ref(c, rb->pick.ptr.dev);
-
- bch2_latency_acct(ca, rb->start_time, READ);
- }
+ bch2_account_io_completion(ca, BCH_MEMBER_ERROR_read,
+ rb->start_time, !bio->bi_status);
queue_work(c->btree_read_complete_wq, &rb->work);
}
@@ -1697,7 +1703,7 @@ void bch2_btree_node_read(struct btree_trans *trans, struct btree *b,
return;
ret = bch2_bkey_pick_read_device(c, bkey_i_to_s_c(&b->key),
- NULL, &pick);
+ NULL, &pick, -1);
if (ret <= 0) {
struct printbuf buf = PRINTBUF;
@@ -1811,6 +1817,190 @@ int bch2_btree_root_read(struct bch_fs *c, enum btree_id id,
return bch2_trans_run(c, __bch2_btree_root_read(trans, id, k, level));
}
+struct btree_node_scrub {
+ struct bch_fs *c;
+ struct bch_dev *ca;
+ void *buf;
+ bool used_mempool;
+ unsigned written;
+
+ enum btree_id btree;
+ unsigned level;
+ struct bkey_buf key;
+ __le64 seq;
+
+ struct work_struct work;
+ struct bio bio;
+};
+
+static bool btree_node_scrub_check(struct bch_fs *c, struct btree_node *data, unsigned ptr_written,
+ struct printbuf *err)
+{
+ unsigned written = 0;
+
+ if (le64_to_cpu(data->magic) != bset_magic(c)) {
+ prt_printf(err, "bad magic: want %llx, got %llx",
+ bset_magic(c), le64_to_cpu(data->magic));
+ return false;
+ }
+
+ while (written < (ptr_written ?: btree_sectors(c))) {
+ struct btree_node_entry *bne;
+ struct bset *i;
+ bool first = !written;
+
+ if (first) {
+ bne = NULL;
+ i = &data->keys;
+ } else {
+ bne = (void *) data + (written << 9);
+ i = &bne->keys;
+
+ if (!ptr_written && i->seq != data->keys.seq)
+ break;
+ }
+
+ struct nonce nonce = btree_nonce(i, written << 9);
+ bool good_csum_type = bch2_checksum_type_valid(c, BSET_CSUM_TYPE(i));
+
+ if (first) {
+ if (good_csum_type) {
+ struct bch_csum csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, data);
+ if (bch2_crc_cmp(data->csum, csum)) {
+ bch2_csum_err_msg(err, BSET_CSUM_TYPE(i), data->csum, csum);
+ return false;
+ }
+ }
+
+ written += vstruct_sectors(data, c->block_bits);
+ } else {
+ if (good_csum_type) {
+ struct bch_csum csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, bne);
+ if (bch2_crc_cmp(bne->csum, csum)) {
+ bch2_csum_err_msg(err, BSET_CSUM_TYPE(i), bne->csum, csum);
+ return false;
+ }
+ }
+
+ written += vstruct_sectors(bne, c->block_bits);
+ }
+ }
+
+ return true;
+}
+
+static void btree_node_scrub_work(struct work_struct *work)
+{
+ struct btree_node_scrub *scrub = container_of(work, struct btree_node_scrub, work);
+ struct bch_fs *c = scrub->c;
+ struct printbuf err = PRINTBUF;
+
+ __bch2_btree_pos_to_text(&err, c, scrub->btree, scrub->level,
+ bkey_i_to_s_c(scrub->key.k));
+ prt_newline(&err);
+
+ if (!btree_node_scrub_check(c, scrub->buf, scrub->written, &err)) {
+ struct btree_trans *trans = bch2_trans_get(c);
+
+ struct btree_iter iter;
+ bch2_trans_node_iter_init(trans, &iter, scrub->btree,
+ scrub->key.k->k.p, 0, scrub->level - 1, 0);
+
+ struct btree *b;
+ int ret = lockrestart_do(trans, PTR_ERR_OR_ZERO(b = bch2_btree_iter_peek_node(&iter)));
+ if (ret)
+ goto err;
+
+ if (bkey_i_to_btree_ptr_v2(&b->key)->v.seq == scrub->seq) {
+ bch_err(c, "error validating btree node during scrub on %s at btree %s",
+ scrub->ca->name, err.buf);
+
+ ret = bch2_btree_node_rewrite(trans, &iter, b, 0);
+ }
+err:
+ bch2_trans_iter_exit(trans, &iter);
+ bch2_trans_begin(trans);
+ bch2_trans_put(trans);
+ }
+
+ printbuf_exit(&err);
+	bch2_bkey_buf_exit(&scrub->key, c);
+ btree_bounce_free(c, c->opts.btree_node_size, scrub->used_mempool, scrub->buf);
+ percpu_ref_put(&scrub->ca->io_ref);
+ kfree(scrub);
+ bch2_write_ref_put(c, BCH_WRITE_REF_btree_node_scrub);
+}
+
+static void btree_node_scrub_endio(struct bio *bio)
+{
+ struct btree_node_scrub *scrub = container_of(bio, struct btree_node_scrub, bio);
+
+ queue_work(scrub->c->btree_read_complete_wq, &scrub->work);
+}
+
+int bch2_btree_node_scrub(struct btree_trans *trans,
+ enum btree_id btree, unsigned level,
+ struct bkey_s_c k, unsigned dev)
+{
+ if (k.k->type != KEY_TYPE_btree_ptr_v2)
+ return 0;
+
+ struct bch_fs *c = trans->c;
+
+ if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_btree_node_scrub))
+ return -BCH_ERR_erofs_no_writes;
+
+ struct extent_ptr_decoded pick;
+ int ret = bch2_bkey_pick_read_device(c, k, NULL, &pick, dev);
+ if (ret <= 0)
+ goto err;
+
+ struct bch_dev *ca = bch2_dev_get_ioref(c, pick.ptr.dev, READ);
+ if (!ca) {
+ ret = -BCH_ERR_device_offline;
+ goto err;
+ }
+
+ bool used_mempool = false;
+ void *buf = btree_bounce_alloc(c, c->opts.btree_node_size, &used_mempool);
+
+ unsigned vecs = buf_pages(buf, c->opts.btree_node_size);
+
+ struct btree_node_scrub *scrub =
+ kzalloc(sizeof(*scrub) + sizeof(struct bio_vec) * vecs, GFP_KERNEL);
+ if (!scrub) {
+ ret = -ENOMEM;
+ goto err_free;
+ }
+
+ scrub->c = c;
+ scrub->ca = ca;
+ scrub->buf = buf;
+ scrub->used_mempool = used_mempool;
+ scrub->written = btree_ptr_sectors_written(k);
+
+ scrub->btree = btree;
+ scrub->level = level;
+ bch2_bkey_buf_init(&scrub->key);
+ bch2_bkey_buf_reassemble(&scrub->key, c, k);
+ scrub->seq = bkey_s_c_to_btree_ptr_v2(k).v->seq;
+
+ INIT_WORK(&scrub->work, btree_node_scrub_work);
+
+ bio_init(&scrub->bio, ca->disk_sb.bdev, scrub->bio.bi_inline_vecs, vecs, REQ_OP_READ);
+ bch2_bio_map(&scrub->bio, scrub->buf, c->opts.btree_node_size);
+ scrub->bio.bi_iter.bi_sector = pick.ptr.offset;
+ scrub->bio.bi_end_io = btree_node_scrub_endio;
+ submit_bio(&scrub->bio);
+ return 0;
+err_free:
+ btree_bounce_free(c, c->opts.btree_node_size, used_mempool, buf);
+ percpu_ref_put(&ca->io_ref);
+err:
+ bch2_write_ref_put(c, BCH_WRITE_REF_btree_node_scrub);
+ return ret;
+}
+
static void bch2_btree_complete_write(struct bch_fs *c, struct btree *b,
struct btree_write *w)
{
@@ -1831,7 +2021,7 @@ static void bch2_btree_complete_write(struct bch_fs *c, struct btree *b,
bch2_journal_pin_drop(&c->journal, &w->journal);
}
-static void __btree_node_write_done(struct bch_fs *c, struct btree *b)
+static void __btree_node_write_done(struct bch_fs *c, struct btree *b, u64 start_time)
{
struct btree_write *w = btree_prev_write(b);
unsigned long old, new;
@@ -1839,6 +2029,9 @@ static void __btree_node_write_done(struct bch_fs *c, struct btree *b)
bch2_btree_complete_write(c, b, w);
+ if (start_time)
+ bch2_time_stats_update(&c->times[BCH_TIME_btree_node_write], start_time);
+
old = READ_ONCE(b->flags);
do {
new = old;
@@ -1869,7 +2062,7 @@ static void __btree_node_write_done(struct bch_fs *c, struct btree *b)
wake_up_bit(&b->flags, BTREE_NODE_write_in_flight);
}
-static void btree_node_write_done(struct bch_fs *c, struct btree *b)
+static void btree_node_write_done(struct bch_fs *c, struct btree *b, u64 start_time)
{
struct btree_trans *trans = bch2_trans_get(c);
@@ -1877,7 +2070,7 @@ static void btree_node_write_done(struct bch_fs *c, struct btree *b)
/* we don't need transaction context anymore after we got the lock. */
bch2_trans_put(trans);
- __btree_node_write_done(c, b);
+ __btree_node_write_done(c, b, start_time);
six_unlock_read(&b->c.lock);
}
@@ -1887,6 +2080,7 @@ static void btree_node_write_work(struct work_struct *work)
container_of(work, struct btree_write_bio, work);
struct bch_fs *c = wbio->wbio.c;
struct btree *b = wbio->wbio.bio.bi_private;
+ u64 start_time = wbio->start_time;
int ret = 0;
btree_bounce_free(c,
@@ -1919,12 +2113,18 @@ static void btree_node_write_work(struct work_struct *work)
}
out:
bio_put(&wbio->wbio.bio);
- btree_node_write_done(c, b);
+ btree_node_write_done(c, b, start_time);
return;
err:
set_btree_node_noevict(b);
- bch2_fs_fatal_err_on(!bch2_err_matches(ret, EROFS), c,
- "writing btree node: %s", bch2_err_str(ret));
+
+ if (!bch2_err_matches(ret, EROFS)) {
+ struct printbuf buf = PRINTBUF;
+ prt_printf(&buf, "writing btree node: %s\n ", bch2_err_str(ret));
+ bch2_btree_pos_to_text(&buf, c, b);
+ bch2_fs_fatal_error(c, "%s", buf.buf);
+ printbuf_exit(&buf);
+ }
goto out;
}
@@ -1937,16 +2137,21 @@ static void btree_node_write_endio(struct bio *bio)
struct bch_fs *c = wbio->c;
struct btree *b = wbio->bio.bi_private;
struct bch_dev *ca = wbio->have_ioref ? bch2_dev_have_ref(c, wbio->dev) : NULL;
- unsigned long flags;
- if (wbio->have_ioref)
- bch2_latency_acct(ca, wbio->submit_time, WRITE);
+ bch2_account_io_completion(ca, BCH_MEMBER_ERROR_write,
+ wbio->submit_time, !bio->bi_status);
- if (!ca ||
- bch2_dev_io_err_on(bio->bi_status, ca, BCH_MEMBER_ERROR_write,
- "btree write error: %s",
- bch2_blk_status_to_str(bio->bi_status)) ||
- bch2_meta_write_fault("btree")) {
+ if (ca && bio->bi_status) {
+ struct printbuf buf = PRINTBUF;
+ prt_printf(&buf, "btree write error: %s\n ",
+ bch2_blk_status_to_str(bio->bi_status));
+ bch2_btree_pos_to_text(&buf, c, b);
+ bch_err_dev_ratelimited(ca, "%s", buf.buf);
+ printbuf_exit(&buf);
+ }
+
+ if (bio->bi_status) {
+ unsigned long flags;
spin_lock_irqsave(&c->btree_write_error_lock, flags);
bch2_dev_list_add_dev(&orig->failed, wbio->dev);
spin_unlock_irqrestore(&c->btree_write_error_lock, flags);
@@ -2023,6 +2228,7 @@ void __bch2_btree_node_write(struct bch_fs *c, struct btree *b, unsigned flags)
bool validate_before_checksum = false;
enum btree_write_type type = flags & BTREE_WRITE_TYPE_MASK;
void *data;
+ u64 start_time = local_clock();
int ret;
if (flags & BTREE_WRITE_ALREADY_STARTED)
@@ -2231,6 +2437,7 @@ do_write:
wbio->data = data;
wbio->data_bytes = bytes;
wbio->sector_offset = b->written;
+ wbio->start_time = start_time;
wbio->wbio.c = c;
wbio->wbio.used_mempool = used_mempool;
wbio->wbio.first_btree_write = !b->written;
@@ -2258,7 +2465,7 @@ err:
b->written += sectors_to_write;
nowrite:
btree_bounce_free(c, bytes, used_mempool, data);
- __btree_node_write_done(c, b);
+ __btree_node_write_done(c, b, 0);
}
/*
diff --git a/fs/bcachefs/btree_io.h b/fs/bcachefs/btree_io.h
index 6f9e4a6dacf7..dbf76d22c660 100644
--- a/fs/bcachefs/btree_io.h
+++ b/fs/bcachefs/btree_io.h
@@ -52,6 +52,7 @@ struct btree_write_bio {
void *data;
unsigned data_bytes;
unsigned sector_offset;
+ u64 start_time;
struct bch_write_bio wbio;
};
@@ -132,6 +133,9 @@ void bch2_btree_node_read(struct btree_trans *, struct btree *, bool);
int bch2_btree_root_read(struct bch_fs *, enum btree_id,
const struct bkey_i *, unsigned);
+int bch2_btree_node_scrub(struct btree_trans *, enum btree_id, unsigned,
+ struct bkey_s_c, unsigned);
+
bool bch2_btree_post_write_cleanup(struct bch_fs *, struct btree *);
enum btree_write_flags {
diff --git a/fs/bcachefs/btree_iter.c b/fs/bcachefs/btree_iter.c
index e32fce4fd258..7542c6f9c88e 100644
--- a/fs/bcachefs/btree_iter.c
+++ b/fs/bcachefs/btree_iter.c
@@ -562,20 +562,6 @@ static inline struct bkey_s_c btree_path_level_peek_all(struct bch_fs *c,
bch2_btree_node_iter_peek_all(&l->iter, l->b));
}
-static inline struct bkey_s_c btree_path_level_peek(struct btree_trans *trans,
- struct btree_path *path,
- struct btree_path_level *l,
- struct bkey *u)
-{
- struct bkey_s_c k = __btree_iter_unpack(trans->c, l, u,
- bch2_btree_node_iter_peek(&l->iter, l->b));
-
- path->pos = k.k ? k.k->p : l->b->key.k.p;
- trans->paths_sorted = false;
- bch2_btree_path_verify_level(trans, path, l - path->l);
- return k;
-}
-
static inline struct bkey_s_c btree_path_level_prev(struct btree_trans *trans,
struct btree_path *path,
struct btree_path_level *l,
diff --git a/fs/bcachefs/btree_iter.h b/fs/bcachefs/btree_iter.h
index b96157f3dc9c..8823eec6b284 100644
--- a/fs/bcachefs/btree_iter.h
+++ b/fs/bcachefs/btree_iter.h
@@ -335,13 +335,20 @@ static inline void bch2_trans_verify_not_unlocked_or_in_restart(struct btree_tra
}
__always_inline
-static int btree_trans_restart_ip(struct btree_trans *trans, int err, unsigned long ip)
+static int btree_trans_restart_foreign_task(struct btree_trans *trans, int err, unsigned long ip)
{
BUG_ON(err <= 0);
BUG_ON(!bch2_err_matches(-err, BCH_ERR_transaction_restart));
trans->restarted = err;
trans->last_restarted_ip = ip;
+ return -err;
+}
+
+__always_inline
+static int btree_trans_restart_ip(struct btree_trans *trans, int err, unsigned long ip)
+{
+ btree_trans_restart_foreign_task(trans, err, ip);
#ifdef CONFIG_BCACHEFS_DEBUG
darray_exit(&trans->last_restarted_trace);
bch2_save_backtrace(&trans->last_restarted_trace, current, 0, GFP_NOWAIT);
diff --git a/fs/bcachefs/btree_locking.c b/fs/bcachefs/btree_locking.c
index caef65adeae4..94eb2b73a843 100644
--- a/fs/bcachefs/btree_locking.c
+++ b/fs/bcachefs/btree_locking.c
@@ -91,10 +91,10 @@ static noinline void print_chain(struct printbuf *out, struct lock_graph *g)
struct trans_waiting_for_lock *i;
for (i = g->g; i != g->g + g->nr; i++) {
- struct task_struct *task = i->trans->locking_wait.task;
+ struct task_struct *task = READ_ONCE(i->trans->locking_wait.task);
if (i != g->g)
prt_str(out, "<- ");
- prt_printf(out, "%u ", task ?task->pid : 0);
+ prt_printf(out, "%u ", task ? task->pid : 0);
}
prt_newline(out);
}
@@ -172,7 +172,9 @@ static int abort_lock(struct lock_graph *g, struct trans_waiting_for_lock *i)
{
if (i == g->g) {
trace_would_deadlock(g, i->trans);
- return btree_trans_restart(i->trans, BCH_ERR_transaction_restart_would_deadlock);
+ return btree_trans_restart_foreign_task(i->trans,
+ BCH_ERR_transaction_restart_would_deadlock,
+ _THIS_IP_);
} else {
i->trans->lock_must_abort = true;
wake_up_process(i->trans->locking_wait.task);
diff --git a/fs/bcachefs/btree_node_scan.c b/fs/bcachefs/btree_node_scan.c
index a7f06deee13c..678161321e42 100644
--- a/fs/bcachefs/btree_node_scan.c
+++ b/fs/bcachefs/btree_node_scan.c
@@ -166,11 +166,17 @@ static void try_read_btree_node(struct find_btree_nodes *f, struct bch_dev *ca,
bio->bi_iter.bi_sector = offset;
bch2_bio_map(bio, bn, PAGE_SIZE);
+ u64 submit_time = local_clock();
submit_bio_wait(bio);
- if (bch2_dev_io_err_on(bio->bi_status, ca, BCH_MEMBER_ERROR_read,
- "IO error in try_read_btree_node() at %llu: %s",
- offset, bch2_blk_status_to_str(bio->bi_status)))
+
+ bch2_account_io_completion(ca, BCH_MEMBER_ERROR_read, submit_time, !bio->bi_status);
+
+ if (bio->bi_status) {
+ bch_err_dev_ratelimited(ca,
+ "IO error in try_read_btree_node() at %llu: %s",
+ offset, bch2_blk_status_to_str(bio->bi_status));
return;
+ }
if (le64_to_cpu(bn->magic) != bset_magic(c))
return;
@@ -264,7 +270,7 @@ static int read_btree_nodes_worker(void *p)
err:
bio_put(bio);
free_page((unsigned long) buf);
- percpu_ref_get(&ca->io_ref);
+ percpu_ref_put(&ca->io_ref);
closure_put(w->cl);
kfree(w);
return 0;
@@ -283,29 +289,28 @@ static int read_btree_nodes(struct find_btree_nodes *f)
continue;
struct find_btree_nodes_worker *w = kmalloc(sizeof(*w), GFP_KERNEL);
- struct task_struct *t;
-
if (!w) {
percpu_ref_put(&ca->io_ref);
ret = -ENOMEM;
goto err;
}
- percpu_ref_get(&ca->io_ref);
- closure_get(&cl);
w->cl = &cl;
w->f = f;
w->ca = ca;
- t = kthread_run(read_btree_nodes_worker, w, "read_btree_nodes/%s", ca->name);
+ struct task_struct *t = kthread_create(read_btree_nodes_worker, w, "read_btree_nodes/%s", ca->name);
ret = PTR_ERR_OR_ZERO(t);
if (ret) {
percpu_ref_put(&ca->io_ref);
- closure_put(&cl);
- f->ret = ret;
- bch_err(c, "error starting kthread: %i", ret);
+ kfree(w);
+ bch_err_msg(c, ret, "starting kthread");
break;
}
+
+ closure_get(&cl);
+ percpu_ref_get(&ca->io_ref);
+ wake_up_process(t);
}
err:
closure_sync(&cl);
diff --git a/fs/bcachefs/btree_trans_commit.c b/fs/bcachefs/btree_trans_commit.c
index c4f524b2ca9a..7d7e52ddde02 100644
--- a/fs/bcachefs/btree_trans_commit.c
+++ b/fs/bcachefs/btree_trans_commit.c
@@ -164,6 +164,7 @@ bool bch2_btree_bset_insert_key(struct btree_trans *trans,
EBUG_ON(bpos_gt(insert->k.p, b->data->max_key));
EBUG_ON(insert->k.u64s > bch2_btree_keys_u64s_remaining(b));
EBUG_ON(!b->c.level && !bpos_eq(insert->k.p, path->pos));
+ kmsan_check_memory(insert, bkey_bytes(&insert->k));
k = bch2_btree_node_iter_peek_all(node_iter, b);
if (k && bkey_cmp_left_packed(b, k, &insert->k.p))
@@ -336,6 +337,7 @@ static inline void btree_insert_entry_checks(struct btree_trans *trans,
BUG_ON(i->cached != path->cached);
BUG_ON(i->level != path->level);
BUG_ON(i->btree_id != path->btree_id);
+ BUG_ON(i->bkey_type != __btree_node_type(path->level, path->btree_id));
EBUG_ON(!i->level &&
btree_type_has_snapshots(i->btree_id) &&
!(i->flags & BTREE_UPDATE_internal_snapshot_node) &&
@@ -517,69 +519,45 @@ static int run_one_trans_trigger(struct btree_trans *trans, struct btree_insert_
}
}
-static int run_btree_triggers(struct btree_trans *trans, enum btree_id btree_id,
- unsigned *btree_id_updates_start)
+static int bch2_trans_commit_run_triggers(struct btree_trans *trans)
{
- bool trans_trigger_run;
+ unsigned sort_id_start = 0;
- /*
- * Running triggers will append more updates to the list of updates as
- * we're walking it:
- */
- do {
- trans_trigger_run = false;
+ while (sort_id_start < trans->nr_updates) {
+ unsigned i, sort_id = trans->updates[sort_id_start].sort_order;
+ bool trans_trigger_run;
- for (unsigned i = *btree_id_updates_start;
- i < trans->nr_updates && trans->updates[i].btree_id <= btree_id;
- i++) {
- if (trans->updates[i].btree_id < btree_id) {
- *btree_id_updates_start = i;
- continue;
+ /*
+ * For a given btree, this algorithm runs insert triggers before
+ * overwrite triggers: this is so that when extents are being
+ * moved (e.g. by FALLOCATE_FL_INSERT_RANGE), we don't drop
+ * references before they are re-added.
+ *
+ * Running triggers will append more updates to the list of
+ * updates as we're walking it:
+ */
+ do {
+ trans_trigger_run = false;
+
+ for (i = sort_id_start;
+ i < trans->nr_updates && trans->updates[i].sort_order <= sort_id;
+ i++) {
+ if (trans->updates[i].sort_order < sort_id) {
+ sort_id_start = i;
+ continue;
+ }
+
+ int ret = run_one_trans_trigger(trans, trans->updates + i);
+ if (ret < 0)
+ return ret;
+ if (ret)
+ trans_trigger_run = true;
}
+ } while (trans_trigger_run);
- int ret = run_one_trans_trigger(trans, trans->updates + i);
- if (ret < 0)
- return ret;
- if (ret)
- trans_trigger_run = true;
- }
- } while (trans_trigger_run);
-
- trans_for_each_update(trans, i)
- BUG_ON(!(i->flags & BTREE_TRIGGER_norun) &&
- i->btree_id == btree_id &&
- btree_node_type_has_trans_triggers(i->bkey_type) &&
- (!i->insert_trigger_run || !i->overwrite_trigger_run));
-
- return 0;
-}
-
-static int bch2_trans_commit_run_triggers(struct btree_trans *trans)
-{
- unsigned btree_id = 0, btree_id_updates_start = 0;
- int ret = 0;
-
- /*
- *
- * For a given btree, this algorithm runs insert triggers before
- * overwrite triggers: this is so that when extents are being moved
- * (e.g. by FALLOCATE_FL_INSERT_RANGE), we don't drop references before
- * they are re-added.
- */
- for (btree_id = 0; btree_id < BTREE_ID_NR; btree_id++) {
- if (btree_id == BTREE_ID_alloc)
- continue;
-
- ret = run_btree_triggers(trans, btree_id, &btree_id_updates_start);
- if (ret)
- return ret;
+ sort_id_start = i;
}
- btree_id_updates_start = 0;
- ret = run_btree_triggers(trans, BTREE_ID_alloc, &btree_id_updates_start);
- if (ret)
- return ret;
-
#ifdef CONFIG_BCACHEFS_DEBUG
trans_for_each_update(trans, i)
BUG_ON(!(i->flags & BTREE_TRIGGER_norun) &&
@@ -903,18 +881,7 @@ int bch2_trans_commit_error(struct btree_trans *trans, unsigned flags,
struct bch_fs *c = trans->c;
enum bch_watermark watermark = flags & BCH_WATERMARK_MASK;
- switch (ret) {
- case -BCH_ERR_btree_insert_btree_node_full:
- ret = bch2_btree_split_leaf(trans, i->path, flags);
- if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
- trace_and_count(c, trans_restart_btree_node_split, trans,
- trace_ip, trans->paths + i->path);
- break;
- case -BCH_ERR_btree_insert_need_mark_replicas:
- ret = drop_locks_do(trans,
- bch2_accounting_update_sb(trans));
- break;
- case -BCH_ERR_journal_res_get_blocked:
+ if (bch2_err_matches(ret, BCH_ERR_journal_res_blocked)) {
/*
* XXX: this should probably be a separate BTREE_INSERT_NONBLOCK
* flag
@@ -922,13 +889,26 @@ int bch2_trans_commit_error(struct btree_trans *trans, unsigned flags,
if ((flags & BCH_TRANS_COMMIT_journal_reclaim) &&
watermark < BCH_WATERMARK_reclaim) {
ret = -BCH_ERR_journal_reclaim_would_deadlock;
- break;
+ goto out;
}
ret = drop_locks_do(trans,
bch2_trans_journal_res_get(trans,
(flags & BCH_WATERMARK_MASK)|
JOURNAL_RES_GET_CHECK));
+ goto out;
+ }
+
+ switch (ret) {
+ case -BCH_ERR_btree_insert_btree_node_full:
+ ret = bch2_btree_split_leaf(trans, i->path, flags);
+ if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
+ trace_and_count(c, trans_restart_btree_node_split, trans,
+ trace_ip, trans->paths + i->path);
+ break;
+ case -BCH_ERR_btree_insert_need_mark_replicas:
+ ret = drop_locks_do(trans,
+ bch2_accounting_update_sb(trans));
break;
case -BCH_ERR_btree_insert_need_journal_reclaim:
bch2_trans_unlock(trans);
@@ -950,7 +930,7 @@ int bch2_trans_commit_error(struct btree_trans *trans, unsigned flags,
BUG_ON(ret >= 0);
break;
}
-
+out:
BUG_ON(bch2_err_matches(ret, BCH_ERR_transaction_restart) != !!trans->restarted);
bch2_fs_inconsistent_on(bch2_err_matches(ret, ENOSPC) &&
diff --git a/fs/bcachefs/btree_types.h b/fs/bcachefs/btree_types.h
index a09cbe9cd94f..77578da2d23f 100644
--- a/fs/bcachefs/btree_types.h
+++ b/fs/bcachefs/btree_types.h
@@ -423,6 +423,7 @@ static inline struct bpos btree_node_pos(struct btree_bkey_cached_common *b)
struct btree_insert_entry {
unsigned flags;
+ u8 sort_order;
u8 bkey_type;
enum btree_id btree_id:8;
u8 level:4;
@@ -853,6 +854,18 @@ static inline bool btree_type_uses_write_buffer(enum btree_id btree)
return BIT_ULL(btree) & mask;
}
+static inline u8 btree_trigger_order(enum btree_id btree)
+{
+ switch (btree) {
+ case BTREE_ID_alloc:
+ return U8_MAX;
+ case BTREE_ID_stripes:
+ return U8_MAX - 1;
+ default:
+ return btree;
+ }
+}
+
struct btree_root {
struct btree *b;
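The new sort_order field replaces the old special-casing of BTREE_ID_alloc in bch2_trans_commit_run_triggers(): btree_trigger_order() pushes the alloc and stripes btrees to the top of the u8 range, so a plain sort by sort_order runs their triggers after every other btree. A minimal sketch of the invariant this relies on, assuming ordinary btree ids stay clear of the two reserved slots (illustrative only, not part of the patch):

        /* sketch only: ordinary btree ids must sort below the stripes/alloc slots */
        static_assert(BTREE_ID_NR < U8_MAX - 1);

        /*
         * So, e.g., extents triggers run before stripes triggers, which run before alloc:
         * btree_trigger_order(BTREE_ID_extents) < U8_MAX - 1 < U8_MAX
         */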
diff --git a/fs/bcachefs/btree_update.c b/fs/bcachefs/btree_update.c
index 13d794f201a5..bd2eb42edb24 100644
--- a/fs/bcachefs/btree_update.c
+++ b/fs/bcachefs/btree_update.c
@@ -17,7 +17,7 @@
static inline int btree_insert_entry_cmp(const struct btree_insert_entry *l,
const struct btree_insert_entry *r)
{
- return cmp_int(l->btree_id, r->btree_id) ?:
+ return cmp_int(l->sort_order, r->sort_order) ?:
cmp_int(l->cached, r->cached) ?:
-cmp_int(l->level, r->level) ?:
bpos_cmp(l->k->k.p, r->k->k.p);
@@ -397,6 +397,7 @@ bch2_trans_update_by_path(struct btree_trans *trans, btree_path_idx_t path_idx,
n = (struct btree_insert_entry) {
.flags = flags,
+ .sort_order = btree_trigger_order(path->btree_id),
.bkey_type = __btree_node_type(path->level, path->btree_id),
.btree_id = path->btree_id,
.level = path->level,
@@ -511,6 +512,8 @@ static noinline int bch2_trans_update_get_key_cache(struct btree_trans *trans,
int __must_check bch2_trans_update(struct btree_trans *trans, struct btree_iter *iter,
struct bkey_i *k, enum btree_iter_update_trigger_flags flags)
{
+ kmsan_check_memory(k, bkey_bytes(&k->k));
+
btree_path_idx_t path_idx = iter->update_path ?: iter->path;
int ret;
diff --git a/fs/bcachefs/btree_update.h b/fs/bcachefs/btree_update.h
index 8f22ef9a7651..d2e1c04353f6 100644
--- a/fs/bcachefs/btree_update.h
+++ b/fs/bcachefs/btree_update.h
@@ -126,10 +126,20 @@ bch2_trans_jset_entry_alloc(struct btree_trans *trans, unsigned u64s)
int bch2_btree_insert_clone_trans(struct btree_trans *, enum btree_id, struct bkey_i *);
+int bch2_btree_write_buffer_insert_err(struct btree_trans *,
+ enum btree_id, struct bkey_i *);
+
static inline int __must_check bch2_trans_update_buffered(struct btree_trans *trans,
enum btree_id btree,
struct bkey_i *k)
{
+ kmsan_check_memory(k, bkey_bytes(&k->k));
+
+ if (unlikely(!btree_type_uses_write_buffer(btree))) {
+ int ret = bch2_btree_write_buffer_insert_err(trans, btree, k);
+ dump_stack();
+ return ret;
+ }
/*
* Most updates skip the btree write buffer until journal replay is
* finished because synchronization with journal replay relies on having
diff --git a/fs/bcachefs/btree_update_interior.c b/fs/bcachefs/btree_update_interior.c
index e4e7c804625e..67f1e3202835 100644
--- a/fs/bcachefs/btree_update_interior.c
+++ b/fs/bcachefs/btree_update_interior.c
@@ -649,6 +649,14 @@ static int btree_update_nodes_written_trans(struct btree_trans *trans,
return 0;
}
+/* If the node has been reused, we might be reading uninitialized memory - that's fine: */
+static noinline __no_kmsan_checks bool btree_node_seq_matches(struct btree *b, __le64 seq)
+{
+ struct btree_node *b_data = READ_ONCE(b->data);
+
+ return (b_data ? b_data->keys.seq : 0) == seq;
+}
+
static void btree_update_nodes_written(struct btree_update *as)
{
struct bch_fs *c = as->c;
@@ -677,17 +685,9 @@ static void btree_update_nodes_written(struct btree_update *as)
* on disk:
*/
for (i = 0; i < as->nr_old_nodes; i++) {
- __le64 seq;
-
b = as->old_nodes[i];
- bch2_trans_begin(trans);
- btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_read);
- seq = b->data ? b->data->keys.seq : 0;
- six_unlock_read(&b->c.lock);
- bch2_trans_unlock_long(trans);
-
- if (seq == as->old_nodes_seq[i])
+ if (btree_node_seq_matches(b, as->old_nodes_seq[i]))
wait_on_bit_io(&b->flags, BTREE_NODE_write_in_flight_inner,
TASK_UNINTERRUPTIBLE);
}
@@ -2126,6 +2126,31 @@ err_free_update:
goto out;
}
+static int get_iter_to_node(struct btree_trans *trans, struct btree_iter *iter,
+ struct btree *b)
+{
+ bch2_trans_node_iter_init(trans, iter, b->c.btree_id, b->key.k.p,
+ BTREE_MAX_DEPTH, b->c.level,
+ BTREE_ITER_intent);
+ int ret = bch2_btree_iter_traverse(iter);
+ if (ret)
+ goto err;
+
+ /* has node been freed? */
+ if (btree_iter_path(trans, iter)->l[b->c.level].b != b) {
+ /* node has been freed: */
+ BUG_ON(!btree_node_dying(b));
+ ret = -BCH_ERR_btree_node_dying;
+ goto err;
+ }
+
+ BUG_ON(!btree_node_hashed(b));
+ return 0;
+err:
+ bch2_trans_iter_exit(trans, iter);
+ return ret;
+}
+
int bch2_btree_node_rewrite(struct btree_trans *trans,
struct btree_iter *iter,
struct btree *b,
@@ -2191,66 +2216,78 @@ err:
goto out;
}
-struct async_btree_rewrite {
- struct bch_fs *c;
- struct work_struct work;
- struct list_head list;
- enum btree_id btree_id;
- unsigned level;
- struct bkey_buf key;
-};
-
-static int async_btree_node_rewrite_trans(struct btree_trans *trans,
- struct async_btree_rewrite *a)
+static int bch2_btree_node_rewrite_key(struct btree_trans *trans,
+ enum btree_id btree, unsigned level,
+ struct bkey_i *k, unsigned flags)
{
struct btree_iter iter;
bch2_trans_node_iter_init(trans, &iter,
- a->btree_id, a->key.k->k.p,
- BTREE_MAX_DEPTH, a->level, 0);
+ btree, k->k.p,
+ BTREE_MAX_DEPTH, level, 0);
struct btree *b = bch2_btree_iter_peek_node(&iter);
int ret = PTR_ERR_OR_ZERO(b);
if (ret)
goto out;
- bool found = b && btree_ptr_hash_val(&b->key) == btree_ptr_hash_val(a->key.k);
+ bool found = b && btree_ptr_hash_val(&b->key) == btree_ptr_hash_val(k);
ret = found
- ? bch2_btree_node_rewrite(trans, &iter, b, 0)
+ ? bch2_btree_node_rewrite(trans, &iter, b, flags)
: -ENOENT;
+out:
+ bch2_trans_iter_exit(trans, &iter);
+ return ret;
+}
-#if 0
- /* Tracepoint... */
- if (!ret || ret == -ENOENT) {
- struct bch_fs *c = trans->c;
- struct printbuf buf = PRINTBUF;
+int bch2_btree_node_rewrite_pos(struct btree_trans *trans,
+ enum btree_id btree, unsigned level,
+ struct bpos pos, unsigned flags)
+{
+ BUG_ON(!level);
- if (!ret) {
- prt_printf(&buf, "rewrite node:\n ");
- bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(a->key.k));
- } else {
- prt_printf(&buf, "node to rewrite not found:\n want: ");
- bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(a->key.k));
- prt_printf(&buf, "\n got: ");
- if (b)
- bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&b->key));
- else
- prt_str(&buf, "(null)");
- }
- bch_info(c, "%s", buf.buf);
- printbuf_exit(&buf);
- }
-#endif
-out:
+ /* Traverse one depth lower to get a pointer to the node itself: */
+ struct btree_iter iter;
+ bch2_trans_node_iter_init(trans, &iter, btree, pos, 0, level - 1, 0);
+ struct btree *b = bch2_btree_iter_peek_node(&iter);
+ int ret = PTR_ERR_OR_ZERO(b);
+ if (ret)
+ goto err;
+
+ ret = bch2_btree_node_rewrite(trans, &iter, b, flags);
+err:
bch2_trans_iter_exit(trans, &iter);
return ret;
}
+int bch2_btree_node_rewrite_key_get_iter(struct btree_trans *trans,
+ struct btree *b, unsigned flags)
+{
+ struct btree_iter iter;
+ int ret = get_iter_to_node(trans, &iter, b);
+ if (ret)
+ return ret == -BCH_ERR_btree_node_dying ? 0 : ret;
+
+ ret = bch2_btree_node_rewrite(trans, &iter, b, flags);
+ bch2_trans_iter_exit(trans, &iter);
+ return ret;
+}
+
+struct async_btree_rewrite {
+ struct bch_fs *c;
+ struct work_struct work;
+ struct list_head list;
+ enum btree_id btree_id;
+ unsigned level;
+ struct bkey_buf key;
+};
+
static void async_btree_node_rewrite_work(struct work_struct *work)
{
struct async_btree_rewrite *a =
container_of(work, struct async_btree_rewrite, work);
struct bch_fs *c = a->c;
- int ret = bch2_trans_do(c, async_btree_node_rewrite_trans(trans, a));
+ int ret = bch2_trans_do(c, bch2_btree_node_rewrite_key(trans,
+ a->btree_id, a->level, a->key.k, 0));
if (ret != -ENOENT)
bch_err_fn_ratelimited(c, ret);
@@ -2494,30 +2531,15 @@ int bch2_btree_node_update_key_get_iter(struct btree_trans *trans,
unsigned commit_flags, bool skip_triggers)
{
struct btree_iter iter;
- int ret;
-
- bch2_trans_node_iter_init(trans, &iter, b->c.btree_id, b->key.k.p,
- BTREE_MAX_DEPTH, b->c.level,
- BTREE_ITER_intent);
- ret = bch2_btree_iter_traverse(&iter);
+ int ret = get_iter_to_node(trans, &iter, b);
if (ret)
- goto out;
-
- /* has node been freed? */
- if (btree_iter_path(trans, &iter)->l[b->c.level].b != b) {
- /* node has been freed: */
- BUG_ON(!btree_node_dying(b));
- goto out;
- }
-
- BUG_ON(!btree_node_hashed(b));
+ return ret == -BCH_ERR_btree_node_dying ? 0 : ret;
bch2_bkey_drop_ptrs(bkey_i_to_s(new_key), ptr,
!bch2_bkey_has_device(bkey_i_to_s(&b->key), ptr->dev));
ret = bch2_btree_node_update_key(trans, &iter, b, new_key,
commit_flags, skip_triggers);
-out:
bch2_trans_iter_exit(trans, &iter);
return ret;
}
diff --git a/fs/bcachefs/btree_update_interior.h b/fs/bcachefs/btree_update_interior.h
index 26d646e1275c..be71cd73b864 100644
--- a/fs/bcachefs/btree_update_interior.h
+++ b/fs/bcachefs/btree_update_interior.h
@@ -169,7 +169,14 @@ static inline int bch2_foreground_maybe_merge(struct btree_trans *trans,
int bch2_btree_node_rewrite(struct btree_trans *, struct btree_iter *,
struct btree *, unsigned);
+int bch2_btree_node_rewrite_pos(struct btree_trans *,
+ enum btree_id, unsigned,
+ struct bpos, unsigned);
+int bch2_btree_node_rewrite_key_get_iter(struct btree_trans *,
+ struct btree *, unsigned);
+
void bch2_btree_node_rewrite_async(struct bch_fs *, struct btree *);
+
int bch2_btree_node_update_key(struct btree_trans *, struct btree_iter *,
struct btree *, struct bkey_i *,
unsigned, bool);
diff --git a/fs/bcachefs/btree_write_buffer.c b/fs/bcachefs/btree_write_buffer.c
index b56c4987b8c9..2c09d19dd621 100644
--- a/fs/bcachefs/btree_write_buffer.c
+++ b/fs/bcachefs/btree_write_buffer.c
@@ -264,6 +264,22 @@ out:
BUG_ON(wb->sorted.size < wb->flushing.keys.nr);
}
+int bch2_btree_write_buffer_insert_err(struct btree_trans *trans,
+ enum btree_id btree, struct bkey_i *k)
+{
+ struct bch_fs *c = trans->c;
+ struct printbuf buf = PRINTBUF;
+
+ prt_printf(&buf, "attempting to do write buffer update on non wb btree=");
+ bch2_btree_id_to_text(&buf, btree);
+ prt_str(&buf, "\n");
+ bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(k));
+
+ bch2_fs_inconsistent(c, "%s", buf.buf);
+ printbuf_exit(&buf);
+ return -EROFS;
+}
+
static int bch2_btree_write_buffer_flush_locked(struct btree_trans *trans)
{
struct bch_fs *c = trans->c;
@@ -312,7 +328,10 @@ static int bch2_btree_write_buffer_flush_locked(struct btree_trans *trans)
darray_for_each(wb->sorted, i) {
struct btree_write_buffered_key *k = &wb->flushing.keys.data[i->idx];
- BUG_ON(!btree_type_uses_write_buffer(k->btree));
+ if (unlikely(!btree_type_uses_write_buffer(k->btree))) {
+ ret = bch2_btree_write_buffer_insert_err(trans, k->btree, &k->k);
+ goto err;
+ }
for (struct wb_key_ref *n = i + 1; n < min(i + 4, &darray_top(wb->sorted)); n++)
prefetch(&wb->flushing.keys.data[n->idx]);
diff --git a/fs/bcachefs/buckets.c b/fs/bcachefs/buckets.c
index 345b117a4a4a..e56ef623ebc1 100644
--- a/fs/bcachefs/buckets.c
+++ b/fs/bcachefs/buckets.c
@@ -590,11 +590,9 @@ static int bch2_trigger_pointer(struct btree_trans *trans,
if (ret)
goto err;
- if (!p.ptr.cached) {
- ret = bch2_bucket_backpointer_mod(trans, k, &bp, insert);
- if (ret)
- goto err;
- }
+ ret = bch2_bucket_backpointer_mod(trans, k, &bp, insert);
+ if (ret)
+ goto err;
}
if (flags & BTREE_TRIGGER_gc) {
@@ -674,10 +672,10 @@ err:
return -BCH_ERR_ENOMEM_mark_stripe_ptr;
}
- mutex_lock(&c->ec_stripes_heap_lock);
+ gc_stripe_lock(m);
if (!m || !m->alive) {
- mutex_unlock(&c->ec_stripes_heap_lock);
+ gc_stripe_unlock(m);
struct printbuf buf = PRINTBUF;
bch2_bkey_val_to_text(&buf, c, k);
bch_err_ratelimited(c, "pointer to nonexistent stripe %llu\n while marking %s",
@@ -693,7 +691,7 @@ err:
.type = BCH_DISK_ACCOUNTING_replicas,
};
memcpy(&acc.replicas, &m->r.e, replicas_entry_bytes(&m->r.e));
- mutex_unlock(&c->ec_stripes_heap_lock);
+ gc_stripe_unlock(m);
acc.replicas.data_type = data_type;
int ret = bch2_disk_accounting_mod(trans, &acc, &sectors, 1, true);
@@ -726,9 +724,7 @@ static int __trigger_extent(struct btree_trans *trans,
.replicas.nr_required = 1,
};
- struct disk_accounting_pos acct_compression_key = {
- .type = BCH_DISK_ACCOUNTING_compression,
- };
+ unsigned cur_compression_type = 0;
u64 compression_acct[3] = { 1, 0, 0 };
bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
@@ -762,13 +758,13 @@ static int __trigger_extent(struct btree_trans *trans,
acc_replicas_key.replicas.nr_required = 0;
}
- if (acct_compression_key.compression.type &&
- acct_compression_key.compression.type != p.crc.compression_type) {
+ if (cur_compression_type &&
+ cur_compression_type != p.crc.compression_type) {
if (flags & BTREE_TRIGGER_overwrite)
bch2_u64s_neg(compression_acct, ARRAY_SIZE(compression_acct));
- ret = bch2_disk_accounting_mod(trans, &acct_compression_key, compression_acct,
- ARRAY_SIZE(compression_acct), gc);
+ ret = bch2_disk_accounting_mod2(trans, gc, compression_acct,
+ compression, cur_compression_type);
if (ret)
return ret;
@@ -777,7 +773,7 @@ static int __trigger_extent(struct btree_trans *trans,
compression_acct[2] = 0;
}
- acct_compression_key.compression.type = p.crc.compression_type;
+ cur_compression_type = p.crc.compression_type;
if (p.crc.compression_type) {
compression_acct[1] += p.crc.uncompressed_size;
compression_acct[2] += p.crc.compressed_size;
@@ -791,45 +787,34 @@ static int __trigger_extent(struct btree_trans *trans,
}
if (acc_replicas_key.replicas.nr_devs && !level && k.k->p.snapshot) {
- struct disk_accounting_pos acc_snapshot_key = {
- .type = BCH_DISK_ACCOUNTING_snapshot,
- .snapshot.id = k.k->p.snapshot,
- };
- ret = bch2_disk_accounting_mod(trans, &acc_snapshot_key, replicas_sectors, 1, gc);
+ ret = bch2_disk_accounting_mod2_nr(trans, gc, replicas_sectors, 1, snapshot, k.k->p.snapshot);
if (ret)
return ret;
}
- if (acct_compression_key.compression.type) {
+ if (cur_compression_type) {
if (flags & BTREE_TRIGGER_overwrite)
bch2_u64s_neg(compression_acct, ARRAY_SIZE(compression_acct));
- ret = bch2_disk_accounting_mod(trans, &acct_compression_key, compression_acct,
- ARRAY_SIZE(compression_acct), gc);
+ ret = bch2_disk_accounting_mod2(trans, gc, compression_acct,
+ compression, cur_compression_type);
if (ret)
return ret;
}
if (level) {
- struct disk_accounting_pos acc_btree_key = {
- .type = BCH_DISK_ACCOUNTING_btree,
- .btree.id = btree_id,
- };
- ret = bch2_disk_accounting_mod(trans, &acc_btree_key, replicas_sectors, 1, gc);
+ ret = bch2_disk_accounting_mod2_nr(trans, gc, replicas_sectors, 1, btree, btree_id);
if (ret)
return ret;
} else {
bool insert = !(flags & BTREE_TRIGGER_overwrite);
- struct disk_accounting_pos acc_inum_key = {
- .type = BCH_DISK_ACCOUNTING_inum,
- .inum.inum = k.k->p.inode,
- };
+
s64 v[3] = {
insert ? 1 : -1,
insert ? k.k->size : -((s64) k.k->size),
*replicas_sectors,
};
- ret = bch2_disk_accounting_mod(trans, &acc_inum_key, v, ARRAY_SIZE(v), gc);
+ ret = bch2_disk_accounting_mod2(trans, gc, v, inum, k.k->p.inode);
if (ret)
return ret;
}
@@ -878,15 +863,15 @@ int bch2_trigger_extent(struct btree_trans *trans,
}
int need_rebalance_delta = 0;
- s64 need_rebalance_sectors_delta = 0;
+ s64 need_rebalance_sectors_delta[1] = { 0 };
s64 s = bch2_bkey_sectors_need_rebalance(c, old);
need_rebalance_delta -= s != 0;
- need_rebalance_sectors_delta -= s;
+ need_rebalance_sectors_delta[0] -= s;
s = bch2_bkey_sectors_need_rebalance(c, new.s_c);
need_rebalance_delta += s != 0;
- need_rebalance_sectors_delta += s;
+ need_rebalance_sectors_delta[0] += s;
if ((flags & BTREE_TRIGGER_transactional) && need_rebalance_delta) {
int ret = bch2_btree_bit_mod_buffered(trans, BTREE_ID_rebalance_work,
@@ -895,12 +880,9 @@ int bch2_trigger_extent(struct btree_trans *trans,
return ret;
}
- if (need_rebalance_sectors_delta) {
- struct disk_accounting_pos acc = {
- .type = BCH_DISK_ACCOUNTING_rebalance_work,
- };
- int ret = bch2_disk_accounting_mod(trans, &acc, &need_rebalance_sectors_delta, 1,
- flags & BTREE_TRIGGER_gc);
+ if (need_rebalance_sectors_delta[0]) {
+ int ret = bch2_disk_accounting_mod2(trans, flags & BTREE_TRIGGER_gc,
+ need_rebalance_sectors_delta, rebalance_work);
if (ret)
return ret;
}
@@ -916,17 +898,13 @@ static int __trigger_reservation(struct btree_trans *trans,
enum btree_iter_update_trigger_flags flags)
{
if (flags & (BTREE_TRIGGER_transactional|BTREE_TRIGGER_gc)) {
- s64 sectors = k.k->size;
+ s64 sectors[1] = { k.k->size };
if (flags & BTREE_TRIGGER_overwrite)
- sectors = -sectors;
-
- struct disk_accounting_pos acc = {
- .type = BCH_DISK_ACCOUNTING_persistent_reserved,
- .persistent_reserved.nr_replicas = bkey_s_c_to_reservation(k).v->nr_replicas,
- };
+ sectors[0] = -sectors[0];
- return bch2_disk_accounting_mod(trans, &acc, &sectors, 1, flags & BTREE_TRIGGER_gc);
+ return bch2_disk_accounting_mod2(trans, flags & BTREE_TRIGGER_gc, sectors,
+ persistent_reserved, bkey_s_c_to_reservation(k).v->nr_replicas);
}
return 0;
diff --git a/fs/bcachefs/buckets.h b/fs/bcachefs/buckets.h
index a9acdd6c0c86..c5363256e363 100644
--- a/fs/bcachefs/buckets.h
+++ b/fs/bcachefs/buckets.h
@@ -39,33 +39,6 @@ static inline u64 sector_to_bucket_and_offset(const struct bch_dev *ca, sector_t
for (_b = (_buckets)->b + (_buckets)->first_bucket; \
_b < (_buckets)->b + (_buckets)->nbuckets; _b++)
-/*
- * Ugly hack alert:
- *
- * We need to cram a spinlock in a single byte, because that's what we have left
- * in struct bucket, and we care about the size of these - during fsck, we need
- * in memory state for every single bucket on every device.
- *
- * We used to do
- * while (xchg(&b->lock, 1) cpu_relax();
- * but, it turns out not all architectures support xchg on a single byte.
- *
- * So now we use bit_spin_lock(), with fun games since we can't burn a whole
- * ulong for this - we just need to make sure the lock bit always ends up in the
- * first byte.
- */
-
-#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
-#define BUCKET_LOCK_BITNR 0
-#else
-#define BUCKET_LOCK_BITNR (BITS_PER_LONG - 1)
-#endif
-
-union ulong_byte_assert {
- ulong ulong;
- u8 byte;
-};
-
static inline void bucket_unlock(struct bucket *b)
{
BUILD_BUG_ON(!((union ulong_byte_assert) { .ulong = 1UL << BUCKET_LOCK_BITNR }).byte);
@@ -167,9 +140,7 @@ static inline int gen_cmp(u8 a, u8 b)
static inline int gen_after(u8 a, u8 b)
{
- int r = gen_cmp(a, b);
-
- return r > 0 ? r : 0;
+ return max(0, gen_cmp(a, b));
}
static inline int dev_ptr_stale_rcu(struct bch_dev *ca, const struct bch_extent_ptr *ptr)
diff --git a/fs/bcachefs/buckets_types.h b/fs/bcachefs/buckets_types.h
index 7174047b8e92..900b8680c8b5 100644
--- a/fs/bcachefs/buckets_types.h
+++ b/fs/bcachefs/buckets_types.h
@@ -7,6 +7,33 @@
#define BUCKET_JOURNAL_SEQ_BITS 16
+/*
+ * Ugly hack alert:
+ *
+ * We need to cram a spinlock in a single byte, because that's what we have left
+ * in struct bucket, and we care about the size of these - during fsck, we need
+ * in memory state for every single bucket on every device.
+ *
+ * We used to do
+ * while (xchg(&b->lock, 1) cpu_relax();
+ * but, it turns out not all architectures support xchg on a single byte.
+ *
+ * So now we use bit_spin_lock(), with fun games since we can't burn a whole
+ * ulong for this - we just need to make sure the lock bit always ends up in the
+ * first byte.
+ */
+
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+#define BUCKET_LOCK_BITNR 0
+#else
+#define BUCKET_LOCK_BITNR (BITS_PER_LONG - 1)
+#endif
+
+union ulong_byte_assert {
+ ulong ulong;
+ u8 byte;
+};
+
struct bucket {
u8 lock;
u8 gen_valid:1;
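For reference, the lock side pairing with the bucket_unlock() shown earlier in buckets.h plausibly follows the bit_spin_lock() approach the comment describes; a minimal sketch, assuming <linux/bit_spinlock.h> and that the helper keeps roughly this shape (illustrative, not taken from the patch):

        static inline void bucket_lock(struct bucket *b)
        {
                /* take the lock bit that the layout above guarantees lands in the first byte */
                bit_spin_lock(BUCKET_LOCK_BITNR, (void *) &b->lock);
        }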
diff --git a/fs/bcachefs/chardev.c b/fs/bcachefs/chardev.c
index 46e9e32105a9..57d55b3ddc71 100644
--- a/fs/bcachefs/chardev.c
+++ b/fs/bcachefs/chardev.c
@@ -11,6 +11,7 @@
#include "move.h"
#include "recovery_passes.h"
#include "replicas.h"
+#include "sb-counters.h"
#include "super-io.h"
#include "thread_with_file.h"
@@ -312,7 +313,12 @@ static int bch2_data_thread(void *arg)
struct bch_data_ctx *ctx = container_of(arg, struct bch_data_ctx, thr);
ctx->thr.ret = bch2_data_job(ctx->c, &ctx->stats, ctx->arg);
- ctx->stats.data_type = U8_MAX;
+ if (ctx->thr.ret == -BCH_ERR_device_offline)
+ ctx->stats.ret = BCH_IOCTL_DATA_EVENT_RET_device_offline;
+ else {
+ ctx->stats.ret = BCH_IOCTL_DATA_EVENT_RET_done;
+ ctx->stats.data_type = (int) DATA_PROGRESS_DATA_TYPE_done;
+ }
return 0;
}
@@ -331,14 +337,30 @@ static ssize_t bch2_data_job_read(struct file *file, char __user *buf,
struct bch_data_ctx *ctx = container_of(file->private_data, struct bch_data_ctx, thr);
struct bch_fs *c = ctx->c;
struct bch_ioctl_data_event e = {
- .type = BCH_DATA_EVENT_PROGRESS,
- .p.data_type = ctx->stats.data_type,
- .p.btree_id = ctx->stats.pos.btree,
- .p.pos = ctx->stats.pos.pos,
- .p.sectors_done = atomic64_read(&ctx->stats.sectors_seen),
- .p.sectors_total = bch2_fs_usage_read_short(c).used,
+ .type = BCH_DATA_EVENT_PROGRESS,
+ .ret = ctx->stats.ret,
+ .p.data_type = ctx->stats.data_type,
+ .p.btree_id = ctx->stats.pos.btree,
+ .p.pos = ctx->stats.pos.pos,
+ .p.sectors_done = atomic64_read(&ctx->stats.sectors_seen),
+ .p.sectors_error_corrected = atomic64_read(&ctx->stats.sectors_error_corrected),
+ .p.sectors_error_uncorrected = atomic64_read(&ctx->stats.sectors_error_uncorrected),
};
+ if (ctx->arg.op == BCH_DATA_OP_scrub) {
+ struct bch_dev *ca = bch2_dev_tryget(c, ctx->arg.scrub.dev);
+ if (ca) {
+ struct bch_dev_usage u;
+ bch2_dev_usage_read_fast(ca, &u);
+ for (unsigned i = BCH_DATA_btree; i < ARRAY_SIZE(u.d); i++)
+ if (ctx->arg.scrub.data_types & BIT(i))
+ e.p.sectors_total += u.d[i].sectors;
+ bch2_dev_put(ca);
+ }
+ } else {
+ e.p.sectors_total = bch2_fs_usage_read_short(c).used;
+ }
+
if (len < sizeof(e))
return -EINVAL;
@@ -710,6 +732,8 @@ long bch2_fs_ioctl(struct bch_fs *c, unsigned cmd, void __user *arg)
BCH_IOCTL(fsck_online, struct bch_ioctl_fsck_online);
case BCH_IOCTL_QUERY_ACCOUNTING:
return bch2_ioctl_query_accounting(c, arg);
+ case BCH_IOCTL_QUERY_COUNTERS:
+ return bch2_ioctl_query_counters(c, arg);
default:
return -ENOTTY;
}
diff --git a/fs/bcachefs/checksum.c b/fs/bcachefs/checksum.c
index 23a383577d4c..3726689093e3 100644
--- a/fs/bcachefs/checksum.c
+++ b/fs/bcachefs/checksum.c
@@ -466,7 +466,7 @@ int bch2_rechecksum_bio(struct bch_fs *c, struct bio *bio,
prt_str(&buf, ")");
WARN_RATELIMIT(1, "%s", buf.buf);
printbuf_exit(&buf);
- return -EIO;
+ return -BCH_ERR_recompute_checksum;
}
for (i = splits; i < splits + ARRAY_SIZE(splits); i++) {
@@ -693,6 +693,14 @@ static int bch2_alloc_ciphers(struct bch_fs *c)
return 0;
}
+#if 0
+
+/*
+ * This seems to be duplicating code in cmd_remove_passphrase() in
+ * bcachefs-tools, but we might want to switch userspace to use this - and
+ * perhaps add an ioctl for calling this at runtime, so we can take the
+ * passphrase off of a mounted filesystem (which has come up).
+ */
int bch2_disable_encryption(struct bch_fs *c)
{
struct bch_sb_field_crypt *crypt;
@@ -725,6 +733,10 @@ out:
return ret;
}
+/*
+ * For enabling encryption on an existing filesystem: not hooked up yet, but it
+ * should be
+ */
int bch2_enable_encryption(struct bch_fs *c, bool keyed)
{
struct bch_encrypted_key key;
@@ -781,6 +793,7 @@ err:
memzero_explicit(&key, sizeof(key));
return ret;
}
+#endif
void bch2_fs_encryption_exit(struct bch_fs *c)
{
@@ -788,8 +801,6 @@ void bch2_fs_encryption_exit(struct bch_fs *c)
crypto_free_shash(c->poly1305);
if (c->chacha20)
crypto_free_sync_skcipher(c->chacha20);
- if (c->sha256)
- crypto_free_shash(c->sha256);
}
int bch2_fs_encryption_init(struct bch_fs *c)
@@ -798,14 +809,6 @@ int bch2_fs_encryption_init(struct bch_fs *c)
struct bch_key key;
int ret = 0;
- c->sha256 = crypto_alloc_shash("sha256", 0, 0);
- ret = PTR_ERR_OR_ZERO(c->sha256);
- if (ret) {
- c->sha256 = NULL;
- bch_err(c, "error requesting sha256 module: %s", bch2_err_str(ret));
- goto out;
- }
-
crypt = bch2_sb_field_get(c->disk_sb.sb, crypt);
if (!crypt)
goto out;
diff --git a/fs/bcachefs/checksum.h b/fs/bcachefs/checksum.h
index 43b9d71f2f2b..4ac251c8fcd8 100644
--- a/fs/bcachefs/checksum.h
+++ b/fs/bcachefs/checksum.h
@@ -103,8 +103,10 @@ extern const struct bch_sb_field_ops bch_sb_field_ops_crypt;
int bch2_decrypt_sb_key(struct bch_fs *, struct bch_sb_field_crypt *,
struct bch_key *);
+#if 0
int bch2_disable_encryption(struct bch_fs *);
int bch2_enable_encryption(struct bch_fs *, bool);
+#endif
void bch2_fs_encryption_exit(struct bch_fs *);
int bch2_fs_encryption_init(struct bch_fs *);
diff --git a/fs/bcachefs/compress.c b/fs/bcachefs/compress.c
index 114bf2f3879f..85fc90342492 100644
--- a/fs/bcachefs/compress.c
+++ b/fs/bcachefs/compress.c
@@ -177,7 +177,7 @@ static int __bio_uncompress(struct bch_fs *c, struct bio *src,
size_t src_len = src->bi_iter.bi_size;
size_t dst_len = crc.uncompressed_size << 9;
void *workspace;
- int ret;
+ int ret = 0, ret2;
enum bch_compression_opts opt = bch2_compression_type_to_opt(crc.compression_type);
mempool_t *workspace_pool = &c->compress_workspace[opt];
@@ -189,7 +189,7 @@ static int __bio_uncompress(struct bch_fs *c, struct bio *src,
else
ret = -BCH_ERR_compression_workspace_not_initialized;
if (ret)
- goto out;
+ goto err;
}
src_data = bio_map_or_bounce(c, src, READ);
@@ -197,10 +197,10 @@ static int __bio_uncompress(struct bch_fs *c, struct bio *src,
switch (crc.compression_type) {
case BCH_COMPRESSION_TYPE_lz4_old:
case BCH_COMPRESSION_TYPE_lz4:
- ret = LZ4_decompress_safe_partial(src_data.b, dst_data,
- src_len, dst_len, dst_len);
- if (ret != dst_len)
- goto err;
+ ret2 = LZ4_decompress_safe_partial(src_data.b, dst_data,
+ src_len, dst_len, dst_len);
+ if (ret2 != dst_len)
+ ret = -BCH_ERR_decompress_lz4;
break;
case BCH_COMPRESSION_TYPE_gzip: {
z_stream strm = {
@@ -214,45 +214,43 @@ static int __bio_uncompress(struct bch_fs *c, struct bio *src,
zlib_set_workspace(&strm, workspace);
zlib_inflateInit2(&strm, -MAX_WBITS);
- ret = zlib_inflate(&strm, Z_FINISH);
+ ret2 = zlib_inflate(&strm, Z_FINISH);
mempool_free(workspace, workspace_pool);
- if (ret != Z_STREAM_END)
- goto err;
+ if (ret2 != Z_STREAM_END)
+ ret = -BCH_ERR_decompress_gzip;
break;
}
case BCH_COMPRESSION_TYPE_zstd: {
ZSTD_DCtx *ctx;
size_t real_src_len = le32_to_cpup(src_data.b);
- if (real_src_len > src_len - 4)
+ if (real_src_len > src_len - 4) {
+ ret = -BCH_ERR_decompress_zstd_src_len_bad;
goto err;
+ }
workspace = mempool_alloc(workspace_pool, GFP_NOFS);
ctx = zstd_init_dctx(workspace, zstd_dctx_workspace_bound());
- ret = zstd_decompress_dctx(ctx,
+ ret2 = zstd_decompress_dctx(ctx,
dst_data, dst_len,
src_data.b + 4, real_src_len);
mempool_free(workspace, workspace_pool);
- if (ret != dst_len)
- goto err;
+ if (ret2 != dst_len)
+ ret = -BCH_ERR_decompress_zstd;
break;
}
default:
BUG();
}
- ret = 0;
+err:
fsck_err:
-out:
bio_unmap_or_unbounce(c, src_data);
return ret;
-err:
- ret = -EIO;
- goto out;
}
int bch2_bio_uncompress_inplace(struct bch_write_op *op,
@@ -268,27 +266,22 @@ int bch2_bio_uncompress_inplace(struct bch_write_op *op,
BUG_ON(!bio->bi_vcnt);
BUG_ON(DIV_ROUND_UP(crc->live_size, PAGE_SECTORS) > bio->bi_max_vecs);
- if (crc->uncompressed_size << 9 > c->opts.encoded_extent_max ||
- crc->compressed_size << 9 > c->opts.encoded_extent_max) {
- struct printbuf buf = PRINTBUF;
- bch2_write_op_error(&buf, op);
- prt_printf(&buf, "error rewriting existing data: extent too big");
- bch_err_ratelimited(c, "%s", buf.buf);
- printbuf_exit(&buf);
- return -EIO;
+ if (crc->uncompressed_size << 9 > c->opts.encoded_extent_max) {
+ bch2_write_op_error(op, op->pos.offset,
+ "extent too big to decompress (%u > %u)",
+ crc->uncompressed_size << 9, c->opts.encoded_extent_max);
+ return -BCH_ERR_decompress_exceeded_max_encoded_extent;
}
data = __bounce_alloc(c, dst_len, WRITE);
- if (__bio_uncompress(c, bio, data.b, *crc)) {
- if (!c->opts.no_data_io) {
- struct printbuf buf = PRINTBUF;
- bch2_write_op_error(&buf, op);
- prt_printf(&buf, "error rewriting existing data: decompression error");
- bch_err_ratelimited(c, "%s", buf.buf);
- printbuf_exit(&buf);
- }
- ret = -EIO;
+ ret = __bio_uncompress(c, bio, data.b, *crc);
+
+ if (c->opts.no_data_io)
+ ret = 0;
+
+ if (ret) {
+ bch2_write_op_error(op, op->pos.offset, "%s", bch2_err_str(ret));
goto err;
}
@@ -321,7 +314,7 @@ int bch2_bio_uncompress(struct bch_fs *c, struct bio *src,
if (crc.uncompressed_size << 9 > c->opts.encoded_extent_max ||
crc.compressed_size << 9 > c->opts.encoded_extent_max)
- return -EIO;
+ return -BCH_ERR_decompress_exceeded_max_encoded_extent;
dst_data = dst_len == dst_iter.bi_size
? __bio_map_or_bounce(c, dst, dst_iter, WRITE)
diff --git a/fs/bcachefs/data_update.c b/fs/bcachefs/data_update.c
index 642fbc60ecab..0ec273daccb7 100644
--- a/fs/bcachefs/data_update.c
+++ b/fs/bcachefs/data_update.c
@@ -20,6 +20,8 @@
#include "subvolume.h"
#include "trace.h"
+#include <linux/ioprio.h>
+
static void bkey_put_dev_refs(struct bch_fs *c, struct bkey_s_c k)
{
struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
@@ -33,7 +35,7 @@ static bool bkey_get_dev_refs(struct bch_fs *c, struct bkey_s_c k)
struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
bkey_for_each_ptr(ptrs, ptr) {
- if (!bch2_dev_tryget(c, ptr->dev)) {
+ if (unlikely(!bch2_dev_tryget(c, ptr->dev))) {
bkey_for_each_ptr(ptrs, ptr2) {
if (ptr2 == ptr)
break;
@@ -91,7 +93,7 @@ static bool bkey_nocow_lock(struct bch_fs *c, struct moving_context *ctxt, struc
return true;
}
-static noinline void trace_move_extent_finish2(struct data_update *u,
+static noinline void trace_io_move_finish2(struct data_update *u,
struct bkey_i *new,
struct bkey_i *insert)
{
@@ -111,11 +113,11 @@ static noinline void trace_move_extent_finish2(struct data_update *u,
bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(insert));
prt_newline(&buf);
- trace_move_extent_finish(c, buf.buf);
+ trace_io_move_finish(c, buf.buf);
printbuf_exit(&buf);
}
-static void trace_move_extent_fail2(struct data_update *m,
+static void trace_io_move_fail2(struct data_update *m,
struct bkey_s_c new,
struct bkey_s_c wrote,
struct bkey_i *insert,
@@ -126,7 +128,7 @@ static void trace_move_extent_fail2(struct data_update *m,
struct printbuf buf = PRINTBUF;
unsigned rewrites_found = 0;
- if (!trace_move_extent_fail_enabled())
+ if (!trace_io_move_fail_enabled())
return;
prt_str(&buf, msg);
@@ -166,7 +168,7 @@ static void trace_move_extent_fail2(struct data_update *m,
bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(insert));
}
- trace_move_extent_fail(c, buf.buf);
+ trace_io_move_fail(c, buf.buf);
printbuf_exit(&buf);
}
@@ -214,7 +216,7 @@ static int __bch2_data_update_index_update(struct btree_trans *trans,
new = bkey_i_to_extent(bch2_keylist_front(keys));
if (!bch2_extents_match(k, old)) {
- trace_move_extent_fail2(m, k, bkey_i_to_s_c(&new->k_i),
+ trace_io_move_fail2(m, k, bkey_i_to_s_c(&new->k_i),
NULL, "no match:");
goto nowork;
}
@@ -254,7 +256,7 @@ static int __bch2_data_update_index_update(struct btree_trans *trans,
if (m->data_opts.rewrite_ptrs &&
!rewrites_found &&
bch2_bkey_durability(c, k) >= m->op.opts.data_replicas) {
- trace_move_extent_fail2(m, k, bkey_i_to_s_c(&new->k_i), insert, "no rewrites found:");
+ trace_io_move_fail2(m, k, bkey_i_to_s_c(&new->k_i), insert, "no rewrites found:");
goto nowork;
}
@@ -271,7 +273,7 @@ restart_drop_conflicting_replicas:
}
if (!bkey_val_u64s(&new->k)) {
- trace_move_extent_fail2(m, k, bkey_i_to_s_c(&new->k_i), insert, "new replicas conflicted:");
+ trace_io_move_fail2(m, k, bkey_i_to_s_c(&new->k_i), insert, "new replicas conflicted:");
goto nowork;
}
@@ -352,7 +354,7 @@ restart_drop_extra_replicas:
printbuf_exit(&buf);
bch2_fatal_error(c);
- ret = -EIO;
+ ret = -BCH_ERR_invalid_bkey;
goto out;
}
@@ -385,9 +387,9 @@ restart_drop_extra_replicas:
if (!ret) {
bch2_btree_iter_set_pos(&iter, next_pos);
- this_cpu_add(c->counters[BCH_COUNTER_move_extent_finish], new->k.size);
- if (trace_move_extent_finish_enabled())
- trace_move_extent_finish2(m, &new->k_i, insert);
+ this_cpu_add(c->counters[BCH_COUNTER_io_move_finish], new->k.size);
+ if (trace_io_move_finish_enabled())
+ trace_io_move_finish2(m, &new->k_i, insert);
}
err:
if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
@@ -409,7 +411,7 @@ nowork:
&m->stats->sectors_raced);
}
- count_event(c, move_extent_fail);
+ count_event(c, io_move_fail);
bch2_btree_iter_advance(&iter);
goto next;
@@ -427,14 +429,17 @@ int bch2_data_update_index_update(struct bch_write_op *op)
return bch2_trans_run(op->c, __bch2_data_update_index_update(trans, op));
}
-void bch2_data_update_read_done(struct data_update *m,
- struct bch_extent_crc_unpacked crc)
+void bch2_data_update_read_done(struct data_update *m)
{
+ m->read_done = true;
+
/* write bio must own pages: */
BUG_ON(!m->op.wbio.bio.bi_vcnt);
- m->op.crc = crc;
- m->op.wbio.bio.bi_iter.bi_size = crc.compressed_size << 9;
+ m->op.crc = m->rbio.pick.crc;
+ m->op.wbio.bio.bi_iter.bi_size = m->op.crc.compressed_size << 9;
+
+ this_cpu_add(m->op.c->counters[BCH_COUNTER_io_move_write], m->k.k->k.size);
closure_call(&m->op.cl, bch2_write, NULL, NULL);
}
@@ -444,31 +449,34 @@ void bch2_data_update_exit(struct data_update *update)
struct bch_fs *c = update->op.c;
struct bkey_s_c k = bkey_i_to_s_c(update->k.k);
+ bch2_bio_free_pages_pool(c, &update->op.wbio.bio);
+ kfree(update->bvecs);
+ update->bvecs = NULL;
+
if (c->opts.nocow_enabled)
bkey_nocow_unlock(c, k);
bkey_put_dev_refs(c, k);
- bch2_bkey_buf_exit(&update->k, c);
bch2_disk_reservation_put(c, &update->op.res);
- bch2_bio_free_pages_pool(c, &update->op.wbio.bio);
+ bch2_bkey_buf_exit(&update->k, c);
}
-static void bch2_update_unwritten_extent(struct btree_trans *trans,
- struct data_update *update)
+static int bch2_update_unwritten_extent(struct btree_trans *trans,
+ struct data_update *update)
{
struct bch_fs *c = update->op.c;
- struct bio *bio = &update->op.wbio.bio;
struct bkey_i_extent *e;
struct write_point *wp;
struct closure cl;
struct btree_iter iter;
struct bkey_s_c k;
- int ret;
+ int ret = 0;
closure_init_stack(&cl);
bch2_keylist_init(&update->op.insert_keys, update->op.inline_keys);
- while (bio_sectors(bio)) {
- unsigned sectors = bio_sectors(bio);
+ while (bpos_lt(update->op.pos, update->k.k->k.p)) {
+ unsigned sectors = update->k.k->k.p.offset -
+ update->op.pos.offset;
bch2_trans_begin(trans);
@@ -504,7 +512,7 @@ static void bch2_update_unwritten_extent(struct btree_trans *trans,
bch_err_fn_ratelimited(c, ret);
if (ret)
- return;
+ break;
sectors = min(sectors, wp->sectors_free);
@@ -514,7 +522,6 @@ static void bch2_update_unwritten_extent(struct btree_trans *trans,
bch2_alloc_sectors_append_ptrs(c, wp, &e->k_i, sectors, false);
bch2_alloc_sectors_done(c, wp);
- bio_advance(bio, sectors << 9);
update->op.pos.offset += sectors;
extent_for_each_ptr(extent_i_to_s(e), ptr)
@@ -533,13 +540,16 @@ static void bch2_update_unwritten_extent(struct btree_trans *trans,
bch2_trans_unlock(trans);
closure_sync(&cl);
}
+
+ return ret;
}
void bch2_data_update_opts_to_text(struct printbuf *out, struct bch_fs *c,
struct bch_io_opts *io_opts,
struct data_update_opts *data_opts)
{
- printbuf_tabstop_push(out, 20);
+ if (!out->nr_tabstops)
+ printbuf_tabstop_push(out, 20);
prt_str_indented(out, "rewrite ptrs:\t");
bch2_prt_u64_base2(out, data_opts->rewrite_ptrs);
@@ -574,6 +584,17 @@ void bch2_data_update_to_text(struct printbuf *out, struct data_update *m)
bch2_bkey_val_to_text(out, m->op.c, bkey_i_to_s_c(m->k.k));
}
+void bch2_data_update_inflight_to_text(struct printbuf *out, struct data_update *m)
+{
+ bch2_bkey_val_to_text(out, m->op.c, bkey_i_to_s_c(m->k.k));
+ prt_newline(out);
+ printbuf_indent_add(out, 2);
+ bch2_data_update_opts_to_text(out, m->op.c, &m->op.opts, &m->data_opts);
+ prt_printf(out, "read_done:\t%u\n", m->read_done);
+ bch2_write_op_to_text(out, &m->op);
+ printbuf_indent_sub(out, 2);
+}
+
int bch2_extent_drop_ptrs(struct btree_trans *trans,
struct btree_iter *iter,
struct bkey_s_c k,
@@ -617,12 +638,85 @@ int bch2_extent_drop_ptrs(struct btree_trans *trans,
bch2_trans_commit(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc);
}
+int bch2_data_update_bios_init(struct data_update *m, struct bch_fs *c,
+ struct bch_io_opts *io_opts)
+{
+ struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(bkey_i_to_s_c(m->k.k));
+ const union bch_extent_entry *entry;
+ struct extent_ptr_decoded p;
+
+ /* write path might have to decompress data: */
+ unsigned buf_bytes = 0;
+ bkey_for_each_ptr_decode(&m->k.k->k, ptrs, p, entry)
+ buf_bytes = max_t(unsigned, buf_bytes, p.crc.uncompressed_size << 9);
+
+ unsigned nr_vecs = DIV_ROUND_UP(buf_bytes, PAGE_SIZE);
+
+ m->bvecs = kmalloc_array(nr_vecs, sizeof(*m->bvecs), GFP_KERNEL);
+ if (!m->bvecs)
+ return -ENOMEM;
+
+ bio_init(&m->rbio.bio, NULL, m->bvecs, nr_vecs, REQ_OP_READ);
+ bio_init(&m->op.wbio.bio, NULL, m->bvecs, nr_vecs, 0);
+
+ if (bch2_bio_alloc_pages(&m->op.wbio.bio, buf_bytes, GFP_KERNEL)) {
+ kfree(m->bvecs);
+ m->bvecs = NULL;
+ return -ENOMEM;
+ }
+
+ rbio_init(&m->rbio.bio, c, *io_opts, NULL);
+ m->rbio.data_update = true;
+ m->rbio.bio.bi_iter.bi_size = buf_bytes;
+ m->rbio.bio.bi_iter.bi_sector = bkey_start_offset(&m->k.k->k);
+ m->op.wbio.bio.bi_ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0);
+ return 0;
+}
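With the read bio embedded in struct data_update (see the data_update.h hunk below) and read_done tracked on the update itself, a read completion can hand straight off to the write path. A minimal sketch of such a handoff, assuming a hypothetical caller along these lines (names here are illustrative, not from the patch):

        static void data_update_read_endio_sketch(struct bch_read_bio *rbio)
        {
                struct data_update *m = container_of(rbio, struct data_update, rbio);

                /* bch2_data_update_read_done() now takes the crc from m->rbio.pick.crc */
                if (!rbio->bio.bi_status)
                        bch2_data_update_read_done(m);
        }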
+
+static int can_write_extent(struct bch_fs *c, struct data_update *m)
+{
+ if ((m->op.flags & BCH_WRITE_alloc_nowait) &&
+ unlikely(c->open_buckets_nr_free <= bch2_open_buckets_reserved(m->op.watermark)))
+ return -BCH_ERR_data_update_done_would_block;
+
+ unsigned target = m->op.flags & BCH_WRITE_only_specified_devs
+ ? m->op.target
+ : 0;
+ struct bch_devs_mask devs = target_rw_devs(c, BCH_DATA_user, target);
+
+ darray_for_each(m->op.devs_have, i)
+ __clear_bit(*i, devs.d);
+
+ rcu_read_lock();
+ unsigned nr_replicas = 0, i;
+ for_each_set_bit(i, devs.d, BCH_SB_MEMBERS_MAX) {
+ struct bch_dev *ca = bch2_dev_rcu(c, i);
+
+ struct bch_dev_usage usage;
+ bch2_dev_usage_read_fast(ca, &usage);
+
+ if (!dev_buckets_free(ca, usage, m->op.watermark))
+ continue;
+
+ nr_replicas += ca->mi.durability;
+ if (nr_replicas >= m->op.nr_replicas)
+ break;
+ }
+ rcu_read_unlock();
+
+ if (!nr_replicas)
+ return -BCH_ERR_data_update_done_no_rw_devs;
+ if (nr_replicas < m->op.nr_replicas)
+ return -BCH_ERR_insufficient_devices;
+ return 0;
+}
+
int bch2_data_update_init(struct btree_trans *trans,
struct btree_iter *iter,
struct moving_context *ctxt,
struct data_update *m,
struct write_point_specifier wp,
- struct bch_io_opts io_opts,
+ struct bch_io_opts *io_opts,
struct data_update_opts data_opts,
enum btree_id btree_id,
struct bkey_s_c k)
@@ -640,16 +734,7 @@ int bch2_data_update_init(struct btree_trans *trans,
* snapshots table - just skip it, we can move it later.
*/
if (unlikely(k.k->p.snapshot && !bch2_snapshot_exists(c, k.k->p.snapshot)))
- return -BCH_ERR_data_update_done;
-
- if (!bkey_get_dev_refs(c, k))
- return -BCH_ERR_data_update_done;
-
- if (c->opts.nocow_enabled &&
- !bkey_nocow_lock(c, ctxt, k)) {
- bkey_put_dev_refs(c, k);
- return -BCH_ERR_nocow_lock_blocked;
- }
+ return -BCH_ERR_data_update_done_no_snapshot;
bch2_bkey_buf_init(&m->k);
bch2_bkey_buf_reassemble(&m->k, c, k);
@@ -658,18 +743,18 @@ int bch2_data_update_init(struct btree_trans *trans,
m->ctxt = ctxt;
m->stats = ctxt ? ctxt->stats : NULL;
- bch2_write_op_init(&m->op, c, io_opts);
+ bch2_write_op_init(&m->op, c, *io_opts);
m->op.pos = bkey_start_pos(k.k);
m->op.version = k.k->bversion;
m->op.target = data_opts.target;
m->op.write_point = wp;
m->op.nr_replicas = 0;
- m->op.flags |= BCH_WRITE_PAGES_STABLE|
- BCH_WRITE_PAGES_OWNED|
- BCH_WRITE_DATA_ENCODED|
- BCH_WRITE_MOVE|
+ m->op.flags |= BCH_WRITE_pages_stable|
+ BCH_WRITE_pages_owned|
+ BCH_WRITE_data_encoded|
+ BCH_WRITE_move|
m->data_opts.write_flags;
- m->op.compression_opt = io_opts.background_compression;
+ m->op.compression_opt = io_opts->background_compression;
m->op.watermark = m->data_opts.btree_insert_flags & BCH_WATERMARK_MASK;
unsigned durability_have = 0, durability_removing = 0;
@@ -707,7 +792,7 @@ int bch2_data_update_init(struct btree_trans *trans,
ptr_bit <<= 1;
}
- unsigned durability_required = max(0, (int) (io_opts.data_replicas - durability_have));
+ unsigned durability_required = max(0, (int) (io_opts->data_replicas - durability_have));
/*
* If current extent durability is less than io_opts.data_replicas,
@@ -740,28 +825,70 @@ int bch2_data_update_init(struct btree_trans *trans,
m->data_opts.rewrite_ptrs = 0;
/* if iter == NULL, it's just a promote */
if (iter)
- ret = bch2_extent_drop_ptrs(trans, iter, k, &io_opts, &m->data_opts);
- goto out;
+ ret = bch2_extent_drop_ptrs(trans, iter, k, io_opts, &m->data_opts);
+ if (!ret)
+ ret = -BCH_ERR_data_update_done_no_writes_needed;
+ goto out_bkey_buf_exit;
}
+ /*
+ * Check if the allocation will succeed, to avoid getting an error later
+ * in bch2_write() -> bch2_alloc_sectors_start() and doing a useless
+ * read:
+ *
+ * This guards against
+ * - BCH_WRITE_alloc_nowait allocations failing (promotes)
+ * - Destination target full
+ * - Device(s) in destination target offline
+ * - Insufficient durability available in destination target
+ * (i.e. trying to move a durability=2 replica to a target with a
+ * single durability=2 device)
+ */
+ ret = can_write_extent(c, m);
+ if (ret)
+ goto out_bkey_buf_exit;
+
if (reserve_sectors) {
ret = bch2_disk_reservation_add(c, &m->op.res, reserve_sectors,
m->data_opts.extra_replicas
? 0
: BCH_DISK_RESERVATION_NOFAIL);
if (ret)
- goto out;
+ goto out_bkey_buf_exit;
+ }
+
+ if (!bkey_get_dev_refs(c, k)) {
+ ret = -BCH_ERR_data_update_done_no_dev_refs;
+ goto out_put_disk_res;
+ }
+
+ if (c->opts.nocow_enabled &&
+ !bkey_nocow_lock(c, ctxt, k)) {
+ ret = -BCH_ERR_nocow_lock_blocked;
+ goto out_put_dev_refs;
}
if (bkey_extent_is_unwritten(k)) {
- bch2_update_unwritten_extent(trans, m);
- goto out;
+ ret = bch2_update_unwritten_extent(trans, m) ?:
+ -BCH_ERR_data_update_done_unwritten;
+ goto out_nocow_unlock;
}
+ ret = bch2_data_update_bios_init(m, c, io_opts);
+ if (ret)
+ goto out_nocow_unlock;
+
return 0;
-out:
- bch2_data_update_exit(m);
- return ret ?: -BCH_ERR_data_update_done;
+out_nocow_unlock:
+ if (c->opts.nocow_enabled)
+ bkey_nocow_unlock(c, k);
+out_put_dev_refs:
+ bkey_put_dev_refs(c, k);
+out_put_disk_res:
+ bch2_disk_reservation_put(c, &m->op.res);
+out_bkey_buf_exit:
+ bch2_bkey_buf_exit(&m->k, c);
+ return ret;
}
void bch2_data_update_opts_normalize(struct bkey_s_c k, struct data_update_opts *opts)
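The catch-all -BCH_ERR_data_update_done return is split into specific codes (would_block, no_rw_devs, no_snapshot, no_writes_needed, no_dev_refs, unwritten). Assuming errcode.h groups them under a common data_update_done class, as their names suggest, a caller that only cares about "nothing left to do" can still treat them collectively; a minimal sketch:

        int ret = bch2_data_update_init(trans, NULL, ctxt, &m, wp,
                                        &io_opts, data_opts, btree, k);
        if (bch2_err_matches(ret, BCH_ERR_data_update_done))
                return 0;       /* nothing to move; not an error for this caller */
        if (ret)
                return ret;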
diff --git a/fs/bcachefs/data_update.h b/fs/bcachefs/data_update.h
index e4b50723428e..c194cbbf5b51 100644
--- a/fs/bcachefs/data_update.h
+++ b/fs/bcachefs/data_update.h
@@ -4,6 +4,7 @@
#define _BCACHEFS_DATA_UPDATE_H
#include "bkey_buf.h"
+#include "io_read.h"
#include "io_write_types.h"
struct moving_context;
@@ -15,6 +16,9 @@ struct data_update_opts {
u8 extra_replicas;
unsigned btree_insert_flags;
unsigned write_flags;
+
+ int read_dev;
+ bool scrub;
};
void bch2_data_update_opts_to_text(struct printbuf *, struct bch_fs *,
@@ -22,20 +26,24 @@ void bch2_data_update_opts_to_text(struct printbuf *, struct bch_fs *,
struct data_update {
/* extent being updated: */
+ bool read_done;
enum btree_id btree_id;
struct bkey_buf k;
struct data_update_opts data_opts;
struct moving_context *ctxt;
struct bch_move_stats *stats;
+
+ struct bch_read_bio rbio;
struct bch_write_op op;
+ struct bio_vec *bvecs;
};
void bch2_data_update_to_text(struct printbuf *, struct data_update *);
+void bch2_data_update_inflight_to_text(struct printbuf *, struct data_update *);
int bch2_data_update_index_update(struct bch_write_op *);
-void bch2_data_update_read_done(struct data_update *,
- struct bch_extent_crc_unpacked);
+void bch2_data_update_read_done(struct data_update *);
int bch2_extent_drop_ptrs(struct btree_trans *,
struct btree_iter *,
@@ -43,12 +51,15 @@ int bch2_extent_drop_ptrs(struct btree_trans *,
struct bch_io_opts *,
struct data_update_opts *);
+int bch2_data_update_bios_init(struct data_update *, struct bch_fs *,
+ struct bch_io_opts *);
+
void bch2_data_update_exit(struct data_update *);
int bch2_data_update_init(struct btree_trans *, struct btree_iter *,
struct moving_context *,
struct data_update *,
struct write_point_specifier,
- struct bch_io_opts, struct data_update_opts,
+ struct bch_io_opts *, struct data_update_opts,
enum btree_id, struct bkey_s_c);
void bch2_data_update_opts_normalize(struct bkey_s_c, struct data_update_opts *);
diff --git a/fs/bcachefs/debug.c b/fs/bcachefs/debug.c
index 55333e82d1fe..788af88f6979 100644
--- a/fs/bcachefs/debug.c
+++ b/fs/bcachefs/debug.c
@@ -7,6 +7,7 @@
*/
#include "bcachefs.h"
+#include "alloc_foreground.h"
#include "bkey_methods.h"
#include "btree_cache.h"
#include "btree_io.h"
@@ -190,7 +191,7 @@ void bch2_btree_node_ondisk_to_text(struct printbuf *out, struct bch_fs *c,
unsigned offset = 0;
int ret;
- if (bch2_bkey_pick_read_device(c, bkey_i_to_s_c(&b->key), NULL, &pick) <= 0) {
+ if (bch2_bkey_pick_read_device(c, bkey_i_to_s_c(&b->key), NULL, &pick, -1) <= 0) {
prt_printf(out, "error getting device to read from: invalid device\n");
return;
}
@@ -844,8 +845,11 @@ restart:
seqmutex_unlock(&c->btree_trans_lock);
}
-static ssize_t bch2_btree_deadlock_read(struct file *file, char __user *buf,
- size_t size, loff_t *ppos)
+typedef void (*fs_to_text_fn)(struct printbuf *, struct bch_fs *);
+
+static ssize_t bch2_simple_print(struct file *file, char __user *buf,
+ size_t size, loff_t *ppos,
+ fs_to_text_fn fn)
{
struct dump_iter *i = file->private_data;
struct bch_fs *c = i->c;
@@ -856,7 +860,7 @@ static ssize_t bch2_btree_deadlock_read(struct file *file, char __user *buf,
i->ret = 0;
if (!i->iter) {
- btree_deadlock_to_text(&i->buf, c);
+ fn(&i->buf, c);
i->iter++;
}
@@ -869,6 +873,12 @@ static ssize_t bch2_btree_deadlock_read(struct file *file, char __user *buf,
return ret ?: i->ret;
}
+static ssize_t bch2_btree_deadlock_read(struct file *file, char __user *buf,
+ size_t size, loff_t *ppos)
+{
+ return bch2_simple_print(file, buf, size, ppos, btree_deadlock_to_text);
+}
+
static const struct file_operations btree_deadlock_ops = {
.owner = THIS_MODULE,
.open = bch2_dump_open,
@@ -876,6 +886,19 @@ static const struct file_operations btree_deadlock_ops = {
.read = bch2_btree_deadlock_read,
};
+static ssize_t bch2_write_points_read(struct file *file, char __user *buf,
+ size_t size, loff_t *ppos)
+{
+ return bch2_simple_print(file, buf, size, ppos, bch2_write_points_to_text);
+}
+
+static const struct file_operations write_points_ops = {
+ .owner = THIS_MODULE,
+ .open = bch2_dump_open,
+ .release = bch2_dump_release,
+ .read = bch2_write_points_read,
+};
+
void bch2_fs_debug_exit(struct bch_fs *c)
{
if (!IS_ERR_OR_NULL(c->fs_debug_dir))
@@ -927,6 +950,9 @@ void bch2_fs_debug_init(struct bch_fs *c)
debugfs_create_file("btree_deadlock", 0400, c->fs_debug_dir,
c->btree_debug, &btree_deadlock_ops);
+ debugfs_create_file("write_points", 0400, c->fs_debug_dir,
+ c->btree_debug, &write_points_ops);
+
c->btree_debug_dir = debugfs_create_dir("btrees", c->fs_debug_dir);
if (IS_ERR_OR_NULL(c->btree_debug_dir))
return;
diff --git a/fs/bcachefs/dirent.c b/fs/bcachefs/dirent.c
index 600eee936f13..d7f9f79318a2 100644
--- a/fs/bcachefs/dirent.c
+++ b/fs/bcachefs/dirent.c
@@ -13,6 +13,40 @@
#include <linux/dcache.h>
+static int bch2_casefold(struct btree_trans *trans, const struct bch_hash_info *info,
+ const struct qstr *str, struct qstr *out_cf)
+{
+ *out_cf = (struct qstr) QSTR_INIT(NULL, 0);
+
+#ifdef CONFIG_UNICODE
+ unsigned char *buf = bch2_trans_kmalloc(trans, BCH_NAME_MAX + 1);
+ int ret = PTR_ERR_OR_ZERO(buf);
+ if (ret)
+ return ret;
+
+ ret = utf8_casefold(info->cf_encoding, str, buf, BCH_NAME_MAX + 1);
+ if (ret <= 0)
+ return ret;
+
+ *out_cf = (struct qstr) QSTR_INIT(buf, ret);
+ return 0;
+#else
+ return -EOPNOTSUPP;
+#endif
+}
+
+static inline int bch2_maybe_casefold(struct btree_trans *trans,
+ const struct bch_hash_info *info,
+ const struct qstr *str, struct qstr *out_cf)
+{
+ if (likely(!info->cf_encoding)) {
+ *out_cf = *str;
+ return 0;
+ } else {
+ return bch2_casefold(trans, info, str, out_cf);
+ }
+}
+
static unsigned bch2_dirent_name_bytes(struct bkey_s_c_dirent d)
{
if (bkey_val_bytes(d.k) < offsetof(struct bch_dirent, d_name))
@@ -28,13 +62,38 @@ static unsigned bch2_dirent_name_bytes(struct bkey_s_c_dirent d)
#endif
return bkey_bytes -
- offsetof(struct bch_dirent, d_name) -
+ (d.v->d_casefold
+ ? offsetof(struct bch_dirent, d_cf_name_block.d_names)
+ : offsetof(struct bch_dirent, d_name)) -
trailing_nuls;
}
struct qstr bch2_dirent_get_name(struct bkey_s_c_dirent d)
{
- return (struct qstr) QSTR_INIT(d.v->d_name, bch2_dirent_name_bytes(d));
+ if (d.v->d_casefold) {
+ unsigned name_len = le16_to_cpu(d.v->d_cf_name_block.d_name_len);
+ return (struct qstr) QSTR_INIT(&d.v->d_cf_name_block.d_names[0], name_len);
+ } else {
+ return (struct qstr) QSTR_INIT(d.v->d_name, bch2_dirent_name_bytes(d));
+ }
+}
+
+static struct qstr bch2_dirent_get_casefold_name(struct bkey_s_c_dirent d)
+{
+ if (d.v->d_casefold) {
+ unsigned name_len = le16_to_cpu(d.v->d_cf_name_block.d_name_len);
+ unsigned cf_name_len = le16_to_cpu(d.v->d_cf_name_block.d_cf_name_len);
+ return (struct qstr) QSTR_INIT(&d.v->d_cf_name_block.d_names[name_len], cf_name_len);
+ } else {
+ return (struct qstr) QSTR_INIT(NULL, 0);
+ }
+}
+
+static inline struct qstr bch2_dirent_get_lookup_name(struct bkey_s_c_dirent d)
+{
+ return d.v->d_casefold
+ ? bch2_dirent_get_casefold_name(d)
+ : bch2_dirent_get_name(d);
}
static u64 bch2_dirent_hash(const struct bch_hash_info *info,
@@ -57,7 +116,7 @@ static u64 dirent_hash_key(const struct bch_hash_info *info, const void *key)
static u64 dirent_hash_bkey(const struct bch_hash_info *info, struct bkey_s_c k)
{
struct bkey_s_c_dirent d = bkey_s_c_to_dirent(k);
- struct qstr name = bch2_dirent_get_name(d);
+ struct qstr name = bch2_dirent_get_lookup_name(d);
return bch2_dirent_hash(info, &name);
}
@@ -65,7 +124,7 @@ static u64 dirent_hash_bkey(const struct bch_hash_info *info, struct bkey_s_c k)
static bool dirent_cmp_key(struct bkey_s_c _l, const void *_r)
{
struct bkey_s_c_dirent l = bkey_s_c_to_dirent(_l);
- const struct qstr l_name = bch2_dirent_get_name(l);
+ const struct qstr l_name = bch2_dirent_get_lookup_name(l);
const struct qstr *r_name = _r;
return !qstr_eq(l_name, *r_name);
@@ -75,8 +134,8 @@ static bool dirent_cmp_bkey(struct bkey_s_c _l, struct bkey_s_c _r)
{
struct bkey_s_c_dirent l = bkey_s_c_to_dirent(_l);
struct bkey_s_c_dirent r = bkey_s_c_to_dirent(_r);
- const struct qstr l_name = bch2_dirent_get_name(l);
- const struct qstr r_name = bch2_dirent_get_name(r);
+ const struct qstr l_name = bch2_dirent_get_lookup_name(l);
+ const struct qstr r_name = bch2_dirent_get_lookup_name(r);
return !qstr_eq(l_name, r_name);
}
@@ -104,17 +163,19 @@ int bch2_dirent_validate(struct bch_fs *c, struct bkey_s_c k,
struct bkey_validate_context from)
{
struct bkey_s_c_dirent d = bkey_s_c_to_dirent(k);
+ unsigned name_block_len = bch2_dirent_name_bytes(d);
struct qstr d_name = bch2_dirent_get_name(d);
+ struct qstr d_cf_name = bch2_dirent_get_casefold_name(d);
int ret = 0;
bkey_fsck_err_on(!d_name.len,
c, dirent_empty_name,
"empty name");
- bkey_fsck_err_on(bkey_val_u64s(k.k) > dirent_val_u64s(d_name.len),
+ bkey_fsck_err_on(d_name.len + d_cf_name.len > name_block_len,
c, dirent_val_too_big,
- "value too big (%zu > %u)",
- bkey_val_u64s(k.k), dirent_val_u64s(d_name.len));
+ "dirent names exceed bkey size (%d + %d > %d)",
+ d_name.len, d_cf_name.len, name_block_len);
/*
* Check new keys don't exceed the max length
@@ -142,6 +203,18 @@ int bch2_dirent_validate(struct bch_fs *c, struct bkey_s_c k,
le64_to_cpu(d.v->d_inum) == d.k->p.inode,
c, dirent_to_itself,
"dirent points to own directory");
+
+ if (d.v->d_casefold) {
+ bkey_fsck_err_on(from.from == BKEY_VALIDATE_commit &&
+ d_cf_name.len > BCH_NAME_MAX,
+ c, dirent_cf_name_too_big,
+ "dirent w/ cf name too big (%u > %u)",
+ d_cf_name.len, BCH_NAME_MAX);
+
+ bkey_fsck_err_on(d_cf_name.len != strnlen(d_cf_name.name, d_cf_name.len),
+ c, dirent_stray_data_after_cf_name,
+ "dirent has stray data after cf name's NUL");
+ }
fsck_err:
return ret;
}
@@ -163,15 +236,14 @@ void bch2_dirent_to_text(struct printbuf *out, struct bch_fs *c, struct bkey_s_c
prt_printf(out, " type %s", bch2_d_type_str(d.v->d_type));
}
-static struct bkey_i_dirent *dirent_create_key(struct btree_trans *trans,
- subvol_inum dir, u8 type,
- const struct qstr *name, u64 dst)
+static struct bkey_i_dirent *dirent_alloc_key(struct btree_trans *trans,
+ subvol_inum dir,
+ u8 type,
+ int name_len, int cf_name_len,
+ u64 dst)
{
struct bkey_i_dirent *dirent;
- unsigned u64s = BKEY_U64s + dirent_val_u64s(name->len);
-
- if (name->len > BCH_NAME_MAX)
- return ERR_PTR(-ENAMETOOLONG);
+ unsigned u64s = BKEY_U64s + dirent_val_u64s(name_len, cf_name_len);
BUG_ON(u64s > U8_MAX);
@@ -190,14 +262,65 @@ static struct bkey_i_dirent *dirent_create_key(struct btree_trans *trans,
}
dirent->v.d_type = type;
+ dirent->v.d_unused = 0;
+ dirent->v.d_casefold = cf_name_len ? 1 : 0;
- memcpy(dirent->v.d_name, name->name, name->len);
- memset(dirent->v.d_name + name->len, 0,
- bkey_val_bytes(&dirent->k) -
- offsetof(struct bch_dirent, d_name) -
- name->len);
+ return dirent;
+}
- EBUG_ON(bch2_dirent_name_bytes(dirent_i_to_s_c(dirent)) != name->len);
+static void dirent_init_regular_name(struct bkey_i_dirent *dirent,
+ const struct qstr *name)
+{
+ EBUG_ON(dirent->v.d_casefold);
+
+ memcpy(&dirent->v.d_name[0], name->name, name->len);
+ memset(&dirent->v.d_name[name->len], 0,
+ bkey_val_bytes(&dirent->k) -
+ offsetof(struct bch_dirent, d_name) -
+ name->len);
+}
+
+static void dirent_init_casefolded_name(struct bkey_i_dirent *dirent,
+ const struct qstr *name,
+ const struct qstr *cf_name)
+{
+ EBUG_ON(!dirent->v.d_casefold);
+ EBUG_ON(!cf_name->len);
+
+ dirent->v.d_cf_name_block.d_name_len = cpu_to_le16(name->len);
+ dirent->v.d_cf_name_block.d_cf_name_len = cpu_to_le16(cf_name->len);
+ memcpy(&dirent->v.d_cf_name_block.d_names[0], name->name, name->len);
+ memcpy(&dirent->v.d_cf_name_block.d_names[name->len], cf_name->name, cf_name->len);
+ memset(&dirent->v.d_cf_name_block.d_names[name->len + cf_name->len], 0,
+ bkey_val_bytes(&dirent->k) -
+ offsetof(struct bch_dirent, d_cf_name_block.d_names) -
+ (name->len + cf_name->len));
+
+ EBUG_ON(bch2_dirent_get_casefold_name(dirent_i_to_s_c(dirent)).len != cf_name->len);
+}
+
+static struct bkey_i_dirent *dirent_create_key(struct btree_trans *trans,
+ subvol_inum dir,
+ u8 type,
+ const struct qstr *name,
+ const struct qstr *cf_name,
+ u64 dst)
+{
+ struct bkey_i_dirent *dirent;
+
+ if (name->len > BCH_NAME_MAX)
+ return ERR_PTR(-ENAMETOOLONG);
+
+ dirent = dirent_alloc_key(trans, dir, type, name->len, cf_name ? cf_name->len : 0, dst);
+ if (IS_ERR(dirent))
+ return dirent;
+
+ if (cf_name)
+ dirent_init_casefolded_name(dirent, name, cf_name);
+ else
+ dirent_init_regular_name(dirent, name);
+
+ EBUG_ON(bch2_dirent_get_name(dirent_i_to_s_c(dirent)).len != name->len);
return dirent;
}
@@ -213,7 +336,7 @@ int bch2_dirent_create_snapshot(struct btree_trans *trans,
struct bkey_i_dirent *dirent;
int ret;
- dirent = dirent_create_key(trans, dir_inum, type, name, dst_inum);
+ dirent = dirent_create_key(trans, dir_inum, type, name, NULL, dst_inum);
ret = PTR_ERR_OR_ZERO(dirent);
if (ret)
return ret;
@@ -233,16 +356,28 @@ int bch2_dirent_create(struct btree_trans *trans, subvol_inum dir,
const struct bch_hash_info *hash_info,
u8 type, const struct qstr *name, u64 dst_inum,
u64 *dir_offset,
+ u64 *i_size,
enum btree_iter_update_trigger_flags flags)
{
struct bkey_i_dirent *dirent;
int ret;
- dirent = dirent_create_key(trans, dir, type, name, dst_inum);
+ if (hash_info->cf_encoding) {
+ struct qstr cf_name;
+ ret = bch2_casefold(trans, hash_info, name, &cf_name);
+ if (ret)
+ return ret;
+ dirent = dirent_create_key(trans, dir, type, name, &cf_name, dst_inum);
+ } else {
+ dirent = dirent_create_key(trans, dir, type, name, NULL, dst_inum);
+ }
+
ret = PTR_ERR_OR_ZERO(dirent);
if (ret)
return ret;
+ *i_size += bkey_bytes(&dirent->k);
+
ret = bch2_hash_set(trans, bch2_dirent_hash_desc, hash_info,
dir, &dirent->k_i, flags);
*dir_offset = dirent->k.p.offset;
@@ -275,12 +410,13 @@ int bch2_dirent_read_target(struct btree_trans *trans, subvol_inum dir,
}
int bch2_dirent_rename(struct btree_trans *trans,
- subvol_inum src_dir, struct bch_hash_info *src_hash,
- subvol_inum dst_dir, struct bch_hash_info *dst_hash,
+ subvol_inum src_dir, struct bch_hash_info *src_hash, u64 *src_dir_i_size,
+ subvol_inum dst_dir, struct bch_hash_info *dst_hash, u64 *dst_dir_i_size,
const struct qstr *src_name, subvol_inum *src_inum, u64 *src_offset,
const struct qstr *dst_name, subvol_inum *dst_inum, u64 *dst_offset,
enum bch_rename_mode mode)
{
+ struct qstr src_name_lookup, dst_name_lookup;
struct btree_iter src_iter = { NULL };
struct btree_iter dst_iter = { NULL };
struct bkey_s_c old_src, old_dst = bkey_s_c_null;
@@ -295,8 +431,11 @@ int bch2_dirent_rename(struct btree_trans *trans,
memset(dst_inum, 0, sizeof(*dst_inum));
/* Lookup src: */
+ ret = bch2_maybe_casefold(trans, src_hash, src_name, &src_name_lookup);
+ if (ret)
+ goto out;
old_src = bch2_hash_lookup(trans, &src_iter, bch2_dirent_hash_desc,
- src_hash, src_dir, src_name,
+ src_hash, src_dir, &src_name_lookup,
BTREE_ITER_intent);
ret = bkey_err(old_src);
if (ret)
@@ -308,6 +447,9 @@ int bch2_dirent_rename(struct btree_trans *trans,
goto out;
/* Lookup dst: */
+ ret = bch2_maybe_casefold(trans, dst_hash, dst_name, &dst_name_lookup);
+ if (ret)
+ goto out;
if (mode == BCH_RENAME) {
/*
* Note that we're _not_ checking if the target already exists -
@@ -315,12 +457,12 @@ int bch2_dirent_rename(struct btree_trans *trans,
* correctness:
*/
ret = bch2_hash_hole(trans, &dst_iter, bch2_dirent_hash_desc,
- dst_hash, dst_dir, dst_name);
+ dst_hash, dst_dir, &dst_name_lookup);
if (ret)
goto out;
} else {
old_dst = bch2_hash_lookup(trans, &dst_iter, bch2_dirent_hash_desc,
- dst_hash, dst_dir, dst_name,
+ dst_hash, dst_dir, &dst_name_lookup,
BTREE_ITER_intent);
ret = bkey_err(old_dst);
if (ret)
@@ -336,7 +478,8 @@ int bch2_dirent_rename(struct btree_trans *trans,
*src_offset = dst_iter.pos.offset;
/* Create new dst key: */
- new_dst = dirent_create_key(trans, dst_dir, 0, dst_name, 0);
+ new_dst = dirent_create_key(trans, dst_dir, 0, dst_name,
+ dst_hash->cf_encoding ? &dst_name_lookup : NULL, 0);
ret = PTR_ERR_OR_ZERO(new_dst);
if (ret)
goto out;
@@ -346,7 +489,8 @@ int bch2_dirent_rename(struct btree_trans *trans,
/* Create new src key: */
if (mode == BCH_RENAME_EXCHANGE) {
- new_src = dirent_create_key(trans, src_dir, 0, src_name, 0);
+ new_src = dirent_create_key(trans, src_dir, 0, src_name,
+ src_hash->cf_encoding ? &src_name_lookup : NULL, 0);
ret = PTR_ERR_OR_ZERO(new_src);
if (ret)
goto out;
@@ -406,6 +550,14 @@ int bch2_dirent_rename(struct btree_trans *trans,
new_src->v.d_type == DT_SUBVOL)
new_src->v.d_parent_subvol = cpu_to_le32(src_dir.subvol);
+ if (old_dst.k)
+ *dst_dir_i_size -= bkey_bytes(old_dst.k);
+ *src_dir_i_size -= bkey_bytes(old_src.k);
+
+ if (mode == BCH_RENAME_EXCHANGE)
+ *src_dir_i_size += bkey_bytes(&new_src->k);
+ *dst_dir_i_size += bkey_bytes(&new_dst->k);
+
ret = bch2_trans_update(trans, &dst_iter, &new_dst->k_i, 0);
if (ret)
goto out;
@@ -465,9 +617,14 @@ int bch2_dirent_lookup_trans(struct btree_trans *trans,
const struct qstr *name, subvol_inum *inum,
unsigned flags)
{
+ struct qstr lookup_name;
+ int ret = bch2_maybe_casefold(trans, hash_info, name, &lookup_name);
+ if (ret)
+ return ret;
+
struct bkey_s_c k = bch2_hash_lookup(trans, iter, bch2_dirent_hash_desc,
- hash_info, dir, name, flags);
- int ret = bkey_err(k);
+ hash_info, dir, &lookup_name, flags);
+ ret = bkey_err(k);
if (ret)
goto err;
@@ -572,3 +729,54 @@ int bch2_readdir(struct bch_fs *c, subvol_inum inum, struct dir_context *ctx)
return ret < 0 ? ret : 0;
}
+
+/* fsck */
+
+static int lookup_first_inode(struct btree_trans *trans, u64 inode_nr,
+ struct bch_inode_unpacked *inode)
+{
+ struct btree_iter iter;
+ struct bkey_s_c k;
+ int ret;
+
+ for_each_btree_key_norestart(trans, iter, BTREE_ID_inodes, POS(0, inode_nr),
+ BTREE_ITER_all_snapshots, k, ret) {
+ if (k.k->p.offset != inode_nr)
+ break;
+ if (!bkey_is_inode(k.k))
+ continue;
+ ret = bch2_inode_unpack(k, inode);
+ goto found;
+ }
+ ret = -BCH_ERR_ENOENT_inode;
+found:
+ bch_err_msg(trans->c, ret, "fetching inode %llu", inode_nr);
+ bch2_trans_iter_exit(trans, &iter);
+ return ret;
+}
+
+int bch2_fsck_remove_dirent(struct btree_trans *trans, struct bpos pos)
+{
+ struct bch_fs *c = trans->c;
+ struct btree_iter iter;
+ struct bch_inode_unpacked dir_inode;
+ struct bch_hash_info dir_hash_info;
+ int ret;
+
+ ret = lookup_first_inode(trans, pos.inode, &dir_inode);
+ if (ret)
+ goto err;
+
+ dir_hash_info = bch2_hash_info_init(c, &dir_inode);
+
+ bch2_trans_iter_init(trans, &iter, BTREE_ID_dirents, pos, BTREE_ITER_intent);
+
+ ret = bch2_btree_iter_traverse(&iter) ?:
+ bch2_hash_delete_at(trans, bch2_dirent_hash_desc,
+ &dir_hash_info, &iter,
+ BTREE_UPDATE_internal_snapshot_node);
+ bch2_trans_iter_exit(trans, &iter);
+err:
+ bch_err_fn(c, ret);
+ return ret;
+}
diff --git a/fs/bcachefs/dirent.h b/fs/bcachefs/dirent.h
index 362b3b2f2f2e..0880772b80a9 100644
--- a/fs/bcachefs/dirent.h
+++ b/fs/bcachefs/dirent.h
@@ -25,10 +25,13 @@ struct bch_inode_info;
struct qstr bch2_dirent_get_name(struct bkey_s_c_dirent d);
-static inline unsigned dirent_val_u64s(unsigned len)
+static inline unsigned dirent_val_u64s(unsigned len, unsigned cf_len)
{
- return DIV_ROUND_UP(offsetof(struct bch_dirent, d_name) + len,
- sizeof(u64));
+ unsigned bytes = cf_len
+ ? offsetof(struct bch_dirent, d_cf_name_block.d_names) + len + cf_len
+ : offsetof(struct bch_dirent, d_name) + len;
+
+ return DIV_ROUND_UP(bytes, sizeof(u64));
}
int bch2_dirent_read_target(struct btree_trans *, subvol_inum,
@@ -47,7 +50,7 @@ int bch2_dirent_create_snapshot(struct btree_trans *, u32, u64, u32,
enum btree_iter_update_trigger_flags);
int bch2_dirent_create(struct btree_trans *, subvol_inum,
const struct bch_hash_info *, u8,
- const struct qstr *, u64, u64 *,
+ const struct qstr *, u64, u64 *, u64 *,
enum btree_iter_update_trigger_flags);
static inline unsigned vfs_d_type(unsigned type)
@@ -62,8 +65,8 @@ enum bch_rename_mode {
};
int bch2_dirent_rename(struct btree_trans *,
- subvol_inum, struct bch_hash_info *,
- subvol_inum, struct bch_hash_info *,
+ subvol_inum, struct bch_hash_info *, u64 *,
+ subvol_inum, struct bch_hash_info *, u64 *,
const struct qstr *, subvol_inum *, u64 *,
const struct qstr *, subvol_inum *, u64 *,
enum bch_rename_mode);
@@ -79,4 +82,6 @@ int bch2_empty_dir_snapshot(struct btree_trans *, u64, u32, u32);
int bch2_empty_dir_trans(struct btree_trans *, subvol_inum);
int bch2_readdir(struct bch_fs *, subvol_inum, struct dir_context *);
+int bch2_fsck_remove_dirent(struct btree_trans *, struct bpos);
+
#endif /* _BCACHEFS_DIRENT_H */
diff --git a/fs/bcachefs/dirent_format.h b/fs/bcachefs/dirent_format.h
index 5e116b88e814..a46dbddd21aa 100644
--- a/fs/bcachefs/dirent_format.h
+++ b/fs/bcachefs/dirent_format.h
@@ -29,9 +29,25 @@ struct bch_dirent {
* Copy of mode bits 12-15 from the target inode - so userspace can get
* the filetype without having to do a stat()
*/
- __u8 d_type;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ __u8 d_type:5,
+ d_unused:2,
+ d_casefold:1;
+#elif defined(__BIG_ENDIAN_BITFIELD)
+ __u8 d_casefold:1,
+ d_unused:2,
+ d_type:5;
+#endif
- __u8 d_name[];
+ union {
+ struct {
+ __u8 d_pad;
+ __le16 d_name_len;
+ __le16 d_cf_name_len;
+ __u8 d_names[];
+ } d_cf_name_block __packed;
+ __DECLARE_FLEX_ARRAY(__u8, d_name);
+ } __packed;
} __packed __aligned(8);
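/*
 * Layout sketch for the casefolded case, as implied by
 * dirent_init_casefolded_name() in the dirent.c hunk of this patch (an
 * informal reading, not a format specification):
 *
 *	d_name_len    = cpu_to_le16(length of the original name)
 *	d_cf_name_len = cpu_to_le16(length of the casefolded name)
 *	d_names[]     = original name bytes, then casefolded name bytes,
 *			then zero padding to the end of the bkey value
 *
 * Hash lookups and comparisons use the casefolded copy; readdir still
 * returns the original name.
 */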
#define DT_SUBVOL 16
diff --git a/fs/bcachefs/disk_accounting.h b/fs/bcachefs/disk_accounting.h
index f4372cafea2e..f9214e2d1346 100644
--- a/fs/bcachefs/disk_accounting.h
+++ b/fs/bcachefs/disk_accounting.h
@@ -85,6 +85,24 @@ static inline struct bpos disk_accounting_pos_to_bpos(struct disk_accounting_pos
int bch2_disk_accounting_mod(struct btree_trans *, struct disk_accounting_pos *,
s64 *, unsigned, bool);
+
+#define disk_accounting_key_init(_k, _type, ...) \
+do { \
+ memset(&(_k), 0, sizeof(_k)); \
+ (_k).type = BCH_DISK_ACCOUNTING_##_type; \
+ (_k)._type = (struct bch_acct_##_type) { __VA_ARGS__ }; \
+} while (0)
+
+#define bch2_disk_accounting_mod2_nr(_trans, _gc, _v, _nr, ...) \
+({ \
+ struct disk_accounting_pos pos; \
+ disk_accounting_key_init(pos, __VA_ARGS__); \
+ bch2_disk_accounting_mod(trans, &pos, _v, _nr, _gc); \
+})
+
+#define bch2_disk_accounting_mod2(_trans, _gc, _v, ...) \
+ bch2_disk_accounting_mod2_nr(_trans, _gc, _v, ARRAY_SIZE(_v), __VA_ARGS__)
+
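/*
 * Usage sketch (a hypothetical call site; `trans`, `gc` and the deltas are
 * assumed to be in scope, and the accounting type names come from
 * disk_accounting_format.h):
 *
 *	s64 v[1] = { sectors };
 *	int ret = bch2_disk_accounting_mod2(trans, gc, v, persistent_reserved,
 *					    .nr_replicas = nr_replicas);
 *
 * This builds a disk_accounting_pos with
 * type == BCH_DISK_ACCOUNTING_persistent_reserved and
 * .persistent_reserved = { .nr_replicas = nr_replicas }, then calls
 * bch2_disk_accounting_mod(trans, &pos, v, ARRAY_SIZE(v), gc).
 */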
int bch2_mod_dev_cached_sectors(struct btree_trans *, unsigned, s64, bool);
int bch2_accounting_validate(struct bch_fs *, struct bkey_s_c,
diff --git a/fs/bcachefs/disk_accounting_format.h b/fs/bcachefs/disk_accounting_format.h
index 7b6e6c97e6aa..15190196485f 100644
--- a/fs/bcachefs/disk_accounting_format.h
+++ b/fs/bcachefs/disk_accounting_format.h
@@ -113,14 +113,14 @@ enum disk_accounting_type {
BCH_DISK_ACCOUNTING_TYPE_NR,
};
-struct bch_nr_inodes {
+struct bch_acct_nr_inodes {
};
-struct bch_persistent_reserved {
+struct bch_acct_persistent_reserved {
__u8 nr_replicas;
};
-struct bch_dev_data_type {
+struct bch_acct_dev_data_type {
__u8 dev;
__u8 data_type;
};
@@ -149,10 +149,10 @@ struct disk_accounting_pos {
struct {
__u8 type;
union {
- struct bch_nr_inodes nr_inodes;
- struct bch_persistent_reserved persistent_reserved;
+ struct bch_acct_nr_inodes nr_inodes;
+ struct bch_acct_persistent_reserved persistent_reserved;
struct bch_replicas_entry_v1 replicas;
- struct bch_dev_data_type dev_data_type;
+ struct bch_acct_dev_data_type dev_data_type;
struct bch_acct_compression compression;
struct bch_acct_snapshot snapshot;
struct bch_acct_btree btree;
diff --git a/fs/bcachefs/ec.c b/fs/bcachefs/ec.c
index d2a5e76e6479..f2b9225fe0bc 100644
--- a/fs/bcachefs/ec.c
+++ b/fs/bcachefs/ec.c
@@ -20,6 +20,7 @@
#include "io_read.h"
#include "io_write.h"
#include "keylist.h"
+#include "lru.h"
#include "recovery.h"
#include "replicas.h"
#include "super-io.h"
@@ -104,6 +105,7 @@ struct ec_bio {
struct bch_dev *ca;
struct ec_stripe_buf *buf;
size_t idx;
+ u64 submit_time;
struct bio bio;
};
@@ -298,10 +300,22 @@ static int mark_stripe_bucket(struct btree_trans *trans,
struct bpos bucket = PTR_BUCKET_POS(ca, ptr);
if (flags & BTREE_TRIGGER_transactional) {
+ struct extent_ptr_decoded p = {
+ .ptr = *ptr,
+ .crc = bch2_extent_crc_unpack(s.k, NULL),
+ };
+ struct bkey_i_backpointer bp;
+ bch2_extent_ptr_to_bp(c, BTREE_ID_stripes, 0, s.s_c, p,
+ (const union bch_extent_entry *) ptr, &bp);
+
struct bkey_i_alloc_v4 *a =
bch2_trans_start_alloc_update(trans, bucket, 0);
- ret = PTR_ERR_OR_ZERO(a) ?:
- __mark_stripe_bucket(trans, ca, s, ptr_idx, deleting, bucket, &a->v, flags);
+ ret = PTR_ERR_OR_ZERO(a) ?:
+ __mark_stripe_bucket(trans, ca, s, ptr_idx, deleting, bucket, &a->v, flags) ?:
+ bch2_bucket_backpointer_mod(trans, s.s_c, &bp,
+ !(flags & BTREE_TRIGGER_overwrite));
+ if (ret)
+ goto err;
}
if (flags & BTREE_TRIGGER_gc) {
@@ -366,19 +380,6 @@ static int mark_stripe_buckets(struct btree_trans *trans,
return 0;
}
-static inline void stripe_to_mem(struct stripe *m, const struct bch_stripe *s)
-{
- m->sectors = le16_to_cpu(s->sectors);
- m->algorithm = s->algorithm;
- m->nr_blocks = s->nr_blocks;
- m->nr_redundant = s->nr_redundant;
- m->disk_label = s->disk_label;
- m->blocks_nonempty = 0;
-
- for (unsigned i = 0; i < s->nr_blocks; i++)
- m->blocks_nonempty += !!stripe_blockcount_get(s, i);
-}
-
int bch2_trigger_stripe(struct btree_trans *trans,
enum btree_id btree, unsigned level,
struct bkey_s_c old, struct bkey_s _new,
@@ -399,6 +400,15 @@ int bch2_trigger_stripe(struct btree_trans *trans,
(new_s->nr_blocks != old_s->nr_blocks ||
new_s->nr_redundant != old_s->nr_redundant));
+ if (flags & BTREE_TRIGGER_transactional) {
+ int ret = bch2_lru_change(trans,
+ BCH_LRU_STRIPE_FRAGMENTATION,
+ idx,
+ stripe_lru_pos(old_s),
+ stripe_lru_pos(new_s));
+ if (ret)
+ return ret;
+ }
if (flags & (BTREE_TRIGGER_transactional|BTREE_TRIGGER_gc)) {
/*
@@ -472,38 +482,6 @@ int bch2_trigger_stripe(struct btree_trans *trans,
return ret;
}
- if (flags & BTREE_TRIGGER_atomic) {
- struct stripe *m = genradix_ptr(&c->stripes, idx);
-
- if (!m) {
- struct printbuf buf1 = PRINTBUF;
- struct printbuf buf2 = PRINTBUF;
-
- bch2_bkey_val_to_text(&buf1, c, old);
- bch2_bkey_val_to_text(&buf2, c, new);
- bch_err_ratelimited(c, "error marking nonexistent stripe %llu while marking\n"
- "old %s\n"
- "new %s", idx, buf1.buf, buf2.buf);
- printbuf_exit(&buf2);
- printbuf_exit(&buf1);
- bch2_inconsistent_error(c);
- return -1;
- }
-
- if (!new_s) {
- bch2_stripes_heap_del(c, m, idx);
-
- memset(m, 0, sizeof(*m));
- } else {
- stripe_to_mem(m, new_s);
-
- if (!old_s)
- bch2_stripes_heap_insert(c, m, idx);
- else
- bch2_stripes_heap_update(c, m, idx);
- }
- }
-
return 0;
}
@@ -726,14 +704,15 @@ static void ec_block_endio(struct bio *bio)
struct bch_dev *ca = ec_bio->ca;
struct closure *cl = bio->bi_private;
- if (bch2_dev_io_err_on(bio->bi_status, ca,
- bio_data_dir(bio)
- ? BCH_MEMBER_ERROR_write
- : BCH_MEMBER_ERROR_read,
- "erasure coding %s error: %s",
+ bch2_account_io_completion(ca, bio_data_dir(bio),
+ ec_bio->submit_time, !bio->bi_status);
+
+ if (bio->bi_status) {
+ bch_err_dev_ratelimited(ca, "erasure coding %s error: %s",
str_write_read(bio_data_dir(bio)),
- bch2_blk_status_to_str(bio->bi_status)))
+ bch2_blk_status_to_str(bio->bi_status));
clear_bit(ec_bio->idx, ec_bio->buf->valid);
+ }
int stale = dev_ptr_stale(ca, ptr);
if (stale) {
@@ -796,6 +775,7 @@ static void ec_block_io(struct bch_fs *c, struct ec_stripe_buf *buf,
ec_bio->ca = ca;
ec_bio->buf = buf;
ec_bio->idx = idx;
+ ec_bio->submit_time = local_clock();
ec_bio->bio.bi_iter.bi_sector = ptr->offset + buf->offset + (offset >> 9);
ec_bio->bio.bi_end_io = ec_block_endio;
@@ -917,26 +897,6 @@ err:
static int __ec_stripe_mem_alloc(struct bch_fs *c, size_t idx, gfp_t gfp)
{
- ec_stripes_heap n, *h = &c->ec_stripes_heap;
-
- if (idx >= h->size) {
- if (!init_heap(&n, max(1024UL, roundup_pow_of_two(idx + 1)), gfp))
- return -BCH_ERR_ENOMEM_ec_stripe_mem_alloc;
-
- mutex_lock(&c->ec_stripes_heap_lock);
- if (n.size > h->size) {
- memcpy(n.data, h->data, h->nr * sizeof(h->data[0]));
- n.nr = h->nr;
- swap(*h, n);
- }
- mutex_unlock(&c->ec_stripes_heap_lock);
-
- free_heap(&n);
- }
-
- if (!genradix_ptr_alloc(&c->stripes, idx, gfp))
- return -BCH_ERR_ENOMEM_ec_stripe_mem_alloc;
-
if (c->gc_pos.phase != GC_PHASE_not_running &&
!genradix_ptr_alloc(&c->gc_stripes, idx, gfp))
return -BCH_ERR_ENOMEM_ec_stripe_mem_alloc;
@@ -1009,180 +969,50 @@ static void bch2_stripe_close(struct bch_fs *c, struct ec_stripe_new *s)
s->idx = 0;
}
-/* Heap of all existing stripes, ordered by blocks_nonempty */
-
-static u64 stripe_idx_to_delete(struct bch_fs *c)
-{
- ec_stripes_heap *h = &c->ec_stripes_heap;
-
- lockdep_assert_held(&c->ec_stripes_heap_lock);
-
- if (h->nr &&
- h->data[0].blocks_nonempty == 0 &&
- !bch2_stripe_is_open(c, h->data[0].idx))
- return h->data[0].idx;
-
- return 0;
-}
-
-static inline void ec_stripes_heap_set_backpointer(ec_stripes_heap *h,
- size_t i)
-{
- struct bch_fs *c = container_of(h, struct bch_fs, ec_stripes_heap);
-
- genradix_ptr(&c->stripes, h->data[i].idx)->heap_idx = i;
-}
-
-static inline bool ec_stripes_heap_cmp(const void *l, const void *r, void __always_unused *args)
-{
- struct ec_stripe_heap_entry *_l = (struct ec_stripe_heap_entry *)l;
- struct ec_stripe_heap_entry *_r = (struct ec_stripe_heap_entry *)r;
-
- return ((_l->blocks_nonempty > _r->blocks_nonempty) <
- (_l->blocks_nonempty < _r->blocks_nonempty));
-}
-
-static inline void ec_stripes_heap_swap(void *l, void *r, void *h)
-{
- struct ec_stripe_heap_entry *_l = (struct ec_stripe_heap_entry *)l;
- struct ec_stripe_heap_entry *_r = (struct ec_stripe_heap_entry *)r;
- ec_stripes_heap *_h = (ec_stripes_heap *)h;
- size_t i = _l - _h->data;
- size_t j = _r - _h->data;
-
- swap(*_l, *_r);
-
- ec_stripes_heap_set_backpointer(_h, i);
- ec_stripes_heap_set_backpointer(_h, j);
-}
-
-static const struct min_heap_callbacks callbacks = {
- .less = ec_stripes_heap_cmp,
- .swp = ec_stripes_heap_swap,
-};
-
-static void heap_verify_backpointer(struct bch_fs *c, size_t idx)
-{
- ec_stripes_heap *h = &c->ec_stripes_heap;
- struct stripe *m = genradix_ptr(&c->stripes, idx);
-
- BUG_ON(m->heap_idx >= h->nr);
- BUG_ON(h->data[m->heap_idx].idx != idx);
-}
-
-void bch2_stripes_heap_del(struct bch_fs *c,
- struct stripe *m, size_t idx)
-{
- mutex_lock(&c->ec_stripes_heap_lock);
- heap_verify_backpointer(c, idx);
-
- min_heap_del(&c->ec_stripes_heap, m->heap_idx, &callbacks, &c->ec_stripes_heap);
- mutex_unlock(&c->ec_stripes_heap_lock);
-}
-
-void bch2_stripes_heap_insert(struct bch_fs *c,
- struct stripe *m, size_t idx)
-{
- mutex_lock(&c->ec_stripes_heap_lock);
- BUG_ON(min_heap_full(&c->ec_stripes_heap));
-
- genradix_ptr(&c->stripes, idx)->heap_idx = c->ec_stripes_heap.nr;
- min_heap_push(&c->ec_stripes_heap, &((struct ec_stripe_heap_entry) {
- .idx = idx,
- .blocks_nonempty = m->blocks_nonempty,
- }),
- &callbacks,
- &c->ec_stripes_heap);
-
- heap_verify_backpointer(c, idx);
- mutex_unlock(&c->ec_stripes_heap_lock);
-}
-
-void bch2_stripes_heap_update(struct bch_fs *c,
- struct stripe *m, size_t idx)
-{
- ec_stripes_heap *h = &c->ec_stripes_heap;
- bool do_deletes;
- size_t i;
-
- mutex_lock(&c->ec_stripes_heap_lock);
- heap_verify_backpointer(c, idx);
-
- h->data[m->heap_idx].blocks_nonempty = m->blocks_nonempty;
-
- i = m->heap_idx;
- min_heap_sift_up(h, i, &callbacks, &c->ec_stripes_heap);
- min_heap_sift_down(h, i, &callbacks, &c->ec_stripes_heap);
-
- heap_verify_backpointer(c, idx);
-
- do_deletes = stripe_idx_to_delete(c) != 0;
- mutex_unlock(&c->ec_stripes_heap_lock);
-
- if (do_deletes)
- bch2_do_stripe_deletes(c);
-}
-
/* stripe deletion */
static int ec_stripe_delete(struct btree_trans *trans, u64 idx)
{
- struct bch_fs *c = trans->c;
struct btree_iter iter;
- struct bkey_s_c k;
- struct bkey_s_c_stripe s;
- int ret;
-
- k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_stripes, POS(0, idx),
- BTREE_ITER_intent);
- ret = bkey_err(k);
+ struct bkey_s_c k = bch2_bkey_get_iter(trans, &iter,
+ BTREE_ID_stripes, POS(0, idx),
+ BTREE_ITER_intent);
+ int ret = bkey_err(k);
if (ret)
goto err;
- if (k.k->type != KEY_TYPE_stripe) {
- bch2_fs_inconsistent(c, "attempting to delete nonexistent stripe %llu", idx);
- ret = -EINVAL;
- goto err;
- }
-
- s = bkey_s_c_to_stripe(k);
- for (unsigned i = 0; i < s.v->nr_blocks; i++)
- if (stripe_blockcount_get(s.v, i)) {
- struct printbuf buf = PRINTBUF;
-
- bch2_bkey_val_to_text(&buf, c, k);
- bch2_fs_inconsistent(c, "attempting to delete nonempty stripe %s", buf.buf);
- printbuf_exit(&buf);
- ret = -EINVAL;
- goto err;
- }
-
- ret = bch2_btree_delete_at(trans, &iter, 0);
+ /*
+ * We expect write buffer races here.
+ * Important: check stripe_is_open with the stripe key locked:
+ */
+ if (k.k->type == KEY_TYPE_stripe &&
+ !bch2_stripe_is_open(trans->c, idx) &&
+ stripe_lru_pos(bkey_s_c_to_stripe(k).v) == 1)
+ ret = bch2_btree_delete_at(trans, &iter, 0);
err:
bch2_trans_iter_exit(trans, &iter);
return ret;
}
+/*
+ * XXX
+ * can we kill this and delete stripes from the trigger?
+ */
static void ec_stripe_delete_work(struct work_struct *work)
{
struct bch_fs *c =
container_of(work, struct bch_fs, ec_stripe_delete_work);
- while (1) {
- mutex_lock(&c->ec_stripes_heap_lock);
- u64 idx = stripe_idx_to_delete(c);
- mutex_unlock(&c->ec_stripes_heap_lock);
-
- if (!idx)
- break;
-
- int ret = bch2_trans_commit_do(c, NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
- ec_stripe_delete(trans, idx));
- bch_err_fn(c, ret);
- if (ret)
- break;
- }
-
+ bch2_trans_run(c,
+ bch2_btree_write_buffer_tryflush(trans) ?:
+ for_each_btree_key_max_commit(trans, lru_iter, BTREE_ID_lru,
+ lru_pos(BCH_LRU_STRIPE_FRAGMENTATION, 1, 0),
+ lru_pos(BCH_LRU_STRIPE_FRAGMENTATION, 1, LRU_TIME_MAX),
+ 0, lru_k,
+ NULL, NULL,
+ BCH_TRANS_COMMIT_no_enospc, ({
+ ec_stripe_delete(trans, lru_k.k->p.offset);
+ })));
bch2_write_ref_put(c, BCH_WRITE_REF_stripe_delete);
}
@@ -1294,7 +1124,7 @@ static int ec_stripe_update_extent(struct btree_trans *trans,
bch2_fs_inconsistent(c, "%s", buf.buf);
printbuf_exit(&buf);
- return -EIO;
+ return -BCH_ERR_erasure_coding_found_btree_node;
}
k = bch2_backpointer_get_key(trans, bp, &iter, BTREE_ITER_intent, last_flushed);
@@ -1360,7 +1190,7 @@ static int ec_stripe_update_bucket(struct btree_trans *trans, struct ec_stripe_b
struct bch_dev *ca = bch2_dev_tryget(c, ptr.dev);
if (!ca)
- return -EIO;
+ return -BCH_ERR_ENOENT_dev_not_found;
struct bpos bucket_pos = PTR_BUCKET_POS(ca, &ptr);
@@ -1380,8 +1210,12 @@ static int ec_stripe_update_bucket(struct btree_trans *trans, struct ec_stripe_b
if (bp_k.k->type != KEY_TYPE_backpointer)
continue;
+ struct bkey_s_c_backpointer bp = bkey_s_c_to_backpointer(bp_k);
+ if (bp.v->btree_id == BTREE_ID_stripes)
+ continue;
+
ec_stripe_update_extent(trans, ca, bucket_pos, ptr.gen, s,
- bkey_s_c_to_backpointer(bp_k), &last_flushed);
+ bp, &last_flushed);
}));
bch2_bkey_buf_exit(&last_flushed, c);
@@ -1393,21 +1227,19 @@ static int ec_stripe_update_extents(struct bch_fs *c, struct ec_stripe_buf *s)
{
struct btree_trans *trans = bch2_trans_get(c);
struct bch_stripe *v = &bkey_i_to_stripe(&s->key)->v;
- unsigned i, nr_data = v->nr_blocks - v->nr_redundant;
- int ret = 0;
+ unsigned nr_data = v->nr_blocks - v->nr_redundant;
- ret = bch2_btree_write_buffer_flush_sync(trans);
+ int ret = bch2_btree_write_buffer_flush_sync(trans);
if (ret)
goto err;
- for (i = 0; i < nr_data; i++) {
+ for (unsigned i = 0; i < nr_data; i++) {
ret = ec_stripe_update_bucket(trans, s, i);
if (ret)
break;
}
err:
bch2_trans_put(trans);
-
return ret;
}
@@ -1473,6 +1305,7 @@ static void ec_stripe_create(struct ec_stripe_new *s)
if (s->err) {
if (!bch2_err_matches(s->err, EROFS))
bch_err(c, "error creating stripe: error writing data buckets");
+ ret = s->err;
goto err;
}
@@ -1481,6 +1314,7 @@ static void ec_stripe_create(struct ec_stripe_new *s)
if (ec_do_recov(c, &s->existing_stripe)) {
bch_err(c, "error creating stripe: error reading existing stripe");
+ ret = -BCH_ERR_ec_block_read;
goto err;
}
@@ -1506,6 +1340,7 @@ static void ec_stripe_create(struct ec_stripe_new *s)
if (ec_nr_failed(&s->new_stripe)) {
bch_err(c, "error creating stripe: error writing redundancy buckets");
+ ret = -BCH_ERR_ec_block_write;
goto err;
}
@@ -1527,6 +1362,8 @@ static void ec_stripe_create(struct ec_stripe_new *s)
if (ret)
goto err;
err:
+ trace_stripe_create(c, s->idx, ret);
+
bch2_disk_reservation_put(c, &s->res);
for (i = 0; i < v->nr_blocks; i++)
@@ -1612,11 +1449,11 @@ static void ec_stripe_new_cancel(struct bch_fs *c, struct ec_stripe_head *h, int
ec_stripe_new_set_pending(c, h);
}
-void bch2_ec_bucket_cancel(struct bch_fs *c, struct open_bucket *ob)
+void bch2_ec_bucket_cancel(struct bch_fs *c, struct open_bucket *ob, int err)
{
struct ec_stripe_new *s = ob->ec;
- s->err = -EIO;
+ s->err = err;
}
void *bch2_writepoint_ec_buf(struct bch_fs *c, struct write_point *wp)
@@ -1968,39 +1805,40 @@ static int new_stripe_alloc_buckets(struct btree_trans *trans,
return 0;
}
-static s64 get_existing_stripe(struct bch_fs *c,
- struct ec_stripe_head *head)
+static int __get_existing_stripe(struct btree_trans *trans,
+ struct ec_stripe_head *head,
+ struct ec_stripe_buf *stripe,
+ u64 idx)
{
- ec_stripes_heap *h = &c->ec_stripes_heap;
- struct stripe *m;
- size_t heap_idx;
- u64 stripe_idx;
- s64 ret = -1;
-
- if (may_create_new_stripe(c))
- return -1;
+ struct bch_fs *c = trans->c;
- mutex_lock(&c->ec_stripes_heap_lock);
- for (heap_idx = 0; heap_idx < h->nr; heap_idx++) {
- /* No blocks worth reusing, stripe will just be deleted: */
- if (!h->data[heap_idx].blocks_nonempty)
- continue;
+ struct btree_iter iter;
+ struct bkey_s_c k = bch2_bkey_get_iter(trans, &iter,
+ BTREE_ID_stripes, POS(0, idx), 0);
+ int ret = bkey_err(k);
+ if (ret)
+ goto err;
- stripe_idx = h->data[heap_idx].idx;
+ /* We expect write buffer races here */
+ if (k.k->type != KEY_TYPE_stripe)
+ goto out;
- m = genradix_ptr(&c->stripes, stripe_idx);
+ struct bkey_s_c_stripe s = bkey_s_c_to_stripe(k);
+ if (stripe_lru_pos(s.v) <= 1)
+ goto out;
- if (m->disk_label == head->disk_label &&
- m->algorithm == head->algo &&
- m->nr_redundant == head->redundancy &&
- m->sectors == head->blocksize &&
- m->blocks_nonempty < m->nr_blocks - m->nr_redundant &&
- bch2_try_open_stripe(c, head->s, stripe_idx)) {
- ret = stripe_idx;
- break;
- }
+ if (s.v->disk_label == head->disk_label &&
+ s.v->algorithm == head->algo &&
+ s.v->nr_redundant == head->redundancy &&
+ le16_to_cpu(s.v->sectors) == head->blocksize &&
+ bch2_try_open_stripe(c, head->s, idx)) {
+ bkey_reassemble(&stripe->key, k);
+ ret = 1;
}
- mutex_unlock(&c->ec_stripes_heap_lock);
+out:
+ bch2_set_btree_iter_dontneed(&iter);
+err:
+ bch2_trans_iter_exit(trans, &iter);
return ret;
}
@@ -2052,24 +1890,33 @@ static int __bch2_ec_stripe_head_reuse(struct btree_trans *trans, struct ec_stri
struct ec_stripe_new *s)
{
struct bch_fs *c = trans->c;
- s64 idx;
- int ret;
/*
 * If we can't allocate a new stripe, and there are no stripes with empty
* blocks for us to reuse, that means we have to wait on copygc:
*/
- idx = get_existing_stripe(c, h);
- if (idx < 0)
- return -BCH_ERR_stripe_alloc_blocked;
+ if (may_create_new_stripe(c))
+ return -1;
- ret = get_stripe_key_trans(trans, idx, &s->existing_stripe);
- bch2_fs_fatal_err_on(ret && !bch2_err_matches(ret, BCH_ERR_transaction_restart), c,
- "reading stripe key: %s", bch2_err_str(ret));
- if (ret) {
- bch2_stripe_close(c, s);
- return ret;
+ struct btree_iter lru_iter;
+ struct bkey_s_c lru_k;
+ int ret = 0;
+
+ for_each_btree_key_max_norestart(trans, lru_iter, BTREE_ID_lru,
+ lru_pos(BCH_LRU_STRIPE_FRAGMENTATION, 2, 0),
+ lru_pos(BCH_LRU_STRIPE_FRAGMENTATION, 2, LRU_TIME_MAX),
+ 0, lru_k, ret) {
+ ret = __get_existing_stripe(trans, h, &s->existing_stripe, lru_k.k->p.offset);
+ if (ret)
+ break;
}
+ bch2_trans_iter_exit(trans, &lru_iter);
+ if (!ret)
+ ret = -BCH_ERR_stripe_alloc_blocked;
+ if (ret == 1)
+ ret = 0;
+ if (ret)
+ return ret;
return init_new_stripe_from_existing(c, s);
}
@@ -2367,46 +2214,7 @@ void bch2_fs_ec_flush(struct bch_fs *c)
int bch2_stripes_read(struct bch_fs *c)
{
- int ret = bch2_trans_run(c,
- for_each_btree_key(trans, iter, BTREE_ID_stripes, POS_MIN,
- BTREE_ITER_prefetch, k, ({
- if (k.k->type != KEY_TYPE_stripe)
- continue;
-
- ret = __ec_stripe_mem_alloc(c, k.k->p.offset, GFP_KERNEL);
- if (ret)
- break;
-
- struct stripe *m = genradix_ptr(&c->stripes, k.k->p.offset);
-
- stripe_to_mem(m, bkey_s_c_to_stripe(k).v);
-
- bch2_stripes_heap_insert(c, m, k.k->p.offset);
- 0;
- })));
- bch_err_fn(c, ret);
- return ret;
-}
-
-void bch2_stripes_heap_to_text(struct printbuf *out, struct bch_fs *c)
-{
- ec_stripes_heap *h = &c->ec_stripes_heap;
- struct stripe *m;
- size_t i;
-
- mutex_lock(&c->ec_stripes_heap_lock);
- for (i = 0; i < min_t(size_t, h->nr, 50); i++) {
- m = genradix_ptr(&c->stripes, h->data[i].idx);
-
- prt_printf(out, "%zu %u/%u+%u", h->data[i].idx,
- h->data[i].blocks_nonempty,
- m->nr_blocks - m->nr_redundant,
- m->nr_redundant);
- if (bch2_stripe_is_open(c, h->data[i].idx))
- prt_str(out, " open");
- prt_newline(out);
- }
- mutex_unlock(&c->ec_stripes_heap_lock);
+ return 0;
}
static void bch2_new_stripe_to_text(struct printbuf *out, struct bch_fs *c,
@@ -2477,15 +2285,12 @@ void bch2_fs_ec_exit(struct bch_fs *c)
BUG_ON(!list_empty(&c->ec_stripe_new_list));
- free_heap(&c->ec_stripes_heap);
- genradix_free(&c->stripes);
bioset_exit(&c->ec_bioset);
}
void bch2_fs_ec_init_early(struct bch_fs *c)
{
spin_lock_init(&c->ec_stripes_new_lock);
- mutex_init(&c->ec_stripes_heap_lock);
INIT_LIST_HEAD(&c->ec_stripe_head_list);
mutex_init(&c->ec_stripe_head_lock);
@@ -2503,3 +2308,40 @@ int bch2_fs_ec_init(struct bch_fs *c)
return bioset_init(&c->ec_bioset, 1, offsetof(struct ec_bio, bio),
BIOSET_NEED_BVECS);
}
+
+static int bch2_check_stripe_to_lru_ref(struct btree_trans *trans,
+ struct bkey_s_c k,
+ struct bkey_buf *last_flushed)
+{
+ if (k.k->type != KEY_TYPE_stripe)
+ return 0;
+
+ struct bkey_s_c_stripe s = bkey_s_c_to_stripe(k);
+
+ u64 lru_idx = stripe_lru_pos(s.v);
+ if (lru_idx) {
+ int ret = bch2_lru_check_set(trans, BCH_LRU_STRIPE_FRAGMENTATION,
+ k.k->p.offset, lru_idx, k, last_flushed);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
+int bch2_check_stripe_to_lru_refs(struct bch_fs *c)
+{
+ struct bkey_buf last_flushed;
+
+ bch2_bkey_buf_init(&last_flushed);
+ bkey_init(&last_flushed.k->k);
+
+ int ret = bch2_trans_run(c,
+ for_each_btree_key_commit(trans, iter, BTREE_ID_stripes,
+ POS_MIN, BTREE_ITER_prefetch, k,
+ NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
+ bch2_check_stripe_to_lru_ref(trans, k, &last_flushed)));
+
+ bch2_bkey_buf_exit(&last_flushed, c);
+ bch_err_fn(c, ret);
+ return ret;
+}
diff --git a/fs/bcachefs/ec.h b/fs/bcachefs/ec.h
index 583ca6a226da..62d27e04d763 100644
--- a/fs/bcachefs/ec.h
+++ b/fs/bcachefs/ec.h
@@ -92,6 +92,29 @@ static inline void stripe_csum_set(struct bch_stripe *s,
memcpy(stripe_csum(s, block, csum_idx), &csum, bch_crc_bytes[s->csum_type]);
}
+#define STRIPE_LRU_POS_EMPTY 1
+
+static inline u64 stripe_lru_pos(const struct bch_stripe *s)
+{
+ if (!s)
+ return 0;
+
+ unsigned nr_data = s->nr_blocks - s->nr_redundant, blocks_empty = 0;
+
+ for (unsigned i = 0; i < nr_data; i++)
+ blocks_empty += !stripe_blockcount_get(s, i);
+
+ /* Will be picked up by the stripe_delete worker */
+ if (blocks_empty == nr_data)
+ return STRIPE_LRU_POS_EMPTY;
+
+ if (!blocks_empty)
+ return 0;
+
+ /* invert: more blocks empty = reuse first */
+ return LRU_TIME_MAX - blocks_empty;
+}
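/*
 * Worked example (illustrative only): for a stripe with nr_blocks = 6 and
 * nr_redundant = 2, so nr_data = 4:
 *
 *	blocks_empty == 0    -> returns 0: fully used, not on the
 *				fragmentation LRU at all
 *	blocks_empty == 4    -> returns STRIPE_LRU_POS_EMPTY (1): picked up
 *				by the stripe delete worker
 *	blocks_empty == 1..3 -> returns LRU_TIME_MAX - blocks_empty: emptier
 *				stripes get a smaller LRU position, so stripe
 *				reuse prefers them (the "invert" above)
 */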
+
static inline bool __bch2_ptr_matches_stripe(const struct bch_extent_ptr *stripe_ptr,
const struct bch_extent_ptr *data_ptr,
unsigned sectors)
@@ -132,6 +155,20 @@ static inline bool bch2_ptr_matches_stripe_m(const struct gc_stripe *m,
m->sectors);
}
+static inline void gc_stripe_unlock(struct gc_stripe *s)
+{
+ BUILD_BUG_ON(!((union ulong_byte_assert) { .ulong = 1UL << BUCKET_LOCK_BITNR }).byte);
+
+ clear_bit_unlock(BUCKET_LOCK_BITNR, (void *) &s->lock);
+ wake_up_bit((void *) &s->lock, BUCKET_LOCK_BITNR);
+}
+
+static inline void gc_stripe_lock(struct gc_stripe *s)
+{
+ wait_on_bit_lock((void *) &s->lock, BUCKET_LOCK_BITNR,
+ TASK_UNINTERRUPTIBLE);
+}
+
struct bch_read_bio;
struct ec_stripe_buf {
@@ -212,7 +249,7 @@ int bch2_ec_read_extent(struct btree_trans *, struct bch_read_bio *, struct bkey
void *bch2_writepoint_ec_buf(struct bch_fs *, struct write_point *);
-void bch2_ec_bucket_cancel(struct bch_fs *, struct open_bucket *);
+void bch2_ec_bucket_cancel(struct bch_fs *, struct open_bucket *, int);
int bch2_ec_stripe_new_alloc(struct bch_fs *, struct ec_stripe_head *);
@@ -221,10 +258,6 @@ struct ec_stripe_head *bch2_ec_stripe_head_get(struct btree_trans *,
unsigned, unsigned, unsigned,
enum bch_watermark, struct closure *);
-void bch2_stripes_heap_update(struct bch_fs *, struct stripe *, size_t);
-void bch2_stripes_heap_del(struct bch_fs *, struct stripe *, size_t);
-void bch2_stripes_heap_insert(struct bch_fs *, struct stripe *, size_t);
-
void bch2_do_stripe_deletes(struct bch_fs *);
void bch2_ec_do_stripe_creates(struct bch_fs *);
void bch2_ec_stripe_new_free(struct bch_fs *, struct ec_stripe_new *);
@@ -261,11 +294,12 @@ void bch2_fs_ec_flush(struct bch_fs *);
int bch2_stripes_read(struct bch_fs *);
-void bch2_stripes_heap_to_text(struct printbuf *, struct bch_fs *);
void bch2_new_stripes_to_text(struct printbuf *, struct bch_fs *);
void bch2_fs_ec_exit(struct bch_fs *);
void bch2_fs_ec_init_early(struct bch_fs *);
int bch2_fs_ec_init(struct bch_fs *);
+int bch2_check_stripe_to_lru_refs(struct bch_fs *);
+
#endif /* _BCACHEFS_EC_H */
diff --git a/fs/bcachefs/ec_types.h b/fs/bcachefs/ec_types.h
index 8d1e70e830ac..06144bfd9c19 100644
--- a/fs/bcachefs/ec_types.h
+++ b/fs/bcachefs/ec_types.h
@@ -20,23 +20,15 @@ struct stripe {
};
struct gc_stripe {
+ u8 lock;
+ unsigned alive:1; /* does a corresponding key exist in stripes btree? */
u16 sectors;
-
u8 nr_blocks;
u8 nr_redundant;
-
- unsigned alive:1; /* does a corresponding key exist in stripes btree? */
u16 block_sectors[BCH_BKEY_PTRS_MAX];
struct bch_extent_ptr ptrs[BCH_BKEY_PTRS_MAX];
struct bch_replicas_padded r;
};
-struct ec_stripe_heap_entry {
- size_t idx;
- unsigned blocks_nonempty;
-};
-
-typedef DEFINE_MIN_HEAP(struct ec_stripe_heap_entry, ec_stripes_heap) ec_stripes_heap;
-
#endif /* _BCACHEFS_EC_TYPES_H */
diff --git a/fs/bcachefs/errcode.h b/fs/bcachefs/errcode.h
index 4590cd0c7c90..101806d7ebe1 100644
--- a/fs/bcachefs/errcode.h
+++ b/fs/bcachefs/errcode.h
@@ -116,9 +116,11 @@
x(ENOENT, ENOENT_snapshot_tree) \
x(ENOENT, ENOENT_dirent_doesnt_match_inode) \
x(ENOENT, ENOENT_dev_not_found) \
+ x(ENOENT, ENOENT_dev_bucket_not_found) \
x(ENOENT, ENOENT_dev_idx_not_found) \
x(ENOENT, ENOENT_inode_no_backpointer) \
x(ENOENT, ENOENT_no_snapshot_tree_subvol) \
+ x(ENOENT, btree_node_dying) \
x(ENOTEMPTY, ENOTEMPTY_dir_not_empty) \
x(ENOTEMPTY, ENOTEMPTY_subvol_not_empty) \
x(EEXIST, EEXIST_str_hash_set) \
@@ -180,6 +182,12 @@
x(EINVAL, not_in_recovery) \
x(EINVAL, cannot_rewind_recovery) \
x(0, data_update_done) \
+ x(BCH_ERR_data_update_done, data_update_done_would_block) \
+ x(BCH_ERR_data_update_done, data_update_done_unwritten) \
+ x(BCH_ERR_data_update_done, data_update_done_no_writes_needed) \
+ x(BCH_ERR_data_update_done, data_update_done_no_snapshot) \
+ x(BCH_ERR_data_update_done, data_update_done_no_dev_refs) \
+ x(BCH_ERR_data_update_done, data_update_done_no_rw_devs) \
x(EINVAL, device_state_not_allowed) \
x(EINVAL, member_info_missing) \
x(EINVAL, mismatched_block_size) \
@@ -200,6 +208,8 @@
x(EINVAL, no_resize_with_buckets_nouse) \
x(EINVAL, inode_unpack_error) \
x(EINVAL, varint_decode_error) \
+ x(EINVAL, erasure_coding_found_btree_node) \
+ x(EOPNOTSUPP, may_not_use_incompat_feature) \
x(EROFS, erofs_trans_commit) \
x(EROFS, erofs_no_writes) \
x(EROFS, erofs_journal_err) \
@@ -210,10 +220,18 @@
x(EROFS, insufficient_devices) \
x(0, operation_blocked) \
x(BCH_ERR_operation_blocked, btree_cache_cannibalize_lock_blocked) \
- x(BCH_ERR_operation_blocked, journal_res_get_blocked) \
- x(BCH_ERR_operation_blocked, journal_preres_get_blocked) \
- x(BCH_ERR_operation_blocked, bucket_alloc_blocked) \
- x(BCH_ERR_operation_blocked, stripe_alloc_blocked) \
+ x(BCH_ERR_operation_blocked, journal_res_blocked) \
+ x(BCH_ERR_journal_res_blocked, journal_blocked) \
+ x(BCH_ERR_journal_res_blocked, journal_max_in_flight) \
+ x(BCH_ERR_journal_res_blocked, journal_max_open) \
+ x(BCH_ERR_journal_res_blocked, journal_full) \
+ x(BCH_ERR_journal_res_blocked, journal_pin_full) \
+ x(BCH_ERR_journal_res_blocked, journal_buf_enomem) \
+ x(BCH_ERR_journal_res_blocked, journal_stuck) \
+ x(BCH_ERR_journal_res_blocked, journal_retry_open) \
+ x(BCH_ERR_journal_res_blocked, journal_preres_get_blocked) \
+ x(BCH_ERR_journal_res_blocked, bucket_alloc_blocked) \
+ x(BCH_ERR_journal_res_blocked, stripe_alloc_blocked) \
x(BCH_ERR_invalid, invalid_sb) \
x(BCH_ERR_invalid_sb, invalid_sb_magic) \
x(BCH_ERR_invalid_sb, invalid_sb_version) \
@@ -223,6 +241,7 @@
x(BCH_ERR_invalid_sb, invalid_sb_csum) \
x(BCH_ERR_invalid_sb, invalid_sb_block_size) \
x(BCH_ERR_invalid_sb, invalid_sb_uuid) \
+ x(BCH_ERR_invalid_sb, invalid_sb_offset) \
x(BCH_ERR_invalid_sb, invalid_sb_too_many_members) \
x(BCH_ERR_invalid_sb, invalid_sb_dev_idx) \
x(BCH_ERR_invalid_sb, invalid_sb_time_precision) \
@@ -250,6 +269,7 @@
x(BCH_ERR_operation_blocked, nocow_lock_blocked) \
x(EIO, journal_shutdown) \
x(EIO, journal_flush_err) \
+ x(EIO, journal_write_err) \
x(EIO, btree_node_read_err) \
x(BCH_ERR_btree_node_read_err, btree_node_read_err_cached) \
x(EIO, sb_not_downgraded) \
@@ -258,17 +278,52 @@
x(EIO, btree_node_read_validate_error) \
x(EIO, btree_need_topology_repair) \
x(EIO, bucket_ref_update) \
+ x(EIO, trigger_alloc) \
x(EIO, trigger_pointer) \
x(EIO, trigger_stripe_pointer) \
x(EIO, metadata_bucket_inconsistency) \
x(EIO, mark_stripe) \
x(EIO, stripe_reconstruct) \
x(EIO, key_type_error) \
- x(EIO, no_device_to_read_from) \
+ x(EIO, extent_poisened) \
x(EIO, missing_indirect_extent) \
x(EIO, invalidate_stripe_to_dev) \
x(EIO, no_encryption_key) \
x(EIO, insufficient_journal_devices) \
+ x(EIO, device_offline) \
+ x(EIO, EIO_fault_injected) \
+ x(EIO, ec_block_read) \
+ x(EIO, ec_block_write) \
+ x(EIO, recompute_checksum) \
+ x(EIO, decompress) \
+ x(BCH_ERR_decompress, decompress_exceeded_max_encoded_extent) \
+ x(BCH_ERR_decompress, decompress_lz4) \
+ x(BCH_ERR_decompress, decompress_gzip) \
+ x(BCH_ERR_decompress, decompress_zstd_src_len_bad) \
+ x(BCH_ERR_decompress, decompress_zstd) \
+ x(EIO, data_write) \
+ x(BCH_ERR_data_write, data_write_io) \
+ x(BCH_ERR_data_write, data_write_csum) \
+ x(BCH_ERR_data_write, data_write_invalid_ptr) \
+ x(BCH_ERR_data_write, data_write_misaligned) \
+ x(BCH_ERR_decompress, data_read) \
+ x(BCH_ERR_data_read, no_device_to_read_from) \
+ x(BCH_ERR_data_read, data_read_io_err) \
+ x(BCH_ERR_data_read, data_read_csum_err) \
+ x(BCH_ERR_data_read, data_read_retry) \
+ x(BCH_ERR_data_read_retry, data_read_retry_avoid) \
+ x(BCH_ERR_data_read_retry_avoid,data_read_retry_device_offline) \
+ x(BCH_ERR_data_read_retry_avoid,data_read_retry_io_err) \
+ x(BCH_ERR_data_read_retry_avoid,data_read_retry_ec_reconstruct_err) \
+ x(BCH_ERR_data_read_retry_avoid,data_read_retry_csum_err) \
+ x(BCH_ERR_data_read_retry, data_read_retry_csum_err_maybe_userspace)\
+ x(BCH_ERR_data_read, data_read_decompress_err) \
+ x(BCH_ERR_data_read, data_read_decrypt_err) \
+ x(BCH_ERR_data_read, data_read_ptr_stale_race) \
+ x(BCH_ERR_data_read_retry, data_read_ptr_stale_retry) \
+ x(BCH_ERR_data_read, data_read_no_encryption_key) \
+ x(BCH_ERR_data_read, data_read_buffer_too_small) \
+ x(BCH_ERR_data_read, data_read_key_overwritten) \
x(BCH_ERR_btree_node_read_err, btree_node_read_err_fixable) \
x(BCH_ERR_btree_node_read_err, btree_node_read_err_want_retry) \
x(BCH_ERR_btree_node_read_err, btree_node_read_err_must_retry) \
diff --git a/fs/bcachefs/error.c b/fs/bcachefs/error.c
index 038da6a61f6b..207f35d3cce2 100644
--- a/fs/bcachefs/error.c
+++ b/fs/bcachefs/error.c
@@ -3,8 +3,8 @@
#include "btree_cache.h"
#include "btree_iter.h"
#include "error.h"
-#include "fs-common.h"
#include "journal.h"
+#include "namei.h"
#include "recovery_passes.h"
#include "super.h"
#include "thread_with_file.h"
@@ -54,25 +54,41 @@ void bch2_io_error_work(struct work_struct *work)
{
struct bch_dev *ca = container_of(work, struct bch_dev, io_error_work);
struct bch_fs *c = ca->fs;
- bool dev;
+
+ /* XXX: if it's reads or checksums that are failing, set it to failed */
down_write(&c->state_lock);
- dev = bch2_dev_state_allowed(c, ca, BCH_MEMBER_STATE_ro,
- BCH_FORCE_IF_DEGRADED);
- if (dev
- ? __bch2_dev_set_state(c, ca, BCH_MEMBER_STATE_ro,
- BCH_FORCE_IF_DEGRADED)
- : bch2_fs_emergency_read_only(c))
+ unsigned long write_errors_start = READ_ONCE(ca->write_errors_start);
+
+ if (write_errors_start &&
+ time_after(jiffies,
+ write_errors_start + c->opts.write_error_timeout * HZ)) {
+ if (ca->mi.state >= BCH_MEMBER_STATE_ro)
+ goto out;
+
+ bool dev = !__bch2_dev_set_state(c, ca, BCH_MEMBER_STATE_ro,
+ BCH_FORCE_IF_DEGRADED);
+
bch_err(ca,
- "too many IO errors, setting %s RO",
+ "writes erroring for %u seconds, setting %s ro",
+ c->opts.write_error_timeout,
dev ? "device" : "filesystem");
+ if (!dev)
+ bch2_fs_emergency_read_only(c);
+
+ }
+out:
up_write(&c->state_lock);
}
void bch2_io_error(struct bch_dev *ca, enum bch_member_error_type type)
{
atomic64_inc(&ca->errors[type]);
- //queue_work(system_long_wq, &ca->io_error_work);
+
+ if (type == BCH_MEMBER_ERROR_write && !ca->write_errors_start)
+ ca->write_errors_start = jiffies;
+
+ queue_work(system_long_wq, &ca->io_error_work);
}
enum ask_yn {
@@ -530,35 +546,59 @@ void bch2_flush_fsck_errs(struct bch_fs *c)
mutex_unlock(&c->fsck_error_msgs_lock);
}
-int bch2_inum_err_msg_trans(struct btree_trans *trans, struct printbuf *out, subvol_inum inum)
+int bch2_inum_offset_err_msg_trans(struct btree_trans *trans, struct printbuf *out,
+ subvol_inum inum, u64 offset)
{
u32 restart_count = trans->restart_count;
int ret = 0;
- /* XXX: we don't yet attempt to print paths when we don't know the subvol */
- if (inum.subvol)
- ret = lockrestart_do(trans, bch2_inum_to_path(trans, inum, out));
+ if (inum.subvol) {
+ ret = bch2_inum_to_path(trans, inum, out);
+ if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
+ return ret;
+ }
if (!inum.subvol || ret)
prt_printf(out, "inum %llu:%llu", inum.subvol, inum.inum);
+ prt_printf(out, " offset %llu: ", offset);
return trans_was_restarted(trans, restart_count);
}
-int bch2_inum_offset_err_msg_trans(struct btree_trans *trans, struct printbuf *out,
- subvol_inum inum, u64 offset)
+void bch2_inum_offset_err_msg(struct bch_fs *c, struct printbuf *out,
+ subvol_inum inum, u64 offset)
{
- int ret = bch2_inum_err_msg_trans(trans, out, inum);
- prt_printf(out, " offset %llu: ", offset);
- return ret;
+ bch2_trans_do(c, bch2_inum_offset_err_msg_trans(trans, out, inum, offset));
}
-void bch2_inum_err_msg(struct bch_fs *c, struct printbuf *out, subvol_inum inum)
+int bch2_inum_snap_offset_err_msg_trans(struct btree_trans *trans, struct printbuf *out,
+ struct bpos pos)
{
- bch2_trans_run(c, bch2_inum_err_msg_trans(trans, out, inum));
+ struct bch_fs *c = trans->c;
+ int ret = 0;
+
+ if (!bch2_snapshot_is_leaf(c, pos.snapshot))
+ prt_str(out, "(multiple snapshots) ");
+
+ subvol_inum inum = {
+ .subvol = bch2_snapshot_tree_oldest_subvol(c, pos.snapshot),
+ .inum = pos.inode,
+ };
+
+ if (inum.subvol) {
+ ret = bch2_inum_to_path(trans, inum, out);
+ if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
+ return ret;
+ }
+
+ if (!inum.subvol || ret)
+ prt_printf(out, "inum %llu:%u", pos.inode, pos.snapshot);
+
+ prt_printf(out, " offset %llu: ", pos.offset << 8);
+ return 0;
}
-void bch2_inum_offset_err_msg(struct bch_fs *c, struct printbuf *out,
- subvol_inum inum, u64 offset)
+void bch2_inum_snap_offset_err_msg(struct bch_fs *c, struct printbuf *out,
+ struct bpos pos)
{
- bch2_trans_run(c, bch2_inum_offset_err_msg_trans(trans, out, inum, offset));
+ bch2_trans_do(c, bch2_inum_snap_offset_err_msg_trans(trans, out, pos));
}
diff --git a/fs/bcachefs/error.h b/fs/bcachefs/error.h
index 7acf2a27ca28..7d3f0e2a5fd6 100644
--- a/fs/bcachefs/error.h
+++ b/fs/bcachefs/error.h
@@ -216,32 +216,43 @@ void bch2_io_error_work(struct work_struct *);
/* Does the error handling without logging a message */
void bch2_io_error(struct bch_dev *, enum bch_member_error_type);
-#define bch2_dev_io_err_on(cond, ca, _type, ...) \
-({ \
- bool _ret = (cond); \
- \
- if (_ret) { \
- bch_err_dev_ratelimited(ca, __VA_ARGS__); \
- bch2_io_error(ca, _type); \
- } \
- _ret; \
-})
-
-#define bch2_dev_inum_io_err_on(cond, ca, _type, ...) \
-({ \
- bool _ret = (cond); \
- \
- if (_ret) { \
- bch_err_inum_offset_ratelimited(ca, __VA_ARGS__); \
- bch2_io_error(ca, _type); \
- } \
- _ret; \
-})
+#ifndef CONFIG_BCACHEFS_NO_LATENCY_ACCT
+void bch2_latency_acct(struct bch_dev *, u64, int);
+#else
+static inline void bch2_latency_acct(struct bch_dev *ca, u64 submit_time, int rw) {}
+#endif
+
+static inline void bch2_account_io_success_fail(struct bch_dev *ca,
+ enum bch_member_error_type type,
+ bool success)
+{
+ if (likely(success)) {
+ if (type == BCH_MEMBER_ERROR_write &&
+ ca->write_errors_start)
+ ca->write_errors_start = 0;
+ } else {
+ bch2_io_error(ca, type);
+ }
+}
+
+static inline void bch2_account_io_completion(struct bch_dev *ca,
+ enum bch_member_error_type type,
+ u64 submit_time, bool success)
+{
+ if (unlikely(!ca))
+ return;
+
+ if (type != BCH_MEMBER_ERROR_checksum)
+ bch2_latency_acct(ca, submit_time, type);
+
+ bch2_account_io_success_fail(ca, type, success);
+}
-int bch2_inum_err_msg_trans(struct btree_trans *, struct printbuf *, subvol_inum);
int bch2_inum_offset_err_msg_trans(struct btree_trans *, struct printbuf *, subvol_inum, u64);
-void bch2_inum_err_msg(struct bch_fs *, struct printbuf *, subvol_inum);
void bch2_inum_offset_err_msg(struct bch_fs *, struct printbuf *, subvol_inum, u64);
+int bch2_inum_snap_offset_err_msg_trans(struct btree_trans *, struct printbuf *, struct bpos);
+void bch2_inum_snap_offset_err_msg(struct bch_fs *, struct printbuf *, struct bpos);
+
#endif /* _BCACHEFS_ERROR_H */
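A hypothetical completion-handler sketch (illustrative; example_write_endio and its arguments are not from the patch): callers that previously used the removed bch2_dev_io_err_on() macros now log their own message and call bch2_account_io_completion(), which does latency accounting (skipped for checksum errors) and bumps the per-device error counters via bch2_io_error() on failure:

static void example_write_endio(struct bch_dev *ca, u64 submit_time, bool failed)
{
	if (failed)
		bch_err_dev_ratelimited(ca, "write IO error");

	bch2_account_io_completion(ca, BCH_MEMBER_ERROR_write,
				   submit_time, !failed);
}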
diff --git a/fs/bcachefs/extents.c b/fs/bcachefs/extents.c
index 05d5f71a7ca9..ae1a1d917805 100644
--- a/fs/bcachefs/extents.c
+++ b/fs/bcachefs/extents.c
@@ -28,6 +28,13 @@
#include "trace.h"
#include "util.h"
+static const char * const bch2_extent_flags_strs[] = {
+#define x(n, v) [BCH_EXTENT_FLAG_##n] = #n,
+ BCH_EXTENT_FLAGS()
+#undef x
+ NULL,
+};
+
static unsigned bch2_crc_field_size_max[] = {
[BCH_EXTENT_ENTRY_crc32] = CRC32_SIZE_MAX,
[BCH_EXTENT_ENTRY_crc64] = CRC64_SIZE_MAX,
@@ -51,7 +58,8 @@ struct bch_dev_io_failures *bch2_dev_io_failures(struct bch_io_failures *f,
}
void bch2_mark_io_failure(struct bch_io_failures *failed,
- struct extent_ptr_decoded *p)
+ struct extent_ptr_decoded *p,
+ bool csum_error)
{
struct bch_dev_io_failures *f = bch2_dev_io_failures(failed, p->ptr.dev);
@@ -59,53 +67,57 @@ void bch2_mark_io_failure(struct bch_io_failures *failed,
BUG_ON(failed->nr >= ARRAY_SIZE(failed->devs));
f = &failed->devs[failed->nr++];
- f->dev = p->ptr.dev;
- f->idx = p->idx;
- f->nr_failed = 1;
- f->nr_retries = 0;
- } else if (p->idx != f->idx) {
- f->idx = p->idx;
- f->nr_failed = 1;
- f->nr_retries = 0;
- } else {
- f->nr_failed++;
+ memset(f, 0, sizeof(*f));
+ f->dev = p->ptr.dev;
}
+
+ if (p->do_ec_reconstruct)
+ f->failed_ec = true;
+ else if (!csum_error)
+ f->failed_io = true;
+ else
+ f->failed_csum_nr++;
}
-static inline u64 dev_latency(struct bch_fs *c, unsigned dev)
+static inline u64 dev_latency(struct bch_dev *ca)
{
- struct bch_dev *ca = bch2_dev_rcu(c, dev);
return ca ? atomic64_read(&ca->cur_latency[READ]) : S64_MAX;
}
+static inline int dev_failed(struct bch_dev *ca)
+{
+ return !ca || ca->mi.state == BCH_MEMBER_STATE_failed;
+}
+
/*
* returns true if p1 is better than p2:
*/
static inline bool ptr_better(struct bch_fs *c,
const struct extent_ptr_decoded p1,
- const struct extent_ptr_decoded p2)
+ u64 p1_latency,
+ struct bch_dev *ca1,
+ const struct extent_ptr_decoded p2,
+ u64 p2_latency)
{
- if (likely(!p1.idx && !p2.idx)) {
- u64 l1 = dev_latency(c, p1.ptr.dev);
- u64 l2 = dev_latency(c, p2.ptr.dev);
+ struct bch_dev *ca2 = bch2_dev_rcu(c, p2.ptr.dev);
- /*
- * Square the latencies, to bias more in favor of the faster
- * device - we never want to stop issuing reads to the slower
- * device altogether, so that we can update our latency numbers:
- */
- l1 *= l1;
- l2 *= l2;
+ int failed_delta = dev_failed(ca1) - dev_failed(ca2);
+ if (unlikely(failed_delta))
+ return failed_delta < 0;
- /* Pick at random, biased in favor of the faster device: */
+ if (unlikely(bch2_force_reconstruct_read))
+ return p1.do_ec_reconstruct > p2.do_ec_reconstruct;
- return bch2_rand_range(l1 + l2) > l1;
- }
+ if (unlikely(p1.do_ec_reconstruct || p2.do_ec_reconstruct))
+ return p1.do_ec_reconstruct < p2.do_ec_reconstruct;
+
+ int crc_retry_delta = (int) p1.crc_retry_nr - (int) p2.crc_retry_nr;
+ if (unlikely(crc_retry_delta))
+ return crc_retry_delta < 0;
- if (bch2_force_reconstruct_read)
- return p1.idx > p2.idx;
+ /* Pick at random, biased in favor of the faster device: */
- return p1.idx < p2.idx;
+ return bch2_get_random_u64_below(p1_latency + p2_latency) > p1_latency;
}
/*
@@ -115,64 +127,108 @@ static inline bool ptr_better(struct bch_fs *c,
*/
int bch2_bkey_pick_read_device(struct bch_fs *c, struct bkey_s_c k,
struct bch_io_failures *failed,
- struct extent_ptr_decoded *pick)
+ struct extent_ptr_decoded *pick,
+ int dev)
{
- struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
- const union bch_extent_entry *entry;
- struct extent_ptr_decoded p;
- struct bch_dev_io_failures *f;
- int ret = 0;
+ bool have_csum_errors = false, have_io_errors = false, have_missing_devs = false;
+ bool have_dirty_ptrs = false, have_pick = false;
if (k.k->type == KEY_TYPE_error)
return -BCH_ERR_key_type_error;
+ struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
+
+ if (bch2_bkey_extent_ptrs_flags(ptrs) & BIT_ULL(BCH_EXTENT_FLAG_poisoned))
+ return -BCH_ERR_extent_poisened;
+
rcu_read_lock();
+ const union bch_extent_entry *entry;
+ struct extent_ptr_decoded p;
+ u64 pick_latency;
+
bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
+ have_dirty_ptrs |= !p.ptr.cached;
+
/*
* Unwritten extent: no need to actually read, treat it as a
* hole and return 0s:
*/
if (p.ptr.unwritten) {
- ret = 0;
- break;
+ rcu_read_unlock();
+ return 0;
}
- /*
- * If there are any dirty pointers it's an error if we can't
- * read:
- */
- if (!ret && !p.ptr.cached)
- ret = -BCH_ERR_no_device_to_read_from;
+ /* Are we being asked to read from a specific device? */
+ if (dev >= 0 && p.ptr.dev != dev)
+ continue;
struct bch_dev *ca = bch2_dev_rcu(c, p.ptr.dev);
if (p.ptr.cached && (!ca || dev_ptr_stale_rcu(ca, &p.ptr)))
continue;
- f = failed ? bch2_dev_io_failures(failed, p.ptr.dev) : NULL;
- if (f)
- p.idx = f->nr_failed < f->nr_retries
- ? f->idx
- : f->idx + 1;
+ struct bch_dev_io_failures *f =
+ unlikely(failed) ? bch2_dev_io_failures(failed, p.ptr.dev) : NULL;
+ if (unlikely(f)) {
+ p.crc_retry_nr = f->failed_csum_nr;
+ p.has_ec &= ~f->failed_ec;
- if (!p.idx && (!ca || !bch2_dev_is_readable(ca)))
- p.idx++;
+ if (ca && ca->mi.state != BCH_MEMBER_STATE_failed) {
+ have_io_errors |= f->failed_io;
+ have_io_errors |= f->failed_ec;
+ }
+ have_csum_errors |= !!f->failed_csum_nr;
- if (!p.idx && p.has_ec && bch2_force_reconstruct_read)
- p.idx++;
+ if (p.has_ec && (f->failed_io || f->failed_csum_nr))
+ p.do_ec_reconstruct = true;
+ else if (f->failed_io ||
+ f->failed_csum_nr > c->opts.checksum_err_retry_nr)
+ continue;
+ }
- if (p.idx > (unsigned) p.has_ec)
- continue;
+ have_missing_devs |= ca && !bch2_dev_is_online(ca);
- if (ret > 0 && !ptr_better(c, p, *pick))
- continue;
+ if (!ca || !bch2_dev_is_online(ca)) {
+ if (!p.has_ec)
+ continue;
+ p.do_ec_reconstruct = true;
+ }
+
+ if (bch2_force_reconstruct_read && p.has_ec)
+ p.do_ec_reconstruct = true;
- *pick = p;
- ret = 1;
+ u64 p_latency = dev_latency(ca);
+ /*
+ * Square the latencies, to bias more in favor of the faster
+ * device - we never want to stop issuing reads to the slower
+ * device altogether, so that we can update our latency numbers:
+ */
+ p_latency *= p_latency;
+
+ if (!have_pick ||
+ ptr_better(c,
+ p, p_latency, ca,
+ *pick, pick_latency)) {
+ *pick = p;
+ pick_latency = p_latency;
+ have_pick = true;
+ }
}
rcu_read_unlock();
- return ret;
+ if (have_pick)
+ return 1;
+ if (!have_dirty_ptrs)
+ return 0;
+ if (have_missing_devs)
+ return -BCH_ERR_no_device_to_read_from;
+ if (have_csum_errors)
+ return -BCH_ERR_data_read_csum_err;
+ if (have_io_errors)
+ return -BCH_ERR_data_read_io_err;
+
+ WARN_ONCE(1, "unhandled error case in %s\n", __func__);
+ return -EINVAL;
}
/* KEY_TYPE_btree_ptr: */
@@ -536,29 +592,35 @@ static void bch2_extent_crc_pack(union bch_extent_crc *dst,
struct bch_extent_crc_unpacked src,
enum bch_extent_entry_type type)
{
-#define set_common_fields(_dst, _src) \
- _dst.type = 1 << type; \
- _dst.csum_type = _src.csum_type, \
- _dst.compression_type = _src.compression_type, \
- _dst._compressed_size = _src.compressed_size - 1, \
- _dst._uncompressed_size = _src.uncompressed_size - 1, \
- _dst.offset = _src.offset
+#define common_fields(_src) \
+ .type = BIT(type), \
+ .csum_type = _src.csum_type, \
+ .compression_type = _src.compression_type, \
+ ._compressed_size = _src.compressed_size - 1, \
+ ._uncompressed_size = _src.uncompressed_size - 1, \
+ .offset = _src.offset
switch (type) {
case BCH_EXTENT_ENTRY_crc32:
- set_common_fields(dst->crc32, src);
- dst->crc32.csum = (u32 __force) *((__le32 *) &src.csum.lo);
+ dst->crc32 = (struct bch_extent_crc32) {
+ common_fields(src),
+ .csum = (u32 __force) *((__le32 *) &src.csum.lo),
+ };
break;
case BCH_EXTENT_ENTRY_crc64:
- set_common_fields(dst->crc64, src);
- dst->crc64.nonce = src.nonce;
- dst->crc64.csum_lo = (u64 __force) src.csum.lo;
- dst->crc64.csum_hi = (u64 __force) *((__le16 *) &src.csum.hi);
+ dst->crc64 = (struct bch_extent_crc64) {
+ common_fields(src),
+ .nonce = src.nonce,
+ .csum_lo = (u64 __force) src.csum.lo,
+ .csum_hi = (u64 __force) *((__le16 *) &src.csum.hi),
+ };
break;
case BCH_EXTENT_ENTRY_crc128:
- set_common_fields(dst->crc128, src);
- dst->crc128.nonce = src.nonce;
- dst->crc128.csum = src.csum;
+ dst->crc128 = (struct bch_extent_crc128) {
+ common_fields(src),
+ .nonce = src.nonce,
+ .csum = src.csum,
+ };
break;
default:
BUG();
@@ -997,7 +1059,7 @@ static bool want_cached_ptr(struct bch_fs *c, struct bch_io_opts *opts,
struct bch_dev *ca = bch2_dev_rcu_noerror(c, ptr->dev);
- return ca && bch2_dev_is_readable(ca) && !dev_ptr_stale_rcu(ca, ptr);
+ return ca && bch2_dev_is_healthy(ca) && !dev_ptr_stale_rcu(ca, ptr);
}
void bch2_extent_ptr_set_cached(struct bch_fs *c,
@@ -1220,6 +1282,10 @@ void bch2_bkey_ptrs_to_text(struct printbuf *out, struct bch_fs *c,
bch2_extent_rebalance_to_text(out, c, &entry->rebalance);
break;
+ case BCH_EXTENT_ENTRY_flags:
+ prt_bitflags(out, bch2_extent_flags_strs, entry->flags.flags);
+ break;
+
default:
prt_printf(out, "(invalid extent entry %.16llx)", *((u64 *) entry));
return;
@@ -1381,6 +1447,11 @@ int bch2_bkey_ptrs_validate(struct bch_fs *c, struct bkey_s_c k,
#endif
break;
}
+ case BCH_EXTENT_ENTRY_flags:
+ bkey_fsck_err_on(entry != ptrs.start,
+ c, extent_flags_not_at_start,
+ "extent flags entry not at start");
+ break;
}
}
@@ -1447,6 +1518,28 @@ void bch2_ptr_swab(struct bkey_s k)
}
}
+int bch2_bkey_extent_flags_set(struct bch_fs *c, struct bkey_i *k, u64 flags)
+{
+ int ret = bch2_request_incompat_feature(c, bcachefs_metadata_version_extent_flags);
+ if (ret)
+ return ret;
+
+ struct bkey_ptrs ptrs = bch2_bkey_ptrs(bkey_i_to_s(k));
+
+ if (ptrs.start != ptrs.end &&
+ extent_entry_type(ptrs.start) == BCH_EXTENT_ENTRY_flags) {
+ ptrs.start->flags.flags = flags;
+ } else {
+ struct bch_extent_flags f = {
+ .type = BIT(BCH_EXTENT_ENTRY_flags),
+ .flags = flags,
+ };
+ __extent_entry_insert(k, ptrs.start, (union bch_extent_entry *) &f);
+ }
+
+ return 0;
+}
+
/* Generic extent code: */
int bch2_cut_front_s(struct bpos where, struct bkey_s k)
@@ -1492,8 +1585,8 @@ int bch2_cut_front_s(struct bpos where, struct bkey_s k)
entry->crc128.offset += sub;
break;
case BCH_EXTENT_ENTRY_stripe_ptr:
- break;
case BCH_EXTENT_ENTRY_rebalance:
+ case BCH_EXTENT_ENTRY_flags:
break;
}
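A worked example (illustrative, assuming the bcachefs utility headers) of the biased pick in ptr_better(): latencies are squared before the weighted coin flip, so with device latencies l1 and l2 the probability of choosing p1 is roughly l2^2 / (l1^2 + l2^2). E.g. l1 = 1ms and l2 = 2ms gives 4/5, so the faster device gets ~80% of reads while the slower one still sees enough I/O to keep its latency numbers current:

static bool example_pick_p1(u64 l1, u64 l2)
{
	/* square, as bch2_bkey_pick_read_device() does before calling ptr_better() */
	u64 p1_latency = l1 * l1;
	u64 p2_latency = l2 * l2;

	/* same weighted coin flip as ptr_better() */
	return bch2_get_random_u64_below(p1_latency + p2_latency) > p1_latency;
}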
diff --git a/fs/bcachefs/extents.h b/fs/bcachefs/extents.h
index 204d765dd74c..e78a39e7e18f 100644
--- a/fs/bcachefs/extents.h
+++ b/fs/bcachefs/extents.h
@@ -320,8 +320,9 @@ static inline struct bkey_ptrs bch2_bkey_ptrs(struct bkey_s k)
({ \
__label__ out; \
\
- (_ptr).idx = 0; \
- (_ptr).has_ec = false; \
+ (_ptr).has_ec = false; \
+ (_ptr).do_ec_reconstruct = false; \
+ (_ptr).crc_retry_nr = 0; \
\
__bkey_extent_entry_for_each_from(_entry, _end, _entry) \
switch (__extent_entry_type(_entry)) { \
@@ -401,10 +402,10 @@ out: \
struct bch_dev_io_failures *bch2_dev_io_failures(struct bch_io_failures *,
unsigned);
void bch2_mark_io_failure(struct bch_io_failures *,
- struct extent_ptr_decoded *);
+ struct extent_ptr_decoded *, bool);
int bch2_bkey_pick_read_device(struct bch_fs *, struct bkey_s_c,
struct bch_io_failures *,
- struct extent_ptr_decoded *);
+ struct extent_ptr_decoded *, int);
/* KEY_TYPE_btree_ptr: */
@@ -753,4 +754,19 @@ static inline void bch2_key_resize(struct bkey *k, unsigned new_size)
k->size = new_size;
}
+static inline u64 bch2_bkey_extent_ptrs_flags(struct bkey_ptrs_c ptrs)
+{
+ if (ptrs.start != ptrs.end &&
+ extent_entry_type(ptrs.start) == BCH_EXTENT_ENTRY_flags)
+ return ptrs.start->flags.flags;
+ return 0;
+}
+
+static inline u64 bch2_bkey_extent_flags(struct bkey_s_c k)
+{
+ return bch2_bkey_extent_ptrs_flags(bch2_bkey_ptrs_c(k));
+}
+
+int bch2_bkey_extent_flags_set(struct bch_fs *, struct bkey_i *, u64);
+
#endif /* _BCACHEFS_EXTENTS_H */
diff --git a/fs/bcachefs/extents_format.h b/fs/bcachefs/extents_format.h
index c198dfc376d6..74c0252cbd98 100644
--- a/fs/bcachefs/extents_format.h
+++ b/fs/bcachefs/extents_format.h
@@ -79,8 +79,9 @@
x(crc64, 2) \
x(crc128, 3) \
x(stripe_ptr, 4) \
- x(rebalance, 5)
-#define BCH_EXTENT_ENTRY_MAX 6
+ x(rebalance, 5) \
+ x(flags, 6)
+#define BCH_EXTENT_ENTRY_MAX 7
enum bch_extent_entry_type {
#define x(f, n) BCH_EXTENT_ENTRY_##f = n,
@@ -201,6 +202,25 @@ struct bch_extent_stripe_ptr {
#endif
};
+#define BCH_EXTENT_FLAGS() \
+ x(poisoned, 0)
+
+enum bch_extent_flags_e {
+#define x(n, v) BCH_EXTENT_FLAG_##n = v,
+ BCH_EXTENT_FLAGS()
+#undef x
+};
+
+struct bch_extent_flags {
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ __u64 type:7,
+ flags:57;
+#elif defined (__BIG_ENDIAN_BITFIELD)
+ __u64 flags:57,
+ type:7;
+#endif
+};
+
/* bch_extent_rebalance: */
#include "rebalance_format.h"
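A minimal usage sketch (illustrative) built on the helpers added to extents.h above: because a flags entry, when present, must be the first extent entry, testing a flag is a single comparison, as bch2_bkey_pick_read_device() now does for poisoned extents:

static bool example_extent_poisoned(struct bkey_s_c k)
{
	return bch2_bkey_extent_flags(k) & BIT_ULL(BCH_EXTENT_FLAG_poisoned);
}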
diff --git a/fs/bcachefs/extents_types.h b/fs/bcachefs/extents_types.h
index 43d6c341ecca..e51529dca4c2 100644
--- a/fs/bcachefs/extents_types.h
+++ b/fs/bcachefs/extents_types.h
@@ -20,8 +20,9 @@ struct bch_extent_crc_unpacked {
};
struct extent_ptr_decoded {
- unsigned idx;
bool has_ec;
+ bool do_ec_reconstruct;
+ u8 crc_retry_nr;
struct bch_extent_crc_unpacked crc;
struct bch_extent_ptr ptr;
struct bch_extent_stripe_ptr ec;
@@ -31,10 +32,10 @@ struct bch_io_failures {
u8 nr;
struct bch_dev_io_failures {
u8 dev;
- u8 idx;
- u8 nr_failed;
- u8 nr_retries;
- } devs[BCH_REPLICAS_MAX];
+ unsigned failed_csum_nr:6,
+ failed_io:1,
+ failed_ec:1;
+ } devs[BCH_REPLICAS_MAX + 1];
};
#endif /* _BCACHEFS_EXTENTS_TYPES_H */
diff --git a/fs/bcachefs/eytzinger.c b/fs/bcachefs/eytzinger.c
index 2eaffe37b5e7..0e742555cb0a 100644
--- a/fs/bcachefs/eytzinger.c
+++ b/fs/bcachefs/eytzinger.c
@@ -148,89 +148,99 @@ static int do_cmp(const void *a, const void *b, cmp_r_func_t cmp, const void *pr
return cmp(a, b, priv);
}
-static inline int eytzinger0_do_cmp(void *base, size_t n, size_t size,
+static inline int eytzinger1_do_cmp(void *base1, size_t n, size_t size,
cmp_r_func_t cmp_func, const void *priv,
size_t l, size_t r)
{
- return do_cmp(base + inorder_to_eytzinger0(l, n) * size,
- base + inorder_to_eytzinger0(r, n) * size,
+ return do_cmp(base1 + inorder_to_eytzinger1(l, n) * size,
+ base1 + inorder_to_eytzinger1(r, n) * size,
cmp_func, priv);
}
-static inline void eytzinger0_do_swap(void *base, size_t n, size_t size,
+static inline void eytzinger1_do_swap(void *base1, size_t n, size_t size,
swap_r_func_t swap_func, const void *priv,
size_t l, size_t r)
{
- do_swap(base + inorder_to_eytzinger0(l, n) * size,
- base + inorder_to_eytzinger0(r, n) * size,
+ do_swap(base1 + inorder_to_eytzinger1(l, n) * size,
+ base1 + inorder_to_eytzinger1(r, n) * size,
size, swap_func, priv);
}
-void eytzinger0_sort_r(void *base, size_t n, size_t size,
- cmp_r_func_t cmp_func,
- swap_r_func_t swap_func,
- const void *priv)
+static void eytzinger1_sort_r(void *base1, size_t n, size_t size,
+ cmp_r_func_t cmp_func,
+ swap_r_func_t swap_func,
+ const void *priv)
{
- int i, j, k;
+ unsigned i, j, k;
/* called from 'sort' without swap function, let's pick the default */
if (swap_func == SWAP_WRAPPER && !((struct wrapper *)priv)->swap_func)
swap_func = NULL;
if (!swap_func) {
- if (is_aligned(base, size, 8))
+ if (is_aligned(base1, size, 8))
swap_func = SWAP_WORDS_64;
- else if (is_aligned(base, size, 4))
+ else if (is_aligned(base1, size, 4))
swap_func = SWAP_WORDS_32;
else
swap_func = SWAP_BYTES;
}
/* heapify */
- for (i = n / 2 - 1; i >= 0; --i) {
+ for (i = n / 2; i >= 1; --i) {
/* Find the sift-down path all the way to the leaves. */
- for (j = i; k = j * 2 + 1, k + 1 < n;)
- j = eytzinger0_do_cmp(base, n, size, cmp_func, priv, k, k + 1) > 0 ? k : k + 1;
+ for (j = i; k = j * 2, k < n;)
+ j = eytzinger1_do_cmp(base1, n, size, cmp_func, priv, k, k + 1) > 0 ? k : k + 1;
/* Special case for the last leaf with no sibling. */
- if (j * 2 + 2 == n)
- j = j * 2 + 1;
+ if (j * 2 == n)
+ j *= 2;
/* Backtrack to the correct location. */
- while (j != i && eytzinger0_do_cmp(base, n, size, cmp_func, priv, i, j) >= 0)
- j = (j - 1) / 2;
+ while (j != i && eytzinger1_do_cmp(base1, n, size, cmp_func, priv, i, j) >= 0)
+ j /= 2;
/* Shift the element into its correct place. */
for (k = j; j != i;) {
- j = (j - 1) / 2;
- eytzinger0_do_swap(base, n, size, swap_func, priv, j, k);
+ j /= 2;
+ eytzinger1_do_swap(base1, n, size, swap_func, priv, j, k);
}
}
/* sort */
- for (i = n - 1; i > 0; --i) {
- eytzinger0_do_swap(base, n, size, swap_func, priv, 0, i);
+ for (i = n; i > 1; --i) {
+ eytzinger1_do_swap(base1, n, size, swap_func, priv, 1, i);
/* Find the sift-down path all the way to the leaves. */
- for (j = 0; k = j * 2 + 1, k + 1 < i;)
- j = eytzinger0_do_cmp(base, n, size, cmp_func, priv, k, k + 1) > 0 ? k : k + 1;
+ for (j = 1; k = j * 2, k + 1 < i;)
+ j = eytzinger1_do_cmp(base1, n, size, cmp_func, priv, k, k + 1) > 0 ? k : k + 1;
/* Special case for the last leaf with no sibling. */
- if (j * 2 + 2 == i)
- j = j * 2 + 1;
+ if (j * 2 + 1 == i)
+ j *= 2;
/* Backtrack to the correct location. */
- while (j && eytzinger0_do_cmp(base, n, size, cmp_func, priv, 0, j) >= 0)
- j = (j - 1) / 2;
+ while (j >= 1 && eytzinger1_do_cmp(base1, n, size, cmp_func, priv, 1, j) >= 0)
+ j /= 2;
/* Shift the element into its correct place. */
- for (k = j; j;) {
- j = (j - 1) / 2;
- eytzinger0_do_swap(base, n, size, swap_func, priv, j, k);
+ for (k = j; j > 1;) {
+ j /= 2;
+ eytzinger1_do_swap(base1, n, size, swap_func, priv, j, k);
}
}
}
+void eytzinger0_sort_r(void *base, size_t n, size_t size,
+ cmp_r_func_t cmp_func,
+ swap_r_func_t swap_func,
+ const void *priv)
+{
+ void *base1 = base - size;
+
+ return eytzinger1_sort_r(base1, n, size, cmp_func, swap_func, priv);
+}
+
void eytzinger0_sort(void *base, size_t n, size_t size,
cmp_func_t cmp_func,
swap_func_t swap_func)
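A sketch of the index mapping (illustrative; eytzinger1_elem is not a real helper): with 1-based heap indices the parent/child arithmetic reduces to shifts (parent = i / 2, children = 2i and 2i + 1), and the 0-based entry points only bias the base pointer, so index i in the 1-based code addresses element i - 1 of the caller's array:

static inline void *eytzinger1_elem(void *base0, size_t size, unsigned i)
{
	void *base1 = base0 - size;	/* same trick as eytzinger0_sort_r() above */

	return base1 + i * size;	/* for i >= 1; equals base0 + (i - 1) * size */
}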
diff --git a/fs/bcachefs/eytzinger.h b/fs/bcachefs/eytzinger.h
index 0541192d7bc0..643c1f716061 100644
--- a/fs/bcachefs/eytzinger.h
+++ b/fs/bcachefs/eytzinger.h
@@ -6,6 +6,7 @@
#include <linux/log2.h>
#ifdef EYTZINGER_DEBUG
+#include <linux/bug.h>
#define EYTZINGER_BUG_ON(cond) BUG_ON(cond)
#else
#define EYTZINGER_BUG_ON(cond)
@@ -56,24 +57,14 @@ static inline unsigned eytzinger1_last(unsigned size)
return rounddown_pow_of_two(size + 1) - 1;
}
-/*
- * eytzinger1_next() and eytzinger1_prev() have the nice properties that
- *
- * eytzinger1_next(0) == eytzinger1_first())
- * eytzinger1_prev(0) == eytzinger1_last())
- *
- * eytzinger1_prev(eytzinger1_first()) == 0
- * eytzinger1_next(eytzinger1_last()) == 0
- */
-
static inline unsigned eytzinger1_next(unsigned i, unsigned size)
{
- EYTZINGER_BUG_ON(i > size);
+ EYTZINGER_BUG_ON(i == 0 || i > size);
if (eytzinger1_right_child(i) <= size) {
i = eytzinger1_right_child(i);
- i <<= __fls(size + 1) - __fls(i);
+ i <<= __fls(size) - __fls(i);
i >>= i > size;
} else {
i >>= ffz(i) + 1;
@@ -84,12 +75,12 @@ static inline unsigned eytzinger1_next(unsigned i, unsigned size)
static inline unsigned eytzinger1_prev(unsigned i, unsigned size)
{
- EYTZINGER_BUG_ON(i > size);
+ EYTZINGER_BUG_ON(i == 0 || i > size);
if (eytzinger1_left_child(i) <= size) {
i = eytzinger1_left_child(i) + 1;
- i <<= __fls(size + 1) - __fls(i);
+ i <<= __fls(size) - __fls(i);
i -= 1;
i >>= i > size;
} else {
@@ -243,73 +234,63 @@ static inline unsigned inorder_to_eytzinger0(unsigned i, unsigned size)
(_i) != -1; \
(_i) = eytzinger0_next((_i), (_size)))
+#define eytzinger0_for_each_prev(_i, _size) \
+ for (unsigned (_i) = eytzinger0_last((_size)); \
+ (_i) != -1; \
+ (_i) = eytzinger0_prev((_i), (_size)))
+
/* return greatest node <= @search, or -1 if not found */
static inline int eytzinger0_find_le(void *base, size_t nr, size_t size,
cmp_func_t cmp, const void *search)
{
- unsigned i, n = 0;
-
- if (!nr)
- return -1;
-
- do {
- i = n;
- n = eytzinger0_child(i, cmp(base + i * size, search) <= 0);
- } while (n < nr);
-
- if (n & 1) {
- /*
- * @i was greater than @search, return previous node:
- *
- * if @i was leftmost/smallest element,
- * eytzinger0_prev(eytzinger0_first())) returns -1, as expected
- */
- return eytzinger0_prev(i, nr);
- } else {
- return i;
- }
+ void *base1 = base - size;
+ unsigned n = 1;
+
+ while (n <= nr)
+ n = eytzinger1_child(n, cmp(base1 + n * size, search) <= 0);
+ n >>= __ffs(n) + 1;
+ return n - 1;
}
+/* return smallest node > @search, or -1 if not found */
static inline int eytzinger0_find_gt(void *base, size_t nr, size_t size,
cmp_func_t cmp, const void *search)
{
- ssize_t idx = eytzinger0_find_le(base, nr, size, cmp, search);
+ void *base1 = base - size;
+ unsigned n = 1;
- /*
- * if eytitzinger0_find_le() returned -1 - no element was <= search - we
- * want to return the first element; next/prev identities mean this work
- * as expected
- *
- * similarly if find_le() returns last element, we should return -1;
- * identities mean this all works out:
- */
- return eytzinger0_next(idx, nr);
+ while (n <= nr)
+ n = eytzinger1_child(n, cmp(base1 + n * size, search) <= 0);
+ n >>= __ffs(n + 1) + 1;
+ return n - 1;
}
+/* return smallest node >= @search, or -1 if not found */
static inline int eytzinger0_find_ge(void *base, size_t nr, size_t size,
cmp_func_t cmp, const void *search)
{
- ssize_t idx = eytzinger0_find_le(base, nr, size, cmp, search);
-
- if (idx < nr && !cmp(base + idx * size, search))
- return idx;
+ void *base1 = base - size;
+ unsigned n = 1;
- return eytzinger0_next(idx, nr);
+ while (n <= nr)
+ n = eytzinger1_child(n, cmp(base1 + n * size, search) < 0);
+ n >>= __ffs(n + 1) + 1;
+ return n - 1;
}
#define eytzinger0_find(base, nr, size, _cmp, search) \
({ \
- void *_base = (base); \
+ size_t _size = (size); \
+ void *_base1 = (void *)(base) - _size; \
const void *_search = (search); \
size_t _nr = (nr); \
- size_t _size = (size); \
- size_t _i = 0; \
+ size_t _i = 1; \
int _res; \
\
- while (_i < _nr && \
- (_res = _cmp(_search, _base + _i * _size))) \
- _i = eytzinger0_child(_i, _res > 0); \
- _i; \
+ while (_i <= _nr && \
+ (_res = _cmp(_search, _base1 + _i * _size))) \
+ _i = eytzinger1_child(_i, _res > 0); \
+ _i - 1; \
})
void eytzinger0_sort_r(void *, size_t, size_t,
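A worked trace (illustrative) of the new eytzinger0_find_le(): the search path is encoded in the bits of n (root = 1, then one bit per level, set when we went right because elem <= search), and stripping the trailing zero bits plus one set bit recovers the deepest node we went right from, i.e. the greatest element <= search:

/*
 * Sorted values {1,3,5,7,9,11,13} stored in eytzinger0 order are
 * {7,3,11,1,5,9,13} (nr = 7).  Searching for 8:
 *
 *   n = 1:  7 <= 8, go right -> n = 3
 *   n = 3: 11 >  8, go left  -> n = 6
 *   n = 6:  9 >  8, go left  -> n = 12   (12 > nr, stop)
 *
 * n = 12 = 0b1100, __ffs(n) = 2, n >> 3 = 1, and 1 - 1 = 0: index 0 holds
 * 7, the greatest element <= 8.  A search smaller than every element never
 * goes right, n stays a power of two, and the result underflows to -1.
 */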
diff --git a/fs/bcachefs/fs-io-buffered.c b/fs/bcachefs/fs-io-buffered.c
index ab1d5db2fa56..5ab1c73c8d4c 100644
--- a/fs/bcachefs/fs-io-buffered.c
+++ b/fs/bcachefs/fs-io-buffered.c
@@ -110,11 +110,21 @@ static int readpage_bio_extend(struct btree_trans *trans,
if (!get_more)
break;
+ unsigned sectors_remaining = sectors_this_extent - bio_sectors(bio);
+
+ if (sectors_remaining < PAGE_SECTORS << mapping_min_folio_order(iter->mapping))
+ break;
+
+ unsigned order = ilog2(rounddown_pow_of_two(sectors_remaining) / PAGE_SECTORS);
+
+ /* clamp order so the folio stays naturally aligned within the mapping */
+ order = min(order, __ffs(folio_offset|BIT(31)));
+
folio = xa_load(&iter->mapping->i_pages, folio_offset);
if (folio && !xa_is_value(folio))
break;
- folio = filemap_alloc_folio(readahead_gfp_mask(iter->mapping), 0);
+ folio = filemap_alloc_folio(readahead_gfp_mask(iter->mapping), order);
if (!folio)
break;
@@ -149,12 +159,10 @@ static void bchfs_read(struct btree_trans *trans,
struct bch_fs *c = trans->c;
struct btree_iter iter;
struct bkey_buf sk;
- int flags = BCH_READ_RETRY_IF_STALE|
- BCH_READ_MAY_PROMOTE;
+ int flags = BCH_READ_retry_if_stale|
+ BCH_READ_may_promote;
int ret = 0;
- rbio->c = c;
- rbio->start_time = local_clock();
rbio->subvol = inum.subvol;
bch2_bkey_buf_init(&sk);
@@ -211,14 +219,14 @@ static void bchfs_read(struct btree_trans *trans,
swap(rbio->bio.bi_iter.bi_size, bytes);
if (rbio->bio.bi_iter.bi_size == bytes)
- flags |= BCH_READ_LAST_FRAGMENT;
+ flags |= BCH_READ_last_fragment;
bch2_bio_page_state_set(&rbio->bio, k);
bch2_read_extent(trans, rbio, iter.pos,
data_btree, k, offset_into_extent, flags);
- if (flags & BCH_READ_LAST_FRAGMENT)
+ if (flags & BCH_READ_last_fragment)
break;
swap(rbio->bio.bi_iter.bi_size, bytes);
@@ -232,7 +240,8 @@ err:
if (ret) {
struct printbuf buf = PRINTBUF;
- bch2_inum_offset_err_msg_trans(trans, &buf, inum, iter.pos.offset << 9);
+ lockrestart_do(trans,
+ bch2_inum_offset_err_msg_trans(trans, &buf, inum, iter.pos.offset << 9));
prt_printf(&buf, "read error %i from btree lookup", ret);
bch_err_ratelimited(c, "%s", buf.buf);
printbuf_exit(&buf);
@@ -280,12 +289,13 @@ void bch2_readahead(struct readahead_control *ractl)
struct bch_read_bio *rbio =
rbio_init(bio_alloc_bioset(NULL, n, REQ_OP_READ,
GFP_KERNEL, &c->bio_read),
- opts);
+ c,
+ opts,
+ bch2_readpages_end_io);
readpage_iter_advance(&readpages_iter);
rbio->bio.bi_iter.bi_sector = folio_sector(folio);
- rbio->bio.bi_end_io = bch2_readpages_end_io;
BUG_ON(!bio_add_folio(&rbio->bio, folio, folio_size(folio), 0));
bchfs_read(trans, rbio, inode_inum(inode),
@@ -323,10 +333,10 @@ int bch2_read_single_folio(struct folio *folio, struct address_space *mapping)
bch2_inode_opts_get(&opts, c, &inode->ei_inode);
rbio = rbio_init(bio_alloc_bioset(NULL, 1, REQ_OP_READ, GFP_KERNEL, &c->bio_read),
- opts);
+ c,
+ opts,
+ bch2_read_single_folio_end_io);
rbio->bio.bi_private = &done;
- rbio->bio.bi_end_io = bch2_read_single_folio_end_io;
-
rbio->bio.bi_opf = REQ_OP_READ|REQ_SYNC;
rbio->bio.bi_iter.bi_sector = folio_sector(folio);
BUG_ON(!bio_add_folio(&rbio->bio, folio, folio_size(folio), 0));
@@ -420,7 +430,7 @@ static void bch2_writepage_io_done(struct bch_write_op *op)
}
}
- if (io->op.flags & BCH_WRITE_WROTE_DATA_INLINE) {
+ if (io->op.flags & BCH_WRITE_wrote_data_inline) {
bio_for_each_folio_all(fi, bio) {
struct bch_folio *s;
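A worked example (illustrative) of the large-folio readahead sizing in readpage_bio_extend() above: with 4K pages (PAGE_SECTORS = 8) and sectors_remaining = 96, rounddown_pow_of_two(96) / 8 = 8 pages, so order = ilog2(8) = 3, i.e. a 32K folio; the clamp against __ffs(folio_offset | BIT(31)) then keeps the folio naturally aligned in the mapping:

/*
 * At folio_offset = 20 (0b10100), __ffs() = 2, so order drops to 2 and a
 * 16K folio starts at a multiple of 4 pages.  OR-ing in BIT(31) only guards
 * __ffs(0) at offset 0, where the computed order is left unchanged.
 */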
diff --git a/fs/bcachefs/fs-io-direct.c b/fs/bcachefs/fs-io-direct.c
index 2089c36b5866..535bc5fcbcc0 100644
--- a/fs/bcachefs/fs-io-direct.c
+++ b/fs/bcachefs/fs-io-direct.c
@@ -73,6 +73,7 @@ static int bch2_direct_IO_read(struct kiocb *req, struct iov_iter *iter)
struct blk_plug plug;
loff_t offset = req->ki_pos;
bool sync = is_sync_kiocb(req);
+ bool split = false;
size_t shorten;
ssize_t ret;
@@ -99,8 +100,6 @@ static int bch2_direct_IO_read(struct kiocb *req, struct iov_iter *iter)
GFP_KERNEL,
&c->dio_read_bioset);
- bio->bi_end_io = bch2_direct_IO_read_endio;
-
dio = container_of(bio, struct dio_read, rbio.bio);
closure_init(&dio->cl, NULL);
@@ -133,12 +132,13 @@ static int bch2_direct_IO_read(struct kiocb *req, struct iov_iter *iter)
goto start;
while (iter->count) {
+ split = true;
+
bio = bio_alloc_bioset(NULL,
bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS),
REQ_OP_READ,
GFP_KERNEL,
&c->bio_read);
- bio->bi_end_io = bch2_direct_IO_read_split_endio;
start:
bio->bi_opf = REQ_OP_READ|REQ_SYNC;
bio->bi_iter.bi_sector = offset >> 9;
@@ -160,7 +160,15 @@ start:
if (iter->count)
closure_get(&dio->cl);
- bch2_read(c, rbio_init(bio, opts), inode_inum(inode));
+ struct bch_read_bio *rbio =
+ rbio_init(bio,
+ c,
+ opts,
+ split
+ ? bch2_direct_IO_read_split_endio
+ : bch2_direct_IO_read_endio);
+
+ bch2_read(c, rbio, inode_inum(inode));
}
blk_finish_plug(&plug);
@@ -511,8 +519,8 @@ static __always_inline long bch2_dio_write_loop(struct dio_write *dio)
dio->op.devs_need_flush = &inode->ei_devs_need_flush;
if (sync)
- dio->op.flags |= BCH_WRITE_SYNC;
- dio->op.flags |= BCH_WRITE_CHECK_ENOSPC;
+ dio->op.flags |= BCH_WRITE_sync;
+ dio->op.flags |= BCH_WRITE_check_enospc;
ret = bch2_quota_reservation_add(c, inode, &dio->quota_res,
bio_sectors(bio), true);
diff --git a/fs/bcachefs/fs-ioctl.c b/fs/bcachefs/fs-ioctl.c
index 15725b4ce393..f45054cee746 100644
--- a/fs/bcachefs/fs-ioctl.c
+++ b/fs/bcachefs/fs-ioctl.c
@@ -5,8 +5,8 @@
#include "chardev.h"
#include "dirent.h"
#include "fs.h"
-#include "fs-common.h"
#include "fs-ioctl.h"
+#include "namei.h"
#include "quota.h"
#include <linux/compat.h>
@@ -54,6 +54,32 @@ static int bch2_inode_flags_set(struct btree_trans *trans,
(newflags & (BCH_INODE_nodump|BCH_INODE_noatime)) != newflags)
return -EINVAL;
+ if ((newflags ^ oldflags) & BCH_INODE_casefolded) {
+#ifdef CONFIG_UNICODE
+ int ret = 0;
+ /* Not supported on individual files. */
+ if (!S_ISDIR(bi->bi_mode))
+ return -EOPNOTSUPP;
+
+ /*
+ * Make sure the dir is empty, as otherwise we'd need to
+ * rehash everything and update the dirent keys.
+ */
+ ret = bch2_empty_dir_trans(trans, inode_inum(inode));
+ if (ret < 0)
+ return ret;
+
+ ret = bch2_request_incompat_feature(c, bcachefs_metadata_version_casefolding);
+ if (ret)
+ return ret;
+
+ bch2_check_set_feature(c, BCH_FEATURE_casefolding);
+#else
+ printk(KERN_ERR "Cannot use casefolding on a kernel without CONFIG_UNICODE\n");
+ return -EOPNOTSUPP;
+#endif
+ }
+
if (s->set_projinherit) {
bi->bi_fields_set &= ~(1 << Inode_opt_project);
bi->bi_fields_set |= ((int) s->projinherit << Inode_opt_project);
@@ -218,7 +244,7 @@ static int bch2_ioc_reinherit_attrs(struct bch_fs *c,
int ret = 0;
subvol_inum inum;
- kname = kmalloc(BCH_NAME_MAX + 1, GFP_KERNEL);
+ kname = kmalloc(BCH_NAME_MAX, GFP_KERNEL);
if (!kname)
return -ENOMEM;
@@ -511,10 +537,6 @@ static long bch2_ioctl_subvolume_destroy(struct bch_fs *c, struct file *filp,
ret = -EXDEV;
goto err;
}
- if (!d_is_positive(victim)) {
- ret = -ENOENT;
- goto err;
- }
ret = __bch2_unlink(dir, victim, true);
if (!ret) {
fsnotify_rmdir(dir, victim);
diff --git a/fs/bcachefs/fs-ioctl.h b/fs/bcachefs/fs-ioctl.h
index d30f9bb056fd..ecd3bfdcde21 100644
--- a/fs/bcachefs/fs-ioctl.h
+++ b/fs/bcachefs/fs-ioctl.h
@@ -6,19 +6,21 @@
/* bcachefs inode flags -> vfs inode flags: */
static const __maybe_unused unsigned bch_flags_to_vfs[] = {
- [__BCH_INODE_sync] = S_SYNC,
- [__BCH_INODE_immutable] = S_IMMUTABLE,
- [__BCH_INODE_append] = S_APPEND,
- [__BCH_INODE_noatime] = S_NOATIME,
+ [__BCH_INODE_sync] = S_SYNC,
+ [__BCH_INODE_immutable] = S_IMMUTABLE,
+ [__BCH_INODE_append] = S_APPEND,
+ [__BCH_INODE_noatime] = S_NOATIME,
+ [__BCH_INODE_casefolded] = S_CASEFOLD,
};
/* bcachefs inode flags -> FS_IOC_GETFLAGS: */
static const __maybe_unused unsigned bch_flags_to_uflags[] = {
- [__BCH_INODE_sync] = FS_SYNC_FL,
- [__BCH_INODE_immutable] = FS_IMMUTABLE_FL,
- [__BCH_INODE_append] = FS_APPEND_FL,
- [__BCH_INODE_nodump] = FS_NODUMP_FL,
- [__BCH_INODE_noatime] = FS_NOATIME_FL,
+ [__BCH_INODE_sync] = FS_SYNC_FL,
+ [__BCH_INODE_immutable] = FS_IMMUTABLE_FL,
+ [__BCH_INODE_append] = FS_APPEND_FL,
+ [__BCH_INODE_nodump] = FS_NODUMP_FL,
+ [__BCH_INODE_noatime] = FS_NOATIME_FL,
+ [__BCH_INODE_casefolded] = FS_CASEFOLD_FL,
};
/* bcachefs inode flags -> FS_IOC_FSGETXATTR: */
diff --git a/fs/bcachefs/fs.c b/fs/bcachefs/fs.c
index 90ade8f648d9..c88c149d5de5 100644
--- a/fs/bcachefs/fs.c
+++ b/fs/bcachefs/fs.c
@@ -11,7 +11,6 @@
#include "errcode.h"
#include "extents.h"
#include "fs.h"
-#include "fs-common.h"
#include "fs-io.h"
#include "fs-ioctl.h"
#include "fs-io-buffered.h"
@@ -22,6 +21,7 @@
#include "io_read.h"
#include "journal.h"
#include "keylist.h"
+#include "namei.h"
#include "quota.h"
#include "rebalance.h"
#include "snapshot.h"
@@ -641,7 +641,9 @@ static struct bch_inode_info *bch2_lookup_trans(struct btree_trans *trans,
if (ret)
return ERR_PTR(ret);
- ret = bch2_dirent_read_target(trans, dir, bkey_s_c_to_dirent(k), &inum);
+ struct bkey_s_c_dirent d = bkey_s_c_to_dirent(k);
+
+ ret = bch2_dirent_read_target(trans, dir, d, &inum);
if (ret > 0)
ret = -ENOENT;
if (ret)
@@ -651,30 +653,30 @@ static struct bch_inode_info *bch2_lookup_trans(struct btree_trans *trans,
if (inode)
goto out;
+ /*
+ * Note: if check/repair needs to update the inode, we commit before
+ * bch2_inode_hash_init_insert(); after that point we can't take a
+ * transaction restart, since we're no longer in the top level loop
+ * with a commit_do() like we usually are:
+ */
+
struct bch_subvolume subvol;
struct bch_inode_unpacked inode_u;
ret = bch2_subvolume_get(trans, inum.subvol, true, &subvol) ?:
bch2_inode_find_by_inum_nowarn_trans(trans, inum, &inode_u) ?:
+ bch2_check_dirent_target(trans, &dirent_iter, d, &inode_u, false) ?:
+ bch2_trans_commit(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc) ?:
PTR_ERR_OR_ZERO(inode = bch2_inode_hash_init_insert(trans, inum, &inode_u, &subvol));
+ /*
+ * don't remove the dirent: check_inodes might find another inode
+ * that points back to it
+ */
bch2_fs_inconsistent_on(bch2_err_matches(ret, ENOENT),
c, "dirent to missing inode:\n %s",
- (bch2_bkey_val_to_text(&buf, c, k), buf.buf));
+ (bch2_bkey_val_to_text(&buf, c, d.s_c), buf.buf));
if (ret)
goto err;
-
- /* regular files may have hardlinks: */
- if (bch2_fs_inconsistent_on(bch2_inode_should_have_single_bp(&inode_u) &&
- !bkey_eq(k.k->p, POS(inode_u.bi_dir, inode_u.bi_dir_offset)),
- c,
- "dirent points to inode that does not point back:\n %s",
- (bch2_bkey_val_to_text(&buf, c, k),
- prt_printf(&buf, "\n "),
- bch2_inode_unpacked_to_text(&buf, &inode_u),
- buf.buf))) {
- ret = -ENOENT;
- goto err;
- }
out:
bch2_trans_iter_exit(trans, &dirent_iter);
printbuf_exit(&buf);
@@ -698,6 +700,23 @@ static struct dentry *bch2_lookup(struct inode *vdir, struct dentry *dentry,
if (IS_ERR(inode))
inode = NULL;
+#ifdef CONFIG_UNICODE
+ if (!inode && IS_CASEFOLDED(vdir)) {
+ /*
+ * Do not cache a negative dentry in casefolded directories
+ * as it would need to be invalidated in the following situation:
+ * - Lookup file "blAH" in a casefolded directory
+ * - Creation of file "BLAH" in a casefolded directory
+ * - Lookup file "blAH" in a casefolded directory
+ * which would fail if we had a negative dentry.
+ *
+ * We should come back to this when VFS has a method to handle
+ * this edge case.
+ */
+ return NULL;
+ }
+#endif
+
return d_splice_alias(&inode->v, dentry);
}
@@ -858,10 +877,10 @@ err:
return bch2_err_class(ret);
}
-static int bch2_mkdir(struct mnt_idmap *idmap,
- struct inode *vdir, struct dentry *dentry, umode_t mode)
+static struct dentry *bch2_mkdir(struct mnt_idmap *idmap,
+ struct inode *vdir, struct dentry *dentry, umode_t mode)
{
- return bch2_mknod(idmap, vdir, dentry, mode|S_IFDIR, 0);
+ return ERR_PTR(bch2_mknod(idmap, vdir, dentry, mode|S_IFDIR, 0));
}
static int bch2_rename2(struct mnt_idmap *idmap,
@@ -1802,7 +1821,8 @@ static void bch2_vfs_inode_init(struct btree_trans *trans,
break;
}
- mapping_set_large_folios(inode->v.i_mapping);
+ mapping_set_folio_min_order(inode->v.i_mapping,
+ get_order(trans->c->opts.block_size));
}
static void bch2_free_inode(struct inode *vinode)
@@ -2008,44 +2028,6 @@ static struct bch_fs *bch2_path_to_fs(const char *path)
return c ?: ERR_PTR(-ENOENT);
}
-static int bch2_remount(struct super_block *sb, int *flags,
- struct bch_opts opts)
-{
- struct bch_fs *c = sb->s_fs_info;
- int ret = 0;
-
- opt_set(opts, read_only, (*flags & SB_RDONLY) != 0);
-
- if (opts.read_only != c->opts.read_only) {
- down_write(&c->state_lock);
-
- if (opts.read_only) {
- bch2_fs_read_only(c);
-
- sb->s_flags |= SB_RDONLY;
- } else {
- ret = bch2_fs_read_write(c);
- if (ret) {
- bch_err(c, "error going rw: %i", ret);
- up_write(&c->state_lock);
- ret = -EINVAL;
- goto err;
- }
-
- sb->s_flags &= ~SB_RDONLY;
- }
-
- c->opts.read_only = opts.read_only;
-
- up_write(&c->state_lock);
- }
-
- if (opt_defined(opts, errors))
- c->opts.errors = opts.errors;
-err:
- return bch2_err_class(ret);
-}
-
static int bch2_show_devname(struct seq_file *seq, struct dentry *root)
{
struct bch_fs *c = root->d_sb->s_fs_info;
@@ -2192,6 +2174,9 @@ static int bch2_fs_get_tree(struct fs_context *fc)
if (ret)
goto err;
+ if (opt_defined(opts, discard))
+ set_bit(BCH_FS_discard_mount_opt_set, &c->flags);
+
/* Some options can't be parsed until after the fs is started: */
opts = bch2_opts_empty();
ret = bch2_parse_mount_opts(c, &opts, NULL, opts_parse->parse_later.buf);
@@ -2200,9 +2185,10 @@ static int bch2_fs_get_tree(struct fs_context *fc)
bch2_opts_apply(&c->opts, opts);
- ret = bch2_fs_start(c);
- if (ret)
- goto err_stop_fs;
+ /*
+ * need to initialise sb and set c->vfs_sb _before_ starting fs,
+ * for blk_holder_ops
+ */
sb = sget(fc->fs_type, NULL, bch2_set_super, fc->sb_flags|SB_NOSEC, c);
ret = PTR_ERR_OR_ZERO(sb);
@@ -2264,6 +2250,10 @@ got_sb:
sb->s_shrink->seeks = 0;
+ ret = bch2_fs_start(c);
+ if (ret)
+ goto err_put_super;
+
vinode = bch2_vfs_inode_get(c, BCACHEFS_ROOT_SUBVOL_INUM);
ret = PTR_ERR_OR_ZERO(vinode);
bch_err_msg(c, ret, "mounting: error getting root inode");
@@ -2351,8 +2341,39 @@ static int bch2_fs_reconfigure(struct fs_context *fc)
{
struct super_block *sb = fc->root->d_sb;
struct bch2_opts_parse *opts = fc->fs_private;
+ struct bch_fs *c = sb->s_fs_info;
+ int ret = 0;
+
+ opt_set(opts->opts, read_only, (fc->sb_flags & SB_RDONLY) != 0);
- return bch2_remount(sb, &fc->sb_flags, opts->opts);
+ if (opts->opts.read_only != c->opts.read_only) {
+ down_write(&c->state_lock);
+
+ if (opts->opts.read_only) {
+ bch2_fs_read_only(c);
+
+ sb->s_flags |= SB_RDONLY;
+ } else {
+ ret = bch2_fs_read_write(c);
+ if (ret) {
+ bch_err(c, "error going rw: %i", ret);
+ up_write(&c->state_lock);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ sb->s_flags &= ~SB_RDONLY;
+ }
+
+ c->opts.read_only = opts->opts.read_only;
+
+ up_write(&c->state_lock);
+ }
+
+ if (opt_defined(opts->opts, errors))
+ c->opts.errors = opts->opts.errors;
+err:
+ return bch2_err_class(ret);
}
static const struct fs_context_operations bch2_context_ops = {
@@ -2396,7 +2417,7 @@ static struct file_system_type bcache_fs_type = {
.name = "bcachefs",
.init_fs_context = bch2_init_fs_context,
.kill_sb = bch2_kill_sb,
- .fs_flags = FS_REQUIRES_DEV | FS_ALLOW_IDMAP,
+ .fs_flags = FS_REQUIRES_DEV | FS_ALLOW_IDMAP | FS_LBS,
};
MODULE_ALIAS_FS("bcachefs");
diff --git a/fs/bcachefs/fsck.c b/fs/bcachefs/fsck.c
index 0e85131d0af8..091057023fc5 100644
--- a/fs/bcachefs/fsck.c
+++ b/fs/bcachefs/fsck.c
@@ -10,10 +10,10 @@
#include "dirent.h"
#include "error.h"
#include "fs.h"
-#include "fs-common.h"
#include "fsck.h"
#include "inode.h"
#include "keylist.h"
+#include "namei.h"
#include "recovery_passes.h"
#include "snapshot.h"
#include "super.h"
@@ -23,13 +23,6 @@
#include <linux/bsearch.h>
#include <linux/dcache.h> /* struct qstr */
-static bool inode_points_to_dirent(struct bch_inode_unpacked *inode,
- struct bkey_s_c_dirent d)
-{
- return inode->bi_dir == d.k->p.inode &&
- inode->bi_dir_offset == d.k->p.offset;
-}
-
static int dirent_points_to_inode_nowarn(struct bkey_s_c_dirent d,
struct bch_inode_unpacked *inode)
{
@@ -116,29 +109,6 @@ static int subvol_lookup(struct btree_trans *trans, u32 subvol,
return ret;
}
-static int lookup_first_inode(struct btree_trans *trans, u64 inode_nr,
- struct bch_inode_unpacked *inode)
-{
- struct btree_iter iter;
- struct bkey_s_c k;
- int ret;
-
- for_each_btree_key_norestart(trans, iter, BTREE_ID_inodes, POS(0, inode_nr),
- BTREE_ITER_all_snapshots, k, ret) {
- if (k.k->p.offset != inode_nr)
- break;
- if (!bkey_is_inode(k.k))
- continue;
- ret = bch2_inode_unpack(k, inode);
- goto found;
- }
- ret = -BCH_ERR_ENOENT_inode;
-found:
- bch_err_msg(trans->c, ret, "fetching inode %llu", inode_nr);
- bch2_trans_iter_exit(trans, &iter);
- return ret;
-}
-
static int lookup_inode(struct btree_trans *trans, u64 inode_nr, u32 snapshot,
struct bch_inode_unpacked *inode)
{
@@ -179,32 +149,6 @@ static int lookup_dirent_in_snapshot(struct btree_trans *trans,
return 0;
}
-static int __remove_dirent(struct btree_trans *trans, struct bpos pos)
-{
- struct bch_fs *c = trans->c;
- struct btree_iter iter;
- struct bch_inode_unpacked dir_inode;
- struct bch_hash_info dir_hash_info;
- int ret;
-
- ret = lookup_first_inode(trans, pos.inode, &dir_inode);
- if (ret)
- goto err;
-
- dir_hash_info = bch2_hash_info_init(c, &dir_inode);
-
- bch2_trans_iter_init(trans, &iter, BTREE_ID_dirents, pos, BTREE_ITER_intent);
-
- ret = bch2_btree_iter_traverse(&iter) ?:
- bch2_hash_delete_at(trans, bch2_dirent_hash_desc,
- &dir_hash_info, &iter,
- BTREE_UPDATE_internal_snapshot_node);
- bch2_trans_iter_exit(trans, &iter);
-err:
- bch_err_fn(c, ret);
- return ret;
-}
-
/*
* Find any subvolume associated with a tree of snapshots
* We can't rely on master_subvol - it might have been deleted.
@@ -548,7 +492,7 @@ static int remove_backpointer(struct btree_trans *trans,
SPOS(inode->bi_dir, inode->bi_dir_offset, inode->bi_snapshot));
int ret = bkey_err(d) ?:
dirent_points_to_inode(c, d, inode) ?:
- __remove_dirent(trans, d.k->p);
+ bch2_fsck_remove_dirent(trans, d.k->p);
bch2_trans_iter_exit(trans, &iter);
return ret;
}
@@ -1985,169 +1929,6 @@ static int check_subdir_dirents_count(struct btree_trans *trans, struct inode_wa
trans_was_restarted(trans, restart_count);
}
-noinline_for_stack
-static int check_dirent_inode_dirent(struct btree_trans *trans,
- struct btree_iter *iter,
- struct bkey_s_c_dirent d,
- struct bch_inode_unpacked *target)
-{
- struct bch_fs *c = trans->c;
- struct printbuf buf = PRINTBUF;
- struct btree_iter bp_iter = { NULL };
- int ret = 0;
-
- if (inode_points_to_dirent(target, d))
- return 0;
-
- if (!target->bi_dir &&
- !target->bi_dir_offset) {
- fsck_err_on(S_ISDIR(target->bi_mode),
- trans, inode_dir_missing_backpointer,
- "directory with missing backpointer\n%s",
- (printbuf_reset(&buf),
- bch2_bkey_val_to_text(&buf, c, d.s_c),
- prt_printf(&buf, "\n"),
- bch2_inode_unpacked_to_text(&buf, target),
- buf.buf));
-
- fsck_err_on(target->bi_flags & BCH_INODE_unlinked,
- trans, inode_unlinked_but_has_dirent,
- "inode unlinked but has dirent\n%s",
- (printbuf_reset(&buf),
- bch2_bkey_val_to_text(&buf, c, d.s_c),
- prt_printf(&buf, "\n"),
- bch2_inode_unpacked_to_text(&buf, target),
- buf.buf));
-
- target->bi_flags &= ~BCH_INODE_unlinked;
- target->bi_dir = d.k->p.inode;
- target->bi_dir_offset = d.k->p.offset;
- return __bch2_fsck_write_inode(trans, target);
- }
-
- if (bch2_inode_should_have_single_bp(target) &&
- !fsck_err(trans, inode_wrong_backpointer,
- "dirent points to inode that does not point back:\n %s",
- (bch2_bkey_val_to_text(&buf, c, d.s_c),
- prt_printf(&buf, "\n "),
- bch2_inode_unpacked_to_text(&buf, target),
- buf.buf)))
- goto err;
-
- struct bkey_s_c_dirent bp_dirent = dirent_get_by_pos(trans, &bp_iter,
- SPOS(target->bi_dir, target->bi_dir_offset, target->bi_snapshot));
- ret = bkey_err(bp_dirent);
- if (ret && !bch2_err_matches(ret, ENOENT))
- goto err;
-
- bool backpointer_exists = !ret;
- ret = 0;
-
- if (fsck_err_on(!backpointer_exists,
- trans, inode_wrong_backpointer,
- "inode %llu:%u has wrong backpointer:\n"
- "got %llu:%llu\n"
- "should be %llu:%llu",
- target->bi_inum, target->bi_snapshot,
- target->bi_dir,
- target->bi_dir_offset,
- d.k->p.inode,
- d.k->p.offset)) {
- target->bi_dir = d.k->p.inode;
- target->bi_dir_offset = d.k->p.offset;
- ret = __bch2_fsck_write_inode(trans, target);
- goto out;
- }
-
- bch2_bkey_val_to_text(&buf, c, d.s_c);
- prt_newline(&buf);
- if (backpointer_exists)
- bch2_bkey_val_to_text(&buf, c, bp_dirent.s_c);
-
- if (fsck_err_on(backpointer_exists &&
- (S_ISDIR(target->bi_mode) ||
- target->bi_subvol),
- trans, inode_dir_multiple_links,
- "%s %llu:%u with multiple links\n%s",
- S_ISDIR(target->bi_mode) ? "directory" : "subvolume",
- target->bi_inum, target->bi_snapshot, buf.buf)) {
- ret = __remove_dirent(trans, d.k->p);
- goto out;
- }
-
- /*
- * hardlinked file with nlink 0:
- * We're just adjusting nlink here so check_nlinks() will pick
- * it up, it ignores inodes with nlink 0
- */
- if (fsck_err_on(backpointer_exists && !target->bi_nlink,
- trans, inode_multiple_links_but_nlink_0,
- "inode %llu:%u type %s has multiple links but i_nlink 0\n%s",
- target->bi_inum, target->bi_snapshot, bch2_d_types[d.v->d_type], buf.buf)) {
- target->bi_nlink++;
- target->bi_flags &= ~BCH_INODE_unlinked;
- ret = __bch2_fsck_write_inode(trans, target);
- if (ret)
- goto err;
- }
-out:
-err:
-fsck_err:
- bch2_trans_iter_exit(trans, &bp_iter);
- printbuf_exit(&buf);
- bch_err_fn(c, ret);
- return ret;
-}
-
-noinline_for_stack
-static int check_dirent_target(struct btree_trans *trans,
- struct btree_iter *iter,
- struct bkey_s_c_dirent d,
- struct bch_inode_unpacked *target)
-{
- struct bch_fs *c = trans->c;
- struct bkey_i_dirent *n;
- struct printbuf buf = PRINTBUF;
- int ret = 0;
-
- ret = check_dirent_inode_dirent(trans, iter, d, target);
- if (ret)
- goto err;
-
- if (fsck_err_on(d.v->d_type != inode_d_type(target),
- trans, dirent_d_type_wrong,
- "incorrect d_type: got %s, should be %s:\n%s",
- bch2_d_type_str(d.v->d_type),
- bch2_d_type_str(inode_d_type(target)),
- (printbuf_reset(&buf),
- bch2_bkey_val_to_text(&buf, c, d.s_c), buf.buf))) {
- n = bch2_trans_kmalloc(trans, bkey_bytes(d.k));
- ret = PTR_ERR_OR_ZERO(n);
- if (ret)
- goto err;
-
- bkey_reassemble(&n->k_i, d.s_c);
- n->v.d_type = inode_d_type(target);
- if (n->v.d_type == DT_SUBVOL) {
- n->v.d_parent_subvol = cpu_to_le32(target->bi_parent_subvol);
- n->v.d_child_subvol = cpu_to_le32(target->bi_subvol);
- } else {
- n->v.d_inum = cpu_to_le64(target->bi_inum);
- }
-
- ret = bch2_trans_update(trans, iter, &n->k_i, 0);
- if (ret)
- goto err;
-
- d = dirent_i_to_s_c(n);
- }
-err:
-fsck_err:
- printbuf_exit(&buf);
- bch_err_fn(c, ret);
- return ret;
-}
-
/* find a subvolume that's a descendent of @snapshot: */
static int find_snapshot_subvol(struct btree_trans *trans, u32 snapshot, u32 *subvolid)
{
@@ -2247,7 +2028,7 @@ static int check_dirent_to_subvol(struct btree_trans *trans, struct btree_iter *
if (fsck_err(trans, dirent_to_missing_subvol,
"dirent points to missing subvolume\n%s",
(bch2_bkey_val_to_text(&buf, c, d.s_c), buf.buf)))
- return __remove_dirent(trans, d.k->p);
+ return bch2_fsck_remove_dirent(trans, d.k->p);
ret = 0;
goto out;
}
@@ -2291,7 +2072,7 @@ static int check_dirent_to_subvol(struct btree_trans *trans, struct btree_iter *
goto err;
}
- ret = check_dirent_target(trans, iter, d, &subvol_root);
+ ret = bch2_check_dirent_target(trans, iter, d, &subvol_root, true);
if (ret)
goto err;
out:
@@ -2378,13 +2159,13 @@ static int check_dirent(struct btree_trans *trans, struct btree_iter *iter,
(printbuf_reset(&buf),
bch2_bkey_val_to_text(&buf, c, k),
buf.buf))) {
- ret = __remove_dirent(trans, d.k->p);
+ ret = bch2_fsck_remove_dirent(trans, d.k->p);
if (ret)
goto err;
}
darray_for_each(target->inodes, i) {
- ret = check_dirent_target(trans, iter, d, &i->inode);
+ ret = bch2_check_dirent_target(trans, iter, d, &i->inode, true);
if (ret)
goto err;
}
diff --git a/fs/bcachefs/inode.c b/fs/bcachefs/inode.c
index 04ec05206f8c..80051073f613 100644
--- a/fs/bcachefs/inode.c
+++ b/fs/bcachefs/inode.c
@@ -731,10 +731,9 @@ int bch2_trigger_inode(struct btree_trans *trans,
bkey_s_to_inode_v3(new).v->bi_journal_seq = cpu_to_le64(trans->journal_res.seq);
}
- s64 nr = bkey_is_inode(new.k) - bkey_is_inode(old.k);
- if ((flags & (BTREE_TRIGGER_transactional|BTREE_TRIGGER_gc)) && nr) {
- struct disk_accounting_pos acc = { .type = BCH_DISK_ACCOUNTING_nr_inodes };
- int ret = bch2_disk_accounting_mod(trans, &acc, &nr, 1, flags & BTREE_TRIGGER_gc);
+ s64 nr[1] = { bkey_is_inode(new.k) - bkey_is_inode(old.k) };
+ if ((flags & (BTREE_TRIGGER_transactional|BTREE_TRIGGER_gc)) && nr[0]) {
+ int ret = bch2_disk_accounting_mod2(trans, flags & BTREE_TRIGGER_gc, nr, nr_inodes);
if (ret)
return ret;
}
@@ -868,19 +867,6 @@ void bch2_inode_init(struct bch_fs *c, struct bch_inode_unpacked *inode_u,
uid, gid, mode, rdev, parent);
}
-static inline u32 bkey_generation(struct bkey_s_c k)
-{
- switch (k.k->type) {
- case KEY_TYPE_inode:
- case KEY_TYPE_inode_v2:
- BUG();
- case KEY_TYPE_inode_generation:
- return le32_to_cpu(bkey_s_c_to_inode_generation(k).v->bi_generation);
- default:
- return 0;
- }
-}
-
static struct bkey_i_inode_alloc_cursor *
bch2_inode_alloc_cursor_get(struct btree_trans *trans, u64 cpu, u64 *min, u64 *max)
{
@@ -1092,7 +1078,7 @@ retry:
bch2_fs_inconsistent(c,
"inode %llu:%u not found when deleting",
inum.inum, snapshot);
- ret = -EIO;
+ ret = -BCH_ERR_ENOENT_inode;
goto err;
}
@@ -1198,6 +1184,7 @@ void bch2_inode_opts_get(struct bch_io_opts *opts, struct bch_fs *c,
opts->_name##_from_inode = true; \
} else { \
opts->_name = c->opts._name; \
+ opts->_name##_from_inode = false; \
}
BCH_INODE_OPTS()
#undef x
@@ -1255,7 +1242,7 @@ retry:
bch2_fs_inconsistent(c,
"inode %llu:%u not found when deleting",
inum, snapshot);
- ret = -EIO;
+ ret = -BCH_ERR_ENOENT_inode;
goto err;
}
diff --git a/fs/bcachefs/inode.h b/fs/bcachefs/inode.h
index 428b9be6af34..f82cfbf460d0 100644
--- a/fs/bcachefs/inode.h
+++ b/fs/bcachefs/inode.h
@@ -277,6 +277,7 @@ static inline bool bch2_inode_should_have_single_bp(struct bch_inode_unpacked *i
bool inode_has_bp = inode->bi_dir || inode->bi_dir_offset;
return S_ISDIR(inode->bi_mode) ||
+ inode->bi_subvol ||
(!inode->bi_nlink && inode_has_bp);
}
diff --git a/fs/bcachefs/inode_format.h b/fs/bcachefs/inode_format.h
index b99a5bf1a75e..117110af1e3f 100644
--- a/fs/bcachefs/inode_format.h
+++ b/fs/bcachefs/inode_format.h
@@ -137,7 +137,8 @@ enum inode_opt_id {
x(i_sectors_dirty, 6) \
x(unlinked, 7) \
x(backptr_untrusted, 8) \
- x(has_child_snapshot, 9)
+ x(has_child_snapshot, 9) \
+ x(casefolded, 10)
/* bits 20+ reserved for packed fields below: */
diff --git a/fs/bcachefs/io_misc.c b/fs/bcachefs/io_misc.c
index 5353979117b0..6b842c8d21be 100644
--- a/fs/bcachefs/io_misc.c
+++ b/fs/bcachefs/io_misc.c
@@ -115,7 +115,8 @@ err:
bch2_increment_clock(c, sectors_allocated, WRITE);
if (should_print_err(ret)) {
struct printbuf buf = PRINTBUF;
- bch2_inum_offset_err_msg_trans(trans, &buf, inum, iter->pos.offset << 9);
+ lockrestart_do(trans,
+ bch2_inum_offset_err_msg_trans(trans, &buf, inum, iter->pos.offset << 9));
prt_printf(&buf, "fallocate error: %s", bch2_err_str(ret));
bch_err_ratelimited(c, "%s", buf.buf);
printbuf_exit(&buf);
diff --git a/fs/bcachefs/io_read.c b/fs/bcachefs/io_read.c
index 8c7b2d3d779d..f1503df57dc7 100644
--- a/fs/bcachefs/io_read.c
+++ b/fs/bcachefs/io_read.c
@@ -25,8 +25,15 @@
#include "subvolume.h"
#include "trace.h"
+#include <linux/random.h>
#include <linux/sched/mm.h>
+#ifdef CONFIG_BCACHEFS_DEBUG
+static unsigned bch2_read_corrupt_ratio;
+module_param_named(read_corrupt_ratio, bch2_read_corrupt_ratio, uint, 0644);
+MODULE_PARM_DESC(read_corrupt_ratio, "Simulated read corruption ratio, for testing (debug builds only)");
+#endif
+
#ifndef CONFIG_BCACHEFS_NO_LATENCY_ACCT
static bool bch2_target_congested(struct bch_fs *c, u16 target)
@@ -59,7 +66,7 @@ static bool bch2_target_congested(struct bch_fs *c, u16 target)
}
rcu_read_unlock();
- return bch2_rand_range(nr * CONGESTED_MAX) < total;
+ return get_random_u32_below(nr * CONGESTED_MAX) < total;
}
#else
@@ -80,6 +87,7 @@ struct promote_op {
struct rhash_head hash;
struct bpos pos;
+ struct work_struct work;
struct data_update write;
struct bio_vec bi_inline_vecs[]; /* must be last */
};
@@ -96,6 +104,33 @@ static inline bool have_io_error(struct bch_io_failures *failed)
return failed && failed->nr;
}
+static inline struct data_update *rbio_data_update(struct bch_read_bio *rbio)
+{
+ EBUG_ON(rbio->split);
+
+ return rbio->data_update
+ ? container_of(rbio, struct data_update, rbio)
+ : NULL;
+}
+
+static bool ptr_being_rewritten(struct bch_read_bio *orig, unsigned dev)
+{
+ struct data_update *u = rbio_data_update(orig);
+ if (!u)
+ return false;
+
+ struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(bkey_i_to_s_c(u->k.k));
+ unsigned i = 0;
+ bkey_for_each_ptr(ptrs, ptr) {
+ if (ptr->dev == dev &&
+ u->data_opts.rewrite_ptrs & BIT(i))
+ return true;
+ i++;
+ }
+
+ return false;
+}
+
static inline int should_promote(struct bch_fs *c, struct bkey_s_c k,
struct bpos pos,
struct bch_io_opts opts,
@@ -105,7 +140,7 @@ static inline int should_promote(struct bch_fs *c, struct bkey_s_c k,
if (!have_io_error(failed)) {
BUG_ON(!opts.promote_target);
- if (!(flags & BCH_READ_MAY_PROMOTE))
+ if (!(flags & BCH_READ_may_promote))
return -BCH_ERR_nopromote_may_not;
if (bch2_bkey_has_target(c, k, opts.promote_target))
@@ -125,98 +160,93 @@ static inline int should_promote(struct bch_fs *c, struct bkey_s_c k,
return 0;
}
-static void promote_free(struct bch_fs *c, struct promote_op *op)
+static noinline void promote_free(struct bch_read_bio *rbio)
{
- int ret;
+ struct promote_op *op = container_of(rbio, struct promote_op, write.rbio);
+ struct bch_fs *c = rbio->c;
+
+ int ret = rhashtable_remove_fast(&c->promote_table, &op->hash,
+ bch_promote_params);
+ BUG_ON(ret);
bch2_data_update_exit(&op->write);
- ret = rhashtable_remove_fast(&c->promote_table, &op->hash,
- bch_promote_params);
- BUG_ON(ret);
bch2_write_ref_put(c, BCH_WRITE_REF_promote);
kfree_rcu(op, rcu);
}
static void promote_done(struct bch_write_op *wop)
{
- struct promote_op *op =
- container_of(wop, struct promote_op, write.op);
- struct bch_fs *c = op->write.op.c;
+ struct promote_op *op = container_of(wop, struct promote_op, write.op);
+ struct bch_fs *c = op->write.rbio.c;
- bch2_time_stats_update(&c->times[BCH_TIME_data_promote],
- op->start_time);
- promote_free(c, op);
+ bch2_time_stats_update(&c->times[BCH_TIME_data_promote], op->start_time);
+ promote_free(&op->write.rbio);
}
-static void promote_start(struct promote_op *op, struct bch_read_bio *rbio)
+static void promote_start_work(struct work_struct *work)
{
- struct bio *bio = &op->write.op.wbio.bio;
+ struct promote_op *op = container_of(work, struct promote_op, work);
- trace_and_count(op->write.op.c, read_promote, &rbio->bio);
+ bch2_data_update_read_done(&op->write);
+}
- /* we now own pages: */
- BUG_ON(!rbio->bounce);
- BUG_ON(rbio->bio.bi_vcnt > bio->bi_max_vecs);
+static noinline void promote_start(struct bch_read_bio *rbio)
+{
+ struct promote_op *op = container_of(rbio, struct promote_op, write.rbio);
- memcpy(bio->bi_io_vec, rbio->bio.bi_io_vec,
- sizeof(struct bio_vec) * rbio->bio.bi_vcnt);
- swap(bio->bi_vcnt, rbio->bio.bi_vcnt);
+ trace_and_count(op->write.op.c, io_read_promote, &rbio->bio);
- bch2_data_update_read_done(&op->write, rbio->pick.crc);
+ INIT_WORK(&op->work, promote_start_work);
+ queue_work(rbio->c->write_ref_wq, &op->work);
}
-static struct promote_op *__promote_alloc(struct btree_trans *trans,
- enum btree_id btree_id,
- struct bkey_s_c k,
- struct bpos pos,
- struct extent_ptr_decoded *pick,
- struct bch_io_opts opts,
- unsigned sectors,
- struct bch_read_bio **rbio,
- struct bch_io_failures *failed)
+static struct bch_read_bio *__promote_alloc(struct btree_trans *trans,
+ enum btree_id btree_id,
+ struct bkey_s_c k,
+ struct bpos pos,
+ struct extent_ptr_decoded *pick,
+ unsigned sectors,
+ struct bch_read_bio *orig,
+ struct bch_io_failures *failed)
{
struct bch_fs *c = trans->c;
- struct promote_op *op = NULL;
- struct bio *bio;
- unsigned pages = DIV_ROUND_UP(sectors, PAGE_SECTORS);
int ret;
- if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_promote))
- return ERR_PTR(-BCH_ERR_nopromote_no_writes);
+ struct data_update_opts update_opts = { .write_flags = BCH_WRITE_alloc_nowait };
- op = kzalloc(struct_size(op, bi_inline_vecs, pages), GFP_KERNEL);
- if (!op) {
- ret = -BCH_ERR_nopromote_enomem;
- goto err;
- }
+ if (!have_io_error(failed)) {
+ update_opts.target = orig->opts.promote_target;
+ update_opts.extra_replicas = 1;
+ update_opts.write_flags |= BCH_WRITE_cached;
+ update_opts.write_flags |= BCH_WRITE_only_specified_devs;
+ } else {
+ update_opts.target = orig->opts.foreground_target;
- op->start_time = local_clock();
- op->pos = pos;
+ struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
+ unsigned ptr_bit = 1;
+ bkey_for_each_ptr(ptrs, ptr) {
+ if (bch2_dev_io_failures(failed, ptr->dev) &&
+ !ptr_being_rewritten(orig, ptr->dev))
+ update_opts.rewrite_ptrs |= ptr_bit;
+ ptr_bit <<= 1;
+ }
- /*
- * We don't use the mempool here because extents that aren't
- * checksummed or compressed can be too big for the mempool:
- */
- *rbio = kzalloc(sizeof(struct bch_read_bio) +
- sizeof(struct bio_vec) * pages,
- GFP_KERNEL);
- if (!*rbio) {
- ret = -BCH_ERR_nopromote_enomem;
- goto err;
+ if (!update_opts.rewrite_ptrs)
+ return NULL;
}
- rbio_init(&(*rbio)->bio, opts);
- bio_init(&(*rbio)->bio, NULL, (*rbio)->bio.bi_inline_vecs, pages, 0);
+ if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_promote))
+ return ERR_PTR(-BCH_ERR_nopromote_no_writes);
- if (bch2_bio_alloc_pages(&(*rbio)->bio, sectors << 9, GFP_KERNEL)) {
+ struct promote_op *op = kzalloc(sizeof(*op), GFP_KERNEL);
+ if (!op) {
ret = -BCH_ERR_nopromote_enomem;
- goto err;
+ goto err_put;
}
- (*rbio)->bounce = true;
- (*rbio)->split = true;
- (*rbio)->kmalloc = true;
+ op->start_time = local_clock();
+ op->pos = pos;
if (rhashtable_lookup_insert_fast(&c->promote_table, &op->hash,
bch_promote_params)) {
@@ -224,64 +254,43 @@ static struct promote_op *__promote_alloc(struct btree_trans *trans,
goto err;
}
- bio = &op->write.op.wbio.bio;
- bio_init(bio, NULL, bio->bi_inline_vecs, pages, 0);
-
- struct data_update_opts update_opts = {};
-
- if (!have_io_error(failed)) {
- update_opts.target = opts.promote_target;
- update_opts.extra_replicas = 1;
- update_opts.write_flags = BCH_WRITE_ALLOC_NOWAIT|BCH_WRITE_CACHED;
- } else {
- update_opts.target = opts.foreground_target;
-
- struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
- unsigned ptr_bit = 1;
- bkey_for_each_ptr(ptrs, ptr) {
- if (bch2_dev_io_failures(failed, ptr->dev))
- update_opts.rewrite_ptrs |= ptr_bit;
- ptr_bit <<= 1;
- }
- }
-
ret = bch2_data_update_init(trans, NULL, NULL, &op->write,
writepoint_hashed((unsigned long) current),
- opts,
+ &orig->opts,
update_opts,
btree_id, k);
/*
* possible errors: -BCH_ERR_nocow_lock_blocked,
* -BCH_ERR_ENOSPC_disk_reservation:
*/
- if (ret) {
- BUG_ON(rhashtable_remove_fast(&c->promote_table, &op->hash,
- bch_promote_params));
- goto err;
- }
+ if (ret)
+ goto err_remove_hash;
+ rbio_init_fragment(&op->write.rbio.bio, orig);
+ op->write.rbio.bounce = true;
+ op->write.rbio.promote = true;
op->write.op.end_io = promote_done;
- return op;
+ return &op->write.rbio;
+err_remove_hash:
+ BUG_ON(rhashtable_remove_fast(&c->promote_table, &op->hash,
+ bch_promote_params));
err:
- if (*rbio)
- bio_free_pages(&(*rbio)->bio);
- kfree(*rbio);
- *rbio = NULL;
+ bio_free_pages(&op->write.op.wbio.bio);
/* We may have added to the rhashtable and thus need rcu freeing: */
kfree_rcu(op, rcu);
+err_put:
bch2_write_ref_put(c, BCH_WRITE_REF_promote);
return ERR_PTR(ret);
}
noinline
-static struct promote_op *promote_alloc(struct btree_trans *trans,
+static struct bch_read_bio *promote_alloc(struct btree_trans *trans,
struct bvec_iter iter,
struct bkey_s_c k,
struct extent_ptr_decoded *pick,
- struct bch_io_opts opts,
unsigned flags,
- struct bch_read_bio **rbio,
+ struct bch_read_bio *orig,
bool *bounce,
bool *read_full,
struct bch_io_failures *failed)
@@ -301,18 +310,21 @@ static struct promote_op *promote_alloc(struct btree_trans *trans,
struct bpos pos = promote_full
? bkey_start_pos(k.k)
: POS(k.k->p.inode, iter.bi_sector);
- struct promote_op *promote;
int ret;
- ret = should_promote(c, k, pos, opts, flags, failed);
+ ret = should_promote(c, k, pos, orig->opts, flags, failed);
if (ret)
goto nopromote;
- promote = __promote_alloc(trans,
- k.k->type == KEY_TYPE_reflink_v
- ? BTREE_ID_reflink
- : BTREE_ID_extents,
- k, pos, pick, opts, sectors, rbio, failed);
+ struct bch_read_bio *promote =
+ __promote_alloc(trans,
+ k.k->type == KEY_TYPE_reflink_v
+ ? BTREE_ID_reflink
+ : BTREE_ID_extents,
+ k, pos, pick, sectors, orig, failed);
+ if (!promote)
+ return NULL;
+
ret = PTR_ERR_OR_ZERO(promote);
if (ret)
goto nopromote;
@@ -321,7 +333,7 @@ static struct promote_op *promote_alloc(struct btree_trans *trans,
*read_full = promote_full;
return promote;
nopromote:
- trace_read_nopromote(c, ret);
+ trace_io_read_nopromote(c, ret);
return NULL;
}
@@ -330,9 +342,17 @@ nopromote:
static int bch2_read_err_msg_trans(struct btree_trans *trans, struct printbuf *out,
struct bch_read_bio *rbio, struct bpos read_pos)
{
- return bch2_inum_offset_err_msg_trans(trans, out,
- (subvol_inum) { rbio->subvol, read_pos.inode },
- read_pos.offset << 9);
+ int ret = lockrestart_do(trans,
+ bch2_inum_offset_err_msg_trans(trans, out,
+ (subvol_inum) { rbio->subvol, read_pos.inode },
+ read_pos.offset << 9));
+ if (ret)
+ return ret;
+
+ if (rbio->data_update)
+ prt_str(out, "(internal move) ");
+
+ return 0;
}
static void bch2_read_err_msg(struct bch_fs *c, struct printbuf *out,
@@ -341,10 +361,6 @@ static void bch2_read_err_msg(struct bch_fs *c, struct printbuf *out,
bch2_trans_run(c, bch2_read_err_msg_trans(trans, out, rbio, read_pos));
}
-#define READ_RETRY_AVOID 1
-#define READ_RETRY 2
-#define READ_ERR 3
-
enum rbio_context {
RBIO_CONTEXT_NULL,
RBIO_CONTEXT_HIGHPRI,
@@ -375,20 +391,25 @@ static inline struct bch_read_bio *bch2_rbio_free(struct bch_read_bio *rbio)
{
BUG_ON(rbio->bounce && !rbio->split);
- if (rbio->promote)
- promote_free(rbio->c, rbio->promote);
- rbio->promote = NULL;
-
- if (rbio->bounce)
- bch2_bio_free_pages_pool(rbio->c, &rbio->bio);
+ if (rbio->have_ioref) {
+ struct bch_dev *ca = bch2_dev_have_ref(rbio->c, rbio->pick.ptr.dev);
+ percpu_ref_put(&ca->io_ref);
+ }
if (rbio->split) {
struct bch_read_bio *parent = rbio->parent;
- if (rbio->kmalloc)
- kfree(rbio);
- else
+ if (unlikely(rbio->promote)) {
+ if (!rbio->bio.bi_status)
+ promote_start(rbio);
+ else
+ promote_free(rbio);
+ } else {
+ if (rbio->bounce)
+ bch2_bio_free_pages_pool(rbio->c, &rbio->bio);
+
bio_put(&rbio->bio);
+ }
rbio = parent;
}
@@ -408,61 +429,49 @@ static void bch2_rbio_done(struct bch_read_bio *rbio)
bio_endio(&rbio->bio);
}
-static void bch2_read_retry_nodecode(struct bch_fs *c, struct bch_read_bio *rbio,
- struct bvec_iter bvec_iter,
- struct bch_io_failures *failed,
- unsigned flags)
+static noinline int bch2_read_retry_nodecode(struct btree_trans *trans,
+ struct bch_read_bio *rbio,
+ struct bvec_iter bvec_iter,
+ struct bch_io_failures *failed,
+ unsigned flags)
{
- struct btree_trans *trans = bch2_trans_get(c);
- struct btree_iter iter;
- struct bkey_buf sk;
- struct bkey_s_c k;
- int ret;
-
- flags &= ~BCH_READ_LAST_FRAGMENT;
- flags |= BCH_READ_MUST_CLONE;
-
- bch2_bkey_buf_init(&sk);
-
- bch2_trans_iter_init(trans, &iter, rbio->data_btree,
- rbio->read_pos, BTREE_ITER_slots);
+ struct data_update *u = container_of(rbio, struct data_update, rbio);
retry:
bch2_trans_begin(trans);
- rbio->bio.bi_status = 0;
- ret = lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_slot(&iter)));
+ struct btree_iter iter;
+ struct bkey_s_c k;
+ int ret = lockrestart_do(trans,
+ bkey_err(k = bch2_bkey_get_iter(trans, &iter,
+ u->btree_id, bkey_start_pos(&u->k.k->k),
+ 0)));
if (ret)
goto err;
- bch2_bkey_buf_reassemble(&sk, c, k);
- k = bkey_i_to_s_c(sk.k);
-
- if (!bch2_bkey_matches_ptr(c, k,
- rbio->pick.ptr,
- rbio->data_pos.offset -
- rbio->pick.crc.offset)) {
+ if (!bkey_and_val_eq(k, bkey_i_to_s_c(u->k.k))) {
/* extent we wanted to read no longer exists: */
- rbio->hole = true;
- goto out;
+ rbio->ret = -BCH_ERR_data_read_key_overwritten;
+ goto err;
}
ret = __bch2_read_extent(trans, rbio, bvec_iter,
- rbio->read_pos,
- rbio->data_btree,
- k, 0, failed, flags);
- if (ret == READ_RETRY)
- goto retry;
- if (ret)
- goto err;
-out:
- bch2_rbio_done(rbio);
- bch2_trans_iter_exit(trans, &iter);
- bch2_trans_put(trans);
- bch2_bkey_buf_exit(&sk, c);
- return;
+ bkey_start_pos(&u->k.k->k),
+ u->btree_id,
+ bkey_i_to_s_c(u->k.k),
+ 0, failed, flags, -1);
err:
- rbio->bio.bi_status = BLK_STS_IOERR;
- goto out;
+ bch2_trans_iter_exit(trans, &iter);
+
+ if (bch2_err_matches(ret, BCH_ERR_data_read_retry))
+ goto retry;
+
+ if (ret) {
+ rbio->bio.bi_status = BLK_STS_IOERR;
+ rbio->ret = ret;
+ }
+
+ BUG_ON(atomic_read(&rbio->bio.__bi_remaining) != 1);
+ return ret;
}
static void bch2_rbio_retry(struct work_struct *work)
@@ -477,45 +486,80 @@ static void bch2_rbio_retry(struct work_struct *work)
.inum = rbio->read_pos.inode,
};
struct bch_io_failures failed = { .nr = 0 };
+ struct btree_trans *trans = bch2_trans_get(c);
- trace_and_count(c, read_retry, &rbio->bio);
+ trace_io_read_retry(&rbio->bio);
+ this_cpu_add(c->counters[BCH_COUNTER_io_read_retry],
+ bvec_iter_sectors(rbio->bvec_iter));
- if (rbio->retry == READ_RETRY_AVOID)
- bch2_mark_io_failure(&failed, &rbio->pick);
+ if (bch2_err_matches(rbio->ret, BCH_ERR_data_read_retry_avoid))
+ bch2_mark_io_failure(&failed, &rbio->pick,
+ rbio->ret == -BCH_ERR_data_read_retry_csum_err);
- rbio->bio.bi_status = 0;
+ if (!rbio->split) {
+ rbio->bio.bi_status = 0;
+ rbio->ret = 0;
+ }
+
+ unsigned subvol = rbio->subvol;
+ struct bpos read_pos = rbio->read_pos;
rbio = bch2_rbio_free(rbio);
- flags |= BCH_READ_IN_RETRY;
- flags &= ~BCH_READ_MAY_PROMOTE;
+ flags |= BCH_READ_in_retry;
+ flags &= ~BCH_READ_may_promote;
+ flags &= ~BCH_READ_last_fragment;
+ flags |= BCH_READ_must_clone;
+
+ int ret = rbio->data_update
+ ? bch2_read_retry_nodecode(trans, rbio, iter, &failed, flags)
+ : __bch2_read(trans, rbio, iter, inum, &failed, flags);
- if (flags & BCH_READ_NODECODE) {
- bch2_read_retry_nodecode(c, rbio, iter, &failed, flags);
+ if (ret) {
+ rbio->ret = ret;
+ rbio->bio.bi_status = BLK_STS_IOERR;
} else {
- flags &= ~BCH_READ_LAST_FRAGMENT;
- flags |= BCH_READ_MUST_CLONE;
+ struct printbuf buf = PRINTBUF;
+
+ lockrestart_do(trans,
+ bch2_inum_offset_err_msg_trans(trans, &buf,
+ (subvol_inum) { subvol, read_pos.inode },
+ read_pos.offset << 9));
+ if (rbio->data_update)
+ prt_str(&buf, "(internal move) ");
+ prt_str(&buf, "successful retry");
- __bch2_read(c, rbio, iter, inum, &failed, flags);
+ bch_err_ratelimited(c, "%s", buf.buf);
+ printbuf_exit(&buf);
}
+
+ bch2_rbio_done(rbio);
+ bch2_trans_put(trans);
}
-static void bch2_rbio_error(struct bch_read_bio *rbio, int retry,
- blk_status_t error)
+static void bch2_rbio_error(struct bch_read_bio *rbio,
+ int ret, blk_status_t blk_error)
{
- rbio->retry = retry;
+ BUG_ON(ret >= 0);
+
+ rbio->ret = ret;
+ rbio->bio.bi_status = blk_error;
+
+ bch2_rbio_parent(rbio)->saw_error = true;
- if (rbio->flags & BCH_READ_IN_RETRY)
+ if (rbio->flags & BCH_READ_in_retry)
return;
- if (retry == READ_ERR) {
+ if (bch2_err_matches(ret, BCH_ERR_data_read_retry)) {
+ bch2_rbio_punt(rbio, bch2_rbio_retry,
+ RBIO_CONTEXT_UNBOUND, system_unbound_wq);
+ } else {
rbio = bch2_rbio_free(rbio);
- rbio->bio.bi_status = error;
+ rbio->ret = ret;
+ rbio->bio.bi_status = blk_error;
+
bch2_rbio_done(rbio);
- } else {
- bch2_rbio_punt(rbio, bch2_rbio_retry,
- RBIO_CONTEXT_UNBOUND, system_unbound_wq);
}
}
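
The old READ_RETRY_AVOID/READ_RETRY/READ_ERR constants are gone; the read path
now stores a negative bcachefs error code in rbio->ret and dispatches on it
with bch2_err_matches(), so a single check against BCH_ERR_data_read_retry
covers every specific retry reason (csum_err, io_err, device_offline, and so
on). A toy illustration of that style of class matching, with hypothetical
names and not the actual bcachefs error-code machinery:

	/* Specific retry reasons grouped so one class check covers them all: */
	enum demo_err {
		DEMO_ERR_data_read_retry_begin = 1000,
		DEMO_ERR_data_read_retry_io_err,
		DEMO_ERR_data_read_retry_csum_err,
		DEMO_ERR_data_read_retry_device_offline,
		DEMO_ERR_data_read_retry_end,
	};

	/* Callers store negative codes, as rbio->ret does above: */
	static bool demo_err_is_data_read_retry(int ret)
	{
		return -ret > DEMO_ERR_data_read_retry_begin &&
		       -ret < DEMO_ERR_data_read_retry_end;
	}
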
@@ -531,15 +575,13 @@ static void bch2_read_io_err(struct work_struct *work)
bch2_read_err_msg(c, &buf, rbio, rbio->read_pos);
prt_printf(&buf, "data read error: %s", bch2_blk_status_to_str(bio->bi_status));
- if (ca) {
- bch2_io_error(ca, BCH_MEMBER_ERROR_read);
+ if (ca)
bch_err_ratelimited(ca, "%s", buf.buf);
- } else {
+ else
bch_err_ratelimited(c, "%s", buf.buf);
- }
printbuf_exit(&buf);
- bch2_rbio_error(rbio, READ_RETRY_AVOID, bio->bi_status);
+ bch2_rbio_error(rbio, -BCH_ERR_data_read_retry_io_err, bio->bi_status);
}
static int __bch2_rbio_narrow_crcs(struct btree_trans *trans,
@@ -621,14 +663,12 @@ static void bch2_read_csum_err(struct work_struct *work)
bch2_csum_err_msg(&buf, crc.csum_type, rbio->pick.crc.csum, csum);
struct bch_dev *ca = rbio->have_ioref ? bch2_dev_have_ref(c, rbio->pick.ptr.dev) : NULL;
- if (ca) {
- bch2_io_error(ca, BCH_MEMBER_ERROR_checksum);
+ if (ca)
bch_err_ratelimited(ca, "%s", buf.buf);
- } else {
+ else
bch_err_ratelimited(c, "%s", buf.buf);
- }
- bch2_rbio_error(rbio, READ_RETRY_AVOID, BLK_STS_IOERR);
+ bch2_rbio_error(rbio, -BCH_ERR_data_read_retry_csum_err, BLK_STS_IOERR);
printbuf_exit(&buf);
}
@@ -648,7 +688,7 @@ static void bch2_read_decompress_err(struct work_struct *work)
else
bch_err_ratelimited(c, "%s", buf.buf);
- bch2_rbio_error(rbio, READ_ERR, BLK_STS_IOERR);
+ bch2_rbio_error(rbio, -BCH_ERR_data_read_decompress_err, BLK_STS_IOERR);
printbuf_exit(&buf);
}
@@ -668,7 +708,7 @@ static void bch2_read_decrypt_err(struct work_struct *work)
else
bch_err_ratelimited(c, "%s", buf.buf);
- bch2_rbio_error(rbio, READ_ERR, BLK_STS_IOERR);
+ bch2_rbio_error(rbio, -BCH_ERR_data_read_decrypt_err, BLK_STS_IOERR);
printbuf_exit(&buf);
}
@@ -678,9 +718,11 @@ static void __bch2_read_endio(struct work_struct *work)
struct bch_read_bio *rbio =
container_of(work, struct bch_read_bio, work);
struct bch_fs *c = rbio->c;
- struct bio *src = &rbio->bio;
- struct bio *dst = &bch2_rbio_parent(rbio)->bio;
- struct bvec_iter dst_iter = rbio->bvec_iter;
+ struct bch_dev *ca = rbio->have_ioref ? bch2_dev_have_ref(c, rbio->pick.ptr.dev) : NULL;
+ struct bch_read_bio *parent = bch2_rbio_parent(rbio);
+ struct bio *src = &rbio->bio;
+ struct bio *dst = &parent->bio;
+ struct bvec_iter dst_iter = rbio->bvec_iter;
struct bch_extent_crc_unpacked crc = rbio->pick.crc;
struct nonce nonce = extent_nonce(rbio->version, crc);
unsigned nofs_flags;
@@ -698,8 +740,26 @@ static void __bch2_read_endio(struct work_struct *work)
src->bi_iter = rbio->bvec_iter;
}
+ bch2_maybe_corrupt_bio(src, bch2_read_corrupt_ratio);
+
csum = bch2_checksum_bio(c, crc.csum_type, nonce, src);
- if (bch2_crc_cmp(csum, rbio->pick.crc.csum) && !c->opts.no_data_io)
+ bool csum_good = !bch2_crc_cmp(csum, rbio->pick.crc.csum) || c->opts.no_data_io;
+
+ /*
+ * Checksum error: if the bio wasn't bounced, we may have been
+ * reading into buffers owned by userspace (that userspace can
+ * scribble over) - retry the read, bouncing it this time:
+ */
+ if (!csum_good && !rbio->bounce && (rbio->flags & BCH_READ_user_mapped)) {
+ rbio->flags |= BCH_READ_must_bounce;
+ bch2_rbio_error(rbio, -BCH_ERR_data_read_retry_csum_err_maybe_userspace,
+ BLK_STS_IOERR);
+ goto out;
+ }
+
+ bch2_account_io_completion(ca, BCH_MEMBER_ERROR_checksum, 0, csum_good);
+
+ if (!csum_good)
goto csum_err;
/*
@@ -712,32 +772,40 @@ static void __bch2_read_endio(struct work_struct *work)
if (unlikely(rbio->narrow_crcs))
bch2_rbio_narrow_crcs(rbio);
- if (rbio->flags & BCH_READ_NODECODE)
- goto nodecode;
+ if (likely(!parent->data_update)) {
+ /* Adjust crc to point to subset of data we want: */
+ crc.offset += rbio->offset_into_extent;
+ crc.live_size = bvec_iter_sectors(rbio->bvec_iter);
- /* Adjust crc to point to subset of data we want: */
- crc.offset += rbio->offset_into_extent;
- crc.live_size = bvec_iter_sectors(rbio->bvec_iter);
+ if (crc_is_compressed(crc)) {
+ ret = bch2_encrypt_bio(c, crc.csum_type, nonce, src);
+ if (ret)
+ goto decrypt_err;
- if (crc_is_compressed(crc)) {
- ret = bch2_encrypt_bio(c, crc.csum_type, nonce, src);
- if (ret)
- goto decrypt_err;
+ if (bch2_bio_uncompress(c, src, dst, dst_iter, crc) &&
+ !c->opts.no_data_io)
+ goto decompression_err;
+ } else {
+ /* don't need to decrypt the entire bio: */
+ nonce = nonce_add(nonce, crc.offset << 9);
+ bio_advance(src, crc.offset << 9);
- if (bch2_bio_uncompress(c, src, dst, dst_iter, crc) &&
- !c->opts.no_data_io)
- goto decompression_err;
- } else {
- /* don't need to decrypt the entire bio: */
- nonce = nonce_add(nonce, crc.offset << 9);
- bio_advance(src, crc.offset << 9);
+ BUG_ON(src->bi_iter.bi_size < dst_iter.bi_size);
+ src->bi_iter.bi_size = dst_iter.bi_size;
- BUG_ON(src->bi_iter.bi_size < dst_iter.bi_size);
- src->bi_iter.bi_size = dst_iter.bi_size;
+ ret = bch2_encrypt_bio(c, crc.csum_type, nonce, src);
+ if (ret)
+ goto decrypt_err;
- ret = bch2_encrypt_bio(c, crc.csum_type, nonce, src);
- if (ret)
- goto decrypt_err;
+ if (rbio->bounce) {
+ struct bvec_iter src_iter = src->bi_iter;
+
+ bio_copy_data_iter(dst, &dst_iter, src, &src_iter);
+ }
+ }
+ } else {
+ if (rbio->split)
+ rbio->parent->pick = rbio->pick;
if (rbio->bounce) {
struct bvec_iter src_iter = src->bi_iter;
@@ -754,12 +822,9 @@ static void __bch2_read_endio(struct work_struct *work)
ret = bch2_encrypt_bio(c, crc.csum_type, nonce, src);
if (ret)
goto decrypt_err;
-
- promote_start(rbio->promote, rbio);
- rbio->promote = NULL;
}
-nodecode:
- if (likely(!(rbio->flags & BCH_READ_IN_RETRY))) {
+
+ if (likely(!(rbio->flags & BCH_READ_in_retry))) {
rbio = bch2_rbio_free(rbio);
bch2_rbio_done(rbio);
}
@@ -767,17 +832,6 @@ out:
memalloc_nofs_restore(nofs_flags);
return;
csum_err:
- /*
- * Checksum error: if the bio wasn't bounced, we may have been
- * reading into buffers owned by userspace (that userspace can
- * scribble over) - retry the read, bouncing it this time:
- */
- if (!rbio->bounce && (rbio->flags & BCH_READ_USER_MAPPED)) {
- rbio->flags |= BCH_READ_MUST_BOUNCE;
- bch2_rbio_error(rbio, READ_RETRY, BLK_STS_IOERR);
- goto out;
- }
-
bch2_rbio_punt(rbio, bch2_read_csum_err, RBIO_CONTEXT_UNBOUND, system_unbound_wq);
goto out;
decompression_err:
@@ -797,10 +851,8 @@ static void bch2_read_endio(struct bio *bio)
struct workqueue_struct *wq = NULL;
enum rbio_context context = RBIO_CONTEXT_NULL;
- if (rbio->have_ioref) {
- bch2_latency_acct(ca, rbio->submit_time, READ);
- percpu_ref_put(&ca->io_ref);
- }
+ bch2_account_io_completion(ca, BCH_MEMBER_ERROR_read,
+ rbio->submit_time, !bio->bi_status);
if (!rbio->split)
rbio->bio.bi_end_io = rbio->end_io;
@@ -810,14 +862,14 @@ static void bch2_read_endio(struct bio *bio)
return;
}
- if (((rbio->flags & BCH_READ_RETRY_IF_STALE) && race_fault()) ||
+ if (((rbio->flags & BCH_READ_retry_if_stale) && race_fault()) ||
(ca && dev_ptr_stale(ca, &rbio->pick.ptr))) {
- trace_and_count(c, read_reuse_race, &rbio->bio);
+ trace_and_count(c, io_read_reuse_race, &rbio->bio);
- if (rbio->flags & BCH_READ_RETRY_IF_STALE)
- bch2_rbio_error(rbio, READ_RETRY, BLK_STS_AGAIN);
+ if (rbio->flags & BCH_READ_retry_if_stale)
+ bch2_rbio_error(rbio, -BCH_ERR_data_read_ptr_stale_retry, BLK_STS_AGAIN);
else
- bch2_rbio_error(rbio, READ_ERR, BLK_STS_AGAIN);
+ bch2_rbio_error(rbio, -BCH_ERR_data_read_ptr_stale_race, BLK_STS_AGAIN);
return;
}
@@ -883,15 +935,15 @@ int __bch2_read_extent(struct btree_trans *trans, struct bch_read_bio *orig,
struct bvec_iter iter, struct bpos read_pos,
enum btree_id data_btree, struct bkey_s_c k,
unsigned offset_into_extent,
- struct bch_io_failures *failed, unsigned flags)
+ struct bch_io_failures *failed, unsigned flags, int dev)
{
struct bch_fs *c = trans->c;
struct extent_ptr_decoded pick;
struct bch_read_bio *rbio = NULL;
- struct promote_op *promote = NULL;
bool bounce = false, read_full = false, narrow_crcs = false;
struct bpos data_pos = bkey_start_pos(k.k);
- int pick_ret;
+ struct data_update *u = rbio_data_update(orig);
+ int ret = 0;
if (bkey_extent_is_inline_data(k.k)) {
unsigned bytes = min_t(unsigned, iter.bi_size,
@@ -902,19 +954,21 @@ int __bch2_read_extent(struct btree_trans *trans, struct bch_read_bio *orig,
swap(iter.bi_size, bytes);
bio_advance_iter(&orig->bio, &iter, bytes);
zero_fill_bio_iter(&orig->bio, iter);
+ this_cpu_add(c->counters[BCH_COUNTER_io_read_inline],
+ bvec_iter_sectors(iter));
goto out_read_done;
}
retry_pick:
- pick_ret = bch2_bkey_pick_read_device(c, k, failed, &pick);
+ ret = bch2_bkey_pick_read_device(c, k, failed, &pick, dev);
/* hole or reservation - just zero fill: */
- if (!pick_ret)
+ if (!ret)
goto hole;
- if (unlikely(pick_ret < 0)) {
+ if (unlikely(ret < 0)) {
struct printbuf buf = PRINTBUF;
bch2_read_err_msg_trans(trans, &buf, orig, read_pos);
- prt_printf(&buf, "no device to read from: %s\n ", bch2_err_str(pick_ret));
+ prt_printf(&buf, "%s\n ", bch2_err_str(ret));
bch2_bkey_val_to_text(&buf, c, k);
bch_err_ratelimited(c, "%s", buf.buf);
@@ -930,6 +984,7 @@ retry_pick:
bch_err_ratelimited(c, "%s", buf.buf);
printbuf_exit(&buf);
+ ret = -BCH_ERR_data_read_no_encryption_key;
goto err;
}
@@ -941,62 +996,57 @@ retry_pick:
* retry path, don't check here, it'll be caught in bch2_read_endio()
* and we'll end up in the retry path:
*/
- if ((flags & BCH_READ_IN_RETRY) &&
+ if ((flags & BCH_READ_in_retry) &&
!pick.ptr.cached &&
ca &&
unlikely(dev_ptr_stale(ca, &pick.ptr))) {
read_from_stale_dirty_pointer(trans, ca, k, pick.ptr);
- bch2_mark_io_failure(failed, &pick);
+ bch2_mark_io_failure(failed, &pick, false);
percpu_ref_put(&ca->io_ref);
goto retry_pick;
}
- /*
- * Unlock the iterator while the btree node's lock is still in
- * cache, before doing the IO:
- */
- bch2_trans_unlock(trans);
+ if (likely(!u)) {
+ if (!(flags & BCH_READ_last_fragment) ||
+ bio_flagged(&orig->bio, BIO_CHAIN))
+ flags |= BCH_READ_must_clone;
+
+ narrow_crcs = !(flags & BCH_READ_in_retry) &&
+ bch2_can_narrow_extent_crcs(k, pick.crc);
+
+ if (narrow_crcs && (flags & BCH_READ_user_mapped))
+ flags |= BCH_READ_must_bounce;
- if (flags & BCH_READ_NODECODE) {
+ EBUG_ON(offset_into_extent + bvec_iter_sectors(iter) > k.k->size);
+
+ if (crc_is_compressed(pick.crc) ||
+ (pick.crc.csum_type != BCH_CSUM_none &&
+ (bvec_iter_sectors(iter) != pick.crc.uncompressed_size ||
+ (bch2_csum_type_is_encryption(pick.crc.csum_type) &&
+ (flags & BCH_READ_user_mapped)) ||
+ (flags & BCH_READ_must_bounce)))) {
+ read_full = true;
+ bounce = true;
+ }
+ } else {
/*
* can happen if we retry, and the extent we were going to read
* has been merged in the meantime:
*/
- if (pick.crc.compressed_size > orig->bio.bi_vcnt * PAGE_SECTORS) {
+ if (pick.crc.compressed_size > u->op.wbio.bio.bi_iter.bi_size) {
if (ca)
percpu_ref_put(&ca->io_ref);
- goto hole;
+ rbio->ret = -BCH_ERR_data_read_buffer_too_small;
+ goto out_read_done;
}
iter.bi_size = pick.crc.compressed_size << 9;
- goto get_bio;
- }
-
- if (!(flags & BCH_READ_LAST_FRAGMENT) ||
- bio_flagged(&orig->bio, BIO_CHAIN))
- flags |= BCH_READ_MUST_CLONE;
-
- narrow_crcs = !(flags & BCH_READ_IN_RETRY) &&
- bch2_can_narrow_extent_crcs(k, pick.crc);
-
- if (narrow_crcs && (flags & BCH_READ_USER_MAPPED))
- flags |= BCH_READ_MUST_BOUNCE;
-
- EBUG_ON(offset_into_extent + bvec_iter_sectors(iter) > k.k->size);
-
- if (crc_is_compressed(pick.crc) ||
- (pick.crc.csum_type != BCH_CSUM_none &&
- (bvec_iter_sectors(iter) != pick.crc.uncompressed_size ||
- (bch2_csum_type_is_encryption(pick.crc.csum_type) &&
- (flags & BCH_READ_USER_MAPPED)) ||
- (flags & BCH_READ_MUST_BOUNCE)))) {
read_full = true;
- bounce = true;
}
if (orig->opts.promote_target || have_io_error(failed))
- promote = promote_alloc(trans, iter, k, &pick, orig->opts, flags,
- &rbio, &bounce, &read_full, failed);
+ rbio = promote_alloc(trans, iter, k, &pick, flags, orig,
+ &bounce, &read_full, failed);
if (!read_full) {
EBUG_ON(crc_is_compressed(pick.crc));
@@ -1015,7 +1065,7 @@ retry_pick:
pick.crc.offset = 0;
pick.crc.live_size = bvec_iter_sectors(iter);
}
-get_bio:
+
if (rbio) {
/*
* promote already allocated bounce rbio:
@@ -1030,17 +1080,16 @@ get_bio:
} else if (bounce) {
unsigned sectors = pick.crc.compressed_size;
- rbio = rbio_init(bio_alloc_bioset(NULL,
+ rbio = rbio_init_fragment(bio_alloc_bioset(NULL,
DIV_ROUND_UP(sectors, PAGE_SECTORS),
0,
GFP_NOFS,
&c->bio_read_split),
- orig->opts);
+ orig);
bch2_bio_alloc_pages_pool(c, &rbio->bio, sectors << 9);
rbio->bounce = true;
- rbio->split = true;
- } else if (flags & BCH_READ_MUST_CLONE) {
+ } else if (flags & BCH_READ_must_clone) {
/*
* Have to clone if there were any splits, due to error
* reporting issues (if a split errored, and retrying didn't
@@ -1049,11 +1098,10 @@ get_bio:
* from the whole bio, in which case we don't want to retry and
* lose the error)
*/
- rbio = rbio_init(bio_alloc_clone(NULL, &orig->bio, GFP_NOFS,
+ rbio = rbio_init_fragment(bio_alloc_clone(NULL, &orig->bio, GFP_NOFS,
&c->bio_read_split),
- orig->opts);
+ orig);
rbio->bio.bi_iter = iter;
- rbio->split = true;
} else {
rbio = orig;
rbio->bio.bi_iter = iter;
@@ -1062,58 +1110,60 @@ get_bio:
EBUG_ON(bio_sectors(&rbio->bio) != pick.crc.compressed_size);
- rbio->c = c;
rbio->submit_time = local_clock();
- if (rbio->split)
- rbio->parent = orig;
- else
+ if (!rbio->split)
rbio->end_io = orig->bio.bi_end_io;
rbio->bvec_iter = iter;
rbio->offset_into_extent= offset_into_extent;
rbio->flags = flags;
rbio->have_ioref = ca != NULL;
rbio->narrow_crcs = narrow_crcs;
- rbio->hole = 0;
- rbio->retry = 0;
+ rbio->ret = 0;
rbio->context = 0;
- /* XXX: only initialize this if needed */
- rbio->devs_have = bch2_bkey_devs(k);
rbio->pick = pick;
rbio->subvol = orig->subvol;
rbio->read_pos = read_pos;
rbio->data_btree = data_btree;
rbio->data_pos = data_pos;
rbio->version = k.k->bversion;
- rbio->promote = promote;
INIT_WORK(&rbio->work, NULL);
- if (flags & BCH_READ_NODECODE)
- orig->pick = pick;
-
rbio->bio.bi_opf = orig->bio.bi_opf;
rbio->bio.bi_iter.bi_sector = pick.ptr.offset;
rbio->bio.bi_end_io = bch2_read_endio;
if (rbio->bounce)
- trace_and_count(c, read_bounce, &rbio->bio);
+ trace_and_count(c, io_read_bounce, &rbio->bio);
- this_cpu_add(c->counters[BCH_COUNTER_io_read], bio_sectors(&rbio->bio));
+ if (!u)
+ this_cpu_add(c->counters[BCH_COUNTER_io_read], bio_sectors(&rbio->bio));
+ else
+ this_cpu_add(c->counters[BCH_COUNTER_io_move_read], bio_sectors(&rbio->bio));
bch2_increment_clock(c, bio_sectors(&rbio->bio), READ);
/*
* If it's being moved internally, we don't want to flag it as a cache
* hit:
*/
- if (ca && pick.ptr.cached && !(flags & BCH_READ_NODECODE))
+ if (ca && pick.ptr.cached && !u)
bch2_bucket_io_time_reset(trans, pick.ptr.dev,
PTR_BUCKET_NR(ca, &pick.ptr), READ);
- if (!(flags & (BCH_READ_IN_RETRY|BCH_READ_LAST_FRAGMENT))) {
+ if (!(flags & (BCH_READ_in_retry|BCH_READ_last_fragment))) {
bio_inc_remaining(&orig->bio);
- trace_and_count(c, read_split, &orig->bio);
+ trace_and_count(c, io_read_split, &orig->bio);
}
- if (!rbio->pick.idx) {
+ /*
+ * Unlock the iterator while the btree node's lock is still in
+ * cache, before doing the IO:
+ */
+ if (!(flags & BCH_READ_in_retry))
+ bch2_trans_unlock(trans);
+ else
+ bch2_trans_unlock_long(trans);
+
+ if (likely(!rbio->pick.do_ec_reconstruct)) {
if (unlikely(!rbio->have_ioref)) {
struct printbuf buf = PRINTBUF;
bch2_read_err_msg_trans(trans, &buf, rbio, read_pos);
@@ -1123,7 +1173,9 @@ get_bio:
bch_err_ratelimited(c, "%s", buf.buf);
printbuf_exit(&buf);
- bch2_rbio_error(rbio, READ_RETRY_AVOID, BLK_STS_IOERR);
+ bch2_rbio_error(rbio,
+ -BCH_ERR_data_read_retry_device_offline,
+ BLK_STS_IOERR);
goto out;
}
@@ -1132,10 +1184,10 @@ get_bio:
bio_set_dev(&rbio->bio, ca->disk_sb.bdev);
if (unlikely(c->opts.no_data_io)) {
- if (likely(!(flags & BCH_READ_IN_RETRY)))
+ if (likely(!(flags & BCH_READ_in_retry)))
bio_endio(&rbio->bio);
} else {
- if (likely(!(flags & BCH_READ_IN_RETRY)))
+ if (likely(!(flags & BCH_READ_in_retry)))
submit_bio(&rbio->bio);
else
submit_bio_wait(&rbio->bio);
@@ -1149,70 +1201,73 @@ get_bio:
} else {
/* Attempting reconstruct read: */
if (bch2_ec_read_extent(trans, rbio, k)) {
- bch2_rbio_error(rbio, READ_RETRY_AVOID, BLK_STS_IOERR);
+ bch2_rbio_error(rbio, -BCH_ERR_data_read_retry_ec_reconstruct_err,
+ BLK_STS_IOERR);
goto out;
}
- if (likely(!(flags & BCH_READ_IN_RETRY)))
+ if (likely(!(flags & BCH_READ_in_retry)))
bio_endio(&rbio->bio);
}
out:
- if (likely(!(flags & BCH_READ_IN_RETRY))) {
+ if (likely(!(flags & BCH_READ_in_retry))) {
return 0;
} else {
+ bch2_trans_unlock(trans);
+
int ret;
rbio->context = RBIO_CONTEXT_UNBOUND;
bch2_read_endio(&rbio->bio);
- ret = rbio->retry;
+ ret = rbio->ret;
rbio = bch2_rbio_free(rbio);
- if (ret == READ_RETRY_AVOID) {
- bch2_mark_io_failure(failed, &pick);
- ret = READ_RETRY;
- }
-
- if (!ret)
- goto out_read_done;
+ if (bch2_err_matches(ret, BCH_ERR_data_read_retry_avoid))
+ bch2_mark_io_failure(failed, &pick,
+ ret == -BCH_ERR_data_read_retry_csum_err);
return ret;
}
err:
- if (flags & BCH_READ_IN_RETRY)
- return READ_ERR;
+ if (flags & BCH_READ_in_retry)
+ return ret;
- orig->bio.bi_status = BLK_STS_IOERR;
+ orig->bio.bi_status = BLK_STS_IOERR;
+ orig->ret = ret;
goto out_read_done;
hole:
+ this_cpu_add(c->counters[BCH_COUNTER_io_read_hole],
+ bvec_iter_sectors(iter));
/*
- * won't normally happen in the BCH_READ_NODECODE
- * (bch2_move_extent()) path, but if we retry and the extent we wanted
- * to read no longer exists we have to signal that:
+ * won't normally happen in the data update (bch2_move_extent()) path,
+ * but if we retry and the extent we wanted to read no longer exists we
+ * have to signal that:
*/
- if (flags & BCH_READ_NODECODE)
- orig->hole = true;
+ if (u)
+ orig->ret = -BCH_ERR_data_read_key_overwritten;
zero_fill_bio_iter(&orig->bio, iter);
out_read_done:
- if (flags & BCH_READ_LAST_FRAGMENT)
+ if ((flags & BCH_READ_last_fragment) &&
+ !(flags & BCH_READ_in_retry))
bch2_rbio_done(orig);
return 0;
}
-void __bch2_read(struct bch_fs *c, struct bch_read_bio *rbio,
- struct bvec_iter bvec_iter, subvol_inum inum,
- struct bch_io_failures *failed, unsigned flags)
+int __bch2_read(struct btree_trans *trans, struct bch_read_bio *rbio,
+ struct bvec_iter bvec_iter, subvol_inum inum,
+ struct bch_io_failures *failed, unsigned flags)
{
- struct btree_trans *trans = bch2_trans_get(c);
+ struct bch_fs *c = trans->c;
struct btree_iter iter;
struct bkey_buf sk;
struct bkey_s_c k;
int ret;
- BUG_ON(flags & BCH_READ_NODECODE);
+ EBUG_ON(rbio->data_update);
bch2_bkey_buf_init(&sk);
bch2_trans_iter_init(trans, &iter, BTREE_ID_extents,
@@ -1262,24 +1317,26 @@ void __bch2_read(struct bch_fs *c, struct bch_read_bio *rbio,
swap(bvec_iter.bi_size, bytes);
if (bvec_iter.bi_size == bytes)
- flags |= BCH_READ_LAST_FRAGMENT;
+ flags |= BCH_READ_last_fragment;
ret = __bch2_read_extent(trans, rbio, bvec_iter, iter.pos,
data_btree, k,
- offset_into_extent, failed, flags);
+ offset_into_extent, failed, flags, -1);
if (ret)
goto err;
- if (flags & BCH_READ_LAST_FRAGMENT)
+ if (flags & BCH_READ_last_fragment)
break;
swap(bvec_iter.bi_size, bytes);
bio_advance_iter(&rbio->bio, &bvec_iter, bytes);
err:
+ if (ret == -BCH_ERR_data_read_retry_csum_err_maybe_userspace)
+ flags |= BCH_READ_must_bounce;
+
if (ret &&
!bch2_err_matches(ret, BCH_ERR_transaction_restart) &&
- ret != READ_RETRY &&
- ret != READ_RETRY_AVOID)
+ !bch2_err_matches(ret, BCH_ERR_data_read_retry))
break;
}
@@ -1287,17 +1344,22 @@ err:
if (ret) {
struct printbuf buf = PRINTBUF;
- bch2_inum_offset_err_msg_trans(trans, &buf, inum, bvec_iter.bi_sector << 9);
- prt_printf(&buf, "read error %i from btree lookup", ret);
+ lockrestart_do(trans,
+ bch2_inum_offset_err_msg_trans(trans, &buf, inum,
+ bvec_iter.bi_sector << 9));
+ prt_printf(&buf, "read error: %s", bch2_err_str(ret));
bch_err_ratelimited(c, "%s", buf.buf);
printbuf_exit(&buf);
- rbio->bio.bi_status = BLK_STS_IOERR;
- bch2_rbio_done(rbio);
+ rbio->bio.bi_status = BLK_STS_IOERR;
+ rbio->ret = ret;
+
+ if (!(flags & BCH_READ_in_retry))
+ bch2_rbio_done(rbio);
}
- bch2_trans_put(trans);
bch2_bkey_buf_exit(&sk, c);
+ return ret;
}
void bch2_fs_io_read_exit(struct bch_fs *c)
diff --git a/fs/bcachefs/io_read.h b/fs/bcachefs/io_read.h
index a82e8a94ccb6..cd21950417f6 100644
--- a/fs/bcachefs/io_read.h
+++ b/fs/bcachefs/io_read.h
@@ -3,6 +3,7 @@
#define _BCACHEFS_IO_READ_H
#include "bkey_buf.h"
+#include "btree_iter.h"
#include "reflink.h"
struct bch_read_bio {
@@ -35,19 +36,18 @@ struct bch_read_bio {
u16 flags;
union {
struct {
- u16 bounce:1,
+ u16 data_update:1,
+ promote:1,
+ bounce:1,
split:1,
- kmalloc:1,
have_ioref:1,
narrow_crcs:1,
- hole:1,
- retry:2,
+ saw_error:1,
context:2;
};
u16 _state;
};
-
- struct bch_devs_list devs_have;
+ s16 ret;
struct extent_ptr_decoded pick;
@@ -65,8 +65,6 @@ struct bch_read_bio {
struct bpos data_pos;
struct bversion version;
- struct promote_op *promote;
-
struct bch_io_opts opts;
struct work_struct work;
@@ -108,23 +106,31 @@ static inline int bch2_read_indirect_extent(struct btree_trans *trans,
return 0;
}
+#define BCH_READ_FLAGS() \
+ x(retry_if_stale) \
+ x(may_promote) \
+ x(user_mapped) \
+ x(last_fragment) \
+ x(must_bounce) \
+ x(must_clone) \
+ x(in_retry)
+
+enum __bch_read_flags {
+#define x(n) __BCH_READ_##n,
+ BCH_READ_FLAGS()
+#undef x
+};
+
enum bch_read_flags {
- BCH_READ_RETRY_IF_STALE = 1 << 0,
- BCH_READ_MAY_PROMOTE = 1 << 1,
- BCH_READ_USER_MAPPED = 1 << 2,
- BCH_READ_NODECODE = 1 << 3,
- BCH_READ_LAST_FRAGMENT = 1 << 4,
-
- /* internal: */
- BCH_READ_MUST_BOUNCE = 1 << 5,
- BCH_READ_MUST_CLONE = 1 << 6,
- BCH_READ_IN_RETRY = 1 << 7,
+#define x(n) BCH_READ_##n = BIT(__BCH_READ_##n),
+ BCH_READ_FLAGS()
+#undef x
};
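
Both sets of flags (these read flags and the write flags further down in
io_write.h) now use the x-macro pattern: a single BCH_*_FLAGS() list generates
a bit-index enum and a bit-mask enum, and the same list can also emit a string
table for prt_bitflags(), as the write path does with bch2_write_flags.
Expanded by hand, the two read-flag enums above amount to roughly the
following (first two entries shown; the rest follow mechanically):

	enum __bch_read_flags {
		__BCH_READ_retry_if_stale,		/* == 0 */
		__BCH_READ_may_promote,			/* == 1 */
		/* ... one bit index per x() entry ... */
	};

	enum bch_read_flags {
		BCH_READ_retry_if_stale	= BIT(__BCH_READ_retry_if_stale),
		BCH_READ_may_promote	= BIT(__BCH_READ_may_promote),
		/* ... */
	};
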
int __bch2_read_extent(struct btree_trans *, struct bch_read_bio *,
struct bvec_iter, struct bpos, enum btree_id,
struct bkey_s_c, unsigned,
- struct bch_io_failures *, unsigned);
+ struct bch_io_failures *, unsigned, int);
static inline void bch2_read_extent(struct btree_trans *trans,
struct bch_read_bio *rbio, struct bpos read_pos,
@@ -132,37 +138,55 @@ static inline void bch2_read_extent(struct btree_trans *trans,
unsigned offset_into_extent, unsigned flags)
{
__bch2_read_extent(trans, rbio, rbio->bio.bi_iter, read_pos,
- data_btree, k, offset_into_extent, NULL, flags);
+ data_btree, k, offset_into_extent, NULL, flags, -1);
}
-void __bch2_read(struct bch_fs *, struct bch_read_bio *, struct bvec_iter,
- subvol_inum, struct bch_io_failures *, unsigned flags);
+int __bch2_read(struct btree_trans *, struct bch_read_bio *, struct bvec_iter,
+ subvol_inum, struct bch_io_failures *, unsigned flags);
static inline void bch2_read(struct bch_fs *c, struct bch_read_bio *rbio,
subvol_inum inum)
{
- struct bch_io_failures failed = { .nr = 0 };
-
BUG_ON(rbio->_state);
- rbio->c = c;
- rbio->start_time = local_clock();
rbio->subvol = inum.subvol;
- __bch2_read(c, rbio, rbio->bio.bi_iter, inum, &failed,
- BCH_READ_RETRY_IF_STALE|
- BCH_READ_MAY_PROMOTE|
- BCH_READ_USER_MAPPED);
+ bch2_trans_run(c,
+ __bch2_read(trans, rbio, rbio->bio.bi_iter, inum, NULL,
+ BCH_READ_retry_if_stale|
+ BCH_READ_may_promote|
+ BCH_READ_user_mapped));
+}
+
+static inline struct bch_read_bio *rbio_init_fragment(struct bio *bio,
+ struct bch_read_bio *orig)
+{
+ struct bch_read_bio *rbio = to_rbio(bio);
+
+ rbio->c = orig->c;
+ rbio->_state = 0;
+ rbio->flags = 0;
+ rbio->ret = 0;
+ rbio->split = true;
+ rbio->parent = orig;
+ rbio->opts = orig->opts;
+ return rbio;
}
static inline struct bch_read_bio *rbio_init(struct bio *bio,
- struct bch_io_opts opts)
+ struct bch_fs *c,
+ struct bch_io_opts opts,
+ bio_end_io_t end_io)
{
struct bch_read_bio *rbio = to_rbio(bio);
- rbio->_state = 0;
- rbio->promote = NULL;
- rbio->opts = opts;
+ rbio->start_time = local_clock();
+ rbio->c = c;
+ rbio->_state = 0;
+ rbio->flags = 0;
+ rbio->ret = 0;
+ rbio->opts = opts;
+ rbio->bio.bi_end_io = end_io;
return rbio;
}
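
Callers outside this file (not shown in this hunk) now hand rbio_init() the
filesystem pointer and the end_io callback up front instead of assigning
rbio->c and bi_end_io separately; a hypothetical call site, with
my_read_endio standing in for whatever completion the caller uses:

	struct bch_read_bio *rbio = rbio_init(bio, c, io_opts, my_read_endio);
	/* rbio->subvol, bi_iter etc. are still set up by the caller */
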
diff --git a/fs/bcachefs/io_write.c b/fs/bcachefs/io_write.c
index 03892388832b..29671075e3f1 100644
--- a/fs/bcachefs/io_write.c
+++ b/fs/bcachefs/io_write.c
@@ -34,6 +34,12 @@
#include <linux/random.h>
#include <linux/sched/mm.h>
+#ifdef CONFIG_BCACHEFS_DEBUG
+static unsigned bch2_write_corrupt_ratio;
+module_param_named(write_corrupt_ratio, bch2_write_corrupt_ratio, uint, 0644);
+MODULE_PARM_DESC(write_corrupt_ratio, "");
+#endif
+
#ifndef CONFIG_BCACHEFS_NO_LATENCY_ACCT
static inline void bch2_congested_acct(struct bch_dev *ca, u64 io_latency,
@@ -374,7 +380,7 @@ static int bch2_write_index_default(struct bch_write_op *op)
bch2_extent_update(trans, inum, &iter, sk.k,
&op->res,
op->new_i_size, &op->i_sectors_delta,
- op->flags & BCH_WRITE_CHECK_ENOSPC);
+ op->flags & BCH_WRITE_check_enospc);
bch2_trans_iter_exit(trans, &iter);
if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
@@ -396,29 +402,42 @@ static int bch2_write_index_default(struct bch_write_op *op)
/* Writes */
-static void __bch2_write_op_error(struct printbuf *out, struct bch_write_op *op,
- u64 offset)
+void bch2_write_op_error(struct bch_write_op *op, u64 offset, const char *fmt, ...)
{
- bch2_inum_offset_err_msg(op->c, out,
- (subvol_inum) { op->subvol, op->pos.inode, },
- offset << 9);
- prt_printf(out, "write error%s: ",
- op->flags & BCH_WRITE_MOVE ? "(internal move)" : "");
-}
+ struct printbuf buf = PRINTBUF;
-void bch2_write_op_error(struct printbuf *out, struct bch_write_op *op)
-{
- __bch2_write_op_error(out, op, op->pos.offset);
+ if (op->subvol) {
+ bch2_inum_offset_err_msg(op->c, &buf,
+ (subvol_inum) { op->subvol, op->pos.inode, },
+ offset << 9);
+ } else {
+ struct bpos pos = op->pos;
+ pos.offset = offset;
+ bch2_inum_snap_offset_err_msg(op->c, &buf, pos);
+ }
+
+ prt_str(&buf, "write error: ");
+
+ va_list args;
+ va_start(args, fmt);
+ prt_vprintf(&buf, fmt, args);
+ va_end(args);
+
+ if (op->flags & BCH_WRITE_move) {
+ struct data_update *u = container_of(op, struct data_update, op);
+
+ prt_printf(&buf, "\n from internal move ");
+ bch2_bkey_val_to_text(&buf, op->c, bkey_i_to_s_c(u->k.k));
+ }
+
+ bch_err_ratelimited(op->c, "%s", buf.buf);
+ printbuf_exit(&buf);
}
-static void bch2_write_op_error_trans(struct btree_trans *trans, struct printbuf *out,
- struct bch_write_op *op, u64 offset)
+static void bch2_write_csum_err_msg(struct bch_write_op *op)
{
- bch2_inum_offset_err_msg_trans(trans, out,
- (subvol_inum) { op->subvol, op->pos.inode, },
- offset << 9);
- prt_printf(out, "write error%s: ",
- op->flags & BCH_WRITE_MOVE ? "(internal move)" : "");
+ bch2_write_op_error(op, op->pos.offset,
+ "error verifying existing checksum while rewriting existing data (memory corruption?)");
}
void bch2_submit_wbio_replicas(struct bch_write_bio *wbio, struct bch_fs *c,
@@ -493,7 +512,7 @@ static void bch2_write_done(struct closure *cl)
bch2_time_stats_update(&c->times[BCH_TIME_data_write], op->start_time);
bch2_disk_reservation_put(c, &op->res);
- if (!(op->flags & BCH_WRITE_MOVE))
+ if (!(op->flags & BCH_WRITE_move))
bch2_write_ref_put(c, BCH_WRITE_REF_write);
bch2_keylist_free(&op->insert_keys, op->inline_keys);
@@ -516,7 +535,7 @@ static noinline int bch2_write_drop_io_error_ptrs(struct bch_write_op *op)
test_bit(ptr->dev, op->failed.d));
if (!bch2_bkey_nr_ptrs(bkey_i_to_s_c(src)))
- return -EIO;
+ return -BCH_ERR_data_write_io;
}
if (dst != src)
@@ -539,7 +558,7 @@ static void __bch2_write_index(struct bch_write_op *op)
unsigned dev;
int ret = 0;
- if (unlikely(op->flags & BCH_WRITE_IO_ERROR)) {
+ if (unlikely(op->flags & BCH_WRITE_io_error)) {
ret = bch2_write_drop_io_error_ptrs(op);
if (ret)
goto err;
@@ -548,7 +567,7 @@ static void __bch2_write_index(struct bch_write_op *op)
if (!bch2_keylist_empty(keys)) {
u64 sectors_start = keylist_sectors(keys);
- ret = !(op->flags & BCH_WRITE_MOVE)
+ ret = !(op->flags & BCH_WRITE_move)
? bch2_write_index_default(op)
: bch2_data_update_index_update(op);
@@ -560,11 +579,8 @@ static void __bch2_write_index(struct bch_write_op *op)
if (unlikely(ret && !bch2_err_matches(ret, EROFS))) {
struct bkey_i *insert = bch2_keylist_front(&op->insert_keys);
- struct printbuf buf = PRINTBUF;
- __bch2_write_op_error(&buf, op, bkey_start_offset(&insert->k));
- prt_printf(&buf, "btree update error: %s", bch2_err_str(ret));
- bch_err_ratelimited(c, "%s", buf.buf);
- printbuf_exit(&buf);
+ bch2_write_op_error(op, bkey_start_offset(&insert->k),
+ "btree update error: %s", bch2_err_str(ret));
}
if (ret)
@@ -573,21 +589,29 @@ static void __bch2_write_index(struct bch_write_op *op)
out:
	/* If some bucket wasn't written, we can't erasure code it: */
for_each_set_bit(dev, op->failed.d, BCH_SB_MEMBERS_MAX)
- bch2_open_bucket_write_error(c, &op->open_buckets, dev);
+ bch2_open_bucket_write_error(c, &op->open_buckets, dev, -BCH_ERR_data_write_io);
bch2_open_buckets_put(c, &op->open_buckets);
return;
err:
keys->top = keys->keys;
op->error = ret;
- op->flags |= BCH_WRITE_SUBMITTED;
+ op->flags |= BCH_WRITE_submitted;
goto out;
}
static inline void __wp_update_state(struct write_point *wp, enum write_point_state state)
{
if (state != wp->state) {
+ struct task_struct *p = current;
u64 now = ktime_get_ns();
+ u64 runtime = p->se.sum_exec_runtime +
+ (now - p->se.exec_start);
+
+ if (state == WRITE_POINT_runnable)
+ wp->last_runtime = runtime;
+ else if (wp->state == WRITE_POINT_runnable)
+ wp->time[WRITE_POINT_running] += runtime - wp->last_runtime;
if (wp->last_state_change &&
time_after64(now, wp->last_state_change))
@@ -601,7 +625,7 @@ static inline void wp_update_state(struct write_point *wp, bool running)
{
enum write_point_state state;
- state = running ? WRITE_POINT_running :
+ state = running ? WRITE_POINT_runnable:
!list_empty(&wp->writes) ? WRITE_POINT_waiting_io
: WRITE_POINT_stopped;
@@ -615,8 +639,8 @@ static CLOSURE_CALLBACK(bch2_write_index)
struct workqueue_struct *wq = index_update_wq(op);
unsigned long flags;
- if ((op->flags & BCH_WRITE_SUBMITTED) &&
- (op->flags & BCH_WRITE_MOVE))
+ if ((op->flags & BCH_WRITE_submitted) &&
+ (op->flags & BCH_WRITE_move))
bch2_bio_free_pages_pool(op->c, &op->wbio.bio);
spin_lock_irqsave(&wp->writes_lock, flags);
@@ -654,11 +678,11 @@ void bch2_write_point_do_index_updates(struct work_struct *work)
if (!op)
break;
- op->flags |= BCH_WRITE_IN_WORKER;
+ op->flags |= BCH_WRITE_in_worker;
__bch2_write_index(op);
- if (!(op->flags & BCH_WRITE_SUBMITTED))
+ if (!(op->flags & BCH_WRITE_submitted))
__bch2_write(op);
else
bch2_write_done(&op->cl);
@@ -676,13 +700,17 @@ static void bch2_write_endio(struct bio *bio)
? bch2_dev_have_ref(c, wbio->dev)
: NULL;
- if (bch2_dev_inum_io_err_on(bio->bi_status, ca, BCH_MEMBER_ERROR_write,
+ bch2_account_io_completion(ca, BCH_MEMBER_ERROR_write,
+ wbio->submit_time, !bio->bi_status);
+
+ if (bio->bi_status) {
+ bch_err_inum_offset_ratelimited(ca,
op->pos.inode,
wbio->inode_offset << 9,
"data write error: %s",
- bch2_blk_status_to_str(bio->bi_status))) {
+ bch2_blk_status_to_str(bio->bi_status));
set_bit(wbio->dev, op->failed.d);
- op->flags |= BCH_WRITE_IO_ERROR;
+ op->flags |= BCH_WRITE_io_error;
}
if (wbio->nocow) {
@@ -692,10 +720,8 @@ static void bch2_write_endio(struct bio *bio)
set_bit(wbio->dev, op->devs_need_flush->d);
}
- if (wbio->have_ioref) {
- bch2_latency_acct(ca, wbio->submit_time, WRITE);
+ if (wbio->have_ioref)
percpu_ref_put(&ca->io_ref);
- }
if (wbio->bounce)
bch2_bio_free_pages_pool(c, bio);
@@ -729,7 +755,7 @@ static void init_append_extent(struct bch_write_op *op,
bch2_extent_crc_append(&e->k_i, crc);
bch2_alloc_sectors_append_ptrs_inlined(op->c, wp, &e->k_i, crc.compressed_size,
- op->flags & BCH_WRITE_CACHED);
+ op->flags & BCH_WRITE_cached);
bch2_keylist_push(&op->insert_keys);
}
@@ -789,7 +815,6 @@ static int bch2_write_rechecksum(struct bch_fs *c,
{
struct bio *bio = &op->wbio.bio;
struct bch_extent_crc_unpacked new_crc;
- int ret;
/* bch2_rechecksum_bio() can't encrypt or decrypt data: */
@@ -797,10 +822,10 @@ static int bch2_write_rechecksum(struct bch_fs *c,
bch2_csum_type_is_encryption(new_csum_type))
new_csum_type = op->crc.csum_type;
- ret = bch2_rechecksum_bio(c, bio, op->version, op->crc,
- NULL, &new_crc,
- op->crc.offset, op->crc.live_size,
- new_csum_type);
+ int ret = bch2_rechecksum_bio(c, bio, op->version, op->crc,
+ NULL, &new_crc,
+ op->crc.offset, op->crc.live_size,
+ new_csum_type);
if (ret)
return ret;
@@ -810,44 +835,12 @@ static int bch2_write_rechecksum(struct bch_fs *c,
return 0;
}
-static int bch2_write_decrypt(struct bch_write_op *op)
-{
- struct bch_fs *c = op->c;
- struct nonce nonce = extent_nonce(op->version, op->crc);
- struct bch_csum csum;
- int ret;
-
- if (!bch2_csum_type_is_encryption(op->crc.csum_type))
- return 0;
-
- /*
- * If we need to decrypt data in the write path, we'll no longer be able
- * to verify the existing checksum (poly1305 mac, in this case) after
- * it's decrypted - this is the last point we'll be able to reverify the
- * checksum:
- */
- csum = bch2_checksum_bio(c, op->crc.csum_type, nonce, &op->wbio.bio);
- if (bch2_crc_cmp(op->crc.csum, csum) && !c->opts.no_data_io)
- return -EIO;
-
- ret = bch2_encrypt_bio(c, op->crc.csum_type, nonce, &op->wbio.bio);
- op->crc.csum_type = 0;
- op->crc.csum = (struct bch_csum) { 0, 0 };
- return ret;
-}
-
-static enum prep_encoded_ret {
- PREP_ENCODED_OK,
- PREP_ENCODED_ERR,
- PREP_ENCODED_CHECKSUM_ERR,
- PREP_ENCODED_DO_WRITE,
-} bch2_write_prep_encoded_data(struct bch_write_op *op, struct write_point *wp)
+static noinline int bch2_write_prep_encoded_data(struct bch_write_op *op, struct write_point *wp)
{
struct bch_fs *c = op->c;
struct bio *bio = &op->wbio.bio;
-
- if (!(op->flags & BCH_WRITE_DATA_ENCODED))
- return PREP_ENCODED_OK;
+ struct nonce nonce = extent_nonce(op->version, op->crc);
+ int ret = 0;
BUG_ON(bio_sectors(bio) != op->crc.compressed_size);
@@ -858,12 +851,13 @@ static enum prep_encoded_ret {
(op->crc.compression_type == bch2_compression_opt_to_type(op->compression_opt) ||
op->incompressible)) {
if (!crc_is_compressed(op->crc) &&
- op->csum_type != op->crc.csum_type &&
- bch2_write_rechecksum(c, op, op->csum_type) &&
- !c->opts.no_data_io)
- return PREP_ENCODED_CHECKSUM_ERR;
+ op->csum_type != op->crc.csum_type) {
+ ret = bch2_write_rechecksum(c, op, op->csum_type);
+ if (ret)
+ return ret;
+ }
- return PREP_ENCODED_DO_WRITE;
+ return 1;
}
/*
@@ -871,20 +865,23 @@ static enum prep_encoded_ret {
* is, we have to decompress it:
*/
if (crc_is_compressed(op->crc)) {
- struct bch_csum csum;
-
- if (bch2_write_decrypt(op))
- return PREP_ENCODED_CHECKSUM_ERR;
-
/* Last point we can still verify checksum: */
- csum = bch2_checksum_bio(c, op->crc.csum_type,
- extent_nonce(op->version, op->crc),
- bio);
+ struct bch_csum csum = bch2_checksum_bio(c, op->crc.csum_type, nonce, bio);
if (bch2_crc_cmp(op->crc.csum, csum) && !c->opts.no_data_io)
- return PREP_ENCODED_CHECKSUM_ERR;
+ goto csum_err;
+
+ if (bch2_csum_type_is_encryption(op->crc.csum_type)) {
+ ret = bch2_encrypt_bio(c, op->crc.csum_type, nonce, bio);
+ if (ret)
+ return ret;
- if (bch2_bio_uncompress_inplace(op, bio))
- return PREP_ENCODED_ERR;
+ op->crc.csum_type = 0;
+ op->crc.csum = (struct bch_csum) { 0, 0 };
+ }
+
+ ret = bch2_bio_uncompress_inplace(op, bio);
+ if (ret)
+ return ret;
}
/*
@@ -896,22 +893,34 @@ static enum prep_encoded_ret {
* If the data is checksummed and we're only writing a subset,
* rechecksum and adjust bio to point to currently live data:
*/
- if ((op->crc.live_size != op->crc.uncompressed_size ||
- op->crc.csum_type != op->csum_type) &&
- bch2_write_rechecksum(c, op, op->csum_type) &&
- !c->opts.no_data_io)
- return PREP_ENCODED_CHECKSUM_ERR;
+ if (op->crc.live_size != op->crc.uncompressed_size ||
+ op->crc.csum_type != op->csum_type) {
+ ret = bch2_write_rechecksum(c, op, op->csum_type);
+ if (ret)
+ return ret;
+ }
/*
* If we want to compress the data, it has to be decrypted:
*/
- if ((op->compression_opt ||
- bch2_csum_type_is_encryption(op->crc.csum_type) !=
- bch2_csum_type_is_encryption(op->csum_type)) &&
- bch2_write_decrypt(op))
- return PREP_ENCODED_CHECKSUM_ERR;
+ if (bch2_csum_type_is_encryption(op->crc.csum_type) &&
+ (op->compression_opt || op->crc.csum_type != op->csum_type)) {
+ struct bch_csum csum = bch2_checksum_bio(c, op->crc.csum_type, nonce, bio);
+ if (bch2_crc_cmp(op->crc.csum, csum) && !c->opts.no_data_io)
+ goto csum_err;
+
+ ret = bch2_encrypt_bio(c, op->crc.csum_type, nonce, bio);
+ if (ret)
+ return ret;
- return PREP_ENCODED_OK;
+ op->crc.csum_type = 0;
+ op->crc.csum = (struct bch_csum) { 0, 0 };
+ }
+
+ return 0;
+csum_err:
+ bch2_write_csum_err_msg(op);
+ return -BCH_ERR_data_write_csum;
}
static int bch2_write_extent(struct bch_write_op *op, struct write_point *wp,
@@ -930,39 +939,44 @@ static int bch2_write_extent(struct bch_write_op *op, struct write_point *wp,
ec_buf = bch2_writepoint_ec_buf(c, wp);
- switch (bch2_write_prep_encoded_data(op, wp)) {
- case PREP_ENCODED_OK:
- break;
- case PREP_ENCODED_ERR:
- ret = -EIO;
- goto err;
- case PREP_ENCODED_CHECKSUM_ERR:
- goto csum_err;
- case PREP_ENCODED_DO_WRITE:
- /* XXX look for bug here */
- if (ec_buf) {
- dst = bch2_write_bio_alloc(c, wp, src,
- &page_alloc_failed,
- ec_buf);
- bio_copy_data(dst, src);
- bounce = true;
+ if (unlikely(op->flags & BCH_WRITE_data_encoded)) {
+ ret = bch2_write_prep_encoded_data(op, wp);
+ if (ret < 0)
+ goto err;
+ if (ret) {
+ if (ec_buf) {
+ dst = bch2_write_bio_alloc(c, wp, src,
+ &page_alloc_failed,
+ ec_buf);
+ bio_copy_data(dst, src);
+ bounce = true;
+ }
+ init_append_extent(op, wp, op->version, op->crc);
+ goto do_write;
}
- init_append_extent(op, wp, op->version, op->crc);
- goto do_write;
}
if (ec_buf ||
op->compression_opt ||
(op->csum_type &&
- !(op->flags & BCH_WRITE_PAGES_STABLE)) ||
+ !(op->flags & BCH_WRITE_pages_stable)) ||
(bch2_csum_type_is_encryption(op->csum_type) &&
- !(op->flags & BCH_WRITE_PAGES_OWNED))) {
+ !(op->flags & BCH_WRITE_pages_owned))) {
dst = bch2_write_bio_alloc(c, wp, src,
&page_alloc_failed,
ec_buf);
bounce = true;
}
+#ifdef CONFIG_BCACHEFS_DEBUG
+ unsigned write_corrupt_ratio = READ_ONCE(bch2_write_corrupt_ratio);
+ if (!bounce && write_corrupt_ratio) {
+ dst = bch2_write_bio_alloc(c, wp, src,
+ &page_alloc_failed,
+ ec_buf);
+ bounce = true;
+ }
+#endif
saved_iter = dst->bi_iter;
do {
@@ -976,7 +990,7 @@ static int bch2_write_extent(struct bch_write_op *op, struct write_point *wp,
break;
BUG_ON(op->compression_opt &&
- (op->flags & BCH_WRITE_DATA_ENCODED) &&
+ (op->flags & BCH_WRITE_data_encoded) &&
bch2_csum_type_is_encryption(op->crc.csum_type));
BUG_ON(op->compression_opt && !bounce);
@@ -1014,7 +1028,7 @@ static int bch2_write_extent(struct bch_write_op *op, struct write_point *wp,
}
}
- if ((op->flags & BCH_WRITE_DATA_ENCODED) &&
+ if ((op->flags & BCH_WRITE_data_encoded) &&
!crc_is_compressed(crc) &&
bch2_csum_type_is_encryption(op->crc.csum_type) ==
bch2_csum_type_is_encryption(op->csum_type)) {
@@ -1046,7 +1060,7 @@ static int bch2_write_extent(struct bch_write_op *op, struct write_point *wp,
crc.compression_type = compression_type;
crc.nonce = nonce;
} else {
- if ((op->flags & BCH_WRITE_DATA_ENCODED) &&
+ if ((op->flags & BCH_WRITE_data_encoded) &&
bch2_rechecksum_bio(c, src, version, op->crc,
NULL, &op->crc,
src_len >> 9,
@@ -1072,6 +1086,14 @@ static int bch2_write_extent(struct bch_write_op *op, struct write_point *wp,
init_append_extent(op, wp, version, crc);
+#ifdef CONFIG_BCACHEFS_DEBUG
+ if (write_corrupt_ratio) {
+ swap(dst->bi_iter.bi_size, dst_len);
+ bch2_maybe_corrupt_bio(dst, write_corrupt_ratio);
+ swap(dst->bi_iter.bi_size, dst_len);
+ }
+#endif
+
if (dst != src)
bio_advance(dst, dst_len);
bio_advance(src, src_len);
@@ -1104,15 +1126,8 @@ do_write:
*_dst = dst;
return more;
csum_err:
- {
- struct printbuf buf = PRINTBUF;
- bch2_write_op_error(&buf, op);
- prt_printf(&buf, "error verifying existing checksum while rewriting existing data (memory corruption?)");
- bch_err_ratelimited(c, "%s", buf.buf);
- printbuf_exit(&buf);
- }
-
- ret = -EIO;
+ bch2_write_csum_err_msg(op);
+ ret = -BCH_ERR_data_write_csum;
err:
if (to_wbio(dst)->bounce)
bch2_bio_free_pages_pool(c, dst);
@@ -1190,39 +1205,36 @@ static void bch2_nocow_write_convert_unwritten(struct bch_write_op *op)
{
struct bch_fs *c = op->c;
struct btree_trans *trans = bch2_trans_get(c);
+ int ret = 0;
for_each_keylist_key(&op->insert_keys, orig) {
- int ret = for_each_btree_key_max_commit(trans, iter, BTREE_ID_extents,
+ ret = for_each_btree_key_max_commit(trans, iter, BTREE_ID_extents,
bkey_start_pos(&orig->k), orig->k.p,
BTREE_ITER_intent, k,
NULL, NULL, BCH_TRANS_COMMIT_no_enospc, ({
bch2_nocow_write_convert_one_unwritten(trans, &iter, orig, k, op->new_i_size);
}));
-
- if (ret && !bch2_err_matches(ret, EROFS)) {
- struct bkey_i *insert = bch2_keylist_front(&op->insert_keys);
-
- struct printbuf buf = PRINTBUF;
- bch2_write_op_error_trans(trans, &buf, op, bkey_start_offset(&insert->k));
- prt_printf(&buf, "btree update error: %s", bch2_err_str(ret));
- bch_err_ratelimited(c, "%s", buf.buf);
- printbuf_exit(&buf);
- }
-
- if (ret) {
- op->error = ret;
+ if (ret)
break;
- }
}
bch2_trans_put(trans);
+
+ if (ret && !bch2_err_matches(ret, EROFS)) {
+ struct bkey_i *insert = bch2_keylist_front(&op->insert_keys);
+ bch2_write_op_error(op, bkey_start_offset(&insert->k),
+ "btree update error: %s", bch2_err_str(ret));
+ }
+
+ if (ret)
+ op->error = ret;
}
static void __bch2_nocow_write_done(struct bch_write_op *op)
{
- if (unlikely(op->flags & BCH_WRITE_IO_ERROR)) {
- op->error = -EIO;
- } else if (unlikely(op->flags & BCH_WRITE_CONVERT_UNWRITTEN))
+ if (unlikely(op->flags & BCH_WRITE_io_error)) {
+ op->error = -BCH_ERR_data_write_io;
+ } else if (unlikely(op->flags & BCH_WRITE_convert_unwritten))
bch2_nocow_write_convert_unwritten(op);
}
@@ -1251,7 +1263,7 @@ static void bch2_nocow_write(struct bch_write_op *op)
struct bucket_to_lock *stale_at;
int stale, ret;
- if (op->flags & BCH_WRITE_MOVE)
+ if (op->flags & BCH_WRITE_move)
return;
darray_init(&buckets);
@@ -1309,7 +1321,7 @@ retry:
}), GFP_KERNEL|__GFP_NOFAIL);
if (ptr->unwritten)
- op->flags |= BCH_WRITE_CONVERT_UNWRITTEN;
+ op->flags |= BCH_WRITE_convert_unwritten;
}
/* Unlock before taking nocow locks, doing IO: */
@@ -1317,7 +1329,7 @@ retry:
bch2_trans_unlock(trans);
bch2_cut_front(op->pos, op->insert_keys.top);
- if (op->flags & BCH_WRITE_CONVERT_UNWRITTEN)
+ if (op->flags & BCH_WRITE_convert_unwritten)
bch2_cut_back(POS(op->pos.inode, op->pos.offset + bio_sectors(bio)), op->insert_keys.top);
darray_for_each(buckets, i) {
@@ -1342,7 +1354,7 @@ retry:
wbio_init(bio)->put_bio = true;
bio->bi_opf = op->wbio.bio.bi_opf;
} else {
- op->flags |= BCH_WRITE_SUBMITTED;
+ op->flags |= BCH_WRITE_submitted;
}
op->pos.offset += bio_sectors(bio);
@@ -1352,11 +1364,12 @@ retry:
bio->bi_private = &op->cl;
bio->bi_opf |= REQ_OP_WRITE;
closure_get(&op->cl);
+
bch2_submit_wbio_replicas(to_wbio(bio), c, BCH_DATA_user,
op->insert_keys.top, true);
bch2_keylist_push(&op->insert_keys);
- if (op->flags & BCH_WRITE_SUBMITTED)
+ if (op->flags & BCH_WRITE_submitted)
break;
bch2_btree_iter_advance(&iter);
}
@@ -1370,21 +1383,18 @@ err:
darray_exit(&buckets);
if (ret) {
- struct printbuf buf = PRINTBUF;
- bch2_write_op_error(&buf, op);
- prt_printf(&buf, "%s(): btree lookup error: %s", __func__, bch2_err_str(ret));
- bch_err_ratelimited(c, "%s", buf.buf);
- printbuf_exit(&buf);
+ bch2_write_op_error(op, op->pos.offset,
+ "%s(): btree lookup error: %s", __func__, bch2_err_str(ret));
op->error = ret;
- op->flags |= BCH_WRITE_SUBMITTED;
+ op->flags |= BCH_WRITE_submitted;
}
/* fallback to cow write path? */
- if (!(op->flags & BCH_WRITE_SUBMITTED)) {
+ if (!(op->flags & BCH_WRITE_submitted)) {
closure_sync(&op->cl);
__bch2_nocow_write_done(op);
op->insert_keys.top = op->insert_keys.keys;
- } else if (op->flags & BCH_WRITE_SYNC) {
+ } else if (op->flags & BCH_WRITE_sync) {
closure_sync(&op->cl);
bch2_nocow_write_done(&op->cl.work);
} else {
@@ -1414,7 +1424,7 @@ err_bucket_stale:
"pointer to invalid bucket in nocow path on device %llu\n %s",
stale_at->b.inode,
(bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
- ret = -EIO;
+ ret = -BCH_ERR_data_write_invalid_ptr;
} else {
/* We can retry this: */
ret = -BCH_ERR_transaction_restart;
@@ -1436,7 +1446,7 @@ static void __bch2_write(struct bch_write_op *op)
if (unlikely(op->opts.nocow && c->opts.nocow_enabled)) {
bch2_nocow_write(op);
- if (op->flags & BCH_WRITE_SUBMITTED)
+ if (op->flags & BCH_WRITE_submitted)
goto out_nofs_restore;
}
again:
@@ -1466,7 +1476,7 @@ again:
ret = bch2_trans_run(c, lockrestart_do(trans,
bch2_alloc_sectors_start_trans(trans,
op->target,
- op->opts.erasure_code && !(op->flags & BCH_WRITE_CACHED),
+ op->opts.erasure_code && !(op->flags & BCH_WRITE_cached),
op->write_point,
&op->devs_have,
op->nr_replicas,
@@ -1489,16 +1499,12 @@ again:
bch2_alloc_sectors_done_inlined(c, wp);
err:
if (ret <= 0) {
- op->flags |= BCH_WRITE_SUBMITTED;
+ op->flags |= BCH_WRITE_submitted;
if (unlikely(ret < 0)) {
- if (!(op->flags & BCH_WRITE_ALLOC_NOWAIT)) {
- struct printbuf buf = PRINTBUF;
- bch2_write_op_error(&buf, op);
- prt_printf(&buf, "%s(): %s", __func__, bch2_err_str(ret));
- bch_err_ratelimited(c, "%s", buf.buf);
- printbuf_exit(&buf);
- }
+ if (!(op->flags & BCH_WRITE_alloc_nowait))
+ bch2_write_op_error(op, op->pos.offset,
+ "%s(): %s", __func__, bch2_err_str(ret));
op->error = ret;
break;
}
@@ -1524,14 +1530,14 @@ err:
* synchronously here if we weren't able to submit all of the IO at
* once, as that signals backpressure to the caller.
*/
- if ((op->flags & BCH_WRITE_SYNC) ||
- (!(op->flags & BCH_WRITE_SUBMITTED) &&
- !(op->flags & BCH_WRITE_IN_WORKER))) {
+ if ((op->flags & BCH_WRITE_sync) ||
+ (!(op->flags & BCH_WRITE_submitted) &&
+ !(op->flags & BCH_WRITE_in_worker))) {
bch2_wait_on_allocator(c, &op->cl);
__bch2_write_index(op);
- if (!(op->flags & BCH_WRITE_SUBMITTED))
+ if (!(op->flags & BCH_WRITE_submitted))
goto again;
bch2_write_done(&op->cl);
} else {
@@ -1552,8 +1558,8 @@ static void bch2_write_data_inline(struct bch_write_op *op, unsigned data_len)
memset(&op->failed, 0, sizeof(op->failed));
- op->flags |= BCH_WRITE_WROTE_DATA_INLINE;
- op->flags |= BCH_WRITE_SUBMITTED;
+ op->flags |= BCH_WRITE_wrote_data_inline;
+ op->flags |= BCH_WRITE_submitted;
bch2_check_set_feature(op->c, BCH_FEATURE_inline_data);
@@ -1616,8 +1622,8 @@ CLOSURE_CALLBACK(bch2_write)
BUG_ON(!op->write_point.v);
BUG_ON(bkey_eq(op->pos, POS_MAX));
- if (op->flags & BCH_WRITE_ONLY_SPECIFIED_DEVS)
- op->flags |= BCH_WRITE_ALLOC_NOWAIT;
+ if (op->flags & BCH_WRITE_only_specified_devs)
+ op->flags |= BCH_WRITE_alloc_nowait;
op->nr_replicas_required = min_t(unsigned, op->nr_replicas_required, op->nr_replicas);
op->start_time = local_clock();
@@ -1625,11 +1631,8 @@ CLOSURE_CALLBACK(bch2_write)
wbio_init(bio)->put_bio = false;
if (unlikely(bio->bi_iter.bi_size & (c->opts.block_size - 1))) {
- struct printbuf buf = PRINTBUF;
- bch2_write_op_error(&buf, op);
- prt_printf(&buf, "misaligned write");
- printbuf_exit(&buf);
- op->error = -EIO;
+ bch2_write_op_error(op, op->pos.offset, "misaligned write");
+ op->error = -BCH_ERR_data_write_misaligned;
goto err;
}
@@ -1638,13 +1641,14 @@ CLOSURE_CALLBACK(bch2_write)
goto err;
}
- if (!(op->flags & BCH_WRITE_MOVE) &&
+ if (!(op->flags & BCH_WRITE_move) &&
!bch2_write_ref_tryget(c, BCH_WRITE_REF_write)) {
op->error = -BCH_ERR_erofs_no_writes;
goto err;
}
- this_cpu_add(c->counters[BCH_COUNTER_io_write], bio_sectors(bio));
+ if (!(op->flags & BCH_WRITE_move))
+ this_cpu_add(c->counters[BCH_COUNTER_io_write], bio_sectors(bio));
bch2_increment_clock(c, bio_sectors(bio), WRITE);
data_len = min_t(u64, bio->bi_iter.bi_size,
@@ -1675,20 +1679,26 @@ static const char * const bch2_write_flags[] = {
void bch2_write_op_to_text(struct printbuf *out, struct bch_write_op *op)
{
- prt_str(out, "pos: ");
+ if (!out->nr_tabstops)
+ printbuf_tabstop_push(out, 32);
+
+ prt_printf(out, "pos:\t");
bch2_bpos_to_text(out, op->pos);
prt_newline(out);
printbuf_indent_add(out, 2);
- prt_str(out, "started: ");
+ prt_printf(out, "started:\t");
bch2_pr_time_units(out, local_clock() - op->start_time);
prt_newline(out);
- prt_str(out, "flags: ");
+ prt_printf(out, "flags:\t");
prt_bitflags(out, bch2_write_flags, op->flags);
prt_newline(out);
- prt_printf(out, "ref: %u\n", closure_nr_remaining(&op->cl));
+ prt_printf(out, "nr_replicas:\t%u\n", op->nr_replicas);
+ prt_printf(out, "nr_replicas_required:\t%u\n", op->nr_replicas_required);
+
+ prt_printf(out, "ref:\t%u\n", closure_nr_remaining(&op->cl));
printbuf_indent_sub(out, 2);
}
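A rough sketch of the layout this produces, with made-up field values (the 32-column tabstop pushed above aligns the values into a second column, and the fields after the indent_add sit two spaces in):

pos:                            <bpos>
  started:                      <time since op->start_time>
  flags:                        sync,submitted
  nr_replicas:                  2
  nr_replicas_required:         1
  ref:                          1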
diff --git a/fs/bcachefs/io_write.h b/fs/bcachefs/io_write.h
index b4626013abc8..b8ab19a1e1da 100644
--- a/fs/bcachefs/io_write.h
+++ b/fs/bcachefs/io_write.h
@@ -11,33 +11,27 @@
void bch2_bio_free_pages_pool(struct bch_fs *, struct bio *);
void bch2_bio_alloc_pages_pool(struct bch_fs *, struct bio *, size_t);
-#ifndef CONFIG_BCACHEFS_NO_LATENCY_ACCT
-void bch2_latency_acct(struct bch_dev *, u64, int);
-#else
-static inline void bch2_latency_acct(struct bch_dev *ca, u64 submit_time, int rw) {}
-#endif
-
void bch2_submit_wbio_replicas(struct bch_write_bio *, struct bch_fs *,
enum bch_data_type, const struct bkey_i *, bool);
-void bch2_write_op_error(struct printbuf *out, struct bch_write_op *op);
+__printf(3, 4)
+void bch2_write_op_error(struct bch_write_op *op, u64, const char *, ...);
#define BCH_WRITE_FLAGS() \
- x(ALLOC_NOWAIT) \
- x(CACHED) \
- x(DATA_ENCODED) \
- x(PAGES_STABLE) \
- x(PAGES_OWNED) \
- x(ONLY_SPECIFIED_DEVS) \
- x(WROTE_DATA_INLINE) \
- x(FROM_INTERNAL) \
- x(CHECK_ENOSPC) \
- x(SYNC) \
- x(MOVE) \
- x(IN_WORKER) \
- x(SUBMITTED) \
- x(IO_ERROR) \
- x(CONVERT_UNWRITTEN)
+ x(alloc_nowait) \
+ x(cached) \
+ x(data_encoded) \
+ x(pages_stable) \
+ x(pages_owned) \
+ x(only_specified_devs) \
+ x(wrote_data_inline) \
+ x(check_enospc) \
+ x(sync) \
+ x(move) \
+ x(in_worker) \
+ x(submitted) \
+ x(io_error) \
+ x(convert_unwritten)
enum __bch_write_flags {
#define x(f) __BCH_WRITE_##f,
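A rough sketch of how the renamed x-macro list expands; the BIT()-valued companion enum is an assumption inferred from the __BCH_WRITE_##f pattern visible above, not something shown in this hunk:

enum __bch_write_flags {
#define x(f)	__BCH_WRITE_##f,
	BCH_WRITE_FLAGS()
#undef x
};

/* assumed companion definition: each name becomes a testable bit */
enum bch_write_flags {
#define x(f)	BCH_WRITE_##f = BIT(__BCH_WRITE_##f),
	BCH_WRITE_FLAGS()
#undef x
};

With that expansion, a check such as (op->flags & BCH_WRITE_submitted) in io_write.c tests the bit generated from the x(submitted) entry.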
diff --git a/fs/bcachefs/io_write_types.h b/fs/bcachefs/io_write_types.h
index 6e878a6f2f0b..3ef6df9145ef 100644
--- a/fs/bcachefs/io_write_types.h
+++ b/fs/bcachefs/io_write_types.h
@@ -64,7 +64,7 @@ struct bch_write_op {
struct bpos pos;
struct bversion version;
- /* For BCH_WRITE_DATA_ENCODED: */
+ /* For BCH_WRITE_data_encoded: */
struct bch_extent_crc_unpacked crc;
struct write_point_specifier write_point;
diff --git a/fs/bcachefs/journal.c b/fs/bcachefs/journal.c
index 5dabbf3c0965..bfdaea6569ae 100644
--- a/fs/bcachefs/journal.c
+++ b/fs/bcachefs/journal.c
@@ -20,13 +20,6 @@
#include "journal_seq_blacklist.h"
#include "trace.h"
-static const char * const bch2_journal_errors[] = {
-#define x(n) #n,
- JOURNAL_ERRORS()
-#undef x
- NULL
-};
-
static inline bool journal_seq_unwritten(struct journal *j, u64 seq)
{
return seq > j->seq_ondisk;
@@ -56,11 +49,18 @@ static void bch2_journal_buf_to_text(struct printbuf *out, struct journal *j, u6
prt_printf(out, "seq:\t%llu\n", seq);
printbuf_indent_add(out, 2);
- prt_printf(out, "refcount:\t%u\n", journal_state_count(s, i));
+ if (!buf->write_started)
+ prt_printf(out, "refcount:\t%u\n", journal_state_count(s, i & JOURNAL_STATE_BUF_MASK));
- prt_printf(out, "size:\t");
- prt_human_readable_u64(out, vstruct_bytes(buf->data));
- prt_newline(out);
+ struct closure *cl = &buf->io;
+ int r = atomic_read(&cl->remaining);
+ prt_printf(out, "io:\t%pS r %i\n", cl->fn, r & CLOSURE_REMAINING_MASK);
+
+ if (buf->data) {
+ prt_printf(out, "size:\t");
+ prt_human_readable_u64(out, vstruct_bytes(buf->data));
+ prt_newline(out);
+ }
prt_printf(out, "expires:\t");
prt_printf(out, "%li jiffies\n", buf->expires - jiffies);
@@ -87,6 +87,9 @@ static void bch2_journal_buf_to_text(struct printbuf *out, struct journal *j, u6
static void bch2_journal_bufs_to_text(struct printbuf *out, struct journal *j)
{
+ lockdep_assert_held(&j->lock);
+ out->atomic++;
+
if (!out->nr_tabstops)
printbuf_tabstop_push(out, 24);
@@ -95,6 +98,8 @@ static void bch2_journal_bufs_to_text(struct printbuf *out, struct journal *j)
seq++)
bch2_journal_buf_to_text(out, j, seq);
prt_printf(out, "last buf %s\n", journal_entry_is_open(j) ? "open" : "closed");
+
+ --out->atomic;
}
static inline struct journal_buf *
@@ -104,10 +109,8 @@ journal_seq_to_buf(struct journal *j, u64 seq)
EBUG_ON(seq > journal_cur_seq(j));
- if (journal_seq_unwritten(j, seq)) {
+ if (journal_seq_unwritten(j, seq))
buf = j->buf + (seq & JOURNAL_BUF_MASK);
- EBUG_ON(le64_to_cpu(buf->data->seq) != seq);
- }
return buf;
}
@@ -139,8 +142,8 @@ journal_error_check_stuck(struct journal *j, int error, unsigned flags)
bool stuck = false;
struct printbuf buf = PRINTBUF;
- if (!(error == JOURNAL_ERR_journal_full ||
- error == JOURNAL_ERR_journal_pin_full) ||
+ if (!(error == -BCH_ERR_journal_full ||
+ error == -BCH_ERR_journal_pin_full) ||
nr_unwritten_journal_entries(j) ||
(flags & BCH_WATERMARK_MASK) != BCH_WATERMARK_reclaim)
return stuck;
@@ -167,7 +170,7 @@ journal_error_check_stuck(struct journal *j, int error, unsigned flags)
spin_unlock(&j->lock);
bch_err(c, "Journal stuck! Have a pre-reservation but journal full (error %s)",
- bch2_journal_errors[error]);
+ bch2_err_str(error));
bch2_journal_debug_to_text(&buf, j);
bch_err(c, "%s", buf.buf);
@@ -195,7 +198,8 @@ void bch2_journal_do_writes(struct journal *j)
if (w->write_started)
continue;
- if (!journal_state_count(j->reservations, idx)) {
+ if (!journal_state_seq_count(j, j->reservations, seq)) {
+ j->seq_write_started = seq;
w->write_started = true;
closure_call(&w->io, bch2_journal_write, j->wq, NULL);
}
@@ -306,7 +310,7 @@ static void __journal_entry_close(struct journal *j, unsigned closed_val, bool t
bch2_journal_space_available(j);
- __bch2_journal_buf_put(j, old.idx, le64_to_cpu(buf->data->seq));
+ __bch2_journal_buf_put(j, le64_to_cpu(buf->data->seq));
}
void bch2_journal_halt(struct journal *j)
@@ -377,29 +381,41 @@ static int journal_entry_open(struct journal *j)
BUG_ON(BCH_SB_CLEAN(c->disk_sb.sb));
if (j->blocked)
- return JOURNAL_ERR_blocked;
+ return -BCH_ERR_journal_blocked;
if (j->cur_entry_error)
return j->cur_entry_error;
- if (bch2_journal_error(j))
- return JOURNAL_ERR_insufficient_devices; /* -EROFS */
+ int ret = bch2_journal_error(j);
+ if (unlikely(ret))
+ return ret;
if (!fifo_free(&j->pin))
- return JOURNAL_ERR_journal_pin_full;
+ return -BCH_ERR_journal_pin_full;
if (nr_unwritten_journal_entries(j) == ARRAY_SIZE(j->buf))
- return JOURNAL_ERR_max_in_flight;
+ return -BCH_ERR_journal_max_in_flight;
+
+ if (atomic64_read(&j->seq) - j->seq_write_started == JOURNAL_STATE_BUF_NR)
+ return -BCH_ERR_journal_max_open;
if (journal_cur_seq(j) >= JOURNAL_SEQ_MAX) {
bch_err(c, "cannot start: journal seq overflow");
if (bch2_fs_emergency_read_only_locked(c))
bch_err(c, "fatal error - emergency read only");
- return JOURNAL_ERR_insufficient_devices; /* -EROFS */
+ return -BCH_ERR_journal_shutdown;
}
+ if (!j->free_buf && !buf->data)
+ return -BCH_ERR_journal_buf_enomem; /* will retry after write completion frees up a buf */
+
BUG_ON(!j->cur_entry_sectors);
+ if (!buf->data) {
+ swap(buf->data, j->free_buf);
+ swap(buf->buf_size, j->free_buf_size);
+ }
+
buf->expires =
(journal_cur_seq(j) == j->flushed_seq_ondisk
? jiffies
@@ -415,7 +431,7 @@ static int journal_entry_open(struct journal *j)
u64s = clamp_t(int, u64s, 0, JOURNAL_ENTRY_CLOSED_VAL - 1);
if (u64s <= (ssize_t) j->early_journal_entries.nr)
- return JOURNAL_ERR_journal_full;
+ return -BCH_ERR_journal_full;
if (fifo_empty(&j->pin) && j->reclaim_thread)
wake_up_process(j->reclaim_thread);
@@ -464,7 +480,7 @@ static int journal_entry_open(struct journal *j)
new.idx++;
BUG_ON(journal_state_count(new, new.idx));
- BUG_ON(new.idx != (journal_cur_seq(j) & JOURNAL_BUF_MASK));
+ BUG_ON(new.idx != (journal_cur_seq(j) & JOURNAL_STATE_BUF_MASK));
journal_state_inc(&new);
@@ -514,6 +530,33 @@ static void journal_write_work(struct work_struct *work)
spin_unlock(&j->lock);
}
+static void journal_buf_prealloc(struct journal *j)
+{
+ if (j->free_buf &&
+ j->free_buf_size >= j->buf_size_want)
+ return;
+
+ unsigned buf_size = j->buf_size_want;
+
+ spin_unlock(&j->lock);
+ void *buf = kvmalloc(buf_size, GFP_NOFS);
+ spin_lock(&j->lock);
+
+ if (buf &&
+ (!j->free_buf ||
+ buf_size > j->free_buf_size)) {
+ swap(buf, j->free_buf);
+ swap(buf_size, j->free_buf_size);
+ }
+
+ if (unlikely(buf)) {
+ spin_unlock(&j->lock);
+ /* kvfree can sleep */
+ kvfree(buf);
+ spin_lock(&j->lock);
+ }
+}
+
static int __journal_res_get(struct journal *j, struct journal_res *res,
unsigned flags)
{
@@ -525,25 +568,28 @@ retry:
if (journal_res_get_fast(j, res, flags))
return 0;
- if (bch2_journal_error(j))
- return -BCH_ERR_erofs_journal_err;
+ ret = bch2_journal_error(j);
+ if (unlikely(ret))
+ return ret;
if (j->blocked)
- return -BCH_ERR_journal_res_get_blocked;
+ return -BCH_ERR_journal_blocked;
if ((flags & BCH_WATERMARK_MASK) < j->watermark) {
- ret = JOURNAL_ERR_journal_full;
+ ret = -BCH_ERR_journal_full;
can_discard = j->can_discard;
goto out;
}
if (nr_unwritten_journal_entries(j) == ARRAY_SIZE(j->buf) && !journal_entry_is_open(j)) {
- ret = JOURNAL_ERR_max_in_flight;
+ ret = -BCH_ERR_journal_max_in_flight;
goto out;
}
spin_lock(&j->lock);
+ journal_buf_prealloc(j);
+
/*
* Recheck after taking the lock, so we don't race with another thread
* that just did journal_entry_open() and call bch2_journal_entry_close()
@@ -566,25 +612,48 @@ retry:
j->buf_size_want = max(j->buf_size_want, buf->buf_size << 1);
__journal_entry_close(j, JOURNAL_ENTRY_CLOSED_VAL, false);
- ret = journal_entry_open(j) ?: JOURNAL_ERR_retry;
+ ret = journal_entry_open(j) ?: -BCH_ERR_journal_retry_open;
unlock:
can_discard = j->can_discard;
spin_unlock(&j->lock);
out:
- if (ret == JOURNAL_ERR_retry)
- goto retry;
- if (!ret)
+ if (likely(!ret))
return 0;
+ if (ret == -BCH_ERR_journal_retry_open)
+ goto retry;
if (journal_error_check_stuck(j, ret, flags))
- ret = -BCH_ERR_journal_res_get_blocked;
+ ret = -BCH_ERR_journal_stuck;
+
+ if (ret == -BCH_ERR_journal_max_in_flight &&
+ track_event_change(&c->times[BCH_TIME_blocked_journal_max_in_flight], true) &&
+ trace_journal_entry_full_enabled()) {
+ struct printbuf buf = PRINTBUF;
+
+ bch2_printbuf_make_room(&buf, 4096);
- if (ret == JOURNAL_ERR_max_in_flight &&
- track_event_change(&c->times[BCH_TIME_blocked_journal_max_in_flight], true)) {
+ spin_lock(&j->lock);
+ prt_printf(&buf, "seq %llu\n", journal_cur_seq(j));
+ bch2_journal_bufs_to_text(&buf, j);
+ spin_unlock(&j->lock);
+
+ trace_journal_entry_full(c, buf.buf);
+ printbuf_exit(&buf);
+ count_event(c, journal_entry_full);
+ }
+ if (ret == -BCH_ERR_journal_max_open &&
+ track_event_change(&c->times[BCH_TIME_blocked_journal_max_open], true) &&
+ trace_journal_entry_full_enabled()) {
struct printbuf buf = PRINTBUF;
+
+ bch2_printbuf_make_room(&buf, 4096);
+
+ spin_lock(&j->lock);
prt_printf(&buf, "seq %llu\n", journal_cur_seq(j));
bch2_journal_bufs_to_text(&buf, j);
+ spin_unlock(&j->lock);
+
trace_journal_entry_full(c, buf.buf);
printbuf_exit(&buf);
count_event(c, journal_entry_full);
@@ -594,8 +663,8 @@ out:
* Journal is full - can't rely on reclaim from work item due to
* freezing:
*/
- if ((ret == JOURNAL_ERR_journal_full ||
- ret == JOURNAL_ERR_journal_pin_full) &&
+ if ((ret == -BCH_ERR_journal_full ||
+ ret == -BCH_ERR_journal_pin_full) &&
!(flags & JOURNAL_RES_GET_NONBLOCK)) {
if (can_discard) {
bch2_journal_do_discards(j);
@@ -608,9 +677,7 @@ out:
}
}
- return ret == JOURNAL_ERR_insufficient_devices
- ? -BCH_ERR_erofs_journal_err
- : -BCH_ERR_journal_res_get_blocked;
+ return ret;
}
static unsigned max_dev_latency(struct bch_fs *c)
@@ -640,7 +707,7 @@ int bch2_journal_res_get_slowpath(struct journal *j, struct journal_res *res,
int ret;
if (closure_wait_event_timeout(&j->async_wait,
- (ret = __journal_res_get(j, res, flags)) != -BCH_ERR_journal_res_get_blocked ||
+ !bch2_err_matches(ret = __journal_res_get(j, res, flags), BCH_ERR_operation_blocked) ||
(flags & JOURNAL_RES_GET_NONBLOCK),
HZ))
return ret;
@@ -654,7 +721,7 @@ int bch2_journal_res_get_slowpath(struct journal *j, struct journal_res *res,
remaining_wait = max(0, remaining_wait - HZ);
if (closure_wait_event_timeout(&j->async_wait,
- (ret = __journal_res_get(j, res, flags)) != -BCH_ERR_journal_res_get_blocked ||
+ !bch2_err_matches(ret = __journal_res_get(j, res, flags), BCH_ERR_operation_blocked) ||
(flags & JOURNAL_RES_GET_NONBLOCK),
remaining_wait))
return ret;
@@ -666,7 +733,7 @@ int bch2_journal_res_get_slowpath(struct journal *j, struct journal_res *res,
printbuf_exit(&buf);
closure_wait_event(&j->async_wait,
- (ret = __journal_res_get(j, res, flags)) != -BCH_ERR_journal_res_get_blocked ||
+ !bch2_err_matches(ret = __journal_res_get(j, res, flags), BCH_ERR_operation_blocked) ||
(flags & JOURNAL_RES_GET_NONBLOCK));
return ret;
}
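The three wait loops above rely on bch2_err_matches() grouping the new journal errors under a common class; this assumes the errcode.h hunk (not shown in this section of the diff) declares them roughly as children of BCH_ERR_operation_blocked, for example:

/* assumed errcode.h layout, illustrative only, not a quote of the patch */
x(EAGAIN,			operation_blocked)
x(BCH_ERR_operation_blocked,	journal_blocked)
x(BCH_ERR_operation_blocked,	journal_full)
x(BCH_ERR_operation_blocked,	journal_pin_full)
x(BCH_ERR_operation_blocked,	journal_stuck)

With a layout like that, any of the blocked/full/stuck returns from __journal_res_get() satisfies bch2_err_matches(ret, BCH_ERR_operation_blocked) and the caller keeps waiting, while hard errors fall through and are returned immediately.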
@@ -687,7 +754,6 @@ void bch2_journal_entry_res_resize(struct journal *j,
goto out;
j->cur_entry_u64s = max_t(int, 0, j->cur_entry_u64s - d);
- smp_mb();
state = READ_ONCE(j->reservations);
if (state.cur_entry_offset < JOURNAL_ENTRY_CLOSED_VAL &&
@@ -907,7 +973,7 @@ int bch2_journal_meta(struct journal *j)
struct bch_fs *c = container_of(j, struct bch_fs, journal);
if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_journal))
- return -EROFS;
+ return -BCH_ERR_erofs_no_writes;
int ret = __bch2_journal_meta(j);
bch2_write_ref_put(c, BCH_WRITE_REF_journal);
@@ -951,7 +1017,8 @@ static void __bch2_journal_block(struct journal *j)
new.cur_entry_offset = JOURNAL_ENTRY_BLOCKED_VAL;
} while (!atomic64_try_cmpxchg(&j->reservations.counter, &old.v, new.v));
- journal_cur_buf(j)->data->u64s = cpu_to_le32(old.cur_entry_offset);
+ if (old.cur_entry_offset < JOURNAL_ENTRY_BLOCKED_VAL)
+ journal_cur_buf(j)->data->u64s = cpu_to_le32(old.cur_entry_offset);
}
}
@@ -992,7 +1059,7 @@ static struct journal_buf *__bch2_next_write_buffer_flush_journal_buf(struct jou
*blocked = true;
}
- ret = journal_state_count(s, idx) > open
+ ret = journal_state_count(s, idx & JOURNAL_STATE_BUF_MASK) > open
? ERR_PTR(-EAGAIN)
: buf;
break;
@@ -1021,8 +1088,8 @@ struct journal_buf *bch2_next_write_buffer_flush_journal_buf(struct journal *j,
/* allocate journal on a device: */
-static int __bch2_set_nr_journal_buckets(struct bch_dev *ca, unsigned nr,
- bool new_fs, struct closure *cl)
+static int bch2_set_nr_journal_buckets_iter(struct bch_dev *ca, unsigned nr,
+ bool new_fs, struct closure *cl)
{
struct bch_fs *c = ca->fs;
struct journal_device *ja = &ca->journal;
@@ -1150,26 +1217,20 @@ err_free:
return ret;
}
-/*
- * Allocate more journal space at runtime - not currently making use if it, but
- * the code works:
- */
-int bch2_set_nr_journal_buckets(struct bch_fs *c, struct bch_dev *ca,
- unsigned nr)
+static int bch2_set_nr_journal_buckets_loop(struct bch_fs *c, struct bch_dev *ca,
+ unsigned nr, bool new_fs)
{
struct journal_device *ja = &ca->journal;
- struct closure cl;
int ret = 0;
+ struct closure cl;
closure_init_stack(&cl);
- down_write(&c->state_lock);
-
/* don't handle reducing nr of buckets yet: */
if (nr < ja->nr)
- goto unlock;
+ return 0;
- while (ja->nr < nr) {
+ while (!ret && ja->nr < nr) {
struct disk_reservation disk_res = { 0, 0, 0 };
/*
@@ -1182,27 +1243,38 @@ int bch2_set_nr_journal_buckets(struct bch_fs *c, struct bch_dev *ca,
* filesystem-wide allocation will succeed, this is a device
* specific allocation - we can hang here:
*/
+ if (!new_fs) {
+ ret = bch2_disk_reservation_get(c, &disk_res,
+ bucket_to_sector(ca, nr - ja->nr), 1, 0);
+ if (ret)
+ break;
+ }
- ret = bch2_disk_reservation_get(c, &disk_res,
- bucket_to_sector(ca, nr - ja->nr), 1, 0);
- if (ret)
- break;
+ ret = bch2_set_nr_journal_buckets_iter(ca, nr, new_fs, &cl);
- ret = __bch2_set_nr_journal_buckets(ca, nr, false, &cl);
+ if (ret == -BCH_ERR_bucket_alloc_blocked ||
+ ret == -BCH_ERR_open_buckets_empty)
+ ret = 0; /* wait and retry */
bch2_disk_reservation_put(c, &disk_res);
-
closure_sync(&cl);
-
- if (ret &&
- ret != -BCH_ERR_bucket_alloc_blocked &&
- ret != -BCH_ERR_open_buckets_empty)
- break;
}
- bch_err_fn(c, ret);
-unlock:
+ return ret;
+}
+
+/*
+ * Allocate more journal space at runtime - not currently making use of it, but
+ * the code works:
+ */
+int bch2_set_nr_journal_buckets(struct bch_fs *c, struct bch_dev *ca,
+ unsigned nr)
+{
+ down_write(&c->state_lock);
+ int ret = bch2_set_nr_journal_buckets_loop(c, ca, nr, false);
up_write(&c->state_lock);
+
+ bch_err_fn(c, ret);
return ret;
}
@@ -1228,7 +1300,7 @@ int bch2_dev_journal_alloc(struct bch_dev *ca, bool new_fs)
min(1 << 13,
(1 << 24) / ca->mi.bucket_size));
- ret = __bch2_set_nr_journal_buckets(ca, nr, new_fs, NULL);
+ ret = bch2_set_nr_journal_buckets_loop(ca->fs, ca, nr, new_fs);
err:
bch_err_fn(ca, ret);
return ret;
@@ -1344,6 +1416,7 @@ int bch2_fs_journal_start(struct journal *j, u64 cur_seq)
j->replay_journal_seq_end = cur_seq;
j->last_seq_ondisk = last_seq;
j->flushed_seq_ondisk = cur_seq - 1;
+ j->seq_write_started = cur_seq - 1;
j->seq_ondisk = cur_seq - 1;
j->pin.front = last_seq;
j->pin.back = cur_seq;
@@ -1384,8 +1457,7 @@ int bch2_fs_journal_start(struct journal *j, u64 cur_seq)
set_bit(JOURNAL_running, &j->flags);
j->last_flush_write = jiffies;
- j->reservations.idx = j->reservations.unwritten_idx = journal_cur_seq(j);
- j->reservations.unwritten_idx++;
+ j->reservations.idx = journal_cur_seq(j);
c->last_bucket_seq_cleanup = journal_cur_seq(j);
@@ -1438,7 +1510,7 @@ int bch2_dev_journal_init(struct bch_dev *ca, struct bch_sb *sb)
unsigned nr_bvecs = DIV_ROUND_UP(JOURNAL_ENTRY_SIZE_MAX, PAGE_SIZE);
for (unsigned i = 0; i < ARRAY_SIZE(ja->bio); i++) {
- ja->bio[i] = kmalloc(struct_size(ja->bio[i], bio.bi_inline_vecs,
+ ja->bio[i] = kzalloc(struct_size(ja->bio[i], bio.bi_inline_vecs,
nr_bvecs), GFP_KERNEL);
if (!ja->bio[i])
return -BCH_ERR_ENOMEM_dev_journal_init;
@@ -1477,6 +1549,7 @@ void bch2_fs_journal_exit(struct journal *j)
for (unsigned i = 0; i < ARRAY_SIZE(j->buf); i++)
kvfree(j->buf[i].data);
+ kvfree(j->free_buf);
free_fifo(&j->pin);
}
@@ -1503,13 +1576,13 @@ int bch2_fs_journal_init(struct journal *j)
if (!(init_fifo(&j->pin, JOURNAL_PIN, GFP_KERNEL)))
return -BCH_ERR_ENOMEM_journal_pin_fifo;
- for (unsigned i = 0; i < ARRAY_SIZE(j->buf); i++) {
- j->buf[i].buf_size = JOURNAL_ENTRY_SIZE_MIN;
- j->buf[i].data = kvmalloc(j->buf[i].buf_size, GFP_KERNEL);
- if (!j->buf[i].data)
- return -BCH_ERR_ENOMEM_journal_buf;
+ j->free_buf_size = j->buf_size_want = JOURNAL_ENTRY_SIZE_MIN;
+ j->free_buf = kvmalloc(j->free_buf_size, GFP_KERNEL);
+ if (!j->free_buf)
+ return -BCH_ERR_ENOMEM_journal_buf;
+
+ for (unsigned i = 0; i < ARRAY_SIZE(j->buf); i++)
j->buf[i].idx = i;
- }
j->pin.front = j->pin.back = 1;
@@ -1559,6 +1632,7 @@ void __bch2_journal_debug_to_text(struct printbuf *out, struct journal *j)
prt_printf(out, "average write size:\t");
prt_human_readable_u64(out, nr_writes ? div64_u64(j->entry_bytes_written, nr_writes) : 0);
prt_newline(out);
+ prt_printf(out, "free buf:\t%u\n", j->free_buf ? j->free_buf_size : 0);
prt_printf(out, "nr direct reclaim:\t%llu\n", j->nr_direct_reclaim);
prt_printf(out, "nr background reclaim:\t%llu\n", j->nr_background_reclaim);
prt_printf(out, "reclaim kicked:\t%u\n", j->reclaim_kicked);
@@ -1566,7 +1640,7 @@ void __bch2_journal_debug_to_text(struct printbuf *out, struct journal *j)
? jiffies_to_msecs(j->next_reclaim - jiffies) : 0);
prt_printf(out, "blocked:\t%u\n", j->blocked);
prt_printf(out, "current entry sectors:\t%u\n", j->cur_entry_sectors);
- prt_printf(out, "current entry error:\t%s\n", bch2_journal_errors[j->cur_entry_error]);
+ prt_printf(out, "current entry error:\t%s\n", bch2_err_str(j->cur_entry_error));
prt_printf(out, "current entry:\t");
switch (s.cur_entry_offset) {
diff --git a/fs/bcachefs/journal.h b/fs/bcachefs/journal.h
index 107f7f901cd9..47828771f9c2 100644
--- a/fs/bcachefs/journal.h
+++ b/fs/bcachefs/journal.h
@@ -121,11 +121,6 @@ static inline void journal_wake(struct journal *j)
closure_wake_up(&j->async_wait);
}
-static inline struct journal_buf *journal_cur_buf(struct journal *j)
-{
- return j->buf + j->reservations.idx;
-}
-
/* Sequence number of oldest dirty journal entry */
static inline u64 journal_last_seq(struct journal *j)
@@ -143,6 +138,15 @@ static inline u64 journal_last_unwritten_seq(struct journal *j)
return j->seq_ondisk + 1;
}
+static inline struct journal_buf *journal_cur_buf(struct journal *j)
+{
+ unsigned idx = (journal_cur_seq(j) &
+ JOURNAL_BUF_MASK &
+ ~JOURNAL_STATE_BUF_MASK) + j->reservations.idx;
+
+ return j->buf + idx;
+}
+
static inline int journal_state_count(union journal_res_state s, int idx)
{
switch (idx) {
@@ -154,6 +158,15 @@ static inline int journal_state_count(union journal_res_state s, int idx)
BUG();
}
+static inline int journal_state_seq_count(struct journal *j,
+ union journal_res_state s, u64 seq)
+{
+ if (journal_cur_seq(j) - seq < JOURNAL_STATE_BUF_NR)
+ return journal_state_count(s, seq & JOURNAL_STATE_BUF_MASK);
+ else
+ return 0;
+}
+
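A small sketch of the index arithmetic introduced above, assuming JOURNAL_BUF_BITS = 4 and JOURNAL_STATE_BUF_BITS = 2 as in journal_types.h: reservations.idx only carries the low two bits of the sequence number, so journal_cur_buf() rebuilds the full buffer index from the current seq, and journal_state_seq_count() can only answer for the last JOURNAL_STATE_BUF_NR sequence numbers:

/* illustrative only: equivalent index computation for the open entry,
 * where state_idx == seq & JOURNAL_STATE_BUF_MASK */
static inline unsigned seq_to_full_buf_idx(u64 seq, unsigned state_idx)
{
	/* e.g. seq = 45: (45 & 0xf & ~0x3) + (45 & 0x3) = 12 + 1 = 13 == 45 & 0xf */
	return (seq & JOURNAL_BUF_MASK & ~JOURNAL_STATE_BUF_MASK) + state_idx;
}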
static inline void journal_state_inc(union journal_res_state *s)
{
s->buf0_count += s->idx == 0;
@@ -193,7 +206,7 @@ bch2_journal_add_entry_noreservation(struct journal_buf *buf, size_t u64s)
static inline struct jset_entry *
journal_res_entry(struct journal *j, struct journal_res *res)
{
- return vstruct_idx(j->buf[res->idx].data, res->offset);
+ return vstruct_idx(j->buf[res->seq & JOURNAL_BUF_MASK].data, res->offset);
}
static inline unsigned journal_entry_init(struct jset_entry *entry, unsigned type,
@@ -267,8 +280,9 @@ bool bch2_journal_entry_close(struct journal *);
void bch2_journal_do_writes(struct journal *);
void bch2_journal_buf_put_final(struct journal *, u64);
-static inline void __bch2_journal_buf_put(struct journal *j, unsigned idx, u64 seq)
+static inline void __bch2_journal_buf_put(struct journal *j, u64 seq)
{
+ unsigned idx = seq & JOURNAL_STATE_BUF_MASK;
union journal_res_state s;
s = journal_state_buf_put(j, idx);
@@ -276,8 +290,9 @@ static inline void __bch2_journal_buf_put(struct journal *j, unsigned idx, u64 s
bch2_journal_buf_put_final(j, seq);
}
-static inline void bch2_journal_buf_put(struct journal *j, unsigned idx, u64 seq)
+static inline void bch2_journal_buf_put(struct journal *j, u64 seq)
{
+ unsigned idx = seq & JOURNAL_STATE_BUF_MASK;
union journal_res_state s;
s = journal_state_buf_put(j, idx);
@@ -306,7 +321,7 @@ static inline void bch2_journal_res_put(struct journal *j,
BCH_JSET_ENTRY_btree_keys,
0, 0, 0);
- bch2_journal_buf_put(j, res->idx, res->seq);
+ bch2_journal_buf_put(j, res->seq);
res->ref = 0;
}
@@ -335,8 +350,10 @@ static inline int journal_res_get_fast(struct journal *j,
/*
* Check if there is still room in the current journal
- * entry:
+ * entry, smp_rmb() guarantees that reads from reservations.counter
+ * occur before accessing cur_entry_u64s:
*/
+ smp_rmb();
if (new.cur_entry_offset + res->u64s > j->cur_entry_u64s)
return 0;
@@ -361,9 +378,9 @@ static inline int journal_res_get_fast(struct journal *j,
&old.v, new.v));
res->ref = true;
- res->idx = old.idx;
res->offset = old.cur_entry_offset;
- res->seq = le64_to_cpu(j->buf[old.idx].data->seq);
+ res->seq = journal_cur_seq(j);
+ res->seq -= (res->seq - old.idx) & JOURNAL_STATE_BUF_MASK;
return 1;
}
@@ -390,6 +407,7 @@ out:
(flags & JOURNAL_RES_GET_NONBLOCK) != 0,
NULL, _THIS_IP_);
EBUG_ON(!res->ref);
+ BUG_ON(!res->seq);
}
return 0;
}
diff --git a/fs/bcachefs/journal_io.c b/fs/bcachefs/journal_io.c
index 11c39e0c34f4..4ed6137f0439 100644
--- a/fs/bcachefs/journal_io.c
+++ b/fs/bcachefs/journal_io.c
@@ -1041,13 +1041,19 @@ reread:
bio->bi_iter.bi_sector = offset;
bch2_bio_map(bio, buf->data, sectors_read << 9);
+ u64 submit_time = local_clock();
ret = submit_bio_wait(bio);
kfree(bio);
- if (bch2_dev_io_err_on(ret, ca, BCH_MEMBER_ERROR_read,
- "journal read error: sector %llu",
- offset) ||
- bch2_meta_read_fault("journal")) {
+ if (!ret && bch2_meta_read_fault("journal"))
+ ret = -BCH_ERR_EIO_fault_injected;
+
+ bch2_account_io_completion(ca, BCH_MEMBER_ERROR_read,
+ submit_time, !ret);
+
+ if (ret) {
+ bch_err_dev_ratelimited(ca,
+ "journal read error: sector %llu", offset);
/*
* We don't error out of the recovery process
* here, since the relevant journal entry may be
@@ -1110,13 +1116,16 @@ reread:
struct bch_csum csum;
csum_good = jset_csum_good(c, j, &csum);
- if (bch2_dev_io_err_on(!csum_good, ca, BCH_MEMBER_ERROR_checksum,
- "%s",
- (printbuf_reset(&err),
- prt_str(&err, "journal "),
- bch2_csum_err_msg(&err, csum_type, j->csum, csum),
- err.buf)))
+ bch2_account_io_completion(ca, BCH_MEMBER_ERROR_checksum, 0, csum_good);
+
+ if (!csum_good) {
+ bch_err_dev_ratelimited(ca, "%s",
+ (printbuf_reset(&err),
+ prt_str(&err, "journal "),
+ bch2_csum_err_msg(&err, csum_type, j->csum, csum),
+ err.buf));
saw_bad = true;
+ }
ret = bch2_encrypt(c, JSET_CSUM_TYPE(j), journal_nonce(j),
j->encrypted_start,
@@ -1515,7 +1524,7 @@ static void __journal_write_alloc(struct journal *j,
* @j: journal object
* @w: journal buf (entry to be written)
*
- * Returns: 0 on success, or -EROFS on failure
+ * Returns: 0 on success, or -BCH_ERR_insufficient_devices on failure
*/
static int journal_write_alloc(struct journal *j, struct journal_buf *w)
{
@@ -1600,18 +1609,12 @@ static void journal_buf_realloc(struct journal *j, struct journal_buf *buf)
kvfree(new_buf);
}
-static inline struct journal_buf *journal_last_unwritten_buf(struct journal *j)
-{
- return j->buf + (journal_last_unwritten_seq(j) & JOURNAL_BUF_MASK);
-}
-
static CLOSURE_CALLBACK(journal_write_done)
{
closure_type(w, struct journal_buf, io);
struct journal *j = container_of(w, struct journal, buf[w->idx]);
struct bch_fs *c = container_of(j, struct bch_fs, journal);
struct bch_replicas_padded replicas;
- union journal_res_state old, new;
u64 seq = le64_to_cpu(w->data->seq);
int err = 0;
@@ -1621,12 +1624,11 @@ static CLOSURE_CALLBACK(journal_write_done)
if (!w->devs_written.nr) {
bch_err(c, "unable to write journal to sufficient devices");
- err = -EIO;
+ err = -BCH_ERR_journal_write_err;
} else {
bch2_devlist_to_replicas(&replicas.e, BCH_DATA_journal,
w->devs_written);
- if (bch2_mark_replicas(c, &replicas.e))
- err = -EIO;
+ err = bch2_mark_replicas(c, &replicas.e);
}
if (err)
@@ -1641,7 +1643,23 @@ static CLOSURE_CALLBACK(journal_write_done)
j->err_seq = seq;
w->write_done = true;
+ if (!j->free_buf || j->free_buf_size < w->buf_size) {
+ swap(j->free_buf, w->data);
+ swap(j->free_buf_size, w->buf_size);
+ }
+
+ if (w->data) {
+ void *buf = w->data;
+ w->data = NULL;
+ w->buf_size = 0;
+
+ spin_unlock(&j->lock);
+ kvfree(buf);
+ spin_lock(&j->lock);
+ }
+
bool completed = false;
+ bool do_discards = false;
for (seq = journal_last_unwritten_seq(j);
seq <= journal_cur_seq(j);
@@ -1650,11 +1668,10 @@ static CLOSURE_CALLBACK(journal_write_done)
if (!w->write_done)
break;
- if (!j->err_seq && !JSET_NO_FLUSH(w->data)) {
+ if (!j->err_seq && !w->noflush) {
j->flushed_seq_ondisk = seq;
j->last_seq_ondisk = w->last_seq;
- bch2_do_discards(c);
closure_wake_up(&c->freelist_wait);
bch2_reset_alloc_cursors(c);
}
@@ -1671,16 +1688,6 @@ static CLOSURE_CALLBACK(journal_write_done)
if (j->watermark != BCH_WATERMARK_stripe)
journal_reclaim_kick(&c->journal);
- old.v = atomic64_read(&j->reservations.counter);
- do {
- new.v = old.v;
- BUG_ON(journal_state_count(new, new.unwritten_idx));
- BUG_ON(new.unwritten_idx != (seq & JOURNAL_BUF_MASK));
-
- new.unwritten_idx++;
- } while (!atomic64_try_cmpxchg(&j->reservations.counter,
- &old.v, new.v));
-
closure_wake_up(&w->wait);
completed = true;
}
@@ -1695,7 +1702,7 @@ static CLOSURE_CALLBACK(journal_write_done)
}
if (journal_last_unwritten_seq(j) == journal_cur_seq(j) &&
- new.cur_entry_offset < JOURNAL_ENTRY_CLOSED_VAL) {
+ j->reservations.cur_entry_offset < JOURNAL_ENTRY_CLOSED_VAL) {
struct journal_buf *buf = journal_cur_buf(j);
long delta = buf->expires - jiffies;
@@ -1715,6 +1722,9 @@ static CLOSURE_CALLBACK(journal_write_done)
*/
bch2_journal_do_writes(j);
spin_unlock(&j->lock);
+
+ if (do_discards)
+ bch2_do_discards(c);
}
static void journal_write_endio(struct bio *bio)
@@ -1724,13 +1734,16 @@ static void journal_write_endio(struct bio *bio)
struct journal *j = &ca->fs->journal;
struct journal_buf *w = j->buf + jbio->buf_idx;
- if (bch2_dev_io_err_on(bio->bi_status, ca, BCH_MEMBER_ERROR_write,
+ bch2_account_io_completion(ca, BCH_MEMBER_ERROR_write,
+ jbio->submit_time, !bio->bi_status);
+
+ if (bio->bi_status) {
+ bch_err_dev_ratelimited(ca,
"error writing journal entry %llu: %s",
le64_to_cpu(w->data->seq),
- bch2_blk_status_to_str(bio->bi_status)) ||
- bch2_meta_write_fault("journal")) {
- unsigned long flags;
+ bch2_blk_status_to_str(bio->bi_status));
+ unsigned long flags;
spin_lock_irqsave(&j->err_lock, flags);
bch2_dev_list_drop_dev(&w->devs_written, ca->dev_idx);
spin_unlock_irqrestore(&j->err_lock, flags);
@@ -1759,7 +1772,11 @@ static CLOSURE_CALLBACK(journal_write_submit)
sectors);
struct journal_device *ja = &ca->journal;
- struct bio *bio = &ja->bio[w->idx]->bio;
+ struct journal_bio *jbio = ja->bio[w->idx];
+ struct bio *bio = &jbio->bio;
+
+ jbio->submit_time = local_clock();
+
bio_reset(bio, ca->disk_sb.bdev, REQ_OP_WRITE|REQ_SYNC|REQ_META);
bio->bi_iter.bi_sector = ptr->offset;
bio->bi_end_io = journal_write_endio;
@@ -1791,6 +1808,10 @@ static CLOSURE_CALLBACK(journal_write_preflush)
struct journal *j = container_of(w, struct journal, buf[w->idx]);
struct bch_fs *c = container_of(j, struct bch_fs, journal);
+ /*
+ * Wait for previous journal writes to complete; they won't necessarily
+ * be flushed if they're still in flight
+ */
if (j->seq_ondisk + 1 != le64_to_cpu(w->data->seq)) {
spin_lock(&j->lock);
if (j->seq_ondisk + 1 != le64_to_cpu(w->data->seq)) {
@@ -1984,7 +2005,7 @@ static int bch2_journal_write_pick_flush(struct journal *j, struct journal_buf *
* write anything at all.
*/
if (error && test_bit(JOURNAL_need_flush_write, &j->flags))
- return -EIO;
+ return error;
if (error ||
w->noflush ||
diff --git a/fs/bcachefs/journal_reclaim.c b/fs/bcachefs/journal_reclaim.c
index d373cd181a7f..5d1547aa118a 100644
--- a/fs/bcachefs/journal_reclaim.c
+++ b/fs/bcachefs/journal_reclaim.c
@@ -226,7 +226,7 @@ void bch2_journal_space_available(struct journal *j)
bch_err(c, "%s", buf.buf);
printbuf_exit(&buf);
- ret = JOURNAL_ERR_insufficient_devices;
+ ret = -BCH_ERR_insufficient_journal_devices;
goto out;
}
@@ -240,7 +240,7 @@ void bch2_journal_space_available(struct journal *j)
total = j->space[journal_space_total].total;
if (!j->space[journal_space_discarded].next_entry)
- ret = JOURNAL_ERR_journal_full;
+ ret = -BCH_ERR_journal_full;
if ((j->space[journal_space_clean_ondisk].next_entry <
j->space[journal_space_clean_ondisk].total) &&
@@ -645,7 +645,6 @@ static u64 journal_seq_to_flush(struct journal *j)
* @j: journal object
* @direct: direct or background reclaim?
* @kicked: requested to run since we last ran?
- * Returns: 0 on success, or -EIO if the journal has been shutdown
*
* Background journal reclaim writes out btree nodes. It should be run
* early enough so that we never completely run out of journal buckets.
@@ -685,10 +684,9 @@ static int __bch2_journal_reclaim(struct journal *j, bool direct, bool kicked)
if (kthread && kthread_should_stop())
break;
- if (bch2_journal_error(j)) {
- ret = -EIO;
+ ret = bch2_journal_error(j);
+ if (ret)
break;
- }
bch2_journal_do_discards(j);
diff --git a/fs/bcachefs/journal_seq_blacklist.c b/fs/bcachefs/journal_seq_blacklist.c
index 1f25c111c54c..e463d2d95359 100644
--- a/fs/bcachefs/journal_seq_blacklist.c
+++ b/fs/bcachefs/journal_seq_blacklist.c
@@ -231,15 +231,14 @@ bool bch2_blacklist_entries_gc(struct bch_fs *c)
struct journal_seq_blacklist_table *t = c->journal_seq_blacklist_table;
BUG_ON(nr != t->nr);
- unsigned i;
- for (src = bl->start, i = t->nr == 0 ? 0 : eytzinger0_first(t->nr);
- src < bl->start + nr;
- src++, i = eytzinger0_next(i, nr)) {
+ src = bl->start;
+ eytzinger0_for_each(i, nr) {
BUG_ON(t->entries[i].start != le64_to_cpu(src->start));
BUG_ON(t->entries[i].end != le64_to_cpu(src->end));
if (t->entries[i].dirty || t->entries[i].end >= c->journal.oldest_seq_found_ondisk)
*dst++ = *src;
+ src++;
}
unsigned new_nr = dst - bl->start;
diff --git a/fs/bcachefs/journal_types.h b/fs/bcachefs/journal_types.h
index 1ef3a28ed6ab..8e0eba776b9d 100644
--- a/fs/bcachefs/journal_types.h
+++ b/fs/bcachefs/journal_types.h
@@ -12,7 +12,11 @@
/* btree write buffer steals 8 bits for its own purposes: */
#define JOURNAL_SEQ_MAX ((1ULL << 56) - 1)
-#define JOURNAL_BUF_BITS 2
+#define JOURNAL_STATE_BUF_BITS 2
+#define JOURNAL_STATE_BUF_NR (1U << JOURNAL_STATE_BUF_BITS)
+#define JOURNAL_STATE_BUF_MASK (JOURNAL_STATE_BUF_NR - 1)
+
+#define JOURNAL_BUF_BITS 4
#define JOURNAL_BUF_NR (1U << JOURNAL_BUF_BITS)
#define JOURNAL_BUF_MASK (JOURNAL_BUF_NR - 1)
@@ -82,7 +86,6 @@ struct journal_entry_pin {
struct journal_res {
bool ref;
- u8 idx;
u16 u64s;
u32 offset;
u64 seq;
@@ -98,9 +101,8 @@ union journal_res_state {
};
struct {
- u64 cur_entry_offset:20,
+ u64 cur_entry_offset:22,
idx:2,
- unwritten_idx:2,
buf0_count:10,
buf1_count:10,
buf2_count:10,
@@ -110,13 +112,13 @@ union journal_res_state {
/* bytes: */
#define JOURNAL_ENTRY_SIZE_MIN (64U << 10) /* 64k */
-#define JOURNAL_ENTRY_SIZE_MAX (4U << 20) /* 4M */
+#define JOURNAL_ENTRY_SIZE_MAX (4U << 22) /* 16M */
/*
* We stash some journal state as sentinel values in cur_entry_offset:
* note - cur_entry_offset is in units of u64s
*/
-#define JOURNAL_ENTRY_OFFSET_MAX ((1U << 20) - 1)
+#define JOURNAL_ENTRY_OFFSET_MAX ((1U << 22) - 1)
#define JOURNAL_ENTRY_BLOCKED_VAL (JOURNAL_ENTRY_OFFSET_MAX - 2)
#define JOURNAL_ENTRY_CLOSED_VAL (JOURNAL_ENTRY_OFFSET_MAX - 1)
@@ -149,28 +151,12 @@ enum journal_flags {
#undef x
};
-/* Reasons we may fail to get a journal reservation: */
-#define JOURNAL_ERRORS() \
- x(ok) \
- x(retry) \
- x(blocked) \
- x(max_in_flight) \
- x(journal_full) \
- x(journal_pin_full) \
- x(journal_stuck) \
- x(insufficient_devices)
-
-enum journal_errors {
-#define x(n) JOURNAL_ERR_##n,
- JOURNAL_ERRORS()
-#undef x
-};
-
typedef DARRAY(u64) darray_u64;
struct journal_bio {
struct bch_dev *ca;
unsigned buf_idx;
+ u64 submit_time;
struct bio bio;
};
@@ -199,7 +185,7 @@ struct journal {
* 0, or -ENOSPC if waiting on journal reclaim, or -EROFS if
* insufficient devices:
*/
- enum journal_errors cur_entry_error;
+ int cur_entry_error;
unsigned cur_entry_offset_if_blocked;
unsigned buf_size_want;
@@ -220,6 +206,8 @@ struct journal {
* other is possibly being written out.
*/
struct journal_buf buf[JOURNAL_BUF_NR];
+ void *free_buf;
+ unsigned free_buf_size;
spinlock_t lock;
@@ -237,6 +225,7 @@ struct journal {
/* Sequence number of most recent journal entry (last entry in @pin) */
atomic64_t seq;
+ u64 seq_write_started;
/* seq, last_seq from the most recent journal entry successfully written */
u64 seq_ondisk;
u64 flushed_seq_ondisk;
diff --git a/fs/bcachefs/lru.c b/fs/bcachefs/lru.c
index ce794d55818f..a299d9ec8ee4 100644
--- a/fs/bcachefs/lru.c
+++ b/fs/bcachefs/lru.c
@@ -6,6 +6,7 @@
#include "btree_iter.h"
#include "btree_update.h"
#include "btree_write_buffer.h"
+#include "ec.h"
#include "error.h"
#include "lru.h"
#include "recovery.h"
@@ -59,9 +60,9 @@ int bch2_lru_set(struct btree_trans *trans, u16 lru_id, u64 dev_bucket, u64 time
return __bch2_lru_set(trans, lru_id, dev_bucket, time, KEY_TYPE_set);
}
-int bch2_lru_change(struct btree_trans *trans,
- u16 lru_id, u64 dev_bucket,
- u64 old_time, u64 new_time)
+int __bch2_lru_change(struct btree_trans *trans,
+ u16 lru_id, u64 dev_bucket,
+ u64 old_time, u64 new_time)
{
if (old_time == new_time)
return 0;
@@ -78,7 +79,9 @@ static const char * const bch2_lru_types[] = {
};
int bch2_lru_check_set(struct btree_trans *trans,
- u16 lru_id, u64 time,
+ u16 lru_id,
+ u64 dev_bucket,
+ u64 time,
struct bkey_s_c referring_k,
struct bkey_buf *last_flushed)
{
@@ -87,9 +90,7 @@ int bch2_lru_check_set(struct btree_trans *trans,
struct btree_iter lru_iter;
struct bkey_s_c lru_k =
bch2_bkey_get_iter(trans, &lru_iter, BTREE_ID_lru,
- lru_pos(lru_id,
- bucket_to_u64(referring_k.k->p),
- time), 0);
+ lru_pos(lru_id, dev_bucket, time), 0);
int ret = bkey_err(lru_k);
if (ret)
return ret;
@@ -104,7 +105,7 @@ int bch2_lru_check_set(struct btree_trans *trans,
" %s",
bch2_lru_types[lru_type(lru_k)],
(bch2_bkey_val_to_text(&buf, c, referring_k), buf.buf))) {
- ret = bch2_lru_set(trans, lru_id, bucket_to_u64(referring_k.k->p), time);
+ ret = bch2_lru_set(trans, lru_id, dev_bucket, time);
if (ret)
goto err;
}
@@ -116,49 +117,73 @@ fsck_err:
return ret;
}
+static struct bbpos lru_pos_to_bp(struct bkey_s_c lru_k)
+{
+ enum bch_lru_type type = lru_type(lru_k);
+
+ switch (type) {
+ case BCH_LRU_read:
+ case BCH_LRU_fragmentation:
+ return BBPOS(BTREE_ID_alloc, u64_to_bucket(lru_k.k->p.offset));
+ case BCH_LRU_stripes:
+ return BBPOS(BTREE_ID_stripes, POS(0, lru_k.k->p.offset));
+ default:
+ BUG();
+ }
+}
+
+static u64 bkey_lru_type_idx(struct bch_fs *c,
+ enum bch_lru_type type,
+ struct bkey_s_c k)
+{
+ struct bch_alloc_v4 a_convert;
+ const struct bch_alloc_v4 *a;
+
+ switch (type) {
+ case BCH_LRU_read:
+ a = bch2_alloc_to_v4(k, &a_convert);
+ return alloc_lru_idx_read(*a);
+ case BCH_LRU_fragmentation: {
+ a = bch2_alloc_to_v4(k, &a_convert);
+
+ rcu_read_lock();
+ struct bch_dev *ca = bch2_dev_rcu_noerror(c, k.k->p.inode);
+ u64 idx = ca
+ ? alloc_lru_idx_fragmentation(*a, ca)
+ : 0;
+ rcu_read_unlock();
+ return idx;
+ }
+ case BCH_LRU_stripes:
+ return k.k->type == KEY_TYPE_stripe
+ ? stripe_lru_pos(bkey_s_c_to_stripe(k).v)
+ : 0;
+ default:
+ BUG();
+ }
+}
+
static int bch2_check_lru_key(struct btree_trans *trans,
struct btree_iter *lru_iter,
struct bkey_s_c lru_k,
struct bkey_buf *last_flushed)
{
struct bch_fs *c = trans->c;
- struct btree_iter iter;
- struct bkey_s_c k;
- struct bch_alloc_v4 a_convert;
- const struct bch_alloc_v4 *a;
struct printbuf buf1 = PRINTBUF;
struct printbuf buf2 = PRINTBUF;
- enum bch_lru_type type = lru_type(lru_k);
- struct bpos alloc_pos = u64_to_bucket(lru_k.k->p.offset);
- u64 idx;
- int ret;
-
- struct bch_dev *ca = bch2_dev_bucket_tryget_noerror(c, alloc_pos);
- if (fsck_err_on(!ca,
- trans, lru_entry_to_invalid_bucket,
- "lru key points to nonexistent device:bucket %llu:%llu",
- alloc_pos.inode, alloc_pos.offset))
- return bch2_btree_bit_mod_buffered(trans, BTREE_ID_lru, lru_iter->pos, false);
+ struct bbpos bp = lru_pos_to_bp(lru_k);
- k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_alloc, alloc_pos, 0);
- ret = bkey_err(k);
+ struct btree_iter iter;
+ struct bkey_s_c k = bch2_bkey_get_iter(trans, &iter, bp.btree, bp.pos, 0);
+ int ret = bkey_err(k);
if (ret)
goto err;
- a = bch2_alloc_to_v4(k, &a_convert);
-
- switch (type) {
- case BCH_LRU_read:
- idx = alloc_lru_idx_read(*a);
- break;
- case BCH_LRU_fragmentation:
- idx = alloc_lru_idx_fragmentation(*a, ca);
- break;
- }
+ enum bch_lru_type type = lru_type(lru_k);
+ u64 idx = bkey_lru_type_idx(c, type, k);
- if (lru_k.k->type != KEY_TYPE_set ||
- lru_pos_time(lru_k.k->p) != idx) {
+ if (lru_pos_time(lru_k.k->p) != idx) {
ret = bch2_btree_write_buffer_maybe_flush(trans, lru_k, last_flushed);
if (ret)
goto err;
@@ -176,7 +201,6 @@ static int bch2_check_lru_key(struct btree_trans *trans,
err:
fsck_err:
bch2_trans_iter_exit(trans, &iter);
- bch2_dev_put(ca);
printbuf_exit(&buf2);
printbuf_exit(&buf1);
return ret;
diff --git a/fs/bcachefs/lru.h b/fs/bcachefs/lru.h
index f31a6cf1514c..8abd0aa2083a 100644
--- a/fs/bcachefs/lru.h
+++ b/fs/bcachefs/lru.h
@@ -28,9 +28,14 @@ static inline enum bch_lru_type lru_type(struct bkey_s_c l)
{
u16 lru_id = l.k->p.inode >> 48;
- if (lru_id == BCH_LRU_FRAGMENTATION_START)
+ switch (lru_id) {
+ case BCH_LRU_BUCKET_FRAGMENTATION:
return BCH_LRU_fragmentation;
- return BCH_LRU_read;
+ case BCH_LRU_STRIPE_FRAGMENTATION:
+ return BCH_LRU_stripes;
+ default:
+ return BCH_LRU_read;
+ }
}
int bch2_lru_validate(struct bch_fs *, struct bkey_s_c, struct bkey_validate_context);
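As a rough guide to the constants compared above (they come from the lru_format.h hunk later in this diff): the lru_id lives in the top 16 bits of the key's inode field, and the two fragmentation LRUs sit at the very top of that range:

/* illustrative decode, mirroring lru_type() above */
u16 lru_id = l.k->p.inode >> 48;
/*
 * 0xffff (BCH_LRU_BUCKET_FRAGMENTATION) -> BCH_LRU_fragmentation
 * 0xfffe (BCH_LRU_STRIPE_FRAGMENTATION) -> BCH_LRU_stripes
 * anything else                         -> BCH_LRU_read
 */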
@@ -46,10 +51,19 @@ void bch2_lru_pos_to_text(struct printbuf *, struct bpos);
int bch2_lru_del(struct btree_trans *, u16, u64, u64);
int bch2_lru_set(struct btree_trans *, u16, u64, u64);
-int bch2_lru_change(struct btree_trans *, u16, u64, u64, u64);
+int __bch2_lru_change(struct btree_trans *, u16, u64, u64, u64);
+
+static inline int bch2_lru_change(struct btree_trans *trans,
+ u16 lru_id, u64 dev_bucket,
+ u64 old_time, u64 new_time)
+{
+ return old_time != new_time
+ ? __bch2_lru_change(trans, lru_id, dev_bucket, old_time, new_time)
+ : 0;
+}
struct bkey_buf;
-int bch2_lru_check_set(struct btree_trans *, u16, u64, struct bkey_s_c, struct bkey_buf *);
+int bch2_lru_check_set(struct btree_trans *, u16, u64, u64, struct bkey_s_c, struct bkey_buf *);
int bch2_check_lrus(struct bch_fs *);
diff --git a/fs/bcachefs/lru_format.h b/fs/bcachefs/lru_format.h
index f372cb3b8cda..b7392ad8e41f 100644
--- a/fs/bcachefs/lru_format.h
+++ b/fs/bcachefs/lru_format.h
@@ -9,7 +9,8 @@ struct bch_lru {
#define BCH_LRU_TYPES() \
x(read) \
- x(fragmentation)
+ x(fragmentation) \
+ x(stripes)
enum bch_lru_type {
#define x(n) BCH_LRU_##n,
@@ -17,7 +18,8 @@ enum bch_lru_type {
#undef x
};
-#define BCH_LRU_FRAGMENTATION_START ((1U << 16) - 1)
+#define BCH_LRU_BUCKET_FRAGMENTATION ((1U << 16) - 1)
+#define BCH_LRU_STRIPE_FRAGMENTATION ((1U << 16) - 2)
#define LRU_TIME_BITS 48
#define LRU_TIME_MAX ((1ULL << LRU_TIME_BITS) - 1)
diff --git a/fs/bcachefs/migrate.c b/fs/bcachefs/migrate.c
index ddc187fb693d..57ad662871ba 100644
--- a/fs/bcachefs/migrate.c
+++ b/fs/bcachefs/migrate.c
@@ -15,6 +15,7 @@
#include "keylist.h"
#include "migrate.h"
#include "move.h"
+#include "progress.h"
#include "replicas.h"
#include "super-io.h"
@@ -76,7 +77,9 @@ static int bch2_dev_usrdata_drop_key(struct btree_trans *trans,
return 0;
}
-static int bch2_dev_usrdata_drop(struct bch_fs *c, unsigned dev_idx, int flags)
+static int bch2_dev_usrdata_drop(struct bch_fs *c,
+ struct progress_indicator_state *progress,
+ unsigned dev_idx, int flags)
{
struct btree_trans *trans = bch2_trans_get(c);
enum btree_id id;
@@ -88,8 +91,10 @@ static int bch2_dev_usrdata_drop(struct bch_fs *c, unsigned dev_idx, int flags)
ret = for_each_btree_key_commit(trans, iter, id, POS_MIN,
BTREE_ITER_prefetch|BTREE_ITER_all_snapshots, k,
- NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
- bch2_dev_usrdata_drop_key(trans, &iter, k, dev_idx, flags));
+ NULL, NULL, BCH_TRANS_COMMIT_no_enospc, ({
+ bch2_progress_update_iter(trans, progress, &iter, "dropping user data");
+ bch2_dev_usrdata_drop_key(trans, &iter, k, dev_idx, flags);
+ }));
if (ret)
break;
}
@@ -99,7 +104,9 @@ static int bch2_dev_usrdata_drop(struct bch_fs *c, unsigned dev_idx, int flags)
return ret;
}
-static int bch2_dev_metadata_drop(struct bch_fs *c, unsigned dev_idx, int flags)
+static int bch2_dev_metadata_drop(struct bch_fs *c,
+ struct progress_indicator_state *progress,
+ unsigned dev_idx, int flags)
{
struct btree_trans *trans;
struct btree_iter iter;
@@ -125,6 +132,8 @@ retry:
while (bch2_trans_begin(trans),
(b = bch2_btree_iter_peek_node(&iter)) &&
!(ret = PTR_ERR_OR_ZERO(b))) {
+ bch2_progress_update_iter(trans, progress, &iter, "dropping metadata");
+
if (!bch2_bkey_has_device_c(bkey_i_to_s_c(&b->key), dev_idx))
goto next;
@@ -169,6 +178,11 @@ err:
int bch2_dev_data_drop(struct bch_fs *c, unsigned dev_idx, int flags)
{
- return bch2_dev_usrdata_drop(c, dev_idx, flags) ?:
- bch2_dev_metadata_drop(c, dev_idx, flags);
+ struct progress_indicator_state progress;
+ bch2_progress_init(&progress, c,
+ BIT_ULL(BTREE_ID_extents)|
+ BIT_ULL(BTREE_ID_reflink));
+
+ return bch2_dev_usrdata_drop(c, &progress, dev_idx, flags) ?:
+ bch2_dev_metadata_drop(c, &progress, dev_idx, flags);
}
diff --git a/fs/bcachefs/move.c b/fs/bcachefs/move.c
index 160b4374160a..8fcdc6984f6e 100644
--- a/fs/bcachefs/move.c
+++ b/fs/bcachefs/move.c
@@ -38,28 +38,28 @@ const char * const bch2_data_ops_strs[] = {
NULL
};
-static void trace_move_extent2(struct bch_fs *c, struct bkey_s_c k,
+static void trace_io_move2(struct bch_fs *c, struct bkey_s_c k,
struct bch_io_opts *io_opts,
struct data_update_opts *data_opts)
{
- if (trace_move_extent_enabled()) {
+ if (trace_io_move_enabled()) {
struct printbuf buf = PRINTBUF;
bch2_bkey_val_to_text(&buf, c, k);
prt_newline(&buf);
bch2_data_update_opts_to_text(&buf, c, io_opts, data_opts);
- trace_move_extent(c, buf.buf);
+ trace_io_move(c, buf.buf);
printbuf_exit(&buf);
}
}
-static void trace_move_extent_read2(struct bch_fs *c, struct bkey_s_c k)
+static void trace_io_move_read2(struct bch_fs *c, struct bkey_s_c k)
{
- if (trace_move_extent_read_enabled()) {
+ if (trace_io_move_read_enabled()) {
struct printbuf buf = PRINTBUF;
bch2_bkey_val_to_text(&buf, c, k);
- trace_move_extent_read(c, buf.buf);
+ trace_io_move_read(c, buf.buf);
printbuf_exit(&buf);
}
}
@@ -74,11 +74,7 @@ struct moving_io {
unsigned read_sectors;
unsigned write_sectors;
- struct bch_read_bio rbio;
-
struct data_update write;
- /* Must be last since it is variable size */
- struct bio_vec bi_inline_vecs[];
};
static void move_free(struct moving_io *io)
@@ -88,43 +84,72 @@ static void move_free(struct moving_io *io)
if (io->b)
atomic_dec(&io->b->count);
- bch2_data_update_exit(&io->write);
-
mutex_lock(&ctxt->lock);
list_del(&io->io_list);
wake_up(&ctxt->wait);
mutex_unlock(&ctxt->lock);
+ if (!io->write.data_opts.scrub) {
+ bch2_data_update_exit(&io->write);
+ } else {
+ bch2_bio_free_pages_pool(io->write.op.c, &io->write.op.wbio.bio);
+ kfree(io->write.bvecs);
+ }
kfree(io);
}
static void move_write_done(struct bch_write_op *op)
{
struct moving_io *io = container_of(op, struct moving_io, write.op);
+ struct bch_fs *c = op->c;
struct moving_context *ctxt = io->write.ctxt;
- if (io->write.op.error)
+ if (op->error) {
+ if (trace_io_move_write_fail_enabled()) {
+ struct printbuf buf = PRINTBUF;
+
+ bch2_write_op_to_text(&buf, op);
+ prt_printf(&buf, "ret\t%s\n", bch2_err_str(op->error));
+ trace_io_move_write_fail(c, buf.buf);
+ printbuf_exit(&buf);
+ }
+ this_cpu_inc(c->counters[BCH_COUNTER_io_move_write_fail]);
+
ctxt->write_error = true;
+ }
- atomic_sub(io->write_sectors, &io->write.ctxt->write_sectors);
- atomic_dec(&io->write.ctxt->write_ios);
+ atomic_sub(io->write_sectors, &ctxt->write_sectors);
+ atomic_dec(&ctxt->write_ios);
move_free(io);
closure_put(&ctxt->cl);
}
static void move_write(struct moving_io *io)
{
- if (unlikely(io->rbio.bio.bi_status || io->rbio.hole)) {
+ struct moving_context *ctxt = io->write.ctxt;
+
+ if (ctxt->stats) {
+ if (io->write.rbio.bio.bi_status)
+ atomic64_add(io->write.rbio.bvec_iter.bi_size >> 9,
+ &ctxt->stats->sectors_error_uncorrected);
+ else if (io->write.rbio.saw_error)
+ atomic64_add(io->write.rbio.bvec_iter.bi_size >> 9,
+ &ctxt->stats->sectors_error_corrected);
+ }
+
+ if (unlikely(io->write.rbio.ret ||
+ io->write.rbio.bio.bi_status ||
+ io->write.data_opts.scrub)) {
move_free(io);
return;
}
- if (trace_move_extent_write_enabled()) {
+ if (trace_io_move_write_enabled()) {
struct bch_fs *c = io->write.op.c;
struct printbuf buf = PRINTBUF;
bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(io->write.k.k));
- trace_move_extent_write(c, buf.buf);
+ trace_io_move_write(c, buf.buf);
printbuf_exit(&buf);
}
@@ -132,7 +157,7 @@ static void move_write(struct moving_io *io)
atomic_add(io->write_sectors, &io->write.ctxt->write_sectors);
atomic_inc(&io->write.ctxt->write_ios);
- bch2_data_update_read_done(&io->write, io->rbio.pick.crc);
+ bch2_data_update_read_done(&io->write);
}
struct moving_io *bch2_moving_ctxt_next_pending_write(struct moving_context *ctxt)
@@ -145,7 +170,7 @@ struct moving_io *bch2_moving_ctxt_next_pending_write(struct moving_context *ctx
static void move_read_endio(struct bio *bio)
{
- struct moving_io *io = container_of(bio, struct moving_io, rbio.bio);
+ struct moving_io *io = container_of(bio, struct moving_io, write.rbio.bio);
struct moving_context *ctxt = io->write.ctxt;
atomic_sub(io->read_sectors, &ctxt->read_sectors);
@@ -258,14 +283,10 @@ int bch2_move_extent(struct moving_context *ctxt,
{
struct btree_trans *trans = ctxt->trans;
struct bch_fs *c = trans->c;
- struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
- struct moving_io *io;
- const union bch_extent_entry *entry;
- struct extent_ptr_decoded p;
- unsigned sectors = k.k->size, pages;
int ret = -ENOMEM;
- trace_move_extent2(c, k, &io_opts, &data_opts);
+ trace_io_move2(c, k, &io_opts, &data_opts);
+ this_cpu_add(c->counters[BCH_COUNTER_io_move], k.k->size);
if (ctxt->stats)
ctxt->stats->pos = BBPOS(iter->btree_id, iter->pos);
@@ -273,7 +294,8 @@ int bch2_move_extent(struct moving_context *ctxt,
bch2_data_update_opts_normalize(k, &data_opts);
if (!data_opts.rewrite_ptrs &&
- !data_opts.extra_replicas) {
+ !data_opts.extra_replicas &&
+ !data_opts.scrub) {
if (data_opts.kill_ptrs)
return bch2_extent_drop_ptrs(trans, iter, k, &io_opts, &data_opts);
return 0;
@@ -285,13 +307,7 @@ int bch2_move_extent(struct moving_context *ctxt,
*/
bch2_trans_unlock(trans);
- /* write path might have to decompress data: */
- bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
- sectors = max_t(unsigned, sectors, p.crc.uncompressed_size);
-
- pages = DIV_ROUND_UP(sectors, PAGE_SECTORS);
- io = kzalloc(sizeof(struct moving_io) +
- sizeof(struct bio_vec) * pages, GFP_KERNEL);
+ struct moving_io *io = kzalloc(sizeof(struct moving_io), GFP_KERNEL);
if (!io)
goto err;
@@ -300,31 +316,27 @@ int bch2_move_extent(struct moving_context *ctxt,
io->read_sectors = k.k->size;
io->write_sectors = k.k->size;
- bio_init(&io->write.op.wbio.bio, NULL, io->bi_inline_vecs, pages, 0);
- io->write.op.wbio.bio.bi_ioprio =
- IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0);
-
- if (bch2_bio_alloc_pages(&io->write.op.wbio.bio, sectors << 9,
- GFP_KERNEL))
- goto err_free;
+ if (!data_opts.scrub) {
+ ret = bch2_data_update_init(trans, iter, ctxt, &io->write, ctxt->wp,
+ &io_opts, data_opts, iter->btree_id, k);
+ if (ret)
+ goto err_free;
- io->rbio.c = c;
- io->rbio.opts = io_opts;
- bio_init(&io->rbio.bio, NULL, io->bi_inline_vecs, pages, 0);
- io->rbio.bio.bi_vcnt = pages;
- io->rbio.bio.bi_ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0);
- io->rbio.bio.bi_iter.bi_size = sectors << 9;
+ io->write.op.end_io = move_write_done;
+ } else {
+ bch2_bkey_buf_init(&io->write.k);
+ bch2_bkey_buf_reassemble(&io->write.k, c, k);
- io->rbio.bio.bi_opf = REQ_OP_READ;
- io->rbio.bio.bi_iter.bi_sector = bkey_start_offset(k.k);
- io->rbio.bio.bi_end_io = move_read_endio;
+ io->write.op.c = c;
+ io->write.data_opts = data_opts;
- ret = bch2_data_update_init(trans, iter, ctxt, &io->write, ctxt->wp,
- io_opts, data_opts, iter->btree_id, k);
- if (ret)
- goto err_free_pages;
+ ret = bch2_data_update_bios_init(&io->write, c, &io_opts);
+ if (ret)
+ goto err_free;
+ }
- io->write.op.end_io = move_write_done;
+ io->write.rbio.bio.bi_end_io = move_read_endio;
+ io->write.rbio.bio.bi_ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0);
if (ctxt->rate)
bch2_ratelimit_increment(ctxt->rate, k.k->size);
@@ -339,9 +351,7 @@ int bch2_move_extent(struct moving_context *ctxt,
atomic_inc(&io->b->count);
}
- this_cpu_add(c->counters[BCH_COUNTER_io_move], k.k->size);
- this_cpu_add(c->counters[BCH_COUNTER_move_extent_read], k.k->size);
- trace_move_extent_read2(c, k);
+ trace_io_move_read2(c, k);
mutex_lock(&ctxt->lock);
atomic_add(io->read_sectors, &ctxt->read_sectors);
@@ -356,33 +366,33 @@ int bch2_move_extent(struct moving_context *ctxt,
* ctxt when doing wakeup
*/
closure_get(&ctxt->cl);
- bch2_read_extent(trans, &io->rbio,
- bkey_start_pos(k.k),
- iter->btree_id, k, 0,
- BCH_READ_NODECODE|
- BCH_READ_LAST_FRAGMENT);
+ __bch2_read_extent(trans, &io->write.rbio,
+ io->write.rbio.bio.bi_iter,
+ bkey_start_pos(k.k),
+ iter->btree_id, k, 0,
+ NULL,
+ BCH_READ_last_fragment,
+ data_opts.scrub ? data_opts.read_dev : -1);
return 0;
-err_free_pages:
- bio_free_pages(&io->write.op.wbio.bio);
err_free:
kfree(io);
err:
- if (ret == -BCH_ERR_data_update_done)
+ if (bch2_err_matches(ret, BCH_ERR_data_update_done))
return 0;
if (bch2_err_matches(ret, EROFS) ||
bch2_err_matches(ret, BCH_ERR_transaction_restart))
return ret;
- count_event(c, move_extent_start_fail);
+ count_event(c, io_move_start_fail);
- if (trace_move_extent_start_fail_enabled()) {
+ if (trace_io_move_start_fail_enabled()) {
struct printbuf buf = PRINTBUF;
bch2_bkey_val_to_text(&buf, c, k);
prt_str(&buf, ": ");
prt_str(&buf, bch2_err_str(ret));
- trace_move_extent_start_fail(c, buf.buf);
+ trace_io_move_start_fail(c, buf.buf);
printbuf_exit(&buf);
}
return ret;
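The scrub path added above reads and verifies a single device's copy instead of rewriting the extent: when data_opts.scrub is set, bch2_move_extent() skips bch2_data_update_init(), points the read at data_opts.read_dev, and move_write() frees the io without issuing a write. A hypothetical caller-side setup, using only fields that appear in this diff:

/* hypothetical scrub request; scrub and read_dev are data_update_opts fields */
struct data_update_opts data_opts = {
	.scrub		= true,
	.read_dev	= dev_idx,	/* verify only this device's copy */
};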
@@ -551,6 +561,7 @@ static int bch2_move_data_btree(struct moving_context *ctxt,
bch2_trans_begin(trans);
bch2_trans_iter_init(trans, &iter, btree_id, start,
BTREE_ITER_prefetch|
+ BTREE_ITER_not_extents|
BTREE_ITER_all_snapshots);
if (ctxt->rate)
@@ -581,7 +592,7 @@ static int bch2_move_data_btree(struct moving_context *ctxt,
k.k->type == KEY_TYPE_reflink_p &&
REFLINK_P_MAY_UPDATE_OPTIONS(bkey_s_c_to_reflink_p(k).v)) {
struct bkey_s_c_reflink_p p = bkey_s_c_to_reflink_p(k);
- s64 offset_into_extent = iter.pos.offset - bkey_start_offset(k.k);
+ s64 offset_into_extent = 0;
bch2_trans_iter_exit(trans, &reflink_iter);
k = bch2_lookup_indirect_extent(trans, &reflink_iter, &offset_into_extent, p, true, 0);
@@ -600,6 +611,7 @@ static int bch2_move_data_btree(struct moving_context *ctxt,
* pointer - need to fixup iter->k
*/
extent_iter = &reflink_iter;
+ offset_into_extent = 0;
}
if (!bkey_extent_is_direct_data(k.k))
@@ -627,7 +639,7 @@ static int bch2_move_data_btree(struct moving_context *ctxt,
if (bch2_err_matches(ret2, BCH_ERR_transaction_restart))
continue;
- if (ret2 == -ENOMEM) {
+ if (bch2_err_matches(ret2, ENOMEM)) {
/* memory allocation failure, wait for some IO to finish */
bch2_move_ctxt_wait_for_io(ctxt);
continue;
@@ -689,21 +701,22 @@ int bch2_move_data(struct bch_fs *c,
bool wait_on_copygc,
move_pred_fn pred, void *arg)
{
-
struct moving_context ctxt;
- int ret;
bch2_moving_ctxt_init(&ctxt, c, rate, stats, wp, wait_on_copygc);
- ret = __bch2_move_data(&ctxt, start, end, pred, arg);
+ int ret = __bch2_move_data(&ctxt, start, end, pred, arg);
bch2_moving_ctxt_exit(&ctxt);
return ret;
}
-int bch2_evacuate_bucket(struct moving_context *ctxt,
- struct move_bucket_in_flight *bucket_in_flight,
- struct bpos bucket, int gen,
- struct data_update_opts _data_opts)
+static int __bch2_move_data_phys(struct moving_context *ctxt,
+ struct move_bucket_in_flight *bucket_in_flight,
+ unsigned dev,
+ u64 bucket_start,
+ u64 bucket_end,
+ unsigned data_types,
+ move_pred_fn pred, void *arg)
{
struct btree_trans *trans = ctxt->trans;
struct bch_fs *c = trans->c;
@@ -712,16 +725,19 @@ int bch2_evacuate_bucket(struct moving_context *ctxt,
struct btree_iter iter = {}, bp_iter = {};
struct bkey_buf sk;
struct bkey_s_c k;
- struct data_update_opts data_opts;
- unsigned sectors_moved = 0;
struct bkey_buf last_flushed;
int ret = 0;
- struct bch_dev *ca = bch2_dev_tryget(c, bucket.inode);
+ struct bch_dev *ca = bch2_dev_tryget(c, dev);
if (!ca)
return 0;
- trace_bucket_evacuate(c, &bucket);
+ bucket_end = min(bucket_end, ca->mi.nbuckets);
+
+ struct bpos bp_start = bucket_pos_to_bp_start(ca, POS(dev, bucket_start));
+ struct bpos bp_end = bucket_pos_to_bp_end(ca, POS(dev, bucket_end));
+ bch2_dev_put(ca);
+ ca = NULL;
bch2_bkey_buf_init(&last_flushed);
bkey_init(&last_flushed.k->k);
@@ -732,8 +748,7 @@ int bch2_evacuate_bucket(struct moving_context *ctxt,
*/
bch2_trans_begin(trans);
- bch2_trans_iter_init(trans, &bp_iter, BTREE_ID_backpointers,
- bucket_pos_to_bp_start(ca, bucket), 0);
+ bch2_trans_iter_init(trans, &bp_iter, BTREE_ID_backpointers, bp_start, 0);
bch_err_msg(c, ret, "looking up alloc key");
if (ret)
@@ -757,7 +772,7 @@ int bch2_evacuate_bucket(struct moving_context *ctxt,
if (ret)
goto err;
- if (!k.k || bkey_gt(k.k->p, bucket_pos_to_bp_end(ca, bucket)))
+ if (!k.k || bkey_gt(k.k->p, bp_end))
break;
if (k.k->type != KEY_TYPE_backpointer)
@@ -765,107 +780,148 @@ int bch2_evacuate_bucket(struct moving_context *ctxt,
struct bkey_s_c_backpointer bp = bkey_s_c_to_backpointer(k);
- if (!bp.v->level) {
- k = bch2_backpointer_get_key(trans, bp, &iter, 0, &last_flushed);
- ret = bkey_err(k);
- if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
- continue;
- if (ret)
- goto err;
- if (!k.k)
- goto next;
+ if (ctxt->stats)
+ ctxt->stats->offset = bp.k->p.offset >> MAX_EXTENT_COMPRESS_RATIO_SHIFT;
+
+ if (!(data_types & BIT(bp.v->data_type)))
+ goto next;
- bch2_bkey_buf_reassemble(&sk, c, k);
- k = bkey_i_to_s_c(sk.k);
+ if (!bp.v->level && bp.v->btree_id == BTREE_ID_stripes)
+ goto next;
+
+ k = bch2_backpointer_get_key(trans, bp, &iter, 0, &last_flushed);
+ ret = bkey_err(k);
+ if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
+ continue;
+ if (ret)
+ goto err;
+ if (!k.k)
+ goto next;
+ if (!bp.v->level) {
ret = bch2_move_get_io_opts_one(trans, &io_opts, &iter, k);
if (ret) {
bch2_trans_iter_exit(trans, &iter);
continue;
}
+ }
- data_opts = _data_opts;
- data_opts.target = io_opts.background_target;
- data_opts.rewrite_ptrs = 0;
-
- unsigned sectors = bp.v->bucket_len; /* move_extent will drop locks */
- unsigned i = 0;
- const union bch_extent_entry *entry;
- struct extent_ptr_decoded p;
- bkey_for_each_ptr_decode(k.k, bch2_bkey_ptrs_c(k), p, entry) {
- if (p.ptr.dev == bucket.inode) {
- if (p.ptr.cached) {
- bch2_trans_iter_exit(trans, &iter);
- goto next;
- }
- data_opts.rewrite_ptrs |= 1U << i;
- break;
- }
- i++;
- }
-
- ret = bch2_move_extent(ctxt, bucket_in_flight,
- &iter, k, io_opts, data_opts);
+ struct data_update_opts data_opts = {};
+ if (!pred(c, arg, k, &io_opts, &data_opts)) {
bch2_trans_iter_exit(trans, &iter);
+ goto next;
+ }
- if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
- continue;
- if (ret == -ENOMEM) {
- /* memory allocation failure, wait for some IO to finish */
- bch2_move_ctxt_wait_for_io(ctxt);
- continue;
- }
- if (ret)
- goto err;
-
- if (ctxt->stats)
- atomic64_add(sectors, &ctxt->stats->sectors_seen);
- sectors_moved += sectors;
- } else {
- struct btree *b;
+ if (data_opts.scrub &&
+ !bch2_dev_idx_is_online(c, data_opts.read_dev)) {
+ bch2_trans_iter_exit(trans, &iter);
+ ret = -BCH_ERR_device_offline;
+ break;
+ }
- b = bch2_backpointer_get_node(trans, bp, &iter, &last_flushed);
- ret = PTR_ERR_OR_ZERO(b);
- if (ret == -BCH_ERR_backpointer_to_overwritten_btree_node)
- goto next;
- if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
- continue;
- if (ret)
- goto err;
- if (!b)
- goto next;
+ bch2_bkey_buf_reassemble(&sk, c, k);
+ k = bkey_i_to_s_c(sk.k);
- unsigned sectors = btree_ptr_sectors_written(bkey_i_to_s_c(&b->key));
+ /* move_extent will drop locks */
+ unsigned sectors = bp.v->bucket_len;
- ret = bch2_btree_node_rewrite(trans, &iter, b, 0);
- bch2_trans_iter_exit(trans, &iter);
+ if (!bp.v->level)
+ ret = bch2_move_extent(ctxt, bucket_in_flight, &iter, k, io_opts, data_opts);
+ else if (!data_opts.scrub)
+ ret = bch2_btree_node_rewrite_pos(trans, bp.v->btree_id, bp.v->level, k.k->p, 0);
+ else
+ ret = bch2_btree_node_scrub(trans, bp.v->btree_id, bp.v->level, k, data_opts.read_dev);
- if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
- continue;
- if (ret)
- goto err;
+ bch2_trans_iter_exit(trans, &iter);
- if (ctxt->rate)
- bch2_ratelimit_increment(ctxt->rate, sectors);
- if (ctxt->stats) {
- atomic64_add(sectors, &ctxt->stats->sectors_seen);
- atomic64_add(sectors, &ctxt->stats->sectors_moved);
- }
- sectors_moved += btree_sectors(c);
+ if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
+ continue;
+ if (ret == -ENOMEM) {
+ /* memory allocation failure, wait for some IO to finish */
+ bch2_move_ctxt_wait_for_io(ctxt);
+ continue;
}
+ if (ret)
+ goto err;
+
+ if (ctxt->stats)
+ atomic64_add(sectors, &ctxt->stats->sectors_seen);
next:
bch2_btree_iter_advance(&bp_iter);
}
-
- trace_evacuate_bucket(c, &bucket, sectors_moved, ca->mi.bucket_size, ret);
err:
bch2_trans_iter_exit(trans, &bp_iter);
- bch2_dev_put(ca);
bch2_bkey_buf_exit(&sk, c);
bch2_bkey_buf_exit(&last_flushed, c);
return ret;
}
+static int bch2_move_data_phys(struct bch_fs *c,
+ unsigned dev,
+ u64 start,
+ u64 end,
+ unsigned data_types,
+ struct bch_ratelimit *rate,
+ struct bch_move_stats *stats,
+ struct write_point_specifier wp,
+ bool wait_on_copygc,
+ move_pred_fn pred, void *arg)
+{
+ struct moving_context ctxt;
+
+ bch2_trans_run(c, bch2_btree_write_buffer_flush_sync(trans));
+
+ bch2_moving_ctxt_init(&ctxt, c, rate, stats, wp, wait_on_copygc);
+ ctxt.stats->phys = true;
+ ctxt.stats->data_type = (int) DATA_PROGRESS_DATA_TYPE_phys;
+
+ int ret = __bch2_move_data_phys(&ctxt, NULL, dev, start, end, data_types, pred, arg);
+ bch2_moving_ctxt_exit(&ctxt);
+
+ return ret;
+}
+
+struct evacuate_bucket_arg {
+ struct bpos bucket;
+ int gen;
+ struct data_update_opts data_opts;
+};
+
+static bool evacuate_bucket_pred(struct bch_fs *c, void *_arg, struct bkey_s_c k,
+ struct bch_io_opts *io_opts,
+ struct data_update_opts *data_opts)
+{
+ struct evacuate_bucket_arg *arg = _arg;
+
+ *data_opts = arg->data_opts;
+
+ unsigned i = 0;
+ bkey_for_each_ptr(bch2_bkey_ptrs_c(k), ptr) {
+ if (ptr->dev == arg->bucket.inode &&
+ (arg->gen < 0 || arg->gen == ptr->gen) &&
+ !ptr->cached)
+ data_opts->rewrite_ptrs |= BIT(i);
+ i++;
+ }
+
+ return data_opts->rewrite_ptrs != 0;
+}
+
+int bch2_evacuate_bucket(struct moving_context *ctxt,
+ struct move_bucket_in_flight *bucket_in_flight,
+ struct bpos bucket, int gen,
+ struct data_update_opts data_opts)
+{
+ struct evacuate_bucket_arg arg = { bucket, gen, data_opts, };
+
+ return __bch2_move_data_phys(ctxt, bucket_in_flight,
+ bucket.inode,
+ bucket.offset,
+ bucket.offset + 1,
+ ~0,
+ evacuate_bucket_pred, &arg);
+}
+
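The hunks above turn bucket evacuation into a predicate handed to the generic physical-move walker: the walker visits each extent found via backpointers, and the caller-supplied predicate decides which pointers to rewrite by setting bits in rewrite_ptrs. Below is a minimal, self-contained C sketch of that callback-plus-bitmask pattern; sketch_extent, sketch_opts and evacuate_arg are illustrative stand-ins, not the bcachefs structures, and the walker is reduced to a plain loop.

/*
 * Sketch of the predicate-driven evacuation pattern: the walker asks the
 * predicate which pointers of each extent to rewrite. Stand-in types only.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct sketch_ptr { unsigned dev; unsigned gen; bool cached; };
struct sketch_extent { struct sketch_ptr ptrs[4]; unsigned nr_ptrs; };
struct sketch_opts { uint32_t rewrite_ptrs; };

typedef bool (*sketch_pred_fn)(void *arg, const struct sketch_extent *e,
			       struct sketch_opts *opts);

struct evacuate_arg { unsigned dev; int gen; };

/* mirrors evacuate_bucket_pred: flag every live pointer on the target device */
static bool evacuate_pred(void *_arg, const struct sketch_extent *e,
			  struct sketch_opts *opts)
{
	struct evacuate_arg *arg = _arg;

	for (unsigned i = 0; i < e->nr_ptrs; i++) {
		const struct sketch_ptr *p = &e->ptrs[i];

		if (p->dev == arg->dev &&
		    (arg->gen < 0 || (unsigned) arg->gen == p->gen) &&
		    !p->cached)
			opts->rewrite_ptrs |= 1U << i;
	}
	return opts->rewrite_ptrs != 0;
}

/* stand-in for the backpointer walk in __bch2_move_data_phys() */
static void walk_extents(const struct sketch_extent *extents, unsigned nr,
			 sketch_pred_fn pred, void *arg)
{
	for (unsigned i = 0; i < nr; i++) {
		struct sketch_opts opts = { 0 };

		if (pred(arg, &extents[i], &opts))
			printf("extent %u: rewrite mask %x\n",
			       i, (unsigned) opts.rewrite_ptrs);
	}
}

int main(void)
{
	struct sketch_extent extents[] = {
		{ .ptrs = { { .dev = 0 }, { .dev = 1, .cached = true } }, .nr_ptrs = 2 },
		{ .ptrs = { { .dev = 1 } }, .nr_ptrs = 1 },
	};
	struct evacuate_arg arg = { .dev = 1, .gen = -1 };

	walk_extents(extents, 2, evacuate_pred, &arg);
	return 0;
}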
typedef bool (*move_btree_pred)(struct bch_fs *, void *,
struct btree *, struct bch_io_opts *,
struct data_update_opts *);
@@ -1007,14 +1063,6 @@ static bool rereplicate_btree_pred(struct bch_fs *c, void *arg,
return rereplicate_pred(c, arg, bkey_i_to_s_c(&b->key), io_opts, data_opts);
}
-static bool migrate_btree_pred(struct bch_fs *c, void *arg,
- struct btree *b,
- struct bch_io_opts *io_opts,
- struct data_update_opts *data_opts)
-{
- return migrate_pred(c, arg, bkey_i_to_s_c(&b->key), io_opts, data_opts);
-}
-
/*
* Ancient versions of bcachefs produced packed formats which could represent
* keys that the in memory format cannot represent; this checks for those
@@ -1104,6 +1152,30 @@ static bool drop_extra_replicas_btree_pred(struct bch_fs *c, void *arg,
return drop_extra_replicas_pred(c, arg, bkey_i_to_s_c(&b->key), io_opts, data_opts);
}
+static bool scrub_pred(struct bch_fs *c, void *_arg,
+ struct bkey_s_c k,
+ struct bch_io_opts *io_opts,
+ struct data_update_opts *data_opts)
+{
+ struct bch_ioctl_data *arg = _arg;
+
+ if (k.k->type != KEY_TYPE_btree_ptr_v2) {
+ struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
+ const union bch_extent_entry *entry;
+ struct extent_ptr_decoded p;
+ bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
+ if (p.ptr.dev == arg->migrate.dev) {
+ if (!p.crc.csum_type)
+ return false;
+ break;
+ }
+ }
+
+ data_opts->scrub = true;
+ data_opts->read_dev = arg->migrate.dev;
+ return true;
+}
+
int bch2_data_job(struct bch_fs *c,
struct bch_move_stats *stats,
struct bch_ioctl_data op)
@@ -1118,6 +1190,22 @@ int bch2_data_job(struct bch_fs *c,
bch2_move_stats_init(stats, bch2_data_ops_strs[op.op]);
switch (op.op) {
+ case BCH_DATA_OP_scrub:
+ /*
+	 * prevent tests from spuriously failing: make sure we see all
+ * btree nodes that need to be repaired
+ */
+ bch2_btree_interior_updates_flush(c);
+
+ ret = bch2_move_data_phys(c, op.scrub.dev, 0, U64_MAX,
+ op.scrub.data_types,
+ NULL,
+ stats,
+ writepoint_hashed((unsigned long) current),
+ false,
+ scrub_pred, &op) ?: ret;
+ break;
+
case BCH_DATA_OP_rereplicate:
stats->data_type = BCH_DATA_journal;
ret = bch2_journal_flush_device_pins(&c->journal, -1);
@@ -1137,14 +1225,14 @@ int bch2_data_job(struct bch_fs *c,
stats->data_type = BCH_DATA_journal;
ret = bch2_journal_flush_device_pins(&c->journal, op.migrate.dev);
- ret = bch2_move_btree(c, start, end,
- migrate_btree_pred, &op, stats) ?: ret;
- ret = bch2_move_data(c, start, end,
- NULL,
- stats,
- writepoint_hashed((unsigned long) current),
- true,
- migrate_pred, &op) ?: ret;
+ ret = bch2_move_data_phys(c, op.migrate.dev, 0, U64_MAX,
+ ~0,
+ NULL,
+ stats,
+ writepoint_hashed((unsigned long) current),
+ true,
+ migrate_pred, &op) ?: ret;
+ bch2_btree_interior_updates_flush(c);
ret = bch2_replicas_gc2(c) ?: ret;
break;
case BCH_DATA_OP_rewrite_old_nodes:
@@ -1176,17 +1264,17 @@ void bch2_move_stats_to_text(struct printbuf *out, struct bch_move_stats *stats)
prt_newline(out);
printbuf_indent_add(out, 2);
- prt_printf(out, "keys moved: %llu\n", atomic64_read(&stats->keys_moved));
- prt_printf(out, "keys raced: %llu\n", atomic64_read(&stats->keys_raced));
- prt_printf(out, "bytes seen: ");
+ prt_printf(out, "keys moved:\t%llu\n", atomic64_read(&stats->keys_moved));
+ prt_printf(out, "keys raced:\t%llu\n", atomic64_read(&stats->keys_raced));
+ prt_printf(out, "bytes seen:\t");
prt_human_readable_u64(out, atomic64_read(&stats->sectors_seen) << 9);
prt_newline(out);
- prt_printf(out, "bytes moved: ");
+ prt_printf(out, "bytes moved:\t");
prt_human_readable_u64(out, atomic64_read(&stats->sectors_moved) << 9);
prt_newline(out);
- prt_printf(out, "bytes raced: ");
+ prt_printf(out, "bytes raced:\t");
prt_human_readable_u64(out, atomic64_read(&stats->sectors_raced) << 9);
prt_newline(out);
@@ -1195,7 +1283,8 @@ void bch2_move_stats_to_text(struct printbuf *out, struct bch_move_stats *stats)
static void bch2_moving_ctxt_to_text(struct printbuf *out, struct bch_fs *c, struct moving_context *ctxt)
{
- struct moving_io *io;
+ if (!out->nr_tabstops)
+ printbuf_tabstop_push(out, 32);
bch2_move_stats_to_text(out, ctxt->stats);
printbuf_indent_add(out, 2);
@@ -1215,8 +1304,9 @@ static void bch2_moving_ctxt_to_text(struct printbuf *out, struct bch_fs *c, str
printbuf_indent_add(out, 2);
mutex_lock(&ctxt->lock);
+ struct moving_io *io;
list_for_each_entry(io, &ctxt->ios, io_list)
- bch2_write_op_to_text(out, &io->write.op);
+ bch2_data_update_inflight_to_text(out, &io->write);
mutex_unlock(&ctxt->lock);
printbuf_indent_sub(out, 4);
diff --git a/fs/bcachefs/move_types.h b/fs/bcachefs/move_types.h
index e22841ef31e4..807f779f6f76 100644
--- a/fs/bcachefs/move_types.h
+++ b/fs/bcachefs/move_types.h
@@ -3,22 +3,36 @@
#define _BCACHEFS_MOVE_TYPES_H
#include "bbpos_types.h"
+#include "bcachefs_ioctl.h"
struct bch_move_stats {
- enum bch_data_type data_type;
- struct bbpos pos;
char name[32];
+ bool phys;
+ enum bch_ioctl_data_event_ret ret;
+
+ union {
+ struct {
+ enum bch_data_type data_type;
+ struct bbpos pos;
+ };
+ struct {
+ unsigned dev;
+ u64 offset;
+ };
+ };
atomic64_t keys_moved;
atomic64_t keys_raced;
atomic64_t sectors_seen;
atomic64_t sectors_moved;
atomic64_t sectors_raced;
+ atomic64_t sectors_error_corrected;
+ atomic64_t sectors_error_uncorrected;
};
struct move_bucket_key {
struct bpos bucket;
- u8 gen;
+ unsigned gen;
};
struct move_bucket {
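The move_types.h hunk above makes bch_move_stats report progress either logically (data_type and btree position) or physically (device and offset), with the two views sharing storage in an anonymous union selected by the new phys flag. A small standalone C sketch of that tagged-union shape follows; move_stats_s and its field names are simplified stand-ins, not the bcachefs definitions.

/* Sketch of a tagged progress union: one stats object, two report formats. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct move_stats_s {
	const char *name;
	bool phys;
	union {
		struct {	/* logical walk: which btree, which key */
			unsigned btree_id;
			uint64_t pos;
		};
		struct {	/* physical walk: which device, which offset */
			unsigned dev;
			uint64_t offset;
		};
	};
};

static void stats_to_text(const struct move_stats_s *s)
{
	if (s->phys)
		printf("%s: dev %u offset %llu\n", s->name, s->dev,
		       (unsigned long long) s->offset);
	else
		printf("%s: btree %u pos %llu\n", s->name, s->btree_id,
		       (unsigned long long) s->pos);
}

int main(void)
{
	struct move_stats_s logical  = { .name = "rebalance", .btree_id = 2, .pos = 4096 };
	struct move_stats_s physical = { .name = "scrub", .phys = true, .dev = 1, .offset = 77 };

	stats_to_text(&logical);
	stats_to_text(&physical);
	return 0;
}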
diff --git a/fs/bcachefs/movinggc.c b/fs/bcachefs/movinggc.c
index 21805509ab9e..5126c870ce5b 100644
--- a/fs/bcachefs/movinggc.c
+++ b/fs/bcachefs/movinggc.c
@@ -74,20 +74,14 @@ static int bch2_bucket_is_movable(struct btree_trans *trans,
struct move_bucket *b, u64 time)
{
struct bch_fs *c = trans->c;
- struct btree_iter iter;
- struct bkey_s_c k;
- struct bch_alloc_v4 _a;
- const struct bch_alloc_v4 *a;
- int ret;
- if (bch2_bucket_is_open(trans->c,
- b->k.bucket.inode,
- b->k.bucket.offset))
+ if (bch2_bucket_is_open(c, b->k.bucket.inode, b->k.bucket.offset))
return 0;
- k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_alloc,
- b->k.bucket, BTREE_ITER_cached);
- ret = bkey_err(k);
+ struct btree_iter iter;
+ struct bkey_s_c k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_alloc,
+ b->k.bucket, BTREE_ITER_cached);
+ int ret = bkey_err(k);
if (ret)
return ret;
@@ -95,13 +89,18 @@ static int bch2_bucket_is_movable(struct btree_trans *trans,
if (!ca)
goto out;
- a = bch2_alloc_to_v4(k, &_a);
+ if (ca->mi.state != BCH_MEMBER_STATE_rw ||
+ !bch2_dev_is_online(ca))
+ goto out_put;
+
+ struct bch_alloc_v4 _a;
+ const struct bch_alloc_v4 *a = bch2_alloc_to_v4(k, &_a);
b->k.gen = a->gen;
b->sectors = bch2_bucket_sectors_dirty(*a);
u64 lru_idx = alloc_lru_idx_fragmentation(*a, ca);
ret = lru_idx && lru_idx <= time;
-
+out_put:
bch2_dev_put(ca);
out:
bch2_trans_iter_exit(trans, &iter);
@@ -168,8 +167,8 @@ static int bch2_copygc_get_buckets(struct moving_context *ctxt,
bch2_trans_begin(trans);
ret = for_each_btree_key_max(trans, iter, BTREE_ID_lru,
- lru_pos(BCH_LRU_FRAGMENTATION_START, 0, 0),
- lru_pos(BCH_LRU_FRAGMENTATION_START, U64_MAX, LRU_TIME_MAX),
+ lru_pos(BCH_LRU_BUCKET_FRAGMENTATION, 0, 0),
+ lru_pos(BCH_LRU_BUCKET_FRAGMENTATION, U64_MAX, LRU_TIME_MAX),
0, k, ({
struct move_bucket b = { .k.bucket = u64_to_bucket(k.k->p.offset) };
int ret2 = 0;
@@ -318,6 +317,17 @@ void bch2_copygc_wait_to_text(struct printbuf *out, struct bch_fs *c)
prt_printf(out, "Currently calculated wait:\t");
prt_human_readable_u64(out, bch2_copygc_wait_amount(c));
prt_newline(out);
+
+ rcu_read_lock();
+ struct task_struct *t = rcu_dereference(c->copygc_thread);
+ if (t)
+ get_task_struct(t);
+ rcu_read_unlock();
+
+ if (t) {
+ bch2_prt_task_backtrace(out, t, 0, GFP_KERNEL);
+ put_task_struct(t);
+ }
}
static int bch2_copygc_thread(void *arg)
diff --git a/fs/bcachefs/fs-common.c b/fs/bcachefs/namei.c
index 2c3d46ac70c6..93246ad31541 100644
--- a/fs/bcachefs/fs-common.c
+++ b/fs/bcachefs/namei.c
@@ -4,8 +4,8 @@
#include "acl.h"
#include "btree_update.h"
#include "dirent.h"
-#include "fs-common.h"
#include "inode.h"
+#include "namei.h"
#include "subvolume.h"
#include "xattr.h"
@@ -47,6 +47,10 @@ int bch2_create_trans(struct btree_trans *trans,
if (ret)
goto err;
+ /* Inherit casefold state from parent. */
+ if (S_ISDIR(mode))
+ new_inode->bi_flags |= dir_u->bi_flags & BCH_INODE_casefolded;
+
if (!(flags & BCH_CREATE_SNAPSHOT)) {
/* Normal create path - allocate a new inode: */
bch2_inode_init_late(new_inode, now, uid, gid, mode, rdev, dir_u);
@@ -153,16 +157,14 @@ int bch2_create_trans(struct btree_trans *trans,
dir_u->bi_nlink++;
dir_u->bi_mtime = dir_u->bi_ctime = now;
- ret = bch2_inode_write(trans, &dir_iter, dir_u);
- if (ret)
- goto err;
-
- ret = bch2_dirent_create(trans, dir, &dir_hash,
- dir_type,
- name,
- dir_target,
- &dir_offset,
- STR_HASH_must_create|BTREE_ITER_with_updates);
+ ret = bch2_dirent_create(trans, dir, &dir_hash,
+ dir_type,
+ name,
+ dir_target,
+ &dir_offset,
+ &dir_u->bi_size,
+ STR_HASH_must_create|BTREE_ITER_with_updates) ?:
+ bch2_inode_write(trans, &dir_iter, dir_u);
if (ret)
goto err;
@@ -225,7 +227,9 @@ int bch2_link_trans(struct btree_trans *trans,
ret = bch2_dirent_create(trans, dir, &dir_hash,
mode_to_type(inode_u->bi_mode),
- name, inum.inum, &dir_offset,
+ name, inum.inum,
+ &dir_offset,
+ &dir_u->bi_size,
STR_HASH_must_create);
if (ret)
goto err;
@@ -417,8 +421,8 @@ int bch2_rename_trans(struct btree_trans *trans,
}
ret = bch2_dirent_rename(trans,
- src_dir, &src_hash,
- dst_dir, &dst_hash,
+ src_dir, &src_hash, &src_dir_u->bi_size,
+ dst_dir, &dst_hash, &dst_dir_u->bi_size,
src_name, &src_inum, &src_offset,
dst_name, &dst_inum, &dst_offset,
mode);
@@ -560,6 +564,8 @@ err:
return ret;
}
+/* inum_to_path */
+
static inline void prt_bytes_reversed(struct printbuf *out, const void *b, unsigned n)
{
bch2_printbuf_make_room(out, n);
@@ -650,3 +656,179 @@ disconnected:
prt_str_reversed(path, "(disconnected)");
goto out;
}
+
+/* fsck */
+
+static int bch2_check_dirent_inode_dirent(struct btree_trans *trans,
+ struct bkey_s_c_dirent d,
+ struct bch_inode_unpacked *target,
+ bool in_fsck)
+{
+ struct bch_fs *c = trans->c;
+ struct printbuf buf = PRINTBUF;
+ struct btree_iter bp_iter = { NULL };
+ int ret = 0;
+
+ if (inode_points_to_dirent(target, d))
+ return 0;
+
+ if (!target->bi_dir &&
+ !target->bi_dir_offset) {
+ fsck_err_on(S_ISDIR(target->bi_mode),
+ trans, inode_dir_missing_backpointer,
+ "directory with missing backpointer\n%s",
+ (printbuf_reset(&buf),
+ bch2_bkey_val_to_text(&buf, c, d.s_c),
+ prt_printf(&buf, "\n"),
+ bch2_inode_unpacked_to_text(&buf, target),
+ buf.buf));
+
+ fsck_err_on(target->bi_flags & BCH_INODE_unlinked,
+ trans, inode_unlinked_but_has_dirent,
+ "inode unlinked but has dirent\n%s",
+ (printbuf_reset(&buf),
+ bch2_bkey_val_to_text(&buf, c, d.s_c),
+ prt_printf(&buf, "\n"),
+ bch2_inode_unpacked_to_text(&buf, target),
+ buf.buf));
+
+ target->bi_flags &= ~BCH_INODE_unlinked;
+ target->bi_dir = d.k->p.inode;
+ target->bi_dir_offset = d.k->p.offset;
+ return __bch2_fsck_write_inode(trans, target);
+ }
+
+ if (bch2_inode_should_have_single_bp(target) &&
+ !fsck_err(trans, inode_wrong_backpointer,
+ "dirent points to inode that does not point back:\n %s",
+ (bch2_bkey_val_to_text(&buf, c, d.s_c),
+ prt_printf(&buf, "\n "),
+ bch2_inode_unpacked_to_text(&buf, target),
+ buf.buf)))
+ goto err;
+
+ struct bkey_s_c_dirent bp_dirent =
+ bch2_bkey_get_iter_typed(trans, &bp_iter, BTREE_ID_dirents,
+ SPOS(target->bi_dir, target->bi_dir_offset, target->bi_snapshot),
+ 0, dirent);
+ ret = bkey_err(bp_dirent);
+ if (ret && !bch2_err_matches(ret, ENOENT))
+ goto err;
+
+ bool backpointer_exists = !ret;
+ ret = 0;
+
+ if (!backpointer_exists) {
+ if (fsck_err(trans, inode_wrong_backpointer,
+ "inode %llu:%u has wrong backpointer:\n"
+ "got %llu:%llu\n"
+ "should be %llu:%llu",
+ target->bi_inum, target->bi_snapshot,
+ target->bi_dir,
+ target->bi_dir_offset,
+ d.k->p.inode,
+ d.k->p.offset)) {
+ target->bi_dir = d.k->p.inode;
+ target->bi_dir_offset = d.k->p.offset;
+ ret = __bch2_fsck_write_inode(trans, target);
+ }
+ } else {
+ bch2_bkey_val_to_text(&buf, c, d.s_c);
+ prt_newline(&buf);
+ bch2_bkey_val_to_text(&buf, c, bp_dirent.s_c);
+
+ if (S_ISDIR(target->bi_mode) || target->bi_subvol) {
+ /*
+ * XXX: verify connectivity of the other dirent
+ * up to the root before removing this one
+ *
+ * Additionally, bch2_lookup would need to cope with the
+ * dirent it found being removed - or should we remove
+ * the other one, even though the inode points to it?
+ */
+ if (in_fsck) {
+ if (fsck_err(trans, inode_dir_multiple_links,
+ "%s %llu:%u with multiple links\n%s",
+ S_ISDIR(target->bi_mode) ? "directory" : "subvolume",
+ target->bi_inum, target->bi_snapshot, buf.buf))
+ ret = bch2_fsck_remove_dirent(trans, d.k->p);
+ } else {
+ bch2_fs_inconsistent(c,
+ "%s %llu:%u with multiple links\n%s",
+ S_ISDIR(target->bi_mode) ? "directory" : "subvolume",
+ target->bi_inum, target->bi_snapshot, buf.buf);
+ }
+
+ goto out;
+ } else {
+ /*
+ * hardlinked file with nlink 0:
+ * We're just adjusting nlink here so check_nlinks() will pick
+			 * it up; it ignores inodes with nlink 0
+ */
+ if (fsck_err_on(!target->bi_nlink,
+ trans, inode_multiple_links_but_nlink_0,
+ "inode %llu:%u type %s has multiple links but i_nlink 0\n%s",
+ target->bi_inum, target->bi_snapshot, bch2_d_types[d.v->d_type], buf.buf)) {
+ target->bi_nlink++;
+ target->bi_flags &= ~BCH_INODE_unlinked;
+ ret = __bch2_fsck_write_inode(trans, target);
+ if (ret)
+ goto err;
+ }
+ }
+ }
+out:
+err:
+fsck_err:
+ bch2_trans_iter_exit(trans, &bp_iter);
+ printbuf_exit(&buf);
+ bch_err_fn(c, ret);
+ return ret;
+}
+
+int __bch2_check_dirent_target(struct btree_trans *trans,
+ struct btree_iter *dirent_iter,
+ struct bkey_s_c_dirent d,
+ struct bch_inode_unpacked *target,
+ bool in_fsck)
+{
+ struct bch_fs *c = trans->c;
+ struct printbuf buf = PRINTBUF;
+ int ret = 0;
+
+ ret = bch2_check_dirent_inode_dirent(trans, d, target, in_fsck);
+ if (ret)
+ goto err;
+
+ if (fsck_err_on(d.v->d_type != inode_d_type(target),
+ trans, dirent_d_type_wrong,
+ "incorrect d_type: got %s, should be %s:\n%s",
+ bch2_d_type_str(d.v->d_type),
+ bch2_d_type_str(inode_d_type(target)),
+ (printbuf_reset(&buf),
+ bch2_bkey_val_to_text(&buf, c, d.s_c), buf.buf))) {
+ struct bkey_i_dirent *n = bch2_trans_kmalloc(trans, bkey_bytes(d.k));
+ ret = PTR_ERR_OR_ZERO(n);
+ if (ret)
+ goto err;
+
+ bkey_reassemble(&n->k_i, d.s_c);
+ n->v.d_type = inode_d_type(target);
+ if (n->v.d_type == DT_SUBVOL) {
+ n->v.d_parent_subvol = cpu_to_le32(target->bi_parent_subvol);
+ n->v.d_child_subvol = cpu_to_le32(target->bi_subvol);
+ } else {
+ n->v.d_inum = cpu_to_le64(target->bi_inum);
+ }
+
+ ret = bch2_trans_update(trans, dirent_iter, &n->k_i, 0);
+ if (ret)
+ goto err;
+ }
+err:
+fsck_err:
+ printbuf_exit(&buf);
+ bch_err_fn(c, ret);
+ return ret;
+}
diff --git a/fs/bcachefs/fs-common.h b/fs/bcachefs/namei.h
index 2b59210bb5e8..2e6f6364767f 100644
--- a/fs/bcachefs/fs-common.h
+++ b/fs/bcachefs/namei.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _BCACHEFS_FS_COMMON_H
-#define _BCACHEFS_FS_COMMON_H
+#ifndef _BCACHEFS_NAMEI_H
+#define _BCACHEFS_NAMEI_H
#include "dirent.h"
@@ -44,4 +44,29 @@ bool bch2_reinherit_attrs(struct bch_inode_unpacked *,
int bch2_inum_to_path(struct btree_trans *, subvol_inum, struct printbuf *);
-#endif /* _BCACHEFS_FS_COMMON_H */
+int __bch2_check_dirent_target(struct btree_trans *,
+ struct btree_iter *,
+ struct bkey_s_c_dirent,
+ struct bch_inode_unpacked *, bool);
+
+static inline bool inode_points_to_dirent(struct bch_inode_unpacked *inode,
+ struct bkey_s_c_dirent d)
+{
+ return inode->bi_dir == d.k->p.inode &&
+ inode->bi_dir_offset == d.k->p.offset;
+}
+
+static inline int bch2_check_dirent_target(struct btree_trans *trans,
+ struct btree_iter *dirent_iter,
+ struct bkey_s_c_dirent d,
+ struct bch_inode_unpacked *target,
+ bool in_fsck)
+{
+ if (likely(inode_points_to_dirent(target, d) &&
+ d.v->d_type == inode_d_type(target)))
+ return 0;
+
+ return __bch2_check_dirent_target(trans, dirent_iter, d, target, in_fsck);
+}
+
+#endif /* _BCACHEFS_NAMEI_H */
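The new bch2_check_dirent_target() wrapper keeps the common case cheap: an inline check that the inode back-pointer and d_type already agree, with the repair work in an out-of-line __bch2_check_dirent_target(). A minimal standalone C sketch of that fast-path/slow-path split is below; the types and the trivial "repair" are illustrative stand-ins, not the fsck logic.

/* Sketch: inline fast path for the consistent case, out-of-line slow path. */
#include <stdbool.h>
#include <stdio.h>

struct dirent_s { unsigned long dir, offset; unsigned type; };
struct inode_s  { unsigned long bi_dir, bi_dir_offset; unsigned type; };

/* slow path, out of line: fix up the inode back-pointer (stand-in repair) */
static bool check_dirent_target_slow(struct inode_s *inode, const struct dirent_s *d)
{
	inode->bi_dir		= d->dir;
	inode->bi_dir_offset	= d->offset;
	inode->type		= d->type;
	return true;
}

/* fast path, inline: the common "already consistent" case is a few compares */
static inline bool check_dirent_target(struct inode_s *inode,
				       const struct dirent_s *d)
{
	if (__builtin_expect(inode->bi_dir == d->dir &&
			     inode->bi_dir_offset == d->offset &&
			     inode->type == d->type, 1))
		return true;

	return check_dirent_target_slow(inode, d);
}

int main(void)
{
	struct dirent_s d = { .dir = 1, .offset = 42, .type = 4 };
	struct inode_s  i = { .bi_dir = 1, .bi_dir_offset = 7, .type = 4 };

	printf("consistent before: %d\n", i.bi_dir_offset == d.offset);
	check_dirent_target(&i, &d);
	printf("consistent after:  %d\n", i.bi_dir_offset == d.offset);
	return 0;
}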
diff --git a/fs/bcachefs/opts.c b/fs/bcachefs/opts.c
index 6772faf385a5..81fd6b7977d3 100644
--- a/fs/bcachefs/opts.c
+++ b/fs/bcachefs/opts.c
@@ -163,16 +163,6 @@ const char * const bch2_d_types[BCH_DT_MAX] = {
[DT_SUBVOL] = "subvol",
};
-u64 BCH2_NO_SB_OPT(const struct bch_sb *sb)
-{
- BUG();
-}
-
-void SET_BCH2_NO_SB_OPT(struct bch_sb *sb, u64 v)
-{
- BUG();
-}
-
void bch2_opts_apply(struct bch_opts *dst, struct bch_opts src)
{
#define x(_name, ...) \
@@ -223,6 +213,21 @@ void bch2_opt_set_by_id(struct bch_opts *opts, enum bch_opt_id id, u64 v)
}
}
+/* dummy option, for options that aren't stored in the superblock */
+typedef u64 (*sb_opt_get_fn)(const struct bch_sb *);
+typedef void (*sb_opt_set_fn)(struct bch_sb *, u64);
+typedef u64 (*member_opt_get_fn)(const struct bch_member *);
+typedef void (*member_opt_set_fn)(struct bch_member *, u64);
+
+__maybe_unused static const sb_opt_get_fn BCH2_NO_SB_OPT = NULL;
+__maybe_unused static const sb_opt_set_fn SET_BCH2_NO_SB_OPT = NULL;
+__maybe_unused static const member_opt_get_fn BCH2_NO_MEMBER_OPT = NULL;
+__maybe_unused static const member_opt_set_fn SET_BCH2_NO_MEMBER_OPT = NULL;
+
+#define type_compatible_or_null(_p, _type) \
+ __builtin_choose_expr( \
+ __builtin_types_compatible_p(typeof(_p), typeof(_type)), _p, NULL)
+
const struct bch_option bch2_opt_table[] = {
#define OPT_BOOL() .type = BCH_OPT_BOOL, .min = 0, .max = 2
#define OPT_UINT(_min, _max) .type = BCH_OPT_UINT, \
@@ -239,15 +244,15 @@ const struct bch_option bch2_opt_table[] = {
#define x(_name, _bits, _flags, _type, _sb_opt, _default, _hint, _help) \
[Opt_##_name] = { \
- .attr = { \
- .name = #_name, \
- .mode = (_flags) & OPT_RUNTIME ? 0644 : 0444, \
- }, \
- .flags = _flags, \
- .hint = _hint, \
- .help = _help, \
- .get_sb = _sb_opt, \
- .set_sb = SET_##_sb_opt, \
+ .attr.name = #_name, \
+ .attr.mode = (_flags) & OPT_RUNTIME ? 0644 : 0444, \
+ .flags = _flags, \
+ .hint = _hint, \
+ .help = _help, \
+ .get_sb = type_compatible_or_null(_sb_opt, *BCH2_NO_SB_OPT), \
+ .set_sb = type_compatible_or_null(SET_##_sb_opt,*SET_BCH2_NO_SB_OPT), \
+ .get_member = type_compatible_or_null(_sb_opt, *BCH2_NO_MEMBER_OPT), \
+ .set_member = type_compatible_or_null(SET_##_sb_opt,*SET_BCH2_NO_MEMBER_OPT),\
_type \
},
@@ -475,11 +480,18 @@ void bch2_opts_to_text(struct printbuf *out,
}
}
-int bch2_opt_check_may_set(struct bch_fs *c, int id, u64 v)
+int bch2_opt_check_may_set(struct bch_fs *c, struct bch_dev *ca, int id, u64 v)
{
+ lockdep_assert_held(&c->state_lock);
+
int ret = 0;
switch (id) {
+ case Opt_state:
+ if (ca)
+ return __bch2_dev_set_state(c, ca, v, BCH_FORCE_IF_DEGRADED);
+ break;
+
case Opt_compression:
case Opt_background_compression:
ret = bch2_check_set_has_compressed_data(c, v);
@@ -495,12 +507,8 @@ int bch2_opt_check_may_set(struct bch_fs *c, int id, u64 v)
int bch2_opts_check_may_set(struct bch_fs *c)
{
- unsigned i;
- int ret;
-
- for (i = 0; i < bch2_opts_nr; i++) {
- ret = bch2_opt_check_may_set(c, i,
- bch2_opt_get_by_id(&c->opts, i));
+ for (unsigned i = 0; i < bch2_opts_nr; i++) {
+ int ret = bch2_opt_check_may_set(c, NULL, i, bch2_opt_get_by_id(&c->opts, i));
if (ret)
return ret;
}
@@ -619,12 +627,25 @@ out:
return ret;
}
-u64 bch2_opt_from_sb(struct bch_sb *sb, enum bch_opt_id id)
+u64 bch2_opt_from_sb(struct bch_sb *sb, enum bch_opt_id id, int dev_idx)
{
const struct bch_option *opt = bch2_opt_table + id;
u64 v;
- v = opt->get_sb(sb);
+ if (dev_idx < 0) {
+ v = opt->get_sb(sb);
+ } else {
+ if (WARN(!bch2_member_exists(sb, dev_idx),
+ "tried to set device option %s on nonexistent device %i",
+ opt->attr.name, dev_idx))
+ return 0;
+
+ struct bch_member m = bch2_sb_member_get(sb, dev_idx);
+ v = opt->get_member(&m);
+ }
+
+ if (opt->flags & OPT_SB_FIELD_ONE_BIAS)
+ --v;
if (opt->flags & OPT_SB_FIELD_ILOG2)
v = 1ULL << v;
@@ -641,35 +662,19 @@ u64 bch2_opt_from_sb(struct bch_sb *sb, enum bch_opt_id id)
*/
int bch2_opts_from_sb(struct bch_opts *opts, struct bch_sb *sb)
{
- unsigned id;
-
- for (id = 0; id < bch2_opts_nr; id++) {
+ for (unsigned id = 0; id < bch2_opts_nr; id++) {
const struct bch_option *opt = bch2_opt_table + id;
- if (opt->get_sb == BCH2_NO_SB_OPT)
- continue;
-
- bch2_opt_set_by_id(opts, id, bch2_opt_from_sb(sb, id));
+ if (opt->get_sb)
+ bch2_opt_set_by_id(opts, id, bch2_opt_from_sb(sb, id, -1));
}
return 0;
}
-struct bch_dev_sb_opt_set {
- void (*set_sb)(struct bch_member *, u64);
-};
-
-static const struct bch_dev_sb_opt_set bch2_dev_sb_opt_setters [] = {
-#define x(n, set) [Opt_##n] = { .set_sb = SET_##set },
- BCH_DEV_OPT_SETTERS()
-#undef x
-};
-
void __bch2_opt_set_sb(struct bch_sb *sb, int dev_idx,
const struct bch_option *opt, u64 v)
{
- enum bch_opt_id id = opt - bch2_opt_table;
-
if (opt->flags & OPT_SB_FIELD_SECTORS)
v >>= 9;
@@ -679,24 +684,16 @@ void __bch2_opt_set_sb(struct bch_sb *sb, int dev_idx,
if (opt->flags & OPT_SB_FIELD_ONE_BIAS)
v++;
- if (opt->flags & OPT_FS) {
- if (opt->set_sb != SET_BCH2_NO_SB_OPT)
- opt->set_sb(sb, v);
- }
+ if ((opt->flags & OPT_FS) && opt->set_sb && dev_idx < 0)
+ opt->set_sb(sb, v);
- if ((opt->flags & OPT_DEVICE) && dev_idx >= 0) {
+ if ((opt->flags & OPT_DEVICE) && opt->set_member && dev_idx >= 0) {
if (WARN(!bch2_member_exists(sb, dev_idx),
"tried to set device option %s on nonexistent device %i",
opt->attr.name, dev_idx))
return;
- struct bch_member *m = bch2_members_v2_get_mut(sb, dev_idx);
-
- const struct bch_dev_sb_opt_set *set = bch2_dev_sb_opt_setters + id;
- if (set->set_sb)
- set->set_sb(m, v);
- else
- pr_err("option %s cannot be set via opt_set_sb()", opt->attr.name);
+ opt->set_member(bch2_members_v2_get_mut(sb, dev_idx), v);
}
}
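The opts.c change above drops the dummy BCH2_NO_SB_OPT functions and instead builds the option table with type_compatible_or_null(): __builtin_types_compatible_p() decides at compile time whether the x-macro's getter/setter matches the superblock or per-member signature, and __builtin_choose_expr() keeps the pointer or substitutes NULL accordingly. A reduced, self-contained C sketch of that compile-time dispatch follows; sb_s, opt_entry and NO_SB_OPT are illustrative stand-ins.

/*
 * Sketch of compatible_or_null(): the getter is kept only if its type matches
 * the expected signature, otherwise the table slot becomes NULL.
 */
#include <stdio.h>

struct sb_s { unsigned long block_size; };

typedef unsigned long (*sb_get_fn)(const struct sb_s *);

static unsigned long get_block_size(const struct sb_s *sb)
{
	return sb->block_size;
}

/* marker for "this option has no superblock field" */
static const sb_get_fn NO_SB_OPT = NULL;

#define compatible_or_null(_p, _type)					\
	__builtin_choose_expr(						\
		__builtin_types_compatible_p(typeof(_p), typeof(_type)), _p, NULL)

struct opt_entry {
	const char *name;
	sb_get_fn   get_sb;
};

static const struct opt_entry opts[] = {
	{ "block_size",	compatible_or_null(get_block_size, *NO_SB_OPT) },
	{ "verbose",	compatible_or_null(NO_SB_OPT,      *NO_SB_OPT) },
};

int main(void)
{
	struct sb_s sb = { .block_size = 4096 };

	for (unsigned i = 0; i < 2; i++)
		if (opts[i].get_sb)
			printf("%s = %lu\n", opts[i].name, opts[i].get_sb(&sb));
		else
			printf("%s: not stored in superblock\n", opts[i].name);
	return 0;
}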
diff --git a/fs/bcachefs/opts.h b/fs/bcachefs/opts.h
index 9d397fc2a1f0..bb621804d45a 100644
--- a/fs/bcachefs/opts.h
+++ b/fs/bcachefs/opts.h
@@ -50,10 +50,6 @@ static inline const char *bch2_d_type_str(unsigned d_type)
* apply the options from that struct that are defined.
*/
-/* dummy option, for options that aren't stored in the superblock */
-u64 BCH2_NO_SB_OPT(const struct bch_sb *);
-void SET_BCH2_NO_SB_OPT(struct bch_sb *, u64);
-
/* When can be set: */
enum opt_flags {
OPT_FS = BIT(0), /* Filesystem option */
@@ -132,19 +128,24 @@ enum fsck_err_opts {
OPT_FS|OPT_FORMAT| \
OPT_HUMAN_READABLE|OPT_MUST_BE_POW_2|OPT_SB_FIELD_SECTORS, \
OPT_UINT(512, 1U << 16), \
- BCH_SB_BLOCK_SIZE, 8, \
+ BCH_SB_BLOCK_SIZE, 4 << 10, \
"size", NULL) \
x(btree_node_size, u32, \
OPT_FS|OPT_FORMAT| \
OPT_HUMAN_READABLE|OPT_MUST_BE_POW_2|OPT_SB_FIELD_SECTORS, \
OPT_UINT(512, 1U << 20), \
- BCH_SB_BTREE_NODE_SIZE, 512, \
+ BCH_SB_BTREE_NODE_SIZE, 256 << 10, \
"size", "Btree node size, default 256k") \
x(errors, u8, \
OPT_FS|OPT_FORMAT|OPT_MOUNT|OPT_RUNTIME, \
OPT_STR(bch2_error_actions), \
BCH_SB_ERROR_ACTION, BCH_ON_ERROR_fix_safe, \
NULL, "Action to take on filesystem error") \
+ x(write_error_timeout, u16, \
+ OPT_FS|OPT_FORMAT|OPT_MOUNT|OPT_RUNTIME, \
+ OPT_UINT(1, 300), \
+ BCH_SB_WRITE_ERROR_TIMEOUT, 30, \
+ NULL, "Number of consecutive write errors allowed before kicking out a device")\
x(metadata_replicas, u8, \
OPT_FS|OPT_FORMAT|OPT_MOUNT|OPT_RUNTIME, \
OPT_UINT(1, BCH_REPLICAS_MAX), \
@@ -181,6 +182,11 @@ enum fsck_err_opts {
OPT_STR(__bch2_csum_opts), \
BCH_SB_DATA_CSUM_TYPE, BCH_CSUM_OPT_crc32c, \
NULL, NULL) \
+ x(checksum_err_retry_nr, u8, \
+ OPT_FS|OPT_FORMAT|OPT_MOUNT|OPT_RUNTIME, \
+ OPT_UINT(0, 32), \
+ BCH_SB_CSUM_ERR_RETRY_NR, 3, \
+ NULL, NULL) \
x(compression, u8, \
OPT_FS|OPT_INODE|OPT_FORMAT|OPT_MOUNT|OPT_RUNTIME, \
OPT_FN(bch2_opt_compression), \
@@ -197,7 +203,7 @@ enum fsck_err_opts {
BCH_SB_STR_HASH_TYPE, BCH_STR_HASH_OPT_siphash, \
NULL, "Hash function for directory entries and xattrs")\
x(metadata_target, u16, \
- OPT_FS|OPT_INODE|OPT_FORMAT|OPT_MOUNT|OPT_RUNTIME, \
+ OPT_FS|OPT_FORMAT|OPT_MOUNT|OPT_RUNTIME, \
OPT_FN(bch2_opt_target), \
BCH_SB_METADATA_TARGET, 0, \
"(target)", "Device or label for metadata writes") \
@@ -308,11 +314,6 @@ enum fsck_err_opts {
OPT_BOOL(), \
BCH2_NO_SB_OPT, false, \
NULL, "Don't kick drives out when splitbrain detected")\
- x(discard, u8, \
- OPT_FS|OPT_MOUNT|OPT_DEVICE, \
- OPT_BOOL(), \
- BCH2_NO_SB_OPT, true, \
- NULL, "Enable discard/TRIM support") \
x(verbose, u8, \
OPT_FS|OPT_MOUNT|OPT_RUNTIME, \
OPT_BOOL(), \
@@ -493,27 +494,32 @@ enum fsck_err_opts {
BCH2_NO_SB_OPT, false, \
NULL, "Skip submit_bio() for data reads and writes, " \
"for performance testing purposes") \
- x(fs_size, u64, \
- OPT_DEVICE, \
+ x(state, u64, \
+ OPT_DEVICE|OPT_RUNTIME, \
+ OPT_STR(bch2_member_states), \
+ BCH_MEMBER_STATE, BCH_MEMBER_STATE_rw, \
+ "state", "rw,ro,failed,spare") \
+ x(bucket_size, u32, \
+ OPT_DEVICE|OPT_HUMAN_READABLE|OPT_SB_FIELD_SECTORS, \
OPT_UINT(0, S64_MAX), \
- BCH2_NO_SB_OPT, 0, \
- "size", "Size of filesystem on device") \
- x(bucket, u32, \
- OPT_DEVICE, \
- OPT_UINT(0, S64_MAX), \
- BCH2_NO_SB_OPT, 0, \
+ BCH_MEMBER_BUCKET_SIZE, 0, \
"size", "Specifies the bucket size; must be greater than the btree node size")\
x(durability, u8, \
- OPT_DEVICE|OPT_SB_FIELD_ONE_BIAS, \
+ OPT_DEVICE|OPT_RUNTIME|OPT_SB_FIELD_ONE_BIAS, \
OPT_UINT(0, BCH_REPLICAS_MAX), \
- BCH2_NO_SB_OPT, 1, \
+ BCH_MEMBER_DURABILITY, 1, \
"n", "Data written to this device will be considered\n"\
"to have already been replicated n times") \
x(data_allowed, u8, \
OPT_DEVICE, \
OPT_BITFIELD(__bch2_data_types), \
- BCH2_NO_SB_OPT, BIT(BCH_DATA_journal)|BIT(BCH_DATA_btree)|BIT(BCH_DATA_user),\
+ BCH_MEMBER_DATA_ALLOWED, BIT(BCH_DATA_journal)|BIT(BCH_DATA_btree)|BIT(BCH_DATA_user),\
"types", "Allowed data types for this device: journal, btree, and/or user")\
+ x(discard, u8, \
+ OPT_MOUNT|OPT_DEVICE|OPT_RUNTIME, \
+ OPT_BOOL(), \
+ BCH_MEMBER_DISCARD, true, \
+ NULL, "Enable discard/TRIM support") \
x(btree_node_prefetch, u8, \
OPT_FS|OPT_MOUNT|OPT_RUNTIME, \
OPT_BOOL(), \
@@ -521,11 +527,6 @@ enum fsck_err_opts {
NULL, "BTREE_ITER_prefetch casuse btree nodes to be\n"\
" prefetched sequentially")
-#define BCH_DEV_OPT_SETTERS() \
- x(discard, BCH_MEMBER_DISCARD) \
- x(durability, BCH_MEMBER_DURABILITY) \
- x(data_allowed, BCH_MEMBER_DATA_ALLOWED)
-
struct bch_opts {
#define x(_name, _bits, ...) unsigned _name##_defined:1;
BCH_OPTS()
@@ -582,8 +583,6 @@ struct printbuf;
struct bch_option {
struct attribute attr;
- u64 (*get_sb)(const struct bch_sb *);
- void (*set_sb)(struct bch_sb *, u64);
enum opt_type type;
enum opt_flags flags;
u64 min, max;
@@ -595,6 +594,12 @@ struct bch_option {
const char *hint;
const char *help;
+ u64 (*get_sb)(const struct bch_sb *);
+ void (*set_sb)(struct bch_sb *, u64);
+
+ u64 (*get_member)(const struct bch_member *);
+ void (*set_member)(struct bch_member *, u64);
+
};
extern const struct bch_option bch2_opt_table[];
@@ -603,7 +608,7 @@ bool bch2_opt_defined_by_id(const struct bch_opts *, enum bch_opt_id);
u64 bch2_opt_get_by_id(const struct bch_opts *, enum bch_opt_id);
void bch2_opt_set_by_id(struct bch_opts *, enum bch_opt_id, u64);
-u64 bch2_opt_from_sb(struct bch_sb *, enum bch_opt_id);
+u64 bch2_opt_from_sb(struct bch_sb *, enum bch_opt_id, int);
int bch2_opts_from_sb(struct bch_opts *, struct bch_sb *);
void __bch2_opt_set_sb(struct bch_sb *, int, const struct bch_option *, u64);
@@ -625,7 +630,7 @@ void bch2_opts_to_text(struct printbuf *,
struct bch_fs *, struct bch_sb *,
unsigned, unsigned, unsigned);
-int bch2_opt_check_may_set(struct bch_fs *, int, u64);
+int bch2_opt_check_may_set(struct bch_fs *, struct bch_dev *, int, u64);
int bch2_opts_check_may_set(struct bch_fs *);
int bch2_parse_one_mount_opt(struct bch_fs *, struct bch_opts *,
struct printbuf *, const char *, const char *);
diff --git a/fs/bcachefs/progress.c b/fs/bcachefs/progress.c
new file mode 100644
index 000000000000..bafd1c91a802
--- /dev/null
+++ b/fs/bcachefs/progress.c
@@ -0,0 +1,63 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "bcachefs.h"
+#include "bbpos.h"
+#include "disk_accounting.h"
+#include "progress.h"
+
+void bch2_progress_init(struct progress_indicator_state *s,
+ struct bch_fs *c,
+ u64 btree_id_mask)
+{
+ memset(s, 0, sizeof(*s));
+
+ s->next_print = jiffies + HZ * 10;
+
+ for (unsigned i = 0; i < BTREE_ID_NR; i++) {
+ if (!(btree_id_mask & BIT_ULL(i)))
+ continue;
+
+ struct disk_accounting_pos acc = {
+ .type = BCH_DISK_ACCOUNTING_btree,
+ .btree.id = i,
+ };
+
+ u64 v;
+ bch2_accounting_mem_read(c, disk_accounting_pos_to_bpos(&acc), &v, 1);
+ s->nodes_total += div64_ul(v, btree_sectors(c));
+ }
+}
+
+static inline bool progress_update_p(struct progress_indicator_state *s)
+{
+ bool ret = time_after_eq(jiffies, s->next_print);
+
+ if (ret)
+ s->next_print = jiffies + HZ * 10;
+ return ret;
+}
+
+void bch2_progress_update_iter(struct btree_trans *trans,
+ struct progress_indicator_state *s,
+ struct btree_iter *iter,
+ const char *msg)
+{
+ struct bch_fs *c = trans->c;
+ struct btree *b = path_l(btree_iter_path(trans, iter))->b;
+
+ s->nodes_seen += b != s->last_node;
+ s->last_node = b;
+
+ if (progress_update_p(s)) {
+ struct printbuf buf = PRINTBUF;
+ unsigned percent = s->nodes_total
+ ? div64_u64(s->nodes_seen * 100, s->nodes_total)
+ : 0;
+
+ prt_printf(&buf, "%s: %d%%, done %llu/%llu nodes, at ",
+ msg, percent, s->nodes_seen, s->nodes_total);
+ bch2_bbpos_to_text(&buf, BBPOS(iter->btree_id, iter->pos));
+
+ bch_info(c, "%s", buf.buf);
+ printbuf_exit(&buf);
+ }
+}
diff --git a/fs/bcachefs/progress.h b/fs/bcachefs/progress.h
new file mode 100644
index 000000000000..23fb1811f943
--- /dev/null
+++ b/fs/bcachefs/progress.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_PROGRESS_H
+#define _BCACHEFS_PROGRESS_H
+
+/*
+ * Lame progress indicators
+ *
+ * We don't like to use these because they print to the dmesg console, which is
+ * spammy - we much prefer to be wired up to a userspace program (e.g. via
+ * thread_with_file) and have it print the progress indicator.
+ *
+ * But some code is old and doesn't support that, or runs in a context where
+ * that's not yet practical (mount).
+ */
+
+struct progress_indicator_state {
+ unsigned long next_print;
+ u64 nodes_seen;
+ u64 nodes_total;
+ struct btree *last_node;
+};
+
+void bch2_progress_init(struct progress_indicator_state *, struct bch_fs *, u64);
+void bch2_progress_update_iter(struct btree_trans *,
+ struct progress_indicator_state *,
+ struct btree_iter *,
+ const char *);
+
+#endif /* _BCACHEFS_PROGRESS_H */
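progress.c above follows a simple rate-limiting pattern: compute the total node count up front, bump nodes_seen as the iterator walks, and only print when a jiffies deadline has passed, pushing the deadline forward each time. Below is a standalone C sketch of that pattern with time(2) standing in for jiffies; the interval parameter and demo values are illustrative, not the kernel's fixed 10-second period.

/* Sketch of a rate-limited progress indicator with a precomputed total. */
#include <stdint.h>
#include <stdio.h>
#include <time.h>

struct progress {
	uint64_t nodes_seen;
	uint64_t nodes_total;
	time_t	 next_print;
	unsigned interval;
};

static void progress_init(struct progress *p, uint64_t total, unsigned interval_sec)
{
	p->nodes_seen	= 0;
	p->nodes_total	= total;
	p->interval	= interval_sec;
	p->next_print	= time(NULL) + interval_sec;
}

static void progress_update(struct progress *p, const char *msg)
{
	p->nodes_seen++;

	time_t now = time(NULL);
	if (now < p->next_print)
		return;
	p->next_print = now + p->interval;	/* push the deadline forward */

	unsigned percent = p->nodes_total
		? (unsigned) (p->nodes_seen * 100 / p->nodes_total)
		: 0;
	printf("%s: %u%%, done %llu/%llu nodes\n", msg, percent,
	       (unsigned long long) p->nodes_seen,
	       (unsigned long long) p->nodes_total);
}

int main(void)
{
	struct progress p;

	/* the kernel code uses a 10 second interval; 0 here so the demo prints */
	progress_init(&p, 4, 0);
	for (int i = 0; i < 4; i++)
		progress_update(&p, "demo scan");
	return 0;
}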
diff --git a/fs/bcachefs/rebalance.c b/fs/bcachefs/rebalance.c
index d0a1f5cd5c2b..29a569384146 100644
--- a/fs/bcachefs/rebalance.c
+++ b/fs/bcachefs/rebalance.c
@@ -26,9 +26,8 @@
/* bch_extent_rebalance: */
-static const struct bch_extent_rebalance *bch2_bkey_rebalance_opts(struct bkey_s_c k)
+static const struct bch_extent_rebalance *bch2_bkey_ptrs_rebalance_opts(struct bkey_ptrs_c ptrs)
{
- struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
const union bch_extent_entry *entry;
bkey_extent_entry_for_each(ptrs, entry)
@@ -38,6 +37,11 @@ static const struct bch_extent_rebalance *bch2_bkey_rebalance_opts(struct bkey_s
return NULL;
}
+static const struct bch_extent_rebalance *bch2_bkey_rebalance_opts(struct bkey_s_c k)
+{
+ return bch2_bkey_ptrs_rebalance_opts(bch2_bkey_ptrs_c(k));
+}
+
static inline unsigned bch2_bkey_ptrs_need_compress(struct bch_fs *c,
struct bch_io_opts *opts,
struct bkey_s_c k,
@@ -97,11 +101,12 @@ static unsigned bch2_bkey_ptrs_need_rebalance(struct bch_fs *c,
u64 bch2_bkey_sectors_need_rebalance(struct bch_fs *c, struct bkey_s_c k)
{
- const struct bch_extent_rebalance *opts = bch2_bkey_rebalance_opts(k);
+ struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
+
+ const struct bch_extent_rebalance *opts = bch2_bkey_ptrs_rebalance_opts(ptrs);
if (!opts)
return 0;
- struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
const union bch_extent_entry *entry;
struct extent_ptr_decoded p;
u64 sectors = 0;
@@ -341,7 +346,7 @@ static struct bkey_s_c next_rebalance_extent(struct btree_trans *trans,
memset(data_opts, 0, sizeof(*data_opts));
data_opts->rewrite_ptrs = bch2_bkey_ptrs_need_rebalance(c, io_opts, k);
data_opts->target = io_opts->background_target;
- data_opts->write_flags |= BCH_WRITE_ONLY_SPECIFIED_DEVS;
+ data_opts->write_flags |= BCH_WRITE_only_specified_devs;
if (!data_opts->rewrite_ptrs) {
/*
@@ -449,7 +454,7 @@ static bool rebalance_pred(struct bch_fs *c, void *arg,
{
data_opts->rewrite_ptrs = bch2_bkey_ptrs_need_rebalance(c, io_opts, k);
data_opts->target = io_opts->background_target;
- data_opts->write_flags |= BCH_WRITE_ONLY_SPECIFIED_DEVS;
+ data_opts->write_flags |= BCH_WRITE_only_specified_devs;
return data_opts->rewrite_ptrs != 0;
}
@@ -590,8 +595,19 @@ static int bch2_rebalance_thread(void *arg)
void bch2_rebalance_status_to_text(struct printbuf *out, struct bch_fs *c)
{
+ printbuf_tabstop_push(out, 32);
+
struct bch_fs_rebalance *r = &c->rebalance;
+ /* print pending work */
+ struct disk_accounting_pos acc = { .type = BCH_DISK_ACCOUNTING_rebalance_work, };
+ u64 v;
+ bch2_accounting_mem_read(c, disk_accounting_pos_to_bpos(&acc), &v, 1);
+
+ prt_printf(out, "pending work:\t");
+ prt_human_readable_u64(out, v);
+ prt_printf(out, "\n\n");
+
prt_str(out, bch2_rebalance_state_strs[r->state]);
prt_newline(out);
printbuf_indent_add(out, 2);
@@ -600,15 +616,15 @@ void bch2_rebalance_status_to_text(struct printbuf *out, struct bch_fs *c)
case BCH_REBALANCE_waiting: {
u64 now = atomic64_read(&c->io_clock[WRITE].now);
- prt_str(out, "io wait duration: ");
+ prt_printf(out, "io wait duration:\t");
bch2_prt_human_readable_s64(out, (r->wait_iotime_end - r->wait_iotime_start) << 9);
prt_newline(out);
- prt_str(out, "io wait remaining: ");
+ prt_printf(out, "io wait remaining:\t");
bch2_prt_human_readable_s64(out, (r->wait_iotime_end - now) << 9);
prt_newline(out);
- prt_str(out, "duration waited: ");
+ prt_printf(out, "duration waited:\t");
bch2_pr_time_units(out, ktime_get_real_ns() - r->wait_wallclock_start);
prt_newline(out);
break;
@@ -621,6 +637,18 @@ void bch2_rebalance_status_to_text(struct printbuf *out, struct bch_fs *c)
break;
}
prt_newline(out);
+
+ rcu_read_lock();
+ struct task_struct *t = rcu_dereference(c->rebalance.thread);
+ if (t)
+ get_task_struct(t);
+ rcu_read_unlock();
+
+ if (t) {
+ bch2_prt_task_backtrace(out, t, 0, GFP_KERNEL);
+ put_task_struct(t);
+ }
+
printbuf_indent_sub(out, 2);
}
diff --git a/fs/bcachefs/recovery.c b/fs/bcachefs/recovery.c
index 71c786cdb192..266c5770c824 100644
--- a/fs/bcachefs/recovery.c
+++ b/fs/bcachefs/recovery.c
@@ -13,12 +13,12 @@
#include "disk_accounting.h"
#include "errcode.h"
#include "error.h"
-#include "fs-common.h"
#include "journal_io.h"
#include "journal_reclaim.h"
#include "journal_seq_blacklist.h"
#include "logged_ops.h"
#include "move.h"
+#include "namei.h"
#include "quota.h"
#include "rebalance.h"
#include "recovery.h"
@@ -899,7 +899,7 @@ use_clean:
* journal sequence numbers:
*/
if (!c->sb.clean)
- journal_seq += 8;
+ journal_seq += JOURNAL_BUF_NR * 4;
if (blacklist_seq != journal_seq) {
ret = bch2_journal_log_msg(c, "blacklisting entries %llu-%llu",
diff --git a/fs/bcachefs/recovery_passes_types.h b/fs/bcachefs/recovery_passes_types.h
index 418557960ed6..e89b9c783285 100644
--- a/fs/bcachefs/recovery_passes_types.h
+++ b/fs/bcachefs/recovery_passes_types.h
@@ -24,7 +24,7 @@
x(check_topology, 4, 0) \
x(accounting_read, 39, PASS_ALWAYS) \
x(alloc_read, 0, PASS_ALWAYS) \
- x(stripes_read, 1, PASS_ALWAYS) \
+ x(stripes_read, 1, 0) \
x(initialize_subvolumes, 2, 0) \
x(snapshots_read, 3, PASS_ALWAYS) \
x(check_allocations, 5, PASS_FSCK) \
diff --git a/fs/bcachefs/reflink.c b/fs/bcachefs/reflink.c
index 441e648f28b5..68172c6eba21 100644
--- a/fs/bcachefs/reflink.c
+++ b/fs/bcachefs/reflink.c
@@ -185,12 +185,21 @@ static int bch2_indirect_extent_missing_error(struct btree_trans *trans,
BUG_ON(missing_start < refd_start);
BUG_ON(missing_end > refd_end);
- if (fsck_err(trans, reflink_p_to_missing_reflink_v,
- "pointer to missing indirect extent\n"
- " %s\n"
- " missing range %llu-%llu",
- (bch2_bkey_val_to_text(&buf, c, p.s_c), buf.buf),
- missing_start, missing_end)) {
+ struct bpos missing_pos = bkey_start_pos(p.k);
+ missing_pos.offset += missing_start - live_start;
+
+ prt_printf(&buf, "pointer to missing indirect extent in ");
+ ret = bch2_inum_snap_offset_err_msg_trans(trans, &buf, missing_pos);
+ if (ret)
+ goto err;
+
+ prt_printf(&buf, "-%llu\n ", (missing_pos.offset + (missing_end - missing_start)) << 9);
+ bch2_bkey_val_to_text(&buf, c, p.s_c);
+
+ prt_printf(&buf, "\n missing reflink btree range %llu-%llu",
+ missing_start, missing_end);
+
+ if (fsck_err(trans, reflink_p_to_missing_reflink_v, "%s", buf.buf)) {
struct bkey_i_reflink_p *new = bch2_bkey_make_mut_noupdate_typed(trans, p.s_c, reflink_p);
ret = PTR_ERR_OR_ZERO(new);
if (ret)
@@ -597,7 +606,7 @@ s64 bch2_remap_range(struct bch_fs *c,
u64 dst_done = 0;
u32 dst_snapshot, src_snapshot;
bool reflink_p_may_update_opts_field =
- bch2_request_incompat_feature(c, bcachefs_metadata_version_reflink_p_may_update_opts);
+ !bch2_request_incompat_feature(c, bcachefs_metadata_version_reflink_p_may_update_opts);
int ret = 0, ret2 = 0;
if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_reflink))
diff --git a/fs/bcachefs/sb-counters.c b/fs/bcachefs/sb-counters.c
index 6992e7469112..2b4b8445d418 100644
--- a/fs/bcachefs/sb-counters.c
+++ b/fs/bcachefs/sb-counters.c
@@ -5,7 +5,13 @@
/* BCH_SB_FIELD_counters */
-static const char * const bch2_counter_names[] = {
+static const u8 counters_to_stable_map[] = {
+#define x(n, id, ...) [BCH_COUNTER_##n] = BCH_COUNTER_STABLE_##n,
+ BCH_PERSISTENT_COUNTERS()
+#undef x
+};
+
+const char * const bch2_counter_names[] = {
#define x(t, n, ...) (#t),
BCH_PERSISTENT_COUNTERS()
#undef x
@@ -18,13 +24,13 @@ static size_t bch2_sb_counter_nr_entries(struct bch_sb_field_counters *ctrs)
return 0;
return (__le64 *) vstruct_end(&ctrs->field) - &ctrs->d[0];
-};
+}
static int bch2_sb_counters_validate(struct bch_sb *sb, struct bch_sb_field *f,
enum bch_validate_flags flags, struct printbuf *err)
{
return 0;
-};
+}
static void bch2_sb_counters_to_text(struct printbuf *out, struct bch_sb *sb,
struct bch_sb_field *f)
@@ -32,50 +38,56 @@ static void bch2_sb_counters_to_text(struct printbuf *out, struct bch_sb *sb,
struct bch_sb_field_counters *ctrs = field_to_type(f, counters);
unsigned int nr = bch2_sb_counter_nr_entries(ctrs);
- for (unsigned i = 0; i < nr; i++)
- prt_printf(out, "%s \t%llu\n",
- i < BCH_COUNTER_NR ? bch2_counter_names[i] : "(unknown)",
- le64_to_cpu(ctrs->d[i]));
-};
+ for (unsigned i = 0; i < BCH_COUNTER_NR; i++) {
+ unsigned stable = counters_to_stable_map[i];
+ if (stable < nr)
+ prt_printf(out, "%s \t%llu\n",
+ bch2_counter_names[i],
+ le64_to_cpu(ctrs->d[stable]));
+ }
+}
int bch2_sb_counters_to_cpu(struct bch_fs *c)
{
struct bch_sb_field_counters *ctrs = bch2_sb_field_get(c->disk_sb.sb, counters);
- unsigned int i;
unsigned int nr = bch2_sb_counter_nr_entries(ctrs);
- u64 val = 0;
- for (i = 0; i < BCH_COUNTER_NR; i++)
+ for (unsigned i = 0; i < BCH_COUNTER_NR; i++)
c->counters_on_mount[i] = 0;
- for (i = 0; i < min_t(unsigned int, nr, BCH_COUNTER_NR); i++) {
- val = le64_to_cpu(ctrs->d[i]);
- percpu_u64_set(&c->counters[i], val);
- c->counters_on_mount[i] = val;
+ for (unsigned i = 0; i < BCH_COUNTER_NR; i++) {
+ unsigned stable = counters_to_stable_map[i];
+ if (stable < nr) {
+ u64 v = le64_to_cpu(ctrs->d[stable]);
+ percpu_u64_set(&c->counters[i], v);
+ c->counters_on_mount[i] = v;
+ }
}
+
return 0;
-};
+}
int bch2_sb_counters_from_cpu(struct bch_fs *c)
{
struct bch_sb_field_counters *ctrs = bch2_sb_field_get(c->disk_sb.sb, counters);
struct bch_sb_field_counters *ret;
- unsigned int i;
unsigned int nr = bch2_sb_counter_nr_entries(ctrs);
if (nr < BCH_COUNTER_NR) {
ret = bch2_sb_field_resize(&c->disk_sb, counters,
- sizeof(*ctrs) / sizeof(u64) + BCH_COUNTER_NR);
-
+ sizeof(*ctrs) / sizeof(u64) + BCH_COUNTER_NR);
if (ret) {
ctrs = ret;
nr = bch2_sb_counter_nr_entries(ctrs);
}
}
+ for (unsigned i = 0; i < BCH_COUNTER_NR; i++) {
+ unsigned stable = counters_to_stable_map[i];
+ if (stable < nr)
+ ctrs->d[stable] = cpu_to_le64(percpu_u64_get(&c->counters[i]));
+ }
- for (i = 0; i < min_t(unsigned int, nr, BCH_COUNTER_NR); i++)
- ctrs->d[i] = cpu_to_le64(percpu_u64_get(&c->counters[i]));
return 0;
}
@@ -97,3 +109,39 @@ const struct bch_sb_field_ops bch_sb_field_ops_counters = {
.validate = bch2_sb_counters_validate,
.to_text = bch2_sb_counters_to_text,
};
+
+#ifndef NO_BCACHEFS_CHARDEV
+long bch2_ioctl_query_counters(struct bch_fs *c,
+ struct bch_ioctl_query_counters __user *user_arg)
+{
+ struct bch_ioctl_query_counters arg;
+ int ret = copy_from_user_errcode(&arg, user_arg, sizeof(arg));
+ if (ret)
+ return ret;
+
+ if ((arg.flags & ~BCH_IOCTL_QUERY_COUNTERS_MOUNT) ||
+ arg.pad)
+ return -EINVAL;
+
+ arg.nr = min(arg.nr, BCH_COUNTER_NR);
+ ret = put_user(arg.nr, &user_arg->nr);
+ if (ret)
+ return ret;
+
+ for (unsigned i = 0; i < BCH_COUNTER_NR; i++) {
+ unsigned stable = counters_to_stable_map[i];
+
+ if (stable < arg.nr) {
+ u64 v = !(arg.flags & BCH_IOCTL_QUERY_COUNTERS_MOUNT)
+ ? percpu_u64_get(&c->counters[i])
+ : c->counters_on_mount[i];
+
+ ret = put_user(v, &user_arg->d[stable]);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+#endif
diff --git a/fs/bcachefs/sb-counters.h b/fs/bcachefs/sb-counters.h
index 81f8aec9fcb1..a4329ad8dd1b 100644
--- a/fs/bcachefs/sb-counters.h
+++ b/fs/bcachefs/sb-counters.h
@@ -11,6 +11,10 @@ int bch2_sb_counters_from_cpu(struct bch_fs *);
void bch2_fs_counters_exit(struct bch_fs *);
int bch2_fs_counters_init(struct bch_fs *);
+extern const char * const bch2_counter_names[];
extern const struct bch_sb_field_ops bch_sb_field_ops_counters;
+long bch2_ioctl_query_counters(struct bch_fs *,
+ struct bch_ioctl_query_counters __user *);
+
#endif // _BCACHEFS_SB_COUNTERS_H
diff --git a/fs/bcachefs/sb-counters_format.h b/fs/bcachefs/sb-counters_format.h
index fdcf598f08b1..fa27ec59a647 100644
--- a/fs/bcachefs/sb-counters_format.h
+++ b/fs/bcachefs/sb-counters_format.h
@@ -9,10 +9,24 @@ enum counters_flags {
#define BCH_PERSISTENT_COUNTERS() \
x(io_read, 0, TYPE_SECTORS) \
+ x(io_read_inline, 80, TYPE_SECTORS) \
+ x(io_read_hole, 81, TYPE_SECTORS) \
+ x(io_read_promote, 30, TYPE_COUNTER) \
+ x(io_read_bounce, 31, TYPE_COUNTER) \
+ x(io_read_split, 33, TYPE_COUNTER) \
+ x(io_read_reuse_race, 34, TYPE_COUNTER) \
+ x(io_read_retry, 32, TYPE_COUNTER) \
x(io_write, 1, TYPE_SECTORS) \
x(io_move, 2, TYPE_SECTORS) \
+ x(io_move_read, 35, TYPE_SECTORS) \
+ x(io_move_write, 36, TYPE_SECTORS) \
+ x(io_move_finish, 37, TYPE_SECTORS) \
+ x(io_move_fail, 38, TYPE_COUNTER) \
+ x(io_move_write_fail, 82, TYPE_COUNTER) \
+ x(io_move_start_fail, 39, TYPE_COUNTER) \
x(bucket_invalidate, 3, TYPE_COUNTER) \
x(bucket_discard, 4, TYPE_COUNTER) \
+ x(bucket_discard_fast, 79, TYPE_COUNTER) \
x(bucket_alloc, 5, TYPE_COUNTER) \
x(bucket_alloc_fail, 6, TYPE_COUNTER) \
x(btree_cache_scan, 7, TYPE_COUNTER) \
@@ -38,16 +52,6 @@ enum counters_flags {
x(journal_reclaim_finish, 27, TYPE_COUNTER) \
x(journal_reclaim_start, 28, TYPE_COUNTER) \
x(journal_write, 29, TYPE_COUNTER) \
- x(read_promote, 30, TYPE_COUNTER) \
- x(read_bounce, 31, TYPE_COUNTER) \
- x(read_split, 33, TYPE_COUNTER) \
- x(read_retry, 32, TYPE_COUNTER) \
- x(read_reuse_race, 34, TYPE_COUNTER) \
- x(move_extent_read, 35, TYPE_SECTORS) \
- x(move_extent_write, 36, TYPE_SECTORS) \
- x(move_extent_finish, 37, TYPE_SECTORS) \
- x(move_extent_fail, 38, TYPE_COUNTER) \
- x(move_extent_start_fail, 39, TYPE_COUNTER) \
x(copygc, 40, TYPE_COUNTER) \
x(copygc_wait, 41, TYPE_COUNTER) \
x(gc_gens_end, 42, TYPE_COUNTER) \
@@ -95,6 +99,13 @@ enum bch_persistent_counters {
BCH_COUNTER_NR
};
+enum bch_persistent_counters_stable {
+#define x(t, n, ...) BCH_COUNTER_STABLE_##t = n,
+ BCH_PERSISTENT_COUNTERS()
+#undef x
+ BCH_COUNTER_STABLE_NR
+};
+
struct bch_sb_field_counters {
struct bch_sb_field field;
__le64 d[];
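The counters changes above introduce a stable on-disk slot for every persistent counter: the x-macro carries an explicit number, the BCH_COUNTER_STABLE_* enum records it, and counters_to_stable_map[] translates from in-memory enum order to the stable index, so the superblock layout no longer depends on the order counters are declared in. A reduced, self-contained C sketch of that x-macro mapping follows; the counter names are a small illustrative subset.

/* Sketch: in-memory enum order vs stable on-disk slots, via one x-macro. */
#include <stdio.h>

#define DEMO_COUNTERS()			\
	x(io_read,		0)	\
	x(io_write,		1)	\
	x(io_read_retry,	32)	/* added later, gets a higher stable slot */

enum demo_counter {
#define x(name, stable)	DEMO_COUNTER_##name,
	DEMO_COUNTERS()
#undef x
	DEMO_COUNTER_NR
};

enum demo_counter_stable {
#define x(name, stable)	DEMO_COUNTER_STABLE_##name = stable,
	DEMO_COUNTERS()
#undef x
};

static const unsigned char counters_to_stable[] = {
#define x(name, stable)	[DEMO_COUNTER_##name] = DEMO_COUNTER_STABLE_##name,
	DEMO_COUNTERS()
#undef x
};

static const char * const counter_names[] = {
#define x(name, stable)	#name,
	DEMO_COUNTERS()
#undef x
};

int main(void)
{
	/* the on-disk array is indexed by the stable slot, not the enum order */
	unsigned long long d[64] = { [0] = 123, [1] = 456, [32] = 7 };

	for (unsigned i = 0; i < DEMO_COUNTER_NR; i++)
		printf("%-16s (mem %u, disk %u) = %llu\n",
		       counter_names[i], i, (unsigned) counters_to_stable[i],
		       d[counters_to_stable[i]]);
	return 0;
}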
diff --git a/fs/bcachefs/sb-downgrade.c b/fs/bcachefs/sb-downgrade.c
index 051214fdc735..acb5d845841e 100644
--- a/fs/bcachefs/sb-downgrade.c
+++ b/fs/bcachefs/sb-downgrade.c
@@ -90,7 +90,13 @@
BIT_ULL(BCH_RECOVERY_PASS_check_allocations), \
BCH_FSCK_ERR_accounting_mismatch, \
BCH_FSCK_ERR_accounting_key_replicas_nr_devs_0, \
- BCH_FSCK_ERR_accounting_key_junk_at_end)
+ BCH_FSCK_ERR_accounting_key_junk_at_end) \
+ x(cached_backpointers, \
+ BIT_ULL(BCH_RECOVERY_PASS_check_extents_to_backpointers),\
+ BCH_FSCK_ERR_ptr_to_missing_backpointer) \
+ x(stripe_backpointers, \
+ BIT_ULL(BCH_RECOVERY_PASS_check_extents_to_backpointers),\
+ BCH_FSCK_ERR_ptr_to_missing_backpointer)
#define DOWNGRADE_TABLE() \
x(bucket_stripe_sectors, \
diff --git a/fs/bcachefs/sb-errors_format.h b/fs/bcachefs/sb-errors_format.h
index b86ec013d7d7..67455beb8358 100644
--- a/fs/bcachefs/sb-errors_format.h
+++ b/fs/bcachefs/sb-errors_format.h
@@ -179,6 +179,7 @@ enum bch_fsck_flags {
x(ptr_crc_redundant, 160, 0) \
x(ptr_crc_nonce_mismatch, 162, 0) \
x(ptr_stripe_redundant, 163, 0) \
+ x(extent_flags_not_at_start, 306, 0) \
x(reservation_key_nr_replicas_invalid, 164, 0) \
x(reflink_v_refcount_wrong, 165, FSCK_AUTOFIX) \
x(reflink_v_pos_bad, 292, 0) \
@@ -314,7 +315,9 @@ enum bch_fsck_flags {
x(compression_opt_not_marked_in_sb, 295, FSCK_AUTOFIX) \
x(compression_type_not_marked_in_sb, 296, FSCK_AUTOFIX) \
x(directory_size_mismatch, 303, FSCK_AUTOFIX) \
- x(MAX, 304, 0)
+ x(dirent_cf_name_too_big, 304, 0) \
+ x(dirent_stray_data_after_cf_name, 305, 0) \
+ x(MAX, 307, 0)
enum bch_sb_error_id {
#define x(t, n, ...) BCH_FSCK_ERR_##t = n,
diff --git a/fs/bcachefs/sb-members.h b/fs/bcachefs/sb-members.h
index 762083b564ee..38261638a611 100644
--- a/fs/bcachefs/sb-members.h
+++ b/fs/bcachefs/sb-members.h
@@ -23,7 +23,19 @@ static inline bool bch2_dev_is_online(struct bch_dev *ca)
return !percpu_ref_is_zero(&ca->io_ref);
}
-static inline bool bch2_dev_is_readable(struct bch_dev *ca)
+static inline struct bch_dev *bch2_dev_rcu(struct bch_fs *, unsigned);
+
+static inline bool bch2_dev_idx_is_online(struct bch_fs *c, unsigned dev)
+{
+ rcu_read_lock();
+ struct bch_dev *ca = bch2_dev_rcu(c, dev);
+ bool ret = ca && bch2_dev_is_online(ca);
+ rcu_read_unlock();
+
+ return ret;
+}
+
+static inline bool bch2_dev_is_healthy(struct bch_dev *ca)
{
return bch2_dev_is_online(ca) &&
ca->mi.state != BCH_MEMBER_STATE_failed;
@@ -271,6 +283,8 @@ static inline struct bch_dev *bch2_dev_iterate(struct bch_fs *c, struct bch_dev
static inline struct bch_dev *bch2_dev_get_ioref(struct bch_fs *c, unsigned dev, int rw)
{
+ might_sleep();
+
rcu_read_lock();
struct bch_dev *ca = bch2_dev_rcu(c, dev);
if (ca && !percpu_ref_tryget(&ca->io_ref))
diff --git a/fs/bcachefs/sb-members_format.h b/fs/bcachefs/sb-members_format.h
index 2adf1221a440..3affec823b3f 100644
--- a/fs/bcachefs/sb-members_format.h
+++ b/fs/bcachefs/sb-members_format.h
@@ -79,6 +79,7 @@ struct bch_member {
#define BCH_MEMBER_V1_BYTES 56
+LE16_BITMASK(BCH_MEMBER_BUCKET_SIZE, struct bch_member, bucket_size, 0, 16)
LE64_BITMASK(BCH_MEMBER_STATE, struct bch_member, flags, 0, 4)
/* 4-14 unused, was TIER, HAS_(META)DATA, REPLACEMENT */
LE64_BITMASK(BCH_MEMBER_DISCARD, struct bch_member, flags, 14, 15)
diff --git a/fs/bcachefs/snapshot.c b/fs/bcachefs/snapshot.c
index c54091a28909..e7f197896db1 100644
--- a/fs/bcachefs/snapshot.c
+++ b/fs/bcachefs/snapshot.c
@@ -146,8 +146,9 @@ bool __bch2_snapshot_is_ancestor(struct bch_fs *c, u32 id, u32 ancestor)
goto out;
}
- while (id && id < ancestor - IS_ANCESTOR_BITMAP)
- id = get_ancestor_below(t, id, ancestor);
+ if (likely(ancestor >= IS_ANCESTOR_BITMAP))
+ while (id && id < ancestor - IS_ANCESTOR_BITMAP)
+ id = get_ancestor_below(t, id, ancestor);
ret = id && id < ancestor
? test_ancestor_bitmap(t, id, ancestor)
@@ -389,7 +390,7 @@ static u32 bch2_snapshot_tree_next(struct bch_fs *c, u32 id)
return 0;
}
-static u32 bch2_snapshot_tree_oldest_subvol(struct bch_fs *c, u32 snapshot_root)
+u32 bch2_snapshot_tree_oldest_subvol(struct bch_fs *c, u32 snapshot_root)
{
u32 id = snapshot_root;
u32 subvol = 0, s;
diff --git a/fs/bcachefs/snapshot.h b/fs/bcachefs/snapshot.h
index 00373cf32e7b..81180181d7c9 100644
--- a/fs/bcachefs/snapshot.h
+++ b/fs/bcachefs/snapshot.h
@@ -105,6 +105,7 @@ static inline u32 bch2_snapshot_nth_parent(struct bch_fs *c, u32 id, u32 n)
return id;
}
+u32 bch2_snapshot_tree_oldest_subvol(struct bch_fs *, u32);
u32 bch2_snapshot_skiplist_get(struct bch_fs *, u32);
static inline u32 bch2_snapshot_root(struct bch_fs *c, u32 id)
diff --git a/fs/bcachefs/str_hash.c b/fs/bcachefs/str_hash.c
index d78451c2a0c6..93e71119e5a4 100644
--- a/fs/bcachefs/str_hash.c
+++ b/fs/bcachefs/str_hash.c
@@ -50,7 +50,7 @@ static noinline int fsck_rename_dirent(struct btree_trans *trans,
for (unsigned i = 0; i < 1000; i++) {
unsigned len = sprintf(new->v.d_name, "%.*s.fsck_renamed-%u",
old_name.len, old_name.name, i);
- unsigned u64s = BKEY_U64s + dirent_val_u64s(len);
+ unsigned u64s = BKEY_U64s + dirent_val_u64s(len, 0);
if (u64s > U8_MAX)
return -EINVAL;
diff --git a/fs/bcachefs/str_hash.h b/fs/bcachefs/str_hash.h
index 55a4ac7bf220..575ad1e03904 100644
--- a/fs/bcachefs/str_hash.h
+++ b/fs/bcachefs/str_hash.h
@@ -12,7 +12,6 @@
#include "super.h"
#include <linux/crc32c.h>
-#include <crypto/hash.h>
#include <crypto/sha2.h>
static inline enum bch_str_hash_type
@@ -34,6 +33,7 @@ bch2_str_hash_opt_to_type(struct bch_fs *c, enum bch_str_hash_opts opt)
struct bch_hash_info {
u8 type;
+ struct unicode_map *cf_encoding;
/*
* For crc32 or crc64 string hashes the first key value of
* the siphash_key (k0) is used as the key.
@@ -47,17 +47,17 @@ bch2_hash_info_init(struct bch_fs *c, const struct bch_inode_unpacked *bi)
/* XXX ick */
struct bch_hash_info info = {
.type = INODE_STR_HASH(bi),
+#ifdef CONFIG_UNICODE
+ .cf_encoding = !!(bi->bi_flags & BCH_INODE_casefolded) ? c->cf_encoding : NULL,
+#endif
.siphash_key = { .k0 = bi->bi_hash_seed }
};
if (unlikely(info.type == BCH_STR_HASH_siphash_old)) {
- SHASH_DESC_ON_STACK(desc, c->sha256);
u8 digest[SHA256_DIGEST_SIZE];
- desc->tfm = c->sha256;
-
- crypto_shash_digest(desc, (void *) &bi->bi_hash_seed,
- sizeof(bi->bi_hash_seed), digest);
+ sha256((const u8 *)&bi->bi_hash_seed,
+ sizeof(bi->bi_hash_seed), digest);
memcpy(&info.siphash_key, digest, sizeof(info.siphash_key));
}
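The str_hash.h hunk replaces the crypto_shash descriptor boilerplate with a direct one-shot sha256() call when deriving the legacy siphash key from the inode's hash seed. A small userspace sketch of the same "hash the seed, take the first bytes as the key" step is below; OpenSSL's SHA256() stands in for the kernel's sha256() helper (assumption: built and linked against libcrypto), and siphash_key here is just a pair of u64s.

/* Sketch: derive a 16-byte siphash key from an 8-byte seed via one digest call. */
#include <openssl/sha.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct siphash_key { uint64_t k0, k1; };

static struct siphash_key key_from_seed(uint64_t seed)
{
	unsigned char digest[SHA256_DIGEST_LENGTH];
	struct siphash_key key;

	/* one call, no shash descriptor/tfm setup */
	SHA256((const unsigned char *) &seed, sizeof(seed), digest);
	memcpy(&key, digest, sizeof(key));	/* first 16 bytes become the key */
	return key;
}

int main(void)
{
	struct siphash_key k = key_from_seed(0x1234567890abcdefULL);

	printf("k0=%016llx k1=%016llx\n",
	       (unsigned long long) k.k0, (unsigned long long) k.k1);
	return 0;
}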
diff --git a/fs/bcachefs/super-io.c b/fs/bcachefs/super-io.c
index 8037ccbacf6a..572b06bfa0b8 100644
--- a/fs/bcachefs/super-io.c
+++ b/fs/bcachefs/super-io.c
@@ -25,9 +25,6 @@
#include <linux/sort.h>
#include <linux/string_choices.h>
-static const struct blk_holder_ops bch2_sb_handle_bdev_ops = {
-};
-
struct bch2_metadata_version {
u16 version;
const char *name;
@@ -69,14 +66,22 @@ enum bcachefs_metadata_version bch2_latest_compatible_version(enum bcachefs_meta
return v;
}
-void bch2_set_version_incompat(struct bch_fs *c, enum bcachefs_metadata_version version)
+int bch2_set_version_incompat(struct bch_fs *c, enum bcachefs_metadata_version version)
{
- mutex_lock(&c->sb_lock);
- SET_BCH_SB_VERSION_INCOMPAT(c->disk_sb.sb,
- max(BCH_SB_VERSION_INCOMPAT(c->disk_sb.sb), version));
- c->disk_sb.sb->features[0] |= cpu_to_le64(BCH_FEATURE_incompat_version_field);
- bch2_write_super(c);
- mutex_unlock(&c->sb_lock);
+ int ret = ((c->sb.features & BIT_ULL(BCH_FEATURE_incompat_version_field)) &&
+ version <= c->sb.version_incompat_allowed)
+ ? 0
+ : -BCH_ERR_may_not_use_incompat_feature;
+
+ if (!ret) {
+ mutex_lock(&c->sb_lock);
+ SET_BCH_SB_VERSION_INCOMPAT(c->disk_sb.sb,
+ max(BCH_SB_VERSION_INCOMPAT(c->disk_sb.sb), version));
+ bch2_write_super(c);
+ mutex_unlock(&c->sb_lock);
+ }
+
+ return ret;
}
const char * const bch2_sb_fields[] = {
@@ -360,39 +365,41 @@ static int bch2_sb_compatible(struct bch_sb *sb, struct printbuf *out)
return 0;
}
-static int bch2_sb_validate(struct bch_sb_handle *disk_sb,
- enum bch_validate_flags flags, struct printbuf *out)
+int bch2_sb_validate(struct bch_sb *sb, u64 read_offset,
+ enum bch_validate_flags flags, struct printbuf *out)
{
- struct bch_sb *sb = disk_sb->sb;
struct bch_sb_field_members_v1 *mi;
enum bch_opt_id opt_id;
- u16 block_size;
int ret;
ret = bch2_sb_compatible(sb, out);
if (ret)
return ret;
- if (sb->features[1] ||
- (le64_to_cpu(sb->features[0]) & (~0ULL << BCH_FEATURE_NR))) {
- prt_printf(out, "Filesystem has incompatible features");
+ u64 incompat = le64_to_cpu(sb->features[0]) & (~0ULL << BCH_FEATURE_NR);
+ unsigned incompat_bit = 0;
+ if (incompat)
+ incompat_bit = __ffs64(incompat);
+ else if (sb->features[1])
+ incompat_bit = 64 + __ffs64(le64_to_cpu(sb->features[1]));
+
+ if (incompat_bit) {
+ prt_printf(out, "Filesystem has incompatible feature bit %u, highest supported %s (%u)",
+ incompat_bit,
+ bch2_sb_features[BCH_FEATURE_NR - 1],
+ BCH_FEATURE_NR - 1);
return -BCH_ERR_invalid_sb_features;
}
if (BCH_VERSION_MAJOR(le16_to_cpu(sb->version)) > BCH_VERSION_MAJOR(bcachefs_metadata_version_current) ||
BCH_SB_VERSION_INCOMPAT(sb) > bcachefs_metadata_version_current) {
- prt_printf(out, "Filesystem has incompatible version");
+ prt_str(out, "Filesystem has incompatible version ");
+ bch2_version_to_text(out, le16_to_cpu(sb->version));
+ prt_str(out, ", current version ");
+ bch2_version_to_text(out, bcachefs_metadata_version_current);
return -BCH_ERR_invalid_sb_features;
}
- block_size = le16_to_cpu(sb->block_size);
-
- if (block_size > PAGE_SECTORS) {
- prt_printf(out, "Block size too big (got %u, max %u)",
- block_size, PAGE_SECTORS);
- return -BCH_ERR_invalid_sb_block_size;
- }
-
if (bch2_is_zero(sb->user_uuid.b, sizeof(sb->user_uuid))) {
prt_printf(out, "Bad user UUID (got zeroes)");
return -BCH_ERR_invalid_sb_uuid;
@@ -403,6 +410,13 @@ static int bch2_sb_validate(struct bch_sb_handle *disk_sb,
return -BCH_ERR_invalid_sb_uuid;
}
+ if (!(flags & BCH_VALIDATE_write) &&
+ le64_to_cpu(sb->offset) != read_offset) {
+ prt_printf(out, "Bad sb offset (got %llu, read from %llu)",
+ le64_to_cpu(sb->offset), read_offset);
+ return -BCH_ERR_invalid_sb_offset;
+ }
+
if (!sb->nr_devices ||
sb->nr_devices > BCH_SB_MEMBERS_MAX) {
prt_printf(out, "Bad number of member devices %u (max %u)",
@@ -458,6 +472,13 @@ static int bch2_sb_validate(struct bch_sb_handle *disk_sb,
if (le16_to_cpu(sb->version) <= bcachefs_metadata_version_disk_accounting_v2)
SET_BCH_SB_PROMOTE_WHOLE_EXTENTS(sb, true);
+
+ if (!BCH_SB_WRITE_ERROR_TIMEOUT(sb))
+ SET_BCH_SB_WRITE_ERROR_TIMEOUT(sb, 30);
+
+ if (le16_to_cpu(sb->version) <= bcachefs_metadata_version_extent_flags &&
+ !BCH_SB_CSUM_ERR_RETRY_NR(sb))
+ SET_BCH_SB_CSUM_ERR_RETRY_NR(sb, 3);
}
#ifdef __KERNEL__
@@ -468,8 +489,8 @@ static int bch2_sb_validate(struct bch_sb_handle *disk_sb,
for (opt_id = 0; opt_id < bch2_opts_nr; opt_id++) {
const struct bch_option *opt = bch2_opt_table + opt_id;
- if (opt->get_sb != BCH2_NO_SB_OPT) {
- u64 v = bch2_opt_from_sb(sb, opt_id);
+ if (opt->get_sb) {
+ u64 v = bch2_opt_from_sb(sb, opt_id, -1);
prt_printf(out, "Invalid option ");
ret = bch2_opt_validate(opt, v, out);
@@ -749,7 +770,7 @@ retry:
memset(sb, 0, sizeof(*sb));
sb->mode = BLK_OPEN_READ;
sb->have_bio = true;
- sb->holder = kmalloc(1, GFP_KERNEL);
+ sb->holder = kzalloc(sizeof(*sb->holder), GFP_KERNEL);
if (!sb->holder)
return -ENOMEM;
@@ -875,7 +896,7 @@ got_super:
sb->have_layout = true;
- ret = bch2_sb_validate(sb, 0, &err);
+ ret = bch2_sb_validate(sb->sb, offset, 0, &err);
if (ret) {
bch2_print_opts(opts, KERN_ERR "bcachefs (%s): error validating superblock: %s\n",
path, err.buf);
@@ -912,16 +933,16 @@ static void write_super_endio(struct bio *bio)
{
struct bch_dev *ca = bio->bi_private;
+ bch2_account_io_success_fail(ca, bio_data_dir(bio), !bio->bi_status);
+
/* XXX: return errors directly */
- if (bch2_dev_io_err_on(bio->bi_status, ca,
- bio_data_dir(bio)
- ? BCH_MEMBER_ERROR_write
- : BCH_MEMBER_ERROR_read,
- "superblock %s error: %s",
+ if (bio->bi_status) {
+ bch_err_dev_ratelimited(ca, "superblock %s error: %s",
str_write_read(bio_data_dir(bio)),
- bch2_blk_status_to_str(bio->bi_status)))
+ bch2_blk_status_to_str(bio->bi_status));
ca->sb_write_error = 1;
+ }
closure_put(&ca->fs->sb_write);
percpu_ref_put(&ca->io_ref);
@@ -1032,7 +1053,7 @@ int bch2_write_super(struct bch_fs *c)
darray_for_each(online_devices, ca) {
printbuf_reset(&err);
- ret = bch2_sb_validate(&(*ca)->disk_sb, BCH_VALIDATE_write, &err);
+ ret = bch2_sb_validate((*ca)->disk_sb.sb, 0, BCH_VALIDATE_write, &err);
if (ret) {
bch2_fs_inconsistent(c, "sb invalid before write: %s", err.buf);
goto out;
@@ -1160,7 +1181,7 @@ int bch2_write_super(struct bch_fs *c)
!can_mount_with_written), c,
": Unable to write superblock to sufficient devices (from %ps)",
(void *) _RET_IP_))
- ret = -1;
+ ret = -BCH_ERR_erofs_sb_err;
out:
/* Make new options visible after they're persistent: */
bch2_sb_update(c);
@@ -1217,11 +1238,12 @@ void bch2_sb_upgrade(struct bch_fs *c, unsigned new_version, bool incompat)
bch2_sb_field_resize(&c->disk_sb, downgrade, 0);
c->disk_sb.sb->version = cpu_to_le16(new_version);
- c->disk_sb.sb->features[0] |= cpu_to_le64(BCH_SB_FEATURES_ALL);
- if (incompat)
+ if (incompat) {
+ c->disk_sb.sb->features[0] |= cpu_to_le64(BCH_SB_FEATURES_ALL);
SET_BCH_SB_VERSION_INCOMPAT_ALLOWED(c->disk_sb.sb,
max(BCH_SB_VERSION_INCOMPAT_ALLOWED(c->disk_sb.sb), new_version));
+ }
}
static int bch2_sb_ext_validate(struct bch_sb *sb, struct bch_sb_field *f,
@@ -1451,8 +1473,8 @@ void bch2_sb_to_text(struct printbuf *out, struct bch_sb *sb,
for (id = 0; id < bch2_opts_nr; id++) {
const struct bch_option *opt = bch2_opt_table + id;
- if (opt->get_sb != BCH2_NO_SB_OPT) {
- u64 v = bch2_opt_from_sb(sb, id);
+ if (opt->get_sb) {
+ u64 v = bch2_opt_from_sb(sb, id, -1);
prt_printf(out, "%s:\t", opt->attr.name);
bch2_opt_to_text(out, NULL, sb, opt, v,
diff --git a/fs/bcachefs/super-io.h b/fs/bcachefs/super-io.h
index f1ab4f943720..78f708a6fbcd 100644
--- a/fs/bcachefs/super-io.h
+++ b/fs/bcachefs/super-io.h
@@ -21,17 +21,14 @@ static inline bool bch2_version_compatible(u16 version)
void bch2_version_to_text(struct printbuf *, enum bcachefs_metadata_version);
enum bcachefs_metadata_version bch2_latest_compatible_version(enum bcachefs_metadata_version);
-void bch2_set_version_incompat(struct bch_fs *, enum bcachefs_metadata_version);
+int bch2_set_version_incompat(struct bch_fs *, enum bcachefs_metadata_version);
-static inline bool bch2_request_incompat_feature(struct bch_fs *c,
- enum bcachefs_metadata_version version)
+static inline int bch2_request_incompat_feature(struct bch_fs *c,
+ enum bcachefs_metadata_version version)
{
- if (unlikely(version > c->sb.version_incompat)) {
- if (version > c->sb.version_incompat_allowed)
- return false;
- bch2_set_version_incompat(c, version);
- }
- return true;
+ return likely(version <= c->sb.version_incompat)
+ ? 0
+ : bch2_set_version_incompat(c, version);
}
static inline size_t bch2_sb_field_bytes(struct bch_sb_field *f)
@@ -95,6 +92,8 @@ int bch2_sb_from_fs(struct bch_fs *, struct bch_dev *);
void bch2_free_super(struct bch_sb_handle *);
int bch2_sb_realloc(struct bch_sb_handle *, unsigned);
+int bch2_sb_validate(struct bch_sb *, u64, enum bch_validate_flags, struct printbuf *);
+
int bch2_read_super(const char *, struct bch_opts *, struct bch_sb_handle *);
int bch2_read_super_silent(const char *, struct bch_opts *, struct bch_sb_handle *);
int bch2_write_super(struct bch_fs *);
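With the super-io.h change above, bch2_request_incompat_feature() returns 0 or a negative bch_errcode instead of a bool, so callers propagate the error rather than branching on false. A hedged sketch of the new calling convention (maybe_write_new_field() is illustrative; the version constant is one that appears elsewhere in this patch):

	static int maybe_write_new_field(struct bch_fs *c)
	{
		int ret = bch2_request_incompat_feature(c,
				bcachefs_metadata_version_extent_flags);
		if (ret)
			return ret;	/* -BCH_ERR_may_not_use_incompat_feature */

		/* the incompatible on-disk field may be written from here on */
		return 0;
	}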
diff --git a/fs/bcachefs/super.c b/fs/bcachefs/super.c
index 6d97d412fed9..99f9a0aaa380 100644
--- a/fs/bcachefs/super.c
+++ b/fs/bcachefs/super.c
@@ -75,9 +75,6 @@
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Kent Overstreet <kent.overstreet@gmail.com>");
MODULE_DESCRIPTION("bcachefs filesystem");
-MODULE_SOFTDEP("pre: crc32c");
-MODULE_SOFTDEP("pre: crc64");
-MODULE_SOFTDEP("pre: sha256");
MODULE_SOFTDEP("pre: chacha20");
MODULE_SOFTDEP("pre: poly1305");
MODULE_SOFTDEP("pre: xxhash");
@@ -718,7 +715,7 @@ static int bch2_fs_online(struct bch_fs *c)
kobject_add(&c->time_stats, &c->kobj, "time_stats") ?:
#endif
kobject_add(&c->counters_kobj, &c->kobj, "counters") ?:
- bch2_opts_create_sysfs_files(&c->opts_dir);
+ bch2_opts_create_sysfs_files(&c->opts_dir, OPT_FS);
if (ret) {
bch_err(c, "error creating sysfs objects");
return ret;
@@ -837,6 +834,25 @@ static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts opts)
if (ret)
goto err;
+#ifdef CONFIG_UNICODE
+ /* Default encoding until we can potentially have more as an option. */
+ c->cf_encoding = utf8_load(BCH_FS_DEFAULT_UTF8_ENCODING);
+ if (IS_ERR(c->cf_encoding)) {
+ printk(KERN_ERR "Cannot load UTF-8 encoding for filesystem. Version: %u.%u.%u",
+ unicode_major(BCH_FS_DEFAULT_UTF8_ENCODING),
+ unicode_minor(BCH_FS_DEFAULT_UTF8_ENCODING),
+ unicode_rev(BCH_FS_DEFAULT_UTF8_ENCODING));
+ ret = -EINVAL;
+ goto err;
+ }
+#else
+ if (c->sb.features & BIT_ULL(BCH_FEATURE_casefolding)) {
+ printk(KERN_ERR "Cannot mount a filesystem with casefolding on a kernel without CONFIG_UNICODE\n");
+ ret = -EINVAL;
+ goto err;
+ }
+#endif
+
pr_uuid(&name, c->sb.user_uuid.b);
ret = name.allocation_failure ? -BCH_ERR_ENOMEM_fs_name_alloc : 0;
if (ret)
@@ -1056,6 +1072,7 @@ int bch2_fs_start(struct bch_fs *c)
}
set_bit(BCH_FS_started, &c->flags);
+ wake_up(&c->ro_ref_wait);
if (c->opts.read_only) {
bch2_fs_read_only(c);
@@ -1280,8 +1297,8 @@ static int bch2_dev_sysfs_online(struct bch_fs *c, struct bch_dev *ca)
return 0;
if (!ca->kobj.state_in_sysfs) {
- ret = kobject_add(&ca->kobj, &c->kobj,
- "dev-%u", ca->dev_idx);
+ ret = kobject_add(&ca->kobj, &c->kobj, "dev-%u", ca->dev_idx) ?:
+ bch2_opts_create_sysfs_files(&ca->kobj, OPT_DEVICE);
if (ret)
return ret;
}
@@ -1412,6 +1429,13 @@ static int __bch2_dev_attach_bdev(struct bch_dev *ca, struct bch_sb_handle *sb)
ca->disk_sb = *sb;
memset(sb, 0, sizeof(*sb));
+ /*
+ * Stash pointer to the filesystem for blk_holder_ops - note that once
+ * attached to a filesystem, we will always close the block device
+ * before tearing down the filesystem object.
+ */
+ ca->disk_sb.holder->c = ca->fs;
+
ca->dev = ca->disk_sb.bdev->bd_dev;
percpu_ref_reinit(&ca->io_ref);
@@ -1811,7 +1835,11 @@ int bch2_dev_add(struct bch_fs *c, const char *path)
goto err_late;
up_write(&c->state_lock);
- return 0;
+out:
+ printbuf_exit(&label);
+ printbuf_exit(&errbuf);
+ bch_err_fn(c, ret);
+ return ret;
err_unlock:
mutex_unlock(&c->sb_lock);
@@ -1820,10 +1848,7 @@ err:
if (ca)
bch2_dev_free(ca);
bch2_free_super(&sb);
- printbuf_exit(&label);
- printbuf_exit(&errbuf);
- bch_err_fn(c, ret);
- return ret;
+ goto out;
err_late:
up_write(&c->state_lock);
ca = NULL;
@@ -1965,15 +1990,12 @@ int bch2_dev_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets)
mutex_unlock(&c->sb_lock);
if (ca->mi.freespace_initialized) {
- struct disk_accounting_pos acc = {
- .type = BCH_DISK_ACCOUNTING_dev_data_type,
- .dev_data_type.dev = ca->dev_idx,
- .dev_data_type.data_type = BCH_DATA_free,
- };
u64 v[3] = { nbuckets - old_nbuckets, 0, 0 };
ret = bch2_trans_commit_do(ca->fs, NULL, NULL, 0,
- bch2_disk_accounting_mod(trans, &acc, v, ARRAY_SIZE(v), false)) ?:
+ bch2_disk_accounting_mod2(trans, false, v, dev_data_type,
+ .dev = ca->dev_idx,
+ .data_type = BCH_DATA_free)) ?:
bch2_dev_freespace_init(c, ca, old_nbuckets, nbuckets);
if (ret)
goto err;
@@ -1997,6 +2019,102 @@ struct bch_dev *bch2_dev_lookup(struct bch_fs *c, const char *name)
return ERR_PTR(-BCH_ERR_ENOENT_dev_not_found);
}
+/* blk_holder_ops: */
+
+static struct bch_fs *bdev_get_fs(struct block_device *bdev)
+ __releases(&bdev->bd_holder_lock)
+{
+ struct bch_sb_handle_holder *holder = bdev->bd_holder;
+ struct bch_fs *c = holder->c;
+
+ if (c && !bch2_ro_ref_tryget(c))
+ c = NULL;
+
+ mutex_unlock(&bdev->bd_holder_lock);
+
+ if (c)
+ wait_event(c->ro_ref_wait, test_bit(BCH_FS_started, &c->flags));
+ return c;
+}
+
+/* returns with ref on ca->ref */
+static struct bch_dev *bdev_to_bch_dev(struct bch_fs *c, struct block_device *bdev)
+{
+ for_each_member_device(c, ca)
+ if (ca->disk_sb.bdev == bdev)
+ return ca;
+ return NULL;
+}
+
+static void bch2_fs_bdev_mark_dead(struct block_device *bdev, bool surprise)
+{
+ struct bch_fs *c = bdev_get_fs(bdev);
+ if (!c)
+ return;
+
+ struct super_block *sb = c->vfs_sb;
+ if (sb) {
+ /*
+ * Not necessary, c->ro_ref guards against the filesystem being
+ * unmounted - we only take this to avoid a warning in
+ * sync_filesystem:
+ */
+ down_read(&sb->s_umount);
+ }
+
+ down_write(&c->state_lock);
+ struct bch_dev *ca = bdev_to_bch_dev(c, bdev);
+ if (!ca)
+ goto unlock;
+
+ if (bch2_dev_state_allowed(c, ca, BCH_MEMBER_STATE_failed, BCH_FORCE_IF_DEGRADED)) {
+ __bch2_dev_offline(c, ca);
+ } else {
+ if (sb) {
+ if (!surprise)
+ sync_filesystem(sb);
+ shrink_dcache_sb(sb);
+ evict_inodes(sb);
+ }
+
+ bch2_journal_flush(&c->journal);
+ bch2_fs_emergency_read_only(c);
+ }
+
+ bch2_dev_put(ca);
+unlock:
+ if (sb)
+ up_read(&sb->s_umount);
+ up_write(&c->state_lock);
+ bch2_ro_ref_put(c);
+}
+
+static void bch2_fs_bdev_sync(struct block_device *bdev)
+{
+ struct bch_fs *c = bdev_get_fs(bdev);
+ if (!c)
+ return;
+
+ struct super_block *sb = c->vfs_sb;
+ if (sb) {
+ /*
+ * Not necessary, c->ro_ref guards against the filesystem being
+ * unmounted - we only take this to avoid a warning in
+ * sync_filesystem:
+ */
+ down_read(&sb->s_umount);
+ sync_filesystem(sb);
+ up_read(&sb->s_umount);
+ }
+
+ bch2_ro_ref_put(c);
+}
+
+const struct blk_holder_ops bch2_sb_handle_bdev_ops = {
+ .mark_dead = bch2_fs_bdev_mark_dead,
+ .sync = bch2_fs_bdev_sync,
+};
+
/* Filesystem open: */
static inline int sb_cmp(struct bch_sb *l, struct bch_sb *r)
diff --git a/fs/bcachefs/super.h b/fs/bcachefs/super.h
index 04f8287eff5c..23533bce5709 100644
--- a/fs/bcachefs/super.h
+++ b/fs/bcachefs/super.h
@@ -42,4 +42,6 @@ void bch2_fs_stop(struct bch_fs *);
int bch2_fs_start(struct bch_fs *);
struct bch_fs *bch2_fs_open(char * const *, unsigned, struct bch_opts);
+extern const struct blk_holder_ops bch2_sb_handle_bdev_ops;
+
#endif /* _BCACHEFS_SUPER_H */
diff --git a/fs/bcachefs/super_types.h b/fs/bcachefs/super_types.h
index 368a63d938cf..3a899f799d1d 100644
--- a/fs/bcachefs/super_types.h
+++ b/fs/bcachefs/super_types.h
@@ -2,13 +2,19 @@
#ifndef _BCACHEFS_SUPER_TYPES_H
#define _BCACHEFS_SUPER_TYPES_H
+struct bch_fs;
+
+struct bch_sb_handle_holder {
+ struct bch_fs *c;
+};
+
struct bch_sb_handle {
struct bch_sb *sb;
struct file *s_bdev_file;
struct block_device *bdev;
char *sb_name;
struct bio *bio;
- void *holder;
+ struct bch_sb_handle_holder *holder;
size_t buffer_size;
blk_mode_t mode;
unsigned have_layout:1;
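The typed bch_sb_handle_holder above is what lets the new blk_holder_ops in super.c map a struct block_device back to its bch_fs. A hedged sketch of the wiring on the open path: the kzalloc() and the holder->c assignment come from hunks above, while bdev_file_open_by_path()/file_bdev() are assumed to be the open helpers used elsewhere in super-io.c (not shown in this patch):

	sb->holder = kzalloc(sizeof(*sb->holder), GFP_KERNEL);
	if (!sb->holder)
		return -ENOMEM;

	sb->s_bdev_file = bdev_file_open_by_path(path, sb->mode,
						 sb->holder, &bch2_sb_handle_bdev_ops);
	if (IS_ERR(sb->s_bdev_file))
		return PTR_ERR(sb->s_bdev_file);
	sb->bdev = file_bdev(sb->s_bdev_file);

	/* later, once the device is attached to a filesystem (see super.c above): */
	ca->disk_sb.holder->c = ca->fs;	/* mark_dead/sync can now find the bch_fs */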
diff --git a/fs/bcachefs/sysfs.c b/fs/bcachefs/sysfs.c
index a7eb1f511484..251ba8224c1f 100644
--- a/fs/bcachefs/sysfs.c
+++ b/fs/bcachefs/sysfs.c
@@ -146,15 +146,14 @@ write_attribute(trigger_journal_writes);
write_attribute(trigger_btree_cache_shrink);
write_attribute(trigger_btree_key_cache_shrink);
write_attribute(trigger_freelist_wakeup);
+write_attribute(trigger_btree_updates);
read_attribute(gc_gens_pos);
read_attribute(uuid);
read_attribute(minor);
read_attribute(flags);
-read_attribute(bucket_size);
read_attribute(first_bucket);
read_attribute(nbuckets);
-rw_attribute(durability);
read_attribute(io_done);
read_attribute(io_errors);
write_attribute(io_errors_reset);
@@ -173,10 +172,8 @@ read_attribute(journal_debug);
read_attribute(btree_cache);
read_attribute(btree_key_cache);
read_attribute(btree_reserve_cache);
-read_attribute(stripes_heap);
read_attribute(open_buckets);
read_attribute(open_buckets_partial);
-read_attribute(write_points);
read_attribute(nocow_lock_table);
#ifdef BCH_WRITE_REF_DEBUG
@@ -209,8 +206,6 @@ read_attribute(usage_base);
BCH_PERSISTENT_COUNTERS()
#undef x
-rw_attribute(discard);
-read_attribute(state);
rw_attribute(label);
read_attribute(copy_gc_wait);
@@ -355,18 +350,12 @@ SHOW(bch2_fs)
if (attr == &sysfs_btree_reserve_cache)
bch2_btree_reserve_cache_to_text(out, c);
- if (attr == &sysfs_stripes_heap)
- bch2_stripes_heap_to_text(out, c);
-
if (attr == &sysfs_open_buckets)
bch2_open_buckets_to_text(out, c, NULL);
if (attr == &sysfs_open_buckets_partial)
bch2_open_buckets_partial_to_text(out, c);
- if (attr == &sysfs_write_points)
- bch2_write_points_to_text(out, c);
-
if (attr == &sysfs_compression_stats)
bch2_compression_stats_to_text(out, c);
@@ -415,6 +404,9 @@ STORE(bch2_fs)
/* Debugging: */
+ if (attr == &sysfs_trigger_btree_updates)
+ queue_work(c->btree_interior_update_worker, &c->btree_interior_update_work);
+
if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_sysfs))
return -EROFS;
@@ -566,10 +558,8 @@ struct attribute *bch2_fs_internal_files[] = {
&sysfs_btree_key_cache,
&sysfs_btree_reserve_cache,
&sysfs_new_stripes,
- &sysfs_stripes_heap,
&sysfs_open_buckets,
&sysfs_open_buckets_partial,
- &sysfs_write_points,
#ifdef BCH_WRITE_REF_DEBUG
&sysfs_write_refs,
#endif
@@ -585,6 +575,7 @@ struct attribute *bch2_fs_internal_files[] = {
&sysfs_trigger_btree_cache_shrink,
&sysfs_trigger_btree_key_cache_shrink,
&sysfs_trigger_freelist_wakeup,
+ &sysfs_trigger_btree_updates,
&sysfs_gc_gens_pos,
@@ -604,26 +595,34 @@ struct attribute *bch2_fs_internal_files[] = {
/* options */
-SHOW(bch2_fs_opts_dir)
+static ssize_t sysfs_opt_show(struct bch_fs *c,
+ struct bch_dev *ca,
+ enum bch_opt_id id,
+ struct printbuf *out)
{
- struct bch_fs *c = container_of(kobj, struct bch_fs, opts_dir);
- const struct bch_option *opt = container_of(attr, struct bch_option, attr);
- int id = opt - bch2_opt_table;
- u64 v = bch2_opt_get_by_id(&c->opts, id);
+ const struct bch_option *opt = bch2_opt_table + id;
+ u64 v;
+
+ if (opt->flags & OPT_FS) {
+ v = bch2_opt_get_by_id(&c->opts, id);
+ } else if ((opt->flags & OPT_DEVICE) && opt->get_member) {
+ v = bch2_opt_from_sb(c->disk_sb.sb, id, ca->dev_idx);
+ } else {
+ return -EINVAL;
+ }
bch2_opt_to_text(out, c, c->disk_sb.sb, opt, v, OPT_SHOW_FULL_LIST);
prt_char(out, '\n');
-
return 0;
}
-STORE(bch2_fs_opts_dir)
+static ssize_t sysfs_opt_store(struct bch_fs *c,
+ struct bch_dev *ca,
+ enum bch_opt_id id,
+ const char *buf, size_t size)
{
- struct bch_fs *c = container_of(kobj, struct bch_fs, opts_dir);
- const struct bch_option *opt = container_of(attr, struct bch_option, attr);
- int ret, id = opt - bch2_opt_table;
- char *tmp;
- u64 v;
+ const struct bch_option *opt = bch2_opt_table + id;
+ int ret = 0;
/*
* We don't need to take c->writes for correctness, but it eliminates an
@@ -632,27 +631,28 @@ STORE(bch2_fs_opts_dir)
if (unlikely(!bch2_write_ref_tryget(c, BCH_WRITE_REF_sysfs)))
return -EROFS;
- tmp = kstrdup(buf, GFP_KERNEL);
+ down_write(&c->state_lock);
+
+ char *tmp = kstrdup(buf, GFP_KERNEL);
if (!tmp) {
ret = -ENOMEM;
goto err;
}
- ret = bch2_opt_parse(c, opt, strim(tmp), &v, NULL);
+ u64 v;
+ ret = bch2_opt_parse(c, opt, strim(tmp), &v, NULL) ?:
+ bch2_opt_check_may_set(c, ca, id, v);
kfree(tmp);
if (ret < 0)
goto err;
- ret = bch2_opt_check_may_set(c, id, v);
- if (ret < 0)
- goto err;
-
- bch2_opt_set_sb(c, NULL, opt, v);
+ bch2_opt_set_sb(c, ca, opt, v);
bch2_opt_set_by_id(&c->opts, id, v);
if (v &&
(id == Opt_background_target ||
+ (id == Opt_foreground_target && !c->opts.background_target) ||
id == Opt_background_compression ||
(id == Opt_compression && !c->opts.background_compression)))
bch2_set_rebalance_needs_scan(c, 0);
@@ -664,27 +664,56 @@ STORE(bch2_fs_opts_dir)
c->copygc_thread)
wake_up_process(c->copygc_thread);
+ if (id == Opt_discard && !ca) {
+ mutex_lock(&c->sb_lock);
+ for_each_member_device(c, ca)
+ opt->set_member(bch2_members_v2_get_mut(ca->disk_sb.sb, ca->dev_idx), v);
+
+ bch2_write_super(c);
+ mutex_unlock(&c->sb_lock);
+ }
+
ret = size;
err:
+ up_write(&c->state_lock);
bch2_write_ref_put(c, BCH_WRITE_REF_sysfs);
return ret;
}
+
+SHOW(bch2_fs_opts_dir)
+{
+ struct bch_fs *c = container_of(kobj, struct bch_fs, opts_dir);
+ int id = bch2_opt_lookup(attr->name);
+ if (id < 0)
+ return 0;
+
+ return sysfs_opt_show(c, NULL, id, out);
+}
+
+STORE(bch2_fs_opts_dir)
+{
+ struct bch_fs *c = container_of(kobj, struct bch_fs, opts_dir);
+ int id = bch2_opt_lookup(attr->name);
+ if (id < 0)
+ return 0;
+
+ return sysfs_opt_store(c, NULL, id, buf, size);
+}
SYSFS_OPS(bch2_fs_opts_dir);
struct attribute *bch2_fs_opts_dir_files[] = { NULL };
-int bch2_opts_create_sysfs_files(struct kobject *kobj)
+int bch2_opts_create_sysfs_files(struct kobject *kobj, unsigned type)
{
- const struct bch_option *i;
- int ret;
-
- for (i = bch2_opt_table;
+ for (const struct bch_option *i = bch2_opt_table;
i < bch2_opt_table + bch2_opts_nr;
i++) {
- if (!(i->flags & OPT_FS))
+ if (i->flags & OPT_HIDDEN)
+ continue;
+ if (!(i->flags & type))
continue;
- ret = sysfs_create_file(kobj, &i->attr);
+ int ret = sysfs_create_file(kobj, &i->attr);
if (ret)
return ret;
}
@@ -755,11 +784,8 @@ SHOW(bch2_dev)
sysfs_printf(uuid, "%pU\n", ca->uuid.b);
- sysfs_print(bucket_size, bucket_bytes(ca));
sysfs_print(first_bucket, ca->mi.first_bucket);
sysfs_print(nbuckets, ca->mi.nbuckets);
- sysfs_print(durability, ca->mi.durability);
- sysfs_print(discard, ca->mi.discard);
if (attr == &sysfs_label) {
if (ca->mi.group)
@@ -772,11 +798,6 @@ SHOW(bch2_dev)
prt_char(out, '\n');
}
- if (attr == &sysfs_state) {
- prt_string_option(out, bch2_member_states, ca->mi.state);
- prt_char(out, '\n');
- }
-
if (attr == &sysfs_io_done)
dev_io_done_to_text(out, ca);
@@ -802,6 +823,10 @@ SHOW(bch2_dev)
if (attr == &sysfs_open_buckets)
bch2_open_buckets_to_text(out, c, ca);
+ int opt_id = bch2_opt_lookup(attr->name);
+ if (opt_id >= 0)
+ return sysfs_opt_show(c, ca, opt_id, out);
+
return 0;
}
@@ -810,18 +835,6 @@ STORE(bch2_dev)
struct bch_dev *ca = container_of(kobj, struct bch_dev, kobj);
struct bch_fs *c = ca->fs;
- if (attr == &sysfs_discard) {
- bool v = strtoul_or_return(buf);
-
- bch2_opt_set_sb(c, ca, bch2_opt_table + Opt_discard, v);
- }
-
- if (attr == &sysfs_durability) {
- u64 v = strtoul_or_return(buf);
-
- bch2_opt_set_sb(c, ca, bch2_opt_table + Opt_durability, v);
- }
-
if (attr == &sysfs_label) {
char *tmp;
int ret;
@@ -839,20 +852,20 @@ STORE(bch2_dev)
if (attr == &sysfs_io_errors_reset)
bch2_dev_errors_reset(ca);
+ int opt_id = bch2_opt_lookup(attr->name);
+ if (opt_id >= 0)
+ return sysfs_opt_store(c, ca, opt_id, buf, size);
+
return size;
}
SYSFS_OPS(bch2_dev);
struct attribute *bch2_dev_files[] = {
&sysfs_uuid,
- &sysfs_bucket_size,
&sysfs_first_bucket,
&sysfs_nbuckets,
- &sysfs_durability,
/* settings: */
- &sysfs_discard,
- &sysfs_state,
&sysfs_label,
&sysfs_has_data,
diff --git a/fs/bcachefs/sysfs.h b/fs/bcachefs/sysfs.h
index 222cd5062702..303e0433c702 100644
--- a/fs/bcachefs/sysfs.h
+++ b/fs/bcachefs/sysfs.h
@@ -23,7 +23,7 @@ extern const struct sysfs_ops bch2_fs_opts_dir_sysfs_ops;
extern const struct sysfs_ops bch2_fs_time_stats_sysfs_ops;
extern const struct sysfs_ops bch2_dev_sysfs_ops;
-int bch2_opts_create_sysfs_files(struct kobject *);
+int bch2_opts_create_sysfs_files(struct kobject *, unsigned);
#else
@@ -41,7 +41,8 @@ static const struct sysfs_ops bch2_fs_opts_dir_sysfs_ops;
static const struct sysfs_ops bch2_fs_time_stats_sysfs_ops;
static const struct sysfs_ops bch2_dev_sysfs_ops;
-static inline int bch2_opts_create_sysfs_files(struct kobject *kobj) { return 0; }
+static inline int bch2_opts_create_sysfs_files(struct kobject *kobj, unsigned type)
+{ return 0; }
#endif /* NO_BCACHEFS_SYSFS */
diff --git a/fs/bcachefs/trace.h b/fs/bcachefs/trace.h
index c1b51009edf6..519d00d62ae7 100644
--- a/fs/bcachefs/trace.h
+++ b/fs/bcachefs/trace.h
@@ -295,12 +295,12 @@ TRACE_EVENT(write_super,
/* io.c: */
-DEFINE_EVENT(bio, read_promote,
+DEFINE_EVENT(bio, io_read_promote,
TP_PROTO(struct bio *bio),
TP_ARGS(bio)
);
-TRACE_EVENT(read_nopromote,
+TRACE_EVENT(io_read_nopromote,
TP_PROTO(struct bch_fs *c, int ret),
TP_ARGS(c, ret),
@@ -319,26 +319,50 @@ TRACE_EVENT(read_nopromote,
__entry->ret)
);
-DEFINE_EVENT(bio, read_bounce,
+DEFINE_EVENT(bio, io_read_bounce,
TP_PROTO(struct bio *bio),
TP_ARGS(bio)
);
-DEFINE_EVENT(bio, read_split,
+DEFINE_EVENT(bio, io_read_split,
TP_PROTO(struct bio *bio),
TP_ARGS(bio)
);
-DEFINE_EVENT(bio, read_retry,
+DEFINE_EVENT(bio, io_read_retry,
TP_PROTO(struct bio *bio),
TP_ARGS(bio)
);
-DEFINE_EVENT(bio, read_reuse_race,
+DEFINE_EVENT(bio, io_read_reuse_race,
TP_PROTO(struct bio *bio),
TP_ARGS(bio)
);
+/* ec.c */
+
+TRACE_EVENT(stripe_create,
+ TP_PROTO(struct bch_fs *c, u64 idx, int ret),
+ TP_ARGS(c, idx, ret),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev )
+ __field(u64, idx )
+ __field(int, ret )
+ ),
+
+ TP_fast_assign(
+ __entry->dev = c->dev;
+ __entry->idx = idx;
+ __entry->ret = ret;
+ ),
+
+ TP_printk("%d,%d idx %llu ret %i",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->idx,
+ __entry->ret)
+);
+
/* Journal */
DEFINE_EVENT(bch_fs, journal_full,
@@ -797,53 +821,37 @@ TRACE_EVENT(bucket_invalidate,
/* Moving IO */
-TRACE_EVENT(bucket_evacuate,
- TP_PROTO(struct bch_fs *c, struct bpos *bucket),
- TP_ARGS(c, bucket),
-
- TP_STRUCT__entry(
- __field(dev_t, dev )
- __field(u32, dev_idx )
- __field(u64, bucket )
- ),
-
- TP_fast_assign(
- __entry->dev = c->dev;
- __entry->dev_idx = bucket->inode;
- __entry->bucket = bucket->offset;
- ),
-
- TP_printk("%d:%d %u:%llu",
- MAJOR(__entry->dev), MINOR(__entry->dev),
- __entry->dev_idx, __entry->bucket)
+DEFINE_EVENT(fs_str, io_move,
+ TP_PROTO(struct bch_fs *c, const char *str),
+ TP_ARGS(c, str)
);
-DEFINE_EVENT(fs_str, move_extent,
+DEFINE_EVENT(fs_str, io_move_read,
TP_PROTO(struct bch_fs *c, const char *str),
TP_ARGS(c, str)
);
-DEFINE_EVENT(fs_str, move_extent_read,
+DEFINE_EVENT(fs_str, io_move_write,
TP_PROTO(struct bch_fs *c, const char *str),
TP_ARGS(c, str)
);
-DEFINE_EVENT(fs_str, move_extent_write,
+DEFINE_EVENT(fs_str, io_move_finish,
TP_PROTO(struct bch_fs *c, const char *str),
TP_ARGS(c, str)
);
-DEFINE_EVENT(fs_str, move_extent_finish,
+DEFINE_EVENT(fs_str, io_move_fail,
TP_PROTO(struct bch_fs *c, const char *str),
TP_ARGS(c, str)
);
-DEFINE_EVENT(fs_str, move_extent_fail,
+DEFINE_EVENT(fs_str, io_move_write_fail,
TP_PROTO(struct bch_fs *c, const char *str),
TP_ARGS(c, str)
);
-DEFINE_EVENT(fs_str, move_extent_start_fail,
+DEFINE_EVENT(fs_str, io_move_start_fail,
TP_PROTO(struct bch_fs *c, const char *str),
TP_ARGS(c, str)
);
@@ -881,37 +889,6 @@ TRACE_EVENT(move_data,
__entry->sectors_raced)
);
-TRACE_EVENT(evacuate_bucket,
- TP_PROTO(struct bch_fs *c, struct bpos *bucket,
- unsigned sectors, unsigned bucket_size,
- int ret),
- TP_ARGS(c, bucket, sectors, bucket_size, ret),
-
- TP_STRUCT__entry(
- __field(dev_t, dev )
- __field(u64, member )
- __field(u64, bucket )
- __field(u32, sectors )
- __field(u32, bucket_size )
- __field(int, ret )
- ),
-
- TP_fast_assign(
- __entry->dev = c->dev;
- __entry->member = bucket->inode;
- __entry->bucket = bucket->offset;
- __entry->sectors = sectors;
- __entry->bucket_size = bucket_size;
- __entry->ret = ret;
- ),
-
- TP_printk("%d,%d %llu:%llu sectors %u/%u ret %i",
- MAJOR(__entry->dev), MINOR(__entry->dev),
- __entry->member, __entry->bucket,
- __entry->sectors, __entry->bucket_size,
- __entry->ret)
-);
-
TRACE_EVENT(copygc,
TP_PROTO(struct bch_fs *c,
u64 buckets,
diff --git a/fs/bcachefs/util.c b/fs/bcachefs/util.c
index e0a876cbaa6b..553de8d8e3e5 100644
--- a/fs/bcachefs/util.c
+++ b/fs/bcachefs/util.c
@@ -473,10 +473,10 @@ void bch2_time_stats_to_text(struct printbuf *out, struct bch2_time_stats *stats
u64 last_q = 0;
prt_printf(out, "quantiles (%s):\t", u->name);
- eytzinger0_for_each(i, NR_QUANTILES) {
- bool is_last = eytzinger0_next(i, NR_QUANTILES) == -1;
+ eytzinger0_for_each(j, NR_QUANTILES) {
+ bool is_last = eytzinger0_next(j, NR_QUANTILES) == -1;
- u64 q = max(quantiles->entries[i].m, last_q);
+ u64 q = max(quantiles->entries[j].m, last_q);
prt_printf(out, "%llu ", div64_u64(q, u->nsecs));
if (is_last)
prt_newline(out);
@@ -653,19 +653,25 @@ int bch2_bio_alloc_pages(struct bio *bio, size_t size, gfp_t gfp_mask)
return 0;
}
-size_t bch2_rand_range(size_t max)
+u64 bch2_get_random_u64_below(u64 ceil)
{
- size_t rand;
-
- if (!max)
- return 0;
-
- do {
- rand = get_random_long();
- rand &= roundup_pow_of_two(max) - 1;
- } while (rand >= max);
+ if (ceil <= U32_MAX)
+ return __get_random_u32_below(ceil);
+
+ /* this is the same (clever) algorithm as in __get_random_u32_below() */
+ u64 rand = get_random_u64();
+ u64 mult = ceil * rand;
+
+ if (unlikely(mult < ceil)) {
+ u64 bound;
+ div64_u64_rem(-ceil, ceil, &bound);
+ while (unlikely(mult < bound)) {
+ rand = get_random_u64();
+ mult = ceil * rand;
+ }
+ }
- return rand;
+ return mul_u64_u64_shr(ceil, rand, 64);
}
void memcpy_to_bio(struct bio *dst, struct bvec_iter dst_iter, const void *src)
@@ -698,12 +704,33 @@ void memcpy_from_bio(void *dst, struct bio *src, struct bvec_iter src_iter)
}
}
+#ifdef CONFIG_BCACHEFS_DEBUG
+void bch2_corrupt_bio(struct bio *bio)
+{
+ struct bvec_iter iter;
+ struct bio_vec bv;
+ unsigned offset = get_random_u32_below(bio->bi_iter.bi_size / sizeof(u64));
+
+ bio_for_each_segment(bv, bio, iter) {
+ unsigned u64s = bv.bv_len / sizeof(u64);
+
+ if (offset < u64s) {
+ u64 *segment = bvec_kmap_local(&bv);
+ segment[offset] = get_random_u64();
+ kunmap_local(segment);
+ return;
+ }
+ offset -= u64s;
+ }
+}
+#endif
+
#if 0
void eytzinger1_test(void)
{
- unsigned inorder, eytz, size;
+ unsigned inorder, size;
- pr_info("1 based eytzinger test:");
+ pr_info("1 based eytzinger test:\n");
for (size = 2;
size < 65536;
@@ -711,13 +738,7 @@ void eytzinger1_test(void)
unsigned extra = eytzinger1_extra(size);
if (!(size % 4096))
- pr_info("tree size %u", size);
-
- BUG_ON(eytzinger1_prev(0, size) != eytzinger1_last(size));
- BUG_ON(eytzinger1_next(0, size) != eytzinger1_first(size));
-
- BUG_ON(eytzinger1_prev(eytzinger1_first(size), size) != 0);
- BUG_ON(eytzinger1_next(eytzinger1_last(size), size) != 0);
+ pr_info("tree size %u\n", size);
inorder = 1;
eytzinger1_for_each(eytz, size) {
@@ -728,15 +749,16 @@ void eytzinger1_test(void)
inorder++;
}
+ BUG_ON(inorder - 1 != size);
}
}
void eytzinger0_test(void)
{
- unsigned inorder, eytz, size;
+ unsigned inorder, size;
- pr_info("0 based eytzinger test:");
+ pr_info("0 based eytzinger test:\n");
for (size = 1;
size < 65536;
@@ -744,13 +766,7 @@ void eytzinger0_test(void)
unsigned extra = eytzinger0_extra(size);
if (!(size % 4096))
- pr_info("tree size %u", size);
-
- BUG_ON(eytzinger0_prev(-1, size) != eytzinger0_last(size));
- BUG_ON(eytzinger0_next(-1, size) != eytzinger0_first(size));
-
- BUG_ON(eytzinger0_prev(eytzinger0_first(size), size) != -1);
- BUG_ON(eytzinger0_next(eytzinger0_last(size), size) != -1);
+ pr_info("tree size %u\n", size);
inorder = 0;
eytzinger0_for_each(eytz, size) {
@@ -761,54 +777,191 @@ void eytzinger0_test(void)
inorder++;
}
+ BUG_ON(inorder != size);
+
+ inorder = size - 1;
+ eytzinger0_for_each_prev(eytz, size) {
+ BUG_ON(eytz != eytzinger0_first(size) &&
+ eytzinger0_next(eytzinger0_prev(eytz, size), size) != eytz);
+
+ inorder--;
+ }
+ BUG_ON(inorder != -1);
}
}
-static inline int cmp_u16(const void *_l, const void *_r, size_t size)
+static inline int cmp_u16(const void *_l, const void *_r)
{
const u16 *l = _l, *r = _r;
- return (*l > *r) - (*r - *l);
+ return (*l > *r) - (*r > *l);
}
-static void eytzinger0_find_test_val(u16 *test_array, unsigned nr, u16 search)
+static void eytzinger0_find_test_le(u16 *test_array, unsigned nr, u16 search)
{
- int i, c1 = -1, c2 = -1;
- ssize_t r;
+ int r, s;
+ bool bad;
r = eytzinger0_find_le(test_array, nr,
sizeof(test_array[0]),
cmp_u16, &search);
- if (r >= 0)
- c1 = test_array[r];
-
- for (i = 0; i < nr; i++)
- if (test_array[i] <= search && test_array[i] > c2)
- c2 = test_array[i];
-
- if (c1 != c2) {
- eytzinger0_for_each(i, nr)
- pr_info("[%3u] = %12u", i, test_array[i]);
- pr_info("find_le(%2u) -> [%2zi] = %2i should be %2i",
- i, r, c1, c2);
+ if (r >= 0) {
+ if (test_array[r] > search) {
+ bad = true;
+ } else {
+ s = eytzinger0_next(r, nr);
+ bad = s >= 0 && test_array[s] <= search;
+ }
+ } else {
+ s = eytzinger0_last(nr);
+ bad = s >= 0 && test_array[s] <= search;
+ }
+
+ if (bad) {
+ s = -1;
+ eytzinger0_for_each_prev(j, nr) {
+ if (test_array[j] <= search) {
+ s = j;
+ break;
+ }
+ }
+
+ eytzinger0_for_each(j, nr)
+ pr_info("[%3u] = %12u\n", j, test_array[j]);
+ pr_info("find_le(%12u) = %3i should be %3i\n",
+ search, r, s);
+ BUG();
+ }
+}
+
+static void eytzinger0_find_test_gt(u16 *test_array, unsigned nr, u16 search)
+{
+ int r, s;
+ bool bad;
+
+ r = eytzinger0_find_gt(test_array, nr,
+ sizeof(test_array[0]),
+ cmp_u16, &search);
+ if (r >= 0) {
+ if (test_array[r] <= search) {
+ bad = true;
+ } else {
+ s = eytzinger0_prev(r, nr);
+ bad = s >= 0 && test_array[s] > search;
+ }
+ } else {
+ s = eytzinger0_first(nr);
+ bad = s >= 0 && test_array[s] > search;
+ }
+
+ if (bad) {
+ s = -1;
+ eytzinger0_for_each(j, nr) {
+ if (test_array[j] > search) {
+ s = j;
+ break;
+ }
+ }
+
+ eytzinger0_for_each(j, nr)
+ pr_info("[%3u] = %12u\n", j, test_array[j]);
+ pr_info("find_gt(%12u) = %3i should be %3i\n",
+ search, r, s);
+ BUG();
}
}
+static void eytzinger0_find_test_ge(u16 *test_array, unsigned nr, u16 search)
+{
+ int r, s;
+ bool bad;
+
+ r = eytzinger0_find_ge(test_array, nr,
+ sizeof(test_array[0]),
+ cmp_u16, &search);
+ if (r >= 0) {
+ if (test_array[r] < search) {
+ bad = true;
+ } else {
+ s = eytzinger0_prev(r, nr);
+ bad = s >= 0 && test_array[s] >= search;
+ }
+ } else {
+ s = eytzinger0_first(nr);
+ bad = s >= 0 && test_array[s] >= search;
+ }
+
+ if (bad) {
+ s = -1;
+ eytzinger0_for_each(j, nr) {
+ if (test_array[j] >= search) {
+ s = j;
+ break;
+ }
+ }
+
+ eytzinger0_for_each(j, nr)
+ pr_info("[%3u] = %12u\n", j, test_array[j]);
+ pr_info("find_ge(%12u) = %3i should be %3i\n",
+ search, r, s);
+ BUG();
+ }
+}
+
+static void eytzinger0_find_test_eq(u16 *test_array, unsigned nr, u16 search)
+{
+ unsigned r;
+ int s;
+ bool bad;
+
+ r = eytzinger0_find(test_array, nr,
+ sizeof(test_array[0]),
+ cmp_u16, &search);
+
+ if (r < nr) {
+ bad = test_array[r] != search;
+ } else {
+ s = eytzinger0_find_le(test_array, nr,
+ sizeof(test_array[0]),
+ cmp_u16, &search);
+ bad = s >= 0 && test_array[s] == search;
+ }
+
+ if (bad) {
+ eytzinger0_for_each(j, nr)
+ pr_info("[%3u] = %12u\n", j, test_array[j]);
+ pr_info("find(%12u) = %3i is incorrect\n",
+ search, r);
+ BUG();
+ }
+}
+
+static void eytzinger0_find_test_val(u16 *test_array, unsigned nr, u16 search)
+{
+ eytzinger0_find_test_le(test_array, nr, search);
+ eytzinger0_find_test_gt(test_array, nr, search);
+ eytzinger0_find_test_ge(test_array, nr, search);
+ eytzinger0_find_test_eq(test_array, nr, search);
+}
+
void eytzinger0_find_test(void)
{
unsigned i, nr, allocated = 1 << 12;
u16 *test_array = kmalloc_array(allocated, sizeof(test_array[0]), GFP_KERNEL);
for (nr = 1; nr < allocated; nr++) {
- pr_info("testing %u elems", nr);
+ u16 prev = 0;
+
+ pr_info("testing %u elems\n", nr);
get_random_bytes(test_array, nr * sizeof(test_array[0]));
eytzinger0_sort(test_array, nr, sizeof(test_array[0]), cmp_u16, NULL);
/* verify array is sorted correctly: */
- eytzinger0_for_each(i, nr)
- BUG_ON(i != eytzinger0_last(nr) &&
- test_array[i] > test_array[eytzinger0_next(i, nr)]);
+ eytzinger0_for_each(j, nr) {
+ BUG_ON(test_array[j] < prev);
+ prev = test_array[j];
+ }
for (i = 0; i < U16_MAX; i += 1 << 12)
eytzinger0_find_test_val(test_array, nr, i);
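bch2_get_random_u64_below() above is the 64-bit version of the widening-multiply trick used by __get_random_u32_below(): for a uniform 64-bit r, the high half of the 128-bit product ceil * r is uniform over [0, ceil) once the small biased region of the low half (values below 2^64 mod ceil, computed via div64_u64_rem(-ceil, ceil, ...)) is rejected. A hedged usage sketch (pick_jitter_ns() is an illustrative helper, not part of the patch):

	/* uniformly distributed delay in [0, max_ns), with no modulo bias;
	 * mirrors the old bch2_rand_range() behaviour of returning 0 for 0 */
	static u64 pick_jitter_ns(u64 max_ns)
	{
		return max_ns ? bch2_get_random_u64_below(max_ns) : 0;
	}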
diff --git a/fs/bcachefs/util.h b/fs/bcachefs/util.h
index e7c3541b38f3..7d921fc920a0 100644
--- a/fs/bcachefs/util.h
+++ b/fs/bcachefs/util.h
@@ -401,11 +401,23 @@ do { \
_ret; \
})
-size_t bch2_rand_range(size_t);
+u64 bch2_get_random_u64_below(u64);
void memcpy_to_bio(struct bio *, struct bvec_iter, const void *);
void memcpy_from_bio(void *, struct bio *, struct bvec_iter);
+#ifdef CONFIG_BCACHEFS_DEBUG
+void bch2_corrupt_bio(struct bio *);
+
+static inline void bch2_maybe_corrupt_bio(struct bio *bio, unsigned ratio)
+{
+ if (ratio && !get_random_u32_below(ratio))
+ bch2_corrupt_bio(bio);
+}
+#else
+#define bch2_maybe_corrupt_bio(...) do {} while (0)
+#endif
+
static inline void memcpy_u64s_small(void *dst, const void *src,
unsigned u64s)
{
@@ -419,7 +431,7 @@ static inline void memcpy_u64s_small(void *dst, const void *src,
static inline void __memcpy_u64s(void *dst, const void *src,
unsigned u64s)
{
-#ifdef CONFIG_X86_64
+#if defined(CONFIG_X86_64) && !defined(CONFIG_KMSAN)
long d0, d1, d2;
asm volatile("rep ; movsq"
@@ -496,7 +508,7 @@ static inline void __memmove_u64s_up(void *_dst, const void *_src,
u64 *dst = (u64 *) _dst + u64s - 1;
u64 *src = (u64 *) _src + u64s - 1;
-#ifdef CONFIG_X86_64
+#if defined(CONFIG_X86_64) && !defined(CONFIG_KMSAN)
long d0, d1, d2;
asm volatile("std ;\n"
diff --git a/fs/bcachefs/xattr.c b/fs/bcachefs/xattr.c
index aed7c6984173..f9667b944c0d 100644
--- a/fs/bcachefs/xattr.c
+++ b/fs/bcachefs/xattr.c
@@ -523,7 +523,7 @@ static int bch2_xattr_bcachefs_set(const struct xattr_handler *handler,
if (ret < 0)
goto err_class_exit;
- ret = bch2_opt_check_may_set(c, opt_id, v);
+ ret = bch2_opt_check_may_set(c, NULL, opt_id, v);
if (ret < 0)
goto err_class_exit;
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index 8054f44d39cf..584fa89bc877 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c